Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main pull request for the drm for 4.3.  Nouveau is
  probably the biggest amount of changes in here, since it missed 4.2.
  Highlights below, along with the usual bunch of fixes.

  All stuff outside drm should have applicable acks.

  Highlights:

   - new drivers:
        freescale dcu kms driver

   - core:
        more atomic fixes
        disable some dri1 interfaces on kms drivers
        drop fb panic handling; it was just getting more broken as more locking was required.
        new core fbdev Kconfig support - instead of each driver enabling/disabling it
        struct_mutex cleanups

   - panel:
        more new panels
        cleanup Kconfig

   - i915:
        Skylake support enabled by default
        legacy modesetting using atomic infrastructure
        Skylake fixes
        GEN9 workarounds

   - amdgpu:
        Fiji support
        CGS support for amdgpu
        Initial GPU scheduler - off by default
        Lots of bug fixes and optimisations.

   - radeon:
        DP fixes
        misc fixes

   - amdkfd:
        Add Carrizo support for amdkfd using amdgpu.

   - nouveau:
        long-pending cleanup of the complete driver,
        fully bisectable, which makes it larger,
        perfmon work
        more reclocking improvements
        maxwell displayport fixes

   - vmwgfx:
        new DX device support, supports OpenGL 3.3
        screen targets support

   - mgag200:
        G200eW support
        G200e new revision support

   - msm:
        dragonboard 410c support, msm8x94 support, msm8x74v1 support
        yuv format support
        dma plane support
        mdp5 rotation
        initial hdcp

   - sti:
        atomic support

   - exynos:
        lots of cleanups
        atomic modesetting/pageflipping support
        render node support

   - tegra:
        tegra210 support (dc, dsi, dp/hdmi)
        dpms with atomic modesetting support

   - atmel:
        support for 3 more atmel SoCs
        new input formats, PRIME support.

   - dwhdmi:
        preparing to add audio support

   - rockchip:
        yuv plane support"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1369 commits)
  drm/amdgpu: rename gmc_v8_0_init_compute_vmid
  drm/amdgpu: fix vce3 instance handling
  drm/amdgpu: remove ib test for the second VCE Ring
  drm/amdgpu: properly enable VM fault interrupts
  drm/amdgpu: fix warning in scheduler
  drm/amdgpu: fix buffer placement under memory pressure
  drm/amdgpu/cz: fix cz_dpm_update_low_memory_pstate logic
  drm/amdgpu: fix typo in dce11 watermark setup
  drm/amdgpu: fix typo in dce10 watermark setup
  drm/amdgpu: use top down allocation for non-CPU accessible vram
  drm/amdgpu: be explicit about cpu vram access for driver BOs (v2)
  drm/amdgpu: set MEC doorbell range for Fiji
  drm/amdgpu: implement burst NOP for SDMA
  drm/amdgpu: add insert_nop ring func and default implementation
  drm/amdgpu: add amdgpu_get_sdma_instance helper function
  drm/amdgpu: add AMDGPU_MAX_SDMA_INSTANCES
  drm/amdgpu: add burst_nop flag for sdma
  drm/amdgpu: add count field for the SDMA NOP packet v2
  drm/amdgpu: use PT for VM sync on unmap
  drm/amdgpu: make wait_event uninterruptible in push_job
  ...
diff --git a/CREDITS b/CREDITS
index 1d61664..bcb8efa 100644
--- a/CREDITS
+++ b/CREDITS
@@ -20,6 +20,10 @@
 S: (ask for current address)
 S: Finland
 
+N: Thomas Abraham
+E: thomas.ab@samsung.com
+D: Samsung pin controller driver
+
 N: Dragos Acostachioaie
 E: dragos@iname.com
 W: http://www.arbornet.org/~dragos
@@ -3219,6 +3223,11 @@
 S: 75013 Paris
 S: France
 
+N: Aleksa Sarai
+E: cyphar@cyphar.com
+W: https://www.cyphar.com/
+D: `pids` cgroup subsystem
+
 N: Dipankar Sarma
 E: dipankar@in.ibm.com
 D: RCU
diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus
new file mode 100644
index 0000000..636e938
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-vmbus
@@ -0,0 +1,29 @@
+What:		/sys/bus/vmbus/devices/vmbus_*/id
+Date:		Jul 2009
+KernelVersion:	2.6.31
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The VMBus child_relid of the device's primary channel
+Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/class_id
+Date:		Jul 2009
+KernelVersion:	2.6.31
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The VMBus interface type GUID of the device
+Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/device_id
+Date:		Jul 2009
+KernelVersion:	2.6.31
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The VMBus interface instance GUID of the device
+Users:		tools/hv/lsvmbus
+
+What:		/sys/bus/vmbus/devices/vmbus_*/channel_vp_mapping
+Date:		Jul 2015
+KernelVersion:	4.2.0
+Contact:	K. Y. Srinivasan <kys@microsoft.com>
+Description:	The mapping of which primary/sub channels are bound to which
+		Virtual Processors.
+		Format: <channel's child_relid:the bound cpu's number>
+Users:		tools/hv/lsvmbus
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
index 9aae5bf..06beefbc 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-loopback
+++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
@@ -5,4 +5,4 @@
 		The attributes:
 
 		qlen		- depth of loopback queue
-		bulk_buflen	- buffer length
+		buflen		- buffer length
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
index 29477c3..bc7ff73 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
+++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
@@ -9,4 +9,4 @@
 		isoc_maxpacket	- 0 - 1023 (fs), 0 - 1024 (hs/ss)
 		isoc_mult	- 0..2 (hs/ss only)
 		isoc_maxburst	- 0..15 (ss only)
-		qlen		- buffer length
+		buflen		- buffer length
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
index b4d0b99..d72ca17 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm3x
@@ -112,7 +112,7 @@
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
 Description: 	(RW) Mask to apply to all the context ID comparator.
 
-What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_val
+What:		/sys/bus/coresight/devices/<memory_map>.[etm|ptm]/ctxid_pid
 Date:		November 2014
 KernelVersion:	3.19
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
index 2fe2e3d..2355ed8 100644
--- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
+++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-etm4x
@@ -249,7 +249,7 @@
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
 Description:	(RW) Select which context ID comparator to work with.
 
-What:		/sys/bus/coresight/devices/<memory_map>.etm/ctxid_val
+What:		/sys/bus/coresight/devices/<memory_map>.etm/ctxid_pid
 Date:		April 2015
 KernelVersion:	4.01
 Contact:	Mathieu Poirier <mathieu.poirier@linaro.org>
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 70c9b1a..42d360f 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -413,6 +413,11 @@
 		to compute the calories burnt by the user.
 
 What:		/sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
+What:		/sys/.../iio:deviceX/in_anglvel_scale_available
+What:		/sys/.../iio:deviceX/in_magn_scale_available
+What:		/sys/.../iio:deviceX/in_illuminance_scale_available
+What:		/sys/.../iio:deviceX/in_intensity_scale_available
+What:		/sys/.../iio:deviceX/in_proximity_scale_available
 What:		/sys/.../iio:deviceX/in_voltageX_scale_available
 What:		/sys/.../iio:deviceX/in_voltage-voltage_scale_available
 What:		/sys/.../iio:deviceX/out_voltageX_scale_available
@@ -488,7 +493,7 @@
 Description:
 		Specifies the output powerdown mode.
 		DAC output stage is disconnected from the amplifier and
-		1kohm_to_gnd: connected	to ground via an 1kOhm resistor,
+		1kohm_to_gnd: connected to ground via an 1kOhm resistor,
 		6kohm_to_gnd: connected to ground via a 6kOhm resistor,
 		20kohm_to_gnd: connected to ground via a 20kOhm resistor,
 		100kohm_to_gnd: connected to ground via an 100kOhm resistor,
@@ -498,9 +503,9 @@
 		outX_powerdown_mode_available. If Y is not present the
 		mode is shared across all outputs.
 
-What:		/sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
+What:		/sys/.../iio:deviceX/out_voltageY_powerdown_mode_available
 What:		/sys/.../iio:deviceX/out_voltage_powerdown_mode_available
-What:		/sys/.../iio:deviceX/out_altvotlageY_powerdown_mode_available
+What:		/sys/.../iio:deviceX/out_altvoltageY_powerdown_mode_available
 What:		/sys/.../iio:deviceX/out_altvoltage_powerdown_mode_available
 KernelVersion:	2.6.38
 Contact:	linux-iio@vger.kernel.org
@@ -1035,13 +1040,6 @@
 Description:
 		Number of scans contained by the buffer.
 
-What:		/sys/bus/iio/devices/iio:deviceX/buffer/bytes_per_datum
-KernelVersion:	2.6.37
-Contact:	linux-iio@vger.kernel.org
-Description:
-		Bytes per scan.  Due to alignment fun, the scan may be larger
-		than implied directly by the scan_element parameters.
-
 What:		/sys/bus/iio/devices/iio:deviceX/buffer/enable
 KernelVersion:	2.6.35
 Contact:	linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-trigger-sysfs b/Documentation/ABI/testing/sysfs-bus-iio-trigger-sysfs
index 5235e6c..bbb0392 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-trigger-sysfs
+++ b/Documentation/ABI/testing/sysfs-bus-iio-trigger-sysfs
@@ -9,3 +9,12 @@
 		automated testing or in situations, where other trigger methods
 		are not applicable. For example no RTC or spare GPIOs.
 		X is the IIO index of the trigger.
+
+What:		/sys/bus/iio/devices/triggerX/name
+KernelVersion:	2.6.39
+Contact:	linux-iio@vger.kernel.org
+Description:
+		The name attribute holds a description string for the current
+		trigger. In order to associate the trigger with an IIO device
+		one should write this name string to
+		/sys/bus/iio/devices/iio:deviceY/trigger/current_trigger.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index e5cc763..864637f 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -114,6 +114,20 @@
 		enabled for the device. Developer can write y/Y/1 or n/N/0 to
 		the file to enable/disable the feature.
 
+What:		/sys/bus/usb/devices/.../power/usb3_hardware_lpm
+Date:		June 2015
+Contact:	Kevin Strasser <kevin.strasser@linux.intel.com>
+Description:
+		If CONFIG_PM is set and a USB 3.0 lpm-capable device is plugged
+		into an xHCI host which supports link PM, it will check if U1
+		and U2 exit latencies have been set in the BOS descriptor; if
+		the check is passed and the host supports USB3 hardware LPM,
+		USB3 hardware LPM will be enabled for the device and the USB
+		device directory will contain a file named
+		power/usb3_hardware_lpm. The file holds a string value (enable
+		or disable) indicating whether or not USB3 hardware LPM is
+		enabled for the device.
+
 What:		/sys/bus/usb/devices/.../removable
 Date:		February 2012
 Contact:	Matthew Garrett <mjg@redhat.com>
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index acfe9df..b07e86d 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -223,3 +223,13 @@
                 Writing 1 will issue a PERST to card which may cause the card
                 to reload the FPGA depending on load_image_on_perst.
 Users:		https://github.com/ibm-capi/libcxl
+
+What:		/sys/class/cxl/<card>/perst_reloads_same_image
+Date:		July 2015
+Contact:	linuxppc-dev@lists.ozlabs.org
+Description:	read/write
+		Trust that when an image is reloaded via PERST, it will not
+		have changed.
+		0 = don't trust, the image may be different (default)
+		1 = trust that the image will not change.
+Users:		https://github.com/ibm-capi/libcxl
diff --git a/Documentation/ABI/testing/sysfs-class-power-twl4030 b/Documentation/ABI/testing/sysfs-class-power-twl4030
new file mode 100644
index 0000000..be26af0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-power-twl4030
@@ -0,0 +1,45 @@
+What: /sys/class/power_supply/twl4030_ac/max_current
+      /sys/class/power_supply/twl4030_usb/max_current
+Description:
+	Read/Write limit on current which may
+	be drawn from the ac (Accessory Charger) or
+	USB port.
+
+	Value is in micro-Amps.
+
+	Value is set automatically to an appropriate
+	value when a cable is plugged or unplugged.
+
+	Value can be set by writing to the attribute.
+	The change will only persist until the next
+	plug event.  These events are reported via udev.
+
+
+What: /sys/class/power_supply/twl4030_usb/mode
+Description:
+	Changing mode for USB port.
+	Writing to this can disable charging.
+
+	Possible values are:
+		"auto" - draw power as appropriate for detected
+			 power source and battery status.
+		"off"  - do not draw any power.
+		"continuous"
+		       - activate mode described as "linear" in
+		         TWL data sheets.  This uses whatever
+			 current is available and doesn't switch off
+			 when voltage drops.
+
+			 This is useful for unstable power sources
+			 such as bicycle dynamo, but care should
+			 be taken that battery is not over-charged.
+
+What: /sys/class/power_supply/twl4030_ac/mode
+Description:
+	Changing mode for 'ac' port.
+	Writing to this can disable charging.
+
+	Possible values are:
+		"auto" - draw power as appropriate for detected
+			 power source and battery status.
+		"off"  - do not draw any power.
diff --git a/Documentation/ABI/testing/sysfs-driver-sunxi-sid b/Documentation/ABI/testing/sysfs-driver-sunxi-sid
deleted file mode 100644
index ffb9536..0000000
--- a/Documentation/ABI/testing/sysfs-driver-sunxi-sid
+++ /dev/null
@@ -1,22 +0,0 @@
-What:		/sys/devices/*/<our-device>/eeprom
-Date:		August 2013
-Contact:	Oliver Schinagl <oliver@schinagl.nl>
-Description:	read-only access to the SID (Security-ID) on current
-		A-series SoC's from Allwinner. Currently supports A10, A10s, A13
-		and A20 CPU's. The earlier A1x series of SoCs exports 16 bytes,
-		whereas the newer A20 SoC exposes 512 bytes split into sections.
-		Besides the 16 bytes of SID, there's also an SJTAG area,
-		HDMI-HDCP key and some custom keys. Below a quick overview, for
-		details see the user manual:
-		0x000  128 bit root-key (sun[457]i)
-		0x010  128 bit boot-key (sun7i)
-		0x020   64 bit security-jtag-key (sun7i)
-		0x028   16 bit key configuration (sun7i)
-		0x02b   16 bit custom-vendor-key (sun7i)
-		0x02c  320 bit low general key (sun7i)
-		0x040   32 bit read-control access (sun7i)
-		0x064  224 bit low general key (sun7i)
-		0x080 2304 bit HDCP-key (sun7i)
-		0x1a0  768 bit high general key (sun7i)
-Users:		any user space application which wants to read the SID on
-		Allwinner's A-series of CPU's.
diff --git a/Documentation/ABI/testing/sysfs-driver-wacom b/Documentation/ABI/testing/sysfs-driver-wacom
index c4f0fed..dca4293 100644
--- a/Documentation/ABI/testing/sysfs-driver-wacom
+++ b/Documentation/ABI/testing/sysfs-driver-wacom
@@ -77,3 +77,22 @@
 		The format is also scrambled, like in the USB mode, and it can
 		be summarized by converting 76543210 into GECA6420.
 					    HGFEDCBA      HFDB7531
+
+What:		/sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_remote/unpair_remote
+Date:		July 2015
+Contact:	linux-input@vger.kernel.org
+Description:
+		Writing the character sequence '*' followed by a newline to
+		this file will delete all of the current pairings on the
+		device. Other character sequences are reserved. This file is
+		write only.
+
+What:		/sys/bus/hid/devices/<bus>:<vid>:<pid>.<n>/wacom_remote/<serial_number>/remote_mode
+Date:		July 2015
+Contact:	linux-input@vger.kernel.org
+Description:
+		Reading from this file reports the mode status of the
+		remote as indicated by the LED lights on the device. If no
+		reports have been received from the paired device, reading
+		from this file will report '-1'. The mode is read-only
+		and cannot be set through the driver.
diff --git a/Documentation/ABI/testing/sysfs-gpio b/Documentation/ABI/testing/sysfs-gpio
index 80f4c94..55ffa2d 100644
--- a/Documentation/ABI/testing/sysfs-gpio
+++ b/Documentation/ABI/testing/sysfs-gpio
@@ -16,7 +16,8 @@
     /sys/class/gpio
 	/export ... asks the kernel to export a GPIO to userspace
 	/unexport ... to return a GPIO to the kernel
-	/gpioN ... for each exported GPIO #N
+	/gpioN ... for each exported GPIO #N OR
+	/<LINE-NAME> ... for a properly named GPIO line
 	    /value ... always readable, writes fail for input GPIOs
 	    /direction ... r/w as: in, out (default low); write: high, low
 	    /edge ... r/w as: none, falling, rising, both
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index b713c35..c06f817 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -929,13 +929,11 @@
 by Brian W. Kernighan and Dennis M. Ritchie.
 Prentice Hall, Inc., 1988.
 ISBN 0-13-110362-8 (paperback), 0-13-110370-9 (hardback).
-URL: http://cm.bell-labs.com/cm/cs/cbook/
 
 The Practice of Programming
 by Brian W. Kernighan and Rob Pike.
 Addison-Wesley, Inc., 1999.
 ISBN 0-201-61586-X.
-URL: http://cm.bell-labs.com/cm/cs/tpop/
 
 GNU manuals - where in compliance with K&R and this text - for cpp, gcc,
 gcc internals and indent, all available from http://www.gnu.org/manual/
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index b6a6a2e..93eff64 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -15,7 +15,7 @@
 	    80211.xml debugobjects.xml sh.xml regulator.xml \
 	    alsa-driver-api.xml writing-an-alsa-driver.xml \
 	    tracepoint.xml drm.xml media_api.xml w1.xml \
-	    writing_musb_glue_layer.xml crypto-API.xml
+	    writing_musb_glue_layer.xml crypto-API.xml iio.xml
 
 include Documentation/DocBook/media/Makefile
 
@@ -56,16 +56,19 @@
 
 MAN := $(patsubst %.xml, %.9, $(BOOKS))
 mandocs: $(MAN)
-	find $(obj)/man -name '*.9' | xargs gzip -f
+	find $(obj)/man -name '*.9' | xargs gzip -nf
 
 installmandocs: mandocs
 	mkdir -p /usr/local/man/man9/
-	install $(obj)/man/*.9.gz /usr/local/man/man9/
+	find $(obj)/man -name '*.9.gz' -printf '%h %f\n' | \
+		sort -k 2 -k 1 | uniq -f 1 | sed -e 's: :/:' | \
+		xargs install -m 644 -t /usr/local/man/man9/
 
 ###
 #External programs used
-KERNELDOC = $(srctree)/scripts/kernel-doc
-DOCPROC   = $(objtree)/scripts/docproc
+KERNELDOCXMLREF = $(srctree)/scripts/kernel-doc-xml-ref
+KERNELDOC       = $(srctree)/scripts/kernel-doc
+DOCPROC         = $(objtree)/scripts/docproc
 
 XMLTOFLAGS = -m $(srctree)/$(src)/stylesheet.xsl
 XMLTOFLAGS += --skip-validation
@@ -89,7 +92,7 @@
         ) > $(dir $@).$(notdir $@).cmd
 endef
 
-%.xml: %.tmpl $(KERNELDOC) $(DOCPROC) FORCE
+%.xml: %.tmpl $(KERNELDOC) $(DOCPROC) $(KERNELDOCXMLREF) FORCE
 	$(call if_changed_rule,docproc)
 
 # Tell kbuild to always build the programs
@@ -140,7 +143,20 @@
 		echo '<a HREF="$(patsubst %.html,%,$(notdir $@))/index.html"> \
 		$(patsubst %.html,%,$(notdir $@))</a><p>' > $@
 
-%.html:	%.xml
+###
+# Rules to create an aux XML and .db, and use them to re-process the DocBook XML
+# to fill internal hyperlinks
+       gen_aux_xml = :
+ quiet_gen_aux_xml = echo '  XMLREF  $@'
+silent_gen_aux_xml = :
+%.aux.xml: %.xml
+	@$($(quiet)gen_aux_xml)
+	@rm -rf $@
+	@(cat $< | egrep "^<refentry id" | egrep -o "\".*\"" | cut -f 2 -d \" > $<.db)
+	@$(KERNELDOCXMLREF) -db $<.db $< > $@
+.PRECIOUS: %.aux.xml
+
+%.html:	%.aux.xml
 	@(which xmlto > /dev/null 2>&1) || \
 	 (echo "*** You need to install xmlto ***"; \
 	  exit 1)
@@ -150,12 +166,12 @@
             cp $(PNG-$(basename $(notdir $@))) $(patsubst %.html,%,$@); fi
 
 quiet_cmd_db2man = MAN     $@
-      cmd_db2man = if grep -q refentry $<; then xmlto man $(XMLTOFLAGS) -o $(obj)/man $< ; fi
+      cmd_db2man = if grep -q refentry $<; then xmlto man $(XMLTOFLAGS) -o $(obj)/man/$(*F) $< ; fi
 %.9 : %.xml
 	@(which xmlto > /dev/null 2>&1) || \
 	 (echo "*** You need to install xmlto ***"; \
 	  exit 1)
-	$(Q)mkdir -p $(obj)/man
+	$(Q)mkdir -p $(obj)/man/$(*F)
 	$(call cmd,db2man)
 	@touch $@
 
@@ -209,15 +225,18 @@
 ###
 # Temporary files left by various tools
 clean-files := $(DOCBOOKS) \
-	$(patsubst %.xml, %.dvi,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.aux,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.tex,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.log,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.out,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.ps,   $(DOCBOOKS)) \
-	$(patsubst %.xml, %.pdf,  $(DOCBOOKS)) \
-	$(patsubst %.xml, %.html, $(DOCBOOKS)) \
-	$(patsubst %.xml, %.9,    $(DOCBOOKS)) \
+	$(patsubst %.xml, %.dvi,     $(DOCBOOKS)) \
+	$(patsubst %.xml, %.aux,     $(DOCBOOKS)) \
+	$(patsubst %.xml, %.tex,     $(DOCBOOKS)) \
+	$(patsubst %.xml, %.log,     $(DOCBOOKS)) \
+	$(patsubst %.xml, %.out,     $(DOCBOOKS)) \
+	$(patsubst %.xml, %.ps,      $(DOCBOOKS)) \
+	$(patsubst %.xml, %.pdf,     $(DOCBOOKS)) \
+	$(patsubst %.xml, %.html,    $(DOCBOOKS)) \
+	$(patsubst %.xml, %.9,       $(DOCBOOKS)) \
+	$(patsubst %.xml, %.aux.xml, $(DOCBOOKS)) \
+	$(patsubst %.xml, %.xml.db,  $(DOCBOOKS)) \
+	$(patsubst %.xml, %.xml,     $(DOCBOOKS)) \
 	$(index)
 
 clean-dirs := $(patsubst %.xml,%,$(DOCBOOKS)) man
diff --git a/Documentation/DocBook/alsa-driver-api.tmpl b/Documentation/DocBook/alsa-driver-api.tmpl
index 71f9246..e94a10b 100644
--- a/Documentation/DocBook/alsa-driver-api.tmpl
+++ b/Documentation/DocBook/alsa-driver-api.tmpl
@@ -108,7 +108,7 @@
      <sect1><title>ASoC Core API</title>
 !Iinclude/sound/soc.h
 !Esound/soc/soc-core.c
-!Esound/soc/soc-cache.c
+<!-- !Esound/soc/soc-cache.c no docbook comments here -->
 !Esound/soc/soc-devres.c
 !Esound/soc/soc-io.c
 !Esound/soc/soc-pcm.c
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
index 0992531..07df23e 100644
--- a/Documentation/DocBook/crypto-API.tmpl
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -585,7 +585,7 @@
 +-----------+                                    |
 |           |            (1)
 |   aead    | <-----------------------------------  esp_output
-| (seqniv)  | ---+
+|  (seqiv)  | ---+
 +-----------+    |
                  | (2)
 +-----------+    |
@@ -1101,7 +1101,7 @@
     </para>
 
     <para>
-     [1] http://www.chronox.de/libkcapi.html
+     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
     </para>
 
    </sect1>
@@ -1661,7 +1661,7 @@
     </para>
 
     <para>
-     [1] http://www.chronox.de/libkcapi.html
+     [1] <ulink url="http://www.chronox.de/libkcapi.html">http://www.chronox.de/libkcapi.html</ulink>
     </para>
 
    </sect1>
@@ -1687,7 +1687,7 @@
 !Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
 !Finclude/linux/crypto.h crypto_alg
 !Finclude/linux/crypto.h ablkcipher_alg
-!Finclude/linux/crypto.h aead_alg
+!Finclude/crypto/aead.h aead_alg
 !Finclude/linux/crypto.h blkcipher_alg
 !Finclude/linux/crypto.h cipher_alg
 !Finclude/crypto/rng.h rng_alg
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index faf09d4..bbc1d7e 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -66,6 +66,7 @@
 !Ekernel/time/hrtimer.c
      </sect1>
      <sect1><title>Workqueues and Kevents</title>
+!Iinclude/linux/workqueue.h
 !Ekernel/workqueue.c
      </sect1>
      <sect1><title>Internal Functions</title>
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index bcdfdb9..6006b63 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -146,36 +146,30 @@
 The journalling layer is  easy to use. You need to
 first of all create a journal_t data structure. There are
 two calls to do this dependent on how you decide to allocate the physical
-media on which the journal resides. The journal_init_inode() call
-is for journals stored in filesystem inodes, or the journal_init_dev()
-call can be use for journal stored on a raw device (in a continuous range
+media on which the journal resides. The jbd2_journal_init_inode() call
+is for journals stored in filesystem inodes, or the jbd2_journal_init_dev()
+call can be used for a journal stored on a raw device (in a continuous range
 of blocks). A journal_t is a typedef for a struct pointer, so when
-you are finally finished make sure you call journal_destroy() on it
+you are finally finished make sure you call jbd2_journal_destroy() on it
 to free up any used kernel memory.
 </para>
 
 <para>
 Once you have got your journal_t object you need to 'mount' or load the journal
-file, unless of course you haven't initialised it yet - in which case you
-need to call journal_create().
+file. The journalling layer expects the space for the journal was already
+allocated and initialized properly by the userspace tools.  When loading the
+journal you must call jbd2_journal_load() to process journal contents.  If the
+client file system detects the journal contents do not need to be processed
+(or even need not have valid contents), it may call jbd2_journal_wipe() to
+clear the journal contents before calling jbd2_journal_load().
 </para>
 
 <para>
-Most of the time however your journal file will already have been created, but
-before you load it you must call journal_wipe() to empty the journal file.
-Hang on, you say , what if the filesystem wasn't cleanly umount()'d . Well, it is the
-job of the client file system to detect this and skip the call to journal_wipe().
-</para>
-
-<para>
-In either case the next call should be to journal_load() which prepares the
-journal file for use. Note that journal_wipe(..,0) calls journal_skip_recovery()
-for you if it detects any outstanding transactions in the journal and similarly
-journal_load() will call journal_recover() if necessary.
-I would advise reading fs/ext3/super.c for examples on this stage.
-[RGG: Why is the journal_wipe() call necessary - doesn't this needlessly
-complicate the API. Or isn't a good idea for the journal layer to hide
-dirty mounts from the client fs]
+Note that jbd2_journal_wipe(..,0) calls jbd2_journal_skip_recovery() for you if
+it detects any outstanding transactions in the journal and similarly
+jbd2_journal_load() will call jbd2_journal_recover() if necessary.  I would
+advise reading ext4_load_journal() in fs/ext4/super.c for examples on this
+stage.
 </para>
 
 <para>
@@ -189,41 +183,41 @@
 is done by wrapping them into transactions. Additionally you
 also need to wrap the modification of each of the buffers
 with calls to the journal layer, so it knows what the modifications
-you are actually making are. To do this use  journal_start() which
+you are actually making are. To do this use jbd2_journal_start() which
 returns a transaction handle.
 </para>
 
 <para>
-journal_start()
-and its counterpart journal_stop(), which indicates the end of a transaction
-are nestable calls, so you can reenter a transaction if necessary,
-but remember you must call journal_stop() the same number of times as
-journal_start() before the transaction is completed (or more accurately
-leaves the update phase). Ext3/VFS makes use of this feature to simplify
-quota support.
+jbd2_journal_start()
+and its counterpart jbd2_journal_stop(), which indicates the end of a
+transaction are nestable calls, so you can reenter a transaction if necessary,
+but remember you must call jbd2_journal_stop() the same number of times as
+jbd2_journal_start() before the transaction is completed (or more accurately
+leaves the update phase). Ext4/VFS makes use of this feature to simplify
+handling of inode dirtying, quota support, etc.
 </para>
 
 <para>
 Inside each transaction you need to wrap the modifications to the
 individual buffers (blocks). Before you start to modify a buffer you
-need to call journal_get_{create,write,undo}_access() as appropriate,
+need to call jbd2_journal_get_{create,write,undo}_access() as appropriate,
 this allows the journalling layer to copy the unmodified data if it
 needs to. After all the buffer may be part of a previously uncommitted
 transaction.
 At this point you are at last ready to modify a buffer, and once
-you are have done so you need to call journal_dirty_{meta,}data().
+you have done so you need to call jbd2_journal_dirty_{meta,}data().
 Or if you've asked for access to a buffer you now know is now longer
-required to be pushed back on the device you can call journal_forget()
+required to be pushed back on the device you can call jbd2_journal_forget()
 in much the same way as you might have used bforget() in the past.
 </para>
 
 <para>
-A journal_flush() may be called at any time to commit and checkpoint
+A jbd2_journal_flush() may be called at any time to commit and checkpoint
 all your transactions.
 </para>
 
 <para>
-Then at umount time , in your put_super() you can then call journal_destroy()
+Then at umount time, in your put_super() you can call jbd2_journal_destroy()
 to clean up your in-core journal object.
 </para>
 
@@ -231,53 +225,68 @@
 Unfortunately there a couple of ways the journal layer can cause a deadlock.
 The first thing to note is that each task can only have
 a single outstanding transaction at any one time, remember nothing
-commits until the outermost journal_stop(). This means
+commits until the outermost jbd2_journal_stop(). This means
 you must complete the transaction at the end of each file/inode/address
 etc. operation you perform, so that the journalling system isn't re-entered
 on another journal. Since transactions can't be nested/batched
 across differing journals, and another filesystem other than
-yours (say ext3) may be modified in a later syscall.
+yours (say ext4) may be modified in a later syscall.
 </para>
 
 <para>
-The second case to bear in mind is that journal_start() can
+The second case to bear in mind is that jbd2_journal_start() can
 block if there isn't enough space in the journal for your transaction
 (based on the passed nblocks param) - when it blocks it merely(!) needs to
 wait for transactions to complete and be committed from other tasks,
-so essentially we are waiting for journal_stop(). So to avoid
-deadlocks you must treat journal_start/stop() as if they
+so essentially we are waiting for jbd2_journal_stop(). So to avoid
+deadlocks you must treat jbd2_journal_start/stop() as if they
 were semaphores and include them in your semaphore ordering rules to prevent
-deadlocks. Note that journal_extend() has similar blocking behaviour to
-journal_start() so you can deadlock here just as easily as on journal_start().
+deadlocks. Note that jbd2_journal_extend() has similar blocking behaviour to
+jbd2_journal_start() so you can deadlock here just as easily as on
+jbd2_journal_start().
 </para>
 
 <para>
 Try to reserve the right number of blocks the first time. ;-). This will
 be the maximum number of blocks you are going to touch in this transaction.
-I advise having a look at at least ext3_jbd.h to see the basis on which
-ext3 uses to make these decisions.
+I advise having a look at at least ext4_jbd2.h to see the basis on which
+ext4 makes these decisions.
 </para>
 
 <para>
 Another wriggle to watch out for is your on-disk block allocation strategy.
-why? Because, if you undo a delete, you need to ensure you haven't reused any
-of the freed blocks in a later transaction. One simple way of doing this
-is make sure any blocks you allocate only have checkpointed transactions
-listed against them. Ext3 does this in ext3_test_allocatable().
+Why? Because, if you do a delete, you need to ensure you haven't reused any
+of the freed blocks until the transaction freeing these blocks commits. If you
+reused these blocks and a crash happens, there is no way to restore the contents
+of the reallocated blocks at the end of the last fully committed transaction.
+
+One simple way of doing this is to mark blocks as free in internal in-memory
+block allocation structures only after the transaction freeing them commits.
+Ext4 uses journal commit callback for this purpose.
 </para>
 
 <para>
-Lock is also providing through journal_{un,}lock_updates(),
-ext3 uses this when it wants a window with a clean and stable fs for a moment.
-eg.
+With journal commit callbacks you can ask the journalling layer to call a
+callback function when the transaction is finally committed to disk, so that
+you can do some of your own management. You ask the journalling layer to
+call the callback by simply setting the journal->j_commit_callback function
+pointer; that function is then called after each transaction commit. You can also
+use transaction->t_private_list for attaching entries to a transaction that
+need processing when the transaction commits.
+</para>
+
+<para>
+JBD2 also provides a way to block all transaction updates via
+jbd2_journal_{un,}lock_updates(). Ext4 uses this when it wants a window with a
+clean and stable fs for a moment.  E.g.
 </para>
 
 <programlisting>
 
-	journal_lock_updates() //stop new stuff happening..
-	journal_flush()        // checkpoint everything.
+	jbd2_journal_lock_updates() //stop new stuff happening..
+	jbd2_journal_flush()        // checkpoint everything.
 	..do stuff on stable fs
-	journal_unlock_updates() // carry on with filesystem use.
+	jbd2_journal_unlock_updates() // carry on with filesystem use.
 </programlisting>
 
 <para>
@@ -286,29 +295,6 @@
 calls.
 </para>
 
-<para>
-A new feature of jbd since 2.5.25 is commit callbacks with the new
-journal_callback_set() function you can now ask the journalling layer
-to call you back when the transaction is finally committed to disk, so that
-you can do some of your own management. The key to this is the journal_callback
-struct, this maintains the internal callback information but you can
-extend it like this:-
-</para>
-<programlisting>
-	struct  myfs_callback_s {
-		//Data structure element required by jbd..
-		struct journal_callback for_jbd;
-		// Stuff for myfs allocated together.
-		myfs_inode*    i_commited;
-
-	}
-</programlisting>
-
-<para>
-this would be useful if you needed to know when data was committed to a
-particular inode.
-</para>
-
     </sect2>
 
     <sect2 id="jbd_summary">
@@ -319,36 +305,6 @@
 to tell the journalling layer about them.
 </para>
 
-<para>
-Here is a some pseudo code to give you an idea of how it works, as
-an example.
-</para>
-
-<programlisting>
-  journal_t* my_jnrl = journal_create();
-  journal_init_{dev,inode}(jnrl,...)
-  if (clean) journal_wipe();
-  journal_load();
-
-   foreach(transaction) { /*transactions must be
-                            completed before
-                            a syscall returns to
-                            userspace*/
-
-          handle_t * xct=journal_start(my_jnrl);
-          foreach(bh) {
-                journal_get_{create,write,undo}_access(xact,bh);
-                if ( myfs_modify(bh) ) { /* returns true
-                                        if makes changes */
-                           journal_dirty_{meta,}data(xact,bh);
-                } else {
-                           journal_forget(bh);
-                }
-          }
-          journal_stop(xct);
-   }
-   journal_destroy(my_jrnl);
-</programlisting>
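+<para>
+Here is some pseudo code to give you an idea of how it works, as an
+example (the myfs_* names are placeholders for your own filesystem's
+helpers):
+</para>
+
+<programlisting>
+  journal_t *my_jnrl = jbd2_journal_init_{dev,inode}(...);
+  if (clean)
+          jbd2_journal_wipe(my_jnrl, 0);
+  jbd2_journal_load(my_jnrl);
+
+  foreach(transaction) { /* transactions must be completed before
+                            a syscall returns to userspace */
+
+          handle_t *xct = jbd2_journal_start(my_jnrl, nblocks);
+          foreach(bh) {
+                  jbd2_journal_get_{create,write,undo}_access(xct, bh);
+                  if (myfs_modify(bh)) { /* returns true if it makes changes */
+                          jbd2_journal_dirty_{meta,}data(xct, bh);
+                  } else {
+                          jbd2_journal_forget(xct, bh);
+                  }
+          }
+          jbd2_journal_stop(xct);
+  }
+  jbd2_journal_destroy(my_jnrl);
+</programlisting>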
     </sect2>
 
     </sect1>
@@ -357,13 +313,13 @@
      <title>Data Types</title>
      <para>
 	The journalling layer uses typedefs to 'hide' the concrete definitions
-	of the structures used. As a client of the JBD layer you can
+	of the structures used. As a client of the JBD2 layer you can
 	just rely on the using the pointer as a magic cookie  of some sort.
 
 	Obviously the hiding is not enforced as this is 'C'.
      </para>
 	<sect2 id="structures"><title>Structures</title>
-!Iinclude/linux/jbd.h
+!Iinclude/linux/jbd2.h
 	</sect2>
     </sect1>
 
@@ -375,11 +331,11 @@
 	manage transactions
      </para>
 	<sect2 id="journal_level"><title>Journal Level</title>
-!Efs/jbd/journal.c
-!Ifs/jbd/recovery.c
+!Efs/jbd2/journal.c
+!Ifs/jbd2/recovery.c
 	</sect2>
 	<sect2 id="transaction_level"><title>Transasction Level</title>
-!Efs/jbd/transaction.c
+!Efs/jbd2/transaction.c
 	</sect2>
     </sect1>
     <sect1 id="see_also">
diff --git a/Documentation/DocBook/iio.tmpl b/Documentation/DocBook/iio.tmpl
new file mode 100644
index 0000000..06bb53d
--- /dev/null
+++ b/Documentation/DocBook/iio.tmpl
@@ -0,0 +1,697 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="iioid">
+  <bookinfo>
+    <title>Industrial I/O driver developer's guide </title>
+
+    <authorgroup>
+      <author>
+        <firstname>Daniel</firstname>
+        <surname>Baluta</surname>
+        <affiliation>
+          <address>
+            <email>daniel.baluta@intel.com</email>
+          </address>
+        </affiliation>
+      </author>
+    </authorgroup>
+
+    <copyright>
+      <year>2015</year>
+      <holder>Intel Corporation</holder>
+    </copyright>
+
+    <legalnotice>
+      <para>
+        This documentation is free software; you can redistribute
+        it and/or modify it under the terms of the GNU General Public
+        License version 2.
+      </para>
+    </legalnotice>
+  </bookinfo>
+
+  <toc></toc>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+      The main purpose of the Industrial I/O subsystem (IIO) is to provide
+      support for devices that in some sense perform either analog-to-digital
+      conversion (ADC) or digital-to-analog conversion (DAC) or both. The aim
+      is to fill the gap between the somewhat similar hwmon and input
+      subsystems.
+      Hwmon is directed at low sample rate sensors used to monitor and
+      control the system itself, like fan speed control or temperature
+      measurement. Input is, as its name suggests, focused on human interaction
+      input devices (keyboard, mouse, touchscreen). In some cases there is
+      considerable overlap between these and IIO.
+  </para>
+  <para>
+    Devices that fall into this category include:
+    <itemizedlist>
+      <listitem>
+        analog to digital converters (ADCs)
+      </listitem>
+      <listitem>
+        accelerometers
+      </listitem>
+      <listitem>
+        capacitance to digital converters (CDCs)
+      </listitem>
+      <listitem>
+        digital to analog converters (DACs)
+      </listitem>
+      <listitem>
+        gyroscopes
+      </listitem>
+      <listitem>
+        inertial measurement units (IMUs)
+      </listitem>
+      <listitem>
+        color and light sensors
+      </listitem>
+      <listitem>
+        magnetometers
+      </listitem>
+      <listitem>
+        pressure sensors
+      </listitem>
+      <listitem>
+        proximity sensors
+      </listitem>
+      <listitem>
+        temperature sensors
+      </listitem>
+    </itemizedlist>
+    Usually these sensors are connected via SPI or I2C. A common use case of
+    these sensor devices is to have combined functionality (e.g. light plus proximity
+    sensor).
+  </para>
+  </chapter>
+  <chapter id='iiosubsys'>
+    <title>Industrial I/O core</title>
+    <para>
+      The Industrial I/O core offers:
+      <itemizedlist>
+        <listitem>
+         a unified framework for writing drivers for many different types of
+         embedded sensors.
+        </listitem>
+        <listitem>
+         a standard interface to user space applications manipulating sensors.
+        </listitem>
+      </itemizedlist>
+      The implementation can be found under <filename>
+      drivers/iio/industrialio-*</filename>
+  </para>
+  <sect1 id="iiodevice">
+    <title> Industrial I/O devices </title>
+
+!Finclude/linux/iio/iio.h iio_dev
+!Fdrivers/iio/industrialio-core.c iio_device_alloc
+!Fdrivers/iio/industrialio-core.c iio_device_free
+!Fdrivers/iio/industrialio-core.c iio_device_register
+!Fdrivers/iio/industrialio-core.c iio_device_unregister
+
+    <para>
+      An IIO device usually corresponds to a single hardware sensor and it
+      provides all the information needed by a driver handling a device.
+      Let's first have a look at the functionality embedded in an IIO
+      device then we will show how a device driver makes use of an IIO
+      device.
+    </para>
+    <para>
+        There are two ways for a user space application to interact
+        with an IIO driver.
+      <itemizedlist>
+        <listitem>
+          <filename>/sys/bus/iio/iio:deviceX/</filename>, this
+          represents a hardware sensor and groups together the data
+          channels of the same chip.
+        </listitem>
+        <listitem>
+          <filename>/dev/iio:deviceX</filename>, character device node
+          interface used for buffered data transfer and for events information
+          retrieval.
+        </listitem>
+      </itemizedlist>
+    </para>
+    A typical IIO driver will register itself as an I2C or SPI driver and will
+    create two routines, <function> probe </function> and <function> remove
+    </function>. At <function>probe</function>:
+    <itemizedlist>
+    <listitem>call <function>iio_device_alloc</function>, which allocates memory
+      for an IIO device.
+    </listitem>
+    <listitem> initialize IIO device fields with driver specific information
+              (e.g. device name, device channels).
+    </listitem>
+    <listitem>call <function> iio_device_register</function>, this registers the
+      device with the IIO core. After this call the device is ready to accept
+      requests from user space applications.
+    </listitem>
+    </itemizedlist>
+      At <function>remove</function>, we free the resources allocated in
+      <function>probe</function> in reverse order:
+    <itemizedlist>
+    <listitem><function>iio_device_unregister</function>, unregister the device
+      from the IIO core.
+    </listitem>
+    <listitem><function>iio_device_free</function>, free the memory allocated
+      for the IIO device.
+    </listitem>
+    </itemizedlist>
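+
+    <para>
+      Putting the steps above together, a skeleton probe/remove pair might
+      look roughly like this (the my_sensor_* identifiers - the private data
+      structure, the channel array and the iio_info structure - are
+      placeholders for driver specific definitions):
+      <programlisting>
+      static int my_sensor_probe(struct i2c_client *client,
+                                 const struct i2c_device_id *id)
+      {
+          struct iio_dev *indio_dev;
+          int ret;
+
+          /* allocate the IIO device together with driver private data */
+          indio_dev = iio_device_alloc(sizeof(struct my_sensor_data));
+          if (!indio_dev)
+              return -ENOMEM;
+
+          /* fill in driver specific information */
+          indio_dev->name = "my_sensor";
+          indio_dev->info = &amp;my_sensor_iio_info;
+          indio_dev->modes = INDIO_DIRECT_MODE;
+          indio_dev->channels = my_sensor_channels;
+          indio_dev->num_channels = ARRAY_SIZE(my_sensor_channels);
+
+          /* register with the IIO core */
+          ret = iio_device_register(indio_dev);
+          if (ret) {
+              iio_device_free(indio_dev);
+              return ret;
+          }
+          i2c_set_clientdata(client, indio_dev);
+          return 0;
+      }
+
+      static int my_sensor_remove(struct i2c_client *client)
+      {
+          struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+          /* undo probe in reverse order */
+          iio_device_unregister(indio_dev);
+          iio_device_free(indio_dev);
+          return 0;
+      }
+      </programlisting>
+    </para>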
+
+    <sect2 id="iioattr"> <title> IIO device sysfs interface </title>
+      <para>
+        Attributes are sysfs files used to expose chip info and also to allow
+        applications to set various configuration parameters. For a device
+        with index X, attributes can be found under the
+        <filename>/sys/bus/iio/iio:deviceX/</filename> directory.
+        Common attributes are:
+        <itemizedlist>
+          <listitem><filename>name</filename>, description of the physical
+            chip.
+          </listitem>
+          <listitem><filename>dev</filename>, shows the major:minor pair
+            associated with <filename>/dev/iio:deviceX</filename> node.
+          </listitem>
+          <listitem><filename>sampling_frequency_available</filename>,
+            available discrete set of sampling frequency values for
+            device.
+          </listitem>
+      </itemizedlist>
+      Available standard attributes for IIO devices are described in the
+      <filename>Documentation/ABI/testing/sysfs-bus-iio </filename> file
+      in the Linux kernel sources.
+      </para>
+    </sect2>
+    <sect2 id="iiochannel"> <title> IIO device channels </title>
+!Finclude/linux/iio/iio.h iio_chan_spec structure.
+      <para>
+        An IIO device channel is a representation of a data channel. An
+        IIO device can have one or multiple channels. For example:
+        <itemizedlist>
+          <listitem>
+          a thermometer sensor has one channel representing the
+          temperature measurement.
+          </listitem>
+          <listitem>
+          a light sensor with two channels indicating the measurements in
+          the visible and infrared spectrum.
+          </listitem>
+          <listitem>
+          an accelerometer can have up to 3 channels representing
+          acceleration on X, Y and Z axes.
+          </listitem>
+        </itemizedlist>
+      An IIO channel is described by the <type> struct iio_chan_spec
+      </type>. A thermometer driver for the temperature sensor in the
+      example above would have to describe its channel as follows:
+      <programlisting>
+      static const struct iio_chan_spec temp_channel[] = {
+          {
+              .type = IIO_TEMP,
+              .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+          },
+      };
+
+      </programlisting>
+      Channel sysfs attributes exposed to userspace are specified in
+      the form of <emphasis>bitmasks</emphasis>. Depending on their
+      shared info, attributes can be set in one of the following masks:
+      <itemizedlist>
+      <listitem><emphasis>info_mask_separate</emphasis>, attributes will
+        be specific to this channel</listitem>
+      <listitem><emphasis>info_mask_shared_by_type</emphasis>,
+        attributes are shared by all channels of the same type</listitem>
+      <listitem><emphasis>info_mask_shared_by_dir</emphasis>, attributes
+        are shared by all channels of the same direction </listitem>
+      <listitem><emphasis>info_mask_shared_by_all</emphasis>,
+        attributes are shared by all channels</listitem>
+      </itemizedlist>
+      When there are multiple data channels per channel type we have two
+      ways to distinguish between them:
+      <itemizedlist>
+      <listitem> set <emphasis> .modified</emphasis> field of <type>
+        iio_chan_spec</type> to 1. Modifiers are specified using
+        <emphasis>.channel2</emphasis> field of the same
+        <type>iio_chan_spec</type> structure and are used to indicate a
+        physically unique characteristic of the channel such as its direction
+        or spectral response. For example, a light sensor can have two channels,
+        one for infrared light and one for both infrared and visible light.
+      </listitem>
+      <listitem> set <emphasis>.indexed </emphasis> field of
+        <type>iio_chan_spec</type> to 1. In this case the channel is
+        simply another instance with an index specified by the
+        <emphasis>.channel</emphasis> field.
+      </listitem>
+      </itemizedlist>
+      Here is how we can make use of the channel's modifiers:
+      <programlisting>
+      static const struct iio_chan_spec light_channels[] = {
+          {
+              .type = IIO_INTENSITY,
+              .modified = 1,
+              .channel2 = IIO_MOD_LIGHT_IR,
+              .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+              .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+          },
+          {
+              .type = IIO_INTENSITY,
+              .modified = 1,
+              .channel2 = IIO_MOD_LIGHT_BOTH,
+              .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+              .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+          },
+          {
+              .type = IIO_LIGHT,
+              .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+              .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+          },
+
+      };
+      </programlisting>
+      These channel definitions will generate two separate sysfs files
+      for raw data retrieval:
+      <itemizedlist>
+      <listitem>
+      <filename>/sys/bus/iio/iio:deviceX/in_intensity_ir_raw</filename>
+      </listitem>
+      <listitem>
+      <filename>/sys/bus/iio/iio:deviceX/in_intensity_both_raw</filename>
+      </listitem>
+      </itemizedlist>
+      one file for processed data:
+      <itemizedlist>
+      <listitem>
+      <filename>/sys/bus/iio/iio:deviceX/in_illuminance_input
+      </filename>
+      </listitem>
+      </itemizedlist>
+      and one shared sysfs file for sampling frequency:
+      <itemizedlist>
+      <listitem>
+      <filename>/sys/bus/iio/iio:deviceX/sampling_frequency.
+      </filename>
+      </listitem>
+      </itemizedlist>
+      </para>
+      <para>
+      Here is how we can make use of the channel's indexing:
+      <programlisting>
+      static const struct iio_chan_spec light_channels[] = {
+          {
+              .type = IIO_VOLTAGE,
+              .indexed = 1,
+              .channel = 0,
+              .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+          },
+          {
+              .type = IIO_VOLTAGE,
+              .indexed = 1,
+              .channel = 1,
+              .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+          },
+      };
+      </programlisting>
+      This will generate two separate attribute files for raw data
+      retrieval:
+      <itemizedlist>
+      <listitem>
+        <filename>/sys/bus/iio/devices/iio:deviceX/in_voltage0_raw</filename>,
+          representing voltage measurement for channel 0.
+      </listitem>
+      <listitem>
+        <filename>/sys/bus/iio/devices/iio:deviceX/in_voltage1_raw</filename>,
+          representing voltage measurement for channel 1.
+      </listitem>
+      </itemizedlist>
+      </para>
+    </sect2>
+  </sect1>
+
+  <sect1 id="iiobuffer"> <title> Industrial I/O buffers </title>
+!Finclude/linux/iio/buffer.h iio_buffer
+!Edrivers/iio/industrialio-buffer.c
+
+    <para>
+    The Industrial I/O core offers a way for continuous data capture
+    based on a trigger source. Multiple data channels can be read at once
+    from <filename>/dev/iio:deviceX</filename> character device node,
+    thus reducing the CPU load.
+    </para>
+
+    <sect2 id="iiobuffersysfs">
+    <title>IIO buffer sysfs interface </title>
+    <para>
+      An IIO buffer has an associated attributes directory under <filename>
+      /sys/bus/iio/iio:deviceX/buffer/</filename>. Here are the existing
+      attributes:
+      <itemizedlist>
+      <listitem>
+      <emphasis>length</emphasis>, the total number of data samples
+      (capacity) that can be stored by the buffer.
+      </listitem>
+      <listitem>
+        <emphasis>enable</emphasis>, activate buffer capture.
+      </listitem>
+      </itemizedlist>
+
+    </para>
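+    <para>
+      For example, assuming a device with index 0 whose desired channels have
+      already been enabled under the scan_elements directory (described in the
+      next section), a capture could be started from the shell along these
+      lines:
+      <programlisting>
+      $ echo 128 > /sys/bus/iio/devices/iio:device0/buffer/length
+      $ echo 1 > /sys/bus/iio/devices/iio:device0/buffer/enable
+      $ cat /dev/iio:device0 > samples.raw
+      </programlisting>
+    </para>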
+    </sect2>
+    <sect2 id="iiobuffersetup"> <title> IIO buffer setup </title>
+      <para>The meta information associated with a channel reading
+        placed in a buffer is called a <emphasis> scan element </emphasis>.
+        The important bits configuring scan elements are exposed to
+        userspace applications via the <filename>
+        /sys/bus/iio/iio:deviceX/scan_elements/</filename> directory. This
+        directory contains attributes of the following form:
+      <itemizedlist>
+      <listitem><emphasis>enable</emphasis>, used for enabling a channel.
+        If and only if its attribute is non zero, then a triggered capture
+        will contain data samples for this channel.
+      </listitem>
+      <listitem><emphasis>type</emphasis>, description of the scan element
+        data storage within the buffer and hence the form in which it is
+        read from user space. Format is <emphasis>
+        [be|le]:[s|u]bits/storagebitsXrepeat[>>shift] </emphasis>.
+        <itemizedlist>
+        <listitem> <emphasis>be</emphasis> or <emphasis>le</emphasis>, specifies
+          big or little endian.
+        </listitem>
+        <listitem>
+        <emphasis>s </emphasis>or <emphasis>u</emphasis>, specifies if
+          signed (2's complement) or unsigned.
+        </listitem>
+        <listitem><emphasis>bits</emphasis>, is the number of valid data
+          bits.
+        </listitem>
+        <listitem><emphasis>storagebits</emphasis>, is the number of bits
+          (after padding) that it occupies in the buffer.
+        </listitem>
+        <listitem>
+        <emphasis>shift</emphasis>, if specified, is the shift that needs
+          to be applied prior to masking out unused bits.
+        </listitem>
+        <listitem>
+        <emphasis>repeat</emphasis>, specifies the number of bits/storagebits
+        repetitions. When the repeat element is 0 or 1, then the repeat
+        value is omitted.
+        </listitem>
+        </itemizedlist>
+      </listitem>
+      </itemizedlist>
+      For example, a driver for a 3-axis accelerometer with 12-bit
+      resolution where data is stored in two 8-bit registers as
+      follows:
+      <programlisting>
+        7   6   5   4   3   2   1   0
+      +---+---+---+---+---+---+---+---+
+      |D3 |D2 |D1 |D0 | X | X | X | X | (LOW byte, address 0x06)
+      +---+---+---+---+---+---+---+---+
+
+        7   6   5   4   3   2   1   0
+      +---+---+---+---+---+---+---+---+
+      |D11|D10|D9 |D8 |D7 |D6 |D5 |D4 | (HIGH byte, address 0x07)
+      +---+---+---+---+---+---+---+---+
+      </programlisting>
+
+      will have the following scan element type for each axis:
+      <programlisting>
+      $ cat /sys/bus/iio/devices/iio:device0/scan_elements/in_accel_y_type
+      le:s12/16>>4
+      </programlisting>
+      A user space application will interpret data samples read from the
+      buffer as two-byte little-endian signed data that needs a 4-bit
+      right shift before masking out the 12 valid bits of data.
+    </para>
+    <para>
+      For implementing buffer support a driver should initialize the following
+      fields in <type>iio_chan_spec</type> definition:
+      <programlisting>
+          struct iio_chan_spec {
+              /* other members */
+              int scan_index;
+              struct {
+                  char sign;
+                  u8 realbits;
+                  u8 storagebits;
+                  u8 shift;
+                  u8 repeat;
+                  enum iio_endian endianness;
+              } scan_type;
+          };
+      </programlisting>
+      The driver implementing the accelerometer described above will
+      have the following channel definition:
+      <programlisting>
+      struct iio_chan_spec accel_channels[] = {
+          {
+            .type = IIO_ACCEL,
+            .modified = 1,
+            .channel2 = IIO_MOD_X,
+            /* other stuff here */
+            .scan_index = 0,
+            .scan_type = {
+              .sign = 's',
+              .realbits = 12,
+              .storagebits = 16,
+              .shift = 4,
+              .endianness = IIO_LE,
+            },
+        },
+        /* similar for Y (with channel2 = IIO_MOD_Y, scan_index = 1)
+         * and Z (with channel2 = IIO_MOD_Z, scan_index = 2) axis
+         */
+    };
+    </programlisting>
+    </para>
+    <para>
+    Here <emphasis> scan_index </emphasis> defines the order in which
+    the enabled channels are placed inside the buffer. Channels with a lower
+    scan_index will be placed before channels with a higher index. Each
+    channel needs to have a unique scan_index.
+    </para>
+    <para>
+    Setting scan_index to -1 can be used to indicate that the specific
+    channel does not support buffered capture. In this case no entries will
+    be created for the channel in the scan_elements directory.
+    </para>
+    </sect2>
+  </sect1>
+
+  <sect1 id="iiotrigger"> <title> Industrial I/O triggers  </title>
+!Finclude/linux/iio/trigger.h iio_trigger
+!Edrivers/iio/industrialio-trigger.c
+    <para>
+      In many situations it is useful for a driver to be able to
+      capture data based on some external event (trigger) as opposed
+      to periodically polling for data. An IIO trigger can be provided
+      by a device driver that also has an IIO device based on hardware
+      generated events (e.g. data ready or threshold exceeded) or
+      provided by a separate driver from an independent interrupt
+      source (e.g. GPIO line connected to some external system, timer
+      interrupt or user space writing a specific file in sysfs). A
+      trigger may initiate data capture for a number of sensors and
+      also it may be completely unrelated to the sensor itself.
+    </para>
+
+    <sect2 id="iiotrigsysfs"> <title> IIO trigger sysfs interface </title>
+      There are two locations in sysfs related to triggers:
+      <itemizedlist>
+        <listitem><filename>/sys/bus/iio/devices/triggerY</filename>,
+          this file is created once an IIO trigger is registered with
+          the IIO core and corresponds to the trigger with index Y. Because
+          triggers can be very different depending on their type, there are few
+          standard attributes that we can describe here:
+          <itemizedlist>
+            <listitem>
+              <emphasis>name</emphasis>, trigger name that can be later
+                used for association with a device.
+            </listitem>
+            <listitem>
+            <emphasis>sampling_frequency</emphasis>, some timer based
+              triggers use this attribute to specify the frequency for
+              trigger calls.
+            </listitem>
+          </itemizedlist>
+        </listitem>
+        <listitem>
+          <filename>/sys/bus/iio/devices/iio:deviceX/trigger/</filename>, this
+          directory is created once the device supports a triggered
+          buffer. We can associate a trigger with our device by writing
+          the trigger's name in the <filename>current_trigger</filename> file.
+        </listitem>
+      </itemizedlist>
+    </sect2>
+
+    <sect2 id="iiotrigattr"> <title> IIO trigger setup</title>
+
+    <para>
+      Let's see a simple example of how to setup a trigger to be used
+      by a driver.
+
+      <programlisting>
+      const struct iio_trigger_ops trigger_ops = {
+          .set_trigger_state = sample_trigger_state,
+          .validate_device = sample_validate_device,
+      };
+
+      struct iio_trigger *trig;
+
+      /* first, allocate memory for our trigger */
+      trig = iio_trigger_alloc(dev, "trig-%s-%d", name, idx);
+
+      /* setup trigger operations field */
+      trig->ops = &amp;trigger_ops;
+
+      /* now register the trigger with the IIO core */
+      iio_trigger_register(trig);
+      </programlisting>
+    </para>
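+    <para>
+      A minimal sketch of the two callbacks referenced above could look as
+      follows (the names sample_trigger_state and sample_validate_device are
+      placeholders; the actual enable/disable and validation logic is
+      device specific):
+      <programlisting>
+      static int sample_trigger_state(struct iio_trigger *trig, bool state)
+      {
+          /* enable or disable the interrupt source that fires this trigger */
+          return 0;
+      }
+
+      static int sample_validate_device(struct iio_trigger *trig,
+                                        struct iio_dev *indio_dev)
+      {
+          /* e.g. only accept devices handled by this driver */
+          return 0;
+      }
+      </programlisting>
+    </para>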
+    </sect2>
+
+    <sect2 id="iiotrigsetup"> <title> IIO trigger ops</title>
+!Finclude/linux/iio/trigger.h iio_trigger_ops
+     <para>
+        Notice that a trigger has a set of operations attached:
+        <itemizedlist>
+        <listitem>
+          <function>set_trigger_state</function>, switch the trigger on/off
+          on demand.
+        </listitem>
+        <listitem>
+          <function>validate_device</function>, function to validate the
+          device when the current trigger gets changed.
+        </listitem>
+        </itemizedlist>
+      </para>
+    </sect2>
+  </sect1>
+  <sect1 id="iiotriggered_buffer">
+    <title> Industrial I/O triggered buffers </title>
+    <para>
+    Now that we know what buffers and triggers are let's see how they
+    work together.
+    </para>
+    <sect2 id="iiotrigbufsetup"> <title> IIO triggered buffer setup</title>
+!Edrivers/iio/industrialio-triggered-buffer.c
+!Finclude/linux/iio/iio.h iio_buffer_setup_ops
+
+
+    <para>
+    A typical triggered buffer setup looks like this:
+    <programlisting>
+    const struct iio_buffer_setup_ops sensor_buffer_setup_ops = {
+      .preenable    = sensor_buffer_preenable,
+      .postenable   = sensor_buffer_postenable,
+      .postdisable  = sensor_buffer_postdisable,
+      .predisable   = sensor_buffer_predisable,
+    };
+
+    irqreturn_t sensor_iio_pollfunc(int irq, void *p)
+    {
+        struct iio_poll_func *pf = p;
+
+        pf->timestamp = iio_get_time_ns();
+        return IRQ_WAKE_THREAD;
+    }
+
+    irqreturn_t sensor_trigger_handler(int irq, void *p)
+    {
+        struct iio_poll_func *pf = p;
+        struct iio_dev *indio_dev = pf->indio_dev;
+        u16 buf[8];
+        int bit, i = 0;
+
+        /* read data for each active channel */
+        for_each_set_bit(bit, indio_dev->active_scan_mask,
+                         indio_dev->masklength)
+            buf[i++] = sensor_get_data(bit);
+
+        iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp);
+
+        iio_trigger_notify_done(indio_dev->trig);
+        return IRQ_HANDLED;
+    }
+
+    /* setup triggered buffer, usually in probe function */
+    iio_triggered_buffer_setup(indio_dev, sensor_iio_pollfunc,
+                               sensor_trigger_handler,
+                               &amp;sensor_buffer_setup_ops);
+    </programlisting>
+    </para>
+    The important things to notice here are:
+    <itemizedlist>
+    <listitem><function> iio_buffer_setup_ops</function>, the buffer setup
+    functions to be called at predefined points in the buffer configuration
+    sequence (e.g. before enable, after disable). If not specified, the
+    IIO core uses the default <type>iio_triggered_buffer_setup_ops</type>.
+    </listitem>
+    <listitem><function>sensor_iio_pollfunc</function>, the function that
+    will be used as the top half of the poll function. It should do as little
+    processing as possible, because it runs in interrupt context. The most
+    common operation is recording the current timestamp, and for this reason
+    one can use the <function>iio_pollfunc_store_time</function> function
+    provided by the IIO core.
+    </listitem>
+    <listitem><function>sensor_trigger_handler</function>, the function that
+    will be used as the bottom half of the poll function. This runs in the
+    context of a kernel thread and all the processing takes place here.
+    It usually reads data from the device and stores it in the internal
+    buffer together with the timestamp recorded in the top half.
+    </listitem>
+    </itemizedlist>
+    </sect2>
+  </sect1>
+  </chapter>
+  <chapter id='iioresources'>
+    <title> Resources </title>
+      The IIO core may change over time, so the best documentation to read
+      is the source code. There are several locations where you should look:
+      <itemizedlist>
+        <listitem>
+          <filename>drivers/iio/</filename>, contains the IIO core plus
+          directories for each sensor type (e.g. accel, magnetometer,
+          etc.)
+        </listitem>
+        <listitem>
+          <filename>include/linux/iio/</filename>, contains the header
+          files, nice to read for the internal kernel interfaces.
+        </listitem>
+        <listitem>
+        <filename>include/uapi/linux/iio/</filename>, contains files to be
+          used by user space applications.
+        </listitem>
+        <listitem>
+         <filename>tools/iio/</filename>, contains tools for rapidly
+          testing buffers, events and device creation.
+        </listitem>
+        <listitem>
+          <filename>drivers/staging/iio/</filename>, contains code for some
+          drivers or experimental features that are not yet mature enough
+          to be moved out.
+        </listitem>
+      </itemizedlist>
+    <para>
+    Besides the code, there are some good online documentation sources:
+    <itemizedlist>
+    <listitem>
+      <ulink url="http://marc.info/?l=linux-iio"> Industrial I/O mailing
+      list </ulink>
+    </listitem>
+    <listitem>
+      <ulink url="http://wiki.analog.com/software/linux/docs/iio/iio">
+      Analog Devices IIO wiki page </ulink>
+    </listitem>
+    <listitem>
+      <ulink url="https://fosdem.org/2015/schedule/event/iiosdr/">
+      Using the Linux IIO framework for SDR, Lars-Peter Clausen's
+      presentation at FOSDEM </ulink>
+    </listitem>
+    </itemizedlist>
+    </para>
+  </chapter>
+</book>
+
+<!--
+vim: softtabstop=2:shiftwidth=2:expandtab:textwidth=72
+-->
diff --git a/Documentation/DocBook/stylesheet.xsl b/Documentation/DocBook/stylesheet.xsl
index 85b2527..3bf4ecf 100644
--- a/Documentation/DocBook/stylesheet.xsl
+++ b/Documentation/DocBook/stylesheet.xsl
@@ -5,6 +5,7 @@
 <param name="funcsynopsis.tabular.threshold">80</param>
 <param name="callout.graphics">0</param>
 <!-- <param name="paper.type">A4</param> -->
+<param name="generate.consistent.ids">1</param>
 <param name="generate.section.toc.level">2</param>
 <param name="use.id.as.filename">1</param>
 </stylesheet>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 93aa860..21152d3 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -218,16 +218,16 @@
 Linux kernel development process currently consists of a few different
 main kernel "branches" and lots of different subsystem-specific kernel
 branches.  These different branches are:
-  - main 3.x kernel tree
-  - 3.x.y -stable kernel tree
-  - 3.x -git kernel patches
+  - main 4.x kernel tree
+  - 4.x.y -stable kernel tree
+  - 4.x -git kernel patches
   - subsystem specific kernel trees and patches
-  - the 3.x -next kernel tree for integration tests
+  - the 4.x -next kernel tree for integration tests
 
-3.x kernel tree
+4.x kernel tree
 -----------------
-3.x kernels are maintained by Linus Torvalds, and can be found on
-kernel.org in the pub/linux/kernel/v3.x/ directory.  Its development
+4.x kernels are maintained by Linus Torvalds, and can be found on
+kernel.org in the pub/linux/kernel/v4.x/ directory.  Its development
 process is as follows:
   - As soon as a new kernel is released a two weeks window is open,
     during this period of time maintainers can submit big diffs to
@@ -262,20 +262,20 @@
 	released according to perceived bug status, not according to a
 	preconceived timeline."
 
-3.x.y -stable kernel tree
+4.x.y -stable kernel tree
 ---------------------------
 Kernels with 3-part versions are -stable kernels. They contain
 relatively small and critical fixes for security problems or significant
-regressions discovered in a given 3.x kernel.
+regressions discovered in a given 4.x kernel.
 
 This is the recommended branch for users who want the most recent stable
 kernel and are not interested in helping test development/experimental
 versions.
 
-If no 3.x.y kernel is available, then the highest numbered 3.x
+If no 4.x.y kernel is available, then the highest numbered 4.x
 kernel is the current stable kernel.
 
-3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+4.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
 are released as needs dictate.  The normal release period is approximately
 two weeks, but it can be longer if there are no pressing problems.  A
 security-related problem, instead, can cause a release to happen almost
@@ -285,7 +285,7 @@
 documents what kinds of changes are acceptable for the -stable tree, and
 how the release process works.
 
-3.x -git patches
+4.x -git patches
 ------------------
 These are daily snapshots of Linus' kernel tree which are managed in a
 git repository (hence the name.) These patches are usually released
@@ -317,9 +317,9 @@
 accepted, or rejected.  Most of these patchwork sites are listed at
 http://patchwork.kernel.org/.
 
-3.x -next kernel tree for integration tests
+4.x -next kernel tree for integration tests
 ---------------------------------------------
-Before updates from subsystem trees are merged into the mainline 3.x
+Before updates from subsystem trees are merged into the mainline 4.x
 tree, they need to be integration-tested.  For this purpose, a special
 testing repository exists into which virtually all subsystem trees are
 pulled on an almost daily basis:
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
index cf9431d..7b57fc0 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -10,7 +10,7 @@
 Some Keywords
 
 DMAR - DMA remapping
-DRHD - DMA Engine Reporting Structure
+DRHD - DMA Remapping Hardware Unit Definition
 RMRR - Reserved memory Region Reporting Structure
 ZLR  - Zero length reads from PCI devices
 IOVA - IO Virtual address.
diff --git a/Documentation/RCU/rcu_dereference.txt b/Documentation/RCU/rcu_dereference.txt
index 1e6c0da..c0bf244 100644
--- a/Documentation/RCU/rcu_dereference.txt
+++ b/Documentation/RCU/rcu_dereference.txt
@@ -28,7 +28,7 @@
 o	Avoid cancellation when using the "+" and "-" infix arithmetic
 	operators.  For example, for a given variable "x", avoid
 	"(x-x)".  There are similar arithmetic pitfalls from other
-	arithmetic operatiors, such as "(x*0)", "(x/(x+1))" or "(x%1)".
+	arithmetic operators, such as "(x*0)", "(x/(x+1))" or "(x%1)".
 	The compiler is within its rights to substitute zero for all of
 	these expressions, so that subsequent accesses no longer depend
 	on the rcu_dereference(), again possibly resulting in bugs due
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index b57c0c1..efb9454 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -26,12 +26,6 @@
 	Stall-warning messages may be enabled and disabled completely via
 	/sys/module/rcupdate/parameters/rcu_cpu_stall_suppress.
 
-CONFIG_RCU_CPU_STALL_INFO
-
-	This kernel configuration parameter causes the stall warning to
-	print out additional per-CPU diagnostic information, including
-	information on scheduling-clock ticks and RCU's idle-CPU tracking.
-
 RCU_STALL_DELAY_DELTA
 
 	Although the lockdep facility is extremely useful, it does add
@@ -101,15 +95,13 @@
 sort of false positive without resorting to things like stop_machine(),
 which is overkill for this sort of problem.
 
-If the CONFIG_RCU_CPU_STALL_INFO kernel configuration parameter is set,
-more information is printed with the stall-warning message, for example:
+Recent kernels will print a long form of the stall-warning message:
 
 	INFO: rcu_preempt detected stall on CPU
 	0: (63959 ticks this GP) idle=241/3fffffffffffffff/0 softirq=82/543
 	   (t=65000 jiffies)
 
-In kernels with CONFIG_RCU_FAST_NO_HZ, even more information is
-printed:
+In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed:
 
 	INFO: rcu_preempt detected stall on CPU
 	0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 softirq=82/543 last_accelerate: a345/d342 nonlazy_posted: 25 .D
@@ -171,6 +163,23 @@
 of the stall and the first message.
 
 
+Stall Warnings for Expedited Grace Periods
+
+If an expedited grace period detects a stall, it will place a message
+like the following in dmesg:
+
+	INFO: rcu_sched detected expedited stalls on CPUs: { 1 2 6 } 26009 jiffies s: 1043
+
+This indicates that CPUs 1, 2, and 6 have failed to respond to a
+reschedule IPI, that the expedited grace period has been going on for
+26,009 jiffies, and that the expedited grace-period sequence counter is
+1043.  The fact that this last value is odd indicates that an expedited
+grace period is in flight.
+
+It is entirely possible to see stall warnings from normal and from
+expedited grace periods at about the same time from the same run.
+
+
 What Causes RCU CPU Stall Warnings?
 
 So your kernel printed an RCU CPU stall warning.  The next question is
diff --git a/Documentation/RCU/trace.txt b/Documentation/RCU/trace.txt
index 08651da..97f17e9 100644
--- a/Documentation/RCU/trace.txt
+++ b/Documentation/RCU/trace.txt
@@ -237,42 +237,26 @@
 
 The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
 
-s=21872 d=21872 w=0 tf=0 wd1=0 wd2=0 n=0 sc=21872 dt=21872 dl=0 dx=21872
+s=21872 wd0=0 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
 
 These fields are as follows:
 
-o	"s" is the starting sequence number.
+o	"s" is the sequence number, with an odd number indicating that
+	an expedited grace period is in progress.
 
-o	"d" is the ending sequence number.  When the starting and ending
-	numbers differ, there is an expedited grace period in progress.
-
-o	"w" is the number of times that the sequence numbers have been
-	in danger of wrapping.
-
-o	"tf" is the number of times that contention has resulted in a
-	failure to begin an expedited grace period.
-
-o	"wd1" and "wd2" are the number of times that an attempt to
-	start an expedited grace period found that someone else had
-	completed an expedited grace period that satisfies the
+o	"wd0", "wd1", "wd2", and "wd3" are the number of times that an
+	attempt to start an expedited grace period found that someone
+	else had completed an expedited grace period that satisfies the
 	attempted request.  "Our work is done."
 
-o	"n" is number of times that contention was so great that
-	the request was demoted from an expedited grace period to
-	a normal grace period.
+o	"n" is the number of times that a concurrent CPU-hotplug operation
+	forced a fallback to a normal grace period.
+
+o	"enq" is the number of quiescent states still outstanding.
 
 o	"sc" is the number of times that the attempt to start a
 	new expedited grace period succeeded.
 
-o	"dt" is the number of times that we attempted to update
-	the "d" counter.
-
-o	"dl" is the number of times that we failed to update the "d"
-	counter.
-
-o	"dx" is the number of times that we succeeded in updating
-	the "d" counter.
-
 
 The output of "cat rcu/rcu_preempt/rcugp" looks as follows:
 
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 5746b0c..adc2184 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -883,7 +883,7 @@
 
 	rcu_access_pointer
 	rcu_dereference_raw
-	rcu_lockdep_assert
+	RCU_LOCKDEP_WARN
 	rcu_sleep_check
 	RCU_NONIDLE
 
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 27e7e5e..fd89b04 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -90,11 +90,11 @@
 
 Make sure your patch does not include any extra files which do not
 belong in a patch submission.  Make sure to review your patch -after-
-generated it with diff(1), to ensure accuracy.
+generating it with diff(1), to ensure accuracy.
 
 If your changes produce a lot of deltas, you need to split them into
 individual patches which modify things in logical stages; see section
-#3.  This will facilitate easier reviewing by other kernel developers,
+#3.  This will facilitate review by other kernel developers,
 very important if you want your patch accepted.
 
 If you're using git, "git rebase -i" can help you with this process.  If
@@ -267,7 +267,7 @@
 to code that they maintain; look through the MAINTAINERS file and the
 source code revision history to see who those maintainers are.  The
 script scripts/get_maintainer.pl can be very useful at this step.  If you
-cannot find a maintainer for the subsystem your are working on, Andrew
+cannot find a maintainer for the subsystem you are working on, Andrew
 Morton (akpm@linux-foundation.org) serves as a maintainer of last resort.
 
 You should also normally choose at least one mailing list to receive a copy
@@ -291,7 +291,7 @@
 
 If you have a patch that fixes an exploitable security bug, send that patch
 to security@kernel.org.  For severe bugs, a short embargo may be considered
-to allow distrbutors to get the patch out to users; in such cases,
+to allow distributors to get the patch out to users; in such cases,
 obviously, the patch should not be sent to any public lists.
 
 Patches that fix a severe bug in a released kernel should be directed
@@ -340,7 +340,7 @@
 developer to be able to "quote" your changes, using standard e-mail
 tools, so that they may comment on specific portions of your code.
 
-For this reason, all patches should be submitting e-mail "inline".
+For this reason, all patches should be submitted by e-mail "inline".
 WARNING:  Be wary of your editor's word-wrap corrupting your patch,
 if you choose to cut-n-paste your patch.
 
@@ -739,7 +739,7 @@
 
       git://jdelvare.pck.nerim.net/jdelvare-2.6 i2c-for-linus
 
-  to get these changes:"
+  to get these changes:
 
 A pull request should also include an overall message saying what will be
 included in the request, a "git shortlog" listing of the patches
@@ -796,7 +796,7 @@
   <https://lkml.org/lkml/2005/7/11/336>
 
 Kernel Documentation/CodingStyle:
-  <http://users.sosdg.org/~qiyong/lxr/source/Documentation/CodingStyle>
+  <Documentation/CodingStyle>
 
 Linus Torvalds's mail on the canonical patch format:
   <http://lkml.org/lkml/2005/4/7/183>
diff --git a/Documentation/acpi/method-tracing.txt b/Documentation/acpi/method-tracing.txt
index f6efb1e..c2505ee 100644
--- a/Documentation/acpi/method-tracing.txt
+++ b/Documentation/acpi/method-tracing.txt
@@ -1,26 +1,192 @@
-/sys/module/acpi/parameters/:
+ACPICA Trace Facility
 
-trace_method_name
-	The AML method name that the user wants to trace
+Copyright (C) 2015, Intel Corporation
+Author: Lv Zheng <lv.zheng@intel.com>
 
-trace_debug_layer
-	The temporary debug_layer used when tracing the method.
-	Using 0xffffffff by default if it is 0.
 
-trace_debug_level
-	The temporary debug_level used when tracing the method.
-	Using 0x00ffffff by default if it is 0.
+Abstract:
 
-trace_state
+This document describes the functions and the interfaces of the method
+tracing facility.
+
+1. Functionalities and usage examples:
+
+   ACPICA provides a method tracing capability, and two functions are
+   currently implemented using this capability.
+
+   A. Log reducer
+   ACPICA subsystem provides debugging outputs when CONFIG_ACPI_DEBUG is
+   enabled. The debugging messages which are deployed via
+   ACPI_DEBUG_PRINT() macro can be reduced at 2 levels - per-component
+   level (known as debug layer, configured via
+   /sys/module/acpi/parameters/debug_layer) and per-type level (known as
+   debug level, configured via /sys/module/acpi/parameters/debug_level).
+
+   But even when a particular layer/level is applied to control method
+   evaluations, the quantity of debugging output may still be too large
+   for the kernel log buffer. The idea therefore is to enable the
+   (normally more detailed) debug layer/level logs only while a control
+   method evaluation is running, and to disable the detailed logging when
+   the control method evaluation stops.
+
+   The following command examples illustrate the usage of the "log reducer"
+   functionality:
+   a. Filter out the debug layer/level matched logs when control methods
+      are being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0xXXXXXXXX" > trace_debug_layer
+      # echo "0xYYYYYYYY" > trace_debug_level
+      # echo "enable" > trace_state
+   b. Filter out the debug layer/level matched logs when the specified
+      control method is being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0xXXXXXXXX" > trace_debug_layer
+      # echo "0xYYYYYYYY" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method" > /sys/module/acpi/parameters/trace_state
+   c. Filter out the debug layer/level matched logs when the specified
+      control method is being evaluated for the first time:
+      # cd /sys/module/acpi/parameters
+      # echo "0xXXXXXXXX" > trace_debug_layer
+      # echo "0xYYYYYYYY" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method-once" > /sys/module/acpi/parameters/trace_state
+   Where:
+      0xXXXXXXXX/0xYYYYYYYY: Refer to Documentation/acpi/debug.txt for
+			     possible debug layer/level masking values.
+      \PPPP.AAAA.TTTT.HHHH: Full path of a control method that can be found
+			    in the ACPI namespace. It needn't be an entry
+			    of a control method evaluation.
+
+   B. AML tracer
+
+   There are special log entries added by the method tracing facility at
+   the "trace points" the AML interpreter starts/stops to execute a control
+   method, or an AML opcode. Note that the format of the log entries are
+   subject to change:
+     [    0.186427]   exdebug-0398 ex_trace_point        : Method Begin [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+     [    0.186630]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905c88:If] execution.
+     [    0.186820]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905cc0:LEqual] execution.
+     [    0.187010]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905a20:-NamePath-] execution.
+     [    0.187214]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905a20:-NamePath-] execution.
+     [    0.187407]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905f60:One] execution.
+     [    0.187594]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905f60:One] execution.
+     [    0.187789]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905cc0:LEqual] execution.
+     [    0.187980]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905cc0:Return] execution.
+     [    0.188146]   exdebug-0398 ex_trace_point        : Opcode Begin [0xf5905f60:One] execution.
+     [    0.188334]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905f60:One] execution.
+     [    0.188524]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905cc0:Return] execution.
+     [    0.188712]   exdebug-0398 ex_trace_point        : Opcode End [0xf5905c88:If] execution.
+     [    0.188903]   exdebug-0398 ex_trace_point        : Method End [0xf58394d8:\_SB.PCI0.LPCB.ECOK] execution.
+
+   Developers can utilize these special log entries to track the AML
+   interpretation, which can aid issue debugging and performance tuning. Note
+   that, as the "AML tracer" logs are implemented via the ACPI_DEBUG_PRINT()
+   macro, CONFIG_ACPI_DEBUG is also required to be enabled in order to
+   produce the "AML tracer" logs.
+
+   The following command examples illustrate the usage of the "AML tracer"
+   functionality:
+   a. Filter out the method start/stop "AML tracer" logs when control
+      methods are being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "enable" > trace_state
+   b. Filter out the method start/stop "AML tracer" when the specified
+      control method is being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method" > trace_state
+   c. Filter out the method start/stop "AML tracer" logs when the specified
+      control method is being evaluated for the first time:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "method-once" > trace_state
+   d. Filter out the method/opcode start/stop "AML tracer" when the
+      specified control method is being evaluated:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "opcode" > trace_state
+   e. Filter out the method/opcode start/stop "AML tracer" when the
+      specified control method is being evaluated for the first time:
+      # cd /sys/module/acpi/parameters
+      # echo "0x80" > trace_debug_layer
+      # echo "0x10" > trace_debug_level
+      # echo "\PPPP.AAAA.TTTT.HHHH" > trace_method_name
+      # echo "opcode-once" > trace_state
+
+  Note that all of the above method tracing related module parameters can
+  also be used as boot parameters, for example:
+      acpi.trace_debug_layer=0x80 acpi.trace_debug_level=0x10 \
+      acpi.trace_method_name=\_SB.LID0._LID acpi.trace_state=opcode-once
+
+2. Interface descriptions:
+
+   All method tracing functions can be configured via ACPI module
+   parameters that are accessible at /sys/module/acpi/parameters/:
+
+   trace_method_name
+	The full path of the AML method that the user wants to trace.
+	Note that the full path shouldn't contain the trailing "_"s in its
+	name segments but may contain "\" to form an absolute path.
+
+   trace_debug_layer
+	The temporary debug_layer used when the tracing feature is enabled.
+	Using ACPI_EXECUTER (0x80) by default, which is the debug_layer
+	used to match all "AML tracer" logs.
+
+   trace_debug_level
+	The temporary debug_level used when the tracing feature is enabled.
+	Using ACPI_LV_TRACE_POINT (0x10) by default, which is the
+	debug_level used to match all "AML tracer" logs.
+
+   trace_state
 	The status of the tracing feature.
-
-	"enabled" means this feature is enabled
-	and the AML method is traced every time it's executed.
-
-	"1" means this feature is enabled and the AML method
-	will only be traced during the next execution.
-
-	"disabled" means this feature is disabled.
-	Users can enable/disable this debug tracing feature by
-	"echo string > /sys/module/acpi/parameters/trace_state".
-	"string" should be one of "enable", "disable" and "1".
+	Users can enable/disable this debug tracing feature by executing
+	the following command:
+	    # echo string > /sys/module/acpi/parameters/trace_state
+	Where "string" should be one of the following:
+	"disable"
+	    Disable the method tracing feature.
+	"enable"
+	    Enable the method tracing feature.
+	    ACPICA debugging messages matching
+	    "trace_debug_layer/trace_debug_level" during any method
+	    execution will be logged.
+	"method"
+	    Enable the method tracing feature.
+	    ACPICA debugging messages matching
+	    "trace_debug_layer/trace_debug_level" during method execution
+	    of "trace_method_name" will be logged.
+	"method-once"
+	    Enable the method tracing feature.
+	    ACPICA debugging messages matching
+	    "trace_debug_layer/trace_debug_level" during method execution
+	    of "trace_method_name" will be logged only once.
+	"opcode"
+	    Enable the method tracing feature.
+	    ACPICA debugging messages matching
+	    "trace_debug_layer/trace_debug_level" during method/opcode
+	    execution of "trace_method_name" will be logged.
+	"opcode-once"
+	    Enable the method tracing feature.
+	    ACPICA debugging messages matching
+	    "trace_debug_layer/trace_debug_level" during method/opcode
+	    execution of "trace_method_name" will be logged only once.
+	Note that the differences between "enable" and the other enabling
+	options are:
+	1. When "enable" is specified, since
+	   "trace_debug_layer/trace_debug_level" apply to all control
+	   method evaluations, "trace_method_name" will be reset to NULL
+	   after "trace_state" is configured to "enable".
+	2. When "method/opcode" is specified, if
+	   "trace_method_name" is NULL when "trace_state" is configured to
+	   these options, the "trace_debug_layer/trace_debug_level" will
+	   apply to all control method evaluations.
diff --git a/Documentation/adding-syscalls.txt b/Documentation/adding-syscalls.txt
new file mode 100644
index 0000000..cc2d4ac
--- /dev/null
+++ b/Documentation/adding-syscalls.txt
@@ -0,0 +1,527 @@
+Adding a New System Call
+========================
+
+This document describes what's involved in adding a new system call to the
+Linux kernel, over and above the normal submission advice in
+Documentation/SubmittingPatches.
+
+
+System Call Alternatives
+------------------------
+
+The first thing to consider when adding a new system call is whether one of
+the alternatives might be suitable instead.  Although system calls are the
+most traditional and most obvious interaction points between userspace and the
+kernel, there are other possibilities -- choose what fits best for your
+interface.
+
+ - If the operations involved can be made to look like a filesystem-like
+   object, it may make more sense to create a new filesystem or device.  This
+   also makes it easier to encapsulate the new functionality in a kernel module
+   rather than requiring it to be built into the main kernel.
+     - If the new functionality involves operations where the kernel notifies
+       userspace that something has happened, then returning a new file
+       descriptor for the relevant object allows userspace to use
+       poll/select/epoll to receive that notification.
+     - However, operations that don't map to read(2)/write(2)-like operations
+       have to be implemented as ioctl(2) requests, which can lead to a
+       somewhat opaque API.
+ - If you're just exposing runtime system information, a new node in sysfs
+   (see Documentation/filesystems/sysfs.txt) or the /proc filesystem may be
+   more appropriate.  However, access to these mechanisms requires that the
+   relevant filesystem is mounted, which might not always be the case (e.g.
+   in a namespaced/sandboxed/chrooted environment).  Avoid adding any API to
+   debugfs, as this is not considered a 'production' interface to userspace.
+ - If the operation is specific to a particular file or file descriptor, then
+   an additional fcntl(2) command option may be more appropriate.  However,
+   fcntl(2) is a multiplexing system call that hides a lot of complexity, so
+   this option is best for when the new function is closely analogous to
+   existing fcntl(2) functionality, or the new functionality is very simple
+   (for example, getting/setting a simple flag related to a file descriptor).
+ - If the operation is specific to a particular task or process, then an
+   additional prctl(2) command option may be more appropriate.  As with
+   fcntl(2), this system call is a complicated multiplexor so is best reserved
+   for near-analogs of existing prctl() commands or getting/setting a simple
+   flag related to a process.
+
+
+Designing the API: Planning for Extension
+-----------------------------------------
+
+A new system call forms part of the API of the kernel, and has to be supported
+indefinitely.  As such, it's a very good idea to explicitly discuss the
+interface on the kernel mailing list, and it's important to plan for future
+extensions of the interface.
+
+(The syscall table is littered with historical examples where this wasn't done,
+together with the corresponding follow-up system calls -- eventfd/eventfd2,
+dup2/dup3, inotify_init/inotify_init1,  pipe/pipe2, renameat/renameat2 -- so
+learn from the history of the kernel and plan for extensions from the start.)
+
+For simpler system calls that only take a couple of arguments, the preferred
+way to allow for future extensibility is to include a flags argument to the
+system call.  To make sure that userspace programs can safely use flags
+between kernel versions, check whether the flags value holds any unknown
+flags, and reject the system call (with EINVAL) if it does:
+
+    if (flags & ~(THING_FLAG1 | THING_FLAG2 | THING_FLAG3))
+        return -EINVAL;
+
+(If no flags values are used yet, check that the flags argument is zero.)
+
+For more sophisticated system calls that involve a larger number of arguments,
+it's preferred to encapsulate the majority of the arguments into a structure
+that is passed in by pointer.  Such a structure can cope with future extension
+by including a size argument in the structure:
+
+    struct xyzzy_params {
+        u32 size; /* userspace sets p->size = sizeof(struct xyzzy_params) */
+        u32 param_1;
+        u64 param_2;
+        u64 param_3;
+    };
+
+As long as any subsequently added field, say param_4, is designed so that a
+zero value gives the previous behaviour, then this allows both directions of
+version mismatch:
+
+ - To cope with a later userspace program calling an older kernel, the kernel
+   code should check that any memory beyond the size of the structure that it
+   expects is zero (effectively checking that param_4 == 0).
+ - To cope with an older userspace program calling a newer kernel, the kernel
+   code can zero-extend a smaller instance of the structure (effectively
+   setting param_4 = 0).
+
+See perf_event_open(2) and the perf_copy_attr() function (in
+kernel/events/core.c) for an example of this approach.
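+
+As a rough sketch of the kernel-side copy (the helper name,
+XYZZY_PARAMS_SIZE_VER0 and the PAGE_SIZE limit are illustrative only; see
+perf_copy_attr() for the full set of corner cases):
+
+    static int copy_xyzzy_params(struct xyzzy_params __user *uparams,
+                                 struct xyzzy_params *params)
+    {
+        u32 size;
+
+        if (get_user(size, &uparams->size))
+            return -EFAULT;
+        if (size < XYZZY_PARAMS_SIZE_VER0 || size > PAGE_SIZE)
+            return -E2BIG;
+
+        memset(params, 0, sizeof(*params));
+        /* copy only what userspace provided; newer fields stay zero */
+        if (copy_from_user(params, uparams, min_t(u32, size, sizeof(*params))))
+            return -EFAULT;
+        /* if size > sizeof(*params), the extra user bytes should also be
+         * checked to be zero, as perf_copy_attr() does */
+        params->size = size;
+        return 0;
+    }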
+
+
+Designing the API: Other Considerations
+---------------------------------------
+
+If your new system call allows userspace to refer to a kernel object, it
+should use a file descriptor as the handle for that object -- don't invent a
+new type of userspace object handle when the kernel already has mechanisms and
+well-defined semantics for using file descriptors.
+
+If your new xyzzy(2) system call does return a new file descriptor, then the
+flags argument should include a value that is equivalent to setting O_CLOEXEC
+on the new FD.  This makes it possible for userspace to close the timing
+window between xyzzy() and calling fcntl(fd, F_SETFD, FD_CLOEXEC), where an
+unexpected fork() and execve() in another thread could leak a descriptor to
+the exec'ed program. (However, resist the temptation to re-use the actual value
+of the O_CLOEXEC constant, as it is architecture-specific and is part of a
+numbering space of O_* flags that is fairly full.)
+
+If your system call returns a new file descriptor, you should also consider
+what it means to use the poll(2) family of system calls on that file
+descriptor. Making a file descriptor ready for reading or writing is the
+normal way for the kernel to indicate to userspace that an event has
+occurred on the corresponding kernel object.
+
+If your new xyzzy(2) system call involves a filename argument:
+
+    int sys_xyzzy(const char __user *path, ..., unsigned int flags);
+
+you should also consider whether an xyzzyat(2) version is more appropriate:
+
+    int sys_xyzzyat(int dfd, const char __user *path, ..., unsigned int flags);
+
+This allows more flexibility for how userspace specifies the file in question;
+in particular it allows userspace to request the functionality for an
+already-opened file descriptor using the AT_EMPTY_PATH flag, effectively giving
+an fxyzzy(3) operation for free:
+
+ - xyzzyat(AT_FDCWD, path, ..., 0) is equivalent to xyzzy(path,...)
+ - xyzzyat(fd, "", ..., AT_EMPTY_PATH) is equivalent to fxyzzy(fd, ...)
+
+(For more details on the rationale of the *at() calls, see the openat(2) man
+page; for an example of AT_EMPTY_PATH, see the fstatat(2) man page.)
+
+If your new xyzzy(2) system call involves a parameter describing an offset
+within a file, make its type loff_t so that 64-bit offsets can be supported
+even on 32-bit architectures.
+
+If your new xyzzy(2) system call involves privileged functionality, it needs
+to be governed by the appropriate Linux capability bit (checked with a call to
+capable()), as described in the capabilities(7) man page.  Choose an existing
+capability bit that governs related functionality, but try to avoid combining
+lots of only vaguely related functions together under the same bit, as this
+goes against capabilities' purpose of splitting the power of root.  In
+particular, avoid adding new uses of the already overly-general CAP_SYS_ADMIN
+capability.
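+
+For instance, such a check might look like the following (CAP_NET_ADMIN is
+only an example; pick whichever existing capability actually governs the
+functionality):
+
+    if (!capable(CAP_NET_ADMIN))
+        return -EPERM;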
+
+If your new xyzzy(2) system call manipulates a process other than the calling
+process, it should be restricted (using a call to ptrace_may_access()) so that
+only a calling process with the same permissions as the target process, or
+with the necessary capabilities, can manipulate the target process.
+
+Finally, be aware that some non-x86 architectures have an easier time if
+system call parameters that are explicitly 64-bit fall on odd-numbered
+arguments (i.e. parameter 1, 3, 5), to allow use of contiguous pairs of 32-bit
+registers.  (This concern does not apply if the arguments are part of a
+structure that's passed in by pointer.)
+
+
+Proposing the API
+-----------------
+
+To make new system calls easy to review, it's best to divide up the patchset
+into separate chunks.  These should include at least the following items as
+distinct commits (each of which is described further below):
+
+ - The core implementation of the system call, together with prototypes,
+   generic numbering, Kconfig changes and fallback stub implementation.
+ - Wiring up of the new system call for one particular architecture, usually
+   x86 (including all of x86_64, x86_32 and x32).
+ - A demonstration of the use of the new system call in userspace via a
+   selftest in tools/testing/selftests/.
+ - A draft man-page for the new system call, either as plain text in the
+   cover letter, or as a patch to the (separate) man-pages repository.
+
+New system call proposals, like any change to the kernel's API, should always
+be cc'ed to linux-api@vger.kernel.org.
+
+
+Generic System Call Implementation
+----------------------------------
+
+The main entry point for your new xyzzy(2) system call will be called
+sys_xyzzy(), but you add this entry point with the appropriate
+SYSCALL_DEFINEn() macro rather than explicitly.  The 'n' indicates the number
+of arguments to the system call, and the macro takes the system call name
+followed by the (type, name) pairs for the parameters as arguments.  Using
+this macro allows metadata about the new system call to be made available for
+other tools.
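+
+For example, the entry point for a hypothetical xyzzy(2) call taking a file
+descriptor, a flags word and a pointer argument might look like this (the
+argument list and XYZZY_VALID_FLAGS are purely illustrative):
+
+    SYSCALL_DEFINE3(xyzzy, int, fd, unsigned int, flags,
+                    struct xyzzy_params __user *, params)
+    {
+        if (flags & ~XYZZY_VALID_FLAGS)
+            return -EINVAL;
+        /* ... the actual work ... */
+        return 0;
+    }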
+
+The new entry point also needs a corresponding function prototype, in
+include/linux/syscalls.h, marked as asmlinkage to match the way that system
+calls are invoked:
+
+    asmlinkage long sys_xyzzy(...);
+
+Some architectures (e.g. x86) have their own architecture-specific syscall
+tables, but several other architectures share a generic syscall table. Add your
+new system call to the generic list by adding an entry to the list in
+include/uapi/asm-generic/unistd.h:
+
+    #define __NR_xyzzy 292
+    __SYSCALL(__NR_xyzzy, sys_xyzzy)
+
+Also update the __NR_syscalls count to reflect the additional system call, and
+note that if multiple new system calls are added in the same merge window,
+your new syscall number may get adjusted to resolve conflicts.
+
+The file kernel/sys_ni.c provides a fallback stub implementation of each system
+call, returning -ENOSYS.  Add your new system call here too:
+
+    cond_syscall(sys_xyzzy);
+
+Your new kernel functionality, and the system call that controls it, should
+normally be optional, so add a CONFIG option (typically to init/Kconfig) for
+it. As usual for new CONFIG options:
+
+ - Include a description of the new functionality and system call controlled
+   by the option.
+ - Make the option depend on EXPERT if it should be hidden from normal users.
+ - Make any new source files implementing the function dependent on the CONFIG
+   option in the Makefile (e.g. "obj-$(CONFIG_XYZZY_SYSCALL) += xyzzy.c").
+ - Double check that the kernel still builds with the new CONFIG option turned
+   off.
+
+To summarize, you need a commit that includes:
+
+ - CONFIG option for the new function, normally in init/Kconfig
+ - SYSCALL_DEFINEn(xyzzy, ...) for the entry point
+ - corresponding prototype in include/linux/syscalls.h
+ - generic table entry in include/uapi/asm-generic/unistd.h
+ - fallback stub in kernel/sys_ni.c
+
+
+x86 System Call Implementation
+------------------------------
+
+To wire up your new system call for x86 platforms, you need to update the
+master syscall tables.  Assuming your new system call isn't special in some
+way (see below), this involves a "common" entry (for x86_64 and x32) in
+arch/x86/entry/syscalls/syscall_64.tbl:
+
+    333   common   xyzzy     sys_xyzzy
+
+and an "i386" entry in arch/x86/entry/syscalls/syscall_32.tbl:
+
+    380   i386     xyzzy     sys_xyzzy
+
+Again, these numbers are liable to be changed if there are conflicts in the
+relevant merge window.
+
+
+Compatibility System Calls (Generic)
+------------------------------------
+
+For most system calls the same 64-bit implementation can be invoked even when
+the userspace program is itself 32-bit; even if the system call's parameters
+include an explicit pointer, this is handled transparently.
+
+However, there are a couple of situations where a compatibility layer is
+needed to cope with size differences between 32-bit and 64-bit.
+
+The first is if the 64-bit kernel also supports 32-bit userspace programs, and
+so needs to parse areas of (__user) memory that could hold either 32-bit or
+64-bit values.  In particular, this is needed whenever a system call argument
+is:
+
+ - a pointer to a pointer
+ - a pointer to a struct containing a pointer (e.g. struct iovec __user *)
+ - a pointer to a varying sized integral type (time_t, off_t, long, ...)
+ - a pointer to a struct containing a varying sized integral type.
+
+The second situation that requires a compatibility layer is if one of the
+system call's arguments has a type that is explicitly 64-bit even on a 32-bit
+architecture, for example loff_t or __u64.  In this case, a value that arrives
+at a 64-bit kernel from a 32-bit application will be split into two 32-bit
+values, which then need to be re-assembled in the compatibility layer.
+
+(Note that a system call argument that's a pointer to an explicit 64-bit type
+does *not* need a compatibility layer; for example, splice(2)'s arguments of
+type loff_t __user * do not trigger the need for a compat_ system call.)
+
+The compatibility version of the system call is called compat_sys_xyzzy(), and
+is added with the COMPAT_SYSCALL_DEFINEn() macro, analogously to
+SYSCALL_DEFINEn.  This version of the implementation runs as part of a 64-bit
+kernel, but expects to receive 32-bit parameter values and does whatever is
+needed to deal with them.  (Typically, the compat_sys_ version converts the
+values to 64-bit versions and either calls on to the sys_ version, or both of
+them call a common inner implementation function.)
+
+The compat entry point also needs a corresponding function prototype, in
+include/linux/compat.h, marked as asmlinkage to match the way that system
+calls are invoked:
+
+    asmlinkage long compat_sys_xyzzy(...);
+
+If the system call involves a structure that is laid out differently on 32-bit
+and 64-bit systems, say struct xyzzy_args, then the include/linux/compat.h
+header file should also include a compat version of the structure (struct
+compat_xyzzy_args) where each variable-size field has the appropriate compat_
+type that corresponds to the type in struct xyzzy_args.  The
+compat_sys_xyzzy() routine can then use this compat_ structure to parse the
+arguments from a 32-bit invocation.
+
+For example, if there are fields:
+
+    struct xyzzy_args {
+        const char __user *ptr;
+        __kernel_long_t varying_val;
+        u64 fixed_val;
+        /* ... */
+    };
+
+in struct xyzzy_args, then struct compat_xyzzy_args would have:
+
+    struct compat_xyzzy_args {
+        compat_uptr_t ptr;
+        compat_long_t varying_val;
+        u64 fixed_val;
+        /* ... */
+    };
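+
+The compat entry point can then convert each field explicitly; as a sketch
+(do_xyzzy() stands in for whatever common inner implementation the native
+sys_xyzzy() would also call):
+
+    COMPAT_SYSCALL_DEFINE1(xyzzy, struct compat_xyzzy_args __user *, uargs)
+    {
+        struct compat_xyzzy_args cargs;
+        struct xyzzy_args args;
+
+        if (copy_from_user(&cargs, uargs, sizeof(cargs)))
+            return -EFAULT;
+
+        args.ptr = compat_ptr(cargs.ptr);
+        args.varying_val = cargs.varying_val;
+        args.fixed_val = cargs.fixed_val;
+
+        return do_xyzzy(&args);
+    }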
+
+The generic system call list also needs adjusting to allow for the compat
+version; the entry in include/uapi/asm-generic/unistd.h should use
+__SC_COMP rather than __SYSCALL:
+
+    #define __NR_xyzzy 292
+    __SC_COMP(__NR_xyzzy, sys_xyzzy, compat_sys_xyzzy)
+
+To summarize, you need:
+
+ - a COMPAT_SYSCALL_DEFINEn(xyzzy, ...) for the compat entry point
+ - corresponding prototype in include/linux/compat.h
+ - (if needed) 32-bit mapping struct in include/linux/compat.h
+ - instance of __SC_COMP not __SYSCALL in include/uapi/asm-generic/unistd.h
+
+
+Compatibility System Calls (x86)
+--------------------------------
+
+To wire up the x86 architecture of a system call with a compatibility version,
+the entries in the syscall tables need to be adjusted.
+
+First, the entry in arch/x86/entry/syscalls/syscall_32.tbl gets an extra
+column to indicate that a 32-bit userspace program running on a 64-bit kernel
+should hit the compat entry point:
+
+    380   i386     xyzzy     sys_xyzzy    compat_sys_xyzzy
+
+Second, you need to figure out what should happen for the x32 ABI version of
+the new system call.  There's a choice here: the layout of the arguments
+should either match the 64-bit version or the 32-bit version.
+
+If there's a pointer-to-a-pointer involved, the decision is easy: x32 is
+ILP32, so the layout should match the 32-bit version, and the entry in
+arch/x86/entry/syscalls/syscall_64.tbl is split so that x32 programs hit the
+compatibility wrapper:
+
+    333   64       xyzzy     sys_xyzzy
+    ...
+    555   x32      xyzzy     compat_sys_xyzzy
+
+If no pointers are involved, then it is preferable to re-use the 64-bit system
+call for the x32 ABI (and consequently the entry in
+arch/x86/entry/syscalls/syscall_64.tbl is unchanged).
+
+In either case, you should check that the types involved in your argument
+layout do indeed map exactly from x32 (-mx32) to either the 32-bit (-m32) or
+64-bit (-m64) equivalents.
+
+
+System Calls Returning Elsewhere
+--------------------------------
+
+For most system calls, once the system call is complete the user program
+continues exactly where it left off -- at the next instruction, with the
+stack the same and most of the registers the same as before the system call,
+and with the same virtual memory space.
+
+However, a few system calls do things differently.  They might return to a
+different location (rt_sigreturn) or change the memory space (fork/vfork/clone)
+or even architecture (execve/execveat) of the program.
+
+To allow for this, the kernel implementation of the system call may need to
+save and restore additional registers to the kernel stack, allowing complete
+control of where and how execution continues after the system call.
+
+This is arch-specific, but typically involves defining assembly entry points
+that save/restore additional registers and invoke the real system call entry
+point.
+
+For x86_64, this is implemented as a stub_xyzzy entry point in
+arch/x86/entry/entry_64.S, and the entry in the syscall table
+(arch/x86/entry/syscalls/syscall_64.tbl) is adjusted to match:
+
+    333   common   xyzzy     stub_xyzzy
+
+The equivalent for 32-bit programs running on a 64-bit kernel is normally
+called stub32_xyzzy and implemented in arch/x86/entry/entry_64_compat.S,
+with the corresponding syscall table adjustment in
+arch/x86/entry/syscalls/syscall_32.tbl:
+
+    380   i386     xyzzy     sys_xyzzy    stub32_xyzzy
+
+If the system call needs a compatibility layer (as in the previous section)
+then the stub32_ version needs to call on to the compat_sys_ version of the
+system call rather than the native 64-bit version.  Also, if the x32 ABI
+implementation is not common with the x86_64 version, then its syscall
+table will also need to invoke a stub that calls on to the compat_sys_
+version.
+
+For completeness, it's also nice to set up a mapping so that user-mode Linux
+still works -- its syscall table will reference stub_xyzzy, but the UML build
+doesn't include arch/x86/entry/entry_64.S implementation (because UML
+simulates registers etc).  Fixing this is as simple as adding a #define to
+arch/x86/um/sys_call_table_64.c:
+
+    #define stub_xyzzy sys_xyzzy
+
+
+Other Details
+-------------
+
+Most of the kernel treats system calls in a generic way, but there is the
+occasional exception that may need updating for your particular system call.
+
+The audit subsystem is one such special case; it includes (arch-specific)
+functions that classify some special types of system call -- specifically
+file open (open/openat), program execution (execve/execveat) or socket
+multiplexor (socketcall) operations. If your new system call is analogous to
+one of these, then the audit system should be updated.
+
+More generally, if there is an existing system call that is analogous to your
+new system call, it's worth doing a kernel-wide grep for the existing system
+call to check there are no other special cases.
+
+
+Testing
+-------
+
+A new system call should obviously be tested; it is also useful to provide
+reviewers with a demonstration of how user space programs will use the system
+call.  A good way to combine these aims is to include a simple self-test
+program in a new directory under tools/testing/selftests/.
+
+For a new system call, there will obviously be no libc wrapper function and so
+the test will need to invoke it using syscall(); also, if the system call
+involves a new userspace-visible structure, the corresponding header will need
+to be installed to compile the test.
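+
+A minimal invocation from the selftest might look like the following (the
+syscall number macro and the arguments are illustrative only):
+
+    #include <stdio.h>
+    #include <unistd.h>
+    #include <sys/syscall.h>
+
+    int main(void)
+    {
+        long ret = syscall(__NR_xyzzy, 0 /* fd */, 0 /* flags */, NULL);
+
+        if (ret == -1)
+            perror("xyzzy");
+        return ret == -1 ? 1 : 0;
+    }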
+
+Make sure the selftest runs successfully on all supported architectures.  For
+example, check that it works when compiled as an x86_64 (-m64), x86_32 (-m32)
+and x32 (-mx32) ABI program.
+
+For more extensive and thorough testing of new functionality, you should also
+consider adding tests to the Linux Test Project, or to the xfstests project
+for filesystem-related changes.
+ - https://linux-test-project.github.io/
+ - git://git.kernel.org/pub/scm/fs/xfs/xfstests-dev.git
+
+
+Man Page
+--------
+
+All new system calls should come with a complete man page, ideally using groff
+markup, but plain text will do.  If groff is used, it's helpful to include a
+pre-rendered ASCII version of the man page in the cover email for the
+patchset, for the convenience of reviewers.
+
+The man page should be cc'ed to linux-man@vger.kernel.org.
+For more details, see https://www.kernel.org/doc/man-pages/patches.html
+
+References and Sources
+----------------------
+
+ - LWN article from Michael Kerrisk on use of flags argument in system calls:
+   https://lwn.net/Articles/585415/
+ - LWN article from Michael Kerrisk on how to handle unknown flags in a system
+   call: https://lwn.net/Articles/588444/
+ - LWN article from Jake Edge describing constraints on 64-bit system call
+   arguments: https://lwn.net/Articles/311630/
+ - Pair of LWN articles from David Drysdale that describe the system call
+   implementation paths in detail for v3.14:
+    - https://lwn.net/Articles/604287/
+    - https://lwn.net/Articles/604515/
+ - Architecture-specific requirements for system calls are discussed in the
+   syscall(2) man-page:
+   http://man7.org/linux/man-pages/man2/syscall.2.html#NOTES
+ - Collated emails from Linus Torvalds discussing the problems with ioctl():
+   http://yarchive.net/comp/linux/ioctl.html
+ - "How to not invent kernel interfaces", Arnd Bergmann,
+   http://www.ukuug.org/events/linux2007/2007/papers/Bergmann.pdf
+ - LWN article from Michael Kerrisk on avoiding new uses of CAP_SYS_ADMIN:
+   https://lwn.net/Articles/486306/
+ - Recommendation from Andrew Morton that all related information for a new
+   system call should come in the same email thread:
+   https://lkml.org/lkml/2014/7/24/641
+ - Recommendation from Michael Kerrisk that a new system call should come with
+   a man page: https://lkml.org/lkml/2014/6/13/309
+ - Suggestion from Thomas Gleixner that x86 wire-up should be in a separate
+   commit: https://lkml.org/lkml/2014/11/19/254
+ - Suggestion from Greg Kroah-Hartman that it's good for new system calls to
+   come with a man-page & selftest: https://lkml.org/lkml/2014/3/19/710
+ - Discussion from Michael Kerrisk of new system call vs. prctl(2) extension:
+   https://lkml.org/lkml/2014/6/3/411
+ - Suggestion from Ingo Molnar that system calls that involve multiple
+   arguments should encapsulate those arguments in a struct, which includes a
+   size field for future extensibility: https://lkml.org/lkml/2015/7/30/117
+ - Numbering oddities arising from (re-)use of O_* numbering space flags:
+    - commit 75069f2b5bfb ("vfs: renumber FMODE_NONOTIFY and add to uniqueness
+      check")
+    - commit 12ed2e36c98a ("fanotify: FMODE_NONOTIFY and __O_SYNC in sparc
+      conflict")
+    - commit bb458c644a59 ("Safer ABI for O_TMPFILE")
+ - Discussion from Matthew Wilcox about restrictions on 64-bit arguments:
+   https://lkml.org/lkml/2008/12/12/187
+ - Recommendation from Greg Kroah-Hartman that unknown flags should be
+   policed: https://lkml.org/lkml/2014/7/17/577
+ - Recommendation from Linus Torvalds that x32 system calls should prefer
+   compatibility with 64-bit versions rather than 32-bit versions:
+   https://lkml.org/lkml/2011/8/31/244
diff --git a/Documentation/arm/Atmel/README b/Documentation/arm/Atmel/README
index c53a19b..0931cf7 100644
--- a/Documentation/arm/Atmel/README
+++ b/Documentation/arm/Atmel/README
@@ -90,6 +90,11 @@
         + Datasheet
           http://www.atmel.com/Images/Atmel-11238-32-bit-Cortex-A5-Microcontroller-SAMA5D4_Datasheet.pdf
 
+      - sama5d2 family
+        - sama5d27
+        + Datasheet
+          Coming soon
+
 
 Linux kernel information
 ------------------------
diff --git a/Documentation/arm/Samsung/Bootloader-interface.txt b/Documentation/arm/Samsung/Bootloader-interface.txt
index b96ead9..df8d4fb 100644
--- a/Documentation/arm/Samsung/Bootloader-interface.txt
+++ b/Documentation/arm/Samsung/Bootloader-interface.txt
@@ -15,6 +15,7 @@
 
 
 1. Non-Secure mode
+
 Address:      sysram_ns_base_addr
 Offset        Value                                        Purpose
 =============================================================================
@@ -28,6 +29,7 @@
 
 
 2. Secure mode
+
 Address:      sysram_base_addr
 Offset        Value                                        Purpose
 =============================================================================
@@ -40,14 +42,25 @@
 Address:      pmu_base_addr
 Offset        Value                                        Purpose
 =============================================================================
-0x0800        exynos_cpu_resume                            AFTR
+0x0800        exynos_cpu_resume                            AFTR, suspend
+0x0800        mcpm_entry_point (Exynos542x with MCPM)      AFTR, suspend
+0x0804        0xfcba0d10 (Magic cookie)                    AFTR
+0x0804        0x00000bad (Magic cookie)                    System suspend
 0x0814        exynos4_secondary_startup (Exynos4210 r1.1)  Secondary CPU boot
 0x0818        0xfcba0d10 (Magic cookie, Exynos4210 r1.1)   AFTR
 0x081C        exynos_cpu_resume (Exynos4210 r1.1)          AFTR
 
 
 3. Other (regardless of secure/non-secure mode)
+
 Address:      pmu_base_addr
 Offset        Value                           Purpose
 =============================================================================
 0x0908        Non-zero (only Exynos3250)      Secondary CPU boot up indicator
+
+
+4. Glossary
+
+AFTR - ARM Off Top Running, a low power mode in which the Cortex cores and
+many other modules are power gated, except the TOP modules
+MCPM - Multi-Cluster Power Management
diff --git a/Documentation/arm/keystone/Overview.txt b/Documentation/arm/keystone/Overview.txt
new file mode 100644
index 0000000..f17bc4c
--- /dev/null
+++ b/Documentation/arm/keystone/Overview.txt
@@ -0,0 +1,73 @@
+		TI Keystone Linux Overview
+		--------------------------
+
+Introduction
+------------
+The Keystone range of SoCs is based on ARM Cortex-A15 MPCore processors
+and C66x DSP cores. This document describes the essential information
+required to run Linux on Keystone-based EVMs from Texas Instruments.
+
+The following SoCs and EVMs are currently supported:
+
+------------ K2HK SoC and EVM --------------------------------------------------
+
+a.k.a Keystone 2 Hawking/Kepler SoC
+TCI6636K2H & TCI6636K2K: See documentation at
+	http://www.ti.com/product/tci6638k2k
+	http://www.ti.com/product/tci6638k2h
+
+EVM:
+http://www.advantech.com/Support/TI-EVM/EVMK2HX_sd.aspx
+
+------------ K2E SoC and EVM ---------------------------------------------------
+
+a.k.a Keystone 2 Edison SoC
+K2E  -  66AK2E05: See documentation at
+	http://www.ti.com/product/66AK2E05/technicaldocuments
+
+EVM:
+https://www.einfochips.com/index.php/partnerships/texas-instruments/k2e-evm.html
+
+------------ K2L SoC and EVM ---------------------------------------------------
+
+a.k.a Keystone 2 Lamarr SoC
+K2L  -  TCI6630K2L: See documentation at
+	http://www.ti.com/product/TCI6630K2L/technicaldocuments
+EVM:
+https://www.einfochips.com/index.php/partnerships/texas-instruments/k2l-evm.html
+
+Configuration
+-------------
+
+All of the K2 SoCs/EVMs share a common defconfig, keystone_defconfig, and the
+same image is used to boot the individual EVMs. The platform configuration is
+specified through DTS. The following DTS files are used:
+	K2HK EVM : k2hk-evm.dts
+	K2E EVM  : k2e-evm.dts
+	K2L EVM  : k2l-evm.dts
+
+The device tree documentation for the Keystone machines is located at
+        Documentation/devicetree/bindings/arm/keystone/keystone.txt
+
+Known issues & workaround
+-------------------------
+
+Some of the device drivers used on Keystone are re-used from DaVinci and
+other TI SoCs, and may use the clock APIs directly. Some of the
+Keystone-specific drivers, such as netcp, instead use the runtime power
+management API to enable their clocks. As this API has limitations on
+Keystone, the following workaround is needed to boot Linux.
+
+   Add 'clk_ignore_unused' to the bootargs env variable in u-boot. Otherwise
+   the clock framework will try to disable clocks that appear unused and
+   thereby disable the hardware. This is needed because the netcp power and
+   clock domains are enabled in u-boot, while the runtime power management
+   API currently does not enable clocks for netcp due to a limitation. The
+   workaround is expected to be removed once proper API support becomes
+   available.
+
+
+Document Author
+---------------
+Murali Karicheri <m-karicheri2@ti.com>
+Copyright 2015 Texas Instruments
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 1690350..7d9d3c2 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -81,7 +81,7 @@
   u64 res3	= 0;		/* reserved */
   u64 res4	= 0;		/* reserved */
   u32 magic	= 0x644d5241;	/* Magic number, little endian, "ARM\x64" */
-  u32 res5;      		/* reserved (used for PE COFF offset) */
+  u32 res5;			/* reserved (used for PE COFF offset) */
 
 
 Header notes:
@@ -103,7 +103,7 @@
 
 - The flags field (introduced in v3.17) is a little-endian 64-bit field
   composed as follows:
-  Bit 0: 	Kernel endianness.  1 if BE, 0 if LE.
+  Bit 0:	Kernel endianness.  1 if BE, 0 if LE.
   Bits 1-63:	Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
@@ -115,11 +115,14 @@
 address near the start of usable system RAM and called there. Memory
 below that base address is currently unusable by Linux, and therefore it
 is strongly recommended that this location is the start of system RAM.
+The region between the 2 MB aligned base address and the start of the
+image has no special significance to the kernel, and may be used for
+other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
 
-Any memory described to the kernel (even that below the 2MB aligned base
-address) which is not marked as reserved from the kernel e.g. with a
+Any memory described to the kernel (even that below the start of the
+image) which is not marked as reserved from the kernel (e.g., with a
 memreserve region in the device tree) will be considered as available to
 the kernel.
 
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index dab6da3..b19fc34 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -266,7 +266,9 @@
 atomic_cmpxchg will only satisfy its atomicity semantics as long as all
 other accesses of *v are performed through atomic_xxx operations.
 
-atomic_cmpxchg must provide explicit memory barriers around the operation.
+atomic_cmpxchg must provide explicit memory barriers around the operation,
+although if the comparison fails then no memory ordering guarantees are
+required.
 
 The semantics for atomic_cmpxchg are the same as those defined for 'cas'
 below.
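To make the ordering caveat concrete, here is a minimal, hypothetical retry loop built on atomic_cmpxchg(); only the iteration in which the compare succeeds is required to behave as a full memory barrier:

	static atomic_t foo_count = ATOMIC_INIT(1);

	static void foo_double_count(void)
	{
		int old, new;

		do {
			old = atomic_read(&foo_count);
			new = old * 2;
		} while (atomic_cmpxchg(&foo_count, old, new) != old);
		/* the failed attempts above carry no ordering guarantee;
		 * only the final, successful cmpxchg does */
	}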
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index fd12c0d..5be8a7f 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -1109,7 +1109,7 @@
 as specified.
 
 Now bh->b_end_io is replaced by bio->bi_end_io, but most of the time the
-right thing to use is bio_endio(bio, uptodate) instead.
+right thing to use is bio_endio(bio) instead.
 
 If the driver is dropping the io_request_lock from its request_fn strategy,
 then it just needs to replace that with q->queue_lock instead.
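A minimal sketch of completing a bio under the new convention (assuming the current scheme where the completion status travels in bio->bi_error rather than being passed as an argument):

	static void foo_complete_bio(struct bio *bio, int err)
	{
		bio->bi_error = err;	/* 0 on success, negative errno on failure */
		bio_endio(bio);
	}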
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
index 74a32ad..2568958 100644
--- a/Documentation/block/biovecs.txt
+++ b/Documentation/block/biovecs.txt
@@ -24,7 +24,7 @@
 normal code doesn't have to deal with bi_bvec_done.
 
  * Driver code should no longer refer to biovecs directly; we now have
-   bio_iovec() and bio_iovec_iter() macros that return literal struct biovecs,
+   bio_iovec() and bio_iter_iovec() macros that return literal struct biovecs,
    constructed from the raw biovecs but taking into account bi_bvec_done and
    bi_size.
 
@@ -109,3 +109,11 @@
    over all the biovecs in the new bio - which is silly as it's not needed.
 
    So, don't use bi_vcnt anymore.
+
+ * The current interface allows the block layer to split bios as needed, so we
+   could eliminate a lot of complexity particularly in stacked drivers. Code
+   that creates bios can then create whatever size bios are convenient, and
+   more importantly stacked drivers don't have to deal with both their own bio
+   size limitations and the limitations of the underlying devices. Thus
+   there's no need to define ->merge_bvec_fn() callbacks for individual block
+   drivers.
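For illustration, a hypothetical helper that walks a bio with the segment iterator instead of poking at the raw biovec array; each bio_vec handed out is built via bio_iter_iovec(), so bi_bvec_done and bi_size are already accounted for:

	static void foo_zero_bio(struct bio *bio)
	{
		struct bio_vec bvec;
		struct bvec_iter iter;

		bio_for_each_segment(bvec, bio, iter) {
			char *addr = kmap_atomic(bvec.bv_page);

			memset(addr + bvec.bv_offset, 0, bvec.bv_len);
			kunmap_atomic(addr);
		}
	}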
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 3a29f89..e5d9148 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -20,7 +20,7 @@
 reported by the device. A value of '0' means device does not support
 the discard functionality.
 
-discard_max_bytes (RO)
+discard_max_hw_bytes (RO)
 ----------------------
 Devices that support discard functionality may have internal limits on
 the number of bytes that can be trimmed or unmapped in a single operation.
@@ -29,6 +29,14 @@
 requests issued to the device must not exceed this limit. A discard_max_bytes
 value of 0 means that the device does not support discard functionality.
 
+discard_max_bytes (RW)
+----------------------
+While discard_max_hw_bytes is the hardware limit for the device, this
+setting is the software limit. Some devices exhibit large latencies when
+large discards are issued; setting this value lower will make Linux issue
+smaller discards and can help reduce the latencies induced by large
+discard operations.
+
 discard_zeroes_data (RO)
 ------------------------
 When read, this file will show if the discarded block are zeroed by the
diff --git a/Documentation/cgroups/00-INDEX b/Documentation/cgroups/00-INDEX
index 96ce071..3f5a40f 100644
--- a/Documentation/cgroups/00-INDEX
+++ b/Documentation/cgroups/00-INDEX
@@ -22,6 +22,8 @@
 	- Network classifier cgroups details and usages.
 net_prio.txt
 	- Network priority cgroups details and usages.
+pids.txt
+	- Process number cgroups details and usages.
 resource_counter.txt
 	- Resource Counter API.
 unified-hierarchy.txt
diff --git a/Documentation/cgroups/pids.txt b/Documentation/cgroups/pids.txt
new file mode 100644
index 0000000..1a078b5
--- /dev/null
+++ b/Documentation/cgroups/pids.txt
@@ -0,0 +1,85 @@
+						   Process Number Controller
+						   =========================
+
+Abstract
+--------
+
+The process number controller is used to allow a cgroup hierarchy to stop any
+new tasks from being fork()'d or clone()'d after a certain limit is reached.
+
+Since it is trivial to hit the task limit without hitting any kmemcg limits in
+place, PIDs are a fundamental resource. As such, PID exhaustion must be
+preventable in the scope of a cgroup hierarchy by allowing resource limiting of
+the number of tasks in a cgroup.
+
+Usage
+-----
+
+In order to use the `pids` controller, set the maximum number of tasks in
+pids.max (this is not available in the root cgroup for obvious reasons). The
+number of processes currently in the cgroup is given by pids.current.
+
+Organisational operations are not blocked by cgroup policies, so it is possible
+to have pids.current > pids.max. This can be done by either setting the limit to
+be smaller than pids.current, or attaching enough processes to the cgroup such
+that pids.current > pids.max. However, it is not possible to violate a cgroup
+policy through fork() or clone(). fork() and clone() will return -EAGAIN if the
+creation of a new process would cause a cgroup policy to be violated.
+
+To set a cgroup to have no limit, set pids.max to "max". This is the default for
+all new cgroups (N.B. that PID limits are hierarchical, so the most stringent
+limit in the hierarchy is followed).
+
+pids.current tracks all child cgroup hierarchies, so parent/pids.current is a
+superset of parent/child/pids.current.
+
+Example
+-------
+
+First, we mount the pids controller:
+# mkdir -p /sys/fs/cgroup/pids
+# mount -t cgroup -o pids none /sys/fs/cgroup/pids
+
+Then we create a hierarchy, set limits and attach processes to it:
+# mkdir -p /sys/fs/cgroup/pids/parent/child
+# echo 2 > /sys/fs/cgroup/pids/parent/pids.max
+# echo $$ > /sys/fs/cgroup/pids/parent/cgroup.procs
+# cat /sys/fs/cgroup/pids/parent/pids.current
+2
+#
+
+It should be noted that attempts to overcome the set limit (2 in this case) will
+fail:
+
+# cat /sys/fs/cgroup/pids/parent/pids.current
+2
+# ( /bin/echo "Here's some processes for you." | cat )
+sh: fork: Resource temporarily unavailable
+#
+
+Even if we migrate to a child cgroup (which doesn't have a set limit), we will
+not be able to overcome the most stringent limit in the hierarchy (in this case,
+parent's):
+
+# echo $$ > /sys/fs/cgroup/pids/parent/child/cgroup.procs
+# cat /sys/fs/cgroup/pids/parent/pids.current
+2
+# cat /sys/fs/cgroup/pids/parent/child/pids.current
+2
+# cat /sys/fs/cgroup/pids/parent/child/pids.max
+max
+# ( /bin/echo "Here's some processes for you." | cat )
+sh: fork: Resource temporarily unavailable
+#
+
+We can set a limit that is smaller than pids.current, which will stop any new
+processes from being forked at all (note that the shell itself counts towards
+pids.current):
+
+# echo 1 > /sys/fs/cgroup/pids/parent/pids.max
+# /bin/echo "We can't even spawn a single process now."
+sh: fork: Resource temporarily unavailable
+# echo 0 > /sys/fs/cgroup/pids/parent/pids.max
+# /bin/echo "We can't even spawn a single process now."
+sh: fork: Resource temporarily unavailable
+#
diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt
index 86847a7..1ee9caf 100644
--- a/Documentation/cgroups/unified-hierarchy.txt
+++ b/Documentation/cgroups/unified-hierarchy.txt
@@ -23,10 +23,13 @@
 5. Other Changes
   5-1. [Un]populated Notification
   5-2. Other Core Changes
-  5-3. Per-Controller Changes
-    5-3-1. blkio
-    5-3-2. cpuset
-    5-3-3. memory
+  5-3. Controller File Conventions
+    5-3-1. Format
+    5-3-2. Control Knobs
+  5-4. Per-Controller Changes
+    5-4-1. blkio
+    5-4-2. cpuset
+    5-4-3. memory
 6. Planned Changes
   6-1. CAP for resource control
 
@@ -372,14 +375,75 @@
 - The "cgroup.clone_children" file is removed.
 
 
-5-3. Per-Controller Changes
+5-3. Controller File Conventions
 
-5-3-1. blkio
+5-3-1. Format
+
+In general, all controller files should be in one of the following
+formats whenever possible.
+
+- Values only files
+
+  VAL0 VAL1...\n
+
+- Flat keyed files
+
+  KEY0 VAL0\n
+  KEY1 VAL1\n
+  ...
+
+- Nested keyed files
+
+  KEY0 SUB_KEY0=VAL00 SUB_KEY1=VAL01...
+  KEY1 SUB_KEY0=VAL10 SUB_KEY1=VAL11...
+  ...
+
+For a writeable file, the format for writing should generally match
+reading; however, controllers may allow omitting later fields or
+implement restricted shortcuts for most common use cases.
+
+For both flat and nested keyed files, only the values for a single key
+can be written at a time.  For nested keyed files, the sub key pairs
+may be specified in any order and not all pairs have to be specified.
+
+
+5-3-2. Control Knobs
+
+- Settings for a single feature should generally be implemented in a
+  single file.
+
+- In general, the root cgroup should be exempt from resource control
+  and thus shouldn't have resource control knobs.
+
+- If a controller implements ratio based resource distribution, the
+  control knob should be named "weight" and have the range [1, 10000],
+  with 100 as the default value.  The values are chosen to allow
+  enough and symmetric bias in both directions while keeping it
+  intuitive (the default is 100%).
+
+- If a controller implements an absolute resource guarantee and/or
+  limit, the control knobs should be named "min" and "max"
+  respectively.  If a controller implements best effort resource
+  guarantee and/or limit, the control knobs should be named "low" and
+  "high" respectively.
+
+  In the above four control files, the special token "max" should be
+  used to represent upward infinity for both reading and writing.
+
+- If a setting has a configurable default value and specific overrides,
+  the default setting should be keyed with "default" and appear as
+  the first entry in the file.  Specific entries can use "default" as
+  their value to indicate inheritance of the default value.
+
+
+5-4. Per-Controller Changes
+
+5-4-1. blkio
 
 - blk-throttle becomes properly hierarchical.
 
 
-5-3-2. cpuset
+5-4-2. cpuset
 
 - Tasks are kept in empty cpusets after hotplug and take on the masks
   of the nearest non-empty ancestor, instead of being moved to it.
@@ -388,7 +452,7 @@
   masks of the nearest non-empty ancestor.
 
 
-5-3-3. memory
+5-4-3. memory
 
 - use_hierarchy is on by default and the cgroup file for the flag is
   not created.
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index f463bdc..5c4bc4d 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -71,12 +71,8 @@
 		long		(*round_rate)(struct clk_hw *hw,
 						unsigned long rate,
 						unsigned long *parent_rate);
-		long		(*determine_rate)(struct clk_hw *hw,
-						unsigned long rate,
-						unsigned long min_rate,
-						unsigned long max_rate,
-						unsigned long *best_parent_rate,
-						struct clk_hw **best_parent_clk);
+		int		(*determine_rate)(struct clk_hw *hw,
+						  struct clk_rate_request *req);
 		int		(*set_parent)(struct clk_hw *hw, u8 index);
 		u8		(*get_parent)(struct clk_hw *hw);
 		int		(*set_rate)(struct clk_hw *hw,
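A minimal sketch of a .determine_rate implementation against the new prototype, for a hypothetical integer-divider clock; struct clk_rate_request carries the requested rate, the min/max constraints and the proposed parent rate into and out of the callback:

	static int foo_clk_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
	{
		unsigned long div;

		if (!req->rate)
			return -EINVAL;

		/* honour the constraints the core passed in */
		req->rate = clamp(req->rate, req->min_rate, req->max_rate);

		/* report the rate an integer divider can actually achieve */
		div = DIV_ROUND_UP(req->best_parent_rate, req->rate);
		req->rate = req->best_parent_rate / div;

		return 0;
	}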
diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt
index 70933ea..ba78e7c 100644
--- a/Documentation/cpu-freq/core.txt
+++ b/Documentation/cpu-freq/core.txt
@@ -55,16 +55,13 @@
 ----------------------------
 
 These are notified when a new policy is intended to be set. Each
-CPUFreq policy notifier is called three times for a policy transition:
+CPUFreq policy notifier is called twice for a policy transition:
 
 1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if
     they see a need for this - may it be thermal considerations or
     hardware limitations.
 
-2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid
-    hardware failure.
-
-3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy
+2.) During CPUFREQ_NOTIFY all notifiers are informed of the new policy
    - if two hardware drivers failed to agree on a new policy before this
    stage, the incompatible hardware shall be shut down, and the user
    informed of this.
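A minimal, hypothetical policy notifier reflecting the two remaining notifications; a driver that needs to cap a policy does so from CPUFREQ_ADJUST, and CPUFREQ_NOTIFY then reports the final, agreed policy:

	static int foo_policy_notify(struct notifier_block *nb,
				     unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		if (event == CPUFREQ_ADJUST)
			/* e.g. a thermal cap at 1.2 GHz (values in kHz) */
			cpufreq_verify_within_limits(policy, 0, 1200000);

		return NOTIFY_DONE;
	}

	static struct notifier_block foo_policy_nb = {
		.notifier_call = foo_policy_notify,
	};

	/* registered from driver init with: */
	cpufreq_register_notifier(&foo_policy_nb, CPUFREQ_POLICY_NOTIFIER);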
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index cb12af3..df2d636 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -209,6 +209,37 @@
 	"repair" - Initiate a repair of the array.
 	"reshape"- Currently unsupported (-EINVAL).
 
+
+Discard Support
+---------------
+The implementation of discard support among hardware vendors varies.
+When a block is discarded, some storage devices will return zeroes when
+the block is read.  These devices set the 'discard_zeroes_data'
+attribute.  Other devices will return random data.  Confusingly, some
+devices that advertise 'discard_zeroes_data' will not reliably return
+zeroes when discarded blocks are read!  Since RAID 4/5/6 uses blocks
+from a number of devices to calculate parity blocks and (for performance
+reasons) relies on 'discard_zeroes_data' being reliable, it is important
+that the devices be consistent.  Blocks may be discarded in the middle
+of a RAID 4/5/6 stripe and if subsequent read results are not
+consistent, the parity blocks may be calculated differently at any time,
+making the parity blocks useless for redundancy.  It is important to
+understand how your hardware behaves with discards if you are going to
+enable discards with RAID 4/5/6.
+
+Since the behavior of storage devices is unreliable in this respect,
+even when reporting 'discard_zeroes_data', by default RAID 4/5/6
+discard support is disabled -- this ensures data integrity at the
+expense of losing some performance.
+
+Storage devices that properly support 'discard_zeroes_data' are
+increasingly whitelisted in the kernel and can thus be trusted.
+
+For trusted devices, the following dm-raid module parameter can be set
+to safely enable discard support for RAID 4/5/6:
+    'devices_handle_discards_safely'
+
+
 Version History
 ---------------
 1.0.0	Initial version.  Support for RAID 4/5/6
diff --git a/Documentation/device-mapper/statistics.txt b/Documentation/device-mapper/statistics.txt
index 4919b2d..6f5ef94 100644
--- a/Documentation/device-mapper/statistics.txt
+++ b/Documentation/device-mapper/statistics.txt
@@ -121,6 +121,10 @@
 
 	Output format:
 	  <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
+	        precise_timestamps histogram:n1,n2,n3,...
+
+	The strings "precise_timestamps" and "histogram" are printed only
+	if they were specified when creating the region.
 
     @stats_print <region_id> [<starting_line> <number_of_lines>]
 
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
new file mode 100644
index 0000000..1ae98b87
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -0,0 +1,17 @@
+* ARC HS Performance Counters
+
+The ARC HS can be configured with a pipeline performance monitor for counting
+CPU and cache events like cache misses and hits. Like conventional PCT there
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
+It also supports overflow interrupts.
+
+Required properties:
+
+- compatible : should contain
+	"snps,archs-pct"
+
+Example:
+
+pmu {
+        compatible = "snps,archs-pct";
+};
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 424ac8c..7fd64ec 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -27,6 +27,8 @@
     o "atmel,at91sam9xe"
  * "atmel,sama5" for SoCs using a Cortex-A5, shall be extended with the specific
    SoC family:
+    o "atmel,sama5d2" shall be extended with the specific SoC compatible:
+       - "atmel,sama5d27"
     o "atmel,sama5d3" shall be extended with the specific SoC compatible:
        - "atmel,sama5d31"
        - "atmel,sama5d33"
@@ -50,6 +52,7 @@
 - reg: Should contain registers location and length
 - interrupts: Should contain interrupt for the ST which is the IRQ line
   shared across all System Controller members.
+- clocks: phandle to input clock.
 Its subnodes can be:
 - watchdog: compatible should be "atmel,at91rm9200-wdt"
 
@@ -61,7 +64,7 @@
   Note that you can specify several interrupt cells if the TC
   block has one interrupt per channel.
 - clock-names: tuple listing input clock names.
-	Required elements: "t0_clk"
+	Required elements: "t0_clk", "slow_clk"
 	Optional elements: "t1_clk", "t2_clk"
 - clocks: phandles to input clocks.
 
@@ -87,14 +90,16 @@
 
 RSTC Reset Controller required properties:
 - compatible: Should be "atmel,<chip>-rstc".
-  <chip> can be "at91sam9260" or "at91sam9g45"
+  <chip> can be "at91sam9260" or "at91sam9g45" or "sama5d3"
 - reg: Should contain registers location and length
+- clocks: phandle to input clock.
 
 Example:
 
 	rstc@fffffd00 {
 		compatible = "atmel,at91sam9260-rstc";
 		reg = <0xfffffd00 0x10>;
+		clocks = <&clk32k>;
 	};
 
 RAMC SDRAM/DDR Controller required properties:
@@ -117,6 +122,7 @@
 - compatible: Should be "atmel,<chip>-shdwc".
   <chip> can be "at91sam9260", "at91sam9rl" or "at91sam9x5".
 - reg: Should contain registers location and length
+- clocks: phandle to input clock.
 
 optional properties:
 - atmel,wakeup-mode: String, operation mode of the wakeup mode.
@@ -135,9 +141,10 @@
 
 Example:
 
-	rstc@fffffd00 {
-		compatible = "atmel,at91sam9260-rstc";
-		reg = <0xfffffd00 0x10>;
+	shdwc@fffffd10 {
+		compatible = "atmel,at91sam9260-shdwc";
+		reg = <0xfffffd10 0x10>;
+		clocks = <&clk32k>;
 	};
 
 Special Function Registers (SFR)
diff --git a/Documentation/devicetree/bindings/arm/bcm/ns2.txt b/Documentation/devicetree/bindings/arm/bcm/ns2.txt
new file mode 100644
index 0000000..35f056f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/ns2.txt
@@ -0,0 +1,9 @@
+Broadcom North Star 2 (NS2) device tree bindings
+------------------------------------------------
+
+Boards with NS2 shall have the following properties:
+
+Required root node property:
+
+NS2 SVK board
+compatible = "brcm,ns2-svk", "brcm,ns2";
diff --git a/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.txt b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.txt
new file mode 100644
index 0000000..6824b31
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.txt
@@ -0,0 +1,14 @@
+Raspberry Pi VideoCore firmware driver
+
+Required properties:
+
+- compatible:		Should be "raspberrypi,bcm2835-firmware"
+- mboxes:		Phandle to the firmware device's Mailbox.
+			  (See: ../mailbox/mailbox.txt for more information)
+
+Example:
+
+firmware {
+	compatible = "raspberrypi,bcm2835-firmware";
+	mboxes = <&mailbox>;
+};
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 65a6db2..62938eb 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -17,6 +17,7 @@
 		- "arm,coresight-tmc", "arm,primecell";
 		- "arm,coresight-funnel", "arm,primecell";
 		- "arm,coresight-etm3x", "arm,primecell";
+		- "arm,coresight-etm4x", "arm,primecell";
 		- "qcom,coresight-replicator1x", "arm,primecell";
 
 	* reg: physical base address and length of the register
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
index c431c67..c733e28 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt
@@ -127,6 +127,24 @@
 		#clock-cells = <1>;
 	};
 
+
+Hisilicon Hi6220 SRAM controller
+
+Required properties:
+- compatible : "hisilicon,hi6220-sramctrl", "syscon"
+- reg : Register address and size
+
+Hisilicon SoCs use SRAM for multiple purposes; on Hi6220 there are several
+SRAM banks for power management, modem, security, etc. The "syscon" interface
+is used to manage the common SRAM that can be shared by multiple modules.
+
+Example:
+	/*for Hi6220*/
+	sram: sram@fff80000 {
+		compatible = "hisilicon,hi6220-sramctrl", "syscon";
+		reg = <0x0 0xfff80000 0x0 0x12000>;
+	};
+
 -----------------------------------------------------------------------
 Hisilicon HiP01 system controller
 
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 2251dcc..06c88a4 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -67,6 +67,12 @@
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
diff --git a/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt b/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
index 4f40ff3..5171ad8 100644
--- a/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
+++ b/Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
@@ -20,6 +20,8 @@
 board. Currently known boards are:
 
 "buffalo,lschlv2"
+"buffalo,lswvl"
+"buffalo,lswxl"
 "buffalo,lsxhl"
 "buffalo,lsxl"
 "dlink,dns-320"
diff --git a/Documentation/devicetree/bindings/arm/mediatek.txt b/Documentation/devicetree/bindings/arm/mediatek.txt
index dd7550a..618a9199 100644
--- a/Documentation/devicetree/bindings/arm/mediatek.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek.txt
@@ -1,12 +1,15 @@
-MediaTek mt65xx & mt81xx Platforms Device Tree Bindings
+MediaTek mt65xx, mt67xx & mt81xx Platforms Device Tree Bindings
 
-Boards with a MediaTek mt65xx/mt81xx SoC shall have the following property:
+Boards with a MediaTek mt65xx/mt67xx/mt81xx SoC shall have the
+following property:
 
 Required root node property:
 
 compatible: Must contain one of
+   "mediatek,mt6580"
    "mediatek,mt6589"
    "mediatek,mt6592"
+   "mediatek,mt6795"
    "mediatek,mt8127"
    "mediatek,mt8135"
    "mediatek,mt8173"
@@ -14,12 +17,18 @@
 
 Supported boards:
 
+- Evaluation board for MT6580:
+    Required root node properties:
+      - compatible = "mediatek,mt6580-evbp1", "mediatek,mt6580";
 - bq Aquaris5 smart phone:
     Required root node properties:
       - compatible = "mundoreader,bq-aquaris5", "mediatek,mt6589";
 - Evaluation board for MT6592:
     Required root node properties:
       - compatible = "mediatek,mt6592-evb", "mediatek,mt6592";
+- Evaluation board for MT6795(Helio X10):
+    Required root node properties:
+      - compatible = "mediatek,mt6795-evb", "mediatek,mt6795";
 - MTK mt8127 tablet moose EVB:
     Required root node properties:
       - compatible = "mediatek,mt8127-moose", "mediatek,mt8127";
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
index 4f5a535..afef6a8 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sysirq.txt
@@ -1,4 +1,4 @@
-Mediatek 65xx/81xx sysirq
+Mediatek 65xx/67xx/81xx sysirq
 
 Mediatek SOCs sysirq support controllable irq inverter for each GIC SPI
 interrupt.
@@ -8,9 +8,11 @@
 	"mediatek,mt8173-sysirq"
 	"mediatek,mt8135-sysirq"
 	"mediatek,mt8127-sysirq"
+	"mediatek,mt6795-sysirq"
 	"mediatek,mt6592-sysirq"
 	"mediatek,mt6589-sysirq"
 	"mediatek,mt6582-sysirq"
+	"mediatek,mt6580-sysirq"
 	"mediatek,mt6577-sysirq"
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Use the same format as specified by GIC in
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 4f6a82c..9f4e513 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -135,6 +135,9 @@
 - AM335X OrionLXm : Substation Automation Platform
   compatible = "novatech,am335x-lxm", "ti,am33xx"
 
+- AM335X phyBOARD-WEGA: Single Board Computer dev kit
+  compatible = "phytec,am335x-wega", "phytec,am335x-phycore-som", "ti,am33xx"
+
 - OMAP5 EVM : Evaluation Module
   compatible = "ti,omap5-evm", "ti,omap5"
 
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 3b5f5d1..435251f 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -26,13 +26,19 @@
 
 Optional properties:
 
-- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
-                       to CPU nodes corresponding directly to the affinity of
+- interrupt-affinity : When using SPIs, specifies a list of phandles to CPU
+                       nodes corresponding directly to the affinity of
 		       the SPIs listed in the interrupts property.
 
-		       This property should be present when there is more than
+                       When using a PPI, specifies a list of phandles to CPU
+		       nodes corresponding to the set of CPUs which have
+		       a PMU of this type signalling the PPI listed in the
+		       interrupts property.
+
+                       This property should be present when there is more than
 		       a single SPI.
 
+
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
 
diff --git a/Documentation/devicetree/bindings/arm/rockchip.txt b/Documentation/devicetree/bindings/arm/rockchip.txt
index 60d4a1e..af58cd7 100644
--- a/Documentation/devicetree/bindings/arm/rockchip.txt
+++ b/Documentation/devicetree/bindings/arm/rockchip.txt
@@ -26,3 +26,38 @@
 - ChipSPARK PopMetal-RK3288 board:
     Required root node properties:
       - compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
+
+- Netxeon R89 board:
+    Required root node properties:
+      - compatible = "netxeon,r89", "rockchip,rk3288";
+
+- Google Jerry (Hisense Chromebook C11 and more):
+    Required root node properties:
+      - compatible = "google,veyron-jerry-rev7", "google,veyron-jerry-rev6",
+		     "google,veyron-jerry-rev5", "google,veyron-jerry-rev4",
+		     "google,veyron-jerry-rev3", "google,veyron-jerry",
+		     "google,veyron", "rockchip,rk3288";
+
+- Google Minnie (Asus Chromebook Flip C100P):
+    Required root node properties:
+      - compatible = "google,veyron-minnie-rev4", "google,veyron-minnie-rev3",
+		     "google,veyron-minnie-rev2", "google,veyron-minnie-rev1",
+		     "google,veyron-minnie-rev0", "google,veyron-minnie",
+		     "google,veyron", "rockchip,rk3288";
+
+- Google Pinky (dev-board):
+    Required root node properties:
+      - compatible = "google,veyron-pinky-rev2", "google,veyron-pinky",
+		     "google,veyron", "rockchip,rk3288";
+
+- Google Speedy (Asus C201 Chromebook):
+    Required root node properties:
+      - compatible = "google,veyron-speedy-rev9", "google,veyron-speedy-rev8",
+		     "google,veyron-speedy-rev7", "google,veyron-speedy-rev6",
+		     "google,veyron-speedy-rev5", "google,veyron-speedy-rev4",
+		     "google,veyron-speedy-rev3", "google,veyron-speedy-rev2",
+		     "google,veyron-speedy", "google,veyron", "rockchip,rk3288";
+
+- Rockchip R88 board:
+    Required root node properties:
+      - compatible = "rockchip,r88", "rockchip,rk3368";
diff --git a/Documentation/devicetree/bindings/arm/sp810.txt b/Documentation/devicetree/bindings/arm/sp810.txt
new file mode 100644
index 0000000..6808fb5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/sp810.txt
@@ -0,0 +1,46 @@
+SP810 System Controller
+-----------------------
+
+Required properties:
+
+- compatible:	standard compatible string for a Primecell peripheral,
+		see Documentation/devicetree/bindings/arm/primecell.txt
+		for more details
+		should be: "arm,sp810", "arm,primecell"
+
+- reg:		standard registers property, physical address and size
+		of the control registers
+
+- clock-names:	from the common clock bindings, for more details see
+		Documentation/devicetree/bindings/clock/clock-bindings.txt;
+		should be: "refclk", "timclk", "apb_pclk"
+
+- clocks:	from the common clock bindings, phandle and clock
+		specifier pairs for the entries of clock-names property
+
+- #clock-cells: from the common clock bindings;
+		should be: <1>
+
+- clock-output-names: from the common clock bindings;
+		should be: "timerclken0", "timerclken1", "timerclken2", "timerclken3"
+
+- assigned-clocks: from the common clock binding;
+		should be: clock specifier for each output clock of this
+		provider node
+
+- assigned-clock-parents: from the common clock binding;
+		should be: phandle of input clock listed in clocks
+		property with the highest frequency
+
+Example:
+	v2m_sysctl: sysctl@020000 {
+		compatible = "arm,sp810", "arm,primecell";
+		reg = <0x020000 0x1000>;
+		clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
+		clock-names = "refclk", "timclk", "apb_pclk";
+		#clock-cells = <1>;
+		clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+		assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+		assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
+
+	};
diff --git a/Documentation/devicetree/bindings/clock/gpio-mux-clock.txt b/Documentation/devicetree/bindings/clock/gpio-mux-clock.txt
new file mode 100644
index 0000000..2be1e03
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/gpio-mux-clock.txt
@@ -0,0 +1,19 @@
+Binding for simple gpio clock multiplexer.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "gpio-mux-clock".
+- clocks: list of two references to parent clocks.
+- #clock-cells : from common clock binding; shall be set to 0.
+- select-gpios : GPIO reference for selecting the parent clock.
+
+Example:
+	clock {
+		compatible = "gpio-mux-clock";
+		clocks = <&parentclk1>, <&parentclk2>;
+		#clock-cells = <0>;
+		select-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/hi6220-clock.txt b/Documentation/devicetree/bindings/clock/hi6220-clock.txt
index 259e30a..e4d5fea 100644
--- a/Documentation/devicetree/bindings/clock/hi6220-clock.txt
+++ b/Documentation/devicetree/bindings/clock/hi6220-clock.txt
@@ -15,19 +15,36 @@
 	- "hisilicon,hi6220-sysctrl"
 	- "hisilicon,hi6220-mediactrl"
 	- "hisilicon,hi6220-pmctrl"
+	- "hisilicon,hi6220-stub-clk"
 
 - reg: physical base address of the controller and length of memory mapped
   region.
 
 - #clock-cells: should be 1.
 
-For example:
+Optional Properties:
+
+- hisilicon,hi6220-clk-sram: phandle to the syscon managing the SoC internal sram;
+  the driver needs to use the sram to pass parameters for frequency changes.
+
+- mboxes: use the label reference for the mailbox as the first parameter; the
+  second parameter is the channel number.
+
+Example 1:
 	sys_ctrl: sys_ctrl@f7030000 {
 		compatible = "hisilicon,hi6220-sysctrl", "syscon";
 		reg = <0x0 0xf7030000 0x0 0x2000>;
 		#clock-cells = <1>;
 	};
 
+Example 2:
+	stub_clock: stub_clock {
+		compatible = "hisilicon,hi6220-stub-clk";
+		hisilicon,hi6220-clk-sram = <&sram>;
+		#clock-cells = <1>;
+		mboxes = <&mailbox 1>;
+	};
+
 Each clock is assigned an identifier and client nodes use this identifier
 to specify the clock which they consume.
 
diff --git a/Documentation/devicetree/bindings/clock/imx6ul-clock.txt b/Documentation/devicetree/bindings/clock/imx6ul-clock.txt
new file mode 100644
index 0000000..571d503
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx6ul-clock.txt
@@ -0,0 +1,13 @@
+* Clock bindings for Freescale i.MX6 UltraLite
+
+Required properties:
+- compatible: Should be "fsl,imx6ul-ccm"
+- reg: Address and length of the register set
+- #clock-cells: Should be <1>
+- clocks: list of clock specifiers, must contain an entry for each required
+  entry in clock-names
+- clock-names: should include entries "ckil", "osc", "ipp_di0" and "ipp_di1"
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell.  See include/dt-bindings/clock/imx6ul-clock.h
+for the full list of i.MX6 UltraLite clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
new file mode 100644
index 0000000..52b457c
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/mt8173-cpu-dvfs.txt
@@ -0,0 +1,83 @@
+Device Tree Clock bindings for CPU DVFS of Mediatek MT8173 SoC
+
+Required properties:
+- clocks: A list of phandle + clock-specifier pairs for the clocks listed in clock names.
+- clock-names: Should contain the following:
+	"cpu"		- The multiplexer for clock input of CPU cluster.
+	"intermediate"	- A parent of "cpu" clock which is used as "intermediate" clock
+			  source (usually MAINPLL) when the original CPU PLL is under
+			  transition and not stable yet.
+	Please refer to Documentation/devicetree/bindings/clock/clock-bindings.txt for
+	generic clock consumer properties.
+- proc-supply: Regulator for Vproc of CPU cluster.
+
+Optional properties:
+- sram-supply: Regulator for Vsram of CPU cluster. When present, the cpufreq driver
+	       needs to do "voltage tracking" to step by step scale up/down Vproc and
+	       Vsram to fit SoC specific needs. When absent, the voltage scaling
+	       flow is handled by hardware, hence no software "voltage tracking" is
+	       needed.
+
+Example:
+--------
+	cpu0: cpu@0 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a53";
+		reg = <0x000>;
+		enable-method = "psci";
+		cpu-idle-states = <&CPU_SLEEP_0>;
+		clocks = <&infracfg CLK_INFRA_CA53SEL>,
+			 <&apmixedsys CLK_APMIXED_MAINPLL>;
+		clock-names = "cpu", "intermediate";
+	};
+
+	cpu1: cpu@1 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a53";
+		reg = <0x001>;
+		enable-method = "psci";
+		cpu-idle-states = <&CPU_SLEEP_0>;
+		clocks = <&infracfg CLK_INFRA_CA53SEL>,
+			 <&apmixedsys CLK_APMIXED_MAINPLL>;
+		clock-names = "cpu", "intermediate";
+	};
+
+	cpu2: cpu@100 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a57";
+		reg = <0x100>;
+		enable-method = "psci";
+		cpu-idle-states = <&CPU_SLEEP_0>;
+		clocks = <&infracfg CLK_INFRA_CA57SEL>,
+			 <&apmixedsys CLK_APMIXED_MAINPLL>;
+		clock-names = "cpu", "intermediate";
+	};
+
+	cpu3: cpu@101 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a57";
+		reg = <0x101>;
+		enable-method = "psci";
+		cpu-idle-states = <&CPU_SLEEP_0>;
+		clocks = <&infracfg CLK_INFRA_CA57SEL>,
+			 <&apmixedsys CLK_APMIXED_MAINPLL>;
+		clock-names = "cpu", "intermediate";
+	};
+
+	&cpu0 {
+		proc-supply = <&mt6397_vpca15_reg>;
+	};
+
+	&cpu1 {
+		proc-supply = <&mt6397_vpca15_reg>;
+	};
+
+	&cpu2 {
+		proc-supply = <&da9211_vcpu_reg>;
+		sram-supply = <&mt6397_vsramca7_reg>;
+	};
+
+	&cpu3 {
+		proc-supply = <&da9211_vcpu_reg>;
+		sram-supply = <&mt6397_vsramca7_reg>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt
new file mode 100644
index 0000000..ee7e5fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-dfll.txt
@@ -0,0 +1,79 @@
+NVIDIA Tegra124 DFLL FCPU clocksource
+
+This binding uses the common clock binding:
+Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+The DFLL IP block on Tegra is a root clocksource designed for clocking
+the fast CPU cluster. It consists of a free-running voltage controlled
+oscillator connected to the CPU voltage rail (VDD_CPU), and a closed loop
+control module that will automatically adjust the VDD_CPU voltage by
+communicating with an off-chip PMIC either via an I2C bus or via PWM signals.
+Currently only the I2C mode is supported by these bindings.
+
+Required properties:
+- compatible : should be "nvidia,tegra124-dfll"
+- reg : Defines the following set of registers, in the order listed:
+        - registers for the DFLL control logic.
+        - registers for the I2C output logic.
+        - registers for the integrated I2C master controller.
+        - look-up table RAM for voltage register values.
+- interrupts: Should contain the DFLL block interrupt.
+- clocks: Must contain an entry for each entry in clock-names.
+  See clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - soc: Clock source for the DFLL control logic.
+  - ref: The closed loop reference clock
+  - i2c: Clock source for the integrated I2C master.
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+  - dvco: Reset control for the DFLL DVCO.
+- #clock-cells: Must be 0.
+- clock-output-names: Name of the clock output.
+- vdd-cpu-supply: Regulator for the CPU voltage rail that the DFLL
+  hardware will start controlling. The regulator will be queried for
+  the I2C register, control values and supported voltages.
+
+Required properties for the control loop parameters:
+- nvidia,sample-rate: Sample rate of the DFLL control loop.
+- nvidia,droop-ctrl: See the register CL_DVFS_DROOP_CTRL in the TRM.
+- nvidia,force-mode: See the field DFLL_PARAMS_FORCE_MODE in the TRM.
+- nvidia,cf: Numeric value, see the field DFLL_PARAMS_CF_PARAM in the TRM.
+- nvidia,ci: Numeric value, see the field DFLL_PARAMS_CI_PARAM in the TRM.
+- nvidia,cg: Numeric value, see the field DFLL_PARAMS_CG_PARAM in the TRM.
+
+Optional properties for the control loop parameters:
+- nvidia,cg-scale: Boolean value, see the field DFLL_PARAMS_CG_SCALE in the TRM.
+
+Required properties for I2C mode:
+- nvidia,i2c-fs-rate: I2C transfer rate, if using full speed mode.
+
+Example:
+
+clock@0,70110000 {
+        compatible = "nvidia,tegra124-dfll";
+        reg = <0 0x70110000 0 0x100>, /* DFLL control */
+              <0 0x70110000 0 0x100>, /* I2C output control */
+              <0 0x70110100 0 0x100>, /* Integrated I2C controller */
+              <0 0x70110200 0 0x100>; /* Look-up table RAM */
+        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&tegra_car TEGRA124_CLK_DFLL_SOC>,
+                 <&tegra_car TEGRA124_CLK_DFLL_REF>,
+                 <&tegra_car TEGRA124_CLK_I2C5>;
+        clock-names = "soc", "ref", "i2c";
+        resets = <&tegra_car TEGRA124_RST_DFLL_DVCO>;
+        reset-names = "dvco";
+        #clock-cells = <0>;
+        clock-output-names = "dfllCPU_out";
+        vdd-cpu-supply = <&vdd_cpu>;
+        status = "okay";
+
+        nvidia,sample-rate = <12500>;
+        nvidia,droop-ctrl = <0x00000f00>;
+        nvidia,force-mode = <1>;
+        nvidia,cf = <10>;
+        nvidia,ci = <0>;
+        nvidia,cg = <2>;
+
+        nvidia,i2c-fs-rate = <400000>;
+};
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt
index 2f3747f..e4cdaf1 100644
--- a/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,r8a7778-cpg-clocks.txt
@@ -1,7 +1,9 @@
 * Renesas R8A7778 Clock Pulse Generator (CPG)
 
 The CPG generates core clocks for the R8A7778. It includes two PLLs and
-several fixed ratio dividers
+several fixed ratio dividers.
+The CPG also provides a Clock Domain for SoC devices, in combination with the
+CPG Module Stop (MSTP) Clocks.
 
 Required Properties:
 
@@ -10,10 +12,18 @@
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are
     "plla", "pllb", "b", "out", "p", "s", and "s1".
+  - #power-domain-cells: Must be 0
+
+SoC devices that are part of the CPG/MSTP Clock Domain and can be power-managed
+through an MSTP clock should refer to the CPG device node in their
+"power-domains" property, as documented by the generic PM domain bindings in
+Documentation/devicetree/bindings/power/power_domain.txt.
 
 
-Example
--------
+Examples
+--------
+
+  - CPG device node:
 
 	cpg_clocks: cpg_clocks@ffc80000 {
 		compatible = "renesas,r8a7778-cpg-clocks";
@@ -22,4 +32,17 @@
 		clocks = <&extal_clk>;
 		clock-output-names = "plla", "pllb", "b",
 				     "out", "p", "s", "s1";
+		#power-domain-cells = <0>;
+	};
+
+
+  - CPG/MSTP Clock Domain member device node:
+
+	sdhi0: sd@ffe4c000 {
+		compatible = "renesas,sdhi-r8a7778";
+		reg = <0xffe4c000 0x100>;
+		interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7778_CLK_SDHI0>;
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
 	};
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt
index ed3c8cb..8c81547 100644
--- a/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,r8a7779-cpg-clocks.txt
@@ -1,7 +1,9 @@
 * Renesas R8A7779 Clock Pulse Generator (CPG)
 
 The CPG generates core clocks for the R8A7779. It includes one PLL and
-several fixed ratio dividers
+several fixed ratio dividers.
+The CPG also provides a Clock Domain for SoC devices, in combination with the
+CPG Module Stop (MSTP) Clocks.
 
 Required Properties:
 
@@ -12,16 +14,36 @@
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are "plla",
     "z", "zs", "s", "s1", "p", "b", "out".
+  - #power-domain-cells: Must be 0
+
+SoC devices that are part of the CPG/MSTP Clock Domain and can be power-managed
+through an MSTP clock should refer to the CPG device node in their
+"power-domains" property, as documented by the generic PM domain bindings in
+Documentation/devicetree/bindings/power/power_domain.txt.
 
 
-Example
--------
+Examples
+--------
+
+  - CPG device node:
 
 	cpg_clocks: cpg_clocks@ffc80000 {
 		compatible = "renesas,r8a7779-cpg-clocks";
-		reg = <0 0xffc80000 0 0x30>;
+		reg = <0xffc80000 0x30>;
 		clocks = <&extal_clk>;
 		#clock-cells = <1>;
 		clock-output-names = "plla", "z", "zs", "s", "s1", "p",
 		                     "b", "out";
+		#power-domain-cells = <0>;
+	};
+
+
+  - CPG/MSTP Clock Domain member device node:
+
+	sata: sata@fc600000 {
+		compatible = "renesas,sata-r8a7779", "renesas,rcar-sata";
+		reg = <0xfc600000 0x2000>;
+		interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp1_clks R8A7779_CLK_SATA>;
+		power-domains = <&cpg_clocks>;
 	};
diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
index 56f111b..2a9a8ed 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
@@ -2,6 +2,8 @@
 
 The CPG generates core clocks for the R-Car Gen2 SoCs. It includes three PLLs
 and several fixed ratio dividers.
+The CPG also provides a Clock Domain for SoC devices, in combination with the
+CPG Module Stop (MSTP) Clocks.
 
 Required Properties:
 
@@ -20,10 +22,18 @@
   - clock-output-names: The names of the clocks. Supported clocks are "main",
     "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
     "adsp"
+  - #power-domain-cells: Must be 0
+
+SoC devices that are part of the CPG/MSTP Clock Domain and can be power-managed
+through an MSTP clock should refer to the CPG device node in their
+"power-domains" property, as documented by the generic PM domain bindings in
+Documentation/devicetree/bindings/power/power_domain.txt.
 
 
-Example
--------
+Examples
+--------
+
+  - CPG device node:
 
 	cpg_clocks: cpg_clocks@e6150000 {
 		compatible = "renesas,r8a7790-cpg-clocks",
@@ -34,4 +44,16 @@
 		clock-output-names = "main", "pll0", "pll1", "pll3",
 				     "lb", "qspi", "sdh", "sd0", "sd1", "z",
 				     "rcan", "adsp";
+		#power-domain-cells = <0>;
+	};
+
+
+  - CPG/MSTP Clock Domain member device node:
+
+	thermal@e61f0000 {
+		compatible = "renesas,thermal-r8a7790", "renesas,rcar-thermal";
+		reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
+		interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+		power-domains = <&cpg_clocks>;
 	};
diff --git a/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt
index b0f7ddb..bb51a33 100644
--- a/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt
@@ -2,6 +2,8 @@
 
 The CPG generates core clocks for the RZ SoCs. It includes the PLL, variable
 CPU and GPU clocks, and several fixed ratio dividers.
+The CPG also provides a Clock Domain for SoC devices, in combination with the
+CPG Module Stop (MSTP) Clocks.
 
 Required Properties:
 
@@ -14,10 +16,18 @@
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are "pll",
     "i", and "g"
+  - #power-domain-cells: Must be 0
+
+SoC devices that are part of the CPG/MSTP Clock Domain and can be power-managed
+through an MSTP clock should refer to the CPG device node in their
+"power-domains" property, as documented by the generic PM domain bindings in
+Documentation/devicetree/bindings/power/power_domain.txt.
 
 
-Example
--------
+Examples
+--------
+
+  - CPG device node:
 
 	cpg_clocks: cpg_clocks@fcfe0000 {
 		#clock-cells = <1>;
@@ -26,4 +36,19 @@
 		reg = <0xfcfe0000 0x18>;
 		clocks = <&extal_clk>, <&usb_x1_clk>;
 		clock-output-names = "pll", "i", "g";
+		#power-domain-cells = <0>;
+	};
+
+
+  - CPG/MSTP Clock Domain member device node:
+
+	mtu2: timer@fcff0000 {
+		compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
+		reg = <0xfcff0000 0x400>;
+		interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "tgi0a";
+		clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
+		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
 	};
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt
new file mode 100644
index 0000000..7c8bbcf
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3368-cru.txt
@@ -0,0 +1,61 @@
+* Rockchip RK3368 Clock and Reset Unit
+
+The RK3368 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk3368-cru"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+  If missing, pll rates are not changeable, due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk3368-cru.h headers and can be
+used in device tree sources. Similar macros exist for the reset sources in
+these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "ext_i2s" - external I2S clock - optional,
+ - "ext_gmac" - external GMAC clock - optional
+ - "ext_hsadc" - external HSADC clock - optional,
+ - "ext_isp" - external ISP clock - optional,
+ - "ext_jtag" - external JTAG clock - optional
+ - "ext_vip" - external VIP clock - optional,
+ - "usbotg_out" - output clock of the pll in the otg phy
+
+Example: Clock controller node:
+
+	cru: clock-controller@ff760000 {
+		compatible = "rockchip,rk3368-cru";
+		reg = <0x0 0xff760000 0x0 0x1000>;
+		rockchip,grf = <&grf>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller:
+
+	uart0: serial@10124000 {
+		compatible = "snps,dw-apb-uart";
+		reg = <0x10124000 0x400>;
+		interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <1>;
+		clocks = <&cru SCLK_UART0>;
+	};
diff --git a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
index efb51cf..d8b168e 100644
--- a/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
@@ -21,8 +21,8 @@
 	"st,stih416-plls-c32-ddr",	"st,clkgen-plls-c32"
 	"st,stih407-plls-c32-a0",	"st,clkgen-plls-c32"
 	"st,stih407-plls-c32-a9",	"st,clkgen-plls-c32"
-	"st,stih407-plls-c32-c0_0",	"st,clkgen-plls-c32"
-	"st,stih407-plls-c32-c0_1",	"st,clkgen-plls-c32"
+	"sst,plls-c32-cx_0",		"st,clkgen-plls-c32"
+	"sst,plls-c32-cx_1",		"st,clkgen-plls-c32"
 
 	"st,stih415-gpu-pll-c32",	"st,clkgengpu-pll-c32"
 	"st,stih416-gpu-pll-c32",	"st,clkgengpu-pll-c32"
diff --git a/Documentation/devicetree/bindings/clock/ux500.txt b/Documentation/devicetree/bindings/clock/ux500.txt
new file mode 100644
index 0000000..e52bd4b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/ux500.txt
@@ -0,0 +1,64 @@
+Clock bindings for ST-Ericsson Ux500 clocks
+
+Required properties :
+- compatible : shall contain only one of the following:
+  "stericsson,u8500-clks"
+  "stericsson,u8540-clks"
+  "stericsson,u9540-clks"
+- reg : shall contain base register location and length for
+  CLKRST1, 2, 3, 5, and 6 in an array. Note the absence of
+  CLKRST4, which does not exist.
+
+Required subnodes:
+- prcmu-clock: a subnode with one clock cell for PRCMU (power,
+  reset, control unit) clocks. The cell indicates which PRCMU
+  clock in the prcmu-clock node the consumer wants to use.
+- prcc-periph-clock: a subnode with two clock cells for
+  PRCC (programmable reset- and clock controller) peripheral clocks.
+  The first cell indicates which PRCC block the consumer
+  wants to use, possible values are 1, 2, 3, 5, 6. The second
+  cell indicates which clock inside the PRCC block it wants,
+  possible values are 0 through 31.
+- prcc-kernel-clock: a subnode with two clock cells for
+  PRCC (programmable reset- and clock controller) kernel clocks
+  The first cell indicates which PRCC block the consumer
+  wants to use, possible values are 1, 2, 3, 5, 6. The second
+  cell indicates which clock inside the PRCC block it wants,
+  possible values are 0 through 31.
+- rtc32k-clock: a subnode with zero clock cells for the 32kHz
+  RTC clock.
+- smp-twd-clock: a subnode for the ARM SMP Timer Watchdog cluster
+  with zero clock cells.
+
+Example:
+
+clocks {
+	compatible = "stericsson,u8500-clks";
+	/*
+	 * Registers for the CLKRST block on peripheral
+	 * groups 1, 2, 3, 5, 6,
+	 */
+	reg = <0x8012f000 0x1000>, <0x8011f000 0x1000>,
+	    <0x8000f000 0x1000>, <0xa03ff000 0x1000>,
+	    <0xa03cf000 0x1000>;
+
+	prcmu_clk: prcmu-clock {
+		#clock-cells = <1>;
+	};
+
+	prcc_pclk: prcc-periph-clock {
+		#clock-cells = <2>;
+	};
+
+	prcc_kclk: prcc-kernel-clock {
+		#clock-cells = <2>;
+	};
+
+	rtc_clk: rtc32k-clock {
+		#clock-cells = <0>;
+	};
+
+	smp_twd_clk: smp-twd-clock {
+		#clock-cells = <0>;
+	};
+};
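+
+Example: UART consumer using a PRCC kernel clock and a PRCC peripheral
+  clock (the consumer node, address and cell values below are
+  illustrative only, not taken from a datasheet):
+
+	uart0: serial@80120000 {
+		compatible = "arm,pl011", "arm,primecell";
+		reg = <0x80120000 0x1000>;
+		clocks = <&prcc_kclk 1 0>, <&prcc_pclk 1 0>;
+		clock-names = "uartclk", "apb_pclk";
+	};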
diff --git a/Documentation/devicetree/bindings/cpufreq/tegra124-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/tegra124-cpufreq.txt
new file mode 100644
index 0000000..b1669fb
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/tegra124-cpufreq.txt
@@ -0,0 +1,44 @@
+Tegra124 CPU frequency scaling driver bindings
+----------------------------------------------
+
+Both required and optional properties listed below must be defined
+under node /cpus/cpu@0.
+
+Required properties:
+- clocks: Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names: Must include the following entries:
+  - cpu_g: Clock mux for the fast CPU cluster.
+  - cpu_lp: Clock mux for the low-power CPU cluster.
+  - pll_x: Fast PLL clocksource.
+  - pll_p: Auxiliary PLL used during fast PLL rate changes.
+  - dfll: Fast DFLL clocksource that also automatically scales CPU voltage.
+- vdd-cpu-supply: Regulator for CPU voltage
+
+Optional properties:
+- clock-latency: Specifies the maximum possible transition latency for the
+  clock, in nanoseconds.
+
+Example:
+--------
+cpus {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	cpu@0 {
+		device_type = "cpu";
+		compatible = "arm,cortex-a15";
+		reg = <0>;
+
+		clocks = <&tegra_car TEGRA124_CLK_CCLK_G>,
+			 <&tegra_car TEGRA124_CLK_CCLK_LP>,
+			 <&tegra_car TEGRA124_CLK_PLL_X>,
+			 <&tegra_car TEGRA124_CLK_PLL_P>,
+			 <&dfll>;
+		clock-names = "cpu_g", "cpu_lp", "pll_x", "pll_p", "dfll";
+		clock-latency = <300000>;
+		vdd-cpu-supply = <&vdd_cpu>;
+	};
+
+	<...>
+};
diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
index e402277..6831d02 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec4.txt
@@ -106,6 +106,18 @@
           to the interrupt parent to which the child domain
           is being mapped.
 
+   - clocks
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <prop_encoded-array>
+      Definition:  A list of phandle and clock specifier pairs describing
+          the clocks required for enabling and disabling SEC 4.0.
+
+   - clock-names
+      Usage: required if SEC 4.0 requires explicit enablement of clocks
+      Value type: <string>
+      Definition: A list of clock name strings in the same order as the
+          clocks property.
+
    Note: All other standard properties (see the ePAPR) are allowed
    but are optional.
 
@@ -120,6 +132,11 @@
 		ranges = <0 0x300000 0x10000>;
 		interrupt-parent = <&mpic>;
 		interrupts = <92 2>;
+		clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+			 <&clks IMX6QDL_CLK_CAAM_ACLK>,
+			 <&clks IMX6QDL_CLK_CAAM_IPG>,
+			 <&clks IMX6QDL_CLK_EIM_SLOW>;
+		clock-names = "mem", "aclk", "ipg", "emi_slow";
 	};
 
 =====================================================================
@@ -288,12 +305,13 @@
     Node defines address range and the associated
     interrupt for the SNVS function.  This function
     monitors security state information & reports
-    security violations.
+    security violations. This block also includes the RTC,
+    system power off and the ON/OFF key.
 
   - compatible
       Usage: required
       Value type: <string>
-      Definition: Must include "fsl,sec-v4.0-mon".
+      Definition: Must include "fsl,sec-v4.0-mon" and "syscon".
 
   - reg
       Usage: required
@@ -324,7 +342,7 @@
            the child address, parent address, & length.
 
    - interrupts
-      Usage: required
+      Usage: optional
       Value type: <prop_encoded-array>
       Definition:  Specifies the interrupts generated by this
            device.  The value of the interrupts property
@@ -341,7 +359,7 @@
 
 EXAMPLE
 	sec_mon@314000 {
-		compatible = "fsl,sec-v4.0-mon";
+		compatible = "fsl,sec-v4.0-mon", "syscon";
 		reg = <0x314000 0x1000>;
 		ranges = <0 0x314000 0x1000>;
 		interrupt-parent = <&mpic>;
@@ -358,16 +376,72 @@
       Value type: <string>
       Definition: Must include "fsl,sec-v4.0-mon-rtc-lp".
 
-  - reg
+  - interrupts
       Usage: required
-      Value type: <prop-encoded-array>
-      Definition: A standard property.  Specifies the physical
-          address and length of the SNVS LP configuration registers.
+      Value type: <prop_encoded-array>
+      Definition: Specifies the interrupts generated by this
+	   device.  The value of the interrupts property
+	   consists of one interrupt specifier. The format
+	   of the specifier is defined by the binding document
+	   describing the node's interrupt parent.
+
+ - regmap
+	Usage: required
+	Value type: <phandle>
+	Definition: this is a phandle to the register map node.
+
+ - offset
+	Usage: optional
+	Value type: <u32>
+	Definition: LP register offset. The default is 0x34.
 
 EXAMPLE
-	sec_mon_rtc_lp@314000 {
+	sec_mon_rtc_lp@1 {
 		compatible = "fsl,sec-v4.0-mon-rtc-lp";
-		reg = <0x34 0x58>;
+		interrupts = <93 2>;
+		regmap = <&snvs>;
+		offset = <0x34>;
+	};
+
+=====================================================================
+System ON/OFF key driver
+
+  The snvs-pwrkey enables the POWER key function controlled by the SNVS
+  ON/OFF logic. The driver reports the status of the POWER key and can
+  wake up the system when the key is pressed after system suspend.
+
+  - compatible:
+      Usage: required
+      Value type: <string>
+      Definition: Must include "fsl,sec-v4.0-pwrkey".
+
+  - interrupts:
+      Usage: required
+      Value type: <prop_encoded-array>
+      Definition: The SNVS ON/OFF interrupt number to the CPU(s).
+
+  - linux,keycode:
+      Usage: optional
+      Value type: <int>
+      Definition: Keycode to emit, KEY_POWER by default.
+
+  - wakeup-source:
+      Usage: optional
+      Value type: <bool>
+      Definition: Button can wake up the system.
+
+ - regmap:
+      Usage: required
+      Value type: <phandle>
+      Definition: this is a phandle to the register map node.
+
+EXAMPLE:
+	snvs-pwrkey@0x020cc000 {
+		compatible = "fsl,sec-v4.0-pwrkey";
+		regmap = <&snvs>;
+		interrupts = <0 4 0x4>;
+		linux,keycode = <116>; /* KEY_POWER */
+		wakeup-source;
 	};
 
 =====================================================================
@@ -443,12 +517,20 @@
 		compatible = "fsl,sec-v4.0-mon";
 		reg = <0x314000 0x1000>;
 		ranges = <0 0x314000 0x1000>;
-		interrupt-parent = <&mpic>;
-		interrupts = <93 2>;
 
 		sec_mon_rtc_lp@34 {
 			compatible = "fsl,sec-v4.0-mon-rtc-lp";
-			reg = <0x34 0x58>;
+			regmap = <&sec_mon>;
+			offset = <0x34>;
+			interrupts = <93 2>;
+		};
+
+		snvs-pwrkey@0x020cc000 {
+			compatible = "fsl,sec-v4.0-pwrkey";
+			regmap = <&sec_mon>;
+			interrupts = <0 4 0x4>;
+			linux,keycode = <116>; /* KEY_POWER */
+			wakeup-source;
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/crypto/sun4i-ss.txt b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt
new file mode 100644
index 0000000..5d38e9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/sun4i-ss.txt
@@ -0,0 +1,23 @@
+* Allwinner Security System found on A20 SoC
+
+Required properties:
+- compatible : Should be "allwinner,sun4i-a10-crypto".
+- reg: Should contain the Security System register location and length.
+- interrupts: Should contain the IRQ line for the Security System.
+- clocks : List of clock specifiers, corresponding to ahb and ss.
+- clock-names : Name of the functional clock, should be
+	* "ahb" : AHB gating clock
+	* "mod" : SS controller clock
+
+Optional properties:
+ - resets : phandle + reset specifier pair
+ - reset-names : must contain "ahb"
+
+Example:
+	crypto: crypto-engine@01c15000 {
+		compatible = "allwinner,sun4i-a10-crypto";
+		reg = <0x01c15000 0x1000>;
+		interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&ahb_gates 5>, <&ss_clk>;
+		clock-names = "ahb", "mod";
+	};
diff --git a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
index b54bf3a..3e36c1d 100644
--- a/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
+++ b/Documentation/devicetree/bindings/devfreq/event/exynos-ppmu.txt
@@ -11,15 +11,14 @@
 derterming the current state of each IP.
 
 Required properties:
-- compatible: Should be "samsung,exynos-ppmu".
+- compatible: Should be "samsung,exynos-ppmu" or "samsung,exynos-ppmu-v2".
 - reg: physical base address of each PPMU and length of memory mapped region.
 
 Optional properties:
 - clock-names : the name of clock used by the PPMU, "ppmu"
 - clocks : phandles for clock specified in "clock-names" property
-- #clock-cells: should be 1.
 
-Example1 : PPMU nodes in exynos3250.dtsi are listed below.
+Example1 : PPMUv1 nodes in exynos3250.dtsi are listed below.
 
 		ppmu_dmc0: ppmu_dmc0@106a0000 {
 			compatible = "samsung,exynos-ppmu";
@@ -108,3 +107,41 @@
 			};
 		};
 	};
+
+Example3 : PPMUv2 nodes in exynos5433.dtsi are listed below.
+
+		ppmu_d0_cpu: ppmu_d0_cpu@10480000 {
+			compatible = "samsung,exynos-ppmu-v2";
+			reg = <0x10480000 0x2000>;
+			status = "disabled";
+		};
+
+		ppmu_d0_general: ppmu_d0_general@10490000 {
+			compatible = "samsung,exynos-ppmu-v2";
+			reg = <0x10490000 0x2000>;
+			status = "disabled";
+		};
+
+		ppmu_d0_rt: ppmu_d0_rt@104a0000 {
+			compatible = "samsung,exynos-ppmu-v2";
+			reg = <0x104a0000 0x2000>;
+			status = "disabled";
+		};
+
+		ppmu_d1_cpu: ppmu_d1_cpu@104b0000 {
+			compatible = "samsung,exynos-ppmu-v2";
+			reg = <0x104b0000 0x2000>;
+			status = "disabled";
+		};
+
+		ppmu_d1_general: ppmu_d1_general@104c0000 {
+			compatible = "samsung,exynos-ppmu-v2";
+			reg = <0x104c0000 0x2000>;
+			status = "disabled";
+		};
+
+		ppmu_d1_rt: ppmu_d1_rt@104d0000 {
+			compatible = "samsung,exynos-ppmu-v2";
+			reg = <0x104d0000 0x2000>;
+			status = "disabled";
+		};
diff --git a/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
new file mode 100644
index 0000000..47cb1d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/adi,axi-dmac.txt
@@ -0,0 +1,61 @@
+Analog Devices AXI-DMAC DMA controller
+
+Required properties:
+ - compatible: Must be "adi,axi-dmac-1.00.a".
+ - reg: Specification for the controller's memory mapped register map.
+ - interrupts: Specification for the controller's interrupt.
+ - clocks: Phandle and specifier for the controller's AXI interface clock.
+ - #dma-cells: Must be 1.
+
+Required sub-nodes:
+ - adi,channels: This sub-node must contain a sub-node for each DMA channel. For
+   the channel sub-nodes the following bindings apply. They must match the
+   configuration options of the peripheral as it was instantiated.
+
+Required properties for adi,channels sub-node:
+ - #size-cells: Must be 0
+ - #address-cells: Must be 1
+
+Required channel sub-node properties:
+ - reg: Which channel this node refers to.
+ - adi,length-width: Width of the DMA transfer length register.
+ - adi,source-bus-width,
+   adi,destination-bus-width: Width of the source or destination bus in bits.
+ - adi,source-bus-type,
+   adi,destination-bus-type: Type of the source or destination bus. Must be one
+   of the following:
+	0 (AXI_DMAC_TYPE_AXI_MM): Memory mapped AXI interface
+	1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
+	2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
+
+Optional channel properties:
+ - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
+   transfers.
+ - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
+
+DMA clients connected to the AXI-DMAC DMA controller must use the format
+described in the dma.txt file using a one-cell specifier. The value of the
+specifier refers to the DMA channel index.
+
+Example:
+
+dma: dma@7c420000 {
+	compatible = "adi,axi-dmac-1.00.a";
+	reg = <0x7c420000 0x10000>;
+	interrupts = <0 57 0>;
+	clocks = <&clkc 16>;
+	#dma-cells = <1>;
+
+	adi,channels {
+		#size-cells = <0>;
+		#address-cells = <1>;
+
+		dma-channel@0 {
+			reg = <0>;
+			adi,source-bus-width = <32>;
+			adi,source-bus-type = <AXI_DMAC_TYPE_AXI_MM>;
+			adi,destination-bus-width = <64>;
+			adi,destination-bus-type = <AXI_DMAC_TYPE_AXI_FIFO>;
+		};
+	};
+};
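+
+An illustrative client node using the one-cell specifier described above
+(the client device and channel index are examples only):
+
+capture-device@79020000 {
+	...
+	dmas = <&dma 0>;
+	dma-names = "rx";
+};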
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.txt b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
new file mode 100644
index 0000000..8a0097a
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.txt
@@ -0,0 +1,54 @@
+* ARM PrimeCell PL080 and PL081 DMA controllers and derivatives
+
+Required properties:
+- compatible: "arm,pl080", "arm,primecell";
+	      "arm,pl081", "arm,primecell";
+- reg: Address range of the PL08x registers
+- interrupts: The PL08x interrupt number
+- clocks: The clock driving the IP core
+- clock-names: Must contain "apb_pclk"
+- lli-bus-interface-ahb1: if AHB master 1 is eligible for fetching LLIs
+- lli-bus-interface-ahb2: if AHB master 2 is eligible for fetching LLIs
+- mem-bus-interface-ahb1: if AHB master 1 is eligible for fetching memory contents
+- mem-bus-interface-ahb2: if AHB master 2 is eligible for fetching memory contents
+- #dma-cells: must be <2>. First cell should contain the DMA request,
+              second cell should contain either 1 or 2 depending on
+              which AHB master is used.
+
+Optional properties:
+- dma-channels: contains the total number of DMA channels supported by the DMAC
+- dma-requests: contains the total number of DMA requests supported by the DMAC
+- memcpy-burst-size: the size of the bursts for memcpy: 1, 4, 8, 16, 32,
+  64, 128 or 256 bytes are legal values
+- memcpy-bus-width: the bus width used for memcpy: 8, 16 or 32 are legal
+  values
+
+Clients
+Required properties:
+- dmas: List of DMA controller phandle, request channel and AHB master id
+- dma-names: Names of the aforementioned requested channels
+
+Example:
+
+dmac0: dma-controller@10130000 {
+	compatible = "arm,pl080", "arm,primecell";
+	reg = <0x10130000 0x1000>;
+	interrupt-parent = <&vica>;
+	interrupts = <15>;
+	clocks = <&hclkdma0>;
+	clock-names = "apb_pclk";
+	lli-bus-interface-ahb1;
+	lli-bus-interface-ahb2;
+	mem-bus-interface-ahb2;
+	memcpy-burst-size = <256>;
+	memcpy-bus-width = <32>;
+	#dma-cells = <2>;
+};
+
+device@40008000 {
+	...
+	dmas = <&dmac0 0 2
+		&dmac0 1 2>;
+	dma-names = "tx", "rx";
+	...
+};
diff --git a/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
new file mode 100644
index 0000000..87740ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/lpc1850-dmamux.txt
@@ -0,0 +1,54 @@
+NXP LPC18xx/43xx DMA MUX (DMA request router)
+
+Required properties:
+- compatible:	"nxp,lpc1850-dmamux"
+- reg:		Memory map for accessing module
+- #dma-cells:	Should be set to <3>.
+		* 1st cell contains the master dma request signal
+		* 2nd cell contains the mux value (0-3) for the peripheral
+		* 3rd cell contains either 1 or 2 depending on the AHB
+		  master used.
+- dma-requests:	Number of DMA requests for the mux
+- dma-masters:	phandle pointing to the DMA controller
+
+The DMA controller node needs to have the following properties:
+- dma-requests:	Number of DMA requests the controller can handle
+
+Example:
+
+dmac: dma@40002000 {
+	compatible = "nxp,lpc1850-gpdma", "arm,pl080", "arm,primecell";
+	arm,primecell-periphid = <0x00041080>;
+	reg = <0x40002000 0x1000>;
+	interrupts = <2>;
+	clocks = <&ccu1 CLK_CPU_DMA>;
+	clock-names = "apb_pclk";
+	#dma-cells = <2>;
+	dma-channels = <8>;
+	dma-requests = <16>;
+	lli-bus-interface-ahb1;
+	lli-bus-interface-ahb2;
+	mem-bus-interface-ahb1;
+	mem-bus-interface-ahb2;
+	memcpy-burst-size = <256>;
+	memcpy-bus-width = <32>;
+};
+
+dmamux: dma-mux {
+	compatible = "nxp,lpc1850-dmamux";
+	#dma-cells = <3>;
+	dma-requests = <64>;
+	dma-masters = <&dmac>;
+};
+
+uart0: serial@40081000 {
+	compatible = "nxp,lpc1850-uart", "ns16550a";
+	reg = <0x40081000 0x1000>;
+	reg-shift = <2>;
+	interrupts = <24>;
+	clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
+	clock-names = "uartclk", "reg";
+	dmas = <&dmamux 1 1 2
+		&dmamux 2 1 2>;
+	dma-names = "tx", "rx";
+};
diff --git a/Documentation/devicetree/bindings/dma/mv-xor.txt b/Documentation/devicetree/bindings/dma/mv-xor.txt
index cc29c35..276ef81 100644
--- a/Documentation/devicetree/bindings/dma/mv-xor.txt
+++ b/Documentation/devicetree/bindings/dma/mv-xor.txt
@@ -12,10 +12,13 @@
 properties:
 - interrupts: interrupt of the XOR channel
 
-And the following optional properties:
+The sub-nodes used to contain one or several of the following
+properties, but they are now deprecated:
 - dmacap,memcpy to indicate that the XOR channel is capable of memcpy operations
 - dmacap,memset to indicate that the XOR channel is capable of memset operations
 - dmacap,xor to indicate that the XOR channel is capable of xor operations
+- dmacap,interrupt to indicate that the XOR channel is capable of
+  generating interrupts
 
 Example:
 
@@ -28,13 +31,8 @@
 
 	xor00 {
 	      interrupts = <51>;
-	      dmacap,memcpy;
-	      dmacap,xor;
 	};
 	xor01 {
 	      interrupts = <52>;
-	      dmacap,memcpy;
-	      dmacap,xor;
-	      dmacap,memset;
 	};
 };
diff --git a/Documentation/devicetree/bindings/dma/sun4i-dma.txt b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
new file mode 100644
index 0000000..f1634a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/sun4i-dma.txt
@@ -0,0 +1,46 @@
+Allwinner A10 DMA Controller
+
+This driver follows the generic DMA bindings defined in dma.txt.
+
+Required properties:
+
+- compatible:	Must be "allwinner,sun4i-a10-dma"
+- reg:		Should contain the registers base address and length
+- interrupts:	Should contain a reference to the interrupt used by this device
+- clocks:	Should contain a reference to the parent AHB clock
+- #dma-cells :	Should be 2, first cell denoting normal or dedicated dma,
+		second cell holding the request line number.
+
+Example:
+	dma: dma-controller@01c02000 {
+		compatible = "allwinner,sun4i-a10-dma";
+		reg = <0x01c02000 0x1000>;
+		interrupts = <27>;
+		clocks = <&ahb_gates 6>;
+		#dma-cells = <2>;
+	};
+
+Clients:
+
+DMA clients connected to the Allwinner A10 DMA controller must use the
+format described in the dma.txt file, using a three-cell specifier for
+each channel: a phandle plus two integer cells.
+The three cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Whether it is using normal (0) or dedicated (1) channels
+3. The port ID as specified in the datasheet
+
+Example:
+	spi2: spi@01c17000 {
+		compatible = "allwinner,sun4i-a10-spi";
+		reg = <0x01c17000 0x1000>;
+		interrupts = <0 12 4>;
+		clocks = <&ahb_gates 22>, <&spi2_clk>;
+		clock-names = "ahb", "mod";
+		dmas = <&dma 1 29>, <&dma 1 28>;
+		dma-names = "rx", "tx";
+		status = "disabled";
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/dma/zxdma.txt b/Documentation/devicetree/bindings/dma/zxdma.txt
new file mode 100644
index 0000000..3207ceb
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/zxdma.txt
@@ -0,0 +1,38 @@
+* ZTE ZX296702 DMA controller
+
+Required properties:
+- compatible: Should be "zte,zx296702-dma"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: see dma.txt, should be 1; the cell is the request line number
+- dma-channels: physical channels supported
+- dma-requests: virtual channels supported, each virtual channel
+		has a specific request line
+- clocks: clock required
+
+Example:
+
+Controller:
+	dma: dma-controller@0x09c00000{
+		compatible = "zte,zx296702-dma";
+		reg = <0x09c00000 0x1000>;
+		clocks = <&topclk ZX296702_DMA_ACLK>;
+		interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
+		#dma-cells = <1>;
+		dma-channels = <24>;
+		dma-requests = <24>;
+	};
+
+Client:
+A client selects a specific request line via the dmas specifier.
+For example, the spdif0 tx channel uses request line 4:
+	spdif0: spdif0@0b004000 {
+		#sound-dai-cells = <0>;
+		compatible = "zte,zx296702-spdif";
+		reg = <0x0b004000 0x1000>;
+		clocks = <&lsp0clk ZX296702_SPDIF0_DIV>;
+		clock-names = "tx";
+		interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+		dmas = <&dma 4>;
+		dma-names = "tx";
+	};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
index 45414bb..f61d5af 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
@@ -10,8 +10,11 @@
 
 Optional Properties:
  - ti,wakeup : To enable the wakeup comparator in probe
- - ti,enable-id-detection: Perform ID detection.
+ - ti,enable-id-detection: Perform ID detection. If id-gpio is specified,
+		ID detection is done using that GPIO, otherwise the OTG core is used.
  - ti,enable-vbus-detection: Perform VBUS detection.
+ - id-gpio: gpio for GPIO ID detection. See gpio binding.
+ - debounce-delay-ms: debounce delay for GPIO ID pin in milliseconds.
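+
+When GPIO based ID detection is used, the node would additionally carry
+properties along these lines (the GPIO phandle and delay value are
+examples only):
+
+	id-gpio = <&gpio0 31 GPIO_ACTIVE_HIGH>;
+	debounce-delay-ms = <20>;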
 
 palmas-usb {
        compatible = "ti,twl6035-usb", "ti,palmas-usb";
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
index 435f1bc..b405b44 100644
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
@@ -33,6 +33,13 @@
 - interrupt-parent:
     phandle of the parent interrupt controller
 
+- interrupts-extended:
+    Alternate form of specifying interrupts and parents that allows for
+    multiple parents.  This takes precedence over 'interrupts' and
+    'interrupt-parent'.  Wakeup-capable GPIO controllers often route their
+    wakeup interrupt lines through a different interrupt controller than the
+    primary interrupt line, making this property necessary.
+
 - #interrupt-cells:
     Should be <2>.  The first cell is the GPIO number, the second should specify
     flags.  The following subset of flags is supported:
@@ -47,19 +54,33 @@
 - interrupt-controller:
     Marks the device node as an interrupt controller
 
-- interrupt-names:
-    The name of the IRQ resource used by this controller
+- wakeup-source:
+    GPIOs for this controller can be used as a wakeup source
 
 Example:
 	upg_gio: gpio@f040a700 {
-		#gpio-cells = <0x2>;
-		#interrupt-cells = <0x2>;
+		#gpio-cells = <2>;
+		#interrupt-cells = <2>;
 		compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
 		gpio-controller;
 		interrupt-controller;
 		reg = <0xf040a700 0x80>;
-		interrupt-parent = <0xf>;
+		interrupt-parent = <&irq0_intc>;
 		interrupts = <0x6>;
-		interrupt-names = "upg_gio";
-		brcm,gpio-bank-widths = <0x20 0x20 0x20 0x18>;
+		brcm,gpio-bank-widths = <32 32 32 24>;
+	};
+
+	upg_gio_aon: gpio@f04172c0 {
+		#gpio-cells = <2>;
+		#interrupt-cells = <2>;
+		compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+		gpio-controller;
+		interrupt-controller;
+		reg = <0xf04172c0 0x40>;
+		interrupt-parent = <&irq0_aon_intc>;
+		interrupts = <0x6>;
+		interrupts-extended = <&irq0_aon_intc 0x6>,
+			<&aon_pm_l2_intc 0x5>;
+		wakeup-source;
+		brcm,gpio-bank-widths = <18 4>;
 	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt b/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
index abf4db7..170194a 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-etraxfs.txt
@@ -2,8 +2,9 @@
 
 Required properties:
 
-- compatible:
+- compatible: one of:
   - "axis,etraxfs-gio"
+  - "axis,artpec3-gio"
 - reg: Physical base address and length of the controller's registers.
 - #gpio-cells: Should be 3
   - The first cell is the gpio offset number.
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
new file mode 100644
index 0000000..805ddcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mpc8xxx.txt
@@ -0,0 +1,22 @@
+* Freescale MPC512x/MPC8xxx GPIO controller
+
+Required properties:
+- compatible : Should be "fsl,<soc>-gpio"
+  The following <soc>s are known to be supported:
+    mpc5121, mpc5125, mpc8349, mpc8572, mpc8610, pq3, qoriq
+- reg : Address and length of the register set for the device
+- interrupts : Should be the port interrupt shared by all 32 pins.
+- #gpio-cells : Should be two.  The first cell is the pin number and
+  the second cell is used to specify the gpio polarity:
+      0 = active high
+      1 = active low
+
+Example:
+
+gpio0: gpio@1100 {
+	compatible = "fsl,mpc5125-gpio";
+	#gpio-cells = <2>;
+	reg = <0x1100 0x080>;
+	interrupts = <78 0x8>;
+	status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
index 38fb86f..f60e2f4 100644
--- a/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+++ b/Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
@@ -9,6 +9,7 @@
     - "renesas,gpio-r8a7791": for R8A7791 (R-Car M2-W) compatible GPIO controller.
     - "renesas,gpio-r8a7793": for R8A7793 (R-Car M2-N) compatible GPIO controller.
     - "renesas,gpio-r8a7794": for R8A7794 (R-Car E2) compatible GPIO controller.
+    - "renesas,gpio-r8a7795": for R8A7795 (R-Car H3) compatible GPIO controller.
     - "renesas,gpio-rcar": for generic R-Car GPIO controller.
 
   - reg: Base address and length of each memory resource used by the GPIO
diff --git a/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
new file mode 100644
index 0000000..0dab156
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/zx296702-gpio.txt
@@ -0,0 +1,24 @@
+ZTE ZX296702 GPIO controller
+
+Required properties:
+- compatible : "zte,zx296702-gpio"
+- #gpio-cells : Should be two. The first cell is the pin number and the
+  second cell is used to specify optional parameters:
+  - bit 0 specifies polarity (0 for normal, 1 for inverted)
+- gpio-controller : Marks the device node as a GPIO controller.
+- interrupts : Interrupt mapping for GPIO IRQ.
+- gpio-ranges : Interaction with the PINCTRL subsystem.
+
+gpio1: gpio@b008040 {
+	compatible = "zte,zx296702-gpio";
+	reg = <0xb008040 0x40>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	gpio-ranges = < &pmx0 0 54 2 &pmx0 2 59 14>;
+	interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+	interrupt-parent = <&intc>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+	clock-names = "gpio_pclk";
+	clocks = <&lsp0clk ZX296702_GPIO_CLK>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/lm70.txt b/Documentation/devicetree/bindings/hwmon/lm70.txt
new file mode 100644
index 0000000..e7fd921
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/lm70.txt
@@ -0,0 +1,21 @@
+* LM70/TMP121/LM71/LM74 thermometer.
+
+Required properties:
+- compatible: one of
+		"ti,lm70"
+		"ti,tmp121"
+		"ti,lm71"
+		"ti,lm74"
+
+See Documentation/devicetree/bindings/spi/spi-bus.txt for more required and
+optional properties.
+
+Example:
+
+spi_master {
+	temperature-sensor@0 {
+		compatible = "ti,lm70";
+		reg = <0>;
+		spi-max-frequency = <1000000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/hwmon/ltc2978.txt b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
index ed2f09d..a7afbf6 100644
--- a/Documentation/devicetree/bindings/hwmon/ltc2978.txt
+++ b/Documentation/devicetree/bindings/hwmon/ltc2978.txt
@@ -3,10 +3,16 @@
 Required properties:
 - compatible: should contain one of:
   * "lltc,ltc2974"
+  * "lltc,ltc2975"
   * "lltc,ltc2977"
   * "lltc,ltc2978"
+  * "lltc,ltc2980"
   * "lltc,ltc3880"
+  * "lltc,ltc3882"
   * "lltc,ltc3883"
+  * "lltc,ltc3886"
+  * "lltc,ltc3887"
+  * "lltc,ltm2987"
   * "lltc,ltm4676"
 - reg: I2C slave address
 
@@ -17,10 +23,10 @@
   standard binding for regulators; see regulator.txt.
 
 Valid names of regulators depend on number of supplies supported per device:
-  * ltc2974 : vout0 - vout3
-  * ltc2977 : vout0 - vout7
+  * ltc2974, ltc2975 : vout0 - vout3
+  * ltc2977, ltc2980, ltm2987 : vout0 - vout7
   * ltc2978 : vout0 - vout7
-  * ltc3880 : vout0 - vout1
+  * ltc3880, ltc3882, ltc3886 : vout0 - vout1
   * ltc3883 : vout0
   * ltm4676 : vout0 - vout1
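+
+An illustrative node for an ltc3880 exposing both outputs (the I2C
+address and regulator names are examples only):
+
+	ltc3880@5e {
+		compatible = "lltc,ltc3880";
+		reg = <0x5e>;
+
+		regulators {
+			vout0 {
+				regulator-name = "vdd_core";
+			};
+			vout1 {
+				regulator-name = "vdd_io";
+			};
+		};
+	};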
 
diff --git a/Documentation/devicetree/bindings/iio/adc/mcp320x.txt b/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
index b851843..2a1f3af 100644
--- a/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mcp320x.txt
@@ -18,6 +18,7 @@
 				"mcp3202"
 				"mcp3204"
 				"mcp3208"
+				"mcp3301"
 
 
 Examples:
diff --git a/Documentation/devicetree/bindings/iio/adc/vf610-adc.txt b/Documentation/devicetree/bindings/iio/adc/vf610-adc.txt
index 3eb40e2..1aad051 100644
--- a/Documentation/devicetree/bindings/iio/adc/vf610-adc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/vf610-adc.txt
@@ -17,6 +17,11 @@
   - Frequency in normal mode (ADLPC=0, ADHSC=0)
   - Frequency in high-speed mode (ADLPC=0, ADHSC=1)
   - Frequency in low-power mode (ADLPC=1, ADHSC=0)
+- min-sample-time: Minimum sampling time in nanoseconds. This value has
+  to be chosen according to the conversion mode and the connected analog
+  source resistance (R_as) and capacitance (C_as). Refer to the datasheet's
+  operating requirements. A safe default across a wide range of R_as and
+  C_as as well as conversion modes is 1000ns.
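+  For example, a node relying on that default would simply contain
+  "min-sample-time = <1000>;" (illustrative value).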
 
 Example:
 adc0: adc@4003b000 {
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/mmc35240.txt b/Documentation/devicetree/bindings/iio/magnetometer/mmc35240.txt
new file mode 100644
index 0000000..a01235c
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/magnetometer/mmc35240.txt
@@ -0,0 +1,13 @@
+* MEMSIC MMC35240 magnetometer sensor
+
+Required properties:
+
+  - compatible : should be "memsic,mmc35240"
+  - reg : the I2C address of the magnetometer
+
+Example:
+
+mmc35240@30 {
+        compatible = "memsic,mmc35240";
+        reg = <0x30>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index 8a6be3b..d3ccdb1 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -35,6 +35,7 @@
 - st,lsm303dl-accel
 - st,lsm303dlm-accel
 - st,lsm330-accel
+- st,lsm303agr-accel
 
 Gyroscopes:
 - st,l3g4200d-gyro
@@ -46,6 +47,7 @@
 - st,lsm330-gyro
 
 Magnetometers:
+- st,lsm303agr-magn
 - st,lsm303dlh-magn
 - st,lsm303dlhc-magn
 - st,lsm303dlm-magn
diff --git a/Documentation/devicetree/bindings/input/ads7846.txt b/Documentation/devicetree/bindings/input/ads7846.txt
index 5f7619c..df8b127 100644
--- a/Documentation/devicetree/bindings/input/ads7846.txt
+++ b/Documentation/devicetree/bindings/input/ads7846.txt
@@ -64,7 +64,7 @@
 					pendown-gpio (u32).
 	pendown-gpio			GPIO handle describing the pin the !PENIRQ
 					line is connected to.
-	linux,wakeup			use any event on touchscreen as wakeup event.
+	wakeup-source			use any event on touchscreen as wakeup event.
 
 
 Example for a TSC2046 chip connected to an McSPI controller of an OMAP SoC::
diff --git a/Documentation/devicetree/bindings/input/cap11xx.txt b/Documentation/devicetree/bindings/input/cap11xx.txt
index 7d0a300..8c67a0b 100644
--- a/Documentation/devicetree/bindings/input/cap11xx.txt
+++ b/Documentation/devicetree/bindings/input/cap11xx.txt
@@ -55,5 +55,24 @@
 				 <105>,		/* KEY_LEFT */
 				 <109>,		/* KEY_PAGEDOWN */
 				 <104>;		/* KEY_PAGEUP */
+
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		usr@0 {
+			label = "cap11xx:green:usr0";
+			reg = <0>;
+		};
+
+		usr@1 {
+			label = "cap11xx:green:usr1";
+			reg = <1>;
+		};
+
+		alive@2 {
+			label = "cap11xx:green:alive";
+			reg = <2>;
+			linux,default-trigger = "heartbeat";
+		};
 	};
 }
diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
new file mode 100644
index 0000000..635a3b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt
@@ -0,0 +1,44 @@
+Cypress I2C Touchpad
+
+Required properties:
+- compatible: must be "cypress,cyapa".
+- reg: I2C address of the chip.
+- interrupt-parent: a phandle for the interrupt controller (see interrupt
+	binding[0]).
+- interrupts: interrupt to which the chip is connected (see interrupt
+	binding[0]).
+
+Optional properties:
+- wakeup-source: touchpad can be used as a wakeup source.
+- pinctrl-names: should be "default" (see pinctrl binding [1]).
+- pinctrl-0: a phandle pointing to the pin settings for the device (see
+	pinctrl binding [1]).
+- vcc-supply: a phandle for the regulator supplying 3.3V power.
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+
+Example:
+	&i2c0 {
+		/* ... */
+
+		/* Cypress Gen3 touchpad */
+		touchpad@67 {
+			compatible = "cypress,cyapa";
+			reg = <0x67>;
+			interrupt-parent = <&gpio>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
+			wakeup-source;
+		};
+
+		/* Cypress Gen5 and later touchpad */
+		touchpad@24 {
+			compatible = "cypress,cyapa";
+			reg = <0x24>;
+			interrupt-parent = <&gpio>;
+			interrupts = <2 IRQ_TYPE_EDGE_FALLING>;	/* GPIO 2 */
+			wakeup-source;
+		};
+
+		/* ... */
+	};
diff --git a/Documentation/devicetree/bindings/input/elants_i2c.txt b/Documentation/devicetree/bindings/input/elants_i2c.txt
index a765232..8a71038 100644
--- a/Documentation/devicetree/bindings/input/elants_i2c.txt
+++ b/Documentation/devicetree/bindings/input/elants_i2c.txt
@@ -13,6 +13,9 @@
 - pinctrl-names: should be "default" (see pinctrl binding [1]).
 - pinctrl-0: a phandle pointing to the pin settings for the device (see
   pinctrl binding [1]).
+- reset-gpios: reset gpio the chip is connected to.
+- vcc33-supply: a phandle for the regulator supplying 3.3V power.
+- vccio-supply: a phandle for the regulator supplying IO power.
 
 [0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 [1]: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
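+
+An illustrative fragment wiring up these supplies and the reset line
+(the regulator and GPIO phandles are examples only):
+
+	touchscreen@10 {
+		compatible = "elan,ekth3500";
+		reg = <0x10>;
+		interrupt-parent = <&gpio4>;
+		interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
+		reset-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>;
+		vcc33-supply = <&vcc33_ts>;
+		vccio-supply = <&vcc18_ts>;
+	};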
diff --git a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
index 313abef..5b91f5a 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys-polled.txt
@@ -20,7 +20,7 @@
 	  If not specified defaults to <1> == EV_KEY.
 	- debounce-interval: Debouncing interval time in milliseconds.
 	  If not specified defaults to 5.
-	- gpio-key,wakeup: Boolean, button can wake-up the system.
+	- wakeup-source: Boolean, button can wake-up the system.
 
 Example nodes:
 
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 44b7057..072bf75 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -23,7 +23,7 @@
 	  If not specified defaults to <1> == EV_KEY.
 	- debounce-interval: Debouncing interval time in milliseconds.
 	  If not specified defaults to 5.
-	- gpio-key,wakeup: Boolean, button can wake-up the system.
+	- wakeup-source: Boolean, button can wake-up the system.
 	- linux,can-disable: Boolean, indicates that button is connected
 	  to dedicated (not shared) interrupt which can be disabled to
 	  suppress events from the button.
diff --git a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
index ead641c..4d86059 100644
--- a/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
+++ b/Documentation/devicetree/bindings/input/gpio-matrix-keypad.txt
@@ -19,7 +19,7 @@
 
 Optional Properties:
 - linux,no-autorepeat:	do no enable autorepeat feature.
-- linux,wakeup:		use any event on keypad as wakeup event.
+- wakeup-source:	use any event on keypad as wakeup event.
 - debounce-delay-ms:	debounce interval in milliseconds
 - col-scan-delay-us:	delay, measured in microseconds, that is needed
 			before we can scan keypad after activating column gpio
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt b/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
index 7d8cb92..ee62156 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8xxx-keypad.txt
@@ -33,7 +33,7 @@
 	Value type: <bool>
 	Definition: don't enable autorepeat feature.
 
-- linux,keypad-wakeup:
+- wakeup-source:
 	Usage: optional
 	Value type: <bool>
 	Definition: use any event on keypad as wakeup event.
diff --git a/Documentation/devicetree/bindings/input/samsung-keypad.txt b/Documentation/devicetree/bindings/input/samsung-keypad.txt
index 942d071..863e77f 100644
--- a/Documentation/devicetree/bindings/input/samsung-keypad.txt
+++ b/Documentation/devicetree/bindings/input/samsung-keypad.txt
@@ -36,9 +36,11 @@
 - pinctrl-0: Should specify pin control groups used for this controller.
 - pinctrl-names: Should contain only one value - "default".
 
+Optional Properties:
+- wakeup-source: use any event on keypad as wakeup event.
+
 Optional Properties specific to linux:
 - linux,keypad-no-autorepeat: do no enable autorepeat feature.
-- linux,keypad-wakeup: use any event on keypad as wakeup event.
 
 
 Example:
diff --git a/Documentation/devicetree/bindings/input/snvs-pwrkey.txt b/Documentation/devicetree/bindings/input/snvs-pwrkey.txt
new file mode 100644
index 0000000..70c1425
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/snvs-pwrkey.txt
@@ -0,0 +1 @@
+See Documentation/devicetree/bindings/crypto/fsl-sec4.txt
diff --git a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
index 6e55109..8eb240a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/pixcir_i2c_ts.txt
@@ -8,6 +8,9 @@
 - touchscreen-size-x: horizontal resolution of touchscreen (in pixels)
 - touchscreen-size-y: vertical resolution of touchscreen (in pixels)
 
+Optional properties:
+- reset-gpio: GPIO connected to the RESET line of the chip
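+  For instance (the GPIO phandle is an example only):
+  reset-gpio = <&gpio3 12 GPIO_ACTIVE_LOW>;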
+
 Example:
 
 	i2c@00000000 {
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
index 80c37df..e3c27c4 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/zforce_ts.txt
@@ -4,12 +4,12 @@
 - compatible: must be "neonode,zforce"
 - reg: I2C address of the chip
 - interrupts: interrupt to which the chip is connected
-- gpios: gpios the chip is connected to
-  first one is the interrupt gpio and second one the reset gpio
+- reset-gpios: reset gpio the chip is connected to
 - x-size: horizontal resolution of touchscreen
 - y-size: vertical resolution of touchscreen
 
 Optional properties:
+- irq-gpios : interrupt gpio the chip is connected to
 - vdd-supply: Regulator controlling the controller supply
 
 Example:
@@ -23,8 +23,8 @@
 			interrupts = <2 0>;
 			vdd-supply = <&reg_zforce_vdd>;
 
-			gpios = <&gpio5 6 0>, /* INT */
-				<&gpio5 9 0>; /* RST */
+			reset-gpios = <&gpio5 9 0>; /* RST */
+			irq-gpios = <&gpio5 6 0>; /* IRQ, optional */
 
 			x-size = <800>;
 			y-size = <600>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
index 7da578d..2d6c8bb 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
@@ -5,9 +5,14 @@
 controller, or the HW block containing it, is referred to occasionally
 as "armctrl" in the SoC documentation, hence naming of this binding.
 
+The BCM2836 contains the same interrupt controller with the same
+interrupts, but the per-CPU interrupt controller is the root, and an
+interrupt there indicates that the ARMCTRL has an interrupt to handle.
+
 Required properties:
 
-- compatible : should be "brcm,bcm2835-armctrl-ic"
+- compatible : should be "brcm,bcm2835-armctrl-ic" or
+                 "brcm,bcm2836-armctrl-ic"
 - reg : Specifies base physical address and size of the registers.
 - interrupt-controller : Identifies the node as an interrupt controller
 - #interrupt-cells : Specifies the number of cells needed to encode an
@@ -20,6 +25,12 @@
   The 2nd cell contains the interrupt number within the bank. Valid values
   are 0..7 for bank 0, and 0..31 for bank 1.
 
+Additional required properties for brcm,bcm2836-armctrl-ic:
+- interrupt-parent : Specifies the parent interrupt controller when this
+  controller is the second level.
+- interrupts : Specifies the interrupt on the parent for this interrupt
+  controller to handle.
+
 The interrupt sources are as follows:
 
 Bank 0:
@@ -102,9 +113,21 @@
 
 Example:
 
+/* BCM2835, first level */
 intc: interrupt-controller {
 	compatible = "brcm,bcm2835-armctrl-ic";
 	reg = <0x7e00b200 0x200>;
 	interrupt-controller;
 	#interrupt-cells = <2>;
 };
+
+/* BCM2836, second level */
+intc: interrupt-controller {
+	compatible = "brcm,bcm2836-armctrl-ic";
+	reg = <0x7e00b200 0x200>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+
+	interrupt-parent = <&local_intc>;
+	interrupts = <8>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
new file mode 100644
index 0000000..f320dcd
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
@@ -0,0 +1,37 @@
+BCM2836 per-CPU interrupt controller
+
+The BCM2836 has a per-cpu interrupt controller for the timer, PMU
+events, and SMP IPIs.  One of the CPUs may receive interrupts for the
+peripheral (GPU) events, which chain to the BCM2835-style interrupt
+controller.
+
+Required properties:
+
+- compatible:	 	Should be "brcm,bcm2836-l1-intc"
+- reg:			Specifies base physical address and size of the
+			  registers
+- interrupt-controller:	Identifies the node as an interrupt controller
+- #interrupt-cells:	Specifies the number of cells needed to encode an
+			  interrupt source. The value shall be 1
+
+Please refer to interrupts.txt in this directory for details of the common
+Interrupt Controllers bindings used by client devices.
+
+The interrupt sources are as follows:
+
+0: CNTPSIRQ
+1: CNTPNSIRQ
+2: CNTHPIRQ
+3: CNTVIRQ
+8: GPU_FAST
+9: PMU_FAST
+
+Example:
+
+local_intc: local_intc {
+	compatible = "brcm,bcm2836-l1-intc";
+	reg = <0x40000000 0x100>;
+	interrupt-controller;
+	#interrupt-cells = <1>;
+	interrupt-parent = <&local_intc>;
+};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/msi.txt b/Documentation/devicetree/bindings/interrupt-controller/msi.txt
new file mode 100644
index 0000000..c60c034
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/msi.txt
@@ -0,0 +1,135 @@
+This document describes the generic device tree binding for MSI controllers and
+their master(s).
+
+Message Signaled Interrupts (MSIs) are a class of interrupts generated by a
+write to an MMIO address.
+
+MSIs were originally specified by PCI (and are used with PCIe), but may also be
+used with other busses, and hence a mechanism is required to relate devices on
+those busses to the MSI controllers which they are capable of using,
+potentially including additional information.
+
+MSIs are distinguished by some combination of:
+
+- The doorbell (the MMIO address written to).
+  
+  Devices may be configured by software to write to arbitrary doorbells which
+  they can address. An MSI controller may feature a number of doorbells.
+
+- The payload (the value written to the doorbell).
+  
+  Devices may be configured to write an arbitrary payload chosen by software.
+  MSI controllers may have restrictions on permitted payloads.
+
+- Sideband information accompanying the write.
+  
+  Typically this is neither configurable nor probeable, and depends on the path
+  taken through the memory system (i.e. it is a property of the combination of
+  MSI controller and device rather than a property of either in isolation).
+
+
+MSI controllers:
+================
+
+An MSI controller signals interrupts to a CPU when a write is made to an MMIO
+address by some master. An MSI controller may feature a number of doorbells.
+
+Required properties:
+--------------------
+
+- msi-controller: Identifies the node as an MSI controller.
+
+Optional properties:
+--------------------
+
+- #msi-cells: The number of cells in an msi-specifier, required if not zero.
+
+  Typically this will encode information related to sideband data, and will
+  not encode doorbells or payloads as these can be configured dynamically.
+
+  The meaning of the msi-specifier is defined by the device tree binding of
+  the specific MSI controller. 
+
+
+MSI clients
+===========
+
+MSI clients are devices which generate MSIs. For each MSI they wish to
+generate, the doorbell and payload may be configured, though sideband
+information may not be configurable.
+
+Required properties:
+--------------------
+
+- msi-parent: A list of phandle + msi-specifier pairs, one for each MSI
+  controller which the device is capable of using.
+
+  This property is unordered, and MSIs may be allocated from any combination of
+  MSI controllers listed in the msi-parent property.
+
+  If a device has restrictions on the allocation of MSIs, these restrictions
+  must be described with additional properties.
+
+  When #msi-cells is non-zero, busses with an msi-parent will require
+  additional properties to describe the relationship between devices on the bus
+  and the set of MSIs they can potentially generate.
+
+
+Example
+=======
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	msi_a: msi-controller@a {
+		reg = <0xa 0xf00>;
+		compatible = "vendor-a,some-controller";
+		msi-controller;
+		/* No sideband data, so #msi-cells omitted */
+	};
+
+	msi_b: msi-controller@b {
+		reg = <0xb 0xf00>;
+		compatible = "vendor-b,another-controller";
+		msi-controller;
+		/* Each device has some unique ID */
+		#msi-cells = <1>;
+	};
+
+	msi_c: msi-controller@c {
+		reg = <0xc 0xf00>;
+		compatible = "vendor-b,another-controller";
+		msi-controller;
+		/* Each device has some unique ID */
+		#msi-cells = <1>;
+	};
+
+	dev@0 {
+		reg = <0x0 0xf00>;
+		compatible = "vendor-c,some-device";
+
+		/* Can only generate MSIs to msi_a */
+		msi-parent = <&msi_a>;
+	};
+
+	dev@1 {
+		reg = <0x1 0xf00>;
+		compatible = "vendor-c,some-device";
+
+		/* 
+		 * Can generate MSIs to either A or B.
+		 */
+		msi-parent = <&msi_a>, <&msi_b 0x17>;
+	};
+
+	dev@2 {
+		reg = <0x2 0xf00>;
+		compatible = "vendor-c,some-device";
+		/*
+		 * Has different IDs at each MSI controller.
+		 * Can generate MSIs to all of the MSI controllers.
+		 */
+		msi-parent = <&msi_a>, <&msi_b 0x17>, <&msi_c 0x53>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/leds/common.txt b/Documentation/devicetree/bindings/leds/common.txt
index 747c538..6841984 100644
--- a/Documentation/devicetree/bindings/leds/common.txt
+++ b/Documentation/devicetree/bindings/leds/common.txt
@@ -29,14 +29,23 @@
      "ide-disk" - LED indicates disk activity
      "timer" - LED flashes at a fixed, configurable rate
 
-- max-microamp : maximum intensity in microamperes of the LED
-		 (torch LED for flash devices)
-- flash-max-microamp : maximum intensity in microamperes of the
-                       flash LED; it is mandatory if the LED should
-		       support the flash mode
-- flash-timeout-us : timeout in microseconds after which the flash
-                     LED is turned off
+- led-max-microamp : Maximum LED supply current in microamperes. This property
+                     can be made mandatory for board configurations where
+                     setting an excessive current would risk hardware damage.
+                     For flash LED controllers with configurable current this
+                     property is mandatory for the LEDs in the non-flash modes
+                     (e.g. torch or indicator).
 
+Required properties for flash LED child nodes:
+- flash-max-microamp : Maximum flash LED supply current in microamperes.
+- flash-max-timeout-us : Maximum timeout in microseconds after which the flash
+                         LED is turned off.
+
+For controllers that have no configurable current the flash-max-microamp
+property can be omitted.
+For controllers that have no configurable timeout the flash-max-timeout-us
+property can be omitted.
 
 Examples:
 
@@ -49,7 +58,7 @@
 camera-flash {
 	label = "Flash";
 	led-sources = <0>, <1>;
-	max-microamp = <50000>;
+	led-max-microamp = <50000>;
 	flash-max-microamp = <320000>;
-	flash-timeout-us = <500000>;
+	flash-max-timeout-us = <500000>;
 };
diff --git a/Documentation/devicetree/bindings/leds/leds-ns2.txt b/Documentation/devicetree/bindings/leds/leds-ns2.txt
index aef3aca..9f81258 100644
--- a/Documentation/devicetree/bindings/leds/leds-ns2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-ns2.txt
@@ -8,6 +8,9 @@
 Required sub-node properties:
 - cmd-gpio: Command LED GPIO. See OF device-tree GPIO specification.
 - slow-gpio: Slow LED GPIO. See OF device-tree GPIO specification.
+- modes-map: A mapping between LED modes (off, on or SATA activity blinking) and
+  the corresponding cmd-gpio/slow-gpio values. All the GPIO value combinations
+  should be given in order to avoid having an unknown mode at driver probe time.
 
 Optional sub-node properties:
 - label: Name for this LED. If omitted, the label is taken from the node name.
@@ -15,6 +18,8 @@
 
 Example:
 
+#include <dt-bindings/leds/leds-ns2.h>
+
 ns2-leds {
 	compatible = "lacie,ns2-leds";
 
@@ -22,5 +27,9 @@
 		label = "ns2:blue:sata";
 		slow-gpio = <&gpio0 29 0>;
 		cmd-gpio = <&gpio0 30 0>;
+		modes-map = <NS_V2_LED_OFF  0 1
+			     NS_V2_LED_ON   1 0
+			     NS_V2_LED_ON   0 0
+			     NS_V2_LED_SATA 1 1>;
 	};
 };
diff --git a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt b/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
deleted file mode 100644
index a85a964..0000000
--- a/Documentation/devicetree/bindings/leds/leds-pm8941-wled.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-Binding for Qualcomm PM8941 WLED driver
-
-Required properties:
-- compatible: should be "qcom,pm8941-wled"
-- reg: slave address
-
-Optional properties:
-- label: The label for this led
-  See Documentation/devicetree/bindings/leds/common.txt
-- linux,default-trigger: Default trigger assigned to the LED
-  See Documentation/devicetree/bindings/leds/common.txt
-- qcom,cs-out: bool; enable current sink output
-- qcom,cabc: bool; enable content adaptive backlight control
-- qcom,ext-gen: bool; use externally generated modulator signal to dim
-- qcom,current-limit: mA; per-string current limit; value from 0 to 25
-	default: 20mA
-- qcom,current-boost-limit: mA; boost current limit; one of:
-	105, 385, 525, 805, 980, 1260, 1400, 1680
-	default: 805mA
-- qcom,switching-freq: kHz; switching frequency; one of:
-	600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
-	1600, 1920, 2400, 3200, 4800, 9600,
-	default: 1600kHz
-- qcom,ovp: V; Over-voltage protection limit; one of:
-	27, 29, 32, 35
-	default: 29V
-- qcom,num-strings: #; number of led strings attached; value from 1 to 3
-	default: 2
-
-Example:
-
-pm8941-wled@d800 {
-	compatible = "qcom,pm8941-wled";
-	reg = <0xd800>;
-	label = "backlight";
-
-	qcom,cs-out;
-	qcom,current-limit = <20>;
-	qcom,current-boost-limit = <805>;
-	qcom,switching-freq = <1600>;
-	qcom,ovp = <29>;
-	qcom,num-strings = <2>;
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-powernv.txt b/Documentation/devicetree/bindings/leds/leds-powernv.txt
new file mode 100644
index 0000000..6665569
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-powernv.txt
@@ -0,0 +1,26 @@
+Device Tree binding for LEDs on IBM Power Systems
+-------------------------------------------------
+
+Required properties:
+- compatible : Should be "ibm,opal-v3-led".
+- led-mode   : Should be "lightpath" or "guidinglight".
+
+Each location code of FRU/Enclosure must be expressed in the
+form of a sub-node.
+
+Required properties for the sub nodes:
+- led-types : Supported LED types (attention/identify/fault) provided
+              in the form of string array.
+
+Example:
+
+leds {
+	compatible = "ibm,opal-v3-led";
+	led-mode = "lightpath";
+
+	U78C9.001.RST0027-P1-C1 {
+		led-types = "identify", "fault";
+	};
+	...
+	...
+};
diff --git a/Documentation/devicetree/bindings/memory-controllers/arm,pl172.txt b/Documentation/devicetree/bindings/memory-controllers/arm,pl172.txt
new file mode 100644
index 0000000..e6df32f
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/arm,pl172.txt
@@ -0,0 +1,125 @@
+* Device tree bindings for ARM PL172 MultiPort Memory Controller
+
+Required properties:
+
+- compatible:		"arm,pl172", "arm,primecell"
+
+- reg:			Must contain offset/length value for the controller.
+
+- #address-cells:	Must be 2. The partition number has to be encoded in the
+			first address cell and it may accept values 0..N-1
+			(N - total number of partitions). The second cell is the
+			offset into the partition.
+
+- #size-cells:		Must be set to 1.
+
+- ranges:		Must contain one or more chip select memory regions.
+
+- clocks:		Must contain references to controller clocks.
+
+- clock-names:		Must contain "mpmcclk" and "apb_pclk".
+
+- clock-ranges:		Empty property indicating that child nodes can inherit
+			named clocks. Required only if clock tree data present
+			named clocks. Required only if clock tree data is present
+			See clock-bindings.txt
+
+Child chip-select (cs) nodes contain the nodes of the memory devices
+connected to them, such as NOR (e.g. cfi-flash) and NAND.
+
+Required child cs node properties:
+
+- #address-cells:	Must be 2.
+
+- #size-cells:		Must be 1.
+
+- ranges:		Empty property indicating that child nodes can inherit
+			memory layout.
+
+- clock-ranges:		Empty property indicating that child nodes can inherit
+			named clocks. Required only if clock tree data is present
+			in device tree.
+
+- mpmc,cs:		Chip select number. Indicates to the pl172 driver
+			which chipselect is used for accessing the memory.
+
+- mpmc,memory-width:	Width of the chip select memory. Must be equal to
+			either 8, 16 or 32.
+
+Optional child cs node config properties:
+
+- mpmc,async-page-mode:	Enable asynchronous page mode.
+
+- mpmc,cs-active-high:	Set chip select polarity to active high.
+
+- mpmc,byte-lane-low:	Set byte lane state to low.
+
+- mpmc,extended-wait:	Enable extended wait.
+
+- mpmc,buffer-enable:	Enable write buffer.
+
+- mpmc,write-protect:	Enable write protect.
+
+Optional child cs node timing properties:
+
+- mpmc,write-enable-delay:	Delay from chip select assertion to write
+				enable (WE signal) in nanoseconds.
+
+- mpmc,output-enable-delay:	Delay from chip select assertion to output
+				enable (OE signal) in nanoseconds.
+
+- mpmc,write-access-delay:	Delay from chip select assertion to write
+				access in nanoseconds.
+
+- mpmc,read-access-delay:	Delay from chip select assertion to read
+				access in nanoseconds.
+
+- mpmc,page-mode-read-delay:	Delay for asynchronous page mode sequential
+				accesses in nanoseconds.
+
+- mpmc,turn-round-delay:	Delay between access to memory banks in
+				nanoseconds.
+
+If any of the above timing parameters is absent, the current value will
+be taken from the corresponding hardware register.
+
+An example for a pl172 with NOR flash on chip select 0 is shown below.
+
+emc: memory-controller@40005000 {
+	compatible = "arm,pl172", "arm,primecell";
+	reg = <0x40005000 0x1000>;
+	clocks = <&ccu1 CLK_CPU_EMCDIV>, <&ccu1 CLK_CPU_EMC>;
+	clock-names = "mpmcclk", "apb_pclk";
+	#address-cells = <2>;
+	#size-cells = <1>;
+	ranges = <0 0 0x1c000000 0x1000000
+		  1 0 0x1d000000 0x1000000
+		  2 0 0x1e000000 0x1000000
+		  3 0 0x1f000000 0x1000000>;
+
+	cs0 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+
+		mpmc,cs = <0>;
+		mpmc,memory-width = <16>;
+		mpmc,byte-lane-low;
+		mpmc,write-enable-delay = <0>;
+		mpmc,output-enable-delay = <0>;
+		mpmc,read-access-delay = <70>;
+		mpmc,page-mode-read-delay = <70>;
+
+		flash@0,0 {
+			compatible = "sst,sst39vf320", "cfi-flash";
+			reg = <0 0 0x400000>;
+			bank-width = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			partition@0 {
+				label = "data";
+				reg = <0 0x400000>;
+			};
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
index d5e3704..89427b0 100644
--- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt
@@ -18,6 +18,8 @@
               interrupt (NAND_EVTER_STAT).  If there is only one,
               that interrupt reports both types of event.
 
+- little-endian : If this property is absent, registers are accessed in
+                  big-endian mode by default.
 
 - ranges : Each range corresponds to a single chipselect, and covers
            the entire access window as configured.
@@ -34,6 +36,7 @@
 		#size-cells = <1>;
 		reg = <0x0 0xffe1e000 0 0x2000>;
 		interrupts = <16 2 19 2>;
+		little-endian;
 
 		/* NOR, NAND Flashes and CPLD on board */
 		ranges = <0x0 0x0 0x0 0xee000000 0x02000000
diff --git a/Documentation/devicetree/bindings/memory-controllers/synopsys.txt b/Documentation/devicetree/bindings/memory-controllers/synopsys.txt
index f9c6454..a43d26d 100644
--- a/Documentation/devicetree/bindings/memory-controllers/synopsys.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/synopsys.txt
@@ -1,5 +1,9 @@
 Binding for Synopsys IntelliDDR Multi Protocol Memory Controller
 
+This controller has optional ECC support in the half-bus-width (16-bit)
+configuration. The ECC controller corrects single-bit errors and detects
+double-bit errors.
+
 Required properties:
  - compatible: Should be 'xlnx,zynq-ddrc-a05'
  - reg: Base address and size of the controllers memory area
diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
index f64de95a..ad5d904 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
@@ -2,7 +2,11 @@
 
 Required properties:
  - compatible: value should be one of the following:
+   "atmel,at91sam9n12-hlcdc"
+   "atmel,at91sam9x5-hlcdc"
+   "atmel,sama5d2-hlcdc"
    "atmel,sama5d3-hlcdc"
+   "atmel,sama5d4-hlcdc"
  - reg: base address and size of the HLCDC device registers.
  - clock-names: the name of the 3 clocks requested by the HLCDC device.
    Should contain "periph_clk", "sys_clk" and "slow_clk".
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 753f14f..4181122 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -1,12 +1,14 @@
 AXP family PMIC device tree bindings
 
 The axp20x family current members :
+axp152 (X-Powers)
 axp202 (X-Powers)
 axp209 (X-Powers)
 axp221 (X-Powers)
 
 Required properties:
-- compatible: "x-powers,axp202", "x-powers,axp209", "x-powers,axp221"
+- compatible: "x-powers,axp152", "x-powers,axp202", "x-powers,axp209",
+	      "x-powers,axp221"
 - reg: The I2C slave address for the AXP chip
 - interrupt-parent: The parent interrupt controller
 - interrupts: SoC NMI / GPIO interrupt connected to the PMIC's IRQ pin
diff --git a/Documentation/devicetree/bindings/mfd/da9062.txt b/Documentation/devicetree/bindings/mfd/da9062.txt
new file mode 100644
index 0000000..38802b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9062.txt
@@ -0,0 +1,88 @@
+* Dialog DA9062 Power Management Integrated Circuit (PMIC)
+
+DA9062 consists of a large and varied group of sub-devices:
+
+Device                   Supply Names    Description
+------                   ------------    -----------
+da9062-regulator        :               : LDOs & BUCKs
+da9062-rtc              :               : Real-Time Clock
+da9062-watchdog         :               : Watchdog Timer
+
+======
+
+Required properties:
+
+- compatible : Should be "dlg,da9062".
+- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be
+  modified to match the chip's OTP settings).
+- interrupt-parent : Specifies the reference to the interrupt controller for
+  the DA9062.
+- interrupts : IRQ line information.
+- interrupt-controller
+
+See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt for
+further information on IRQ bindings.
+
+Sub-nodes:
+
+- regulators : This node defines the settings for the LDOs and BUCKs. The
+  DA9062 regulators are bound using their names listed below:
+
+    buck1    : BUCK_1
+    buck2    : BUCK_2
+    buck3    : BUCK_3
+    buck4    : BUCK_4
+    ldo1     : LDO_1
+    ldo2     : LDO_2
+    ldo3     : LDO_3
+    ldo4     : LDO_4
+
+  The component follows the standard regulator framework and the binding
+  details of individual regulator devices can be found in:
+  Documentation/devicetree/bindings/regulator/regulator.txt
+
+
+- rtc : This node defines settings required for the Real-Time Clock associated
+  with the DA9062. There are currently no entries in this binding, however
+  compatible = "dlg,da9062-rtc" should be added if a node is created.
+
+- watchdog: This node defines the settings for the watchdog driver associated
+  with the DA9062 PMIC. The compatible = "dlg,da9062-watchdog" should be added
+  if a node is created.
+
+
+Example:
+
+	pmic0: da9062@58 {
+		compatible = "dlg,da9062";
+		reg = <0x58>;
+		interrupt-parent = <&gpio6>;
+		interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+		interrupt-controller;
+
+		rtc {
+			compatible = "dlg,da9062-rtc";
+		};
+
+		watchdog {
+			compatible = "dlg,da9062-watchdog";
+		};
+
+		regulators {
+			DA9062_BUCK1: buck1 {
+				regulator-name = "BUCK1";
+				regulator-min-microvolt = <300000>;
+				regulator-max-microvolt = <1570000>;
+				regulator-min-microamp = <500000>;
+				regulator-max-microamp = <2000000>;
+				regulator-boot-on;
+			};
+			DA9062_LDO1: ldo1 {
+				regulator-name = "LDO_1";
+				regulator-min-microvolt = <900000>;
+				regulator-max-microvolt = <3600000>;
+				regulator-boot-on;
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/mfd/max77686.txt b/Documentation/devicetree/bindings/mfd/max77686.txt
index 163bd81..741e766 100644
--- a/Documentation/devicetree/bindings/mfd/max77686.txt
+++ b/Documentation/devicetree/bindings/mfd/max77686.txt
@@ -7,8 +7,9 @@
 client while probing.This document describes the binding for mfd device and
 PMIC submodule.
 
-Binding for the built-in 32k clock generator block is defined separately
-in bindings/clk/maxim,max77686.txt file.
+Bindings for the built-in 32k clock generator block and
+regulators are defined in ../clk/maxim,max77686.txt and
+../regulator/max77686.txt respectively.
 
 Required properties:
 - compatible : Must be "maxim,max77686";
@@ -16,67 +17,11 @@
 - interrupts : This i2c device has an IRQ line connected to the main SoC.
 - interrupt-parent : The parent interrupt controller.
 
-Optional node:
-- voltage-regulators : The regulators of max77686 have to be instantiated
-  under subnode named "voltage-regulators" using the following format.
-
-	regulator_name {
-		regulator-compatible = LDOn/BUCKn
-		standard regulator constraints....
-	};
-	refer Documentation/devicetree/bindings/regulator/regulator.txt
-
-  The regulator-compatible property of regulator should initialized with string
-to get matched with their hardware counterparts as follow:
-
-	-LDOn 	:	for LDOs, where n can lie in range 1 to 26.
-		 	example: LDO1, LDO2, LDO26.
-	-BUCKn 	:	for BUCKs, where n can lie in range 1 to 9.
-			example: BUCK1, BUCK5, BUCK9.
-
-  Regulators which can be turned off during system suspend:
-	-LDOn	:	2, 6-8, 10-12, 14-16,
-	-BUCKn	:	1-4.
-  Use standard regulator bindings for it ('regulator-off-in-suspend').
-
-  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
-  control. To turn this feature on this property must be added to the regulator
-  sub-node:
-	- maxim,ena-gpios :	one GPIO specifier enable control (the gpio
-				flags are actually ignored and always
-				ACTIVE_HIGH is used)
-
 Example:
 
-	max77686@09 {
+	max77686: pmic@09 {
 		compatible = "maxim,max77686";
 		interrupt-parent = <&wakeup_eint>;
 		interrupts = <26 0>;
 		reg = <0x09>;
-
-		voltage-regulators {
-			ldo11_reg {
-				regulator-compatible = "LDO11";
-				regulator-name = "vdd_ldo11";
-				regulator-min-microvolt = <1900000>;
-				regulator-max-microvolt = <1900000>;
-				regulator-always-on;
-			};
-
-			buck1_reg {
-				regulator-compatible = "BUCK1";
-				regulator-name = "vdd_mif";
-				regulator-min-microvolt = <950000>;
-				regulator-max-microvolt = <1300000>;
-				regulator-always-on;
-				regulator-boot-on;
-			};
-
-			buck9_reg {
-				regulator-compatible = "BUCK9";
-				regulator-name = "CAM_ISP_CORE_1.2V";
-				regulator-min-microvolt = <1000000>;
-				regulator-max-microvolt = <1200000>;
-				maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
-			};
-	}
+	};
diff --git a/Documentation/devicetree/bindings/mfd/max77802.txt b/Documentation/devicetree/bindings/mfd/max77802.txt
new file mode 100644
index 0000000..51fc1a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/max77802.txt
@@ -0,0 +1,26 @@
+Maxim MAX77802 multi-function device
+
+The Maxim MAX77802 is a Power Management IC (PMIC) that contains 10
+high-efficiency buck regulators, 32 low-dropout (LDO) regulators used to
+power up application processors and peripherals, two 32kHz clock outputs,
+a Real-Time Clock (RTC) and an I2C interface to program the individual
+regulators, clock outputs and the RTC.
+
+Bindings for the built-in 32k clock generator block and
+regulators are defined in ../clk/maxim,max77802.txt and
+../regulator/max77802.txt respectively.
+
+Required properties:
+- compatible		: Must be "maxim,max77802"
+- reg			: Specifies the I2C slave address of the PMIC block.
+- interrupts		: I2C device IRQ line connected to the main SoC.
+- interrupt-parent	: The parent interrupt controller.
+
+Example:
+
+	max77802: pmic@09 {
+		compatible = "maxim,max77802";
+		interrupt-parent = <&intc>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		reg = <0x09>;
+	};
diff --git a/Documentation/devicetree/bindings/mfd/rk808.txt b/Documentation/devicetree/bindings/mfd/rk808.txt
index 9e6e259..4ca6aab 100644
--- a/Documentation/devicetree/bindings/mfd/rk808.txt
+++ b/Documentation/devicetree/bindings/mfd/rk808.txt
@@ -24,6 +24,10 @@
 - vcc10-supply: The input supply for LDO_REG6
 - vcc11-supply: The input supply for LDO_REG8
 - vcc12-supply: The input supply for SWITCH_REG2
+- dvs-gpios: buck1/2 can be controlled by GPIO DVS. This is a list of GPIO
+  specifiers for the two host GPIOs used for DVS. The format of the GPIO
+  specifier depends on the GPIO controller. If DVS GPIOs aren't present,
+  voltage changes will happen very quickly with no slow ramp time.
 
 Regulators: All the regulators of RK808 to be instantiated shall be
 listed in a child node named 'regulators'. Each regulator is represented
@@ -55,7 +59,9 @@
 		interrupt-parent = <&gpio0>;
 		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";
-		pinctrl-0 = <&pmic_int>;
+		pinctrl-0 = <&pmic_int &dvs_1 &dvs_2>;
+		dvs-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>,
+			    <&gpio7 15 GPIO_ACTIVE_HIGH>;
 		reg = <0x1b>;
 		rockchip,system-power-controller;
 		wakeup-source;
diff --git a/Documentation/devicetree/bindings/mfd/tc3589x.txt b/Documentation/devicetree/bindings/mfd/tc3589x.txt
index 6fcedba..37bf7f1 100644
--- a/Documentation/devicetree/bindings/mfd/tc3589x.txt
+++ b/Documentation/devicetree/bindings/mfd/tc3589x.txt
@@ -55,7 +55,7 @@
  - linux,keymap: the definition can be found in
    bindings/input/matrix-keymap.txt
  - linux,no-autorepeat: do no enable autorepeat feature.
- - linux,wakeup: use any event on keypad as wakeup event.
+ - wakeup-source: use any event on keypad as wakeup event.
 
 Example:
 
@@ -84,7 +84,6 @@
 		keypad,num-columns = <8>;
 		keypad,num-rows = <8>;
 		linux,no-autorepeat;
-		linux,wakeup;
 		linux,keymap = <0x0301006b
 				0x04010066
 				0x06040072
@@ -103,5 +102,6 @@
 				0x01030039
 				0x07060069
 				0x050500d9>;
+		wakeup-source;
 	};
 };
diff --git a/Documentation/devicetree/bindings/misc/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/misc/allwinner,sunxi-sid.txt
deleted file mode 100644
index fabdf64..0000000
--- a/Documentation/devicetree/bindings/misc/allwinner,sunxi-sid.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Allwinner sunxi-sid
-
-Required properties:
-- compatible: "allwinner,sun4i-a10-sid" or "allwinner,sun7i-a20-sid"
-- reg: Should contain registers location and length
-
-Example for sun4i:
-	sid@01c23800 {
-		compatible = "allwinner,sun4i-a10-sid";
-		reg = <0x01c23800 0x10>
-	};
-
-Example for sun7i:
-	sid@01c23800 {
-		compatible = "allwinner,sun7i-a20-sid";
-		reg = <0x01c23800 0x200>
-	};
diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
index 4461dc7..862aa2f 100644
--- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt
@@ -1,7 +1,8 @@
 * Freescale Quad Serial Peripheral Interface(QuadSPI)
 
 Required properties:
-  - compatible : Should be "fsl,vf610-qspi" or "fsl,imx6sx-qspi"
+  - compatible : Should be "fsl,vf610-qspi", "fsl,imx6sx-qspi",
+		 "fsl,imx7d-qspi", "fsl,imx6ul-qspi"
   - reg : the first contains the register location and length,
           the second contains the memory mapping address and length
   - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory"
diff --git a/Documentation/devicetree/bindings/mtd/nxp-spifi.txt b/Documentation/devicetree/bindings/mtd/nxp-spifi.txt
new file mode 100644
index 0000000..f8b6b25
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/nxp-spifi.txt
@@ -0,0 +1,58 @@
+* NXP SPI Flash Interface (SPIFI)
+
+NXP SPIFI is a specialized SPI interface for serial Flash devices.
+It supports one Flash device with 1-, 2- and 4-bit width in SPI
+mode 0 or 3. The controller operates in either command or memory
+mode. In memory mode the Flash is accessible from the CPU as
+normal memory.
+
+Required properties:
+  - compatible : Should be "nxp,lpc1773-spifi"
+  - reg : the first contains the register location and length,
+          the second contains the memory mapping address and length
+  - reg-names: Should contain the reg names "spifi" and "flash"
+  - interrupts : Should contain the interrupt for the device
+  - clocks : The clocks needed by the SPIFI controller
+  - clock-names : Should contain the clock names "spifi" and "reg"
+
+Optional properties:
+ - resets : phandle + reset specifier
+
+The SPI Flash must be a child of the SPIFI node and must have a
+compatible property as specified in bindings/mtd/jedec,spi-nor.txt
+
+Optionally it can also contain the following properties.
+ - spi-cpol : The controller only supports modes 0 and 3, so either
+              both spi-cpol and spi-cpha should be present or
+              neither of them
+ - spi-cpha : See above
+ - spi-rx-bus-width : Used to select how many pins are used
+                      for input on the controller
+
+See bindings/spi/spi-bus.txt for more information.
+
+Example:
+spifi: spifi@40003000 {
+	compatible = "nxp,lpc1773-spifi";
+	reg = <0x40003000 0x1000>, <0x14000000 0x4000000>;
+	reg-names = "spifi", "flash";
+	interrupts = <30>;
+	clocks = <&ccu1 CLK_SPIFI>, <&ccu1 CLK_CPU_SPIFI>;
+	clock-names = "spifi", "reg";
+	resets = <&rgu 53>;
+
+	flash@0 {
+		compatible = "jedec,spi-nor";
+		spi-cpol;
+		spi-cpha;
+		spi-rx-bus-width = <4>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "data";
+			reg = <0 0x200000>;
+		};
+	};
+};
+
diff --git a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
index 4f833e3..d9b655f 100644
--- a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
@@ -11,6 +11,7 @@
 
 Optional properties:
 
+ - dmas:			dma data channel, see dma.txt binding doc
  - marvell,nand-enable-arbiter:	Set to enable the bus arbiter
  - marvell,nand-keep-config:	Set to keep the NAND controller config as set
 				by the bootloader
@@ -32,6 +33,8 @@
 		compatible = "marvell,pxa3xx-nand";
 		reg = <0x43100000 90>;
 		interrupts = <45>;
+		dmas = <&pdma 97 0>;
+		dma-names = "data";
 		#address-cells = <1>;
 
 		marvell,nand-enable-arbiter;
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 33fe846..a9df21a 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -2,7 +2,11 @@
 ------------------------------------------------------
 
 Required properties:
-- compatible		: Should be "ti,cpsw"
+- compatible		: Should be one of the below:-
+			  "ti,cpsw" for backward compatibility
+			  "ti,am335x-cpsw" for AM335x controllers
+			  "ti,am4372-cpsw" for AM437x controllers
+			  "ti,dra7-cpsw" for DRA7x controllers
 - reg			: physical base address and size of the cpsw
 			  registers map
 - interrupts		: property with a value describing the interrupt
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index f0b4cd7..04e6bef 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -44,9 +44,10 @@
 described below.
 
 Optionnal property:
-- link			: Should be a phandle to another switch's DSA port.
+- link			: Should be a list of phandles to another switch's DSA port.
 			  This property is only used when switches are being
-			  chained/cascaded together.
+			  chained/cascaded together. This port is used as the outgoing port
+			  towards the phandle port, which can be more than one hop away.
 
 - phy-handle		: Phandle to a PHY on an external MDIO bus, not the
 			  switch internal one. See
@@ -58,6 +59,10 @@
 			  Documentation/devicetree/bindings/net/ethernet.txt
 			  for details.
 
+- mii-bus		: Should be a phandle to a valid MDIO bus device node.
+			  This mii-bus will be used in preference to the
+			  global dsa,mii-bus defined above, for this switch.
+
 Optional subnodes:
 - fixed-link		: Fixed-link subnode describing a link to a non-MDIO
 			  managed entity. See
@@ -96,10 +101,11 @@
 				label = "cpu";
 			};
 
-			switch0uplink: port@6 {
+			switch0port6: port@6 {
 				reg = <6>;
 				label = "dsa";
-				link = <&switch1uplink>;
+				link = <&switch1port0
+				        &switch2port0>;
 			};
 		};
 
@@ -107,11 +113,31 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <17 1>;	/* MDIO address 17, switch 1 in tree */
+			mii-bus = <&mii_bus1>;
 
-			switch1uplink: port@0 {
+			switch1port0: port@0 {
 				reg = <0>;
 				label = "dsa";
-				link = <&switch0uplink>;
+				link = <&switch0port6>;
+			};
+			switch1port1: port@1 {
+				reg = <1>;
+				label = "dsa";
+				link = <&switch2port1>;
+			};
+		};
+
+		switch@2 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <18 2>;	/* MDIO address 18, switch 2 in tree */
+			mii-bus = <&mii_bus1>;
+
+			switch2port0: port@0 {
+				reg = <0>;
+				label = "dsa";
+				link = <&switch1port1
+				        &switch0port6>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 41b3f3f..5d88f37 100644
--- a/Documentation/devicetree/bindings/net/ethernet.txt
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -25,7 +25,11 @@
   flow control thresholds.
 - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
   is used for components that can have configurable fifo sizes.
+- managed: string, specifies the PHY management type. Supported values are
+  "auto" and "in-band-status". "auto" is the default; it uses MDIO for
+  management if fixed-link is not specified.
 
 Child nodes of the Ethernet controller are typically the individual PHY devices
 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
 They are described in the phy.txt file in this same directory.
+For non-MDIO PHY management see fixed-link.txt.
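+
+An illustrative node using in-band status management (the compatible string,
+addresses and node name below are hypothetical placeholders, not taken from a
+real binding):
+
+ethernet@70000000 {
+	compatible = "example,ethernet-mac";
+	reg = <0x70000000 0x10000>;
+	phy-mode = "sgmii";
+	managed = "in-band-status";
+};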
diff --git a/Documentation/devicetree/bindings/net/fixed-link.txt b/Documentation/devicetree/bindings/net/fixed-link.txt
index 82bf7e0..ec5d889 100644
--- a/Documentation/devicetree/bindings/net/fixed-link.txt
+++ b/Documentation/devicetree/bindings/net/fixed-link.txt
@@ -17,6 +17,8 @@
   enabled.
 * 'asym-pause' (boolean, optional), to indicate that asym_pause should
   be enabled.
+* 'link-gpios' ('gpio-list', optional), a GPIO that can be read to
+  determine if the link is up.
 
 Old, deprecated 'fixed-link' binding:
 
@@ -30,7 +32,7 @@
   - e: asymmetric pause configuration: 0 for no asymmetric pause, 1 for
     asymmetric pause
 
-Example:
+Examples:
 
 ethernet@0 {
 	...
@@ -40,3 +42,13 @@
 	};
 	...
 };
+
+ethernet@1 {
+	...
+	fixed-link {
+	      speed = <1000>;
+	      pause;
+	      link-gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+	};
+	...
+};
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index d0e6fa3..b30ab6b 100644
--- a/Documentation/devicetree/bindings/net/keystone-netcp.txt
+++ b/Documentation/devicetree/bindings/net/keystone-netcp.txt
@@ -130,7 +130,11 @@
 
 Optional properties:
 - efuse-mac:	If this is 1, then the MAC address for the interface is
-		obtained from the device efuse mac address register
+		obtained from the device efuse mac address register.
+		If this is 2, the two DWORDs occupied by the MAC address
+		are swapped.  The netcp driver will swap the two DWORDs
+		back to the proper order when it obtains the MAC address
+		from efuse.
 - local-mac-address:	the driver is designed to use the of_get_mac_address api
 			only if efuse-mac is 0. When efuse-mac is 0, the MAC
 			address is obtained from local-mac-address. If this
diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
new file mode 100644
index 0000000..fb1e75f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
@@ -0,0 +1,27 @@
+* Samsung S3FWRN5 NCI NFC Controller
+
+Required properties:
+- compatible: Should be "samsung,s3fwrn5-i2c".
+- reg: address on the bus
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- s3fwrn5,en-gpios: Output GPIO pin used for enabling/disabling the chip
+- s3fwrn5,fw-gpios: Output GPIO pin used to enter firmware mode and
+  sleep/wakeup control
+
+Example:
+
+&hsi2c_4 {
+	status = "okay";
+	s3fwrn5@27 {
+		compatible = "samsung,s3fwrn5-i2c";
+
+		reg = <0x27>;
+
+		interrupt-parent = <&gpa1>;
+		interrupts = <3 0 0>;
+
+		s3fwrn5,en-gpios = <&gpf1 4 0>;
+		s3fwrn5,fw-gpios = <&gpj0 2 0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
similarity index 100%
rename from Documentation/devicetree/bindings/net/nfc/st-nci.txt
rename to Documentation/devicetree/bindings/net/nfc/st-nci-i2c.txt
diff --git a/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
new file mode 100644
index 0000000..525681b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/st-nci-spi.txt
@@ -0,0 +1,31 @@
+* STMicroelectronics SAS. ST NCI NFC Controller
+
+Required properties:
+- compatible: Should be "st,st21nfcb-spi"
+- spi-max-frequency: Maximum SPI frequency (<= 10000000).
+- interrupt-parent: phandle for the interrupt gpio controller
+- interrupts: GPIO interrupt to which the chip is connected
+- reset-gpios: Output GPIO pin used to reset the ST21NFCB
+
+Optional SoC Specific Properties:
+- pinctrl-names: Contains only one value - "default".
+- pinctrl-0: Specifies the pin control groups used for this controller.
+
+Example (for ARM-based BeagleBoard xM with ST21NFCB on SPI4):
+
+&mcspi4 {
+
+	status = "okay";
+
+	st21nfcb: st21nfcb@0 {
+
+		compatible = "st,st21nfcb-spi";
+
+		spi-max-frequency = <4000000>;
+
+		interrupt-parent = <&gpio5>;
+		interrupts = <2 IRQ_TYPE_EDGE_RISING>;
+
+		reset-gpios = <&gpio5 29 GPIO_ACTIVE_HIGH>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
new file mode 100644
index 0000000..51f8d2e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
@@ -0,0 +1,75 @@
+* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+
+
+Required properties:
+- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- reg: Address and length of the register set for the device
+- clocks: Phandles to the reference clock and the bus clock
+- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
+  for the bus clock.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the core's combined interrupt signal
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+- mac-address: See ethernet.txt in the same directory
+- local-mac-address: See ethernet.txt in the same directory
+- snps,en-lpi: If present it enables use of the AXI low-power interface
+- snps,write-requests: Number of write requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,read-requests: Number of read requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,burst-map: Bitmap of allowed AXI burst lengths, with the LSB
+  representing 4, then 8, etc.
+- snps,txpbl: DMA Programmable burst length for the TX DMA
+- snps,rxpbl: DMA Programmable burst length for the RX DMA
+- snps,en-tx-lpi-clockgating: Enable gating of the MAC TX clock during
+  TX low-power mode.
+- phy-handle: See ethernet.txt file in the same directory
+- mdio device tree subnode: When the GMAC has a phy connected to its local
+    mdio, there must be a device tree subnode with the following
+    required properties:
+    - compatible: Must be "snps,dwc-qos-ethernet-mdio".
+    - #address-cells: Must be <1>.
+    - #size-cells: Must be <0>.
+
+    For each phy on the mdio bus, there must be a node with the following
+    fields:
+
+    - reg: phy id used to communicate to phy.
+    - device_type: Must be "ethernet-phy".
+    - fixed-mode device tree subnode: see fixed-link.txt in the same directory
+
+Examples:
+ethernet2@40010000 {
+	clock-names = "phy_ref_clk", "apb_pclk";
+	clocks = <&clkc 17>, <&clkc 15>;
+	compatible = "snps,dwc-qos-ethernet-4.10";
+	interrupt-parent = <&intc>;
+	interrupts = <0x0 0x1e 0x4>;
+	reg = <0x40010000 0x4000>;
+	phy-handle = <&phy2>;
+	phy-mode = "gmii";
+
+	snps,en-tx-lpi-clockgating;
+	snps,en-lpi;
+	snps,write-requests = <2>;
+	snps,read-requests = <16>;
+	snps,burst-map = <0x7>;
+	snps,txpbl = <8>;
+	snps,rxpbl = <2>;
+
+	dma-coherent;
+
+	mdio {
+		#address-cells = <0x1>;
+		#size-cells = <0x0>;
+		phy2: phy@1 {
+			compatible = "ethernet-phy-ieee802.3-c22";
+			device_type = "ethernet-phy";
+			reg = <0x1>;
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
new file mode 100644
index 0000000..d543ed3
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sunxi-sid.txt
@@ -0,0 +1,21 @@
+Allwinner sunxi-sid
+
+Required properties:
+- compatible: "allwinner,sun4i-a10-sid" or "allwinner,sun7i-a20-sid"
+- reg: Should contain registers location and length
+
+= Data cells =
+The data cells are child nodes of the sunxi-sid node; their bindings are
+described in bindings/nvmem/nvmem.txt
+
+Example for sun4i:
+	sid@01c23800 {
+		compatible = "allwinner,sun4i-a10-sid";
+		reg = <0x01c23800 0x10>;
+	};
+
+Example for sun7i:
+	sid@01c23800 {
+		compatible = "allwinner,sun7i-a20-sid";
+		reg = <0x01c23800 0x200>;
+	};
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.txt b/Documentation/devicetree/bindings/nvmem/nvmem.txt
new file mode 100644
index 0000000..b52bc11
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.txt
@@ -0,0 +1,80 @@
+= NVMEM (Non-Volatile Memory) Data Device Tree Bindings =
+
+This binding is intended to represent the location of hardware
+configuration data stored in NVMEMs such as EEPROMs, efuses and so on.
+
+On a significant proportion of boards, the manufacturer has stored some
+data in NVMEM for the OS to retrieve and act upon. The OS therefore has to
+know where to retrieve this data from and how it is laid out on the
+storage device.
+
+This document describes those bindings.
+
+= Data providers =
+Contains bindings specific to provider drivers and data cells as children
+of this node.
+
+Optional properties:
+ read-only: Mark the provider as read only.
+
+= Data cells =
+These are the child nodes of the provider which contain data cell
+information like offset and size in nvmem provider.
+
+Required properties:
+reg:	specifies the offset in byte within the storage device.
+
+Optional properties:
+
+bits:	Is a pair of bit location and number of bits, which specifies the
+	offset in bits and the number of bits within the address range
+	specified by the reg property. The offset takes values from 0-7.
+
+For example:
+
+	/* Provider */
+	qfprom: qfprom@00700000 {
+		...
+
+		/* Data cells */
+		tsens_calibration: calib@404 {
+			reg = <0x404 0x10>;
+		};
+
+		tsens_calibration_bckp: calib_bckp@504 {
+			reg = <0x504 0x11>;
+			bits = <6 128>;
+		};
+
+		pvs_version: pvs-version@6 {
+			reg = <0x6 0x2>;
+			bits = <7 2>;
+		};
+
+		speed_bin: speed-bin@c {
+			reg = <0xc 0x1>;
+			bits = <2 3>;
+
+		};
+		...
+	};
+
+= Data consumers =
+Are device nodes which consume nvmem data cells/providers.
+
+Required properties:
+nvmem-cells: list of phandles to the nvmem data cells.
+nvmem-cell-names: names for each of the nvmem-cells specified. Required if
+	nvmem-cells is used.
+
+Optional properties:
+nvmem	: list of phandles to nvmem providers.
+nvmem-names: names for each nvmem provider. Required if nvmem is used.
+
+For example:
+
+	tsens {
+		...
+		nvmem-cells = <&tsens_calibration>;
+		nvmem-cell-names = "calibration";
+	};
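+
+A consumer may also reference whole providers instead of individual cells
+(a sketch only; the node name and the second provider label below are
+hypothetical examples):
+
+	blob-store {
+		...
+		nvmem = <&qfprom>, <&eeprom0>;
+		nvmem-names = "calib-store", "board-id";
+	};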
diff --git a/Documentation/devicetree/bindings/nvmem/qfprom.txt b/Documentation/devicetree/bindings/nvmem/qfprom.txt
new file mode 100644
index 0000000..4ad68b7
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/qfprom.txt
@@ -0,0 +1,35 @@
+= Qualcomm QFPROM device tree bindings =
+
+This binding is intended to represent QFPROM which is found in most QCOM SOCs.
+
+Required properties:
+- compatible: should be "qcom,qfprom"
+- reg: Should contain registers location and length
+
+= Data cells =
+The data cells are child nodes of qfprom; their bindings are described in
+bindings/nvmem/nvmem.txt
+
+Example:
+
+	qfprom: qfprom@00700000 {
+		compatible 	= "qcom,qfprom";
+		reg		= <0x00700000 0x8000>;
+		...
+		/* Data cells */
+		tsens_calibration: calib@404 {
+			reg = <0x4404 0x10>;
+		};
+	};
+
+
+= Data consumers =
+Are device nodes which consume nvmem data cells.
+
+For example:
+
+	tsens {
+		...
+		nvmem-cells = <&tsens_calibration>;
+		nvmem-cell-names = "calibration";
+	};
diff --git a/Documentation/devicetree/bindings/opp/opp.txt b/Documentation/devicetree/bindings/opp/opp.txt
new file mode 100644
index 0000000..0cb44dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/opp/opp.txt
@@ -0,0 +1,465 @@
+Generic OPP (Operating Performance Points) Bindings
+----------------------------------------------------
+
+Devices work at voltage-current-frequency combinations and some implementations
+have the liberty of choosing these. These combinations are called Operating
+Performance Points, aka OPPs. This document defines bindings for these OPPs
+applicable across a wide range of devices. For illustration purposes, this
+document uses a CPU as the example device.
+
+This document contains multiple versions of the OPP binding and only one of
+them should be used per device.
+
+Binding 1: operating-points
+============================
+
+This binding only supports voltage-frequency pairs.
+
+Properties:
+- operating-points: An array of 2-tuple items, where each item consists
+  of a frequency and a voltage, like <freq-kHz vol-uV>.
+	freq: clock frequency in kHz
+	vol: voltage in microvolts
+
+Examples:
+
+cpu@0 {
+	compatible = "arm,cortex-a9";
+	reg = <0>;
+	next-level-cache = <&L2>;
+	operating-points = <
+		/* kHz    uV */
+		792000  1100000
+		396000  950000
+		198000  850000
+	>;
+};
+
+
+Binding 2: operating-points-v2
+============================
+
+* Property: operating-points-v2
+
+Devices supporting OPPs must set their "operating-points-v2" property with a
+phandle to an OPP table in their DT node. The OPP core will use this phandle
+to find the operating points for the device.
+
+Devices may want to choose OPP tables at runtime and so can provide a list of
+phandles here. But only *one* of them should be chosen at runtime. This must be
+accompanied by a corresponding "operating-points-names" property, to uniquely
+identify the OPP tables.
+
+If required, this can be extended for SoC vendor specific bindings. Such bindings
+should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
+and should have a compatible description like: "operating-points-v2-<vendor>".
+
+Optional properties:
+- operating-points-names: Names of OPP tables (required if multiple OPP
+  tables are present), to uniquely identify them. The same list must be present
+  for all the CPUs which are sharing clock/voltage rails and hence the OPP
+  tables.
+
+* OPP Table Node
+
+This describes the OPPs belonging to a device. This node can have following
+properties:
+
+Required properties:
+- compatible: Allow OPPs to express their compatibility. It should be:
+  "operating-points-v2".
+
+- OPP nodes: One or more OPP nodes describing voltage-current-frequency
+  combinations. Their name isn't significant but their phandle can be used to
+  reference an OPP.
+
+Optional properties:
+- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
+  switch their DVFS state together, i.e. they share clock/voltage/current lines.
+  Missing property means devices have independent clock/voltage/current lines,
+  but they share OPP tables.
+
+- status: Marks the OPP table enabled/disabled.
+
+
+* OPP Node
+
+This defines voltage-current-frequency combinations along with other related
+properties.
+
+Required properties:
+- opp-hz: Frequency in Hz, expressed as a 64-bit big-endian integer.
+
+Optional properties:
+- opp-microvolt: voltage in microvolts.
+
+  A single regulator's voltage is specified with an array of size one or three.
+  A single entry is for the target voltage and three entries are for
+  <target min max> voltages.
+
+  Entries for multiple regulators must be present in the same order as
+  regulators are specified in the device's DT node.
+
+- opp-microamp: The maximum current drawn by the device in microamperes
+  considering system specific parameters (such as transients, process, aging,
+  maximum operating temperature range etc.) as necessary. This may be used to
+  set the most efficient regulator operating mode.
+
+  Should only be set if opp-microvolt is set for the OPP.
+
+  Entries for multiple regulators must be present in the same order as
+  regulators are specified in the device's DT node. If this property isn't
+  required for some regulators, then it should be set to zero for them. If it
+  isn't required for any regulator, then this property need not be present.
+
+- clock-latency-ns: Specifies the maximum possible transition latency (in
+  nanoseconds) for switching to this OPP from any other OPP.
+
+- turbo-mode: Marks the OPP to be used only for turbo modes. Turbo mode is
+  available on some platforms, where the device can run over its operating
+  frequency for a short duration of time limited by the device's power, current
+  and thermal limits.
+
+- opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
+  the table should have this.
+
+- status: Marks the node enabled/disabled.
+
+Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
+
+/ {
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 0>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply0>;
+			operating-points-v2 = <&cpu0_opp_table>;
+		};
+
+		cpu@1 {
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 0>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply0>;
+			operating-points-v2 = <&cpu0_opp_table>;
+		};
+	};
+
+	cpu0_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <970000 975000 985000>;
+			opp-microamp = <70000>;
+			clock-latency-ns = <300000>;
+			opp-suspend;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <1100000000>;
+			opp-microvolt = <980000 1000000 1010000>;
+			opp-microamp = <80000>;
+			clock-latency-ns = <310000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1025000>;
+			clock-latency-ns = <290000>;
+			turbo-mode;
+		};
+	};
+};
+
+Example 2: Single cluster, Quad-core Qualcomm Krait, switches DVFS states
+independently.
+
+/ {
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "qcom,krait";
+			reg = <0>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 0>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply0>;
+			operating-points-v2 = <&cpu_opp_table>;
+		};
+
+		cpu@1 {
+			compatible = "qcom,krait";
+			reg = <1>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 1>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply1>;
+			operating-points-v2 = <&cpu_opp_table>;
+		};
+
+		cpu@2 {
+			compatible = "qcom,krait";
+			reg = <2>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 2>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply2>;
+			operating-points-v2 = <&cpu_opp_table>;
+		};
+
+		cpu@3 {
+			compatible = "qcom,krait";
+			reg = <3>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 3>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply3>;
+			operating-points-v2 = <&cpu_opp_table>;
+		};
+	};
+
+	cpu_opp_table: opp_table {
+		compatible = "operating-points-v2";
+
+		/*
+		 * Missing opp-shared property means CPUs switch DVFS states
+		 * independently.
+		 */
+
+		opp00 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <970000 975000 985000>;
+			opp-microamp = <70000>;
+			clock-latency-ns = <300000>;
+			opp-suspend;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <1100000000>;
+			opp-microvolt = <980000 1000000 1010000>;
+			opp-microamp = <80000>;
+			clock-latency-ns = <310000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1025000>;
+			opp-microamp = <90000>;
+			clock-latency-ns = <290000>;
+			turbo-mode;
+		};
+	};
+};
+
+Example 3: Dual-cluster, Dual-core per cluster. CPUs within a cluster switch
+DVFS state together.
+
+/ {
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			compatible = "arm,cortex-a7";
+			reg = <0>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 0>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply0>;
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		cpu@1 {
+			compatible = "arm,cortex-a7";
+			reg = <1>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 0>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply0>;
+			operating-points-v2 = <&cluster0_opp>;
+		};
+
+		cpu@100 {
+			compatible = "arm,cortex-a15";
+			reg = <0x100>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 1>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply1>;
+			operating-points-v2 = <&cluster1_opp>;
+		};
+
+		cpu@101 {
+			compatible = "arm,cortex-a15";
+			reg = <0x101>;
+			next-level-cache = <&L2>;
+			clocks = <&clk_controller 1>;
+			clock-names = "cpu";
+			cpu-supply = <&cpu_supply1>;
+			operating-points-v2 = <&cluster1_opp>;
+		};
+	};
+
+	cluster0_opp: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <970000 975000 985000>;
+			opp-microamp = <70000>;
+			clock-latency-ns = <300000>;
+			opp-suspend;
+		};
+		opp01 {
+			opp-hz = /bits/ 64 <1100000000>;
+			opp-microvolt = <980000 1000000 1010000>;
+			opp-microamp = <80000>;
+			clock-latency-ns = <310000>;
+		};
+		opp02 {
+			opp-hz = /bits/ 64 <1200000000>;
+			opp-microvolt = <1025000>;
+			opp-microamp = <90000>;
+			clock-latency-ns = <290000>;
+			turbo-mode;
+		};
+	};
+
+	cluster1_opp: opp_table1 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp10 {
+			opp-hz = /bits/ 64 <1300000000>;
+			opp-microvolt = <1045000 1050000 1055000>;
+			opp-microamp = <95000>;
+			clock-latency-ns = <400000>;
+			opp-suspend;
+		};
+		opp11 {
+			opp-hz = /bits/ 64 <1400000000>;
+			opp-microvolt = <1075000>;
+			opp-microamp = <100000>;
+			clock-latency-ns = <400000>;
+		};
+		opp12 {
+			opp-hz = /bits/ 64 <1500000000>;
+			opp-microvolt = <1010000 1100000 1110000>;
+			opp-microamp = <95000>;
+			clock-latency-ns = <400000>;
+			turbo-mode;
+		};
+	};
+};
+
+Example 4: Handling multiple regulators
+
+/ {
+	cpus {
+		cpu@0 {
+			compatible = "arm,cortex-a7";
+			...
+
+			cpu-supply = <&cpu_supply0>, <&cpu_supply1>, <&cpu_supply2>;
+			operating-points-v2 = <&cpu0_opp_table>;
+		};
+	};
+
+	cpu0_opp_table: opp_table0 {
+		compatible = "operating-points-v2";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <970000>, /* Supply 0 */
+					<960000>, /* Supply 1 */
+					<960000>; /* Supply 2 */
+			opp-microamp =  <70000>,  /* Supply 0 */
+					<70000>,  /* Supply 1 */
+					<70000>;  /* Supply 2 */
+			clock-latency-ns = <300000>;
+		};
+
+		/* OR */
+
+		opp00 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
+					<960000 965000 975000>, /* Supply 1 */
+					<960000 965000 975000>; /* Supply 2 */
+			opp-microamp =  <70000>,		/* Supply 0 */
+					<70000>,		/* Supply 1 */
+					<70000>;		/* Supply 2 */
+			clock-latency-ns = <300000>;
+		};
+
+		/* OR */
+
+		opp00 {
+			opp-hz = /bits/ 64 <1000000000>;
+			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
+					<960000 965000 975000>, /* Supply 1 */
+					<960000 965000 975000>; /* Supply 2 */
+			opp-microamp =  <70000>,		/* Supply 0 */
+					<0>,			/* Supply 1 doesn't need this */
+					<70000>;		/* Supply 2 */
+			clock-latency-ns = <300000>;
+		};
+	};
+};
+
+Example 5: Multiple OPP tables
+
+/ {
+	cpus {
+		cpu@0 {
+			compatible = "arm,cortex-a7";
+			...
+
+			cpu-supply = <&cpu_supply>;
+			operating-points-v2 = <&cpu0_opp_table_slow>, <&cpu0_opp_table_fast>;
+			operating-points-names = "slow", "fast";
+		};
+	};
+
+	cpu0_opp_table_slow: opp_table_slow {
+		compatible = "operating-points-v2";
+		status = "okay";
+		opp-shared;
+
+		opp00 {
+			opp-hz = /bits/ 64 <600000000>;
+			...
+		};
+
+		opp01 {
+			opp-hz = /bits/ 64 <800000000>;
+			...
+		};
+	};
+
+	cpu0_opp_table_fast: opp_table_fast {
+		compatible = "operating-points-v2";
+		status = "okay";
+		opp-shared;
+
+		opp10 {
+			opp-hz = /bits/ 64 <1000000000>;
+			...
+		};
+
+		opp11 {
+			opp-hz = /bits/ 64 <1100000000>;
+			...
+		};
+	};
+};
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
index 3d21791..60e2516 100644
--- a/Documentation/devicetree/bindings/pci/ti-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ti-pci.txt
@@ -23,6 +23,9 @@
    interrupt-map-mask,
    interrupt-map : as specified in ../designware-pcie.txt
 
+Optional Property:
+ - gpios : Should be added if a gpio line is required to drive the PERST# line
+
 Example:
 axi {
 	compatible = "simple-bus";
diff --git a/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt b/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
new file mode 100644
index 0000000..bd61b46
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-lpc18xx-usb-otg.txt
@@ -0,0 +1,26 @@
+NXP LPC18xx/43xx internal USB OTG PHY binding
+---------------------------------------------
+
+This file contains documentation for the internal USB OTG PHY found
+in NXP LPC18xx and LPC43xx SoCs.
+
+Required properties:
+- compatible	: must be "nxp,lpc1850-usb-otg-phy"
+- clocks	: must be exactly one entry
+See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+- #phy-cells	: must be 0 for this phy
+See: Documentation/devicetree/bindings/phy/phy-bindings.txt
+
+The phy node must be a child of the creg syscon node.
+
+Example:
+creg: syscon@40043000 {
+	compatible = "nxp,lpc1850-creg", "syscon", "simple-mfd";
+	reg = <0x40043000 0x1000>;
+
+	usb0_otg_phy: phy@004 {
+		compatible = "nxp,lpc1850-usb-otg-phy";
+		clocks = <&ccu1 CLK_USB0>;
+		#phy-cells = <0>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
index 16528b9..0cebf74 100644
--- a/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
+++ b/Documentation/devicetree/bindings/phy/sun4i-usb-phy.txt
@@ -7,6 +7,8 @@
   * allwinner,sun5i-a13-usb-phy
   * allwinner,sun6i-a31-usb-phy
   * allwinner,sun7i-a20-usb-phy
+  * allwinner,sun8i-a23-usb-phy
+  * allwinner,sun8i-a33-usb-phy
 - reg : a list of offset + length pairs
 - reg-names :
   * "phy_ctrl"
@@ -17,12 +19,21 @@
 - clock-names :
   * "usb_phy" for sun4i, sun5i or sun7i
   * "usb0_phy", "usb1_phy" and "usb2_phy" for sun6i
+  * "usb0_phy", "usb1_phy" for sun8i
 - resets : a list of phandle + reset specifier pairs
 - reset-names :
   * "usb0_reset"
   * "usb1_reset"
   * "usb2_reset" for sun4i, sun6i or sun7i
 
+Optional properties:
+- usb0_id_det-gpios : gpio phandle for reading the otg id pin value
+- usb0_vbus_det-gpios : gpio phandle for detecting the presence of usb0 vbus
+- usb0_vbus_power-supply: power-supply phandle for usb0 vbus presence detect
+- usb0_vbus-supply : regulator phandle for controller usb0 vbus
+- usb1_vbus-supply : regulator phandle for controller usb1 vbus
+- usb2_vbus-supply : regulator phandle for controller usb2 vbus
+
 Example:
 	usbphy: phy@0x01c13400 {
 		#phy-cells = <1>;
@@ -32,6 +43,13 @@
 		reg-names = "phy_ctrl", "pmu1", "pmu2";
 		clocks = <&usb_clk 8>;
 		clock-names = "usb_phy";
-		resets = <&usb_clk 1>, <&usb_clk 2>;
-		reset-names = "usb1_reset", "usb2_reset";
+		resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+		reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+		usb0_id_det-gpios = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
+		usb0_vbus_det-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
+		usb0_vbus-supply = <&reg_usb0_vbus>;
+		usb1_vbus-supply = <&reg_usb1_vbus>;
+		usb2_vbus-supply = <&reg_usb2_vbus>;
 	};
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
index 9462ab7..3c821cd 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
@@ -48,7 +48,7 @@
 
 Examples:
 
-pinctrl@01c20800 {
+pio: pinctrl@01c20800 {
 	compatible = "allwinner,sun5i-a13-pinctrl";
 	reg = <0x01c20800 0x400>;
 	#address-cells = <1>;
@@ -68,3 +68,38 @@
 		allwinner,pull = <0>;
 	};
 };
+
+
+GPIO and interrupt controller
+-----------------------------
+
+This hardware also acts as a GPIO controller and an interrupt
+controller.
+
+Consumers that want to refer to one or the other (or both) should
+provide, through the usual *-gpios and interrupts properties, a cell
+with 3 arguments: first the number of the bank, then the pin inside
+that bank, and finally the flags for the GPIO/interrupt.
+
+Example:
+
+xio: gpio@38 {
+	compatible = "nxp,pcf8574a";
+	reg = <0x38>;
+
+	gpio-controller;
+	#gpio-cells = <2>;
+
+	interrupt-parent = <&pio>;
+	interrupts = <6 0 IRQ_TYPE_EDGE_FALLING>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
+};
+
+reg_usb1_vbus: usb1-vbus {
+	compatible = "regulator-fixed";
+	regulator-name = "usb1-vbus";
+	regulator-min-microvolt = <5000000>;
+	regulator-max-microvolt = <5000000>;
+	gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt
new file mode 100644
index 0000000..23ce8dc26
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/cnxt,cx92755-pinctrl.txt
@@ -0,0 +1,86 @@
+Conexant Digicolor CX92755 General Purpose Pin Mapping
+
+This document describes the device tree binding of the pin mapping hardware
+modules in the Conexant Digicolor CX92755 SoCs. The CX92755 is one of the
+Digicolor series of SoCs.
+
+=== Pin Controller Node ===
+
+Required Properties:
+
+- compatible: Must be "cnxt,cx92755-pinctrl"
+- reg: Base address of the General Purpose Pin Mapping register block and the
+  size of the block.
+- gpio-controller: Marks the device node as a GPIO controller.
+- #gpio-cells: Must be <2>. The first cell is the pin number and the
+  second cell is used to specify flags. See include/dt-bindings/gpio/gpio.h
+  for possible values.
+
+For example, the following is the bare minimum node:
+
+	pinctrl: pinctrl@f0000e20 {
+		compatible = "cnxt,cx92755-pinctrl";
+		reg = <0xf0000e20 0x100>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+As a pin controller device, in addition to the required properties, this node
+should also contain the pin configuration nodes that client devices reference,
+if any.
+
+For a general description of GPIO bindings, please refer to ../gpio/gpio.txt.
+
+=== Pin Configuration Node ===
+
+Each pin configuration node is a sub-node of the pin controller node and is a
+container of an arbitrary number of subnodes, called pin group nodes in this
+document.
+
+Please refer to the pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the definition of a
+"pin configuration node".
+
+=== Pin Group Node ===
+
+A pin group node specifies the desired pin mux for an arbitrary number of
+pins. The name of the pin group node is optional and not used.
+
+A pin group node only affects the properties specified in the node, and has no
+effect on any properties that are omitted.
+
+The pin group node accepts a subset of the generic pin config properties. For
+details of the generic pin config properties, please refer to pinctrl-bindings.txt
+and <include/linux/pinctrl/pinconfig-generic.h>.
+
+Required Pin Group Node Properties:
+
+- pins: Multiple strings. Specifies the name(s) of one or more pins to be
+  configured by this node. The format of a pin name string is "GP_xy", where x
+  is an uppercase character from 'A' to 'R', and y is a digit from 0 to 7.
+- function: String. Specifies the pin mux selection. Values must be one of:
+  "gpio", "client_a", "client_b", "client_c"
+
+Example:
+	pinctrl: pinctrl@f0000e20 {
+		compatible = "cnxt,cx92755-pinctrl";
+		reg = <0xf0000e20 0x100>;
+
+		uart0_default: uart0_active {
+			data_signals {
+				pins = "GP_O0", "GP_O1";
+				function = "client_b";
+			};
+		};
+	};
+
+	uart0: uart@f0000740 {
+		compatible = "cnxt,cx92755-usart";
+		...
+		pinctrl-0 = <&uart0_default>;
+		pinctrl-names = "default";
+	};
+
+In the example above, a single pin group configuration node defines the
+"client select" for the Rx and Tx signals of uart0. The uart0 node references
+that pin configuration node using the &uart0_default phandle.
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx6ul-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx6ul-pinctrl.txt
new file mode 100644
index 0000000..a81bbf3
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx6ul-pinctrl.txt
@@ -0,0 +1,36 @@
+* Freescale i.MX6 UltraLite IOMUX Controller
+
+Please refer to fsl,imx-pinctrl.txt in this directory for common binding part
+and usage.
+
+Required properties:
+- compatible: "fsl,imx6ul-iomuxc"
+- fsl,pins: each entry consists of 6 integers and represents the mux and config
+  setting for one pin.  The first 5 integers <mux_reg conf_reg input_reg mux_val
+  input_val> are specified using a PIN_FUNC_ID macro, which can be found in
+  imx6ul-pinfunc.h under device tree source folder.  The last integer CONFIG is
+  the pad setting value like pull-up on this pin.  Please refer to i.MX6 UltraLite
+  Reference Manual for detailed CONFIG settings.
+
+CONFIG bits definition:
+PAD_CTL_HYS                     (1 << 16)
+PAD_CTL_PUS_100K_DOWN           (0 << 14)
+PAD_CTL_PUS_47K_UP              (1 << 14)
+PAD_CTL_PUS_100K_UP             (2 << 14)
+PAD_CTL_PUS_22K_UP              (3 << 14)
+PAD_CTL_PUE                     (1 << 13)
+PAD_CTL_PKE                     (1 << 12)
+PAD_CTL_ODE                     (1 << 11)
+PAD_CTL_SPEED_LOW               (0 << 6)
+PAD_CTL_SPEED_MED               (1 << 6)
+PAD_CTL_SPEED_HIGH              (3 << 6)
+PAD_CTL_DSE_DISABLE             (0 << 3)
+PAD_CTL_DSE_260ohm              (1 << 3)
+PAD_CTL_DSE_130ohm              (2 << 3)
+PAD_CTL_DSE_87ohm               (3 << 3)
+PAD_CTL_DSE_65ohm               (4 << 3)
+PAD_CTL_DSE_52ohm               (5 << 3)
+PAD_CTL_DSE_43ohm               (6 << 3)
+PAD_CTL_DSE_37ohm               (7 << 3)
+PAD_CTL_SRE_FAST                (1 << 0)
+PAD_CTL_SRE_SLOW                (0 << 0)
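+
+Example (a minimal sketch; the pad macro names and register values below are
+illustrative and should be taken from imx6ul-pinfunc.h and the reference
+manual for a real board):
+
+iomuxc: iomuxc@020e0000 {
+	compatible = "fsl,imx6ul-iomuxc";
+	reg = <0x020e0000 0x4000>;
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX	0x1b0b1
+			MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX	0x1b0b1
+		>;
+	};
+};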
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index ed19991..d7803a2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -7,8 +7,13 @@
 	Usage: required
 	Value type: <string>
 	Definition: Should contain one of:
+		    "qcom,pm8018-mpp",
+		    "qcom,pm8038-mpp",
+		    "qcom,pm8821-mpp",
 		    "qcom,pm8841-mpp",
 		    "qcom,pm8916-mpp",
+		    "qcom,pm8917-mpp",
+		    "qcom,pm8921-mpp",
 		    "qcom,pm8941-mpp",
 		    "qcom,pma8084-mpp",
 
@@ -77,12 +82,9 @@
 	Value type: <string>
 	Definition: Specify the alternative function to be configured for the
 		    specified pins.  Valid values are:
-		    "normal",
-		    "paired",
-		    "dtest1",
-		    "dtest2",
-		    "dtest3",
-		    "dtest4"
+		    "digital",
+		    "analog",
+		    "sink"
 
 - bias-disable:
 	Usage: optional
@@ -127,12 +129,18 @@
 	Definition: Selects the power source for the specified pins. Valid power
 		    sources are defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 
-- qcom,analog-mode:
+- qcom,analog-level:
 	Usage: optional
-	Value type: <none>
-	Definition: Selects Analog mode of operation: combined with input-enable
-		    and/or output-high, output-low MPP could operate as
-		    Bidirectional Logic, Analog Input, Analog Output.
+	Value type: <u32>
+	Definition: Selects the source for analog output. Valid values are
+		    defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+		    PMIC_MPP_AOUT_LVL_*
+
+- qcom,dtest:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects which dtest rail to be routed in the various functions.
+		    Valid values are 1-4
 
 - qcom,amux-route:
 	Usage: optional
@@ -140,6 +148,10 @@
 	Definition: Selects the source for analog input. Valid values are
 		    defined in <dt-bindings/pinctrl/qcom,pmic-mpp.h>
 		    PMIC_MPP_AMUX_ROUTE_CH5, PMIC_MPP_AMUX_ROUTE_CH6...
+- qcom,paired:
+	Usage: optional
+	Value type: <none>
+	Definition: Indicates that the pin should be operating in paired mode.
 
 Example:
 
@@ -156,7 +168,7 @@
 		pm8841_default: default {
 			gpio {
 				pins = "mpp1", "mpp2", "mpp3", "mpp4";
-				function = "normal";
+				function = "digital";
 				input-enable;
 				power-source = <PM8841_MPP_S3>;
 			};
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
index 51cee44..9496934 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
@@ -58,12 +58,12 @@
 
 Pin Configuration Node Properties:
 
-- renesas,pins : An array of strings, each string containing the name of a pin.
-- renesas,groups : An array of strings, each string containing the name of a pin
+- pins : An array of strings, each string containing the name of a pin.
+- groups : An array of strings, each string containing the name of a pin
   group.
 
-- renesas,function: A string containing the name of the function to mux to the
-  pin group(s) specified by the renesas,groups property
+- function: A string containing the name of the function to mux to the pin
+  group(s) specified by the groups property.
 
   Valid values for pin, group and function names can be found in the group and
   function arrays of the PFC data file corresponding to the SoC
@@ -71,7 +71,9 @@
 
 The pin configuration parameters use the generic pinconf bindings defined in
 pinctrl-bindings.txt in this directory. The supported parameters are
-bias-disable, bias-pull-up and bias-pull-down.
+bias-disable, bias-pull-up, bias-pull-down and power-source. For pins that
+have a configurable I/O voltage, the power-source value should be the
+nominal I/O voltage in millivolts.
 
 
 GPIO
@@ -141,19 +143,19 @@
 
 		mmcif_pins: mmcif {
 			mux {
-				renesas,groups = "mmc0_data8_0", "mmc0_ctrl_0";
-				renesas,function = "mmc0";
+				groups = "mmc0_data8_0", "mmc0_ctrl_0";
+				function = "mmc0";
 			};
 			cfg {
-				renesas,groups = "mmc0_data8_0";
-				renesas,pins = "PORT279";
+				groups = "mmc0_data8_0";
+				pins = "PORT279";
 				bias-pull-up;
 			};
 		};
 
 		scifa4_pins: scifa4 {
-			renesas,groups = "scifa4_data", "scifa4_ctrl";
-			renesas,function = "scifa4";
+			groups = "scifa4_data", "scifa4_ctrl";
+			function = "scifa4";
 		};
 	};
 
diff --git a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
index f63fcb3..2213802 100644
--- a/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
+++ b/Documentation/devicetree/bindings/pinctrl/ste,nomadik.txt
@@ -3,7 +3,9 @@
 Required properties:
 - compatible: "stericsson,db8500-pinctrl", "stericsson,db8540-pinctrl",
               "stericsson,stn8815-pinctrl"
-- reg: Should contain the register physical address and length of the PRCMU.
+- nomadik-gpio-chips: array of phandles to the corresponding GPIO chips
+              (these have the register ranges used by the pin controller).
+- prcm: phandle to the PRCMU managing the back end of this pin controller
 
 Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
@@ -74,7 +76,8 @@
 
 	pinctrl@80157000 {
 		compatible = "stericsson,db8500-pinctrl";
-		reg = <0x80157000 0x2000>;
+		nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>;
+		prcm = <&prcmu>;
 
 		pinctrl-names = "default";
 
diff --git a/Documentation/devicetree/bindings/power/opp.txt b/Documentation/devicetree/bindings/power/opp.txt
deleted file mode 100644
index 0d5e7c9..0000000
--- a/Documentation/devicetree/bindings/power/opp.txt
+++ /dev/null
@@ -1,465 +0,0 @@
-Generic OPP (Operating Performance Points) Bindings
-----------------------------------------------------
-
-Devices work at voltage-current-frequency combinations and some implementations
-have the liberty of choosing these. These combinations are called Operating
-Performance Points aka OPPs. This document defines bindings for these OPPs
-applicable across wide range of devices. For illustration purpose, this document
-uses CPU as a device.
-
-This document contain multiple versions of OPP binding and only one of them
-should be used per device.
-
-Binding 1: operating-points
-============================
-
-This binding only supports voltage-frequency pairs.
-
-Properties:
-- operating-points: An array of 2-tuples items, and each item consists
-  of frequency and voltage like <freq-kHz vol-uV>.
-	freq: clock frequency in kHz
-	vol: voltage in microvolt
-
-Examples:
-
-cpu@0 {
-	compatible = "arm,cortex-a9";
-	reg = <0>;
-	next-level-cache = <&L2>;
-	operating-points = <
-		/* kHz    uV */
-		792000  1100000
-		396000  950000
-		198000  850000
-	>;
-};
-
-
-Binding 2: operating-points-v2
-============================
-
-* Property: operating-points-v2
-
-Devices supporting OPPs must set their "operating-points-v2" property with
-phandle to a OPP table in their DT node. The OPP core will use this phandle to
-find the operating points for the device.
-
-Devices may want to choose OPP tables at runtime and so can provide a list of
-phandles here. But only *one* of them should be chosen at runtime. This must be
-accompanied by a corresponding "operating-points-names" property, to uniquely
-identify the OPP tables.
-
-If required, this can be extended for SoC vendor specfic bindings. Such bindings
-should be documented as Documentation/devicetree/bindings/power/<vendor>-opp.txt
-and should have a compatible description like: "operating-points-v2-<vendor>".
-
-Optional properties:
-- operating-points-names: Names of OPP tables (required if multiple OPP
-  tables are present), to uniquely identify them. The same list must be present
-  for all the CPUs which are sharing clock/voltage rails and hence the OPP
-  tables.
-
-* OPP Table Node
-
-This describes the OPPs belonging to a device. This node can have following
-properties:
-
-Required properties:
-- compatible: Allow OPPs to express their compatibility. It should be:
-  "operating-points-v2".
-
-- OPP nodes: One or more OPP nodes describing voltage-current-frequency
-  combinations. Their name isn't significant but their phandle can be used to
-  reference an OPP.
-
-Optional properties:
-- opp-shared: Indicates that device nodes using this OPP Table Node's phandle
-  switch their DVFS state together, i.e. they share clock/voltage/current lines.
-  Missing property means devices have independent clock/voltage/current lines,
-  but they share OPP tables.
-
-- status: Marks the OPP table enabled/disabled.
-
-
-* OPP Node
-
-This defines voltage-current-frequency combinations along with other related
-properties.
-
-Required properties:
-- opp-hz: Frequency in Hz
-
-Optional properties:
-- opp-microvolt: voltage in micro Volts.
-
-  A single regulator's voltage is specified with an array of size one or three.
-  Single entry is for target voltage and three entries are for <target min max>
-  voltages.
-
-  Entries for multiple regulators must be present in the same order as
-  regulators are specified in device's DT node.
-
-- opp-microamp: The maximum current drawn by the device in microamperes
-  considering system specific parameters (such as transients, process, aging,
-  maximum operating temperature range etc.) as necessary. This may be used to
-  set the most efficient regulator operating mode.
-
-  Should only be set if opp-microvolt is set for the OPP.
-
-  Entries for multiple regulators must be present in the same order as
-  regulators are specified in device's DT node. If this property isn't required
-  for few regulators, then this should be marked as zero for them. If it isn't
-  required for any regulator, then this property need not be present.
-
-- clock-latency-ns: Specifies the maximum possible transition latency (in
-  nanoseconds) for switching to this OPP from any other OPP.
-
-- turbo-mode: Marks the OPP to be used only for turbo modes. Turbo mode is
-  available on some platforms, where the device can run over its operating
-  frequency for a short duration of time limited by the device's power, current
-  and thermal limits.
-
-- opp-suspend: Marks the OPP to be used during device suspend. Only one OPP in
-  the table should have this.
-
-- status: Marks the node enabled/disabled.
-
-Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
-
-/ {
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu@0 {
-			compatible = "arm,cortex-a9";
-			reg = <0>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 0>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply0>;
-			operating-points-v2 = <&cpu0_opp_table>;
-		};
-
-		cpu@1 {
-			compatible = "arm,cortex-a9";
-			reg = <1>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 0>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply0>;
-			operating-points-v2 = <&cpu0_opp_table>;
-		};
-	};
-
-	cpu0_opp_table: opp_table0 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp00 {
-			opp-hz = <1000000000>;
-			opp-microvolt = <970000 975000 985000>;
-			opp-microamp = <70000>;
-			clock-latency-ns = <300000>;
-			opp-suspend;
-		};
-		opp01 {
-			opp-hz = <1100000000>;
-			opp-microvolt = <980000 1000000 1010000>;
-			opp-microamp = <80000>;
-			clock-latency-ns = <310000>;
-		};
-		opp02 {
-			opp-hz = <1200000000>;
-			opp-microvolt = <1025000>;
-			clock-latency-ns = <290000>;
-			turbo-mode;
-		};
-	};
-};
-
-Example 2: Single cluster, Quad-core Qualcom-krait, switches DVFS states
-independently.
-
-/ {
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu@0 {
-			compatible = "qcom,krait";
-			reg = <0>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 0>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply0>;
-			operating-points-v2 = <&cpu_opp_table>;
-		};
-
-		cpu@1 {
-			compatible = "qcom,krait";
-			reg = <1>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 1>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply1>;
-			operating-points-v2 = <&cpu_opp_table>;
-		};
-
-		cpu@2 {
-			compatible = "qcom,krait";
-			reg = <2>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 2>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply2>;
-			operating-points-v2 = <&cpu_opp_table>;
-		};
-
-		cpu@3 {
-			compatible = "qcom,krait";
-			reg = <3>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 3>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply3>;
-			operating-points-v2 = <&cpu_opp_table>;
-		};
-	};
-
-	cpu_opp_table: opp_table {
-		compatible = "operating-points-v2";
-
-		/*
-		 * Missing opp-shared property means CPUs switch DVFS states
-		 * independently.
-		 */
-
-		opp00 {
-			opp-hz = <1000000000>;
-			opp-microvolt = <970000 975000 985000>;
-			opp-microamp = <70000>;
-			clock-latency-ns = <300000>;
-			opp-suspend;
-		};
-		opp01 {
-			opp-hz = <1100000000>;
-			opp-microvolt = <980000 1000000 1010000>;
-			opp-microamp = <80000>;
-			clock-latency-ns = <310000>;
-		};
-		opp02 {
-			opp-hz = <1200000000>;
-			opp-microvolt = <1025000>;
-			opp-microamp = <90000;
-			lock-latency-ns = <290000>;
-			turbo-mode;
-		};
-	};
-};
-
-Example 3: Dual-cluster, Dual-core per cluster. CPUs within a cluster switch
-DVFS state together.
-
-/ {
-	cpus {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		cpu@0 {
-			compatible = "arm,cortex-a7";
-			reg = <0>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 0>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply0>;
-			operating-points-v2 = <&cluster0_opp>;
-		};
-
-		cpu@1 {
-			compatible = "arm,cortex-a7";
-			reg = <1>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 0>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply0>;
-			operating-points-v2 = <&cluster0_opp>;
-		};
-
-		cpu@100 {
-			compatible = "arm,cortex-a15";
-			reg = <100>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 1>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply1>;
-			operating-points-v2 = <&cluster1_opp>;
-		};
-
-		cpu@101 {
-			compatible = "arm,cortex-a15";
-			reg = <101>;
-			next-level-cache = <&L2>;
-			clocks = <&clk_controller 1>;
-			clock-names = "cpu";
-			cpu-supply = <&cpu_supply1>;
-			operating-points-v2 = <&cluster1_opp>;
-		};
-	};
-
-	cluster0_opp: opp_table0 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp00 {
-			opp-hz = <1000000000>;
-			opp-microvolt = <970000 975000 985000>;
-			opp-microamp = <70000>;
-			clock-latency-ns = <300000>;
-			opp-suspend;
-		};
-		opp01 {
-			opp-hz = <1100000000>;
-			opp-microvolt = <980000 1000000 1010000>;
-			opp-microamp = <80000>;
-			clock-latency-ns = <310000>;
-		};
-		opp02 {
-			opp-hz = <1200000000>;
-			opp-microvolt = <1025000>;
-			opp-microamp = <90000>;
-			clock-latency-ns = <290000>;
-			turbo-mode;
-		};
-	};
-
-	cluster1_opp: opp_table1 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp10 {
-			opp-hz = <1300000000>;
-			opp-microvolt = <1045000 1050000 1055000>;
-			opp-microamp = <95000>;
-			clock-latency-ns = <400000>;
-			opp-suspend;
-		};
-		opp11 {
-			opp-hz = <1400000000>;
-			opp-microvolt = <1075000>;
-			opp-microamp = <100000>;
-			clock-latency-ns = <400000>;
-		};
-		opp12 {
-			opp-hz = <1500000000>;
-			opp-microvolt = <1010000 1100000 1110000>;
-			opp-microamp = <95000>;
-			clock-latency-ns = <400000>;
-			turbo-mode;
-		};
-	};
-};
-
-Example 4: Handling multiple regulators
-
-/ {
-	cpus {
-		cpu@0 {
-			compatible = "arm,cortex-a7";
-			...
-
-			cpu-supply = <&cpu_supply0>, <&cpu_supply1>, <&cpu_supply2>;
-			operating-points-v2 = <&cpu0_opp_table>;
-		};
-	};
-
-	cpu0_opp_table: opp_table0 {
-		compatible = "operating-points-v2";
-		opp-shared;
-
-		opp00 {
-			opp-hz = <1000000000>;
-			opp-microvolt = <970000>, /* Supply 0 */
-					<960000>, /* Supply 1 */
-					<960000>; /* Supply 2 */
-			opp-microamp =  <70000>,  /* Supply 0 */
-					<70000>,  /* Supply 1 */
-					<70000>;  /* Supply 2 */
-			clock-latency-ns = <300000>;
-		};
-
-		/* OR */
-
-		opp00 {
-			opp-hz = <1000000000>;
-			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-					<960000 965000 975000>, /* Supply 1 */
-					<960000 965000 975000>; /* Supply 2 */
-			opp-microamp =  <70000>,		/* Supply 0 */
-					<70000>,		/* Supply 1 */
-					<70000>;		/* Supply 2 */
-			clock-latency-ns = <300000>;
-		};
-
-		/* OR */
-
-		opp00 {
-			opp-hz = <1000000000>;
-			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-					<960000 965000 975000>, /* Supply 1 */
-					<960000 965000 975000>; /* Supply 2 */
-			opp-microamp =  <70000>,		/* Supply 0 */
-					<0>,			/* Supply 1 doesn't need this */
-					<70000>;		/* Supply 2 */
-			clock-latency-ns = <300000>;
-		};
-	};
-};
-
-Example 5: Multiple OPP tables
-
-/ {
-	cpus {
-		cpu@0 {
-			compatible = "arm,cortex-a7";
-			...
-
-			cpu-supply = <&cpu_supply>
-			operating-points-v2 = <&cpu0_opp_table_slow>, <&cpu0_opp_table_fast>;
-			operating-points-names = "slow", "fast";
-		};
-	};
-
-	cpu0_opp_table_slow: opp_table_slow {
-		compatible = "operating-points-v2";
-		status = "okay";
-		opp-shared;
-
-		opp00 {
-			opp-hz = <600000000>;
-			...
-		};
-
-		opp01 {
-			opp-hz = <800000000>;
-			...
-		};
-	};
-
-	cpu0_opp_table_fast: opp_table_fast {
-		compatible = "operating-points-v2";
-		status = "okay";
-		opp-shared;
-
-		opp10 {
-			opp-hz = <1000000000>;
-			...
-		};
-
-		opp11 {
-			opp-hz = <1100000000>;
-			...
-		};
-	};
-};
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 0f8ed37..025b5e7 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -48,7 +48,7 @@
 		#power-domain-cells = <1>;
 	};
 
-	child: power-controller@12340000 {
+	child: power-controller@12341000 {
 		compatible = "foo,power-controller";
 		reg = <0x12341000 0x1000>;
 		power-domains = <&parent 0>;
diff --git a/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt b/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt
new file mode 100644
index 0000000..0e6d875
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/qcom,coincell-charger.txt
@@ -0,0 +1,48 @@
+Qualcomm Coincell Charger:
+
+The hardware block controls charging for a coincell or capacitor that is
+used to provide power backup for certain features of the power management
+IC (PMIC).
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be: "qcom,pm8941-coincell"
+
+- reg:
+	Usage: required
+	Value type: <u32>
+	Definition: base address of the coincell charger registers
+
+- qcom,rset-ohms:
+	Usage: required
+	Value type: <u32>
+	Definition: resistance (in ohms) for current-limiting resistor
+		must be one of: 800, 1200, 1700, 2100
+
+- qcom,vset-millivolts:
+	Usage: required
+	Value type: <u32>
+	Definition: voltage (in millivolts) to apply for charging
+		must be one of: 2500, 3000, 3100, 3200
+
+- qcom,charger-disable:
+	Usage: optional
+	Value type: <boolean>
+	Definition: defining this property disables charging
+
+This charger is a sub-node of one of the 8941 PMIC blocks, and is specified
+as a child node in DTS of that node.  See ../mfd/qcom,spmi-pmic.txt and
+../mfd/qcom-pm8xxx.txt
+
+Example:
+
+	pm8941@0 {
+		coincell@2800 {
+			compatible = "qcom,pm8941-coincell";
+			reg = <0x2800>;
+
+			qcom,rset-ohms = <2100>;
+			qcom,vset-millivolts = <3000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
index 8b70db1..b8627e7 100644
--- a/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
+++ b/Documentation/devicetree/bindings/power/rockchip-io-domain.txt
@@ -33,6 +33,8 @@
 - compatible: should be one of:
   - "rockchip,rk3188-io-voltage-domain" for rk3188
   - "rockchip,rk3288-io-voltage-domain" for rk3288
+  - "rockchip,rk3368-io-voltage-domain" for rk3368
+  - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
 - rockchip,grf: phandle to the syscon managing the "general register files"
 
 
@@ -64,6 +66,18 @@
 - sdcard-supply: The supply connected to SDMMC0_VDD.
 - wifi-supply:   The supply connected to APIO3_VDD.  Also known as SDIO0.
 
+Possible supplies for rk3368:
+- audio-supply:  The supply connected to APIO3_VDD.
+- dvp-supply:    The supply connected to DVPIO_VDD.
+- flash0-supply: The supply connected to FLASH0_VDD.  Typically for eMMC
+- gpio30-supply: The supply connected to APIO1_VDD.
+- gpio1830-supply: The supply connected to APIO4_VDD.
+- sdcard-supply: The supply connected to SDMMC0_VDD.
+- wifi-supply:   The supply connected to APIO2_VDD.  Also known as SDIO0.
+
+Possible supplies for rk3368 pmu-domains:
+- pmu-supply:    The supply connected to PMUIO_VDD.
+- vop-supply:    The supply connected to LCDC_VDD.
 
 Example:
 
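A hedged sketch of an rk3368 node using the new compatible (the regulator
phandles are illustrative, not taken from a real board):

	io-domains {
		compatible = "rockchip,rk3368-io-voltage-domain";
		rockchip,grf = <&grf>;

		audio-supply = <&vcc_io>;
		sdcard-supply = <&vcc_sd>;
		wifi-supply = <&vccio_wl>;
	};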
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
index 8832e87..6478175 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
+++ b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
@@ -6,14 +6,14 @@
 For PSC in UART mode the needed PSC serial devices
 are specified by fsl,mpc5121-psc-uart nodes in the
 fsl,mpc5121-immr SoC node. Additionally the PSC FIFO
-Controller node fsl,mpc5121-psc-fifo is requered there:
+Controller node fsl,mpc5121-psc-fifo is required there:
 
-fsl,mpc5121-psc-uart nodes
+fsl,mpc512x-psc-uart nodes
 --------------------------
 
 Required properties :
- - compatible : Should contain "fsl,mpc5121-psc-uart" and "fsl,mpc5121-psc"
- - cell-index : Index of the PSC in hardware
+ - compatible : Should contain "fsl,<soc>-psc-uart" and "fsl,<soc>-psc"
+   Supported <soc>s: mpc5121, mpc5125
  - reg : Offset and length of the register set for the PSC device
  - interrupts : <a b> where a is the interrupt number of the
    PSC FIFO Controller and b is a field that represents an
@@ -25,12 +25,21 @@
  - fsl,rx-fifo-size : the size of the RX fifo slice (a multiple of 4)
  - fsl,tx-fifo-size : the size of the TX fifo slice (a multiple of 4)
 
+PSC in SPI mode
+---------------
 
-fsl,mpc5121-psc-fifo node
+Similar to the UART mode, a PSC can be operated in SPI mode. The compatible used
+for that is fsl,mpc5121-psc-spi. It requires a fsl,mpc5121-psc-fifo as well.
+The required and recommended properties are identical to the
+fsl,mpc5121-psc-uart nodes, just use spi instead of uart in the compatible
+string.
+
+fsl,mpc512x-psc-fifo node
 -------------------------
 
 Required properties :
- - compatible : Should be "fsl,mpc5121-psc-fifo"
+ - compatible : Should be "fsl,<soc>-psc-fifo"
+   Supported <soc>s: mpc5121, mpc5125
  - reg : Offset and length of the register set for the PSC
          FIFO Controller
  - interrupts : <a b> where a is the interrupt number of the
@@ -39,6 +48,9 @@
  - interrupt-parent : the phandle for the interrupt controller that
    services interrupts for this device.
 
+Recommended properties :
+ - clocks : specifies the clock needed to operate the fifo controller
+ - clock-names : name(s) for the clock(s) listed in clocks
 
 Example for a board using PSC0 and PSC1 devices in serial mode:
 
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt
new file mode 100644
index 0000000..0532c46
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/scfg.txt
@@ -0,0 +1,18 @@
+Freescale Supplement configuration unit (SCFG)
+
+SCFG is the supplemental configuration unit, which provides SoC-specific
+configuration and status registers for the chip, such as the PEX port
+status.
+
+Required properties:
+
+- compatible: should be "fsl,<chip>-scfg"
+- reg: should contain base address and length of SCFG memory-mapped
+registers
+
+Example:
+
+	scfg: global-utilities@fc000 {
+		compatible = "fsl,t1040-scfg";
+		reg = <0xfc000 0x1000>;
+	};
diff --git a/Documentation/devicetree/bindings/regulator/da9210.txt b/Documentation/devicetree/bindings/regulator/da9210.txt
index 3297c53..7aa9b1f 100644
--- a/Documentation/devicetree/bindings/regulator/da9210.txt
+++ b/Documentation/devicetree/bindings/regulator/da9210.txt
@@ -5,6 +5,10 @@
 - compatible:	must be "dlg,da9210"
 - reg:		the i2c slave address of the regulator. It should be 0x68.
 
+Optional properties:
+
+- interrupts:	a reference to the DA9210 interrupt, if available.
+
 Any standard regulator properties can be used to configure the single da9210
 DCDC.
 
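A hedged sketch of a da9210 node using the new optional interrupts property
(the interrupt parent and specifier are illustrative):

	da9210@68 {
		compatible = "dlg,da9210";
		reg = <0x68>;
		interrupt-parent = <&gpio3>;
		interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
		regulator-min-microvolt = <1000000>;
		regulator-max-microvolt = <1000000>;
	};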
diff --git a/Documentation/devicetree/bindings/regulator/da9211.txt b/Documentation/devicetree/bindings/regulator/da9211.txt
index eb61890..c620493 100644
--- a/Documentation/devicetree/bindings/regulator/da9211.txt
+++ b/Documentation/devicetree/bindings/regulator/da9211.txt
@@ -1,7 +1,7 @@
-* Dialog Semiconductor DA9211/DA9213 Voltage Regulator
+* Dialog Semiconductor DA9211/DA9213/DA9215 Voltage Regulator
 
 Required properties:
-- compatible: "dlg,da9211" or "dlg,da9213".
+- compatible: "dlg,da9211" or "dlg,da9213" or "dlg,da9215"
 - reg: I2C slave address, usually 0x68.
 - interrupts: the interrupt outputs of the controller
 - regulators: A node that houses a sub-node for each regulator within the
@@ -66,3 +66,31 @@
 			};
 		};
 	};
+
+
+Example 3) DA9215
+	pmic: da9215@68 {
+		compatible = "dlg,da9215";
+		reg = <0x68>;
+		interrupts = <3 27>;
+
+		regulators {
+			BUCKA {
+				regulator-name = "VBUCKA";
+				regulator-min-microvolt = < 300000>;
+				regulator-max-microvolt = <1570000>;
+				regulator-min-microamp 	= <4000000>;
+				regulator-max-microamp 	= <7000000>;
+				enable-gpios = <&gpio 27 0>;
+			};
+			BUCKB {
+				regulator-name = "VBUCKB";
+				regulator-min-microvolt = < 300000>;
+				regulator-max-microvolt = <1570000>;
+				regulator-min-microamp 	= <4000000>;
+				regulator-max-microamp 	= <7000000>;
+				enable-gpios = <&gpio 17 0>;
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/regulator/max77686.txt b/Documentation/devicetree/bindings/regulator/max77686.txt
new file mode 100644
index 0000000..0dded64
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max77686.txt
@@ -0,0 +1,71 @@
+Binding for Maxim MAX77686 regulators
+
+This is a part of the device tree bindings of MAX77686 multi-function device.
+More information can be found in ../mfd/max77686.txt file.
+
+The MAX77686 PMIC has 9 high-efficiency Buck and 26 Low-DropOut (LDO)
+regulators that can be controlled over I2C.
+
+The following properties should be present in the main device node of the MFD chip.
+
+Optional node:
+- voltage-regulators : The regulators of max77686 have to be instantiated
+  under a subnode named "voltage-regulators" using the following format.
+
+	regulator_name {
+		regulator-compatible = LDOn/BUCKn
+		standard regulator constraints....
+	};
+	refer to Documentation/devicetree/bindings/regulator/regulator.txt
+
+  The regulator node's name should be initialized with a string
+to match its hardware counterpart, as follows:
+
+	-LDOn 	:	for LDOs, where n can lie in range 1 to 26.
+			example: LDO1, LDO2, LDO26.
+	-BUCKn 	:	for BUCKs, where n can lie in range 1 to 9.
+			example: BUCK1, BUCK5, BUCK9.
+
+  Regulators which can be turned off during system suspend:
+	-LDOn	:	2, 6-8, 10-12, 14-16,
+	-BUCKn	:	1-4.
+  Use standard regulator bindings for it ('regulator-off-in-suspend').
+
+  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured for GPIO enable
+  control. To turn this feature on, this property must be added to the regulator
+  sub-node:
+	- maxim,ena-gpios :	one GPIO specifier for enable control (the gpio
+				flags are actually ignored and always
+				ACTIVE_HIGH is used)
+
+Example:
+
+	max77686: pmic@09 {
+		compatible = "maxim,max77686";
+		interrupt-parent = <&wakeup_eint>;
+		interrupts = <26 IRQ_TYPE_NONE>;
+		reg = <0x09>;
+
+		voltage-regulators {
+			ldo11_reg: LDO11 {
+				regulator-name = "vdd_ldo11";
+				regulator-min-microvolt = <1900000>;
+				regulator-max-microvolt = <1900000>;
+				regulator-always-on;
+			};
+
+			buck1_reg: BUCK1 {
+				regulator-name = "vdd_mif";
+				regulator-min-microvolt = <950000>;
+				regulator-max-microvolt = <1300000>;
+				regulator-always-on;
+				regulator-boot-on;
+			};
+
+			buck9_reg: BUCK9 {
+				regulator-name = "CAM_ISP_CORE_1.2V";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1200000>;
+				maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/max8973-regulator.txt b/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
index 55efb24..f80ea2f 100644
--- a/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/max8973-regulator.txt
@@ -25,6 +25,12 @@
 -maxim,enable-frequency-shift: boolean, enable 9% frequency shift.
 -maxim,enable-bias-control: boolean, enable bias control. By enabling this
 		startup delay can be reduced to 20us from 220us.
+-maxim,enable-etr: boolean, enable Enhanced Transient Response.
+-maxim,enable-high-etr-sensitivity: boolean, Enhanced transient response
+		circuit is enabled and set for high sensitivity. If this
+		property is present then ETR will be enabled by default.
+
+Enhanced transient response (ETR) will affect the configuration of CKADV.
 
 Example:
 
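A hedged fragment showing the new ETR property on a max8973 node (the I2C
address and voltage constraints are illustrative):

	regulator@1b {
		compatible = "maxim,max8973";
		reg = <0x1b>;
		regulator-min-microvolt = <935000>;
		regulator-max-microvolt = <1200000>;
		maxim,enable-etr;
	};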
diff --git a/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
new file mode 100644
index 0000000..02649d8
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mt6311-regulator.txt
@@ -0,0 +1,35 @@
+Mediatek MT6311 Regulator Driver
+
+Required properties:
+- compatible: "mediatek,mt6311-regulator"
+- reg: I2C slave address, usually 0x6b.
+- regulators: List of regulators provided by this controller. They are named
+  VDVFS and VBIASN.
+  The definition for each of these nodes is defined using the standard binding
+  for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+  VDVFS
+LDO:
+  VBIASN
+
+Example:
+	mt6311: pmic@6b {
+		compatible = "mediatek,mt6311-regulator";
+		reg = <0x6b>;
+
+		regulators {
+			mt6311_vcpu_reg: VDVFS {
+				regulator-name = "VDVFS";
+				regulator-min-microvolt = < 600000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-ramp-delay = <10000>;
+			};
+			mt6311_ldo_reg: VBIASN {
+				regulator-name = "VBIASN";
+				regulator-min-microvolt = <200000>;
+				regulator-max-microvolt = <800000>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
index ce91f61..ed936f0 100644
--- a/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/pwm-regulator.txt
@@ -1,27 +1,68 @@
-pwm regulator bindings
+Bindings for the Generic PWM Regulator
+======================================
+
+Currently supports 2 modes of operation:
+
+Voltage Table:		When in this mode, a voltage table (See below) of
+			predefined voltage <=> duty-cycle values must be
+			provided via DT. Limitations are that the regulator can
+			only operate at the voltages supplied in the table.
+			Intermediary duty-cycle values which would normally
+			allow finer grained voltage selection are ignored and
+			rendered useless.  However, this mode gives the user
+			more control when the assumptions made in
+			continuous-voltage mode do not hold true.
+
+Continuous Voltage:	This mode uses the regulator's maximum and minimum
+			supplied voltages specified in the
+			regulator-{min,max}-microvolt properties to calculate
+			appropriate duty-cycle values.  This allows for a much
+			more fine grained solution when compared with
+			voltage-table mode above.  This solution does make an
+			assumption that a 50% duty-cycle value will cause the
+			regulator voltage to run halfway between the
+			supplied max_uV and min_uV values.
 
 Required properties:
-- compatible: Should be "pwm-regulator"
-- pwms: OF device-tree PWM specification (see PWM binding pwm.txt)
-- voltage-table: voltage and duty table, include 2 members in each set of
-  brackets, first one is voltage(unit: uv), the next is duty(unit: percent)
+--------------------
+- compatible:		Should be "pwm-regulator"
 
-Any property defined as part of the core regulator binding defined in
-regulator.txt can also be used.
+- pwms:			PWM specification (See: ../pwm/pwm.txt)
 
-Example:
+Only required for Voltage Table Mode:
+- voltage-table: 	Voltage and Duty-Cycle table consisting of 2 cells
+			    First cell is voltage in microvolts (uV)
+			    Second cell is duty-cycle in percent (%)
+
+NB: To be clear, if voltage-table is provided, then the device will be used
+in Voltage Table Mode.  If no voltage-table is provided, then the device will
+be used in Continuous Voltage Mode.
+
+Any property defined as part of the core regulator binding can also be used.
+(See: ../regulator/regulator.txt)
+
+Continuous Voltage Example:
 	pwm_regulator {
 		compatible = "pwm-regulator;
 		pwms = <&pwm1 0 8448 0>;
+		regulator-min-microvolt = <1016000>;
+		regulator-max-microvolt = <1114000>;
+		regulator-name = "vdd_logic";
+	};
 
+Voltage Table Example:
+	pwm_regulator {
+		compatible = "pwm-regulator;
+		pwms = <&pwm1 0 8448 0>;
+		regulator-min-microvolt = <1016000>;
+		regulator-max-microvolt = <1114000>;
+		regulator-name = "vdd_logic";
+
+			      /* Voltage Duty-Cycle */
 		voltage-table = <1114000 0>,
 				<1095000 10>,
 				<1076000 20>,
 				<1056000 30>,
 				<1036000 40>,
 				<1016000 50>;
-
-		regulator-min-microvolt = <1016000>;
-		regulator-max-microvolt = <1114000>;
-		regulator-name = "vdd_logic";
 	};
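To make the continuous-voltage assumption above concrete: with
regulator-min-microvolt = <1016000> and regulator-max-microvolt = <1114000>, a
requested voltage of 1065000 uV sits halfway between the two limits and would
therefore map to roughly a 50% duty cycle under that assumption.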
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index 75b4604..d00bfd8 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -91,13 +91,65 @@
 - regulator-initial-mode:
 	Usage: optional
 	Value type: <u32>
-	Descrption: 1 = Set initial mode to high power mode (HPM), also referred
-		    to as NPM.  HPM consumes more ground current than LPM, but
+	Description: 2 = Set initial mode to auto mode (automatically select
+		    between HPM and LPM); not available on boost type
+		    regulators.
+
+		    1 = Set initial mode to high power mode (HPM), also referred
+		    to as NPM. HPM consumes more ground current than LPM, but
 		    it can source significantly higher load current. HPM is not
 		    available on boost type regulators. For voltage switch type
 		    regulators, HPM implies that over current protection and
-		    soft start are active all the time. 0 = Set initial mode to
-		    low power mode (LPM).
+		    soft start are active all the time.
+
+		    0 = Set initial mode to low power mode (LPM).
+
+- qcom,ocp-max-retries:
+	Usage: optional
+	Value type: <u32>
+	Description: Maximum number of times to try toggling a voltage switch
+		     off and back on as a result of consecutive over current
+		     events.
+
+- qcom,ocp-retry-delay:
+	Usage: optional
+	Value type: <u32>
+	Description: Time to delay in milliseconds between each voltage switch
+		     toggle after an over current event takes place.
+
+- qcom,pin-ctrl-enable:
+	Usage: optional
+	Value type: <u32>
+	Description: Bit mask specifying which hardware pins should be used to
+		     enable the regulator, if any; supported bits are:
+			0 = ignore all hardware enable signals
+			BIT(0) = follow HW0_EN signal
+			BIT(1) = follow HW1_EN signal
+			BIT(2) = follow HW2_EN signal
+			BIT(3) = follow HW3_EN signal
+
+- qcom,pin-ctrl-hpm:
+	Usage: optional
+	Value type: <u32>
+	Description: Bit mask specifying which hardware pins should be used to
+		     force the regulator into high power mode, if any;
+		     supported bits are:
+			0 = ignore all hardware enable signals
+			BIT(0) = follow HW0_EN signal
+			BIT(1) = follow HW1_EN signal
+			BIT(2) = follow HW2_EN signal
+			BIT(3) = follow HW3_EN signal
+			BIT(4) = follow PMIC awake state
+
+- qcom,vs-soft-start-strength:
+	Usage: optional
+	Value type: <u32>
+	Description: This property sets the soft start strength for voltage
+		     switch type regulators; supported values are:
+			0 = 0.05 uA
+			1 = 0.25 uA
+			2 = 0.55 uA
+			3 = 0.75 uA
 
 Example:
 
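A hedged fragment showing some of the new properties on a voltage-switch
regulator sub-node (the node name and the values are illustrative):

	5vs1 {
		regulator-initial-mode = <1>;
		qcom,ocp-max-retries = <3>;
		qcom,ocp-retry-delay = <30>;
		qcom,vs-soft-start-strength = <2>;
	};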
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index db88feb..24bd422 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -42,6 +42,7 @@
 - regulator-system-load: Load in uA present on regulator that is not captured by
   any consumer request.
 - regulator-pull-down: Enable pull down resistor when the regulator is disabled.
+- regulator-over-current-protection: Enable over current protection.
 
 Deprecated properties:
 - regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/reset/ath79-reset.txt b/Documentation/devicetree/bindings/reset/ath79-reset.txt
new file mode 100644
index 0000000..4c56330
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/ath79-reset.txt
@@ -0,0 +1,20 @@
+Binding for Qualcomm Atheros AR7xxx/AR9XXX reset controller
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required Properties:
+- compatible: has to be "qca,<soctype>-reset", "qca,ar7100-reset"
+              as fallback
+- reg: Base address and size of the controller's memory area
+- #reset-cells : Specifies the number of cells needed to encode reset
+                 line, should be 1
+
+Example:
+
+	reset-controller@1806001c {
+		compatible = "qca,ar9132-reset", "qca,ar7100-reset";
+		reg = <0x1806001c 0x4>;
+
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt b/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt
new file mode 100644
index 0000000..b4e96a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/nxp,lpc1850-rgu.txt
@@ -0,0 +1,84 @@
+NXP LPC1850  Reset Generation Unit (RGU)
+========================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: Should be "nxp,lpc1850-rgu"
+- reg: register base and length
+- clocks: phandle and clock specifier to RGU clocks
+- clock-names: should contain "delay" and "reg"
+- #reset-cells: should be 1
+
+See table below for valid peripheral reset numbers. Numbers not
+in the table below are either reserved or not applicable for
+normal operation.
+
+Reset	Peripheral
+  9	System control unit (SCU)
+ 12	ARM Cortex-M0 subsystem core (LPC43xx only)
+ 13	CPU core
+ 16	LCD controller
+ 17	USB0
+ 18	USB1
+ 19	DMA
+ 20	SDIO
+ 21	External memory controller (EMC)
+ 22	Ethernet
+ 25	Flash bank A
+ 27	EEPROM
+ 28	GPIO
+ 29	Flash bank B
+ 32	Timer0
+ 33	Timer1
+ 34	Timer2
+ 35	Timer3
+ 36	Repetitive Interrupt timer (RIT)
+ 37	State Configurable Timer (SCT)
+ 38	Motor control PWM (MCPWM)
+ 39	QEI
+ 40	ADC0
+ 41	ADC1
+ 42	DAC
+ 44	USART0
+ 45	UART1
+ 46	USART2
+ 47	USART3
+ 48	I2C0
+ 49	I2C1
+ 50	SSP0
+ 51	SSP1
+ 52	I2S0 and I2S1
+ 53	Serial Flash Interface (SPIFI)
+ 54	C_CAN1
+ 55	C_CAN0
+ 56	ARM Cortex-M0 application core (LPC4370 only)
+ 57	SGPIO (LPC43xx only)
+ 58	SPI (LPC43xx only)
+ 60	ADCHS (12-bit ADC) (LPC4370 only)
+
+Refer to NXP LPC18xx or LPC43xx user manual for more details about
+the reset signals and the connected block/peripheral.
+
+Reset provider example:
+rgu: reset-controller@40053000 {
+	compatible = "nxp,lpc1850-rgu";
+	reg = <0x40053000 0x1000>;
+	clocks = <&cgu BASE_SAFE_CLK>, <&ccu1 CLK_CPU_BUS>;
+	clock-names = "delay", "reg";
+	#reset-cells = <1>;
+};
+
+Reset consumer example:
+mac: ethernet@40010000 {
+	compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
+	reg = <0x40010000 0x2000>;
+	interrupts = <5>;
+	interrupt-names = "macirq";
+	clocks = <&ccu1 CLK_CPU_ETHERNET>;
+	clock-names = "stmmaceth";
+	resets = <&rgu 22>;
+	reset-names = "stmmaceth";
+	status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/reset/socfpga-reset.txt b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
index 32c1c8b..98c9f56 100644
--- a/Documentation/devicetree/bindings/reset/socfpga-reset.txt
+++ b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
@@ -3,6 +3,7 @@
 Required properties:
 - compatible : "altr,rst-mgr"
 - reg : Should contain 1 register ranges(address and length)
+- altr,modrst-offset : Should contain the offset of the first modrst register.
 - #reset-cells: 1
 
 Example:
@@ -10,4 +11,5 @@
 		#reset-cells = <1>;
 		compatible = "altr,rst-mgr";
 		reg = <0xffd05000 0x1000>;
+		altr,modrst-offset = <0x10>;
 	};
diff --git a/Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt b/Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt
index 54ae9f7..9ca2776 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt
@@ -39,4 +39,4 @@
 	};
 
 Macro definitions for the supported reset channels can be found in:
-include/dt-bindings/reset-controller/stih407-resets.h
+include/dt-bindings/reset/stih407-resets.h
diff --git a/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt b/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
index 5ab26b7..1cfd21d 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
@@ -43,5 +43,5 @@
 
 Macro definitions for the supported reset channels can be found in:
 
-include/dt-bindings/reset-controller/stih415-resets.h
-include/dt-bindings/reset-controller/stih416-resets.h
+include/dt-bindings/reset/stih415-resets.h
+include/dt-bindings/reset/stih416-resets.h
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
index a8d3d3c..891a2fd 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
@@ -42,5 +42,5 @@
 
 Macro definitions for the supported reset channels can be found in:
 
-include/dt-bindings/reset-controller/stih415-resets.h
-include/dt-bindings/reset-controller/stih416-resets.h
+include/dt-bindings/reset/stih415-resets.h
+include/dt-bindings/reset/stih416-resets.h
diff --git a/Documentation/devicetree/bindings/reset/zynq-reset.txt b/Documentation/devicetree/bindings/reset/zynq-reset.txt
new file mode 100644
index 0000000..5860120
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/zynq-reset.txt
@@ -0,0 +1,68 @@
+Xilinx Zynq Reset Manager
+
+The Zynq AP-SoC has several different resets.
+
+See Chapter 26 of the Zynq TRM (UG585) for more information about Zynq resets.
+
+Required properties:
+- compatible: "xlnx,zynq-reset"
+- reg: SLCR offset and size taken via syscon <0x200 0x48>
+- syscon: <&slcr>
+  This should be a phandle to the Zynq's SLCR registers.
+- #reset-cells: Must be 1
+
+The Zynq Reset Manager needs to be a child node of the SLCR.
+
+Example:
+	rstc: rstc@200 {
+		compatible = "xlnx,zynq-reset";
+		reg = <0x200 0x48>;
+		#reset-cells = <1>;
+		syscon = <&slcr>;
+	};
+
+Reset outputs:
+ 0  : soft reset
+ 32 : ddr reset
+ 64 : topsw reset
+ 96 : dmac reset
+ 128: usb0 reset
+ 129: usb1 reset
+ 160: gem0 reset
+ 161: gem1 reset
+ 164: gem0 rx reset
+ 165: gem1 rx reset
+ 166: gem0 ref reset
+ 167: gem1 ref reset
+ 192: sdio0 reset
+ 193: sdio1 reset
+ 196: sdio0 ref reset
+ 197: sdio1 ref reset
+ 224: spi0 reset
+ 225: spi1 reset
+ 226: spi0 ref reset
+ 227: spi1 ref reset
+ 256: can0 reset
+ 257: can1 reset
+ 258: can0 ref reset
+ 259: can1 ref reset
+ 288: i2c0 reset
+ 289: i2c1 reset
+ 320: uart0 reset
+ 321: uart1 reset
+ 322: uart0 ref reset
+ 323: uart1 ref reset
+ 352: gpio reset
+ 384: lqspi reset
+ 385: qspi ref reset
+ 416: smc reset
+ 417: smc ref reset
+ 448: ocm reset
+ 512: fpga0 out reset
+ 513: fpga1 out reset
+ 514: fpga2 out reset
+ 515: fpga3 out reset
+ 544: a9 reset 0
+ 545: a9 reset 1
+ 552: peri reset
+
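A hedged consumer sketch picking one entry from the table above (the
peripheral node details are illustrative):

	gem0: ethernet@e000b000 {
		compatible = "cdns,zynq-gem";
		reg = <0xe000b000 0x1000>;
		resets = <&rstc 160>;	/* gem0 reset */
	};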
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
index 34c1505..5d3791e7 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.txt
@@ -5,6 +5,7 @@
 - reg: physical base address of the controller and length of memory mapped
   region.
 - interrupts: rtc alarm/event interrupt
+- clocks: phandle to input clock.
 
 Example:
 
@@ -12,4 +13,5 @@
 	compatible = "atmel,at91rm9200-rtc";
 	reg = <0xfffffe00 0x100>;
 	interrupts = <1 4 7>;
+	clocks = <&clk32k>;
 };
diff --git a/Documentation/devicetree/bindings/rtc/rtc-mxc.txt b/Documentation/devicetree/bindings/rtc/rtc-mxc.txt
new file mode 100644
index 0000000..5bcd31d
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-mxc.txt
@@ -0,0 +1,26 @@
+* Real Time Clock of the i.MX SoCs
+
+RTC controller for the i.MX SoCs
+
+Required properties:
+- compatible: Should be "fsl,imx1-rtc" or "fsl,imx21-rtc".
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- interrupts: IRQ line for the RTC.
+- clocks: should contain two entries:
+  * one for the input reference
+  * one for the SoC RTC
+- clock-names: should contain:
+  * "ref" for the input reference clock
+  * "ipg" for the SoC RTC clock
+
+Example:
+
+rtc@10007000 {
+	compatible = "fsl,imx21-rtc";
+	reg = <0x10007000 0x1000>;
+	interrupts = <22>;
+	clocks = <&clks IMX27_CLK_CKIL>,
+		 <&clks IMX27_CLK_RTC_IPG_GATE>;
+	clock-names = "ref", "ipg";
+};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-omap.txt b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
index 4ba4dbd..43a8366 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-omap.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-omap.txt
@@ -8,6 +8,7 @@
 			    Wakeup generation for event Alarm. It can also be
 			    used to control an external PMIC via the
 			    pmic_power_en pin.
+	- "ti,am4372-rtc" - for RTC IP used similar to that on AM437X SoC family.
 - reg: Address range of rtc register set
 - interrupts: rtc timer, alarm interrupts in order
 - interrupt-parent: phandle for the interrupt controller
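A hedged sketch of a node using the new compatible (the register address and
interrupt numbers are illustrative):

	rtc@44e3e000 {
		compatible = "ti,am4372-rtc";
		reg = <0x44e3e000 0x1000>;
		interrupts = <75>, <76>;
	};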
diff --git a/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt b/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
index 73407f50..daf8826 100644
--- a/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
+++ b/Documentation/devicetree/bindings/rtc/rtc-st-lpc.txt
@@ -1,20 +1,23 @@
 STMicroelectronics Low Power Controller (LPC) - RTC
 ===================================================
 
-LPC currently supports Watchdog OR Real Time Clock functionality.
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
 
 [See: ../watchdog/st_lpc_wdt.txt for Watchdog options]
+[See: ../timer/st,stih407-lpc for Clocksource options]
 
 Required properties
 
-- compatible 	: Must be one of: "st,stih407-lpc" "st,stih416-lpc"
-				  "st,stih415-lpc" "st,stid127-lpc"
+- compatible 	: Must be: "st,stih407-lpc"
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
-- st,lpc-mode	: The LPC can run either one of two modes ST_LPC_MODE_RTC [0] or
-		  ST_LPC_MODE_WDT [1].  One (and only one) mode must be
-		  selected.
+- st,lpc-mode	: The LPC can run in one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+                  One (and only one) mode must be selected.
 
 Example:
 	lpc@fde05000 {
diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt
index 90787aa..e6e6142 100644
--- a/Documentation/devicetree/bindings/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt
@@ -22,6 +22,8 @@
 		memory peripheral interface and USART DMA channel ID, FIFO configuration.
 		Refer to dma.txt and atmel-dma.txt for details.
 	- dma-names: "rx" for RX channel, "tx" for TX channel.
+- atmel,fifo-size: maximum number of data entries the RX and TX FIFOs can
+  store, for FIFO-capable USARTs.
 
 <chip> compatible description:
 - at91rm9200:  legacy USART support
@@ -57,4 +59,5 @@
 		dmas = <&dma0 2 0x3>,
 		       <&dma0 2 0x204>;
 		dma-names = "tx", "rx";
+		atmel,fifo-size = <32>;
 	};
diff --git a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
index ebcbb62..51b3c9e 100644
--- a/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
+++ b/Documentation/devicetree/bindings/serial/axis,etraxfs-uart.txt
@@ -6,7 +6,7 @@
 - interrupts: device interrupt
 
 Optional properties:
-- {dtr,dsr,ri,cd}-gpios: specify a GPIO for DTR/DSR/RI/CD
+- {dtr,dsr,rng,dcd}-gpios: specify a GPIO for DTR/DSR/RI/DCD
   line respectively.
 
 Example:
@@ -16,4 +16,8 @@
 	reg = <0xb0026000 0x1000>;
 	interrupts = <68>;
 	status = "disabled";
+	dtr-gpios = <&sysgpio 0 GPIO_ACTIVE_LOW>;
+	dsr-gpios = <&sysgpio 1 GPIO_ACTIVE_LOW>;
+	rng-gpios = <&sysgpio 2 GPIO_ACTIVE_LOW>;
+	dcd-gpios = <&sysgpio 3 GPIO_ACTIVE_LOW>;
 };
diff --git a/Documentation/devicetree/bindings/serial/mtk-uart.txt b/Documentation/devicetree/bindings/serial/mtk-uart.txt
index 8d63f1d..2d47add 100644
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -5,10 +5,12 @@
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8173-uart" for MT8173 compatible UARTS
+  * "mediatek,mt6795-uart" for MT6795 compatible UARTS
   * "mediatek,mt6589-uart" for MT6589 compatible UARTS
   * "mediatek,mt6582-uart" for MT6582 compatible UARTS
-  * "mediatek,mt6577-uart" for all compatible UARTS (MT8173, MT6589, MT6582, 
-	MT6577)
+  * "mediatek,mt6580-uart" for MT6580 compatible UARTS
+  * "mediatek,mt6577-uart" for all compatible UARTS (MT8173, MT6795,
+        MT6589, MT6582, MT6580, MT6577)
 
 - reg: The base address of the UART register bank.
 
diff --git a/Documentation/devicetree/bindings/serial/omap_serial.txt b/Documentation/devicetree/bindings/serial/omap_serial.txt
index 54c2a15..7a71b5d 100644
--- a/Documentation/devicetree/bindings/serial/omap_serial.txt
+++ b/Documentation/devicetree/bindings/serial/omap_serial.txt
@@ -4,6 +4,9 @@
 - compatible : should be "ti,omap2-uart" for OMAP2 controllers
 - compatible : should be "ti,omap3-uart" for OMAP3 controllers
 - compatible : should be "ti,omap4-uart" for OMAP4 controllers
+- compatible : should be "ti,am4372-uart" for AM437x controllers
+- compatible : should be "ti,am3352-uart" for AM335x controllers
+- compatible : should be "ti,dra742-uart" for DRA7x controllers
 - reg : address and length of the register space
 - interrupts or interrupts-extended : Should contain the uart interrupt
                                       specifier or both the interrupt
diff --git a/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
new file mode 100644
index 0000000..c051114
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/mediatek/scpsys.txt
@@ -0,0 +1,41 @@
+MediaTek SCPSYS
+===============
+
+The System Control Processor System (SCPSYS) has several power management
+related tasks in the system. The tasks include thermal measurement, dynamic
+voltage frequency scaling (DVFS), interrupt filtering and low-level sleep control.
+The System Power Manager (SPM) inside the SCPSYS is for the MTCMOS power
+domain control.
+
+The driver implements the Generic PM domain bindings described in
+power/power_domain.txt. It provides the power domains defined in
+include/dt-bindings/power/mt8173-power.h.
+
+Required properties:
+- compatible: Must be "mediatek,mt8173-scpsys"
+- #power-domain-cells: Must be 1
+- reg: Address range of the SCPSYS unit
+- infracfg: must contain a phandle to the infracfg controller
+- clocks, clock-names: clocks according to the common clock binding.
+                      The needed clocks are "mm" and "mfg". These are the
+		      clocks which the hardware needs to have enabled before
+		      enabling certain power domains.
+
+Example:
+
+	scpsys: scpsys@10006000 {
+		#power-domain-cells = <1>;
+		compatible = "mediatek,mt8173-scpsys";
+		reg = <0 0x10006000 0 0x1000>;
+		infracfg = <&infracfg>;
+		clocks = <&clk26m>,
+			 <&topckgen CLK_TOP_MM_SEL>;
+		clock-names = "mfg", "mm";
+	};
+
+Example consumer:
+
+	afe: mt8173-afe-pcm@11220000 {
+		compatible = "mediatek,mt8173-afe-pcm";
+		power-domains = <&scpsys MT8173_POWER_DOMAIN_AUDIO>;
+	};
diff --git a/Documentation/devicetree/bindings/soc/qcom,smd-rpm.txt b/Documentation/devicetree/bindings/soc/qcom,smd-rpm.txt
new file mode 100644
index 0000000..e27f5c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom,smd-rpm.txt
@@ -0,0 +1,117 @@
+Qualcomm Resource Power Manager (RPM) over SMD
+
+This driver is used to interface with the Resource Power Manager (RPM) found in
+various Qualcomm platforms. The RPM allows each component in the system to vote
+for the state of system resources, such as clocks, regulators and bus
+frequencies.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be one of:
+		    "qcom,rpm-msm8974"
+
+- qcom,smd-channels:
+	Usage: required
+	Value type: <stringlist>
+	Definition: Shared Memory channel used for communication with the RPM
+
+= SUBDEVICES
+
+The RPM exposes resources to its subnodes. The below bindings specify the set
+of valid subnodes that can operate on these resources.
+
+== Regulators
+
+Regulator nodes are identified by their compatible:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be one of:
+		    "qcom,rpm-pm8841-regulators"
+		    "qcom,rpm-pm8941-regulators"
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_s7-supply:
+- vdd_s8-supply:
+	Usage: optional (pm8841 only)
+	Value type: <phandle>
+	Definition: reference to regulator supplying the input pin, as
+		    described in the data sheet
+
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_l1_l3-supply:
+- vdd_l2_lvs1_2_3-supply:
+- vdd_l4_l11-supply:
+- vdd_l5_l7-supply:
+- vdd_l6_l12_l14_l15-supply:
+- vdd_l8_l16_l18_l19-supply:
+- vdd_l9_l10_l17_l22-supply:
+- vdd_l13_l20_l23_l24-supply:
+- vdd_l21-supply:
+- vin_5vs-supply:
+	Usage: optional (pm8941 only)
+	Value type: <phandle>
+	Definition: reference to regulator supplying the input pin, as
+		    described in the data sheet
+
+The regulator node houses sub-nodes for each regulator within the device. Each
+sub-node is identified using the node's name, with valid values listed for each
+of the pmics below.
+
+pm8841:
+	s1, s2, s3, s4, s5, s6, s7, s8
+
+pm8941:
+	s1, s2, s3, s4, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13,
+	l14, l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2,
+	lvs3, 5vs1, 5vs2
+
+The content of each sub-node is defined by the standard binding for regulators -
+see regulator.txt.
+
+= EXAMPLE
+
+	smd {
+		compatible = "qcom,smd";
+
+		rpm {
+			interrupts = <0 168 1>;
+			qcom,ipc = <&apcs 8 0>;
+			qcom,smd-edge = <15>;
+
+			rpm_requests {
+				compatible = "qcom,rpm-msm8974";
+				qcom,smd-channels = "rpm_requests";
+
+				pm8941-regulators {
+					compatible = "qcom,rpm-pm8941-regulators";
+					vdd_l13_l20_l23_l24-supply = <&pm8941_boost>;
+
+					pm8941_s3: s3 {
+						regulator-min-microvolt = <1800000>;
+						regulator-max-microvolt = <1800000>;
+					};
+
+					pm8941_boost: s4 {
+						regulator-min-microvolt = <5000000>;
+						regulator-max-microvolt = <5000000>;
+					};
+
+					pm8941_l20: l20 {
+						regulator-min-microvolt = <2950000>;
+						regulator-max-microvolt = <2950000>;
+					};
+				};
+			};
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
new file mode 100644
index 0000000..f65c76d
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
@@ -0,0 +1,79 @@
+Qualcomm Shared Memory Driver (SMD) binding
+
+This binding describes the Qualcomm Shared Memory Driver, a FIFO-based
+communication channel for sending data between the various subsystems in
+Qualcomm platforms.
+
+- compatible:
+	Usage: required
+	Value type: <stringlist>
+	Definition: must be "qcom,smd"
+
+= EDGES
+
+Each subnode of the SMD node represents a remote subsystem or a remote
+processor of some sort - or in SMD language an "edge". The name of the edges
+are not important.
+The edge is described by the following properties:
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify the IRQ used by the remote processor to
+		    signal this processor about communication related updates
+
+- qcom,ipc:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: three entries specifying the outgoing ipc bit used for
+		    signaling the remote processor:
+		    - phandle to a syscon node representing the apcs registers
+		    - u32 representing offset to the register within the syscon
+		    - u32 representing the ipc bit within the register
+
+- qcom,smd-edge:
+	Usage: required
+	Value type: <u32>
+	Definition: the identifier of the remote processor in the smd channel
+		    allocation table
+
+= SMD DEVICES
+
+In turn, subnodes of the "edges" represent devices tied to SMD channels on that
+"edge". The names of the devices are not important. The properties of these
+nodes are defined by the individual bindings for the SMD devices - but must
+contain the following property:
+
+- qcom,smd-channels:
+	Usage: required
+	Value type: <stringlist>
+	Definition: a list of channels tied to this device, used for matching
+		    the device to channels
+
+= EXAMPLE
+
+The following example represents a smd node, with one edge representing the
+"rpm" subsystem. For the "rpm" subsystem we have a device tied to the
+"rpm_request" channel.
+
+	apcs: syscon@f9011000 {
+		compatible = "syscon";
+		reg = <0xf9011000 0x1000>;
+	};
+
+	smd {
+		compatible = "qcom,smd";
+
+		rpm {
+			interrupts = <0 168 1>;
+			qcom,ipc = <&apcs 8 0>;
+			qcom,smd-edge = <15>;
+
+			rpm_requests {
+				compatible = "qcom,rpm-msm8974";
+				qcom,smd-channels = "rpm_requests";
+
+				...
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/sound/cs4349.txt b/Documentation/devicetree/bindings/sound/cs4349.txt
new file mode 100644
index 0000000..54c117b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs4349.txt
@@ -0,0 +1,19 @@
+CS4349 audio CODEC
+
+Required properties:
+
+  - compatible : "cirrus,cs4349"
+
+  - reg : the I2C address of the device for I2C
+
+Optional properties:
+
+  - reset-gpios : a GPIO spec for the reset pin.
+
+Example:
+
+codec: cs4349@48 {
+        compatible = "cirrus,cs4349";
+        reg = <0x48>;
+        reset-gpios = <&gpio 54 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/ics43432.txt b/Documentation/devicetree/bindings/sound/ics43432.txt
new file mode 100644
index 0000000..b02e3a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ics43432.txt
@@ -0,0 +1,17 @@
+Invensense ICS-43432 MEMS microphone with I2S output.
+
+There are no software configuration options for this device; indeed, the only
+host connection is the I2S interface. Apart from requirements on clock
+frequency (460 kHz to 3.379 MHz according to the data sheet) there must be
+64 clock cycles in each stereo output frame; 24 of the 32 available bits
+contain audio data. A hardware pin determines if the device outputs data
+on the left or right channel of the I2S frame.
+
+Required properties:
+  - compatible : Must be "invensense,ics43432"
+
+Example:
+
+	ics43432: ics43432 {
+		compatible = "invensense,ics43432";
+	};
diff --git a/Documentation/devicetree/bindings/sound/max98357a.txt b/Documentation/devicetree/bindings/sound/max98357a.txt
index a7a149a..28645a2 100644
--- a/Documentation/devicetree/bindings/sound/max98357a.txt
+++ b/Documentation/devicetree/bindings/sound/max98357a.txt
@@ -4,7 +4,11 @@
 
 Required properties:
 - compatible   : "maxim,max98357a"
-- sdmode-gpios : GPIO specifier for the GPIO -> DAC SDMODE pin
+
+Optional properties:
+- sdmode-gpios : GPIO specifier for the chip's SD_MODE pin.
+        If this option is not specified then the driver does not manage
+        the pin state (e.g. the chip is always on).
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
index 13e2ef4..275c6ea 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.txt
@@ -8,10 +8,10 @@
 - interrupts : The interrupt from the HDA controller.
 - clocks : Must contain an entry for each required entry in clock-names.
   See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries: hda, hdacodec_2x, hda2hdmi
+- clock-names : Must include the following entries: hda, hda2hdmi, hda2codec_2x
 - resets : Must contain an entry for each entry in reset-names.
   See ../reset/reset.txt for details.
-- reset-names : Must include the following entries: hda, hdacodec_2x, hda2hdmi
+- reset-names : Must include the following entries: hda, hda2hdmi, hda2codec_2x
 
 Example:
 
@@ -24,7 +24,7 @@
 		 <&tegra_car TEGRA124_CLK_HDA2CODEC_2X>;
 	clock-names = "hda", "hda2hdmi", "hda2codec_2x";
 	resets = <&tegra_car 125>, /* hda */
-		 <&tegra_car 128>; /* hda2hdmi */
-		 <&tegra_car 111>, /* hda2codec_2x */
+		 <&tegra_car 128>, /* hda2hdmi */
+		 <&tegra_car 111>; /* hda2codec_2x */
 	reset-names = "hda", "hda2hdmi", "hda2codec_2x";
 };
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index b6b3a78..1173395 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -18,6 +18,12 @@
 - rcar_sound,src		: Should contain SRC feature.
 				  The number of SRC subnode should be same as HW.
 				  see below for detail.
+- rcar_sound,ctu		: Should contain CTU feature.
+				  The number of CTU subnode should be same as HW.
+				  see below for detail.
+- rcar_sound,mix		: Should contain MIX feature.
+				  The number of MIX subnode should be same as HW.
+				  see below for detail.
 - rcar_sound,dvc		: Should contain DVC feature.
 				  The number of DVC subnode should be same as HW.
 				  see below for detail.
@@ -90,6 +96,22 @@
 		};
 	};
 
+	rcar_sound,mix {
+		mix0: mix@0 { };
+		mix1: mix@1 { };
+	};
+
+	rcar_sound,ctu {
+		ctu00: ctu@0 { };
+		ctu01: ctu@1 { };
+		ctu02: ctu@2 { };
+		ctu03: ctu@3 { };
+		ctu10: ctu@4 { };
+		ctu11: ctu@5 { };
+		ctu12: ctu@6 { };
+		ctu13: ctu@7 { };
+	};
+
 	rcar_sound,src {
 		src0: src@0 {
 			interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
index c641550..962748a 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsrc-card.txt
@@ -6,6 +6,7 @@
 
 - compatible				: "renesas,rsrc-card,<board>"
 					  Examples with soctypes are:
+					    - "renesas,rsrc-card"
 					    - "renesas,rsrc-card,lager"
 					    - "renesas,rsrc-card,koelsch"
 Optional properties:
@@ -29,6 +30,12 @@
 - frame-inversion			: bool property. Add this if the
 					  dai-link uses frame clock inversion.
 - convert-rate				: platform specified sampling rate convert
+- audio-prefix				: see audio-routing
+- audio-routing				: A list of the connections between audio components.
+					  Each entry is a pair of strings, the first being the connection's sink,
+					  the second being the connection's source.
+					  Use audio-prefix if some components use the same sink/source names.
+					  It can only be used if the compatible is "renesas,rsrc-card".
 
 Required CPU/CODEC subnodes properties:
 
diff --git a/Documentation/devicetree/bindings/sound/rockchip-max98090.txt b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
new file mode 100644
index 0000000..a805aa9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-max98090.txt
@@ -0,0 +1,19 @@
+ROCKCHIP with MAX98090 CODEC
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-max98090"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+  connected to the CODEC
+- rockchip,audio-codec: The phandle of the MAX98090 audio codec
+- rockchip,headset-codec: The phandle of Ext chip for jack detection
+
+Example:
+
+sound {
+	compatible = "rockchip,rockchip-audio-max98090";
+	rockchip,model = "ROCKCHIP-I2S";
+	rockchip,i2s-controller = <&i2s>;
+	rockchip,audio-codec = <&max98090>;
+	rockchip,headset-codec = <&headsetcodec>;
+};
diff --git a/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
new file mode 100644
index 0000000..411a62b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/rockchip-rt5645.txt
@@ -0,0 +1,17 @@
+ROCKCHIP with RT5645/RT5650 CODECS
+
+Required properties:
+- compatible: "rockchip,rockchip-audio-rt5645"
+- rockchip,model: The user-visible name of this sound complex
+- rockchip,i2s-controller: The phandle of the Rockchip I2S controller that's
+  connected to the CODEC
+- rockchip,audio-codec: The phandle of the RT5645/RT5650 audio codec
+
+Example:
+
+sound {
+	compatible = "rockchip,rockchip-audio-rt5645";
+	rockchip,model = "ROCKCHIP-I2S";
+	rockchip,i2s-controller = <&i2s>;
+	rockchip,audio-codec = <&rt5645>;
+};
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
new file mode 100644
index 0000000..028fa1c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -0,0 +1,155 @@
+STMicroelectronics sti ASoC cards
+
+The sti ASoC Sound Card can be used for all sti SoCs using the internal sti-sas
+codec or external codecs.
+
+The sti sound drivers expose the sti SoC audio interface through the generic
+ASoC simple card. For details about the sound card declaration please refer to
+Documentation/devicetree/bindings/sound/simple-card.txt.
+
+1) sti-uniperiph-dai: audio dai device.
+---------------------------------------
+
+Required properties:
+  - compatible: "st,sti-uni-player" or "st,sti-uni-reader"
+
+  - st,syscfg: phandle to boot-device system configuration registers
+
+  - clock-names: name of the clocks listed in clocks property in the same order
+
+  - reg: CPU DAI IP base address and size entries, listed in the same
+	 order as the CPU_DAI properties.
+
+  - reg-names: names of the mapped memory regions listed in the reg property,
+	       in the same order.
+
+  - interrupts: CPU_DAI interrupt line, listed in the same order as the
+		CPU_DAI properties.
+
+  - dmas: CPU_DAI DMA controller phandle and DMA request line, listed in the
+	 same order as the CPU_DAI properties.
+
+  - dma-names: identifier string for each DMA request line in the dmas property.
+	"tx" for "st,sti-uni-player" compatibility
+	"rx" for "st,sti-uni-reader" compatibility
+
+  - version: IP version integrated in SOC.
+
+  - dai-name: DAI name that describes the IP.
+
+Required properties ("st,sti-uni-player" compatibility only):
+  - clocks: CPU_DAI IP clock source, listed in the same order as the
+	    CPU_DAI properties.
+
+  - uniperiph-id: internal SOC IP instance ID.
+
+  - IP mode: IP working mode depending on associated codec.
+	"HDMI"  connected to HDMI codec IP and IEC HDMI formats.
+	"SPDIF" connected to SPDIF codec and supports SPDIF formats.
+	"PCM"   PCM standard mode for I2S or TDM bus.
+
+Optional properties:
+  - pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
+	       external codecs connection.
+
+  - pinctrl-names: should contain only one value - "default".
+
+Example:
+
+	sti_uni_player2: sti-uni-player@2 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_PCM_2>;
+		reg = <0x8D82000 0x158>;
+		interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 4 0 1>;
+		dai-name = "Uni Player #1 (DAC)";
+		dma-names = "tx";
+		uniperiph-id = <2>;
+		version = <5>;
+		mode = "PCM";
+	};
+
+	sti_uni_player3: sti-uni-player@3 {
+		compatible = "st,sti-uni-player";
+		status = "okay";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		clocks = <&clk_s_d0_flexgen CLK_SPDIFF>;
+		reg = <0x8D85000 0x158>;
+		interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 7 0 1>;
+		dma-names = "tx";
+		dai-name = "Uni Player #1 (PIO)";
+		uniperiph-id = <3>;
+		version = <5>;
+		mode = "SPDIF";
+	};
+
+	sti_uni_reader1: sti-uni-reader@1 {
+		compatible = "st,sti-uni-reader";
+		status = "disabled";
+		#sound-dai-cells = <0>;
+		st,syscfg = <&syscfg_core>;
+		reg = <0x8D84000 0x158>;
+		interrupts = <GIC_SPI 88 IRQ_TYPE_NONE>;
+		dmas = <&fdma0 6 0 1>;
+		dma-names = "rx";
+		dai-name = "Uni Reader #1 (HDMI RX)";
+		version = <3>;
+	};
+
+2) sti-sas-codec: internal audio codec IPs driver
+-------------------------------------------------
+
+Required properties:
+  - compatible: "st,sti<chip>-sas-codec".
+	Should be "st,stih416-sas-codec" or "st,stih407-sas-codec"
+
+  - st,syscfg: phandle to boot-device system configuration registers.
+
+  - pinctrl-0: SPDIF PIO description.
+
+  - pinctrl-names: should contain only one value - "default".
+
+Example:
+	sti_sas_codec: sti-sas-codec {
+		compatible = "st,stih407-sas-codec";
+		#sound-dai-cells = <1>;
+		st,reg_audio = <&syscfg_core>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_spdif_out >;
+	};
+
+Example of audio card declaration:
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "sti audio card";
+		status = "okay";
+
+		simple-audio-card,dai-link@0 {
+			/* DAC */
+			format = "i2s";
+			dai-tdm-slot-width = <32>;
+			cpu {
+				sound-dai = <&sti_uni_player2>;
+			};
+
+			codec {
+				sound-dai = <&sti_sasg_codec 1>;
+			};
+		};
+		simple-audio-card,dai-link@1 {
+			/* SPDIF */
+			format = "left_j";
+			cpu {
+				sound-dai = <&sti_uni_player3>;
+			};
+
+			codec {
+				sound-dai = <&sti_sasg_codec 0>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
index bd99193..204b311 100644
--- a/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
+++ b/Documentation/devicetree/bindings/spi/snps,dw-apb-ssi.txt
@@ -10,6 +10,8 @@
 Optional properties:
 - cs-gpios : Specifies the gpio pis to be used for chipselects.
 - num-cs : The number of chipselects. If omitted, this will default to 4.
+- reg-io-width : The I/O register width (in bytes) implemented by this
+  device.  Supported values are 2 or 4 (the default).
 
 Child nodes as per the generic SPI binding.
 
diff --git a/Documentation/devicetree/bindings/spi/spi-davinci.txt b/Documentation/devicetree/bindings/spi/spi-davinci.txt
index 12ecfe9..d1e914a 100644
--- a/Documentation/devicetree/bindings/spi/spi-davinci.txt
+++ b/Documentation/devicetree/bindings/spi/spi-davinci.txt
@@ -12,6 +12,8 @@
 - compatible:
 	- "ti,dm6441-spi" for SPI used similar to that on DM644x SoC family
 	- "ti,da830-spi" for SPI used similar to that on DA8xx SoC family
+	- "ti,keystone-spi" for SPI used similar to that on Keystone2 SoC
+		family
 - reg: Offset and length of SPI controller register space
 - num-cs: Number of chip selects. This includes internal as well as
 	GPIO chip selects.
diff --git a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
index e02fbf1..494db60 100644
--- a/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-img-spfi.txt
@@ -21,6 +21,7 @@
 Optional properties:
 - img,supports-quad-mode: Should be set if the interface supports quad mode
   SPI transfers.
+- spfi-max-frequency: Maximum speed supported by the spfi block.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
new file mode 100644
index 0000000..dcefc43
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -0,0 +1,51 @@
+Binding for MTK SPI controller
+
+Required properties:
+- compatible: should be one of the following.
+    - mediatek,mt8173-spi: for mt8173 platforms
+    - mediatek,mt8135-spi: for mt8135 platforms
+    - mediatek,mt6589-spi: for mt6589 platforms
+
+- #address-cells: should be 1.
+
+- #size-cells: should be 0.
+
+- reg: Address and length of the register set for the device
+
+- interrupts: Should contain spi interrupt
+
+- clocks: phandles to input clocks.
+  The first should be <&topckgen CLK_TOP_SPI_SEL>.
+  The second should be one of the following.
+   -  <&clk26m>: specify parent clock 26MHZ.
+   -  <&topckgen CLK_TOP_SYSPLL3_D2>: specify parent clock 109MHZ.
+				      It's the default one.
+   -  <&topckgen CLK_TOP_SYSPLL4_D2>: specify parent clock 78MHZ.
+   -  <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104MHZ.
+   -  <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78MHZ.
+
+- clock-names: shall be "spi-clk" for the controller clock, and
+  "parent-clk" for the parent clock.
+
+Optional properties:
+- mediatek,pad-select: specify which pin group (ck/mi/mo/cs) the spi
+  controller uses; this value should be 0~3 and is only required for MT8173.
+    0: specify GPIO69,70,71,72 for spi pins.
+    1: specify GPIO102,103,104,105 for spi pins.
+    2: specify GPIO128,129,130,131 for spi pins.
+    3: specify GPIO5,6,7,8 for spi pins.
+
+Example:
+
+- SoC Specific Portion:
+spi: spi@1100a000 {
+	compatible = "mediatek,mt8173-spi";
+	#address-cells = <1>;
+	#size-cells = <0>;
+	reg = <0 0x1100a000 0 0x1000>;
+	interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>;
+	clocks = <&topckgen CLK_TOP_SPI_SEL>, <&topckgen CLK_TOP_SYSPLL3_D2>;
+	clock-names = "spi-clk", "parent-clk";
+	mediatek,pad-select = <0>;
+	status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-xlp.txt b/Documentation/devicetree/bindings/spi/spi-xlp.txt
new file mode 100644
index 0000000..40e82d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-xlp.txt
@@ -0,0 +1,39 @@
+SPI Master controller for Netlogic XLP MIPS64 SOCs
+==================================================
+
+Currently this SPI controller driver is supported for the following
+Netlogic XLP SoCs:
+	XLP832, XLP316, XLP208, XLP980, XLP532
+
+Required properties:
+- compatible		: Should be "netlogic,xlp832-spi".
+- #address-cells	: Number of cells required to define a chip select address
+			  on the SPI bus.
+- #size-cells		: Should be zero.
+- reg			: Should contain register location and length.
+- clocks		: Phandle of the spi clock
+- interrupts		: Interrupt number used by this controller.
+- interrupt-parent	: Phandle of the parent interrupt controller.
+
+SPI slave nodes must be children of the SPI master node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt.
+
+Example:
+
+	spi: xlp_spi@3a100 {
+		compatible = "netlogic,xlp832-spi";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		reg = <0 0x3a100 0x100>;
+		clocks = <&spi_clk>;
+		interrupts = <34>;
+		interrupt-parent = <&pic>;
+
+		spi_nor@1 {
+			compatible = "spansion,s25sl12801";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <1>;	/* Chip Select */
+			spi-max-frequency = <40000000>;
+		};
+};
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt b/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
index 3075377..555fb11 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
+++ b/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
@@ -1,4 +1,4 @@
-* Freescale i.MX28 LRADC device driver
+* Freescale MXS LRADC device driver
 
 Required properties:
 - compatible: Should be "fsl,imx23-lradc" for i.MX23 SoC and "fsl,imx28-lradc"
diff --git a/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
new file mode 100644
index 0000000..7afce80
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/img,pistachio-gptimer.txt
@@ -0,0 +1,28 @@
+* Pistachio general-purpose timer based clocksource
+
+Required properties:
+ - compatible: "img,pistachio-gptimer".
+ - reg: Address range of the timer registers.
+ - interrupts: An interrupt for each of the four timers
+ - clocks: Should contain a clock specifier for each entry in clock-names
+ - clock-names: Should contain the following entries:
+                "sys", interface clock
+                "slow", slow counter clock
+                "fast", fast counter clock
+ - img,cr-periph: Must contain a phandle to the peripheral control
+		  syscon node.
+
+Example:
+	timer: timer@18102000 {
+		compatible = "img,pistachio-gptimer";
+		reg = <0x18102000 0x100>;
+		interrupts = <GIC_SHARED 60 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 61 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 62 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SHARED 63 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&clk_periph PERIPH_CLK_COUNTER_FAST>,
+		         <&clk_periph PERIPH_CLK_COUNTER_SLOW>,
+			 <&cr_periph SYS_CLK_TIMER>;
+		clock-names = "fast", "slow", "sys";
+		img,cr-periph = <&cr_periph>;
+	};
diff --git a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
index 7c4408f..53a3029 100644
--- a/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
+++ b/Documentation/devicetree/bindings/timer/mediatek,mtk-timer.txt
@@ -2,7 +2,11 @@
 ---------------------------------------
 
 Required properties:
-- compatible: Should be "mediatek,mt6577-timer"
+- compatible: should contain:
+	* "mediatek,mt6589-timer" for MT6589 compatible timers
+	* "mediatek,mt6580-timer" for MT6580 compatible timers
+	* "mediatek,mt6577-timer" for all compatible timers (MT6589, MT6580,
+		MT6577)
 - reg: Should contain location and length for timers register.
 - clocks: Clocks driving the timer hardware. This list should include two
 	clocks. The order is system clock and as second clock the RTC clock.
diff --git a/Documentation/devicetree/bindings/timer/st,stih407-lpc b/Documentation/devicetree/bindings/timer/st,stih407-lpc
new file mode 100644
index 0000000..72acb48
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/st,stih407-lpc
@@ -0,0 +1,28 @@
+STMicroelectronics Low Power Controller (LPC) - Clocksource
+===========================================================
+
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
+
+[See: ../watchdog/st_lpc_wdt.txt for Watchdog options]
+[See: ../rtc/rtc-st-lpc.txt for RTC options]
+
+Required properties
+
+- compatible   : Must be: "st,stih407-lpc"
+- reg          : LPC registers base address + size
+- interrupts   : LPC interrupt line number and associated flags
+- clocks       : Clock used by LPC device (See: ../clock/clock-bindings.txt)
+- st,lpc-mode  : The LPC can run in one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+		 One (and only one) mode must be selected.
+
+Example:
+       lpc@fde05000 {
+               compatible      = "st,stih407-lpc";
+               reg             = <0xfde05000 0x1000>;
+               clocks          = <&clk_s_d3_flexgen CLK_LPC_0>;
+               st,lpc-mode     = <ST_LPC_MODE_CLKSRC>;
+       };
diff --git a/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
new file mode 100644
index 0000000..862cd7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/allwinner,sun4i-a10-musb.txt
@@ -0,0 +1,29 @@
+Allwinner sun4i A10 musb DRC/OTG controller
+-------------------------------------------
+
+Required properties:
+ - compatible      : "allwinner,sun4i-a10-musb", "allwinner,sun6i-a31-musb"
+                     or "allwinner,sun8i-a33-musb"
+ - reg             : mmio address range of the musb controller
+ - clocks          : clock specifier for the musb controller ahb gate clock
+ - reset           : reset specifier for the ahb reset (A31 and newer only)
+ - interrupts      : interrupt to which the musb controller is connected
+ - interrupt-names : must be "mc"
+ - phys            : phy specifier for the otg phy
+ - phy-names       : must be "usb"
+ - dr_mode         : Dual-Role mode must be "host" or "otg"
+ - extcon          : extcon specifier for the otg phy
+
+Example:
+
+	usb_otg: usb@01c13000 {
+		compatible = "allwinner,sun4i-a10-musb";
+		reg = <0x01c13000 0x0400>;
+		clocks = <&ahb_gates 0>;
+		interrupts = <38>;
+		interrupt-names = "mc";
+		phys = <&usbphy 0>;
+		phy-names = "usb";
+		extcon = <&usbphy 0>;
+		status = "disabled";
+	};
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index 553e2fa..d71ef07 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -30,6 +30,21 @@
   argument that indicate usb controller index
 - disable-over-current: (FSL only) disable over current detect
 - external-vbus-divider: (FSL only) enables off-chip resistor divider for Vbus
+- itc-setting: interrupt threshold control register control, the setting
+  should be aligned with the ITC bits of the USBCMD register.
+- ahb-burst-config: it is vendor dependent, the required value should be
+  aligned with AHBBRST at SBUSCFG, the range is from 0x0 to 0x7. This
+  property is used to change the AHB burst configuration, check the chipidea
+  spec for the meaning of each value. If this property does not exist, the
+  reset value will be used.
+- tx-burst-size-dword: it is vendor dependent, the tx burst size in dwords
+  (4 bytes). This register represents the maximum length of a burst
+  in 32-bit words while moving data from system memory to the USB
+  bus; changing this value takes effect only when SBUSCFG.AHBBRST is 0.
+- rx-burst-size-dword: it is vendor dependent, the rx burst size in dwords
+  (4 bytes). This register represents the maximum length of a burst
+  in 32-bit words while moving data from the USB bus to system memory;
+  changing this value takes effect only when SBUSCFG.AHBBRST is 0.
 
 Example:
 
@@ -41,4 +56,9 @@
 		phys = <&usb_phy0>;
 		phy-names = "usb-phy";
 		vbus-supply = <&reg_usb0_vbus>;
+		itc-setting = <0x4>; /* 4 micro-frames */
+		 /* Incremental burst of unspecified length */
+		ahb-burst-config = <0x0>;
+		tx-burst-size-dword = <0x10>; /* 64 bytes */
+		rx-burst-size-dword = <0x10>;
 	};
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
index 477d5bb..bba8257 100644
--- a/Documentation/devicetree/bindings/usb/generic.txt
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -11,6 +11,19 @@
 			"peripheral" and "otg". In case this attribute isn't
 			passed via DT, USB DRD controllers should default to
 			OTG.
+ - otg-rev: tells the usb driver the release number of the OTG and EH supplement
+			with which the device and its descriptors are compliant,
+			in binary-coded decimal (i.e. 2.0 is 0200H). This
+			property is used if any real OTG feature (HNP/SRP/ADP)
+			is enabled; if ADP is required, otg-rev should be
+			0x0200 or above.
+ - hnp-disable: tells OTG controllers we want to disable OTG HNP. Normally HNP
+			is the basic function of real OTG, unless you want the
+			device to be an SRP-capable-only B-device.
+ - srp-disable: tells OTG controllers we want to disable OTG SRP. SRP is
+			optional for an OTG device.
+ - adp-disable: tells OTG controllers we want to disable OTG ADP. ADP is
+			optional for an OTG device.
 
 This is an attribute to a USB controller such as:
 
@@ -21,4 +34,6 @@
 	usb-phy = <&usb2_phy>, <&usb3,phy>;
 	maximum-speed = "super-speed";
 	dr_mode = "otg";
+	otg-rev = <0x0200>;
+	adp-disable;
 };
diff --git a/Documentation/devicetree/bindings/usb/msm-hsusb.txt b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
index bd8d9e7..8654a3e 100644
--- a/Documentation/devicetree/bindings/usb/msm-hsusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-hsusb.txt
@@ -52,6 +52,10 @@
 Optional properties:
 - dr_mode:      One of "host", "peripheral" or "otg". Defaults to "otg"
 
+- switch-gpio:  A phandle + gpio-specifier pair. Some boards are using a Dual
+                SPDT USB Switch, which is controlled by GPIO to de/multiplex
+                D+/D- USB lines between connectors.
+
 - qcom,phy-init-sequence: PHY configuration sequence values. This is related to Device
                 Mode Eye Diagram test. Start address at which these values will be
                 written is ULPI_EXT_VENDOR_SPECIFIC. Value of -1 is reserved as
diff --git a/Documentation/devicetree/bindings/usb/qcom,usb-8x16-phy.txt b/Documentation/devicetree/bindings/usb/qcom,usb-8x16-phy.txt
new file mode 100644
index 0000000..2cb2168
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/qcom,usb-8x16-phy.txt
@@ -0,0 +1,76 @@
+Qualcomm's APQ8016/MSM8916 USB transceiver controller
+
+- compatible:
+    Usage: required
+    Value type: <string>
+    Definition: Should contain "qcom,usb-8x16-phy".
+
+- reg:
+    Usage: required
+    Value type: <prop-encoded-array>
+    Definition: USB PHY base address and length of the register map
+
+- clocks:
+    Usage: required
+    Value type: <prop-encoded-array>
+    Definition: See clock-bindings.txt section "consumers". List of
+                two clock specifiers for interface and core controller
+                clocks.
+
+- clock-names:
+    Usage: required
+    Value type: <string>
+    Definition: Must contain "iface" and "core" strings.
+
+- vddcx-supply:
+    Usage: required
+    Value type: <phandle>
+    Definition: phandle to the regulator VDCCX supply node.
+
+- v1p8-supply:
+    Usage: required
+    Value type: <phandle>
+    Definition: phandle to the regulator 1.8V supply node.
+
+- v3p3-supply:
+    Usage: required
+    Value type: <phandle>
+    Definition: phandle to the regulator 3.3V supply node.
+
+- resets:
+    Usage: required
+    Value type: <prop-encoded-array>
+    Definition: See reset.txt section "consumers". PHY reset specifier.
+
+- reset-names:
+    Usage: required
+    Value type: <string>
+    Definition: Must contain "phy" string.
+
+- switch-gpio:
+    Usage: optional
+    Value type: <prop-encoded-array>
+    Definition: Some boards are using a Dual SPDT USB Switch, which is
+                controlled by GPIO to de/multiplex D+/D- USB lines
+                between connectors.
+
+Example:
+	usb_phy: phy@78d9000 {
+		compatible = "qcom,usb-8x16-phy";
+		reg = <0x78d9000 0x400>;
+
+		vddcx-supply = <&pm8916_s1_corner>;
+		v1p8-supply = <&pm8916_l7>;
+		v3p3-supply = <&pm8916_l13>;
+
+		clocks = <&gcc GCC_USB_HS_AHB_CLK>,
+			     <&gcc GCC_USB_HS_SYSTEM_CLK>;
+		clock-names = "iface", "core";
+
+		resets = <&gcc GCC_USB2A_PHY_BCR>;
+		reset-names = "phy";
+
+		// D+/D- lines: 1 - Routed to HUB, 0 - Device connector
+		switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>;
+	};
+
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 76228e3..ac5f0c3 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -46,6 +46,7 @@
 chipspark	ChipSPARK
 chrp	Common Hardware Reference Platform
 chunghwa	Chunghwa Picture Tubes Ltd.
+ciaa	Computadora Industrial Abierta Argentina
 cirrus	Cirrus Logic, Inc.
 cloudengines	Cloud Engines, Inc.
 cnm	Chips&Media, Inc.
@@ -54,6 +55,7 @@
 cosmic	Cosmic Circuits
 crystalfontz	Crystalfontz America, Inc.
 cubietech	Cubietech, Ltd.
+cypress	Cypress Semiconductor Corporation
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
 davicom	DAVICOM Semiconductor, Inc.
 delta	Delta Electronics, Inc.
@@ -110,8 +112,10 @@
 innolux	Innolux Corporation
 intel	Intel Corporation
 intercontrol	Inter Control Group
+invensense	InvenSense Inc.
 isee	ISEE 2007 S.L.
 isil	Intersil
+jedec	JEDEC Solid State Technology Association
 karo	Ka-Ro electronics GmbH
 keymile	Keymile GmbH
 kinetic Kinetic Technologies
@@ -135,6 +139,7 @@
 mosaixtech	Mosaix Technologies, Inc.
 moxa	Moxa
 mpl	MPL AG
+msi	Micro-Star International Co. Ltd.
 mti	Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
 mundoreader	Mundo Reader S.L.
 murata	Murata Manufacturing Co., Ltd.
@@ -144,14 +149,17 @@
 neonode		Neonode Inc.
 netgear	NETGEAR
 netlogic	Broadcom Corporation (formerly NetLogic Microsystems)
+netxeon		Shenzhen Netxeon Technology CO., LTD
 newhaven	Newhaven Display International
 nintendo	Nintendo
 nokia	Nokia
+nuvoton	Nuvoton Technology Corporation
 nvidia	NVIDIA
 nxp	NXP Semiconductors
 okaya	Okaya Electric America, Inc.
 onnn	ON Semiconductor Corp.
 opencores	OpenCores.org
+option	Option NV
 ortustech	Ortus Technology Co., Ltd.
 ovti	OmniVision Technologies
 panasonic	Panasonic Corporation
@@ -183,6 +191,7 @@
 schindler	Schindler
 seagate	Seagate Technology PLC
 semtech	Semtech Corporation
+sharp	Sharp Corporation
 sil	Silicon Image
 silabs	Silicon Laboratories
 siliconmitus	Silicon Mitus, Inc.
diff --git a/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt b/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt
new file mode 100644
index 0000000..424f844
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/backlight/pm8941-wled.txt
@@ -0,0 +1,40 @@
+Binding for Qualcomm PM8941 WLED driver
+
+Required properties:
+- compatible: should be "qcom,pm8941-wled"
+- reg: slave address
+
+Optional properties:
+- label: The name of the backlight device
+- qcom,cs-out: bool; enable current sink output
+- qcom,cabc: bool; enable content adaptive backlight control
+- qcom,ext-gen: bool; use externally generated modulator signal to dim
+- qcom,current-limit: mA; per-string current limit; value from 0 to 25
+	default: 20mA
+- qcom,current-boost-limit: mA; boost current limit; one of:
+	105, 385, 525, 805, 980, 1260, 1400, 1680
+	default: 805mA
+- qcom,switching-freq: kHz; switching frequency; one of:
+	600, 640, 685, 738, 800, 872, 960, 1066, 1200, 1371,
+	1600, 1920, 2400, 3200, 4800, 9600,
+	default: 1600kHz
+- qcom,ovp: V; Over-voltage protection limit; one of:
+	27, 29, 32, 35
+	default: 29V
+- qcom,num-strings: #; number of led strings attached; value from 1 to 3
+	default: 2
+
+Example:
+
+pm8941-wled@d800 {
+	compatible = "qcom,pm8941-wled";
+	reg = <0xd800>;
+	label = "backlight";
+
+	qcom,cs-out;
+	qcom,current-limit = <20>;
+	qcom,current-boost-limit = <805>;
+	qcom,switching-freq = <1600>;
+	qcom,ovp = <29>;
+	qcom,num-strings = <2>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
index a4d8697..86fa6de 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/atmel-wdt.txt
@@ -6,6 +6,7 @@
 - compatible: must be "atmel,at91sam9260-wdt".
 - reg: physical base address of the controller and length of memory mapped
   region.
+- clocks: phandle to input clock.
 
 Optional properties:
 - timeout-sec: contains the watchdog timeout in seconds.
@@ -39,6 +40,7 @@
 		compatible = "atmel,at91sam9260-wdt";
 		reg = <0xfffffd40 0x10>;
 		interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+		clocks = <&clk32k>;
 		timeout-sec = <15>;
 		atmel,watchdog-type = "hardware";
 		atmel,reset-type = "all";
diff --git a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
index 388c88a..039c5ca 100644
--- a/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/st_lpc_wdt.txt
@@ -1,9 +1,11 @@
 STMicroelectronics Low Power Controller (LPC) - Watchdog
 ========================================================
 
-LPC currently supports Watchdog OR Real Time Clock functionality.
+LPC currently supports Watchdog OR Real Time Clock OR Clocksource
+functionality.
 
 [See: ../rtc/rtc-st-lpc.txt for RTC options]
+[See: ../timer/st,stih407-lpc for Clocksource options]
 
 Required properties
 
@@ -12,9 +14,11 @@
 - reg		: LPC registers base address + size
 - interrupts    : LPC interrupt line number and associated flags
 - clocks	: Clock used by LPC device (See: ../clock/clock-bindings.txt)
-- st,lpc-mode	: The LPC can run either one of two modes ST_LPC_MODE_RTC [0] or
-		  ST_LPC_MODE_WDT [1].  One (and only one) mode must be
-		  selected.
+- st,lpc-mode	: The LPC can run in one of three modes:
+                  ST_LPC_MODE_RTC    [0]
+                  ST_LPC_MODE_WDT    [1]
+                  ST_LPC_MODE_CLKSRC [2]
+		 One (and only one) mode must be selected.
 
 Required properties [watchdog mode]
 
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index ca67b0f..67d4ce4 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -345,12 +345,29 @@
       that abstracts it away.
 
   * DMA_CTRL_ACK
-    - If set, the transfer can be reused after being completed.
-    - There is a guarantee the transfer won't be freed until it is acked
-      by async_tx_ack().
+    - If clear, the descriptor cannot be reused by the provider until the
+      client acknowledges receipt, i.e. has had a chance to establish any
+      dependency chains
+    - This can be acked by invoking async_tx_ack()
+    - If set, it does not mean the descriptor can be reused
+
+  * DMA_CTRL_REUSE
+    - If set, the descriptor can be reused after being completed. It should
+      not be freed by the provider if this flag is set.
+    - The descriptor should be prepared for reuse by invoking
+      dmaengine_desc_set_reuse(), which will set DMA_CTRL_REUSE (see the
+      sketch below)
+    - dmaengine_desc_set_reuse() will succeed only when the channel supports
+      reusable descriptors, as exhibited by its capabilities
     - As a consequence, if a device driver wants to skip the dma_map_sg() and
       dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
       it can resubmit the transfer right after its completion.
+    - A descriptor can be freed in a few ways:
+	- Clearing DMA_CTRL_REUSE by invoking dmaengine_desc_clear_reuse()
+	  and submitting it for the last transaction
+	- Explicitly invoking dmaengine_desc_free(); this can succeed only
+	  when DMA_CTRL_REUSE is already set
+	- Terminating the channel
+
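+    A hedged client-side sketch of descriptor reuse (channel request, buffer
+    mapping and error handling omitted; the slave prep call used here is only
+    an example):
+
+	struct dma_chan *chan;			/* previously requested channel */
+	dma_addr_t buf;				/* previously mapped buffer */
+	size_t len;
+	struct dma_async_tx_descriptor *desc;
+
+	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
+					   DMA_PREP_INTERRUPT);
+	if (dmaengine_desc_set_reuse(desc))
+		pr_warn("channel does not support reusable descriptors\n");
+	dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+	/* after completion the same descriptor may be resubmitted */
+	dmaengine_desc_free(desc);	/* only valid while DMA_CTRL_REUSE is set */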
 
 General Design Notes
 --------------------
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index c7d49b8..3fa4508 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -93,7 +93,7 @@
 Some people use this successfully for patches.
 
 When composing mail select: Preformat
-  from Format->Heading->Preformatted (Ctrl-7)
+  from Format->Paragraph Style->Preformatted (Ctrl-7)
   or the toolbar
 
 Then use:
diff --git a/Documentation/fault-injection/fault-injection.txt b/Documentation/fault-injection/fault-injection.txt
index 4cf1a2a..415484f 100644
--- a/Documentation/fault-injection/fault-injection.txt
+++ b/Documentation/fault-injection/fault-injection.txt
@@ -15,6 +15,10 @@
 
   injects page allocation failures. (alloc_pages(), get_free_pages(), ...)
 
+o fail_futex
+
+  injects futex deadlock and uaddr fault errors.
+
 o fail_make_request
 
   injects disk IO errors on devices permitted by setting
@@ -113,6 +117,12 @@
 	specifies the minimum page allocation order to be injected
 	failures.
 
+- /sys/kernel/debug/fail_futex/ignore-private:
+
+	Format: { 'Y' | 'N' }
+	default is 'N', setting it to 'Y' will disable failure injections
+	when dealing with private (address space) futexes.
+
 o Boot option
 
 In order to inject faults while debugfs is not available (early boot time),
@@ -121,6 +131,7 @@
 	failslab=
 	fail_page_alloc=
 	fail_make_request=
+	fail_futex=
 	mmc_core.fail_request=<interval>,<probability>,<space>,<times>
 
 How to add new fault injection capability
diff --git a/Documentation/fb/sm712fb.txt b/Documentation/fb/sm712fb.txt
new file mode 100644
index 0000000..c388442
--- /dev/null
+++ b/Documentation/fb/sm712fb.txt
@@ -0,0 +1,31 @@
+What is sm712fb?
+=================
+
+This is a graphics framebuffer driver for Silicon Motion SM712 based processors.
+
+How to use it?
+==============
+
+Switching modes is done using the video=sm712fb:... boot parameter.
+
+If you want, for example, to enable a resolution of 1280x1024 at 24 bpp, you
+should pass this command line to the kernel: "video=sm712fb:0x31B".
+
+You should not compile-in vesafb.
+
+Currently supported video modes are:
+
+[Graphic modes]
+
+bpp | 640x480  800x600  1024x768  1280x1024
+----+--------------------------------------------
+  8 | 0x301    0x303    0x305    0x307
+ 16 | 0x311    0x314    0x317    0x31A
+ 24 | 0x312    0x315    0x318    0x31B
+
+Missing Features
+================
+(alias TODO list)
+
+	* 2D acceleration
+	* dual-head support
diff --git a/Documentation/features/debug/uprobes/arch-support.txt b/Documentation/features/debug/uprobes/arch-support.txt
index 4efe36c..d605c3f 100644
--- a/Documentation/features/debug/uprobes/arch-support.txt
+++ b/Documentation/features/debug/uprobes/arch-support.txt
@@ -22,7 +22,7 @@
     |        m68k: | TODO |
     |       metag: | TODO |
     |  microblaze: | TODO |
-    |        mips: | TODO |
+    |        mips: |  ok  |
     |     mn10300: | TODO |
     |       nios2: | TODO |
     |    openrisc: | TODO |
diff --git a/Documentation/features/seccomp/seccomp-filter/arch-support.txt b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
index bea8009..76d39d6 100644
--- a/Documentation/features/seccomp/seccomp-filter/arch-support.txt
+++ b/Documentation/features/seccomp/seccomp-filter/arch-support.txt
@@ -32,7 +32,7 @@
     |       score: | TODO |
     |          sh: | TODO |
     |       sparc: | TODO |
-    |        tile: | TODO |
+    |        tile: |  ok  |
     |          um: | TODO |
     |   unicore32: | TODO |
     |         x86: |  ok  |
diff --git a/Documentation/filesystems/btrfs.txt b/Documentation/filesystems/btrfs.txt
index d11cc2f..c772b47 100644
--- a/Documentation/filesystems/btrfs.txt
+++ b/Documentation/filesystems/btrfs.txt
@@ -61,7 +61,7 @@
 
 	check_int enables the integrity checker module, which examines all
 	block write requests to ensure on-disk consistency, at a large
-	memory and CPU cost.  
+	memory and CPU cost.
 
 	check_int_data includes extent data in the integrity checks, and
 	implies the check_int option.
@@ -113,7 +113,7 @@
 	Disable/enable debugging option to be more verbose in some ENOSPC conditions.
 
   fatal_errors=<action>
-	Action to take when encountering a fatal error: 
+	Action to take when encountering a fatal error:
 	  "bug" - BUG() on a fatal error.  This is the default.
 	  "panic" - panic() on a fatal error.
 
@@ -132,10 +132,10 @@
 
   max_inline=<bytes>
 	Specify the maximum amount of space, in bytes, that can be inlined in
-	a metadata B-tree leaf.  The value is specified in bytes, optionally 
+	a metadata B-tree leaf.  The value is specified in bytes, optionally
 	with a K, M, or G suffix, case insensitive.  In practice, this value
 	is limited by the root sector size, with some space unavailable due
-	to leaf headers.  For a 4k sectorsize, max inline data is ~3900 bytes.
+	to leaf headers.  For a 4k sector size, max inline data is ~3900 bytes.
 
   metadata_ratio=<value>
 	Specify that 1 metadata chunk should be allocated after every <value>
@@ -170,7 +170,7 @@
 
   recovery
 	Enable autorecovery attempts if a bad tree root is found at mount time.
-	Currently this scans a list of several previous tree roots and tries to 
+	Currently this scans a list of several previous tree roots and tries to
 	use the first readable.
 
   rescan_uuid_tree
@@ -194,7 +194,7 @@
   ssd_spread
 	Options to control ssd allocation schemes.  By default, BTRFS will
 	enable or disable ssd allocation heuristics depending on whether a
-	rotational or nonrotational disk is in use.  The ssd and nossd options
+	rotational or non-rotational disk is in use.  The ssd and nossd options
 	can override this autodetection.
 
 	The ssd_spread mount option attempts to allocate into big chunks
@@ -216,13 +216,13 @@
 	This allows mounting of subvolumes which are not in the root of the mounted
 	filesystem.
 	You can use "btrfs subvolume show " to see the object ID for a subvolume.
-	
+
   thread_pool=<number>
 	The number of worker threads to allocate.  The default number is equal
 	to the number of CPUs + 2, or 8, whichever is smaller.
 
   user_subvol_rm_allowed
-	Allow subvolumes to be deleted by a non-root user. Use with caution. 
+	Allow subvolumes to be deleted by a non-root user. Use with caution.
 
 MAILING LIST
 ============
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index 88ab81c..463f595 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -51,6 +51,17 @@
 the return value will be a dentry pointer to the created file, NULL for
 error, or ERR_PTR(-ENODEV) if debugfs support is missing.
 
+To create a file with an initial size, the following function can be used
+instead:
+
+    struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
+				struct dentry *parent, void *data,
+				const struct file_operations *fops,
+				loff_t file_size);
+
+file_size is the initial file size. The other parameters are the same
+as the function debugfs_create_file.
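+
+For example (a sketch; the file name and "my_blob_fops" are illustrative, and
+"dir" is assumed to be an existing debugfs directory dentry):
+
+    static struct dentry *file;
+
+    file = debugfs_create_file_size("registers", 0444, dir, NULL,
+				    &my_blob_fops, 0x100);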
+
 In a number of cases, the creation of a set of file operations is not
 actually necessary; the debugfs code provides a number of helper functions
 for simple situations.  Files containing a single integer value can be
@@ -100,6 +111,14 @@
 N, followed by a newline.  If written to, it will accept either upper- or
 lower-case values, or 1 or 0.  Any other input will be silently ignored.
 
+Also, atomic_t values can be placed in debugfs with:
+
+    struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
+				struct dentry *parent, atomic_t *value)
+
+A read of this file will get atomic_t values, and a write of this file
+will set atomic_t values.
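+
+For example (a sketch; "failures" and "dir" are illustrative):
+
+    static atomic_t failures = ATOMIC_INIT(0);
+
+    debugfs_create_atomic_t("failures", 0644, dir, &failures);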
+
 Another option is exporting a block of arbitrary binary data, with
 this structure and function:
 
@@ -147,6 +166,27 @@
 using __stringify, and a number of register names (macros) are actually
 byte offsets over a base for the register block.
 
+If you want to dump a u32 array in debugfs, you can create a file with:
+
+    struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
+			struct dentry *parent,
+			u32 *array, u32 elements);
+
+The "array" argument provides the data, and the "elements" argument is
+the number of elements in the array. Note: once the array is created, its
+size cannot be changed.
+
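+For example (a sketch; "stats" and "dir" are illustrative):
+
+    static u32 stats[4];
+
+    debugfs_create_u32_array("stats", 0444, dir, stats, ARRAY_SIZE(stats));
+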
+There is a helper function to create a device-related seq_file:
+
+   struct dentry *debugfs_create_devm_seqfile(struct device *dev,
+				const char *name,
+				struct dentry *parent,
+				int (*read_fn)(struct seq_file *s,
+					void *data));
+
+The "dev" argument is the device related to this debugfs file, and
+"read_fn" is a function pointer which will be called to print the
+seq_file content.
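+
+A short sketch of a read_fn (the name and the printed text are illustrative;
+"dev" and "parent" are assumed to exist in the driver):
+
+   static int mydrv_status_show(struct seq_file *s, void *data)
+   {
+	seq_puts(s, "status: ok\n");
+	return 0;
+   }
+
+   debugfs_create_devm_seqfile(dev, "status", parent, mydrv_status_show);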
 
 There are a couple of other directory-oriented helper functions:
 
diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt
index b971456..5575539 100644
--- a/Documentation/filesystems/ext2.txt
+++ b/Documentation/filesystems/ext2.txt
@@ -360,8 +360,8 @@
 the time of the crash, then there is no guarantee of consistency for
 the blocks in that transaction so they are discarded (which means any
 filesystem changes they represent are also lost).
-Check Documentation/filesystems/ext3.txt if you want to read more about
-ext3 and journaling.
+Check Documentation/filesystems/ext4.txt if you want to read more about
+ext4 and journaling.
 
 References
 ==========
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 7ed0d17..58758fb 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -6,210 +6,7 @@
 for the 2.2 branch, and ported to 2.4 kernels by Peter Braam, Andreas Dilger,
 Andrew Morton, Alexander Viro, Ted Ts'o and Stephen Tweedie.
 
-Ext3 is the ext2 filesystem enhanced with journalling capabilities.
+Ext3 is the ext2 filesystem enhanced with journalling capabilities. The
+filesystem is a subset of ext4 filesystem so use ext4 driver for accessing
+ext3 filesystems.
 
-Options
-=======
-
-When mounting an ext3 filesystem, the following option are accepted:
-(*) == default
-
-ro			Mount filesystem read only. Note that ext3 will replay
-			the journal (and thus write to the partition) even when
-			mounted "read only". Mount options "ro,noload" can be
-			used to prevent writes to the filesystem.
-
-journal=update		Update the ext3 file system's journal to the current
-			format.
-
-journal=inum		When a journal already exists, this option is ignored.
-			Otherwise, it specifies the number of the inode which
-			will represent the ext3 file system's journal file.
-
-journal_path=path
-journal_dev=devnum	When the external journal device's major/minor numbers
-			have changed, these options allow the user to specify
-			the new journal location.  The journal device is
-			identified through either its new major/minor numbers
-			encoded in devnum, or via a path to the device.
-
-norecovery		Don't load the journal on mounting. Note that this forces
-noload			mount of inconsistent filesystem, which can lead to
-			various problems.
-
-data=journal		All data are committed into the journal prior to being
-			written into the main file system.
-
-data=ordered	(*)	All data are forced directly out to the main file
-			system prior to its metadata being committed to the
-			journal.
-
-data=writeback		Data ordering is not preserved, data may be written
-			into the main file system after its metadata has been
-			committed to the journal.
-
-commit=nrsec	(*)	Ext3 can be told to sync all its data and metadata
-			every 'nrsec' seconds. The default value is 5 seconds.
-			This means that if you lose your power, you will lose
-			as much as the latest 5 seconds of work (your
-			filesystem will not be damaged though, thanks to the
-			journaling).  This default value (or any low value)
-			will hurt performance, but it's good for data-safety.
-			Setting it to 0 will have the same effect as leaving
-			it at the default (5 seconds).
-			Setting it to very large values will improve
-			performance.
-
-barrier=<0|1(*)>	This enables/disables the use of write barriers in
-barrier	(*)		the jbd code.  barrier=0 disables, barrier=1 enables.
-nobarrier		This also requires an IO stack which can support
-			barriers, and if jbd gets an error on a barrier
-			write, it will disable again with a warning.
-			Write barriers enforce proper on-disk ordering
-			of journal commits, making volatile disk write caches
-			safe to use, at some performance penalty.  If
-			your disks are battery-backed in one way or another,
-			disabling barriers may safely improve performance.
-			The mount options "barrier" and "nobarrier" can
-			also be used to enable or disable barriers, for
-			consistency with other ext3 mount options.
-
-user_xattr		Enables Extended User Attributes.  Additionally, you
-			need to have extended attribute support enabled in the
-			kernel configuration (CONFIG_EXT3_FS_XATTR).  See the
-			attr(5) manual page and http://acl.bestbits.at/ to
-			learn more about extended attributes.
-
-nouser_xattr		Disables Extended User Attributes.
-
-acl			Enables POSIX Access Control Lists support.
-			Additionally, you need to have ACL support enabled in
-			the kernel configuration (CONFIG_EXT3_FS_POSIX_ACL).
-			See the acl(5) manual page and http://acl.bestbits.at/
-			for more information.
-
-noacl			This option disables POSIX Access Control List
-			support.
-
-reservation
-
-noreservation
-
-bsddf 		(*)	Make 'df' act like BSD.
-minixdf			Make 'df' act like Minix.
-
-check=none		Don't do extra checking of bitmaps on mount.
-nocheck
-
-debug			Extra debugging information is sent to syslog.
-
-errors=remount-ro	Remount the filesystem read-only on an error.
-errors=continue		Keep going on a filesystem error.
-errors=panic		Panic and halt the machine if an error occurs.
-			(These mount options override the errors behavior
-			specified in the superblock, which can be
-			configured using tune2fs.)
-
-data_err=ignore(*)	Just print an error message if an error occurs
-			in a file data buffer in ordered mode.
-data_err=abort		Abort the journal if an error occurs in a file
-			data buffer in ordered mode.
-
-grpid			Give objects the same group ID as their creator.
-bsdgroups
-
-nogrpid		(*)	New objects have the group ID of their creator.
-sysvgroups
-
-resgid=n		The group ID which may use the reserved blocks.
-
-resuid=n		The user ID which may use the reserved blocks.
-
-sb=n			Use alternate superblock at this location.
-
-quota			These options are ignored by the filesystem. They
-noquota			are used only by quota tools to recognize volumes
-grpquota		where quota should be turned on. See documentation
-usrquota		in the quota-tools package for more details
-			(http://sourceforge.net/projects/linuxquota).
-
-jqfmt=<quota type>	These options tell filesystem details about quota
-usrjquota=<file>	so that quota information can be properly updated
-grpjquota=<file>	during journal replay. They replace the above
-			quota options. See documentation in the quota-tools
-			package for more details
-			(http://sourceforge.net/projects/linuxquota).
-
-Specification
-=============
-Ext3 shares all disk implementation with the ext2 filesystem, and adds
-transactions capabilities to ext2.  Journaling is done by the Journaling Block
-Device layer.
-
-Journaling Block Device layer
------------------------------
-The Journaling Block Device layer (JBD) isn't ext3 specific.  It was designed
-to add journaling capabilities to a block device.  The ext3 filesystem code
-will inform the JBD of modifications it is performing (called a transaction).
-The journal supports the transactions start and stop, and in case of a crash,
-the journal can replay the transactions to quickly put the partition back into
-a consistent state.
-
-Handles represent a single atomic update to a filesystem.  JBD can handle an
-external journal on a block device.
-
-Data Mode
----------
-There are 3 different data modes:
-
-* writeback mode
-In data=writeback mode, ext3 does not journal data at all.  This mode provides
-a similar level of journaling as that of XFS, JFS, and ReiserFS in its default
-mode - metadata journaling.  A crash+recovery can cause incorrect data to
-appear in files which were written shortly before the crash.  This mode will
-typically provide the best ext3 performance.
-
-* ordered mode
-In data=ordered mode, ext3 only officially journals metadata, but it logically
-groups metadata and data blocks into a single unit called a transaction.  When
-it's time to write the new metadata out to disk, the associated data blocks
-are written first.  In general, this mode performs slightly slower than
-writeback but significantly faster than journal mode.
-
-* journal mode
-data=journal mode provides full data and metadata journaling.  All new data is
-written to the journal first, and then to its final location.
-In the event of a crash, the journal can be replayed, bringing both data and
-metadata into a consistent state.  This mode is the slowest except when data
-needs to be read from and written to disk at the same time where it
-outperforms all other modes.
-
-Compatibility
--------------
-
-Ext2 partitions can be easily convert to ext3, with `tune2fs -j <dev>`.
-Ext3 is fully compatible with Ext2.  Ext3 partitions can easily be mounted as
-Ext2.
-
-
-External Tools
-==============
-See manual pages to learn more.
-
-tune2fs: 	create a ext3 journal on a ext2 partition with the -j flag.
-mke2fs: 	create a ext3 partition with the -j flag.
-debugfs: 	ext2 and ext3 file system debugger.
-ext2online:	online (mounted) ext2 and ext3 filesystem resizer
-
-
-References
-==========
-
-kernel source:	<file:fs/ext3/>
-		<file:fs/jbd/>
-
-programs: 	http://e2fsprogs.sourceforge.net/
-		http://ext2resize.sourceforge.net
-
-useful links:	http://www.ibm.com/developerworks/library/l-fs7/index.html
-        http://www.ibm.com/developerworks/library/l-fs8/index.html
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index e9e750e..e2d5105 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -143,7 +143,9 @@
 extent_cache           Enable an extent cache based on rb-tree, it can cache
                        as many as extent which map between contiguous logical
                        address and physical address per inode, resulting in
-                       increasing the cache hit ratio.
+                       increasing the cache hit ratio. Set by default.
+noextent_cache         Disable an extent cache based on rb-tree explicitly, see
+                       the above extent_cache mount option.
 noinline_data          Disable the inline data feature, inline data feature is
                        enabled by default.
 
diff --git a/Documentation/filesystems/sysfs.txt b/Documentation/filesystems/sysfs.txt
index b35a64b..9494afb 100644
--- a/Documentation/filesystems/sysfs.txt
+++ b/Documentation/filesystems/sysfs.txt
@@ -212,7 +212,10 @@
 - show() methods should return the number of bytes printed into the
   buffer. This is the return value of scnprintf().
 
-- show() should always use scnprintf().
+- show() must not use snprintf() when formatting the value to be
+  returned to user space. If you can guarantee that an overflow
+  will never happen, you can use sprintf(); otherwise you must use
+  scnprintf().
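+
+  A minimal sketch of a conforming show() method (the attribute and the
+  value shown are illustrative):
+
+	static int foo_value;
+
+	static ssize_t foo_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+	{
+		return scnprintf(buf, PAGE_SIZE, "%d\n", foo_value);
+	}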
 
 - store() should return the number of bytes used from the buffer. If the
   entire buffer has been used, just return the count argument.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 5eb8456..8c6f07a 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -769,7 +769,7 @@
 	to stall to allow flushers a chance to complete some IO. Ordinarily
 	it can use PageDirty and PageWriteback but some filesystems have
 	more complex state (unstable pages in NFS prevent reclaim) or
-	do not set those flags due to locking problems (jbd). This callback
+	do not set those flags due to locking problems. This callback
 	allows a filesystem to indicate to the VM if a page should be
 	treated as dirty or writeback for the purposes of stalling.
 
diff --git a/Documentation/gpio/00-INDEX b/Documentation/gpio/00-INDEX
index 1de43ae..179beb2 100644
--- a/Documentation/gpio/00-INDEX
+++ b/Documentation/gpio/00-INDEX
@@ -6,6 +6,9 @@
 	- How to obtain and use GPIOs in a driver
 driver.txt
 	- How to write a GPIO driver
+drivers-on-gpio.txt:
+	- Drivers in other subsystems that can use GPIO to provide more
+	  complex functionality.
 board.txt
 	- How to assign GPIOs to a consumer device and a function
 sysfs.txt
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 75542b9..a206639 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -237,6 +237,39 @@
 should not have to care about the physical line level.
 
 
+The active-low property
+-----------------------
+
+As a driver should not have to care about the physical line level, all of the
+gpiod_set_value_xxx() or gpiod_set_array_value_xxx() functions operate with
+the *logical* value. With this, they take the active-low property into account.
+This means that they check whether the GPIO is configured to be active-low,
+and if so, they manipulate the passed value before the physical line level is
+driven.
+
+Consequently, all the gpiod_set_(array)_value_xxx() functions interpret the
+parameter "value" as "active" ("1") or "inactive" ("0"). The physical line
+level will be driven accordingly.
+
+As an example, if the active-low property for a dedicated GPIO is set, and
+gpiod_set_(array)_value_xxx() is passed "active" ("1"), the physical line level
+will be driven low.
+
+To summarize:
+
+Function (example)               active-low property  physical line
+gpiod_set_raw_value(desc, 0);        don't care           low
+gpiod_set_raw_value(desc, 1);        don't care           high
+gpiod_set_value(desc, 0);       default (active-high)     low
+gpiod_set_value(desc, 1);       default (active-high)     high
+gpiod_set_value(desc, 0);             active-low          high
+gpiod_set_value(desc, 1);             active-low          low
+
+Please note again that the set_raw/get_raw functions should be avoided as much
+as possible, especially by drivers which should not care about the actual
+physical line level but only about the logical value.
+
+
 Set multiple GPIO outputs with a single function call
 -----------------------------------------------------
 The following functions set the output values of an array of GPIOs:
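Returning to the single-line setters and the active-low table above, a minimal
consumer sketch may help; the "enable" connection ID and the probe function
are assumptions for illustration, the calls themselves are the ones documented
here:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int demo_probe(struct device *dev)
	{
		struct gpio_desc *enable;

		/* Polarity, including any active-low flag, comes from the
		 * device description (DT, ACPI or board file). */
		enable = gpiod_get(dev, "enable", GPIOD_OUT_LOW);
		if (IS_ERR(enable))
			return PTR_ERR(enable);

		gpiod_set_value(enable, 1);	/* logical "active" level */
		gpiod_set_value(enable, 0);	/* logical "inactive" level */

		/* Only if the physical level really matters: */
		gpiod_set_raw_value(enable, 1);	/* force the line high */

		gpiod_put(enable);
		return 0;
	}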
diff --git a/Documentation/gpio/drivers-on-gpio.txt b/Documentation/gpio/drivers-on-gpio.txt
new file mode 100644
index 0000000..f612132
--- /dev/null
+++ b/Documentation/gpio/drivers-on-gpio.txt
@@ -0,0 +1,95 @@
+Subsystem drivers using GPIO
+============================
+
+Note that standard kernel drivers exist for common GPIO tasks and will provide
+the right in-kernel and userspace APIs/ABIs for the job, and that these
+drivers can quite easily interconnect with other kernel subsystems using
+hardware descriptions such as device tree or ACPI:
+
+- leds-gpio: drivers/leds/leds-gpio.c will handle LEDs connected to GPIO
+  lines, giving you the LED sysfs interface
+
+- ledtrig-gpio: drivers/leds/trigger/ledtrig-gpio.c will provide a LED trigger,
+  i.e. a LED will turn on/off in response to a GPIO line going high or low
+  (and that LED may in turn use the leds-gpio as per above).
+
+- gpio-keys: drivers/input/keyboard/gpio_keys.c is used when your GPIO line
+  can generate interrupts in response to a key press. Also supports debounce.
+
+- gpio-keys-polled: drivers/input/keyboard/gpio_keys_polled.c is used when your
+  GPIO line cannot generate interrupts, so it needs to be periodically polled
+  by a timer.
+
+- gpio_mouse: drivers/input/mouse/gpio_mouse.c is used to provide a mouse with
+  up to three buttons by simply using GPIOs and no mouse port. You can cut the
+  mouse cable and connect the wires to GPIO lines or solder a mouse connector
+  to the lines for a more permanent solution of this type.
+
+- gpio-beeper: drivers/input/misc/gpio-beeper.c is used to provide a beep from
+  an external speaker connected to a GPIO line.
+
+- gpio-tilt-polled: drivers/input/misc/gpio_tilt_polled.c provides tilt
+  detection switches using GPIO, which is useful for your homebrew pinball
+  machine if for nothing else. It can detect different tilt angles of the
+  monitored object.
+
+- extcon-gpio: drivers/extcon/extcon-gpio.c is used when you need to read an
+  external connector status, such as a headset line for an audio driver or an
+  HDMI connector. It will provide a better userspace sysfs interface than GPIO.
+
+- restart-gpio: drivers/power/gpio-restart.c is used to restart/reboot the
+  system by pulling a GPIO line and will register a restart handler so
+  userspace can issue the right system call to restart the system.
+
+- poweroff-gpio: drivers/power/gpio-poweroff.c is used to power the system down
+  by pulling a GPIO line and will register a pm_power_off() callback so that
+  userspace can issue the right system call to power down the system.
+
+- gpio-gate-clock: drivers/clk/clk-gpio-gate.c is used to control a gated clock
+  (off/on) that uses a GPIO, and is integrated with the clock subsystem.
+
+- i2c-gpio: drivers/i2c/busses/i2c-gpio.c is used to drive an I2C bus
+  (two wires, SDA and SCL lines) by hammering (bitbang) two GPIO lines. It will
+  appear as any other I2C bus to the system and makes it possible to connect
+  drivers for the I2C devices on the bus like any other I2C bus driver.
+
+- spi_gpio: drivers/spi/spi-gpio.c is used to drive an SPI bus (variable number
+  of wires, at least SCK and optionally MISO, MOSI and chip select lines) using
+  GPIO hammering (bitbang). It will appear as any other SPI bus on the system
+  and makes it possible to connect drivers for SPI devices on the bus like
+  any other SPI bus driver. For example any MMC/SD card can then be connected
+  to this SPI by using the mmc_spi host from the MMC/SD card subsystem.
+
+- w1-gpio: drivers/w1/masters/w1-gpio.c is used to drive a one-wire bus using
+  a GPIO line, integrating with the W1 subsystem and handling devices on
+  the bus like any other W1 device.
+
+- gpio-fan: drivers/hwmon/gpio-fan.c is used to control a fan for cooling the
+  system, connected to a GPIO line (and optionally a GPIO alarm line),
+  presenting all the right in-kernel and sysfs interfaces to make your system
+  not overheat.
+
+- gpio-regulator: drivers/regulator/gpio-regulator.c is used to control a
+  regulator providing a certain voltage by pulling a GPIO line, integrating
+  with the regulator subsystem and giving you all the right interfaces.
+
+- gpio-wdt: drivers/watchdog/gpio_wdt.c is used to provide a watchdog timer
+  that will periodically "ping" a hardware connected to a GPIO line by toggling
+  it from 1-to-0-to-1. If that hardware does not receive its "ping"
+  periodically, it will reset the system.
+
+- gpio-nand: drivers/mtd/nand/gpio.c is used to connect a NAND flash chip to
+  a set of simple GPIO lines: RDY, NCE, ALE, CLE, NWP. It interacts with the
+  NAND flash MTD subsystem and provides chip access and partition parsing like
+  any other NAND driving hardware.
+
+Apart from this there are special GPIO drivers in subsystems like MMC/SD to
+read card detect and write protect GPIO lines, and in the TTY serial subsystem
+to emulate MCTRL (modem control) signals CTS/RTS by using two GPIO lines. The
+MTD NOR flash has add-ons for extra GPIO lines too, though the address bus is
+usually connected directly to the flash.
+
+Use those instead of talking directly to the GPIOs using sysfs; they integrate
+with kernel frameworks better than your userspace code could. Needless to say,
+just using the appropriate kernel drivers will simplify and speed up your
+embedded hacking in particular by providing ready-made components.
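As one concrete way of hooking a board up to the drivers listed above, a
machine without a device tree description could hand leds-gpio legacy platform
data roughly as sketched below; the GPIO number, LED name and trigger are
assumptions, only the "leds-gpio" device name and the structures come from the
driver:

	#include <linux/kernel.h>
	#include <linux/leds.h>
	#include <linux/platform_device.h>

	static struct gpio_led demo_leds[] = {
		{
			.name		 = "demo:green:status",
			.gpio		 = 42,	/* hypothetical GPIO number */
			.active_low	 = 1,
			.default_trigger = "heartbeat",
		},
	};

	static struct gpio_led_platform_data demo_leds_pdata = {
		.num_leds = ARRAY_SIZE(demo_leds),
		.leds	  = demo_leds,
	};

	static struct platform_device demo_leds_device = {
		.name = "leds-gpio",
		.id   = -1,
		.dev  = {
			.platform_data = &demo_leds_pdata,
		},
	};

	/* registered from board init code with
	 * platform_device_register(&demo_leds_device); */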
diff --git a/Documentation/gpio/sysfs.txt b/Documentation/gpio/sysfs.txt
index 535b6a8..0700b55 100644
--- a/Documentation/gpio/sysfs.txt
+++ b/Documentation/gpio/sysfs.txt
@@ -20,11 +20,10 @@
 standard kernels won't know about. And for some tasks, simple userspace
 GPIO drivers could be all that the system really needs.
 
-Note that standard kernel drivers exist for common "LEDs and Buttons"
-GPIO tasks:  "leds-gpio" and "gpio_keys", respectively. Use those
-instead of talking directly to the GPIOs; they integrate with kernel
-frameworks better than your userspace code could.
-
+DO NOT ABUSE SYSFS TO CONTROL HARDWARE THAT HAS PROPER KERNEL DRIVERS.
+PLEASE READ THE DOCUMENT NAMED "drivers-on-gpio.txt" IN THIS DOCUMENTATION
+DIRECTORY TO AVOID REINVENTING KERNEL WHEELS IN USERSPACE. I MEAN IT.
+REALLY.
 
 Paths in Sysfs
 --------------
diff --git a/Documentation/hwmon/adm1275 b/Documentation/hwmon/adm1275
index 15b4a20..d697229 100644
--- a/Documentation/hwmon/adm1275
+++ b/Documentation/hwmon/adm1275
@@ -14,6 +14,10 @@
     Prefix: 'adm1276'
     Addresses scanned: -
     Datasheet: www.analog.com/static/imported-files/data_sheets/ADM1276.pdf
+  * Analog Devices ADM1293/ADM1294
+    Prefix: 'adm1293', 'adm1294'
+    Addresses scanned: -
+    Datasheet: http://www.analog.com/media/en/technical-documentation/data-sheets/ADM1293_1294.pdf
 
 Author: Guenter Roeck <linux@roeck-us.net>
 
@@ -22,12 +26,12 @@
 -----------
 
 This driver supports hardware monitoring for Analog Devices ADM1075, ADM1275,
-and ADM1276 Hot-Swap Controller and Digital Power Monitor.
+ADM1276, ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
 
-ADM1075, ADM1275, and ADM1276 are hot-swap controllers that allow a circuit
-board to be removed from or inserted into a live backplane. They also feature
-current and voltage readback via an integrated 12-bit analog-to-digital
-converter (ADC), accessed using a PMBus interface.
+ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 are hot-swap controllers that
+allow a circuit board to be removed from or inserted into a live backplane.
+They also feature current and voltage readback via an integrated 12-bit
+analog-to-digital converter (ADC), accessed using a PMBus interface.
 
 The driver is a client driver to the core PMBus driver. Please see
 Documentation/hwmon/pmbus for details on PMBus client drivers.
@@ -58,16 +62,16 @@
 The following attributes are supported. Limits are read-write, history reset
 attributes are write-only, all other attributes are read-only.
 
-in1_label		"vin1" or "vout1" depending on chip variant and
-			configuration. On ADM1075, vout1 reports the voltage on
-			the VAUX pin.
-in1_input		Measured voltage.
-in1_min			Minimum Voltage.
-in1_max			Maximum voltage.
-in1_min_alarm		Voltage low alarm.
-in1_max_alarm		Voltage high alarm.
-in1_highest		Historical maximum voltage.
-in1_reset_history	Write any value to reset history.
+inX_label		"vin1" or "vout1" depending on chip variant and
+			configuration. On ADM1075, ADM1293, and ADM1294,
+			vout1 reports the voltage on the VAUX pin.
+inX_input		Measured voltage.
+inX_min			Minimum Voltage.
+inX_max			Maximum voltage.
+inX_min_alarm		Voltage low alarm.
+inX_max_alarm		Voltage high alarm.
+inX_highest		Historical maximum voltage.
+inX_reset_history	Write any value to reset history.
 
 curr1_label		"iout1"
 curr1_input		Measured current.
@@ -86,7 +90,9 @@
 
 power1_label		"pin1"
 power1_input		Input power.
+power1_input_lowest	Lowest observed input power. ADM1293 and ADM1294 only.
+power1_input_highest	Highest observed input power.
 power1_reset_history	Write any value to reset history.
 
-			Power attributes are supported on ADM1075 and ADM1276
-			only.
+			Power attributes are supported on ADM1075, ADM1276,
+			ADM1293, and ADM1294.
diff --git a/Documentation/hwmon/fam15h_power b/Documentation/hwmon/fam15h_power
index 8065481..e2b1b69 100644
--- a/Documentation/hwmon/fam15h_power
+++ b/Documentation/hwmon/fam15h_power
@@ -3,12 +3,13 @@
 
 Supported chips:
 * AMD Family 15h Processors
+* AMD Family 16h Processors
 
   Prefix: 'fam15h_power'
   Addresses scanned: PCI space
   Datasheets:
   BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
-    (not yet published)
+  BIOS and Kernel Developer's Guide (BKDG) For AMD Family 16h Processors
 
 Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
 
@@ -16,10 +17,11 @@
 -----------
 
 This driver permits reading of registers providing power information
-of AMD Family 15h processors.
+of AMD Family 15h and 16h processors.
 
-For AMD Family 15h processors the following power values can be
-calculated using different processor northbridge function registers:
+For AMD Family 15h and 16h processors the following power values can
+be calculated using different processor northbridge function
+registers:
 
 * BasePwrWatts: Specifies in watts the maximum amount of power
   consumed by the processor for NB and logic external to the core.
diff --git a/Documentation/hwmon/it87 b/Documentation/hwmon/it87
index e872948..733296d 100644
--- a/Documentation/hwmon/it87
+++ b/Documentation/hwmon/it87
@@ -38,6 +38,10 @@
     Prefix: 'it8728'
     Addresses scanned: from Super I/O config space (8 I/O ports)
     Datasheet: Not publicly available
+  * IT8732F
+    Prefix: 'it8732'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not publicly available
   * IT8771E
     Prefix: 'it8771'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -111,9 +115,9 @@
 -----------
 
 This driver implements support for the IT8603E, IT8620E, IT8623E, IT8705F,
-IT8712F, IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
-IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E, and SiS950
-chips.
+IT8712F, IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F,
+IT8758E, IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E, and
+SiS950 chips.
 
 These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
 joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -137,10 +141,10 @@
 have support for 2 additional fans. The additional fans are supported by the
 driver.
 
-The IT8716F, IT8718F, IT8720F, IT8721F/IT8758E, IT8781F, IT8782F, IT8783E/F,
-and late IT8712F and IT8705F also have optional 16-bit tachometer counters
-for fans 1 to 3. This is better (no more fan clock divider mess) but not
-compatible with the older chips and revisions. The 16-bit tachometer mode
+The IT8716F, IT8718F, IT8720F, IT8721F/IT8758E, IT8732F, IT8781F, IT8782F,
+IT8783E/F, and late IT8712F and IT8705F also have optional 16-bit tachometer
+counters for fans 1 to 3. This is better (no more fan clock divider mess) but
+not compatible with the older chips and revisions. The 16-bit tachometer mode
 is enabled by the driver when one of the above chips is detected.
 
 The IT8726F is just bit enhanced IT8716F with additional hardware
@@ -159,6 +163,9 @@
 
 The IT8790E supports up to 3 fans. 16-bit fan mode is always enabled.
 
+The IT8732F supports a closed-loop mode for fan control, but this is not
+currently implemented by the driver.
+
 Temperatures are measured in degrees Celsius. An alarm is triggered once
 when the Overtemperature Shutdown limit is crossed.
 
@@ -173,12 +180,14 @@
 Voltage sensors (also known as IN sensors) report their values in volts. An
 alarm is triggered if the voltage has crossed a programmable minimum or
 maximum limit. Note that minimum in this case always means 'closest to
-zero'; this is important for negative voltage measurements. All voltage
-inputs can measure voltages between 0 and 4.08 volts, with a resolution of
-0.016 volt (except IT8603E, IT8721F/IT8758E and IT8728F: 0.012 volt.) The
-battery voltage in8 does not have limit registers.
+zero'; this is important for negative voltage measurements. On most chips, all
+voltage inputs can measure voltages between 0 and 4.08 volts, with a resolution
+of 0.016 volt.  IT8603E, IT8721F/IT8758E and IT8728F can measure between 0 and
+3.06 volts, with a resolution of 0.012 volt.  IT8732F can measure between 0 and
+2.8 volts with a resolution of 0.0109 volt.  The battery voltage in8 does not
+have limit registers.
 
-On the IT8603E, IT8721F/IT8758E, IT8781F, IT8782F, and IT8783E/F, some
+On the IT8603E, IT8721F/IT8758E, IT8732F, IT8781F, IT8782F, and IT8783E/F, some
 voltage inputs are internal and scaled inside the chip:
 * in3 (optional)
 * in7 (optional for IT8781F, IT8782F, and IT8783E/F)
diff --git a/Documentation/hwmon/ltc2978 b/Documentation/hwmon/ltc2978
index 686c078..9a49d3c 100644
--- a/Documentation/hwmon/ltc2978
+++ b/Documentation/hwmon/ltc2978
@@ -6,6 +6,10 @@
     Prefix: 'ltc2974'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc2974
+  * Linear Technology LTC2975
+    Prefix: 'ltc2975'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2975
   * Linear Technology LTC2977
     Prefix: 'ltc2977'
     Addresses scanned: -
@@ -15,14 +19,38 @@
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc2978
     	       http://www.linear.com/product/ltc2978a
+  * Linear Technology LTC2980
+    Prefix: 'ltc2980'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2980
   * Linear Technology LTC3880
     Prefix: 'ltc3880'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc3880
+  * Linear Technology LTC3882
+    Prefix: 'ltc3882'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc3882
   * Linear Technology LTC3883
     Prefix: 'ltc3883'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc3883
+  * Linear Technology LTC3886
+    Prefix: 'ltc3886'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc3886
+  * Linear Technology LTC3887
+    Prefix: 'ltc3887'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc3887
+  * Linear Technology LTM2987
+    Prefix: 'ltm2987'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltm2987
+  * Linear Technology LTM4675
+    Prefix: 'ltm4675'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltm4675
   * Linear Technology LTM4676
     Prefix: 'ltm4676'
     Addresses scanned: -
@@ -34,11 +62,20 @@
 Description
 -----------
 
-LTC2974 is a quad digital power supply manager. LTC2978 is an octal power supply
-monitor. LTC2977 is a pin compatible replacement for LTC2978. LTC3880 is a dual
-output poly-phase step-down DC/DC controller. LTC3883 is a single phase
-step-down DC/DC controller. LTM4676 is a dual 13A or single 26A uModule
-regulator.
+LTC2974 and LTC2975 are quad digital power supply managers.
+LTC2978 is an octal power supply monitor.
+LTC2977 is a pin compatible replacement for LTC2978.
+LTC2980 is a 16-channel Power System Manager, consisting of two LTC2977
+in a single die. The chip is instantiated and reported as two separate chips
+on two different I2C bus addresses.
+LTC3880, LTC3882, LTC3886, and LTC3887 are dual output poly-phase step-down
+DC/DC controllers.
+LTC3883 is a single phase step-down DC/DC controller.
+LTM2987 is a 16-channel Power System Manager with two LTC2977 plus
+additional components on a single die. The chip is instantiated and reported
+as two separate chips on two different I2C bus addresses.
+LTM4675 is a dual 9A or single 18A μModule regulator.
+LTM4676 is a dual 13A or single 26A uModule regulator.
 
 
 Usage Notes
@@ -61,26 +98,32 @@
 in1_input		Measured input voltage.
 in1_min			Minimum input voltage.
 in1_max			Maximum input voltage.
-			LTC2974, LTC2977, and LTC2978 only.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
 in1_lcrit		Critical minimum input voltage.
-			LTC2974, LTC2977, and LTC2978 only.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
 in1_crit		Critical maximum input voltage.
 in1_min_alarm		Input voltage low alarm.
 in1_max_alarm		Input voltage high alarm.
-			LTC2974, LTC2977, and LTC2978 only.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
 in1_lcrit_alarm		Input voltage critical low alarm.
-			LTC2974, LTC2977, and LTC2978 only.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
 in1_crit_alarm		Input voltage critical high alarm.
 in1_lowest		Lowest input voltage.
-			LTC2974, LTC2977, and LTC2978 only.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
 in1_highest		Highest input voltage.
 in1_reset_history	Reset input voltage history.
 
 in[N]_label		"vout[1-8]".
-			LTC2974: N=2-5
-			LTC2977: N=2-9
+			LTC2974, LTC2975: N=2-5
+			LTC2977, LTC2980, LTM2987: N=2-9
 			LTC2978: N=2-9
-			LTC3880, LTM4676: N=2-3
+			LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+				N=2-3
 			LTC3883: N=2
 in[N]_input		Measured output voltage.
 in[N]_min		Minimum output voltage.
@@ -91,67 +134,78 @@
 in[N]_max_alarm		Output voltage high alarm.
 in[N]_lcrit_alarm	Output voltage critical low alarm.
 in[N]_crit_alarm	Output voltage critical high alarm.
-in[N]_lowest		Lowest output voltage. LTC2974 and LTC2978 only.
+in[N]_lowest		Lowest output voltage. LTC2974, LTC2975,
+			and LTC2978 only.
 in[N]_highest		Highest output voltage.
 in[N]_reset_history	Reset output voltage history.
 
 temp[N]_input		Measured temperature.
-			On LTC2974, temp[1-4] report external temperatures,
-			and temp5 reports the chip temperature.
-			On LTC2977 and LTC2978, only one temperature measurement
-			is supported and reports the chip temperature.
-			On LTC3880 and LTM4676, temp1 and temp2 report external
-			temperatures, and temp3 reports the chip temperature.
+			On LTC2974 and LTC2975, temp[1-4] report external
+			temperatures, and temp5 reports the chip temperature.
+			On LTC2977, LTC2980, LTC2978, and LTM2987, only one
+			temperature measurement is supported and reports
+			the chip temperature.
+			On LTC3880, LTC3882, LTC3887, LTM4675, and LTM4676,
+			temp1 and temp2 report external temperatures, and temp3
+			reports the chip temperature.
 			On LTC3883, temp1 reports an external temperature,
 			and temp2 reports the chip temperature.
-temp[N]_min		Mimimum temperature. LTC2974, LCT2977, and LTC2978 only.
+temp[N]_min		Minimum temperature. LTC2974, LTC2977, LTC2980, LTC2978,
+			and LTM2987 only.
 temp[N]_max		Maximum temperature.
 temp[N]_lcrit		Critical low temperature.
 temp[N]_crit		Critical high temperature.
 temp[N]_min_alarm	Temperature low alarm.
-			LTC2974, LTC2977, and LTC2978 only.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
 temp[N]_max_alarm	Temperature high alarm.
 temp[N]_lcrit_alarm	Temperature critical low alarm.
 temp[N]_crit_alarm	Temperature critical high alarm.
 temp[N]_lowest		Lowest measured temperature.
-			LTC2974, LTC2977, and LTC2978 only.
-			Not supported for chip temperature sensor on LTC2974.
+			LTC2974, LTC2975, LTC2977, LTC2980, LTC2978, and
+			LTM2987 only.
+			Not supported for chip temperature sensor on LTC2974 and
+			LTC2975.
 temp[N]_highest		Highest measured temperature. Not supported for chip
-			temperature sensor on LTC2974.
+			temperature sensor on LTC2974 and LTC2975.
 temp[N]_reset_history	Reset temperature history. Not supported for chip
-			temperature sensor on LTC2974.
+			temperature sensor on LTC2974 and LTC2975.
 
-power1_label		"pin". LTC3883 only.
+power1_label		"pin". LTC3883 and LTC3886 only.
 power1_input		Measured input power.
 
 power[N]_label		"pout[1-4]".
-			LTC2974: N=1-4
-			LTC2977: Not supported
+			LTC2974, LTC2975: N=1-4
+			LTC2977, LTC2980, LTM2987: Not supported
 			LTC2978: Not supported
-			LTC3880, LTM4676: N=1-2
+			LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+				N=1-2
 			LTC3883: N=2
 power[N]_input		Measured output power.
 
-curr1_label		"iin". LTC3880, LTC3883, and LTM4676 only.
+curr1_label		"iin". LTC3880, LTC3883, LTC3886, LTC3887, LTM4675,
+			and LTM4676 only.
 curr1_input		Measured input current.
 curr1_max		Maximum input current.
 curr1_max_alarm		Input current high alarm.
-curr1_highest		Highest input current. LTC3883 only.
-curr1_reset_history	Reset input current history. LTC3883 only.
+curr1_highest		Highest input current. LTC3883 and LTC3886 only.
+curr1_reset_history	Reset input current history. LTC3883 and LTC3886 only.
 
 curr[N]_label		"iout[1-4]".
-			LTC2974: N=1-4
-			LTC2977: not supported
+			LTC2974, LTC2975: N=1-4
+			LTC2977, LTC2980, LTM2987: not supported
 			LTC2978: not supported
-			LTC3880, LTM4676: N=2-3
+			LTC3880, LTC3882, LTC3886, LTC3887, LTM4675, LTM4676:
+				N=2-3
 			LTC3883: N=2
 curr[N]_input		Measured output current.
 curr[N]_max		Maximum output current.
 curr[N]_crit		Critical high output current.
-curr[N]_lcrit		Critical low output current. LTC2974 only.
+curr[N]_lcrit		Critical low output current. LTC2974 and LTC2975 only.
 curr[N]_max_alarm	Output current high alarm.
 curr[N]_crit_alarm	Output current critical high alarm.
-curr[N]_lcrit_alarm	Output current critical low alarm. LTC2974 only.
-curr[N]_lowest		Lowest output current. LTC2974 only.
+curr[N]_lcrit_alarm	Output current critical low alarm.
+			LTC2974 and LTC2975 only.
+curr[N]_lowest		Lowest output current. LTC2974 and LTC2975 only.
 curr[N]_highest		Highest output current.
 curr[N]_reset_history	Reset output current history.
diff --git a/Documentation/hwmon/max20751 b/Documentation/hwmon/max20751
new file mode 100644
index 0000000..f9fa25e
--- /dev/null
+++ b/Documentation/hwmon/max20751
@@ -0,0 +1,77 @@
+Kernel driver max20751
+======================
+
+Supported chips:
+  * maxim MAX20751
+    Prefix: 'max20751'
+    Addresses scanned: -
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX20751.pdf
+    Application note: http://pdfserv.maximintegrated.com/en/an/AN5941.pdf
+
+Author: Guenter Roeck <linux@roeck-us.net>
+
+
+Description
+-----------
+
+This driver supports MAX20751 Multiphase Master with PMBus Interface
+and Internal Buck Converter.
+
+The driver is a client driver to the core PMBus driver.
+Please see Documentation/hwmon/pmbus for details on PMBus client drivers.
+
+
+Usage Notes
+-----------
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices for
+details.
+
+
+Platform data support
+---------------------
+
+The driver supports standard PMBus driver platform data.
+
+
+Sysfs entries
+-------------
+
+The following attributes are supported.
+
+in1_label		"vin1"
+in1_input		Measured voltage.
+in1_min			Minimum input voltage.
+in1_max			Maximum input voltage.
+in1_lcrit		Critical minimum input voltage.
+in1_crit		Critical maximum input voltage.
+in1_min_alarm		Input voltage low alarm.
+in1_lcrit_alarm		Input voltage critical low alarm.
+in1_min_alarm		Input voltage low alarm.
+in1_max_alarm		Input voltage high alarm.
+
+in2_label		"vout1"
+in2_input		Measured voltage.
+in2_min			Minimum output voltage.
+in2_max			Maximum output voltage.
+in2_lcrit		Critical minimum output voltage.
+in2_crit		Critical maximum output voltage.
+in2_min_alarm		Output voltage low alarm.
+in2_lcrit_alarm		Output voltage critical low alarm.
+in2_min_alarm		Output voltage low alarm.
+in2_max_alarm		Output voltage high alarm.
+
+curr1_input		Measured output current.
+curr1_label		"iout1"
+curr1_max		Maximum output current.
+curr1_alarm		Current high alarm.
+
+temp1_input		Measured temperature.
+temp1_max		Maximum temperature.
+temp1_crit		Critical high temperature.
+temp1_max_alarm		Chip temperature high alarm.
+temp1_crit_alarm	Chip temperature critical high alarm.
+
+power1_input		Output power.
+power1_label		"pout1"
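Reading any of these attributes from userspace is an ordinary file read; a
small sketch, assuming the device happens to be hwmon0 (in practice the name
attribute should be matched first) and using the usual hwmon convention that
in*_input is reported in millivolts:

	#include <stdio.h>

	int main(void)
	{
		/* hwmon0 is a placeholder for whichever node the chip got. */
		FILE *f = fopen("/sys/class/hwmon/hwmon0/in2_input", "r");
		long mv;

		if (!f || fscanf(f, "%ld", &mv) != 1) {
			perror("in2_input");
			return 1;
		}
		fclose(f);
		printf("vout1 = %ld.%03ld V\n", mv / 1000, mv % 1000);
		return 0;
	}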
diff --git a/Documentation/hwmon/nct7802 b/Documentation/hwmon/nct7802
index 2e00f5e..5438deb 100644
--- a/Documentation/hwmon/nct7802
+++ b/Documentation/hwmon/nct7802
@@ -17,8 +17,7 @@
 chip. NCT7802Y supports 6 temperature sensors, 5 voltage sensors, and 3 fan
 speed sensors.
 
-The chip also supports intelligent fan speed control. This functionality is
-not currently supported by the driver.
+Smart Fan™ speed control is available via pwmX_auto_point attributes.
 
 Tested Boards and BIOS Versions
 -------------------------------
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
index a3557da..b397675 100644
--- a/Documentation/hwmon/pmbus
+++ b/Documentation/hwmon/pmbus
@@ -23,11 +23,15 @@
 	http://www.lineagepower.com/oem/pdf/PDT012A0X.pdf
 	http://www.lineagepower.com/oem/pdf/UDT020A0X.pdf
 	http://www.lineagepower.com/oem/pdf/MDT040A0X.pdf
-  * Texas Instruments TPS40400
-    Prefixes: 'tps40400'
+  * Texas Instruments TPS40400, TPS544B20, TPS544B25, TPS544C20, TPS544C25
+    Prefixes: 'tps40400', 'tps544b20', 'tps544b25', 'tps544c20', 'tps544c25'
     Addresses scanned: -
     Datasheets:
 	http://www.ti.com/lit/gpn/tps40400
+	http://www.ti.com/lit/gpn/tps544b20
+	http://www.ti.com/lit/gpn/tps544b25
+	http://www.ti.com/lit/gpn/tps544c20
+	http://www.ti.com/lit/gpn/tps544c25
   * Generic PMBus devices
     Prefix: 'pmbus'
     Addresses scanned: -
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 611c522..64df08d 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -124,6 +124,8 @@
 'H'	00-7F	linux/hiddev.h		conflict!
 'H'	00-0F	linux/hidraw.h		conflict!
 'H'	01	linux/mei.h		conflict!
+'H'	02	linux/mei.h		conflict!
+'H'	03	linux/mei.h		conflict!
 'H'	00-0F	sound/asound.h		conflict!
 'H'	20-40	sound/asound_fm.h	conflict!
 'H'	80-8F	sound/sfnt_info.h	conflict!
@@ -314,6 +316,7 @@
 0xB3	00	linux/mmc/ioctl.h
 0xC0	00-0F	linux/usb/iowarrior.h
 0xCA	00-0F	uapi/misc/cxl.h
+0xCA	80-8F	uapi/scsi/cxlflash_ioctl.h
 0xCB	00-1F	CBM serial IEC bus	in development:
 					<mailto:michael.klein@puffin.lb.shuttle.de>
 0xCD	01	linux/reiserfs_fs.h
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index acbc1a3..78f69cd 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -128,7 +128,7 @@
   special place-holders for where the extracted documentation should
   go.
 
-- scripts/basic/docproc.c
+- scripts/docproc.c
 
   This is a program for converting SGML template files into SGML
   files. When a file is referenced it is searched for symbols
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1d6f045..f252928 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -910,6 +910,8 @@
 			Disable PIN 1 of APIC timer
 			Can be useful to work around chipset bugs.
 
+	dis_ucode_ldr	[X86] Disable the microcode loader.
+
 	dma_debug=off	If the kernel is compiled with DMA_API_DEBUG support,
 			this option disables the debugging code at boot.
 
@@ -1315,6 +1317,10 @@
 			     <bus_id>,<clkrate>
 
 	i8042.debug	[HW] Toggle i8042 debug mode
+	i8042.unmask_kbd_data
+			[HW] Enable printing of interrupt data from the KBD port
+			     (disabled by default, and as a pre-condition
+			     requires that i8042.debug=1 be enabled)
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
 			     keyboard and cannot control its state
@@ -3135,22 +3141,35 @@
 			in a given burst of a callback-flood test.
 
 	rcutorture.fqs_duration= [KNL]
-			Set duration of force_quiescent_state bursts.
+			Set duration of force_quiescent_state bursts
+			in microseconds.
 
 	rcutorture.fqs_holdoff= [KNL]
-			Set holdoff time within force_quiescent_state bursts.
+			Set holdoff time within force_quiescent_state bursts
+			in microseconds.
 
 	rcutorture.fqs_stutter= [KNL]
-			Set wait time between force_quiescent_state bursts.
+			Set wait time between force_quiescent_state bursts
+			in seconds.
+
+	rcutorture.gp_cond= [KNL]
+			Use conditional/asynchronous update-side
+			primitives, if available.
 
 	rcutorture.gp_exp= [KNL]
-			Use expedited update-side primitives.
+			Use expedited update-side primitives, if available.
 
 	rcutorture.gp_normal= [KNL]
-			Use normal (non-expedited) update-side primitives.
-			If both gp_exp and gp_normal are set, do both.
-			If neither gp_exp nor gp_normal are set, still
-			do both.
+			Use normal (non-expedited) asynchronous
+			update-side primitives, if available.
+
+	rcutorture.gp_sync= [KNL]
+			Use normal (non-expedited) synchronous
+			update-side primitives, if available.  If all
+			of rcutorture.gp_cond=, rcutorture.gp_exp=,
+			rcutorture.gp_normal=, and rcutorture.gp_sync=
+			are zero, rcutorture acts as if they are all
+			non-zero.
 
 	rcutorture.n_barrier_cbs= [KNL]
 			Set callbacks/threads for rcu_barrier() testing.
@@ -3177,9 +3196,6 @@
 			Set time (s) between CPU-hotplug operations, or
 			zero to disable CPU-hotplug testing.
 
-	rcutorture.torture_runnable= [BOOT]
-			Start rcutorture running at boot time.
-
 	rcutorture.shuffle_interval= [KNL]
 			Set task-shuffle interval (s).  Shuffling tasks
 			allows some CPUs to go into dyntick-idle mode
@@ -3220,6 +3236,9 @@
 			Test RCU's dyntick-idle handling.  See also the
 			rcutorture.shuffle_interval parameter.
 
+	rcutorture.torture_runnable= [BOOT]
+			Start rcutorture running at boot time.
+
 	rcutorture.torture_type= [KNL]
 			Specify the RCU implementation to test.
 
diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt
index 1092ad9..7ed371c 100644
--- a/Documentation/mailbox.txt
+++ b/Documentation/mailbox.txt
@@ -51,8 +51,7 @@
  */
 static void message_from_remote(struct mbox_client *cl, void *mssg)
 {
-	struct demo_client *dc = container_of(mbox_client,
-						struct demo_client, cl);
+	struct demo_client *dc = container_of(cl, struct demo_client, cl);
 	if (dc->async) {
 		if (is_an_ack(mssg)) {
 			/* An ACK to our last sample sent */
@@ -68,8 +67,7 @@
 
 static void sample_sent(struct mbox_client *cl, void *mssg, int r)
 {
-	struct demo_client *dc = container_of(mbox_client,
-						struct demo_client, cl);
+	struct demo_client *dc = container_of(cl, struct demo_client, cl);
 	complete(&dc->c);
 }
 
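The container_of() fix above matters because the macro takes the pointer the
callback actually received (cl), not the member's type; a reduced sketch of
the pattern, with demo_client trimmed to the fields used here:

	#include <linux/completion.h>
	#include <linux/kernel.h>
	#include <linux/mailbox_client.h>

	struct demo_client {
		struct mbox_client cl;		/* embedded member */
		struct completion c;
	};

	static void sample_sent(struct mbox_client *cl, void *mssg, int r)
	{
		/* Recover the enclosing structure from the embedded member. */
		struct demo_client *dc = container_of(cl, struct demo_client, cl);

		complete(&dc->c);
	}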
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 13feb69..2ba8461 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -194,22 +194,22 @@
  (*) On any given CPU, dependent memory accesses will be issued in order, with
      respect to itself.  This means that for:
 
-	ACCESS_ONCE(Q) = P; smp_read_barrier_depends(); D = ACCESS_ONCE(*Q);
+	WRITE_ONCE(Q, P); smp_read_barrier_depends(); D = READ_ONCE(*Q);
 
      the CPU will issue the following memory operations:
 
 	Q = LOAD P, D = LOAD *Q
 
      and always in that order.  On most systems, smp_read_barrier_depends()
-     does nothing, but it is required for DEC Alpha.  The ACCESS_ONCE()
-     is required to prevent compiler mischief.  Please note that you
-     should normally use something like rcu_dereference() instead of
-     open-coding smp_read_barrier_depends().
+     does nothing, but it is required for DEC Alpha.  The READ_ONCE()
+     and WRITE_ONCE() are required to prevent compiler mischief.  Please
+     note that you should normally use something like rcu_dereference()
+     instead of open-coding smp_read_barrier_depends().
 
  (*) Overlapping loads and stores within a particular CPU will appear to be
      ordered within that CPU.  This means that for:
 
-	a = ACCESS_ONCE(*X); ACCESS_ONCE(*X) = b;
+	a = READ_ONCE(*X); WRITE_ONCE(*X, b);
 
      the CPU will only issue the following sequence of memory operations:
 
@@ -217,7 +217,7 @@
 
      And for:
 
-	ACCESS_ONCE(*X) = c; d = ACCESS_ONCE(*X);
+	WRITE_ONCE(*X, c); d = READ_ONCE(*X);
 
      the CPU will only issue:
 
@@ -228,11 +228,11 @@
 
 And there are a number of things that _must_ or _must_not_ be assumed:
 
- (*) It _must_not_ be assumed that the compiler will do what you want with
-     memory references that are not protected by ACCESS_ONCE().  Without
-     ACCESS_ONCE(), the compiler is within its rights to do all sorts
-     of "creative" transformations, which are covered in the Compiler
-     Barrier section.
+ (*) It _must_not_ be assumed that the compiler will do what you want
+     with memory references that are not protected by READ_ONCE() and
+     WRITE_ONCE().  Without them, the compiler is within its rights to
+     do all sorts of "creative" transformations, which are covered in
+     the Compiler Barrier section.
 
  (*) It _must_not_ be assumed that independent loads and stores will be issued
      in the order given.  This means that for:
@@ -520,8 +520,8 @@
 	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
 	B = 4;
 	<write barrier>
-	ACCESS_ONCE(P) = &B
-			      Q = ACCESS_ONCE(P);
+	WRITE_ONCE(P, &B)
+			      Q = READ_ONCE(P);
 			      D = *Q;
 
 There's a clear data dependency here, and it would seem that by the end of the
@@ -547,8 +547,8 @@
 	{ A == 1, B == 2, C = 3, P == &A, Q == &C }
 	B = 4;
 	<write barrier>
-	ACCESS_ONCE(P) = &B
-			      Q = ACCESS_ONCE(P);
+	WRITE_ONCE(P, &B);
+			      Q = READ_ONCE(P);
 			      <data dependency barrier>
 			      D = *Q;
 
@@ -574,8 +574,8 @@
 	{ M[0] == 1, M[1] == 2, M[3] = 3, P == 0, Q == 3 }
 	M[1] = 4;
 	<write barrier>
-	ACCESS_ONCE(P) = 1
-			      Q = ACCESS_ONCE(P);
+	WRITE_ONCE(P, 1);
+			      Q = READ_ONCE(P);
 			      <data dependency barrier>
 			      D = M[Q];
 
@@ -596,10 +596,10 @@
 simply a data dependency barrier to make it work correctly.  Consider the
 following bit of code:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE(a);
 	if (q) {
 		<data dependency barrier>  /* BUG: No data dependency!!! */
-		p = ACCESS_ONCE(b);
+		p = READ_ONCE(b);
 	}
 
 This will not have the desired effect because there is no actual data
@@ -608,10 +608,10 @@
 the load from b as having happened before the load from a.  In such a
 case what's actually required is:
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE(a);
 	if (q) {
 		<read barrier>
-		p = ACCESS_ONCE(b);
+		p = READ_ONCE(b);
 	}
 
 However, stores are not speculated.  This means that ordering -is- provided
@@ -619,7 +619,7 @@
 
 	q = READ_ONCE_CTRL(a);
 	if (q) {
-		ACCESS_ONCE(b) = p;
+		WRITE_ONCE(b, p);
 	}
 
 Control dependencies pair normally with other types of barriers.  That
@@ -647,11 +647,11 @@
 	q = READ_ONCE_CTRL(a);
 	if (q) {
 		barrier();
-		ACCESS_ONCE(b) = p;
+		WRITE_ONCE(b, p);
 		do_something();
 	} else {
 		barrier();
-		ACCESS_ONCE(b) = p;
+		WRITE_ONCE(b, p);
 		do_something_else();
 	}
 
@@ -660,12 +660,12 @@
 
 	q = READ_ONCE_CTRL(a);
 	barrier();
-	ACCESS_ONCE(b) = p;  /* BUG: No ordering vs. load from a!!! */
+	WRITE_ONCE(b, p);  /* BUG: No ordering vs. load from a!!! */
 	if (q) {
-		/* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
+		/* WRITE_ONCE(b, p); -- moved up, BUG!!! */
 		do_something();
 	} else {
-		/* ACCESS_ONCE(b) = p; -- moved up, BUG!!! */
+		/* WRITE_ONCE(b, p); -- moved up, BUG!!! */
 		do_something_else();
 	}
 
@@ -676,7 +676,7 @@
 Therefore, if you need ordering in this example, you need explicit
 memory barriers, for example, smp_store_release():
 
-	q = ACCESS_ONCE(a);
+	q = READ_ONCE(a);
 	if (q) {
 		smp_store_release(&b, p);
 		do_something();
@@ -690,10 +690,10 @@
 
 	q = READ_ONCE_CTRL(a);
 	if (q) {
-		ACCESS_ONCE(b) = p;
+		WRITE_ONCE(b, p);
 		do_something();
 	} else {
-		ACCESS_ONCE(b) = r;
+		WRITE_ONCE(b, r);
 		do_something_else();
 	}
 
@@ -706,10 +706,10 @@
 
 	q = READ_ONCE_CTRL(a);
 	if (q % MAX) {
-		ACCESS_ONCE(b) = p;
+		WRITE_ONCE(b, p);
 		do_something();
 	} else {
-		ACCESS_ONCE(b) = r;
+		WRITE_ONCE(b, r);
 		do_something_else();
 	}
 
@@ -718,7 +718,7 @@
 transform the above code into the following:
 
 	q = READ_ONCE_CTRL(a);
-	ACCESS_ONCE(b) = p;
+	WRITE_ONCE(b, p);
 	do_something_else();
 
 Given this transformation, the CPU is not required to respect the ordering
@@ -731,10 +731,10 @@
 	q = READ_ONCE_CTRL(a);
 	BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
 	if (q % MAX) {
-		ACCESS_ONCE(b) = p;
+		WRITE_ONCE(b, p);
 		do_something();
 	} else {
-		ACCESS_ONCE(b) = r;
+		WRITE_ONCE(b, r);
 		do_something_else();
 	}
 
@@ -746,18 +746,18 @@
 evaluation.  Consider this example:
 
 	q = READ_ONCE_CTRL(a);
-	if (a || 1 > 0)
-		ACCESS_ONCE(b) = 1;
+	if (q || 1 > 0)
+		WRITE_ONCE(b, 1);
 
 Because the first condition cannot fault and the second condition is
 always true, the compiler can transform this example as following,
 defeating control dependency:
 
 	q = READ_ONCE_CTRL(a);
-	ACCESS_ONCE(b) = 1;
+	WRITE_ONCE(b, 1);
 
 This example underscores the need to ensure that the compiler cannot
-out-guess your code.  More generally, although ACCESS_ONCE() does force
+out-guess your code.  More generally, although READ_ONCE() does force
 the compiler to actually emit code for a given load, it does not force
 the compiler to use the results.
 
@@ -769,7 +769,7 @@
 	=======================   =======================
 	r1 = READ_ONCE_CTRL(x);   r2 = READ_ONCE_CTRL(y);
 	if (r1 > 0)               if (r2 > 0)
-	  ACCESS_ONCE(y) = 1;       ACCESS_ONCE(x) = 1;
+	  WRITE_ONCE(y, 1);         WRITE_ONCE(x, 1);
 
 	assert(!(r1 == 1 && r2 == 1));
 
@@ -779,7 +779,7 @@
 
 	CPU 2
 	=====================
-	ACCESS_ONCE(x) = 2;
+	WRITE_ONCE(x, 2);
 
 	assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
 
@@ -798,8 +798,7 @@
 
   (*) Control dependencies must be headed by READ_ONCE_CTRL().
       Or, as a much less preferable alternative, interpose
-      be headed by READ_ONCE() or an ACCESS_ONCE() read and must
-      have smp_read_barrier_depends() between this read and the
+      smp_read_barrier_depends() between a READ_ONCE() and the
       control-dependent write.
 
   (*) Control dependencies can order prior loads against later stores.
@@ -815,15 +814,16 @@
 
   (*) Control dependencies require at least one run-time conditional
       between the prior load and the subsequent store, and this
-      conditional must involve the prior load.  If the compiler
-      is able to optimize the conditional away, it will have also
-      optimized away the ordering.  Careful use of ACCESS_ONCE() can
-      help to preserve the needed conditional.
+      conditional must involve the prior load.  If the compiler is able
+      to optimize the conditional away, it will have also optimized
+      away the ordering.  Careful use of READ_ONCE_CTRL(), READ_ONCE(),
+      and WRITE_ONCE() can help to preserve the needed conditional.
 
   (*) Control dependencies require that the compiler avoid reordering the
-      dependency into nonexistence.  Careful use of ACCESS_ONCE() or
-      barrier() can help to preserve your control dependency.  Please
-      see the Compiler Barrier section for more information.
+      dependency into nonexistence.  Careful use of READ_ONCE_CTRL()
+      or smp_read_barrier_depends() can help to preserve your control
+      dependency.  Please see the Compiler Barrier section for more
+      information.
 
   (*) Control dependencies pair normally with other types of barriers.
 
@@ -848,11 +848,11 @@
 
 	CPU 1		      CPU 2
 	===============	      ===============
-	ACCESS_ONCE(a) = 1;
+	WRITE_ONCE(a, 1);
 	<write barrier>
-	ACCESS_ONCE(b) = 2;   x = ACCESS_ONCE(b);
+	WRITE_ONCE(b, 2);     x = READ_ONCE(b);
 			      <read barrier>
-			      y = ACCESS_ONCE(a);
+			      y = READ_ONCE(a);
 
 Or:
 
@@ -860,7 +860,7 @@
 	===============	      ===============================
 	a = 1;
 	<write barrier>
-	ACCESS_ONCE(b) = &a;  x = ACCESS_ONCE(b);
+	WRITE_ONCE(b, &a);    x = READ_ONCE(b);
 			      <data dependency barrier>
 			      y = *x;
 
@@ -868,11 +868,11 @@
 
 	CPU 1		      CPU 2
 	===============	      ===============================
-	r1 = ACCESS_ONCE(y);
+	r1 = READ_ONCE(y);
 	<general barrier>
-	ACCESS_ONCE(y) = 1;   if (r2 = ACCESS_ONCE(x)) {
+	WRITE_ONCE(y, 1);     if (r2 = READ_ONCE(x)) {
 			         <implicit control dependency>
-			         ACCESS_ONCE(y) = 1;
+			         WRITE_ONCE(y, 1);
 			      }
 
 	assert(r1 == 0 || r2 == 0);
@@ -886,11 +886,11 @@
 
 	CPU 1                               CPU 2
 	===================                 ===================
-	ACCESS_ONCE(a) = 1;  }----   --->{  v = ACCESS_ONCE(c);
-	ACCESS_ONCE(b) = 2;  }    \ /    {  w = ACCESS_ONCE(d);
+	WRITE_ONCE(a, 1);    }----   --->{  v = READ_ONCE(c);
+	WRITE_ONCE(b, 2);    }    \ /    {  w = READ_ONCE(d);
 	<write barrier>            \        <read barrier>
-	ACCESS_ONCE(c) = 3;  }    / \    {  x = ACCESS_ONCE(a);
-	ACCESS_ONCE(d) = 4;  }----   --->{  y = ACCESS_ONCE(b);
+	WRITE_ONCE(c, 3);    }    / \    {  x = READ_ONCE(a);
+	WRITE_ONCE(d, 4);    }----   --->{  y = READ_ONCE(b);
 
 
 EXAMPLES OF MEMORY BARRIER SEQUENCES
@@ -1340,10 +1340,10 @@
 
 	barrier();
 
-This is a general barrier -- there are no read-read or write-write variants
-of barrier().  However, ACCESS_ONCE() can be thought of as a weak form
-for barrier() that affects only the specific accesses flagged by the
-ACCESS_ONCE().
+This is a general barrier -- there are no read-read or write-write
+variants of barrier().  However, READ_ONCE() and WRITE_ONCE() can be
+thought of as weak forms of barrier() that affect only the specific
+accesses flagged by the READ_ONCE() or WRITE_ONCE().
 
 The barrier() function has the following effects:
 
@@ -1355,9 +1355,10 @@
  (*) Within a loop, forces the compiler to load the variables used
      in that loop's conditional on each pass through that loop.
 
-The ACCESS_ONCE() function can prevent any number of optimizations that,
-while perfectly safe in single-threaded code, can be fatal in concurrent
-code.  Here are some examples of these sorts of optimizations:
+The READ_ONCE() and WRITE_ONCE() functions can prevent any number of
+optimizations that, while perfectly safe in single-threaded code, can
+be fatal in concurrent code.  Here are some examples of these sorts
+of optimizations:
 
  (*) The compiler is within its rights to reorder loads and stores
      to the same variable, and in some cases, the CPU is within its
@@ -1370,11 +1371,11 @@
      Might result in an older value of x stored in a[1] than in a[0].
      Prevent both the compiler and the CPU from doing this as follows:
 
-	a[0] = ACCESS_ONCE(x);
-	a[1] = ACCESS_ONCE(x);
+	a[0] = READ_ONCE(x);
+	a[1] = READ_ONCE(x);
 
-     In short, ACCESS_ONCE() provides cache coherence for accesses from
-     multiple CPUs to a single variable.
+     In short, READ_ONCE() and WRITE_ONCE() provide cache coherence for
+     accesses from multiple CPUs to a single variable.
 
  (*) The compiler is within its rights to merge successive loads from
      the same variable.  Such merging can cause the compiler to "optimize"
@@ -1391,9 +1392,9 @@
 		for (;;)
 			do_something_with(tmp);
 
-     Use ACCESS_ONCE() to prevent the compiler from doing this to you:
+     Use READ_ONCE() to prevent the compiler from doing this to you:
 
-	while (tmp = ACCESS_ONCE(a))
+	while (tmp = READ_ONCE(a))
 		do_something_with(tmp);
 
  (*) The compiler is within its rights to reload a variable, for example,
@@ -1415,9 +1416,9 @@
      a was modified by some other CPU between the "while" statement and
      the call to do_something_with().
 
-     Again, use ACCESS_ONCE() to prevent the compiler from doing this:
+     Again, use READ_ONCE() to prevent the compiler from doing this:
 
-	while (tmp = ACCESS_ONCE(a))
+	while (tmp = READ_ONCE(a))
 		do_something_with(tmp);
 
      Note that if the compiler runs short of registers, it might save
@@ -1437,21 +1438,21 @@
 
 	do { } while (0);
 
-     This transformation is a win for single-threaded code because it gets
-     rid of a load and a branch.  The problem is that the compiler will
-     carry out its proof assuming that the current CPU is the only one
-     updating variable 'a'.  If variable 'a' is shared, then the compiler's
-     proof will be erroneous.  Use ACCESS_ONCE() to tell the compiler
-     that it doesn't know as much as it thinks it does:
+     This transformation is a win for single-threaded code because it
+     gets rid of a load and a branch.  The problem is that the compiler
+     will carry out its proof assuming that the current CPU is the only
+     one updating variable 'a'.  If variable 'a' is shared, then the
+     compiler's proof will be erroneous.  Use READ_ONCE() to tell the
+     compiler that it doesn't know as much as it thinks it does:
 
-	while (tmp = ACCESS_ONCE(a))
+	while (tmp = READ_ONCE(a))
 		do_something_with(tmp);
 
      But please note that the compiler is also closely watching what you
-     do with the value after the ACCESS_ONCE().  For example, suppose you
+     do with the value after the READ_ONCE().  For example, suppose you
      do the following and MAX is a preprocessor macro with the value 1:
 
-	while ((tmp = ACCESS_ONCE(a)) % MAX)
+	while ((tmp = READ_ONCE(a)) % MAX)
 		do_something_with(tmp);
 
      Then the compiler knows that the result of the "%" operator applied
@@ -1475,12 +1476,12 @@
      surprise if some other CPU might have stored to variable 'a' in the
      meantime.
 
-     Use ACCESS_ONCE() to prevent the compiler from making this sort of
+     Use WRITE_ONCE() to prevent the compiler from making this sort of
      wrong guess:
 
-	ACCESS_ONCE(a) = 0;
+	WRITE_ONCE(a, 0);
 	/* Code that does not store to variable a. */
-	ACCESS_ONCE(a) = 0;
+	WRITE_ONCE(a, 0);
 
  (*) The compiler is within its rights to reorder memory accesses unless
      you tell it not to.  For example, consider the following interaction
@@ -1509,40 +1510,43 @@
 	}
 
      If the interrupt occurs between these two statement, then
-     interrupt_handler() might be passed a garbled msg.  Use ACCESS_ONCE()
+     interrupt_handler() might be passed a garbled msg.  Use WRITE_ONCE()
      to prevent this as follows:
 
 	void process_level(void)
 	{
-		ACCESS_ONCE(msg) = get_message();
-		ACCESS_ONCE(flag) = true;
+		WRITE_ONCE(msg, get_message());
+		WRITE_ONCE(flag, true);
 	}
 
 	void interrupt_handler(void)
 	{
-		if (ACCESS_ONCE(flag))
-			process_message(ACCESS_ONCE(msg));
+		if (READ_ONCE(flag))
+			process_message(READ_ONCE(msg));
 	}
 
-     Note that the ACCESS_ONCE() wrappers in interrupt_handler()
-     are needed if this interrupt handler can itself be interrupted
-     by something that also accesses 'flag' and 'msg', for example,
-     a nested interrupt or an NMI.  Otherwise, ACCESS_ONCE() is not
-     needed in interrupt_handler() other than for documentation purposes.
-     (Note also that nested interrupts do not typically occur in modern
-     Linux kernels, in fact, if an interrupt handler returns with
-     interrupts enabled, you will get a WARN_ONCE() splat.)
+     Note that the READ_ONCE() and WRITE_ONCE() wrappers in
+     interrupt_handler() are needed if this interrupt handler can itself
+     be interrupted by something that also accesses 'flag' and 'msg',
+     for example, a nested interrupt or an NMI.  Otherwise, READ_ONCE()
+     and WRITE_ONCE() are not needed in interrupt_handler() other than
+     for documentation purposes.  (Note also that nested interrupts
+     do not typically occur in modern Linux kernels, in fact, if an
+     interrupt handler returns with interrupts enabled, you will get a
+     WARN_ONCE() splat.)
 
-     You should assume that the compiler can move ACCESS_ONCE() past
-     code not containing ACCESS_ONCE(), barrier(), or similar primitives.
+     You should assume that the compiler can move READ_ONCE() and
+     WRITE_ONCE() past code not containing READ_ONCE(), WRITE_ONCE(),
+     barrier(), or similar primitives.
 
-     This effect could also be achieved using barrier(), but ACCESS_ONCE()
-     is more selective:  With ACCESS_ONCE(), the compiler need only forget
-     the contents of the indicated memory locations, while with barrier()
-     the compiler must discard the value of all memory locations that
-     it has currented cached in any machine registers.  Of course,
-     the compiler must also respect the order in which the ACCESS_ONCE()s
-     occur, though the CPU of course need not do so.
+     This effect could also be achieved using barrier(), but READ_ONCE()
+     and WRITE_ONCE() are more selective:  With READ_ONCE() and
+     WRITE_ONCE(), the compiler need only forget the contents of the
+     indicated memory locations, while with barrier() the compiler must
+     discard the value of all memory locations that it has currently
+     cached in any machine registers.  Of course, the compiler must also
+     respect the order in which the READ_ONCE()s and WRITE_ONCE()s occur,
+     though the CPU of course need not do so.
 
  (*) The compiler is within its rights to invent stores to a variable,
      as in the following example:
@@ -1562,16 +1566,16 @@
      a branch.  Unfortunately, in concurrent code, this optimization
      could cause some other CPU to see a spurious value of 42 -- even
      if variable 'a' was never zero -- when loading variable 'b'.
-     Use ACCESS_ONCE() to prevent this as follows:
+     Use WRITE_ONCE() to prevent this as follows:
 
 	if (a)
-		ACCESS_ONCE(b) = a;
+		WRITE_ONCE(b, a);
 	else
-		ACCESS_ONCE(b) = 42;
+		WRITE_ONCE(b, 42);
 
      The compiler can also invent loads.  These are usually less
      damaging, but they can result in cache-line bouncing and thus in
-     poor performance and scalability.  Use ACCESS_ONCE() to prevent
+     poor performance and scalability.  Use READ_ONCE() to prevent
      invented loads.
 
  (*) For aligned memory locations whose size allows them to be accessed
@@ -1590,9 +1594,9 @@
      This optimization can therefore be a win in single-threaded code.
      In fact, a recent bug (since fixed) caused GCC to incorrectly use
      this optimization in a volatile store.  In the absence of such bugs,
-     use of ACCESS_ONCE() prevents store tearing in the following example:
+     use of WRITE_ONCE() prevents store tearing in the following example:
 
-	ACCESS_ONCE(p) = 0x00010002;
+	WRITE_ONCE(p, 0x00010002);
 
      Use of packed structures can also result in load and store tearing,
      as in this example:
@@ -1609,22 +1613,23 @@
 	foo2.b = foo1.b;
 	foo2.c = foo1.c;
 
-     Because there are no ACCESS_ONCE() wrappers and no volatile markings,
-     the compiler would be well within its rights to implement these three
-     assignment statements as a pair of 32-bit loads followed by a pair
-     of 32-bit stores.  This would result in load tearing on 'foo1.b'
-     and store tearing on 'foo2.b'.  ACCESS_ONCE() again prevents tearing
-     in this example:
+     Because there are no READ_ONCE() or WRITE_ONCE() wrappers and no
+     volatile markings, the compiler would be well within its rights to
+     implement these three assignment statements as a pair of 32-bit
+     loads followed by a pair of 32-bit stores.  This would result in
+     load tearing on 'foo1.b' and store tearing on 'foo2.b'.  READ_ONCE()
+     and WRITE_ONCE() again prevent tearing in this example:
 
 	foo2.a = foo1.a;
-	ACCESS_ONCE(foo2.b) = ACCESS_ONCE(foo1.b);
+	WRITE_ONCE(foo2.b, READ_ONCE(foo1.b));
 	foo2.c = foo1.c;
 
-All that aside, it is never necessary to use ACCESS_ONCE() on a variable
-that has been marked volatile.  For example, because 'jiffies' is marked
-volatile, it is never necessary to say ACCESS_ONCE(jiffies).  The reason
-for this is that ACCESS_ONCE() is implemented as a volatile cast, which
-has no effect when its argument is already marked volatile.
+All that aside, it is never necessary to use READ_ONCE() and
+WRITE_ONCE() on a variable that has been marked volatile.  For example,
+because 'jiffies' is marked volatile, it is never necessary to
+say READ_ONCE(jiffies).  The reason for this is that READ_ONCE() and
+WRITE_ONCE() are implemented as volatile casts, which have no effect when
+their argument is already marked volatile.
 
 Please note that these compiler barriers have no direct effect on the CPU,
 which may then reorder things however it wishes.
@@ -1646,14 +1651,15 @@
 All memory barriers except the data dependency barriers imply a compiler
 barrier. Data dependencies do not impose any additional compiler ordering.
 
-Aside: In the case of data dependencies, the compiler would be expected to
-issue the loads in the correct order (eg. `a[b]` would have to load the value
-of b before loading a[b]), however there is no guarantee in the C specification
-that the compiler may not speculate the value of b (eg. is equal to 1) and load
-a before b (eg. tmp = a[1]; if (b != 1) tmp = a[b]; ). There is also the
-problem of a compiler reloading b after having loaded a[b], thus having a newer
-copy of b than a[b]. A consensus has not yet been reached about these problems,
-however the ACCESS_ONCE macro is a good place to start looking.
+Aside: In the case of data dependencies, the compiler would be expected
+to issue the loads in the correct order (eg. `a[b]` would have to load
+the value of b before loading a[b]), however there is no guarantee in
+the C specification that the compiler may not speculate the value of b
+(eg. is equal to 1) and load a before b (eg. tmp = a[1]; if (b != 1)
+tmp = a[b]; ). There is also the problem of a compiler reloading b after
+having loaded a[b], thus having a newer copy of b than a[b]. A consensus
+has not yet been reached about these problems, however the READ_ONCE()
+macro is a good place to start looking.
 
 SMP memory barriers are reduced to compiler barriers on uniprocessor compiled
 systems because it is assumed that a CPU will appear to be self-consistent,
@@ -1848,15 +1854,10 @@
 another CPU not holding that lock.  In short, a ACQUIRE followed by an
 RELEASE may -not- be assumed to be a full memory barrier.
 
-Similarly, the reverse case of a RELEASE followed by an ACQUIRE does not
-imply a full memory barrier.  If it is necessary for a RELEASE-ACQUIRE
-pair to produce a full barrier, the ACQUIRE can be followed by an
-smp_mb__after_unlock_lock() invocation.  This will produce a full barrier
-if either (a) the RELEASE and the ACQUIRE are executed by the same
-CPU or task, or (b) the RELEASE and ACQUIRE act on the same variable.
-The smp_mb__after_unlock_lock() primitive is free on many architectures.
-Without smp_mb__after_unlock_lock(), the CPU's execution of the critical
-sections corresponding to the RELEASE and the ACQUIRE can cross, so that:
+Similarly, the reverse case of a RELEASE followed by an ACQUIRE does
+not imply a full memory barrier.  Therefore, the CPU's execution of the
+critical sections corresponding to the RELEASE and the ACQUIRE can cross,
+so that:
 
 	*A = a;
 	RELEASE M
@@ -1894,29 +1895,6 @@
 	a sleep-unlock race, but the locking primitive needs to resolve
 	such races properly in any case.
 
-With smp_mb__after_unlock_lock(), the two critical sections cannot overlap.
-For example, with the following code, the store to *A will always be
-seen by other CPUs before the store to *B:
-
-	*A = a;
-	RELEASE M
-	ACQUIRE N
-	smp_mb__after_unlock_lock();
-	*B = b;
-
-The operations will always occur in one of the following orders:
-
-	STORE *A, RELEASE, ACQUIRE, smp_mb__after_unlock_lock(), STORE *B
-	STORE *A, ACQUIRE, RELEASE, smp_mb__after_unlock_lock(), STORE *B
-	ACQUIRE, STORE *A, RELEASE, smp_mb__after_unlock_lock(), STORE *B
-
-If the RELEASE and ACQUIRE were instead both operating on the same lock
-variable, only the first of these alternatives can occur.  In addition,
-the more strongly ordered systems may rule out some of the above orders.
-But in any case, as noted earlier, the smp_mb__after_unlock_lock()
-ensures that the store to *A will always be seen as happening before
-the store to *B.
-
 Locks and semaphores may not provide any guarantee of ordering on UP compiled
 systems, and so cannot be counted on in such a situation to actually achieve
 anything at all - especially with respect to I/O accesses - unless combined
@@ -2126,12 +2104,12 @@
 
 	CPU 1				CPU 2
 	===============================	===============================
-	ACCESS_ONCE(*A) = a;		ACCESS_ONCE(*E) = e;
+	WRITE_ONCE(*A, a);		WRITE_ONCE(*E, e);
 	ACQUIRE M			ACQUIRE Q
-	ACCESS_ONCE(*B) = b;		ACCESS_ONCE(*F) = f;
-	ACCESS_ONCE(*C) = c;		ACCESS_ONCE(*G) = g;
+	WRITE_ONCE(*B, b);		WRITE_ONCE(*F, f);
+	WRITE_ONCE(*C, c);		WRITE_ONCE(*G, g);
 	RELEASE M			RELEASE Q
-	ACCESS_ONCE(*D) = d;		ACCESS_ONCE(*H) = h;
+	WRITE_ONCE(*D, d);		WRITE_ONCE(*H, h);
 
 Then there is no guarantee as to what order CPU 3 will see the accesses to *A
 through *H occur in, other than the constraints imposed by the separate locks
@@ -2147,40 +2125,6 @@
 	*E, *F or *G following RELEASE Q
 
 
-However, if the following occurs:
-
-	CPU 1				CPU 2
-	===============================	===============================
-	ACCESS_ONCE(*A) = a;
-	ACQUIRE M		     [1]
-	ACCESS_ONCE(*B) = b;
-	ACCESS_ONCE(*C) = c;
-	RELEASE M	     [1]
-	ACCESS_ONCE(*D) = d;		ACCESS_ONCE(*E) = e;
-					ACQUIRE M		     [2]
-					smp_mb__after_unlock_lock();
-					ACCESS_ONCE(*F) = f;
-					ACCESS_ONCE(*G) = g;
-					RELEASE M	     [2]
-					ACCESS_ONCE(*H) = h;
-
-CPU 3 might see:
-
-	*E, ACQUIRE M [1], *C, *B, *A, RELEASE M [1],
-		ACQUIRE M [2], *H, *F, *G, RELEASE M [2], *D
-
-But assuming CPU 1 gets the lock first, CPU 3 won't see any of:
-
-	*B, *C, *D, *F, *G or *H preceding ACQUIRE M [1]
-	*A, *B or *C following RELEASE M [1]
-	*F, *G or *H preceding ACQUIRE M [2]
-	*A, *B, *C, *E, *F or *G following RELEASE M [2]
-
-Note that the smp_mb__after_unlock_lock() is critically important
-here: Without it CPU 3 might see some of the above orderings.
-Without smp_mb__after_unlock_lock(), the accesses are not guaranteed
-to be seen in order unless CPU 3 holds lock M.
-
 
 ACQUIRES VS I/O ACCESSES
 ------------------------
@@ -2383,9 +2327,7 @@
 explicit lock operations, described later).  These include:
 
 	xchg();
-	cmpxchg();
 	atomic_xchg();			atomic_long_xchg();
-	atomic_cmpxchg();		atomic_long_cmpxchg();
 	atomic_inc_return();		atomic_long_inc_return();
 	atomic_dec_return();		atomic_long_dec_return();
 	atomic_add_return();		atomic_long_add_return();
@@ -2398,7 +2340,9 @@
 	test_and_clear_bit();
 	test_and_change_bit();
 
-	/* when succeeds (returns 1) */
+	/* when succeeds */
+	cmpxchg();
+	atomic_cmpxchg();		atomic_long_cmpxchg();
 	atomic_add_unless();		atomic_long_add_unless();
 
 These are used for such things as implementing ACQUIRE-class and RELEASE-class
@@ -2881,11 +2825,11 @@
 operations in exactly the order specified, so that if the CPU is, for example,
 given the following piece of code to execute:
 
-	a = ACCESS_ONCE(*A);
-	ACCESS_ONCE(*B) = b;
-	c = ACCESS_ONCE(*C);
-	d = ACCESS_ONCE(*D);
-	ACCESS_ONCE(*E) = e;
+	a = READ_ONCE(*A);
+	WRITE_ONCE(*B, b);
+	c = READ_ONCE(*C);
+	d = READ_ONCE(*D);
+	WRITE_ONCE(*E, e);
 
 they would then expect that the CPU will complete the memory operation for each
 instruction before moving on to the next one, leading to a definite sequence of
@@ -2932,12 +2876,12 @@
 _own_ accesses appear to be correctly ordered, without the need for a memory
 barrier.  For instance with the following code:
 
-	U = ACCESS_ONCE(*A);
-	ACCESS_ONCE(*A) = V;
-	ACCESS_ONCE(*A) = W;
-	X = ACCESS_ONCE(*A);
-	ACCESS_ONCE(*A) = Y;
-	Z = ACCESS_ONCE(*A);
+	U = READ_ONCE(*A);
+	WRITE_ONCE(*A, V);
+	WRITE_ONCE(*A, W);
+	X = READ_ONCE(*A);
+	WRITE_ONCE(*A, Y);
+	Z = READ_ONCE(*A);
 
 and assuming no intervention by an external influence, it can be assumed that
 the final result will appear to be:
@@ -2953,13 +2897,14 @@
 	U=LOAD *A, STORE *A=V, STORE *A=W, X=LOAD *A, STORE *A=Y, Z=LOAD *A
 
 in that order, but, without intervention, the sequence may have almost any
-combination of elements combined or discarded, provided the program's view of
-the world remains consistent.  Note that ACCESS_ONCE() is -not- optional
-in the above example, as there are architectures where a given CPU might
-reorder successive loads to the same location.  On such architectures,
-ACCESS_ONCE() does whatever is necessary to prevent this, for example, on
-Itanium the volatile casts used by ACCESS_ONCE() cause GCC to emit the
-special ld.acq and st.rel instructions that prevent such reordering.
+combination of elements combined or discarded, provided the program's view
+of the world remains consistent.  Note that READ_ONCE() and WRITE_ONCE()
+are -not- optional in the above example, as there are architectures
+where a given CPU might reorder successive loads to the same location.
+On such architectures, READ_ONCE() and WRITE_ONCE() do whatever is
+necessary to prevent this, for example, on Itanium the volatile casts
+used by READ_ONCE() and WRITE_ONCE() cause GCC to emit the special ld.acq
+and st.rel instructions (respectively) that prevent such reordering.
 
 The compiler may also combine, discard or defer elements of the sequence before
 the CPU even sees them.
@@ -2973,13 +2918,14 @@
 
 	*A = W;
 
-since, without either a write barrier or an ACCESS_ONCE(), it can be
+since, without either a write barrier or a WRITE_ONCE(), it can be
 assumed that the effect of the storage of V to *A is lost.  Similarly:
 
 	*A = Y;
 	Z = *A;
 
-may, without a memory barrier or an ACCESS_ONCE(), be reduced to:
+may, without a memory barrier or READ_ONCE() and WRITE_ONCE(), be
+reduced to:
 
 	*A = Y;
 	Z = Y;
diff --git a/Documentation/men-chameleon-bus.txt b/Documentation/men-chameleon-bus.txt
new file mode 100644
index 0000000..30ded73
--- /dev/null
+++ b/Documentation/men-chameleon-bus.txt
@@ -0,0 +1,163 @@
+                               MEN Chameleon Bus
+                               =================
+
+Table of Contents
+=================
+1 Introduction
+    1.1 Scope of this Document
+    1.2 Limitations of the current implementation
+2 Architecture
+    2.1 MEN Chameleon Bus
+    2.2 Carrier Devices
+    2.3 Parser
+3 Resource handling
+    3.1 Memory Resources
+    3.2 IRQs
+4 Writing an MCB driver
+    4.1 The driver structure
+    4.2 Probing and attaching
+    4.3 Initializing the driver
+
+
+1 Introduction
+===============
+  This document describes the architecture and implementation of the MEN
+  Chameleon Bus (called MCB throughout this document).
+
+1.1 Scope of this Document
+---------------------------
+  This document is intended to be a short overview of the current
+  implementation and by no means describes the complete possibilities of MCB
+  based devices.
+
+1.2 Limitations of the current implementation
+----------------------------------------------
+  The current implementation is limited to PCI and PCIe based carrier devices
+  that only use a single memory resource and share the PCI legacy IRQ.  Not
+  implemented are:
+  - Multi-resource MCB devices like the VME Controller or M-Module carrier.
+  - MCB devices that need another MCB device, like SRAM for a DMA Controller's
+    buffer descriptors or a video controller's video memory.
+  - A per-carrier IRQ domain for carrier devices that have one (or more) IRQs
+    per MCB device like PCIe based carriers with MSI or MSI-X support.
+
+2 Architecture
+===============
+  MCB is divided into 3 functional blocks:
+  - The MEN Chameleon Bus itself,
+  - drivers for MCB Carrier Devices and
+  - the parser for the Chameleon table.
+
+2.1 MEN Chameleon Bus
+----------------------
+   The MEN Chameleon Bus is an artificial bus system that attaches to a so
+   called Chameleon FPGA device found on some hardware produced by MEN Mikro
+   Elektronik GmbH. These devices are multi-function devices implemented in a
+   single FPGA and usually attached via some sort of PCI or PCIe link. Each
+   FPGA contains a header section describing the content of the FPGA. The
+   header lists the device id, PCI BAR, offset from the beginning of the PCI
+   BAR, size in the FPGA, interrupt number and some other properties currently
+   not handled by the MCB implementation.
+
+2.2 Carrier Devices
+--------------------
+   A carrier device is just an abstraction for the real world physical bus the
+   Chameleon FPGA is attached to. Some IP Core drivers may need to interact with
+   properties of the carrier device (like querying the IRQ number of a PCI
+   device). To provide abstraction from the real hardware bus, an MCB carrier
+   device provides callback methods to translate the driver's MCB function calls
+   to hardware related function calls. For example a carrier device may
+   implement the get_irq() method which can be translated into a hardware bus
+   query for the IRQ number the device should use.
+
+2.3 Parser
+-----------
+   The parser reads the first 512 bytes of a Chameleon device and parses the
+   Chameleon table. Currently the parser only supports the Chameleon v2 variant
+   of the Chameleon table but can easily be adopted to support an older or
+   possible future variant. While parsing the table's entries new MCB devices
+   are allocated and their resources are assigned according to the resource
+   assignment in the Chameleon table. After resource assignment is finished, the
+   MCB devices are registered at the MCB and thus at the driver core of the
+   Linux kernel.
+
+3 Resource handling
+====================
+  The current implementation assigns exactly one memory and one IRQ resource
+  per MCB device. But this is likely going to change in the future.
+
+3.1 Memory Resources
+---------------------
+   Each MCB device has exactly one memory resource, which can be requested from
+   the MCB bus. This memory resource is the physical address of the MCB device
+   inside the carrier and is intended to be passed to ioremap() and friends. It
+   is already requested from the kernel by calling request_mem_region().
+
+3.2 IRQs
+---------
+   Each MCB device has exactly one IRQ resource, which can be requested from the
+   MCB bus. If a carrier device driver implements the ->get_irq() callback
+   method, the IRQ number assigned by the carrier device will be returned,
+   otherwise the IRQ number inside the Chameleon table will be returned. This
+   number is suitable to be passed to request_irq().
+
+4 Writing an MCB driver
+=======================
+
+4.1 The driver structure
+-------------------------
+    Each MCB driver has a structure to identify the device driver as well as
+    device ids which identify the IP Core inside the FPGA. The driver structure
+    also contains callback methods which get executed on driver probe and
+    removal from the system.
+
+
+  static const struct mcb_device_id foo_ids[] = {
+          { .device = 0x123 },
+          { }
+  };
+  MODULE_DEVICE_TABLE(mcb, foo_ids);
+
+  static struct mcb_driver foo_driver = {
+          .driver = {
+                  .name = "foo-bar",
+                  .owner = THIS_MODULE,
+          },
+          .probe = foo_probe,
+          .remove = foo_remove,
+          .id_table = foo_ids,
+  };
+
+4.2 Probing and attaching
+--------------------------
+   When a driver is loaded and the MCB devices it services are found, the MCB
+   core will call the driver's probe callback method. When the driver is removed
+   from the system, the MCB core will call the driver's remove callback method.
+
+
+  static int foo_probe(struct mcb_device *mdev, const struct mcb_device_id *id);
+  static void foo_remove(struct mcb_device *mdev);
+
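+   A possible probe implementation is sketched below.  It only illustrates
+   the resource handling described in chapter 3; the mcb_request_mem(),
+   mcb_get_irq() and mcb_set_drvdata() helper names and the foo_irq() handler
+   are assumptions of this example, not mandated by this document.
+
+  static int foo_probe(struct mcb_device *mdev, const struct mcb_device_id *id)
+  {
+          struct resource *mem;
+          void __iomem *regs;
+          int irq, ret;
+
+          /* Memory resource of the IP core inside the carrier (chapter 3.1) */
+          mem = mcb_request_mem(mdev, "foo-bar");
+          if (IS_ERR(mem))
+                  return PTR_ERR(mem);
+
+          regs = ioremap(mem->start, resource_size(mem));
+          if (!regs)
+                  return -ENOMEM;
+
+          /* IRQ from the carrier or from the Chameleon table (chapter 3.2) */
+          irq = mcb_get_irq(mdev);
+          ret = request_irq(irq, foo_irq, 0, "foo-bar", regs);
+          if (ret) {
+                  iounmap(regs);
+                  return ret;
+          }
+
+          mcb_set_drvdata(mdev, regs);
+          return 0;
+  }
+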
+4.3 Initializing the driver
+----------------------------
+   When the kernel is booted or your foo driver module is inserted, you have to
+   perform driver initialization. Usually it is enough to register your driver
+   module at the MCB core.
+
+
+  static int __init foo_init(void)
+  {
+          return mcb_register_driver(&foo_driver);
+  }
+  module_init(foo_init);
+
+  static void __exit foo_exit(void)
+  {
+          mcb_unregister_driver(&foo_driver);
+  }
+  module_exit(foo_exit);
+
+   The module_mcb_driver() macro can be used to reduce the above code.
+
+
+  module_mcb_driver(foo_driver);
diff --git a/Documentation/misc-devices/mei/mei.txt b/Documentation/misc-devices/mei/mei.txt
index 8d47501..91c1fa3 100644
--- a/Documentation/misc-devices/mei/mei.txt
+++ b/Documentation/misc-devices/mei/mei.txt
@@ -96,7 +96,7 @@
 IOCTL
 =====
 
-The Intel MEI Driver supports the following IOCTL command:
+The Intel MEI Driver supports the following IOCTL commands:
 	IOCTL_MEI_CONNECT_CLIENT	Connect to firmware Feature (client).
 
 	usage:
@@ -125,6 +125,49 @@
         data that can be sent or received. (e.g. if MTU=2K, can send
         requests up to 2k bytes and receive responses up to 2k bytes).
 
+	IOCTL_MEI_NOTIFY_SET: enable or disable event notifications
+
+	Usage:
+		uint32_t enable;
+		ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable);
+
+	Inputs:
+		uint32_t enable = 1;
+		or
+		uint32_t enable = 0;	/* to disable notifications */
+
+	Error returns:
+		EINVAL	Wrong IOCTL Number
+		ENODEV	Device  is not initialized or the client not connected
+		ENOMEM	Unable to allocate memory to client internal data.
+		EFAULT	Fatal Error (e.g. Unable to access user input data)
+		EOPNOTSUPP if the device doesn't support the feature
+
+	Notes:
+	The client must be connected in order to enable notification events
+
+
+	IOCTL_MEI_NOTIFY_GET : retrieve event
+
+	Usage:
+		uint32_t event;
+		ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event);
+
+	Outputs:
+		1 - if an event is pending
+		0 - if there is no event pending
+
+	Error returns:
+		EINVAL	Wrong IOCTL Number
+		ENODEV	Device is not initialized or the client not connected
+		ENOMEM	Unable to allocate memory to client internal data.
+		EFAULT	Fatal Error (e.g. Unable to access user input data)
+		EOPNOTSUPP if the device doesn't support the feature
+
+	Notes:
+	The client must be connected and event notification has to be enabled
+	in order to receive an event
+
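+	Example:
+	A minimal user space sketch of the two IOCTLs above (error
+	handling trimmed; the device node name and the preceding
+	connect step are assumptions of this example):
+
+		int fd = open("/dev/mei0", O_RDWR);
+		/* ... IOCTL_MEI_CONNECT_CLIENT to the desired client ... */
+
+		uint32_t enable = 1;
+		ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable);	/* enable events */
+
+		uint32_t event;
+		ioctl(fd, IOCTL_MEI_NOTIFY_GET, &event);
+		/* event == 1 means an event is pending for this client */
+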
 
 Intel ME Applications
 =====================
diff --git a/Documentation/networking/6lowpan.txt b/Documentation/networking/6lowpan.txt
new file mode 100644
index 0000000..a7dc7e9
--- /dev/null
+++ b/Documentation/networking/6lowpan.txt
@@ -0,0 +1,50 @@
+
+Netdev private dataroom for 6lowpan interfaces:
+
+All 6lowpan-capable net devices, i.e. all interfaces of type ARPHRD_6LOWPAN,
+must have "struct lowpan_priv" placed at the beginning of netdev_priv.
+
+The priv_size of each interface should be calculated by:
+
+ dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);
+
+Where LL_6LOWPAN_PRIV_DATA is the size of the link-layer 6lowpan private
+data struct.  To access the LL_6LOWPAN_PRIV_DATA structure you can cast:
+
+ lowpan_priv(dev)->priv;
+
+to your LL_6LOWPAN_PRIV_DATA structure.
+
+Before registering the lowpan netdev interface you must run:
+
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+
+where LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN linklayer type of
+enum lowpan_lltypes.
+
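+A minimal registration sketch (the foobar names, including the
+foobar_setup() netdev setup routine, are placeholders):
+
+ dev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_priv_foobar)),
+		    "lowpan%d", NET_NAME_UNKNOWN, foobar_setup);
+ if (!dev)
+	return -ENOMEM;
+ lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);
+ ret = register_netdev(dev);
+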
+To access your link-layer private data, you can for example do:
+
+static inline struct lowpan_priv_foobar *
+lowpan_foobar_priv(struct net_device *dev)
+{
+	return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
+}
+
+switch (dev->type) {
+case ARPHRD_6LOWPAN:
+	lowpan_priv = lowpan_priv(dev);
+	/* do great stuff which is ARPHRD_6LOWPAN related */
+	switch (lowpan_priv->lltype) {
+	case LOWPAN_LLTYPE_FOOBAR:
+		/* do 802.15.4 6LoWPAN handling here */
+		lowpan_foobar_priv(dev)->bar = foo;
+		break;
+	...
+	}
+	break;
+...
+}
+
+In case of generic 6lowpan branch ("net/6lowpan") you can remove the check
+on ARPHRD_6LOWPAN, because you can be sure that these functions are called
+by ARPHRD_6LOWPAN interfaces.
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index b48d4a1..fd1a1aa 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -510,7 +510,7 @@
 
   4.1.2 RAW socket option CAN_RAW_ERR_FILTER
 
-  As described in chapter 3.4 the CAN interface driver can generate so
+  As described in chapter 3.3 the CAN interface driver can generate so
   called Error Message Frames that can optionally be passed to the user
   application in the same way as other CAN frames. The possible
   errors are divided into different error classes that may be filtered
@@ -1152,7 +1152,7 @@
     $ ip link set canX type can restart
 
   Note that a restart will also create a CAN error message frame (see
-  also chapter 3.4).
+  also chapter 3.3).
 
   6.6 CAN FD (flexible data rate) driver support
 
diff --git a/Documentation/networking/dsa/bcm_sf2.txt b/Documentation/networking/dsa/bcm_sf2.txt
new file mode 100644
index 0000000..d999d0c
--- /dev/null
+++ b/Documentation/networking/dsa/bcm_sf2.txt
@@ -0,0 +1,114 @@
+Broadcom Starfighter 2 Ethernet switch driver
+=============================================
+
+Broadcom's Starfighter 2 Ethernet switch hardware block is commonly found and
+deployed in the following products:
+
+- xDSL gateways such as BCM63138
+- streaming/multimedia Set Top Box such as BCM7445
+- Cable Modem/residential gateways such as BCM7145/BCM3390
+
+The switch is typically deployed in a configuration involving between 5 and 13
+ports, offering a range of built-in and customizable interfaces:
+
+- single integrated Gigabit PHY
+- quad integrated Gigabit PHY
+- quad external Gigabit PHY w/ MDIO multiplexer
+- integrated MoCA PHY
+- several external MII/RevMII/GMII/RGMII interfaces
+
+The switch also supports specific congestion control features which allow MoCA
+fail-over not to lose packets during a MoCA role re-election, as well as out of
+band back-pressure to the host CPU network interface when downstream interfaces
+are connected at a lower speed.
+
+The switch hardware block is typically interfaced using MMIO accesses and
+contains a bunch of sub-blocks/registers:
+
+* SWITCH_CORE: common switch registers
+* SWITCH_REG: external interfaces switch register
+* SWITCH_MDIO: external MDIO bus controller (there is another one in SWITCH_CORE,
+  which is used for indirect PHY accesses)
+* SWITCH_INDIR_RW: 64-bits wide register helper block
+* SWITCH_INTRL2_0/1: Level-2 interrupt controllers
+* SWITCH_ACB: Admission control block
+* SWITCH_FCB: Fail-over control block
+
+Implementation details
+======================
+
+The driver is located in drivers/net/dsa/bcm_sf2.c and is implemented as a DSA
+driver; see Documentation/networking/dsa/dsa.txt for details on the subsystem
+and what it provides.
+
+The SF2 switch is configured to enable a Broadcom specific 4-byte switch tag
+which gets inserted by the switch for every packet forwarded to the CPU
+interface; conversely, the CPU network interface should insert a similar tag for
+packets entering the CPU port. The tag format is described in
+net/dsa/tag_brcm.c.
+
+Overall, the SF2 driver is a fairly regular DSA driver; there are a few
+specifics covered below.
+
+Device Tree probing
+-------------------
+
+The DSA platform device driver is probed using a specific compatible string
+provided in net/dsa/dsa.c. The reason for this is that the DSA subsystem is
+currently registered as a platform device driver. DSA will provide the needed
+device_node pointers which are then accessible by the switch driver setup
+function to setup resources such as register ranges and interrupts. This
+currently works very well because none of the of_* functions utilized by the
+driver require a struct device to be bound to a struct device_node, but things
+may change in the future.
+
+MDIO indirect accesses
+----------------------
+
+Due to a limitation in how Broadcom switches have been designed, external
+Broadcom switches connected to a SF2 require the use of the DSA slave MDIO bus
+in order to properly configure them. By default, the SF2 pseudo-PHY address, and
+an external switch pseudo-PHY address will both be snooping for incoming MDIO
+transactions, since they are at the same address (30), resulting in some kind of
+"double" programming. Using DSA, and setting ds->phys_mii_mask accordingly, we
+selectively divert reads and writes towards external Broadcom switches
+pseudo-PHY addresses. Newer revisions of the SF2 hardware have introduced a
+configurable pseudo-PHY address which circumvents the initial design limitation.
+
+Multimedia over CoAxial (MoCA) interfaces
+-----------------------------------------
+
+MoCA interfaces are fairly specific and require the use of a firmware blob which
+gets loaded onto the MoCA processor(s) for packet processing. The switch
+hardware contains logic which will assert/de-assert link states accordingly for
+the MoCA interface whenever the MoCA coaxial cable gets disconnected or the
+firmware gets reloaded. The SF2 driver relies on such events to properly set its
+MoCA interface carrier state and properly report this to the networking stack.
+
+The MoCA interfaces are supported using the PHY library's fixed PHY/emulated PHY
+device and the switch driver registers a fixed_link_update callback for such
+PHYs which reflects the link state obtained from the interrupt handler.
+
+
+Power Management
+----------------
+
+Whenever possible, the SF2 driver tries to minimize the overall switch power
+consumption by applying a combination of:
+
+- turning off internal buffers/memories
+- disabling packet processing logic
+- putting integrated PHYs in IDDQ/low-power
+- reducing the switch core clock based on the active port count
+- enabling and advertising EEE
+- turning off RGMII data processing logic when the link goes down
+
+Wake-on-LAN
+-----------
+
+Wake-on-LAN is currently implemented by utilizing the host processor Ethernet
+MAC controller wake-on logic. Whenever Wake-on-LAN is requested, an intersection
+between the user request and the supported host Ethernet interface WoL
+capabilities is done and the intersection result gets configured. During
+system-wide suspend/resume, only ports not participating in Wake-on-LAN are
+disabled.
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
new file mode 100644
index 0000000..aa9c1f9
--- /dev/null
+++ b/Documentation/networking/dsa/dsa.txt
@@ -0,0 +1,615 @@
+Distributed Switch Architecture
+===============================
+
+Introduction
+============
+
+This document describes the Distributed Switch Architecture (DSA) subsystem
+design principles, limitations, interactions with other subsystems, and how to
+develop drivers for this subsystem as well as a TODO for developers interested
+in joining the effort.
+
+Design principles
+=================
+
+The Distributed Switch Architecture is a subsystem which was primarily designed
+to support Marvell Ethernet switches (MV88E6xxx, a.k.a Linkstreet product line)
+using Linux, but has since evolved to support other vendors as well.
+
+The original philosophy behind this design was to be able to use unmodified
+Linux tools such as bridge, iproute2, ifconfig to work transparently whether
+they configured/queried a switch port network device or a regular network
+device.
+
+An Ethernet switch typically comprises multiple front-panel ports and one
+or more CPU or management ports. The DSA subsystem currently relies on the
+presence of a management port connected to an Ethernet controller capable of
+receiving Ethernet frames from the switch. This is a very common setup for all
+kinds of Ethernet switches found in Small Home and Office products: routers,
+gateways, or even top-of-the-rack switches. This host Ethernet controller will
+be later referred to as "master" and "cpu" in DSA terminology and code.
+
+The D in DSA stands for Distributed, because the subsystem has been designed
+with the ability to configure and manage cascaded switches on top of each other
+using upstream and downstream Ethernet links between switches. These specific
+ports are referred to as "dsa" ports in DSA terminology and code. A collection
+of multiple switches connected to each other is called a "switch tree".
+
+For each front-panel port, DSA will create specialized network devices which are
+used as controlling and data-flowing endpoints for use by the Linux networking
+stack. These specialized network interfaces are referred to as "slave" network
+interfaces in DSA terminology and code.
+
+The ideal case for using DSA is when an Ethernet switch supports a "switch tag"
+which is a hardware feature making the switch insert a specific tag for each
+Ethernet frame it receives from or sends to specific ports, to help the management
+interface figure out:
+
+- what port is this frame coming from
+- what was the reason why this frame got forwarded
+- how to send CPU originated traffic to specific ports
+
+The subsystem does support switches not capable of inserting/stripping tags, but
+the features might be slightly limited in that case (traffic separation relies
+on Port-based VLAN IDs).
+
+Note that DSA does not currently create network interfaces for the "cpu" and
+"dsa" ports because:
+
+- the "cpu" port is the Ethernet switch facing side of the management
+  controller, and as such, would create a duplication of features, since you
+  would get two interfaces for the same conduit: master netdev, and "cpu" netdev
+
+- the "dsa" port(s) are just conduits between two or more switches, and as such
+  cannot really be used as proper network interfaces either, only the
+  downstream, or the top-most upstream interface makes sense with that model
+
+Switch tagging protocols
+------------------------
+
+DSA currently supports 4 different tagging protocols, and a tag-less mode as
+well. The different protocols are implemented in:
+
+net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy)
+net/dsa/tag_dsa.c: Marvell's original DSA tag
+net/dsa/tag_edsa.c: Marvell's enhanced DSA tag
+net/dsa/tag_brcm.c: Broadcom's 4 bytes tag
+
+The exact format of the tag protocol is vendor specific, but in general, they
+all contain something which:
+
+- identifies which port the Ethernet frame came from/should be sent to
+- provides a reason why this frame was forwarded to the management interface
+
+Master network devices
+----------------------
+
+Master network devices are regular, unmodified Linux network device drivers for
+the CPU/management Ethernet interface. Such a driver might occasionally need to
+know whether DSA is enabled (e.g.: to enable/disable specific offload features),
+but the DSA subsystem has been proven to work with industry standard drivers:
+e1000e, mv643xx_eth etc. without having to introduce modifications to these
+drivers. Such network devices are also often referred to as conduit network
+devices since they act as a pipe between the host processor and the hardware
+Ethernet switch.
+
+Networking stack hooks
+----------------------
+
+When a master netdev is used with DSA, a small hook is placed in the
+networking stack in order to have the DSA subsystem process the Ethernet
+switch specific tagging protocol. DSA accomplishes this by registering a
+specific (and fake) Ethernet type (later becoming skb->protocol) with the
+networking stack; this is also known as a ptype or packet_type. A typical
+Ethernet Frame receive sequence looks like this:
+
+Master network device (e.g.: e1000e):
+
+Receive interrupt fires:
+- receive function is invoked
+- basic packet processing is done: getting length, status etc.
+- packet is prepared to be processed by the Ethernet layer by calling
+  eth_type_trans
+
+net/ethernet/eth.c:
+
+eth_type_trans(skb, dev)
+	if (dev->dsa_ptr != NULL)
+		-> skb->protocol = ETH_P_XDSA
+
+drivers/net/ethernet/*:
+
+netif_receive_skb(skb)
+	-> iterate over registered packet_type
+		-> invoke handler for ETH_P_XDSA, calls dsa_switch_rcv()
+
+net/dsa/dsa.c:
+	-> dsa_switch_rcv()
+		-> invoke switch tag specific protocol handler in
+		   net/dsa/tag_*.c
+
+net/dsa/tag_*.c:
+	-> inspect and strip switch tag protocol to determine originating port
+	-> locate per-port network device
+	-> invoke eth_type_trans() with the DSA slave network device
+	-> invoke netif_receive_skb()
+
+Past this point, the DSA slave network devices get delivered regular Ethernet
+frames that can be processed by the networking stack.
+
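+The ETH_P_XDSA hook mentioned above boils down to an ordinary packet_type
+registration, roughly like this (a simplified sketch of what net/dsa/dsa.c
+does):
+
+	static struct packet_type dsa_pack_type __read_mostly = {
+		.type	= cpu_to_be16(ETH_P_XDSA),
+		.func	= dsa_switch_rcv,
+	};
+
+	dev_add_pack(&dsa_pack_type);
+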
+Slave network devices
+---------------------
+
+Slave network devices created by DSA are stacked on top of their master network
+device, each of these network interfaces will be responsible for being a
+controlling and data-flowing end-point for each front-panel port of the switch.
+These interfaces are specialized in order to:
+
+- insert/remove the switch tag protocol (if it exists) when sending traffic
+  to/from specific switch ports
+- query the switch for ethtool operations: statistics, link state,
+  Wake-on-LAN, register dumps...
+- external/internal PHY management: link, auto-negotiation etc.
+
+These slave network devices have custom net_device_ops and ethtool_ops function
+pointers which allow DSA to introduce a level of layering between the networking
+stack/ethtool, and the switch driver implementation.
+
+Upon frame transmission from these slave network devices, DSA will look up which
+switch tagging protocol is currently registered with these network devices, and
+invoke a specific transmit routine which takes care of adding the relevant
+switch tag in the Ethernet frames.
+
+These frames are then queued for transmission using the master network device
+ndo_start_xmit() function. Since they contain the appropriate switch tag, the
+Ethernet switch will be able to process these incoming frames from the
+management interface and deliver them to the physical switch port.
+
+Graphical representation
+------------------------
+
+Summarized, this is basically how DSA looks from a network device
+perspective:
+
+
+			|---------------------------
+			| CPU network device (eth0)|
+			----------------------------
+			| <tag added by switch     |
+			|                          |
+			|                          |
+			|        tag added by CPU> |
+		|--------------------------------------------|
+		| Switch driver				     |
+		|--------------------------------------------|
+                    ||        ||         ||
+		|-------|  |-------|  |-------|
+		| sw0p0 |  | sw0p1 |  | sw0p2 |
+		|-------|  |-------|  |-------|
+
+Slave MDIO bus
+--------------
+
+In order to be able to read from/write to a switch's built-in PHYs, DSA creates a
+slave MDIO bus which allows a specific switch driver to divert and intercept
+MDIO reads/writes towards specific PHY addresses. In most MDIO-connected
+switches, these functions would utilize direct or indirect PHY addressing mode
+to return standard MII registers from the switch builtin PHYs, allowing the
+PHY library to report link status, link partner pages, auto-negotiation
+results etc.
+
+For Ethernet switches which have both external and internal MDIO busses, the
+slave MII bus can be utilized to mux/demux MDIO reads and writes towards either
+internal or external MDIO devices this switch might be connected to: internal
+PHYs, external PHYs, or even external switches.
+
+Data structures
+---------------
+
+DSA data structures are defined in include/net/dsa.h as well as
+net/dsa/dsa_priv.h.
+
+dsa_chip_data: platform data configuration for a given switch device, this
+structure describes a switch device's parent device, its address, as well as
+various properties of its ports: names/labels, and finally a routing table
+indication (when cascading switches)
+
+dsa_platform_data: platform device configuration data which can reference a
+collection of dsa_chip_data structures if multiple switches are cascaded; the
+master network device this switch tree is attached to needs to be referenced
+
+dsa_switch_tree: structure assigned to the master network device under
+"dsa_ptr", this structure references a dsa_platform_data structure as well as
+the tagging protocol supported by the switch tree, and which receive/transmit
+function hooks should be invoked, information about the directly attached switch
+is also provided: CPU port. Finally, a collection of dsa_switch are referenced
+to address individual switches in the tree.
+
+dsa_switch: structure describing a switch device in the tree, referencing a
+dsa_switch_tree as a backpointer, slave network devices, master network device,
+and a reference to the backing dsa_switch_driver
+
+dsa_switch_driver: structure referencing function pointers, see below for a full
+description.
+
+Design limitations
+==================
+
+DSA is a platform device driver
+-------------------------------
+
+DSA is implemented as a DSA platform device driver which is convenient because
+it will register the entire DSA switch tree attached to a master network device
+in one-shot, facilitating the device creation and simplifying the device driver
+model a bit. This comes, however, with a number of limitations:
+
+- building DSA and its switch drivers as modules is currently not working
+- the device driver parenting does not necessarily reflect the original
+  bus/device the switch can be created from
+- supporting non-MDIO and non-MMIO (platform) switches is not possible
+
+Limits on the number of devices and ports
+-----------------------------------------
+
+DSA currently limits the number of maximum switches within a tree to 4
+(DSA_MAX_SWITCHES), and the number of ports per switch to 12 (DSA_MAX_PORTS).
+These limits could be extended to support larger configurations should this need
+arise.
+
+Lack of CPU/DSA network devices
+-------------------------------
+
+DSA does not currently create slave network devices for the CPU or DSA ports, as
+described before. This might be an issue in the following cases:
+
+- inability to fetch switch CPU port statistics counters using ethtool, which
+  can make it harder to debug MDIO switches connected using xMII interfaces
+
+- inability to configure the CPU port link parameters based on the Ethernet
+  controller capabilities attached to it: http://patchwork.ozlabs.org/patch/509806/
+
+- inability to configure specific VLAN IDs / trunking VLANs between switches
+  when using a cascaded setup
+
+Common pitfalls using DSA setups
+--------------------------------
+
+Once a master network device is configured to use DSA (dev->dsa_ptr becomes
+non-NULL), and the switch behind it expects a tagging protocol, this network
+interface can only exclusively be used as a conduit interface. Sending packets
+directly through this interface (e.g.: opening a socket using this interface)
+will not make us go through the switch tagging protocol transmit function, so
+the Ethernet switch on the other end, expecting a tag will typically drop this
+frame.
+
+Slave network devices check that the master network device is UP before allowing
+you to administratively bring UP these slave network devices. A common
+configuration mistake is forgetting to bring UP the master network device first.
+
+Interactions with other subsystems
+==================================
+
+DSA currently leverages the following subsystems:
+
+- MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
+- Switchdev: net/switchdev/*
+- Device Tree for various of_* functions
+- HWMON: drivers/hwmon/*
+
+MDIO/PHY library
+----------------
+
+Slave network devices exposed by DSA may or may not be interfacing with PHY
+devices (struct phy_device as defined in include/linux/phy.h), but the DSA
+subsystem deals with all possible combinations:
+
+- internal PHY devices, built into the Ethernet switch hardware
+- external PHY devices, connected via an internal or external MDIO bus
+- internal PHY devices, connected via an internal MDIO bus
+- special, non-autonegotiated or non MDIO-managed PHY devices: SFPs, MoCA; a.k.a
+  fixed PHYs
+
+The PHY configuration is done by the dsa_slave_phy_setup() function and the
+logic basically looks like this:
+
+- if Device Tree is used, the PHY device is looked up using the standard
+  "phy-handle" property, if found, this PHY device is created and registered
+  using of_phy_connect()
+
+- if Device Tree is used, and the PHY device is "fixed", that is, conforms to
+  the definition of a non-MDIO managed PHY as defined in
+  Documentation/devicetree/bindings/net/fixed-link.txt, the PHY is registered
+  and connected transparently using the special fixed MDIO bus driver
+
+- finally, if the PHY is built into the switch, as is very common with
+  standalone switch packages, the PHY is probed using the slave MII bus created
+  by DSA
+
+
+SWITCHDEV
+---------
+
+DSA directly utilizes SWITCHDEV when interfacing with the bridge layer, and
+more specifically with its VLAN filtering portion when configuring VLANs on top
+of per-port slave network devices. Since DSA primarily deals with
+MDIO-connected switches, although not exclusively, SWITCHDEV's
+prepare/abort/commit phases are often simplified into a prepare phase which
+checks whether the operation is supported by the DSA switch driver, and a commit
+phase which applies the changes.
+
+As of today, the only SWITCHDEV objects supported by DSA are the FDB and VLAN
+objects.
+
+Device Tree
+-----------
+
+DSA features a standardized binding which is documented in
+Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
+functions such as of_get_phy_mode(), of_phy_connect() are also used to query
+per-port PHY specific details: interface connection, MDIO bus location etc..
+
+HWMON
+-----
+
+Some switch drivers feature internal temperature sensors which are exposed as
+regular HWMON devices in /sys/class/hwmon/.
+
+Driver development
+==================
+
+DSA switch drivers need to implement a dsa_switch_driver structure which will
+contain the various members described below.
+
+register_switch_driver() registers this dsa_switch_driver in its internal list
+of drivers to probe for. unregister_switch_driver() does the exact opposite.
+
+Unless requested differently by setting the priv_size member accordingly, DSA
+does not allocate any driver private context space.
+
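+As a rough illustration, a driver skeleton might look like the sketch below.
+The callback prototypes are abridged/assumed here (take the exact ones from
+include/net/dsa.h), and all foo_* names are placeholders:
+
+	struct foo_priv {
+		/* driver private state, sized via .priv_size below */
+		int dummy;
+	};
+
+	static char *foo_drv_probe(struct device *host_dev, int sw_addr)
+	{
+		/* detect the switch; return its name, or NULL if absent */
+		return "foo-switch";
+	}
+
+	static int foo_drv_setup(struct dsa_switch *ds)
+	{
+		/* reset the switch, isolate ports, set up registers... */
+		return 0;
+	}
+
+	static struct dsa_switch_driver foo_switch_driver = {
+		.tag_protocol	= DSA_TAG_PROTO_NONE,
+		.priv_size	= sizeof(struct foo_priv),
+		.probe		= foo_drv_probe,
+		.setup		= foo_drv_setup,
+		/* .phy_read, .phy_write, ethtool ops, ... */
+	};
+
+	static int __init foo_switch_init(void)
+	{
+		register_switch_driver(&foo_switch_driver);
+		return 0;
+	}
+	module_init(foo_switch_init);
+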
+Switch configuration
+--------------------
+
+- priv_size: additional size needed by the switch driver for its private context
+
+- tag_protocol: this is to indicate what kind of tagging protocol is supported,
+  should be a valid value from the dsa_tag_protocol enum
+
+- probe: probe routine which will be invoked by the DSA platform device upon
+  registration to test for the presence/absence of a switch device. For MDIO
+  devices, it is recommended to issue a read towards internal registers using
+  the switch pseudo-PHY and return whether this is a supported device. For other
+  buses, return a non-NULL string
+
+- setup: setup function for the switch, this function is responsible for setting
+  up the dsa_switch_driver private structure with all it needs: register maps,
+  interrupts, mutexes, locks etc.. This function is also expected to properly
+  configure the switch to separate all network interfaces from each other, that
+  is, they should be isolated by the switch hardware itself, typically by creating
+  a Port-based VLAN ID for each port and allowing only the CPU port and the
+  specific port to be in the forwarding vector. Ports that are unused by the
+  platform should be disabled. Past this function, the switch is expected to be
+  fully configured and ready to serve any kind of request. It is recommended
+  to issue a software reset of the switch during this setup function in order to
+  avoid relying on what a previous software agent such as a bootloader/firmware
+  may have previously configured.
+
+- set_addr: Some switches require the programming of the management interface's
+  Ethernet MAC address, switch drivers can also disable ageing of MAC addresses
+  on the management interface and "hardcode"/"force" this MAC address for the
+  CPU/management interface as an optimization
+
+PHY devices and link management
+-------------------------------
+
+- get_phy_flags: Some switches are interfaced to various kinds of Ethernet PHYs,
+  if the PHY library PHY driver needs to know about information it cannot obtain
+  on its own (e.g.: coming from switch memory mapped registers), this function
+  should return a 32-bits bitmask of "flags", that is private between the switch
+  driver and the Ethernet PHY driver in drivers/net/phy/*.
+
+- phy_read: Function invoked by the DSA slave MDIO bus when attempting to read
+  the switch port MDIO registers. If unavailable, return 0xffff for each read.
+  For builtin switch Ethernet PHYs, this function should allow reading the link
+  status, auto-negotiation results, link partner pages etc..
+
+- phy_write: Function invoked by the DSA slave MDIO bus when attempting to write
+  to the switch port MDIO registers. If unavailable return a negative error
+  code.
+
+- poll_link: Function invoked by DSA to query the link state of the switch
+  builtin Ethernet PHYs, per port. This function is responsible for calling
+  netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a
+  single call. Executes from workqueue context.
+
+- adjust_link: Function invoked by the PHY library when a slave network device
+  is attached to a PHY device. This function is responsible for appropriately
+  configuring the switch port link parameters: speed, duplex, pause based on
+  what the phy_device is providing.
+
+- fixed_link_update: Function invoked by the PHY library, and specifically by
+  the fixed PHY driver asking the switch driver for link parameters that could
+  not be auto-negotiated, or obtained by reading the PHY registers through MDIO.
+  This is particularly useful for specific kinds of hardware such as QSGMII,
+  MoCA or other kinds of non-MDIO managed PHYs where out of band link
+  information is obtained
+
+Ethtool operations
+------------------
+
+- get_strings: ethtool function used to query the driver's strings, will
+  typically return statistics strings, private flags strings etc.
+
+- get_ethtool_stats: ethtool function used to query per-port statistics and
+  return their values. DSA overlays slave network devices general statistics:
+  RX/TX counters from the network device, with switch driver specific statistics
+  per port
+
+- get_sset_count: ethtool function used to query the number of statistics items
+
+- get_wol: ethtool function used to obtain Wake-on-LAN settings per-port, this
+  function may, for certain implementations also query the master network device
+  Wake-on-LAN settings if this interface needs to participate in Wake-on-LAN
+
+- set_wol: ethtool function used to configure Wake-on-LAN settings per-port,
+  direct counterpart to set_wol with similar restrictions
+
+- set_eee: ethtool function which is used to configure a switch port EEE (Green
+  Ethernet) settings, can optionally invoke the PHY library to enable EEE at the
+  PHY level if relevant. This function should enable EEE at the switch port MAC
+  controller and data-processing logic
+
+- get_eee: ethtool function which is used to query a switch port EEE settings,
+  this function should return the EEE state of the switch port MAC controller
+  and data-processing logic as well as query the PHY for its currently configured
+  EEE settings
+
+- get_eeprom_len: ethtool function returning for a given switch the EEPROM
+  length/size in bytes
+
+- get_eeprom: ethtool function returning for a given switch the EEPROM contents
+
+- set_eeprom: ethtool function writing specified data to a given switch EEPROM
+
+- get_regs_len: ethtool function returning the register length for a given
+  switch
+
+- get_regs: ethtool function returning the Ethernet switch internal register
+  contents. This function might require user-land code in ethtool to
+  pretty-print register values and registers
+
+Power management
+----------------
+
+- suspend: function invoked by the DSA platform device when the system goes to
+  suspend, should quiesce all Ethernet switch activities, but keep ports
+  participating in Wake-on-LAN active as well as additional wake-up logic if
+  supported
+
+- resume: function invoked by the DSA platform device when the system resumes,
+  should resume all Ethernet switch activities and re-configure the switch to be
+  in a fully active state
+
+- port_enable: function invoked by the DSA slave network device ndo_open
+  function when a port is administratively brought up, this function should be
+  fully enabling a given switch port. DSA takes care of marking the port with
+  BR_STATE_BLOCKING if the port is a bridge member, or BR_STATE_FORWARDING if it
+  was not, and propagating these changes down to the hardware
+
+- port_disable: function invoked by the DSA slave network device ndo_close
+  function when a port is administratively brought down, this function should be
+  fully disabling a given switch port. DSA takes care of marking the port with
+  BR_STATE_DISABLED and propagating changes to the hardware if this port is
+  disabled while being a bridge member
+
+Hardware monitoring
+-------------------
+
+These callbacks are only available if CONFIG_NET_DSA_HWMON is enabled:
+
+- get_temp: this function queries the given switch for its temperature
+
+- get_temp_limit: this function returns the switch current maximum temperature
+  limit
+
+- set_temp_limit: this function configures the maximum temperature limit allowed
+
+- get_temp_alarm: this function returns the critical temperature threshold
+  returning an alarm notification
+
+See Documentation/hwmon/sysfs-interface for details.
+
+Bridge layer
+------------
+
+- port_join_bridge: bridge layer function invoked when a given switch port is
+  added to a bridge, this function should be doing the necessary at the switch
+  level to permit the joining port from being added to the relevant logical
+  domain for it to ingress/egress traffic with other members of the bridge. DSA
+  does nothing but calculate a bitmask of switch ports currently members of the
+  specified bridge being requested the join
+
+- port_leave_bridge: bridge layer function invoked when a given switch port is
+  removed from a bridge, this function should be doing the necessary at the
+  switch level to deny the leaving port from ingress/egress traffic from the
+  remaining bridge members. When the port leaves the bridge, it should be aged
+  out at the switch hardware for the switch to (re) learn MAC addresses behind
+  this port. DSA calculates the bitmask of ports still members of the bridge
+  being left
+
+- port_stp_update: bridge layer function invoked when a given switch port STP
+  state is computed by the bridge layer and should be propagated to switch
+  hardware to forward/block/learn traffic. The switch driver is responsible for
+  computing a STP state change based on current and asked parameters and perform
+  the relevant ageing based on the intersection results
+
+Bridge VLAN filtering
+---------------------
+
+- port_pvid_get: bridge layer function invoked when a Port-based VLAN ID is
+  queried for the given switch port
+
+- port_pvid_set: bridge layer function invoked when a Port-based VLAN ID needs
+  to be configured on the given switch port
+
+- port_vlan_add: bridge layer function invoked when a VLAN is configured
+  (tagged or untagged) for the given switch port
+
+- port_vlan_del: bridge layer function invoked when a VLAN is removed from the
+  given switch port
+
+- vlan_getnext: bridge layer function invoked to query the next configured VLAN
+  in the switch, i.e. returns the bitmaps of members and untagged ports
+
+- port_fdb_add: bridge layer function invoked when the bridge wants to install a
+  Forwarding Database entry, the switch hardware should be programmed with the
+  specified address in the specified VLAN Id in the forwarding database
+  associated with this VLAN ID
+
+Note: VLAN ID 0 corresponds to the port private database, which, in the context
+of DSA, would be its port-based VLAN, used by the associated bridge device.
+
+- port_fdb_del: bridge layer function invoked when the bridge wants to remove a
+  Forwarding Database entry, the switch hardware should be programmed to delete
+  the specified MAC address from the specified VLAN ID if it was mapped into
+  this port forwarding database
+
+TODO
+====
+
+The platform device problem
+---------------------------
+DSA is currently implemented as a platform device driver which is far from ideal
+as was discussed in this thread:
+
+http://permalink.gmane.org/gmane.linux.network/329848
+
+This basically prevents the device driver model from being properly used and
+applied, and prevents supporting non-MDIO, non-MMIO Ethernet connected switches.
+
+Another problem with the platform device driver approach is that it prevents the
+use of modular switch driver builds due to a circular dependency, illustrated
+here:
+
+http://comments.gmane.org/gmane.linux.network/345803
+
+Attempts at reworking this have been made here:
+
+https://lwn.net/Articles/643149/
+
+Making SWITCHDEV and DSA converge towards a unified codebase
+-------------------------------------------------------------
+
+SWITCHDEV properly takes care of abstracting the networking stack with offload
+capable hardware, but does not enforce a strict switch device driver model. On
+the other DSA enforces a fairly strict device driver model, and deals with most
+of the switch specific. At some point we should envision a merger between these
+two subsystems and get the best of both worlds.
+
+Other low-hanging fruit
+-----------------------
+
+- making the number of ports fully dynamic and not dependent on DSA_MAX_PORTS
+- allowing more than one CPU/management interface:
+  http://comments.gmane.org/gmane.linux.network/365657
+- porting more drivers from other vendors:
+  http://comments.gmane.org/gmane.linux.network/365510
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5fae770..ebe94f2 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -586,6 +586,21 @@
 	if available window is too small.
 	Default: 2
 
+tcp_pacing_ss_ratio - INTEGER
+	sk->sk_pacing_rate is set by TCP stack using a ratio applied
+	to current rate. (current_rate = cwnd * mss / srtt)
+	If TCP is in slow start, tcp_pacing_ss_ratio is applied
+	to let TCP probe for bigger speeds, assuming cwnd can be
+	doubled every other RTT.
+	Default: 200
+
+tcp_pacing_ca_ratio - INTEGER
+	sk->sk_pacing_rate is set by TCP stack using a ratio applied
+	to current rate. (current_rate = cwnd * mss / srtt)
+	If TCP is in congestion avoidance phase, tcp_pacing_ca_ratio
+	is applied to conservatively probe for bigger throughput.
+	Default: 120
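+
+	As a purely illustrative example (numbers are not from this
+	document): with cwnd=10, mss=1460 bytes and srtt=100 ms,
+	current_rate = 10 * 1460 bytes / 0.1 s = 146 kB/s (~1.17 Mbit/s).
+	With the defaults above, the pacing rate would then be set to
+	roughly 2.3 Mbit/s in slow start (200%) and roughly 1.4 Mbit/s
+	in congestion avoidance (120%).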
+
 tcp_tso_win_divisor - INTEGER
 	This allows control over what percentage of the congestion window
 	can be consumed by a single TSO frame.
@@ -1181,6 +1196,16 @@
 	Allows you to write a number, which can be used as required.
 	Default value is 0.
 
+xfrm4_gc_thresh - INTEGER
+	The threshold at which we will start garbage collecting for IPv4
+	destination cache entries.  At twice this value the system will
+	refuse new allocations.
+
+igmp_link_local_mcast_reports - BOOLEAN
+	Enable IGMP reports for link local multicast groups in the
+	224.0.0.X range.
+	Default TRUE
+
 Alexey Kuznetsov.
 kuznet@ms2.inr.ac.ru
 
@@ -1215,14 +1240,20 @@
 	FALSE: disabled
 	Default: TRUE
 
-auto_flowlabels - BOOLEAN
-	Automatically generate flow labels based based on a flow hash
-	of the packet. This allows intermediate devices, such as routers,
-	to idenfify packet flows for mechanisms like Equal Cost Multipath
+auto_flowlabels - INTEGER
+	Automatically generate flow labels based on a flow hash of the
+	packet. This allows intermediate devices, such as routers, to
+	identify packet flows for mechanisms like Equal Cost Multipath
 	Routing (see RFC 6438).
-	TRUE: enabled
-	FALSE: disabled
-	Default: false
+	0: automatic flow labels are completely disabled
+	1: automatic flow labels are enabled by default, they can be
+	   disabled on a per socket basis using the IPV6_AUTOFLOWLABEL
+	   socket option
+	2: automatic flow labels are allowed, they may be enabled on a
+	   per socket basis using the IPV6_AUTOFLOWLABEL socket option
+	3: automatic flow labels are enabled and enforced, they cannot
+	   be disabled by the socket option
+	Default: 1
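+
+	As referred to above, the per-socket override is a plain socket
+	option; a user space sketch (fd is an IPv6 socket, error
+	handling omitted):
+
+		int off = 0;	/* opt out of automatic flow labels */
+		setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL,
+			   &off, sizeof(off));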
 
 flowlabel_state_ranges - BOOLEAN
 	Split the flow label number space into two ranges. 0-0x7FFFF is
@@ -1340,6 +1371,14 @@
 	   disabled if accept_ra_from_local is disabled
                on a specific interface.
 
+accept_ra_min_hop_limit - INTEGER
+	Minimum hop limit Information in Router Advertisement.
+
+	Hop limit Information in Router Advertisement less than this
+	variable shall be ignored.
+
+	Default: 1
+
 accept_ra_pinfo - BOOLEAN
 	Learn Prefix Information in Router Advertisement.
 
@@ -1435,6 +1474,11 @@
 	Default Maximum Transfer Unit
 	Default: 1280 (IPv6 required minimum)
 
+ip_nonlocal_bind - BOOLEAN
+	If set, allows processes to bind() to non-local IPv6 addresses,
+	which can be quite useful - but may break some applications.
+	Default: 0
+
 router_probe_interval - INTEGER
 	Minimum interval (in seconds) between Router Probing described
 	in RFC4191.
@@ -1455,6 +1499,13 @@
 	routers are present.
 	Default: 3
 
+use_oif_addrs_only - BOOLEAN
+	When enabled, the candidate source addresses for destinations
+	routed via this interface are restricted to the set of addresses
+	configured on this interface (vis. RFC 6724, section 4).
+
+	Default: false
+
 use_tempaddr - INTEGER
 	Preference for Privacy Extensions (RFC3041).
 	  <= 0 : disable Privacy Extensions
@@ -1591,6 +1642,11 @@
 	otherwise the minimal space between responses in milliseconds.
 	Default: 1000
 
+xfrm6_gc_thresh - INTEGER
+	The threshold at which we will start garbage collecting for IPv6
+	destination cache entries.  At twice this value the system will
+	refuse new allocations.
+
 
 IPv6 Update by:
 Pekka Savola <pekkas@netcore.fi>
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index e655e24..d64a147 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -135,12 +135,8 @@
 	int maxmtu;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
-	void *custom_cfg;
-	void *custom_data;
 	void *bsp_priv;
 };
 
@@ -179,15 +175,11 @@
  o bus_setup: perform HW setup of the bus. For example, on some ST platforms
 	     this field is used to configure the AMBA  bridge to generate more
 	     efficient STBus traffic.
- o setup/init/exit: callbacks used for calling a custom initialization;
+ o init/exit: callbacks used for calling a custom initialization;
 	     this is sometime necessary on some platforms (e.g. ST boxes)
 	     where the HW needs to have set some PIO lines or system cfg
-	     registers. setup should return a pointer to private data,
-	     which will be stored in bsp_priv, and then passed to init and
-	     exit callbacks. init/exit callbacks should not use or modify
+	     registers; see the sketch after this list.  init/exit callbacks
+	     should not use or modify
 	     platform data.
- o custom_cfg/custom_data: this is a custom configuration that can be passed
-			   while initializing the resources.
  o bsp_priv: another private pointer.
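+
+A board-level sketch of wiring these callbacks up (a minimal sketch; the
+struct name plat_stmmacenet_data and the board specifics here are assumptions
+for illustration, not taken from a particular platform):
+
+	static int my_board_init(struct platform_device *pdev, void *priv)
+	{
+		/* e.g. set a PIO line or a system cfg register */
+		return 0;
+	}
+
+	static struct plat_stmmacenet_data my_plat_data = {
+		.init	  = my_board_init,
+		.bsp_priv = &my_board_priv,	/* assumed board-private data */
+	};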
 
 For MDIO bus The we have:
@@ -262,7 +254,7 @@
 
 During the board's device_init we can configure the first
 MAC for fixed_link by calling:
-  fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status));)
+  fixed_phy_add(PHY_POLL, 1, &stmmac0_fixed_phy_status, -1);
 and the second one, with a real PHY device attached to the bus,
 by using the stmmac_mdio_bus_data structure (to provide the id, the
 reset procedure etc).
@@ -278,8 +270,6 @@
 Please see the following document:
 	Documentation/devicetree/bindings/net/stmmac.txt
 
-and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
-
 4.11) This is a summary of the content of some relevant files:
  o stmmac_main.c: to implement the main network device driver;
  o stmmac_mdio.c: to provide mdio functions;
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index c5d7ade..476df04 100644
--- a/Documentation/networking/switchdev.txt
+++ b/Documentation/networking/switchdev.txt
@@ -279,8 +279,18 @@
 current STP state.  The switch driver, knowing which ports are within which
 vlan L2 domain, can program the switch device for flooding.  The packet should
 also be sent to the port netdev for processing by the bridge driver.  The
-bridge should not reflood the packet to the same ports the device flooded.
-XXX: the mechanism to avoid duplicate flood packets is being discuseed.
+bridge should not reflood the packet to the same ports the device flooded,
+otherwise there will be duplicate packets on the wire.
+
+To avoid duplicate packets, the device/driver should mark a packet as already
+forwarded using skb->offload_fwd_mark.  The same mark is set on the device
+ports in the domain using dev->offload_fwd_mark.  If the skb->offload_fwd_mark
+is non-zero and matches the forwarding egress port's dev->offload_fwd_mark,
+the kernel will drop the skb right before transmit on the egress port, with
+the understanding that the device already forwarded the packet on the same
+egress port.
+The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark
+for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and
+a group ifindex.
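+
+A minimal sketch of the receive path in a switchdev driver ("port_dev" here
+stands for the ingress port's netdev, whose offload_fwd_mark is assumed to
+have been set up earlier, e.g. via switchdev_port_fwd_mark_set()):
+
+	/* mark the skb so the kernel suppresses reflooding on ports
+	 * that share this forwarding domain */
+	skb->offload_fwd_mark = port_dev->offload_fwd_mark;
+	netif_receive_skb(skb);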
 
 It is possible for the switch device to not handle flooding and push the
 packets up to the bridge driver for flooding.  This is not ideal as the number
@@ -357,4 +367,5 @@
 
 The driver can monitor for updates to arp_tbl using the netevent notifier
 NETEVENT_NEIGH_UPDATE.  The device can be programmed with resolved nexthops
-for the routes as arp_tbl updates.
+for the routes as arp_tbl updates.  The driver implements ndo_neigh_destroy
+to know when arp_tbl neighbor entries are purged from the port.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 5f09226..a977339 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -359,6 +359,13 @@
 supported, the driver may time stamp more than just the requested types
 of packets.
 
+Drivers are free to use a more permissive configuration than the requested
+configuration. It is expected that drivers should directly implement only the
+most generic mode that can be supported. For example, if the hardware can
+support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
+is more generic (and more useful to applications).
+
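+A driver-side sketch of such upscaling (a minimal sketch; the filter names
+are those defined in include/uapi/linux/net_tstamp.h, and the exact mapping
+is device specific):
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+		/* hardware only has an "all PTPv2 events" mode, so report the
+		 * more permissive filter that is actually in effect */
+		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+		break;
+	}
+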
 A driver which supports hardware time stamping shall update the struct
 with the actual, possibly more permissive configuration. If the
 requested packets cannot be time stamped, then nothing should be
diff --git a/Documentation/networking/vxlan.txt b/Documentation/networking/vxlan.txt
index 6d99351..c28f498 100644
--- a/Documentation/networking/vxlan.txt
+++ b/Documentation/networking/vxlan.txt
@@ -1,32 +1,36 @@
 Virtual eXtensible Local Area Networking documentation
 ======================================================
 
-The VXLAN protocol is a tunnelling protocol that is designed to
-solve the problem of limited number of available VLAN's (4096).
-With VXLAN identifier is expanded to 24 bits.
+The VXLAN protocol is a tunnelling protocol designed to solve the
+problem of limited VLAN IDs (4096) in IEEE 802.1q.  With VXLAN the
+size of the identifier is expanded to 24 bits (16777216).
 
-It is a draft RFC standard, that is implemented by Cisco Nexus,
-Vmware and Brocade. The protocol runs over UDP using a single
-destination port (still not standardized by IANA).
-This document describes the Linux kernel tunnel device,
-there is also an implantation of VXLAN for Openvswitch.
+VXLAN is described by IETF RFC 7348, and has been implemented by a
+number of vendors.  The protocol runs over UDP using a single
+destination port.  This document describes the Linux kernel tunnel
+device, there is also a separate implementation of VXLAN for
+Openvswitch.
 
-Unlike most tunnels, a VXLAN is a 1 to N network, not just point
-to point. A VXLAN device can either dynamically learn the IP address
-of the other end, in a manner similar to a learning bridge, or the
-forwarding entries can be configured statically.
+Unlike most tunnels, a VXLAN is a 1 to N network, not just point to
+point. A VXLAN device can learn the IP address of the other endpoint
+either dynamically in a manner similar to a learning bridge, or make
+use of statically-configured forwarding entries.
 
-The management of vxlan is done in a similar fashion to it's
-too closest neighbors GRE and VLAN. Configuring VXLAN requires
-the version of iproute2 that matches the kernel release
-where VXLAN was first merged upstream.
+The management of vxlan is done in a manner similar to its two closest
+neighbors GRE and VLAN. Configuring VXLAN requires the version of
+iproute2 that matches the kernel release where VXLAN was first merged
+upstream.
 
 1. Create vxlan device
-  # ip li add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
+ # ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1 dstport 4789
 
-This creates a new device (vxlan0). The device uses the
-the multicast group 239.1.1.1 over eth1 to handle packets where
-no entry is in the forwarding table.
+This creates a new device named vxlan0.  The device uses the multicast
+group 239.1.1.1 over eth1 to handle traffic for which there is no
+entry in the forwarding table.  The destination port number is set to
+the IANA-assigned value of 4789.  The Linux implementation of VXLAN
+pre-dates the IANA's selection of a standard destination port number
+and uses the Linux-selected value by default to maintain backwards
+compatibility.
 
 2. Delete vxlan device
   # ip link delete vxlan0
diff --git a/Documentation/nvmem/nvmem.txt b/Documentation/nvmem/nvmem.txt
new file mode 100644
index 0000000..dbd40d8
--- /dev/null
+++ b/Documentation/nvmem/nvmem.txt
@@ -0,0 +1,152 @@
+			    NVMEM SUBSYSTEM
+	  Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+This document explains the NVMEM Framework along with the APIs provided,
+and how to use it.
+
+1. Introduction
+===============
+*NVMEM* is the abbreviation for the Non Volatile Memory layer. It is used to
+retrieve SoC or device specific configuration data from non volatile
+memories like EEPROMs, efuses and so on.
+
+Before this framework existed, NVMEM drivers like eeprom were stored in
+drivers/misc, where they all had to duplicate pretty much the same code to
+register a sysfs file, allow in-kernel users to access the content of the
+devices they were driving, etc.
+
+This was also a problem for other in-kernel users: since the solutions
+varied from one driver to another, there was a rather big abstraction leak.
+
+This framework aims to solve these problems. It also introduces a DT
+representation for consumer devices to get the data they require (MAC
+addresses, SoC/revision IDs, part numbers, and so on) from the NVMEMs. The
+framework is based on regmap, so that most of the abstraction available in
+regmap can be reused across multiple types of buses.
+
+NVMEM Providers
++++++++++++++++
+
+NVMEM provider refers to an entity that implements methods to initialize, read
+and write the non-volatile memory.
+
+2. Registering/Unregistering the NVMEM provider
+===============================================
+
+An NVMEM provider can register with the NVMEM core by supplying the relevant
+nvmem configuration to nvmem_register(); on success the core returns a valid
+nvmem_device pointer.
+
+nvmem_unregister(nvmem) is used to unregister a previously registered provider.
+
+For example, a simple qfprom case:
+
+static struct nvmem_config econfig = {
+	.name = "qfprom",
+	.owner = THIS_MODULE,
+};
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+	...
+	econfig.dev = &pdev->dev;
+	nvmem = nvmem_register(&econfig);
+	...
+}
+
+It is mandatory that the NVMEM provider has a regmap associated with its
+struct device.  Failure to do so will cause nvmem_register() to return an
+error code.
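+
+For example, an MMIO-backed provider would typically associate a regmap with
+the device in its probe function before registering (a minimal sketch; the
+regmap_config values are placeholders, not taken from a real driver):
+
+static const struct regmap_config qfprom_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 8,
+	.reg_stride = 1,
+};
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+	struct regmap *regmap;
+	void __iomem *base;
+	...
+	regmap = devm_regmap_init_mmio(&pdev->dev, base, &qfprom_regmap_config);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	econfig.dev = &pdev->dev;
+	nvmem = nvmem_register(&econfig);
+	...
+}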
+
+NVMEM Consumers
++++++++++++++++
+
+NVMEM consumers are the entities which make use of the NVMEM provider to
+read from and write to the NVMEM.
+
+3. NVMEM cell based consumer APIs
+=================================
+
+NVMEM cells are the data entries/fields in the NVMEM.
+The NVMEM framework provides 3 APIs to read/write NVMEM cells.
+
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+
+void *nvmem_cell_read(struct nvmem_cell *cell, ssize_t *len);
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, ssize_t len);
+
+The *nvmem_cell_get() APIs return a reference to an nvmem cell for a given
+id, and nvmem_cell_read/write() can then read from or write to the cell.
+Once the consumer is finished with the cell it should call *nvmem_cell_put()
+to free all the memory allocated for the cell.
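+
+A minimal consumer sketch (the cell name "mac-address" is only an assumption
+for illustration; the buffer returned by nvmem_cell_read() is assumed to be
+freed by the caller):
+
+	struct nvmem_cell *cell;
+	ssize_t len;
+	u8 *mac;
+
+	cell = nvmem_cell_get(dev, "mac-address");
+	if (IS_ERR(cell))
+		return PTR_ERR(cell);
+
+	mac = nvmem_cell_read(cell, &len);
+	nvmem_cell_put(cell);
+	if (IS_ERR(mac))
+		return PTR_ERR(mac);
+	/* ... use mac[0..len-1] ... */
+	kfree(mac);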
+
+4. Direct NVMEM device based consumer APIs
+==========================================
+
+In some instances it is necessary to directly read/write the NVMEM.
+To facilitate such consumers, the NVMEM framework provides the APIs below.
+
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+					   const char *name);
+void nvmem_device_put(struct nvmem_device *nvmem);
+int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
+		      size_t bytes, void *buf);
+int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
+		       size_t bytes, void *buf);
+int nvmem_device_cell_read(struct nvmem_device *nvmem,
+			   struct nvmem_cell_info *info, void *buf);
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+			    struct nvmem_cell_info *info, void *buf);
+
+Before a consumer can read/write the NVMEM directly, it should get hold of
+an nvmem_device from one of the *nvmem_device_get() APIs.
+
+The difference between these APIs and the cell-based APIs is that they
+always take an nvmem_device as a parameter.
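+
+A minimal sketch of direct device access (the provider name "qfprom" and the
+offset used here are only assumptions for illustration):
+
+	struct nvmem_device *nvmem;
+	u8 buf[4];
+	int ret;
+
+	nvmem = nvmem_device_get(dev, "qfprom");
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	ret = nvmem_device_read(nvmem, 0x100, sizeof(buf), buf);
+	nvmem_device_put(nvmem);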
+
+5. Releasing a reference to the NVMEM
+=====================================
+
+When a consumer no longer needs the NVMEM, it has to release the reference
+to the NVMEM it has obtained using the APIs mentioned in the above section.
+The NVMEM framework provides 2 APIs to release a reference to the NVMEM.
+
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+void nvmem_device_put(struct nvmem_device *nvmem);
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
+
+These APIs release a reference to the NVMEM; devm_nvmem_cell_put() and
+devm_nvmem_device_put() additionally destroy the devres associated with
+this NVMEM.
+
+Userspace
++++++++++
+
+6. Userspace binary interface
+==============================
+
+Userspace can read/write the raw NVMEM file located at
+/sys/bus/nvmem/devices/*/nvmem
+
+ex:
+
+hexdump /sys/bus/nvmem/devices/qfprom0/nvmem
+
+0000000 0000 0000 0000 0000 0000 0000 0000 0000
+*
+00000a0 db10 2240 0000 e000 0c00 0c00 0000 0c00
+0000000 0000 0000 0000 0000 0000 0000 0000 0000
+...
+*
+0001000
+
+7. DeviceTree Binding
+=====================
+
+See Documentation/devicetree/bindings/nvmem/nvmem.txt
diff --git a/Documentation/pcmcia/locking.txt b/Documentation/pcmcia/locking.txt
index 68f622b..b2c9b47 100644
--- a/Documentation/pcmcia/locking.txt
+++ b/Documentation/pcmcia/locking.txt
@@ -96,7 +96,7 @@
 3. Per PCMCIA-device Data:
 --------------------------
 
-The "main" struct pcmcia_devie is protected as follows (read-only fields
+The "main" struct pcmcia_device is protected as follows (read-only fields
 or single-use fields not mentioned):
 
 
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index d172bce..8ba6625 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -341,6 +341,13 @@
 	and is entirely responsible for bringing the device back to the
 	functional state as appropriate.
 
+	Note that this direct-complete procedure applies even if the device is
+	disabled for runtime PM; only the runtime-PM status matters.  It follows
+	that if a device has system-sleep callbacks but does not support runtime
+	PM, then its prepare callback must never return a positive value.  This
+	is because all devices are initially set to runtime-suspended with
+	runtime PM disabled.
+
     2.	The suspend methods should quiesce the device to stop it from performing
 	I/O.  They also may save the device registers and put it into the
 	appropriate low-power state, depending on the bus type the device is on,
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index e76dc0a..0784bc3 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -445,10 +445,6 @@
   bool pm_runtime_status_suspended(struct device *dev);
     - return true if the device's runtime PM status is 'suspended'
 
-  bool pm_runtime_suspended_if_enabled(struct device *dev);
-    - return true if the device's runtime PM status is 'suspended' and its
-      'power.disable_depth' field is equal to 1
-
   void pm_runtime_allow(struct device *dev);
     - set the power.runtime_auto flag for the device and decrease its usage
       counter (used by the /sys/devices/.../power/control interface to
diff --git a/Documentation/power/suspend-and-cpuhotplug.txt b/Documentation/power/suspend-and-cpuhotplug.txt
index 2850df3..2fc9095 100644
--- a/Documentation/power/suspend-and-cpuhotplug.txt
+++ b/Documentation/power/suspend-and-cpuhotplug.txt
@@ -72,7 +72,7 @@
                                         |
                                         v
                            Disable regular cpu hotplug
-                        by setting cpu_hotplug_disabled=1
+                        by increasing cpu_hotplug_disabled
                                         |
                                         v
                             Release cpu_add_remove_lock
@@ -89,7 +89,7 @@
 execution during resume):
 * enable_nonboot_cpus() which involves:
    |  Acquire cpu_add_remove_lock
-   |  Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug
+   |  Decrease cpu_hotplug_disabled, thereby enabling regular cpu hotplug
    |  Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
    |  Release cpu_add_remove_lock
    v
@@ -120,7 +120,7 @@
                            Acquire cpu_add_remove_lock
                                         |
                                         v
-                          If cpu_hotplug_disabled is 1
+                          If cpu_hotplug_disabled > 0
                                 return gracefully
                                         |
                                         |
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.txt
index 2a230d01..205c1b8 100644
--- a/Documentation/powerpc/cxl.txt
+++ b/Documentation/powerpc/cxl.txt
@@ -133,7 +133,7 @@
     The following file operations are supported on both slave and
     master devices.
 
-    A userspace library libcxl is avaliable here:
+    A userspace library libcxl is available here:
 	https://github.com/ibm-capi/libcxl
     This provides a C interface to this kernel API.
 
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt
new file mode 100644
index 0000000..4202d1b
--- /dev/null
+++ b/Documentation/powerpc/cxlflash.txt
@@ -0,0 +1,318 @@
+Introduction
+============
+
+    The IBM Power architecture provides support for CAPI (Coherent
+    Accelerator Power Interface), which is available to certain PCIe slots
+    on Power 8 systems. CAPI can be thought of as a special tunneling
+    protocol through PCIe that allows PCIe adapters to look like special
+    purpose co-processors which can read or write an application's
+    memory and generate page faults. As a result, the host interface to
+    an adapter running in CAPI mode does not require the data buffers to
+    be mapped to the device's memory (IOMMU bypass) nor does it require
+    memory to be pinned.
+
+    On Linux, Coherent Accelerator (CXL) kernel services present CAPI
+    devices as a PCI device by implementing a virtual PCI host bridge.
+    This abstraction simplifies the infrastructure and programming
+    model, allowing for drivers to look similar to other native PCI
+    device drivers.
+
+    CXL provides a mechanism by which user space applications can
+    directly talk to a device (network or storage) bypassing the typical
+    kernel/device driver stack. The CXL Flash Adapter Driver enables a
+    user space application to access Flash storage directly.
+
+    The CXL Flash Adapter Driver is a kernel module that sits in the
+    SCSI stack as a low level device driver (below the SCSI disk and
+    protocol drivers) for the IBM CXL Flash Adapter. This driver is
+    responsible for the initialization of the adapter, setting up the
+    special path for user space access, and performing error recovery. It
+    communicates directly with the Flash Accelerator Functional Unit (AFU)
+    as described in Documentation/powerpc/cxl.txt.
+
+    The cxlflash driver supports two, mutually exclusive, modes of
+    operation at the device (LUN) level:
+
+        - Any flash device (LUN) can be configured to be accessed as a
+          regular disk device (i.e.: /dev/sdc). This is the default mode.
+
+        - Any flash device (LUN) can be configured to be accessed from
+          user space with a special block library. This mode further
+          specifies the means of accessing the device and provides for
+          either raw access to the entire LUN (referred to as direct
+          or physical LUN access) or access to a kernel/AFU-mediated
+          partition of the LUN (referred to as virtual LUN access). The
+          segmentation of a disk device into virtual LUNs is assisted
+          by special translation services provided by the Flash AFU.
+
+Overview
+========
+
+    The Coherent Accelerator Interface Architecture (CAIA) introduces a
+    concept of a master context. A master typically has special privileges
+    granted to it by the kernel or hypervisor allowing it to perform AFU
+    wide management and control. The master may or may not be involved
+    directly in each user I/O, but at the minimum is involved in the
+    initial setup before the user application is allowed to send requests
+    directly to the AFU.
+
+    The CXL Flash Adapter Driver establishes a master context with the
+    AFU. It uses memory mapped I/O (MMIO) for this control and setup. The
+    Adapter Problem Space Memory Map looks like this:
+
+                     +-------------------------------+
+                     |    512 * 64 KB User MMIO      |
+                     |        (per context)          |
+                     |       User Accessible         |
+                     +-------------------------------+
+                     |    512 * 128 B per context    |
+                     |    Provisioning and Control   |
+                     |   Trusted Process accessible  |
+                     +-------------------------------+
+                     |         64 KB Global          |
+                     |   Trusted Process accessible  |
+                     +-------------------------------+
+
+    This driver configures itself into the SCSI software stack as an
+    adapter driver. The driver is the only entity that is considered a
+    Trusted Process to program the Provisioning and Control and Global
+    areas in the MMIO Space shown above.  The master context driver
+    discovers all LUNs attached to the CXL Flash adapter and instantiates
+    scsi block devices (/dev/sdb, /dev/sdc etc.) for each unique LUN
+    seen from each path.
+
+    Once these scsi block devices are instantiated, an application
+    written to a specification provided by the block library may get
+    access to the Flash from user space (without requiring a system call).
+
+    This master context driver also provides a series of ioctls for this
+    block library to enable this user space access.  The driver supports
+    two modes for accessing the block device.
+
+    The first mode is called a virtual mode. In this mode a single scsi
+    block device (/dev/sdb) may be carved up into any number of distinct
+    virtual LUNs. The virtual LUNs may be resized as long as the sum of
+    the sizes of all the virtual LUNs, along with the meta-data associated
+    with them, does not exceed the physical capacity.
+
+    The second mode is called the physical mode. In this mode a single
+    block device (/dev/sdb) may be opened directly by the block library
+    and the entire space for the LUN is available to the application.
+
+    Only the physical mode provides persistence of the data.  i.e. The
+    data written to the block device will survive application exit and
+    restart and also reboot. The virtual LUNs do not persist (i.e. do
+    not survive after the application terminates or the system reboots).
+
+
+Block library API
+=================
+
+    Applications intending to get access to the CXL Flash from user
+    space should use the block library, as it abstracts the details of
+    interfacing directly with the cxlflash driver that are necessary for
+    performing administrative actions (i.e.: setup, tear down, resize).
+    The block library can be thought of as a 'user' of services,
+    implemented as IOCTLs, that are provided by the cxlflash driver
+    specifically for devices (LUNs) operating in user space access
+    mode. While it is not a requirement that applications understand
+    the interface between the block library and the cxlflash driver,
+    a high-level overview of each supported service (IOCTL) is provided
+    below.
+
+    The block library can be found on GitHub:
+    http://www.github.com/mikehollinger/ibmcapikv
+
+
+CXL Flash Driver IOCTLs
+=======================
+
+    Users, such as the block library, that wish to interface with a flash
+    device (LUN) via user space access need to use the services provided
+    by the cxlflash driver. As these services are implemented as ioctls,
+    a file descriptor handle must first be obtained in order to establish
+    the communication channel between a user and the kernel.  This file
+    descriptor is obtained by opening the device special file associated
+    with the scsi disk device (/dev/sdb) that was created during LUN
+    discovery. As per the location of the cxlflash driver within the
+    SCSI protocol stack, this open is actually not seen by the cxlflash
+    driver. Upon successful open, the user receives a file descriptor
+    (herein referred to as fd1) that should be used for issuing the
+    subsequent ioctls listed below.
+
+    The structure definitions for these IOCTLs are available in:
+    uapi/scsi/cxlflash_ioctl.h
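+
+    A minimal sketch of obtaining fd1 and issuing the first ioctl (the struct
+    name and its contents are assumptions here; see uapi/scsi/cxlflash_ioctl.h
+    for the authoritative definitions, and /dev/sdb is just an example node):
+
+	int fd1 = open("/dev/sdb", O_RDWR);
+	struct dk_cxlflash_attach attach;
+
+	memset(&attach, 0, sizeof(attach));
+	if (fd1 < 0 || ioctl(fd1, DK_CXLFLASH_ATTACH, &attach) < 0)
+		/* handle error */;
+	/* on success, attach carries the context id and fd2 from the driver */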
+
+DK_CXLFLASH_ATTACH
+------------------
+
+    This ioctl obtains, initializes, and starts a context using the CXL
+    kernel services. These services specify a context id (u16) by which
+    to uniquely identify the context and its allocated resources. The
+    services additionally provide a second file descriptor (herein
+    referred to as fd2) that is used by the block library to initiate
+    memory mapped I/O (via mmap()) to the CXL flash device and poll for
+    completion events. This file descriptor is intentionally installed by
+    this driver and not the CXL kernel services to allow for intermediary
+    notification and access in the event of a non-user-initiated close(),
+    such as a killed process. This design point is described in further
+    detail in the description for the DK_CXLFLASH_DETACH ioctl.
+
+    There are a few important aspects regarding the "tokens" (context id
+    and fd2) that are provided back to the user:
+
+        - These tokens are only valid for the process under which they
+          were created. The child of a forked process cannot continue
+          to use the context id or file descriptor created by its parent
+          (see DK_CXLFLASH_VLUN_CLONE for further details).
+
+        - These tokens are only valid for the lifetime of the context and
+          the process under which they were created. Once either is
+          destroyed, the tokens are to be considered stale and subsequent
+          usage will result in errors.
+
+        - When a context is no longer needed, the user shall detach from
+          the context via the DK_CXLFLASH_DETACH ioctl.
+
+        - A close on fd2 will invalidate the tokens. This operation is not
+          required by the user.
+
+DK_CXLFLASH_USER_DIRECT
+-----------------------
+    This ioctl is responsible for transitioning the LUN to direct
+    (physical) mode access and configuring the AFU for direct access from
+    user space on a per-context basis. Additionally, the block size and
+    last logical block address (LBA) are returned to the user.
+
+    As mentioned previously, when operating in user space access mode,
+    LUNs may be accessed in whole or in part. Only one mode is allowed
+    at a time and if one mode is active (outstanding references exist),
+    requests to use the LUN in a different mode are denied.
+
+    The AFU is configured for direct access from user space by adding an
+    entry to the AFU's resource handle table. The index of the entry is
+    treated as a resource handle that is returned to the user. The user
+    is then able to use the handle to reference the LUN during I/O.
+
+DK_CXLFLASH_USER_VIRTUAL
+------------------------
+    This ioctl is responsible for transitioning the LUN to virtual mode
+    of access and configuring the AFU for virtual access from user space
+    on a per-context basis. Additionally, the block size and last logical
+    block address (LBA) are returned to the user.
+
+    As mentioned previously, when operating in user space access mode,
+    LUNs may be accessed in whole or in part. Only one mode is allowed
+    at a time and if one mode is active (outstanding references exist),
+    requests to use the LUN in a different mode are denied.
+
+    The AFU is configured for virtual access from user space by adding
+    an entry to the AFU's resource handle table. The index of the entry
+    is treated as a resource handle that is returned to the user. The
+    user is then able to use the handle to reference the LUN during I/O.
+
+    By default, the virtual LUN is created with a size of 0. The user
+    would need to use the DK_CXLFLASH_VLUN_RESIZE ioctl to grow
+    the virtual LUN to a desired size. To avoid having to perform this
+    resize for the initial creation of the virtual LUN, the user has the
+    option of specifying a size as part of the DK_CXLFLASH_USER_VIRTUAL
+    ioctl, such that when success is returned to the user, the
+    resource handle that is provided is already referencing provisioned
+    storage. This is reflected by the last LBA being a non-zero value.
+
+DK_CXLFLASH_VLUN_RESIZE
+-----------------------
+    This ioctl is responsible for resizing a previously created virtual
+    LUN and will fail if invoked upon a LUN that is not in virtual
+    mode. Upon success, an updated last LBA is returned to the user
+    indicating the new size of the virtual LUN associated with the
+    resource handle.
+
+    The partitioning of virtual LUNs is jointly mediated by the cxlflash
+    driver and the AFU. An allocation table is kept for each LUN that is
+    operating in the virtual mode and used to program a LUN translation
+    table that the AFU references when provided with a resource handle.
+
+DK_CXLFLASH_RELEASE
+-------------------
+    This ioctl is responsible for releasing a previously obtained
+    reference to either a physical or virtual LUN. This can be
+    thought of as the inverse of the DK_CXLFLASH_USER_DIRECT or
+    DK_CXLFLASH_USER_VIRTUAL ioctls. Upon success, the resource handle
+    is no longer valid and the entry in the resource handle table is
+    made available to be used again.
+
+    As part of the release process for virtual LUNs, the virtual LUN
+    is first resized to 0 to clear out and free the translation tables
+    associated with the virtual LUN reference.
+
+DK_CXLFLASH_DETACH
+------------------
+    This ioctl is responsible for unregistering a context with the
+    cxlflash driver and releasing outstanding resources that were
+    not explicitly released via the DK_CXLFLASH_RELEASE ioctl. Upon
+    success, all "tokens" which had been provided to the user from the
+    DK_CXLFLASH_ATTACH onward are no longer valid.
+
+DK_CXLFLASH_VLUN_CLONE
+----------------------
+    This ioctl is responsible for cloning a previously created
+    context to a more recently created context. It exists solely to
+    support maintaining user space access to storage after a process
+    forks. Upon success, the child process (which invoked the ioctl)
+    will have access to the same LUNs via the same resource handle(s)
+    and fd2 as the parent, but under a different context.
+
+    Context sharing across processes is not supported with CXL and
+    therefore each fork must be met with establishing a new context
+    for the child process. This ioctl simplifies the state management
+    and playback required by a user in such a scenario. When a process
+    forks, the child process can clone the parent's context by first creating
+    a context (via DK_CXLFLASH_ATTACH) and then using this ioctl to
+    perform the clone from the parent to the child.
+
+    The clone itself is fairly simple. The resource handle and lun
+    translation tables are copied from the parent context to the child's
+    and then synced with the AFU.
+
+DK_CXLFLASH_VERIFY
+------------------
+    This ioctl is used to detect various changes such as the capacity of
+    the disk changing, the number of LUNs visible changing, etc. In cases
+    where the changes affect the application (such as a LUN resize), the
+    cxlflash driver will report the changed state to the application.
+
+    The user calls in when they want to validate that a LUN hasn't been
+    changed in response to a check condition. As the user is operating out
+    of band from the kernel, they will see these types of events without
+    the kernel's knowledge. When encountered, the user's architected
+    behavior is to call in to this ioctl, indicating what they want to
+    verify and passing along any appropriate information. For now, only
+    verifying a LUN change (i.e.: a size change) with sense data is
+    supported.
+
+DK_CXLFLASH_RECOVER_AFU
+-----------------------
+    This ioctl is used to drive recovery (if such an action is warranted)
+    of a specified user context. Any state associated with the user context
+    is re-established upon successful recovery.
+
+    User contexts are put into an error condition when the device needs to
+    be reset or is terminating. Users are notified of this error condition
+    by seeing all 0xF's on an MMIO read. Upon encountering this, the
+    architected behavior for a user is to call into this ioctl to recover
+    their context. A user may also call into this ioctl at any time to
+    check if the device is operating normally. If a failure is returned
+    from this ioctl, the user is expected to gracefully clean up their
+    context via release/detach ioctls. Until they do, the context they
+    hold is not relinquished. The user may also optionally exit the process
+    at which time the context/resources they held will be freed as part of
+    the release fop.
+
+DK_CXLFLASH_MANAGE_LUN
+----------------------
+    This ioctl is used to switch a LUN from a mode where it is available
+    for file-system access (legacy), to a mode where it is set aside for
+    exclusive user space access (superpipe). In case a LUN is visible
+    across multiple ports and adapters, this ioctl is used to uniquely
+    identify each LUN by its World Wide Node Name (WWNN).
diff --git a/Documentation/powerpc/dscr.txt b/Documentation/powerpc/dscr.txt
index 1ff4400..ece300c 100644
--- a/Documentation/powerpc/dscr.txt
+++ b/Documentation/powerpc/dscr.txt
@@ -4,7 +4,7 @@
 DSCR register in powerpc allows user to have some control of prefetch of data
 stream in the processor. Please refer to the ISA documents or related manual
 for more detailed information regarding how to use this DSCR to attain this
-control of the pefetches . This document here provides an overview of kernel
+control of the prefetches. This document provides an overview of kernel
 support for DSCR, related kernel objects, it's functionalities and exported
 user interface.
 
@@ -44,7 +44,7 @@
 	value into every CPU's DSCR register right away and updates the current
 	thread's DSCR value as well.
 
-	Changing the CPU specif DSCR default value in the sysfs does exactly
+	Changing the CPU specific DSCR default value in the sysfs does exactly
 	the same thing as above but unlike the global one above, it just changes
 	stuff for that particular CPU instead for all the CPUs on the system.
 
@@ -62,7 +62,7 @@
 
 	Accessing DSCR through user level SPR (0x03) from user space will first
 	create a facility unavailable exception. Inside this exception handler
-	all mfspr isntruction based read attempts will get emulated and returned
+	all mfspr instruction based read attempts will get emulated and returned
 	where as the first mtspr instruction based write attempts will enable
 	the DSCR facility for the next time around (both for read and write) by
 	setting DSCR facility in the FSCR register.
diff --git a/Documentation/powerpc/qe_firmware.txt b/Documentation/powerpc/qe_firmware.txt
index 2031ddb..e7ac24a 100644
--- a/Documentation/powerpc/qe_firmware.txt
+++ b/Documentation/powerpc/qe_firmware.txt
@@ -117,7 +117,7 @@
 Extended Modes
 
 This is a double word bit array (64 bits) that defines special functionality
-which has an impact on the softwarew drivers.  Each bit has its own impact
+which has an impact on the software drivers.  Each bit has its own impact
 and has special instructions for the s/w associated with it.  This structure is
 described in this table:
 
diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt
index c508cce..7cb7264 100644
--- a/Documentation/pps/pps.txt
+++ b/Documentation/pps/pps.txt
@@ -125,7 +125,7 @@
 (pps_ktimer_echo(), passing to it the "ptr" pointer) if the user
 asked for that... etc..
 
-Please see the file drivers/pps/clients/ktimer.c for example code.
+Please see the file drivers/pps/clients/pps-ktimer.c for example code.
 
 
 SYSFS support
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 10c874e..9189535 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,8 +16,6 @@
 	- hints for debugging on s390 systems.
 driver-model.txt
 	- information on s390 devices and the driver model.
-kvm.txt
-	- ioctl calls to /dev/kvm on s390.
 monreader.txt
 	- information on accessing the z/VM monitor stream from Linux.
 qeth.txt
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
deleted file mode 100644
index 85f3280..0000000
--- a/Documentation/s390/kvm.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-*** BIG FAT WARNING ***
-The kvm module is currently in EXPERIMENTAL state for s390. This means that
-the interface to the module is not yet considered to remain stable. Thus, be
-prepared that we keep breaking your userspace application and guest
-compatibility over and over again until we feel happy with the result. Make sure
-your guest kernel, your host kernel, and your userspace launcher are in a
-consistent state.
-
-This Documentation describes the unique ioctl calls to /dev/kvm, the resulting
-kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
-
-1. ioctl calls to /dev/kvm
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_GET_API_VERSION
-KVM_CREATE_VM		(*) see note
-KVM_CHECK_EXTENSION
-KVM_GET_VCPU_MMAP_SIZE
-
-Notes:
-* KVM_CREATE_VM may fail on s390, if the calling process has multiple
-threads and has not called KVM_S390_ENABLE_SIE before.
-
-In addition, on s390 the following architecture specific ioctls are supported:
-ioctl:		KVM_S390_ENABLE_SIE
-args:		none
-see also:	include/linux/kvm.h
-This call causes the kernel to switch on PGSTE in the user page table. This
-operation is needed in order to run a virtual machine, and it requires the
-calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
-will implicitly try to switch on PGSTE if the user process has not called
-KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
-before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
-observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
-operation, is not reversible, and will persist over the entire lifetime of
-the calling process. It does not have any user-visible effect other than a small
-performance penalty.
-
-2. ioctl calls to the kvm-vm file descriptor
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_CREATE_VCPU
-KVM_SET_USER_MEMORY_REGION      (*) see note
-KVM_GET_DIRTY_LOG		(**) see note
-
-Notes:
-*  kvm does only allow exactly one memory slot on s390, which has to start
-   at guest absolute address zero and at a user address that is aligned on any
-   page boundary. This hardware "limitation" allows us to have a few unique
-   optimizations. The memory slot doesn't have to be filled
-   with memory actually, it may contain sparse holes. That said, with different
-   user memory layout this does still allow a large flexibility when
-   doing the guest memory setup.
-** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
-log. This ioctl call is only needed for guest migration, and we intend to
-implement this one in the future.
-
-In addition, on s390 the following architecture specific ioctls for the kvm-vm
-file descriptor are supported:
-ioctl:		KVM_S390_INTERRUPT
-args:		struct kvm_s390_interrupt *
-see also:	include/linux/kvm.h
-This ioctl is used to submit a floating interrupt for a virtual machine.
-Floating interrupts may be delivered to any virtual cpu in the configuration.
-Only some interrupt types defined in include/linux/kvm.h make sense when
-submitted as floating interrupts. The following interrupts are not considered
-to be useful as floating interrupts, and a call to inject them will result in
--EINVAL error code: program interrupts and interprocessor signals. Valid
-floating interrupts are:
-KVM_S390_INT_VIRTIO
-KVM_S390_INT_SERVICE
-
-3. ioctl calls to the kvm-vcpu file descriptor
-KVM does support the following ioctls on s390 that are common with other
-architectures and do behave the same:
-KVM_RUN
-KVM_GET_REGS
-KVM_SET_REGS
-KVM_GET_SREGS
-KVM_SET_SREGS
-KVM_GET_FPU
-KVM_SET_FPU
-
-In addition, on s390 the following architecture specific ioctls for the
-kvm-vcpu file descriptor are supported:
-ioctl:		KVM_S390_INTERRUPT
-args:		struct kvm_s390_interrupt *
-see also:	include/linux/kvm.h
-This ioctl is used to submit an interrupt for a specific virtual cpu.
-Only some interrupt types defined in include/linux/kvm.h make sense when
-submitted for a specific cpu. The following interrupts are not considered
-to be useful, and a call to inject them will result in -EINVAL error code:
-service processor calls and virtio interrupts. Valid interrupt types are:
-KVM_S390_PROGRAM_INT
-KVM_S390_SIGP_STOP
-KVM_S390_RESTART
-KVM_S390_SIGP_SET_PREFIX
-KVM_S390_INT_EMERGENCY
-
-ioctl:		KVM_S390_STORE_STATUS
-args:		unsigned long
-see also:	include/linux/kvm.h
-This ioctl stores the state of the cpu at the guest real address given as
-argument, unless one of the following values defined in include/linux/kvm.h
-is given as argument:
-KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
-absolute lowcore as defined by the principles of operation
-KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
-its prefix page just like the dump tool that comes with zipl. This is useful
-to create a system dump for use with lkcdutils or crash.
-
-ioctl:		KVM_S390_SET_INITIAL_PSW
-args:		struct kvm_s390_psw *
-see also:	include/linux/kvm.h
-This ioctl can be used to set the processor status word (psw) of a stopped cpu
-prior to running it with KVM_RUN. Note that this call is not required to modify
-the psw during sie intercepts that fall back to userspace because struct kvm_run
-does contain the psw, and this value is evaluated during reentry of KVM_RUN
-after the intercept exit was recognized.
-
-ioctl:		KVM_S390_INITIAL_RESET
-args:		none
-see also:	include/linux/kvm.h
-This ioctl can be used to perform an initial cpu reset as defined by the
-principles of operation. The target cpu has to be in stopped state.
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
index c4407a4..f4cb0b2 100644
--- a/Documentation/static-keys.txt
+++ b/Documentation/static-keys.txt
@@ -1,7 +1,22 @@
 			Static Keys
 			-----------
 
-By: Jason Baron <jbaron@redhat.com>
+DEPRECATED API:
+
+The use of 'struct static_key' directly is now DEPRECATED. In addition,
+static_key_{true,false}() is also DEPRECATED. That is, DO NOT use the following:
+
+struct static_key false = STATIC_KEY_INIT_FALSE;
+struct static_key true = STATIC_KEY_INIT_TRUE;
+static_key_true()
+static_key_false()
+
+The updated API replacements are:
+
+DEFINE_STATIC_KEY_TRUE(key);
+DEFINE_STATIC_KEY_FALSE(key);
+static_branch_likely()
+static_branch_unlikely()
 
 0) Abstract
 
@@ -9,22 +24,22 @@
 performance-sensitive fast-path kernel code, via a GCC feature and a code
 patching technique. A quick example:
 
-	struct static_key key = STATIC_KEY_INIT_FALSE;
+	DEFINE_STATIC_KEY_FALSE(key);
 
 	...
 
-        if (static_key_false(&key))
+        if (static_branch_unlikely(&key))
                 do unlikely code
         else
                 do likely code
 
 	...
-	static_key_slow_inc();
+	static_branch_enable(&key);
 	...
-	static_key_slow_inc();
+	static_branch_disable(&key);
 	...
 
-The static_key_false() branch will be generated into the code with as little
+The static_branch_unlikely() branch will be generated into the code with as little
 impact to the likely code path as possible.
 
 
@@ -56,7 +71,7 @@
 
 For example, if we have a simple branch that is disabled by default:
 
-	if (static_key_false(&key))
+	if (static_branch_unlikely(&key))
 		printk("I am the true branch\n");
 
 Thus, by default the 'printk' will not be emitted. And the code generated will
@@ -75,68 +90,55 @@
 
 In order to make use of this optimization you must first define a key:
 
-	struct static_key key;
-
-Which is initialized as:
-
-	struct static_key key = STATIC_KEY_INIT_TRUE;
+	DEFINE_STATIC_KEY_TRUE(key);
 
 or:
 
-	struct static_key key = STATIC_KEY_INIT_FALSE;
+	DEFINE_STATIC_KEY_FALSE(key);
 
-If the key is not initialized, it is default false. The 'struct static_key',
-must be a 'global'. That is, it can't be allocated on the stack or dynamically
+
+The key must be global, that is, it can't be allocated on the stack or dynamically
 allocated at run-time.
 
 The key is then used in code as:
 
-        if (static_key_false(&key))
+        if (static_branch_unlikely(&key))
                 do unlikely code
         else
                 do likely code
 
 Or:
 
-        if (static_key_true(&key))
+        if (static_branch_likely(&key))
                 do likely code
         else
                 do unlikely code
 
-A key that is initialized via 'STATIC_KEY_INIT_FALSE', must be used in a
-'static_key_false()' construct. Likewise, a key initialized via
-'STATIC_KEY_INIT_TRUE' must be used in a 'static_key_true()' construct. A
-single key can be used in many branches, but all the branches must match the
-way that the key has been initialized.
+Keys defined via DEFINE_STATIC_KEY_TRUE(), or DEFINE_STATIC_KEY_FALSE(), may
+be used in either static_branch_likely() or static_branch_unlikely()
+statements.
 
-The branch(es) can then be switched via:
+Branch(es) can be set true via:
 
-	static_key_slow_inc(&key);
+static_branch_enable(&key);
+
+or false via:
+
+static_branch_disable(&key);
+
+The branch(es) can then be switched via reference counts:
+
+	static_branch_inc(&key);
 	...
-	static_key_slow_dec(&key);
+	static_branch_dec(&key);
 
-Thus, 'static_key_slow_inc()' means 'make the branch true', and
-'static_key_slow_dec()' means 'make the branch false' with appropriate
+Thus, 'static_branch_inc()' means 'make the branch true', and
+'static_branch_dec()' means 'make the branch false' with appropriate
 reference counting. For example, if the key is initialized true, a
-static_key_slow_dec(), will switch the branch to false. And a subsequent
-static_key_slow_inc(), will change the branch back to true. Likewise, if the
-key is initialized false, a 'static_key_slow_inc()', will change the branch to
-true. And then a 'static_key_slow_dec()', will again make the branch false.
-
-An example usage in the kernel is the implementation of tracepoints:
-
-        static inline void trace_##name(proto)                          \
-        {                                                               \
-                if (static_key_false(&__tracepoint_##name.key))		\
-                        __DO_TRACE(&__tracepoint_##name,                \
-                                TP_PROTO(data_proto),                   \
-                                TP_ARGS(data_args),                     \
-                                TP_CONDITION(cond));                    \
-        }
-
-Tracepoints are disabled by default, and can be placed in performance critical
-pieces of the kernel. Thus, by using a static key, the tracepoints can have
-absolutely minimal impact when not in use.
+static_branch_dec(), will switch the branch to false. And a subsequent
+static_branch_inc(), will change the branch back to true. Likewise, if the
+key is initialized false, a 'static_branch_inc()', will change the branch to
+true. And then a 'static_branch_dec()', will again make the branch false.
 
 
 4) Architecture level code patching interface, 'jump labels'
@@ -150,9 +152,12 @@
 
 * #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h
 
-* __always_inline bool arch_static_branch(struct static_key *key), see:
+* __always_inline bool arch_static_branch(struct static_key *key, bool branch), see:
 					arch/x86/include/asm/jump_label.h
 
+* __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch),
+					see: arch/x86/include/asm/jump_label.h
+
 * void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type),
 					see: arch/x86/kernel/jump_label.c
 
@@ -173,7 +178,7 @@
 {
         int pid;
 
-+       if (static_key_false(&key))
++       if (static_branch_unlikely(&key))
 +               printk("I am the true branch\n");
 
         rcu_read_lock();
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 9832ec5..9c3f2f8 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -225,11 +225,11 @@
 extfrag_threshold
 
 This parameter affects whether the kernel will compact memory or direct
-reclaim to satisfy a high-order allocation. /proc/extfrag_index shows what
-the fragmentation index for each order is in each zone in the system. Values
-tending towards 0 imply allocations would fail due to lack of memory,
-values towards 1000 imply failures are due to fragmentation and -1 implies
-that the allocation will succeed as long as watermarks are met.
+reclaim to satisfy a high-order allocation. The extfrag/extfrag_index file in
+debugfs shows what the fragmentation index for each order is in each zone in
+the system. Values tending towards 0 imply allocations would fail due to lack
+of memory, values towards 1000 imply failures are due to fragmentation and -1
+implies that the allocation will succeed as long as watermarks are met.
 
 The kernel will not compact memory in a zone if the
 fragmentation index is <= extfrag_threshold. The default value is 500.
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 0e307c9..267f393 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -119,6 +119,7 @@
 
 'x'	- Used by xmon interface on ppc/powerpc platforms.
           Show global PMU Registers on sparc64.
+          Dump all TLB entries on MIPS.
 
 'y'	- Show global CPU Registers [SPARC-64 specific]
 
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
index 77d14d5..0a5c329 100644
--- a/Documentation/trace/coresight.txt
+++ b/Documentation/trace/coresight.txt
@@ -15,7 +15,7 @@
 that have many SoCs and other components like GPU and DMA engines.  ARM has
 developed a HW assisted tracing solution by means of different components, each
 being added to a design at synthesis time to cater to specific tracing needs.
-Compoments are generally categorised as source, link and sinks and are
+Components are generally categorised as source, link and sinks and are
 (usually) discovered using the AMBA bus.
 
 "Sources" generate a compressed stream representing the processor instruction
@@ -138,7 +138,7 @@
 
 The registering function is taking a "struct coresight_device *csdev" and
 register the device with the core framework.  The unregister function takes
-a reference to a "strut coresight_device", obtained at registration time.
+a reference to a "struct coresight_device", obtained at registration time.
 
 If everything goes well during the registration process the new devices will
 show up under /sys/bus/coresight/devices, as showns here for a TC2 platform:
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 7ddb1e3..87bb4aa 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -346,6 +346,11 @@
 	  x86-tsc: Architectures may define their own clocks. For
 	  	   example, x86 uses its own TSC cycle clock here.
 
+	  ppc-tb: This uses the powerpc timebase register value.
+		  This is in sync across CPUs and can also be used
+		  to correlate events across hypervisor/guest if
+		  tb_offset is known.
+
 	To set a clock, simply echo the clock name into this file.
 
 	  echo global > trace_clock
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
index 5926780..b24d3ef 100644
--- a/Documentation/usb/gadget-testing.txt
+++ b/Documentation/usb/gadget-testing.txt
@@ -237,9 +237,7 @@
 -----------------------------
 
 device: run the gadget
-host: test-usb
-
-http://www.linux-usb.org/usbtest/testusb.c
+host: test-usb (tools/usb/testusb.c)
 
 8. MASS STORAGE function
 ========================
@@ -586,9 +584,8 @@
 -------------------------------
 
 device: run the gadget
-host: test-usb
+host: test-usb (tools/usb/testusb.c)
 
-http://www.linux-usb.org/usbtest/testusb.c
 
 16. UAC1 function
 =================
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index b5f8391..4a15c90 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -521,10 +521,10 @@
 lower power state(L1 for usb2.0 devices, or U1/U2 for usb3.0 devices),
 which state device can enter and resume very quickly.
 
-The user interface for controlling USB2 hardware LPM is located in the
+The user interface for controlling hardware LPM is located in the
 power/ subdirectory of each USB device's sysfs directory, that is, in
 /sys/bus/usb/devices/.../power/ where "..." is the device's ID. The
-relevant attribute files is usb2_hardware_lpm.
+relevant attribute files are usb2_hardware_lpm and usb3_hardware_lpm.
 
 	power/usb2_hardware_lpm
 
@@ -537,6 +537,17 @@
 		can write y/Y/1 or n/N/0 to the file to	enable/disable
 		USB2 hardware LPM manually. This is for	test purpose mainly.
 
+	power/usb3_hardware_lpm
+
+		When a USB 3.0 lpm-capable device is plugged into an
+		xHCI host which supports link PM, it will check if U1
+		and U2 exit latencies have been set in the BOS
+		descriptor; if the check is passed and the host
+		supports USB3 hardware LPM, USB3 hardware LPM will be
+		enabled for the device and this file will be created.
+		The file holds a string value (enable or disable)
+		indicating whether or not USB3 hardware LPM is
+		enabled for the device.
 
 	USB Port Power Control
 	----------------------
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a7926a9..a4ebcb7 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3277,6 +3277,7 @@
 		struct {
 #define KVM_SYSTEM_EVENT_SHUTDOWN       1
 #define KVM_SYSTEM_EVENT_RESET          2
+#define KVM_SYSTEM_EVENT_CRASH          3
 			__u32 type;
 			__u64 flags;
 		} system_event;
@@ -3296,6 +3297,10 @@
   KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
    As with SHUTDOWN, userspace can choose to ignore the request, or
    to schedule the reset to occur in the future and may call KVM_RUN again.
+  KVM_SYSTEM_EVENT_CRASH -- the guest has crashed and has requested crash
+   condition maintenance. Userspace can choose to ignore the request, or to
+   gather a VM memory core dump and/or reset/shut down the VM.
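+
+A userspace sketch of handling this exit reason (a minimal sketch; only the
+system_event fields shown above are relied upon):
+
+	case KVM_EXIT_SYSTEM_EVENT:
+		switch (run->system_event.type) {
+		case KVM_SYSTEM_EVENT_CRASH:
+			/* e.g. gather a guest memory dump, then reset or
+			 * shut down the VM */
+			break;
+		}
+		break;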
 
 		/* Fix the size of the union. */
 		char padding[256];
diff --git a/Documentation/x86/kernel-stacks b/Documentation/x86/kernel-stacks
index 0f3a6c2..9a0aa4d 100644
--- a/Documentation/x86/kernel-stacks
+++ b/Documentation/x86/kernel-stacks
@@ -16,7 +16,7 @@
 is in control on that CPU; when a CPU returns to user space the
 specialized stacks contain no useful data.  The main CPU stacks are:
 
-* Interrupt stack.  IRQSTACKSIZE
+* Interrupt stack.  IRQ_STACK_SIZE
 
   Used for external hardware interrupts.  If this is the first external
   hardware interrupt (i.e. not a nested hardware interrupt) then the
diff --git a/Documentation/x86/mtrr.txt b/Documentation/x86/mtrr.txt
index 860bc3a..dc3e703 100644
--- a/Documentation/x86/mtrr.txt
+++ b/Documentation/x86/mtrr.txt
@@ -6,10 +6,22 @@
 ===============================================================================
 Phasing out MTRR use
 
-MTRR use is replaced on modern x86 hardware with PAT. Over time the only type
-of effective MTRR that is expected to be supported will be for write-combining.
-As MTRR use is phased out device drivers should use arch_phys_wc_add() to make
-MTRR effective on non-PAT systems while a no-op on PAT enabled systems.
+MTRR use is replaced on modern x86 hardware with PAT. Direct MTRR use by
+drivers on Linux is now completely phased out; device drivers should use
+arch_phys_wc_add() in combination with ioremap_wc() to make MTRR effective on
+non-PAT systems, while being a no-op but equally effective on PAT enabled
+systems.
+
+Even if Linux does not use MTRRs directly, some x86 platform firmware may still
+set up MTRRs early before booting the OS. They do this as some platform
+firmware may still have implemented access to MTRRs which would be controlled
+and handled by the platform firmware directly. An example of platform use of
+MTRRs is through the use of SMI handlers, one case could be for fan control,
+the platform code would need uncachable access to some of its fan control
+registers. Such platform access does not need any Operating System MTRR code in
+place other than mtrr_type_lookup() to ensure any OS specific mapping requests
+are aligned with platform MTRR setup. If MTRRs are only set up by the platform
+firmware code though and the OS does not make any specific MTRR mapping
+requests mtrr_type_lookup() should always return MTRR_TYPE_INVALID.
 
 For details refer to Documentation/x86/pat.txt.
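A minimal sketch of the pattern described above (not part of this patch); the
helper name, BAR index and the surrounding driver are assumptions for
illustration only:

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical helper: map a PCI BAR write-combined. */
static void __iomem *example_map_wc(struct pci_dev *pdev, int bar, int *wc_handle)
{
	resource_size_t base = pci_resource_start(pdev, bar);
	resource_size_t size = pci_resource_len(pdev, bar);
	void __iomem *regs;

	/* No-op on PAT systems; adds a write-combining MTRR on non-PAT ones. */
	*wc_handle = arch_phys_wc_add(base, size);

	/* On PAT systems this is what actually yields write-combining. */
	regs = ioremap_wc(base, size);
	if (!regs)
		arch_phys_wc_del(*wc_handle);

	return regs;	/* later: iounmap(regs); arch_phys_wc_del(*wc_handle); */
}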
 
diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt
index 82fbdbc..95a4d34 100644
--- a/Documentation/x86/zero-page.txt
+++ b/Documentation/x86/zero-page.txt
@@ -17,7 +17,8 @@
 				(struct ist_info)
 080/010	ALL	hd0_info	hd0 disk parameter, OBSOLETE!!
 090/010	ALL	hd1_info	hd1 disk parameter, OBSOLETE!!
-0A0/010	ALL	sys_desc_table	System description table (struct sys_desc_table)
+0A0/010	ALL	sys_desc_table	System description table (struct sys_desc_table),
+				OBSOLETE!!
 0B0/010	ALL	olpc_ofw_header	OLPC's OpenFirmware CIF and friends
 0C0/004	ALL	ext_ramdisk_image ramdisk_image high 32bits
 0C4/004	ALL	ext_ramdisk_size  ramdisk_size high 32bits
diff --git a/MAINTAINERS b/MAINTAINERS
index 740a13b..a9abe32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -158,6 +158,7 @@
 S:	Maintained
 F:	net/6lowpan/
 F:	include/net/6lowpan.h
+F:	Documentation/networking/6lowpan.txt
 
 6PACK NETWORK DRIVER FOR AX.25
 M:	Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -361,7 +362,7 @@
 F:	drivers/input/touchscreen/ad7879.c
 
 ADDRESS SPACE LAYOUT RANDOMIZATION (ASLR)
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 S:	Maintained
 
 ADM1025 HARDWARE MONITOR DRIVER
@@ -556,6 +557,12 @@
 F:	Documentation/i2c/busses/i2c-ali1563
 F:	drivers/i2c/busses/i2c-ali1563.c
 
+ALLWINNER SECURITY SYSTEM
+M:	Corentin Labbe <clabbe.montjoie@gmail.com>
+L:	linux-crypto@vger.kernel.org
+S:	Maintained
+F:	drivers/crypto/sunxi-ss/
+
 ALPHA PORT
 M:	Richard Henderson <rth@twiddle.net>
 M:	Ivan Kokshaysky <ink@jurassic.park.msu.ru>
@@ -733,6 +740,12 @@
 F:	drivers/staging/iio/*/ad*
 F:	staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANALOG DEVICES INC DMA DRIVERS
+M:	Lars-Peter Clausen <lars@metafoo.de>
+W:	http://ez.analog.com/community/linux-device-drivers
+S:	Supported
+F:	drivers/dma/dma-axi-dmac.c
+
 ANDROID DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Arve Hjønnevåg <arve@android.com>
@@ -751,7 +764,7 @@
 F:	sound/aoa/
 
 APM DRIVER
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 S:	Odd fixes
 F:	arch/x86/kernel/apm_32.c
 F:	include/linux/apm_bios.h
@@ -804,11 +817,13 @@
 ARM PMU PROFILING AND DEBUGGING
 M:	Will Deacon <will.deacon@arm.com>
 S:	Maintained
-F:	arch/arm/kernel/perf_event*
+F:	arch/arm/kernel/perf_*
 F:	arch/arm/oprofile/common.c
-F:	arch/arm/include/asm/pmu.h
 F:	arch/arm/kernel/hw_breakpoint.c
 F:	arch/arm/include/asm/hw_breakpoint.h
+F:	arch/arm/include/asm/perf_event.h
+F:	drivers/perf/arm_pmu.c
+F:	include/linux/perf/arm_pmu.h
 
 ARM PORT
 M:	Russell King <linux@arm.linux.org.uk>
@@ -932,7 +947,7 @@
 M:	Robert Richter <rric@kernel.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Supported
-F:	drivers/net/ethernet/cavium/
+F:	drivers/net/ethernet/cavium/thunder/
 
 ARM/CIRRUS LOGIC CLPS711X ARM ARCHITECTURE
 M:	Alexander Shiyan <shc_work@mail.ru>
@@ -1470,9 +1485,7 @@
 F:	arch/arm/boot/dts/r7s*
 F:	arch/arm/boot/dts/r8a*
 F:	arch/arm/boot/dts/sh*
-F:	arch/arm/configs/armadillo800eva_defconfig
 F:	arch/arm/configs/bockw_defconfig
-F:	arch/arm/configs/kzm9g_defconfig
 F:	arch/arm/configs/marzen_defconfig
 F:	arch/arm/configs/shmobile_defconfig
 F:	arch/arm/include/debug/renesas-scif.S
@@ -1509,6 +1522,7 @@
 F:	arch/arm/mach-sti/
 F:	arch/arm/boot/dts/sti*
 F:	drivers/clocksource/arm_global_timer.c
+F:	drivers/clocksource/clksrc_st_lpc.c
 F:	drivers/i2c/busses/i2c-st.c
 F:	drivers/media/rc/st_rc.c
 F:	drivers/mmc/host/sdhci-st.c
@@ -1584,7 +1598,10 @@
 M:	Masahiro Yamada <yamada.masahiro@socionext.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
+F:	arch/arm/boot/dts/uniphier*
 F:	arch/arm/mach-uniphier/
+F:	drivers/pinctrl/uniphier/
+F:	drivers/tty/serial/8250/8250_uniphier.c
 N:	uniphier
 
 ARM/Ux500 ARM ARCHITECTURE
@@ -1679,7 +1696,7 @@
 R:	Sören Brinkmann <soren.brinkmann@xilinx.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://wiki.xilinx.com
-T:	git git://git.xilinx.com/linux-xlnx.git
+T:	git https://github.com/Xilinx/linux-xlnx.git
 S:	Supported
 F:	arch/arm/mach-zynq/
 F:	drivers/cpuidle/cpuidle-zynq.c
@@ -1920,6 +1937,14 @@
 S:	Maintained
 F:	drivers/net/wireless/atmel*
 
+ATMEL MAXTOUCH DRIVER
+M:	Nick Dyer <nick.dyer@itdev.co.uk>
+T:	git git://github.com/atmel-maxtouch/linux.git
+S:	Supported
+F:	Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+F:	drivers/input/touchscreen/atmel_mxt_ts.c
+F:	include/linux/platform_data/atmel_mxt_ts.h
+
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
 L:	linux-scsi@vger.kernel.org
@@ -2223,7 +2248,9 @@
 BROADCOM BCM2835 ARM ARCHITECTURE
 M:	Stephen Warren <swarren@wwwdotorg.org>
 M:	Lee Jones <lee@kernel.org>
+M:	Eric Anholt <eric@anholt.net>
 L:	linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
 S:	Maintained
 N:	bcm2835
@@ -2547,7 +2574,6 @@
 L:     netdev@vger.kernel.org
 W:     http://www.cavium.com
 S:     Supported
-F:     drivers/net/ethernet/cavium/
 F:     drivers/net/ethernet/cavium/liquidio/
 
 CC2520 IEEE-802.15.4 RADIO DRIVER
@@ -3459,6 +3485,7 @@
 X:	Documentation/acpi
 X:	Documentation/power
 X:	Documentation/spi
+X:	Documentation/DocBook/media
 T:	git git://git.lwn.net/linux-2.6.git docs-next
 
 DOUBLETALK DRIVER
@@ -4082,15 +4109,6 @@
 F:	fs/ext2/
 F:	include/linux/ext2*
 
-EXT3 FILE SYSTEM
-M:	Jan Kara <jack@suse.com>
-M:	Andrew Morton <akpm@linux-foundation.org>
-M:	Andreas Dilger <adilger.kernel@dilger.ca>
-L:	linux-ext4@vger.kernel.org
-S:	Maintained
-F:	Documentation/filesystems/ext3.txt
-F:	fs/ext3/
-
 EXT4 FILE SYSTEM
 M:	"Theodore Ts'o" <tytso@mit.edu>
 M:	Andreas Dilger <adilger.kernel@dilger.ca>
@@ -4268,7 +4286,7 @@
 F:	drivers/block/rsxx/
 
 FLOPPY DRIVER
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
 S:	Odd fixes
 F:	drivers/block/floppy.c
@@ -4429,6 +4447,7 @@
 F2FS FILE SYSTEM
 M:	Jaegeuk Kim <jaegeuk@kernel.org>
 M:	Changman Lee <cm224.lee@samsung.com>
+R:	Chao Yu <chao2.yu@samsung.com>
 L:	linux-f2fs-devel@lists.sourceforge.net
 W:	http://en.wikipedia.org/wiki/F2FS
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
@@ -4437,6 +4456,7 @@
 F:	Documentation/ABI/testing/sysfs-fs-f2fs
 F:	fs/f2fs/
 F:	include/linux/f2fs_fs.h
+F:	include/trace/events/f2fs.h
 
 FUJITSU FR-V (FRV) PORT
 M:	David Howells <dhowells@redhat.com>
@@ -4839,7 +4859,7 @@
 F:	arch/*/include/asm/suspend*.h
 
 HID CORE LAYER
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 L:	linux-input@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:	Maintained
@@ -4848,7 +4868,7 @@
 F:	include/uapi/linux/hid*
 
 HID SENSOR HUB DRIVERS
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 M:	Jonathan Cameron <jic23@kernel.org>
 M:	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 L:	linux-input@vger.kernel.org
@@ -4980,6 +5000,7 @@
 F:	drivers/video/fbdev/hyperv_fb.c
 F:	include/linux/hyperv.h
 F:	tools/hv/
+F:	Documentation/ABI/stable/sysfs-bus-vmbus
 
 I2C OVER PARALLEL PORT
 M:	Jean Delvare <jdelvare@suse.com>
@@ -5090,9 +5111,21 @@
 S:	Maintained
 F:	arch/ia64/
 
+IBM Power VMX Cryptographic instructions
+M:	Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:	Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+L:	linux-crypto@vger.kernel.org
+S:	Supported
+F:	drivers/crypto/vmx/Makefile
+F:	drivers/crypto/vmx/Kconfig
+F:	drivers/crypto/vmx/vmx.c
+F:	drivers/crypto/vmx/aes*
+F:	drivers/crypto/vmx/ghash*
+F:	drivers/crypto/vmx/ppc-xlate.pl
+
 IBM Power in-Nest Crypto Acceleration
-M:	Marcelo Henrique Cerri <mhcerri@linux.vnet.ibm.com>
-M:	Fionnuala Gunter <fin@linux.vnet.ibm.com>
+M:	Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+M:	Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
 L:	linux-crypto@vger.kernel.org
 S:	Supported
 F:	drivers/crypto/nx/Makefile
@@ -5104,7 +5137,7 @@
 F:	drivers/crypto/nx/nx_debugfs.h
 
 IBM Power 842 compression accelerator
-M:	Dan Streetman <ddstreet@us.ibm.com>
+M:	Dan Streetman <ddstreet@ieee.org>
 S:	Supported
 F:	drivers/crypto/nx/Makefile
 F:	drivers/crypto/nx/Kconfig
@@ -5588,7 +5621,7 @@
 F:	net/netfilter/ipvs/
 
 IPWIRELESS DRIVER
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 M:	David Sterba <dsterba@suse.com>
 S:	Odd Fixes
 F:	drivers/tty/ipwireless/
@@ -5778,16 +5811,9 @@
 F:	fs/jffs2/
 F:	include/uapi/linux/jffs2.h
 
-JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
-M:	Andrew Morton <akpm@linux-foundation.org>
-M:	Jan Kara <jack@suse.com>
-L:	linux-ext4@vger.kernel.org
-S:	Maintained
-F:	fs/jbd/
-F:	include/linux/jbd.h
-
 JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
 M:	"Theodore Ts'o" <tytso@mit.edu>
+M:	Jan Kara <jack@suse.com>
 L:	linux-ext4@vger.kernel.org
 S:	Maintained
 F:	fs/jbd2/
@@ -5863,6 +5889,7 @@
 
 KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:	"J. Bruce Fields" <bfields@fieldses.org>
+M:	Jeff Layton <jlayton@poochiereds.net>
 L:	linux-nfs@vger.kernel.org
 W:	http://nfs.sourceforge.net/
 S:	Supported
@@ -5919,7 +5946,6 @@
 KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
 M:	Christian Borntraeger <borntraeger@de.ibm.com>
 M:	Cornelia Huck <cornelia.huck@de.ibm.com>
-M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -6054,11 +6080,10 @@
 F:	drivers/scsi/53c700*
 
 LED SUBSYSTEM
-M:	Bryan Wu <cooloney@gmail.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <j.anaszewski@samsung.com>
 L:	linux-leds@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
 S:	Maintained
 F:	drivers/leds/
 F:	include/linux/leds.h
@@ -6278,7 +6303,7 @@
 LIVE PATCHING
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
 M:	Seth Jennings <sjenning@redhat.com>
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 M:	Vojtech Pavlik <vojtech@suse.com>
 S:	Maintained
 F:	kernel/livepatch/
@@ -6533,7 +6558,7 @@
 
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
-M:	Avinash Patil <patila@marvell.com>
+M:	Nishant Sarmukadam <nishants@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/mwifiex/
@@ -6562,6 +6587,13 @@
 F:	Documentation/hwmon/max16065
 F:	drivers/hwmon/max16065.c
 
+MAX20751 HARDWARE MONITOR DRIVER
+M:	Guenter Roeck <linux@roeck-us.net>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/max20751
+F:	drivers/hwmon/max20751.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:	"Hans J. Koch" <hjk@hansjkoch.de>
 L:	lm-sensors@lm-sensors.org
@@ -6585,6 +6617,14 @@
 F:	drivers/power/max14577_charger.c
 F:	drivers/power/max77693_charger.c
 
+MAXIM MAX77802 MULTIFUNCTION PMIC DEVICE DRIVERS
+M:	Javier Martinez Canillas <javier@osg.samsung.com>
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	drivers/*/*max77802.c
+F:	Documentation/devicetree/bindings/*/*max77802.txt
+F:	include/dt-bindings/*/*max77802.h
+
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:	Chanwoo Choi <cw00.choi@samsung.com>
 M:	Krzysztof Kozlowski <k.kozlowski@samsung.com>
@@ -6598,7 +6638,7 @@
 F:	drivers/rtc/rtc-max77686.c
 F:	drivers/clk/clk-max77686.c
 F:	Documentation/devicetree/bindings/mfd/max14577.txt
-F:	Documentation/devicetree/bindings/mfd/max77686.txt
+F:	Documentation/devicetree/bindings/*/max77686.txt
 F:	Documentation/devicetree/bindings/mfd/max77693.txt
 F:	Documentation/devicetree/bindings/clock/maxim,max77686.txt
 F:	include/linux/mfd/max14577*.h
@@ -6671,6 +6711,15 @@
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET SWITCH DRIVERS
+M:	Jiri Pirko <jiri@mellanox.com>
+M:	Ido Schimmel <idosch@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/mlxsw/
+
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
 W:	http://www.linux-mm.org
@@ -6706,6 +6755,7 @@
 S:	Maintained
 F:	drivers/mcb/
 F:	include/linux/mcb.h
+F:	Documentation/men-chameleon-bus.txt
 
 MEN F21BMC (Board Management Controller)
 M:	Andreas Werner <andreas.werner@men.de>
@@ -7311,6 +7361,15 @@
 F:	drivers/block/nvme*
 F:	include/linux/nvme.h
 
+NVMEM FRAMEWORK
+M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+M:	Maxime Ripard <maxime.ripard@free-electrons.com>
+S:	Maintained
+F:	drivers/nvmem/
+F:	Documentation/devicetree/bindings/nvmem/
+F:	include/linux/nvmem-consumer.h
+F:	include/linux/nvmem-provider.h
+
 NXP-NCI NFC DRIVER
 M:	Clément Perrochaud <clement.perrochaud@effinnov.com>
 R:	Charles Gorand <charles.gorand@effinnov.com>
@@ -7548,8 +7607,9 @@
 F:	drivers/i2c/busses/i2c-ocores.c
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
-M:	Grant Likely <grant.likely@linaro.org>
 M:	Rob Herring <robh+dt@kernel.org>
+M:	Frank Rowand <frowand.list@gmail.com>
+M:	Grant Likely <grant.likely@linaro.org>
 L:	devicetree@vger.kernel.org
 W:	http://www.devicetree.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
@@ -8022,7 +8082,6 @@
 
 PIN CONTROLLER - SAMSUNG
 M:	Tomasz Figa <tomasz.figa@gmail.com>
-M:	Thomas Abraham <thomas.abraham@linaro.org>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
@@ -8037,7 +8096,7 @@
 F:	drivers/pinctrl/spear/
 
 PKTCDVD DRIVER
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 S:	Maintained
 F:	drivers/block/pktcdvd.c
 F:	include/linux/pktcdvd.h
@@ -8072,7 +8131,7 @@
 F:	drivers/scsi/pmcraid.*
 
 PMC SIERRA PM8001 DRIVER
-M:	xjtuwjp@gmail.com
+M:	Jack Wang <jinpu.wang@profitbricks.com>
 M:	lindar_liu@usish.com
 L:	pmchba@pmcs.com
 L:	linux-scsi@vger.kernel.org
@@ -8097,6 +8156,16 @@
 S:	Maintained
 F:	include/linux/power_supply.h
 F:	drivers/power/
+X:	drivers/power/avs/
+
+POWER STATE COORDINATION INTERFACE (PSCI)
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+L:	linux-arm-kernel@lists.infradead.org
+S:	Maintained
+F:	drivers/firmware/psci.c
+F:	include/linux/psci.h
+F:	include/uapi/linux/psci.h
 
 PNP SUPPORT
 M:	"Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
@@ -8495,7 +8564,7 @@
 M:	Josh Triplett <josh@joshtriplett.org>
 R:	Steven Rostedt <rostedt@goodmis.org>
 R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-R:	Lai Jiangshan <laijs@cn.fujitsu.com>
+R:	Lai Jiangshan <jiangshanlai@gmail.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -8522,7 +8591,7 @@
 M:	Josh Triplett <josh@joshtriplett.org>
 R:	Steven Rostedt <rostedt@goodmis.org>
 R:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-R:	Lai Jiangshan <laijs@cn.fujitsu.com>
+R:	Lai Jiangshan <jiangshanlai@gmail.com>
 L:	linux-kernel@vger.kernel.org
 W:	http://www.rdrop.com/users/paulmck/RCU/
 S:	Supported
@@ -8587,6 +8656,7 @@
 S:	Maintained
 F:	drivers/reset/
 F:	Documentation/devicetree/bindings/reset/
+F:	include/dt-bindings/reset/
 F:	include/linux/reset.h
 F:	include/linux/reset-controller.h
 
@@ -8721,7 +8791,6 @@
 S390
 M:	Martin Schwidefsky <schwidefsky@de.ibm.com>
 M:	Heiko Carstens <heiko.carstens@de.ibm.com>
-M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -8749,7 +8818,6 @@
 
 S390 NETWORK DRIVERS
 M:	Ursula Braun <ursula.braun@de.ibm.com>
-M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -8766,7 +8834,6 @@
 
 S390 ZCRYPT DRIVER
 M:	Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
-M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -8774,7 +8841,6 @@
 
 S390 ZFCP DRIVER
 M:	Steffen Maier <maier@linux.vnet.ibm.com>
-M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -8782,7 +8848,6 @@
 
 S390 IUCV NETWORK LAYER
 M:	Ursula Braun <ursula.braun@de.ibm.com>
-M:	linux390@de.ibm.com
 L:	linux-s390@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 S:	Supported
@@ -8885,6 +8950,12 @@
 S:	Supported
 F:	drivers/media/i2c/s5k5baf.c
 
+SAMSUNG S3FWRN5 NFC DRIVER
+M:	Robert Baldyga <r.baldyga@samsung.com>
+L:	linux-nfc@lists.01.org (moderated for non-subscribers)
+S:	Supported
+F:	drivers/nfc/s3fwrn5
+
 SAMSUNG SOC CLOCK DRIVERS
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
 M:	Tomasz Figa <tomasz.figa@gmail.com>
@@ -8935,6 +9006,13 @@
 F:	include/linux/platform_data/dma-dw.h
 F:	drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
+M:	Lars Persson <lars.persson@axis.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+F:	Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+F:	drivers/net/ethernet/synopsys/dwc_eth_qos.c
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
@@ -9351,6 +9429,15 @@
 F:	drivers/media/i2c/ov2659.c
 F:	include/media/ov2659.h
 
+SILICON MOTION SM712 FRAME BUFFER DRIVER
+M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+M:	Teddy Wang <teddy.wang@siliconmotion.com>
+M:	Sudip Mukherjee <sudip@vectorindia.org>
+L:	linux-fbdev@vger.kernel.org
+S:	Maintained
+F:	drivers/video/fbdev/sm712*
+F:	Documentation/fb/sm712fb.txt
+
 SIS 190 ETHERNET DRIVER
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org
@@ -9390,7 +9477,7 @@
 F:	mm/sl?b*
 
 SLEEPABLE READ-COPY UPDATE (SRCU)
-M:	Lai Jiangshan <laijs@cn.fujitsu.com>
+M:	Lai Jiangshan <jiangshanlai@gmail.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 M:	Josh Triplett <josh@joshtriplett.org>
 R:	Steven Rostedt <rostedt@goodmis.org>
@@ -9748,11 +9835,6 @@
 S:	Maintained
 F:	drivers/staging/olpc_dcon/
 
-STAGING - OZMO DEVICES USB OVER WIFI DRIVER
-M:	Shigekatsu Tateno <shigekatsu.tateno@atmel.com>
-S:	Maintained
-F:	drivers/staging/ozwpan/
-
 STAGING - PARALLEL LCD/KEYPAD PANEL DRIVER
 M:	Willy Tarreau <willy@meta-x.org>
 S:	Odd Fixes
@@ -9771,14 +9853,6 @@
 S:	Maintained
 F:	drivers/staging/rtl8723au/
 
-STAGING - SILICON MOTION SM7XX FRAME BUFFER DRIVER
-M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
-M:	Teddy Wang <teddy.wang@siliconmotion.com>
-M:	Sudip Mukherjee <sudip@vectorindia.org>
-L:	linux-fbdev@vger.kernel.org
-S:	Maintained
-F:	drivers/staging/sm7xxfb/
-
 STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
 M:	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
 M:	Teddy Wang <teddy.wang@siliconmotion.com>
@@ -9897,8 +9971,9 @@
 M:	Vineet Gupta <vgupta@synopsys.com>
 S:	Supported
 F:	arch/arc/
-F:	Documentation/devicetree/bindings/arc/
+F:	Documentation/devicetree/bindings/arc/*
 F:	drivers/tty/serial/arc_uart.c
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
 
 SYNOPSYS ARC SDP platform support
 M:	Alexey Brodkin <abrodkin@synopsys.com>
@@ -10649,7 +10724,7 @@
 F:	include/linux/usb/gadget*
 
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
-M:	Jiri Kosina <jkosina@suse.com>
+M:	Jiri Kosina <jikos@kernel.org>
 L:	linux-usb@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
 S:	Maintained
@@ -11044,7 +11119,7 @@
 F:	drivers/input/mouse/vmmouse.h
 
 VMWARE VMXNET3 ETHERNET DRIVER
-M:	Shreyas Bhatewara <sbhatewara@vmware.com>
+M:	Shrikrishna Khare <skhare@vmware.com>
 M:	"VMware, Inc." <pv-drivers@vmware.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
@@ -11069,6 +11144,14 @@
 F:	drivers/regulator/
 F:	include/linux/regulator/
 
+VRF
+M:	David Ahern <dsa@cumulusnetworks.com>
+M:	Shrijeet Mukherjee <shm@cumulusnetworks.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/vrf.c
+F:	include/net/vrf.h
+
 VT1211 HARDWARE MONITOR DRIVER
 M:	Juerg Haefliger <juergh@gmail.com>
 L:	lm-sensors@lm-sensors.org
@@ -11224,6 +11307,7 @@
 
 WORKQUEUE
 M:	Tejun Heo <tj@kernel.org>
+R:	Lai Jiangshan <jiangshanlai@gmail.com>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git
 S:	Maintained
 F:	include/linux/workqueue.h
diff --git a/Makefile b/Makefile
index 246053f..c361593 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/README b/README
index 69c68fb..a326a6a 100644
--- a/README
+++ b/README
@@ -161,7 +161,7 @@
 
      "make xconfig"     X windows (Qt) based configuration tool.
 
-     "make gconfig"     X windows (Gtk) based configuration tool.
+     "make gconfig"     X windows (GTK+) based configuration tool.
 
      "make oldconfig"   Default all questions based on the contents of
                         your existing ./.config file and asking about
diff --git a/arch/Kconfig b/arch/Kconfig
index 8a8ea71..a71cdbe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -71,6 +71,12 @@
 	 ( On 32-bit x86, the necessary options added to the compiler
 	   flags may increase the size of the kernel slightly. )
 
+config STATIC_KEYS_SELFTEST
+	bool "Static key selftest"
+	depends on JUMP_LABEL
+	help
+	  Boot time self-test of the branch patching code.
+
 config OPTPROBES
 	def_bool y
 	depends on KPROBES && HAVE_OPTPROBES
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index bf9e9d3..f515a4d 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -3,6 +3,7 @@
 	default y
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_AOUT
 	select HAVE_IDE
 	select HAVE_OPROFILE
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index 8f8eafb..e8c9560 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,13 +29,13 @@
  * branch back to restart the operation.
  */
 
-#define ATOMIC_OP(op)							\
+#define ATOMIC_OP(op, asm_op)						\
 static __inline__ void atomic_##op(int i, atomic_t * v)			\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -45,15 +45,15 @@
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC_OP_RETURN(op)						\
+#define ATOMIC_OP_RETURN(op, asm_op)					\
 static inline int atomic_##op##_return(int i, atomic_t *v)		\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldl_l %0,%1\n"						\
-	"	" #op "l %0,%3,%2\n"					\
-	"	" #op "l %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stl_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -65,13 +65,13 @@
 	return result;							\
 }
 
-#define ATOMIC64_OP(op)							\
+#define ATOMIC64_OP(op, asm_op)						\
 static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
 {									\
 	unsigned long temp;						\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%2,%0\n"					\
+	"	" #asm_op " %0,%2,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -81,15 +81,15 @@
 	:"Ir" (i), "m" (v->counter));					\
 }									\
 
-#define ATOMIC64_OP_RETURN(op)						\
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
 static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
 {									\
 	long temp, result;						\
 	smp_mb();							\
 	__asm__ __volatile__(						\
 	"1:	ldq_l %0,%1\n"						\
-	"	" #op "q %0,%3,%2\n"					\
-	"	" #op "q %0,%3,%0\n"					\
+	"	" #asm_op " %0,%3,%2\n"					\
+	"	" #asm_op " %0,%3,%0\n"					\
 	"	stq_c %0,%1\n"						\
 	"	beq %0,2f\n"						\
 	".subsection 2\n"						\
@@ -101,15 +101,27 @@
 	return result;							\
 }
 
-#define ATOMIC_OPS(opg)							\
-	ATOMIC_OP(opg)							\
-	ATOMIC_OP_RETURN(opg)						\
-	ATOMIC64_OP(opg)						\
-	ATOMIC64_OP_RETURN(opg)
+#define ATOMIC_OPS(op)							\
+	ATOMIC_OP(op, op##l)						\
+	ATOMIC_OP_RETURN(op, op##l)					\
+	ATOMIC64_OP(op, op##q)						\
+	ATOMIC64_OP_RETURN(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_andnot atomic_andnot
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, bis)
+ATOMIC_OP(xor, xor)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, bis)
+ATOMIC64_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 37b570d..fed9c6f 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -16,6 +16,11 @@
 #define arch_spin_unlock_wait(x) \
 		do { cpu_relax(); } while ((x)->lock)
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+        return lock.lock == 0;
+}
+
 static inline void arch_spin_unlock(arch_spinlock_t * lock)
 {
 	mb();
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 51f2c86..2804648 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -59,7 +59,7 @@
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(data->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
 	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 36dc91a..6cc0816 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1138,6 +1138,7 @@
 {
 	struct rusage32 r;
 	cputime_t utime, stime;
+	unsigned long utime_jiffies, stime_jiffies;
 
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
 		return -EINVAL;
@@ -1146,14 +1147,18 @@
 	switch (who) {
 	case RUSAGE_SELF:
 		task_cputime(current, &utime, &stime);
-		jiffies_to_timeval32(utime, &r.ru_utime);
-		jiffies_to_timeval32(stime, &r.ru_stime);
+		utime_jiffies = cputime_to_jiffies(utime);
+		stime_jiffies = cputime_to_jiffies(stime);
+		jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
+		jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
 		r.ru_minflt = current->min_flt;
 		r.ru_majflt = current->maj_flt;
 		break;
 	case RUSAGE_CHILDREN:
-		jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
-		jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
+		utime_jiffies = cputime_to_jiffies(current->signal->cutime);
+		stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+		jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
+		jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
 		r.ru_minflt = current->signal->cmin_flt;
 		r.ru_majflt = current->signal->cmaj_flt;
 		break;
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 82f738e..cded02c 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -242,12 +242,7 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dev = bus->self;
-
-	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
- 		   (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
- 		pci_read_bridge_bases(bus);
-	} 
+	struct pci_dev *dev;
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		pdev_save_srm_config(dev);
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 643a9dc..5b6202a 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -93,7 +93,7 @@
 	struct clock_event_device *ce = &per_cpu(cpu_ce, cpu);
 
 	/* Don't run the hook for UNUSED or SHUTDOWN.  */
-	if (likely(ce->mode == CLOCK_EVT_MODE_PERIODIC))
+	if (likely(clockevent_state_periodic(ce)))
 		ce->event_handler(ce);
 
 	if (test_irq_work_pending()) {
@@ -104,13 +104,6 @@
 	return IRQ_HANDLED;
 }
 
-static void
-rtc_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
-{
-	/* The mode member of CE is updated in generic code.
-	   Since we only support periodic events, nothing to do.  */
-}
-
 static int
 rtc_ce_set_next_event(unsigned long evt, struct clock_event_device *ce)
 {
@@ -129,7 +122,6 @@
 		.features = CLOCK_EVT_FEAT_PERIODIC,
 		.rating = 100,
 		.cpumask = cpumask_of(cpu),
-		.set_mode = rtc_ce_set_mode,
 		.set_next_event = rtc_ce_set_next_event,
 	};
 
@@ -161,12 +153,12 @@
  * The QEMU alarm as a clock_event_device primitive.
  */
 
-static void
-qemu_ce_set_mode(enum clock_event_mode mode, struct clock_event_device *ce)
+static int qemu_ce_shutdown(struct clock_event_device *ce)
 {
 	/* The mode member of CE is updated for us in generic code.
 	   Just make sure that the event is disabled.  */
 	qemu_set_alarm_abs(0);
+	return 0;
 }
 
 static int
@@ -197,7 +189,9 @@
 		.features = CLOCK_EVT_FEAT_ONESHOT,
 		.rating = 400,
 		.cpumask = cpumask_of(cpu),
-		.set_mode = qemu_ce_set_mode,
+		.set_state_shutdown = qemu_ce_shutdown,
+		.set_state_oneshot = qemu_ce_shutdown,
+		.tick_resume = qemu_ce_shutdown,
 		.set_next_event = qemu_ce_set_next_event,
 	};
 
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index bd4670d..78c0621 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -8,6 +8,7 @@
 
 config ARC
 	def_bool y
+	select ARCH_SUPPORTS_ATOMIC_RMW if ARC_HAS_LLSC
 	select BUILDTIME_EXTABLE_SORT
 	select COMMON_CLK
 	select CLONE_BACKWARDS
@@ -22,6 +23,7 @@
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_FUTEX_CMPXCHG
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_KRETPROBES
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi
index 1cd5e82..846481f 100644
--- a/arch/arc/boot/dts/axc003.dtsi
+++ b/arch/arc/boot/dts/axc003.dtsi
@@ -72,12 +72,13 @@
 	};
 
 	/*
-	 * This INTC is actually connected to DW APB GPIO
-	 * which acts as a wire between MB INTC and CPU INTC.
-	 * GPIO INTC is configured in platform init code
-	 * and here we mimic direct connection from MB INTC to
-	 * CPU INTC, thus we set "interrupts = <7>" instead of
-	 * "interrupts = <12>"
+	 * The DW APB ICTL intc on MB is connected to the CPU intc via a
+	 * DT "invisible" DW APB GPIO block, configured to simply pass thru
+	 * interrupts - set up accordingly in platform init (plat-axs10x/ax10x.c)
+	 *
+	 * So here we mimic a direct connection between them, ignoring the
+	 * APB GPIO. Thus set "interrupts = <24>" (DW APB GPIO to core)
+	 * instead of "interrupts = <12>" (DW APB ICTL to DW APB GPIO)
 	 *
 	 * This intc actually resides on MB, but we move it here to
 	 * avoid duplicating the MB dtsi file given that IRQ from
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index c8f57b8..d8023bc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -35,6 +35,7 @@
 #define ARC_REG_RTT_BCR		0xF2
 #define ARC_REG_IRQ_BCR		0xF3
 #define ARC_REG_SMART_BCR	0xFF
+#define ARC_REG_CLUSTER_BCR	0xcf
 
 /* status32 Bits Positions */
 #define STATUS_AE_BIT		5	/* Exception active */
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 87d18ae..c3ecda0 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -172,9 +172,13 @@
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
-ATOMIC_OP(and, &=, and)
 
-#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index d67345d..e23ea6e 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -53,6 +53,8 @@
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
 extern void read_decode_cache_bcr(void);
 
+extern int ioc_exists;
+
 #endif	/* !__ASSEMBLY__ */
 
 /* Instruction cache related Auxiliary registers */
@@ -94,4 +96,10 @@
 #define SLC_CTRL_BUSY		0x100
 #define SLC_CTRL_RGN_OP_INV	0x200
 
+/* IO coherency related Auxiliary registers */
+#define ARC_REG_IO_COH_ENABLE	0x500
+#define ARC_REG_IO_COH_PARTIAL	0x501
+#define ARC_REG_IO_COH_AP0_BASE	0x508
+#define ARC_REG_IO_COH_AP0_SIZE	0x509
+
 #endif /* _ASM_CACHE_H */
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 44fd531..af7a2db 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -110,18 +110,18 @@
 						 sizeof(*(ptr))))
 
 /*
- * On ARC700, EX insn is inherently atomic, so by default "vanilla" xchg() need
- * not require any locking. However there's a quirk.
- * ARC lacks native CMPXCHG, thus emulated (see above), using external locking -
- * incidently it "reuses" the same atomic_ops_lock used by atomic APIs.
- * Now, llist code uses cmpxchg() and xchg() on same data, so xchg() needs to
- * abide by same serializing rules, thus ends up using atomic_ops_lock as well.
+ * xchg() maps directly to ARC EX instruction which guarantees atomicity.
+ * However in !LLSC config, it also needs to use the @atomic_ops_lock spinlock
+ * due to a subtle reason:
+ *  - For !LLSC, cmpxchg() needs to use that lock (see above) and there is a
+ *    lot of kernel code which calls xchg()/cmpxchg() on the same data (see
+ *    llist.h). Hence xchg() needs to follow the same locking rules.
  *
- * This however is only relevant if SMP and/or ARC lacks LLSC
- *   if (UP or LLSC)
- *      xchg doesn't need serialization
- *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
- *      xchg needs serialization
+ * Technically the lock is also needed for UP (boils down to irq save/restore)
+ * but we can cheat a bit: since cmpxchg()'s atomic_ops_lock() causes irqs to
+ * be disabled, it can't possibly be interrupted/preempted/clobbered by xchg().
+ * The other way around, xchg() is a single instruction anyway, so it can't be
+ * interrupted as such.
  */
 
 #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 70cfe16..11e1b1f 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -20,6 +20,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
 							\
+	smp_mb();					\
 	__asm__ __volatile__(				\
 	"1:	llock	%1, [%2]		\n"	\
 		insn				"\n"	\
@@ -30,7 +31,7 @@
 	"	.section .fixup,\"ax\"		\n"	\
 	"	.align  4			\n"	\
 	"4:	mov %0, %4			\n"	\
-	"	b   3b				\n"	\
+	"	j   3b				\n"	\
 	"	.previous			\n"	\
 	"	.section __ex_table,\"a\"	\n"	\
 	"	.align  4			\n"	\
@@ -40,12 +41,14 @@
 							\
 	: "=&r" (ret), "=&r" (oldval)			\
 	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
-	: "cc", "memory")
+	: "cc", "memory");				\
+	smp_mb()					\
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
 							\
+	smp_mb();					\
 	__asm__ __volatile__(				\
 	"1:	ld	%1, [%2]		\n"	\
 		insn				"\n"	\
@@ -55,7 +58,7 @@
 	"	.section .fixup,\"ax\"		\n"	\
 	"	.align  4			\n"	\
 	"4:	mov %0, %4			\n"	\
-	"	b   3b				\n"	\
+	"	j   3b				\n"	\
 	"	.previous			\n"	\
 	"	.section __ex_table,\"a\"	\n"	\
 	"	.align  4			\n"	\
@@ -65,7 +68,8 @@
 							\
 	: "=&r" (ret), "=&r" (oldval)			\
 	: "r" (uaddr), "r" (oparg), "ir" (-EFAULT)	\
-	: "cc", "memory")
+	: "cc", "memory");				\
+	smp_mb()					\
 
 #endif
 
@@ -83,6 +87,9 @@
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
+#endif
 	pagefault_disable();
 
 	switch (op) {
@@ -90,6 +97,7 @@
 		__futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
+		/* oldval = *uaddr; *uaddr += oparg ; ret = *uaddr */
 		__futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
@@ -106,6 +114,9 @@
 	}
 
 	pagefault_enable();
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_enable();
+#endif
 
 	if (!ret) {
 		switch (cmp) {
@@ -134,54 +145,57 @@
 	return ret;
 }
 
-/* Compare-xchg with pagefaults disabled.
- *  Notes:
- *      -Best-Effort: Exchg happens only if compare succeeds.
- *          If compare fails, returns; leaving retry/looping to upper layers
- *      -successful cmp-xchg: return orig value in @addr (same as cmp val)
- *      -Compare fails: return orig value in @addr
- *      -user access r/w fails: return -EFAULT
+/*
+ * cmpxchg of futex (pagefaults disabled by caller)
+ * Return 0 for success, -EFAULT otherwise
  */
 static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
-					u32 newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 expval,
+			      u32 newval)
 {
-	u32 val;
+	int ret = 0;
+	u32 existval;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	pagefault_disable();
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_disable();	/* to guarantee atomic r-m-w of futex op */
+#endif
+	smp_mb();
 
 	__asm__ __volatile__(
 #ifdef CONFIG_ARC_HAS_LLSC
-	"1:	llock	%0, [%3]		\n"
-	"	brne	%0, %1, 3f		\n"
-	"2:	scond	%2, [%3]		\n"
+	"1:	llock	%1, [%4]		\n"
+	"	brne	%1, %2, 3f		\n"
+	"2:	scond	%3, [%4]		\n"
 	"	bnz	1b			\n"
 #else
-	"1:	ld	%0, [%3]		\n"
-	"	brne	%0, %1, 3f		\n"
-	"2:	st	%2, [%3]		\n"
+	"1:	ld	%1, [%4]		\n"
+	"	brne	%1, %2, 3f		\n"
+	"2:	st	%3, [%4]		\n"
 #endif
 	"3:	\n"
 	"	.section .fixup,\"ax\"	\n"
-	"4:	mov %0, %4	\n"
-	"	b   3b	\n"
+	"4:	mov %0, %5	\n"
+	"	j   3b	\n"
 	"	.previous	\n"
 	"	.section __ex_table,\"a\"	\n"
 	"	.align  4	\n"
 	"	.word   1b, 4b	\n"
 	"	.word   2b, 4b	\n"
 	"	.previous\n"
-	: "=&r"(val)
-	: "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+	: "+&r"(ret), "=&r"(existval)
+	: "r"(expval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
 	: "cc", "memory");
 
-	pagefault_enable();
+	smp_mb();
 
-	*uval = val;
-	return val;
+#ifndef CONFIG_ARC_HAS_LLSC
+	preempt_enable();
+#endif
+	*uval = existval;
+	return ret;
 }
 
 #endif
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 2b8880e..5f07176 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -1,6 +1,7 @@
 /*
  * Linux performance counter support for ARC
  *
+ * Copyright (C) 2014-2015 Synopsys, Inc. (www.synopsys.com)
  * Copyright (C) 2011-2013 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
@@ -12,8 +13,8 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
-/* real maximum varies per CPU, this is the maximum supported by the driver */
-#define ARC_PMU_MAX_HWEVENTS	64
+/* Max number of counters that PCT block may ever have */
+#define ARC_PERF_MAX_COUNTERS	32
 
 #define ARC_REG_CC_BUILD	0xF6
 #define ARC_REG_CC_INDEX	0x240
@@ -28,15 +29,22 @@
 #define ARC_REG_PCT_CONFIG	0x254
 #define ARC_REG_PCT_CONTROL	0x255
 #define ARC_REG_PCT_INDEX	0x256
+#define ARC_REG_PCT_INT_CNTL	0x25C
+#define ARC_REG_PCT_INT_CNTH	0x25D
+#define ARC_REG_PCT_INT_CTRL	0x25E
+#define ARC_REG_PCT_INT_ACT	0x25F
+
+#define ARC_REG_PCT_CONFIG_USER	(1 << 18)	/* count in user mode */
+#define ARC_REG_PCT_CONFIG_KERN	(1 << 19)	/* count in kernel mode */
 
 #define ARC_REG_PCT_CONTROL_CC	(1 << 16)	/* clear counts */
 #define ARC_REG_PCT_CONTROL_SN	(1 << 17)	/* snapshot */
 
 struct arc_reg_pct_build {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int m:8, c:8, r:6, s:2, v:8;
+	unsigned int m:8, c:8, r:5, i:1, s:2, v:8;
 #else
-	unsigned int v:8, s:2, r:6, c:8, m:8;
+	unsigned int v:8, s:2, i:1, r:5, c:8, m:8;
 #endif
 };
 
@@ -95,10 +103,13 @@
 
 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp",
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
 	[PERF_COUNT_ARC_BPOK]         = "bpok",	  /* NP-NT, PT-T, PNT-NT */
+#ifdef CONFIG_ISA_ARCV2
+	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+#else
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
-
+#endif
 	[PERF_COUNT_ARC_LDC] = "imemrdc",	/* Instr: mem read cached */
 	[PERF_COUNT_ARC_STC] = "imemwrc",	/* Instr: mem write cached */
 
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index bd7105d..8fa7656 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -57,13 +57,8 @@
 
 	.section .text, "ax",@progbits
 
-res_service:		; processor restart
-	flag    0x1     ; not implemented
-	nop
-	nop
-
-reserved:		; processor restart
-	rtie            ; jump to processor initializations
+reserved:
+	flag 1		; Unexpected event, halt
 
 ;##################### Interrupt Handling ##############################
 
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index f7a82fd..589abf5 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -42,7 +42,7 @@
 	; when the forked child comes here from the __switch_to function
 	; r0 has the last task pointer.
 	; put last task in scheduler queue
-	bl   @schedule_tail
+	jl   @schedule_tail
 
 	ld   r9, [sp, PT_status32]
 	brne r9, 0, 1f
@@ -320,7 +320,7 @@
 	; --- (Slow Path #1) task preemption ---
 	bbit0  r9, TIF_NEED_RESCHED, .Lchk_pend_signals
 	mov    blink, resume_user_mode_begin  ; tail-call to U mode ret chks
-	b      @schedule 	; BTST+Bnz causes relo error in link
+	j      @schedule 	; BTST+Bnz causes relo error in link
 
 .Lchk_pend_signals:
 	IRQ_ENABLE	r10
@@ -381,7 +381,7 @@
 	bbit0  r9, TIF_NEED_RESCHED, .Lrestore_regs
 
 	; Invoke PREEMPTION
-	bl      preempt_schedule_irq
+	jl      preempt_schedule_irq
 
 	; preempt_schedule_irq() always returns with IRQ disabled
 #endif
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 2fb8658..d9e44b6 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -252,9 +252,10 @@
 
 static int idu_first_irq;
 
-static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc)
+static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	unsigned int core_irq = irq_desc_get_irq(desc);
 	unsigned int idu_irq;
 
 	idu_irq = core_irq - idu_first_irq;
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 1287388..0c08bb1 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,7 +1,7 @@
 /*
  * Linux performance counter support for ARC700 series
  *
- * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
  *
  * This code is inspired by the perf support of various other architectures.
  *
@@ -11,6 +11,7 @@
  *
  */
 #include <linux/errno.h>
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/perf_event.h>
@@ -20,12 +21,25 @@
 
 struct arc_pmu {
 	struct pmu	pmu;
-	int		counter_size;	/* in bits */
+	unsigned int	irq;
 	int		n_counters;
-	unsigned long	used_mask[BITS_TO_LONGS(ARC_PMU_MAX_HWEVENTS)];
+	u64		max_period;
 	int		ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
 };
 
+struct arc_pmu_cpu {
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	unsigned long	used_mask[BITS_TO_LONGS(ARC_PERF_MAX_COUNTERS)];
+
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event *act_counter[ARC_PERF_MAX_COUNTERS];
+};
+
 struct arc_callchain_trace {
 	int depth;
 	void *perf_stuff;
@@ -65,6 +79,7 @@
 }
 
 static struct arc_pmu *arc_pmu;
+static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
 
 /* read counter #idx; note that counter# != event# on ARC! */
 static uint64_t arc_pmu_read_counter(int idx)
@@ -88,18 +103,15 @@
 static void arc_perf_event_update(struct perf_event *event,
 				  struct hw_perf_event *hwc, int idx)
 {
-	uint64_t prev_raw_count, new_raw_count;
-	int64_t delta;
+	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
+	uint64_t new_raw_count = arc_pmu_read_counter(idx);
+	int64_t delta = new_raw_count - prev_raw_count;
 
-	do {
-		prev_raw_count = local64_read(&hwc->prev_count);
-		new_raw_count = arc_pmu_read_counter(idx);
-	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-				 new_raw_count) != prev_raw_count);
-
-	delta = (new_raw_count - prev_raw_count) &
-		((1ULL << arc_pmu->counter_size) - 1ULL);
-
+	/*
+	 * We aren't afraid of hwc->prev_count changing beneath our feet
+	 * because there's no way for us to re-enter this function at any time.
+	 */
+	local64_set(&hwc->prev_count, new_raw_count);
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
 }
@@ -142,22 +154,41 @@
 	struct hw_perf_event *hwc = &event->hw;
 	int ret;
 
+	if (!is_sampling_event(event)) {
+		hwc->sample_period  = arc_pmu->max_period;
+		hwc->last_period = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	hwc->config = 0;
+
+	if (is_isa_arcv2()) {
+		/* "exclude user" means "count only kernel" */
+		if (event->attr.exclude_user)
+			hwc->config |= ARC_REG_PCT_CONFIG_KERN;
+
+		/* "exclude kernel" means "count only user" */
+		if (event->attr.exclude_kernel)
+			hwc->config |= ARC_REG_PCT_CONFIG_USER;
+	}
+
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		if (event->attr.config >= PERF_COUNT_HW_MAX)
 			return -ENOENT;
 		if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
 			return -ENOENT;
-		hwc->config = arc_pmu->ev_hw_idx[event->attr.config];
+		hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
 		pr_debug("init event %d with h/w %d \'%s\'\n",
 			 (int) event->attr.config, (int) hwc->config,
 			 arc_pmu_ev_hw_map[event->attr.config]);
 		return 0;
+
 	case PERF_TYPE_HW_CACHE:
 		ret = arc_pmu_cache_event(event->attr.config);
 		if (ret < 0)
 			return ret;
-		hwc->config = arc_pmu->ev_hw_idx[ret];
+		hwc->config |= arc_pmu->ev_hw_idx[ret];
 		return 0;
 	default:
 		return -ENOENT;
@@ -180,6 +211,47 @@
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
 }
 
+static int arc_pmu_event_set_period(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	s64 left = local64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int idx = hwc->idx;
+	int overflow = 0;
+	u64 value;
+
+	if (unlikely(left <= -period)) {
+		/* left underflowed by more than period. */
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		overflow = 1;
+	} else	if (unlikely(left <= 0)) {
+		/* left underflowed by less than period. */
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		overflow = 1;
+	}
+
+	if (left > arc_pmu->max_period)
+		left = arc_pmu->max_period;
+
+	value = arc_pmu->max_period - left;
+	local64_set(&hwc->prev_count, value);
+
+	/* Select counter */
+	write_aux_reg(ARC_REG_PCT_INDEX, idx);
+
+	/* Write value */
+	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
+	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+
+	perf_event_update_userpage(event);
+
+	return overflow;
+}
+
 /*
  * Assigns hardware counter to hardware condition.
  * Note that there is no separate start/stop mechanism;
@@ -194,13 +266,20 @@
 		return;
 
 	if (flags & PERF_EF_RELOAD)
-		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 
-	event->hw.state = 0;
+	hwc->state = 0;
+
+	arc_pmu_event_set_period(event);
+
+	/* Enable interrupt for this counter */
+	if (is_sampling_event(event))
+		write_aux_reg(ARC_REG_PCT_INT_CTRL,
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
 
 	/* enable ARC pmu here */
-	write_aux_reg(ARC_REG_PCT_INDEX, idx);
-	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);
+	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
+	write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config);	/* condition */
 }
 
 static void arc_pmu_stop(struct perf_event *event, int flags)
@@ -208,6 +287,17 @@
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
+	/* Disable interrupt for this counter */
+	if (is_sampling_event(event)) {
+		/*
+		 * Reset the interrupt flag by writing 1. This is required
+		 * to make sure no pending interrupt is left behind.
+		 */
+		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_CTRL,
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+	}
+
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
 		/* stop ARC pmu here */
 		write_aux_reg(ARC_REG_PCT_INDEX, idx);
@@ -227,8 +317,12 @@
 
 static void arc_pmu_del(struct perf_event *event, int flags)
 {
+	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+
 	arc_pmu_stop(event, PERF_EF_UPDATE);
-	__clear_bit(event->hw.idx, arc_pmu->used_mask);
+	__clear_bit(event->hw.idx, pmu_cpu->used_mask);
+
+	pmu_cpu->act_counter[event->hw.idx] = 0;
 
 	perf_event_update_userpage(event);
 }
@@ -236,20 +330,31 @@
 /* allocate hardware counter and optionally start counting */
 static int arc_pmu_add(struct perf_event *event, int flags)
 {
+	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	if (__test_and_set_bit(idx, arc_pmu->used_mask)) {
-		idx = find_first_zero_bit(arc_pmu->used_mask,
+	if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
+		idx = find_first_zero_bit(pmu_cpu->used_mask,
 					  arc_pmu->n_counters);
 		if (idx == arc_pmu->n_counters)
 			return -EAGAIN;
 
-		__set_bit(idx, arc_pmu->used_mask);
+		__set_bit(idx, pmu_cpu->used_mask);
 		hwc->idx = idx;
 	}
 
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
+
+	pmu_cpu->act_counter[idx] = event;
+
+	if (is_sampling_event(event)) {
+		/* Mimic full counter overflow as other arches do */
+		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+		write_aux_reg(ARC_REG_PCT_INT_CNTH,
+			      (arc_pmu->max_period >> 32));
+	}
+
 	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
 	write_aux_reg(ARC_REG_PCT_COUNTL, 0);
 	write_aux_reg(ARC_REG_PCT_COUNTH, 0);
@@ -264,11 +369,82 @@
 	return 0;
 }
 
+#ifdef CONFIG_ISA_ARCV2
+static irqreturn_t arc_pmu_intr(int irq, void *dev)
+{
+	struct perf_sample_data data;
+	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	struct pt_regs *regs;
+	int active_ints;
+	int idx;
+
+	arc_pmu_disable(&arc_pmu->pmu);
+
+	active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
+
+	regs = get_irq_regs();
+
+	for (idx = 0; idx < arc_pmu->n_counters; idx++) {
+		struct perf_event *event = pmu_cpu->act_counter[idx];
+		struct hw_perf_event *hwc;
+
+		if (!(active_ints & (1 << idx)))
+			continue;
+
+		/* Reset the interrupt flag by writing 1 */
+		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+
+		/*
+		 * On reset of "interrupt active" bit corresponding
+		 * "interrupt enable" bit gets automatically reset as well.
+		 * Now we need to re-enable interrupt for the counter.
+		 */
+		write_aux_reg(ARC_REG_PCT_INT_CTRL,
+			read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+
+		hwc = &event->hw;
+
+		WARN_ON_ONCE(hwc->idx != idx);
+
+		arc_perf_event_update(event, &event->hw, event->hw.idx);
+		perf_sample_data_init(&data, 0, hwc->last_period);
+		if (!arc_pmu_event_set_period(event))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			arc_pmu_stop(event, 0);
+	}
+
+	arc_pmu_enable(&arc_pmu->pmu);
+
+	return IRQ_HANDLED;
+}
+#else
+
+static irqreturn_t arc_pmu_intr(int irq, void *dev)
+{
+	return IRQ_NONE;
+}
+
+#endif /* CONFIG_ISA_ARCV2 */
+
+void arc_cpu_pmu_irq_init(void)
+{
+	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+
+	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
+			       "ARC perf counters", pmu_cpu);
+
+	/* Clear all pending interrupt flags */
+	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j;
+	int i, j, has_interrupts;
+	int counter_size;	/* in bits */
 
 	union cc_name {
 		struct {
@@ -284,7 +460,7 @@
 		pr_err("This core does not have performance counters!\n");
 		return -ENODEV;
 	}
-	BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS);
+	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
 
 	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
 	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
@@ -293,11 +469,16 @@
 	if (!arc_pmu)
 		return -ENOMEM;
 
-	arc_pmu->n_counters = pct_bcr.c;
-	arc_pmu->counter_size = 32 + (pct_bcr.s << 4);
+	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
 
-	pr_info("ARC perf\t: %d counters (%d bits), %d countable conditions\n",
-		arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c);
+	arc_pmu->n_counters = pct_bcr.c;
+	counter_size = 32 + (pct_bcr.s << 4);
+
+	arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;
+
+	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
+		arc_pmu->n_counters, counter_size, cc_bcr.c,
+		has_interrupts ? ", [overflow IRQ support]":"");
 
 	cc_name.str[8] = 0;
 	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
@@ -332,8 +513,37 @@
 		.read		= arc_pmu_read,
 	};
 
-	/* ARC 700 PMU does not support sampling events */
-	arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+	if (has_interrupts) {
+		int irq = platform_get_irq(pdev, 0);
+		unsigned long flags;
+
+		if (irq < 0) {
+			pr_err("Cannot get IRQ number for the platform\n");
+			return -ENODEV;
+		}
+
+		arc_pmu->irq = irq;
+
+		/*
+		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
+		 * their respective local PMU.
+		 * However we use opencoded on_each_cpu() to ensure it is called
+		 * on core0 first, so that arc_request_percpu_irq() sets up
+		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
+		 * perf IRQ on non master cores.
+		 * see arc_request_percpu_irq()
+		 */
+		preempt_disable();
+		local_irq_save(flags);
+		arc_cpu_pmu_irq_init();
+		local_irq_restore(flags);
+		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
+		preempt_enable();
+
+		/* Clear all pending interrupt flags */
+		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+	} else
+		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
 	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 }
@@ -341,6 +551,7 @@
 #ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
 	{ .compatible = "snps,arc700-pct" },
+	{ .compatible = "snps,archs-pct" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
@@ -348,7 +559,7 @@
 
 static struct platform_driver arc_pmu_driver = {
 	.driver	= {
-		.name		= "arc700-pct",
+		.name		= "arc-pct",
 		.of_match_table = of_match_ptr(arc_pmu_match),
 	},
 	.probe		= arc_pmu_device_probe,
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 4409245..91d5a0f 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -65,7 +65,7 @@
  * ------------------
  * |     r25        |   <==== top of Stack (thread.ksp)
  * ~                ~
- * |    --to--      |   (CALLEE Regs of user mode)
+ * |    --to--      |   (CALLEE Regs of kernel mode)
  * |     r13        |
  * ------------------
  * |     fp         |
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 74db59b..abd961f 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -34,7 +34,7 @@
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	4\n"				\
 	"3:	mov	%0, 1\n"			\
-	"	b	2b\n"				\
+	"	j	2b\n"				\
 	"	.previous\n"				\
 	"	.section __ex_table,\"a\"\n"		\
 	"	.align	4\n"				\
@@ -82,7 +82,7 @@
 		"	.section .fixup,\"ax\"\n"	\
 		"	.align	4\n"			\
 		"4:	mov	%0, 1\n"		\
-		"	b	3b\n"			\
+		"	j	3b\n"			\
 		"	.previous\n"			\
 		"	.section __ex_table,\"a\"\n"	\
 		"	.align	4\n"			\
@@ -113,7 +113,7 @@
 		"	.section .fixup,\"ax\"\n"	\
 		"	.align	4\n"			\
 		"6:	mov	%0, 1\n"		\
-		"	b	5b\n"			\
+		"	j	5b\n"			\
 		"	.previous\n"			\
 		"	.section __ex_table,\"a\"\n"	\
 		"	.align	4\n"			\
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 1cd6695..0d1a6e9 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -22,15 +22,22 @@
 #include <asm/setup.h>
 
 static int l2_line_sz;
+int ioc_exists;
+volatile int slc_enable = 1, ioc_enable = 1;
 
 void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
 			       unsigned long sz, const int cacheop);
 
+void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
+void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
+void (*__dma_cache_wback)(unsigned long start, unsigned long sz);
+
 char *arc_cache_mumbojumbo(int c, char *buf, int len)
 {
 	int n = 0;
 	struct cpuinfo_arc_cache *p;
 
+#define IS_USED_RUN(v)		((v) ? "" : "(disabled) ")
 #define PR_CACHE(p, cfg, str)						\
 	if (!(p)->ver)							\
 		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
@@ -45,10 +52,18 @@
 	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
 	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
 
+	if (!is_isa_arcv2())
+		return buf;
+
 	p = &cpuinfo_arc700[c].slc;
 	if (p->ver)
 		n += scnprintf(buf + n, len - n,
-			"SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);
+			       "SLC\t\t: %uK, %uB Line%s\n",
+			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
+
+	if (ioc_exists)
+		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
+				IS_USED_RUN(ioc_enable));
 
 	return buf;
 }
@@ -58,18 +73,9 @@
  * the cpuinfo structure for later use.
  * No Validation done here, simply read/convert the BCRs
  */
-void read_decode_cache_bcr(void)
+static void read_decode_cache_bcr_arcv2(int cpu)
 {
-	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
-	unsigned int cpu = smp_processor_id();
-	struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
-		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
-	} ibcr, dbcr;
-
+	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
 	struct bcr_generic sbcr;
 
 	struct bcr_slc_cfg {
@@ -80,6 +86,39 @@
 #endif
 	} slc_cfg;
 
+	struct bcr_clust_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
+#else
+		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
+#endif
+	} cbcr;
+
+	READ_BCR(ARC_REG_SLC_BCR, sbcr);
+	if (sbcr.ver) {
+		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
+		p_slc->ver = sbcr.ver;
+		p_slc->sz_k = 128 << slc_cfg.sz;
+		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+	}
+
+	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
+	if (cbcr.c && ioc_enable)
+		ioc_exists = 1;
+}
+
+void read_decode_cache_bcr(void)
+{
+	struct cpuinfo_arc_cache *p_ic, *p_dc;
+	unsigned int cpu = smp_processor_id();
+	struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+	} ibcr, dbcr;
+
 	p_ic = &cpuinfo_arc700[cpu].icache;
 	READ_BCR(ARC_REG_IC_BCR, ibcr);
 
@@ -122,17 +161,8 @@
 	p_dc->ver = dbcr.ver;
 
 slc_chk:
-	if (!is_isa_arcv2())
-		return;
-
-	p_slc = &cpuinfo_arc700[cpu].slc;
-	READ_BCR(ARC_REG_SLC_BCR, sbcr);
-	if (sbcr.ver) {
-		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
-		p_slc->ver = sbcr.ver;
-		p_slc->sz_k = 128 << slc_cfg.sz;
-		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
-	}
+	if (is_isa_arcv2())
+		read_decode_cache_bcr_arcv2(cpu);
 }
 
 /*
@@ -516,11 +546,6 @@
 #endif
 }
 
-static inline int need_slc_flush(void)
-{
-	return is_isa_arcv2() && l2_line_sz;
-}
-
 /***********************************************************
  * Exported APIs
  */
@@ -569,30 +594,74 @@
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
-void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+/*
+ * DMA ops for systems with L1 cache only
+ * Make memory coherent with L1 cache by flushing/invalidating L1 lines
+ */
+static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
 {
 	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+}
 
-	if (need_slc_flush())
-		slc_op(start, sz, OP_FLUSH_N_INV);
+static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
+{
+	__dc_line_op_k(start, sz, OP_INV);
+}
+
+static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
+{
+	__dc_line_op_k(start, sz, OP_FLUSH);
+}
+
+/*
+ * DMA ops for systems with both L1 and L2 caches, but without IOC
+ * Both L1 and L2 lines need to be explicitly flushed/invalidated
+ */
+static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
+{
+	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
+	slc_op(start, sz, OP_FLUSH_N_INV);
+}
+
+static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
+{
+	__dc_line_op_k(start, sz, OP_INV);
+	slc_op(start, sz, OP_INV);
+}
+
+static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
+{
+	__dc_line_op_k(start, sz, OP_FLUSH);
+	slc_op(start, sz, OP_FLUSH);
+}
+
+/*
+ * DMA ops for systems with IOC
+ * IOC hardware snoops all DMA traffic, keeping the caches consistent with
+ * memory - eliding the need for any explicit cache maintenance of DMA buffers
+ */
+static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
+static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
+static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}
+
+/*
+ * Exported DMA API
+ */
+void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+{
+	__dma_cache_wback_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_wback_inv);
 
 void dma_cache_inv(unsigned long start, unsigned long sz)
 {
-	__dc_line_op_k(start, sz, OP_INV);
-
-	if (need_slc_flush())
-		slc_op(start, sz, OP_INV);
+	__dma_cache_inv(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_inv);
 
 void dma_cache_wback(unsigned long start, unsigned long sz)
 {
-	__dc_line_op_k(start, sz, OP_FLUSH);
-
-	if (need_slc_flush())
-		slc_op(start, sz, OP_FLUSH);
+	__dma_cache_wback(start, sz);
 }
 EXPORT_SYMBOL(dma_cache_wback);
 
@@ -848,4 +917,41 @@
 				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 		}
 	}
+
+	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
+
+		/* IM set : flush before invalidate */
+		write_aux_reg(ARC_REG_SLC_CTRL,
+			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+
+		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+		/* Important to wait for flush to complete */
+		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
+		write_aux_reg(ARC_REG_SLC_CTRL,
+			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
+	}
+
+	if (is_isa_arcv2() && ioc_exists) {
+		/* IO coherency base - 0x8z */
+		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
+	/* IO coherency aperture size - 512MB: 0x8z-0xAz */
+		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
+		/* Enable partial writes */
+		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+		/* Enable IO coherency */
+		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
+		__dma_cache_inv = __dma_cache_inv_ioc;
+		__dma_cache_wback = __dma_cache_wback_ioc;
+	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
+		__dma_cache_inv = __dma_cache_inv_slc;
+		__dma_cache_wback = __dma_cache_wback_slc;
+	} else {
+		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
+		__dma_cache_inv = __dma_cache_inv_l1;
+		__dma_cache_wback = __dma_cache_wback_l1;
+	}
 }
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 57706a9..29a46bb 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -19,6 +19,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-debug.h>
 #include <linux/export.h>
+#include <asm/cache.h>
 #include <asm/cacheflush.h>
 
 /*
@@ -53,6 +54,20 @@
 {
 	void *paddr, *kvaddr;
 
+	/*
+	 * IOC relies on all data (even coherent DMA data) being in cache
+	 * Thus allocate normal cached memory
+	 *
+	 * The gains with IOC are two-pronged:
+	 *   -For streaming data, elides the need for cache maintenance, saving
+	 *    cycles in flush code and bus bandwidth, as all the lines of a
+	 *    buffer would otherwise need to be flushed out to memory
+	 *   -For coherent data, reads/writes to buffers terminate early in the
+	 *    cache (vs. always going to memory), and are thus faster
+	 */
+	if (is_isa_arcv2() && ioc_exists)
+		return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+
 	/* This is linear addr (0x8000_0000 based) */
 	paddr = alloc_pages_exact(size, gfp);
 	if (!paddr)
@@ -85,6 +100,9 @@
 void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
 		       dma_addr_t dma_handle)
 {
+	if (is_isa_arcv2() && ioc_exists)
+		return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
+
 	iounmap((void __force __iomem *)kvaddr);
 
 	free_pages_exact((void *)dma_handle, size);
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index e7769c3..ad9825d 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -46,7 +46,7 @@
 	 * -------------------   -------------------
 	 * | snps,dw-apb-gpio |  | snps,dw-apb-gpio |
 	 * -------------------   -------------------
-	 *        |                         |
+	 *        | #12                     |
 	 *        |                 [ Debug UART on cpu card ]
 	 *        |
 	 * ------------------------
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1c50210..0d1b717 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@
 config ARCH_HAS_BANDGAP
 	bool
 
+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -268,7 +271,6 @@
 	depends on !ARM_PATCH_PHYS_VIRT
 	default DRAM_BASE if !MMU
 	default 0x00000000 if ARCH_EBSA110 || \
-			EP93XX_SDCE3_SYNC_PHYS_OFFSET || \
 			ARCH_FOOTBRIDGE || \
 			ARCH_INTEGRATOR || \
 			ARCH_IOP13XX || \
@@ -277,10 +279,7 @@
 	default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
 	default 0x20000000 if ARCH_S5PV210
 	default 0x70000000 if REALVIEW_HIGH_PHYS_OFFSET
-	default 0xc0000000 if EP93XX_SDCE0_PHYS_OFFSET || ARCH_SA1100
-	default 0xd0000000 if EP93XX_SDCE1_PHYS_OFFSET
-	default 0xe0000000 if EP93XX_SDCE2_PHYS_OFFSET
-	default 0xf0000000 if EP93XX_SDCE3_ASYNC_PHYS_OFFSET
+	default 0xc0000000 if ARCH_SA1100
 	help
 	  Please provide the physical address corresponding to the
 	  location of main memory in your system.
@@ -418,11 +417,14 @@
 	bool "EP93xx-based"
 	select ARCH_HAS_HOLES_MEMORYMODEL
 	select ARCH_REQUIRE_GPIOLIB
-	select ARCH_USES_GETTIMEOFFSET
 	select ARM_AMBA
+	select ARM_PATCH_PHYS_VIRT
 	select ARM_VIC
+	select AUTO_ZRELADDR
 	select CLKDEV_LOOKUP
+	select CLKSRC_MMIO
 	select CPU_ARM920T
+	select GENERIC_CLOCKEVENTS
 	help
 	  This enables support for the Cirrus EP93xx series of CPUs.
 
@@ -536,6 +538,7 @@
 	select MVEBU_MBUS
 	select PCI
 	select PLAT_ORION_LEGACY
+	select MULTI_IRQ_HANDLER
 	help
 	  Support for the following Marvell Orion 5x series SoCs:
 	  Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
@@ -1496,6 +1499,7 @@
 config ARM_PSCI
 	bool "Support for the ARM Power State Coordination Interface (PSCI)"
 	depends on CPU_V7
+	select ARM_PSCI_FW
 	help
 	  Say Y here if you want Linux to communicate with system firmware
 	  implementing the PSCI specification for CPU-centric power
@@ -1700,13 +1704,24 @@
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.
 
-config HW_PERF_EVENTS
-	bool "Enable hardware performance counter support for perf events"
-	depends on PERF_EVENTS
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
 	default y
 	help
-	  Enable hardware performance counter support for perf events. If
-	  disabled, perf events will use software events only.
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-effort implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
+config HW_PERF_EVENTS
+	def_bool y
+	depends on ARM_PMU
 
 config SYS_SUPPORTS_HUGETLBFS
        def_bool y
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a2e16f9..0cfd7f9 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -141,6 +141,12 @@
 		depends on ARCH_AT91
 		depends on SOC_SAMA5
 
+	config AT91_DEBUG_LL_DBGU3
+		bool "Kernel low-level debugging on sama5d2"
+		select DEBUG_AT91_UART
+		depends on ARCH_AT91
+		depends on SOC_SAMA5
+
 	config DEBUG_BCM2835
 		bool "Kernel low-level debugging on BCM2835 PL011 UART"
 		depends on ARCH_BCM2835
@@ -411,6 +417,13 @@
 		  Say Y here if you want kernel low-level debugging support
 		  on i.MX6SX.
 
+	config DEBUG_IMX6UL_UART
+		bool "i.MX6UL Debug UART"
+		depends on SOC_IMX6UL
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on i.MX6UL.
+
 	config DEBUG_IMX7D_UART
 		bool "i.MX7D Debug UART"
 		depends on SOC_IMX7D
@@ -1269,6 +1282,7 @@
 						DEBUG_IMX6Q_UART || \
 						DEBUG_IMX6SL_UART || \
 						DEBUG_IMX6SX_UART || \
+						DEBUG_IMX6UL_UART || \
 						DEBUG_IMX7D_UART
 	default 1
 	depends on ARCH_MXC
@@ -1320,6 +1334,7 @@
 				 DEBUG_IMX6Q_UART || \
 				 DEBUG_IMX6SL_UART || \
 				 DEBUG_IMX6SX_UART || \
+				 DEBUG_IMX6UL_UART || \
 				 DEBUG_IMX7D_UART
 	default "debug/ks8695.S" if DEBUG_KS8695_UART
 	default "debug/msm.S" if DEBUG_QCOM_UARTDM
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 7a13aeb..3f9a9eb 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -51,10 +51,6 @@
 endif
 endif
 
-ifeq ($(CONFIG_ARCH_SHMOBILE_LEGACY),y)
-OBJS		+= head-shmobile.o
-endif
-
 #
 # We now have a PIC decompressor implementation.  Decompressors running
 # from RAM should not define ZTEXTADDR.  Decompressors running directly
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
deleted file mode 100644
index 22a7525..0000000
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * The head-file for SH-Mobile ARM platforms
- *
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- * Simon Horman <horms@verge.net.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifdef CONFIG_ZBOOT_ROM
-
-	.section	".start", "ax"
-
-	/* load board-specific initialization code */
-#include <mach/zboot.h>
-
-	adr	r0, dtb_info
-	ldmia	r0, {r1, r3, r4, r5, r7}
-
-	sub	r0, r0, r1		@ calculate the delta offset
-	add	r5, r5, r0		@ _edata
-
-	ldr	lr, [r5, #0]		@ check if valid DTB is present
-	cmp	lr, r3
-	bne	0f
-
-	add	r9, r7, #31		@ rounded up to a multiple
-	bic	r9, r9, #31		@ ... of 32 bytes
-
-	add	r6, r9, r5		@ copy from _edata
-	add	r9, r9, r4		@ to MEMORY_START
-
-1:	ldmdb	r6!, {r0 - r3, r10 - r12, lr}
-	cmp	r6, r5
-	stmdb	r9!, {r0 - r3, r10 - r12, lr}
-	bhi	1b
-
-	/* Success: Zero board ID, pointer to start of memory for atag/dtb */
-	mov	r7, #0
-	mov	r8, r4
-	b	2f
-
-	.align	2
-dtb_info:
-	.word	dtb_info
-#ifndef __ARMEB__
-	.word	0xedfe0dd0		@ sig is 0xd00dfeed big endian
-#else
-	.word	0xd00dfeed
-#endif
-	.word	MEMORY_START
-	.word	_edata
-	.word	0x4000			@ maximum DTB size
-0:
-	/* Failure: Zero board ID, NULL atag/dtb */
-	mov 	r7, #0
-	mov	r8, #0			@ pass null pointer as atag
-2 :
-
-#endif /* CONFIG_ZBOOT_ROM */
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 246473a..233159d 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -41,6 +41,7 @@
 	at91sam9x35ek.dtb
 dtb-$(CONFIG_SOC_SAM_V7) += \
 	at91-kizbox2.dtb \
+	at91-sama5d2_xplained.dtb \
 	at91-sama5d3_xplained.dtb \
 	sama5d31ek.dtb \
 	sama5d33ek.dtb \
@@ -176,6 +177,8 @@
 	kirkwood-km_kirkwood.dtb \
 	kirkwood-laplug.dtb \
 	kirkwood-lschlv2.dtb \
+	kirkwood-lswvl.dtb \
+	kirkwood-lswxl.dtb \
 	kirkwood-lsxhl.dtb \
 	kirkwood-mplcec4.dtb \
 	kirkwood-mv88f6281gtw-ge.dtb \
@@ -211,6 +214,7 @@
 	kirkwood-ts419-6281.dtb \
 	kirkwood-ts419-6282.dtb
 dtb-$(CONFIG_ARCH_LPC18XX) += \
+	lpc4337-ciaa.dtb \
 	lpc4350-hitex-eval.dtb \
 	lpc4357-ea4357-devkit.dtb
 dtb-$(CONFIG_ARCH_LPC32XX) += \
@@ -331,6 +335,8 @@
 	imx6sx-sabreauto.dtb \
 	imx6sx-sdb-reva.dtb \
 	imx6sx-sdb.dtb
+dtb-$(CONFIG_SOC_IMX6UL) += \
+	imx6ul-14x14-evk.dtb
 dtb-$(CONFIG_SOC_IMX7D) += \
 	imx7d-sdb.dtb
 dtb-$(CONFIG_SOC_LS1021A) += \
@@ -390,6 +396,8 @@
 	omap3-cm-t3530.dtb \
 	omap3-cm-t3730.dtb \
 	omap3-devkit8000.dtb \
+	omap3-devkit8000-lcd43.dtb \
+	omap3-devkit8000-lcd70.dtb \
 	omap3-evm.dtb \
 	omap3-evm-37xx.dtb \
 	omap3-gta04a3.dtb \
@@ -409,15 +417,19 @@
 	omap3-overo-alto35.dtb \
 	omap3-overo-chestnut43.dtb \
 	omap3-overo-gallop43.dtb \
+	omap3-overo-palo35.dtb \
 	omap3-overo-palo43.dtb \
 	omap3-overo-storm-alto35.dtb \
 	omap3-overo-storm-chestnut43.dtb \
 	omap3-overo-storm-gallop43.dtb \
+	omap3-overo-storm-palo35.dtb \
 	omap3-overo-storm-palo43.dtb \
 	omap3-overo-storm-summit.dtb \
 	omap3-overo-storm-tobi.dtb \
+	omap3-overo-storm-tobiduo.dtb \
 	omap3-overo-summit.dtb \
 	omap3-overo-tobi.dtb \
+	omap3-overo-tobiduo.dtb \
 	omap3-pandora-600mhz.dtb \
 	omap3-pandora-1ghz.dtb \
 	omap3-sbc-t3517.dtb \
@@ -426,6 +438,8 @@
 	omap3-thunder.dtb \
 	omap3-zoom3.dtb
 dtb-$(CONFIG_SOC_TI81XX) += \
+	dm8148-evm.dtb \
+	dm8148-t410.dtb \
 	dm8168-evm.dtb
 dtb-$(CONFIG_SOC_AM33XX) += \
 	am335x-baltos-ir5221.dtb \
@@ -438,7 +452,8 @@
 	am335x-nano.dtb \
 	am335x-pepper.dtb \
 	am335x-lxm.dtb \
-	am335x-chiliboard.dtb
+	am335x-chiliboard.dtb \
+	am335x-wega-rdk.dtb
 dtb-$(CONFIG_ARCH_OMAP4) += \
 	omap4-duovero-parlor.dtb \
 	omap4-panda.dtb \
@@ -464,6 +479,8 @@
 dtb-$(CONFIG_ARCH_ORION5X) += \
 	orion5x-lacie-d2-network.dtb \
 	orion5x-lacie-ethernet-disk-mini-v2.dtb \
+	orion5x-linkstation-lswtgl.dtb \
+	orion5x-lswsgl.dtb \
 	orion5x-maxtor-shared-storage-2.dtb \
 	orion5x-rd88f5182-nas.dtb
 dtb-$(CONFIG_ARCH_PRIMA2) += \
@@ -488,7 +505,12 @@
 	rk3288-evb-act8846.dtb \
 	rk3288-evb-rk808.dtb \
 	rk3288-firefly-beta.dtb \
-	rk3288-firefly.dtb
+	rk3288-firefly.dtb \
+	rk3288-r89.dtb \
+	rk3288-veyron-jerry.dtb \
+	rk3288-veyron-minnie.dtb \
+	rk3288-veyron-pinky.dtb \
+	rk3288-veyron-speedy.dtb
 dtb-$(CONFIG_ARCH_S3C24XX) += \
 	s3c2416-smdk2416.dtb
 dtb-$(CONFIG_ARCH_S3C64XX) += \
@@ -501,11 +523,8 @@
 	s5pv210-smdkv210.dtb \
 	s5pv210-torbreck.dtb
 dtb-$(CONFIG_ARCH_SHMOBILE_LEGACY) += \
-	r8a7740-armadillo800eva.dtb \
 	r8a7778-bockw.dtb \
-	r8a7778-bockw-reference.dtb \
-	r8a7779-marzen.dtb \
-	sh73a0-kzm9g.dtb
+	r8a7778-bockw-reference.dtb
 dtb-$(CONFIG_ARCH_SHMOBILE_MULTI) += \
 	emev2-kzm9d.dtb \
 	r7s72100-genmai.dtb \
@@ -516,12 +535,15 @@
 	r8a7790-lager.dtb \
 	r8a7791-henninger.dtb \
 	r8a7791-koelsch.dtb \
+	r8a7793-gose.dtb \
 	r8a7794-alt.dtb \
+	r8a7794-silk.dtb \
 	sh73a0-kzm9g.dtb
 dtb-$(CONFIG_ARCH_SOCFPGA) += \
 	socfpga_arria5_socdk.dtb \
 	socfpga_arria10_socdk_sdmmc.dtb \
 	socfpga_cyclone5_socdk.dtb \
+	socfpga_cyclone5_de0_sockit.dtb \
 	socfpga_cyclone5_sockit.dtb \
 	socfpga_cyclone5_socrates.dtb \
 	socfpga_vt.dtb
@@ -544,7 +566,9 @@
 	stih416-b2020.dtb \
 	stih416-b2020e.dtb \
 	stih418-b2199.dtb
-dtb-$(CONFIG_ARCH_STM32)+= stm32f429-disco.dtb
+dtb-$(CONFIG_ARCH_STM32)+= \
+	stm32f429-disco.dtb \
+	stm32429i-eval.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-a1000.dtb \
 	sun4i-a10-ba10-tvbox.dtb \
@@ -554,6 +578,7 @@
 	sun4i-a10-hackberry.dtb \
 	sun4i-a10-hyundai-a7hd.dtb \
 	sun4i-a10-inet97fv2.dtb \
+	sun4i-a10-itead-iteaduino-plus.dts \
 	sun4i-a10-jesurun-q5.dtb \
 	sun4i-a10-marsboard.dtb \
 	sun4i-a10-mini-xplus.dtb \
@@ -601,6 +626,7 @@
 	sun8i-a23-ippo-q8h-v1.2.dtb \
 	sun8i-a33-et-q8-v1.6.dtb \
 	sun8i-a33-ga10h-v1.1.dtb \
+	sun8i-a33-ippo-q8h-v1.2.dtb \
 	sun8i-a33-sinlinx-sina33.dtb
 dtb-$(CONFIG_MACH_SUN9I) += \
 	sun9i-a80-optimus.dtb \
@@ -642,10 +668,11 @@
 	ste-ccu8540.dtb \
 	ste-ccu9540.dtb
 dtb-$(CONFIG_ARCH_UNIPHIER) += \
-	uniphier-ph1-sld3-ref.dtb \
 	uniphier-ph1-ld4-ref.dtb \
+	uniphier-ph1-ld6b-ref.dtb \
 	uniphier-ph1-pro4-ref.dtb \
-	uniphier-ph1-sld8-ref.dtb
+	uniphier-ph1-sld3-ref.dtb \
+	uniphier-ph1-sld8-ref.dtb
 dtb-$(CONFIG_ARCH_VERSATILE) += \
 	versatile-ab.dtb \
 	versatile-pb.dtb
@@ -705,6 +732,7 @@
 	dove-dove-db.dtb \
 	dove-sbc-a510.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += \
+	mt6580-evbp1.dtb \
 	mt6589-aquaris5.dtb \
 	mt6592-evb.dtb \
 	mt8127-moose.dtb \
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 5c42d25..eadbba3 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -68,16 +68,26 @@
 
 &lcdc {
 	status = "okay";
+	port {
+		lcdc_0: endpoint@0 {
+			remote-endpoint = <&hdmi_0>;
+		};
+	};
 };
 
-/ {
-	hdmi {
-		compatible = "ti,tilcdc,slave";
-		i2c = <&i2c0>;
+&i2c0 {
+	tda19988 {
+		compatible = "nxp,tda998x";
+		reg = <0x70>;
 		pinctrl-names = "default", "off";
 		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
 		pinctrl-1 = <&nxp_hdmi_bonelt_off_pins>;
-		status = "okay";
+
+		port {
+			hdmi_0: endpoint@0 {
+				remote-endpoint = <&lcdc_0>;
+			};
+		};
 	};
 };
 
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 765be27..1942a5c 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -136,16 +136,29 @@
 	};
 
 	sound {
-		compatible = "ti,da830-evm-audio";
-		ti,model = "AM335x-EVM";
-		ti,audio-codec = <&tlv320aic3106>;
-		ti,mcasp-controller = <&mcasp1>;
-		ti,codec-clock-rate = <12000000>;
-		ti,audio-routing =
-			"Headphone Jack",       "HPLOUT",
-			"Headphone Jack",       "HPROUT",
-			"LINE1L",               "Line In",
-			"LINE1R",               "Line In";
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "AM335x-EVM";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack",
+			"Line", "Line In";
+		simple-audio-card,routing =
+			"Headphone Jack",	"HPLOUT",
+			"Headphone Jack",	"HPROUT",
+			"LINE1L",		"Line In",
+			"LINE1R",		"Line In";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound_master>;
+		simple-audio-card,frame-master = <&sound_master>;
+		simple-audio-card,bitclock-inversion;
+
+		simple-audio-card,cpu {
+			sound-dai = <&mcasp1>;
+		};
+
+		sound_master: simple-audio-card,codec {
+			sound-dai = <&tlv320aic3106>;
+			system-clock-frequency = <12000000>;
+		};
 	};
 };
 
@@ -342,7 +355,7 @@
 		>;
 	};
 
-	am335x_evm_audio_pins: am335x_evm_audio_pins {
+	mcasp1_pins: mcasp1_pins {
 		pinctrl-single,pins = <
 			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
 			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_rxerr.mcasp1_fsx */
@@ -351,6 +364,15 @@
 		>;
 	};
 
+	mcasp1_pins_sleep: mcasp1_pins_sleep {
+		pinctrl-single,pins = <
+			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x108 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x144 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
 	dcan1_pins_default: dcan1_pins_default {
 		pinctrl-single,pins = <
 			0x168 (PIN_OUTPUT | MUX_MODE2) /* uart0_ctsn.d_can1_tx */
@@ -460,6 +482,7 @@
 	};
 
 	tlv320aic3106: tlv320aic3106@1b {
+		#sound-dai-cells = <0>;
 		compatible = "ti,tlv320aic3106";
 		reg = <0x1b>;
 		status = "okay";
@@ -575,19 +598,21 @@
 #include "tps65910.dtsi"
 
 &mcasp1 {
-		pinctrl-names = "default";
-		pinctrl-0 = <&am335x_evm_audio_pins>;
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&mcasp1_pins>;
+	pinctrl-1 = <&mcasp1_pins_sleep>;
 
-		status = "okay";
+	status = "okay";
 
-		op-mode = <0>;          /* MCASP_IIS_MODE */
-		tdm-slots = <2>;
-		/* 4 serializers */
-		serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
-			0 0 1 2
-		>;
-		tx-num-evt = <32>;
-		rx-num-evt = <32>;
+	op-mode = <0>;          /* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	/* 4 serializers */
+	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+		0 0 1 2
+	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
 };
 
 &tps {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 156d05e..315bb02 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -141,14 +141,26 @@
 	};
 
 	sound {
-		compatible = "ti,da830-evm-audio";
-		ti,model = "AM335x-EVMSK";
-		ti,audio-codec = <&tlv320aic3106>;
-		ti,mcasp-controller = <&mcasp1>;
-		ti,codec-clock-rate = <24000000>;
-		ti,audio-routing =
-			"Headphone Jack",       "HPLOUT",
-			"Headphone Jack",       "HPROUT";
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "AM335x-EVMSK";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack";
+		simple-audio-card,routing =
+			"Headphone Jack",	"HPLOUT",
+			"Headphone Jack",	"HPROUT";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound_master>;
+		simple-audio-card,frame-master = <&sound_master>;
+		simple-audio-card,bitclock-inversion;
+
+		simple-audio-card,cpu {
+			sound-dai = <&mcasp1>;
+		};
+
+		sound_master: simple-audio-card,codec {
+			sound-dai = <&tlv320aic3106>;
+			system-clock-frequency = <24000000>;
+		};
 	};
 
 	panel {
@@ -396,6 +408,15 @@
 		>;
 	};
 
+	mcasp1_pins_sleep: mcasp1_pins_sleep {
+		pinctrl-single,pins = <
+			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x108 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x144 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
 	mmc2_pins: pinmux_mmc2_pins {
 		pinctrl-single,pins = <
 			0x74 (PIN_INPUT_PULLUP | MUX_MODE7) /* gpmc_wpn.gpio0_31 */
@@ -462,6 +483,7 @@
 	};
 
 	tlv320aic3106: tlv320aic3106@1b {
+		#sound-dai-cells = <0>;
 		compatible = "ti,tlv320aic3106";
 		reg = <0x1b>;
 		status = "okay";
@@ -661,19 +683,21 @@
 };
 
 &mcasp1 {
-		pinctrl-names = "default";
-		pinctrl-0 = <&mcasp1_pins>;
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&mcasp1_pins>;
+	pinctrl-1 = <&mcasp1_pins_sleep>;
 
-		status = "okay";
+	status = "okay";
 
-		op-mode = <0>;          /* MCASP_IIS_MODE */
-		tdm-slots = <2>;
-		/* 4 serializers */
-		serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
-			0 0 1 2
-		>;
-		tx-num-evt = <32>;
-		rx-num-evt = <32>;
+	op-mode = <0>;          /* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	/* 4 serializers */
+	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+		0 0 1 2
+	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
 };
 
 &tscadc {
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi
new file mode 100644
index 0000000..4d28fc3
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi
@@ -0,0 +1,368 @@
+/*
+ * Copyright (C) 2015 Phytec Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "am33xx.dtsi"
+
+/ {
+	model = "Phytec AM335x phyCORE";
+	compatible = "phytec,am335x-phycore-som", "ti,am33xx";
+
+	aliases {
+		rtc0 = &i2c_rtc;
+		rtc1 = &rtc;
+	};
+
+	cpus {
+		cpu@0 {
+			cpu0-supply = <&vdd1_reg>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>; /* 256 MB */
+	};
+
+	vbat: fixedregulator@0 {
+		compatible = "regulator-fixed";
+	};
+};
+
+/* Crypto Module */
+&aes {
+	status = "okay";
+};
+
+&sham {
+	status = "okay";
+};
+
+/* Ethernet */
+&am33xx_pinmux {
+	ethernet0_pins: pinmux_ethernet0 {
+		pinctrl-single,pins = <
+			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* mii1_crs.rmii1_crs_dv */
+			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* mii1_rxerr.rmii1_rxerr */
+			0x114 (PIN_OUTPUT | MUX_MODE1)		/* mii1_txen.rmii1_txen */
+			0x124 (PIN_OUTPUT | MUX_MODE1)		/* mii1_txd1.rmii1_txd1 */
+			0x128 (PIN_OUTPUT | MUX_MODE1)		/* mii1_txd0.rmii1_txd0 */
+			0x13c (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* mii1_rxd1.rmii1_rxd1 */
+			0x140 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* mii1_rxd0.rmii1_rxd0 */
+			0x144 (PIN_INPUT_PULLDOWN | MUX_MODE0)	/* rmii1_refclk.rmii1_refclk */
+		>;
+	};
+
+	mdio_pins: pinmux_mdio {
+		pinctrl-single,pins = <
+			/* MDIO */
+			0x148 (PIN_INPUT_PULLUP | SLEWCTRL_FAST | MUX_MODE0)	/* mdio_data.mdio_data */
+			0x14c (PIN_OUTPUT_PULLUP | MUX_MODE0)			/* mdio_clk.mdio_clk */
+		>;
+	};
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <0>;
+	phy-mode = "rmii";
+	dual_emac_res_vlan = <1>;
+};
+
+&davinci_mdio {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mdio_pins>;
+	status = "okay";
+};
+
+&mac {
+	slaves = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&ethernet0_pins>;
+	status = "okay";
+};
+
+&phy_sel {
+	rmii-clock-ext;
+};
+
+/* I2C Busses */
+&am33xx_pinmux {
+	i2c0_pins: pinmux_i2c0 {
+		pinctrl-single,pins = <
+			0x188 (PIN_INPUT | MUX_MODE0)	/* i2c0_sda.i2c0_sda */
+			0x18c (PIN_INPUT | MUX_MODE0)	/* i2c0_scl.i2c0_scl */
+		>;
+	};
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins>;
+	clock-frequency = <400000>;
+	status = "okay";
+
+	tps: pmic@2d {
+		reg = <0x2d>;
+	};
+
+	i2c_eeprom: eeprom@52 {
+		compatible = "atmel,24c32";
+		pagesize = <32>;
+		reg = <0x52>;
+		status = "disabled";
+	};
+
+	i2c_rtc: rtc@68 {
+		compatible = "rv4162";
+		reg = <0x68>;
+		status = "disabled";
+	};
+};
+
+/* NAND memory */
+&am33xx_pinmux {
+		nandflash_pins: pinmux_nandflash {
+			pinctrl-single,pins = <
+			0x0 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad0.gpmc_ad0 */
+			0x4 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad1.gpmc_ad1 */
+			0x8 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad2.gpmc_ad2 */
+			0xc (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad3.gpmc_ad3 */
+			0x10 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad4.gpmc_ad4 */
+			0x14 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad5.gpmc_ad5 */
+			0x18 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad6.gpmc_ad6 */
+			0x1c (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_ad7.gpmc_ad7 */
+			0x70 (PIN_INPUT_PULLUP | MUX_MODE0)	/* gpmc_wait0.gpmc_wait0 */
+			0x7c (PIN_OUTPUT | MUX_MODE0)		/* gpmc_csn0.gpmc_csn0 */
+			0x90 (PIN_OUTPUT | MUX_MODE0)		/* gpmc_advn_ale.gpmc_advn_ale */
+			0x94 (PIN_OUTPUT | MUX_MODE0)		/* gpmc_oen_ren.gpmc_oen_ren */
+			0x98 (PIN_OUTPUT | MUX_MODE0)		/* gpmc_wen.gpmc_wen */
+			0x9c (PIN_OUTPUT | MUX_MODE0)		/* gpmc_be0n_cle.gpmc_be0n_cle */
+		>;
+	};
+};
+
+&elm {
+	status = "okay";
+};
+
+&gpmc {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&nandflash_pins>;
+	ranges = <0 0 0x08000000 0x1000000>;   /* CS0: NAND */
+	nandflash: nand@0,0 {
+		reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+		nand-bus-width = <8>;
+		ti,nand-ecc-opt = "bch8";
+		gpmc,device-nand = "true";
+		gpmc,device-width = <1>;
+		gpmc,sync-clk-ps = <0>;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <30>;
+		gpmc,cs-wr-off-ns = <30>;
+		gpmc,adv-on-ns = <0>;
+		gpmc,adv-rd-off-ns = <30>;
+		gpmc,adv-wr-off-ns = <30>;
+		gpmc,we-on-ns = <0>;
+		gpmc,we-off-ns = <20>;
+		gpmc,oe-on-ns = <10>;
+		gpmc,oe-off-ns = <30>;
+		gpmc,access-ns = <30>;
+		gpmc,rd-cycle-ns = <30>;
+		gpmc,wr-cycle-ns = <30>;
+		gpmc,wait-on-read = "true";
+		gpmc,wait-on-write = "true";
+		gpmc,bus-turnaround-ns = <0>;
+		gpmc,cycle2cycle-delay-ns = <50>;
+		gpmc,cycle2cycle-diffcsen;
+		gpmc,clk-activation-ns = <0>;
+		gpmc,wait-monitoring-ns = <0>;
+		gpmc,wr-access-ns = <30>;
+		gpmc,wr-data-mux-bus-ns = <0>;
+
+		elm_id = <&elm>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "xload";
+			reg = <0x0 0x20000>;
+		};
+		partition@1 {
+			label = "xload_backup1";
+			reg = <0x20000 0x20000>;
+		};
+		partition@2 {
+			label = "xload_backup2";
+			reg = <0x40000 0x20000>;
+		};
+		partition@3 {
+			label = "xload_backup3";
+			reg = <0x60000 0x20000>;
+		};
+		partition@4 {
+			label = "barebox";
+			reg = <0x80000 0x80000>;
+		};
+		partition@5 {
+			label = "bareboxenv";
+			reg = <0x100000 0x40000>;
+		};
+		partition@6 {
+			label = "oftree";
+			reg = <0x140000 0x40000>;
+		};
+		partition@7 {
+			label = "kernel";
+			reg = <0x180000 0x800000>;
+		};
+		partition@8 {
+			label = "root";
+			reg = <0x980000 0x0>;
+		};
+	};
+};
+
+/* Power */
+#include "tps65910.dtsi"
+
+&tps {
+	vcc1-supply = <&vbat>;
+	vcc2-supply = <&vbat>;
+	vcc3-supply = <&vbat>;
+	vcc4-supply = <&vbat>;
+	vcc5-supply = <&vbat>;
+	vcc6-supply = <&vbat>;
+	vcc7-supply = <&vbat>;
+	vccio-supply = <&vbat>;
+
+	regulators {
+		vrtc_reg: regulator@0 {
+			regulator-always-on;
+		};
+
+		vio_reg: regulator@1 {
+			regulator-always-on;
+		};
+
+		vdd1_reg: regulator@2 {
+			/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
+			regulator-name = "vdd_mpu";
+			regulator-min-microvolt = <912500>;
+			regulator-max-microvolt = <1312500>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd2_reg: regulator@3 {
+			/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
+			regulator-name = "vdd_core";
+			regulator-min-microvolt = <912500>;
+			regulator-max-microvolt = <1150000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
+		vdd3_reg: regulator@4 {
+			regulator-always-on;
+		};
+
+		vdig1_reg: regulator@5 {
+			regulator-name = "vdig1_1p8v";
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+
+		vdig2_reg: regulator@6 {
+			regulator-always-on;
+		};
+
+		vpll_reg: regulator@7 {
+			regulator-always-on;
+		};
+
+		vdac_reg: regulator@8 {
+			regulator-always-on;
+		};
+
+		vaux1_reg: regulator@9 {
+			regulator-always-on;
+		};
+
+		vaux2_reg: regulator@10 {
+			regulator-always-on;
+		};
+
+		vaux33_reg: regulator@11 {
+			regulator-always-on;
+		};
+
+		vmmc_reg: regulator@12 {
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+};
+
+&vbat {
+	regulator-name = "vbat";
+	regulator-min-microvolt = <5000000>;
+	regulator-max-microvolt = <5000000>;
+	regulator-boot-on;
+};
+
+/* SPI Busses */
+&am33xx_pinmux {
+	spi0_pins: pinmux_spi0 {
+		pinctrl-single,pins = <
+			0x150 (PIN_INPUT_PULLDOWN | MUX_MODE0)	/* spi0_clk.spi0_clk */
+			0x154 (PIN_INPUT_PULLDOWN | MUX_MODE0)	/* spi0_d0.spi0_d0 */
+			0x158 (PIN_INPUT_PULLUP | MUX_MODE0)	/* spi0_d1.spi0_d1 */
+			0x15c (PIN_INPUT_PULLUP | MUX_MODE0)	/* spi0_cs0.spi0_cs0 */
+		>;
+	};
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi0_pins>;
+	status = "okay";
+
+	serial_flash: m25p80@0 {
+		compatible = "m25p80";
+		spi-max-frequency = <48000000>;
+		reg = <0x0>;
+		m25p,fast-read;
+		status = "disabled";
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "xload";
+			reg = <0x0 0x20000>;
+		};
+		partition@1 {
+			label = "barebox";
+			reg = <0x20000 0x80000>;
+		};
+		partition@2 {
+			label = "bareboxenv";
+			reg = <0xa0000 0x20000>;
+		};
+		partition@3 {
+			label = "oftree";
+			reg = <0xc0000 0x20000>;
+		};
+		partition@4 {
+			label = "kernel";
+			reg = <0xe0000 0x0>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/am335x-wega-rdk.dts b/arch/arm/boot/dts/am335x-wega-rdk.dts
new file mode 100644
index 0000000..6431b7d
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-wega-rdk.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2015 Phytec Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include "am335x-phycore-som.dtsi"
+#include "am335x-wega.dtsi"
+
+/* SoM */
+&i2c_eeprom {
+	status = "okay";
+};
+
+&i2c_rtc {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi
new file mode 100644
index 0000000..5e541bd
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-wega.dtsi
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2015 Phytec Messtechnik GmbH
+ * Author: Teresa Remmet <t.remmet@phytec.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+	model = "Phytec AM335x phyBOARD-WEGA";
+	compatible = "phytec,am335x-wega", "phytec,am335x-phycore-som", "ti,am33xx";
+
+};
+
+/* CAN Busses */
+&am33xx_pinmux {
+	dcan1_pins: pinmux_dcan1 {
+		pinctrl-single,pins = <
+			0x168 (PIN_OUTPUT_PULLUP | MUX_MODE2) /* uart0_ctsn.d_can1_tx */
+			0x16c (PIN_INPUT_PULLUP | MUX_MODE2) /* uart0_rtsn.d_can1_rx */
+		>;
+	};
+};
+
+&dcan1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&dcan1_pins>;
+	status = "okay";
+};
+
+/* Ethernet */
+&am33xx_pinmux {
+	ethernet1_pins: pinmux_ethernet1 {
+		pinctrl-single,pins = <
+			0x40 (PIN_OUTPUT | MUX_MODE1)		/* gpmc_a0.mii2_txen */
+			0x44 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a1.mii2_rxdv */
+			0x48 (PIN_OUTPUT | MUX_MODE1)		/* gpmc_a2.mii2_txd3 */
+			0x4c (PIN_OUTPUT | MUX_MODE1)		/* gpmc_a3.mii2_txd2 */
+			0x50 (PIN_OUTPUT | MUX_MODE1)		/* gpmc_a4.mii2_txd1 */
+			0x54 (PIN_OUTPUT | MUX_MODE1)		/* gpmc_a5.mii2_txd0 */
+			0x58 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a6.mii2_txclk */
+			0x5c (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a7.mii2_rxclk */
+			0x60 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a8.mii2_rxd3 */
+			0x64 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a9.mii2_rxd2 */
+			0x68 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a10.mii2_rxd1 */
+			0x6c (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_a11.mii2_rxd0 */
+			0x74 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_wpn.mii2_rxerr */
+			0x78 (PIN_INPUT_PULLDOWN | MUX_MODE1)	/* gpmc_ben1.mii2_col */
+		>;
+	};
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <1>;
+	phy-mode = "mii";
+	dual_emac_res_vlan = <2>;
+};
+
+&mac {
+	slaves = <2>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&ethernet0_pins &ethernet1_pins>;
+	dual_emac = <1>;
+};
+
+/* MMC */
+&am33xx_pinmux {
+	mmc1_pins: pinmux_mmc1 {
+		pinctrl-single,pins = <
+			0x0F0 (PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_dat3.mmc0_dat3 */
+			0x0F4 (PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_dat2.mmc0_dat2 */
+			0x0F8 (PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_dat1.mmc0_dat1 */
+			0x0FC (PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_dat0.mmc0_dat0 */
+			0x100 (PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_clk.mmc0_clk */
+			0x104 (PIN_INPUT_PULLUP | MUX_MODE0)	/* mmc0_cmd.mmc0_cmd */
+			0x160 (PIN_INPUT_PULLUP | MUX_MODE7)	/* spi0_cs1.mmc0_sdcd */
+		>;
+	};
+};
+
+&mmc1 {
+	vmmc-supply = <&vmmc_reg>;
+	bus-width = <4>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc1_pins>;
+	cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+	status = "okay";
+};
+
+/* UARTs */
+&am33xx_pinmux {
+	uart0_pins: pinmux_uart0 {
+		pinctrl-single,pins = <
+			0x170 (PIN_INPUT_PULLUP | MUX_MODE0)    /* uart0_rxd.uart0_rxd */
+			0x174 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart0_txd.uart0_txd */
+		>;
+	};
+
+	uart1_pins: pinmux_uart1_pins {
+		pinctrl-single,pins = <
+			0x180 (PIN_INPUT_PULLUP | MUX_MODE0)	/* uart1_rxd.uart1_rxd */
+			0x184 (PIN_OUTPUT_PULLDOWN | MUX_MODE0)	/* uart1_txd.uart1_txd */
+			0x178 (PIN_INPUT | MUX_MODE0)		/* uart1_ctsn.uart1_ctsn */
+			0x17c (PIN_OUTPUT_PULLDOWN | MUX_MODE0)		/* uart1_rtsn.uart1_rtsn */
+		>;
+	};
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+	status = "okay";
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart1_pins>;
+	status = "okay";
+};
+
+/* USB */
+&cppi41dma {
+	status = "okay";
+};
+
+&usb_ctrl_mod {
+	status = "okay";
+};
+
+&usb {
+	status = "okay";
+};
+
+&usb0 {
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
+&usb0_phy {
+	status = "okay";
+};
+
+&usb1 {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usb1_phy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 21fcc44..d23e252 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -103,6 +103,15 @@
 			#size-cells = <1>;
 			ranges = <0 0x44c00000 0x280000>;
 
+			wkup_m3: wkup_m3@100000 {
+				compatible = "ti,am3352-wkup-m3";
+				reg = <0x100000 0x4000>,
+				      <0x180000	0x2000>;
+				reg-names = "umem", "dmem";
+				ti,hwmods = "wkup_m3";
+				ti,pm-firmware = "am335x-pm-firmware.elf";
+			};
+
 			prcm: prcm@200000 {
 				compatible = "ti,am3-prcm";
 				reg = <0x200000 0x4000>;
@@ -144,6 +153,14 @@
 					};
 				};
 
+				wkup_m3_ipc: wkup_m3_ipc@1324 {
+					compatible = "ti,am3352-wkup-m3-ipc";
+					reg = <0x1324 0x24>;
+					interrupts = <78>;
+					ti,rproc = <&wkup_m3>;
+					mboxes = <&mailbox &mbox_wkupm3>;
+				};
+
 				scm_clockdomains: clockdomains {
 				};
 			};
@@ -210,7 +227,7 @@
 		};
 
 		uart0: serial@44e09000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart1";
 			clock-frequency = <48000000>;
 			reg = <0x44e09000 0x2000>;
@@ -221,7 +238,7 @@
 		};
 
 		uart1: serial@48022000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart2";
 			clock-frequency = <48000000>;
 			reg = <0x48022000 0x2000>;
@@ -232,7 +249,7 @@
 		};
 
 		uart2: serial@48024000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart3";
 			clock-frequency = <48000000>;
 			reg = <0x48024000 0x2000>;
@@ -243,7 +260,7 @@
 		};
 
 		uart3: serial@481a6000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart4";
 			clock-frequency = <48000000>;
 			reg = <0x481a6000 0x2000>;
@@ -252,7 +269,7 @@
 		};
 
 		uart4: serial@481a8000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart5";
 			clock-frequency = <48000000>;
 			reg = <0x481a8000 0x2000>;
@@ -261,7 +278,7 @@
 		};
 
 		uart5: serial@481aa000 {
-			compatible = "ti,omap3-uart";
+			compatible = "ti,am3352-uart", "ti,omap3-uart";
 			ti,hwmods = "uart6";
 			clock-frequency = <48000000>;
 			reg = <0x481aa000 0x2000>;
@@ -700,7 +717,7 @@
 		};
 
 		mac: ethernet@4a100000 {
-			compatible = "ti,cpsw";
+			compatible = "ti,am335x-cpsw","ti,cpsw";
 			ti,hwmods = "cpgmac0";
 			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
 			clock-names = "fck", "cpts";
@@ -762,14 +779,6 @@
 			reg = <0x40300000 0x10000>; /* 64k */
 		};
 
-		wkup_m3: wkup_m3@44d00000 {
-			compatible = "ti,am3353-wkup-m3";
-			reg = <0x44d00000 0x4000	/* M3 UMEM */
-			       0x44d80000 0x2000>;	/* M3 DMEM */
-			ti,hwmods = "wkup_m3";
-			ti,no-reset-on-init;
-		};
-
 		elm: elm@48080000 {
 			compatible = "ti,am3352-elm";
 			reg = <0x48080000 0x2000>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index ade28c79..564900b 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -23,6 +23,11 @@
 		i2c1 = &i2c1;
 		i2c2 = &i2c2;
 		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+		serial3 = &uart3;
+		serial4 = &uart4;
+		serial5 = &uart5;
 		ethernet0 = &cpsw_emac0;
 		ethernet1 = &cpsw_emac1;
 	};
@@ -59,6 +64,27 @@
 		interrupt-parent = <&gic>;
 	};
 
+	scu: scu@48240000 {
+		compatible = "arm,cortex-a9-scu";
+		reg = <0x48240000 0x100>;
+	};
+
+	global_timer: timer@48240200 {
+		compatible = "arm,cortex-a9-global-timer";
+		reg = <0x48240200 0x100>;
+		interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&gic>;
+		clocks = <&dpll_mpu_m2_ck>;
+	};
+
+	local_timer: timer@48240600 {
+		compatible = "arm,cortex-a9-twd-timer";
+		reg = <0x48240600 0x100>;
+		interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-parent = <&gic>;
+		clocks = <&dpll_mpu_m2_ck>;
+	};
+
 	l2-cache-controller@48242000 {
 		compatible = "arm,pl310-cache";
 		reg = <0x48242000 0x1000>;
@@ -83,9 +109,19 @@
 			#size-cells = <1>;
 			ranges = <0 0x44c00000 0x287000>;
 
+			wkup_m3: wkup_m3@100000 {
+				compatible = "ti,am4372-wkup-m3";
+				reg = <0x100000 0x4000>,
+				      <0x180000	0x2000>;
+				reg-names = "umem", "dmem";
+				ti,hwmods = "wkup_m3";
+				ti,pm-firmware = "am335x-pm-firmware.elf";
+			};
+
 			prcm: prcm@1f0000 {
 				compatible = "ti,am4-prcm";
 				reg = <0x1f0000 0x11000>;
+				interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
 
 				prcm_clocks: clocks {
 					#address-cells = <1>;
@@ -127,6 +163,14 @@
 					};
 				};
 
+				wkup_m3_ipc: wkup_m3_ipc@1324 {
+					compatible = "ti,am4372-wkup-m3-ipc";
+					reg = <0x1324 0x44>;
+					interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+					ti,rproc = <&wkup_m3>;
+					mboxes = <&mailbox &mbox_wkupm3>;
+				};
+
 				scm_clockdomains: clockdomains {
 				};
 			};
@@ -308,7 +352,8 @@
 		};
 
 		rtc: rtc@44e3e000 {
-			compatible = "ti,am4372-rtc","ti,da830-rtc";
+			compatible = "ti,am4372-rtc", "ti,am3352-rtc",
+				     "ti,da830-rtc";
 			reg = <0x44e3e000 0x1000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH
 				      GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
@@ -527,8 +572,11 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ti,hwmods = "cpgmac0";
-			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
-			clock-names = "fck", "cpts";
+			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>,
+				 <&dpll_clksel_mac_clk>;
+			clock-names = "fck", "cpts", "50mclk";
+			assigned-clocks = <&dpll_clksel_mac_clk>;
+			assigned-clock-rates = <50000000>;
 			status = "disabled";
 			cpdma_channels = <8>;
 			ale_entries = <1024>;
@@ -865,7 +913,12 @@
 			usb1: usb@48390000 {
 				compatible = "synopsys,dwc3";
 				reg = <0x48390000 0x10000>;
-				interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				phys = <&usb2_phy1>;
 				phy-names = "usb2-phy";
 				maximum-speed = "high-speed";
@@ -889,7 +942,12 @@
 			usb2: usb@483d0000 {
 				compatible = "synopsys,dwc3";
 				reg = <0x483d0000 0x10000>;
-				interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				phys = <&usb2_phy2>;
 				phy-names = "usb2-phy";
 				maximum-speed = "high-speed";
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 84aa30c..215775d 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -21,12 +21,11 @@
 
 	aliases {
 		display0 = &lcd0;
-		serial3 = &uart3;
 	};
 
-	vmmcsd_fixed: fixedregulator-sd {
+	evm_v3_3d: fixedregulator-v3_3d {
 		compatible = "regulator-fixed";
-		regulator-name = "vmmcsd_fixed";
+		regulator-name = "evm_v3_3d";
 		regulator-min-microvolt = <3300000>;
 		regulator-max-microvolt = <3300000>;
 		enable-active-high;
@@ -83,17 +82,6 @@
 		compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
 		label = "lcd";
 
-		pinctrl-names = "default";
-		pinctrl-0 = <&lcd_pins>;
-
-		/*
-		 * SelLCDorHDMI, LOW to select HDMI. This is not really the
-		 * panel's enable GPIO, but we don't have HDMI driver support nor
-		 * support to switch between two displays, so using this gpio as
-		 * panel's enable should be safe.
-		 */
-		enable-gpios = <&gpio5 8 GPIO_ACTIVE_HIGH>;
-
 		panel-timing {
 			clock-frequency = <33000000>;
 			hactive = <800>;
@@ -124,6 +112,32 @@
 		clock-frequency = <12000000>;
 	};
 
+	sound0: sound@0 {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "AM437x-GP-EVM";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack",
+			"Line", "Line In";
+		simple-audio-card,routing =
+			"Headphone Jack",	"HPLOUT",
+			"Headphone Jack",	"HPROUT",
+			"LINE1L",		"Line In",
+			"LINE1R",		"Line In";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound0_master>;
+		simple-audio-card,frame-master = <&sound0_master>;
+		simple-audio-card,bitclock-inversion;
+
+		simple-audio-card,cpu {
+			sound-dai = <&mcasp1>;
+			system-clock-frequency = <12000000>;
+		};
+
+		sound0_master: simple-audio-card,codec {
+			sound-dai = <&tlv320aic3106>;
+			system-clock-frequency = <12000000>;
+		};
+	};
 };
 
 &am43xx_pinmux {
@@ -217,7 +231,6 @@
 
 	nand_flash_x8: nand_flash_x8 {
 		pinctrl-single,pins = <
-			0x26c(PIN_OUTPUT_PULLDOWN | MUX_MODE7)	/* spi2_cs0.gpio/eMMCorNANDsel */
 			0x0  (PIN_INPUT  | MUX_MODE0)	/* gpmc_ad0.gpmc_ad0 */
 			0x4  (PIN_INPUT  | MUX_MODE0)	/* gpmc_ad1.gpmc_ad1 */
 			0x8  (PIN_INPUT  | MUX_MODE0)	/* gpmc_ad2.gpmc_ad2 */
@@ -270,7 +283,7 @@
 		>;
 	};
 
-	lcd_pins: lcd_pins {
+	display_mux_pins: display_mux_pins {
 		pinctrl-single,pins = <
 			/* GPIO 5_8 to select LCD / HDMI */
 			0x238 (PIN_OUTPUT_PULLUP | MUX_MODE7)
@@ -409,6 +422,60 @@
 			0x234 (PIN_OUTPUT_PULLDOWN | MUX_MODE0) /* uart3_rtsn.uart3_rtsn */
 		>;
 	};
+
+	mcasp1_pins: mcasp1_pins {
+		pinctrl-single,pins = <
+			0x108 (PIN_OUTPUT_PULLDOWN | MUX_MODE4)	/* mii1_col.mcasp1_axr2 */
+			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4)	/* mii1_crs.mcasp1_aclkx */
+			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE4)	/* mii1_rxerr.mcasp1_fsx */
+			0x144 (PIN_INPUT_PULLDOWN | MUX_MODE4)	/* rmii1_ref_clk.mcasp1_axr3 */
+		>;
+	};
+
+	mcasp1_sleep_pins: mcasp1_sleep_pins {
+		pinctrl-single,pins = <
+			0x108 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x144 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
+	gpio0_pins: gpio0_pins {
+		pinctrl-single,pins = <
+			0x26c (PIN_OUTPUT | MUX_MODE9) /* spi2_cs0.gpio0_23 SEL_eMMCorNANDn */
+		>;
+	};
+
+	emmc_pins_default: emmc_pins_default {
+		pinctrl-single,pins = <
+			0x00 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad0.mmc1_dat0 */
+			0x04 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad1.mmc1_dat1 */
+			0x08 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad2.mmc1_dat2 */
+			0x0c (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad3.mmc1_dat3 */
+			0x10 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad4.mmc1_dat4 */
+			0x14 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad5.mmc1_dat5 */
+			0x18 (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad6.mmc1_dat6 */
+			0x1c (PIN_INPUT_PULLUP | MUX_MODE1) /* gpmc_ad7.mmc1_dat7 */
+			0x80 (PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn1.mmc1_clk */
+			0x84 (PIN_INPUT_PULLUP | MUX_MODE2) /* gpmc_csn2.mmc1_cmd */
+		>;
+	};
+
+	emmc_pins_sleep: emmc_pins_sleep {
+		pinctrl-single,pins = <
+			0x00 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad0.gpio1_0 */
+			0x04 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad1.gpio1_1 */
+			0x08 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad2.gpio1_2 */
+			0x0c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad3.gpio1_3 */
+			0x10 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad4.gpio1_4 */
+			0x14 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad5.gpio1_5 */
+			0x18 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad6.gpio1_6 */
+			0x1c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_ad7.gpio1_7 */
+			0x80 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn1.gpio1_30 */
+			0x84 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* gpmc_csn2.gpio1_31 */
+		>;
+	};
 };
 
 &i2c0 {
@@ -455,6 +522,8 @@
 			regulator-name = "v1_0bat";
 			regulator-min-microvolt = <1000000>;
 			regulator-max-microvolt = <1000000>;
+			regulator-boot-on;
+			regulator-always-on;
 		};
 
 		dcdc6: regulator-dcdc6 {
@@ -462,6 +531,8 @@
 			regulator-name = "v1_8bat";
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <1800000>;
+			regulator-boot-on;
+			regulator-always-on;
 		};
 
 		ldo1: regulator-ldo1 {
@@ -521,6 +592,19 @@
 			};
 		};
 	};
+
+	tlv320aic3106: tlv320aic3106@1b {
+		#sound-dai-cells = <0>;
+		compatible = "ti,tlv320aic3106";
+		reg = <0x1b>;
+		status = "okay";
+
+		/* Regulators */
+		IOVDD-supply = <&evm_v3_3d>; /* V3_3D -> <tps63031> EN: V1_8D -> VBAT */
+		AVDD-supply = <&evm_v3_3d>; /* v3_3AUD -> V3_3D -> ... */
+		DRVDD-supply = <&evm_v3_3d>; /* v3_3AUD -> V3_3D -> ... */
+		DVDD-supply = <&ldo1>; /* V1_8D -> LDO1 */
+	};
 };
 
 &epwmss0 {
@@ -542,7 +626,23 @@
 };
 
 &gpio0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&gpio0_pins>;
 	status = "okay";
+
+	p23 {
+		gpio-hog;
+		gpios = <23 GPIO_ACTIVE_HIGH>;
+		/* SelEMMCorNAND selects between eMMC and NAND:
+		 * Low: NAND
+		 * High: eMMC
+		 * When changing this line make sure the newly
+		 * selected device node is enabled and the previously
+		 * selected device node is disabled.
+		 */
+		output-low;
+		line-name = "SelEMMCorNAND";
+	};
 };
 
 &gpio1 {
@@ -558,19 +658,48 @@
 };
 
 &gpio5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&display_mux_pins>;
 	status = "okay";
 	ti,no-reset-on-init;
+
+	p8 {
+		/*
+		 * SelLCDorHDMI selects between display and audio paths:
+		 * Low: HDMI display with audio via HDMI
+		 * High: LCD display with analog audio via aic3111 codec
+		 */
+		gpio-hog;
+		gpios = <8 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "SelLCDorHDMI";
+	};
 };
 
 &mmc1 {
 	status = "okay";
-	vmmc-supply = <&vmmcsd_fixed>;
+	vmmc-supply = <&evm_v3_3d>;
 	bus-width = <4>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins>;
 	cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
 };
 
+/* eMMC sits on mmc2 */
+&mmc2 {
+	/*
+	 * When enabling eMMC, disable GPMC/NAND and set
+	 * SelEMMCorNAND to output-high
+	 */
+	status = "disabled";
+	vmmc-supply = <&evm_v3_3d>;
+	bus-width = <8>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&emmc_pins_default>;
+	pinctrl-1 = <&emmc_pins_sleep>;
+	ti,non-removable;
+};
+
 &mmc3 {
 	status = "okay";
 	/* these are on the crossbar and are outlined in the
@@ -651,6 +780,10 @@
 };
 
 &gpmc {
+	/*
+	 * When enabling GPMC, disable eMMC and set
+	 * SelEMMCorNAND to output-low
+	 */
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&nand_flash_x8>;
@@ -790,3 +923,21 @@
 		};
 	};
 };
+
+&mcasp1 {
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&mcasp1_pins>;
+	pinctrl-1 = <&mcasp1_sleep_pins>;
+
+	status = "okay";
+
+	op-mode = <0>; /* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	/* 4 serializers */
+	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+		0 0 1 2
+	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
+};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index c17097d..22af448 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -32,14 +32,29 @@
 	};
 
 	sound {
-		compatible = "ti,da830-evm-audio";
-		ti,model = "AM437x-SK-EVM";
-		ti,audio-codec = <&tlv320aic3106>;
-		ti,mcasp-controller = <&mcasp1>;
-		ti,codec-clock-rate = <24000000>;
-		ti,audio-routing =
-			"Headphone Jack",       "HPLOUT",
-			"Headphone Jack",       "HPROUT";
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "AM437x-SK-EVM";
+		simple-audio-card,widgets =
+			"Headphone", "Headphone Jack",
+			"Line", "Line In";
+		simple-audio-card,routing =
+			"Headphone Jack",	"HPLOUT",
+			"Headphone Jack",	"HPROUT",
+			"LINE1L",		"Line In",
+			"LINE1R",		"Line In";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound_master>;
+		simple-audio-card,frame-master = <&sound_master>;
+		simple-audio-card,bitclock-inversion;
+
+		simple-audio-card,cpu {
+			sound-dai = <&mcasp1>;
+		};
+
+		sound_master: simple-audio-card,codec {
+			sound-dai = <&tlv320aic3106>;
+			system-clock-frequency = <24000000>;
+		};
 	};
 
 	matrix_keypad: matrix_keypad@0 {
@@ -364,6 +379,15 @@
 		>;
 	};
 
+	mcasp1_pins_sleep: mcasp1_pins_sleep {
+		pinctrl-single,pins = <
+			0x10c (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x110 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x108 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			0x144 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+		>;
+	};
+
 	lcd_pins: lcd_pins {
 		pinctrl-single,pins = <
 			0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpcm_ad7.gpio1_7 */
@@ -480,6 +504,7 @@
 	};
 
 	tlv320aic3106: tlv320aic3106@1b {
+		#sound-dai-cells = <0>;
 		compatible = "ti,tlv320aic3106";
 		reg = <0x1b>;
 		status = "okay";
@@ -640,8 +665,10 @@
 };
 
 &mcasp1 {
-	pinctrl-names = "default";
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default", "sleep";
 	pinctrl-0 = <&mcasp1_pins>;
+	pinctrl-1 = <&mcasp1_pins_sleep>;
 
 	status = "okay";
 
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 795d68a..86c2dfb 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -14,6 +14,7 @@
 #include <dt-bindings/pinctrl/am43xx.h>
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/sound/tlv320aic31xx-micbias.h>
 
 / {
 	model = "TI AM43x EPOS EVM";
@@ -31,21 +32,18 @@
 		enable-active-high;
 	};
 
+	vbat: fixedregulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "vbat";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
+	};
+
 	lcd0: display {
 		compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
 		label = "lcd";
 
-		pinctrl-names = "default";
-		pinctrl-0 = <&lcd_pins>;
-
-		/*
-		 * SelLCDorHDMI, LOW to select HDMI. This is not really the
-		 * panel's enable GPIO, but we don't have HDMI driver support nor
-		 * support to switch between two displays, so using this gpio as
-		 * panel's enable should be safe.
-		 */
-		enable-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
-
 		panel-timing {
 			clock-frequency = <33000000>;
 			hactive = <800>;
@@ -108,6 +106,38 @@
 		brightness-levels = <0 51 53 56 62 75 101 152 255>;
 		default-brightness-level = <8>;
 	};
+
+	sound0: sound@0 {
+		compatible = "simple-audio-card";
+		simple-audio-card,name = "AM43-EPOS-EVM";
+		simple-audio-card,widgets =
+			"Microphone", "Microphone Jack",
+			"Headphone", "Headphone Jack",
+			"Speaker", "Speaker";
+		simple-audio-card,routing =
+			"MIC1LP", "Microphone Jack",
+			"MIC1RP", "Microphone Jack",
+			"MIC1LP", "MICBIAS",
+			"MIC1RP", "MICBIAS",
+			"Headphone Jack", "HPL",
+			"Headphone Jack", "HPR",
+			"Speaker", "SPL",
+			"Speaker", "SPR";
+		simple-audio-card,format = "dsp_b";
+		simple-audio-card,bitclock-master = <&sound0_master>;
+		simple-audio-card,frame-master = <&sound0_master>;
+		simple-audio-card,bitclock-inversion;
+
+		simple-audio-card,cpu {
+			sound-dai = <&mcasp1>;
+			system-clock-frequency = <12000000>;
+		};
+
+		sound0_master: simple-audio-card,codec {
+			sound-dai = <&tlv320aic3111>;
+			system-clock-frequency = <12000000>;
+		};
+	};
 };
 
 &am43xx_pinmux {
@@ -278,7 +308,7 @@
 			>;
 		};
 
-		lcd_pins: lcd_pins {
+		display_mux_pins: display_mux_pins {
 			pinctrl-single,pins = <
 				/* GPMC CLK -> GPIO 2_1 to select LCD / HDMI */
 				0x08C (PIN_OUTPUT_PULLUP | MUX_MODE7)
@@ -320,6 +350,24 @@
 				0x204 (DS0_PULL_UP_DOWN_EN | INPUT_EN | MUX_MODE7)
 			>;
 		};
+
+		mcasp1_pins: mcasp1_pins {
+			pinctrl-single,pins = <
+				0x1a0 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* MCASP0_ACLKR/MCASP1_ACLKX */
+				0x1a4 (PIN_INPUT_PULLDOWN | MUX_MODE3) /* MCASP0_FSR/MCASP1_FSX */
+				0x1a8 (PIN_OUTPUT_PULLDOWN | MUX_MODE3)/* MCASP0_AXR1/MCASP1_AXR0 */
+				0x1ac (PIN_INPUT_PULLDOWN | MUX_MODE3) /* MCASP0_AHCLKX/MCASP1_AXR1 */
+			>;
+		};
+
+		mcasp1_sleep_pins: mcasp1_sleep_pins {
+			pinctrl-single,pins = <
+				0x1a0 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+				0x1a4 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+				0x1a8 (PIN_INPUT_PULLDOWN | MUX_MODE7)
+				0x1ac (PIN_INPUT_PULLDOWN | MUX_MODE7)
+			>;
+		};
 };
 
 &mmc1 {
@@ -399,6 +447,15 @@
 			regulator-always-on;
 		};
 
+		dcdc4: regulator-dcdc4 {
+			compatible = "ti,tps65218-dcdc4";
+			regulator-name = "vdcdc4";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-boot-on;
+			regulator-always-on;
+		};
+
 		dcdc5: regulator-dcdc5 {
 			compatible = "ti,tps65218-dcdc5";
 			regulator-name = "v1_0bat";
@@ -441,6 +498,23 @@
 		touchscreen-size-x = <1024>;
 		touchscreen-size-y = <600>;
 	};
+
+	tlv320aic3111: tlv320aic3111@18 {
+		#sound-dai-cells = <0>;
+		compatible = "ti,tlv320aic3111";
+		reg = <0x18>;
+		status = "okay";
+
+		ai31xx-micbias-vg = <MICBIAS_2_0V>;
+
+		/* Regulators */
+		HPVDD-supply = <&dcdc4>; /* v3_3AUD -> V3_3D -> DCDC4 */
+		SPRVDD-supply = <&vbat>; /* vbat */
+		SPLVDD-supply = <&vbat>; /* vbat */
+		AVDD-supply = <&dcdc4>; /* v3_3AUD -> V3_3D -> DCDC4 */
+		IOVDD-supply = <&dcdc4>; /* V3_3D -> DCDC4 */
+		DVDD-supply = <&ldo1>; /* V1_8AUD -> V1_8D -> LDO1 */
+	};
 };
 
 &i2c2 {
@@ -458,7 +532,21 @@
 };
 
 &gpio2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&display_mux_pins>;
 	status = "okay";
+
+	p1 {
+		/*
+		 * SelLCDorHDMI selects between display and audio paths:
+		 * Low: HDMI display with audio via HDMI
+		 * High: LCD display with analog audio via aic3111 codec
+		 */
+		gpio-hog;
+		gpios = <1 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "SelLCDorHDMI";
+	};
 };
 
 &gpio3 {
@@ -686,3 +774,21 @@
 		};
 	};
 };
+
+&mcasp1 {
+	#sound-dai-cells = <0>;
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <&mcasp1_pins>;
+	pinctrl-1 = <&mcasp1_sleep_pins>;
+
+	status = "okay";
+
+	op-mode = <0>;          /* MCASP_IIS_MODE */
+	tdm-slots = <2>;
+	/* 4 serializers */
+	serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+		1 2 0 0
+	>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
+};
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index d0c0dfa..cc88728 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -486,6 +486,15 @@
 		reg = <0x4238>;
 	};
 
+	dpll_clksel_mac_clk: dpll_clksel_mac_clk {
+		#clock-cells = <0>;
+		compatible = "ti,divider-clock";
+		clocks = <&dpll_core_m5_ck>;
+		reg = <0x4234>;
+		ti,bit-shift = <2>;
+		ti,dividers = <2>, <5>;
+	};
+
 	clk_32k_mosc_ck: clk_32k_mosc_ck {
 		#clock-cells = <0>;
 		compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index a63bf78..3a05b94 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -580,7 +580,6 @@
 
 	vmmc-supply = <&ldo1_reg>;
 	vmmc_aux-supply = <&vdd_3v3>;
-	pbias-supply = <&pbias_mmc_reg>;
 	bus-width = <4>;
 	cd-gpios = <&gpio6 27 0>; /* gpio 219 */
 };
@@ -693,3 +692,7 @@
 		};
 	};
 };
+
+&pcie1 {
+	gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
+};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 67a0ab0..e9a3817 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -176,6 +176,10 @@
 				reg = <0x8000 0x1000>;
 				cache-unified;
 				cache-level = <2>;
+				arm,double-linefill-incr = <1>;
+				arm,double-linefill-wrap = <0>;
+				arm,double-linefill = <1>;
+				prefetch-data = <1>;
 			};
 
 			scu@c000 {
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index fd4f6fd..353c925 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -81,10 +81,6 @@
 				pinctrl-0 = <&i2c0_pins>;
 				status = "okay";
 				clock-frequency = <100000>;
-				/*
-				 * The EEPROM located at adresse 54 is needed
-				 * for the boot - DO NOT ERASE IT -
-				 */
 
 				expander0: pca9555@20 {
 					compatible = "nxp,pca9555";
@@ -111,6 +107,10 @@
 					reg = <0x21>;
 				};
 
+				eeprom@57 {
+					compatible = "atmel,24c64";
+					reg = <0x57>;
+				};
 			};
 
 			serial@12000 {
@@ -301,9 +301,11 @@
 	reg_sata0: pwr-sata0 {
 		compatible = "regulator-fixed";
 		regulator-name = "pwr_en_sata0";
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
 		enable-active-high;
 		regulator-always-on;
-
+		gpio = <&expander0 2 GPIO_ACTIVE_HIGH>;
 	};
 
 	reg_5v_sata0: v5-sata0 {
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 04ecfe6..f9f2347 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -143,6 +143,10 @@
 				reg = <0x8000 0x1000>;
 				cache-unified;
 				cache-level = <2>;
+				arm,double-linefill-incr = <1>;
+				arm,double-linefill-wrap = <0>;
+				arm,double-linefill = <1>;
+				prefetch-data = <1>;
 			};
 
 			scu@c000 {
@@ -450,7 +454,7 @@
 			};
 
 			xor@60800 {
-				compatible = "marvell,orion-xor";
+				compatible = "marvell,armada-380-xor", "marvell,orion-xor";
 				reg = <0x60800 0x100
 				       0x60a00 0x100>;
 				clocks = <&gateclk 22>;
@@ -470,7 +474,7 @@
 			};
 
 			xor@60900 {
-				compatible = "marvell,orion-xor";
+				compatible = "marvell,armada-380-xor", "marvell,orion-xor";
 				reg = <0x60900 0x100
 				       0x60b00 0x100>;
 				clocks = <&gateclk 28>;
diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi
index fc9864f..dc6efd3 100644
--- a/arch/arm/boot/dts/armada-39x.dtsi
+++ b/arch/arm/boot/dts/armada-39x.dtsi
@@ -104,6 +104,10 @@
 				reg = <0x8000 0x1000>;
 				cache-unified;
 				cache-level = <2>;
+				arm,double-linefill-incr = <1>;
+				arm,double-linefill-wrap = <0>;
+				arm,double-linefill = <1>;
+				prefetch-data = <1>;
 			};
 
 			scu@c000 {
@@ -325,7 +329,7 @@
 			};
 
 			xor@60800 {
-				compatible = "marvell,orion-xor";
+				compatible = "marvell,armada-380-xor", "marvell,orion-xor";
 				reg = <0x60800 0x100
 				       0x60a00 0x100>;
 				clocks = <&gateclk 22>;
@@ -345,7 +349,7 @@
 			};
 
 			xor@60900 {
-				compatible = "marvell,orion-xor";
+				compatible = "marvell,armada-380-xor", "marvell,orion-xor";
 				reg = <0x60900 0x100
 				       0x60b00 0x100>;
 				clocks = <&gateclk 28>;
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
new file mode 100644
index 0000000..e8d63afd
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -0,0 +1,134 @@
+/*
+ * at91-sama5d2_xplained.dts - Device Tree file for SAMA5D2 Xplained board
+ *
+ *  Copyright (C) 2015 Atmel,
+ *                2015 Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+/dts-v1/;
+#include "sama5d2.dtsi"
+
+/ {
+	model = "Atmel SAMA5D2 Xplained";
+	compatible = "atmel,sama5d2-xplained", "atmel,sama5d2", "atmel,sama5";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		reg = <0x20000000 0x80000>;
+	};
+
+	clocks {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		main_clock: clock@0 {
+			compatible = "atmel,osc", "fixed-clock";
+			clock-frequency = <12000000>;
+		};
+
+		slow_xtal {
+			clock-frequency = <32768>;
+		};
+
+		main_xtal {
+			clock-frequency = <12000000>;
+		};
+	};
+
+	ahb {
+		usb0: gadget@00300000 {
+			status = "okay";
+		};
+
+		usb1: ohci@00400000 {
+			num-ports = <3>;
+			status = "okay";
+		};
+
+		usb2: ehci@00500000 {
+			status = "okay";
+		};
+
+		apb {
+			spi0: spi@f8000000 {
+				status = "okay";
+
+				m25p80@0 {
+					compatible = "atmel,at25df321a";
+					reg = <0>;
+					spi-max-frequency = <50000000>;
+				};
+			};
+
+			macb0: ethernet@f8008000 {
+				phy-mode = "rmii";
+				status = "okay";
+			};
+
+			uart1: serial@f8020000 {
+				status = "okay";
+			};
+
+			i2c0: i2c@f8028000 {
+				dmas = <0>, <0>;
+				status = "okay";
+			};
+
+			uart3: serial@fc008000 {
+				status = "okay";
+			};
+
+			i2c1: i2c@fc028000 {
+				dmas = <0>, <0>;
+				status = "okay";
+
+				at24@54 {
+					compatible = "atmel,24c02";
+					reg = <0x54>;
+					pagesize = <16>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 22ad7c9..07f4696 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -131,6 +131,15 @@
 			};
 
 			adc0: adc@fc034000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <
+					/* external trigger conflicts with USBA_VBUS */
+					&pinctrl_adc0_ad0
+					&pinctrl_adc0_ad1
+					&pinctrl_adc0_ad2
+					&pinctrl_adc0_ad3
+					&pinctrl_adc0_ad4
+					>;
 				atmel,adc-vref = <3300>;
 				status = "okay";
 			};
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index d782f29..49a59c7 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -100,6 +100,15 @@
 			};
 
 			adc0: adc@fc034000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <
+					/* external trigger conflicts with USBA_VBUS */
+					&pinctrl_adc0_ad0
+					&pinctrl_adc0_ad1
+					&pinctrl_adc0_ad2
+					&pinctrl_adc0_ad3
+					&pinctrl_adc0_ad4
+					>;
 				/* The vref depends on JP22 of EK. If connect 1-2 then use 3.3V. connect 2-3 use 3.0V */
 				atmel,adc-vref = <3300>;
 				/*atmel,adc-ts-wires = <4>;*/	/* Set up ADC touch screen */
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index e3cfb99..60edd8b 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -359,6 +359,7 @@
 				compatible = "atmel,at91rm9200-st", "syscon", "simple-mfd";
 				reg = <0xfffffd00 0x100>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&slow_xtal>;
 
 				watchdog {
 					compatible = "atmel,at91rm9200-wdt";
@@ -369,6 +370,7 @@
 				compatible = "atmel,at91rm9200-rtc";
 				reg = <0xfffffe00 0x40>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&slow_xtal>;
 				status = "disabled";
 			};
 
@@ -378,8 +380,8 @@
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0
 					      18 IRQ_TYPE_LEVEL_HIGH 0
 					      19 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>, <&slow_xtal>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			tcb1: timer@fffa4000 {
@@ -388,8 +390,8 @@
 				interrupts = <20 IRQ_TYPE_LEVEL_HIGH 0
 					      21 IRQ_TYPE_LEVEL_HIGH 0
 					      22 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tc3_clk>, <&tc4_clk>, <&tc5_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tc3_clk>, <&tc4_clk>, <&tc5_clk>, <&slow_xtal>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			i2c0: i2c@fffb8000 {
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 4bc3475..be9c027 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -359,11 +359,13 @@
 			rstc@fffffd00 {
 				compatible = "atmel,at91sam9260-rstc";
 				reg = <0xfffffd00 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			shdwc@fffffd10 {
 				compatible = "atmel,at91sam9260-shdwc";
 				reg = <0xfffffd10 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fffffd30 {
@@ -379,8 +381,8 @@
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0
 					      18 IRQ_TYPE_LEVEL_HIGH 0
 					      19 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>, <&clk32k>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			tcb1: timer@fffdc000 {
@@ -389,8 +391,8 @@
 				interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0
 					      27 IRQ_TYPE_LEVEL_HIGH 0
 					      28 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tc3_clk>, <&tc4_clk>, <&tc5_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tc3_clk>, <&tc4_clk>, <&tc5_clk>, <&clk32k>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			pinctrl@fffff400 {
@@ -973,6 +975,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index b2c44a0..ce1e3e9 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -119,8 +119,8 @@
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>,
 					     <18 IRQ_TYPE_LEVEL_HIGH 0>,
 					     <19 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>, <&slow_xtal>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			usb1: gadget@fffa4000 {
@@ -820,11 +820,13 @@
 			rstc@fffffd00 {
 				compatible = "atmel,at91sam9260-rstc";
 				reg = <0xfffffd00 0x10>;
+				clocks = <&slow_xtal>;
 			};
 
 			shdwc@fffffd10 {
 				compatible = "atmel,at91sam9260-shdwc";
 				reg = <0xfffffd10 0x10>;
+				clocks = <&slow_xtal>;
 			};
 
 			pit: timer@fffffd30 {
@@ -846,6 +848,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&slow_xtal>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index e36d966..f1f5fa3 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -377,18 +377,20 @@
 				compatible = "atmel,at91rm9200-tcb";
 				reg = <0xfff7c000 0x100>;
 				interrupts = <19 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb_clk>, <&slow_xtal>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			rstc@fffffd00 {
 				compatible = "atmel,at91sam9260-rstc";
 				reg = <0xfffffd00 0x10>;
+				clocks = <&slow_xtal>;
 			};
 
 			shdwc@fffffd10 {
 				compatible = "atmel,at91sam9260-shdwc";
 				reg = <0xfffffd10 0x10>;
+				clocks = <&slow_xtal>;
 			};
 
 			pinctrl@fffff200 {
@@ -902,6 +904,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&slow_xtal>;
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
diff --git a/arch/arm/boot/dts/at91sam9g15.dtsi b/arch/arm/boot/dts/at91sam9g15.dtsi
index cfd7044..27de7dc 100644
--- a/arch/arm/boot/dts/at91sam9g15.dtsi
+++ b/arch/arm/boot/dts/at91sam9g15.dtsi
@@ -7,6 +7,7 @@
  */
 
 #include "at91sam9x5.dtsi"
+#include "at91sam9x5_lcd.dtsi"
 
 / {
 	model = "Atmel AT91SAM9G15 SoC";
diff --git a/arch/arm/boot/dts/at91sam9g15ek.dts b/arch/arm/boot/dts/at91sam9g15ek.dts
index 26b0444..d1d2b40 100644
--- a/arch/arm/boot/dts/at91sam9g15ek.dts
+++ b/arch/arm/boot/dts/at91sam9g15ek.dts
@@ -8,9 +8,34 @@
  */
 /dts-v1/;
 #include "at91sam9g15.dtsi"
+#include "at91sam9x5dm.dtsi"
 #include "at91sam9x5ek.dtsi"
 
 / {
 	model = "Atmel AT91SAM9G15-EK";
 	compatible = "atmel,at91sam9g15ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
+
+	ahb {
+		apb {
+			hlcdc: hlcdc@f8038000 {
+				status = "okay";
+			};
+		};
+	};
+
+	backlight: backlight {
+		status = "okay";
+	};
+
+	bl_reg: backlight_regulator {
+		status = "okay";
+	};
+
+	panel: panel {
+		status = "okay";
+	};
+
+	panel_reg: panel_regulator {
+		status = "okay";
+	};
 };
diff --git a/arch/arm/boot/dts/at91sam9g35.dtsi b/arch/arm/boot/dts/at91sam9g35.dtsi
index e35c2fc..ff41158 100644
--- a/arch/arm/boot/dts/at91sam9g35.dtsi
+++ b/arch/arm/boot/dts/at91sam9g35.dtsi
@@ -7,6 +7,7 @@
  */
 
 #include "at91sam9x5.dtsi"
+#include "at91sam9x5_lcd.dtsi"
 #include "at91sam9x5_macb0.dtsi"
 
 / {
diff --git a/arch/arm/boot/dts/at91sam9g35ek.dts b/arch/arm/boot/dts/at91sam9g35ek.dts
index 641a9bf..23ec8b1 100644
--- a/arch/arm/boot/dts/at91sam9g35ek.dts
+++ b/arch/arm/boot/dts/at91sam9g35ek.dts
@@ -8,6 +8,7 @@
  */
 /dts-v1/;
 #include "at91sam9g35.dtsi"
+#include "at91sam9x5dm.dtsi"
 #include "at91sam9x5ek.dtsi"
 
 / {
@@ -20,6 +21,26 @@
 				phy-mode = "rmii";
 				status = "okay";
 			};
+
+			hlcdc: hlcdc@f8038000 {
+				status = "okay";
+			};
 		};
 	};
+
+	backlight: backlight {
+		status = "okay";
+	};
+
+	bl_reg: backlight_regulator {
+		status = "okay";
+	};
+
+	panel: panel {
+		status = "okay";
+	};
+
+	panel_reg: panel_regulator {
+		status = "okay";
+	};
 };
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 18177f5..18b8b9e 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -387,6 +387,7 @@
 			rstc@fffffd00 {
 				compatible = "atmel,at91sam9g45-rstc";
 				reg = <0xfffffd00 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fffffd30 {
@@ -400,22 +401,23 @@
 			shdwc@fffffd10 {
 				compatible = "atmel,at91sam9rl-shdwc";
 				reg = <0xfffffd10 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			tcb0: timer@fff7c000 {
 				compatible = "atmel,at91rm9200-tcb";
 				reg = <0xfff7c000 0x100>;
 				interrupts = <18 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb0_clk>, <&tcb0_clk>, <&tcb0_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tcb0_clk>, <&tcb0_clk>, <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			tcb1: timer@fffd4000 {
 				compatible = "atmel,at91rm9200-tcb";
 				reg = <0xfffd4000 0x100>;
 				interrupts = <18 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb0_clk>, <&tcb0_clk>, <&tcb0_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tcb0_clk>, <&tcb0_clk>, <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			dma: dma-controller@ffffec00 {
@@ -498,23 +500,31 @@
 				};
 
 				isi {
-					pinctrl_isi: isi-0 {
-						atmel,pins = <AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE /* D8 */
-							      AT91_PIOB 9 AT91_PERIPH_B AT91_PINCTRL_NONE /* D9 */
-							      AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE /* D10 */
-							      AT91_PIOB 11 AT91_PERIPH_B AT91_PINCTRL_NONE /* D11 */
-							      AT91_PIOB 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* D0 */
-							      AT91_PIOB 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* D1 */
-							      AT91_PIOB 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* D2 */
-							      AT91_PIOB 23 AT91_PERIPH_A AT91_PINCTRL_NONE /* D3 */
-							      AT91_PIOB 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* D4 */
-							      AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* D5 */
-							      AT91_PIOB 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* D6 */
-							      AT91_PIOB 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* D7 */
-							      AT91_PIOB 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PCK */
-							      AT91_PIOB 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* VSYNC */
-							      AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE /* HSYNC */
-							      AT91_PIOB 31 AT91_PERIPH_A AT91_PINCTRL_NONE /* MCK */>;
+					pinctrl_isi_data_0_7: isi-0-data-0-7 {
+						atmel,pins =
+							<AT91_PIOB 20 AT91_PERIPH_A AT91_PINCTRL_NONE /* D0 */
+							AT91_PIOB 21 AT91_PERIPH_A AT91_PINCTRL_NONE /* D1 */
+							AT91_PIOB 22 AT91_PERIPH_A AT91_PINCTRL_NONE /* D2 */
+							AT91_PIOB 23 AT91_PERIPH_A AT91_PINCTRL_NONE /* D3 */
+							AT91_PIOB 24 AT91_PERIPH_A AT91_PINCTRL_NONE /* D4 */
+							AT91_PIOB 25 AT91_PERIPH_A AT91_PINCTRL_NONE /* D5 */
+							AT91_PIOB 26 AT91_PERIPH_A AT91_PINCTRL_NONE /* D6 */
+							AT91_PIOB 27 AT91_PERIPH_A AT91_PINCTRL_NONE /* D7 */
+							AT91_PIOB 28 AT91_PERIPH_A AT91_PINCTRL_NONE /* PCK */
+							AT91_PIOB 29 AT91_PERIPH_A AT91_PINCTRL_NONE /* VSYNC */
+							AT91_PIOB 30 AT91_PERIPH_A AT91_PINCTRL_NONE>; /* HSYNC */
+					};
+
+					pinctrl_isi_data_8_9: isi-0-data-8-9 {
+						atmel,pins =
+							<AT91_PIOB 8 AT91_PERIPH_B AT91_PINCTRL_NONE /* D8 */
+							AT91_PIOB 9 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* D9 */
+					};
+
+					pinctrl_isi_data_10_11: isi-0-data-10-11 {
+						atmel,pins =
+							<AT91_PIOB 10 AT91_PERIPH_B AT91_PINCTRL_NONE /* D10 */
+							AT91_PIOB 11 AT91_PERIPH_B AT91_PINCTRL_NONE>; /* D11 */
 					};
 				};
 
@@ -1067,9 +1077,11 @@
 				interrupts = <26 IRQ_TYPE_LEVEL_HIGH 5>;
 				clocks = <&isi_clk>;
 				clock-names = "isi_clk";
-				pinctrl-names = "default";
-				pinctrl-0 = <&pinctrl_isi>;
 				status = "disabled";
+				port {
+					#address-cells = <1>;
+					#size-cells = <0>;
+				};
 			};
 
 			pwm0: pwm@fffb8000 {
@@ -1113,6 +1125,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
@@ -1247,6 +1260,7 @@
 				compatible = "atmel,at91rm9200-rtc";
 				reg = <0xfffffdb0 0x30>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 1375d33..d1ae60a 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -63,6 +63,25 @@
 
 			i2c0: i2c@fff84000 {
 				status = "okay";
+				ov2640: camera@30 {
+					compatible = "ovti,ov2640";
+					reg = <0x30>;
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_pck1_as_isi_mck &pinctrl_sensor_power &pinctrl_sensor_reset>;
+					resetb-gpios = <&pioD 12 GPIO_ACTIVE_LOW>;
+					pwdn-gpios = <&pioD 13 GPIO_ACTIVE_HIGH>;
+					clocks = <&pck1>;
+					clock-names = "xvclk";
+					assigned-clocks = <&pck1>;
+					assigned-clock-rates = <25000000>;
+
+					port {
+						ov2640_0: endpoint {
+							remote-endpoint = <&isi_0>;
+							bus-width = <8>;
+						};
+					};
+				};
 			};
 
 			i2c1: i2c@fff88000 {
@@ -101,6 +120,22 @@
 			};
 
 			pinctrl@fffff200 {
+				camera_sensor {
+					pinctrl_pck1_as_isi_mck: pck1_as_isi_mck-0 {
+						atmel,pins =
+							<AT91_PIOB 31 AT91_PERIPH_B AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_sensor_reset: sensor_reset-0 {
+						atmel,pins =
+							<AT91_PIOD 12 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+					};
+
+					pinctrl_sensor_power: sensor_power-0 {
+						atmel,pins =
+							<AT91_PIOD 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+					};
+				};
 				mmc0 {
 					pinctrl_board_mmc0: mmc0-board {
 						atmel,pins =
@@ -155,6 +190,18 @@
 				status = "okay";
 			};
 
+			isi@fffb4000 {
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_isi_data_0_7>;
+				status = "okay";
+				port {
+					isi_0: endpoint {
+						remote-endpoint = <&ov2640_0>;
+						bus-width = <8>;
+					};
+				};
+			};
+
 			pwm0: pwm@fffb8000 {
 				status = "okay";
 
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 5c2a8c8..32bc9a1 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -376,6 +376,7 @@
 			rstc@fffffe00 {
 				compatible = "atmel,at91sam9g45-rstc";
 				reg = <0xfffffe00 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fffffe30 {
@@ -388,6 +389,7 @@
 			shdwc@fffffe10 {
 				compatible = "atmel,at91sam9x5-shdwc";
 				reg = <0xfffffe10 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			sckc@fffffe50 {
@@ -431,16 +433,44 @@
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf8008000 0x100>;
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			tcb1: timer@f800c000 {
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf800c000 0x100>;
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
+			};
+
+			hlcdc: hlcdc@f8038000 {
+				compatible = "atmel,at91sam9n12-hlcdc";
+				reg = <0xf8038000 0x2000>;
+				interrupts = <25 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+				clock-names = "periph_clk", "sys_clk", "slow_clk";
+				status = "disabled";
+
+				hlcdc-display-controller {
+					compatible = "atmel,hlcdc-display-controller";
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						#address-cells = <1>;
+						#size-cells = <0>;
+						reg = <0>;
+					};
+				};
+
+				hlcdc_pwm: hlcdc-pwm {
+					compatible = "atmel,hlcdc-pwm";
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_lcd_pwm>;
+					#pwm-cells = <3>;
+				};
 			};
 
 			dma: dma-controller@ffffec00 {
@@ -475,6 +505,49 @@
 					};
 				};
 
+				lcd {
+					pinctrl_lcd_base: lcd-base-0 {
+						atmel,pins =
+							<AT91_PIOC 27 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDVSYNC */
+							 AT91_PIOC 28 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDHSYNC */
+							 AT91_PIOC 24 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDDISP */
+							 AT91_PIOC 29 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDDEN */
+							 AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDPCK */
+					};
+
+					pinctrl_lcd_pwm: lcd-pwm-0 {
+						atmel,pins = <AT91_PIOC 26 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDPWM */
+					};
+
+					pinctrl_lcd_rgb888: lcd-rgb-3 {
+						atmel,pins =
+							<AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD0 pin */
+							 AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD1 pin */
+							 AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD2 pin */
+							 AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD3 pin */
+							 AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD4 pin */
+							 AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD5 pin */
+							 AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD6 pin */
+							 AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD7 pin */
+							 AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD8 pin */
+							 AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD9 pin */
+							 AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD10 pin */
+							 AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD11 pin */
+							 AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD12 pin */
+							 AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD13 pin */
+							 AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD14 pin */
+							 AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD15 pin */
+							 AT91_PIOC 16 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD16 pin */
+							 AT91_PIOC 17 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD17 pin */
+							 AT91_PIOC 18 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD18 pin */
+							 AT91_PIOC 19 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD19 pin */
+							 AT91_PIOC 20 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD20 pin */
+							 AT91_PIOC 21 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD21 pin */
+							 AT91_PIOC 22 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD22 pin */
+							 AT91_PIOC 23 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDD23 pin */
+					};
+				};
+
 				usart0 {
 					pinctrl_usart0: usart0-0 {
 						atmel,pins =
@@ -891,6 +964,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffe40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
@@ -901,6 +975,7 @@
 				compatible = "atmel,at91rm9200-rtc";
 				reg = <0xfffffeb0 0x40>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index eab17fc..efa7506 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -128,6 +128,22 @@
 				};
 			};
 
+			hlcdc: hlcdc@f8038000 {
+				status = "okay";
+
+				hlcdc-display-controller {
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_lcd_base &pinctrl_lcd_rgb888>;
+
+					port@0 {
+						hlcdc_panel_output: endpoint@0 {
+							reg = <0>;
+							remote-endpoint = <&panel_input>;
+						};
+					};
+				};
+			};
+
 			usb1: gadget@f803c000 {
 				pinctrl-names = "default";
 				pinctrl-0 = <&pinctrl_usb1_vbus_sense>;
@@ -161,6 +177,23 @@
 		};
 	};
 
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&hlcdc_pwm 0 50000 0>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <6>;
+		power-supply = <&bl_reg>;
+		status = "okay";
+	};
+
+	bl_reg: backlight_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "backlight-power-supply";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		status = "okay";
+	};
+
 	leds {
 		compatible = "gpio-leds";
 
@@ -194,6 +227,34 @@
 		};
 	};
 
+	panel: panel {
+		compatible = "qd,qd43003c0-40", "simple-panel";
+		backlight = <&backlight>;
+		power-supply = <&panel_reg>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "okay";
+
+		port@0 {
+			reg = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			panel_input: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&hlcdc_panel_output>;
+			};
+		};
+	};
+
+	panel_reg: panel_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "panel-power-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		status = "okay";
+	};
+
 	sound {
 		compatible = "atmel,asoc-wm8904";
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
index c9920c6..a0b90ae 100644
--- a/arch/arm/boot/dts/at91sam9rl.dtsi
+++ b/arch/arm/boot/dts/at91sam9rl.dtsi
@@ -121,8 +121,8 @@
 				interrupts = <16 IRQ_TYPE_LEVEL_HIGH 0>,
 					     <17 IRQ_TYPE_LEVEL_HIGH 0>,
 					     <18 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>;
-				clock-names = "t0_clk", "t1_clk", "t2_clk";
+				clocks = <&tc0_clk>, <&tc1_clk>, <&tc2_clk>, <&clk32k>;
+				clock-names = "t0_clk", "t1_clk", "t2_clk", "slow_clk";
 			};
 
 			mmc0: mmc@fffa4000 {
@@ -1018,11 +1018,13 @@
 			rstc@fffffd00 {
 				compatible = "atmel,at91sam9260-rstc";
 				reg = <0xfffffd00 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			shdwc@fffffd10 {
 				compatible = "atmel,at91sam9260-shdwc";
 				reg = <0xfffffd10 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fffffd30 {
@@ -1036,6 +1038,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffd40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
@@ -1083,6 +1086,7 @@
 				compatible = "atmel,at91rm9200-rtc";
 				reg = <0xfffffe00 0x40>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9x35.dtsi b/arch/arm/boot/dts/at91sam9x35.dtsi
index 499cdc8..d9054e8 100644
--- a/arch/arm/boot/dts/at91sam9x35.dtsi
+++ b/arch/arm/boot/dts/at91sam9x35.dtsi
@@ -7,6 +7,7 @@
  */
 
 #include "at91sam9x5.dtsi"
+#include "at91sam9x5_lcd.dtsi"
 #include "at91sam9x5_macb0.dtsi"
 #include "at91sam9x5_can.dtsi"
 
diff --git a/arch/arm/boot/dts/at91sam9x35ek.dts b/arch/arm/boot/dts/at91sam9x35ek.dts
index 343d328..fcb6718 100644
--- a/arch/arm/boot/dts/at91sam9x35ek.dts
+++ b/arch/arm/boot/dts/at91sam9x35ek.dts
@@ -8,6 +8,7 @@
  */
 /dts-v1/;
 #include "at91sam9x35.dtsi"
+#include "at91sam9x5dm.dtsi"
 #include "at91sam9x5ek.dtsi"
 
 / {
@@ -20,6 +21,25 @@
 				phy-mode = "rmii";
 				status = "okay";
 			};
+			hlcdc: hlcdc@f8038000 {
+				status = "okay";
+			};
 		};
 	};
+
+	backlight: backlight {
+		status = "okay";
+	};
+
+	bl_reg: backlight_regulator {
+		status = "okay";
+	};
+
+	panel: panel {
+		status = "okay";
+	};
+
+	panel_reg: panel_regulator {
+		status = "okay";
+	};
 };
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index b6c8df8..747d8f0 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -376,11 +376,13 @@
 			rstc@fffffe00 {
 				compatible = "atmel,at91sam9g45-rstc";
 				reg = <0xfffffe00 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			shdwc@fffffe10 {
 				compatible = "atmel,at91sam9x5-shdwc";
 				reg = <0xfffffe10 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fffffe30 {
@@ -418,16 +420,16 @@
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf8008000 0x100>;
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb0_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			tcb1: timer@f800c000 {
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf800c000 0x100>;
 				interrupts = <17 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb0_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			dma0: dma-controller@ffffec00 {
@@ -1173,6 +1175,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffe40 0x10>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
@@ -1183,6 +1186,7 @@
 				compatible = "atmel,at91sam9x5-rtc";
 				reg = <0xfffffeb0 0x40>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91sam9x5_lcd.dtsi b/arch/arm/boot/dts/at91sam9x5_lcd.dtsi
index 485302e..1629db9 100644
--- a/arch/arm/boot/dts/at91sam9x5_lcd.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5_lcd.dtsi
@@ -13,6 +13,137 @@
 / {
 	ahb {
 		apb {
+			hlcdc: hlcdc@f8038000 {
+				compatible = "atmel,at91sam9x5-hlcdc";
+				reg = <0xf8038000 0x4000>;
+				interrupts = <25 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
+				clock-names = "periph_clk", "sys_clk", "slow_clk";
+				status = "disabled";
+
+				hlcdc-display-controller {
+					compatible = "atmel,hlcdc-display-controller";
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					port@0 {
+						#address-cells = <1>;
+						#size-cells = <0>;
+						reg = <0>;
+					};
+				};
+
+				hlcdc_pwm: hlcdc-pwm {
+					compatible = "atmel,hlcdc-pwm";
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_lcd_pwm>;
+					#pwm-cells = <3>;
+				};
+			};
+
+			pinctrl@fffff400 {
+				lcd {
+					pinctrl_lcd_base: lcd-base-0 {
+						atmel,pins =
+							<AT91_PIOC 27 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDVSYNC */
+							 AT91_PIOC 28 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDHSYNC */
+							 AT91_PIOC 24 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDDISP */
+							 AT91_PIOC 29 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDDEN */
+							 AT91_PIOC 30 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDPCK */
+					};
+
+					pinctrl_lcd_pwm: lcd-pwm-0 {
+						atmel,pins = <AT91_PIOC 26 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDPWM */
+					};
+
+					pinctrl_lcd_rgb444: lcd-rgb-0 {
+						atmel,pins =
+							<AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD0 pin */
+							 AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD1 pin */
+							 AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD2 pin */
+							 AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD3 pin */
+							 AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD4 pin */
+							 AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD5 pin */
+							 AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD6 pin */
+							 AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD7 pin */
+							 AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD8 pin */
+							 AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD9 pin */
+							 AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD10 pin */
+							 AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDD11 pin */
+					};
+
+					pinctrl_lcd_rgb565: lcd-rgb-1 {
+						atmel,pins =
+							<AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD0 pin */
+							 AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD1 pin */
+							 AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD2 pin */
+							 AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD3 pin */
+							 AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD4 pin */
+							 AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD5 pin */
+							 AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD6 pin */
+							 AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD7 pin */
+							 AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD8 pin */
+							 AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD9 pin */
+							 AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD10 pin */
+							 AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD11 pin */
+							 AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD12 pin */
+							 AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD13 pin */
+							 AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD14 pin */
+							 AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDD15 pin */
+					};
+
+					pinctrl_lcd_rgb666: lcd-rgb-2 {
+						atmel,pins =
+							<AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD0 pin */
+							 AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD1 pin */
+							 AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD2 pin */
+							 AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD3 pin */
+							 AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD4 pin */
+							 AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD5 pin */
+							 AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD6 pin */
+							 AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD7 pin */
+							 AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD8 pin */
+							 AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD9 pin */
+							 AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD10 pin */
+							 AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD11 pin */
+							 AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD12 pin */
+							 AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD13 pin */
+							 AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD14 pin */
+							 AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD15 pin */
+							 AT91_PIOC 16 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD16 pin */
+							 AT91_PIOC 17 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDD17 pin */
+					};
+
+					pinctrl_lcd_rgb888: lcd-rgb-3 {
+						atmel,pins =
+							<AT91_PIOC 0 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD0 pin */
+							 AT91_PIOC 1 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD1 pin */
+							 AT91_PIOC 2 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD2 pin */
+							 AT91_PIOC 3 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD3 pin */
+							 AT91_PIOC 4 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD4 pin */
+							 AT91_PIOC 5 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD5 pin */
+							 AT91_PIOC 6 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD6 pin */
+							 AT91_PIOC 7 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD7 pin */
+							 AT91_PIOC 8 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD8 pin */
+							 AT91_PIOC 9 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD9 pin */
+							 AT91_PIOC 10 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD10 pin */
+							 AT91_PIOC 11 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD11 pin */
+							 AT91_PIOC 12 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD12 pin */
+							 AT91_PIOC 13 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD13 pin */
+							 AT91_PIOC 14 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD14 pin */
+							 AT91_PIOC 15 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD15 pin */
+							 AT91_PIOC 16 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD16 pin */
+							 AT91_PIOC 17 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD17 pin */
+							 AT91_PIOC 18 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD18 pin */
+							 AT91_PIOC 19 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD19 pin */
+							 AT91_PIOC 20 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD20 pin */
+							 AT91_PIOC 21 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD21 pin */
+							 AT91_PIOC 22 AT91_PERIPH_A AT91_PINCTRL_NONE	/* LCDD22 pin */
+							 AT91_PIOC 23 AT91_PERIPH_A AT91_PINCTRL_NONE>;	/* LCDD23 pin */
+					};
+				};
+			};
+
 			pmc: pmc@fffffc00 {
 				periphck {
 					lcdc_clk: lcdc_clk {
@@ -20,6 +151,14 @@
 						reg = <25>;
 					};
 				};
+
+				systemck {
+					lcdck: lcdck {
+						#clock-cells = <0>;
+						reg = <3>;
+						clocks = <&mck>;
+					};
+				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/at91sam9x5dm.dtsi b/arch/arm/boot/dts/at91sam9x5dm.dtsi
new file mode 100644
index 0000000..34c089f
--- /dev/null
+++ b/arch/arm/boot/dts/at91sam9x5dm.dtsi
@@ -0,0 +1,101 @@
+/*
+ * at91sam9x5dm.dtsi - Device Tree file for SAM9x5 display module
+ *
+ *  Copyright (C) 2014 Atmel,
+ *                2014 Free Electrons
+ *
+ *  Author: Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/ {
+	ahb {
+		apb {
+			i2c0: i2c@f8010000 {
+				qt1070: keyboard@1b {
+					compatible = "qt1070";
+					reg = <0x1b>;
+					interrupt-parent = <&pioA>;
+					interrupts = <7 0x0>;
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_qt1070_irq>;
+					wakeup-source;
+				};
+			};
+
+			hlcdc: hlcdc@f8038000 {
+				hlcdc-display-controller {
+					pinctrl-names = "default";
+					pinctrl-0 = <&pinctrl_lcd_base &pinctrl_lcd_rgb888>;
+
+					port@0 {
+						hlcdc_panel_output: endpoint@0 {
+							reg = <0>;
+							remote-endpoint = <&panel_input>;
+						};
+					};
+				};
+			};
+
+			adc0: adc@f804c000 {
+				atmel,adc-ts-wires = <4>;
+				atmel,adc-ts-pressure-threshold = <10000>;
+				status = "okay";
+			};
+
+			pinctrl@fffff400 {
+				board {
+					pinctrl_qt1070_irq: qt1070_irq {
+						atmel,pins =
+							<AT91_PIOA 7 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+					};
+				};
+			};
+		};
+	};
+
+	backlight: backlight {
+		compatible = "pwm-backlight";
+		pwms = <&hlcdc_pwm 0 50000 0>;
+		brightness-levels = <0 4 8 16 32 64 128 255>;
+		default-brightness-level = <6>;
+		power-supply = <&bl_reg>;
+		status = "disabled";
+	};
+
+	bl_reg: backlight_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "backlight-power-supply";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		status = "disabled";
+	};
+
+	panel: panel {
+		compatible = "foxlink,fl500wvr00-a0t", "simple-panel";
+		backlight = <&backlight>;
+		power-supply = <&panel_reg>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+
+		port@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			panel_input: endpoint@0 {
+				reg = <0>;
+				remote-endpoint = <&hlcdc_panel_output>;
+			};
+		};
+	};
+
+	panel_reg: panel_regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "panel-power-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm/boot/dts/atlas7-evb.dts b/arch/arm/boot/dts/atlas7-evb.dts
index 49cf59a..1e9cd1a 100644
--- a/arch/arm/boot/dts/atlas7-evb.dts
+++ b/arch/arm/boot/dts/atlas7-evb.dts
@@ -10,6 +10,9 @@
 
 /include/ "atlas7.dtsi"
 
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	model = "CSR SiRFatlas7 Evaluation Board";
 	compatible = "sirf,atlas7-cb", "sirf,atlas7";
@@ -106,5 +109,20 @@
 				};
 			};
 		};
+
+		gpio_keys {
+			compatible = "gpio-keys";
+			status = "okay";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			rearview_key {
+				label = "rearview key";
+				linux,code = <KEY_CAMERA>;
+				gpios = <&gpio_1 3 GPIO_ACTIVE_LOW>;
+				debounce_interval = <100>;
+			};
+		};
+
 	};
 };
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
index 3e21311..83449b3 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
@@ -21,6 +21,10 @@
 		serial5 = &uart5;
 		serial6 = &uart6;
 		serial9 = &usp2;
+		spi1 = &spi1;
+		spi2 = &usp1;
+		spi3 = &usp2;
+		spi4 = &usp3;
 	};
 	cpus {
 		#address-cells = <1>;
@@ -53,6 +57,11 @@
 		};
 	};
 
+	arm-pmu {
+		compatible = "arm,cortex-a7-pmu";
+		interrupts = <0 29 4>, <0 82 4>;
+	};
+
 	noc {
 		compatible = "simple-bus";
 		#address-cells = <1>;
@@ -1205,7 +1214,8 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x18641000 0x18641000 0x3000>,
-					 <0x18620000 0x18620000 0x1000>;
+					 <0x18620000 0x18620000 0x1000>,
+					<0x18630000 0x18630000 0x10000>;
 
 			cgum@18641000 {
 				compatible = "sirf,nocfw-cgum";
@@ -1218,6 +1228,15 @@
 				#clock-cells = <1>;
 				#reset-cells = <1>;
 			};
+			pwm: pwm@18630000 {
+				compatible = "sirf,prima2-pwm";
+				#pwm-cells = <2>;
+				reg = <0x18630000 0x10000>;
+				clocks = <&car 138>, <&car 139>, <&car 237>,
+					<&car 240>,  <&car 140>, <&car 246>;
+				clock-names = "pwmc", "sigsrc0", "sigsrc1",
+					"sigsrc2", "sigsrc3", "sigsrc4";
+			};
 		};
 
 		gnssm {
@@ -1231,6 +1250,7 @@
 				<0x18040000 0x18040000 0x1000>,
 				<0x18050000 0x18050000 0x1000>,
 				<0x18060000 0x18060000 0x1000>,
+				<0x180b0000 0x180b0000 0x4000>,
 				<0x18100000 0x18100000 0x3000>,
 				<0x18250000 0x18250000 0x10000>,
 				<0x18200000 0x18200000 0x1000>;
@@ -1314,6 +1334,18 @@
 				dma-names = "rx", "tx";
 				status = "disabled";
 			};
+			gmac: eth@180b0000 {
+				compatible = "snps, dwc-eth-qos";
+				reg = <0x180b0000 0x4000>;
+				interrupts = <0 59 0>, <0 70 0>;
+				interrupt-names = "macirq", "macpmt";
+				clocks = <&car 39>, <&car 45>,
+				       <&car 86>, <&car 87>;
+				clock-names = "gnssm_rgmii", "gnssm_gmac",
+					"rgmii", "gmac";
+				local-mac-address = [00 00 00 00 00 00];
+				phy-mode = "rgmii";
+			};
 			dspub@18250000 {
 				compatible = "dx,cc44p";
 				reg = <0x18250000 0x10000>;
@@ -1338,18 +1370,51 @@
 			compatible = "arteris, flexnoc", "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
-			ranges = <0x13000000 0x13000000 0x3000>;
+			ranges = <0x13000000 0x13000000 0x3000>,
+				<0x13010000 0x13010000 0x1400>,
+				<0x13010800 0x13010800 0x100>,
+				<0x13011000 0x13011000 0x100>;
 			gpum@0x13000000 {
 				compatible = "sirf,nocfw-gpum";
 				reg = <0x13000000 0x3000>;
 			};
+			dmacsdrr: dma-controller@13010800 {
+				cell-index = <5>;
+				compatible = "sirf,atlas7-dmac-v2";
+				reg = <0x13010800 0x100>;
+				interrupts = <0 8 0>;
+				clocks = <&car 127>;
+				#dma-cells = <1>;
+				#dma-channels = <1>;
+			};
+			dmacsdrw: dma-controller@13011000 {
+				cell-index = <6>;
+				compatible = "sirf,atlas7-dmac-v2";
+				reg = <0x13011000 0x100>;
+				interrupts = <0 9 0>;
+				clocks = <&car 127>;
+				#dma-cells = <1>;
+				#dma-channels = <1>;
+			};
+			sdr@0x13010000 {
+				compatible = "sirf,atlas7-sdr";
+				reg = <0x13010000 0x1400>;
+				interrupts = <0 7 0>,
+					   <0 8 0>,
+					   <0 9 0>;
+				clocks = <&car 127>;
+				dmas = <&dmacsdrr 0>, <&dmacsdrw 0>;
+				dma-names = "tx", "rx";
+			};
 		};
 
 		mediam {
 			compatible = "arteris, flexnoc", "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
-			ranges = <0x16000000 0x16000000 0x00200000>,
+			ranges = <0x15000000 0x15000000 0x00600000>,
+				<0x16000000 0x16000000 0x00200000>,
+				<0x17000000 0x17000000 0x10000>,
 				<0x17020000 0x17020000 0x1000>,
 				<0x17030000 0x17030000 0x1000>,
 				<0x17040000 0x17040000 0x1000>,
@@ -1360,6 +1425,13 @@
 				<0x17070200 0x17070200 0x100>,
 				<0x170A0000 0x170A0000 0x3000>;
 
+			multimedia@15000000 {
+				compatible = "sirf,atlas7-video-codec";
+				reg = <0x15000000 0x10000>;
+				interrupts = <0 5 0>;
+				clocks = <&car 102>;
+			};
+
 			mediam@170A0000 {
 				compatible = "sirf,nocfw-mediam";
 				reg = <0x170A0000 0x3000>;
@@ -1386,6 +1458,8 @@
 			nand@17050000 {
 				compatible = "sirf,atlas7-nand";
 				reg = <0x17050000 0x10000>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&nd_df_pmx>;
 				interrupts = <0 41 0>;
 				clocks = <&car 108>, <&car 112>;
 				clock-names = "nand_io", "nand_nand";
@@ -1416,6 +1490,14 @@
 				bus-width = <8>;
 			};
 
+			jpeg@17000000 {
+				compatible = "sirf,atlas7-jpeg";
+				reg = <0x17000000 0x10000>;
+				interrupts = <0 72 0>,
+					<0 73 0>;
+				clocks = <&car 103>;
+			};
+
 			usb0: usb@17060000 {
 				cell-index = <0>;
 				compatible = "sirf,atlas7-usb";
@@ -1826,7 +1908,8 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges = <0x13100000 0x13100000 0x20000>,
-				 <0x10e10000 0x10e10000 0x10000>;
+				<0x10e10000 0x10e10000 0x10000>,
+				<0x17010000 0x17010000 0x10000>;
 
 			lcd@13100000 {
 				compatible = "sirf,atlas7-lcdc";
@@ -1848,6 +1931,12 @@
 				clocks = <&car 54>;
 				resets = <&car 29>;
 			};
+			g2d@17010000 {
+				compatible = "sirf, atlas7-g2d";
+				reg = <0x17010000 0x10000>;
+				interrupts = <0 61 0>;
+				clocks = <&car 104>;
+			};
 
 		};
 
diff --git a/arch/arm/boot/dts/axp152.dtsi b/arch/arm/boot/dts/axp152.dtsi
new file mode 100644
index 0000000..f90ad6c
--- /dev/null
+++ b/arch/arm/boot/dts/axp152.dtsi
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&axp152 {
+	compatible = "x-powers,axp152";
+	interrupt-controller;
+	#interrupt-cells = <1>;
+};
diff --git a/arch/arm/boot/dts/bcm-cygnus-clock.dtsi b/arch/arm/boot/dts/bcm-cygnus-clock.dtsi
index 60d8389..32bcd45 100644
--- a/arch/arm/boot/dts/bcm-cygnus-clock.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus-clock.dtsi
@@ -36,56 +36,89 @@
 	ranges;
 
 	osc: oscillator {
+		#clock-cells = <0>;
 		compatible = "fixed-clock";
-		#clock-cells = <1>;
 		clock-frequency = <25000000>;
 	};
 
+	/* Cygnus ARM PLL */
+	armpll: armpll {
+		#clock-cells = <0>;
+		compatible = "brcm,cygnus-armpll";
+		clocks = <&osc>;
+		reg = <0x19000000 0x1000>;
+	};
+
+	/* peripheral clock for system timer */
+	periph_clk: arm_periph_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&armpll>;
+		clock-div = <2>;
+		clock-mult = <1>;
+	};
+
+	/* APB bus clock */
 	apb_clk: apb_clk {
-		compatible = "fixed-clock";
 		#clock-cells = <0>;
-		clock-frequency = <1000000000>;
+		compatible = "fixed-factor-clock";
+		clocks = <&armpll>;
+		clock-div = <4>;
+		clock-mult = <1>;
 	};
 
-	periph_clk: periph_clk {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <500000000>;
+	genpll: genpll {
+		#clock-cells = <1>;
+		compatible = "brcm,cygnus-genpll";
+		reg = <0x0301d000 0x2c>, <0x0301c020 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "genpll", "axi21", "250mhz", "ihost_sys",
+				     "enet_sw", "audio_125", "can";
 	};
 
-	sdio_clk: lcpll_ch2 {
-		compatible = "fixed-clock";
+	/* always 1/2 of the axi21 clock */
+	axi41_clk: axi41_clk {
 		#clock-cells = <0>;
-		clock-frequency = <200000000>;
+		compatible = "fixed-factor-clock";
+		clocks = <&genpll 1>;
+		clock-div = <2>;
+		clock-mult = <1>;
 	};
 
+	/* always 1/4 of the axi21 clock */
 	axi81_clk: axi81_clk {
-		compatible = "fixed-clock";
 		#clock-cells = <0>;
-		clock-frequency = <100000000>;
+		compatible = "fixed-factor-clock";
+		clocks = <&genpll 1>;
+		clock-div = <4>;
+		clock-mult = <1>;
 	};
 
-	keypad_clk: keypad_clk {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <31806>;
+	lcpll0: lcpll0 {
+		#clock-cells = <1>;
+		compatible = "brcm,cygnus-lcpll0";
+		reg = <0x0301d02c 0x1c>, <0x0301c020 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "lcpll0", "pcie_phy", "ddr_phy", "sdio",
+				     "usb_phy", "smart_card", "ch5";
 	};
 
-	adc_clk: adc_clk {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <1562500>;
+	mipipll: mipipll {
+		#clock-cells = <1>;
+		compatible = "brcm,cygnus-mipipll";
+		reg = <0x180a9800 0x2c>, <0x0301c020 0x4>, <0x180aa024 0x4>;
+		clocks = <&osc>;
+		clock-output-names = "mipipll", "ch0_unused", "ch1_lcd",
+				     "ch2_v3d", "ch3_unused", "ch4_unused",
+				     "ch5_unused";
 	};
 
-	pwm_clk: pwm_clk {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <1000000>;
-	};
+	asiu_clks: asiu_clks {
+		#clock-cells = <1>;
+		compatible = "brcm,cygnus-asiu-clk";
+		reg = <0x0301d048 0xc>, <0x180aa024 0x4>;
 
-	lcd_clk: mipipll_ch1 {
-		compatible = "fixed-clock";
-		#clock-cells = <0>;
-		clock-frequency = <100000000>;
+		clocks = <&osc>;
+		clock-output-names = "keypad", "adc/touch", "pwm";
 	};
 };
diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
index 46780bb..ab5474e 100644
--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
+++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
@@ -14,6 +14,13 @@
 			linux,default-trigger = "heartbeat";
 		};
 	};
+
+	soc {
+		firmware: firmware {
+			compatible = "raspberrypi,bcm2835-firmware";
+			mboxes = <&mailbox>;
+		};
+	};
 };
 
 &gpio {
diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
index 24f0ab5..42dcdfb 100644
--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
@@ -135,3 +135,7 @@
 		};
 	};
 };
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts b/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
index f039393..f18e80e 100644
--- a/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
+++ b/arch/arm/boot/dts/bcm4708-luxul-xwc-1000.dts
@@ -55,3 +55,7 @@
 		};
 	};
 };
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index 326ce8f..64b8d10 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -24,16 +24,6 @@
 		reg = <0x00000000 0x08000000>;
 	};
 
-	chipcommonA {
-		uart0: serial@0300 {
-			status = "okay";
-		};
-
-		uart1: serial@0400 {
-			status = "okay";
-		};
-	};
-
 	leds {
 		compatible = "gpio-leds";
 
@@ -92,3 +82,7 @@
 		};
 	};
 };
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
index d6a033b..64a5e8a 100644
--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
@@ -118,3 +118,7 @@
 		};
 	};
 };
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
index bb0cb0b..38f0c00 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
@@ -122,3 +122,7 @@
 		};
 	};
 };
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 21fefd4..6f50f67 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -78,10 +78,20 @@
 			compatible = "arm,pl310-cache";
 			reg = <0x2000 0x1000>;
 			cache-unified;
+			arm,shared-override;
+			prefetch-data = <1>;
+			prefetch-instr = <1>;
 			cache-level = <2>;
 		};
 	};
 
+	pmu {
+		compatible = "arm,cortex-a9-pmu";
+		interrupts =
+			<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+			<GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+	};
+
 	clocks {
 		#address-cells = <1>;
 		#size-cells = <0>;
diff --git a/arch/arm/boot/dts/bcm7445.dtsi b/arch/arm/boot/dts/bcm7445.dtsi
index 58dcd66..3b6b175 100644
--- a/arch/arm/boot/dts/bcm7445.dtsi
+++ b/arch/arm/boot/dts/bcm7445.dtsi
@@ -109,6 +109,20 @@
 			brcm,int-fwd-mask = <0x70000>;
 		};
 
+		irq0_aon_intc: interrupt-controller@417280 {
+			compatible = "brcm,bcm7120-l2-intc";
+			reg = <0x417280 0x8>;
+			interrupt-parent = <&gic>;
+			#interrupt-cells = <1>;
+			interrupt-controller;
+			interrupts = <GIC_SPI 0x46 0x0>,
+				     <GIC_SPI 0x44 0x0>,
+				     <GIC_SPI 0x49 0x0>;
+			brcm,int-map-mask = <0x1e3 0x18000000 0x100000>;
+			brcm,int-fwd-mask = <0x0>;
+			brcm,irq-can-wake;
+		};
+
 		hif_intr2_intc: interrupt-controller@3e1000 {
 			compatible = "brcm,l2-intc";
 			reg = <0x3e1000 0x30>;
@@ -119,6 +133,16 @@
 			interrupt-names = "hif";
 		};
 
+		aon_pm_l2_intc: interrupt-controller@410640 {
+			compatible = "brcm,l2-intc";
+			reg = <0x410640 0x30>;
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupts = <GIC_SPI 0x40 0x0>;
+			interrupt-parent = <&gic>;
+			brcm,irq-can-wake;
+		};
+
 		nand: nand@3e2800 {
 			status = "disabled";
 			#address-cells = <1>;
@@ -167,6 +191,32 @@
 				#phy-cells = <0>;
 			};
 		};
+
+		upg_gio: gpio@40a700 {
+			compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+			reg = <0x40a700 0x80>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupt-parent = <&irq0_intc>;
+			interrupts = <6>;
+			brcm,gpio-bank-widths = <32 32 32 24>;
+		};
+
+		upg_gio_aon: gpio@4172c0 {
+			compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+			reg = <0x4172c0 0x40>;
+			#gpio-cells = <2>;
+			#interrupt-cells = <2>;
+			gpio-controller;
+			interrupt-controller;
+			interrupts-extended = <&irq0_aon_intc 0x6>,
+					      <&aon_pm_l2_intc 0x5>;
+			wakeup-source;
+			brcm,gpio-bank-widths = <18 4>;
+		};
+
 	};
 
 	smpboot {
diff --git a/arch/arm/boot/dts/cros-ec-sbs.dtsi b/arch/arm/boot/dts/cros-ec-sbs.dtsi
new file mode 100644
index 0000000..71f5c5e
--- /dev/null
+++ b/arch/arm/boot/dts/cros-ec-sbs.dtsi
@@ -0,0 +1,52 @@
+/*
+ * Smart battery dts fragment for devices that use cros-ec-sbs
+ *
+ * Copyright (c) 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&i2c_tunnel {
+	battery: sbs-battery@b {
+		compatible = "sbs,sbs-battery";
+		reg = <0xb>;
+		sbs,i2c-retry-count = <2>;
+		sbs,poll-retry-count = <1>;
+	};
+};
diff --git a/arch/arm/boot/dts/cx92755.dtsi b/arch/arm/boot/dts/cx92755.dtsi
index af33326..df4c6f1 100644
--- a/arch/arm/boot/dts/cx92755.dtsi
+++ b/arch/arm/boot/dts/cx92755.dtsi
@@ -88,6 +88,13 @@
 		interrupts = <25>;
 	};
 
+	watchdog@f0000fc0 {
+		compatible = "cnxt,cx92755-wdt";
+		reg = <0xf0000fc0 0x8>;
+		clocks = <&main_clk>;
+		timeout-sec = <15>;
+	};
+
 	uc_regs: syscon@f00003a0 {
 		compatible = "cnxt,cx92755-uc", "syscon";
 		reg = <0xf00003a0 0x10>;
diff --git a/arch/arm/boot/dts/cx92755_equinox.dts b/arch/arm/boot/dts/cx92755_equinox.dts
index 90d52cc..5da0080 100644
--- a/arch/arm/boot/dts/cx92755_equinox.dts
+++ b/arch/arm/boot/dts/cx92755_equinox.dts
@@ -64,8 +64,7 @@
 	};
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
-		stdout-path = &uart0;
+		stdout-path = "serial0:115200n8";
 	};
 };
 
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts
new file mode 100644
index 0000000..92bacd3
--- /dev/null
+++ b/arch/arm/boot/dts/dm8148-evm.dts
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "dm814x.dtsi"
+
+/ {
+	model = "DM8148 EVM";
+	compatible = "ti,dm8148-evm", "ti,dm8148";
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x40000000>;	/* 1 GB */
+	};
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <0>;
+	phy-mode = "mii";
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <1>;
+	phy-mode = "mii";
+};
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts
new file mode 100644
index 0000000..8c4bbc7
--- /dev/null
+++ b/arch/arm/boot/dts/dm8148-t410.dts
@@ -0,0 +1,28 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+#include "dm814x.dtsi"
+
+/ {
+	model = "HP t410";
+	compatible = "hp,t410", "ti,dm8148";
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x40000000>;	/* 1 GB */
+	};
+};
+
+&cpsw_emac0 {
+	phy_id = <&davinci_mdio>, <0>;
+	phy-mode = "mii";
+};
+
+&cpsw_emac1 {
+	phy_id = <&davinci_mdio>, <1>;
+	phy-mode = "mii";
+};
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi
new file mode 100644
index 0000000..ef1e8e7
--- /dev/null
+++ b/arch/arm/boot/dts/dm814x-clocks.dtsi
@@ -0,0 +1,109 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&scm_clocks {
+
+	tclkin_ck: tclkin_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
+	devosc_ck: devosc_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <20000000>;
+	};
+
+	/* Optional auxosc, 20 - 30 MHz range, assume 27 MHz by default */
+	auxosc_ck: auxosc_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <27000000>;
+	};
+
+	mpu_ck: mpu_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <1000000000>;
+	};
+
+	sysclk4_ck: sysclk4_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <222000000>;
+	};
+
+	sysclk6_ck: sysclk6_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <100000000>;
+	};
+
+	sysclk10_ck: sysclk10_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <48000000>;
+	};
+
+	sysclk18_ck: sysclk18_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <32768>;
+	};
+
+	cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+	};
+
+	cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <250000000>;
+	};
+
+};
+
+&pllss_clocks {
+
+	aud_clkin0_ck: aud_clkin0_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <20000000>;
+	};
+
+	aud_clkin1_ck: aud_clkin1_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <20000000>;
+	};
+
+	aud_clkin2_ck: aud_clkin2_ck {
+		#clock-cells = <0>;
+		compatible = "fixed-clock";
+		clock-frequency = <20000000>;
+	};
+
+	timer1_mux_ck: timer1_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sysclk18_ck &aud_clkin0_ck &aud_clkin1_ck
+			  &aud_clkin2_ck &devosc_ck &auxosc_ck &tclkin_ck>;
+		ti,bit-shift = <3>;
+		reg = <0x2e0>;
+	};
+
+	timer2_mux_ck: timer2_mux_ck {
+		#clock-cells = <0>;
+		compatible = "ti,mux-clock";
+		clocks = <&sysclk18_ck &aud_clkin0_ck &aud_clkin1_ck
+			  &aud_clkin2_ck &devosc_ck &auxosc_ck &tclkin_ck>;
+		ti,bit-shift = <6>;
+		reg = <0x2e0>;
+	};
+};
diff --git a/arch/arm/boot/dts/dm814x.dtsi b/arch/arm/boot/dts/dm814x.dtsi
new file mode 100644
index 0000000..972c9c9
--- /dev/null
+++ b/arch/arm/boot/dts/dm814x.dtsi
@@ -0,0 +1,333 @@
+/*
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/omap.h>
+
+#include "skeleton.dtsi"
+
+/ {
+	compatible = "ti,dm814";
+	interrupt-parent = <&intc>;
+
+	aliases {
+		i2c0 = &i2c1;
+		i2c1 = &i2c2;
+		serial0 = &uart1;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		ethernet0 = &cpsw_emac0;
+		ethernet1 = &cpsw_emac1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			compatible = "arm,cortex-a8";
+			device_type = "cpu";
+			reg = <0>;
+		};
+	};
+
+	pmu {
+		compatible = "arm,cortex-a8-pmu";
+		interrupts = <3>;
+	};
+
+	/*
+	 * The soc node represents the soc top level view. It is used for IPs
+	 * that are not memory mapped in the MPU view or for the MPU itself.
+	 */
+	soc {
+		compatible = "ti,omap-infra";
+		mpu {
+			compatible = "ti,omap3-mpu";
+			ti,hwmods = "mpu";
+		};
+	};
+
+	ocp {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		ti,hwmods = "l3_main";
+
+		/*
+		 * See TRM "Table 1-317. L4LS Instance Summary", just deduct
+		 * 0x1000 from the 1-317 addresses to get the device address
+		 */
+		l4ls: l4ls@48000000 {
+			compatible = "ti,dm814-l4ls", "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x48000000 0x2000000>;
+
+			i2c1: i2c@28000 {
+				compatible = "ti,omap4-i2c";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				ti,hwmods = "i2c1";
+				reg = <0x28000 0x1000>;
+				interrupts = <70>;
+			};
+
+			elm: elm@80000 {
+				compatible = "ti,814-elm";
+				ti,hwmods = "elm";
+				reg = <0x80000 0x2000>;
+				interrupts = <4>;
+			};
+
+			gpio1: gpio@32000 {
+				compatible = "ti,omap4-gpio";
+				ti,hwmods = "gpio1";
+				ti,gpio-always-on;
+				reg = <0x32000 0x2000>;
+				interrupts = <96>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			gpio2: gpio@4c000 {
+				compatible = "ti,omap4-gpio";
+				ti,hwmods = "gpio2";
+				ti,gpio-always-on;
+				reg = <0x4c000 0x2000>;
+				interrupts = <98>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			i2c2: i2c@2a000 {
+				compatible = "ti,omap4-i2c";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				ti,hwmods = "i2c2";
+				reg = <0x2a000 0x1000>;
+				interrupts = <71>;
+			};
+
+			mcspi1: spi@30000 {
+				compatible = "ti,omap4-mcspi";
+				reg = <0x30000 0x1000>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <65>;
+				ti,spi-num-cs = <4>;
+				ti,hwmods = "mcspi1";
+				dmas = <&edma 16 &edma 17
+					&edma 18 &edma 19>;
+				dma-names = "tx0", "rx0", "tx1", "rx1";
+			};
+
+			timer1: timer@2e000 {
+				compatible = "ti,dm814-timer";
+				reg = <0x2e000 0x2000>;
+				interrupts = <67>;
+				ti,hwmods = "timer1";
+				ti,timer-alwon;
+			};
+
+			uart1: uart@20000 {
+				compatible = "ti,omap3-uart";
+				ti,hwmods = "uart1";
+				reg = <0x20000 0x2000>;
+				clock-frequency = <48000000>;
+				interrupts = <72>;
+				dmas = <&edma 26 &edma 27>;
+				dma-names = "tx", "rx";
+			};
+
+			uart2: uart@22000 {
+				compatible = "ti,omap3-uart";
+				ti,hwmods = "uart2";
+				reg = <0x22000 0x2000>;
+				clock-frequency = <48000000>;
+				interrupts = <73>;
+				dmas = <&edma 28 &edma 29>;
+				dma-names = "tx", "rx";
+			};
+
+			uart3: uart@24000 {
+				compatible = "ti,omap3-uart";
+				ti,hwmods = "uart3";
+				reg = <0x24000 0x2000>;
+				clock-frequency = <48000000>;
+				interrupts = <74>;
+				dmas = <&edma 30 &edma 31>;
+				dma-names = "tx", "rx";
+			};
+
+			timer2: timer@40000 {
+				compatible = "ti,dm814-timer";
+				reg = <0x40000 0x2000>;
+				interrupts = <68>;
+				ti,hwmods = "timer2";
+			};
+
+			timer3: timer@42000 {
+				compatible = "ti,dm814-timer";
+				reg = <0x42000 0x2000>;
+				interrupts = <69>;
+				ti,hwmods = "timer3";
+			};
+
+			control: control@160000 {
+				compatible = "ti,dm814-scm", "simple-bus";
+				reg = <0x160000 0x16d000>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				ranges = <0 0x160000 0x16d000>;
+
+				scm_conf: scm_conf@0 {
+					compatible = "syscon";
+					reg = <0x0 0x800>;
+					#address-cells = <1>;
+					#size-cells = <1>;
+
+					scm_clocks: clocks {
+						#address-cells = <1>;
+						#size-cells = <0>;
+					};
+
+					scm_clockdomains: clockdomains {
+					};
+				};
+
+				pincntl: pinmux@800 {
+					compatible = "pinctrl-single";
+					reg = <0x800 0xc38>;
+					#address-cells = <1>;
+					#size-cells = <0>;
+					pinctrl-single,register-width = <32>;
+					pinctrl-single,function-mask = <0x300ff>;
+				};
+			};
+
+			prcm: prcm@180000 {
+				compatible = "ti,dm814-prcm", "simple-bus";
+				reg = <0x180000 0x4000>;
+
+				prcm_clocks: clocks {
+					#address-cells = <1>;
+					#size-cells = <0>;
+				};
+
+				prcm_clockdomains: clockdomains {
+				};
+			};
+
+			pllss: pllss@1c5000 {
+				compatible = "ti,dm814-pllss", "simple-bus";
+				reg = <0x1c5000 0x2000>;
+
+				pllss_clocks: clocks {
+					#address-cells = <1>;
+					#size-cells = <0>;
+				};
+
+				pllss_clockdomains: clockdomains {
+				};
+			};
+
+			wdt1: wdt@1c7000 {
+				compatible = "ti,omap3-wdt";
+				ti,hwmods = "wd_timer";
+				reg = <0x1c7000 0x1000>;
+				interrupts = <91>;
+			};
+		};
+
+		intc: interrupt-controller@48200000 {
+			compatible = "ti,dm814-intc";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			reg = <0x48200000 0x1000>;
+		};
+
+		edma: edma@49000000 {
+			compatible = "ti,edma3";
+			ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
+			reg =	<0x49000000 0x10000>,
+				<0x44e10f90 0x40>;
+			interrupts = <12 13 14>;
+			#dma-cells = <1>;
+		};
+
+		/* See TRM "Table 1-318. L4HS Instance Summary" */
+		l4hs: l4hs@4a000000 {
+			compatible = "ti,dm814-l4hs", "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0x4a000000 0x1b4040>;
+		};
+
+		/* REVISIT: Move to live under l4hs once driver is fixed */
+		mac: ethernet@4a100000 {
+			compatible = "ti,cpsw";
+			ti,hwmods = "cpgmac0";
+			clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>;
+			clock-names = "fck", "cpts";
+			cpdma_channels = <8>;
+			ale_entries = <1024>;
+			bd_ram_size = <0x2000>;
+			no_bd_ram = <0>;
+			rx_descs = <64>;
+			mac_control = <0x20>;
+			slaves = <2>;
+			active_slave = <0>;
+			cpts_clock_mult = <0x80000000>;
+			cpts_clock_shift = <29>;
+			reg = <0x4a100000 0x800
+			       0x4a100900 0x100>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			interrupt-parent = <&intc>;
+			/*
+			 * c0_rx_thresh_pend
+			 * c0_rx_pend
+			 * c0_tx_pend
+			 * c0_misc_pend
+			 */
+			interrupts = <40 41 42 43>;
+			ranges;
+			syscon = <&scm_conf>;
+
+			davinci_mdio: mdio@4a100800 {
+				compatible = "ti,davinci_mdio";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				ti,hwmods = "davinci_mdio";
+				bus_freq = <1000000>;
+				reg = <0x4a100800 0x100>;
+			};
+
+			cpsw_emac0: slave@4a100200 {
+				/* Filled in by U-Boot */
+				mac-address = [ 00 00 00 00 00 00 ];
+			};
+
+			cpsw_emac1: slave@4a100300 {
+				/* Filled in by U-Boot */
+				mac-address = [ 00 00 00 00 00 00 ];
+			};
+
+			phy_sel: cpsw-phy-sel@48160650 {
+				compatible = "ti,am3352-cpsw-phy-sel";
+				reg = <0x48160650 0x4>;
+				reg-names = "gmii-sel";
+			};
+		};
+	};
+};
+
+#include "dm814x-clocks.dtsi"
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 289806a..3c99cfa 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -58,7 +58,7 @@
 	 * the whole bus hierarchy.
 	 */
 	ocp {
-		compatible = "ti,omap3-l3-smx", "simple-bus";
+		compatible = "simple-bus";
 		reg = <0x44000000 0x10000>;
 		interrupts = <9 10>;
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 38b1f7e..1791216 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -419,293 +419,325 @@
 				status = "disabled";
 			};
 
-			thermal: thermal-diode@d001c {
-				compatible = "marvell,dove-thermal";
-				reg = <0xd001c 0x0c>, <0xd005c 0x08>;
-			};
-
-			gate_clk: clock-gating-ctrl@d0038 {
-				compatible = "marvell,dove-gating-clock";
-				reg = <0xd0038 0x4>;
-				clocks = <&core_clk 0>;
-				#clock-cells = <1>;
-			};
-
-			pinctrl: pin-ctrl@d0200 {
-				compatible = "marvell,dove-pinctrl";
-				reg = <0xd0200 0x14>,
-				      <0xd0440 0x04>;
-				clocks = <&gate_clk 22>;
-
-				pmx_gpio_0: pmx-gpio-0 {
-					marvell,pins = "mpp0";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_1: pmx-gpio-1 {
-					marvell,pins = "mpp1";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_2: pmx-gpio-2 {
-					marvell,pins = "mpp2";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_3: pmx-gpio-3 {
-					marvell,pins = "mpp3";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_4: pmx-gpio-4 {
-					marvell,pins = "mpp4";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_5: pmx-gpio-5 {
-					marvell,pins = "mpp5";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_6: pmx-gpio-6 {
-					marvell,pins = "mpp6";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_7: pmx-gpio-7 {
-					marvell,pins = "mpp7";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_8: pmx-gpio-8 {
-					marvell,pins = "mpp8";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_9: pmx-gpio-9 {
-					marvell,pins = "mpp9";
-					marvell,function = "gpio";
-				};
-
-				pmx_pcie1_clkreq: pmx-pcie1-clkreq {
-					marvell,pins = "mpp9";
-					marvell,function = "pex1";
-				};
-
-				pmx_gpio_10: pmx-gpio-10 {
-					marvell,pins = "mpp10";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_11: pmx-gpio-11 {
-					marvell,pins = "mpp11";
-					marvell,function = "gpio";
-				};
-
-				pmx_pcie0_clkreq: pmx-pcie0-clkreq {
-					marvell,pins = "mpp11";
-					marvell,function = "pex0";
-				};
-
-				pmx_gpio_12: pmx-gpio-12 {
-					marvell,pins = "mpp12";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_13: pmx-gpio-13 {
-					marvell,pins = "mpp13";
-					marvell,function = "gpio";
-				};
-
-				pmx_audio1_extclk: pmx-audio1-extclk {
-					marvell,pins = "mpp13";
-					marvell,function = "audio1";
-				};
-
-				pmx_gpio_14: pmx-gpio-14 {
-					marvell,pins = "mpp14";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_15: pmx-gpio-15 {
-					marvell,pins = "mpp15";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_16: pmx-gpio-16 {
-					marvell,pins = "mpp16";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_17: pmx-gpio-17 {
-					marvell,pins = "mpp17";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_18: pmx-gpio-18 {
-					marvell,pins = "mpp18";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_19: pmx-gpio-19 {
-					marvell,pins = "mpp19";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_20: pmx-gpio-20 {
-					marvell,pins = "mpp20";
-					marvell,function = "gpio";
-				};
-
-				pmx_gpio_21: pmx-gpio-21 {
-					marvell,pins = "mpp21";
-					marvell,function = "gpio";
-				};
-
-				pmx_camera: pmx-camera {
-					marvell,pins = "mpp_camera";
-					marvell,function = "camera";
-				};
-
-				pmx_camera_gpio: pmx-camera-gpio {
-					marvell,pins = "mpp_camera";
-					marvell,function = "gpio";
-				};
-
-				pmx_sdio0: pmx-sdio0 {
-					marvell,pins = "mpp_sdio0";
-					marvell,function = "sdio0";
-				};
-
-				pmx_sdio0_gpio: pmx-sdio0-gpio {
-					marvell,pins = "mpp_sdio0";
-					marvell,function = "gpio";
-				};
-
-				pmx_sdio1: pmx-sdio1 {
-					marvell,pins = "mpp_sdio1";
-					marvell,function = "sdio1";
-				};
-
-				pmx_sdio1_gpio: pmx-sdio1-gpio {
-					marvell,pins = "mpp_sdio1";
-					marvell,function = "gpio";
-				};
-
-				pmx_audio1_gpio: pmx-audio1-gpio {
-					marvell,pins = "mpp_audio1";
-					marvell,function = "gpio";
-				};
-
-				pmx_audio1_i2s1_spdifo: pmx-audio1-i2s1-spdifo {
-					marvell,pins = "mpp_audio1";
-					marvell,function = "i2s1/spdifo";
-				};
-
-				pmx_spi0: pmx-spi0 {
-					marvell,pins = "mpp_spi0";
-					marvell,function = "spi0";
-				};
-
-				pmx_spi0_gpio: pmx-spi0-gpio {
-					marvell,pins = "mpp_spi0";
-					marvell,function = "gpio";
-				};
-
-				pmx_spi1_4_7: pmx-spi1-4-7 {
-					marvell,pins = "mpp4", "mpp5",
-						"mpp6", "mpp7";
-					marvell,function = "spi1";
-				};
-
-				pmx_spi1_20_23: pmx-spi1-20-23 {
-					marvell,pins = "mpp20", "mpp21",
-						"mpp22", "mpp23";
-					marvell,function = "spi1";
-				};
-
-				pmx_uart1: pmx-uart1 {
-					marvell,pins = "mpp_uart1";
-					marvell,function = "uart1";
-				};
-
-				pmx_uart1_gpio: pmx-uart1-gpio {
-					marvell,pins = "mpp_uart1";
-					marvell,function = "gpio";
-				};
-
-				pmx_nand: pmx-nand {
-					marvell,pins = "mpp_nand";
-					marvell,function = "nand";
-				};
-
-				pmx_nand_gpo: pmx-nand-gpo {
-					marvell,pins = "mpp_nand";
-					marvell,function = "gpo";
-				};
-
-				pmx_i2c1: pmx-i2c1 {
-					marvell,pins = "mpp17", "mpp19";
-					marvell,function = "twsi";
-				};
-
-				pmx_i2c2: pmx-i2c2 {
-					marvell,pins = "mpp_audio1";
-					marvell,function = "twsi";
-				};
-
-				pmx_ssp_i2c2: pmx-ssp-i2c2 {
-					marvell,pins = "mpp_audio1";
-					marvell,function = "ssp/twsi";
-				};
-
-				pmx_i2cmux_0: pmx-i2cmux-0 {
-					marvell,pins = "twsi";
-					marvell,function = "twsi-opt1";
-				};
-
-				pmx_i2cmux_1: pmx-i2cmux-1 {
-					marvell,pins = "twsi";
-					marvell,function = "twsi-opt2";
-				};
-
-				pmx_i2cmux_2: pmx-i2cmux-2 {
-					marvell,pins = "twsi";
-					marvell,function = "twsi-opt3";
-				};
-			};
-
-			core_clk: core-clocks@d0214 {
-				compatible = "marvell,dove-core-clock";
-				reg = <0xd0214 0x4>;
-				#clock-cells = <1>;
-			};
-
-			gpio0: gpio-ctrl@d0400 {
-				compatible = "marvell,orion-gpio";
-				#gpio-cells = <2>;
-				gpio-controller;
-				reg = <0xd0400 0x20>;
-				ngpios = <32>;
+			pmu: power-management@d0000 {
+				compatible = "marvell,dove-pmu", "simple-bus";
+				reg = <0xd0000 0x8000>, <0xd8000 0x8000>;
+				ranges = <0x00000000 0x000d0000 0x8000
+					  0x00008000 0x000d8000 0x8000>;
+				interrupts = <33>;
 				interrupt-controller;
-				#interrupt-cells = <2>;
-				interrupts = <12>, <13>, <14>, <60>;
-			};
+				#address-cells = <1>;
+				#size-cells = <1>;
+				#interrupt-cells = <1>;
+				#reset-cells = <1>;
 
-			gpio1: gpio-ctrl@d0420 {
-				compatible = "marvell,orion-gpio";
-				#gpio-cells = <2>;
-				gpio-controller;
-				reg = <0xd0420 0x20>;
-				ngpios = <32>;
-				interrupt-controller;
-				#interrupt-cells = <2>;
-				interrupts = <61>;
-			};
+				domains {
+					vpu_domain: vpu-domain {
+						#power-domain-cells = <0>;
+						marvell,pmu_pwr_mask = <0x00000008>;
+						marvell,pmu_iso_mask = <0x00000001>;
+						resets = <&pmu 16>;
+					};
 
-			rtc: real-time-clock@d8500 {
-				compatible = "marvell,orion-rtc";
-				reg = <0xd8500 0x20>;
+					gpu_domain: gpu-domain {
+						#power-domain-cells = <0>;
+						marvell,pmu_pwr_mask = <0x00000004>;
+						marvell,pmu_iso_mask = <0x00000002>;
+						resets = <&pmu 18>;
+					};
+				};
+
+				thermal: thermal-diode@001c {
+					compatible = "marvell,dove-thermal";
+					reg = <0x001c 0x0c>, <0x005c 0x08>;
+				};
+
+				gate_clk: clock-gating-ctrl@0038 {
+					compatible = "marvell,dove-gating-clock";
+					reg = <0x0038 0x4>;
+					clocks = <&core_clk 0>;
+					#clock-cells = <1>;
+				};
+
+				pinctrl: pin-ctrl@0200 {
+					compatible = "marvell,dove-pinctrl";
+					reg = <0x0200 0x14>,
+					      <0x0440 0x04>;
+					clocks = <&gate_clk 22>;
+
+					pmx_gpio_0: pmx-gpio-0 {
+						marvell,pins = "mpp0";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_1: pmx-gpio-1 {
+						marvell,pins = "mpp1";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_2: pmx-gpio-2 {
+						marvell,pins = "mpp2";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_3: pmx-gpio-3 {
+						marvell,pins = "mpp3";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_4: pmx-gpio-4 {
+						marvell,pins = "mpp4";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_5: pmx-gpio-5 {
+						marvell,pins = "mpp5";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_6: pmx-gpio-6 {
+						marvell,pins = "mpp6";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_7: pmx-gpio-7 {
+						marvell,pins = "mpp7";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_8: pmx-gpio-8 {
+						marvell,pins = "mpp8";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_9: pmx-gpio-9 {
+						marvell,pins = "mpp9";
+						marvell,function = "gpio";
+					};
+
+					pmx_pcie1_clkreq: pmx-pcie1-clkreq {
+						marvell,pins = "mpp9";
+						marvell,function = "pex1";
+					};
+
+					pmx_gpio_10: pmx-gpio-10 {
+						marvell,pins = "mpp10";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_11: pmx-gpio-11 {
+						marvell,pins = "mpp11";
+						marvell,function = "gpio";
+					};
+
+					pmx_pcie0_clkreq: pmx-pcie0-clkreq {
+						marvell,pins = "mpp11";
+						marvell,function = "pex0";
+					};
+
+					pmx_gpio_12: pmx-gpio-12 {
+						marvell,pins = "mpp12";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_13: pmx-gpio-13 {
+						marvell,pins = "mpp13";
+						marvell,function = "gpio";
+					};
+
+					pmx_audio1_extclk: pmx-audio1-extclk {
+						marvell,pins = "mpp13";
+						marvell,function = "audio1";
+					};
+
+					pmx_gpio_14: pmx-gpio-14 {
+						marvell,pins = "mpp14";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_15: pmx-gpio-15 {
+						marvell,pins = "mpp15";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_16: pmx-gpio-16 {
+						marvell,pins = "mpp16";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_17: pmx-gpio-17 {
+						marvell,pins = "mpp17";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_18: pmx-gpio-18 {
+						marvell,pins = "mpp18";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_19: pmx-gpio-19 {
+						marvell,pins = "mpp19";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_20: pmx-gpio-20 {
+						marvell,pins = "mpp20";
+						marvell,function = "gpio";
+					};
+
+					pmx_gpio_21: pmx-gpio-21 {
+						marvell,pins = "mpp21";
+						marvell,function = "gpio";
+					};
+
+					pmx_camera: pmx-camera {
+						marvell,pins = "mpp_camera";
+						marvell,function = "camera";
+					};
+
+					pmx_camera_gpio: pmx-camera-gpio {
+						marvell,pins = "mpp_camera";
+						marvell,function = "gpio";
+					};
+
+					pmx_sdio0: pmx-sdio0 {
+						marvell,pins = "mpp_sdio0";
+						marvell,function = "sdio0";
+					};
+
+					pmx_sdio0_gpio: pmx-sdio0-gpio {
+						marvell,pins = "mpp_sdio0";
+						marvell,function = "gpio";
+					};
+
+					pmx_sdio1: pmx-sdio1 {
+						marvell,pins = "mpp_sdio1";
+						marvell,function = "sdio1";
+					};
+
+					pmx_sdio1_gpio: pmx-sdio1-gpio {
+						marvell,pins = "mpp_sdio1";
+						marvell,function = "gpio";
+					};
+
+					pmx_audio1_gpio: pmx-audio1-gpio {
+						marvell,pins = "mpp_audio1";
+						marvell,function = "gpio";
+					};
+
+					pmx_audio1_i2s1_spdifo: pmx-audio1-i2s1-spdifo {
+						marvell,pins = "mpp_audio1";
+						marvell,function = "i2s1/spdifo";
+					};
+
+					pmx_spi0: pmx-spi0 {
+						marvell,pins = "mpp_spi0";
+						marvell,function = "spi0";
+					};
+
+					pmx_spi0_gpio: pmx-spi0-gpio {
+						marvell,pins = "mpp_spi0";
+						marvell,function = "gpio";
+					};
+
+					pmx_spi1_4_7: pmx-spi1-4-7 {
+						marvell,pins = "mpp4", "mpp5",
+							"mpp6", "mpp7";
+						marvell,function = "spi1";
+					};
+
+					pmx_spi1_20_23: pmx-spi1-20-23 {
+						marvell,pins = "mpp20", "mpp21",
+							"mpp22", "mpp23";
+						marvell,function = "spi1";
+					};
+
+					pmx_uart1: pmx-uart1 {
+						marvell,pins = "mpp_uart1";
+						marvell,function = "uart1";
+					};
+
+					pmx_uart1_gpio: pmx-uart1-gpio {
+						marvell,pins = "mpp_uart1";
+						marvell,function = "gpio";
+					};
+
+					pmx_nand: pmx-nand {
+						marvell,pins = "mpp_nand";
+						marvell,function = "nand";
+					};
+
+					pmx_nand_gpo: pmx-nand-gpo {
+						marvell,pins = "mpp_nand";
+						marvell,function = "gpo";
+					};
+
+					pmx_i2c1: pmx-i2c1 {
+						marvell,pins = "mpp17", "mpp19";
+						marvell,function = "twsi";
+					};
+
+					pmx_i2c2: pmx-i2c2 {
+						marvell,pins = "mpp_audio1";
+						marvell,function = "twsi";
+					};
+
+					pmx_ssp_i2c2: pmx-ssp-i2c2 {
+						marvell,pins = "mpp_audio1";
+						marvell,function = "ssp/twsi";
+					};
+
+					pmx_i2cmux_0: pmx-i2cmux-0 {
+						marvell,pins = "twsi";
+						marvell,function = "twsi-opt1";
+					};
+
+					pmx_i2cmux_1: pmx-i2cmux-1 {
+						marvell,pins = "twsi";
+						marvell,function = "twsi-opt2";
+					};
+
+					pmx_i2cmux_2: pmx-i2cmux-2 {
+						marvell,pins = "twsi";
+						marvell,function = "twsi-opt3";
+					};
+				};
+
+				core_clk: core-clocks@0214 {
+					compatible = "marvell,dove-core-clock";
+					reg = <0x0214 0x4>;
+					#clock-cells = <1>;
+				};
+
+				gpio0: gpio-ctrl@0400 {
+					compatible = "marvell,orion-gpio";
+					#gpio-cells = <2>;
+					gpio-controller;
+					reg = <0x0400 0x20>;
+					ngpios = <32>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+					interrupt-parent = <&intc>;
+					interrupts = <12>, <13>, <14>, <60>;
+				};
+
+				gpio1: gpio-ctrl@0420 {
+					compatible = "marvell,orion-gpio";
+					#gpio-cells = <2>;
+					gpio-controller;
+					reg = <0x0420 0x20>;
+					ngpios = <32>;
+					interrupt-controller;
+					#interrupt-cells = <2>;
+					interrupt-parent = <&intc>;
+					interrupts = <61>;
+				};
+
+				rtc: real-time-clock@8500 {
+					compatible = "marvell,orion-rtc";
+					reg = <0x8500 0x20>;
+					interrupts = <5>;
+				};
 			};
 
 			gconf: global-config@e802c {
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 096f68b..a6c82e5 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -19,6 +19,15 @@
 		reg = <0x80000000 0x60000000>; /* 1536 MB */
 	};
 
+	evm_3v3_sd: fixedregulator-sd {
+		compatible = "regulator-fixed";
+		regulator-name = "evm_3v3_sd";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		enable-active-high;
+		gpio = <&pcf_gpio_21 5 GPIO_ACTIVE_HIGH>;
+	};
+
 	mmc2_3v3: fixedregulator-mmc2 {
 		compatible = "regulator-fixed";
 		regulator-name = "mmc2_3v3";
@@ -349,6 +358,7 @@
 					regulator-name = "ldo1";
 					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <3300000>;
+					regulator-always-on;
 					regulator-boot-on;
 				};
 
@@ -462,8 +472,14 @@
 
 &mmc1 {
 	status = "okay";
-	vmmc-supply = <&ldo1_reg>;
+	vmmc-supply = <&evm_3v3_sd>;
+	vmmc_aux-supply = <&ldo1_reg>;
 	bus-width = <4>;
+	/*
+	 * SDCD signal is not being used here - using the fact that GPIO mode
+	 * is always hardwired.
+	 */
+	cd-gpios = <&gpio6 27 0>;
 };
 
 &mmc2 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 1e29ccf..5d65db9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -141,7 +141,7 @@
 				dra7_pmx_core: pinmux@1400 {
 					compatible = "ti,dra7-padconf",
 						     "pinctrl-single";
-					reg = <0x1400 0x0464>;
+					reg = <0x1400 0x0468>;
 					#address-cells = <1>;
 					#size-cells = <0>;
 					#interrupt-cells = <1>;
@@ -149,6 +149,11 @@
 					pinctrl-single,register-width = <32>;
 					pinctrl-single,function-mask = <0x3fffffff>;
 				};
+
+				scm_conf1: scm_conf@1c04 {
+					compatible = "syscon";
+					reg = <0x1c04 0x0020>;
+				};
 			};
 
 			cm_core_aon: cm_core_aon@5000 {
@@ -211,7 +216,7 @@
 			#address-cells = <1>;
 			ranges = <0x51000000 0x51000000 0x3000
 				  0x0	     0x20000000 0x10000000>;
-			pcie@51000000 {
+			pcie1: pcie@51000000 {
 				compatible = "ti,dra7-pcie";
 				reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>;
 				reg-names = "rc_dbics", "ti_conf", "config";
@@ -286,16 +291,6 @@
 				#thermal-sensor-cells = <1>;
 		};
 
-		dra7_ctrl_core: ctrl_core@4a002000 {
-			compatible = "syscon";
-			reg = <0x4a002000 0x6d0>;
-		};
-
-		dra7_ctrl_general: tisyscon@4a002e00 {
-			compatible = "syscon";
-			reg = <0x4a002e00 0x7c>;
-		};
-
 		sdma: dma-controller@4a056000 {
 			compatible = "ti,omap4430-sdma";
 			reg = <0x4a056000 0x1000>;
@@ -308,6 +303,15 @@
 			dma-requests = <127>;
 		};
 
+		sdma_xbar: dma-router@4a002b78 {
+			compatible = "ti,dra7-dma-crossbar";
+			reg = <0x4a002b78 0xfc>;
+			#dma-cells = <1>;
+			dma-requests = <205>;
+			ti,dma-safe-map = <0>;
+			dma-masters = <&sdma>;
+		};
+
 		gpio1: gpio@4ae10000 {
 			compatible = "ti,omap4-gpio";
 			reg = <0x4ae10000 0x200>;
@@ -397,73 +401,73 @@
 		};
 
 		uart1: serial@4806a000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x4806a000 0x100>;
 			interrupts-extended = <&crossbar_mpu GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart1";
 			clock-frequency = <48000000>;
 			status = "disabled";
-			dmas = <&sdma 49>, <&sdma 50>;
+			dmas = <&sdma_xbar 49>, <&sdma_xbar 50>;
 			dma-names = "tx", "rx";
 		};
 
 		uart2: serial@4806c000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x4806c000 0x100>;
 			interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart2";
 			clock-frequency = <48000000>;
 			status = "disabled";
-			dmas = <&sdma 51>, <&sdma 52>;
+			dmas = <&sdma_xbar 51>, <&sdma_xbar 52>;
 			dma-names = "tx", "rx";
 		};
 
 		uart3: serial@48020000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x48020000 0x100>;
 			interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart3";
 			clock-frequency = <48000000>;
 			status = "disabled";
-			dmas = <&sdma 53>, <&sdma 54>;
+			dmas = <&sdma_xbar 53>, <&sdma_xbar 54>;
 			dma-names = "tx", "rx";
 		};
 
 		uart4: serial@4806e000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x4806e000 0x100>;
 			interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart4";
 			clock-frequency = <48000000>;
                         status = "disabled";
-			dmas = <&sdma 55>, <&sdma 56>;
+			dmas = <&sdma_xbar 55>, <&sdma_xbar 56>;
 			dma-names = "tx", "rx";
 		};
 
 		uart5: serial@48066000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x48066000 0x100>;
 			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart5";
 			clock-frequency = <48000000>;
 			status = "disabled";
-			dmas = <&sdma 63>, <&sdma 64>;
+			dmas = <&sdma_xbar 63>, <&sdma_xbar 64>;
 			dma-names = "tx", "rx";
 		};
 
 		uart6: serial@48068000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x48068000 0x100>;
 			interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart6";
 			clock-frequency = <48000000>;
 			status = "disabled";
-			dmas = <&sdma 79>, <&sdma 80>;
+			dmas = <&sdma_xbar 79>, <&sdma_xbar 80>;
 			dma-names = "tx", "rx";
 		};
 
 		uart7: serial@48420000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x48420000 0x100>;
 			interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart7";
@@ -472,7 +476,7 @@
 		};
 
 		uart8: serial@48422000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x48422000 0x100>;
 			interrupts = <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart8";
@@ -481,7 +485,7 @@
 		};
 
 		uart9: serial@48424000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x48424000 0x100>;
 			interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart9";
@@ -490,7 +494,7 @@
 		};
 
 		uart10: serial@4ae2b000 {
-			compatible = "ti,omap4-uart";
+			compatible = "ti,dra742-uart", "ti,omap4-uart";
 			reg = <0x4ae2b000 0x100>;
 			interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "uart10";
@@ -867,7 +871,7 @@
 			ti,hwmods = "mmc1";
 			ti,dual-volt;
 			ti,needs-special-reset;
-			dmas = <&sdma 61>, <&sdma 62>;
+			dmas = <&sdma_xbar 61>, <&sdma_xbar 62>;
 			dma-names = "tx", "rx";
 			status = "disabled";
 			pbias-supply = <&pbias_mmc_reg>;
@@ -879,7 +883,7 @@
 			interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmc2";
 			ti,needs-special-reset;
-			dmas = <&sdma 47>, <&sdma 48>;
+			dmas = <&sdma_xbar 47>, <&sdma_xbar 48>;
 			dma-names = "tx", "rx";
 			status = "disabled";
 		};
@@ -890,7 +894,7 @@
 			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmc3";
 			ti,needs-special-reset;
-			dmas = <&sdma 77>, <&sdma 78>;
+			dmas = <&sdma_xbar 77>, <&sdma_xbar 78>;
 			dma-names = "tx", "rx";
 			status = "disabled";
 		};
@@ -901,7 +905,7 @@
 			interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
 			ti,hwmods = "mmc4";
 			ti,needs-special-reset;
-			dmas = <&sdma 57>, <&sdma 58>;
+			dmas = <&sdma_xbar 57>, <&sdma_xbar 58>;
 			dma-names = "tx", "rx";
 			status = "disabled";
 		};
@@ -1046,14 +1050,14 @@
 			#size-cells = <0>;
 			ti,hwmods = "mcspi1";
 			ti,spi-num-cs = <4>;
-			dmas = <&sdma 35>,
-			       <&sdma 36>,
-			       <&sdma 37>,
-			       <&sdma 38>,
-			       <&sdma 39>,
-			       <&sdma 40>,
-			       <&sdma 41>,
-			       <&sdma 42>;
+			dmas = <&sdma_xbar 35>,
+			       <&sdma_xbar 36>,
+			       <&sdma_xbar 37>,
+			       <&sdma_xbar 38>,
+			       <&sdma_xbar 39>,
+			       <&sdma_xbar 40>,
+			       <&sdma_xbar 41>,
+			       <&sdma_xbar 42>;
 			dma-names = "tx0", "rx0", "tx1", "rx1",
 				    "tx2", "rx2", "tx3", "rx3";
 			status = "disabled";
@@ -1067,10 +1071,10 @@
 			#size-cells = <0>;
 			ti,hwmods = "mcspi2";
 			ti,spi-num-cs = <2>;
-			dmas = <&sdma 43>,
-			       <&sdma 44>,
-			       <&sdma 45>,
-			       <&sdma 46>;
+			dmas = <&sdma_xbar 43>,
+			       <&sdma_xbar 44>,
+			       <&sdma_xbar 45>,
+			       <&sdma_xbar 46>;
 			dma-names = "tx0", "rx0", "tx1", "rx1";
 			status = "disabled";
 		};
@@ -1083,7 +1087,7 @@
 			#size-cells = <0>;
 			ti,hwmods = "mcspi3";
 			ti,spi-num-cs = <2>;
-			dmas = <&sdma 15>, <&sdma 16>;
+			dmas = <&sdma_xbar 15>, <&sdma_xbar 16>;
 			dma-names = "tx0", "rx0";
 			status = "disabled";
 		};
@@ -1096,7 +1100,7 @@
 			#size-cells = <0>;
 			ti,hwmods = "mcspi4";
 			ti,spi-num-cs = <1>;
-			dmas = <&sdma 70>, <&sdma 71>;
+			dmas = <&sdma_xbar 70>, <&sdma_xbar 71>;
 			dma-names = "tx0", "rx0";
 			status = "disabled";
 		};
@@ -1296,7 +1300,12 @@
 			usb1: usb@48890000 {
 				compatible = "snps,dwc3";
 				reg = <0x48890000 0x17000>;
-				interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				phys = <&usb2_phy1>, <&usb3_phy1>;
 				phy-names = "usb2-phy", "usb3-phy";
 				tx-fifo-resize;
@@ -1319,7 +1328,12 @@
 			usb2: usb@488d0000 {
 				compatible = "snps,dwc3";
 				reg = <0x488d0000 0x17000>;
-				interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				phys = <&usb2_phy2>;
 				phy-names = "usb2-phy";
 				tx-fifo-resize;
@@ -1344,7 +1358,12 @@
 			usb3: usb@48910000 {
 				compatible = "snps,dwc3";
 				reg = <0x48910000 0x17000>;
-				interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				tx-fifo-resize;
 				maximum-speed = "high-speed";
 				dr_mode = "otg";
@@ -1399,7 +1418,7 @@
 		};
 
 		mac: ethernet@4a100000 {
-			compatible = "ti,cpsw";
+			compatible = "ti,dra7-cpsw", "ti,cpsw";
 			ti,hwmods = "gmac";
 			clocks = <&dpll_gmac_ck>, <&gmac_gmii_ref_clk_div>;
 			clock-names = "fck", "cpts";
diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
index 8037384..6f6bd98 100644
--- a/arch/arm/boot/dts/dra72-evm.dts
+++ b/arch/arm/boot/dts/dra72-evm.dts
@@ -30,6 +30,15 @@
 		regulator-max-microvolt = <3300000>;
 	};
 
+	evm_3v3_sd: fixedregulator-sd {
+		compatible = "regulator-fixed";
+		regulator-name = "evm_3v3_sd";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		enable-active-high;
+		gpio = <&pcf_gpio_21 5 GPIO_ACTIVE_HIGH>;
+	};
+
 	extcon_usb1: extcon_usb1 {
 		compatible = "linux,extcon-usb-gpio";
 		id-gpio = <&pcf_gpio_21 1 GPIO_ACTIVE_HIGH>;
@@ -286,6 +295,7 @@
 					regulator-name = "ldo1";
 					regulator-min-microvolt = <1800000>;
 					regulator-max-microvolt = <3300000>;
+					regulator-always-on;
 					regulator-boot-on;
 				};
 
@@ -343,6 +353,12 @@
 		interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
+
+		cpsw_sel_s0 {
+			gpio-hog;
+			gpios = <4 GPIO_ACTIVE_HIGH>;
+			output-low;
+		};
 	};
 };
 
@@ -491,14 +507,15 @@
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins_default>;
-
-	vmmc-supply = <&ldo1_reg>;
+	vmmc-supply = <&evm_3v3_sd>;
+	vmmc_aux-supply = <&ldo1_reg>;
 	bus-width = <4>;
 	/*
 	 * SDCD signal is not being used here - using the fact that GPIO mode
 	 * is a viable alternative
 	 */
 	cd-gpios = <&gpio6 27 0>;
+	max-frequency = <192000000>;
 };
 
 &mmc2 {
@@ -510,6 +527,7 @@
 	vmmc-supply = <&evm_3v3>;
 	bus-width = <8>;
 	ti,non-removable;
+	max-frequency = <192000000>;
 };
 
 &dra7_pmx_core {
@@ -571,9 +589,10 @@
 	pinctrl-names = "default", "sleep";
 	pinctrl-0 = <&cpsw_default>;
 	pinctrl-1 = <&cpsw_sleep>;
+	slaves = <1>;
 };
 
-&cpsw_emac1 {
+&cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <3>;
 	phy-mode = "rgmii";
 };
@@ -582,7 +601,6 @@
 	pinctrl-names = "default", "sleep";
 	pinctrl-0 = <&davinci_mdio_default>;
 	pinctrl-1 = <&davinci_mdio_sleep>;
-	active_slave = <1>;
 };
 
 &dcan1 {
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi
index fa995d0..feea98e 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
@@ -65,7 +65,12 @@
 			usb4: usb@48950000 {
 				compatible = "snps,dwc3";
 				reg = <0x48950000 0x17000>;
-				interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				tx-fifo-resize;
 				maximum-speed = "high-speed";
 				dr_mode = "otg";
diff --git a/arch/arm/boot/dts/emev2-kzm9d.dts b/arch/arm/boot/dts/emev2-kzm9d.dts
index 1dee0aa..955c24e 100644
--- a/arch/arm/boot/dts/emev2-kzm9d.dts
+++ b/arch/arm/boot/dts/emev2-kzm9d.dts
@@ -95,6 +95,14 @@
 	};
 };
 
+&iic0 {
+	status = "okay";
+};
+
+&iic1 {
+	status = "okay";
+};
+
 &pfc {
 	uart1_pins: serial@e1030000 {
 		renesas,groups = "uart1_ctrl", "uart1_data";
diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi
index bb45694..edad0c4 100644
--- a/arch/arm/boot/dts/emev2.dtsi
+++ b/arch/arm/boot/dts/emev2.dtsi
@@ -21,6 +21,8 @@
 		gpio2 = &gpio2;
 		gpio3 = &gpio3;
 		gpio4 = &gpio4;
+		i2c0 = &iic0;
+		i2c1 = &iic1;
 	};
 
 	cpus {
@@ -66,6 +68,30 @@
 			clock-frequency = <32768>;
 			#clock-cells = <0>;
 		};
+		iic0_sclkdiv: iic0_sclkdiv {
+			compatible = "renesas,emev2-smu-clkdiv";
+			reg = <0x624 0>;
+			clocks = <&pll3_fo>;
+			#clock-cells = <0>;
+		};
+		iic0_sclk: iic0_sclk {
+			compatible = "renesas,emev2-smu-gclk";
+			reg = <0x48c 1>;
+			clocks = <&iic0_sclkdiv>;
+			#clock-cells = <0>;
+		};
+		iic1_sclkdiv: iic1_sclkdiv {
+			compatible = "renesas,emev2-smu-clkdiv";
+			reg = <0x624 16>;
+			clocks = <&pll3_fo>;
+			#clock-cells = <0>;
+		};
+		iic1_sclk: iic1_sclk {
+			compatible = "renesas,emev2-smu-gclk";
+			reg = <0x490 1>;
+			clocks = <&iic1_sclkdiv>;
+			#clock-cells = <0>;
+		};
 		pll3_fo: pll3_fo {
 			compatible = "fixed-factor-clock";
 			clocks = <&c32ki>;
@@ -234,4 +260,26 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 	};
+
+	iic0: i2c@e0070000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "renesas,iic-emev2";
+		reg = <0xe0070000 0x28>;
+		interrupts = <0 32 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&iic0_sclk>;
+		clock-names = "sclk";
+		status = "disabled";
+	};
+
+	iic1: i2c@e10a0000 {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		compatible = "renesas,iic-emev2";
+		reg = <0xe10a0000 0x28>;
+		interrupts = <0 33 IRQ_TYPE_EDGE_RISING>;
+		clocks = <&iic1_sclk>;
+		clock-names = "sclk";
+		status = "disabled";
+	};
 };
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index 031853b..baa9b2f 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -182,7 +182,7 @@
 
 		display-timings {
 			timing-0 {
-				clock-frequency = <0>;
+				clock-frequency = <4600000>;
 				hactive = <320>;
 				vactive = <320>;
 				hfront-porch = <1>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index 775892b..eb37952 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -69,66 +69,6 @@
 		enable-active-high;
 	};
 
-	hsotg@12480000 {
-		vusb_d-supply = <&ldo3_reg>;
-		vusb_a-supply = <&ldo8_reg>;
-		dr_mode = "peripheral";
-		status = "okay";
-	};
-
-	sdhci_emmc: sdhci@12510000 {
-		bus-width = <8>;
-		non-removable;
-		pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus8>;
-		pinctrl-names = "default";
-		vmmc-supply = <&vemmc_reg>;
-		status = "okay";
-	};
-
-	sdhci_sd: sdhci@12530000 {
-		bus-width = <4>;
-		pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus4>;
-		pinctrl-names = "default";
-		vmmc-supply = <&ldo5_reg>;
-		cd-gpios = <&gpx3 4 0>;
-		cd-inverted;
-		status = "okay";
-	};
-
-	ehci@12580000 {
-		status = "okay";
-		port@0 {
-			status = "okay";
-		};
-	};
-
-	ohci@12590000 {
-		status = "okay";
-		port@0 {
-			status = "okay";
-		};
-	};
-
-	exynos-usbphy@125B0000 {
-		status = "okay";
-	};
-
-	serial@13800000 {
-		status = "okay";
-	};
-
-	serial@13810000 {
-		status = "okay";
-	};
-
-	serial@13820000 {
-		status = "okay";
-	};
-
-	serial@13830000 {
-		status = "okay";
-	};
-
 	gpio-keys {
 		compatible = "gpio-keys";
 
@@ -186,218 +126,6 @@
 		enable-active-high;
 	};
 
-	i2c@13890000 {
-		samsung,i2c-sda-delay = <100>;
-		samsung,i2c-slave-addr = <0x10>;
-		samsung,i2c-max-bus-freq = <100000>;
-		pinctrl-0 = <&i2c3_bus>;
-		pinctrl-names = "default";
-		status = "okay";
-
-		tsp@4a {
-			/* TBD: Atmel maXtouch touchscreen */
-			reg = <0x4a>;
-		};
-	};
-
-	i2c@138B0000 {
-		samsung,i2c-sda-delay = <100>;
-		samsung,i2c-slave-addr = <0x10>;
-		samsung,i2c-max-bus-freq = <100000>;
-		pinctrl-0 = <&i2c5_bus>;
-		pinctrl-names = "default";
-		status = "okay";
-
-		vdd_arm_reg: pmic@60 {
-			compatible = "maxim,max8952";
-			reg = <0x60>;
-
-			max8952,vid-gpios = <&gpx0 3 0>, <&gpx0 4 0>;
-			max8952,default-mode = <0>;
-			max8952,dvs-mode-microvolt = <1250000>, <1200000>,
-							<1050000>, <950000>;
-			max8952,sync-freq = <0>;
-			max8952,ramp-speed = <0>;
-
-			regulator-name = "vdd_arm";
-			regulator-min-microvolt = <770000>;
-			regulator-max-microvolt = <1400000>;
-			regulator-always-on;
-			regulator-boot-on;
-		};
-
-		pmic@66 {
-			compatible = "national,lp3974";
-			reg = <0x66>;
-
-			max8998,pmic-buck1-default-dvs-idx = <0>;
-			max8998,pmic-buck1-dvs-gpios = <&gpx0 5 0>,
-							<&gpx0 6 0>;
-			max8998,pmic-buck1-dvs-voltage = <1100000>, <1000000>,
-							<1100000>, <1000000>;
-
-			max8998,pmic-buck2-default-dvs-idx = <0>;
-			max8998,pmic-buck2-dvs-gpio = <&gpe2 0 0>;
-			max8998,pmic-buck2-dvs-voltage = <1200000>, <1100000>;
-
-			regulators {
-				ldo2_reg: LDO2 {
-					regulator-name = "VALIVE_1.2V";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-					regulator-always-on;
-				};
-
-				ldo3_reg: LDO3 {
-					regulator-name = "VUSB+MIPI_1.1V";
-					regulator-min-microvolt = <1100000>;
-					regulator-max-microvolt = <1100000>;
-					regulator-always-on;
-				};
-
-				ldo4_reg: LDO4 {
-					regulator-name = "VADC_3.3V";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-				};
-
-				ldo5_reg: LDO5 {
-					regulator-name = "VTF_2.8V";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo6_reg: LDO6 {
-					regulator-name = "LDO6";
-					regulator-min-microvolt = <2000000>;
-					regulator-max-microvolt = <2000000>;
-				};
-
-				ldo7_reg: LDO7 {
-					regulator-name = "VLCD+VMIPI_1.8V";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-
-				ldo8_reg: LDO8 {
-					regulator-name = "VUSB+VDAC_3.3V";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-					regulator-always-on;
-				};
-
-				ldo9_reg: LDO9 {
-					regulator-name = "VCC_2.8V";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-					regulator-always-on;
-				};
-
-				ldo10_reg: LDO10 {
-					regulator-name = "VPLL_1.1V";
-					regulator-min-microvolt = <1100000>;
-					regulator-max-microvolt = <1100000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				ldo11_reg: LDO11 {
-					regulator-name = "CAM_AF_3.3V";
-					regulator-min-microvolt = <3300000>;
-					regulator-max-microvolt = <3300000>;
-				};
-
-				ldo12_reg: LDO12 {
-					regulator-name = "PS_2.8V";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo13_reg: LDO13 {
-					regulator-name = "VHIC_1.2V";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-				};
-
-				ldo14_reg: LDO14 {
-					regulator-name = "CAM_I_HOST_1.8V";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-				};
-
-				ldo15_reg: LDO15 {
-					regulator-name = "CAM_S_DIG+FM33_CORE_1.2V";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-				};
-
-				ldo16_reg: LDO16 {
-					regulator-name = "CAM_S_ANA_2.8V";
-					regulator-min-microvolt = <2800000>;
-					regulator-max-microvolt = <2800000>;
-				};
-
-				ldo17_reg: LDO17 {
-					regulator-name = "VCC_3.0V_LCD";
-					regulator-min-microvolt = <3000000>;
-					regulator-max-microvolt = <3000000>;
-				};
-
-				buck1_reg: BUCK1 {
-					regulator-name = "VINT_1.1V";
-					regulator-min-microvolt = <750000>;
-					regulator-max-microvolt = <1500000>;
-					regulator-boot-on;
-					regulator-always-on;
-				};
-
-				buck2_reg: BUCK2 {
-					regulator-name = "VG3D_1.1V";
-					regulator-min-microvolt = <750000>;
-					regulator-max-microvolt = <1500000>;
-					regulator-boot-on;
-				};
-
-				buck3_reg: BUCK3 {
-					regulator-name = "VCC_1.8V";
-					regulator-min-microvolt = <1800000>;
-					regulator-max-microvolt = <1800000>;
-					regulator-always-on;
-				};
-
-				buck4_reg: BUCK4 {
-					regulator-name = "VMEM_1.2V";
-					regulator-min-microvolt = <1200000>;
-					regulator-max-microvolt = <1200000>;
-					regulator-always-on;
-				};
-
-				ap32khz_reg: EN32KHz-AP {
-					regulator-name = "32KHz AP";
-					regulator-always-on;
-				};
-
-				cp32khz_reg: EN32KHz-CP {
-					regulator-name = "32KHz CP";
-				};
-
-				vichg_reg: ENVICHG {
-					regulator-name = "VICHG";
-				};
-
-				safeout1_reg: ESAFEOUT1 {
-					regulator-name = "SAFEOUT1";
-					regulator-always-on;
-				};
-
-				safeout2_reg: ESAFEOUT2 {
-					regulator-name = "SAFEOUT2";
-					regulator-boot-on;
-				};
-			};
-		};
-	};
-
 	spi-lcd {
 		compatible = "spi-gpio";
 		#address-cells = <1>;
@@ -446,27 +174,6 @@
 		};
 	};
 
-	fimd: fimd@11c00000 {
-		pinctrl-0 = <&lcd_clk>, <&lcd_data24>;
-		pinctrl-names = "default";
-		status = "okay";
-		samsung,invert-vden;
-		samsung,invert-vclk;
-		#address-cells = <1>;
-		#size-cells = <0>;
-		port@3 {
-			reg = <3>;
-			fimd_dpi_ep: endpoint {
-				remote-endpoint = <&lcd_ep>;
-			};
-		};
-	};
-
-	pwm@139D0000 {
-		compatible = "samsung,s5p6440-pwm";
-		status = "okay";
-	};
-
 	camera {
 		status = "okay";
 
@@ -526,32 +233,289 @@
 		pinctrl-names = "default";
 		status = "okay";
 	};
-
-	mixer@12C10000 {
-		status = "okay";
-	};
-
-	hdmi@12D00000 {
-		hpd-gpio = <&gpx3 7 0>;
-		pinctrl-names = "default";
-		pinctrl-0 = <&hdmi_hpd>;
-		hdmi-en-supply = <&hdmi_en>;
-		vdd-supply = <&ldo3_reg>;
-		vdd_osc-supply = <&ldo4_reg>;
-		vdd_pll-supply = <&ldo3_reg>;
-		ddc = <&hdmi_ddc>;
-		status = "okay";
-	};
-
-	i2c@138E0000 {
-		status = "okay";
-	};
 };
 
 &cpu0 {
 	cpu0-supply = <&vdd_arm_reg>;
 };
 
+&ehci {
+	status = "okay";
+	port@0 {
+		status = "okay";
+	};
+};
+
+&exynos_usbphy {
+	status = "okay";
+};
+
+&fimd {
+	pinctrl-0 = <&lcd_clk>, <&lcd_data24>;
+	pinctrl-names = "default";
+	status = "okay";
+	samsung,invert-vden;
+	samsung,invert-vclk;
+	#address-cells = <1>;
+	#size-cells = <0>;
+	port@3 {
+		reg = <3>;
+		fimd_dpi_ep: endpoint {
+			remote-endpoint = <&lcd_ep>;
+		};
+	};
+};
+
+&hdmi {
+	hpd-gpio = <&gpx3 7 0>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&hdmi_hpd>;
+	hdmi-en-supply = <&hdmi_en>;
+	vdd-supply = <&ldo3_reg>;
+	vdd_osc-supply = <&ldo4_reg>;
+	vdd_pll-supply = <&ldo3_reg>;
+	ddc = <&hdmi_ddc>;
+	status = "okay";
+};
+
+&hsotg {
+	vusb_d-supply = <&ldo3_reg>;
+	vusb_a-supply = <&ldo8_reg>;
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
+&i2c_3 {
+	samsung,i2c-sda-delay = <100>;
+	samsung,i2c-slave-addr = <0x10>;
+	samsung,i2c-max-bus-freq = <100000>;
+	pinctrl-0 = <&i2c3_bus>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	tsp@4a {
+		/* TBD: Atmel maXtouch touchscreen */
+		reg = <0x4a>;
+	};
+};
+
+&i2c_5 {
+	samsung,i2c-sda-delay = <100>;
+	samsung,i2c-slave-addr = <0x10>;
+	samsung,i2c-max-bus-freq = <100000>;
+	pinctrl-0 = <&i2c5_bus>;
+	pinctrl-names = "default";
+	status = "okay";
+
+	vdd_arm_reg: pmic@60 {
+		compatible = "maxim,max8952";
+		reg = <0x60>;
+
+		max8952,vid-gpios = <&gpx0 3 0>, <&gpx0 4 0>;
+		max8952,default-mode = <0>;
+		max8952,dvs-mode-microvolt = <1250000>, <1200000>,
+						<1050000>, <950000>;
+		max8952,sync-freq = <0>;
+		max8952,ramp-speed = <0>;
+
+		regulator-name = "vdd_arm";
+		regulator-min-microvolt = <770000>;
+		regulator-max-microvolt = <1400000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	pmic@66 {
+		compatible = "national,lp3974";
+		reg = <0x66>;
+
+		max8998,pmic-buck1-default-dvs-idx = <0>;
+		max8998,pmic-buck1-dvs-gpios = <&gpx0 5 0>,
+						<&gpx0 6 0>;
+		max8998,pmic-buck1-dvs-voltage = <1100000>, <1000000>,
+						<1100000>, <1000000>;
+
+		max8998,pmic-buck2-default-dvs-idx = <0>;
+		max8998,pmic-buck2-dvs-gpio = <&gpe2 0 0>;
+		max8998,pmic-buck2-dvs-voltage = <1200000>, <1100000>;
+
+		regulators {
+			ldo2_reg: LDO2 {
+				regulator-name = "VALIVE_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			ldo3_reg: LDO3 {
+				regulator-name = "VUSB+MIPI_1.1V";
+				regulator-min-microvolt = <1100000>;
+				regulator-max-microvolt = <1100000>;
+				regulator-always-on;
+			};
+
+			ldo4_reg: LDO4 {
+				regulator-name = "VADC_3.3V";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo5_reg: LDO5 {
+				regulator-name = "VTF_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo6_reg: LDO6 {
+				regulator-name = "LDO6";
+				regulator-min-microvolt = <2000000>;
+				regulator-max-microvolt = <2000000>;
+			};
+
+			ldo7_reg: LDO7 {
+				regulator-name = "VLCD+VMIPI_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo8_reg: LDO8 {
+				regulator-name = "VUSB+VDAC_3.3V";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			ldo9_reg: LDO9 {
+				regulator-name = "VCC_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-always-on;
+			};
+
+			ldo10_reg: LDO10 {
+				regulator-name = "VPLL_1.1V";
+				regulator-min-microvolt = <1100000>;
+				regulator-max-microvolt = <1100000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			ldo11_reg: LDO11 {
+				regulator-name = "CAM_AF_3.3V";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+			};
+
+			ldo12_reg: LDO12 {
+				regulator-name = "PS_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo13_reg: LDO13 {
+				regulator-name = "VHIC_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+			};
+
+			ldo14_reg: LDO14 {
+				regulator-name = "CAM_I_HOST_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+			};
+
+			ldo15_reg: LDO15 {
+				regulator-name = "CAM_S_DIG+FM33_CORE_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+			};
+
+			ldo16_reg: LDO16 {
+				regulator-name = "CAM_S_ANA_2.8V";
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+			};
+
+			ldo17_reg: LDO17 {
+				regulator-name = "VCC_3.0V_LCD";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
+			};
+
+			buck1_reg: BUCK1 {
+				regulator-name = "VINT_1.1V";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-boot-on;
+				regulator-always-on;
+			};
+
+			buck2_reg: BUCK2 {
+				regulator-name = "VG3D_1.1V";
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1500000>;
+				regulator-boot-on;
+			};
+
+			buck3_reg: BUCK3 {
+				regulator-name = "VCC_1.8V";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			buck4_reg: BUCK4 {
+				regulator-name = "VMEM_1.2V";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			ap32khz_reg: EN32KHz-AP {
+				regulator-name = "32KHz AP";
+				regulator-always-on;
+			};
+
+			cp32khz_reg: EN32KHz-CP {
+				regulator-name = "32KHz CP";
+			};
+
+			vichg_reg: ENVICHG {
+				regulator-name = "VICHG";
+			};
+
+			safeout1_reg: ESAFEOUT1 {
+				regulator-name = "SAFEOUT1";
+				regulator-always-on;
+			};
+
+			safeout2_reg: ESAFEOUT2 {
+				regulator-name = "SAFEOUT2";
+				regulator-boot-on;
+			};
+		};
+	};
+};
+
+&i2c_8 {
+	status = "okay";
+};
+
+&mdma1 {
+	reg = <0x12840000 0x1000>;
+};
+
+&mixer {
+	status = "okay";
+};
+
+&ohci {
+	status = "okay";
+	port@0 {
+		status = "okay";
+	};
+};
+
 &pinctrl_1 {
 	hdmi_hpd: hdmi-hpd {
 		samsung,pins = "gpx3-7";
@@ -568,6 +532,42 @@
 	};
 };
 
-&mdma1 {
-	reg = <0x12840000 0x1000>;
+&pwm {
+	compatible = "samsung,s5p6440-pwm";
+	status = "okay";
+};
+
+&sdhci_0 {
+	bus-width = <8>;
+	non-removable;
+	pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus8>;
+	pinctrl-names = "default";
+	vmmc-supply = <&vemmc_reg>;
+	status = "okay";
+};
+
+&sdhci_2 {
+	bus-width = <4>;
+	pinctrl-0 = <&sd2_clk &sd2_cmd &sd2_bus4>;
+	pinctrl-names = "default";
+	vmmc-supply = <&ldo5_reg>;
+	cd-gpios = <&gpx3 4 0>;
+	cd-inverted;
+	status = "okay";
+};
+
+&serial_0 {
+	status = "okay";
+};
+
+&serial_1 {
+	status = "okay";
+};
+
+&serial_2 {
+	status = "okay";
+};
+
+&serial_3 {
+	status = "okay";
 };
diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts
index afc199d..8848400 100644
--- a/arch/arm/boot/dts/exynos4412-trats2.dts
+++ b/arch/arm/boot/dts/exynos4412-trats2.dts
@@ -190,6 +190,9 @@
 			interrupt-parent = <&gpx2>;
 			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
 			reg = <0x36>;
+
+			maxim,over-heat-temp = <700>;
+			maxim,over-volt = <4500>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index 886cfca..880917e 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -12,807 +12,805 @@
  * published by the Free Software Foundation.
 */
 
-/ {
-	pinctrl@11400000 {
-		gpa0: gpa0 {
-			gpio-controller;
-			#gpio-cells = <2>;
+&pinctrl_0 {
+	gpa0: gpa0 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpa1: gpa1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpa2: gpa2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb0: gpb0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb1: gpb1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb2: gpb2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb3: gpb3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc0: gpc0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc1: gpc1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc2: gpc2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc3: gpc3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpd0: gpd0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpd1: gpd1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpy0: gpy0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy1: gpy1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy2: gpy2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy3: gpy3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy4: gpy4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy5: gpy5 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy6: gpy6 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpc4: gpc4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpx0: gpx0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			interrupt-parent = <&combiner>;
-			#interrupt-cells = <2>;
-			interrupts = <23 0>, <24 0>, <25 0>, <25 1>,
-				     <26 0>, <26 1>, <27 0>, <27 1>;
-		};
-
-		gpx1: gpx1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			interrupt-parent = <&combiner>;
-			#interrupt-cells = <2>;
-			interrupts = <28 0>, <28 1>, <29 0>, <29 1>,
-				     <30 0>, <30 1>, <31 0>, <31 1>;
-		};
-
-		gpx2: gpx2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpx3: gpx3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		uart0_data: uart0-data {
-			samsung,pins = "gpa0-0", "gpa0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart0_fctl: uart0-fctl {
-			samsung,pins = "gpa0-2", "gpa0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c2_bus: i2c2-bus {
-			samsung,pins = "gpa0-6", "gpa0-7";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c2_hs_bus: i2c2-hs-bus {
-			samsung,pins = "gpa0-6", "gpa0-7";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart2_data: uart2-data {
-			samsung,pins = "gpa1-0", "gpa1-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart2_fctl: uart2-fctl {
-			samsung,pins = "gpa1-2", "gpa1-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c3_bus: i2c3-bus {
-			samsung,pins = "gpa1-2", "gpa1-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c3_hs_bus: i2c3-hs-bus {
-			samsung,pins = "gpa1-2", "gpa1-3";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart3_data: uart3-data {
-			samsung,pins = "gpa1-4", "gpa1-4";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		spi0_bus: spi0-bus {
-			samsung,pins = "gpa2-0", "gpa2-2", "gpa2-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c4_bus: i2c4-bus {
-			samsung,pins = "gpa2-0", "gpa2-1";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c5_bus: i2c5-bus {
-			samsung,pins = "gpa2-2", "gpa2-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		spi1_bus: spi1-bus {
-			samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2s1_bus: i2s1-bus {
-			samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
-					"gpb0-4";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pcm1_bus: pcm1-bus {
-			samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
-					"gpb0-4";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		ac97_bus: ac97-bus {
-			samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
-					"gpb0-4";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2s2_bus: i2s2-bus {
-			samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
-					"gpb1-4";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pcm2_bus: pcm2-bus {
-			samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
-					"gpb1-4";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		spdif_bus: spdif-bus {
-			samsung,pins = "gpb1-0", "gpb1-1";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		spi2_bus: spi2-bus {
-			samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4";
-			samsung,pin-function = <5>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c6_bus: i2c6-bus {
-			samsung,pins = "gpb1-3", "gpb1-4";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm0_out: pwm0-out {
-			samsung,pins = "gpb2-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm1_out: pwm1-out {
-			samsung,pins = "gpb2-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm2_out: pwm2-out {
-			samsung,pins = "gpb2-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm3_out: pwm3-out {
-			samsung,pins = "gpb2-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c7_bus: i2c7-bus {
-			samsung,pins = "gpb2-2", "gpb2-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c0_bus: i2c0-bus {
-			samsung,pins = "gpb3-0", "gpb3-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c1_bus: i2c1-bus {
-			samsung,pins = "gpb3-2", "gpb3-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c0_hs_bus: i2c0-hs-bus {
-			samsung,pins = "gpb3-0", "gpb3-1";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c1_hs_bus: i2c1-hs-bus {
-			samsung,pins = "gpb3-2", "gpb3-3";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		sd0_clk: sd0-clk {
-			samsung,pins = "gpc0-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_cmd: sd0-cmd {
-			samsung,pins = "gpc0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_cd: sd0-cd {
-			samsung,pins = "gpc0-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_bus1: sd0-bus-width1 {
-			samsung,pins = "gpc0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_bus4: sd0-bus-width4 {
-			samsung,pins = "gpc0-3", "gpc0-4", "gpc0-5", "gpc0-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_bus8: sd0-bus-width8 {
-			samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_clk: sd1-clk {
-			samsung,pins = "gpc2-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_cmd: sd1-cmd {
-			samsung,pins = "gpc2-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_cd: sd1-cd {
-			samsung,pins = "gpc2-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_bus1: sd1-bus-width1 {
-			samsung,pins = "gpc2-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_bus4: sd1-bus-width4 {
-			samsung,pins = "gpc2-3", "gpc2-4", "gpc2-5", "gpc2-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_clk: sd2-clk {
-			samsung,pins = "gpc3-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_cmd: sd2-cmd {
-			samsung,pins = "gpc3-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_cd: sd2-cd {
-			samsung,pins = "gpc3-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_bus1: sd2-bus-width1 {
-			samsung,pins = "gpc3-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_bus4: sd2-bus-width4 {
-			samsung,pins = "gpc3-3", "gpc3-4", "gpc3-5", "gpc3-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_bus8: sd2-bus-width8 {
-			samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd3_clk: sd3-clk {
-			samsung,pins = "gpc4-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd3_cmd: sd3-cmd {
-			samsung,pins = "gpc4-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd3_cd: sd3-cd {
-			samsung,pins = "gpc4-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd3_bus1: sd3-bus-width1 {
-			samsung,pins = "gpc4-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd3_bus4: sd3-bus-width4 {
-			samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		uart1_data: uart1-data {
-			samsung,pins = "gpd0-0", "gpd0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart1_fctl: uart1-fctl {
-			samsung,pins = "gpd0-2", "gpd0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		dp_hpd: dp_hpd {
-			samsung,pins = "gpx0-7";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 
-	pinctrl@13400000 {
-		gpe0: gpe0 {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpa1: gpa1 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpe1: gpe1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpf0: gpf0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpf1: gpf1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpg0: gpg0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpg1: gpg1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpg2: gpg2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gph0: gph0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gph1: gph1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		cam_gpio_a: cam-gpio-a {
-			samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
-				       "gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
-				       "gpe1-0", "gpe1-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_gpio_b: cam-gpio-b {
-			samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
-				       "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_i2c2_bus: cam-i2c2-bus {
-			samsung,pins = "gpe0-6", "gpe1-0";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_spi1_bus: cam-spi1-bus {
-			samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_i2c1_bus: cam-i2c1-bus {
-			samsung,pins = "gpf0-2", "gpf0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_i2c0_bus: cam-i2c0-bus {
-			samsung,pins = "gpf0-0", "gpf0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_spi0_bus: cam-spi0-bus {
-			samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_bayrgb_bus: cam-bayrgb-bus {
-			samsung,pins = "gpg0-0", "gpg0-1", "gpg0-2", "gpg0-3",
-				       "gpg0-4", "gpg0-5", "gpg0-6", "gpg0-7",
-				       "gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3",
-				       "gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7",
-				       "gpg2-0", "gpg2-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_port_a: cam-port-a {
-			samsung,pins = "gph0-0", "gph0-1", "gph0-2", "gph0-3",
-				       "gph1-0", "gph1-1", "gph1-2", "gph1-3",
-				       "gph1-4", "gph1-5", "gph1-6", "gph1-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 
-	pinctrl@10d10000 {
-		gpv0: gpv0 {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpa2: gpa2 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpv1: gpv1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpv2: gpv2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpv3: gpv3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpv4: gpv4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		c2c_rxd: c2c-rxd {
-			samsung,pins = "gpv0-0", "gpv0-1", "gpv0-2", "gpv0-3",
-				       "gpv0-4", "gpv0-5", "gpv0-6", "gpv0-7",
-				       "gpv1-0", "gpv1-1", "gpv1-2", "gpv1-3",
-				       "gpv1-4", "gpv1-5", "gpv1-6", "gpv1-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		c2c_txd: c2c-txd {
-			samsung,pins = "gpv2-0", "gpv2-1", "gpv2-2", "gpv2-3",
-				       "gpv2-4", "gpv2-5", "gpv2-6", "gpv2-7",
-				       "gpv3-0", "gpv3-1", "gpv3-2", "gpv3-3",
-				       "gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 
-	pinctrl@03860000 {
-		gpz: gpz {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpb0: gpb0 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
 
-		i2s0_bus: i2s0-bus {
-			samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
-					"gpz-4", "gpz-5", "gpz-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+	gpb1: gpb1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb2: gpb2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb3: gpb3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc0: gpc0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc1: gpc1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc2: gpc2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc3: gpc3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd0: gpd0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd1: gpd1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpy0: gpy0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy1: gpy1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy2: gpy2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy3: gpy3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy4: gpy4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy5: gpy5 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy6: gpy6 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpc4: gpc4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpx0: gpx0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		interrupt-parent = <&combiner>;
+		#interrupt-cells = <2>;
+		interrupts = <23 0>, <24 0>, <25 0>, <25 1>,
+			     <26 0>, <26 1>, <27 0>, <27 1>;
+	};
+
+	gpx1: gpx1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		interrupt-parent = <&combiner>;
+		#interrupt-cells = <2>;
+		interrupts = <28 0>, <28 1>, <29 0>, <29 1>,
+			     <30 0>, <30 1>, <31 0>, <31 1>;
+	};
+
+	gpx2: gpx2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpx3: gpx3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	uart0_data: uart0-data {
+		samsung,pins = "gpa0-0", "gpa0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart0_fctl: uart0-fctl {
+		samsung,pins = "gpa0-2", "gpa0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c2_bus: i2c2-bus {
+		samsung,pins = "gpa0-6", "gpa0-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c2_hs_bus: i2c2-hs-bus {
+		samsung,pins = "gpa0-6", "gpa0-7";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart2_data: uart2-data {
+		samsung,pins = "gpa1-0", "gpa1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart2_fctl: uart2-fctl {
+		samsung,pins = "gpa1-2", "gpa1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c3_bus: i2c3-bus {
+		samsung,pins = "gpa1-2", "gpa1-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c3_hs_bus: i2c3-hs-bus {
+		samsung,pins = "gpa1-2", "gpa1-3";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart3_data: uart3-data {
+		samsung,pins = "gpa1-4", "gpa1-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi0_bus: spi0-bus {
+		samsung,pins = "gpa2-0", "gpa2-2", "gpa2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c4_bus: i2c4-bus {
+		samsung,pins = "gpa2-0", "gpa2-1";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c5_bus: i2c5-bus {
+		samsung,pins = "gpa2-2", "gpa2-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi1_bus: spi1-bus {
+		samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2s1_bus: i2s1-bus {
+		samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
+			       "gpb0-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pcm1_bus: pcm1-bus {
+		samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
+			       "gpb0-4";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	ac97_bus: ac97-bus {
+		samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
+			       "gpb0-4";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2s2_bus: i2s2-bus {
+		samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
+			       "gpb1-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pcm2_bus: pcm2-bus {
+		samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
+			       "gpb1-4";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spdif_bus: spdif-bus {
+		samsung,pins = "gpb1-0", "gpb1-1";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi2_bus: spi2-bus {
+		samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4";
+		samsung,pin-function = <5>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c6_bus: i2c6-bus {
+		samsung,pins = "gpb1-3", "gpb1-4";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm0_out: pwm0-out {
+		samsung,pins = "gpb2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm1_out: pwm1-out {
+		samsung,pins = "gpb2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm2_out: pwm2-out {
+		samsung,pins = "gpb2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm3_out: pwm3-out {
+		samsung,pins = "gpb2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c7_bus: i2c7-bus {
+		samsung,pins = "gpb2-2", "gpb2-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c0_bus: i2c0-bus {
+		samsung,pins = "gpb3-0", "gpb3-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c1_bus: i2c1-bus {
+		samsung,pins = "gpb3-2", "gpb3-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c0_hs_bus: i2c0-hs-bus {
+		samsung,pins = "gpb3-0", "gpb3-1";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c1_hs_bus: i2c1-hs-bus {
+		samsung,pins = "gpb3-2", "gpb3-3";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	sd0_clk: sd0-clk {
+		samsung,pins = "gpc0-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_cmd: sd0-cmd {
+		samsung,pins = "gpc0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_cd: sd0-cd {
+		samsung,pins = "gpc0-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus1: sd0-bus-width1 {
+		samsung,pins = "gpc0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus4: sd0-bus-width4 {
+		samsung,pins = "gpc0-3", "gpc0-4", "gpc0-5", "gpc0-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus8: sd0-bus-width8 {
+		samsung,pins = "gpc1-0", "gpc1-1", "gpc1-2", "gpc1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_clk: sd1-clk {
+		samsung,pins = "gpc2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_cmd: sd1-cmd {
+		samsung,pins = "gpc2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_cd: sd1-cd {
+		samsung,pins = "gpc2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus1: sd1-bus-width1 {
+		samsung,pins = "gpc2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus4: sd1-bus-width4 {
+		samsung,pins = "gpc2-3", "gpc2-4", "gpc2-5", "gpc2-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_clk: sd2-clk {
+		samsung,pins = "gpc3-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cmd: sd2-cmd {
+		samsung,pins = "gpc3-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cd: sd2-cd {
+		samsung,pins = "gpc3-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus1: sd2-bus-width1 {
+		samsung,pins = "gpc3-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus4: sd2-bus-width4 {
+		samsung,pins = "gpc3-3", "gpc3-4", "gpc3-5", "gpc3-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus8: sd2-bus-width8 {
+		samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd3_clk: sd3-clk {
+		samsung,pins = "gpc4-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd3_cmd: sd3-cmd {
+		samsung,pins = "gpc4-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd3_cd: sd3-cd {
+		samsung,pins = "gpc4-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd3_bus1: sd3-bus-width1 {
+		samsung,pins = "gpc4-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd3_bus4: sd3-bus-width4 {
+		samsung,pins = "gpc4-3", "gpc4-4", "gpc4-5", "gpc4-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	uart1_data: uart1-data {
+		samsung,pins = "gpd0-0", "gpd0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart1_fctl: uart1-fctl {
+		samsung,pins = "gpd0-2", "gpd0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	dp_hpd: dp_hpd {
+		samsung,pins = "gpx0-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_1 {
+	gpe0: gpe0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpe1: gpe1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf0: gpf0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf1: gpf1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg0: gpg0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg1: gpg1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg2: gpg2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gph0: gph0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gph1: gph1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	cam_gpio_a: cam-gpio-a {
+		samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
+			       "gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
+			       "gpe1-0", "gpe1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_gpio_b: cam-gpio-b {
+		samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
+			       "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_i2c2_bus: cam-i2c2-bus {
+		samsung,pins = "gpe0-6", "gpe1-0";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_spi1_bus: cam-spi1-bus {
+		samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_i2c1_bus: cam-i2c1-bus {
+		samsung,pins = "gpf0-2", "gpf0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_i2c0_bus: cam-i2c0-bus {
+		samsung,pins = "gpf0-0", "gpf0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_spi0_bus: cam-spi0-bus {
+		samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_bayrgb_bus: cam-bayrgb-bus {
+		samsung,pins = "gpg0-0", "gpg0-1", "gpg0-2", "gpg0-3",
+			       "gpg0-4", "gpg0-5", "gpg0-6", "gpg0-7",
+			       "gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3",
+			       "gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7",
+			       "gpg2-0", "gpg2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_port_a: cam-port-a {
+		samsung,pins = "gph0-0", "gph0-1", "gph0-2", "gph0-3",
+			       "gph1-0", "gph1-1", "gph1-2", "gph1-3",
+			       "gph1-4", "gph1-5", "gph1-6", "gph1-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_2 {
+	gpv0: gpv0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpv1: gpv1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpv2: gpv2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpv3: gpv3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpv4: gpv4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	c2c_rxd: c2c-rxd {
+		samsung,pins = "gpv0-0", "gpv0-1", "gpv0-2", "gpv0-3",
+			       "gpv0-4", "gpv0-5", "gpv0-6", "gpv0-7",
+			       "gpv1-0", "gpv1-1", "gpv1-2", "gpv1-3",
+			       "gpv1-4", "gpv1-5", "gpv1-6", "gpv1-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	c2c_txd: c2c-txd {
+		samsung,pins = "gpv2-0", "gpv2-1", "gpv2-2", "gpv2-3",
+			       "gpv2-4", "gpv2-5", "gpv2-6", "gpv2-7",
+			       "gpv3-0", "gpv3-1", "gpv3-2", "gpv3-3",
+			       "gpv3-4", "gpv3-5", "gpv3-6", "gpv3-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_3 {
+	gpz: gpz {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	i2s0_bus: i2s0-bus {
+		samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
+				"gpz-4", "gpz-5", "gpz-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
 	};
 };
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index bf9bee6..4a1f883 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -19,7 +19,6 @@
 
 #include <dt-bindings/clock/exynos5250.h>
 #include "exynos5.dtsi"
-#include "exynos5250-pinctrl.dtsi"
 #include "exynos4-cpu-thermal.dtsi"
 #include <dt-bindings/clock/exynos-audss-clk.h>
 
@@ -1062,3 +1061,5 @@
 	clocks = <&clock CLK_UART3>, <&clock CLK_SCLK_UART3>;
 	clock-names = "uart", "clk_uart_baud0";
 };
+
+#include "exynos5250-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/exynos5410-smdk5410.dts b/arch/arm/boot/dts/exynos5410-smdk5410.dts
index be3e025..cebeaab 100644
--- a/arch/arm/boot/dts/exynos5410-smdk5410.dts
+++ b/arch/arm/boot/dts/exynos5410-smdk5410.dts
@@ -62,13 +62,13 @@
 };
 
 &uart0 {
-		status = "okay";
+	status = "okay";
 };
 
 &uart1 {
-		status = "okay";
+	status = "okay";
 };
 
 &uart2 {
-		status = "okay";
+	status = "okay";
 };
diff --git a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
index 8b15316..130563b 100644
--- a/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5420-pinctrl.dtsi
@@ -12,711 +12,710 @@
  * published by the Free Software Foundation.
 */
 
-/ {
-	pinctrl@13400000 {
-		gpy7: gpy7 {
-			gpio-controller;
-			#gpio-cells = <2>;
+&pinctrl_0 {
+	gpy7: gpy7 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpx0: gpx0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			interrupt-parent = <&combiner>;
-			#interrupt-cells = <2>;
-			interrupts = <23 0>, <24 0>, <25 0>, <25 1>,
-				     <26 0>, <26 1>, <27 0>, <27 1>;
-		};
-
-		gpx1: gpx1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			interrupt-parent = <&combiner>;
-			#interrupt-cells = <2>;
-			interrupts = <28 0>, <28 1>, <29 0>, <29 1>,
-				     <30 0>, <30 1>, <31 0>, <31 1>;
-		};
-
-		gpx2: gpx2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpx3: gpx3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		dp_hpd: dp_hpd {
-			samsung,pins = "gpx0-7";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 
-	pinctrl@13410000 {
-		gpc0: gpc0 {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpx0: gpx0 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc1: gpc1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc2: gpc2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc3: gpc3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpc4: gpc4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpd1: gpd1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpy0: gpy0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy1: gpy1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy2: gpy2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy3: gpy3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy4: gpy4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy5: gpy5 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		gpy6: gpy6 {
-			gpio-controller;
-			#gpio-cells = <2>;
-		};
-
-		sd0_clk: sd0-clk {
-			samsung,pins = "gpc0-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_cmd: sd0-cmd {
-			samsung,pins = "gpc0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_cd: sd0-cd {
-			samsung,pins = "gpc0-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_bus1: sd0-bus-width1 {
-			samsung,pins = "gpc0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_bus4: sd0-bus-width4 {
-			samsung,pins = "gpc0-4", "gpc0-5", "gpc0-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_bus8: sd0-bus-width8 {
-			samsung,pins = "gpc3-0", "gpc3-1", "gpc3-2", "gpc3-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_clk: sd1-clk {
-			samsung,pins = "gpc1-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd0_rclk: sd0-rclk {
-			samsung,pins = "gpc0-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <1>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_cmd: sd1-cmd {
-			samsung,pins = "gpc1-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_cd: sd1-cd {
-			samsung,pins = "gpc1-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_int: sd1-int {
-			samsung,pins = "gpd1-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		sd1_bus1: sd1-bus-width1 {
-			samsung,pins = "gpc1-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_bus4: sd1-bus-width4 {
-			samsung,pins = "gpc1-4", "gpc1-5", "gpc1-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd1_bus8: sd1-bus-width8 {
-			samsung,pins = "gpd1-4", "gpd1-5", "gpd1-6", "gpd1-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_clk: sd2-clk {
-			samsung,pins = "gpc2-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_cmd: sd2-cmd {
-			samsung,pins = "gpc2-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_cd: sd2-cd {
-			samsung,pins = "gpc2-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_bus1: sd2-bus-width1 {
-			samsung,pins = "gpc2-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
-
-		sd2_bus4: sd2-bus-width4 {
-			samsung,pins = "gpc2-4", "gpc2-5", "gpc2-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <3>;
-		};
+		interrupt-controller;
+		interrupt-parent = <&combiner>;
+		#interrupt-cells = <2>;
+		interrupts = <23 0>, <24 0>, <25 0>, <25 1>,
+			     <26 0>, <26 1>, <27 0>, <27 1>;
 	};
 
-	pinctrl@14000000 {
-		gpe0: gpe0 {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpx1: gpx1 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpe1: gpe1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpf0: gpf0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpf1: gpf1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpg0: gpg0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpg1: gpg1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpg2: gpg2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpj4: gpj4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		cam_gpio_a: cam-gpio-a {
-			samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
-				       "gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
-				       "gpe1-0", "gpe1-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_gpio_b: cam-gpio-b {
-			samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
-				       "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_i2c2_bus: cam-i2c2-bus {
-			samsung,pins = "gpf0-4", "gpf0-5";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-		cam_spi1_bus: cam-spi1-bus {
-			samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_i2c1_bus: cam-i2c1-bus {
-			samsung,pins = "gpf0-2", "gpf0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_i2c0_bus: cam-i2c0-bus {
-			samsung,pins = "gpf0-0", "gpf0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_spi0_bus: cam-spi0-bus {
-			samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		cam_bayrgb_bus: cam-bayrgb-bus {
-			samsung,pins = "gpg0-0", "gpg0-1", "gpg0-2", "gpg0-3",
-				       "gpg0-4", "gpg0-5", "gpg0-6", "gpg0-7",
-				       "gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3",
-				       "gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7",
-				       "gpg2-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+		interrupt-controller;
+		interrupt-parent = <&combiner>;
+		#interrupt-cells = <2>;
+		interrupts = <28 0>, <28 1>, <29 0>, <29 1>,
+			     <30 0>, <30 1>, <31 0>, <31 1>;
 	};
 
-	pinctrl@14010000 {
-		gpa0: gpa0 {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpx2: gpx2 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpa1: gpa1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpa2: gpa2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb0: gpb0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb1: gpb1 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb2: gpb2 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb3: gpb3 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gpb4: gpb4 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		gph0: gph0 {
-			gpio-controller;
-			#gpio-cells = <2>;
-
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
-
-		uart0_data: uart0-data {
-			samsung,pins = "gpa0-0", "gpa0-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart0_fctl: uart0-fctl {
-			samsung,pins = "gpa0-2", "gpa0-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart1_data: uart1-data {
-			samsung,pins = "gpa0-4", "gpa0-5";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart1_fctl: uart1-fctl {
-			samsung,pins = "gpa0-6", "gpa0-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c2_bus: i2c2-bus {
-			samsung,pins = "gpa0-6", "gpa0-7";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart2_data: uart2-data {
-			samsung,pins = "gpa1-0", "gpa1-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart2_fctl: uart2-fctl {
-			samsung,pins = "gpa1-2", "gpa1-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c3_bus: i2c3-bus {
-			samsung,pins = "gpa1-2", "gpa1-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		uart3_data: uart3-data {
-			samsung,pins = "gpa1-4", "gpa1-5";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		spi0_bus: spi0-bus {
-			samsung,pins = "gpa2-0", "gpa2-1", "gpa2-2", "gpa2-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		spi1_bus: spi1-bus {
-			samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c4_hs_bus: i2c4-hs-bus {
-			samsung,pins = "gpa2-0", "gpa2-1";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c5_hs_bus: i2c5-hs-bus {
-			samsung,pins = "gpa2-2", "gpa2-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2s1_bus: i2s1-bus {
-			samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
-					"gpb0-4";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pcm1_bus: pcm1-bus {
-			samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
-					"gpb0-4";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2s2_bus: i2s2-bus {
-			samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
-					"gpb1-4";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pcm2_bus: pcm2-bus {
-			samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
-					"gpb1-4";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		spdif_bus: spdif-bus {
-			samsung,pins = "gpb1-0", "gpb1-1";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		spi2_bus: spi2-bus {
-			samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4";
-			samsung,pin-function = <5>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c6_hs_bus: i2c6-hs-bus {
-			samsung,pins = "gpb1-3", "gpb1-4";
-			samsung,pin-function = <4>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm0_out: pwm0-out {
-			samsung,pins = "gpb2-0";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm1_out: pwm1-out {
-			samsung,pins = "gpb2-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm2_out: pwm2-out {
-			samsung,pins = "gpb2-2";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		pwm3_out: pwm3-out {
-			samsung,pins = "gpb2-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c7_hs_bus: i2c7-hs-bus {
-			samsung,pins = "gpb2-2", "gpb2-3";
-			samsung,pin-function = <3>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c0_bus: i2c0-bus {
-			samsung,pins = "gpb3-0", "gpb3-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c1_bus: i2c1-bus {
-			samsung,pins = "gpb3-2", "gpb3-3";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c8_hs_bus: i2c8-hs-bus {
-			samsung,pins = "gpb3-4", "gpb3-5";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c9_hs_bus: i2c9-hs-bus {
-			samsung,pins = "gpb3-6", "gpb3-7";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
-
-		i2c10_hs_bus: i2c10-hs-bus {
-			samsung,pins = "gpb4-0", "gpb4-1";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <3>;
-			samsung,pin-drv = <0>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
 	};
 
-	pinctrl@03860000 {
-		gpz: gpz {
-			gpio-controller;
-			#gpio-cells = <2>;
+	gpx3: gpx3 {
+		gpio-controller;
+		#gpio-cells = <2>;
 
-			interrupt-controller;
-			#interrupt-cells = <2>;
-		};
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
 
-		i2s0_bus: i2s0-bus {
-			samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
-					"gpz-4", "gpz-5", "gpz-6";
-			samsung,pin-function = <2>;
-			samsung,pin-pud = <0>;
-			samsung,pin-drv = <0>;
-		};
+	dp_hpd: dp_hpd {
+		samsung,pins = "gpx0-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_1 {
+	gpc0: gpc0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc1: gpc1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc2: gpc2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc3: gpc3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpc4: gpc4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpd1: gpd1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpy0: gpy0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy1: gpy1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy2: gpy2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy3: gpy3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy4: gpy4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy5: gpy5 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	gpy6: gpy6 {
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
+
+	sd0_clk: sd0-clk {
+		samsung,pins = "gpc0-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_cmd: sd0-cmd {
+		samsung,pins = "gpc0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_cd: sd0-cd {
+		samsung,pins = "gpc0-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus1: sd0-bus-width1 {
+		samsung,pins = "gpc0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus4: sd0-bus-width4 {
+		samsung,pins = "gpc0-4", "gpc0-5", "gpc0-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_bus8: sd0-bus-width8 {
+		samsung,pins = "gpc3-0", "gpc3-1", "gpc3-2", "gpc3-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_clk: sd1-clk {
+		samsung,pins = "gpc1-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd0_rclk: sd0-rclk {
+		samsung,pins = "gpc0-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <1>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_cmd: sd1-cmd {
+		samsung,pins = "gpc1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_cd: sd1-cd {
+		samsung,pins = "gpc1-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_int: sd1-int {
+		samsung,pins = "gpd1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	sd1_bus1: sd1-bus-width1 {
+		samsung,pins = "gpc1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus4: sd1-bus-width4 {
+		samsung,pins = "gpc1-4", "gpc1-5", "gpc1-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd1_bus8: sd1-bus-width8 {
+		samsung,pins = "gpd1-4", "gpd1-5", "gpd1-6", "gpd1-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_clk: sd2-clk {
+		samsung,pins = "gpc2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cmd: sd2-cmd {
+		samsung,pins = "gpc2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_cd: sd2-cd {
+		samsung,pins = "gpc2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus1: sd2-bus-width1 {
+		samsung,pins = "gpc2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+
+	sd2_bus4: sd2-bus-width4 {
+		samsung,pins = "gpc2-4", "gpc2-5", "gpc2-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <3>;
+	};
+};
+
+&pinctrl_2 {
+	gpe0: gpe0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpe1: gpe1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf0: gpf0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpf1: gpf1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg0: gpg0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg1: gpg1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpg2: gpg2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpj4: gpj4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	cam_gpio_a: cam-gpio-a {
+		samsung,pins = "gpe0-0", "gpe0-1", "gpe0-2", "gpe0-3",
+			       "gpe0-4", "gpe0-5", "gpe0-6", "gpe0-7",
+			       "gpe1-0", "gpe1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_gpio_b: cam-gpio-b {
+		samsung,pins = "gpf0-0", "gpf0-1", "gpf0-2", "gpf0-3",
+			       "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_i2c2_bus: cam-i2c2-bus {
+		samsung,pins = "gpf0-4", "gpf0-5";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_spi1_bus: cam-spi1-bus {
+		samsung,pins = "gpe0-4", "gpe0-5", "gpf0-2", "gpf0-3";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_i2c1_bus: cam-i2c1-bus {
+		samsung,pins = "gpf0-2", "gpf0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_i2c0_bus: cam-i2c0-bus {
+		samsung,pins = "gpf0-0", "gpf0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_spi0_bus: cam-spi0-bus {
+		samsung,pins = "gpf1-0", "gpf1-1", "gpf1-2", "gpf1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	cam_bayrgb_bus: cam-bayrgb-bus {
+		samsung,pins = "gpg0-0", "gpg0-1", "gpg0-2", "gpg0-3",
+			       "gpg0-4", "gpg0-5", "gpg0-6", "gpg0-7",
+			       "gpg1-0", "gpg1-1", "gpg1-2", "gpg1-3",
+			       "gpg1-4", "gpg1-5", "gpg1-6", "gpg1-7",
+			       "gpg2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_3 {
+	gpa0: gpa0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpa1: gpa1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpa2: gpa2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb0: gpb0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb1: gpb1 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb2: gpb2 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb3: gpb3 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gpb4: gpb4 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	gph0: gph0 {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	uart0_data: uart0-data {
+		samsung,pins = "gpa0-0", "gpa0-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart0_fctl: uart0-fctl {
+		samsung,pins = "gpa0-2", "gpa0-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart1_data: uart1-data {
+		samsung,pins = "gpa0-4", "gpa0-5";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart1_fctl: uart1-fctl {
+		samsung,pins = "gpa0-6", "gpa0-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c2_bus: i2c2-bus {
+		samsung,pins = "gpa0-6", "gpa0-7";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart2_data: uart2-data {
+		samsung,pins = "gpa1-0", "gpa1-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart2_fctl: uart2-fctl {
+		samsung,pins = "gpa1-2", "gpa1-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c3_bus: i2c3-bus {
+		samsung,pins = "gpa1-2", "gpa1-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	uart3_data: uart3-data {
+		samsung,pins = "gpa1-4", "gpa1-5";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi0_bus: spi0-bus {
+		samsung,pins = "gpa2-0", "gpa2-1", "gpa2-2", "gpa2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi1_bus: spi1-bus {
+		samsung,pins = "gpa2-4", "gpa2-6", "gpa2-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c4_hs_bus: i2c4-hs-bus {
+		samsung,pins = "gpa2-0", "gpa2-1";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c5_hs_bus: i2c5-hs-bus {
+		samsung,pins = "gpa2-2", "gpa2-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2s1_bus: i2s1-bus {
+		samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
+			       "gpb0-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pcm1_bus: pcm1-bus {
+		samsung,pins = "gpb0-0", "gpb0-1", "gpb0-2", "gpb0-3",
+			       "gpb0-4";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2s2_bus: i2s2-bus {
+		samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
+			       "gpb1-4";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pcm2_bus: pcm2-bus {
+		samsung,pins = "gpb1-0", "gpb1-1", "gpb1-2", "gpb1-3",
+			       "gpb1-4";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spdif_bus: spdif-bus {
+		samsung,pins = "gpb1-0", "gpb1-1";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	spi2_bus: spi2-bus {
+		samsung,pins = "gpb1-1", "gpb1-3", "gpb1-4";
+		samsung,pin-function = <5>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c6_hs_bus: i2c6-hs-bus {
+		samsung,pins = "gpb1-3", "gpb1-4";
+		samsung,pin-function = <4>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm0_out: pwm0-out {
+		samsung,pins = "gpb2-0";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm1_out: pwm1-out {
+		samsung,pins = "gpb2-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm2_out: pwm2-out {
+		samsung,pins = "gpb2-2";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	pwm3_out: pwm3-out {
+		samsung,pins = "gpb2-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c7_hs_bus: i2c7-hs-bus {
+		samsung,pins = "gpb2-2", "gpb2-3";
+		samsung,pin-function = <3>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c0_bus: i2c0-bus {
+		samsung,pins = "gpb3-0", "gpb3-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c1_bus: i2c1-bus {
+		samsung,pins = "gpb3-2", "gpb3-3";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c8_hs_bus: i2c8-hs-bus {
+		samsung,pins = "gpb3-4", "gpb3-5";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c9_hs_bus: i2c9-hs-bus {
+		samsung,pins = "gpb3-6", "gpb3-7";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+
+	i2c10_hs_bus: i2c10-hs-bus {
+		samsung,pins = "gpb4-0", "gpb4-1";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <3>;
+		samsung,pin-drv = <0>;
+	};
+};
+
+&pinctrl_4 {
+	gpz: gpz {
+		gpio-controller;
+		#gpio-cells = <2>;
+
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	i2s0_bus: i2s0-bus {
+		samsung,pins = "gpz-0", "gpz-1", "gpz-2", "gpz-3",
+				"gpz-4", "gpz-5", "gpz-6";
+		samsung,pin-function = <2>;
+		samsung,pin-pud = <0>;
+		samsung,pin-drv = <0>;
 	};
 };
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 534f27c..df9aee9 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -15,7 +15,6 @@
 
 #include <dt-bindings/clock/exynos5420.h>
 #include "exynos5.dtsi"
-#include "exynos5420-pinctrl.dtsi"
 
 #include <dt-bindings/clock/exynos-audss-clk.h>
 
@@ -1166,3 +1165,5 @@
 	clocks = <&clock CLK_UART3>, <&clock CLK_SCLK_UART3>;
 	clock-names = "uart", "clk_uart_baud0";
 };
+
+#include "exynos5420-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi b/arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi
new file mode 100644
index 0000000..2b289d7
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5422-cpu-thermal.dtsi
@@ -0,0 +1,59 @@
+/*
+ * Device tree sources for Exynos5422 thermal zone
+ *
+ * Copyright (c) 2015 Lukasz Majewski <l.majewski@samsung.com>
+ *			Anand Moon <linux.amoon@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal.h>
+
+/ {
+	thermal-zones {
+		cpu0_thermal: cpu0-thermal {
+			thermal-sensors = <&tmu_cpu0 0>;
+			polling-delay-passive = <0>;
+			polling-delay = <0>;
+			trips {
+				cpu_alert0: cpu-alert-0 {
+					temperature = <50000>; /* millicelsius */
+					hysteresis = <5000>; /* millicelsius */
+					type = "active";
+				};
+				cpu_alert1: cpu-alert-1 {
+					temperature = <60000>; /* millicelsius */
+					hysteresis = <5000>; /* millicelsius */
+					type = "active";
+				};
+				cpu_alert2: cpu-alert-2 {
+					temperature = <70000>; /* millicelsius */
+					hysteresis = <5000>; /* millicelsius */
+					type = "active";
+				};
+				cpu_crit0: cpu-crit-0 {
+					temperature = <120000>; /* millicelsius */
+					hysteresis = <0>; /* millicelsius */
+					type = "critical";
+				};
+			};
+			cooling-maps {
+				map0 {
+				     trip = <&cpu_alert0>;
+				     cooling-device = <&fan0 0 1>;
+				};
+				map1 {
+				     trip = <&cpu_alert1>;
+				     cooling-device = <&fan0 1 2>;
+				};
+				map2 {
+				     trip = <&cpu_alert2>;
+				     cooling-device = <&fan0 2 3>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
index 8adf455..1565667 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
@@ -15,6 +15,7 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/sound/samsung-i2s.h>
 #include "exynos5800.dtsi"
+#include "exynos5422-cpu-thermal.dtsi"
 
 / {
 	memory {
@@ -107,6 +108,15 @@
 			clocks = <&i2s0 CLK_I2S_CDCLK>;
 		};
 	};
+
+	fan0: pwm-fan {
+		compatible = "pwm-fan";
+		pwms = <&pwm 0 20972 0>;
+		cooling-min-state = <0>;
+		cooling-max-state = <3>;
+		#cooling-cells = <2>;
+		cooling-levels = <0 130 170 230>;
+	};
 };
 
 &clock_audss {
@@ -461,6 +471,32 @@
 	 */
 	pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
 	pinctrl-names = "default";
+	samsung,pwm-outputs = <0>;
+	status = "okay";
+};
+
+&tmu_cpu0 {
+	vtmu-supply = <&ldo7_reg>;
+	status = "okay";
+};
+
+&tmu_cpu1 {
+	vtmu-supply = <&ldo7_reg>;
+	status = "okay";
+};
+
+&tmu_cpu2 {
+	vtmu-supply = <&ldo7_reg>;
+	status = "okay";
+};
+
+&tmu_cpu3 {
+	vtmu-supply = <&ldo7_reg>;
+	status = "okay";
+};
+
+&tmu_gpu {
+	vtmu-supply = <&ldo7_reg>;
 	status = "okay";
 };
 
@@ -477,3 +513,13 @@
 &usbdrd_dwc3_1 {
 	dr_mode = "otg";
 };
+
+&usbdrd3_0 {
+	vdd33-supply = <&ldo9_reg>;
+	vdd10-supply = <&ldo11_reg>;
+};
+
+&usbdrd3_1 {
+	vdd33-supply = <&ldo9_reg>;
+	vdd10-supply = <&ldo11_reg>;
+};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index b69be5c..feb9d34 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -144,6 +144,15 @@
 				clock-names = "ipg", "per";
 			};
 
+			rtc: rtc@10007000 {
+				compatible = "fsl,imx21-rtc";
+				reg = <0x10007000 0x1000>;
+				interrupts = <22>;
+				clocks = <&clks IMX27_CLK_CKIL>,
+					 <&clks IMX27_CLK_RTC_IPG_GATE>;
+				clock-names = "ref", "ipg";
+			};
+
 			kpp: kpp@10008000 {
 				compatible = "fsl,imx27-kpp", "fsl,imx21-kpp";
 				reg = <0x10008000 0x1000>;
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index b0d5542..53fd75c 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -228,10 +228,11 @@
 			>;
 		};
 
+		/* open drain */
 		pinctrl_i2c1: i2c1grp {
 			fsl,pins = <
-				MX53_PAD_CSI0_DAT8__I2C1_SDA		0xc0000000
-				MX53_PAD_CSI0_DAT9__I2C1_SCL		0xc0000000
+				MX53_PAD_CSI0_DAT8__I2C1_SDA		0x400001ec
+				MX53_PAD_CSI0_DAT9__I2C1_SCL		0x400001ec
 			>;
 		};
 
diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts
index 82d623d..66e47de 100644
--- a/arch/arm/boot/dts/imx53-qsrb.dts
+++ b/arch/arm/boot/dts/imx53-qsrb.dts
@@ -20,15 +20,7 @@
 };
 
 &iomuxc {
-	i2c1 {
-		/* open drain */
-		pinctrl_i2c1_qsrb: i2c1grp-1 {
-			fsl,pins = <
-				MX53_PAD_CSI0_DAT8__I2C1_SDA      0x400001ec
-				MX53_PAD_CSI0_DAT9__I2C1_SCL      0x400001ec
-			>;
-		};
-
+	imx53-qsrb {
 		pinctrl_pmic: pmicgrp {
 			fsl,pins = <
 				MX53_PAD_CSI0_DAT5__GPIO5_23	0x1e4 /* IRQ */
@@ -38,10 +30,6 @@
 };
 
 &i2c1 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_i2c1_qsrb>;
-	status = "okay";
-
 	pmic: mc34708@8 {
 		compatible = "fsl,mc34708";
 		pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index f2867c4..7b31fdb 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -248,7 +248,6 @@
 				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
 				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
 				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
-				MX6QDL_PAD_NANDF_CS1__NAND_CE1_B	0xb0b1
 				MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
 				MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
 				MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 4493f6e..1b66328 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -316,10 +316,13 @@
 };
 
 &usdhc3 {
-	pinctrl-names = "default";
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
+	no-1-8-v; /* firmware will remove if board revision supports */
 	status = "okay";
 };
 
@@ -380,7 +383,6 @@
 				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
 				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
 				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
-				MX6QDL_PAD_NANDF_CS1__NAND_CE1_B	0xb0b1
 				MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
 				MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
 				MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
@@ -469,7 +471,34 @@
 				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
 				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
 				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
-				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00	0x1b0b0 /* CD */
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x17059 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x17059
+			>;
+		};
+
+		pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170b9
+				MX6QDL_PAD_SD3_CLK__SD3_CLK		0x170b9
+				MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170b9
+				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170b9
+				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170b9
+				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170b9
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170b9 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170b9
+			>;
+		};
+
+		pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170f9
+				MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100f9
+				MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170f9
+				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170f9
+				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170f9
+				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170f9
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170f9 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170f9
 			>;
 		};
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index a857d12..7c51839 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -322,10 +322,13 @@
 };
 
 &usdhc3 {
-	pinctrl-names = "default";
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
+	no-1-8-v; /* firmware will remove if board revision supports */
 	status = "okay";
 };
 
@@ -385,7 +388,6 @@
 				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
 				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
 				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
-				MX6QDL_PAD_NANDF_CS1__NAND_CE1_B	0xb0b1
 				MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
 				MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
 				MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
@@ -476,7 +478,34 @@
 				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
 				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
 				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
-				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x1b0b0 /* CD */
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x17059 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x17059
+			>;
+		};
+
+		pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170b9
+				MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100b9
+				MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170b9
+				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170b9
+				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170b9
+				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170b9
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170b9 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170b9
+			>;
+		};
+
+		pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170f9
+				MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100f9
+				MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170f9
+				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170f9
+				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170f9
+				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170f9
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170f9 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170f9
 			>;
 		};
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 1afe338..929e0b3 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -415,10 +415,13 @@
 };
 
 &usdhc3 {
-	pinctrl-names = "default";
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc3>;
+	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
+	no-1-8-v; /* firmware will remove if board revision supports */
 	status = "okay";
 };
 
@@ -478,7 +481,6 @@
 				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
 				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
 				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
-				MX6QDL_PAD_NANDF_CS1__NAND_CE1_B	0xb0b1
 				MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
 				MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
 				MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
@@ -568,6 +570,34 @@
 				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x17059
 				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x17059
 				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x17059
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x17059 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x17059
+			>;
+		};
+
+		pinctrl_usdhc3_100mhz: usdhc3grp100mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170b9
+				MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100b9
+				MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170b9
+				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170b9
+				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170b9
+				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170b9
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170b9 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170b9
+			>;
+		};
+
+		pinctrl_usdhc3_200mhz: usdhc3grp200mhz {
+			fsl,pins = <
+				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x170f9
+				MX6QDL_PAD_SD3_CLK__SD3_CLK		0x100f9
+				MX6QDL_PAD_SD3_DAT0__SD3_DATA0		0x170f9
+				MX6QDL_PAD_SD3_DAT1__SD3_DATA1		0x170f9
+				MX6QDL_PAD_SD3_DAT2__SD3_DATA2		0x170f9
+				MX6QDL_PAD_SD3_DAT3__SD3_DATA3		0x170f9
+				MX6QDL_PAD_SD3_DAT5__GPIO7_IO00		0x170f9 /* CD */
+				MX6QDL_PAD_NANDF_CS1__SD3_VSELECT	0x170f9
 			>;
 		};
 	};
diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
index d1866a0..741f3d5 100644
--- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
@@ -250,7 +250,6 @@
 				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
 				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
 				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
-				MX6QDL_PAD_NANDF_CS1__NAND_CE1_B	0xb0b1
 				MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
 				MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
 				MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
index 5c6587f..d1e5048 100644
--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
@@ -202,7 +202,6 @@
 				MX6QDL_PAD_NANDF_WP_B__NAND_WP_B	0xb0b1
 				MX6QDL_PAD_NANDF_RB0__NAND_READY_B	0xb000
 				MX6QDL_PAD_NANDF_CS0__NAND_CE0_B	0xb0b1
-				MX6QDL_PAD_NANDF_CS1__NAND_CE1_B	0xb0b1
 				MX6QDL_PAD_SD4_CMD__NAND_RE_B		0xb0b1
 				MX6QDL_PAD_SD4_CLK__NAND_WE_B		0xb0b1
 				MX6QDL_PAD_NANDF_D0__NAND_DATA00	0xb0b1
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index d7fe667..340bc8e 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -54,6 +54,17 @@
 			gpio = <&gpio3 22 0>;
 			enable-active-high;
 		};
+
+		reg_can_xcvr: regulator@3 {
+			compatible = "regulator-fixed";
+			reg = <3>;
+			regulator-name = "CAN XCVR";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_can_xcvr>;
+			gpio = <&gpio1 2 GPIO_ACTIVE_LOW>;
+		};
 	};
 
 	gpio-keys {
@@ -149,6 +160,20 @@
 	status = "okay";
 };
 
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_can1>;
+	xceiver-supply = <&reg_can_xcvr>;
+	status = "okay";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
 &ecspi1 {
 	fsl,spi-num-chipselects = <1>;
 	cs-gpios = <&gpio3 19 0>;
@@ -245,6 +270,20 @@
 			>;
 		};
 
+		pinctrl_can1: can1grp {
+			fsl,pins = <
+				MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX	0x1b0b0
+				MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX	0x1b0b0
+			>;
+		};
+
+		pinctrl_can_xcvr: can-xcvrgrp {
+			fsl,pins = <
+				/* Flexcan XCVR enable */
+				MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x1b0b0
+			>;
+		};
+
 		pinctrl_ecspi1: ecspi1grp {
 			fsl,pins = <
 				MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x100b1
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index e329ca5..c37bb9ff 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -28,6 +28,71 @@
 		};
 	};
 
+	clocks {
+		codec_osc: anaclk2 {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <24576000>;
+		};
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_audio: regulator@0 {
+			compatible = "regulator-fixed";
+			reg = <0>;
+			regulator-name = "cs42888_supply";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
+		reg_usb_h1_vbus: regulator@1 {
+			compatible = "regulator-fixed";
+			reg = <1>;
+			regulator-name = "usb_h1_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			gpio = <&max7310_b 7 GPIO_ACTIVE_HIGH>;
+			enable-active-high;
+		};
+
+		reg_usb_otg_vbus: regulator@2 {
+			compatible = "regulator-fixed";
+			reg = <2>;
+			regulator-name = "usb_otg_vbus";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			gpio = <&max7310_c 1 GPIO_ACTIVE_HIGH>;
+			enable-active-high;
+		};
+	};
+
+	sound-cs42888 {
+		compatible = "fsl,imx6-sabreauto-cs42888",
+			"fsl,imx-audio-cs42888";
+		model = "imx-cs42888";
+		audio-cpu = <&esai>;
+		audio-asrc = <&asrc>;
+		audio-codec = <&codec>;
+		audio-routing =
+			"Line Out Jack", "AOUT1L",
+			"Line Out Jack", "AOUT1R",
+			"Line Out Jack", "AOUT2L",
+			"Line Out Jack", "AOUT2R",
+			"Line Out Jack", "AOUT3L",
+			"Line Out Jack", "AOUT3R",
+			"Line Out Jack", "AOUT4L",
+			"Line Out Jack", "AOUT4R",
+			"AIN1L", "Line In Jack",
+			"AIN1R", "Line In Jack",
+			"AIN2L", "Line In Jack",
+			"AIN2R", "Line In Jack";
+	};
+
 	sound-spdif {
 		compatible = "fsl,imx-audio-spdif",
 			   "fsl,imx-sabreauto-spdif";
@@ -45,6 +110,19 @@
 	};
 };
 
+&clks {
+	assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>,
+			  <&clks IMX6QDL_PLL4_BYPASS>,
+			  <&clks IMX6QDL_CLK_PLL4_POST_DIV>,
+			  <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>,
+				 <&clks IMX6QDL_PLL4_BYPASS_SRC>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+	assigned-clock-rates = <0>, <0>, <24576000>;
+};
+
 &ecspi1 {
 	fsl,spi-num-chipselects = <1>;
 	cs-gpios = <&gpio3 19 0>;
@@ -61,6 +139,16 @@
 	};
 };
 
+&esai {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_esai>;
+	assigned-clocks = <&clks IMX6QDL_CLK_ESAI_SEL>,
+			  <&clks IMX6QDL_CLK_ESAI_EXTAL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL4_AUDIO_DIV>;
+	assigned-clock-rates = <0>, <24576000>;
+	status = "okay";
+};
+
 &fec {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet>;
@@ -76,6 +164,10 @@
 	status = "okay";
 };
 
+&hdmi {
+	status = "okay";
+};
+
 &i2c2 {
 	clock-frequency = <100000>;
 	pinctrl-names = "default";
@@ -180,6 +272,18 @@
 			};
 		};
 	};
+
+	codec: cs42888@48 {
+		compatible = "cirrus,cs42888";
+		reg = <0x48>;
+		clocks = <&codec_osc>;
+		clock-names = "mclk";
+		VA-supply = <&reg_audio>;
+		VD-supply = <&reg_audio>;
+		VLS-supply = <&reg_audio>;
+		VLC-supply = <&reg_audio>;
+	};
+
 };
 
 &i2c3 {
@@ -257,6 +361,21 @@
 			>;
 		};
 
+		pinctrl_esai: esaigrp {
+			fsl,pins = <
+				MX6QDL_PAD_ENET_CRS_DV__ESAI_TX_CLK 0x1b030
+				MX6QDL_PAD_ENET_RXD1__ESAI_TX_FS    0x1b030
+				MX6QDL_PAD_ENET_TX_EN__ESAI_TX3_RX2 0x1b030
+				MX6QDL_PAD_GPIO_5__ESAI_TX2_RX3     0x1b030
+				MX6QDL_PAD_ENET_TXD0__ESAI_TX4_RX1  0x1b030
+				MX6QDL_PAD_ENET_MDC__ESAI_TX5_RX0   0x1b030
+				MX6QDL_PAD_GPIO_17__ESAI_TX0        0x1b030
+				MX6QDL_PAD_NANDF_CS3__ESAI_TX1      0x1b030
+				MX6QDL_PAD_ENET_MDIO__ESAI_RX_CLK   0x1b030
+				MX6QDL_PAD_GPIO_9__ESAI_RX_FS       0x1b030
+			>;
+		};
+
 		pinctrl_gpio_leds: gpioledsgrp {
 			fsl,pins = <
 				MX6QDL_PAD_DISP0_DAT21__GPIO5_IO15	0x80000000
@@ -318,6 +437,12 @@
 			>;
 		};
 
+		pinctrl_usbotg: usbotggrp {
+			fsl,pins = <
+				MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+			>;
+		};
+
 		pinctrl_usdhc3: usdhc3grp {
 			fsl,pins = <
 				MX6QDL_PAD_SD3_CMD__SD3_CMD		0x17059
@@ -462,6 +587,18 @@
 	status = "okay";
 };
 
+&usbh1 {
+	vbus-supply = <&reg_usb_h1_vbus>;
+	status = "okay";
+};
+
+&usbotg {
+	vbus-supply = <&reg_usb_otg_vbus>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usbotg>;
+	status = "okay";
+};
+
 &usdhc3 {
 	pinctrl-names = "default", "state_100mhz", "state_200mhz";
 	pinctrl-0 = <&pinctrl_usdhc3>;
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index 7823793..ce4c731 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -53,6 +53,17 @@
 			gpio = <&gpio3 22 0>;
 			enable-active-high;
 		};
+
+		reg_can_xcvr: regulator@3 {
+			compatible = "regulator-fixed";
+			reg = <3>;
+			regulator-name = "CAN XCVR";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_can_xcvr>;
+			gpio = <&gpio1 2 GPIO_ACTIVE_LOW>;
+		};
 	};
 
 	gpio-keys {
@@ -148,6 +159,20 @@
 	status = "okay";
 };
 
+&can1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_can1>;
+	xceiver-supply = <&reg_can_xcvr>;
+	status = "okay";
+};
+
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
 &ecspi1 {
 	fsl,spi-num-chipselects = <1>;
 	cs-gpios = <&gpio3 19 0>;
@@ -239,6 +264,20 @@
 			>;
 		};
 
+		pinctrl_can1: can1grp {
+			fsl,pins = <
+				MX6QDL_PAD_KEY_COL2__FLEXCAN1_TX	0x1b0b0
+				MX6QDL_PAD_KEY_ROW2__FLEXCAN1_RX	0x1b0b0
+			>;
+		};
+
+		pinctrl_can_xcvr: can-xcvrgrp {
+			fsl,pins = <
+				/* Flexcan XCVR enable */
+				MX6QDL_PAD_GPIO_2__GPIO1_IO02		0x1b0b0
+			>;
+		};
+
 		pinctrl_ecspi1: ecspi1grp {
 			fsl,pins = <
 				MX6QDL_PAD_EIM_D17__ECSPI1_MISO		0x100b1
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index 944eb81..2c07d3a 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -141,6 +141,13 @@
 	status = "okay";
 };
 
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
+				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
+};
+
 &ecspi1 {
 	fsl,spi-num-chipselects = <1>;
 	cs-gpios = <&gpio4 9 0>;
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index b57033e..e716e6f 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -300,8 +300,19 @@
 				};
 
 				esai: esai@02024000 {
+					#sound-dai-cells = <0>;
+					compatible = "fsl,imx35-esai";
 					reg = <0x02024000 0x4000>;
 					interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6QDL_CLK_ESAI_IPG>,
+						 <&clks IMX6QDL_CLK_ESAI_MEM>,
+						 <&clks IMX6QDL_CLK_ESAI_EXTAL>,
+						 <&clks IMX6QDL_CLK_ESAI_IPG>,
+						 <&clks IMX6QDL_CLK_SPBA>;
+					clock-names = "core", "mem", "extal", "fsys", "dma";
+					dmas = <&sdma 23 21 0>, <&sdma 24 21 0>;
+					dma-names = "rx", "tx";
+					status = "disabled";
 				};
 
 				ssi1: ssi@02028000 {
@@ -353,8 +364,28 @@
 				};
 
 				asrc: asrc@02034000 {
+					compatible = "fsl,imx53-asrc";
 					reg = <0x02034000 0x4000>;
 					interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6QDL_CLK_ASRC_IPG>,
+						<&clks IMX6QDL_CLK_ASRC_MEM>, <&clks 0>,
+						<&clks 0>, <&clks 0>, <&clks 0>, <&clks 0>,
+						<&clks 0>, <&clks 0>, <&clks 0>, <&clks 0>,
+						<&clks 0>, <&clks 0>, <&clks 0>, <&clks 0>,
+						<&clks IMX6QDL_CLK_ASRC>, <&clks 0>, <&clks 0>,
+						<&clks IMX6QDL_CLK_SPBA>;
+					clock-names = "mem", "ipg", "asrck_0",
+						"asrck_1", "asrck_2", "asrck_3", "asrck_4",
+						"asrck_5", "asrck_6", "asrck_7", "asrck_8",
+						"asrck_9", "asrck_a", "asrck_b", "asrck_c",
+						"asrck_d", "asrck_e", "asrck_f", "dma";
+					dmas = <&sdma 17 23 1>, <&sdma 18 23 1>, <&sdma 19 23 1>,
+						<&sdma 20 23 1>, <&sdma 21 23 1>, <&sdma 22 23 1>;
+					dma-names = "rxa", "rxb", "rxc",
+							"txa", "txb", "txc";
+					fsl,asrc-rate  = <48000>;
+					fsl,asrc-width = <16>;
+					status = "okay";
 				};
 
 				spba@0203c000 {
@@ -687,22 +718,23 @@
 				fsl,anatop = <&anatop>;
 			};
 
-			snvs@020cc000 {
-				compatible = "fsl,sec-v4.0-mon", "simple-bus";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				ranges = <0 0x020cc000 0x4000>;
+			snvs: snvs@020cc000 {
+				compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+				reg = <0x020cc000 0x4000>;
 
-				snvs_rtc: snvs-rtc-lp@34 {
+				snvs_rtc: snvs-rtc-lp {
 					compatible = "fsl,sec-v4.0-mon-rtc-lp";
-					reg = <0x34 0x58>;
+					regmap = <&snvs>;
+					offset = <0x34>;
 					interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
 						     <0 20 IRQ_TYPE_LEVEL_HIGH>;
 				};
 
-				snvs_poweroff: snvs-poweroff@38 {
-					compatible = "fsl,sec-v4.0-poweroff";
-					reg = <0x38 0x4>;
+				snvs_poweroff: snvs-poweroff {
+					compatible = "syscon-poweroff";
+					regmap = <&snvs>;
+					offset = <0x38>;
+					mask = <0x60>;
 					status = "disabled";
 				};
 			};
@@ -836,10 +868,31 @@
 			reg = <0x02100000 0x100000>;
 			ranges;
 
-			caam@02100000 {
-				reg = <0x02100000 0x40000>;
-				interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>,
-					     <0 106 IRQ_TYPE_LEVEL_HIGH>;
+			crypto: caam@2100000 {
+				compatible = "fsl,sec-v4.0";
+				fsl,sec-era = <4>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x2100000 0x10000>;
+				ranges = <0 0x2100000 0x10000>;
+				interrupt-parent = <&intc>;
+				clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
+					 <&clks IMX6QDL_CLK_CAAM_ACLK>,
+					 <&clks IMX6QDL_CLK_CAAM_IPG>,
+					 <&clks IMX6QDL_CLK_EIM_SLOW>;
+				clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+				sec_jr0: jr0@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
 			};
 
 			aipstz@0217c000 { /* AIPSTZ2 */
diff --git a/arch/arm/boot/dts/imx6sl-warp.dts b/arch/arm/boot/dts/imx6sl-warp.dts
index 0da906b..10c6996 100644
--- a/arch/arm/boot/dts/imx6sl-warp.dts
+++ b/arch/arm/boot/dts/imx6sl-warp.dts
@@ -61,7 +61,9 @@
 	usdhc3_pwrseq: usdhc3_pwrseq {
 		compatible = "mmc-pwrseq-simple";
 		reset-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>, 	/* WL_REG_ON */
+			      <&gpio4 7 GPIO_ACTIVE_LOW>, 	/* WL_HOSTWAKE */
 			      <&gpio3 25 GPIO_ACTIVE_LOW>, 	/* BT_REG_ON */
+			      <&gpio3 27 GPIO_ACTIVE_LOW>,	/* BT_HOSTWAKE */
 			      <&gpio4 4 GPIO_ACTIVE_LOW>, 	/* BT_WAKE */
 			      <&gpio4 6 GPIO_ACTIVE_LOW>; 	/* BT_RST_N */
 	};
@@ -73,19 +75,19 @@
 	status = "okay";
 };
 
-&uart2 {
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_uart2>;
-	fsl,uart-has-rtscts;
-	status = "okay";
-};
-
 &uart3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_uart3>;
 	status = "okay";
 };
 
+&uart5 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart5>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
 &usbotg1 {
 	dr_mode = "peripheral";
 	disable-over-current;
@@ -130,14 +132,6 @@
 			>;
 		};
 
-		pinctrl_uart2: uart2grp {
-			fsl,pins = <
-				MX6SL_PAD_EPDC_D12__UART2_RX_DATA	0x41b0b1
-				MX6SL_PAD_EPDC_D13__UART2_TX_DATA	0x41b0b1
-				MX6SL_PAD_EPDC_D14__UART2_RTS_B		0x4130B1
-				MX6SL_PAD_EPDC_D15__UART2_CTS_B		0x4130B1
-			>;
-		};
 
 		pinctrl_uart3: uart3grp {
 			fsl,pins = <
@@ -146,6 +140,15 @@
 			>;
 		};
 
+		pinctrl_uart5: uart5grp {
+			fsl,pins = <
+				MX6SL_PAD_ECSPI1_SCLK__UART5_RX_DATA	0x41b0b1
+				MX6SL_PAD_ECSPI1_MOSI__UART5_TX_DATA	0x41b0b1
+				MX6SL_PAD_ECSPI1_MISO__UART5_RTS_B	0x4130b1
+				MX6SL_PAD_ECSPI1_SS0__UART5_CTS_B	0x4130b1
+			>;
+		};
+
 		pinctrl_usdhc2: usdhc2grp {
 			fsl,pins = <
 				MX6SL_PAD_SD2_CMD__SD2_CMD		0x417059
@@ -158,6 +161,7 @@
 				MX6SL_PAD_SD2_DAT5__SD2_DATA5		0x417059
 				MX6SL_PAD_SD2_DAT6__SD2_DATA6		0x417059
 				MX6SL_PAD_SD2_DAT7__SD2_DATA7		0x417059
+				MX6SL_PAD_SD2_RST__SD2_RESET		0x417059
 			>;
 		};
 
@@ -173,6 +177,7 @@
 				MX6SL_PAD_SD2_DAT5__SD2_DATA5		0x4170b9
 				MX6SL_PAD_SD2_DAT6__SD2_DATA6		0x4170b9
 				MX6SL_PAD_SD2_DAT7__SD2_DATA7		0x4170b9
+				MX6SL_PAD_SD2_RST__SD2_RESET		0x4170b9
 			>;
 		};
 
@@ -188,6 +193,7 @@
 				MX6SL_PAD_SD2_DAT5__SD2_DATA5		0x4170f9
 				MX6SL_PAD_SD2_DAT6__SD2_DATA6		0x4170f9
 				MX6SL_PAD_SD2_DAT7__SD2_DATA7		0x4170f9
+				MX6SL_PAD_SD2_RST__SD2_RESET		0x4170f9
 			>;
 		};
 
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index a78e715..320a27f 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -563,22 +563,23 @@
 				fsl,anatop = <&anatop>;
 			};
 
-			snvs@020cc000 {
-				compatible = "fsl,sec-v4.0-mon", "simple-bus";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				ranges = <0 0x020cc000 0x4000>;
+			snvs: snvs@020cc000 {
+				compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+				reg = <0x020cc000 0x4000>;
 
-				snvs_rtc: snvs-rtc-lp@34 {
+				snvs_rtc: snvs-rtc-lp {
 					compatible = "fsl,sec-v4.0-mon-rtc-lp";
-					reg = <0x34 0x58>;
+					regmap = <&snvs>;
+					offset = <0x34>;
 					interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>,
 						     <0 20 IRQ_TYPE_LEVEL_HIGH>;
 				};
 
-				snvs_poweroff: snvs-poweroff@38 {
-					compatible = "fsl,sec-v4.0-poweroff";
-					reg = <0x38 0x4>;
+				snvs_poweroff: snvs-poweroff {
+					compatible = "syscon-poweroff";
+					regmap = <&snvs>;
+					offset = <0x38>;
+					mask = <0x60>;
 					status = "disabled";
 				};
 			};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 708175d..c94f2ea 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -8,6 +8,7 @@
 
 #include <dt-bindings/clock/imx6sx-clock.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include "imx6sx-pinfunc.h"
 #include "skeleton.dtsi"
@@ -662,22 +663,31 @@
 			};
 
 			snvs: snvs@020cc000 {
-				compatible = "fsl,sec-v4.0-mon", "simple-bus";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				ranges = <0 0x020cc000 0x4000>;
+				compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+				reg = <0x020cc000 0x4000>;
 
-				snvs_rtc: snvs-rtc-lp@34 {
+				snvs_rtc: snvs-rtc-lp {
 					compatible = "fsl,sec-v4.0-mon-rtc-lp";
-					reg = <0x34 0x58>;
+					regmap = <&snvs>;
+					offset = <0x34>;
 					interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 				};
 
-				snvs_poweroff: snvs-poweroff@38 {
-					compatible = "fsl,sec-v4.0-poweroff";
-					reg = <0x38 0x4>;
+				snvs_poweroff: snvs-poweroff {
+					compatible = "syscon-poweroff";
+					regmap = <&snvs>;
+					offset = <0x38>;
+					mask = <0x60>;
 					status = "disabled";
 				};
+
+				snvs_pwrkey: snvs-powerkey {
+					compatible = "fsl,sec-v4.0-pwrkey";
+					regmap = <&snvs>;
+					interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+					linux,keycode = <KEY_POWER>;
+					wakeup-source;
+				};
 			};
 
 			epit1: epit@020d0000 {
@@ -738,6 +748,33 @@
 			reg = <0x02100000 0x100000>;
 			ranges;
 
+			crypto: caam@2100000 {
+				compatible = "fsl,sec-v4.0";
+				fsl,sec-era = <4>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x2100000 0x10000>;
+				ranges = <0 0x2100000 0x10000>;
+				interrupt-parent = <&intc>;
+				clocks = <&clks IMX6SX_CLK_CAAM_MEM>,
+					 <&clks IMX6SX_CLK_CAAM_ACLK>,
+					 <&clks IMX6SX_CLK_CAAM_IPG>,
+					 <&clks IMX6SX_CLK_EIM_SLOW>;
+				clock-names = "mem", "aclk", "ipg", "emi_slow";
+
+				sec_jr0: jr0@1000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x1000 0x1000>;
+					interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				sec_jr1: jr1@2000 {
+					compatible = "fsl,sec-v4.0-job-ring";
+					reg = <0x2000 0x1000>;
+					interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+				};
+			};
+
 			usbotg1: usb@02184000 {
 				compatible = "fsl,imx6sx-usb", "fsl,imx27-usb";
 				reg = <0x02184000 0x200>;
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
new file mode 100644
index 0000000..25746b1
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/input/input.h>
+#include "imx6ul.dtsi"
+
+/ {
+	model = "Freescale i.MX6 UltraLite 14x14 EVK Board";
+	compatible = "fsl,imx6ul-14x14-evk", "fsl,imx6ul";
+
+	chosen {
+		stdout-path = &uart1;
+	};
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_sd1_vmmc: sd1_regulator {
+			compatible = "regulator-fixed";
+			regulator-name = "VSD_3V3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+			enable-active-high;
+		};
+	};
+};
+
+&cpu0 {
+	arm-supply = <&reg_arm>;
+	soc-supply = <&reg_soc>;
+};
+
+&fec1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet1>;
+	phy-mode = "rmii";
+	phy-handle = <&ethphy0>;
+	status = "okay";
+};
+
+&fec2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_enet2>;
+	phy-mode = "rmii";
+	phy-handle = <&ethphy1>;
+	status = "okay";
+
+	mdio {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		ethphy0: ethernet-phy@2 {
+			reg = <2>;
+		};
+
+		ethphy1: ethernet-phy@1 {
+			reg = <1>;
+		};
+	};
+};
+
+&qspi {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_qspi>;
+	status = "okay";
+
+	flash0: n25q256a@0 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "micron,n25q256a";
+		spi-max-frequency = <29000000>;
+		reg = <0>;
+	};
+};
+
+&uart1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart1>;
+	status = "okay";
+};
+
+&uart2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_uart2>;
+	fsl,uart-has-rtscts;
+	status = "okay";
+};
+
+&usbotg1 {
+	dr_mode = "peripheral";
+	status = "okay";
+};
+
+&usbotg2 {
+	dr_mode = "host";
+	disable-over-current;
+	status = "okay";
+};
+
+&usdhc1 {
+	pinctrl-names = "default", "state_100mhz", "state_200mhz";
+	pinctrl-0 = <&pinctrl_usdhc1>;
+	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+	cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
+	keep-power-in-suspend;
+	enable-sdio-wakeup;
+	vmmc-supply = <&reg_sd1_vmmc>;
+	status = "okay";
+};
+
+&usdhc2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pinctrl_usdhc2>;
+	no-1-8-v;
+	keep-power-in-suspend;
+	enable-sdio-wakeup;
+	status = "okay";
+};
+
+&iomuxc {
+	pinctrl-names = "default";
+
+	pinctrl_csi1: csi1grp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_MCLK__CSI_MCLK		0x1b088
+			MX6UL_PAD_CSI_PIXCLK__CSI_PIXCLK	0x1b088
+			MX6UL_PAD_CSI_VSYNC__CSI_VSYNC		0x1b088
+			MX6UL_PAD_CSI_HSYNC__CSI_HSYNC		0x1b088
+			MX6UL_PAD_CSI_DATA00__CSI_DATA02	0x1b088
+			MX6UL_PAD_CSI_DATA01__CSI_DATA03	0x1b088
+			MX6UL_PAD_CSI_DATA02__CSI_DATA04	0x1b088
+			MX6UL_PAD_CSI_DATA03__CSI_DATA05	0x1b088
+			MX6UL_PAD_CSI_DATA04__CSI_DATA06	0x1b088
+			MX6UL_PAD_CSI_DATA05__CSI_DATA07	0x1b088
+			MX6UL_PAD_CSI_DATA06__CSI_DATA08	0x1b088
+			MX6UL_PAD_CSI_DATA07__CSI_DATA09	0x1b088
+		>;
+	};
+
+	pinctrl_enet1: enet1grp {
+		fsl,pins = <
+			MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN	0x1b0b0
+			MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER	0x1b0b0
+			MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00	0x1b0b0
+			MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01	0x1b0b0
+			MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN	0x1b0b0
+			MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00	0x1b0b0
+			MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01	0x1b0b0
+			MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1	0x4001b031
+		>;
+	};
+
+	pinctrl_enet2: enet2grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO07__ENET2_MDC		0x1b0b0
+			MX6UL_PAD_GPIO1_IO06__ENET2_MDIO	0x1b0b0
+			MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN	0x1b0b0
+			MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER	0x1b0b0
+			MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00	0x1b0b0
+			MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01	0x1b0b0
+			MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN	0x1b0b0
+			MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00	0x1b0b0
+			MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01	0x1b0b0
+			MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2	0x4001b031
+			MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00	0x17059
+		>;
+	};
+
+	pinctrl_flexcan1: flexcan1grp{
+		fsl,pins = <
+			MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX	0x1b020
+			MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX	0x1b020
+		>;
+	};
+
+	pinctrl_flexcan2: flexcan2grp{
+		fsl,pins = <
+			MX6UL_PAD_UART2_RTS_B__FLEXCAN2_RX	0x1b020
+			MX6UL_PAD_UART2_CTS_B__FLEXCAN2_TX	0x1b020
+		>;
+	};
+
+	pinctrl_i2c1: i2c1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART4_TX_DATA__I2C1_SCL 0x4001b8b0
+			MX6UL_PAD_UART4_RX_DATA__I2C1_SDA 0x4001b8b0
+		>;
+	};
+
+	pinctrl_i2c2: i2c2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART5_TX_DATA__I2C2_SCL 0x4001b8b0
+			MX6UL_PAD_UART5_RX_DATA__I2C2_SDA 0x4001b8b0
+		>;
+	};
+
+	pinctrl_lcdif_dat: lcdifdatgrp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_DATA00__LCDIF_DATA00  0x79
+			MX6UL_PAD_LCD_DATA01__LCDIF_DATA01  0x79
+			MX6UL_PAD_LCD_DATA02__LCDIF_DATA02  0x79
+			MX6UL_PAD_LCD_DATA03__LCDIF_DATA03  0x79
+			MX6UL_PAD_LCD_DATA04__LCDIF_DATA04  0x79
+			MX6UL_PAD_LCD_DATA05__LCDIF_DATA05  0x79
+			MX6UL_PAD_LCD_DATA06__LCDIF_DATA06  0x79
+			MX6UL_PAD_LCD_DATA07__LCDIF_DATA07  0x79
+			MX6UL_PAD_LCD_DATA08__LCDIF_DATA08  0x79
+			MX6UL_PAD_LCD_DATA09__LCDIF_DATA09  0x79
+			MX6UL_PAD_LCD_DATA10__LCDIF_DATA10  0x79
+			MX6UL_PAD_LCD_DATA11__LCDIF_DATA11  0x79
+			MX6UL_PAD_LCD_DATA12__LCDIF_DATA12  0x79
+			MX6UL_PAD_LCD_DATA13__LCDIF_DATA13  0x79
+			MX6UL_PAD_LCD_DATA14__LCDIF_DATA14  0x79
+			MX6UL_PAD_LCD_DATA15__LCDIF_DATA15  0x79
+			MX6UL_PAD_LCD_DATA16__LCDIF_DATA16  0x79
+			MX6UL_PAD_LCD_DATA17__LCDIF_DATA17  0x79
+			MX6UL_PAD_LCD_DATA18__LCDIF_DATA18  0x79
+			MX6UL_PAD_LCD_DATA19__LCDIF_DATA19  0x79
+			MX6UL_PAD_LCD_DATA20__LCDIF_DATA20  0x79
+			MX6UL_PAD_LCD_DATA21__LCDIF_DATA21  0x79
+			MX6UL_PAD_LCD_DATA22__LCDIF_DATA22  0x79
+			MX6UL_PAD_LCD_DATA23__LCDIF_DATA23  0x79
+		>;
+	};
+
+	pinctrl_lcdif_ctrl: lcdifctrlgrp {
+		fsl,pins = <
+			MX6UL_PAD_LCD_CLK__LCDIF_CLK	    0x79
+			MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE  0x79
+			MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC    0x79
+			MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC    0x79
+			/* used for lcd reset */
+			MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09  0x79
+		>;
+	};
+
+	pinctrl_qspi: qspigrp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_WP_B__QSPI_A_SCLK	0x70a1
+			MX6UL_PAD_NAND_READY_B__QSPI_A_DATA00	0x70a1
+			MX6UL_PAD_NAND_CE0_B__QSPI_A_DATA01	0x70a1
+			MX6UL_PAD_NAND_CE1_B__QSPI_A_DATA02	0x70a1
+			MX6UL_PAD_NAND_CLE__QSPI_A_DATA03	0x70a1
+			MX6UL_PAD_NAND_DQS__QSPI_A_SS0_B	0x70a1
+		>;
+	};
+
+	pinctrl_pwm1: pwm1grp {
+		fsl,pins = <
+			MX6UL_PAD_GPIO1_IO08__PWM1_OUT   0x110b0
+		>;
+	};
+
+	pinctrl_sim2: sim2grp {
+		fsl,pins = <
+			MX6UL_PAD_CSI_DATA03__SIM2_PORT1_PD		0xb808
+			MX6UL_PAD_CSI_DATA04__SIM2_PORT1_CLK		0x31
+			MX6UL_PAD_CSI_DATA05__SIM2_PORT1_RST_B		0xb808
+			MX6UL_PAD_CSI_DATA06__SIM2_PORT1_SVEN		0xb808
+			MX6UL_PAD_CSI_DATA07__SIM2_PORT1_TRXD		0xb809
+			MX6UL_PAD_CSI_DATA02__GPIO4_IO23		0x3008
+		>;
+	};
+
+	pinctrl_uart1: uart1grp {
+		fsl,pins = <
+			MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX 0x1b0b1
+			MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX 0x1b0b1
+		>;
+	};
+
+	pinctrl_uart2: uart2grp {
+		fsl,pins = <
+			MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX	0x1b0b1
+			MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX	0x1b0b1
+			MX6UL_PAD_UART3_RX_DATA__UART2_DCE_RTS	0x1b0b1
+			MX6UL_PAD_UART3_TX_DATA__UART2_DCE_CTS	0x1b0b1
+		>;
+	};
+
+	pinctrl_usdhc1: usdhc1grp {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD     	0x17059
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK     	0x10059
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 	0x17059
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 	0x17059
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 	0x17059
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 	0x17059
+			MX6UL_PAD_UART1_RTS_B__GPIO1_IO19       0x17059 /* SD1 CD */
+			MX6UL_PAD_GPIO1_IO05__USDHC1_VSELECT    0x17059 /* SD1 VSELECT */
+			MX6UL_PAD_GPIO1_IO09__GPIO1_IO09        0x17059 /* SD1 RESET */
+		>;
+	};
+
+	pinctrl_usdhc1_100mhz: usdhc1grp100mhz {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD     0x170b9
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK     0x100b9
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170b9
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170b9
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170b9
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170b9
+
+		>;
+	};
+
+	pinctrl_usdhc1_200mhz: usdhc1grp200mhz {
+		fsl,pins = <
+			MX6UL_PAD_SD1_CMD__USDHC1_CMD     0x170f9
+			MX6UL_PAD_SD1_CLK__USDHC1_CLK     0x100f9
+			MX6UL_PAD_SD1_DATA0__USDHC1_DATA0 0x170f9
+			MX6UL_PAD_SD1_DATA1__USDHC1_DATA1 0x170f9
+			MX6UL_PAD_SD1_DATA2__USDHC1_DATA2 0x170f9
+			MX6UL_PAD_SD1_DATA3__USDHC1_DATA3 0x170f9
+		>;
+	};
+
+	pinctrl_usdhc2: usdhc2grp {
+		fsl,pins = <
+			MX6UL_PAD_NAND_RE_B__USDHC2_CLK     0x17059
+			MX6UL_PAD_NAND_WE_B__USDHC2_CMD     0x17059
+			MX6UL_PAD_NAND_DATA00__USDHC2_DATA0 0x17059
+			MX6UL_PAD_NAND_DATA01__USDHC2_DATA1 0x17059
+			MX6UL_PAD_NAND_DATA02__USDHC2_DATA2 0x17059
+			MX6UL_PAD_NAND_DATA03__USDHC2_DATA3 0x17059
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/imx6ul-pinfunc.h b/arch/arm/boot/dts/imx6ul-pinfunc.h
new file mode 100644
index 0000000..20c7da1
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul-pinfunc.h
@@ -0,0 +1,938 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DTS_IMX6UL_PINFUNC_H
+#define __DTS_IMX6UL_PINFUNC_H
+
+/*
+ * The pin function ID is a tuple of
+ * <mux_reg conf_reg input_reg mux_mode input_val>
+ */
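+/*
+ * A minimal reading of each field, following the common i.MX "fsl,pins"
+ * pinctrl binding (register offsets are relative to the IOMUXC base):
+ *   mux_reg   - mux control register offset for the pad
+ *   conf_reg  - pad configuration (control) register offset
+ *   input_reg - select input (daisy chain) register offset, 0 when unused
+ *   mux_mode  - mux mode (ALT) value written to mux_reg
+ *   input_val - value written to input_reg to route this pad to the module
+ * Board dts files append a sixth cell with the pad configuration value,
+ * e.g. MX6UL_PAD_GPIO1_IO08__PWM1_OUT 0x110b0 in a board "fsl,pins" entry.
+ */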
+#define MX6UL_PAD_BOOT_MODE0__GPIO5_IO10				0x0014 0x02a0 0x0000 5 0
+#define MX6UL_PAD_BOOT_MODE1__GPIO5_IO11				0x0018 0x02a4 0x0000 5 0
+
+#define MX6UL_PAD_SNVS_TAMPER0__GPIO5_IO00				0x001c 0x02a8 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER1__GPIO5_IO01				0x0020 0x02ac 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER2__GPIO5_IO02				0x0024 0x02b0 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER3__GPIO5_IO03				0x0028 0x02b4 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER4__GPIO5_IO04				0x002c 0x02b8 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER5__GPIO5_IO05				0x0030 0x02bc 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER6__GPIO5_IO06				0x0034 0x02c0 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER7__GPIO5_IO07				0x0038 0x02c4 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER8__GPIO5_IO08				0x003c 0x02c8 0x0000 5 0
+#define MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09				0x0040 0x02cc 0x0000 5 0
+
+#define MX6UL_PAD_JTAG_MOD__SJC_MOD                              	0x0044 0x02d0 0x0000 0 0
+#define MX6UL_PAD_JTAG_MOD__GPT2_CLK                             	0x0044 0x02d0 0x05a0 1 0
+#define MX6UL_PAD_JTAG_MOD__SPDIF_OUT                            	0x0044 0x02d0 0x0000 2 0
+#define MX6UL_PAD_JTAG_MOD__ENET1_REF_CLK_25M                    	0x0044 0x02d0 0x0000 3 0
+#define MX6UL_PAD_JTAG_MOD__CCM_PMIC_RDY                         	0x0044 0x02d0 0x04c0 4 0
+#define MX6UL_PAD_JTAG_MOD__GPIO1_IO10                           	0x0044 0x02d0 0x0000 5 0
+#define MX6UL_PAD_JTAG_MOD__SDMA_EXT_EVENT00                     	0x0044 0x02d0 0x0000 6 0
+#define MX6UL_PAD_JTAG_TMS__SJC_TMS                              	0x0048 0x02d4 0x0000 0 0
+#define MX6UL_PAD_JTAG_TMS__GPT2_CAPTURE1                        	0x0048 0x02d4 0x0598 1 0
+#define MX6UL_PAD_JTAG_TMS__SAI2_MCLK                            	0x0048 0x02d4 0x0000 2 0
+#define MX6UL_PAD_JTAG_TMS__CCM_CLKO1                            	0x0048 0x02d4 0x0000 3 0
+#define MX6UL_PAD_JTAG_TMS__CCM_WAIT                             	0x0048 0x02d4 0x0000 4 0
+#define MX6UL_PAD_JTAG_TMS__GPIO1_IO11                           	0x0048 0x02d4 0x0000 5 0
+#define MX6UL_PAD_JTAG_TMS__SDMA_EXT_EVENT01                     	0x0048 0x02d4 0x0000 6 0
+#define MX6UL_PAD_JTAG_TMS__EPIT1_OUT                            	0x0048 0x02d4 0x0000 8 0
+#define MX6UL_PAD_JTAG_TDO__SJC_TDO                              	0x004c 0x02d8 0x0000 0 0
+#define MX6UL_PAD_JTAG_TDO__GPT2_CAPTURE2                        	0x004c 0x02d8 0x059c 1 0
+#define MX6UL_PAD_JTAG_TDO__SAI2_TX_SYNC                         	0x004c 0x02d8 0x05fc 2 0
+#define MX6UL_PAD_JTAG_TDO__CCM_CLKO2                            	0x004c 0x02d8 0x0000 3 0
+#define MX6UL_PAD_JTAG_TDO__CCM_STOP                             	0x004c 0x02d8 0x0000 4 0
+#define MX6UL_PAD_JTAG_TDO__GPIO1_IO12                           	0x004c 0x02d8 0x0000 5 0
+#define MX6UL_PAD_JTAG_TDO__MQS_RIGHT                            	0x004c 0x02d8 0x0000 6 0
+#define MX6UL_PAD_JTAG_TDO__EPIT2_OUT                            	0x004c 0x02d8 0x0000 8 0
+#define MX6UL_PAD_JTAG_TDI__SJC_TDI                              	0x0050 0x02dc 0x0000 0 0
+#define MX6UL_PAD_JTAG_TDI__GPT2_COMPARE1                        	0x0050 0x02dc 0x0000 1 0
+#define MX6UL_PAD_JTAG_TDI__SAI2_TX_BCLK                         	0x0050 0x02dc 0x05f8 2 0
+#define MX6UL_PAD_JTAG_TDI__PWM6_OUT                             	0x0050 0x02dc 0x0000 4 0
+#define MX6UL_PAD_JTAG_TDI__GPIO1_IO13                           	0x0050 0x02dc 0x0000 5 0
+#define MX6UL_PAD_JTAG_TDI__MQS_LEFT                             	0x0050 0x02dc 0x0000 6 0
+#define MX6UL_PAD_JTAG_TDI__SIM1_POWER_FAIL                      	0x0050 0x02dc 0x0000 8 0
+#define MX6UL_PAD_JTAG_TCK__SJC_TCK                              	0x0054 0x02e0 0x0000 0 0
+#define MX6UL_PAD_JTAG_TCK__GPT2_COMPARE2                        	0x0054 0x02e0 0x0000 1 0
+#define MX6UL_PAD_JTAG_TCK__SAI2_RX_DATA                         	0x0054 0x02e0 0x0000 2 0
+#define MX6UL_PAD_JTAG_TCK__PWM7_OUT                             	0x0054 0x02e0 0x0000 4 0
+#define MX6UL_PAD_JTAG_TCK__GPIO1_IO14                           	0x0054 0x02e0 0x0000 5 0
+#define MX6UL_PAD_JTAG_TCK__SIM2_POWER_FAIL                      	0x0054 0x02e0 0x0000 8 0
+#define MX6UL_PAD_JTAG_TRST_B__SJC_TRSTB                         	0x0058 0x02e4 0x0000 0 0
+#define MX6UL_PAD_JTAG_TRST_B__GPT2_COMPARE3                     	0x0058 0x02e4 0x0000 1 0
+#define MX6UL_PAD_JTAG_TRST_B__SAI2_TX_DATA                      	0x0058 0x02e4 0x0000 2 0
+#define MX6UL_PAD_JTAG_TRST_B__PWM8_OUT                          	0x0058 0x02e4 0x0000 4 0
+#define MX6UL_PAD_JTAG_TRST_B__GPIO1_IO15                        	0x0058 0x02e4 0x0000 5 0
+#define MX6UL_PAD_JTAG_TRST_B__CAAM_RNG_OSC_OBS                  	0x0058 0x02e4 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO00__I2C2_SCL                           	0x005c 0x02e8 0x05ac 0 1
+#define MX6UL_PAD_GPIO1_IO00__GPT1_CAPTURE1                      	0x005c 0x02e8 0x058c 1 0
+#define MX6UL_PAD_GPIO1_IO00__ANATOP_OTG1_ID                     	0x005c 0x02e8 0x04b8 2 0
+#define MX6UL_PAD_GPIO1_IO00__ENET1_REF_CLK1                     	0x005c 0x02e8 0x0574 3 0
+#define MX6UL_PAD_GPIO1_IO00__MQS_RIGHT                          	0x005c 0x02e8 0x0000 4 0
+#define MX6UL_PAD_GPIO1_IO00__GPIO1_IO00                         	0x005c 0x02e8 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO00__ENET1_1588_EVENT0_IN               	0x005c 0x02e8 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO00__SRC_SYSTEM_RESET                   	0x005c 0x02e8 0x0000 7 0
+#define MX6UL_PAD_GPIO1_IO00__WDOG3_WDOG_B                       	0x005c 0x02e8 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO01__I2C2_SDA                           	0x0060 0x02ec 0x05b0 0 1
+#define MX6UL_PAD_GPIO1_IO01__GPT1_COMPARE1                      	0x0060 0x02ec 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO01__USB_OTG1_OC                        	0x0060 0x02ec 0x0664 2 0
+#define MX6UL_PAD_GPIO1_IO01__ENET2_REF_CLK2                     	0x0060 0x02ec 0x057c 3 0
+#define MX6UL_PAD_GPIO1_IO01__MQS_LEFT                           	0x0060 0x02ec 0x0000 4 0
+#define MX6UL_PAD_GPIO1_IO01__GPIO1_IO01                         	0x0060 0x02ec 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO01__ENET1_1588_EVENT0_OUT              	0x0060 0x02ec 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO01__SRC_EARLY_RESET                    	0x0060 0x02ec 0x0000 7 0
+#define MX6UL_PAD_GPIO1_IO01__WDOG1_WDOG_B                       	0x0060 0x02ec 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO02__I2C1_SCL                           	0x0064 0x02f0 0x05a4 0 0
+#define MX6UL_PAD_GPIO1_IO02__GPT1_COMPARE2                      	0x0064 0x02f0 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO02__USB_OTG2_PWR                       	0x0064 0x02f0 0x0000 2 0
+#define MX6UL_PAD_GPIO1_IO02__ENET1_REF_CLK_25M                  	0x0064 0x02f0 0x0000 3 0
+#define MX6UL_PAD_GPIO1_IO02__USDHC1_WP                          	0x0064 0x02f0 0x066c 4 0
+#define MX6UL_PAD_GPIO1_IO02__GPIO1_IO02                         	0x0064 0x02f0 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO02__SDMA_EXT_EVENT00                   	0x0064 0x02f0 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO02__SRC_ANY_PU_RESET                   	0x0064 0x02f0 0x0000 7 0
+#define MX6UL_PAD_GPIO1_IO02__UART1_DCE_TX                           	0x0064 0x02f0 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO02__UART1_DTE_RX                           	0x0064 0x02f0 0x0624 8 0
+#define MX6UL_PAD_GPIO1_IO03__I2C1_SDA                           	0x0068 0x02f4 0x05a8 0 1
+#define MX6UL_PAD_GPIO1_IO03__GPT1_COMPARE3                      	0x0068 0x02f4 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO03__USB_OTG2_OC                        	0x0068 0x02f4 0x0660 2 0
+#define MX6UL_PAD_GPIO1_IO03__USDHC1_CD_B                        	0x0068 0x02f4 0x0668 4 0
+#define MX6UL_PAD_GPIO1_IO03__GPIO1_IO03                         	0x0068 0x02f4 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO03__CCM_DI0_eXT_CLK                    	0x0068 0x02f4 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO03__SRC_TESTER_ACK                     	0x0068 0x02f4 0x0000 7 0
+#define MX6UL_PAD_GPIO1_IO03__UART1_DTE_TX                           	0x0068 0x02f4 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO03__UART1_DCE_RX                           	0x0068 0x02f4 0x0624 8 1
+#define MX6UL_PAD_GPIO1_IO04__ENET1_REF_CLK1                     	0x006c 0x02f8 0x0574 0 1
+#define MX6UL_PAD_GPIO1_IO04__PWM3_OUT                           	0x006c 0x02f8 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO04__USB_OTG1_PWR                       	0x006c 0x02f8 0x0000 2 0
+#define MX6UL_PAD_GPIO1_IO04__USDHC1_RESET_B                     	0x006c 0x02f8 0x0000 4 0
+#define MX6UL_PAD_GPIO1_IO04__GPIO1_IO04                         	0x006c 0x02f8 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO04__ENET2_1588_EVENT0_IN               	0x006c 0x02f8 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO04__UART5_DCE_TX                           	0x006c 0x02f8 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO04__UART5_DTE_RX                           	0x006c 0x02f8 0x0644 8 2
+#define MX6UL_PAD_GPIO1_IO05__ENET2_REF_CLK2                     	0x0070 0x02fc 0x057c 0 1
+#define MX6UL_PAD_GPIO1_IO05__PWM4_OUT                           	0x0070 0x02fc 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO05__ANATOP_OTG2_ID                     	0x0070 0x02fc 0x04bc 2 0
+#define MX6UL_PAD_GPIO1_IO05__CSI_FIELD                          	0x0070 0x02fc 0x0530 3 0
+#define MX6UL_PAD_GPIO1_IO05__USDHC1_VSELECT                     	0x0070 0x02fc 0x0000 4 0
+#define MX6UL_PAD_GPIO1_IO05__GPIO1_IO05                         	0x0070 0x02fc 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO05__ENET2_1588_EVENT0_OUT              	0x0070 0x02fc 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO05__UART5_DCE_RX                           	0x0070 0x02fc 0x0644 8 3
+#define MX6UL_PAD_GPIO1_IO05__UART5_DTE_TX                           	0x0070 0x02fc 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO06__ENET1_MDIO                         	0x0074 0x0300 0x0578 0 0
+#define MX6UL_PAD_GPIO1_IO06__ENET2_MDIO                         	0x0074 0x0300 0x0580 1 0
+#define MX6UL_PAD_GPIO1_IO06__USB_OTG_PWR_WAKE                   	0x0074 0x0300 0x0000 2 0
+#define MX6UL_PAD_GPIO1_IO06__CSI_MCLK                           	0x0074 0x0300 0x0000 3 0
+#define MX6UL_PAD_GPIO1_IO06__USDHC2_WP                          	0x0074 0x0300 0x069c 4 0
+#define MX6UL_PAD_GPIO1_IO06__GPIO1_IO06                         	0x0074 0x0300 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO06__CCM_WAIT                           	0x0074 0x0300 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO06__CCM_REF_EN_B                       	0x0074 0x0300 0x0000 7 0
+#define MX6UL_PAD_GPIO1_IO06__UART1_DCE_CTS                      	0x0074 0x0300 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO06__UART1_DTE_RTS                      	0x0074 0x0300 0x0620 8 0
+#define MX6UL_PAD_GPIO1_IO07__ENET1_MDC                          	0x0078 0x0304 0x0000 0 0
+#define MX6UL_PAD_GPIO1_IO07__ENET2_MDC                          	0x0078 0x0304 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO07__USB_OTG_HOST_MODE                  	0x0078 0x0304 0x0000 2 0
+#define MX6UL_PAD_GPIO1_IO07__CSI_PIXCLK                         	0x0078 0x0304 0x0528 3 0
+#define MX6UL_PAD_GPIO1_IO07__USDHC2_CD_B                        	0x0078 0x0304 0x0674 4 1
+#define MX6UL_PAD_GPIO1_IO07__GPIO1_IO07                         	0x0078 0x0304 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO07__CCM_STOP                           	0x0078 0x0304 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO07__UART1_DCE_RTS                      	0x0078 0x0304 0x0620 8 1
+#define MX6UL_PAD_GPIO1_IO07__UART1_DTE_CTS                      	0x0078 0x0304 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO08__PWM1_OUT                           	0x007c 0x0308 0x0000 0 0
+#define MX6UL_PAD_GPIO1_IO08__WDOG1_WDOG_B                       	0x007c 0x0308 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO08__SPDIF_OUT                          	0x007c 0x0308 0x0000 2 0
+#define MX6UL_PAD_GPIO1_IO08__CSI_VSYNC                          	0x007c 0x0308 0x052c 3 1
+#define MX6UL_PAD_GPIO1_IO08__USDHC2_VSELECT                     	0x007c 0x0308 0x0000 4 0
+#define MX6UL_PAD_GPIO1_IO08__GPIO1_IO08                         	0x007c 0x0308 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO08__CCM_PMIC_RDY                       	0x007c 0x0308 0x04c0 6 1
+#define MX6UL_PAD_GPIO1_IO08__UART5_DCE_RTS                      	0x007c 0x0308 0x0640 8 1
+#define MX6UL_PAD_GPIO1_IO08__UART5_DTE_CTS                      	0x007c 0x0308 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO09__PWM2_OUT                           	0x0080 0x030c 0x0000 0 0
+#define MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY                     	0x0080 0x030c 0x0000 1 0
+#define MX6UL_PAD_GPIO1_IO09__SPDIF_IN                           	0x0080 0x030c 0x0618 2 0
+#define MX6UL_PAD_GPIO1_IO09__CSI_HSYNC                          	0x0080 0x030c 0x0524 3 1
+#define MX6UL_PAD_GPIO1_IO09__USDHC2_RESET_B                     	0x0080 0x030c 0x0000 4 0
+#define MX6UL_PAD_GPIO1_IO09__GPIO1_IO09                         	0x0080 0x030c 0x0000 5 0
+#define MX6UL_PAD_GPIO1_IO09__USDHC1_RESET_B                     	0x0080 0x030c 0x0000 6 0
+#define MX6UL_PAD_GPIO1_IO09__UART5_DCE_CTS                      	0x0080 0x030c 0x0000 8 0
+#define MX6UL_PAD_GPIO1_IO09__UART5_DTE_RTS                      	0x0080 0x030c 0x0640 8 2
+#define MX6UL_PAD_UART1_TX_DATA__UART1_DCE_TX                        	0x0084 0x0310 0x0000 0 0
+#define MX6UL_PAD_UART1_TX_DATA__UART1_DTE_RX                        	0x0084 0x0310 0x0624 0 2
+#define MX6UL_PAD_UART1_TX_DATA__ENET1_RDATA02                   	0x0084 0x0310 0x0000 1 0
+#define MX6UL_PAD_UART1_TX_DATA__I2C3_SCL                        	0x0084 0x0310 0x05b4 2 0
+#define MX6UL_PAD_UART1_TX_DATA__CSI_DATA02                      	0x0084 0x0310 0x0000 3 0
+#define MX6UL_PAD_UART1_TX_DATA__GPT1_COMPARE1                   	0x0084 0x0310 0x0000 4 0
+#define MX6UL_PAD_UART1_TX_DATA__GPIO1_IO16                      	0x0084 0x0310 0x0000 5 0
+#define MX6UL_PAD_UART1_TX_DATA__SPDIF_OUT                       	0x0084 0x0310 0x0000 8 0
+#define MX6UL_PAD_UART1_RX_DATA__UART1_DCE_RX                        	0x0088 0x0314 0x0624 0 3
+#define MX6UL_PAD_UART1_RX_DATA__UART1_DTE_TX                        	0x0088 0x0314 0x0000 0 0
+#define MX6UL_PAD_UART1_RX_DATA__ENET1_RDATA03                   	0x0088 0x0314 0x0000 1 0
+#define MX6UL_PAD_UART1_RX_DATA__I2C3_SDA                        	0x0088 0x0314 0x05b8 2 0
+#define MX6UL_PAD_UART1_RX_DATA__CSI_DATA03                      	0x0088 0x0314 0x0000 3 0
+#define MX6UL_PAD_UART1_RX_DATA__GPT1_CLK                        	0x0088 0x0314 0x0594 4 0
+#define MX6UL_PAD_UART1_RX_DATA__GPIO1_IO17                      	0x0088 0x0314 0x0000 5 0
+#define MX6UL_PAD_UART1_RX_DATA__SPDIF_IN                        	0x0088 0x0314 0x0000 8 0
+#define MX6UL_PAD_UART1_CTS_B__UART1_DCE_CTS                     	0x008c 0x0318 0x0000 0 0
+#define MX6UL_PAD_UART1_CTS_B__UART1_DTE_RTS                     	0x008c 0x0318 0x0620 0 2
+#define MX6UL_PAD_UART1_CTS_B__ENET1_RX_CLK                      	0x008c 0x0318 0x0000 1 0
+#define MX6UL_PAD_UART1_CTS_B__USDHC1_WP                         	0x008c 0x0318 0x066c 2 1
+#define MX6UL_PAD_UART1_CTS_B__CSI_DATA04                        	0x008c 0x0318 0x0000 3 0
+#define MX6UL_PAD_UART1_CTS_B__ENET2_1588_EVENT1_IN              	0x008c 0x0318 0x0000 4 0
+#define MX6UL_PAD_UART1_CTS_B__GPIO1_IO18                        	0x008c 0x0318 0x0000 5 0
+#define MX6UL_PAD_UART1_CTS_B__USDHC2_WP                         	0x008c 0x0318 0x0000 8 0
+#define MX6UL_PAD_UART1_RTS_B__UART1_DCE_RTS                     	0x0090 0x031c 0x0620 0 3
+#define MX6UL_PAD_UART1_RTS_B__UART1_DTE_CTS                     	0x0090 0x031c 0x0000 0 0
+#define MX6UL_PAD_UART1_RTS_B__ENET1_TX_ER                       	0x0090 0x031c 0x0000 1 0
+#define MX6UL_PAD_UART1_RTS_B__USDHC1_CD_B                       	0x0090 0x031c 0x0668 2 1
+#define MX6UL_PAD_UART1_RTS_B__CSI_DATA05                        	0x0090 0x031c 0x0000 3 0
+#define MX6UL_PAD_UART1_RTS_B__ENET2_1588_EVENT1_OUT             	0x0090 0x031c 0x0000 4 0
+#define MX6UL_PAD_UART1_RTS_B__GPIO1_IO19                        	0x0090 0x031c 0x0000 5 0
+#define MX6UL_PAD_UART1_RTS_B__USDHC2_CD_B                       	0x0090 0x031c 0x0000 8 0
+#define MX6UL_PAD_UART2_TX_DATA__UART2_DCE_TX                        	0x0094 0x0320 0x0000 0 0
+#define MX6UL_PAD_UART2_TX_DATA__UART2_DTE_RX                        	0x0094 0x0320 0x062c 0 0
+#define MX6UL_PAD_UART2_TX_DATA__ENET1_TDATA02                   	0x0094 0x0320 0x0000 1 0
+#define MX6UL_PAD_UART2_TX_DATA__I2C4_SCL                        	0x0094 0x0320 0x05bc 2 0
+#define MX6UL_PAD_UART2_TX_DATA__CSI_DATA06                      	0x0094 0x0320 0x0000 3 0
+#define MX6UL_PAD_UART2_TX_DATA__GPT1_CAPTURE1                   	0x0094 0x0320 0x058c 4 1
+#define MX6UL_PAD_UART2_TX_DATA__GPIO1_IO20                      	0x0094 0x0320 0x0000 5 0
+#define MX6UL_PAD_UART2_TX_DATA__ECSPI3_SS0                      	0x0094 0x0320 0x0000 8 0
+#define MX6UL_PAD_UART2_RX_DATA__UART2_DCE_RX                        	0x0098 0x0324 0x062c 0 1
+#define MX6UL_PAD_UART2_RX_DATA__UART2_DTE_TX                        	0x0098 0x0324 0x0000 0 0
+#define MX6UL_PAD_UART2_RX_DATA__ENET1_TDATA03                   	0x0098 0x0324 0x0000 1 0
+#define MX6UL_PAD_UART2_RX_DATA__I2C4_SDA                        	0x0098 0x0324 0x05c0 2 0
+#define MX6UL_PAD_UART2_RX_DATA__CSI_DATA07                      	0x0098 0x0324 0x0000 3 0
+#define MX6UL_PAD_UART2_RX_DATA__GPT1_CAPTURE2                   	0x0098 0x0324 0x0590 4 0
+#define MX6UL_PAD_UART2_RX_DATA__GPIO1_IO21                      	0x0098 0x0324 0x0000 5 0
+#define MX6UL_PAD_UART2_RX_DATA__SJC_DONE                        	0x0098 0x0324 0x0000 7 0
+#define MX6UL_PAD_UART2_RX_DATA__ECSPI3_SCLK                     	0x0098 0x0324 0x0000 8 0
+#define MX6UL_PAD_UART2_CTS_B__UART2_DCE_CTS                     	0x009c 0x0328 0x0000 0 0
+#define MX6UL_PAD_UART2_CTS_B__UART2_DTE_RTS                     	0x009c 0x0328 0x0628 0 0
+#define MX6UL_PAD_UART2_CTS_B__ENET1_CRS                         	0x009c 0x0328 0x0000 1 0
+#define MX6UL_PAD_UART2_CTS_B__FLEXCAN2_TX                       	0x009c 0x0328 0x0000 2 0
+#define MX6UL_PAD_UART2_CTS_B__CSI_DATA08                        	0x009c 0x0328 0x0000 3 0
+#define MX6UL_PAD_UART2_CTS_B__GPT1_COMPARE2                     	0x009c 0x0328 0x0000 4 0
+#define MX6UL_PAD_UART2_CTS_B__GPIO1_IO22                        	0x009c 0x0328 0x0000 5 0
+#define MX6UL_PAD_UART2_CTS_B__SJC_DE_B                          	0x009c 0x0328 0x0000 7 0
+#define MX6UL_PAD_UART2_CTS_B__ECSPI3_MOSI                       	0x009c 0x0328 0x0000 8 0
+#define MX6UL_PAD_UART2_RTS_B__UART2_DCE_RTS                     	0x00a0 0x032c 0x0628 0 1
+#define MX6UL_PAD_UART2_RTS_B__UART2_DTE_CTS                     	0x00a0 0x032c 0x0000 0 0
+#define MX6UL_PAD_UART2_RTS_B__ENET1_COL                         	0x00a0 0x032c 0x0000 1 0
+#define MX6UL_PAD_UART2_RTS_B__FLEXCAN2_RX                       	0x00a0 0x032c 0x0588 2 0
+#define MX6UL_PAD_UART2_RTS_B__CSI_DATA09                        	0x00a0 0x032c 0x0000 3 0
+#define MX6UL_PAD_UART2_RTS_B__GPT1_COMPARE3                     	0x00a0 0x032c 0x0000 4 0
+#define MX6UL_PAD_UART2_RTS_B__GPIO1_IO23                        	0x00a0 0x032c 0x0000 5 0
+#define MX6UL_PAD_UART2_RTS_B__SJC_FAIL                          	0x00a0 0x032c 0x0000 7 0
+#define MX6UL_PAD_UART2_RTS_B__ECSPI3_MISO                       	0x00a0 0x032c 0x0000 8 0
+#define MX6UL_PAD_UART3_TX_DATA__UART3_DCE_TX                        	0x00a4 0x0330 0x0000 0 0
+#define MX6UL_PAD_UART3_TX_DATA__UART3_DTE_RX                        	0x00a4 0x0330 0x0634 0 0
+#define MX6UL_PAD_UART3_TX_DATA__ENET2_RDATA02                   	0x00a4 0x0330 0x0000 1 0
+#define MX6UL_PAD_UART3_TX_DATA__SIM1_PORT0_PD                   	0x00a4 0x0330 0x0000 2 0
+#define MX6UL_PAD_UART3_TX_DATA__CSI_DATA01                      	0x00a4 0x0330 0x0000 3 0
+#define MX6UL_PAD_UART3_TX_DATA__UART2_DCE_CTS                   	0x00a4 0x0330 0x0000 4 0
+#define MX6UL_PAD_UART3_TX_DATA__UART2_DTE_RTS                   	0x00a4 0x0330 0x0628 4 2
+#define MX6UL_PAD_UART3_TX_DATA__GPIO1_IO24                      	0x00a4 0x0330 0x0000 5 0
+#define MX6UL_PAD_UART3_TX_DATA__SJC_JTAG_ACT                    	0x00a4 0x0330 0x0000 7 0
+#define MX6UL_PAD_UART3_TX_DATA__ANATOP_OTG1_ID                  	0x00a4 0x0330 0x0000 8 0
+#define MX6UL_PAD_UART3_RX_DATA__UART3_DCE_RX                        	0x00a8 0x0334 0x0634 0 1
+#define MX6UL_PAD_UART3_RX_DATA__UART3_DTE_TX                        	0x00a8 0x0334 0x0000 0 0
+#define MX6UL_PAD_UART3_RX_DATA__ENET2_RDATA03                   	0x00a8 0x0334 0x0000 1 0
+#define MX6UL_PAD_UART3_RX_DATA__SIM2_PORT0_PD                   	0x00a8 0x0334 0x0000 2 0
+#define MX6UL_PAD_UART3_RX_DATA__CSI_DATA00                      	0x00a8 0x0334 0x0000 3 0
+#define MX6UL_PAD_UART3_RX_DATA__UART2_DCE_RTS                   	0x00a8 0x0334 0x0628 4 3
+#define MX6UL_PAD_UART3_RX_DATA__UART2_DTE_CTS                   	0x00a8 0x0334 0x0000 4 0
+#define MX6UL_PAD_UART3_RX_DATA__GPIO1_IO25                      	0x00a8 0x0334 0x0000 5 0
+#define MX6UL_PAD_UART3_RX_DATA__EPIT1_OUT                       	0x00a8 0x0334 0x0000 8 0
+#define MX6UL_PAD_UART3_CTS_B__UART3_DCE_CTS                     	0x00ac 0x0338 0x0000 0 0
+#define MX6UL_PAD_UART3_CTS_B__UART3_DTE_RTS                     	0x00ac 0x0338 0x0630 0 0
+#define MX6UL_PAD_UART3_CTS_B__ENET2_RX_CLK                      	0x00ac 0x0338 0x0000 1 0
+#define MX6UL_PAD_UART3_CTS_B__FLEXCAN1_TX                       	0x00ac 0x0338 0x0000 2 0
+#define MX6UL_PAD_UART3_CTS_B__CSI_DATA10                        	0x00ac 0x0338 0x0000 3 0
+#define MX6UL_PAD_UART3_CTS_B__ENET1_1588_EVENT1_IN              	0x00ac 0x0338 0x0000 4 0
+#define MX6UL_PAD_UART3_CTS_B__GPIO1_IO26                        	0x00ac 0x0338 0x0000 5 0
+#define MX6UL_PAD_UART3_CTS_B__EPIT2_OUT                         	0x00ac 0x0338 0x0000 8 0
+#define MX6UL_PAD_UART3_RTS_B__UART3_DCE_RTS                     	0x00b0 0x033c 0x0630 0 1
+#define MX6UL_PAD_UART3_RTS_B__UART3_DTE_CTS                     	0x00b0 0x033c 0x0000 0 0
+#define MX6UL_PAD_UART3_RTS_B__ENET2_TX_ER                       	0x00b0 0x033c 0x0000 1 0
+#define MX6UL_PAD_UART3_RTS_B__FLEXCAN1_RX                       	0x00b0 0x033c 0x0584 2 0
+#define MX6UL_PAD_UART3_RTS_B__CSI_DATA11                        	0x00b0 0x033c 0x0000 3 0
+#define MX6UL_PAD_UART3_RTS_B__ENET1_1588_EVENT1_OUT             	0x00b0 0x033c 0x0000 4 0
+#define MX6UL_PAD_UART3_RTS_B__GPIO1_IO27                        	0x00b0 0x033c 0x0000 5 0
+#define MX6UL_PAD_UART3_RTS_B__WDOG1_WDOG_B                      	0x00b0 0x033c 0x0000 8 0
+#define MX6UL_PAD_UART4_TX_DATA__UART4_DCE_TX                        	0x00b4 0x0340 0x0000 0 0
+#define MX6UL_PAD_UART4_TX_DATA__UART4_DTE_RX                        	0x00b4 0x0340 0x063c 0 0
+#define MX6UL_PAD_UART4_TX_DATA__ENET2_TDATA02                   	0x00b4 0x0340 0x0000 1 0
+#define MX6UL_PAD_UART4_TX_DATA__I2C1_SCL                        	0x00b4 0x0340 0x05a4 2 1
+#define MX6UL_PAD_UART4_TX_DATA__CSI_DATA12                      	0x00b4 0x0340 0x0000 3 0
+#define MX6UL_PAD_UART4_TX_DATA__CSU_CSU_ALARM_AUT02             	0x00b4 0x0340 0x0000 4 0
+#define MX6UL_PAD_UART4_TX_DATA__GPIO1_IO28                      	0x00b4 0x0340 0x0000 5 0
+#define MX6UL_PAD_UART4_TX_DATA__ECSPI2_SCLK                     	0x00b4 0x0340 0x0000 8 0
+#define MX6UL_PAD_UART4_RX_DATA__UART4_DCE_RX                        	0x00b8 0x0344 0x063c 0 1
+#define MX6UL_PAD_UART4_RX_DATA__UART4_DTE_TX                        	0x00b8 0x0344 0x0000 0 0
+#define MX6UL_PAD_UART4_RX_DATA__ENET2_TDATA03                   	0x00b8 0x0344 0x0000 1 0
+#define MX6UL_PAD_UART4_RX_DATA__I2C1_SDA                        	0x00b8 0x0344 0x05a8 2 2
+#define MX6UL_PAD_UART4_RX_DATA__CSI_DATA13                      	0x00b8 0x0344 0x0000 3 0
+#define MX6UL_PAD_UART4_RX_DATA__CSU_CSU_ALARM_AUT01             	0x00b8 0x0344 0x0000 4 0
+#define MX6UL_PAD_UART4_RX_DATA__GPIO1_IO29                      	0x00b8 0x0344 0x0000 5 0
+#define MX6UL_PAD_UART4_RX_DATA__ECSPI2_SS0                      	0x00b8 0x0344 0x0000 8 0
+#define MX6UL_PAD_UART5_TX_DATA__GPIO1_IO30                      	0x00bc 0x0348 0x0000 5 0
+#define MX6UL_PAD_UART5_TX_DATA__ECSPI2_MOSI                     	0x00bc 0x0348 0x0000 8 0
+#define MX6UL_PAD_UART5_TX_DATA__UART5_DCE_TX                        	0x00bc 0x0348 0x0000 0 0
+#define MX6UL_PAD_UART5_TX_DATA__UART5_DTE_RX                        	0x00bc 0x0348 0x0644 0 4
+#define MX6UL_PAD_UART5_TX_DATA__ENET2_CRS                       	0x00bc 0x0348 0x0000 1 0
+#define MX6UL_PAD_UART5_TX_DATA__I2C2_SCL                        	0x00bc 0x0348 0x05ac 2 2
+#define MX6UL_PAD_UART5_TX_DATA__CSI_DATA14                      	0x00bc 0x0348 0x0000 3 0
+#define MX6UL_PAD_UART5_TX_DATA__CSU_CSU_ALARM_AUT00             	0x00bc 0x0348 0x0000 4 0
+#define MX6UL_PAD_UART5_RX_DATA__UART5_DCE_RX                        	0x00c0 0x034c 0x0644 0 5
+#define MX6UL_PAD_UART5_RX_DATA__UART5_DTE_TX                        	0x00c0 0x034c 0x0000 0 0
+#define MX6UL_PAD_UART5_RX_DATA__ENET2_COL                       	0x00c0 0x034c 0x0000 1 0
+#define MX6UL_PAD_UART5_RX_DATA__I2C2_SDA                        	0x00c0 0x034c 0x05b0 2 2
+#define MX6UL_PAD_UART5_RX_DATA__CSI_DATA15                      	0x00c0 0x034c 0x0000 3 0
+#define MX6UL_PAD_UART5_RX_DATA__CSU_CSU_INT_DEB                 	0x00c0 0x034c 0x0000 4 0
+#define MX6UL_PAD_UART5_RX_DATA__GPIO1_IO31                      	0x00c0 0x034c 0x0000 5 0
+#define MX6UL_PAD_UART5_RX_DATA__ECSPI2_MISO                     	0x00c0 0x034c 0x0000 8 0
+#define MX6UL_PAD_ENET1_RX_DATA0__ENET1_RDATA00                  	0x00c4 0x0350 0x0000 0 0
+#define MX6UL_PAD_ENET1_RX_DATA0__UART4_DCE_RTS                  	0x00c4 0x0350 0x0638 1 0
+#define MX6UL_PAD_ENET1_RX_DATA0__UART4_DTE_CTS                  	0x00c4 0x0350 0x0000 1 0
+#define MX6UL_PAD_ENET1_RX_DATA0__PWM1_OUT                       	0x00c4 0x0350 0x0000 2 0
+#define MX6UL_PAD_ENET1_RX_DATA0__CSI_DATA16                     	0x00c4 0x0350 0x0000 3 0
+#define MX6UL_PAD_ENET1_RX_DATA0__FLEXCAN1_TX                    	0x00c4 0x0350 0x0000 4 0
+#define MX6UL_PAD_ENET1_RX_DATA0__GPIO2_IO00                     	0x00c4 0x0350 0x0000 5 0
+#define MX6UL_PAD_ENET1_RX_DATA0__KPP_ROW00                      	0x00c4 0x0350 0x0000 6 0
+#define MX6UL_PAD_ENET1_RX_DATA0__USDHC1_LCTL                    	0x00c4 0x0350 0x0000 8 0
+#define MX6UL_PAD_ENET1_RX_DATA1__ENET1_RDATA01                  	0x00c8 0x0354 0x0000 0 0
+#define MX6UL_PAD_ENET1_RX_DATA1__UART4_DCE_CTS                  	0x00c8 0x0354 0x0000 1 0
+#define MX6UL_PAD_ENET1_RX_DATA1__UART4_DTE_RTS                  	0x00c8 0x0354 0x0638 1 1
+#define MX6UL_PAD_ENET1_RX_DATA1__PWM2_OUT                       	0x00c8 0x0354 0x0000 2 0
+#define MX6UL_PAD_ENET1_RX_DATA1__CSI_DATA17                     	0x00c8 0x0354 0x0000 3 0
+#define MX6UL_PAD_ENET1_RX_DATA1__FLEXCAN1_RX                    	0x00c8 0x0354 0x0584 4 1
+#define MX6UL_PAD_ENET1_RX_DATA1__GPIO2_IO01                     	0x00c8 0x0354 0x0000 5 0
+#define MX6UL_PAD_ENET1_RX_DATA1__KPP_COL00                      	0x00c8 0x0354 0x0000 6 0
+#define MX6UL_PAD_ENET1_RX_DATA1__USDHC2_LCTL                    	0x00c8 0x0354 0x0000 8 0
+#define MX6UL_PAD_ENET1_RX_EN__ENET1_RX_EN                       	0x00cc 0x0358 0x0000 0 0
+#define MX6UL_PAD_ENET1_RX_EN__UART5_DCE_RTS                     	0x00cc 0x0358 0x0640 1 3
+#define MX6UL_PAD_ENET1_RX_EN__UART5_DTE_CTS                     	0x00cc 0x0358 0x0000 1 0
+#define MX6UL_PAD_ENET1_RX_EN__CSI_DATA18                        	0x00cc 0x0358 0x0000 3 0
+#define MX6UL_PAD_ENET1_RX_EN__FLEXCAN2_TX                       	0x00cc 0x0358 0x0000 4 0
+#define MX6UL_PAD_ENET1_RX_EN__GPIO2_IO02                        	0x00cc 0x0358 0x0000 5 0
+#define MX6UL_PAD_ENET1_RX_EN__KPP_ROW01                         	0x00cc 0x0358 0x0000 6 0
+#define MX6UL_PAD_ENET1_RX_EN__USDHC1_VSELECT                    	0x00cc 0x0358 0x0000 8 0
+#define MX6UL_PAD_ENET1_TX_DATA0__ENET1_TDATA00                  	0x00d0 0x035c 0x0000 0 0
+#define MX6UL_PAD_ENET1_TX_DATA0__UART5_DCE_CTS                  	0x00d0 0x035c 0x0000 1 0
+#define MX6UL_PAD_ENET1_TX_DATA0__UART5_DTE_RTS                  	0x00d0 0x035c 0x0640 1 4
+#define MX6UL_PAD_ENET1_TX_DATA0__CSI_DATA19                     	0x00d0 0x035c 0x0000 3 0
+#define MX6UL_PAD_ENET1_TX_DATA0__FLEXCAN2_RX                    	0x00d0 0x035c 0x0588 4 1
+#define MX6UL_PAD_ENET1_TX_DATA0__GPIO2_IO03                     	0x00d0 0x035c 0x0000 5 0
+#define MX6UL_PAD_ENET1_TX_DATA0__KPP_COL01                      	0x00d0 0x035c 0x0000 6 0
+#define MX6UL_PAD_ENET1_TX_DATA0__USDHC2_VSELECT                 	0x00d0 0x035c 0x0000 8 0
+#define MX6UL_PAD_ENET1_TX_DATA1__ENET1_TDATA01                  	0x00d4 0x0360 0x0000 0 0
+#define MX6UL_PAD_ENET1_TX_DATA1__UART6_DCE_CTS                  	0x00d4 0x0360 0x0000 1 0
+#define MX6UL_PAD_ENET1_TX_DATA1__UART6_DTE_RTS                  	0x00d4 0x0360 0x0648 1 2
+#define MX6UL_PAD_ENET1_TX_DATA1__PWM5_OUT                       	0x00d4 0x0360 0x0000 2 0
+#define MX6UL_PAD_ENET1_TX_DATA1__CSI_DATA20                     	0x00d4 0x0360 0x0000 3 0
+#define MX6UL_PAD_ENET1_TX_DATA1__ENET2_MDIO                     	0x00d4 0x0360 0x0580 4 1
+#define MX6UL_PAD_ENET1_TX_DATA1__GPIO2_IO04                     	0x00d4 0x0360 0x0000 5 0
+#define MX6UL_PAD_ENET1_TX_DATA1__KPP_ROW02                      	0x00d4 0x0360 0x0000 6 0
+#define MX6UL_PAD_ENET1_TX_DATA1__WDOG1_WDOG_RST_B_DEB           	0x00d4 0x0360 0x0000 8 0
+#define MX6UL_PAD_ENET1_TX_EN__ENET1_TX_EN                       	0x00d8 0x0364 0x0000 0 0
+#define MX6UL_PAD_ENET1_TX_EN__UART6_DCE_RTS                     	0x00d8 0x0364 0x0648 1 3
+#define MX6UL_PAD_ENET1_TX_EN__UART6_DTE_CTS                     	0x00d8 0x0364 0x0000 1 0
+#define MX6UL_PAD_ENET1_TX_EN__PWM6_OUT                          	0x00d8 0x0364 0x0000 2 0
+#define MX6UL_PAD_ENET1_TX_EN__CSI_DATA21                        	0x00d8 0x0364 0x0000 3 0
+#define MX6UL_PAD_ENET1_TX_EN__ENET2_MDC                         	0x00d8 0x0364 0x0000 4 0
+#define MX6UL_PAD_ENET1_TX_EN__GPIO2_IO05                        	0x00d8 0x0364 0x0000 5 0
+#define MX6UL_PAD_ENET1_TX_EN__KPP_COL02                         	0x00d8 0x0364 0x0000 6 0
+#define MX6UL_PAD_ENET1_TX_EN__WDOG2_WDOG_RST_B_DEB              	0x00d8 0x0364 0x0000 8 0
+#define MX6UL_PAD_ENET1_TX_CLK__ENET1_TX_CLK                     	0x00dc 0x0368 0x0000 0 0
+#define MX6UL_PAD_ENET1_TX_CLK__UART7_DCE_CTS                    	0x00dc 0x0368 0x0000 1 0
+#define MX6UL_PAD_ENET1_TX_CLK__UART7_DTE_RTS                    	0x00dc 0x0368 0x0650 1 0
+#define MX6UL_PAD_ENET1_TX_CLK__PWM7_OUT                         	0x00dc 0x0368 0x0000 2 0
+#define MX6UL_PAD_ENET1_TX_CLK__CSI_DATA22                       	0x00dc 0x0368 0x0000 3 0
+#define MX6UL_PAD_ENET1_TX_CLK__ENET1_REF_CLK1                   	0x00dc 0x0368 0x0574 4 2
+#define MX6UL_PAD_ENET1_TX_CLK__GPIO2_IO06                       	0x00dc 0x0368 0x0000 5 0
+#define MX6UL_PAD_ENET1_TX_CLK__KPP_ROW03                        	0x00dc 0x0368 0x0000 6 0
+#define MX6UL_PAD_ENET1_TX_CLK__GPT1_CLK                         	0x00dc 0x0368 0x0000 8 0
+#define MX6UL_PAD_ENET1_RX_ER__ENET1_RX_ER                       	0x00e0 0x036c 0x0000 0 0
+#define MX6UL_PAD_ENET1_RX_ER__UART7_DCE_RTS                     	0x00e0 0x036c 0x0650 1 1
+#define MX6UL_PAD_ENET1_RX_ER__UART7_DTE_CTS                     	0x00e0 0x036c 0x0000 1 0
+#define MX6UL_PAD_ENET1_RX_ER__PWM8_OUT                          	0x00e0 0x036c 0x0000 2 0
+#define MX6UL_PAD_ENET1_RX_ER__CSI_DATA23                        	0x00e0 0x036c 0x0000 3 0
+#define MX6UL_PAD_ENET1_RX_ER__EIM_CRE                           	0x00e0 0x036c 0x0000 4 0
+#define MX6UL_PAD_ENET1_RX_ER__GPIO2_IO07                        	0x00e0 0x036c 0x0000 5 0
+#define MX6UL_PAD_ENET1_RX_ER__KPP_COL03                         	0x00e0 0x036c 0x0000 6 0
+#define MX6UL_PAD_ENET1_RX_ER__GPT1_CAPTURE2                     	0x00e0 0x036c 0x0000 8 0
+#define MX6UL_PAD_ENET2_RX_DATA0__ENET2_RDATA00                  	0x00e4 0x0370 0x0000 0 0
+#define MX6UL_PAD_ENET2_RX_DATA0__UART6_DCE_TX                       	0x00e4 0x0370 0x0000 1 0
+#define MX6UL_PAD_ENET2_RX_DATA0__UART6_DTE_RX                    	0x00e4 0x0370 0x064c 1 1
+#define MX6UL_PAD_ENET2_RX_DATA0__SIM1_PORT0_TRXD                	0x00e4 0x0370 0x0000 2 0
+#define MX6UL_PAD_ENET2_RX_DATA0__I2C3_SCL                       	0x00e4 0x0370 0x05b4 3 1
+#define MX6UL_PAD_ENET2_RX_DATA0__ENET1_MDIO                     	0x00e4 0x0370 0x0578 4 1
+#define MX6UL_PAD_ENET2_RX_DATA0__GPIO2_IO08                     	0x00e4 0x0370 0x0000 5 0
+#define MX6UL_PAD_ENET2_RX_DATA0__KPP_ROW04                      	0x00e4 0x0370 0x0000 6 0
+#define MX6UL_PAD_ENET2_RX_DATA0__USB_OTG1_PWR                   	0x00e4 0x0370 0x0000 8 0
+#define MX6UL_PAD_ENET2_RX_DATA1__ENET2_RDATA01                  	0x00e8 0x0374 0x0000 0 0
+#define MX6UL_PAD_ENET2_RX_DATA1__UART6_DCE_RX                       	0x00e8 0x0374 0x064c 1 2
+#define MX6UL_PAD_ENET2_RX_DATA1__UART6_DTE_TX                       	0x00e8 0x0374 0x0000 1 0
+#define MX6UL_PAD_ENET2_RX_DATA1__SIM1_PORT0_cLK                 	0x00e8 0x0374 0x0000 2 0
+#define MX6UL_PAD_ENET2_RX_DATA1__I2C3_SDA                       	0x00e8 0x0374 0x05b8 3 1
+#define MX6UL_PAD_ENET2_RX_DATA1__ENET1_MDC                      	0x00e8 0x0374 0x0000 4 0
+#define MX6UL_PAD_ENET2_RX_DATA1__GPIO2_IO09                     	0x00e8 0x0374 0x0000 5 0
+#define MX6UL_PAD_ENET2_RX_DATA1__KPP_COL04                      	0x00e8 0x0374 0x0000 6 0
+#define MX6UL_PAD_ENET2_RX_DATA1__USB_OTG1_OC                    	0x00e8 0x0374 0x0000 8 0
+#define MX6UL_PAD_ENET2_RX_EN__ENET2_RX_EN                       	0x00ec 0x0378 0x0000 0 0
+#define MX6UL_PAD_ENET2_RX_EN__UART7_DCE_TX                          	0x00ec 0x0378 0x0000 1 0
+#define MX6UL_PAD_ENET2_RX_EN__UART7_DTE_RX                          	0x00ec 0x0378 0x0654 1 0
+#define MX6UL_PAD_ENET2_RX_EN__SIM1_PORT0_RST_B                  	0x00ec 0x0378 0x0000 2 0
+#define MX6UL_PAD_ENET2_RX_EN__I2C4_SCL                          	0x00ec 0x0378 0x05bc 3 1
+#define MX6UL_PAD_ENET2_RX_EN__EIM_ADDR26                        	0x00ec 0x0378 0x0000 4 0
+#define MX6UL_PAD_ENET2_RX_EN__GPIO2_IO10                        	0x00ec 0x0378 0x0000 5 0
+#define MX6UL_PAD_ENET2_RX_EN__KPP_ROW05                         	0x00ec 0x0378 0x0000 6 0
+#define MX6UL_PAD_ENET2_RX_EN__ENET1_REF_CLK_25M                 	0x00ec 0x0378 0x0000 8 0
+#define MX6UL_PAD_ENET2_TX_DATA0__ENET2_TDATA00                  	0x00f0 0x037c 0x0000 0 0
+#define MX6UL_PAD_ENET2_TX_DATA0__UART7_DCE_RX                       	0x00f0 0x037c 0x0654 1 1
+#define MX6UL_PAD_ENET2_TX_DATA0__UART7_DTE_TX                       	0x00f0 0x037c 0x0000 1 0
+#define MX6UL_PAD_ENET2_TX_DATA0__SIM1_PORT0_SVEN                	0x00f0 0x037c 0x0000 2 0
+#define MX6UL_PAD_ENET2_TX_DATA0__I2C4_SDA                       	0x00f0 0x037c 0x05c0 3 1
+#define MX6UL_PAD_ENET2_TX_DATA0__EIM_EB_B02                     	0x00f0 0x037c 0x0000 4 0
+#define MX6UL_PAD_ENET2_TX_DATA0__GPIO2_IO11                     	0x00f0 0x037c 0x0000 5 0
+#define MX6UL_PAD_ENET2_TX_DATA0__KPP_COL05                      	0x00f0 0x037c 0x0000 6 0
+#define MX6UL_PAD_ENET2_TX_DATA1__ENET2_TDATA01                  	0x00f4 0x0380 0x0000 0 0
+#define MX6UL_PAD_ENET2_TX_DATA1__UART8_DCE_TX                       	0x00f4 0x0380 0x0000 1 0
+#define MX6UL_PAD_ENET2_TX_DATA1__UART8_DTE_RX                       	0x00f4 0x0380 0x065c 1 0
+#define MX6UL_PAD_ENET2_TX_DATA1__SIM2_PORT0_TRXD                	0x00f4 0x0380 0x0000 2 0
+#define MX6UL_PAD_ENET2_TX_DATA1__ECSPI4_SCLK                    	0x00f4 0x0380 0x0564 3 0
+#define MX6UL_PAD_ENET2_TX_DATA1__EIM_EB_B03                     	0x00f4 0x0380 0x0000 4 0
+#define MX6UL_PAD_ENET2_TX_DATA1__GPIO2_IO12                     	0x00f4 0x0380 0x0000 5 0
+#define MX6UL_PAD_ENET2_TX_DATA1__KPP_ROW06                      	0x00f4 0x0380 0x0000 6 0
+#define MX6UL_PAD_ENET2_TX_DATA1__USB_OTG2_PWR                   	0x00f4 0x0380 0x0000 8 0
+#define MX6UL_PAD_ENET2_TX_EN__ENET2_TX_EN                       	0x00f8 0x0384 0x0000 0 0
+#define MX6UL_PAD_ENET2_TX_EN__UART8_DCE_RX                          	0x00f8 0x0384 0x065c 1 1
+#define MX6UL_PAD_ENET2_TX_EN__UART8_DTE_TX                          	0x00f8 0x0384 0x0000 1 0
+#define MX6UL_PAD_ENET2_TX_EN__SIM2_PORT0_cLK                    	0x00f8 0x0384 0x0000 2 0
+#define MX6UL_PAD_ENET2_TX_EN__ECSPI4_MOSI                       	0x00f8 0x0384 0x056c 3 0
+#define MX6UL_PAD_ENET2_TX_EN__EIM_ACLK_FREERUN                  	0x00f8 0x0384 0x0000 4 0
+#define MX6UL_PAD_ENET2_TX_EN__GPIO2_IO13                        	0x00f8 0x0384 0x0000 5 0
+#define MX6UL_PAD_ENET2_TX_EN__KPP_COL06                         	0x00f8 0x0384 0x0000 6 0
+#define MX6UL_PAD_ENET2_TX_EN__USB_OTG2_OC                       	0x00f8 0x0384 0x0000 8 0
+#define MX6UL_PAD_ENET2_TX_CLK__ENET2_TX_CLK                     	0x00fc 0x0388 0x0000 0 0
+#define MX6UL_PAD_ENET2_TX_CLK__UART8_DCE_CTS                    	0x00fc 0x0388 0x0000 1 0
+#define MX6UL_PAD_ENET2_TX_CLK__UART8_DTE_RTS                    	0x00fc 0x0388 0x0658 1 0
+#define MX6UL_PAD_ENET2_TX_CLK__SIM2_PORT0_RST_B                 	0x00fc 0x0388 0x0000 2 0
+#define MX6UL_PAD_ENET2_TX_CLK__ECSPI4_MISO                      	0x00fc 0x0388 0x0568 3 0
+#define MX6UL_PAD_ENET2_TX_CLK__ENET2_REF_CLK2                   	0x00fc 0x0388 0x057c 4 2
+#define MX6UL_PAD_ENET2_TX_CLK__GPIO2_IO14                       	0x00fc 0x0388 0x0000 5 0
+#define MX6UL_PAD_ENET2_TX_CLK__KPP_ROW07                        	0x00fc 0x0388 0x0000 6 0
+#define MX6UL_PAD_ENET2_TX_CLK__ANATOP_OTG2_ID                   	0x00fc 0x0388 0x0000 8 0
+#define MX6UL_PAD_ENET2_RX_ER__ENET2_RX_ER                       	0x0100 0x038c 0x0000 0 0
+#define MX6UL_PAD_ENET2_RX_ER__UART8_DCE_RTS                     	0x0100 0x038c 0x0658 1 1
+#define MX6UL_PAD_ENET2_RX_ER__UART8_DTE_CTS                     	0x0100 0x038c 0x0000 1 0
+#define MX6UL_PAD_ENET2_RX_ER__SIM2_PORT0_SVEN                   	0x0100 0x038c 0x0000 2 0
+#define MX6UL_PAD_ENET2_RX_ER__ECSPI4_SS0                        	0x0100 0x038c 0x0000 3 0
+#define MX6UL_PAD_ENET2_RX_ER__EIM_ADDR25                        	0x0100 0x038c 0x0000 4 0
+#define MX6UL_PAD_ENET2_RX_ER__GPIO2_IO15                        	0x0100 0x038c 0x0000 5 0
+#define MX6UL_PAD_ENET2_RX_ER__KPP_COL07                         	0x0100 0x038c 0x0000 6 0
+#define MX6UL_PAD_ENET2_RX_ER__WDOG1_WDOG_ANY                    	0x0100 0x038c 0x0000 8 0
+#define MX6UL_PAD_LCD_CLK__LCDIF_CLK                             	0x0104 0x0390 0x0000 0 0
+#define MX6UL_PAD_LCD_CLK__LCDIF_WR_RWN                          	0x0104 0x0390 0x0000 1 0
+#define MX6UL_PAD_LCD_CLK__UART4_DCE_TX                              	0x0104 0x0390 0x0000 2 0
+#define MX6UL_PAD_LCD_CLK__UART4_DTE_RX                              	0x0104 0x0390 0x063c 2 2
+#define MX6UL_PAD_LCD_CLK__SAI3_MCLK                             	0x0104 0x0390 0x0000 3 0
+#define MX6UL_PAD_LCD_CLK__EIM_CS2_B                             	0x0104 0x0390 0x0000 4 0
+#define MX6UL_PAD_LCD_CLK__GPIO3_IO00                            	0x0104 0x0390 0x0000 5 0
+#define MX6UL_PAD_LCD_CLK__WDOG1_WDOG_RST_B_DEB                  	0x0104 0x0390 0x0000 8 0
+#define MX6UL_PAD_LCD_ENABLE__LCDIF_ENABLE                       	0x0108 0x0394 0x0000 0 0
+#define MX6UL_PAD_LCD_ENABLE__LCDIF_RD_E                         	0x0108 0x0394 0x0000 1 0
+#define MX6UL_PAD_LCD_ENABLE__UART4_DCE_RX                           	0x0108 0x0394 0x063c 2 3
+#define MX6UL_PAD_LCD_ENABLE__UART4_DTE_TX                           	0x0108 0x0394 0x0000 2 0
+#define MX6UL_PAD_LCD_ENABLE__SAI3_TX_SYNC                       	0x0108 0x0394 0x060c 3 0
+#define MX6UL_PAD_LCD_ENABLE__EIM_CS3_B                          	0x0108 0x0394 0x0000 4 0
+#define MX6UL_PAD_LCD_ENABLE__GPIO3_IO01                         	0x0108 0x0394 0x0000 5 0
+#define MX6UL_PAD_LCD_ENABLE__ECSPI2_RDY                         	0x0108 0x0394 0x0000 8 0
+#define MX6UL_PAD_LCD_HSYNC__LCDIF_HSYNC                         	0x010c 0x0398 0x05dc 0 0
+#define MX6UL_PAD_LCD_HSYNC__LCDIF_RS                            	0x010c 0x0398 0x0000 1 0
+#define MX6UL_PAD_LCD_HSYNC__UART4_DCE_CTS                       	0x010c 0x0398 0x0000 2 0
+#define MX6UL_PAD_LCD_HSYNC__UART4_DTE_RTS                       	0x010c 0x0398 0x0638 2 2
+#define MX6UL_PAD_LCD_HSYNC__SAI3_TX_BCLK                        	0x010c 0x0398 0x0608 3 0
+#define MX6UL_PAD_LCD_HSYNC__WDOG3_WDOG_RST_B_DEB                	0x010c 0x0398 0x0000 4 0
+#define MX6UL_PAD_LCD_HSYNC__GPIO3_IO02                          	0x010c 0x0398 0x0000 5 0
+#define MX6UL_PAD_LCD_HSYNC__ECSPI2_SS1                          	0x010c 0x0398 0x0000 8 0
+#define MX6UL_PAD_LCD_VSYNC__LCDIF_VSYNC                         	0x0110 0x039c 0x0000 0 0
+#define MX6UL_PAD_LCD_VSYNC__LCDIF_BUSY                          	0x0110 0x039c 0x05dc 1 1
+#define MX6UL_PAD_LCD_VSYNC__UART4_DCE_RTS                       	0x0110 0x039c 0x0638 2 3
+#define MX6UL_PAD_LCD_VSYNC__UART4_DTE_CTS                       	0x0110 0x039c 0x0000 2 0
+#define MX6UL_PAD_LCD_VSYNC__SAI3_RX_DATA                        	0x0110 0x039c 0x0000 3 0
+#define MX6UL_PAD_LCD_VSYNC__WDOG2_WDOG_B                        	0x0110 0x039c 0x0000 4 0
+#define MX6UL_PAD_LCD_VSYNC__GPIO3_IO03                          	0x0110 0x039c 0x0000 5 0
+#define MX6UL_PAD_LCD_VSYNC__ECSPI2_SS2                          	0x0110 0x039c 0x0000 8 0
+#define MX6UL_PAD_LCD_RESET__LCDIF_RESET                         	0x0114 0x03a0 0x0000 0 0
+#define MX6UL_PAD_LCD_RESET__LCDIF_CS                            	0x0114 0x03a0 0x0000 1 0
+#define MX6UL_PAD_LCD_RESET__CA7_MX6UL_EVENTI                    	0x0114 0x03a0 0x0000 2 0
+#define MX6UL_PAD_LCD_RESET__SAI3_TX_DATA                        	0x0114 0x03a0 0x0000 3 0
+#define MX6UL_PAD_LCD_RESET__WDOG1_WDOG_ANY                      	0x0114 0x03a0 0x0000 4 0
+#define MX6UL_PAD_LCD_RESET__GPIO3_IO04                          	0x0114 0x03a0 0x0000 5 0
+#define MX6UL_PAD_LCD_RESET__ECSPI2_SS3                          	0x0114 0x03a0 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA00__LCDIF_DATA00                       	0x0118 0x03a4 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA00__PWM1_OUT                           	0x0118 0x03a4 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA00__ENET1_1588_EVENT2_IN               	0x0118 0x03a4 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA00__I2C3_SDA                           	0x0118 0x03a4 0x05b8 4 2
+#define MX6UL_PAD_LCD_DATA00__GPIO3_IO05                         	0x0118 0x03a4 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA00__SRC_BT_CFG00                       	0x0118 0x03a4 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA00__SAI1_MCLK                          	0x0118 0x03a4 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA01__LCDIF_DATA01                       	0x011c 0x03a8 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA01__PWM2_OUT                           	0x011c 0x03a8 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA01__ENET1_1588_EVENT2_OUT              	0x011c 0x03a8 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA01__I2C3_SCL                           	0x011c 0x03a8 0x05b4 4 2
+#define MX6UL_PAD_LCD_DATA01__GPIO3_IO06                         	0x011c 0x03a8 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA01__SRC_BT_CFG01                       	0x011c 0x03a8 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA01__SAI1_TX_SYNC                       	0x011c 0x03a8 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA02__LCDIF_DATA02                       	0x0120 0x03ac 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA02__PWM3_OUT                           	0x0120 0x03ac 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA02__ENET1_1588_EVENT3_IN               	0x0120 0x03ac 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA02__I2C4_SDA                           	0x0120 0x03ac 0x05c0 4 2
+#define MX6UL_PAD_LCD_DATA02__GPIO3_IO07                         	0x0120 0x03ac 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA02__SRC_BT_CFG02                       	0x0120 0x03ac 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA02__SAI1_TX_BCLK                       	0x0120 0x03ac 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA03__LCDIF_DATA03                       	0x0124 0x03b0 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA03__PWM4_OUT                           	0x0124 0x03b0 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA03__ENET1_1588_EVENT3_OUT              	0x0124 0x03b0 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA03__I2C4_SCL                           	0x0124 0x03b0 0x05bc 4 2
+#define MX6UL_PAD_LCD_DATA03__GPIO3_IO08                         	0x0124 0x03b0 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA03__SRC_BT_CFG03                       	0x0124 0x03b0 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA03__SAI1_RX_DATA                       	0x0124 0x03b0 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA04__LCDIF_DATA04                       	0x0128 0x03b4 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA04__UART8_DCE_CTS                      	0x0128 0x03b4 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA04__UART8_DTE_RTS                      	0x0128 0x03b4 0x0658 1 2
+#define MX6UL_PAD_LCD_DATA04__ENET2_1588_EVENT2_IN               	0x0128 0x03b4 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA04__SPDIF_SR_CLK                       	0x0128 0x03b4 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA04__GPIO3_IO09                         	0x0128 0x03b4 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA04__SRC_BT_CFG04                       	0x0128 0x03b4 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA04__SAI1_TX_DATA                       	0x0128 0x03b4 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA05__LCDIF_DATA05                       	0x012c 0x03b8 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA05__UART8_DCE_RTS                      	0x012c 0x03b8 0x0658 1 3
+#define MX6UL_PAD_LCD_DATA05__UART8_DTE_CTS                      	0x012c 0x03b8 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA05__ENET2_1588_EVENT2_OUT              	0x012c 0x03b8 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA05__SPDIF_OUT                          	0x012c 0x03b8 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA05__GPIO3_IO10                         	0x012c 0x03b8 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA05__SRC_BT_CFG05                       	0x012c 0x03b8 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA05__ECSPI1_SS1                         	0x012c 0x03b8 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA06__LCDIF_DATA06                       	0x0130 0x03bc 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA06__UART7_DCE_CTS                      	0x0130 0x03bc 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA06__UART7_DTE_RTS                      	0x0130 0x03bc 0x0650 1 2
+#define MX6UL_PAD_LCD_DATA06__ENET2_1588_EVENT3_IN               	0x0130 0x03bc 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA06__SPDIF_LOCK                         	0x0130 0x03bc 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA06__GPIO3_IO11                         	0x0130 0x03bc 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA06__SRC_BT_CFG06                       	0x0130 0x03bc 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA06__ECSPI1_SS2                         	0x0130 0x03bc 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA07__LCDIF_DATA07                       	0x0134 0x03c0 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA07__UART7_DCE_RTS                      	0x0134 0x03c0 0x0650 1 3
+#define MX6UL_PAD_LCD_DATA07__UART7_DTE_CTS                      	0x0134 0x03c0 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA07__ENET2_1588_EVENT3_OUT              	0x0134 0x03c0 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA07__SPDIF_EXT_CLK                      	0x0134 0x03c0 0x061c 4 0
+#define MX6UL_PAD_LCD_DATA07__GPIO3_IO12                         	0x0134 0x03c0 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA07__SRC_BT_CFG07                       	0x0134 0x03c0 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA07__ECSPI1_SS3                         	0x0134 0x03c0 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA08__LCDIF_DATA08                       	0x0138 0x03c4 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA08__SPDIF_IN                           	0x0138 0x03c4 0x0618 1 2
+#define MX6UL_PAD_LCD_DATA08__CSI_DATA16                         	0x0138 0x03c4 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA08__EIM_DATA00                         	0x0138 0x03c4 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA08__GPIO3_IO13                         	0x0138 0x03c4 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA08__SRC_BT_CFG08                       	0x0138 0x03c4 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA08__FLEXCAN1_TX                        	0x0138 0x03c4 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA09__LCDIF_DATA09                       	0x013c 0x03c8 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA09__SAI3_MCLK                          	0x013c 0x03c8 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA09__CSI_DATA17                         	0x013c 0x03c8 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA09__EIM_DATA01                         	0x013c 0x03c8 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA09__GPIO3_IO14                         	0x013c 0x03c8 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA09__SRC_BT_CFG09                       	0x013c 0x03c8 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA09__FLEXCAN1_RX                        	0x013c 0x03c8 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA10__LCDIF_DATA10                       	0x0140 0x03cc 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA10__SAI3_RX_SYNC                       	0x0140 0x03cc 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA10__CSI_DATA18                         	0x0140 0x03cc 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA10__EIM_DATA02                         	0x0140 0x03cc 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA10__GPIO3_IO15                         	0x0140 0x03cc 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA10__SRC_BT_CFG10                       	0x0140 0x03cc 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA10__FLEXCAN2_TX                        	0x0140 0x03cc 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA11__LCDIF_DATA11                       	0x0144 0x03d0 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA11__SAI3_RX_BCLK                       	0x0144 0x03d0 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA11__CSI_DATA19                         	0x0144 0x03d0 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA11__EIM_DATA03                         	0x0144 0x03d0 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA11__GPIO3_IO16                         	0x0144 0x03d0 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA11__SRC_BT_CFG11                       	0x0144 0x03d0 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA11__FLEXCAN2_RX                        	0x0144 0x03d0 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA12__LCDIF_DATA12                       	0x0148 0x03d4 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA12__SAI3_TX_SYNC                       	0x0148 0x03d4 0x060c 1 1
+#define MX6UL_PAD_LCD_DATA12__CSI_DATA20                         	0x0148 0x03d4 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA12__EIM_DATA04                         	0x0148 0x03d4 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA12__GPIO3_IO17                         	0x0148 0x03d4 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA12__SRC_BT_CFG12                       	0x0148 0x03d4 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA12__ECSPI1_RDY                         	0x0148 0x03d4 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA13__LCDIF_DATA13                       	0x014c 0x03d8 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA13__SAI3_TX_BCLK                       	0x014c 0x03d8 0x0608 1 1
+#define MX6UL_PAD_LCD_DATA13__CSI_DATA21                         	0x014c 0x03d8 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA13__EIM_DATA05                         	0x014c 0x03d8 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA13__GPIO3_IO18                         	0x014c 0x03d8 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA13__SRC_BT_CFG13                       	0x014c 0x03d8 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA13__USDHC2_RESET_B                     	0x014c 0x03d8 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA14__LCDIF_DATA14                       	0x0150 0x03dc 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA14__SAI3_RX_DATA                       	0x0150 0x03dc 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA14__CSI_DATA22                         	0x0150 0x03dc 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA14__EIM_DATA06                         	0x0150 0x03dc 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA14__GPIO3_IO19                         	0x0150 0x03dc 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA14__SRC_BT_CFG14                       	0x0150 0x03dc 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA14__USDHC2_DATA4                       	0x0150 0x03dc 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA15__LCDIF_DATA15                       	0x0154 0x03e0 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA15__SAI3_TX_DATA                       	0x0154 0x03e0 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA15__CSI_DATA23                         	0x0154 0x03e0 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA15__EIM_DATA07                         	0x0154 0x03e0 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA15__GPIO3_IO20                         	0x0154 0x03e0 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA15__SRC_BT_CFG15                       	0x0154 0x03e0 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA15__USDHC2_DATA5                       	0x0154 0x03e0 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA16__LCDIF_DATA16                       	0x0158 0x03e4 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA16__UART7_DCE_TX                           	0x0158 0x03e4 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA16__UART7_DTE_RX                           	0x0158 0x03e4 0x0654 1 2
+#define MX6UL_PAD_LCD_DATA16__CSI_DATA01                         	0x0158 0x03e4 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA16__EIM_DATA08                         	0x0158 0x03e4 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA16__GPIO3_IO21                         	0x0158 0x03e4 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA16__SRC_BT_CFG24                       	0x0158 0x03e4 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA16__USDHC2_DATA6                       	0x0158 0x03e4 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA17__LCDIF_DATA17                       	0x015c 0x03e8 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA17__UART7_DCE_RX                           	0x015c 0x03e8 0x0654 1 3
+#define MX6UL_PAD_LCD_DATA17__UART7_DTE_TX                           	0x015c 0x03e8 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA17__CSI_DATA00                         	0x015c 0x03e8 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA17__EIM_DATA09                         	0x015c 0x03e8 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA17__GPIO3_IO22                         	0x015c 0x03e8 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA17__SRC_BT_CFG25                       	0x015c 0x03e8 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA17__USDHC2_DATA7                       	0x015c 0x03e8 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA18__LCDIF_DATA18                       	0x0160 0x03ec 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA18__PWM5_OUT                           	0x0160 0x03ec 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA18__CA7_MX6UL_EVENTO                   	0x0160 0x03ec 0x0000 2 0
+#define MX6UL_PAD_LCD_DATA18__CSI_DATA10                         	0x0160 0x03ec 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA18__EIM_DATA10                         	0x0160 0x03ec 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA18__GPIO3_IO23                         	0x0160 0x03ec 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA18__SRC_BT_CFG26                       	0x0160 0x03ec 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA18__USDHC2_CMD                         	0x0160 0x03ec 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA19__EIM_DATA11                         	0x0164 0x03f0 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA19__GPIO3_IO24                         	0x0164 0x03f0 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA19__SRC_BT_CFG27                       	0x0164 0x03f0 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA19__USDHC2_CLK                         	0x0164 0x03f0 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA19__LCDIF_DATA19                       	0x0164 0x03f0 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA19__PWM6_OUT                           	0x0164 0x03f0 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA19__WDOG1_WDOG_ANY                     	0x0164 0x03f0 0x0000 2 0
+#define MX6UL_PAD_LCD_DATA19__CSI_DATA11                         	0x0164 0x03f0 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA20__EIM_DATA12                         	0x0168 0x03f4 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA20__GPIO3_IO25                         	0x0168 0x03f4 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA20__SRC_BT_CFG28                       	0x0168 0x03f4 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA20__USDHC2_DATA0                       	0x0168 0x03f4 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA20__LCDIF_DATA20                       	0x0168 0x03f4 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA20__UART8_DCE_TX                           	0x0168 0x03f4 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA20__UART8_DTE_RX                           	0x0168 0x03f4 0x065c 1 2
+#define MX6UL_PAD_LCD_DATA20__ECSPI1_SCLK                        	0x0168 0x03f4 0x0534 2 0
+#define MX6UL_PAD_LCD_DATA20__CSI_DATA12                         	0x0168 0x03f4 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA21__LCDIF_DATA21                       	0x016c 0x03f8 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA21__UART8_DCE_RX                           	0x016c 0x03f8 0x065c 1 3
+#define MX6UL_PAD_LCD_DATA21__UART8_DTE_TX                           	0x016c 0x03f8 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA21__ECSPI1_SS0                         	0x016c 0x03f8 0x0000 2 0
+#define MX6UL_PAD_LCD_DATA21__CSI_DATA13                         	0x016c 0x03f8 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA21__EIM_DATA13                         	0x016c 0x03f8 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA21__GPIO3_IO26                         	0x016c 0x03f8 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA21__SRC_BT_CFG29                       	0x016c 0x03f8 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA21__USDHC2_DATA1                       	0x016c 0x03f8 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA22__LCDIF_DATA22                       	0x0170 0x03fc 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA22__MQS_RIGHT                          	0x0170 0x03fc 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA22__ECSPI1_MOSI                        	0x0170 0x03fc 0x053c 2 0
+#define MX6UL_PAD_LCD_DATA22__CSI_DATA14                         	0x0170 0x03fc 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA22__EIM_DATA14                         	0x0170 0x03fc 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA22__GPIO3_IO27                         	0x0170 0x03fc 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA22__SRC_BT_CFG30                       	0x0170 0x03fc 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA22__USDHC2_DATA2                       	0x0170 0x03fc 0x0000 8 0
+#define MX6UL_PAD_LCD_DATA23__LCDIF_DATA23                       	0x0174 0x0400 0x0000 0 0
+#define MX6UL_PAD_LCD_DATA23__MQS_LEFT                           	0x0174 0x0400 0x0000 1 0
+#define MX6UL_PAD_LCD_DATA23__ECSPI1_MISO                        	0x0174 0x0400 0x0538 2 0
+#define MX6UL_PAD_LCD_DATA23__CSI_DATA15                         	0x0174 0x0400 0x0000 3 0
+#define MX6UL_PAD_LCD_DATA23__EIM_DATA15                         	0x0174 0x0400 0x0000 4 0
+#define MX6UL_PAD_LCD_DATA23__GPIO3_IO28                         	0x0174 0x0400 0x0000 5 0
+#define MX6UL_PAD_LCD_DATA23__SRC_BT_CFG31                       	0x0174 0x0400 0x0000 6 0
+#define MX6UL_PAD_LCD_DATA23__USDHC2_DATA3                       	0x0174 0x0400 0x0000 8 0
+#define MX6UL_PAD_NAND_RE_B__RAWNAND_RE_B                        	0x0178 0x0404 0x0000 0 0
+#define MX6UL_PAD_NAND_RE_B__USDHC2_CLK                          	0x0178 0x0404 0x0670 1 2
+#define MX6UL_PAD_NAND_RE_B__QSPI_B_SCLK                         	0x0178 0x0404 0x0000 2 0
+#define MX6UL_PAD_NAND_RE_B__KPP_ROW00                           	0x0178 0x0404 0x0000 3 0
+#define MX6UL_PAD_NAND_RE_B__EIM_EB_B00                          	0x0178 0x0404 0x0000 4 0
+#define MX6UL_PAD_NAND_RE_B__GPIO4_IO00                          	0x0178 0x0404 0x0000 5 0
+#define MX6UL_PAD_NAND_RE_B__ECSPI3_SS2                          	0x0178 0x0404 0x0000 8 0
+#define MX6UL_PAD_NAND_WE_B__RAWNAND_WE_B                        	0x017c 0x0408 0x0000 0 0
+#define MX6UL_PAD_NAND_WE_B__USDHC2_CMD                          	0x017c 0x0408 0x0678 1 2
+#define MX6UL_PAD_NAND_WE_B__QSPI_B_SS0_B                        	0x017c 0x0408 0x0000 2 0
+#define MX6UL_PAD_NAND_WE_B__KPP_COL00                           	0x017c 0x0408 0x0000 3 0
+#define MX6UL_PAD_NAND_WE_B__EIM_EB_B01                          	0x017c 0x0408 0x0000 4 0
+#define MX6UL_PAD_NAND_WE_B__GPIO4_IO01                          	0x017c 0x0408 0x0000 5 0
+#define MX6UL_PAD_NAND_WE_B__ECSPI3_SS3                          	0x017c 0x0408 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA00__RAWNAND_DATA00                    	0x0180 0x040c 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA00__USDHC2_DATA0                      	0x0180 0x040c 0x067c 1 2
+#define MX6UL_PAD_NAND_DATA00__QSPI_B_SS1_B                      	0x0180 0x040c 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA00__KPP_ROW01                         	0x0180 0x040c 0x0000 3 0
+#define MX6UL_PAD_NAND_DATA00__EIM_AD08                          	0x0180 0x040c 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA00__GPIO4_IO02                        	0x0180 0x040c 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA00__ECSPI4_RDY                        	0x0180 0x040c 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA01__RAWNAND_DATA01                    	0x0184 0x0410 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA01__USDHC2_DATA1                      	0x0184 0x0410 0x0680 1 2
+#define MX6UL_PAD_NAND_DATA01__QSPI_B_DQS                        	0x0184 0x0410 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA01__KPP_COL01                         	0x0184 0x0410 0x0000 3 0
+#define MX6UL_PAD_NAND_DATA01__EIM_AD09                          	0x0184 0x0410 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA01__GPIO4_IO03                        	0x0184 0x0410 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA01__ECSPI4_SS1                        	0x0184 0x0410 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA02__RAWNAND_DATA02                    	0x0188 0x0414 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA02__USDHC2_DATA2                      	0x0188 0x0414 0x0684 1 1
+#define MX6UL_PAD_NAND_DATA02__QSPI_B_DATA00                     	0x0188 0x0414 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA02__KPP_ROW02                         	0x0188 0x0414 0x0000 3 0
+#define MX6UL_PAD_NAND_DATA02__EIM_AD10                          	0x0188 0x0414 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA02__GPIO4_IO04                        	0x0188 0x0414 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA02__ECSPI4_SS2                        	0x0188 0x0414 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA03__RAWNAND_DATA03                    	0x018c 0x0418 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA03__USDHC2_DATA3                      	0x018c 0x0418 0x0688 1 2
+#define MX6UL_PAD_NAND_DATA03__QSPI_B_DATA01                     	0x018c 0x0418 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA03__KPP_COL02                         	0x018c 0x0418 0x0000 3 0
+#define MX6UL_PAD_NAND_DATA03__EIM_AD11                          	0x018c 0x0418 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA03__GPIO4_IO05                        	0x018c 0x0418 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA03__ECSPI4_SS3                        	0x018c 0x0418 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA04__RAWNAND_DATA04                    	0x0190 0x041c 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA04__USDHC2_DATA4                      	0x0190 0x041c 0x068c 1 1
+#define MX6UL_PAD_NAND_DATA04__QSPI_B_DATA02                     	0x0190 0x041c 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA04__ECSPI4_SCLK                       	0x0190 0x041c 0x0564 3 1
+#define MX6UL_PAD_NAND_DATA04__EIM_AD12                          	0x0190 0x041c 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA04__GPIO4_IO06                        	0x0190 0x041c 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA04__UART2_DCE_TX                          	0x0190 0x041c 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA04__UART2_DTE_RX                          	0x0190 0x041c 0x062c 8 2
+#define MX6UL_PAD_NAND_DATA05__RAWNAND_DATA05                    	0x0194 0x0420 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA05__USDHC2_DATA5                      	0x0194 0x0420 0x0690 1 1
+#define MX6UL_PAD_NAND_DATA05__QSPI_B_DATA03                     	0x0194 0x0420 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA05__ECSPI4_MOSI                       	0x0194 0x0420 0x056c 3 1
+#define MX6UL_PAD_NAND_DATA05__EIM_AD13                          	0x0194 0x0420 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA05__GPIO4_IO07                        	0x0194 0x0420 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA05__UART2_DCE_RX                          	0x0194 0x0420 0x062c 8 3
+#define MX6UL_PAD_NAND_DATA05__UART2_DTE_TX                          	0x0194 0x0420 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA06__RAWNAND_DATA06                    	0x0198 0x0424 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA06__USDHC2_DATA6                      	0x0198 0x0424 0x0694 1 1
+#define MX6UL_PAD_NAND_DATA06__SAI2_RX_BCLK                      	0x0198 0x0424 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA06__ECSPI4_MISO                       	0x0198 0x0424 0x0568 3 1
+#define MX6UL_PAD_NAND_DATA06__EIM_AD14                          	0x0198 0x0424 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA06__GPIO4_IO08                        	0x0198 0x0424 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA06__UART2_DCE_CTS                     	0x0198 0x0424 0x0000 8 0
+#define MX6UL_PAD_NAND_DATA06__UART2_DTE_RTS                     	0x0198 0x0424 0x0628 8 4
+#define MX6UL_PAD_NAND_DATA07__RAWNAND_DATA07                    	0x019c 0x0428 0x0000 0 0
+#define MX6UL_PAD_NAND_DATA07__USDHC2_DATA7                      	0x019c 0x0428 0x0698 1 1
+#define MX6UL_PAD_NAND_DATA07__QSPI_A_SS1_B                      	0x019c 0x0428 0x0000 2 0
+#define MX6UL_PAD_NAND_DATA07__ECSPI4_SS0                        	0x019c 0x0428 0x0000 3 0
+#define MX6UL_PAD_NAND_DATA07__EIM_AD15                          	0x019c 0x0428 0x0000 4 0
+#define MX6UL_PAD_NAND_DATA07__GPIO4_IO09                        	0x019c 0x0428 0x0000 5 0
+#define MX6UL_PAD_NAND_DATA07__UART2_DCE_RTS                     	0x019c 0x0428 0x0628 8 5
+#define MX6UL_PAD_NAND_DATA07__UART2_DTE_CTS                     	0x019c 0x0428 0x0000 8 0
+#define MX6UL_PAD_NAND_ALE__RAWNAND_ALE                          	0x01a0 0x042c 0x0000 0 0
+#define MX6UL_PAD_NAND_ALE__USDHC2_RESET_B                       	0x01a0 0x042c 0x0000 1 0
+#define MX6UL_PAD_NAND_ALE__QSPI_A_DQS                           	0x01a0 0x042c 0x0000 2 0
+#define MX6UL_PAD_NAND_ALE__PWM3_OUT                             	0x01a0 0x042c 0x0000 3 0
+#define MX6UL_PAD_NAND_ALE__EIM_ADDR17                           	0x01a0 0x042c 0x0000 4 0
+#define MX6UL_PAD_NAND_ALE__GPIO4_IO10                           	0x01a0 0x042c 0x0000 5 0
+#define MX6UL_PAD_NAND_ALE__ECSPI3_SS1                           	0x01a0 0x042c 0x0000 8 0
+#define MX6UL_PAD_NAND_WP_B__RAWNAND_WP_B                        	0x01a4 0x0430 0x0000 0 0
+#define MX6UL_PAD_NAND_WP_B__USDHC1_RESET_B                      	0x01a4 0x0430 0x0000 1 0
+#define MX6UL_PAD_NAND_WP_B__QSPI_A_SCLK                         	0x01a4 0x0430 0x0000 2 0
+#define MX6UL_PAD_NAND_WP_B__PWM4_OUT                            	0x01a4 0x0430 0x0000 3 0
+#define MX6UL_PAD_NAND_WP_B__EIM_BCLK                            	0x01a4 0x0430 0x0000 4 0
+#define MX6UL_PAD_NAND_WP_B__GPIO4_IO11                          	0x01a4 0x0430 0x0000 5 0
+#define MX6UL_PAD_NAND_WP_B__ECSPI3_RDY                          	0x01a4 0x0430 0x0000 8 0
+#define MX6UL_PAD_NAND_READY_B__RAWNAND_READY_B                  	0x01a8 0x0434 0x0000 0 0
+#define MX6UL_PAD_NAND_READY_B__USDHC1_DATA4                     	0x01a8 0x0434 0x0000 1 0
+#define MX6UL_PAD_NAND_READY_B__QSPI_A_DATA00                    	0x01a8 0x0434 0x0000 2 0
+#define MX6UL_PAD_NAND_READY_B__ECSPI3_SS0                       	0x01a8 0x0434 0x0000 3 0
+#define MX6UL_PAD_NAND_READY_B__EIM_CS1_B                        	0x01a8 0x0434 0x0000 4 0
+#define MX6UL_PAD_NAND_READY_B__GPIO4_IO12                       	0x01a8 0x0434 0x0000 5 0
+#define MX6UL_PAD_NAND_READY_B__UART3_DCE_TX                         	0x01a8 0x0434 0x0000 8 0
+#define MX6UL_PAD_NAND_READY_B__UART3_DTE_RX                         	0x01a8 0x0434 0x0634 8 2
+#define MX6UL_PAD_NAND_CE0_B__RAWNAND_CE0_B                      	0x01ac 0x0438 0x0000 0 0
+#define MX6UL_PAD_NAND_CE0_B__USDHC1_DATA5                       	0x01ac 0x0438 0x0000 1 0
+#define MX6UL_PAD_NAND_CE0_B__QSPI_A_DATA01                      	0x01ac 0x0438 0x0000 2 0
+#define MX6UL_PAD_NAND_CE0_B__ECSPI3_SCLK                        	0x01ac 0x0438 0x0554 3 1
+#define MX6UL_PAD_NAND_CE0_B__EIM_DTACK_B                        	0x01ac 0x0438 0x0000 4 0
+#define MX6UL_PAD_NAND_CE0_B__GPIO4_IO13                         	0x01ac 0x0438 0x0000 5 0
+#define MX6UL_PAD_NAND_CE0_B__UART3_DCE_RX                           	0x01ac 0x0438 0x0634 8 3
+#define MX6UL_PAD_NAND_CE0_B__UART3_DTE_TX                           	0x01ac 0x0438 0x0000 8 0
+#define MX6UL_PAD_NAND_CE1_B__RAWNAND_CE1_B                      	0x01b0 0x043c 0x0000 0 0
+#define MX6UL_PAD_NAND_CE1_B__USDHC1_DATA6                       	0x01b0 0x043c 0x0000 1 0
+#define MX6UL_PAD_NAND_CE1_B__QSPI_A_DATA02                      	0x01b0 0x043c 0x0000 2 0
+#define MX6UL_PAD_NAND_CE1_B__ECSPI3_MOSI                        	0x01b0 0x043c 0x055c 3 1
+#define MX6UL_PAD_NAND_CE1_B__EIM_ADDR18                         	0x01b0 0x043c 0x0000 4 0
+#define MX6UL_PAD_NAND_CE1_B__GPIO4_IO14                         	0x01b0 0x043c 0x0000 5 0
+#define MX6UL_PAD_NAND_CE1_B__UART3_DCE_CTS                      	0x01b0 0x043c 0x0000 8 0
+#define MX6UL_PAD_NAND_CE1_B__UART3_DTE_RTS                      	0x01b0 0x043c 0x0630 8 2
+#define MX6UL_PAD_NAND_CLE__RAWNAND_CLE                          	0x01b4 0x0440 0x0000 0 0
+#define MX6UL_PAD_NAND_CLE__USDHC1_DATA7                         	0x01b4 0x0440 0x0000 1 0
+#define MX6UL_PAD_NAND_CLE__QSPI_A_DATA03                        	0x01b4 0x0440 0x0000 2 0
+#define MX6UL_PAD_NAND_CLE__ECSPI3_MISO                          	0x01b4 0x0440 0x0558 3 1
+#define MX6UL_PAD_NAND_CLE__EIM_ADDR16                           	0x01b4 0x0440 0x0000 4 0
+#define MX6UL_PAD_NAND_CLE__GPIO4_IO15                           	0x01b4 0x0440 0x0000 5 0
+#define MX6UL_PAD_NAND_CLE__UART3_DCE_RTS                        	0x01b4 0x0440 0x0630 8 3
+#define MX6UL_PAD_NAND_CLE__UART3_DTE_CTS                        	0x01b4 0x0440 0x0000 8 0
+#define MX6UL_PAD_NAND_DQS__RAWNAND_DQS                          	0x01b8 0x0444 0x0000 0 0
+#define MX6UL_PAD_NAND_DQS__CSI_FIELD                            	0x01b8 0x0444 0x0530 1 1
+#define MX6UL_PAD_NAND_DQS__QSPI_A_SS0_B                         	0x01b8 0x0444 0x0000 2 0
+#define MX6UL_PAD_NAND_DQS__PWM5_OUT                             	0x01b8 0x0444 0x0000 3 0
+#define MX6UL_PAD_NAND_DQS__EIM_WAIT                             	0x01b8 0x0444 0x0000 4 0
+#define MX6UL_PAD_NAND_DQS__GPIO4_IO16                           	0x01b8 0x0444 0x0000 5 0
+#define MX6UL_PAD_NAND_DQS__SDMA_EXT_EVENT01                     	0x01b8 0x0444 0x0000 6 0
+#define MX6UL_PAD_NAND_DQS__SPDIF_EXT_CLK                        	0x01b8 0x0444 0x0000 8 0
+#define MX6UL_PAD_SD1_CMD__USDHC1_CMD                            	0x01bc 0x0448 0x0000 0 0
+#define MX6UL_PAD_SD1_CMD__GPT2_COMPARE1                         	0x01bc 0x0448 0x0000 1 0
+#define MX6UL_PAD_SD1_CMD__SAI2_RX_SYNC                          	0x01bc 0x0448 0x0000 2 0
+#define MX6UL_PAD_SD1_CMD__SPDIF_OUT                             	0x01bc 0x0448 0x0000 3 0
+#define MX6UL_PAD_SD1_CMD__EIM_ADDR19                            	0x01bc 0x0448 0x0000 4 0
+#define MX6UL_PAD_SD1_CMD__GPIO2_IO16                            	0x01bc 0x0448 0x0000 5 0
+#define MX6UL_PAD_SD1_CMD__SDMA_EXT_EVENT00                      	0x01bc 0x0448 0x0000 6 0
+#define MX6UL_PAD_SD1_CMD__USB_OTG1_PWR                          	0x01bc 0x0448 0x0000 8 0
+#define MX6UL_PAD_SD1_CLK__USDHC1_CLK                            	0x01c0 0x044c 0x0000 0 0
+#define MX6UL_PAD_SD1_CLK__GPT2_COMPARE2                         	0x01c0 0x044c 0x0000 1 0
+#define MX6UL_PAD_SD1_CLK__SAI2_MCLK                             	0x01c0 0x044c 0x0000 2 0
+#define MX6UL_PAD_SD1_CLK__SPDIF_IN                              	0x01c0 0x044c 0x0618 3 3
+#define MX6UL_PAD_SD1_CLK__EIM_ADDR20                            	0x01c0 0x044c 0x0000 4 0
+#define MX6UL_PAD_SD1_CLK__GPIO2_IO17                            	0x01c0 0x044c 0x0000 5 0
+#define MX6UL_PAD_SD1_CLK__USB_OTG1_OC                           	0x01c0 0x044c 0x0000 8 0
+#define MX6UL_PAD_SD1_DATA0__USDHC1_DATA0                        	0x01c4 0x0450 0x0000 0 0
+#define MX6UL_PAD_SD1_DATA0__GPT2_COMPARE3                       	0x01c4 0x0450 0x0000 1 0
+#define MX6UL_PAD_SD1_DATA0__SAI2_TX_SYNC                        	0x01c4 0x0450 0x05fc 2 1
+#define MX6UL_PAD_SD1_DATA0__FLEXCAN1_TX                         	0x01c4 0x0450 0x0000 3 0
+#define MX6UL_PAD_SD1_DATA0__EIM_ADDR21                          	0x01c4 0x0450 0x0000 4 0
+#define MX6UL_PAD_SD1_DATA0__GPIO2_IO18                          	0x01c4 0x0450 0x0000 5 0
+#define MX6UL_PAD_SD1_DATA0__ANATOP_OTG1_ID                      	0x01c4 0x0450 0x0000 8 0
+#define MX6UL_PAD_SD1_DATA1__USDHC1_DATA1                        	0x01c8 0x0454 0x0000 0 0
+#define MX6UL_PAD_SD1_DATA1__GPT2_CLK                            	0x01c8 0x0454 0x05a0 1 1
+#define MX6UL_PAD_SD1_DATA1__SAI2_TX_BCLK                        	0x01c8 0x0454 0x05f8 2 1
+#define MX6UL_PAD_SD1_DATA1__FLEXCAN1_RX                         	0x01c8 0x0454 0x0584 3 3
+#define MX6UL_PAD_SD1_DATA1__EIM_ADDR22                          	0x01c8 0x0454 0x0000 4 0
+#define MX6UL_PAD_SD1_DATA1__GPIO2_IO19                          	0x01c8 0x0454 0x0000 5 0
+#define MX6UL_PAD_SD1_DATA1__USB_OTG2_PWR                        	0x01c8 0x0454 0x0000 8 0
+#define MX6UL_PAD_SD1_DATA2__USDHC1_DATA2                        	0x01cc 0x0458 0x0000 0 0
+#define MX6UL_PAD_SD1_DATA2__GPT2_CAPTURE1                       	0x01cc 0x0458 0x0598 1 1
+#define MX6UL_PAD_SD1_DATA2__SAI2_RX_DATA                        	0x01cc 0x0458 0x05f4 2 1
+#define MX6UL_PAD_SD1_DATA2__FLEXCAN2_TX                         	0x01cc 0x0458 0x0000 3 0
+#define MX6UL_PAD_SD1_DATA2__EIM_ADDR23                          	0x01cc 0x0458 0x0000 4 0
+#define MX6UL_PAD_SD1_DATA2__GPIO2_IO20                          	0x01cc 0x0458 0x0000 5 0
+#define MX6UL_PAD_SD1_DATA2__CCM_CLKO1                           	0x01cc 0x0458 0x0000 6 0
+#define MX6UL_PAD_SD1_DATA2__USB_OTG2_OC                         	0x01cc 0x0458 0x0000 8 0
+#define MX6UL_PAD_SD1_DATA3__USDHC1_DATA3                        	0x01d0 0x045c 0x0000 0 0
+#define MX6UL_PAD_SD1_DATA3__GPT2_CAPTURE2                       	0x01d0 0x045c 0x059c 1 1
+#define MX6UL_PAD_SD1_DATA3__SAI2_TX_DATA                        	0x01d0 0x045c 0x0000 2 0
+#define MX6UL_PAD_SD1_DATA3__FLEXCAN2_RX                         	0x01d0 0x045c 0x0588 3 3
+#define MX6UL_PAD_SD1_DATA3__EIM_ADDR24                          	0x01d0 0x045c 0x0000 4 0
+#define MX6UL_PAD_SD1_DATA3__GPIO2_IO21                          	0x01d0 0x045c 0x0000 5 0
+#define MX6UL_PAD_SD1_DATA3__CCM_CLKO2                           	0x01d0 0x045c 0x0000 6 0
+#define MX6UL_PAD_SD1_DATA3__ANATOP_OTG2_ID                      	0x01d0 0x045c 0x0000 8 0
+#define MX6UL_PAD_CSI_MCLK__CSI_MCLK                             	0x01d4 0x0460 0x0000 0 0
+#define MX6UL_PAD_CSI_MCLK__USDHC2_CD_B                          	0x01d4 0x0460 0x0674 1 0
+#define MX6UL_PAD_CSI_MCLK__RAWNAND_CE2_B                        	0x01d4 0x0460 0x0000 2 0
+#define MX6UL_PAD_CSI_MCLK__I2C1_SDA                             	0x01d4 0x0460 0x05a8 3 0
+#define MX6UL_PAD_CSI_MCLK__EIM_CS0_B                            	0x01d4 0x0460 0x0000 4 0
+#define MX6UL_PAD_CSI_MCLK__GPIO4_IO17                           	0x01d4 0x0460 0x0000 5 0
+#define MX6UL_PAD_CSI_MCLK__SNVS_HP_VIO_5_CTL                    	0x01d4 0x0460 0x0000 6 0
+#define MX6UL_PAD_CSI_MCLK__UART6_DCE_TX                             	0x01d4 0x0460 0x0000 8 0
+#define MX6UL_PAD_CSI_MCLK__UART6_DTE_RX                             	0x01d4 0x0460 0x064c 8 0
+#define MX6UL_PAD_CSI_PIXCLK__CSI_PIXCLK                         	0x01d8 0x0464 0x0528 0 1
+#define MX6UL_PAD_CSI_PIXCLK__USDHC2_WP                          	0x01d8 0x0464 0x069c 1 2
+#define MX6UL_PAD_CSI_PIXCLK__RAWNAND_CE3_B                      	0x01d8 0x0464 0x0000 2 0
+#define MX6UL_PAD_CSI_PIXCLK__I2C1_SCL                           	0x01d8 0x0464 0x05a4 3 2
+#define MX6UL_PAD_CSI_PIXCLK__EIM_OE                             	0x01d8 0x0464 0x0000 4 0
+#define MX6UL_PAD_CSI_PIXCLK__GPIO4_IO18                         	0x01d8 0x0464 0x0000 5 0
+#define MX6UL_PAD_CSI_PIXCLK__SNVS_HP_VIO_5                      	0x01d8 0x0464 0x0000 6 0
+#define MX6UL_PAD_CSI_PIXCLK__UART6_DCE_RX                           	0x01d8 0x0464 0x064c 8 3
+#define MX6UL_PAD_CSI_PIXCLK__UART6_DTE_TX                           	0x01d8 0x0464 0x0000 8 0
+#define MX6UL_PAD_CSI_VSYNC__CSI_VSYNC                           	0x01dc 0x0468 0x052c 0 0
+#define MX6UL_PAD_CSI_VSYNC__USDHC2_CLK                          	0x01dc 0x0468 0x0670 1 0
+#define MX6UL_PAD_CSI_VSYNC__SIM1_PORT1_CLK                      	0x01dc 0x0468 0x0000 2 0
+#define MX6UL_PAD_CSI_VSYNC__I2C2_SDA                            	0x01dc 0x0468 0x05b0 3 0
+#define MX6UL_PAD_CSI_VSYNC__EIM_RW                              	0x01dc 0x0468 0x0000 4 0
+#define MX6UL_PAD_CSI_VSYNC__GPIO4_IO19                          	0x01dc 0x0468 0x0000 5 0
+#define MX6UL_PAD_CSI_VSYNC__PWM7_OUT                            	0x01dc 0x0468 0x0000 6 0
+#define MX6UL_PAD_CSI_VSYNC__UART6_DCE_RTS                       	0x01dc 0x0468 0x0648 8 0
+#define MX6UL_PAD_CSI_VSYNC__UART6_DTE_CTS                       	0x01dc 0x0468 0x0000 8 0
+#define MX6UL_PAD_CSI_HSYNC__CSI_HSYNC                           	0x01e0 0x046c 0x0524 0 0
+#define MX6UL_PAD_CSI_HSYNC__USDHC2_CMD                          	0x01e0 0x046c 0x0678 1 0
+#define MX6UL_PAD_CSI_HSYNC__SIM1_PORT1_PD                       	0x01e0 0x046c 0x0000 2 0
+#define MX6UL_PAD_CSI_HSYNC__I2C2_SCL                            	0x01e0 0x046c 0x05ac 3 0
+#define MX6UL_PAD_CSI_HSYNC__EIM_LBA_B                           	0x01e0 0x046c 0x0000 4 0
+#define MX6UL_PAD_CSI_HSYNC__GPIO4_IO20                          	0x01e0 0x046c 0x0000 5 0
+#define MX6UL_PAD_CSI_HSYNC__PWM8_OUT                            	0x01e0 0x046c 0x0000 6 0
+#define MX6UL_PAD_CSI_HSYNC__UART6_DCE_CTS                       	0x01e0 0x046c 0x0000 8 0
+#define MX6UL_PAD_CSI_HSYNC__UART6_DTE_RTS                       	0x01e0 0x046c 0x0648 8 1
+#define MX6UL_PAD_CSI_DATA00__CSI_DATA02                         	0x01e4 0x0470 0x04c4 0 0
+#define MX6UL_PAD_CSI_DATA00__USDHC2_DATA0                       	0x01e4 0x0470 0x067c 1 0
+#define MX6UL_PAD_CSI_DATA00__SIM1_PORT1_RST_B                   	0x01e4 0x0470 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA00__ECSPI2_SCLK                        	0x01e4 0x0470 0x0544 3 0
+#define MX6UL_PAD_CSI_DATA00__EIM_AD00                           	0x01e4 0x0470 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA00__GPIO4_IO21                         	0x01e4 0x0470 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA00__SRC_INT_BOOT                       	0x01e4 0x0470 0x0000 6 0
+#define MX6UL_PAD_CSI_DATA00__UART5_DCE_TX                           	0x01e4 0x0470 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA00__UART5_DTE_RX                           	0x01e4 0x0470 0x0644 8 0
+#define MX6UL_PAD_CSI_DATA01__CSI_DATA03                         	0x01e8 0x0474 0x04c8 0 0
+#define MX6UL_PAD_CSI_DATA01__USDHC2_DATA1                       	0x01e8 0x0474 0x0680 1 0
+#define MX6UL_PAD_CSI_DATA01__SIM1_PORT1_SVEN                    	0x01e8 0x0474 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA01__ECSPI2_SS0                         	0x01e8 0x0474 0x0000 3 0
+#define MX6UL_PAD_CSI_DATA01__EIM_AD01                           	0x01e8 0x0474 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA01__GPIO4_IO22                         	0x01e8 0x0474 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA01__SAI1_MCLK                          	0x01e8 0x0474 0x0000 6 0
+#define MX6UL_PAD_CSI_DATA01__UART5_DCE_RX                           	0x01e8 0x0474 0x0644 8 1
+#define MX6UL_PAD_CSI_DATA01__UART5_DTE_TX                           	0x01e8 0x0474 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA02__CSI_DATA04                         	0x01ec 0x0478 0x04d8 0 1
+#define MX6UL_PAD_CSI_DATA02__USDHC2_DATA2                       	0x01ec 0x0478 0x0684 1 2
+#define MX6UL_PAD_CSI_DATA02__SIM1_PORT1_TRXD                    	0x01ec 0x0478 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA02__ECSPI2_MOSI                        	0x01ec 0x0478 0x054c 3 1
+#define MX6UL_PAD_CSI_DATA02__EIM_AD02                           	0x01ec 0x0478 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA02__GPIO4_IO23                         	0x01ec 0x0478 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA02__SAI1_RX_SYNC                       	0x01ec 0x0478 0x0000 6 0
+#define MX6UL_PAD_CSI_DATA02__UART5_DCE_RTS                      	0x01ec 0x0478 0x0640 8 5
+#define MX6UL_PAD_CSI_DATA02__UART5_DTE_CTS                      	0x01ec 0x0478 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA03__CSI_DATA05                         	0x01f0 0x047c 0x04cc 0 0
+#define MX6UL_PAD_CSI_DATA03__USDHC2_DATA3                       	0x01f0 0x047c 0x0688 1 0
+#define MX6UL_PAD_CSI_DATA03__SIM2_PORT1_PD                      	0x01f0 0x047c 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA03__ECSPI2_MISO                        	0x01f0 0x047c 0x0548 3 0
+#define MX6UL_PAD_CSI_DATA03__EIM_AD03                           	0x01f0 0x047c 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA03__GPIO4_IO24                         	0x01f0 0x047c 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA03__SAI1_RX_BCLK                       	0x01f0 0x047c 0x0000 6 0
+#define MX6UL_PAD_CSI_DATA03__UART5_DCE_CTS                      	0x01f0 0x047c 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA03__UART5_DTE_RTS                      	0x01f0 0x047c 0x0640 8 0
+#define MX6UL_PAD_CSI_DATA04__CSI_DATA06                         	0x01f4 0x0480 0x04dc 0 1
+#define MX6UL_PAD_CSI_DATA04__USDHC2_DATA4                       	0x01f4 0x0480 0x068c 1 2
+#define MX6UL_PAD_CSI_DATA04__SIM2_PORT1_CLK                     	0x01f4 0x0480 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA04__ECSPI1_SCLK                        	0x01f4 0x0480 0x0534 3 1
+#define MX6UL_PAD_CSI_DATA04__EIM_AD04                           	0x01f4 0x0480 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA04__GPIO4_IO25                         	0x01f4 0x0480 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA04__SAI1_TX_SYNC                       	0x01f4 0x0480 0x05ec 6 1
+#define MX6UL_PAD_CSI_DATA04__USDHC1_WP                          	0x01f4 0x0480 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA05__CSI_DATA07                         	0x01f8 0x0484 0x04e0 0 1
+#define MX6UL_PAD_CSI_DATA05__USDHC2_DATA5                       	0x01f8 0x0484 0x0690 1 2
+#define MX6UL_PAD_CSI_DATA05__SIM2_PORT1_RST_B                   	0x01f8 0x0484 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA05__ECSPI1_SS0                         	0x01f8 0x0484 0x0000 3 0
+#define MX6UL_PAD_CSI_DATA05__EIM_AD05                           	0x01f8 0x0484 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA05__GPIO4_IO26                         	0x01f8 0x0484 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA05__SAI1_TX_BCLK                       	0x01f8 0x0484 0x05e8 6 1
+#define MX6UL_PAD_CSI_DATA05__USDHC1_CD_B                        	0x01f8 0x0484 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA06__CSI_DATA08                         	0x01fc 0x0488 0x04e4 0 1
+#define MX6UL_PAD_CSI_DATA06__USDHC2_DATA6                       	0x01fc 0x0488 0x0694 1 2
+#define MX6UL_PAD_CSI_DATA06__SIM2_PORT1_SVEN                    	0x01fc 0x0488 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA06__ECSPI1_MOSI                        	0x01fc 0x0488 0x053c 3 1
+#define MX6UL_PAD_CSI_DATA06__EIM_AD06                           	0x01fc 0x0488 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA06__GPIO4_IO27                         	0x01fc 0x0488 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA06__SAI1_RX_DATA                       	0x01fc 0x0488 0x0000 6 0
+#define MX6UL_PAD_CSI_DATA06__USDHC1_RESET_B                     	0x01fc 0x0488 0x0000 8 0
+#define MX6UL_PAD_CSI_DATA07__CSI_DATA09                         	0x0200 0x048c 0x04e8 0 1
+#define MX6UL_PAD_CSI_DATA07__USDHC2_DATA7                       	0x0200 0x048c 0x0698 1 2
+#define MX6UL_PAD_CSI_DATA07__SIM2_PORT1_TRXD                    	0x0200 0x048c 0x0000 2 0
+#define MX6UL_PAD_CSI_DATA07__ECSPI1_MISO                        	0x0200 0x048c 0x0538 3 1
+#define MX6UL_PAD_CSI_DATA07__EIM_AD07                           	0x0200 0x048c 0x0000 4 0
+#define MX6UL_PAD_CSI_DATA07__GPIO4_IO28                         	0x0200 0x048c 0x0000 5 0
+#define MX6UL_PAD_CSI_DATA07__SAI1_TX_DATA                       	0x0200 0x048c 0x0000 6 0
+#define MX6UL_PAD_CSI_DATA07__USDHC1_VSELECT                     	0x0200 0x048c 0x0000 8 0
+
+#endif /* __DTS_IMX6UL_PINFUNC_H */
diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
new file mode 100644
index 0000000..09edbed
--- /dev/null
+++ b/arch/arm/boot/dts/imx6ul.dtsi
@@ -0,0 +1,707 @@
+/*
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/clock/imx6ul-clock.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "imx6ul-pinfunc.h"
+#include "skeleton.dtsi"
+
+/ {
+	aliases {
+		ethernet0 = &fec1;
+		ethernet1 = &fec2;
+		gpio0 = &gpio1;
+		gpio1 = &gpio2;
+		gpio2 = &gpio3;
+		gpio3 = &gpio4;
+		gpio4 = &gpio5;
+		i2c0 = &i2c1;
+		i2c1 = &i2c2;
+		i2c2 = &i2c3;
+		i2c3 = &i2c4;
+		mmc0 = &usdhc1;
+		mmc1 = &usdhc2;
+		serial0 = &uart1;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		serial3 = &uart4;
+		serial4 = &uart5;
+		serial5 = &uart6;
+		serial6 = &uart7;
+		serial7 = &uart8;
+		spi0 = &ecspi1;
+		spi1 = &ecspi2;
+		spi2 = &ecspi3;
+		spi3 = &ecspi4;
+		usbphy0 = &usbphy1;
+		usbphy1 = &usbphy2;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			compatible = "arm,cortex-a7";
+			device_type = "cpu";
+			reg = <0>;
+			clock-latency = <61036>; /* two CLK32 periods */
+			operating-points = <
+				/* kHz	uV */
+				528000	1250000
+				396000	1150000
+				198000	1150000
+			>;
+			fsl,soc-operating-points = <
+				/* kHz	uV */
+				528000	1250000
+				396000	1150000
+				198000	1150000
+			>;
+			clocks = <&clks IMX6UL_CLK_ARM>,
+				 <&clks IMX6UL_CLK_PLL2_BUS>,
+				 <&clks IMX6UL_CLK_PLL2_PFD2>,
+				 <&clks IMX6UL_CA7_SECONDARY_SEL>,
+				 <&clks IMX6UL_CLK_STEP>,
+				 <&clks IMX6UL_CLK_PLL1_SW>,
+				 <&clks IMX6UL_CLK_PLL1_SYS>,
+				 <&clks IMX6UL_PLL1_BYPASS>,
+				 <&clks IMX6UL_CLK_PLL1>,
+				 <&clks IMX6UL_PLL1_BYPASS_SRC>,
+				 <&clks IMX6UL_CLK_OSC>;
+			clock-names = "arm", "pll2_bus",  "pll2_pfd2_396m",
+				      "secondary_sel", "step", "pll1_sw",
+				      "pll1_sys", "pll1_bypass", "pll1",
+				      "pll1_bypass_src", "osc";
+			arm-supply = <&reg_arm>;
+			soc-supply = <&reg_soc>;
+		};
+	};
+
+	intc: interrupt-controller@00a01000 {
+		compatible = "arm,cortex-a7-gic";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		reg = <0x00a01000 0x1000>,
+		      <0x00a02000 0x1000>,
+		      <0x00a04000 0x2000>,
+		      <0x00a06000 0x2000>;
+	};
+
+	ckil: clock-ckil {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		clock-output-names = "ckil";
+	};
+
+	osc: clock-osc {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24000000>;
+		clock-output-names = "osc";
+	};
+
+	ipp_di0: clock-di0 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "ipp_di0";
+	};
+
+	ipp_di1: clock-di1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <0>;
+		clock-output-names = "ipp_di1";
+	};
+
+	soc {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		compatible = "simple-bus";
+		interrupt-parent = <&gpc>;
+		ranges;
+
+		pmu {
+			compatible = "arm,cortex-a7-pmu";
+			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+		};
+
+		aips1: aips-bus@02000000 {
+			compatible = "fsl,aips-bus", "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x02000000 0x100000>;
+			ranges;
+
+			spba-bus@02000000 {
+				compatible = "fsl,spba-bus", "simple-bus";
+				#address-cells = <1>;
+				#size-cells = <1>;
+				reg = <0x02000000 0x40000>;
+				ranges;
+
+				ecspi1: ecspi@02008000 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "fsl,imx6ul-ecspi", "fsl,imx51-ecspi";
+					reg = <0x02008000 0x4000>;
+					interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_ECSPI1>,
+						 <&clks IMX6UL_CLK_ECSPI1>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+
+				ecspi2: ecspi@0200c000 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "fsl,imx6ul-ecspi", "fsl,imx51-ecspi";
+					reg = <0x0200c000 0x4000>;
+					interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_ECSPI2>,
+						 <&clks IMX6UL_CLK_ECSPI2>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+
+				ecspi3: ecspi@02010000 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "fsl,imx6ul-ecspi", "fsl,imx51-ecspi";
+					reg = <0x02010000 0x4000>;
+					interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_ECSPI3>,
+						 <&clks IMX6UL_CLK_ECSPI3>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+
+				ecspi4: ecspi@02014000 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "fsl,imx6ul-ecspi", "fsl,imx51-ecspi";
+					reg = <0x02014000 0x4000>;
+					interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_ECSPI4>,
+						 <&clks IMX6UL_CLK_ECSPI4>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+
+				uart7: serial@02018000 {
+					compatible = "fsl,imx6ul-uart",
+						     "fsl,imx6q-uart";
+					reg = <0x02018000 0x4000>;
+					interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_UART7_IPG>,
+						 <&clks IMX6UL_CLK_UART7_SERIAL>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+
+				uart1: serial@02020000 {
+					compatible = "fsl,imx6ul-uart",
+						     "fsl,imx6q-uart";
+					reg = <0x02020000 0x4000>;
+					interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_UART1_IPG>,
+						 <&clks IMX6UL_CLK_UART1_SERIAL>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+
+				uart8: serial@02024000 {
+					compatible = "fsl,imx6ul-uart",
+						     "fsl,imx6q-uart";
+					reg = <0x02024000 0x4000>;
+					interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+					clocks = <&clks IMX6UL_CLK_UART8_IPG>,
+						 <&clks IMX6UL_CLK_UART8_SERIAL>;
+					clock-names = "ipg", "per";
+					status = "disabled";
+				};
+			};
+
+			gpt1: gpt@02098000 {
+				compatible = "fsl,imx6ul-gpt", "fsl,imx6sx-gpt";
+				reg = <0x02098000 0x4000>;
+				interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_GPT1_BUS>,
+					 <&clks IMX6UL_CLK_GPT1_SERIAL>;
+				clock-names = "ipg", "per";
+			};
+
+			gpio1: gpio@0209c000 {
+				compatible = "fsl,imx6ul-gpio", "fsl,imx35-gpio";
+				reg = <0x0209c000 0x4000>;
+				interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			gpio2: gpio@020a0000 {
+				compatible = "fsl,imx6ul-gpio", "fsl,imx35-gpio";
+				reg = <0x020a0000 0x4000>;
+				interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			gpio3: gpio@020a4000 {
+				compatible = "fsl,imx6ul-gpio", "fsl,imx35-gpio";
+				reg = <0x020a4000 0x4000>;
+				interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			gpio4: gpio@020a8000 {
+				compatible = "fsl,imx6ul-gpio", "fsl,imx35-gpio";
+				reg = <0x020a8000 0x4000>;
+				interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			gpio5: gpio@020ac000 {
+				compatible = "fsl,imx6ul-gpio", "fsl,imx35-gpio";
+				reg = <0x020ac000 0x4000>;
+				interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			fec2: ethernet@020b4000 {
+				compatible = "fsl,imx6ul-fec", "fsl,imx6q-fec";
+				reg = <0x020b4000 0x4000>;
+				interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_ENET>,
+					 <&clks IMX6UL_CLK_ENET_AHB>,
+					 <&clks IMX6UL_CLK_ENET_PTP>,
+					 <&clks IMX6UL_CLK_ENET2_REF_125M>,
+					 <&clks IMX6UL_CLK_ENET2_REF_125M>;
+				clock-names = "ipg", "ahb", "ptp",
+					      "enet_clk_ref", "enet_out";
+				fsl,num-tx-queues = <1>;
+				fsl,num-rx-queues = <1>;
+				status = "disabled";
+			};
+
+			wdog1: wdog@020bc000 {
+				compatible = "fsl,imx6ul-wdt", "fsl,imx21-wdt";
+				reg = <0x020bc000 0x4000>;
+				interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_WDOG1>;
+			};
+
+			wdog2: wdog@020c0000 {
+				compatible = "fsl,imx6ul-wdt", "fsl,imx21-wdt";
+				reg = <0x020c0000 0x4000>;
+				interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_WDOG2>;
+				status = "disabled";
+			};
+
+			clks: ccm@020c4000 {
+				compatible = "fsl,imx6ul-ccm";
+				reg = <0x020c4000 0x4000>;
+				interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+				#clock-cells = <1>;
+				clocks = <&ckil>, <&osc>, <&ipp_di0>, <&ipp_di1>;
+				clock-names = "ckil", "osc", "ipp_di0", "ipp_di1";
+			};
+
+			anatop: anatop@020c8000 {
+				compatible = "fsl,imx6ul-anatop", "fsl,imx6q-anatop",
+					     "syscon", "simple-bus";
+				reg = <0x020c8000 0x1000>;
+				interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
+
+				reg_3p0: regulator-3p0@120 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vdd3p0";
+					regulator-min-microvolt = <2625000>;
+					regulator-max-microvolt = <3400000>;
+					anatop-reg-offset = <0x120>;
+					anatop-vol-bit-shift = <8>;
+					anatop-vol-bit-width = <5>;
+					anatop-min-bit-val = <0>;
+					anatop-min-voltage = <2625000>;
+					anatop-max-voltage = <3400000>;
+					anatop-enable-bit = <0>;
+				};
+
+				reg_arm: regulator-vddcore@140 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "cpu";
+					regulator-min-microvolt = <725000>;
+					regulator-max-microvolt = <1450000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x140>;
+					anatop-vol-bit-shift = <0>;
+					anatop-vol-bit-width = <5>;
+					anatop-delay-reg-offset = <0x170>;
+					anatop-delay-bit-shift = <24>;
+					anatop-delay-bit-width = <2>;
+					anatop-min-bit-val = <1>;
+					anatop-min-voltage = <725000>;
+					anatop-max-voltage = <1450000>;
+				};
+
+				reg_soc: regulator-vddsoc@140 {
+					compatible = "fsl,anatop-regulator";
+					regulator-name = "vddsoc";
+					regulator-min-microvolt = <725000>;
+					regulator-max-microvolt = <1450000>;
+					regulator-always-on;
+					anatop-reg-offset = <0x140>;
+					anatop-vol-bit-shift = <18>;
+					anatop-vol-bit-width = <5>;
+					anatop-delay-reg-offset = <0x170>;
+					anatop-delay-bit-shift = <28>;
+					anatop-delay-bit-width = <2>;
+					anatop-min-bit-val = <1>;
+					anatop-min-voltage = <725000>;
+					anatop-max-voltage = <1450000>;
+				};
+			};
+
+			usbphy1: usbphy@020c9000 {
+				compatible = "fsl,imx6ul-usbphy", "fsl,imx23-usbphy";
+				reg = <0x020c9000 0x1000>;
+				interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_USBPHY1>;
+				phy-3p0-supply = <&reg_3p0>;
+				fsl,anatop = <&anatop>;
+			};
+
+			usbphy2: usbphy@020ca000 {
+				compatible = "fsl,imx6ul-usbphy", "fsl,imx23-usbphy";
+				reg = <0x020ca000 0x1000>;
+				interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_USBPHY2>;
+				phy-3p0-supply = <&reg_3p0>;
+				fsl,anatop = <&anatop>;
+			};
+
+			snvs: snvs@020cc000 {
+				compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+				reg = <0x020cc000 0x4000>;
+
+				snvs_rtc: snvs-rtc-lp {
+					compatible = "fsl,sec-v4.0-mon-rtc-lp";
+					regmap = <&snvs>;
+					offset = <0x34>;
+					interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+						     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+				};
+
+				snvs_pwrkey: snvs-powerkey {
+					compatible = "fsl,sec-v4.0-pwrkey";
+					regmap = <&snvs>;
+					interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+					linux,keycode = <KEY_POWER>;
+					wakeup-source;
+				};
+			};
+
+			epit1: epit@020d0000 {
+				reg = <0x020d0000 0x4000>;
+				interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			epit2: epit@020d4000 {
+				reg = <0x020d4000 0x4000>;
+				interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			src: src@020d8000 {
+				compatible = "fsl,imx6ul-src", "fsl,imx51-src";
+				reg = <0x020d8000 0x4000>;
+				interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+				#reset-cells = <1>;
+			};
+
+			gpc: gpc@020dc000 {
+				compatible = "fsl,imx6ul-gpc", "fsl,imx6q-gpc";
+				reg = <0x020dc000 0x4000>;
+				interrupt-controller;
+				#interrupt-cells = <3>;
+				interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-parent = <&intc>;
+			};
+
+			iomuxc: iomuxc@020e0000 {
+				compatible = "fsl,imx6ul-iomuxc";
+				reg = <0x020e0000 0x4000>;
+			};
+
+			gpr: iomuxc-gpr@020e4000 {
+				compatible = "fsl,imx6ul-iomuxc-gpr", "syscon";
+				reg = <0x020e4000 0x4000>;
+			};
+
+			gpt2: gpt@020e8000 {
+				compatible = "fsl,imx6ul-gpt", "fsl,imx6sx-gpt";
+				reg = <0x020e8000 0x4000>;
+				interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_DUMMY>,
+					 <&clks IMX6UL_CLK_DUMMY>;
+				clock-names = "ipg", "per";
+			};
+
+			pwm5: pwm@020f0000 {
+				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+				reg = <0x020f0000 0x4000>;
+				interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_DUMMY>,
+					 <&clks IMX6UL_CLK_DUMMY>;
+				clock-names = "ipg", "per";
+				#pwm-cells = <2>;
+			};
+
+			pwm6: pwm@020f4000 {
+				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+				reg = <0x020f4000 0x4000>;
+				interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_DUMMY>,
+					 <&clks IMX6UL_CLK_DUMMY>;
+				clock-names = "ipg", "per";
+				#pwm-cells = <2>;
+			};
+
+			pwm7: pwm@020f8000 {
+				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+				reg = <0x020f8000 0x4000>;
+				interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_DUMMY>,
+					 <&clks IMX6UL_CLK_DUMMY>;
+				clock-names = "ipg", "per";
+				#pwm-cells = <2>;
+			};
+
+			pwm8: pwm@020fc000 {
+				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
+				reg = <0x020fc000 0x4000>;
+				interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_DUMMY>,
+					 <&clks IMX6UL_CLK_DUMMY>;
+				clock-names = "ipg", "per";
+				#pwm-cells = <2>;
+			};
+		};
+
+		aips2: aips-bus@02100000 {
+			compatible = "fsl,aips-bus", "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			reg = <0x02100000 0x100000>;
+			ranges;
+
+			usbotg1: usb@02184000 {
+				compatible = "fsl,imx6ul-usb", "fsl,imx27-usb";
+				reg = <0x02184000 0x200>;
+				interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_USBOH3>;
+				fsl,usbphy = <&usbphy1>;
+				fsl,usbmisc = <&usbmisc 0>;
+				fsl,anatop = <&anatop>;
+				status = "disabled";
+			};
+
+			usbotg2: usb@02184200 {
+				compatible = "fsl,imx6ul-usb", "fsl,imx27-usb";
+				reg = <0x02184200 0x200>;
+				interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_USBOH3>;
+				fsl,usbphy = <&usbphy2>;
+				fsl,usbmisc = <&usbmisc 1>;
+				status = "disabled";
+			};
+
+			usbmisc: usbmisc@02184800 {
+				#index-cells = <1>;
+				compatible = "fsl,imx6ul-usbmisc", "fsl,imx6q-usbmisc";
+				reg = <0x02184800 0x200>;
+			};
+
+			fec1: ethernet@02188000 {
+				compatible = "fsl,imx6ul-fec", "fsl,imx6q-fec";
+				reg = <0x02188000 0x4000>;
+				interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_ENET>,
+					 <&clks IMX6UL_CLK_ENET_AHB>,
+					 <&clks IMX6UL_CLK_ENET_PTP>,
+					 <&clks IMX6UL_CLK_ENET_REF>,
+					 <&clks IMX6UL_CLK_ENET_REF>;
+				clock-names = "ipg", "ahb", "ptp",
+					      "enet_clk_ref", "enet_out";
+				fsl,num-tx-queues = <1>;
+				fsl,num-rx-queues = <1>;
+				status = "disabled";
+			};
+
+			usdhc1: usdhc@02190000 {
+				compatible = "fsl,imx6ul-usdhc", "fsl,imx6sx-usdhc";
+				reg = <0x02190000 0x4000>;
+				interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_USDHC1>,
+					 <&clks IMX6UL_CLK_USDHC1>,
+					 <&clks IMX6UL_CLK_USDHC1>;
+				clock-names = "ipg", "ahb", "per";
+				bus-width = <4>;
+				status = "disabled";
+			};
+
+			usdhc2: usdhc@02194000 {
+				compatible = "fsl,imx6ul-usdhc", "fsl,imx6sx-usdhc";
+				reg = <0x02194000 0x4000>;
+				interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_USDHC2>,
+					 <&clks IMX6UL_CLK_USDHC2>,
+					 <&clks IMX6UL_CLK_USDHC2>;
+				clock-names = "ipg", "ahb", "per";
+				bus-width = <4>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@021a0000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx6ul-i2c", "fsl,imx21-i2c";
+				reg = <0x021a0000 0x4000>;
+				interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_I2C1>;
+				status = "disabled";
+			};
+
+			i2c2: i2c@021a4000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx6ul-i2c", "fsl,imx21-i2c";
+				reg = <0x021a4000 0x4000>;
+				interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_I2C2>;
+				status = "disabled";
+			};
+
+			i2c3: i2c@021a8000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx6ul-i2c", "fsl,imx21-i2c";
+				reg = <0x021a8000 0x4000>;
+				interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_I2C3>;
+				status = "disabled";
+			};
+
+			qspi: qspi@021e0000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx6ul-qspi", "fsl,imx6sx-qspi";
+				reg = <0x021e0000 0x4000>, <0x60000000 0x10000000>;
+				reg-names = "QuadSPI", "QuadSPI-memory";
+				interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_QSPI>,
+					 <&clks IMX6UL_CLK_QSPI>;
+				clock-names = "qspi_en", "qspi";
+				status = "disabled";
+			};
+
+			uart2: serial@021e8000 {
+				compatible = "fsl,imx6ul-uart",
+					     "fsl,imx6q-uart";
+				reg = <0x021e8000 0x4000>;
+				interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_UART2_IPG>,
+					 <&clks IMX6UL_CLK_UART2_SERIAL>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+
+			uart3: serial@021ec000 {
+				compatible = "fsl,imx6ul-uart",
+					     "fsl,imx6q-uart";
+				reg = <0x021ec000 0x4000>;
+				interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_UART3_IPG>,
+					 <&clks IMX6UL_CLK_UART3_SERIAL>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+
+			uart4: serial@021f0000 {
+				compatible = "fsl,imx6ul-uart",
+					     "fsl,imx6q-uart";
+				reg = <0x021f0000 0x4000>;
+				interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_UART4_IPG>,
+					 <&clks IMX6UL_CLK_UART4_SERIAL>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+
+			uart5: serial@021f4000 {
+				compatible = "fsl,imx6ul-uart",
+					     "fsl,imx6q-uart";
+				reg = <0x021f4000 0x4000>;
+				interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_UART5_IPG>,
+					 <&clks IMX6UL_CLK_UART5_SERIAL>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+
+			i2c4: i2c@021f8000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,imx6ul-i2c", "fsl,imx21-i2c";
+				reg = <0x021f8000 0x4000>;
+				interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_I2C4>;
+				status = "disabled";
+			};
+
+			uart6: serial@021fc000 {
+				compatible = "fsl,imx6ul-uart",
+					     "fsl,imx6q-uart";
+				reg = <0x021fc000 0x4000>;
+				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX6UL_CLK_UART6_IPG>,
+					 <&clks IMX6UL_CLK_UART6_SERIAL>;
+				clock-names = "ipg", "per";
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index c42cf8d..b738ce0 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -121,6 +121,209 @@
 		clock-output-names = "osc";
 	};
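+
+	/*
+	 * CoreSight trace path: both ETMs feed the Cortex-A funnel at
+	 * 0x30041000, which feeds the funnel at 0x30083000, then the ETF
+	 * at 0x30084000, then the replicator, which fans out to the ETR
+	 * at 0x30086000 and the TPIU at 0x30087000.
+	 */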
 
+	etr@30086000 {
+		compatible = "arm,coresight-tmc", "arm,primecell";
+		reg = <0x30086000 0x1000>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			etr_in_port: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_port1>;
+			};
+		};
+	};
+
+	tpiu@30087000 {
+		compatible = "arm,coresight-tpiu", "arm,primecell";
+		reg = <0x30087000 0x1000>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			tpiu_in_port: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_port1>;
+			};
+		};
+	};
+
+	replicator {
+		/*
+		 * Non-configurable replicators don't show up on the AMBA
+		 * bus, so there is no need to add "arm,primecell".
+		 */
+		compatible = "arm,coresight-replicator";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			/* replicator output ports */
+			port@0 {
+				reg = <0>;
+				replicator_out_port0: endpoint {
+					remote-endpoint = <&tpiu_in_port>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				replicator_out_port1: endpoint {
+					remote-endpoint = <&etr_in_port>;
+				};
+			};
+
+			/* replicator input port */
+			port@2 {
+				reg = <0>;
+				replicator_in_port0: endpoint {
+					slave-mode;
+					remote-endpoint = <&etf_out_port>;
+				};
+			};
+		};
+	};
+
+	etf@30084000 {
+		compatible = "arm,coresight-tmc", "arm,primecell";
+		reg = <0x30084000 0x1000>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				etf_in_port: endpoint {
+					slave-mode;
+					remote-endpoint = <&hugo_funnel_out_port0>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				etf_out_port: endpoint {
+					remote-endpoint = <&replicator_in_port0>;
+				};
+			};
+		};
+	};
+
+	funnel@30083000 {
+		compatible = "arm,coresight-funnel", "arm,primecell";
+		reg = <0x30083000 0x1000>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			/* funnel input ports */
+			port@0 {
+				reg = <0>;
+				hugo_funnel_in_port0: endpoint {
+					slave-mode;
+					remote-endpoint = <&ca_funnel_out_port0>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				hugo_funnel_in_port1: endpoint {
+					slave-mode; /* M4 input */
+				};
+			};
+
+			port@2 {
+				reg = <0>;
+				hugo_funnel_out_port0: endpoint {
+					remote-endpoint = <&etf_in_port>;
+				};
+			};
+
+			/* the other input ports are not connected to anything */
+		};
+	};
+
+	funnel@30041000 {
+		compatible = "arm,coresight-funnel", "arm,primecell";
+		reg = <0x30041000 0x1000>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			/* funnel input ports */
+			port@0 {
+				reg = <0>;
+				ca_funnel_in_port0: endpoint {
+					slave-mode;
+					remote-endpoint = <&etm0_out_port>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				ca_funnel_in_port1: endpoint {
+					slave-mode;
+					remote-endpoint = <&etm1_out_port>;
+				};
+			};
+
+			/* funnel output port */
+			port@2 {
+				reg = <0>;
+				ca_funnel_out_port0: endpoint {
+					remote-endpoint = <&hugo_funnel_in_port0>;
+				};
+			};
+
+			/* the other input ports are not connected to anything */
+		};
+	};
+
+	etm@3007c000 {
+		compatible = "arm,coresight-etm3x", "arm,primecell";
+		reg = <0x3007c000 0x1000>;
+		cpu = <&cpu0>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm0_out_port: endpoint {
+				remote-endpoint = <&ca_funnel_in_port0>;
+			};
+		};
+	};
+
+	etm@3007d000 {
+		compatible = "arm,coresight-etm3x", "arm,primecell";
+		reg = <0x3007d000 0x1000>;
+
+		/*
+		 * The system will hang if "nosmp" is added to the kernel
+		 * command line without arm,primecell-periphid, because the
+		 * AMBA bus tries to read the peripheral ID while core1 is
+		 * powered off.
+		 */
+		arm,primecell-periphid = <0xbb956>;
+		cpu = <&cpu1>;
+		clocks = <&clks IMX7D_MAIN_AXI_ROOT_CLK>;
+		clock-names = "apb_pclk";
+
+		port {
+			etm1_out_port: endpoint {
+				remote-endpoint = <&ca_funnel_in_port1>;
+			};
+		};
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -212,6 +415,37 @@
 				#interrupt-cells = <2>;
 			};
 
+			wdog1: wdog@30280000 {
+				compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+				reg = <0x30280000 0x10000>;
+				interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_WDOG1_ROOT_CLK>;
+			};
+
+			wdog2: wdog@30290000 {
+				compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+				reg = <0x30290000 0x10000>;
+				interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_WDOG2_ROOT_CLK>;
+				status = "disabled";
+			};
+
+			wdog3: wdog@302a0000 {
+				compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+				reg = <0x302a0000 0x10000>;
+				interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_WDOG3_ROOT_CLK>;
+				status = "disabled";
+			};
+
+			wdog4: wdog@302b0000 {
+				compatible = "fsl,imx7d-wdt", "fsl,imx21-wdt";
+				reg = <0x302b0000 0x10000>;
+				interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks IMX7D_WDOG4_ROOT_CLK>;
+				status = "disabled";
+			};
+
 			gpt1: gpt@302d0000 {
 				compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
 				reg = <0x302d0000 0x10000>;
@@ -291,17 +525,31 @@
 			};
 
 			snvs: snvs@30370000 {
-				compatible = "fsl,sec-v4.0-mon", "simple-bus";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				ranges = <0 0x30370000 0x10000>;
+				compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+				reg = <0x30370000 0x10000>;
 
-				snvs-rtc-lp@34 {
+				snvs_rtc: snvs-rtc-lp {
 					compatible = "fsl,sec-v4.0-mon-rtc-lp";
-					reg = <0x34 0x58>;
+					regmap = <&snvs>;
+					offset = <0x34>;
 					interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
 						     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 				};
+
+				snvs_poweroff: snvs-poweroff {
+					compatible = "syscon-poweroff";
+					regmap = <&snvs>;
+					offset = <0x38>;
+					mask = <0x60>;
+				};
+
+				snvs_pwrkey: snvs-powerkey {
+					compatible = "fsl,sec-v4.0-pwrkey";
+					regmap = <&snvs>;
+					interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
+					linux,keycode = <KEY_POWER>;
+					wakeup-source;
+				};
 			};
 
 			clks: ccm@30380000 {
diff --git a/arch/arm/boot/dts/kirkwood-d2net.dts b/arch/arm/boot/dts/kirkwood-d2net.dts
index 6b78560..e1c25c3 100644
--- a/arch/arm/boot/dts/kirkwood-d2net.dts
+++ b/arch/arm/boot/dts/kirkwood-d2net.dts
@@ -10,6 +10,7 @@
 
 /dts-v1/;
 
+#include <dt-bindings/leds/leds-ns2.h>
 #include "kirkwood-netxbig.dtsi"
 
 / {
@@ -28,6 +29,10 @@
 			label = "d2net_v2:blue:sata";
 			slow-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
 			cmd-gpio = <&gpio0 30 GPIO_ACTIVE_HIGH>;
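+			/* modes-map entries: <mode cmd-gpio-level slow-gpio-level> */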
+			modes-map = <NS_V2_LED_OFF  1 0
+				     NS_V2_LED_ON   0 1
+				     NS_V2_LED_ON   1 1
+				     NS_V2_LED_SATA 0 0>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/kirkwood-is2.dts b/arch/arm/boot/dts/kirkwood-is2.dts
index da674bb..4121674 100644
--- a/arch/arm/boot/dts/kirkwood-is2.dts
+++ b/arch/arm/boot/dts/kirkwood-is2.dts
@@ -1,5 +1,6 @@
 /dts-v1/;
 
+#include <dt-bindings/leds/leds-ns2.h>
 #include "kirkwood-ns2-common.dtsi"
 
 / {
@@ -27,6 +28,10 @@
 			label = "ns2:blue:sata";
 			slow-gpio = <&gpio0 29 0>;
 			cmd-gpio = <&gpio0 30 0>;
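+			/* modes-map entries: <mode cmd-gpio-level slow-gpio-level> */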
+			modes-map = <NS_V2_LED_OFF  1 0
+				     NS_V2_LED_ON   0 1
+				     NS_V2_LED_ON   1 1
+				     NS_V2_LED_SATA 0 0>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/kirkwood-lswvl.dts b/arch/arm/boot/dts/kirkwood-lswvl.dts
new file mode 100644
index 0000000..09eed3c
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-lswvl.dts
@@ -0,0 +1,301 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WVL/VL
+ *
+ * Copyright (C) 2015, rogershimizu@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
+
+/ {
+	model = "Buffalo Linkstation LS-WVL/VL";
+	compatible = "buffalo,lswvl", "buffalo,lsvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+	memory { /* 256 MB */
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200n8 earlyprintk";
+		stdout-path = &uart0;
+	};
+
+	mbus {
+		pcie-controller {
+			status = "okay";
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
+	ocp@f1000000 {
+		pinctrl: pin-controller@10000 {
+			pmx_power_hdd0: pmx-power-hdd0 {
+				marvell,pins = "mpp8";
+				marvell,function = "gpio";
+			};
+			pmx_power_hdd1: pmx-power-hdd1 {
+				marvell,pins = "mpp9";
+				marvell,function = "gpio";
+			};
+			pmx_usb_vbus: pmx-usb-vbus {
+				marvell,pins = "mpp12";
+				marvell,function = "gpio";
+			};
+			pmx_fan_high: pmx-fan-high {
+				marvell,pins = "mpp16";
+				marvell,function = "gpio";
+			};
+			pmx_fan_low: pmx-fan-low {
+				marvell,pins = "mpp17";
+				marvell,function = "gpio";
+			};
+			pmx_led_hdderr0: pmx-led-hdderr0 {
+				marvell,pins = "mpp34";
+				marvell,function = "gpio";
+			};
+			pmx_led_hdderr1: pmx-led-hdderr1 {
+				marvell,pins = "mpp35";
+				marvell,function = "gpio";
+			};
+			pmx_led_alarm: pmx-led-alarm {
+				marvell,pins = "mpp36";
+				marvell,function = "gpio";
+			};
+			pmx_led_function_red: pmx-led-function-red {
+				marvell,pins = "mpp37";
+				marvell,function = "gpio";
+			};
+			pmx_led_info: pmx-led-info {
+				marvell,pins = "mpp38";
+				marvell,function = "gpio";
+			};
+			pmx_led_function_blue: pmx-led-function-blue {
+				marvell,pins = "mpp39";
+				marvell,function = "gpio";
+			};
+			pmx_led_power: pmx-led-power {
+				marvell,pins = "mpp40";
+				marvell,function = "gpio";
+			};
+			pmx_fan_lock: pmx-fan-lock {
+				marvell,pins = "mpp43";
+				marvell,function = "gpio";
+			};
+			pmx_button_function: pmx-button-function {
+				marvell,pins = "mpp45";
+				marvell,function = "gpio";
+			};
+			pmx_power_switch: pmx-power-switch {
+				marvell,pins = "mpp46";
+				marvell,function = "gpio";
+			};
+			pmx_power_auto_switch: pmx-power-auto-switch {
+				marvell,pins = "mpp47";
+				marvell,function = "gpio";
+			};
+		};
+
+		serial@12000 {
+			status = "okay";
+		};
+
+		sata@80000 {
+			status = "okay";
+			nr-ports = <2>;
+		};
+
+		spi@10600 {
+			status = "okay";
+
+			m25p40@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "st,m25p40", "jedec,spi-nor";
+				reg = <0>;
+				spi-max-frequency = <25000000>;
+				mode = <0>;
+
+				partition@0 {
+					reg = <0x0 0x60000>;
+					label = "uboot";
+					read-only;
+				};
+
+				partition@60000 {
+					reg = <0x60000 0x10000>;
+					label = "dtb";
+					read-only;
+				};
+
+				partition@70000 {
+					reg = <0x70000 0x10000>;
+					label = "uboot_env";
+				};
+			};
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_button_function &pmx_power_switch
+			     &pmx_power_auto_switch>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Function Button";
+			linux,code = <KEY_OPTION>;
+			gpios = <&gpio0 45 GPIO_ACTIVE_LOW>;
+		};
+
+		button@2 {
+			label = "Power-on Switch";
+			linux,code = <KEY_RESERVED>;
+			linux,input-type = <5>;
+			gpios = <&gpio0 46 GPIO_ACTIVE_LOW>;
+		};
+
+		button@3 {
+			label = "Power-auto Switch";
+			linux,code = <KEY_ESC>;
+			linux,input-type = <5>;
+			gpios = <&gpio0 47 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_leds {
+		compatible = "gpio-leds";
+		pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+			     &pmx_led_info &pmx_led_power
+			     &pmx_led_function_blue
+			     &pmx_led_hdderr0
+			     &pmx_led_hdderr1>;
+		pinctrl-names = "default";
+
+		led@1 {
+			label = "lswvl:red:alarm";
+			gpios = <&gpio0 36 GPIO_ACTIVE_LOW>;
+		};
+
+		led@2 {
+			label = "lswvl:red:func";
+			gpios = <&gpio0 37 GPIO_ACTIVE_LOW>;
+		};
+
+		led@3 {
+			label = "lswvl:amber:info";
+			gpios = <&gpio0 38 GPIO_ACTIVE_LOW>;
+		};
+
+		led@4 {
+			label = "lswvl:blue:func";
+			gpios = <&gpio0 39 GPIO_ACTIVE_LOW>;
+		};
+
+		led@5 {
+			label = "lswvl:blue:power";
+			gpios = <&gpio0 40 GPIO_ACTIVE_LOW>;
+			default-state = "keep";
+		};
+
+		led@6 {
+			label = "lswvl:red:hdderr0";
+			gpios = <&gpio0 34 GPIO_ACTIVE_LOW>;
+		};
+
+		led@7 {
+			label = "lswvl:red:hdderr1";
+			gpios = <&gpio0 35 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_fan {
+		compatible = "gpio-fan";
+		pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+		pinctrl-names = "default";
+
+		gpios = <&gpio0 17 GPIO_ACTIVE_LOW
+			 &gpio0 16 GPIO_ACTIVE_LOW>;
+
+		gpio-fan,speed-map = <0 3
+				1500 2
+				3250 1
+				5000 0>;
+
+		alarm-gpios = <&gpio0 43 GPIO_ACTIVE_HIGH>;
+	};
+
+	restart_poweroff {
+		compatible = "restart-poweroff";
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+		pinctrl-names = "default";
+
+		usb_power: regulator@1 {
+			compatible = "regulator-fixed";
+			reg = <1>;
+			regulator-name = "USB Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+		};
+		hdd_power0: regulator@2 {
+			compatible = "regulator-fixed";
+			reg = <2>;
+			regulator-name = "HDD0 Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+		};
+		hdd_power1: regulator@3 {
+			compatible = "regulator-fixed";
+			reg = <3>;
+			regulator-name = "HDD1 Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&mdio {
+	status = "okay";
+
+	ethphy0: ethernet-phy@0 {
+		device_type = "ethernet-phy";
+		reg = <0>;
+	};
+};
+
+&eth0 {
+	status = "okay";
+
+	ethernet0-port@0 {
+		phy-handle = <&ethphy0>;
+	};
+};
diff --git a/arch/arm/boot/dts/kirkwood-lswxl.dts b/arch/arm/boot/dts/kirkwood-lswxl.dts
new file mode 100644
index 0000000..f5db16a
--- /dev/null
+++ b/arch/arm/boot/dts/kirkwood-lswxl.dts
@@ -0,0 +1,301 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WXL/WSXL
+ *
+ * Copyright (C) 2015, rogershimizu@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
+
+/ {
+	model = "Buffalo Linkstation LS-WXL/WSXL";
+	compatible = "buffalo,lswxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+	memory { /* 128 MB */
+		device_type = "memory";
+		reg = <0x00000000 0x8000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200n8 earlyprintk";
+		stdout-path = &uart0;
+	};
+
+	mbus {
+		pcie-controller {
+			status = "okay";
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
+	ocp@f1000000 {
+		pinctrl: pin-controller@10000 {
+			pmx_power_hdd0: pmx-power-hdd0 {
+				marvell,pins = "mpp28";
+				marvell,function = "gpio";
+			};
+			pmx_power_hdd1: pmx-power-hdd1 {
+				marvell,pins = "mpp29";
+				marvell,function = "gpio";
+			};
+			pmx_usb_vbus: pmx-usb-vbus {
+				marvell,pins = "mpp37";
+				marvell,function = "gpio";
+			};
+			pmx_fan_high: pmx-fan-high {
+				marvell,pins = "mpp47";
+				marvell,function = "gpio";
+			};
+			pmx_fan_low: pmx-fan-low {
+				marvell,pins = "mpp48";
+				marvell,function = "gpio";
+			};
+			pmx_led_hdderr0: pmx-led-hdderr0 {
+				marvell,pins = "mpp8";
+				marvell,function = "gpio";
+			};
+			pmx_led_hdderr1: pmx-led-hdderr1 {
+				marvell,pins = "mpp46";
+				marvell,function = "gpio";
+			};
+			pmx_led_alarm: pmx-led-alarm {
+				marvell,pins = "mpp49";
+				marvell,function = "gpio";
+			};
+			pmx_led_function_red: pmx-led-function-red {
+				marvell,pins = "mpp34";
+				marvell,function = "gpio";
+			};
+			pmx_led_function_blue: pmx-led-function-blue {
+				marvell,pins = "mpp36";
+				marvell,function = "gpio";
+			};
+			pmx_led_info: pmx-led-info {
+				marvell,pins = "mpp38";
+				marvell,function = "gpio";
+			};
+			pmx_led_power: pmx-led-power {
+				marvell,pins = "mpp39";
+				marvell,function = "gpio";
+			};
+			pmx_fan_lock: pmx-fan-lock {
+				marvell,pins = "mpp40";
+				marvell,function = "gpio";
+			};
+			pmx_button_function: pmx-button-function {
+				marvell,pins = "mpp41";
+				marvell,function = "gpio";
+			};
+			pmx_power_switch: pmx-power-switch {
+				marvell,pins = "mpp42";
+				marvell,function = "gpio";
+			};
+			pmx_power_auto_switch: pmx-power-auto-switch {
+				marvell,pins = "mpp43";
+				marvell,function = "gpio";
+			};
+		};
+
+		serial@12000 {
+			status = "okay";
+		};
+
+		sata@80000 {
+			status = "okay";
+			nr-ports = <2>;
+		};
+
+		spi@10600 {
+			status = "okay";
+
+			m25p40@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "st,m25p40", "jedec,spi-nor";
+				reg = <0>;
+				spi-max-frequency = <25000000>;
+				mode = <0>;
+
+				partition@0 {
+					reg = <0x0 0x60000>;
+					label = "uboot";
+					read-only;
+				};
+
+				partition@60000 {
+					reg = <0x60000 0x10000>;
+					label = "dtb";
+					read-only;
+				};
+
+				partition@70000 {
+					reg = <0x70000 0x10000>;
+					label = "uboot_env";
+				};
+			};
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_button_function &pmx_power_switch
+			     &pmx_power_auto_switch>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Function Button";
+			linux,code = <KEY_OPTION>;
+			gpios = <&gpio1 41 GPIO_ACTIVE_LOW>;
+		};
+
+		button@2 {
+			label = "Power-on Switch";
+			linux,code = <KEY_RESERVED>;
+			linux,input-type = <5>;
+			gpios = <&gpio1 42 GPIO_ACTIVE_LOW>;
+		};
+
+		button@3 {
+			label = "Power-auto Switch";
+			linux,code = <KEY_ESC>;
+			linux,input-type = <5>;
+			gpios = <&gpio1 43 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_leds {
+		compatible = "gpio-leds";
+		pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+			     &pmx_led_info &pmx_led_power
+			     &pmx_led_function_blue
+			     &pmx_led_hdderr0
+			     &pmx_led_hdderr1>;
+		pinctrl-names = "default";
+
+		led@1 {
+			label = "lswxl:blue:func";
+			gpios = <&gpio1 36 GPIO_ACTIVE_LOW>;
+		};
+
+		led@2 {
+			label = "lswxl:red:alarm";
+			gpios = <&gpio1 49 GPIO_ACTIVE_LOW>;
+		};
+
+		led@3 {
+			label = "lswxl:amber:info";
+			gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+		};
+
+		led@4 {
+			label = "lswxl:blue:power";
+			gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+		};
+
+		led@5 {
+			label = "lswxl:red:func";
+			gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
+			default-state = "keep";
+		};
+
+		led@6 {
+			label = "lswxl:red:hdderr0";
+			gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+		};
+
+		led@7 {
+			label = "lswxl:red:hdderr1";
+			gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_fan {
+		compatible = "gpio-fan";
+		pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+		pinctrl-names = "default";
+
+		gpios = <&gpio0 47 GPIO_ACTIVE_LOW
+			 &gpio0 48 GPIO_ACTIVE_LOW>;
+
+		gpio-fan,speed-map = <0 3
+				1500 2
+				3250 1
+				5000 0>;
+
+		alarm-gpios = <&gpio1 49 GPIO_ACTIVE_HIGH>;
+	};
+
+	restart_poweroff {
+		compatible = "restart-poweroff";
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+		pinctrl-names = "default";
+
+		usb_power: regulator@1 {
+			compatible = "regulator-fixed";
+			reg = <1>;
+			regulator-name = "USB Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 37 GPIO_ACTIVE_HIGH>;
+		};
+		hdd_power0: regulator@2 {
+			compatible = "regulator-fixed";
+			reg = <2>;
+			regulator-name = "HDD0 Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 28 GPIO_ACTIVE_HIGH>;
+		};
+		hdd_power1: regulator@3 {
+			compatible = "regulator-fixed";
+			reg = <3>;
+			regulator-name = "HDD1 Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&mdio {
+	status = "okay";
+
+	ethphy1: ethernet-phy@8 {
+		device_type = "ethernet-phy";
+		reg = <8>;
+	};
+};
+
+&eth1 {
+	status = "okay";
+
+	ethernet1-port@0 {
+		phy-handle = <&ethphy1>;
+	};
+};
diff --git a/arch/arm/boot/dts/kirkwood-ns2.dts b/arch/arm/boot/dts/kirkwood-ns2.dts
index 53368d1..190189d 100644
--- a/arch/arm/boot/dts/kirkwood-ns2.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2.dts
@@ -1,5 +1,6 @@
 /dts-v1/;
 
+#include <dt-bindings/leds/leds-ns2.h>
 #include "kirkwood-ns2-common.dtsi"
 
 / {
@@ -27,6 +28,10 @@
 			label = "ns2:blue:sata";
 			slow-gpio = <&gpio0 29 0>;
 			cmd-gpio = <&gpio0 30 0>;
+			modes-map = <NS_V2_LED_OFF  1 0
+				     NS_V2_LED_ON   0 1
+				     NS_V2_LED_ON   1 1
+				     NS_V2_LED_SATA 0 0>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/kirkwood-ns2max.dts b/arch/arm/boot/dts/kirkwood-ns2max.dts
index 72c78d0..55cc41d 100644
--- a/arch/arm/boot/dts/kirkwood-ns2max.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2max.dts
@@ -1,5 +1,6 @@
 /dts-v1/;
 
+#include <dt-bindings/leds/leds-ns2.h>
 #include "kirkwood-ns2-common.dtsi"
 
 / {
@@ -46,6 +47,10 @@
 			label = "ns2:blue:sata";
 			slow-gpio = <&gpio0 29 0>;
 			cmd-gpio = <&gpio0 30 0>;
+			modes-map = <NS_V2_LED_OFF  1 0
+				     NS_V2_LED_ON   0 1
+				     NS_V2_LED_ON   1 1
+				     NS_V2_LED_SATA 0 0>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/kirkwood-ns2mini.dts b/arch/arm/boot/dts/kirkwood-ns2mini.dts
index c441bf6..9935f3e 100644
--- a/arch/arm/boot/dts/kirkwood-ns2mini.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2mini.dts
@@ -1,5 +1,6 @@
 /dts-v1/;
 
+#include <dt-bindings/leds/leds-ns2.h>
 #include "kirkwood-ns2-common.dtsi"
 
 / {
@@ -47,6 +48,10 @@
 			label = "ns2:blue:sata";
 			slow-gpio = <&gpio0 29 0>;
 			cmd-gpio = <&gpio0 30 0>;
+			modes-map = <NS_V2_LED_OFF  1 0
+				     NS_V2_LED_ON   0 1
+				     NS_V2_LED_ON   1 1
+				     NS_V2_LED_SATA 0 0>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/lpc18xx.dtsi b/arch/arm/boot/dts/lpc18xx.dtsi
index 204da5b..2c569a6 100644
--- a/arch/arm/boot/dts/lpc18xx.dtsi
+++ b/arch/arm/boot/dts/lpc18xx.dtsi
@@ -13,6 +13,12 @@
 
 #include "armv7-m.dtsi"
 
+#include "dt-bindings/clock/lpc18xx-cgu.h"
+#include "dt-bindings/clock/lpc18xx-ccu.h"
+
+#define LPC_PIN(port, pin)	(0x##port * 32 + pin)
+#define LPC_GPIO(port, pin)	(port * 32 + pin)
+
 / {
 	cpus {
 		#address-cells = <1>;
@@ -22,6 +28,7 @@
 			compatible = "arm,cortex-m3";
 			device_type = "cpu";
 			reg = <0x0>;
+			clocks = <&ccu1 CLK_CPU_CORE>;
 		};
 	};
 
@@ -32,32 +39,173 @@
 			clock-frequency = <12000000>;
 		};
 
-		/* Temporary hardcode PLL1 until clk drivers are merged */
-		pll1: pll1 {
-			compatible = "fixed-factor-clock";
-			clocks = <&xtal>;
+		xtal32: xtal32 {
+			compatible = "fixed-clock";
 			#clock-cells = <0>;
-			clock-div = <1>;
-			clock-mult = <12>;
+			clock-frequency = <32768>;
+		};
+
+		enet_rx_clk: enet_rx_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+			clock-output-names = "enet_rx_clk";
+		};
+
+		enet_tx_clk: enet_tx_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+			clock-output-names = "enet_tx_clk";
+		};
+
+		gp_clkin: gp_clkin {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+			clock-output-names = "gp_clkin";
 		};
 	};
 
 	soc {
+		mmcsd: mmcsd@40004000 {
+			compatible = "snps,dw-mshc";
+			reg = <0x40004000 0x1000>;
+			interrupts = <6>;
+			num-slots = <1>;
+			clocks = <&ccu2 CLK_SDIO>, <&ccu1 CLK_CPU_SDIO>;
+			clock-names = "ciu", "biu";
+			status = "disabled";
+		};
+
+		usb0: ehci@40006100 {
+			compatible = "nxp,lpc1850-ehci", "generic-ehci";
+			reg = <0x40006100 0x100>;
+			interrupts = <8>;
+			clocks = <&ccu1 CLK_CPU_USB0>;
+			phys = <&usb0_otg_phy>;
+			phy-names = "usb";
+			has-transaction-translator;
+			status = "disabled";
+		};
+
+		usb1: ehci@40007100 {
+			compatible = "nxp,lpc1850-ehci", "generic-ehci";
+			reg = <0x40007100 0x100>;
+			interrupts = <9>;
+			clocks = <&ccu1 CLK_CPU_USB1>;
+			status = "disabled";
+		};
+
+		emc: memory-controller@40005000 {
+			compatible = "arm,pl172", "arm,primecell";
+			reg = <0x40005000 0x1000>;
+			clocks = <&ccu1 CLK_CPU_EMCDIV>, <&ccu1 CLK_CPU_EMC>;
+			clock-names = "mpmcclk", "apb_pclk";
+			#address-cells = <2>;
+			#size-cells = <1>;
+			ranges = <0 0 0x1c000000 0x1000000
+				  1 0 0x1d000000 0x1000000
+				  2 0 0x1e000000 0x1000000
+				  3 0 0x1f000000 0x1000000>;
+			status = "disabled";
+		};
+
+		lcdc: lcd-controller@40008000 {
+			compatible = "arm,pl111", "arm,primecell";
+			reg = <0x40008000 0x1000>;
+			interrupts = <7>;
+			interrupt-names = "combined";
+			clocks = <&cgu BASE_LCD_CLK>, <&ccu1 CLK_CPU_LCD>;
+			clock-names = "clcdclk", "apb_pclk";
+			status = "disabled";
+		};
+
+		mac: ethernet@40010000 {
+			compatible = "nxp,lpc1850-dwmac", "snps,dwmac-3.611", "snps,dwmac";
+			reg = <0x40010000 0x2000>;
+			interrupts = <5>;
+			interrupt-names	= "macirq";
+			clocks = <&ccu1 CLK_CPU_ETHERNET>;
+			clock-names = "stmmaceth";
+			status = "disabled";
+		};
+
+		creg: syscon@40043000 {
+			compatible = "nxp,lpc1850-creg", "syscon", "simple-mfd";
+			reg = <0x40043000 0x1000>;
+			clocks = <&ccu1 CLK_CPU_CREG>;
+
+			usb0_otg_phy: phy@004 {
+				compatible = "nxp,lpc1850-usb-otg-phy";
+				clocks = <&ccu1 CLK_USB0>;
+				#phy-cells = <0>;
+			};
+		};
+
+		cgu: clock-controller@40050000 {
+			compatible = "nxp,lpc1850-cgu";
+			reg = <0x40050000 0x1000>;
+			#clock-cells = <1>;
+			clocks = <&xtal>, <&xtal32>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>;
+		};
+
+		ccu1: clock-controller@40051000 {
+			compatible = "nxp,lpc1850-ccu";
+			reg = <0x40051000 0x1000>;
+			#clock-cells = <1>;
+			clocks = <&cgu BASE_APB3_CLK>,   <&cgu BASE_APB1_CLK>,
+				 <&cgu BASE_SPIFI_CLK>,  <&cgu BASE_CPU_CLK>,
+				 <&cgu BASE_PERIPH_CLK>, <&cgu BASE_USB0_CLK>,
+				 <&cgu BASE_USB1_CLK>,   <&cgu BASE_SPI_CLK>;
+			clock-names = "base_apb3_clk",   "base_apb1_clk",
+				      "base_spifi_clk",  "base_cpu_clk",
+				      "base_periph_clk", "base_usb0_clk",
+				      "base_usb1_clk",   "base_spi_clk";
+		};
+
+		ccu2: clock-controller@40052000 {
+			compatible = "nxp,lpc1850-ccu";
+			reg = <0x40052000 0x1000>;
+			#clock-cells = <1>;
+			clocks = <&cgu BASE_AUDIO_CLK>, <&cgu BASE_UART3_CLK>,
+				 <&cgu BASE_UART2_CLK>, <&cgu BASE_UART1_CLK>,
+				 <&cgu BASE_UART0_CLK>, <&cgu BASE_SSP1_CLK>,
+				 <&cgu BASE_SSP0_CLK>,  <&cgu BASE_SDIO_CLK>;
+			clock-names = "base_audio_clk", "base_uart3_clk",
+				      "base_uart2_clk", "base_uart1_clk",
+				      "base_uart0_clk", "base_ssp1_clk",
+				      "base_ssp0_clk",  "base_sdio_clk";
+		};
+
 		uart0: serial@40081000 {
-			compatible = "ns16550a";
+			compatible = "nxp,lpc1850-uart", "ns16550a";
 			reg = <0x40081000 0x1000>;
 			reg-shift = <2>;
 			interrupts = <24>;
-			clocks = <&pll1>;
+			clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
+			clock-names = "uartclk", "reg";
 			status = "disabled";
 		};
 
 		uart1: serial@40082000 {
-			compatible = "ns16550a";
+			compatible = "nxp,lpc1850-uart", "ns16550a";
 			reg = <0x40082000 0x1000>;
 			reg-shift = <2>;
 			interrupts = <25>;
-			clocks = <&pll1>;
+			clocks = <&ccu2 CLK_APB0_UART1>, <&ccu1 CLK_CPU_UART1>;
+			clock-names = "uartclk", "reg";
+			status = "disabled";
+		};
+
+		ssp0: spi@40083000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x40083000 0x1000>;
+			interrupts = <22>;
+			clocks = <&ccu2 CLK_APB0_SSP0>, <&ccu1 CLK_CPU_SSP0>;
+			clock-names = "sspclk", "apb_pclk";
+			#address-cells = <1>;
+			#size-cells = <0>;
 			status = "disabled";
 		};
 
@@ -65,7 +213,7 @@
 			compatible = "nxp,lpc3220-timer";
 			reg = <0x40084000 0x1000>;
 			interrupts = <12>;
-			clocks = <&pll1>;
+			clocks = <&ccu1 CLK_CPU_TIMER0>;
 			clock-names = "timerclk";
 		};
 
@@ -73,25 +221,41 @@
 			compatible = "nxp,lpc3220-timer";
 			reg = <0x40085000 0x1000>;
 			interrupts = <13>;
-			clocks = <&pll1>;
+			clocks = <&ccu1 CLK_CPU_TIMER1>;
 			clock-names = "timerclk";
 		};
 
+		pinctrl: pinctrl@40086000 {
+			compatible = "nxp,lpc1850-scu";
+			reg = <0x40086000 0x1000>;
+			clocks = <&ccu1 CLK_CPU_SCU>;
+		};
+
+		can1: can@400a4000 {
+			compatible = "bosch,c_can";
+			reg = <0x400a4000 0x1000>;
+			interrupts = <43>;
+			clocks = <&ccu1 CLK_APB1_CAN1>;
+			status = "disabled";
+		};
+
 		uart2: serial@400c1000 {
-			compatible = "ns16550a";
+			compatible = "nxp,lpc1850-uart", "ns16550a";
 			reg = <0x400c1000 0x1000>;
 			reg-shift = <2>;
 			interrupts = <26>;
-			clocks = <&pll1>;
+			clocks = <&ccu2 CLK_APB2_UART2>, <&ccu1 CLK_CPU_UART2>;
+			clock-names = "uartclk", "reg";
 			status = "disabled";
 		};
 
 		uart3: serial@400c2000 {
-			compatible = "ns16550a";
+			compatible = "nxp,lpc1850-uart", "ns16550a";
 			reg = <0x400c2000 0x1000>;
 			reg-shift = <2>;
 			interrupts = <27>;
-			clocks = <&pll1>;
+			clocks = <&ccu2 CLK_APB2_UART3>, <&ccu1 CLK_CPU_UART3>;
+			clock-names = "uartclk", "reg";
 			status = "disabled";
 		};
 
@@ -99,7 +263,7 @@
 			compatible = "nxp,lpc3220-timer";
 			reg = <0x400c3000 0x1000>;
 			interrupts = <14>;
-			clocks = <&pll1>;
+			clocks = <&ccu1 CLK_CPU_TIMER2>;
 			clock-names = "timerclk";
 		};
 
@@ -107,8 +271,75 @@
 			compatible = "nxp,lpc3220-timer";
 			reg = <0x400c4000 0x1000>;
 			interrupts = <15>;
-			clocks = <&pll1>;
+			clocks = <&ccu1 CLK_CPU_TIMER3>;
 			clock-names = "timerclk";
 		};
+
+		ssp1: spi@400c5000 {
+			compatible = "arm,pl022", "arm,primecell";
+			reg = <0x400c5000 0x1000>;
+			interrupts = <23>;
+			clocks = <&ccu2 CLK_APB2_SSP1>, <&ccu1 CLK_CPU_SSP1>;
+			clock-names = "sspclk", "apb_pclk";
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		can0: can@400e2000 {
+			compatible = "bosch,c_can";
+			reg = <0x400e2000 0x1000>;
+			interrupts = <51>;
+			clocks = <&ccu1 CLK_APB3_CAN0>;
+			status = "disabled";
+		};
+
+		gpio: gpio@400f4000 {
+			compatible = "nxp,lpc1850-gpio";
+			reg = <0x400f4000 0x4000>;
+			clocks = <&ccu1 CLK_CPU_GPIO>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			gpio-ranges =	<&pinctrl LPC_GPIO(0,0)  LPC_PIN(0,0)  2>,
+					<&pinctrl LPC_GPIO(0,4)  LPC_PIN(1,0)  1>,
+					<&pinctrl LPC_GPIO(0,8)  LPC_PIN(1,1)  4>,
+					<&pinctrl LPC_GPIO(1,8)  LPC_PIN(1,5)  2>,
+					<&pinctrl LPC_GPIO(1,0)  LPC_PIN(1,7)  8>,
+					<&pinctrl LPC_GPIO(0,2)  LPC_PIN(1,15) 2>,
+					<&pinctrl LPC_GPIO(0,12) LPC_PIN(1,17) 2>,
+					<&pinctrl LPC_GPIO(0,15) LPC_PIN(1,20) 1>,
+					<&pinctrl LPC_GPIO(5,0)  LPC_PIN(2,0)  7>,
+					<&pinctrl LPC_GPIO(0,7)  LPC_PIN(2,7)  1>,
+					<&pinctrl LPC_GPIO(5,7)  LPC_PIN(2,8)  1>,
+					<&pinctrl LPC_GPIO(1,10) LPC_PIN(2,9)  1>,
+					<&pinctrl LPC_GPIO(0,14) LPC_PIN(2,10) 1>,
+					<&pinctrl LPC_GPIO(1,11) LPC_PIN(2,11) 3>,
+					<&pinctrl LPC_GPIO(5,8)  LPC_PIN(3,1)  2>,
+					<&pinctrl LPC_GPIO(1,14) LPC_PIN(3,4)  2>,
+					<&pinctrl LPC_GPIO(0,6)  LPC_PIN(3,6)  1>,
+					<&pinctrl LPC_GPIO(5,10) LPC_PIN(3,7)  2>,
+					<&pinctrl LPC_GPIO(2,0)  LPC_PIN(4,0)  7>,
+					<&pinctrl LPC_GPIO(5,12) LPC_PIN(4,8)  3>,
+					<&pinctrl LPC_GPIO(2,9)  LPC_PIN(5,0)  7>,
+					<&pinctrl LPC_GPIO(2,7)  LPC_PIN(5,7)  1>,
+					<&pinctrl LPC_GPIO(3,0)  LPC_PIN(6,1)  5>,
+					<&pinctrl LPC_GPIO(0,5)  LPC_PIN(6,6)  1>,
+					<&pinctrl LPC_GPIO(5,15) LPC_PIN(6,7)  2>,
+					<&pinctrl LPC_GPIO(3,5)  LPC_PIN(6,9)  3>,
+					<&pinctrl LPC_GPIO(2,8)  LPC_PIN(6,12) 1>,
+					<&pinctrl LPC_GPIO(3,8)  LPC_PIN(7,0)  8>,
+					<&pinctrl LPC_GPIO(4,0)  LPC_PIN(8,0)  8>,
+					<&pinctrl LPC_GPIO(4,12) LPC_PIN(9,0)  4>,
+					<&pinctrl LPC_GPIO(5,17) LPC_PIN(9,4)  2>,
+					<&pinctrl LPC_GPIO(4,11) LPC_PIN(9,6)  1>,
+					<&pinctrl LPC_GPIO(4,8)  LPC_PIN(a,1)  3>,
+					<&pinctrl LPC_GPIO(5,19) LPC_PIN(a,4)  1>,
+					<&pinctrl LPC_GPIO(5,20) LPC_PIN(b,0)  7>,
+					<&pinctrl LPC_GPIO(6,0)  LPC_PIN(c,1) 14>,
+					<&pinctrl LPC_GPIO(6,14) LPC_PIN(d,0) 17>,
+					<&pinctrl LPC_GPIO(7,0)  LPC_PIN(e,0) 16>,
+					<&pinctrl LPC_GPIO(7,16) LPC_PIN(f,1)  3>,
+					<&pinctrl LPC_GPIO(7,19) LPC_PIN(f,5)  7>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/lpc4337-ciaa.dts b/arch/arm/boot/dts/lpc4337-ciaa.dts
new file mode 100644
index 0000000..5f500c1
--- /dev/null
+++ b/arch/arm/boot/dts/lpc4337-ciaa.dts
@@ -0,0 +1,187 @@
+/*
+ * CIAA NXP LPC4337 (http://www.proyecto-ciaa.com.ar)
+ *
+ * Copyright (C) 2015 VanguardiaSur - www.vanguardiasur.com.ar
+ *
+ * This code is released using a dual license strategy: BSD/GPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of 3-clause BSD License
+ * Released under the terms of GNU General Public License Version 2.0
+ */
+/dts-v1/;
+
+#include "lpc18xx.dtsi"
+#include "lpc4357.dtsi"
+
+#include "dt-bindings/gpio/gpio.h"
+
+/ {
+	model = "CIAA NXP LPC4337";
+	compatible = "ciaa,lpc4337", "nxp,lpc4337", "nxp,lpc4350";
+
+	aliases {
+		serial0 = &uart2;
+		serial1 = &uart3;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+		stdout-path = &uart2;
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x28000000 0x0800000>; /* 8 MB */
+	};
+};
+
+&pinctrl {
+	enet_rmii_pins: enet-rmii-pins {
+		enet_rmii_rxd_cfg {
+			pins = "p1_15", "p0_0";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_rmii_txd_cfg {
+			pins = "p1_18", "p1_20";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_rmii_rx_dv_cfg {
+			pins = "p1_16";
+			function = "enet";
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_rmii_tx_en_cfg {
+			pins = "p0_1";
+			function = "enet";
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_ref_clk_cfg {
+			pins = "p1_19";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_mdio_cfg {
+			pins = "p1_17";
+			function = "enet";
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_mdc_cfg {
+			pins = "p7_7";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+	};
+
+	ssp_pins: ssp-pins {
+		ssp1_cs {
+			pins = "p6_7";
+			function = "gpio";
+			bias-pull-up;
+			bias-disable;
+		};
+
+		ssp1_miso_mosi {
+			pins = "p1_3", "p1_4";
+			function = "ssp1";
+			slew-rate = <1>;
+			bias-pull-down;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		ssp1_sck {
+			pins = "pf_4";
+			function = "ssp1";
+			slew-rate = <1>;
+			bias-disable;
+		};
+	};
+
+	uart2_pins: uart2-pins {
+		uart2_rx_cfg {
+			pins = "p7_2";
+			function = "uart2";
+			bias-disable;
+			input-enable;
+		};
+
+		uart2_tx_cfg {
+			pins = "p7_1";
+			function = "uart2";
+			bias-disable;
+		};
+	};
+
+	uart3_pins: uart3-pins {
+		uart3_rx_cfg {
+			pins = "p2_4";
+			function = "uart3";
+			bias-disable;
+			input-enable;
+		};
+
+		uart3_tx_cfg {
+			pins = "p2_3";
+			function = "uart3";
+			bias-disable;
+		};
+	};
+};
+
+&enet_tx_clk {
+	clock-frequency = <50000000>;
+};
+
+&mac {
+	status = "okay";
+	phy-mode = "rmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&enet_rmii_pins>;
+};
+
+&ssp1 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&ssp_pins>;
+	cs-gpios = <&gpio LPC_GPIO(5,15) GPIO_ACTIVE_HIGH>;
+	num-cs = <1>;
+};
+
+&uart2 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart2_pins>;
+};
+
+&uart3 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins>;
+};
diff --git a/arch/arm/boot/dts/lpc4350-hitex-eval.dts b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
index d04072f..32bc7ff 100644
--- a/arch/arm/boot/dts/lpc4350-hitex-eval.dts
+++ b/arch/arm/boot/dts/lpc4350-hitex-eval.dts
@@ -36,10 +36,250 @@
 	};
 };
 
-&pll1 {
-	clock-mult = <15>;
+&pinctrl {
+	emc_pins: emc-pins {
+		emc_addr0_23_cfg {
+			pins =	"p2_9",  "p2_10", "p2_11", "p2_12",
+				"p2_13", "p1_0",  "p1_1",  "p1_2",
+				"p2_8",  "p2_7",  "p2_6",  "p2_2",
+				"p2_1",  "p2_0",  "p6_8",  "p6_7",
+				"pd_16", "pd_15", "pe_0",  "pe_1",
+				"pe_2",  "pe_3",  "pe_4",  "pa_4";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_data0_15_cfg {
+			pins =	"p1_7",  "p1_8",  "p1_9",  "p1_10",
+				"p1_11", "p1_12", "p1_13", "p1_14",
+				"p5_4",  "p5_5",  "p5_6",  "p5_7",
+				"p5_0",  "p5_1",  "p5_2",  "p5_3";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_we_oe_cfg {
+			pins = "p1_6", "p1_3";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_bls0_3_cfg {
+			pins = "p1_4", "p6_6", "pd_13", "pd_10";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_cs0_cs2_cfg {
+			pins = "p1_5", "pd_12";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_dqm0_3_cfg {
+			pins = "p6_12", "p6_10", "pd_0", "pe_13";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_ras_cas_cfg {
+			pins = "p6_5", "p6_4";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_dycs0_cfg {
+			pins = "p6_9";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_cke_cfg {
+			pins = "p6_11";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_clock_cfg {
+			pins = "clk0", "clk1", "clk2", "clk3";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+	};
+
+	enet_mii_pins: enet-mii-pins {
+		enet_mii_rxd0_3_cfg {
+			pins = "p1_15", "p0_0", "p9_3", "p9_2";
+			function = "enet";
+			bias-disable;
+			input-enable;
+		};
+
+		enet_mii_txd0_3_cfg {
+			pins = "p1_18", "p1_20", "p9_4", "p9_5";
+			function = "enet";
+			bias-disable;
+		};
+
+		enet_mii_crs_col_cfg {
+			pins = "p9_0", "p9_6";
+			function = "enet";
+			bias-disable;
+			input-enable;
+		};
+
+		enet_mii_rx_clk_dv_er_cfg {
+			pins = "pc_0", "p1_16", "p9_1";
+			function = "enet";
+			bias-disable;
+			input-enable;
+		};
+
+		enet_mii_tx_clk_en_cfg {
+			pins = "p1_19", "p0_1";
+			function = "enet";
+			bias-disable;
+			input-enable;
+		};
+
+		enet_mdio_cfg {
+			pins = "p1_17";
+			function = "enet";
+			bias-disable;
+			input-enable;
+		};
+
+		enet_mdc_cfg {
+			pins = "pc_1";
+			function = "enet";
+			bias-disable;
+		};
+	};
+
+	uart0_pins: uart0-pins {
+		uart0_rx_cfg {
+			pins = "pf_11";
+			function = "uart0";
+			input-schmitt-disable;
+			bias-disable;
+			input-enable;
+		};
+
+		uart0_tx_cfg {
+			pins = "pf_10";
+			function = "uart0";
+			bias-pull-down;
+		};
+	};
+};
+
+&emc {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&emc_pins>;
+
+	cs0 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+
+		mpmc,cs = <0>;
+		mpmc,memory-width = <16>;
+		mpmc,byte-lane-low;
+		mpmc,write-enable-delay = <0>;
+		mpmc,output-enable-delay = <0>;
+		mpmc,read-access-delay = <70>;
+		mpmc,page-mode-read-delay = <70>;
+
+		flash@0,0 {
+			compatible = "sst,sst39vf320", "cfi-flash";
+			reg = <0 0 0x400000>;
+			bank-width = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@0 {
+				label = "bootloader";
+				reg = <0x000000 0x040000>; /* 256 KiB */
+			};
+
+			partition@1 {
+				label = "kernel";
+				reg = <0x040000 0x2C0000>; /* 2.75 MiB */
+			};
+
+			partition@2 {
+				label = "rootfs";
+				reg = <0x300000 0x100000>; /* 1 MiB */
+			};
+		};
+	};
+
+	cs2 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+
+		mpmc,cs = <2>;
+		mpmc,memory-width = <16>;
+		mpmc,byte-lane-low;
+		mpmc,write-enable-delay = <0>;
+		mpmc,output-enable-delay = <30>;
+		mpmc,read-access-delay = <90>;
+		mpmc,page-mode-read-delay = <55>;
+		mpmc,write-access-delay = <55>;
+		mpmc,turn-round-delay = <55>;
+
+		ext_sram: sram@2,0 {
+			compatible = "mmio-sram";
+			reg = <2 0 0x80000>; /* 512 KiB SRAM on IS62WV25616 */
+		};
+	};
+};
+
+&enet_tx_clk {
+	clock-frequency = <25000000>;
+};
+
+&mac {
+	status = "okay";
+	phy-mode = "mii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&enet_mii_pins>;
 };
 
 &uart0 {
 	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
 };
diff --git a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
index 08a6f75..5f7bdad 100644
--- a/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
+++ b/arch/arm/boot/dts/lpc4357-ea4357-devkit.dts
@@ -15,6 +15,9 @@
 #include "lpc18xx.dtsi"
 #include "lpc4357.dtsi"
 
+#include "dt-bindings/input/input.h"
+#include "dt-bindings/gpio/gpio.h"
+
 / {
 	model = "Embedded Artists' LPC4357 Developer's Kit";
 	compatible = "ea,lpc4357-developers-kit", "nxp,lpc4357", "nxp,lpc4350";
@@ -34,8 +37,472 @@
 		device_type = "memory";
 		reg = <0x28000000 0x2000000>; /* 32 MB */
 	};
+
+	/* vmmc is controlled by sdmmc host internally */
+	vmmc: vmmc_fixed {
+		compatible = "regulator-fixed";
+		regulator-name = "vmmc-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	gpio_joystick {
+		compatible = "gpio-keys-polled";
+		pinctrl-names = "default";
+		pinctrl-0 = <&gpio_joystick_pins>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		poll-interval = <100>;
+		autorepeat;
+
+		button@0 {
+			label = "joy_enter";
+			linux,code = <KEY_ENTER>;
+			gpios = <&gpio LPC_GPIO(4,8) GPIO_ACTIVE_LOW>;
+		};
+
+		button@1 {
+			label = "joy_left";
+			linux,code = <KEY_LEFT>;
+			gpios = <&gpio LPC_GPIO(4,9) GPIO_ACTIVE_LOW>;
+		};
+
+		button@2 {
+			label = "joy_up";
+			linux,code = <KEY_UP>;
+			gpios = <&gpio LPC_GPIO(4,10) GPIO_ACTIVE_LOW>;
+		};
+
+		button@3 {
+			label = "joy_right";
+			linux,code = <KEY_RIGHT>;
+			gpios = <&gpio LPC_GPIO(4,12) GPIO_ACTIVE_LOW>;
+		};
+
+		button@4 {
+			label = "joy_down";
+			linux,code = <KEY_DOWN>;
+			gpios = <&gpio LPC_GPIO(4,13) GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	leds_mmio {
+		compatible = "gpio-leds";
+
+		led1 {
+			gpios = <&mmio_leds 15 GPIO_ACTIVE_HIGH>;
+			linux,default-trigger = "heartbeat";
+		};
+
+		led2 {
+			gpios = <&mmio_leds 14 GPIO_ACTIVE_HIGH>;
+		};
+
+		led3 {
+			gpios = <&mmio_leds 13 GPIO_ACTIVE_HIGH>;
+		};
+
+		led4 {
+			gpios = <&mmio_leds 12 GPIO_ACTIVE_HIGH>;
+		};
+
+		led5 {
+			gpios = <&mmio_leds 11 GPIO_ACTIVE_HIGH>;
+		};
+
+		led6 {
+			gpios = <&mmio_leds 10 GPIO_ACTIVE_HIGH>;
+		};
+
+		led7 {
+			gpios = <&mmio_leds 9 GPIO_ACTIVE_HIGH>;
+		};
+
+		led8 {
+			gpios = <&mmio_leds 8 GPIO_ACTIVE_HIGH>;
+		};
+
+		led9 {
+			gpios = <&mmio_leds 7 GPIO_ACTIVE_HIGH>;
+		};
+
+		led10 {
+			gpios = <&mmio_leds 6 GPIO_ACTIVE_HIGH>;
+		};
+
+		led11 {
+			gpios = <&mmio_leds 5 GPIO_ACTIVE_HIGH>;
+		};
+
+		led12 {
+			gpios = <&mmio_leds 4 GPIO_ACTIVE_HIGH>;
+		};
+
+		led13 {
+			gpios = <&mmio_leds 3 GPIO_ACTIVE_HIGH>;
+		};
+
+		led14 {
+			gpios = <&mmio_leds 2 GPIO_ACTIVE_HIGH>;
+		};
+
+		led15 {
+			gpios = <&mmio_leds 1 GPIO_ACTIVE_HIGH>;
+		};
+
+		led16 {
+			gpios = <&mmio_leds 0 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&pinctrl {
+	emc_pins: emc-pins {
+		emc_addr0_23_cfg {
+			pins =	"p2_9",  "p2_10", "p2_11", "p2_12",
+				"p2_13", "p1_0",  "p1_1",  "p1_2",
+				"p2_8",  "p2_7",  "p2_6",  "p2_2",
+				"p2_1",  "p2_0",  "p6_8",  "p6_7",
+				"pd_16", "pd_15", "pe_0",  "pe_1",
+				"pe_2",  "pe_3",  "pe_4",  "pa_4";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_data0_31_cfg {
+			pins =	"p1_7",  "p1_8",  "p1_9",  "p1_10",
+				"p1_11", "p1_12", "p1_13", "p1_14",
+				"p5_4",  "p5_5",  "p5_6",  "p5_7",
+				"p5_0",  "p5_1",  "p5_2",  "p5_3",
+				"pd_2",  "pd_3",  "pd_4",  "pd_5",
+				"pd_6",  "pd_7",  "pd_8",  "pd_9",
+				"pe_5",  "pe_6",  "pe_7",  "pe_8",
+				"pe_9",  "pe_10", "pe_11", "pe_12";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_we_oe_cfg {
+			pins = "p1_6", "p1_3";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_bls0_3_cfg {
+			pins = "p1_4", "p6_6", "pd_13", "pd_10";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_cs0_3_cfg {
+			pins = "p1_5", "p6_3", "pd_12", "pd_11";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_dqm0_3_cfg {
+			pins = "p6_12", "p6_10", "pd_0", "pe_13";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_ras_cas_cfg {
+			pins = "p6_5", "p6_4";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_dycs0_cfg {
+			pins = "p6_9";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_cke_cfg {
+			pins = "p6_11";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		emc_sdram_clock_cfg {
+			pins = "clk0", "clk1", "clk2", "clk3";
+			function = "emc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+	};
+
+	enet_rmii_pins: enet-rmii-pins {
+		enet_rmii_rxd_cfg {
+			pins = "p1_15", "p0_0";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_rmii_txd_cfg {
+			pins = "p1_18", "p1_20";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_rmii_rx_dv_cfg {
+			pins = "p1_16";
+			function = "enet";
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_rmii_tx_en_cfg {
+			pins = "p0_1";
+			function = "enet";
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_ref_clk_cfg {
+			pins = "p1_19";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_mdio_cfg {
+			pins = "p1_17";
+			function = "enet";
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		enet_mdc_cfg {
+			pins = "pc_1";
+			function = "enet";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+	};
+
+	gpio_joystick_pins: gpio-joystick-pins {
+		gpio_joystick_cfg {
+			pins =	"p9_0", "p9_1", "pa_1", "pa_2", "pa_3";
+			function = "gpio";
+			input-enable;
+			bias-disable;
+		};
+	};
+
+	sdmmc_pins: sdmmc-pins {
+		sdmmc_clk_cfg {
+			pins = "pc_0";
+			function = "sdmmc";
+			slew-rate = <1>;
+			bias-pull-down;
+		};
+
+		sdmmc_cmd_dat0_3_cfg {
+			pins = "pc_4", "pc_5", "pc_6", "pc_7", "pc_10";
+			function = "sdmmc";
+			slew-rate = <1>;
+			bias-disable;
+			input-enable;
+			input-schmitt-disable;
+		};
+
+		sdmmc_cd_cfg {
+			pins = "pc_8";
+			function = "sdmmc";
+			bias-pull-down;
+			input-enable;
+		};
+
+		sdmmc_pow_cfg {
+			pins = "pc_9";
+			function = "sdmmc";
+			bias-pull-down;
+		};
+	};
+
+	uart0_pins: uart0-pins {
+		uart0_rx_cfg {
+			pins = "pf_11";
+			function = "uart0";
+			input-schmitt-disable;
+			bias-disable;
+			input-enable;
+		};
+
+		uart0_tx_cfg {
+			pins = "pf_10";
+			function = "uart0";
+			bias-pull-down;
+		};
+	};
+
+	uart3_pins: uart3-pins {
+		uart3_rx_cfg {
+			pins = "p2_4";
+			function = "uart3";
+			input-schmitt-disable;
+			bias-disable;
+			input-enable;
+		};
+
+		uart3_tx_cfg {
+			pins = "p9_3";
+			function = "uart3";
+			bias-pull-down;
+		};
+	};
+
+	usb0_pins: usb0-pins {
+		usb0_pwr_enable {
+			pins = "p2_3";
+			function = "usb0";
+		};
+
+		usb0_pwr_fault {
+			pins = "p8_0";
+			function = "usb0";
+			bias-disable;
+			input-enable;
+		};
+	};
+};
+
+&emc {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&emc_pins>;
+
+	cs0 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+
+		mpmc,cs = <0>;
+		mpmc,memory-width = <16>;
+		mpmc,byte-lane-low;
+		mpmc,write-enable-delay = <0>;
+		mpmc,output-enable-delay = <0>;
+		mpmc,read-access-delay = <70>;
+		mpmc,page-mode-read-delay = <70>;
+
+		flash@0,0 {
+			compatible = "sst,sst39vf320", "cfi-flash";
+			reg = <0 0 0x400000>;
+			bank-width = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			partition@0 {
+				label = "bootloader";
+				reg = <0x000000 0x040000>; /* 256 KiB */
+			};
+
+			partition@1 {
+				label = "kernel";
+				reg = <0x040000 0x2c0000>; /* 2.75 MiB */
+			};
+
+			partition@2 {
+				label = "rootfs";
+				reg = <0x300000 0x100000>; /* 1 MiB */
+			};
+		};
+	};
+
+	cs2 {
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges;
+
+		mpmc,cs = <2>;
+		mpmc,memory-width = <16>;
+
+		mmio_leds: gpio@2,0 {
+			compatible = "ti,7416374";
+			reg = <2 0 0x2>;
+			gpio-controller;
+			#gpio-cells = <2>;
+		};
+
+	};
+};
+
+&enet_tx_clk {
+	clock-frequency = <50000000>;
+};
+
+&mac {
+	status = "okay";
+	phy-mode = "rmii";
+	pinctrl-names = "default";
+	pinctrl-0 = <&enet_rmii_pins>;
+};
+
+&mmcsd {
+	status = "okay";
+	bus-width = <4>;
+	vmmc-supply = <&vmmc>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_pins>;
 };
 
 &uart0 {
 	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins>;
+};
+
+&uart3 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart3_pins>;
+};
+
+&usb0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_pins>;
 };
diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts
index 9c5e16b..0521e68 100644
--- a/arch/arm/boot/dts/ls1021a-qds.dts
+++ b/arch/arm/boot/dts/ls1021a-qds.dts
@@ -58,6 +58,55 @@
 		enet0_sgmii_phy = &sgmii_phy1c;
 		enet1_sgmii_phy = &sgmii_phy1d;
 	};
+
+	sys_mclk: clock-mclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24576000>;
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_3p3v: regulator@0 {
+			compatible = "regulator-fixed";
+			reg = <0>;
+			regulator-name = "3P3V";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,widgets =
+			"Microphone", "Microphone Jack",
+			"Headphone", "Headphone Jack",
+			"Speaker", "Speaker Ext",
+			"Line", "Line In Jack";
+		simple-audio-card,routing =
+			"MIC_IN", "Microphone Jack",
+			"Microphone Jack", "Mic Bias",
+			"LINE_IN", "Line In Jack",
+			"Headphone Jack", "HP_OUT",
+			"Speaker Ext", "LINE_OUT";
+
+		simple-audio-card,cpu {
+			sound-dai = <&sai2>;
+			frame-master;
+			bitclock-master;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&codec>;
+			frame-master;
+			bitclock-master;
+		};
+	};
 };
 
 &dspi0 {
@@ -75,10 +124,31 @@
 	};
 };
 
+&enet0 {
+	tbi-handle = <&tbi0>;
+	phy-handle = <&sgmii_phy1c>;
+	phy-connection-type = "sgmii";
+	status = "okay";
+};
+
+&enet1 {
+	tbi-handle = <&tbi0>;
+	phy-handle = <&sgmii_phy1d>;
+	phy-connection-type = "sgmii";
+	status = "okay";
+};
+
+&enet2 {
+	phy-handle = <&rgmii_phy3>;
+	phy-connection-type = "rgmii-id";
+	status = "okay";
+};
+
 &i2c0 {
 	status = "okay";
 
 	pca9547: mux@77 {
+		compatible = "nxp,pca9547";
 		reg = <0x77>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -133,6 +203,21 @@
 				reg = <0x4c>;
 			};
 		};
+
+		i2c@4 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <0x4>;
+
+			codec: sgtl5000@2a {
+				#sound-dai-cells = <0>;
+				compatible = "fsl,sgtl5000";
+				reg = <0x2a>;
+				VDDA-supply = <&reg_3p3v>;
+				VDDIO-supply = <&reg_3p3v>;
+				clocks = <&sys_mclk 1>;
+			};
+		};
 	};
 };
 
@@ -231,6 +316,10 @@
 	};
 };
 
+&sai2 {
+	status = "okay";
+};
+
 &uart0 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
index a2c591e..e008f93 100644
--- a/arch/arm/boot/dts/ls1021a-twr.dts
+++ b/arch/arm/boot/dts/ls1021a-twr.dts
@@ -56,6 +56,55 @@
 		enet0_sgmii_phy = &sgmii_phy2;
 		enet1_sgmii_phy = &sgmii_phy0;
 	};
+
+	sys_mclk: clock-mclk {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <24576000>;
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		reg_3p3v: regulator@0 {
+			compatible = "regulator-fixed";
+			reg = <0>;
+			regulator-name = "3P3V";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+	};
+
+	sound {
+		compatible = "simple-audio-card";
+		simple-audio-card,format = "i2s";
+		simple-audio-card,widgets =
+			"Microphone", "Microphone Jack",
+			"Headphone", "Headphone Jack",
+			"Speaker", "Speaker Ext",
+			"Line", "Line In Jack";
+		simple-audio-card,routing =
+			"MIC_IN", "Microphone Jack",
+			"Microphone Jack", "Mic Bias",
+			"LINE_IN", "Line In Jack",
+			"Headphone Jack", "HP_OUT",
+			"Speaker Ext", "LINE_OUT";
+
+		simple-audio-card,cpu {
+			sound-dai = <&sai1>;
+			frame-master;
+			bitclock-master;
+		};
+
+		simple-audio-card,codec {
+			sound-dai = <&codec>;
+			frame-master;
+			bitclock-master;
+		};
+	};
 };
 
 &dspi1 {
@@ -73,12 +122,40 @@
 	};
 };
 
+&enet0 {
+	tbi-handle = <&tbi1>;
+	phy-handle = <&sgmii_phy2>;
+	phy-connection-type = "sgmii";
+	status = "okay";
+};
+
+&enet1 {
+	tbi-handle = <&tbi1>;
+	phy-handle = <&sgmii_phy0>;
+	phy-connection-type = "sgmii";
+	status = "okay";
+};
+
+&enet2 {
+	phy-handle = <&rgmii_phy1>;
+	phy-connection-type = "rgmii-id";
+	status = "okay";
+};
+
 &i2c0 {
 	status = "okay";
 };
 
 &i2c1 {
 	status = "okay";
+	codec: sgtl5000@a {
+		#sound-dai-cells = <0>;
+		compatible = "fsl,sgtl5000";
+		reg = <0x0a>;
+		VDDA-supply = <&reg_3p3v>;
+		VDDIO-supply = <&reg_3p3v>;
+		clocks = <&sys_mclk 1>;
+	};
 };
 
 &ifc {
@@ -118,6 +195,10 @@
 	};
 };
 
+&sai1 {
+	status = "okay";
+};
+
 &uart0 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index c70bb27..973a496 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -53,6 +53,9 @@
 	interrupt-parent = <&gic>;
 
 	aliases {
+		ethernet0 = &enet0;
+		ethernet1 = &enet1;
+		ethernet2 = &enet2;
 		serial0 = &lpuart0;
 		serial1 = &lpuart1;
 		serial2 = &lpuart2;
@@ -184,7 +187,7 @@
 		};
 
 		dspi0: dspi@2100000 {
-			compatible = "fsl,vf610-dspi";
+			compatible = "fsl,ls1021a-v1.0-dspi";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x0 0x2100000 0x0 0x10000>;
@@ -197,7 +200,7 @@
 		};
 
 		dspi1: dspi@2110000 {
-			compatible = "fsl,vf610-dspi";
+			compatible = "fsl,ls1021a-v1.0-dspi";
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x0 0x2110000 0x0 0x10000>;
@@ -342,28 +345,30 @@
 		};
 
 		sai1: sai@2b50000 {
+			#sound-dai-cells = <0>;
 			compatible = "fsl,vf610-sai";
 			reg = <0x0 0x2b50000 0x0 0x10000>;
 			interrupts = <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
-			clock-names = "sai";
+			clocks = <&platform_clk 1>, <&platform_clk 1>,
+				 <&platform_clk 1>, <&platform_clk 1>;
+			clock-names = "bus", "mclk1", "mclk2", "mclk3";
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 47>,
 			       <&edma0 1 46>;
-			big-endian;
 			status = "disabled";
 		};
 
 		sai2: sai@2b60000 {
+			#sound-dai-cells = <0>;
 			compatible = "fsl,vf610-sai";
 			reg = <0x0 0x2b60000 0x0 0x10000>;
 			interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&platform_clk 1>;
-			clock-names = "sai";
+			clocks = <&platform_clk 1>, <&platform_clk 1>,
+				 <&platform_clk 1>, <&platform_clk 1>;
+			clock-names = "bus", "mclk1", "mclk2", "mclk3";
 			dma-names = "tx", "rx";
 			dmas = <&edma0 1 45>,
 			       <&edma0 1 44>;
-			big-endian;
 			status = "disabled";
 		};
 
@@ -391,6 +396,91 @@
 			reg = <0x0 0x2d24000 0x0 0x4000>;
 		};
 
+		enet0: ethernet@2d10000 {
+			compatible = "fsl,etsec2";
+			device_type = "network";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			interrupt-parent = <&gic>;
+			model = "eTSEC";
+			fsl,magic-packet;
+			ranges;
+
+			queue-group@2d10000 {
+				#address-cells = <2>;
+				#size-cells = <2>;
+				reg = <0x0 0x2d10000 0x0 0x1000>;
+				interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			queue-group@2d14000  {
+				#address-cells = <2>;
+				#size-cells = <2>;
+				reg = <0x0 0x2d14000 0x0 0x1000>;
+				interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
+		enet1: ethernet@2d50000 {
+			compatible = "fsl,etsec2";
+			device_type = "network";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			interrupt-parent = <&gic>;
+			model = "eTSEC";
+			ranges;
+
+			queue-group@2d50000  {
+				#address-cells = <2>;
+				#size-cells = <2>;
+				reg = <0x0 0x2d50000 0x0 0x1000>;
+				interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			queue-group@2d54000  {
+				#address-cells = <2>;
+				#size-cells = <2>;
+				reg = <0x0 0x2d54000 0x0 0x1000>;
+				interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
+		enet2: ethernet@2d90000 {
+			compatible = "fsl,etsec2";
+			device_type = "network";
+			#address-cells = <2>;
+			#size-cells = <2>;
+			interrupt-parent = <&gic>;
+			model = "eTSEC";
+			ranges;
+
+			queue-group@2d90000  {
+				#address-cells = <2>;
+				#size-cells = <2>;
+				reg = <0x0 0x2d90000 0x0 0x1000>;
+				interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			queue-group@2d94000  {
+				#address-cells = <2>;
+				#size-cells = <2>;
+				reg = <0x0 0x2d94000 0x0 0x1000>;
+				interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+					<GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+			};
+		};
+
 		usb@8600000 {
 			compatible = "fsl-usb2-dr-v2.5", "fsl-usb2-dr";
 			reg = <0x0 0x8600000 0x0 0x1000>;
diff --git a/arch/arm/boot/dts/mt6580-evbp1.dts b/arch/arm/boot/dts/mt6580-evbp1.dts
new file mode 100644
index 0000000..17daeae
--- /dev/null
+++ b/arch/arm/boot/dts/mt6580-evbp1.dts
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt6580.dtsi"
+
+/ {
+	model = "MediaTek MT6580 evaluation board";
+	compatible = "mediatek,mt6580-evbp1", "mediatek,mt6580";
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
+	chosen {
+		stdout-path = "serial0:921600n8";
+	};
+
+	memory {
+		reg = <0x80000000 0x20000000>;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/mt6580.dtsi b/arch/arm/boot/dts/mt6580.dtsi
new file mode 100644
index 0000000..06fdf6c
--- /dev/null
+++ b/arch/arm/boot/dts/mt6580.dtsi
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "skeleton.dtsi"
+
+/ {
+	compatible = "mediatek,mt6580";
+	#address-cells = <1>;
+	#size-cells = <1>;
+	interrupt-parent = <&sysirq>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x0>;
+		};
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x1>;
+		};
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x2>;
+		};
+		cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a7";
+			reg = <0x3>;
+		};
+
+	};
+
+	system_clk: dummy13m {
+		compatible = "fixed-clock";
+		clock-frequency = <13000000>;
+		#clock-cells = <0>;
+	};
+
+	rtc_clk: dummy32k {
+		compatible = "fixed-clock";
+		clock-frequency = <32000>;
+		#clock-cells = <0>;
+	};
+
+	uart_clk: dummy26m {
+		compatible = "fixed-clock";
+		clock-frequency = <26000000>;
+		#clock-cells = <0>;
+	};
+
+	timer: timer@10008000 {
+		compatible = "mediatek,mt6580-timer",
+			     "mediatek,mt6577-timer";
+		reg = <0x10008000 0x80>;
+		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&system_clk>, <&rtc_clk>;
+		clock-names = "system-clk", "rtc-clk";
+	};
+
+	sysirq: interrupt-controller@10200100 {
+		compatible = "mediatek,mt6580-sysirq",
+			     "mediatek,mt6577-sysirq";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		reg = <0x10200100 0x1c>;
+	};
+
+	gic: interrupt-controller@10211000 {
+		compatible = "arm,cortex-a7-gic";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		reg = <0x10211000 0x1000>,
+		      <0x10212000 0x1000>,
+		      <0x10214000 0x2000>,
+		      <0x10216000 0x2000>;
+	};
+
+	uart0: serial@11005000 {
+		compatible = "mediatek,mt6580-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0x11005000 0x400>;
+		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart1: serial@11006000 {
+		compatible = "mediatek,mt6580-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0x11006000 0x400>;
+		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm/boot/dts/mt8135-evbp1.dts b/arch/arm/boot/dts/mt8135-evbp1.dts
index 3667738..357a91f 100644
--- a/arch/arm/boot/dts/mt8135-evbp1.dts
+++ b/arch/arm/boot/dts/mt8135-evbp1.dts
@@ -24,6 +24,199 @@
 	};
 };
 
+&pwrap {
+	pmic: mt6397 {
+		compatible = "mediatek,mt6397";
+
+		mt6397regulator: mt6397regulator {
+			compatible = "mediatek,mt6397-regulator";
+
+			mt6397_vpca15_reg: buck_vpca15 {
+				regulator-compatible = "buck_vpca15";
+				regulator-name = "vpca15";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vpca7_reg: buck_vpca7 {
+				regulator-compatible = "buck_vpca7";
+				regulator-name = "vpca7";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vsramca15_reg: buck_vsramca15 {
+				regulator-compatible = "buck_vsramca15";
+				regulator-name = "vsramca15";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vsramca7_reg: buck_vsramca7 {
+				regulator-compatible = "buck_vsramca7";
+				regulator-name = "vsramca7";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vcore_reg: buck_vcore {
+				regulator-compatible = "buck_vcore";
+				regulator-name = "vcore";
+				regulator-min-microvolt = < 850000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vgpu_reg: buck_vgpu {
+				regulator-compatible = "buck_vgpu";
+				regulator-name = "vgpu";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+			};
+
+			mt6397_vdrm_reg: buck_vdrm {
+				regulator-compatible = "buck_vdrm";
+				regulator-name = "vdrm";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vio18_reg: buck_vio18 {
+				regulator-compatible = "buck_vio18";
+				regulator-name = "vio18";
+				regulator-min-microvolt = <1620000>;
+				regulator-max-microvolt = <1980000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vtcxo_reg: ldo_vtcxo {
+				regulator-compatible = "ldo_vtcxo";
+				regulator-name = "vtcxo";
+				regulator-always-on;
+			};
+
+			mt6397_va28_reg: ldo_va28 {
+				regulator-compatible = "ldo_va28";
+				regulator-name = "va28";
+				regulator-always-on;
+			};
+
+			mt6397_vcama_reg: ldo_vcama {
+				regulator-compatible = "ldo_vcama";
+				regulator-name = "vcama";
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vio28_reg: ldo_vio28 {
+				regulator-compatible = "ldo_vio28";
+				regulator-name = "vio28";
+				regulator-always-on;
+			};
+
+			mt6397_vusb_reg: ldo_vusb {
+				regulator-compatible = "ldo_vusb";
+				regulator-name = "vusb";
+			};
+
+			mt6397_vmc_reg: ldo_vmc {
+				regulator-compatible = "ldo_vmc";
+				regulator-name = "vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vmch_reg: ldo_vmch {
+				regulator-compatible = "ldo_vmch";
+				regulator-name = "vmch";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+				regulator-compatible = "ldo_vemc3v3";
+				regulator-name = "vemc_3v3";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp1_reg: ldo_vgp1 {
+				regulator-compatible = "ldo_vgp1";
+				regulator-name = "vcamd";
+				regulator-min-microvolt = <1220000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			mt6397_vgp2_reg: ldo_vgp2 {
+				regulator-compatible = "ldo_vgp2";
+				regulator-name = "vcamio";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp3_reg: ldo_vgp3 {
+				regulator-compatible = "ldo_vgp3";
+				regulator-name = "vcamaf";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp4_reg: ldo_vgp4 {
+				regulator-compatible = "ldo_vgp4";
+				regulator-name = "vgp4";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp5_reg: ldo_vgp5 {
+				regulator-compatible = "ldo_vgp5";
+				regulator-name = "vgp5";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp6_reg: ldo_vgp6 {
+				regulator-compatible = "ldo_vgp6";
+				regulator-name = "vgp6";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vibr_reg: ldo_vibr {
+				regulator-compatible = "ldo_vibr";
+				regulator-name = "vibr";
+				regulator-min-microvolt = <1300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+		};
+	};
+};
+
 &uart3 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/mt8135.dtsi b/arch/arm/boot/dts/mt8135.dtsi
index 0aba9eb..08371db 100644
--- a/arch/arm/boot/dts/mt8135.dtsi
+++ b/arch/arm/boot/dts/mt8135.dtsi
@@ -12,8 +12,10 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/mt8135-clk.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset-controller/mt8135-resets.h>
 #include "skeleton64.dtsi"
 #include "mt8135-pinfunc.h"
 
@@ -88,12 +90,11 @@
 			#clock-cells = <0>;
 		};
 
-		uart_clk: dummy26m {
+		clk26m: clk26m {
 			compatible = "fixed-clock";
-			clock-frequency = <26000000>;
 			#clock-cells = <0>;
+			clock-frequency = <26000000>;
 		};
-
 	};
 
 	soc {
@@ -102,6 +103,26 @@
 		compatible = "simple-bus";
 		ranges;
 
+		topckgen: topckgen@10000000 {
+			compatible = "mediatek,mt8135-topckgen";
+			reg = <0 0x10000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		infracfg: infracfg@10001000 {
+			#reset-cells = <1>;
+			#clock-cells = <1>;
+			compatible = "mediatek,mt8135-infracfg", "syscon";
+			reg = <0 0x10001000 0 0x1000>;
+		};
+
+		pericfg: pericfg@10003000 {
+			#reset-cells = <1>;
+			#clock-cells = <1>;
+			compatible = "mediatek,mt8135-pericfg", "syscon";
+			reg = <0 0x10003000 0 0x1000>;
+		};
+
 		/*
 		 * Pinctrl access register at 0x10005000 and 0x1020c000 through
 		 * regmap. Register 0x1000b000 is used by EINT.
@@ -134,6 +155,19 @@
 			clock-names = "system-clk", "rtc-clk";
 		};
 
+		pwrap: pwrap@1000f000 {
+			compatible = "mediatek,mt8135-pwrap";
+			reg = <0 0x1000f000 0 0x1000>,
+				<0 0x11017000 0 0x1000>;
+			reg-names = "pwrap", "pwrap-bridge";
+			interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&infracfg MT8135_INFRA_PMIC_WRAP_RST>,
+					<&pericfg MT8135_PERI_PWRAP_BRIDGE_SW_RST>;
+			reset-names = "pwrap", "pwrap-bridge";
+			clocks = <&clk26m>, <&clk26m>;
+			clock-names = "spi", "wrap";
+		};
+
 		sysirq: interrupt-controller@10200030 {
 			compatible = "mediatek,mt8135-sysirq",
 				     "mediatek,mt6577-sysirq";
@@ -143,6 +177,12 @@
 			reg = <0 0x10200030 0 0x1c>;
 		};
 
+		apmixedsys: apmixedsys@10209000 {
+			compatible = "mediatek,mt8135-apmixedsys";
+			reg = <0 0x10209000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
 		syscfg_pctl_b: syscfg_pctl_b@1020c000 {
 			compatible = "mediatek,mt8135-pctl-b-syscfg", "syscon";
 			reg = <0 0x1020c000 0 0x1000>;
@@ -163,7 +203,8 @@
 			compatible = "mediatek,mt8135-uart","mediatek,mt6577-uart";
 			reg = <0 0x11006000 0 0x400>;
 			interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART0_SEL>, <&pericfg CLK_PERI_UART0>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
@@ -171,7 +212,8 @@
 			compatible = "mediatek,mt8135-uart","mediatek,mt6577-uart";
 			reg = <0 0x11007000 0 0x400>;
 			interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART1_SEL>, <&pericfg CLK_PERI_UART1>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
@@ -179,7 +221,8 @@
 			compatible = "mediatek,mt8135-uart","mediatek,mt6577-uart";
 			reg = <0 0x11008000 0 0x400>;
 			interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART2_SEL>, <&pericfg CLK_PERI_UART2>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
@@ -187,7 +230,8 @@
 			compatible = "mediatek,mt8135-uart","mediatek,mt6577-uart";
 			reg = <0 0x11009000 0 0x400>;
 			interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART3_SEL>, <&pericfg CLK_PERI_UART3>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/omap3-devkit8000-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
new file mode 100644
index 0000000..9ca2865a
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-devkit8000-common.dtsi
@@ -0,0 +1,369 @@
+/*
+ * Author: Anil Kumar <anilk4.v@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <dt-bindings/input/input.h>
+
+#include "omap34xx.dtsi"
+/ {
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x10000000>;	/* 256 MB */
+	};
+
+	leds {
+		compatible = "gpio-leds";
+
+		heartbeat {
+			label = "devkit8000::led1";
+			gpios = <&gpio6 26 GPIO_ACTIVE_HIGH>;	/* 186 -> LED1 */
+			default-state = "on";
+			linux,default-trigger = "heartbeat";
+		};
+
+		mmc {
+			label = "devkit8000::led2";
+			gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>;	/* 163 -> LED2 */
+			default-state = "on";
+			linux,default-trigger = "none";
+		};
+
+		usr {
+			label = "devkit8000::led3";
+			gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>;	/* 164 -> LED3 */
+			default-state = "on";
+			linux,default-trigger = "usr";
+		};
+
+		pmu_stat {
+			label = "devkit8000::pmu_stat";
+			gpios = <&twl_gpio 19 GPIO_ACTIVE_HIGH>; /* LEDB */
+		};
+	};
+
+	sound {
+		compatible = "ti,omap-twl4030";
+		ti,model = "devkit8000";
+
+		ti,mcbsp = <&mcbsp2>;
+		ti,audio-routing =
+			"Ext Spk", "PREDRIVEL",
+			"Ext Spk", "PREDRIVER",
+			"MAINMIC", "Main Mic",
+			"Main Mic", "Mic Bias 1";
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+
+		user {
+			label = "user";
+			gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+			linux,code = <BTN_EXTRA>;
+			gpio-key,wakeup;
+		};
+	};
+
+	tfp410: encoder@0 {
+		compatible = "ti,tfp410";
+		powerdown-gpios = <&twl_gpio 7 GPIO_ACTIVE_LOW>;
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+
+				tfp410_in: endpoint@0 {
+					remote-endpoint = <&dpi_dvi_out>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+
+				tfp410_out: endpoint@0 {
+					remote-endpoint = <&dvi_connector_in>;
+				};
+			};
+		};
+	};
+
+	dvi0: connector@0 {
+		compatible = "dvi-connector";
+		label = "dvi";
+
+		digital;
+
+		ddc-i2c-bus = <&i2c2>;
+
+		port {
+			dvi_connector_in: endpoint {
+				remote-endpoint = <&tfp410_out>;
+			};
+		};
+	};
+
+	tv0: connector@1 {
+		compatible = "svideo-connector";
+		label = "tv";
+
+		port {
+			tv_connector_in: endpoint {
+				remote-endpoint = <&venc_out>;
+			};
+		};
+	};
+};
+
+&i2c1 {
+	clock-frequency = <2600000>;
+
+	twl: twl@48 {
+		reg = <0x48>;
+		interrupts = <7>;	/* SYS_NIRQ cascaded to intc */
+
+		twl_audio: audio {
+			compatible = "ti,twl4030-audio";
+			codec {
+			};
+		};
+	};
+};
+
+&i2c2 {
+	clock-frequency = <400000>;
+};
+
+&i2c3 {
+	status = "disabled";
+};
+
+#include "twl4030.dtsi"
+#include "twl4030_omap3.dtsi"
+
+&mmc1 {
+	vmmc-supply = <&vmmc1>;
+	vmmc_aux-supply = <&vsim>;
+	bus-width = <8>;
+};
+
+&mmc2 {
+	status = "disabled";
+};
+
+&mmc3 {
+	status = "disabled";
+};
+
+&twl_gpio {
+	ti,use-leds;
+	/*
+	 * pulldowns:
+	 * BIT(1), BIT(2), BIT(6), BIT(7), BIT(8), BIT(13)
+	 * BIT(15), BIT(16), BIT(17)
+	 */
+	ti,pulldowns = <0x03a1c6>;
+};
+
+&twl_keypad {
+	linux,keymap = <MATRIX_KEY(0, 0, KEY_1)
+			MATRIX_KEY(1, 0, KEY_2)
+			MATRIX_KEY(2, 0, KEY_3)
+			MATRIX_KEY(0, 1, KEY_4)
+			MATRIX_KEY(1, 1, KEY_5)
+			MATRIX_KEY(2, 1, KEY_6)
+			MATRIX_KEY(3, 1, KEY_F5)
+			MATRIX_KEY(0, 2, KEY_7)
+			MATRIX_KEY(1, 2, KEY_8)
+			MATRIX_KEY(2, 2, KEY_9)
+			MATRIX_KEY(3, 2, KEY_F6)
+			MATRIX_KEY(0, 3, KEY_F7)
+			MATRIX_KEY(1, 3, KEY_0)
+			MATRIX_KEY(2, 3, KEY_F8)
+			MATRIX_KEY(4, 5, KEY_RESERVED)
+			MATRIX_KEY(4, 4, KEY_VOLUMEUP)
+			MATRIX_KEY(5, 5, KEY_VOLUMEDOWN)
+			>;
+};
+
+&wdt2 {
+	status = "disabled";
+};
+
+&mcbsp2 {
+	status = "okay";
+};
+
+&gpmc {
+	ranges = <0 0 0x30000000 0x1000000>;       /* CS0: 16MB for NAND */
+
+	nand@0,0 {
+		reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
+		nand-bus-width = <16>;
+		gpmc,device-width = <2>;
+		ti,nand-ecc-opt = "sw";
+
+		gpmc,sync-clk-ps = <0>;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <44>;
+		gpmc,cs-wr-off-ns = <44>;
+		gpmc,adv-on-ns = <6>;
+		gpmc,adv-rd-off-ns = <34>;
+		gpmc,adv-wr-off-ns = <44>;
+		gpmc,we-off-ns = <40>;
+		gpmc,oe-off-ns = <54>;
+		gpmc,access-ns = <64>;
+		gpmc,rd-cycle-ns = <82>;
+		gpmc,wr-cycle-ns = <82>;
+		gpmc,wr-access-ns = <40>;
+		gpmc,wr-data-mux-bus-ns = <0>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		x-loader@0 {
+			label = "X-Loader";
+			reg = <0 0x80000>;
+		};
+
+		bootloaders@80000 {
+			label = "U-Boot";
+			reg = <0x80000 0x1e0000>;
+		};
+
+		bootloaders_env@260000 {
+			label = "U-Boot Env";
+			reg = <0x260000 0x20000>;
+		};
+
+		kernel@280000 {
+			label = "Kernel";
+			reg = <0x280000 0x400000>;
+		};
+
+		filesystem@680000 {
+			label = "File System";
+			reg = <0x680000 0xf980000>;
+		};
+	};
+};
+
+&gpmc {
+	ranges = <6 0 0x2c000000 0x1000000>;       /* CS6: 16MB for DM9000 */
+
+	ethernet@0,0 {
+		compatible = "davicom,dm9000";
+		reg =  <6 0x000 2
+			6 0x400 2>; /* CS6, offset 0 and 0x400, IO size 2 */
+		bank-width = <2>;
+		interrupt-parent = <&gpio1>;
+		interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+		davicom,no-eeprom;
+
+		gpmc,mux-add-data = <0>;
+		gpmc,device-width = <1>;
+		gpmc,wait-pin = <0>;
+		gpmc,cycle2cycle-samecsen = <1>;
+		gpmc,cycle2cycle-diffcsen = <1>;
+
+		gpmc,cs-on-ns = <6>;
+		gpmc,cs-rd-off-ns = <180>;
+		gpmc,cs-wr-off-ns = <180>;
+		gpmc,adv-on-ns = <0>;
+		gpmc,adv-rd-off-ns = <18>;
+		gpmc,adv-wr-off-ns = <48>;
+		gpmc,oe-on-ns = <54>;
+		gpmc,oe-off-ns = <168>;
+		gpmc,we-on-ns = <54>;
+		gpmc,we-off-ns = <168>;
+		gpmc,rd-cycle-ns = <186>;
+		gpmc,wr-cycle-ns = <186>;
+		gpmc,access-ns = <144>;
+		gpmc,page-burst-access-ns = <24>;
+		gpmc,bus-turnaround-ns = <90>;
+		gpmc,cycle2cycle-delay-ns = <90>;
+		gpmc,wait-monitoring-ns = <0>;
+		gpmc,clk-activation-ns = <0>;
+		gpmc,wr-data-mux-bus-ns = <0>;
+		gpmc,wr-access-ns = <0>;
+	};
+};
+
+&omap3_pmx_core {
+	dss_dpi_pins: pinmux_dss_dpi_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x20d4, PIN_OUTPUT | MUX_MODE0)	/* dss_pclk.dss_pclk */
+			OMAP3_CORE1_IOPAD(0x20d6, PIN_OUTPUT | MUX_MODE0)	/* dss_hsync.dss_hsync */
+			OMAP3_CORE1_IOPAD(0x20d8, PIN_OUTPUT | MUX_MODE0)	/* dss_vsync.dss_vsync */
+			OMAP3_CORE1_IOPAD(0x20da, PIN_OUTPUT | MUX_MODE0)	/* dss_acbias.dss_acbias */
+			OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE0)	/* dss_data0.dss_data0 */
+			OMAP3_CORE1_IOPAD(0x20de, PIN_OUTPUT | MUX_MODE0)	/* dss_data1.dss_data1 */
+			OMAP3_CORE1_IOPAD(0x20e0, PIN_OUTPUT | MUX_MODE0)	/* dss_data2.dss_data2 */
+			OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE0)	/* dss_data3.dss_data3 */
+			OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE0)	/* dss_data4.dss_data4 */
+			OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE0)	/* dss_data5.dss_data5 */
+			OMAP3_CORE1_IOPAD(0x20e8, PIN_OUTPUT | MUX_MODE0)	/* dss_data6.dss_data6 */
+			OMAP3_CORE1_IOPAD(0x20ea, PIN_OUTPUT | MUX_MODE0)	/* dss_data7.dss_data7 */
+			OMAP3_CORE1_IOPAD(0x20ec, PIN_OUTPUT | MUX_MODE0)	/* dss_data8.dss_data8 */
+			OMAP3_CORE1_IOPAD(0x20ee, PIN_OUTPUT | MUX_MODE0)	/* dss_data9.dss_data9 */
+			OMAP3_CORE1_IOPAD(0x20f0, PIN_OUTPUT | MUX_MODE0)	/* dss_data10.dss_data10 */
+			OMAP3_CORE1_IOPAD(0x20f2, PIN_OUTPUT | MUX_MODE0)	/* dss_data11.dss_data11 */
+			OMAP3_CORE1_IOPAD(0x20f4, PIN_OUTPUT | MUX_MODE0)	/* dss_data12.dss_data12 */
+			OMAP3_CORE1_IOPAD(0x20f6, PIN_OUTPUT | MUX_MODE0)	/* dss_data13.dss_data13 */
+			OMAP3_CORE1_IOPAD(0x20f8, PIN_OUTPUT | MUX_MODE0)	/* dss_data14.dss_data14 */
+			OMAP3_CORE1_IOPAD(0x20fa, PIN_OUTPUT | MUX_MODE0)	/* dss_data15.dss_data15 */
+			OMAP3_CORE1_IOPAD(0x20fc, PIN_OUTPUT | MUX_MODE0)	/* dss_data16.dss_data16 */
+			OMAP3_CORE1_IOPAD(0x20fe, PIN_OUTPUT | MUX_MODE0)	/* dss_data17.dss_data17 */
+			OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE0)	/* dss_data18.dss_data18 */
+			OMAP3_CORE1_IOPAD(0x2102, PIN_OUTPUT | MUX_MODE0)	/* dss_data19.dss_data19 */
+			OMAP3_CORE1_IOPAD(0x2104, PIN_OUTPUT | MUX_MODE0)	/* dss_data20.dss_data20 */
+			OMAP3_CORE1_IOPAD(0x2106, PIN_OUTPUT | MUX_MODE0)	/* dss_data21.dss_data21 */
+			OMAP3_CORE1_IOPAD(0x2108, PIN_OUTPUT | MUX_MODE0)	/* dss_data22.dss_data22 */
+			OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE0)	/* dss_data23.dss_data23 */
+		>;
+	};
+};
+
+&vpll1 {
+	/* Needed for DSS */
+	regulator-name = "vdds_dsi";
+
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&dss {
+	status = "ok";
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&dss_dpi_pins>;
+
+	vdds_dsi-supply = <&vpll1>;
+	vdda_dac-supply = <&vdac>;
+
+	port {
+		dpi_dvi_out: endpoint@0 {
+			remote-endpoint = <&tfp410_in>;
+			data-lines = <24>;
+		};
+	};
+};
+
+&venc {
+	status = "ok";
+
+	vdda-supply = <&vdac>;
+
+	port {
+		venc_out: endpoint {
+			remote-endpoint = <&tv_connector_in>;
+			ti,channels = <2>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
new file mode 100644
index 0000000..e84184d
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd-common.dtsi
@@ -0,0 +1,73 @@
+/*
+ * Author: Anthoine Bourgeois <anthoine.bourgeois@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "omap3-devkit8000-common.dtsi"
+/ {
+	aliases {
+		display0 = &lcd0;
+		display1 = &dvi0;
+		display2 = &tv0;
+	};
+
+	lcd0: display@0 {
+		compatible = "panel-dpi";
+		label = "lcd";
+
+		enable-gpios = <&twl_gpio 18 GPIO_ACTIVE_HIGH>;
+
+		port {
+			lcd_in: endpoint {
+				remote-endpoint = <&dpi_lcd_out>;
+			};
+		};
+	};
+};
+
+&dss {
+	port {
+		dpi_lcd_out: endpoint@1 {
+			remote-endpoint = <&lcd_in>;
+			data-lines = <24>;
+		};
+	};
+};
+
+&vio {
+	regulator-min-microvolt = <1800000>;
+	regulator-max-microvolt = <1800000>;
+};
+
+&mcspi2 {
+
+	/* touch controller */
+	ads7846@0 {
+		compatible = "ti,ads7846";
+		vcc-supply = <&vio>;
+
+		reg = <0>;			/* CS0 */
+		spi-max-frequency = <1500000>;
+
+		interrupt-parent = <&gpio1>;
+		interrupts = <27 0>;		/* gpio_27 */
+		pendown-gpio = <&gpio1 27 0>;
+
+		ti,x-min = /bits/ 16 <0x0>;
+		ti,x-max = /bits/ 16 <0x0fff>;
+		ti,y-min = /bits/ 16 <0x0>;
+		ti,y-max = /bits/ 16 <0x0fff>;
+		ti,x-plate-ohms = /bits/ 16 <180>;
+		ti,pressure-max = /bits/ 16 <255>;
+		ti,debounce-max = /bits/ 16 <10>;
+		ti,debounce-tol = /bits/ 16 <5>;
+		ti,debounce-rep = /bits/ 16 <1>;
+		ti,keep-vref-on = <1>;
+		ti,settle-delay-usec = /bits/ 16 <150>;
+
+		linux,wakeup;
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
new file mode 100644
index 0000000..d570535
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd43.dts
@@ -0,0 +1,37 @@
+/*
+ * Author: Anthoine Bourgeois <anthoine.bourgeois@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/*
+ * 4.3'' LCD panel sold with devkit8000 board
+ */
+
+#include "omap3-devkit8000-lcd-common.dtsi"
+/ {
+	model = "TimLL OMAP3 Devkit8000 with 4.3'' LCD panel";
+	compatible = "timll,omap3-devkit8000", "ti,omap3";
+
+	lcd0: display@0 {
+		panel-timing {
+			clock-frequency = <10164705>;
+			hactive = <480>;
+			vactive = <272>;
+			hfront-porch = <2>;
+			hback-porch = <2>;
+			hsync-len = <41>;
+			vback-porch = <2>;
+			vfront-porch = <2>;
+			vsync-len = <10>;
+
+			hsync-active = <0>;
+			vsync-active = <0>;
+			de-active = <1>;
+			pixelclk-active = <1>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
new file mode 100644
index 0000000..4afad4b
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-devkit8000-lcd70.dts
@@ -0,0 +1,37 @@
+/*
+ * Author: Anthoine Bourgeois <anthoine.bourgeois@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/*
+ * 7.0'' LCD panel sold with some devkit8000 board
+ */
+
+#include "omap3-devkit8000-lcd-common.dtsi"
+/ {
+	model = "TimLL OMAP3 Devkit8000 with 7.0'' LCD panel";
+	compatible = "timll,omap3-devkit8000", "ti,omap3";
+
+	lcd0: display@0 {
+		panel-timing {
+			clock-frequency = <40000000>;
+			hactive = <800>;
+			vactive = <480>;
+			hfront-porch = <1>;
+			hback-porch = <1>;
+			hsync-len = <48>;
+			vback-porch = <25>;
+			vfront-porch = <12>;
+			vsync-len = <3>;
+
+			hsync-active = <0>;
+			vsync-active = <0>;
+			de-active = <1>;
+			pixelclk-active = <1>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-devkit8000.dts b/arch/arm/boot/dts/omap3-devkit8000.dts
index be22971..40ac894 100644
--- a/arch/arm/boot/dts/omap3-devkit8000.dts
+++ b/arch/arm/boot/dts/omap3-devkit8000.dts
@@ -1,5 +1,5 @@
 /*
- * Author: Anil Kumar <anilk4.v@gmail.com>
+ * Author: Anthoine Bourgeois <anthoine.bourgeois@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -7,194 +7,13 @@
  */
 /dts-v1/;
 
-#include "omap34xx.dtsi"
+#include "omap3-devkit8000-common.dtsi"
 / {
 	model = "TimLL OMAP3 Devkit8000";
 	compatible = "timll,omap3-devkit8000", "ti,omap3";
 
-	memory {
-		device_type = "memory";
-		reg = <0x80000000 0x10000000>;	/* 256 MB */
-	};
-
-	leds {
-		compatible = "gpio-leds";
-
-		heartbeat {
-			label = "devkit8000::led1";
-			gpios = <&gpio6 26 GPIO_ACTIVE_HIGH>;	/* 186 -> LED1 */
-			default-state = "on";
-			linux,default-trigger = "heartbeat";
-		};
-
-		mmc {
-			label = "devkit8000::led2";
-			gpios = <&gpio6 3 GPIO_ACTIVE_HIGH>;	/* 163 -> LED2 */
-			default-state = "on";
-			linux,default-trigger = "none";
-		};
-
-		usr {
-			label = "devkit8000::led3";
-			gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>;	/* 164 -> LED3 */
-			default-state = "on";
-			linux,default-trigger = "usr";
-                };
-
-	};
-
-	sound {
-		compatible = "ti,omap-twl4030";
-		ti,model = "devkit8000";
-
-		ti,mcbsp = <&mcbsp2>;
-		ti,audio-routing =
-			"Ext Spk", "PREDRIVEL",
-			"Ext Spk", "PREDRIVER",
-			"MAINMIC", "Main Mic",
-			"Main Mic", "Mic Bias 1";
-	};
-};
-
-&i2c1 {
-	clock-frequency = <2600000>;
-
-	twl: twl@48 {
-		reg = <0x48>;
-		interrupts = <7>;	/* SYS_NIRQ cascaded to intc */
-
-		twl_audio: audio {
-			compatible = "ti,twl4030-audio";
-			codec {
-			};
-		};
-	};
-};
-
-&i2c2 {
-	status = "disabled";
-};
-
-&i2c3 {
-	status = "disabled";
-};
-
-#include "twl4030.dtsi"
-#include "twl4030_omap3.dtsi"
-
-&mmc1 {
-	vmmc-supply = <&vmmc1>;
-	vmmc_aux-supply = <&vsim>;
-	bus-width = <8>;
-};
-
-&mmc2 {
-	status = "disabled";
-};
-
-&mmc3 {
-	status = "disabled";
-};
-
-&wdt2 {
-	status = "disabled";
-};
-
-&mcbsp2 {
-	status = "okay";
-};
-
-&gpmc {
-	ranges = <0 0 0x30000000 0x1000000>;       /* CS0: 16MB for NAND */
-
-	nand@0,0 {
-		reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
-		nand-bus-width = <16>;
-		gpmc,device-width = <2>;
-		ti,nand-ecc-opt = "sw";
-
-		gpmc,sync-clk-ps = <0>;
-		gpmc,cs-on-ns = <0>;
-		gpmc,cs-rd-off-ns = <44>;
-		gpmc,cs-wr-off-ns = <44>;
-		gpmc,adv-on-ns = <6>;
-		gpmc,adv-rd-off-ns = <34>;
-		gpmc,adv-wr-off-ns = <44>;
-		gpmc,we-off-ns = <40>;
-		gpmc,oe-off-ns = <54>;
-		gpmc,access-ns = <64>;
-		gpmc,rd-cycle-ns = <82>;
-		gpmc,wr-cycle-ns = <82>;
-		gpmc,wr-access-ns = <40>;
-		gpmc,wr-data-mux-bus-ns = <0>;
-
-		#address-cells = <1>;
-		#size-cells = <1>;
-
-		x-loader@0 {
-			label = "X-Loader";
-			reg = <0 0x80000>;
-		};
-
-		bootloaders@80000 {
-			label = "U-Boot";
-			reg = <0x80000 0x1e0000>;
-		};
-
-		bootloaders_env@260000 {
-			label = "U-Boot Env";
-			reg = <0x260000 0x20000>;
-		};
-
-		kernel@280000 {
-			label = "Kernel";
-			reg = <0x280000 0x400000>;
-		};
-
-		filesystem@680000 {
-			label = "File System";
-			reg = <0x680000 0xf980000>;
-		};
-	};
-};
-
-&gpmc {
-	ranges = <6 0 0x2c000000 0x1000000>;       /* CS6: 16MB for DM9000 */
-
-	ethernet@0,0 {
-		compatible = "davicom,dm9000";
-		reg =  <6 0x000 2
-			6 0x400 2>; /* CS6, offset 0 and 0x400, IO size 2 */
-		bank-width = <2>;
-		interrupt-parent = <&gpio1>;
-		interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
-		davicom,no-eeprom;
-
-		gpmc,mux-add-data = <0>;
-		gpmc,device-width = <1>;
-		gpmc,wait-pin = <0>;
-		gpmc,cycle2cycle-samecsen = <1>;
-		gpmc,cycle2cycle-diffcsen = <1>;
-
-		gpmc,cs-on-ns = <6>;
-		gpmc,cs-rd-off-ns = <180>;
-		gpmc,cs-wr-off-ns = <180>;
-		gpmc,adv-on-ns = <0>;
-		gpmc,adv-rd-off-ns = <18>;
-		gpmc,adv-wr-off-ns = <48>;
-		gpmc,oe-on-ns = <54>;
-		gpmc,oe-off-ns = <168>;
-		gpmc,we-on-ns = <54>;
-		gpmc,we-off-ns = <168>;
-		gpmc,rd-cycle-ns = <186>;
-		gpmc,wr-cycle-ns = <186>;
-		gpmc,access-ns = <144>;
-		gpmc,page-burst-access-ns = <24>;
-		gpmc,bus-turnaround-ns = <90>;
-		gpmc,cycle2cycle-delay-ns = <90>;
-		gpmc,wait-monitoring-ns = <0>;
-		gpmc,clk-activation-ns = <0>;
-		gpmc,wr-data-mux-bus-ns = <0>;
-		gpmc,wr-access-ns = <0>;
+	aliases {
+		display1 = &dvi0;
+		display2 = &tv0;
 	};
 };
diff --git a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
index e631333..d0dd036 100644
--- a/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
+++ b/arch/arm/boot/dts/omap3-lilly-a83x.dtsi
@@ -319,12 +319,12 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&tsc2048_pins>;
 
-		ti,x-min = <300>;
-		ti,x-max = <3000>;
-		ti,y-min = <600>;
-		ti,y-max = <3600>;
-		ti,x-plate-ohms = <80>;
-		ti,pressure-max = <255>;
+		ti,x-min = /bits/ 16 <300>;
+		ti,x-max = /bits/ 16 <3000>;
+		ti,y-min = /bits/ 16 <600>;
+		ti,y-max = /bits/ 16 <3600>;
+		ti,x-plate-ohms = /bits/ 16 <80>;
+		ti,pressure-max = /bits/ 16 <255>;
 		ti,swap-xy;
 
 		linux,wakeup;
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi
index 18e1649..28430f1 100644
--- a/arch/arm/boot/dts/omap3-overo-base.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-base.dtsi
@@ -218,3 +218,58 @@
 	pinctrl-0 = <&uart2_pins>;
 };
 
+&mcbsp2 {
+	status = "okay";
+};
+
+&gpmc {
+	ranges = <0 0 0x00000000 0x20000000>;
+
+	nand@0,0 {
+		linux,mtd-name= "micron,mt29c4g96maz";
+		reg = <0 0 0>;
+		nand-bus-width = <16>;
+		gpmc,device-width = <2>;
+		ti,nand-ecc-opt = "bch8";
+
+		gpmc,sync-clk-ps = <0>;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <44>;
+		gpmc,cs-wr-off-ns = <44>;
+		gpmc,adv-on-ns = <6>;
+		gpmc,adv-rd-off-ns = <34>;
+		gpmc,adv-wr-off-ns = <44>;
+		gpmc,we-off-ns = <40>;
+		gpmc,oe-off-ns = <54>;
+		gpmc,access-ns = <64>;
+		gpmc,rd-cycle-ns = <82>;
+		gpmc,wr-cycle-ns = <82>;
+		gpmc,wr-access-ns = <40>;
+		gpmc,wr-data-mux-bus-ns = <0>;
+
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "SPL";
+			reg = <0 0x80000>; /* 512KiB */
+		};
+		partition@80000 {
+			label = "U-Boot";
+			reg = <0x80000 0x1C0000>; /* 1792KiB */
+		};
+		partition@1c0000 {
+			label = "Environment";
+			reg = <0x240000 0x40000>; /* 256KiB */
+		};
+		partition@280000 {
+			label = "Kernel";
+			reg = <0x280000 0x800000>; /* 8192KiB */
+		};
+		partition@780000 {
+			label = "Filesystem";
+			reg = <0xA80000 0>;
+			/* HACK: MTDPART_SIZ_FULL=0 so fill to end */
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
index df8908a..80d236a 100644
--- a/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
+++ b/arch/arm/boot/dts/omap3-overo-common-lcd35.dtsi
@@ -62,6 +62,7 @@
 			OMAP3_CORE1_IOPAD(0x21ca, PIN_INPUT | MUX_MODE0)	/* mcspi1_simo.mcspi1_simo */
 			OMAP3_CORE1_IOPAD(0x21cc, PIN_INPUT | MUX_MODE0)	/* mcspi1_somi.mcspi1_somi */
 			OMAP3_CORE1_IOPAD(0x21ce, PIN_INPUT | MUX_MODE0)	/* mcspi1_cs0.mcspi1_cs0 */
+			OMAP3_CORE1_IOPAD(0x21d0, PIN_INPUT | MUX_MODE0)	/* mcspi1_cs1.mcspi1_cs1 */
 		>;
 	};
 
@@ -123,7 +124,7 @@
 		label = "lcd35";
 
 		reg = <1>;					/* CS1 */
-		spi-max-frequency = <10000000>;
+		spi-max-frequency = <500000>;
 		spi-cpol;
 		spi-cpha;
 
diff --git a/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi b/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi
new file mode 100644
index 0000000..680d726
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-palo35-common.dtsi
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Ash Charles, Gumstix Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Palo35 expansion board is manufactured by Gumstix Inc.
+ */
+
+#include "omap3-overo-common-peripherals.dtsi"
+#include "omap3-overo-common-lcd35.dtsi"
+
+#include <dt-bindings/input/input.h>
+
+/ {
+	leds {
+		compatible = "gpio-leds";
+		pinctrl-names = "default";
+		pinctrl-0 = <&led_pins>;
+		heartbeat {
+			label = "overo:red:gpio21";
+			gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;		/* gpio_21 */
+			linux,default-trigger = "heartbeat";
+		};
+		gpio22 {
+			label = "overo:blue:gpio22";
+			gpios = <&gpio1 22 GPIO_ACTIVE_LOW>;		/* gpio_22 */
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		pinctrl-names = "default";
+		pinctrl-0 = <&button_pins>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		button0@23 {
+			label = "button0";
+			linux,code = <BTN_0>;
+			gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;		/* gpio_23 */
+			gpio-key,wakeup;
+		};
+		button1@14 {
+			label = "button1";
+			linux,code = <BTN_1>;
+			gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;		/* gpio_14 */
+			gpio-key,wakeup;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-overo-palo35.dts b/arch/arm/boot/dts/omap3-overo-palo35.dts
new file mode 100644
index 0000000..e3e2bce6
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-palo35.dts
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 Ash Charles, Gumstix Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Palo35 expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap3-overo.dtsi"
+#include "omap3-overo-palo35-common.dtsi"
+
+/ {
+	model = "OMAP35xx Gumstix Overo on Palo35";
+	compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3";
+};
+
+&omap3_pmx_core2 {
+	led_pins: pinmux_led_pins {
+		pinctrl-single,pins = <
+			OMAP3430_CORE2_IOPAD(0x25ea, PIN_OUTPUT | MUX_MODE4)	/* etk_d7.gpio_21 */
+			OMAP3430_CORE2_IOPAD(0x25ec, PIN_OUTPUT | MUX_MODE4)	/* etk_d8.gpio_22 */
+		>;
+	};
+
+	button_pins: pinmux_button_pins {
+		pinctrl-single,pins = <
+			OMAP3430_CORE2_IOPAD(0x25ee, PIN_INPUT | MUX_MODE4)	/* etk_d9.gpio_23 */
+			OMAP3430_CORE2_IOPAD(0x25dc, PIN_INPUT | MUX_MODE4)	/* etk_d0.gpio_14 */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-palo35.dts b/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
new file mode 100644
index 0000000..4e725d2
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-storm-palo35.dts
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 Ash Charles, Gumstix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Palo35 expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap3-overo-storm.dtsi"
+#include "omap3-overo-palo35-common.dtsi"
+
+/ {
+	model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Palo35";
+	compatible = "gumstix,omap3-overo-palo35", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+};
+
+&omap3_pmx_core2 {
+	led_pins: pinmux_led_pins {
+		pinctrl-single,pins = <
+			OMAP3630_CORE2_IOPAD(0x25ea, PIN_OUTPUT | MUX_MODE4)	/* etk_d7.gpio_21 */
+			OMAP3630_CORE2_IOPAD(0x25ec, PIN_OUTPUT | MUX_MODE4)	/* etk_d8.gpio_22 */
+		>;
+	};
+
+	button_pins: pinmux_button_pins {
+		pinctrl-single,pins = <
+			OMAP3630_CORE2_IOPAD(0x25ee, PIN_INPUT | MUX_MODE4)	/* etk_d9.gpio_23 */
+			OMAP3630_CORE2_IOPAD(0x25dc, PIN_INPUT | MUX_MODE4)	/* etk_d0.gpio_14 */
+		>;
+	};
+};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts b/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
new file mode 100644
index 0000000..da6afaf
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobiduo.dts
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Ash Charles, Gumstix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * TobiDuo expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap3-overo-storm.dtsi"
+#include "omap3-overo-tobiduo-common.dtsi"
+
+/ {
+	model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on TobiDuo";
+	compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+};
diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
new file mode 100644
index 0000000..334109e
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015 Ash Charles, Gumstix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * TobiDuo expansion board is manufactured by Gumstix Inc.
+ */
+
+#include "omap3-overo-common-peripherals.dtsi"
+
+#include "omap-gpmc-smsc9221.dtsi"
+
+&gpmc {
+	ranges = <4 0 0x2b000000 0x1000000>,	/* CS4 */
+		 <5 0 0x2c000000 0x1000000>;	/* CS5 */
+
+	smsc1: ethernet@gpmc {
+		reg = <5 0 0xff>;
+		interrupt-parent = <&gpio6>;
+		interrupts = <16 IRQ_TYPE_LEVEL_LOW>;	/* GPIO 176 */
+	};
+
+	smsc2: ethernet@4,0 {
+		compatible = "smsc,lan9221","smsc,lan9115";
+		bank-width = <2>;
+
+		gpmc,mux-add-data;
+		gpmc,cs-on-ns = <0>;
+		gpmc,cs-rd-off-ns = <42>;
+		gpmc,cs-wr-off-ns = <36>;
+		gpmc,adv-on-ns = <6>;
+		gpmc,adv-rd-off-ns = <12>;
+		gpmc,adv-wr-off-ns = <12>;
+		gpmc,oe-on-ns = <0>;
+		gpmc,oe-off-ns = <42>;
+		gpmc,we-on-ns = <0>;
+		gpmc,we-off-ns = <36>;
+		gpmc,rd-cycle-ns = <60>;
+		gpmc,wr-cycle-ns = <54>;
+		gpmc,access-ns = <36>;
+		gpmc,page-burst-access-ns = <0>;
+		gpmc,bus-turnaround-ns = <0>;
+		gpmc,cycle2cycle-delay-ns = <0>;
+		gpmc,wr-data-mux-bus-ns = <18>;
+		gpmc,wr-access-ns = <42>;
+		gpmc,cycle2cycle-samecsen;
+		gpmc,cycle2cycle-diffcsen;
+		vddvario-supply = <&vddvario>;
+		vdd33a-supply = <&vdd33a>;
+		reg-io-width = <4>;
+		smsc,save-mac-address;
+
+		reg = <4 0 0xff>;
+		interrupt-parent = <&gpio3>;
+		interrupts = <1 IRQ_TYPE_LEVEL_LOW>;	/* GPIO 65 */
+	};
+};
+
+&lis33de {
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo.dts b/arch/arm/boot/dts/omap3-overo-tobiduo.dts
new file mode 100644
index 0000000..b9ce310
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-tobiduo.dts
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2015 Ash Charles, Gumstix, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * TobiDuo expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap3-overo.dtsi"
+#include "omap3-overo-tobiduo-common.dtsi"
+
+/ {
+	model = "OMAP35xx Gumstix Overo on TobiDuo";
+	compatible = "gumstix,omap3-overo-tobiduo", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3";
+};
diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi
index 69ca7c4..932a02f 100644
--- a/arch/arm/boot/dts/omap3-overo.dtsi
+++ b/arch/arm/boot/dts/omap3-overo.dtsi
@@ -32,7 +32,3 @@
 		>;
 	};
 };
-
-&mcbsp2 {
-	status = "okay";
-};
diff --git a/arch/arm/boot/dts/omap3-pandora-1ghz.dts b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
index 9619a28..25498f7 100644
--- a/arch/arm/boot/dts/omap3-pandora-1ghz.dts
+++ b/arch/arm/boot/dts/omap3-pandora-1ghz.dts
@@ -19,7 +19,7 @@
 / {
 	model = "Pandora Handheld Console 1GHz";
 
-	compatible = "ti,omap36xx", "ti,omap3";
+	compatible = "openpandora,omap3-pandora-1ghz", "ti,omap36xx", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-pandora-600mhz.dts b/arch/arm/boot/dts/omap3-pandora-600mhz.dts
index fb803a7..8775897 100644
--- a/arch/arm/boot/dts/omap3-pandora-600mhz.dts
+++ b/arch/arm/boot/dts/omap3-pandora-600mhz.dts
@@ -19,7 +19,7 @@
 / {
 	model = "Pandora Handheld Console";
 
-	compatible = "ti,omap3";
+	compatible = "openpandora,omap3-pandora-600mhz", "ti,omap3430", "ti,omap3";
 };
 
 &omap3_pmx_core2 {
diff --git a/arch/arm/boot/dts/omap3-pandora-common.dtsi b/arch/arm/boot/dts/omap3-pandora-common.dtsi
index 782ab1f..f2084e6 100644
--- a/arch/arm/boot/dts/omap3-pandora-common.dtsi
+++ b/arch/arm/boot/dts/omap3-pandora-common.dtsi
@@ -199,6 +199,38 @@
 			gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;   /* GPIO_108 */
 		};
 	};
+
+	/* HS USB Host PHY on PORT 2 */
+	hsusb2_phy: hsusb2_phy {
+		compatible = "usb-nop-xceiv";
+		reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>; /* GPIO_16 */
+		vcc-supply = <&vaux2>;
+	};
+
+	/* HS USB Host VBUS supply
+	 * disabling this regulator causes current leakage, and LCD flicker
+	 * on earlier (CC) board revisions, so keep it always on */
+	usb_host_5v: fixed-regulator-usb_host_5v {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_host_5v";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+		enable-active-high;
+		gpio = <&gpio6 4 0>;	/* GPIO_164 */
+	};
+
+	/* wg7210 (wifi+bt module) 32k clock buffer */
+	wg7210_32k: fixed-regulator-wg7210_32k {
+		compatible = "regulator-fixed";
+		regulator-name = "wg7210_32k";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+		enable-active-high;
+		gpio = <&twl_gpio 13 GPIO_ACTIVE_HIGH>;
+	};
 };
 
 &omap3_pmx_core {
@@ -459,13 +491,18 @@
 	power = <50>;
 };
 
+/*
+ * Many pandora boards have been produced with defective write-protect switches
+ * on either slot, so it was decided not to use this feature. If you know
+ * your board has good switches, feel free to uncomment wp-gpios below.
+ */
 &mmc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc1_pins>;
 	vmmc-supply = <&vmmc1>;
 	bus-width = <4>;
 	cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio4 30 GPIO_ACTIVE_LOW>;	/* GPIO_126 */
+	/*wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;*/	/* GPIO_126 */
 };
 
 &mmc2 {
@@ -473,8 +510,13 @@
 	pinctrl-0 = <&mmc2_pins>;
 	vmmc-supply = <&vmmc2>;
 	bus-width = <4>;
-	cd-gpios = <&twl_gpio 1 GPIO_ACTIVE_HIGH>;
-	wp-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>;	/* GPIO_127 */
+	cd-gpios = <&twl_gpio 1 GPIO_ACTIVE_LOW>;
+	/*wp-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>;*/	/* GPIO_127 */
+};
+
+/* mmc3 is probed using pdata-quirks to pass wl1251 card data */
+&mmc3 {
+	status = "disabled";
 };
 
 /* bluetooth*/
@@ -496,6 +538,10 @@
 	port2-mode = "ehci-phy";
 };
 
+&usbhsehci {
+	phys = <0 &hsusb2_phy>;
+};
+
 &gpmc {
 	ranges = <0 0 0x30000000 0x1000000>; /* CS0: 16MB for NAND */
 
@@ -545,7 +591,7 @@
 			reg = <0x280000 0xa00000>;
 		};
 
-		filesystem@680000 {
+		filesystem@c80000 {
 			label = "rootfs";
 			reg = <0xc80000 0>;	/* 0 = MTDPART_SIZ_FULL */
 		};
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 275618f..3cc8f35 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -510,6 +510,13 @@
 				};
 			};
 		};
+
+		palmas_power_button: palmas_power_button {
+			compatible = "ti,palmas-pwrbutton";
+			interrupt-parent = <&palmas>;
+			interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+			wakeup-source;
+		};
 	};
 
 	twl6040: twl@4b {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index b1a1263..4205a8a 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -873,7 +873,12 @@
 			dwc3@4a030000 {
 				compatible = "snps,dwc3";
 				reg = <0x4a030000 0x10000>;
-				interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+				interrupt-names = "peripheral",
+						  "host",
+						  "otg";
 				phys = <&usb2_phy>, <&usb3_phy>;
 				phy-names = "usb2-phy", "usb3-phy";
 				dr_mode = "peripheral";
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
new file mode 100644
index 0000000..3daec91
--- /dev/null
+++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
@@ -0,0 +1,273 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WTGL
+ *
+ * Copyright (C) 2015, Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include "orion5x-mv88f5182.dtsi"
+
+/ {
+	model = "Buffalo Linkstation LS-WTGL";
+	compatible = "buffalo,lswtgl", "marvell,orion5x-88f5182", "marvell,orion5x";
+
+	memory { /* 64 MB */
+		device_type = "memory";
+		reg = <0x00000000 0x4000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200n8 earlyprintk";
+		linux,stdout-path = &uart0;
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
+		         <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
+			 <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>;
+
+		internal-regs {
+			pinctrl: pinctrl@10000 {
+				pinctrl-0 = <&pmx_usb_power &pmx_power_hdd
+					&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+				pinctrl-names = "default";
+
+				pmx_led_power: pmx-leds {
+					marvell,pins = "mpp0";
+					marvell,function = "gpio";
+				};
+
+				pmx_led_alarm: pmx-leds {
+					marvell,pins = "mpp2";
+					marvell,function = "gpio";
+				};
+
+				pmx_led_info: pmx-leds {
+					marvell,pins = "mpp3";
+					marvell,function = "gpio";
+				};
+
+				pmx_power_hdd: pmx-power-hdd {
+					marvell,pins = "mpp1";
+					marvell,function = "gpio";
+				};
+
+				pmx_usb_power: pmx-usb-power {
+					marvell,pins = "mpp9";
+					marvell,function = "gpio";
+				};
+
+				pmx_sata0: pmx-sata0 {
+					marvell,pins = "mpp12";
+					marvell,function = "sata0";
+				};
+
+				pmx_sata1: pmx-sata1 {
+					marvell,pins = "mpp13";
+					marvell,function = "sata1";
+				};
+
+				pmx_fan_high: pmx-fan-high {
+					marvell,pins = "mpp14";
+					marvell,function = "gpio";
+				};
+
+				pmx_fan_low: pmx-fan-low {
+					marvell,pins = "mpp17";
+					marvell,function = "gpio";
+				};
+
+				pmx_fan_lock: pmx-fan-lock {
+					marvell,pins = "mpp6";
+					marvell,function = "gpio";
+				};
+
+				pmx_power_switch: pmx-power-switch {
+					marvell,pins = "mpp8", "mpp10";
+					marvell,function = "gpio";
+				};
+			};
+		};
+	};
+
+	gpio_keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_power_switch>;
+		pinctrl-names = "default";
+
+		button@1 {
+			label = "Power-on Switch";
+			linux,code = <KEY_RESERVED>;
+			linux,input-type = <5>;
+			gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
+		};
+
+		button@2 {
+			label = "Power-auto Switch";
+			linux,code = <KEY_ESC>;
+			linux,input-type = <5>;
+			gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_leds {
+		compatible = "gpio-leds";
+		pinctrl-0 = <&pmx_led_power &pmx_led_alarm
+			     &pmx_led_info>;
+		pinctrl-names = "default";
+
+		led@1 {
+			label = "lswtgl:blue:power";
+			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+		};
+
+		led@2 {
+			label = "lswtgl:red:alarm";
+			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+		};
+
+		led@3 {
+			label = "lswtgl:amber:info";
+			gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio_fan {
+		compatible = "gpio-fan";
+		pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+		pinctrl-names = "default";
+
+		gpios = <&gpio0 14 GPIO_ACTIVE_LOW
+			 &gpio0 17 GPIO_ACTIVE_LOW>;
+
+		gpio-fan,speed-map = <0 3
+				1500 2
+				3250 1
+				5000 0>;
+
+		alarm-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+	};
+
+	restart_poweroff {
+		compatible = "restart-poweroff";
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_power_hdd &pmx_usb_power>;
+		pinctrl-names = "default";
+
+		usb_power: regulator@1 {
+			compatible = "regulator-fixed";
+			reg = <1>;
+			regulator-name = "USB Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+		};
+
+		hdd_power: regulator@2 {
+			compatible = "regulator-fixed";
+			reg = <2>;
+			regulator-name = "HDD Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&mdio {
+	status = "okay";
+
+	ethphy: ethernet-phy {
+		reg = <8>;
+	};
+};
+
+&eth {
+	status = "okay";
+
+	ethernet-port@0 {
+		phy-handle = <&ethphy>;
+	};
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&i2c {
+	status = "okay";
+
+	rtc {
+		compatible = "ricoh,rs5c372a";
+		reg = <0x32>;
+	};
+};
+
+&wdt {
+	status = "disabled";
+};
+
+&sata {
+	pinctrl-0 = <&pmx_sata0 &pmx_sata1>;
+	pinctrl-names = "default";
+	status = "okay";
+	nr-ports = <2>;
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/orion5x-lswsgl.dts b/arch/arm/boot/dts/orion5x-lswsgl.dts
new file mode 100644
index 0000000..6b47a52
--- /dev/null
+++ b/arch/arm/boot/dts/orion5x-lswsgl.dts
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2015 Benjamin Cama <benoar@dolka.fr>
+ * Copyright (C) 2014 Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Based on the board file arch/arm/mach-orion5x/lsmini-setup.c,
+ * Copyright (C) 2008 Alexey Kopytko <alexey@kopytko.ru>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "orion5x-mv88f5182.dtsi"
+
+/ {
+	model = "Buffalo Linkstation Mini (LS-WSGL)";
+	compatible = "buffalo,lswsgl", "marvell,orion5x-88f5182", "marvell,orion5x";
+
+	memory {
+		reg = <0x00000000 0x8000000>; /* 128 MB */
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+		linux,stdout-path = &uart0;
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
+			 <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
+			 <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		pinctrl-0 = <&pmx_buttons>;
+		pinctrl-names = "default";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		func {
+			label = "Function Button";
+			linux,code = <KEY_OPTION>;
+			gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
+		};
+
+		power {
+			label = "Power-on Switch";
+			linux,input-type = <5>; /* EV_SW */
+			linux,code = <KEY_RESERVED>; /* LSMINI_SW_POWER */
+			gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
+		};
+
+		autopower {
+			label = "Power-auto Switch";
+			linux,input-type = <5>; /* EV_SW */
+			linux,code = <KEY_ESC>; /* LSMINI_SW_AUTOPOWER */
+			gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
+		};
+	};
+
+	gpio-leds {
+		compatible = "gpio-leds";
+		pinctrl-0 = <&pmx_led_alarm &pmx_led_info &pmx_led_func
+			     &pmx_led_power>;
+		pinctrl-names = "default";
+
+		alarm {
+			label = "lswsgl:alarm:red";
+			gpio = <&gpio0 2 GPIO_ACTIVE_LOW>;
+		};
+
+		info {
+			label = "lswsgl:info:amber";
+			gpio = <&gpio0 3 GPIO_ACTIVE_LOW>;
+		};
+
+		func {
+			label = "lswsgl:func:blue:top";
+			gpio = <&gpio0 9 GPIO_ACTIVE_LOW>;
+		};
+
+		power {
+			label = "lswsgl:power:blue:bottom";
+			gpio = <&gpio0 14 GPIO_ACTIVE_LOW>;
+			default-state = "on";
+		};
+	};
+
+	restart_poweroff {
+		compatible = "restart-poweroff";
+	};
+
+	regulators {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-0 = <&pmx_sata0_power &pmx_sata1_power &pmx_usb_power>;
+		pinctrl-names = "default";
+
+		sata0_power: regulator@0 {
+			compatible = "regulator-fixed";
+			reg = <0>;
+			regulator-name = "SATA0 Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+		};
+
+		sata1_power: regulator@1 {
+			compatible = "regulator-fixed";
+			reg = <1>;
+			regulator-name = "SATA1 Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 19 GPIO_ACTIVE_HIGH>;
+		};
+
+		usb_power: regulator@2 {
+			compatible = "regulator-fixed";
+			reg = <2>;
+			regulator-name = "USB Power";
+			regulator-min-microvolt = <5000000>;
+			regulator-max-microvolt = <5000000>;
+			enable-active-high;
+			regulator-always-on;
+			regulator-boot-on;
+			gpio = <&gpio0 16 GPIO_ACTIVE_HIGH>;
+		};
+	};
+};
+
+&devbus_bootcs {
+	status = "okay";
+
+	devbus,keep-config;
+
+	flash@0 {
+		compatible = "cfi-flash";
+		reg = <0 0x40000>;
+		bank-width = <1>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		partition@0 {
+			label = "Full256Kb";
+			reg = <0 0x40000>;
+			read-only;
+		};
+	};
+};
+
+&mdio {
+	status = "okay";
+
+	ethphy: ethernet-phy {
+		reg = <8>;
+	};
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&eth {
+	status = "okay";
+
+	ethernet-port@0 {
+		phy-handle = <&ethphy>;
+	};
+};
+
+&i2c {
+	status = "okay";
+	clock-frequency = <100000>;
+	#address-cells = <1>;
+
+	rtc@32 {
+		compatible = "ricoh,rs5c372a";
+		reg = <0x32>;
+	};
+};
+
+&pinctrl {
+	pmx_buttons: pmx-buttons {
+		marvell,pins = "mpp15", "mpp17", "mpp18";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_alarm: pmx-leds {
+		marvell,pins = "mpp2";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_info: pmx-leds {
+		marvell,pins = "mpp3";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_func: pmx-leds {
+		marvell,pins = "mpp9";
+		marvell,function = "gpio";
+	};
+
+	pmx_led_power: pmx-leds {
+		marvell,pins = "mpp14";
+		marvell,function = "gpio";
+	};
+
+	pmx_sata0_power: pmx-sata0-power {
+		marvell,pins = "mpp1";
+		marvell,function = "gpio";
+	};
+
+	pmx_sata1_power: pmx-sata1-power {
+		marvell,pins = "mpp19";
+		marvell,function = "gpio";
+	};
+
+	pmx_usb_power: pmx-usb-power {
+		marvell,pins = "mpp16";
+		marvell,function = "gpio";
+	};
+};
+
+&sata {
+	status = "okay";
+	nr-ports = <2>;
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/pxa27x.dtsi b/arch/arm/boot/dts/pxa27x.dtsi
index 90b9971..7f68a1e 100644
--- a/arch/arm/boot/dts/pxa27x.dtsi
+++ b/arch/arm/boot/dts/pxa27x.dtsi
@@ -7,6 +7,15 @@
 	compatible = "marvell,pxa27x";
 
 	pxabus {
+		pdma: dma-controller@40000000 {
+			compatible = "marvell,pdma-1.0";
+			reg = <0x40000000 0x10000>;
+			interrupts = <25>;
+			#dma-channels = <32>;
+			#dma-cells = <2>;
+			status = "okay";
+		};
+
 		pxairq: interrupt-controller@40d00000 {
 			marvell,intc-priority;
 			marvell,intc-nr-irqs = <34>;
@@ -17,6 +26,14 @@
 			clocks = <&clks CLK_NONE>;
 		};
 
+		pxa27x_ohci: usb@4c000000 {
+			compatible = "marvell,pxa-ohci";
+			reg = <0x4c000000 0x10000>;
+			interrupts = <3>;
+			clocks = <&clks CLK_USBHOST>;
+			status = "disabled";
+		};
+
 		pwm0: pwm@40b00000 {
 			compatible = "marvell,pxa270-pwm", "marvell,pxa250-pwm";
 			reg = <0x40b00000 0x10>;
@@ -50,6 +67,8 @@
 			reg = <0x40f00180 0x24>;
 			interrupts = <6>;
 			clocks = <&clks CLK_PWRI2C>;
+			#address-cells = <0x1>;
+			#size-cells = <0>;
 			status = "disabled";
 		};
 
@@ -68,6 +87,23 @@
 			clocks = <&clks CLK_KEYPAD>;
 			status = "disabled";
 		};
+
+		pxa_camera: imaging@50000000 {
+			compatible = "marvell,pxa270-qci";
+			reg = <0x50000000 0x1000>;
+			interrupts = <33>;
+			dmas = <&pdma 68 0	/* Y channel */
+				&pdma 69 0	/* U channel */
+				&pdma 70 0>;	/* V channel */
+			dma-names = "CI_Y", "CI_U", "CI_V";
+
+			clocks = <&clks CLK_CAMERA>;
+			clock-names = "ciclk";
+			clock-frequency = <5000000>;
+			clock-output-names = "qci_mclk";
+
+			status = "disabled";
+		};
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/pxa2xx.dtsi b/arch/arm/boot/dts/pxa2xx.dtsi
index 71a0cd7..5e5af07 100644
--- a/arch/arm/boot/dts/pxa2xx.dtsi
+++ b/arch/arm/boot/dts/pxa2xx.dtsi
@@ -128,6 +128,10 @@
 			compatible = "marvell,pxa-mmc";
 			reg = <0x41100000 0x1000>;
 			interrupts = <23>;
+			clocks = <&clks CLK_MMC>;
+			dmas = <&pdma 21 3
+				&pdma 22 3>;
+			dma-names = "rx", "tx";
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index 7ad0b17..cf6998a 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -6,6 +6,15 @@
 	compatible = "marvell,pxa3xx";
 
 	pxabus {
+		pdma: dma-controller@40000000 {
+			compatible = "marvell,pdma-1.0";
+			reg = <0x40000000 0x10000>;
+			interrupts = <25>;
+			#dma-channels = <32>;
+			#dma-cells = <2>;
+			status = "okay";
+		};
+
 		pwri2c: i2c@40f500c0 {
 			compatible = "mrvl,pwri2c";
 			reg = <0x40f500c0 0x30>;
@@ -21,6 +30,8 @@
 			reg = <0x43100000 90>;
 			interrupts = <45>;
 			clocks = <&clks CLK_NAND>;
+			dmas = <&pdma 97>;
+			dma-names = "data";
 			#address-cells = <1>;
 			#size-cells = <1>;	
 			status = "disabled";
@@ -42,6 +53,47 @@
 			interrupt-controller;
 			#interrupt-cells = <0x2>;
 		};
+
+		mmc0: mmc@41100000 {
+			compatible = "marvell,pxa-mmc";
+			reg = <0x41100000 0x1000>;
+			interrupts = <23>;
+			clocks = <&clks CLK_MMC>;
+			dmas = <&pdma 21 3
+				&pdma 22 3>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		mmc1: mmc@42000000 {
+			compatible = "marvell,pxa-mmc";
+			reg = <0x42000000 0x1000>;
+			interrupts = <41>;
+			clocks = <&clks CLK_MMC1>;
+			dmas = <&pdma 93 3
+				&pdma 94 3>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		mmc2: mmc@42500000 {
+			compatible = "marvell,pxa-mmc";
+			reg = <0x42500000 0x1000>;
+			interrupts = <55>;
+			clocks = <&clks CLK_MMC2>;
+			dmas = <&pdma 46 3
+				&pdma 47 3>;
+			dma-names = "rx", "tx";
+			status = "disabled";
+		};
+
+		pxa3xx_ohci: usb@4c000000 {
+			compatible = "marvell,pxa-ohci";
+			reg = <0x4c000000 0x10000>;
+			interrupts = <3>;
+			clocks = <&clks CLK_USBHOST>;
+			status = "disabled";
+		};
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
index 71512b3..34ccb26 100644
--- a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
@@ -67,6 +67,12 @@
 					bias-pull-down;
 				};
 
+				pm8921_l5: l5 {
+					regulator-min-microvolt = <2750000>;
+					regulator-max-microvolt = <3000000>;
+					bias-pull-down;
+				};
+
 				pm8921_l23: l23 {
 					regulator-min-microvolt = <1700000>;
 					regulator-max-microvolt = <1900000>;
@@ -140,19 +146,33 @@
 			status = "okay";
 		};
 
+		/* on board fixed 3.3v supply */
+		v3p3_fixed: v3p3 {
+			compatible = "regulator-fixed";
+			regulator-name = "PCIE V3P3";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-always-on;
+		};
+
 		amba {
 			/* eMMC */
 			sdcc1: sdcc@12400000 {
 				status = "okay";
+				vmmc-supply = <&pm8921_l5>;
+				vqmmc-supply = <&pm8921_s4>;
 			};
 
 			/* External micro SD card */
 			sdcc3: sdcc@12180000 {
 				status = "okay";
+				vmmc-supply = <&v3p3_fixed>;
 			};
 			/* WLAN */
 			sdcc4: sdcc@121c0000 {
 				status = "okay";
+				vmmc-supply = <&v3p3_fixed>;
+				vqmmc-supply = <&v3p3_fixed>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
index a7c939b..88d6655 100644
--- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
@@ -7,6 +7,7 @@
 
 	aliases {
 		serial0 = &gsbi7_serial;
+		serial1 = &gsbi6_serial;
 	};
 
 	soc {
@@ -73,6 +74,12 @@
 					bias-pull-down;
 				};
 
+				pm8921_l5: l5 {
+					regulator-min-microvolt = <2750000>;
+					regulator-max-microvolt = <3000000>;
+					bias-pull-down;
+				};
+
 				pm8921_l6: l6 {
 					regulator-min-microvolt = <2950000>;
 					regulator-max-microvolt = <2950000>;
@@ -84,9 +91,25 @@
 					regulator-max-microvolt = <1900000>;
 					bias-pull-down;
 				};
+
+				pm8921_lvs1: lvs1 {
+					bias-pull-down;
+				};
 			};
 		};
 
+		ext_3p3v: regulator-fixed@1 {
+			compatible = "regulator-fixed";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-name = "ext_3p3v";
+			regulator-type = "voltage";
+			startup-delay-us = <0>;
+			gpio = <&tlmm_pinmux 77 GPIO_ACTIVE_HIGH>;
+			enable-active-high;
+			regulator-boot-on;
+		};
+
 		gsbi3: gsbi@16200000 {
 			status = "okay";
 			qcom,mode = <GSBI_PROT_I2C>;
@@ -115,6 +138,18 @@
 			};
 		};
 
+		gsbi@16500000 {
+			status = "ok";
+			qcom,mode = <GSBI_PROT_I2C_UART>;
+
+			serial@16540000 {
+				status = "ok";
+
+				pinctrl-names = "default";
+				pinctrl-0 = <&uart_pins>;
+			};
+		};
+
 		gsbi@16600000 {
 			status = "ok";
 			qcom,mode = <GSBI_PROT_I2C_UART>;
@@ -175,11 +210,14 @@
 			/* eMMC */
 			sdcc1: sdcc@12400000 {
 				status = "okay";
+				vmmc-supply = <&pm8921_l5>;
+				vqmmc-supply = <&pm8921_s4>;
 			};
 
 			/* External micro SD card */
 			sdcc3: sdcc@12180000 {
 				status = "okay";
+				vmmc-supply = <&pm8921_l6>;
 				pinctrl-names	= "default";
 				pinctrl-0	= <&card_detect>;
 				cd-gpios	= <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
@@ -187,6 +225,8 @@
 			/* WLAN */
 			sdcc4: sdcc@121c0000 {
 				status = "okay";
+				vmmc-supply = <&ext_3p3v>;
+				vqmmc-supply = <&pm8921_lvs1>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index df2061e..d2e94d6 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -6,7 +6,6 @@
 #include <dt-bindings/clock/qcom,mmcc-msm8960.h>
 #include <dt-bindings/soc/qcom,gsbi.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
-
 / {
 	model = "Qualcomm APQ8064";
 	compatible = "qcom,apq8064";
@@ -127,6 +126,13 @@
 					function = "gsbi3";
 				};
 			};
+
+			uart_pins: uart_pins {
+				mux {
+					pins = "gpio14", "gpio15", "gpio16", "gpio17";
+					function = "gsbi6";
+				};
+			};
 		};
 
 		intc: interrupt-controller@2000000 {
@@ -243,13 +249,13 @@
 		gsbi3: gsbi@16200000 {
 			status = "disabled";
 			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <3>;
 			reg = <0x16200000 0x100>;
 			clocks = <&gcc GSBI3_H_CLK>;
 			clock-names = "iface";
 			#address-cells = <1>;
 			#size-cells = <1>;
 			ranges;
-
 			i2c3: i2c@16280000 {
 				compatible = "qcom,i2c-qup-v1.1.1";
 				reg = <0x16280000 0x1000>;
@@ -260,6 +266,28 @@
 			};
 		};
 
+		gsbi6: gsbi@16500000 {
+			status = "disabled";
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <6>;
+			reg = <0x16500000 0x03>;
+			clocks = <&gcc GSBI6_H_CLK>;
+			clock-names = "iface";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			gsbi6_serial: serial@16540000 {
+				compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+				reg = <0x16540000 0x100>,
+				      <0x16500000 0x03>;
+				interrupts = <0 156 0x0>;
+				clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
+
 		gsbi7: gsbi@16600000 {
 			status = "disabled";
 			compatible = "qcom,gsbi-v1.0.0";
@@ -287,6 +315,53 @@
 			compatible = "qcom,ssbi";
 			reg = <0x00500000 0x1000>;
 			qcom,controller-type = "pmic-arbiter";
+
+			pmicintc: pmic@0 {
+				compatible = "qcom,pm8921";
+				interrupt-parent = <&tlmm_pinmux>;
+				interrupts = <74 8>;
+				#interrupt-cells = <2>;
+				interrupt-controller;
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				pm8921_gpio: gpio@150 {
+
+					compatible = "qcom,pm8921-gpio";
+					reg = <0x150>;
+					interrupts = <192 1>, <193 1>, <194 1>,
+						     <195 1>, <196 1>, <197 1>,
+						     <198 1>, <199 1>, <200 1>,
+						     <201 1>, <202 1>, <203 1>,
+						     <204 1>, <205 1>, <206 1>,
+						     <207 1>, <208 1>, <209 1>,
+						     <210 1>, <211 1>, <212 1>,
+						     <213 1>, <214 1>, <215 1>,
+						     <216 1>, <217 1>, <218 1>,
+						     <219 1>, <220 1>, <221 1>,
+						     <222 1>, <223 1>, <224 1>,
+						     <225 1>, <226 1>, <227 1>,
+						     <228 1>, <229 1>, <230 1>,
+						     <231 1>, <232 1>, <233 1>,
+						     <234 1>, <235 1>;
+
+					gpio-controller;
+					#gpio-cells = <2>;
+
+				};
+
+				pm8921_mpps: mpps@50 {
+					compatible = "qcom,pm8921-mpp";
+					reg = <0x50>;
+					gpio-controller;
+					#gpio-cells = <2>;
+					interrupts =
+					<128 1>, <129 1>, <130 1>, <131 1>,
+					<132 1>, <133 1>, <134 1>, <135 1>,
+					<136 1>, <137 1>, <138 1>, <139 1>;
+				};
+
+			};
 		};
 
 		gcc: clock-controller@900000 {
@@ -448,14 +523,6 @@
 		};
 
 		/* Temporary fixed regulator */
-		vsdcc_fixed: vsdcc-regulator {
-			compatible = "regulator-fixed";
-			regulator-name = "SDCC Power";
-			regulator-min-microvolt = <2700000>;
-			regulator-max-microvolt = <2700000>;
-			regulator-always-on;
-		};
-
 		sdcc1bam:dma@12402000{
 			compatible = "qcom,bam-v1.3.0";
 			reg = <0x12402000 0x8000>;
@@ -505,7 +572,6 @@
 				non-removable;
 				cap-sd-highspeed;
 				cap-mmc-highspeed;
-				vmmc-supply = <&vsdcc_fixed>;
 				dmas = <&sdcc1bam 2>, <&sdcc1bam 1>;
 				dma-names = "tx", "rx";
 			};
@@ -524,7 +590,6 @@
 				cap-mmc-highspeed;
 				max-frequency	= <192000000>;
 				no-1-8-v;
-				vmmc-supply = <&vsdcc_fixed>;
 				dmas = <&sdcc3bam 2>, <&sdcc3bam 1>;
 				dma-names = "tx", "rx";
 			};
@@ -542,8 +607,6 @@
 				cap-sd-highspeed;
 				cap-mmc-highspeed;
 				max-frequency	= <48000000>;
-				vmmc-supply = <&vsdcc_fixed>;
-				vqmmc-supply = <&vsdcc_fixed>;
 				dmas = <&sdcc4bam 2>, <&sdcc4bam 1>;
 				dma-names = "tx", "rx";
 				pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/qcom-msm8660.dtsi b/arch/arm/boot/dts/qcom-msm8660.dtsi
index e0b2ce2..ef2fe72 100644
--- a/arch/arm/boot/dts/qcom-msm8660.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8660.dtsi
@@ -67,15 +67,16 @@
 			cpu-offset = <0x40000>;
 		};
 
-		msmgpio: gpio@800000 {
-			compatible = "qcom,msm-gpio";
-			reg = <0x00800000 0x4000>;
+		tlmm: pinctrl@800000 {
+			compatible = "qcom,msm8660-pinctrl";
+			reg = <0x800000 0x4000>;
+
 			gpio-controller;
 			#gpio-cells = <2>;
-			ngpio = <173>;
 			interrupts = <0 16 0x4>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
+
 		};
 
 		gcc: clock-controller@900000 {
@@ -115,7 +116,7 @@
 
 			pmicintc: pmic@0 {
 				compatible = "qcom,pm8058";
-				interrupt-parent = <&msmgpio>;
+				interrupt-parent = <&tlmm>;
 				interrupts = <88 8>;
 				#interrupt-cells = <2>;
 				interrupt-controller;
diff --git a/arch/arm/boot/dts/qcom-msm8960-cdp.dts b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
index 7f70fae..fad71d5 100644
--- a/arch/arm/boot/dts/qcom-msm8960-cdp.dts
+++ b/arch/arm/boot/dts/qcom-msm8960-cdp.dts
@@ -26,6 +26,308 @@
 				status = "okay";
 			};
 		};
+
+		rpm@108000 {
+			regulators {
+				compatible = "qcom,rpm-pm8921-regulators";
+				vin_lvs1_3_6-supply = <&pm8921_s4>;
+				vin_lvs2-supply = <&pm8921_s4>;
+				vin_lvs4_5_7-supply = <&pm8921_s4>;
+				vdd_ncp-supply = <&pm8921_l6>;
+				vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+				vdd_l21_l23_l29-supply = <&pm8921_s8>;
+				vdd_l24-supply = <&pm8921_s1>;
+				vdd_l25-supply = <&pm8921_s1>;
+				vdd_l27-supply = <&pm8921_s7>;
+				vdd_l28-supply = <&pm8921_s7>;
+
+				/* Buck SMPS */
+				pm8921_s1: s1 {
+					regulator-always-on;
+					regulator-min-microvolt = <1225000>;
+					regulator-max-microvolt = <1225000>;
+					qcom,switch-mode-frequency = <3200000>;
+					bias-pull-down;
+				};
+
+				pm8921_s2: s2 {
+					regulator-min-microvolt = <1300000>;
+					regulator-max-microvolt = <1300000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				pm8921_s3: s3 {
+					regulator-min-microvolt = <500000>;
+					regulator-max-microvolt = <1150000>;
+					qcom,switch-mode-frequency = <4800000>;
+					bias-pull-down;
+				};
+
+				pm8921_s4: s4 {
+					regulator-always-on;
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+					qcom,force-mode = <QCOM_RPM_FORCE_MODE_AUTO>;
+				};
+
+				pm8921_s7: s7 {
+					regulator-min-microvolt = <1150000>;
+					regulator-max-microvolt = <1150000>;
+					qcom,switch-mode-frequency = <3200000>;
+					bias-pull-down;
+				};
+
+				pm8921_s8: s8 {
+					regulator-always-on;
+					regulator-min-microvolt = <2050000>;
+					regulator-max-microvolt = <2050000>;
+					qcom,switch-mode-frequency = <1600000>;
+					bias-pull-down;
+				};
+
+				/* PMOS LDO */
+				pm8921_l1: l1 {
+					regulator-always-on;
+					regulator-min-microvolt = <1050000>;
+					regulator-max-microvolt = <1050000>;
+					bias-pull-down;
+				};
+
+				pm8921_l2: l2 {
+					regulator-min-microvolt = <1200000>;
+					regulator-max-microvolt = <1200000>;
+					bias-pull-down;
+				};
+
+				pm8921_l3: l3 {
+					regulator-min-microvolt = <3075000>;
+					regulator-max-microvolt = <3075000>;
+					bias-pull-down;
+				};
+
+				pm8921_l4: l4 {
+					regulator-always-on;
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					bias-pull-down;
+				};
+
+				pm8921_l5: l5 {
+					regulator-min-microvolt = <2950000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				pm8921_l6: l6 {
+					regulator-min-microvolt = <2950000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				pm8921_l7: l7 {
+					regulator-always-on;
+					regulator-min-microvolt = <1850000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				pm8921_l8: l8 {
+					regulator-min-microvolt = <2800000>;
+					regulator-max-microvolt = <3000000>;
+					bias-pull-down;
+				};
+
+				pm8921_l9: l9 {
+					regulator-min-microvolt = <3000000>;
+					regulator-max-microvolt = <3000000>;
+					bias-pull-down;
+				};
+
+				pm8921_l10: l10 {
+					regulator-min-microvolt = <3000000>;
+					regulator-max-microvolt = <3000000>;
+					bias-pull-down;
+				};
+
+				pm8921_l11: l11 {
+					regulator-min-microvolt = <2850000>;
+					regulator-max-microvolt = <2850000>;
+					bias-pull-down;
+				};
+
+				pm8921_l12: l12 {
+					regulator-min-microvolt = <1200000>;
+					regulator-max-microvolt = <1200000>;
+					bias-pull-down;
+				};
+
+				pm8921_l14: l14 {
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					bias-pull-down;
+				};
+
+				pm8921_l15: l15 {
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				pm8921_l16: l16 {
+					regulator-min-microvolt = <2800000>;
+					regulator-max-microvolt = <2800000>;
+					bias-pull-down;
+				};
+
+				pm8921_l17: l17 {
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <2950000>;
+					bias-pull-down;
+				};
+
+				pm8921_l18: l18 {
+					regulator-min-microvolt = <1300000>;
+					regulator-max-microvolt = <1300000>;
+					bias-pull-down;
+				};
+
+				pm8921_l21: l21 {
+					regulator-min-microvolt = <1900000>;
+					regulator-max-microvolt = <1900000>;
+					bias-pull-down;
+				};
+
+				pm8921_l22: l22 {
+					regulator-min-microvolt = <2750000>;
+					regulator-max-microvolt = <2750000>;
+					bias-pull-down;
+				};
+
+				pm8921_l23: l23 {
+					regulator-always-on;
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					bias-pull-down;
+				};
+
+				pm8921_l24: l24 {
+					regulator-min-microvolt = <750000>;
+					regulator-max-microvolt = <1150000>;
+					bias-pull-down;
+				};
+
+				pm8921_l25: l25 {
+					regulator-always-on;
+					regulator-min-microvolt = <1250000>;
+					regulator-max-microvolt = <1250000>;
+					bias-pull-down;
+				};
+
+				/* Low Voltage Switch */
+				pm8921_lvs1: lvs1 {
+					bias-pull-down;
+				};
+
+				pm8921_lvs2: lvs2 {
+					bias-pull-down;
+				};
+
+				pm8921_lvs3: lvs3 {
+					bias-pull-down;
+				};
+
+				pm8921_lvs4: lvs4 {
+					bias-pull-down;
+				};
+
+				pm8921_lvs5: lvs5 {
+					bias-pull-down;
+				};
+
+				pm8921_lvs6: lvs6 {
+					bias-pull-down;
+				};
+
+				pm8921_lvs7: lvs7 {
+					bias-pull-down;
+				};
+
+				pm8921_ncp: ncp {
+					regulator-min-microvolt = <1800000>;
+					regulator-max-microvolt = <1800000>;
+					qcom,switch-mode-frequency = <1600000>;
+				};
+			};
+		};
+
+		gsbi@16000000 {
+			status = "ok";
+			qcom,mode = <GSBI_PROT_SPI>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&spi1_default>;
+			spi@16080000 {
+				status = "ok";
+				eth@0 {
+					compatible = "micrel,ks8851";
+					reg = <0>;
+					interrupt-parent = <&msmgpio>;
+					interrupts = <90 8>;
+					spi-max-frequency = <5400000>;
+					vdd-supply = <&ext_l2>;
+					vdd-io-supply = <&pm8921_lvs6>;
+					reset-gpios = <&msmgpio 89 0>;
+				};
+			};
+		};
+
+		pinctrl@800000 {
+			spi1_default: spi1_default {
+				mux {
+					pins = "gpio6", "gpio7", "gpio9";
+					function = "gsbi1";
+				};
+
+				mosi {
+					pins = "gpio6";
+					drive-strength = <12>;
+					bias-disable;
+				};
+
+				miso {
+					pins = "gpio7";
+					drive-strength = <12>;
+					bias-disable;
+				};
+
+				cs {
+					pins = "gpio8";
+					drive-strength = <12>;
+					bias-disable;
+					output-low;
+				};
+
+				clk {
+					pins = "gpio9";
+					drive-strength = <12>;
+					bias-disable;
+				};
+			};
+		};
+	};
+
+	regulators {
+		compatible = "simple-bus";
+
+		ext_l2: gpio-regulator@91 {
+			compatible = "regulator-fixed";
+			regulator-name = "ext_l2";
+			gpio = <&msmgpio 91 0>;
+			startup-delay-us = <10000>;
+			enable-active-high;
+		};
 	};
 };
 
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index a02b984..2096a94 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -4,6 +4,7 @@
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/clock/qcom,gcc-msm8960.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
 #include <dt-bindings/soc/qcom,gsbi.h>
 
 / {
@@ -73,11 +74,10 @@
 			cpu-offset = <0x80000>;
 		};
 
-		msmgpio: gpio@800000 {
-			compatible = "qcom,msm-gpio";
+		msmgpio: pinctrl@800000 {
+			compatible = "qcom,msm8960-pinctrl";
 			gpio-controller;
 			#gpio-cells = <2>;
-			ngpio = <150>;
 			interrupts = <0 16 0x4>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -105,6 +105,24 @@
 			#reset-cells = <1>;
 		};
 
+		l2cc: clock-controller@2011000 {
+			compatible	= "syscon";
+			reg		= <0x2011000 0x1000>;
+		};
+
+		rpm@108000 {
+			compatible	= "qcom,rpm-msm8960";
+			reg		= <0x108000 0x1000>;
+			qcom,ipc	= <&l2cc 0x8 2>;
+
+			interrupts	= <0 19 0>, <0 21 0>, <0 22 0>;
+			interrupt-names	= "ack", "err", "wakeup";
+
+			regulators {
+				compatible = "qcom,rpm-pm8921-regulators";
+			};
+		};
+
 		acc0: clock-controller@2088000 {
 			compatible = "qcom,kpss-acc-v1";
 			reg = <0x02088000 0x1000>, <0x02008000 0x1000>;
@@ -253,5 +271,30 @@
 			compatible = "qcom,tcsr-msm8960", "syscon";
 			reg = <0x1a400000 0x100>;
 		};
+
+		gsbi@16000000 {
+			compatible = "qcom,gsbi-v1.0.0";
+			cell-index = <1>;
+			reg = <0x16000000 0x100>;
+			clocks = <&gcc GSBI1_H_CLK>;
+			clock-names = "iface";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			spi@16080000 {
+				compatible = "qcom,spi-qup-v1.1.1";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				reg = <0x16080000 0x1000>;
+				interrupts = <0 147 0>;
+				spi-max-frequency = <24000000>;
+				cs-gpios = <&msmgpio 8 0>;
+
+				clocks = <&gcc GSBI1_QUP_CLK>, <&gcc GSBI1_H_CLK>;
+				clock-names = "core", "iface";
+				status = "disabled";
+			};
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
index bd35b06..9bc72a3 100644
--- a/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
@@ -17,3 +17,13 @@
 		status = "ok";
 	};
 };
+
+&spmi_bus {
+	pm8941@0 {
+		coincell@2800 {
+			status = "ok";
+			qcom,rset-ohms = <2100>;
+			qcom,vset-millivolts = <3000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 37b47b5..d7c99b8 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -9,6 +9,17 @@
 	compatible = "qcom,msm8974";
 	interrupt-parent = <&intc>;
 
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		smem_region: smem@fa00000 {
+			reg = <0xfa00000 0x200000>;
+			no-map;
+		};
+	};
+
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -220,6 +231,11 @@
 			reg = <0xfc400000 0x4000>;
 		};
 
+		tcsr_mutex_block: syscon@fd484000 {
+			compatible = "syscon";
+			reg = <0xfd484000 0x2000>;
+		};
+
 		mmcc: clock-controller@fd8c0000 {
 			compatible = "qcom,mmcc-msm8974";
 			#clock-cells = <1>;
@@ -227,6 +243,22 @@
 			reg = <0xfd8c0000 0x6000>;
 		};
 
+		tcsr_mutex: tcsr-mutex {
+			compatible = "qcom,tcsr-mutex";
+			syscon = <&tcsr_mutex_block 0 0x80>;
+
+			#hwlock-cells = <1>;
+		};
+
+		smem@fa00000 {
+			compatible = "qcom,smem";
+
+			memory-region = <&smem_region>;
+			reg = <0xfc428000 0x4000>;
+
+			hwlocks = <&tcsr_mutex 3>;
+		};
+
 		serial@f991e000 {
 			compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
 			reg = <0xf991e000 0x1000>;
diff --git a/arch/arm/boot/dts/qcom-pm8941.dtsi b/arch/arm/boot/dts/qcom-pm8941.dtsi
index aa774e6..968f104 100644
--- a/arch/arm/boot/dts/qcom-pm8941.dtsi
+++ b/arch/arm/boot/dts/qcom-pm8941.dtsi
@@ -125,6 +125,12 @@
 			interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>;
 			qcom,external-resistor-micro-ohms = <10000>;
 		};
+
+		coincell@2800 {
+			compatible = "qcom,pm8941-coincell";
+			reg = <0x2800>;
+			status = "disabled";
+		};
 	};
 
 	usid1: pm8941@1 {
diff --git a/arch/arm/boot/dts/r7s72100.dtsi b/arch/arm/boot/dts/r7s72100.dtsi
index 277e73c..060c32c 100644
--- a/arch/arm/boot/dts/r7s72100.dtsi
+++ b/arch/arm/boot/dts/r7s72100.dtsi
@@ -86,6 +86,7 @@
 			reg = <0xfcfe0000 0x18>;
 			clocks = <&extal_clk>, <&usb_x1_clk>;
 			clock-output-names = "pll", "i", "g";
+			#power-domain-cells = <0>;
 		};
 
 		/* MSTP clocks */
@@ -157,6 +158,7 @@
 			     <0 189 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF0>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -169,6 +171,7 @@
 			     <0 193 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF1>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -181,6 +184,7 @@
 			     <0 197 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF2>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -193,6 +197,7 @@
 			     <0 201 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF3>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -205,6 +210,7 @@
 			     <0 205 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF4>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -217,6 +223,7 @@
 			     <0 209 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF5>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -229,6 +236,7 @@
 			     <0 213 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF6>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -241,6 +249,7 @@
 			     <0 217 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R7S72100_CLK_SCIF7>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -252,6 +261,7 @@
 			     <0 240 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "error", "rx", "tx";
 		clocks = <&mstp10_clks R7S72100_CLK_SPI0>;
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -266,6 +276,7 @@
 			     <0 243 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "error", "rx", "tx";
 		clocks = <&mstp10_clks R7S72100_CLK_SPI1>;
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -280,6 +291,7 @@
 			     <0 246 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "error", "rx", "tx";
 		clocks = <&mstp10_clks R7S72100_CLK_SPI2>;
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -294,6 +306,7 @@
 			     <0 249 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "error", "rx", "tx";
 		clocks = <&mstp10_clks R7S72100_CLK_SPI3>;
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -308,6 +321,7 @@
 			     <0 252 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "error", "rx", "tx";
 		clocks = <&mstp10_clks R7S72100_CLK_SPI4>;
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -338,6 +352,7 @@
 			     <0 164 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R7S72100_CLK_I2C0>;
 		clock-frequency = <100000>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -356,6 +371,7 @@
 			     <0 172 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R7S72100_CLK_I2C1>;
 		clock-frequency = <100000>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -374,6 +390,7 @@
 			     <0 180 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R7S72100_CLK_I2C2>;
 		clock-frequency = <100000>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -392,6 +409,7 @@
 			     <0 188 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R7S72100_CLK_I2C3>;
 		clock-frequency = <100000>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -402,6 +420,7 @@
 		interrupt-names = "tgi0a";
 		clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 };
diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi
index 7ee22a4..cb4f7b2 100644
--- a/arch/arm/boot/dts/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/r8a73a4.dtsi
@@ -207,6 +207,13 @@
 		reg = <0 0xe6050000 0 0x9000>;
 		gpio-controller;
 		#gpio-cells = <2>;
+		gpio-ranges =
+			<&pfc 0 0 31>, <&pfc 32 32 9>,
+			<&pfc 64 64 22>, <&pfc 96 96 31>,
+			<&pfc 128 128 7>, <&pfc 160 160 19>,
+			<&pfc 192 192 31>, <&pfc 224 224 27>,
+			<&pfc 256 256 28>, <&pfc 288 288 21>,
+			<&pfc 320 320 10>;
 		interrupts-extended =
 			<&irqc0  0 0>, <&irqc0  1 0>, <&irqc0  2 0>, <&irqc0  3 0>,
 			<&irqc0  4 0>, <&irqc0  5 0>, <&irqc0  6 0>, <&irqc0  7 0>,
@@ -434,7 +441,7 @@
 	};
 
 	gic: interrupt-controller@f1001000 {
-		compatible = "arm,cortex-a15-gic";
+		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
 		#address-cells = <0>;
 		interrupt-controller;
diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
index 2e31d8c..105d9c9 100644
--- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
+++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts
@@ -224,6 +224,9 @@
 };
 
 &pfc {
+	pinctrl-0 = <&lcd0_pins>;
+	pinctrl-names = "default";
+
 	ether_pins: ether {
 		renesas,groups = "gether_mii", "gether_int";
 		renesas,function = "gether";
@@ -259,6 +262,16 @@
 				 "fsia_data_in_1", "fsia_data_out_0";
 		renesas,function = "fsia";
 	};
+
+	lcd0_pins: lcd0 {
+		renesas,groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync";
+		renesas,function = "lcd0";
+
+		/* DBGMD/LCDC0/FSIA MUX */
+		gpio-hog;
+		gpios = <176 0>;
+		output-high;
+	};
 };
 
 &tpu {
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index d847144..e14cb14 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -291,6 +291,7 @@
 		      <0xe605800c 0x20>;
 		gpio-controller;
 		#gpio-cells = <2>;
+		gpio-ranges = <&pfc 0 0 212>;
 		interrupts-extended =
 			<&irqpin0 0 0>, <&irqpin0 1 0>, <&irqpin0 2 0>, <&irqpin0 3 0>,
 			<&irqpin0 4 0>, <&irqpin0 5 0>, <&irqpin0 6 0>, <&irqpin0 7 0>,
diff --git a/arch/arm/boot/dts/r8a7778.dtsi b/arch/arm/boot/dts/r8a7778.dtsi
index 7ce9f5f..4b1fa9f 100644
--- a/arch/arm/boot/dts/r8a7778.dtsi
+++ b/arch/arm/boot/dts/r8a7778.dtsi
@@ -53,6 +53,7 @@
 		reg = <0xfde00000 0x400>;
 		interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7778_CLK_ETHER>;
+		power-domains = <&cpg_clocks>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -152,6 +153,7 @@
 		reg = <0xffc70000 0x1000>;
 		interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_I2C0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -162,6 +164,7 @@
 		reg = <0xffc71000 0x1000>;
 		interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_I2C1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -172,6 +175,7 @@
 		reg = <0xffc72000 0x1000>;
 		interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_I2C2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -182,6 +186,7 @@
 		reg = <0xffc73000 0x1000>;
 		interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_I2C3>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -193,6 +198,7 @@
 			     <0 34 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_TMU0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		#renesas,channels = <3>;
 
@@ -207,6 +213,7 @@
 			     <0 38 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_TMU1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		#renesas,channels = <3>;
 
@@ -221,6 +228,7 @@
 			     <0 42 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_TMU2>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		#renesas,channels = <3>;
 
@@ -288,6 +296,7 @@
 		interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_SCIF0>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -297,6 +306,7 @@
 		interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_SCIF1>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -306,6 +316,7 @@
 		interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_SCIF2>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -315,6 +326,7 @@
 		interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_SCIF3>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -324,6 +336,7 @@
 		interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_SCIF4>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -333,6 +346,7 @@
 		interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_SCIF5>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -341,6 +355,7 @@
 		reg = <0xffe4e000 0x100>;
 		interrupts = <0 61 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7778_CLK_MMC>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -349,6 +364,7 @@
 		reg = <0xffe4c000 0x100>;
 		interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7778_CLK_SDHI0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -357,6 +373,7 @@
 		reg = <0xffe4d000 0x100>;
 		interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7778_CLK_SDHI1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -365,6 +382,7 @@
 		reg = <0xffe4f000 0x100>;
 		interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7778_CLK_SDHI2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -373,6 +391,7 @@
 		reg = <0xfffc7000 0x18>;
 		interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -383,6 +402,7 @@
 		reg = <0xfffc8000 0x18>;
 		interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -393,6 +413,7 @@
 		reg = <0xfffc6000 0x18>;
 		interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -419,6 +440,7 @@
 			clocks = <&extal_clk>;
 			clock-output-names = "plla", "pllb", "b",
 					     "out", "p", "s", "s1";
+			#power-domain-cells = <0>;
 		};
 
 		/* Audio clocks; frequencies are set by boards if applicable. */
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index 5c8071e..6afa909 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -148,7 +148,7 @@
 		interrupt-controller;
 	};
 
-	irqpin0: interrupt-controller@fe780010 {
+	irqpin0: interrupt-controller@fe78001c {
 		compatible = "renesas,intc-irqpin-r8a7779", "renesas,intc-irqpin";
 		#interrupt-cells = <2>;
 		status = "disabled";
@@ -157,7 +157,8 @@
 			<0xfe780010 4>,
 			<0xfe780024 4>,
 			<0xfe780044 4>,
-			<0xfe780064 4>;
+			<0xfe780064 4>,
+			<0xfe780000 4>;
 		interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH
 			      0 28 IRQ_TYPE_LEVEL_HIGH
 			      0 29 IRQ_TYPE_LEVEL_HIGH
@@ -172,6 +173,7 @@
 		reg = <0xffc70000 0x1000>;
 		interrupts = <0 79 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -182,6 +184,7 @@
 		reg = <0xffc71000 0x1000>;
 		interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -192,6 +195,7 @@
 		reg = <0xffc72000 0x1000>;
 		interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -202,6 +206,7 @@
 		reg = <0xffc73000 0x1000>;
 		interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_I2C3>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -211,6 +216,7 @@
 		interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF0>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -220,6 +226,7 @@
 		interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF1>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -229,6 +236,7 @@
 		interrupts = <0 90 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF2>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -238,6 +246,7 @@
 		interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF3>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -247,6 +256,7 @@
 		interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF4>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -256,6 +266,7 @@
 		interrupts = <0 93 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_SCIF5>;
 		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -277,6 +288,7 @@
 			     <0 34 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		#renesas,channels = <3>;
 
@@ -291,6 +303,7 @@
 			     <0 38 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_TMU1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		#renesas,channels = <3>;
 
@@ -305,6 +318,7 @@
 			     <0 42 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp0_clks R8A7779_CLK_TMU2>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		#renesas,channels = <3>;
 
@@ -316,6 +330,7 @@
 		reg = <0xfc600000 0x2000>;
 		interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7779_CLK_SATA>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	sdhi0: sd@ffe4c000 {
@@ -323,6 +338,7 @@
 		reg = <0xffe4c000 0x100>;
 		interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -331,6 +347,7 @@
 		reg = <0xffe4d000 0x100>;
 		interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -339,6 +356,7 @@
 		reg = <0xffe4e000 0x100>;
 		interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -347,6 +365,7 @@
 		reg = <0xffe4f000 0x100>;
 		interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7779_CLK_SDHI3>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -357,6 +376,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -367,6 +387,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -377,6 +398,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -385,6 +407,7 @@
 		reg = <0 0xfff80000 0 0x40000>;
 		interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7779_CLK_DU>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		ports {
@@ -426,6 +449,7 @@
 			#clock-cells = <1>;
 			clock-output-names = "plla", "z", "zs", "s",
 					     "s1", "p", "b", "out";
+			#power-domain-cells = <0>;
 		};
 
 		/* Fixed factor clocks */
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 2eb8a99..37dec52 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -174,7 +174,7 @@
 			  1800000 0>;
 	};
 
-	sound {
+	rsnd_ak4643: sound {
 		compatible = "simple-audio-card";
 
 		simple-audio-card,format = "left_j";
@@ -548,7 +548,7 @@
 		compatible = "adi,adv7511w";
 		reg = <0x39>;
 		interrupt-parent = <&gpio1>;
-		interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
+		interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
 
 		adi,input-depth = <8>;
 		adi,input-colorspace = "rgb";
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index 51ab886..a0b2a79 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -113,7 +113,7 @@
 	};
 
 	gic: interrupt-controller@f1001000 {
-		compatible = "arm,cortex-a15-gic";
+		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
 		#address-cells = <0>;
 		interrupt-controller;
@@ -134,6 +134,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO0>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio1: gpio@e6051000 {
@@ -146,6 +147,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO1>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio2: gpio@e6052000 {
@@ -158,6 +160,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO2>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio3: gpio@e6053000 {
@@ -170,6 +173,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO3>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio4: gpio@e6054000 {
@@ -182,6 +186,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO4>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio5: gpio@e6055000 {
@@ -194,6 +199,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7790_CLK_GPIO5>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	thermal@e61f0000 {
@@ -201,6 +207,7 @@
 		reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
 		interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	timer {
@@ -218,6 +225,7 @@
 			     <0 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -237,6 +245,7 @@
 			     <0 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_CMT1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -253,6 +262,7 @@
 			     <0 2 IRQ_TYPE_LEVEL_HIGH>,
 			     <0 3 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7790_CLK_IRQC>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	dmac0: dma-controller@e6700000 {
@@ -281,6 +291,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -311,6 +322,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7790_CLK_SYS_DMAC1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -339,6 +351,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7790_CLK_AUDIO_DMAC0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -367,6 +380,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7790_CLK_AUDIO_DMAC1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -378,6 +392,7 @@
 			      0 109 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -389,6 +404,7 @@
 			      0 110 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -400,6 +416,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -410,6 +427,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -420,6 +438,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -430,6 +449,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7790_CLK_I2C3>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -442,6 +462,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
 		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -454,6 +475,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_IIC1>;
 		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -466,6 +488,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_IIC2>;
 		dmas = <&dmac0 0x69>, <&dmac0 0x6a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -478,6 +501,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_IICDVFS>;
 		dmas = <&dmac0 0x77>, <&dmac0 0x78>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -488,6 +512,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
 		dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		reg-io-width = <4>;
 		status = "disabled";
 		max-frequency = <97500000>;
@@ -500,6 +525,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_MMCIF1>;
 		dmas = <&dmac0 0xe1>, <&dmac0 0xe2>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		reg-io-width = <4>;
 		status = "disabled";
 		max-frequency = <97500000>;
@@ -517,6 +543,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI0>;
 		dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -527,6 +554,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI1>;
 		dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -537,6 +565,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI2>;
 		dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -547,6 +576,7 @@
 		clocks = <&mstp3_clks R8A7790_CLK_SDHI3>;
 		dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -558,6 +588,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -569,6 +600,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -580,6 +612,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -591,6 +624,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -602,6 +636,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -613,6 +648,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -624,6 +660,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -635,6 +672,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -646,6 +684,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -657,6 +696,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -665,17 +705,30 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
+		power-domains = <&cpg_clocks>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
 	};
 
+	avb: ethernet@e6800000 {
+		compatible = "renesas,etheravb-r8a7790";
+		reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
+		interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
+		power-domains = <&cpg_clocks>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
 	sata0: sata@ee300000 {
 		compatible = "renesas,sata-r8a7790";
 		reg = <0 0xee300000 0 0x2000>;
 		interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_SATA0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -684,6 +737,7 @@
 		reg = <0 0xee500000 0 0x2000>;
 		interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7790_CLK_SATA1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -692,12 +746,13 @@
 		reg = <0 0xe6590000 0 0x100>;
 		interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
-		renesas,buswait = <4>;
-		phys = <&usb0 1>;
-		phy-names = "usb";
 		dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
 		       <&usb_dmac1 0>, <&usb_dmac1 1>;
 		dma-names = "ch0", "ch1", "ch2", "ch3";
+		power-domains = <&cpg_clocks>;
+		renesas,buswait = <4>;
+		phys = <&usb0 1>;
+		phy-names = "usb";
 		status = "disabled";
 	};
 
@@ -708,6 +763,7 @@
 		#size-cells = <0>;
 		clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
 		clock-names = "usbhs";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		usb0: usb-channel@0 {
@@ -722,33 +778,37 @@
 
 	vin0: video@e6ef0000 {
 		compatible = "renesas,vin-r8a7790";
-		clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
 		reg = <0 0xe6ef0000 0 0x1000>;
 		interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
 	vin1: video@e6ef1000 {
 		compatible = "renesas,vin-r8a7790";
-		clocks = <&mstp8_clks R8A7790_CLK_VIN1>;
 		reg = <0 0xe6ef1000 0 0x1000>;
 		interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7790_CLK_VIN1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
 	vin2: video@e6ef2000 {
 		compatible = "renesas,vin-r8a7790";
-		clocks = <&mstp8_clks R8A7790_CLK_VIN2>;
 		reg = <0 0xe6ef2000 0 0x1000>;
 		interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7790_CLK_VIN2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
 	vin3: video@e6ef3000 {
 		compatible = "renesas,vin-r8a7790";
-		clocks = <&mstp8_clks R8A7790_CLK_VIN3>;
 		reg = <0 0xe6ef3000 0 0x1000>;
 		interrupts = <0 191 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7790_CLK_VIN3>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -757,6 +817,7 @@
 		reg = <0 0xfe920000 0 0x8000>;
 		interrupts = <0 266 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_R>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-sru;
 		renesas,#rpf = <5>;
@@ -769,6 +830,7 @@
 		reg = <0 0xfe928000 0 0x8000>;
 		interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-lut;
 		renesas,has-sru;
@@ -782,6 +844,7 @@
 		reg = <0 0xfe930000 0 0x8000>;
 		interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU0>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -795,6 +858,7 @@
 		reg = <0 0xfe938000 0 0x8000>;
 		interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU1>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -849,6 +913,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_RCAN0>,
 			 <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -859,9 +924,18 @@
 		clocks = <&mstp9_clks R8A7790_CLK_RCAN1>,
 			 <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
+	jpu: jpeg-codec@fe980000 {
+		compatible = "renesas,jpu-r8a7790";
+		reg = <0 0xfe980000 0 0x10300>;
+		interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp1_clks R8A7790_CLK_JPU>;
+		power-domains = <&cpg_clocks>;
+	};
+
 	clocks {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -936,6 +1010,7 @@
 			clock-output-names = "main", "pll0", "pll1", "pll3",
 					     "lb", "qspi", "sdh", "sd0", "sd1",
 					     "z", "rcan", "adsp";
+			#power-domain-cells = <0>;
 		};
 
 		/* Variable factor clocks */
@@ -1249,16 +1324,18 @@
 			compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe6150990 0 4>, <0 0xe61509a0 0 4>;
 			clocks = <&hp_clk>, <&zg_clk>, <&zg_clk>, <&zg_clk>,
-			         <&zg_clk>, <&p_clk>, <&zs_clk>, <&zs_clk>;
+			         <&zg_clk>, <&hp_clk>, <&p_clk>, <&zs_clk>,
+				 <&zs_clk>;
 			#clock-cells = <1>;
 			clock-indices = <
 				R8A7790_CLK_MLB R8A7790_CLK_VIN3 R8A7790_CLK_VIN2
-				R8A7790_CLK_VIN1 R8A7790_CLK_VIN0 R8A7790_CLK_ETHER
+				R8A7790_CLK_VIN1 R8A7790_CLK_VIN0
+				R8A7790_CLK_ETHERAVB R8A7790_CLK_ETHER
 				R8A7790_CLK_SATA1 R8A7790_CLK_SATA0
 			>;
 			clock-output-names =
-				"mlb", "vin3", "vin2", "vin1", "vin0", "ether",
-				"sata1", "sata0";
+				"mlb", "vin3", "vin2", "vin1", "vin0",
+				"etheravb", "ether", "sata1", "sata0";
 		};
 		mstp9_clks: mstp9_clks@e6150994 {
 			compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
@@ -1291,6 +1368,7 @@
 				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
 				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
 				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
+				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
 				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>;
 
 			#clock-cells = <1>;
@@ -1300,6 +1378,7 @@
 				R8A7790_CLK_SSI4 R8A7790_CLK_SSI3 R8A7790_CLK_SSI2 R8A7790_CLK_SSI1 R8A7790_CLK_SSI0
 				R8A7790_CLK_SCU_ALL
 				R8A7790_CLK_SCU_DVC1 R8A7790_CLK_SCU_DVC0
+				R8A7790_CLK_SCU_CTU1_MIX1 R8A7790_CLK_SCU_CTU0_MIX0
 				R8A7790_CLK_SCU_SRC9 R8A7790_CLK_SCU_SRC8 R8A7790_CLK_SCU_SRC7 R8A7790_CLK_SCU_SRC6 R8A7790_CLK_SCU_SRC5
 				R8A7790_CLK_SCU_SRC4 R8A7790_CLK_SCU_SRC3 R8A7790_CLK_SCU_SRC2 R8A7790_CLK_SCU_SRC1 R8A7790_CLK_SCU_SRC0
 			>;
@@ -1309,6 +1388,7 @@
 				"ssi4", "ssi3", "ssi2", "ssi1", "ssi0",
 				"scu-all",
 				"scu-dvc1", "scu-dvc0",
+				"scu-ctu1-mix1", "scu-ctu0-mix0",
 				"scu-src9", "scu-src8", "scu-src7", "scu-src6", "scu-src5",
 				"scu-src4", "scu-src3", "scu-src2", "scu-src1", "scu-src0";
 		};
@@ -1321,6 +1401,7 @@
 		clocks = <&mstp9_clks R8A7790_CLK_QSPI_MOD>;
 		dmas = <&dmac0 0x17>, <&dmac0 0x18>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -1334,6 +1415,7 @@
 		clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
 		dmas = <&dmac0 0x51>, <&dmac0 0x52>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1346,6 +1428,7 @@
 		clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
 		dmas = <&dmac0 0x55>, <&dmac0 0x56>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1358,6 +1441,7 @@
 		clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
 		dmas = <&dmac0 0x41>, <&dmac0 0x42>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1370,6 +1454,7 @@
 		clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
 		dmas = <&dmac0 0x45>, <&dmac0 0x46>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1380,6 +1465,7 @@
 		reg = <0 0xee000000 0 0xc00>;
 		interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_SSUSB>;
+		power-domains = <&cpg_clocks>;
 		phys = <&usb2 1>;
 		phy-names = "usb";
 		status = "disabled";
@@ -1388,10 +1474,11 @@
 	pci0: pci@ee090000 {
 		compatible = "renesas,pci-r8a7790";
 		device_type = "pci";
-		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
 		reg = <0 0xee090000 0 0xc00>,
 		      <0 0xee080000 0 0x1100>;
 		interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		bus-range = <0 0>;
@@ -1422,10 +1509,11 @@
 	pci1: pci@ee0b0000 {
 		compatible = "renesas,pci-r8a7790";
 		device_type = "pci";
-		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
 		reg = <0 0xee0b0000 0 0xc00>,
 		      <0 0xee0a0000 0 0x1100>;
 		interrupts = <0 112 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		bus-range = <1 1>;
@@ -1443,6 +1531,7 @@
 		compatible = "renesas,pci-r8a7790";
 		device_type = "pci";
 		clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
+		power-domains = <&cpg_clocks>;
 		reg = <0 0xee0d0000 0 0xc00>,
 		      <0 0xee0c0000 0 0x1100>;
 		interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
@@ -1495,6 +1584,7 @@
 		interrupt-map = <0 0 0 0 &gic 0 116 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7790_CLK_PCIEC>, <&pcie_bus_clk>;
 		clock-names = "pcie", "pcie_bus";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -1524,6 +1614,8 @@
 			<&mstp10_clks R8A7790_CLK_SCU_SRC5>, <&mstp10_clks R8A7790_CLK_SCU_SRC4>,
 			<&mstp10_clks R8A7790_CLK_SCU_SRC3>, <&mstp10_clks R8A7790_CLK_SCU_SRC2>,
 			<&mstp10_clks R8A7790_CLK_SCU_SRC1>, <&mstp10_clks R8A7790_CLK_SCU_SRC0>,
+			<&mstp10_clks R8A7790_CLK_SCU_CTU0_MIX0>, <&mstp10_clks R8A7790_CLK_SCU_CTU1_MIX1>,
+			<&mstp10_clks R8A7790_CLK_SCU_CTU0_MIX0>, <&mstp10_clks R8A7790_CLK_SCU_CTU1_MIX1>,
 			<&mstp10_clks R8A7790_CLK_SCU_DVC0>, <&mstp10_clks R8A7790_CLK_SCU_DVC1>,
 			<&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>, <&m2_clk>;
 		clock-names = "ssi-all",
@@ -1531,6 +1623,8 @@
 				"ssi.4", "ssi.3", "ssi.2", "ssi.1", "ssi.0",
 				"src.9", "src.8", "src.7", "src.6", "src.5",
 				"src.4", "src.3", "src.2", "src.1", "src.0",
+				"ctu.0", "ctu.1",
+				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
 
@@ -1547,6 +1641,22 @@
 			};
 		};
 
+		rcar_sound,mix {
+			mix0: mix@0 { };
+			mix1: mix@1 { };
+		};
+
+		rcar_sound,ctu {
+			ctu00: ctu@0 { };
+			ctu01: ctu@1 { };
+			ctu02: ctu@2 { };
+			ctu03: ctu@3 { };
+			ctu10: ctu@4 { };
+			ctu11: ctu@5 { };
+			ctu12: ctu@6 { };
+			ctu13: ctu@7 { };
+		};
+
 		rcar_sound,src {
 			src0: src@0 {
 				interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index cffe33f..dc15884 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -242,7 +242,7 @@
 			  1800000 0>;
 	};
 
-	sound {
+	rsnd_ak4643: sound {
 		compatible = "simple-audio-card";
 
 		simple-audio-card,format = "left_j";
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index dc1cd3f..831525d 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -70,7 +70,7 @@
 	};
 
 	gic: interrupt-controller@f1001000 {
-		compatible = "arm,cortex-a15-gic";
+		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
 		#address-cells = <0>;
 		interrupt-controller;
@@ -91,6 +91,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO0>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio1: gpio@e6051000 {
@@ -103,6 +104,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO1>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio2: gpio@e6052000 {
@@ -115,6 +117,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO2>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio3: gpio@e6053000 {
@@ -127,6 +130,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO3>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio4: gpio@e6054000 {
@@ -139,6 +143,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO4>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio5: gpio@e6055000 {
@@ -151,6 +156,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO5>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio6: gpio@e6055400 {
@@ -163,6 +169,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO6>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	gpio7: gpio@e6055800 {
@@ -175,6 +182,7 @@
 		#interrupt-cells = <2>;
 		interrupt-controller;
 		clocks = <&mstp9_clks R8A7791_CLK_GPIO7>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	thermal@e61f0000 {
@@ -182,6 +190,7 @@
 		reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
 		interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp5_clks R8A7791_CLK_THERMAL>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	timer {
@@ -199,6 +208,7 @@
 			     <0 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_CMT0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -218,6 +228,7 @@
 			     <0 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_CMT1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -240,6 +251,7 @@
 			     <0 16 IRQ_TYPE_LEVEL_HIGH>,
 			     <0 17 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7791_CLK_IRQC>;
+		power-domains = <&cpg_clocks>;
 	};
 
 	dmac0: dma-controller@e6700000 {
@@ -268,6 +280,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7791_CLK_SYS_DMAC0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -298,6 +311,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7791_CLK_SYS_DMAC1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -326,6 +340,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7791_CLK_AUDIO_DMAC0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -354,6 +369,7 @@
 				"ch12";
 		clocks = <&mstp5_clks R8A7791_CLK_AUDIO_DMAC1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <13>;
 	};
@@ -365,6 +381,7 @@
 			      0 109 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7791_CLK_USBDMAC0>;
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -376,6 +393,7 @@
 			      0 110 IRQ_TYPE_LEVEL_HIGH>;
 		interrupt-names = "ch0", "ch1";
 		clocks = <&mstp3_clks R8A7791_CLK_USBDMAC1>;
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <2>;
 	};
@@ -388,6 +406,7 @@
 		reg = <0 0xe6508000 0 0x40>;
 		interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -398,6 +417,7 @@
 		reg = <0 0xe6518000 0 0x40>;
 		interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -408,6 +428,7 @@
 		reg = <0 0xe6530000 0 0x40>;
 		interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -418,6 +439,7 @@
 		reg = <0 0xe6540000 0 0x40>;
 		interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C3>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -428,6 +450,7 @@
 		reg = <0 0xe6520000 0 0x40>;
 		interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C4>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -439,6 +462,7 @@
 		reg = <0 0xe6528000 0 0x40>;
 		interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp9_clks R8A7791_CLK_I2C5>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -452,6 +476,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_IICDVFS>;
 		dmas = <&dmac0 0x77>, <&dmac0 0x78>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -464,6 +489,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_IIC0>;
 		dmas = <&dmac0 0x61>, <&dmac0 0x62>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -476,6 +502,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_IIC1>;
 		dmas = <&dmac0 0x65>, <&dmac0 0x66>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -492,6 +519,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_MMCIF0>;
 		dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		reg-io-width = <4>;
 		status = "disabled";
 		max-frequency = <97500000>;
@@ -504,6 +532,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_SDHI0>;
 		dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -514,6 +543,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_SDHI1>;
 		dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -524,6 +554,7 @@
 		clocks = <&mstp3_clks R8A7791_CLK_SDHI2>;
 		dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -535,6 +566,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -546,6 +578,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -557,6 +590,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -568,6 +602,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -579,6 +614,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -590,6 +626,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x23>, <&dmac0 0x24>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -601,6 +638,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -612,6 +650,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -623,6 +662,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -634,6 +674,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -645,6 +686,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -656,6 +698,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -667,6 +710,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -678,6 +722,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -689,6 +734,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -700,6 +746,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -711,6 +758,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -722,6 +770,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -730,6 +779,7 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_ETHER>;
+		power-domains = <&cpg_clocks>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -741,6 +791,7 @@
 		reg = <0 0xee300000 0 0x2000>;
 		interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -749,6 +800,7 @@
 		reg = <0 0xee500000 0 0x2000>;
 		interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7791_CLK_SATA1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -757,12 +809,13 @@
 		reg = <0 0xe6590000 0 0x100>;
 		interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp7_clks R8A7791_CLK_HSUSB>;
-		renesas,buswait = <4>;
-		phys = <&usb0 1>;
-		phy-names = "usb";
 		dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
 		       <&usb_dmac1 0>, <&usb_dmac1 1>;
 		dma-names = "ch0", "ch1", "ch2", "ch3";
+		power-domains = <&cpg_clocks>;
+		renesas,buswait = <4>;
+		phys = <&usb0 1>;
+		phy-names = "usb";
 		status = "disabled";
 	};
 
@@ -773,6 +826,7 @@
 		#size-cells = <0>;
 		clocks = <&mstp7_clks R8A7791_CLK_HSUSB>;
 		clock-names = "usbhs";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		usb0: usb-channel@0 {
@@ -787,25 +841,28 @@
 
 	vin0: video@e6ef0000 {
 		compatible = "renesas,vin-r8a7791";
-		clocks = <&mstp8_clks R8A7791_CLK_VIN0>;
 		reg = <0 0xe6ef0000 0 0x1000>;
 		interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7791_CLK_VIN0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
 	vin1: video@e6ef1000 {
 		compatible = "renesas,vin-r8a7791";
-		clocks = <&mstp8_clks R8A7791_CLK_VIN1>;
 		reg = <0 0xe6ef1000 0 0x1000>;
 		interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7791_CLK_VIN1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
 	vin2: video@e6ef2000 {
 		compatible = "renesas,vin-r8a7791";
-		clocks = <&mstp8_clks R8A7791_CLK_VIN2>;
 		reg = <0 0xe6ef2000 0 0x1000>;
 		interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7791_CLK_VIN2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -814,6 +871,7 @@
 		reg = <0 0xfe928000 0 0x8000>;
 		interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_VSP1_S>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-lut;
 		renesas,has-sru;
@@ -827,6 +885,7 @@
 		reg = <0 0xfe930000 0 0x8000>;
 		interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU0>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -840,6 +899,7 @@
 		reg = <0 0xfe938000 0 0x8000>;
 		interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU1>;
+		power-domains = <&cpg_clocks>;
 
 		renesas,has-lif;
 		renesas,has-lut;
@@ -885,6 +945,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
 			 <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -895,9 +956,18 @@
 		clocks = <&mstp9_clks R8A7791_CLK_RCAN1>,
 			 <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
 		clock-names = "clkp1", "clkp2", "can_clk";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
+	jpu: jpeg-codec@fe980000 {
+		compatible = "renesas,jpu-r8a7791";
+		reg = <0 0xfe980000 0 0x10300>;
+		interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp1_clks R8A7791_CLK_JPU>;
+		power-domains = <&cpg_clocks>;
+	};
+
 	clocks {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -972,6 +1042,7 @@
 			clock-output-names = "main", "pll0", "pll1", "pll3",
 					     "lb", "qspi", "sdh", "sd0", "z",
 					     "rcan", "adsp";
+			#power-domain-cells = <0>;
 		};
 
 		/* Variable factor clocks */
@@ -1311,6 +1382,7 @@
 				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
 				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
 				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
+				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
 				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>;
 
 			#clock-cells = <1>;
@@ -1320,6 +1392,7 @@
 				R8A7791_CLK_SSI4 R8A7791_CLK_SSI3 R8A7791_CLK_SSI2 R8A7791_CLK_SSI1 R8A7791_CLK_SSI0
 				R8A7791_CLK_SCU_ALL
 				R8A7791_CLK_SCU_DVC1 R8A7791_CLK_SCU_DVC0
+				R8A7791_CLK_SCU_CTU1_MIX1 R8A7791_CLK_SCU_CTU0_MIX0
 				R8A7791_CLK_SCU_SRC9 R8A7791_CLK_SCU_SRC8 R8A7791_CLK_SCU_SRC7 R8A7791_CLK_SCU_SRC6 R8A7791_CLK_SCU_SRC5
 				R8A7791_CLK_SCU_SRC4 R8A7791_CLK_SCU_SRC3 R8A7791_CLK_SCU_SRC2 R8A7791_CLK_SCU_SRC1 R8A7791_CLK_SCU_SRC0
 			>;
@@ -1329,6 +1402,7 @@
 				"ssi4", "ssi3", "ssi2", "ssi1", "ssi0",
 				"scu-all",
 				"scu-dvc1", "scu-dvc0",
+				"scu-ctu1-mix1", "scu-ctu0-mix0",
 				"scu-src9", "scu-src8", "scu-src7", "scu-src6", "scu-src5",
 				"scu-src4", "scu-src3", "scu-src2", "scu-src1", "scu-src0";
 		};
@@ -1351,6 +1425,7 @@
 		clocks = <&mstp9_clks R8A7791_CLK_QSPI_MOD>;
 		dmas = <&dmac0 0x17>, <&dmac0 0x18>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		num-cs = <1>;
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -1364,6 +1439,7 @@
 		clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
 		dmas = <&dmac0 0x51>, <&dmac0 0x52>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1376,6 +1452,7 @@
 		clocks = <&mstp2_clks R8A7791_CLK_MSIOF1>;
 		dmas = <&dmac0 0x55>, <&dmac0 0x56>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1388,6 +1465,7 @@
 		clocks = <&mstp2_clks R8A7791_CLK_MSIOF2>;
 		dmas = <&dmac0 0x41>, <&dmac0 0x42>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
@@ -1398,6 +1476,7 @@
 		reg = <0 0xee000000 0 0xc00>;
 		interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_SSUSB>;
+		power-domains = <&cpg_clocks>;
 		phys = <&usb2 1>;
 		phy-names = "usb";
 		status = "disabled";
@@ -1406,10 +1485,11 @@
 	pci0: pci@ee090000 {
 		compatible = "renesas,pci-r8a7791";
 		device_type = "pci";
-		clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
 		reg = <0 0xee090000 0 0xc00>,
 		      <0 0xee080000 0 0x1100>;
 		interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		bus-range = <0 0>;
@@ -1440,10 +1520,11 @@
 	pci1: pci@ee0d0000 {
 		compatible = "renesas,pci-r8a7791";
 		device_type = "pci";
-		clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
 		reg = <0 0xee0d0000 0 0xc00>,
 		      <0 0xee0c0000 0 0x1100>;
 		interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 
 		bus-range = <1 1>;
@@ -1493,6 +1574,7 @@
 		interrupt-map = <0 0 0 0 &gic 0 116 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7791_CLK_PCIEC>, <&pcie_bus_clk>;
 		clock-names = "pcie", "pcie_bus";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -1582,6 +1664,8 @@
 			<&mstp10_clks R8A7791_CLK_SCU_SRC5>, <&mstp10_clks R8A7791_CLK_SCU_SRC4>,
 			<&mstp10_clks R8A7791_CLK_SCU_SRC3>, <&mstp10_clks R8A7791_CLK_SCU_SRC2>,
 			<&mstp10_clks R8A7791_CLK_SCU_SRC1>, <&mstp10_clks R8A7791_CLK_SCU_SRC0>,
+			<&mstp10_clks R8A7791_CLK_SCU_CTU0_MIX0>, <&mstp10_clks R8A7791_CLK_SCU_CTU1_MIX1>,
+			<&mstp10_clks R8A7791_CLK_SCU_CTU0_MIX0>, <&mstp10_clks R8A7791_CLK_SCU_CTU1_MIX1>,
 			<&mstp10_clks R8A7791_CLK_SCU_DVC0>, <&mstp10_clks R8A7791_CLK_SCU_DVC1>,
 			<&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>, <&m2_clk>;
 		clock-names = "ssi-all",
@@ -1589,6 +1673,8 @@
 				"ssi.4", "ssi.3", "ssi.2", "ssi.1", "ssi.0",
 				"src.9", "src.8", "src.7", "src.6", "src.5",
 				"src.4", "src.3", "src.2", "src.1", "src.0",
+				"ctu.0", "ctu.1",
+				"mix.0", "mix.1",
 				"dvc.0", "dvc.1",
 				"clk_a", "clk_b", "clk_c", "clk_i";
 
@@ -1605,6 +1691,22 @@
 			};
 		};
 
+		rcar_sound,mix {
+			mix0: mix@0 { };
+			mix1: mix@1 { };
+		};
+
+		rcar_sound,ctu {
+			ctu00: ctu@0 { };
+			ctu01: ctu@1 { };
+			ctu02: ctu@2 { };
+			ctu03: ctu@3 { };
+			ctu10: ctu@4 { };
+			ctu11: ctu@5 { };
+			ctu12: ctu@6 { };
+			ctu13: ctu@7 { };
+		};
+
 		rcar_sound,src {
 			src0: src@0 {
 				interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
new file mode 100644
index 0000000..96443ec
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -0,0 +1,63 @@
+/*
+ * Device Tree Source for the Gose board
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7793.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+/ {
+	model = "Gose";
+	compatible = "renesas,gose", "renesas,r8a7793";
+
+	aliases {
+		serial0 = &scif0;
+		serial1 = &scif1;
+	};
+
+	chosen {
+		bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+		stdout-path = &scif0;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+};
+
+&extal_clk {
+	clock-frequency = <20000000>;
+};
+
+&ether {
+	phy-handle = <&phy1>;
+	renesas,ether-link-active-low;
+	status = "okay";
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+		interrupt-parent = <&irqc0>;
+		interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+		micrel,led-mode = <1>;
+	};
+};
+
+&cmt0 {
+	status = "okay";
+};
+
+&scif0 {
+	status = "okay";
+};
+
+&scif1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
new file mode 100644
index 0000000..c465404
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -0,0 +1,374 @@
+/*
+ * Device Tree Source for the r8a7793 SoC
+ *
+ * Copyright (C) 2014-2015 Renesas Electronics Corporation
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <dt-bindings/clock/r8a7793-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+	compatible = "renesas,r8a7793";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a15";
+			reg = <0>;
+			clock-frequency = <1500000000>;
+			voltage-tolerance = <1>; /* 1% */
+			clocks = <&cpg_clocks R8A7793_CLK_Z>;
+			clock-latency = <300000>; /* 300 us */
+
+			/* kHz - uV - OPPs unknown yet */
+			operating-points = <1500000 1000000>,
+					   <1312500 1000000>,
+					   <1125000 1000000>,
+					   < 937500 1000000>,
+					   < 750000 1000000>,
+					   < 375000 1000000>;
+		};
+	};
+
+	gic: interrupt-controller@f1001000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		#address-cells = <0>;
+		interrupt-controller;
+		reg = <0 0xf1001000 0 0x1000>,
+			<0 0xf1002000 0 0x1000>,
+			<0 0xf1004000 0 0x2000>,
+			<0 0xf1006000 0 0x2000>;
+		interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+
+	timer {
+		compatible = "arm,armv7-timer";
+		interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+			     <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+			     <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+			     <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	cmt0: timer@ffca0000 {
+		compatible = "renesas,cmt-48-r8a7793", "renesas,cmt-48-gen2";
+		reg = <0 0xffca0000 0 0x1004>;
+		interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 143 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp1_clks R8A7793_CLK_CMT0>;
+		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
+
+		renesas,channels-mask = <0x60>;
+
+		status = "disabled";
+	};
+
+	cmt1: timer@e6130000 {
+		compatible = "renesas,cmt-48-r8a7793", "renesas,cmt-48-gen2";
+		reg = <0 0xe6130000 0 0x1004>;
+		interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 121 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 122 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 123 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 124 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 125 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 126 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 127 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7793_CLK_CMT1>;
+		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
+
+		renesas,channels-mask = <0xff>;
+
+		status = "disabled";
+	};
+
+	irqc0: interrupt-controller@e61c0000 {
+		compatible = "renesas,irqc-r8a7793", "renesas,irqc";
+		#interrupt-cells = <2>;
+		interrupt-controller;
+		reg = <0 0xe61c0000 0 0x200>;
+		interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 1 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 2 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 3 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 12 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 13 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 14 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 15 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 16 IRQ_TYPE_LEVEL_HIGH>,
+			     <0 17 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp4_clks R8A7793_CLK_IRQC>;
+		power-domains = <&cpg_clocks>;
+	};
+
+	scif0: serial@e6e60000 {
+		compatible = "renesas,scif-r8a7793", "renesas,scif";
+		reg = <0 0xe6e60000 0 64>;
+		interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7793_CLK_SCIF0>;
+		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
+	scif1: serial@e6e68000 {
+		compatible = "renesas,scif-r8a7793", "renesas,scif";
+		reg = <0 0xe6e68000 0 64>;
+		interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp7_clks R8A7793_CLK_SCIF1>;
+		clock-names = "sci_ick";
+		power-domains = <&cpg_clocks>;
+		status = "disabled";
+	};
+
+	ether: ethernet@ee700000 {
+		compatible = "renesas,ether-r8a7793";
+		reg = <0 0xee700000 0 0x400>;
+		interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp8_clks R8A7793_CLK_ETHER>;
+		power-domains = <&cpg_clocks>;
+		phy-mode = "rmii";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	clocks {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/* External root clock */
+		extal_clk: extal_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			/* This value must be overridden by the board. */
+			clock-frequency = <0>;
+			clock-output-names = "extal";
+		};
+
+		/* Special CPG clocks */
+		cpg_clocks: cpg_clocks@e6150000 {
+			compatible = "renesas,r8a7793-cpg-clocks",
+				     "renesas,rcar-gen2-cpg-clocks";
+			reg = <0 0xe6150000 0 0x1000>;
+			clocks = <&extal_clk>;
+			#clock-cells = <1>;
+			clock-output-names = "main", "pll0", "pll1", "pll3",
+					     "lb", "qspi", "sdh", "sd0", "z",
+					     "rcan", "adsp";
+			#power-domain-cells = <0>;
+		};
+
+		/* Variable factor clocks */
+		sd2_clk: sd2_clk@e6150078 {
+			compatible = "renesas,r8a7793-div6-clock",
+				     "renesas,cpg-div6-clock";
+			reg = <0 0xe6150078 0 4>;
+			clocks = <&pll1_div2_clk>;
+			#clock-cells = <0>;
+			clock-output-names = "sd2";
+		};
+		sd3_clk: sd3_clk@e615026c {
+			compatible = "renesas,r8a7793-div6-clock",
+				     "renesas,cpg-div6-clock";
+			reg = <0 0xe615026c 0 4>;
+			clocks = <&pll1_div2_clk>;
+			#clock-cells = <0>;
+			clock-output-names = "sd3";
+		};
+		mmc0_clk: mmc0_clk@e6150240 {
+			compatible = "renesas,r8a7793-div6-clock",
+				     "renesas,cpg-div6-clock";
+			reg = <0 0xe6150240 0 4>;
+			clocks = <&pll1_div2_clk>;
+			#clock-cells = <0>;
+			clock-output-names = "mmc0";
+		};
+
+		/* Fixed factor clocks */
+		pll1_div2_clk: pll1_div2_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <2>;
+			clock-mult = <1>;
+			clock-output-names = "pll1_div2";
+		};
+		zg_clk: zg_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <5>;
+			clock-mult = <1>;
+			clock-output-names = "zg";
+		};
+		zx_clk: zx_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <3>;
+			clock-mult = <1>;
+			clock-output-names = "zx";
+		};
+		zs_clk: zs_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <6>;
+			clock-mult = <1>;
+			clock-output-names = "zs";
+		};
+		hp_clk: hp_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <12>;
+			clock-mult = <1>;
+			clock-output-names = "hp";
+		};
+		p_clk: p_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <24>;
+			clock-mult = <1>;
+			clock-output-names = "p";
+		};
+		rclk_clk: rclk_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+			#clock-cells = <0>;
+			clock-div = <(48 * 1024)>;
+			clock-mult = <1>;
+			clock-output-names = "rclk";
+		};
+		mp_clk: mp_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&pll1_div2_clk>;
+			#clock-cells = <0>;
+			clock-div = <15>;
+			clock-mult = <1>;
+			clock-output-names = "mp";
+		};
+		cp_clk: cp_clk {
+			compatible = "fixed-factor-clock";
+			clocks = <&extal_clk>;
+			#clock-cells = <0>;
+			clock-div = <2>;
+			clock-mult = <1>;
+			clock-output-names = "cp";
+		};
+
+		/* Gate clocks */
+		mstp1_clks: mstp1_clks@e6150134 {
+			compatible = "renesas,r8a7793-mstp-clocks",
+				     "renesas,cpg-mstp-clocks";
+			reg = <0 0xe6150134 0 4>, <0 0xe6150038 0 4>;
+			clocks = <&zs_clk>, <&zs_clk>, <&zs_clk>, <&p_clk>,
+				 <&zg_clk>, <&zs_clk>, <&zs_clk>, <&zs_clk>,
+				 <&p_clk>, <&p_clk>, <&rclk_clk>, <&cp_clk>,
+				 <&zs_clk>, <&zs_clk>, <&zs_clk>;
+			#clock-cells = <1>;
+			clock-indices = <
+				R8A7793_CLK_VCP0 R8A7793_CLK_VPC0
+				R8A7793_CLK_SSP1 R8A7793_CLK_TMU1
+				R8A7793_CLK_3DG R8A7793_CLK_2DDMAC
+				R8A7793_CLK_FDP1_1 R8A7793_CLK_FDP1_0
+				R8A7793_CLK_TMU3 R8A7793_CLK_TMU2
+				R8A7793_CLK_CMT0 R8A7793_CLK_TMU0
+				R8A7793_CLK_VSP1_DU1 R8A7793_CLK_VSP1_DU0
+				R8A7793_CLK_VSP1_S
+			>;
+			clock-output-names =
+				"vcp0", "vpc0", "ssp_dev", "tmu1",
+				"pvrsrvkm", "tddmac", "fdp1", "fdp0",
+				"tmu3", "tmu2", "cmt0", "tmu0", "vsp1-du1",
+				"vsp1-du0", "vsps";
+		};
+		mstp3_clks: mstp3_clks@e615013c {
+			compatible = "renesas,r8a7793-mstp-clocks",
+				     "renesas,cpg-mstp-clocks";
+			reg = <0 0xe615013c 0 4>, <0 0xe6150048 0 4>;
+			clocks = <&cp_clk>, <&sd3_clk>, <&sd2_clk>,
+				 <&cpg_clocks R8A7793_CLK_SD0>, <&mmc0_clk>,
+				 <&hp_clk>, <&mp_clk>, <&hp_clk>, <&mp_clk>,
+				 <&rclk_clk>, <&hp_clk>, <&hp_clk>;
+			#clock-cells = <1>;
+			clock-indices = <
+				R8A7793_CLK_TPU0 R8A7793_CLK_SDHI2
+				R8A7793_CLK_SDHI1 R8A7793_CLK_SDHI0
+				R8A7793_CLK_MMCIF0 R8A7793_CLK_IIC0
+				R8A7793_CLK_PCIEC R8A7793_CLK_IIC1
+				R8A7793_CLK_SSUSB R8A7793_CLK_CMT1
+				R8A7793_CLK_USBDMAC0 R8A7793_CLK_USBDMAC1
+			>;
+			clock-output-names =
+				"tpu0", "sdhi2", "sdhi1", "sdhi0", "mmcif0",
+				"i2c7", "pciec", "i2c8", "ssusb", "cmt1",
+				"usbdmac0", "usbdmac1";
+		};
+		mstp4_clks: mstp4_clks@e6150140 {
+			compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
+			reg = <0 0xe6150140 0 4>, <0 0xe615004c 0 4>;
+			clocks = <&cp_clk>;
+			#clock-cells = <1>;
+			clock-indices = <R8A7793_CLK_IRQC>;
+			clock-output-names = "irqc";
+		};
+		mstp7_clks: mstp7_clks@e615014c {
+			compatible = "renesas,r8a7793-mstp-clocks",
+				     "renesas,cpg-mstp-clocks";
+			reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
+			clocks = <&mp_clk>,  <&hp_clk>, <&zs_clk>, <&p_clk>,
+				 <&p_clk>, <&zs_clk>, <&zs_clk>, <&p_clk>,
+				 <&p_clk>, <&p_clk>, <&p_clk>, <&zx_clk>,
+				 <&zx_clk>, <&zx_clk>;
+			#clock-cells = <1>;
+			clock-indices = <
+				R8A7793_CLK_EHCI R8A7793_CLK_HSUSB
+				R8A7793_CLK_HSCIF2 R8A7793_CLK_SCIF5
+				R8A7793_CLK_SCIF4 R8A7793_CLK_HSCIF1
+				R8A7793_CLK_HSCIF0 R8A7793_CLK_SCIF3
+				R8A7793_CLK_SCIF2 R8A7793_CLK_SCIF1
+				R8A7793_CLK_SCIF0 R8A7793_CLK_DU1
+				R8A7793_CLK_DU0 R8A7793_CLK_LVDS0
+			>;
+			clock-output-names =
+				"ehci", "hsusb", "hscif2", "scif5", "scif4",
+				"hscif1", "hscif0", "scif3", "scif2",
+				"scif1", "scif0", "du1", "du0", "lvds0";
+		};
+		mstp8_clks: mstp8_clks@e6150990 {
+			compatible = "renesas,r8a7793-mstp-clocks",
+				     "renesas,cpg-mstp-clocks";
+			reg = <0 0xe6150990 0 4>, <0 0xe61509a0 0 4>;
+			clocks = <&zx_clk>, <&zg_clk>, <&zg_clk>, <&zg_clk>,
+				 <&p_clk>, <&zs_clk>, <&zs_clk>;
+			#clock-cells = <1>;
+			clock-indices = <
+				R8A7793_CLK_IPMMU_SGX R8A7793_CLK_VIN2
+				R8A7793_CLK_VIN1 R8A7793_CLK_VIN0
+				R8A7793_CLK_ETHER R8A7793_CLK_SATA1
+				R8A7793_CLK_SATA0
+			>;
+			clock-output-names =
+				"ipmmu_sgx", "vin2", "vin1", "vin0", "ether",
+				"sata1", "sata0";
+		};
+	};
+
+};
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
new file mode 100644
index 0000000..d4dd5a3
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -0,0 +1,102 @@
+/*
+ * Device Tree Source for the SILK board
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014-2015 Renesas Solutions Corp.
+ * Copyright (C) 2014-2015 Cogent Embedded, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "r8a7794.dtsi"
+
+/ {
+	model = "SILK";
+	compatible = "renesas,silk", "renesas,r8a7794";
+
+	aliases {
+		serial0 = &scif2;
+	};
+
+	chosen {
+		bootargs = "ignore_loglevel rw root=/dev/nfs ip=dhcp";
+		stdout-path = &scif2;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x40000000>;
+	};
+
+	d3_3v: regulator@0 {
+		compatible = "regulator-fixed";
+		regulator-name = "D3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-boot-on;
+		regulator-always-on;
+	};
+};
+
+&extal_clk {
+	clock-frequency = <20000000>;
+};
+
+&pfc {
+	scif2_pins: serial2 {
+		renesas,groups = "scif2_data";
+		renesas,function = "scif2";
+	};
+
+	ether_pins: ether {
+		renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
+		renesas,function = "eth";
+	};
+
+	phy1_pins: phy1 {
+		renesas,groups = "intc_irq8";
+		renesas,function = "intc";
+	};
+
+	mmcif0_pins: mmcif0 {
+		renesas,groups = "mmc_data8", "mmc_ctrl";
+		renesas,function = "mmc";
+	};
+};
+
+&scif2 {
+	pinctrl-0 = <&scif2_pins>;
+	pinctrl-names = "default";
+
+	status = "okay";
+};
+
+&ether {
+	pinctrl-0 = <&ether_pins &phy1_pins>;
+	pinctrl-names = "default";
+
+	phy-handle = <&phy1>;
+	renesas,ether-link-active-low;
+	status = "okay";
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+		interrupt-parent = <&irqc0>;
+		interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+		micrel,led-mode = <1>;
+	};
+};
+
+&mmcif0 {
+	pinctrl-0 = <&mmcif0_pins>;
+	pinctrl-names = "default";
+
+	vmmc-supply = <&d3_3v>;
+	vqmmc-supply = <&d3_3v>;
+	bus-width = <8>;
+	non-removable;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index b738194..97c8e9a 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -39,7 +39,7 @@
 	};
 
 	gic: interrupt-controller@f1001000 {
-		compatible = "arm,cortex-a7-gic";
+		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
 		#address-cells = <0>;
 		interrupt-controller;
@@ -57,6 +57,7 @@
 			     <0 143 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp1_clks R8A7794_CLK_CMT0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		renesas,channels-mask = <0x60>;
 
@@ -76,6 +77,7 @@
 			     <0 127 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_CMT1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 
 		renesas,channels-mask = <0xff>;
 
@@ -106,6 +108,13 @@
 			     <0 16 IRQ_TYPE_LEVEL_HIGH>,
 			     <0 17 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp4_clks R8A7794_CLK_IRQC>;
+		power-domains = <&cpg_clocks>;
+	};
+
+	pfc: pin-controller@e6060000 {
+		compatible = "renesas,pfc-r8a7794";
+		reg = <0 0xe6060000 0 0x11c>;
+		#gpio-range-cells = <3>;
 	};
 
 	dmac0: dma-controller@e6700000 {
@@ -134,6 +143,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7794_CLK_SYS_DMAC0>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -164,6 +174,7 @@
 				"ch12", "ch13", "ch14";
 		clocks = <&mstp2_clks R8A7794_CLK_SYS_DMAC1>;
 		clock-names = "fck";
+		power-domains = <&cpg_clocks>;
 		#dma-cells = <1>;
 		dma-channels = <15>;
 	};
@@ -176,6 +187,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x21>, <&dmac0 0x22>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -187,6 +199,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x25>, <&dmac0 0x26>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -198,6 +211,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x27>, <&dmac0 0x28>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -209,6 +223,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -220,6 +235,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -231,6 +247,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x23>, <&dmac0 0x24>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -242,6 +259,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -253,6 +271,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -264,6 +283,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -275,6 +295,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -286,6 +307,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -297,6 +319,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -308,6 +331,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -319,6 +343,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -330,6 +355,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -341,6 +367,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -352,6 +379,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -363,6 +391,7 @@
 		clock-names = "sci_ick";
 		dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
 		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -371,17 +400,31 @@
 		reg = <0 0xee700000 0 0x400>;
 		interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp8_clks R8A7794_CLK_ETHER>;
+		power-domains = <&cpg_clocks>;
 		phy-mode = "rmii";
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
 	};
 
+	mmcif0: mmc@ee200000 {
+		compatible = "renesas,mmcif-r8a7794", "renesas,sh-mmcif";
+		reg = <0 0xee200000 0 0x80>;
+		interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&mstp3_clks R8A7794_CLK_MMCIF0>;
+		dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
+		dma-names = "tx", "rx";
+		power-domains = <&cpg_clocks>;
+		reg-io-width = <4>;
+		status = "disabled";
+	};
+
 	sdhi0: sd@ee100000 {
 		compatible = "renesas,sdhi-r8a7794";
 		reg = <0 0xee100000 0 0x200>;
 		interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -390,6 +433,7 @@
 		reg = <0 0xee140000 0 0x100>;
 		interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_SDHI1>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -398,6 +442,7 @@
 		reg = <0 0xee160000 0 0x100>;
 		interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&mstp3_clks R8A7794_CLK_SDHI2>;
+		power-domains = <&cpg_clocks>;
 		status = "disabled";
 	};
 
@@ -424,6 +469,7 @@
 			#clock-cells = <1>;
 			clock-output-names = "main", "pll0", "pll1", "pll3",
 					     "lb", "qspi", "sdh", "sd0", "z";
+			#power-domain-cells = <0>;
 		};
 		/* Variable factor clocks */
 		sd2_clk: sd2_clk@e6150078 {
diff --git a/arch/arm/boot/dts/rk3066a-bqcurie2.dts b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
index b299b26..c027375 100644
--- a/arch/arm/boot/dts/rk3066a-bqcurie2.dts
+++ b/arch/arm/boot/dts/rk3066a-bqcurie2.dts
@@ -49,6 +49,7 @@
 	compatible = "mundoreader,bq-curie2", "rockchip,rk3066a";
 
 	memory {
+		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
 
diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts
index 0a7304b..bae965c 100644
--- a/arch/arm/boot/dts/rk3066a-marsboard.dts
+++ b/arch/arm/boot/dts/rk3066a-marsboard.dts
@@ -48,6 +48,7 @@
 	compatible = "haoyu,marsboard-rk3066", "rockchip,rk3066a";
 
 	memory {
+		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
 
@@ -201,6 +202,18 @@
 	status = "okay";
 };
 
+&usbphy {
+	status = "okay";
+};
+
+&usb_host {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
+
 &wdt {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/rk3066a-rayeager.dts b/arch/arm/boot/dts/rk3066a-rayeager.dts
index 3ac1511..e36383c 100644
--- a/arch/arm/boot/dts/rk3066a-rayeager.dts
+++ b/arch/arm/boot/dts/rk3066a-rayeager.dts
@@ -48,6 +48,7 @@
 	compatible = "chipspark,rayeager-px2", "rockchip,rk3066a";
 
 	memory {
+		device_type = "memory";
 		reg = <0x60000000 0x40000000>;
 	};
 
@@ -459,6 +460,10 @@
 	status = "okay";
 };
 
+&usbphy {
+	status = "okay";
+};
+
 &usb_otg {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/rk3066a.dtsi b/arch/arm/boot/dts/rk3066a.dtsi
index d32229b..946f187 100644
--- a/arch/arm/boot/dts/rk3066a.dtsi
+++ b/arch/arm/boot/dts/rk3066a.dtsi
@@ -169,6 +169,28 @@
 		clock-names = "timer", "pclk";
 	};
 
+	usbphy: phy {
+		compatible = "rockchip,rk3066a-usb-phy", "rockchip,rk3288-usb-phy";
+		rockchip,grf = <&grf>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+
+		usbphy0: usb-phy0 {
+			#phy-cells = <0>;
+			reg = <0x17c>;
+			clocks = <&cru SCLK_OTGPHY0>;
+			clock-names = "phyclk";
+		};
+
+		usbphy1: usb-phy1 {
+			#phy-cells = <0>;
+			reg = <0x188>;
+			clocks = <&cru SCLK_OTGPHY1>;
+			clock-names = "phyclk";
+		};
+	};
+
 	pinctrl: pinctrl {
 		compatible = "rockchip,rk3066a-pinctrl";
 		rockchip,grf = <&grf>;
diff --git a/arch/arm/boot/dts/rk3188-radxarock.dts b/arch/arm/boot/dts/rk3188-radxarock.dts
index 42faa19..d2180e5 100644
--- a/arch/arm/boot/dts/rk3188-radxarock.dts
+++ b/arch/arm/boot/dts/rk3188-radxarock.dts
@@ -48,6 +48,7 @@
 	compatible = "radxa,rock", "rockchip,rk3188";
 
 	memory {
+		device_type = "memory";
 		reg = <0x60000000 0x80000000>;
 	};
 
@@ -358,6 +359,10 @@
 	status = "okay";
 };
 
+&usbphy {
+	status = "okay";
+};
+
 &usb_host {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi
index 0f23aed..3163042 100644
--- a/arch/arm/boot/dts/rk3188.dtsi
+++ b/arch/arm/boot/dts/rk3188.dtsi
@@ -130,6 +130,28 @@
 		#reset-cells = <1>;
 	};
 
+	usbphy: phy {
+		compatible = "rockchip,rk3188-usb-phy", "rockchip,rk3288-usb-phy";
+		rockchip,grf = <&grf>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+
+		usbphy0: usb-phy0 {
+			#phy-cells = <0>;
+			reg = <0x10c>;
+			clocks = <&cru SCLK_OTGPHY0>;
+			clock-names = "phyclk";
+		};
+
+		usbphy1: usb-phy1 {
+			#phy-cells = <0>;
+			reg = <0x11c>;
+			clocks = <&cru SCLK_OTGPHY1>;
+			clock-names = "phyclk";
+		};
+	};
+
 	pinctrl: pinctrl {
 		compatible = "rockchip,rk3188-pinctrl";
 		rockchip,grf = <&grf>;
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 844a6fb..f6d2e78 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -43,6 +43,7 @@
 
 / {
 	memory {
+		device_type = "memory";
 		reg = <0x0 0x80000000>;
 	};
 
diff --git a/arch/arm/boot/dts/rk3288-firefly.dtsi b/arch/arm/boot/dts/rk3288-firefly.dtsi
index 0b42372..20fa0ef 100644
--- a/arch/arm/boot/dts/rk3288-firefly.dtsi
+++ b/arch/arm/boot/dts/rk3288-firefly.dtsi
@@ -44,6 +44,7 @@
 
 / {
 	memory {
+		device_type = "memory";
 		reg = <0 0x80000000>;
 	};
 
@@ -213,6 +214,8 @@
 		regulator-max-microvolt = <1350000>;
 		regulator-always-on;
 		regulator-boot-on;
+		regulator-enable-ramp-delay = <300>;
+		regulator-ramp-delay = <8000>;
 		vin-supply = <&vcc_sys>;
 	};
 
diff --git a/arch/arm/boot/dts/rk3288-popmetal.dts b/arch/arm/boot/dts/rk3288-popmetal.dts
index d582811..f82b956 100644
--- a/arch/arm/boot/dts/rk3288-popmetal.dts
+++ b/arch/arm/boot/dts/rk3288-popmetal.dts
@@ -49,6 +49,7 @@
 	compatible = "chipspark,popmetal-rk3288", "rockchip,rk3288";
 
 	memory{
+		device_type = "memory";
 		reg = <0 0x80000000>;
 	};
 
diff --git a/arch/arm/boot/dts/rk3288-r89.dts b/arch/arm/boot/dts/rk3288-r89.dts
new file mode 100644
index 0000000..14b9fc7
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-r89.dts
@@ -0,0 +1,413 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include <dt-bindings/pwm/pwm.h>
+#include "rk3288.dtsi"
+
+/ {
+	compatible = "netxeon,r89", "rockchip,rk3288";
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000>;
+	};
+
+	ext_gmac: external-gmac-clock {
+		compatible = "fixed-clock";
+		clock-frequency = <125000000>;
+		clock-output-names = "ext_gmac";
+		#clock-cells = <0>;
+	};
+
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwrbtn>;
+
+		button@0 {
+			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+			linux,code = <116>;
+			label = "GPIO Key Power";
+			linux,input-type = <1>;
+			gpio-key,wakeup = <1>;
+			debounce-interval = <100>;
+		};
+	};
+
+	vcc_host: vcc-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 14 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&host_vbus_drv>;
+		regulator-name = "vcc_host";
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	vcc_otg: vcc-otg-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&otg_vbus_drv>;
+		regulator-name = "vcc_otg";
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	vcc_sdmmc: sdmmc-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "sdmmc-supply";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		gpio = <&gpio7 11 GPIO_ACTIVE_LOW>;
+		startup-delay-us = <100000>;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_sys: sys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "sys-supply";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&cpu0 {
+	cpu0-supply = <&vdd_cpu>;
+};
+
+&gmac {
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rgmii";
+	clock_in_out = "input";
+	snps,reset-gpio = <&gpio4 7 0>;
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 1000000>;
+	assigned-clocks = <&cru SCLK_MAC>;
+	assigned-clock-parents = <&ext_gmac>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&rgmii_pins>;
+	tx_delay = <0x30>;
+	rx_delay = <0x10>;
+	status = "ok";
+};
+
+&hdmi {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	vdd_cpu: pmic@40 {
+		compatible = "silergy,syr827";
+		reg = <0x40>;
+		fcs,suspend-voltage-selector = <1>;
+		regulator-name = "VDD_CPU";
+		regulator-enable-ramp-delay = <300>;
+		regulator-min-microvolt = <850000>;
+		regulator-max-microvolt = <1350000>;
+		regulator-ramp-delay = <8000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	vdd_gpu: pmic@41 {
+		compatible = "silergy,syr828";
+		reg = <0x41>;
+		fcs,suspend-voltage-selector = <1>;
+		regulator-name = "VDD_GPU";
+		regulator-enable-ramp-delay = <300>;
+		regulator-min-microvolt = <850000>;
+		regulator-max-microvolt = <1350000>;
+		regulator-ramp-delay = <8000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	rtc@51 {
+		compatible = "haoyu,hym8563";
+		reg = <0x51>;
+		#clock-cells = <0>;
+		clock-output-names = "xin32k";
+		interrupt-parent = <&gpio0>;
+		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int>;
+	};
+
+	act8846: pmic@5a {
+		compatible = "active-semi,act8846";
+		reg = <0x5a>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_vsel>, <&pwr_hold>;
+		system-power-controller;
+
+		regulators {
+			vcc_ddr: REG1 {
+				regulator-name = "VCC_DDR";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
+				regulator-always-on;
+			};
+
+			vcc_io: REG2 {
+				regulator-name = "VCC_IO";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd_log: REG3 {
+				regulator-name = "VDD_LOG";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			vcc_20: REG4 {
+				regulator-name = "VCC_20";
+				regulator-min-microvolt = <2000000>;
+				regulator-max-microvolt = <2000000>;
+				regulator-always-on;
+			};
+
+			vccio_sd: REG5 {
+				regulator-name = "VCCIO_SD";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd10_lcd: REG6 {
+				regulator-name = "VDD10_LCD";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			vcc_wl: REG7 {
+				regulator-name = "VCC_WL";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vcca_33: REG8 {
+				regulator-name = "VCCA_33";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vcc_lan: REG9 {
+				regulator-name = "VCC_LAN";
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-always-on;
+			};
+
+			vdd_10: REG10 {
+				regulator-name = "VDD_10";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-always-on;
+			};
+
+			vcc_18: REG11 {
+				regulator-name = "VCC_18";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+
+			vcc18_lcd: REG12 {
+				regulator-name = "VCC18_LCD";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
+			};
+		};
+	};
+};
+
+&i2c5 {
+	status = "okay";
+};
+
+&pinctrl {
+	pcfg_output_high: pcfg-output-high {
+		output-high;
+	};
+
+	pcfg_output_low: pcfg-output-low {
+		output-low;
+	};
+
+	act8846 {
+		pmic_vsel: pmic-vsel {
+			rockchip,pins = <7 1 RK_FUNC_GPIO &pcfg_output_low>;
+		};
+
+		pwr_hold: pwr-hold {
+			rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_output_high>;
+		};
+	};
+
+	buttons {
+		pwrbtn: pwrbtn {
+			rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	pmic {
+		pmic_int: pmic-int {
+			rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	usb {
+		host_vbus_drv: host-vbus-drv {
+			rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		otg_vbus_drv: otg-vbus-drv {
+			rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&pwm0 {
+	status = "okay";
+};
+
+&saradc {
+	vref-supply = <&vcc_18>;
+	status = "okay";
+};
+
+&sdmmc {
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	card-detect-delay = <200>;
+	disable-wp;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd &sdmmc_bus4>;
+	vmmc-supply = <&vcc_sdmmc>;
+	vqmmc-supply = <&vccio_sd>;
+	status = "okay";
+};
+
+&tsadc {
+	rockchip,hw-tshut-mode = <0>;
+	rockchip,hw-tshut-polarity = <0>;
+	status = "okay";
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&uart3 {
+	status = "okay";
+};
+
+&uart4 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_host1 {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
+
+&vopb {
+	status = "okay";
+};
+
+&vopb_mmu {
+	status = "okay";
+};
+
+&vopl {
+	status = "okay";
+};
+
+&vopl_mmu {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
new file mode 100644
index 0000000..136d650
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
@@ -0,0 +1,232 @@
+/*
+ * Google Veyron (and derivatives) board device tree source
+ * Chromebook specific parts
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/rockchip,rk808.h>
+#include <dt-bindings/input/input.h>
+#include "rk3288-veyron.dtsi"
+#include "rk3288-veyron-sdmmc.dtsi"
+
+/ {
+	aliases {
+		/* Assign 20 so we don't get confused w/ builtin ones */
+		i2c20 = &i2c_tunnel;
+	};
+
+	gpio-charger {
+		compatible = "gpio-charger";
+		charger-type = "mains";
+		gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ac_present_ap>;
+	};
+
+	/* A non-regulated voltage from power supply or battery */
+	vccsys: vccsys {
+		compatible = "regulator-fixed";
+		regulator-name = "vccsys";
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vcc33_sys: vcc33-sys {
+		vin-supply = <&vccsys>;
+	};
+
+	vcc_5v: vcc-5v {
+		vin-supply = <&vccsys>;
+	};
+
+	/* This turns on vbus for host1 (dwc2) */
+	vcc5_host1: vcc5-host1-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 11 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&host1_pwr_en>;
+		regulator-name = "vcc5_host1";
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	/* This turns on vbus for otg for host mode (dwc2) */
+	vcc5v_otg: vcc5v-otg-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&usbotg_pwren_h>;
+		regulator-name = "vcc5_host2";
+		regulator-always-on;
+		regulator-boot-on;
+	};
+};
+
+&gpio_keys {
+	pinctrl-0 = <&pwr_key_l &ap_lid_int_l>;
+	lid {
+		label = "Lid";
+		gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+		gpio-key,wakeup;
+		linux,code = <0>; /* SW_LID */
+		linux,input-type = <5>; /* EV_SW */
+		debounce-interval = <1>;
+	};
+};
+
+&rk808 {
+	vcc11-supply = <&vcc_5v>;
+
+	regulators {
+		vcc33_ccd: LDO_REG8 {
+			regulator-name = "vcc33_ccd";
+			regulator-always-on;
+			regulator-boot-on;
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-state-mem {
+				regulator-on-in-suspend;
+				regulator-suspend-microvolt = <3300000>;
+			};
+		};
+	};
+};
+
+&spi0 {
+	status = "okay";
+
+	cros_ec: ec@0 {
+		compatible = "google,cros-ec-spi";
+		reg = <0>;
+		google,cros-ec-spi-pre-delay = <30>;
+		interrupt-parent = <&gpio7>;
+		interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ec_int>;
+		spi-max-frequency = <3000000>;
+
+		i2c_tunnel: i2c-tunnel {
+			compatible = "google,cros-ec-i2c-tunnel";
+			google,remote-bus = <0>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+	};
+};
+
+&i2c4 {
+	trackpad@15 {
+		compatible = "elan,ekth3000";
+		reg = <0x15>;
+		interrupt-parent = <&gpio7>;
+		interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&trackpad_int>;
+		vcc-supply = <&vcc33_io>;
+		wakeup-source;
+	};
+};
+
+&pinctrl {
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&global_pwroff
+
+		/* Wake only */
+		&suspend_l_wake
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&global_pwroff
+
+		/* Sleep only */
+		&suspend_l_sleep
+	>;
+
+	buttons {
+		ap_lid_int_l: ap-lid-int-l {
+			rockchip,pins = <0 6 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	charger {
+		ac_present_ap: ac-present-ap {
+			rockchip,pins = <0 8 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	cros-ec {
+		ec_int: ec-int {
+			rockchip,pins = <7 7 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	suspend {
+		suspend_l_wake: suspend-l-wake {
+			rockchip,pins = <0 17 RK_FUNC_GPIO &pcfg_output_low>;
+		};
+
+		suspend_l_sleep: suspend-l-sleep {
+			rockchip,pins = <0 17 RK_FUNC_GPIO &pcfg_output_high>;
+		};
+	};
+
+	trackpad {
+		trackpad_int: trackpad-int {
+			rockchip,pins = <7 3 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	usb-host {
+		host1_pwr_en: host1-pwr-en {
+			rockchip,pins = <0 11 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		usbotg_pwren_h: usbotg-pwren-h {
+			rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+#include "cros-ec-keyboard.dtsi"
diff --git a/arch/arm/boot/dts/rk3288-veyron-jerry.dts b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
new file mode 100644
index 0000000..60bd6e9
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-jerry.dts
@@ -0,0 +1,197 @@
+/*
+ * Google Veyron Jerry Rev 3+ board device tree source
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3288-veyron-chromebook.dtsi"
+#include "cros-ec-sbs.dtsi"
+
+/ {
+	model = "Google Jerry";
+	compatible = "google,veyron-jerry-rev7", "google,veyron-jerry-rev6",
+		     "google,veyron-jerry-rev5", "google,veyron-jerry-rev4",
+		     "google,veyron-jerry-rev3", "google,veyron-jerry",
+		     "google,veyron", "rockchip,rk3288";
+
+	panel_regulator: panel-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio7 14 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&lcd_enable_h>;
+		regulator-name = "panel_regulator";
+		vin-supply = <&vcc33_sys>;
+	};
+
+	vcc18_lcd: vcc18-lcd {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&avdd_1v8_disp_en>;
+		regulator-name = "vcc18_lcd";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc18_wl>;
+	};
+
+	backlight_regulator: backlight-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_pwr_en>;
+		regulator-name = "backlight_regulator";
+		vin-supply = <&vcc33_sys>;
+		startup-delay-us = <15000>;
+	};
+};
+
+&rk808 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pmic_int_l>;
+
+	regulators {
+		mic_vcc: LDO_REG2 {
+			regulator-name = "mic_vcc";
+			regulator-always-on;
+			regulator-boot-on;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+			regulator-state-mem {
+				regulator-off-in-suspend;
+			};
+		};
+	};
+};
+
+&sdmmc {
+	disable-wp;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd_disabled &sdmmc_cd_gpio
+			&sdmmc_bus4>;
+};
+
+&vcc_5v {
+	enable-active-high;
+	gpio = <&gpio7 21 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&drv_5v>;
+};
+
+&vcc50_hdmi {
+	enable-active-high;
+	gpio = <&gpio5 19 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&vcc50_hdmi_en>;
+};
+
+&pinctrl {
+	backlight {
+		bl_pwr_en: bl_pwr_en {
+			rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	buck-5v {
+		drv_5v: drv-5v {
+			rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	hdmi {
+		vcc50_hdmi_en: vcc50-hdmi-en {
+			rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	lcd {
+		lcd_enable_h: lcd-en {
+			rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		avdd_1v8_disp_en: avdd-1v8-disp-en {
+			rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		dvs_1: dvs-1 {
+			rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
+		dvs_2: dvs-2 {
+			rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+	};
+};
+
+&i2c4 {
+	status = "okay";
+
+	/*
+	 * Trackpad pin control is shared between Elan and Synaptics devices
+	 * so we have to pull it up to the bus level.
+	 */
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c4_xfer &trackpad_int>;
+
+	trackpad@15 {
+		/*
+		 * Remove the inherited pinctrl settings to avoid clashing
+		 * with bus-wide ones.
+		 */
+		/delete-property/pinctrl-names;
+		/delete-property/pinctrl-0;
+	};
+
+	trackpad@2c {
+		compatible = "hid-over-i2c";
+		interrupt-parent = <&gpio7>;
+		interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+		reg = <0x2c>;
+		hid-descr-addr = <0x0020>;
+		vcc-supply = <&vcc33_io>;
+		wakeup-source;
+	};
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
new file mode 100644
index 0000000..8fd8ef2
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -0,0 +1,230 @@
+/*
+ * Google Veyron Minnie Rev 0+ board device tree source
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3288-veyron-chromebook.dtsi"
+
+/ {
+	model = "Google Minnie";
+	compatible = "google,veyron-minnie-rev4", "google,veyron-minnie-rev3",
+		     "google,veyron-minnie-rev2", "google,veyron-minnie-rev1",
+		     "google,veyron-minnie-rev0", "google,veyron-minnie",
+		     "google,veyron", "rockchip,rk3288";
+
+	backlight_regulator: backlight-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_pwr_en>;
+		regulator-name = "backlight_regulator";
+		vin-supply = <&vcc33_sys>;
+		startup-delay-us = <15000>;
+	};
+
+	panel_regulator: panel-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio7 14 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&lcd_enable_h>;
+		regulator-name = "panel_regulator";
+		vin-supply = <&vcc33_sys>;
+	};
+
+	vcc18_lcd: vcc18-lcd {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&avdd_1v8_disp_en>;
+		regulator-name = "vcc18_lcd";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc18_wl>;
+	};
+};
+
+&gpio_keys {
+	pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
+
+	volum_down {
+		label = "Volume_down";
+		gpios = <&gpio5 11 GPIO_ACTIVE_LOW>;
+		linux,code = <KEY_VOLUMEDOWN>;
+		debounce-interval = <100>;
+	};
+
+	volum_up {
+		label = "Volume_up";
+		gpios = <&gpio5 10 GPIO_ACTIVE_LOW>;
+		linux,code = <KEY_VOLUMEUP>;
+		debounce-interval = <100>;
+	};
+};
+
+&i2c_tunnel {
+	battery: bq27500@55 {
+		compatible = "ti,bq27500";
+		reg = <0x55>;
+	};
+};
+
+&i2c3 {
+	status = "okay";
+
+	clock-frequency = <400000>;
+	i2c-scl-falling-time-ns = <50>;
+	i2c-scl-rising-time-ns = <300>;
+};
+
+&rk808 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pmic_int_l &dvs_1 &dvs_2>;
+
+	regulators {
+		vcc33_touch: LDO_REG2 {
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-name = "vcc33_touch";
+			regulator-state-mem {
+				regulator-off-in-suspend;
+			};
+		};
+
+		vcc5v_touch: SWITCH_REG2 {
+			regulator-name = "vcc5v_touch";
+			regulator-state-mem {
+				regulator-off-in-suspend;
+			};
+		};
+	};
+};
+
+&sdmmc {
+	disable-wp;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd_disabled &sdmmc_cd_gpio
+			&sdmmc_bus4>;
+};
+
+&vcc_5v {
+	enable-active-high;
+	gpio = <&gpio7 21 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&drv_5v>;
+};
+
+&vcc50_hdmi {
+	enable-active-high;
+	gpio = <&gpio5 19 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&vcc50_hdmi_en>;
+};
+
+&pinctrl {
+	backlight {
+		bl_pwr_en: bl_pwr_en {
+			rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	buck-5v {
+		drv_5v: drv-5v {
+			rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	buttons {
+		volum_down_l: volum-down-l {
+			rockchip,pins = <5 11 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+
+		volum_up_l: volum-up-l {
+			rockchip,pins = <5 10 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	hdmi {
+		vcc50_hdmi_en: vcc50-hdmi-en {
+			rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	lcd {
+		lcd_enable_h: lcd-en {
+			rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		avdd_1v8_disp_en: avdd-1v8-disp-en {
+			rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		dvs_1: dvs-1 {
+			rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
+		dvs_2: dvs-2 {
+			rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+	};
+
+	prochot {
+		gpio_prochot: gpio-prochot {
+			rockchip,pins = <2 8 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	touchscreen {
+		touch_int: touch-int {
+			rockchip,pins = <2 14 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		touch_rst: touch-rst {
+			rockchip,pins = <2 15 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-pinky.dts b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
new file mode 100644
index 0000000..94b56e3
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-pinky.dts
@@ -0,0 +1,128 @@
+/*
+ * Google Veyron Pinky Rev 2 board device tree source
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3288-veyron-chromebook.dtsi"
+#include "cros-ec-sbs.dtsi"
+
+/ {
+	model = "Google Pinky";
+	compatible = "google,veyron-pinky-rev2", "google,veyron-pinky",
+		     "google,veyron", "rockchip,rk3288";
+
+	/delete-node/emmc-pwrseq;
+};
+
+&emmc {
+	/*
+	 * Use a pullup instead of a drive since the output is 3.3V and
+	 * really should be 1.8V (oops).  The external pulldown will help
+	 * bring the voltage down if we only drive with a pullup here.
+	 * Therefore disable the powerseq (and actual reset) for pinky.
+	 */
+	/delete-property/mmc-pwrseq;
+	pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8 &emmc_reset>;
+};
+
+&gpio_keys {
+	pinctrl-0 = <&pwr_key_h &ap_lid_int_l>;
+
+	power {
+		gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+/* Touchpad connector */
+&i2c3 {
+	status = "okay";
+
+	clock-frequency = <400000>;
+	i2c-scl-falling-time-ns = <50>;
+	i2c-scl-rising-time-ns = <300>;
+};
+
+&pinctrl {
+	buttons {
+		pwr_key_h: pwr-key-h {
+			rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	emmc {
+		emmc_reset: emmc-reset {
+			rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	sdmmc {
+		sdmmc_wp_gpio: sdmmc-wp-gpio {
+			rockchip,pins = <7 10 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+};
+
+&rk808 {
+	regulators {
+		vcc18_lcd: SWITCH_REG2 {
+			regulator-always-on;
+			regulator-boot-on;
+			regulator-name = "vcc18_lcd";
+			regulator-state-mem {
+				regulator-off-in-suspend;
+			};
+		};
+	};
+};
+
+&sdmmc {
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd_disabled &sdmmc_cd_gpio
+		     &sdmmc_wp_gpio &sdmmc_bus4>;
+	wp-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+};
+
+&tsadc {
+	/* Some connection is flaky, making the tsadc hang the system */
+	status = "disabled";
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
new file mode 100644
index 0000000..b5334ec
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-sdmmc.dtsi
@@ -0,0 +1,122 @@
+/*
+ * Google Veyron (and derivatives) fragment for sdmmc cards
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&io_domains {
+	sdcard-supply = <&vccio_sd>;
+};
+
+&pinctrl {
+	sdmmc {
+		/*
+		 * We run sdmmc at max speed; bump up drive strength.
+		 * We also have external pulls, so disable the internal ones.
+		 */
+		sdmmc_bus4: sdmmc-bus4 {
+			rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
+					<6 17 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
+					<6 18 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
+					<6 19 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+		};
+
+		sdmmc_clk: sdmmc-clk {
+			rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+		};
+
+		sdmmc_cmd: sdmmc-cmd {
+			rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+		};
+
+		/*
+		 * Builtin CD line is hooked to ground to prevent JTAG at boot
+		 * (and also to get the voltage rail correct).
+		 * Configure gpio6_C6 as GPIO so dw_mmc builtin CD doesn't
+		 * think there's a card inserted.
+		 */
+		sdmmc_cd_disabled: sdmmc-cd-disabled {
+			rockchip,pins = <6 22 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		/* This is where we actually hook up CD */
+		sdmmc_cd_gpio: sdmmc-cd-gpio {
+			rockchip,pins = <7 5 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&rk808 {
+	vcc9-supply = <&vcc_5v>;
+
+	regulators {
+		vccio_sd: LDO_REG4 {
+			regulator-name = "vccio_sd";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-state-mem {
+				regulator-off-in-suspend;
+			};
+		};
+
+		vcc33_sd: LDO_REG5 {
+			regulator-name = "vcc33_sd";
+			regulator-min-microvolt = <3300000>;
+			regulator-max-microvolt = <3300000>;
+			regulator-state-mem {
+				regulator-off-in-suspend;
+			};
+		};
+	};
+};
+
+&sdmmc {
+	status = "okay";
+
+	bus-width = <4>;
+	cap-mmc-highspeed;
+	cap-sd-highspeed;
+	card-detect-delay = <200>;
+	cd-gpios = <&gpio7 5 GPIO_ACTIVE_LOW>;
+	num-slots = <1>;
+	vmmc-supply = <&vcc33_sd>;
+	vqmmc-supply = <&vccio_sd>;
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron-speedy.dts b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
new file mode 100644
index 0000000..a7ea7d0
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron-speedy.dts
@@ -0,0 +1,155 @@
+/*
+ * Google Veyron Speedy Rev 1+ board device tree source
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3288-veyron-chromebook.dtsi"
+#include "cros-ec-sbs.dtsi"
+
+/ {
+	model = "Google Speedy";
+	compatible = "google,veyron-speedy-rev9", "google,veyron-speedy-rev8",
+		     "google,veyron-speedy-rev7", "google,veyron-speedy-rev6",
+		     "google,veyron-speedy-rev5", "google,veyron-speedy-rev4",
+		     "google,veyron-speedy-rev3", "google,veyron-speedy-rev2",
+		     "google,veyron-speedy", "google,veyron", "rockchip,rk3288";
+
+	panel_regulator: panel-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio7 14 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&lcd_enable_h>;
+		regulator-name = "panel_regulator";
+		vin-supply = <&vcc33_sys>;
+	};
+
+	vcc18_lcd: vcc18-lcd {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio2 13 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&avdd_1v8_disp_en>;
+		regulator-name = "vcc18_lcd";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc18_wl>;
+	};
+
+	backlight_regulator: backlight-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio2 12 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&bl_pwr_en>;
+		regulator-name = "backlight_regulator";
+		vin-supply = <&vcc33_sys>;
+		startup-delay-us = <15000>;
+	};
+};
+
+&rk808 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pmic_int_l>;
+};
+
+&sdmmc {
+	disable-wp;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdmmc_clk &sdmmc_cmd &sdmmc_cd_disabled &sdmmc_cd_gpio
+			&sdmmc_bus4>;
+};
+
+&vcc_5v {
+	enable-active-high;
+	gpio = <&gpio7 21 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&drv_5v>;
+};
+
+&vcc50_hdmi {
+	enable-active-high;
+	gpio = <&gpio5 19 GPIO_ACTIVE_HIGH>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&vcc50_hdmi_en>;
+};
+
+&pinctrl {
+	backlight {
+		bl_pwr_en: bl_pwr_en {
+			rockchip,pins = <2 12 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	buck-5v {
+		drv_5v: drv-5v {
+			rockchip,pins = <7 21 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	hdmi {
+		vcc50_hdmi_en: vcc50-hdmi-en {
+			rockchip,pins = <5 19 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	lcd {
+		lcd_enable_h: lcd-en {
+			rockchip,pins = <7 14 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		avdd_1v8_disp_en: avdd-1v8-disp-en {
+			rockchip,pins = <2 13 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	pmic {
+		dvs_1: dvs-1 {
+			rockchip,pins = <7 12 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+
+		dvs_2: dvs-2 {
+			rockchip,pins = <7 15 RK_FUNC_GPIO &pcfg_pull_down>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
new file mode 100644
index 0000000..2fa7a0d
--- /dev/null
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -0,0 +1,563 @@
+/*
+ * Google Veyron (and derivatives) board device tree source
+ *
+ * Copyright 2015 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *  Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/rockchip,rk808.h>
+#include <dt-bindings/input/input.h>
+#include "rk3288.dtsi"
+
+/ {
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x80000000>;
+	};
+
+	gpio_keys: gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwr_key_l>;
+		power {
+			label = "Power";
+			gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+			linux,code = <KEY_POWER>;
+			debounce-interval = <100>;
+			gpio-key,wakeup;
+		};
+	};
+
+	gpio-restart {
+		compatible = "gpio-restart";
+		gpios = <&gpio0 13 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ap_warm_reset_h>;
+		priority = <200>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		pinctrl-0 = <&emmc_reset>;
+		pinctrl-names = "default";
+		reset-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>;
+	};
+
+	io_domains: io-domains {
+		compatible = "rockchip,rk3288-io-voltage-domain";
+		rockchip,grf = <&grf>;
+
+		bb-supply = <&vcc33_io>;
+		dvp-supply = <&vcc_18>;
+		flash0-supply = <&vcc18_flashio>;
+		gpio1830-supply = <&vcc33_io>;
+		gpio30-supply = <&vcc33_io>;
+		lcdc-supply = <&vcc33_lcd>;
+		wifi-supply = <&vcc18_wl>;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&rk808 RK808_CLKOUT1>;
+		clock-names = "ext_clock";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bt_enable_l>, <&wifi_enable_h>;
+
+		/*
+		 * On the module itself this is one of these (depending
+		 * on the actual card populated):
+		 * - SDIO_RESET_L_WL_REG_ON
+		 * - PDN (power down when low)
+		 */
+		reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
+	};
+
+	vcc_5v: vcc-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_5v";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+	};
+
+	vcc33_sys: vcc33-sys {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc33_sys";
+		regulator-always-on;
+		regulator-boot-on;
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	vcc50_hdmi: vcc50-hdmi {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc50_hdmi";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_5v>;
+	};
+};
+
+&cpu0 {
+	cpu0-supply = <&vdd_cpu>;
+};
+
+&emmc {
+	status = "okay";
+
+	broken-cd;
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	disable-wp;
+	mmc-pwrseq = <&emmc_pwrseq>;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+};
+
+&hdmi {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	clock-frequency = <400000>;
+	i2c-scl-falling-time-ns = <50>;		/* 2.5ns measured */
+	i2c-scl-rising-time-ns = <100>;		/* 45ns measured */
+
+	rk808: pmic@1b {
+		compatible = "rockchip,rk808";
+		reg = <0x1b>;
+		clock-output-names = "xin32k", "wifibt_32kin";
+		interrupt-parent = <&gpio0>;
+		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pmic_int_l>;
+		rockchip,system-power-controller;
+		wakeup-source;
+		#clock-cells = <1>;
+
+		vcc1-supply = <&vcc33_sys>;
+		vcc2-supply = <&vcc33_sys>;
+		vcc3-supply = <&vcc33_sys>;
+		vcc4-supply = <&vcc33_sys>;
+		vcc6-supply = <&vcc_5v>;
+		vcc7-supply = <&vcc33_sys>;
+		vcc8-supply = <&vcc33_sys>;
+		vcc12-supply = <&vcc_18>;
+		vddio-supply = <&vcc33_io>;
+
+		regulators {
+			vdd_cpu: DCDC_REG1 {
+				regulator-name = "vdd_arm";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <750000>;
+				regulator-max-microvolt = <1450000>;
+				regulator-ramp-delay = <6001>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vdd_gpu: DCDC_REG2 {
+				regulator-name = "vdd_gpu";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <800000>;
+				regulator-max-microvolt = <1250000>;
+				regulator-ramp-delay = <6001>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1000000>;
+				};
+			};
+
+			vcc135_ddr: DCDC_REG3 {
+				regulator-name = "vcc135_ddr";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+				};
+			};
+
+			/*
+			 * vcc_18 has several aliases (vcc18_flashio and
+			 * vcc18_wl).  We'll add those aliases here just to
+			 * make it easier to follow the schematic.  The signals
+			 * are actually hooked together and only separated for
+			 * power measurement purposes.
+			 */
+			vcc18_wl: vcc18_flashio: vcc_18: DCDC_REG4 {
+				regulator-name = "vcc_18";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <1800000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1800000>;
+				};
+			};
+
+			/*
+			 * Note that both vcc33_io and vcc33_pmuio are always
+			 * powered together. To simplify the logic in the dts
+			 * we just refer to vcc33_io every time something is
+			 * powered from vcc33_pmuio. In fact, on later boards
+			 * (such as danger) they're the same net.
+			 */
+			vcc33_io: LDO_REG1 {
+				regulator-name = "vcc33_io";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <3300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <3300000>;
+				};
+			};
+
+			vdd_10: LDO_REG3 {
+				regulator-name = "vdd_10";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <1000000>;
+				regulator-state-mem {
+					regulator-on-in-suspend;
+					regulator-suspend-microvolt = <1000000>;
+				};
+			};
+
+			vdd10_lcd_pwren_h: LDO_REG7 {
+				regulator-name = "vdd10_lcd_pwren_h";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-min-microvolt = <2500000>;
+				regulator-max-microvolt = <2500000>;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+
+			vcc33_lcd: SWITCH_REG1 {
+				regulator-name = "vcc33_lcd";
+				regulator-always-on;
+				regulator-boot-on;
+				regulator-state-mem {
+					regulator-off-in-suspend;
+				};
+			};
+		};
+	};
+};
+
+&i2c1 {
+	status = "okay";
+
+	clock-frequency = <400000>;
+	i2c-scl-falling-time-ns = <50>;		/* 2.5ns measured */
+	i2c-scl-rising-time-ns = <100>;		/* 40ns measured */
+
+	tpm: tpm@20 {
+		compatible = "infineon,slb9645tt";
+		reg = <0x20>;
+		powered-while-suspended;
+	};
+};
+
+&i2c2 {
+	status = "okay";
+
+	/* 100kHz since 4.7k resistors don't rise fast enough */
+	clock-frequency = <100000>;
+	i2c-scl-falling-time-ns = <50>;		/* 10ns measured */
+	i2c-scl-rising-time-ns = <800>;		/* 600ns measured */
+};
+
+&i2c4 {
+	status = "okay";
+
+	clock-frequency = <400000>;
+	i2c-scl-falling-time-ns = <50>;		/* 11ns measured */
+	i2c-scl-rising-time-ns = <300>;		/* 225ns measured */
+};
+
+&i2c5 {
+	status = "okay";
+
+	clock-frequency = <100000>;
+	i2c-scl-falling-time-ns = <300>;
+	i2c-scl-rising-time-ns = <1000>;
+};
+
+&pwm1 {
+	status = "okay";
+};
+
+&sdio0 {
+	status = "okay";
+
+	broken-cd;
+	bus-width = <4>;
+	cap-sd-highspeed;
+	cap-sdio-irq;
+	keep-power-in-suspend;
+	mmc-pwrseq = <&sdio_pwrseq>;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>;
+	vmmc-supply = <&vcc33_sys>;
+	vqmmc-supply = <&vcc18_wl>;
+};
+
+&spi2 {
+	status = "okay";
+
+	rx-sample-delay-ns = <12>;
+};
+
+&tsadc {
+	status = "okay";
+
+	rockchip,hw-tshut-mode = <1>; /* tshut mode 0:CRU 1:GPIO */
+	rockchip,hw-tshut-polarity = <1>; /* tshut polarity 0:LOW 1:HIGH */
+};
+
+&uart0 {
+	status = "okay";
+
+	/* We need to go faster than 24MHz, so adjust clock parents / rates */
+	assigned-clocks = <&cru SCLK_UART0>;
+	assigned-clock-rates = <48000000>;
+
+	/* Pins don't include flow control by default; add that in */
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_xfer &uart0_cts &uart0_rts>;
+};
+
+&uart1 {
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+
+	needs-reset-on-resume;
+};
+
+&usb_host1 {
+	status = "okay";
+};
+
+&usb_otg {
+	status = "okay";
+
+	assigned-clocks = <&cru SCLK_USBPHY480M_SRC>;
+	assigned-clock-parents = <&cru SCLK_OTGPHY0>;
+	dr_mode = "host";
+};
+
+&vopb {
+	status = "okay";
+};
+
+&vopb_mmu {
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
+
+&pinctrl {
+	pinctrl-names = "default", "sleep";
+	pinctrl-0 = <
+		/* Common for sleep and wake, but no owners */
+		&global_pwroff
+	>;
+	pinctrl-1 = <
+		/* Common for sleep and wake, but no owners */
+		&global_pwroff
+	>;
+
+	pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+		bias-disable;
+		drive-strength = <8>;
+	};
+
+	pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+		bias-pull-up;
+		drive-strength = <8>;
+	};
+
+	pcfg_output_high: pcfg-output-high {
+		output-high;
+	};
+
+	pcfg_output_low: pcfg-output-low {
+		output-low;
+	};
+
+	buttons {
+		pwr_key_l: pwr-key-l {
+			rockchip,pins = <0 5 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	emmc {
+		emmc_reset: emmc-reset {
+			rockchip,pins = <2 9 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		/*
+		 * We run eMMC at max speed; bump up drive strength.
+		 * We also have external pulls, so disable the internal ones.
+		 */
+		emmc_clk: emmc-clk {
+			rockchip,pins = <3 18 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+		};
+
+		emmc_cmd: emmc-cmd {
+			rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+		};
+
+		emmc_bus8: emmc-bus8 {
+			rockchip,pins = <3 0 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 1 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 2 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 3 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 5 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 6 RK_FUNC_2 &pcfg_pull_none_drv_8ma>,
+					<3 7 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+		};
+	};
+
+	pmic {
+		pmic_int_l: pmic-int-l {
+			rockchip,pins = <RK_GPIO0 4 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	reboot {
+		ap_warm_reset_h: ap-warm-reset-h {
+			rockchip,pins = <RK_GPIO0 13 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	recovery-switch {
+		rec_mode_l: rec-mode-l {
+			rockchip,pins = <0 9 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	sdio0 {
+		wifi_enable_h: wifienable-h {
+			rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		/* NOTE: mislabelled on schematic; should be bt_enable_h */
+		bt_enable_l: bt-enable-l {
+			rockchip,pins = <4 29 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		/*
+		 * We run sdio0 at max speed; bump up drive strength.
+		 * We also have external pulls, so disable the internal ones.
+		 */
+		sdio0_bus4: sdio0-bus4 {
+			rockchip,pins = <4 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
+					<4 21 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
+					<4 22 RK_FUNC_1 &pcfg_pull_none_drv_8ma>,
+					<4 23 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+		};
+
+		sdio0_cmd: sdio0-cmd {
+			rockchip,pins = <4 24 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+		};
+
+		sdio0_clk: sdio0-clk {
+			rockchip,pins = <4 25 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+		};
+	};
+
+	tpm {
+		tpm_int_h: tpm-int-h {
+			rockchip,pins = <7 4 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	/*
+	 * On Marvell-based hardware this is a no-connect.  Make sure we enable
+	 * the pullup so that the line doesn't float.  The pullup shouldn't
+	 * hurt on Broadcom-based hardware since the other side is actively
+	 * driving this signal.  As proof: we've already got a pullup on RX.
+	 */
+	uart0 {
+		uart0_cts: uart0-cts {
+			rockchip,pins = <4 18 RK_FUNC_1 &pcfg_pull_up>;
+		};
+	};
+
+	write-protect {
+		fw_wp_ap: fw-wp-ap {
+			rockchip,pins = <7 6 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 22316d0..906e938 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -78,6 +78,7 @@
 			     <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
 	};
 
 	cpus {
@@ -110,19 +111,19 @@
 			clock-latency = <40000>;
 			clocks = <&cru ARMCLK>;
 		};
-		cpu@501 {
+		cpu1: cpu@501 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a12";
 			reg = <0x501>;
 			resets = <&cru SRST_CORE1>;
 		};
-		cpu@502 {
+		cpu2: cpu@502 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a12";
 			reg = <0x502>;
 			resets = <&cru SRST_CORE2>;
 		};
-		cpu@503 {
+		cpu3: cpu@503 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a12";
 			reg = <0x503>;
@@ -168,6 +169,26 @@
 		};
 	};
 
+	reserved-memory {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		/*
+		 * The rk3288 cannot use the memory area above 0xfe000000
+		 * for dma operations for some reason. While there is
+		 * probably a better solution available somewhere, we
+		 * haven't found it yet. Devices with 2GB of RAM are not
+		 * affected, but this issue prevents devices with 4GB from
+		 * booting. So to make those devices at least bootable,
+		 * block this area for the time being until the real
+		 * solution is found.
+		 */
+		dma-unusable@fe000000 {
+			reg = <0xfe000000 0x1000000>;
+		};
+	};
+
 	xin24m: oscillator {
 		compatible = "fixed-clock";
 		clock-frequency = <24000000>;
@@ -447,6 +468,8 @@
 			"mac_clk_rx", "mac_clk_tx",
 			"clk_mac_ref", "clk_mac_refout",
 			"aclk_mac", "pclk_mac";
+		resets = <&cru SRST_MAC>;
+		reset-names = "stmmaceth";
 		status = "disabled";
 	};
 
@@ -626,7 +649,7 @@
 		compatible = "rockchip,rk3288-wdt", "snps,dw-wdt";
 		reg = <0xff800000 0x100>;
 		clocks = <&cru PCLK_WDT>;
-		interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm/boot/dts/rk3xxx.dtsi b/arch/arm/boot/dts/rk3xxx.dtsi
index a2ae9f3..4497d28 100644
--- a/arch/arm/boot/dts/rk3xxx.dtsi
+++ b/arch/arm/boot/dts/rk3xxx.dtsi
@@ -172,6 +172,13 @@
 		interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru HCLK_OTG0>;
 		clock-names = "otg";
+		dr_mode = "otg";
+		g-np-tx-fifo-size = <16>;
+		g-rx-fifo-size = <275>;
+		g-tx-fifo-size = <256 128 128 64 64 32>;
+		g-use-dma;
+		phys = <&usbphy0>;
+		phy-names = "usb2-phy";
 		status = "disabled";
 	};
 
@@ -181,6 +188,9 @@
 		interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru HCLK_OTG1>;
 		clock-names = "otg";
+		dr_mode = "host";
+		phys = <&usbphy1>;
+		phy-names = "usb2-phy";
 		status = "disabled";
 	};
 
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
new file mode 100644
index 0000000..034cd48
--- /dev/null
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -0,0 +1,926 @@
+/*
+ * sama5d2.dtsi - Device Tree Include file for SAMA5D2 family SoC
+ *
+ *  Copyright (C) 2015 Atmel,
+ *                2015 Ludovic Desroches <ludovic.desroches@atmel.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "skeleton.dtsi"
+#include <dt-bindings/dma/at91.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/clock/at91.h>
+
+/ {
+	model = "Atmel SAMA5D2 family SoC";
+	compatible = "atmel,sama5d2";
+	interrupt-parent = <&aic>;
+
+	aliases {
+		serial0 = &uart1;
+		serial1 = &uart3;
+		tcb0 = &tcb0;
+		tcb1 = &tcb1;
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a5";
+			reg = <0>;
+			next-level-cache = <&L2>;
+		};
+	};
+
+	memory {
+		reg = <0x20000000 0x20000000>;
+	};
+
+	clocks {
+		slow_xtal: slow_xtal {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+		};
+
+		main_xtal: main_xtal {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <0>;
+		};
+
+		adc_op_clk: adc_op_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <1000000>;
+		};
+	};
+
+	ns_sram: sram@00200000 {
+		compatible = "mmio-sram";
+		reg = <0x00200000 0x20000>;
+	};
+
+	ahb {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		usb0: gadget@00300000 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			compatible = "atmel,sama5d3-udc";
+			reg = <0x00300000 0x100000
+			       0xfc02c000 0x400>;
+			interrupts = <42 IRQ_TYPE_LEVEL_HIGH 2>;
+			clocks = <&udphs_clk>, <&utmi>;
+			clock-names = "pclk", "hclk";
+			status = "disabled";
+
+			ep0 {
+				reg = <0>;
+				atmel,fifo-size = <64>;
+				atmel,nb-banks = <1>;
+			};
+
+			ep1 {
+				reg = <1>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <3>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep2 {
+				reg = <2>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <3>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep3 {
+				reg = <3>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep4 {
+				reg = <4>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep5 {
+				reg = <5>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep6 {
+				reg = <6>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep7 {
+				reg = <7>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-dma;
+				atmel,can-isoc;
+			};
+
+			ep8 {
+				reg = <8>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep9 {
+				reg = <9>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep10 {
+				reg = <10>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep11 {
+				reg = <11>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep12 {
+				reg = <12>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep13 {
+				reg = <13>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep14 {
+				reg = <14>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+
+			ep15 {
+				reg = <15>;
+				atmel,fifo-size = <1024>;
+				atmel,nb-banks = <2>;
+				atmel,can-isoc;
+			};
+		};
+
+		usb1: ohci@00400000 {
+			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
+			reg = <0x00400000 0x100000>;
+			interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
+			clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
+			clock-names = "ohci_clk", "hclk", "uhpck";
+			status = "disabled";
+		};
+
+		usb2: ehci@00500000 {
+			compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
+			reg = <0x00500000 0x100000>;
+			interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
+			clocks = <&utmi>, <&uhphs_clk>;
+			clock-names = "usb_clk", "ehci_clk";
+			status = "disabled";
+		};
+
+		L2: cache-controller@00a00000 {
+			compatible = "arm,pl310-cache";
+			reg = <0x00a00000 0x1000>;
+			interrupts = <63 IRQ_TYPE_LEVEL_HIGH 4>;
+			cache-unified;
+			cache-level = <2>;
+		};
+
+		apb {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges;
+
+			ramc0: ramc@f000c000 {
+				compatible = "atmel,sama5d3-ddramc";
+				reg = <0xf000c000 0x200>;
+				clocks = <&ddrck>, <&mpddr_clk>;
+				clock-names = "ddrck", "mpddr";
+			};
+
+			dma0: dma-controller@f0010000 {
+				compatible = "atmel,sama5d4-dma";
+				reg = <0xf0010000 0x1000>;
+				interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
+				#dma-cells = <1>;
+				clocks = <&dma0_clk>;
+				clock-names = "dma_clk";
+			};
+
+			pmc: pmc@f0014000 {
+				compatible = "atmel,sama5d2-pmc";
+				reg = <0xf0014000 0x160>;
+				interrupts = <74 IRQ_TYPE_LEVEL_HIGH 7>;
+				interrupt-controller;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				#interrupt-cells = <1>;
+
+				main_rc_osc: main_rc_osc {
+					compatible = "atmel,at91sam9x5-clk-main-rc-osc";
+					#clock-cells = <0>;
+					interrupt-parent = <&pmc>;
+					interrupts = <AT91_PMC_MOSCRCS>;
+					clock-frequency = <12000000>;
+					clock-accuracy = <100000000>;
+				};
+
+				main_osc: main_osc {
+					compatible = "atmel,at91rm9200-clk-main-osc";
+					#clock-cells = <0>;
+					interrupt-parent = <&pmc>;
+					interrupts = <AT91_PMC_MOSCS>;
+					clocks = <&main_xtal>;
+				};
+
+				main: mainck {
+					compatible = "atmel,at91sam9x5-clk-main";
+					#clock-cells = <0>;
+					interrupt-parent = <&pmc>;
+					interrupts = <AT91_PMC_MOSCSELS>;
+					clocks = <&main_rc_osc &main_osc>;
+				};
+
+				plla: pllack {
+					compatible = "atmel,sama5d3-clk-pll";
+					#clock-cells = <0>;
+					interrupt-parent = <&pmc>;
+					interrupts = <AT91_PMC_LOCKA>;
+					clocks = <&main>;
+					reg = <0>;
+					atmel,clk-input-range = <12000000 12000000>;
+					#atmel,pll-clk-output-range-cells = <4>;
+					atmel,pll-clk-output-ranges = <600000000 1200000000 0 0>;
+				};
+
+				plladiv: plladivck {
+					compatible = "atmel,at91sam9x5-clk-plldiv";
+					#clock-cells = <0>;
+					clocks = <&plla>;
+				};
+
+				utmi: utmick {
+					compatible = "atmel,at91sam9x5-clk-utmi";
+					#clock-cells = <0>;
+					interrupt-parent = <&pmc>;
+					interrupts = <AT91_PMC_LOCKU>;
+					clocks = <&main>;
+				};
+
+				mck: masterck {
+					compatible = "atmel,at91sam9x5-clk-master";
+					#clock-cells = <0>;
+					interrupt-parent = <&pmc>;
+					interrupts = <AT91_PMC_MCKRDY>;
+					clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>;
+					atmel,clk-output-range = <124000000 166000000>;
+					atmel,clk-divisors = <1 2 4 3>;
+				};
+
+				h32ck: h32mxck {
+					#clock-cells = <0>;
+					compatible = "atmel,sama5d4-clk-h32mx";
+					clocks = <&mck>;
+				};
+
+				usb: usbck {
+					compatible = "atmel,at91sam9x5-clk-usb";
+					#clock-cells = <0>;
+					clocks = <&plladiv>, <&utmi>;
+				};
+
+				prog: progck {
+					compatible = "atmel,at91sam9x5-clk-programmable";
+					#address-cells = <1>;
+					#size-cells = <0>;
+					interrupt-parent = <&pmc>;
+					clocks = <&clk32k>, <&main>, <&plladiv>, <&utmi>, <&mck>;
+
+					prog0: prog0 {
+						#clock-cells = <0>;
+						reg = <0>;
+						interrupts = <AT91_PMC_PCKRDY(0)>;
+					};
+
+					prog1: prog1 {
+						#clock-cells = <0>;
+						reg = <1>;
+						interrupts = <AT91_PMC_PCKRDY(1)>;
+					};
+
+					prog2: prog2 {
+						#clock-cells = <0>;
+						reg = <2>;
+						interrupts = <AT91_PMC_PCKRDY(2)>;
+					};
+				};
+
+				systemck {
+					compatible = "atmel,at91rm9200-clk-system";
+					#address-cells = <1>;
+					#size-cells = <0>;
+
+					ddrck: ddrck {
+						#clock-cells = <0>;
+						reg = <2>;
+						clocks = <&mck>;
+					};
+
+					lcdck: lcdck {
+						#clock-cells = <0>;
+						reg = <3>;
+						clocks = <&mck>;
+					};
+
+					uhpck: uhpck {
+						#clock-cells = <0>;
+						reg = <6>;
+						clocks = <&usb>;
+					};
+
+					udpck: udpck {
+						#clock-cells = <0>;
+						reg = <7>;
+						clocks = <&usb>;
+					};
+
+					pck0: pck0 {
+						#clock-cells = <0>;
+						reg = <8>;
+						clocks = <&prog0>;
+					};
+
+					pck1: pck1 {
+						#clock-cells = <0>;
+						reg = <9>;
+						clocks = <&prog1>;
+					};
+
+					pck2: pck2 {
+						#clock-cells = <0>;
+						reg = <10>;
+						clocks = <&prog2>;
+					};
+
+					iscck: iscck {
+						#clock-cells = <0>;
+						reg = <18>;
+						clocks = <&mck>;
+					};
+				};
+
+				periph32ck {
+					compatible = "atmel,at91sam9x5-clk-peripheral";
+					#address-cells = <1>;
+					#size-cells = <0>;
+					clocks = <&h32ck>;
+
+					macb0_clk: macb0_clk {
+						#clock-cells = <0>;
+						reg = <5>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					tdes_clk: tdes_clk {
+						#clock-cells = <0>;
+						reg = <11>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					matrix1_clk: matrix1_clk {
+						#clock-cells = <0>;
+						reg = <14>;
+					};
+
+					hsmc_clk: hsmc_clk {
+						#clock-cells = <0>;
+						reg = <17>;
+					};
+
+					pioA_clk: pioA_clk {
+						#clock-cells = <0>;
+						reg = <18>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					flx0_clk: flx0_clk {
+						#clock-cells = <0>;
+						reg = <19>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					flx1_clk: flx1_clk {
+						#clock-cells = <0>;
+						reg = <20>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					flx2_clk: flx2_clk {
+						#clock-cells = <0>;
+						reg = <21>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					flx3_clk: flx3_clk {
+						#clock-cells = <0>;
+						reg = <22>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					flx4_clk: flx4_clk {
+						#clock-cells = <0>;
+						reg = <23>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					uart0_clk: uart0_clk {
+						#clock-cells = <0>;
+						reg = <24>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					uart1_clk: uart1_clk {
+						#clock-cells = <0>;
+						reg = <25>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					uart2_clk: uart2_clk {
+						#clock-cells = <0>;
+						reg = <26>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					uart3_clk: uart3_clk {
+						#clock-cells = <0>;
+						reg = <27>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					uart4_clk: uart4_clk {
+						#clock-cells = <0>;
+						reg = <28>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					twi0_clk: twi0_clk {
+						reg = <29>;
+						#clock-cells = <0>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					twi1_clk: twi1_clk {
+						#clock-cells = <0>;
+						reg = <30>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					spi0_clk: spi0_clk {
+						#clock-cells = <0>;
+						reg = <33>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					spi1_clk: spi1_clk {
+						#clock-cells = <0>;
+						reg = <34>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					tcb0_clk: tcb0_clk {
+						#clock-cells = <0>;
+						reg = <35>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					tcb1_clk: tcb1_clk {
+						#clock-cells = <0>;
+						reg = <36>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					pwm_clk: pwm_clk {
+						#clock-cells = <0>;
+						reg = <38>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					adc_clk: adc_clk {
+						#clock-cells = <0>;
+						reg = <40>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					uhphs_clk: uhphs_clk {
+						#clock-cells = <0>;
+						reg = <41>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					udphs_clk: udphs_clk {
+						#clock-cells = <0>;
+						reg = <42>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					ssc0_clk: ssc0_clk {
+						#clock-cells = <0>;
+						reg = <43>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					ssc1_clk: ssc1_clk {
+						#clock-cells = <0>;
+						reg = <44>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					trng_clk: trng_clk {
+						#clock-cells = <0>;
+						reg = <47>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+
+					classd_clk: classd_clk {
+						#clock-cells = <0>;
+						reg = <59>;
+						atmel,clk-output-range = <0 83000000>;
+					};
+				};
+
+				periph64ck {
+					compatible = "atmel,at91sam9x5-clk-peripheral";
+					#address-cells = <1>;
+					#size-cells = <0>;
+					clocks = <&mck>;
+
+					dma0_clk: dma0_clk {
+						#clock-cells = <0>;
+						reg = <6>;
+					};
+
+					dma1_clk: dma1_clk {
+						#clock-cells = <0>;
+						reg = <7>;
+					};
+
+					aes_clk: aes_clk {
+						#clock-cells = <0>;
+						reg = <9>;
+					};
+
+					aesb_clk: aesb_clk {
+						#clock-cells = <0>;
+						reg = <10>;
+					};
+
+					sha_clk: sha_clk {
+						#clock-cells = <0>;
+						reg = <12>;
+					};
+
+					mpddr_clk: mpddr_clk {
+						#clock-cells = <0>;
+						reg = <13>;
+					};
+
+					matrix0_clk: matrix0_clk {
+						#clock-cells = <0>;
+						reg = <15>;
+					};
+
+					sdmmc0_hclk: sdmmc0_hclk {
+						#clock-cells = <0>;
+						reg = <31>;
+					};
+
+					sdmmc1_hclk: sdmmc1_hclk {
+						#clock-cells = <0>;
+						reg = <32>;
+					};
+
+					lcdc_clk: lcdc_clk {
+						#clock-cells = <0>;
+						reg = <45>;
+					};
+
+					isc_clk: isc_clk {
+						#clock-cells = <0>;
+						reg = <46>;
+					};
+
+					qspi0_clk: qspi0_clk {
+						#clock-cells = <0>;
+						reg = <52>;
+					};
+
+					qspi1_clk: qspi1_clk {
+						#clock-cells = <0>;
+						reg = <53>;
+					};
+				};
+			};
+
+			sha@f0028000 {
+				compatible = "atmel,at91sam9g46-sha";
+				reg = <0xf0028000 0x100>;
+				interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(30))>;
+				dma-names = "tx";
+				clocks = <&sha_clk>;
+				clock-names = "sha_clk";
+				status = "disabled";
+			};
+
+			aes@f002c000 {
+				compatible = "atmel,at91sam9g46-aes";
+				reg = <0xf002c000 0x100>;
+				interrupts = <9 IRQ_TYPE_LEVEL_HIGH 0>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(26))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(27))>;
+				dma-names = "tx", "rx";
+				clocks = <&aes_clk>;
+				clock-names = "aes_clk";
+				status = "disabled";
+			};
+
+			spi0: spi@f8000000 {
+				compatible = "atmel,at91rm9200-spi";
+				reg = <0xf8000000 0x100>;
+				interrupts = <33 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(6))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(7))>;
+				dma-names = "tx", "rx";
+				clocks = <&spi0_clk>;
+				clock-names = "spi_clk";
+				atmel,fifo-size = <16>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			macb0: ethernet@f8008000 {
+				compatible = "atmel,sama5d2-gem";
+				reg = <0xf8008000 0x1000>;
+				interrupts = <5  IRQ_TYPE_LEVEL_HIGH 3		/* Queue 0 */
+					      66 IRQ_TYPE_LEVEL_HIGH 3          /* Queue 1 */
+					      67 IRQ_TYPE_LEVEL_HIGH 3>;        /* Queue 2 */
+				#address-cells = <1>;
+				#size-cells = <0>;
+				clocks = <&macb0_clk>, <&macb0_clk>;
+				clock-names = "hclk", "pclk";
+				status = "disabled";
+			};
+
+			tcb0: timer@f800c000 {
+				compatible = "atmel,at91sam9x5-tcb";
+				reg = <0xf800c000 0x100>;
+				interrupts = <35 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
+			};
+
+			tcb1: timer@f8010000 {
+				compatible = "atmel,at91sam9x5-tcb";
+				reg = <0xf8010000 0x100>;
+				interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&tcb1_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
+			};
+
+			uart0: serial@f801c000 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0xf801c000 0x100>;
+				interrupts = <24 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&uart0_clk>;
+				clock-names = "usart";
+				status = "disabled";
+			};
+
+			uart1: serial@f8020000 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0xf8020000 0x100>;
+				interrupts = <25 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&uart1_clk>;
+				clock-names = "usart";
+				status = "disabled";
+			};
+
+			uart2: serial@f8024000 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0xf8024000 0x100>;
+				interrupts = <26 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&uart2_clk>;
+				clock-names = "usart";
+				status = "disabled";
+			};
+
+			i2c0: i2c@f8028000 {
+				compatible = "atmel,sama5d2-i2c";
+				reg = <0xf8028000 0x100>;
+				interrupts = <29 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(0))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(1))>;
+				dma-names = "tx", "rx";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				clocks = <&twi0_clk>;
+				status = "disabled";
+			};
+
+			pit: timer@f8048030 {
+				compatible = "atmel,at91sam9260-pit";
+				reg = <0xf8048030 0x10>;
+				interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
+				clocks = <&h32ck>;
+			};
+
+			sckc@f8048050 {
+				compatible = "atmel,at91sam9x5-sckc";
+				reg = <0xf8048050 0x4>;
+
+				slow_rc_osc: slow_rc_osc {
+					compatible = "atmel,at91sam9x5-clk-slow-rc-osc";
+					#clock-cells = <0>;
+					clock-frequency = <32768>;
+					clock-accuracy = <250000000>;
+					atmel,startup-time-usec = <75>;
+				};
+
+				slow_osc: slow_osc {
+					compatible = "atmel,at91sam9x5-clk-slow-osc";
+					#clock-cells = <0>;
+					clocks = <&slow_xtal>;
+					atmel,startup-time-usec = <1200000>;
+				};
+
+				clk32k: slowck {
+					compatible = "atmel,at91sam9x5-clk-slow";
+					#clock-cells = <0>;
+					clocks = <&slow_rc_osc &slow_osc>;
+				};
+			};
+
+			rtc@f80480b0 {
+				compatible = "atmel,at91rm9200-rtc";
+				reg = <0xf80480b0 0x30>;
+				interrupts = <74 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
+			};
+
+			spi1: spi@fc000000 {
+				compatible = "atmel,at91rm9200-spi";
+				reg = <0xfc000000 0x100>;
+				interrupts = <34 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(8))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(9))>;
+				dma-names = "tx", "rx";
+				clocks = <&spi1_clk>;
+				clock-names = "spi_clk";
+				atmel,fifo-size = <16>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				status = "disabled";
+			};
+
+			uart3: serial@fc008000 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0xfc008000 0x100>;
+				interrupts = <27 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&uart3_clk>;
+				clock-names = "usart";
+				status = "disabled";
+			};
+
+			uart4: serial@fc00c000 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0xfc00c000 0x100>;
+				interrupts = <28 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&uart4_clk>;
+				clock-names = "usart";
+				status = "disabled";
+			};
+
+			aic: interrupt-controller@fc020000 {
+				#interrupt-cells = <3>;
+				compatible = "atmel,sama5d2-aic";
+				interrupt-controller;
+				reg = <0xfc020000 0x200>;
+				atmel,external-irqs = <49>;
+			};
+
+			i2c1: i2c@fc028000 {
+				compatible = "atmel,sama5d2-i2c";
+				reg = <0xfc028000 0x100>;
+				interrupts = <30 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(2))>,
+				       <&dma0
+					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+					 AT91_XDMAC_DT_PERID(3))>;
+				dma-names = "tx", "rx";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				clocks = <&twi1_clk>;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 9e2444b..7fa2765 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -145,8 +145,8 @@
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf0010000 0x100>;
 				interrupts = <26 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb0_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			i2c0: i2c@f0014000 {
@@ -1259,13 +1259,15 @@
 			};
 
 			rstc@fffffe00 {
-				compatible = "atmel,at91sam9g45-rstc";
+				compatible = "atmel,sama5d3-rstc", "atmel,at91sam9g45-rstc";
 				reg = <0xfffffe00 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			shutdown-controller@fffffe10 {
 				compatible = "atmel,at91sam9x5-shdwc";
 				reg = <0xfffffe10 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fffffe30 {
@@ -1279,6 +1281,7 @@
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfffffe40 0x10>;
 				interrupts = <4 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 				atmel,watchdog-type = "hardware";
 				atmel,reset-type = "all";
 				atmel,dbg-halt;
@@ -1315,6 +1318,7 @@
 				compatible = "atmel,at91rm9200-rtc";
 				reg = <0xfffffeb0 0x30>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/sama5d3_tcb1.dtsi b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
index f7fa58f..801f974 100644
--- a/arch/arm/boot/dts/sama5d3_tcb1.dtsi
+++ b/arch/arm/boot/dts/sama5d3_tcb1.dtsi
@@ -31,8 +31,8 @@
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf8014000 0x100>;
 				interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb1_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb1_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 3ee22ee..8d1de29 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -957,8 +957,8 @@
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xf801c000 0x100>;
 				interrupts = <40 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb0_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb0_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			macb0: ethernet@f8020000 {
@@ -1185,29 +1185,20 @@
 				compatible = "atmel,at91sam9x5-tcb";
 				reg = <0xfc020000 0x100>;
 				interrupts = <41 IRQ_TYPE_LEVEL_HIGH 0>;
-				clocks = <&tcb1_clk>;
-				clock-names = "t0_clk";
+				clocks = <&tcb1_clk>, <&clk32k>;
+				clock-names = "t0_clk", "slow_clk";
 			};
 
 			adc0: adc@fc034000 {
 				compatible = "atmel,at91sam9x5-adc";
 				reg = <0xfc034000 0x100>;
 				interrupts = <44 IRQ_TYPE_LEVEL_HIGH 5>;
-				pinctrl-names = "default";
-				pinctrl-0 = <
-					/* external trigger is conflict with USBA_VBUS */
-					&pinctrl_adc0_ad0
-					&pinctrl_adc0_ad1
-					&pinctrl_adc0_ad2
-					&pinctrl_adc0_ad3
-					&pinctrl_adc0_ad4
-					>;
 				clocks = <&adc_clk>,
 					 <&adc_op_clk>;
 				clock-names = "adc_clk", "adc_op_clk";
 				atmel,adc-channels-used = <0x01f>;
 				atmel,adc-startup-time = <40>;
-				atmel,adc-use-external;
+				atmel,adc-use-external-triggers;
 				atmel,adc-vref = <3000>;
 				atmel,adc-res = <8 10>;
 				atmel,adc-sample-hold-time = <11>;
@@ -1277,13 +1268,15 @@
 			};
 
 			rstc@fc068600 {
-				compatible = "atmel,at91sam9g45-rstc";
+				compatible = "atmel,sama5d3-rstc", "atmel,at91sam9g45-rstc";
 				reg = <0xfc068600 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			shdwc@fc068610 {
 				compatible = "atmel,at91sam9x5-shdwc";
 				reg = <0xfc068610 0x10>;
+				clocks = <&clk32k>;
 			};
 
 			pit: timer@fc068630 {
@@ -1296,6 +1289,7 @@
 			watchdog@fc068640 {
 				compatible = "atmel,at91sam9260-wdt";
 				reg = <0xfc068640 0x10>;
+				clocks = <&clk32k>;
 				status = "disabled";
 			};
 
@@ -1329,6 +1323,7 @@
 				compatible = "atmel,at91rm9200-rtc";
 				reg = <0xfc0686b0 0x30>;
 				interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&clk32k>;
 			};
 
 			dbgu: serial@fc069000 {
diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi
index 11e17c5..ff7c8f2 100644
--- a/arch/arm/boot/dts/sh73a0.dtsi
+++ b/arch/arm/boot/dts/sh73a0.dtsi
@@ -392,6 +392,9 @@
 		      <0xe605801c 0x1c>;
 		gpio-controller;
 		#gpio-cells = <2>;
+		gpio-ranges =
+			<&pfc 0 0 119>, <&pfc 128 128 37>, <&pfc 192 192 91>,
+			<&pfc 288 288 22>;
 		interrupts-extended =
 			<&irqpin0 0 0>, <&irqpin0 1 0>, <&irqpin0 2 0>, <&irqpin0 3 0>,
 			<&irqpin0 4 0>, <&irqpin0 5 0>, <&irqpin0 6 0>, <&irqpin0 7 0>,
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 80f924d..314e589 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -164,7 +164,7 @@
 						dbg_base_clk: dbg_base_clk {
 							#clock-cells = <0>;
 							compatible = "altr,socfpga-perip-clk";
-							clocks = <&main_pll>;
+							clocks = <&main_pll>, <&osc1>;
 							div-reg = <0xe8 0 9>;
 							reg = <0x50>;
 						};
@@ -318,7 +318,7 @@
 					l3_sp_clk: l3_sp_clk {
 						#clock-cells = <0>;
 						compatible = "altr,socfpga-gate-clk";
-						clocks = <&mainclk>;
+						clocks = <&l3_mp_clk>;
 						div-reg = <0x64 2 2>;
 					};
 
@@ -349,7 +349,7 @@
 					dbg_clk: dbg_clk {
 						#clock-cells = <0>;
 						compatible = "altr,socfpga-gate-clk";
-						clocks = <&dbg_base_clk>;
+						clocks = <&dbg_at_clk>;
 						div-reg = <0x68 2 2>;
 						clk-gate = <0x60 5>;
 					};
@@ -481,8 +481,37 @@
 						clocks = <&f2s_periph_ref_clk>, <&main_qspi_clk>, <&per_qspi_clk>;
 						clk-gate = <0xa0 11>;
 					};
+
+					ddr_dqs_clk_gate: ddr_dqs_clk_gate {
+						#clock-cells = <0>;
+						compatible = "altr,socfpga-gate-clk";
+						clocks = <&ddr_dqs_clk>;
+						clk-gate = <0xd8 0>;
+					};
+
+					ddr_2x_dqs_clk_gate: ddr_2x_dqs_clk_gate {
+						#clock-cells = <0>;
+						compatible = "altr,socfpga-gate-clk";
+						clocks = <&ddr_2x_dqs_clk>;
+						clk-gate = <0xd8 1>;
+					};
+
+					ddr_dq_clk_gate: ddr_dq_clk_gate {
+						#clock-cells = <0>;
+						compatible = "altr,socfpga-gate-clk";
+						clocks = <&ddr_dq_clk>;
+						clk-gate = <0xd8 2>;
+					};
+
+					h2f_user2_clk: h2f_user2_clk {
+						#clock-cells = <0>;
+						compatible = "altr,socfpga-gate-clk";
+						clocks = <&h2f_usr2_clk>;
+						clk-gate = <0xd8 3>;
+					};
+
 				};
-			};
+		};
 
 		gmac0: ethernet@ff700000 {
 			compatible = "altr,socfpga-stmmac", "snps,dwmac-3.70a", "snps,dwmac";
@@ -565,7 +594,7 @@
 			#size-cells = <0>;
 			compatible = "snps,dw-apb-gpio";
 			reg = <0xff708000 0x1000>;
-			clocks = <&per_base_clk>;
+			clocks = <&l4_mp_clk>;
 			status = "disabled";
 
 			porta: gpio-controller@0 {
@@ -585,7 +614,7 @@
 			#size-cells = <0>;
 			compatible = "snps,dw-apb-gpio";
 			reg = <0xff709000 0x1000>;
-			clocks = <&per_base_clk>;
+			clocks = <&l4_mp_clk>;
 			status = "disabled";
 
 			portb: gpio-controller@0 {
@@ -605,7 +634,7 @@
 			#size-cells = <0>;
 			compatible = "snps,dw-apb-gpio";
 			reg = <0xff70a000 0x1000>;
-			clocks = <&per_base_clk>;
+			clocks = <&l4_mp_clk>;
 			status = "disabled";
 
 			portc: gpio-controller@0 {
@@ -639,6 +668,8 @@
 			cache-level = <2>;
 			arm,tag-latency = <1 1 1>;
 			arm,data-latency = <2 1 1>;
+			prefetch-data = <1>;
+			prefetch-instr = <1>;
 		};
 
 		mmc: dwmmc0@ff704000 {
@@ -752,6 +783,7 @@
 			#reset-cells = <1>;
 			compatible = "altr,rst-mgr";
 			reg = <0xffd05000 0x1000>;
+			altr,modrst-offset = <0x10>;
 		};
 
 		usbphy0: usbphy@0 {
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index 4779b07..2340fcb 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -16,11 +16,17 @@
 
 #include "skeleton.dtsi"
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/altr,rst-mgr-a10.h>
 
 / {
 	#address-cells = <1>;
 	#size-cells = <1>;
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+	};
+
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -409,6 +415,8 @@
 			rx-fifo-depth = <16384>;
 			clocks = <&l4_mp_clk>;
 			clock-names = "stmmaceth";
+			resets = <&rst EMAC0_RESET>;
+			reset-names = "stmmaceth";
 			status = "disabled";
 		};
 
@@ -426,6 +434,8 @@
 			rx-fifo-depth = <16384>;
 			clocks = <&l4_mp_clk>;
 			clock-names = "stmmaceth";
+			resets = <&rst EMAC1_RESET>;
+			reset-names = "stmmaceth";
 			status = "disabled";
 		};
 
@@ -588,6 +598,7 @@
 			#reset-cells = <1>;
 			compatible = "altr,rst-mgr";
 			reg = <0xffd05000 0x100>;
+			altr,modrst-offset = <0x20>;
 		};
 
 		scu: snoop-control-unit@ffffc000 {
diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
index 94a0709..99aa9a1 100644
--- a/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10_socdk.dtsi
@@ -21,7 +21,8 @@
 	compatible = "altr,socfpga-arria10", "altr,socfpga";
 
 	chosen {
-		bootargs = "console=ttyS0,115200 rootwait";
+		bootargs = "earlyprintk";
+		stdout-path = "serial1:115200n8";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index ccaf417..a75a666 100644
--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -22,7 +22,8 @@
 	compatible = "altr,socfpga-arria5", "altr,socfpga";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "earlyprintk";
+		stdout-path = "serial0:115200n8";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
new file mode 100644
index 0000000..555e9ca
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_cyclone5_de0_sockit.dts
@@ -0,0 +1,111 @@
+/*
+ * Copyright Altera Corporation (C) 2015. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "socfpga_cyclone5.dtsi"
+
+/ {
+	model = "Terasic DE-0(Atlas)";
+	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
+
+	chosen {
+		bootargs = "earlyprintk";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		name = "memory";
+		device_type = "memory";
+		reg = <0x0 0x40000000>; /* 1GB */
+	};
+
+	aliases {
+		ethernet0 = &gmac1;
+	};
+
+	regulator_3_3v: 3-3-v-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "3.3V";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+	};
+
+	leds {
+		compatible = "gpio-leds";
+		hps0 {
+			label = "hps_led0";
+			gpios = <&portb 24 0>;
+			linux,default-trigger = "heartbeat";
+		};
+	};
+};
+
+&gmac1 {
+	status = "okay";
+	phy-mode = "rgmii";
+
+	txd0-skew-ps = <0>; /* -420ps */
+	txd1-skew-ps = <0>; /* -420ps */
+	txd2-skew-ps = <0>; /* -420ps */
+	txd3-skew-ps = <0>; /* -420ps */
+	rxd0-skew-ps = <420>; /* 0ps */
+	rxd1-skew-ps = <420>; /* 0ps */
+	rxd2-skew-ps = <420>; /* 0ps */
+	rxd3-skew-ps = <420>; /* 0ps */
+	txen-skew-ps = <0>; /* -420ps */
+	txc-skew-ps = <1860>; /* 960ps */
+	rxdv-skew-ps = <420>; /* 0ps */
+	rxc-skew-ps = <1680>; /* 780ps */
+
+	max-frame-size = <3800>;
+};
+
+&gpio0 {
+	status = "okay";
+};
+
+&gpio1 {
+	status = "okay";
+};
+
+&gpio2 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+	speed-mode = <0>;
+
+	adxl345: adxl345@0 {
+		compatible = "adi,adxl345";
+		reg = <0x53>;
+
+		interrupt-parent = <&portc>;
+		interrupts = <3 2>;
+	};
+};
+
+&mmc0 {
+	vmmc-supply = <&regulator_3_3v>;
+	vqmmc-supply = <&regulator_3_3v>;
+};
+
+&uart0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index 258865d..d4d0a28 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -22,7 +22,8 @@
 	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "earlyprintk";
+		stdout-path = "serial0:115200n8";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index 5e17fd1..48bf651 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -22,7 +22,8 @@
 	compatible = "altr,socfpga-cyclone5", "altr,socfpga";
 
 	chosen {
-		bootargs = "console=ttyS0,115200";
+		bootargs = "earlyprintk";
+		stdout-path = "serial0:115200n8";
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index b8f81fb..50f5e9d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -220,6 +220,13 @@
 
 		clocks {
 			compatible = "stericsson,u8500-clks";
+			/*
+			 * Registers for the CLKRST block on peripheral
+			 * groups 1, 2, 3, 5, 6,
+			 */
+			reg = <0x8012f000 0x1000>, <0x8011f000 0x1000>,
+			    <0x8000f000 0x1000>, <0xa03ff000 0x1000>,
+			    <0xa03cf000 0x1000>;
 
 			prcmu_clk: prcmu-clock {
 				#clock-cells = <1>;
@@ -287,7 +294,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <0>;
-
+			gpio-ranges = <&pinctrl 0 0 32>;
 			clocks = <&prcc_pclk 1 9>;
 		};
 
@@ -302,7 +309,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <1>;
-
+			gpio-ranges = <&pinctrl 0 32 5>;
 			clocks = <&prcc_pclk 1 9>;
 		};
 
@@ -317,7 +324,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <2>;
-
+			gpio-ranges = <&pinctrl 0 64 32>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -332,7 +339,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <3>;
-
+			gpio-ranges = <&pinctrl 0 96 2>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -347,7 +354,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <4>;
-
+			gpio-ranges = <&pinctrl 0 128 32>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -362,7 +369,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <5>;
-
+			gpio-ranges = <&pinctrl 0 160 12>;
 			clocks = <&prcc_pclk 3 8>;
 		};
 
@@ -377,7 +384,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <6>;
-
+			gpio-ranges = <&pinctrl 0 192 32>;
 			clocks = <&prcc_pclk 2 11>;
 		};
 
@@ -392,7 +399,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <7>;
-
+			gpio-ranges = <&pinctrl 0 224 7>;
 			clocks = <&prcc_pclk 2 11>;
 		};
 
@@ -407,12 +414,15 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			gpio-bank = <8>;
-
+			gpio-ranges = <&pinctrl 0 256 12>;
 			clocks = <&prcc_pclk 5 1>;
 		};
 
-		pinctrl {
+		pinctrl: pinctrl {
 			compatible = "stericsson,db8500-pinctrl";
+			nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>,
+						<&gpio4>, <&gpio5>, <&gpio6>, <&gpio7>,
+						<&gpio8>;
 			prcm = <&prcmu>;
 		};
 
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d25dba..4a21c64 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,12 +17,22 @@
 	};
 
 	aliases {
+		serial0 = &uart0;
 		serial1 = &uart1;
 		stmpe-i2c0 = &stmpe0;
 		stmpe-i2c1 = &stmpe1;
 	};
 
 	pinctrl {
+		uart0 {
+			uart0_nhk_mode: uart0_mux {
+				u0_default_mux {
+					function = "u0";
+					groups = "u0txrx_a_1", "u0ctsrts_a_1";
+				};
+			};
+		};
+
 		stmpe2401_1 {
 			stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
 				nhk_cfg1 {
@@ -73,6 +83,11 @@
 	};
 
 	i2c0 {
+		lis3lv02dl@1d {
+			/* Accelerometer */
+			compatible = "st,lis3lv02dl-accel";
+			reg = <0x1d>;
+		};
 		stmpe0: stmpe2401@43 {
 			compatible = "st,stmpe2401";
 			reg = <0x43>;
@@ -131,22 +146,30 @@
 				#gpio-cells = <2>;
 				interrupt-controller;
 				#interrupt-cells = <2>;
+				/*
+				 * This will turn off SATA so that MMC/SD
+				 * can thrive
+				 */
+				mmcsd-gpio {
+					gpio-hog;
+					gpios = <2 0x0>;
+					output-low;
+					line-name = "SATA EN";
+				};
 			};
 		};
 	};
 
 	amba {
+		/* Activate RX/TX and CTS/RTS on UART 0 */
+		uart0: uart@101fd000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_nhk_mode>;
+			status = "okay";
+		};
 		mmcsd: sdi@101f6000 {
 			cd-gpios = <&stmpe_gpio44 7 GPIO_ACTIVE_LOW>;
 			wp-gpios = <&stmpe_gpio44 18 GPIO_ACTIVE_HIGH>;
 		};
 	};
-
-	/* Custom board node with GPIO pins to active etc */
-	usb-s8815 {
-		/* This will turn off SATA so that MMC/SD can thrive */
-		mmcsd-gpio {
-			gpios = <&stmpe_gpio44 2 0x1>;
-		};
-	};
 };
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 3c140d0..35282c0 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -16,9 +16,20 @@
 	};
 
 	aliases {
+		serial0 = &uart0;
 		serial1 = &uart1;
 	};
 
+	gpio3: gpio@101e7000 {
+		/* This hog will bias the MMC/SD card detect line */
+		mmcsd-gpio {
+			gpio-hog;
+			gpios = <16 0x0>;
+			output-low;
+			line-name = "card detect bias";
+		};
+	};
+
 	src@101e0000 {
 		/* These chrystal drivers are not used on this board */
 		disable-sxtalo;
@@ -30,6 +41,15 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <&cd_default_mode>;
 
+		uart0 {
+			/* Only use RX/TX pins */
+			uart0_s8815_mode: uart0_mux {
+				u0_default_mux {
+					function = "u0";
+					groups = "u0txrx_a_1";
+				};
+			};
+		};
 		mmcsd-cd {
 			cd_default_mode: cd_default {
 				cd_default_cfg1 {
@@ -85,6 +105,14 @@
 		};
 	};
 
+	i2c1 {
+		lis3lv02dl@1d {
+			/* Accelerometer */
+			compatible = "st,lis3lv02dl-accel";
+			reg = <0x1d>;
+		};
+	};
+
 	/* GPIO I2C connected to the USB portions of the STw4811 only */
 	gpio-i2c {
 		compatible = "i2c-gpio";
@@ -102,21 +130,19 @@
 	};
 
 
-	/* Configure card detect for the uSD slot */
 	amba {
+		/* Activate RXTX on UART 0 */
+		uart0: uart@101fd000 {
+			pinctrl-names = "default";
+			pinctrl-0 = <&uart0_s8815_mode>;
+			status = "okay";
+		};
+		/* Configure card detect for the uSD slot */
 		mmcsd: sdi@101f6000 {
 			cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
 		};
 	};
 
-	/* Custom board node with GPIO pins to active etc */
-	usb-s8815 {
-		/* This will bias the MMC/SD card detect line */
-		mmcsd-gpio {
-			gpios = <&gpio3 16 0x1>;
-		};
-	};
-
 	/* The user LED on the board is set up to be used for heartbeat */
 	leds {
 		compatible = "gpio-leds";
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index ef794a3..314f59c 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -21,6 +21,13 @@
 		interrupts = <30>;
 		cache-unified;
 		cache-level = <2>;
+		cache-size = <131072>;
+		cache-sets = <512>;
+		cache-line-size = <32>;
+		/* At full speed latency must be >=2 */
+		arm,tag-latency = <2>;
+		arm,data-latency = <2 2>;
+		arm,dirty-latency = <2>;
 	};
 
 	mtu0: mtu@101e2000 {
@@ -52,6 +59,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <0>;
+		gpio-ranges = <&pinctrl 0 0 32>;
 		clocks = <&pclk>;
 	};
 
@@ -65,6 +73,7 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <1>;
+		gpio-ranges = <&pinctrl 0 32 32>;
 		clocks = <&pclk>;
 	};
 
@@ -78,12 +87,14 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <2>;
+		gpio-ranges = <&pinctrl 0 64 32>;
 		clocks = <&pclk>;
 	};
 
 	gpio3: gpio@101e7000 {
 		compatible = "st,nomadik-gpio";
 		reg =  <0x101e7000 0x80>;
+		ngpio = <28>;
 		interrupt-parent = <&vica>;
 		interrupts = <9>;
 		interrupt-controller;
@@ -91,20 +102,14 @@
 		gpio-controller;
 		#gpio-cells = <2>;
 		gpio-bank = <3>;
+		gpio-ranges = <&pinctrl 0 96 28>;
 		clocks = <&pclk>;
 	};
 
-	pinctrl {
+	pinctrl: pinctrl {
 		compatible = "stericsson,stn8815-pinctrl";
+		nomadik-gpio-chips = <&gpio0>, <&gpio1>, <&gpio2>, <&gpio3>;
 		/* Pin configurations */
-		uart0 {
-			uart0_default_mux: uart0_mux {
-				u0_default_mux {
-					function = "u0";
-					groups = "u0_a_1";
-				};
-			};
-		};
 		uart1 {
 			uart1_default_mux: uart1_mux {
 				u1_default_mux {
@@ -721,11 +726,6 @@
 			   compatible = "st,stw5095";
 			   reg = <0x1a>;
 		};
-		lis3lv02dl@1d {
-			/* Accelerometer */
-			compatible = "st,lis3lv02dl-accel";
-			reg = <0x1d>;
-		};
 	};
 
 	amba {
@@ -755,8 +755,6 @@
 			interrupts = <12>;
 			clocks = <&uart0clk>, <&pclkuart0>;
 			clock-names = "uartclk", "apb_pclk";
-			pinctrl-names = "default";
-			pinctrl-0 = <&uart0_default_mux>;
 			status = "disabled";
 		};
 
diff --git a/arch/arm/boot/dts/stih407-clock.dtsi b/arch/arm/boot/dts/stih407-clock.dtsi
index e65744f..ad45f5e 100644
--- a/arch/arm/boot/dts/stih407-clock.dtsi
+++ b/arch/arm/boot/dts/stih407-clock.dtsi
@@ -134,7 +134,7 @@
 
 			clk_s_c0_pll0: clk-s-c0-pll0 {
 				#clock-cells = <1>;
-				compatible = "st,stih407-plls-c32-c0_0", "st,clkgen-plls-c32";
+				compatible = "st,plls-c32-cx_0", "st,clkgen-plls-c32";
 
 				clocks = <&clk_sysin>;
 
@@ -143,7 +143,7 @@
 
 			clk_s_c0_pll1: clk-s-c0-pll1 {
 				#clock-cells = <1>;
-				compatible = "st,stih407-plls-c32-c0_1", "st,clkgen-plls-c32";
+				compatible = "st,plls-c32-cx_1", "st,clkgen-plls-c32";
 
 				clocks = <&clk_sysin>;
 
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi
index 838b812..ae05277 100644
--- a/arch/arm/boot/dts/stih407-family.dtsi
+++ b/arch/arm/boot/dts/stih407-family.dtsi
@@ -9,7 +9,7 @@
 #include "stih407-pinctrl.dtsi"
 #include <dt-bindings/mfd/st-lpc.h>
 #include <dt-bindings/phy/phy.h>
-#include <dt-bindings/reset-controller/stih407-resets.h>
+#include <dt-bindings/reset/stih407-resets.h>
 #include <dt-bindings/interrupt-controller/irq-st.h>
 / {
 	#address-cells = <1>;
@@ -22,11 +22,15 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
+			/* u-boot puts hpen in SBC dmem at 0xa4 offset */
+			cpu-release-addr = <0x94100A4>;
 		};
 		cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
+			/* u-boot puts hpen in SBC dmem at 0xa4 offset */
+			cpu-release-addr = <0x94100A4>;
 		};
 	};
 
@@ -65,6 +69,17 @@
 		interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
+	pwm_regulator: pwm-regulator {
+		compatible = "pwm-regulator";
+		pwms = <&pwm1 3 8448>;
+		regulator-name = "CPU_1V0_AVS";
+		regulator-min-microvolt = <784000>;
+		regulator-max-microvolt = <1299000>;
+		regulator-always-on;
+		max-duty-cycle = <255>;
+		status = "okay";
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -539,6 +554,7 @@
 			status = "disabled";
 		};
 
+
 		st_dwc3: dwc3@8f94000 {
 			compatible	= "st,stih407-dwc3";
 			reg		= <0x08f94000 0x1000>, <0x110 0x4>;
@@ -565,5 +581,34 @@
 						  <&phy_port2 PHY_TYPE_USB3>;
 			};
 		};
+
+		/* COMMS PWM Module */
+		pwm0: pwm@9810000 {
+			compatible	= "st,sti-pwm";
+			status		= "okay";
+			#pwm-cells	= <2>;
+			reg		= <0x9810000 0x68>;
+			pinctrl-names	= "default";
+			pinctrl-0	= <&pinctrl_pwm0_chan0_default>;
+			clock-names	= "pwm";
+			clocks		= <&clk_sysin>;
+			st,pwm-num-chan = <1>;
+		};
+
+		/* SBC PWM Module */
+		pwm1: pwm@9510000 {
+			compatible	= "st,sti-pwm";
+			status		= "okay";
+			#pwm-cells	= <2>;
+			reg		= <0x9510000 0x68>;
+			pinctrl-names	= "default";
+			pinctrl-0	= <&pinctrl_pwm1_chan0_default
+					&pinctrl_pwm1_chan1_default
+					&pinctrl_pwm1_chan2_default
+					&pinctrl_pwm1_chan3_default>;
+			clock-names	= "pwm";
+			clocks		= <&clk_sysin>;
+			st,pwm-num-chan = <4>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi
index 0a754f2..1683deb 100644
--- a/arch/arm/boot/dts/stih407-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi
@@ -439,6 +439,194 @@
 					};
 				};
 			};
+
+			tsin0 {
+				pinctrl_tsin0_parallel: tsin0_parallel {
+					st,pins {
+						DATA7 = <&pio10 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA6 = <&pio10 5 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA5 = <&pio10 6 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA4 = <&pio10 7 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA3 = <&pio11 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA2 = <&pio11 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA1 = <&pio11 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA0 = <&pio11 3 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio10 3 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio10 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio10 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio10 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+				pinctrl_tsin0_serial: tsin0_serial {
+					st,pins {
+						DATA7 = <&pio10 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio10 3 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio10 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio10 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio10 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsin1 {
+				pinctrl_tsin1_parallel: tsin1_parallel {
+					st,pins {
+						DATA7 = <&pio12 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA6 = <&pio12 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA5 = <&pio12 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA4 = <&pio12 3 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA3 = <&pio12 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA2 = <&pio12 5 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA1 = <&pio12 6 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA0 = <&pio12 7 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio11 7 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio11 5 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio11 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio11 6 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+				pinctrl_tsin1_serial: tsin1_serial {
+					st,pins {
+						DATA7 = <&pio12 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio11 7 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio11 5 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio11 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio11 6 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsin2 {
+				pinctrl_tsin2_parallel: tsin2_parallel {
+					st,pins {
+						DATA7 = <&pio13 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						DATA6 = <&pio13 5 ALT2 IN SE_NICLK_IO 0 CLK_B>;
+						DATA5 = <&pio13 6 ALT2 IN SE_NICLK_IO 0 CLK_B>;
+						DATA4 = <&pio13 7 ALT2 IN SE_NICLK_IO 0 CLK_B>;
+						DATA3 = <&pio14 0 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+						DATA2 = <&pio14 1 ALT2 IN SE_NICLK_IO 0 CLK_B>;
+						DATA1 = <&pio14 2 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+						DATA0 = <&pio14 3 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio13 3 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio13 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio13 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio13 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+				pinctrl_tsin2_serial: tsin2_serial {
+					st,pins {
+						DATA7 = <&pio13 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio13 3 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio13 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio13 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio13 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsin3 {
+				pinctrl_tsin3_serial: tsin3_serial {
+					st,pins {
+						DATA7 = <&pio14 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio14 0 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio13 6 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio13 5 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio13 7 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsin4 {
+				pinctrl_tsin4_serial_alt3: tsin4_serial_alt3 {
+					st,pins {
+						DATA7 = <&pio14 6 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio14 5 ALT3 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio14 3 ALT3 IN SE_NICLK_IO 0 CLK_B>;
+						ERROR = <&pio14 2 ALT3 IN SE_NICLK_IO 0 CLK_B>;
+						PKCLK = <&pio14 4 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsin5 {
+				pinctrl_tsin5_serial_alt1: tsin5_serial_alt1 {
+					st,pins {
+						DATA7 = <&pio18 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio18 3 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio18 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio18 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio18 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+				pinctrl_tsin5_serial_alt2: tsin5_serial_alt2 {
+					st,pins {
+						DATA7 = <&pio19 4 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio19 3 ALT2 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio19 1 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio19 0 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio19 2 ALT2 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsout0 {
+				pinctrl_tsout0_parallel: tsout0_parallel {
+					st,pins {
+						DATA7 = <&pio12 0 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA6 = <&pio12 1 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA5 = <&pio12 2 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA4 = <&pio12 3 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA3 = <&pio12 4 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA2 = <&pio12 5 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA1 = <&pio12 6 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						DATA0 = <&pio12 7 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio11 7 ALT3 OUT NICLK 0 CLK_A>;
+						VALID = <&pio11 5 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio11 4 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio11 6 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+				pinctrl_tsout0_serial: tsout0_serial {
+					st,pins {
+						DATA7 = <&pio12 0 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio11 7 ALT3 OUT NICLK 0 CLK_A>;
+						VALID = <&pio11 5 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio11 4 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio11 6 ALT3 OUT SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			tsout1 {
+				pinctrl_tsout1_serial: tsout1_serial {
+					st,pins {
+						DATA7 = <&pio19 4 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio19 3 ALT1 OUT NICLK 0 CLK_A>;
+						VALID = <&pio19 1 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio19 0 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio19 2 ALT1 OUT SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
+			mtsin0 {
+				pinctrl_mtsin0_parallel: mtsin0_parallel {
+					st,pins {
+						DATA7 = <&pio10 4 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA6 = <&pio10 5 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA5 = <&pio10 6 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA4 = <&pio10 7 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA3 = <&pio11 0 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA2 = <&pio11 1 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA1 = <&pio11 2 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						DATA0 = <&pio11 3 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio10 3 ALT3 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio10 1 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio10 0 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio10 2 ALT3 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
 		};
 
 		pin-controller-front1 {
@@ -452,6 +640,18 @@
 			interrupts-names = "irqmux";
 			ranges = <0 0x09210000 0x10000>;
 
+			tsin4 {
+				pinctrl_tsin4_serial_alt1: tsin4_serial_alt1 {
+					st,pins {
+						DATA7 = <&pio20 4 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						CLKIN = <&pio20 3 ALT1 IN CLKNOTDATA 0 CLK_A>;
+						VALID = <&pio20 1 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						ERROR = <&pio20 0 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+						PKCLK = <&pio20 2 ALT1 IN SE_NICLK_IO 0 CLK_A>;
+					};
+				};
+			};
+
 			pio20: pio@09210000 {
 				gpio-controller;
 				#gpio-cells = <1>;
diff --git a/arch/arm/boot/dts/stih407.dtsi b/arch/arm/boot/dts/stih407.dtsi
index 2c560fc..3efa3b2 100644
--- a/arch/arm/boot/dts/stih407.dtsi
+++ b/arch/arm/boot/dts/stih407.dtsi
@@ -147,33 +147,5 @@
 				};
 			};
 		};
-
-		/* COMMS PWM Module */
-		pwm0: pwm@9810000 {
-			compatible	= "st,sti-pwm";
-			status		= "disabled";
-			#pwm-cells	= <2>;
-			reg		= <0x9810000 0x68>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_pwm0_chan0_default>;
-			clock-names	= "pwm";
-			clocks		= <&clk_sysin>;
-		};
-
-		/* SBC PWM Module */
-		pwm1: pwm@9510000 {
-			compatible	= "st,sti-pwm";
-			status		= "disabled";
-			#pwm-cells	= <2>;
-			reg		= <0x9510000 0x68>;
-			pinctrl-names	= "default";
-			pinctrl-0	= <&pinctrl_pwm1_chan0_default
-					&pinctrl_pwm1_chan1_default
-					&pinctrl_pwm1_chan2_default
-					&pinctrl_pwm1_chan3_default>;
-			clock-names	= "pwm";
-			clocks		= <&clk_sysin>;
-			st,pwm-num-chan = <4>;
-		};
 	};
 };
diff --git a/arch/arm/boot/dts/stih410-clock.dtsi b/arch/arm/boot/dts/stih410-clock.dtsi
index 6b5803a..d1f2aca 100644
--- a/arch/arm/boot/dts/stih410-clock.dtsi
+++ b/arch/arm/boot/dts/stih410-clock.dtsi
@@ -137,7 +137,7 @@
 
 			clk_s_c0_pll0: clk-s-c0-pll0 {
 				#clock-cells = <1>;
-				compatible = "st,stih407-plls-c32-c0_0", "st,clkgen-plls-c32";
+				compatible = "st,plls-c32-cx_0", "st,clkgen-plls-c32";
 
 				clocks = <&clk_sysin>;
 
@@ -146,7 +146,7 @@
 
 			clk_s_c0_pll1: clk-s-c0-pll1 {
 				#clock-cells = <1>;
-				compatible = "st,stih407-plls-c32-c0_1", "st,clkgen-plls-c32";
+				compatible = "st,plls-c32-cx_1", "st,clkgen-plls-c32";
 
 				clocks = <&clk_sysin>;
 
diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
index 208b5e8..6f40bc9 100644
--- a/arch/arm/boot/dts/stih410.dtsi
+++ b/arch/arm/boot/dts/stih410.dtsi
@@ -10,6 +10,10 @@
 #include "stih407-family.dtsi"
 #include "stih410-pinctrl.dtsi"
 / {
+	aliases {
+		bdisp0 = &bdisp0;
+	};
+
 	soc {
 		usb2_picophy1: phy2 {
 			compatible = "st,stih407-usb2-phy";
@@ -218,5 +222,13 @@
 				};
 			};
 		};
+
+		bdisp0:bdisp@9f10000 {
+			compatible = "st,stih407-bdisp";
+			reg = <0x9f10000 0x1000>;
+			interrupts = <GIC_SPI 38 IRQ_TYPE_NONE>;
+			clock-names = "bdisp";
+			clocks = <&clk_s_c0_flexgen CLK_IC_BDISP_0>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/stih415.dtsi b/arch/arm/boot/dts/stih415.dtsi
index 19b019b..12427e6 100644
--- a/arch/arm/boot/dts/stih415.dtsi
+++ b/arch/arm/boot/dts/stih415.dtsi
@@ -10,7 +10,7 @@
 #include "stih415-clock.dtsi"
 #include "stih415-pinctrl.dtsi"
 #include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/reset-controller/stih415-resets.h>
+#include <dt-bindings/reset/stih415-resets.h>
 / {
 
 	L2: cache-controller {
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 9dca173..9e3170c 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -12,7 +12,7 @@
 
 #include <dt-bindings/phy/phy.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
-#include <dt-bindings/reset-controller/stih416-resets.h>
+#include <dt-bindings/reset/stih416-resets.h>
 #include <dt-bindings/interrupt-controller/irq-st.h>
 / {
 	L2: cache-controller {
diff --git a/arch/arm/boot/dts/stih418-clock.dtsi b/arch/arm/boot/dts/stih418-clock.dtsi
index 0ab23da..148e177 100644
--- a/arch/arm/boot/dts/stih418-clock.dtsi
+++ b/arch/arm/boot/dts/stih418-clock.dtsi
@@ -137,7 +137,7 @@
 
 			clk_s_c0_pll0: clk-s-c0-pll0 {
 				#clock-cells = <1>;
-				compatible = "st,stih407-plls-c32-c0_0", "st,clkgen-plls-c32";
+				compatible = "st,plls-c32-cx_0", "st,clkgen-plls-c32";
 
 				clocks = <&clk_sysin>;
 
@@ -146,7 +146,7 @@
 
 			clk_s_c0_pll1: clk-s-c0-pll1 {
 				#clock-cells = <1>;
-				compatible = "st,stih407-plls-c32-c0_1", "st,clkgen-plls-c32";
+				compatible = "st,plls-c32-cx_1", "st,clkgen-plls-c32";
 
 				clocks = <&clk_sysin>;
 
diff --git a/arch/arm/boot/dts/stih418.dtsi b/arch/arm/boot/dts/stih418.dtsi
index 354d90f..8160a75 100644
--- a/arch/arm/boot/dts/stih418.dtsi
+++ b/arch/arm/boot/dts/stih418.dtsi
@@ -17,11 +17,15 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <2>;
+			/* u-boot puts hpen in SBC dmem at 0xa4 offset */
+			cpu-release-addr = <0x94100A4>;
 		};
 		cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <3>;
+			/* u-boot puts hpen in SBC dmem at 0xa4 offset */
+			cpu-release-addr = <0x94100A4>;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
new file mode 100644
index 0000000..6964fc9
--- /dev/null
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2015 - Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public
+ *     License along with this file; if not, write to the Free
+ *     Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
+ *     MA 02110-1301 USA
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "stm32f429.dtsi"
+
+/ {
+	model = "STMicroelectronics STM32429i-EVAL board";
+	compatible = "st,stm32429i-eval", "st,stm32f429";
+
+	chosen {
+		bootargs = "root=/dev/ram rdinit=/linuxrc";
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		reg = <0xc0000000 0x2000000>;
+	};
+
+	aliases {
+		serial0 = &usart1;
+	};
+};
+
+&clk_hse {
+	clock-frequency = <25000000>;
+};
+
+&usart1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index 6b9aa59..f0b731d 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
@@ -53,8 +53,8 @@
 	compatible = "st,stm32f429i-disco", "st,stm32f429";
 
 	chosen {
-		bootargs = "console=ttyS0,115200 root=/dev/ram rdinit=/linuxrc";
-		linux,stdout-path = &usart1;
+		bootargs = "root=/dev/ram rdinit=/linuxrc";
+		stdout-path = "serial0:115200n8";
 	};
 
 	memory {
@@ -66,6 +66,10 @@
 	};
 };
 
+&clk_hse {
+	clock-frequency = <8000000>;
+};
+
 &usart1 {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index aa73b4f..d78a481 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -49,48 +49,10 @@
 
 / {
 	clocks {
-		clk_sysclk: clk-sysclk {
+		clk_hse: clk-hse {
 			#clock-cells = <0>;
 			compatible = "fixed-clock";
-			clock-frequency = <180000000>;
-		};
-
-		clk_hclk: clk-hclk {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <180000000>;
-		};
-
-		clk_pclk1: clk-pclk1 {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <45000000>;
-		};
-
-		clk_pclk2: clk-pclk2 {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <90000000>;
-		};
-
-		clk_pmtr1: clk-pmtr1 {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <90000000>;
-		};
-
-		clk_pmtr2: clk-pmtr2 {
-			#clock-cells = <0>;
-			compatible = "fixed-clock";
-			clock-frequency = <180000000>;
-		};
-
-		clk_systick: clk-systick {
-			compatible = "fixed-factor-clock";
-			clocks = <&clk_hclk>;
-			#clock-cells = <0>;
-			clock-div = <8>;
-			clock-mult = <1>;
+			clock-frequency = <0>;
 		};
 	};
 
@@ -99,7 +61,7 @@
 			compatible = "st,stm32-timer";
 			reg = <0x40000000 0x400>;
 			interrupts = <28>;
-			clocks = <&clk_pmtr1>;
+			clocks = <&rcc 0 128>;
 			status = "disabled";
 		};
 
@@ -107,7 +69,7 @@
 			compatible = "st,stm32-timer";
 			reg = <0x40000400 0x400>;
 			interrupts = <29>;
-			clocks = <&clk_pmtr1>;
+			clocks = <&rcc 0 129>;
 			status = "disabled";
 		};
 
@@ -115,7 +77,7 @@
 			compatible = "st,stm32-timer";
 			reg = <0x40000800 0x400>;
 			interrupts = <30>;
-			clocks = <&clk_pmtr1>;
+			clocks = <&rcc 0 130>;
 			status = "disabled";
 		};
 
@@ -123,14 +85,14 @@
 			compatible = "st,stm32-timer";
 			reg = <0x40000c00 0x400>;
 			interrupts = <50>;
-			clocks = <&clk_pmtr1>;
+			clocks = <&rcc 0 131>;
 		};
 
 		timer6: timer@40001000 {
 			compatible = "st,stm32-timer";
 			reg = <0x40001000 0x400>;
 			interrupts = <54>;
-			clocks = <&clk_pmtr1>;
+			clocks = <&rcc 0 132>;
 			status = "disabled";
 		};
 
@@ -138,7 +100,7 @@
 			compatible = "st,stm32-timer";
 			reg = <0x40001400 0x400>;
 			interrupts = <55>;
-			clocks = <&clk_pmtr1>;
+			clocks = <&rcc 0 133>;
 			status = "disabled";
 		};
 
@@ -146,7 +108,7 @@
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40004400 0x400>;
 			interrupts = <38>;
-			clocks = <&clk_pclk1>;
+			clocks =  <&rcc 0 145>;
 			status = "disabled";
 		};
 
@@ -154,7 +116,7 @@
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40004800 0x400>;
 			interrupts = <39>;
-			clocks = <&clk_pclk1>;
+			clocks = <&rcc 0 146>;
 			status = "disabled";
 		};
 
@@ -162,7 +124,7 @@
 			compatible = "st,stm32-uart";
 			reg = <0x40004c00 0x400>;
 			interrupts = <52>;
-			clocks = <&clk_pclk1>;
+			clocks = <&rcc 0 147>;
 			status = "disabled";
 		};
 
@@ -170,7 +132,7 @@
 			compatible = "st,stm32-uart";
 			reg = <0x40005000 0x400>;
 			interrupts = <53>;
-			clocks = <&clk_pclk1>;
+			clocks = <&rcc 0 148>;
 			status = "disabled";
 		};
 
@@ -178,7 +140,7 @@
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40007800 0x400>;
 			interrupts = <82>;
-			clocks = <&clk_pclk1>;
+			clocks = <&rcc 0 158>;
 			status = "disabled";
 		};
 
@@ -186,7 +148,7 @@
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40007c00 0x400>;
 			interrupts = <83>;
-			clocks = <&clk_pclk1>;
+			clocks = <&rcc 0 159>;
 			status = "disabled";
 		};
 
@@ -194,7 +156,7 @@
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40011000 0x400>;
 			interrupts = <37>;
-			clocks = <&clk_pclk2>;
+			clocks = <&rcc 0 164>;
 			status = "disabled";
 		};
 
@@ -202,13 +164,20 @@
 			compatible = "st,stm32-usart", "st,stm32-uart";
 			reg = <0x40011400 0x400>;
 			interrupts = <71>;
-			clocks = <&clk_pclk2>;
+			clocks = <&rcc 0 165>;
 			status = "disabled";
 		};
+
+		rcc: rcc@40023810 {
+			#clock-cells = <2>;
+			compatible = "st,stm32f42xx-rcc", "st,stm32-rcc";
+			reg = <0x40023800 0x400>;
+			clocks = <&clk_hse>;
+		};
 	};
 };
 
 &systick {
-	clocks = <&clk_systick>;
+	clocks = <&rcc 1 0>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts b/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
index 93d4356..f3cb297 100644
--- a/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
+++ b/arch/arm/boot/dts/sun4i-a10-ba10-tvbox.dts
@@ -125,12 +125,21 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	usb2_vbus_pin_a: usb2_vbus_pin@0 {
 		allwinner,pins = "PH12";
 	};
 };
 
+&reg_usb0_vbus {
+	regulator-boot-on;
+	status = "okay";
+};
+
 &reg_usb1_vbus {
 	status = "okay";
 };
@@ -146,7 +155,13 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
 &usbphy {
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts b/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
index 5878a0b..1430568 100644
--- a/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
+++ b/arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
@@ -114,6 +114,30 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
+&pio {
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PH5";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
+};
+
+&reg_usb0_vbus {
+	status = "okay";
+};
+
 &reg_usb2_vbus {
 	status = "okay";
 };
@@ -124,7 +148,17 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+	usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
index 9afb4e0..046a84d 100644
--- a/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
+++ b/arch/arm/boot/dts/sun4i-a10-cubieboard.dts
@@ -155,6 +155,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	led_pins_cubieboard: led_pins@0 {
 		allwinner,pins = "PH20", "PH21";
@@ -162,6 +166,13 @@
 		allwinner,drive = <SUN4I_PINCTRL_20_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
 };
 
 &reg_ahci_5v {
@@ -216,7 +227,15 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>;
+	usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts b/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
new file mode 100644
index 0000000..985e155
--- /dev/null
+++ b/arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2015 Josef Gajdusek <atx@atx.name>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun4i-a10.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	model = "Iteaduino Plus A10";
+	compatible = "itead,iteaduino-plus-a10", "allwinner,sun4i-a10";
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&ahci {
+	target-supply = <&reg_ahci_5v>;
+	status = "okay";
+};
+
+&cpu0 {
+	cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+	status = "okay";
+};
+
+&ehci1 {
+	status = "okay";
+};
+
+&emac {
+	pinctrl-names = "default";
+	pinctrl-0 = <&emac_pins_a>;
+	phy = <&phy1>;
+	status = "okay";
+};
+
+&emac_sram {
+	status = "okay";
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+
+	axp209: pmic@34 {
+		reg = <0x34>;
+		interrupts = <0>;
+	};
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "okay";
+};
+
+&i2c2 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c2_pins_a>;
+	status = "okay";
+};
+
+&ir0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&ir0_rx_pins_a>;
+	status = "okay";
+};
+
+&mdio {
+	status = "okay";
+
+	phy1: ethernet-phy@1 {
+		reg = <1>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+	vmmc-supply = <&reg_vcc3v3>;
+	bus-width = <4>;
+	cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
+	cd-inverted;
+	status = "okay";
+};
+
+&ohci0 {
+	status = "okay";
+};
+
+&ohci1 {
+	status = "okay";
+};
+
+&reg_ahci_5v {
+	status = "okay";
+};
+
+#include "axp209.dtsi"
+
+&reg_dcdc2 {
+	regulator-always-on;
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1450000>;
+	regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+	regulator-always-on;
+	regulator-min-microvolt = <1000000>;
+	regulator-max-microvolt = <1400000>;
+	regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+	regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+	regulator-always-on;
+	regulator-min-microvolt = <3000000>;
+	regulator-max-microvolt = <3000000>;
+	regulator-name = "avcc";
+};
+
+&reg_usb1_vbus {
+	status = "okay";
+};
+
+&reg_usb2_vbus {
+	status = "okay";
+};
+
+&spi0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&spi0_pins_a>,
+		    <&spi0_cs0_pins_a>;
+	status = "okay";
+};
+
+&uart0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_pins_a>;
+	status = "okay";
+};
+
+&usbphy {
+	usb1_vbus-supply = <&reg_usb1_vbus>;
+	usb2_vbus-supply = <&reg_usb2_vbus>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
index ebe2a04..a7dd86d 100644
--- a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
@@ -114,6 +114,15 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
+&reg_usb0_vbus {
+	regulator-boot-on;
+	status = "okay";
+};
+
 &reg_usb1_vbus {
 	status = "okay";
 };
@@ -128,7 +137,13 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
 &usbphy {
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
index b64aa4e..28e32ad 100644
--- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -150,6 +150,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	ahci_pwr_pin_olinuxinolime: ahci_pwr_pin@1 {
 		allwinner,pins = "PC3";
@@ -164,6 +168,20 @@
 		allwinner,drive = <SUN4I_PINCTRL_20_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PH5";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
 };
 
 &reg_ahci_5v {
@@ -172,6 +190,10 @@
 	status = "okay";
 };
 
+&reg_usb0_vbus {
+	status = "okay";
+};
+
 &reg_usb1_vbus {
 	status = "okay";
 };
@@ -186,7 +208,17 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio   = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+	usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+	usb0_vbus-supply   = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 61c03d1..1f3c51a 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -241,6 +241,7 @@
 			compatible = "allwinner,sun4i-a10-axi-gates-clk";
 			reg = <0x01c2005c 0x4>;
 			clocks = <&axi>;
+			clock-indices = <0>;
 			clock-output-names = "axi_dram";
 		};
 
@@ -257,17 +258,36 @@
 			compatible = "allwinner,sun4i-a10-ahb-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb>;
+			clock-indices = <0>, <1>,
+					<2>, <3>,
+					<4>, <5>, <6>,
+					<7>, <8>, <9>,
+					<10>, <11>, <12>,
+					<13>, <14>, <16>,
+					<17>, <18>, <20>,
+					<21>, <22>, <23>,
+					<24>, <25>, <26>,
+					<32>, <33>, <34>,
+					<35>, <36>, <37>,
+					<40>, <41>, <43>,
+					<44>, <45>,
+					<46>, <47>,
+					<50>, <52>;
 			clock-output-names = "ahb_usb0", "ahb_ehci0",
-				"ahb_ohci0", "ahb_ehci1", "ahb_ohci1", "ahb_ss",
-				"ahb_dma", "ahb_bist", "ahb_mmc0", "ahb_mmc1",
-				"ahb_mmc2", "ahb_mmc3", "ahb_ms", "ahb_nand",
-				"ahb_sdram", "ahb_ace",	"ahb_emac", "ahb_ts",
-				"ahb_spi0", "ahb_spi1", "ahb_spi2", "ahb_spi3",
-				"ahb_pata", "ahb_sata", "ahb_gps", "ahb_ve",
-				"ahb_tvd", "ahb_tve0", "ahb_tve1", "ahb_lcd0",
-				"ahb_lcd1", "ahb_csi0", "ahb_csi1", "ahb_hdmi",
-				"ahb_de_be0", "ahb_de_be1", "ahb_de_fe0",
-				"ahb_de_fe1", "ahb_mp", "ahb_mali400";
+					     "ahb_ohci0", "ahb_ehci1",
+					     "ahb_ohci1", "ahb_ss", "ahb_dma",
+					     "ahb_bist", "ahb_mmc0", "ahb_mmc1",
+					     "ahb_mmc2", "ahb_mmc3", "ahb_ms",
+					     "ahb_nand", "ahb_sdram", "ahb_ace",
+					     "ahb_emac", "ahb_ts", "ahb_spi0",
+					     "ahb_spi1", "ahb_spi2", "ahb_spi3",
+					     "ahb_pata", "ahb_sata", "ahb_gps",
+					     "ahb_ve", "ahb_tvd", "ahb_tve0",
+					     "ahb_tve1", "ahb_lcd0", "ahb_lcd1",
+					     "ahb_csi0", "ahb_csi1", "ahb_hdmi",
+					     "ahb_de_be0", "ahb_de_be1",
+					     "ahb_de_fe0", "ahb_de_fe1",
+					     "ahb_mp", "ahb_mali400";
 		};
 
 		apb0: apb0@01c20054 {
@@ -283,9 +303,14 @@
 			compatible = "allwinner,sun4i-a10-apb0-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb0>;
+			clock-indices = <0>, <1>,
+					<2>, <3>,
+					<5>, <6>,
+					<7>, <10>;
 			clock-output-names = "apb0_codec", "apb0_spdif",
-				"apb0_ac97", "apb0_iis", "apb0_pio", "apb0_ir0",
-				"apb0_ir1", "apb0_keypad";
+					     "apb0_ac97", "apb0_iis",
+					     "apb0_pio", "apb0_ir0",
+					     "apb0_ir1", "apb0_keypad";
 		};
 
 		apb1: clk@01c20058 {
@@ -301,12 +326,22 @@
 			compatible = "allwinner,sun4i-a10-apb1-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb1>;
+			clock-indices = <0>, <1>,
+					<2>, <4>,
+					<5>, <6>,
+					<7>, <16>,
+					<17>, <18>,
+					<19>, <20>,
+					<21>, <22>,
+					<23>;
 			clock-output-names = "apb1_i2c0", "apb1_i2c1",
-				"apb1_i2c2", "apb1_can", "apb1_scr",
-				"apb1_ps20", "apb1_ps21", "apb1_uart0",
-				"apb1_uart1", "apb1_uart2", "apb1_uart3",
-				"apb1_uart4", "apb1_uart5", "apb1_uart6",
-				"apb1_uart7";
+					     "apb1_i2c2", "apb1_can",
+					     "apb1_scr", "apb1_ps20",
+					     "apb1_ps21", "apb1_uart0",
+					     "apb1_uart1", "apb1_uart2",
+					     "apb1_uart3", "apb1_uart4",
+					     "apb1_uart5", "apb1_uart6",
+					     "apb1_uart7";
 		};
 
 		nand_clk: clk@01c20080 {
@@ -611,6 +646,19 @@
 			#size-cells = <0>;
 		};
 
+		usb_otg: usb@01c13000 {
+			compatible = "allwinner,sun4i-a10-musb";
+			reg = <0x01c13000 0x0400>;
+			clocks = <&ahb_gates 0>;
+			interrupts = <38>;
+			interrupt-names = "mc";
+			phys = <&usbphy 0>;
+			phy-names = "usb";
+			extcon = <&usbphy 0>;
+			allwinner,sram = <&otg_sram 1>;
+			status = "disabled";
+		};
+
 		usbphy: phy@01c13400 {
 			#phy-cells = <1>;
 			compatible = "allwinner,sun4i-a10-usb-phy";
@@ -643,6 +691,14 @@
 			status = "disabled";
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <86>;
+			clocks = <&ahb_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;
@@ -713,8 +769,7 @@
 			clocks = <&apb0_gates 5>;
 			gpio-controller;
 			interrupt-controller;
-			#interrupt-cells = <2>;
-			#size-cells = <0>;
+			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
 			pwm0_pins_a: pwm0@0 {
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index a7e19e4..5a422c1 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -96,8 +96,15 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c0_pins_a>;
 	status = "okay";
+
+	axp152: pmic@30 {
+		reg = <0x30>;
+		interrupts = <0>;
+	};
 };
 
+#include "axp152.dtsi"
+
 &i2c1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c1_pins_a>;
@@ -189,6 +196,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_olinuxino_micro: mmc0_cd_pin@0 {
 		allwinner,pins = "PG1";
@@ -217,6 +228,18 @@
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PG12";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+};
+
+&reg_usb0_vbus {
+	gpio = <&pio 6 11 GPIO_ACTIVE_HIGH>; /* PG11 */
+	status = "okay";
 };
 
 &reg_usb1_vbus {
@@ -243,8 +266,20 @@
 	status = "okay";
 };
 
-&usbphy {
-	usb1_vbus-supply = <&reg_usb1_vbus>;
+&usb_otg {
+	dr_mode = "otg";
 	status = "okay";
 };
 
+&usb0_vbus_pin_a {
+	allwinner,pins = "PG11";
+};
+
+&usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>;
+	usb0_id_det-gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
+	usb1_vbus-supply = <&reg_usb1_vbus>;
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index f11efb7..a513b41 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -85,6 +85,17 @@
 			compatible = "allwinner,sun5i-a10s-ahb-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb>;
+			clock-indices = <0>, <1>,
+					<2>, <5>, <6>,
+					<7>, <8>, <9>,
+					<10>, <13>,
+					<14>, <17>, <18>,
+					<20>, <21>, <22>,
+					<26>, <28>, <32>,
+					<34>, <36>, <40>,
+					<43>, <44>,
+					<46>, <51>,
+					<52>;
 			clock-output-names = "ahb_usbotg", "ahb_ehci",
 					     "ahb_ohci", "ahb_ss", "ahb_dma",
 					     "ahb_bist", "ahb_mmc0", "ahb_mmc1",
@@ -103,6 +114,9 @@
 			compatible = "allwinner,sun5i-a10s-apb0-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb0>;
+			clock-indices = <0>, <3>,
+					<5>, <6>,
+					<10>;
 			clock-output-names = "apb0_codec", "apb0_iis",
 					     "apb0_pio", "apb0_ir",
 					     "apb0_keypad";
@@ -113,9 +127,14 @@
 			compatible = "allwinner,sun5i-a10s-apb1-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb1>;
+			clock-indices = <0>, <1>,
+					<2>, <16>,
+					<17>, <18>,
+					<19>;
 			clock-output-names = "apb1_i2c0", "apb1_i2c1",
-				"apb1_i2c2", "apb1_uart0", "apb1_uart1",
-				"apb1_uart2", "apb1_uart3";
+					     "apb1_i2c2", "apb1_uart0",
+					     "apb1_uart1", "apb1_uart2",
+					     "apb1_uart3";
 		};
 	};
 
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index 990f9d6..3724b98 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -45,6 +45,7 @@
 #include "sunxi-common-regulators.dtsi"
 
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
 #include <dt-bindings/pinctrl/sun4i-a10.h>
 
 / {
@@ -96,6 +97,25 @@
 	status = "okay";
 };
 
+&lradc {
+	vref-supply = <&reg_ldo2>;
+	status = "okay";
+
+	button@200 {
+		label = "Volume Up";
+		linux,code = <KEY_VOLUMEUP>;
+		channel = <0>;
+		voltage = <200000>;
+	};
+
+	button@400 {
+		label = "Volume Down";
+		linux,code = <KEY_VOLUMEDOWN>;
+		channel = <0>;
+		voltage = <400000>;
+	};
+};
+
 &mmc0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_h702>;
@@ -110,6 +130,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_h702: mmc0_cd_pin@0 {
 		allwinner,pins = "PG0";
@@ -117,6 +141,20 @@
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PG2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PG1";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
 };
 
 #include "axp209.dtsi"
@@ -152,13 +190,33 @@
 	regulator-name = "vcc-wifi";
 };
 
+&reg_usb0_vbus {
+	pinctrl-0 = <&usb0_vbus_pin_a>;
+	gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+	status = "okay";
+};
+
 &uart1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart1_pins_b>;
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb0_vbus_pin_a {
+	allwinner,pins = "PG12";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpios = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+	usb0_vbus_det-gpios = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_ldo3>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index 4232400..b3c234c 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -159,6 +159,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_olinuxino: mmc0_cd_pin@0 {
 		allwinner,pins = "PG0";
@@ -174,6 +178,20 @@
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PG2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PG1";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
+
 	usb1_vbus_pin_olinuxino: usb1_vbus_pin@0 {
 		allwinner,pins = "PG11";
 		allwinner,function = "gpio_out";
@@ -182,6 +200,11 @@
 	};
 };
 
+&reg_usb0_vbus {
+	status = "okay";
+	gpio = <&pio 6 12 GPIO_ACTIVE_HIGH>; /* PG12 */
+};
+
 &reg_usb1_vbus {
 	pinctrl-0 = <&usb1_vbus_pin_olinuxino>;
 	gpio = <&pio 6 11 GPIO_ACTIVE_HIGH>;
@@ -194,7 +217,21 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
+&usb0_vbus_pin_a {
+	allwinner,pins = "PG12";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+	usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
index 514f159..eb793d5 100644
--- a/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
+++ b/arch/arm/boot/dts/sun5i-a13-utoo-p66.dts
@@ -93,7 +93,7 @@
 		compatible = "chipone,icn8318";
 		reg = <0x40>;
 		interrupt-parent = <&pio>;
-		interrupts = <9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
+		interrupts = <6 9 IRQ_TYPE_EDGE_FALLING>; /* EINT9 (PG9) */
 		pinctrl-names = "default";
 		pinctrl-0 = <&ts_wake_pin_p66>;
 		wake-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
@@ -153,6 +153,10 @@
 	};
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_p66: mmc0_cd_pin@0 {
 		allwinner,pins = "PG0";
@@ -161,6 +165,20 @@
 		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 	};
 
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PG1";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PG2";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
 	i2c_lcd_pins: i2c_lcd_pin@0 {
 		allwinner,pins = "PG10", "PG12";
 		allwinner,function = "gpio_out";
@@ -219,7 +237,16 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 6 2 GPIO_ACTIVE_HIGH>; /* PG2 */
+	usb0_vbus_det-gpio = <&pio 6 1 GPIO_ACTIVE_HIGH>; /* PG1 */
 	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_ldo3>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 976d4fa..f3631c9 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -104,6 +104,16 @@
 			compatible = "allwinner,sun5i-a13-ahb-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb>;
+			clock-indices = <0>, <1>,
+					<2>, <5>, <6>,
+					<7>, <8>, <9>,
+					<10>, <13>,
+					<14>, <20>,
+					<21>, <22>,
+					<28>, <32>, <36>,
+					<40>, <44>,
+					<46>, <51>,
+					<52>;
 			clock-output-names = "ahb_usbotg", "ahb_ehci",
 					     "ahb_ohci", "ahb_ss", "ahb_dma",
 					     "ahb_bist", "ahb_mmc0", "ahb_mmc1",
@@ -121,6 +131,8 @@
 			compatible = "allwinner,sun5i-a13-apb0-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb0>;
+			clock-indices = <0>, <5>,
+					<6>;
 			clock-output-names = "apb0_codec", "apb0_pio",
 					     "apb0_ir";
 		};
@@ -130,8 +142,12 @@
 			compatible = "allwinner,sun5i-a13-apb1-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb1>;
+			clock-indices = <0>, <1>,
+					<2>, <17>,
+					<19>;
 			clock-output-names = "apb1_i2c0", "apb1_i2c1",
-				"apb1_i2c2", "apb1_uart1", "apb1_uart3";
+					     "apb1_i2c2", "apb1_uart1",
+					     "apb1_uart3";
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
index 54b0978..78b993a 100644
--- a/arch/arm/boot/dts/sun5i.dtsi
+++ b/arch/arm/boot/dts/sun5i.dtsi
@@ -178,6 +178,7 @@
 			compatible = "allwinner,sun4i-a10-axi-gates-clk";
 			reg = <0x01c2005c 0x4>;
 			clocks = <&axi>;
+			clock-indices = <0>;
 			clock-output-names = "axi_dram";
 		};
 
@@ -416,6 +417,19 @@
 			#size-cells = <0>;
 		};
 
+		usb_otg: usb@01c13000 {
+			compatible = "allwinner,sun4i-a10-musb";
+			reg = <0x01c13000 0x0400>;
+			clocks = <&ahb_gates 0>;
+			interrupts = <38>;
+			interrupt-names = "mc";
+			phys = <&usbphy 0>;
+			phy-names = "usb";
+			extcon = <&usbphy 0>;
+			allwinner,sram = <&otg_sram 1>;
+			status = "disabled";
+		};
+
 		usbphy: phy@01c13400 {
 			#phy-cells = <1>;
 			compatible = "allwinner,sun5i-a13-usb-phy";
@@ -475,8 +489,7 @@
 			clocks = <&apb0_gates 5>;
 			gpio-controller;
 			interrupt-controller;
-			#interrupt-cells = <2>;
-			#size-cells = <0>;
+			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
 			i2c0_pins_a: i2c0@0 {
diff --git a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
index 4404f37..4dd70cc 100644
--- a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
+++ b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
@@ -143,6 +143,11 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
 &usbphy {
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 008047a..54bb83b 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -252,6 +252,20 @@
 			compatible = "allwinner,sun6i-a31-ahb1-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb1>;
+			clock-indices = <1>, <5>,
+					<6>, <8>, <9>,
+					<10>, <11>, <12>,
+					<13>, <14>,
+					<17>, <18>, <19>,
+					<20>, <21>, <22>,
+					<23>, <24>, <26>,
+					<27>, <29>,
+					<30>, <31>, <32>,
+					<36>, <37>, <40>,
+					<43>, <44>, <45>,
+					<46>, <47>, <50>,
+					<52>, <55>, <56>,
+					<57>, <58>;
 			clock-output-names = "ahb1_mipidsi", "ahb1_ss",
 					"ahb1_dma", "ahb1_mmc0", "ahb1_mmc1",
 					"ahb1_mmc2", "ahb1_mmc3", "ahb1_nand1",
@@ -281,6 +295,9 @@
 			compatible = "allwinner,sun6i-a31-apb1-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb1>;
+			clock-indices = <0>, <4>,
+					<5>, <12>,
+					<13>;
 			clock-output-names = "apb1_codec", "apb1_digital_mic",
 					"apb1_pio", "apb1_daudio0",
 					"apb1_daudio1";
@@ -299,6 +316,10 @@
 			compatible = "allwinner,sun6i-a31-apb2-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb2>;
+			clock-indices = <0>, <1>,
+					<2>, <3>, <16>,
+					<17>, <18>, <19>,
+					<20>, <21>;
 			clock-output-names = "apb2_i2c0", "apb2_i2c1",
 					     "apb2_i2c2", "apb2_i2c3",
 					     "apb2_uart0", "apb2_uart1",
@@ -346,6 +367,14 @@
 					     "mmc3_sample";
 		};
 
+		ss_clk: clk@01c2009c {
+			#clock-cells = <0>;
+			compatible = "allwinner,sun4i-a10-mod0-clk";
+			reg = <0x01c2009c 0x4>;
+			clocks = <&osc24M>, <&pll6 0>;
+			clock-output-names = "ss";
+		};
+
 		spi0_clk: clk@01c200a0 {
 			#clock-cells = <0>;
 			compatible = "allwinner,sun4i-a10-mod0-clk";
@@ -384,6 +413,9 @@
 			compatible = "allwinner,sun6i-a31-usb-clk";
 			reg = <0x01c200cc 0x4>;
 			clocks = <&osc24M>;
+			clock-indices = <8>, <9>, <10>,
+					<16>, <17>,
+					<18>;
 			clock-output-names = "usb_phy0", "usb_phy1", "usb_phy2",
 					     "usb_ohci0", "usb_ohci1",
 					     "usb_ohci2";
@@ -512,6 +544,19 @@
 			#size-cells = <0>;
 		};
 
+		usb_otg: usb@01c19000 {
+			compatible = "allwinner,sun6i-a31-musb";
+			reg = <0x01c19000 0x0400>;
+			clocks = <&ahb1_gates 24>;
+			resets = <&ahb1_rst 24>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "mc";
+			phys = <&usbphy 0>;
+			phy-names = "usb";
+			extcon = <&usbphy 0>;
+			status = "disabled";
+		};
+
 		usbphy: phy@01c19400 {
 			compatible = "allwinner,sun6i-a31-usb-phy";
 			reg = <0x01c19400 0x10>,
@@ -599,8 +644,7 @@
 			clocks = <&apb1_gates 5>;
 			gpio-controller;
 			interrupt-controller;
-			#interrupt-cells = <2>;
-			#size-cells = <0>;
+			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
 			uart0_pins_a: uart0@0 {
@@ -870,6 +914,16 @@
 			#size-cells = <0>;
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb1_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+			resets = <&ahb1_rst 5>;
+			reset-names = "ahb";
+		};
+
 		timer@01c60000 {
 			compatible = "allwinner,sun6i-a31-hstimer",
 				     "allwinner,sun7i-a20-hstimer";
diff --git a/arch/arm/boot/dts/sun6i-a31s-cs908.dts b/arch/arm/boot/dts/sun6i-a31s-cs908.dts
index 1e2411a..5e8f8c4 100644
--- a/arch/arm/boot/dts/sun6i-a31s-cs908.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-cs908.dts
@@ -93,6 +93,11 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
 &usbphy {
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
index 4611e2f..e6b0192 100644
--- a/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
+++ b/arch/arm/boot/dts/sun7i-a20-cubietruck.dts
@@ -88,15 +88,11 @@
 		};
 	};
 
-	reg_vmmc3: vmmc3 {
-		compatible = "regulator-fixed";
+	mmc3_pwrseq: mmc3_pwrseq {
+		compatible = "mmc-pwrseq-simple";
 		pinctrl-names = "default";
-		pinctrl-0 = <&vmmc3_pin_cubietruck>;
-		regulator-name = "vmmc3";
-		regulator-min-microvolt = <3300000>;
-		regulator-max-microvolt = <3300000>;
-		enable-active-high;
-		gpio = <&pio 7 9 GPIO_ACTIVE_HIGH>;
+		pinctrl-0 = <&mmc3_pwrseq_pin_cubietruck>;
+		reset-gpios = <&pio 7 9 GPIO_ACTIVE_LOW>; /* PH9 WIFI_EN */
 	};
 };
 
@@ -172,7 +168,8 @@
 &mmc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc3_pins_a>;
-	vmmc-supply = <&reg_vmmc3>;
+	vmmc-supply = <&reg_vcc3v3>;
+	mmc-pwrseq = <&mmc3_pwrseq>;
 	bus-width = <4>;
 	non-removable;
 	status = "okay";
@@ -181,7 +178,7 @@
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
-		interrupts = <10 IRQ_TYPE_LEVEL_LOW>; /* PH10 / EINT10 */
+		interrupts = <7 10 IRQ_TYPE_LEVEL_LOW>; /* PH10 / EINT10 */
 		interrupt-names = "host-wake";
 	};
 };
@@ -199,14 +196,11 @@
 	status = "okay";
 };
 
-&pio {
-	vmmc3_pin_cubietruck: vmmc3_pin@0 {
-		allwinner,pins = "PH9";
-		allwinner,function = "gpio_out";
-		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
-		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
-	};
+&otg_sram {
+	status = "okay";
+};
 
+&pio {
 	ahci_pwr_pin_cubietruck: ahci_pwr_pin@1 {
 		allwinner,pins = "PH12";
 		allwinner,function = "gpio_out";
@@ -221,12 +215,33 @@
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
 
+	mmc3_pwrseq_pin_cubietruck: mmc3_pwrseq_pin@0 {
+		allwinner,pins = "PH9";
+		allwinner,function = "gpio_out";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
 	usb0_vbus_pin_a: usb0_vbus_pin@0 {
 		allwinner,pins = "PH17";
 		allwinner,function = "gpio_out";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH19";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PH22";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+	};
 };
 
 &pwm {
@@ -288,7 +303,16 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpios = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */
+	usb0_vbus_det-gpios = <&pio 7 22 GPIO_ACTIVE_HIGH>; /* PH22 */
 	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
diff --git a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
index f32f6f2..1e6bd36 100644
--- a/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
+++ b/arch/arm/boot/dts/sun7i-a20-i12-tvbox.dts
@@ -178,7 +178,7 @@
 		reg = <1>;
 		compatible = "brcm,bcm4329-fmac";
 		interrupt-parent = <&pio>;
-		interrupts = <10 IRQ_TYPE_LEVEL_LOW>; /* PH10 / EINT10 */
+		interrupts = <7 10 IRQ_TYPE_LEVEL_LOW>; /* PH10 / EINT10 */
 		interrupt-names = "host-wake";
 	};
 };
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
index 769726d..0423708 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime.dts
@@ -135,6 +135,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	ahci_pwr_pin_olinuxinolime: ahci_pwr_pin@1 {
 		allwinner,pins = "PC3";
@@ -149,6 +153,20 @@
 		allwinner,drive = <SUN4I_PINCTRL_20_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PH5";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
 };
 
 &reg_ahci_5v {
@@ -157,6 +175,10 @@
 	status = "okay";
 };
 
+&reg_usb0_vbus {
+	status = "okay";
+};
+
 &reg_usb1_vbus {
 	status = "okay";
 };
@@ -171,7 +193,17 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+	usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 00f8f25..c5d70ca 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -215,6 +215,10 @@
 	status = "okay";
 };
 
+&otg_sram {
+	status = "okay";
+};
+
 &pio {
 	mmc3_cd_pin_olinuxinom: mmc3_cd_pin@0 {
 		allwinner,pins = "PH11";
@@ -229,12 +233,30 @@
 		allwinner,drive = <SUN4I_PINCTRL_20_MA>;
 		allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
 	};
+
+	usb0_id_detect_pin: usb0_id_detect_pin@0 {
+		allwinner,pins = "PH4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+
+	usb0_vbus_detect_pin: usb0_vbus_detect_pin@0 {
+		allwinner,pins = "PH5";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_DOWN>;
+	};
 };
 
 &reg_ahci_5v {
 	status = "okay";
 };
 
+&reg_usb0_vbus {
+	status = "okay";
+};
+
 &reg_usb1_vbus {
 	status = "okay";
 };
@@ -275,7 +297,17 @@
 	status = "okay";
 };
 
+&usb_otg {
+	dr_mode = "otg";
+	status = "okay";
+};
+
 &usbphy {
+	pinctrl-names = "default";
+	pinctrl-0 = <&usb0_id_detect_pin>, <&usb0_vbus_detect_pin>;
+	usb0_id_det-gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+	usb0_vbus_det-gpio = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+	usb0_vbus-supply = <&reg_usb0_vbus>;
 	usb1_vbus-supply = <&reg_usb1_vbus>;
 	usb2_vbus-supply = <&reg_usb2_vbus>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 6a63f30..2bebaa2 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -267,6 +267,19 @@
 			compatible = "allwinner,sun7i-a20-ahb-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb>;
+			clock-indices = <0>, <1>,
+					<2>, <3>, <4>,
+					<5>, <6>, <7>, <8>,
+					<9>, <10>, <11>, <12>,
+					<13>, <14>, <16>,
+					<17>, <18>, <20>, <21>,
+					<22>, <23>, <25>,
+					<28>, <32>, <33>, <34>,
+					<35>, <36>, <37>, <40>,
+					<41>, <42>, <43>,
+					<44>, <45>, <46>,
+					<47>, <49>, <50>,
+					<52>;
 			clock-output-names = "ahb_usb0", "ahb_ehci0",
 				"ahb_ohci0", "ahb_ehci1", "ahb_ohci1",
 				"ahb_ss", "ahb_dma", "ahb_bist", "ahb_mmc0",
@@ -295,6 +308,10 @@
 			compatible = "allwinner,sun7i-a20-apb0-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb0>;
+			clock-indices = <0>, <1>,
+					<2>, <3>, <4>,
+					<5>, <6>, <7>,
+					<8>, <10>;
 			clock-output-names = "apb0_codec", "apb0_spdif",
 				"apb0_ac97", "apb0_iis0", "apb0_iis1",
 				"apb0_pio", "apb0_ir0", "apb0_ir1",
@@ -314,6 +331,12 @@
 			compatible = "allwinner,sun7i-a20-apb1-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb1>;
+			clock-indices = <0>, <1>,
+					<2>, <3>, <4>,
+					<5>, <6>, <7>,
+					<15>, <16>, <17>,
+					<18>, <19>, <20>,
+					<21>, <22>, <23>;
 			clock-output-names = "apb1_i2c0", "apb1_i2c1",
 				"apb1_i2c2", "apb1_i2c3", "apb1_can",
 				"apb1_scr", "apb1_ps20", "apb1_ps21",
@@ -699,6 +722,19 @@
 			#size-cells = <0>;
 		};
 
+		usb_otg: usb@01c13000 {
+			compatible = "allwinner,sun4i-a10-musb";
+			reg = <0x01c13000 0x0400>;
+			clocks = <&ahb_gates 0>;
+			interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "mc";
+			phys = <&usbphy 0>;
+			phy-names = "usb";
+			extcon = <&usbphy 0>;
+			allwinner,sram = <&otg_sram 1>;
+			status = "disabled";
+		};
+
 		usbphy: phy@01c13400 {
 			#phy-cells = <1>;
 			compatible = "allwinner,sun7i-a20-usb-phy";
@@ -731,6 +767,14 @@
 			status = "disabled";
 		};
 
+		crypto: crypto-engine@01c15000 {
+			compatible = "allwinner,sun4i-a10-crypto";
+			reg = <0x01c15000 0x1000>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb_gates 5>, <&ss_clk>;
+			clock-names = "ahb", "mod";
+		};
+
 		spi2: spi@01c17000 {
 			compatible = "allwinner,sun4i-a10-spi";
 			reg = <0x01c17000 0x1000>;
@@ -794,8 +838,7 @@
 			clocks = <&apb0_gates 5>;
 			gpio-controller;
 			interrupt-controller;
-			#interrupt-cells = <2>;
-			#size-cells = <0>;
+			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
 			pwm0_pins_a: pwm0@0 {
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index 7abd0ae..27a925e 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -180,6 +180,15 @@
 			compatible = "allwinner,sun8i-a23-ahb1-gates-clk";
 			reg = <0x01c20060 0x8>;
 			clocks = <&ahb1>;
+			clock-indices = <1>, <6>,
+					<8>, <9>, <10>,
+					<13>, <14>,
+					<19>, <20>,
+					<21>, <24>, <26>,
+					<29>, <32>, <36>,
+					<40>, <44>, <46>,
+					<52>, <54>,
+					<57>;
 			clock-output-names = "ahb1_mipidsi", "ahb1_dma",
 					"ahb1_mmc0", "ahb1_mmc1", "ahb1_mmc2",
 					"ahb1_nand", "ahb1_sdram",
@@ -196,6 +205,8 @@
 			compatible = "allwinner,sun8i-a23-apb1-gates-clk";
 			reg = <0x01c20068 0x4>;
 			clocks = <&apb1>;
+			clock-indices = <0>, <5>,
+					<12>, <13>;
 			clock-output-names = "apb1_codec", "apb1_pio",
 					"apb1_daudio0",	"apb1_daudio1";
 		};
@@ -213,6 +224,10 @@
 			compatible = "allwinner,sun8i-a23-apb2-gates-clk";
 			reg = <0x01c2006c 0x4>;
 			clocks = <&apb2>;
+			clock-indices = <0>, <1>,
+					<2>, <16>,
+					<17>, <18>,
+					<19>, <20>;
 			clock-output-names = "apb2_i2c0", "apb2_i2c1",
 					"apb2_i2c2", "apb2_uart0",
 					"apb2_uart1", "apb2_uart2",
@@ -332,6 +347,28 @@
 			#size-cells = <0>;
 		};
 
+		ehci0: usb@01c1a000 {
+			compatible = "allwinner,sun8i-a23-ehci", "generic-ehci";
+			reg = <0x01c1a000 0x100>;
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb1_gates 26>;
+			resets = <&ahb1_rst 26>;
+			phys = <&usbphy 1>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
+		ohci0: usb@01c1a400 {
+			compatible = "allwinner,sun8i-a23-ohci", "generic-ohci";
+			reg = <0x01c1a400 0x100>;
+			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&ahb1_gates 29>, <&usb_clk 16>;
+			resets = <&ahb1_rst 29>;
+			phys = <&usbphy 1>;
+			phy-names = "usb";
+			status = "disabled";
+		};
+
 		pio: pinctrl@01c20800 {
 			/* compatible gets set in SoC specific dtsi file */
 			reg = <0x01c20800 0x400>;
@@ -339,8 +376,7 @@
 			clocks = <&apb1_gates 5>;
 			gpio-controller;
 			interrupt-controller;
-			#address-cells = <1>;
-			#size-cells = <0>;
+			#interrupt-cells = <3>;
 			#gpio-cells = <3>;
 
 			uart0_pins_a: uart0@0 {
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
index 95134c6..8d9da68 100644
--- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
+++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
@@ -125,3 +125,12 @@
 	pinctrl-0 = <&r_uart_pins_a>;
 	status = "okay";
 };
+
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 8698f7a..2cc27c7 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -58,6 +58,39 @@
 			clock-output-names = "mbus";
 		};
 	};
+
+	soc@01c00000 {
+		usb_otg: usb@01c19000 {
+			compatible = "allwinner,sun6i-a31-musb";
+			reg = <0x01c19000 0x0400>;
+			clocks = <&ahb1_gates 24>;
+			resets = <&ahb1_rst 24>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "mc";
+			phys = <&usbphy 0>;
+			phy-names = "usb";
+			extcon = <&usbphy 0>;
+			status = "disabled";
+		};
+
+		usbphy: phy@01c19400 {
+			compatible = "allwinner,sun8i-a23-usb-phy";
+			reg = <0x01c19400 0x10>,
+			      <0x01c1a800 0x4>;
+			reg-names = "phy_ctrl",
+				    "pmu1";
+			clocks = <&usb_clk 8>,
+				 <&usb_clk 9>;
+			clock-names = "usb0_phy",
+				      "usb1_phy";
+			resets = <&usb_clk 0>,
+				 <&usb_clk 1>;
+			reset-names = "usb0_reset",
+				      "usb1_reset";
+			status = "disabled";
+			#phy-cells = <1>;
+		};
+	};
 };
 
 &pio {
diff --git a/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts b/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
index 8667033..1aefc67 100644
--- a/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
+++ b/arch/arm/boot/dts/sun8i-a33-ga10h-v1.1.dts
@@ -61,6 +61,10 @@
 	};
 };
 
+&ehci0 {
+	status = "okay";
+};
+
 &i2c0 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c0_pins_a>;
@@ -109,6 +113,10 @@
 	status = "okay";
 };
 
+&ohci0 {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_q8h: mmc0_cd_pin@0 {
 		allwinner,pins = "PB4";
@@ -123,3 +131,12 @@
 	pinctrl-0 = <&r_uart_pins_a>;
 	status = "okay";
 };
+
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a33-ippo-q8h-v1.2.dts b/arch/arm/boot/dts/sun8i-a33-ippo-q8h-v1.2.dts
new file mode 100644
index 0000000..a438975
--- /dev/null
+++ b/arch/arm/boot/dts/sun8i-a33-ippo-q8h-v1.2.dts
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a33.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+	model = "Ippo Q8H Quad Core Tablet (v1.2)";
+	compatible = "ippo,a33-q8h-v1.2", "allwinner,sun8i-a33";
+
+	aliases {
+		serial0 = &r_uart;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+};
+
+&i2c0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c0_pins_a>;
+	status = "okay";
+};
+
+&i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins_a>;
+	status = "okay";
+};
+
+&lradc {
+	vref-supply = <&reg_vcc3v0>;
+	status = "okay";
+
+	button@200 {
+		label = "Volume Up";
+		linux,code = <KEY_VOLUMEUP>;
+		channel = <0>;
+		voltage = <200000>;
+	};
+
+	button@400 {
+		label = "Volume Down";
+		linux,code = <KEY_VOLUMEDOWN>;
+		channel = <0>;
+		voltage = <400000>;
+	};
+};
+
+&mmc0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_q8h>;
+	vmmc-supply = <&reg_vcc3v0>;
+	bus-width = <4>;
+	cd-gpios = <&pio 1 4 GPIO_ACTIVE_HIGH>; /* PB4 */
+	cd-inverted;
+	status = "okay";
+};
+
+&pio {
+	mmc0_cd_pin_q8h: mmc0_cd_pin@0 {
+		allwinner,pins = "PB4";
+		allwinner,function = "gpio_in";
+		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+		allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
+	};
+};
+
+&r_uart {
+	pinctrl-names = "default";
+	pinctrl-0 = <&r_uart_pins_a>;
+	status = "okay";
+};
+
+/*
+ * FIXME for now we only support host mode and rely on u-boot to have
+ * turned on Vbus which is controlled by the axp223 pmic on the board.
+ *
+ * Once we have axp223 support we should switch to fully supporting otg.
+ */
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&usbphy {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index 5788c29..1d5390d 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -63,6 +63,10 @@
 	};
 };
 
+&ehci0 {
+	status = "okay";
+};
+
 &lradc {
 	vref-supply = <&reg_vcc3v0>;
 	status = "okay";
@@ -113,6 +117,10 @@
 	allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 };
 
+&ohci0 {
+	status = "okay";
+};
+
 &pio {
 	mmc0_cd_pin_sina33: mmc0_cd_pin@0 {
 		allwinner,pins = "PB4";
@@ -127,3 +135,8 @@
 	pinctrl-0 = <&uart0_pins_b>;
 	status = "okay";
 };
+
+&usbphy {
+	status = "okay";
+	usb1_vbus-supply = <&reg_vcc5v0>; /* USB1 VBUS is always on */
+};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 85ee080..faa7d3c 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -80,6 +80,39 @@
 			clock-output-names = "mbus";
 		};
 	};
+
+	soc@01c00000 {
+		usb_otg: usb@01c19000 {
+			compatible = "allwinner,sun8i-a33-musb";
+			reg = <0x01c19000 0x0400>;
+			clocks = <&ahb1_gates 24>;
+			resets = <&ahb1_rst 24>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "mc";
+			phys = <&usbphy 0>;
+			phy-names = "usb";
+			extcon = <&usbphy 0>;
+			status = "disabled";
+		};
+
+		usbphy: phy@01c19400 {
+			compatible = "allwinner,sun8i-a33-usb-phy";
+			reg = <0x01c19400 0x14>,
+			      <0x01c1a800 0x4>;
+			reg-names = "phy_ctrl",
+				    "pmu1";
+			clocks = <&usb_clk 8>,
+				 <&usb_clk 9>;
+			clock-names = "usb0_phy",
+				      "usb1_phy";
+			resets = <&usb_clk 0>,
+				 <&usb_clk 1>;
+			reset-names = "usb0_reset",
+				      "usb1_reset";
+			status = "disabled";
+			#phy-cells = <1>;
+		};
+	};
 };
 
 &pio {
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index a43ad77..5908e3d 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -277,9 +277,12 @@
 			compatible = "allwinner,sun9i-a80-ahb0-gates-clk";
 			reg = <0x06000580 0x4>;
 			clocks = <&ahb0>;
-			clock-indices = <0>, <1>, <3>, <5>, <8>, <12>, <13>,
-					<14>, <15>, <16>, <18>, <20>, <21>,
-					<22>, <23>;
+			clock-indices = <0>, <1>, <3>,
+					<5>, <8>, <12>,
+					<13>, <14>,
+					<15>, <16>, <18>,
+					<20>, <21>, <22>,
+					<23>;
 			clock-output-names = "ahb0_fd", "ahb0_ve", "ahb0_gpu",
 					"ahb0_ss", "ahb0_sd", "ahb0_nand1",
 					"ahb0_nand0", "ahb0_sdram",
@@ -293,7 +296,10 @@
 			compatible = "allwinner,sun9i-a80-ahb1-gates-clk";
 			reg = <0x06000584 0x4>;
 			clocks = <&ahb1>;
-			clock-indices = <0>, <1>, <17>, <21>, <22>, <23>, <24>;
+			clock-indices = <0>, <1>,
+					<17>, <21>,
+					<22>, <23>,
+					<24>;
 			clock-output-names = "ahb1_usbotg", "ahb1_usbhci",
 					"ahb1_gmac", "ahb1_msgbox",
 					"ahb1_spinlock", "ahb1_hstimer",
@@ -305,8 +311,9 @@
 			compatible = "allwinner,sun9i-a80-ahb2-gates-clk";
 			reg = <0x06000588 0x4>;
 			clocks = <&ahb2>;
-			clock-indices = <0>, <1>, <2>, <4>, <5>, <7>, <8>,
-					<11>;
+			clock-indices = <0>, <1>,
+					<2>, <4>, <5>,
+					<7>, <8>, <11>;
 			clock-output-names = "ahb2_lcd0", "ahb2_lcd1",
 					"ahb2_edp", "ahb2_csi", "ahb2_hdmi",
 					"ahb2_de", "ahb2_mp", "ahb2_mipi_dsi";
@@ -317,8 +324,10 @@
 			compatible = "allwinner,sun9i-a80-apb0-gates-clk";
 			reg = <0x06000590 0x4>;
 			clocks = <&apb0>;
-			clock-indices = <1>, <5>, <11>, <12>, <13>, <15>,
-					<17>, <18>, <19>;
+			clock-indices = <1>, <5>,
+					<11>, <12>, <13>,
+					<15>, <17>, <18>,
+					<19>;
 			clock-output-names = "apb0_spdif", "apb0_pio",
 					"apb0_ac97", "apb0_i2s0", "apb0_i2s1",
 					"apb0_lradc", "apb0_gpadc", "apb0_twd",
@@ -330,8 +339,11 @@
 			compatible = "allwinner,sun9i-a80-apb1-gates-clk";
 			reg = <0x06000594 0x4>;
 			clocks = <&apb1>;
-			clock-indices = <0>, <1>, <2>, <3>, <4>,
-					<16>, <17>, <18>, <19>, <20>, <21>;
+			clock-indices = <0>, <1>,
+					<2>, <3>, <4>,
+					<16>, <17>,
+					<18>, <19>,
+					<20>, <21>;
 			clock-output-names = "apb1_i2c0", "apb1_i2c1",
 					"apb1_i2c2", "apb1_i2c3", "apb1_i2c4",
 					"apb1_uart0", "apb1_uart1",
diff --git a/arch/arm/boot/dts/sunxi-common-regulators.dtsi b/arch/arm/boot/dts/sunxi-common-regulators.dtsi
index 51cc838..f1953b0 100644
--- a/arch/arm/boot/dts/sunxi-common-regulators.dtsi
+++ b/arch/arm/boot/dts/sunxi-common-regulators.dtsi
@@ -108,6 +108,7 @@
 		regulator-name = "usb1-vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
 		enable-active-high;
 		gpio = <&pio 7 6 GPIO_ACTIVE_HIGH>;
 		status = "disabled";
@@ -120,6 +121,7 @@
 		regulator-name = "usb2-vbus";
 		regulator-min-microvolt = <5000000>;
 		regulator-max-microvolt = <5000000>;
+		regulator-boot-on;
 		enable-active-high;
 		gpio = <&pio 7 3 GPIO_ACTIVE_HIGH>;
 		status = "disabled";
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index f58a3d9..9d4f86e 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -214,9 +214,9 @@
 		#dma-cells = <1>;
 	};
 
-	ahb: ahb@6000c004 {
+	ahb: ahb@6000c000 {
 		compatible = "nvidia,tegra114-ahb", "nvidia,tegra30-ahb";
-		reg = <0x6000c004 0x14c>;
+		reg = <0x6000c000 0x150>;
 	};
 
 	gpio: gpio@6000d000 {
@@ -234,6 +234,7 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		gpio-ranges = <&pinmux 0 0 246>;
 	};
 
 	apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index bd43ed6..66b4451 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -53,6 +53,14 @@
 		};
 	};
 
+	gpu@0,57000000 {
+		/*
+		 * Node left disabled on purpose - the bootloader will enable
+		 * it after having set the VPR up
+		 */
+		vdd-supply = <&vdd_gpu>;
+	};
+
 	pinmux: pinmux@0,70000868 {
 		pinctrl-names = "boot";
 		pinctrl-0 = <&state_boot>;
@@ -1462,7 +1470,7 @@
 				vin-ldo9-10-supply = <&vdd_5v0_sys>;
 				vin-ldo11-supply = <&vdd_3v3_run>;
 
-				sd0 {
+				vdd_cpu: sd0 {
 					regulator-name = "+VDD_CPU_AP";
 					regulator-min-microvolt = <700000>;
 					regulator-max-microvolt = <1400000>;
@@ -1514,7 +1522,7 @@
 					regulator-always-on;
 				};
 
-				sd6 {
+				vdd_gpu: sd6 {
 					regulator-name = "+VDD_GPU_AP";
 					regulator-min-microvolt = <650000>;
 					regulator-max-microvolt = <1200000>;
@@ -1694,6 +1702,13 @@
 		non-removable;
 	};
 
+	/* CPU DFLL clock */
+	clock@0,70110000 {
+		status = "okay";
+		vdd-cpu-supply = <&vdd_cpu>;
+		nvidia,i2c-fs-rate = <400000>;
+	};
+
 	ahub@0,70300000 {
 		i2s@0,70301100 {
 			status = "okay";
@@ -1732,6 +1747,12 @@
 		};
 	};
 
+	cpus {
+		cpu@0 {
+			vdd-cpu-supply = <&vdd_cpu>;
+		};
+	};
+
 	gpio-keys {
 		compatible = "gpio-keys";
 
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 79e724b..cfbdf42 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -43,6 +43,14 @@
 		};
 	};
 
+	gpu@0,57000000 {
+		/*
+		 * Node left disabled on purpose - the bootloader will enable
+		 * it after having set the VPR up
+		 */
+		vdd-supply = <&vdd_gpu>;
+	};
+
 	pinmux: pinmux@0,70000868 {
 		pinctrl-names = "boot";
 		pinctrl-0 = <&pinmux_boot>;
@@ -735,7 +743,7 @@
 					regulator-always-on;
 				};
 
-				sd6 {
+				vdd_gpu: sd6 {
 					regulator-name = "+VDD_GPU_AP";
 					regulator-min-microvolt = <650000>;
 					regulator-max-microvolt = <1200000>;
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 01a9f74..1e204a6 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -4,6 +4,7 @@
 #include <dt-bindings/pinctrl/pinctrl-tegra.h>
 #include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/reset/tegra124-car.h>
 #include <dt-bindings/thermal/tegra124-soctherm.h>
 
 #include "skeleton.dtsi"
@@ -188,6 +189,9 @@
 		clock-names = "gpu", "pwr";
 		resets = <&tegra_car 184>;
 		reset-names = "gpu";
+
+		iommus = <&mc TEGRA_SWGROUP_GPU>;
+
 		status = "disabled";
 	};
 
@@ -254,6 +258,7 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		gpio-ranges = <&pinmux 0 0 251>;
 	};
 
 	apbdma: dma@0,60020000 {
@@ -702,6 +707,30 @@
 		#thermal-sensor-cells = <1>;
 	};
 
+	dfll: clock@0,70110000 {
+		compatible = "nvidia,tegra124-dfll";
+		reg = <0 0x70110000 0 0x100>, /* DFLL control */
+		      <0 0x70110000 0 0x100>, /* I2C output control */
+		      <0 0x70110100 0 0x100>, /* Integrated I2C controller */
+		      <0 0x70110200 0 0x100>; /* Look-up table RAM */
+		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&tegra_car TEGRA124_CLK_DFLL_SOC>,
+			 <&tegra_car TEGRA124_CLK_DFLL_REF>,
+			 <&tegra_car TEGRA124_CLK_I2C5>;
+		clock-names = "soc", "ref", "i2c";
+		resets = <&tegra_car TEGRA124_RST_DFLL_DVCO>;
+		reset-names = "dvco";
+		#clock-cells = <0>;
+		clock-output-names = "dfllCPU_out";
+		nvidia,sample-rate = <12500>;
+		nvidia,droop-ctrl = <0x00000f00>;
+		nvidia,force-mode = <1>;
+		nvidia,cf = <10>;
+		nvidia,ci = <0>;
+		nvidia,cg = <2>;
+		status = "disabled";
+	};
+
 	ahub@0,70300000 {
 		compatible = "nvidia,tegra124-ahub";
 		reg = <0x0 0x70300000 0x0 0x200>,
@@ -922,6 +951,15 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a15";
 			reg = <0>;
+
+			clocks = <&tegra_car TEGRA124_CLK_CCLK_G>,
+				 <&tegra_car TEGRA124_CLK_CCLK_LP>,
+				 <&tegra_car TEGRA124_CLK_PLL_X>,
+				 <&tegra_car TEGRA124_CLK_PLL_P>,
+				 <&dfll>;
+			clock-names = "cpu_g", "cpu_lp", "pll_x", "pll_p", "dfll";
+			/* FIXME: what's the actual transition time? */
+			clock-latency = <300000>;
 		};
 
 		cpu@1 {
@@ -943,6 +981,18 @@
 		};
 	};
 
+	pmu {
+		compatible = "arm,cortex-a15-pmu";
+		interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&{/cpus/cpu@0}>,
+				     <&{/cpus/cpu@1}>,
+				     <&{/cpus/cpu@2}>,
+				     <&{/cpus/cpu@3}>;
+	};
+
 	thermal-zones {
 		cpu {
 			polling-delay-passive = <1000>;
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index f444b67..e058709 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -225,9 +225,9 @@
 		#dma-cells = <1>;
 	};
 
-	ahb@6000c004 {
+	ahb@6000c000 {
 		compatible = "nvidia,tegra20-ahb";
-		reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
+		reg = <0x6000c000 0x110>; /* AHB Arbitration + Gizmo Controller */
 	};
 
 	gpio: gpio@6000d000 {
@@ -244,6 +244,7 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		gpio-ranges = <&pinmux 0 0 224>;
 	};
 
 	apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 782b11b..fe04fb5 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -329,9 +329,9 @@
 		#dma-cells = <1>;
 	};
 
-	ahb: ahb@6000c004 {
+	ahb: ahb@6000c000 {
 		compatible = "nvidia,tegra30-ahb";
-		reg = <0x6000c004 0x14c>; /* AHB Arbitration + Gizmo Controller */
+		reg = <0x6000c000 0x150>; /* AHB Arbitration + Gizmo Controller */
 	};
 
 	gpio: gpio@6000d000 {
@@ -349,6 +349,7 @@
 		gpio-controller;
 		#interrupt-cells = <2>;
 		interrupt-controller;
+		gpio-ranges = <&pinmux 0 0 248>;
 	};
 
 	apbmisc@70000800 {
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts b/arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts
index 200b0c9..bfd3bb8 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ph1-ld4-ref.dts
@@ -44,6 +44,7 @@
 
 /dts-v1/;
 /include/ "uniphier-ph1-ld4.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
 /include/ "uniphier-support-card.dtsi"
 
 / {
@@ -57,11 +58,18 @@
 
 	chosen {
 		bootargs = "console=ttyS0,115200";
-		stdout-path = &serialsc;
+		stdout-path = &serial0;
 	};
 
 	aliases {
-		serial0 = &serialsc;
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
 	};
 };
 
@@ -74,6 +82,30 @@
 	ranges = <0x00000000 1 0x03f00000 0x00100000>;
 };
 
-&serialsc {
+&ethsc {
 	interrupts = <0 49 4>;
 };
+
+&serial0 {
+	status = "okay";
+};
+
+&serial2 {
+	status = "okay";
+};
+
+&serial3 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi b/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
index 6a34c56..a6a185f 100644
--- a/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-ld4.dtsi
@@ -64,6 +64,18 @@
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
+
+		uart_clk: uart_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <36864000>;
+		};
+
+		iobus_clk: iobus_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <100000000>;
+		};
 	};
 
 	soc {
@@ -79,12 +91,141 @@
 			#size-cells = <1>;
 		};
 
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			interrupts = <0 33 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			interrupts = <0 35 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			interrupts = <0 37 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			interrupts = <0 29 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		i2c0: i2c@58400000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58400000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			interrupts = <0 41 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58480000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58480000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			interrupts = <0 42 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c2: i2c@58500000 {
+			compatible = "socionext,uniphier-i2c";
+			reg = <0x58500000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			interrupts = <0 43 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <400000>;
+		};
+
+		i2c3: i2c@58580000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58580000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			interrupts = <0 44 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
 		system-bus-controller-misc@59800000 {
 			compatible = "socionext,uniphier-system-bus-controller-misc",
 				     "syscon";
 			reg = <0x59800000 0x2000>;
 		};
 
+		usb0: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb0>;
+			interrupts = <0 80 4>;
+		};
+
+		usb1: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb1>;
+			interrupts = <0 81 4>;
+		};
+
+		usb2: usb@5a820100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a820100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb2>;
+			interrupts = <0 82 4>;
+		};
+
+		pinctrl: pinctrl@5f801000 {
+			compatible = "socionext,ph1-ld4-pinctrl",
+				     "syscon";
+			reg = <0x5f801000 0xe00>;
+		};
+
 		timer@60000200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x60000200 0x20>;
@@ -108,3 +249,5 @@
 		};
 	};
 };
+
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
new file mode 100644
index 0000000..33963ac
--- /dev/null
+++ b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts
@@ -0,0 +1,105 @@
+/*
+ * Device Tree Source for UniPhier PH1-LD6b Reference Board
+ *
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+/include/ "uniphier-ph1-ld6b.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
+/include/ "uniphier-support-card.dtsi"
+
+/ {
+	model = "UniPhier PH1-LD6b Reference Board";
+	compatible = "socionext,ph1-ld6b-ref", "socionext,ph1-ld6b";
+
+	memory {
+		device_type = "memory";
+		reg = <0x80000000 0x80000000>;
+	};
+
+	chosen {
+		bootargs = "console=ttyS0,115200";
+		stdout-path = &serial0;
+	};
+
+	aliases {
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+		i2c6 = &i2c6;
+	};
+};
+
+&extbus {
+	ranges = <0 0x00000000 0x0f000000 0x01000000
+		  1 0x00000000 0x00000000 0x08000000>;
+};
+
+&support_card {
+	ranges = <0x00000000 1 0x03f00000 0x00100000>;
+};
+
+&ethsc {
+	interrupts = <0 50 4>;
+};
+
+&serial0 {
+	status = "okay";
+};
+
+&serial1 {
+	status = "okay";
+};
+
+&serial2 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi b/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi
new file mode 100644
index 0000000..c6499ee
--- /dev/null
+++ b/arch/arm/boot/dts/uniphier-ph1-ld6b.dtsi
@@ -0,0 +1,67 @@
+/*
+ * Device Tree Source for UniPhier PH1-LD6b SoC
+ *
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * PH1-LD6b consists of two silicon dies: D-chip and A-chip.
+ * The D-chip (digital chip) is the same as the ProXstream2 die.
+ * Reuse the ProXstream2 device tree with some properties overridden.
+ */
+/include/ "uniphier-proxstream2.dtsi"
+
+/ {
+	compatible = "socionext,ph1-ld6b";
+};
+
+/* UART3 unavailable: the pads are not wired to the package balls */
+&serial3 {
+	status = "disabled";
+};
+
+/*
+ * PH1-LD6b and ProXstream2 have completely different packages,
+ * which makes the pinctrl driver unshareable.
+ */
+&pinctrl {
+	compatible = "socionext,ph1-ld6b-pinctrl", "syscon";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts b/arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts
index d891135..69a5b7d 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ph1-pro4-ref.dts
@@ -44,6 +44,7 @@
 
 /dts-v1/;
 /include/ "uniphier-ph1-pro4.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
 /include/ "uniphier-support-card.dtsi"
 
 / {
@@ -57,11 +58,20 @@
 
 	chosen {
 		bootargs = "console=ttyS0,115200";
-		stdout-path = &serialsc;
+		stdout-path = &serial0;
 	};
 
 	aliases {
-		serial0 = &serialsc;
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c5 = &i2c5;
+		i2c6 = &i2c6;
 	};
 };
 
@@ -74,6 +84,30 @@
 	ranges = <0x00000000 1 0x03f00000 0x00100000>;
 };
 
-&serialsc {
+&ethsc {
 	interrupts = <0 50 4>;
 };
+
+&serial0 {
+	status = "okay";
+};
+
+&serial1 {
+	status = "okay";
+};
+
+&serial2 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
+
+&usb2 {
+	status = "okay";
+};
+
+&usb3 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi b/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
index dc63360..e8bbc45 100644
--- a/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-pro4.dtsi
@@ -71,6 +71,18 @@
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
+
+		uart_clk: uart_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <73728000>;
+		};
+
+		i2c_clk: i2c_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
 	};
 
 	soc {
@@ -86,12 +98,156 @@
 			#size-cells = <1>;
 		};
 
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			interrupts = <0 33 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			interrupts = <0 35 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			interrupts = <0 37 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			interrupts = <0 29 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		i2c0: i2c@58780000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58780000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			interrupts = <0 41 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58781000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58781000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			interrupts = <0 42 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c2: i2c@58782000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58782000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			interrupts = <0 43 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58783000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58783000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			interrupts = <0 44 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		/* i2c4 does not exist */
+
+		/* chip-internal connection for DMD */
+		i2c5: i2c@58785000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58785000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 25 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for HDMI */
+		i2c6: i2c@58786000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58786000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 26 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
 		system-bus-controller-misc@59800000 {
 			compatible = "socionext,uniphier-system-bus-controller-misc",
 				     "syscon";
 			reg = <0x59800000 0x2000>;
 		};
 
+		usb2: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb2>;
+			interrupts = <0 80 4>;
+		};
+
+		usb3: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb3>;
+			interrupts = <0 81 4>;
+		};
+
+		pinctrl: pinctrl@5f801000 {
+			compatible = "socionext,ph1-pro4-pinctrl",
+				     "syscon";
+			reg = <0x5f801000 0xe00>;
+		};
+
 		timer@60000200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x60000200 0x20>;
@@ -115,3 +271,5 @@
 		};
 	};
 };
+
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi b/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi
new file mode 100644
index 0000000..59c2b12
--- /dev/null
+++ b/arch/arm/boot/dts/uniphier-ph1-pro5.dtsi
@@ -0,0 +1,252 @@
+/*
+ * Device Tree Source for UniPhier PH1-Pro5 SoC
+ *
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "socionext,ph1-pro5";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "socionext,uniphier-smp";
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+		};
+	};
+
+	clocks {
+		arm_timer_clk: arm_timer_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+
+		uart_clk: uart_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <73728000>;
+		};
+
+		i2c_clk: i2c_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
+
+		extbus: extbus {
+			compatible = "simple-bus";
+			#address-cells = <2>;
+			#size-cells = <1>;
+		};
+
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			interrupts = <0 33 4>;
+			clocks = <&uart_clk>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			interrupts = <0 35 4>;
+			clocks = <&uart_clk>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			interrupts = <0 37 4>;
+			clocks = <&uart_clk>;
+		};
+
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			interrupts = <0 177 4>;
+			clocks = <&uart_clk>;
+		};
+
+		i2c0: i2c@58780000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58780000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			interrupts = <0 41 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58781000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58781000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			interrupts = <0 42 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c2: i2c@58782000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58782000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			interrupts = <0 43 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58783000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58783000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			interrupts = <0 44 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		/* i2c4 does not exist */
+
+		/* chip-internal connection for DMD */
+		i2c5: i2c@58785000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58785000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 25 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for HDMI */
+		i2c6: i2c@58786000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58786000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 26 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
+		system-bus-controller-misc@59800000 {
+			compatible = "socionext,uniphier-system-bus-controller-misc",
+				     "syscon";
+			reg = <0x59800000 0x2000>;
+		};
+
+		pinctrl: pinctrl@5f801000 {
+			compatible = "socionext,ph1-pro5-pinctrl", "syscon";
+			reg = <0x5f801000 0xe00>;
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0x304>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0x304>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+		};
+	};
+};
+
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts b/arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts
index 3ea64ae..1a440f8 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ph1-sld3-ref.dts
@@ -44,6 +44,7 @@
 
 /dts-v1/;
 /include/ "uniphier-ph1-sld3.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
 /include/ "uniphier-support-card.dtsi"
 
 / {
@@ -58,11 +59,18 @@
 
 	chosen {
 		bootargs = "console=ttyS0,115200";
-		stdout-path = &serialsc;
+		stdout-path = &serial0;
 	};
 
 	aliases {
-		serial0 = &serialsc;
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
 	};
 };
 
@@ -75,6 +83,38 @@
 	ranges = <0x00000000 1 0x03f00000 0x00100000>;
 };
 
-&serialsc {
+&ethsc {
 	interrupts = <0 49 4>;
 };
+
+&serial0 {
+	status = "okay";
+};
+
+&serial1 {
+	status = "okay";
+};
+
+&serial2 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
+
+&usb2 {
+	status = "okay";
+};
+
+&usb3 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld3.dtsi b/arch/arm/boot/dts/uniphier-ph1-sld3.dtsi
index 248b188..3cc90cd 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld3.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-sld3.dtsi
@@ -71,6 +71,18 @@
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
+
+		uart_clk: uart_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <36864000>;
+		};
+
+		iobus_clk: iobus_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <100000000>;
+		};
 	};
 
 	soc {
@@ -108,10 +120,120 @@
 			      <0x20000100 0x100>;
 		};
 
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			interrupts = <0 33 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			interrupts = <0 35 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			interrupts = <0 37 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		i2c0: i2c@58400000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58400000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 41 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58480000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58480000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 42 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c2: i2c@58500000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58500000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 43 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58580000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58580000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 44 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c4: i2c@58600000 {
+			compatible = "socionext,uniphier-i2c";
+			reg = <0x58600000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 45 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <400000>;
+		};
+
 		system-bus-controller-misc@59800000 {
 			compatible = "socionext,uniphier-system-bus-controller-misc",
 				     "syscon";
 			reg = <0x59800000 0x2000>;
 		};
+
+		usb0: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			interrupts = <0 80 4>;
+		};
+
+		usb1: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			interrupts = <0 81 4>;
+		};
+
+		usb2: usb@5a820100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a820100 0x100>;
+			interrupts = <0 82 4>;
+		};
+
+		usb3: usb@5a830100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a830100 0x100>;
+			interrupts = <0 83 4>;
+		};
 	};
 };
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts b/arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts
index dcdc4f7..955d417 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts
+++ b/arch/arm/boot/dts/uniphier-ph1-sld8-ref.dts
@@ -44,6 +44,7 @@
 
 /dts-v1/;
 /include/ "uniphier-ph1-sld8.dtsi"
+/include/ "uniphier-ref-daughter.dtsi"
 /include/ "uniphier-support-card.dtsi"
 
 / {
@@ -57,11 +58,18 @@
 
 	chosen {
 		bootargs = "console=ttyS0,115200";
-		stdout-path = &serialsc;
+		stdout-path = &serial0;
 	};
 
 	aliases {
-		serial0 = &serialsc;
+		serial0 = &serial0;
+		serial1 = &serial1;
+		serial2 = &serial2;
+		serial3 = &serial3;
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
 	};
 };
 
@@ -74,6 +82,34 @@
 	ranges = <0x00000000 1 0x03f00000 0x00100000>;
 };
 
-&serialsc {
+&ethsc {
 	interrupts = <0 48 4>;
 };
+
+&serial0 {
+	status = "okay";
+};
+
+&serial2 {
+	status = "okay";
+};
+
+&serial3 {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+};
+
+&usb0 {
+	status = "okay";
+};
+
+&usb1 {
+	status = "okay";
+};
+
+&usb2 {
+	status = "okay";
+};
diff --git a/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi b/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
index baa71e1..58067df 100644
--- a/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
+++ b/arch/arm/boot/dts/uniphier-ph1-sld8.dtsi
@@ -64,6 +64,18 @@
 			compatible = "fixed-clock";
 			clock-frequency = <50000000>;
 		};
+
+		uart_clk: uart_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <80000000>;
+		};
+
+		iobus_clk: iobus_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <100000000>;
+		};
 	};
 
 	soc {
@@ -79,12 +91,141 @@
 			#size-cells = <1>;
 		};
 
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			interrupts = <0 33 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			interrupts = <0 35 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			interrupts = <0 37 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			interrupts = <0 29 4>;
+			clocks = <&uart_clk>;
+			fifo-size = <64>;
+		};
+
+		i2c0: i2c@58400000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58400000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			interrupts = <0 41 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58480000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58480000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			interrupts = <0 42 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c2: i2c@58500000 {
+			compatible = "socionext,uniphier-i2c";
+			reg = <0x58500000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			interrupts = <0 43 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <400000>;
+		};
+
+		i2c3: i2c@58580000 {
+			compatible = "socionext,uniphier-i2c";
+			status = "disabled";
+			reg = <0x58580000 0x40>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			interrupts = <0 44 1>;
+			clocks = <&iobus_clk>;
+			clock-frequency = <100000>;
+		};
+
 		system-bus-controller-misc@59800000 {
 			compatible = "socionext,uniphier-system-bus-controller-misc",
 				     "syscon";
 			reg = <0x59800000 0x2000>;
 		};
 
+		usb0: usb@5a800100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a800100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb0>;
+			interrupts = <0 80 4>;
+		};
+
+		usb1: usb@5a810100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a810100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb1>;
+			interrupts = <0 81 4>;
+		};
+
+		usb2: usb@5a820100 {
+			compatible = "socionext,uniphier-ehci", "generic-ehci";
+			status = "disabled";
+			reg = <0x5a820100 0x100>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_usb2>;
+			interrupts = <0 82 4>;
+		};
+
+		pinctrl: pinctrl@5f801000 {
+			compatible = "socionext,ph1-sld8-pinctrl",
+				     "syscon";
+			reg = <0x5f801000 0xe00>;
+		};
+
 		timer@60000200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x60000200 0x20>;
@@ -108,3 +249,5 @@
 		};
 	};
 };
+
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-pinctrl.dtsi b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
new file mode 100644
index 0000000..f67445f
--- /dev/null
+++ b/arch/arm/boot/dts/uniphier-pinctrl.dtsi
@@ -0,0 +1,105 @@
+/*
+ * Device Tree Source for UniPhier SoCs default pinctrl settings
+ *
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&pinctrl {
+	pinctrl_i2c0: i2c0_grp {
+		groups = "i2c0";
+		function = "i2c0";
+	};
+
+	pinctrl_i2c1: i2c1_grp {
+		groups = "i2c1";
+		function = "i2c1";
+	};
+
+	pinctrl_i2c2: i2c2_grp {
+		groups = "i2c2";
+		function = "i2c2";
+	};
+
+	pinctrl_i2c3: i2c3_grp {
+		groups = "i2c3";
+		function = "i2c3";
+	};
+
+	pinctrl_uart0: uart0_grp {
+		groups = "uart0";
+		function = "uart0";
+	};
+
+	pinctrl_uart1: uart1_grp {
+		groups = "uart1";
+		function = "uart1";
+	};
+
+	pinctrl_uart2: uart2_grp {
+		groups = "uart2";
+		function = "uart2";
+	};
+
+	pinctrl_uart3: uart3_grp {
+		groups = "uart3";
+		function = "uart3";
+	};
+
+	pinctrl_usb0: usb0_grp {
+		groups = "usb0";
+		function = "usb0";
+	};
+
+	pinctrl_usb1: usb1_grp {
+		groups = "usb1";
+		function = "usb1";
+	};
+
+	pinctrl_usb2: usb2_grp {
+		groups = "usb2";
+		function = "usb2";
+	};
+
+	pinctrl_usb3: usb3_grp {
+		groups = "usb3";
+		function = "usb3";
+	};
+};
diff --git a/arch/arm/boot/dts/uniphier-proxstream2.dtsi b/arch/arm/boot/dts/uniphier-proxstream2.dtsi
new file mode 100644
index 0000000..4c7b246
--- /dev/null
+++ b/arch/arm/boot/dts/uniphier-proxstream2.dtsi
@@ -0,0 +1,273 @@
+/*
+ * Device Tree Source for UniPhier ProXstream2 SoC
+ *
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+	compatible = "socionext,proxstream2";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		enable-method = "socionext,uniphier-smp";
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+		};
+
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <2>;
+		};
+
+		cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <3>;
+		};
+	};
+
+	clocks {
+		arm_timer_clk: arm_timer_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+
+		uart_clk: uart_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <88900000>;
+		};
+
+		i2c_clk: i2c_clk {
+			#clock-cells = <0>;
+			compatible = "fixed-clock";
+			clock-frequency = <50000000>;
+		};
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		interrupt-parent = <&intc>;
+
+		extbus: extbus {
+			compatible = "simple-bus";
+			#address-cells = <2>;
+			#size-cells = <1>;
+		};
+
+		serial0: serial@54006800 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006800 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart0>;
+			interrupts = <0 33 4>;
+			clocks = <&uart_clk>;
+		};
+
+		serial1: serial@54006900 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006900 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart1>;
+			interrupts = <0 35 4>;
+			clocks = <&uart_clk>;
+		};
+
+		serial2: serial@54006a00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006a00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart2>;
+			interrupts = <0 37 4>;
+			clocks = <&uart_clk>;
+		};
+
+		serial3: serial@54006b00 {
+			compatible = "socionext,uniphier-uart";
+			status = "disabled";
+			reg = <0x54006b00 0x40>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_uart3>;
+			interrupts = <0 177 4>;
+			clocks = <&uart_clk>;
+		};
+
+		i2c0: i2c@58780000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58780000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c0>;
+			interrupts = <0 41 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c1: i2c@58781000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58781000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c1>;
+			interrupts = <0 42 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c2: i2c@58782000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58782000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c2>;
+			interrupts = <0 43 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		i2c3: i2c@58783000 {
+			compatible = "socionext,uniphier-fi2c";
+			status = "disabled";
+			reg = <0x58783000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			pinctrl-names = "default";
+			pinctrl-0 = <&pinctrl_i2c3>;
+			interrupts = <0 44 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <100000>;
+		};
+
+		/* chip-internal connection for DMD */
+		i2c4: i2c@58784000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58784000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 45 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for STM */
+		i2c5: i2c@58785000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58785000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 25 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
+		/* chip-internal connection for HDMI */
+		i2c6: i2c@58786000 {
+			compatible = "socionext,uniphier-fi2c";
+			reg = <0x58786000 0x80>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			interrupts = <0 26 4>;
+			clocks = <&i2c_clk>;
+			clock-frequency = <400000>;
+		};
+
+		system-bus-controller-misc@59800000 {
+			compatible = "socionext,uniphier-system-bus-controller-misc",
+				     "syscon";
+			reg = <0x59800000 0x2000>;
+		};
+
+		pinctrl: pinctrl@5f801000 {
+			compatible = "socionext,proxstream2-pinctrl", "syscon";
+			reg = <0x5f801000 0xe00>;
+		};
+
+		timer@60000200 {
+			compatible = "arm,cortex-a9-global-timer";
+			reg = <0x60000200 0x20>;
+			interrupts = <1 11 0xf04>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		timer@60000600 {
+			compatible = "arm,cortex-a9-twd-timer";
+			reg = <0x60000600 0x20>;
+			interrupts = <1 13 0xf04>;
+			clocks = <&arm_timer_clk>;
+		};
+
+		intc: interrupt-controller@60001000 {
+			compatible = "arm,cortex-a9-gic";
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			reg = <0x60001000 0x1000>,
+			      <0x60000100 0x100>;
+		};
+	};
+};
+
+/include/ "uniphier-pinctrl.dtsi"
diff --git a/arch/arm/boot/dts/uniphier-ref-daughter.dtsi b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
new file mode 100644
index 0000000..3d29d28
--- /dev/null
+++ b/arch/arm/boot/dts/uniphier-ref-daughter.dtsi
@@ -0,0 +1,50 @@
+/*
+ * Device Tree Source for UniPhier Reference Daughter Board
+ *
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+&i2c0 {
+	eeprom {
+		compatible = "microchip,24lc128";
+		reg = <0x50>;
+	};
+};
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 2efb205..21b0287 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -101,6 +101,8 @@
 				clock-names = "refclk", "timclk", "apb_pclk";
 				#clock-cells = <1>;
 				clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+				assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+				assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
 			};
 
 			/* PCI-E I2C bus */
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index cb3090f..e712c0a 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -100,6 +100,8 @@
 				clock-names = "refclk", "timclk", "apb_pclk";
 				#clock-cells = <1>;
 				clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+				assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+				assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
 			};
 
 			/* PCI-E I2C bus */
diff --git a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
index 606753e..ed65e0f 100644
--- a/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
+++ b/arch/arm/boot/dts/vf-colibri-eval-v3.dtsi
@@ -9,7 +9,7 @@
 
 / {
 	chosen {
-		bootargs = "console=ttyLP0,115200";
+		stdout-path = "serial0:115200n8";
 	};
 
 	clk16m: clk16m {
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 4aa3351..6865137 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -226,7 +226,10 @@
 				interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks VF610_CLK_ADC0>;
 				clock-names = "adc";
+				#io-channel-cells = <1>;
 				status = "disabled";
+				fsl,adck-max-frequency = <30000000>, <40000000>,
+							<20000000>;
 			};
 
 			wdoga5: wdog@4003e000 {
@@ -242,7 +245,8 @@
 				#address-cells = <1>;
 				#size-cells = <0>;
 				compatible = "fsl,vf610-qspi";
-				reg = <0x40044000 0x1000>;
+				reg = <0x40044000 0x1000>, <0x20000000 0x10000000>;
+				reg-names = "QuadSPI", "QuadSPI-memory";
 				interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks VF610_CLK_QSPI0_EN>,
 					<&clks VF610_CLK_QSPI0>;
@@ -347,6 +351,20 @@
 				status = "disabled";
 			};
 
+			i2c1: i2c@40067000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,vf610-i2c";
+				reg = <0x40067000 0x1000>;
+				interrupts = <72 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks VF610_CLK_I2C1>;
+				clock-names = "ipg";
+				dmas = <&edma0 0 52>,
+					<&edma0 0 53>;
+				dma-names = "rx","tx";
+				status = "disabled";
+			};
+
 			clks: ccm@4006b000 {
 				compatible = "fsl,vf610-ccm";
 				reg = <0x4006b000 0x1000>;
@@ -404,14 +422,13 @@
 			};
 
 			snvs0: snvs@400a7000 {
-			    compatible = "fsl,sec-v4.0-mon", "simple-bus";
-				#address-cells = <1>;
-				#size-cells = <1>;
-				ranges = <0 0x400a7000 0x2000>;
+			    compatible = "fsl,sec-v4.0-mon", "syscon", "simple-mfd";
+				reg = <0x400a7000 0x2000>;
 
-				snvsrtc: snvs-rtc-lp@34 {
+				snvsrtc: snvs-rtc-lp {
 					compatible = "fsl,sec-v4.0-mon-rtc-lp";
-					reg = <0x34 0x58>;
+					regmap = <&snvs0>;
+					offset = <0x34>;
 					interrupts = <100 IRQ_TYPE_LEVEL_HIGH>;
 					clocks = <&clks VF610_CLK_SNVS>;
 					clock-names = "snvs-rtc";
@@ -442,9 +459,23 @@
 				interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks VF610_CLK_ADC1>;
 				clock-names = "adc";
+				#io-channel-cells = <1>;
 				status = "disabled";
 			};
 
+			esdhc0: esdhc@400b1000 {
+				compatible = "fsl,imx53-esdhc";
+				reg = <0x400b1000 0x1000>;
+				interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks VF610_CLK_IPG_BUS>,
+					<&clks VF610_CLK_PLATFORM_BUS>,
+					<&clks VF610_CLK_ESDHC0>;
+				clock-names = "ipg", "ahb", "per";
+				status = "disabled";
+				fsl,adck-max-frequency = <30000000>, <40000000>,
+							<20000000>;
+			};
+
 			esdhc1: esdhc@400b2000 {
 				compatible = "fsl,imx53-esdhc";
 				reg = <0x400b2000 0x1000>;
@@ -488,6 +519,19 @@
 				status = "disabled";
 			};
 
+			qspi1: quadspi@400c4000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,vf610-qspi";
+				reg = <0x400c4000 0x1000>, <0x50000000 0x10000000>;
+				reg-names = "QuadSPI", "QuadSPI-memory";
+				interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks VF610_CLK_QSPI1_EN>,
+					<&clks VF610_CLK_QSPI1>;
+				clock-names = "qspi_en", "qspi";
+				status = "disabled";
+			};
+
 			fec0: ethernet@400d0000 {
 				compatible = "fsl,mvf600-fec";
 				reg = <0x400d0000 0x1000>;
@@ -520,6 +564,33 @@
 				status = "disabled";
 			};
 
+			i2c2: i2c@400e6000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,vf610-i2c";
+				reg = <0x400e6000 0x1000>;
+				interrupts = <73 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks VF610_CLK_I2C2>;
+				clock-names = "ipg";
+				dmas = <&edma0 1 36>,
+					<&edma0 1 37>;
+				dma-names = "rx","tx";
+				status = "disabled";
+			};
+
+			i2c3: i2c@400e7000 {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "fsl,vf610-i2c";
+				reg = <0x400e7000 0x1000>;
+				interrupts = <74 IRQ_TYPE_LEVEL_HIGH>;
+				clocks = <&clks VF610_CLK_I2C3>;
+				clock-names = "ipg";
+				dmas = <&edma0 1 38>,
+					<&edma0 1 39>;
+				dma-names = "rx","tx";
+				status = "disabled";
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index 0691508..dc0457e 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -57,7 +57,7 @@
 		regulator-always-on;
 	};
 
-	amba {
+	amba: amba {
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <1>;
@@ -139,6 +139,7 @@
 		L2: cache-controller@f8f02000 {
 			compatible = "arm,pl310-cache";
 			reg = <0xF8F02000 0x1000>;
+			interrupts = <0 2 4>;
 			arm,data-latency = <3 2 2>;
 			arm,tag-latency = <2 2 2>;
 			cache-unified;
@@ -258,6 +259,13 @@
 				reg = <0x100 0x100>;
 			};
 
+			rstc: rstc@200 {
+				compatible = "xlnx,zynq-reset";
+				reg = <0x200 0x48>;
+				#reset-cells = <1>;
+				syscon = <&slcr>;
+			};
+
 			pinctrl0: pinctrl@700 {
 				compatible = "xlnx,pinctrl-zynq";
 				reg = <0x700 0x200>;
diff --git a/arch/arm/boot/dts/zynq-zc702.dts b/arch/arm/boot/dts/zynq-zc702.dts
index fb59d34..5df8f81 100644
--- a/arch/arm/boot/dts/zynq-zc702.dts
+++ b/arch/arm/boot/dts/zynq-zc702.dts
@@ -34,6 +34,27 @@
 		stdout-path = "serial0:115200n8";
 	};
 
+	gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		autorepeat;
+		sw14 {
+			label = "sw14";
+			gpios = <&gpio0 12 0>;
+			linux,code = <108>; /* down */
+			gpio-key,wakeup;
+			autorepeat;
+		};
+		sw13 {
+			label = "sw13";
+			gpios = <&gpio0 14 0>;
+			linux,code = <103>; /* up */
+			gpio-key,wakeup;
+			autorepeat;
+		};
+	};
+
 	leds {
 		compatible = "gpio-leds";
 
@@ -50,6 +71,13 @@
 	};
 };
 
+&amba {
+	ocm: sram@fffc0000 {
+		compatible = "mmio-sram";
+		reg = <0xfffc0000 0x10000>;
+	};
+};
+
 &can0 {
 	status = "okay";
 	pinctrl-names = "default";
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 5114b68..96dabcb 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -91,7 +91,7 @@
 	for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
 		irq_set_chip_and_handler(irq, &it8152_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 }
 
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index b55c362..304adea 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -138,9 +138,9 @@
 	},
 };
 
-static void locomo_handler(unsigned int irq, struct irq_desc *desc)
+static void locomo_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct locomo *lchip = irq_get_chip_data(irq);
+	struct locomo *lchip = irq_desc_get_chip_data(desc);
 	int req, i;
 
 	/* Acknowledge the parent IRQ */
@@ -150,6 +150,8 @@
 	req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
 
 	if (req) {
+		unsigned int irq;
+
 		/* generate the next interrupt(s) */
 		irq = lchip->irq_base;
 		for (i = 0; i <= 3; i++, irq++) {
@@ -205,7 +207,7 @@
 	for ( ; irq <= lchip->irq_base + 3; irq++) {
 		irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq);
 		irq_set_chip_data(irq, lchip);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 }
 
@@ -475,8 +477,7 @@
 	device_for_each_child(lchip->dev, NULL, locomo_remove_child);
 
 	if (lchip->irq != NO_IRQ) {
-		irq_set_chained_handler(lchip->irq, NULL);
-		irq_set_handler_data(lchip->irq, NULL);
+		irq_set_chained_handler_and_data(lchip->irq, NULL, NULL);
 	}
 
 	iounmap(lchip->base);
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7..2b25b60 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@
 	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
 {
-	/*
-	 * We assume all CPUs may be shut down.
-	 * This would be the hook to use for eventual Secure
-	 * OS migration requests as described in the PSCI spec.
-	 */
-	return 0;
+	/* We assume all CPUs may be shut down. */
+	return true;
 }
 
 static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_kill		= mcpm_cpu_kill,
-	.cpu_disable		= mcpm_cpu_disable,
+	.cpu_can_disable	= mcpm_cpu_can_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
 };
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 93ee70d..4f29025 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -197,10 +197,11 @@
  * will call us again if there are more interrupts to process.
  */
 static void
-sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
+sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int stat0, stat1, i;
-	struct sa1111 *sachip = irq_get_handler_data(irq);
+	struct sa1111 *sachip = irq_desc_get_handler_data(desc);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 
 	stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
@@ -486,7 +487,7 @@
 		irq_set_chip_and_handler(irq, &sa1111_low_chip,
 					 handle_edge_irq);
 		irq_set_chip_data(irq, sachip);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
@@ -494,7 +495,7 @@
 		irq_set_chip_and_handler(irq, &sa1111_high_chip,
 					 handle_edge_irq);
 		irq_set_chip_data(irq, sachip);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	/*
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
deleted file mode 100644
index 5666e37..0000000
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ /dev/null
@@ -1,162 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE_LEGACY=y
-CONFIG_ARCH_R8A7740=y
-CONFIG_MACH_ARMADILLO800EVA=y
-# CONFIG_SH_TIMER_TMU is not set
-CONFIG_ARM_THUMB=y
-CONFIG_CACHE_L2X0=y
-CONFIG_ARM_ERRATA_430973=y
-CONFIG_ARM_ERRATA_458693=y
-CONFIG_ARM_ERRATA_460075=y
-CONFIG_PL310_ERRATA_588369=y
-CONFIG_ARM_ERRATA_720789=y
-CONFIG_PL310_ERRATA_727915=y
-CONFIG_ARM_ERRATA_743622=y
-CONFIG_ARM_ERRATA_751472=y
-CONFIG_PL310_ERRATA_753970=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_PL310_ERRATA_769419=y
-CONFIG_ARM_ERRATA_775420=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_KEXEC=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
-# CONFIG_NET_VENDOR_CIRRUS is not set
-# CONFIG_NET_VENDOR_FARADAY is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-CONFIG_SH_ETH=y
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ST1232=y
-# CONFIG_SERIO is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=9
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_GPIO=y
-CONFIG_I2C_SH_MOBILE=y
-# CONFIG_HWMON is not set
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_GPIO=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_SOC_CAMERA=y
-CONFIG_SOC_CAMERA_MT9T112=y
-CONFIG_VIDEO_SH_MOBILE_CEU=y
-CONFIG_FB=y
-CONFIG_FB_SH_MOBILE_LCDC=y
-CONFIG_FB_SH_MOBILE_HDMI=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_PWM=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-CONFIG_SND_SOC_SH4_FSI=y
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-CONFIG_USB_RENESAS_USBHS=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_RENESAS_USBHS_UDC=y
-CONFIG_USB_ETH=m
-CONFIG_MMC=y
-CONFIG_MMC_SDHI=y
-CONFIG_MMC_SH_MMCIF=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_S35390A=y
-CONFIG_DMADEVICES=y
-CONFIG_SH_DMAE=y
-CONFIG_UIO=y
-CONFIG_UIO_PDRV_GENIRQ=y
-CONFIG_PWM=y
-CONFIG_PWM_RENESAS_TPU=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_ARM_UNWIND is not set
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 94b5dca..090c5b2 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -73,7 +73,6 @@
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_NETDEVICES=y
-CONFIG_ARM_AT91_ETHER=y
 CONFIG_MACB=y
 # CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_DM9000=y
@@ -131,8 +130,18 @@
 CONFIG_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_SSB=m
+CONFIG_MFD_ATMEL_HLCDC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_VIDEO_ATMEL_ISI=y
+CONFIG_SOC_CAMERA_OV2640=m
+CONFIG_DRM=y
+CONFIG_DRM_ATMEL_HLCDC=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_FB=y
 CONFIG_FB_ATMEL=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -140,6 +149,7 @@
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_ATMEL_LCDC=y
 # CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 CONFIG_LOGO=y
@@ -186,6 +196,7 @@
 CONFIG_AT91_ADC=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=y
+CONFIG_PWM_ATMEL_HLCDC_PWM=y
 CONFIG_PWM_ATMEL_TCB=y
 CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 72233b9..a7846d6 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -1,4 +1,6 @@
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -15,19 +17,25 @@
 CONFIG_MACH_ADSSPHERE=y
 CONFIG_MACH_EDB9301=y
 CONFIG_MACH_EDB9302=y
+CONFIG_MACH_EDB9302A=y
 CONFIG_MACH_EDB9307=y
+CONFIG_MACH_EDB9307A=y
 CONFIG_MACH_EDB9312=y
 CONFIG_MACH_EDB9315=y
+CONFIG_MACH_EDB9315A=y
 CONFIG_MACH_GESBC9312=y
 CONFIG_MACH_MICRO9H=y
+CONFIG_MACH_MICRO9M=y
 CONFIG_MACH_MICRO9L=y
+CONFIG_MACH_MICRO9S=y
+CONFIG_MACH_SIM_ONE=y
+CONFIG_MACH_SNAPPER_CL15=y
 CONFIG_MACH_TS72XX=y
+CONFIG_MACH_VISION_EP9307=y
 CONFIG_AEABI=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="console=ttyAM0,115200 root=/dev/nfs ip=bootp"
-CONFIG_FPE_NWFPE=y
-CONFIG_FPE_NWFPE_XP=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -74,11 +82,18 @@
 # CONFIG_HW_RANDOM is not set
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
 CONFIG_I2C_DEBUG_CORE=y
 CONFIG_I2C_DEBUG_ALGO=y
 CONFIG_I2C_DEBUG_BUS=y
+CONFIG_SPI=y
+CONFIG_SPI_EP93XX=y
 CONFIG_WATCHDOG=y
 CONFIG_EP93XX_WATCHDOG=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_EP93XX=y
+CONFIG_LOGO=y
 CONFIG_USB=y
 CONFIG_USB_DYNAMIC_MINORS=y
 CONFIG_USB_OHCI_HCD=y
@@ -87,13 +102,23 @@
 CONFIG_USB_SERIAL=y
 CONFIG_USB_SERIAL_CONSOLE=y
 CONFIG_USB_SERIAL_PL2303=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_RTC_DRV_M48T86=y
 CONFIG_RTC_DRV_EP93XX=y
+CONFIG_DMADEVICES=y
+CONFIG_EP93XX_DMA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index b47863d..79194c6 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -3,6 +3,8 @@
 CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
 CONFIG_RELAY=y
@@ -38,9 +40,10 @@
 CONFIG_SOC_IMX6Q=y
 CONFIG_SOC_IMX6SL=y
 CONFIG_SOC_IMX6SX=y
+CONFIG_SOC_IMX6UL=y
 CONFIG_SOC_IMX7D=y
-CONFIG_SOC_VF610=y
 CONFIG_SOC_LS1021A=y
+CONFIG_SOC_VF610=y
 CONFIG_PCI=y
 CONFIG_PCI_IMX6=y
 CONFIG_SMP=y
@@ -50,13 +53,13 @@
 CONFIG_HIGHMEM=y
 CONFIG_CMA=y
 CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
+CONFIG_KEXEC=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TEST_SUSPEND=y
 CONFIG_NET=y
@@ -75,8 +78,8 @@
 CONFIG_CAN_FLEXCAN=y
 CONFIG_BT=y
 CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_LL=y
-CONFIG_BT_HCIUART_3WIRE=y
 CONFIG_CFG80211=y
 CONFIG_MAC80211=y
 CONFIG_RFKILL=y
@@ -150,6 +153,7 @@
 CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_EVBUG=m
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_SNVS_PWRKEY=y
 CONFIG_KEYBOARD_IMX=y
 CONFIG_MOUSE_PS2=m
 CONFIG_MOUSE_PS2_ELANTECH=y
@@ -185,6 +189,7 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_IMX=y
 CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
 CONFIG_SENSORS_GPIO_FAN=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
@@ -219,10 +224,16 @@
 CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_IMX=y
+CONFIG_DRM_IMX_FB_HELPER=y
+CONFIG_DRM_IMX_PARALLEL_DISPLAY=y
+CONFIG_DRM_IMX_TVE=y
+CONFIG_DRM_IMX_LDB=y
+CONFIG_DRM_IMX_HDMI=y
+CONFIG_FB_MXS=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_L4F00242T03=y
 CONFIG_LCD_PLATFORM=y
-CONFIG_FB_MXS=y
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_GPIO=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -231,7 +242,7 @@
 CONFIG_SND=y
 CONFIG_SND_USB_AUDIO=m
 CONFIG_SND_SOC=y
-CONFIG_SND_SOC_FSL_SAI=y
+CONFIG_SND_SOC_FSL_ASRC=y
 CONFIG_SND_IMX_SOC=y
 CONFIG_SND_SOC_PHYCORE_AC97=y
 CONFIG_SND_SOC_EUKREA_TLV320=y
@@ -239,6 +250,8 @@
 CONFIG_SND_SOC_IMX_SGTL5000=y
 CONFIG_SND_SOC_IMX_SPDIF=y
 CONFIG_SND_SOC_IMX_MC13783=y
+CONFIG_SND_SOC_FSL_ASOC_CARD=y
+CONFIG_SND_SOC_CS42XX8_I2C=y
 CONFIG_SND_SOC_TLV320AIC3X=y
 CONFIG_SND_SIMPLE_CARD=y
 CONFIG_USB=y
@@ -301,13 +314,6 @@
 CONFIG_MXS_DMA=y
 CONFIG_FSL_EDMA=y
 CONFIG_STAGING=y
-CONFIG_DRM_IMX=y
-CONFIG_DRM_IMX_FB_HELPER=y
-CONFIG_DRM_IMX_PARALLEL_DISPLAY=y
-CONFIG_DRM_IMX_TVE=y
-CONFIG_DRM_IMX_LDB=y
-CONFIG_DRM_IMX_IPUV3=y
-CONFIG_DRM_IMX_HDMI=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_PWM=y
 CONFIG_PWM_IMX=y
@@ -354,8 +360,7 @@
 # CONFIG_FTRACE is not set
 # CONFIG_ARM_UNWIND is not set
 CONFIG_SECURITYFS=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
 CONFIG_CRC_CCITT=m
 CONFIG_CRC_T10DIF=y
 CONFIG_CRC7=m
diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig
deleted file mode 100644
index 23e8d14..0000000
--- a/arch/arm/configs/kzm9g_defconfig
+++ /dev/null
@@ -1,154 +0,0 @@
-# CONFIG_ARM_PATCH_PHYS_VIRT is not set
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_IPC_NS is not set
-# CONFIG_USER_NS is not set
-# CONFIG_PID_NS is not set
-# CONFIG_NET_NS is not set
-CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE_LEGACY=y
-CONFIG_ARCH_SH73A0=y
-CONFIG_MACH_KZM9G=y
-CONFIG_MEMORY_START=0x41000000
-CONFIG_MEMORY_SIZE=0x1f000000
-CONFIG_ARM_ERRATA_743622=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_SMP=y
-CONFIG_SCHED_MC=y
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_HIGHMEM=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_KEXEC=y
-CONFIG_VFP=y
-CONFIG_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_IRDA=y
-CONFIG_SH_IRDA=y
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_NETDEVICES=y
-CONFIG_SMSC911X=y
-# CONFIG_WLAN is not set
-CONFIG_INPUT_SPARSEKMAP=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ST1232=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_ADXL34X=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=9
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_SH_MOBILE=y
-CONFIG_GPIO_PCF857X=y
-# CONFIG_HWMON is not set
-CONFIG_MFD_AS3711=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_AS3711=y
-CONFIG_FB=y
-CONFIG_FB_SH_MOBILE_LCDC=y
-CONFIG_BACKLIGHT_AS3711=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-CONFIG_FB_SH_MOBILE_MERAM=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_VERBOSE_PROCFS is not set
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_SH4_FSI=y
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-CONFIG_USB_R8A66597_HCD=y
-CONFIG_USB_RENESAS_USBHS=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_RENESAS_USBHS_UDC=y
-CONFIG_USB_ETH=m
-CONFIG_USB_MASS_STORAGE=m
-CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
-CONFIG_MMC_SDHI=y
-CONFIG_MMC_SH_MMCIF=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_RS5C372=y
-CONFIG_DMADEVICES=y
-CONFIG_SH_DMAE=y
-CONFIG_ASYNC_TX_DMA=y
-CONFIG_STAGING=y
-CONFIG_IIO=y
-CONFIG_AK8975=y
-# CONFIG_DNOTIFY is not set
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_FTRACE is not set
-# CONFIG_ARM_UNWIND is not set
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
-CONFIG_CRC16=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
deleted file mode 100644
index 3c8b6d8..0000000
--- a/arch/arm/configs/marzen_defconfig
+++ /dev/null
@@ -1,124 +0,0 @@
-# CONFIG_ARM_PATCH_PHYS_VIRT is not set
-CONFIG_EXPERIMENTAL=y
-CONFIG_KERNEL_LZMA=y
-CONFIG_NO_HZ=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_SHMOBILE_LEGACY=y
-CONFIG_ARCH_R8A7779=y
-CONFIG_MACH_MARZEN=y
-CONFIG_MEMORY_START=0x60000000
-CONFIG_MEMORY_SIZE=0x10000000
-CONFIG_SHMOBILE_TIMER_HZ=1024
-# CONFIG_SH_TIMER_CMT is not set
-# CONFIG_SWP_EMULATE is not set
-CONFIG_ARM_ERRATA_430973=y
-CONFIG_ARM_ERRATA_458693=y
-CONFIG_ARM_ERRATA_460075=y
-CONFIG_ARM_ERRATA_743622=y
-CONFIG_ARM_ERRATA_754322=y
-CONFIG_SMP=y
-# CONFIG_ARM_CPU_TOPOLOGY is not set
-CONFIG_AEABI=y
-# CONFIG_OABI_COMPAT is not set
-CONFIG_HIGHMEM=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_ARM_APPENDED_DTB=y
-CONFIG_VFP=y
-CONFIG_KEXEC=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IPV6 is not set
-# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-# CONFIG_FW_LOADER is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_ATA_SFF=y
-CONFIG_ATA_BMDMA=y
-CONFIG_SATA_RCAR=y
-CONFIG_NETDEVICES=y
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_FARADAY is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-CONFIG_SMSC911X=y
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=6
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_RCAR=y
-CONFIG_SPI=y
-CONFIG_SPI_SH_HSPI=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_RCAR=y
-# CONFIG_HWMON is not set
-CONFIG_THERMAL=y
-CONFIG_RCAR_THERMAL=y
-CONFIG_SSB=y
-CONFIG_REGULATOR=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_SOC_CAMERA=y
-CONFIG_VIDEO_RCAR_VIN=y
-# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
-CONFIG_VIDEO_ADV7180=y
-CONFIG_DRM=y
-CONFIG_DRM_RCAR_DU=y
-CONFIG_USB=y
-CONFIG_USB_RCAR_PHY=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHI=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_STORAGE=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_DMADEVICES=y
-CONFIG_RCAR_HPB_DMAE=y
-CONFIG_UIO=y
-CONFIG_UIO_PDRV_GENIRQ=y
-# CONFIG_IOMMU_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_REDUCED=y
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_AVERAGE=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6413390..f84471d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -49,6 +49,8 @@
 CONFIG_SOC_IMX6Q=y
 CONFIG_SOC_IMX6SL=y
 CONFIG_SOC_IMX6SX=y
+CONFIG_SOC_IMX6UL=y
+CONFIG_SOC_IMX7D=y
 CONFIG_SOC_VF610=y
 CONFIG_SOC_LS1021A=y
 CONFIG_ARCH_OMAP3=y
@@ -80,6 +82,7 @@
 CONFIG_ARCH_R8A7779=y
 CONFIG_ARCH_R8A7790=y
 CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
 CONFIG_MACH_MARZEN=y
@@ -98,6 +101,7 @@
 CONFIG_MACH_UX500_DT=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_CA9X4=y
+CONFIG_ARCH_VEXPRESS_TC2_PM=y
 CONFIG_ARCH_WM8850=y
 CONFIG_ARCH_ZYNQ=y
 CONFIG_TRUSTED_FOUNDATIONS=y
@@ -251,6 +255,7 @@
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_ATMEL=y
@@ -301,6 +306,7 @@
 CONFIG_I2C_SH_MOBILE=y
 CONFIG_I2C_SIRF=y
 CONFIG_I2C_ST=y
+CONFIG_I2C_SUN6I_P2WI=y
 CONFIG_I2C_TEGRA=y
 CONFIG_I2C_XILINX=y
 CONFIG_I2C_RCAR=y
@@ -370,7 +376,9 @@
 CONFIG_ORION_WATCHDOG=y
 CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
+CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
+CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
 CONFIG_MFD_BCM590XX=y
@@ -403,6 +411,7 @@
 CONFIG_REGULATOR_MAX77686=y
 CONFIG_REGULATOR_MAX77693=m
 CONFIG_REGULATOR_PALMAS=y
+CONFIG_REGULATOR_PWM=m
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
 CONFIG_REGULATOR_TPS51632=y
@@ -429,8 +438,11 @@
 CONFIG_VIDEO_ADV7180=m
 CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
 CONFIG_DRM_NXP_PTN3460=m
 CONFIG_DRM_PARADE_PS8622=m
+CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_DSI=y
 CONFIG_DRM_EXYNOS_FIMD=y
@@ -454,12 +466,18 @@
 CONFIG_SOUND=m
 CONFIG_SND=m
 CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_HDA_TEGRA=m
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_HDMI=m
 CONFIG_SND_USB_AUDIO=y
 CONFIG_SND_SOC=m
 CONFIG_SND_ATMEL_SOC=m
 CONFIG_SND_ATMEL_SOC_WM8904=m
 CONFIG_SND_SOC_SH4_FSI=m
 CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_RSRC_CARD=m
 CONFIG_SND_SOC_TEGRA=m
 CONFIG_SND_SOC_TEGRA_RT5640=m
 CONFIG_SND_SOC_TEGRA_WM8753=m
@@ -587,6 +605,7 @@
 CONFIG_MXS_DMA=y
 CONFIG_DMA_OMAP=y
 CONFIG_XILINX_VDMA=y
+CONFIG_DMA_SUN6I=y
 CONFIG_STAGING=y
 CONFIG_SENSORS_ISL29018=y
 CONFIG_SENSORS_ISL29028=y
@@ -612,6 +631,7 @@
 CONFIG_PM_DEVFREQ=y
 CONFIG_ARM_TEGRA_DEVFREQ=m
 CONFIG_MEMORY=y
+CONFIG_EXTCON=y
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_AT91_ADC=m
@@ -622,9 +642,11 @@
 CONFIG_PWM_ATMEL_TCB=m
 CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_SUN4I=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
 CONFIG_PHY_HIX5HD2_SATA=y
+CONFIG_PWM_STI=m
 CONFIG_OMAP_USB2=y
 CONFIG_TI_PIPE3=y
 CONFIG_PHY_MIPHY28LP=y
@@ -660,6 +682,7 @@
 CONFIG_CRYPTO_DEV_TEGRA_AES=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_KEYSTONE_IRQ=y
+CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_ARM_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM=m
 CONFIG_CRYPTO_SHA1_ARM_NEON=m
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index cacc9f4..13fcd02 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -58,6 +58,7 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_PXA3xx=y
 CONFIG_MTD_SPI_NOR=y
+CONFIG_EEPROM_AT24=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_ATA=y
 CONFIG_AHCI_MVEBU=y
@@ -83,11 +84,14 @@
 CONFIG_SPI=y
 CONFIG_SPI_ORION=y
 CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_PCA953X=y
 CONFIG_SENSORS_GPIO_FAN=y
 CONFIG_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_SOUND=y
 CONFIG_SND=y
 CONFIG_SND_SOC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index ac521e7..50c84e1 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -136,6 +136,8 @@
 CONFIG_MTD_ONENAND_VERIFY_WRITE=y
 CONFIG_MTD_ONENAND_OMAP2=y
 CONFIG_MTD_UBI=y
+CONFIG_MTD_SPI_NOR=m
+CONFIG_MTD_M25P80=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
@@ -169,6 +171,7 @@
 # CONFIG_NET_VENDOR_STMICRO is not set
 CONFIG_TI_DAVINCI_EMAC=y
 CONFIG_TI_CPSW=y
+CONFIG_TI_CPTS=y
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_AT803X_PHY=y
@@ -208,6 +211,7 @@
 CONFIG_TOUCHSCREEN_PIXCIR=m
 CONFIG_TOUCHSCREEN_TSC2005=m
 CONFIG_TOUCHSCREEN_TSC2007=m
+CONFIG_TOUCHSCREEN_TI_AM335X_TSC=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TPS65218_PWRBUTTON=m
 CONFIG_INPUT_TWL4030_PWRBUTTON=m
@@ -269,6 +273,7 @@
 CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS65910=y
+CONFIG_MFD_TI_AM335X_TSCADC=m
 CONFIG_TWL6040_CORE=y
 CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
@@ -324,10 +329,13 @@
 CONFIG_SND_SOC=m
 CONFIG_SND_EDMA_SOC=m
 CONFIG_SND_AM33XX_SOC_EVM=m
+CONFIG_SND_DAVINCI_SOC_MCASP=m
 CONFIG_SND_OMAP_SOC=m
 CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
 CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
 CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SOC_TLV320AIC3X=m
 CONFIG_HID_GENERIC=m
 CONFIG_USB_HIDDEV=y
 CONFIG_USB_KBD=m
@@ -398,6 +406,8 @@
 CONFIG_EXTCON_USB_GPIO=m
 CONFIG_EXTCON_PALMAS=m
 CONFIG_TI_EMIF=m
+CONFIG_IIO=m
+CONFIG_TI_AM335X_ADC=m
 CONFIG_PWM=y
 CONFIG_PWM_TIECAP=m
 CONFIG_PWM_TIEHRPWM=m
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 855143f..8099417 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -14,8 +14,10 @@
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_BSD_DISKLABEL=y
 CONFIG_ARCH_ORION5X=y
+CONFIG_ARCH_ORION5X_DT=y
 CONFIG_MACH_DB88F5281=y
 CONFIG_MACH_RD88F5182=y
+CONFIG_MACH_RD88F5182_DT=y
 CONFIG_MACH_KUROBOX_PRO=y
 CONFIG_MACH_DNS323=y
 CONFIG_MACH_TS209=y
@@ -41,6 +43,7 @@
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_FPE_NWFPE=y
 CONFIG_VFP=y
 CONFIG_NET=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
index f610230..7cc8e8e 100644
--- a/arch/arm/configs/prima2_defconfig
+++ b/arch/arm/configs/prima2_defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_RELAY=y
@@ -11,14 +10,12 @@
 CONFIG_BSD_DISKLABEL=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_ARCH_SIRF=y
-# CONFIG_SWP_EMULATE is not set
 CONFIG_SMP=y
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_KEXEC=y
 CONFIG_BINFMT_MISC=y
-CONFIG_PM=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
@@ -29,6 +26,7 @@
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
 CONFIG_SERIAL_SIRFSOC=y
 CONFIG_SERIAL_SIRFSOC_CONSOLE=y
 CONFIG_HW_RANDOM=y
@@ -45,10 +43,14 @@
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_SIRF=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_SIRFSOC=y
 CONFIG_DMADEVICES=y
 CONFIG_DMADEVICES_DEBUG=y
 CONFIG_DMADEVICES_VDEBUG=y
 CONFIG_SIRF_DMA=y
+CONFIG_HWSPINLOCK_SIRF=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_MSDOS_FS=y
@@ -60,12 +62,12 @@
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
-CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 # CONFIG_DEBUG_PREEMPT is not set
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_INFO=y
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index e6a6f28..ff7985b 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -67,6 +67,7 @@
 CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_NETDEVICES=y
 CONFIG_DUMMY=y
+CONFIG_KS8851=y
 CONFIG_MDIO_BITBANG=y
 CONFIG_MDIO_GPIO=y
 CONFIG_SLIP=y
@@ -104,8 +105,10 @@
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_MSM=y
 CONFIG_THERMAL=y
+CONFIG_MFD_QCOM_RPM=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_QCOM_RPM=y
 CONFIG_MEDIA_SUPPORT=y
 CONFIG_FB=y
 CONFIG_SOUND=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 9961fbd..89bf31c 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -18,6 +18,7 @@
 CONFIG_ARCH_R8A7779=y
 CONFIG_ARCH_R8A7790=y
 CONFIG_ARCH_R8A7791=y
+CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
 CONFIG_MACH_MARZEN=y
@@ -121,6 +122,7 @@
 CONFIG_DA9063_WATCHDOG=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_DA9063=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_GPIO=y
@@ -151,6 +153,7 @@
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SH4_FSI=y
 CONFIG_SND_SOC_RCAR=y
+CONFIG_SND_SOC_RSRC_CARD=y
 CONFIG_SND_SOC_AK4642=y
 CONFIG_SND_SOC_WM8978=y
 CONFIG_USB=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 7ebc346b..51eea22 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -1,5 +1,7 @@
+CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_CGROUPS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_PERF_EVENTS=y
 CONFIG_MODULES=y
@@ -72,12 +74,12 @@
 CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_SUN6I_P2WI=y
 CONFIG_SPI=y
 CONFIG_SPI_SUN4I=y
 CONFIG_SPI_SUN6I=y
 CONFIG_GPIO_SYSFS=y
 CONFIG_POWER_SUPPLY=y
-CONFIG_POWER_RESET=y
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
 CONFIG_WATCHDOG=y
@@ -109,7 +111,12 @@
 # CONFIG_RTC_INTF_PROC is not set
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_SUNXI=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_SUN6I=y
 # CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXTCON=y
+CONFIG_PWM=y
+CONFIG_PWM_SUN4I=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_SUN9I_USB=y
 CONFIG_EXT4_FS=y
@@ -123,3 +130,4 @@
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_FS=y
+CONFIG_CRYPTO_DEV_SUN4I_SS=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index cdf9abb..9808581 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -42,6 +42,7 @@
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
@@ -146,7 +147,6 @@
 CONFIG_GPIO_PALMAS=y
 CONFIG_GPIO_TPS6586X=y
 CONFIG_GPIO_TPS65910=y
-CONFIG_POWER_SUPPLY=y
 CONFIG_BATTERY_SBS=y
 CONFIG_CHARGER_TPS65090=y
 CONFIG_POWER_RESET=y
@@ -182,11 +182,10 @@
 CONFIG_USB_VIDEO_CLASS=y
 CONFIG_USB_GSPCA=y
 CONFIG_DRM=y
+CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_TEGRA=y
 CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
 # CONFIG_BACKLIGHT_GENERIC is not set
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -196,14 +195,11 @@
 CONFIG_SND=y
 # CONFIG_SND_SUPPORT_OLD_API is not set
 # CONFIG_SND_DRIVERS is not set
-CONFIG_SND_HDA=y
 CONFIG_SND_HDA_TEGRA=y
 CONFIG_SND_HDA_INPUT_BEEP=y
-CONFIG_SND_HDA_INPUT_JACK=y
 CONFIG_SND_HDA_PATCH_LOADER=y
 CONFIG_SND_HDA_CODEC_REALTEK=y
 CONFIG_SND_HDA_CODEC_HDMI=y
-CONFIG_SND_HDA_GENERIC=y
 # CONFIG_SND_ARM is not set
 # CONFIG_SND_SPI is not set
 # CONFIG_SND_USB is not set
@@ -300,5 +296,4 @@
 CONFIG_DEBUG_LL=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_CRYPTO_TWOFISH=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRC_CCITT=y
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
index 6231d36..31e1f53 100644
--- a/arch/arm/crypto/.gitignore
+++ b/arch/arm/crypto/.gitignore
@@ -1 +1,3 @@
 aesbs-core.S
+sha256-core.S
+sha512-core.S
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 30b3bc1..be648eb 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
-generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msgbuf.h
 generic-y += param.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe572..7bbf325 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -108,33 +108,37 @@
 	.endm
 #endif
 
-	.macro asm_trace_hardirqs_off
+	.macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl	trace_hardirqs_off
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on_cond, cond
+	.macro asm_trace_hardirqs_on, cond=al, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
 	/*
 	 * actually the registers should be pushed and pop'd conditionally, but
 	 * after bl the flags are certainly clobbered
 	 */
+	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
+	.endif
 	bl\cond	trace_hardirqs_on
+	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
+	.endif
 #endif
 	.endm
 
-	.macro asm_trace_hardirqs_on
-	asm_trace_hardirqs_on_cond al
-	.endm
-
-	.macro disable_irq
+	.macro disable_irq, save=1
 	disable_irq_notrace
-	asm_trace_hardirqs_off
+	asm_trace_hardirqs_off \save
 	.endm
 
 	.macro enable_irq
@@ -173,7 +177,7 @@
 
 	.macro restore_irqs, oldcpsr
 	tst	\oldcpsr, #PSR_I_BIT
-	asm_trace_hardirqs_on_cond eq
+	asm_trace_hardirqs_on cond=eq
 	restore_irqs_notrace \oldcpsr
 	.endm
 
@@ -445,6 +449,53 @@
 #endif
 	.endm
 
+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
+	.macro	uaccess_save_and_disable, tmp
+	uaccess_save \tmp
+	uaccess_disable \tmp
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index e22c119..fe3ef39 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -57,12 +57,11 @@
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
@@ -75,17 +74,17 @@
 	: "r" (&v->counter), "Ir" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+#define atomic_add_return_relaxed	atomic_add_return_relaxed
+#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
+
+static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
 	int oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -99,10 +98,9 @@
 		    : "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -194,6 +192,13 @@
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+#define atomic_andnot atomic_andnot
+
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(andnot, &= ~, bic)
+ATOMIC_OP(or,  |=, orr)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -290,12 +295,12 @@
 }									\
 
 #define ATOMIC64_OP_RETURN(op, op1, op2)				\
-static inline long long atomic64_##op##_return(long long i, atomic64_t *v) \
+static inline long long							\
+atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
 {									\
 	long long result;						\
 	unsigned long tmp;						\
 									\
-	smp_mb();							\
 	prefetchw(&v->counter);						\
 									\
 	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
@@ -309,8 +314,6 @@
 	: "r" (&v->counter), "r" (i)					\
 	: "cc");							\
 									\
-	smp_mb();							\
-									\
 	return result;							\
 }
 
@@ -321,17 +324,26 @@
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
+#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
+
+#define atomic64_andnot atomic64_andnot
+
+ATOMIC64_OP(and, and, and)
+ATOMIC64_OP(andnot, bic, bic)
+ATOMIC64_OP(or,  orr, orr)
+ATOMIC64_OP(xor, eor, eor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
-					long long new)
+static inline long long
+atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
 {
 	long long oldval;
 	unsigned long res;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	do {
@@ -346,17 +358,15 @@
 		: "cc");
 	} while (res);
 
-	smp_mb();
-
 	return oldval;
 }
+#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
 
-static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
+static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
 {
 	long long result;
 	unsigned long tmp;
 
-	smp_mb();
 	prefetchw(&ptr->counter);
 
 	__asm__ __volatile__("@ atomic64_xchg\n"
@@ -368,10 +378,9 @@
 	: "r" (&ptr->counter), "r" (new)
 	: "cc");
 
-	smp_mb();
-
 	return result;
 }
+#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
 
 static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e..3ff5642 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
-#include <asm/outercache.h>
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
@@ -37,12 +36,20 @@
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()		do { dsb(); outer_sync(); } while (0)
+#define mb()		__arm_heavy_mb()
 #define rmb()		dsb()
-#define wmb()		do { dsb(st); outer_sync(); } while (0)
+#define wmb()		__arm_heavy_mb(st)
 #define dma_rmb()	dmb(osh)
 #define dma_wmb()	dmb(oshst)
 #else
@@ -67,12 +74,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 5638099..e943e6c 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
 static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p |= mask;
@@ -47,9 +47,9 @@
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p &= ~mask;
@@ -59,9 +59,9 @@
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
 	unsigned long flags;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	*p ^= mask;
@@ -73,9 +73,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -90,9 +90,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
@@ -107,9 +107,9 @@
 {
 	unsigned long flags;
 	unsigned int res;
-	unsigned long mask = 1UL << (bit & 31);
+	unsigned long mask = BIT_MASK(bit);
 
-	p += bit >> 5;
+	p += BIT_WORD(bit);
 
 	raw_local_irq_save(flags);
 	res = *p;
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda..d5525bf 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area			cpu_cache.dma_map_area
-#define dmac_unmap_area			cpu_cache.dma_unmap_area
 #define dmac_flush_range		cpu_cache.dma_flush_range
 
 #else
@@ -161,8 +159,6 @@
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -506,4 +502,21 @@
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
 
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side.  This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+	phys_addr_t phys = __pa(addr);
+
+	__cpuc_flush_dcache_area((void *)addr, size);
+	outer_flush_range(phys, phys + size);
+}
+
 #endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 1692a05..916a274 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -35,7 +35,6 @@
 	unsigned int tmp;
 #endif
 
-	smp_mb();
 	prefetchw((const void *)ptr);
 
 	switch (size) {
@@ -98,12 +97,11 @@
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
-	smp_mb();
 
 	return ret;
 }
 
-#define xchg(ptr, x) ({							\
+#define xchg_relaxed(ptr, x) ({						\
 	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
 				   sizeof(*(ptr)));			\
 })
@@ -117,6 +115,8 @@
 #error "SMP is not supported on this platform"
 #endif
 
+#define xchg xchg_relaxed
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
@@ -194,23 +194,11 @@
 	return oldval;
 }
 
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
-					 unsigned long new, int size)
-{
-	unsigned long ret;
-
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg(ptr,o,n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg_mb((ptr),				\
-					 (unsigned long)(o),		\
-					 (unsigned long)(n),		\
-					 sizeof(*(ptr)));		\
+#define cmpxchg_relaxed(ptr,o,n) ({					\
+	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
+				      (unsigned long)(o),		\
+				      (unsigned long)(n),		\
+				      sizeof(*(ptr)));			\
 })
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
@@ -273,25 +261,6 @@
 
 #define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
 
-static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
-						unsigned long long old,
-						unsigned long long new)
-{
-	unsigned long long ret;
-
-	smp_mb();
-	ret = __cmpxchg64(ptr, old, new);
-	smp_mb();
-
-	return ret;
-}
-
-#define cmpxchg64(ptr, o, n) ({						\
-	(__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
-					   (unsigned long long)(o),	\
-					   (unsigned long long)(n));	\
-})
-
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
 #endif /* __ASM_ARM_CMPXCHG_H */
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d..a68b9d8 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define DMA_ERROR_CODE	(~0)
+#define DMA_ERROR_CODE	(~(dma_addr_t)0x0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..e878129 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3
 
 /*
  * Domain types
@@ -55,11 +54,46 @@
 #define DOMAIN_MANAGER	1
 #endif
 
-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -68,17 +102,16 @@
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+		unsigned int domain = get_domain();		\
+		domain &= ~domain_mask(dom);			\
+		domain = domain | domain_val(dom, type);	\
+		set_domain(domain);				\
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
 
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae..58cfe9f 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 enum fixed_addresses {
-	FIX_KMAP_BEGIN,
+	FIX_EARLYCON_MEM_BASE,
+	__end_of_permanent_fixed_addresses,
+
+	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
 	/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@
 	__end_of_fixed_addresses
 };
 
+#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
 
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed828..6795368 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
 
@@ -49,6 +55,7 @@
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 
 	*uval = val;
@@ -73,6 +81,8 @@
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
 
@@ -94,6 +107,7 @@
 		return -EFAULT;
 
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 	*uval = val;
 	preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd..cab07f6 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
-#define dmac_map_area			__glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
 #endif
 
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5f337dc..34f7b69 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -4,23 +4,32 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <asm/unified.h>
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-#ifdef CONFIG_THUMB2_KERNEL
-#define JUMP_LABEL_NOP	"nop.w"
-#else
-#define JUMP_LABEL_NOP	"nop"
-#endif
-
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
-		 JUMP_LABEL_NOP "\n\t"
+		 WASM(nop) "\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".word 1b, %l[l_yes], %c0\n\t"
 		 ".popsection\n\t"
-		 : :  "i" (key) :  : l_yes);
+		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 WASM(b) " %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 : :  "i" (&((char *)key)[branch]) :  : l_yes);
 
 	return false;
 l_yes:
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 28b9bb3..8857d28 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -19,9 +19,7 @@
 struct device;
 
 struct hw_pci {
-#ifdef CONFIG_PCI_MSI
 	struct msi_controller *msi_ctrl;
-#endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
 	void		**private_data;
@@ -42,9 +40,6 @@
  * Per-controller structure
  */
 struct pci_sys_data {
-#ifdef CONFIG_PCI_MSI
-	struct msi_controller *msi_ctrl;
-#endif
 	struct list_head node;
 	int		busnr;		/* primary bus number			*/
 	u64		mem_offset;	/* bus->cpu memory mapping offset	*/
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92f..c2bf24f 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
-	if (outer_cache.sync)
-		outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
 #endif	/* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278..d0131ee 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  *   - section
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
deleted file mode 100644
index 3fc87df..0000000
--- a/arch/arm/include/asm/pmu.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *  linux/arch/arm/include/asm/pmu.h
- *
- *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __ARM_PMU_H__
-#define __ARM_PMU_H__
-
-#include <linux/interrupt.h>
-#include <linux/perf_event.h>
-
-#include <asm/cputype.h>
-
-/*
- * struct arm_pmu_platdata - ARM PMU platform data
- *
- * @handle_irq: an optional handler which will be called from the
- *	interrupt and passed the address of the low level handler,
- *	and can be used to implement any platform specific handling
- *	before or after calling it.
- */
-struct arm_pmu_platdata {
-	irqreturn_t (*handle_irq)(int irq, void *dev,
-				  irq_handler_t pmu_handler);
-};
-
-#ifdef CONFIG_HW_PERF_EVENTS
-
-/*
- * The ARMv7 CPU PMU supports up to 32 event counters.
- */
-#define ARMPMU_MAX_HWEVENTS		32
-
-#define HW_OP_UNSUPPORTED		0xFFFF
-#define C(_x)				PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED		0xFFFF
-
-#define PERF_MAP_ALL_UNSUPPORTED					\
-	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
-
-#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
-[0 ... C(MAX) - 1] = {							\
-	[0 ... C(OP_MAX) - 1] = {					\
-		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
-	},								\
-}
-
-/* The events for a given PMU register set. */
-struct pmu_hw_events {
-	/*
-	 * The events that are active on the PMU for the given index.
-	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
-
-	/*
-	 * A 1 bit for an index indicates that the counter is being used for
-	 * an event. A 0 means that the counter can be used.
-	 */
-	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
-
-	/*
-	 * Hardware lock to serialize accesses to PMU registers. Needed for the
-	 * read/modify/write sequences.
-	 */
-	raw_spinlock_t		pmu_lock;
-
-	/*
-	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
-	 * already have to allocate this struct per cpu.
-	 */
-	struct arm_pmu		*percpu_pmu;
-};
-
-struct arm_pmu {
-	struct pmu	pmu;
-	cpumask_t	active_irqs;
-	cpumask_t	supported_cpus;
-	int		*irq_affinity;
-	char		*name;
-	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
-	void		(*enable)(struct perf_event *event);
-	void		(*disable)(struct perf_event *event);
-	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
-					 struct perf_event *event);
-	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
-					 struct perf_event *event);
-	int		(*set_event_filter)(struct hw_perf_event *evt,
-					    struct perf_event_attr *attr);
-	u32		(*read_counter)(struct perf_event *event);
-	void		(*write_counter)(struct perf_event *event, u32 val);
-	void		(*start)(struct arm_pmu *);
-	void		(*stop)(struct arm_pmu *);
-	void		(*reset)(void *);
-	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
-	void		(*free_irq)(struct arm_pmu *);
-	int		(*map_event)(struct perf_event *event);
-	int		num_events;
-	atomic_t	active_events;
-	struct mutex	reserve_mutex;
-	u64		max_period;
-	struct platform_device	*plat_device;
-	struct pmu_hw_events	__percpu *hw_events;
-	struct notifier_block	hotplug_nb;
-};
-
-#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
-
-int armpmu_register(struct arm_pmu *armpmu, int type);
-
-u64 armpmu_event_update(struct perf_event *event);
-
-int armpmu_event_set_period(struct perf_event *event);
-
-int armpmu_map_event(struct perf_event *event,
-		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-						[PERF_COUNT_HW_CACHE_OP_MAX]
-						[PERF_COUNT_HW_CACHE_RESULT_MAX],
-		     u32 raw_event_mask);
-
-struct pmu_probe_info {
-	unsigned int cpuid;
-	unsigned int mask;
-	int (*init)(struct arm_pmu *);
-};
-
-#define PMU_PROBE(_cpuid, _mask, _fn)	\
-{					\
-	.cpuid = (_cpuid),		\
-	.mask = (_mask),		\
-	.init = (_fn),			\
-}
-
-#define ARM_PMU_PROBE(_cpuid, _fn) \
-	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
-
-#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
-
-#define XSCALE_PMU_PROBE(_version, _fn) \
-	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
-
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table);
-
-#endif /* CONFIG_HW_PERF_EVENTS */
-
-#endif /* __ARM_PMU_H__ */
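Everything from asm/pmu.h is dropped here because the ARM PMU framework is moving out of arch/arm into shared code: the Makefile hunk below stops building perf_event.o, the file itself is deleted further down, and the v6/v7/XScale event sources switch to <linux/perf/arm_pmu.h>. For a platform PMU driver the visible change is essentially just the include; roughly (the "my_*" names below are made up for the sketch):

	#include <linux/perf/arm_pmu.h>		/* previously <asm/pmu.h> */
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int my_pmu_init(struct arm_pmu *pmu);			/* fictitious */
	static const struct pmu_probe_info my_pmu_probe_table[];	/* fictitious */

	static const struct of_device_id my_pmu_of_ids[] = {
		{ .compatible = "vendor,my-pmu", .data = my_pmu_init },
		{ /* sentinel */ }
	};

	static int my_pmu_probe(struct platform_device *pdev)
	{
		/* same arm_pmu_device_probe() entry point, new header */
		return arm_pmu_device_probe(pdev, my_pmu_of_ids, my_pmu_probe_table);
	}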
diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
index c25ef3e..68ee3ce 100644
--- a/arch/arm/include/asm/psci.h
+++ b/arch/arm/include/asm/psci.h
@@ -14,34 +14,11 @@
 #ifndef __ASM_ARM_PSCI_H
 #define __ASM_ARM_PSCI_H
 
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
-
-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
-
-struct psci_operations {
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
 extern struct smp_operations psci_smp_ops;
 
 #ifdef CONFIG_ARM_PSCI
-int psci_init(void);
 bool psci_smp_available(void);
 #else
-static inline int psci_init(void) { return 0; }
 static inline bool psci_smp_available(void) { return false; }
 #endif
 
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1b..ef35665 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@
 #ifdef CONFIG_HOTPLUG_CPU
 	int  (*cpu_kill)(unsigned int cpu);
 	void (*cpu_die)(unsigned int cpu);
+	bool  (*cpu_can_disable)(unsigned int cpu);
 	int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e522..f908071 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	return 0;
+}
+#endif
+
 #endif
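platform_can_hotplug_cpu() pairs with the new cpu_can_disable() method added to struct smp_operations above: a platform can now veto offlining a specific CPU (typically CPU0 on SoCs that must resume on it) instead of the old all-or-nothing behaviour. A hotplug path would presumably gate on it along these lines (hypothetical caller, not from this patch):

	int my_cpu_disable(unsigned int cpu)
	{
		if (!platform_can_hotplug_cpu(cpu))
			return -EPERM;

		/* ... architecture/platform specific teardown ... */
		return 0;
	}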
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e259..12ebfcc 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,9 @@
  * CPU.
  */
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev)	dsb(ish)
+#define __complete_pending_tlbi()	dsb(ish)
+#else
+#define __complete_pending_tlbi()
 #endif
 
 /*
@@ -22,6 +24,7 @@
 
 #define switch_to(prev,next,last)					\
 do {									\
+	__complete_pending_tlbi();					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32ede..d0a1119 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
@@ -136,22 +133,18 @@
 
 /*
  * thread information flags:
- *  TIF_SYSCALL_TRACE	- syscall trace active
- *  TIF_SYSCAL_AUDIT	- syscall auditing active
- *  TIF_SIGPENDING	- signal pending
- *  TIF_NEED_RESCHED	- rescheduling necessary
- *  TIF_NOTIFY_RESUME	- callback before returning to user
  *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
  */
-#define TIF_SIGPENDING		0
-#define TIF_NEED_RESCHED	1
+#define TIF_SIGPENDING		0	/* signal pending */
+#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
-#define TIF_UPROBE		7
-#define TIF_SYSCALL_TRACE	8
-#define TIF_SYSCALL_AUDIT	9
-#define TIF_SYSCALL_TRACEPOINT	10
-#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
+#define TIF_UPROBE		3	/* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
+#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
+#define TIF_SECCOMP		7	/* seccomp syscall filtering active */
+
 #define TIF_NOHZ		12	/* in adaptive nohz mode */
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d0..8cc85a4 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g. via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
+/*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  */
@@ -165,6 +194,7 @@
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +222,7 @@
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
@@ -224,6 +255,7 @@
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +271,7 @@
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 	})
 
@@ -300,20 +333,23 @@
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 
-#define __get_user_asm_byte(x, addr, err)			\
+#define __get_user_asm(x, addr, err, instr)			\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(ldrb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -329,6 +365,9 @@
 	: "r" (addr), "i" (-EFAULT)				\
 	: "cc")
 
+#define __get_user_asm_byte(x, addr, err)			\
+	__get_user_asm(x, addr, err, ldrb)
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)			\
 ({								\
@@ -348,22 +387,7 @@
 #endif
 
 #define __get_user_asm_word(x, addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(ldr) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	mov	%1, #0\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err), "=&r" (x)					\
-	: "r" (addr), "i" (-EFAULT)				\
-	: "cc")
+	__get_user_asm(x, addr, err, ldr)
 
 #define __put_user(x, ptr)						\
 ({									\
@@ -381,9 +405,11 @@
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,11 +417,12 @@
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 
-#define __put_user_asm_byte(x, __pu_addr, err)			\
+#define __put_user_asm(x, __pu_addr, err, instr)		\
 	__asm__ __volatile__(					\
-	"1:	" TUSER(strb) "	%1,[%2],#0\n"			\
+	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
 	"2:\n"							\
 	"	.pushsection .text.fixup,\"ax\"\n"		\
 	"	.align	2\n"					\
@@ -410,6 +437,9 @@
 	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
 	: "cc")
 
+#define __put_user_asm_byte(x, __pu_addr, err)			\
+	__put_user_asm(x, __pu_addr, err, strb)
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)			\
 ({								\
@@ -427,21 +457,7 @@
 #endif
 
 #define __put_user_asm_word(x, __pu_addr, err)			\
-	__asm__ __volatile__(					\
-	"1:	" TUSER(str) "	%1,[%2],#0\n"			\
-	"2:\n"							\
-	"	.pushsection .text.fixup,\"ax\"\n"		\
-	"	.align	2\n"					\
-	"3:	mov	%0, %3\n"				\
-	"	b	2b\n"					\
-	"	.popsection\n"					\
-	"	.pushsection __ex_table,\"a\"\n"		\
-	"	.align	3\n"					\
-	"	.long	1b, 3b\n"				\
-	"	.popsection"					\
-	: "+r" (err)						\
-	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
-	: "cc")
+	__put_user_asm(x, __pu_addr, err, str)
 
 #ifndef __ARMEB__
 #define	__reg_oper0	"%R2"
@@ -474,11 +490,46 @@
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@
 	return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
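The shape of every change in this header is the same: open the user-access window with uaccess_save_and_enable(), perform the access, then close the window with uaccess_restore(), so that under CONFIG_CPU_SW_DOMAIN_PAN userspace is only reachable inside these bracketed regions. Any new helper that touches user memory directly would be expected to follow the same bracketing, e.g. (a hypothetical helper, shown only to illustrate the pattern):

	static inline int my_peek_user_byte(u8 *dst, const u8 __user *src)
	{
		unsigned long addr = (unsigned long)src;
		unsigned int ua_flags;
		unsigned long val;
		int err = 0;

		ua_flags = uaccess_save_and_enable();	/* open the window   */
		__get_user_asm_byte(val, addr, err);	/* the actual access */
		uaccess_restore(ua_flags);		/* close it again    */

		*dst = val;
		return err;
	}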
diff --git a/arch/arm/include/debug/at91.S b/arch/arm/include/debug/at91.S
index c3c45e6..2556a88 100644
--- a/arch/arm/include/debug/at91.S
+++ b/arch/arm/include/debug/at91.S
@@ -13,9 +13,12 @@
 #define AT91_DBGU 0xfffff200 /* AT91_BASE_DBGU0 */
 #elif defined(CONFIG_AT91_DEBUG_LL_DBGU1)
 #define AT91_DBGU 0xffffee00 /* AT91_BASE_DBGU1 */
-#else
+#elif defined(CONFIG_AT91_DEBUG_LL_DBGU2)
 /* On sama5d4, use USART3 as low level serial console */
 #define AT91_DBGU 0xfc00c000 /* SAMA5D4_BASE_USART3 */
+#else
+/* On sama5d2, use UART1 as low level serial console */
+#define AT91_DBGU 0xf8020000
 #endif
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm/include/debug/imx-uart.h b/arch/arm/include/debug/imx-uart.h
index 66f736f..bce58e9 100644
--- a/arch/arm/include/debug/imx-uart.h
+++ b/arch/arm/include/debug/imx-uart.h
@@ -90,6 +90,17 @@
 #define IMX6SX_UART_BASE_ADDR(n) IMX6SX_UART##n##_BASE_ADDR
 #define IMX6SX_UART_BASE(n)	IMX6SX_UART_BASE_ADDR(n)
 
+#define IMX6UL_UART1_BASE_ADDR	0x02020000
+#define IMX6UL_UART2_BASE_ADDR	0x021e8000
+#define IMX6UL_UART3_BASE_ADDR	0x021ec000
+#define IMX6UL_UART4_BASE_ADDR	0x021f0000
+#define IMX6UL_UART5_BASE_ADDR	0x021f4000
+#define IMX6UL_UART6_BASE_ADDR	0x021fc000
+#define IMX6UL_UART7_BASE_ADDR	0x02018000
+#define IMX6UL_UART8_BASE_ADDR	0x02024000
+#define IMX6UL_UART_BASE_ADDR(n) IMX6UL_UART##n##_BASE_ADDR
+#define IMX6UL_UART_BASE(n)	IMX6UL_UART_BASE_ADDR(n)
+
 #define IMX7D_UART1_BASE_ADDR	0x30860000
 #define IMX7D_UART2_BASE_ADDR	0x30890000
 #define IMX7D_UART3_BASE_ADDR	0x30880000
@@ -124,6 +135,8 @@
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX6SL)
 #elif defined(CONFIG_DEBUG_IMX6SX_UART)
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX6SX)
+#elif defined(CONFIG_DEBUG_IMX6UL_UART)
+#define UART_PADDR	IMX_DEBUG_UART_BASE(IMX6UL)
 #elif defined(CONFIG_DEBUG_IMX7D_UART)
 #define UART_PADDR	IMX_DEBUG_UART_BASE(IMX7D)
 
diff --git a/arch/arm/include/debug/zynq.S b/arch/arm/include/debug/zynq.S
index bd13ded..de86b92 100644
--- a/arch/arm/include/debug/zynq.S
+++ b/arch/arm/include/debug/zynq.S
@@ -38,7 +38,7 @@
 		.endm
 
 		.macro	senduart,rd,rx
-		str	\rd, [\rx, #UART_FIFO_OFFSET]	@ TXDATA
+		strb	\rd, [\rx, #UART_FIFO_OFFSET]	@ TXDATA
 		.endm
 
 		.macro	waituart,rd,rx
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index e69f7a1..af9e59b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -71,8 +71,7 @@
 obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
-				   perf_event_xscale.o perf_event_v6.o \
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_xscale.o perf_event_v6.o \
 				   perf_event_v7.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
@@ -89,7 +88,7 @@
 
 obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y				+= psci.o psci-call.o
+obj-y				+= psci-call.o
 obj-$(CONFIG_SMP)		+= psci_smp.o
 endif
 
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a..f89811f 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index fcbbbb1..874e182 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -18,15 +18,6 @@
 
 static int debug_pci;
 
-#ifdef CONFIG_PCI_MSI
-struct msi_controller *pcibios_msi_controller(struct pci_dev *dev)
-{
-	struct pci_sys_data *sysdata = dev->bus->sysdata;
-
-	return sysdata->msi_ctrl;
-}
-#endif
-
 /*
  * We can't use pci_get_device() here since we are
  * called from interrupt context.
@@ -459,12 +450,9 @@
 
 	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
 		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
-		if (!sys)
-			panic("PCI: unable to allocate sys data!");
+		if (WARN(!sys, "PCI: unable to allocate sys data!"))
+			break;
 
-#ifdef CONFIG_PCI_MSI
-		sys->msi_ctrl = hw->msi_ctrl;
-#endif
 		sys->busnr   = busnr;
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
@@ -486,11 +474,14 @@
 			if (hw->scan)
 				sys->bus = hw->scan(nr, sys);
 			else
-				sys->bus = pci_scan_root_bus(parent, sys->busnr,
-						hw->ops, sys, &sys->resources);
+				sys->bus = pci_scan_root_bus_msi(parent,
+					sys->busnr, hw->ops, sys,
+					&sys->resources, hw->msi_ctrl);
 
-			if (!sys->bus)
-				panic("PCI: unable to scan bus!");
+			if (WARN(!sys->bus, "PCI: unable to scan bus!")) {
+				kfree(sys);
+				break;
+			}
 
 			busnr = sys->bus->busn_res.end + 1;
 
@@ -521,6 +512,8 @@
 		struct pci_bus *bus = sys->bus;
 
 		if (!pci_has_flag(PCI_PROBE_ONLY)) {
+			struct pci_bus *child;
+
 			/*
 			 * Size the bridge windows.
 			 */
@@ -530,24 +523,14 @@
 			 * Assign resources.
 			 */
 			pci_bus_assign_resources(bus);
-		}
-
-		/*
-		 * Tell drivers about devices found.
-		 */
-		pci_bus_add_devices(bus);
-	}
-
-	list_for_each_entry(sys, &head, node) {
-		struct pci_bus *bus = sys->bus;
-
-		/* Configure PCI Express settings */
-		if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
-			struct pci_bus *child;
 
 			list_for_each_entry(child, &bus->children, node)
 				pcie_bus_configure_settings(child);
 		}
+		/*
+		 * Tell drivers about devices found.
+		 */
+		pci_bus_add_devices(bus);
 	}
 }
 
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e..3e1c26e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@
 #define SPFIX(code...)
 #endif
 
-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
@@ -185,6 +185,11 @@
 	@
 	stmia	r7, {r2 - r6}
 
+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@
 
 	.align	5
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	dabt_helper
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@
  ARM(	stmdb	r0, {sp, lr}^			)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
  ATRAP(	teq	r8, r7)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@
 
 	.align	5
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	mov	r2, sp
 	dabt_helper
@@ -458,7 +467,7 @@
 
 	.align	5
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0
 
 	mov	r2, r4
 	mov	r3, r5
@@ -484,6 +493,8 @@
 1:	ldrt	r0, [r4]
  ARM_BE8(rev	r0, r0)				@ little endian instruction
 
+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)				@ little endian instruction
 	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16		@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan		@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)				@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@
 __und_usr_fault_32:
 	mov	r1, #4
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 	mov	r1, #2
 1:	mov	r0, sp
@@ -770,6 +784,8 @@
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f..30a7228 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -24,35 +24,55 @@
 
 
 	.align	5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
 /*
- * This is the fast syscall return path.  We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path.  We do as little as possible here,
+ * such as avoiding writing r0 to the stack.  We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
  */
 ret_fast_syscall:
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK
-	bne	__sys_trace_return
-	tst	r1, #_TIF_WORK_MASK
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	bne	fast_work_pending
-	asm_trace_hardirqs_on
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
-	ct_user_enter
 
 	restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
 
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+	/* Ok, we need to do extra processing, enter the slow path. */
 fast_work_pending:
 	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
-work_pending:
+	/* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled.  As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
+	disable_irq_notrace			@ disable interrupts
+	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
+	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	beq	no_work_pending
+ UNWIND(.fnend		)
+ENDPROC(ret_fast_syscall)
+
+	/* Slower path - fall through to work_pending */
+#endif
+
+	tst	r1, #_TIF_SYSCALL_WORK
+	bne	__sys_trace_return_nosave
+slow_work_pending:
 	mov	r0, sp				@ 'regs'
 	mov	r2, why				@ 'syscall'
 	bl	do_work_pending
@@ -65,16 +85,19 @@
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them.  Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-	disable_irq				@ disable interrupts
+	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
-	bne	work_pending
+	bne	slow_work_pending
 no_work_pending:
-	asm_trace_hardirqs_on
+	asm_trace_hardirqs_on save = 0
 
 	/* perform architecture specific actions before user return */
 	arch_ret_to_user r1, lr
@@ -174,6 +197,8 @@
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 
+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
@@ -252,6 +277,12 @@
 	bl	syscall_trace_exit
 	b	ret_slow_syscall
 
+__sys_trace_return_nosave:
+	enable_irq_notrace
+	mov	r0, sp
+	bl	syscall_trace_exit
+	b	ret_slow_syscall
+
 	.align	5
 #ifdef CONFIG_ALIGNMENT_TRAP
 	.type	__cr_alignment, #object
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045a..0d22ad2 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm
 
-#ifndef CONFIG_THUMB2_KERNEL
+
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
@@ -215,6 +215,10 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+	uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode SVC restore
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	@ Thumb mode SVC restore
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+#endif
 	.endm
 
 	@
@@ -241,6 +259,9 @@
 	@ on the stack remains correct).
 	@
 	.macro  svc_exit_via_fiq
+	uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
@@ -250,9 +271,27 @@
 	msr	spsr_cxsf, r9
 	ldr	r0, [r0, #S_R0]
 	ldmia	r8, {pc}^
+#else
+	@ Thumb mode restore
+	add	r0, sp, #S_R2
+	ldr	lr, [sp, #S_LR]
+	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+			        @ clobber state restored below)
+	ldmia	r0, {r2 - r12}
+	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+	msr	cpsr_c, r1
+	sub	r0, #S_R2
+	add	r8, r0, #S_PC
+	ldmia	r0, {r0 - r1}
+	rfeia	r8
+#endif
 	.endm
 
+
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -270,72 +309,16 @@
 						@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr, irq = 0
-	.if	\irq != 0
-	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	.else
-	@ IRQs off again before pulling preserved data off the stack
-	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	\rpsr, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	\rpsr, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
-	.endif
-	ldr	lr, [sp, #S_SP]			@ top of the stack
-	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-
-	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
-
-	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
-	ldmia	sp, {r0 - r12}
-	mov	sp, lr
-	ldr	lr, [sp], #4
-	rfeia	sp!
-	.endm
-
-	@
-	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-	@
-	@ For full details see non-Thumb implementation above.
-	@
-	.macro  svc_exit_via_fiq
-	add	r0, sp, #S_R2
-	ldr	lr, [sp, #S_LR]
-	ldr	sp, [sp, #S_SP] @ abort is deadly from here onward (it will
-			        @ clobber state restored below)
-	ldmia	r0, {r2 - r12}
-	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-	msr	cpsr_c, r1
-	sub	r0, #S_R2
-	add	r8, r0, #S_PC
-	ldmia	r0, {r0 - r1}
-	rfeia	r8
-	.endm
-
-#ifdef CONFIG_CPU_V7M
-	/*
-	 * Note we don't need to do clrex here as clearing the local monitor is
-	 * part of each exception entry and exit sequence.
-	 */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+	@ V7M restore.
+	@ Note that we don't need to do clrex here as clearing the local
+	@ monitor is part of the exception entry and exit sequence.
 	.if	\offset
 	add	sp, #\offset
 	.endif
 	v7m_exception_slow_exit ret_r0 = \fast
-	.endm
-#else	/* ifdef CONFIG_CPU_V7M */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#else
+	@ Thumb mode restore
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
@@ -353,9 +336,8 @@
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-#endif	/* ifdef CONFIG_CPU_V7M / else */
 #endif	/* !CONFIG_THUMB2_KERNEL */
+	.endm
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991..04286fd 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 350f188..5ff4826 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
 #include <linux/export.h>
 
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
@@ -140,7 +141,7 @@
 static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = d->affinity;
+	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
 	struct irq_chip *c;
 	bool ret = false;
 
@@ -160,7 +161,7 @@
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(d->affinity, affinity);
+		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
 
 	return ret;
 }
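migrate_one_irq() now reads and writes the affinity mask through irq_data_get_affinity_mask() rather than dereferencing d->affinity directly, which keeps the code correct regardless of how the genirq core stores the mask. The same accessor is the pattern for any other consumer, for example (illustrative helper only):

	static void my_show_irq_affinity(unsigned int irq)
	{
		struct irq_data *d = irq_get_irq_data(irq);
		const struct cpumask *mask = irq_data_get_affinity_mask(d);

		pr_info("IRQ%u may run on CPUs %*pbl\n", irq, cpumask_pr_args(mask));
	}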
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index e39cbf4..845a5dd 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -12,7 +12,7 @@
 	void *addr = (void *)entry->code;
 	unsigned int insn;
 
-	if (type == JUMP_LABEL_ENABLE)
+	if (type == JUMP_LABEL_JMP)
 		insn = arm_gen_branch(entry->code, entry->target);
 	else
 		insn = arm_gen_nop();
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
deleted file mode 100644
index 54272e0..0000000
--- a/arch/arm/kernel/perf_event.c
+++ /dev/null
@@ -1,896 +0,0 @@
-#undef DEBUG
-
-/*
- * ARM performance counter support.
- *
- * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
- * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
- *
- * This code is based on the sparc64 perf event code, which is in turn based
- * on the x86 code.
- */
-#define pr_fmt(fmt) "hw perfevents: " fmt
-
-#include <linux/bitmap.h>
-#include <linux/cpumask.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/irqdesc.h>
-
-#include <asm/cputype.h>
-#include <asm/irq_regs.h>
-#include <asm/pmu.h>
-
-static int
-armpmu_map_cache_event(const unsigned (*cache_map)
-				      [PERF_COUNT_HW_CACHE_MAX]
-				      [PERF_COUNT_HW_CACHE_OP_MAX]
-				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
-		       u64 config)
-{
-	unsigned int cache_type, cache_op, cache_result, ret;
-
-	cache_type = (config >>  0) & 0xff;
-	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
-		return -EINVAL;
-
-	cache_op = (config >>  8) & 0xff;
-	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
-		return -EINVAL;
-
-	cache_result = (config >> 16) & 0xff;
-	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
-		return -EINVAL;
-
-	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
-
-	if (ret == CACHE_OP_UNSUPPORTED)
-		return -ENOENT;
-
-	return ret;
-}
-
-static int
-armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
-{
-	int mapping;
-
-	if (config >= PERF_COUNT_HW_MAX)
-		return -EINVAL;
-
-	mapping = (*event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
-}
-
-static int
-armpmu_map_raw_event(u32 raw_event_mask, u64 config)
-{
-	return (int)(config & raw_event_mask);
-}
-
-int
-armpmu_map_event(struct perf_event *event,
-		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
-		 const unsigned (*cache_map)
-				[PERF_COUNT_HW_CACHE_MAX]
-				[PERF_COUNT_HW_CACHE_OP_MAX]
-				[PERF_COUNT_HW_CACHE_RESULT_MAX],
-		 u32 raw_event_mask)
-{
-	u64 config = event->attr.config;
-	int type = event->attr.type;
-
-	if (type == event->pmu->type)
-		return armpmu_map_raw_event(raw_event_mask, config);
-
-	switch (type) {
-	case PERF_TYPE_HARDWARE:
-		return armpmu_map_hw_event(event_map, config);
-	case PERF_TYPE_HW_CACHE:
-		return armpmu_map_cache_event(cache_map, config);
-	case PERF_TYPE_RAW:
-		return armpmu_map_raw_event(raw_event_mask, config);
-	}
-
-	return -ENOENT;
-}
-
-int armpmu_event_set_period(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	s64 left = local64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
-	int ret = 0;
-
-	if (unlikely(left <= -period)) {
-		left = period;
-		local64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-		ret = 1;
-	}
-
-	if (unlikely(left <= 0)) {
-		left += period;
-		local64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-		ret = 1;
-	}
-
-	/*
-	 * Limit the maximum period to prevent the counter value
-	 * from overtaking the one we are about to program. In
-	 * effect we are reducing max_period to account for
-	 * interrupt latency (and we are being very conservative).
-	 */
-	if (left > (armpmu->max_period >> 1))
-		left = armpmu->max_period >> 1;
-
-	local64_set(&hwc->prev_count, (u64)-left);
-
-	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
-
-	perf_event_update_userpage(event);
-
-	return ret;
-}
-
-u64 armpmu_event_update(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	u64 delta, prev_raw_count, new_raw_count;
-
-again:
-	prev_raw_count = local64_read(&hwc->prev_count);
-	new_raw_count = armpmu->read_counter(event);
-
-	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
-			     new_raw_count) != prev_raw_count)
-		goto again;
-
-	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
-
-	local64_add(delta, &event->count);
-	local64_sub(delta, &hwc->period_left);
-
-	return new_raw_count;
-}
-
-static void
-armpmu_read(struct perf_event *event)
-{
-	armpmu_event_update(event);
-}
-
-static void
-armpmu_stop(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-
-	/*
-	 * ARM pmu always has to update the counter, so ignore
-	 * PERF_EF_UPDATE, see comments in armpmu_start().
-	 */
-	if (!(hwc->state & PERF_HES_STOPPED)) {
-		armpmu->disable(event);
-		armpmu_event_update(event);
-		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
-	}
-}
-
-static void armpmu_start(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-
-	/*
-	 * ARM pmu always has to reprogram the period, so ignore
-	 * PERF_EF_RELOAD, see the comment below.
-	 */
-	if (flags & PERF_EF_RELOAD)
-		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
-
-	hwc->state = 0;
-	/*
-	 * Set the period again. Some counters can't be stopped, so when we
-	 * were stopped we simply disabled the IRQ source and the counter
-	 * may have been left counting. If we don't do this step then we may
-	 * get an interrupt too soon or *way* too late if the overflow has
-	 * happened since disabling.
-	 */
-	armpmu_event_set_period(event);
-	armpmu->enable(event);
-}
-
-static void
-armpmu_del(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-
-	armpmu_stop(event, PERF_EF_UPDATE);
-	hw_events->events[idx] = NULL;
-	clear_bit(idx, hw_events->used_mask);
-	if (armpmu->clear_event_idx)
-		armpmu->clear_event_idx(hw_events, event);
-
-	perf_event_update_userpage(event);
-}
-
-static int
-armpmu_add(struct perf_event *event, int flags)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* An event following a process won't be stopped earlier */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return -ENOENT;
-
-	perf_pmu_disable(event->pmu);
-
-	/* If we don't have a space for the counter then finish early. */
-	idx = armpmu->get_event_idx(hw_events, event);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then make
-	 * sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	armpmu->disable(event);
-	hw_events->events[idx] = event;
-
-	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
-	if (flags & PERF_EF_START)
-		armpmu_start(event, PERF_EF_RELOAD);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	perf_pmu_enable(event->pmu);
-	return err;
-}
-
-static int
-validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
-			       struct perf_event *event)
-{
-	struct arm_pmu *armpmu;
-
-	if (is_software_event(event))
-		return 1;
-
-	/*
-	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
-	 * core perf code won't check that the pmu->ctx == leader->ctx
-	 * until after pmu->event_init(event).
-	 */
-	if (event->pmu != pmu)
-		return 0;
-
-	if (event->state < PERF_EVENT_STATE_OFF)
-		return 1;
-
-	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
-		return 1;
-
-	armpmu = to_arm_pmu(event->pmu);
-	return armpmu->get_event_idx(hw_events, event) >= 0;
-}
-
-static int
-validate_group(struct perf_event *event)
-{
-	struct perf_event *sibling, *leader = event->group_leader;
-	struct pmu_hw_events fake_pmu;
-
-	/*
-	 * Initialise the fake PMU. We only need to populate the
-	 * used_mask for the purposes of validation.
-	 */
-	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
-
-	if (!validate_event(event->pmu, &fake_pmu, leader))
-		return -EINVAL;
-
-	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(event->pmu, &fake_pmu, sibling))
-			return -EINVAL;
-	}
-
-	if (!validate_event(event->pmu, &fake_pmu, event))
-		return -EINVAL;
-
-	return 0;
-}
-
-static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
-{
-	struct arm_pmu *armpmu;
-	struct platform_device *plat_device;
-	struct arm_pmu_platdata *plat;
-	int ret;
-	u64 start_clock, finish_clock;
-
-	/*
-	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
-	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
-	 * do any necessary shifting, we just need to perform the first
-	 * dereference.
-	 */
-	armpmu = *(void **)dev;
-	plat_device = armpmu->plat_device;
-	plat = dev_get_platdata(&plat_device->dev);
-
-	start_clock = sched_clock();
-	if (plat && plat->handle_irq)
-		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
-	else
-		ret = armpmu->handle_irq(irq, armpmu);
-	finish_clock = sched_clock();
-
-	perf_sample_event_took(finish_clock - start_clock);
-	return ret;
-}
-
-static void
-armpmu_release_hardware(struct arm_pmu *armpmu)
-{
-	armpmu->free_irq(armpmu);
-}
-
-static int
-armpmu_reserve_hardware(struct arm_pmu *armpmu)
-{
-	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
-	if (err) {
-		armpmu_release_hardware(armpmu);
-		return err;
-	}
-
-	return 0;
-}
-
-static void
-hw_perf_event_destroy(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	atomic_t *active_events	 = &armpmu->active_events;
-	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
-
-	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
-		armpmu_release_hardware(armpmu);
-		mutex_unlock(pmu_reserve_mutex);
-	}
-}
-
-static int
-event_requires_mode_exclusion(struct perf_event_attr *attr)
-{
-	return attr->exclude_idle || attr->exclude_user ||
-	       attr->exclude_kernel || attr->exclude_hv;
-}
-
-static int
-__hw_perf_event_init(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	struct hw_perf_event *hwc = &event->hw;
-	int mapping;
-
-	mapping = armpmu->map_event(event);
-
-	if (mapping < 0) {
-		pr_debug("event %x:%llx not supported\n", event->attr.type,
-			 event->attr.config);
-		return mapping;
-	}
-
-	/*
-	 * We don't assign an index until we actually place the event onto
-	 * hardware. Use -1 to signify that we haven't decided where to put it
-	 * yet. For SMP systems, each core has it's own PMU so we can't do any
-	 * clever allocation or constraints checking at this point.
-	 */
-	hwc->idx		= -1;
-	hwc->config_base	= 0;
-	hwc->config		= 0;
-	hwc->event_base		= 0;
-
-	/*
-	 * Check whether we need to exclude the counter from certain modes.
-	 */
-	if ((!armpmu->set_event_filter ||
-	     armpmu->set_event_filter(hwc, &event->attr)) &&
-	     event_requires_mode_exclusion(&event->attr)) {
-		pr_debug("ARM performance counters do not support "
-			 "mode exclusion\n");
-		return -EOPNOTSUPP;
-	}
-
-	/*
-	 * Store the event encoding into the config_base field.
-	 */
-	hwc->config_base	    |= (unsigned long)mapping;
-
-	if (!is_sampling_event(event)) {
-		/*
-		 * For non-sampling runs, limit the sample_period to half
-		 * of the counter width. That way, the new counter value
-		 * is far less likely to overtake the previous one unless
-		 * you have some serious IRQ latency issues.
-		 */
-		hwc->sample_period  = armpmu->max_period >> 1;
-		hwc->last_period    = hwc->sample_period;
-		local64_set(&hwc->period_left, hwc->sample_period);
-	}
-
-	if (event->group_leader != event) {
-		if (validate_group(event) != 0)
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int armpmu_event_init(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	int err = 0;
-	atomic_t *active_events = &armpmu->active_events;
-
-	/*
-	 * Reject CPU-affine events for CPUs that are of a different class to
-	 * that which this PMU handles. Process-following events (where
-	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
-	 * reject them later (in armpmu_add) if they're scheduled on a
-	 * different class of CPU.
-	 */
-	if (event->cpu != -1 &&
-		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
-		return -ENOENT;
-
-	/* does not support taken branch sampling */
-	if (has_branch_stack(event))
-		return -EOPNOTSUPP;
-
-	if (armpmu->map_event(event) == -ENOENT)
-		return -ENOENT;
-
-	event->destroy = hw_perf_event_destroy;
-
-	if (!atomic_inc_not_zero(active_events)) {
-		mutex_lock(&armpmu->reserve_mutex);
-		if (atomic_read(active_events) == 0)
-			err = armpmu_reserve_hardware(armpmu);
-
-		if (!err)
-			atomic_inc(active_events);
-		mutex_unlock(&armpmu->reserve_mutex);
-	}
-
-	if (err)
-		return err;
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err;
-}
-
-static void armpmu_enable(struct pmu *pmu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
-	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
-
-	/* For task-bound events we may be called on other CPUs */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return;
-
-	if (enabled)
-		armpmu->start(armpmu);
-}
-
-static void armpmu_disable(struct pmu *pmu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-
-	/* For task-bound events we may be called on other CPUs */
-	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
-		return;
-
-	armpmu->stop(armpmu);
-}
-
-/*
- * In heterogeneous systems, events are specific to a particular
- * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
- * the same microarchitecture.
- */
-static int armpmu_filter_match(struct perf_event *event)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-	unsigned int cpu = smp_processor_id();
-	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-}
-
-static void armpmu_init(struct arm_pmu *armpmu)
-{
-	atomic_set(&armpmu->active_events, 0);
-	mutex_init(&armpmu->reserve_mutex);
-
-	armpmu->pmu = (struct pmu) {
-		.pmu_enable	= armpmu_enable,
-		.pmu_disable	= armpmu_disable,
-		.event_init	= armpmu_event_init,
-		.add		= armpmu_add,
-		.del		= armpmu_del,
-		.start		= armpmu_start,
-		.stop		= armpmu_stop,
-		.read		= armpmu_read,
-		.filter_match	= armpmu_filter_match,
-	};
-}
-
-int armpmu_register(struct arm_pmu *armpmu, int type)
-{
-	armpmu_init(armpmu);
-	pr_info("enabled with %s PMU driver, %d counters available\n",
-			armpmu->name, armpmu->num_events);
-	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
-}
-
-/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *__oprofile_cpu_pmu;
-
-/*
- * Despite the names, these two functions are CPU-specific and are used
- * by the OProfile/perf code.
- */
-const char *perf_pmu_name(void)
-{
-	if (!__oprofile_cpu_pmu)
-		return NULL;
-
-	return __oprofile_cpu_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
-	int max_events = 0;
-
-	if (__oprofile_cpu_pmu != NULL)
-		max_events = __oprofile_cpu_pmu->num_events;
-
-	return max_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
-static void cpu_pmu_enable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
-
-	enable_percpu_irq(irq, IRQ_TYPE_NONE);
-}
-
-static void cpu_pmu_disable_percpu_irq(void *data)
-{
-	int irq = *(int *)data;
-
-	disable_percpu_irq(irq);
-}
-
-static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
-{
-	int i, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
-		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
-		free_percpu_irq(irq, &hw_events->percpu_pmu);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
-
-			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
-				continue;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-		}
-	}
-}
-
-static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
-{
-	int i, err, irq, irqs;
-	struct platform_device *pmu_device = cpu_pmu->plat_device;
-	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
-
-	if (!pmu_device)
-		return -ENODEV;
-
-	irqs = min(pmu_device->num_resources, num_possible_cpus());
-	if (irqs < 1) {
-		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
-		return 0;
-	}
-
-	irq = platform_get_irq(pmu_device, 0);
-	if (irq >= 0 && irq_is_percpu(irq)) {
-		err = request_percpu_irq(irq, handler, "arm-pmu",
-					 &hw_events->percpu_pmu);
-		if (err) {
-			pr_err("unable to request IRQ%d for ARM PMU counters\n",
-				irq);
-			return err;
-		}
-		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
-	} else {
-		for (i = 0; i < irqs; ++i) {
-			int cpu = i;
-
-			err = 0;
-			irq = platform_get_irq(pmu_device, i);
-			if (irq < 0)
-				continue;
-
-			if (cpu_pmu->irq_affinity)
-				cpu = cpu_pmu->irq_affinity[i];
-
-			/*
-			 * If we have a single PMU interrupt that we can't shift,
-			 * assume that we're running on a uniprocessor machine and
-			 * continue. Otherwise, continue without this interrupt.
-			 */
-			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
-				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, cpu);
-				continue;
-			}
-
-			err = request_irq(irq, handler,
-					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
-			if (err) {
-				pr_err("unable to request IRQ%d for ARM PMU counters\n",
-					irq);
-				return err;
-			}
-
-			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
-		}
-	}
-
-	return 0;
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
-{
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
-
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
-}
-
-static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	int err;
-	int cpu;
-	struct pmu_hw_events __percpu *cpu_hw_events;
-
-	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
-	if (!cpu_hw_events)
-		return -ENOMEM;
-
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
-
-	for_each_possible_cpu(cpu) {
-		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
-		raw_spin_lock_init(&events->pmu_lock);
-		events->percpu_pmu = cpu_pmu;
-	}
-
-	cpu_pmu->hw_events	= cpu_hw_events;
-	cpu_pmu->request_irq	= cpu_pmu_request_irq;
-	cpu_pmu->free_irq	= cpu_pmu_free_irq;
-
-	/* Ensure the PMU has sane values out of reset. */
-	if (cpu_pmu->reset)
-		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
-			 cpu_pmu, 1);
-
-	/* If no interrupts available, set the corresponding capability flag */
-	if (!platform_get_irq(cpu_pmu->plat_device, 0))
-		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
-	return 0;
-
-out_hw_events:
-	free_percpu(cpu_hw_events);
-	return err;
-}
-
-static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
-{
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-	free_percpu(cpu_pmu->hw_events);
-}
-
-/*
- * CPU PMU identification and probing.
- */
-static int probe_current_pmu(struct arm_pmu *pmu,
-			     const struct pmu_probe_info *info)
-{
-	int cpu = get_cpu();
-	unsigned int cpuid = read_cpuid_id();
-	int ret = -ENODEV;
-
-	pr_info("probing PMU on CPU %d\n", cpu);
-
-	for (; info->init != NULL; info++) {
-		if ((cpuid & info->mask) != info->cpuid)
-			continue;
-		ret = info->init(pmu);
-		break;
-	}
-
-	put_cpu();
-	return ret;
-}
-
-static int of_pmu_irq_cfg(struct arm_pmu *pmu)
-{
-	int i, irq, *irqs;
-	struct platform_device *pdev = pmu->plat_device;
-
-	/* Don't bother with PPIs; they're already affine */
-	irq = platform_get_irq(pdev, 0);
-	if (irq >= 0 && irq_is_percpu(irq))
-		return 0;
-
-	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-	if (!irqs)
-		return -ENOMEM;
-
-	for (i = 0; i < pdev->num_resources; ++i) {
-		struct device_node *dn;
-		int cpu;
-
-		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
-				      i);
-		if (!dn) {
-			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-				of_node_full_name(pdev->dev.of_node), i);
-			break;
-		}
-
-		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
-				break;
-
-		if (cpu >= nr_cpu_ids) {
-			pr_warn("Failed to find logical CPU for %s\n",
-				dn->name);
-			of_node_put(dn);
-			break;
-		}
-		of_node_put(dn);
-
-		irqs[i] = cpu;
-		cpumask_set_cpu(cpu, &pmu->supported_cpus);
-	}
-
-	if (i == pdev->num_resources) {
-		pmu->irq_affinity = irqs;
-	} else {
-		kfree(irqs);
-		cpumask_setall(&pmu->supported_cpus);
-	}
-
-	return 0;
-}
-
-int arm_pmu_device_probe(struct platform_device *pdev,
-			 const struct of_device_id *of_table,
-			 const struct pmu_probe_info *probe_table)
-{
-	const struct of_device_id *of_id;
-	const int (*init_fn)(struct arm_pmu *);
-	struct device_node *node = pdev->dev.of_node;
-	struct arm_pmu *pmu;
-	int ret = -ENODEV;
-
-	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
-	if (!pmu) {
-		pr_info("failed to allocate PMU device!\n");
-		return -ENOMEM;
-	}
-
-	if (!__oprofile_cpu_pmu)
-		__oprofile_cpu_pmu = pmu;
-
-	pmu->plat_device = pdev;
-
-	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
-		init_fn = of_id->data;
-
-		ret = of_pmu_irq_cfg(pmu);
-		if (!ret)
-			ret = init_fn(pmu);
-	} else {
-		ret = probe_current_pmu(pmu, probe_table);
-		cpumask_setall(&pmu->supported_cpus);
-	}
-
-	if (ret) {
-		pr_info("failed to probe PMU!\n");
-		goto out_free;
-	}
-
-	ret = cpu_pmu_init(pmu);
-	if (ret)
-		goto out_free;
-
-	ret = armpmu_register(pmu, -1);
-	if (ret)
-		goto out_destroy;
-
-	return 0;
-
-out_destroy:
-	cpu_pmu_destroy(pmu);
-out_free:
-	pr_info("failed to register PMU devices!\n");
-	kfree(pmu);
-	return ret;
-}
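The code removed above is the generic CPU PMU probing layer (presumably relocated rather than dropped, given the new <linux/perf/arm_pmu.h> include in the files below): arm_pmu_device_probe() pairs an of_device_id table, whose .data member carries the init function, with a pmu_probe_info table that probe_current_pmu() matches against the CPUID. A minimal sketch of a caller follows, with illustrative names and MIDR values; only the structure fields and the probe entry point are taken from the code above.

/* Hypothetical user of arm_pmu_device_probe(); names are illustrative. */
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/perf/arm_pmu.h>

static int example_a9_pmu_init(struct arm_pmu *pmu)
{
	/* fill in pmu->name, the event map and start/stop callbacks here */
	return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
	{ .compatible = "arm,cortex-a9-pmu", .data = example_a9_pmu_init },
	{ /* sentinel */ },
};

static const struct pmu_probe_info example_pmu_probe_table[] = {
	{
		.cpuid	= 0x4100c090,	/* Cortex-A9: implementer 0x41, part 0xc09 (illustrative) */
		.mask	= 0xff00fff0,	/* match implementer and part number only */
		.init	= example_a9_pmu_init,
	},
	{ /* .init == NULL terminates probe_current_pmu() */ },
};

static int example_pmu_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, example_pmu_of_ids,
				    example_pmu_probe_table);
}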
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 09f83e4..09413e7 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -34,9 +34,9 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 enum armv6_perf_types {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index f9b37f8..126dc67 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -21,11 +21,11 @@
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 #include <asm/vfp.h>
 #include "../vfp/vfpinstr.h"
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 /*
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 304d056..aa0499e 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -16,9 +16,9 @@
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
-#include <asm/pmu.h>
 
 #include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
 
 enum xscale_perf_types {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a..a3089ba 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@
 	ledtrig_cpu(CPU_LED_IDLE_END);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
-	cpu_die();
-}
-#endif
-
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -129,12 +122,36 @@
 	buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so let's use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +163,9 @@
 		buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 		{
-			unsigned int transbase, dac;
+			unsigned int transbase, dac = get_domain();
 			asm("mrc p15, 0, %0, c2, c0\n\t"
-			    "mrc p15, 0, %1, c3, c0\n"
-			    : "=r" (transbase), "=r" (dac));
+			    : "=r" (transbase));
 			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			  	transbase, dac);
 		}
@@ -210,6 +226,14 @@
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
deleted file mode 100644
index f90fdf4..0000000
--- a/arch/arm/kernel/psci.c
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2012 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#define pr_fmt(fmt) "psci: " fmt
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
-#include <uapi/linux/psci.h>
-
-#include <asm/compiler.h>
-#include <asm/errno.h>
-#include <asm/psci.h>
-#include <asm/system_misc.h>
-
-struct psci_operations psci_ops;
-
-static int (*invoke_psci_fn)(u32, u32, u32, u32);
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-asmlinkage int __invoke_psci_fn_hvc(u32, u32, u32, u32);
-asmlinkage int __invoke_psci_fn_smc(u32, u32, u32, u32);
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_AFFINITY_INFO,
-	PSCI_FN_MIGRATE_INFO_TYPE,
-	PSCI_FN_MAX,
-};
-
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_power_state_pack(struct psci_power_state state)
-{
-	return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
-			& PSCI_0_2_POWER_STATE_ID_MASK) |
-		((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
-		 & PSCI_0_2_POWER_STATE_TYPE_MASK) |
-		((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
-		 & PSCI_0_2_POWER_STATE_AFFL_MASK);
-}
-
-static int psci_get_version(void)
-{
-	int err;
-
-	err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-	return err;
-}
-
-static int psci_cpu_suspend(struct psci_power_state state,
-			    unsigned long entry_point)
-{
-	int err;
-	u32 fn, power_state;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(struct psci_power_state state)
-{
-	int err;
-	u32 fn, power_state;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	power_state = psci_power_state_pack(state);
-	err = invoke_psci_fn(fn, power_state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
-	err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
-	return err;
-}
-
-static int psci_migrate_info_type(void)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
-	err = invoke_psci_fn(fn, 0, 0, 0);
-	return err;
-}
-
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * PSCI Function IDs for v0.2+ are well defined so use
- * standard values.
- */
-static int psci_0_2_init(struct device_node *np)
-{
-	int err, ver;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	ver = psci_get_version();
-
-	if (ver == PSCI_RET_NOT_SUPPORTED) {
-		/* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
-		pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
-		err = -EOPNOTSUPP;
-		goto out_put_node;
-	} else {
-		pr_info("PSCIv%d.%d detected in firmware.\n",
-				PSCI_VERSION_MAJOR(ver),
-				PSCI_VERSION_MINOR(ver));
-
-		if (PSCI_VERSION_MAJOR(ver) == 0 &&
-				PSCI_VERSION_MINOR(ver) < 2) {
-			err = -EINVAL;
-			pr_err("Conflicting PSCI version detected.\n");
-			goto out_put_node;
-		}
-	}
-
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
-		PSCI_0_2_FN_MIGRATE_INFO_TYPE;
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci", .data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
-	{},
-};
-
-int __init psci_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 28a1db4..61c04b0 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -17,6 +17,8 @@
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/psci.h>
@@ -51,22 +53,34 @@
 {
 	if (psci_ops.cpu_on)
 		return psci_ops.cpu_on(cpu_logical_map(cpu),
-				       __pa(secondary_startup));
+					virt_to_idmap(&secondary_startup));
 	return -ENODEV;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+int psci_cpu_disable(unsigned int cpu)
+{
+	/* Fail early if we don't have CPU_OFF support */
+	if (!psci_ops.cpu_off)
+		return -EOPNOTSUPP;
+
+	/* Trusted OS will deny CPU_OFF */
+	if (psci_tos_resident_on(cpu))
+		return -EPERM;
+
+	return 0;
+}
+
 void __ref psci_cpu_die(unsigned int cpu)
 {
-       const struct psci_power_state ps = {
-               .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-       };
+	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
+		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;
 
-       if (psci_ops.cpu_off)
-               psci_ops.cpu_off(ps);
+	if (psci_ops.cpu_off)
+		psci_ops.cpu_off(state);
 
-       /* We should never return */
-       panic("psci: cpu %d failed to shutdown\n", cpu);
+	/* We should never return */
+	panic("psci: cpu %d failed to shutdown\n", cpu);
 }
 
 int __ref psci_cpu_kill(unsigned int cpu)
@@ -109,6 +123,7 @@
 struct smp_operations __initdata psci_smp_ops = {
 	.smp_boot_secondary	= psci_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
+	.cpu_disable		= psci_cpu_disable,
 	.cpu_die		= psci_cpu_die,
 	.cpu_kill		= psci_cpu_kill,
 #endif
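psci_cpu_die() above now open-codes the CPU_OFF argument instead of going through the struct psci_power_state packing done by the removed arch/arm PSCI code. For reference, a sketch of the full PSCI 0.2 power_state encoding that the removed psci_power_state_pack() built, reusing the same uapi/linux/psci.h macros; psci_cpu_die() only needs the TYPE field, leaving the state ID and affinity level at zero.

/* Sketch: the power_state word per PSCI 0.2, as packed by the removed helper. */
static u32 example_pack_power_state(u16 id, u8 type, u8 affinity_level)
{
	return ((id << PSCI_0_2_POWER_STATE_ID_SHIFT) &
			PSCI_0_2_POWER_STATE_ID_MASK) |
	       ((type << PSCI_0_2_POWER_STATE_TYPE_SHIFT) &
			PSCI_0_2_POWER_STATE_TYPE_MASK) |
	       ((affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT) &
			PSCI_0_2_POWER_STATE_AFFL_MASK);
}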
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b7..20edd34 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -31,12 +31,14 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/sort.h>
+#include <linux/psci.h>
 
 #include <asm/unified.h>
 #include <asm/cp15.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -954,6 +956,9 @@
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = cmd_line;
 
+	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+		early_fixmap_init();
+
 	parse_early_param();
 
 #ifdef CONFIG_MMU
@@ -972,7 +977,7 @@
 	unflatten_device_tree();
 
 	arm_dt_init_cpu_maps();
-	psci_init();
+	psci_dt_init();
 	xen_early_init();
 #ifdef CONFIG_SMP
 	if (is_smp()) {
@@ -1015,7 +1020,7 @@
 
 	for_each_possible_cpu(cpu) {
 		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
-		cpuinfo->cpu.hotpluggable = 1;
+		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
 		register_cpu(&cpuinfo->cpu, cpu);
 	}
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e..b6cda06 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -562,6 +562,12 @@
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+	/*
+	 * The assembly code enters us with IRQs off, but it hasn't
+	 * informed the tracing code of that for efficiency reasons.
+	 * Update the trace code with the current status.
+	 */
+	trace_hardirqs_off();
 	do {
 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
 			schedule();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3d6b782..ba0063c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@
 	if (smp_ops.cpu_disable)
 		return smp_ops.cpu_disable(cpu);
 
+	return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+	/* cpu_die must be specified to support hotplug */
+	if (!smp_ops.cpu_die)
+		return 0;
+
+	if (smp_ops.cpu_can_disable)
+		return smp_ops.cpu_can_disable(cpu);
+
 	/*
 	 * By default, allow disabling all CPUs except the first one,
 	 * since this is special on a lot of platforms, e.g. because
 	 * of clock tick interrupts.
 	 */
-	return cpu == 0 ? -EPERM : 0;
+	return cpu != 0;
 }
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -253,7 +266,7 @@
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
 {
 	unsigned int cpu = smp_processor_id();
 
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 172c6a05..e9035cd 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -36,29 +36,30 @@
 static struct clock_event_device __percpu *twd_evt;
 static int twd_ppi;
 
-static void twd_set_mode(enum clock_event_mode mode,
-			struct clock_event_device *clk)
+static int twd_shutdown(struct clock_event_device *clk)
 {
-	unsigned long ctrl;
+	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
-			| TWD_TIMER_CONTROL_PERIODIC;
-		writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
-			twd_base + TWD_TIMER_LOAD);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* period set, and timer enabled in 'next_event' hook */
-		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		ctrl = 0;
-	}
+static int twd_set_oneshot(struct clock_event_device *clk)
+{
+	/* period set, and timer enabled in 'next_event' hook */
+	writel_relaxed(TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT,
+		       twd_base + TWD_TIMER_CONTROL);
+	return 0;
+}
 
+static int twd_set_periodic(struct clock_event_device *clk)
+{
+	unsigned long ctrl = TWD_TIMER_CONTROL_ENABLE |
+			     TWD_TIMER_CONTROL_IT_ENABLE |
+			     TWD_TIMER_CONTROL_PERIODIC;
+
+	writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+		       twd_base + TWD_TIMER_LOAD);
 	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
+	return 0;
 }
 
 static int twd_set_next_event(unsigned long evt,
@@ -94,7 +95,7 @@
 {
 	struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
 
-	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	twd_shutdown(clk);
 	disable_percpu_irq(clk->irq);
 }
 
@@ -296,7 +297,10 @@
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
 			CLOCK_EVT_FEAT_C3STOP;
 	clk->rating = 350;
-	clk->set_mode = twd_set_mode;
+	clk->set_state_shutdown = twd_shutdown;
+	clk->set_state_periodic = twd_set_periodic;
+	clk->set_state_oneshot = twd_set_oneshot;
+	clk->tick_resume = twd_shutdown;
 	clk->set_next_event = twd_set_next_event;
 	clk->irq = twd_ppi;
 	clk->cpumask = cpumask_of(cpu);
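The TWD conversion above, and the cns3xxx, davinci and ep93xx timers further down, all follow the same clockevents change: the single switch-based set_mode callback is split into small, int-returning per-state callbacks. A generic sketch of the resulting shape, with illustrative driver-local names; the clock_event_device fields are the ones used in the hunks themselves.

#include <linux/clockchips.h>

static int example_shutdown(struct clock_event_device *evt)
{
	/* stop the hardware timer */
	return 0;
}

static int example_set_periodic(struct clock_event_device *evt)
{
	/* program the reload value and enable periodic interrupts */
	return 0;
}

static int example_set_oneshot(struct clock_event_device *evt)
{
	/* the period is programmed later, in the ->set_next_event() hook */
	return 0;
}

static struct clock_event_device example_clockevent = {
	.name			= "example-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_state_shutdown	= example_shutdown,
	.set_state_periodic	= example_set_periodic,
	.set_state_oneshot	= example_set_oneshot,
	.tick_resume		= example_shutdown,
};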
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756..5b26e7e 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@
 
 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226..969f9d9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@
 	kuser_init(vectors_base);
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7..970d6c0 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
 
 		.text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
 		.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9..1512beb 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 
 	.text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b06..caf5019 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
 	.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e..1712f13 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2..d72b909 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
 	if (n < 64)
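The renames in this file and in the assembly routines above (arm_copy_to_user, arm_copy_from_user, arm_clear_user) make room for the software-PAN scheme visible in swp_emulate.c: callers bracket the raw copy with uaccess_save_and_enable()/uaccess_restore(). A minimal sketch of such a wrapper, assuming it lives in the uaccess header rather than in any hunk shown here.

/* Sketch of a wrapper around the renamed low-level copy (assumed, not shown in this diff). */
static inline unsigned long
example_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();

	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}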
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index fd95f34..89a755b 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -8,6 +8,18 @@
 	select SOC_BUS
 
 if ARCH_AT91
+config SOC_SAMA5D2
+	bool "SAMA5D2 family" if ARCH_MULTI_V7
+	select SOC_SAMA5
+	select CACHE_L2X0
+	select HAVE_FB_ATMEL
+	select HAVE_AT91_UTMI
+	select HAVE_AT91_USB_CLK
+	select HAVE_AT91_H32MX
+	select HAVE_AT91_GENERATED_CLK
+	help
+	  Select this if you are using one of Atmel's SAMA5D2 family SoCs.
+
 config SOC_SAMA5D3
 	bool "SAMA5D3 family" if ARCH_MULTI_V7
 	select SOC_SAMA5
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index eaf58f8..c1a7c6c 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -8,7 +8,6 @@
  * Licensed under GPLv2 or later.
  */
 
-#include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 
@@ -38,7 +37,7 @@
 	at91rm9200_pm_init();
 }
 
-static const char *at91rm9200_dt_board_compat[] __initconst = {
+static const char *const at91rm9200_dt_board_compat[] __initconst = {
 	"atmel,at91rm9200",
 	NULL
 };
diff --git a/arch/arm/mach-at91/at91sam9.c b/arch/arm/mach-at91/at91sam9.c
index e47a209..7eb64f7 100644
--- a/arch/arm/mach-at91/at91sam9.c
+++ b/arch/arm/mach-at91/at91sam9.c
@@ -72,7 +72,7 @@
 	at91sam9260_pm_init();
 }
 
-static const char *at91_dt_board_compat[] __initconst = {
+static const char *const at91_dt_board_compat[] __initconst = {
 	"atmel,at91sam9",
 	NULL
 };
@@ -89,7 +89,7 @@
 	at91sam9g45_pm_init();
 }
 
-static const char *at91sam9g45_board_compat[] __initconst = {
+static const char *const at91sam9g45_board_compat[] __initconst = {
 	"atmel,at91sam9g45",
 	NULL
 };
@@ -106,7 +106,7 @@
 	at91sam9x5_pm_init();
 }
 
-static const char *at91sam9x5_board_compat[] __initconst = {
+static const char *const at91sam9x5_board_compat[] __initconst = {
 	"atmel,at91sam9x5",
 	"atmel,at91sam9n12",
 	NULL
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index e24df77..265ffeb 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -311,7 +311,7 @@
 		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 }
 
-static const struct of_device_id ramc_ids[] __initconst = {
+static const struct of_device_id const ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
diff --git a/arch/arm/mach-at91/sama5.c b/arch/arm/mach-at91/sama5.c
index 41d829d..d9cf679 100644
--- a/arch/arm/mach-at91/sama5.c
+++ b/arch/arm/mach-at91/sama5.c
@@ -18,6 +18,8 @@
 #include "soc.h"
 
 static const struct at91_soc sama5_socs[] = {
+	AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D27_EXID_MATCH,
+		 "sama5d27", "sama5d2"),
 	AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D31_EXID_MATCH,
 		 "sama5d31", "sama5d3"),
 	AT91_SOC(SAMA5D3_CIDR_MATCH, SAMA5D33_EXID_MATCH,
@@ -52,7 +54,7 @@
 	at91sam9x5_pm_init();
 }
 
-static const char *sama5_dt_board_compat[] __initconst = {
+static const char *const sama5_dt_board_compat[] __initconst = {
 	"atmel,sama5",
 	NULL
 };
@@ -63,7 +65,8 @@
 	.dt_compat	= sama5_dt_board_compat,
 MACHINE_END
 
-static const char *sama5_alt_dt_board_compat[] __initconst = {
+static const char *const sama5_alt_dt_board_compat[] __initconst = {
+	"atmel,sama5d2",
 	"atmel,sama5d4",
 	NULL
 };
diff --git a/arch/arm/mach-at91/soc.h b/arch/arm/mach-at91/soc.h
index be23c40..8ede0ef 100644
--- a/arch/arm/mach-at91/soc.h
+++ b/arch/arm/mach-at91/soc.h
@@ -62,6 +62,9 @@
 #define AT91SAM9XE256_CIDR_MATCH	0x329a93a0
 #define AT91SAM9XE512_CIDR_MATCH	0x329aa3a0
 
+#define SAMA5D2_CIDR_MATCH		0x0a5c08c0
+#define SAMA5D27_EXID_MATCH		0x00000021
+
 #define SAMA5D3_CIDR_MATCH		0x0a5c07c0
 #define SAMA5D31_EXID_MATCH		0x00444300
 #define SAMA5D33_EXID_MATCH		0x00414300
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 0ac9e4b3..1319c3c 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -140,10 +140,12 @@
 config ARCH_BRCMSTB
 	bool "Broadcom BCM7XXX based boards" if ARCH_MULTI_V7
 	select ARM_GIC
+	select ARM_ERRATA_798181 if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select BRCMSTB_GISB_ARB
 	select BRCMSTB_L2_IRQ
 	select BCM7120_L2_IRQ
+	select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	help
 	  Say Y if you intend to run the kernel on a Broadcom ARM-based STB
diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
index 4fb0da4..1780a3ff 100644
--- a/arch/arm/mach-bcm/Makefile
+++ b/arch/arm/mach-bcm/Makefile
@@ -39,10 +39,8 @@
 
 # BCM63XXx
 ifeq ($(CONFIG_ARCH_BCM_63XX),y)
-CFLAGS_bcm63xx_headsmp.o	+= -march=armv7-a
 obj-y				+= bcm63xx.o
-obj-$(CONFIG_SMP)		+= bcm63xx_smp.o bcm63xx_headsmp.o \
-				   bcm63xx_pmb.o
+obj-$(CONFIG_SMP)		+= bcm63xx_smp.o bcm63xx_pmb.o
 endif
 
 ifeq ($(CONFIG_ARCH_BRCMSTB),y)
diff --git a/arch/arm/mach-bcm/bcm63xx_headsmp.S b/arch/arm/mach-bcm/bcm63xx_headsmp.S
deleted file mode 100644
index c7af397..0000000
--- a/arch/arm/mach-bcm/bcm63xx_headsmp.S
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- *  Copyright (C) 2015, Broadcom Corporation
- *  All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-
-ENTRY(bcm63138_secondary_startup)
- ARM_BE8(setend	be)
-	/*
-	 * L1 cache does have unpredictable contents at power-up clean its
-	 * contents without flushing
-	 */
-	bl      v7_invalidate_l1
-	nop
-
-	b	secondary_startup
-ENDPROC(bcm63138_secondary_startup)
diff --git a/arch/arm/mach-bcm/bcm63xx_smp.c b/arch/arm/mach-bcm/bcm63xx_smp.c
index 3f014f1..19be904 100644
--- a/arch/arm/mach-bcm/bcm63xx_smp.c
+++ b/arch/arm/mach-bcm/bcm63xx_smp.c
@@ -127,7 +127,7 @@
 	}
 
 	/* Locate the secondary CPU node */
-	dn = of_get_cpu_node(cpu_logical_map(cpu), NULL);
+	dn = of_get_cpu_node(cpu, NULL);
 	if (!dn) {
 		pr_err("SMP: failed to locate secondary CPU%d node\n", cpu);
 		ret = -ENODEV;
@@ -135,7 +135,7 @@
 	}
 
 	/* Write the secondary init routine to the BootLUT reset vector */
-	val = virt_to_phys(bcm63138_secondary_startup);
+	val = virt_to_phys(secondary_startup);
 	writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT);
 
 	/* Power up the core, will jump straight to its reset vector when we
diff --git a/arch/arm/mach-bcm/bcm63xx_smp.h b/arch/arm/mach-bcm/bcm63xx_smp.h
index 50b7604..9c6d50e 100644
--- a/arch/arm/mach-bcm/bcm63xx_smp.h
+++ b/arch/arm/mach-bcm/bcm63xx_smp.h
@@ -3,7 +3,6 @@
 
 struct device_node;
 
-extern void bcm63138_secondary_startup(void);
 extern int bcm63xx_pmb_power_on_cpu(struct device_node *dn);
 
 #endif /* __BCM63XX_SMP_H */
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c
index 7aef927..5478fe6 100644
--- a/arch/arm/mach-bcm/bcm_5301x.c
+++ b/arch/arm/mach-bcm/bcm_5301x.c
@@ -44,7 +44,7 @@
 			"imprecise external abort");
 }
 
-static const char __initconst *bcm5301x_dt_compat[] = {
+static const char *const bcm5301x_dt_compat[] __initconst = {
 	"brcm,bcm4708",
 	NULL,
 };
diff --git a/arch/arm/mach-bcm/bcm_kona_smc.c b/arch/arm/mach-bcm/bcm_kona_smc.c
index a55a7ec..cf3f865 100644
--- a/arch/arm/mach-bcm/bcm_kona_smc.c
+++ b/arch/arm/mach-bcm/bcm_kona_smc.c
@@ -33,7 +33,7 @@
 	unsigned result;
 };
 
-static const struct of_device_id bcm_kona_smc_ids[] __initconst = {
+static const struct of_device_id const bcm_kona_smc_ids[] __initconst = {
 	{.compatible = "brcm,kona-smc"},
 	{.compatible = "bcm,kona-smc"}, /* deprecated name */
 	{},
diff --git a/arch/arm/mach-clps711x/board-autcpu12.c b/arch/arm/mach-clps711x/board-autcpu12.c
index 45abf6b..c3d9642 100644
--- a/arch/arm/mach-clps711x/board-autcpu12.c
+++ b/arch/arm/mach-clps711x/board-autcpu12.c
@@ -160,7 +160,7 @@
 	},
 };
 
-static const struct gpio autcpu12_gpios[] __initconst = {
+static const struct gpio const autcpu12_gpios[] __initconst = {
 	{ AUTCPU12_DPOT_CS,	GPIOF_OUT_INIT_HIGH,	"DPOT CS" },
 	{ AUTCPU12_DPOT_CLK,	GPIOF_OUT_INIT_LOW,	"DPOT CLK" },
 	{ AUTCPU12_DPOT_UD,	GPIOF_OUT_INIT_LOW,	"DPOT UD" },
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 4e9837d..9b1dc22 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -113,30 +113,33 @@
  */
 static void __iomem *cns3xxx_tmr1;
 
-static void cns3xxx_timer_set_mode(enum clock_event_mode mode,
-				   struct clock_event_device *clk)
+static int cns3xxx_shutdown(struct clock_event_device *clk)
+{
+	writel(0, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+	return 0;
+}
+
+static int cns3xxx_set_oneshot(struct clock_event_device *clk)
+{
+	unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+
+	/* period set, and timer enabled in 'next_event' hook */
+	ctrl |= (1 << 2) | (1 << 9);
+	writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+	return 0;
+}
+
+static int cns3xxx_set_periodic(struct clock_event_device *clk)
 {
 	unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
 	int pclk = cns3xxx_cpu_clock() / 8;
 	int reload;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		reload = pclk * 20 / (3 * HZ) * 0x25000;
-		writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
-		ctrl |= (1 << 0) | (1 << 2) | (1 << 9);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* period set, and timer enabled in 'next_event' hook */
-		ctrl |= (1 << 2) | (1 << 9);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		ctrl = 0;
-	}
-
+	reload = pclk * 20 / (3 * HZ) * 0x25000;
+	writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
+	ctrl |= (1 << 0) | (1 << 2) | (1 << 9);
 	writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
+	return 0;
 }
 
 static int cns3xxx_timer_set_next_event(unsigned long evt,
@@ -151,12 +154,16 @@
 }
 
 static struct clock_event_device cns3xxx_tmr1_clockevent = {
-	.name		= "cns3xxx timer1",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= cns3xxx_timer_set_mode,
-	.set_next_event	= cns3xxx_timer_set_next_event,
-	.rating		= 350,
-	.cpumask	= cpu_all_mask,
+	.name			= "cns3xxx timer1",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= cns3xxx_shutdown,
+	.set_state_periodic	= cns3xxx_set_periodic,
+	.set_state_oneshot	= cns3xxx_set_oneshot,
+	.tick_resume		= cns3xxx_shutdown,
+	.set_next_event		= cns3xxx_timer_set_next_event,
+	.rating			= 350,
+	.cpumask		= cpu_all_mask,
 };
 
 static void __init cns3xxx_clockevents_init(unsigned int timer_irq)
@@ -339,7 +346,7 @@
 	.power_off	= csn3xxx_usb_power_off,
 };
 
-static struct of_dev_auxdata cns3xxx_auxdata[] __initconst = {
+static const struct of_dev_auxdata const cns3xxx_auxdata[] __initconst = {
 	{ "intel,usb-ehci", CNS3XXX_USB_BASE, "ehci-platform", &cns3xxx_usb_ehci_pdata },
 	{ "intel,usb-ohci", CNS3XXX_USB_OHCI_BASE, "ohci-platform", &cns3xxx_usb_ohci_pdata },
 	{ "cavium,cns3420-ahci", CNS3XXX_SATA2_BASE, "ahci", NULL },
@@ -392,7 +399,7 @@
                         cns3xxx_auxdata, NULL);
 }
 
-static const char *cns3xxx_dt_compat[] __initdata = {
+static const char *const cns3xxx_dt_compat[] __initconst = {
 	"cavium,cns3410",
 	"cavium,cns3420",
 	NULL,
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 006dae8..507aad4 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -85,23 +85,13 @@
 	return 0;
 }
 
-/*
- * Faking this allows us to to work with suspend functions of
- * generic drivers which call {enable|disable}_irq_wake for
- * wake up interrupt sources (eg RTC on DA850).
- */
-static int cp_intc_set_wake(struct irq_data *d, unsigned int on)
-{
-	return 0;
-}
-
 static struct irq_chip cp_intc_irq_chip = {
 	.name		= "cp_intc",
 	.irq_ack	= cp_intc_ack_irq,
 	.irq_mask	= cp_intc_mask_irq,
 	.irq_unmask	= cp_intc_unmask_irq,
 	.irq_set_type	= cp_intc_set_irq_type,
-	.irq_set_wake	= cp_intc_set_wake,
+	.flags		= IRQCHIP_SKIP_SET_WAKE,
 };
 
 static struct irq_domain *cp_intc_domain;
@@ -112,7 +102,7 @@
 	pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
 
 	irq_set_chip(virq, &cp_intc_irq_chip);
-	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+	irq_set_probe(virq);
 	irq_set_handler(virq, handle_edge_irq);
 	return 0;
 }
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 3b8740c..6769978 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -715,7 +715,7 @@
 	-1
 };
 
-const short da850_vpif_capture_pins[] __initdata = {
+const short da850_vpif_capture_pins[] __initconst = {
 	DA850_VPIF_DIN0, DA850_VPIF_DIN1, DA850_VPIF_DIN2, DA850_VPIF_DIN3,
 	DA850_VPIF_DIN4, DA850_VPIF_DIN5, DA850_VPIF_DIN6, DA850_VPIF_DIN7,
 	DA850_VPIF_DIN8, DA850_VPIF_DIN9, DA850_VPIF_DIN10, DA850_VPIF_DIN11,
@@ -725,7 +725,7 @@
 	-1
 };
 
-const short da850_vpif_display_pins[] __initdata = {
+const short da850_vpif_display_pins[] __initconst = {
 	DA850_VPIF_DOUT0, DA850_VPIF_DOUT1, DA850_VPIF_DOUT2, DA850_VPIF_DOUT3,
 	DA850_VPIF_DOUT4, DA850_VPIF_DOUT5, DA850_VPIF_DOUT6, DA850_VPIF_DOUT7,
 	DA850_VPIF_DOUT8, DA850_VPIF_DOUT9, DA850_VPIF_DOUT10,
diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c
index 438f685..06b6451 100644
--- a/arch/arm/mach-davinci/da8xx-dt.c
+++ b/arch/arm/mach-davinci/da8xx-dt.c
@@ -20,7 +20,7 @@
 
 #define DA8XX_NUM_UARTS	3
 
-static const struct of_device_id da8xx_irq_match[] __initconst = {
+static const struct of_device_id const da8xx_irq_match[] __initconst = {
 	{ .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
 	{ }
 };
@@ -59,7 +59,7 @@
 
 }
 
-static const char *da850_boards_compat[] __initdata = {
+static const char *const da850_boards_compat[] __initconst = {
 	"enbw,cmc",
 	"ti,da850-evm",
 	"ti,da850",
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index ddfdd82..29e08aa 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -1010,11 +1010,13 @@
 		.version	= SPI_VERSION_2,
 		.intr_line	= 1,
 		.dma_event_q	= EVENTQ_0,
+		.prescaler_limit = 2,
 	},
 	[1] = {
 		.version	= SPI_VERSION_2,
 		.intr_line	= 1,
 		.dma_event_q	= EVENTQ_0,
+		.prescaler_limit = 2,
 	},
 };
 
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9cbeda7..567dc56 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -411,6 +411,7 @@
 	.num_chipselect = 2,
 	.cshold_bug	= true,
 	.dma_event_q	= EVENTQ_1,
+	.prescaler_limit = 1,
 };
 static struct platform_device dm355_spi0_device = {
 	.name = "spi_davinci",
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index e3a3c54..6a890a8 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -646,6 +646,7 @@
 	.version 	= SPI_VERSION_1,
 	.num_chipselect = 2,
 	.dma_event_q	= EVENTQ_3,
+	.prescaler_limit = 1,
 };
 
 static struct resource dm365_spi0_resources[] = {
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 160c960..6c18445 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -303,36 +303,42 @@
 	return 0;
 }
 
-static void davinci_set_mode(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int davinci_shutdown(struct clock_event_device *evt)
 {
 	struct timer_s *t = &timers[TID_CLOCKEVENT];
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		t->period = davinci_clock_tick_rate / (HZ);
-		t->opts &= ~TIMER_OPTS_STATE_MASK;
-		t->opts |= TIMER_OPTS_PERIODIC;
-		timer32_config(t);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		t->opts &= ~TIMER_OPTS_STATE_MASK;
-		t->opts |= TIMER_OPTS_ONESHOT;
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		t->opts &= ~TIMER_OPTS_STATE_MASK;
-		t->opts |= TIMER_OPTS_DISABLED;
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	t->opts &= ~TIMER_OPTS_STATE_MASK;
+	t->opts |= TIMER_OPTS_DISABLED;
+	return 0;
+}
+
+static int davinci_set_oneshot(struct clock_event_device *evt)
+{
+	struct timer_s *t = &timers[TID_CLOCKEVENT];
+
+	t->opts &= ~TIMER_OPTS_STATE_MASK;
+	t->opts |= TIMER_OPTS_ONESHOT;
+	return 0;
+}
+
+static int davinci_set_periodic(struct clock_event_device *evt)
+{
+	struct timer_s *t = &timers[TID_CLOCKEVENT];
+
+	t->period = davinci_clock_tick_rate / (HZ);
+	t->opts &= ~TIMER_OPTS_STATE_MASK;
+	t->opts |= TIMER_OPTS_PERIODIC;
+	timer32_config(t);
+	return 0;
 }
 
 static struct clock_event_device clockevent_davinci = {
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event	= davinci_set_next_event,
-	.set_mode	= davinci_set_mode,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event		= davinci_set_next_event,
+	.set_state_shutdown	= davinci_shutdown,
+	.set_state_periodic	= davinci_set_periodic,
+	.set_state_oneshot	= davinci_set_oneshot,
 };
 
 
diff --git a/arch/arm/mach-digicolor/digicolor.c b/arch/arm/mach-digicolor/digicolor.c
index cfc88d1..4d62f1b 100644
--- a/arch/arm/mach-digicolor/digicolor.c
+++ b/arch/arm/mach-digicolor/digicolor.c
@@ -8,7 +8,7 @@
 
 #include <asm/mach/arch.h>
 
-static const char *digicolor_dt_compat[] __initconst = {
+static const char *const digicolor_dt_compat[] __initconst = {
 	"cnxt,cx92755",
 	NULL,
 };
diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
index df0223f..305d7c6 100644
--- a/arch/arm/mach-dove/irq.c
+++ b/arch/arm/mach-dove/irq.c
@@ -69,8 +69,9 @@
 	.irq_ack	= pmu_irq_ack,
 };
 
-static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pmu_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned long cause = readl(PMU_INTERRUPT_CAUSE);
 
 	cause &= readl(PMU_INTERRUPT_MASK);
@@ -172,7 +173,7 @@
 	for (i = IRQ_DOVE_PMU_START; i < NR_IRQS; i++) {
 		irq_set_chip_and_handler(i, &pmu_irq_chip, handle_level_irq);
 		irq_set_status_flags(i, IRQ_LEVEL);
-		set_irq_flags(i, IRQF_VALID);
+		irq_clear_status_flags(i, IRQ_NOREQUEST);
 	}
 	irq_set_chained_handler(IRQ_DOVE_PMU, pmu_irq_handler);
 }
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index 8254e71..688e5fe 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -65,7 +65,7 @@
 	for (irq = 0; irq < NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &ebsa110_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 }
 
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index bec570a..61a75ca 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -15,45 +15,8 @@
 
 comment "EP93xx Platforms"
 
-choice
-	prompt "EP93xx first SDRAM bank selection"
-	default EP93XX_SDCE3_SYNC_PHYS_OFFSET
-
-config EP93XX_SDCE3_SYNC_PHYS_OFFSET
-	bool "0x00000000 - SDCE3/SyncBoot"
-	help
-	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0x00000000.
-
-config EP93XX_SDCE0_PHYS_OFFSET
-	bool "0xc0000000 - SDCEO"
-	help
-	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0xc0000000.
-
-config EP93XX_SDCE1_PHYS_OFFSET
-	bool "0xd0000000 - SDCE1"
-	help
-	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0xd0000000.
-
-config EP93XX_SDCE2_PHYS_OFFSET
-	bool "0xe0000000 - SDCE2"
-	help
-	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0xe0000000.
-
-config EP93XX_SDCE3_ASYNC_PHYS_OFFSET
-	bool "0xf0000000 - SDCE3/AsyncBoot"
-	help
-	  Select this option if you want support for EP93xx boards with the
-	  first SDRAM bank at 0xf0000000.
-
-endchoice
-
 config MACH_ADSSPHERE
 	bool "Support ADS Sphere"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	help
 	  Say 'Y' here if you want your kernel to support the ADS
 	  Sphere board.
@@ -63,7 +26,6 @@
 
 config MACH_EDB9301
 	bool "Support Cirrus Logic EDB9301"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -71,7 +33,6 @@
 
 config MACH_EDB9302
 	bool "Support Cirrus Logic EDB9302"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -79,7 +40,6 @@
 
 config MACH_EDB9302A
 	bool "Support Cirrus Logic EDB9302A"
-	depends on EP93XX_SDCE0_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -87,7 +47,6 @@
 
 config MACH_EDB9307
 	bool "Support Cirrus Logic EDB9307"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -95,7 +54,6 @@
 
 config MACH_EDB9307A
 	bool "Support Cirrus Logic EDB9307A"
-	depends on EP93XX_SDCE0_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -103,7 +61,6 @@
 
 config MACH_EDB9312
 	bool "Support Cirrus Logic EDB9312"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -111,7 +68,6 @@
 
 config MACH_EDB9315
 	bool "Support Cirrus Logic EDB9315"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
@@ -119,14 +75,12 @@
 
 config MACH_EDB9315A
 	bool "Support Cirrus Logic EDB9315A"
-	depends on EP93XX_SDCE0_PHYS_OFFSET
 	select MACH_EDB93XX
 	help
 	  Say 'Y' here if you want your kernel to support the Cirrus
 	  Logic EDB9315A Evaluation Board.
 
 config MACH_GESBC9312
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	bool "Support Glomation GESBC-9312-sx"
 	help
 	  Say 'Y' here if you want your kernel to support the Glomation
@@ -137,7 +91,6 @@
 
 config MACH_MICRO9H
 	bool "Support Contec Micro9-High"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -145,7 +98,6 @@
 
 config MACH_MICRO9M
 	bool "Support Contec Micro9-Mid"
-	depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -153,7 +105,6 @@
 
 config MACH_MICRO9L
 	bool "Support Contec Micro9-Lite"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -161,7 +112,6 @@
 
 config MACH_MICRO9S
 	bool "Support Contec Micro9-Slim"
-	depends on EP93XX_SDCE3_ASYNC_PHYS_OFFSET
 	select MACH_MICRO9
 	help
 	  Say 'Y' here if you want your kernel to support the
@@ -169,28 +119,24 @@
 
 config MACH_SIM_ONE
         bool "Support Simplemachines Sim.One board"
-        depends on EP93XX_SDCE0_PHYS_OFFSET
         help
           Say 'Y' here if you want your kernel to support the
           Simplemachines Sim.One board.
 
 config MACH_SNAPPER_CL15
 	bool "Support Bluewater Systems Snapper CL15 Module"
-	depends on EP93XX_SDCE0_PHYS_OFFSET
 	help
 	  Say 'Y' here if you want your kernel to support the Bluewater
 	  Systems Snapper CL15 Module.
 
 config MACH_TS72XX
 	bool "Support Technologic Systems TS-72xx SBC"
-	depends on EP93XX_SDCE3_SYNC_PHYS_OFFSET
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  Technologic Systems TS-72xx board.
 
 config MACH_VISION_EP9307
 	bool "Support Vision Engraving Systems EP9307 SoM"
-	depends on EP93XX_SDCE0_PHYS_OFFSET
 	help
 	  Say 'Y' here if you want your kernel to support the
 	  Vision Engraving Systems EP9307 SoM.
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 78d427b..b7ae434 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -1,7 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
-obj-y			:= core.o clock.o
+obj-y			:= core.o clock.o timer-ep93xx.o
 
 obj-$(CONFIG_EP93XX_DMA)	+= dma.o
 
diff --git a/arch/arm/mach-ep93xx/Makefile.boot b/arch/arm/mach-ep93xx/Makefile.boot
index d3113a7..ed82ed7 100644
--- a/arch/arm/mach-ep93xx/Makefile.boot
+++ b/arch/arm/mach-ep93xx/Makefile.boot
@@ -1,14 +1 @@
-   zreladdr-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)	+= 0x00008000
-params_phys-$(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)	:= 0x00000100
-
-   zreladdr-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)		+= 0xc0008000
-params_phys-$(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)		:= 0xc0000100
-
-   zreladdr-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)		+= 0xd0008000
-params_phys-$(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)		:= 0xd0000100
-
-   zreladdr-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)		+= 0xe0008000
-params_phys-$(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)		:= 0xe0000100
-
-   zreladdr-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)	+= 0xf0008000
-params_phys-$(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)	:= 0xf0000100
+# Empty file waiting for deletion once Makefile.boot isn't needed any more.
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 0e571f1..c393b1b 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -22,7 +22,6 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/sys_soc.h>
-#include <linux/timex.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
@@ -38,6 +37,7 @@
 #include <linux/irqchip/arm-vic.h>
 #include <linux/reboot.h>
 #include <linux/usb/ohci_pdriver.h>
+#include <linux/random.h>
 
 #include <mach/hardware.h>
 #include <linux/platform_data/video-ep93xx.h>
@@ -47,7 +47,6 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include <asm/mach/time.h>
 
 #include "soc.h"
 
@@ -73,113 +72,6 @@
 	iotable_init(ep93xx_io_desc, ARRAY_SIZE(ep93xx_io_desc));
 }
 
-
-/*************************************************************************
- * Timer handling for EP93xx
- *************************************************************************
- * The ep93xx has four internal timers.  Timers 1, 2 (both 16 bit) and
- * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate
- * an interrupt on underflow.  Timer 4 (40 bit) counts down at 983.04 kHz,
- * is free-running, and can't generate interrupts.
- *
- * The 508 kHz timers are ideal for use for the timer interrupt, as the
- * most common values of HZ divide 508 kHz nicely.  We pick one of the 16
- * bit timers (timer 1) since we don't need more than 16 bits of reload
- * value as long as HZ >= 8.
- *
- * The higher clock rate of timer 4 makes it a better choice than the
- * other timers for use in gettimeoffset(), while the fact that it can't
- * generate interrupts means we don't have to worry about not being able
- * to use this timer for something else.  We also use timer 4 for keeping
- * track of lost jiffies.
- */
-#define EP93XX_TIMER_REG(x)		(EP93XX_TIMER_BASE + (x))
-#define EP93XX_TIMER1_LOAD		EP93XX_TIMER_REG(0x00)
-#define EP93XX_TIMER1_VALUE		EP93XX_TIMER_REG(0x04)
-#define EP93XX_TIMER1_CONTROL		EP93XX_TIMER_REG(0x08)
-#define EP93XX_TIMER123_CONTROL_ENABLE	(1 << 7)
-#define EP93XX_TIMER123_CONTROL_MODE	(1 << 6)
-#define EP93XX_TIMER123_CONTROL_CLKSEL	(1 << 3)
-#define EP93XX_TIMER1_CLEAR		EP93XX_TIMER_REG(0x0c)
-#define EP93XX_TIMER2_LOAD		EP93XX_TIMER_REG(0x20)
-#define EP93XX_TIMER2_VALUE		EP93XX_TIMER_REG(0x24)
-#define EP93XX_TIMER2_CONTROL		EP93XX_TIMER_REG(0x28)
-#define EP93XX_TIMER2_CLEAR		EP93XX_TIMER_REG(0x2c)
-#define EP93XX_TIMER4_VALUE_LOW		EP93XX_TIMER_REG(0x60)
-#define EP93XX_TIMER4_VALUE_HIGH	EP93XX_TIMER_REG(0x64)
-#define EP93XX_TIMER4_VALUE_HIGH_ENABLE	(1 << 8)
-#define EP93XX_TIMER3_LOAD		EP93XX_TIMER_REG(0x80)
-#define EP93XX_TIMER3_VALUE		EP93XX_TIMER_REG(0x84)
-#define EP93XX_TIMER3_CONTROL		EP93XX_TIMER_REG(0x88)
-#define EP93XX_TIMER3_CLEAR		EP93XX_TIMER_REG(0x8c)
-
-#define EP93XX_TIMER123_CLOCK		508469
-#define EP93XX_TIMER4_CLOCK		983040
-
-#define TIMER1_RELOAD			((EP93XX_TIMER123_CLOCK / HZ) - 1)
-#define TIMER4_TICKS_PER_JIFFY		DIV_ROUND_CLOSEST(EP93XX_TIMER4_CLOCK, HZ)
-
-static unsigned int last_jiffy_time;
-
-static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
-{
-	/* Writing any value clears the timer interrupt */
-	__raw_writel(1, EP93XX_TIMER1_CLEAR);
-
-	/* Recover lost jiffies */
-	while ((signed long)
-		(__raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time)
-						>= TIMER4_TICKS_PER_JIFFY) {
-		last_jiffy_time += TIMER4_TICKS_PER_JIFFY;
-		timer_tick();
-	}
-
-	return IRQ_HANDLED;
-}
-
-static struct irqaction ep93xx_timer_irq = {
-	.name		= "ep93xx timer",
-	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
-	.handler	= ep93xx_timer_interrupt,
-};
-
-static u32 ep93xx_gettimeoffset(void)
-{
-	int offset;
-
-	offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time;
-
-	/*
-	 * Timer 4 is based on a 983.04 kHz reference clock,
-	 * so dividing by 983040 gives the fraction of a second,
-	 * so dividing by 0.983040 converts to uS.
-	 * Refactor the calculation to avoid overflow.
-	 * Finally, multiply by 1000 to give nS.
-	 */
-	return (offset + (53 * offset / 3072)) * 1000;
-}
-
-void __init ep93xx_timer_init(void)
-{
-	u32 tmode = EP93XX_TIMER123_CONTROL_MODE |
-		    EP93XX_TIMER123_CONTROL_CLKSEL;
-
-	arch_gettimeoffset = ep93xx_gettimeoffset;
-
-	/* Enable periodic HZ timer.  */
-	__raw_writel(tmode, EP93XX_TIMER1_CONTROL);
-	__raw_writel(TIMER1_RELOAD, EP93XX_TIMER1_LOAD);
-	__raw_writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE,
-			EP93XX_TIMER1_CONTROL);
-
-	/* Enable lost jiffy timer.  */
-	__raw_writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE,
-			EP93XX_TIMER4_VALUE_HIGH);
-
-	setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq);
-}
-
-
 /*************************************************************************
  * EP93xx IRQ handling
  *************************************************************************/
@@ -971,6 +863,12 @@
 	if (id != id2)
 		return "invalid";
 
+	/* Toss the unique ID into the entropy pool */
+	add_device_randomness(&id2, 4);
+	add_device_randomness(&id3, 4);
+	add_device_randomness(&id4, 4);
+	add_device_randomness(&id5, 4);
+
 	snprintf(ep93xx_soc_id, sizeof(ep93xx_soc_id),
 		 "%08x%08x%08x%08x", id2, id3, id4, id5);
 
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index 27b14ae..ad92d9f 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -205,8 +205,6 @@
  * EDB93xx framebuffer
  *************************************************************************/
 static struct ep93xxfb_mach_info __initdata edb93xxfb_info = {
-	.num_modes	= EP93XXFB_USE_MODEDB,
-	.bpp		= 16,
 	.flags		= 0,
 };
 
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index 3c950f5..7bb540c4 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -40,8 +40,6 @@
 };
 
 static struct ep93xxfb_mach_info __initdata simone_fb_info = {
-	.num_modes	= EP93XXFB_USE_MODEDB,
-	.bpp		= 16,
 	.flags		= EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
 };
 
@@ -169,6 +167,7 @@
 
 static struct ep93xx_spi_info simone_spi_info __initdata = {
 	.num_chipselect	= ARRAY_SIZE(simone_spi_devices),
+	.use_dma = 1,
 };
 
 static struct i2c_gpio_platform_data __initdata simone_i2c_gpio_data = {
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index aa86f86..c490426 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -144,8 +144,6 @@
 };
 
 static struct ep93xxfb_mach_info __initdata snappercl15_fb_info = {
-	.num_modes		= EP93XXFB_USE_MODEDB,
-	.bpp			= 16,
 };
 
 static struct platform_device snappercl15_audio_device = {
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
new file mode 100644
index 0000000..e5f7911
--- /dev/null
+++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
@@ -0,0 +1,143 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/mach/time.h>
+#include "soc.h"
+
+/*************************************************************************
+ * Timer handling for EP93xx
+ *************************************************************************
+ * The ep93xx has four internal timers.  Timers 1, 2 (both 16 bit) and
+ * 3 (32 bit) count down at 508 kHz, are self-reloading, and can generate
+ * an interrupt on underflow.  Timer 4 (40 bit) counts down at 983.04 kHz,
+ * is free-running, and can't generate interrupts.
+ *
+ * The 508 kHz timers are ideal for use for the timer interrupt, as the
+ * most common values of HZ divide 508 kHz nicely.  We pick the 32 bit
+ * timer (timer 3) to get as long sleep intervals as possible when using
+ * CONFIG_NO_HZ.
+ *
+ * The higher clock rate of timer 4 makes it a better choice than the
+ * other timers for use as clock source and for sched_clock(), providing
+ * a stable 40 bit time base.
+ *************************************************************************
+ */
+#define EP93XX_TIMER_REG(x)		(EP93XX_TIMER_BASE + (x))
+#define EP93XX_TIMER1_LOAD		EP93XX_TIMER_REG(0x00)
+#define EP93XX_TIMER1_VALUE		EP93XX_TIMER_REG(0x04)
+#define EP93XX_TIMER1_CONTROL		EP93XX_TIMER_REG(0x08)
+#define EP93XX_TIMER123_CONTROL_ENABLE	(1 << 7)
+#define EP93XX_TIMER123_CONTROL_MODE	(1 << 6)
+#define EP93XX_TIMER123_CONTROL_CLKSEL	(1 << 3)
+#define EP93XX_TIMER1_CLEAR		EP93XX_TIMER_REG(0x0c)
+#define EP93XX_TIMER2_LOAD		EP93XX_TIMER_REG(0x20)
+#define EP93XX_TIMER2_VALUE		EP93XX_TIMER_REG(0x24)
+#define EP93XX_TIMER2_CONTROL		EP93XX_TIMER_REG(0x28)
+#define EP93XX_TIMER2_CLEAR		EP93XX_TIMER_REG(0x2c)
+#define EP93XX_TIMER4_VALUE_LOW		EP93XX_TIMER_REG(0x60)
+#define EP93XX_TIMER4_VALUE_HIGH	EP93XX_TIMER_REG(0x64)
+#define EP93XX_TIMER4_VALUE_HIGH_ENABLE	(1 << 8)
+#define EP93XX_TIMER3_LOAD		EP93XX_TIMER_REG(0x80)
+#define EP93XX_TIMER3_VALUE		EP93XX_TIMER_REG(0x84)
+#define EP93XX_TIMER3_CONTROL		EP93XX_TIMER_REG(0x88)
+#define EP93XX_TIMER3_CLEAR		EP93XX_TIMER_REG(0x8c)
+
+#define EP93XX_TIMER123_RATE		508469
+#define EP93XX_TIMER4_RATE		983040
+
+static u64 notrace ep93xx_read_sched_clock(void)
+{
+	u64 ret;
+
+	ret = readl(EP93XX_TIMER4_VALUE_LOW);
+	ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
+	return ret;
+}
+
+cycle_t ep93xx_clocksource_read(struct clocksource *c)
+{
+	u64 ret;
+
+	ret = readl(EP93XX_TIMER4_VALUE_LOW);
+	ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
+	return (cycle_t) ret;
+}
+
+static int ep93xx_clkevt_set_next_event(unsigned long next,
+					struct clock_event_device *evt)
+{
+	/* Default mode: periodic, off, 508 kHz */
+	u32 tmode = EP93XX_TIMER123_CONTROL_MODE |
+		    EP93XX_TIMER123_CONTROL_CLKSEL;
+
+	/* Clear timer */
+	writel(tmode, EP93XX_TIMER3_CONTROL);
+
+	/* Set next event */
+	writel(next, EP93XX_TIMER3_LOAD);
+	writel(tmode | EP93XX_TIMER123_CONTROL_ENABLE,
+	       EP93XX_TIMER3_CONTROL);
+	return 0;
+}
+
+static int ep93xx_clkevt_shutdown(struct clock_event_device *evt)
+{
+	/* Disable timer */
+	writel(0, EP93XX_TIMER3_CONTROL);
+
+	return 0;
+}
+
+static struct clock_event_device ep93xx_clockevent = {
+	.name			= "timer1",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= ep93xx_clkevt_shutdown,
+	.set_state_oneshot	= ep93xx_clkevt_shutdown,
+	.tick_resume		= ep93xx_clkevt_shutdown,
+	.set_next_event		= ep93xx_clkevt_set_next_event,
+	.rating			= 300,
+};
+
+static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = dev_id;
+
+	/* Writing any value clears the timer interrupt */
+	writel(1, EP93XX_TIMER3_CLEAR);
+
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction ep93xx_timer_irq = {
+	.name		= "ep93xx timer",
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
+	.handler	= ep93xx_timer_interrupt,
+	.dev_id		= &ep93xx_clockevent,
+};
+
+void __init ep93xx_timer_init(void)
+{
+	/* Enable and register clocksource and sched_clock on timer 4 */
+	writel(EP93XX_TIMER4_VALUE_HIGH_ENABLE,
+	       EP93XX_TIMER4_VALUE_HIGH);
+	clocksource_mmio_init(NULL, "timer4",
+			      EP93XX_TIMER4_RATE, 200, 40,
+			      ep93xx_clocksource_read);
+	sched_clock_register(ep93xx_read_sched_clock, 40,
+			     EP93XX_TIMER4_RATE);
+
+	/* Set up clockevent on timer 3 */
+	setup_irq(IRQ_EP93XX_TIMER3, &ep93xx_timer_irq);
+	clockevents_config_and_register(&ep93xx_clockevent,
+					EP93XX_TIMER123_RATE,
+					1,
+					0xffffffffU);
+}
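
A note on the timer split above: the 40-bit timer 4 value is assembled from two MMIO reads (the low word plus the low 8 bits of the HIGH register), so as clocksource and sched_clock base it wraps only after

	2^40 cycles / 983040 Hz  ~=  1,118,481 s  ~=  12.9 days

whereas a 32-bit counter at the same rate would wrap after roughly 72 minutes.
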
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index 6bc1c18..5cced59 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -29,6 +29,8 @@
 #include <linux/spi/mmc_spi.h>
 #include <linux/mmc/host.h>
 
+#include <sound/cs4271.h>
+
 #include <mach/hardware.h>
 #include <linux/platform_data/video-ep93xx.h>
 #include <linux/platform_data/spi-ep93xx.h>
@@ -104,8 +106,6 @@
 }
 
 static struct ep93xxfb_mach_info ep93xxfb_info __initdata = {
-	.num_modes	= EP93XXFB_USE_MODEDB,
-	.bpp		= 16,
 	.flags		= EP93XXFB_USE_SDCSN0 | EP93XXFB_PCLK_FALLING,
 	.setup		= vision_lcd_setup,
 	.teardown	= vision_lcd_teardown,
@@ -169,6 +169,35 @@
 };
 
 /*************************************************************************
+ * SPI CS4271 Audio Codec
+ *************************************************************************/
+static struct cs4271_platform_data vision_cs4271_data = {
+	.gpio_nreset	= EP93XX_GPIO_LINE_H(2),
+};
+
+static int vision_cs4271_hw_setup(struct spi_device *spi)
+{
+	return gpio_request_one(EP93XX_GPIO_LINE_EGPIO6,
+				GPIOF_OUT_INIT_HIGH, spi->modalias);
+}
+
+static void vision_cs4271_hw_cleanup(struct spi_device *spi)
+{
+	gpio_free(EP93XX_GPIO_LINE_EGPIO6);
+}
+
+static void vision_cs4271_hw_cs_control(struct spi_device *spi, int value)
+{
+	gpio_set_value(EP93XX_GPIO_LINE_EGPIO6, value);
+}
+
+static struct ep93xx_spi_chip_ops vision_cs4271_hw = {
+	.setup		= vision_cs4271_hw_setup,
+	.cleanup	= vision_cs4271_hw_cleanup,
+	.cs_control	= vision_cs4271_hw_cs_control,
+};
+
+/*************************************************************************
  * SPI Flash
  *************************************************************************/
 #define VISION_SPI_FLASH_CS	EP93XX_GPIO_LINE_EGPIO7
@@ -262,12 +291,20 @@
  *************************************************************************/
 static struct spi_board_info vision_spi_board_info[] __initdata = {
 	{
+		.modalias		= "cs4271",
+		.platform_data		= &vision_cs4271_data,
+		.controller_data	= &vision_cs4271_hw,
+		.max_speed_hz		= 6000000,
+		.bus_num		= 0,
+		.chip_select		= 0,
+		.mode			= SPI_MODE_3,
+	}, {
 		.modalias		= "sst25l",
 		.platform_data		= &vision_spi_flash_data,
 		.controller_data	= &vision_spi_flash_hw,
 		.max_speed_hz		= 20000000,
 		.bus_num		= 0,
-		.chip_select		= 0,
+		.chip_select		= 1,
 		.mode			= SPI_MODE_3,
 	}, {
 		.modalias		= "mmc_spi",
@@ -275,16 +312,31 @@
 		.controller_data	= &vision_spi_mmc_hw,
 		.max_speed_hz		= 20000000,
 		.bus_num		= 0,
-		.chip_select		= 1,
+		.chip_select		= 2,
 		.mode			= SPI_MODE_3,
 	},
 };
 
 static struct ep93xx_spi_info vision_spi_master __initdata = {
-	.num_chipselect		= ARRAY_SIZE(vision_spi_board_info),
+	.num_chipselect	= ARRAY_SIZE(vision_spi_board_info),
+	.use_dma	= 1,
 };
 
 /*************************************************************************
+ * I2S Audio
+ *************************************************************************/
+static struct platform_device vision_audio_device = {
+	.name		= "edb93xx-audio",
+	.id		= -1,
+};
+
+static void __init vision_register_i2s(void)
+{
+	ep93xx_register_i2s();
+	platform_device_register(&vision_audio_device);
+}
+
+/*************************************************************************
  * Machine Initialization
  *************************************************************************/
 static void __init vision_init_machine(void)
@@ -309,6 +361,7 @@
 				ARRAY_SIZE(vision_i2c_info));
 	ep93xx_register_spi(&vision_spi_master, vision_spi_board_info,
 				ARRAY_SIZE(vision_spi_board_info));
+	vision_register_i2s();
 }
 
 MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
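
For reference, the net effect of the vision_spi_board_info changes above on the bus-0 chip-select map (num_chipselect follows ARRAY_SIZE() and therefore grows to 3 automatically):

	chip_select 0:  cs4271   audio codec  (new)
	chip_select 1:  sst25l   SPI flash    (was 0)
	chip_select 2:  mmc_spi  MMC slot     (was 1)
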
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 81064cd..4c4858c 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -30,6 +30,11 @@
 
 if ARCH_EXYNOS
 
+config S5P_DEV_MFC
+	bool
+	help
+	  Compile in the memory setup (init) code for MFC
+
 config ARCH_EXYNOS3
 	bool "SAMSUNG EXYNOS3"
 	select ARM_CPU_SUSPEND if PM
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index bcefb54..2f30676 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -23,3 +23,5 @@
 
 obj-$(CONFIG_EXYNOS5420_MCPM)	+= mcpm-exynos.o
 CFLAGS_mcpm-exynos.o		+= -march=armv7-a
+
+obj-$(CONFIG_S5P_DEV_MFC)	+= s5p-dev-mfc.o
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index e3a9256..1534925 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -128,6 +128,12 @@
 
 /* CPU BOOT mode flag for Exynos3250 SoC bootloader */
 #define C2_STATE	(1 << 3)
+/*
+ * Magic values for bootloader indicating chosen low power mode.
+ * See also Documentation/arm/Samsung/Bootloader-interface.txt
+ */
+#define EXYNOS_SLEEP_MAGIC	0x00000bad
+#define EXYNOS_AFTR_MAGIC	0xfcba0d10
 
 void exynos_set_boot_flag(unsigned int cpu, unsigned int mode);
 void exynos_clear_boot_flag(unsigned int cpu, unsigned int mode);
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index 245f6de..111cfbf 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -25,8 +25,6 @@
 #include "common.h"
 #include "smc.h"
 
-#define EXYNOS_SLEEP_MAGIC	0x00000bad
-#define EXYNOS_AFTR_MAGIC	0xfcba0d10
 #define EXYNOS_BOOT_ADDR	0x8
 #define EXYNOS_BOOT_FLAG	0xc
 
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 58e05a2..98a2c0c 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -182,7 +182,7 @@
 
 	boot_reg = cpu_boot_reg_base();
 	if (!boot_reg)
-		return ERR_PTR(-ENODEV);
+		return IOMEM_ERR_PTR(-ENODEV);
 	if (soc_is_exynos4412())
 		boot_reg += 4*cpu;
 	else if (soc_is_exynos5420() || soc_is_exynos5800())
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index e812c1c..de68938 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -698,7 +698,7 @@
 		;
 }
 
-void exynos5420_powerdown_conf(enum sys_powerdown mode)
+static void exynos5420_powerdown_conf(enum sys_powerdown mode)
 {
 	u32 this_cluster;
 
@@ -991,7 +991,6 @@
 static struct platform_driver exynos_pmu_driver = {
 	.driver  = {
 		.name   = "exynos-pmu",
-		.owner	= THIS_MODULE,
 		.of_match_table = exynos_pmu_of_device_ids,
 	},
 	.probe = exynos_pmu_probe,
diff --git a/arch/arm/mach-exynos/regs-srom.h b/arch/arm/mach-exynos/regs-srom.h
new file mode 100644
index 0000000..5c4d442
--- /dev/null
+++ b/arch/arm/mach-exynos/regs-srom.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * S5P SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_SAMSUNG_REGS_SROM_H
+#define __PLAT_SAMSUNG_REGS_SROM_H __FILE__
+
+#include <mach/map.h>
+
+#define S5P_SROMREG(x)		(S5P_VA_SROMC + (x))
+
+#define S5P_SROM_BW		S5P_SROMREG(0x0)
+#define S5P_SROM_BC0		S5P_SROMREG(0x4)
+#define S5P_SROM_BC1		S5P_SROMREG(0x8)
+#define S5P_SROM_BC2		S5P_SROMREG(0xc)
+#define S5P_SROM_BC3		S5P_SROMREG(0x10)
+#define S5P_SROM_BC4		S5P_SROMREG(0x14)
+#define S5P_SROM_BC5		S5P_SROMREG(0x18)
+
+/* one register BW holds 4-bit packed settings per chip select, NCS0 - NCS5 */
+
+#define S5P_SROM_BW__DATAWIDTH__SHIFT		0
+#define S5P_SROM_BW__ADDRMODE__SHIFT		1
+#define S5P_SROM_BW__WAITENABLE__SHIFT		2
+#define S5P_SROM_BW__BYTEENABLE__SHIFT		3
+
+#define S5P_SROM_BW__CS_MASK			0xf
+
+#define S5P_SROM_BW__NCS0__SHIFT		0
+#define S5P_SROM_BW__NCS1__SHIFT		4
+#define S5P_SROM_BW__NCS2__SHIFT		8
+#define S5P_SROM_BW__NCS3__SHIFT		12
+#define S5P_SROM_BW__NCS4__SHIFT		16
+#define S5P_SROM_BW__NCS5__SHIFT		20
+
+/* the same field layout applies to each of BC0 - BC5 */
+
+#define S5P_SROM_BCX__PMC__SHIFT		0
+#define S5P_SROM_BCX__TACP__SHIFT		4
+#define S5P_SROM_BCX__TCAH__SHIFT		8
+#define S5P_SROM_BCX__TCOH__SHIFT		12
+#define S5P_SROM_BCX__TACC__SHIFT		16
+#define S5P_SROM_BCX__TCOS__SHIFT		24
+#define S5P_SROM_BCX__TACS__SHIFT		28
+
+#endif /* __PLAT_SAMSUNG_REGS_SROM_H */
diff --git a/arch/arm/plat-samsung/s5p-dev-mfc.c b/arch/arm/mach-exynos/s5p-dev-mfc.c
similarity index 100%
rename from arch/arm/plat-samsung/s5p-dev-mfc.c
rename to arch/arm/mach-exynos/s5p-dev-mfc.c
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index f572219..e00eb39 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -32,13 +32,11 @@
 #include <asm/suspend.h>
 
 #include <plat/pm-common.h>
-#include <plat/regs-srom.h>
 
 #include "common.h"
-#include "regs-pmu.h"
 #include "exynos-pmu.h"
-
-#define S5P_CHECK_SLEEP 0x00000BAD
+#include "regs-pmu.h"
+#include "regs-srom.h"
 
 #define REG_TABLE_END (-1U)
 
@@ -331,7 +329,7 @@
 {
 	/* Set value of power down register for sleep mode */
 	exynos_sys_powerdown_conf(SYS_SLEEP);
-	pmu_raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
+	pmu_raw_writel(EXYNOS_SLEEP_MAGIC, S5P_INFORM1);
 }
 
 static void exynos_pm_prepare(void)
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 9e8220e..0f0c9e0 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -106,7 +106,7 @@
 
 	for (irq = _DC21285_IRQ(0); irq < _DC21285_IRQ(20); irq++) {
 		irq_set_chip_and_handler(irq, &fb_chip, handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 }
 
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index bf7aa7d..810edc7 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -57,34 +57,32 @@
 	return 0;
 }
 
-static void ckevt_dc21285_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *c)
+static int ckevt_dc21285_shutdown(struct clock_event_device *c)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		*CSR_TIMER1_CLR = 0;
-		*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
-		*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
-				   TIMER_CNTL_DIV16;
-		break;
+	*CSR_TIMER1_CNTL = 0;
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		*CSR_TIMER1_CNTL = 0;
-		break;
-	}
+static int ckevt_dc21285_set_periodic(struct clock_event_device *c)
+{
+	*CSR_TIMER1_CLR = 0;
+	*CSR_TIMER1_LOAD = (mem_fclk_21285 + 8 * HZ) / (16 * HZ);
+	*CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD |
+			   TIMER_CNTL_DIV16;
+	return 0;
 }
 
 static struct clock_event_device ckevt_dc21285 = {
-	.name		= "dc21285_timer1",
-	.features	= CLOCK_EVT_FEAT_PERIODIC |
-			  CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.irq		= IRQ_TIMER1,
-	.set_next_event	= ckevt_dc21285_set_next_event,
-	.set_mode	= ckevt_dc21285_set_mode,
+	.name			= "dc21285_timer1",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.irq			= IRQ_TIMER1,
+	.set_next_event		= ckevt_dc21285_set_next_event,
+	.set_state_shutdown	= ckevt_dc21285_shutdown,
+	.set_state_periodic	= ckevt_dc21285_set_periodic,
+	.set_state_oneshot	= ckevt_dc21285_shutdown,
+	.tick_resume		= ckevt_dc21285_set_periodic,
 };
 
 static irqreturn_t timer1_interrupt(int irq, void *dev_id)
@@ -94,7 +92,7 @@
 	*CSR_TIMER1_CLR = 0;
 
 	/* Stop the timer if in one-shot mode */
-	if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+	if (clockevent_state_oneshot(ce))
 		*CSR_TIMER1_CNTL = 0;
 
 	ce->event_handler(ce);
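
The dc21285 hunk above is the first of many identical conversions in this series: the multiplexing set_mode(mode, dev) callback is replaced by per-state callbacks returning int, and mode checks in interrupt handlers move to the clockevent_state_*() helpers. A minimal sketch of the resulting shape (the foo_* names and register accesses are placeholders, not a real driver):

	static int foo_shutdown(struct clock_event_device *evt)
	{
		foo_stop_timer();		/* placeholder: mask irq, stop counter */
		return 0;
	}

	static int foo_set_periodic(struct clock_event_device *evt)
	{
		foo_start_timer(rate / HZ);	/* placeholder: reload and restart */
		return 0;
	}

	static struct clock_event_device foo_clockevent = {
		.name			= "foo",
		.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
		.rating			= 200,
		.set_next_event		= foo_set_next_event,
		.set_state_shutdown	= foo_shutdown,
		.set_state_periodic	= foo_set_periodic,
		.set_state_oneshot	= foo_shutdown,
		.tick_resume		= foo_set_periodic,
	};

On the interrupt side, ce->mode == CLOCK_EVT_MODE_ONESHOT becomes clockevent_state_oneshot(ce), exactly as in the timer1_interrupt() change above.
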
diff --git a/arch/arm/mach-footbridge/isa-irq.c b/arch/arm/mach-footbridge/isa-irq.c
index c3a0abb..fcd79bc 100644
--- a/arch/arm/mach-footbridge/isa-irq.c
+++ b/arch/arm/mach-footbridge/isa-irq.c
@@ -153,13 +153,13 @@
 		for (irq = _ISA_IRQ(0); irq < _ISA_IRQ(8); irq++) {
 			irq_set_chip_and_handler(irq, &isa_lo_chip,
 						 handle_level_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 
 		for (irq = _ISA_IRQ(8); irq < _ISA_IRQ(16); irq++) {
 			irq_set_chip_and_handler(irq, &isa_hi_chip,
 						 handle_level_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 
 		request_resource(&ioport_resource, &pic1_resource);
@@ -175,8 +175,8 @@
 		 * resistor on this line.
 		 */
 		if (machine_is_netwinder())
-			set_irq_flags(_ISA_IRQ(11), IRQF_VALID |
-				      IRQF_PROBE | IRQF_NOAUTOEN);
+			irq_modify_status(_ISA_IRQ(11),
+				IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_NOAUTOEN);
 	}
 }
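
The footbridge hunks above (and the analogous gemini, imx, iop, ixp4xx, ks8695, lpc32xx and netx hunks below) all apply the same mechanical translation from the ARM-specific set_irq_flags() to the generic irq status helpers; roughly:

	set_irq_flags(irq, IRQF_VALID);
		-> irq_clear_status_flags(irq, IRQ_NOREQUEST);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
		-> irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
		-> irq_modify_status(irq, IRQ_NOREQUEST | IRQ_NOPROBE, IRQ_NOAUTOEN);

i.e. "valid" and "probe" become clearing the corresponding IRQ_NO* status bits, while NOAUTOEN is a status bit that gets set.
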
 
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index 3292f2e..220333e 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -220,7 +220,7 @@
 		     j < GPIO_IRQ_BASE + (i + 1) * 32; j++) {
 			irq_set_chip_and_handler(j, &gpio_irq_chip,
 						 handle_edge_irq);
-			set_irq_flags(j, IRQF_VALID);
+			irq_clear_status_flags(j, IRQ_NOREQUEST);
 		}
 
 		irq_set_chained_handler_and_data(IRQ_GPIO(i), gpio_irq_handler,
diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h
index 98e7b0f..f0390f1 100644
--- a/arch/arm/mach-gemini/include/mach/hardware.h
+++ b/arch/arm/mach-gemini/include/mach/hardware.h
@@ -57,9 +57,6 @@
 #define GEMINI_USB1_BASE	0x69000000
 #define GEMINI_BIG_ENDIAN_BASE	0x80000000
 
-#define GEMINI_TIMER1_BASE	GEMINI_TIMER_BASE
-#define GEMINI_TIMER2_BASE	(GEMINI_TIMER_BASE + 0x10)
-#define GEMINI_TIMER3_BASE	(GEMINI_TIMER_BASE + 0x20)
 
 /*
  * UART Clock when System clk is 150MHz
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index 44f50dc..d929b3f 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -92,7 +92,7 @@
 		} else {			
 			irq_set_handler(i, handle_level_irq);
 		}
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	/* Disable all interrupts */
diff --git a/arch/arm/mach-gemini/time.c b/arch/arm/mach-gemini/time.c
index 0a63c4d2..f5f18df 100644
--- a/arch/arm/mach-gemini/time.c
+++ b/arch/arm/mach-gemini/time.c
@@ -15,93 +15,147 @@
 #include <asm/mach/time.h>
 #include <linux/clockchips.h>
 #include <linux/clocksource.h>
+#include <linux/sched_clock.h>
 
 /*
  * Register definitions for the timers
  */
-#define TIMER_COUNT(BASE_ADDR)		(BASE_ADDR  + 0x00)
-#define TIMER_LOAD(BASE_ADDR)		(BASE_ADDR  + 0x04)
-#define TIMER_MATCH1(BASE_ADDR)		(BASE_ADDR  + 0x08)
-#define TIMER_MATCH2(BASE_ADDR)		(BASE_ADDR  + 0x0C)
-#define TIMER_CR(BASE_ADDR)		(BASE_ADDR  + 0x30)
 
-#define TIMER_1_CR_ENABLE		(1 << 0)
-#define TIMER_1_CR_CLOCK		(1 << 1)
-#define TIMER_1_CR_INT			(1 << 2)
-#define TIMER_2_CR_ENABLE		(1 << 3)
-#define TIMER_2_CR_CLOCK		(1 << 4)
-#define TIMER_2_CR_INT			(1 << 5)
-#define TIMER_3_CR_ENABLE		(1 << 6)
-#define TIMER_3_CR_CLOCK		(1 << 7)
-#define TIMER_3_CR_INT			(1 << 8)
+#define TIMER1_BASE		GEMINI_TIMER_BASE
+#define TIMER2_BASE		(GEMINI_TIMER_BASE + 0x10)
+#define TIMER3_BASE		(GEMINI_TIMER_BASE + 0x20)
+
+#define TIMER_COUNT(BASE)	(IO_ADDRESS(BASE) + 0x00)
+#define TIMER_LOAD(BASE)	(IO_ADDRESS(BASE) + 0x04)
+#define TIMER_MATCH1(BASE)	(IO_ADDRESS(BASE) + 0x08)
+#define TIMER_MATCH2(BASE)	(IO_ADDRESS(BASE) + 0x0C)
+#define TIMER_CR		(IO_ADDRESS(GEMINI_TIMER_BASE) + 0x30)
+#define TIMER_INTR_STATE	(IO_ADDRESS(GEMINI_TIMER_BASE) + 0x34)
+#define TIMER_INTR_MASK		(IO_ADDRESS(GEMINI_TIMER_BASE) + 0x38)
+
+#define TIMER_1_CR_ENABLE	(1 << 0)
+#define TIMER_1_CR_CLOCK	(1 << 1)
+#define TIMER_1_CR_INT		(1 << 2)
+#define TIMER_2_CR_ENABLE	(1 << 3)
+#define TIMER_2_CR_CLOCK	(1 << 4)
+#define TIMER_2_CR_INT		(1 << 5)
+#define TIMER_3_CR_ENABLE	(1 << 6)
+#define TIMER_3_CR_CLOCK	(1 << 7)
+#define TIMER_3_CR_INT		(1 << 8)
+#define TIMER_1_CR_UPDOWN	(1 << 9)
+#define TIMER_2_CR_UPDOWN	(1 << 10)
+#define TIMER_3_CR_UPDOWN	(1 << 11)
+#define TIMER_DEFAULT_FLAGS	(TIMER_1_CR_UPDOWN | \
+				 TIMER_3_CR_ENABLE | \
+				 TIMER_3_CR_UPDOWN)
+
+#define TIMER_1_INT_MATCH1	(1 << 0)
+#define TIMER_1_INT_MATCH2	(1 << 1)
+#define TIMER_1_INT_OVERFLOW	(1 << 2)
+#define TIMER_2_INT_MATCH1	(1 << 3)
+#define TIMER_2_INT_MATCH2	(1 << 4)
+#define TIMER_2_INT_OVERFLOW	(1 << 5)
+#define TIMER_3_INT_MATCH1	(1 << 6)
+#define TIMER_3_INT_MATCH2	(1 << 7)
+#define TIMER_3_INT_OVERFLOW	(1 << 8)
+#define TIMER_INT_ALL_MASK	0x1ff
+
 
 static unsigned int tick_rate;
 
+static u64 notrace gemini_read_sched_clock(void)
+{
+	return readl(TIMER_COUNT(TIMER3_BASE));
+}
+
 static int gemini_timer_set_next_event(unsigned long cycles,
 				       struct clock_event_device *evt)
 {
 	u32 cr;
 
-	cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-
-	/* This may be overdoing it, feel free to test without this */
-	cr &= ~TIMER_2_CR_ENABLE;
-	cr &= ~TIMER_2_CR_INT;
-	writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-
-	/* Set next event */
-	writel(cycles, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE)));
-	writel(cycles, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE)));
-	cr |= TIMER_2_CR_ENABLE;
-	cr |= TIMER_2_CR_INT;
-	writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
+	/* Setup the match register */
+	cr = readl(TIMER_COUNT(TIMER1_BASE));
+	writel(cr + cycles, TIMER_MATCH1(TIMER1_BASE));
+	if (readl(TIMER_COUNT(TIMER1_BASE)) - cr > cycles)
+		return -ETIME;
 
 	return 0;
 }
 
-static void gemini_timer_set_mode(enum clock_event_mode mode,
-				  struct clock_event_device *evt)
+static int gemini_timer_shutdown(struct clock_event_device *evt)
+{
+	u32 cr;
+
+	/*
+	 * Disable also for oneshot: the set_next() call will arm the timer
+	 * instead.
+	 */
+	/* Stop timer and interrupt. */
+	cr = readl(TIMER_CR);
+	cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
+	writel(cr, TIMER_CR);
+
+	/* Setup counter start from 0 */
+	writel(0, TIMER_COUNT(TIMER1_BASE));
+	writel(0, TIMER_LOAD(TIMER1_BASE));
+
+	/* enable interrupt */
+	cr = readl(TIMER_INTR_MASK);
+	cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
+	cr |= TIMER_1_INT_MATCH1;
+	writel(cr, TIMER_INTR_MASK);
+
+	/* start the timer */
+	cr = readl(TIMER_CR);
+	cr |= TIMER_1_CR_ENABLE;
+	writel(cr, TIMER_CR);
+
+	return 0;
+}
+
+static int gemini_timer_set_periodic(struct clock_event_device *evt)
 {
 	u32 period = DIV_ROUND_CLOSEST(tick_rate, HZ);
 	u32 cr;
 
-	switch (mode) {
-        case CLOCK_EVT_MODE_PERIODIC:
-		/* Start the timer */
-		writel(period,
-		       TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER2_BASE)));
-		writel(period,
-		       TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER2_BASE)));
-		cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-		cr |= TIMER_2_CR_ENABLE;
-		cr |= TIMER_2_CR_INT;
-		writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-        case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		/*
-		 * Disable also for oneshot: the set_next() call will
-		 * arm the timer instead.
-		 */
-		cr = readl(TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-		cr &= ~TIMER_2_CR_ENABLE;
-		cr &= ~TIMER_2_CR_INT;
-		writel(cr, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-		break;
-	default:
-                break;
-	}
+	/* Stop timer and interrupt */
+	cr = readl(TIMER_CR);
+	cr &= ~(TIMER_1_CR_ENABLE | TIMER_1_CR_INT);
+	writel(cr, TIMER_CR);
+
+	/* Set up the timer to fire at 1/HZ intervals. */
+	cr = 0xffffffff - (period - 1);
+	writel(cr, TIMER_COUNT(TIMER1_BASE));
+	writel(cr, TIMER_LOAD(TIMER1_BASE));
+
+	/* enable interrupt on overflow */
+	cr = readl(TIMER_INTR_MASK);
+	cr &= ~(TIMER_1_INT_MATCH1 | TIMER_1_INT_MATCH2);
+	cr |= TIMER_1_INT_OVERFLOW;
+	writel(cr, TIMER_INTR_MASK);
+
+	/* Start the timer */
+	cr = readl(TIMER_CR);
+	cr |= TIMER_1_CR_ENABLE;
+	cr |= TIMER_1_CR_INT;
+	writel(cr, TIMER_CR);
+
+	return 0;
 }
 
-/* Use TIMER2 as clock event */
+/* Use TIMER1 as clock event */
 static struct clock_event_device gemini_clockevent = {
-	.name		= "TIMER2",
-	.rating		= 300, /* Reasonably fast and accurate clock event */
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event	= gemini_timer_set_next_event,
-	.set_mode	= gemini_timer_set_mode,
+	.name			= "TIMER1",
+	/* Reasonably fast and accurate clock event */
+	.rating			= 300,
+	.shift			= 32,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event		= gemini_timer_set_next_event,
+	.set_state_shutdown	= gemini_timer_shutdown,
+	.set_state_periodic	= gemini_timer_set_periodic,
+	.set_state_oneshot	= gemini_timer_shutdown,
+	.tick_resume		= gemini_timer_shutdown,
 };
 
 /*
@@ -151,20 +205,35 @@
 	}
 
 	/*
-	 * Make irqs happen for the system timer
+	 * Reset the interrupt mask and status
 	 */
-	setup_irq(IRQ_TIMER2, &gemini_timer_irq);
+	writel(TIMER_INT_ALL_MASK, TIMER_INTR_MASK);
+	writel(0, TIMER_INTR_STATE);
+	writel(TIMER_DEFAULT_FLAGS, TIMER_CR);
 
-	/* Enable and use TIMER1 as clock source */
-	writel(0xffffffff, TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER1_BASE)));
-	writel(0xffffffff, TIMER_LOAD(IO_ADDRESS(GEMINI_TIMER1_BASE)));
-	writel(TIMER_1_CR_ENABLE, TIMER_CR(IO_ADDRESS(GEMINI_TIMER_BASE)));
-	if (clocksource_mmio_init(TIMER_COUNT(IO_ADDRESS(GEMINI_TIMER1_BASE)),
-				  "TIMER1", tick_rate, 300, 32,
-				  clocksource_mmio_readl_up))
-		pr_err("timer: failed to initialize gemini clock source\n");
+	/*
+	 * Set up the free-running clocksource timer (interrupts
+	 * disabled).
+	 */
+	writel(0, TIMER_COUNT(TIMER3_BASE));
+	writel(0, TIMER_LOAD(TIMER3_BASE));
+	writel(0, TIMER_MATCH1(TIMER3_BASE));
+	writel(0, TIMER_MATCH2(TIMER3_BASE));
+	clocksource_mmio_init(TIMER_COUNT(TIMER3_BASE),
+			      "gemini_clocksource", tick_rate,
+			      300, 32, clocksource_mmio_readl_up);
+	sched_clock_register(gemini_read_sched_clock, 32, tick_rate);
 
-	/* Configure and register the clockevent */
+	/*
+	 * Set up the clockevent timer (interrupt-driven).
+	 */
+	writel(0, TIMER_COUNT(TIMER1_BASE));
+	writel(0, TIMER_LOAD(TIMER1_BASE));
+	writel(0, TIMER_MATCH1(TIMER1_BASE));
+	writel(0, TIMER_MATCH2(TIMER1_BASE));
+	setup_irq(IRQ_TIMER1, &gemini_timer_irq);
+	gemini_clockevent.cpumask = cpumask_of(0);
 	clockevents_config_and_register(&gemini_clockevent, tick_rate,
 					1, 0xffffffff);
 }
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 231fba0..6050a14 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -28,8 +28,8 @@
 #include <linux/reboot.h>
 #include <linux/amba/bus.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
 
-#include <asm/psci.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-highbank/pm.c b/arch/arm/mach-highbank/pm.c
index 7f2bd85..4003116 100644
--- a/arch/arm/mach-highbank/pm.c
+++ b/arch/arm/mach-highbank/pm.c
@@ -16,19 +16,21 @@
 
 #include <linux/cpu_pm.h>
 #include <linux/init.h>
+#include <linux/psci.h>
 #include <linux/suspend.h>
 
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define HIGHBANK_SUSPEND_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int highbank_suspend_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-		.affinity_level = 1,
-	};
-
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(HIGHBANK_SUSPEND_PARAM, __pa(cpu_resume));
 }
 
 static int highbank_pm_enter(suspend_state_t state)
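
The highbank suspend path now hands psci_ops.cpu_suspend() a packed PSCI 0.2 power_state word instead of a struct psci_power_state. Assuming the usual uapi shift values (state ID at bit 0, state type at bit 16, affinity level at bit 24, PSCI_POWER_STATE_TYPE_POWER_DOWN == 1), HIGHBANK_SUSPEND_PARAM works out to

	(0 << PSCI_0_2_POWER_STATE_ID_SHIFT)	= 0x00000000
	(1 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)	= 0x01000000
	(1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)	= 0x00010000
	HIGHBANK_SUSPEND_PARAM			= 0x01010000

i.e. a power-down request at affinity level 1, matching the .type/.affinity_level pair removed above.
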
diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c
index c6bd7c7..8cc6215 100644
--- a/arch/arm/mach-hisi/hisilicon.c
+++ b/arch/arm/mach-hisi/hisilicon.c
@@ -11,7 +11,6 @@
  * published by the Free Software Foundation.
 */
 
-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/irqchip.h>
 
diff --git a/arch/arm/mach-imx/3ds_debugboard.c b/arch/arm/mach-imx/3ds_debugboard.c
index 1343773..45903be 100644
--- a/arch/arm/mach-imx/3ds_debugboard.c
+++ b/arch/arm/mach-imx/3ds_debugboard.c
@@ -195,7 +195,7 @@
 
 	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
 		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
-		set_irq_flags(i, IRQF_VALID);
+		irq_clear_status_flags(i, IRQ_NOREQUEST);
 	}
 	irq_set_irq_type(p_irq, IRQF_TRIGGER_LOW);
 	irq_set_chained_handler(p_irq, mxc_expio_irq_handler);
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 573536f..8ceda28 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -548,6 +548,14 @@
 	help
 	  This enables support for Freescale i.MX6 SoloX processor.
 
+config SOC_IMX6UL
+	bool "i.MX6 UltraLite support"
+	select PINCTRL_IMX6UL
+	select SOC_IMX6
+	help
+	  This enables support for Freescale i.MX6 UltraLite processor.
+
 config SOC_IMX7D
 	bool "i.MX7 Dual support"
 	select PINCTRL_IMX7D
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 37c502a..fb689d8 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -83,6 +83,7 @@
 obj-$(CONFIG_SOC_IMX6Q) += mach-imx6q.o
 obj-$(CONFIG_SOC_IMX6SL) += mach-imx6sl.o
 obj-$(CONFIG_SOC_IMX6SX) += mach-imx6sx.o
+obj-$(CONFIG_SOC_IMX6UL) += mach-imx6ul.o
 obj-$(CONFIG_SOC_IMX7D) += mach-imx7d.o
 
 ifeq ($(CONFIG_SUSPEND),y)
diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c
index a7fa92a..5b0f752 100644
--- a/arch/arm/mach-imx/cpu.c
+++ b/arch/arm/mach-imx/cpu.c
@@ -130,6 +130,9 @@
 	case MXC_CPU_IMX6Q:
 		soc_id = "i.MX6Q";
 		break;
+	case MXC_CPU_IMX6UL:
+		soc_id = "i.MX6UL";
+		break;
 	case MXC_CPU_IMX7D:
 		soc_id = "i.MX7D";
 		break;
diff --git a/arch/arm/mach-imx/epit.c b/arch/arm/mach-imx/epit.c
index 074b1a8..08ce2077 100644
--- a/arch/arm/mach-imx/epit.c
+++ b/arch/arm/mach-imx/epit.c
@@ -57,7 +57,6 @@
 #include "hardware.h"
 
 static struct clock_event_device clockevent_epit;
-static enum clock_event_mode clockevent_mode = CLOCK_EVT_MODE_UNUSED;
 
 static void __iomem *timer_base;
 
@@ -106,8 +105,8 @@
 	return 0;
 }
 
-static void epit_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+/* Leave event sources disabled; no more interrupts will appear */
+static int epit_shutdown(struct clock_event_device *evt)
 {
 	unsigned long flags;
 
@@ -120,39 +119,41 @@
 	/* Disable interrupt in GPT module */
 	epit_irq_disable();
 
-	if (mode != clockevent_mode) {
-		/* Set event time into far-far future */
+	/* Clear pending interrupt */
+	epit_irq_acknowledge();
 
-		/* Clear pending interrupt */
-		epit_irq_acknowledge();
-	}
-
-	/* Remember timer mode */
-	clockevent_mode = mode;
 	local_irq_restore(flags);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		printk(KERN_ERR "epit_set_mode: Periodic mode is not "
-				"supported for i.MX EPIT\n");
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
+	return 0;
+}
+
+static int epit_set_oneshot(struct clock_event_device *evt)
+{
+	unsigned long flags;
+
+	/*
+	 * The timer interrupt generation is disabled at least
+	 * for enough time to call epit_set_next_event()
+	 */
+	local_irq_save(flags);
+
+	/* Disable interrupt in GPT module */
+	epit_irq_disable();
+
+	/* Clear pending interrupt, only while switching mode */
+	if (!clockevent_state_oneshot(evt))
+		epit_irq_acknowledge();
+
 	/*
 	 * Do not put overhead of interrupt enable/disable into
 	 * epit_set_next_event(), the core has about 4 minutes
 	 * to call epit_set_next_event() or shutdown clock after
 	 * mode switching
 	 */
-		local_irq_save(flags);
-		epit_irq_enable();
-		local_irq_restore(flags);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		/* Left event sources disabled, no more interrupts appear */
-		break;
-	}
+	epit_irq_enable();
+	local_irq_restore(flags);
+
+	return 0;
 }
 
 /*
@@ -176,11 +177,13 @@
 };
 
 static struct clock_event_device clockevent_epit = {
-	.name		= "epit",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= epit_set_mode,
-	.set_next_event	= epit_set_next_event,
-	.rating		= 200,
+	.name			= "epit",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= epit_shutdown,
+	.tick_resume		= epit_shutdown,
+	.set_state_oneshot	= epit_set_oneshot,
+	.set_next_event		= epit_set_next_event,
+	.rating			= 200,
 };
 
 static int __init epit_clockevent_init(struct clk *timer_clk)
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
new file mode 100644
index 0000000..1b97fe1
--- /dev/null
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/irqchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/micrel_phy.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include "common.h"
+
+static void __init imx6ul_enet_clk_init(void)
+{
+	struct regmap *gpr;
+
+	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6ul-iomuxc-gpr");
+	if (!IS_ERR(gpr))
+		regmap_update_bits(gpr, IOMUXC_GPR1, IMX6UL_GPR1_ENET_CLK_DIR,
+				   IMX6UL_GPR1_ENET_CLK_OUTPUT);
+	else
+		pr_err("failed to find fsl,imx6ul-iomuxc-gpr regmap\n");
+}
+
+static int ksz8081_phy_fixup(struct phy_device *dev)
+{
+	if (dev && dev->interface == PHY_INTERFACE_MODE_MII) {
+		phy_write(dev, 0x1f, 0x8110);
+		phy_write(dev, 0x16, 0x201);
+	} else if (dev && dev->interface == PHY_INTERFACE_MODE_RMII) {
+		phy_write(dev, 0x1f, 0x8190);
+		phy_write(dev, 0x16, 0x202);
+	}
+
+	return 0;
+}
+
+static void __init imx6ul_enet_phy_init(void)
+{
+	if (IS_BUILTIN(CONFIG_PHYLIB))
+		phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+					   ksz8081_phy_fixup);
+}
+
+static inline void imx6ul_enet_init(void)
+{
+	imx6ul_enet_clk_init();
+	imx6ul_enet_phy_init();
+}
+
+static void __init imx6ul_init_machine(void)
+{
+	struct device *parent;
+
+	parent = imx_soc_device_init();
+	if (parent == NULL)
+		pr_warn("failed to initialize soc device\n");
+
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	imx6ul_enet_init();
+	imx_anatop_init();
+}
+
+static void __init imx6ul_init_irq(void)
+{
+	imx_init_revision_from_anatop();
+	imx_src_init();
+	irqchip_init();
+}
+
+static const char *imx6ul_dt_compat[] __initconst = {
+	"fsl,imx6ul",
+	NULL,
+};
+
+DT_MACHINE_START(IMX6UL, "Freescale i.MX6 UltraLite (Device Tree)")
+	.init_irq	= imx6ul_init_irq,
+	.init_machine	= imx6ul_init_machine,
+	.dt_compat	= imx6ul_dt_compat,
+MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx7d.c b/arch/arm/mach-imx/mach-imx7d.c
index 4d4a190..62f3437 100644
--- a/arch/arm/mach-imx/mach-imx7d.c
+++ b/arch/arm/mach-imx/mach-imx7d.c
@@ -31,7 +31,7 @@
 	irqchip_init();
 }
 
-static const char *imx7d_dt_compat[] __initconst = {
+static const char *const imx7d_dt_compat[] __initconst = {
 	"fsl,imx7d",
 	NULL,
 };
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index d08c37c..2c08535 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -238,7 +238,7 @@
 
 	for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
 		irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
-		set_irq_flags(i, IRQF_VALID);
+		irq_clear_status_flags(i, IRQ_NOREQUEST);
 	}
 	irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_4));
 	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h
index c4436d4..a5b1af6 100644
--- a/arch/arm/mach-imx/mxc.h
+++ b/arch/arm/mach-imx/mxc.h
@@ -38,6 +38,7 @@
 #define MXC_CPU_IMX6DL		0x61
 #define MXC_CPU_IMX6SX		0x62
 #define MXC_CPU_IMX6Q		0x63
+#define MXC_CPU_IMX6UL		0x64
 #define MXC_CPU_IMX7D		0x72
 
 #define IMX_DDR_TYPE_LPDDR2		1
@@ -165,6 +166,11 @@
 	return __mxc_cpu_type == MXC_CPU_IMX6SX;
 }
 
+static inline bool cpu_is_imx6ul(void)
+{
+	return __mxc_cpu_type == MXC_CPU_IMX6UL;
+}
+
 static inline bool cpu_is_imx6q(void)
 {
 	return __mxc_cpu_type == MXC_CPU_IMX6Q;
diff --git a/arch/arm/mach-iop13xx/irq.c b/arch/arm/mach-iop13xx/irq.c
index bc73970..623d85a 100644
--- a/arch/arm/mach-iop13xx/irq.c
+++ b/arch/arm/mach-iop13xx/irq.c
@@ -233,7 +233,7 @@
 			irq_set_chip(i, &iop13xx_irqchip4);
 
 		irq_set_handler(i, handle_level_irq);
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	iop13xx_msi_init();
diff --git a/arch/arm/mach-iop32x/irq.c b/arch/arm/mach-iop32x/irq.c
index d7ee278..2d1f69a 100644
--- a/arch/arm/mach-iop32x/irq.c
+++ b/arch/arm/mach-iop32x/irq.c
@@ -69,6 +69,6 @@
 
 	for (i = 0; i < NR_IRQS; i++) {
 		irq_set_chip_and_handler(i, &ext_chip, handle_level_irq);
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 }
diff --git a/arch/arm/mach-iop33x/irq.c b/arch/arm/mach-iop33x/irq.c
index f7f5d3e..c99ec8d 100644
--- a/arch/arm/mach-iop33x/irq.c
+++ b/arch/arm/mach-iop33x/irq.c
@@ -113,6 +113,6 @@
 		irq_set_chip_and_handler(i,
 					 (i < 32) ? &iop33x_irqchip1 : &iop33x_irqchip2,
 					 handle_level_irq);
-		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 }
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 8537d4c..1cb6f2f 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -296,7 +296,7 @@
 	for(i = 0; i < NR_IRQS; i++) {
 		irq_set_chip_and_handler(i, &ixp4xx_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(i, IRQF_VALID);
+		irq_clear_status_flags(i, IRQ_NOREQUEST);
 	}
 }
 
@@ -521,43 +521,55 @@
 	return 0;
 }
 
-static void ixp4xx_set_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int ixp4xx_shutdown(struct clock_event_device *evt)
 {
 	unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
 	unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK;
- 		opts = IXP4XX_OST_ENABLE;
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* period set by 'set next_event' */
-		osrt = 0;
-		opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT;
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		opts &= ~IXP4XX_OST_ENABLE;
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		opts |= IXP4XX_OST_ENABLE;
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	default:
-		osrt = opts = 0;
-		break;
-	}
+	opts &= ~IXP4XX_OST_ENABLE;
+	*IXP4XX_OSRT1 = osrt | opts;
+	return 0;
+}
+
+static int ixp4xx_set_oneshot(struct clock_event_device *evt)
+{
+	unsigned long opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT;
+	unsigned long osrt = 0;
+
+	/* period set by 'set next_event' */
+	*IXP4XX_OSRT1 = osrt | opts;
+	return 0;
+}
+
+static int ixp4xx_set_periodic(struct clock_event_device *evt)
+{
+	unsigned long opts = IXP4XX_OST_ENABLE;
+	unsigned long osrt = IXP4XX_LATCH & ~IXP4XX_OST_RELOAD_MASK;
 
 	*IXP4XX_OSRT1 = osrt | opts;
+	return 0;
+}
+
+static int ixp4xx_resume(struct clock_event_device *evt)
+{
+	unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK;
+	unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK;
+
+	opts |= IXP4XX_OST_ENABLE;
+	*IXP4XX_OSRT1 = osrt | opts;
+	return 0;
 }
 
 static struct clock_event_device clockevent_ixp4xx = {
-	.name		= "ixp4xx timer1",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating         = 200,
-	.set_mode	= ixp4xx_set_mode,
-	.set_next_event	= ixp4xx_set_next_event,
+	.name			= "ixp4xx timer1",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.set_state_shutdown	= ixp4xx_shutdown,
+	.set_state_periodic	= ixp4xx_set_periodic,
+	.set_state_oneshot	= ixp4xx_set_oneshot,
+	.tick_resume		= ixp4xx_resume,
+	.set_next_event		= ixp4xx_set_next_event,
 };
 
 static void __init ixp4xx_clockevent_init(void)
diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c
index edea697..e283939 100644
--- a/arch/arm/mach-keystone/pm_domain.c
+++ b/arch/arm/mach-keystone/pm_domain.c
@@ -16,7 +16,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/pm_clock.h>
 #include <linux/platform_device.h>
-#include <linux/clk-provider.h>
 #include <linux/of.h>
 
 static struct dev_pm_domain keystone_pm_domain = {
diff --git a/arch/arm/mach-ks8695/irq.c b/arch/arm/mach-ks8695/irq.c
index 76802aa..31439f2 100644
--- a/arch/arm/mach-ks8695/irq.c
+++ b/arch/arm/mach-ks8695/irq.c
@@ -172,6 +172,6 @@
 							 handle_edge_irq);
 		}
 
-		set_irq_flags(irq, IRQF_VALID);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 }
diff --git a/arch/arm/mach-ks8695/time.c b/arch/arm/mach-ks8695/time.c
index a197874..18eb0fb 100644
--- a/arch/arm/mach-ks8695/time.c
+++ b/arch/arm/mach-ks8695/time.c
@@ -54,28 +54,25 @@
 /* Timer0 Timeout Counter Register */
 #define T0TC_WATCHDOG		(0xff)		/* Enable watchdog mode */
 
-static void ks8695_set_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int ks8695_set_periodic(struct clock_event_device *evt)
 {
+	u32 rate = DIV_ROUND_CLOSEST(KS8695_CLOCK_RATE, HZ);
+	u32 half = DIV_ROUND_CLOSEST(rate, 2);
 	u32 tmcon;
 
-	if (mode == CLOCK_EVT_FEAT_PERIODIC) {
-		u32 rate = DIV_ROUND_CLOSEST(KS8695_CLOCK_RATE, HZ);
-		u32 half = DIV_ROUND_CLOSEST(rate, 2);
+	/* Disable timer 1 */
+	tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
+	tmcon &= ~TMCON_T1EN;
+	writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
 
-		/* Disable timer 1 */
-		tmcon = readl_relaxed(KS8695_TMR_VA + KS8695_TMCON);
-		tmcon &= ~TMCON_T1EN;
-		writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
+	/* Both registers need to count down */
+	writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);
+	writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);
 
-		/* Both registers need to count down */
-		writel_relaxed(half, KS8695_TMR_VA + KS8695_T1TC);
-		writel_relaxed(half, KS8695_TMR_VA + KS8695_T1PD);
-
-		/* Re-enable timer1 */
-		tmcon |= TMCON_T1EN;
-		writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
-	}
+	/* Re-enable timer1 */
+	tmcon |= TMCON_T1EN;
+	writel_relaxed(tmcon, KS8695_TMR_VA + KS8695_TMCON);
+	return 0;
 }
 
 static int ks8695_set_next_event(unsigned long cycles,
@@ -102,11 +99,13 @@
 }
 
 static struct clock_event_device clockevent_ks8695 = {
-	.name		= "ks8695_t1tc",
-	.rating		= 300, /* Reasonably fast and accurate clock event */
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.set_next_event	= ks8695_set_next_event,
-	.set_mode	= ks8695_set_mode,
+	.name			= "ks8695_t1tc",
+	/* Reasonably fast and accurate clock event */
+	.rating			= 300,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.set_next_event		= ks8695_set_next_event,
+	.set_state_periodic	= ks8695_set_periodic,
 };
 
 /*
diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c
index d4f7dc8..cce4cef 100644
--- a/arch/arm/mach-lpc32xx/irq.c
+++ b/arch/arm/mach-lpc32xx/irq.c
@@ -283,25 +283,25 @@
 	case IRQ_TYPE_EDGE_RISING:
 		/* Rising edge sensitive */
 		__lpc32xx_set_irq_type(d->hwirq, 1, 1);
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_FALLING:
 		/* Falling edge sensitive */
 		__lpc32xx_set_irq_type(d->hwirq, 0, 1);
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
 		/* Low level sensitive */
 		__lpc32xx_set_irq_type(d->hwirq, 0, 0);
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_HIGH:
 		/* High level sensitive */
 		__lpc32xx_set_irq_type(d->hwirq, 1, 0);
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	/* Other modes are not supported */
@@ -434,7 +434,7 @@
 	for (i = 0; i < NR_IRQS; i++) {
 		irq_set_chip_and_handler(i, &lpc32xx_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(i, IRQF_VALID);
+		irq_clear_status_flags(i, IRQ_NOREQUEST);
 	}
 
 	/* Set default mappings */
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 7858d5b..77d6b1b 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -212,7 +212,7 @@
 	.dma_filter = pl08x_filter_id,
 };
 
-static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
+static const struct of_dev_auxdata const lpc32xx_auxdata_lookup[] __initconst = {
 	OF_DEV_AUXDATA("arm,pl022", 0x20084000, "dev:ssp0", NULL),
 	OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", NULL),
 	OF_DEV_AUXDATA("arm,pl110", 0x31040000, "dev:clcd", &lpc32xx_clcd_data),
@@ -248,7 +248,7 @@
 			     lpc32xx_auxdata_lookup, NULL);
 }
 
-static char const *lpc32xx_dt_compat[] __initdata = {
+static const char *const lpc32xx_dt_compat[] __initconst = {
 	"nxp,lpc3220",
 	"nxp,lpc3230",
 	"nxp,lpc3240",
diff --git a/arch/arm/mach-lpc32xx/timer.c b/arch/arm/mach-lpc32xx/timer.c
index 4e583729..ff3499d 100644
--- a/arch/arm/mach-lpc32xx/timer.c
+++ b/arch/arm/mach-lpc32xx/timer.c
@@ -43,36 +43,24 @@
 	return 0;
 }
 
-static void lpc32xx_clkevt_mode(enum clock_event_mode mode,
-    struct clock_event_device *dev)
+static int lpc32xx_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		WARN_ON(1);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/*
-		 * Disable the timer. When using oneshot, we must also
-		 * disable the timer to wait for the first call to
-		 * set_next_event().
-		 */
-		__raw_writel(0, LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	/*
+	 * Disable the timer. When using oneshot, we must also
+	 * disable the timer to wait for the first call to
+	 * set_next_event().
+	 */
+	__raw_writel(0, LPC32XX_TIMER_TCR(LPC32XX_TIMER0_BASE));
+	return 0;
 }
 
 static struct clock_event_device lpc32xx_clkevt = {
-	.name		= "lpc32xx_clkevt",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 300,
-	.set_next_event	= lpc32xx_clkevt_next_event,
-	.set_mode	= lpc32xx_clkevt_mode,
+	.name			= "lpc32xx_clkevt",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 300,
+	.set_next_event		= lpc32xx_clkevt_next_event,
+	.set_state_shutdown	= lpc32xx_shutdown,
+	.set_state_oneshot	= lpc32xx_shutdown,
 };
 
 static irqreturn_t lpc32xx_timer_interrupt(int irq, void *dev_id)
diff --git a/arch/arm/mach-mediatek/Kconfig b/arch/arm/mach-mediatek/Kconfig
index 9f59e58..aeece17 100644
--- a/arch/arm/mach-mediatek/Kconfig
+++ b/arch/arm/mach-mediatek/Kconfig
@@ -3,6 +3,7 @@
 	select ARM_GIC
 	select PINCTRL
 	select MTK_TIMER
+	select MFD_SYSCON
 	help
 	  Support for Mediatek MT65xx & MT81xx SoCs
 
diff --git a/arch/arm/mach-mmp/mmp-dt.c b/arch/arm/mach-mmp/mmp-dt.c
index b2296c9..6e155f0 100644
--- a/arch/arm/mach-mmp/mmp-dt.c
+++ b/arch/arm/mach-mmp/mmp-dt.c
@@ -20,12 +20,12 @@
 
 extern void __init mmp_dt_init_timer(void);
 
-static const char *pxa168_dt_board_compat[] __initdata = {
+static const char *const pxa168_dt_board_compat[] __initconst = {
 	"mrvl,pxa168-aspenite",
 	NULL,
 };
 
-static const char *pxa910_dt_board_compat[] __initdata = {
+static const char *const pxa910_dt_board_compat[] __initconst = {
 	"mrvl,pxa910-dkb",
 	NULL,
 };
diff --git a/arch/arm/mach-mmp/mmp2-dt.c b/arch/arm/mach-mmp/mmp2-dt.c
index 998c0f5..0341359 100644
--- a/arch/arm/mach-mmp/mmp2-dt.c
+++ b/arch/arm/mach-mmp/mmp2-dt.c
@@ -30,7 +30,7 @@
 	of_clk_init(NULL);
 }
 
-static const char *mmp2_dt_board_compat[] __initdata = {
+static const char *const mmp2_dt_board_compat[] __initconst = {
 	"mrvl,mmp2-brownstone",
 	NULL,
 };
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 04c9daf..7db5870 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <asm/mach-types.h>
+#include <asm/outercache.h>
 #include <mach/hardware.h>
 #include <mach/cputype.h>
 #include <mach/addr-map.h>
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 10bfa03..dbc697b 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -124,32 +124,25 @@
 	return 0;
 }
 
-static void timer_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *dev)
+static int timer_set_shutdown(struct clock_event_device *evt)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* disable the matching interrupt */
-		__raw_writel(0x00, mmp_timer_base + TMR_IER(0));
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	}
+	/* disable the matching interrupt */
+	__raw_writel(0x00, mmp_timer_base + TMR_IER(0));
 	local_irq_restore(flags);
+
+	return 0;
 }
 
 static struct clock_event_device ckevt = {
-	.name		= "clockevent",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.set_next_event	= timer_set_next_event,
-	.set_mode	= timer_set_mode,
+	.name			= "clockevent",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.set_next_event		= timer_set_next_event,
+	.set_state_shutdown	= timer_set_shutdown,
+	.set_state_oneshot	= timer_set_shutdown,
 };
 
 static cycle_t clksrc_read(struct clocksource *cs)
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 9747316..c86a5a0 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -96,6 +96,7 @@
 	select MACH_MVEBU_ANY
 	select ORION_IRQCHIP
 	select ORION_TIMER
+	select PM_GENERIC_DOMAINS if PM
 	select PINCTRL_DOVE
 	help
 	  Say 'Y' here if you want your kernel to support the
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index afee908..9f739f3 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -14,7 +14,6 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index e46e9ea..44eedf3 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -65,18 +65,6 @@
 int ll_enable_coherency(void);
 void ll_add_cpu_to_smp_group(void);
 
-int set_cpu_coherent(void)
-{
-	if (!coherency_base) {
-		pr_warn("Can't make current CPU cache coherent.\n");
-		pr_warn("Coherency fabric is not initialized\n");
-		return 1;
-	}
-
-	ll_add_cpu_to_smp_group();
-	return ll_enable_coherency();
-}
-
 static int mvebu_hwcc_notifier(struct notifier_block *nb,
 			       unsigned long event, void *__dev)
 {
@@ -206,6 +194,23 @@
 	return type;
 }
 
+int set_cpu_coherent(void)
+{
+	int type = coherency_type();
+
+	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) {
+		if (!coherency_base) {
+			pr_warn("Can't make current CPU cache coherent.\n");
+			pr_warn("Coherency fabric is not initialized\n");
+			return 1;
+		}
+		ll_add_cpu_to_smp_group();
+		return ll_enable_coherency();
+	}
+
+	return 0;
+}
+
 int coherency_available(void)
 {
 	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h
index 3e0aca1..6b77549 100644
--- a/arch/arm/mach-mvebu/common.h
+++ b/arch/arm/mach-mvebu/common.h
@@ -25,6 +25,6 @@
 
 void __iomem *mvebu_get_scu_base(void);
 
-int mvebu_pm_init(void (*board_pm_enter)(void __iomem *sdram_reg, u32 srcmd));
-
+int mvebu_pm_suspend_init(void (*board_pm_enter)(void __iomem *sdram_reg,
+							u32 srcmd));
 #endif
diff --git a/arch/arm/mach-mvebu/dove.c b/arch/arm/mach-mvebu/dove.c
index 5a17415..1aebb82 100644
--- a/arch/arm/mach-mvebu/dove.c
+++ b/arch/arm/mach-mvebu/dove.c
@@ -12,6 +12,7 @@
 #include <linux/mbus.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/soc/dove/pmu.h>
 #include <asm/hardware/cache-tauros2.h>
 #include <asm/mach/arch.h>
 #include "common.h"
@@ -24,6 +25,7 @@
 	tauros2_init(0);
 #endif
 	BUG_ON(mvebu_mbus_dt_init(false));
+	dove_init_pmu();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
index 301ab38..db17121 100644
--- a/arch/arm/mach-mvebu/pm-board.c
+++ b/arch/arm/mach-mvebu/pm-board.c
@@ -1,7 +1,7 @@
 /*
  * Board-level suspend/resume support.
  *
- * Copyright (C) 2014 Marvell
+ * Copyright (C) 2014-2015 Marvell
  *
  * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
  *
@@ -20,27 +20,27 @@
 #include <linux/slab.h>
 #include "common.h"
 
-#define ARMADA_XP_GP_PIC_NR_GPIOS 3
+#define ARMADA_PIC_NR_GPIOS 3
 
 static void __iomem *gpio_ctrl;
-static int pic_gpios[ARMADA_XP_GP_PIC_NR_GPIOS];
-static int pic_raw_gpios[ARMADA_XP_GP_PIC_NR_GPIOS];
+static int pic_gpios[ARMADA_PIC_NR_GPIOS];
+static int pic_raw_gpios[ARMADA_PIC_NR_GPIOS];
 
-static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
+static void mvebu_armada_pm_enter(void __iomem *sdram_reg, u32 srcmd)
 {
 	u32 reg, ackcmd;
 	int i;
 
 	/* Put 001 as value on the GPIOs */
 	reg = readl(gpio_ctrl);
-	for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
+	for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++)
 		reg &= ~BIT(pic_raw_gpios[i]);
 	reg |= BIT(pic_raw_gpios[0]);
 	writel(reg, gpio_ctrl);
 
 	/* Prepare writing 111 to the GPIOs */
 	ackcmd = readl(gpio_ctrl);
-	for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
+	for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++)
 		ackcmd |= BIT(pic_raw_gpios[i]);
 
 	srcmd = cpu_to_le32(srcmd);
@@ -76,7 +76,7 @@
 		  [ackcmd] "r" (ackcmd), [gpio_ctrl] "r" (gpio_ctrl) : "r1");
 }
 
-static int mvebu_armada_xp_gp_pm_init(void)
+static int __init mvebu_armada_pm_init(void)
 {
 	struct device_node *np;
 	struct device_node *gpio_ctrl_np;
@@ -89,7 +89,7 @@
 	if (!np)
 		return -ENODEV;
 
-	for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++) {
+	for (i = 0; i < ARMADA_PIC_NR_GPIOS; i++) {
 		char *name;
 		struct of_phandle_args args;
 
@@ -134,11 +134,19 @@
 	if (!gpio_ctrl)
 		return -ENOMEM;
 
-	mvebu_pm_init(mvebu_armada_xp_gp_pm_enter);
+	mvebu_pm_suspend_init(mvebu_armada_pm_enter);
 
 out:
 	of_node_put(np);
 	return ret;
 }
 
-late_initcall(mvebu_armada_xp_gp_pm_init);
+/*
+ * Registering the mvebu_board_pm_enter callback must be done before
+ * the platform_suspend_ops are registered, and the gpio devices must
+ * already be available by then. That's why we use a
+ * device_initcall_sync, which runs after all device_initcalls (used
+ * by the gpio drivers) but before the late_initcalls (used to
+ * register the platform_suspend_ops).
+ */
+device_initcall_sync(mvebu_armada_pm_init);
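
The comment above leans on the standard initcall ordering; schematically, with the names used in this series:

	device_initcall()        gpio / pinctrl drivers probe
	device_initcall_sync()   mvebu_armada_pm_init()  ->  mvebu_pm_suspend_init(board hook)
	late_initcall()          mvebu_pm_init() in pm.c ->  suspend_set_ops(&mvebu_pm_ops)

so the board hook is installed after the gpio devices it needs, but before the suspend ops that consume it.
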
diff --git a/arch/arm/mach-mvebu/pm.c b/arch/arm/mach-mvebu/pm.c
index 6573a8f..8d32bf7 100644
--- a/arch/arm/mach-mvebu/pm.c
+++ b/arch/arm/mach-mvebu/pm.c
@@ -105,12 +105,10 @@
 	return of_translate_address(np, in_addr);
 }
 
-static void mvebu_pm_store_bootinfo(void)
+static void mvebu_pm_store_armadaxp_bootinfo(u32 *store_addr)
 {
-	u32 *store_addr;
 	phys_addr_t resume_pc;
 
-	store_addr = phys_to_virt(BOOT_INFO_ADDR);
 	resume_pc = virt_to_phys(armada_370_xp_cpu_resume);
 
 	/*
@@ -151,14 +149,30 @@
 	writel(BOOT_MAGIC_LIST_END, store_addr);
 }
 
-static int mvebu_pm_enter(suspend_state_t state)
+static int mvebu_pm_store_bootinfo(void)
 {
-	if (state != PM_SUSPEND_MEM)
-		return -EINVAL;
+	u32 *store_addr;
+
+	store_addr = phys_to_virt(BOOT_INFO_ADDR);
+
+	if (of_machine_is_compatible("marvell,armadaxp"))
+		mvebu_pm_store_armadaxp_bootinfo(store_addr);
+	else
+		return -ENODEV;
+
+	return 0;
+}
+
+static int mvebu_enter_suspend(void)
+{
+	int ret;
+
+	ret = mvebu_pm_store_bootinfo();
+	if (ret)
+		return ret;
 
 	cpu_pm_enter();
 
-	mvebu_pm_store_bootinfo();
 	cpu_suspend(0, mvebu_pm_powerdown);
 
 	outer_resume();
@@ -168,23 +182,62 @@
 	set_cpu_coherent();
 
 	cpu_pm_exit();
+	return 0;
+}
+
+static int mvebu_pm_enter(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		cpu_do_idle();
+		break;
+	case PM_SUSPEND_MEM:
+		pr_warn("Entering suspend to RAM. Only special wake-up sources will resume the system\n");
+		return mvebu_enter_suspend();
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mvebu_pm_valid(suspend_state_t state)
+{
+	if (state == PM_SUSPEND_STANDBY)
+		return 1;
+
+	if (state == PM_SUSPEND_MEM && mvebu_board_pm_enter != NULL)
+		return 1;
 
 	return 0;
 }
 
 static const struct platform_suspend_ops mvebu_pm_ops = {
 	.enter = mvebu_pm_enter,
-	.valid = suspend_valid_only_mem,
+	.valid = mvebu_pm_valid,
 };
 
-int mvebu_pm_init(void (*board_pm_enter)(void __iomem *sdram_reg, u32 srcmd))
+static int __init mvebu_pm_init(void)
+{
+	if (!of_machine_is_compatible("marvell,armadaxp") &&
+	    !of_machine_is_compatible("marvell,armada370") &&
+	    !of_machine_is_compatible("marvell,armada380") &&
+	    !of_machine_is_compatible("marvell,armada390"))
+		return -ENODEV;
+
+	suspend_set_ops(&mvebu_pm_ops);
+
+	return 0;
+}
+
+
+late_initcall(mvebu_pm_init);
+
+int __init mvebu_pm_suspend_init(void (*board_pm_enter)(void __iomem *sdram_reg,
+							u32 srcmd))
 {
 	struct device_node *np;
 	struct resource res;
 
-	if (!of_machine_is_compatible("marvell,armadaxp"))
-		return -ENODEV;
-
 	np = of_find_compatible_node(NULL, NULL,
 				     "marvell,armada-xp-sdram-controller");
 	if (!np)
@@ -212,7 +265,5 @@
 
 	mvebu_board_pm_enter = board_pm_enter;
 
-	suspend_set_ops(&mvebu_pm_ops);
-
 	return 0;
 }
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 4f4e222..e8fdb9c 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -415,7 +415,7 @@
 	void __iomem *mpsoc_base;
 	u32 reg;
 
-	pr_warn("CPU idle is currently broken on Armada 38x: disabling");
+	pr_warn("CPU idle is currently broken on Armada 38x: disabling\n");
 	return 0;
 
 	np = of_find_compatible_node(NULL, NULL,
@@ -486,7 +486,7 @@
 	 */
 	if (of_machine_is_compatible("marvell,armada380")) {
 		cpu_hotplug_disable();
-		pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
+		pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling\n");
 	}
 
 	if (of_machine_is_compatible("marvell,armadaxp"))
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 2e7cec8..f1ea470 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -282,7 +282,7 @@
 #define TX28_FEC_PHY_RESET	MXS_GPIO_NR(4, 13)
 #define TX28_FEC_nINT		MXS_GPIO_NR(4, 5)
 
-static const struct gpio tx28_gpios[] __initconst = {
+static const struct gpio const tx28_gpios[] __initconst = {
 	{ ENET0_MDC__GPIO_4_0, GPIOF_OUT_INIT_LOW, "GPIO_4_0" },
 	{ ENET0_MDIO__GPIO_4_1, GPIOF_OUT_INIT_LOW, "GPIO_4_1" },
 	{ ENET0_RX_EN__GPIO_4_2, GPIOF_OUT_INIT_LOW, "GPIO_4_2" },
@@ -528,7 +528,7 @@
 	soft_restart(0);
 }
 
-static const char *mxs_dt_compat[] __initdata = {
+static const char *const mxs_dt_compat[] __initconst = {
 	"fsl,imx28",
 	"fsl,imx23",
 	NULL,
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index db25b0c..6373e2b 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -174,7 +174,7 @@
 	for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
 		irq_set_chip_and_handler(irq, &netx_hif_chip,
 					 handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 
 	writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN);
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
index 5fb2a59..054a8a6 100644
--- a/arch/arm/mach-netx/time.c
+++ b/arch/arm/mach-netx/time.c
@@ -34,40 +34,40 @@
 #define TIMER_CLOCKEVENT 0
 #define TIMER_CLOCKSOURCE 1
 
-static void netx_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *clk)
+static inline void timer_shutdown(struct clock_event_device *evt)
 {
-	u32 tmode;
-
 	/* disable timer */
 	writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
-		tmode = NETX_GPIO_COUNTER_CTRL_RST_EN |
-			NETX_GPIO_COUNTER_CTRL_IRQ_EN |
-			NETX_GPIO_COUNTER_CTRL_RUN;
-		break;
+static int netx_shutdown(struct clock_event_device *evt)
+{
+	timer_shutdown(evt);
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
-		tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN |
-			NETX_GPIO_COUNTER_CTRL_RUN;
-		break;
+	return 0;
+}
 
-	default:
-		WARN(1, "%s: unhandled mode %d\n", __func__, mode);
-		/* fall through */
+static int netx_set_oneshot(struct clock_event_device *evt)
+{
+	u32 tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN;
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		tmode = 0;
-		break;
-	}
-
+	timer_shutdown(evt);
+	writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
 	writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
+
+	return 0;
+}
+
+static int netx_set_periodic(struct clock_event_device *evt)
+{
+	u32 tmode = NETX_GPIO_COUNTER_CTRL_RST_EN |
+		    NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN;
+
+	timer_shutdown(evt);
+	writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
+	writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
+
+	return 0;
 }
 
 static int netx_set_next_event(unsigned long evt,
@@ -81,7 +81,10 @@
 	.name = "netx-timer" __stringify(TIMER_CLOCKEVENT),
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event = netx_set_next_event,
-	.set_mode = netx_set_mode,
+	.set_state_shutdown = netx_shutdown,
+	.set_state_periodic = netx_set_periodic,
+	.set_state_oneshot = netx_set_oneshot,
+	.tick_resume = netx_shutdown,
 };
 
 /*
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index 9bda46f..0c612d9 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -26,10 +26,8 @@
 #include <linux/irq.h>
 #include <linux/dma-mapping.h>
 #include <linux/of_irq.h>
-#include <linux/of_gpio.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/gpio.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -109,40 +107,6 @@
 	writel(1, srcbase + 0x18);
 }
 
-/*
- * This GPIO pin turns on a line that is used to detect card insertion
- * on this board.
- */
-static int __init cpu8815_mmcsd_init(void)
-{
-	struct device_node *cdbias;
-	int gpio, err;
-
-	cdbias = of_find_node_by_path("/usb-s8815/mmcsd-gpio");
-	if (!cdbias) {
-		pr_info("could not find MMC/SD card detect bias node\n");
-		return 0;
-	}
-	gpio = of_get_gpio(cdbias, 0);
-	if (gpio < 0) {
-		pr_info("could not obtain MMC/SD card detect bias GPIO\n");
-		return 0;
-	}
-	err = gpio_request(gpio, "card detect bias");
-	if (err) {
-		pr_info("failed to request card detect bias GPIO %d\n", gpio);
-		return -ENODEV;
-	}
-	err = gpio_direction_output(gpio, 0);
-	if (err){
-		pr_info("failed to set GPIO %d as output, low\n", gpio);
-		return err;
-	}
-	pr_info("enabled USB-S8815 CD bias GPIO %d, low\n", gpio);
-	return 0;
-}
-device_initcall(cpu8815_mmcsd_init);
-
 static const char * cpu8815_board_compat[] = {
 	"st,nomadik-nhk-15",
 	"calaosystems,usb-s8815",
@@ -150,9 +114,8 @@
 };
 
 DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815")
-	/* At full speed latency must be >=2, so 0x249 in low bits */
-	.l2c_aux_val	= 0x00700249,
-	.l2c_aux_mask	= 0xfe0fefff,
+	.l2c_aux_val	= 0,
+	.l2c_aux_mask	= ~0,
 	.map_io		= cpu8815_map_io,
 	.restart	= cpu8815_restart,
 	.dt_compat      = cpu8815_board_compat,
diff --git a/arch/arm/mach-omap1/fpga.c b/arch/arm/mach-omap1/fpga.c
index 3c0e422..dfec671 100644
--- a/arch/arm/mach-omap1/fpga.c
+++ b/arch/arm/mach-omap1/fpga.c
@@ -169,7 +169,7 @@
 		}
 
 		irq_set_handler(i, handle_edge_irq);
-		set_irq_flags(i, IRQF_VALID);
+		irq_clear_status_flags(i, IRQ_NOREQUEST);
 	}
 
 	/*
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index f4d346f..b11edc8 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -262,7 +262,7 @@
 
 			irq_trigger = irq_banks[i].trigger_map >> IRQ_BIT(j);
 			omap_irq_set_cfg(j, 0, 0, irq_trigger);
-			set_irq_flags(j, IRQF_VALID);
+			irq_clear_status_flags(j, IRQ_NOREQUEST);
 		}
 		omap_alloc_gc(irq_banks[i].va, irq_base + i * 32, 32);
 	}
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index a7588cf..524977a 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -124,29 +124,26 @@
 	return 0;
 }
 
-static void omap_mpu_set_mode(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int omap_mpu_set_oneshot(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		omap_mpu_set_autoreset(0);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		omap_mpu_timer_stop(0);
-		omap_mpu_remove_autoreset(0);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	omap_mpu_timer_stop(0);
+	omap_mpu_remove_autoreset(0);
+	return 0;
+}
+
+static int omap_mpu_set_periodic(struct clock_event_device *evt)
+{
+	omap_mpu_set_autoreset(0);
+	return 0;
 }
 
 static struct clock_event_device clockevent_mpu_timer1 = {
-	.name		= "mpu_timer1",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event	= omap_mpu_set_next_event,
-	.set_mode	= omap_mpu_set_mode,
+	.name			= "mpu_timer1",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event		= omap_mpu_set_next_event,
+	.set_state_periodic	= omap_mpu_set_periodic,
+	.set_state_oneshot	= omap_mpu_set_oneshot,
 };
 
 static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 36bf174..0ae6c52 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -114,29 +114,28 @@
 	return 0;
 }
 
-static void omap_32k_timer_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static int omap_32k_timer_shutdown(struct clock_event_device *evt)
 {
 	omap_32k_timer_stop();
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+static int omap_32k_timer_set_periodic(struct clock_event_device *evt)
+{
+	omap_32k_timer_stop();
+	omap_32k_timer_start(OMAP_32K_TIMER_TICK_PERIOD);
+	return 0;
 }
 
 static struct clock_event_device clockevent_32k_timer = {
-	.name		= "32k-timer",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event	= omap_32k_timer_set_next_event,
-	.set_mode	= omap_32k_timer_set_mode,
+	.name			= "32k-timer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event		= omap_32k_timer_set_next_event,
+	.set_state_shutdown	= omap_32k_timer_shutdown,
+	.set_state_periodic	= omap_32k_timer_set_periodic,
+	.set_state_oneshot	= omap_32k_timer_shutdown,
+	.tick_resume		= omap_32k_timer_shutdown,
 };
 
 static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4a023e8..07d2e10 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -29,6 +29,7 @@
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
 	select OMAP_INTERCONNECT
+	select OMAP_INTERCONNECT_BARRIER
 	select PL310_ERRATA_588369 if CACHE_L2X0
 	select PL310_ERRATA_727915 if CACHE_L2X0
 	select PM_OPP if PM
@@ -46,6 +47,7 @@
 	select HAVE_ARM_TWD if SMP
 	select HAVE_ARM_ARCH_TIMER
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config SOC_AM33XX
 	bool "TI AM33XX"
@@ -71,6 +73,7 @@
 	select HAVE_ARM_ARCH_TIMER
 	select IRQ_CROSSBAR
 	select ARM_ERRATA_798181 if SMP
+	select OMAP_INTERCONNECT_BARRIER
 
 config ARCH_OMAP2PLUS
 	bool
@@ -92,6 +95,10 @@
 	help
 	  Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
+config OMAP_INTERCONNECT_BARRIER
+	bool
+	select ARM_HEAVY_MB
+
 
 if ARCH_OMAP2PLUS
 
@@ -178,26 +185,6 @@
 	default y
 	select OMAP_PACKAGE_CBB
 
-config MACH_OMAP3530_LV_SOM
-	bool "OMAP3 Logic 3530 LV SOM board"
-	depends on ARCH_OMAP3
-	default y
-	select OMAP_PACKAGE_CBB
-	help
-	 Support for the LogicPD OMAP3530 SOM Development kit
-	 for full description please see the products webpage at
-	 http://www.logicpd.com/products/development-kits/texas-instruments-zoom%E2%84%A2-omap35x-development-kit
-
-config MACH_OMAP3_TORPEDO
-	bool "OMAP3 Logic 35x Torpedo board"
-	depends on ARCH_OMAP3
-	default y
-	select OMAP_PACKAGE_CBB
-	help
-	 Support for the LogicPD OMAP35x Torpedo Development kit
-	 for full description please see the products webpage at
-	 http://www.logicpd.com/products/development-kits/zoom-omap35x-torpedo-development-kit
-
 config MACH_OMAP3517EVM
 	bool "OMAP3517/ AM3517 EVM board"
 	depends on ARCH_OMAP3
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 903c85b..9358696 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -12,8 +12,7 @@
 
 hwmod-common				= omap_hwmod.o omap_hwmod_reset.o \
 					  omap_hwmod_common_data.o
-clock-common				= clock.o clock_common_data.o \
-					  clkt_dpll.o clkt_clksel.o
+clock-common				= clock.o
 secure-common				= omap-smc.o omap-secure.o
 
 obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
@@ -182,24 +181,17 @@
 obj-$(CONFIG_SOC_DRA7XX)		+= clockdomains7xx_data.o
 
 # Clock framework
-obj-$(CONFIG_ARCH_OMAP2)		+= $(clock-common) clock2xxx.o
+obj-$(CONFIG_ARCH_OMAP2)		+= $(clock-common)
 obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_dpllcore.o
 obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_virt_prcm_set.o
-obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_dpll.o clkt_iclk.o
-obj-$(CONFIG_SOC_OMAP2430)		+= clock2430.o
-obj-$(CONFIG_ARCH_OMAP3)		+= $(clock-common) clock3xxx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= clock34xx.o clkt34xx_dpll3m2.o
-obj-$(CONFIG_ARCH_OMAP3)		+= clock3517.o clock36xx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= dpll3xxx.o
-obj-$(CONFIG_ARCH_OMAP3)		+= clkt_iclk.o
+obj-$(CONFIG_ARCH_OMAP2)		+= clkt2xxx_dpll.o
+obj-$(CONFIG_ARCH_OMAP3)		+= $(clock-common)
+obj-$(CONFIG_ARCH_OMAP3)		+= clkt34xx_dpll3m2.o
 obj-$(CONFIG_ARCH_OMAP4)		+= $(clock-common)
-obj-$(CONFIG_ARCH_OMAP4)		+= dpll3xxx.o dpll44xx.o
-obj-$(CONFIG_SOC_AM33XX)		+= $(clock-common) dpll3xxx.o
+obj-$(CONFIG_SOC_AM33XX)		+= $(clock-common)
 obj-$(CONFIG_SOC_OMAP5)			+= $(clock-common)
-obj-$(CONFIG_SOC_OMAP5)			+= dpll3xxx.o dpll44xx.o
 obj-$(CONFIG_SOC_DRA7XX)		+= $(clock-common)
-obj-$(CONFIG_SOC_DRA7XX)		+= dpll3xxx.o dpll44xx.o
-obj-$(CONFIG_SOC_AM43XX)		+= $(clock-common) dpll3xxx.o
+obj-$(CONFIG_SOC_AM43XX)		+= $(clock-common)
 
 # OMAP2 clock rate set data (old "OPP" data)
 obj-$(CONFIG_SOC_OMAP2420)		+= opp2420_data.o
@@ -234,8 +226,7 @@
 # EMU peripherals
 obj-$(CONFIG_HW_PERF_EVENTS)		+= pmu.o
 
-iommu-$(CONFIG_OMAP_IOMMU)		:= omap-iommu.o
-obj-y					+= $(iommu-m) $(iommu-y)
+obj-$(CONFIG_OMAP_IOMMU)		+= omap-iommu.o
 
 # OMAP2420 MSDI controller integration support ("MMC")
 obj-$(CONFIG_SOC_OMAP2420)		+= msdi.o
@@ -243,9 +234,6 @@
 # Specific board support
 obj-$(CONFIG_MACH_OMAP_GENERIC)		+= board-generic.o pdata-quirks.o
 obj-$(CONFIG_MACH_OMAP_LDP)		+= board-ldp.o
-obj-$(CONFIG_MACH_OMAP3530_LV_SOM)      += board-omap3logic.o
-obj-$(CONFIG_MACH_OMAP3_TORPEDO)        += board-omap3logic.o
-obj-$(CONFIG_MACH_OMAP3_PANDORA)	+= board-omap3pandora.o
 obj-$(CONFIG_MACH_NOKIA_N8X0)		+= board-n8x0.o
 obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51.o sdram-nokia.o
 obj-$(CONFIG_MACH_NOKIA_RX51)		+= board-rx51-peripherals.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 34ff14b..24c9afc 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -169,7 +169,7 @@
 	NULL,
 };
 
-DT_MACHINE_START(TI81XX_DT, "Generic ti814x (Flattened Device Tree)")
+DT_MACHINE_START(TI814X_DT, "Generic ti814x (Flattened Device Tree)")
 	.reserve	= omap_reserve,
 	.map_io		= ti81xx_map_io,
 	.init_early	= ti814x_init_early,
@@ -297,7 +297,7 @@
 DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)")
 	.reserve	= omap_reserve,
 	.smp		= smp_ops(omap4_smp_ops),
-	.map_io		= omap5_map_io,
+	.map_io		= dra7xx_map_io,
 	.init_early	= dra7xx_init_early,
 	.init_late	= dra7xx_init_late,
 	.init_irq	= omap_gic_of_init,
@@ -316,7 +316,7 @@
 
 DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)")
 	.reserve	= omap_reserve,
-	.map_io		= omap5_map_io,
+	.map_io		= dra7xx_map_io,
 	.init_early	= dra7xx_init_early,
 	.init_late	= dra7xx_init_late,
 	.init_irq	= omap_gic_of_init,
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
deleted file mode 100644
index 6049f60..0000000
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * linux/arch/arm/mach-omap2/board-omap3logic.c
- *
- * Copyright (C) 2010 Li-Pro.Net
- * Stephan Linz <linz@li-pro.net>
- *
- * Copyright (C) 2010-2012 Logic Product Development, Inc.
- * Peter Barada <peter.barada@logicpd.com>
- * Ashwin BIhari <ashwin.bihari@logicpd.com>
- *
- * Modified from Beagle, EVM, and RX51
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-
-#include <linux/i2c/twl.h>
-#include <linux/mmc/host.h>
-#include <linux/usb/phy.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include "mux.h"
-#include "hsmmc.h"
-#include "control.h"
-#include "common-board-devices.h"
-#include "gpmc.h"
-#include "gpmc-smsc911x.h"
-
-#define OMAP3LOGIC_SMSC911X_CS			1
-
-#define OMAP3530_LV_SOM_MMC_GPIO_CD		110
-#define OMAP3530_LV_SOM_MMC_GPIO_WP		126
-#define OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ	152
-
-#define OMAP3_TORPEDO_MMC_GPIO_CD		127
-#define OMAP3_TORPEDO_SMSC911X_GPIO_IRQ		129
-
-static struct regulator_consumer_supply omap3logic_vmmc1_supply[] = {
-	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data omap3logic_vmmc1 = {
-	.constraints = {
-		.name			= "VMMC1",
-		.min_uV			= 1850000,
-		.max_uV			= 3150000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(omap3logic_vmmc1_supply),
-	.consumer_supplies      = omap3logic_vmmc1_supply,
-};
-
-static struct twl4030_gpio_platform_data omap3logic_gpio_data = {
-	.use_leds	= true,
-	.pullups	= BIT(1),
-	.pulldowns	= BIT(2)  | BIT(6)  | BIT(7)  | BIT(8)
-			| BIT(13) | BIT(15) | BIT(16) | BIT(17),
-};
-
-static struct twl4030_usb_data omap3logic_usb_data = {
-	.usb_mode	= T2_USB_MODE_ULPI,
-};
-
-
-static struct twl4030_platform_data omap3logic_twldata = {
-	/* platform_data for children goes here */
-	.gpio		= &omap3logic_gpio_data,
-	.vmmc1		= &omap3logic_vmmc1,
-	.usb		= &omap3logic_usb_data,
-};
-
-static int __init omap3logic_i2c_init(void)
-{
-	omap3_pmic_init("twl4030", &omap3logic_twldata);
-	return 0;
-}
-
-static struct omap2_hsmmc_info __initdata board_mmc_info[] = {
-	{
-		.name		= "external",
-		.mmc		= 1,
-		.caps		= MMC_CAP_4_BIT_DATA,
-		.gpio_cd	= -EINVAL,
-		.gpio_wp	= -EINVAL,
-	},
-	{}      /* Terminator */
-};
-
-static void __init board_mmc_init(void)
-{
-	if (machine_is_omap3530_lv_som()) {
-		/* OMAP3530 LV SOM board */
-		board_mmc_info[0].gpio_cd = OMAP3530_LV_SOM_MMC_GPIO_CD;
-		board_mmc_info[0].gpio_wp = OMAP3530_LV_SOM_MMC_GPIO_WP;
-		omap_mux_init_signal("gpio_110", OMAP_PIN_OUTPUT);
-		omap_mux_init_signal("gpio_126", OMAP_PIN_OUTPUT);
-	} else if (machine_is_omap3_torpedo()) {
-		/* OMAP3 Torpedo board */
-		board_mmc_info[0].gpio_cd = OMAP3_TORPEDO_MMC_GPIO_CD;
-		omap_mux_init_signal("gpio_127", OMAP_PIN_OUTPUT);
-	} else {
-		/* unsupported board */
-		printk(KERN_ERR "%s(): unknown machine type\n", __func__);
-		return;
-	}
-
-	omap_hsmmc_init(board_mmc_info);
-}
-
-static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
-	.cs             = OMAP3LOGIC_SMSC911X_CS,
-	.gpio_irq       = -EINVAL,
-	.gpio_reset     = -EINVAL,
-};
-
-/* TODO/FIXME (comment by Peter Barada, LogicPD):
- * Fix the PBIAS voltage for Torpedo MMC1 pins that
- * are used for other needs (IRQs, etc).            */
-static void omap3torpedo_fix_pbias_voltage(void)
-{
-	u16 control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
-	u32 reg;
-
-	if (machine_is_omap3_torpedo())
-	{
-		/* Set the bias for the pin */
-		reg = omap_ctrl_readl(control_pbias_offset);
-
-		reg &= ~OMAP343X_PBIASLITEPWRDNZ1;
-		omap_ctrl_writel(reg, control_pbias_offset);
-
-		/* 100ms delay required for PBIAS configuration */
-		msleep(100);
-
-		reg |= OMAP343X_PBIASLITEVMODE1;
-		reg |= OMAP343X_PBIASLITEPWRDNZ1;
-		omap_ctrl_writel(reg | 0x300, control_pbias_offset);
-	}
-}
-
-static inline void __init board_smsc911x_init(void)
-{
-	if (machine_is_omap3530_lv_som()) {
-		/* OMAP3530 LV SOM board */
-		board_smsc911x_data.gpio_irq =
-					OMAP3530_LV_SOM_SMSC911X_GPIO_IRQ;
-		omap_mux_init_signal("gpio_152", OMAP_PIN_INPUT);
-	} else if (machine_is_omap3_torpedo()) {
-		/* OMAP3 Torpedo board */
-		board_smsc911x_data.gpio_irq = OMAP3_TORPEDO_SMSC911X_GPIO_IRQ;
-		omap_mux_init_signal("gpio_129", OMAP_PIN_INPUT);
-	} else {
-		/* unsupported board */
-		printk(KERN_ERR "%s(): unknown machine type\n", __func__);
-		return;
-	}
-
-	gpmc_smsc911x_init(&board_smsc911x_data);
-}
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
-	/* mUSB */
-	OMAP3_MUX(HSUSB0_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_STP, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
-	OMAP3_MUX(HSUSB0_DIR, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_NXT, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-	OMAP3_MUX(HSUSB0_DATA7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
-
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static struct regulator_consumer_supply dummy_supplies[] = {
-	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
-	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-static void __init omap3logic_init(void)
-{
-	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-	omap3torpedo_fix_pbias_voltage();
-	omap3logic_i2c_init();
-	omap_serial_init();
-	omap_sdrc_init(NULL, NULL);
-	board_mmc_init();
-	board_smsc911x_init();
-
-	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
-	usb_musb_init(NULL);
-
-	/* Ensure SDRC pins are mux'd for self-refresh */
-	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
-	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-}
-
-MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
-	.atag_offset	= 0x100,
-	.reserve	= omap_reserve,
-	.map_io		= omap3_map_io,
-	.init_early	= omap35xx_init_early,
-	.init_irq	= omap3_init_irq,
-	.init_machine	= omap3logic_init,
-	.init_late	= omap35xx_init_late,
-	.init_time	= omap3_sync32k_timer_init,
-	.restart	= omap3xxx_restart,
-MACHINE_END
-
-MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
-	.atag_offset	= 0x100,
-	.reserve	= omap_reserve,
-	.map_io		= omap3_map_io,
-	.init_early	= omap35xx_init_early,
-	.init_irq	= omap3_init_irq,
-	.init_machine	= omap3logic_init,
-	.init_late	= omap35xx_init_late,
-	.init_time	= omap3_sync32k_timer_init,
-	.restart	= omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
deleted file mode 100644
index 969e100..0000000
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * board-omap3pandora.c (Pandora Handheld Console)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include <linux/spi/spi.h>
-#include <linux/regulator/machine.h>
-#include <linux/i2c/twl.h>
-#include <linux/omap-gpmc.h>
-#include <linux/wl12xx.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand.h>
-#include <linux/leds.h>
-#include <linux/input.h>
-#include <linux/input/matrix_keypad.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/regulator/fixed.h>
-#include <linux/usb/phy.h>
-#include <linux/platform_data/spi-omap2-mcspi.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include "common.h"
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include "mux.h"
-#include "sdram-micron-mt46h32m32lf-6.h"
-#include "hsmmc.h"
-#include "common-board-devices.h"
-
-#define PANDORA_WIFI_IRQ_GPIO		21
-#define PANDORA_WIFI_NRESET_GPIO	23
-#define OMAP3_PANDORA_TS_GPIO		94
-
-static struct mtd_partition omap3pandora_nand_partitions[] = {
-	{
-		.name           = "xloader",
-		.offset         = 0,
-		.size           = 4 * NAND_BLOCK_SIZE,
-		.mask_flags     = MTD_WRITEABLE
-	}, {
-		.name           = "uboot",
-		.offset         = MTDPART_OFS_APPEND,
-		.size           = 15 * NAND_BLOCK_SIZE,
-	}, {
-		.name           = "uboot-env",
-		.offset         = MTDPART_OFS_APPEND,
-		.size           = 1 * NAND_BLOCK_SIZE,
-	}, {
-		.name           = "boot",
-		.offset         = MTDPART_OFS_APPEND,
-		.size           = 80 * NAND_BLOCK_SIZE,
-	}, {
-		.name           = "rootfs",
-		.offset         = MTDPART_OFS_APPEND,
-		.size           = MTDPART_SIZ_FULL,
-	},
-};
-
-static struct omap_nand_platform_data pandora_nand_data = {
-	.cs		= 0,
-	.devsize	= NAND_BUSWIDTH_16,
-	.xfer_type	= NAND_OMAP_PREFETCH_DMA,
-	.parts		= omap3pandora_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(omap3pandora_nand_partitions),
-};
-
-static struct gpio_led pandora_gpio_leds[] = {
-	{
-		.name			= "pandora::sd1",
-		.default_trigger	= "mmc0",
-		.gpio			= 128,
-	}, {
-		.name			= "pandora::sd2",
-		.default_trigger	= "mmc1",
-		.gpio			= 129,
-	}, {
-		.name			= "pandora::bluetooth",
-		.gpio			= 158,
-	}, {
-		.name			= "pandora::wifi",
-		.gpio			= 159,
-	},
-};
-
-static struct gpio_led_platform_data pandora_gpio_led_data = {
-	.leds		= pandora_gpio_leds,
-	.num_leds	= ARRAY_SIZE(pandora_gpio_leds),
-};
-
-static struct platform_device pandora_leds_gpio = {
-	.name	= "leds-gpio",
-	.id	= -1,
-	.dev	= {
-		.platform_data	= &pandora_gpio_led_data,
-	},
-};
-
-static struct platform_device pandora_backlight = {
-	.name	= "pandora-backlight",
-	.id	= -1,
-};
-
-#define GPIO_BUTTON(gpio_num, ev_type, ev_code, act_low, descr)	\
-{								\
-	.gpio		= gpio_num,				\
-	.type		= ev_type,				\
-	.code		= ev_code,				\
-	.active_low	= act_low,				\
-	.debounce_interval = 4,					\
-	.desc		= "btn " descr,				\
-}
-
-#define GPIO_BUTTON_LOW(gpio_num, event_code, description)	\
-	GPIO_BUTTON(gpio_num, EV_KEY, event_code, 1, description)
-
-static struct gpio_keys_button pandora_gpio_keys[] = {
-	GPIO_BUTTON_LOW(110,	KEY_UP,		"up"),
-	GPIO_BUTTON_LOW(103,	KEY_DOWN,	"down"),
-	GPIO_BUTTON_LOW(96,	KEY_LEFT,	"left"),
-	GPIO_BUTTON_LOW(98,	KEY_RIGHT,	"right"),
-	GPIO_BUTTON_LOW(109,	KEY_PAGEUP,	"game 1"),
-	GPIO_BUTTON_LOW(111,	KEY_END,	"game 2"),
-	GPIO_BUTTON_LOW(106,	KEY_PAGEDOWN,	"game 3"),
-	GPIO_BUTTON_LOW(101,	KEY_HOME,	"game 4"),
-	GPIO_BUTTON_LOW(102,	KEY_RIGHTSHIFT,	"l"),
-	GPIO_BUTTON_LOW(97,	KEY_KPPLUS,	"l2"),
-	GPIO_BUTTON_LOW(105,	KEY_RIGHTCTRL,	"r"),
-	GPIO_BUTTON_LOW(107,	KEY_KPMINUS,	"r2"),
-	GPIO_BUTTON_LOW(104,	KEY_LEFTCTRL,	"ctrl"),
-	GPIO_BUTTON_LOW(99,	KEY_MENU,	"menu"),
-	GPIO_BUTTON_LOW(176,	KEY_COFFEE,	"hold"),
-	GPIO_BUTTON(100, EV_KEY, KEY_LEFTALT, 0, "alt"),
-	GPIO_BUTTON(108, EV_SW, SW_LID, 1, "lid"),
-};
-
-static struct gpio_keys_platform_data pandora_gpio_key_info = {
-	.buttons	= pandora_gpio_keys,
-	.nbuttons	= ARRAY_SIZE(pandora_gpio_keys),
-};
-
-static struct platform_device pandora_keys_gpio = {
-	.name	= "gpio-keys",
-	.id	= -1,
-	.dev	= {
-		.platform_data	= &pandora_gpio_key_info,
-	},
-};
-
-static const uint32_t board_keymap[] = {
-	/* row, col, code */
-	KEY(0, 0, KEY_9),
-	KEY(0, 1, KEY_8),
-	KEY(0, 2, KEY_I),
-	KEY(0, 3, KEY_J),
-	KEY(0, 4, KEY_N),
-	KEY(0, 5, KEY_M),
-	KEY(1, 0, KEY_0),
-	KEY(1, 1, KEY_7),
-	KEY(1, 2, KEY_U),
-	KEY(1, 3, KEY_H),
-	KEY(1, 4, KEY_B),
-	KEY(1, 5, KEY_SPACE),
-	KEY(2, 0, KEY_BACKSPACE),
-	KEY(2, 1, KEY_6),
-	KEY(2, 2, KEY_Y),
-	KEY(2, 3, KEY_G),
-	KEY(2, 4, KEY_V),
-	KEY(2, 5, KEY_FN),
-	KEY(3, 0, KEY_O),
-	KEY(3, 1, KEY_5),
-	KEY(3, 2, KEY_T),
-	KEY(3, 3, KEY_F),
-	KEY(3, 4, KEY_C),
-	KEY(4, 0, KEY_P),
-	KEY(4, 1, KEY_4),
-	KEY(4, 2, KEY_R),
-	KEY(4, 3, KEY_D),
-	KEY(4, 4, KEY_X),
-	KEY(5, 0, KEY_K),
-	KEY(5, 1, KEY_3),
-	KEY(5, 2, KEY_E),
-	KEY(5, 3, KEY_S),
-	KEY(5, 4, KEY_Z),
-	KEY(6, 0, KEY_L),
-	KEY(6, 1, KEY_2),
-	KEY(6, 2, KEY_W),
-	KEY(6, 3, KEY_A),
-	KEY(6, 4, KEY_DOT),
-	KEY(7, 0, KEY_ENTER),
-	KEY(7, 1, KEY_1),
-	KEY(7, 2, KEY_Q),
-	KEY(7, 3, KEY_LEFTSHIFT),
-	KEY(7, 4, KEY_COMMA),
-};
-
-static struct matrix_keymap_data board_map_data = {
-	.keymap			= board_keymap,
-	.keymap_size		= ARRAY_SIZE(board_keymap),
-};
-
-static struct twl4030_keypad_data pandora_kp_data = {
-	.keymap_data	= &board_map_data,
-	.rows		= 8,
-	.cols		= 6,
-	.rep		= 1,
-};
-
-static struct connector_atv_platform_data pandora_tv_pdata = {
-	.name = "tv",
-	.source = "venc.0",
-	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
-	.invert_polarity = false,
-};
-
-static struct platform_device pandora_tv_connector_device = {
-	.name                   = "connector-analog-tv",
-	.id                     = 0,
-	.dev.platform_data      = &pandora_tv_pdata,
-};
-
-static struct omap_dss_board_info pandora_dss_data = {
-	.default_display_name = "lcd",
-};
-
-static void pandora_wl1251_init_card(struct mmc_card *card)
-{
-	/*
-	 * We have TI wl1251 attached to MMC3. Pass this information to
-	 * SDIO core because it can't be probed by normal methods.
-	 */
-	if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
-		card->quirks |= MMC_QUIRK_NONSTD_SDIO;
-		card->cccr.wide_bus = 1;
-		card->cis.vendor = 0x104c;
-		card->cis.device = 0x9066;
-		card->cis.blksize = 512;
-		card->cis.max_dtr = 20000000;
-	}
-}
-
-static struct omap2_hsmmc_info omap3pandora_mmc[] = {
-	{
-		.mmc		= 1,
-		.caps		= MMC_CAP_4_BIT_DATA,
-		.gpio_cd	= -EINVAL,
-		.gpio_wp	= 126,
-		.ext_clock	= 0,
-		.deferred	= true,
-	},
-	{
-		.mmc		= 2,
-		.caps		= MMC_CAP_4_BIT_DATA,
-		.gpio_cd	= -EINVAL,
-		.gpio_wp	= 127,
-		.ext_clock	= 1,
-		.transceiver	= true,
-		.deferred	= true,
-	},
-	{
-		.mmc		= 3,
-		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
-		.gpio_cd	= -EINVAL,
-		.gpio_wp	= -EINVAL,
-		.init_card	= pandora_wl1251_init_card,
-	},
-	{}	/* Terminator */
-};
-
-static int omap3pandora_twl_gpio_setup(struct device *dev,
-		unsigned gpio, unsigned ngpio)
-{
-	int ret, gpio_32khz;
-
-	/* gpio + {0,1} is "mmc{0,1}_cd" (input/IRQ) */
-	omap3pandora_mmc[0].gpio_cd = gpio + 0;
-	omap3pandora_mmc[1].gpio_cd = gpio + 1;
-	omap_hsmmc_late_init(omap3pandora_mmc);
-
-	/* gpio + 13 drives 32kHz buffer for wifi module */
-	gpio_32khz = gpio + 13;
-	ret = gpio_request_one(gpio_32khz, GPIOF_OUT_INIT_HIGH, "wifi 32kHz");
-	if (ret < 0) {
-		pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static struct twl4030_gpio_platform_data omap3pandora_gpio_data = {
-	.setup		= omap3pandora_twl_gpio_setup,
-};
-
-static struct regulator_consumer_supply pandora_vmmc1_supply[] = {
-	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
-};
-
-static struct regulator_consumer_supply pandora_vmmc2_supply[] = {
-	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1")
-};
-
-static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
-	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
-};
-
-static struct regulator_consumer_supply pandora_vdds_supplies[] = {
-	REGULATOR_SUPPLY("vdds_sdi", "omapdss"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dpi.0"),
-	REGULATOR_SUPPLY("vdds_dsi", "omapdss_dsi.0"),
-};
-
-static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = {
-	REGULATOR_SUPPLY("vcc", "spi1.1"),
-};
-
-static struct regulator_consumer_supply pandora_usb_phy_supply[] = {
-	REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"),	/* hsusb port 2 */
-};
-
-/* ads7846 on SPI and 2 nub controllers on I2C */
-static struct regulator_consumer_supply pandora_vaux4_supplies[] = {
-	REGULATOR_SUPPLY("vcc", "spi1.0"),
-	REGULATOR_SUPPLY("vcc", "3-0066"),
-	REGULATOR_SUPPLY("vcc", "3-0067"),
-};
-
-static struct regulator_consumer_supply pandora_adac_supply[] = {
-	REGULATOR_SUPPLY("vcc", "soc-audio"),
-};
-
-/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
-static struct regulator_init_data pandora_vmmc1 = {
-	.constraints = {
-		.min_uV			= 1850000,
-		.max_uV			= 3150000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_vmmc1_supply),
-	.consumer_supplies	= pandora_vmmc1_supply,
-};
-
-/* VMMC2 for MMC2 pins CMD, CLK, DAT0..DAT3 (max 100 mA) */
-static struct regulator_init_data pandora_vmmc2 = {
-	.constraints = {
-		.min_uV			= 1850000,
-		.max_uV			= 3150000,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
-					| REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_vmmc2_supply),
-	.consumer_supplies	= pandora_vmmc2_supply,
-};
-
-/* VAUX1 for LCD */
-static struct regulator_init_data pandora_vaux1 = {
-	.constraints = {
-		.min_uV			= 3000000,
-		.max_uV			= 3000000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_vcc_lcd_supply),
-	.consumer_supplies	= pandora_vcc_lcd_supply,
-};
-
-/* VAUX2 for USB host PHY */
-static struct regulator_init_data pandora_vaux2 = {
-	.constraints = {
-		.min_uV			= 1800000,
-		.max_uV			= 1800000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_usb_phy_supply),
-	.consumer_supplies	= pandora_usb_phy_supply,
-};
-
-/* VAUX4 for ads7846 and nubs */
-static struct regulator_init_data pandora_vaux4 = {
-	.constraints = {
-		.min_uV			= 2800000,
-		.max_uV			= 2800000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_vaux4_supplies),
-	.consumer_supplies	= pandora_vaux4_supplies,
-};
-
-/* VSIM for audio DAC */
-static struct regulator_init_data pandora_vsim = {
-	.constraints = {
-		.min_uV			= 2800000,
-		.max_uV			= 2800000,
-		.apply_uV		= true,
-		.valid_modes_mask	= REGULATOR_MODE_NORMAL
-					| REGULATOR_MODE_STANDBY,
-		.valid_ops_mask		= REGULATOR_CHANGE_MODE
-					| REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_adac_supply),
-	.consumer_supplies	= pandora_adac_supply,
-};
-
-/* Fixed regulator internal to Wifi module */
-static struct regulator_init_data pandora_vmmc3 = {
-	.constraints = {
-		.valid_ops_mask		= REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies	= ARRAY_SIZE(pandora_vmmc3_supply),
-	.consumer_supplies	= pandora_vmmc3_supply,
-};
-
-static struct fixed_voltage_config pandora_vwlan = {
-	.supply_name		= "vwlan",
-	.microvolts		= 1800000, /* 1.8V */
-	.gpio			= PANDORA_WIFI_NRESET_GPIO,
-	.startup_delay		= 50000, /* 50ms */
-	.enable_high		= 1,
-	.enabled_at_boot	= 0,
-	.init_data		= &pandora_vmmc3,
-};
-
-static struct platform_device pandora_vwlan_device = {
-	.name		= "reg-fixed-voltage",
-	.id		= 1,
-	.dev = {
-		.platform_data = &pandora_vwlan,
-	},
-};
-
-static struct twl4030_bci_platform_data pandora_bci_data;
-
-static struct twl4030_power_data pandora_power_data = {
-	.use_poweroff	= true,
-};
-
-static struct twl4030_platform_data omap3pandora_twldata = {
-	.gpio		= &omap3pandora_gpio_data,
-	.vmmc1		= &pandora_vmmc1,
-	.vmmc2		= &pandora_vmmc2,
-	.vaux1		= &pandora_vaux1,
-	.vaux2		= &pandora_vaux2,
-	.vaux4		= &pandora_vaux4,
-	.vsim		= &pandora_vsim,
-	.keypad		= &pandora_kp_data,
-	.bci		= &pandora_bci_data,
-	.power		= &pandora_power_data,
-};
-
-static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
-	{
-		I2C_BOARD_INFO("bq27500", 0x55),
-		.flags = I2C_CLIENT_WAKE,
-	},
-};
-
-static int __init omap3pandora_i2c_init(void)
-{
-	omap3_pmic_get_config(&omap3pandora_twldata,
-			TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_AUDIO,
-			TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
-
-	omap3pandora_twldata.vdac->constraints.apply_uV = true;
-
-	omap3pandora_twldata.vpll2->constraints.apply_uV = true;
-	omap3pandora_twldata.vpll2->num_consumer_supplies =
-					ARRAY_SIZE(pandora_vdds_supplies);
-	omap3pandora_twldata.vpll2->consumer_supplies = pandora_vdds_supplies;
-
-	omap3_pmic_init("tps65950", &omap3pandora_twldata);
-	/* i2c2 pins are not connected */
-	omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo,
-			ARRAY_SIZE(omap3pandora_i2c3_boardinfo));
-	return 0;
-}
-
-static struct panel_tpo_td043mtea1_platform_data pandora_lcd_pdata = {
-	.name                   = "lcd",
-	.source                 = "dpi.0",
-
-	.data_lines		= 24,
-	.nreset_gpio		= 157,
-};
-
-static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
-	{
-		.modalias		= "panel-tpo-td043mtea1",
-		.bus_num		= 1,
-		.chip_select		= 1,
-		.max_speed_hz		= 375000,
-		.platform_data		= &pandora_lcd_pdata,
-	}
-};
-
-static void __init pandora_wl1251_init(void)
-{
-	struct wl1251_platform_data pandora_wl1251_pdata;
-	int ret;
-
-	memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
-
-	pandora_wl1251_pdata.power_gpio = -1;
-
-	ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
-	if (ret < 0)
-		goto fail;
-
-	pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
-	if (pandora_wl1251_pdata.irq < 0)
-		goto fail_irq;
-
-	pandora_wl1251_pdata.use_eeprom = true;
-	ret = wl1251_set_platform_data(&pandora_wl1251_pdata);
-	if (ret < 0)
-		goto fail_irq;
-
-	return;
-
-fail_irq:
-	gpio_free(PANDORA_WIFI_IRQ_GPIO);
-fail:
-	printk(KERN_ERR "wl1251 board initialisation failed\n");
-}
-
-static struct usbhs_phy_data phy_data[] __initdata = {
-	{
-		.port = 2,
-		.reset_gpio = 16,
-		.vcc_gpio = -EINVAL,
-	},
-};
-
-static struct platform_device *omap3pandora_devices[] __initdata = {
-	&pandora_leds_gpio,
-	&pandora_keys_gpio,
-	&pandora_vwlan_device,
-	&pandora_backlight,
-	&pandora_tv_connector_device,
-};
-
-static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
-	.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
-	{ .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static void __init omap3pandora_init(void)
-{
-	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-	omap_hsmmc_init(omap3pandora_mmc);
-	omap3pandora_i2c_init();
-	pandora_wl1251_init();
-	platform_add_devices(omap3pandora_devices,
-			ARRAY_SIZE(omap3pandora_devices));
-	omap_display_init(&pandora_dss_data);
-	omap_serial_init();
-	omap_sdrc_init(mt46h32m32lf6_sdrc_params,
-				  mt46h32m32lf6_sdrc_params);
-	spi_register_board_info(omap3pandora_spi_board_info,
-			ARRAY_SIZE(omap3pandora_spi_board_info));
-	omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);
-
-	usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
-	usbhs_init(&usbhs_bdata);
-
-	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
-	usb_musb_init(NULL);
-	gpmc_nand_init(&pandora_nand_data, NULL);
-
-	/* Ensure SDRC pins are mux'd for self-refresh */
-	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
-	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-}
-
-MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
-	.atag_offset	= 0x100,
-	.reserve	= omap_reserve,
-	.map_io		= omap3_map_io,
-	.init_early	= omap35xx_init_early,
-	.init_irq	= omap3_init_irq,
-	.init_machine	= omap3pandora_init,
-	.init_late	= omap35xx_init_late,
-	.init_time	= omap3_sync32k_timer_init,
-	.restart	= omap3xxx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
index eb69acf..3f65213 100644
--- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
+++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
@@ -23,12 +23,13 @@
 
 #include "clock.h"
 #include "clock3xxx.h"
-#include "clock34xx.h"
 #include "sdrc.h"
 #include "sram.h"
 
 #define CYCLES_PER_MHZ			1000000
 
+struct clk *sdrc_ick_p, *arm_fck_p;
+
 /*
  * CORE DPLL (DPLL3) M2 divider rate programming functions
  *
@@ -60,12 +61,14 @@
 	if (!clk || !rate)
 		return -EINVAL;
 
-	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
+	new_div = DIV_ROUND_UP(parent_rate, rate);
+	validrate = parent_rate / new_div;
+
 	if (validrate != rate)
 		return -EINVAL;
 
-	sdrcrate = __clk_get_rate(sdrc_ick_p);
-	clkrate = __clk_get_rate(hw->clk);
+	sdrcrate = clk_get_rate(sdrc_ick_p);
+	clkrate = clk_hw_get_rate(hw);
 	if (rate > clkrate)
 		sdrcrate <<= ((rate / clkrate) >> 1);
 	else
@@ -83,7 +86,7 @@
 	/*
 	 * XXX This only needs to be done when the CPU frequency changes
 	 */
-	_mpurate = __clk_get_rate(arm_fck_p) / CYCLES_PER_MHZ;
+	_mpurate = clk_get_rate(arm_fck_p) / CYCLES_PER_MHZ;
 	c = (_mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
 	c += 1;  /* for safety */
 	c *= SDRC_MPURATE_LOOPS;
diff --git a/arch/arm/mach-omap2/clkt_clksel.c b/arch/arm/mach-omap2/clkt_clksel.c
deleted file mode 100644
index 7ee2610..0000000
--- a/arch/arm/mach-omap2/clkt_clksel.c
+++ /dev/null
@@ -1,466 +0,0 @@
-/*
- * clkt_clksel.c - OMAP2/3/4 clksel clock functions
- *
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2010 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *
- * clksel clocks are clocks that do not have a fixed parent, or that
- * can divide their parent's rate, or possibly both at the same time, based
- * on the contents of a hardware register bitfield.
- *
- * All of the various mux and divider settings can be encoded into
- * struct clksel* data structures, and then these can be autogenerated
- * from some hardware database for each new chip generation.  This
- * should avoid the need to write, review, and validate a lot of new
- * clock code for each new chip, since it can be exported from the SoC
- * design flow.  This is now done on OMAP4.
- *
- * The fusion of mux and divider clocks is a software creation.  In
- * hardware reality, the multiplexer (parent selection) and the
- * divider exist separately.  XXX At some point these clksel clocks
- * should be split into "divider" clocks and "mux" clocks to better
- * match the hardware.
- *
- * (The name "clksel" comes from the name of the corresponding
- * register field in the OMAP2/3 family of SoCs.)
- *
- * XXX Currently these clocks are only used in the OMAP2/3/4 code, but
- * many of the OMAP1 clocks should be convertible to use this
- * mechanism.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-
-#include "clock.h"
-
-/* Private functions */
-
-/**
- * _get_clksel_by_parent() - return clksel struct for a given clk & parent
- * @clk: OMAP struct clk ptr to inspect
- * @src_clk: OMAP struct clk ptr of the parent clk to search for
- *
- * Scan the struct clksel array associated with the clock to find
- * the element associated with the supplied parent clock address.
- * Returns a pointer to the struct clksel on success or NULL on error.
- */
-static const struct clksel *_get_clksel_by_parent(struct clk_hw_omap *clk,
-						  struct clk *src_clk)
-{
-	const struct clksel *clks;
-
-	if (!src_clk)
-		return NULL;
-
-	for (clks = clk->clksel; clks->parent; clks++)
-		if (clks->parent == src_clk)
-			break; /* Found the requested parent */
-
-	if (!clks->parent) {
-		/* This indicates a data problem */
-		WARN(1, "clock: %s: could not find parent clock %s in clksel array\n",
-		     __clk_get_name(clk->hw.clk), __clk_get_name(src_clk));
-		return NULL;
-	}
-
-	return clks;
-}
-
-/**
- * _write_clksel_reg() - program a clock's clksel register in hardware
- * @clk: struct clk * to program
- * @v: clksel bitfield value to program (with LSB at bit 0)
- *
- * Shift the clksel register bitfield value @v to its appropriate
- * location in the clksel register and write it in.  This function
- * will ensure that the write to the clksel_reg reaches its
- * destination before returning -- important since PRM and CM register
- * accesses can be quite slow compared to ARM cycles -- but does not
- * take into account any time the hardware might take to switch the
- * clock source.
- */
-static void _write_clksel_reg(struct clk_hw_omap *clk, u32 field_val)
-{
-	u32 v;
-
-	v = omap2_clk_readl(clk, clk->clksel_reg);
-	v &= ~clk->clksel_mask;
-	v |= field_val << __ffs(clk->clksel_mask);
-	omap2_clk_writel(v, clk, clk->clksel_reg);
-
-	v = omap2_clk_readl(clk, clk->clksel_reg); /* OCP barrier */
-}
-
-/**
- * _clksel_to_divisor() - turn clksel field value into integer divider
- * @clk: OMAP struct clk to use
- * @field_val: register field value to find
- *
- * Given a struct clk of a rate-selectable clksel clock, and a register field
- * value to search for, find the corresponding clock divisor.  The register
- * field value should be pre-masked and shifted down so the LSB is at bit 0
- * before calling.  Returns 0 on error or returns the actual integer divisor
- * upon success.
- */
-static u32 _clksel_to_divisor(struct clk_hw_omap *clk, u32 field_val)
-{
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-	struct clk *parent;
-
-	parent = __clk_get_parent(clk->hw.clk);
-
-	clks = _get_clksel_by_parent(clk, parent);
-	if (!clks)
-		return 0;
-
-	for (clkr = clks->rates; clkr->div; clkr++) {
-		if (!(clkr->flags & cpu_mask))
-			continue;
-
-		if (clkr->val == field_val)
-			break;
-	}
-
-	if (!clkr->div) {
-		/* This indicates a data error */
-		WARN(1, "clock: %s: could not find fieldval %d for parent %s\n",
-		     __clk_get_name(clk->hw.clk), field_val,
-		     __clk_get_name(parent));
-		return 0;
-	}
-
-	return clkr->div;
-}
-
-/**
- * _divisor_to_clksel() - turn clksel integer divisor into a field value
- * @clk: OMAP struct clk to use
- * @div: integer divisor to search for
- *
- * Given a struct clk of a rate-selectable clksel clock, and a clock
- * divisor, find the corresponding register field value.  Returns the
- * register field value _before_ left-shifting (i.e., LSB is at bit
- * 0); or returns 0xFFFFFFFF (~0) upon error.
- */
-static u32 _divisor_to_clksel(struct clk_hw_omap *clk, u32 div)
-{
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-	struct clk *parent;
-
-	/* should never happen */
-	WARN_ON(div == 0);
-
-	parent = __clk_get_parent(clk->hw.clk);
-	clks = _get_clksel_by_parent(clk, parent);
-	if (!clks)
-		return ~0;
-
-	for (clkr = clks->rates; clkr->div; clkr++) {
-		if (!(clkr->flags & cpu_mask))
-			continue;
-
-		if (clkr->div == div)
-			break;
-	}
-
-	if (!clkr->div) {
-		pr_err("clock: %s: could not find divisor %d for parent %s\n",
-		       __clk_get_name(clk->hw.clk), div,
-		       __clk_get_name(parent));
-		return ~0;
-	}
-
-	return clkr->val;
-}
-
-/**
- * _read_divisor() - get current divisor applied to parent clock (from hdwr)
- * @clk: OMAP struct clk to use.
- *
- * Read the current divisor register value for @clk that is programmed
- * into the hardware, convert it into the actual divisor value, and
- * return it; or return 0 on error.
- */
-static u32 _read_divisor(struct clk_hw_omap *clk)
-{
-	u32 v;
-
-	if (!clk->clksel || !clk->clksel_mask)
-		return 0;
-
-	v = omap2_clk_readl(clk, clk->clksel_reg);
-	v &= clk->clksel_mask;
-	v >>= __ffs(clk->clksel_mask);
-
-	return _clksel_to_divisor(clk, v);
-}
-
-/* Public functions */
-
-/**
- * omap2_clksel_round_rate_div() - find divisor for the given clock and rate
- * @clk: OMAP struct clk to use
- * @target_rate: desired clock rate
- * @new_div: ptr to where we should store the divisor
- *
- * Finds 'best' divider value in an array based on the source and target
- * rates.  The divider array must be sorted with smallest divider first.
- * This function is also used by the DPLL3 M2 divider code.
- *
- * Returns the rounded clock rate or returns 0xffffffff on error.
- */
-u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk,
-						 unsigned long target_rate,
-				u32 *new_div)
-{
-	unsigned long test_rate;
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-	u32 last_div = 0;
-	struct clk *parent;
-	unsigned long parent_rate;
-	const char *clk_name;
-
-	parent = __clk_get_parent(clk->hw.clk);
-	clk_name = __clk_get_name(clk->hw.clk);
-	parent_rate = __clk_get_rate(parent);
-
-	if (!clk->clksel || !clk->clksel_mask)
-		return ~0;
-
-	pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
-		 clk_name, target_rate);
-
-	*new_div = 1;
-
-	clks = _get_clksel_by_parent(clk, parent);
-	if (!clks)
-		return ~0;
-
-	for (clkr = clks->rates; clkr->div; clkr++) {
-		if (!(clkr->flags & cpu_mask))
-			continue;
-
-		/* Sanity check */
-		if (clkr->div <= last_div)
-			pr_err("clock: %s: clksel_rate table not sorted\n",
-			       clk_name);
-
-		last_div = clkr->div;
-
-		test_rate = parent_rate / clkr->div;
-
-		if (test_rate <= target_rate)
-			break; /* found it */
-	}
-
-	if (!clkr->div) {
-		pr_err("clock: %s: could not find divisor for target rate %ld for parent %s\n",
-		       clk_name, target_rate, __clk_get_name(parent));
-		return ~0;
-	}
-
-	*new_div = clkr->div;
-
-	pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div,
-		 (parent_rate / clkr->div));
-
-	return parent_rate / clkr->div;
-}
-
-/*
- * Clocktype interface functions to the OMAP clock code
- * (i.e., those used in struct clk field function pointers, etc.)
- */
-
-/**
- * omap2_clksel_find_parent_index() - return the array index of the current
- * hardware parent of @hw
- * @hw: struct clk_hw * to find the current hardware parent of
- *
- * Given a struct clk_hw pointer @hw to the 'hw' member of a struct
- * clk_hw_omap record representing a source-selectable hardware clock,
- * read the hardware register and determine what its parent is
- * currently set to.  Intended to be called only by the common clock
- * framework struct clk_hw_ops.get_parent function pointer.  Return
- * the array index of this parent clock upon success -- there is no
- * way to return an error, so if we encounter an error, just WARN()
- * and pretend that we know that we're doing.
- */
-u8 omap2_clksel_find_parent_index(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	const struct clksel *clks;
-	const struct clksel_rate *clkr;
-	u32 r, found = 0;
-	struct clk *parent;
-	const char *clk_name;
-	int ret = 0, f = 0;
-
-	parent = __clk_get_parent(hw->clk);
-	clk_name = __clk_get_name(hw->clk);
-
-	/* XXX should be able to return an error */
-	WARN((!clk->clksel || !clk->clksel_mask),
-	     "clock: %s: attempt to call on a non-clksel clock", clk_name);
-
-	r = omap2_clk_readl(clk, clk->clksel_reg) & clk->clksel_mask;
-	r >>= __ffs(clk->clksel_mask);
-
-	for (clks = clk->clksel; clks->parent && !found; clks++) {
-		for (clkr = clks->rates; clkr->div && !found; clkr++) {
-			if (!(clkr->flags & cpu_mask))
-				continue;
-
-			if (clkr->val == r) {
-				found = 1;
-				ret = f;
-			}
-		}
-		f++;
-	}
-
-	/* This indicates a data error */
-	WARN(!found, "clock: %s: init parent: could not find regval %0x\n",
-	     clk_name, r);
-
-	return ret;
-}
-
-
-/**
- * omap2_clksel_recalc() - function ptr to pass via struct clk .recalc field
- * @clk: struct clk *
- *
- * This function is intended to be called only by the clock framework.
- * Each clksel clock should have its struct clk .recalc field set to this
- * function.  Returns the clock's current rate, based on its parent's rate
- * and its current divisor setting in the hardware.
- */
-unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate)
-{
-	unsigned long rate;
-	u32 div = 0;
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-
-	if (!parent_rate)
-		return 0;
-
-	div = _read_divisor(clk);
-	if (!div)
-		rate = parent_rate;
-	else
-		rate = parent_rate / div;
-
-	pr_debug("%s: recalc'd %s's rate to %lu (div %d)\n", __func__,
-		 __clk_get_name(hw->clk), rate, div);
-
-	return rate;
-}
-
-/**
- * omap2_clksel_round_rate() - find rounded rate for the given clock and rate
- * @clk: OMAP struct clk to use
- * @target_rate: desired clock rate
- *
- * This function is intended to be called only by the clock framework.
- * Finds best target rate based on the source clock and possible dividers.
- * rates. The divider array must be sorted with smallest divider first.
- *
- * Returns the rounded clock rate or returns 0xffffffff on error.
- */
-long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate,
-			unsigned long *parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	u32 new_div;
-
-	return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
-}
-
-/**
- * omap2_clksel_set_rate() - program clock rate in hardware
- * @clk: struct clk * to program rate
- * @rate: target rate to program
- *
- * This function is intended to be called only by the clock framework.
- * Program @clk's rate to @rate in the hardware.  The clock can be
- * either enabled or disabled when this happens, although if the clock
- * is enabled, some downstream devices may glitch or behave
- * unpredictably when the clock rate is changed - this depends on the
- * hardware. This function does not currently check the usecount of
- * the clock, so if multiple drivers are using the clock, and the rate
- * is changed, they will all be affected without any notification.
- * Returns -EINVAL upon error, or 0 upon success.
- */
-int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	u32 field_val, validrate, new_div = 0;
-
-	if (!clk->clksel || !clk->clksel_mask)
-		return -EINVAL;
-
-	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
-	if (validrate != rate)
-		return -EINVAL;
-
-	field_val = _divisor_to_clksel(clk, new_div);
-	if (field_val == ~0)
-		return -EINVAL;
-
-	_write_clksel_reg(clk, field_val);
-
-	pr_debug("clock: %s: set rate to %ld\n", __clk_get_name(hw->clk),
-		 __clk_get_rate(hw->clk));
-
-	return 0;
-}
-
-/*
- * Clksel parent setting function - not passed in struct clk function
- * pointer - instead, the OMAP clock code currently assumes that any
- * parent-setting clock is a clksel clock, and calls
- * omap2_clksel_set_parent() by default
- */
-
-/**
- * omap2_clksel_set_parent() - change a clock's parent clock
- * @clk: struct clk * of the child clock
- * @new_parent: struct clk * of the new parent clock
- *
- * This function is intended to be called only by the clock framework.
- * Change the parent clock of clock @clk to @new_parent.  This is
- * intended to be used while @clk is disabled.  This function does not
- * currently check the usecount of the clock, so if multiple drivers
- * are using the clock, and the parent is changed, they will all be
- * affected without any notification.  Returns -EINVAL upon error, or
- * 0 upon success.
- */
-int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-
-	if (!clk->clksel || !clk->clksel_mask)
-		return -EINVAL;
-
-	_write_clksel_reg(clk, field_val);
-	return 0;
-}
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
deleted file mode 100644
index f251a14..0000000
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * OMAP2/3/4 DPLL clock functions
- *
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2010 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-
-#include <asm/div64.h>
-
-#include "clock.h"
-
-/* DPLL rate rounding: minimum DPLL multiplier, divider values */
-#define DPLL_MIN_MULTIPLIER		2
-#define DPLL_MIN_DIVIDER		1
-
-/* Possible error results from _dpll_test_mult */
-#define DPLL_MULT_UNDERFLOW		-1
-
-/*
- * Scale factor to mitigate roundoff errors in DPLL rate rounding.
- * The higher the scale factor, the greater the risk of arithmetic overflow,
- * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
- * must be a power of DPLL_SCALE_BASE.
- */
-#define DPLL_SCALE_FACTOR		64
-#define DPLL_SCALE_BASE			2
-#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
-					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
-
-/*
- * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
- * From device data manual section 4.3 "DPLL and DLL Specifications".
- */
-#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN	500000
-#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX	2500000
-
-/* _dpll_test_fint() return codes */
-#define DPLL_FINT_UNDERFLOW		-1
-#define DPLL_FINT_INVALID		-2
-
-/* Private functions */
-
-/*
- * _dpll_test_fint - test whether an Fint value is valid for the DPLL
- * @clk: DPLL struct clk to test
- * @n: divider value (N) to test
- *
- * Tests whether a particular divider @n will result in a valid DPLL
- * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter
- * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
- * (assuming that it is counting N upwards), or -2 if the enclosing loop
- * should skip to the next iteration (again assuming N is increasing).
- */
-static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
-{
-	struct dpll_data *dd;
-	long fint, fint_min, fint_max;
-	int ret = 0;
-
-	dd = clk->dpll_data;
-
-	/* DPLL divider must result in a valid jitter correction val */
-	fint = __clk_get_rate(__clk_get_parent(clk->hw.clk)) / n;
-
-	if (dd->flags & DPLL_J_TYPE) {
-		fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
-		fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
-	} else {
-		fint_min = ti_clk_features.fint_min;
-		fint_max = ti_clk_features.fint_max;
-	}
-
-	if (!fint_min || !fint_max) {
-		WARN(1, "No fint limits available!\n");
-		return DPLL_FINT_INVALID;
-	}
-
-	if (fint < ti_clk_features.fint_min) {
-		pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n",
-			 n);
-		dd->max_divider = n;
-		ret = DPLL_FINT_UNDERFLOW;
-	} else if (fint > ti_clk_features.fint_max) {
-		pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n",
-			 n);
-		dd->min_divider = n;
-		ret = DPLL_FINT_INVALID;
-	} else if (fint > ti_clk_features.fint_band1_max &&
-		   fint < ti_clk_features.fint_band2_min) {
-		pr_debug("rejecting n=%d due to Fint failure\n", n);
-		ret = DPLL_FINT_INVALID;
-	}
-
-	return ret;
-}
-
-static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
-					    unsigned int m, unsigned int n)
-{
-	unsigned long long num;
-
-	num = (unsigned long long)parent_rate * m;
-	do_div(num, n);
-	return num;
-}
-
-/*
- * _dpll_test_mult - test a DPLL multiplier value
- * @m: pointer to the DPLL m (multiplier) value under test
- * @n: current DPLL n (divider) value under test
- * @new_rate: pointer to storage for the resulting rounded rate
- * @target_rate: the desired DPLL rate
- * @parent_rate: the DPLL's parent clock rate
- *
- * This code tests a DPLL multiplier value, ensuring that the
- * resulting rate will not be higher than the target_rate, and that
- * the multiplier value itself is valid for the DPLL.  Initially, the
- * integer pointed to by the m argument should be prescaled by
- * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
- * a non-scaled m upon return.  This non-scaled m will result in a
- * new_rate as close as possible to target_rate (but not greater than
- * target_rate) given the current (parent_rate, n, prescaled m)
- * triple. Returns DPLL_MULT_UNDERFLOW in the event that the
- * non-scaled m attempted to underflow, which can allow the calling
- * function to bail out early; or 0 upon success.
- */
-static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
-			   unsigned long target_rate,
-			   unsigned long parent_rate)
-{
-	int r = 0, carry = 0;
-
-	/* Unscale m and round if necessary */
-	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
-		carry = 1;
-	*m = (*m / DPLL_SCALE_FACTOR) + carry;
-
-	/*
-	 * The new rate must be <= the target rate to avoid programming
-	 * a rate that is impossible for the hardware to handle
-	 */
-	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
-	if (*new_rate > target_rate) {
-		(*m)--;
-		*new_rate = 0;
-	}
-
-	/* Guard against m underflow */
-	if (*m < DPLL_MIN_MULTIPLIER) {
-		*m = DPLL_MIN_MULTIPLIER;
-		*new_rate = 0;
-		r = DPLL_MULT_UNDERFLOW;
-	}
-
-	if (*new_rate == 0)
-		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
-
-	return r;
-}
-
-/**
- * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not
- * @v: bitfield value of the DPLL enable
- *
- * Checks given DPLL enable bitfield to see whether the DPLL is in bypass
- * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise.
- */
-static int _omap2_dpll_is_in_bypass(u32 v)
-{
-	u8 mask, val;
-
-	mask = ti_clk_features.dpll_bypass_vals;
-
-	/*
-	 * Each set bit in the mask corresponds to a bypass value equal
-	 * to the bitshift. Go through each set-bit in the mask and
-	 * compare against the given register value.
-	 */
-	while (mask) {
-		val = __ffs(mask);
-		mask ^= (1 << val);
-		if (v == val)
-			return 1;
-	}
-
-	return 0;
-}
-
-/* Public functions */
-u8 omap2_init_dpll_parent(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	u32 v;
-	struct dpll_data *dd;
-
-	dd = clk->dpll_data;
-	if (!dd)
-		return -EINVAL;
-
-	v = omap2_clk_readl(clk, dd->control_reg);
-	v &= dd->enable_mask;
-	v >>= __ffs(dd->enable_mask);
-
-	/* Reparent the struct clk in case the dpll is in bypass */
-	if (_omap2_dpll_is_in_bypass(v))
-		return 1;
-
-	return 0;
-}
-
-/**
- * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
- * @clk: struct clk * of a DPLL
- *
- * DPLLs can be locked or bypassed - basically, enabled or disabled.
- * When locked, the DPLL output depends on the M and N values.  When
- * bypassed, on OMAP2xxx, the output rate is either the 32 kHz clock
- * or sys_clk.  Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
- * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
- * (generated by DPLL3), while the DPLL3, DPLL4, and DPLL5 bypass rates are sys_clk.
- * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
- * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
- * if the clock @clk is not a DPLL.
- */
-unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
-{
-	long long dpll_clk;
-	u32 dpll_mult, dpll_div, v;
-	struct dpll_data *dd;
-
-	dd = clk->dpll_data;
-	if (!dd)
-		return 0;
-
-	/* Return bypass rate if DPLL is bypassed */
-	v = omap2_clk_readl(clk, dd->control_reg);
-	v &= dd->enable_mask;
-	v >>= __ffs(dd->enable_mask);
-
-	if (_omap2_dpll_is_in_bypass(v))
-		return __clk_get_rate(dd->clk_bypass);
-
-	v = omap2_clk_readl(clk, dd->mult_div1_reg);
-	dpll_mult = v & dd->mult_mask;
-	dpll_mult >>= __ffs(dd->mult_mask);
-	dpll_div = v & dd->div1_mask;
-	dpll_div >>= __ffs(dd->div1_mask);
-
-	dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult;
-	do_div(dpll_clk, dpll_div + 1);
-
-	return dpll_clk;
-}
-
-/* DPLL rate rounding code */
-
-/**
- * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
- * @clk: struct clk * for a DPLL
- * @target_rate: desired DPLL clock rate
- *
- * Given a DPLL and a desired target rate, round the target rate to a
- * possible, programmable rate for this DPLL.  Attempts to select the
- * minimum possible n.  Stores the computed (m, n) in the DPLL's
- * dpll_data structure so set_rate() will not need to call this
- * (expensive) function again.  Returns ~0 if the target rate cannot
- * be rounded, or the rounded rate upon success.
- */
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
-		unsigned long *parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	int m, n, r, scaled_max_m;
-	int min_delta_m = INT_MAX, min_delta_n = INT_MAX;
-	unsigned long scaled_rt_rp;
-	unsigned long new_rate = 0;
-	struct dpll_data *dd;
-	unsigned long ref_rate;
-	long delta;
-	long prev_min_delta = LONG_MAX;
-	const char *clk_name;
-
-	if (!clk || !clk->dpll_data)
-		return ~0;
-
-	dd = clk->dpll_data;
-
-	ref_rate = __clk_get_rate(dd->clk_ref);
-	clk_name = __clk_get_name(hw->clk);
-	pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
-		 clk_name, target_rate);
-
-	scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
-	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
-
-	dd->last_rounded_rate = 0;
-
-	for (n = dd->min_divider; n <= dd->max_divider; n++) {
-
-		/* Is the (input clk, divider) pair valid for the DPLL? */
-		r = _dpll_test_fint(clk, n);
-		if (r == DPLL_FINT_UNDERFLOW)
-			break;
-		else if (r == DPLL_FINT_INVALID)
-			continue;
-
-		/* Compute the scaled DPLL multiplier, based on the divider */
-		m = scaled_rt_rp * n;
-
-		/*
-		 * Since we're counting n up, a m overflow means we
-		 * can bail out completely (since as n increases in
-		 * the next iteration, there's no way that m can
-		 * increase beyond the current m)
-		 */
-		if (m > scaled_max_m)
-			break;
-
-		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
-				    ref_rate);
-
-		/* m can't be set low enough for this n - try with a larger n */
-		if (r == DPLL_MULT_UNDERFLOW)
-			continue;
-
-		/* skip rates above our target rate */
-		delta = target_rate - new_rate;
-		if (delta < 0)
-			continue;
-
-		if (delta < prev_min_delta) {
-			prev_min_delta = delta;
-			min_delta_m = m;
-			min_delta_n = n;
-		}
-
-		pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n",
-			 clk_name, m, n, new_rate);
-
-		if (delta == 0)
-			break;
-	}
-
-	if (prev_min_delta == LONG_MAX) {
-		pr_debug("clock: %s: cannot round to rate %lu\n",
-			 clk_name, target_rate);
-		return ~0;
-	}
-
-	dd->last_rounded_m = min_delta_m;
-	dd->last_rounded_n = min_delta_n;
-	dd->last_rounded_rate = target_rate - prev_min_delta;
-
-	return dd->last_rounded_rate;
-}
-
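
The DPLL rounding removed above prescales the target/reference ratio by DPLL_SCALE_FACTOR so the integer division loses less precision, then unscales the multiplier with rounding and clamps it so the computed rate never exceeds the target. Below is a minimal user-space sketch of that arithmetic with made-up reference and target rates; it omits the Fint and minimum-multiplier checks that the real code performs.

/* Standalone sketch (not kernel code) of the prescaled m/n rounding used by
 * omap2_dpll_round_rate(). */
#include <stdio.h>

#define SCALE		64
#define ROUND_VAL	32	/* (SCALE_BASE / 2) * (SCALE / SCALE_BASE) */

int main(void)
{
	unsigned long ref = 26000000UL;		/* example reference clock */
	unsigned long target = 600000000UL;	/* example target rate */
	unsigned int n = 12;			/* one candidate divider */
	unsigned long scaled_rt_rp, rate, m;

	scaled_rt_rp = target / (ref / SCALE);	/* prescaled target/ref ratio */
	m = scaled_rt_rp * n;			/* prescaled multiplier */
	m = m / SCALE + (m % SCALE >= ROUND_VAL);	/* unscale with rounding */

	rate = (unsigned long long)ref * m / n;
	if (rate > target) {			/* never round above the target */
		m--;
		rate = (unsigned long long)ref * m / n;
	}

	/* With these inputs: m = 276, rate = 598000000 */
	printf("m=%lu n=%u rate=%lu\n", m, n, rate);
	return 0;
}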
diff --git a/arch/arm/mach-omap2/clkt_iclk.c b/arch/arm/mach-omap2/clkt_iclk.c
deleted file mode 100644
index 55eb579..0000000
--- a/arch/arm/mach-omap2/clkt_iclk.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * OMAP2/3 interface clock control
- *
- * Copyright (C) 2011 Nokia Corporation
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-
-#include "clock.h"
-
-/* Register offsets */
-#define CM_AUTOIDLE			0x30
-#define CM_ICLKEN			0x10
-
-/* Private functions */
-
-/* XXX */
-void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
-{
-	u32 v;
-	void __iomem *r;
-
-	r = (__force void __iomem *)
-		((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
-
-	v = omap2_clk_readl(clk, r);
-	v |= (1 << clk->enable_bit);
-	omap2_clk_writel(v, clk, r);
-}
-
-/* XXX */
-void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
-{
-	u32 v;
-	void __iomem *r;
-
-	r = (__force void __iomem *)
-		((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
-
-	v = omap2_clk_readl(clk, r);
-	v &= ~(1 << clk->enable_bit);
-	omap2_clk_writel(v, clk, r);
-}
-
-/* Public data */
-
-const struct clk_hw_omap_ops clkhwops_iclk = {
-	.allow_idle	= omap2_clkt_iclk_allow_idle,
-	.deny_idle	= omap2_clkt_iclk_deny_idle,
-};
-
-const struct clk_hw_omap_ops clkhwops_iclk_wait = {
-	.allow_idle	= omap2_clkt_iclk_allow_idle,
-	.deny_idle	= omap2_clkt_iclk_deny_idle,
-	.find_idlest	= omap2_clk_dflt_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
-
-
-
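
The two idle helpers above derive the CM_AUTOIDLE register address from the CM_ICLKEN address by XORing with (CM_AUTOIDLE ^ CM_ICLKEN), i.e. by flipping the offset bits that differ between 0x10 and 0x30. A standalone sketch of that address math follows; the register address used here is hypothetical.

/* Standalone sketch (not kernel code) of the CM_ICLKEN <-> CM_AUTOIDLE
 * address trick: XORing with (CM_AUTOIDLE ^ CM_ICLKEN) toggles the offset
 * between the two registers. */
#include <stdio.h>

#define CM_ICLKEN	0x10
#define CM_AUTOIDLE	0x30

int main(void)
{
	unsigned long iclken_reg = 0x48004A10UL;	/* hypothetical CM_ICLKEN1 address */
	unsigned long autoidle_reg;

	autoidle_reg = iclken_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN);

	/* prints 0x48004a10 -> 0x48004a30 */
	printf("CM_ICLKEN at %#lx -> CM_AUTOIDLE at %#lx\n",
	       iclken_reg, autoidle_reg);
	return 0;
}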
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index a699d71..acb60ed 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -20,12 +20,11 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/delay.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
-#include <linux/regmap.h>
 #include <linux/of_address.h>
-#include <linux/bootmem.h>
 #include <asm/cpu.h>
 
 #include <trace/events/power.h>
@@ -40,19 +39,8 @@
 #include "cm-regbits-34xx.h"
 #include "common.h"
 
-/*
- * MAX_MODULE_ENABLE_WAIT: maximum of number of microseconds to wait
- * for a module to indicate that it is no longer in idle
- */
-#define MAX_MODULE_ENABLE_WAIT		100000
-
 u16 cpu_mask;
 
-/*
- * Clock features setup. Used instead of CPU type checks.
- */
-struct ti_clk_features ti_clk_features;
-
 /* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
 #define OMAP3430_DPLL_FINT_BAND1_MIN	750000
 #define OMAP3430_DPLL_FINT_BAND1_MAX	2100000
@@ -66,119 +54,24 @@
 #define OMAP3PLUS_DPLL_FINT_MIN		32000
 #define OMAP3PLUS_DPLL_FINT_MAX		52000000
 
-/*
- * clkdm_control: if true, then when a clock is enabled in the
- * hardware, its clockdomain will first be enabled; and when a clock
- * is disabled in the hardware, its clockdomain will be disabled
- * afterwards.
- */
-static bool clkdm_control = true;
-
-static LIST_HEAD(clk_hw_omap_clocks);
-
-struct clk_iomap {
-	struct regmap *regmap;
-	void __iomem *mem;
-};
-
-static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS];
-
-static void clk_memmap_writel(u32 val, void __iomem *reg)
-{
-	struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
-	struct clk_iomap *io = clk_memmaps[r->index];
-
-	if (io->regmap)
-		regmap_write(io->regmap, r->offset, val);
-	else
-		writel_relaxed(val, io->mem + r->offset);
-}
-
-static u32 clk_memmap_readl(void __iomem *reg)
-{
-	u32 val;
-	struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
-	struct clk_iomap *io = clk_memmaps[r->index];
-
-	if (io->regmap)
-		regmap_read(io->regmap, r->offset, &val);
-	else
-		val = readl_relaxed(io->mem + r->offset);
-
-	return val;
-}
-
-void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg)
-{
-	if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING)))
-		writel_relaxed(val, reg);
-	else
-		clk_memmap_writel(val, reg);
-}
-
-u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg)
-{
-	if (WARN_ON_ONCE(!(clk->flags & MEMMAP_ADDRESSING)))
-		return readl_relaxed(reg);
-	else
-		return clk_memmap_readl(reg);
-}
-
 static struct ti_clk_ll_ops omap_clk_ll_ops = {
-	.clk_readl = clk_memmap_readl,
-	.clk_writel = clk_memmap_writel,
+	.clkdm_clk_enable = clkdm_clk_enable,
+	.clkdm_clk_disable = clkdm_clk_disable,
+	.cm_wait_module_ready = omap_cm_wait_module_ready,
+	.cm_split_idlest_reg = cm_split_idlest_reg,
 };
 
 /**
- * omap2_clk_provider_init - initialize a clock provider
- * @match_table: DT device table to match for devices to init
- * @np: device node pointer for this clock provider
- * @index: index for the clock provider
- * @syscon: syscon regmap pointer
- * @mem: iomem pointer for the clock provider memory area, only used if
- *	 syscon is not provided
+ * omap2_clk_setup_ll_ops - setup clock driver low-level ops
  *
- * Initializes a clock provider module (CM/PRM etc.), registering
- * the memory mapping at specified index and initializing the
- * low level driver infrastructure. Returns 0 in success.
+ * Sets up clock driver low-level platform ops. These are needed
+ * for register accesses and various other misc platform operations.
+ * Returns 0 on success, -EBUSY if low level ops have been registered
+ * already.
  */
-int __init omap2_clk_provider_init(struct device_node *np, int index,
-				   struct regmap *syscon, void __iomem *mem)
+int __init omap2_clk_setup_ll_ops(void)
 {
-	struct clk_iomap *io;
-
-	ti_clk_ll_ops = &omap_clk_ll_ops;
-
-	io = kzalloc(sizeof(*io), GFP_KERNEL);
-
-	io->regmap = syscon;
-	io->mem = mem;
-
-	clk_memmaps[index] = io;
-
-	ti_dt_clk_init_provider(np, index);
-
-	return 0;
-}
-
-/**
- * omap2_clk_legacy_provider_init - initialize a legacy clock provider
- * @index: index for the clock provider
- * @mem: iomem pointer for the clock provider memory area
- *
- * Initializes a legacy clock provider memory mapping.
- */
-void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
-{
-	struct clk_iomap *io;
-
-	ti_clk_ll_ops = &omap_clk_ll_ops;
-
-	io = memblock_virt_alloc(sizeof(*io), 0);
-
-	io->mem = mem;
-
-	clk_memmaps[index] = io;
+	return ti_clk_setup_ll_ops(&omap_clk_ll_ops);
 }
 
 /*
@@ -187,77 +80,6 @@
 
 /* Private functions */
 
-
-/**
- * _wait_idlest_generic - wait for a module to leave the idle state
- * @clk: module clock to wait for (needed for register offsets)
- * @reg: virtual address of module IDLEST register
- * @mask: value to mask against to determine if the module is active
- * @idlest: idle state indicator (0 or 1) for the clock
- * @name: name of the clock (for printk)
- *
- * Wait for a module to leave idle, where its idle-status register is
- * not inside the CM module.  Returns 1 if the module left idle
- * promptly, or 0 if the module did not leave idle before the timeout
- * elapsed.  XXX Deprecated - should be moved into drivers for the
- * individual IP block that the IDLEST register exists in.
- */
-static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg,
-				u32 mask, u8 idlest, const char *name)
-{
-	int i = 0, ena = 0;
-
-	ena = (idlest) ? 0 : mask;
-
-	omap_test_timeout(((omap2_clk_readl(clk, reg) & mask) == ena),
-			  MAX_MODULE_ENABLE_WAIT, i);
-
-	if (i < MAX_MODULE_ENABLE_WAIT)
-		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
-			 name, i);
-	else
-		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
-		       name, MAX_MODULE_ENABLE_WAIT);
-
-	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
-};
-
-/**
- * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
- * @clk: struct clk * belonging to the module
- *
- * If the necessary clocks for the OMAP hardware IP block that
- * corresponds to clock @clk are enabled, then wait for the module to
- * indicate readiness (i.e., to leave IDLE).  This code does not
- * belong in the clock code and will be moved in the medium term to
- * module-dependent code.  No return value.
- */
-static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
-{
-	void __iomem *companion_reg, *idlest_reg;
-	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
-	s16 prcm_mod;
-	int r;
-
-	/* Not all modules have multiple clocks that their IDLEST depends on */
-	if (clk->ops->find_companion) {
-		clk->ops->find_companion(clk, &companion_reg, &other_bit);
-		if (!(omap2_clk_readl(clk, companion_reg) & (1 << other_bit)))
-			return;
-	}
-
-	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
-	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
-	if (r) {
-		/* IDLEST register not in the CM module */
-		_wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
-				     idlest_val, __clk_get_name(clk->hw.clk));
-	} else {
-		omap_cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
-					  idlest_bit);
-	};
-}
-
 /* Public functions */
 
 /**
@@ -290,279 +112,6 @@
 	}
 }
 
-/**
- * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
- *
- * Prevent the OMAP clock code from calling into the clockdomain code
- * when a hardware clock in that clockdomain is enabled or disabled.
- * Intended to be called at init time from omap*_clk_init().  No
- * return value.
- */
-void __init omap2_clk_disable_clkdm_control(void)
-{
-	clkdm_control = false;
-}
-
-/**
- * omap2_clk_dflt_find_companion - find companion clock to @clk
- * @clk: struct clk * to find the companion clock of
- * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
- * @other_bit: u8 ** to return the companion clock bit shift in
- *
- * Note: We don't need special code here for INVERT_ENABLE for the
- * time being since INVERT_ENABLE only applies to clocks enabled by
- * CM_CLKEN_PLL
- *
- * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
- * just a matter of XORing the bits.
- *
- * Some clocks don't have companion clocks.  For example, modules with
- * only an interface clock (such as MAILBOXES) don't have a companion
- * clock.  Right now, this code relies on the hardware exporting a bit
- * in the correct companion register that indicates that the
- * nonexistent 'companion clock' is active.  Future patches will
- * associate this type of code with per-module data structures to
- * avoid this issue, and remove the casts.  No return value.
- */
-void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
-			void __iomem **other_reg, u8 *other_bit)
-{
-	u32 r;
-
-	/*
-	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
-	 * it's just a matter of XORing the bits.
-	 */
-	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
-
-	*other_reg = (__force void __iomem *)r;
-	*other_bit = clk->enable_bit;
-}
-
-/**
- * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
- * @clk: struct clk * to find IDLEST info for
- * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
- * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
- * @idlest_val: u8 * to return the idle status indicator
- *
- * Return the CM_IDLEST register address and bit shift corresponding
- * to the module that "owns" this clock.  This default code assumes
- * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
- * the IDLEST register address ID corresponds to the CM_*CLKEN
- * register address ID (e.g., that CM_FCLKEN2 corresponds to
- * CM_IDLEST2).  This is not true for all modules.  No return value.
- */
-void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
-		void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
-{
-	u32 r;
-
-	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
-	*idlest_reg = (__force void __iomem *)r;
-	*idlest_bit = clk->enable_bit;
-
-	/*
-	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
-	 * 34xx reverses this, just to keep us on our toes
-	 * AM35xx uses both, depending on the module.
-	 */
-	*idlest_val = ti_clk_features.cm_idlest_val;
-}
-
-/**
- * omap2_dflt_clk_enable - enable a clock in the hardware
- * @hw: struct clk_hw * of the clock to enable
- *
- * Enable the clock @hw in the hardware.  We first call into the OMAP
- * clockdomain code to "enable" the corresponding clockdomain if this
- * is the first enabled user of the clockdomain.  Then program the
- * hardware to enable the clock.  Then wait for the IP block that uses
- * this clock to leave idle (if applicable).  Returns the error value
- * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
- * if @hw has a null clock enable_reg, or zero upon success.
- */
-int omap2_dflt_clk_enable(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk;
-	u32 v;
-	int ret = 0;
-
-	clk = to_clk_hw_omap(hw);
-
-	if (clkdm_control && clk->clkdm) {
-		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
-		if (ret) {
-			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
-			     __func__, __clk_get_name(hw->clk),
-			     clk->clkdm->name, ret);
-			return ret;
-		}
-	}
-
-	if (unlikely(clk->enable_reg == NULL)) {
-		pr_err("%s: %s missing enable_reg\n", __func__,
-		       __clk_get_name(hw->clk));
-		ret = -EINVAL;
-		goto err;
-	}
-
-	/* FIXME should not have INVERT_ENABLE bit here */
-	v = omap2_clk_readl(clk, clk->enable_reg);
-	if (clk->flags & INVERT_ENABLE)
-		v &= ~(1 << clk->enable_bit);
-	else
-		v |= (1 << clk->enable_bit);
-	omap2_clk_writel(v, clk, clk->enable_reg);
-	v = omap2_clk_readl(clk, clk->enable_reg); /* OCP barrier */
-
-	if (clk->ops && clk->ops->find_idlest)
-		_omap2_module_wait_ready(clk);
-
-	return 0;
-
-err:
-	if (clkdm_control && clk->clkdm)
-		clkdm_clk_disable(clk->clkdm, hw->clk);
-	return ret;
-}
-
-/**
- * omap2_dflt_clk_disable - disable a clock in the hardware
- * @hw: struct clk_hw * of the clock to disable
- *
- * Disable the clock @hw in the hardware, and call into the OMAP
- * clockdomain code to "disable" the corresponding clockdomain if all
- * clocks/hwmods in that clockdomain are now disabled.  No return
- * value.
- */
-void omap2_dflt_clk_disable(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk;
-	u32 v;
-
-	clk = to_clk_hw_omap(hw);
-	if (!clk->enable_reg) {
-		/*
-		 * 'independent' here refers to a clock which is not
-		 * controlled by its parent.
-		 */
-		pr_err("%s: independent clock %s has no enable_reg\n",
-		       __func__, __clk_get_name(hw->clk));
-		return;
-	}
-
-	v = omap2_clk_readl(clk, clk->enable_reg);
-	if (clk->flags & INVERT_ENABLE)
-		v |= (1 << clk->enable_bit);
-	else
-		v &= ~(1 << clk->enable_bit);
-	omap2_clk_writel(v, clk, clk->enable_reg);
-	/* No OCP barrier needed here since it is a disable operation */
-
-	if (clkdm_control && clk->clkdm)
-		clkdm_clk_disable(clk->clkdm, hw->clk);
-}
-
-/**
- * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
- * @hw: struct clk_hw * of the clock being enabled
- *
- * Increment the usecount of the clockdomain of the clock pointed to
- * by @hw; if the usecount is 1, the clockdomain will be "enabled."
- * Only needed for clocks that don't use omap2_dflt_clk_enable() as
- * their enable function pointer.  Passes along the return value of
- * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
- * clockdomain, or 0 if clock framework-based clockdomain control is
- * not implemented.
- */
-int omap2_clkops_enable_clkdm(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk;
-	int ret = 0;
-
-	clk = to_clk_hw_omap(hw);
-
-	if (unlikely(!clk->clkdm)) {
-		pr_err("%s: %s: no clkdm set ?!\n", __func__,
-		       __clk_get_name(hw->clk));
-		return -EINVAL;
-	}
-
-	if (unlikely(clk->enable_reg))
-		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
-		       __clk_get_name(hw->clk));
-
-	if (!clkdm_control) {
-		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
-		       __func__, __clk_get_name(hw->clk));
-		return 0;
-	}
-
-	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
-	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
-	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);
-
-	return ret;
-}
-
-/**
- * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
- * @hw: struct clk_hw * of the clock being disabled
- *
- * Decrement the usecount of the clockdomain of the clock pointed to
- * by @hw; if the usecount is 0, the clockdomain will be "disabled."
- * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
- * disable function pointer.  No return value.
- */
-void omap2_clkops_disable_clkdm(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk;
-
-	clk = to_clk_hw_omap(hw);
-
-	if (unlikely(!clk->clkdm)) {
-		pr_err("%s: %s: no clkdm set ?!\n", __func__,
-		       __clk_get_name(hw->clk));
-		return;
-	}
-
-	if (unlikely(clk->enable_reg))
-		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
-		       __clk_get_name(hw->clk));
-
-	if (!clkdm_control) {
-		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
-		       __func__, __clk_get_name(hw->clk));
-		return;
-	}
-
-	clkdm_clk_disable(clk->clkdm, hw->clk);
-}
-
-/**
- * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
- * @hw: struct clk_hw * to check
- *
- * Return 1 if the clock represented by @hw is enabled in the
- * hardware, or 0 otherwise.  Intended for use in the struct
- * clk_ops.is_enabled function pointer.
- */
-int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	u32 v;
-
-	v = omap2_clk_readl(clk, clk->enable_reg);
-
-	if (clk->flags & INVERT_ENABLE)
-		v ^= BIT(clk->enable_bit);
-
-	v &= BIT(clk->enable_bit);
-
-	return v ? 1 : 0;
-}
-
 static int __initdata mpurate;
 
 /*
@@ -584,178 +133,6 @@
 __setup("mpurate=", omap_clk_setup);
 
 /**
- * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
- * @clk: struct clk * to initialize
- *
- * Add an OMAP clock @clk to the internal list of OMAP clocks.  Used
- * temporarily for autoidle handling, until this support can be
- * integrated into the common clock framework code in some way.  No
- * return value.
- */
-void omap2_init_clk_hw_omap_clocks(struct clk *clk)
-{
-	struct clk_hw_omap *c;
-
-	if (__clk_get_flags(clk) & CLK_IS_BASIC)
-		return;
-
-	c = to_clk_hw_omap(__clk_get_hw(clk));
-	list_add(&c->node, &clk_hw_omap_clocks);
-}
-
-/**
- * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
- * support it
- *
- * Enable clock autoidle on all OMAP clocks that have allow_idle
- * function pointers associated with them.  This function is intended
- * to be temporary until support for this is added to the common clock
- * code.  Returns 0.
- */
-int omap2_clk_enable_autoidle_all(void)
-{
-	struct clk_hw_omap *c;
-
-	list_for_each_entry(c, &clk_hw_omap_clocks, node)
-		if (c->ops && c->ops->allow_idle)
-			c->ops->allow_idle(c);
-
-	of_ti_clk_allow_autoidle_all();
-
-	return 0;
-}
-
-/**
- * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
- * support it
- *
- * Disable clock autoidle on all OMAP clocks that have allow_idle
- * function pointers associated with them.  This function is intended
- * to be temporary until support for this is added to the common clock
- * code.  Returns 0.
- */
-int omap2_clk_disable_autoidle_all(void)
-{
-	struct clk_hw_omap *c;
-
-	list_for_each_entry(c, &clk_hw_omap_clocks, node)
-		if (c->ops && c->ops->deny_idle)
-			c->ops->deny_idle(c);
-
-	of_ti_clk_deny_autoidle_all();
-
-	return 0;
-}
-
-/**
- * omap2_clk_deny_idle - disable autoidle on an OMAP clock
- * @clk: struct clk * to disable autoidle for
- *
- * Disable autoidle on an OMAP clock.
- */
-int omap2_clk_deny_idle(struct clk *clk)
-{
-	struct clk_hw_omap *c;
-
-	if (__clk_get_flags(clk) & CLK_IS_BASIC)
-		return -EINVAL;
-
-	c = to_clk_hw_omap(__clk_get_hw(clk));
-	if (c->ops && c->ops->deny_idle)
-		c->ops->deny_idle(c);
-	return 0;
-}
-
-/**
- * omap2_clk_allow_idle - enable autoidle on an OMAP clock
- * @clk: struct clk * to enable autoidle for
- *
- * Enable autoidle on an OMAP clock.
- */
-int omap2_clk_allow_idle(struct clk *clk)
-{
-	struct clk_hw_omap *c;
-
-	if (__clk_get_flags(clk) & CLK_IS_BASIC)
-		return -EINVAL;
-
-	c = to_clk_hw_omap(__clk_get_hw(clk));
-	if (c->ops && c->ops->allow_idle)
-		c->ops->allow_idle(c);
-	return 0;
-}
-
-/**
- * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
- * @clk_names: ptr to an array of strings of clock names to enable
- * @num_clocks: number of clock names in @clk_names
- *
- * Prepare and enable a list of clocks, named by @clk_names.  No
- * return value. XXX Deprecated; only needed until these clocks are
- * properly claimed and enabled by the drivers or core code that uses
- * them.  XXX What code disables & calls clk_put on these clocks?
- */
-void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
-{
-	struct clk *init_clk;
-	int i;
-
-	for (i = 0; i < num_clocks; i++) {
-		init_clk = clk_get(NULL, clk_names[i]);
-		if (WARN(IS_ERR(init_clk), "could not find init clock %s\n",
-				clk_names[i]))
-			continue;
-		clk_prepare_enable(init_clk);
-	}
-}
-
-const struct clk_hw_omap_ops clkhwops_wait = {
-	.find_idlest	= omap2_clk_dflt_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
-
-/**
- * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
- * @mpurate_ck_name: clk name of the clock to change rate
- *
- * Change the ARM MPU clock rate to the rate specified on the command
- * line, if one was specified.  @mpurate_ck_name should be
- * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
- * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
- * handled by the virt_prcm_set clock, but this should be handled by
- * the OPP layer.  XXX This is intended to be handled by the OPP layer
- * code in the near future and should be removed from the clock code.
- * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
- * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
- * cannot be found, or 0 upon success.
- */
-int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
-{
-	struct clk *mpurate_ck;
-	int r;
-
-	if (!mpurate)
-		return -EINVAL;
-
-	mpurate_ck = clk_get(NULL, mpurate_ck_name);
-	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
-		return -ENOENT;
-
-	r = clk_set_rate(mpurate_ck, mpurate);
-	if (r < 0) {
-		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
-		     mpurate_ck_name, mpurate, r);
-		clk_put(mpurate_ck);
-		return -EINVAL;
-	}
-
-	calibrate_delay();
-	clk_put(mpurate_ck);
-
-	return 0;
-}
-
-/**
  * omap2_clk_print_new_rates - print summary of current clock tree rates
  * @hfclkin_ck_name: clk name for the off-chip HF oscillator
  * @core_ck_name: clk name for the on-chip CORE_CLK
@@ -801,29 +178,30 @@
  */
 void __init ti_clk_init_features(void)
 {
+	struct ti_clk_features features = { 0 };
 	/* Fint setup for DPLLs */
 	if (cpu_is_omap3430()) {
-		ti_clk_features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
-		ti_clk_features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
-		ti_clk_features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX;
-		ti_clk_features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN;
+		features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
+		features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
+		features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX;
+		features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN;
 	} else {
-		ti_clk_features.fint_min = OMAP3PLUS_DPLL_FINT_MIN;
-		ti_clk_features.fint_max = OMAP3PLUS_DPLL_FINT_MAX;
+		features.fint_min = OMAP3PLUS_DPLL_FINT_MIN;
+		features.fint_max = OMAP3PLUS_DPLL_FINT_MAX;
 	}
 
 	/* Bypass value setup for DPLLs */
 	if (cpu_is_omap24xx()) {
-		ti_clk_features.dpll_bypass_vals |=
+		features.dpll_bypass_vals |=
 			(1 << OMAP2XXX_EN_DPLL_LPBYPASS) |
 			(1 << OMAP2XXX_EN_DPLL_FRBYPASS);
 	} else if (cpu_is_omap34xx()) {
-		ti_clk_features.dpll_bypass_vals |=
+		features.dpll_bypass_vals |=
 			(1 << OMAP3XXX_EN_DPLL_LPBYPASS) |
 			(1 << OMAP3XXX_EN_DPLL_FRBYPASS);
 	} else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx() ||
 		   soc_is_omap54xx() || soc_is_dra7xx()) {
-		ti_clk_features.dpll_bypass_vals |=
+		features.dpll_bypass_vals |=
 			(1 << OMAP4XXX_EN_DPLL_LPBYPASS) |
 			(1 << OMAP4XXX_EN_DPLL_FRBYPASS) |
 			(1 << OMAP4XXX_EN_DPLL_MNBYPASS);
@@ -831,7 +209,7 @@
 
 	/* Jitter correction only available on OMAP343X */
 	if (cpu_is_omap343x())
-		ti_clk_features.flags |= TI_CLK_DPLL_HAS_FREQSEL;
+		features.flags |= TI_CLK_DPLL_HAS_FREQSEL;
 
 	/* Idlest value for interface clocks.
 	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
@@ -839,11 +217,13 @@
 	 * AM35xx uses both, depending on the module.
 	 */
 	if (cpu_is_omap24xx())
-		ti_clk_features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL;
+		features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL;
 	else if (cpu_is_omap34xx())
-		ti_clk_features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL;
+		features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL;
 
 	/* On OMAP3430 ES1.0, DPLL4 can't be re-programmed */
 	if (omap_rev() == OMAP3430_REV_ES1_0)
-		ti_clk_features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM;
+		features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM;
+
+	ti_clk_setup_features(&features);
 }
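
The clock.c change above replaces the exported, writable ti_clk_features global with a local struct that is filled once and handed to ti_clk_setup_features(). A rough user-space sketch of that hand-off pattern follows; the driver-side copy shown is only illustrative and is not the actual TI clock driver code.

/* Standalone sketch (not kernel code): fill a local feature struct once and
 * hand a snapshot to the consumer, instead of exposing a mutable global. */
#include <stdio.h>
#include <string.h>

struct features {
	long fint_min;
	long fint_max;
	unsigned int flags;
};

static struct features driver_copy;	/* private to the "driver" side */

static void setup_features(const struct features *f)
{
	memcpy(&driver_copy, f, sizeof(driver_copy));	/* snapshot at init time */
}

int main(void)
{
	struct features f = { 0 };	/* platform fills a local struct ... */

	f.fint_min = 32000;		/* OMAP3+ Fint range from the code above */
	f.fint_max = 52000000;

	setup_features(&f);		/* ... and hands it over exactly once */

	printf("fint range: %ld..%ld\n", driver_copy.fint_min, driver_copy.fint_max);
	return 0;
}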
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index 652ed0a..67da640 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -23,90 +23,6 @@
 #include <linux/clk-provider.h>
 #include <linux/clk/ti.h>
 
-struct omap_clk {
-	u16				cpu;
-	struct clk_lookup		lk;
-};
-
-#define CLK(dev, con, ck)		\
-	{				\
-		.lk = {			\
-			.dev_id = dev,	\
-			.con_id = con,	\
-			.clk = ck,	\
-		},			\
-	}
-
-struct clockdomain;
-
-#define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name)	\
-	static struct clk_core _name##_core = {			\
-		.name = #_name,					\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _parent_array_name,		\
-		.num_parents = ARRAY_SIZE(_parent_array_name),	\
-		.ops = &_clkops_name,				\
-	};							\
-	static struct clk _name = {				\
-		.core = &_name##_core,				\
-	};
-
-#define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,	\
-				_clkops_name, _flags)		\
-	static struct clk_core _name##_core = {			\
-		.name = #_name,					\
-		.hw = &_name##_hw.hw,				\
-		.parent_names = _parent_array_name,		\
-		.num_parents = ARRAY_SIZE(_parent_array_name),	\
-		.ops = &_clkops_name,				\
-		.flags = _flags,				\
-	};							\
-	static struct clk _name = {				\
-		.core = &_name##_core,				\
-	};
-
-#define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)		\
-	static struct clk_hw_omap _name##_hw = {		\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.clkdm_name = _clkdm_name,			\
-	};
-
-#define DEFINE_CLK_OMAP_MUX(_name, _clkdm_name, _clksel,	\
-			    _clksel_reg, _clksel_mask,		\
-			    _parent_names, _ops)		\
-	static struct clk _name;				\
-	static struct clk_hw_omap _name##_hw = {		\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.clksel		= _clksel,			\
-		.clksel_reg	= _clksel_reg,			\
-		.clksel_mask	= _clksel_mask,			\
-		.clkdm_name	= _clkdm_name,			\
-	};							\
-	DEFINE_STRUCT_CLK(_name, _parent_names, _ops);
-
-#define DEFINE_CLK_OMAP_MUX_GATE(_name, _clkdm_name, _clksel,	\
-				 _clksel_reg, _clksel_mask,	\
-				 _enable_reg, _enable_bit,	\
-				 _hwops, _parent_names, _ops)	\
-	static struct clk _name;				\
-	static struct clk_hw_omap _name##_hw = {		\
-		.hw = {						\
-			.clk = &_name,				\
-		},						\
-		.ops		= _hwops,			\
-		.enable_reg	= _enable_reg,			\
-		.enable_bit	= _enable_bit,			\
-		.clksel		= _clksel,			\
-		.clksel_reg	= _clksel_reg,			\
-		.clksel_mask	= _clksel_mask,			\
-		.clkdm_name	= _clkdm_name,			\
-	};							\
-	DEFINE_STRUCT_CLK(_name, _parent_names, _ops);
-
 /* struct clksel_rate.flags possibilities */
 #define RATE_IN_242X		(1 << 0)
 #define RATE_IN_243X		(1 << 1)
@@ -127,38 +43,6 @@
 /* RATE_IN_3430ES2PLUS_36XX includes 34xx/35xx with ES >=2, and all 36xx/37xx */
 #define RATE_IN_3430ES2PLUS_36XX	(RATE_IN_3430ES2PLUS | RATE_IN_36XX)
 
-
-/**
- * struct clksel_rate - register bitfield values corresponding to clk divisors
- * @val: register bitfield value (shifted to bit 0)
- * @div: clock divisor corresponding to @val
- * @flags: (see "struct clksel_rate.flags possibilities" above)
- *
- * @val should match the value of a read from struct clk.clksel_reg
- * AND'ed with struct clk.clksel_mask, shifted right to bit 0.
- *
- * @div is the divisor that should be applied to the parent clock's rate
- * to produce the current clock's rate.
- */
-struct clksel_rate {
-	u32			val;
-	u8			div;
-	u16			flags;
-};
-
-/**
- * struct clksel - available parent clocks, and a pointer to their divisors
- * @parent: struct clk * to a possible parent clock
- * @rates: available divisors for this parent clock
- *
- * A struct clksel is always associated with one or more struct clks
- * and one or more struct clksel_rates.
- */
-struct clksel {
-	struct clk		 *parent;
-	const struct clksel_rate *rates;
-};
-
 /* CM_CLKSEL2_PLL.CORE_CLK_SRC bits (2XXX) */
 #define CORE_CLK_SRC_32K		0x0
 #define CORE_CLK_SRC_DPLL		0x1
@@ -180,105 +64,18 @@
 #define OMAP4XXX_EN_DPLL_FRBYPASS		0x6
 #define OMAP4XXX_EN_DPLL_LOCKED			0x7
 
-u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
-void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
-void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
-void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk);
-void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk);
-
-void __init omap2_clk_disable_clkdm_control(void);
-
-/* clkt_clksel.c public functions */
-u32 omap2_clksel_round_rate_div(struct clk_hw_omap *clk,
-				unsigned long target_rate,
-				u32 *new_div);
-u8 omap2_clksel_find_parent_index(struct clk_hw *hw);
-unsigned long omap2_clksel_recalc(struct clk_hw *hw, unsigned long parent_rate);
-long omap2_clksel_round_rate(struct clk_hw *hw, unsigned long target_rate,
-				unsigned long *parent_rate);
-int omap2_clksel_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate);
-int omap2_clksel_set_parent(struct clk_hw *hw, u8 field_val);
-
-/* clkt_iclk.c public functions */
-extern void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk);
-extern void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk);
-
-unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
-
-void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
-				   void __iomem **other_reg,
-				   u8 *other_bit);
-void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
-				void __iomem **idlest_reg,
-				u8 *idlest_bit, u8 *idlest_val);
-int omap2_clk_enable_autoidle_all(void);
-int omap2_clk_allow_idle(struct clk *clk);
-int omap2_clk_deny_idle(struct clk *clk);
-int omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name);
 void omap2_clk_print_new_rates(const char *hfclkin_ck_name,
 			       const char *core_ck_name,
 			       const char *mpu_ck_name);
 
-u32 omap2_clk_readl(struct clk_hw_omap *clk, void __iomem *reg);
-void omap2_clk_writel(u32 val, struct clk_hw_omap *clk, void __iomem *reg);
-
 extern u16 cpu_mask;
 
-/*
- * Clock features setup. Used instead of CPU type checks.
- */
-struct ti_clk_features {
-	u32 flags;
-	long fint_min;
-	long fint_max;
-	long fint_band1_max;
-	long fint_band2_min;
-	u8 dpll_bypass_vals;
-	u8 cm_idlest_val;
-};
-
-#define TI_CLK_DPLL_HAS_FREQSEL		(1 << 0)
-#define TI_CLK_DPLL4_DENY_REPROGRAM	(1 << 1)
-
-extern struct ti_clk_features ti_clk_features;
-
 extern const struct clkops clkops_omap2_dflt_wait;
 extern const struct clkops clkops_omap2_dflt;
 
 extern struct clk_functions omap2_clk_functions;
 
-extern const struct clksel_rate gpt_32k_rates[];
-extern const struct clksel_rate gpt_sys_rates[];
-extern const struct clksel_rate gfx_l3_rates[];
-extern const struct clksel_rate dsp_ick_rates[];
-
-extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
-extern const struct clk_hw_omap_ops clkhwops_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
-extern const struct clk_hw_omap_ops clkhwops_apll54;
-extern const struct clk_hw_omap_ops clkhwops_apll96;
-
-/* clksel_rate blocks shared between OMAP44xx and AM33xx */
-extern const struct clksel_rate div_1_0_rates[];
-extern const struct clksel_rate div3_1to4_rates[];
-extern const struct clksel_rate div_1_1_rates[];
-extern const struct clksel_rate div_1_2_rates[];
-extern const struct clksel_rate div_1_3_rates[];
-extern const struct clksel_rate div_1_4_rates[];
-extern const struct clksel_rate div31_1to31_rates[];
-
-extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
-extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
-
-struct regmap;
-
-int __init omap2_clk_provider_init(struct device_node *np, int index,
-				   struct regmap *syscon, void __iomem *mem);
-void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem);
+int __init omap2_clk_setup_ll_ops(void);
 
 void __init ti_clk_init_features(void);
 #endif
diff --git a/arch/arm/mach-omap2/clock2430.c b/arch/arm/mach-omap2/clock2430.c
deleted file mode 100644
index cef0c8d..0000000
--- a/arch/arm/mach-omap2/clock2430.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * clock2430.c - OMAP2430-specific clock integration code
- *
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2010 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
- * Gordon McNutt and RidgeRun, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock2xxx.h"
-#include "cm2xxx.h"
-#include "cm-regbits-24xx.h"
-
-/**
- * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
- *
- * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
- * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE.  This custom function
- * passes back the correct CM_IDLEST register address for I2CHS
- * modules.  No return value.
- */
-static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk,
-					   void __iomem **idlest_reg,
-					   u8 *idlest_bit,
-					   u8 *idlest_val)
-{
-	*idlest_reg = OMAP2430_CM_REGADDR(CORE_MOD, CM_IDLEST);
-	*idlest_bit = clk->enable_bit;
-	*idlest_val = OMAP24XX_CM_IDLEST_VAL;
-}
-
-/* 2430 I2CHS has non-standard IDLEST register */
-const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = {
-	.find_idlest	= omap2430_clk_i2chs_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
diff --git a/arch/arm/mach-omap2/clock2xxx.c b/arch/arm/mach-omap2/clock2xxx.c
deleted file mode 100644
index b870f6a..0000000
--- a/arch/arm/mach-omap2/clock2xxx.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * clock2xxx.c - OMAP2xxx-specific clock integration code
- *
- * Copyright (C) 2005-2008 Texas Instruments, Inc.
- * Copyright (C) 2004-2010 Nokia Corporation
- *
- * Contacts:
- * Richard Woodruff <r-woodruff2@ti.com>
- * Paul Walmsley
- *
- * Based on earlier work by Tuukka Tikkanen, Tony Lindgren,
- * Gordon McNutt and RidgeRun, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "clock.h"
-#include "clock2xxx.h"
-#include "cm.h"
-#include "cm-regbits-24xx.h"
-
-struct clk_hw *dclk_hw;
-/*
- * Omap24xx specific clock functions
- */
-
-/*
- * Switch the MPU rate if specified on cmdline.  We cannot do this
- * early until cmdline is parsed.  XXX This should be removed from the
- * clock code and handled by the OPP layer code in the near future.
- */
-static int __init omap2xxx_clk_arch_init(void)
-{
-	int ret;
-
-	if (!cpu_is_omap24xx())
-		return 0;
-
-	ret = omap2_clk_switch_mpurate_at_boot("virt_prcm_set");
-	if (!ret)
-		omap2_clk_print_new_rates("sys_ck", "dpll_ck", "mpu_ck");
-
-	return ret;
-}
-
-omap_arch_initcall(omap2xxx_clk_arch_init);
-
-
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
deleted file mode 100644
index 4596468..0000000
--- a/arch/arm/mach-omap2/clock34xx.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * OMAP3-specific clock framework functions
- *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- *
- * Paul Walmsley
- * Jouni Högander
- *
- * Parts of this code are based on code written by
- * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
- * Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "clock.h"
-#include "clock34xx.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-
-/**
- * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
- *
- * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
- * from the CM_{I,F}CLKEN bit.  Pass back the correct info via
- * @idlest_reg and @idlest_bit.  No return value.
- */
-static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
-					    void __iomem **idlest_reg,
-					    u8 *idlest_bit,
-					    u8 *idlest_val)
-{
-	u32 r;
-
-	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
-	*idlest_reg = (__force void __iomem *)r;
-	*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
-	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
-}
-const struct clk_hw_omap_ops clkhwops_omap3430es2_ssi_wait = {
-	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
-
-const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
-	.allow_idle	= omap2_clkt_iclk_allow_idle,
-	.deny_idle	= omap2_clkt_iclk_deny_idle,
-	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
-	.find_companion = omap2_clk_dflt_find_companion,
-};
-
-/**
- * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
- *
- * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
- * target IDLEST bits.  For our purposes, we are concerned with the
- * target IDLEST bits, which exist at a different bit position than
- * the *CLKEN bit position for these modules (DSS and USBHOST) (The
- * default find_idlest code assumes that they are at the same
- * position.)  No return value.
- */
-static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
-						    void __iomem **idlest_reg,
-						    u8 *idlest_bit,
-						    u8 *idlest_val)
-{
-	u32 r;
-
-	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
-	*idlest_reg = (__force void __iomem *)r;
-	/* USBHOST_IDLE has same shift */
-	*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
-	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
-}
-
-const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
-	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
-
-const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
-	.allow_idle	= omap2_clkt_iclk_allow_idle,
-	.deny_idle	= omap2_clkt_iclk_deny_idle,
-	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
-
-/**
- * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
- *
- * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
- * shift from the CM_{I,F}CLKEN bit.  Pass back the correct info via
- * @idlest_reg and @idlest_bit.  No return value.
- */
-static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
-						 void __iomem **idlest_reg,
-						 u8 *idlest_bit,
-						 u8 *idlest_val)
-{
-	u32 r;
-
-	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
-	*idlest_reg = (__force void __iomem *)r;
-	*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
-	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
-}
-
-const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
-	.allow_idle	= omap2_clkt_iclk_allow_idle,
-	.deny_idle	= omap2_clkt_iclk_deny_idle,
-	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
-
-const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait = {
-	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
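
Several of the find_idlest helpers removed above derive the CM_IDLEST address from the CM_*CLKEN address with (reg & ~0xf0) | 0x20: they mask off the per-register offset bits and substitute 0x20, the CM_IDLEST offset within the module. A standalone sketch with a hypothetical register address:

/* Standalone sketch (not kernel code) of the CM_*CLKEN -> CM_IDLEST address
 * derivation used repeatedly above. */
#include <stdio.h>

int main(void)
{
	unsigned long fclken_reg = 0x48004A00UL;	/* hypothetical CM_FCLKEN1 address */
	unsigned long idlest_reg = (fclken_reg & ~0xf0UL) | 0x20;

	/* prints 0x48004a00 -> 0x48004a20 */
	printf("CM_FCLKEN at %#lx -> CM_IDLEST at %#lx\n", fclken_reg, idlest_reg);
	return 0;
}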
diff --git a/arch/arm/mach-omap2/clock34xx.h b/arch/arm/mach-omap2/clock34xx.h
deleted file mode 100644
index 084ba71..0000000
--- a/arch/arm/mach-omap2/clock34xx.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * OMAP34xx clock function prototypes and macros
- *
- * Copyright (C) 2007-2010 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCK34XX_H
-
-extern const struct clkops clkops_omap3430es2_ssi_wait;
-extern const struct clkops clkops_omap3430es2_iclk_ssi_wait;
-extern const struct clkops clkops_omap3430es2_hsotgusb_wait;
-extern const struct clkops clkops_omap3430es2_iclk_hsotgusb_wait;
-extern const struct clkops clkops_omap3430es2_dss_usbhost_wait;
-extern const struct clkops clkops_omap3430es2_iclk_dss_usbhost_wait;
-
-#endif
diff --git a/arch/arm/mach-omap2/clock3517.c b/arch/arm/mach-omap2/clock3517.c
deleted file mode 100644
index 4d79ae2..0000000
--- a/arch/arm/mach-omap2/clock3517.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * OMAP3517/3505-specific clock framework functions
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- * Copyright (C) 2011 Nokia Corporation
- *
- * Ranjith Lohithakshan
- * Paul Walmsley
- *
- * Parts of this code are based on code written by
- * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
- * Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "clock.h"
-#include "clock3517.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-
-/*
- * In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported
- * in the same register at a bit offset of 0x8. The EN_ACK for ICK is
- * at an offset of 4 from ICK enable bit.
- */
-#define AM35XX_IPSS_ICK_MASK			0xF
-#define AM35XX_IPSS_ICK_EN_ACK_OFFSET 		0x4
-#define AM35XX_IPSS_ICK_FCK_OFFSET		0x8
-#define AM35XX_IPSS_CLK_IDLEST_VAL		0
-
-/**
- * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
- *
- * The interface clocks on the AM35xx IPSS reflect the clock idle status
- * in the enable register itself, at a bit offset of 4 from the enable
- * bit. A value of 1 indicates that the clock is enabled.
- */
-static void am35xx_clk_find_idlest(struct clk_hw_omap *clk,
-					    void __iomem **idlest_reg,
-					    u8 *idlest_bit,
-					    u8 *idlest_val)
-{
-	*idlest_reg = (__force void __iomem *)(clk->enable_reg);
-	*idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET;
-	*idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL;
-}
-
-/**
- * am35xx_clk_find_companion - find companion clock to @clk
- * @clk: struct clk * to find the companion clock of
- * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
- * @other_bit: u8 ** to return the companion clock bit shift in
- *
- * Some clocks don't have companion clocks.  For example, modules with
- * only an interface clock (such as HECC) don't have a companion
- * clock.  Right now, this code relies on the hardware exporting a bit
- * in the correct companion register that indicates that the
- * nonexistent 'companion clock' is active.  Future patches will
- * associate this type of code with per-module data structures to
- * avoid this issue, and remove the casts.  No return value.
- */
-static void am35xx_clk_find_companion(struct clk_hw_omap *clk,
-				      void __iomem **other_reg,
-				      u8 *other_bit)
-{
-	*other_reg = (__force void __iomem *)(clk->enable_reg);
-	if (clk->enable_bit & AM35XX_IPSS_ICK_MASK)
-		*other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET;
-	else
-		*other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET;
-}
-const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = {
-	.find_idlest	= am35xx_clk_find_idlest,
-	.find_companion	= am35xx_clk_find_companion,
-};
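A standalone sketch (not part of this patch) of the bit arithmetic the two helpers above rely on: for an AM35xx IPSS module whose ICK enable bit falls within the 0xF mask, the ready (EN_ACK) indication sits 4 bit positions above the enable bit and the companion FCK enable bit 8 positions above it, per the offsets defined in this file. The example bit position is hypothetical.

#include <stdio.h>

#define AM35XX_IPSS_ICK_MASK		0xF
#define AM35XX_IPSS_ICK_EN_ACK_OFFSET	0x4
#define AM35XX_IPSS_ICK_FCK_OFFSET	0x8

int main(void)
{
	unsigned int ick_bit = 1;	/* hypothetical ICK enable bit position */
	unsigned int ack_bit, other_bit;

	/* Idle status (EN_ACK) lives 4 bits above the enable bit, in the same register */
	ack_bit = ick_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET;

	/* An ICK bit's companion FCK bit is 8 positions higher (and vice versa) */
	if (ick_bit & AM35XX_IPSS_ICK_MASK)
		other_bit = ick_bit + AM35XX_IPSS_ICK_FCK_OFFSET;
	else
		other_bit = ick_bit - AM35XX_IPSS_ICK_FCK_OFFSET;

	printf("ICK enable bit %u: EN_ACK bit %u, companion FCK bit %u\n",
	       ick_bit, ack_bit, other_bit);
	return 0;
}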
-
-/**
- * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS
- * @clk: struct clk * being enabled
- * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
- * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
- * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
- *
- * The IPSS target CM_IDLEST bit is at a different shift from the
- * CM_{I,F}CLKEN bit.  Pass back the correct info via @idlest_reg
- * and @idlest_bit.  No return value.
- */
-static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk,
-					    void __iomem **idlest_reg,
-					    u8 *idlest_bit,
-					    u8 *idlest_val)
-{
-	u32 r;
-
-	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
-	*idlest_reg = (__force void __iomem *)r;
-	*idlest_bit = AM35XX_ST_IPSS_SHIFT;
-	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
-}
-
-const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = {
-	.allow_idle	= omap2_clkt_iclk_allow_idle,
-	.deny_idle	= omap2_clkt_iclk_deny_idle,
-	.find_idlest	= am35xx_clk_ipss_find_idlest,
-	.find_companion	= omap2_clk_dflt_find_companion,
-};
diff --git a/arch/arm/mach-omap2/clock3517.h b/arch/arm/mach-omap2/clock3517.h
deleted file mode 100644
index ca5e5a6..0000000
--- a/arch/arm/mach-omap2/clock3517.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * OMAP3517/3505 clock function prototypes and macros
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- * Copyright (C) 2010 Nokia Corporation
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK3517_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCK3517_H
-
-extern const struct clkops clkops_am35xx_ipss_module_wait;
-extern const struct clkops clkops_am35xx_ipss_wait;
-
-#endif
diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c
deleted file mode 100644
index 91ccb96..0000000
--- a/arch/arm/mach-omap2/clock36xx.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * OMAP36xx-specific clkops
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- * Copyright (C) 2010 Nokia Corporation
- *
- * Mike Turquette
- * Vijaykumar GN
- * Paul Walmsley
- *
- * Parts of this code are based on code written by
- * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu,
- * Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-
-#include "clock.h"
-#include "clock36xx.h"
-#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
-
-/**
- * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
- *         from the HSDivider PWRDN problem. Implements Errata ID: i556.
- * @clk: DPLL output struct clk
- *
- * 3630 only: dpll3_m3_ck, dpll4_m2_ck, dpll4_m3_ck, dpll4_m4_ck,
- * dpll4_m5_ck & dpll4_m6_ck dividers gets loaded with reset
- * valueafter their respective PWRDN bits are set.  Any dummy write
- * (Any other value different from the Read value) to the
- * corresponding CM_CLKSEL register will refresh the dividers.
- */
-int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
-{
-	struct clk_divider *parent;
-	struct clk_hw *parent_hw;
-	u32 dummy_v, orig_v;
-	struct clk_hw_omap *omap_clk = to_clk_hw_omap(clk);
-	int ret;
-
-	/* Clear PWRDN bit of HSDIVIDER */
-	ret = omap2_dflt_clk_enable(clk);
-
-	parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
-	parent = to_clk_divider(parent_hw);
-
-	/* Restore the dividers */
-	if (!ret) {
-		orig_v = omap2_clk_readl(omap_clk, parent->reg);
-		dummy_v = orig_v;
-
-		/* Write any other value different from the Read value */
-		dummy_v ^= (1 << parent->shift);
-		omap2_clk_writel(dummy_v, omap_clk, parent->reg);
-
-		/* Write the original divider */
-		omap2_clk_writel(orig_v, omap_clk, parent->reg);
-	}
-
-	return ret;
-}
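The errata handling above reduces to a read / dummy-write / restore sequence on the divider field. A minimal standalone sketch of that pattern, with a plain variable standing in for the CM_CLKSEL register and a hypothetical field shift:

#include <stdio.h>

int main(void)
{
	unsigned int cm_clksel = 0x12;	/* hypothetical CM_CLKSEL contents */
	unsigned int shift = 0;		/* hypothetical divider field shift */
	unsigned int orig_v, dummy_v;

	orig_v = cm_clksel;			/* read the current divider setting */
	dummy_v = orig_v ^ (1u << shift);	/* any value different from the read value */
	cm_clksel = dummy_v;			/* dummy write refreshes the HSDIVIDER */
	cm_clksel = orig_v;			/* write back the original divider */

	printf("restored CM_CLKSEL = 0x%08x\n", cm_clksel);
	return 0;
}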
diff --git a/arch/arm/mach-omap2/clock36xx.h b/arch/arm/mach-omap2/clock36xx.h
deleted file mode 100644
index 945bb7f..0000000
--- a/arch/arm/mach-omap2/clock36xx.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * OMAP36xx clock function prototypes and macros
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- * Copyright (C) 2010 Nokia Corporation
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCK36XX_H
-
-extern int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *hw);
-
-#endif
diff --git a/arch/arm/mach-omap2/clock3xxx.c b/arch/arm/mach-omap2/clock3xxx.c
deleted file mode 100644
index a9e86db..0000000
--- a/arch/arm/mach-omap2/clock3xxx.c
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * OMAP3-specific clock framework functions
- *
- * Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2010 Nokia Corporation
- *
- * Paul Walmsley
- * Jouni Högander
- *
- * Parts of this code are based on code written by
- * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "clock.h"
-#include "clock3xxx.h"
-#include "prm2xxx_3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "cm2xxx_3xxx.h"
-#include "cm-regbits-34xx.h"
-
-/*
- * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
- * that are sourced by DPLL5, and both of these require this clock
- * to be at 120 MHz for proper operation.
- */
-#define DPLL5_FREQ_FOR_USBHOST		120000000
-
-/* needed by omap3_core_dpll_m2_set_rate() */
-struct clk *sdrc_ick_p, *arm_fck_p;
-
-/**
- * omap3_dpll4_set_rate - set rate for omap3 per-dpll
- * @hw: clock to change
- * @rate: target rate for clock
- * @parent_rate: rate of the parent clock
- *
- * Check if the current SoC supports the per-dpll reprogram operation
- * or not, and then do the rate change if supported. Returns -EINVAL
- * if not supported, 0 for success, and potential error codes from the
- * clock rate change.
- */
-int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate)
-{
-	/*
-	 * According to the 12-5 CDP code from TI, "Limitation 2.5"
-	 * on 3430ES1 prevents us from changing DPLL multipliers or dividers
-	 * on DPLL4.
-	 */
-	if (ti_clk_features.flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
-		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
-		return -EINVAL;
-	}
-
-	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
-}
-
-/**
- * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
- * @hw: clock to change
- * @rate: target rate for clock
- * @parent_rate: rate of the parent clock
- * @index: parent index, 0 - reference clock, 1 - bypass clock
- *
- * Check if the current SoC supports the per-dpll reprogram operation
- * or not, and then do the rate + parent change if supported. Returns
- * -EINVAL if not supported, 0 for success, and potential error codes
- * from the clock rate change.
- */
-int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
-				    unsigned long parent_rate, u8 index)
-{
-	if (ti_clk_features.flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
-		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
-		return -EINVAL;
-	}
-
-	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
-						      index);
-}
-
-void __init omap3_clk_lock_dpll5(void)
-{
-	struct clk *dpll5_clk;
-	struct clk *dpll5_m2_clk;
-
-	dpll5_clk = clk_get(NULL, "dpll5_ck");
-	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
-	clk_prepare_enable(dpll5_clk);
-
-	/* Program dpll5_m2_clk divider for no division */
-	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
-	clk_prepare_enable(dpll5_m2_clk);
-	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
-
-	clk_disable_unprepare(dpll5_m2_clk);
-	clk_disable_unprepare(dpll5_clk);
-	return;
-}
-
-/* Common clock code */
-
-/*
- * Switch the MPU rate if specified on cmdline.  We cannot do this
- * early until cmdline is parsed.  XXX This should be removed from the
- * clock code and handled by the OPP layer code in the near future.
- */
-static int __init omap3xxx_clk_arch_init(void)
-{
-	int ret;
-
-	if (!cpu_is_omap34xx())
-		return 0;
-
-	ret = omap2_clk_switch_mpurate_at_boot("dpll1_ck");
-	if (!ret)
-		omap2_clk_print_new_rates("osc_sys_ck", "core_ck", "arm_fck");
-
-	return ret;
-}
-
-omap_arch_initcall(omap3xxx_clk_arch_init);
-
-
diff --git a/arch/arm/mach-omap2/clock44xx.h b/arch/arm/mach-omap2/clock44xx.h
deleted file mode 100644
index 287a46f..0000000
--- a/arch/arm/mach-omap2/clock44xx.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * OMAP4 clock function prototypes and macros
- *
- * Copyright (C) 2009 Texas Instruments, Inc.
- * Copyright (C) 2010 Nokia Corporation
- */
-
-#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
-#define __ARCH_ARM_MACH_OMAP2_CLOCK44XX_H
-
-/*
- * OMAP4430_REGM4XEN_MULT: If the CM_CLKMODE_DPLL_ABE.DPLL_REGM4XEN bit is
- *    set, then the DPLL's lock frequency is multiplied by 4 (OMAP4430 TRM
- *    vV Section 3.6.3.3.1 "DPLLs Output Clocks Parameters")
- */
-#define OMAP4430_REGM4XEN_MULT	4
-
-int omap4xxx_clk_init(void);
-
-#endif
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
deleted file mode 100644
index 61b60df..0000000
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- *  linux/arch/arm/mach-omap2/clock_common_data.c
- *
- *  Copyright (C) 2005-2009 Texas Instruments, Inc.
- *  Copyright (C) 2004-2009 Nokia Corporation
- *
- *  Contacts:
- *  Richard Woodruff <r-woodruff2@ti.com>
- *  Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This file contains clock data that is common to both the OMAP2xxx and
- * OMAP3xxx clock definition files.
- */
-
-#include "clock.h"
-
-/* clksel_rate data common to 24xx/343x */
-const struct clksel_rate gpt_32k_rates[] = {
-	 { .div = 1, .val = 0, .flags = RATE_IN_24XX | RATE_IN_3XXX },
-	 { .div = 0 }
-};
-
-const struct clksel_rate gpt_sys_rates[] = {
-	 { .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX },
-	 { .div = 0 }
-};
-
-const struct clksel_rate gfx_l3_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX | RATE_IN_3XXX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX | RATE_IN_3XXX },
-	{ .div = 3, .val = 3, .flags = RATE_IN_243X | RATE_IN_3XXX },
-	{ .div = 4, .val = 4, .flags = RATE_IN_243X | RATE_IN_3XXX },
-	{ .div = 0 }
-};
-
-const struct clksel_rate dsp_ick_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
-	{ .div = 3, .val = 3, .flags = RATE_IN_243X },
-	{ .div = 0 },
-};
-
-
-/* clksel_rate blocks shared between OMAP44xx and AM33xx */
-
-const struct clksel_rate div_1_0_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 0 },
-};
-
-const struct clksel_rate div3_1to4_rates[] = {
-	{ .div = 1, .val = 0, .flags = RATE_IN_4430 },
-	{ .div = 2, .val = 1, .flags = RATE_IN_4430 },
-	{ .div = 4, .val = 2, .flags = RATE_IN_4430 },
-	{ .div = 0 },
-};
-
-const struct clksel_rate div_1_1_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 0 },
-};
-
-const struct clksel_rate div_1_2_rates[] = {
-	{ .div = 1, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 0 },
-};
-
-const struct clksel_rate div_1_3_rates[] = {
-	{ .div = 1, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 0 },
-};
-
-const struct clksel_rate div_1_4_rates[] = {
-	{ .div = 1, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 0 },
-};
-
-const struct clksel_rate div31_1to31_rates[] = {
-	{ .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 2, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 3, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 4, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 5, .val = 5, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 6, .val = 6, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 7, .val = 7, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 8, .val = 8, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 9, .val = 9, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 10, .val = 10, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 11, .val = 11, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 12, .val = 12, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 13, .val = 13, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 14, .val = 14, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 15, .val = 15, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 16, .val = 16, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 17, .val = 17, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 18, .val = 18, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 19, .val = 19, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 20, .val = 20, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 21, .val = 21, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 22, .val = 22, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 23, .val = 23, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 24, .val = 24, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 25, .val = 25, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 26, .val = 26, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 27, .val = 27, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 28, .val = 28, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 29, .val = 29, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 30, .val = 30, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
-	{ .div = 0 },
-};
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index 77bab5f..2c398ce 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -216,7 +216,8 @@
 extern void __init omap243x_clockdomains_init(void);
 extern void __init omap3xxx_clockdomains_init(void);
 extern void __init am33xx_clockdomains_init(void);
-extern void __init ti81xx_clockdomains_init(void);
+extern void __init ti814x_clockdomains_init(void);
+extern void __init ti816x_clockdomains_init(void);
 extern void __init omap44xx_clockdomains_init(void);
 extern void __init omap54xx_clockdomains_init(void);
 extern void __init dra7xx_clockdomains_init(void);
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 57d5df0..7581e03 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -331,7 +331,7 @@
 	.dep_bit	  = DRA7XX_L4PER2_STATDEP_SHIFT,
 	.wkdep_srcs	  = l4per2_wkup_sleep_deps,
 	.sleepdep_srcs	  = l4per2_wkup_sleep_deps,
-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
+	.flags		  = CLKDM_CAN_SWSUP,
 };
 
 static struct clockdomain mpu0_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/clockdomains81xx_data.c b/arch/arm/mach-omap2/clockdomains81xx_data.c
index ce2a820..53442c8 100644
--- a/arch/arm/mach-omap2/clockdomains81xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains81xx_data.c
@@ -165,7 +165,24 @@
 	.flags		= CLKDM_CAN_SWSUP,
 };
 
-static struct clockdomain *clockdomains_ti81xx[] __initdata = {
+static struct clockdomain *clockdomains_ti814x[] __initdata = {
+	&alwon_l3_slow_81xx_clkdm,
+	&alwon_l3_med_81xx_clkdm,
+	&alwon_l3_fast_81xx_clkdm,
+	&alwon_ethernet_81xx_clkdm,
+	&mmu_81xx_clkdm,
+	&mmu_cfg_81xx_clkdm,
+	NULL,
+};
+
+void __init ti814x_clockdomains_init(void)
+{
+	clkdm_register_platform_funcs(&am33xx_clkdm_operations);
+	clkdm_register_clkdms(clockdomains_ti814x);
+	clkdm_complete_init();
+}
+
+static struct clockdomain *clockdomains_ti816x[] __initdata = {
 	&alwon_mpu_816x_clkdm,
 	&alwon_l3_slow_81xx_clkdm,
 	&alwon_l3_med_81xx_clkdm,
@@ -185,10 +202,10 @@
 	NULL,
 };
 
-void __init ti81xx_clockdomains_init(void)
+void __init ti816x_clockdomains_init(void)
 {
 	clkdm_register_platform_funcs(&am33xx_clkdm_operations);
-	clkdm_register_clkdms(clockdomains_ti81xx);
+	clkdm_register_clkdms(clockdomains_ti816x);
 	clkdm_complete_init();
 }
 #endif
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e..484cdad 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@
 void __init omap_reserve(void)
 {
 	omap_secure_ram_reserve_memblock();
+	omap_barrier_reserve_memblock();
 }
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index cf3cf22..92e92cf 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -189,6 +189,15 @@
 }
 #endif
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+void omap_barrier_reserve_memblock(void);
+void omap_barriers_init(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{
+}
+#endif
+
 /* This gets called from mach-omap2/io.c, do not call this */
 void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
 
@@ -198,6 +207,7 @@
 void __init am33xx_map_io(void);
 void __init omap4_map_io(void);
 void __init omap5_map_io(void);
+void __init dra7xx_map_io(void);
 void __init ti81xx_map_io(void);
 
 /**
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index f008930..cf58551 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -652,6 +652,7 @@
 	{ .compatible = "ti,am4-scm", .data = &ctrl_data },
 	{ .compatible = "ti,omap2-scm", .data = &omap2_ctrl_data },
 	{ .compatible = "ti,omap3-scm", .data = &omap2_ctrl_data },
+	{ .compatible = "ti,dm814-scm", .data = &ctrl_data },
 	{ .compatible = "ti,dm816-scrm", .data = &ctrl_data },
 	{ .compatible = "ti,omap4-scm-core", .data = &ctrl_data },
 	{ .compatible = "ti,omap5-scm-core", .data = &ctrl_data },
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
deleted file mode 100644
index 44e57ec..0000000
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * OMAP3/4 - specific DPLL control functions
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- * Copyright (C) 2009-2010 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Testing and integration fixes by Jouni Högander
- *
- * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
- * Menon
- *
- * Parts of this code are based on code written by
- * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/clkdev.h>
-
-#include "clockdomain.h"
-#include "clock.h"
-
-/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
-#define DPLL_AUTOIDLE_DISABLE			0x0
-#define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1
-
-#define MAX_DPLL_WAIT_TRIES		1000000
-
-/* Private functions */
-
-/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
-static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
-{
-	const struct dpll_data *dd;
-	u32 v;
-
-	dd = clk->dpll_data;
-
-	v = omap2_clk_readl(clk, dd->control_reg);
-	v &= ~dd->enable_mask;
-	v |= clken_bits << __ffs(dd->enable_mask);
-	omap2_clk_writel(v, clk, dd->control_reg);
-}
-
-/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
-static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
-{
-	const struct dpll_data *dd;
-	int i = 0;
-	int ret = -EINVAL;
-	const char *clk_name;
-
-	dd = clk->dpll_data;
-	clk_name = __clk_get_name(clk->hw.clk);
-
-	state <<= __ffs(dd->idlest_mask);
-
-	while (((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask)
-		!= state) && i < MAX_DPLL_WAIT_TRIES) {
-		i++;
-		udelay(1);
-	}
-
-	if (i == MAX_DPLL_WAIT_TRIES) {
-		printk(KERN_ERR "clock: %s failed transition to '%s'\n",
-		       clk_name, (state) ? "locked" : "bypassed");
-	} else {
-		pr_debug("clock: %s transition to '%s' in %d loops\n",
-			 clk_name, (state) ? "locked" : "bypassed", i);
-
-		ret = 0;
-	}
-
-	return ret;
-}
-
-/* From 3430 TRM ES2 4.7.6.2 */
-static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
-{
-	unsigned long fint;
-	u16 f = 0;
-
-	fint = __clk_get_rate(clk->dpll_data->clk_ref) / n;
-
-	pr_debug("clock: fint is %lu\n", fint);
-
-	if (fint >= 750000 && fint <= 1000000)
-		f = 0x3;
-	else if (fint > 1000000 && fint <= 1250000)
-		f = 0x4;
-	else if (fint > 1250000 && fint <= 1500000)
-		f = 0x5;
-	else if (fint > 1500000 && fint <= 1750000)
-		f = 0x6;
-	else if (fint > 1750000 && fint <= 2100000)
-		f = 0x7;
-	else if (fint > 7500000 && fint <= 10000000)
-		f = 0xB;
-	else if (fint > 10000000 && fint <= 12500000)
-		f = 0xC;
-	else if (fint > 12500000 && fint <= 15000000)
-		f = 0xD;
-	else if (fint > 15000000 && fint <= 17500000)
-		f = 0xE;
-	else if (fint > 17500000 && fint <= 21000000)
-		f = 0xF;
-	else
-		pr_debug("clock: unknown freqsel setting for %d\n", n);
-
-	return f;
-}
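Worked example for the FREQSEL bands above, with hypothetical numbers: a 19.2 MHz reference divided by n = 16 gives fint = 1.2 MHz, which lands in the 1.0-1.25 MHz band and therefore selects 0x4. A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long ref = 19200000;	/* hypothetical 19.2 MHz reference clock */
	unsigned int n = 16;		/* hypothetical N divider */
	unsigned long fint = ref / n;
	unsigned int f = 0;

	/* Same band boundaries as the table above (only the relevant band shown) */
	if (fint > 1000000 && fint <= 1250000)
		f = 0x4;

	printf("fint = %lu Hz -> freqsel = 0x%x\n", fint, f);
	return 0;
}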
-
-/*
- * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
- * readiness before returning.  Will save and restore the DPLL's
- * autoidle state across the enable, per the CDP code.  If the DPLL
- * locked successfully, return 0; if the DPLL did not lock in the time
- * allotted, or DPLL3 was passed in, return -EINVAL.
- */
-static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
-{
-	const struct dpll_data *dd;
-	u8 ai;
-	u8 state = 1;
-	int r = 0;
-
-	pr_debug("clock: locking DPLL %s\n", __clk_get_name(clk->hw.clk));
-
-	dd = clk->dpll_data;
-	state <<= __ffs(dd->idlest_mask);
-
-	/* Check if already locked */
-	if ((omap2_clk_readl(clk, dd->idlest_reg) & dd->idlest_mask) == state)
-		goto done;
-
-	ai = omap3_dpll_autoidle_read(clk);
-
-	if (ai)
-		omap3_dpll_deny_idle(clk);
-
-	_omap3_dpll_write_clken(clk, DPLL_LOCKED);
-
-	r = _omap3_wait_dpll_status(clk, 1);
-
-	if (ai)
-		omap3_dpll_allow_idle(clk);
-
-done:
-	return r;
-}
-
-/*
- * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
- * bypass mode, the DPLL's rate is set equal to its parent clock's
- * rate.  Waits for the DPLL to report readiness before returning.
- * Will save and restore the DPLL's autoidle state across the enable,
- * per the CDP code.  If the DPLL entered bypass mode successfully,
- * return 0; if the DPLL did not enter bypass in the time allotted, or
- * DPLL3 was passed in, or the DPLL does not support low-power bypass,
- * return -EINVAL.
- */
-static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
-{
-	int r;
-	u8 ai;
-
-	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
-		return -EINVAL;
-
-	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
-		 __clk_get_name(clk->hw.clk));
-
-	ai = omap3_dpll_autoidle_read(clk);
-
-	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
-
-	r = _omap3_wait_dpll_status(clk, 0);
-
-	if (ai)
-		omap3_dpll_allow_idle(clk);
-
-	return r;
-}
-
-/*
- * _omap3_noncore_dpll_stop - instruct a DPLL to stop
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enter low-power stop. Will save and
- * restore the DPLL's autoidle state across the stop, per the CDP
- * code.  If DPLL3 was passed in, or the DPLL does not support
- * low-power stop, return -EINVAL; otherwise, return 0.
- */
-static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
-{
-	u8 ai;
-
-	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
-		return -EINVAL;
-
-	pr_debug("clock: stopping DPLL %s\n", __clk_get_name(clk->hw.clk));
-
-	ai = omap3_dpll_autoidle_read(clk);
-
-	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
-
-	if (ai)
-		omap3_dpll_allow_idle(clk);
-
-	return 0;
-}
-
-/**
- * _lookup_dco - Lookup DCO used by j-type DPLL
- * @clk: pointer to a DPLL struct clk
- * @dco: digital control oscillator selector
- * @m: DPLL multiplier to set
- * @n: DPLL divider to set
- *
- * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
- *
- * XXX This code is not needed for 3430/AM35xx; can it be optimized
- * out in non-multi-OMAP builds for those chips?
- */
-static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
-{
-	unsigned long fint, clkinp; /* watch out for overflow */
-
-	clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
-	fint = (clkinp / n) * m;
-
-	if (fint < 1000000000)
-		*dco = 2;
-	else
-		*dco = 4;
-}
-
-/**
- * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
- * @clk: pointer to a DPLL struct clk
- * @sd_div: target sigma-delta divider
- * @m: DPLL multiplier to set
- * @n: DPLL divider to set
- *
- * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
- *
- * XXX This code is not needed for 3430/AM35xx; can it be optimized
- * out in non-multi-OMAP builds for those chips?
- */
-static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
-{
-	unsigned long clkinp, sd; /* watch out for overflow */
-	int mod1, mod2;
-
-	clkinp = __clk_get_rate(__clk_get_parent(clk->hw.clk));
-
-	/*
-	 * target sigma-delta to near 250MHz
-	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
-	 */
-	clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */
-	mod1 = (clkinp * m) % (250 * n);
-	sd = (clkinp * m) / (250 * n);
-	mod2 = sd % 10;
-	sd /= 10;
-
-	if (mod1 || mod2)
-		sd++;
-	*sd_div = sd;
-}
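A worked pass through the sigma-delta arithmetic above, using hypothetical values clkinp = 38.4 MHz, m = 360, n = 12: the target is ceil((38.4 * 360) / (250 * 12)) = ceil(4.608) = 5, and the fixed-point steps reach the same answer:

#include <stdio.h>

int main(void)
{
	unsigned long clkinp = 38400000;	/* hypothetical 38.4 MHz DPLL input */
	unsigned long m = 360, n = 12;		/* hypothetical multiplier/divider */
	unsigned long sd, mod1, mod2;

	/* Same fixed-point steps as _lookup_sddiv() above */
	clkinp /= 100000;		/* 38.4 MHz -> 384, i.e. units of 100 kHz */
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;
	if (mod1 || mod2)
		sd++;

	printf("sd_div = %lu (expected 5)\n", sd);
	return 0;
}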
-
-/*
- * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
- * @clk:	struct clk * of DPLL to set
- * @freqsel:	FREQSEL value to set
- *
- * Program the DPLL with the last M, N values calculated, and wait for
- * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
- */
-static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
-{
-	struct dpll_data *dd = clk->dpll_data;
-	u8 dco, sd_div;
-	u32 v;
-
-	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
-	_omap3_noncore_dpll_bypass(clk);
-
-	/*
-	 * Set jitter correction. Jitter correction applicable for OMAP343X
-	 * only since freqsel field is no longer present on other devices.
-	 */
-	if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) {
-		v = omap2_clk_readl(clk, dd->control_reg);
-		v &= ~dd->freqsel_mask;
-		v |= freqsel << __ffs(dd->freqsel_mask);
-		omap2_clk_writel(v, clk, dd->control_reg);
-	}
-
-	/* Set DPLL multiplier, divider */
-	v = omap2_clk_readl(clk, dd->mult_div1_reg);
-
-	/* Handle Duty Cycle Correction */
-	if (dd->dcc_mask) {
-		if (dd->last_rounded_rate >= dd->dcc_rate)
-			v |= dd->dcc_mask; /* Enable DCC */
-		else
-			v &= ~dd->dcc_mask; /* Disable DCC */
-	}
-
-	v &= ~(dd->mult_mask | dd->div1_mask);
-	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
-	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
-
-	/* Configure dco and sd_div for dplls that have these fields */
-	if (dd->dco_mask) {
-		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
-		v &= ~(dd->dco_mask);
-		v |= dco << __ffs(dd->dco_mask);
-	}
-	if (dd->sddiv_mask) {
-		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
-			      dd->last_rounded_n);
-		v &= ~(dd->sddiv_mask);
-		v |= sd_div << __ffs(dd->sddiv_mask);
-	}
-
-	omap2_clk_writel(v, clk, dd->mult_div1_reg);
-
-	/* Set 4X multiplier and low-power mode */
-	if (dd->m4xen_mask || dd->lpmode_mask) {
-		v = omap2_clk_readl(clk, dd->control_reg);
-
-		if (dd->m4xen_mask) {
-			if (dd->last_rounded_m4xen)
-				v |= dd->m4xen_mask;
-			else
-				v &= ~dd->m4xen_mask;
-		}
-
-		if (dd->lpmode_mask) {
-			if (dd->last_rounded_lpmode)
-				v |= dd->lpmode_mask;
-			else
-				v &= ~dd->lpmode_mask;
-		}
-
-		omap2_clk_writel(v, clk, dd->control_reg);
-	}
-
-	/* We let the clock framework set the other output dividers later */
-
-	/* REVISIT: Set ramp-up delay? */
-
-	_omap3_noncore_dpll_lock(clk);
-
-	return 0;
-}
-
-/* Public functions */
-
-/**
- * omap3_dpll_recalc - recalculate DPLL rate
- * @clk: DPLL struct clk
- *
- * Recalculate and propagate the DPLL rate.
- */
-unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-
-	return omap2_get_dpll_rate(clk);
-}
-
-/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
-
-/**
- * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
- * The choice of modes depends on the DPLL's programmed rate: if it is
- * the same as the DPLL's parent clock, it will enter bypass;
- * otherwise, it will enter lock.  This code will wait for the DPLL to
- * indicate readiness before returning, unless the DPLL takes too long
- * to enter the target state.  Intended to be used as the struct clk's
- * enable function.  If DPLL3 was passed in, or the DPLL does not
- * support low-power stop, or if the DPLL took too long to enter
- * bypass or lock, return -EINVAL; otherwise, return 0.
- */
-int omap3_noncore_dpll_enable(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	int r;
-	struct dpll_data *dd;
-	struct clk_hw *parent;
-
-	dd = clk->dpll_data;
-	if (!dd)
-		return -EINVAL;
-
-	if (clk->clkdm) {
-		r = clkdm_clk_enable(clk->clkdm, hw->clk);
-		if (r) {
-			WARN(1,
-			     "%s: could not enable %s's clockdomain %s: %d\n",
-			     __func__, __clk_get_name(hw->clk),
-			     clk->clkdm->name, r);
-			return r;
-		}
-	}
-
-	parent = __clk_get_hw(__clk_get_parent(hw->clk));
-
-	if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
-		WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
-		r = _omap3_noncore_dpll_bypass(clk);
-	} else {
-		WARN_ON(parent != __clk_get_hw(dd->clk_ref));
-		r = _omap3_noncore_dpll_lock(clk);
-	}
-
-	return r;
-}
-
-/**
- * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
- * @clk: pointer to a DPLL struct clk
- *
- * Instructs a non-CORE DPLL to enter low-power stop.  This function is
- * intended for use in struct clkops.  No return value.
- */
-void omap3_noncore_dpll_disable(struct clk_hw *hw)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-
-	_omap3_noncore_dpll_stop(clk);
-	if (clk->clkdm)
-		clkdm_clk_disable(clk->clkdm, hw->clk);
-}
-
-
-/* Non-CORE DPLL rate set code */
-
-/**
- * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
- * @hw: pointer to the clock to determine rate for
- * @rate: target rate for the DPLL
- * @best_parent_rate: pointer for returning best parent rate
- * @best_parent_clk: pointer for returning best parent clock
- *
- * Determines which DPLL mode to use for reaching a desired target rate.
- * Checks whether the DPLL shall be in bypass or locked mode, and if
- * locked, calculates the M,N values for the DPLL via round-rate.
- * Returns a positive clock rate on success, or a negative error value
- * on failure.
- */
-long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
-				       unsigned long min_rate,
-				       unsigned long max_rate,
-				       unsigned long *best_parent_rate,
-				       struct clk_hw **best_parent_clk)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	struct dpll_data *dd;
-
-	if (!hw || !rate)
-		return -EINVAL;
-
-	dd = clk->dpll_data;
-	if (!dd)
-		return -EINVAL;
-
-	if (__clk_get_rate(dd->clk_bypass) == rate &&
-	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
-		*best_parent_clk = __clk_get_hw(dd->clk_bypass);
-	} else {
-		rate = omap2_dpll_round_rate(hw, rate, best_parent_rate);
-		*best_parent_clk = __clk_get_hw(dd->clk_ref);
-	}
-
-	*best_parent_rate = rate;
-
-	return rate;
-}
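The decision documented above boils down to: pick the bypass parent only when the requested rate equals the bypass clock rate and the DPLL supports low-power bypass; otherwise round M,N and pick the reference parent. A standalone sketch of that test (the mode bit value and the rates are assumptions, not taken from this patch):

#include <stdio.h>

#define DPLL_LOW_POWER_BYPASS	0x5	/* assumed mode bit index for the modes mask */

/* Returns nonzero when the DPLL should be left in bypass for this request. */
static int dpll_wants_bypass(unsigned long rate, unsigned long bypass_rate,
			     unsigned int modes)
{
	return rate == bypass_rate && (modes & (1 << DPLL_LOW_POWER_BYPASS));
}

int main(void)
{
	unsigned long bypass_rate = 19200000;		/* hypothetical bypass clock rate */
	unsigned int modes = 1 << DPLL_LOW_POWER_BYPASS;

	printf("19.2 MHz -> %s\n", dpll_wants_bypass(19200000, bypass_rate, modes) ?
	       "bypass parent" : "lock: round M,N and use the reference parent");
	printf("500 MHz  -> %s\n", dpll_wants_bypass(500000000, bypass_rate, modes) ?
	       "bypass parent" : "lock: round M,N and use the reference parent");
	return 0;
}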
-
-/**
- * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
- * @hw: pointer to the clock to set parent for
- * @index: parent index to select
- *
- * Sets parent for a DPLL clock. This sets the DPLL into bypass or
- * locked mode. Returns 0 with success, negative error value otherwise.
- */
-int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	int ret;
-
-	if (!hw)
-		return -EINVAL;
-
-	if (index)
-		ret = _omap3_noncore_dpll_bypass(clk);
-	else
-		ret = _omap3_noncore_dpll_lock(clk);
-
-	return ret;
-}
-
-/**
- * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
- * @hw: pointer to the clock to set parent for
- * @rate: target rate for the clock
- * @parent_rate: rate of the parent clock
- *
- * Sets rate for a DPLL clock. First checks if the clock parent is
- * reference clock (in bypass mode, the rate of the clock can't be
- * changed) and proceeds with the rate change operation. Returns 0
- * with success, negative error value otherwise.
- */
-int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	struct dpll_data *dd;
-	u16 freqsel = 0;
-	int ret;
-
-	if (!hw || !rate)
-		return -EINVAL;
-
-	dd = clk->dpll_data;
-	if (!dd)
-		return -EINVAL;
-
-	if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
-	    __clk_get_hw(dd->clk_ref))
-		return -EINVAL;
-
-	if (dd->last_rounded_rate == 0)
-		return -EINVAL;
-
-	/* Freqsel is available only on OMAP343X devices */
-	if (ti_clk_features.flags & TI_CLK_DPLL_HAS_FREQSEL) {
-		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
-		WARN_ON(!freqsel);
-	}
-
-	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
-		 __clk_get_name(hw->clk), rate);
-
-	ret = omap3_noncore_dpll_program(clk, freqsel);
-
-	return ret;
-}
-
-/**
- * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
- * @hw: pointer to the clock to set rate and parent for
- * @rate: target rate for the DPLL
- * @parent_rate: clock rate of the DPLL parent
- * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
- *
- * Sets rate and parent for a DPLL clock. If new parent is the bypass
- * clock, only selects the parent. Otherwise proceeds with a rate
- * change, as this will effectively also change the parent as the
- * DPLL is put into locked mode. Returns 0 with success, negative error
- * value otherwise.
- */
-int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
-					   unsigned long rate,
-					   unsigned long parent_rate,
-					   u8 index)
-{
-	int ret;
-
-	if (!hw || !rate)
-		return -EINVAL;
-
-	/*
-	 * clk-ref is at index[0], in which case we only need to set the rate;
-	 * the parent will be changed automatically by the lock sequence.
-	 * In the clk-bypass case we only need to change the parent.
-	 */
-	if (index)
-		ret = omap3_noncore_dpll_set_parent(hw, index);
-	else
-		ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
-
-	return ret;
-}
-
-/* DPLL autoidle read/set code */
-
-/**
- * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
- * @clk: struct clk * of the DPLL to read
- *
- * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
- * -EINVAL if passed a null pointer or if the struct clk does not
- * appear to refer to a DPLL.
- */
-u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
-{
-	const struct dpll_data *dd;
-	u32 v;
-
-	if (!clk || !clk->dpll_data)
-		return -EINVAL;
-
-	dd = clk->dpll_data;
-
-	if (!dd->autoidle_reg)
-		return -EINVAL;
-
-	v = omap2_clk_readl(clk, dd->autoidle_reg);
-	v &= dd->autoidle_mask;
-	v >>= __ffs(dd->autoidle_mask);
-
-	return v;
-}
-
-/**
- * omap3_dpll_allow_idle - enable DPLL autoidle bits
- * @clk: struct clk * of the DPLL to operate on
- *
- * Enable DPLL automatic idle control.  This automatic idle mode
- * switching takes effect only when the DPLL is locked, at least on
- * OMAP3430.  The DPLL will enter low-power stop when its downstream
- * clocks are gated.  No return value.
- */
-void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
-{
-	const struct dpll_data *dd;
-	u32 v;
-
-	if (!clk || !clk->dpll_data)
-		return;
-
-	dd = clk->dpll_data;
-
-	if (!dd->autoidle_reg)
-		return;
-
-	/*
-	 * REVISIT: CORE DPLL can optionally enter low-power bypass
-	 * by writing 0x5 instead of 0x1.  Add some mechanism to
-	 * optionally enter this mode.
-	 */
-	v = omap2_clk_readl(clk, dd->autoidle_reg);
-	v &= ~dd->autoidle_mask;
-	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
-	omap2_clk_writel(v, clk, dd->autoidle_reg);
-
-}
-
-/**
- * omap3_dpll_deny_idle - prevent DPLL from automatically idling
- * @clk: struct clk * of the DPLL to operate on
- *
- * Disable DPLL automatic idle control.  No return value.
- */
-void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
-{
-	const struct dpll_data *dd;
-	u32 v;
-
-	if (!clk || !clk->dpll_data)
-		return;
-
-	dd = clk->dpll_data;
-
-	if (!dd->autoidle_reg)
-		return;
-
-	v = omap2_clk_readl(clk, dd->autoidle_reg);
-	v &= ~dd->autoidle_mask;
-	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
-	omap2_clk_writel(v, clk, dd->autoidle_reg);
-
-}
-
-/* Clock control for DPLL outputs */
-
-/* Find the parent DPLL for the given clkoutx2 clock */
-static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
-{
-	struct clk_hw_omap *pclk = NULL;
-	struct clk *parent;
-
-	/* Walk up the parents of clk, looking for a DPLL */
-	do {
-		do {
-			parent = __clk_get_parent(hw->clk);
-			hw = __clk_get_hw(parent);
-		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
-		if (!hw)
-			break;
-		pclk = to_clk_hw_omap(hw);
-	} while (pclk && !pclk->dpll_data);
-
-	/* clk does not have a DPLL as a parent?  error in the clock data */
-	if (!pclk) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	return pclk;
-}
-
-/**
- * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
- * @clk: DPLL output struct clk
- *
- * Using parent clock DPLL data, look up DPLL state.  If locked, set our
- * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
- */
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
-				    unsigned long parent_rate)
-{
-	const struct dpll_data *dd;
-	unsigned long rate;
-	u32 v;
-	struct clk_hw_omap *pclk = NULL;
-
-	if (!parent_rate)
-		return 0;
-
-	pclk = omap3_find_clkoutx2_dpll(hw);
-
-	if (!pclk)
-		return 0;
-
-	dd = pclk->dpll_data;
-
-	WARN_ON(!dd->enable_mask);
-
-	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
-	v >>= __ffs(dd->enable_mask);
-	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
-		rate = parent_rate;
-	else
-		rate = parent_rate * 2;
-	return rate;
-}
-
-int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate)
-{
-	return 0;
-}
-
-long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *prate)
-{
-	const struct dpll_data *dd;
-	u32 v;
-	struct clk_hw_omap *pclk = NULL;
-
-	if (!*prate)
-		return 0;
-
-	pclk = omap3_find_clkoutx2_dpll(hw);
-
-	if (!pclk)
-		return 0;
-
-	dd = pclk->dpll_data;
-
-	/* TYPE J does not have a clkoutx2 */
-	if (dd->flags & DPLL_J_TYPE) {
-		*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
-		return *prate;
-	}
-
-	WARN_ON(!dd->enable_mask);
-
-	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
-	v >>= __ffs(dd->enable_mask);
-
-	/* If in bypass, the rate is fixed to the bypass rate */
-	if (v != OMAP3XXX_EN_DPLL_LOCKED)
-		return *prate;
-
-	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
-		unsigned long best_parent;
-
-		best_parent = (rate / 2);
-		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
-				best_parent);
-	}
-
-	return *prate * 2;
-}
-
-/* OMAP3/4 non-CORE DPLL clkops */
-const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
-	.allow_idle	= omap3_dpll_allow_idle,
-	.deny_idle	= omap3_dpll_deny_idle,
-};
diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c
deleted file mode 100644
index f231be0..0000000
--- a/arch/arm/mach-omap2/dpll44xx.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * OMAP4-specific DPLL control functions
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- * Rajendra Nayak
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-
-#include "clock.h"
-
-/*
- * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
- * can supported when using the DPLL low-power mode. Frequencies are
- * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
- * Status, and Low-Power Operation Mode".
- */
-#define OMAP4_DPLL_LP_FINT_MAX	1000000
-#define OMAP4_DPLL_LP_FOUT_MAX	100000000
-
-/*
- * Bitfield declarations
- */
-#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK		(1 << 8)
-#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK		(1 << 10)
-#define OMAP4430_DPLL_REGM4XEN_MASK			(1 << 11)
-
-/* Static rate multiplier for OMAP4 REGM4XEN clocks */
-#define OMAP4430_REGM4XEN_MULT				4
-
-void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
-{
-	u32 v;
-	u32 mask;
-
-	if (!clk || !clk->clksel_reg)
-		return;
-
-	mask = clk->flags & CLOCK_CLKOUTX2 ?
-			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
-			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
-
-	v = omap2_clk_readl(clk, clk->clksel_reg);
-	/* Clear the bit to allow gatectrl */
-	v &= ~mask;
-	omap2_clk_writel(v, clk, clk->clksel_reg);
-}
-
-void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
-{
-	u32 v;
-	u32 mask;
-
-	if (!clk || !clk->clksel_reg)
-		return;
-
-	mask = clk->flags & CLOCK_CLKOUTX2 ?
-			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
-			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
-
-	v = omap2_clk_readl(clk, clk->clksel_reg);
-	/* Set the bit to deny gatectrl */
-	v |= mask;
-	omap2_clk_writel(v, clk, clk->clksel_reg);
-}
-
-const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
-	.allow_idle	= omap4_dpllmx_allow_gatectrl,
-	.deny_idle      = omap4_dpllmx_deny_gatectrl,
-};
-
-/**
- * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
- * @dd: pointer to the dpll data structure
- *
- * Calculates if low-power mode can be enabled based upon the last
- * multiplier and divider values calculated. If low-power mode can be
- * enabled, then the bit to enable low-power mode is stored in the
- * last_rounded_lpmode variable. This implementation is based upon the
- * criteria for enabling low-power mode as described in the OMAP4430/60
- * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
- * Operation Mode".
- */
-static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
-{
-	long fint, fout;
-
-	fint = __clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
-	fout = fint * dd->last_rounded_m;
-
-	if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
-		dd->last_rounded_lpmode = 1;
-	else
-		dd->last_rounded_lpmode = 0;
-}
-
-/**
- * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
- * @clk: struct clk * of the DPLL to compute the rate for
- *
- * Compute the output rate for the OMAP4 DPLL represented by @clk.
- * Takes the REGM4XEN bit into consideration, which is needed for the
- * OMAP4 ABE DPLL.  Returns the DPLL's output rate (before M-dividers)
- * upon success, or 0 upon error.
- */
-unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
-			unsigned long parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	u32 v;
-	unsigned long rate;
-	struct dpll_data *dd;
-
-	if (!clk || !clk->dpll_data)
-		return 0;
-
-	dd = clk->dpll_data;
-
-	rate = omap2_get_dpll_rate(clk);
-
-	/* regm4xen adds a multiplier of 4 to DPLL calculations */
-	v = omap2_clk_readl(clk, dd->control_reg);
-	if (v & OMAP4430_DPLL_REGM4XEN_MASK)
-		rate *= OMAP4430_REGM4XEN_MULT;
-
-	return rate;
-}
-
-/**
- * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @clk: struct clk * of the DPLL to round a rate for
- * @target_rate: the desired rate of the DPLL
- *
- * Compute the rate that would be programmed into the DPLL hardware
- * for @clk if set_rate() were to be provided with the rate
- * @target_rate.  Takes the REGM4XEN bit into consideration, which is
- * needed for the OMAP4 ABE DPLL.  Returns the rounded rate (before
- * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
- * ~0 if an error occurred in omap2_dpll_round_rate().
- */
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
-				    unsigned long target_rate,
-				    unsigned long *parent_rate)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	struct dpll_data *dd;
-	long r;
-
-	if (!clk || !clk->dpll_data)
-		return -EINVAL;
-
-	dd = clk->dpll_data;
-
-	dd->last_rounded_m4xen = 0;
-
-	/*
-	 * First try to compute the DPLL configuration for
-	 * target rate without using the 4X multiplier.
-	 */
-	r = omap2_dpll_round_rate(hw, target_rate, NULL);
-	if (r != ~0)
-		goto out;
-
-	/*
-	 * If we did not find a valid DPLL configuration, try again, but
-	 * this time see if using the 4X multiplier can help. Enabling the
-	 * 4X multiplier is equivalent to dividing the target rate by 4.
-	 */
-	r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
-				  NULL);
-	if (r == ~0)
-		return r;
-
-	dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
-	dd->last_rounded_m4xen = 1;
-
-out:
-	omap4_dpll_lpmode_recalc(dd);
-
-	return dd->last_rounded_rate;
-}
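Put differently: if the plain M,N search fails, the code retries with the target divided by OMAP4430_REGM4XEN_MULT and reports the rounded rate scaled back up by 4 with REGM4XEN set. A quick arithmetic sketch with a hypothetical target rate (the round-rate call is stubbed out):

#include <stdio.h>

#define OMAP4430_REGM4XEN_MULT	4

int main(void)
{
	unsigned long target = 98304000;	/* hypothetical target rate */
	unsigned long retry = target / OMAP4430_REGM4XEN_MULT;

	/* Assume the first attempt failed and the retry rounds exactly. */
	unsigned long rounded = retry;		/* stand-in for omap2_dpll_round_rate() */
	unsigned long effective = rounded * OMAP4430_REGM4XEN_MULT;

	printf("target %lu Hz: program the DPLL for %lu Hz, REGM4XEN makes it %lu Hz\n",
	       target, retry, effective);
	return 0;
}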
-
-/**
- * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
- * @hw: pointer to the clock to determine rate for
- * @rate: target rate for the DPLL
- * @best_parent_rate: pointer for returning best parent rate
- * @best_parent_clk: pointer for returning best parent clock
- *
- * Determines which DPLL mode to use for reaching a desired rate.
- * Checks whether the DPLL shall be in bypass or locked mode, and if
- * locked, calculates the M,N values for the DPLL via round-rate.
- * Returns a positive clock rate on success, or a negative error value
- * on failure.
- */
-long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk)
-{
-	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
-	struct dpll_data *dd;
-
-	if (!hw || !rate)
-		return -EINVAL;
-
-	dd = clk->dpll_data;
-	if (!dd)
-		return -EINVAL;
-
-	if (__clk_get_rate(dd->clk_bypass) == rate &&
-	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
-		*best_parent_clk = __clk_get_hw(dd->clk_bypass);
-	} else {
-		rate = omap4_dpll_regm4xen_round_rate(hw, rate,
-						      best_parent_rate);
-		*best_parent_clk = __clk_get_hw(dd->clk_ref);
-	}
-
-	*best_parent_rate = rate;
-
-	return rate;
-}
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644
index 1c582a8..0000000
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * OMAP memory barrier header.
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- *  Santosh Shilimkar <santosh.shilimkar@ti.com>
- *  Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_BARRIERS_H
-#define __MACH_BARRIERS_H
-
-#include <asm/outercache.h>
-
-extern void omap_bus_sync(void);
-
-#define rmb()		dsb()
-#define wmb()		do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
-#define mb()		wmb()
-
-#endif	/* __MACH_BARRIERS_H */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 820dde8..980c937 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -37,7 +37,6 @@
 #include "clock.h"
 #include "clock2xxx.h"
 #include "clock3xxx.h"
-#include "clock44xx.h"
 #include "omap-pm.h"
 #include "sdrc.h"
 #include "control.h"
@@ -236,7 +235,7 @@
 };
 #endif
 
-#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
+#ifdef CONFIG_SOC_OMAP5
 static struct map_desc omap54xx_io_desc[] __initdata = {
 	{
 		.virtual	= L3_54XX_VIRT,
@@ -265,6 +264,53 @@
 };
 #endif
 
+#ifdef CONFIG_SOC_DRA7XX
+static struct map_desc dra7xx_io_desc[] __initdata = {
+	{
+		.virtual	= L4_CFG_MPU_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_CFG_MPU_DRA7XX_PHYS),
+		.length		= L4_CFG_MPU_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+	{
+		.virtual	= L3_MAIN_SN_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L3_MAIN_SN_DRA7XX_PHYS),
+		.length		= L3_MAIN_SN_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+	{
+		.virtual	= L4_PER1_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_PER1_DRA7XX_PHYS),
+		.length		= L4_PER1_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+	{
+		.virtual	= L4_PER2_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_PER2_DRA7XX_PHYS),
+		.length		= L4_PER2_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+	{
+		.virtual	= L4_PER3_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_PER3_DRA7XX_PHYS),
+		.length		= L4_PER3_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+	{
+		.virtual	= L4_CFG_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_CFG_DRA7XX_PHYS),
+		.length		= L4_CFG_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+	{
+		.virtual	= L4_WKUP_DRA7XX_VIRT,
+		.pfn		= __phys_to_pfn(L4_WKUP_DRA7XX_PHYS),
+		.length		= L4_WKUP_DRA7XX_SIZE,
+		.type		= MT_DEVICE,
+	},
+};
+#endif
+
 #ifdef CONFIG_SOC_OMAP2420
 void __init omap242x_map_io(void)
 {
@@ -306,13 +352,22 @@
 void __init omap4_map_io(void)
 {
 	iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+	omap_barriers_init();
 }
 #endif
 
-#if defined(CONFIG_SOC_OMAP5) ||  defined(CONFIG_SOC_DRA7XX)
+#ifdef CONFIG_SOC_OMAP5
 void __init omap5_map_io(void)
 {
 	iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+	omap_barriers_init();
+}
+#endif
+
+#ifdef CONFIG_SOC_DRA7XX
+void __init dra7xx_map_io(void)
+{
+	iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
 }
 #endif
 /*
@@ -554,11 +609,11 @@
 	omap2_prcm_base_init();
 	omap3xxx_voltagedomains_init();
 	omap3xxx_powerdomains_init();
-	ti81xx_clockdomains_init();
-	ti81xx_hwmod_init();
+	ti814x_clockdomains_init();
+	dm814x_hwmod_init();
 	omap_hwmod_init_postsetup();
 	if (of_have_populated_dt())
-		omap_clk_soc_init = ti81xx_dt_clk_init;
+		omap_clk_soc_init = dm814x_dt_clk_init;
 }
 
 void __init ti816x_init_early(void)
@@ -571,11 +626,11 @@
 	omap2_prcm_base_init();
 	omap3xxx_voltagedomains_init();
 	omap3xxx_powerdomains_init();
-	ti81xx_clockdomains_init();
-	ti81xx_hwmod_init();
+	ti816x_clockdomains_init();
+	dm816x_hwmod_init();
 	omap_hwmod_init_postsetup();
 	if (of_have_populated_dt())
-		omap_clk_soc_init = ti81xx_dt_clk_init;
+		omap_clk_soc_init = dm816x_dt_clk_init;
 }
 #endif
 
@@ -723,6 +778,8 @@
 
 	ti_clk_init_features();
 
+	omap2_clk_setup_ll_ops();
+
 	if (of_have_populated_dt()) {
 		ret = omap_control_init();
 		if (ret)
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index cce2b65..6191d24 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -194,3 +194,66 @@
 #define L4_PER_54XX_PHYS	L4_PER_54XX_BASE /* 0x48000000 --> 0xfa000000 */
 #define L4_PER_54XX_VIRT	(L4_PER_54XX_PHYS + OMAP2_L4_IO_OFFSET)
 #define L4_PER_54XX_SIZE	SZ_4M
+
+/*
+ * ----------------------------------------------------------------------------
+ * DRA7xx specific IO mapping
+ * ----------------------------------------------------------------------------
+ */
+/*
+ * L3_MAIN_SN_DRA7XX_PHYS 0x44000000 --> 0xf8000000
+ * The overall space is 24MiB (0x4400_0000<->0x457F_FFFF), but mapping
+ * everything is just inefficient, since there are too many address holes.
+ */
+#define L3_MAIN_SN_DRA7XX_PHYS		L3_MAIN_SN_DRA7XX_BASE
+#define L3_MAIN_SN_DRA7XX_VIRT		(L3_MAIN_SN_DRA7XX_PHYS + OMAP4_L3_IO_OFFSET)
+#define L3_MAIN_SN_DRA7XX_SIZE		SZ_1M
+
+/*
+ * L4_PER1_DRA7XX_PHYS	(0x4800_0000<>0x480D_2FFF) -> 0.82MiB (alloc 1MiB)
+ *	(0x48000000<->0x48100000) <=> (0xFA000000<->0xFA100000)
+ */
+#define L4_PER1_DRA7XX_PHYS		L4_PER1_DRA7XX_BASE
+#define L4_PER1_DRA7XX_VIRT		(L4_PER1_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_PER1_DRA7XX_SIZE		SZ_1M
+
+/*
+ * L4_CFG_MPU_DRA7XX_PHYS	(0x48210000<>0x482A_F2FF) -> 0.62MiB (alloc 1MiB)
+ *	(0x48210000<->0x48310000) <=> (0xFA210000<->0xFA310000)
+ * NOTE: This is a bit of an orphan memory map sitting isolated in TRM
+ */
+#define L4_CFG_MPU_DRA7XX_PHYS		L4_CFG_MPU_DRA7XX_BASE
+#define L4_CFG_MPU_DRA7XX_VIRT		(L4_CFG_MPU_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_CFG_MPU_DRA7XX_SIZE		SZ_1M
+
+/*
+ * L4_PER2_DRA7XX_PHYS	(0x4840_0000<>0x4848_8FFF) -> .53MiB (alloc 1MiB)
+ *	(0x48400000<->0x48500000) <=> (0xFA400000<->0xFA500000)
+ */
+#define L4_PER2_DRA7XX_PHYS		L4_PER2_DRA7XX_BASE
+#define L4_PER2_DRA7XX_VIRT		(L4_PER2_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_PER2_DRA7XX_SIZE		SZ_1M
+
+/*
+ * L4_PER3_DRA7XX_PHYS	(0x4880_0000<>0x489E_0FFF) -> 1.87MiB (alloc 2MiB)
+ *	(0x48800000<->0x48A00000) <=> (0xFA800000<->0xFAA00000)
+ */
+#define L4_PER3_DRA7XX_PHYS		L4_PER3_DRA7XX_BASE
+#define L4_PER3_DRA7XX_VIRT		(L4_PER3_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_PER3_DRA7XX_SIZE		SZ_2M
+
+/*
+ * L4_CFG_DRA7XX_PHYS	(0x4A00_0000<>0x4A22_BFFF) ->2.17MiB (alloc 3MiB)?
+ *	(0x4A000000<->0x4A300000) <=> (0xFC000000<->0xFC300000)
+ */
+#define L4_CFG_DRA7XX_PHYS		L4_CFG_DRA7XX_BASE
+#define L4_CFG_DRA7XX_VIRT		(L4_CFG_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_CFG_DRA7XX_SIZE		(SZ_1M + SZ_2M)
+
+/*
+ * L4_WKUP_DRA7XX_PHYS	(0x4AE0_0000<>0x4AE3_EFFF) -> 0.24 MiB (alloc 1MiB)?
+ *	(0x4AE00000<->4AF00000)	<=> (0xFCE00000<->0xFCF00000)
+ */
+#define L4_WKUP_DRA7XX_PHYS		L4_WKUP_DRA7XX_BASE
+#define L4_WKUP_DRA7XX_VIRT		(L4_WKUP_DRA7XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_WKUP_DRA7XX_SIZE		SZ_1M
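The virtual addresses in these windows are the physical bases plus a fixed offset; from the phys --> virt pairs quoted in the comments above, OMAP2_L4_IO_OFFSET works out to 0xfa000000 - 0x48000000 = 0xb2000000 and OMAP4_L3_IO_OFFSET to 0xf8000000 - 0x44000000 = 0xb4000000. A standalone check of that arithmetic (offsets derived here, not read from the headers):

#include <stdio.h>

int main(void)
{
	/* Offsets derived from the phys --> virt examples in the comments above */
	const unsigned int l4_io_offset = 0xfa000000u - 0x48000000u;	/* 0xb2000000 */
	const unsigned int l3_io_offset = 0xf8000000u - 0x44000000u;	/* 0xb4000000 */

	unsigned int l4_per1_phys = 0x48000000u;
	unsigned int l3_main_sn_phys = 0x44000000u;

	printf("L4_PER1:    0x%08x -> 0x%08x\n", l4_per1_phys,
	       l4_per1_phys + l4_io_offset);
	printf("L3_MAIN_SN: 0x%08x -> 0x%08x\n", l3_main_sn_phys,
	       l3_main_sn_phys + l3_io_offset);
	return 0;
}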
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index 4068350..8867eb4 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/of.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
 #include <linux/slab.h>
@@ -63,15 +62,5 @@
 
 	return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL);
 }
-/* must be ready before omap3isp is probed */
 omap_subsys_initcall(omap_iommu_init);
-
-static void __exit omap_iommu_exit(void)
-{
-	/* Do nothing */
-}
-module_exit(omap_iommu_exit);
-
-MODULE_AUTHOR("Hiroshi DOYU");
-MODULE_DESCRIPTION("omap iommu: omap device registration");
-MODULE_LICENSE("GPL v2");
+/* must be ready before omap3isp is probed */
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 79f49d9..65024af 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -105,7 +105,7 @@
 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
 {}
 
-struct cpu_pm_ops omap_pm_ops = {
+static struct cpu_pm_ops omap_pm_ops = {
 	.finish_suspend		= default_finish_suspend,
 	.resume			= dummy_cpu_resume,
 	.scu_prepare		= dummy_scu_prepare,
diff --git a/arch/arm/mach-omap2/omap3-restart.c b/arch/arm/mach-omap2/omap3-restart.c
index 103a49f..4bdd22e 100644
--- a/arch/arm/mach-omap2/omap3-restart.c
+++ b/arch/arm/mach-omap2/omap3-restart.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/reboot.h>
 
+#include "common.h"
 #include "control.h"
 #include "prm.h"
 
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350ee..949696b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,127 @@
 
 #define IRQ_LOCALTIMER		29
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA			0xfe600000
+
+static void __iomem *dram_sync, *sram_sync;
+static phys_addr_t dram_sync_paddr;
+static u32 dram_sync_size;
+
+/*
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
+ * data writes from the MPU. These asynchronous bridges can be found on the
+ * paths from the MPU to the EMIF and from the MPU to the L3 interconnects.
+ *
+ * We need to be careful about re-ordering which can happen as a result
+ * of different accesses being performed via different paths, and
+ * therefore different asynchronous bridges.
+ */
+
+/*
+ * OMAP4 interconnect barrier which is called for each mb() and wmb().
+ * This is to ensure that normal paths to DRAM (normal memory, cacheable
+ * accesses) are properly synchronised with writes to DMA coherent memory
+ * (normal memory, uncacheable) and device writes.
+ *
+ * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
+ * path, as we need to ensure that data is visible to other system
+ * masters prior to writes to those system masters being seen.
+ *
+ * Note: the SRAM path is not synchronised via mb() and wmb().
+ */
+static void omap4_mb(void)
+{
+	if (dram_sync)
+		writel_relaxed(0, dram_sync);
+}
+
+/*
+ * OMAP4 erratum i688 - asynchronous bridge corruption when entering WFI.
+ *
+ * If data is stalled inside an asynchronous bridge because of back
+ * pressure, it may be accepted multiple times, creating a pointer
+ * misalignment that will corrupt subsequent transfers on that data path
+ * until the next reset of the system. There is no recovery procedure once
+ * the issue is hit; the path remains consistently broken.
+ *
+ * Async bridges can be found on the paths from the MPU to the EMIF and
+ * from the MPU to the L3 interconnects.
+ *
+ * This situation can happen only when idle is initiated by a Master
+ * Request Disconnection (which is triggered by software when executing
+ * WFI on the CPU).
+ *
+ * The workaround for this erratum requires every initiator connected
+ * through an async bridge to ensure that the data path is properly
+ * drained before issuing WFI. This condition is met if one strongly
+ * ordered access is performed to the target right before executing WFI.
+ *
+ * In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to
+ * be drained. The I/O barrier ensures there is no synchronisation loss
+ * for initiators operating on both interconnect ports simultaneously.
+ *
+ * This is a stronger version of the OMAP4 memory barrier above: it
+ * operates not only on the MPU->MA->EMIF path but also on the MPU->OCP
+ * path, and is necessary prior to executing a WFI.
+ */
+void omap_interconnect_sync(void)
+{
+	if (dram_sync && sram_sync) {
+		writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+		writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+		isb();
+	}
+}
+
+static int __init omap4_sram_init(void)
+{
+	struct device_node *np;
+	struct gen_pool *sram_pool;
+
+	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+	if (!np)
+		pr_warn("%s: Unable to allocate sram needed to handle erratum i688\n",
+			__func__);
+	sram_pool = of_gen_pool_get(np, "sram", 0);
+	if (!sram_pool)
+		pr_warn("%s: Unable to get sram pool needed to handle erratum i688\n",
+			__func__);
+	else
+		sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+	return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page of physical memory for the barrier implementation */
+void __init omap_barrier_reserve_memblock(void)
+{
+	dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
+	dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
+}
+
+void __init omap_barriers_init(void)
+{
+	struct map_desc dram_io_desc[1];
+
+	dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+	dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
+	dram_io_desc[0].length = dram_sync_size;
+	dram_io_desc[0].type = MT_MEMORY_RW_SO;
+	iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+	dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+	pr_info("OMAP4: Map %pa to %p for dram barrier\n",
+		&dram_sync_paddr, dram_sync);
+
+	soc_mb = omap4_mb;
+}
+
+#endif
+
 void gic_dist_disable(void)
 {
 	if (gic_dist_base_addr)
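
The hunk above only installs omap4_mb() into the soc_mb hook; the generic ARM barrier code is what ends up calling it from mb()/wmb(). A minimal sketch of that consumer side, assuming soc_mb is a plain function pointer checked on each heavy barrier (the helper name heavy_mb() and the exact call site are illustrative, not part of this patch):

	/* illustrative sketch only -- not part of this patch */
	void (*soc_mb)(void);		/* set to omap4_mb() in omap_barriers_init() */

	static inline void heavy_mb(void)
	{
		dsb(st);		/* architectural barrier first */
		if (soc_mb)
			soc_mb();	/* SoC hook: drains the MPU->MA->EMIF path */
	}
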
diff --git a/arch/arm/mach-omap2/omap4-restart.c b/arch/arm/mach-omap2/omap4-restart.c
index a99e7f7..e17136a 100644
--- a/arch/arm/mach-omap2/omap4-restart.c
+++ b/arch/arm/mach-omap2/omap4-restart.c
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 #include <linux/reboot.h>
+#include "common.h"
 #include "prm.h"
 
 /**
diff --git a/arch/arm/mach-omap2/omap54xx.h b/arch/arm/mach-omap2/omap54xx.h
index 2d35c57..0ca8e93 100644
--- a/arch/arm/mach-omap2/omap54xx.h
+++ b/arch/arm/mach-omap2/omap54xx.h
@@ -30,6 +30,14 @@
 #define OMAP54XX_CTRL_BASE		0x4a002800
 #define OMAP54XX_SAR_RAM_BASE		0x4ae26000
 
+/* DRA7 specific base addresses */
+#define L3_MAIN_SN_DRA7XX_BASE		0x44000000
+#define L4_PER1_DRA7XX_BASE		0x48000000
+#define L4_CFG_MPU_DRA7XX_BASE		0x48210000
+#define L4_PER2_DRA7XX_BASE		0x48400000
+#define L4_PER3_DRA7XX_BASE		0x48800000
+#define L4_CFG_DRA7XX_BASE		0x4A000000
+#define L4_WKUP_DRA7XX_BASE		0x4ae00000
 #define DRA7XX_CM_CORE_AON_BASE		0x4a005000
 #define DRA7XX_CTRL_BASE		0x4a003400
 #define DRA7XX_TAP_BASE			0x4ae0c000
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 486cc4d..cc8a987 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -130,6 +130,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -299,7 +300,20 @@
 
 	/* Module might have lost context, always update cache and register */
 	oh->_sysc_cache = v;
+
+	/*
+	 * Some IP blocks (such as the RTC) require the IP to be unlocked
+	 * before its registers can be accessed. If an unlock function
+	 * pointer is present, call it before writing SYSCONFIG, and call
+	 * the lock function after the SYSCONFIG write.
+	 */
+	if (oh->class->unlock)
+		oh->class->unlock(oh);
+
 	omap_hwmod_write(v, oh, oh->class->sysc->sysc_offs);
+
+	if (oh->class->lock)
+		oh->class->lock(oh);
 }
 
 /**
@@ -3886,7 +3900,8 @@
 		soc_ops.init_clkdm = _init_clkdm;
 		soc_ops.update_context_lost = _omap4_update_context_lost;
 		soc_ops.get_context_lost = _omap4_get_context_lost;
-	} else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) {
+	} else if (cpu_is_ti814x() || cpu_is_ti816x() || soc_is_am33xx() ||
+		   soc_is_am43xx()) {
 		soc_ops.enable_module = _omap4_enable_module;
 		soc_ops.disable_module = _omap4_disable_module;
 		soc_ops.wait_target_ready = _omap4_wait_target_ready;
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index b5d27ec..ca6df1a 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -576,6 +576,8 @@
  * @pre_shutdown: ptr to fn to be executed immediately prior to device shutdown
  * @reset: ptr to fn to be executed in place of the standard hwmod reset fn
  * @enable_preprogram:  ptr to fn to be executed during device enable
+ * @lock: ptr to fn to be executed to lock IP registers
+ * @unlock: ptr to fn to be executed to unlock IP registers
  *
  * Represent the class of a OMAP hardware "modules" (e.g. timer,
  * smartreflex, gpio, uart...)
@@ -600,6 +602,8 @@
 	int					(*pre_shutdown)(struct omap_hwmod *oh);
 	int					(*reset)(struct omap_hwmod *oh);
 	int					(*enable_preprogram)(struct omap_hwmod *oh);
+	void					(*lock)(struct omap_hwmod *oh);
+	void					(*unlock)(struct omap_hwmod *oh);
 };
 
 /**
@@ -755,7 +759,8 @@
 extern int omap44xx_hwmod_init(void);
 extern int omap54xx_hwmod_init(void);
 extern int am33xx_hwmod_init(void);
-extern int ti81xx_hwmod_init(void);
+extern int dm814x_hwmod_init(void);
+extern int dm816x_hwmod_init(void);
 extern int dra7xx_hwmod_init(void);
 int am43xx_hwmod_init(void);
 
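
To show how the new lock/unlock hooks are meant to be filled in, here is a hypothetical class for a kicked IP such as the RTC. The register offsets, kick values and the am33xx_rtc_sysc symbol are placeholders standing in for whatever the TRM specifies, and a write helper with omap_hwmod_write()'s signature is assumed to be visible in that scope:

	/* hypothetical example -- offsets/values stand in for the TRM ones */
	static void rtc_unlock(struct omap_hwmod *oh)
	{
		omap_hwmod_write(0x83e70b13, oh, 0x6c);	/* KICK0R */
		omap_hwmod_write(0x95a4f1e0, oh, 0x70);	/* KICK1R */
	}

	static void rtc_lock(struct omap_hwmod *oh)
	{
		omap_hwmod_write(0x0, oh, 0x6c);
		omap_hwmod_write(0x0, oh, 0x70);
	}

	static struct omap_hwmod_class am33xx_rtc_hwmod_class = {
		.name	= "rtc",
		.sysc	= &am33xx_rtc_sysc,	/* assumed defined elsewhere */
		.unlock	= &rtc_unlock,
		.lock	= &rtc_lock,
	};
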
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 6dcfd03..36bcd2e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -20,7 +20,7 @@
 #include "prm-regbits-24xx.h"
 #include "wd_timer.h"
 
-struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
+static struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
 	{ .name = "dispc", .dma_req = 5 },
 	{ .dma_req = -1, },
 };
diff --git a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
index 215d5ef..e97a894 100644
--- a/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_43xx_data.c
@@ -480,7 +480,7 @@
 
 /* dispc */
 
-struct omap_dss_dispc_dev_attr am43xx_dss_dispc_dev_attr = {
+static struct omap_dss_dispc_dev_attr am43xx_dss_dispc_dev_attr = {
 	.manager_count		= 1,
 	.has_framedonetv_irq	= 0
 };
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index c924137..b1288f5 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -32,21 +32,59 @@
  */
 
 /*
- * The alwon .clkctrl_offs field is offset from the CM_ALWON, that's
- * TRM 18.7.17 CM_ALWON device register values minus 0x1400.
+ * Common alwon .clkctrl_offs values from dm814x TRM "Table 2-278. CM_ALWON
+ * REGISTERS"; these also match the dm816x TRM 18.7.17 values minus 0x1400.
  */
+#define DM81XX_CM_ALWON_MCASP0_CLKCTRL		0x140
+#define DM81XX_CM_ALWON_MCASP1_CLKCTRL		0x144
+#define DM81XX_CM_ALWON_MCASP2_CLKCTRL		0x148
+#define DM81XX_CM_ALWON_MCBSP_CLKCTRL		0x14c
+#define DM81XX_CM_ALWON_UART_0_CLKCTRL		0x150
+#define DM81XX_CM_ALWON_UART_1_CLKCTRL		0x154
+#define DM81XX_CM_ALWON_UART_2_CLKCTRL		0x158
+#define DM81XX_CM_ALWON_GPIO_0_CLKCTRL		0x15c
+#define DM81XX_CM_ALWON_GPIO_1_CLKCTRL		0x160
+#define DM81XX_CM_ALWON_I2C_0_CLKCTRL		0x164
+#define DM81XX_CM_ALWON_I2C_1_CLKCTRL		0x168
+#define DM81XX_CM_ALWON_WDTIMER_CLKCTRL		0x18c
+#define DM81XX_CM_ALWON_SPI_CLKCTRL		0x190
+#define DM81XX_CM_ALWON_MAILBOX_CLKCTRL		0x194
+#define DM81XX_CM_ALWON_SPINBOX_CLKCTRL		0x198
+#define DM81XX_CM_ALWON_MMUDATA_CLKCTRL		0x19c
+#define DM81XX_CM_ALWON_MMUCFG_CLKCTRL		0x1a8
+#define DM81XX_CM_ALWON_CONTROL_CLKCTRL		0x1c4
+#define DM81XX_CM_ALWON_GPMC_CLKCTRL		0x1d0
+#define DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL	0x1d4
+#define DM81XX_CM_ALWON_L3_CLKCTRL		0x1e4
+#define DM81XX_CM_ALWON_L4HS_CLKCTRL		0x1e8
+#define DM81XX_CM_ALWON_L4LS_CLKCTRL		0x1ec
+#define DM81XX_CM_ALWON_RTC_CLKCTRL		0x1f0
+#define DM81XX_CM_ALWON_TPCC_CLKCTRL		0x1f4
+#define DM81XX_CM_ALWON_TPTC0_CLKCTRL		0x1f8
+#define DM81XX_CM_ALWON_TPTC1_CLKCTRL		0x1fc
+#define DM81XX_CM_ALWON_TPTC2_CLKCTRL		0x200
+#define DM81XX_CM_ALWON_TPTC3_CLKCTRL		0x204
+
+/* Registers specific to dm814x */
+#define DM814X_CM_ALWON_MCASP_3_4_5_CLKCTRL	0x16c
+#define DM814X_CM_ALWON_ATL_CLKCTRL		0x170
+#define DM814X_CM_ALWON_MLB_CLKCTRL		0x174
+#define DM814X_CM_ALWON_PATA_CLKCTRL		0x178
+#define DM814X_CM_ALWON_UART_3_CLKCTRL		0x180
+#define DM814X_CM_ALWON_UART_4_CLKCTRL		0x184
+#define DM814X_CM_ALWON_UART_5_CLKCTRL		0x188
+#define DM814X_CM_ALWON_OCM_0_CLKCTRL		0x1b4
+#define DM814X_CM_ALWON_VCP_CLKCTRL		0x1b8
+#define DM814X_CM_ALWON_MPU_CLKCTRL		0x1dc
+#define DM814X_CM_ALWON_DEBUGSS_CLKCTRL		0x1e0
+#define DM814X_CM_ALWON_DCAN_0_1_CLKCTRL	0x218
+#define DM814X_CM_ALWON_MMCHS_0_CLKCTRL		0x21c
+#define DM814X_CM_ALWON_MMCHS_1_CLKCTRL		0x220
+#define DM814X_CM_ALWON_MMCHS_2_CLKCTRL		0x224
+#define DM814X_CM_ALWON_CUST_EFUSE_CLKCTRL	0x228
+
+/* Registers specific to dm816x */
 #define DM816X_DM_ALWON_BASE		0x1400
-#define DM816X_CM_ALWON_MCASP0_CLKCTRL	(0x1540 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_MCASP1_CLKCTRL	(0x1544 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_MCASP2_CLKCTRL	(0x1548 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_MCBSP_CLKCTRL	(0x154c - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_UART_0_CLKCTRL	(0x1550 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_UART_1_CLKCTRL	(0x1554 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_UART_2_CLKCTRL	(0x1558 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_GPIO_0_CLKCTRL	(0x155c - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_GPIO_1_CLKCTRL	(0x1560 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_I2C_0_CLKCTRL	(0x1564 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_I2C_1_CLKCTRL	(0x1568 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_TIMER_1_CLKCTRL	(0x1570 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_TIMER_2_CLKCTRL	(0x1574 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_TIMER_3_CLKCTRL	(0x1578 - DM816X_DM_ALWON_BASE)
@@ -54,29 +92,11 @@
 #define DM816X_CM_ALWON_TIMER_5_CLKCTRL	(0x1580 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_TIMER_6_CLKCTRL	(0x1584 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_TIMER_7_CLKCTRL	(0x1588 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_WDTIMER_CLKCTRL	(0x158c - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_SPI_CLKCTRL	(0x1590 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_MAILBOX_CLKCTRL	(0x1594 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_SPINBOX_CLKCTRL	(0x1598 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_MMUDATA_CLKCTRL	(0x159c - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_MMUCFG_CLKCTRL	(0x15a8 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_SDIO_CLKCTRL	(0x15b0 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_OCMC_0_CLKCTRL	(0x15b4 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_OCMC_1_CLKCTRL	(0x15b8 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_CONTRL_CLKCTRL	(0x15c4 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_GPMC_CLKCTRL	(0x15d0 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_ETHERNET_0_CLKCTRL (0x15d4 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_ETHERNET_1_CLKCTRL (0x15d8 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_MPU_CLKCTRL	(0x15dc - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_L3_CLKCTRL	(0x15e4 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_L4HS_CLKCTRL	(0x15e8 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_L4LS_CLKCTRL	(0x15ec - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_RTC_CLKCTRL	(0x15f0 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_TPCC_CLKCTRL	(0x15f4 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_TPTC0_CLKCTRL	(0x15f8 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_TPTC1_CLKCTRL	(0x15fc - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_TPTC2_CLKCTRL	(0x1600 - DM816X_DM_ALWON_BASE)
-#define DM816X_CM_ALWON_TPTC3_CLKCTRL	(0x1604 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_SR_0_CLKCTRL	(0x1608 - DM816X_DM_ALWON_BASE)
 #define DM816X_CM_ALWON_SR_1_CLKCTRL	(0x160c - DM816X_DM_ALWON_BASE)
 
@@ -88,28 +108,28 @@
 #define DM816X_CM_DEFAULT_USB_CLKCTRL	(0x558 - DM816X_CM_DEFAULT_OFFSET)
 
 /* L3 Interconnect entries clocked at 125, 250 and 500MHz */
-static struct omap_hwmod dm816x_alwon_l3_slow_hwmod = {
+static struct omap_hwmod dm81xx_alwon_l3_slow_hwmod = {
 	.name		= "alwon_l3_slow",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.class		= &l3_hwmod_class,
 	.flags		= HWMOD_NO_IDLEST,
 };
 
-static struct omap_hwmod dm816x_default_l3_slow_hwmod = {
+static struct omap_hwmod dm81xx_default_l3_slow_hwmod = {
 	.name		= "default_l3_slow",
 	.clkdm_name	= "default_l3_slow_clkdm",
 	.class		= &l3_hwmod_class,
 	.flags		= HWMOD_NO_IDLEST,
 };
 
-static struct omap_hwmod dm816x_alwon_l3_med_hwmod = {
+static struct omap_hwmod dm81xx_alwon_l3_med_hwmod = {
 	.name		= "l3_med",
 	.clkdm_name	= "alwon_l3_med_clkdm",
 	.class		= &l3_hwmod_class,
 	.flags		= HWMOD_NO_IDLEST,
 };
 
-static struct omap_hwmod dm816x_alwon_l3_fast_hwmod = {
+static struct omap_hwmod dm81xx_alwon_l3_fast_hwmod = {
 	.name		= "l3_fast",
 	.clkdm_name	= "alwon_l3_fast_clkdm",
 	.class		= &l3_hwmod_class,
@@ -120,7 +140,7 @@
  * L4 standard peripherals, see TRM table 1-12 for devices using this.
  * See TRM table 1-73 for devices using the 125MHz SYSCLK6 clock.
  */
-static struct omap_hwmod dm816x_l4_ls_hwmod = {
+static struct omap_hwmod dm81xx_l4_ls_hwmod = {
 	.name		= "l4_ls",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.class		= &l4_hwmod_class,
@@ -131,27 +151,54 @@
  * table 1-13. On dm816x, only EMAC, MDIO and SATA use this. See also TRM
  * table 1-73 for devices using 250MHz SYSCLK5 clock.
  */
-static struct omap_hwmod dm816x_l4_hs_hwmod = {
+static struct omap_hwmod dm81xx_l4_hs_hwmod = {
 	.name		= "l4_hs",
 	.clkdm_name	= "alwon_l3_med_clkdm",
 	.class		= &l4_hwmod_class,
 };
 
 /* L3 slow -> L4 ls peripheral interface running at 125MHz */
-static struct omap_hwmod_ocp_if dm816x_alwon_l3_slow__l4_ls = {
-	.master	= &dm816x_alwon_l3_slow_hwmod,
-	.slave	= &dm816x_l4_ls_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__l4_ls = {
+	.master	= &dm81xx_alwon_l3_slow_hwmod,
+	.slave	= &dm81xx_l4_ls_hwmod,
 	.user	= OCP_USER_MPU,
 };
 
 /* L3 med -> L4 fast peripheral interface running at 250MHz */
-static struct omap_hwmod_ocp_if dm816x_alwon_l3_slow__l4_hs = {
-	.master	= &dm816x_alwon_l3_med_hwmod,
-	.slave	= &dm816x_l4_hs_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__l4_hs = {
+	.master	= &dm81xx_alwon_l3_med_hwmod,
+	.slave	= &dm81xx_l4_hs_hwmod,
 	.user	= OCP_USER_MPU,
 };
 
 /* MPU */
+static struct omap_hwmod dm814x_mpu_hwmod = {
+	.name		= "mpu",
+	.clkdm_name	= "alwon_l3s_clkdm",
+	.class		= &mpu_hwmod_class,
+	.flags		= HWMOD_INIT_NO_IDLE,
+	.main_clk	= "mpu_ck",
+	.prcm		= {
+		.omap4 = {
+			.clkctrl_offs = DM814X_CM_ALWON_MPU_CLKCTRL,
+			.modulemode = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+static struct omap_hwmod_ocp_if dm814x_mpu__alwon_l3_slow = {
+	.master		= &dm814x_mpu_hwmod,
+	.slave		= &dm81xx_alwon_l3_slow_hwmod,
+	.user		= OCP_USER_MPU,
+};
+
+/* L3 med peripheral interface running at 200MHz */
+static struct omap_hwmod_ocp_if dm814x_mpu__alwon_l3_med = {
+	.master	= &dm814x_mpu_hwmod,
+	.slave	= &dm81xx_alwon_l3_med_hwmod,
+	.user	= OCP_USER_MPU,
+};
+
 static struct omap_hwmod dm816x_mpu_hwmod = {
 	.name		= "mpu",
 	.clkdm_name	= "alwon_mpu_clkdm",
@@ -168,14 +215,14 @@
 
 static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_slow = {
 	.master		= &dm816x_mpu_hwmod,
-	.slave		= &dm816x_alwon_l3_slow_hwmod,
+	.slave		= &dm81xx_alwon_l3_slow_hwmod,
 	.user		= OCP_USER_MPU,
 };
 
 /* L3 med peripheral interface running at 250MHz */
 static struct omap_hwmod_ocp_if dm816x_mpu__alwon_l3_med = {
 	.master	= &dm816x_mpu_hwmod,
-	.slave	= &dm816x_alwon_l3_med_hwmod,
+	.slave	= &dm81xx_alwon_l3_med_hwmod,
 	.user	= OCP_USER_MPU,
 };
 
@@ -197,13 +244,13 @@
 	.sysc = &uart_sysc,
 };
 
-static struct omap_hwmod dm816x_uart1_hwmod = {
+static struct omap_hwmod dm81xx_uart1_hwmod = {
 	.name		= "uart1",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk10_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_UART_0_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_UART_0_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -211,20 +258,20 @@
 	.flags		= DEBUG_TI81XXUART1_FLAGS,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__uart1 = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_uart1_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart1 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_uart1_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod dm816x_uart2_hwmod = {
+static struct omap_hwmod dm81xx_uart2_hwmod = {
 	.name		= "uart2",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk10_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_UART_1_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_UART_1_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -232,20 +279,20 @@
 	.flags		= DEBUG_TI81XXUART2_FLAGS,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__uart2 = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_uart2_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart2 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_uart2_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod dm816x_uart3_hwmod = {
+static struct omap_hwmod dm81xx_uart3_hwmod = {
 	.name		= "uart3",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk10_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_UART_2_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_UART_2_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -253,9 +300,9 @@
 	.flags		= DEBUG_TI81XXUART3_FLAGS,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__uart3 = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_uart3_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__uart3 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_uart3_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
@@ -276,23 +323,23 @@
 	.reset		= &omap2_wd_timer_reset,
 };
 
-static struct omap_hwmod dm816x_wd_timer_hwmod = {
+static struct omap_hwmod dm81xx_wd_timer_hwmod = {
 	.name		= "wd_timer",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk18_ck",
 	.flags		= HWMOD_NO_IDLEST,
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_WDTIMER_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_WDTIMER_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
 	.class		= &wd_timer_class,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__wd_timer1 = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_wd_timer_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__wd_timer1 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_wd_timer_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
@@ -320,27 +367,27 @@
 	.main_clk	= "sysclk10_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_I2C_0_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_I2C_0_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
 	.class		= &i2c_class,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__i2c1 = {
-	.master		= &dm816x_l4_ls_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c1 = {
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm81xx_i2c1_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod dm816x_i2c2_hwmod = {
+static struct omap_hwmod dm81xx_i2c2_hwmod = {
 	.name		= "i2c2",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk10_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_I2C_1_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_I2C_1_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -358,9 +405,9 @@
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__i2c2 = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_i2c2_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__i2c2 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_i2c2_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
@@ -378,7 +425,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm81xx_l4_ls__elm = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm81xx_elm_hwmod,
 	.user		= OCP_USER_MPU,
 };
@@ -417,7 +464,7 @@
 	.main_clk	= "sysclk6_ck",
 	.prcm = {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_GPIO_0_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_GPIO_0_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -427,7 +474,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio1 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm81xx_gpio1_hwmod,
 	.user		= OCP_USER_MPU,
 };
@@ -443,7 +490,7 @@
 	.main_clk	= "sysclk6_ck",
 	.prcm = {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_GPIO_1_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_GPIO_1_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -453,7 +500,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm81xx_l4_ls__gpio2 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm81xx_gpio2_hwmod,
 	.user		= OCP_USER_MPU,
 };
@@ -482,14 +529,14 @@
 	.flags		= DEBUG_OMAP_GPMC_HWMOD_FLAGS,
 	.prcm = {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_GPMC_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_GPMC_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
-	.master		= &dm816x_alwon_l3_slow_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_slow__gpmc = {
+	.master		= &dm81xx_alwon_l3_slow_hwmod,
 	.slave		= &dm81xx_gpmc_hwmod,
 	.user		= OCP_USER_MPU,
 };
@@ -522,7 +569,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm81xx_default_l3_slow__usbss = {
-	.master		= &dm816x_default_l3_slow_hwmod,
+	.master		= &dm81xx_default_l3_slow_hwmod,
 	.slave		= &dm81xx_usbss_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -547,6 +594,22 @@
 	.timer_capability	= OMAP_TIMER_ALWON,
 };
 
+static struct omap_hwmod dm814x_timer1_hwmod = {
+	.name		= "timer1",
+	.clkdm_name	= "alwon_l3s_clkdm",
+	.main_clk	= "timer_sys_ck",
+	.dev_attr	= &capability_alwon_dev_attr,
+	.class		= &dm816x_timer_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
+};
+
+static struct omap_hwmod_ocp_if dm814x_l4_ls__timer1 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm814x_timer1_hwmod,
+	.clk		= "timer_sys_ck",
+	.user		= OCP_USER_MPU,
+};
+
 static struct omap_hwmod dm816x_timer1_hwmod = {
 	.name		= "timer1",
 	.clkdm_name	= "alwon_l3s_clkdm",
@@ -562,12 +625,28 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer1 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer1_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
 
+static struct omap_hwmod dm814x_timer2_hwmod = {
+	.name		= "timer2",
+	.clkdm_name	= "alwon_l3s_clkdm",
+	.main_clk	= "timer_sys_ck",
+	.dev_attr	= &capability_alwon_dev_attr,
+	.class		= &dm816x_timer_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
+};
+
+static struct omap_hwmod_ocp_if dm814x_l4_ls__timer2 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm814x_timer2_hwmod,
+	.clk		= "timer_sys_ck",
+	.user		= OCP_USER_MPU,
+};
+
 static struct omap_hwmod dm816x_timer2_hwmod = {
 	.name		= "timer2",
 	.clkdm_name	= "alwon_l3s_clkdm",
@@ -583,7 +662,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer2 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer2_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -604,7 +683,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer3 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer3_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -625,7 +704,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer4 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer4_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -646,7 +725,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer5 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer5_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -667,7 +746,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer6 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer6_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -688,12 +767,68 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__timer7 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_timer7_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
 
+/* CPSW on dm814x */
+static struct omap_hwmod_class_sysconfig dm814x_cpgmac_sysc = {
+	.rev_offs	= 0x0,
+	.sysc_offs	= 0x8,
+	.syss_offs	= 0x4,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+			  SYSS_HAS_RESET_STATUS,
+	.idlemodes	= SIDLE_FORCE | SIDLE_NO | MSTANDBY_FORCE |
+			  MSTANDBY_NO,
+	.sysc_fields	= &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class dm814x_cpgmac0_hwmod_class = {
+	.name		= "cpgmac0",
+	.sysc		= &dm814x_cpgmac_sysc,
+};
+
+static struct omap_hwmod dm814x_cpgmac0_hwmod = {
+	.name		= "cpgmac0",
+	.class		= &dm814x_cpgmac0_hwmod_class,
+	.clkdm_name	= "alwon_ethernet_clkdm",
+	.flags		= HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+	.main_clk	= "cpsw_125mhz_gclk",
+	.prcm		= {
+		.omap4	= {
+			.clkctrl_offs = DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL,
+			.modulemode = MODULEMODE_SWCTRL,
+		},
+	},
+};
+
+static struct omap_hwmod_class dm814x_mdio_hwmod_class = {
+	.name		= "davinci_mdio",
+};
+
+static struct omap_hwmod dm814x_mdio_hwmod = {
+	.name		= "davinci_mdio",
+	.class		= &dm814x_mdio_hwmod_class,
+	.clkdm_name	= "alwon_ethernet_clkdm",
+	.main_clk	= "cpsw_125mhz_gclk",
+};
+
+static struct omap_hwmod_ocp_if dm814x_l4_hs__cpgmac0 = {
+	.master		= &dm81xx_l4_hs_hwmod,
+	.slave		= &dm814x_cpgmac0_hwmod,
+	.clk		= "cpsw_125mhz_gclk",
+	.user		= OCP_USER_MPU,
+};
+
+static struct omap_hwmod_ocp_if dm814x_cpgmac0__mdio = {
+	.master		= &dm814x_cpgmac0_hwmod,
+	.slave		= &dm814x_mdio_hwmod,
+	.user		= OCP_USER_MPU,
+	.flags		= HWMOD_NO_IDLEST,
+};
+
 /* EMAC Ethernet */
 static struct omap_hwmod_class_sysconfig dm816x_emac_sysc = {
 	.rev_offs	= 0x0,
@@ -717,21 +852,21 @@
 	.class		= &dm816x_emac_hwmod_class,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_hs__emac0 = {
-	.master		= &dm816x_l4_hs_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = {
+	.master		= &dm81xx_l4_hs_hwmod,
 	.slave		= &dm816x_emac0_hwmod,
 	.clk		= "sysclk5_ck",
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_class dm816x_mdio_hwmod_class = {
+static struct omap_hwmod_class dm81xx_mdio_hwmod_class = {
 	.name		= "davinci_mdio",
 	.sysc		= &dm816x_emac_sysc,
 };
 
-struct omap_hwmod dm816x_emac0_mdio_hwmod = {
+static struct omap_hwmod dm81xx_emac0_mdio_hwmod = {
 	.name		= "davinci_mdio",
-	.class		= &dm816x_mdio_hwmod_class,
+	.class		= &dm81xx_mdio_hwmod_class,
 	.clkdm_name	= "alwon_ethernet_clkdm",
 	.main_clk	= "sysclk24_ck",
 	.flags		= HWMOD_NO_IDLEST,
@@ -741,15 +876,15 @@
 	 */
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_ETHERNET_0_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_ETHERNET_0_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm816x_emac0__mdio = {
-	.master		= &dm816x_l4_hs_hwmod,
-	.slave		= &dm816x_emac0_mdio_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_emac0__mdio = {
+	.master		= &dm81xx_l4_hs_hwmod,
+	.slave		= &dm81xx_emac0_mdio_hwmod,
 	.user		= OCP_USER_MPU,
 };
 
@@ -768,7 +903,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_hs__emac1 = {
-	.master		= &dm816x_l4_hs_hwmod,
+	.master		= &dm81xx_l4_hs_hwmod,
 	.slave		= &dm816x_emac1_hwmod,
 	.clk		= "sysclk5_ck",
 	.user		= OCP_USER_MPU,
@@ -815,7 +950,7 @@
 };
 
 static struct omap_hwmod_ocp_if dm816x_l4_ls__mmc1 = {
-	.master		= &dm816x_l4_ls_hwmod,
+	.master		= &dm81xx_l4_ls_hwmod,
 	.slave		= &dm816x_mmc1_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
@@ -843,13 +978,13 @@
 	.num_chipselect = 4,
 };
 
-static struct omap_hwmod dm816x_mcspi1_hwmod = {
+static struct omap_hwmod dm81xx_mcspi1_hwmod = {
 	.name		= "mcspi1",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk10_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_SPI_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_SPI_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
@@ -857,14 +992,14 @@
 	.dev_attr	= &dm816x_mcspi1_dev_attr,
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__mcspi1 = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_mcspi1_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__mcspi1 = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_mcspi1_hwmod,
 	.clk		= "sysclk6_ck",
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_class_sysconfig dm816x_mailbox_sysc = {
+static struct omap_hwmod_class_sysconfig dm81xx_mailbox_sysc = {
 	.rev_offs	= 0x000,
 	.sysc_offs	= 0x010,
 	.syss_offs	= 0x014,
@@ -874,55 +1009,55 @@
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
 
-static struct omap_hwmod_class dm816x_mailbox_hwmod_class = {
+static struct omap_hwmod_class dm81xx_mailbox_hwmod_class = {
 	.name = "mailbox",
-	.sysc = &dm816x_mailbox_sysc,
+	.sysc = &dm81xx_mailbox_sysc,
 };
 
-static struct omap_hwmod dm816x_mailbox_hwmod = {
+static struct omap_hwmod dm81xx_mailbox_hwmod = {
 	.name		= "mailbox",
 	.clkdm_name	= "alwon_l3s_clkdm",
-	.class		= &dm816x_mailbox_hwmod_class,
+	.class		= &dm81xx_mailbox_hwmod_class,
 	.main_clk	= "sysclk6_ck",
 	.prcm		= {
 		.omap4 = {
-			.clkctrl_offs = DM816X_CM_ALWON_MAILBOX_CLKCTRL,
+			.clkctrl_offs = DM81XX_CM_ALWON_MAILBOX_CLKCTRL,
 			.modulemode = MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-static struct omap_hwmod_ocp_if dm816x_l4_ls__mailbox = {
-	.master		= &dm816x_l4_ls_hwmod,
-	.slave		= &dm816x_mailbox_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_l4_ls__mailbox = {
+	.master		= &dm81xx_l4_ls_hwmod,
+	.slave		= &dm81xx_mailbox_hwmod,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_class dm816x_tpcc_hwmod_class = {
+static struct omap_hwmod_class dm81xx_tpcc_hwmod_class = {
 	.name		= "tpcc",
 };
 
-struct omap_hwmod dm816x_tpcc_hwmod = {
+static struct omap_hwmod dm81xx_tpcc_hwmod = {
 	.name		= "tpcc",
-	.class		= &dm816x_tpcc_hwmod_class,
+	.class		= &dm81xx_tpcc_hwmod_class,
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk4_ck",
 	.prcm		= {
 		.omap4	= {
-			.clkctrl_offs	= DM816X_CM_ALWON_TPCC_CLKCTRL,
+			.clkctrl_offs	= DM81XX_CM_ALWON_TPCC_CLKCTRL,
 			.modulemode	= MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tpcc = {
-	.master		= &dm816x_alwon_l3_fast_hwmod,
-	.slave		= &dm816x_tpcc_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tpcc = {
+	.master		= &dm81xx_alwon_l3_fast_hwmod,
+	.slave		= &dm81xx_tpcc_hwmod,
 	.clk		= "sysclk4_ck",
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space dm816x_tptc0_addr_space[] = {
+static struct omap_hwmod_addr_space dm81xx_tptc0_addr_space[] = {
 	{
 		.pa_start	= 0x49800000,
 		.pa_end		= 0x49800000 + SZ_8K - 1,
@@ -931,40 +1066,40 @@
 	{ },
 };
 
-static struct omap_hwmod_class dm816x_tptc0_hwmod_class = {
+static struct omap_hwmod_class dm81xx_tptc0_hwmod_class = {
 	.name		= "tptc0",
 };
 
-struct omap_hwmod dm816x_tptc0_hwmod = {
+static struct omap_hwmod dm81xx_tptc0_hwmod = {
 	.name		= "tptc0",
-	.class		= &dm816x_tptc0_hwmod_class,
+	.class		= &dm81xx_tptc0_hwmod_class,
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk4_ck",
 	.prcm		= {
 		.omap4	= {
-			.clkctrl_offs	= DM816X_CM_ALWON_TPTC0_CLKCTRL,
+			.clkctrl_offs	= DM81XX_CM_ALWON_TPTC0_CLKCTRL,
 			.modulemode	= MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc0 = {
-	.master		= &dm816x_alwon_l3_fast_hwmod,
-	.slave		= &dm816x_tptc0_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc0 = {
+	.master		= &dm81xx_alwon_l3_fast_hwmod,
+	.slave		= &dm81xx_tptc0_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc0_addr_space,
+	.addr		= dm81xx_tptc0_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-struct omap_hwmod_ocp_if dm816x_tptc0__alwon_l3_fast = {
-	.master		= &dm816x_tptc0_hwmod,
-	.slave		= &dm816x_alwon_l3_fast_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_tptc0__alwon_l3_fast = {
+	.master		= &dm81xx_tptc0_hwmod,
+	.slave		= &dm81xx_alwon_l3_fast_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc0_addr_space,
+	.addr		= dm81xx_tptc0_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space dm816x_tptc1_addr_space[] = {
+static struct omap_hwmod_addr_space dm81xx_tptc1_addr_space[] = {
 	{
 		.pa_start	= 0x49900000,
 		.pa_end		= 0x49900000 + SZ_8K - 1,
@@ -973,40 +1108,40 @@
 	{ },
 };
 
-static struct omap_hwmod_class dm816x_tptc1_hwmod_class = {
+static struct omap_hwmod_class dm81xx_tptc1_hwmod_class = {
 	.name		= "tptc1",
 };
 
-struct omap_hwmod dm816x_tptc1_hwmod = {
+static struct omap_hwmod dm81xx_tptc1_hwmod = {
 	.name		= "tptc1",
-	.class		= &dm816x_tptc1_hwmod_class,
+	.class		= &dm81xx_tptc1_hwmod_class,
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk4_ck",
 	.prcm		= {
 		.omap4	= {
-			.clkctrl_offs	= DM816X_CM_ALWON_TPTC1_CLKCTRL,
+			.clkctrl_offs	= DM81XX_CM_ALWON_TPTC1_CLKCTRL,
 			.modulemode	= MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc1 = {
-	.master		= &dm816x_alwon_l3_fast_hwmod,
-	.slave		= &dm816x_tptc1_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc1 = {
+	.master		= &dm81xx_alwon_l3_fast_hwmod,
+	.slave		= &dm81xx_tptc1_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc1_addr_space,
+	.addr		= dm81xx_tptc1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-struct omap_hwmod_ocp_if dm816x_tptc1__alwon_l3_fast = {
-	.master		= &dm816x_tptc1_hwmod,
-	.slave		= &dm816x_alwon_l3_fast_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_tptc1__alwon_l3_fast = {
+	.master		= &dm81xx_tptc1_hwmod,
+	.slave		= &dm81xx_alwon_l3_fast_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc1_addr_space,
+	.addr		= dm81xx_tptc1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space dm816x_tptc2_addr_space[] = {
+static struct omap_hwmod_addr_space dm81xx_tptc2_addr_space[] = {
 	{
 		.pa_start	= 0x49a00000,
 		.pa_end		= 0x49a00000 + SZ_8K - 1,
@@ -1015,40 +1150,40 @@
 	{ },
 };
 
-static struct omap_hwmod_class dm816x_tptc2_hwmod_class = {
+static struct omap_hwmod_class dm81xx_tptc2_hwmod_class = {
 	.name		= "tptc2",
 };
 
-struct omap_hwmod dm816x_tptc2_hwmod = {
+static struct omap_hwmod dm81xx_tptc2_hwmod = {
 	.name		= "tptc2",
-	.class		= &dm816x_tptc2_hwmod_class,
+	.class		= &dm81xx_tptc2_hwmod_class,
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk4_ck",
 	.prcm		= {
 		.omap4	= {
-			.clkctrl_offs	= DM816X_CM_ALWON_TPTC2_CLKCTRL,
+			.clkctrl_offs	= DM81XX_CM_ALWON_TPTC2_CLKCTRL,
 			.modulemode	= MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc2 = {
-	.master		= &dm816x_alwon_l3_fast_hwmod,
-	.slave		= &dm816x_tptc2_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc2 = {
+	.master		= &dm81xx_alwon_l3_fast_hwmod,
+	.slave		= &dm81xx_tptc2_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc2_addr_space,
+	.addr		= dm81xx_tptc2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-struct omap_hwmod_ocp_if dm816x_tptc2__alwon_l3_fast = {
-	.master		= &dm816x_tptc2_hwmod,
-	.slave		= &dm816x_alwon_l3_fast_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_tptc2__alwon_l3_fast = {
+	.master		= &dm81xx_tptc2_hwmod,
+	.slave		= &dm81xx_alwon_l3_fast_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc2_addr_space,
+	.addr		= dm81xx_tptc2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-static struct omap_hwmod_addr_space dm816x_tptc3_addr_space[] = {
+static struct omap_hwmod_addr_space dm81xx_tptc3_addr_space[] = {
 	{
 		.pa_start	= 0x49b00000,
 		.pa_end		= 0x49b00000 + SZ_8K - 1,
@@ -1057,50 +1192,96 @@
 	{ },
 };
 
-static struct omap_hwmod_class dm816x_tptc3_hwmod_class = {
+static struct omap_hwmod_class dm81xx_tptc3_hwmod_class = {
 	.name		= "tptc3",
 };
 
-struct omap_hwmod dm816x_tptc3_hwmod = {
+static struct omap_hwmod dm81xx_tptc3_hwmod = {
 	.name		= "tptc3",
-	.class		= &dm816x_tptc3_hwmod_class,
+	.class		= &dm81xx_tptc3_hwmod_class,
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.main_clk	= "sysclk4_ck",
 	.prcm		= {
 		.omap4	= {
-			.clkctrl_offs	= DM816X_CM_ALWON_TPTC3_CLKCTRL,
+			.clkctrl_offs	= DM81XX_CM_ALWON_TPTC3_CLKCTRL,
 			.modulemode	= MODULEMODE_SWCTRL,
 		},
 	},
 };
 
-struct omap_hwmod_ocp_if dm816x_alwon_l3_fast__tptc3 = {
-	.master		= &dm816x_alwon_l3_fast_hwmod,
-	.slave		= &dm816x_tptc3_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_alwon_l3_fast__tptc3 = {
+	.master		= &dm81xx_alwon_l3_fast_hwmod,
+	.slave		= &dm81xx_tptc3_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc3_addr_space,
+	.addr		= dm81xx_tptc3_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
-struct omap_hwmod_ocp_if dm816x_tptc3__alwon_l3_fast = {
-	.master		= &dm816x_tptc3_hwmod,
-	.slave		= &dm816x_alwon_l3_fast_hwmod,
+static struct omap_hwmod_ocp_if dm81xx_tptc3__alwon_l3_fast = {
+	.master		= &dm81xx_tptc3_hwmod,
+	.slave		= &dm81xx_alwon_l3_fast_hwmod,
 	.clk		= "sysclk4_ck",
-	.addr		= dm816x_tptc3_addr_space,
+	.addr		= dm81xx_tptc3_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
+/*
+ * REVISIT: Test and enable the following once clocks work:
+ * dm81xx_l4_ls__gpio1
+ * dm81xx_l4_ls__gpio2
+ * dm81xx_l4_ls__mailbox
+ * dm81xx_alwon_l3_slow__gpmc
+ * dm81xx_default_l3_slow__usbss
+ *
+ * Also note that some devices share a single clkctrl_offs.
+ * For example, i2c1 and i2c3 share one, and i2c2 and i2c4 share one.
+ */
+static struct omap_hwmod_ocp_if *dm814x_hwmod_ocp_ifs[] __initdata = {
+	&dm814x_mpu__alwon_l3_slow,
+	&dm814x_mpu__alwon_l3_med,
+	&dm81xx_alwon_l3_slow__l4_ls,
+	&dm81xx_alwon_l3_slow__l4_hs,
+	&dm81xx_l4_ls__uart1,
+	&dm81xx_l4_ls__uart2,
+	&dm81xx_l4_ls__uart3,
+	&dm81xx_l4_ls__wd_timer1,
+	&dm81xx_l4_ls__i2c1,
+	&dm81xx_l4_ls__i2c2,
+	&dm81xx_l4_ls__elm,
+	&dm81xx_l4_ls__mcspi1,
+	&dm81xx_alwon_l3_fast__tpcc,
+	&dm81xx_alwon_l3_fast__tptc0,
+	&dm81xx_alwon_l3_fast__tptc1,
+	&dm81xx_alwon_l3_fast__tptc2,
+	&dm81xx_alwon_l3_fast__tptc3,
+	&dm81xx_tptc0__alwon_l3_fast,
+	&dm81xx_tptc1__alwon_l3_fast,
+	&dm81xx_tptc2__alwon_l3_fast,
+	&dm81xx_tptc3__alwon_l3_fast,
+	&dm814x_l4_ls__timer1,
+	&dm814x_l4_ls__timer2,
+	&dm814x_l4_hs__cpgmac0,
+	&dm814x_cpgmac0__mdio,
+	NULL,
+};
+
+int __init dm814x_hwmod_init(void)
+{
+	omap_hwmod_init();
+	return omap_hwmod_register_links(dm814x_hwmod_ocp_ifs);
+}
+
 static struct omap_hwmod_ocp_if *dm816x_hwmod_ocp_ifs[] __initdata = {
 	&dm816x_mpu__alwon_l3_slow,
 	&dm816x_mpu__alwon_l3_med,
-	&dm816x_alwon_l3_slow__l4_ls,
-	&dm816x_alwon_l3_slow__l4_hs,
-	&dm816x_l4_ls__uart1,
-	&dm816x_l4_ls__uart2,
-	&dm816x_l4_ls__uart3,
-	&dm816x_l4_ls__wd_timer1,
-	&dm816x_l4_ls__i2c1,
-	&dm816x_l4_ls__i2c2,
+	&dm81xx_alwon_l3_slow__l4_ls,
+	&dm81xx_alwon_l3_slow__l4_hs,
+	&dm81xx_l4_ls__uart1,
+	&dm81xx_l4_ls__uart2,
+	&dm81xx_l4_ls__uart3,
+	&dm81xx_l4_ls__wd_timer1,
+	&dm81xx_l4_ls__i2c1,
+	&dm81xx_l4_ls__i2c2,
 	&dm81xx_l4_ls__gpio1,
 	&dm81xx_l4_ls__gpio2,
 	&dm81xx_l4_ls__elm,
@@ -1112,26 +1293,26 @@
 	&dm816x_l4_ls__timer5,
 	&dm816x_l4_ls__timer6,
 	&dm816x_l4_ls__timer7,
-	&dm816x_l4_ls__mcspi1,
-	&dm816x_l4_ls__mailbox,
-	&dm816x_l4_hs__emac0,
-	&dm816x_emac0__mdio,
+	&dm81xx_l4_ls__mcspi1,
+	&dm81xx_l4_ls__mailbox,
+	&dm81xx_l4_hs__emac0,
+	&dm81xx_emac0__mdio,
 	&dm816x_l4_hs__emac1,
-	&dm816x_alwon_l3_fast__tpcc,
-	&dm816x_alwon_l3_fast__tptc0,
-	&dm816x_alwon_l3_fast__tptc1,
-	&dm816x_alwon_l3_fast__tptc2,
-	&dm816x_alwon_l3_fast__tptc3,
-	&dm816x_tptc0__alwon_l3_fast,
-	&dm816x_tptc1__alwon_l3_fast,
-	&dm816x_tptc2__alwon_l3_fast,
-	&dm816x_tptc3__alwon_l3_fast,
+	&dm81xx_alwon_l3_fast__tpcc,
+	&dm81xx_alwon_l3_fast__tptc0,
+	&dm81xx_alwon_l3_fast__tptc1,
+	&dm81xx_alwon_l3_fast__tptc2,
+	&dm81xx_alwon_l3_fast__tptc3,
+	&dm81xx_tptc0__alwon_l3_fast,
+	&dm81xx_tptc1__alwon_l3_fast,
+	&dm81xx_tptc2__alwon_l3_fast,
+	&dm81xx_tptc3__alwon_l3_fast,
 	&dm81xx_alwon_l3_slow__gpmc,
 	&dm81xx_default_l3_slow__usbss,
 	NULL,
 };
 
-int __init ti81xx_hwmod_init(void)
+int __init dm816x_hwmod_init(void)
 {
 	omap_hwmod_init();
 	return omap_hwmod_register_links(dm816x_hwmod_ocp_ifs);
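
The renamed DM81XX_CM_ALWON_* constants are numerically identical to the old dm816x values, since those were already expressed as the absolute TRM address minus the 0x1400 CM_ALWON base; for example:

	/* old dm816x form */
	#define DM816X_CM_ALWON_UART_0_CLKCTRL	(0x1550 - 0x1400)	/* = 0x150 */
	/* new shared form */
	#define DM81XX_CM_ALWON_UART_0_CLKCTRL	0x150

This is what lets the dm814x hwmod list reuse the shared dm81xx_* interconnect links, with only the SoC-specific MPU, timer and CPSW entries differing.
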
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 821171c..ea56397 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -14,9 +14,18 @@
 #include <linux/kernel.h>
 #include <linux/of_platform.h>
 #include <linux/ti_wilink_st.h>
+#include <linux/wl12xx.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
 
 #include <linux/platform_data/pinctrl-single.h>
 #include <linux/platform_data/iommu-omap.h>
+#include <linux/platform_data/wkup_m3.h>
+
+#include <asm/siginfo.h>
+#include <asm/signal.h>
 
 #include "common.h"
 #include "common-board-devices.h"
@@ -25,13 +34,14 @@
 #include "omap_device.h"
 #include "omap-secure.h"
 #include "soc.h"
+#include "hsmmc.h"
 
 struct pdata_init {
 	const char *compatible;
 	void (*fn)(void);
 };
 
-struct of_dev_auxdata omap_auxdata_lookup[];
+static struct of_dev_auxdata omap_auxdata_lookup[];
 static struct twl4030_gpio_platform_data twl_gpio_auxdata;
 
 #ifdef CONFIG_MACH_NOKIA_N8X0
@@ -128,7 +138,7 @@
 	omap3_sbc_t3x_usb_hub_init(167, "sb-t35 usb hub");
 }
 
-struct ti_st_plat_data wilink_pdata = {
+static struct ti_st_plat_data wilink_pdata = {
 	.nshutdown_gpio = 137,
 	.dev_name = "/dev/ttyO1",
 	.flow_cntrl = 1,
@@ -268,8 +278,136 @@
 {
 	hsmmc2_internal_input_clk();
 }
+
+/* omap3pandora legacy devices */
+#define PANDORA_WIFI_IRQ_GPIO		21
+#define PANDORA_WIFI_NRESET_GPIO	23
+
+static struct platform_device pandora_backlight = {
+	.name	= "pandora-backlight",
+	.id	= -1,
+};
+
+static struct regulator_consumer_supply pandora_vmmc3_supply[] = {
+	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.2"),
+};
+
+static struct regulator_init_data pandora_vmmc3 = {
+	.constraints = {
+		.valid_ops_mask		= REGULATOR_CHANGE_STATUS,
+	},
+	.num_consumer_supplies	= ARRAY_SIZE(pandora_vmmc3_supply),
+	.consumer_supplies	= pandora_vmmc3_supply,
+};
+
+static struct fixed_voltage_config pandora_vwlan = {
+	.supply_name		= "vwlan",
+	.microvolts		= 1800000, /* 1.8V */
+	.gpio			= PANDORA_WIFI_NRESET_GPIO,
+	.startup_delay		= 50000, /* 50ms */
+	.enable_high		= 1,
+	.init_data		= &pandora_vmmc3,
+};
+
+static struct platform_device pandora_vwlan_device = {
+	.name		= "reg-fixed-voltage",
+	.id		= 1,
+	.dev = {
+		.platform_data = &pandora_vwlan,
+	},
+};
+
+static void pandora_wl1251_init_card(struct mmc_card *card)
+{
+	/*
+	 * We have a TI wl1251 attached to MMC3. Pass this information to
+	 * the SDIO core because it can't be probed by normal methods.
+	 */
+	if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
+		card->quirks |= MMC_QUIRK_NONSTD_SDIO;
+		card->cccr.wide_bus = 1;
+		card->cis.vendor = 0x104c;
+		card->cis.device = 0x9066;
+		card->cis.blksize = 512;
+		card->cis.max_dtr = 24000000;
+		card->ocr = 0x80;
+	}
+}
+
+static struct omap2_hsmmc_info pandora_mmc3[] = {
+	{
+		.mmc		= 3,
+		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
+		.gpio_cd	= -EINVAL,
+		.gpio_wp	= -EINVAL,
+		.init_card	= pandora_wl1251_init_card,
+	},
+	{}	/* Terminator */
+};
+
+static void __init pandora_wl1251_init(void)
+{
+	struct wl1251_platform_data pandora_wl1251_pdata;
+	int ret;
+
+	memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
+
+	pandora_wl1251_pdata.power_gpio = -1;
+
+	ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
+	if (ret < 0)
+		goto fail;
+
+	pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
+	if (pandora_wl1251_pdata.irq < 0)
+		goto fail_irq;
+
+	pandora_wl1251_pdata.use_eeprom = true;
+	ret = wl1251_set_platform_data(&pandora_wl1251_pdata);
+	if (ret < 0)
+		goto fail_irq;
+
+	return;
+
+fail_irq:
+	gpio_free(PANDORA_WIFI_IRQ_GPIO);
+fail:
+	pr_err("wl1251 board initialisation failed\n");
+}
+
+static void __init omap3_pandora_legacy_init(void)
+{
+	platform_device_register(&pandora_backlight);
+	platform_device_register(&pandora_vwlan_device);
+	omap_hsmmc_init(pandora_mmc3);
+	omap_hsmmc_late_init(pandora_mmc3);
+	pandora_wl1251_init();
+}
 #endif /* CONFIG_ARCH_OMAP3 */
 
+#ifdef CONFIG_SOC_TI81XX
+static int fault_fixed_up;
+
+static int t410_abort_handler(unsigned long addr, unsigned int fsr,
+			      struct pt_regs *regs)
+{
+	if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) {
+		pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
+			addr, fsr);
+		fault_fixed_up = 1;
+		return 0;
+	}
+
+	return 1;
+}
+
+static void __init t410_abort_init(void)
+{
+	hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR,
+			"imprecise external abort");
+}
+#endif
+
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
 static struct iommu_platform_data omap4_iommu_pdata = {
 	.reset_name = "mmu_cache",
@@ -278,6 +416,14 @@
 };
 #endif
 
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
+static struct wkup_m3_platform_data wkup_m3_data = {
+	.reset_name = "wkup_m3",
+	.assert_reset = omap_device_assert_hardreset,
+	.deassert_reset = omap_device_deassert_hardreset,
+};
+#endif
+
 #ifdef CONFIG_SOC_OMAP5
 static void __init omap5_uevm_legacy_init(void)
 {
@@ -323,7 +469,7 @@
 	{ /* sentinel */ },
 };
 
-struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
+static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 #ifdef CONFIG_MACH_NOKIA_N8X0
 	OF_DEV_AUXDATA("ti,omap2420-mmc", 0x4809c000, "mmci-omap.0", NULL),
 	OF_DEV_AUXDATA("menelaus", 0x72, "1-0072", &n8x0_menelaus_platform_data),
@@ -340,6 +486,10 @@
 	OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
 		       &am35xx_emac_pdata),
 #endif
+#ifdef CONFIG_SOC_AM33XX
+	OF_DEV_AUXDATA("ti,am3352-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
+		       &wkup_m3_data),
+#endif
 #ifdef CONFIG_ARCH_OMAP4
 	OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a100040, "4a100040.pinmux", &pcs_pdata),
 	OF_DEV_AUXDATA("ti,omap4-padconf", 0x4a31e040, "4a31e040.pinmux", &pcs_pdata),
@@ -353,6 +503,8 @@
 #endif
 #ifdef CONFIG_SOC_AM43XX
 	OF_DEV_AUXDATA("ti,am437-padconf", 0x44e10800, "44e10800.pinmux", &pcs_pdata),
+	OF_DEV_AUXDATA("ti,am4372-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
+		       &wkup_m3_data),
 #endif
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
 	OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu",
@@ -381,6 +533,11 @@
 	{ "ti,omap3-evm-37xx", omap3_evm_legacy_init, },
 	{ "ti,am3517-evm", am3517_evm_legacy_init, },
 	{ "technexion,omap3-tao3530", omap3_tao3530_legacy_init, },
+	{ "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, },
+	{ "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, },
+#endif
+#ifdef CONFIG_SOC_TI81XX
+	{ "hp,t410", t410_abort_init, },
 #endif
 #ifdef CONFIG_SOC_OMAP5
 	{ "ti,omap5-uevm", omap5_uevm_legacy_init, },
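
The new hp,t410 and openpandora entries are picked up through the same compatible-keyed pdata_init table as the existing quirks. A rough sketch of how such a table is typically walked at init time (the helper name pdata_quirks_check() is an assumption, not something introduced by this patch):

	/* sketch: walking a compatible-keyed quirk table */
	static void __init pdata_quirks_check(struct pdata_init *quirks)
	{
		while (quirks->compatible) {
			if (of_machine_is_compatible(quirks->compatible)) {
				if (quirks->fn)
					quirks->fn();
			}
			quirks++;
		}
	}
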
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index b1aad7e..2a1a418 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -25,6 +25,7 @@
 #include <linux/sysfs.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/irq.h>
 #include <linux/time.h>
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index 70bc706..d31c495 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -349,6 +349,41 @@
 	.voltdm		  = { .name = "core" },
 };
 
+static struct powerdomain gem_814x_pwrdm = {
+	.name		= "gem_pwrdm",
+	.prcm_offs	= TI814X_PRM_DSP_MOD,
+	.pwrsts		= PWRSTS_OFF_ON,
+	.voltdm		= { .name = "dsp" },
+};
+
+static struct powerdomain ivahd_814x_pwrdm = {
+	.name		= "ivahd_pwrdm",
+	.prcm_offs	= TI814X_PRM_HDVICP_MOD,
+	.pwrsts		= PWRSTS_OFF_ON,
+	.voltdm		= { .name = "iva" },
+};
+
+static struct powerdomain hdvpss_814x_pwrdm = {
+	.name		= "hdvpss_pwrdm",
+	.prcm_offs	= TI814X_PRM_HDVPSS_MOD,
+	.pwrsts		= PWRSTS_OFF_ON,
+	.voltdm		= { .name = "dsp" },
+};
+
+static struct powerdomain sgx_814x_pwrdm = {
+	.name		= "sgx_pwrdm",
+	.prcm_offs	= TI814X_PRM_GFX_MOD,
+	.pwrsts		= PWRSTS_OFF_ON,
+	.voltdm		= { .name = "core" },
+};
+
+static struct powerdomain isp_814x_pwrdm = {
+	.name		= "isp_pwrdm",
+	.prcm_offs	= TI814X_PRM_ISP_MOD,
+	.pwrsts		= PWRSTS_OFF_ON,
+	.voltdm		= { .name = "core" },
+};
+
 static struct powerdomain active_816x_pwrdm = {
 	.name		  = "active_pwrdm",
 	.prcm_offs	  = TI816X_PRM_ACTIVE_MOD,
@@ -448,7 +483,18 @@
 	NULL
 };
 
-static struct powerdomain *powerdomains_ti81xx[] __initdata = {
+static struct powerdomain *powerdomains_ti814x[] __initdata = {
+	&alwon_81xx_pwrdm,
+	&device_81xx_pwrdm,
+	&gem_814x_pwrdm,
+	&ivahd_814x_pwrdm,
+	&hdvpss_814x_pwrdm,
+	&sgx_814x_pwrdm,
+	&isp_814x_pwrdm,
+	NULL
+};
+
+static struct powerdomain *powerdomains_ti816x[] __initdata = {
 	&alwon_81xx_pwrdm,
 	&device_81xx_pwrdm,
 	&active_816x_pwrdm,
@@ -460,6 +506,73 @@
 	NULL
 };
 
+/* TI81XX specific ops */
+#define TI81XX_PM_PWSTCTRL				0x0000
+#define TI81XX_RM_RSTCTRL				0x0010
+#define TI81XX_PM_PWSTST				0x0004
+
+static int ti81xx_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+{
+	omap2_prm_rmw_mod_reg_bits(OMAP_POWERSTATE_MASK,
+				   (pwrst << OMAP_POWERSTATE_SHIFT),
+				   pwrdm->prcm_offs, TI81XX_PM_PWSTCTRL);
+	return 0;
+}
+
+static int ti81xx_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+					     TI81XX_PM_PWSTCTRL,
+					     OMAP_POWERSTATE_MASK);
+}
+
+static int ti81xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+		(pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL :
+					     TI81XX_PM_PWSTST,
+					     OMAP_POWERSTATEST_MASK);
+}
+
+static int ti81xx_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
+{
+	return omap2_prm_read_mod_bits_shift(pwrdm->prcm_offs,
+		(pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL :
+					     TI81XX_PM_PWSTST,
+					     OMAP3430_LOGICSTATEST_MASK);
+}
+
+static int ti81xx_pwrdm_wait_transition(struct powerdomain *pwrdm)
+{
+	u32 c = 0;
+
+	while ((omap2_prm_read_mod_reg(pwrdm->prcm_offs,
+		(pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ? TI81XX_RM_RSTCTRL :
+				       TI81XX_PM_PWSTST) &
+		OMAP_INTRANSITION_MASK) &&
+		(c++ < PWRDM_TRANSITION_BAILOUT))
+			udelay(1);
+
+	if (c > PWRDM_TRANSITION_BAILOUT) {
+		pr_err("powerdomain: %s timeout waiting for transition\n",
+		       pwrdm->name);
+		return -EAGAIN;
+	}
+
+	pr_debug("powerdomain: completed transition in %d loops\n", c);
+
+	return 0;
+}
+
+/* For dm814x we need to fix up the GFX pwstst and rstctrl reg offsets */
+static struct pwrdm_ops ti81xx_pwrdm_operations = {
+	.pwrdm_set_next_pwrst	= ti81xx_pwrdm_set_next_pwrst,
+	.pwrdm_read_next_pwrst	= ti81xx_pwrdm_read_next_pwrst,
+	.pwrdm_read_pwrst	= ti81xx_pwrdm_read_pwrst,
+	.pwrdm_read_logic_pwrst	= ti81xx_pwrdm_read_logic_pwrst,
+	.pwrdm_wait_transition	= ti81xx_pwrdm_wait_transition,
+};
+
 void __init omap3xxx_powerdomains_init(void)
 {
 	unsigned int rev;
@@ -467,15 +580,22 @@
 	if (!cpu_is_omap34xx() && !cpu_is_ti81xx())
 		return;
 
-	pwrdm_register_platform_funcs(&omap3_pwrdm_operations);
+	/* Only 81xx needs custom pwrdm_operations */
+	if (!cpu_is_ti81xx())
+		pwrdm_register_platform_funcs(&omap3_pwrdm_operations);
 
 	rev = omap_rev();
 
 	if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
 		pwrdm_register_pwrdms(powerdomains_am35x);
+	} else if (rev == TI8148_REV_ES1_0 || rev == TI8148_REV_ES2_0 ||
+		   rev == TI8148_REV_ES2_1) {
+		pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations);
+		pwrdm_register_pwrdms(powerdomains_ti814x);
 	} else if (rev == TI8168_REV_ES1_0 || rev == TI8168_REV_ES1_1
 			|| rev == TI8168_REV_ES2_0 || rev == TI8168_REV_ES2_1) {
-		pwrdm_register_pwrdms(powerdomains_ti81xx);
+		pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations);
+		pwrdm_register_pwrdms(powerdomains_ti816x);
 	} else {
 		pwrdm_register_pwrdms(powerdomains_omap3430_common);
 
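
Since dm814x reports the GFX domain state through RM_RSTCTRL rather than PM_PWSTST, the read hooks above each repeat the same ternary; if that pattern grows it could be factored into a small helper along these lines (a sketch, not part of the patch):

	/* sketch only: pick the status register offset per powerdomain */
	static u16 ti81xx_pwrstst_offs(struct powerdomain *pwrdm)
	{
		return (pwrdm->prcm_offs == TI814X_PRM_GFX_MOD) ?
			TI81XX_RM_RSTCTRL : TI81XX_PM_PWSTST;
	}
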
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 6ae0b3a..c8f590b 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -51,6 +51,12 @@
 /*
  * TI81XX PRM module offsets
  */
+#define TI814X_PRM_DSP_MOD				0x0a00
+#define TI814X_PRM_HDVICP_MOD				0x0c00
+#define TI814X_PRM_ISP_MOD				0x0d00
+#define TI814X_PRM_HDVPSS_MOD				0x0e00
+#define TI814X_PRM_GFX_MOD				0x0f00
+
 #define TI81XX_PRM_DEVICE_MOD			0x0000
 #define TI816X_PRM_ACTIVE_MOD			0x0a00
 #define TI81XX_PRM_DEFAULT_MOD			0x0b00
@@ -472,6 +478,7 @@
  * struct omap_prcm_irq_setup - PRCM interrupt controller details
  * @ack: PRM register offset for the first PRM_IRQSTATUS_MPU register
  * @mask: PRM register offset for the first PRM_IRQENABLE_MPU register
+ * @pm_ctrl: PRM register offset for the PRM_IO_PMCTRL register
  * @nr_regs: number of PRM_IRQ{STATUS,ENABLE}_MPU* registers
  * @nr_irqs: number of entries in the @irqs array
  * @irqs: ptr to an array of PRCM interrupt bits (see @nr_irqs)
@@ -494,6 +501,7 @@
 struct omap_prcm_irq_setup {
 	u16 ack;
 	u16 mask;
+	u16 pm_ctrl;
 	u8 nr_regs;
 	u8 nr_irqs;
 	const struct omap_prcm_irq *irqs;
diff --git a/arch/arm/mach-omap2/prcm43xx.h b/arch/arm/mach-omap2/prcm43xx.h
index 7eebc27..7c34c44e 100644
--- a/arch/arm/mach-omap2/prcm43xx.h
+++ b/arch/arm/mach-omap2/prcm43xx.h
@@ -25,6 +25,13 @@
 #define AM43XX_PRM_WKUP_INST				0x2000
 #define AM43XX_PRM_DEVICE_INST				0x4000
 
+/* PRM_IRQ offsets */
+#define AM43XX_PRM_IRQSTATUS_MPU_OFFSET			0x0004
+#define AM43XX_PRM_IRQENABLE_MPU_OFFSET			0x0008
+
+/* Other PRM offsets */
+#define AM43XX_PRM_IO_PMCTRL_OFFSET			0x0024
+
 /* RM RSTCTRL offsets */
 #define AM43XX_RM_PER_RSTCTRL_OFFSET			0x0010
 #define AM43XX_RM_GFX_RSTCTRL_OFFSET			0x0010
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 4541700..3076800 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -18,13 +18,14 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/of_irq.h>
-
+#include <linux/of.h>
 
 #include "soc.h"
 #include "iomap.h"
 #include "common.h"
 #include "vp.h"
 #include "prm44xx.h"
+#include "prcm43xx.h"
 #include "prm-regbits-44xx.h"
 #include "prcm44xx.h"
 #include "prminst44xx.h"
@@ -45,6 +46,7 @@
 static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
 	.ack			= OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
 	.mask			= OMAP4_PRM_IRQENABLE_MPU_OFFSET,
+	.pm_ctrl		= OMAP4_PRM_IO_PMCTRL_OFFSET,
 	.nr_regs		= 2,
 	.irqs			= omap4_prcm_irqs,
 	.nr_irqs		= ARRAY_SIZE(omap4_prcm_irqs),
@@ -216,11 +218,11 @@
  */
 static void omap44xx_prm_read_pending_irqs(unsigned long *events)
 {
-	events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET,
-					  OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
+	int i;
 
-	events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET,
-					  OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
+	for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++)
+		events[i] = _read_pending_irq_reg(omap4_prcm_irq_setup.mask +
+				i * 4, omap4_prcm_irq_setup.ack + i * 4);
 }
 
 /**
@@ -250,17 +252,17 @@
  */
 static void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
 {
-	saved_mask[0] =
-		omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
-					OMAP4_PRM_IRQENABLE_MPU_OFFSET);
-	saved_mask[1] =
-		omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
-					OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+	int i;
+	u16 reg;
 
-	omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
-				 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
-	omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST,
-				 OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+	for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++) {
+		reg = omap4_prcm_irq_setup.mask + i * 4;
+
+		saved_mask[i] =
+			omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
+						reg);
+		omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, reg);
+	}
 
 	/* OCP barrier */
 	omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST,
@@ -279,10 +281,12 @@
  */
 static void omap44xx_prm_restore_irqen(u32 *saved_mask)
 {
-	omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_OCP_SOCKET_INST,
-				 OMAP4_PRM_IRQENABLE_MPU_OFFSET);
-	omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_OCP_SOCKET_INST,
-				 OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
+	int i;
+
+	for (i = 0; i < omap4_prcm_irq_setup.nr_regs; i++)
+		omap4_prm_write_inst_reg(saved_mask[i],
+					 OMAP4430_PRM_OCP_SOCKET_INST,
+					 omap4_prcm_irq_setup.mask + i * 4);
 }
 
 /**
@@ -306,10 +310,10 @@
 	omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK,
 				    OMAP4430_WUCLK_CTRL_MASK,
 				    inst,
-				    OMAP4_PRM_IO_PMCTRL_OFFSET);
+				    omap4_prcm_irq_setup.pm_ctrl);
 	omap_test_timeout(
 		(((omap4_prm_read_inst_reg(inst,
-					   OMAP4_PRM_IO_PMCTRL_OFFSET) &
+					   omap4_prcm_irq_setup.pm_ctrl) &
 		   OMAP4430_WUCLK_STATUS_MASK) >>
 		  OMAP4430_WUCLK_STATUS_SHIFT) == 1),
 		MAX_IOPAD_LATCH_TIME, i);
@@ -319,10 +323,10 @@
 	/* Trigger WUCLKIN disable */
 	omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0,
 				    inst,
-				    OMAP4_PRM_IO_PMCTRL_OFFSET);
+				    omap4_prcm_irq_setup.pm_ctrl);
 	omap_test_timeout(
 		(((omap4_prm_read_inst_reg(inst,
-					   OMAP4_PRM_IO_PMCTRL_OFFSET) &
+					   omap4_prcm_irq_setup.pm_ctrl) &
 		   OMAP4430_WUCLK_STATUS_MASK) >>
 		  OMAP4430_WUCLK_STATUS_SHIFT) == 0),
 		MAX_IOPAD_LATCH_TIME, i);
@@ -350,7 +354,7 @@
 	omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK,
 				    OMAP4430_GLOBAL_WUEN_MASK,
 				    inst,
-				    OMAP4_PRM_IO_PMCTRL_OFFSET);
+				    omap4_prcm_irq_setup.pm_ctrl);
 }
 
 /**
@@ -719,6 +723,15 @@
 
 	omap4_prminst_set_prm_dev_inst(data->device_inst_offset);
 
+	/* Add AM437X specific differences */
+	if (of_device_is_compatible(data->np, "ti,am4-prcm")) {
+		omap4_prcm_irq_setup.nr_irqs = 1;
+		omap4_prcm_irq_setup.nr_regs = 1;
+		omap4_prcm_irq_setup.pm_ctrl = AM43XX_PRM_IO_PMCTRL_OFFSET;
+		omap4_prcm_irq_setup.ack = AM43XX_PRM_IRQSTATUS_MPU_OFFSET;
+		omap4_prcm_irq_setup.mask = AM43XX_PRM_IRQENABLE_MPU_OFFSET;
+	}
+
 	return prm_register(&omap44xx_prm_ll_data);
 }
 
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 7add799..257e98c 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -696,6 +696,7 @@
 	.index = TI_CLKM_PRM,
 	.init = omap44xx_prm_init,
 	.device_inst_offset = AM43XX_PRM_DEVICE_INST,
+	.flags = PRM_HAS_IO_WAKEUP,
 };
 #endif
 
@@ -705,7 +706,7 @@
 };
 #endif
 
-static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
+static const struct of_device_id omap_prcm_dt_match_table[] __initconst = {
 #ifdef CONFIG_SOC_AM33XX
 	{ .compatible = "ti,am3-prcm", .data = &am3_prm_data },
 #endif
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb94..9b09d85 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,14 +333,12 @@
 
 #endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */
 
-ENTRY(omap_bus_sync)
-	ret	lr
-ENDPROC(omap_bus_sync)
-
 ENTRY(omap_do_wfi)
 	stmfd	sp!, {lr}
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
 	/* Drain interconnect write buffers. */
-	bl omap_bus_sync
+	bl	omap_interconnect_sync
+#endif
 
 	/*
 	 * Execute an ISB instruction to ensure that all of the
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index cac46d8..e4d8701 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -102,38 +102,38 @@
 	return 0;
 }
 
-static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static int omap2_gp_timer_shutdown(struct clock_event_device *evt)
+{
+	__omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate);
+	return 0;
+}
+
+static int omap2_gp_timer_set_periodic(struct clock_event_device *evt)
 {
 	u32 period;
 
 	__omap_dm_timer_stop(&clkev, OMAP_TIMER_POSTED, clkev.rate);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		period = clkev.rate / HZ;
-		period -= 1;
-		/* Looks like we need to first set the load value separately */
-		__omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG,
-				      0xffffffff - period, OMAP_TIMER_POSTED);
-		__omap_dm_timer_load_start(&clkev,
-					OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
-					0xffffffff - period, OMAP_TIMER_POSTED);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	period = clkev.rate / HZ;
+	period -= 1;
+	/* Looks like we need to first set the load value separately */
+	__omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG, 0xffffffff - period,
+			      OMAP_TIMER_POSTED);
+	__omap_dm_timer_load_start(&clkev,
+				   OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
+				   0xffffffff - period, OMAP_TIMER_POSTED);
+	return 0;
 }
 
 static struct clock_event_device clockevent_gpt = {
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 300,
-	.set_next_event	= omap2_gp_timer_set_next_event,
-	.set_mode	= omap2_gp_timer_set_mode,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 300,
+	.set_next_event		= omap2_gp_timer_set_next_event,
+	.set_state_shutdown	= omap2_gp_timer_shutdown,
+	.set_state_periodic	= omap2_gp_timer_set_periodic,
+	.set_state_oneshot	= omap2_gp_timer_shutdown,
+	.tick_resume		= omap2_gp_timer_shutdown,
 };
 
 static struct property device_disabled = {
@@ -208,8 +208,7 @@
 	/* If we are a secure device, remove any secure timer nodes */
 	if ((omap_type() != OMAP2_DEVICE_TYPE_GP)) {
 		np = omap_get_timer_dt(omap_timer_match, "ti,timer-secure");
-		if (np)
-			of_node_put(np);
+		of_node_put(np);
 	}
 }
 
@@ -649,23 +648,10 @@
 
 #ifdef CONFIG_ARCH_OMAP4
 #ifdef CONFIG_HAVE_ARM_TWD
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, 29);
 void __init omap4_local_timer_init(void)
 {
 	omap4_sync32k_timer_init();
-	/* Local timers are not supprted on OMAP4430 ES1.0 */
-	if (omap_rev() != OMAP4430_REV_ES1_0) {
-		int err;
-
-		if (of_have_populated_dt()) {
-			clocksource_of_init();
-			return;
-		}
-
-		err = twd_local_timer_register(&twd_local_timer);
-		if (err)
-			pr_err("twd_local_timer_register failed %d\n", err);
-	}
+	clocksource_of_init();
 }
 #else
 void __init omap4_local_timer_init(void)
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index 076fd20..e5a35f6 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -563,7 +563,7 @@
 	u8 hsscll_12;
 };
 
-static const __initdata struct i2c_init_data omap4_i2c_timing_data[] = {
+static const struct i2c_init_data omap4_i2c_timing_data[] __initconst = {
 	{
 		.load = 50,
 		.loadbits = 0x3,
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 261bb7c..307676d 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -95,7 +95,7 @@
 };
 
 
-static const char *sys_clk_name __initdata = "sys_ck";
+static const char *const sys_clk_name __initconst = "sys_ck";
 
 void __init omap3xxx_voltagedomains_init(void)
 {
diff --git a/arch/arm/mach-omap2/voltagedomains44xx_data.c b/arch/arm/mach-omap2/voltagedomains44xx_data.c
index 48b22a0..9b1f245 100644
--- a/arch/arm/mach-omap2/voltagedomains44xx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains44xx_data.c
@@ -92,7 +92,7 @@
 	NULL,
 };
 
-static const char *sys_clk_name __initdata = "sys_clkin_ck";
+static const char *const sys_clk_name __initconst = "sys_clkin_ck";
 
 void __init omap44xx_voltagedomains_init(void)
 {
diff --git a/arch/arm/mach-omap2/voltagedomains54xx_data.c b/arch/arm/mach-omap2/voltagedomains54xx_data.c
index 33d22b8..af5ff64 100644
--- a/arch/arm/mach-omap2/voltagedomains54xx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains54xx_data.c
@@ -78,7 +78,7 @@
 	NULL,
 };
 
-static const char *sys_clk_name __initdata = "sys_clkin";
+static const char *const sys_clk_name __initconst = "sys_clkin";
 
 void __init omap54xx_voltagedomains_init(void)
 {
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 2412efb..08d2be2 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -78,11 +78,11 @@
 	  Buffalo Linkstation Live v3 (LS-CHL) platform.
 
 config MACH_LINKSTATION_MINI
-	bool "Buffalo Linkstation Mini"
-	select I2C_BOARDINFO
+	bool "Buffalo Linkstation Mini (Flattened Device Tree)"
+	select ARCH_ORION5X_DT
 	help
 	  Say 'Y' here if you want your kernel to support the
-	  Buffalo Linkstation Mini platform.
+	  Buffalo Linkstation Mini (LS-WSGL) platform.
 
 config MACH_LINKSTATION_LS_HGL
 	bool "Buffalo Linkstation LS-HGL"
diff --git a/arch/arm/mach-orion5x/Makefile b/arch/arm/mach-orion5x/Makefile
index a40b5c9..a1e0fbe 100644
--- a/arch/arm/mach-orion5x/Makefile
+++ b/arch/arm/mach-orion5x/Makefile
@@ -4,7 +4,6 @@
 obj-$(CONFIG_MACH_KUROBOX_PRO)	+= kurobox_pro-setup.o
 obj-$(CONFIG_MACH_TERASTATION_PRO2)	+= terastation_pro2-setup.o
 obj-$(CONFIG_MACH_LINKSTATION_PRO) += kurobox_pro-setup.o
-obj-$(CONFIG_MACH_LINKSTATION_MINI) += lsmini-setup.o
 obj-$(CONFIG_MACH_LINKSTATION_LS_HGL) += ls_hgl-setup.o
 obj-$(CONFIG_MACH_DNS323)	+= dns323-setup.o
 obj-$(CONFIG_MACH_TS209)	+= ts209-setup.o tsx09-common.o
diff --git a/arch/arm/mach-orion5x/board-dt.c b/arch/arm/mach-orion5x/board-dt.c
index 79f033b..d087178 100644
--- a/arch/arm/mach-orion5x/board-dt.c
+++ b/arch/arm/mach-orion5x/board-dt.c
@@ -16,7 +16,6 @@
 #include <linux/of_platform.h>
 #include <linux/cpu.h>
 #include <linux/mbus.h>
-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <asm/system_misc.h>
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 09d2a26..f267e58 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -236,9 +236,7 @@
 	}
 
 	iounmap(mac_page);
-	printk("DNS-323: Found ethernet MAC address: ");
-	for (i = 0; i < 6; i++)
-		printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n");
+	printk("DNS-323: Found ethernet MAC address: %pM\n", addr);
 
 	memcpy(dns323_eth_data.mac_addr, addr, 6);
 
diff --git a/arch/arm/mach-orion5x/include/mach/irqs.h b/arch/arm/mach-orion5x/include/mach/irqs.h
index a6fa9d8..2431d99 100644
--- a/arch/arm/mach-orion5x/include/mach/irqs.h
+++ b/arch/arm/mach-orion5x/include/mach/irqs.h
@@ -16,42 +16,42 @@
 /*
  * Orion Main Interrupt Controller
  */
-#define IRQ_ORION5X_BRIDGE		0
-#define IRQ_ORION5X_DOORBELL_H2C	1
-#define IRQ_ORION5X_DOORBELL_C2H	2
-#define IRQ_ORION5X_UART0		3
-#define IRQ_ORION5X_UART1		4
-#define IRQ_ORION5X_I2C			5
-#define IRQ_ORION5X_GPIO_0_7		6
-#define IRQ_ORION5X_GPIO_8_15		7
-#define IRQ_ORION5X_GPIO_16_23		8
-#define IRQ_ORION5X_GPIO_24_31		9
-#define IRQ_ORION5X_PCIE0_ERR		10
-#define IRQ_ORION5X_PCIE0_INT		11
-#define IRQ_ORION5X_USB1_CTRL		12
-#define IRQ_ORION5X_DEV_BUS_ERR		14
-#define IRQ_ORION5X_PCI_ERR		15
-#define IRQ_ORION5X_USB_BR_ERR		16
-#define IRQ_ORION5X_USB0_CTRL		17
-#define IRQ_ORION5X_ETH_RX		18
-#define IRQ_ORION5X_ETH_TX		19
-#define IRQ_ORION5X_ETH_MISC		20
-#define IRQ_ORION5X_ETH_SUM		21
-#define IRQ_ORION5X_ETH_ERR		22
-#define IRQ_ORION5X_IDMA_ERR		23
-#define IRQ_ORION5X_IDMA_0		24
-#define IRQ_ORION5X_IDMA_1		25
-#define IRQ_ORION5X_IDMA_2		26
-#define IRQ_ORION5X_IDMA_3		27
-#define IRQ_ORION5X_CESA		28
-#define IRQ_ORION5X_SATA		29
-#define IRQ_ORION5X_XOR0		30
-#define IRQ_ORION5X_XOR1		31
+#define IRQ_ORION5X_BRIDGE		(1 + 0)
+#define IRQ_ORION5X_DOORBELL_H2C	(1 + 1)
+#define IRQ_ORION5X_DOORBELL_C2H	(1 + 2)
+#define IRQ_ORION5X_UART0		(1 + 3)
+#define IRQ_ORION5X_UART1		(1 + 4)
+#define IRQ_ORION5X_I2C			(1 + 5)
+#define IRQ_ORION5X_GPIO_0_7		(1 + 6)
+#define IRQ_ORION5X_GPIO_8_15		(1 + 7)
+#define IRQ_ORION5X_GPIO_16_23		(1 + 8)
+#define IRQ_ORION5X_GPIO_24_31		(1 + 9)
+#define IRQ_ORION5X_PCIE0_ERR		(1 + 10)
+#define IRQ_ORION5X_PCIE0_INT		(1 + 11)
+#define IRQ_ORION5X_USB1_CTRL		(1 + 12)
+#define IRQ_ORION5X_DEV_BUS_ERR		(1 + 14)
+#define IRQ_ORION5X_PCI_ERR		(1 + 15)
+#define IRQ_ORION5X_USB_BR_ERR		(1 + 16)
+#define IRQ_ORION5X_USB0_CTRL		(1 + 17)
+#define IRQ_ORION5X_ETH_RX		(1 + 18)
+#define IRQ_ORION5X_ETH_TX		(1 + 19)
+#define IRQ_ORION5X_ETH_MISC		(1 + 20)
+#define IRQ_ORION5X_ETH_SUM		(1 + 21)
+#define IRQ_ORION5X_ETH_ERR		(1 + 22)
+#define IRQ_ORION5X_IDMA_ERR		(1 + 23)
+#define IRQ_ORION5X_IDMA_0		(1 + 24)
+#define IRQ_ORION5X_IDMA_1		(1 + 25)
+#define IRQ_ORION5X_IDMA_2		(1 + 26)
+#define IRQ_ORION5X_IDMA_3		(1 + 27)
+#define IRQ_ORION5X_CESA		(1 + 28)
+#define IRQ_ORION5X_SATA		(1 + 29)
+#define IRQ_ORION5X_XOR0		(1 + 30)
+#define IRQ_ORION5X_XOR1		(1 + 31)
 
 /*
  * Orion General Purpose Pins
  */
-#define IRQ_ORION5X_GPIO_START	32
+#define IRQ_ORION5X_GPIO_START	33
 #define NR_GPIO_IRQS		32
 
 #define NR_IRQS			(IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS)
diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
index cd4bac4..086ecb8 100644
--- a/arch/arm/mach-orion5x/irq.c
+++ b/arch/arm/mach-orion5x/irq.c
@@ -42,7 +42,7 @@
 	stat = readl_relaxed(MAIN_IRQ_CAUSE);
 	stat &= readl_relaxed(MAIN_IRQ_MASK);
 	if (stat) {
-		unsigned int hwirq = __fls(stat);
+		unsigned int hwirq = 1 + __fls(stat);
 		handle_IRQ(hwirq, regs);
 		return;
 	}
@@ -51,7 +51,7 @@
 
 void __init orion5x_init_irq(void)
 {
-	orion_irq_init(0, MAIN_IRQ_MASK);
+	orion_irq_init(1, MAIN_IRQ_MASK);
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
 	set_handle_irq(orion5x_legacy_handle_irq);
diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c
deleted file mode 100644
index a6493e7..0000000
--- a/arch/arm/mach-orion5x/lsmini-setup.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * arch/arm/mach-orion5x/lsmini-setup.c
- *
- * Maintainer: Alexey Kopytko <alexey@kopytko.ru>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mv643xx_eth.h>
-#include <linux/leds.h>
-#include <linux/gpio_keys.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/ata_platform.h>
-#include <linux/gpio.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <mach/orion5x.h>
-#include "common.h"
-#include "mpp.h"
-
-/*****************************************************************************
- * Linkstation Mini Info
- ****************************************************************************/
-
-/*
- * 256K NOR flash Device bus boot chip select
- */
-
-#define LSMINI_NOR_BOOT_BASE	0xf4000000
-#define LSMINI_NOR_BOOT_SIZE	SZ_256K
-
-/*****************************************************************************
- * 256KB NOR Flash on BOOT Device
- ****************************************************************************/
-
-static struct physmap_flash_data lsmini_nor_flash_data = {
-	.width		= 1,
-};
-
-static struct resource lsmini_nor_flash_resource = {
-	.flags	= IORESOURCE_MEM,
-	.start	= LSMINI_NOR_BOOT_BASE,
-	.end	= LSMINI_NOR_BOOT_BASE + LSMINI_NOR_BOOT_SIZE - 1,
-};
-
-static struct platform_device lsmini_nor_flash = {
-	.name			= "physmap-flash",
-	.id			= 0,
-	.dev		= {
-		.platform_data	= &lsmini_nor_flash_data,
-	},
-	.num_resources		= 1,
-	.resource		= &lsmini_nor_flash_resource,
-};
-
-/*****************************************************************************
- * Ethernet
- ****************************************************************************/
-
-static struct mv643xx_eth_platform_data lsmini_eth_data = {
-	.phy_addr	= 8,
-};
-
-/*****************************************************************************
- * RTC 5C372a on I2C bus
- ****************************************************************************/
-
-static struct i2c_board_info __initdata lsmini_i2c_rtc = {
-	I2C_BOARD_INFO("rs5c372a", 0x32),
-};
-
-/*****************************************************************************
- * LEDs attached to GPIO
- ****************************************************************************/
-
-#define LSMINI_GPIO_LED_ALARM	2
-#define LSMINI_GPIO_LED_INFO	3
-#define LSMINI_GPIO_LED_FUNC	9
-#define LSMINI_GPIO_LED_PWR	14
-
-static struct gpio_led lsmini_led_pins[] = {
-	{
-		.name	   = "alarm:red",
-		.gpio	   = LSMINI_GPIO_LED_ALARM,
-		.active_low     = 1,
-	}, {
-		.name	   = "info:amber",
-		.gpio	   = LSMINI_GPIO_LED_INFO,
-		.active_low     = 1,
-	}, {
-		.name	   = "func:blue:top",
-		.gpio	   = LSMINI_GPIO_LED_FUNC,
-		.active_low     = 1,
-	}, {
-		.name	   = "power:blue:bottom",
-		.gpio	   = LSMINI_GPIO_LED_PWR,
-	},
-};
-
-static struct gpio_led_platform_data lsmini_led_data = {
-	.leds	   = lsmini_led_pins,
-	.num_leds       = ARRAY_SIZE(lsmini_led_pins),
-};
-
-static struct platform_device lsmini_leds = {
-	.name   = "leds-gpio",
-	.id     = -1,
-	.dev    = {
-		.platform_data  = &lsmini_led_data,
-	},
-};
-
-/****************************************************************************
- * GPIO Attached Keys
- ****************************************************************************/
-
-#define LSMINI_GPIO_KEY_FUNC       15
-#define LSMINI_GPIO_KEY_POWER	   18
-#define LSMINI_GPIO_KEY_AUTOPOWER 17
-
-#define LSMINI_SW_POWER		0x00
-#define LSMINI_SW_AUTOPOWER	0x01
-
-static struct gpio_keys_button lsmini_buttons[] = {
-	{
-		.code	   = KEY_OPTION,
-		.gpio	   = LSMINI_GPIO_KEY_FUNC,
-		.desc	   = "Function Button",
-		.active_low     = 1,
-	}, {
-		.type		= EV_SW,
-		.code	   = LSMINI_SW_POWER,
-		.gpio	   = LSMINI_GPIO_KEY_POWER,
-		.desc	   = "Power-on Switch",
-		.active_low     = 1,
-	}, {
-		.type		= EV_SW,
-		.code	   = LSMINI_SW_AUTOPOWER,
-		.gpio	   = LSMINI_GPIO_KEY_AUTOPOWER,
-		.desc	   = "Power-auto Switch",
-		.active_low     = 1,
-	},
-};
-
-static struct gpio_keys_platform_data lsmini_button_data = {
-	.buttons	= lsmini_buttons,
-	.nbuttons       = ARRAY_SIZE(lsmini_buttons),
-};
-
-static struct platform_device lsmini_button_device = {
-	.name	   = "gpio-keys",
-	.id	     = -1,
-	.num_resources  = 0,
-	.dev	    = {
-		.platform_data  = &lsmini_button_data,
-	},
-};
-
-
-/*****************************************************************************
- * SATA
- ****************************************************************************/
-static struct mv_sata_platform_data lsmini_sata_data = {
-	.n_ports	= 2,
-};
-
-
-/*****************************************************************************
- * Linkstation Mini specific power off method: reboot
- ****************************************************************************/
-/*
- * On the Linkstation Mini, the shutdown process is following:
- * - Userland monitors key events until the power switch goes to off position
- * - The board reboots
- * - U-boot starts and goes into an idle mode waiting for the user
- *   to move the switch to ON position
- */
-
-static void lsmini_power_off(void)
-{
-	orion5x_restart(REBOOT_HARD, NULL);
-}
-
-
-/*****************************************************************************
- * General Setup
- ****************************************************************************/
-
-#define LSMINI_GPIO_USB_POWER	16
-#define LSMINI_GPIO_AUTO_POWER	17
-#define LSMINI_GPIO_POWER	18
-
-#define LSMINI_GPIO_HDD_POWER0	1
-#define LSMINI_GPIO_HDD_POWER1	19
-
-static unsigned int lsmini_mpp_modes[] __initdata = {
-	MPP0_UNUSED, /* LED_RESERVE1 (unused) */
-	MPP1_GPIO, /* HDD_PWR */
-	MPP2_GPIO, /* LED_ALARM */
-	MPP3_GPIO, /* LED_INFO */
-	MPP4_UNUSED,
-	MPP5_UNUSED,
-	MPP6_UNUSED,
-	MPP7_UNUSED,
-	MPP8_UNUSED,
-	MPP9_GPIO, /* LED_FUNC */
-	MPP10_UNUSED,
-	MPP11_UNUSED, /* LED_ETH (dummy) */
-	MPP12_UNUSED,
-	MPP13_UNUSED,
-	MPP14_GPIO, /* LED_PWR */
-	MPP15_GPIO, /* FUNC */
-	MPP16_GPIO, /* USB_PWR */
-	MPP17_GPIO, /* AUTO_POWER */
-	MPP18_GPIO, /* POWER */
-	MPP19_GPIO, /* HDD_PWR1 */
-	0,
-};
-
-static void __init lsmini_init(void)
-{
-	/*
-	 * Setup basic Orion functions. Need to be called early.
-	 */
-	orion5x_init();
-
-	orion5x_mpp_conf(lsmini_mpp_modes);
-
-	/*
-	 * Configure peripherals.
-	 */
-	orion5x_ehci0_init();
-	orion5x_ehci1_init();
-	orion5x_eth_init(&lsmini_eth_data);
-	orion5x_i2c_init();
-	orion5x_sata_init(&lsmini_sata_data);
-	orion5x_uart0_init();
-	orion5x_xor_init();
-
-	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
-				    ORION_MBUS_DEVBUS_BOOT_ATTR,
-				    LSMINI_NOR_BOOT_BASE,
-				    LSMINI_NOR_BOOT_SIZE);
-	platform_device_register(&lsmini_nor_flash);
-
-	platform_device_register(&lsmini_button_device);
-
-	platform_device_register(&lsmini_leds);
-
-	i2c_register_board_info(0, &lsmini_i2c_rtc, 1);
-
-	/* enable USB power */
-	gpio_set_value(LSMINI_GPIO_USB_POWER, 1);
-
-	/* register power-off method */
-	pm_power_off = lsmini_power_off;
-
-	pr_info("%s: finished\n", __func__);
-}
-
-#ifdef CONFIG_MACH_LINKSTATION_MINI
-MACHINE_START(LINKSTATION_MINI, "Buffalo Linkstation Mini")
-	/* Maintainer: Alexey Kopytko <alexey@kopytko.ru> */
-	.atag_offset	= 0x100,
-	.init_machine	= lsmini_init,
-	.map_io		= orion5x_map_io,
-	.init_early	= orion5x_init_early,
-	.init_irq	= orion5x_init_irq,
-	.init_time	= orion5x_timer_init,
-	.fixup		= tag_fixup_mem32,
-	.restart	= orion5x_restart,
-MACHINE_END
-#endif
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 7189827..24b2959 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -101,9 +101,7 @@
 		addr[i] = byte;
 	}
 
-	printk(KERN_INFO "tsx09: found ethernet mac address ");
-	for (i = 0; i < 6; i++)
-		printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n");
+	printk(KERN_INFO "tsx09: found ethernet mac address %pM\n", addr);
 
 	memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6);
 
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index d99d08e..83e94c9 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/rtc/sirfsoc_rtciobrg.h>
+#include <asm/outercache.h>
 #include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index d897292..70366b3 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -496,18 +496,18 @@
 	.irq_unmask	= balloon3_unmask_irq,
 };
 
-static void balloon3_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void balloon3_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
 	unsigned long pending = __raw_readl(BALLOON3_INT_CONTROL_REG) &
 					balloon3_irq_enabled;
 	do {
-		/* clear useless edge notification */
-		if (desc->irq_data.chip->irq_ack) {
-			struct irq_data *d;
+		struct irq_data *d = irq_desc_get_irq_data(desc);
+		struct irq_chip *chip = irq_desc_get_chip(desc);
+		unsigned int irq;
 
-			d = irq_get_irq_data(BALLOON3_AUX_NIRQ);
-			desc->irq_data.chip->irq_ack(d);
-		}
+		/* clear useless edge notification */
+		if (chip->irq_ack)
+			chip->irq_ack(d);
 
 		while (pending) {
 			irq = BALLOON3_IRQ(0) + __ffs(pending);
@@ -528,7 +528,7 @@
 	for (irq = BALLOON3_IRQ(0); irq <= BALLOON3_IRQ(7); irq++) {
 		irq_set_chip_and_handler(irq, &balloon3_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	irq_set_chained_handler(BALLOON3_AUX_NIRQ, balloon3_irq_handler);
diff --git a/arch/arm/mach-pxa/cm-x2xx-pci.c b/arch/arm/mach-pxa/cm-x2xx-pci.c
index d8f816c..1fa79f1 100644
--- a/arch/arm/mach-pxa/cm-x2xx-pci.c
+++ b/arch/arm/mach-pxa/cm-x2xx-pci.c
@@ -29,8 +29,9 @@
 void __iomem *it8152_base_address;
 static int cmx2xx_it8152_irq_gpio;
 
-static void cmx2xx_it8152_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void cmx2xx_it8152_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	/* clear our parent irq */
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c
index 3543466..e6ce669 100644
--- a/arch/arm/mach-pxa/devices.c
+++ b/arch/arm/mach-pxa/devices.c
@@ -17,6 +17,7 @@
 #include <linux/platform_data/camera-pxa.h>
 #include <mach/audio.h>
 #include <mach/hardware.h>
+#include <linux/platform_data/mmp_dma.h>
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
 #include "devices.h"
@@ -1193,3 +1194,39 @@
 	pd->dev.platform_data = info;
 	platform_device_add(pd);
 }
+
+static struct mmp_dma_platdata pxa_dma_pdata = {
+	.dma_channels	= 0,
+};
+
+static struct resource pxa_dma_resource[] = {
+	[0] = {
+		.start	= 0x40000000,
+		.end	= 0x4000ffff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_DMA,
+		.end	= IRQ_DMA,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static u64 pxadma_dmamask = 0xffffffffUL;
+
+static struct platform_device pxa2xx_pxa_dma = {
+	.name		= "pxa-dma",
+	.id		= 0,
+	.dev		= {
+		.dma_mask = &pxadma_dmamask,
+		.coherent_dma_mask = 0xffffffff,
+	},
+	.num_resources	= ARRAY_SIZE(pxa_dma_resource),
+	.resource	= pxa_dma_resource,
+};
+
+void __init pxa2xx_set_dmac_info(int nb_channels)
+{
+	pxa_dma_pdata.dma_channels = nb_channels;
+	pxa_register_device(&pxa2xx_pxa_dma, &pxa_dma_pdata);
+}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 98608c5..9c10248 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -133,7 +133,6 @@
 	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
 				 handle_level_irq);
 	irq_set_chip_data(virq, base);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c
index eaee2c2..b070167 100644
--- a/arch/arm/mach-pxa/lpd270.c
+++ b/arch/arm/mach-pxa/lpd270.c
@@ -120,8 +120,9 @@
 	.irq_unmask	= lpd270_unmask_irq,
 };
 
-static void lpd270_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void lpd270_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq;
 	unsigned long pending;
 
 	pending = __raw_readw(LPD270_INT_STATUS) & lpd270_irq_enabled;
@@ -151,7 +152,7 @@
 	for (irq = LPD270_IRQ(2); irq <= LPD270_IRQ(4); irq++) {
 		irq_set_chip_and_handler(irq, &lpd270_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 	irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lpd270_irq_handler);
 	irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 2897da2..9a0c8af 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -284,8 +284,9 @@
 	.irq_unmask	= pcm990_unmask_irq,
 };
 
-static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pcm990_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq;
 	unsigned long pending;
 
 	pending = ~pcm990_cpld_readb(PCM990_CTRL_INTSETCLR);
@@ -311,7 +312,7 @@
 	for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) {
 		irq_set_chip_and_handler(irq, &pcm990_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	/* disable all Interrupts */
diff --git a/arch/arm/mach-pxa/pxa-dt.c b/arch/arm/mach-pxa/pxa-dt.c
index 7e0e5bd..8e0e62c 100644
--- a/arch/arm/mach-pxa/pxa-dt.c
+++ b/arch/arm/mach-pxa/pxa-dt.c
@@ -19,7 +19,7 @@
 #include "generic.h"
 
 #ifdef CONFIG_PXA3xx
-static const struct of_dev_auxdata pxa3xx_auxdata_lookup[] __initconst = {
+static const struct of_dev_auxdata pxa3xx_auxdata_lookup[] __initconst = {
 	OF_DEV_AUXDATA("mrvl,pxa-uart",		0x40100000, "pxa2xx-uart.0", NULL),
 	OF_DEV_AUXDATA("mrvl,pxa-uart",		0x40200000, "pxa2xx-uart.1", NULL),
 	OF_DEV_AUXDATA("mrvl,pxa-uart",		0x40700000, "pxa2xx-uart.2", NULL),
@@ -39,7 +39,7 @@
 			     pxa3xx_auxdata_lookup, NULL);
 }
 
-static const char *pxa3xx_dt_board_compat[] __initdata = {
+static const char *const pxa3xx_dt_board_compat[] __initconst = {
 	"marvell,pxa300",
 	"marvell,pxa310",
 	"marvell,pxa320",
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 23a90c6..1dc85ff 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -206,6 +206,7 @@
 		register_syscore_ops(&pxa_irq_syscore_ops);
 		register_syscore_ops(&pxa2xx_mfp_syscore_ops);
 
+		pxa2xx_set_dmac_info(16);
 		pxa_register_device(&pxa25x_device_gpio, &pxa25x_gpio_info);
 		ret = platform_add_devices(pxa25x_devices,
 					   ARRAY_SIZE(pxa25x_devices));
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index b5abdeb..e6aae9e 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -310,6 +310,7 @@
 		if (!of_have_populated_dt()) {
 			pxa_register_device(&pxa27x_device_gpio,
 					    &pxa27x_gpio_info);
+			pxa2xx_set_dmac_info(32);
 			ret = platform_add_devices(devices,
 						   ARRAY_SIZE(devices));
 		}
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index bd4cbef..1656384 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -325,7 +325,7 @@
 	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
 		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
 					 handle_edge_irq);
-		set_irq_flags(irq, IRQF_VALID);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 
 	pxa_ext_wakeup_chip.irq_set_wake = fn;
@@ -431,6 +431,7 @@
 		if (of_have_populated_dt())
 			return 0;
 
+		pxa2xx_set_dmac_info(32);
 		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
 		if (ret)
 			return ret;
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 051a655..bdc0c41 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -841,11 +841,9 @@
 	sharpsl_pm.charge_mode = CHRG_OFF;
 	sharpsl_pm.flags = 0;
 
-	init_timer(&sharpsl_pm.ac_timer);
-	sharpsl_pm.ac_timer.function = sharpsl_ac_timer;
+	setup_timer(&sharpsl_pm.ac_timer, sharpsl_ac_timer, 0UL);
 
-	init_timer(&sharpsl_pm.chrg_full_timer);
-	sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
+	setup_timer(&sharpsl_pm.chrg_full_timer, sharpsl_chrg_full_timer, 0UL);
 
 	led_trigger_register_simple("sharpsl-charge", &sharpsl_charge_led_trigger);
 
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
index 685deff..e0a5320 100644
--- a/arch/arm/mach-pxa/tosa-bt.c
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -131,17 +131,4 @@
 		.name = "tosa-bt",
 	},
 };
-
-
-static int __init tosa_bt_init(void)
-{
-	return platform_driver_register(&tosa_bt_driver);
-}
-
-static void __exit tosa_bt_exit(void)
-{
-	platform_driver_unregister(&tosa_bt_driver);
-}
-
-module_init(tosa_bt_init);
-module_exit(tosa_bt_exit);
+module_platform_driver(tosa_bt_driver);
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index de3b080..4841d6c 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -276,8 +276,9 @@
 			viper_irq_enabled_mask;
 }
 
-static void viper_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void viper_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq;
 	unsigned long pending;
 
 	pending = viper_irq_pending();
@@ -313,7 +314,7 @@
 		isa_irq = viper_bit_to_irq(level);
 		irq_set_chip_and_handler(isa_irq, &viper_irq_chip,
 					 handle_edge_irq);
-		set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(isa_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	irq_set_chained_handler(gpio_to_irq(VIPER_CPLD_GPIO),
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 6158566f..6f94dd7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -105,8 +105,9 @@
 	return __raw_readw(ZEUS_CPLD_ISA_IRQ) & zeus_irq_enabled_mask;
 }
 
-static void zeus_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void zeus_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq;
 	unsigned long pending;
 
 	pending = zeus_irq_pending();
@@ -151,7 +152,7 @@
 		isa_irq = zeus_bit_to_irq(level);
 		irq_set_chip_and_handler(isa_irq, &zeus_irq_chip,
 					 handle_edge_irq);
-		set_irq_flags(isa_irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(isa_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	irq_set_irq_type(gpio_to_irq(ZEUS_ISA_GPIO), IRQ_TYPE_EDGE_RISING);
diff --git a/arch/arm/mach-realview/realview-dt.c b/arch/arm/mach-realview/realview-dt.c
index cc28b89..382cc1b 100644
--- a/arch/arm/mach-realview/realview-dt.c
+++ b/arch/arm/mach-realview/realview-dt.c
@@ -13,7 +13,7 @@
 #include <asm/hardware/cache-l2x0.h>
 #include "core.h"
 
-static const char *realview_dt_platform_compat[] __initconst = {
+static const char *const realview_dt_platform_compat[] __initconst = {
 	"arm,realview-eb",
 	"arm,realview-pb1176",
 	"arm,realview-pb11mp",
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
index 8fcec1c..3e7a4b7 100644
--- a/arch/arm/mach-rockchip/platsmp.c
+++ b/arch/arm/mach-rockchip/platsmp.c
@@ -72,29 +72,22 @@
 static int pmu_set_power_domain(int pd, bool on)
 {
 	u32 val = (on) ? 0 : BIT(pd);
+	struct reset_control *rstc = rockchip_get_core_reset(pd);
 	int ret;
 
+	if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+		pr_err("%s: could not get reset control for core %d\n",
+		       __func__, pd);
+		return PTR_ERR(rstc);
+	}
+
 	/*
 	 * We need to soft reset the cpu when we turn off the cpu power domain,
 	 * or else the active processors might be stalled when the individual
 	 * processor is powered down.
 	 */
-	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
-		struct reset_control *rstc = rockchip_get_core_reset(pd);
-
-		if (IS_ERR(rstc)) {
-			pr_err("%s: could not get reset control for core %d\n",
-			       __func__, pd);
-			return PTR_ERR(rstc);
-		}
-
-		if (on)
-			reset_control_deassert(rstc);
-		else
-			reset_control_assert(rstc);
-
-		reset_control_put(rstc);
-	}
+	if (!IS_ERR(rstc) && !on)
+		reset_control_assert(rstc);
 
 	ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
 	if (ret < 0) {
@@ -107,11 +100,17 @@
 		ret = pmu_power_domain_is_on(pd);
 		if (ret < 0) {
 			pr_err("%s: could not read power domain state\n",
-				 __func__);
+			       __func__);
 			return ret;
 		}
 	}
 
+	if (!IS_ERR(rstc)) {
+		if (on)
+			reset_control_deassert(rstc);
+		reset_control_put(rstc);
+	}
+
 	return 0;
 }
 
@@ -130,7 +129,7 @@
 
 	if (cpu >= ncores) {
 		pr_err("%s: cpu %d outside maximum number of cpus %d\n",
-							__func__, cpu, ncores);
+		       __func__, cpu, ncores);
 		return -ENXIO;
 	}
 
@@ -140,14 +139,19 @@
 		return ret;
 
 	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
-		/* We communicate with the bootrom to active the cpus other
+		/*
+		 * We communicate with the bootrom to active the cpus other
 		 * than cpu0, after a blob of initialize code, they will
 		 * stay at wfe state, once they are actived, they will check
 		 * the mailbox:
 		 * sram_base_addr + 4: 0xdeadbeaf
 		 * sram_base_addr + 8: start address for pc
-		 * */
-		udelay(10);
+		 * CPU0 needs to wait for the other CPUs to enter the wfe
+		 * state. The wait time is affected by many factors
+		 * (e.g. cpu frequency, bootrom frequency, sram frequency, ...).
+		 */
+		mdelay(1); /* give the other cpus time to start up */
+
 		writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
 		writel(0xDEADBEAF, sram_base_addr + 4);
 		dsb_sev();
@@ -317,6 +321,13 @@
 #ifdef CONFIG_HOTPLUG_CPU
 static int rockchip_cpu_kill(unsigned int cpu)
 {
+	/*
+	 * We need a delay here to ensure that the dying CPU can finish
+	 * executing v7_coherency_exit() and reach the WFI/WFE state
+	 * prior to having the power domain disabled.
+	 */
+	mdelay(1);
+
 	pmu_set_power_domain(0 + cpu, false);
 	return 1;
 }
@@ -324,7 +335,7 @@
 static void rockchip_cpu_die(unsigned int cpu)
 {
 	v7_exit_coherency_flush(louis);
-	while(1)
+	while (1)
 		cpu_do_idle();
 }
 #endif
@@ -337,4 +348,5 @@
 	.cpu_die		= rockchip_cpu_die,
 #endif
 };
+
 CPU_METHOD_OF_DECLARE(rk3066_smp, "rockchip,rk3066-smp", &rockchip_smp_ops);
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c
index b0dcbe2..bee8c80 100644
--- a/arch/arm/mach-rockchip/pm.c
+++ b/arch/arm/mach-rockchip/pm.c
@@ -45,9 +45,11 @@
 
 static struct regmap *pmu_regmap;
 static struct regmap *sgrf_regmap;
+static struct regmap *grf_regmap;
 
 static u32 rk3288_pmu_pwr_mode_con;
 static u32 rk3288_sgrf_soc_con0;
+static u32 rk3288_sgrf_cpu_con0;
 
 static inline u32 rk3288_l2_config(void)
 {
@@ -66,10 +68,37 @@
 	rkpm_bootdata_l2ctlr = rk3288_l2_config();
 }
 
+#define GRF_UOC0_CON0			0x320
+#define GRF_UOC1_CON0			0x334
+#define GRF_UOC2_CON0			0x348
+#define GRF_SIDDQ			BIT(13)
+
+static bool rk3288_slp_disable_osc(void)
+{
+	static const u32 reg_offset[] = { GRF_UOC0_CON0, GRF_UOC1_CON0,
+					  GRF_UOC2_CON0 };
+	u32 reg, i;
+
+	/*
+	 * If any usb phy is still on (GRF_SIDDQ == 0), we need usb wakeup to
+	 * work, so do not switch to the 32khz osc, since the usb phy clk is
+	 * not connected to it.
+	 */
+	for (i = 0; i < ARRAY_SIZE(reg_offset); i++) {
+		regmap_read(grf_regmap, reg_offset[i], &reg);
+		if (!(reg & GRF_SIDDQ))
+			return false;
+	}
+
+	return true;
+}
+
 static void rk3288_slp_mode_set(int level)
 {
 	u32 mode_set, mode_set1;
+	bool osc_disable = rk3288_slp_disable_osc();
 
+	regmap_read(sgrf_regmap, RK3288_SGRF_CPU_CON0, &rk3288_sgrf_cpu_con0);
 	regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0);
 
 	regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
@@ -94,9 +123,6 @@
 	regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR,
 		     rk3288_bootram_phy);
 
-	regmap_write(pmu_regmap, RK3288_PMU_WAKEUP_CFG1,
-		     PMU_ARMINT_WAKEUP_EN);
-
 	mode_set = BIT(PMU_GLOBAL_INT_DISABLE) | BIT(PMU_L2FLUSH_EN) |
 		   BIT(PMU_SREF0_ENTER_EN) | BIT(PMU_SREF1_ENTER_EN) |
 		   BIT(PMU_DDR0_GATING_EN) | BIT(PMU_DDR1_GATING_EN) |
@@ -107,13 +133,31 @@
 
 	if (level == ROCKCHIP_ARM_OFF_LOGIC_DEEP) {
 		/* arm off, logic deep sleep */
-		mode_set |= BIT(PMU_BUS_PD_EN) |
+		mode_set |= BIT(PMU_BUS_PD_EN) | BIT(PMU_PMU_USE_LF) |
 			    BIT(PMU_DDR1IO_RET_EN) | BIT(PMU_DDR0IO_RET_EN) |
-			    BIT(PMU_OSC_24M_DIS) | BIT(PMU_PMU_USE_LF) |
 			    BIT(PMU_ALIVE_USE_LF) | BIT(PMU_PLL_PD_EN);
 
+		if (osc_disable)
+			mode_set |= BIT(PMU_OSC_24M_DIS);
+
 		mode_set1 |= BIT(PMU_CLR_ALIVE) | BIT(PMU_CLR_BUS) |
 			     BIT(PMU_CLR_PERI) | BIT(PMU_CLR_DMA);
+
+		regmap_write(pmu_regmap, RK3288_PMU_WAKEUP_CFG1,
+			     PMU_ARMINT_WAKEUP_EN);
+
+		/*
+		 * In deep suspend we use PMU_PMU_USE_LF to let the rk3288
+		 * switch its main clock supply to the alternative 32kHz
+		 * source. Therefore set 30ms on a 32kHz clock for pmic
+		 * stabilization. Similar 30ms on 24MHz for the other
+		 * mode below.
+		 */
+		regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, 32 * 30);
+
+		/* only wait for stabilization if we turned the osc off */
+		regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT,
+					 osc_disable ? 32 * 30 : 0);
 	} else {
 		/*
 		 * arm off, logic normal
@@ -121,6 +165,15 @@
 		 * wakeup will be error
 		 */
 		mode_set |= BIT(PMU_CLK_CORE_SRC_GATE_EN);
+
+		regmap_write(pmu_regmap, RK3288_PMU_WAKEUP_CFG1,
+			     PMU_ARMINT_WAKEUP_EN | PMU_GPIOINT_WAKEUP_EN);
+
+		/* 30ms on a 24MHz clock for pmic stabilization */
+		regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, 24000 * 30);
+
+		/* oscillator is still running, so no need to wait */
+		regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, 0);
 	}
 
 	regmap_write(pmu_regmap, RK3288_PMU_PWRMODE_CON, mode_set);
@@ -129,6 +182,9 @@
 
 static void rk3288_slp_mode_set_resume(void)
 {
+	regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0,
+		     rk3288_sgrf_cpu_con0 | SGRF_DAPDEVICEEN_WRITE);
+
 	regmap_write(pmu_regmap, RK3288_PMU_PWRMODE_CON,
 		     rk3288_pmu_pwr_mode_con);
 
@@ -190,7 +246,14 @@
 				"rockchip,rk3288-sgrf");
 	if (IS_ERR(sgrf_regmap)) {
 		pr_err("%s: could not find sgrf regmap\n", __func__);
-		return PTR_ERR(pmu_regmap);
+		return PTR_ERR(sgrf_regmap);
+	}
+
+	grf_regmap = syscon_regmap_lookup_by_compatible(
+				"rockchip,rk3288-grf");
+	if (IS_ERR(grf_regmap)) {
+		pr_err("%s: could not find grf regmap\n", __func__);
+		return PTR_ERR(grf_regmap);
 	}
 
 	sram_np = of_find_compatible_node(NULL, NULL,
@@ -221,9 +284,6 @@
 	memcpy(rk3288_bootram_base, rockchip_slp_cpu_resume,
 	       rk3288_bootram_sz);
 
-	regmap_write(pmu_regmap, RK3288_PMU_OSC_CNT, OSC_STABL_CNT_THRESH);
-	regmap_write(pmu_regmap, RK3288_PMU_STABL_CNT, PMU_STABL_CNT_THRESH);
-
 	return 0;
 }
 
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 3e8d39c..b5af26f 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -59,19 +59,9 @@
 #define SGRF_DAPDEVICEEN		BIT(0)
 #define SGRF_DAPDEVICEEN_WRITE		BIT(16)
 
-#define RK3288_CRU_MODE_CON		0x50
-#define RK3288_CRU_SEL0_CON		0x60
-#define RK3288_CRU_SEL1_CON		0x64
-#define RK3288_CRU_SEL10_CON		0x88
-#define RK3288_CRU_SEL33_CON		0xe4
-#define RK3288_CRU_SEL37_CON		0xf4
-
 /* PMU_WAKEUP_CFG1 bits */
 #define PMU_ARMINT_WAKEUP_EN		BIT(0)
-
-/* wait 30ms for OSC stable and 30ms for pmic stable */
-#define OSC_STABL_CNT_THRESH	(32 * 30)
-#define PMU_STABL_CNT_THRESH	(32 * 30)
+#define PMU_GPIOINT_WAKEUP_EN		BIT(3)
 
 enum rk3288_pwr_mode_con {
 	PMU_PWR_MODE_EN = 0,
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index fcb1d59..f726d4c 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -946,7 +946,7 @@
 		irq_set_chip_and_handler(ec->irq, &ecard_chip,
 					 handle_level_irq);
 		irq_set_chip_data(ec->irq, ec);
-		set_irq_flags(ec->irq, IRQF_VALID);
+		irq_clear_status_flags(ec->irq, IRQ_NOREQUEST);
 	}
 
 #ifdef CONFIG_ARCH_RPC
diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c
index 3e4fa84..66502e6 100644
--- a/arch/arm/mach-rpc/irq.c
+++ b/arch/arm/mach-rpc/irq.c
@@ -117,7 +117,7 @@
 
 void __init rpc_init_irq(void)
 {
-	unsigned int irq, flags;
+	unsigned int irq, clr, set = 0;
 
 	iomd_writeb(0, IOMD_IRQMASKA);
 	iomd_writeb(0, IOMD_IRQMASKB);
@@ -128,37 +128,37 @@
 		&rpc_default_fiq_end - &rpc_default_fiq_start);
 
 	for (irq = 0; irq < NR_IRQS; irq++) {
-		flags = IRQF_VALID;
+		clr = IRQ_NOREQUEST;
 
 		if (irq <= 6 || (irq >= 9 && irq <= 15))
-			flags |= IRQF_PROBE;
+			clr |= IRQ_NOPROBE;
 
 		if (irq == 21 || (irq >= 16 && irq <= 19) ||
 		    irq == IRQ_KEYBOARDTX)
-			flags |= IRQF_NOAUTOEN;
+			set |= IRQ_NOAUTOEN;
 
 		switch (irq) {
 		case 0 ... 7:
 			irq_set_chip_and_handler(irq, &iomd_a_chip,
 						 handle_level_irq);
-			set_irq_flags(irq, flags);
+			irq_modify_status(irq, clr, set);
 			break;
 
 		case 8 ... 15:
 			irq_set_chip_and_handler(irq, &iomd_b_chip,
 						 handle_level_irq);
-			set_irq_flags(irq, flags);
+			irq_modify_status(irq, clr, set);
 			break;
 
 		case 16 ... 21:
 			irq_set_chip_and_handler(irq, &iomd_dma_chip,
 						 handle_level_irq);
-			set_irq_flags(irq, flags);
+			irq_modify_status(irq, clr, set);
 			break;
 
 		case 64 ... 71:
 			irq_set_chip(irq, &iomd_fiq_chip);
-			set_irq_flags(irq, IRQF_VALID);
+			irq_modify_status(irq, clr, set);
 			break;
 		}
 	}
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 23bec3a..ef68ecb 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -124,6 +124,11 @@
 	  This also means that the PLL tables for the selected CPU(s) will
 	  be built which may increase the size of the kernel image.
 
+config S3C_SETUP_CAMIF
+	bool
+	help
+	  Compile in common setup code for S3C CAMIF devices
+
 # cpu frequency items common between s3c2410 and s3c2440/s3c2442
 
 config S3C2410_IOTIMING
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index 05920c8..8ac2f58 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -99,3 +99,4 @@
 obj-$(CONFIG_S3C2443_SETUP_SPI)		+= setup-spi.o
 obj-$(CONFIG_ARCH_S3C24XX)		+= setup-i2c.o
 obj-$(CONFIG_S3C24XX_SETUP_TS)		+= setup-ts.o
+obj-$(CONFIG_S3C_SETUP_CAMIF)		+= setup-camif.o
diff --git a/arch/arm/mach-s3c24xx/bast-irq.c b/arch/arm/mach-s3c24xx/bast-irq.c
index cb1b791..ced1ab86 100644
--- a/arch/arm/mach-s3c24xx/bast-irq.c
+++ b/arch/arm/mach-s3c24xx/bast-irq.c
@@ -147,7 +147,7 @@
 
 			irq_set_chip_and_handler(irqno, &bast_pc104_chip,
 						 handle_level_irq);
-			set_irq_flags(irqno, IRQF_VALID);
+			irq_clear_status_flags(irqno, IRQ_NOREQUEST);
 		}
 	}
 
diff --git a/arch/arm/mach-s3c24xx/fb-core.h b/arch/arm/mach-s3c24xx/fb-core.h
new file mode 100644
index 0000000..103bdba
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/fb-core.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2010 Samsung Electronics Co., Ltd.
+ *	Pawel Osciak <p.osciak@samsung.com>
+ *
+ * Samsung framebuffer driver core functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_PLAT_FB_CORE_H
+#define __ASM_PLAT_FB_CORE_H __FILE__
+
+/*
+ * These functions are only for use with the core support code, such as
+ * the CPU-specific initialization code.
+ */
+
+/* Re-define device name depending on support. */
+static inline void s3c_fb_setname(char *name)
+{
+#ifdef CONFIG_S3C_DEV_FB
+	s3c_device_fb.name = name;
+#endif
+}
+
+#endif /* __ASM_PLAT_FB_CORE_H */
diff --git a/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c b/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c
index f886478..5f028ff 100644
--- a/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c
+++ b/arch/arm/mach-s3c24xx/mach-s3c2416-dt.c
@@ -39,7 +39,7 @@
 	s3c_pm_init();
 }
 
-static char const *s3c2416_dt_compat[] __initdata = {
+static const char *const s3c2416_dt_compat[] __initconst = {
 	"samsung,s3c2416",
 	"samsung,s3c2450",
 	NULL
diff --git a/arch/arm/mach-s3c24xx/nand-core.h b/arch/arm/mach-s3c24xx/nand-core.h
new file mode 100644
index 0000000..7e811fe
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/nand-core.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
+ * S3C -  Nand Controller core functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_NAND_CORE_H
+#define __ASM_ARCH_NAND_CORE_H __FILE__
+
+/* These functions are only for use with the core support code, such as
+ * the cpu specific initialisation code
+ */
+
+/* re-define device name depending on support. */
+static inline void s3c_nand_setname(char *name)
+{
+#ifdef CONFIG_S3C_DEV_NAND
+	s3c_device_nand.name = name;
+#endif
+}
+
+#endif /* __ASM_ARCH_NAND_CORE_H */
diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c
index 64a1360..fb5ee8d 100644
--- a/arch/arm/mach-s3c24xx/s3c2412.c
+++ b/arch/arm/mach-s3c24xx/s3c2412.c
@@ -40,11 +40,11 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq.h>
 #include <plat/devs.h>
-#include <plat/nand-core.h>
 #include <plat/pm.h>
 #include <plat/regs-spi.h>
 
 #include "common.h"
+#include "nand-core.h"
 #include "regs-dsc.h"
 #include "s3c2412-power.h"
 
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c
index 3f8ca2a..621b864 100644
--- a/arch/arm/mach-s3c24xx/s3c2416.c
+++ b/arch/arm/mach-s3c24xx/s3c2416.c
@@ -59,12 +59,12 @@
 #include <plat/pm.h>
 
 #include <plat/iic-core.h>
-#include <plat/fb-core.h>
-#include <plat/nand-core.h>
 #include <plat/adc-core.h>
-#include <plat/spi-core.h>
 
 #include "common.h"
+#include "fb-core.h"
+#include "nand-core.h"
+#include "spi-core.h"
 
 static struct map_desc s3c2416_iodesc[] __initdata = {
 	IODESC_ENT(WATCHDOG),
diff --git a/arch/arm/mach-s3c24xx/s3c2443.c b/arch/arm/mach-s3c24xx/s3c2443.c
index 87b6b89..b559d37 100644
--- a/arch/arm/mach-s3c24xx/s3c2443.c
+++ b/arch/arm/mach-s3c24xx/s3c2443.c
@@ -41,10 +41,11 @@
 #include <plat/gpio-cfg-helpers.h>
 #include <plat/devs.h>
 #include <plat/cpu.h>
-#include <plat/fb-core.h>
-#include <plat/nand-core.h>
 #include <plat/adc-core.h>
-#include <plat/spi-core.h>
+
+#include "fb-core.h"
+#include "nand-core.h"
+#include "spi-core.h"
 
 static struct map_desc s3c2443_iodesc[] __initdata = {
 	IODESC_ENT(WATCHDOG),
diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c
index b141195..31fd273 100644
--- a/arch/arm/mach-s3c24xx/s3c244x.c
+++ b/arch/arm/mach-s3c24xx/s3c244x.c
@@ -41,9 +41,9 @@
 #include <plat/devs.h>
 #include <plat/cpu.h>
 #include <plat/pm.h>
-#include <plat/nand-core.h>
 
 #include "common.h"
+#include "nand-core.h"
 #include "regs-dsc.h"
 
 static struct map_desc s3c244x_iodesc[] __initdata = {
diff --git a/arch/arm/plat-samsung/setup-camif.c b/arch/arm/mach-s3c24xx/setup-camif.c
similarity index 100%
rename from arch/arm/plat-samsung/setup-camif.c
rename to arch/arm/mach-s3c24xx/setup-camif.c
diff --git a/arch/arm/plat-samsung/include/plat/spi-core.h b/arch/arm/mach-s3c24xx/spi-core.h
similarity index 100%
rename from arch/arm/plat-samsung/include/plat/spi-core.h
rename to arch/arm/mach-s3c24xx/spi-core.h
diff --git a/arch/arm/mach-s3c64xx/Kconfig b/arch/arm/mach-s3c64xx/Kconfig
index eff95e9..28c7097 100644
--- a/arch/arm/mach-s3c64xx/Kconfig
+++ b/arch/arm/mach-s3c64xx/Kconfig
@@ -34,6 +34,12 @@
 	help
 	  Compile in platform device definition for OneNAND1 controller
 
+config SAMSUNG_DEV_BACKLIGHT
+	bool
+	depends on SAMSUNG_DEV_PWM
+	help
+	  Compile in platform device definition for LCD backlight with PWM Timer
+
 # platform specific device setup
 
 config S3C64XX_SETUP_I2C0
diff --git a/arch/arm/mach-s3c64xx/Makefile b/arch/arm/mach-s3c64xx/Makefile
index 17f4b07..bb233f3 100644
--- a/arch/arm/mach-s3c64xx/Makefile
+++ b/arch/arm/mach-s3c64xx/Makefile
@@ -40,6 +40,8 @@
 obj-$(CONFIG_S3C64XX_SETUP_SPI)		+= setup-spi.o
 obj-$(CONFIG_S3C64XX_SETUP_USB_PHY) += setup-usb-phy.o
 
+obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT)	+= dev-backlight.o
+
 # Machine support
 
 obj-$(CONFIG_MACH_ANW6410)		+= mach-anw6410.o
diff --git a/arch/arm/mach-s3c64xx/ata-core.h b/arch/arm/mach-s3c64xx/ata-core.h
new file mode 100644
index 0000000..5951f24
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/ata-core.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com
+ *
+ * Samsung CF-ATA Controller core functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_PLAT_ATA_CORE_H
+#define __ASM_PLAT_ATA_CORE_H __FILE__
+
+/* These functions are only for use with the core support code, such as
+ * the cpu specific initialisation code
+*/
+
+/* re-define device name depending on support. */
+static inline void s3c_cfcon_setname(char *name)
+{
+#ifdef CONFIG_SAMSUNG_DEV_IDE
+	s3c_device_cfcon.name = name;
+#endif
+}
+
+#endif /* __ASM_PLAT_ATA_CORE_H */
diff --git a/arch/arm/mach-s3c64xx/backlight.h b/arch/arm/mach-s3c64xx/backlight.h
new file mode 100644
index 0000000..8dcacac
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/backlight.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *              http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_PLAT_BACKLIGHT_H
+#define __ASM_PLAT_BACKLIGHT_H __FILE__
+
+/* samsung_bl_gpio_info - GPIO info for PWM Backlight control
+ * @no:		GPIO number for PWM timer out
+ * @func:	Special function of GPIO line for PWM timer
+ */
+struct samsung_bl_gpio_info {
+	int no;
+	int func;
+};
+
+extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+	struct platform_pwm_backlight_data *bl_data);
+
+#endif /* __ASM_PLAT_BACKLIGHT_H */
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index 16547f2..fd63ecf 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -21,7 +21,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/clk-provider.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/serial_core.h>
@@ -48,12 +47,12 @@
 #include <plat/devs.h>
 #include <plat/pm.h>
 #include <plat/gpio-cfg.h>
-#include <plat/irq-uart.h>
 #include <plat/pwm-core.h>
 #include <plat/regs-irqtype.h>
-#include <plat/watchdog-reset.h>
 
 #include "common.h"
+#include "irq-uart.h"
+#include "watchdog-reset.h"
 
 /* External clock frequency */
 static unsigned long xtal_f = 12000000, xusbxti_f = 48000000;
@@ -420,7 +419,7 @@
 	for (irq = IRQ_EINT(0); irq <= IRQ_EINT(27); irq++) {
 		irq_set_chip_and_handler(irq, &s3c_irq_eint, handle_level_irq);
 		irq_set_chip_data(irq, (void *)eint_irq_to_bit(irq));
-		set_irq_flags(irq, IRQF_VALID);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_chained_handler(IRQ_EINT0_3, s3c_irq_demux_eint0_3);
diff --git a/arch/arm/mach-s3c64xx/dev-backlight.c b/arch/arm/mach-s3c64xx/dev-backlight.c
new file mode 100644
index 0000000..38c323e
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/dev-backlight.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *              http://www.samsung.com
+ *
+ * Common infrastructure for PWM Backlight for Samsung boards
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/pwm_backlight.h>
+
+#include <plat/devs.h>
+#include <plat/gpio-cfg.h>
+
+#include "backlight.h"
+
+struct samsung_bl_drvdata {
+	struct platform_pwm_backlight_data plat_data;
+	struct samsung_bl_gpio_info *gpio_info;
+};
+
+static int samsung_bl_init(struct device *dev)
+{
+	int ret = 0;
+	struct platform_pwm_backlight_data *pdata = dev->platform_data;
+	struct samsung_bl_drvdata *drvdata = container_of(pdata,
+					struct samsung_bl_drvdata, plat_data);
+	struct samsung_bl_gpio_info *bl_gpio_info = drvdata->gpio_info;
+
+	ret = gpio_request(bl_gpio_info->no, "Backlight");
+	if (ret) {
+		printk(KERN_ERR "failed to request GPIO for LCD Backlight\n");
+		return ret;
+	}
+
+	/* Configure GPIO pin with specific GPIO function for PWM timer */
+	s3c_gpio_cfgpin(bl_gpio_info->no, bl_gpio_info->func);
+
+	return 0;
+}
+
+static void samsung_bl_exit(struct device *dev)
+{
+	struct platform_pwm_backlight_data *pdata = dev->platform_data;
+	struct samsung_bl_drvdata *drvdata = container_of(pdata,
+					struct samsung_bl_drvdata, plat_data);
+	struct samsung_bl_gpio_info *bl_gpio_info = drvdata->gpio_info;
+
+	s3c_gpio_cfgpin(bl_gpio_info->no, S3C_GPIO_OUTPUT);
+	gpio_free(bl_gpio_info->no);
+}
+
+/* Initialize a few important fields of the platform_pwm_backlight_data
+ * structure with default values. These fields can be overridden by
+ * board-specific values passed in from the machine file.
+ * For ease of operation, these fields are initialized with values
+ * used by most Samsung boards.
+ * Users have the option of passing in other parameters
+ * for their specific boards.
+ */
+
+static struct samsung_bl_drvdata samsung_dfl_bl_data __initdata = {
+	.plat_data = {
+		.max_brightness = 255,
+		.dft_brightness = 255,
+		.pwm_period_ns  = 78770,
+		.enable_gpio    = -1,
+		.init           = samsung_bl_init,
+		.exit           = samsung_bl_exit,
+	},
+};
+
+static struct platform_device samsung_dfl_bl_device __initdata = {
+	.name		= "pwm-backlight",
+};
+
+/* samsung_bl_set - Set board-specific data (if any) provided by the user
+ * for PWM backlight control, and register the PWM and backlight devices.
+ * @gpio_info:	structure containing GPIO info for the PWM timer
+ * @bl_data:	structure containing backlight control data
+ */
+void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
+	struct platform_pwm_backlight_data *bl_data)
+{
+	int ret = 0;
+	struct platform_device *samsung_bl_device;
+	struct samsung_bl_drvdata *samsung_bl_drvdata;
+	struct platform_pwm_backlight_data *samsung_bl_data;
+
+	samsung_bl_device = kmemdup(&samsung_dfl_bl_device,
+			sizeof(struct platform_device), GFP_KERNEL);
+	if (!samsung_bl_device) {
+		printk(KERN_ERR "%s: no memory for platform dev\n", __func__);
+		return;
+	}
+
+	samsung_bl_drvdata = kmemdup(&samsung_dfl_bl_data,
+				sizeof(samsung_dfl_bl_data), GFP_KERNEL);
+	if (!samsung_bl_drvdata) {
+		printk(KERN_ERR "%s: no memory for platform dev\n", __func__);
+		goto err_data;
+	}
+	samsung_bl_device->dev.platform_data = &samsung_bl_drvdata->plat_data;
+	samsung_bl_drvdata->gpio_info = gpio_info;
+	samsung_bl_data = &samsung_bl_drvdata->plat_data;
+
+	/* Copy board specific data provided by user */
+	samsung_bl_data->pwm_id = bl_data->pwm_id;
+	samsung_bl_device->dev.parent = &samsung_device_pwm.dev;
+
+	if (bl_data->max_brightness)
+		samsung_bl_data->max_brightness = bl_data->max_brightness;
+	if (bl_data->dft_brightness)
+		samsung_bl_data->dft_brightness = bl_data->dft_brightness;
+	if (bl_data->lth_brightness)
+		samsung_bl_data->lth_brightness = bl_data->lth_brightness;
+	if (bl_data->pwm_period_ns)
+		samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns;
+	if (bl_data->enable_gpio >= 0)
+		samsung_bl_data->enable_gpio = bl_data->enable_gpio;
+	if (bl_data->init)
+		samsung_bl_data->init = bl_data->init;
+	if (bl_data->notify)
+		samsung_bl_data->notify = bl_data->notify;
+	if (bl_data->notify_after)
+		samsung_bl_data->notify_after = bl_data->notify_after;
+	if (bl_data->exit)
+		samsung_bl_data->exit = bl_data->exit;
+	if (bl_data->check_fb)
+		samsung_bl_data->check_fb = bl_data->check_fb;
+
+	/* Register the Backlight dev */
+	ret = platform_device_register(samsung_bl_device);
+	if (ret) {
+		printk(KERN_ERR "failed to register backlight device: %d\n", ret);
+		goto err_plat_reg2;
+	}
+
+	return;
+
+err_plat_reg2:
+	kfree(samsung_bl_data);
+err_data:
+	kfree(samsung_bl_device);
+	return;
+}
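
A minimal sketch of how a board file would feed this helper; the GPIO, special-function value, and PWM id below are illustrative placeholders, not values taken from this patch:

	static struct samsung_bl_gpio_info example_bl_gpio_info = {
		.no	= S3C64XX_GPF(15),	/* assumed: backlight on a GPF timer-output pin */
		.func	= S3C_GPIO_SFN(2),	/* assumed: special function selecting PWM TOUT */
	};

	static struct platform_pwm_backlight_data example_bl_data = {
		.pwm_id	= 1,			/* assumed PWM channel */
		/* unset fields fall back to the samsung_dfl_bl_data defaults */
	};

	static void __init example_machine_init(void)
	{
		samsung_bl_set(&example_bl_gpio_info, &example_bl_data);
	}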
diff --git a/arch/arm/mach-s3c64xx/irq-uart.h b/arch/arm/mach-s3c64xx/irq-uart.h
new file mode 100644
index 0000000..4b29613
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/irq-uart.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2010 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Header file for Samsung SoC UART IRQ demux for S3C64XX and later
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct s3c_uart_irq {
+	void __iomem	*regs;
+	unsigned int	 base_irq;
+	unsigned int	 parent_irq;
+};
+
+extern void s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs);
+
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 2fddf38..bbf74ed 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -15,11 +15,10 @@
 #include <asm/system_misc.h>
 
 #include <plat/cpu.h>
-#include <plat/watchdog-reset.h>
-
 #include <mach/map.h>
 
 #include "common.h"
+#include "watchdog-reset.h"
 
 /*
  * IO mapping for shared system controller IP.
@@ -61,7 +60,7 @@
 	soft_restart(0);
 }
 
-static char const *s3c64xx_dt_compat[] __initdata = {
+static const char *const s3c64xx_dt_compat[] __initconst = {
 	"samsung,s3c6400",
 	"samsung,s3c6410",
 	NULL
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index b7447a9..d590b88 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -68,9 +68,9 @@
 #include <plat/adc.h>
 #include <linux/platform_data/touchscreen-s3c2410.h>
 #include <plat/keypad.h>
-#include <plat/backlight.h>
 #include <plat/samsung-time.h>
 
+#include "backlight.h"
 #include "common.h"
 #include "regs-modem.h"
 #include "regs-srom.h"
diff --git a/arch/arm/mach-s3c64xx/onenand-core.h b/arch/arm/mach-s3c64xx/onenand-core.h
new file mode 100644
index 0000000..925eb13
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/onenand-core.h
@@ -0,0 +1,35 @@
+/*
+ *  Copyright (c) 2010 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *  Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * Samsung OneNAND Controller core functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __ASM_ARCH_ONENAND_CORE_H
+#define __ASM_ARCH_ONENAND_CORE_H __FILE__
+
+/* These functions are only for use with the core support code, such as
+ * the cpu specific initialisation code
+ */
+
+/* re-define device name depending on support. */
+static inline void s3c_onenand_setname(char *name)
+{
+#ifdef CONFIG_S3C_DEV_ONENAND
+	s3c_device_onenand.name = name;
+#endif
+}
+
+static inline void s3c64xx_onenand1_setname(char *name)
+{
+#ifdef CONFIG_S3C64XX_DEV_ONENAND1
+	s3c64xx_device_onenand1.name = name;
+#endif
+}
+
+#endif /* __ASM_ARCH_ONENAND_CORE_H */
diff --git a/arch/arm/mach-s3c64xx/regs-usb-hsotg-phy.h b/arch/arm/mach-s3c64xx/regs-usb-hsotg-phy.h
new file mode 100644
index 0000000..eae3c31
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/regs-usb-hsotg-phy.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ *      http://armlinux.simtec.co.uk/
+ *      Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C - USB2.0 Highspeed/OtG device PHY registers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* Note, this is a separate header file as some of the clock framework
+ * needs to touch this if the clk_48m is used as the USB OHCI or other
+ * peripheral source.
+*/
+
+#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H
+#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__
+
+/* S3C64XX_PA_USB_HSPHY */
+
+#define S3C_HSOTG_PHYREG(x)	((x) + S3C_VA_USB_HSPHY)
+
+#define S3C_PHYPWR				S3C_HSOTG_PHYREG(0x00)
+#define S3C_PHYPWR_NORMAL_MASK			(0x19 << 0)
+#define S3C_PHYPWR_OTG_DISABLE			(1 << 4)
+#define S3C_PHYPWR_ANALOG_POWERDOWN		(1 << 3)
+#define SRC_PHYPWR_FORCE_SUSPEND		(1 << 1)
+
+#define S3C_PHYCLK				S3C_HSOTG_PHYREG(0x04)
+#define S3C_PHYCLK_MODE_USB11			(1 << 6)
+#define S3C_PHYCLK_EXT_OSC			(1 << 5)
+#define S3C_PHYCLK_CLK_FORCE			(1 << 4)
+#define S3C_PHYCLK_ID_PULL			(1 << 2)
+#define S3C_PHYCLK_CLKSEL_MASK			(0x3 << 0)
+#define S3C_PHYCLK_CLKSEL_SHIFT			(0)
+#define S3C_PHYCLK_CLKSEL_48M			(0x0 << 0)
+#define S3C_PHYCLK_CLKSEL_12M			(0x2 << 0)
+#define S3C_PHYCLK_CLKSEL_24M			(0x3 << 0)
+
+#define S3C_RSTCON				S3C_HSOTG_PHYREG(0x08)
+#define S3C_RSTCON_PHYCLK			(1 << 2)
+#define S3C_RSTCON_HCLK				(1 << 1)
+#define S3C_RSTCON_PHY				(1 << 0)
+
+#define S3C_PHYTUNE				S3C_HSOTG_PHYREG(0x20)
+
+#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */
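
As a hedged illustration of how these definitions are typically used (a simplified power-on sequence, not the literal body of s3c_usb_otgphy_init()):

	static void example_otg_phy_power_on(void)
	{
		u32 val;

		/* select the PHY reference clock; a 12MHz input is assumed here */
		val = __raw_readl(S3C_PHYCLK) & ~S3C_PHYCLK_CLKSEL_MASK;
		__raw_writel(val | S3C_PHYCLK_CLKSEL_12M, S3C_PHYCLK);

		/* bring the PHY out of power-down */
		val = __raw_readl(S3C_PHYPWR) & ~S3C_PHYPWR_NORMAL_MASK;
		__raw_writel(val, S3C_PHYPWR);

		/* pulse the PHY reset */
		val = __raw_readl(S3C_RSTCON);
		__raw_writel(val | S3C_RSTCON_PHY, S3C_RSTCON);
		udelay(20);
		__raw_writel(val & ~S3C_RSTCON_PHY, S3C_RSTCON);
	}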
diff --git a/arch/arm/mach-s3c64xx/s3c6400.c b/arch/arm/mach-s3c64xx/s3c6400.c
index 1ce48c5..33273ab 100644
--- a/arch/arm/mach-s3c64xx/s3c6400.c
+++ b/arch/arm/mach-s3c64xx/s3c6400.c
@@ -41,9 +41,9 @@
 #include <plat/devs.h>
 #include <plat/sdhci.h>
 #include <plat/iic-core.h>
-#include <plat/onenand-core.h>
 
 #include "common.h"
+#include "onenand-core.h"
 
 void __init s3c6400_map_io(void)
 {
diff --git a/arch/arm/mach-s3c64xx/s3c6410.c b/arch/arm/mach-s3c64xx/s3c6410.c
index b2a7930..eadc48d 100644
--- a/arch/arm/mach-s3c64xx/s3c6410.c
+++ b/arch/arm/mach-s3c64xx/s3c6410.c
@@ -41,12 +41,12 @@
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/sdhci.h>
-#include <plat/ata-core.h>
 #include <plat/adc-core.h>
 #include <plat/iic-core.h>
-#include <plat/onenand-core.h>
 
+#include "ata-core.h"
 #include "common.h"
+#include "onenand-core.h"
 
 void __init s3c6410_map_io(void)
 {
diff --git a/arch/arm/mach-s3c64xx/setup-usb-phy.c b/arch/arm/mach-s3c64xx/setup-usb-phy.c
index ca960bd..2b17b7f 100644
--- a/arch/arm/mach-s3c64xx/setup-usb-phy.c
+++ b/arch/arm/mach-s3c64xx/setup-usb-phy.c
@@ -16,10 +16,10 @@
 #include <linux/platform_device.h>
 #include <mach/map.h>
 #include <plat/cpu.h>
-#include <plat/regs-usb-hsotg-phy.h>
 #include <plat/usb-phy.h>
 
 #include "regs-sys.h"
+#include "regs-usb-hsotg-phy.h"
 
 static int s3c_usb_otgphy_init(struct platform_device *pdev)
 {
diff --git a/arch/arm/mach-s3c64xx/watchdog-reset.h b/arch/arm/mach-s3c64xx/watchdog-reset.h
new file mode 100644
index 0000000..42707df
--- /dev/null
+++ b/arch/arm/mach-s3c64xx/watchdog-reset.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2008 Simtec Electronics
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C2410 - System define for arch_reset() function
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __PLAT_SAMSUNG_WATCHDOG_RESET_H
+#define __PLAT_SAMSUNG_WATCHDOG_RESET_H
+
+extern void samsung_wdt_reset(void);
+extern void samsung_wdt_reset_of_init(void);
+extern void samsung_wdt_reset_init(void __iomem *base);
+
+#endif /* __PLAT_SAMSUNG_WATCHDOG_RESET_H */
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 99d9a3b..6d237b4 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -320,10 +320,10 @@
 
 	irq_set_chip_and_handler(d->irq_base + NEP_IRQ_SMC91X, &nochip,
 		handle_simple_irq);
-	set_irq_flags(d->irq_base + NEP_IRQ_SMC91X, IRQF_VALID | IRQF_PROBE);
+	irq_clear_status_flags(d->irq_base + NEP_IRQ_SMC91X, IRQ_NOREQUEST | IRQ_NOPROBE);
 	irq_set_chip_and_handler(d->irq_base + NEP_IRQ_USAR, &nochip,
 		handle_simple_irq);
-	set_irq_flags(d->irq_base + NEP_IRQ_USAR, IRQF_VALID | IRQF_PROBE);
+	irq_clear_status_flags(d->irq_base + NEP_IRQ_USAR, IRQ_NOREQUEST | IRQ_NOPROBE);
 	irq_set_chip(d->irq_base + NEP_IRQ_SA1111, &nochip);
 
 	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 4500647..926e336 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -4,6 +4,7 @@
 
 config PM_RCAR
 	bool
+	select PM_GENERIC_DOMAINS if PM
 
 config PM_RMOBILE
 	bool
@@ -50,6 +51,7 @@
 
 config ARCH_R7S72100
 	bool "RZ/A1H (R7S72100)"
+	select PM_GENERIC_DOMAINS if PM
 	select SYS_SUPPORTS_SH_MTU2
 
 config ARCH_R8A73A4
@@ -80,6 +82,11 @@
 	select ARCH_RCAR_GEN2
 	select I2C
 
+config ARCH_R8A7793
+	bool "R-Car M2-N (R8A7793)"
+	select ARCH_RCAR_GEN2
+	select I2C
+
 config ARCH_R8A7794
 	bool "R-Car E2 (R8A77940)"
 	select ARCH_RCAR_GEN2
@@ -89,13 +96,6 @@
 	select ARCH_RMOBILE
 	select RENESAS_INTC_IRQPIN
 
-comment "Renesas ARM SoCs Board Type"
-
-config MACH_MARZEN
-	bool "MARZEN board"
-	depends on ARCH_R8A7779
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
-
 comment "Renesas ARM SoCs System Configuration"
 endif
 
@@ -103,22 +103,6 @@
 
 comment "Renesas ARM SoCs System Type"
 
-config ARCH_SH73A0
-	bool "SH-Mobile AG5 (R8A73A00)"
-	select ARCH_RMOBILE
-	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select ARM_GIC
-	select I2C
-	select SH_INTC
-	select RENESAS_INTC_IRQPIN
-
-config ARCH_R8A7740
-	bool "R-Mobile A1 (R8A77400)"
-	select ARCH_RMOBILE
-	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select ARM_GIC
-	select RENESAS_INTC_IRQPIN
-
 config ARCH_R8A7778
 	bool "R-Car M1A (R8A77781)"
 	select ARCH_RCAR_GEN1
@@ -133,15 +117,6 @@
 
 comment "Renesas ARM SoCs Board Type"
 
-config MACH_ARMADILLO800EVA
-	bool "Armadillo-800 EVA board"
-	depends on ARCH_R8A7740
-	select ARCH_REQUIRE_GPIOLIB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
-	select SMSC_PHY if SH_ETH
-	select SND_SOC_WM8978 if SND_SIMPLE_CARD && I2C
-	select USE_OF
-
 config MACH_BOCKW
 	bool "BOCK-W platform"
 	depends on ARCH_R8A7778
@@ -164,21 +139,6 @@
 
 	   This is intended to aid developers
 
-config MACH_MARZEN
-	bool "MARZEN board"
-	depends on ARCH_R8A7779
-	select ARCH_REQUIRE_GPIOLIB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
-	select USE_OF
-
-config MACH_KZM9G
-	bool "KZM-A9-GT board"
-	depends on ARCH_SH73A0
-	select ARCH_REQUIRE_GPIOLIB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
-	select SND_SOC_AK4642 if SND_SIMPLE_CARD
-	select USE_OF
-
 comment "Renesas ARM SoCs System Configuration"
 
 config CPU_HAS_INTEVT
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index 89e463d..476de30 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -6,13 +6,14 @@
 obj-y				:= timer.o console.o
 
 # CPU objects
-obj-$(CONFIG_ARCH_SH73A0)	+= setup-sh73a0.o pm-sh73a0.o
+obj-$(CONFIG_ARCH_SH73A0)	+= setup-sh73a0.o
 obj-$(CONFIG_ARCH_R8A73A4)	+= setup-r8a73a4.o
-obj-$(CONFIG_ARCH_R8A7740)	+= setup-r8a7740.o pm-r8a7740.o
+obj-$(CONFIG_ARCH_R8A7740)	+= setup-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7778)	+= setup-r8a7778.o
 obj-$(CONFIG_ARCH_R8A7779)	+= setup-r8a7779.o pm-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)	+= setup-r8a7790.o
 obj-$(CONFIG_ARCH_R8A7791)	+= setup-r8a7791.o
+obj-$(CONFIG_ARCH_R8A7793)	+= setup-r8a7793.o
 obj-$(CONFIG_ARCH_R8A7794)	+= setup-r8a7794.o
 obj-$(CONFIG_ARCH_EMEV2)	+= setup-emev2.o
 obj-$(CONFIG_ARCH_R7S72100)	+= setup-r7s72100.o
@@ -20,10 +21,7 @@
 # Clock objects
 ifndef CONFIG_COMMON_CLK
 obj-y				+= clock.o
-obj-$(CONFIG_ARCH_SH73A0)	+= clock-sh73a0.o
-obj-$(CONFIG_ARCH_R8A7740)	+= clock-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7778)	+= clock-r8a7778.o
-obj-$(CONFIG_ARCH_R8A7779)	+= clock-r8a7779.o
 endif
 
 # CPU reset vector handling objects
@@ -34,6 +32,7 @@
 CFLAGS_setup-rcar-gen2.o	+= -march=armv7-a
 obj-$(CONFIG_ARCH_R8A7790)	+= regulator-quirk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7791)	+= regulator-quirk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7793)	+= regulator-quirk-rcar-gen2.o
 
 # SMP objects
 smp-y				:= $(cpu-y)
@@ -51,14 +50,9 @@
 obj-$(CONFIG_ARCH_RCAR_GEN2)	+= pm-rcar-gen2.o
 
 # Board objects
-ifdef CONFIG_ARCH_SHMOBILE_MULTI
-obj-$(CONFIG_MACH_MARZEN)	+= board-marzen-reference.o
-else
+ifndef CONFIG_ARCH_SHMOBILE_MULTI
 obj-$(CONFIG_MACH_BOCKW)	+= board-bockw.o
 obj-$(CONFIG_MACH_BOCKW_REFERENCE)	+= board-bockw-reference.o
-obj-$(CONFIG_MACH_MARZEN)	+= board-marzen.o
-obj-$(CONFIG_MACH_ARMADILLO800EVA)	+= board-armadillo800eva.o
-obj-$(CONFIG_MACH_KZM9G)	+= board-kzm9g.o intc-sh73a0.o
 endif
 
 # Framework support
diff --git a/arch/arm/mach-shmobile/Makefile.boot b/arch/arm/mach-shmobile/Makefile.boot
index e1ef19c..a489fe9 100644
--- a/arch/arm/mach-shmobile/Makefile.boot
+++ b/arch/arm/mach-shmobile/Makefile.boot
@@ -1,10 +1,7 @@
 # per-board load address for uImage
 loadaddr-y	:=
-loadaddr-$(CONFIG_MACH_ARMADILLO800EVA) += 0x40008000
 loadaddr-$(CONFIG_MACH_BOCKW) += 0x60008000
 loadaddr-$(CONFIG_MACH_BOCKW_REFERENCE) += 0x60008000
-loadaddr-$(CONFIG_MACH_KZM9G) += 0x41008000
-loadaddr-$(CONFIG_MACH_MARZEN) += 0x60008000
 
 __ZRELADDR	:= $(sort $(loadaddr-y))
    zreladdr-y   += $(__ZRELADDR)
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
deleted file mode 100644
index bf37e3c..0000000
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ /dev/null
@@ -1,1365 +0,0 @@
-/*
- * armadillo 800 eva board support
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/i2c-gpio.h>
-#include <linux/input.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/platform_data/st1232_pdata.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/pwm_backlight.h>
-#include <linux/reboot.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/gpio-regulator.h>
-#include <linux/regulator/machine.h>
-#include <linux/sh_eth.h>
-#include <linux/usb/renesas_usbhs.h>
-#include <linux/videodev2.h>
-
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-#include <asm/mach/time.h>
-#include <asm/page.h>
-#include <media/mt9t112.h>
-#include <media/sh_mobile_ceu.h>
-#include <media/soc_camera.h>
-#include <sound/sh_fsi.h>
-#include <sound/simple_card.h>
-#include <video/sh_mobile_hdmi.h>
-#include <video/sh_mobile_lcdc.h>
-
-#include "common.h"
-#include "irqs.h"
-#include "pm-rmobile.h"
-#include "r8a7740.h"
-#include "sh-gpio.h"
-
-/*
- * CON1		Camera Module
- * CON2		Extension Bus
- * CON3		HDMI Output
- * CON4		Composite Video Output
- * CON5		H-UDI JTAG
- * CON6		ARM JTAG
- * CON7		SD1
- * CON8		SD2
- * CON9		RTC BackUp
- * CON10	Monaural Mic Input
- * CON11	Stereo Headphone Output
- * CON12	Audio Line Output(L)
- * CON13	Audio Line Output(R)
- * CON14	AWL13 Module
- * CON15	Extension
- * CON16	LCD1
- * CON17	LCD2
- * CON19	Power Input
- * CON20	USB1
- * CON21	USB2
- * CON22	Serial
- * CON23	LAN
- * CON24	USB3
- * LED1		Camera LED(Yellow)
- * LED2		Power LED (Green)
- * LED3-LED6	User LED(Yellow)
- * LED7		LAN link LED(Green)
- * LED8		LAN activity LED(Yellow)
- */
-
-/*
- * DipSwitch
- *
- *                    SW1
- *
- * -12345678-+---------------+----------------------------
- *  1        | boot          | hermit
- *  0        | boot          | OS auto boot
- * -12345678-+---------------+----------------------------
- *   00      | boot device   | eMMC
- *   10      | boot device   | SDHI0 (CON7)
- *   01      | boot device   | -
- *   11      | boot device   | Extension Buss (CS0)
- * -12345678-+---------------+----------------------------
- *     0     | Extension Bus | D8-D15 disable, eMMC enable
- *     1     | Extension Bus | D8-D15 enable,  eMMC disable
- * -12345678-+---------------+----------------------------
- *      0    | SDHI1         | COM8 disable, COM14 enable
- *      1    | SDHI1         | COM8 enable,  COM14 disable
- * -12345678-+---------------+----------------------------
- *       0   | USB0          | COM20 enable,  COM24 disable
- *       1   | USB0          | COM20 disable, COM24 enable
- * -12345678-+---------------+----------------------------
- *        00 | JTAG          | SH-X2
- *        10 | JTAG          | ARM
- *        01 | JTAG          | -
- *        11 | JTAG          | Boundary Scan
- *-----------+---------------+----------------------------
- */
-
-/*
- * FSI-WM8978
- *
- * this command is required when playback.
- *
- * # amixer set "Headphone" 50
- *
- * this command is required when capture.
- *
- * # amixer set "Input PGA" 15
- * # amixer set "Left Input Mixer MicP" on
- * # amixer set "Left Input Mixer MicN" on
- * # amixer set "Right Input Mixer MicN" on
- * # amixer set "Right Input Mixer MicP" on
- */
-
-/*
- * USB function
- *
- * When you use USB Function,
- * set SW1.6 ON, and connect cable to CN24.
- *
- * USBF needs workaround on R8A7740 chip.
- * These are a little bit complex.
- * see
- *	usbhsf_power_ctrl()
- */
-#define IRQ7		irq_pin(7)
-#define USBCR1		IOMEM(0xe605810a)
-#define USBH		0xC6700000
-#define USBH_USBCTR	0x10834
-
-struct usbhsf_private {
-	struct clk *phy;
-	struct clk *usb24;
-	struct clk *pci;
-	struct clk *func;
-	struct clk *host;
-	void __iomem *usbh_base;
-	struct renesas_usbhs_platform_info info;
-};
-
-#define usbhsf_get_priv(pdev)				\
-	container_of(renesas_usbhs_get_info(pdev),	\
-		     struct usbhsf_private, info)
-
-static int usbhsf_get_id(struct platform_device *pdev)
-{
-	return USBHS_GADGET;
-}
-
-static int usbhsf_power_ctrl(struct platform_device *pdev,
-			      void __iomem *base, int enable)
-{
-	struct usbhsf_private *priv = usbhsf_get_priv(pdev);
-
-	/*
-	 * Work around for USB Function.
-	 * It needs USB host clock, and settings
-	 */
-	if (enable) {
-		/*
-		 * enable all the related usb clocks
-		 * for usb workaround
-		 */
-		clk_enable(priv->usb24);
-		clk_enable(priv->pci);
-		clk_enable(priv->host);
-		clk_enable(priv->func);
-		clk_enable(priv->phy);
-
-		/*
-		 * set USBCR1
-		 *
-		 * Port1 is driven by USB function,
-		 * Port2 is driven by USB HOST
-		 * One HOST (Port1 or Port2 is HOST)
-		 * USB PLL input clock = 24MHz
-		 */
-		__raw_writew(0xd750, USBCR1);
-		mdelay(1);
-
-		/*
-		 * start USB Host
-		 */
-		__raw_writel(0x0000000c, priv->usbh_base + USBH_USBCTR);
-		__raw_writel(0x00000008, priv->usbh_base + USBH_USBCTR);
-		mdelay(10);
-
-		/*
-		 * USB PHY Power ON
-		 */
-		__raw_writew(0xd770, USBCR1);
-		__raw_writew(0x4000, base + 0x102); /* USBF :: SUSPMODE */
-
-	} else {
-		__raw_writel(0x0000010f, priv->usbh_base + USBH_USBCTR);
-		__raw_writew(0xd7c0, USBCR1); /* GPIO */
-
-		clk_disable(priv->phy);
-		clk_disable(priv->func);	/* usb work around */
-		clk_disable(priv->host);	/* usb work around */
-		clk_disable(priv->pci);		/* usb work around */
-		clk_disable(priv->usb24);	/* usb work around */
-	}
-
-	return 0;
-}
-
-static int usbhsf_get_vbus(struct platform_device *pdev)
-{
-	return gpio_get_value(209);
-}
-
-static irqreturn_t usbhsf_interrupt(int irq, void *data)
-{
-	struct platform_device *pdev = data;
-
-	renesas_usbhs_call_notify_hotplug(pdev);
-
-	return IRQ_HANDLED;
-}
-
-static int usbhsf_hardware_exit(struct platform_device *pdev)
-{
-	struct usbhsf_private *priv = usbhsf_get_priv(pdev);
-
-	if (!IS_ERR(priv->phy))
-		clk_put(priv->phy);
-	if (!IS_ERR(priv->usb24))
-		clk_put(priv->usb24);
-	if (!IS_ERR(priv->pci))
-		clk_put(priv->pci);
-	if (!IS_ERR(priv->host))
-		clk_put(priv->host);
-	if (!IS_ERR(priv->func))
-		clk_put(priv->func);
-	if (priv->usbh_base)
-		iounmap(priv->usbh_base);
-
-	priv->phy	= NULL;
-	priv->usb24	= NULL;
-	priv->pci	= NULL;
-	priv->host	= NULL;
-	priv->func	= NULL;
-	priv->usbh_base	= NULL;
-
-	free_irq(IRQ7, pdev);
-
-	return 0;
-}
-
-static int usbhsf_hardware_init(struct platform_device *pdev)
-{
-	struct usbhsf_private *priv = usbhsf_get_priv(pdev);
-	int ret;
-
-	priv->phy	= clk_get(&pdev->dev, "phy");
-	priv->usb24	= clk_get(&pdev->dev, "usb24");
-	priv->pci	= clk_get(&pdev->dev, "pci");
-	priv->func	= clk_get(&pdev->dev, "func");
-	priv->host	= clk_get(&pdev->dev, "host");
-	priv->usbh_base	= ioremap_nocache(USBH, 0x20000);
-
-	if (IS_ERR(priv->phy)		||
-	    IS_ERR(priv->usb24)		||
-	    IS_ERR(priv->pci)		||
-	    IS_ERR(priv->host)		||
-	    IS_ERR(priv->func)		||
-	    !priv->usbh_base) {
-		dev_err(&pdev->dev, "USB clock setting failed\n");
-		usbhsf_hardware_exit(pdev);
-		return -EIO;
-	}
-
-	ret = request_irq(IRQ7, usbhsf_interrupt, IRQF_TRIGGER_NONE,
-			  dev_name(&pdev->dev), pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "request_irq err\n");
-		return ret;
-	}
-	irq_set_irq_type(IRQ7, IRQ_TYPE_EDGE_BOTH);
-
-	/* usb24 use 1/1 of parent clock (= usb24s = 24MHz) */
-	clk_set_rate(priv->usb24,
-		     clk_get_rate(clk_get_parent(priv->usb24)));
-
-	return 0;
-}
-
-static struct usbhsf_private usbhsf_private = {
-	.info = {
-		.platform_callback = {
-			.get_id		= usbhsf_get_id,
-			.get_vbus	= usbhsf_get_vbus,
-			.hardware_init	= usbhsf_hardware_init,
-			.hardware_exit	= usbhsf_hardware_exit,
-			.power_ctrl	= usbhsf_power_ctrl,
-		},
-		.driver_param = {
-			.buswait_bwait		= 5,
-			.detection_delay	= 5,
-			.d0_rx_id	= SHDMA_SLAVE_USBHS_RX,
-			.d1_tx_id	= SHDMA_SLAVE_USBHS_TX,
-		},
-	}
-};
-
-static struct resource usbhsf_resources[] = {
-	{
-		.name	= "USBHS",
-		.start	= 0xe6890000,
-		.end	= 0xe6890104 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		.start	= gic_spi(51),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device usbhsf_device = {
-	.name	= "renesas_usbhs",
-	.dev = {
-		.platform_data = &usbhsf_private.info,
-	},
-	.id = -1,
-	.num_resources	= ARRAY_SIZE(usbhsf_resources),
-	.resource	= usbhsf_resources,
-};
-
-/* Ether */
-static struct sh_eth_plat_data sh_eth_platdata = {
-	.phy			= 0x00, /* LAN8710A */
-	.edmac_endian		= EDMAC_LITTLE_ENDIAN,
-	.phy_interface		= PHY_INTERFACE_MODE_MII,
-};
-
-static struct resource sh_eth_resources[] = {
-	{
-		.start	= 0xe9a00000,
-		.end	= 0xe9a00800 - 1,
-		.flags	= IORESOURCE_MEM,
-	}, {
-		.start	= 0xe9a01800,
-		.end	= 0xe9a02000 - 1,
-		.flags	= IORESOURCE_MEM,
-	}, {
-		.start	= gic_spi(110),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sh_eth_device = {
-	.name = "r8a7740-gether",
-	.id = -1,
-	.dev = {
-		.platform_data = &sh_eth_platdata,
-		.dma_mask = &sh_eth_device.dev.coherent_dma_mask,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-	.resource = sh_eth_resources,
-	.num_resources = ARRAY_SIZE(sh_eth_resources),
-};
-
-/* PWM */
-static struct resource pwm_resources[] = {
-	[0] = {
-		.start = 0xe6600000,
-		.end = 0xe66000ff,
-		.flags = IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device pwm_device = {
-	.name = "renesas-tpu-pwm",
-	.id = -1,
-	.num_resources = ARRAY_SIZE(pwm_resources),
-	.resource = pwm_resources,
-};
-
-static struct pwm_lookup pwm_lookup[] = {
-	PWM_LOOKUP("renesas-tpu-pwm", 2, "pwm-backlight.0", NULL,
-		   33333, PWM_POLARITY_INVERSED),
-};
-
-/* LCDC and backlight */
-static struct platform_pwm_backlight_data pwm_backlight_data = {
-	.lth_brightness = 50,
-	.max_brightness = 255,
-	.dft_brightness = 255,
-	.pwm_period_ns = 33333, /* 30kHz */
-	.enable_gpio = 61,
-};
-
-static struct platform_device pwm_backlight_device = {
-	.name = "pwm-backlight",
-	.dev = {
-		.platform_data = &pwm_backlight_data,
-	},
-};
-
-static struct fb_videomode lcdc0_mode = {
-	.name		= "AMPIER/AM-800480",
-	.xres		= 800,
-	.yres		= 480,
-	.left_margin	= 88,
-	.right_margin	= 40,
-	.hsync_len	= 128,
-	.upper_margin	= 20,
-	.lower_margin	= 5,
-	.vsync_len	= 5,
-	.sync		= 0,
-};
-
-static struct sh_mobile_lcdc_info lcdc0_info = {
-	.clock_source	= LCDC_CLK_BUS,
-	.ch[0] = {
-		.chan		= LCDC_CHAN_MAINLCD,
-		.fourcc		= V4L2_PIX_FMT_RGB565,
-		.interface_type	= RGB24,
-		.clock_divider	= 5,
-		.flags		= 0,
-		.lcd_modes	= &lcdc0_mode,
-		.num_modes	= 1,
-		.panel_cfg = {
-			.width	= 111,
-			.height = 68,
-		},
-	},
-};
-
-static struct resource lcdc0_resources[] = {
-	[0] = {
-		.name	= "LCD0",
-		.start	= 0xfe940000,
-		.end	= 0xfe943fff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(177),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device lcdc0_device = {
-	.name		= "sh_mobile_lcdc_fb",
-	.num_resources	= ARRAY_SIZE(lcdc0_resources),
-	.resource	= lcdc0_resources,
-	.id		= 0,
-	.dev	= {
-		.platform_data	= &lcdc0_info,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-};
-
-/*
- * LCDC1/HDMI
- */
-static struct sh_mobile_hdmi_info hdmi_info = {
-	.flags		= HDMI_OUTPUT_PUSH_PULL |
-			  HDMI_OUTPUT_POLARITY_HI |
-			  HDMI_32BIT_REG |
-			  HDMI_HAS_HTOP1 |
-			  HDMI_SND_SRC_SPDIF,
-};
-
-static struct resource hdmi_resources[] = {
-	[0] = {
-		.name	= "HDMI",
-		.start	= 0xe6be0000,
-		.end	= 0xe6be03ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(131),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.name	= "HDMI emma3pf",
-		.start	= 0xe6be4000,
-		.end	= 0xe6be43ff,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device hdmi_device = {
-	.name		= "sh-mobile-hdmi",
-	.num_resources	= ARRAY_SIZE(hdmi_resources),
-	.resource	= hdmi_resources,
-	.id             = -1,
-	.dev	= {
-		.platform_data	= &hdmi_info,
-	},
-};
-
-static const struct fb_videomode lcdc1_mode = {
-	.name		= "HDMI 720p",
-	.xres		= 1280,
-	.yres		= 720,
-	.pixclock	= 13468,
-	.left_margin	= 220,
-	.right_margin	= 110,
-	.hsync_len	= 40,
-	.upper_margin	= 20,
-	.lower_margin	= 5,
-	.vsync_len	= 5,
-	.refresh	= 60,
-	.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
-};
-
-static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
-	.clock_source	= LCDC_CLK_PERIPHERAL, /* HDMI clock */
-	.ch[0] = {
-		.chan			= LCDC_CHAN_MAINLCD,
-		.fourcc			= V4L2_PIX_FMT_RGB565,
-		.interface_type		= RGB24,
-		.clock_divider		= 1,
-		.flags			= LCDC_FLAGS_DWPOL,
-		.lcd_modes		= &lcdc1_mode,
-		.num_modes		= 1,
-		.tx_dev			= &hdmi_device,
-		.panel_cfg = {
-			.width	= 1280,
-			.height = 720,
-		},
-	},
-};
-
-static struct resource hdmi_lcdc_resources[] = {
-	[0] = {
-		.name	= "LCDC1",
-		.start	= 0xfe944000,
-		.end	= 0xfe948000 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(178),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device hdmi_lcdc_device = {
-	.name		= "sh_mobile_lcdc_fb",
-	.num_resources	= ARRAY_SIZE(hdmi_lcdc_resources),
-	.resource	= hdmi_lcdc_resources,
-	.id		= 1,
-	.dev	= {
-		.platform_data	= &hdmi_lcdc_info,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-};
-
-/* LEDS */
-static struct gpio_led gpio_leds[] = {
-	{
-		.name		= "LED3",
-		.gpio		= 102,
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	}, {
-		.name		= "LED4",
-		.gpio		= 111,
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	}, {
-		.name		= "LED5",
-		.gpio		= 110,
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	}, {
-		.name		= "LED6",
-		.gpio		= 177,
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	},
-};
-
-static struct gpio_led_platform_data leds_gpio_info = {
-	.leds		= gpio_leds,
-	.num_leds	= ARRAY_SIZE(gpio_leds),
-};
-
-static struct platform_device leds_gpio_device = {
-	.name   = "leds-gpio",
-	.id     = -1,
-	.dev    = {
-		.platform_data  = &leds_gpio_info,
-	},
-};
-
-/* GPIO KEY */
-#define GPIO_KEY(c, g, d, ...) \
-	{ .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
-
-static struct gpio_keys_button gpio_buttons[] = {
-	GPIO_KEY(KEY_POWER,	99,	"SW3", .wakeup = 1),
-	GPIO_KEY(KEY_BACK,	100,	"SW4"),
-	GPIO_KEY(KEY_MENU,	97,	"SW5"),
-	GPIO_KEY(KEY_HOME,	98,	"SW6"),
-};
-
-static struct gpio_keys_platform_data gpio_key_info = {
-	.buttons	= gpio_buttons,
-	.nbuttons	= ARRAY_SIZE(gpio_buttons),
-};
-
-static struct platform_device gpio_keys_device = {
-	.name   = "gpio-keys",
-	.id     = -1,
-	.dev    = {
-		.platform_data  = &gpio_key_info,
-	},
-};
-
-/* Fixed 3.3V regulator to be used by SDHI1, MMCIF */
-static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
-	REGULATOR_SUPPLY("vmmc", "sh_mmcif"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
-};
-
-/* Fixed 3.3V regulator used by LCD backlight */
-static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
-	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
-};
-
-/* Fixed 3.3V regulator to be used by SDHI0 */
-static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
-};
-
-static struct regulator_init_data vcc_sdhi0_init_data = {
-	.constraints = {
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(vcc_sdhi0_consumers),
-	.consumer_supplies      = vcc_sdhi0_consumers,
-};
-
-static struct fixed_voltage_config vcc_sdhi0_info = {
-	.supply_name = "SDHI0 Vcc",
-	.microvolts = 3300000,
-	.gpio = 75,
-	.enable_high = 1,
-	.init_data = &vcc_sdhi0_init_data,
-};
-
-static struct platform_device vcc_sdhi0 = {
-	.name = "reg-fixed-voltage",
-	.id   = 1,
-	.dev  = {
-		.platform_data = &vcc_sdhi0_info,
-	},
-};
-
-/* 1.8 / 3.3V SDHI0 VccQ regulator */
-static struct regulator_consumer_supply vccq_sdhi0_consumers[] = {
-	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
-};
-
-static struct regulator_init_data vccq_sdhi0_init_data = {
-	.constraints = {
-		.input_uV	= 3300000,
-		.min_uV		= 1800000,
-		.max_uV         = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
-				  REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(vccq_sdhi0_consumers),
-	.consumer_supplies      = vccq_sdhi0_consumers,
-};
-
-static struct gpio vccq_sdhi0_gpios[] = {
-	{17, GPIOF_OUT_INIT_LOW, "vccq-sdhi0" },
-};
-
-static struct gpio_regulator_state vccq_sdhi0_states[] = {
-	{ .value = 3300000, .gpios = (0 << 0) },
-	{ .value = 1800000, .gpios = (1 << 0) },
-};
-
-static struct gpio_regulator_config vccq_sdhi0_info = {
-	.supply_name = "vqmmc",
-
-	.enable_gpio = 74,
-	.enable_high = 1,
-	.enabled_at_boot = 0,
-
-	.gpios = vccq_sdhi0_gpios,
-	.nr_gpios = ARRAY_SIZE(vccq_sdhi0_gpios),
-
-	.states = vccq_sdhi0_states,
-	.nr_states = ARRAY_SIZE(vccq_sdhi0_states),
-
-	.type = REGULATOR_VOLTAGE,
-	.init_data = &vccq_sdhi0_init_data,
-};
-
-static struct platform_device vccq_sdhi0 = {
-	.name = "gpio-regulator",
-	.id   = -1,
-	.dev  = {
-		.platform_data = &vccq_sdhi0_info,
-	},
-};
-
-/* Fixed 3.3V regulator to be used by SDHI1 */
-static struct regulator_consumer_supply vcc_sdhi1_consumers[] = {
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"),
-};
-
-static struct regulator_init_data vcc_sdhi1_init_data = {
-	.constraints = {
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(vcc_sdhi1_consumers),
-	.consumer_supplies      = vcc_sdhi1_consumers,
-};
-
-static struct fixed_voltage_config vcc_sdhi1_info = {
-	.supply_name = "SDHI1 Vcc",
-	.microvolts = 3300000,
-	.gpio = 16,
-	.enable_high = 1,
-	.init_data = &vcc_sdhi1_init_data,
-};
-
-static struct platform_device vcc_sdhi1 = {
-	.name = "reg-fixed-voltage",
-	.id   = 2,
-	.dev  = {
-		.platform_data = &vcc_sdhi1_info,
-	},
-};
-
-/* SDHI0 */
-static struct tmio_mmc_data sdhi0_info = {
-	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
-	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
-			  MMC_CAP_POWER_OFF_CARD,
-	.flags		= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
-	.cd_gpio	= 167,
-};
-
-static struct resource sdhi0_resources[] = {
-	{
-		.name	= "SDHI0",
-		.start	= 0xe6850000,
-		.end	= 0xe6850100 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	/*
-	 * no SH_MOBILE_SDHI_IRQ_CARD_DETECT here
-	 */
-	{
-		.name	= SH_MOBILE_SDHI_IRQ_SDCARD,
-		.start	= gic_spi(118),
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		.name	= SH_MOBILE_SDHI_IRQ_SDIO,
-		.start	= gic_spi(119),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi0_device = {
-	.name		= "sh_mobile_sdhi",
-	.id		= 0,
-	.dev		= {
-		.platform_data	= &sdhi0_info,
-	},
-	.num_resources	= ARRAY_SIZE(sdhi0_resources),
-	.resource	= sdhi0_resources,
-};
-
-/* SDHI1 */
-static struct tmio_mmc_data sdhi1_info = {
-	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI1_TX,
-	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI1_RX,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
-			  MMC_CAP_POWER_OFF_CARD,
-	.flags		= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
-	/* Port72 cannot generate IRQs, will be used in polling mode. */
-	.cd_gpio	= 72,
-};
-
-static struct resource sdhi1_resources[] = {
-	[0] = {
-		.name	= "SDHI1",
-		.start	= 0xe6860000,
-		.end	= 0xe6860100 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(121),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start	= gic_spi(122),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[3] = {
-		.start	= gic_spi(123),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi1_device = {
-	.name		= "sh_mobile_sdhi",
-	.id		= 1,
-	.dev		= {
-		.platform_data	= &sdhi1_info,
-	},
-	.num_resources	= ARRAY_SIZE(sdhi1_resources),
-	.resource	= sdhi1_resources,
-};
-
-static const struct pinctrl_map eva_sdhi1_pinctrl_map[] = {
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740",
-				  "sdhi1_data4", "sdhi1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740",
-				  "sdhi1_ctrl", "sdhi1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740",
-				  "sdhi1_cd", "sdhi1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.1", "pfc-r8a7740",
-				  "sdhi1_wp", "sdhi1"),
-};
-
-/* MMCIF */
-static struct sh_mmcif_plat_data sh_mmcif_plat = {
-	.sup_pclk	= 0,
-	.caps		= MMC_CAP_4_BIT_DATA |
-			  MMC_CAP_8_BIT_DATA |
-			  MMC_CAP_NONREMOVABLE,
-	.ccs_unsupported = true,
-	.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,
-	.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
-};
-
-static struct resource sh_mmcif_resources[] = {
-	[0] = {
-		.name	= "MMCIF",
-		.start	= 0xe6bd0000,
-		.end	= 0xe6bd0100 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		/* MMC ERR */
-		.start	= gic_spi(56),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		/* MMC NOR */
-		.start	= gic_spi(57),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sh_mmcif_device = {
-	.name		= "sh_mmcif",
-	.id		= -1,
-	.dev		= {
-		.platform_data	= &sh_mmcif_plat,
-	},
-	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
-	.resource	= sh_mmcif_resources,
-};
-
-/* Camera */
-static int mt9t111_power(struct device *dev, int mode)
-{
-	struct clk *mclk = clk_get(NULL, "video1");
-
-	if (IS_ERR(mclk)) {
-		dev_err(dev, "can't get video1 clock\n");
-		return -EINVAL;
-	}
-
-	if (mode) {
-		/* video1 (= CON1 camera) expect 24MHz */
-		clk_set_rate(mclk, clk_round_rate(mclk, 24000000));
-		clk_enable(mclk);
-		gpio_set_value(158, 1);
-	} else {
-		gpio_set_value(158, 0);
-		clk_disable(mclk);
-	}
-
-	clk_put(mclk);
-
-	return 0;
-}
-
-static struct i2c_board_info i2c_camera_mt9t111 = {
-	I2C_BOARD_INFO("mt9t112", 0x3d),
-};
-
-static struct mt9t112_camera_info mt9t111_info = {
-	.divider = { 16, 0, 0, 7, 0, 10, 14, 7, 7 },
-};
-
-static struct soc_camera_link mt9t111_link = {
-	.i2c_adapter_id	= 0,
-	.bus_id		= 0,
-	.board_info	= &i2c_camera_mt9t111,
-	.power		= mt9t111_power,
-	.priv		= &mt9t111_info,
-};
-
-static struct platform_device camera_device = {
-	.name	= "soc-camera-pdrv",
-	.id	= 0,
-	.dev	= {
-		.platform_data = &mt9t111_link,
-	},
-};
-
-/* CEU0 */
-static struct sh_mobile_ceu_info sh_mobile_ceu0_info = {
-	.flags = SH_CEU_FLAG_LOWER_8BIT,
-};
-
-static struct resource ceu0_resources[] = {
-	[0] = {
-		.name	= "CEU",
-		.start	= 0xfe910000,
-		.end	= 0xfe91009f,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(160),
-		.flags  = IORESOURCE_IRQ,
-	},
-	[2] = {
-		/* place holder for contiguous memory */
-	},
-};
-
-static struct platform_device ceu0_device = {
-	.name		= "sh_mobile_ceu",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(ceu0_resources),
-	.resource	= ceu0_resources,
-	.dev	= {
-		.platform_data		= &sh_mobile_ceu0_info,
-		.coherent_dma_mask	= 0xffffffff,
-	},
-};
-
-/* FSI */
-static struct sh_fsi_platform_info fsi_info = {
-	/* FSI-WM8978 */
-	.port_a = {
-		.tx_id = SHDMA_SLAVE_FSIA_TX,
-	},
-	/* FSI-HDMI */
-	.port_b = {
-		.flags		= SH_FSI_FMT_SPDIF |
-				  SH_FSI_ENABLE_STREAM_MODE |
-				  SH_FSI_CLK_CPG,
-		.tx_id		= SHDMA_SLAVE_FSIB_TX,
-	}
-};
-
-static struct resource fsi_resources[] = {
-	[0] = {
-		.name	= "FSI",
-		.start	= 0xfe1f0000,
-		.end	= 0xfe1f0400 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(9),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device fsi_device = {
-	.name		= "sh_fsi2",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(fsi_resources),
-	.resource	= fsi_resources,
-	.dev	= {
-		.platform_data	= &fsi_info,
-	},
-};
-
-/* FSI-WM8978 */
-static struct asoc_simple_card_info fsi_wm8978_info = {
-	.name		= "wm8978",
-	.card		= "FSI2A-WM8978",
-	.codec		= "wm8978.0-001a",
-	.platform	= "sh_fsi2",
-	.daifmt		= SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM,
-	.cpu_dai = {
-		.name	= "fsia-dai",
-	},
-	.codec_dai = {
-		.name	= "wm8978-hifi",
-		.sysclk	= 12288000,
-	},
-};
-
-static struct platform_device fsi_wm8978_device = {
-	.name	= "asoc-simple-card",
-	.id	= 0,
-	.dev	= {
-		.platform_data	= &fsi_wm8978_info,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.dma_mask = &fsi_wm8978_device.dev.coherent_dma_mask,
-	},
-};
-
-/* FSI-HDMI */
-static struct asoc_simple_card_info fsi2_hdmi_info = {
-	.name		= "HDMI",
-	.card		= "FSI2B-HDMI",
-	.codec		= "sh-mobile-hdmi",
-	.platform	= "sh_fsi2",
-	.daifmt		= SND_SOC_DAIFMT_CBS_CFS,
-	.cpu_dai = {
-		.name	= "fsib-dai",
-	},
-	.codec_dai = {
-		.name = "sh_mobile_hdmi-hifi",
-	},
-};
-
-static struct platform_device fsi_hdmi_device = {
-	.name	= "asoc-simple-card",
-	.id	= 1,
-	.dev	= {
-		.platform_data	= &fsi2_hdmi_info,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.dma_mask = &fsi_hdmi_device.dev.coherent_dma_mask,
-	},
-};
-
-/* RTC: RTC connects i2c-gpio. */
-static struct i2c_gpio_platform_data i2c_gpio_data = {
-	.sda_pin	= 208,
-	.scl_pin	= 91,
-	.udelay		= 5, /* 100 kHz */
-};
-
-static struct platform_device i2c_gpio_device = {
-	.name = "i2c-gpio",
-	.id = 2,
-	.dev = {
-		.platform_data = &i2c_gpio_data,
-	},
-};
-
-/* I2C */
-static struct st1232_pdata st1232_i2c0_pdata = {
-	.reset_gpio = 166,
-};
-
-static struct i2c_board_info i2c0_devices[] = {
-	{
-		I2C_BOARD_INFO("st1232-ts", 0x55),
-		.irq = irq_pin(10),
-		.platform_data = &st1232_i2c0_pdata,
-	},
-	{
-		I2C_BOARD_INFO("wm8978", 0x1a),
-	},
-};
-
-static struct i2c_board_info i2c2_devices[] = {
-	{
-		I2C_BOARD_INFO("s35390a", 0x30),
-		.type = "s35390a",
-	},
-};
-
-/*
- * board devices
- */
-static struct platform_device *eva_devices[] __initdata = {
-	&lcdc0_device,
-	&pwm_device,
-	&pwm_backlight_device,
-	&leds_gpio_device,
-	&gpio_keys_device,
-	&sh_eth_device,
-	&vcc_sdhi0,
-	&vccq_sdhi0,
-	&sdhi0_device,
-	&sh_mmcif_device,
-	&hdmi_device,
-	&hdmi_lcdc_device,
-	&camera_device,
-	&ceu0_device,
-	&fsi_device,
-	&fsi_wm8978_device,
-	&fsi_hdmi_device,
-	&i2c_gpio_device,
-};
-
-static const struct pinctrl_map eva_pinctrl_map[] = {
-	/* CEU0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740",
-				  "ceu0_data_0_7", "ceu0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740",
-				  "ceu0_clk_0", "ceu0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740",
-				  "ceu0_sync", "ceu0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_ceu.0", "pfc-r8a7740",
-				  "ceu0_field", "ceu0"),
-	/* FSIA */
-	PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740",
-				  "fsia_sclk_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740",
-				  "fsia_mclk_out", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740",
-				  "fsia_data_in_1", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.0", "pfc-r8a7740",
-				  "fsia_data_out_0", "fsia"),
-	/* FSIB */
-	PIN_MAP_MUX_GROUP_DEFAULT("asoc-simple-card.1", "pfc-r8a7740",
-				  "fsib_mclk_in", "fsib"),
-	/* GETHER */
-	PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
-				  "gether_mii", "gether"),
-	PIN_MAP_MUX_GROUP_DEFAULT("r8a7740-gether", "pfc-r8a7740",
-				  "gether_int", "gether"),
-	/* HDMI */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-mobile-hdmi", "pfc-r8a7740",
-				  "hdmi", "hdmi"),
-	/* LCD0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740",
-				  "lcd0_data24_0", "lcd0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740",
-				  "lcd0_lclk_1", "lcd0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-r8a7740",
-				  "lcd0_sync", "lcd0"),
-	/* MMCIF */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a7740",
-				  "mmc0_data8_1", "mmc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-r8a7740",
-				  "mmc0_ctrl_1", "mmc0"),
-	/* SCIFA1 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.1", "pfc-r8a7740",
-				  "scifa1_data", "scifa1"),
-	/* SDHI0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740",
-				  "sdhi0_data4", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740",
-				  "sdhi0_ctrl", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7740",
-				  "sdhi0_wp", "sdhi0"),
-	/* ST1232 */
-	PIN_MAP_MUX_GROUP_DEFAULT("0-0055", "pfc-r8a7740",
-				  "intc_irq10", "intc"),
-	/* TPU0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas-tpu-pwm", "pfc-r8a7740",
-				  "tpu0_to2_1", "tpu0"),
-	/* USBHS */
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-r8a7740",
-				  "intc_irq7_1", "intc"),
-};
-
-static void __init eva_clock_init(void)
-{
-	struct clk *system	= clk_get(NULL, "system_clk");
-	struct clk *xtal1	= clk_get(NULL, "extal1");
-	struct clk *usb24s	= clk_get(NULL, "usb24s");
-	struct clk *fsibck	= clk_get(NULL, "fsibck");
-
-	if (IS_ERR(system)	||
-	    IS_ERR(xtal1)	||
-	    IS_ERR(usb24s)	||
-	    IS_ERR(fsibck)) {
-		pr_err("armadillo800eva board clock init failed\n");
-		goto clock_error;
-	}
-
-	/* armadillo 800 eva extal1 is 24MHz */
-	clk_set_rate(xtal1, 24000000);
-
-	/* usb24s use extal1 (= system) clock (= 24MHz) */
-	clk_set_parent(usb24s, system);
-
-	/* FSIBCK is 12.288MHz, and it is parent of FSI-B */
-	clk_set_rate(fsibck, 12288000);
-
-clock_error:
-	if (!IS_ERR(system))
-		clk_put(system);
-	if (!IS_ERR(xtal1))
-		clk_put(xtal1);
-	if (!IS_ERR(usb24s))
-		clk_put(usb24s);
-	if (!IS_ERR(fsibck))
-		clk_put(fsibck);
-}
-
-/*
- * board init
- */
-#define GPIO_PORT7CR	IOMEM(0xe6050007)
-#define GPIO_PORT8CR	IOMEM(0xe6050008)
-static void __init eva_init(void)
-{
-	static struct pm_domain_device domain_devices[] __initdata = {
-		{ "A4LC", &lcdc0_device },
-		{ "A4LC", &hdmi_lcdc_device },
-		{ "A4MP", &hdmi_device },
-		{ "A4MP", &fsi_device },
-		{ "A4R",  &ceu0_device },
-		{ "A4S",  &sh_eth_device },
-		{ "A3SP", &pwm_device },
-		{ "A3SP", &sdhi0_device },
-		{ "A3SP", &sh_mmcif_device },
-	};
-	struct platform_device *usb = NULL, *sdhi1 = NULL;
-
-	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
-				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
-	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
-				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
-
-	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
-	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
-
-	r8a7740_pinmux_init();
-	r8a7740_meram_workaround();
-
-	/* GETHER */
-	gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
-
-	/* USB */
-	gpio_request_one(159, GPIOF_IN, NULL); /* USB_DEVICE_MODE */
-
-	if (gpio_get_value(159)) {
-		/* USB Host */
-	} else {
-		/* USB Func */
-		/*
-		 * The USBHS interrupt handler needs to read the IRQ pin value
-		 * (HI/LOW) to differentiate USB connection and disconnection
-		 * events (usbhsf_get_vbus()). We thus need to select both the
-		 * intc_irq7_1 pin group and GPIO 209 here.
-		 */
-		gpio_request_one(209, GPIOF_IN, NULL);
-
-		platform_device_register(&usbhsf_device);
-		usb = &usbhsf_device;
-	}
-
-	/* CON1/CON15 Camera */
-	gpio_request_one(173, GPIOF_OUT_INIT_LOW, NULL);  /* STANDBY */
-	gpio_request_one(172, GPIOF_OUT_INIT_HIGH, NULL); /* RST */
-	/* see mt9t111_power() */
-	gpio_request_one(158, GPIOF_OUT_INIT_LOW, NULL);  /* CAM_PON */
-
-	/* FSI-WM8978 */
-	gpio_request(7, NULL);
-	gpio_request(8, NULL);
-	gpio_direction_none(GPIO_PORT7CR); /* FSIAOBT needs no direction */
-	gpio_direction_none(GPIO_PORT8CR); /* FSIAOLR needs no direction */
-
-	/*
-	 * CAUTION
-	 *
-	 * DBGMD/LCDC0/FSIA MUX
-	 * DBGMD_SELECT_B should be set after setting PFC Function.
-	 */
-	gpio_request_one(176, GPIOF_OUT_INIT_HIGH, NULL);
-
-	/*
-	 * We can switch CON8/CON14 by SW1.5,
-	 * but it needs after DBGMD_SELECT_B
-	 */
-	gpio_request_one(6, GPIOF_IN, NULL);
-	if (gpio_get_value(6)) {
-		/* CON14 enable */
-	} else {
-		/* CON8 (SDHI1) enable */
-		pinctrl_register_mappings(eva_sdhi1_pinctrl_map,
-					  ARRAY_SIZE(eva_sdhi1_pinctrl_map));
-
-		platform_device_register(&vcc_sdhi1);
-		platform_device_register(&sdhi1_device);
-		sdhi1 = &sdhi1_device;
-	}
-
-
-#ifdef CONFIG_CACHE_L2X0
-	/* Shared attribute override enable, 32K*8way */
-	l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
-#endif
-
-	i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
-	i2c_register_board_info(2, i2c2_devices, ARRAY_SIZE(i2c2_devices));
-
-	r8a7740_add_standard_devices();
-
-	platform_add_devices(eva_devices,
-			     ARRAY_SIZE(eva_devices));
-
-	rmobile_add_devices_to_domains(domain_devices,
-				       ARRAY_SIZE(domain_devices));
-	if (usb)
-		rmobile_add_device_to_domain("A3SP", usb);
-	if (sdhi1)
-		rmobile_add_device_to_domain("A3SP", sdhi1);
-
-	r8a7740_pm_init();
-}
-
-static void __init eva_earlytimer_init(void)
-{
-	r8a7740_clock_init(MD_CK0 | MD_CK2);
-	shmobile_earlytimer_init();
-
-	/* the rate of extal1 clock must be set before late_time_init */
-	eva_clock_init();
-}
-
-#define RESCNT2 IOMEM(0xe6188020)
-static void eva_restart(enum reboot_mode mode, const char *cmd)
-{
-	/* Do soft power on reset */
-	writel((1 << 31), RESCNT2);
-}
-
-static const char *eva_boards_compat_dt[] __initdata = {
-	"renesas,armadillo800eva",
-	NULL,
-};
-
-DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
-	.map_io		= r8a7740_map_io,
-	.init_early	= r8a7740_add_early_devices,
-	.init_irq	= r8a7740_init_irq_of,
-	.init_machine	= eva_init,
-	.init_late	= shmobile_init_late,
-	.init_time	= eva_earlytimer_init,
-	.dt_compat	= eva_boards_compat_dt,
-	.restart	= eva_restart,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-bockw-reference.c b/arch/arm/mach-shmobile/board-bockw-reference.c
index 9a74efd..4f78296 100644
--- a/arch/arm/mach-shmobile/board-bockw-reference.c
+++ b/arch/arm/mach-shmobile/board-bockw-reference.c
@@ -72,7 +72,7 @@
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static const char *bockw_boards_compat_dt[] __initdata = {
+static const char *const bockw_boards_compat_dt[] __initconst = {
 	"renesas,bockw-reference",
 	NULL,
 };
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 25558d1..25a0e72 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -723,7 +723,7 @@
 	ADD_USB_FUNC_DEVICE_IF_POSSIBLE();
 }
 
-static const char *bockw_boards_compat_dt[] __initdata = {
+static const char *const bockw_boards_compat_dt[] __initconst = {
 	"renesas,bockw",
 	NULL,
 };
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
deleted file mode 100644
index 260d831..0000000
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * KZM-A9-GT board support
- *
- * Copyright (C) 2012	Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/i2c.h>
-#include <linux/i2c/pcf857x.h>
-#include <linux/input.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mmcif.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/mfd/as3711.h>
-#include <linux/mfd/tmio.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/pinctrl/pinconf-generic.h>
-#include <linux/platform_device.h>
-#include <linux/reboot.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/smsc911x.h>
-#include <linux/usb/r8a66597.h>
-#include <linux/usb/renesas_usbhs.h>
-#include <linux/videodev2.h>
-
-#include <sound/sh_fsi.h>
-#include <sound/simple_card.h>
-#include <asm/hardware/cache-l2x0.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <video/sh_mobile_lcdc.h>
-
-#include "common.h"
-#include "intc.h"
-#include "irqs.h"
-#include "sh73a0.h"
-
-/*
- * external GPIO
- */
-#define GPIO_PCF8575_BASE	(310)
-#define GPIO_PCF8575_PORT10	(GPIO_PCF8575_BASE + 8)
-#define GPIO_PCF8575_PORT11	(GPIO_PCF8575_BASE + 9)
-#define GPIO_PCF8575_PORT12	(GPIO_PCF8575_BASE + 10)
-#define GPIO_PCF8575_PORT13	(GPIO_PCF8575_BASE + 11)
-#define GPIO_PCF8575_PORT14	(GPIO_PCF8575_BASE + 12)
-#define GPIO_PCF8575_PORT15	(GPIO_PCF8575_BASE + 13)
-#define GPIO_PCF8575_PORT16	(GPIO_PCF8575_BASE + 14)
-
-/* Dummy supplies, where voltage doesn't matter */
-static struct regulator_consumer_supply dummy_supplies[] = {
-	REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
-	REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
-};
-
-/*
- * FSI-AK4648
- *
- * this command is required when playback.
- *
- * # amixer set "LINEOUT Mixer DACL" on
- */
-
-/* SMSC 9221 */
-static struct resource smsc9221_resources[] = {
-	[0] = {
-		.start	= 0x10000000, /* CS4 */
-		.end	= 0x100000ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= irq_pin(3), /* IRQ3 */
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct smsc911x_platform_config smsc9221_platdata = {
-	.flags		= SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-	.phy_interface	= PHY_INTERFACE_MODE_MII,
-	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
-};
-
-static struct platform_device smsc_device = {
-	.name		= "smsc911x",
-	.dev  = {
-		.platform_data = &smsc9221_platdata,
-	},
-	.resource	= smsc9221_resources,
-	.num_resources	= ARRAY_SIZE(smsc9221_resources),
-};
-
-/* USB external chip */
-static struct r8a66597_platdata usb_host_data = {
-	.on_chip	= 0,
-	.xtal		= R8A66597_PLATDATA_XTAL_48MHZ,
-};
-
-static struct resource usb_resources[] = {
-	[0] = {
-		.start	= 0x10010000,
-		.end	= 0x1001ffff - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= irq_pin(1), /* IRQ1 */
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device usb_host_device = {
-	.name	= "r8a66597_hcd",
-	.dev = {
-		.platform_data		= &usb_host_data,
-		.dma_mask		= NULL,
-		.coherent_dma_mask	= 0xffffffff,
-	},
-	.num_resources	= ARRAY_SIZE(usb_resources),
-	.resource	= usb_resources,
-};
-
-/* USB Func CN17 */
-struct usbhs_private {
-	void __iomem *phy;
-	void __iomem *cr2;
-	struct renesas_usbhs_platform_info info;
-};
-
-#define IRQ15			irq_pin(15)
-#define USB_PHY_MODE		(1 << 4)
-#define USB_PHY_INT_EN		((1 << 3) | (1 << 2))
-#define USB_PHY_ON		(1 << 1)
-#define USB_PHY_OFF		(1 << 0)
-#define USB_PHY_INT_CLR		(USB_PHY_ON | USB_PHY_OFF)
-
-#define usbhs_get_priv(pdev) \
-	container_of(renesas_usbhs_get_info(pdev), struct usbhs_private, info)
-
-static int usbhs_get_vbus(struct platform_device *pdev)
-{
-	struct usbhs_private *priv = usbhs_get_priv(pdev);
-
-	return !((1 << 7) & __raw_readw(priv->cr2));
-}
-
-static int usbhs_phy_reset(struct platform_device *pdev)
-{
-	struct usbhs_private *priv = usbhs_get_priv(pdev);
-
-	/* init phy */
-	__raw_writew(0x8a0a, priv->cr2);
-
-	return 0;
-}
-
-static int usbhs_get_id(struct platform_device *pdev)
-{
-	return USBHS_GADGET;
-}
-
-static irqreturn_t usbhs_interrupt(int irq, void *data)
-{
-	struct platform_device *pdev = data;
-	struct usbhs_private *priv = usbhs_get_priv(pdev);
-
-	renesas_usbhs_call_notify_hotplug(pdev);
-
-	/* clear status */
-	__raw_writew(__raw_readw(priv->phy) | USB_PHY_INT_CLR, priv->phy);
-
-	return IRQ_HANDLED;
-}
-
-static int usbhs_hardware_init(struct platform_device *pdev)
-{
-	struct usbhs_private *priv = usbhs_get_priv(pdev);
-	int ret;
-
-	/* clear interrupt status */
-	__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
-
-	ret = request_irq(IRQ15, usbhs_interrupt, IRQF_TRIGGER_HIGH,
-			  dev_name(&pdev->dev), pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "request_irq err\n");
-		return ret;
-	}
-
-	/* enable USB phy interrupt */
-	__raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->phy);
-
-	return 0;
-}
-
-static int usbhs_hardware_exit(struct platform_device *pdev)
-{
-	struct usbhs_private *priv = usbhs_get_priv(pdev);
-
-	/* clear interrupt status */
-	__raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy);
-
-	free_irq(IRQ15, pdev);
-
-	return 0;
-}
-
-static u32 usbhs_pipe_cfg[] = {
-	USB_ENDPOINT_XFER_CONTROL,
-	USB_ENDPOINT_XFER_ISOC,
-	USB_ENDPOINT_XFER_ISOC,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_INT,
-	USB_ENDPOINT_XFER_INT,
-	USB_ENDPOINT_XFER_INT,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-	USB_ENDPOINT_XFER_BULK,
-};
-
-static struct usbhs_private usbhs_private = {
-	.phy	= IOMEM(0xe60781e0),		/* USBPHYINT */
-	.cr2	= IOMEM(0xe605810c),		/* USBCR2 */
-	.info = {
-		.platform_callback = {
-			.hardware_init	= usbhs_hardware_init,
-			.hardware_exit	= usbhs_hardware_exit,
-			.get_id		= usbhs_get_id,
-			.phy_reset	= usbhs_phy_reset,
-			.get_vbus	= usbhs_get_vbus,
-		},
-		.driver_param = {
-			.buswait_bwait	= 4,
-			.has_otg	= 1,
-			.pipe_type	= usbhs_pipe_cfg,
-			.pipe_size	= ARRAY_SIZE(usbhs_pipe_cfg),
-		},
-	},
-};
-
-static struct resource usbhs_resources[] = {
-	[0] = {
-		.start	= 0xE6890000,
-		.end	= 0xE68900e6 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(62),
-		.end	= gic_spi(62),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device usbhs_device = {
-	.name	= "renesas_usbhs",
-	.id	= -1,
-	.dev = {
-		.dma_mask		= NULL,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &usbhs_private.info,
-	},
-	.num_resources	= ARRAY_SIZE(usbhs_resources),
-	.resource	= usbhs_resources,
-};
-
-/* LCDC */
-static struct fb_videomode kzm_lcdc_mode = {
-	.name		= "WVGA Panel",
-	.xres		= 800,
-	.yres		= 480,
-	.left_margin	= 220,
-	.right_margin	= 110,
-	.hsync_len	= 70,
-	.upper_margin	= 20,
-	.lower_margin	= 5,
-	.vsync_len	= 5,
-	.sync		= 0,
-};
-
-static struct sh_mobile_lcdc_info lcdc_info = {
-	.clock_source = LCDC_CLK_BUS,
-	.ch[0] = {
-		.chan		= LCDC_CHAN_MAINLCD,
-		.fourcc		= V4L2_PIX_FMT_RGB565,
-		.interface_type	= RGB24,
-		.lcd_modes	= &kzm_lcdc_mode,
-		.num_modes	= 1,
-		.clock_divider	= 5,
-		.flags		= 0,
-		.panel_cfg = {
-			.width	= 152,
-			.height	= 91,
-		},
-	}
-};
-
-static struct resource lcdc_resources[] = {
-	[0] = {
-		.name	= "LCDC",
-		.start	= 0xfe940000,
-		.end	= 0xfe943fff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= intcs_evt2irq(0x580),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device lcdc_device = {
-	.name		= "sh_mobile_lcdc_fb",
-	.num_resources	= ARRAY_SIZE(lcdc_resources),
-	.resource	= lcdc_resources,
-	.dev	= {
-		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-	},
-};
-
-/* Fixed 1.8V regulator to be used by MMCIF */
-static struct regulator_consumer_supply fixed1v8_power_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
-};
-
-/* MMCIF */
-static struct resource sh_mmcif_resources[] = {
-	[0] = {
-		.name	= "MMCIF",
-		.start	= 0xe6bd0000,
-		.end	= 0xe6bd00ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(140),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start	= gic_spi(141),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct sh_mmcif_plat_data sh_mmcif_platdata = {
-	.ocr		= MMC_VDD_165_195,
-	.caps		= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
-	.ccs_unsupported = true,
-	.slave_id_tx	= SHDMA_SLAVE_MMCIF_TX,
-	.slave_id_rx	= SHDMA_SLAVE_MMCIF_RX,
-};
-
-static struct platform_device mmc_device = {
-	.name		= "sh_mmcif",
-	.dev		= {
-		.dma_mask		= NULL,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &sh_mmcif_platdata,
-	},
-	.num_resources	= ARRAY_SIZE(sh_mmcif_resources),
-	.resource	= sh_mmcif_resources,
-};
-
-/* Fixed 3.3V regulators to be used by SDHI0 */
-static struct regulator_consumer_supply vcc_sdhi0_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
-};
-
-static struct regulator_init_data vcc_sdhi0_init_data = {
-	.constraints = {
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(vcc_sdhi0_consumers),
-	.consumer_supplies      = vcc_sdhi0_consumers,
-};
-
-static struct fixed_voltage_config vcc_sdhi0_info = {
-	.supply_name = "SDHI0 Vcc",
-	.microvolts = 3300000,
-	.gpio = 15,
-	.enable_high = 1,
-	.init_data = &vcc_sdhi0_init_data,
-};
-
-static struct platform_device vcc_sdhi0 = {
-	.name = "reg-fixed-voltage",
-	.id   = 0,
-	.dev  = {
-		.platform_data = &vcc_sdhi0_info,
-	},
-};
-
-/* Fixed 3.3V regulators to be used by SDHI2 */
-static struct regulator_consumer_supply vcc_sdhi2_consumers[] =
-{
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"),
-};
-
-static struct regulator_init_data vcc_sdhi2_init_data = {
-	.constraints = {
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-	.num_consumer_supplies  = ARRAY_SIZE(vcc_sdhi2_consumers),
-	.consumer_supplies      = vcc_sdhi2_consumers,
-};
-
-static struct fixed_voltage_config vcc_sdhi2_info = {
-	.supply_name = "SDHI2 Vcc",
-	.microvolts = 3300000,
-	.gpio = 14,
-	.enable_high = 1,
-	.init_data = &vcc_sdhi2_init_data,
-};
-
-static struct platform_device vcc_sdhi2 = {
-	.name = "reg-fixed-voltage",
-	.id   = 1,
-	.dev  = {
-		.platform_data = &vcc_sdhi2_info,
-	},
-};
-
-/* SDHI */
-static struct tmio_mmc_data sdhi0_info = {
-	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
-	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
-	.flags		= TMIO_MMC_HAS_IDLE_WAIT,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
-			  MMC_CAP_POWER_OFF_CARD,
-};
-
-static struct resource sdhi0_resources[] = {
-	[0] = {
-		.name	= "SDHI0",
-		.start	= 0xee100000,
-		.end	= 0xee1000ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.name	= SH_MOBILE_SDHI_IRQ_CARD_DETECT,
-		.start	= gic_spi(83),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDCARD,
-		.start	= gic_spi(84),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[3] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDIO,
-		.start	= gic_spi(85),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi0_device = {
-	.name		= "sh_mobile_sdhi",
-	.num_resources	= ARRAY_SIZE(sdhi0_resources),
-	.resource	= sdhi0_resources,
-	.dev	= {
-		.platform_data	= &sdhi0_info,
-	},
-};
-
-/* Micro SD */
-static struct tmio_mmc_data sdhi2_info = {
-	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI2_TX,
-	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI2_RX,
-	.flags		= TMIO_MMC_HAS_IDLE_WAIT |
-			  TMIO_MMC_USE_GPIO_CD |
-			  TMIO_MMC_WRPROTECT_DISABLE,
-	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD,
-	.cd_gpio	= 13,
-};
-
-static struct resource sdhi2_resources[] = {
-	[0] = {
-		.name	= "SDHI2",
-		.start	= 0xee140000,
-		.end	= 0xee1400ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.name	= SH_MOBILE_SDHI_IRQ_CARD_DETECT,
-		.start	= gic_spi(103),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDCARD,
-		.start	= gic_spi(104),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[3] = {
-		.name	= SH_MOBILE_SDHI_IRQ_SDIO,
-		.start	= gic_spi(105),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sdhi2_device = {
-	.name		= "sh_mobile_sdhi",
-	.id		= 2,
-	.num_resources	= ARRAY_SIZE(sdhi2_resources),
-	.resource	= sdhi2_resources,
-	.dev	= {
-		.platform_data	= &sdhi2_info,
-	},
-};
-
-/* KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
-
-static struct gpio_keys_button gpio_buttons[] = {
-	GPIO_KEY(KEY_BACK,	GPIO_PCF8575_PORT10,	"SW3"),
-	GPIO_KEY(KEY_RIGHT,	GPIO_PCF8575_PORT11,	"SW2-R"),
-	GPIO_KEY(KEY_LEFT,	GPIO_PCF8575_PORT12,	"SW2-L"),
-	GPIO_KEY(KEY_ENTER,	GPIO_PCF8575_PORT13,	"SW2-P"),
-	GPIO_KEY(KEY_UP,	GPIO_PCF8575_PORT14,	"SW2-U"),
-	GPIO_KEY(KEY_DOWN,	GPIO_PCF8575_PORT15,	"SW2-D"),
-	GPIO_KEY(KEY_HOME,	GPIO_PCF8575_PORT16,	"SW1"),
-};
-
-static struct gpio_keys_platform_data gpio_key_info = {
-	.buttons	= gpio_buttons,
-	.nbuttons	= ARRAY_SIZE(gpio_buttons),
-};
-
-static struct platform_device gpio_keys_device = {
-	.name	= "gpio-keys",
-	.dev	= {
-		.platform_data  = &gpio_key_info,
-	},
-};
-
-/* FSI-AK4648 */
-static struct sh_fsi_platform_info fsi_info = {
-	.port_a = {
-		.tx_id = SHDMA_SLAVE_FSI2A_TX,
-	},
-};
-
-static struct resource fsi_resources[] = {
-	[0] = {
-		.name	= "FSI",
-		.start	= 0xEC230000,
-		.end	= 0xEC230400 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(146),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device fsi_device = {
-	.name		= "sh_fsi2",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(fsi_resources),
-	.resource	= fsi_resources,
-	.dev	= {
-		.platform_data	= &fsi_info,
-	},
-};
-
-static struct asoc_simple_card_info fsi2_ak4648_info = {
-	.name		= "AK4648",
-	.card		= "FSI2A-AK4648",
-	.codec		= "ak4642-codec.0-0012",
-	.platform	= "sh_fsi2",
-	.daifmt		= SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM,
-	.cpu_dai = {
-		.name	= "fsia-dai",
-	},
-	.codec_dai = {
-		.name	= "ak4642-hifi",
-		.sysclk	= 11289600,
-	},
-};
-
-static struct platform_device fsi_ak4648_device = {
-	.name	= "asoc-simple-card",
-	.dev	= {
-		.platform_data	= &fsi2_ak4648_info,
-		.coherent_dma_mask = DMA_BIT_MASK(32),
-		.dma_mask = &fsi_ak4648_device.dev.coherent_dma_mask,
-	},
-};
-
-/* I2C */
-
-/* StepDown1 is used to supply 1.315V to the CPU */
-static struct regulator_init_data as3711_sd1 = {
-	.constraints = {
-		.name = "1.315V CPU",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 1315000,
-		.max_uV = 1335000,
-	},
-};
-
-/* StepDown2 is used to supply 1.8V to the CPU and to the board */
-static struct regulator_init_data as3711_sd2 = {
-	.constraints = {
-		.name = "1.8V",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 1800000,
-		.max_uV = 1800000,
-	},
-};
-
-/*
- * StepDown3 is switched in parallel with StepDown2 and appears to be off,
- * according to the read-back preset register values.
- */
-
-/* StepDown4 is used to supply 1.215V to the CPU and to the board */
-static struct regulator_init_data as3711_sd4 = {
-	.constraints = {
-		.name = "1.215V",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 1215000,
-		.max_uV = 1235000,
-	},
-};
-
-/* LDO1 is unused and unconnected */
-
-/* LDO2 is used to supply 2.8V to the CPU */
-static struct regulator_init_data as3711_ldo2 = {
-	.constraints = {
-		.name = "2.8V CPU",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 2800000,
-		.max_uV = 2800000,
-	},
-};
-
-/* LDO3 is used to supply 3.0V to the CPU */
-static struct regulator_init_data as3711_ldo3 = {
-	.constraints = {
-		.name = "3.0V CPU",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 3000000,
-		.max_uV = 3000000,
-	},
-};
-
-/* LDO4 is used to supply 2.8V to the board */
-static struct regulator_init_data as3711_ldo4 = {
-	.constraints = {
-		.name = "2.8V",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 2800000,
-		.max_uV = 2800000,
-	},
-};
-
-/* LDO5 is switched parallel to LDO4, also set to 2.8V */
-static struct regulator_init_data as3711_ldo5 = {
-	.constraints = {
-		.name = "2.8V #2",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 2800000,
-		.max_uV = 2800000,
-	},
-};
-
-/* LDO6 is unused and unconnected */
-
-/* LDO7 is used to supply 1.15V to the CPU */
-static struct regulator_init_data as3711_ldo7 = {
-	.constraints = {
-		.name = "1.15V CPU",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 1150000,
-		.max_uV = 1150000,
-	},
-};
-
-/* LDO8 is switched parallel to LDO7, also set to 1.15V */
-static struct regulator_init_data as3711_ldo8 = {
-	.constraints = {
-		.name = "1.15V CPU #2",
-		.boot_on = 1,
-		.always_on = 1,
-		.min_uV = 1150000,
-		.max_uV = 1150000,
-	},
-};
-
-static struct as3711_platform_data as3711_pdata = {
-	.regulator	= {
-		.init_data	= {
-			[AS3711_REGULATOR_SD_1] = &as3711_sd1,
-			[AS3711_REGULATOR_SD_2] = &as3711_sd2,
-			[AS3711_REGULATOR_SD_4] = &as3711_sd4,
-			[AS3711_REGULATOR_LDO_2] = &as3711_ldo2,
-			[AS3711_REGULATOR_LDO_3] = &as3711_ldo3,
-			[AS3711_REGULATOR_LDO_4] = &as3711_ldo4,
-			[AS3711_REGULATOR_LDO_5] = &as3711_ldo5,
-			[AS3711_REGULATOR_LDO_7] = &as3711_ldo7,
-			[AS3711_REGULATOR_LDO_8] = &as3711_ldo8,
-		},
-	},
-	.backlight	= {
-		.su2_fb = "sh_mobile_lcdc_fb.0",
-		.su2_max_uA = 36000,
-		.su2_feedback = AS3711_SU2_CURR_AUTO,
-		.su2_fbprot = AS3711_SU2_GPIO4,
-		.su2_auto_curr1 = true,
-		.su2_auto_curr2 = true,
-		.su2_auto_curr3 = true,
-	},
-};
-
-static struct pcf857x_platform_data pcf8575_pdata = {
-	.gpio_base	= GPIO_PCF8575_BASE,
-};
-
-static struct i2c_board_info i2c0_devices[] = {
-	{
-		I2C_BOARD_INFO("ak4648", 0x12),
-	},
-	{
-		I2C_BOARD_INFO("r2025sd", 0x32),
-	},
-	{
-		I2C_BOARD_INFO("ak8975", 0x0c),
-		.irq = irq_pin(28), /* IRQ28 */
-	},
-	{
-		I2C_BOARD_INFO("adxl34x", 0x1d),
-		.irq = irq_pin(26), /* IRQ26 */
-	},
-	{
-		I2C_BOARD_INFO("as3711", 0x40),
-		.irq = intcs_evt2irq(0x3300), /* IRQ24 */
-		.platform_data = &as3711_pdata,
-	},
-};
-
-static struct i2c_board_info i2c1_devices[] = {
-	{
-		I2C_BOARD_INFO("st1232-ts", 0x55),
-		.irq = irq_pin(8), /* IRQ8 */
-	},
-};
-
-static struct i2c_board_info i2c3_devices[] = {
-	{
-		I2C_BOARD_INFO("pcf8575", 0x20),
-		.irq = irq_pin(19), /* IRQ19 */
-		.platform_data = &pcf8575_pdata,
-	},
-};
-
-static struct platform_device *kzm_devices[] __initdata = {
-	&smsc_device,
-	&usb_host_device,
-	&usbhs_device,
-	&lcdc_device,
-	&mmc_device,
-	&vcc_sdhi0,
-	&vcc_sdhi2,
-	&sdhi0_device,
-	&sdhi2_device,
-	&gpio_keys_device,
-	&fsi_device,
-	&fsi_ak4648_device,
-};
-
-static unsigned long pin_pullup_conf[] = {
-	PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0),
-};
-
-static const struct pinctrl_map kzm_pinctrl_map[] = {
-	/* FSIA (AK4648) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
-				  "fsia_mclk_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
-				  "fsia_sclk_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
-				  "fsia_data_in", "fsia"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_fsi2", "pfc-sh73a0",
-				  "fsia_data_out", "fsia"),
-	/* I2C3 */
-	PIN_MAP_MUX_GROUP_DEFAULT("i2c-sh_mobile.3", "pfc-sh73a0",
-				  "i2c3_1", "i2c3"),
-	/* LCD */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh73a0",
-				  "lcd_data24", "lcd"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_lcdc_fb.0", "pfc-sh73a0",
-				  "lcd_sync", "lcd"),
-	/* MMCIF */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				  "mmc0_data8_0", "mmc0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				  "mmc0_ctrl_0", "mmc0"),
-	PIN_MAP_CONFIGS_PIN_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				    "PORT279", pin_pullup_conf),
-	PIN_MAP_CONFIGS_GROUP_DEFAULT("sh_mmcif.0", "pfc-sh73a0",
-				      "mmc0_data8_0", pin_pullup_conf),
-	/* SCIFA4 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
-				  "scifa4_data", "scifa4"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-sh73a0",
-				  "scifa4_ctrl", "scifa4"),
-	/* SDHI0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_data4", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_ctrl", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_cd", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-sh73a0",
-				  "sdhi0_wp", "sdhi0"),
-	/* SDHI2 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh73a0",
-				  "sdhi2_data4", "sdhi2"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-sh73a0",
-				  "sdhi2_ctrl", "sdhi2"),
-	/* SMSC */
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x.0", "pfc-sh73a0",
-				  "bsc_cs4", "bsc"),
-	/* USB */
-	PIN_MAP_MUX_GROUP_DEFAULT("renesas_usbhs", "pfc-sh73a0",
-				  "usb_vbus", "usb"),
-};
-
-static void __init kzm_init(void)
-{
-	regulator_register_always_on(2, "fixed-1.8V", fixed1v8_power_consumers,
-				     ARRAY_SIZE(fixed1v8_power_consumers), 1800000);
-	regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies));
-
-	pinctrl_register_mappings(kzm_pinctrl_map, ARRAY_SIZE(kzm_pinctrl_map));
-
-	sh73a0_pinmux_init();
-
-	/* SMSC */
-	gpio_request_one(224, GPIOF_IN, NULL); /* IRQ3 */
-
-	/* LCDC */
-	gpio_request_one(222, GPIOF_OUT_INIT_HIGH, NULL); /* LCDCDON */
-	gpio_request_one(226, GPIOF_OUT_INIT_HIGH, NULL); /* SC */
-
-	/* Touchscreen */
-	gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */
-
-#ifdef CONFIG_CACHE_L2X0
-	/* Shared attribute override enable, 64K*8way */
-	l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
-#endif
-
-	i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
-	i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices));
-	i2c_register_board_info(3, i2c3_devices, ARRAY_SIZE(i2c3_devices));
-
-	sh73a0_add_standard_devices();
-	platform_add_devices(kzm_devices, ARRAY_SIZE(kzm_devices));
-
-	sh73a0_pm_init();
-}
-
-static void kzm9g_restart(enum reboot_mode mode, const char *cmd)
-{
-#define RESCNT2 IOMEM(0xe6188020)
-	/* Do soft power on reset */
-	writel((1 << 31), RESCNT2);
-}
-
-static const char *kzm9g_boards_compat_dt[] __initdata = {
-	"renesas,kzm9g",
-	NULL,
-};
-
-DT_MACHINE_START(KZM9G_DT, "kzm9g")
-	.smp		= smp_ops(sh73a0_smp_ops),
-	.map_io		= sh73a0_map_io,
-	.init_early	= sh73a0_add_early_devices,
-	.init_irq	= sh73a0_init_irq,
-	.init_machine	= kzm_init,
-	.init_late	= shmobile_init_late,
-	.init_time	= sh73a0_earlytimer_init,
-	.restart	= kzm9g_restart,
-	.dt_compat	= kzm9g_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen-reference.c b/arch/arm/mach-shmobile/board-marzen-reference.c
deleted file mode 100644
index b15eb92..0000000
--- a/arch/arm/mach-shmobile/board-marzen-reference.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * marzen board support - Reference DT implementation
- *
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Magnus Damm
- * Copyright (C) 2013  Simon Horman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk/shmobile.h>
-#include <linux/clocksource.h>
-#include <linux/of_platform.h>
-
-#include <asm/irq.h>
-#include <asm/mach/arch.h>
-
-#include "common.h"
-#include "irqs.h"
-#include "r8a7779.h"
-
-static void __init marzen_init_timer(void)
-{
-	r8a7779_clocks_init(r8a7779_read_mode_pins());
-	clocksource_of_init();
-}
-
-static void __init marzen_init(void)
-{
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-	r8a7779_init_irq_extpin_dt(1); /* IRQ1 as individual interrupt */
-}
-
-static const char *marzen_boards_compat_dt[] __initdata = {
-	"renesas,marzen",
-	"renesas,marzen-reference",
-	NULL,
-};
-
-DT_MACHINE_START(MARZEN, "marzen")
-	.smp		= smp_ops(r8a7779_smp_ops),
-	.map_io		= r8a7779_map_io,
-	.init_early	= shmobile_init_delay,
-	.init_time	= marzen_init_timer,
-	.init_irq	= r8a7779_init_irq_dt,
-	.init_machine	= marzen_init,
-	.init_late	= shmobile_init_late,
-	.dt_compat	= marzen_boards_compat_dt,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
deleted file mode 100644
index 51db288..0000000
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * marzen board support
- *
- * Copyright (C) 2011, 2013  Renesas Solutions Corp.
- * Copyright (C) 2011  Magnus Damm
- * Copyright (C) 2013  Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/leds.h>
-#include <linux/dma-mapping.h>
-#include <linux/pinctrl/machine.h>
-#include <linux/platform_data/camera-rcar.h>
-#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/usb-rcar-phy.h>
-#include <linux/regulator/fixed.h>
-#include <linux/regulator/machine.h>
-#include <linux/smsc911x.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/sh_hspi.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/sh_mobile_sdhi.h>
-#include <linux/mfd/tmio.h>
-
-#include <media/soc_camera.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/traps.h>
-
-#include "common.h"
-#include "irqs.h"
-#include "r8a7779.h"
-
-/* Fixed 3.3V regulator to be used by SDHI0 */
-static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
-	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
-	REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
-};
-
-/* Dummy supplies, where voltage doesn't matter */
-static struct regulator_consumer_supply dummy_supplies[] = {
-	REGULATOR_SUPPLY("vddvario", "smsc911x"),
-	REGULATOR_SUPPLY("vdd33a", "smsc911x"),
-};
-
-/* USB PHY */
-static struct resource usb_phy_resources[] = {
-	[0] = {
-		.start		= 0xffe70800,
-		.end		= 0xffe70900 - 1,
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
-static struct rcar_phy_platform_data usb_phy_platform_data;
-
-static struct platform_device usb_phy = {
-	.name		= "rcar_usb_phy",
-	.id		= -1,
-	.dev  = {
-		.platform_data = &usb_phy_platform_data,
-	},
-	.resource	= usb_phy_resources,
-	.num_resources	= ARRAY_SIZE(usb_phy_resources),
-};
-
-/* SMSC LAN89218 */
-static struct resource smsc911x_resources[] = {
-	[0] = {
-		.start		= 0x18000000, /* ExCS0 */
-		.end		= 0x180000ff, /* A1->A7 */
-		.flags		= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start		= irq_pin(1), /* IRQ 1 */
-		.flags		= IORESOURCE_IRQ,
-	},
-};
-
-static struct smsc911x_platform_config smsc911x_platdata = {
-	.flags		= SMSC911X_USE_32BIT, /* 32-bit SW on 16-bit HW bus */
-	.phy_interface	= PHY_INTERFACE_MODE_MII,
-	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
-};
-
-static struct platform_device eth_device = {
-	.name		= "smsc911x",
-	.id		= -1,
-	.dev  = {
-		.platform_data = &smsc911x_platdata,
-	},
-	.resource	= smsc911x_resources,
-	.num_resources	= ARRAY_SIZE(smsc911x_resources),
-};
-
-static struct resource sdhi0_resources[] = {
-	[0] = {
-		.name	= "sdhi0",
-		.start	= 0xffe4c000,
-		.end	= 0xffe4c0ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_iid(0x88),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct tmio_mmc_data sdhi0_platform_data = {
-	.chan_priv_tx = (void *)HPBDMA_SLAVE_SDHI0_TX,
-	.chan_priv_rx = (void *)HPBDMA_SLAVE_SDHI0_RX,
-	.flags        = TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
-	.capabilities = MMC_CAP_SD_HIGHSPEED,
-};
-
-static struct platform_device sdhi0_device = {
-	.name = "sh_mobile_sdhi",
-	.num_resources = ARRAY_SIZE(sdhi0_resources),
-	.resource = sdhi0_resources,
-	.id = 0,
-	.dev = {
-		.platform_data = &sdhi0_platform_data,
-	}
-};
-
-/* Thermal */
-static struct resource thermal_resources[] = {
-	[0] = {
-		.start		= 0xFFC48000,
-		.end		= 0xFFC48038 - 1,
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device thermal_device = {
-	.name		= "rcar_thermal",
-	.resource	= thermal_resources,
-	.num_resources	= ARRAY_SIZE(thermal_resources),
-};
-
-/* HSPI */
-static struct resource hspi_resources[] = {
-	[0] = {
-		.start		= 0xFFFC7000,
-		.end		= 0xFFFC7018 - 1,
-		.flags		= IORESOURCE_MEM,
-	},
-};
-
-static struct platform_device hspi_device = {
-	.name	= "sh-hspi",
-	.id	= 0,
-	.resource	= hspi_resources,
-	.num_resources	= ARRAY_SIZE(hspi_resources),
-};
-
-/* LEDS */
-static struct gpio_led marzen_leds[] = {
-	{
-		.name		= "led2",
-		.gpio		= RCAR_GP_PIN(4, 29),
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	}, {
-		.name		= "led3",
-		.gpio		= RCAR_GP_PIN(4, 30),
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	}, {
-		.name		= "led4",
-		.gpio		= RCAR_GP_PIN(4, 31),
-		.default_state	= LEDS_GPIO_DEFSTATE_ON,
-	},
-};
-
-static struct gpio_led_platform_data marzen_leds_pdata = {
-	.leds		= marzen_leds,
-	.num_leds	= ARRAY_SIZE(marzen_leds),
-};
-
-static struct platform_device leds_device = {
-	.name	= "leds-gpio",
-	.id	= 0,
-	.dev	= {
-		.platform_data  = &marzen_leds_pdata,
-	},
-};
-
-/* VIN */
-static struct rcar_vin_platform_data vin_platform_data __initdata = {
-	.flags	= RCAR_VIN_BT656,
-};
-
-#define MARZEN_VIN(idx)						\
-static struct resource vin##idx##_resources[] __initdata = {	\
-	DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000),	\
-	DEFINE_RES_IRQ(gic_iid(0x5f + (idx))),			\
-};								\
-								\
-static struct platform_device_info vin##idx##_info __initdata = { \
-	.name		= "r8a7779-vin",			\
-	.id		= idx,					\
-	.res		= vin##idx##_resources,			\
-	.num_res	= ARRAY_SIZE(vin##idx##_resources),	\
-	.dma_mask	= DMA_BIT_MASK(32),			\
-	.data		= &vin_platform_data,			\
-	.size_data	= sizeof(vin_platform_data),		\
-}
-MARZEN_VIN(1);
-MARZEN_VIN(3);
-
-#define MARZEN_CAMERA(idx)					\
-static struct i2c_board_info camera##idx##_info = {		\
-	I2C_BOARD_INFO("adv7180", 0x20 + (idx)),		\
-};								\
-								\
-static struct soc_camera_link iclink##idx##_adv7180 = {		\
-	.bus_id		= 1 + 2 * (idx),			\
-	.i2c_adapter_id	= 0,					\
-	.board_info	= &camera##idx##_info,			\
-};								\
-								\
-static struct platform_device camera##idx##_device = {		\
-	.name	= "soc-camera-pdrv",				\
-	.id	= idx,						\
-	.dev	= {						\
-		.platform_data	= &iclink##idx##_adv7180,	\
-	},							\
-};
-
-MARZEN_CAMERA(0);
-MARZEN_CAMERA(1);
-
-static struct platform_device *marzen_devices[] __initdata = {
-	&eth_device,
-	&sdhi0_device,
-	&thermal_device,
-	&hspi_device,
-	&leds_device,
-	&usb_phy,
-	&camera0_device,
-	&camera1_device,
-};
-
-static const struct pinctrl_map marzen_pinctrl_map[] = {
-	/* DU (CN10: ARGB0, CN13: LVDS) */
-	PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
-				  "du0_rgb888", "du0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
-				  "du0_sync_1", "du0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
-				  "du0_clk_out_0", "du0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
-				  "du1_rgb666", "du1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
-				  "du1_sync_1", "du1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("rcar-du-r8a7779", "pfc-r8a7779",
-				  "du1_clk_out", "du1"),
-	/* HSPI0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-hspi.0", "pfc-r8a7779",
-				  "hspi0", "hspi0"),
-	/* SCIF2 (CN18: DEBUG0) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.2", "pfc-r8a7779",
-				  "scif2_data_c", "scif2"),
-	/* SCIF4 (CN19: DEBUG1) */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh-sci.4", "pfc-r8a7779",
-				  "scif4_data", "scif4"),
-	/* SDHI0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7779",
-				  "sdhi0_data4", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7779",
-				  "sdhi0_ctrl", "sdhi0"),
-	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7779",
-				  "sdhi0_cd", "sdhi0"),
-	/* SMSC */
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-r8a7779",
-				  "intc_irq1_b", "intc"),
-	PIN_MAP_MUX_GROUP_DEFAULT("smsc911x", "pfc-r8a7779",
-				  "lbsc_ex_cs0", "lbsc"),
-	/* USB0 */
-	PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.0", "pfc-r8a7779",
-				  "usb0", "usb0"),
-	/* USB1 */
-	PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.0", "pfc-r8a7779",
-				  "usb1", "usb1"),
-	/* USB2 */
-	PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.1", "pfc-r8a7779",
-				  "usb2", "usb2"),
-	/* VIN1 */
-	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779",
-				  "vin1_clk", "vin1"),
-	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779",
-				  "vin1_data8", "vin1"),
-	/* VIN3 */
-	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779",
-				  "vin3_clk", "vin3"),
-	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779",
-				  "vin3_data8", "vin3"),
-};
-
-static void __init marzen_init(void)
-{
-	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
-				ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
-	regulator_register_fixed(1, dummy_supplies,
-				ARRAY_SIZE(dummy_supplies));
-
-	pinctrl_register_mappings(marzen_pinctrl_map,
-				  ARRAY_SIZE(marzen_pinctrl_map));
-	r8a7779_pinmux_init();
-	r8a7779_init_irq_extpin(1); /* IRQ1 as individual interrupt */
-
-	r8a7779_add_standard_devices();
-	platform_device_register_full(&vin1_info);
-	platform_device_register_full(&vin3_info);
-	platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
-}
-
-static const char *marzen_boards_compat_dt[] __initdata = {
-	"renesas,marzen",
-	NULL,
-};
-
-DT_MACHINE_START(MARZEN, "marzen")
-	.smp		= smp_ops(r8a7779_smp_ops),
-	.map_io		= r8a7779_map_io,
-	.init_early	= r8a7779_add_early_devices,
-	.init_irq	= r8a7779_init_irq_dt,
-	.init_machine	= marzen_init,
-	.init_late	= r8a7779_init_late,
-	.dt_compat	= marzen_boards_compat_dt,
-	.init_time	= r8a7779_earlytimer_init,
-MACHINE_END
diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c
deleted file mode 100644
index 9cac824..0000000
--- a/arch/arm/mach-shmobile/clock-r8a7740.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * R8A7740 processor support
- *
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/sh_clk.h>
-#include <linux/clkdev.h>
-
-#include "clock.h"
-#include "common.h"
-#include "r8a7740.h"
-
-/*
- *        |  MDx  |  XTAL1/EXTAL1   |  System   | EXTALR |
- *  Clock |-------+-----------------+  clock    | 32.768 |   RCLK
- *  Mode  | 2/1/0 | src         MHz |  source   |  KHz   |  source
- * -------+-------+-----------------+-----------+--------+----------
- *    0   | 0 0 0 | External  20~50 | XTAL1     |    O   |  EXTALR
- *    1   | 0 0 1 | Crystal   20~30 | XTAL1     |    O   |  EXTALR
- *    2   | 0 1 0 | External  40~50 | XTAL1 / 2 |    O   |  EXTALR
- *    3   | 0 1 1 | Crystal   40~50 | XTAL1 / 2 |    O   |  EXTALR
- *    4   | 1 0 0 | External  20~50 | XTAL1     |    x   |  XTAL1 / 1024
- *    5   | 1 0 1 | Crystal   20~30 | XTAL1     |    x   |  XTAL1 / 1024
- *    6   | 1 1 0 | External  40~50 | XTAL1 / 2 |    x   |  XTAL1 / 2048
- *    7   | 1 1 1 | Crystal   40~50 | XTAL1 / 2 |    x   |  XTAL1 / 2048
- */
-
-/* CPG registers */
-#define FRQCRA		IOMEM(0xe6150000)
-#define FRQCRB		IOMEM(0xe6150004)
-#define VCLKCR1		IOMEM(0xE6150008)
-#define VCLKCR2		IOMEM(0xE615000c)
-#define FRQCRC		IOMEM(0xe61500e0)
-#define FSIACKCR	IOMEM(0xe6150018)
-#define PLLC01CR	IOMEM(0xe6150028)
-
-#define SUBCKCR		IOMEM(0xe6150080)
-#define USBCKCR		IOMEM(0xe615008c)
-
-#define MSTPSR0		IOMEM(0xe6150030)
-#define MSTPSR1		IOMEM(0xe6150038)
-#define MSTPSR2		IOMEM(0xe6150040)
-#define MSTPSR3		IOMEM(0xe6150048)
-#define MSTPSR4		IOMEM(0xe615004c)
-#define FSIBCKCR	IOMEM(0xe6150090)
-#define HDMICKCR	IOMEM(0xe6150094)
-#define SMSTPCR0	IOMEM(0xe6150130)
-#define SMSTPCR1	IOMEM(0xe6150134)
-#define SMSTPCR2	IOMEM(0xe6150138)
-#define SMSTPCR3	IOMEM(0xe615013c)
-#define SMSTPCR4	IOMEM(0xe6150140)
-
-#define FSIDIVA		IOMEM(0xFE1F8000)
-#define FSIDIVB		IOMEM(0xFE1F8008)
-
-/* Fixed 32 KHz root clock from EXTALR pin */
-static struct clk extalr_clk = {
-	.rate	= 32768,
-};
-
-/*
- * 25MHz default rate for the EXTAL1 root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-static struct clk extal1_clk = {
-	.rate	= 25000000,
-};
-
-/*
- * 48MHz default rate for the EXTAL2 root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-static struct clk extal2_clk = {
-	.rate	= 48000000,
-};
-
-/*
- * 27MHz default rate for the DV_CLKI root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-static struct clk dv_clk = {
-	.rate	= 27000000,
-};
-
-SH_CLK_RATIO(div2,	1, 2);
-SH_CLK_RATIO(div1k,	1, 1024);
-
-SH_FIXED_RATIO_CLK(extal1_div2_clk,	extal1_clk,		div2);
-SH_FIXED_RATIO_CLK(extal1_div1024_clk,	extal1_clk,		div1k);
-SH_FIXED_RATIO_CLK(extal1_div2048_clk,	extal1_div2_clk,	div1k);
-SH_FIXED_RATIO_CLK(extal2_div2_clk,	extal2_clk,		div2);
-
-static struct sh_clk_ops followparent_clk_ops = {
-	.recalc	= followparent_recalc,
-};
-
-/* Main clock */
-static struct clk system_clk = {
-	.ops	= &followparent_clk_ops,
-};
-
-SH_FIXED_RATIO_CLK(system_div2_clk, system_clk,	div2);
-
-/* r_clk */
-static struct clk r_clk = {
-	.ops	= &followparent_clk_ops,
-};
-
-/* PLLC0/PLLC1 */
-static unsigned long pllc01_recalc(struct clk *clk)
-{
-	unsigned long mult = 1;
-
-	if (__raw_readl(PLLC01CR) & (1 << 14))
-		mult = ((__raw_readl(clk->enable_reg) >> 24) & 0x7f) + 1;
-
-	return clk->parent->rate * mult;
-}
-
-static struct sh_clk_ops pllc01_clk_ops = {
-	.recalc		= pllc01_recalc,
-};
-
-static struct clk pllc0_clk = {
-	.ops		= &pllc01_clk_ops,
-	.flags		= CLK_ENABLE_ON_INIT,
-	.parent		= &system_clk,
-	.enable_reg	= (void __iomem *)FRQCRC,
-};
-
-static struct clk pllc1_clk = {
-	.ops		= &pllc01_clk_ops,
-	.flags		= CLK_ENABLE_ON_INIT,
-	.parent		= &system_div2_clk,
-	.enable_reg	= (void __iomem *)FRQCRA,
-};
-
-/* PLLC1 / 2 */
-SH_FIXED_RATIO_CLK(pllc1_div2_clk, pllc1_clk, div2);
-
-/* USB clock */
-/*
- * USBCKCR controls the usb24 clock:
- * bit[7] : parent clock
- * bit[6] : clock divide rate
- * This bit[7] is also used as "usb24s" by other devices
- * (Video clock / Sub clock / SPU clock).
- * You can control this clock as shown below.
- *
- * struct clk *usb24	= clk_get(dev,  "usb24");
- * struct clk *usb24s	= clk_get(NULL, "usb24s");
- * struct clk *system	= clk_get(NULL, "system_clk");
- * int rate = clk_get_rate(system);
- *
- * clk_set_parent(usb24s, system);  // for bit[7]
- * clk_set_rate(usb24, rate / 2);   // for bit[6]
- */
-static struct clk *usb24s_parents[] = {
-	[0] = &system_clk,
-	[1] = &extal2_clk
-};
-
-static int usb24s_enable(struct clk *clk)
-{
-	__raw_writel(__raw_readl(USBCKCR) & ~(1 << 8), USBCKCR);
-
-	return 0;
-}
-
-static void usb24s_disable(struct clk *clk)
-{
-	__raw_writel(__raw_readl(USBCKCR) | (1 << 8), USBCKCR);
-}
-
-static int usb24s_set_parent(struct clk *clk, struct clk *parent)
-{
-	int i, ret;
-	u32 val;
-
-	if (!clk->parent_table || !clk->parent_num)
-		return -EINVAL;
-
-	/* Search the parent */
-	for (i = 0; i < clk->parent_num; i++)
-		if (clk->parent_table[i] == parent)
-			break;
-
-	if (i == clk->parent_num)
-		return -ENODEV;
-
-	ret = clk_reparent(clk, parent);
-	if (ret < 0)
-		return ret;
-
-	val = __raw_readl(USBCKCR);
-	val &= ~(1 << 7);
-	val |= i << 7;
-	__raw_writel(val, USBCKCR);
-
-	return 0;
-}
-
-static struct sh_clk_ops usb24s_clk_ops = {
-	.recalc		= followparent_recalc,
-	.enable		= usb24s_enable,
-	.disable	= usb24s_disable,
-	.set_parent	= usb24s_set_parent,
-};
-
-static struct clk usb24s_clk = {
-	.ops		= &usb24s_clk_ops,
-	.parent_table	= usb24s_parents,
-	.parent_num	= ARRAY_SIZE(usb24s_parents),
-	.parent		= &system_clk,
-};
-
-static unsigned long usb24_recalc(struct clk *clk)
-{
-	return clk->parent->rate /
-		((__raw_readl(USBCKCR) & (1 << 6)) ? 1 : 2);
-};
-
-static int usb24_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 val;
-
-	/* which is closer: parent->rate or parent->rate/2? */
-	val = __raw_readl(USBCKCR);
-	val &= ~(1 << 6);
-	val |= (rate > (clk->parent->rate / 4) * 3) << 6;
-	__raw_writel(val, USBCKCR);
-
-	return 0;
-}
-
-static struct sh_clk_ops usb24_clk_ops = {
-	.recalc		= usb24_recalc,
-	.set_rate	= usb24_set_rate,
-};
-
-static struct clk usb24_clk = {
-	.ops		= &usb24_clk_ops,
-	.parent		= &usb24s_clk,
-};
-
-/* External FSIACK/FSIBCK clock */
-static struct clk fsiack_clk = {
-};
-
-static struct clk fsibck_clk = {
-};
-
-static struct clk *main_clks[] = {
-	&extalr_clk,
-	&extal1_clk,
-	&extal2_clk,
-	&extal1_div2_clk,
-	&extal1_div1024_clk,
-	&extal1_div2048_clk,
-	&extal2_div2_clk,
-	&dv_clk,
-	&system_clk,
-	&system_div2_clk,
-	&r_clk,
-	&pllc0_clk,
-	&pllc1_clk,
-	&pllc1_div2_clk,
-	&usb24s_clk,
-	&usb24_clk,
-	&fsiack_clk,
-	&fsibck_clk,
-};
-
-/* DIV4 clocks */
-static void div4_kick(struct clk *clk)
-{
-	unsigned long value;
-
-	/* set KICK bit in FRQCRB to update hardware setting */
-	value = __raw_readl(FRQCRB);
-	value |= (1 << 31);
-	__raw_writel(value, FRQCRB);
-}
-
-static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18,
-			  24, 32, 36, 48, 0, 72, 96, 0 };
-
-static struct clk_div_mult_table div4_div_mult_table = {
-	.divisors = divisors,
-	.nr_divisors = ARRAY_SIZE(divisors),
-};
-
-static struct clk_div4_table div4_table = {
-	.div_mult_table = &div4_div_mult_table,
-	.kick = div4_kick,
-};
-
-enum {
-	DIV4_I, DIV4_ZG, DIV4_B, DIV4_M1, DIV4_HP,
-	DIV4_HPP, DIV4_USBP, DIV4_S, DIV4_ZB, DIV4_M3, DIV4_CP,
-	DIV4_NR
-};
-
-static struct clk div4_clks[DIV4_NR] = {
-	[DIV4_I]	= SH_CLK_DIV4(&pllc1_clk, FRQCRA, 20, 0x6fff, CLK_ENABLE_ON_INIT),
-	[DIV4_ZG]	= SH_CLK_DIV4(&pllc1_clk, FRQCRA, 16, 0x6fff, CLK_ENABLE_ON_INIT),
-	[DIV4_B]	= SH_CLK_DIV4(&pllc1_clk, FRQCRA,  8, 0x6fff, CLK_ENABLE_ON_INIT),
-	[DIV4_M1]	= SH_CLK_DIV4(&pllc1_clk, FRQCRA,  4, 0x6fff, CLK_ENABLE_ON_INIT),
-	[DIV4_HP]	= SH_CLK_DIV4(&pllc1_clk, FRQCRB,  4, 0x6fff, 0),
-	[DIV4_HPP]	= SH_CLK_DIV4(&pllc1_clk, FRQCRC, 20, 0x6fff, 0),
-	[DIV4_USBP]	= SH_CLK_DIV4(&pllc1_clk, FRQCRC, 16, 0x6fff, 0),
-	[DIV4_S]	= SH_CLK_DIV4(&pllc1_clk, FRQCRC, 12, 0x6fff, 0),
-	[DIV4_ZB]	= SH_CLK_DIV4(&pllc1_clk, FRQCRC,  8, 0x6fff, 0),
-	[DIV4_M3]	= SH_CLK_DIV4(&pllc1_clk, FRQCRC,  4, 0x6fff, 0),
-	[DIV4_CP]	= SH_CLK_DIV4(&pllc1_clk, FRQCRC,  0, 0x6fff, 0),
-};
-
-/* DIV6 reparent */
-enum {
-	DIV6_HDMI,
-	DIV6_VCLK1, DIV6_VCLK2,
-	DIV6_FSIA, DIV6_FSIB,
-	DIV6_REPARENT_NR,
-};
-
-static struct clk *hdmi_parent[] = {
-	[0] = &pllc1_div2_clk,
-	[1] = &system_clk,
-	[2] = &dv_clk
-};
-
-static struct clk *vclk_parents[8] = {
-	[0] = &pllc1_div2_clk,
-	[2] = &dv_clk,
-	[3] = &usb24s_clk,
-	[4] = &extal1_div2_clk,
-	[5] = &extalr_clk,
-};
-
-static struct clk *fsia_parents[] = {
-	[0] = &pllc1_div2_clk,
-	[1] = &fsiack_clk, /* external clock */
-};
-
-static struct clk *fsib_parents[] = {
-	[0] = &pllc1_div2_clk,
-	[1] = &fsibck_clk, /* external clock */
-};
-
-static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
-	[DIV6_HDMI] = SH_CLK_DIV6_EXT(HDMICKCR, 0,
-				      hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2),
-	[DIV6_VCLK1] = SH_CLK_DIV6_EXT(VCLKCR1, 0,
-				       vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3),
-	[DIV6_VCLK2] = SH_CLK_DIV6_EXT(VCLKCR2, 0,
-				       vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3),
-	[DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0,
-				      fsia_parents, ARRAY_SIZE(fsia_parents), 6, 2),
-	[DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0,
-				      fsib_parents, ARRAY_SIZE(fsib_parents), 6, 2),
-};
-
-/* DIV6 clocks */
-enum {
-	DIV6_SUB,
-	DIV6_NR
-};
-
-static struct clk div6_clks[DIV6_NR] = {
-	[DIV6_SUB]	= SH_CLK_DIV6(&pllc1_div2_clk, SUBCKCR, 0),
-};
-
-/* HDMI1/2 clock */
-static unsigned long hdmi12_recalc(struct clk *clk)
-{
-	u32 val = __raw_readl(HDMICKCR);
-	int shift = (int)clk->priv;
-
-	val >>= shift;
-	val &= 0x3;
-
-	return clk->parent->rate / (1 << val);
-};
-
-static int hdmi12_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 val, mask;
-	int i, shift;
-
-	for (i = 0; i < 3; i++)
-		if (rate == clk->parent->rate / (1 << i))
-			goto find;
-	return -ENODEV;
-
-find:
-	shift = (int)clk->priv;
-
-	val = __raw_readl(HDMICKCR);
-	mask = ~(0x3 << shift);
-	val = (val & mask) | i << shift;
-	__raw_writel(val, HDMICKCR);
-
-	return 0;
-};
-
-static struct sh_clk_ops hdmi12_clk_ops = {
-	.recalc		= hdmi12_recalc,
-	.set_rate	= hdmi12_set_rate,
-};
-
-static struct clk hdmi1_clk = {
-	.ops		= &hdmi12_clk_ops,
-	.priv		= (void *)9,
-	.parent		= &div6_reparent_clks[DIV6_HDMI],  /* late install */
-};
-
-static struct clk hdmi2_clk = {
-	.ops		= &hdmi12_clk_ops,
-	.priv		= (void *)11,
-	.parent		= &div6_reparent_clks[DIV6_HDMI], /* late install */
-};
-
-static struct clk *late_main_clks[] = {
-	&hdmi1_clk,
-	&hdmi2_clk,
-};
-
-/* FSI DIV */
-enum { FSIDIV_A, FSIDIV_B, FSIDIV_REPARENT_NR };
-
-static struct clk fsidivs[] = {
-	[FSIDIV_A] = SH_CLK_FSIDIV(FSIDIVA, &div6_reparent_clks[DIV6_FSIA]),
-	[FSIDIV_B] = SH_CLK_FSIDIV(FSIDIVB, &div6_reparent_clks[DIV6_FSIB]),
-};
-
-/* MSTP */
-enum {
-	MSTP128, MSTP127, MSTP125,
-	MSTP116, MSTP111, MSTP100, MSTP117,
-
-	MSTP230, MSTP229,
-	MSTP222,
-	MSTP218, MSTP217, MSTP216, MSTP214,
-	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-
-	MSTP329, MSTP328, MSTP323, MSTP320,
-	MSTP314, MSTP313, MSTP312,
-	MSTP309, MSTP304,
-
-	MSTP416, MSTP415, MSTP407, MSTP406,
-
-	MSTP_NR
-};
-
-static struct clk mstp_clks[MSTP_NR] = {
-	[MSTP128] = SH_CLK_MSTP32(&div4_clks[DIV4_S],	SMSTPCR1, 28, 0), /* CEU21 */
-	[MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S],	SMSTPCR1, 27, 0), /* CEU20 */
-	[MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR1, 25, 0), /* TMU0 */
-	[MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B],	SMSTPCR1, 17, 0), /* LCDC1 */
-	[MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP],	SMSTPCR1, 16, 0), /* IIC0 */
-	[MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR1, 11, 0), /* TMU1 */
-	[MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B],	SMSTPCR1,  0, 0), /* LCDC0 */
-
-	[MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2, 30, 0), /* SCIFA6 */
-	[MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR2, 29, 0), /* INTCA */
-	[MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2, 22, 0), /* SCIFA7 */
-	[MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 18, 0), /* DMAC1 */
-	[MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 17, 0), /* DMAC2 */
-	[MSTP216] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 16, 0), /* DMAC3 */
-	[MSTP214] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],  SMSTPCR2, 14, 0), /* USBDMAC */
-	[MSTP207] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  7, 0), /* SCIFA5 */
-	[MSTP206] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  6, 0), /* SCIFB */
-	[MSTP204] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  4, 0), /* SCIFA0 */
-	[MSTP203] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  3, 0), /* SCIFA1 */
-	[MSTP202] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  2, 0), /* SCIFA2 */
-	[MSTP201] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  1, 0), /* SCIFA3 */
-	[MSTP200] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR2,  0, 0), /* SCIFA4 */
-
-	[MSTP329] = SH_CLK_MSTP32(&r_clk,		SMSTPCR3, 29, 0), /* CMT10 */
-	[MSTP328] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR3, 28, 0), /* FSI */
-	[MSTP323] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB],	SMSTPCR3, 23, 0), /* IIC1 */
-	[MSTP320] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR3, 20, 0), /* USBF */
-	[MSTP314] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR3, 14, 0), /* SDHI0 */
-	[MSTP313] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR3, 13, 0), /* SDHI1 */
-	[MSTP312] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR3, 12, 0), /* MMC */
-	[MSTP309] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR3,  9, 0), /* GEther */
-	[MSTP304] = SH_CLK_MSTP32(&div4_clks[DIV4_CP],	SMSTPCR3,  4, 0), /* TPU0 */
-
-	[MSTP416] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR4, 16, 0), /* USBHOST */
-	[MSTP415] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR4, 15, 0), /* SDHI2 */
-	[MSTP407] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR4,  7, 0), /* USB-Func */
-	[MSTP406] = SH_CLK_MSTP32(&div4_clks[DIV4_HP],	SMSTPCR4,  6, 0), /* USB Phy */
-};
-
-static struct clk_lookup lookups[] = {
-	/* main clocks */
-	CLKDEV_CON_ID("extalr",			&extalr_clk),
-	CLKDEV_CON_ID("extal1",			&extal1_clk),
-	CLKDEV_CON_ID("extal2",			&extal2_clk),
-	CLKDEV_CON_ID("extal1_div2",		&extal1_div2_clk),
-	CLKDEV_CON_ID("extal1_div1024",		&extal1_div1024_clk),
-	CLKDEV_CON_ID("extal1_div2048",		&extal1_div2048_clk),
-	CLKDEV_CON_ID("extal2_div2",		&extal2_div2_clk),
-	CLKDEV_CON_ID("dv_clk",			&dv_clk),
-	CLKDEV_CON_ID("system_clk",		&system_clk),
-	CLKDEV_CON_ID("system_div2_clk",	&system_div2_clk),
-	CLKDEV_CON_ID("r_clk",			&r_clk),
-	CLKDEV_CON_ID("pllc0_clk",		&pllc0_clk),
-	CLKDEV_CON_ID("pllc1_clk",		&pllc1_clk),
-	CLKDEV_CON_ID("pllc1_div2_clk",		&pllc1_div2_clk),
-	CLKDEV_CON_ID("usb24s",			&usb24s_clk),
-	CLKDEV_CON_ID("hdmi1",			&hdmi1_clk),
-	CLKDEV_CON_ID("hdmi2",			&hdmi2_clk),
-	CLKDEV_CON_ID("video1",			&div6_reparent_clks[DIV6_VCLK1]),
-	CLKDEV_CON_ID("video2",			&div6_reparent_clks[DIV6_VCLK2]),
-	CLKDEV_CON_ID("fsiack",			&fsiack_clk),
-	CLKDEV_CON_ID("fsibck",			&fsibck_clk),
-
-	/* DIV4 clocks */
-	CLKDEV_CON_ID("i_clk",			&div4_clks[DIV4_I]),
-	CLKDEV_CON_ID("zg_clk",			&div4_clks[DIV4_ZG]),
-	CLKDEV_CON_ID("b_clk",			&div4_clks[DIV4_B]),
-	CLKDEV_CON_ID("m1_clk",			&div4_clks[DIV4_M1]),
-	CLKDEV_CON_ID("hp_clk",			&div4_clks[DIV4_HP]),
-	CLKDEV_CON_ID("hpp_clk",		&div4_clks[DIV4_HPP]),
-	CLKDEV_CON_ID("s_clk",			&div4_clks[DIV4_S]),
-	CLKDEV_CON_ID("zb_clk",			&div4_clks[DIV4_ZB]),
-	CLKDEV_CON_ID("m3_clk",			&div4_clks[DIV4_M3]),
-	CLKDEV_CON_ID("cp_clk",			&div4_clks[DIV4_CP]),
-
-	/* DIV6 clocks */
-	CLKDEV_CON_ID("sub_clk",		&div6_clks[DIV6_SUB]),
-
-	/* MSTP32 clocks */
-	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0",	&mstp_clks[MSTP100]),
-	CLKDEV_DEV_ID("i2c-sh_mobile.0",	&mstp_clks[MSTP116]),
-	CLKDEV_DEV_ID("fff20000.i2c",		&mstp_clks[MSTP116]),
-	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1",	&mstp_clks[MSTP117]),
-	CLKDEV_DEV_ID("sh_mobile_ceu.0",	&mstp_clks[MSTP127]),
-	CLKDEV_DEV_ID("sh_mobile_ceu.1",	&mstp_clks[MSTP128]),
-
-	CLKDEV_DEV_ID("sh-sci.4",		&mstp_clks[MSTP200]),
-	CLKDEV_DEV_ID("e6c80000.serial",	&mstp_clks[MSTP200]),
-	CLKDEV_DEV_ID("sh-sci.3",		&mstp_clks[MSTP201]),
-	CLKDEV_DEV_ID("e6c70000.serial",	&mstp_clks[MSTP201]),
-	CLKDEV_DEV_ID("sh-sci.2",		&mstp_clks[MSTP202]),
-	CLKDEV_DEV_ID("e6c60000.serial",	&mstp_clks[MSTP202]),
-	CLKDEV_DEV_ID("sh-sci.1",		&mstp_clks[MSTP203]),
-	CLKDEV_DEV_ID("e6c50000.serial",	&mstp_clks[MSTP203]),
-	CLKDEV_DEV_ID("sh-sci.0",		&mstp_clks[MSTP204]),
-	CLKDEV_DEV_ID("e6c40000.serial",	&mstp_clks[MSTP204]),
-	CLKDEV_DEV_ID("sh-sci.8",		&mstp_clks[MSTP206]),
-	CLKDEV_DEV_ID("e6c30000.serial",	&mstp_clks[MSTP206]),
-	CLKDEV_DEV_ID("sh-sci.5",		&mstp_clks[MSTP207]),
-	CLKDEV_DEV_ID("e6cb0000.serial",	&mstp_clks[MSTP207]),
-	CLKDEV_DEV_ID("sh-dma-engine.3",	&mstp_clks[MSTP214]),
-	CLKDEV_DEV_ID("sh-dma-engine.2",	&mstp_clks[MSTP216]),
-	CLKDEV_DEV_ID("sh-dma-engine.1",	&mstp_clks[MSTP217]),
-	CLKDEV_DEV_ID("sh-dma-engine.0",	&mstp_clks[MSTP218]),
-	CLKDEV_DEV_ID("sh-sci.7",		&mstp_clks[MSTP222]),
-	CLKDEV_DEV_ID("e6cd0000.serial",	&mstp_clks[MSTP222]),
-	CLKDEV_DEV_ID("renesas_intc_irqpin.0",	&mstp_clks[MSTP229]),
-	CLKDEV_DEV_ID("renesas_intc_irqpin.1",	&mstp_clks[MSTP229]),
-	CLKDEV_DEV_ID("renesas_intc_irqpin.2",	&mstp_clks[MSTP229]),
-	CLKDEV_DEV_ID("renesas_intc_irqpin.3",	&mstp_clks[MSTP229]),
-	CLKDEV_DEV_ID("sh-sci.6",		&mstp_clks[MSTP230]),
-	CLKDEV_DEV_ID("e6cc0000.serial",	&mstp_clks[MSTP230]),
-
-	CLKDEV_DEV_ID("sh_fsi2",		&mstp_clks[MSTP328]),
-	CLKDEV_DEV_ID("fe1f0000.sound",		&mstp_clks[MSTP328]),
-	CLKDEV_DEV_ID("i2c-sh_mobile.1",	&mstp_clks[MSTP323]),
-	CLKDEV_DEV_ID("e6c20000.i2c",		&mstp_clks[MSTP323]),
-	CLKDEV_DEV_ID("renesas_usbhs",		&mstp_clks[MSTP320]),
-	CLKDEV_DEV_ID("sh_mobile_sdhi.0",	&mstp_clks[MSTP314]),
-	CLKDEV_DEV_ID("e6850000.sd",		&mstp_clks[MSTP314]),
-	CLKDEV_DEV_ID("sh_mobile_sdhi.1",	&mstp_clks[MSTP313]),
-	CLKDEV_DEV_ID("e6860000.sd",		&mstp_clks[MSTP313]),
-	CLKDEV_DEV_ID("sh_mmcif",		&mstp_clks[MSTP312]),
-	CLKDEV_DEV_ID("e6bd0000.mmc",		&mstp_clks[MSTP312]),
-	CLKDEV_DEV_ID("r8a7740-gether",		&mstp_clks[MSTP309]),
-	CLKDEV_DEV_ID("e9a00000.ethernet",	&mstp_clks[MSTP309]),
-	CLKDEV_DEV_ID("renesas-tpu-pwm",	&mstp_clks[MSTP304]),
-	CLKDEV_DEV_ID("e6600000.pwm",		&mstp_clks[MSTP304]),
-
-	CLKDEV_DEV_ID("sh_mobile_sdhi.2",	&mstp_clks[MSTP415]),
-	CLKDEV_DEV_ID("e6870000.sd",		&mstp_clks[MSTP415]),
-
-	/* ICK */
-	CLKDEV_ICK_ID("fck",	"sh-tmu.1",		&mstp_clks[MSTP111]),
-	CLKDEV_ICK_ID("fck",	"fff90000.timer",	&mstp_clks[MSTP111]),
-	CLKDEV_ICK_ID("fck",	"sh-tmu.0",		&mstp_clks[MSTP125]),
-	CLKDEV_ICK_ID("fck",	"fff80000.timer",	&mstp_clks[MSTP125]),
-	CLKDEV_ICK_ID("fck",	"sh-cmt-48.1",		&mstp_clks[MSTP329]),
-	CLKDEV_ICK_ID("fck",	"e6138000.timer",	&mstp_clks[MSTP329]),
-	CLKDEV_ICK_ID("host",	"renesas_usbhs",	&mstp_clks[MSTP416]),
-	CLKDEV_ICK_ID("func",	"renesas_usbhs",	&mstp_clks[MSTP407]),
-	CLKDEV_ICK_ID("phy",	"renesas_usbhs",	&mstp_clks[MSTP406]),
-	CLKDEV_ICK_ID("pci",	"renesas_usbhs",	&div4_clks[DIV4_USBP]),
-	CLKDEV_ICK_ID("usb24",	"renesas_usbhs",	&usb24_clk),
-	CLKDEV_ICK_ID("ick",	"sh-mobile-hdmi",	&div6_reparent_clks[DIV6_HDMI]),
-
-	CLKDEV_ICK_ID("icka", "sh_fsi2",	&div6_reparent_clks[DIV6_FSIA]),
-	CLKDEV_ICK_ID("ickb", "sh_fsi2",	&div6_reparent_clks[DIV6_FSIB]),
-	CLKDEV_ICK_ID("diva", "sh_fsi2",	&fsidivs[FSIDIV_A]),
-	CLKDEV_ICK_ID("divb", "sh_fsi2",	&fsidivs[FSIDIV_B]),
-	CLKDEV_ICK_ID("xcka", "sh_fsi2",	&fsiack_clk),
-	CLKDEV_ICK_ID("xckb", "sh_fsi2",	&fsibck_clk),
-};
-
-void __init r8a7740_clock_init(u8 md_ck)
-{
-	int k, ret = 0;
-
-	/* detect system clock parent */
-	if (md_ck & MD_CK1)
-		system_clk.parent = &extal1_div2_clk;
-	else
-		system_clk.parent = &extal1_clk;
-
-	/* detect RCLK parent */
-	switch (md_ck & (MD_CK2 | MD_CK1)) {
-	case MD_CK2 | MD_CK1:
-		r_clk.parent = &extal1_div2048_clk;
-		break;
-	case MD_CK2:
-		r_clk.parent = &extal1_div1024_clk;
-		break;
-	case MD_CK1:
-	default:
-		r_clk.parent = &extalr_clk;
-		break;
-	}
-
-	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
-		ret = clk_register(main_clks[k]);
-
-	if (!ret)
-		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
-
-	if (!ret)
-		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
-
-	if (!ret)
-		ret = sh_clk_div6_reparent_register(div6_reparent_clks,
-						    DIV6_REPARENT_NR);
-
-	if (!ret)
-		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
-
-	for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
-		ret = clk_register(late_main_clks[k]);
-
-	if (!ret)
-		ret = sh_clk_fsidiv_register(fsidivs, FSIDIV_REPARENT_NR);
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	if (!ret)
-		shmobile_clk_init();
-	else
-		panic("failed to setup r8a7740 clocks\n");
-}
diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c
deleted file mode 100644
index fa8ab2c..0000000
--- a/arch/arm/mach-shmobile/clock-r8a7779.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * r8a7779 clock framework support
- *
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/sh_clk.h>
-#include <linux/clkdev.h>
-#include <linux/sh_timer.h>
-
-#include "clock.h"
-#include "common.h"
-#include "r8a7779.h"
-
-/*
- *		MD1 = 1			MD1 = 0
- *		(PLLA = 1500)		(PLLA = 1600)
- *		(MHz)			(MHz)
- *------------------------------------------------+--------------------
- * clkz		1000   (2/3)		800   (1/2)
- * clkzs	 250   (1/6)		200   (1/8)
- * clki		 750   (1/2)		800   (1/2)
- * clks		 250   (1/6)		200   (1/8)
- * clks1	 125   (1/12)		100   (1/16)
- * clks3	 187.5 (1/8)		200   (1/8)
- * clks4	  93.7 (1/16)		100   (1/16)
- * clkp		  62.5 (1/24)		 50   (1/32)
- * clkg		  62.5 (1/24)		 66.6 (1/24)
- * clkb, CLKOUT
- * (MD2 = 0)	  62.5 (1/24)		 66.6 (1/24)
- * (MD2 = 1)	  41.6 (1/36)		 50   (1/32)
-*/
-
-#define MD(nr)	BIT(nr)
-
-#define MSTPCR0		IOMEM(0xffc80030)
-#define MSTPCR1		IOMEM(0xffc80034)
-#define MSTPCR3		IOMEM(0xffc8003c)
-#define MSTPSR1		IOMEM(0xffc80044)
-
-/* ioremap() through the clock mapping is mandatory to avoid
- * collision with the ARM coherent DMA virtual memory range.
- */
-
-static struct clk_mapping cpg_mapping = {
-	.phys	= 0xffc80000,
-	.len	= 0x80,
-};
-
-/*
- * Default rate for the root input clock, reset this with clk_set_rate()
- * from the platform code.
- */
-static struct clk plla_clk = {
-	/* .rate will be updated in r8a7779_clock_init() */
-	.mapping	= &cpg_mapping,
-};
-
-/*
- * The clock ratios of these clocks will be updated
- * in r8a7779_clock_init()
- */
-SH_FIXED_RATIO_CLK_SET(clkz_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clkzs_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clki_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clks_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clks1_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clks3_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clks4_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clkb_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clkout_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clkp_clk,	plla_clk, 1, 1);
-SH_FIXED_RATIO_CLK_SET(clkg_clk,	plla_clk, 1, 1);
-
-static struct clk *main_clks[] = {
-	&plla_clk,
-	&clkz_clk,
-	&clkzs_clk,
-	&clki_clk,
-	&clks_clk,
-	&clks1_clk,
-	&clks3_clk,
-	&clks4_clk,
-	&clkb_clk,
-	&clkout_clk,
-	&clkp_clk,
-	&clkg_clk,
-};
-
-enum { MSTP323, MSTP322, MSTP321, MSTP320,
-	MSTP120,
-	MSTP116, MSTP115, MSTP114,
-	MSTP110, MSTP109, MSTP108,
-	MSTP103, MSTP101, MSTP100,
-	MSTP030,
-	MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
-	MSTP016, MSTP015, MSTP014,
-	MSTP007,
-	MSTP_NR };
-
-static struct clk mstp_clks[MSTP_NR] = {
-	[MSTP323] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 23, 0), /* SDHI0 */
-	[MSTP322] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 22, 0), /* SDHI1 */
-	[MSTP321] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 21, 0), /* SDHI2 */
-	[MSTP320] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 20, 0), /* SDHI3 */
-	[MSTP120] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 20, MSTPSR1, 0), /* VIN3 */
-	[MSTP116] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 16, MSTPSR1, 0), /* PCIe */
-	[MSTP115] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 15, MSTPSR1, 0), /* SATA */
-	[MSTP114] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1, 14, MSTPSR1, 0), /* Ether */
-	[MSTP110] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1, 10, MSTPSR1, 0), /* VIN0 */
-	[MSTP109] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1,  9, MSTPSR1, 0), /* VIN1 */
-	[MSTP108] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1,  8, MSTPSR1, 0), /* VIN2 */
-	[MSTP103] = SH_CLK_MSTP32_STS(&clks_clk, MSTPCR1,  3, MSTPSR1, 0), /* DU */
-	[MSTP101] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1,  1, MSTPSR1, 0), /* USB2 */
-	[MSTP100] = SH_CLK_MSTP32_STS(&clkp_clk, MSTPCR1,  0, MSTPSR1, 0), /* USB0/1 */
-	[MSTP030] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 30, 0), /* I2C0 */
-	[MSTP029] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 29, 0), /* I2C1 */
-	[MSTP028] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 28, 0), /* I2C2 */
-	[MSTP027] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 27, 0), /* I2C3 */
-	[MSTP026] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 26, 0), /* SCIF0 */
-	[MSTP025] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 25, 0), /* SCIF1 */
-	[MSTP024] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 24, 0), /* SCIF2 */
-	[MSTP023] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 23, 0), /* SCIF3 */
-	[MSTP022] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 22, 0), /* SCIF4 */
-	[MSTP021] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 21, 0), /* SCIF5 */
-	[MSTP016] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 16, 0), /* TMU0 */
-	[MSTP015] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 15, 0), /* TMU1 */
-	[MSTP014] = SH_CLK_MSTP32(&clkp_clk, MSTPCR0, 14, 0), /* TMU2 */
-	[MSTP007] = SH_CLK_MSTP32(&clks_clk, MSTPCR0,  7, 0), /* HSPI */
-};
-
-static struct clk_lookup lookups[] = {
-	/* main clocks */
-	CLKDEV_CON_ID("plla_clk", &plla_clk),
-	CLKDEV_CON_ID("clkz_clk", &clkz_clk),
-	CLKDEV_CON_ID("clkzs_clk", &clkzs_clk),
-
-	/* DIV4 clocks */
-	CLKDEV_CON_ID("shyway_clk",	&clks_clk),
-	CLKDEV_CON_ID("bus_clk",	&clkout_clk),
-	CLKDEV_CON_ID("shyway4_clk",	&clks4_clk),
-	CLKDEV_CON_ID("shyway3_clk",	&clks3_clk),
-	CLKDEV_CON_ID("shyway1_clk",	&clks1_clk),
-	CLKDEV_CON_ID("peripheral_clk",	&clkp_clk),
-
-	/* MSTP32 clocks */
-	CLKDEV_DEV_ID("r8a7779-vin.3", &mstp_clks[MSTP120]), /* VIN3 */
-	CLKDEV_DEV_ID("rcar-pcie", &mstp_clks[MSTP116]), /* PCIe */
-	CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */
-	CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */
-	CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
-	CLKDEV_DEV_ID("r8a7779-vin.0", &mstp_clks[MSTP110]), /* VIN0 */
-	CLKDEV_DEV_ID("r8a7779-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
-	CLKDEV_DEV_ID("r8a7779-vin.2", &mstp_clks[MSTP108]), /* VIN2 */
-	CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */
-	CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */
-	CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
-	CLKDEV_DEV_ID("ohci-platform.0", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */
-	CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]), /* TMU0 */
-	CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */
-	CLKDEV_DEV_ID("ffc70000.i2c", &mstp_clks[MSTP030]), /* I2C0 */
-	CLKDEV_DEV_ID("i2c-rcar.1", &mstp_clks[MSTP029]), /* I2C1 */
-	CLKDEV_DEV_ID("ffc71000.i2c", &mstp_clks[MSTP029]), /* I2C1 */
-	CLKDEV_DEV_ID("i2c-rcar.2", &mstp_clks[MSTP028]), /* I2C2 */
-	CLKDEV_DEV_ID("ffc72000.i2c", &mstp_clks[MSTP028]), /* I2C2 */
-	CLKDEV_DEV_ID("i2c-rcar.3", &mstp_clks[MSTP027]), /* I2C3 */
-	CLKDEV_DEV_ID("ffc73000.i2c", &mstp_clks[MSTP027]), /* I2C3 */
-	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP026]), /* SCIF0 */
-	CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP025]), /* SCIF1 */
-	CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP024]), /* SCIF2 */
-	CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP023]), /* SCIF3 */
-	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP022]), /* SCIF4 */
-	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP021]), /* SCIF6 */
-	CLKDEV_DEV_ID("sh-hspi.0", &mstp_clks[MSTP007]), /* HSPI0 */
-	CLKDEV_DEV_ID("fffc7000.spi", &mstp_clks[MSTP007]), /* HSPI0 */
-	CLKDEV_DEV_ID("sh-hspi.1", &mstp_clks[MSTP007]), /* HSPI1 */
-	CLKDEV_DEV_ID("fffc8000.spi", &mstp_clks[MSTP007]), /* HSPI1 */
-	CLKDEV_DEV_ID("sh-hspi.2", &mstp_clks[MSTP007]), /* HSPI2 */
-	CLKDEV_DEV_ID("fffc6000.spi", &mstp_clks[MSTP007]), /* HSPI2 */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP323]), /* SDHI0 */
-	CLKDEV_DEV_ID("ffe4c000.sd", &mstp_clks[MSTP323]), /* SDHI0 */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */
-	CLKDEV_DEV_ID("ffe4d000.sd", &mstp_clks[MSTP322]), /* SDHI1 */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */
-	CLKDEV_DEV_ID("ffe4e000.sd", &mstp_clks[MSTP321]), /* SDHI2 */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.3", &mstp_clks[MSTP320]), /* SDHI3 */
-	CLKDEV_DEV_ID("ffe4f000.sd", &mstp_clks[MSTP320]), /* SDHI3 */
-	CLKDEV_DEV_ID("rcar-du-r8a7779", &mstp_clks[MSTP103]), /* DU */
-};
-
-void __init r8a7779_clock_init(void)
-{
-	u32 mode = r8a7779_read_mode_pins();
-	int k, ret = 0;
-
-	if (mode & MD(1)) {
-		plla_clk.rate = 1500000000;
-
-		SH_CLK_SET_RATIO(&clkz_clk_ratio,	2, 3);
-		SH_CLK_SET_RATIO(&clkzs_clk_ratio,	1, 6);
-		SH_CLK_SET_RATIO(&clki_clk_ratio,	1, 2);
-		SH_CLK_SET_RATIO(&clks_clk_ratio,	1, 6);
-		SH_CLK_SET_RATIO(&clks1_clk_ratio,	1, 12);
-		SH_CLK_SET_RATIO(&clks3_clk_ratio,	1, 8);
-		SH_CLK_SET_RATIO(&clks4_clk_ratio,	1, 16);
-		SH_CLK_SET_RATIO(&clkp_clk_ratio,	1, 24);
-		SH_CLK_SET_RATIO(&clkg_clk_ratio,	1, 24);
-		if (mode & MD(2)) {
-			SH_CLK_SET_RATIO(&clkb_clk_ratio,	1, 36);
-			SH_CLK_SET_RATIO(&clkout_clk_ratio,	1, 36);
-		} else {
-			SH_CLK_SET_RATIO(&clkb_clk_ratio,	1, 24);
-			SH_CLK_SET_RATIO(&clkout_clk_ratio,	1, 24);
-		}
-	} else {
-		plla_clk.rate = 1600000000;
-
-		SH_CLK_SET_RATIO(&clkz_clk_ratio,	1, 2);
-		SH_CLK_SET_RATIO(&clkzs_clk_ratio,	1, 8);
-		SH_CLK_SET_RATIO(&clki_clk_ratio,	1, 2);
-		SH_CLK_SET_RATIO(&clks_clk_ratio,	1, 8);
-		SH_CLK_SET_RATIO(&clks1_clk_ratio,	1, 16);
-		SH_CLK_SET_RATIO(&clks3_clk_ratio,	1, 8);
-		SH_CLK_SET_RATIO(&clks4_clk_ratio,	1, 16);
-		SH_CLK_SET_RATIO(&clkp_clk_ratio,	1, 32);
-		SH_CLK_SET_RATIO(&clkg_clk_ratio,	1, 24);
-		if (mode & MD(2)) {
-			SH_CLK_SET_RATIO(&clkb_clk_ratio,	1, 32);
-			SH_CLK_SET_RATIO(&clkout_clk_ratio,	1, 32);
-		} else {
-			SH_CLK_SET_RATIO(&clkb_clk_ratio,	1, 24);
-			SH_CLK_SET_RATIO(&clkout_clk_ratio,	1, 24);
-		}
-	}
-
-	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
-		ret = clk_register(main_clks[k]);
-
-	if (!ret)
-		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	if (!ret)
-		shmobile_clk_init();
-	else
-		panic("failed to setup r8a7779 clocks\n");
-}
-
-/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
-void __init __weak r8a7779_register_twd(void) { }
-
-void __init r8a7779_earlytimer_init(void)
-{
-	r8a7779_clock_init();
-	r8a7779_register_twd();
-	shmobile_earlytimer_init();
-}
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
deleted file mode 100644
index 3855fb0..0000000
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ /dev/null
@@ -1,752 +0,0 @@
-/*
- * sh73a0 clock framework support
- *
- * Copyright (C) 2010 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/io.h>
-#include <linux/sh_clk.h>
-#include <linux/clkdev.h>
-#include <asm/processor.h>
-#include "clock.h"
-#include "common.h"
-
-#define FRQCRA		IOMEM(0xe6150000)
-#define FRQCRB		IOMEM(0xe6150004)
-#define FRQCRD		IOMEM(0xe61500e4)
-#define VCLKCR1		IOMEM(0xe6150008)
-#define VCLKCR2		IOMEM(0xe615000C)
-#define VCLKCR3		IOMEM(0xe615001C)
-#define ZBCKCR		IOMEM(0xe6150010)
-#define FLCKCR		IOMEM(0xe6150014)
-#define SD0CKCR		IOMEM(0xe6150074)
-#define SD1CKCR		IOMEM(0xe6150078)
-#define SD2CKCR		IOMEM(0xe615007C)
-#define FSIACKCR	IOMEM(0xe6150018)
-#define FSIBCKCR	IOMEM(0xe6150090)
-#define SUBCKCR		IOMEM(0xe6150080)
-#define SPUACKCR	IOMEM(0xe6150084)
-#define SPUVCKCR	IOMEM(0xe6150094)
-#define MSUCKCR		IOMEM(0xe6150088)
-#define HSICKCR		IOMEM(0xe615008C)
-#define MFCK1CR		IOMEM(0xe6150098)
-#define MFCK2CR		IOMEM(0xe615009C)
-#define DSITCKCR	IOMEM(0xe6150060)
-#define DSI0PCKCR	IOMEM(0xe6150064)
-#define DSI1PCKCR	IOMEM(0xe6150068)
-#define DSI0PHYCR	0xe615006C
-#define DSI1PHYCR	0xe6150070
-#define PLLECR		IOMEM(0xe61500d0)
-#define PLL0CR		IOMEM(0xe61500d8)
-#define PLL1CR		IOMEM(0xe6150028)
-#define PLL2CR		IOMEM(0xe615002c)
-#define PLL3CR		IOMEM(0xe61500dc)
-#define SMSTPCR0	IOMEM(0xe6150130)
-#define SMSTPCR1	IOMEM(0xe6150134)
-#define SMSTPCR2	IOMEM(0xe6150138)
-#define SMSTPCR3	IOMEM(0xe615013c)
-#define SMSTPCR4	IOMEM(0xe6150140)
-#define SMSTPCR5	IOMEM(0xe6150144)
-#define CKSCR		IOMEM(0xe61500c0)
-
-/* Fixed 32 KHz root clock from EXTALR pin */
-static struct clk r_clk = {
-	.rate           = 32768,
-};
-
-/*
- * 26MHz default rate for the EXTAL1 root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-struct clk sh73a0_extal1_clk = {
-	.rate		= 26000000,
-};
-
-/*
- * 48MHz default rate for the EXTAL2 root input clock.
- * If needed, reset this with clk_set_rate() from the platform code.
- */
-struct clk sh73a0_extal2_clk = {
-	.rate		= 48000000,
-};
-
-static struct sh_clk_ops main_clk_ops = {
-	.recalc		= followparent_recalc,
-};
-
-/* Main clock */
-static struct clk main_clk = {
-	/* .parent will be set in sh73a0_clock_init() */
-	.ops		= &main_clk_ops,
-};
-
-/* PLL0, PLL1, PLL2, PLL3 */
-static unsigned long pll_recalc(struct clk *clk)
-{
-	unsigned long mult = 1;
-
-	if (__raw_readl(PLLECR) & (1 << clk->enable_bit)) {
-		mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1);
-		/* handle CFG bit for PLL1 and PLL2 */
-		switch (clk->enable_bit) {
-		case 1:
-		case 2:
-			if (__raw_readl(clk->enable_reg) & (1 << 20))
-				mult *= 2;
-		}
-	}
-
-	return clk->parent->rate * mult;
-}
-
-static struct sh_clk_ops pll_clk_ops = {
-	.recalc		= pll_recalc,
-};
-
-static struct clk pll0_clk = {
-	.ops		= &pll_clk_ops,
-	.flags		= CLK_ENABLE_ON_INIT,
-	.parent		= &main_clk,
-	.enable_reg	= (void __iomem *)PLL0CR,
-	.enable_bit	= 0,
-};
-
-static struct clk pll1_clk = {
-	.ops		= &pll_clk_ops,
-	.flags		= CLK_ENABLE_ON_INIT,
-	.parent		= &main_clk,
-	.enable_reg	= (void __iomem *)PLL1CR,
-	.enable_bit	= 1,
-};
-
-static struct clk pll2_clk = {
-	.ops		= &pll_clk_ops,
-	.flags		= CLK_ENABLE_ON_INIT,
-	.parent		= &main_clk,
-	.enable_reg	= (void __iomem *)PLL2CR,
-	.enable_bit	= 2,
-};
-
-static struct clk pll3_clk = {
-	.ops		= &pll_clk_ops,
-	.flags		= CLK_ENABLE_ON_INIT,
-	.parent		= &main_clk,
-	.enable_reg	= (void __iomem *)PLL3CR,
-	.enable_bit	= 3,
-};
-
-/* A fixed divide block */
-SH_CLK_RATIO(div2,  1, 2);
-SH_CLK_RATIO(div7,  1, 7);
-SH_CLK_RATIO(div13, 1, 13);
-
-SH_FIXED_RATIO_CLK(extal1_div2_clk,	sh73a0_extal1_clk,	div2);
-SH_FIXED_RATIO_CLK(extal2_div2_clk,	sh73a0_extal2_clk,	div2);
-SH_FIXED_RATIO_CLK(main_div2_clk,	main_clk,		div2);
-SH_FIXED_RATIO_CLK(pll1_div2_clk,	pll1_clk,		div2);
-SH_FIXED_RATIO_CLK(pll1_div7_clk,	pll1_clk,		div7);
-SH_FIXED_RATIO_CLK(pll1_div13_clk,	pll1_clk,		div13);
-
-/* External input clock */
-struct clk sh73a0_extcki_clk = {
-};
-
-struct clk sh73a0_extalr_clk = {
-};
-
-static struct clk *main_clks[] = {
-	&r_clk,
-	&sh73a0_extal1_clk,
-	&sh73a0_extal2_clk,
-	&extal1_div2_clk,
-	&extal2_div2_clk,
-	&main_clk,
-	&main_div2_clk,
-	&pll0_clk,
-	&pll1_clk,
-	&pll2_clk,
-	&pll3_clk,
-	&pll1_div2_clk,
-	&pll1_div7_clk,
-	&pll1_div13_clk,
-	&sh73a0_extcki_clk,
-	&sh73a0_extalr_clk,
-};
-
-static int frqcr_kick(void)
-{
-	int i;
-
-	/* set KICK bit in FRQCRB to update hardware setting, check success */
-	__raw_writel(__raw_readl(FRQCRB) | (1 << 31), FRQCRB);
-	for (i = 1000; i; i--)
-		if (__raw_readl(FRQCRB) & (1 << 31))
-			cpu_relax();
-		else
-			return i;
-
-	return -ETIMEDOUT;
-}
-
-static void div4_kick(struct clk *clk)
-{
-	frqcr_kick();
-}
-
-static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 18,
-			  24, 0, 36, 48, 7 };
-
-static struct clk_div_mult_table div4_div_mult_table = {
-	.divisors = divisors,
-	.nr_divisors = ARRAY_SIZE(divisors),
-};
-
-static struct clk_div4_table div4_table = {
-	.div_mult_table = &div4_div_mult_table,
-	.kick = div4_kick,
-};
-
-enum { DIV4_I, DIV4_ZG, DIV4_M3, DIV4_B, DIV4_M1, DIV4_M2,
-	DIV4_Z, DIV4_ZX, DIV4_HP, DIV4_NR };
-
-#define DIV4(_reg, _bit, _mask, _flags) \
-	SH_CLK_DIV4(&pll1_clk, _reg, _bit, _mask, _flags)
-
-static struct clk div4_clks[DIV4_NR] = {
-	[DIV4_I] = DIV4(FRQCRA, 20, 0xdff, CLK_ENABLE_ON_INIT),
-	/*
-	 * The ZG clock divides the PLL0 frequency to supply SGX. Make sure not
-	 * to exceed the maximum frequencies of 201.5MHz for VDD_DVFS=1.175V and
-	 * 239.2MHz for VDD_DVFS=1.315V.
-	 */
-	[DIV4_ZG] = SH_CLK_DIV4(&pll0_clk, FRQCRA, 16, 0xd7f, CLK_ENABLE_ON_INIT),
-	[DIV4_M3] = DIV4(FRQCRA, 12, 0x1dff, CLK_ENABLE_ON_INIT),
-	[DIV4_B] = DIV4(FRQCRA, 8, 0xdff, CLK_ENABLE_ON_INIT),
-	[DIV4_M1] = DIV4(FRQCRA, 4, 0x1dff, 0),
-	[DIV4_M2] = DIV4(FRQCRA, 0, 0x1dff, 0),
-	[DIV4_Z] = SH_CLK_DIV4(&pll0_clk, FRQCRB, 24, 0x97f, 0),
-	[DIV4_ZX] = DIV4(FRQCRB, 12, 0xdff, 0),
-	[DIV4_HP] = DIV4(FRQCRB, 4, 0xdff, 0),
-};
-
-static unsigned long twd_recalc(struct clk *clk)
-{
-	return clk_get_rate(clk->parent) / 4;
-}
-
-static struct sh_clk_ops twd_clk_ops = {
-	.recalc = twd_recalc,
-};
-
-static struct clk twd_clk = {
-	.parent = &div4_clks[DIV4_Z],
-	.ops = &twd_clk_ops,
-};
-
-static struct sh_clk_ops zclk_ops, kicker_ops;
-static const struct sh_clk_ops *div4_clk_ops;
-
-static int zclk_set_rate(struct clk *clk, unsigned long rate)
-{
-	int ret;
-
-	if (!clk->parent || !__clk_get(clk->parent))
-		return -ENODEV;
-
-	if (readl(FRQCRB) & (1 << 31))
-		return -EBUSY;
-
-	if (rate == clk_get_rate(clk->parent)) {
-		/* 1:1 - switch off divider */
-		__raw_writel(__raw_readl(FRQCRB) & ~(1 << 28), FRQCRB);
-		/* nullify the divider to prepare for the next time */
-		ret = div4_clk_ops->set_rate(clk, rate / 2);
-		if (!ret)
-			ret = frqcr_kick();
-		if (ret > 0)
-			ret = 0;
-	} else {
-		/* Enable the divider */
-		__raw_writel(__raw_readl(FRQCRB) | (1 << 28), FRQCRB);
-
-		ret = frqcr_kick();
-		if (ret >= 0)
-			/*
-			 * set the divider - call the DIV4 method, it will kick
-			 * FRQCRB too
-			 */
-			ret = div4_clk_ops->set_rate(clk, rate);
-		if (ret < 0)
-			goto esetrate;
-	}
-
-esetrate:
-	__clk_put(clk->parent);
-	return ret;
-}
-
-static long zclk_round_rate(struct clk *clk, unsigned long rate)
-{
-	unsigned long div_freq = div4_clk_ops->round_rate(clk, rate),
-		parent_freq = clk_get_rate(clk->parent);
-
-	if (rate > div_freq && abs(parent_freq - rate) < rate - div_freq)
-		return parent_freq;
-
-	return div_freq;
-}
-
-static unsigned long zclk_recalc(struct clk *clk)
-{
-	/*
-	 * Must recalculate frequencies in case PLL0 has been changed, even if
-	 * the divisor is unused ATM!
-	 */
-	unsigned long div_freq = div4_clk_ops->recalc(clk);
-
-	if (__raw_readl(FRQCRB) & (1 << 28))
-		return div_freq;
-
-	return clk_get_rate(clk->parent);
-}
-
-static int kicker_set_rate(struct clk *clk, unsigned long rate)
-{
-	if (__raw_readl(FRQCRB) & (1 << 31))
-		return -EBUSY;
-
-	return div4_clk_ops->set_rate(clk, rate);
-}
-
-static void div4_clk_extend(void)
-{
-	int i;
-
-	div4_clk_ops = div4_clks[0].ops;
-
-	/* Add a kicker-busy check before changing the rate */
-	kicker_ops = *div4_clk_ops;
-	/* We extend the DIV4 clock with a 1:1 pass-through case */
-	zclk_ops = *div4_clk_ops;
-
-	kicker_ops.set_rate = kicker_set_rate;
-	zclk_ops.set_rate = zclk_set_rate;
-	zclk_ops.round_rate = zclk_round_rate;
-	zclk_ops.recalc = zclk_recalc;
-
-	for (i = 0; i < DIV4_NR; i++)
-		div4_clks[i].ops = i == DIV4_Z ? &zclk_ops : &kicker_ops;
-}
-
-enum { DIV6_VCK1, DIV6_VCK2, DIV6_VCK3, DIV6_ZB1,
-	DIV6_FLCTL, DIV6_SDHI0, DIV6_SDHI1, DIV6_SDHI2,
-	DIV6_FSIA, DIV6_FSIB, DIV6_SUB,
-	DIV6_SPUA, DIV6_SPUV, DIV6_MSU,
-	DIV6_HSI,  DIV6_MFG1, DIV6_MFG2,
-	DIV6_DSIT, DIV6_DSI0P, DIV6_DSI1P,
-	DIV6_NR };
-
-static struct clk *vck_parent[8] = {
-	[0] = &pll1_div2_clk,
-	[1] = &pll2_clk,
-	[2] = &sh73a0_extcki_clk,
-	[3] = &sh73a0_extal2_clk,
-	[4] = &main_div2_clk,
-	[5] = &sh73a0_extalr_clk,
-	[6] = &main_clk,
-};
-
-static struct clk *pll_parent[4] = {
-	[0] = &pll1_div2_clk,
-	[1] = &pll2_clk,
-	[2] = &pll1_div13_clk,
-};
-
-static struct clk *hsi_parent[4] = {
-	[0] = &pll1_div2_clk,
-	[1] = &pll2_clk,
-	[2] = &pll1_div7_clk,
-};
-
-static struct clk *pll_extal2_parent[] = {
-	[0] = &pll1_div2_clk,
-	[1] = &pll2_clk,
-	[2] = &sh73a0_extal2_clk,
-	[3] = &sh73a0_extal2_clk,
-};
-
-static struct clk *dsi_parent[8] = {
-	[0] = &pll1_div2_clk,
-	[1] = &pll2_clk,
-	[2] = &main_clk,
-	[3] = &sh73a0_extal2_clk,
-	[4] = &sh73a0_extcki_clk,
-};
-
-static struct clk div6_clks[DIV6_NR] = {
-	[DIV6_VCK1] = SH_CLK_DIV6_EXT(VCLKCR1, 0,
-			vck_parent, ARRAY_SIZE(vck_parent), 12, 3),
-	[DIV6_VCK2] = SH_CLK_DIV6_EXT(VCLKCR2, 0,
-			vck_parent, ARRAY_SIZE(vck_parent), 12, 3),
-	[DIV6_VCK3] = SH_CLK_DIV6_EXT(VCLKCR3, 0,
-			vck_parent, ARRAY_SIZE(vck_parent), 12, 3),
-	[DIV6_ZB1] = SH_CLK_DIV6_EXT(ZBCKCR, CLK_ENABLE_ON_INIT,
-			pll_parent, ARRAY_SIZE(pll_parent), 7, 1),
-	[DIV6_FLCTL] = SH_CLK_DIV6_EXT(FLCKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 7, 1),
-	[DIV6_SDHI0] = SH_CLK_DIV6_EXT(SD0CKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 6, 2),
-	[DIV6_SDHI1] = SH_CLK_DIV6_EXT(SD1CKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 6, 2),
-	[DIV6_SDHI2] = SH_CLK_DIV6_EXT(SD2CKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 6, 2),
-	[DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 6, 1),
-	[DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 6, 1),
-	[DIV6_SUB] = SH_CLK_DIV6_EXT(SUBCKCR, 0,
-			pll_extal2_parent, ARRAY_SIZE(pll_extal2_parent), 6, 2),
-	[DIV6_SPUA] = SH_CLK_DIV6_EXT(SPUACKCR, 0,
-			pll_extal2_parent, ARRAY_SIZE(pll_extal2_parent), 6, 2),
-	[DIV6_SPUV] = SH_CLK_DIV6_EXT(SPUVCKCR, 0,
-			pll_extal2_parent, ARRAY_SIZE(pll_extal2_parent), 6, 2),
-	[DIV6_MSU] = SH_CLK_DIV6_EXT(MSUCKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 7, 1),
-	[DIV6_HSI] = SH_CLK_DIV6_EXT(HSICKCR, 0,
-			hsi_parent, ARRAY_SIZE(hsi_parent), 6, 2),
-	[DIV6_MFG1] = SH_CLK_DIV6_EXT(MFCK1CR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 7, 1),
-	[DIV6_MFG2] = SH_CLK_DIV6_EXT(MFCK2CR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 7, 1),
-	[DIV6_DSIT] = SH_CLK_DIV6_EXT(DSITCKCR, 0,
-			pll_parent, ARRAY_SIZE(pll_parent), 7, 1),
-	[DIV6_DSI0P] = SH_CLK_DIV6_EXT(DSI0PCKCR, 0,
-			dsi_parent, ARRAY_SIZE(dsi_parent), 12, 3),
-	[DIV6_DSI1P] = SH_CLK_DIV6_EXT(DSI1PCKCR, 0,
-			dsi_parent, ARRAY_SIZE(dsi_parent), 12, 3),
-};
-
-/* DSI DIV */
-static unsigned long dsiphy_recalc(struct clk *clk)
-{
-	u32 value;
-
-	value = __raw_readl(clk->mapping->base);
-
-	/* FIXME */
-	if (!(value & 0x000B8000))
-		return clk->parent->rate;
-
-	value &= 0x3f;
-	value += 1;
-
-	if ((value < 12) ||
-	    (value > 33)) {
-		pr_err("DSIPHY has wrong value (%d)", value);
-		return 0;
-	}
-
-	return clk->parent->rate / value;
-}
-
-static long dsiphy_round_rate(struct clk *clk, unsigned long rate)
-{
-	return clk_rate_mult_range_round(clk, 12, 33, rate);
-}
-
-static void dsiphy_disable(struct clk *clk)
-{
-	u32 value;
-
-	value = __raw_readl(clk->mapping->base);
-	value &= ~0x000B8000;
-
-	__raw_writel(value, clk->mapping->base);
-}
-
-static int dsiphy_enable(struct clk *clk)
-{
-	u32 value;
-	int multi;
-
-	value = __raw_readl(clk->mapping->base);
-	multi = (value & 0x3f) + 1;
-
-	if ((multi < 12) || (multi > 33))
-		return -EIO;
-
-	__raw_writel(value | 0x000B8000, clk->mapping->base);
-
-	return 0;
-}
-
-static int dsiphy_set_rate(struct clk *clk, unsigned long rate)
-{
-	u32 value;
-	int idx;
-
-	idx = rate / clk->parent->rate;
-	if ((idx < 12) || (idx > 33))
-		return -EINVAL;
-
-	idx--;
-
-	value = __raw_readl(clk->mapping->base);
-	value = (value & ~0x3f) + idx;
-
-	__raw_writel(value, clk->mapping->base);
-
-	return 0;
-}
-
-static struct sh_clk_ops dsiphy_clk_ops = {
-	.recalc		= dsiphy_recalc,
-	.round_rate	= dsiphy_round_rate,
-	.set_rate	= dsiphy_set_rate,
-	.enable		= dsiphy_enable,
-	.disable	= dsiphy_disable,
-};
-
-static struct clk_mapping dsi0phy_clk_mapping = {
-	.phys	= DSI0PHYCR,
-	.len	= 4,
-};
-
-static struct clk_mapping dsi1phy_clk_mapping = {
-	.phys	= DSI1PHYCR,
-	.len	= 4,
-};
-
-static struct clk dsi0phy_clk = {
-	.ops		= &dsiphy_clk_ops,
-	.parent		= &div6_clks[DIV6_DSI0P], /* late install */
-	.mapping	= &dsi0phy_clk_mapping,
-};
-
-static struct clk dsi1phy_clk = {
-	.ops		= &dsiphy_clk_ops,
-	.parent		= &div6_clks[DIV6_DSI1P], /* late install */
-	.mapping	= &dsi1phy_clk_mapping,
-};
-
-static struct clk *late_main_clks[] = {
-	&dsi0phy_clk,
-	&dsi1phy_clk,
-	&twd_clk,
-};
-
-enum { MSTP001,
-	MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP112, MSTP100,
-	MSTP219, MSTP218, MSTP217,
-	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-	MSTP331, MSTP329, MSTP328, MSTP325, MSTP323, MSTP322,
-	MSTP314, MSTP313, MSTP312, MSTP311,
-	MSTP304, MSTP303, MSTP302, MSTP301, MSTP300,
-	MSTP411, MSTP410, MSTP403,
-	MSTP508,
-	MSTP_NR };
-
-#define MSTP(_parent, _reg, _bit, _flags) \
-	SH_CLK_MSTP32(_parent, _reg, _bit, _flags)
-
-static struct clk mstp_clks[MSTP_NR] = {
-	[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
-	[MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
-	[MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
-	[MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
-	[MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
-	[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
-	[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
-	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
-	[MSTP112] = MSTP(&div4_clks[DIV4_ZG], SMSTPCR1, 12, 0), /* SGX */
-	[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
-	[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
-	[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
-	[MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* MP-DMAC */
-	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
-	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
-	[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
-	[MSTP203] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 3, 0), /* SCIFA1 */
-	[MSTP202] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 2, 0), /* SCIFA2 */
-	[MSTP201] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 1, 0), /* SCIFA3 */
-	[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
-	[MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
-	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
-	[MSTP328] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 28, 0), /*FSI*/
-	[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
-	[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
-	[MSTP322] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 22, 0), /* USB */
-	[MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
-	[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
-	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
-	[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
-	[MSTP304] = MSTP(&main_div2_clk, SMSTPCR3, 4, 0), /* TPU0 */
-	[MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
-	[MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
-	[MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
-	[MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
-	[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
-	[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
-	[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
-	[MSTP508] = MSTP(&div4_clks[DIV4_HP], SMSTPCR5, 8, 0), /* INTCA0 */
-};
-
-/* The lookups structure below includes duplicate entries for some clocks
- * with alternate names.
- * - The traditional name used when a device is initialised with platform data
- * - The name used when a device is initialised using device tree
- * The longer-term aim is to remove these duplicates, and indeed the
- * lookups table entirely, by describing clocks using device tree.
- */
-static struct clk_lookup lookups[] = {
-	/* main clocks */
-	CLKDEV_CON_ID("r_clk", &r_clk),
-	CLKDEV_DEV_ID("smp_twd", &twd_clk), /* smp_twd */
-
-	/* DIV4 clocks */
-	CLKDEV_DEV_ID("cpu0", &div4_clks[DIV4_Z]),
-
-	/* DIV6 clocks */
-	CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
-	CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
-	CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
-	CLKDEV_CON_ID("sdhi0_clk", &div6_clks[DIV6_SDHI0]),
-	CLKDEV_CON_ID("sdhi1_clk", &div6_clks[DIV6_SDHI1]),
-	CLKDEV_CON_ID("sdhi2_clk", &div6_clks[DIV6_SDHI2]),
-
-	/* MSTP32 clocks */
-	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
-	CLKDEV_DEV_ID("e6824000.i2c", &mstp_clks[MSTP001]), /* I2C2 */
-	CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
-	CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
-	CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
-	CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
-	CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
-	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
-	CLKDEV_DEV_ID("e6820000.i2c", &mstp_clks[MSTP116]), /* I2C0 */
-	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
-	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
-	CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP219]), /* SCIFA7 */
-	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
-	CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* MP-DMAC */
-	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
-	CLKDEV_DEV_ID("e6cb0000.serial", &mstp_clks[MSTP207]), /* SCIFA5 */
-	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
-	CLKDEV_DEV_ID("e6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */
-	CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
-	CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]), /* SCIFA0 */
-	CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */
-	CLKDEV_DEV_ID("e6c50000.serial", &mstp_clks[MSTP203]), /* SCIFA1 */
-	CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[MSTP202]), /* SCIFA2 */
-	CLKDEV_DEV_ID("e6c60000.serial", &mstp_clks[MSTP202]), /* SCIFA2 */
-	CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), /* SCIFA3 */
-	CLKDEV_DEV_ID("e6c70000.serial", &mstp_clks[MSTP201]), /* SCIFA3 */
-	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
-	CLKDEV_DEV_ID("e6c80000.serial", &mstp_clks[MSTP200]), /* SCIFA4 */
-	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
-	CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP331]), /* SCIFA6 */
-	CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI */
-	CLKDEV_DEV_ID("ec230000.sound", &mstp_clks[MSTP328]), /* FSI */
-	CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
-	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
-	CLKDEV_DEV_ID("e6822000.i2c", &mstp_clks[MSTP323]), /* I2C1 */
-	CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP322]), /* USB */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
-	CLKDEV_DEV_ID("ee100000.sd", &mstp_clks[MSTP314]), /* SDHI0 */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
-	CLKDEV_DEV_ID("ee120000.sd", &mstp_clks[MSTP313]), /* SDHI1 */
-	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
-	CLKDEV_DEV_ID("e6bd0000.mmc", &mstp_clks[MSTP312]), /* MMCIF0 */
-	CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
-	CLKDEV_DEV_ID("ee140000.sd", &mstp_clks[MSTP311]), /* SDHI2 */
-	CLKDEV_DEV_ID("renesas-tpu-pwm.0", &mstp_clks[MSTP304]), /* TPU0 */
-	CLKDEV_DEV_ID("renesas-tpu-pwm.1", &mstp_clks[MSTP303]), /* TPU1 */
-	CLKDEV_DEV_ID("renesas-tpu-pwm.2", &mstp_clks[MSTP302]), /* TPU2 */
-	CLKDEV_DEV_ID("renesas-tpu-pwm.3", &mstp_clks[MSTP301]), /* TPU3 */
-	CLKDEV_DEV_ID("renesas-tpu-pwm.4", &mstp_clks[MSTP300]), /* TPU4 */
-	CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
-	CLKDEV_DEV_ID("e6826000.i2c", &mstp_clks[MSTP411]), /* I2C3 */
-	CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
-	CLKDEV_DEV_ID("e6828000.i2c", &mstp_clks[MSTP410]), /* I2C4 */
-	CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
-	CLKDEV_DEV_ID("renesas_intc_irqpin.0",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("e6900000.irqpin",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("renesas_intc_irqpin.1",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("e6900004.irqpin",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("renesas_intc_irqpin.2",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("e6900008.irqpin",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("renesas_intc_irqpin.3",	&mstp_clks[MSTP508]), /* INTCA0 */
-	CLKDEV_DEV_ID("e690000c.irqpin",	&mstp_clks[MSTP508]), /* INTCA0 */
-
-	/* ICK */
-	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
-	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
-	CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
-	CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
-	CLKDEV_ICK_ID("dsiphy_clk", "sh-mipi-dsi.0", &dsi0phy_clk),
-	CLKDEV_ICK_ID("dsiphy_clk", "sh-mipi-dsi.1", &dsi1phy_clk),
-	CLKDEV_ICK_ID("fck", "sh-cmt-48.1", &mstp_clks[MSTP329]), /* CMT1 */
-	CLKDEV_ICK_ID("fck", "e6138000.timer", &mstp_clks[MSTP329]), /* CMT1 */
-	CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP125]), /* TMU0 */
-};
-
-void __init sh73a0_clock_init(void)
-{
-	int k, ret = 0;
-
-	/* Set SDHI clocks to a known state */
-	__raw_writel(0x108, SD0CKCR);
-	__raw_writel(0x108, SD1CKCR);
-	__raw_writel(0x108, SD2CKCR);
-
-	/* detect main clock parent */
-	switch ((__raw_readl(CKSCR) >> 28) & 0x03) {
-	case 0:
-		main_clk.parent = &sh73a0_extal1_clk;
-		break;
-	case 1:
-		main_clk.parent = &extal1_div2_clk;
-		break;
-	case 2:
-		main_clk.parent = &sh73a0_extal2_clk;
-		break;
-	case 3:
-		main_clk.parent = &extal2_div2_clk;
-		break;
-	}
-
-	for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++)
-		ret = clk_register(main_clks[k]);
-
-	if (!ret) {
-		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
-		if (!ret)
-			div4_clk_extend();
-	}
-
-	if (!ret)
-		ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR);
-
-	if (!ret)
-		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);
-
-	for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
-		ret = clk_register(late_main_clks[k]);
-
-	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-
-	if (!ret)
-		shmobile_clk_init();
-	else
-		panic("failed to setup sh73a0 clocks\n");
-}
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b..8d27ec5 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@
 extern void shmobile_smp_sleep(void);
 extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
 			      unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
 extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/dma-register.h b/arch/arm/mach-shmobile/dma-register.h
deleted file mode 100644
index 52a2f66..0000000
--- a/arch/arm/mach-shmobile/dma-register.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * SH-ARM CPU-specific DMA definitions, used by both DMA drivers
- *
- * Copyright (C) 2012 Renesas Solutions Corp
- *
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on arch/sh/include/cpu-sh4/cpu/dma-register.h
- * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef DMA_REGISTER_H
-#define DMA_REGISTER_H
-
-/*
- *		Direct Memory Access Controller
- */
-
-/* Transmit sizes and respective CHCR register values */
-enum {
-	XMIT_SZ_8BIT		= 0,
-	XMIT_SZ_16BIT		= 1,
-	XMIT_SZ_32BIT		= 2,
-	XMIT_SZ_64BIT		= 7,
-	XMIT_SZ_128BIT		= 3,
-	XMIT_SZ_256BIT		= 4,
-	XMIT_SZ_512BIT		= 5,
-};
-
-/* log2(size / 8) - used to calculate number of transfers */
-static const unsigned int dma_ts_shift[] = {
-	[XMIT_SZ_8BIT]		= 0,
-	[XMIT_SZ_16BIT]		= 1,
-	[XMIT_SZ_32BIT]		= 2,
-	[XMIT_SZ_64BIT]		= 3,
-	[XMIT_SZ_128BIT]	= 4,
-	[XMIT_SZ_256BIT]	= 5,
-	[XMIT_SZ_512BIT]	= 6,
-};
-
-#define TS_LOW_BIT	0x3 /* --xx */
-#define TS_HI_BIT	0xc /* xx-- */
-
-#define TS_LOW_SHIFT	(3)
-#define TS_HI_SHIFT	(20 - 2)	/* 2 bits for shifted low TS */
-
-#define TS_INDEX2VAL(i) \
-	((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
-	 (((i) & TS_HI_BIT)  << TS_HI_SHIFT))
-
-#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz)))
-#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz)))
-
-
-/*
- *		USB High-Speed DMAC
- */
-/* Transmit sizes and respective CHCR register values */
-enum {
-	USBTS_XMIT_SZ_8BYTE		= 0,
-	USBTS_XMIT_SZ_16BYTE		= 1,
-	USBTS_XMIT_SZ_32BYTE		= 2,
-};
-
-/* log2(size / 8) - used to calculate number of transfers */
-static const unsigned int dma_usbts_shift[] = {
-	[USBTS_XMIT_SZ_8BYTE]	= 3,
-	[USBTS_XMIT_SZ_16BYTE]	= 4,
-	[USBTS_XMIT_SZ_32BYTE]	= 5,
-};
-
-#define USBTS_LOW_BIT	0x3 /* --xx */
-#define USBTS_HI_BIT	0x0 /* ---- */
-
-#define USBTS_LOW_SHIFT	6
-#define USBTS_HI_SHIFT	0
-
-#define USBTS_INDEX2VAL(i) (((i) & 3) << 6)
-
-#endif /* DMA_REGISTER_H */
diff --git a/arch/arm/mach-shmobile/include/mach/head-kzm9g.txt b/arch/arm/mach-shmobile/include/mach/head-kzm9g.txt
deleted file mode 100644
index 9531f46..0000000
--- a/arch/arm/mach-shmobile/include/mach/head-kzm9g.txt
+++ /dev/null
@@ -1,410 +0,0 @@
-LIST "KZM9G low-level initialization routine."
-LIST "Adapted from u-boot KZM9G support code."
-
-LIST "Copyright (C) 2013 Ulrich Hecht"
-
-LIST "This program is free software; you can redistribute it and/or modify"
-LIST "it under the terms of the GNU General Public License version 2 as"
-LIST "published by the Free Software Foundation."
-
-LIST "This program is distributed in the hope that it will be useful,"
-LIST "but WITHOUT ANY WARRANTY; without even the implied warranty of"
-LIST "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the"
-LIST "GNU General Public License for more details."
-
-
-LIST "Register definitions:"
-
-LIST "Secure control register"
-#define LIFEC_SEC_SRC (0xE6110008)
-
-LIST "RWDT"
-#define RWDT_BASE   (0xE6020000)
-#define RWTCSRA0 (RWDT_BASE + 0x04)
-
-LIST "HPB Semaphore Control Registers"
-#define HPBSCR_BASE (0xE6000000)
-#define HPBCTRL6 (HPBSCR_BASE + 0x1030)
-
-#define SBSC1_BASE  (0xFE400000)
-#define SDCR0A		(SBSC1_BASE + 0x0008)
-#define SDCR1A		(SBSC1_BASE + 0x000C)
-#define SDPCRA		(SBSC1_BASE + 0x0010)
-#define SDCR0SA		(SBSC1_BASE + 0x0018)
-#define SDCR1SA		(SBSC1_BASE + 0x001C)
-#define RTCSRA		(SBSC1_BASE + 0x0020)
-#define RTCORA		(SBSC1_BASE + 0x0028)
-#define RTCORHA		(SBSC1_BASE + 0x002C)
-#define SDWCRC0A	(SBSC1_BASE + 0x0040)
-#define SDWCRC1A	(SBSC1_BASE + 0x0044)
-#define SDWCR00A	(SBSC1_BASE + 0x0048)
-#define SDWCR01A	(SBSC1_BASE + 0x004C)
-#define SDWCR10A	(SBSC1_BASE + 0x0050)
-#define SDWCR11A	(SBSC1_BASE + 0x0054)
-#define SDWCR2A		(SBSC1_BASE + 0x0060)
-#define SDWCRC2A	(SBSC1_BASE + 0x0064)
-#define ZQCCRA		(SBSC1_BASE + 0x0068)
-#define SDMRACR0A	(SBSC1_BASE + 0x0084)
-#define SDMRTMPCRA	(SBSC1_BASE + 0x008C)
-#define SDMRTMPMSKA	(SBSC1_BASE + 0x0094)
-#define SDGENCNTA	(SBSC1_BASE + 0x009C)
-#define SDDRVCR0A	(SBSC1_BASE + 0x00B4)
-#define DLLCNT0A	(SBSC1_BASE + 0x0354)
-
-#define SDMRA1  (0xFE500000)
-#define SDMRA2  (0xFE5C0000)
-#define SDMRA3  (0xFE504000)
-
-#define SBSC2_BASE  (0xFB400000)
-#define SDCR0B		(SBSC2_BASE + 0x0008)
-#define SDCR1B		(SBSC2_BASE + 0x000C)
-#define SDPCRB		(SBSC2_BASE + 0x0010)
-#define SDCR0SB		(SBSC2_BASE + 0x0018)
-#define SDCR1SB		(SBSC2_BASE + 0x001C)
-#define RTCSRB		(SBSC2_BASE + 0x0020)
-#define RTCORB		(SBSC2_BASE + 0x0028)
-#define RTCORHB		(SBSC2_BASE + 0x002C)
-#define SDWCRC0B	(SBSC2_BASE + 0x0040)
-#define SDWCRC1B	(SBSC2_BASE + 0x0044)
-#define SDWCR00B	(SBSC2_BASE + 0x0048)
-#define SDWCR01B	(SBSC2_BASE + 0x004C)
-#define SDWCR10B	(SBSC2_BASE + 0x0050)
-#define SDWCR11B	(SBSC2_BASE + 0x0054)
-#define SDPDCR0B	(SBSC2_BASE + 0x0058)
-#define SDWCR2B		(SBSC2_BASE + 0x0060)
-#define SDWCRC2B	(SBSC2_BASE + 0x0064)
-#define ZQCCRB		(SBSC2_BASE + 0x0068)
-#define SDMRACR0B	(SBSC2_BASE + 0x0084)
-#define SDMRTMPCRB	(SBSC2_BASE + 0x008C)
-#define SDMRTMPMSKB	(SBSC2_BASE + 0x0094)
-#define SDGENCNTB	(SBSC2_BASE + 0x009C)
-#define DPHYCNT0B	(SBSC2_BASE + 0x00A0)
-#define DPHYCNT1B	(SBSC2_BASE + 0x00A4)
-#define DPHYCNT2B	(SBSC2_BASE + 0x00A8)
-#define SDDRVCR0B	(SBSC2_BASE + 0x00B4)
-#define DLLCNT0B	(SBSC2_BASE + 0x0354)
-
-#define SDMRB1  (0xFB500000)
-#define SDMRB2  (0xFB5C0000)
-#define SDMRB3  (0xFB504000)
-
-#define CPG_BASE   (0xE6150000)
-#define FRQCRA		(CPG_BASE + 0x0000)
-#define FRQCRB		(CPG_BASE + 0x0004)
-#define FRQCRD		(CPG_BASE + 0x00E4)
-#define VCLKCR1		(CPG_BASE + 0x0008)
-#define VCLKCR2		(CPG_BASE + 0x000C)
-#define VCLKCR3		(CPG_BASE + 0x001C)
-#define ZBCKCR		(CPG_BASE + 0x0010)
-#define FLCKCR		(CPG_BASE + 0x0014)
-#define SD0CKCR		(CPG_BASE + 0x0074)
-#define SD1CKCR		(CPG_BASE + 0x0078)
-#define SD2CKCR		(CPG_BASE + 0x007C)
-#define FSIACKCR	(CPG_BASE + 0x0018)
-#define SUBCKCR		(CPG_BASE + 0x0080)
-#define SPUACKCR	(CPG_BASE + 0x0084)
-#define SPUVCKCR	(CPG_BASE + 0x0094)
-#define MSUCKCR		(CPG_BASE + 0x0088)
-#define HSICKCR		(CPG_BASE + 0x008C)
-#define FSIBCKCR	(CPG_BASE + 0x0090)
-#define MFCK1CR		(CPG_BASE + 0x0098)
-#define MFCK2CR		(CPG_BASE + 0x009C)
-#define DSITCKCR	(CPG_BASE + 0x0060)
-#define DSI0PCKCR	(CPG_BASE + 0x0064)
-#define DSI1PCKCR	(CPG_BASE + 0x0068)
-#define DSI0PHYCR	(CPG_BASE + 0x006C)
-#define DVFSCR3		(CPG_BASE + 0x0174)
-#define DVFSCR4		(CPG_BASE + 0x0178)
-#define DVFSCR5		(CPG_BASE + 0x017C)
-#define MPMODE		(CPG_BASE + 0x00CC)
-
-#define PLLECR		(CPG_BASE + 0x00D0)
-#define PLL0CR		(CPG_BASE + 0x00D8)
-#define PLL1CR		(CPG_BASE + 0x0028)
-#define PLL2CR		(CPG_BASE + 0x002C)
-#define PLL3CR		(CPG_BASE + 0x00DC)
-#define PLL0STPCR	(CPG_BASE + 0x00F0)
-#define PLL1STPCR	(CPG_BASE + 0x00C8)
-#define PLL2STPCR	(CPG_BASE + 0x00F8)
-#define PLL3STPCR	(CPG_BASE + 0x00FC)
-#define RMSTPCR0	(CPG_BASE + 0x0110)
-#define RMSTPCR1	(CPG_BASE + 0x0114)
-#define RMSTPCR2	(CPG_BASE + 0x0118)
-#define RMSTPCR3	(CPG_BASE + 0x011C)
-#define RMSTPCR4	(CPG_BASE + 0x0120)
-#define RMSTPCR5	(CPG_BASE + 0x0124)
-#define SMSTPCR0	(CPG_BASE + 0x0130)
-#define SMSTPCR2	(CPG_BASE + 0x0138)
-#define SMSTPCR3	(CPG_BASE + 0x013C)
-#define CPGXXCR4	(CPG_BASE + 0x0150)
-#define SRCR0		(CPG_BASE + 0x80A0)
-#define SRCR2		(CPG_BASE + 0x80B0)
-#define SRCR3		(CPG_BASE + 0x80A8)
-#define VREFCR		(CPG_BASE + 0x00EC)
-#define PCLKCR		(CPG_BASE + 0x1020)
-
-#define PORT32CR (0xE6051020)
-#define PORT33CR (0xE6051021)
-#define PORT34CR (0xE6051022)
-#define PORT35CR (0xE6051023)
-
-LIST "DRAM initialization code:"
-
-EW RWTCSRA0, 0xA507
-
-ED_AND LIFEC_SEC_SRC, 0xFFFF7FFF
-
-ED_AND SMSTPCR3, 0xFFFF7FFF
-ED_AND SRCR3, 0xFFFF7FFF
-ED_AND SMSTPCR2, 0xFFFBFFFF
-ED_AND SRCR2, 0xFFFBFFFF
-ED PLLECR, 0x00000000
-
-WAIT_MASK PLLECR, 0x00000F00, 0x00000000
-WAIT_MASK FRQCRB, 0x80000000, 0x00000000
-
-ED PLL0CR, 0x2D000000
-ED PLL1CR, 0x17100000
-ED FRQCRB, 0x96235880
-WAIT_MASK FRQCRB, 0x80000000, 0x00000000
-
-ED FLCKCR, 0x0000000B
-ED_AND SMSTPCR0, 0xFFFFFFFD
-
-ED_AND SRCR0, 0xFFFFFFFD
-ED 0xE6001628, 0x514
-ED 0xE6001648, 0x514
-ED 0xE6001658, 0x514
-ED 0xE6001678, 0x514
-
-ED DVFSCR4, 0x00092000
-ED DVFSCR5, 0x000000DC
-ED PLLECR, 0x00000000
-WAIT_MASK PLLECR, 0x00000F00, 0x00000000
-
-ED FRQCRA, 0x0012453C
-ED FRQCRB, 0x80431350
-WAIT_MASK FRQCRB, 0x80000000, 0x00000000
-ED FRQCRD, 0x00000B0B
-WAIT_MASK FRQCRD, 0x80000000, 0x00000000
-
-ED PCLKCR, 0x00000003
-ED VCLKCR1, 0x0000012F
-ED VCLKCR2, 0x00000119
-ED VCLKCR3, 0x00000119
-ED ZBCKCR, 0x00000002
-ED FLCKCR, 0x00000005
-ED SD0CKCR, 0x00000080
-ED SD1CKCR, 0x00000080
-ED SD2CKCR, 0x00000080
-ED FSIACKCR, 0x0000003F
-ED FSIBCKCR, 0x0000003F
-ED SUBCKCR, 0x00000080
-ED SPUACKCR, 0x0000000B
-ED SPUVCKCR, 0x0000000B
-ED MSUCKCR, 0x0000013F
-ED HSICKCR, 0x00000080
-ED MFCK1CR, 0x0000003F
-ED MFCK2CR, 0x0000003F
-ED DSITCKCR, 0x00000107
-ED DSI0PCKCR, 0x00000313
-ED DSI1PCKCR, 0x0000130D
-ED DSI0PHYCR, 0x2A800E0E
-ED PLL0CR, 0x1E000000
-ED PLL0CR, 0x2D000000
-ED PLL1CR, 0x17100000
-ED PLL2CR, 0x27000080
-ED PLL3CR, 0x1D000000
-ED PLL0STPCR, 0x00080000
-ED PLL1STPCR, 0x000120C0
-ED PLL2STPCR, 0x00012000
-ED PLL3STPCR, 0x00000030
-ED PLLECR, 0x0000000B
-WAIT_MASK PLLECR, 0x00000B00, 0x00000B00
-
-ED DVFSCR3, 0x000120F0
-ED MPMODE, 0x00000020
-ED VREFCR, 0x0000028A
-ED RMSTPCR0, 0xE4628087
-ED RMSTPCR1, 0xFFFFFFFF
-ED RMSTPCR2, 0x53FFFFFF
-ED RMSTPCR3, 0xFFFFFFFF
-ED RMSTPCR4, 0x00800D3D
-ED RMSTPCR5, 0xFFFFF3FF
-ED SMSTPCR2, 0x00000000
-ED SRCR2,  0x00040000
-ED_AND PLLECR, 0xFFFFFFF7
-WAIT_MASK PLLECR, 0x00000800, 0x00000000
-
-LIST "set SBSC operational"
-ED HPBCTRL6, 0x00000001
-WAIT_MASK HPBCTRL6, 0x00000001, 0x00000001
-
-LIST "set SBSC operating frequency"
-ED FRQCRD, 0x00001414
-WAIT_MASK FRQCRD, 0x80000000, 0x00000000
-ED PLL3CR, 0x1D000000
-ED_OR PLLECR, 0x00000008
-WAIT_MASK PLLECR, 0x00000800, 0x00000800
-
-LIST "enable DLL oscillation in DDRPHY"
-ED_OR DLLCNT0A, 0x00000002
-
-LIST "wait >= 100 ns"
-ED SDGENCNTA, 0x00000005
-WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000
-
-LIST "target LPDDR2 device settings"
-ED SDCR0A, 0xACC90159
-ED SDCR1A, 0x00010059
-ED SDWCRC0A, 0x50874114
-ED SDWCRC1A, 0x33199B37
-ED SDWCRC2A, 0x008F2313
-ED SDWCR00A, 0x31020707
-ED SDWCR01A, 0x0017040A
-ED SDWCR10A, 0x31020707
-ED SDWCR11A, 0x0017040A
-
-ED SDDRVCR0A, 0x055557ff
-
-ED SDWCR2A, 0x30000000
-
-LIST "drive CKE high"
-ED_OR SDPCRA, 0x00000080
-WAIT_MASK SDPCRA, 0x00000080, 0x00000080
-
-LIST "wait >= 200 us"
-ED SDGENCNTA, 0x00002710
-WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000
-
-LIST "issue reset command to LPDDR2 device"
-ED SDMRACR0A, 0x0000003F
-ED SDMRA1, 0x00000000
-
-LIST "wait >= 10 (or 1) us (docs inconsistent)"
-ED SDGENCNTA, 0x000001F4
-WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000
-
-LIST "MRW ZS initialization calibration command"
-ED SDMRACR0A, 0x0000FF0A
-ED SDMRA3, 0x00000000
-
-LIST "wait >= 1 us"
-ED SDGENCNTA, 0x00000032
-WAIT_MASK SDGENCNTA, 0xFFFFFFFF, 0x00000000
-
-LIST "specify operating mode in LPDDR2"
-ED SDMRACR0A, 0x00002201
-ED SDMRA1, 0x00000000
-ED SDMRACR0A, 0x00000402
-ED SDMRA1, 0x00000000
-ED SDMRACR0A, 0x00000203
-ED SDMRA1, 0x00000000
-
-LIST "initialize DDR interface"
-ED SDMRA2, 0x00000000
-
-LIST "temperature sensor control"
-ED SDMRTMPCRA, 0x88800004
-ED SDMRTMPMSKA, 0x00000004
-
-LIST "auto-refreshing control"
-ED RTCORA, 0xA55A0032
-ED RTCORHA, 0xA55A000C
-ED RTCSRA, 0xA55A2048
-
-ED_OR SDCR0A, 0x00000800
-ED_OR SDCR1A, 0x00000400
-
-LIST "auto ZQ calibration control"
-ED ZQCCRA, 0xFFF20000
-
-ED_OR DLLCNT0B, 0x00000002
-ED SDGENCNTB, 0x00000005
-WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000
-
-ED SDCR0B, 0xACC90159
-ED SDCR1B, 0x00010059
-ED SDWCRC0B, 0x50874114
-ED SDWCRC1B, 0x33199B37
-ED SDWCRC2B, 0x008F2313
-ED SDWCR00B, 0x31020707
-ED SDWCR01B, 0x0017040A
-ED SDWCR10B, 0x31020707
-ED SDWCR11B, 0x0017040A
-ED SDDRVCR0B, 0x055557ff
-ED SDWCR2B, 0x30000000
-ED_OR SDPCRB, 0x00000080
-WAIT_MASK SDPCRB, 0x00000080, 0x00000080
-
-ED SDGENCNTB, 0x00002710
-WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000
-ED SDMRACR0B, 0x0000003F
-
-LIST "upstream u-boot writes to SDMRA1A for both SBSC 1 and 2, which does"
-LIST "not seem to make a lot of sense..."
-ED SDMRB1, 0x00000000
-
-ED SDGENCNTB, 0x000001F4
-WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000
-
-ED SDMRACR0B, 0x0000FF0A
-ED SDMRB3, 0x00000000
-ED SDGENCNTB, 0x00000032
-WAIT_MASK SDGENCNTB, 0xFFFFFFFF, 0x00000000
-
-ED SDMRACR0B, 0x00002201
-ED SDMRB1, 0x00000000
-ED SDMRACR0B, 0x00000402
-ED SDMRB1, 0x00000000
-ED SDMRACR0B, 0x00000203
-ED SDMRB1, 0x00000000
-ED SDMRB2, 0x00000000
-ED SDMRTMPCRB, 0x88800004
-ED SDMRTMPMSKB, 0x00000004
-ED RTCORB,  0xA55A0032
-ED RTCORHB, 0xA55A000C
-ED RTCSRB,  0xA55A2048
-ED_OR SDCR0B, 0x00000800
-ED_OR SDCR1B, 0x00000400
-ED ZQCCRB, 0xFFF20000
-ED_OR SDPDCR0B, 0x00030000
-ED DPHYCNT1B, 0xA5390000
-ED DPHYCNT0B, 0x00001200
-ED DPHYCNT1B, 0x07CE0000
-ED DPHYCNT0B, 0x00001247
-WAIT_MASK DPHYCNT2B, 0xFFFFFFFF, 0x07CE0000
-
-ED_AND SDPDCR0B, 0xFFFCFFFF
-
-ED FRQCRD, 0x00000B0B
-WAIT_MASK FRQCRD, 0x80000000, 0x00000000
-
-ED CPGXXCR4, 0xfffffffc
-
-LIST "Setup SCIF4 / workaround"
-EB PORT32CR, 0x12
-EB PORT33CR, 0x22
-EB PORT34CR, 0x12
-EB PORT35CR, 0x22
-
-EW 0xE6C80000, 0
-EB 0xE6C80004, 0x19
-EW 0xE6C80008, 0x0030
-EW 0xE6C80018, 0
-EW 0xE6C80030, 0x0014
-
-LIST "Magic to avoid hangs and corruption on DRAM writes."
-
-LIST "It has been observed that the system would most often hang while"
-LIST "decompressing the kernel, and if it didn't it would always write"
-LIST "a corrupt image to DRAM."
-LIST "This problem does not occur in u-boot, and the reason is that"
-LIST "u-boot performs an additional cache invalidation after setting up"
-LIST "the DRAM controller. Such an invalidation should not be necessary at"
-LIST "this point, and attempts at removing parts of the routine to arrive"
-LIST "at the minimal snippet of code necessary to avoid the DRAM stability"
-LIST "problem yielded the following:"
-
-MRC p15, 0, r0, c1, c0, 0
-MCR p15, 0, r0, c1, c0, 0
diff --git a/arch/arm/mach-shmobile/include/mach/zboot.h b/arch/arm/mach-shmobile/include/mach/zboot.h
deleted file mode 100644
index 175ee05..0000000
--- a/arch/arm/mach-shmobile/include/mach/zboot.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef ZBOOT_H
-#define ZBOOT_H
-
-#include <mach/zboot_macros.h>
-
-/**************************************************
- *
- *		board specific settings
- *
- **************************************************/
-
-#ifdef CONFIG_MACH_KZM9G
-#define MEMORY_START	0x43000000
-#include "mach/head-kzm9g.txt"
-#else
-#error "unsupported board."
-#endif
-
-#endif /* ZBOOT_H */
diff --git a/arch/arm/mach-shmobile/include/mach/zboot_macros.h b/arch/arm/mach-shmobile/include/mach/zboot_macros.h
deleted file mode 100644
index 14fd3d5..0000000
--- a/arch/arm/mach-shmobile/include/mach/zboot_macros.h
+++ /dev/null
@@ -1,108 +0,0 @@
-#ifndef __ZBOOT_MACRO_H
-#define __ZBOOT_MACRO_H
-
-/* The LIST command is used to include comments in the script */
-.macro	LIST comment
-.endm
-
-/* The ED command is used to write a 32-bit word */
-.macro ED, addr, data
-	LDR	r0, 1f
-	LDR	r1, 2f
-	STR	r1, [r0]
-	B	3f
-1 :	.long	\addr
-2 :	.long	\data
-3 :
-.endm
-
-/* The EW command is used to write a 16-bit word */
-.macro EW, addr, data
-	LDR	r0, 1f
-	LDR	r1, 2f
-	STRH	r1, [r0]
-	B	3f
-1 :	.long	\addr
-2 :	.long	\data
-3 :
-.endm
-
-/* The EB command is used to write an 8-bit word */
-.macro EB, addr, data
-	LDR	r0, 1f
-	LDR	r1, 2f
-	STRB	r1, [r0]
-	B	3f
-1 :	.long	\addr
-2 :	.long	\data
-3 :
-.endm
-
-/* The WAIT command is used to delay the execution */
-.macro  WAIT, time, reg
-	LDR	r1, 1f
-	LDR	r0, 2f
-	STR	r0, [r1]
-10 :
-	LDR	r0, [r1]
-	CMP	r0, #0x00000000
-	BNE	10b
-	NOP
-	B	3f
-1 :	.long	\reg
-2 :	.long	\time * 100
-3 :
-.endm
-
-/* The DD command is used to read a 32-bit word */
-.macro  DD, start, end
-	LDR	r1, 1f
-	B	2f
-1 :	.long	\start
-2 :
-.endm
-
-/* loop until a given value has been read (with mask) */
-.macro WAIT_MASK, addr, data, cmp
-	LDR	r0, 2f
-	LDR	r1, 3f
-	LDR	r2, 4f
-1:
-	LDR	r3, [r0, #0]
-	AND	r3, r1, r3
-	CMP	r2, r3
-	BNE	1b
-	B	5f
-2:	.long	\addr
-3:	.long	\data
-4:	.long	\cmp
-5:
-.endm
-
-/* read 32-bit value from addr, "or" an immediate and write back */
-.macro ED_OR, addr, data
-	LDR r4, 1f
-	LDR r5, 2f
-	LDR r6, [r4]
-	ORR r5, r6, r5
-	STR r5, [r4]
-	B	3f
-1:	.long	\addr
-2:	.long	\data
-3:
-.endm
-
-/* read 32-bit value from addr, "and" an immediate and write back */
-.macro ED_AND, addr, data
-	LDR r4, 1f
-	LDR r5, 2f
-	LDR r6, [r4]
-	AND r5, r6, r5
-	STR r5, [r4]
-	B	3f
-1:	.long \addr
-2:	.long \data
-3:
-.endm
-
-#endif /* __ZBOOT_MACRO_H */
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
deleted file mode 100644
index fd63ae6..0000000
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ /dev/null
@@ -1,337 +0,0 @@
-/*
- * sh73a0 processor support - INTC hardware block
- *
- * Copyright (C) 2010  Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include "intc.h"
-#include "irqs.h"
-#include "sh73a0.h"
-
-enum {
-	UNUSED = 0,
-
-	/* interrupt sources INTCS */
-	PINTCS_PINT1, PINTCS_PINT2,
-	RTDMAC_0_DEI0, RTDMAC_0_DEI1, RTDMAC_0_DEI2, RTDMAC_0_DEI3,
-	CEU, MFI, BBIF2, VPU, TSIF1, _3DG_SGX543, _2DDMAC_2DDM0,
-	RTDMAC_1_DEI4, RTDMAC_1_DEI5, RTDMAC_1_DADERR,
-	KEYSC_KEY, VINT, MSIOF,
-	TMU0_TUNI00, TMU0_TUNI01, TMU0_TUNI02,
-	CMT0, TSIF0, CMT2, LMB, MSUG, MSU_MSU, MSU_MSU2,
-	CTI, RWDT0, ICB, PEP, ASA, JPU_JPEG, LCDC, LCRC,
-	RTDMAC_2_DEI6, RTDMAC_2_DEI7, RTDMAC_2_DEI8, RTDMAC_2_DEI9,
-	RTDMAC_3_DEI10, RTDMAC_3_DEI11,
-	FRC, GCU, LCDC1, CSIRX,
-	DSITX0_DSITX00, DSITX0_DSITX01,
-	SPU2_SPU0, SPU2_SPU1, FSI,
-	TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12,
-	TSIF2, CMT4, MFIS2, CPORTS2R, TSG, DMASCH1, SCUW,
-	VIO60, VIO61, CEU21, CSI21, DSITX1_DSITX10, DSITX1_DSITX11,
-	DISP, DSRV, EMUX2_EMUX20I, EMUX2_EMUX21I,
-	MSTIF0_MST00I, MSTIF0_MST01I, MSTIF1_MST10I, MSTIF1_MST11I,
-	SPUV,
-
-	/* interrupt groups INTCS */
-	RTDMAC_0, RTDMAC_1, RTDMAC_2, RTDMAC_3,
-	DSITX0, SPU2, TMU1, MSU,
-};
-
-static struct intc_vect intcs_vectors[] = {
-	INTCS_VECT(PINTCS_PINT1, 0x0600), INTCS_VECT(PINTCS_PINT2, 0x0620),
-	INTCS_VECT(RTDMAC_0_DEI0, 0x0800), INTCS_VECT(RTDMAC_0_DEI1, 0x0820),
-	INTCS_VECT(RTDMAC_0_DEI2, 0x0840), INTCS_VECT(RTDMAC_0_DEI3, 0x0860),
-	INTCS_VECT(CEU, 0x0880), INTCS_VECT(MFI, 0x0900),
-	INTCS_VECT(BBIF2, 0x0960), INTCS_VECT(VPU, 0x0980),
-	INTCS_VECT(TSIF1, 0x09a0), INTCS_VECT(_3DG_SGX543, 0x09e0),
-	INTCS_VECT(_2DDMAC_2DDM0, 0x0a00),
-	INTCS_VECT(RTDMAC_1_DEI4, 0x0b80), INTCS_VECT(RTDMAC_1_DEI5, 0x0ba0),
-	INTCS_VECT(RTDMAC_1_DADERR, 0x0bc0),
-	INTCS_VECT(KEYSC_KEY, 0x0be0), INTCS_VECT(VINT, 0x0c80),
-	INTCS_VECT(MSIOF, 0x0d20),
-	INTCS_VECT(TMU0_TUNI00, 0x0e80), INTCS_VECT(TMU0_TUNI01, 0x0ea0),
-	INTCS_VECT(TMU0_TUNI02, 0x0ec0),
-	INTCS_VECT(CMT0, 0x0f00), INTCS_VECT(TSIF0, 0x0f20),
-	INTCS_VECT(CMT2, 0x0f40), INTCS_VECT(LMB, 0x0f60),
-	INTCS_VECT(MSUG, 0x0f80),
-	INTCS_VECT(MSU_MSU, 0x0fa0), INTCS_VECT(MSU_MSU2, 0x0fc0),
-	INTCS_VECT(CTI, 0x0400), INTCS_VECT(RWDT0, 0x0440),
-	INTCS_VECT(ICB, 0x0480), INTCS_VECT(PEP, 0x04a0),
-	INTCS_VECT(ASA, 0x04c0), INTCS_VECT(JPU_JPEG, 0x0560),
-	INTCS_VECT(LCDC, 0x0580), INTCS_VECT(LCRC, 0x05a0),
-	INTCS_VECT(RTDMAC_2_DEI6, 0x1300), INTCS_VECT(RTDMAC_2_DEI7, 0x1320),
-	INTCS_VECT(RTDMAC_2_DEI8, 0x1340), INTCS_VECT(RTDMAC_2_DEI9, 0x1360),
-	INTCS_VECT(RTDMAC_3_DEI10, 0x1380), INTCS_VECT(RTDMAC_3_DEI11, 0x13a0),
-	INTCS_VECT(FRC, 0x1700), INTCS_VECT(GCU, 0x1760),
-	INTCS_VECT(LCDC1, 0x1780), INTCS_VECT(CSIRX, 0x17a0),
-	INTCS_VECT(DSITX0_DSITX00, 0x17c0), INTCS_VECT(DSITX0_DSITX01, 0x17e0),
-	INTCS_VECT(SPU2_SPU0, 0x1800), INTCS_VECT(SPU2_SPU1, 0x1820),
-	INTCS_VECT(FSI, 0x1840),
-	INTCS_VECT(TMU1_TUNI10, 0x1900), INTCS_VECT(TMU1_TUNI11, 0x1920),
-	INTCS_VECT(TMU1_TUNI12, 0x1940),
-	INTCS_VECT(TSIF2, 0x1960), INTCS_VECT(CMT4, 0x1980),
-	INTCS_VECT(MFIS2, 0x1a00), INTCS_VECT(CPORTS2R, 0x1a20),
-	INTCS_VECT(TSG, 0x1ae0), INTCS_VECT(DMASCH1, 0x1b00),
-	INTCS_VECT(SCUW, 0x1b40),
-	INTCS_VECT(VIO60, 0x1b60), INTCS_VECT(VIO61, 0x1b80),
-	INTCS_VECT(CEU21, 0x1ba0), INTCS_VECT(CSI21, 0x1be0),
-	INTCS_VECT(DSITX1_DSITX10, 0x1c00), INTCS_VECT(DSITX1_DSITX11, 0x1c20),
-	INTCS_VECT(DISP, 0x1c40), INTCS_VECT(DSRV, 0x1c60),
-	INTCS_VECT(EMUX2_EMUX20I, 0x1c80), INTCS_VECT(EMUX2_EMUX21I, 0x1ca0),
-	INTCS_VECT(MSTIF0_MST00I, 0x1cc0), INTCS_VECT(MSTIF0_MST01I, 0x1ce0),
-	INTCS_VECT(MSTIF1_MST10I, 0x1d00), INTCS_VECT(MSTIF1_MST11I, 0x1d20),
-	INTCS_VECT(SPUV, 0x2300),
-};
-
-static struct intc_group intcs_groups[] __initdata = {
-	INTC_GROUP(RTDMAC_0, RTDMAC_0_DEI0, RTDMAC_0_DEI1,
-		   RTDMAC_0_DEI2, RTDMAC_0_DEI3),
-	INTC_GROUP(RTDMAC_1, RTDMAC_1_DEI4, RTDMAC_1_DEI5, RTDMAC_1_DADERR),
-	INTC_GROUP(RTDMAC_2, RTDMAC_2_DEI6, RTDMAC_2_DEI7,
-		   RTDMAC_2_DEI8, RTDMAC_2_DEI9),
-	INTC_GROUP(RTDMAC_3, RTDMAC_3_DEI10, RTDMAC_3_DEI11),
-	INTC_GROUP(TMU1, TMU1_TUNI12, TMU1_TUNI11, TMU1_TUNI10),
-	INTC_GROUP(DSITX0, DSITX0_DSITX00, DSITX0_DSITX01),
-	INTC_GROUP(SPU2, SPU2_SPU0, SPU2_SPU1),
-	INTC_GROUP(MSU, MSU_MSU, MSU_MSU2),
-};
-
-static struct intc_mask_reg intcs_mask_registers[] = {
-	{ 0xffd20184, 0xffd201c4, 8, /* IMR1SA / IMCR1SA */
-	  { 0, 0, 0, CEU,
-	    0, 0, 0, 0 } },
-	{ 0xffd20188, 0xffd201c8, 8, /* IMR2SA / IMCR2SA */
-	  { 0, 0, 0, VPU,
-	    BBIF2, 0, 0, MFI } },
-	{ 0xffd2018c, 0xffd201cc, 8, /* IMR3SA / IMCR3SA */
-	  { 0, 0, 0, _2DDMAC_2DDM0,
-	    0, ASA, PEP, ICB } },
-	{ 0xffd20190, 0xffd201d0, 8, /* IMR4SA / IMCR4SA */
-	  { 0, 0, 0, CTI,
-	    JPU_JPEG, 0, LCRC, LCDC } },
-	{ 0xffd20194, 0xffd201d4, 8, /* IMR5SA / IMCR5SA */
-	  { KEYSC_KEY, RTDMAC_1_DADERR, RTDMAC_1_DEI5, RTDMAC_1_DEI4,
-	    RTDMAC_0_DEI3, RTDMAC_0_DEI2, RTDMAC_0_DEI1, RTDMAC_0_DEI0 } },
-	{ 0xffd20198, 0xffd201d8, 8, /* IMR6SA / IMCR6SA */
-	  { 0, 0, MSIOF, 0,
-	    _3DG_SGX543, 0, 0, 0 } },
-	{ 0xffd2019c, 0xffd201dc, 8, /* IMR7SA / IMCR7SA */
-	  { 0, TMU0_TUNI02, TMU0_TUNI01, TMU0_TUNI00,
-	    0, 0, 0, 0 } },
-	{ 0xffd201a0, 0xffd201e0, 8, /* IMR8SA / IMCR8SA */
-	  { 0, 0, 0, 0,
-	    0, MSU_MSU, MSU_MSU2, MSUG } },
-	{ 0xffd201a4, 0xffd201e4, 8, /* IMR9SA / IMCR9SA */
-	  { 0, RWDT0, CMT2, CMT0,
-	    0, 0, 0, 0 } },
-	{ 0xffd201ac, 0xffd201ec, 8, /* IMR11SA / IMCR11SA */
-	  { 0, 0, 0, 0,
-	    0, TSIF1, LMB, TSIF0 } },
-	{ 0xffd201b0, 0xffd201f0, 8, /* IMR12SA / IMCR12SA */
-	  { 0, 0, 0, 0,
-	    0, 0, PINTCS_PINT2, PINTCS_PINT1 } },
-	{ 0xffd50180, 0xffd501c0, 8, /* IMR0SA3 / IMCR0SA3 */
-	  { RTDMAC_2_DEI6, RTDMAC_2_DEI7, RTDMAC_2_DEI8, RTDMAC_2_DEI9,
-	    RTDMAC_3_DEI10, RTDMAC_3_DEI11, 0, 0 } },
-	{ 0xffd50190, 0xffd501d0, 8, /* IMR4SA3 / IMCR4SA3 */
-	  { FRC, 0, 0, GCU,
-	    LCDC1, CSIRX, DSITX0_DSITX00, DSITX0_DSITX01 } },
-	{ 0xffd50194, 0xffd501d4, 8, /* IMR5SA3 / IMCR5SA3 */
-	  { SPU2_SPU0, SPU2_SPU1, FSI, 0,
-	    0, 0, 0, 0 } },
-	{ 0xffd50198, 0xffd501d8, 8, /* IMR6SA3 / IMCR6SA3 */
-	  { TMU1_TUNI10, TMU1_TUNI11, TMU1_TUNI12, 0,
-	    TSIF2, CMT4, 0, 0 } },
-	{ 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */
-	  { MFIS2, CPORTS2R, 0, 0,
-	    0, 0, 0, TSG } },
-	{ 0xffd501a0, 0xffd501e0, 8, /* IMR8SA3 / IMCR8SA3 */
-	  { DMASCH1, 0, SCUW, VIO60,
-	    VIO61, CEU21, 0, CSI21 } },
-	{ 0xffd501a4, 0xffd501e4, 8, /* IMR9SA3 / IMCR9SA3 */
-	  { DSITX1_DSITX10, DSITX1_DSITX11, DISP, DSRV,
-	    EMUX2_EMUX20I, EMUX2_EMUX21I, MSTIF0_MST00I, MSTIF0_MST01I } },
-	{ 0xffd501a8, 0xffd501e8, 8, /* IMR10SA3 / IMCR10SA3 */
-	  { MSTIF0_MST00I, MSTIF0_MST01I, 0, 0,
-	    0, 0, 0, 0  } },
-	{ 0xffd60180, 0xffd601c0, 8, /* IMR0SA4 / IMCR0SA4 */
-	  { SPUV, 0, 0, 0,
-	    0, 0, 0, 0  } },
-};
-
-/* Priority is needed for INTCA to receive the INTCS interrupt */
-static struct intc_prio_reg intcs_prio_registers[] = {
-	{ 0xffd20000, 0, 16, 4, /* IPRAS */ { CTI, 0, _2DDMAC_2DDM0, ICB } },
-	{ 0xffd20004, 0, 16, 4, /* IPRBS */ { JPU_JPEG, LCDC, 0, LCRC } },
-	{ 0xffd20008, 0, 16, 4, /* IPRCS */ { BBIF2, 0, 0, 0 } },
-	{ 0xffd2000c, 0, 16, 4, /* IPRDS */ { PINTCS_PINT1, PINTCS_PINT2,
-					      0, 0 } },
-	{ 0xffd20010, 0, 16, 4, /* IPRES */ { RTDMAC_0, CEU, MFI, VPU } },
-	{ 0xffd20014, 0, 16, 4, /* IPRFS */ { KEYSC_KEY, RTDMAC_1,
-					      CMT2, CMT0 } },
-	{ 0xffd20018, 0, 16, 4, /* IPRGS */ { TMU0_TUNI00, TMU0_TUNI01,
-					      TMU0_TUNI02, TSIF1 } },
-	{ 0xffd2001c, 0, 16, 4, /* IPRHS */ { VINT, 0, 0, 0 } },
-	{ 0xffd20020, 0, 16, 4, /* IPRIS */ { 0, MSIOF, TSIF0, 0 } },
-	{ 0xffd20024, 0, 16, 4, /* IPRJS */ { 0, _3DG_SGX543, MSUG, MSU } },
-	{ 0xffd20028, 0, 16, 4, /* IPRKS */ { 0, ASA, LMB, PEP } },
-	{ 0xffd20030, 0, 16, 4, /* IPRMS */ { 0, 0, 0, RWDT0 } },
-	{ 0xffd50000, 0, 16, 4, /* IPRAS3 */ { RTDMAC_2, 0, 0, 0 } },
-	{ 0xffd50004, 0, 16, 4, /* IPRBS3 */ { RTDMAC_3, 0, 0, 0 } },
-	{ 0xffd50020, 0, 16, 4, /* IPRIS3 */ { FRC, 0, 0, 0 } },
-	{ 0xffd50024, 0, 16, 4, /* IPRJS3 */ { LCDC1, CSIRX, DSITX0, 0 } },
-	{ 0xffd50028, 0, 16, 4, /* IPRKS3 */ { SPU2, 0, FSI, 0 } },
-	{ 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, TSIF2 } },
-	{ 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, 0, 0, 0 } },
-	{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { MFIS2, CPORTS2R, 0, 0 } },
-	{ 0xffd50040, 0, 16, 4, /* IPRQS3 */ { DMASCH1, 0, SCUW, VIO60 } },
-	{ 0xffd50044, 0, 16, 4, /* IPRRS3 */ { VIO61, CEU21, 0, CSI21 } },
-	{ 0xffd50048, 0, 16, 4, /* IPRSS3 */ { DSITX1_DSITX10, DSITX1_DSITX11,
-					       DISP, DSRV } },
-	{ 0xffd5004c, 0, 16, 4, /* IPRTS3 */ { EMUX2_EMUX20I, EMUX2_EMUX21I,
-					       MSTIF0_MST00I, MSTIF0_MST01I } },
-	{ 0xffd50050, 0, 16, 4, /* IPRUS3 */ { MSTIF1_MST10I, MSTIF1_MST11I,
-					       0, 0 } },
-	{ 0xffd60000, 0, 16, 4, /* IPRAS4 */ { SPUV, 0, 0, 0 } },
-};
-
-static struct resource intcs_resources[] __initdata = {
-	[0] = {
-		.start	= 0xffd20000,
-		.end	= 0xffd201ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= 0xffd50000,
-		.end	= 0xffd501ff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[2] = {
-		.start	= 0xffd60000,
-		.end	= 0xffd601ff,
-		.flags	= IORESOURCE_MEM,
-	}
-};
-
-static struct intc_desc intcs_desc __initdata = {
-	.name = "sh73a0-intcs",
-	.resource = intcs_resources,
-	.num_resources = ARRAY_SIZE(intcs_resources),
-	.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
-			   intcs_prio_registers, NULL, NULL),
-};
-
-static struct irqaction sh73a0_intcs_cascade;
-
-static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
-{
-	unsigned int evtcodeas = ioread32((void __iomem *)dev_id);
-
-	generic_handle_irq(intcs_evt2irq(evtcodeas));
-
-	return IRQ_HANDLED;
-}
-
-#define PINTER0_PHYS 0xe69000a0
-#define PINTER1_PHYS 0xe69000a4
-#define PINTER0_VIRT IOMEM(0xe69000a0)
-#define PINTER1_VIRT IOMEM(0xe69000a4)
-#define PINTRR0 IOMEM(0xe69000d0)
-#define PINTRR1 IOMEM(0xe69000d4)
-
-#define PINT0A_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq))
-#define PINT0B_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq + 8))
-#define PINT0C_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq + 16))
-#define PINT0D_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT0_IRQ(irq + 24))
-#define PINT1E_IRQ(n, irq) INTC_IRQ((n), SH73A0_PINT1_IRQ(irq))
-
-INTC_PINT(intc_pint0, PINTER0_PHYS, 0xe69000b0, "sh73a0-pint0",		\
-  INTC_PINT_E(A), INTC_PINT_E(B), INTC_PINT_E(C), INTC_PINT_E(D),	\
-  INTC_PINT_V(A, PINT0A_IRQ), INTC_PINT_V(B, PINT0B_IRQ),		\
-  INTC_PINT_V(C, PINT0C_IRQ), INTC_PINT_V(D, PINT0D_IRQ),		\
-  INTC_PINT_E(A), INTC_PINT_E(B), INTC_PINT_E(C), INTC_PINT_E(D),	\
-  INTC_PINT_E(A), INTC_PINT_E(B), INTC_PINT_E(C), INTC_PINT_E(D));
-
-INTC_PINT(intc_pint1, PINTER1_PHYS, 0xe69000c0, "sh73a0-pint1",		\
-  INTC_PINT_E(E), INTC_PINT_E_EMPTY, INTC_PINT_E_EMPTY, INTC_PINT_E_EMPTY, \
-  INTC_PINT_V(E, PINT1E_IRQ), INTC_PINT_V_NONE,				\
-  INTC_PINT_V_NONE, INTC_PINT_V_NONE,					\
-  INTC_PINT_E_NONE, INTC_PINT_E_NONE, INTC_PINT_E_NONE, INTC_PINT_E(E), \
-  INTC_PINT_E(E), INTC_PINT_E_NONE, INTC_PINT_E_NONE, INTC_PINT_E_NONE);
-
-static struct irqaction sh73a0_pint0_cascade;
-static struct irqaction sh73a0_pint1_cascade;
-
-static void pint_demux(void __iomem *rr, void __iomem *er, int base_irq)
-{
-	unsigned long value =  ioread32(rr) & ioread32(er);
-	int k;
-
-	for (k = 0; k < 32; k++) {
-		if (value & (1 << (31 - k))) {
-			generic_handle_irq(base_irq + k);
-			iowrite32(~(1 << (31 - k)), rr);
-		}
-	}
-}
-
-static irqreturn_t sh73a0_pint0_demux(int irq, void *dev_id)
-{
-	pint_demux(PINTRR0, PINTER0_VIRT, SH73A0_PINT0_IRQ(0));
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t sh73a0_pint1_demux(int irq, void *dev_id)
-{
-	pint_demux(PINTRR1, PINTER1_VIRT, SH73A0_PINT1_IRQ(0));
-	return IRQ_HANDLED;
-}
-
-void __init sh73a0_init_irq(void)
-{
-	void __iomem *gic_dist_base = IOMEM(0xf0001000);
-	void __iomem *gic_cpu_base = IOMEM(0xf0000100);
-	void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
-
-	gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE);
-	gic_init(0, 29, gic_dist_base, gic_cpu_base);
-
-	register_intc_controller(&intcs_desc);
-	register_intc_controller(&intc_pint0_desc);
-	register_intc_controller(&intc_pint1_desc);
-
-	/* demux using INTEVTSA */
-	sh73a0_intcs_cascade.name = "INTCS cascade";
-	sh73a0_intcs_cascade.handler = sh73a0_intcs_demux;
-	sh73a0_intcs_cascade.dev_id = intevtsa;
-	setup_irq(gic_spi(50), &sh73a0_intcs_cascade);
-
-	/* PINT pins are sanely tied to the GIC as SPI */
-	sh73a0_pint0_cascade.name = "PINT0 cascade";
-	sh73a0_pint0_cascade.handler = sh73a0_pint0_demux;
-	setup_irq(gic_spi(33), &sh73a0_pint0_cascade);
-
-	sh73a0_pint1_cascade.name = "PINT1 cascade";
-	sh73a0_pint1_cascade.handler = sh73a0_pint1_demux;
-	setup_irq(gic_spi(34), &sh73a0_pint1_cascade);
-}
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index b0790fc..4e54512 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -46,7 +46,7 @@
 	return 0;
 }
 
-static int apmu_power_off(void __iomem *p, int bit)
+static int __maybe_unused apmu_power_off(void __iomem *p, int bit)
 {
 	/* request Core Standby for next WFI */
 	writel_relaxed(3, p + CPUNCR_OFFS(bit));
@@ -67,7 +67,7 @@
 	return 0;
 }
 
-static int apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
+static int __maybe_unused apmu_wrap(int cpu, int (*fn)(void __iomem *p, int cpu))
 {
 	void __iomem *p = apmu_cpus[cpu].iomem;
 
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09..b23378f 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 {
-	return 0; /* Hotplug of any CPU is supported */
+	return true; /* Hotplug of any CPU is supported */
 }
 #endif
diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c
deleted file mode 100644
index 34608fc..0000000
--- a/arch/arm/mach-shmobile/pm-r8a7740.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * r8a7740 power management support
- *
- * Copyright (C) 2012  Renesas Solutions Corp.
- * Copyright (C) 2012  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/console.h>
-#include <linux/io.h>
-#include <linux/suspend.h>
-
-#include "common.h"
-#include "pm-rmobile.h"
-
-#define SYSC_BASE	IOMEM(0xe6180000)
-
-#if defined(CONFIG_PM) && !defined(CONFIG_ARCH_MULTIPLATFORM)
-static int r8a7740_pd_a3sm_suspend(void)
-{
-	/*
-	 * The A3SM domain contains the CPU core and therefore it should
-	 * only be turned off if the CPU is not in use.
-	 */
-	return -EBUSY;
-}
-
-static int r8a7740_pd_a3sp_suspend(void)
-{
-	/*
-	 * Serial consoles make use of SCIF hardware located in A3SP,
-	 * keep such power domain on if "no_console_suspend" is set.
-	 */
-	return console_suspend_enabled ? 0 : -EBUSY;
-}
-
-static int r8a7740_pd_d4_suspend(void)
-{
-	/*
-	 * The D4 domain contains the Coresight-ETM hardware block and
-	 * therefore it should only be turned off if the debug module is
-	 * not in use.
-	 */
-	return -EBUSY;
-}
-
-static struct rmobile_pm_domain r8a7740_pm_domains[] = {
-	{
-		.genpd.name	= "A4LC",
-		.base		= SYSC_BASE,
-		.bit_shift	= 1,
-	}, {
-		.genpd.name	= "A4MP",
-		.base		= SYSC_BASE,
-		.bit_shift	= 2,
-	}, {
-		.genpd.name	= "D4",
-		.base		= SYSC_BASE,
-		.bit_shift	= 3,
-		.gov		= &pm_domain_always_on_gov,
-		.suspend	= r8a7740_pd_d4_suspend,
-	}, {
-		.genpd.name	= "A4R",
-		.base		= SYSC_BASE,
-		.bit_shift	= 5,
-	}, {
-		.genpd.name	= "A3RV",
-		.base		= SYSC_BASE,
-		.bit_shift	= 6,
-	}, {
-		.genpd.name	= "A4S",
-		.base		= SYSC_BASE,
-		.bit_shift	= 10,
-		.no_debug	= true,
-	}, {
-		.genpd.name	= "A3SP",
-		.base		= SYSC_BASE,
-		.bit_shift	= 11,
-		.gov		= &pm_domain_always_on_gov,
-		.no_debug	= true,
-		.suspend	= r8a7740_pd_a3sp_suspend,
-	}, {
-		.genpd.name	= "A3SM",
-		.base		= SYSC_BASE,
-		.bit_shift	= 12,
-		.gov		= &pm_domain_always_on_gov,
-		.suspend	= r8a7740_pd_a3sm_suspend,
-	}, {
-		.genpd.name	= "A3SG",
-		.base		= SYSC_BASE,
-		.bit_shift	= 13,
-	}, {
-		.genpd.name	= "A4SU",
-		.base		= SYSC_BASE,
-		.bit_shift	= 20,
-	},
-};
-
-void __init r8a7740_init_pm_domains(void)
-{
-	rmobile_init_domains(r8a7740_pm_domains, ARRAY_SIZE(r8a7740_pm_domains));
-	pm_genpd_add_subdomain_names("A4R", "A3RV");
-	pm_genpd_add_subdomain_names("A4S", "A3SP");
-	pm_genpd_add_subdomain_names("A4S", "A3SM");
-	pm_genpd_add_subdomain_names("A4S", "A3SG");
-}
-#endif /* CONFIG_PM && !CONFIG_ARCH_MULTIPLATFORM */
-
-#ifdef CONFIG_SUSPEND
-static int r8a7740_enter_suspend(suspend_state_t suspend_state)
-{
-	cpu_do_idle();
-	return 0;
-}
-
-static void r8a7740_suspend_init(void)
-{
-	shmobile_suspend_ops.enter = r8a7740_enter_suspend;
-}
-#else
-static void r8a7740_suspend_init(void) {}
-#endif
-
-void __init r8a7740_pm_init(void)
-{
-	r8a7740_suspend_init();
-}
diff --git a/arch/arm/mach-shmobile/pm-r8a7779.c b/arch/arm/mach-shmobile/pm-r8a7779.c
index 44a74c4..47a862e 100644
--- a/arch/arm/mach-shmobile/pm-r8a7779.c
+++ b/arch/arm/mach-shmobile/pm-r8a7779.c
@@ -35,7 +35,8 @@
 	struct rcar_sysc_ch ch;
 };
 
-static inline struct rcar_sysc_ch *to_r8a7779_ch(struct generic_pm_domain *d)
+static inline
+const struct rcar_sysc_ch *to_r8a7779_ch(struct generic_pm_domain *d)
 {
 	return &container_of(d, struct r8a7779_pm_domain, genpd)->ch;
 }
@@ -83,7 +84,6 @@
 {
 	struct generic_pm_domain *genpd = &r8a7779_pd->genpd;
 
-	genpd->flags = GENPD_FLAG_PM_CLK;
 	pm_genpd_init(genpd, NULL, false);
 	genpd->dev_ops.active_wakeup = pd_active_wakeup;
 	genpd->power_off = pd_power_down;
diff --git a/arch/arm/mach-shmobile/pm-rcar.c b/arch/arm/mach-shmobile/pm-rcar.c
index 00022ee..4092ad1 100644
--- a/arch/arm/mach-shmobile/pm-rcar.c
+++ b/arch/arm/mach-shmobile/pm-rcar.c
@@ -15,32 +15,58 @@
 #include <asm/io.h>
 #include "pm-rcar.h"
 
-/* SYSC */
-#define SYSCSR 0x00
-#define SYSCISR 0x04
-#define SYSCISCR 0x08
+/* SYSC Common */
+#define SYSCSR			0x00	/* SYSC Status Register */
+#define SYSCISR			0x04	/* Interrupt Status Register */
+#define SYSCISCR		0x08	/* Interrupt Status Clear Register */
+#define SYSCIER			0x0c	/* Interrupt Enable Register */
+#define SYSCIMR			0x10	/* Interrupt Mask Register */
 
-#define PWRSR_OFFS 0x00
-#define PWROFFCR_OFFS 0x04
-#define PWRONCR_OFFS 0x0c
-#define PWRER_OFFS 0x14
+/* SYSC Status Register */
+#define SYSCSR_PONENB		1	/* Ready for power resume requests */
+#define SYSCSR_POFFENB		0	/* Ready for power shutoff requests */
 
-#define SYSCSR_RETRIES 100
-#define SYSCSR_DELAY_US 1
+/*
+ * Power Control Register Offsets inside the register block for each domain
+ * Note: The "CR" registers for ARM cores exist on H1 only
+ *       Use WFI to power off, CPG/APMU to resume ARM cores on R-Car Gen2
+ */
+#define PWRSR_OFFS		0x00	/* Power Status Register */
+#define PWROFFCR_OFFS		0x04	/* Power Shutoff Control Register */
+#define PWROFFSR_OFFS		0x08	/* Power Shutoff Status Register */
+#define PWRONCR_OFFS		0x0c	/* Power Resume Control Register */
+#define PWRONSR_OFFS		0x10	/* Power Resume Status Register */
+#define PWRER_OFFS		0x14	/* Power Shutoff/Resume Error */
 
-#define SYSCISR_RETRIES 1000
-#define SYSCISR_DELAY_US 1
+
+#define SYSCSR_RETRIES		100
+#define SYSCSR_DELAY_US		1
+
+#define PWRER_RETRIES		100
+#define PWRER_DELAY_US		1
+
+#define SYSCISR_RETRIES		1000
+#define SYSCISR_DELAY_US	1
 
 static void __iomem *rcar_sysc_base;
 static DEFINE_SPINLOCK(rcar_sysc_lock); /* SMP CPUs + I/O devices */
 
-static int rcar_sysc_pwr_on_off(struct rcar_sysc_ch *sysc_ch,
-				int sr_bit, int reg_offs)
+static int rcar_sysc_pwr_on_off(const struct rcar_sysc_ch *sysc_ch, bool on)
 {
+	unsigned int sr_bit, reg_offs;
 	int k;
 
+	if (on) {
+		sr_bit = SYSCSR_PONENB;
+		reg_offs = PWRONCR_OFFS;
+	} else {
+		sr_bit = SYSCSR_POFFENB;
+		reg_offs = PWROFFCR_OFFS;
+	}
+
+	/* Wait until SYSC is ready to accept a power request */
 	for (k = 0; k < SYSCSR_RETRIES; k++) {
-		if (ioread32(rcar_sysc_base + SYSCSR) & (1 << sr_bit))
+		if (ioread32(rcar_sysc_base + SYSCSR) & BIT(sr_bit))
 			break;
 		udelay(SYSCSR_DELAY_US);
 	}
@@ -48,27 +74,17 @@
 	if (k == SYSCSR_RETRIES)
 		return -EAGAIN;
 
-	iowrite32(1 << sysc_ch->chan_bit,
+	/* Submit power shutoff or power resume request */
+	iowrite32(BIT(sysc_ch->chan_bit),
 		  rcar_sysc_base + sysc_ch->chan_offs + reg_offs);
 
 	return 0;
 }
 
-static int rcar_sysc_pwr_off(struct rcar_sysc_ch *sysc_ch)
+static int rcar_sysc_power(const struct rcar_sysc_ch *sysc_ch, bool on)
 {
-	return rcar_sysc_pwr_on_off(sysc_ch, 0, PWROFFCR_OFFS);
-}
-
-static int rcar_sysc_pwr_on(struct rcar_sysc_ch *sysc_ch)
-{
-	return rcar_sysc_pwr_on_off(sysc_ch, 1, PWRONCR_OFFS);
-}
-
-static int rcar_sysc_update(struct rcar_sysc_ch *sysc_ch,
-			    int (*on_off_fn)(struct rcar_sysc_ch *))
-{
-	unsigned int isr_mask = 1 << sysc_ch->isr_bit;
-	unsigned int chan_mask = 1 << sysc_ch->chan_bit;
+	unsigned int isr_mask = BIT(sysc_ch->isr_bit);
+	unsigned int chan_mask = BIT(sysc_ch->chan_bit);
 	unsigned int status;
 	unsigned long flags;
 	int ret = 0;
@@ -78,15 +94,26 @@
 
 	iowrite32(isr_mask, rcar_sysc_base + SYSCISCR);
 
-	do {
-		ret = on_off_fn(sysc_ch);
+	/* Submit power shutoff or resume request until it is accepted */
+	for (k = 0; k < PWRER_RETRIES; k++) {
+		ret = rcar_sysc_pwr_on_off(sysc_ch, on);
 		if (ret)
 			goto out;
 
 		status = ioread32(rcar_sysc_base +
 				  sysc_ch->chan_offs + PWRER_OFFS);
-	} while (status & chan_mask);
+		if (!(status & chan_mask))
+			break;
 
+		udelay(PWRER_DELAY_US);
+	}
+
+	if (k == PWRER_RETRIES) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Wait until the power shutoff or resume request has completed */
 	for (k = 0; k < SYSCISR_RETRIES; k++) {
 		if (ioread32(rcar_sysc_base + SYSCISR) & isr_mask)
 			break;
@@ -106,22 +133,22 @@
 	return ret;
 }
 
-int rcar_sysc_power_down(struct rcar_sysc_ch *sysc_ch)
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch)
 {
-	return rcar_sysc_update(sysc_ch, rcar_sysc_pwr_off);
+	return rcar_sysc_power(sysc_ch, false);
 }
 
-int rcar_sysc_power_up(struct rcar_sysc_ch *sysc_ch)
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch)
 {
-	return rcar_sysc_update(sysc_ch, rcar_sysc_pwr_on);
+	return rcar_sysc_power(sysc_ch, true);
 }
 
-bool rcar_sysc_power_is_off(struct rcar_sysc_ch *sysc_ch)
+bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch)
 {
 	unsigned int st;
 
 	st = ioread32(rcar_sysc_base + sysc_ch->chan_offs + PWRSR_OFFS);
-	if (st & (1 << sysc_ch->chan_bit))
+	if (st & BIT(sysc_ch->chan_bit))
 		return true;
 
 	return false;
diff --git a/arch/arm/mach-shmobile/pm-rcar.h b/arch/arm/mach-shmobile/pm-rcar.h
index ef3a1ef..1b901db4 100644
--- a/arch/arm/mach-shmobile/pm-rcar.h
+++ b/arch/arm/mach-shmobile/pm-rcar.h
@@ -2,14 +2,14 @@
 #define PM_RCAR_H
 
 struct rcar_sysc_ch {
-	unsigned long chan_offs;
-	unsigned int chan_bit;
-	unsigned int isr_bit;
+	u16 chan_offs;
+	u8 chan_bit;
+	u8 isr_bit;
 };
 
-int rcar_sysc_power_down(struct rcar_sysc_ch *sysc_ch);
-int rcar_sysc_power_up(struct rcar_sysc_ch *sysc_ch);
-bool rcar_sysc_power_is_off(struct rcar_sysc_ch *sysc_ch);
+int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
+int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
+bool rcar_sysc_power_is_off(const struct rcar_sysc_ch *sysc_ch);
 void __iomem *rcar_sysc_init(phys_addr_t base);
 
 #endif /* PM_RCAR_H */
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index 9501820..a5b96b9 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -34,6 +34,12 @@
 #define PSTR_RETRIES	100
 #define PSTR_DELAY_US	10
 
+static inline
+struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
+{
+	return container_of(d, struct rmobile_pm_domain, genpd);
+}
+
 static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
 {
 	struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd);
@@ -42,7 +48,7 @@
 	if (rmobile_pd->bit_shift == ~0)
 		return -EBUSY;
 
-	mask = 1 << rmobile_pd->bit_shift;
+	mask = BIT(rmobile_pd->bit_shift);
 	if (rmobile_pd->suspend) {
 		int ret = rmobile_pd->suspend();
 
@@ -79,7 +85,7 @@
 	if (rmobile_pd->bit_shift == ~0)
 		return 0;
 
-	mask = 1 << rmobile_pd->bit_shift;
+	mask = BIT(rmobile_pd->bit_shift);
 	if (__raw_readl(rmobile_pd->base + PSTR) & mask)
 		goto out;
 
@@ -163,43 +169,6 @@
 	__rmobile_pd_power_up(rmobile_pd, false);
 }
 
-#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
-
-void rmobile_init_domains(struct rmobile_pm_domain domains[], int num)
-{
-	int j;
-
-	for (j = 0; j < num; j++)
-		rmobile_init_pm_domain(&domains[j]);
-}
-
-void rmobile_add_device_to_domain_td(const char *domain_name,
-				     struct platform_device *pdev,
-				     struct gpd_timing_data *td)
-{
-	struct device *dev = &pdev->dev;
-
-	__pm_genpd_name_add_device(domain_name, dev, td);
-}
-
-void rmobile_add_devices_to_domains(struct pm_domain_device data[],
-				    int size)
-{
-	struct gpd_timing_data latencies = {
-		.stop_latency_ns = DEFAULT_DEV_LATENCY_NS,
-		.start_latency_ns = DEFAULT_DEV_LATENCY_NS,
-		.save_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
-		.restore_state_latency_ns = DEFAULT_DEV_LATENCY_NS,
-	};
-	int j;
-
-	for (j = 0; j < size; j++)
-		rmobile_add_device_to_domain_td(data[j].domain_name,
-						data[j].pdev, &latencies);
-}
-
-#else /* !CONFIG_ARCH_SHMOBILE_LEGACY */
-
 static int rmobile_pd_suspend_busy(void)
 {
 	/*
@@ -430,5 +399,3 @@
 }
 
 core_initcall(rmobile_init_pm_domains);
-
-#endif /* !CONFIG_ARCH_SHMOBILE_LEGACY */
diff --git a/arch/arm/mach-shmobile/pm-rmobile.h b/arch/arm/mach-shmobile/pm-rmobile.h
index 5321978..30a4a42 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.h
+++ b/arch/arm/mach-shmobile/pm-rmobile.h
@@ -26,39 +26,9 @@
 	bool no_debug;
 };
 
-static inline
-struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d)
-{
-	return container_of(d, struct rmobile_pm_domain, genpd);
-}
-
 struct pm_domain_device {
 	const char *domain_name;
 	struct platform_device *pdev;
 };
 
-#if defined(CONFIG_PM_RMOBILE) && defined(CONFIG_ARCH_SHMOBILE_LEGACY)
-extern void rmobile_init_domains(struct rmobile_pm_domain domains[], int num);
-extern void rmobile_add_device_to_domain_td(const char *domain_name,
-					    struct platform_device *pdev,
-					    struct gpd_timing_data *td);
-
-static inline void rmobile_add_device_to_domain(const char *domain_name,
-						struct platform_device *pdev)
-{
-	rmobile_add_device_to_domain_td(domain_name, pdev, NULL);
-}
-
-extern void rmobile_add_devices_to_domains(struct pm_domain_device data[],
-					   int size);
-#else
-
-#define rmobile_init_domains(domains, num) do { } while (0)
-#define rmobile_add_device_to_domain_td(name, pdev, td) do { } while (0)
-#define rmobile_add_device_to_domain(name, pdev) do { } while (0)
-
-static inline void rmobile_add_devices_to_domains(struct pm_domain_device d[],
-						  int size) {}
-#endif /* CONFIG_PM_RMOBILE */
-
 #endif /* PM_RMOBILE_H */
diff --git a/arch/arm/mach-shmobile/pm-sh73a0.c b/arch/arm/mach-shmobile/pm-sh73a0.c
deleted file mode 100644
index a7e4668..0000000
--- a/arch/arm/mach-shmobile/pm-sh73a0.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * sh73a0 Power management support
- *
- *  Copyright (C) 2012 Bastian Hecht <hechtb+renesas@gmail.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/suspend.h>
-#include "common.h"
-
-#ifdef CONFIG_SUSPEND
-static int sh73a0_enter_suspend(suspend_state_t suspend_state)
-{
-	cpu_do_idle();
-	return 0;
-}
-
-static void sh73a0_suspend_init(void)
-{
-	shmobile_suspend_ops.enter = sh73a0_enter_suspend;
-}
-#else
-static void sh73a0_suspend_init(void) {}
-#endif
-
-void __init sh73a0_pm_init(void)
-{
-	sh73a0_suspend_init();
-}
diff --git a/arch/arm/mach-shmobile/r8a7740.h b/arch/arm/mach-shmobile/r8a7740.h
deleted file mode 100644
index ca7805a..0000000
--- a/arch/arm/mach-shmobile/r8a7740.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2011  Renesas Solutions Corp.
- * Copyright (C) 2011  Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __ASM_R8A7740_H__
-#define __ASM_R8A7740_H__
-
-/*
- * MD_CKx pin
- */
-#define MD_CK2	(1 << 2)
-#define MD_CK1	(1 << 1)
-#define MD_CK0	(1 << 0)
-
-/* DMA slave IDs */
-enum {
-	SHDMA_SLAVE_INVALID,
-	SHDMA_SLAVE_SDHI0_RX,
-	SHDMA_SLAVE_SDHI0_TX,
-	SHDMA_SLAVE_SDHI1_RX,
-	SHDMA_SLAVE_SDHI1_TX,
-	SHDMA_SLAVE_SDHI2_RX,
-	SHDMA_SLAVE_SDHI2_TX,
-	SHDMA_SLAVE_FSIA_RX,
-	SHDMA_SLAVE_FSIA_TX,
-	SHDMA_SLAVE_FSIB_TX,
-	SHDMA_SLAVE_USBHS_TX,
-	SHDMA_SLAVE_USBHS_RX,
-	SHDMA_SLAVE_MMCIF_TX,
-	SHDMA_SLAVE_MMCIF_RX,
-};
-
-extern void r8a7740_meram_workaround(void);
-extern void r8a7740_init_irq_of(void);
-extern void r8a7740_map_io(void);
-extern void r8a7740_add_early_devices(void);
-extern void r8a7740_add_standard_devices(void);
-extern void r8a7740_clock_init(u8 md_ck);
-extern void r8a7740_pinmux_init(void);
-extern void r8a7740_pm_init(void);
-
-#if defined(CONFIG_PM) && !defined(CONFIG_ARCH_MULTIPLATFORM)
-extern void __init r8a7740_init_pm_domains(void);
-#else
-static inline void r8a7740_init_pm_domains(void) {}
-#endif /* CONFIG_PM && !CONFIG_ARCH_MULTIPLATFORM */
-
-#endif /* __ASM_R8A7740_H__ */
diff --git a/arch/arm/mach-shmobile/r8a7779.h b/arch/arm/mach-shmobile/r8a7779.h
index 19f9704..db303f7 100644
--- a/arch/arm/mach-shmobile/r8a7779.h
+++ b/arch/arm/mach-shmobile/r8a7779.h
@@ -3,26 +3,7 @@
 
 #include <linux/sh_clk.h>
 
-/* HPB-DMA slave IDs */
-enum {
-	HPBDMA_SLAVE_DUMMY,
-	HPBDMA_SLAVE_SDHI0_TX,
-	HPBDMA_SLAVE_SDHI0_RX,
-};
-
-extern void r8a7779_init_irq_extpin(int irlm);
-extern void r8a7779_init_irq_extpin_dt(int irlm);
-extern void r8a7779_init_irq_dt(void);
-extern void r8a7779_map_io(void);
-extern void r8a7779_earlytimer_init(void);
-extern void r8a7779_add_early_devices(void);
-extern void r8a7779_add_standard_devices(void);
-extern void r8a7779_init_late(void);
-extern u32 r8a7779_read_mode_pins(void);
-extern void r8a7779_clock_init(void);
-extern void r8a7779_pinmux_init(void);
 extern void r8a7779_pm_init(void);
-extern void r8a7779_register_twd(void);
 
 #ifdef CONFIG_PM
 extern void __init r8a7779_init_pm_domains(void);
diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
index 384e6e9..62437b5 100644
--- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
@@ -123,7 +123,8 @@
 	u32 mon;
 
 	if (!of_machine_is_compatible("renesas,koelsch") &&
-	    !of_machine_is_compatible("renesas,lager"))
+	    !of_machine_is_compatible("renesas,lager") &&
+	    !of_machine_is_compatible("renesas,gose"))
 		return -ENODEV;
 
 	irqc = ioremap(IRQC_BASE, PAGE_SIZE);
diff --git a/arch/arm/mach-shmobile/setup-r7s72100.c b/arch/arm/mach-shmobile/setup-r7s72100.c
index 1711747..d46639f 100644
--- a/arch/arm/mach-shmobile/setup-r7s72100.c
+++ b/arch/arm/mach-shmobile/setup-r7s72100.c
@@ -20,7 +20,7 @@
 
 #include "common.h"
 
-static const char *r7s72100_boards_compat_dt[] __initdata = {
+static const char *const r7s72100_boards_compat_dt[] __initconst = {
 	"renesas,r7s72100",
 	NULL,
 };
diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c
index 446cee6..20173c4 100644
--- a/arch/arm/mach-shmobile/setup-r8a73a4.c
+++ b/arch/arm/mach-shmobile/setup-r8a73a4.c
@@ -20,7 +20,7 @@
 
 #include "common.h"
 
-static const char *r8a73a4_boards_compat_dt[] __initdata = {
+static const char *const r8a73a4_boards_compat_dt[] __initconst = {
 	"renesas,r8a73a4",
 	NULL,
 };
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 00291cc..0c8f80c 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -13,31 +13,19 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#include <linux/dma-mapping.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic.h>
-#include <linux/platform_data/irq-renesas-intc-irqpin.h>
-#include <linux/platform_device.h>
 #include <linux/of_platform.h>
-#include <linux/serial_sci.h>
-#include <linux/sh_dma.h>
-#include <linux/sh_timer.h>
-#include <linux/platform_data/sh_ipmmu.h>
 
-#include <asm/mach-types.h>
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "common.h"
-#include "dma-register.h"
-#include "irqs.h"
-#include "pm-rmobile.h"
-#include "r8a7740.h"
 
 static struct map_desc r8a7740_io_desc[] __initdata = {
 	 /*
@@ -64,613 +52,12 @@
 #endif
 };
 
-void __init r8a7740_map_io(void)
+static void __init r8a7740_map_io(void)
 {
 	debug_ll_io_init();
 	iotable_init(r8a7740_io_desc, ARRAY_SIZE(r8a7740_io_desc));
 }
 
-/* PFC */
-static const struct resource pfc_resources[] = {
-	DEFINE_RES_MEM(0xe6050000, 0x8000),
-	DEFINE_RES_MEM(0xe605800c, 0x0020),
-};
-
-void __init r8a7740_pinmux_init(void)
-{
-	platform_device_register_simple("pfc-r8a7740", -1, pfc_resources,
-					ARRAY_SIZE(pfc_resources));
-}
-
-static struct renesas_intc_irqpin_config irqpin0_platform_data = {
-	.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
-};
-
-static struct resource irqpin0_resources[] = {
-	DEFINE_RES_MEM(0xe6900000, 4), /* ICR1A */
-	DEFINE_RES_MEM(0xe6900010, 4), /* INTPRI00A */
-	DEFINE_RES_MEM(0xe6900020, 1), /* INTREQ00A */
-	DEFINE_RES_MEM(0xe6900040, 1), /* INTMSK00A */
-	DEFINE_RES_MEM(0xe6900060, 1), /* INTMSKCLR00A */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ0 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ1 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ2 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ3 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ4 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ5 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ6 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ7 */
-};
-
-static struct platform_device irqpin0_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 0,
-	.resource	= irqpin0_resources,
-	.num_resources	= ARRAY_SIZE(irqpin0_resources),
-	.dev		= {
-		.platform_data  = &irqpin0_platform_data,
-	},
-};
-
-static struct renesas_intc_irqpin_config irqpin1_platform_data = {
-	.irq_base = irq_pin(8), /* IRQ8 -> IRQ15 */
-};
-
-static struct resource irqpin1_resources[] = {
-	DEFINE_RES_MEM(0xe6900004, 4), /* ICR2A */
-	DEFINE_RES_MEM(0xe6900014, 4), /* INTPRI10A */
-	DEFINE_RES_MEM(0xe6900024, 1), /* INTREQ10A */
-	DEFINE_RES_MEM(0xe6900044, 1), /* INTMSK10A */
-	DEFINE_RES_MEM(0xe6900064, 1), /* INTMSKCLR10A */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ8 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ9 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ10 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ11 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ12 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ13 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ14 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ15 */
-};
-
-static struct platform_device irqpin1_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 1,
-	.resource	= irqpin1_resources,
-	.num_resources	= ARRAY_SIZE(irqpin1_resources),
-	.dev		= {
-		.platform_data  = &irqpin1_platform_data,
-	},
-};
-
-static struct renesas_intc_irqpin_config irqpin2_platform_data = {
-	.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
-};
-
-static struct resource irqpin2_resources[] = {
-	DEFINE_RES_MEM(0xe6900008, 4), /* ICR3A */
-	DEFINE_RES_MEM(0xe6900018, 4), /* INTPRI30A */
-	DEFINE_RES_MEM(0xe6900028, 1), /* INTREQ30A */
-	DEFINE_RES_MEM(0xe6900048, 1), /* INTMSK30A */
-	DEFINE_RES_MEM(0xe6900068, 1), /* INTMSKCLR30A */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ16 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ17 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ18 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ19 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ20 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ21 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ22 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ23 */
-};
-
-static struct platform_device irqpin2_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 2,
-	.resource	= irqpin2_resources,
-	.num_resources	= ARRAY_SIZE(irqpin2_resources),
-	.dev		= {
-		.platform_data  = &irqpin2_platform_data,
-	},
-};
-
-static struct renesas_intc_irqpin_config irqpin3_platform_data = {
-	.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
-};
-
-static struct resource irqpin3_resources[] = {
-	DEFINE_RES_MEM(0xe690000c, 4), /* ICR3A */
-	DEFINE_RES_MEM(0xe690001c, 4), /* INTPRI30A */
-	DEFINE_RES_MEM(0xe690002c, 1), /* INTREQ30A */
-	DEFINE_RES_MEM(0xe690004c, 1), /* INTMSK30A */
-	DEFINE_RES_MEM(0xe690006c, 1), /* INTMSKCLR30A */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ24 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ25 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ26 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ27 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ28 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ29 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ30 */
-	DEFINE_RES_IRQ(gic_spi(149)), /* IRQ31 */
-};
-
-static struct platform_device irqpin3_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 3,
-	.resource	= irqpin3_resources,
-	.num_resources	= ARRAY_SIZE(irqpin3_resources),
-	.dev		= {
-		.platform_data  = &irqpin3_platform_data,
-	},
-};
-
-/* SCIF */
-#define R8A7740_SCIF(scif_type, index, baseaddr, irq)		\
-static struct plat_sci_port scif##index##_platform_data = {	\
-	.type		= scif_type,				\
-	.flags		= UPF_BOOT_AUTOCONF,			\
-	.scscr		= SCSCR_RE | SCSCR_TE,			\
-};								\
-								\
-static struct resource scif##index##_resources[] = {		\
-	DEFINE_RES_MEM(baseaddr, 0x100),			\
-	DEFINE_RES_IRQ(irq),					\
-};								\
-								\
-static struct platform_device scif##index##_device = {		\
-	.name		= "sh-sci",				\
-	.id		= index,				\
-	.resource	= scif##index##_resources,		\
-	.num_resources	= ARRAY_SIZE(scif##index##_resources),	\
-	.dev		= {					\
-		.platform_data	= &scif##index##_platform_data,	\
-	},							\
-}
-
-R8A7740_SCIF(PORT_SCIFA, 0, 0xe6c40000, gic_spi(100));
-R8A7740_SCIF(PORT_SCIFA, 1, 0xe6c50000, gic_spi(101));
-R8A7740_SCIF(PORT_SCIFA, 2, 0xe6c60000, gic_spi(102));
-R8A7740_SCIF(PORT_SCIFA, 3, 0xe6c70000, gic_spi(103));
-R8A7740_SCIF(PORT_SCIFA, 4, 0xe6c80000, gic_spi(104));
-R8A7740_SCIF(PORT_SCIFA, 5, 0xe6cb0000, gic_spi(105));
-R8A7740_SCIF(PORT_SCIFA, 6, 0xe6cc0000, gic_spi(106));
-R8A7740_SCIF(PORT_SCIFA, 7, 0xe6cd0000, gic_spi(107));
-R8A7740_SCIF(PORT_SCIFB, 8, 0xe6c30000, gic_spi(108));
-
-/* CMT */
-static struct sh_timer_config cmt1_platform_data = {
-	.channels_mask = 0x3f,
-};
-
-static struct resource cmt1_resources[] = {
-	DEFINE_RES_MEM(0xe6138000, 0x170),
-	DEFINE_RES_IRQ(gic_spi(58)),
-};
-
-static struct platform_device cmt1_device = {
-	.name		= "sh-cmt-48",
-	.id		= 1,
-	.dev = {
-		.platform_data	= &cmt1_platform_data,
-	},
-	.resource	= cmt1_resources,
-	.num_resources	= ARRAY_SIZE(cmt1_resources),
-};
-
-/* TMU */
-static struct sh_timer_config tmu0_platform_data = {
-	.channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
-	DEFINE_RES_MEM(0xfff80000, 0x2c),
-	DEFINE_RES_IRQ(gic_spi(198)),
-	DEFINE_RES_IRQ(gic_spi(199)),
-	DEFINE_RES_IRQ(gic_spi(200)),
-};
-
-static struct platform_device tmu0_device = {
-	.name		= "sh-tmu",
-	.id		= 0,
-	.dev = {
-		.platform_data	= &tmu0_platform_data,
-	},
-	.resource	= tmu0_resources,
-	.num_resources	= ARRAY_SIZE(tmu0_resources),
-};
-
-/* IPMMUI (an IPMMU module for ICB/LMB) */
-static struct resource ipmmu_resources[] = {
-	[0] = {
-		.name	= "IPMMUI",
-		.start	= 0xfe951000,
-		.end	= 0xfe9510ff,
-		.flags	= IORESOURCE_MEM,
-	},
-};
-
-static const char * const ipmmu_dev_names[] = {
-	"sh_mobile_lcdc_fb.0",
-	"sh_mobile_lcdc_fb.1",
-	"sh_mobile_ceu.0",
-};
-
-static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
-	.dev_names = ipmmu_dev_names,
-	.num_dev_names = ARRAY_SIZE(ipmmu_dev_names),
-};
-
-static struct platform_device ipmmu_device = {
-	.name           = "ipmmu",
-	.id             = -1,
-	.dev = {
-		.platform_data = &ipmmu_platform_data,
-	},
-	.resource       = ipmmu_resources,
-	.num_resources  = ARRAY_SIZE(ipmmu_resources),
-};
-
-static struct platform_device *r8a7740_early_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
-	&scif2_device,
-	&scif3_device,
-	&scif4_device,
-	&scif5_device,
-	&scif6_device,
-	&scif7_device,
-	&scif8_device,
-	&irqpin0_device,
-	&irqpin1_device,
-	&irqpin2_device,
-	&irqpin3_device,
-	&tmu0_device,
-	&ipmmu_device,
-	&cmt1_device,
-};
-
-/* DMA */
-static const struct sh_dmae_slave_config r8a7740_dmae_slaves[] = {
-	{
-		.slave_id	= SHDMA_SLAVE_SDHI0_TX,
-		.addr		= 0xe6850030,
-		.chcr		= CHCR_TX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xc1,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI0_RX,
-		.addr		= 0xe6850030,
-		.chcr		= CHCR_RX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xc2,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI1_TX,
-		.addr		= 0xe6860030,
-		.chcr		= CHCR_TX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xc9,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI1_RX,
-		.addr		= 0xe6860030,
-		.chcr		= CHCR_RX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xca,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI2_TX,
-		.addr		= 0xe6870030,
-		.chcr		= CHCR_TX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xcd,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI2_RX,
-		.addr		= 0xe6870030,
-		.chcr		= CHCR_RX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xce,
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSIA_TX,
-		.addr		= 0xfe1f0024,
-		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xb1,
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSIA_RX,
-		.addr		= 0xfe1f0020,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xb2,
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSIB_TX,
-		.addr		= 0xfe1f0064,
-		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xb5,
-	}, {
-		.slave_id	= SHDMA_SLAVE_MMCIF_TX,
-		.addr		= 0xe6bd0034,
-		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd1,
-	}, {
-		.slave_id	= SHDMA_SLAVE_MMCIF_RX,
-		.addr		= 0xe6bd0034,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd2,
-	},
-};
-
-#define DMA_CHANNEL(a, b, c)			\
-{						\
-	.offset		= a,			\
-	.dmars		= b,			\
-	.dmars_bit	= c,			\
-	.chclr_offset	= (0x220 - 0x20) + a	\
-}
-
-static const struct sh_dmae_channel r8a7740_dmae_channels[] = {
-	DMA_CHANNEL(0x00, 0, 0),
-	DMA_CHANNEL(0x10, 0, 8),
-	DMA_CHANNEL(0x20, 4, 0),
-	DMA_CHANNEL(0x30, 4, 8),
-	DMA_CHANNEL(0x50, 8, 0),
-	DMA_CHANNEL(0x60, 8, 8),
-};
-
-static struct sh_dmae_pdata dma_platform_data = {
-	.slave		= r8a7740_dmae_slaves,
-	.slave_num	= ARRAY_SIZE(r8a7740_dmae_slaves),
-	.channel	= r8a7740_dmae_channels,
-	.channel_num	= ARRAY_SIZE(r8a7740_dmae_channels),
-	.ts_low_shift	= TS_LOW_SHIFT,
-	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT,
-	.ts_high_shift	= TS_HI_SHIFT,
-	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT,
-	.ts_shift	= dma_ts_shift,
-	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift),
-	.dmaor_init	= DMAOR_DME,
-	.chclr_present	= 1,
-};
-
-/* Resource order important! */
-static struct resource r8a7740_dmae0_resources[] = {
-	{
-		/* Channel registers and DMAOR */
-		.start	= 0xfe008020,
-		.end	= 0xfe00828f,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		/* DMARSx */
-		.start	= 0xfe009000,
-		.end	= 0xfe00900b,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		.name	= "error_irq",
-		.start	= gic_spi(34),
-		.end	= gic_spi(34),
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		/* IRQ for channels 0-5 */
-		.start	= gic_spi(28),
-		.end	= gic_spi(33),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-/* Resource order important! */
-static struct resource r8a7740_dmae1_resources[] = {
-	{
-		/* Channel registers and DMAOR */
-		.start	= 0xfe018020,
-		.end	= 0xfe01828f,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		/* DMARSx */
-		.start	= 0xfe019000,
-		.end	= 0xfe01900b,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		.name	= "error_irq",
-		.start	= gic_spi(41),
-		.end	= gic_spi(41),
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		/* IRQ for channels 0-5 */
-		.start	= gic_spi(35),
-		.end	= gic_spi(40),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-/* Resource order important! */
-static struct resource r8a7740_dmae2_resources[] = {
-	{
-		/* Channel registers and DMAOR */
-		.start	= 0xfe028020,
-		.end	= 0xfe02828f,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		/* DMARSx */
-		.start	= 0xfe029000,
-		.end	= 0xfe02900b,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		.name	= "error_irq",
-		.start	= gic_spi(48),
-		.end	= gic_spi(48),
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		/* IRQ for channels 0-5 */
-		.start	= gic_spi(42),
-		.end	= gic_spi(47),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device dma0_device = {
-	.name		= "sh-dma-engine",
-	.id		= 0,
-	.resource	= r8a7740_dmae0_resources,
-	.num_resources	= ARRAY_SIZE(r8a7740_dmae0_resources),
-	.dev		= {
-		.platform_data	= &dma_platform_data,
-	},
-};
-
-static struct platform_device dma1_device = {
-	.name		= "sh-dma-engine",
-	.id		= 1,
-	.resource	= r8a7740_dmae1_resources,
-	.num_resources	= ARRAY_SIZE(r8a7740_dmae1_resources),
-	.dev		= {
-		.platform_data	= &dma_platform_data,
-	},
-};
-
-static struct platform_device dma2_device = {
-	.name		= "sh-dma-engine",
-	.id		= 2,
-	.resource	= r8a7740_dmae2_resources,
-	.num_resources	= ARRAY_SIZE(r8a7740_dmae2_resources),
-	.dev		= {
-		.platform_data	= &dma_platform_data,
-	},
-};
-
-/* USB-DMAC */
-static const struct sh_dmae_channel r8a7740_usb_dma_channels[] = {
-	{
-		.offset = 0,
-	}, {
-		.offset = 0x20,
-	},
-};
-
-static const struct sh_dmae_slave_config r8a7740_usb_dma_slaves[] = {
-	{
-		.slave_id	= SHDMA_SLAVE_USBHS_TX,
-		.chcr		= USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
-	}, {
-		.slave_id	= SHDMA_SLAVE_USBHS_RX,
-		.chcr		= USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE),
-	},
-};
-
-static struct sh_dmae_pdata usb_dma_platform_data = {
-	.slave		= r8a7740_usb_dma_slaves,
-	.slave_num	= ARRAY_SIZE(r8a7740_usb_dma_slaves),
-	.channel	= r8a7740_usb_dma_channels,
-	.channel_num	= ARRAY_SIZE(r8a7740_usb_dma_channels),
-	.ts_low_shift	= USBTS_LOW_SHIFT,
-	.ts_low_mask	= USBTS_LOW_BIT << USBTS_LOW_SHIFT,
-	.ts_high_shift	= USBTS_HI_SHIFT,
-	.ts_high_mask	= USBTS_HI_BIT << USBTS_HI_SHIFT,
-	.ts_shift	= dma_usbts_shift,
-	.ts_shift_num	= ARRAY_SIZE(dma_usbts_shift),
-	.dmaor_init	= DMAOR_DME,
-	.chcr_offset	= 0x14,
-	.chcr_ie_bit	= 1 << 5,
-	.dmaor_is_32bit	= 1,
-	.needs_tend_set	= 1,
-	.no_dmars	= 1,
-	.slave_only	= 1,
-};
-
-static struct resource r8a7740_usb_dma_resources[] = {
-	{
-		/* Channel registers and DMAOR */
-		.start	= 0xe68a0020,
-		.end	= 0xe68a0064 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		/* VCR/SWR/DMICR */
-		.start	= 0xe68a0000,
-		.end	= 0xe68a0014 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	{
-		/* IRQ for channels */
-		.start	= gic_spi(49),
-		.end	= gic_spi(49),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device usb_dma_device = {
-	.name		= "sh-dma-engine",
-	.id		= 3,
-	.resource	= r8a7740_usb_dma_resources,
-	.num_resources	= ARRAY_SIZE(r8a7740_usb_dma_resources),
-	.dev		= {
-		.platform_data	= &usb_dma_platform_data,
-	},
-};
-
-/* I2C */
-static struct resource i2c0_resources[] = {
-	[0] = {
-		.name	= "IIC0",
-		.start	= 0xfff20000,
-		.end	= 0xfff20425 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_spi(201),
-		.end	= gic_spi(204),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource i2c1_resources[] = {
-	[0] = {
-		.name	= "IIC1",
-		.start	= 0xe6c20000,
-		.end	= 0xe6c20425 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start  = gic_spi(70), /* IIC1_ALI1 */
-		.end    = gic_spi(73), /* IIC1_DTEI1 */
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device i2c0_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 0,
-	.resource	= i2c0_resources,
-	.num_resources	= ARRAY_SIZE(i2c0_resources),
-};
-
-static struct platform_device i2c1_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 1,
-	.resource	= i2c1_resources,
-	.num_resources	= ARRAY_SIZE(i2c1_resources),
-};
-
-static struct resource pmu_resources[] = {
-	[0] = {
-		.start	= gic_spi(83),
-		.end	= gic_spi(83),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device pmu_device = {
-	.name	= "armv7-pmu",
-	.id	= -1,
-	.num_resources = ARRAY_SIZE(pmu_resources),
-	.resource = pmu_resources,
-};
-
-static struct platform_device *r8a7740_late_devices[] __initdata = {
-	&i2c0_device,
-	&i2c1_device,
-	&dma0_device,
-	&dma1_device,
-	&dma2_device,
-	&usb_dma_device,
-	&pmu_device,
-};
-
 /*
  * r8a7740 chip has lasting errata on MERAM buffer.
  * this is work-around for it.
@@ -678,7 +65,7 @@
  *	"Media RAM (MERAM)" on r8a7740 documentation
  */
 #define MEBUFCNTR	0xFE950098
-void __init r8a7740_meram_workaround(void)
+static void __init r8a7740_meram_workaround(void)
 {
 	void __iomem *reg;
 
@@ -689,70 +76,13 @@
 	}
 }
 
-void __init r8a7740_add_standard_devices(void)
-{
-	static struct pm_domain_device domain_devices[] __initdata = {
-		{ "A4R",  &tmu0_device },
-		{ "A4R",  &i2c0_device },
-		{ "A4S",  &irqpin0_device },
-		{ "A4S",  &irqpin1_device },
-		{ "A4S",  &irqpin2_device },
-		{ "A4S",  &irqpin3_device },
-		{ "A3SP", &scif0_device },
-		{ "A3SP", &scif1_device },
-		{ "A3SP", &scif2_device },
-		{ "A3SP", &scif3_device },
-		{ "A3SP", &scif4_device },
-		{ "A3SP", &scif5_device },
-		{ "A3SP", &scif6_device },
-		{ "A3SP", &scif7_device },
-		{ "A3SP", &scif8_device },
-		{ "A3SP", &i2c1_device },
-		{ "A3SP", &ipmmu_device },
-		{ "A3SP", &dma0_device },
-		{ "A3SP", &dma1_device },
-		{ "A3SP", &dma2_device },
-		{ "A3SP", &usb_dma_device },
-	};
-
-	r8a7740_init_pm_domains();
-
-	/* add devices */
-	platform_add_devices(r8a7740_early_devices,
-			    ARRAY_SIZE(r8a7740_early_devices));
-	platform_add_devices(r8a7740_late_devices,
-			     ARRAY_SIZE(r8a7740_late_devices));
-
-	/* add devices to PM domain  */
-	rmobile_add_devices_to_domains(domain_devices,
-				       ARRAY_SIZE(domain_devices));
-}
-
-void __init r8a7740_add_early_devices(void)
-{
-	early_platform_add_devices(r8a7740_early_devices,
-				   ARRAY_SIZE(r8a7740_early_devices));
-
-	/* setup early console here as well */
-	shmobile_setup_console();
-}
-
-#ifdef CONFIG_USE_OF
-
-void __init r8a7740_init_irq_of(void)
+static void __init r8a7740_init_irq_of(void)
 {
 	void __iomem *intc_prio_base = ioremap_nocache(0xe6900010, 0x10);
 	void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
 	void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
 
-#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
-	void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
-	void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
-
-	gic_init(0, 29, gic_dist_base, gic_cpu_base);
-#else
 	irqchip_init();
-#endif
 
 	/* route signals to GIC */
 	iowrite32(0x0, pfc_inta_ctrl);
@@ -787,7 +117,7 @@
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static const char *r8a7740_boards_compat_dt[] __initdata = {
+static const char *const r8a7740_boards_compat_dt[] __initconst = {
 	"renesas,r8a7740",
 	NULL,
 };
@@ -800,5 +130,3 @@
 	.init_late	= shmobile_init_late,
 	.dt_compat	= r8a7740_boards_compat_dt,
 MACHINE_END
-
-#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index c49aa09..b9116c8 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -615,7 +615,7 @@
 	iounmap(base);
 }
 
-static const char *r8a7778_compat_dt[] __initdata = {
+static const char *const r8a7778_compat_dt[] __initconst = {
 	"renesas,r8a7778",
 	NULL,
 };
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index c03e562..6bfa640 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -14,37 +14,17 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
-#include <linux/kernel.h>
+#include <linux/clk/shmobile.h>
+#include <linux/clocksource.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic.h>
-#include <linux/of_platform.h>
-#include <linux/platform_data/dma-rcar-hpbdma.h>
-#include <linux/platform_data/gpio-rcar.h>
-#include <linux/platform_data/irq-renesas-intc-irqpin.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/serial_sci.h>
-#include <linux/sh_timer.h>
-#include <linux/dma-mapping.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/ehci_pdriver.h>
-#include <linux/usb/ohci_pdriver.h>
-#include <linux/pm_runtime.h>
 
-#include <asm/mach-types.h>
 #include <asm/mach/arch.h>
-#include <asm/mach/time.h>
 #include <asm/mach/map.h>
-#include <asm/hardware/cache-l2x0.h>
 
 #include "common.h"
-#include "irqs.h"
 #include "r8a7779.h"
 
 static struct map_desc r8a7779_io_desc[] __initdata = {
@@ -64,7 +44,7 @@
 	},
 };
 
-void __init r8a7779_map_io(void)
+static void __init r8a7779_map_io(void)
 {
 	debug_ll_io_init();
 	iotable_init(r8a7779_io_desc, ARRAY_SIZE(r8a7779_io_desc));
@@ -80,652 +60,10 @@
 #define INT2NTSR0 IOMEM(0xfe700060)
 #define INT2NTSR1 IOMEM(0xfe700064)
 
-static struct renesas_intc_irqpin_config irqpin0_platform_data __initdata = {
-	.irq_base = irq_pin(0), /* IRQ0 -> IRQ3 */
-	.sense_bitfield_width = 2,
-};
-
-static struct resource irqpin0_resources[] __initdata = {
-	DEFINE_RES_MEM(0xfe78001c, 4), /* ICR1 */
-	DEFINE_RES_MEM(0xfe780010, 4), /* INTPRI */
-	DEFINE_RES_MEM(0xfe780024, 4), /* INTREQ */
-	DEFINE_RES_MEM(0xfe780044, 4), /* INTMSK0 */
-	DEFINE_RES_MEM(0xfe780064, 4), /* INTMSKCLR0 */
-	DEFINE_RES_IRQ(gic_spi(27)), /* IRQ0 */
-	DEFINE_RES_IRQ(gic_spi(28)), /* IRQ1 */
-	DEFINE_RES_IRQ(gic_spi(29)), /* IRQ2 */
-	DEFINE_RES_IRQ(gic_spi(30)), /* IRQ3 */
-};
-
-void __init r8a7779_init_irq_extpin_dt(int irlm)
+static void __init r8a7779_init_irq_dt(void)
 {
-	void __iomem *icr0 = ioremap_nocache(0xfe780000, PAGE_SIZE);
-	u32 tmp;
-
-	if (!icr0) {
-		pr_warn("r8a7779: unable to setup external irq pin mode\n");
-		return;
-	}
-
-	tmp = ioread32(icr0);
-	if (irlm)
-		tmp |= 1 << 23; /* IRQ0 -> IRQ3 as individual pins */
-	else
-		tmp &= ~(1 << 23); /* IRL mode - not supported */
-	tmp |= (1 << 21); /* LVLMODE = 1 */
-	iowrite32(tmp, icr0);
-	iounmap(icr0);
-}
-
-void __init r8a7779_init_irq_extpin(int irlm)
-{
-	r8a7779_init_irq_extpin_dt(irlm);
-	if (irlm)
-		platform_device_register_resndata(
-			NULL, "renesas_intc_irqpin", -1,
-			irqpin0_resources, ARRAY_SIZE(irqpin0_resources),
-			&irqpin0_platform_data, sizeof(irqpin0_platform_data));
-}
-
-/* PFC/GPIO */
-static struct resource r8a7779_pfc_resources[] = {
-	DEFINE_RES_MEM(0xfffc0000, 0x023c),
-};
-
-static struct platform_device r8a7779_pfc_device = {
-	.name		= "pfc-r8a7779",
-	.id		= -1,
-	.resource	= r8a7779_pfc_resources,
-	.num_resources	= ARRAY_SIZE(r8a7779_pfc_resources),
-};
-
-#define R8A7779_GPIO(idx, npins) \
-static struct resource r8a7779_gpio##idx##_resources[] = {		\
-	DEFINE_RES_MEM(0xffc40000 + (0x1000 * (idx)), 0x002c),		\
-	DEFINE_RES_IRQ(gic_iid(0xad + (idx))),				\
-};									\
-									\
-static struct gpio_rcar_config r8a7779_gpio##idx##_platform_data = {	\
-	.gpio_base	= 32 * (idx),					\
-	.irq_base	= 0,						\
-	.number_of_pins	= npins,					\
-	.pctl_name	= "pfc-r8a7779",				\
-};									\
-									\
-static struct platform_device r8a7779_gpio##idx##_device = {		\
-	.name		= "gpio_rcar",					\
-	.id		= idx,						\
-	.resource	= r8a7779_gpio##idx##_resources,		\
-	.num_resources	= ARRAY_SIZE(r8a7779_gpio##idx##_resources),	\
-	.dev		= {						\
-		.platform_data	= &r8a7779_gpio##idx##_platform_data,	\
-	},								\
-}
-
-R8A7779_GPIO(0, 32);
-R8A7779_GPIO(1, 32);
-R8A7779_GPIO(2, 32);
-R8A7779_GPIO(3, 32);
-R8A7779_GPIO(4, 32);
-R8A7779_GPIO(5, 32);
-R8A7779_GPIO(6, 9);
-
-static struct platform_device *r8a7779_pinctrl_devices[] __initdata = {
-	&r8a7779_pfc_device,
-	&r8a7779_gpio0_device,
-	&r8a7779_gpio1_device,
-	&r8a7779_gpio2_device,
-	&r8a7779_gpio3_device,
-	&r8a7779_gpio4_device,
-	&r8a7779_gpio5_device,
-	&r8a7779_gpio6_device,
-};
-
-void __init r8a7779_pinmux_init(void)
-{
-	platform_add_devices(r8a7779_pinctrl_devices,
-			    ARRAY_SIZE(r8a7779_pinctrl_devices));
-}
-
-/* SCIF */
-#define R8A7779_SCIF(index, baseaddr, irq)			\
-static struct plat_sci_port scif##index##_platform_data = {	\
-	.type		= PORT_SCIF,				\
-	.flags		= UPF_BOOT_AUTOCONF | UPF_IOREMAP,	\
-	.scscr		= SCSCR_RE | SCSCR_TE | SCSCR_CKE1,	\
-};								\
-								\
-static struct resource scif##index##_resources[] = {		\
-	DEFINE_RES_MEM(baseaddr, 0x100),			\
-	DEFINE_RES_IRQ(irq),					\
-};								\
-								\
-static struct platform_device scif##index##_device = {		\
-	.name		= "sh-sci",				\
-	.id		= index,				\
-	.resource	= scif##index##_resources,		\
-	.num_resources	= ARRAY_SIZE(scif##index##_resources),	\
-	.dev		= {					\
-		.platform_data	= &scif##index##_platform_data,	\
-	},							\
-}
-
-R8A7779_SCIF(0, 0xffe40000, gic_iid(0x78));
-R8A7779_SCIF(1, 0xffe41000, gic_iid(0x79));
-R8A7779_SCIF(2, 0xffe42000, gic_iid(0x7a));
-R8A7779_SCIF(3, 0xffe43000, gic_iid(0x7b));
-R8A7779_SCIF(4, 0xffe44000, gic_iid(0x7c));
-R8A7779_SCIF(5, 0xffe45000, gic_iid(0x7d));
-
-/* TMU */
-static struct sh_timer_config tmu0_platform_data = {
-	.channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
-	DEFINE_RES_MEM(0xffd80000, 0x30),
-	DEFINE_RES_IRQ(gic_iid(0x40)),
-	DEFINE_RES_IRQ(gic_iid(0x41)),
-	DEFINE_RES_IRQ(gic_iid(0x42)),
-};
-
-static struct platform_device tmu0_device = {
-	.name		= "sh-tmu",
-	.id		= 0,
-	.dev = {
-		.platform_data	= &tmu0_platform_data,
-	},
-	.resource	= tmu0_resources,
-	.num_resources	= ARRAY_SIZE(tmu0_resources),
-};
-
-/* I2C */
-static struct resource rcar_i2c0_res[] = {
-	{
-		.start  = 0xffc70000,
-		.end    = 0xffc70fff,
-		.flags  = IORESOURCE_MEM,
-	}, {
-		.start  = gic_iid(0x6f),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device i2c0_device = {
-	.name		= "i2c-rcar",
-	.id		= 0,
-	.resource	= rcar_i2c0_res,
-	.num_resources	= ARRAY_SIZE(rcar_i2c0_res),
-};
-
-static struct resource rcar_i2c1_res[] = {
-	{
-		.start  = 0xffc71000,
-		.end    = 0xffc71fff,
-		.flags  = IORESOURCE_MEM,
-	}, {
-		.start  = gic_iid(0x72),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device i2c1_device = {
-	.name		= "i2c-rcar",
-	.id		= 1,
-	.resource	= rcar_i2c1_res,
-	.num_resources	= ARRAY_SIZE(rcar_i2c1_res),
-};
-
-static struct resource rcar_i2c2_res[] = {
-	{
-		.start  = 0xffc72000,
-		.end    = 0xffc72fff,
-		.flags  = IORESOURCE_MEM,
-	}, {
-		.start  = gic_iid(0x70),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device i2c2_device = {
-	.name		= "i2c-rcar",
-	.id		= 2,
-	.resource	= rcar_i2c2_res,
-	.num_resources	= ARRAY_SIZE(rcar_i2c2_res),
-};
-
-static struct resource rcar_i2c3_res[] = {
-	{
-		.start  = 0xffc73000,
-		.end    = 0xffc73fff,
-		.flags  = IORESOURCE_MEM,
-	}, {
-		.start  = gic_iid(0x71),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device i2c3_device = {
-	.name		= "i2c-rcar",
-	.id		= 3,
-	.resource	= rcar_i2c3_res,
-	.num_resources	= ARRAY_SIZE(rcar_i2c3_res),
-};
-
-static struct resource sata_resources[] = {
-	[0] = {
-		.name	= "rcar-sata",
-		.start	= 0xfc600000,
-		.end	= 0xfc601fff,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_iid(0x84),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device sata_device = {
-	.name		= "sata_rcar",
-	.id		= -1,
-	.resource	= sata_resources,
-	.num_resources	= ARRAY_SIZE(sata_resources),
-	.dev		= {
-		.dma_mask		= &sata_device.dev.coherent_dma_mask,
-		.coherent_dma_mask	= DMA_BIT_MASK(32),
-	},
-};
-
-/* USB */
-static struct usb_phy *phy;
-
-static int usb_power_on(struct platform_device *pdev)
-{
-	if (IS_ERR(phy))
-		return PTR_ERR(phy);
-
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_get_sync(&pdev->dev);
-
-	usb_phy_init(phy);
-
-	return 0;
-}
-
-static void usb_power_off(struct platform_device *pdev)
-{
-	if (IS_ERR(phy))
-		return;
-
-	usb_phy_shutdown(phy);
-
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-}
-
-static int ehci_init_internal_buffer(struct usb_hcd *hcd)
-{
-	/*
-	 * Below are recommended values from the datasheet;
-	 * see [USB :: Setting of EHCI Internal Buffer].
-	 */
-	/* EHCI IP internal buffer setting */
-	iowrite32(0x00ff0040, hcd->regs + 0x0094);
-	/* EHCI IP internal buffer enable */
-	iowrite32(0x00000001, hcd->regs + 0x009C);
-
-	return 0;
-}
-
-static struct usb_ehci_pdata ehcix_pdata = {
-	.power_on	= usb_power_on,
-	.power_off	= usb_power_off,
-	.power_suspend	= usb_power_off,
-	.pre_setup	= ehci_init_internal_buffer,
-};
-
-static struct resource ehci0_resources[] = {
-	[0] = {
-		.start	= 0xffe70000,
-		.end	= 0xffe70400 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_iid(0x4c),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device ehci0_device = {
-	.name	= "ehci-platform",
-	.id	= 0,
-	.dev	= {
-		.dma_mask		= &ehci0_device.dev.coherent_dma_mask,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &ehcix_pdata,
-	},
-	.num_resources	= ARRAY_SIZE(ehci0_resources),
-	.resource	= ehci0_resources,
-};
-
-static struct resource ehci1_resources[] = {
-	[0] = {
-		.start	= 0xfff70000,
-		.end	= 0xfff70400 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_iid(0x4d),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device ehci1_device = {
-	.name	= "ehci-platform",
-	.id	= 1,
-	.dev	= {
-		.dma_mask		= &ehci1_device.dev.coherent_dma_mask,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &ehcix_pdata,
-	},
-	.num_resources	= ARRAY_SIZE(ehci1_resources),
-	.resource	= ehci1_resources,
-};
-
-static struct usb_ohci_pdata ohcix_pdata = {
-	.power_on	= usb_power_on,
-	.power_off	= usb_power_off,
-	.power_suspend	= usb_power_off,
-};
-
-static struct resource ohci0_resources[] = {
-	[0] = {
-		.start	= 0xffe70400,
-		.end	= 0xffe70800 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_iid(0x4c),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device ohci0_device = {
-	.name	= "ohci-platform",
-	.id	= 0,
-	.dev	= {
-		.dma_mask		= &ohci0_device.dev.coherent_dma_mask,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &ohcix_pdata,
-	},
-	.num_resources	= ARRAY_SIZE(ohci0_resources),
-	.resource	= ohci0_resources,
-};
-
-static struct resource ohci1_resources[] = {
-	[0] = {
-		.start	= 0xfff70400,
-		.end	= 0xfff70800 - 1,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= gic_iid(0x4d),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device ohci1_device = {
-	.name	= "ohci-platform",
-	.id	= 1,
-	.dev	= {
-		.dma_mask		= &ohci1_device.dev.coherent_dma_mask,
-		.coherent_dma_mask	= 0xffffffff,
-		.platform_data		= &ohcix_pdata,
-	},
-	.num_resources	= ARRAY_SIZE(ohci1_resources),
-	.resource	= ohci1_resources,
-};
-
-/* HPB-DMA */
-
-/* Asynchronous mode register bits */
-#define HPB_DMAE_ASYNCMDR_ASMD43_MASK		BIT(23)	/* MMC1 */
-#define HPB_DMAE_ASYNCMDR_ASMD43_SINGLE		BIT(23)	/* MMC1 */
-#define HPB_DMAE_ASYNCMDR_ASMD43_MULTI		0	/* MMC1 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD43_MASK		BIT(22)	/* MMC1 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD43_BURST	BIT(22)	/* MMC1 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD43_NBURST	0	/* MMC1 */
-#define HPB_DMAE_ASYNCMDR_ASMD24_MASK		BIT(21)	/* MMC0 */
-#define HPB_DMAE_ASYNCMDR_ASMD24_SINGLE		BIT(21)	/* MMC0 */
-#define HPB_DMAE_ASYNCMDR_ASMD24_MULTI		0	/* MMC0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD24_MASK		BIT(20)	/* MMC0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD24_BURST	BIT(20)	/* MMC0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD24_NBURST	0	/* MMC0 */
-#define HPB_DMAE_ASYNCMDR_ASMD41_MASK		BIT(19)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD41_SINGLE		BIT(19)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD41_MULTI		0	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD41_MASK		BIT(18)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD41_BURST	BIT(18)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD41_NBURST	0	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD40_MASK		BIT(17)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD40_SINGLE		BIT(17)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD40_MULTI		0	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD40_MASK		BIT(16)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD40_BURST	BIT(16)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD40_NBURST	0	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD39_MASK		BIT(15)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD39_SINGLE		BIT(15)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD39_MULTI		0	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD39_MASK		BIT(14)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD39_BURST	BIT(14)	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD39_NBURST	0	/* SDHI3 */
-#define HPB_DMAE_ASYNCMDR_ASMD27_MASK		BIT(13)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD27_SINGLE		BIT(13)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD27_MULTI		0	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD27_MASK		BIT(12)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD27_BURST	BIT(12)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD27_NBURST	0	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD26_MASK		BIT(11)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD26_SINGLE		BIT(11)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD26_MULTI		0	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD26_MASK		BIT(10)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD26_BURST	BIT(10)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD26_NBURST	0	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD25_MASK		BIT(9)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD25_SINGLE		BIT(9)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD25_MULTI		0	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD25_MASK		BIT(8)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD25_BURST	BIT(8)	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD25_NBURST	0	/* SDHI2 */
-#define HPB_DMAE_ASYNCMDR_ASMD23_MASK		BIT(7)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD23_SINGLE		BIT(7)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD23_MULTI		0	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD23_MASK		BIT(6)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD23_BURST	BIT(6)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD23_NBURST	0	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD22_MASK		BIT(5)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD22_SINGLE		BIT(5)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD22_MULTI		0	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD22_MASK		BIT(4)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD22_BURST	BIT(4)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD22_NBURST	0	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD21_MASK		BIT(3)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD21_SINGLE		BIT(3)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD21_MULTI		0	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD21_MASK		BIT(2)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD21_BURST	BIT(2)	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD21_NBURST	0	/* SDHI0 */
-#define HPB_DMAE_ASYNCMDR_ASMD20_MASK		BIT(1)	/* SDHI1 */
-#define HPB_DMAE_ASYNCMDR_ASMD20_SINGLE		BIT(1)	/* SDHI1 */
-#define HPB_DMAE_ASYNCMDR_ASMD20_MULTI		0	/* SDHI1 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD20_MASK		BIT(0)	/* SDHI1 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD20_BURST	BIT(0)	/* SDHI1 */
-#define HPB_DMAE_ASYNCMDR_ASBTMD20_NBURST	0	/* SDHI1 */
-
-static const struct hpb_dmae_slave_config hpb_dmae_slaves[] = {
-	{
-		.id	= HPBDMA_SLAVE_SDHI0_TX,
-		.addr	= 0xffe4c000 + 0x30,
-		.dcr	= HPB_DMAE_DCR_SPDS_16BIT |
-			  HPB_DMAE_DCR_DMDL |
-			  HPB_DMAE_DCR_DPDS_16BIT,
-		.rstr	= HPB_DMAE_ASYNCRSTR_ASRST21 |
-			  HPB_DMAE_ASYNCRSTR_ASRST22 |
-			  HPB_DMAE_ASYNCRSTR_ASRST23,
-		.mdr	= HPB_DMAE_ASYNCMDR_ASMD21_SINGLE |
-			  HPB_DMAE_ASYNCMDR_ASBTMD21_NBURST,
-		.mdm	= HPB_DMAE_ASYNCMDR_ASMD21_MASK |
-			  HPB_DMAE_ASYNCMDR_ASBTMD21_MASK,
-		.port	= 0x0D0C,
-		.flags	= HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
-		.dma_ch	= 21,
-	}, {
-		.id	= HPBDMA_SLAVE_SDHI0_RX,
-		.addr	= 0xffe4c000 + 0x30,
-		.dcr	= HPB_DMAE_DCR_SMDL |
-			  HPB_DMAE_DCR_SPDS_16BIT |
-			  HPB_DMAE_DCR_DPDS_16BIT,
-		.rstr	= HPB_DMAE_ASYNCRSTR_ASRST21 |
-			  HPB_DMAE_ASYNCRSTR_ASRST22 |
-			  HPB_DMAE_ASYNCRSTR_ASRST23,
-		.mdr	= HPB_DMAE_ASYNCMDR_ASMD22_SINGLE |
-			  HPB_DMAE_ASYNCMDR_ASBTMD22_NBURST,
-		.mdm	= HPB_DMAE_ASYNCMDR_ASMD22_MASK |
-			  HPB_DMAE_ASYNCMDR_ASBTMD22_MASK,
-		.port	= 0x0D0C,
-		.flags	= HPB_DMAE_SET_ASYNC_RESET | HPB_DMAE_SET_ASYNC_MODE,
-		.dma_ch	= 22,
-	},
-};
-
-static const struct hpb_dmae_channel hpb_dmae_channels[] = {
-	HPB_DMAE_CHANNEL(0x93, HPBDMA_SLAVE_SDHI0_TX), /* ch. 21 */
-	HPB_DMAE_CHANNEL(0x93, HPBDMA_SLAVE_SDHI0_RX), /* ch. 22 */
-};
-
-static struct hpb_dmae_pdata dma_platform_data __initdata = {
-	.slaves			= hpb_dmae_slaves,
-	.num_slaves		= ARRAY_SIZE(hpb_dmae_slaves),
-	.channels		= hpb_dmae_channels,
-	.num_channels		= ARRAY_SIZE(hpb_dmae_channels),
-	.ts_shift		= {
-		[XMIT_SZ_8BIT]	= 0,
-		[XMIT_SZ_16BIT]	= 1,
-		[XMIT_SZ_32BIT]	= 2,
-	},
-	.num_hw_channels	= 44,
-};
-
-static struct resource hpb_dmae_resources[] __initdata = {
-	/* Channel registers */
-	DEFINE_RES_MEM(0xffc08000, 0x1000),
-	/* Common registers */
-	DEFINE_RES_MEM(0xffc09000, 0x170),
-	/* Asynchronous reset registers */
-	DEFINE_RES_MEM(0xffc00300, 4),
-	/* Asynchronous mode registers */
-	DEFINE_RES_MEM(0xffc00400, 4),
-	/* IRQ for DMA channels */
-	DEFINE_RES_NAMED(gic_iid(0x8e), 12, NULL, IORESOURCE_IRQ),
-};
-
-static void __init r8a7779_register_hpb_dmae(void)
-{
-	platform_device_register_resndata(NULL, "hpb-dma-engine",
-					  -1, hpb_dmae_resources,
-					  ARRAY_SIZE(hpb_dmae_resources),
-					  &dma_platform_data,
-					  sizeof(dma_platform_data));
-}
-
-static struct platform_device *r8a7779_early_devices[] __initdata = {
-	&tmu0_device,
-};
-
-static struct platform_device *r8a7779_standard_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
-	&scif2_device,
-	&scif3_device,
-	&scif4_device,
-	&scif5_device,
-	&i2c0_device,
-	&i2c1_device,
-	&i2c2_device,
-	&i2c3_device,
-	&sata_device,
-};
-
-void __init r8a7779_add_standard_devices(void)
-{
-#ifdef CONFIG_CACHE_L2X0
-	/* Shared attribute override enable, 64K*16way */
-	l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
-#endif
-	r8a7779_pm_init();
-
-	r8a7779_init_pm_domains();
-
-	platform_add_devices(r8a7779_early_devices,
-			    ARRAY_SIZE(r8a7779_early_devices));
-	platform_add_devices(r8a7779_standard_devices,
-			    ARRAY_SIZE(r8a7779_standard_devices));
-	r8a7779_register_hpb_dmae();
-}
-
-void __init r8a7779_add_early_devices(void)
-{
-	early_platform_add_devices(r8a7779_early_devices,
-				   ARRAY_SIZE(r8a7779_early_devices));
-
-	/* Early serial console setup is not included here due to
-	 * memory map collisions. The SCIF serial ports in r8a7779
-	 * are difficult to identity map 1:1 due to collision with the
-	 * virtual memory range used by the coherent DMA code on ARM.
-	 *
-	 * Anyone wanting to debug early can remove UPF_IOREMAP from
-	 * the sh-sci serial console platform data, adjust mapbase
-	 * to a static M:N virt:phys mapping that needs to be added to
-	 * the mappings passed with iotable_init() above.
-	 *
-	 * Then add a call to shmobile_setup_console() from this function.
-	 *
-	 * As a final step pass earlyprintk=sh-sci.2,115200 on the kernel
-	 * command line when using the Marzen board.
-	 */
-}
-
-static struct platform_device *r8a7779_late_devices[] __initdata = {
-	&ehci0_device,
-	&ehci1_device,
-	&ohci0_device,
-	&ohci1_device,
-};
-
-void __init r8a7779_init_late(void)
-{
-	/* get USB PHY */
-	phy = usb_get_phy(USB_PHY_TYPE_USB2);
-
-	shmobile_init_late();
-	platform_add_devices(r8a7779_late_devices,
-			     ARRAY_SIZE(r8a7779_late_devices));
-}
-
-#ifdef CONFIG_USE_OF
-void __init r8a7779_init_irq_dt(void)
-{
-#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
-	void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
-	void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
-#endif
-	gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE);
-
-#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
-	gic_init(0, 29, gic_dist_base, gic_cpu_base);
-#else
 	irqchip_init();
-#endif
+
 	/* route all interrupts to ARM */
 	__raw_writel(0xffffffff, INT2NTSR0);
 	__raw_writel(0x3fffffff, INT2NTSR1);
@@ -740,7 +78,7 @@
 
 #define MODEMR		0xffcc0020
 
-u32 __init r8a7779_read_mode_pins(void)
+static u32 __init r8a7779_read_mode_pins(void)
 {
 	static u32 mode;
 	static bool mode_valid;
@@ -756,16 +94,23 @@
 	return mode;
 }
 
-static const char *r8a7779_compat_dt[] __initdata = {
+static void __init r8a7779_init_time(void)
+{
+	r8a7779_clocks_init(r8a7779_read_mode_pins());
+	clocksource_of_init();
+}
+
+static const char *const r8a7779_compat_dt[] __initconst = {
 	"renesas,r8a7779",
 	NULL,
 };
 
 DT_MACHINE_START(R8A7779_DT, "Generic R8A7779 (Flattened Device Tree)")
+	.smp		= smp_ops(r8a7779_smp_ops),
 	.map_io		= r8a7779_map_io,
 	.init_early	= shmobile_init_delay,
+	.init_time	= r8a7779_init_time,
 	.init_irq	= r8a7779_init_irq_dt,
 	.init_late	= shmobile_init_late,
 	.dt_compat	= r8a7779_compat_dt,
 MACHINE_END
-#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/setup-r8a7791.c b/arch/arm/mach-shmobile/setup-r8a7791.c
index ef8eb3a..3b8dbaf 100644
--- a/arch/arm/mach-shmobile/setup-r8a7791.c
+++ b/arch/arm/mach-shmobile/setup-r8a7791.c
@@ -23,7 +23,7 @@
 #include "r8a7791.h"
 #include "rcar-gen2.h"
 
-static const char *r8a7791_boards_compat_dt[] __initdata = {
+static const char *const r8a7791_boards_compat_dt[] __initconst = {
 	"renesas,r8a7791",
 	NULL,
 };
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c
new file mode 100644
index 0000000..1d2825c
--- /dev/null
+++ b/arch/arm/mach-shmobile/setup-r8a7793.c
@@ -0,0 +1,33 @@
+/*
+ * r8a7793 processor support
+ *
+ * Copyright (C) 2015  Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <asm/mach/arch.h>
+
+#include "common.h"
+#include "rcar-gen2.h"
+
+static const char *const r8a7793_boards_compat_dt[] __initconst = {
+	"renesas,r8a7793",
+	NULL,
+};
+
+DT_MACHINE_START(R8A7793_DT, "Generic R8A7793 (Flattened Device Tree)")
+	.init_early	= shmobile_init_delay,
+	.init_time	= rcar_gen2_timer_init,
+	.init_late	= shmobile_init_late,
+	.reserve	= rcar_gen2_reserve,
+	.dt_compat	= r8a7793_boards_compat_dt,
+MACHINE_END
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 5d13595..aa33392 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -128,9 +128,7 @@
 #endif /* CONFIG_ARM_ARCH_TIMER */
 
 	rcar_gen2_clocks_init(mode);
-#ifdef CONFIG_ARCH_SHMOBILE_MULTI
 	clocksource_of_init();
-#endif
 }
 
 struct memory_reserve_config {
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index fb2ab75..99a2004 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -18,28 +18,17 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
-#include <linux/platform_device.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/input.h>
-#include <linux/i2c/i2c-sh_mobile.h>
 #include <linux/io.h>
-#include <linux/serial_sci.h>
-#include <linux/sh_dma.h>
-#include <linux/sh_timer.h>
-#include <linux/platform_data/sh_ipmmu.h>
-#include <linux/platform_data/irq-renesas-intc-irqpin.h>
 
 #include <asm/hardware/cache-l2x0.h>
-#include <asm/mach-types.h>
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
 #include "common.h"
-#include "dma-register.h"
-#include "intc.h"
-#include "irqs.h"
 #include "sh73a0.h"
 
 static struct map_desc sh73a0_io_desc[] __initdata = {
@@ -54,737 +43,12 @@
 	},
 };
 
-void __init sh73a0_map_io(void)
+static void __init sh73a0_map_io(void)
 {
 	debug_ll_io_init();
 	iotable_init(sh73a0_io_desc, ARRAY_SIZE(sh73a0_io_desc));
 }
 
-/* PFC */
-static struct resource pfc_resources[] __initdata = {
-	DEFINE_RES_MEM(0xe6050000, 0x8000),
-	DEFINE_RES_MEM(0xe605801c, 0x000c),
-};
-
-void __init sh73a0_pinmux_init(void)
-{
-	platform_device_register_simple("pfc-sh73a0", -1, pfc_resources,
-					ARRAY_SIZE(pfc_resources));
-}
-
-/* SCIF */
-#define SH73A0_SCIF(scif_type, index, baseaddr, irq)		\
-static struct plat_sci_port scif##index##_platform_data = {	\
-	.type		= scif_type,				\
-	.flags		= UPF_BOOT_AUTOCONF,			\
-	.scscr		= SCSCR_RE | SCSCR_TE,			\
-};								\
-								\
-static struct resource scif##index##_resources[] = {		\
-	DEFINE_RES_MEM(baseaddr, 0x100),			\
-	DEFINE_RES_IRQ(irq),					\
-};								\
-								\
-static struct platform_device scif##index##_device = {		\
-	.name		= "sh-sci",				\
-	.id		= index,				\
-	.resource	= scif##index##_resources,		\
-	.num_resources	= ARRAY_SIZE(scif##index##_resources),	\
-	.dev		= {					\
-		.platform_data	= &scif##index##_platform_data,	\
-	},							\
-}
-
-SH73A0_SCIF(PORT_SCIFA, 0, 0xe6c40000, gic_spi(72));
-SH73A0_SCIF(PORT_SCIFA, 1, 0xe6c50000, gic_spi(73));
-SH73A0_SCIF(PORT_SCIFA, 2, 0xe6c60000, gic_spi(74));
-SH73A0_SCIF(PORT_SCIFA, 3, 0xe6c70000, gic_spi(75));
-SH73A0_SCIF(PORT_SCIFA, 4, 0xe6c80000, gic_spi(78));
-SH73A0_SCIF(PORT_SCIFA, 5, 0xe6cb0000, gic_spi(79));
-SH73A0_SCIF(PORT_SCIFA, 6, 0xe6cc0000, gic_spi(156));
-SH73A0_SCIF(PORT_SCIFA, 7, 0xe6cd0000, gic_spi(143));
-SH73A0_SCIF(PORT_SCIFB, 8, 0xe6c30000, gic_spi(80));
-
-static struct sh_timer_config cmt1_platform_data = {
-	.channels_mask = 0x3f,
-};
-
-static struct resource cmt1_resources[] = {
-	DEFINE_RES_MEM(0xe6138000, 0x200),
-	DEFINE_RES_IRQ(gic_spi(65)),
-};
-
-static struct platform_device cmt1_device = {
-	.name		= "sh-cmt-48",
-	.id		= 1,
-	.dev = {
-		.platform_data	= &cmt1_platform_data,
-	},
-	.resource	= cmt1_resources,
-	.num_resources	= ARRAY_SIZE(cmt1_resources),
-};
-
-/* TMU */
-static struct sh_timer_config tmu0_platform_data = {
-	.channels_mask = 7,
-};
-
-static struct resource tmu0_resources[] = {
-	DEFINE_RES_MEM(0xfff60000, 0x2c),
-	DEFINE_RES_IRQ(intcs_evt2irq(0xe80)),
-	DEFINE_RES_IRQ(intcs_evt2irq(0xea0)),
-	DEFINE_RES_IRQ(intcs_evt2irq(0xec0)),
-};
-
-static struct platform_device tmu0_device = {
-	.name		= "sh-tmu",
-	.id		= 0,
-	.dev = {
-		.platform_data	= &tmu0_platform_data,
-	},
-	.resource	= tmu0_resources,
-	.num_resources	= ARRAY_SIZE(tmu0_resources),
-};
-
-static struct resource i2c0_resources[] = {
-	[0] = DEFINE_RES_MEM(0xe6820000, 0x426),
-	[1] = {
-		.start	= gic_spi(167),
-		.end	= gic_spi(170),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource i2c1_resources[] = {
-	[0] = DEFINE_RES_MEM(0xe6822000, 0x426),
-	[1] = {
-		.start	= gic_spi(51),
-		.end	= gic_spi(54),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource i2c2_resources[] = {
-	[0] = DEFINE_RES_MEM(0xe6824000, 0x426),
-	[1] = {
-		.start	= gic_spi(171),
-		.end	= gic_spi(174),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource i2c3_resources[] = {
-	[0] = DEFINE_RES_MEM(0xe6826000, 0x426),
-	[1] = {
-		.start	= gic_spi(183),
-		.end	= gic_spi(186),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct resource i2c4_resources[] = {
-	[0] = DEFINE_RES_MEM(0xe6828000, 0x426),
-	[1] = {
-		.start	= gic_spi(187),
-		.end	= gic_spi(190),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct i2c_sh_mobile_platform_data i2c_platform_data = {
-	.clks_per_count	= 2,
-};
-
-static struct platform_device i2c0_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 0,
-	.resource	= i2c0_resources,
-	.num_resources	= ARRAY_SIZE(i2c0_resources),
-	.dev		= {
-		.platform_data	= &i2c_platform_data,
-	},
-};
-
-static struct platform_device i2c1_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 1,
-	.resource	= i2c1_resources,
-	.num_resources	= ARRAY_SIZE(i2c1_resources),
-	.dev		= {
-		.platform_data	= &i2c_platform_data,
-	},
-};
-
-static struct platform_device i2c2_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 2,
-	.resource	= i2c2_resources,
-	.num_resources	= ARRAY_SIZE(i2c2_resources),
-	.dev		= {
-		.platform_data	= &i2c_platform_data,
-	},
-};
-
-static struct platform_device i2c3_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 3,
-	.resource	= i2c3_resources,
-	.num_resources	= ARRAY_SIZE(i2c3_resources),
-	.dev		= {
-		.platform_data	= &i2c_platform_data,
-	},
-};
-
-static struct platform_device i2c4_device = {
-	.name		= "i2c-sh_mobile",
-	.id		= 4,
-	.resource	= i2c4_resources,
-	.num_resources	= ARRAY_SIZE(i2c4_resources),
-	.dev		= {
-		.platform_data	= &i2c_platform_data,
-	},
-};
-
-static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
-	{
-		.slave_id	= SHDMA_SLAVE_SCIF0_TX,
-		.addr		= 0xe6c40020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x21,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF0_RX,
-		.addr		= 0xe6c40024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x22,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF1_TX,
-		.addr		= 0xe6c50020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x25,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF1_RX,
-		.addr		= 0xe6c50024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x26,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF2_TX,
-		.addr		= 0xe6c60020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x29,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF2_RX,
-		.addr		= 0xe6c60024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x2a,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF3_TX,
-		.addr		= 0xe6c70020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x2d,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF3_RX,
-		.addr		= 0xe6c70024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x2e,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF4_TX,
-		.addr		= 0xe6c80020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x39,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF4_RX,
-		.addr		= 0xe6c80024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x3a,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF5_TX,
-		.addr		= 0xe6cb0020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x35,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF5_RX,
-		.addr		= 0xe6cb0024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x36,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF6_TX,
-		.addr		= 0xe6cc0020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x1d,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF6_RX,
-		.addr		= 0xe6cc0024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x1e,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF7_TX,
-		.addr		= 0xe6cd0020,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x19,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF7_RX,
-		.addr		= 0xe6cd0024,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x1a,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF8_TX,
-		.addr		= 0xe6c30040,
-		.chcr		= CHCR_TX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x3d,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SCIF8_RX,
-		.addr		= 0xe6c30060,
-		.chcr		= CHCR_RX(XMIT_SZ_8BIT),
-		.mid_rid	= 0x3e,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI0_TX,
-		.addr		= 0xee100030,
-		.chcr		= CHCR_TX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xc1,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI0_RX,
-		.addr		= 0xee100030,
-		.chcr		= CHCR_RX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xc2,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI1_TX,
-		.addr		= 0xee120030,
-		.chcr		= CHCR_TX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xc9,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI1_RX,
-		.addr		= 0xee120030,
-		.chcr		= CHCR_RX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xca,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI2_TX,
-		.addr		= 0xee140030,
-		.chcr		= CHCR_TX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xcd,
-	}, {
-		.slave_id	= SHDMA_SLAVE_SDHI2_RX,
-		.addr		= 0xee140030,
-		.chcr		= CHCR_RX(XMIT_SZ_16BIT),
-		.mid_rid	= 0xce,
-	}, {
-		.slave_id	= SHDMA_SLAVE_MMCIF_TX,
-		.addr		= 0xe6bd0034,
-		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd1,
-	}, {
-		.slave_id	= SHDMA_SLAVE_MMCIF_RX,
-		.addr		= 0xe6bd0034,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd2,
-	},
-};
-
-#define DMAE_CHANNEL(_offset)					\
-	{							\
-		.offset         = _offset - 0x20,		\
-		.dmars          = _offset - 0x20 + 0x40,	\
-	}
-
-static const struct sh_dmae_channel sh73a0_dmae_channels[] = {
-	DMAE_CHANNEL(0x8000),
-	DMAE_CHANNEL(0x8080),
-	DMAE_CHANNEL(0x8100),
-	DMAE_CHANNEL(0x8180),
-	DMAE_CHANNEL(0x8200),
-	DMAE_CHANNEL(0x8280),
-	DMAE_CHANNEL(0x8300),
-	DMAE_CHANNEL(0x8380),
-	DMAE_CHANNEL(0x8400),
-	DMAE_CHANNEL(0x8480),
-	DMAE_CHANNEL(0x8500),
-	DMAE_CHANNEL(0x8580),
-	DMAE_CHANNEL(0x8600),
-	DMAE_CHANNEL(0x8680),
-	DMAE_CHANNEL(0x8700),
-	DMAE_CHANNEL(0x8780),
-	DMAE_CHANNEL(0x8800),
-	DMAE_CHANNEL(0x8880),
-	DMAE_CHANNEL(0x8900),
-	DMAE_CHANNEL(0x8980),
-};
-
-static struct sh_dmae_pdata sh73a0_dmae_platform_data = {
-	.slave          = sh73a0_dmae_slaves,
-	.slave_num      = ARRAY_SIZE(sh73a0_dmae_slaves),
-	.channel        = sh73a0_dmae_channels,
-	.channel_num    = ARRAY_SIZE(sh73a0_dmae_channels),
-	.ts_low_shift   = TS_LOW_SHIFT,
-	.ts_low_mask    = TS_LOW_BIT << TS_LOW_SHIFT,
-	.ts_high_shift  = TS_HI_SHIFT,
-	.ts_high_mask   = TS_HI_BIT << TS_HI_SHIFT,
-	.ts_shift       = dma_ts_shift,
-	.ts_shift_num   = ARRAY_SIZE(dma_ts_shift),
-	.dmaor_init     = DMAOR_DME,
-};
-
-static struct resource sh73a0_dmae_resources[] = {
-	DEFINE_RES_MEM(0xfe000020, 0x89e0),
-	{
-		.name	= "error_irq",
-		.start  = gic_spi(129),
-		.end    = gic_spi(129),
-		.flags  = IORESOURCE_IRQ,
-	},
-	{
-		/* IRQ for channels 0-19 */
-		.start  = gic_spi(109),
-		.end    = gic_spi(128),
-		.flags  = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device dma0_device = {
-	.name		= "sh-dma-engine",
-	.id		= 0,
-	.resource	= sh73a0_dmae_resources,
-	.num_resources	= ARRAY_SIZE(sh73a0_dmae_resources),
-	.dev		= {
-		.platform_data	= &sh73a0_dmae_platform_data,
-	},
-};
-
-/* MPDMAC */
-static const struct sh_dmae_slave_config sh73a0_mpdma_slaves[] = {
-	{
-		.slave_id	= SHDMA_SLAVE_FSI2A_RX,
-		.addr		= 0xec230020,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd6, /* CHECK ME */
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSI2A_TX,
-		.addr		= 0xec230024,
-		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd5, /* CHECK ME */
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSI2C_RX,
-		.addr		= 0xec230060,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xda, /* CHECK ME */
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSI2C_TX,
-		.addr		= 0xec230064,
-		.chcr		= CHCR_TX(XMIT_SZ_32BIT),
-		.mid_rid	= 0xd9, /* CHECK ME */
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSI2B_RX,
-		.addr		= 0xec240020,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0x8e, /* CHECK ME */
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSI2B_TX,
-		.addr		= 0xec240024,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0x8d, /* CHECK ME */
-	}, {
-		.slave_id	= SHDMA_SLAVE_FSI2D_RX,
-		.addr		=  0xec240060,
-		.chcr		= CHCR_RX(XMIT_SZ_32BIT),
-		.mid_rid	= 0x9a, /* CHECK ME */
-	},
-};
-
-#define MPDMA_CHANNEL(a, b, c)			\
-{						\
-	.offset		= a,			\
-	.dmars		= b,			\
-	.dmars_bit	= c,			\
-	.chclr_offset	= (0x220 - 0x20) + a	\
-}
-
-static const struct sh_dmae_channel sh73a0_mpdma_channels[] = {
-	MPDMA_CHANNEL(0x00, 0, 0),
-	MPDMA_CHANNEL(0x10, 0, 8),
-	MPDMA_CHANNEL(0x20, 4, 0),
-	MPDMA_CHANNEL(0x30, 4, 8),
-	MPDMA_CHANNEL(0x50, 8, 0),
-	MPDMA_CHANNEL(0x70, 8, 8),
-};
-
-static struct sh_dmae_pdata sh73a0_mpdma_platform_data = {
-	.slave		= sh73a0_mpdma_slaves,
-	.slave_num	= ARRAY_SIZE(sh73a0_mpdma_slaves),
-	.channel	= sh73a0_mpdma_channels,
-	.channel_num	= ARRAY_SIZE(sh73a0_mpdma_channels),
-	.ts_low_shift	= TS_LOW_SHIFT,
-	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT,
-	.ts_high_shift	= TS_HI_SHIFT,
-	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT,
-	.ts_shift	= dma_ts_shift,
-	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift),
-	.dmaor_init	= DMAOR_DME,
-	.chclr_present	= 1,
-};
-
-/* Resource order important! */
-static struct resource sh73a0_mpdma_resources[] = {
-	/* Channel registers and DMAOR */
-	DEFINE_RES_MEM(0xec618020, 0x270),
-	/* DMARSx */
-	DEFINE_RES_MEM(0xec619000, 0xc),
-	{
-		.name	= "error_irq",
-		.start	= gic_spi(181),
-		.end	= gic_spi(181),
-		.flags	= IORESOURCE_IRQ,
-	},
-	{
-		/* IRQ for channels 0-5 */
-		.start	= gic_spi(175),
-		.end	= gic_spi(180),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device mpdma0_device = {
-	.name		= "sh-dma-engine",
-	.id		= 1,
-	.resource	= sh73a0_mpdma_resources,
-	.num_resources	= ARRAY_SIZE(sh73a0_mpdma_resources),
-	.dev		= {
-		.platform_data	= &sh73a0_mpdma_platform_data,
-	},
-};
-
-static struct resource pmu_resources[] = {
-	[0] = {
-		.start	= gic_spi(55),
-		.end	= gic_spi(55),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[1] = {
-		.start	= gic_spi(56),
-		.end	= gic_spi(56),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device pmu_device = {
-	.name		= "armv7-pmu",
-	.id		= -1,
-	.num_resources	= ARRAY_SIZE(pmu_resources),
-	.resource	= pmu_resources,
-};
-
-/* an IPMMU module for ICB */
-static struct resource ipmmu_resources[] = {
-	DEFINE_RES_MEM(0xfe951000, 0x100),
-};
-
-static const char * const ipmmu_dev_names[] = {
-	"sh_mobile_lcdc_fb.0",
-};
-
-static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
-	.dev_names = ipmmu_dev_names,
-	.num_dev_names = ARRAY_SIZE(ipmmu_dev_names),
-};
-
-static struct platform_device ipmmu_device = {
-	.name           = "ipmmu",
-	.id             = -1,
-	.dev = {
-		.platform_data = &ipmmu_platform_data,
-	},
-	.resource       = ipmmu_resources,
-	.num_resources  = ARRAY_SIZE(ipmmu_resources),
-};
-
-static struct renesas_intc_irqpin_config irqpin0_platform_data = {
-	.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
-	.control_parent = true,
-};
-
-static struct resource irqpin0_resources[] = {
-	DEFINE_RES_MEM(0xe6900000, 4), /* ICR1A */
-	DEFINE_RES_MEM(0xe6900010, 4), /* INTPRI00A */
-	DEFINE_RES_MEM(0xe6900020, 1), /* INTREQ00A */
-	DEFINE_RES_MEM(0xe6900040, 1), /* INTMSK00A */
-	DEFINE_RES_MEM(0xe6900060, 1), /* INTMSKCLR00A */
-	DEFINE_RES_IRQ(gic_spi(1)), /* IRQ0 */
-	DEFINE_RES_IRQ(gic_spi(2)), /* IRQ1 */
-	DEFINE_RES_IRQ(gic_spi(3)), /* IRQ2 */
-	DEFINE_RES_IRQ(gic_spi(4)), /* IRQ3 */
-	DEFINE_RES_IRQ(gic_spi(5)), /* IRQ4 */
-	DEFINE_RES_IRQ(gic_spi(6)), /* IRQ5 */
-	DEFINE_RES_IRQ(gic_spi(7)), /* IRQ6 */
-	DEFINE_RES_IRQ(gic_spi(8)), /* IRQ7 */
-};
-
-static struct platform_device irqpin0_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 0,
-	.resource	= irqpin0_resources,
-	.num_resources	= ARRAY_SIZE(irqpin0_resources),
-	.dev		= {
-		.platform_data	= &irqpin0_platform_data,
-	},
-};
-
-static struct renesas_intc_irqpin_config irqpin1_platform_data = {
-	.irq_base = irq_pin(8), /* IRQ8 -> IRQ15 */
-	.control_parent = true, /* Disable spurious IRQ10 */
-};
-
-static struct resource irqpin1_resources[] = {
-	DEFINE_RES_MEM(0xe6900004, 4), /* ICR2A */
-	DEFINE_RES_MEM(0xe6900014, 4), /* INTPRI10A */
-	DEFINE_RES_MEM(0xe6900024, 1), /* INTREQ10A */
-	DEFINE_RES_MEM(0xe6900044, 1), /* INTMSK10A */
-	DEFINE_RES_MEM(0xe6900064, 1), /* INTMSKCLR10A */
-	DEFINE_RES_IRQ(gic_spi(9)), /* IRQ8 */
-	DEFINE_RES_IRQ(gic_spi(10)), /* IRQ9 */
-	DEFINE_RES_IRQ(gic_spi(11)), /* IRQ10 */
-	DEFINE_RES_IRQ(gic_spi(12)), /* IRQ11 */
-	DEFINE_RES_IRQ(gic_spi(13)), /* IRQ12 */
-	DEFINE_RES_IRQ(gic_spi(14)), /* IRQ13 */
-	DEFINE_RES_IRQ(gic_spi(15)), /* IRQ14 */
-	DEFINE_RES_IRQ(gic_spi(16)), /* IRQ15 */
-};
-
-static struct platform_device irqpin1_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 1,
-	.resource	= irqpin1_resources,
-	.num_resources	= ARRAY_SIZE(irqpin1_resources),
-	.dev		= {
-		.platform_data	= &irqpin1_platform_data,
-	},
-};
-
-static struct renesas_intc_irqpin_config irqpin2_platform_data = {
-	.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
-	.control_parent = true,
-};
-
-static struct resource irqpin2_resources[] = {
-	DEFINE_RES_MEM(0xe6900008, 4), /* ICR3A */
-	DEFINE_RES_MEM(0xe6900018, 4), /* INTPRI20A */
-	DEFINE_RES_MEM(0xe6900028, 1), /* INTREQ20A */
-	DEFINE_RES_MEM(0xe6900048, 1), /* INTMSK20A */
-	DEFINE_RES_MEM(0xe6900068, 1), /* INTMSKCLR20A */
-	DEFINE_RES_IRQ(gic_spi(17)), /* IRQ16 */
-	DEFINE_RES_IRQ(gic_spi(18)), /* IRQ17 */
-	DEFINE_RES_IRQ(gic_spi(19)), /* IRQ18 */
-	DEFINE_RES_IRQ(gic_spi(20)), /* IRQ19 */
-	DEFINE_RES_IRQ(gic_spi(21)), /* IRQ20 */
-	DEFINE_RES_IRQ(gic_spi(22)), /* IRQ21 */
-	DEFINE_RES_IRQ(gic_spi(23)), /* IRQ22 */
-	DEFINE_RES_IRQ(gic_spi(24)), /* IRQ23 */
-};
-
-static struct platform_device irqpin2_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 2,
-	.resource	= irqpin2_resources,
-	.num_resources	= ARRAY_SIZE(irqpin2_resources),
-	.dev		= {
-		.platform_data	= &irqpin2_platform_data,
-	},
-};
-
-static struct renesas_intc_irqpin_config irqpin3_platform_data = {
-	.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
-	.control_parent = true,
-};
-
-static struct resource irqpin3_resources[] = {
-	DEFINE_RES_MEM(0xe690000c, 4), /* ICR4A */
-	DEFINE_RES_MEM(0xe690001c, 4), /* INTPRI30A */
-	DEFINE_RES_MEM(0xe690002c, 1), /* INTREQ30A */
-	DEFINE_RES_MEM(0xe690004c, 1), /* INTMSK30A */
-	DEFINE_RES_MEM(0xe690006c, 1), /* INTMSKCLR30A */
-	DEFINE_RES_IRQ(gic_spi(25)), /* IRQ24 */
-	DEFINE_RES_IRQ(gic_spi(26)), /* IRQ25 */
-	DEFINE_RES_IRQ(gic_spi(27)), /* IRQ26 */
-	DEFINE_RES_IRQ(gic_spi(28)), /* IRQ27 */
-	DEFINE_RES_IRQ(gic_spi(29)), /* IRQ28 */
-	DEFINE_RES_IRQ(gic_spi(30)), /* IRQ29 */
-	DEFINE_RES_IRQ(gic_spi(31)), /* IRQ30 */
-	DEFINE_RES_IRQ(gic_spi(32)), /* IRQ31 */
-};
-
-static struct platform_device irqpin3_device = {
-	.name		= "renesas_intc_irqpin",
-	.id		= 3,
-	.resource	= irqpin3_resources,
-	.num_resources	= ARRAY_SIZE(irqpin3_resources),
-	.dev		= {
-		.platform_data	= &irqpin3_platform_data,
-	},
-};
-
-static struct platform_device *sh73a0_early_devices[] __initdata = {
-	&scif0_device,
-	&scif1_device,
-	&scif2_device,
-	&scif3_device,
-	&scif4_device,
-	&scif5_device,
-	&scif6_device,
-	&scif7_device,
-	&scif8_device,
-	&tmu0_device,
-	&ipmmu_device,
-	&cmt1_device,
-};
-
-static struct platform_device *sh73a0_late_devices[] __initdata = {
-	&i2c0_device,
-	&i2c1_device,
-	&i2c2_device,
-	&i2c3_device,
-	&i2c4_device,
-	&dma0_device,
-	&mpdma0_device,
-	&pmu_device,
-	&irqpin0_device,
-	&irqpin1_device,
-	&irqpin2_device,
-	&irqpin3_device,
-};
-
-#define SRCR2          IOMEM(0xe61580b0)
-
-void __init sh73a0_add_standard_devices(void)
-{
-	/* Clear software reset bit on SY-DMAC module */
-	__raw_writel(__raw_readl(SRCR2) & ~(1 << 18), SRCR2);
-
-	platform_add_devices(sh73a0_early_devices,
-			    ARRAY_SIZE(sh73a0_early_devices));
-	platform_add_devices(sh73a0_late_devices,
-			    ARRAY_SIZE(sh73a0_late_devices));
-}
-
-/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
-void __init __weak sh73a0_register_twd(void) { }
-
-void __init sh73a0_earlytimer_init(void)
-{
-	shmobile_init_delay();
-#ifndef CONFIG_COMMON_CLK
-	sh73a0_clock_init();
-#endif
-	shmobile_earlytimer_init();
-	sh73a0_register_twd();
-}
-
-void __init sh73a0_add_early_devices(void)
-{
-	early_platform_add_devices(sh73a0_early_devices,
-				   ARRAY_SIZE(sh73a0_early_devices));
-
-	/* setup early console here as well */
-	shmobile_setup_console();
-}
-
-#ifdef CONFIG_USE_OF
-
 static void __init sh73a0_generic_init(void)
 {
 #ifdef CONFIG_CACHE_L2X0
@@ -794,7 +58,7 @@
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static const char *sh73a0_boards_compat_dt[] __initdata = {
+static const char *const sh73a0_boards_compat_dt[] __initconst = {
 	"renesas,sh73a0",
 	NULL,
 };
@@ -807,4 +71,3 @@
 	.init_late	= shmobile_init_late,
 	.dt_compat	= sh73a0_boards_compat_dt,
 MACHINE_END
-#endif /* CONFIG_USE_OF */
diff --git a/arch/arm/mach-shmobile/sh73a0.h b/arch/arm/mach-shmobile/sh73a0.h
index 5a80f18..3964680 100644
--- a/arch/arm/mach-shmobile/sh73a0.h
+++ b/arch/arm/mach-shmobile/sh73a0.h
@@ -1,89 +1,6 @@
 #ifndef __ASM_SH73A0_H__
 #define __ASM_SH73A0_H__
 
-/* DMA slave IDs */
-enum {
-	SHDMA_SLAVE_INVALID,
-	SHDMA_SLAVE_SCIF0_TX,
-	SHDMA_SLAVE_SCIF0_RX,
-	SHDMA_SLAVE_SCIF1_TX,
-	SHDMA_SLAVE_SCIF1_RX,
-	SHDMA_SLAVE_SCIF2_TX,
-	SHDMA_SLAVE_SCIF2_RX,
-	SHDMA_SLAVE_SCIF3_TX,
-	SHDMA_SLAVE_SCIF3_RX,
-	SHDMA_SLAVE_SCIF4_TX,
-	SHDMA_SLAVE_SCIF4_RX,
-	SHDMA_SLAVE_SCIF5_TX,
-	SHDMA_SLAVE_SCIF5_RX,
-	SHDMA_SLAVE_SCIF6_TX,
-	SHDMA_SLAVE_SCIF6_RX,
-	SHDMA_SLAVE_SCIF7_TX,
-	SHDMA_SLAVE_SCIF7_RX,
-	SHDMA_SLAVE_SCIF8_TX,
-	SHDMA_SLAVE_SCIF8_RX,
-	SHDMA_SLAVE_SDHI0_TX,
-	SHDMA_SLAVE_SDHI0_RX,
-	SHDMA_SLAVE_SDHI1_TX,
-	SHDMA_SLAVE_SDHI1_RX,
-	SHDMA_SLAVE_SDHI2_TX,
-	SHDMA_SLAVE_SDHI2_RX,
-	SHDMA_SLAVE_MMCIF_TX,
-	SHDMA_SLAVE_MMCIF_RX,
-	SHDMA_SLAVE_FSI2A_TX,
-	SHDMA_SLAVE_FSI2A_RX,
-	SHDMA_SLAVE_FSI2B_TX,
-	SHDMA_SLAVE_FSI2B_RX,
-	SHDMA_SLAVE_FSI2C_TX,
-	SHDMA_SLAVE_FSI2C_RX,
-	SHDMA_SLAVE_FSI2D_RX,
-};
-
-/*
- *		SH73A0 IRQ LOCATION TABLE
- *
- * 416	-----------------------------------------
- *		IRQ0-IRQ15
- * 431	-----------------------------------------
- * ...
- * 448	-----------------------------------------
- *		sh73a0-intcs
- *		sh73a0-intca-irq-pins
- * 680	-----------------------------------------
- * ...
- * 700	-----------------------------------------
- *		sh73a0-pint0
- * 731	-----------------------------------------
- * 732	-----------------------------------------
- *		sh73a0-pint1
- * 739	-----------------------------------------
- * ...
- * 800	-----------------------------------------
- *		IRQ16-IRQ31
- * 815	-----------------------------------------
- * ...
- * 928	-----------------------------------------
- *		sh73a0-intca-irq-pins
- * 943	-----------------------------------------
- */
-
-/* PINT interrupts are located at Linux IRQ 700 and up */
-#define SH73A0_PINT0_IRQ(irq) ((irq) + 700)
-#define SH73A0_PINT1_IRQ(irq) ((irq) + 732)
-
-extern void sh73a0_init_irq(void);
-extern void sh73a0_init_irq_dt(void);
-extern void sh73a0_map_io(void);
-extern void sh73a0_earlytimer_init(void);
-extern void sh73a0_add_early_devices(void);
-extern void sh73a0_add_standard_devices(void);
-extern void sh73a0_clock_init(void);
-extern void sh73a0_pinmux_init(void);
-extern void sh73a0_pm_init(void);
-extern struct clk sh73a0_extal1_clk;
-extern struct clk sh73a0_extal2_clk;
-extern struct clk sh73a0_extcki_clk;
-extern struct clk sh73a0_extalr_clk;
 extern struct smp_operations sh73a0_smp_ops;
 
 #endif /* __ASM_SH73A0_H__ */
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 01f792f..353562b8 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -23,7 +23,6 @@
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
-#include <asm/smp_twd.h>
 
 #include "common.h"
 #include "pm-rcar.h"
@@ -32,41 +31,33 @@
 #define AVECR IOMEM(0xfe700040)
 #define R8A7779_SCU_BASE 0xf0000000
 
-static struct rcar_sysc_ch r8a7779_ch_cpu1 = {
+static const struct rcar_sysc_ch r8a7779_ch_cpu1 = {
 	.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
 	.chan_bit = 1, /* ARM1 */
 	.isr_bit = 1, /* ARM1 */
 };
 
-static struct rcar_sysc_ch r8a7779_ch_cpu2 = {
+static const struct rcar_sysc_ch r8a7779_ch_cpu2 = {
 	.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
 	.chan_bit = 2, /* ARM2 */
 	.isr_bit = 2, /* ARM2 */
 };
 
-static struct rcar_sysc_ch r8a7779_ch_cpu3 = {
+static const struct rcar_sysc_ch r8a7779_ch_cpu3 = {
 	.chan_offs = 0x40, /* PWRSR0 .. PWRER0 */
 	.chan_bit = 3, /* ARM3 */
 	.isr_bit = 3, /* ARM3 */
 };
 
-static struct rcar_sysc_ch *r8a7779_ch_cpu[4] = {
+static const struct rcar_sysc_ch * const r8a7779_ch_cpu[4] = {
 	[1] = &r8a7779_ch_cpu1,
 	[2] = &r8a7779_ch_cpu2,
 	[3] = &r8a7779_ch_cpu3,
 };
 
-#if defined(CONFIG_HAVE_ARM_TWD) && !defined(CONFIG_ARCH_MULTIPLATFORM)
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, R8A7779_SCU_BASE + 0x600, 29);
-void __init r8a7779_register_twd(void)
-{
-	twd_local_timer_register(&twd_local_timer);
-}
-#endif
-
 static int r8a7779_platform_cpu_kill(unsigned int cpu)
 {
-	struct rcar_sysc_ch *ch = NULL;
+	const struct rcar_sysc_ch *ch = NULL;
 	int ret = -EIO;
 
 	cpu = cpu_logical_map(cpu);
@@ -82,7 +73,7 @@
 
 static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	struct rcar_sysc_ch *ch = NULL;
+	const struct rcar_sysc_ch *ch = NULL;
 	unsigned int lcpu = cpu_logical_map(cpu);
 	int ret;
 
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 930f45c..4b33d43 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -26,12 +26,12 @@
 #include "rcar-gen2.h"
 #include "r8a7790.h"
 
-static struct rcar_sysc_ch r8a7790_ca15_scu = {
+static const struct rcar_sysc_ch r8a7790_ca15_scu = {
 	.chan_offs = 0x180, /* PWRSR5 .. PWRER5 */
 	.isr_bit = 12, /* CA15-SCU */
 };
 
-static struct rcar_sysc_ch r8a7790_ca7_scu = {
+static const struct rcar_sysc_ch r8a7790_ca7_scu = {
 	.chan_offs = 0x100, /* PWRSR3 .. PWRER3 */
 	.isr_bit = 21, /* CA7-SCU */
 };
@@ -64,7 +64,7 @@
 	.smp_prepare_cpus	= r8a7790_smp_prepare_cpus,
 	.smp_boot_secondary	= shmobile_smp_apmu_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db7..b2508c0 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@
 	.smp_prepare_cpus	= r8a7791_smp_prepare_cpus,
 	.smp_boot_secondary	= r8a7791_smp_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_apmu_cpu_die,
 	.cpu_kill		= shmobile_smp_apmu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 2106d6b..bc2824a 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -33,14 +33,6 @@
 
 #define SH73A0_SCU_BASE 0xf0000000
 
-#if defined(CONFIG_HAVE_ARM_TWD) && !defined(CONFIG_ARCH_MULTIPLATFORM)
-static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, SH73A0_SCU_BASE + 0x600, 29);
-void __init sh73a0_register_twd(void)
-{
-	twd_local_timer_register(&twd_local_timer);
-}
-#endif
-
 static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned int lcpu = cpu_logical_map(cpu);
@@ -68,7 +60,7 @@
 	.smp_prepare_cpus	= sh73a0_smp_prepare_cpus,
 	.smp_boot_secondary	= sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-	.cpu_disable		= shmobile_smp_cpu_disable,
+	.cpu_can_disable	= shmobile_smp_cpu_can_disable,
 	.cpu_die		= shmobile_smp_scu_cpu_die,
 	.cpu_kill		= shmobile_smp_scu_cpu_kill,
 #endif
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index 0edf2a6..f1d027a 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -70,18 +70,6 @@
 	if (!max_freq)
 		return;
 
-#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
-	/* Non-multiplatform r8a73a4 SoC cannot use arch timer due
-	 * to GIC being initialized from C and arch timer via DT */
-	if (of_machine_is_compatible("renesas,r8a73a4"))
-		has_arch_timer = false;
-
-	/* Non-multiplatform r8a7790 SoC cannot use arch timer due
-	 * to GIC being initialized from C and arch timer via DT */
-	if (of_machine_is_compatible("renesas,r8a7790"))
-		has_arch_timer = false;
-#endif
-
 	if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
 		if (is_a7_a8_a9)
 			shmobile_setup_delay_hz(max_freq, 1, 3);
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 7259c37..5bc6ea8 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -25,6 +25,7 @@
 #define SOCFPGA_RSTMGR_MODPERRST	0x14
 #define SOCFPGA_RSTMGR_BRGMODRST	0x1c
 
+#define SOCFPGA_A10_RSTMGR_CTRL		0xC
 #define SOCFPGA_A10_RSTMGR_MODMPURST	0x20
 
 /* System Manager bits */
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index c6f1df8..15c8ce8 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -106,11 +106,23 @@
 		cpu_do_idle();
 }
 
+/*
+ * We need a dummy function so that platform_can_cpu_hotplug() knows
+ * we support CPU hotplug. However, the function does not need to do
+ * anything, because CPUs going offline just do WFI. We could reset
+ * the CPUs but it would increase power consumption.
+ */
+static int socfpga_cpu_kill(unsigned int cpu)
+{
+	return 1;
+}
+
 static struct smp_operations socfpga_smp_ops __initdata = {
 	.smp_prepare_cpus	= socfpga_smp_prepare_cpus,
 	.smp_boot_secondary	= socfpga_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_die		= socfpga_cpu_die,
+	.cpu_kill		= socfpga_cpu_kill,
 #endif
 };
 
@@ -119,6 +131,7 @@
 	.smp_boot_secondary	= socfpga_a10_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_die		= socfpga_cpu_die,
+	.cpu_kill		= socfpga_cpu_kill,
 #endif
 };
 
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index 19643a7..a1c0efa 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -74,6 +74,19 @@
 	writel(temp, rst_manager_base_addr + SOCFPGA_RSTMGR_CTRL);
 }
 
+static void socfpga_arria10_restart(enum reboot_mode mode, const char *cmd)
+{
+	u32 temp;
+
+	temp = readl(rst_manager_base_addr + SOCFPGA_A10_RSTMGR_CTRL);
+
+	if (mode == REBOOT_HARD)
+		temp |= RSTMGR_CTRL_SWCOLDRSTREQ;
+	else
+		temp |= RSTMGR_CTRL_SWWARMRSTREQ;
+	writel(temp, rst_manager_base_addr + SOCFPGA_A10_RSTMGR_CTRL);
+}
+
 static const char *altera_dt_match[] = {
 	"altr,socfpga",
 	NULL
@@ -86,3 +99,16 @@
 	.restart	= socfpga_cyclone5_restart,
 	.dt_compat	= altera_dt_match,
 MACHINE_END
+
+static const char *altera_a10_dt_match[] = {
+	"altr,socfpga-arria10",
+	NULL
+};
+
+DT_MACHINE_START(SOCFPGA_A10, "Altera SOCFPGA Arria10")
+	.l2c_aux_val	= 0,
+	.l2c_aux_mask	= ~0,
+	.init_irq	= socfpga_init_irq,
+	.restart	= socfpga_arria10_restart,
+	.dt_compat	= altera_a10_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index 26fda4e..9ccffc1 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -66,8 +66,6 @@
 static __iomem void *gpt_base;
 static struct clk *gpt_clk;
 
-static void clockevent_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *clk_event_dev);
 static int clockevent_next_event(unsigned long evt,
 				 struct clock_event_device *clk_event_dev);
 
@@ -95,54 +93,67 @@
 		200, 16, clocksource_mmio_readw_up);
 }
 
-static struct clock_event_device clkevt = {
-	.name = "tmr0",
-	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = clockevent_set_mode,
-	.set_next_event = clockevent_next_event,
-	.shift = 0,	/* to be computed */
-};
+static inline void timer_shutdown(struct clock_event_device *evt)
+{
+	u16 val = readw(gpt_base + CR(CLKEVT));
 
-static void clockevent_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *clk_event_dev)
+	/* stop the timer */
+	val &= ~CTRL_ENABLE;
+	writew(val, gpt_base + CR(CLKEVT));
+}
+
+static int spear_shutdown(struct clock_event_device *evt)
+{
+	timer_shutdown(evt);
+
+	return 0;
+}
+
+static int spear_set_oneshot(struct clock_event_device *evt)
+{
+	u16 val;
+
+	/* stop the timer */
+	timer_shutdown(evt);
+
+	val = readw(gpt_base + CR(CLKEVT));
+	val |= CTRL_ONE_SHOT;
+	writew(val, gpt_base + CR(CLKEVT));
+
+	return 0;
+}
+
+static int spear_set_periodic(struct clock_event_device *evt)
 {
 	u32 period;
 	u16 val;
 
 	/* stop the timer */
+	timer_shutdown(evt);
+
+	period = clk_get_rate(gpt_clk) / HZ;
+	period >>= CTRL_PRESCALER16;
+	writew(period, gpt_base + LOAD(CLKEVT));
+
 	val = readw(gpt_base + CR(CLKEVT));
-	val &= ~CTRL_ENABLE;
+	val &= ~CTRL_ONE_SHOT;
+	val |= CTRL_ENABLE | CTRL_INT_ENABLE;
 	writew(val, gpt_base + CR(CLKEVT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		period = clk_get_rate(gpt_clk) / HZ;
-		period >>= CTRL_PRESCALER16;
-		writew(period, gpt_base + LOAD(CLKEVT));
-
-		val = readw(gpt_base + CR(CLKEVT));
-		val &= ~CTRL_ONE_SHOT;
-		val |= CTRL_ENABLE | CTRL_INT_ENABLE;
-		writew(val, gpt_base + CR(CLKEVT));
-
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		val = readw(gpt_base + CR(CLKEVT));
-		val |= CTRL_ONE_SHOT;
-		writew(val, gpt_base + CR(CLKEVT));
-
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-
-		break;
-	default:
-		pr_err("Invalid mode requested\n");
-		break;
-	}
+	return 0;
 }
 
+static struct clock_event_device clkevt = {
+	.name = "tmr0",
+	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown = spear_shutdown,
+	.set_state_periodic = spear_set_periodic,
+	.set_state_oneshot = spear_set_oneshot,
+	.tick_resume = spear_shutdown,
+	.set_next_event = clockevent_next_event,
+	.shift = 0,	/* to be computed */
+};
+
 static int clockevent_next_event(unsigned long cycles,
 				 struct clock_event_device *clk_event_dev)
 {
@@ -193,7 +204,7 @@
 	setup_irq(irq, &spear_timer_irq);
 }
 
-const static struct of_device_id timer_of_match[] __initconst = {
+static const struct of_device_id timer_of_match[] __initconst = {
 	{ .compatible = "st,spear-timer", },
 	{ },
 };
diff --git a/arch/arm/mach-sti/board-dt.c b/arch/arm/mach-sti/board-dt.c
index b373aca..ae10fb2 100644
--- a/arch/arm/mach-sti/board-dt.c
+++ b/arch/arm/mach-sti/board-dt.c
@@ -14,7 +14,7 @@
 
 #include "smp.h"
 
-static const char *stih41x_dt_match[] __initdata = {
+static const char *const stih41x_dt_match[] __initconst = {
 	"st,stih415",
 	"st,stih416",
 	"st,stih407",
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 4c09bae..e0ad4517 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -37,6 +37,7 @@
 	 * should now contain the SVC stack for this core
 	 */
 	b	secondary_startup
+ENDPROC(sti_secondary_startup)
 
 1:	.long	.
 	.long	pen_release
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index d4b624f..c4ad6ea 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
@@ -38,8 +39,6 @@
 
 static void sti_secondary_init(unsigned int cpu)
 {
-	trace_hardirqs_off();
-
 	/*
 	 * let the primary processor know we're out of the
 	 * pen, then head off into the C entry point
@@ -99,14 +98,62 @@
 
 static void __init sti_smp_prepare_cpus(unsigned int max_cpus)
 {
-	void __iomem *scu_base = NULL;
-	struct device_node *np = of_find_compatible_node(
-					NULL, NULL, "arm,cortex-a9-scu");
+	struct device_node *np;
+	void __iomem *scu_base;
+	u32 __iomem *cpu_strt_ptr;
+	u32 release_phys;
+	int cpu;
+	unsigned long entry_pa = virt_to_phys(sti_secondary_startup);
+
+	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
+
 	if (np) {
 		scu_base = of_iomap(np, 0);
 		scu_enable(scu_base);
 		of_node_put(np);
 	}
+
+	if (max_cpus <= 1)
+		return;
+
+	for_each_possible_cpu(cpu) {
+
+		np = of_get_cpu_node(cpu, NULL);
+
+		if (!np)
+			continue;
+
+		if (of_property_read_u32(np, "cpu-release-addr",
+						&release_phys)) {
+			pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
+				cpu);
+			continue;
+		}
+
+		/*
+		 * holding pen is usually configured in SBC DMEM but can also be
+		 * in RAM.
+		 */
+
+		if (!memblock_is_memory(release_phys))
+			cpu_strt_ptr =
+				ioremap(release_phys, sizeof(release_phys));
+		else
+			cpu_strt_ptr =
+				(u32 __iomem *)phys_to_virt(release_phys);
+
+		__raw_writel(entry_pa, cpu_strt_ptr);
+
+		/*
+		 * wmb so that data is actually written
+		 * before cache flush is done
+		 */
+		smp_wmb();
+		sync_cache_w(cpu_strt_ptr);
+
+		if (!memblock_is_memory(release_phys))
+			iounmap(cpu_strt_ptr);
+	}
 }
 
 struct smp_operations __initdata sti_smp_ops = {
diff --git a/arch/arm/mach-sti/smp.h b/arch/arm/mach-sti/smp.h
index 1871b72..ae22707 100644
--- a/arch/arm/mach-sti/smp.h
+++ b/arch/arm/mach-sti/smp.h
@@ -14,4 +14,6 @@
 
 extern struct smp_operations	sti_smp_ops;
 
+void sti_secondary_startup(void);
+
 #endif
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 5d1a318..0fa4c5f 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -8,6 +8,7 @@
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
 	select PINCTRL
+	select PM_OPP
 	select ARCH_HAS_RESET_CONTROLLER
 	select RESET_CONTROLLER
 	select SOC_BUS
diff --git a/arch/arm/mach-tegra/cpuidle-tegra114.c b/arch/arm/mach-tegra/cpuidle-tegra114.c
index 155807f..9157546 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra114.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra114.c
@@ -24,6 +24,7 @@
 #include <asm/cpuidle.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
+#include <asm/psci.h>
 
 #include "pm.h"
 #include "sleep.h"
@@ -44,16 +45,12 @@
 	tegra_set_cpu_in_lp2();
 	cpu_pm_enter();
 
-	tick_broadcast_enter();
-
 	call_firmware_op(prepare_idle);
 
 	/* Do suspend by ourselves if the firmware does not implement it */
 	if (call_firmware_op(do_idle, 0) == -ENOSYS)
 		cpu_suspend(0, tegra30_sleep_cpu_secondary_finish);
 
-	tick_broadcast_exit();
-
 	cpu_pm_exit();
 	tegra_clear_cpu_in_lp2();
 
@@ -61,6 +58,13 @@
 
 	return index;
 }
+
+static void tegra114_idle_enter_freeze(struct cpuidle_device *dev,
+				       struct cpuidle_driver *drv,
+				       int index)
+{
+       tegra114_idle_power_down(dev, drv, index);
+}
 #endif
 
 static struct cpuidle_driver tegra_idle_driver = {
@@ -72,8 +76,10 @@
 #ifdef CONFIG_PM_SLEEP
 		[1] = {
 			.enter			= tegra114_idle_power_down,
+			.enter_freeze		= tegra114_idle_enter_freeze,
 			.exit_latency		= 500,
 			.target_residency	= 1000,
+			.flags			= CPUIDLE_FLAG_TIMER_STOP,
 			.power_usage		= 0,
 			.name			= "powered-down",
 			.desc			= "CPU power gated",
@@ -84,5 +90,8 @@
 
 int __init tegra114_cpuidle_init(void)
 {
-	return cpuidle_register(&tegra_idle_driver, NULL);
+	if (!psci_smp_available())
+		return cpuidle_register(&tegra_idle_driver, NULL);
+
+	return 0;
 }
diff --git a/arch/arm/mach-tegra/iomap.h b/arch/arm/mach-tegra/iomap.h
index 81dc950..9e5b2f8 100644
--- a/arch/arm/mach-tegra/iomap.h
+++ b/arch/arm/mach-tegra/iomap.h
@@ -82,9 +82,6 @@
 #define TEGRA_EMC_BASE			0x7000F400
 #define TEGRA_EMC_SIZE			SZ_1K
 
-#define TEGRA_FUSE_BASE			0x7000F800
-#define TEGRA_FUSE_SIZE			SZ_1K
-
 #define TEGRA_EMC0_BASE			0x7001A000
 #define TEGRA_EMC0_SIZE			SZ_2K
 
diff --git a/arch/arm/mach-uniphier/platsmp.c b/arch/arm/mach-uniphier/platsmp.c
index 5943e1c..4b784f7 100644
--- a/arch/arm/mach-uniphier/platsmp.c
+++ b/arch/arm/mach-uniphier/platsmp.c
@@ -60,12 +60,6 @@
 	sbcm_regmap = NULL;
 }
 
-static void __naked uniphier_secondary_startup(void)
-{
-	asm("bl		v7_invalidate_l1\n"
-	    "b		secondary_startup\n");
-};
-
 static int uniphier_boot_secondary(unsigned int cpu,
 				   struct task_struct *idle)
 {
@@ -75,7 +69,7 @@
 		return -ENODEV;
 
 	ret = regmap_write(sbcm_regmap, 0x1208,
-			   virt_to_phys(uniphier_secondary_startup));
+			   virt_to_phys(secondary_startup));
 	if (!ret)
 		asm("sev"); /* wake up secondary CPU */
 
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index 4418a50..c8643ac 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -7,7 +7,7 @@
 obj-$(CONFIG_UX500_SOC_DB8500)	+= cpu-db8500.o
 obj-$(CONFIG_MACH_MOP500)	+= board-mop500-regulators.o \
 				board-mop500-audio.o
-obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
+obj-$(CONFIG_SMP)		+= platsmp.o
 obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
 
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 7557bed..780bd13 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -8,6 +8,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
+#include <asm/outercache.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "db8500-regs.h"
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 1691380..f805603 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -20,10 +20,10 @@
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>
 #include <linux/random.h>
 
-#include <asm/pmu.h>
 #include <asm/mach/map.h>
 
 #include "setup.h"
@@ -154,7 +154,6 @@
 };
 
 DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)")
-	.smp            = smp_ops(ux500_smp_ops),
 	.map_io		= u8500_map_io,
 	.init_irq	= ux500_init_irq,
 	/* we re-use nomadik timer here */
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index e31d3d6..41b81c4 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -56,7 +56,6 @@
 	struct device_node *np;
 	struct resource r;
 
-	gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
 	irqchip_init();
 	np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu");
 	of_address_to_resource(np, 0, &r);
@@ -72,21 +71,12 @@
 	 * Init clocks here so that they are available for system timer
 	 * initialization.
 	 */
-	if (cpu_is_u8500_family()) {
-		u8500_of_clk_init(U8500_CLKRST1_BASE,
-				  U8500_CLKRST2_BASE,
-				  U8500_CLKRST3_BASE,
-				  U8500_CLKRST5_BASE,
-				  U8500_CLKRST6_BASE);
-	} else if (cpu_is_u9540()) {
-		u9540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
-			       U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
-			       U8500_CLKRST6_BASE);
-	} else if (cpu_is_u8540()) {
-		u8540_clk_init(U8500_CLKRST1_BASE, U8500_CLKRST2_BASE,
-			       U8500_CLKRST3_BASE, U8500_CLKRST5_BASE,
-			       U8500_CLKRST6_BASE);
-	}
+	if (cpu_is_u8500_family())
+		u8500_clk_init();
+	else if (cpu_is_u9540())
+		u9540_clk_init();
+	else if (cpu_is_u8540())
+		u8540_clk_init();
 }
 
 static const char * __init ux500_get_machine(void)
diff --git a/arch/arm/mach-ux500/headsmp.S b/arch/arm/mach-ux500/headsmp.S
deleted file mode 100644
index 9cdea04..0000000
--- a/arch/arm/mach-ux500/headsmp.S
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *  Copyright (c) 2009 ST-Ericsson
- *	This file is based  ARM Realview platform
- *  Copyright (c) 2003 ARM Limited
- *  All Rights Reserved
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-/*
- * U8500 specific entry point for secondary CPUs.
- */
-ENTRY(u8500_secondary_startup)
-	mrc	p15, 0, r0, c0, c0, 5
-	and	r0, r0, #15
-	adr	r4, 1f
-	ldmia	r4, {r5, r6}
-	sub	r4, r4, r5
-	add	r6, r6, r4
-pen:	ldr	r7, [r6]
-	cmp	r7, r0
-	bne	pen
-
-	/*
-	 * we've been released from the holding pen: secondary_stack
-	 * should now contain the SVC stack for this core
-	 */
-	b	secondary_startup
-ENDPROC(u8500_secondary_startup)
-
-	.align 2
-1:	.long	.
-	.long	pen_release
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 62b1de9..70766b96 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -28,135 +28,81 @@
 #include "db8500-regs.h"
 #include "id.h"
 
-static void __iomem *scu_base;
-static void __iomem *backupram;
+/* Magic triggers in backup RAM */
+#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
+#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
 
-/* This is called from headsmp.S to wakeup the secondary core */
-extern void u8500_secondary_startup(void);
-
-/*
- * Write pen_release in a way that is guaranteed to be visible to all
- * observers, irrespective of whether they're taking part in coherency
- * or not.  This is necessary for the hotplug code to work reliably.
- */
-static void write_pen_release(int val)
+static void wakeup_secondary(void)
 {
-	pen_release = val;
-	smp_wmb();
-	sync_cache_w(&pen_release);
-}
+	struct device_node *np;
+	static void __iomem *backupram;
 
-static DEFINE_SPINLOCK(boot_lock);
-
-static void ux500_secondary_init(unsigned int cpu)
-{
-	/*
-	 * let the primary processor know we're out of the
-	 * pen, then head off into the C entry point
-	 */
-	write_pen_release(-1);
-
-	/*
-	 * Synchronise with the boot thread.
-	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
-}
-
-static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
-{
-	unsigned long timeout;
-
-	/*
-	 * set synchronisation state between this boot processor
-	 * and the secondary one
-	 */
-	spin_lock(&boot_lock);
-
-	/*
-	 * The secondary processor is waiting to be released from
-	 * the holding pen - release it, then wait for it to flag
-	 * that it has been released by resetting pen_release.
-	 */
-	write_pen_release(cpu_logical_map(cpu));
-
-	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
-
-	timeout = jiffies + (1 * HZ);
-	while (time_before(jiffies, timeout)) {
-		if (pen_release == -1)
-			break;
+	np = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
+	if (!np) {
+		pr_err("No backupram base address\n");
+		return;
+	}
+	backupram = of_iomap(np, 0);
+	of_node_put(np);
+	if (!backupram) {
+		pr_err("No backupram remap\n");
+		return;
 	}
 
 	/*
-	 * now the secondary core is starting up let it run its
-	 * calibrations, then wait for it to finish
-	 */
-	spin_unlock(&boot_lock);
-
-	return pen_release != -1 ? -ENOSYS : 0;
-}
-
-static void __init wakeup_secondary(void)
-{
-	/*
 	 * write the address of secondary startup into the backup ram register
 	 * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
 	 * backup ram register at offset 0x1FF0, which is what boot rom code
-	 * is waiting for. This would wake up the secondary core from WFE
+	 * is waiting for. This will wake up the secondary core from WFE.
 	 */
-#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
-	__raw_writel(virt_to_phys(u8500_secondary_startup),
-		     backupram + UX500_CPU1_JUMPADDR_OFFSET);
-
-#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
-	__raw_writel(0xA1FEED01,
-		     backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
+	writel(virt_to_phys(secondary_startup),
+	       backupram + UX500_CPU1_JUMPADDR_OFFSET);
+	writel(0xA1FEED01,
+	       backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
 
 	/* make sure write buffer is drained */
 	mb();
-}
-
-/*
- * Initialise the CPU possible map early - this describes the CPUs
- * which may be present or become present in the system.
- */
-static void __init ux500_smp_init_cpus(void)
-{
-	unsigned int i, ncores;
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
-	scu_base = of_iomap(np, 0);
-	of_node_put(np);
-	if (!scu_base)
-		return;
-	backupram = ioremap(U8500_BACKUPRAM0_BASE, SZ_8K);
-	ncores = scu_get_core_count(scu_base);
-
-	/* sanity check */
-	if (ncores > nr_cpu_ids) {
-		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
-			ncores, nr_cpu_ids);
-		ncores = nr_cpu_ids;
-	}
-
-	for (i = 0; i < ncores; i++)
-		set_cpu_possible(i, true);
+	iounmap(backupram);
 }
 
 static void __init ux500_smp_prepare_cpus(unsigned int max_cpus)
 {
+	struct device_node *np;
+	static void __iomem *scu_base;
+	unsigned int ncores;
+	int i;
+
+	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
+	if (!np) {
+		pr_err("No SCU base address\n");
+		return;
+	}
+	scu_base = of_iomap(np, 0);
+	of_node_put(np);
+	if (!scu_base) {
+		pr_err("No SCU remap\n");
+		return;
+	}
+
 	scu_enable(scu_base);
+	ncores = scu_get_core_count(scu_base);
+	for (i = 0; i < ncores; i++)
+		set_cpu_possible(i, true);
+	iounmap(scu_base);
+}
+
+static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
 	wakeup_secondary();
+	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+	return 0;
 }
 
 struct smp_operations ux500_smp_ops __initdata = {
-	.smp_init_cpus		= ux500_smp_init_cpus,
 	.smp_prepare_cpus	= ux500_smp_prepare_cpus,
-	.smp_secondary_init	= ux500_secondary_init,
 	.smp_boot_secondary	= ux500_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_die		= ux500_cpu_die,
 #endif
 };
+CPU_METHOD_OF_DECLARE(ux500_smp, "ste,dbx500-smp", &ux500_smp_ops);
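The ux500 rework above retires the pen_release holding pen: the boot ROM already parks the secondary core in WFE, so ux500_boot_secondary() only needs to publish the secondary entry point plus a magic value in backup RAM and send a wakeup event, and the smp_operations are now selected from the device tree "enable-method" string instead of the machine descriptor. As a rough sketch of that pattern (not taken from the patch; "acme" names are hypothetical, only CPU_METHOD_OF_DECLARE, arch_send_wakeup_ipi_mask and struct smp_operations are real kernel interfaces):

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/sched.h>

static int acme_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* platform specific: publish the secondary_startup address, then kick the core */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
	return 0;
}

static struct smp_operations acme_smp_ops __initdata = {
	.smp_boot_secondary	= acme_boot_secondary,
};
CPU_METHOD_OF_DECLARE(acme_smp, "acme,example-smp", &acme_smp_ops);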
diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
index 1fb6ad2..65876ea 100644
--- a/arch/arm/mach-ux500/setup.h
+++ b/arch/arm/mach-ux500/setup.h
@@ -26,7 +26,6 @@
 
 extern void ux500_timer_init(void);
 
-extern struct smp_operations ux500_smp_ops;
 extern void ux500_cpu_die(unsigned int cpu);
 
 #endif /*  __ASM_ARCH_SETUP_H */
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index b3328cd..1aa4cce 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -80,7 +80,7 @@
 	 * to the CPU by disabling the GIC CPU IF to prevent wfi
 	 * from completing execution behind power controller back
 	 */
-	gic_cpu_if_down();
+	gic_cpu_if_down(0);
 }
 
 static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
diff --git a/arch/arm/mach-w90x900/irq.c b/arch/arm/mach-w90x900/irq.c
index d66d43a..491b317 100644
--- a/arch/arm/mach-w90x900/irq.c
+++ b/arch/arm/mach-w90x900/irq.c
@@ -211,6 +211,6 @@
 	for (irqno = IRQ_WDT; irqno <= IRQ_ADC; irqno++) {
 		irq_set_chip_and_handler(irqno, &nuc900_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(irqno, IRQF_VALID);
+		irq_clear_status_flags(irqno, IRQ_NOREQUEST);
 	}
 }
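set_irq_flags() is an ARM-only interface that this series phases out; clearing IRQ_NOREQUEST on the descriptor is the generic-IRQ way to make it requestable, equivalent to the old IRQF_VALID. A minimal sketch of the conversion, with a hypothetical helper name:

#include <linux/irq.h>

static void acme_setup_level_irq(unsigned int irq, struct irq_chip *chip)
{
	irq_set_chip_and_handler(irq, chip, handle_level_irq);
	/* was: set_irq_flags(irq, IRQF_VALID) */
	irq_clear_status_flags(irq, IRQ_NOREQUEST);
}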
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index 9230d37..cd1966e 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -48,31 +48,32 @@
 
 static unsigned int timer0_load;
 
-static void nuc900_clockevent_setmode(enum clock_event_mode mode,
-		struct clock_event_device *clk)
+static int nuc900_clockevent_shutdown(struct clock_event_device *evt)
 {
-	unsigned int val;
-
-	val = __raw_readl(REG_TCSR0);
-	val &= ~(0x03 << 27);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writel(timer0_load, REG_TICR0);
-		val |= (PERIOD | COUNTEN | INTEN | PRESCALE);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		val |= (ONESHOT | COUNTEN | INTEN | PRESCALE);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	unsigned int val = __raw_readl(REG_TCSR0) & ~(0x03 << 27);
 
 	__raw_writel(val, REG_TCSR0);
+	return 0;
+}
+
+static int nuc900_clockevent_set_oneshot(struct clock_event_device *evt)
+{
+	unsigned int val = __raw_readl(REG_TCSR0) & ~(0x03 << 27);
+
+	val |= (ONESHOT | COUNTEN | INTEN | PRESCALE);
+
+	__raw_writel(val, REG_TCSR0);
+	return 0;
+}
+
+static int nuc900_clockevent_set_periodic(struct clock_event_device *evt)
+{
+	unsigned int val = __raw_readl(REG_TCSR0) & ~(0x03 << 27);
+
+	__raw_writel(timer0_load, REG_TICR0);
+	val |= (PERIOD | COUNTEN | INTEN | PRESCALE);
+	__raw_writel(val, REG_TCSR0);
+	return 0;
 }
 
 static int nuc900_clockevent_setnextevent(unsigned long evt,
@@ -90,11 +91,15 @@
 }
 
 static struct clock_event_device nuc900_clockevent_device = {
-	.name		= "nuc900-timer0",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= nuc900_clockevent_setmode,
-	.set_next_event	= nuc900_clockevent_setnextevent,
-	.rating		= 300,
+	.name			= "nuc900-timer0",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= nuc900_clockevent_shutdown,
+	.set_state_periodic	= nuc900_clockevent_set_periodic,
+	.set_state_oneshot	= nuc900_clockevent_set_oneshot,
+	.tick_resume		= nuc900_clockevent_shutdown,
+	.set_next_event		= nuc900_clockevent_setnextevent,
+	.rating			= 300,
 };
 
 /*IRQ handler for the timer*/
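The nuc900 timer, like the iop and orion timers further down in this diff, moves from the single set_mode(enum clock_event_mode) callback to the newer per-state callbacks that return int. A sketch of the target shape (hardware programming is only hinted at in comments; "acme" names are placeholders):

#include <linux/clockchips.h>

static int acme_clkevt_shutdown(struct clock_event_device *evt)
{
	/* stop the hardware counter and mask its interrupt here */
	return 0;
}

static int acme_clkevt_set_periodic(struct clock_event_device *evt)
{
	/* program the 1/HZ reload value and enable auto-reload + interrupt here */
	return 0;
}

static int acme_clkevt_next_event(unsigned long delta, struct clock_event_device *evt)
{
	/* arm a one-shot expiry 'delta' ticks from now */
	return 0;
}

static struct clock_event_device acme_clkevt = {
	.name			= "acme-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	.set_next_event		= acme_clkevt_next_event,
	.set_state_shutdown	= acme_clkevt_shutdown,
	.set_state_periodic	= acme_clkevt_set_periodic,
	.set_state_oneshot	= acme_clkevt_shutdown,
	.tick_resume		= acme_clkevt_shutdown,
};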
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig
index 2a910dc..7fdc5bf 100644
--- a/arch/arm/mach-zx/Kconfig
+++ b/arch/arm/mach-zx/Kconfig
@@ -13,6 +13,7 @@
 	select ARM_GLOBAL_TIMER
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
+	select PM_GENERIC_DOMAINS
 	help
 	  Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
 endif
diff --git a/arch/arm/mach-zx/Makefile b/arch/arm/mach-zx/Makefile
index 7c2edf6..a4b4864 100644
--- a/arch/arm/mach-zx/Makefile
+++ b/arch/arm/mach-zx/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_SOC_ZX296702) += zx296702.o
+obj-$(CONFIG_SOC_ZX296702) += zx296702.o zx296702-pm-domain.o
 obj-$(CONFIG_SMP) += headsmp.o platsmp.o
diff --git a/arch/arm/mach-zx/zx296702-pm-domain.c b/arch/arm/mach-zx/zx296702-pm-domain.c
new file mode 100644
index 0000000..e08574d
--- /dev/null
+++ b/arch/arm/mach-zx/zx296702-pm-domain.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * Author: Jun Nie <jun.nie@linaro.org>
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+
+#define PCU_DM_CLKEN        0x18
+#define PCU_DM_RSTEN        0x1C
+#define PCU_DM_ISOEN        0x20
+#define PCU_DM_PWRDN        0x24
+#define PCU_DM_ACK_SYNC     0x28
+
+enum {
+	PCU_DM_NEON0 = 0,
+	PCU_DM_NEON1,
+	PCU_DM_GPU,
+	PCU_DM_DECPPU,
+	PCU_DM_VOU,
+	PCU_DM_R2D,
+	PCU_DM_TOP,
+};
+
+static void __iomem *pcubase;
+
+struct zx_pm_domain {
+	struct generic_pm_domain dm;
+	unsigned int bit;
+};
+
+static int normal_power_off(struct generic_pm_domain *domain)
+{
+	struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain;
+	unsigned long loop = 1000;
+	u32 tmp;
+
+	tmp = readl_relaxed(pcubase + PCU_DM_CLKEN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp, pcubase + PCU_DM_CLKEN);
+	udelay(5);
+
+	tmp = readl_relaxed(pcubase + PCU_DM_ISOEN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_ISOEN);
+	udelay(5);
+
+	tmp = readl_relaxed(pcubase + PCU_DM_RSTEN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp, pcubase + PCU_DM_RSTEN);
+	udelay(5);
+
+	tmp = readl_relaxed(pcubase + PCU_DM_PWRDN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_PWRDN);
+	do {
+		tmp = readl_relaxed(pcubase + PCU_DM_ACK_SYNC) & BIT(zpd->bit);
+	} while (--loop && !tmp);
+
+	if (!loop) {
+		pr_err("Error: %s %s fail\n", __func__, domain->name);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int normal_power_on(struct generic_pm_domain *domain)
+{
+	struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain;
+	unsigned long loop = 10000;
+	u32 tmp;
+
+	tmp = readl_relaxed(pcubase + PCU_DM_PWRDN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp, pcubase + PCU_DM_PWRDN);
+	do {
+		tmp = readl_relaxed(pcubase + PCU_DM_ACK_SYNC) & BIT(zpd->bit);
+	} while (--loop && tmp);
+
+	if (!loop) {
+		pr_err("Error: %s %s fail\n", __func__, domain->name);
+		return -EIO;
+	}
+
+	tmp = readl_relaxed(pcubase + PCU_DM_RSTEN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_RSTEN);
+	udelay(5);
+
+	tmp = readl_relaxed(pcubase + PCU_DM_ISOEN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp, pcubase + PCU_DM_ISOEN);
+	udelay(5);
+
+	tmp = readl_relaxed(pcubase + PCU_DM_CLKEN);
+	tmp &= ~BIT(zpd->bit);
+	writel_relaxed(tmp | BIT(zpd->bit), pcubase + PCU_DM_CLKEN);
+	udelay(5);
+	return 0;
+}
+
+static struct zx_pm_domain gpu_domain = {
+	.dm = {
+		.name		= "gpu_domain",
+		.power_off	= normal_power_off,
+		.power_on	= normal_power_on,
+	},
+	.bit = PCU_DM_GPU,
+};
+
+static struct zx_pm_domain decppu_domain = {
+	.dm = {
+		.name		= "decppu_domain",
+		.power_off	= normal_power_off,
+		.power_on	= normal_power_on,
+	},
+	.bit = PCU_DM_DECPPU,
+};
+
+static struct zx_pm_domain vou_domain = {
+	.dm = {
+		.name		= "vou_domain",
+		.power_off	= normal_power_off,
+		.power_on	= normal_power_on,
+	},
+	.bit = PCU_DM_VOU,
+};
+
+static struct zx_pm_domain r2d_domain = {
+	.dm = {
+		.name		= "r2d_domain",
+		.power_off	= normal_power_off,
+		.power_on	= normal_power_on,
+	},
+	.bit = PCU_DM_R2D,
+};
+
+static struct generic_pm_domain *zx296702_pm_domains[] = {
+	&vou_domain.dm,
+	&gpu_domain.dm,
+	&decppu_domain.dm,
+	&r2d_domain.dm,
+};
+
+static int zx296702_pd_probe(struct platform_device *pdev)
+{
+	struct genpd_onecell_data *genpd_data;
+	struct resource *res;
+	int i;
+
+	genpd_data = devm_kzalloc(&pdev->dev, sizeof(*genpd_data), GFP_KERNEL);
+	if (!genpd_data)
+		return -ENOMEM;
+
+	genpd_data->domains = zx296702_pm_domains;
+	genpd_data->num_domains = ARRAY_SIZE(zx296702_pm_domains);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "no memory resource defined\n");
+		return -ENODEV;
+	}
+
+	pcubase = devm_ioremap_resource(&pdev->dev, res);
+	if (!pcubase) {
+		dev_err(&pdev->dev, "ioremap fail.\n");
+		return -EIO;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(zx296702_pm_domains); ++i)
+		pm_genpd_init(zx296702_pm_domains[i], NULL, false);
+
+	of_genpd_add_provider_onecell(pdev->dev.of_node, genpd_data);
+	return 0;
+}
+
+static const struct of_device_id zx296702_pm_domain_matches[] __initconst = {
+	{ .compatible = "zte,zx296702-pcu", },
+	{ },
+};
+
+static struct platform_driver zx296702_pd_driver __initdata = {
+	.driver = {
+		.name = "zx-powerdomain",
+		.owner = THIS_MODULE,
+		.of_match_table = zx296702_pm_domain_matches,
+	},
+	.probe = zx296702_pd_probe,
+};
+
+static int __init zx296702_pd_init(void)
+{
+	return platform_driver_register(&zx296702_pd_driver);
+}
+subsys_initcall(zx296702_pd_init);
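The new zx296702 driver registers four generic PM domains and exposes them through of_genpd_add_provider_onecell(), so a consumer only needs a power-domains phandle in its DT node plus plain runtime PM calls; genpd then invokes normal_power_on()/normal_power_off() around them. A hedged sketch of such a consumer, assuming a hypothetical GPU platform driver (only the pm_runtime and platform_driver APIs are real):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int acme_gpu_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	/* genpd powers the domain up (normal_power_on above) behind this call */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}
	return 0;
}

static struct platform_driver acme_gpu_driver = {
	.driver	= { .name = "acme-gpu" },
	.probe	= acme_gpu_probe,
};
module_platform_driver(acme_gpu_driver);
MODULE_LICENSE("GPL");

A later pm_runtime_put() in the same driver lets genpd power the domain back down once it goes idle.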
diff --git a/arch/arm/mach-zx/zx296702.c b/arch/arm/mach-zx/zx296702.c
index 60bb1a8..a041e13 100644
--- a/arch/arm/mach-zx/zx296702.c
+++ b/arch/arm/mach-zx/zx296702.c
@@ -13,7 +13,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 
-static const char *zx296702_dt_compat[] __initconst = {
+static const char *const zx296702_dt_compat[] __initconst = {
 	"zte,zx296702",
 	NULL,
 };
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 616d584..5a6e4e2 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -186,7 +186,6 @@
 
 static void __init zynq_irq_init(void)
 {
-	gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND);
 	irqchip_init();
 }
 
@@ -197,8 +196,8 @@
 
 DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
 	/* 64KB way size, 8-way associativity, parity disabled */
-	.l2c_aux_val	= 0x00000000,
-	.l2c_aux_mask	= 0xffffffff,
+	.l2c_aux_val    = 0x00400000,
+	.l2c_aux_mask	= 0xffbfffff,
 	.smp		= smp_ops(zynq_smp_ops),
 	.map_io		= zynq_map_io,
 	.init_irq	= zynq_irq_init,
diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S
index 045c727..f6d5de0 100644
--- a/arch/arm/mach-zynq/headsmp.S
+++ b/arch/arm/mach-zynq/headsmp.S
@@ -18,7 +18,7 @@
 .globl zynq_secondary_trampoline_jump
 zynq_secondary_trampoline_jump:
 	/* Space for jumping address */
-	.word	/* cpu 1 */
+	.word	0	/* cpu 1 */
 .globl zynq_secondary_trampoline_end
 zynq_secondary_trampoline_end:
 ENDPROC(zynq_secondary_trampoline)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976..df7537f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@
 
 config OUTER_CACHE_SYNC
 	bool
+	select ARM_HEAVY_MB
 	help
 	  The outer cache has a outer_cache_fns.sync function pointer
 	  that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
 
+config ARM_HEAVY_MB
+	bool
+
 config ARCH_SUPPORTS_BIG_ENDIAN
 	bool
 	help
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4..b3b31e3 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4..a6a381a 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a..00ab011 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c..8801a15 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad0..e8d0e08 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f398258..6d8e8e3 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68e..4509bee 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@
  *   [7:4] == 1101
  *    [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e..5c1b7a7 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@
 	struct device_node *node;
 	void __iomem *base;
 	bool l2_wt_override = false;
-	struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
 	l2_wt_override = true;
@@ -376,10 +375,7 @@
 
 	node = of_find_matching_node(NULL, feroceon_ids);
 	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-		if (of_address_to_resource(node, 0, &res))
-			return -ENODEV;
-
-		base = ioremap(res.start, resource_size(&res));
+		base = of_iomap(node, 0);
 		if (!base)
 			return -ENOMEM;
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d33..493692d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@
 		}
 	}
 
+	if (of_property_read_bool(np, "arm,shared-override")) {
+		*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+		*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+	}
+
 	prefetch = l2x0_saved_regs.prefetch_ctrl;
 
 	ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index cba12f3..bf35abc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@
 	size = PAGE_ALIGN(size);
 	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (is_coherent || nommu())
+	if (nommu())
+		addr = __alloc_simple_buffer(dev, size, gfp, &page);
+	else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+		addr = __alloc_from_contiguous(dev, size, prot, &page,
+					       caller, want_vaddr);
+	else if (is_coherent)
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!dev_get_cma_area(dev))
-		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
 	else
-		addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+					    caller, want_vaddr);
 
 	if (page)
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot, true,
+	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@
 
 	size = PAGE_ALIGN(size);
 
-	if (is_coherent || nommu()) {
+	if (nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (__free_from_pool(cpu_addr, size)) {
+	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
 		return;
 	} else if (!dev_get_cma_area(dev)) {
-		if (want_vaddr)
+		if (want_vaddr && !is_coherent)
 			__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
@@ -1520,7 +1524,7 @@
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = page_to_phys(sg_page(s));
+		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 0000000..70ea6852
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area			__glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area 		__glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area			cpu_cache.dma_map_area
+#define dmac_unmap_area 		cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af..1ec8e75 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+	if (outer_cache.sync)
+		outer_cache.sync();
+#endif
+	if (soc_mb)
+		soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
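arm_heavy_mb() is the hook behind the heavyweight mb()/wmb() barriers on platforms selecting the new ARM_HEAVY_MB option: it drains the outer cache write buffer and then whatever SoC-specific buffer a platform registers through the soc_mb pointer defined here. A sketch of how a platform might install such a hook (the drain register is purely hypothetical):

#include <linux/init.h>
#include <linux/io.h>

extern void (*soc_mb)(void);		/* defined in flush.c above */

static void __iomem *acme_wbuf_drain;	/* mapped elsewhere during SoC init (hypothetical) */

static void acme_soc_mb(void)
{
	/* writing the drain register flushes the SoC interconnect write buffer */
	writel_relaxed(1, acme_wbuf_drain);
}

static void __init acme_barrier_init(void)
{
	soc_mb = acme_soc_mb;		/* consumed by arm_heavy_mb() */
}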
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa7..9df5f09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a..7cd1514 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings.  This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1237,7 +1278,10 @@
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear page table except top pmd used by early fixmaps
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1489,6 +1533,35 @@
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only i/o device mappings are supported ATM */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@
 	map_lowmem();
 	memblock_set_current_limit(arm_lowmem_limit);
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
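The early fixmap support added to mmu.c is what allows an early console to map its UART before paging_init() has built the real page tables: bm_pte backs the fixmap slots initially, and early_fixmap_shutdown() later replays surviving I/O mappings through create_mapping(). A rough illustration of mapping a single early I/O page through this path (FIX_EARLYCON_MEM_BASE is assumed here, following the asm-generic earlycon convention; it is not defined in the hunks shown):

#include <linux/types.h>
#include <asm/fixmap.h>
#include <asm/page.h>

static void __iomem *acme_early_ioremap(phys_addr_t phys)
{
	unsigned long vaddr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);

	/* resolves through __set_fixmap() above, backed by bm_pte this early */
	set_fixmap_io(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);
	return (void __iomem *)(vaddr + ((unsigned long)phys & ~PAGE_MASK));
}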
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f1..e683db1 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@
 		if (!new_pte)
 			goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Modify the PTE pointer to have the correct domain.  This
+		 * needs to be the vectors domain to avoid the low vectors
+		 * being unmapped.
+		 */
+		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c011e22..876060b 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -857,7 +857,9 @@
 			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
 			break;
 		case BPF_ANC | SKF_AD_IFINDEX:
+		case BPF_ANC | SKF_AD_HATYPE:
 			/* A = skb->dev->ifindex */
+			/* A = skb->dev->type */
 			ctx->seen |= SEEN_SKB;
 			off = offsetof(struct sk_buff, dev);
 			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
@@ -867,8 +869,24 @@
 
 			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 						  ifindex) != 4);
-			off = offsetof(struct net_device, ifindex);
-			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+						  type) != 2);
+
+			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+				off = offsetof(struct net_device, ifindex);
+				emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+			} else {
+				/*
+				 * offset of field "type" in "struct
+				 * net_device" is above what can be
+				 * used in the ldrh rd, [rn, #imm]
+				 * instruction, so load the offset in
+				 * a register and use ldrh rd, [rn, rm]
+				 */
+				off = offsetof(struct net_device, type);
+				emit_mov_i(ARM_R3, off, ctx);
+				emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_MARK:
 			ctx->seen |= SEEN_SKB;
@@ -895,6 +913,17 @@
 				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
 			}
 			break;
+		case BPF_ANC | SKF_AD_PKTTYPE:
+			ctx->seen |= SEEN_SKB;
+			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+						  __pkt_type_offset[0]) != 1);
+			off = PKT_TYPE_OFFSET();
+			emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
+			emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+			emit(ARM_LSR_I(r_A, r_A, 5), ctx);
+#endif
+			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -904,6 +933,14 @@
 			off = offsetof(struct sk_buff, queue_mapping);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			break;
+		case BPF_ANC | SKF_AD_PAY_OFFSET:
+			ctx->seen |= SEEN_SKB | SEEN_CALL;
+
+			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+			emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
+			emit_blx_r(ARM_R3, ctx);
+			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+			break;
 		case BPF_LDX | BPF_W | BPF_ABS:
 			/*
 			 * load a 32bit word from struct seccomp_data.
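The JIT additions teach the classic-BPF compiler about three more ancillary loads (hardware type, packet type and payload offset), so filters that use them no longer have to fall back to the interpreter on ARM. For reference, a user-space filter exercising the packet-type load could look like the sketch below (standard uapi macros only; attach with setsockopt(SO_ATTACH_FILTER)):

#include <linux/filter.h>
#include <linux/if_packet.h>

/* accept only packets addressed to this host, drop everything else */
static struct sock_filter host_only_insns[] = {
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_AD_OFF + SKF_AD_PKTTYPE),
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, PACKET_HOST, 0, 1),
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),		/* accept */
	BPF_STMT(BPF_RET | BPF_K, 0),			/* drop */
};

static struct sock_fprog host_only_prog = {
	.len	= sizeof(host_only_insns) / sizeof(host_only_insns[0]),
	.filter	= host_only_insns,
};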
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index b2d7d92..4b17d5ab 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -74,6 +74,7 @@
 #define ARM_INST_LDRB_I		0x05d00000
 #define ARM_INST_LDRB_R		0x07d00000
 #define ARM_INST_LDRH_I		0x01d000b0
+#define ARM_INST_LDRH_R		0x019000b0
 #define ARM_INST_LDR_I		0x05900000
 
 #define ARM_INST_LDM		0x08900000
@@ -160,6 +161,8 @@
 				 | (rm))
 #define ARM_LDRH_I(rt, rn, off)	(ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
 				 | (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_LDRH_R(rt, rn, rm)	(ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+				 | (rm))
 
 #define ARM_LDM(rn, regs)	(ARM_INST_LDM | (rn) << 16 | (regs))
 
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 6ad65d8..101e8f2 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -77,41 +77,57 @@
 
 static unsigned long ticks_per_jiffy;
 
-static void iop_set_mode(enum clock_event_mode mode,
-			 struct clock_event_device *unused)
+static int iop_set_periodic(struct clock_event_device *evt)
 {
 	u32 tmr = read_tmr0();
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		write_tmr0(tmr & ~IOP_TMR_EN);
-		write_tcr0(ticks_per_jiffy - 1);
-		write_trr0(ticks_per_jiffy - 1);
-		tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* ->set_next_event sets period and enables timer */
-		tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		tmr |= IOP_TMR_EN;
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	default:
-		tmr &= ~IOP_TMR_EN;
-		break;
-	}
+	write_tmr0(tmr & ~IOP_TMR_EN);
+	write_tcr0(ticks_per_jiffy - 1);
+	write_trr0(ticks_per_jiffy - 1);
+	tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN);
 
 	write_tmr0(tmr);
+	return 0;
+}
+
+static int iop_set_oneshot(struct clock_event_device *evt)
+{
+	u32 tmr = read_tmr0();
+
+	/* ->set_next_event sets period and enables timer */
+	tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN);
+	write_tmr0(tmr);
+	return 0;
+}
+
+static int iop_shutdown(struct clock_event_device *evt)
+{
+	u32 tmr = read_tmr0();
+
+	tmr &= ~IOP_TMR_EN;
+	write_tmr0(tmr);
+	return 0;
+}
+
+static int iop_resume(struct clock_event_device *evt)
+{
+	u32 tmr = read_tmr0();
+
+	tmr |= IOP_TMR_EN;
+	write_tmr0(tmr);
+	return 0;
 }
 
 static struct clock_event_device iop_clockevent = {
-	.name		= "iop_timer0",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating         = 300,
-	.set_next_event	= iop_set_next_event,
-	.set_mode	= iop_set_mode,
+	.name			= "iop_timer0",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 300,
+	.set_next_event		= iop_set_next_event,
+	.set_state_shutdown	= iop_shutdown,
+	.set_state_periodic	= iop_set_periodic,
+	.tick_resume		= iop_resume,
+	.set_state_oneshot	= iop_set_oneshot,
 };
 
 static irqreturn_t
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 5168a52..79c33ec 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -407,9 +407,9 @@
 	return 0;
 }
 
-static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
-	struct orion_gpio_chip *ochip = irq_get_handler_data(irq);
+	struct orion_gpio_chip *ochip = irq_desc_get_handler_data(desc);
 	u32 cause, type;
 	int i;
 
@@ -582,8 +582,9 @@
 
 	for (i = 0; i < 4; i++) {
 		if (irqs[i]) {
-			irq_set_handler_data(irqs[i], ochip);
-			irq_set_chained_handler(irqs[i], gpio_irq_handler);
+			irq_set_chained_handler_and_data(irqs[i],
+							 gpio_irq_handler,
+							 ochip);
 		}
 	}
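The orion GPIO change is the usual conversion to irq_set_chained_handler_and_data(): handler and handler data are installed in one step, closing the window where the handler could run before its data was set, and the data is recovered from the descriptor rather than looked up by irq number. Sketched generically below, with struct acme_gpio standing in for orion_gpio_chip and a made-up status register:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

struct acme_gpio {
	void __iomem *status;		/* hypothetical pending-interrupt register */
	unsigned int irq_base;		/* first Linux irq of this bank */
};

static void acme_gpio_demux(unsigned int irq, struct irq_desc *desc)
{
	struct acme_gpio *chip = irq_desc_get_handler_data(desc);
	u32 pending = readl_relaxed(chip->status);

	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(chip->irq_base + bit);
		pending &= ~BIT(bit);
	}
}

static void acme_gpio_wire_parent(unsigned int parent_irq, struct acme_gpio *chip)
{
	irq_set_chained_handler_and_data(parent_irq, acme_gpio_demux, chip);
}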
 
diff --git a/arch/arm/plat-orion/time.c b/arch/arm/plat-orion/time.c
index 261258f..8085a8a 100644
--- a/arch/arm/plat-orion/time.c
+++ b/arch/arm/plat-orion/time.c
@@ -106,60 +106,63 @@
 	return 0;
 }
 
-static void
-orion_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static int orion_clkevt_shutdown(struct clock_event_device *evt)
 {
 	unsigned long flags;
 	u32 u;
 
 	local_irq_save(flags);
-	if (mode == CLOCK_EVT_MODE_PERIODIC) {
-		/*
-		 * Setup timer to fire at 1/HZ intervals.
-		 */
-		writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
-		writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
 
-		/*
-		 * Enable timer interrupt.
-		 */
-		u = readl(bridge_base + BRIDGE_MASK_OFF);
-		writel(u | BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF);
+	/* Disable timer */
+	u = readl(timer_base + TIMER_CTRL_OFF);
+	writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
 
-		/*
-		 * Enable timer.
-		 */
-		u = readl(timer_base + TIMER_CTRL_OFF);
-		writel(u | TIMER1_EN | TIMER1_RELOAD_EN,
-		       timer_base + TIMER_CTRL_OFF);
-	} else {
-		/*
-		 * Disable timer.
-		 */
-		u = readl(timer_base + TIMER_CTRL_OFF);
-		writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
+	/* Disable timer interrupt */
+	u = readl(bridge_base + BRIDGE_MASK_OFF);
+	writel(u & ~BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF);
 
-		/*
-		 * Disable timer interrupt.
-		 */
-		u = readl(bridge_base + BRIDGE_MASK_OFF);
-		writel(u & ~BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF);
+	/* ACK pending timer interrupt */
+	writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF);
 
-		/*
-		 * ACK pending timer interrupt.
-		 */
-		writel(bridge_timer1_clr_mask, bridge_base + BRIDGE_CAUSE_OFF);
-
-	}
 	local_irq_restore(flags);
+
+	return 0;
+}
+
+static int orion_clkevt_set_periodic(struct clock_event_device *evt)
+{
+	unsigned long flags;
+	u32 u;
+
+	local_irq_save(flags);
+
+	/* Setup timer to fire at 1/HZ intervals */
+	writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
+	writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
+
+	/* Enable timer interrupt */
+	u = readl(bridge_base + BRIDGE_MASK_OFF);
+	writel(u | BRIDGE_INT_TIMER1, bridge_base + BRIDGE_MASK_OFF);
+
+	/* Enable timer */
+	u = readl(timer_base + TIMER_CTRL_OFF);
+	writel(u | TIMER1_EN | TIMER1_RELOAD_EN, timer_base + TIMER_CTRL_OFF);
+
+	local_irq_restore(flags);
+
+	return 0;
 }
 
 static struct clock_event_device orion_clkevt = {
-	.name		= "orion_tick",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.rating		= 300,
-	.set_next_event	= orion_clkevt_next_event,
-	.set_mode	= orion_clkevt_mode,
+	.name			= "orion_tick",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.set_next_event		= orion_clkevt_next_event,
+	.set_state_shutdown	= orion_clkevt_shutdown,
+	.set_state_periodic	= orion_clkevt_set_periodic,
+	.set_state_oneshot	= orion_clkevt_shutdown,
+	.tick_resume		= orion_clkevt_shutdown,
 };
 
 static irqreturn_t orion_timer_interrupt(int irq, void *dev_id)
diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c
index d92f07f..de2b061 100644
--- a/arch/arm/plat-pxa/dma.c
+++ b/arch/arm/plat-pxa/dma.c
@@ -289,7 +289,8 @@
 		/* try grabbing a DMA channel with the requested priority */
 		for (i = 0; i < num_dma_channels; i++) {
 			if ((dma_channels[i].prio == prio) &&
-			    !dma_channels[i].name) {
+			    !dma_channels[i].name &&
+			    !pxad_toggle_reserved_channel(i)) {
 				found = 1;
 				break;
 			}
@@ -326,13 +327,14 @@
 	local_irq_save(flags);
 	DCSR(dma_ch) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
 	dma_channels[dma_ch].name = NULL;
+	pxad_toggle_reserved_channel(dma_ch);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(pxa_free_dma);
 
 static irqreturn_t dma_irq_handler(int irq, void *dev_id)
 {
-	int i, dint = DINT;
+	int i, dint = DINT, done = 0;
 	struct dma_channel *channel;
 
 	while (dint) {
@@ -341,16 +343,13 @@
 		channel = &dma_channels[i];
 		if (channel->name && channel->irq_handler) {
 			channel->irq_handler(i, channel->data);
-		} else {
-			/*
-			 * IRQ for an unregistered DMA channel:
-			 * let's clear the interrupts and disable it.
-			 */
-			printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i);
-			DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
+			done++;
 		}
 	}
-	return IRQ_HANDLED;
+	if (done)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
 }
 
 int __init pxa_init_dma(int irq, int num_ch)
@@ -372,7 +371,8 @@
 		spin_lock_init(&dma_channels[i].lock);
 	}
 
-	ret = request_irq(irq, dma_irq_handler, 0, "DMA", NULL);
+	ret = request_irq(irq, dma_irq_handler, IRQF_SHARED, "DMA",
+			  dma_channels);
 	if (ret) {
 		printk (KERN_CRIT "Wow!  Can't register IRQ for DMA\n");
 		kfree(dma_channels);
diff --git a/arch/arm/plat-pxa/include/plat/dma.h b/arch/arm/plat-pxa/include/plat/dma.h
index a7b91dc..28848b3 100644
--- a/arch/arm/plat-pxa/include/plat/dma.h
+++ b/arch/arm/plat-pxa/include/plat/dma.h
@@ -82,4 +82,19 @@
 
 void pxa_free_dma (int dma_ch);
 
+/*
+ * Cooperation with pxa_dma + dmaengine while there remains at least one pxa
+ * driver not converted to dmaengine.
+ */
+#if defined(CONFIG_PXA_DMA)
+extern int pxad_toggle_reserved_channel(int legacy_channel);
+#else
+static inline int pxad_toggle_reserved_channel(int legacy_channel)
+{
+	return 0;
+}
+#endif
+
+extern void __init pxa2xx_set_dmac_info(int nb_channels);
+
 #endif /* __PLAT_DMA_H */
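Two related changes land in the legacy pxa DMA code: channel allocation now cooperates with the new pxa_dma dmaengine driver via pxad_toggle_reserved_channel(), and the interrupt is requested with IRQF_SHARED so both drivers can sit on the same line. Sharing only works if each handler reports accurately whether the interrupt was its own, which is why the handler now returns IRQ_NONE when no channel was serviced. A generic sketch of that contract (register layout is hypothetical):

#include <linux/interrupt.h>
#include <linux/io.h>

struct acme_dma {
	void __iomem *pending;		/* hypothetical controller status register */
};

static irqreturn_t acme_dma_irq(int irq, void *dev_id)
{
	struct acme_dma *dma = dev_id;

	if (!readl_relaxed(dma->pending))
		return IRQ_NONE;	/* not ours: let the other shared handler run */

	/* acknowledge and service the completed channels here */
	return IRQ_HANDLED;
}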
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index cb8e3d6..57729b9 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -217,12 +217,6 @@
 	help
 	  Compile in platform device definition for PWM Timer
 
-config SAMSUNG_DEV_BACKLIGHT
-	bool
-	depends on SAMSUNG_DEV_PWM
-	help
-	  Compile in platform device definition LCD backlight with PWM Timer
-
 config S3C24XX_PWM
 	bool "PWM device support"
 	select PWM
@@ -231,25 +225,14 @@
 	  Support for exporting the PWM timer blocks via the pwm device
 	  system
 
-config S3C_SETUP_CAMIF
-	bool
-	help
-	  Compile in common setup code for S3C CAMIF devices
-
 config SAMSUNG_PM_GPIO
 	bool
 	default y if GPIO_SAMSUNG && PM
 	help
 	  Include legacy GPIO power management code for platforms not using
 	  pinctrl-samsung driver.
-
 endif
 
-config S5P_DEV_MFC
-	bool
-	help
-	  Compile in setup memory (init) code for MFC
-
 comment "Power management"
 
 config SAMSUNG_PM_DEBUG
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 1a29ab1..8c91176 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -20,11 +20,6 @@
 
 obj-$(CONFIG_SAMSUNG_ATAGS)	+= devs.o
 obj-$(CONFIG_SAMSUNG_ATAGS)	+= dev-uart.o
-obj-$(CONFIG_S5P_DEV_MFC)	+= s5p-dev-mfc.o
-
-obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT)	+= dev-backlight.o
-
-obj-$(CONFIG_S3C_SETUP_CAMIF)	+= setup-camif.o
 
 # PM support
 
diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c
deleted file mode 100644
index 2157c5b..0000000
--- a/arch/arm/plat-samsung/dev-backlight.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/* linux/arch/arm/plat-samsung/dev-backlight.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *              http://www.samsung.com
- *
- * Common infrastructure for PWM Backlight for Samsung boards
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/pwm_backlight.h>
-
-#include <plat/devs.h>
-#include <plat/gpio-cfg.h>
-#include <plat/backlight.h>
-
-struct samsung_bl_drvdata {
-	struct platform_pwm_backlight_data plat_data;
-	struct samsung_bl_gpio_info *gpio_info;
-};
-
-static int samsung_bl_init(struct device *dev)
-{
-	int ret = 0;
-	struct platform_pwm_backlight_data *pdata = dev->platform_data;
-	struct samsung_bl_drvdata *drvdata = container_of(pdata,
-					struct samsung_bl_drvdata, plat_data);
-	struct samsung_bl_gpio_info *bl_gpio_info = drvdata->gpio_info;
-
-	ret = gpio_request(bl_gpio_info->no, "Backlight");
-	if (ret) {
-		printk(KERN_ERR "failed to request GPIO for LCD Backlight\n");
-		return ret;
-	}
-
-	/* Configure GPIO pin with specific GPIO function for PWM timer */
-	s3c_gpio_cfgpin(bl_gpio_info->no, bl_gpio_info->func);
-
-	return 0;
-}
-
-static void samsung_bl_exit(struct device *dev)
-{
-	struct platform_pwm_backlight_data *pdata = dev->platform_data;
-	struct samsung_bl_drvdata *drvdata = container_of(pdata,
-					struct samsung_bl_drvdata, plat_data);
-	struct samsung_bl_gpio_info *bl_gpio_info = drvdata->gpio_info;
-
-	s3c_gpio_cfgpin(bl_gpio_info->no, S3C_GPIO_OUTPUT);
-	gpio_free(bl_gpio_info->no);
-}
-
-/* Initialize few important fields of platform_pwm_backlight_data
- * structure with default values. These fields can be overridden by
- * board-specific values sent from machine file.
- * For ease of operation, these fields are initialized with values
- * used by most samsung boards.
- * Users has the option of sending info about other parameters
- * for their specific boards
- */
-
-static struct samsung_bl_drvdata samsung_dfl_bl_data __initdata = {
-	.plat_data = {
-		.max_brightness = 255,
-		.dft_brightness = 255,
-		.pwm_period_ns  = 78770,
-		.enable_gpio    = -1,
-		.init           = samsung_bl_init,
-		.exit           = samsung_bl_exit,
-	},
-};
-
-static struct platform_device samsung_dfl_bl_device __initdata = {
-	.name		= "pwm-backlight",
-};
-
-/* samsung_bl_set - Set board specific data (if any) provided by user for
- * PWM Backlight control and register specific PWM and backlight device.
- * @gpio_info:	structure containing GPIO info for PWM timer
- * @bl_data:	structure containing Backlight control data
- */
-void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
-	struct platform_pwm_backlight_data *bl_data)
-{
-	int ret = 0;
-	struct platform_device *samsung_bl_device;
-	struct samsung_bl_drvdata *samsung_bl_drvdata;
-	struct platform_pwm_backlight_data *samsung_bl_data;
-
-	samsung_bl_device = kmemdup(&samsung_dfl_bl_device,
-			sizeof(struct platform_device), GFP_KERNEL);
-	if (!samsung_bl_device) {
-		printk(KERN_ERR "%s: no memory for platform dev\n", __func__);
-		return;
-	}
-
-	samsung_bl_drvdata = kmemdup(&samsung_dfl_bl_data,
-				sizeof(samsung_dfl_bl_data), GFP_KERNEL);
-	if (!samsung_bl_drvdata) {
-		printk(KERN_ERR "%s: no memory for platform dev\n", __func__);
-		goto err_data;
-	}
-	samsung_bl_device->dev.platform_data = &samsung_bl_drvdata->plat_data;
-	samsung_bl_drvdata->gpio_info = gpio_info;
-	samsung_bl_data = &samsung_bl_drvdata->plat_data;
-
-	/* Copy board specific data provided by user */
-	samsung_bl_data->pwm_id = bl_data->pwm_id;
-	samsung_bl_device->dev.parent = &samsung_device_pwm.dev;
-
-	if (bl_data->max_brightness)
-		samsung_bl_data->max_brightness = bl_data->max_brightness;
-	if (bl_data->dft_brightness)
-		samsung_bl_data->dft_brightness = bl_data->dft_brightness;
-	if (bl_data->lth_brightness)
-		samsung_bl_data->lth_brightness = bl_data->lth_brightness;
-	if (bl_data->pwm_period_ns)
-		samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns;
-	if (bl_data->enable_gpio >= 0)
-		samsung_bl_data->enable_gpio = bl_data->enable_gpio;
-	if (bl_data->init)
-		samsung_bl_data->init = bl_data->init;
-	if (bl_data->notify)
-		samsung_bl_data->notify = bl_data->notify;
-	if (bl_data->notify_after)
-		samsung_bl_data->notify_after = bl_data->notify_after;
-	if (bl_data->exit)
-		samsung_bl_data->exit = bl_data->exit;
-	if (bl_data->check_fb)
-		samsung_bl_data->check_fb = bl_data->check_fb;
-
-	/* Register the Backlight dev */
-	ret = platform_device_register(samsung_bl_device);
-	if (ret) {
-		printk(KERN_ERR "failed to register backlight device: %d\n", ret);
-		goto err_plat_reg2;
-	}
-
-	return;
-
-err_plat_reg2:
-	kfree(samsung_bl_data);
-err_data:
-	kfree(samsung_bl_device);
-	return;
-}
diff --git a/arch/arm/plat-samsung/include/plat/ata-core.h b/arch/arm/plat-samsung/include/plat/ata-core.h
deleted file mode 100644
index f5a4ec7..0000000
--- a/arch/arm/plat-samsung/include/plat/ata-core.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/ata-core.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * Samsung CF-ATA Controller core functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_PLAT_ATA_CORE_H
-#define __ASM_PLAT_ATA_CORE_H __FILE__
-
-/* These functions are only for use with the core support code, such as
- * the cpu specific initialisation code
-*/
-
-/* re-define device name depending on support. */
-static inline void s3c_cfcon_setname(char *name)
-{
-#ifdef CONFIG_SAMSUNG_DEV_IDE
-	s3c_device_cfcon.name = name;
-#endif
-}
-
-#endif /* __ASM_PLAT_ATA_CORE_H */
diff --git a/arch/arm/plat-samsung/include/plat/backlight.h b/arch/arm/plat-samsung/include/plat/backlight.h
deleted file mode 100644
index ad530c7..0000000
--- a/arch/arm/plat-samsung/include/plat/backlight.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/backlight.h
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- *              http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_PLAT_BACKLIGHT_H
-#define __ASM_PLAT_BACKLIGHT_H __FILE__
-
-/* samsung_bl_gpio_info - GPIO info for PWM Backlight control
- * @no:		GPIO number for PWM timer out
- * @func:	Special function of GPIO line for PWM timer
- */
-struct samsung_bl_gpio_info {
-	int no;
-	int func;
-};
-
-extern void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info,
-	struct platform_pwm_backlight_data *bl_data);
-
-#endif /* __ASM_PLAT_BACKLIGHT_H */
diff --git a/arch/arm/plat-samsung/include/plat/fb-core.h b/arch/arm/plat-samsung/include/plat/fb-core.h
deleted file mode 100644
index bca383e..0000000
--- a/arch/arm/plat-samsung/include/plat/fb-core.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/plat-samsung/include/plat/fb-core.h
- *
- * Copyright 2010 Samsung Electronics Co., Ltd.
- *	Pawel Osciak <p.osciak@samsung.com>
- *
- * Samsung framebuffer driver core functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ASM_PLAT_FB_CORE_H
-#define __ASM_PLAT_FB_CORE_H __FILE__
-
-/*
- * These functions are only for use with the core support code, such as
- * the CPU-specific initialization code.
- */
-
-/* Re-define device name depending on support. */
-static inline void s3c_fb_setname(char *name)
-{
-#ifdef CONFIG_S3C_DEV_FB
-	s3c_device_fb.name = name;
-#endif
-}
-
-#endif /* __ASM_PLAT_FB_CORE_H */
diff --git a/arch/arm/plat-samsung/include/plat/irq-uart.h b/arch/arm/plat-samsung/include/plat/irq-uart.h
deleted file mode 100644
index a9331e4..0000000
--- a/arch/arm/plat-samsung/include/plat/irq-uart.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/plat-samsung/include/plat/irq-uart.h
- *
- * Copyright (c) 2010 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * Header file for Samsung SoC UART IRQ demux for S3C64XX and later
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-struct s3c_uart_irq {
-	void __iomem	*regs;
-	unsigned int	 base_irq;
-	unsigned int	 parent_irq;
-};
-
-extern void s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs);
-
diff --git a/arch/arm/plat-samsung/include/plat/keypad-core.h b/arch/arm/plat-samsung/include/plat/keypad-core.h
deleted file mode 100644
index d513e1b..0000000
--- a/arch/arm/plat-samsung/include/plat/keypad-core.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * linux/arch/arm/plat-samsung/include/plat/keypad-core.h
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * Samsung keypad controller core function
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#ifndef __ASM_ARCH_KEYPAD_CORE_H
-#define __ASM_ARCH_KEYPAD_CORE_H
-
-/* These function are only for use with the core support code, such as
- * the cpu specific initialisation code
- */
-
-/* re-define device name depending on support. */
-static inline void samsung_keypad_setname(char *name)
-{
-#ifdef CONFIG_SAMSUNG_DEV_KEYPAD
-	samsung_device_keypad.name = name;
-#endif
-}
-
-#endif /* __ASM_ARCH_KEYPAD_CORE_H */
diff --git a/arch/arm/plat-samsung/include/plat/nand-core.h b/arch/arm/plat-samsung/include/plat/nand-core.h
deleted file mode 100644
index 6de2078..0000000
--- a/arch/arm/plat-samsung/include/plat/nand-core.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* arch/arm/plat-samsung/include/plat/nand-core.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * S3C -  Nand Controller core functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_NAND_CORE_H
-#define __ASM_ARCH_NAND_CORE_H __FILE__
-
-/* These functions are only for use with the core support code, such as
- * the cpu specific initialisation code
- */
-
-/* re-define device name depending on support. */
-static inline void s3c_nand_setname(char *name)
-{
-#ifdef CONFIG_S3C_DEV_NAND
-	s3c_device_nand.name = name;
-#endif
-}
-
-#endif /* __ASM_ARCH_NAND_CORE_H */
diff --git a/arch/arm/plat-samsung/include/plat/onenand-core.h b/arch/arm/plat-samsung/include/plat/onenand-core.h
deleted file mode 100644
index 7701cb7..0000000
--- a/arch/arm/plat-samsung/include/plat/onenand-core.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * linux/arch/arm/plat-samsung/onenand-core.h
- *
- *  Copyright (c) 2010 Samsung Electronics
- *  Kyungmin Park <kyungmin.park@samsung.com>
- *  Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * Samsung OneNAD Controller core functions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_ONENAND_CORE_H
-#define __ASM_ARCH_ONENAND_CORE_H __FILE__
-
-/* These functions are only for use with the core support code, such as
- * the cpu specific initialisation code
- */
-
-/* re-define device name depending on support. */
-static inline void s3c_onenand_setname(char *name)
-{
-#ifdef CONFIG_S3C_DEV_ONENAND
-	s3c_device_onenand.name = name;
-#endif
-}
-
-static inline void s3c64xx_onenand1_setname(char *name)
-{
-#ifdef CONFIG_S3C64XX_DEV_ONENAND1
-	s3c64xx_device_onenand1.name = name;
-#endif
-}
-
-#endif /* __ASM_ARCH_ONENAND_CORE_H */
diff --git a/arch/arm/plat-samsung/include/plat/regs-srom.h b/arch/arm/plat-samsung/include/plat/regs-srom.h
deleted file mode 100644
index 9b6729c..0000000
--- a/arch/arm/plat-samsung/include/plat/regs-srom.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/regs-srom.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com
- *
- * S5P SROMC register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_REGS_SROM_H
-#define __PLAT_SAMSUNG_REGS_SROM_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_SROMREG(x)		(S5P_VA_SROMC + (x))
-
-#define S5P_SROM_BW		S5P_SROMREG(0x0)
-#define S5P_SROM_BC0		S5P_SROMREG(0x4)
-#define S5P_SROM_BC1		S5P_SROMREG(0x8)
-#define S5P_SROM_BC2		S5P_SROMREG(0xc)
-#define S5P_SROM_BC3		S5P_SROMREG(0x10)
-#define S5P_SROM_BC4		S5P_SROMREG(0x14)
-#define S5P_SROM_BC5		S5P_SROMREG(0x18)
-
-/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
-
-#define S5P_SROM_BW__DATAWIDTH__SHIFT		0
-#define S5P_SROM_BW__ADDRMODE__SHIFT		1
-#define S5P_SROM_BW__WAITENABLE__SHIFT		2
-#define S5P_SROM_BW__BYTEENABLE__SHIFT		3
-
-#define S5P_SROM_BW__CS_MASK			0xf
-
-#define S5P_SROM_BW__NCS0__SHIFT		0
-#define S5P_SROM_BW__NCS1__SHIFT		4
-#define S5P_SROM_BW__NCS2__SHIFT		8
-#define S5P_SROM_BW__NCS3__SHIFT		12
-#define S5P_SROM_BW__NCS4__SHIFT		16
-#define S5P_SROM_BW__NCS5__SHIFT		20
-
-/* applies to same to BCS0 - BCS3 */
-
-#define S5P_SROM_BCX__PMC__SHIFT		0
-#define S5P_SROM_BCX__TACP__SHIFT		4
-#define S5P_SROM_BCX__TCAH__SHIFT		8
-#define S5P_SROM_BCX__TCOH__SHIFT		12
-#define S5P_SROM_BCX__TACC__SHIFT		16
-#define S5P_SROM_BCX__TCOS__SHIFT		24
-#define S5P_SROM_BCX__TACS__SHIFT		28
-
-#endif /* __PLAT_SAMSUNG_REGS_SROM_H */
diff --git a/arch/arm/plat-samsung/include/plat/regs-usb-hsotg-phy.h b/arch/arm/plat-samsung/include/plat/regs-usb-hsotg-phy.h
deleted file mode 100644
index fcf2796..0000000
--- a/arch/arm/plat-samsung/include/plat/regs-usb-hsotg-phy.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* arch/arm/plat-s3c/include/plat/regs-usb-hsotg-phy.h
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      http://armlinux.simtec.co.uk/
- *      Ben Dooks <ben@simtec.co.uk>
- *
- * S3C - USB2.0 Highspeed/OtG device PHY registers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-/* Note, this is a separate header file as some of the clock framework
- * needs to touch this if the clk_48m is used as the USB OHCI or other
- * peripheral source.
-*/
-
-#ifndef __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H
-#define __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H __FILE__
-
-/* S3C64XX_PA_USB_HSPHY */
-
-#define S3C_HSOTG_PHYREG(x)	((x) + S3C_VA_USB_HSPHY)
-
-#define S3C_PHYPWR				S3C_HSOTG_PHYREG(0x00)
-#define S3C_PHYPWR_NORMAL_MASK			(0x19 << 0)
-#define S3C_PHYPWR_OTG_DISABLE			(1 << 4)
-#define S3C_PHYPWR_ANALOG_POWERDOWN		(1 << 3)
-#define SRC_PHYPWR_FORCE_SUSPEND		(1 << 1)
-
-#define S3C_PHYCLK				S3C_HSOTG_PHYREG(0x04)
-#define S3C_PHYCLK_MODE_USB11			(1 << 6)
-#define S3C_PHYCLK_EXT_OSC			(1 << 5)
-#define S3C_PHYCLK_CLK_FORCE			(1 << 4)
-#define S3C_PHYCLK_ID_PULL			(1 << 2)
-#define S3C_PHYCLK_CLKSEL_MASK			(0x3 << 0)
-#define S3C_PHYCLK_CLKSEL_SHIFT			(0)
-#define S3C_PHYCLK_CLKSEL_48M			(0x0 << 0)
-#define S3C_PHYCLK_CLKSEL_12M			(0x2 << 0)
-#define S3C_PHYCLK_CLKSEL_24M			(0x3 << 0)
-
-#define S3C_RSTCON				S3C_HSOTG_PHYREG(0x08)
-#define S3C_RSTCON_PHYCLK			(1 << 2)
-#define S3C_RSTCON_HCLK				(1 << 1)
-#define S3C_RSTCON_PHY				(1 << 0)
-
-#define S3C_PHYTUNE				S3C_HSOTG_PHYREG(0x20)
-
-#endif /* __PLAT_S3C64XX_REGS_USB_HSOTG_PHY_H */
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
deleted file mode 100644
index 0386b8f..0000000
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/plat-s3c/include/plat/watchdog-reset.h
- *
- * Copyright (c) 2008 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - System define for arch_reset() function
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_WATCHDOG_RESET_H
-#define __PLAT_SAMSUNG_WATCHDOG_RESET_H
-
-extern void samsung_wdt_reset(void);
-extern void samsung_wdt_reset_of_init(void);
-extern void samsung_wdt_reset_init(void __iomem *base);
-
-#endif /* __PLAT_SAMSUNG_WATCHDOG_RESET_H */
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 318175f..7d95663 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@
 	select ARM_GIC_V2M if PCI_MSI
 	select ARM_GIC_V3
 	select ARM_GIC_V3_ITS if PCI_MSI
+	select ARM_PSCI_FW
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS
 	select COMMON_CLK
@@ -28,7 +29,7 @@
 	select EDAC_SUPPORT
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
-	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_EARLY_IOREMAP
 	select GENERIC_IRQ_PROBE
@@ -53,6 +54,7 @@
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_CC_STACKPROTECTOR
 	select HAVE_CMPXCHG_DOUBLE
+	select HAVE_CMPXCHG_LOCAL
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DMA_API_DEBUG
@@ -104,6 +106,10 @@
 config STACKTRACE_SUPPORT
 	def_bool y
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config LOCKDEP_SUPPORT
 	def_bool y
 
@@ -113,6 +119,14 @@
 config RWSEM_XCHGADD_ALGORITHM
 	def_bool y
 
+config GENERIC_BUG
+	def_bool y
+	depends on BUG
+
+config GENERIC_BUG_RELATIVE_POINTERS
+	def_bool y
+	depends on GENERIC_BUG
+
 config GENERIC_HWEIGHT
 	def_bool y
 
@@ -137,6 +151,9 @@
 config NEED_SG_DMA_LENGTH
 	def_bool y
 
+config SMP
+	def_bool y
+
 config SWIOTLB
 	def_bool y
 
@@ -160,110 +177,7 @@
 
 source "kernel/Kconfig.freezer"
 
-menu "Platform selection"
-
-config ARCH_EXYNOS
-	bool
-	help
-	  This enables support for Samsung Exynos SoC family
-
-config ARCH_EXYNOS7
-	bool "ARMv8 based Samsung Exynos7"
-	select ARCH_EXYNOS
-	select COMMON_CLK_SAMSUNG
-	select HAVE_S3C2410_WATCHDOG if WATCHDOG
-	select HAVE_S3C_RTC if RTC_CLASS
-	select PINCTRL
-	select PINCTRL_EXYNOS
-
-	help
-	  This enables support for Samsung Exynos7 SoC family
-
-config ARCH_FSL_LS2085A
-	bool "Freescale LS2085A SOC"
-	help
-	  This enables support for Freescale LS2085A SOC.
-
-config ARCH_HISI
-	bool "Hisilicon SoC Family"
-	help
-	  This enables support for Hisilicon ARMv8 SoC family
-
-config ARCH_MEDIATEK
-	bool "Mediatek MT65xx & MT81xx ARMv8 SoC"
-	select ARM_GIC
-	select PINCTRL
-	help
-	  Support for Mediatek MT65xx & MT81xx ARMv8 SoCs
-
-config ARCH_QCOM
-	bool "Qualcomm Platforms"
-	select PINCTRL
-	help
-	  This enables support for the ARMv8 based Qualcomm chipsets.
-
-config ARCH_SEATTLE
-	bool "AMD Seattle SoC Family"
-	help
-	  This enables support for AMD Seattle SOC Family
-
-config ARCH_TEGRA
-	bool "NVIDIA Tegra SoC Family"
-	select ARCH_HAS_RESET_CONTROLLER
-	select ARCH_REQUIRE_GPIOLIB
-	select CLKDEV_LOOKUP
-	select CLKSRC_MMIO
-	select CLKSRC_OF
-	select GENERIC_CLOCKEVENTS
-	select HAVE_CLK
-	select PINCTRL
-	select RESET_CONTROLLER
-	help
-	  This enables support for the NVIDIA Tegra SoC family.
-
-config ARCH_TEGRA_132_SOC
-	bool "NVIDIA Tegra132 SoC"
-	depends on ARCH_TEGRA
-	select PINCTRL_TEGRA124
-	select USB_ULPI if USB_PHY
-	select USB_ULPI_VIEWPORT if USB_PHY
-	help
-	  Enable support for NVIDIA Tegra132 SoC, based on the Denver
-	  ARMv8 CPU.  The Tegra132 SoC is similar to the Tegra124 SoC,
-	  but contains an NVIDIA Denver CPU complex in place of
-	  Tegra124's "4+1" Cortex-A15 CPU complex.
-
-config ARCH_SPRD
-	bool "Spreadtrum SoC platform"
-	help
-	  Support for Spreadtrum ARM based SoCs
-
-config ARCH_THUNDER
-	bool "Cavium Inc. Thunder SoC Family"
-	help
-	  This enables support for Cavium's Thunder Family of SoCs.
-
-config ARCH_VEXPRESS
-	bool "ARMv8 software model (Versatile Express)"
-	select ARCH_REQUIRE_GPIOLIB
-	select COMMON_CLK_VERSATILE
-	select POWER_RESET_VEXPRESS
-	select VEXPRESS_CONFIG
-	help
-	  This enables support for the ARMv8 software model (Versatile
-	  Express).
-
-config ARCH_XGENE
-	bool "AppliedMicro X-Gene SOC Family"
-	help
-	  This enables support for AppliedMicro X-Gene SOC Family
-
-config ARCH_ZYNQMP
-	bool "Xilinx ZynqMP Family"
-	help
-	  This enables support for Xilinx ZynqMP Family
-
-endmenu
+source "arch/arm64/Kconfig.platforms"
 
 menu "Bus support"
 
@@ -474,22 +388,8 @@
        help
          Say Y if you plan on running a kernel in big-endian mode.
 
-config SMP
-	bool "Symmetric Multi-Processing"
-	help
-	  This enables support for systems with more than one CPU.  If
-	  you say N here, the kernel will run on single and
-	  multiprocessor machines, but will use only one CPU of a
-	  multiprocessor machine. If you say Y here, the kernel will run
-	  on many, but not all, single processor machines. On a single
-	  processor machine, the kernel will run faster if you say N
-	  here.
-
-	  If you don't know what to do here, say N.
-
 config SCHED_MC
 	bool "Multi-core scheduler support"
-	depends on SMP
 	help
 	  Multi-core scheduler support improves the CPU scheduler's decision
 	  making when dealing with multi-core CPU chips at a cost of slightly
@@ -497,7 +397,6 @@
 
 config SCHED_SMT
 	bool "SMT scheduler support"
-	depends on SMP
 	help
 	  Improves the CPU scheduler's decision making when dealing with
 	  MultiThreading at a cost of slightly increased overhead in some
@@ -506,23 +405,17 @@
 config NR_CPUS
 	int "Maximum number of CPUs (2-4096)"
 	range 2 4096
-	depends on SMP
 	# These have to remain sorted largest to smallest
 	default "64"
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
-	depends on SMP
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
 
 source kernel/Kconfig.preempt
 
-config UP_LATE_INIT
-       def_bool y
-       depends on !SMP
-
 config HZ
 	int
 	default 100
@@ -664,6 +557,53 @@
 	  If unsure, say Y
 endif
 
+menu "ARMv8.1 architectural features"
+
+config ARM64_HW_AFDBM
+	bool "Support for hardware updates of the Access and Dirty page flags"
+	default y
+	help
+	  The ARMv8.1 architecture extensions introduce support for
+	  hardware updates of the access and dirty information in page
+	  table entries. When enabled in TCR_EL1 (HA and HD bits) on
+	  capable processors, accesses to pages with PTE_AF cleared will
+	  set this bit instead of raising an access flag fault.
+	  Similarly, writes to read-only pages with the DBM bit set will
+	  clear the read-only bit (AP[2]) instead of raising a
+	  permission fault.
+
+	  Kernels built with this configuration option enabled continue
+	  to work on pre-ARMv8.1 hardware and the performance impact is
+	  minimal. If unsure, say Y.
+
+config ARM64_PAN
+	bool "Enable support for Privileged Access Never (PAN)"
+	default y
+	help
+	 Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+	 prevents the kernel or hypervisor from accessing user-space (EL0)
+	 memory directly.
+
+	 Choosing this option will cause any unprotected (not using
+	 copy_to_user et al) memory access to fail with a permission fault.
+
+	 The feature is detected at runtime, and will remain as a 'nop'
+	 instruction if the cpu does not implement the feature.
+
+config ARM64_LSE_ATOMICS
+	bool "Atomic instructions"
+	help
+	  As part of the Large System Extensions, ARMv8.1 introduces new
+	  atomic instructions that are designed specifically to scale in
+	  very large systems.
+
+	  Say Y here to make use of these instructions for the in-kernel
+	  atomic routines. This incurs a small overhead on CPUs that do
+	  not support these instructions and requires the kernel to be
+	  built with binutils >= 2.25.
+
+endmenu
+
 endmenu
 
 menu "Boot options"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
new file mode 100644
index 0000000..23800a1
--- /dev/null
+++ b/arch/arm64/Kconfig.platforms
@@ -0,0 +1,125 @@
+menu "Platform selection"
+
+config ARCH_BCM_IPROC
+	bool "Broadcom iProc SoC Family"
+	help
+	  This enables support for Broadcom iProc based SoCs
+
+config ARCH_BERLIN
+	bool "Marvell Berlin SoC Family"
+	select DW_APB_ICTL
+	help
+	  This enables support for Marvell Berlin SoC Family
+
+config ARCH_EXYNOS
+	bool
+	help
+	  This enables support for Samsung Exynos SoC family
+
+config ARCH_EXYNOS7
+	bool "ARMv8 based Samsung Exynos7"
+	select ARCH_EXYNOS
+	select COMMON_CLK_SAMSUNG
+	select HAVE_S3C2410_WATCHDOG if WATCHDOG
+	select HAVE_S3C_RTC if RTC_CLASS
+	select PINCTRL
+	select PINCTRL_EXYNOS
+
+	help
+	  This enables support for Samsung Exynos7 SoC family
+
+config ARCH_FSL_LS2085A
+	bool "Freescale LS2085A SOC"
+	help
+	  This enables support for Freescale LS2085A SOC.
+
+config ARCH_HISI
+	bool "Hisilicon SoC Family"
+	help
+	  This enables support for Hisilicon ARMv8 SoC family
+
+config ARCH_MEDIATEK
+	bool "Mediatek MT65xx & MT81xx ARMv8 SoC"
+	select ARM_GIC
+	select PINCTRL
+	help
+	  Support for Mediatek MT65xx & MT81xx ARMv8 SoCs
+
+config ARCH_QCOM
+	bool "Qualcomm Platforms"
+	select PINCTRL
+	help
+	  This enables support for the ARMv8 based Qualcomm chipsets.
+
+config ARCH_ROCKCHIP
+	bool "Rockchip Platforms"
+	select ARCH_HAS_RESET_CONTROLLER
+	select ARCH_REQUIRE_GPIOLIB
+	select PINCTRL
+	select PINCTRL_ROCKCHIP
+	help
+	  This enables support for the ARMv8 based Rockchip chipsets,
+	  like the RK3368.
+
+config ARCH_SEATTLE
+	bool "AMD Seattle SoC Family"
+	help
+	  This enables support for AMD Seattle SOC Family
+
+config ARCH_TEGRA
+	bool "NVIDIA Tegra SoC Family"
+	select ARCH_HAS_RESET_CONTROLLER
+	select ARCH_REQUIRE_GPIOLIB
+	select CLKDEV_LOOKUP
+	select CLKSRC_MMIO
+	select CLKSRC_OF
+	select GENERIC_CLOCKEVENTS
+	select HAVE_CLK
+	select PINCTRL
+	select RESET_CONTROLLER
+	help
+	  This enables support for the NVIDIA Tegra SoC family.
+
+config ARCH_TEGRA_132_SOC
+	bool "NVIDIA Tegra132 SoC"
+	depends on ARCH_TEGRA
+	select PINCTRL_TEGRA124
+	select USB_ULPI if USB_PHY
+	select USB_ULPI_VIEWPORT if USB_PHY
+	help
+	  Enable support for NVIDIA Tegra132 SoC, based on the Denver
+	  ARMv8 CPU.  The Tegra132 SoC is similar to the Tegra124 SoC,
+	  but contains an NVIDIA Denver CPU complex in place of
+	  Tegra124's "4+1" Cortex-A15 CPU complex.
+
+config ARCH_SPRD
+	bool "Spreadtrum SoC platform"
+	help
+	  Support for Spreadtrum ARM based SoCs
+
+config ARCH_THUNDER
+	bool "Cavium Inc. Thunder SoC Family"
+	help
+	  This enables support for Cavium's Thunder Family of SoCs.
+
+config ARCH_VEXPRESS
+	bool "ARMv8 software model (Versatile Express)"
+	select ARCH_REQUIRE_GPIOLIB
+	select COMMON_CLK_VERSATILE
+	select POWER_RESET_VEXPRESS
+	select VEXPRESS_CONFIG
+	help
+	  This enables support for the ARMv8 software model (Versatile
+	  Express).
+
+config ARCH_XGENE
+	bool "AppliedMicro X-Gene SOC Family"
+	help
+	  This enables support for AppliedMicro X-Gene SOC Family
+
+config ARCH_ZYNQMP
+	bool "Xilinx ZynqMP Family"
+	help
+	  This enables support for Xilinx ZynqMP Family
+
+endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4d2a925..15ff5b4 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -17,7 +17,18 @@
 
 KBUILD_DEFCONFIG := defconfig
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only
+# Check for binutils support for specific extensions
+lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
+
+ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
+  ifeq ($(lseinstr),)
+$(warning LSE atomics not supported by binutils)
+  endif
+endif
+
+KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
+KBUILD_AFLAGS	+= $(lseinstr)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS	+= -mbig-endian
 AS		+= -EB
@@ -58,7 +69,10 @@
 
 boot := arch/arm64/boot
 
-Image Image.gz: vmlinux
+Image: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install: vmlinux
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 5a0e3ab..abcbba2 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -19,9 +19,21 @@
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
+$(obj)/Image.bz2: $(obj)/Image FORCE
+	$(call if_changed,bzip2)
+
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
+$(obj)/Image.lz4: $(obj)/Image FORCE
+	$(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+	$(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+	$(call if_changed,lzo)
+
 install: $(obj)/Image
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 38913be..d9f8833 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,12 +1,15 @@
 dts-dirs += amd
 dts-dirs += apm
 dts-dirs += arm
+dts-dirs += broadcom
 dts-dirs += cavium
 dts-dirs += exynos
 dts-dirs += freescale
 dts-dirs += hisilicon
+dts-dirs += marvell
 dts-dirs += mediatek
 dts-dirs += qcom
+dts-dirs += rockchip
 dts-dirs += sprd
 dts-dirs += xilinx
 
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 58093ed..d831bc2 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -490,7 +490,8 @@
 				0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
 			reg-names = "csr", "cfg";
 			ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000   /* io */
-				  0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+				  0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000   /* mem */
+				  0x43000000 0xf0 0x00000000 0xf0 0x00000000 0x10 0x00000000>; /* mem */
 			dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
 				      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
 			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -513,8 +514,9 @@
 			reg = < 0x00 0x1f2c0000 0x0 0x00010000   /* Controller registers */
 				0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */
 			reg-names = "csr", "cfg";
-			ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000   /* io  */
-				  0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */
+			ranges = <0x01000000 0x00 0x00000000 0xd0 0x10000000 0x00 0x00010000   /* io  */
+				  0x02000000 0x00 0x80000000 0xd1 0x80000000 0x00 0x80000000   /* mem */
+				  0x43000000 0xd8 0x00000000 0xd8 0x00000000 0x08 0x00000000>; /* mem */
 			dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
 				      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
 			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -537,8 +539,9 @@
 			reg =  < 0x00 0x1f2d0000 0x0 0x00010000   /* Controller registers */
 				 0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */
 			reg-names = "csr", "cfg";
-			ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000   /* io  */
-				  0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */
+			ranges = <0x01000000 0x00 0x00000000 0x90 0x10000000 0x00 0x00010000   /* io  */
+				  0x02000000 0x00 0x80000000 0x91 0x80000000 0x00 0x80000000   /* mem */
+				  0x43000000 0x94 0x00000000 0x94 0x00000000 0x04 0x00000000>; /* mem */
 			dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
 				      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
 			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -561,8 +564,9 @@
 			reg = < 0x00 0x1f500000 0x0 0x00010000   /* Controller registers */
 				0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */
 			reg-names = "csr", "cfg";
-			ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000   /* io   */
-				  0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem  */
+			ranges = <0x01000000 0x00 0x00000000 0xa0 0x10000000 0x00 0x00010000   /* io  */
+				  0x02000000 0x00 0x80000000 0xa1 0x80000000 0x00 0x80000000   /* mem */
+				  0x43000000 0xb0 0x00000000 0xb0 0x00000000 0x10 0x00000000>; /* mem */
 			dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
 				      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
 			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -585,8 +589,9 @@
 			reg = < 0x00 0x1f510000 0x0 0x00010000   /* Controller registers */
 				0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */
 			reg-names = "csr", "cfg";
-			ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000   /* io  */
-				  0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */
+			ranges = <0x01000000 0x00 0x00000000 0xc0 0x10000000 0x00 0x00010000   /* io  */
+				  0x02000000 0x00 0x80000000 0xc1 0x80000000 0x00 0x80000000   /* mem */
+				  0x43000000 0xc8 0x00000000 0xc8 0x00000000 0x08 0x00000000>; /* mem */
 			dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
 				      0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
 			interrupt-map-mask = <0x0 0x0 0x0 0x7>;
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 021e0f4..637e046 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -136,6 +136,8 @@
 					clock-names = "refclk", "timclk", "apb_pclk";
 					#clock-cells = <1>;
 					clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+					assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+					assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
 				};
 
 				apbregs@010000 {
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index c46cbb2..88a7583 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -74,6 +74,8 @@
 				clock-names = "refclk", "timclk", "apb_pclk";
 				#clock-cells = <1>;
 				clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+				assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+				assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
 			};
 
 			aaci@040000 {
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
new file mode 100644
index 0000000..e21fe66
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
new file mode 100644
index 0000000..244baf8
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -0,0 +1,59 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2015 Broadcom Corporation.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom Corporation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "ns2.dtsi"
+
+/ {
+	model = "Broadcom NS2 SVK";
+	compatible = "brcm,ns2-svk", "brcm,ns2";
+
+	aliases {
+		serial0 = &uart3;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x000000000 0x80000000 0x00000000 0x40000000>;
+	};
+
+	soc: soc {
+		uart3: serial@66130000 {
+			status = "ok";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
new file mode 100644
index 0000000..3c92d92
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -0,0 +1,118 @@
+/*
+ *  BSD LICENSE
+ *
+ *  Copyright(c) 2015 Broadcom Corporation.  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *    * Redistributions of source code must retain the above copyright
+ *      notice, this list of conditions and the following disclaimer.
+ *    * Redistributions in binary form must reproduce the above copyright
+ *      notice, this list of conditions and the following disclaimer in
+ *      the documentation and/or other materials provided with the
+ *      distribution.
+ *    * Neither the name of Broadcom Corporation nor the names of its
+ *      contributors may be used to endorse or promote products derived
+ *      from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/memreserve/ 0x84b00000 0x00000008;
+
+/ {
+	compatible = "brcm,ns2";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0 0>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x84b00000>;
+		};
+
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0 1>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x84b00000>;
+		};
+
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0 2>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x84b00000>;
+		};
+
+		cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a57", "arm,armv8";
+			reg = <0 3>;
+			enable-method = "spin-table";
+			cpu-release-addr = <0 0x84b00000>;
+		};
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
+			      IRQ_TYPE_EDGE_RISING)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
+			      IRQ_TYPE_EDGE_RISING)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
+			      IRQ_TYPE_EDGE_RISING)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
+			      IRQ_TYPE_EDGE_RISING)>;
+	};
+
+	soc: soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0 0xffffffff>;
+
+		gic: interrupt-controller@65210000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			reg = <0x65210000 0x1000>,
+			      <0x65220000 0x1000>,
+			      <0x65240000 0x2000>,
+			      <0x65260000 0x1000>;
+		};
+
+		uart3: serial@66130000 {
+			compatible = "snps,dw-apb-uart";
+			reg = <0x66130000 0x100>;
+			interrupts = <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>;
+			reg-shift = <2>;
+			reg-io-width = <4>;
+			clock-frequency = <23961600>;
+			status = "disabled";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
new file mode 100644
index 0000000..e2f6afa
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_BERLIN) += berlin4ct-dmp.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts b/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
new file mode 100644
index 0000000..0d70d39
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 Marvell Technology Group Ltd.
+ *
+ * Author: Jisheng Zhang <jszhang@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "berlin4ct.dtsi"
+
+/ {
+	model = "Marvell BG4CT DMP board";
+	compatible = "marvell,berlin4ct-dmp", "marvell,berlin4ct", "marvell,berlin";
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		/* the first 16MB is for firmwares' usage */
+		reg = <0 0x01000000 0 0x7f000000>;
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct.dtsi b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
new file mode 100644
index 0000000..dd4a10d
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2015 Marvell Technology Group Ltd.
+ *
+ * Author: Jisheng Zhang <jszhang@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "marvell,berlin4ct", "marvell,berlin";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		serial0 = &uart0;
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x0>;
+			enable-method = "psci";
+		};
+
+		cpu1: cpu@1 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x1>;
+			enable-method = "psci";
+		};
+
+		cpu2: cpu@2 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x2>;
+			enable-method = "psci";
+		};
+
+		cpu3: cpu@3 {
+			compatible = "arm,cortex-a53", "arm,armv8";
+			device_type = "cpu";
+			reg = <0x3>;
+			enable-method = "psci";
+		};
+	};
+
+	osc: osc {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <25000000>;
+	};
+
+	pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu0>,
+				     <&cpu1>,
+				     <&cpu2>,
+				     <&cpu3>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges = <0 0 0xf7000000 0x1000000>;
+
+		gic: interrupt-controller@901000 {
+			compatible = "arm,gic-400";
+			#interrupt-cells = <3>;
+			interrupt-controller;
+			reg = <0x901000 0x1000>,
+			      <0x902000 0x2000>,
+			      <0x904000 0x2000>,
+			      <0x906000 0x2000>;
+			interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+		};
+
+		apb@fc0000 {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 0xfc0000 0x10000>;
+			interrupt-parent = <&sic>;
+
+			sic: interrupt-controller@1000 {
+				compatible = "snps,dw-apb-ictl";
+				reg = <0x1000 0x30>;
+				interrupt-controller;
+				#interrupt-cells = <1>;
+				interrupt-parent = <&gic>;
+				interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+			};
+
+			uart0: uart@d000 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0xd000 0x100>;
+				interrupts = <8>;
+				clocks = <&osc>;
+				reg-shift = <2>;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 3ce2462..e0a4bff 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
 dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
 
 always		:= $(dtb-y)
diff --git a/arch/arm64/boot/dts/mediatek/mt6795-evb.dts b/arch/arm64/boot/dts/mediatek/mt6795-evb.dts
new file mode 100644
index 0000000..ad665f5
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6795-evb.dts
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt6795.dtsi"
+
+/ {
+	model = "MediaTek MT6795 Evaluation Board";
+	compatible = "mediatek,mt6795-evb", "mediatek,mt6795";
+
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+		serial3 = &uart3;
+	};
+
+	memory@40000000 {
+		device_type = "memory";
+		reg = <0 0x40000000 0 0x1e800000>;
+	};
+
+	chosen {
+		stdout-path = "serial0:921600n8";
+	};
+};
+
+&uart0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt6795.dtsi b/arch/arm64/boot/dts/mediatek/mt6795.dtsi
new file mode 100644
index 0000000..c85659d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6795.dtsi
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+	compatible = "mediatek,mt6795";
+	interrupt-parent = <&sysirq>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x000>;
+		};
+
+		cpu1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x001>;
+		};
+
+		cpu2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x002>;
+		};
+
+		cpu3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x003>;
+		};
+
+		cpu4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x100>;
+		};
+
+		cpu5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x101>;
+		};
+
+		cpu6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x102>;
+		};
+
+		cpu7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53";
+			enable-method = "psci";
+			reg = <0x103>;
+		};
+	};
+
+	system_clk: dummy13m {
+		compatible = "fixed-clock";
+		clock-frequency = <13000000>;
+		#clock-cells = <0>;
+	};
+
+	rtc_clk: dummy32k {
+		compatible = "fixed-clock";
+		clock-frequency = <32000>;
+		#clock-cells = <0>;
+	};
+
+	uart_clk: dummy26m {
+		compatible = "fixed-clock";
+		clock-frequency = <26000000>;
+		#clock-cells = <0>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_PPI 13
+			     (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 14
+			     (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 11
+			     (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+			     <GIC_PPI 10
+			     (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+	sysirq: intpol-controller@10200620 {
+		compatible = "mediatek,mt6795-sysirq",
+			     "mediatek,mt6577-sysirq";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		reg = <0 0x10200620 0 0x20>;
+	};
+
+	gic: interrupt-controller@10221000 {
+		compatible = "arm,gic-400";
+		#interrupt-cells = <3>;
+		interrupt-parent = <&gic>;
+		interrupt-controller;
+		reg = <0 0x10221000 0 0x1000>,
+		      <0 0x10222000 0 0x2000>,
+		      <0 0x10224000 0 0x2000>,
+		      <0 0x10226000 0 0x2000>;
+	};
+
+	uart0: serial@11002000 {
+		compatible = "mediatek,mt6795-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11002000 0 0x400>;
+		interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart1: serial@11003000 {
+		compatible = "mediatek,mt6795-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11003000 0 0x400>;
+		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart2: serial@11004000 {
+		compatible = "mediatek,mt6795-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11004000 0 0x400>;
+		interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+
+	uart3: serial@11005000 {
+		compatible = "mediatek,mt6795-uart",
+			     "mediatek,mt6577-uart";
+		reg = <0 0x11005000 0 0x400>;
+		interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_LOW>;
+		clocks = <&uart_clk>;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index d0ab012..4be66ca 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -34,6 +34,359 @@
 	chosen { };
 };
 
+&i2c1 {
+	status = "okay";
+
+	buck: da9211@68 {
+		compatible = "dlg,da9211";
+		reg = <0x68>;
+
+		regulators {
+			da9211_vcpu_reg: BUCKA {
+				regulator-name = "VBUCKA";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1310000>;
+				regulator-min-microamp	= <2000000>;
+				regulator-max-microamp	= <4400000>;
+				regulator-ramp-delay = <10000>;
+				regulator-always-on;
+			};
+
+			da9211_vgpu_reg: BUCKB {
+				regulator-name = "VBUCKB";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1310000>;
+				regulator-min-microamp	= <2000000>;
+				regulator-max-microamp	= <3000000>;
+				regulator-ramp-delay = <10000>;
+			};
+		};
+	};
+};
+
+&mmc0 {
+	status = "okay";
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc0_pins_default>;
+	pinctrl-1 = <&mmc0_pins_uhs>;
+	bus-width = <8>;
+	max-frequency = <50000000>;
+	cap-mmc-highspeed;
+	vmmc-supply = <&mt6397_vemc_3v3_reg>;
+	vqmmc-supply = <&mt6397_vio18_reg>;
+	non-removable;
+};
+
+&mmc1 {
+	status = "okay";
+	pinctrl-names = "default", "state_uhs";
+	pinctrl-0 = <&mmc1_pins_default>;
+	pinctrl-1 = <&mmc1_pins_uhs>;
+	bus-width = <4>;
+	max-frequency = <50000000>;
+	cap-sd-highspeed;
+	sd-uhs-sdr25;
+	cd-gpios = <&pio 132 0>;
+	vmmc-supply = <&mt6397_vmch_reg>;
+	vqmmc-supply = <&mt6397_vmc_reg>;
+};
+
+&pio {
+	mmc0_pins_default: mmc0default {
+		pins_cmd_dat {
+			pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+				 <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+				 <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+				 <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+				 <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+				 <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+				 <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+				 <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+				 <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+			input-enable;
+			bias-pull-up;
+		};
+
+		pins_clk {
+			pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+			bias-pull-down;
+		};
+
+		pins_rst {
+			pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+
+	mmc1_pins_default: mmc1default {
+		pins_cmd_dat {
+			pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+				 <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+				 <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+				 <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+				 <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_4mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_clk {
+			pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+			bias-pull-down;
+			drive-strength = <MTK_DRIVE_4mA>;
+		};
+
+		pins_insert {
+			pinmux = <MT8173_PIN_132_I2S0_DATA1__FUNC_GPIO132>;
+			bias-pull-up;
+		};
+	};
+
+	mmc0_pins_uhs: mmc0 {
+		pins_cmd_dat {
+			pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+				 <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+				 <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+				 <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+				 <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+				 <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+				 <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+				 <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+				 <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_2mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_clk {
+			pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+			drive-strength = <MTK_DRIVE_2mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
+		};
+
+		pins_rst {
+			pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+			bias-pull-up;
+		};
+	};
+
+	mmc1_pins_uhs: mmc1 {
+		pins_cmd_dat {
+			pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+				 <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+				 <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+				 <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+				 <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+			input-enable;
+			drive-strength = <MTK_DRIVE_4mA>;
+			bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+		};
+
+		pins_clk {
+			pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+			drive-strength = <MTK_DRIVE_4mA>;
+			bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+		};
+	};
+};
+
+&pwrap {
+	pmic: mt6397 {
+		compatible = "mediatek,mt6397";
+		interrupt-parent = <&pio>;
+		interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+
+		mt6397regulator: mt6397regulator {
+			compatible = "mediatek,mt6397-regulator";
+
+			mt6397_vpca15_reg: buck_vpca15 {
+				regulator-compatible = "buck_vpca15";
+				regulator-name = "vpca15";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vpca7_reg: buck_vpca7 {
+				regulator-compatible = "buck_vpca7";
+				regulator-name = "vpca7";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+			};
+
+			mt6397_vsramca15_reg: buck_vsramca15 {
+				regulator-compatible = "buck_vsramca15";
+				regulator-name = "vsramca15";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vsramca7_reg: buck_vsramca7 {
+				regulator-compatible = "buck_vsramca7";
+				regulator-name = "vsramca7";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vcore_reg: buck_vcore {
+				regulator-compatible = "buck_vcore";
+				regulator-name = "vcore";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vgpu_reg: buck_vgpu {
+				regulator-compatible = "buck_vgpu";
+				regulator-name = "vgpu";
+				regulator-min-microvolt = < 700000>;
+				regulator-max-microvolt = <1350000>;
+				regulator-ramp-delay = <12500>;
+				regulator-enable-ramp-delay = <115>;
+			};
+
+			mt6397_vdrm_reg: buck_vdrm {
+				regulator-compatible = "buck_vdrm";
+				regulator-name = "vdrm";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1400000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vio18_reg: buck_vio18 {
+				regulator-compatible = "buck_vio18";
+				regulator-name = "vio18";
+				regulator-min-microvolt = <1620000>;
+				regulator-max-microvolt = <1980000>;
+				regulator-ramp-delay = <12500>;
+				regulator-always-on;
+			};
+
+			mt6397_vtcxo_reg: ldo_vtcxo {
+				regulator-compatible = "ldo_vtcxo";
+				regulator-name = "vtcxo";
+				regulator-always-on;
+			};
+
+			mt6397_va28_reg: ldo_va28 {
+				regulator-compatible = "ldo_va28";
+				regulator-name = "va28";
+				regulator-always-on;
+			};
+
+			mt6397_vcama_reg: ldo_vcama {
+				regulator-compatible = "ldo_vcama";
+				regulator-name = "vcama";
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <2800000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vio28_reg: ldo_vio28 {
+				regulator-compatible = "ldo_vio28";
+				regulator-name = "vio28";
+				regulator-always-on;
+			};
+
+			mt6397_vusb_reg: ldo_vusb {
+				regulator-compatible = "ldo_vusb";
+				regulator-name = "vusb";
+			};
+
+			mt6397_vmc_reg: ldo_vmc {
+				regulator-compatible = "ldo_vmc";
+				regulator-name = "vmc";
+				regulator-min-microvolt = <1800000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vmch_reg: ldo_vmch {
+				regulator-compatible = "ldo_vmch";
+				regulator-name = "vmch";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+				regulator-compatible = "ldo_vemc3v3";
+				regulator-name = "vemc_3v3";
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp1_reg: ldo_vgp1 {
+				regulator-compatible = "ldo_vgp1";
+				regulator-name = "vcamd";
+				regulator-min-microvolt = <1220000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <240>;
+			};
+
+			mt6397_vgp2_reg: ldo_vgp2 {
+				regulator-compatible = "ldo_vgp2";
+				regulator-name = "vcamio";
+				regulator-min-microvolt = <1000000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp3_reg: ldo_vgp3 {
+				regulator-compatible = "ldo_vgp3";
+				regulator-name = "vcamaf";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp4_reg: ldo_vgp4 {
+				regulator-compatible = "ldo_vgp4";
+				regulator-name = "vgp4";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp5_reg: ldo_vgp5 {
+				regulator-compatible = "ldo_vgp5";
+				regulator-name = "vgp5";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3000000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vgp6_reg: ldo_vgp6 {
+				regulator-compatible = "ldo_vgp6";
+				regulator-name = "vgp6";
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+
+			mt6397_vibr_reg: ldo_vibr {
+				regulator-compatible = "ldo_vibr";
+				regulator-name = "vibr";
+				regulator-min-microvolt = <1300000>;
+				regulator-max-microvolt = <3300000>;
+				regulator-enable-ramp-delay = <218>;
+			};
+		};
+	};
+};
+
 &uart0 {
 	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 27237a1..d18ee42 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -11,8 +11,11 @@
  * GNU General Public License for more details.
  */
 
+#include <dt-bindings/clock/mt8173-clk.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/mt8173-power.h>
+#include <dt-bindings/reset-controller/mt8173-resets.h>
 #include "mt8173-pinfunc.h"
 
 / {
@@ -49,6 +52,8 @@
 			device_type = "cpu";
 			compatible = "arm,cortex-a53";
 			reg = <0x000>;
+			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 
 		cpu1: cpu@1 {
@@ -56,6 +61,7 @@
 			compatible = "arm,cortex-a53";
 			reg = <0x001>;
 			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 
 		cpu2: cpu@100 {
@@ -63,6 +69,7 @@
 			compatible = "arm,cortex-a57";
 			reg = <0x100>;
 			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
 		};
 
 		cpu3: cpu@101 {
@@ -70,6 +77,20 @@
 			compatible = "arm,cortex-a57";
 			reg = <0x101>;
 			enable-method = "psci";
+			cpu-idle-states = <&CPU_SLEEP_0>;
+		};
+
+		idle-states {
+			entry-method = "arm,psci";
+
+			CPU_SLEEP_0: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				local-timer-stop;
+				entry-latency-us = <639>;
+				exit-latency-us = <680>;
+				min-residency-us = <1088>;
+				arm,psci-suspend-param = <0x0010000>;
+			};
 		};
 	};
 
@@ -81,10 +102,18 @@
 		cpu_on	      = <0x84000003>;
 	};
 
-	uart_clk: dummy26m {
+	clk26m: oscillator@0 {
 		compatible = "fixed-clock";
-		clock-frequency = <26000000>;
 		#clock-cells = <0>;
+		clock-frequency = <26000000>;
+		clock-output-names = "clk26m";
+	};
+
+	clk32k: oscillator@1 {
+		compatible = "fixed-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32000>;
+		clock-output-names = "clk32k";
 	};
 
 	timer {
@@ -106,11 +135,32 @@
 		compatible = "simple-bus";
 		ranges;
 
-		/*
-		 * Pinctrl access register at 0x10005000 through regmap.
-		 * Register 0x1000b000 is used by EINT.
-		 */
-		pio: pinctrl@10005000 {
+		topckgen: clock-controller@10000000 {
+			compatible = "mediatek,mt8173-topckgen";
+			reg = <0 0x10000000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
+		infracfg: power-controller@10001000 {
+			compatible = "mediatek,mt8173-infracfg", "syscon";
+			reg = <0 0x10001000 0 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		pericfg: power-controller@10003000 {
+			compatible = "mediatek,mt8173-pericfg", "syscon";
+			reg = <0 0x10003000 0 0x1000>;
+			#clock-cells = <1>;
+			#reset-cells = <1>;
+		};
+
+		syscfg_pctl_a: syscfg_pctl_a@10005000 {
+			compatible = "mediatek,mt8173-pctl-a-syscfg", "syscon";
+			reg = <0 0x10005000 0 0x1000>;
+		};
+
+		pio: pinctrl@0x10005000 {
 			compatible = "mediatek,mt8173-pinctrl";
 			reg = <0 0x1000b000 0 0x1000>;
 			mediatek,pctl-regmap = <&syscfg_pctl_a>;
@@ -122,11 +172,81 @@
 			interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
 				     <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+
+			i2c0_pins_a: i2c0 {
+				pins1 {
+					pinmux = <MT8173_PIN_45_SDA0__FUNC_SDA0>,
+						 <MT8173_PIN_46_SCL0__FUNC_SCL0>;
+					bias-disable;
+				};
+			};
+
+			i2c1_pins_a: i2c1 {
+				pins1 {
+					pinmux = <MT8173_PIN_125_SDA1__FUNC_SDA1>,
+						 <MT8173_PIN_126_SCL1__FUNC_SCL1>;
+					bias-disable;
+				};
+			};
+
+			i2c2_pins_a: i2c2 {
+				pins1 {
+					pinmux = <MT8173_PIN_43_SDA2__FUNC_SDA2>,
+						 <MT8173_PIN_44_SCL2__FUNC_SCL2>;
+					bias-disable;
+				};
+			};
+
+			i2c3_pins_a: i2c3 {
+				pins1 {
+					pinmux = <MT8173_PIN_106_SDA3__FUNC_SDA3>,
+						 <MT8173_PIN_107_SCL3__FUNC_SCL3>;
+					bias-disable;
+				};
+			};
+
+			i2c4_pins_a: i2c4 {
+				pins1 {
+					pinmux = <MT8173_PIN_133_SDA4__FUNC_SDA4>,
+						 <MT8173_PIN_134_SCL4__FUNC_SCL4>;
+					bias-disable;
+				};
+			};
+
+			i2c6_pins_a: i2c6 {
+				pins1 {
+					pinmux = <MT8173_PIN_100_MSDC2_DAT0__FUNC_SDA5>,
+						 <MT8173_PIN_101_MSDC2_DAT1__FUNC_SCL5>;
+					bias-disable;
+				};
+			};
 		};
 
-		syscfg_pctl_a: syscfg_pctl_a@10005000 {
-			compatible = "mediatek,mt8173-pctl-a-syscfg", "syscon";
-			reg = <0 0x10005000 0 0x1000>;
+		scpsys: scpsys@10006000 {
+			compatible = "mediatek,mt8173-scpsys";
+			#power-domain-cells = <1>;
+			reg = <0 0x10006000 0 0x1000>;
+			clocks = <&clk26m>,
+				 <&topckgen CLK_TOP_MM_SEL>;
+			clock-names = "mfg", "mm";
+			infracfg = <&infracfg>;
+		};
+
+		watchdog: watchdog@10007000 {
+			compatible = "mediatek,mt8173-wdt",
+				     "mediatek,mt6589-wdt";
+			reg = <0 0x10007000 0 0x100>;
+		};
+
+		pwrap: pwrap@1000d000 {
+			compatible = "mediatek,mt8173-pwrap";
+			reg = <0 0x1000d000 0 0x1000>;
+			reg-names = "pwrap";
+			interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+			resets = <&infracfg MT8173_INFRA_PMIC_WRAP_RST>;
+			reset-names = "pwrap";
+			clocks = <&infracfg CLK_INFRA_PMICSPI>, <&infracfg CLK_INFRA_PMICWRAP>;
+			clock-names = "spi", "wrap";
 		};
 
 		sysirq: intpol-controller@10200620 {
@@ -138,6 +258,12 @@
 			reg = <0 0x10200620 0 0x20>;
 		};
 
+		apmixedsys: clock-controller@10209000 {
+			compatible = "mediatek,mt8173-apmixedsys";
+			reg = <0 0x10209000 0 0x1000>;
+			#clock-cells = <1>;
+		};
+
 		gic: interrupt-controller@10220000 {
 			compatible = "arm,gic-400";
 			#interrupt-cells = <3>;
@@ -156,7 +282,8 @@
 				     "mediatek,mt6577-uart";
 			reg = <0 0x11002000 0 0x400>;
 			interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART0_SEL>, <&pericfg CLK_PERI_UART0>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
@@ -165,7 +292,8 @@
 				     "mediatek,mt6577-uart";
 			reg = <0 0x11003000 0 0x400>;
 			interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART1_SEL>, <&pericfg CLK_PERI_UART1>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
@@ -174,7 +302,8 @@
 				     "mediatek,mt6577-uart";
 			reg = <0 0x11004000 0 0x400>;
 			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART2_SEL>, <&pericfg CLK_PERI_UART2>;
+			clock-names = "baud", "bus";
 			status = "disabled";
 		};
 
@@ -183,7 +312,179 @@
 				     "mediatek,mt6577-uart";
 			reg = <0 0x11005000 0 0x400>;
 			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
-			clocks = <&uart_clk>;
+			clocks = <&pericfg CLK_PERI_UART3_SEL>, <&pericfg CLK_PERI_UART3>;
+			clock-names = "baud", "bus";
+			status = "disabled";
+		};
+
+		i2c0: i2c@11007000 {
+			compatible = "mediatek,mt8173-i2c";
+			reg = <0 0x11007000 0 0x70>,
+			      <0 0x11000100 0 0x80>;
+			interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_LOW>;
+			clock-div = <16>;
+			clocks = <&pericfg CLK_PERI_I2C0>,
+				 <&pericfg CLK_PERI_AP_DMA>;
+			clock-names = "main", "dma";
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c0_pins_a>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c1: i2c@11008000 {
+			compatible = "mediatek,mt8173-i2c";
+			reg = <0 0x11008000 0 0x70>,
+			      <0 0x11000180 0 0x80>;
+			interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+			clock-div = <16>;
+			clocks = <&pericfg CLK_PERI_I2C1>,
+				 <&pericfg CLK_PERI_AP_DMA>;
+			clock-names = "main", "dma";
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c1_pins_a>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c2: i2c@11009000 {
+			compatible = "mediatek,mt8173-i2c";
+			reg = <0 0x11009000 0 0x70>,
+			      <0 0x11000200 0 0x80>;
+			interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+			clock-div = <16>;
+			clocks = <&pericfg CLK_PERI_I2C2>,
+				 <&pericfg CLK_PERI_AP_DMA>;
+			clock-names = "main", "dma";
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c2_pins_a>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c3: i2c@11010000 {
+			compatible = "mediatek,mt8173-i2c";
+			reg = <0 0x11010000 0 0x70>,
+			      <0 0x11000280 0 0x80>;
+			interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
+			clock-div = <16>;
+			clocks = <&pericfg CLK_PERI_I2C3>,
+				 <&pericfg CLK_PERI_AP_DMA>;
+			clock-names = "main", "dma";
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c3_pins_a>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c4: i2c@11011000 {
+			compatible = "mediatek,mt8173-i2c";
+			reg = <0 0x11011000 0 0x70>,
+			      <0 0x11000300 0 0x80>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
+			clock-div = <16>;
+			clocks = <&pericfg CLK_PERI_I2C4>,
+				 <&pericfg CLK_PERI_AP_DMA>;
+			clock-names = "main", "dma";
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c4_pins_a>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		i2c6: i2c@11013000 {
+			compatible = "mediatek,mt8173-i2c";
+			reg = <0 0x11013000 0 0x70>,
+			      <0 0x11000080 0 0x80>;
+			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
+			clock-div = <16>;
+			clocks = <&pericfg CLK_PERI_I2C6>,
+				 <&pericfg CLK_PERI_AP_DMA>;
+			clock-names = "main", "dma";
+			pinctrl-names = "default";
+			pinctrl-0 = <&i2c6_pins_a>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		afe: audio-controller@11220000 {
+			compatible = "mediatek,mt8173-afe-pcm";
+			reg = <0 0x11220000 0 0x1000>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_FALLING>;
+			power-domains = <&scpsys MT8173_POWER_DOMAIN_AUDIO>;
+			clocks = <&infracfg CLK_INFRA_AUDIO>,
+				 <&topckgen CLK_TOP_AUDIO_SEL>,
+				 <&topckgen CLK_TOP_AUD_INTBUS_SEL>,
+				 <&topckgen CLK_TOP_APLL1_DIV0>,
+				 <&topckgen CLK_TOP_APLL2_DIV0>,
+				 <&topckgen CLK_TOP_I2S0_M_SEL>,
+				 <&topckgen CLK_TOP_I2S1_M_SEL>,
+				 <&topckgen CLK_TOP_I2S2_M_SEL>,
+				 <&topckgen CLK_TOP_I2S3_M_SEL>,
+				 <&topckgen CLK_TOP_I2S3_B_SEL>;
+			clock-names = "infra_sys_audio_clk",
+				      "top_pdn_audio",
+				      "top_pdn_aud_intbus",
+				      "bck0",
+				      "bck1",
+				      "i2s0_m",
+				      "i2s1_m",
+				      "i2s2_m",
+				      "i2s3_m",
+				      "i2s3_b";
+			assigned-clocks = <&topckgen CLK_TOP_AUD_1_SEL>,
+					  <&topckgen CLK_TOP_AUD_2_SEL>;
+			assigned-clock-parents = <&topckgen CLK_TOP_APLL1>,
+						 <&topckgen CLK_TOP_APLL2>;
+		};
+
+		mmc0: mmc@11230000 {
+			compatible = "mediatek,mt8173-mmc",
+				     "mediatek,mt8135-mmc";
+			reg = <0 0x11230000 0 0x1000>;
+			interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&pericfg CLK_PERI_MSDC30_0>,
+				 <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
+			clock-names = "source", "hclk";
+			status = "disabled";
+		};
+
+		mmc1: mmc@11240000 {
+			compatible = "mediatek,mt8173-mmc",
+				     "mediatek,mt8135-mmc";
+			reg = <0 0x11240000 0 0x1000>;
+			interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&pericfg CLK_PERI_MSDC30_1>,
+				 <&topckgen CLK_TOP_AXI_SEL>;
+			clock-names = "source", "hclk";
+			status = "disabled";
+		};
+
+		mmc2: mmc@11250000 {
+			compatible = "mediatek,mt8173-mmc",
+				     "mediatek,mt8135-mmc";
+			reg = <0 0x11250000 0 0x1000>;
+			interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&pericfg CLK_PERI_MSDC30_2>,
+				 <&topckgen CLK_TOP_AXI_SEL>;
+			clock-names = "source", "hclk";
+			status = "disabled";
+		};
+
+		mmc3: mmc@11260000 {
+			compatible = "mediatek,mt8173-mmc",
+				     "mediatek,mt8135-mmc";
+			reg = <0 0x11260000 0 0x1000>;
+			interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_LOW>;
+			clocks = <&pericfg CLK_PERI_MSDC30_3>,
+				 <&topckgen CLK_TOP_MSDC50_2_H_SEL>;
+			clock-names = "source", "hclk";
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
index 535532b..e03c11d 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
@@ -2,28 +2,38 @@
 
 &pm8916_gpios {
 
-	pinctrl-names = "default";
-	pinctrl-0 = <&pm8916_gpios_default>;
-
-	pm8916_gpios_default: default {
-		usb_hub_reset_pm {
-			pins = "gpio1";
-			function = PMIC_GPIO_FUNC_NORMAL;
-			output-low;
-		};
-		usb_sw_sel_pm {
-			pins = "gpio2";
-			function = PMIC_GPIO_FUNC_NORMAL;
-			input-disable;
-		};
-		usr_led_3_ctrl {
+	usb_hub_reset_pm: usb_hub_reset_pm {
+		pinconf {
 			pins = "gpio3";
 			function = PMIC_GPIO_FUNC_NORMAL;
 			output-low;
 		};
-		usr_led_4_ctrl {
+	};
+
+	usb_sw_sel_pm: usb_sw_sel_pm {
+		pinconf {
 			pins = "gpio4";
 			function = PMIC_GPIO_FUNC_NORMAL;
+			power-source = <PM8916_GPIO_VPH>;
+			input-disable;
+		};
+	};
+
+	pm8916_gpios_leds: pm8916_gpios_leds {
+		pinconf {
+			pins = "gpio1", "gpio2";
+			function = PMIC_GPIO_FUNC_NORMAL;
+			output-low;
+		};
+	};
+};
+
+&pm8916_mpps {
+
+	pm8916_mpps_leds: pm8916_mpps_leds {
+		pinconf {
+			pins = "mpp2", "mpp3";
+			function = PMIC_GPIO_FUNC_NORMAL;
 			output-low;
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
index 5f7023f..cbeee0b 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
@@ -3,17 +3,9 @@
 
 &msmgpio {
 
-	pinctrl-names = "default";
-	pinctrl-0 = <&soc_gpios_default>;
-
-	soc_gpios_default: default {
-		usr_led_1_ctrl_default: usr_led_1_ctrl_default {
-			pins = "gpio21";
-			function = "gpio";
-			output-low;
-		};
-		usr_led_2_ctrl_default: usr_led_2_ctrl_default {
-			pins = "gpio120";
+	msmgpio_leds: msmgpio_leds {
+		pinconf {
+			pins = "gpio21", "gpio120";
 			function = "gpio";
 			output-low;
 		};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 98abece..66804ff 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -32,5 +32,56 @@
 			pinctrl-0 = <&blsp1_uart2_default>;
 			pinctrl-1 = <&blsp1_uart2_sleep>;
 		};
+
+		leds {
+			pinctrl-names = "default";
+			pinctrl-0 = <&msmgpio_leds>,
+				    <&pm8916_gpios_leds>,
+				    <&pm8916_mpps_leds>;
+
+			compatible = "gpio-leds";
+
+			led@1 {
+				label = "apq8016-sbc:green:user1";
+				gpios = <&msmgpio 21 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "heartbeat";
+				default-state = "off";
+			};
+
+			led@2 {
+				label = "apq8016-sbc:green:user2";
+				gpios = <&msmgpio 120 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "mmc0";
+				default-state = "off";
+			};
+
+			led@3 {
+				label = "apq8016-sbc:green:user3";
+				gpios = <&pm8916_gpios 1 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "mmc1";
+				default-state = "off";
+			};
+
+			led@4 {
+				label = "apq8016-sbc:green:user4";
+				gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "none";
+				default-state = "off";
+			};
+
+			led@5 {
+				label = "apq8016-sbc:yellow:wlan";
+				gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "wlan";
+				default-state = "off";
+			};
+
+			led@6 {
+				label = "apq8016-sbc:blue:bt";
+				gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
+				linux,default-trigger = "bt";
+				default-state = "off";
+			};
+		};
 	};
 };
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
new file mode 100644
index 0000000..5689568
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&msmgpio {
+
+	blsp1_uart2_default: blsp1_uart2_default {
+		pinmux {
+			function = "blsp_uart2";
+			pins = "gpio4", "gpio5";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5";
+			drive-strength = <16>;
+			bias-disable;
+		};
+	};
+
+	blsp1_uart2_sleep: blsp1_uart2_sleep {
+		pinmux {
+			function = "blsp_uart2";
+			pins = "gpio4", "gpio5";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spi1_default: spi1_default {
+		pinmux {
+			function = "blsp_spi1";
+			pins = "gpio0", "gpio1", "gpio3";
+		};
+		pinmux_cs {
+			function = "gpio";
+			pins = "gpio2";
+		};
+		pinconf {
+			pins = "gpio0", "gpio1", "gpio3";
+			drive-strength = <12>;
+			bias-disable;
+		};
+		pinconf_cs {
+			pins = "gpio2";
+			drive-strength = <2>;
+			bias-disable;
+			output-high;
+		};
+	};
+
+	spi1_sleep: spi1_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio0", "gpio1", "gpio2", "gpio3";
+		};
+		pinconf {
+			pins = "gpio0", "gpio1", "gpio2", "gpio3";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spi2_default: spi2_default {
+		pinmux {
+			function = "blsp_spi2";
+			pins = "gpio4", "gpio5", "gpio7";
+		};
+		pinmux_cs {
+			function = "gpio";
+			pins = "gpio6";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5", "gpio6", "gpio7";
+			drive-strength = <12>;
+			bias-disable;
+		};
+		pinconf_cs {
+			pins = "gpio6";
+			drive-strength = <2>;
+			bias-disable;
+			output-high;
+		};
+	};
+
+	spi2_sleep: spi2_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio4", "gpio5", "gpio6", "gpio7";
+		};
+		pinconf {
+			pins = "gpio4", "gpio5", "gpio6", "gpio7";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spi3_default: spi3_default {
+		pinmux {
+			function = "blsp_spi3";
+			pins = "gpio8", "gpio9", "gpio11";
+		};
+		pinmux_cs {
+			function = "gpio";
+			pins = "gpio10";
+		};
+		pinconf {
+			pins = "gpio8", "gpio9", "gpio10", "gpio11";
+			drive-strength = <12>;
+			bias-disable;
+		};
+		pinconf_cs {
+			pins = "gpio10";
+			drive-strength = <2>;
+			bias-disable;
+			output-high;
+		};
+	};
+
+	spi3_sleep: spi3_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio8", "gpio9", "gpio10", "gpio11";
+		};
+		pinconf {
+			pins = "gpio8", "gpio9", "gpio10", "gpio11";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spi4_default: spi4_default {
+		pinmux {
+			function = "blsp_spi4";
+			pins = "gpio12", "gpio13", "gpio15";
+		};
+		pinmux_cs {
+			function = "gpio";
+			pins = "gpio14";
+		};
+		pinconf {
+			pins = "gpio12", "gpio13", "gpio14", "gpio15";
+			drive-strength = <12>;
+			bias-disable;
+		};
+		pinconf_cs {
+			pins = "gpio14";
+			drive-strength = <2>;
+			bias-disable;
+			output-high;
+		};
+	};
+
+	spi4_sleep: spi4_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio12", "gpio13", "gpio14", "gpio15";
+		};
+		pinconf {
+			pins = "gpio12", "gpio13", "gpio14", "gpio15";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spi5_default: spi5_default {
+		pinmux {
+			function = "blsp_spi5";
+			pins = "gpio16", "gpio17", "gpio19";
+		};
+		pinmux_cs {
+			function = "gpio";
+			pins = "gpio18";
+		};
+		pinconf {
+			pins = "gpio16", "gpio17", "gpio18", "gpio19";
+			drive-strength = <12>;
+			bias-disable;
+		};
+		pinconf_cs {
+			pins = "gpio18";
+			drive-strength = <2>;
+			bias-disable;
+			output-high;
+		};
+	};
+
+	spi5_sleep: spi5_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio16", "gpio17", "gpio18", "gpio19";
+		};
+		pinconf {
+			pins = "gpio16", "gpio17", "gpio18", "gpio19";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	spi6_default: spi6_default {
+		pinmux {
+			function = "blsp_spi6";
+			pins = "gpio20", "gpio21", "gpio23";
+		};
+		pinmux_cs {
+			function = "gpio";
+			pins = "gpio22";
+		};
+		pinconf {
+			pins = "gpio20", "gpio21", "gpio22", "gpio23";
+			drive-strength = <12>;
+			bias-disable;
+		};
+		pinconf_cs {
+			pins = "gpio22";
+			drive-strength = <2>;
+			bias-disable;
+			output-high;
+		};
+	};
+
+	spi6_sleep: spi6_sleep {
+		pinmux {
+			function = "gpio";
+			pins = "gpio20", "gpio21", "gpio22", "gpio23";
+		};
+		pinconf {
+			pins = "gpio20", "gpio21", "gpio22", "gpio23";
+			drive-strength = <2>;
+			bias-pull-down;
+		};
+	};
+
+	i2c4_default: i2c4_default {
+		pinmux {
+			function = "blsp_i2c4";
+			pins = "gpio14", "gpio15";
+		};
+		pinconf {
+			pins = "gpio14", "gpio15";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	i2c4_sleep: i2c4_sleep {
+		pinmux {
+			function = "blsp_i2c4";
+			pins = "gpio14", "gpio15";
+		};
+		pinconf {
+			pins = "gpio14", "gpio15";
+			drive-strength = <2>;
+			bias-disable;
+		};
+	};
+
+	sdhc2_cd_pin {
+		sdc2_cd_on: cd_on {
+			pinmux {
+				function = "gpio";
+				pins = "gpio38";
+			};
+			pinconf {
+				pins = "gpio38";
+				drive-strength = <2>;
+				bias-pull-up;
+			};
+		};
+		sdc2_cd_off: cd_off {
+			pinmux {
+				function = "gpio";
+				pins = "gpio38";
+			};
+			pinconf {
+				pins = "gpio38";
+				drive-strength = <2>;
+				bias-disable;
+			};
+		};
+	};
+
+	pmx_sdc1_clk {
+		sdc1_clk_on: clk_on {
+			pinmux {
+				pins = "sdc1_clk";
+			};
+			pinconf {
+				pins = "sdc1_clk";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+		sdc1_clk_off: clk_off {
+			pinmux {
+				pins = "sdc1_clk";
+			};
+			pinconf {
+				pins = "sdc1_clk";
+				bias-disable;
+				drive-strength = <2>;
+			};
+		};
+	};
+
+	pmx_sdc1_cmd {
+		sdc1_cmd_on: cmd_on {
+			pinmux {
+				pins = "sdc1_cmd";
+			};
+			pinconf {
+				pins = "sdc1_cmd";
+				bias-pull-up;
+				drive-strength = <10>;
+			};
+		};
+		sdc1_cmd_off: cmd_off {
+			pinmux {
+				pins = "sdc1_cmd";
+			};
+			pinconf {
+				pins = "sdc1_cmd";
+				bias-pull-up;
+				drive-strength = <2>;
+			};
+		};
+	};
+
+	pmx_sdc1_data {
+		sdc1_data_on: data_on {
+			pinmux {
+				pins = "sdc1_data";
+			};
+			pinconf {
+				pins = "sdc1_data";
+				bias-pull-up;
+				drive-strength = <10>;
+			};
+		};
+		sdc1_data_off: data_off {
+			pinmux {
+				pins = "sdc1_data";
+			};
+			pinconf {
+				pins = "sdc1_data";
+				bias-pull-up;
+				drive-strength = <2>;
+			};
+		};
+	};
+
+	pmx_sdc2_clk {
+		sdc2_clk_on: clk_on {
+			pinmux {
+				pins = "sdc2_clk";
+			};
+			pinconf {
+				pins = "sdc2_clk";
+				bias-disable;
+				drive-strength = <16>;
+			};
+		};
+		sdc2_clk_off: clk_off {
+			pinmux {
+				pins = "sdc2_clk";
+			};
+			pinconf {
+				pins = "sdc2_clk";
+				bias-disable;
+				drive-strength = <2>;
+			};
+		};
+	};
+
+	pmx_sdc2_cmd {
+		sdc2_cmd_on: cmd_on {
+			pinmux {
+				pins = "sdc2_cmd";
+			};
+			pinconf {
+				pins = "sdc2_cmd";
+				bias-pull-up;
+				drive-strength = <10>;
+			};
+		};
+		sdc2_cmd_off: cmd_off {
+			pinmux {
+				pins = "sdc2_cmd";
+			};
+			pinconf {
+				pins = "sdc2_cmd";
+				bias-pull-up;
+				drive-strength = <2>;
+			};
+		};
+	};
+
+	pmx_sdc2_data {
+		sdc2_data_on: data_on {
+			pinmux {
+				pins = "sdc2_data";
+			};
+			pinconf {
+				pins = "sdc2_data";
+				bias-pull-up;
+				drive-strength = <10>;
+			};
+		};
+		sdc2_data_off: data_off {
+			pinmux {
+				pins = "sdc2_data";
+			};
+			pinconf {
+				pins = "sdc2_data";
+				bias-pull-up;
+				drive-strength = <2>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 0f49ebd..5911de0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -24,7 +24,10 @@
 	#address-cells = <2>;
 	#size-cells = <2>;
 
-	aliases { };
+	aliases {
+		sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+		sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+	};
 
 	chosen { };
 
@@ -90,30 +93,6 @@
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
-
-			blsp1_uart2_default: blsp1_uart2_default {
-				pinmux {
-					function = "blsp_uart2";
-					pins = "gpio4", "gpio5";
-				};
-				pinconf {
-					pins = "gpio4", "gpio5";
-					drive-strength = <16>;
-					bias-disable;
-				};
-			};
-
-			blsp1_uart2_sleep: blsp1_uart2_sleep {
-				pinmux {
-					function = "blsp_uart2";
-					pins = "gpio4", "gpio5";
-				};
-				pinconf {
-					pins = "gpio4", "gpio5";
-					drive-strength = <2>;
-					bias-pull-down;
-				};
-			};
 		};
 
 		gcc: qcom,gcc@1800000 {
@@ -132,6 +111,202 @@
 			status = "disabled";
 		};
 
+		blsp_dma: dma@7884000 {
+			compatible = "qcom,bam-v1.7.0";
+			reg = <0x07884000 0x23000>;
+			interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "bam_clk";
+			#dma-cells = <1>;
+			qcom,ee = <0>;
+			status = "disabled";
+		};
+
+		blsp_spi1: spi@78b5000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b5000 0x600>;
+			interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 5>, <&blsp_dma 4>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&spi1_default>;
+			pinctrl-1 = <&spi1_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp_spi2: spi@78b6000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b6000 0x600>;
+			interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 7>, <&blsp_dma 6>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&spi2_default>;
+			pinctrl-1 = <&spi2_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp_spi3: spi@78b7000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b7000 0x600>;
+			interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 9>, <&blsp_dma 8>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&spi3_default>;
+			pinctrl-1 = <&spi3_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp_spi4: spi@78b8000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b8000 0x600>;
+			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 11>, <&blsp_dma 10>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&spi4_default>;
+			pinctrl-1 = <&spi4_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp_spi5: spi@78b9000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078b9000 0x600>;
+			interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP5_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 13>, <&blsp_dma 12>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&spi5_default>;
+			pinctrl-1 = <&spi5_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp_spi6: spi@78ba000 {
+			compatible = "qcom,spi-qup-v2.2.1";
+			reg = <0x078ba000 0x600>;
+			interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&gcc GCC_BLSP1_QUP6_SPI_APPS_CLK>,
+				 <&gcc GCC_BLSP1_AHB_CLK>;
+			clock-names = "core", "iface";
+			dmas = <&blsp_dma 15>, <&blsp_dma 14>;
+			dma-names = "rx", "tx";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&spi6_default>;
+			pinctrl-1 = <&spi6_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		blsp_i2c4: i2c@78b8000 {
+			compatible = "qcom,i2c-qup-v2.2.1";
+			reg = <0x78b8000 0x1000>;
+			interrupts = <GIC_SPI 98 0>;
+			clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+				<&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>;
+			clock-names = "iface", "core";
+			pinctrl-names = "default", "sleep";
+			pinctrl-0 = <&i2c4_default>;
+			pinctrl-1 = <&i2c4_sleep>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+			status = "disabled";
+		};
+
+		sdhc_1: sdhci@07824000 {
+			compatible = "qcom,sdhci-msm-v4";
+			reg = <0x07824900 0x11c>, <0x07824000 0x800>;
+			reg-names = "hc_mem", "core_mem";
+
+			interrupts = <0 123 0>, <0 138 0>;
+			interrupt-names = "hc_irq", "pwr_irq";
+			clocks = <&gcc GCC_SDCC1_APPS_CLK>,
+				 <&gcc GCC_SDCC1_AHB_CLK>;
+			clock-names = "core", "iface";
+			bus-width = <8>;
+			non-removable;
+			status = "disabled";
+		};
+
+		sdhc_2: sdhci@07864000 {
+			compatible = "qcom,sdhci-msm-v4";
+			reg = <0x07864900 0x11c>, <0x07864000 0x800>;
+			reg-names = "hc_mem", "core_mem";
+
+			interrupts = <0 125 0>, <0 221 0>;
+			interrupt-names = "hc_irq", "pwr_irq";
+			clocks = <&gcc GCC_SDCC2_APPS_CLK>,
+				 <&gcc GCC_SDCC2_AHB_CLK>;
+			clock-names = "core", "iface";
+			bus-width = <4>;
+			status = "disabled";
+		};
+
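+		/*
+		 * The peripheral, host and PHY nodes below all map the same
+		 * register block at 0x78d9000 and are disabled by default;
+		 * a board enables only the variant it uses.
+		 */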
+		usb_dev: usb@78d9000 {
+			compatible = "qcom,ci-hdrc";
+			reg = <0x78d9000 0x400>;
+			dr_mode = "peripheral";
+			interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
+			usb-phy = <&usb_otg>;
+			status = "disabled";
+		};
+
+		usb_host: ehci@78d9000 {
+			compatible = "qcom,ehci-host";
+			reg = <0x78d9000 0x400>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
+			usb-phy = <&usb_otg>;
+			status = "disabled";
+		};
+
+		usb_otg: phy@78d9000 {
+			compatible = "qcom,usb-otg-snps";
+			reg = <0x78d9000 0x400>;
+			interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_BOTH>,
+				     <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
+
+			qcom,vdd-levels = <1 5 7>;
+			qcom,phy-init-sequence = <0x44 0x6B 0x24 0x13>;
+			dr_mode = "peripheral";
+			qcom,otg-control = <2>; // PMIC
+
+			clocks = <&gcc GCC_USB_HS_AHB_CLK>,
+				 <&gcc GCC_USB_HS_SYSTEM_CLK>,
+				 <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+			clock-names = "iface", "core", "sleep";
+
+			resets = <&gcc GCC_USB2A_PHY_BCR>,
+				 <&gcc GCC_USB_HS_BCR>;
+			reset-names = "phy", "link";
+			status = "disabled";
+		};
+
 		intc: interrupt-controller@b000000 {
 			compatible = "qcom,msm-qgic2";
 			interrupt-controller;
@@ -217,3 +392,5 @@
 		};
 	};
 };
+
+#include "msm8916-pins.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
new file mode 100644
index 0000000..601e6a2
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
+
+always		:= $(dtb-y)
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
new file mode 100644
index 0000000..401a812
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3368.dtsi"
+
+/ {
+	model = "Rockchip R88";
+	compatible = "rockchip,r88", "rockchip,rk3368";
+
+	chosen {
+		stdout-path = "serial2:115200n8";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x40000000>;
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		pinctrl-0 = <&emmc_reset>;
+		pinctrl-names = "default";
+		reset-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+	};
+
+	keys: gpio-keys {
+		compatible = "gpio-keys";
+		#address-cells = <1>;
+		#size-cells = <0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pwr_key>;
+
+		button@0 {
+			gpio-key,wakeup = <1>;
+			gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+			label = "GPIO Power";
+			linux,code = <116>;
+		};
+	};
+
+	leds: gpio-leds {
+		compatible = "gpio-leds";
+
+		work {
+			gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+			label = "r88:green:led";
+			pinctrl-names = "default";
+			pinctrl-0 = <&led_ctl>;
+		};
+	};
+
+	ir: ir-receiver {
+		compatible = "gpio-ir-receiver";
+		gpios = <&gpio3 30 GPIO_ACTIVE_LOW>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&ir_int>;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		clocks = <&hym8563>;
+		clock-names = "ext_clock";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bt_rst>, <&wifi_reg_on>;
+
+		reset-gpios =
+			/* BT_RST_N */
+			<&gpio3 5 GPIO_ACTIVE_LOW>,
+
+			/* WL_REG_ON */
+			<&gpio3 4 GPIO_ACTIVE_LOW>;
+	};
+
+	vcc_18: vcc18-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_18";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	/* supplies both host and otg */
+	vcc_host: vcc-host-regulator {
+		compatible = "regulator-fixed";
+		enable-active-high;
+		gpio = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&host_vbus_drv>;
+		regulator-name = "vcc_host";
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	vcc_io: vcc-io-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_io";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	vcc_lan: vcc-lan-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_lan";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_io>;
+	};
+
+	vcc_sys: vcc-sys-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vcc_sys";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		regulator-always-on;
+		regulator-boot-on;
+	};
+
+	vccio_wl: vccio-wl-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vccio_wl";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_io>;
+	};
+
+	vdd_10: vdd-10-regulator {
+		compatible = "regulator-fixed";
+		regulator-name = "vdd_10";
+		regulator-min-microvolt = <1000000>;
+		regulator-max-microvolt = <1000000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+};
+
+&emmc {
+	broken-cd;
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	disable-wp;
+	mmc-pwrseq = <&emmc_pwrseq>;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+	status = "okay";
+};
+
+&gmac {
+	phy-supply = <&vcc_lan>;
+	phy-mode = "rmii";
+	clock_in_out = "output";
+	snps,reset-gpio = <&gpio3 12 0>;
+	snps,reset-active-low;
+	snps,reset-delays-us = <0 10000 1000000>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&rmii_pins>;
+	tx_delay = <0x30>;
+	rx_delay = <0x10>;
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+
+	vdd_cpu: syr827@40 {
+		compatible = "silergy,syr827";
+		reg = <0x40>;
+		fcs,suspend-voltage-selector = <1>;
+		regulator-name = "vdd_cpu";
+		regulator-enable-ramp-delay = <300>;
+		regulator-min-microvolt = <712500>;
+		regulator-max-microvolt = <1500000>;
+		regulator-ramp-delay = <8000>;
+		regulator-always-on;
+		regulator-boot-on;
+		vin-supply = <&vcc_sys>;
+	};
+
+	hym8563: hym8563@51 {
+		compatible = "haoyu,hym8563";
+		reg = <0x51>;
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		clock-output-names = "xin32k";
+		/* rtc_int is not connected */
+	};
+};
+
+&sdio0 {
+	assigned-clocks = <&cru SCLK_SDIO0>;
+	assigned-clock-parents = <&cru PLL_CPLL>;
+	broken-cd;
+	bus-width = <4>;
+	cap-sd-highspeed;
+	cap-sdio-irq;
+	keep-power-in-suspend;
+	mmc-pwrseq = <&sdio_pwrseq>;
+	non-removable;
+	num-slots = <1>;
+	pinctrl-names = "default";
+	pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>;
+	vmmc-supply = <&vcc_io>;
+	vqmmc-supply = <&vccio_wl>;
+	status = "okay";
+};
+
+&pinctrl {
+	pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+		bias-disable;
+		drive-strength = <8>;
+	};
+
+	pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+		bias-pull-up;
+		drive-strength = <8>;
+	};
+
+	emmc {
+		emmc_bus8: emmc-bus8 {
+			rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 19 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 20 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 21 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 22 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 23 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 24 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+					<1 25 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+		};
+
+		emmc-clk {
+			rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+		};
+
+		emmc-cmd {
+			rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+		};
+
+		emmc_reset: emmc-reset {
+			rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	ir {
+		ir_int: ir-int {
+			rockchip,pins = <3 30 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	keys {
+		pwr_key: pwr-key {
+			rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_up>;
+		};
+	};
+
+	leds {
+		stby_pwren: stby-pwren {
+			rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		led_ctl: led-ctl {
+			rockchip,pins = <3 29 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	sdio {
+		wifi_reg_on: wifi-reg-on {
+			rockchip,pins = <3 4 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+
+		bt_rst: bt-rst {
+			rockchip,pins = <3 5 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+
+	usb {
+		host_vbus_drv: host-vbus-drv {
+			rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+		};
+	};
+};
+
+&saradc {
+	vref-supply = <&vcc_18>;
+	status = "okay";
+};
+
+&uart2 {
+	status = "okay";
+};
+
+&usb_host0_ehci {
+	status = "okay";
+};
+
+&usb_otg {
+	dr_mode = "host";
+	status = "okay";
+};
+
+&wdt {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
new file mode 100644
index 0000000..a712bea
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -0,0 +1,900 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This library is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This library is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/rk3368-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+/ {
+	compatible = "rockchip,rk3368";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	aliases {
+		i2c0 = &i2c0;
+		i2c1 = &i2c1;
+		i2c2 = &i2c2;
+		i2c3 = &i2c3;
+		i2c4 = &i2c4;
+		i2c5 = &i2c5;
+		serial0 = &uart0;
+		serial1 = &uart1;
+		serial2 = &uart2;
+		serial3 = &uart3;
+		serial4 = &uart4;
+		spi0 = &spi0;
+		spi1 = &spi1;
+		spi2 = &spi2;
+	};
+
+	cpus {
+		#address-cells = <0x2>;
+		#size-cells = <0x0>;
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&cpu_b0>;
+				};
+				core1 {
+					cpu = <&cpu_b1>;
+				};
+				core2 {
+					cpu = <&cpu_b2>;
+				};
+				core3 {
+					cpu = <&cpu_b3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&cpu_l0>;
+				};
+				core1 {
+					cpu = <&cpu_l1>;
+				};
+				core2 {
+					cpu = <&cpu_l2>;
+				};
+				core3 {
+					cpu = <&cpu_l3>;
+				};
+			};
+		};
+
+		idle-states {
+			entry-method = "arm,psci";
+
+			cpu_sleep: cpu-sleep-0 {
+				compatible = "arm,idle-state";
+				arm,psci-suspend-param = <0x1010000>;
+				entry-latency-us = <0x3fffffff>;
+				exit-latency-us = <0x40000000>;
+				min-residency-us = <0xffffffff>;
+			};
+		};
+
+		cpu_l0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x0>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_l1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x1>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_l2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x2>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_l3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x3>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_b0: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x100>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_b1: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x101>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_b2: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x102>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+
+		cpu_b3: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a53", "arm,armv8";
+			reg = <0x0 0x103>;
+			cpu-idle-states = <&cpu_sleep>;
+			enable-method = "psci";
+		};
+	};
+
+	arm-pmu {
+		compatible = "arm,armv8-pmuv3";
+		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+			     <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-affinity = <&cpu_l0>, <&cpu_l1>, <&cpu_l2>,
+				     <&cpu_l3>, <&cpu_b0>, <&cpu_b1>,
+				     <&cpu_b2>, <&cpu_b3>;
+	};
+
+	psci {
+		compatible = "arm,psci-0.2";
+		method = "smc";
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <GIC_PPI 13
+			(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 14
+			(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 11
+			(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+			     <GIC_PPI 10
+			(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+
+	xin24m: oscillator {
+		compatible = "fixed-clock";
+		clock-frequency = <24000000>;
+		clock-output-names = "xin24m";
+		#clock-cells = <0>;
+	};
+
+	sdmmc: dwmmc@ff0c0000 {
+		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xff0c0000 0x0 0x4000>;
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
+		clock-names = "biu", "ciu";
+		fifo-depth = <0x100>;
+		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	sdio0: dwmmc@ff0d0000 {
+		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xff0d0000 0x0 0x4000>;
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
+			 <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
+		clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+		fifo-depth = <0x100>;
+		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	emmc: dwmmc@ff0f0000 {
+		compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
+		reg = <0x0 0xff0f0000 0x0 0x4000>;
+		clock-freq-min-max = <400000 150000000>;
+		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
+		clock-names = "biu", "ciu";
+		fifo-depth = <0x100>;
+		interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	saradc: saradc@ff100000 {
+		compatible = "rockchip,saradc";
+		reg = <0x0 0xff100000 0x0 0x100>;
+		interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+		#io-channel-cells = <1>;
+		clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+		clock-names = "saradc", "apb_pclk";
+		status = "disabled";
+	};
+
+	spi0: spi@ff110000 {
+		compatible = "rockchip,rk3368-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff110000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi1: spi@ff120000 {
+		compatible = "rockchip,rk3368-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff120000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	spi2: spi@ff130000 {
+		compatible = "rockchip,rk3368-spi", "rockchip,rk3066-spi";
+		reg = <0x0 0xff130000 0x0 0x1000>;
+		clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>;
+		clock-names = "spiclk", "apb_pclk";
+		interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c1: i2c@ff140000 {
+		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+		reg = <0x0 0xff140000 0x0 0x1000>;
+		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C1>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c1_xfer>;
+		status = "disabled";
+	};
+
+	i2c3: i2c@ff150000 {
+		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+		reg = <0x0 0xff150000 0x0 0x1000>;
+		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C3>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c3_xfer>;
+		status = "disabled";
+	};
+
+	i2c4: i2c@ff160000 {
+		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+		reg = <0x0 0xff160000 0x0 0x1000>;
+		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c4_xfer>;
+		status = "disabled";
+	};
+
+	i2c5: i2c@ff170000 {
+		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+		reg = <0x0 0xff170000 0x0 0x1000>;
+		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C5>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c5_xfer>;
+		status = "disabled";
+	};
+
+	uart0: serial@ff180000 {
+		compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff180000 0x0 0x100>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		status = "disabled";
+	};
+
+	uart1: serial@ff190000 {
+		compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff190000 0x0 0x100>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		status = "disabled";
+	};
+
+	uart3: serial@ff1b0000 {
+		compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff1b0000 0x0 0x100>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		status = "disabled";
+	};
+
+	uart4: serial@ff1c0000 {
+		compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff1c0000 0x0 0x100>;
+		clock-frequency = <24000000>;
+		clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		status = "disabled";
+	};
+
+	gmac: ethernet@ff290000 {
+		compatible = "rockchip,rk3368-gmac";
+		reg = <0x0 0xff290000 0x0 0x10000>;
+		interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "macirq";
+		rockchip,grf = <&grf>;
+		clocks = <&cru SCLK_MAC>,
+			<&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
+			<&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
+			<&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
+		clock-names = "stmmaceth",
+			"mac_clk_rx", "mac_clk_tx",
+			"clk_mac_ref", "clk_mac_refout",
+			"aclk_mac", "pclk_mac";
+		status = "disabled";
+	};
+
+	usb_host0_ehci: usb@ff500000 {
+		compatible = "generic-ehci";
+		reg = <0x0 0xff500000 0x0 0x100>;
+		interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_HOST0>;
+		clock-names = "usbhost";
+		status = "disabled";
+	};
+
+	usb_otg: usb@ff580000 {
+		compatible = "rockchip,rk3368-usb", "rockchip,rk3066-usb",
+				"snps,dwc2";
+		reg = <0x0 0xff580000 0x0 0x40000>;
+		interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru HCLK_OTG0>;
+		clock-names = "otg";
+		dr_mode = "otg";
+		g-np-tx-fifo-size = <16>;
+		g-rx-fifo-size = <275>;
+		g-tx-fifo-size = <256 128 128 64 64 32>;
+		g-use-dma;
+		status = "disabled";
+	};
+
+	i2c0: i2c@ff650000 {
+		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+		reg = <0x0 0xff650000 0x0 0x1000>;
+		clocks = <&cru PCLK_I2C0>;
+		clock-names = "i2c";
+		interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c0_xfer>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		status = "disabled";
+	};
+
+	i2c2: i2c@ff660000 {
+		compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+		reg = <0x0 0xff660000 0x0 0x1000>;
+		interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+		#address-cells = <1>;
+		#size-cells = <0>;
+		clock-names = "i2c";
+		clocks = <&cru PCLK_I2C2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&i2c2_xfer>;
+		status = "disabled";
+	};
+
+	uart2: serial@ff690000 {
+		compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+		reg = <0x0 0xff690000 0x0 0x100>;
+		clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+		clock-names = "baudclk", "apb_pclk";
+		interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&uart2_xfer>;
+		reg-shift = <2>;
+		reg-io-width = <4>;
+		status = "disabled";
+	};
+
+	pmugrf: syscon@ff738000 {
+		compatible = "rockchip,rk3368-pmugrf", "syscon";
+		reg = <0x0 0xff738000 0x0 0x1000>;
+	};
+
+	cru: clock-controller@ff760000 {
+		compatible = "rockchip,rk3368-cru";
+		reg = <0x0 0xff760000 0x0 0x1000>;
+		rockchip,grf = <&grf>;
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	grf: syscon@ff770000 {
+		compatible = "rockchip,rk3368-grf", "syscon";
+		reg = <0x0 0xff770000 0x0 0x1000>;
+	};
+
+	wdt: watchdog@ff800000 {
+		compatible = "rockchip,rk3368-wdt", "snps,dw-wdt";
+		reg = <0x0 0xff800000 0x0 0x100>;
+		clocks = <&cru PCLK_WDT>;
+		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+		status = "disabled";
+	};
+
+	gic: interrupt-controller@ffb71000 {
+		compatible = "arm,gic-400";
+		interrupt-controller;
+		#interrupt-cells = <3>;
+		#address-cells = <0>;
+
+		reg = <0x0 0xffb71000 0x0 0x1000>,
+		      <0x0 0xffb72000 0x0 0x1000>,
+		      <0x0 0xffb74000 0x0 0x2000>,
+		      <0x0 0xffb76000 0x0 0x2000>;
+		interrupts = <GIC_PPI 9
+		      (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+	};
+
+	pinctrl: pinctrl {
+		compatible = "rockchip,rk3368-pinctrl";
+		rockchip,grf = <&grf>;
+		rockchip,pmu = <&pmugrf>;
+		#address-cells = <0x2>;
+		#size-cells = <0x2>;
+		ranges;
+
+		gpio0: gpio0@ff750000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff750000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO0>;
+			interrupts = <GIC_SPI 0x51 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio1: gpio1@ff780000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff780000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO1>;
+			interrupts = <GIC_SPI 0x52 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio2: gpio2@ff790000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff790000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO2>;
+			interrupts = <GIC_SPI 0x53 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		gpio3: gpio3@ff7a0000 {
+			compatible = "rockchip,gpio-bank";
+			reg = <0x0 0xff7a0000 0x0 0x100>;
+			clocks = <&cru PCLK_GPIO3>;
+			interrupts = <GIC_SPI 0x54 IRQ_TYPE_LEVEL_HIGH>;
+
+			gpio-controller;
+			#gpio-cells = <0x2>;
+
+			interrupt-controller;
+			#interrupt-cells = <0x2>;
+		};
+
+		pcfg_pull_up: pcfg-pull-up {
+			bias-pull-up;
+		};
+
+		pcfg_pull_down: pcfg-pull-down {
+			bias-pull-down;
+		};
+
+		pcfg_pull_none: pcfg-pull-none {
+			bias-disable;
+		};
+
+		pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+			bias-disable;
+			drive-strength = <12>;
+		};
+
+		emmc {
+			emmc_clk: emmc-clk {
+				rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none>;
+			};
+
+			emmc_cmd: emmc-cmd {
+				rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up>;
+			};
+
+			emmc_pwr: emmc-pwr {
+				rockchip,pins = <1 27 RK_FUNC_2 &pcfg_pull_up>;
+			};
+
+			emmc_bus1: emmc-bus1 {
+				rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>;
+			};
+
+			emmc_bus4: emmc-bus4 {
+				rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
+						<1 19 RK_FUNC_2 &pcfg_pull_up>,
+						<1 20 RK_FUNC_2 &pcfg_pull_up>,
+						<1 21 RK_FUNC_2 &pcfg_pull_up>;
+			};
+
+			emmc_bus8: emmc-bus8 {
+				rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
+						<1 19 RK_FUNC_2 &pcfg_pull_up>,
+						<1 20 RK_FUNC_2 &pcfg_pull_up>,
+						<1 21 RK_FUNC_2 &pcfg_pull_up>,
+						<1 22 RK_FUNC_2 &pcfg_pull_up>,
+						<1 23 RK_FUNC_2 &pcfg_pull_up>,
+						<1 24 RK_FUNC_2 &pcfg_pull_up>,
+						<1 25 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		gmac {
+			rgmii_pins: rgmii-pins {
+				rockchip,pins =	<3 22 RK_FUNC_1 &pcfg_pull_none>,
+						<3 24 RK_FUNC_1 &pcfg_pull_none>,
+						<3 19 RK_FUNC_1 &pcfg_pull_none>,
+						<3 8 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 9 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 10 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 14 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 28 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 13 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 15 RK_FUNC_1 &pcfg_pull_none>,
+						<3 16 RK_FUNC_1 &pcfg_pull_none>,
+						<3 17 RK_FUNC_1 &pcfg_pull_none>,
+						<3 18 RK_FUNC_1 &pcfg_pull_none>,
+						<3 25 RK_FUNC_1 &pcfg_pull_none>,
+						<3 20 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			rmii_pins: rmii-pins {
+				rockchip,pins =	<3 22 RK_FUNC_1 &pcfg_pull_none>,
+						<3 24 RK_FUNC_1 &pcfg_pull_none>,
+						<3 19 RK_FUNC_1 &pcfg_pull_none>,
+						<3 8 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 9 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 13 RK_FUNC_1 &pcfg_pull_none_12ma>,
+						<3 15 RK_FUNC_1 &pcfg_pull_none>,
+						<3 16 RK_FUNC_1 &pcfg_pull_none>,
+						<3 20 RK_FUNC_1 &pcfg_pull_none>,
+						<3 21 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c0 {
+			i2c0_xfer: i2c0-xfer {
+				rockchip,pins = <0 6 RK_FUNC_1 &pcfg_pull_none>,
+						<0 7 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c1 {
+			i2c1_xfer: i2c1-xfer {
+				rockchip,pins = <2 21 RK_FUNC_1 &pcfg_pull_none>,
+						<2 22 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c2 {
+			i2c2_xfer: i2c2-xfer {
+				rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_none>,
+						<3 31 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		i2c3 {
+			i2c3_xfer: i2c3-xfer {
+				rockchip,pins = <1 16 RK_FUNC_1 &pcfg_pull_none>,
+						<1 17 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		i2c4 {
+			i2c4_xfer: i2c4-xfer {
+				rockchip,pins = <3 24 RK_FUNC_2 &pcfg_pull_none>,
+						<3 25 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		i2c5 {
+			i2c5_xfer: i2c5-xfer {
+				rockchip,pins = <3 26 RK_FUNC_2 &pcfg_pull_none>,
+						<3 27 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		sdio0 {
+			sdio0_bus1: sdio0-bus1 {
+				rockchip,pins = <2 28 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_bus4: sdio0-bus4 {
+				rockchip,pins = <2 28 RK_FUNC_1 &pcfg_pull_up>,
+						<2 29 RK_FUNC_1 &pcfg_pull_up>,
+						<2 30 RK_FUNC_1 &pcfg_pull_up>,
+						<2 31 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_cmd: sdio0-cmd {
+				rockchip,pins = <3 0 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_clk: sdio0-clk {
+				rockchip,pins = <3 1 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			sdio0_cd: sdio0-cd {
+				rockchip,pins = <3 2 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_wp: sdio0-wp {
+				rockchip,pins = <3 3 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_pwr: sdio0-pwr {
+				rockchip,pins = <3 4 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_bkpwr: sdio0-bkpwr {
+				rockchip,pins = <3 5 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdio0_int: sdio0-int {
+				rockchip,pins = <3 6 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
+		sdmmc {
+			sdmmc_clk: sdmmc-clk {
+				rockchip,pins = <2 9 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			sdmmc_cmd: sdmmc-cmd {
+				rockchip,pins = <2 10 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_cd: sdmmc-cd {
+				rockchip,pins = <2 11 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_bus1: sdmmc-bus1 {
+				rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up>;
+			};
+
+			sdmmc_bus4: sdmmc-bus4 {
+				rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up>,
+						<2 6 RK_FUNC_1 &pcfg_pull_up>,
+						<2 7 RK_FUNC_1 &pcfg_pull_up>,
+						<2 8 RK_FUNC_1 &pcfg_pull_up>;
+			};
+		};
+
+		spi0 {
+			spi0_clk: spi0-clk {
+				rockchip,pins = <1 29 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi0_cs0: spi0-cs0 {
+				rockchip,pins = <1 24 RK_FUNC_3 &pcfg_pull_up>;
+			};
+			spi0_cs1: spi0-cs1 {
+				rockchip,pins = <1 25 RK_FUNC_3 &pcfg_pull_up>;
+			};
+			spi0_tx: spi0-tx {
+				rockchip,pins = <1 23 RK_FUNC_3 &pcfg_pull_up>;
+			};
+			spi0_rx: spi0-rx {
+				rockchip,pins = <1 22 RK_FUNC_3 &pcfg_pull_up>;
+			};
+		};
+
+		spi1 {
+			spi1_clk: spi1-clk {
+				rockchip,pins = <1 14 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_cs0: spi1-cs0 {
+				rockchip,pins = <1 15 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_cs1: spi1-cs1 {
+				rockchip,pins = <3 28 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_rx: spi1-rx {
+				rockchip,pins = <1 16 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi1_tx: spi1-tx {
+				rockchip,pins = <1 17 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		spi2 {
+			spi2_clk: spi2-clk {
+				rockchip,pins = <0 12 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi2_cs0: spi2-cs0 {
+				rockchip,pins = <0 13 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi2_rx: spi2-rx {
+				rockchip,pins = <0 10 RK_FUNC_2 &pcfg_pull_up>;
+			};
+			spi2_tx: spi2-tx {
+				rockchip,pins = <0 11 RK_FUNC_2 &pcfg_pull_up>;
+			};
+		};
+
+		uart0 {
+			uart0_xfer: uart0-xfer {
+				rockchip,pins = <2 24 RK_FUNC_1 &pcfg_pull_up>,
+						<2 25 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_cts: uart0-cts {
+				rockchip,pins = <2 26 RK_FUNC_1 &pcfg_pull_none>;
+			};
+
+			uart0_rts: uart0-rts {
+				rockchip,pins = <2 27 RK_FUNC_1 &pcfg_pull_none>;
+			};
+		};
+
+		uart1 {
+			uart1_xfer: uart1-xfer {
+				rockchip,pins = <0 20 RK_FUNC_3 &pcfg_pull_up>,
+						<0 21 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			uart1_cts: uart1-cts {
+				rockchip,pins = <0 22 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			uart1_rts: uart1-rts {
+				rockchip,pins = <0 23 RK_FUNC_3 &pcfg_pull_none>;
+			};
+		};
+
+		uart2 {
+			uart2_xfer: uart2-xfer {
+				rockchip,pins = <2 6 RK_FUNC_2 &pcfg_pull_up>,
+						<2 5 RK_FUNC_2 &pcfg_pull_none>;
+			};
+			/* no rts / cts for uart2 */
+		};
+
+		uart3 {
+			uart3_xfer: uart3-xfer {
+				rockchip,pins = <3 29 RK_FUNC_2 &pcfg_pull_up>,
+						<3 30 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			uart3_cts: uart3-cts {
+				rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none>;
+			};
+
+			uart3_rts: uart3-rts {
+				rockchip,pins = <3 17 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+
+		uart4 {
+			uart4_xfer: uart4-xfer {
+				rockchip,pins = <0 27 RK_FUNC_3 &pcfg_pull_up>,
+						<0 26 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			uart4_cts: uart4-cts {
+				rockchip,pins = <0 24 RK_FUNC_3 &pcfg_pull_none>;
+			};
+
+			uart4_rts: uart4-rts {
+				rockchip,pins = <0 25 RK_FUNC_3 &pcfg_pull_none>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi
index ee34e1a..63894c4 100644
--- a/arch/arm64/boot/dts/sprd/sc9836.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi
@@ -16,28 +16,28 @@
 		#address-cells = <2>;
 		#size-cells = <0>;
 
-		cpu@0 {
+		cpu0: cpu@0 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0x0 0x0>;
 			enable-method = "psci";
 		};
 
-		cpu@1 {
+		cpu1: cpu@1 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0x0 0x1>;
 			enable-method = "psci";
 		};
 
-		cpu@2 {
+		cpu2: cpu@2 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0x0 0x2>;
 			enable-method = "psci";
 		};
 
-		cpu@3 {
+		cpu3: cpu@3 {
 			device_type = "cpu";
 			compatible = "arm,cortex-a53", "arm,armv8";
 			reg = <0x0 0x3>;
@@ -75,14 +75,103 @@
 				};
 			};
 
-			/* funnel input port 0~3 is reserved for ETMs */
+			/* funnel input port 0-4 */
 			port@1 {
+				reg = <0>;
+				funnel_in_port0: endpoint {
+					slave-mode;
+					remote-endpoint = <&etm0_out>;
+				};
+			};
+
+			port@2 {
+				reg = <1>;
+				funnel_in_port1: endpoint {
+					slave-mode;
+					remote-endpoint = <&etm1_out>;
+				};
+			};
+
+			port@3 {
+				reg = <2>;
+				funnel_in_port2: endpoint {
+					slave-mode;
+					remote-endpoint = <&etm2_out>;
+				};
+			};
+
+			port@4 {
+				reg = <3>;
+				funnel_in_port3: endpoint {
+					slave-mode;
+					remote-endpoint = <&etm3_out>;
+				};
+			};
+
+			port@5 {
 				reg = <4>;
 				funnel_in_port4: endpoint {
 					slave-mode;
 					remote-endpoint = <&stm_out>;
 				};
 			};
+			/* Other input ports aren't connected to anything */
+		};
+	};
+
+	etm@10440000 {
+		compatible = "arm,coresight-etm4x", "arm,primecell";
+		reg = <0 0x10440000 0 0x1000>;
+
+		cpu = <&cpu0>;
+		clocks = <&clk26mhz>;
+		clock-names = "apb_pclk";
+		port {
+			etm0_out: endpoint {
+				remote-endpoint = <&funnel_in_port0>;
+			};
+		};
+	};
+
+	etm@10540000 {
+		compatible = "arm,coresight-etm4x", "arm,primecell";
+		reg = <0 0x10540000 0 0x1000>;
+
+		cpu = <&cpu1>;
+		clocks = <&clk26mhz>;
+		clock-names = "apb_pclk";
+		port {
+			etm1_out: endpoint {
+				remote-endpoint = <&funnel_in_port1>;
+			};
+		};
+	};
+
+	etm@10640000 {
+		compatible = "arm,coresight-etm4x", "arm,primecell";
+		reg = <0 0x10640000 0 0x1000>;
+
+		cpu = <&cpu2>;
+		clocks = <&clk26mhz>;
+		clock-names = "apb_pclk";
+		port {
+			etm2_out: endpoint {
+				remote-endpoint = <&funnel_in_port2>;
+			};
+		};
+	};
+
+	etm@10740000 {
+		compatible = "arm,coresight-etm4x", "arm,primecell";
+		reg = <0 0x10740000 0 0x1000>;
+
+		cpu = <&cpu3>;
+		clocks = <&clk26mhz>;
+		clock-names = "apb_pclk";
+		port {
+			etm3_out: endpoint {
+				remote-endpoint = <&funnel_in_port3>;
+			};
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 0a3f40e..ce5d848 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -32,6 +32,10 @@
 	};
 };
 
+&can0 {
+	status = "okay";
+};
+
 &gem0 {
 	status = "okay";
 	phy-handle = <&phy0>;
@@ -42,6 +46,91 @@
 	};
 };
 
+&gpio {
+	status = "okay";
+};
+
+&i2c0 {
+	status = "okay";
+	clock-frequency = <400000>;
+	eeprom@54 {
+		compatible = "at,24c64";
+		reg = <0x54>;
+	};
+};
+
+&i2c1 {
+	status = "okay";
+	clock-frequency = <400000>;
+	eeprom@55 {
+		compatible = "at,24c64";
+		reg = <0x55>;
+	};
+};
+
+&sata {
+	status = "okay";
+	ceva,broken-gen2;
+};
+
+&sdhci0 {
+	status = "okay";
+};
+
+&sdhci1 {
+	status = "okay";
+};
+
+&spi0 {
+	status = "okay";
+	num-cs = <1>;
+	spi0_flash0: spi0_flash0@0 {
+		compatible = "m25p80";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		reg = <0>;
+
+		spi0_flash0@00000000 {
+			label = "spi0_flash0";
+			reg = <0x0 0x100000>;
+		};
+	};
+};
+
+&spi1 {
+	status = "okay";
+	num-cs = <1>;
+	spi1_flash0: spi1_flash0@0 {
+		compatible = "m25p80";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		spi-max-frequency = <50000000>;
+		reg = <0>;
+
+		spi1_flash0@00000000 {
+			label = "spi1_flash0";
+			reg = <0x0 0x100000>;
+		};
+	};
+};
+
 &uart0 {
 	status = "okay";
 };
+
+&usb0 {
+	status = "okay";
+	dr_mode = "peripheral";
+	maximum-speed = "high-speed";
+};
+
+&usb1 {
+	status = "okay";
+	dr_mode = "host";
+	maximum-speed = "high-speed";
+};
+
+&watchdog0 {
+	status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 11e0b00..857eda5 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -96,12 +96,193 @@
 		#size-cells = <1>;
 		ranges;
 
+		can0: can@ff060000 {
+			compatible = "xlnx,zynq-can-1.0";
+			status = "disabled";
+			clocks = <&misc_clk &misc_clk>;
+			clock-names = "can_clk", "pclk";
+			reg = <0x0 0xff060000 0x1000>;
+			interrupts = <0 23 4>;
+			interrupt-parent = <&gic>;
+			tx-fifo-depth = <0x40>;
+			rx-fifo-depth = <0x40>;
+		};
+
+		can1: can@ff070000 {
+			compatible = "xlnx,zynq-can-1.0";
+			status = "disabled";
+			clocks = <&misc_clk &misc_clk>;
+			clock-names = "can_clk", "pclk";
+			reg = <0x0 0xff070000 0x1000>;
+			interrupts = <0 24 4>;
+			interrupt-parent = <&gic>;
+			tx-fifo-depth = <0x40>;
+			rx-fifo-depth = <0x40>;
+		};
+
 		misc_clk: misc_clk {
 			compatible = "fixed-clock";
 			#clock-cells = <0>;
 			clock-frequency = <25000000>;
 		};
 
+		gpio: gpio@ff0a0000 {
+			compatible = "xlnx,zynqmp-gpio-1.0";
+			status = "disabled";
+			#gpio-cells = <0x2>;
+			clocks = <&misc_clk>;
+			interrupt-parent = <&gic>;
+			interrupts = <0 16 4>;
+			reg = <0x0 0xff0a0000 0x1000>;
+		};
+
+		gem0: ethernet@ff0b0000 {
+			compatible = "cdns,gem";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 57 4>, <0 57 4>;
+			reg = <0x0 0xff0b0000 0x1000>;
+			clock-names = "pclk", "hclk", "tx_clk";
+			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		gem1: ethernet@ff0c0000 {
+			compatible = "cdns,gem";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 59 4>, <0 59 4>;
+			reg = <0x0 0xff0c0000 0x1000>;
+			clock-names = "pclk", "hclk", "tx_clk";
+			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		gem2: ethernet@ff0d0000 {
+			compatible = "cdns,gem";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 61 4>, <0 61 4>;
+			reg = <0x0 0xff0d0000 0x1000>;
+			clock-names = "pclk", "hclk", "tx_clk";
+			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		gem3: ethernet@ff0e0000 {
+			compatible = "cdns,gem";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 63 4>, <0 63 4>;
+			reg = <0x0 0xff0e0000 0x1000>;
+			clock-names = "pclk", "hclk", "tx_clk";
+			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		i2c_clk: i2c_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0x0>;
+			clock-frequency = <111111111>;
+		};
+
+		i2c0: i2c@ff020000 {
+			compatible = "cdns,i2c-r1p10";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 17 4>;
+			reg = <0x0 0xff020000 0x1000>;
+			clocks = <&i2c_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		i2c1: i2c@ff030000 {
+			compatible = "cdns,i2c-r1p10";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 18 4>;
+			reg = <0x0 0xff030000 0x1000>;
+			clocks = <&i2c_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		sata_clk: sata_clk {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <75000000>;
+		};
+
+		sata: ahci@fd0c0000 {
+			compatible = "ceva,ahci-1v84";
+			status = "disabled";
+			reg = <0x0 0xfd0c0000 0x2000>;
+			interrupt-parent = <&gic>;
+			interrupts = <0 133 4>;
+			clocks = <&sata_clk>;
+		};
+
+		sdhci0: sdhci@ff160000 {
+			compatible = "arasan,sdhci-8.9a";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 48 4>;
+			reg = <0x0 0xff160000 0x1000>;
+			clock-names = "clk_xin", "clk_ahb";
+			clocks = <&misc_clk>, <&misc_clk>;
+		};
+
+		sdhci1: sdhci@ff170000 {
+			compatible = "arasan,sdhci-8.9a";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 49 4>;
+			reg = <0x0 0xff170000 0x1000>;
+			clock-names = "clk_xin", "clk_ahb";
+			clocks = <&misc_clk>, <&misc_clk>;
+		};
+
+		smmu: smmu@fd800000 {
+			compatible = "arm,mmu-500";
+			reg = <0x0 0xfd800000 0x20000>;
+			#global-interrupts = <1>;
+			interrupt-parent = <&gic>;
+			interrupts = <0 157 4>,
+				<0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>,
+				<0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>,
+				<0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>,
+				<0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>;
+		};
+
+		spi0: spi@ff040000 {
+			compatible = "cdns,spi-r1p6";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 19 4>;
+			reg = <0x0 0xff040000 0x1000>;
+			clock-names = "ref_clk", "pclk";
+			clocks = <&misc_clk &misc_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		spi1: spi@ff050000 {
+			compatible = "cdns,spi-r1p6";
+			status = "disabled";
+			interrupt-parent = <&gic>;
+			interrupts = <0 20 4>;
+			reg = <0x0 0xff050000 0x1000>;
+			clock-names = "ref_clk", "pclk";
+			clocks = <&misc_clk &misc_clk>;
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
 		ttc0: timer@ff110000 {
 			compatible = "cdns,ttc";
 			status = "disabled";
@@ -162,132 +343,22 @@
 			clocks = <&misc_clk &misc_clk>;
 		};
 
-		gpio: gpio@ff0a0000 {
-			compatible = "xlnx,zynq-gpio-1.0";
-			status = "disabled";
-			#gpio-cells = <0x2>;
-			clocks = <&misc_clk>;
-			interrupt-parent = <&gic>;
-			interrupts = <0 16 4>;
-			reg = <0x0 0xff0a0000 0x1000>;
-		};
-
-		gem0: ethernet@ff0b0000 {
-			compatible = "cdns,gem";
+		usb0: usb@fe200000 {
+			compatible = "snps,dwc3";
 			status = "disabled";
 			interrupt-parent = <&gic>;
-			interrupts = <0 57 4>, <0 57 4>;
-			reg = <0x0 0xff0b0000 0x1000>;
-			clock-names = "pclk", "hclk", "tx_clk";
-			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		gem1: ethernet@ff0c0000 {
-			compatible = "cdns,gem";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 59 4>, <0 59 4>;
-			reg = <0x0 0xff0c0000 0x1000>;
-			clock-names = "pclk", "hclk", "tx_clk";
-			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		gem2: ethernet@ff0d0000 {
-			compatible = "cdns,gem";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 61 4>, <0 61 4>;
-			reg = <0x0 0xff0d0000 0x1000>;
-			clock-names = "pclk", "hclk", "tx_clk";
-			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		gem3: ethernet@ff0e0000 {
-			compatible = "cdns,gem";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 63 4>, <0 63 4>;
-			reg = <0x0 0xff0e0000 0x1000>;
-			clock-names = "pclk", "hclk", "tx_clk";
-			clocks = <&misc_clk>, <&misc_clk>, <&misc_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		spi0: spi@ff040000 {
-			compatible = "cdns,spi-r1p6";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 19 4>;
-			reg = <0x0 0xff040000 0x1000>;
-			clock-names = "ref_clk", "pclk";
-			clocks = <&misc_clk &misc_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		spi1: spi@ff050000 {
-			compatible = "cdns,spi-r1p6";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 20 4>;
-			reg = <0x0 0xff050000 0x1000>;
-			clock-names = "ref_clk", "pclk";
-			clocks = <&misc_clk &misc_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		i2c_clk: i2c_clk {
-			compatible = "fixed-clock";
-			#clock-cells = <0x0>;
-			clock-frequency = <111111111>;
-		};
-
-		i2c0: i2c@ff020000 {
-			compatible = "cdns,i2c-r1p10";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 17 4>;
-			reg = <0x0 0xff020000 0x1000>;
-			clocks = <&i2c_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		i2c1: i2c@ff030000 {
-			compatible = "cdns,i2c-r1p10";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 18 4>;
-			reg = <0x0 0xff030000 0x1000>;
-			clocks = <&i2c_clk>;
-			#address-cells = <1>;
-			#size-cells = <0>;
-		};
-
-		sdhci0: sdhci@ff160000 {
-			compatible = "arasan,sdhci-8.9a";
-			status = "disabled";
-			interrupt-parent = <&gic>;
-			interrupts = <0 48 4>;
-			reg = <0x0 0xff160000 0x1000>;
+			interrupts = <0 65 4>;
+			reg = <0x0 0xfe200000 0x40000>;
 			clock-names = "clk_xin", "clk_ahb";
 			clocks = <&misc_clk>, <&misc_clk>;
 		};
 
-		sdhci1: sdhci@ff170000 {
-			compatible = "arasan,sdhci-8.9a";
+		usb1: usb@fe300000 {
+			compatible = "snps,dwc3";
 			status = "disabled";
 			interrupt-parent = <&gic>;
-			interrupts = <0 49 4>;
-			reg = <0x0 0xff170000 0x1000>;
+			interrupts = <0 70 4>;
+			reg = <0x0 0xfe300000 0x40000>;
 			clock-names = "clk_xin", "clk_ahb";
 			clocks = <&misc_clk>, <&misc_clk>;
 		};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 4e17e7e..34d71dd 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -31,10 +31,13 @@
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
 CONFIG_ARCH_EXYNOS7=y
 CONFIG_ARCH_FSL_LS2085A=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
 CONFIG_ARCH_TEGRA=y
 CONFIG_ARCH_TEGRA_132_SOC=y
@@ -102,6 +105,7 @@
 CONFIG_LEGACY_PTY_COUNT=16
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 3303e8a..f4bf2f2 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -124,7 +124,7 @@
 
 	ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
 			     num_rounds(ctx));
-	scatterwalk_start(&walk, req->assoc);
+	scatterwalk_start(&walk, req->src);
 
 	do {
 		u32 n = scatterwalk_clamp(&walk, len);
@@ -151,6 +151,10 @@
 	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
 	struct blkcipher_desc desc = { .info = req->iv };
 	struct blkcipher_walk walk;
+	struct scatterlist srcbuf[2];
+	struct scatterlist dstbuf[2];
+	struct scatterlist *src;
+	struct scatterlist *dst;
 	u8 __aligned(8) mac[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
 	u32 len = req->cryptlen;
@@ -168,7 +172,12 @@
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	blkcipher_walk_init(&walk, req->dst, req->src, len);
+	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst)
+		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+	blkcipher_walk_init(&walk, dst, src, len);
 	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
 					     AES_BLOCK_SIZE);
 
@@ -194,7 +203,7 @@
 		return err;
 
 	/* copy authtag to end of dst */
-	scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+	scatterwalk_map_and_copy(mac, dst, req->cryptlen,
 				 crypto_aead_authsize(aead), 1);
 
 	return 0;
@@ -207,6 +216,10 @@
 	unsigned int authsize = crypto_aead_authsize(aead);
 	struct blkcipher_desc desc = { .info = req->iv };
 	struct blkcipher_walk walk;
+	struct scatterlist srcbuf[2];
+	struct scatterlist dstbuf[2];
+	struct scatterlist *src;
+	struct scatterlist *dst;
 	u8 __aligned(8) mac[AES_BLOCK_SIZE];
 	u8 buf[AES_BLOCK_SIZE];
 	u32 len = req->cryptlen - authsize;
@@ -224,7 +237,12 @@
 	/* preserve the original iv for the final round */
 	memcpy(buf, req->iv, AES_BLOCK_SIZE);
 
-	blkcipher_walk_init(&walk, req->dst, req->src, len);
+	src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+	dst = src;
+	if (req->src != req->dst)
+		dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+	blkcipher_walk_init(&walk, dst, src, len);
 	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
 					     AES_BLOCK_SIZE);
 
@@ -250,44 +268,42 @@
 		return err;
 
 	/* compare calculated auth tag with the stored one */
-	scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+	scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
 				 authsize, 0);
 
-	if (memcmp(mac, buf, authsize))
+	if (crypto_memneq(mac, buf, authsize))
 		return -EBADMSG;
 	return 0;
 }
 
-static struct crypto_alg ccm_aes_alg = {
-	.cra_name		= "ccm(aes)",
-	.cra_driver_name	= "ccm-aes-ce",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
-	.cra_alignmask		= 7,
-	.cra_type		= &crypto_aead_type,
-	.cra_module		= THIS_MODULE,
-	.cra_aead = {
-		.ivsize		= AES_BLOCK_SIZE,
-		.maxauthsize	= AES_BLOCK_SIZE,
-		.setkey		= ccm_setkey,
-		.setauthsize	= ccm_setauthsize,
-		.encrypt	= ccm_encrypt,
-		.decrypt	= ccm_decrypt,
-	}
+static struct aead_alg ccm_aes_alg = {
+	.base = {
+		.cra_name		= "ccm(aes)",
+		.cra_driver_name	= "ccm-aes-ce",
+		.cra_priority		= 300,
+		.cra_blocksize		= 1,
+		.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
+		.cra_alignmask		= 7,
+		.cra_module		= THIS_MODULE,
+	},
+	.ivsize		= AES_BLOCK_SIZE,
+	.maxauthsize	= AES_BLOCK_SIZE,
+	.setkey		= ccm_setkey,
+	.setauthsize	= ccm_setauthsize,
+	.encrypt	= ccm_encrypt,
+	.decrypt	= ccm_decrypt,
 };
 
 static int __init aes_mod_init(void)
 {
 	if (!(elf_hwcap & HWCAP_AES))
 		return -ENODEV;
-	return crypto_register_alg(&ccm_aes_alg);
+	return crypto_register_aead(&ccm_aes_alg);
 }
 
 static void __exit aes_mod_exit(void)
 {
-	crypto_unregister_alg(&ccm_aes_alg);
+	crypto_unregister_aead(&ccm_aes_alg);
 }
 
 module_init(aes_mod_init);
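The hunks above adapt the CCM driver to the new AEAD layout, where the associated data and the payload share the same src/dst scatterlists: scatterwalk_ffwd() fast-forwards past req->assoclen so the cipher walk starts at the payload, and the tag check switches from memcmp() to crypto_memneq() so the comparison time does not depend on where the first mismatching byte sits. A minimal sketch of the constant-time idea (not the kernel's implementation):

	/* Sketch only: accumulate differences so the loop always runs to the
	 * end; the kernel's crypto_memneq() follows the same principle with
	 * extra care against compiler optimisations. */
	static int memneq_sketch(const void *a, const void *b, size_t n)
	{
		const unsigned char *pa = a, *pb = b;
		unsigned char diff = 0;
		size_t i;

		for (i = 0; i < n; i++)
			diff |= pa[i] ^ pb[i];

		return diff != 0;	/* non-zero means the buffers differ */
	}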
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 406485e..208cec0 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,11 +12,11 @@
 #ifndef _ASM_ACPI_H
 #define _ASM_ACPI_H
 
-#include <linux/mm.h>
 #include <linux/irqchip/arm-gic-acpi.h>
+#include <linux/mm.h>
+#include <linux/psci.h>
 
 #include <asm/cputype.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 
 /* Macros for consistency checks of the GICC subtable of MADT */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c..d56ec07 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -3,6 +3,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/init.h>
+#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
@@ -15,7 +17,7 @@
 	u8  alt_len;		/* size of new instruction(s), <= orig_len */
 };
 
-void apply_alternatives_all(void);
+void __init apply_alternatives_all(void);
 void apply_alternatives(void *start, size_t length);
 void free_alternatives_memory(void);
 
@@ -40,7 +42,8 @@
  * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
  * containing commit 4e4d08cf7399b606 or c1baaddf8861).
  */
-#define ALTERNATIVE(oldinstr, newinstr, feature)			\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
+	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
@@ -53,7 +56,11 @@
 	"664:\n\t"							\
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
-	".org	. - (662b-661b) + (664b-663b)\n"
+	".org	. - (662b-661b) + (664b-663b)\n"			\
+	".endif\n"
+
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
 #else
 
@@ -65,7 +72,8 @@
 	.byte \alt_len
 .endm
 
-.macro alternative_insn insn1 insn2 cap
+.macro alternative_insn insn1, insn2, cap, enable = 1
+	.if \enable
 661:	\insn1
 662:	.pushsection .altinstructions, "a"
 	altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
@@ -75,8 +83,70 @@
 664:	.popsection
 	.org	. - (664b-663b) + (662b-661b)
 	.org	. - (662b-661b) + (664b-663b)
+	.endif
 .endm
 
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this macro will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap, enable = 1
+	.if \enable
+	.pushsection .altinstructions, "a"
+	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+	.popsection
+661:
+	.endif
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ *    sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ *    alternative sequence it is defined in (branches into an
+ *    alternative sequence are not fixed up).
+ */
+.macro alternative_else, enable = 1
+	.if \enable
+662:	.pushsection .altinstr_replacement, "ax"
+663:
+	.endif
+.endm
+
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif, enable = 1
+	.if \enable
+664:	.popsection
+	.org	. - (664b-663b) + (662b-661b)
+	.org	. - (662b-661b) + (664b-663b)
+	.endif
+.endm
+
+#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)	\
+	alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+
+
 #endif  /*  __ASSEMBLY__  */
 
+/*
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ *
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * N.B. If CONFIG_FOO is specified, but not selected, the whole block
+ *      will be omitted, including oldinstr.
+ */
+#define ALTERNATIVE(oldinstr, newinstr, ...)   \
+	_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
+
 #endif /* __ASM_ALTERNATIVE_H */
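A hedged usage sketch of the extended C-side macro, matching the usage comment above; ARM64_HAS_SOMETHING and CONFIG_ARM64_SOMETHING are placeholder names for a CPU capability and a Kconfig option, not symbols defined by this patch:

	/* Sketch only: both sequences must assemble to the same length; here a
	 * single nop is patched into a single barrier when the (hypothetical)
	 * capability is present, and only if the (hypothetical) option is
	 * built in. */
	static inline void example_barrier(void)
	{
		asm volatile(ALTERNATIVE("nop",
					 "dmb ish",
					 ARM64_HAS_SOMETHING,
					 CONFIG_ARM64_SOMETHING));
	}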
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 144b64a..b51f2cc 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -50,18 +50,6 @@
 	.endm
 
 /*
- * Save/disable and restore interrupts.
- */
-	.macro	save_and_disable_irqs, olddaif
-	mrs	\olddaif, daif
-	disable_irq
-	.endm
-
-	.macro	restore_irqs, olddaif
-	msr	daif, \olddaif
-	.endm
-
-/*
  * Enable and disable debug exceptions.
  */
 	.macro	disable_dbg
@@ -103,9 +91,7 @@
  * SMP data memory barrier
  */
 	.macro	smp_dmb, opt
-#ifdef CONFIG_SMP
 	dmb	\opt
-#endif
 	.endm
 
 #define USER(l, x...)				\
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051..35a6778 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -24,233 +24,72 @@
 #include <linux/types.h>
 
 #include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i)	{ (i) }
+#include <asm/lse.h>
 
 #ifdef __KERNEL__
 
-/*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
-#define atomic_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i)	(((v)->counter) = (i))
+#define __ARM64_IN_ATOMIC_IMPL
 
-/*
- * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
 
-#define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
-{									\
-	unsigned long tmp;						\
-	int result;							\
+#undef __ARM64_IN_ATOMIC_IMPL
+
+#include <asm/cmpxchg.h>
+
+#define ___atomic_add_unless(v, a, u, sfx)				\
+({									\
+	typeof((v)->counter) c, old;					\
 									\
-	asm volatile("// atomic_" #op "\n"				\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
+	c = atomic##sfx##_read(v);					\
+	while (c != (u) &&						\
+	      (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c)	\
+		c = old;						\
+	c;								\
+ })
 
-#define ATOMIC_OP_RETURN(op, asm_op)					\
-static inline int atomic_##op##_return(int i, atomic_t *v)		\
-{									\
-	unsigned long tmp;						\
-	int result;							\
-									\
-	asm volatile("// atomic_" #op "_return\n"			\
-"1:	ldxr	%w0, %2\n"						\
-"	" #asm_op "	%w0, %w0, %w3\n"				\
-"	stlxr	%w1, %w0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
+#define ATOMIC_INIT(i)	{ (i) }
 
-#define ATOMIC_OPS(op, asm_op)						\
-	ATOMIC_OP(op, asm_op)						\
-	ATOMIC_OP_RETURN(op, asm_op)
+#define atomic_read(v)			READ_ONCE((v)->counter)
+#define atomic_set(v, i)		(((v)->counter) = (i))
+#define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
+#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
 
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-	unsigned long tmp;
-	int oldval;
-
-	smp_mb();
-
-	asm volatile("// atomic_cmpxchg\n"
-"1:	ldxr	%w1, %2\n"
-"	cmp	%w1, %w3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %w4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
-		c = old;
-	return c;
-}
-
-#define atomic_inc(v)		atomic_add(1, v)
-#define atomic_dec(v)		atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#define atomic_inc(v)			atomic_add(1, (v))
+#define atomic_dec(v)			atomic_sub(1, (v))
+#define atomic_inc_return(v)		atomic_add_return(1, (v))
+#define atomic_dec_return(v)		atomic_sub_return(1, (v))
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
+#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)
+#define __atomic_add_unless(v, a, u)	___atomic_add_unless(v, a, u,)
+#define atomic_andnot			atomic_andnot
 
 /*
  * 64-bit atomic operations.
  */
-#define ATOMIC64_INIT(i) { (i) }
+#define ATOMIC64_INIT			ATOMIC_INIT
+#define atomic64_read			atomic_read
+#define atomic64_set			atomic_set
+#define atomic64_xchg			atomic_xchg
+#define atomic64_cmpxchg		atomic_cmpxchg
 
-#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
-#define atomic64_set(v,i)	(((v)->counter) = (i))
-
-#define ATOMIC64_OP(op, asm_op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)			\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "\n"				\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i));							\
-}									\
-
-#define ATOMIC64_OP_RETURN(op, asm_op)					\
-static inline long atomic64_##op##_return(long i, atomic64_t *v)	\
-{									\
-	long result;							\
-	unsigned long tmp;						\
-									\
-	asm volatile("// atomic64_" #op "_return\n"			\
-"1:	ldxr	%0, %2\n"						\
-"	" #asm_op "	%0, %0, %3\n"					\
-"	stlxr	%w1, %0, %2\n"						\
-"	cbnz	%w1, 1b"						\
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
-	: "Ir" (i)							\
-	: "memory");							\
-									\
-	smp_mb();							\
-	return result;							\
-}
-
-#define ATOMIC64_OPS(op, asm_op)					\
-	ATOMIC64_OP(op, asm_op)						\
-	ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
-	long oldval;
-	unsigned long res;
-
-	smp_mb();
-
-	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldxr	%1, %2\n"
-"	cmp	%1, %3\n"
-"	b.ne	2f\n"
-"	stxr	%w0, %4, %2\n"
-"	cbnz	%w0, 1b\n"
-"2:"
-	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
-	: "Ir" (old), "r" (new)
-	: "cc");
-
-	smp_mb();
-	return oldval;
-}
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
-	long result;
-	unsigned long tmp;
-
-	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldxr	%0, %2\n"
-"	subs	%0, %0, #1\n"
-"	b.mi	2f\n"
-"	stlxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b\n"
-"	dmb	ish\n"
-"2:"
-	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	:
-	: "cc", "memory");
-
-	return result;
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
-	long c, old;
-
-	c = atomic64_read(v);
-	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
-		c = old;
-
-	return c != u;
-}
-
-#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v)			atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc(v)			atomic64_add(1, (v))
+#define atomic64_dec(v)			atomic64_sub(1, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v)			atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v)	(atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v)	(atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_unless(v, a, u)	(___atomic_add_unless(v, a, u, 64) != u)
+#define atomic64_andnot			atomic64_andnot
+
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #endif
 #endif
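___atomic_add_unless() above is the standard compare-and-swap retry loop, shared between the 32-bit and 64-bit wrappers through its sfx argument. A plain-C sketch of what __atomic_add_unless(v, a, u) boils down to (illustrative only):

	/* Sketch only: add 'a' to *v unless *v already equals 'u'; returns the
	 * value observed before the attempted update. */
	static inline int add_unless_sketch(atomic_t *v, int a, int u)
	{
		int c = atomic_read(v);

		while (c != u) {
			int old = atomic_cmpxchg(v, c, c + a);
			if (old == c)	/* our compare-and-swap won */
				break;
			c = old;	/* value changed under us; retry */
		}
		return c;
	}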
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
new file mode 100644
index 0000000..b3b5c4a
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -0,0 +1,247 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LL_SC_H
+#define __ASM_ATOMIC_LL_SC_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+/*
+ * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ *
+ * NOTE: these functions do *not* follow the PCS and must explicitly
+ * save any clobbered registers other than x0 (regardless of return
+ * value).  This is achieved through -fcall-saved-* compiler flags for
+ * this file, which unfortunately don't work on a per-function basis
+ * (the optimize attribute silently ignores these options).
+ */
+
+#define ATOMIC_OP(op, asm_op)						\
+__LL_SC_INLINE void							\
+__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "\n"				\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+__LL_SC_EXPORT(atomic_##op);
+
+#define ATOMIC_OP_RETURN(op, asm_op)					\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v))		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	asm volatile("// atomic_" #op "_return\n"			\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%w0, %2\n"						\
+"	" #asm_op "	%w0, %w0, %w3\n"				\
+"	stlxr	%w1, %w0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
+}									\
+__LL_SC_EXPORT(atomic_##op##_return);
+
+#define ATOMIC_OPS(op, asm_op)						\
+	ATOMIC_OP(op, asm_op)						\
+	ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, orr)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define ATOMIC64_OP(op, asm_op)						\
+__LL_SC_INLINE void							\
+__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "\n"				\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i));							\
+}									\
+__LL_SC_EXPORT(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, asm_op)					\
+__LL_SC_INLINE long							\
+__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v))		\
+{									\
+	long result;							\
+	unsigned long tmp;						\
+									\
+	asm volatile("// atomic64_" #op "_return\n"			\
+"	prfm	pstl1strm, %2\n"					\
+"1:	ldxr	%0, %2\n"						\
+"	" #asm_op "	%0, %0, %3\n"					\
+"	stlxr	%w1, %0, %2\n"						\
+"	cbnz	%w1, 1b"						\
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
+	: "Ir" (i)							\
+	: "memory");							\
+									\
+	smp_mb();							\
+	return result;							\
+}									\
+__LL_SC_EXPORT(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, asm_op)					\
+	ATOMIC64_OP(op, asm_op)						\
+	ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
+
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, orr)
+ATOMIC64_OP(xor, eor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+__LL_SC_INLINE long
+__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_dec_if_positive\n"
+"	prfm	pstl1strm, %2\n"
+"1:	ldxr	%0, %2\n"
+"	subs	%0, %0, #1\n"
+"	b.lt	2f\n"
+"	stlxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	:
+	: "cc", "memory");
+
+	return result;
+}
+__LL_SC_EXPORT(atomic64_dec_if_positive);
+
+#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl)			\
+__LL_SC_INLINE unsigned long						\
+__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,		\
+				     unsigned long old,			\
+				     unsigned long new))		\
+{									\
+	unsigned long tmp, oldval;					\
+									\
+	asm volatile(							\
+	"	prfm	pstl1strm, %[v]\n"				\
+	"1:	ldxr" #sz "\t%" #w "[oldval], %[v]\n"			\
+	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
+	"	cbnz	%" #w "[tmp], 2f\n"				\
+	"	st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"	\
+	"	cbnz	%w[tmp], 1b\n"					\
+	"	" #mb "\n"						\
+	"	mov	%" #w "[oldval], %" #w "[old]\n"		\
+	"2:"								\
+	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
+	  [v] "+Q" (*(unsigned long *)ptr)				\
+	: [old] "Lr" (old), [new] "r" (new)				\
+	: cl);								\
+									\
+	return oldval;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_case_##name);
+
+__CMPXCHG_CASE(w, b,    1,        ,  ,         )
+__CMPXCHG_CASE(w, h,    2,        ,  ,         )
+__CMPXCHG_CASE(w,  ,    4,        ,  ,         )
+__CMPXCHG_CASE( ,  ,    8,        ,  ,         )
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
+__CMPXCHG_CASE(w,  , mb_4, dmb ish, l, "memory")
+__CMPXCHG_CASE( ,  , mb_8, dmb ish, l, "memory")
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name, mb, rel, cl)				\
+__LL_SC_INLINE int							\
+__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,		\
+				      unsigned long old2,		\
+				      unsigned long new1,		\
+				      unsigned long new2,		\
+				      volatile void *ptr))		\
+{									\
+	unsigned long tmp, ret;						\
+									\
+	asm volatile("// __cmpxchg_double" #name "\n"			\
+	"	prfm	pstl1strm, %2\n"				\
+	"1:	ldxp	%0, %1, %2\n"					\
+	"	eor	%0, %0, %3\n"					\
+	"	eor	%1, %1, %4\n"					\
+	"	orr	%1, %0, %1\n"					\
+	"	cbnz	%1, 2f\n"					\
+	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
+	"	cbnz	%w0, 1b\n"					\
+	"	" #mb "\n"						\
+	"2:"								\
+	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
+	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
+	: cl);								\
+									\
+	return ret;							\
+}									\
+__LL_SC_EXPORT(__cmpxchg_double##name);
+
+__CMPXCHG_DBL(   ,        ,  ,         )
+__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
+
+#undef __CMPXCHG_DBL
+
+#endif	/* __ASM_ATOMIC_LL_SC_H */
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
new file mode 100644
index 0000000..55d740e
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -0,0 +1,391 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LSE_H
+#define __ASM_ATOMIC_LSE_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
+
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
+	"	stclr	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
+	"	stset	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
+	"	steor	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
+	"	stadd	%w[i], %[v]\n")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(add_return),
+	/* LSE atomics */
+	"	ldaddal	%w[i], w30, %[v]\n"
+	"	add	%w[i], %w[i], w30")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return w0;
+}
+
+static inline void atomic_and(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(and),
+	/* LSE atomics */
+	"	mvn	%w[i], %w[i]\n"
+	"	stclr	%w[i], %[v]")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(sub),
+	/* LSE atomics */
+	"	neg	%w[i], %w[i]\n"
+	"	stadd	%w[i], %[v]")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	register int w0 asm ("w0") = i;
+	register atomic_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC(sub_return)
+	"	nop",
+	/* LSE atomics */
+	"	neg	%w[i], %w[i]\n"
+	"	ldaddal	%w[i], w30, %[v]\n"
+	"	add	%w[i], %w[i], w30")
+	: [i] "+r" (w0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return w0;
+}
+
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
+
+static inline void atomic64_andnot(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
+	"	stclr	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
+	"	stset	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
+	"	steor	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
+	"	stadd	%[i], %[v]\n")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(add_return),
+	/* LSE atomics */
+	"	ldaddal	%[i], x30, %[v]\n"
+	"	add	%[i], %[i], x30")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return x0;
+}
+
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(and),
+	/* LSE atomics */
+	"	mvn	%[i], %[i]\n"
+	"	stclr	%[i], %[v]")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline void atomic64_sub(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(sub),
+	/* LSE atomics */
+	"	neg	%[i], %[i]\n"
+	"	stadd	%[i], %[v]")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30");
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+	register long x0 asm ("x0") = i;
+	register atomic64_t *x1 asm ("x1") = v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(sub_return)
+	"	nop",
+	/* LSE atomics */
+	"	neg	%[i], %[i]\n"
+	"	ldaddal	%[i], x30, %[v]\n"
+	"	add	%[i], %[i], x30")
+	: [i] "+r" (x0), [v] "+Q" (v->counter)
+	: "r" (x1)
+	: "x30", "memory");
+
+	return x0;
+}
+
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	register long x0 asm ("x0") = (long)v;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	nop\n"
+	__LL_SC_ATOMIC64(dec_if_positive)
+	"	nop\n"
+	"	nop\n"
+	"	nop\n"
+	"	nop\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	ldr	x30, %[v]\n"
+	"	subs	%[ret], x30, #1\n"
+	"	b.lt	2f\n"
+	"	casal	x30, %[ret], %[v]\n"
+	"	sub	x30, x30, #1\n"
+	"	sub	x30, x30, %[ret]\n"
+	"	cbnz	x30, 1b\n"
+	"2:")
+	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
+	:
+	: "x30", "cc", "memory");
+
+	return x0;
+}
+
+#undef __LL_SC_ATOMIC64
+
+#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)
+
+#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
+						  unsigned long old,	\
+						  unsigned long new)	\
+{									\
+	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
+	register unsigned long x1 asm ("x1") = old;			\
+	register unsigned long x2 asm ("x2") = new;			\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	nop\n"							\
+		__LL_SC_CMPXCHG(name)					\
+	"	nop",							\
+	/* LSE atomics */						\
+	"	mov	" #w "30, %" #w "[old]\n"			\
+	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
+	"	mov	%" #w "[ret], " #w "30")			\
+	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
+	: [old] "r" (x1), [new] "r" (x2)				\
+	: "x30" , ##cl);						\
+									\
+	return x0;							\
+}
+
+__CMPXCHG_CASE(w, b,    1,   )
+__CMPXCHG_CASE(w, h,    2,   )
+__CMPXCHG_CASE(w,  ,    4,   )
+__CMPXCHG_CASE(x,  ,    8,   )
+__CMPXCHG_CASE(w, b, mb_1, al, "memory")
+__CMPXCHG_CASE(w, h, mb_2, al, "memory")
+__CMPXCHG_CASE(w,  , mb_4, al, "memory")
+__CMPXCHG_CASE(x,  , mb_8, al, "memory")
+
+#undef __LL_SC_CMPXCHG
+#undef __CMPXCHG_CASE
+
+#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
+
+#define __CMPXCHG_DBL(name, mb, cl...)					\
+static inline int __cmpxchg_double##name(unsigned long old1,		\
+					 unsigned long old2,		\
+					 unsigned long new1,		\
+					 unsigned long new2,		\
+					 volatile void *ptr)		\
+{									\
+	unsigned long oldval1 = old1;					\
+	unsigned long oldval2 = old2;					\
+	register unsigned long x0 asm ("x0") = old1;			\
+	register unsigned long x1 asm ("x1") = old2;			\
+	register unsigned long x2 asm ("x2") = new1;			\
+	register unsigned long x3 asm ("x3") = new2;			\
+	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
+									\
+	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
+	/* LL/SC */							\
+	"	nop\n"							\
+	"	nop\n"							\
+	"	nop\n"							\
+	__LL_SC_CMPXCHG_DBL(name),					\
+	/* LSE atomics */						\
+	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
+	"	eor	%[old1], %[old1], %[oldval1]\n"			\
+	"	eor	%[old2], %[old2], %[oldval2]\n"			\
+	"	orr	%[old1], %[old1], %[old2]")			\
+	: [old1] "+r" (x0), [old2] "+r" (x1),				\
+	  [v] "+Q" (*(unsigned long *)ptr)				\
+	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
+	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
+	: "x30" , ##cl);						\
+									\
+	return x0;							\
+}
+
+__CMPXCHG_DBL(   ,   )
+__CMPXCHG_DBL(_mb, al, "memory")
+
+#undef __LL_SC_CMPXCHG_DBL
+#undef __CMPXCHG_DBL
+
+#endif	/* __ASM_ATOMIC_LSE_H */
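Each helper above pairs two same-length sequences: the LL/SC fallback, reached through a call to the out-of-line __LL_SC_* routine (which is why x30, the link register, appears in the clobber lists), and the native LSE instructions, with nop padding keeping both sides equal in size as the alternatives framework requires. A rough sketch of the selection macro, assuming the asm/lse.h definition added elsewhere in this series:

	/* Sketch only: conceptual shape of ARM64_LSE_ATOMIC_INSN(); the real
	 * macro in asm/lse.h also handles builds without CONFIG_ARM64_LSE_ATOMICS
	 * or without assembler support, where only the LL/SC sequence is emitted. */
	#define ARM64_LSE_ATOMIC_INSN_SKETCH(llsc, lse)	\
		ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)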
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fa47c4..624f967 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,28 +35,6 @@
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
 
-#ifndef CONFIG_SMP
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define smp_store_release(p, v)						\
-do {									\
-	compiletime_assert_atomic_type(*p);				\
-	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
-} while (0)
-
-#define smp_load_acquire(p)						\
-({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
-	compiletime_assert_atomic_type(*p);				\
-	barrier();							\
-	___p1;								\
-})
-
-#else
-
 #define smp_mb()	dmb(ish)
 #define smp_rmb()	dmb(ishld)
 #define smp_wmb()	dmb(ishst)
@@ -109,8 +87,6 @@
 	___p1;								\
 })
 
-#endif
-
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
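With the UP-only definitions gone, smp_load_acquire() and smp_store_release() always expand to the acquire/release forms. The message-passing pattern they are meant for, as a short sketch (data and ready are placeholder variables):

	/* Sketch only: the release store orders the data write before the flag
	 * write, and the acquire load orders the flag read before the data read,
	 * so a reader that sees ready != 0 also sees the published data. */
	static int data;
	static int ready;

	static void publish_sketch(int value)
	{
		data = value;
		smp_store_release(&ready, 1);
	}

	static int consume_sketch(void)
	{
		if (smp_load_acquire(&ready))
			return data;
		return -1;	/* nothing published yet */
	}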
 
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
new file mode 100644
index 0000000..4a748ce
--- /dev/null
+++ b/arch/arm64/include/asm/bug.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015  ARM Limited
+ * Author: Dave Martin <Dave.Martin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ARCH_ARM64_ASM_BUG_H
+#define _ARCH_ARM64_ASM_BUG_H
+
+#include <asm/debug-monitors.h>
+
+#ifdef CONFIG_GENERIC_BUG
+#define HAVE_ARCH_BUG
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line)				\
+		".pushsection .rodata.str,\"aMS\",@progbits,1\n"	\
+	"2:	.string \"" file "\"\n\t"				\
+		".popsection\n\t"					\
+									\
+		".long 2b - 0b\n\t"					\
+		".short " #line "\n\t"
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
+
+#define __BUG_FLAGS(flags) asm volatile (		\
+		".pushsection __bug_table,\"a\"\n\t"	\
+		".align 2\n\t"				\
+	"0:	.long 1f - 0b\n\t"			\
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__)		\
+		".short " #flags "\n\t"			\
+		".popsection\n"				\
+							\
+	"1:	brk %[imm]"				\
+		:: [imm] "i" (BUG_BRK_IMM)		\
+)
+
+#define BUG() do {				\
+	_BUG_FLAGS(0);				\
+	unreachable();				\
+} while (0)
+
+#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+
+#endif /* ! CONFIG_GENERIC_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif /* ! _ARCH_ARM64_ASM_BUG_H */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index d8c25b7..899e9f1 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -21,7 +21,9 @@
 #include <linux/bug.h>
 #include <linux/mmdebug.h>
 
+#include <asm/atomic.h>
 #include <asm/barrier.h>
+#include <asm/lse.h>
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 {
@@ -29,37 +31,73 @@
 
 	switch (size) {
 	case 1:
-		asm volatile("//	__xchg1\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxrb	%w0, %2\n"
 		"	stlxrb	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpalb	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 2:
-		asm volatile("//	__xchg2\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxrh	%w0, %2\n"
 		"	stlxrh	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpalh	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 4:
-		asm volatile("//	__xchg4\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxr	%w0, %2\n"
 		"	stlxr	%w1, %w3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpal	%w3, %w0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
 			: "r" (x)
 			: "memory");
 		break;
 	case 8:
-		asm volatile("//	__xchg8\n"
+		asm volatile(ARM64_LSE_ATOMIC_INSN(
+		/* LL/SC */
+		"	prfm	pstl1strm, %2\n"
 		"1:	ldxr	%0, %2\n"
 		"	stlxr	%w1, %3, %2\n"
 		"	cbnz	%w1, 1b\n"
+		"	dmb	ish",
+		/* LSE atomics */
+		"	nop\n"
+		"	nop\n"
+		"	swpal	%3, %0, %2\n"
+		"	nop\n"
+		"	nop")
 			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
 			: "r" (x)
 			: "memory");
@@ -68,7 +106,6 @@
 		BUILD_BUG();
 	}
 
-	smp_mb();
 	return ret;
 }
 
@@ -83,131 +120,39 @@
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				      unsigned long new, int size)
 {
-	unsigned long oldval = 0, res;
-
 	switch (size) {
 	case 1:
-		do {
-			asm volatile("// __cmpxchg1\n"
-			"	ldxrb	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrb	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_1(ptr, (u8)old, new);
 	case 2:
-		do {
-			asm volatile("// __cmpxchg2\n"
-			"	ldxrh	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxrh	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_2(ptr, (u16)old, new);
 	case 4:
-		do {
-			asm volatile("// __cmpxchg4\n"
-			"	ldxr	%w1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%w1, %w3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %w4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
-		do {
-			asm volatile("// __cmpxchg8\n"
-			"	ldxr	%1, %2\n"
-			"	mov	%w0, #0\n"
-			"	cmp	%1, %3\n"
-			"	b.ne	1f\n"
-			"	stxr	%w0, %4, %2\n"
-			"1:\n"
-				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
-				: "Ir" (old), "r" (new)
-				: "cc");
-		} while (res);
-		break;
-
+		return __cmpxchg_case_8(ptr, old, new);
 	default:
 		BUILD_BUG();
 	}
 
-	return oldval;
-}
-
-#define system_has_cmpxchg_double()     1
-
-static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
-		unsigned long old1, unsigned long old2,
-		unsigned long new1, unsigned long new2, int size)
-{
-	unsigned long loop, lost;
-
-	switch (size) {
-	case 8:
-		VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
-		do {
-			asm volatile("// __cmpxchg_double8\n"
-			"	ldxp	%0, %1, %2\n"
-			"	eor	%0, %0, %3\n"
-			"	eor	%1, %1, %4\n"
-			"	orr	%1, %0, %1\n"
-			"	mov	%w0, #0\n"
-			"	cbnz	%1, 1f\n"
-			"	stxp	%w0, %5, %6, %2\n"
-			"1:\n"
-				: "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
-				: "r" (old1), "r"(old2), "r"(new1), "r"(new2));
-		} while (loop);
-		break;
-	default:
-		BUILD_BUG();
-	}
-
-	return !lost;
-}
-
-static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
-			unsigned long old1, unsigned long old2,
-			unsigned long new1, unsigned long new2, int size)
-{
-	int ret;
-
-	smp_mb();
-	ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
-	smp_mb();
-
-	return ret;
+	unreachable();
 }
 
 static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 					 unsigned long new, int size)
 {
-	unsigned long ret;
+	switch (size) {
+	case 1:
+		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
+	case 2:
+		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
+	case 4:
+		return __cmpxchg_case_mb_4(ptr, old, new);
+	case 8:
+		return __cmpxchg_case_mb_8(ptr, old, new);
+	default:
+		BUILD_BUG();
+	}
 
-	smp_mb();
-	ret = __cmpxchg(ptr, old, new, size);
-	smp_mb();
-
-	return ret;
+	unreachable();
 }
 
 #define cmpxchg(ptr, o, n) \
@@ -228,21 +173,32 @@
 	__ret; \
 })
 
+#define system_has_cmpxchg_double()     1
+
+#define __cmpxchg_double_check(ptr1, ptr2)					\
+({										\
+	if (sizeof(*(ptr1)) != 8)						\
+		BUILD_BUG();							\
+	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
+})
+
 #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
 ({\
 	int __ret;\
-	__ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+				     (unsigned long)(n1), (unsigned long)(n2), \
+				     ptr1); \
 	__ret; \
 })
 
 #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
 ({\
 	int __ret;\
-	__ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
-			(unsigned long)(o2), (unsigned long)(n1), \
-			(unsigned long)(n2), sizeof(*(ptr1)));\
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+				  (unsigned long)(n1), (unsigned long)(n2), \
+				  ptr1); \
 	__ret; \
 })
 
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c104421..1715707 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -25,15 +25,20 @@
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
 #define ARM64_WORKAROUND_845719			2
 #define ARM64_HAS_SYSREG_GIC_CPUIF		3
+#define ARM64_HAS_PAN				4
+#define ARM64_HAS_LSE_ATOMICS			5
 
-#define ARM64_NCAPS				4
+#define ARM64_NCAPS				6
 
 #ifndef __ASSEMBLY__
 
+#include <linux/kernel.h>
+
 struct arm64_cpu_capabilities {
 	const char *desc;
 	u16 capability;
 	bool (*matches)(const struct arm64_cpu_capabilities *);
+	void (*enable)(void);
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
@@ -41,8 +46,8 @@
 		};
 
 		struct {	/* Feature register checking */
-			u64 register_mask;
-			u64 register_value;
+			int field_pos;
+			int min_field_value;
 		};
 	};
 };
@@ -70,6 +75,13 @@
 		__set_bit(num, cpu_hwcaps);
 }
 
+static inline int __attribute_const__ cpuid_feature_extract_field(u64 features,
+								  int field)
+{
+	return (s64)(features << (64 - 4 - field)) >> (64 - 4);
+}
+
+
 void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
 			    const char *info);
 void check_local_cpu_errata(void);
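
For reference, a minimal userspace sketch (not part of the patch) of what the new cpuid_feature_extract_field() helper computes: the 4-bit field is shifted so its top bit lands at bit 63, then arithmetic-shifted back, so the result is sign-extended and a field value of 0xf reads as -1 ("not implemented"):

#include <stdint.h>
#include <stdio.h>

/* Standalone re-statement of the 4-bit signed field extraction above. */
static int feature_extract_field(uint64_t features, int field)
{
	return (int64_t)(features << (64 - 4 - field)) >> (64 - 4);
}

int main(void)
{
	/* Hypothetical ID register values, field at bit position 20. */
	printf("%d\n", feature_extract_field(2ULL << 20, 20));		/* 2  */
	printf("%d\n", feature_extract_field(0xfULL << 20, 20));	/* -1 */
	return 0;
}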
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a84ec60..ee6403d 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -81,9 +81,6 @@
 #define ID_AA64MMFR0_BIGEND(mmfr0)	\
 	(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
 
-#define SCTLR_EL1_CP15BEN	(0x1 << 5)
-#define SCTLR_EL1_SED		(0x1 << 8)
-
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 40ec68a..279c85b5 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,12 @@
 
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/esr.h>
+#include <asm/insn.h>
+#include <asm/ptrace.h>
+
 /* Low-level stepping controls. */
 #define DBG_MDSCR_SS		(1 << 0)
 #define DBG_SPSR_SS		(1 << 21)
@@ -38,12 +44,7 @@
 /*
  * Break point instruction encoding
  */
-#define BREAK_INSTR_SIZE		4
-
-/*
- * ESR values expected for dynamic and compile time BRK instruction
- */
-#define DBG_ESR_VAL_BRK(x)	(0xf2000000 | ((x) & 0xfffff))
+#define BREAK_INSTR_SIZE		AARCH64_INSN_SIZE
 
 /*
  * #imm16 values used for BRK instruction generation
@@ -51,10 +52,12 @@
  * 0x100: for triggering a fault on purpose (reserved)
  * 0x400: for dynamic BRK instruction
  * 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
  */
 #define FAULT_BRK_IMM			0x100
 #define KGDB_DYN_DBG_BRK_IMM		0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
+#define BUG_BRK_IMM			0x800
 
 /*
  * BRK instruction encoding
@@ -68,25 +71,10 @@
  */
 #define AARCH64_BREAK_FAULT	(AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
 
-/*
- * Extract byte from BRK instruction
- */
-#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
-	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
-
-/*
- * Extract byte from BRK #imm16
- */
-#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
-	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
-
-#define KGDB_DYN_DBG_BRK_BYTE(x) \
-	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
-
-#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DBG_BRK_BYTE(0)
-#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DBG_BRK_BYTE(1)
-#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DBG_BRK_BYTE(2)
-#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DBG_BRK_BYTE(3)
+#define AARCH64_BREAK_KGDB_DYN_DBG	\
+	(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
+#define KGDB_DYN_BRK_INS_BYTE(x)	\
+	((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
 
 #define CACHE_FLUSH_IS_SAFE		1
 
@@ -127,13 +115,13 @@
 
 u8 debug_monitors_arch(void);
 
-enum debug_el {
+enum dbg_active_el {
 	DBG_ACTIVE_EL0 = 0,
 	DBG_ACTIVE_EL1,
 };
 
-void enable_debug_monitors(enum debug_el el);
-void disable_debug_monitors(enum debug_el el);
+void enable_debug_monitors(enum dbg_active_el el);
+void disable_debug_monitors(enum dbg_active_el el);
 
 void user_rewind_single_step(struct task_struct *task);
 void user_fastforward_single_step(struct task_struct *task);
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7052245..77eeb2c 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,6 +18,8 @@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H
 
+#include <asm/memory.h>
+
 #define ESR_ELx_EC_UNKNOWN	(0x00)
 #define ESR_ELx_EC_WFx		(0x01)
 /* Unallocated EC: 0x02 */
@@ -99,6 +101,13 @@
 #define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
 #define ESR_ELx_xVC_IMM_MASK	((1UL << 16) - 1)
 
+/* ESR value templates for specific events */
+
+/* BRK instruction trap from AArch64 state */
+#define ESR_ELx_VAL_BRK64(imm)					\
+	((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL |	\
+	 ((imm) & 0xffff))
+
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
 
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0303705..6cb7e1a 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,7 +18,13 @@
 #ifndef __ASM_EXCEPTION_H
 #define __ASM_EXCEPTION_H
 
+#include <linux/ftrace.h>
+
 #define __exception	__attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry	__irq_entry
+#else
 #define __exception_irq_entry	__exception
+#endif
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index c0739187..8b9884c 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -8,7 +8,7 @@
  * Copyright (C) 1998 Ingo Molnar
  * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
  *
- * Adapted from arch/x86_64 version.
+ * Adapted from arch/x86 version.
  *
  */
 
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 74069b3..007a69f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -20,10 +20,17 @@
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/errno.h>
+#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
 	asm volatile(							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
+"	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
@@ -39,6 +46,8 @@
 "	.align	3\n"							\
 "	.quad	1b, 4b, 2b, 4b\n"					\
 "	.popsection\n"							\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
+		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
 	: "memory")
@@ -112,6 +121,7 @@
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+"	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 6aae421..2bb7009 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -24,9 +24,7 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
 	unsigned int ipi_irqs[NR_IPI];
-#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
@@ -34,10 +32,8 @@
 #define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
 #define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)
 
-#ifdef CONFIG_SMP
 u64 smp_irq_stat_cpu(unsigned int cpu);
 #define arch_irq_stat_cpu	smp_irq_stat_cpu
-#endif
 
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED	1
 
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2fd9b14..bb4052e 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #ifndef __ASM_HUGETLB_H
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index b4f6b19..8e24ef3 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_IRQ_WORK_H
 #define __ASM_IRQ_WORK_H
 
-#ifdef CONFIG_SMP
-
 #include <asm/smp.h>
 
 static inline bool arch_irq_work_has_interrupt(void)
@@ -10,13 +8,4 @@
 	return !!__smp_cross_call;
 }
 
-#else
-
-static inline bool arch_irq_work_has_interrupt(void)
-{
-	return false;
-}
-
-#endif
-
 #endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index c0e5165..1b5e0e8 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,14 +26,28 @@
 
 #define JUMP_LABEL_NOP_SIZE		AARCH64_INSN_SIZE
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm goto("1: nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 ".align 3\n\t"
 		 ".quad 1b, %l[l_yes], %c0\n\t"
 		 ".popsection\n\t"
-		 :  :  "i"(key) :  : l_yes);
+		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm goto("1: b %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 3\n\t"
+		 ".quad 1b, %l[l_yes], %c0\n\t"
+		 ".popsection\n\t"
+		 :  :  "i"(&((char *)key)[branch]) :  : l_yes);
 
 	return false;
 l_yes:
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
new file mode 100644
index 0000000..3de42d6
--- /dev/null
+++ b/arch/arm64/include/asm/lse.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_LSE_H
+#define __ASM_LSE_H
+
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
+#ifdef __ASSEMBLER__
+
+.arch_extension	lse
+
+.macro alt_lse, llsc, lse
+	alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
+.endm
+
+#else	/* __ASSEMBLER__ */
+
+__asm__(".arch_extension	lse");
+
+/* Move the ll/sc atomics out-of-line */
+#define __LL_SC_INLINE
+#define __LL_SC_PREFIX(x)	__ll_sc_##x
+#define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+
+/* Macro for constructing calls to out-of-line ll/sc atomics */
+#define __LL_SC_CALL(op)	"bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+
+/* In-line patching at runtime */
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse)				\
+	ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+
+#endif	/* __ASSEMBLER__ */
+#else	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+
+#ifdef __ASSEMBLER__
+
+.macro alt_lse, llsc, lse
+	\llsc
+.endm
+
+#else	/* __ASSEMBLER__ */
+
+#define __LL_SC_INLINE		static inline
+#define __LL_SC_PREFIX(x)	x
+#define __LL_SC_EXPORT(x)
+
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse)	llsc
+
+#endif	/* __ASSEMBLER__ */
+#endif	/* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif	/* __ASM_LSE_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index f800d45..44a59c2 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -114,6 +114,14 @@
 #define PHYS_OFFSET		({ memstart_addr; })
 
 /*
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
+ */
+#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
+
+/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 79fcfb0..0302087 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,7 +28,6 @@
 #define ASID(mm)	((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
-extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 4fde8c1..0a456be 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,8 +16,6 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
-#ifdef CONFIG_SMP
-
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -38,12 +36,6 @@
 }
 #define __my_cpu_offset __my_cpu_offset()
 
-#else	/* !CONFIG_SMP */
-
-#define set_my_cpu_offset(x)	do { } while (0)
-
-#endif /* CONFIG_SMP */
-
 #define PERCPU_OP(op, asm_op)						\
 static inline unsigned long __percpu_##op(void *ptr,			\
 			unsigned long val, int size)			\
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 6471773..7bd3cdb 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,7 +17,7 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 59bfae7..24154b0 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -104,6 +104,7 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
 
@@ -168,5 +169,7 @@
 #define TCR_TG1_64K		(UL(3) << 30)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
+#define TCR_HA			(UL(1) << 39)
+#define TCR_HD			(UL(1) << 40)
 
 #endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 56283f8..6900b2d9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_PGTABLE_H
 #define __ASM_PGTABLE_H
 
+#include <asm/bug.h>
 #include <asm/proc-fns.h>
 
 #include <asm/memory.h>
@@ -27,7 +28,11 @@
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define PTE_WRITE		(PTE_DBM)		 /* same as DBM */
+#else
 #define PTE_WRITE		(_AT(pteval_t, 1) << 57)
+#endif
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -48,18 +53,16 @@
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#ifdef CONFIG_SMP
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#else
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#endif
 
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
@@ -137,12 +140,20 @@
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
 #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
-#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define pte_hw_dirty(pte)	(!(pte_val(pte) & PTE_RDONLY))
+#else
+#define pte_hw_dirty(pte)	(0)
+#endif
+#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
@@ -209,20 +220,49 @@
 	}
 }
 
+struct mm_struct;
+struct vm_area_struct;
+
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
+/*
+ * PTE bits configuration in the presence of hardware Dirty Bit Management
+ * (PTE_WRITE == PTE_DBM):
+ *
+ * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
+ *   0      0      |   1           0          0
+ *   0      1      |   1           1          0
+ *   1      0      |   1           0          1
+ *   1      1      |   0           1          x
+ *
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
+ * the page fault mechanism. Checking the dirty status of a pte becomes:
+ *
+ *   PTE_DIRTY || !PTE_RDONLY
+ */
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 	if (pte_valid_user(pte)) {
 		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (pte_dirty(pte) && pte_write(pte))
+		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
 	}
 
+	/*
+	 * If the existing pte is valid, check for potential race with
+	 * hardware updates of the pte (ptep_set_access_flags safely changes
+	 * valid ptes without going through an invalid entry).
+	 */
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep)) {
+		BUG_ON(!pte_young(pte));
+		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	}
+
 	set_pte(ptep, pte);
 }
 
@@ -461,6 +501,9 @@
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
 			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+	/* preserve the hardware dirty information */
+	if (pte_hw_dirty(pte))
+		newprot |= PTE_DIRTY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
@@ -470,6 +513,101 @@
 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * Atomic pte/pmd modifications.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned int tmp, res;
+
+	asm volatile("//	ptep_test_and_clear_young\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
+	"	and	%0, %0, %4		// clear PTE_AF\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
+	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));
+
+	return res;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pte_t *ptep)
+{
+	pteval_t old_pteval;
+	unsigned int tmp;
+
+	asm volatile("//	ptep_get_and_clear\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	stxr	%w1, xzr, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));
+
+	return __pte(old_pteval);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ */
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned long tmp;
+
+	asm volatile("//	ptep_set_wrprotect\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
+	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
+	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
+	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
+	: "cc");
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+#endif
+#endif	/* CONFIG_ARM64_HW_AFDBM */
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
@@ -505,6 +643,21 @@
 
 #define pgtable_cache_init() do { } while (0)
 
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * set_pte() does not have a DSB for user mappings, so make sure that
+	 * the page table write is visible.
+	 */
+	dsb(ishst);
+}
+
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
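
Reading the DBM table in the comment above: hardware records a write by clearing PTE_RDONLY, so with CONFIG_ARM64_HW_AFDBM a pte is dirty when either the software PTE_DIRTY bit is set or PTE_RDONLY is clear. A standalone sketch of that predicate (illustrative only, bit positions as in the arm64 headers, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_RDONLY	(1ULL << 7)	/* AP[2]: cleared by hardware on write */
#define PTE_DBM		(1ULL << 51)	/* doubles as PTE_WRITE with HW AFDBM  */
#define PTE_DIRTY	(1ULL << 55)	/* software dirty bit                  */

static bool pte_dirty(uint64_t pte)
{
	bool sw_dirty = pte & PTE_DIRTY;
	bool hw_dirty = !(pte & PTE_RDONLY);	/* pte_hw_dirty() equivalent */

	return sw_dirty || hw_dirty;
}

int main(void)
{
	printf("%d\n", pte_dirty(PTE_DBM | PTE_RDONLY));	/* 0: clean, writable   */
	printf("%d\n", pte_dirty(PTE_DBM));			/* 1: hardware-dirtied  */
	printf("%d\n", pte_dirty(PTE_DIRTY | PTE_RDONLY));	/* 1: software-dirtied  */
	return 0;
}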
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c893e..98f3235 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -186,4 +186,6 @@
 
 #endif
 
+void cpu_enable_pan(void);
+
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1a..0000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2013 ARM Limited
- */
-
-#ifndef __ASM_PSCI_H
-#define __ASM_PSCI_H
-
-int __init psci_dt_init(void);
-
-#ifdef CONFIG_ACPI
-int __init psci_acpi_init(void);
-bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
-#else
-static inline int psci_acpi_init(void) { return 0; }
-static inline bool acpi_psci_present(void) { return false; }
-#endif
-
-#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index d6dd9fd..536274e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -183,11 +183,7 @@
 
 #define instruction_pointer(regs)	((unsigned long)(regs)->pc)
 
-#ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 
 #endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index db02be8..d9c3d6a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -20,10 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#ifndef CONFIG_SMP
-# error "<asm/smp.h> included in non-SMP build"
-#endif
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 struct seq_file;
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 7abf757..af58dcd 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -56,6 +56,4 @@
 	return -EINVAL;
 }
 
-void __init do_post_cpus_up_work(void);
-
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee1287..c85e96d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/lse.h>
 #include <asm/spinlock_types.h>
 #include <asm/processor.h>
 
@@ -38,11 +39,21 @@
 
 	asm volatile(
 	/* Atomically increment the next ticket. */
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 "	prfm	pstl1strm, %3\n"
 "1:	ldaxr	%w0, %3\n"
 "	add	%w1, %w0, %w5\n"
 "	stxr	%w2, %w1, %3\n"
-"	cbnz	%w2, 1b\n"
+"	cbnz	%w2, 1b\n",
+	/* LSE atomics */
+"	mov	%w2, %w5\n"
+"	ldadda	%w2, %w0, %3\n"
+"	nop\n"
+"	nop\n"
+"	nop\n"
+	)
+
 	/* Did we get the lock? */
 "	eor	%w1, %w0, %w0, ror #16\n"
 "	cbz	%w1, 3f\n"
@@ -67,15 +78,25 @@
 	unsigned int tmp;
 	arch_spinlock_t lockval;
 
-	asm volatile(
-"	prfm	pstl1strm, %2\n"
-"1:	ldaxr	%w0, %2\n"
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbnz	%w1, 2f\n"
-"	add	%w0, %w0, %3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b\n"
-"2:"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldaxr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 2f\n"
+	"	add	%w0, %w0, %3\n"
+	"	stxr	%w1, %w0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 1f\n"
+	"	add	%w1, %w0, %3\n"
+	"	casa	%w0, %w1, %2\n"
+	"	and	%w1, %w1, #0xffff\n"
+	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"1:")
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	: "I" (1 << TICKET_SHIFT)
 	: "memory");
@@ -85,10 +106,19 @@
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(
-"	stlrh	%w1, %0\n"
-	: "=Q" (lock->owner)
-	: "r" (lock->owner + 1)
+	unsigned long tmp;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	ldrh	%w1, %0\n"
+	"	add	%w1, %w1, #1\n"
+	"	stlrh	%w1, %0",
+	/* LSE atomics */
+	"	mov	%w1, #1\n"
+	"	nop\n"
+	"	staddlh	%w1, %0")
+	: "=Q" (lock->owner), "=&r" (tmp)
+	:
 	: "memory");
 }
 
@@ -123,13 +153,24 @@
 {
 	unsigned int tmp;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"	sevl\n"
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
 	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	mov	%w0, wzr\n"
+	"2:	casa	%w0, %w2, %1\n"
+	"	cbz	%w0, 3f\n"
+	"	ldxr	%w0, %1\n"
+	"	cbz	%w0, 2b\n"
+	"	wfe\n"
+	"	b	1b\n"
+	"3:")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -139,11 +180,18 @@
 {
 	unsigned int tmp;
 
-	asm volatile(
-	"	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1f\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"1:	ldaxr	%w0, %1\n"
+	"	cbnz	%w0, 2f\n"
 	"	stxr	%w0, %w2, %1\n"
-	"1:\n"
+	"	cbnz	%w0, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	mov	%w0, wzr\n"
+	"	casa	%w0, %w2, %1\n"
+	"	nop\n"
+	"	nop")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -153,9 +201,10 @@
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	asm volatile(
-	"	stlr	%w1, %0\n"
-	: "=Q" (rw->lock) : "r" (0) : "memory");
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	"	stlr	wzr, %0",
+	"	swpl	wzr, wzr, %0")
+	: "=Q" (rw->lock) :: "memory");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -172,6 +221,10 @@
  *
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
+ *
+ * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
+ * and LSE implementations may exhibit different behaviour (although this
+ * will have no effect on lockdep).
  */
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
@@ -179,26 +232,43 @@
 
 	asm volatile(
 	"	sevl\n"
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1b\n"
 	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
+	"	nop\n"
+	"	cbnz	%w1, 2b",
+	/* LSE atomics */
+	"1:	wfe\n"
+	"2:	ldxr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1b\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w0, %w1, %w0\n"
+	"	cbnz	%w0, 2b")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp, tmp2;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	ldxr	%w0, %2\n"
 	"	sub	%w0, %w0, #1\n"
 	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
+	"	cbnz	%w1, 1b",
+	/* LSE atomics */
+	"	movn	%w0, #0\n"
+	"	nop\n"
+	"	nop\n"
+	"	staddl	%w0, %2")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
 	: "memory");
@@ -206,17 +276,28 @@
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned int tmp, tmp2 = 1;
+	unsigned int tmp, tmp2;
 
-	asm volatile(
-	"	ldaxr	%w0, %2\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	mov	%w1, #1\n"
+	"1:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1f\n"
+	"	tbnz	%w0, #31, 2f\n"
 	"	stxr	%w1, %w0, %2\n"
-	"1:\n"
-	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1f\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w1, %w1, %w0\n"
+	"	nop\n"
+	"1:")
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 
 	return !tmp2;
 }
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d3836..55be59a 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,6 +20,8 @@
 # error "please don't include this file directly"
 #endif
 
+#include <linux/types.h>
+
 #define TICKET_SHIFT	16
 
 typedef struct {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5c89df0..a7f3d4b 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,8 +20,29 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <asm/opcodes.h>
+
+#define SCTLR_EL1_CP15BEN	(0x1 << 5)
+#define SCTLR_EL1_SED		(0x1 << 8)
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ *  C5.2, version:ARM DDI 0487A.f)
+ *	[20-19] : Op0
+ *	[18-16] : Op1
+ *	[15-12] : CRn
+ *	[11-8]  : CRm
+ *	[7-5]   : Op2
+ */
 #define sys_reg(op0, op1, crn, crm, op2) \
-	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#define REG_PSTATE_PAN_IMM                     sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN                         (1 << 23)
+
+#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+				     (!!x)<<8 | 0x1f)
 
 #ifdef __ASSEMBLY__
 
@@ -31,11 +52,11 @@
 	.equ	__reg_num_xzr, 31
 
 	.macro	mrs_s, rt, sreg
-	.inst	0xd5300000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5200000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 	.macro	msr_s, sreg, rt
-	.inst	0xd5100000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5000000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 #else
@@ -47,14 +68,23 @@
 "	.equ	__reg_num_xzr, 31\n"
 "\n"
 "	.macro	mrs_s, rt, sreg\n"
-"	.inst	0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 "\n"
 "	.macro	msr_s, sreg, rt\n"
-"	.inst	0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 );
 
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+	u32 val;
+
+	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+	val &= ~clear;
+	val |= set;
+	asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
 #endif
 
 #endif	/* __ASM_SYSREG_H */
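
The revised sys_reg() encoding packs Op0/Op1/CRn/CRm/Op2 into the bit positions listed in the new comment; for example, the PAN PSTATE encoding used just above works out as in this hypothetical check (not part of the patch):

#include <stdio.h>

/* Same packing as the updated sys_reg() macro above. */
#define sys_reg(op0, op1, crn, crm, op2) \
	((((op0) & 3) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5))

int main(void)
{
	/* REG_PSTATE_PAN_IMM = sys_reg(0, 0, 4, 0, 4): CRn=4, Op2=4 */
	printf("0x%05x\n", (unsigned int)sys_reg(0, 0, 4, 0, 4));	/* 0x04080 */
	return 0;
}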
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 3a0242c..d6e6b66 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -41,7 +41,12 @@
 		flush_tlb_mm(tlb->mm);
 	} else {
 		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-		flush_tlb_range(&vma, tlb->start, tlb->end);
+		/*
+		 * The intermediate page table levels are already handled by
+		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+		 * TLBI is sufficient here.
+		 */
+		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
 	}
 }
 
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 934815d..7bd2da0 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -87,58 +87,64 @@
 		((unsigned long)ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	asm("tlbi	vae1is, %0" : : "r" (addr));
+	asm("tlbi	vale1is, %0" : : "r" (addr));
 	dsb(ish);
 }
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
-{
-	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
-	unsigned long addr;
-	start = asid | (start >> 12);
-	end = asid | (end >> 12);
-
-	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vae1is, %0" : : "r"(addr));
-	dsb(ish);
-}
-
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	unsigned long addr;
-	start >>= 12;
-	end >>= 12;
-
-	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi vaae1is, %0" : : "r"(addr));
-	dsb(ish);
-	isb();
-}
-
 /*
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
 #define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
 
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end,
+				     bool last_level)
+{
+	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
+	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
+	start = asid | (start >> 12);
+	end = asid | (end >> 12);
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+		if (last_level)
+			asm("tlbi vale1is, %0" : : "r"(addr));
+		else
+			asm("tlbi vae1is, %0" : : "r"(addr));
+	}
+	dsb(ish);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
+	__flush_tlb_range(vma, start, end, false);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
+	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
 		flush_tlb_all();
+		return;
+	}
+
+	start >>= 12;
+	end >>= 12;
+
+	dsb(ishst);
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
+		asm("tlbi vaae1is, %0" : : "r"(addr));
+	dsb(ish);
+	isb();
 }
 
 /*
@@ -154,20 +160,6 @@
 	asm("tlbi	vae1is, %0" : : "r" (addr));
 	dsb(ish);
 }
-/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
- */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long addr, pte_t *ptep)
-{
-	/*
-	 * set_pte() does not have a DSB for user mappings, so make sure that
-	 * the page table write is visible.
-	 */
-	dsb(ishst);
-}
-
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #endif
 
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 225ec35..a3e9d6f 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_TOPOLOGY_H
 #define __ASM_TOPOLOGY_H
 
-#ifdef CONFIG_SMP
-
 #include <linux/cpumask.h>
 
 struct cpu_topology {
@@ -24,13 +22,6 @@
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#else
-
-static inline void init_cpu_topology(void) { }
-static inline void store_cpu_topology(unsigned int cpuid) { }
-
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 232e4ba..0cc2f29 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,13 +34,32 @@
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
+
+	return in ? : __in_irqentry_text(ptr);
 }
 
 #endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba44..b2ede967 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,7 +24,10 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -131,6 +134,8 @@
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
@@ -148,6 +153,8 @@
 		BUILD_BUG();						\
 	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -194,6 +201,8 @@
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
@@ -210,6 +219,8 @@
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __put_user(x, ptr)						\
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 73cf0f5..361c8a8 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -27,5 +27,6 @@
 #define HWCAP_SHA1		(1 << 5)
 #define HWCAP_SHA2		(1 << 6)
 #define HWCAP_CRC32		(1 << 7)
+#define HWCAP_ATOMICS		(1 << 8)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643..208db3d 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define PSR_I_BIT	0x00000080
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
+#define PSR_PAN_BIT	0x00400000
 #define PSR_Q_BIT	0x08000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 426d076..22dc9bc 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,15 +17,15 @@
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
-			   cpufeature.o alternative.o cacheinfo.o
+			   cpufeature.o alternative.o cacheinfo.o		\
+			   smp.o smp_spin_table.o topology.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o entry32.o		\
 					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o topology.o
-arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
+arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 221b983..ab9db0e 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -85,7 +85,7 @@
 	return insn;
 }
 
-static int __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
 	struct alt_region *region = alt_region;
@@ -114,19 +114,39 @@
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + nr_inst));
 	}
-
-	return 0;
 }
 
-void apply_alternatives_all(void)
+/*
+ * We might be patching the stop_machine state machine, so implement a
+ * really simple polling protocol here.
+ */
+static int __apply_alternatives_multi_stop(void *unused)
 {
+	static int patched = 0;
 	struct alt_region region = {
 		.begin	= __alt_instructions,
 		.end	= __alt_instructions_end,
 	};
 
+	/* We always have a CPU 0 at this point (__init) */
+	if (smp_processor_id()) {
+		while (!READ_ONCE(patched))
+			cpu_relax();
+		isb();
+	} else {
+		BUG_ON(patched);
+		__apply_alternatives(&region);
+		/* Barriers provided by the cache flushing */
+		WRITE_ONCE(patched, 1);
+	}
+
+	return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
 	/* better not try code patching on a live SMP system */
-	stop_machine(__apply_alternatives, &region, NULL);
+	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
 void apply_alternatives(void *start, size_t length)
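
The new __apply_alternatives_multi_stop() relies on a very small synchronisation pattern: one CPU patches and then publishes a flag, while every other CPU spins until the flag becomes visible. A rough userspace analogue using C11 atomics and pthreads (illustrative only, not part of the patch; the kernel version also issues an ISB and runs under stop_machine()):

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int patched;

/* One thread does the work, the others poll until it signals completion --
 * the same shape as __apply_alternatives_multi_stop() above. */
static void *cpu(void *arg)
{
	long id = (long)arg;

	if (id == 0) {
		/* ... patch the text ... */
		atomic_store(&patched, 1);
	} else {
		while (!atomic_load(&patched))
			;	/* cpu_relax() in the kernel */
	}
	printf("cpu%ld done\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, cpu, (void *)i);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}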
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7922c2e..bcee7ab 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,8 +14,11 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
@@ -279,6 +282,8 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)		\
 	__asm__ __volatile__(					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+		    CONFIG_ARM64_PAN)				\
 	"	mov		%w2, %w1\n"			\
 	"0:	ldxr"B"		%w1, [%3]\n"			\
 	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
@@ -294,7 +299,9 @@
 	"	.align		3\n"				\
 	"	.quad		0b, 3b\n"			\
 	"	.quad		1b, 3b\n"			\
-	"	.popsection"					\
+	"	.popsection\n"					\
+	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+		CONFIG_ARM64_PAN)				\
 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
 	: "memory")
@@ -504,16 +511,6 @@
 	return 0;
 }
 
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
-	u32 val;
-
-	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
-	val &= ~clear;
-	val |= set;
-	asm volatile("msr sctlr_el1, %0" : : "r" (val));
-}
-
 static int cp15_barrier_set_hw_mode(bool enable)
 {
 	if (enable)
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index 5ea337d..b6bd7d4 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -30,9 +30,7 @@
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_SMP
 	&smp_spin_table_ops,
-#endif
 	&cpu_psci_ops,
 	NULL,
 };
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5ad86ce..3c9aed3 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -21,24 +21,57 @@
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
 
 static bool
-has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 {
-	u64 val;
+	int val = cpuid_feature_extract_field(reg, entry->field_pos);
 
-	val = read_cpuid(id_aa64pfr0_el1);
-	return (val & entry->register_mask) == entry->register_value;
+	return val >= entry->min_field_value;
 }
 
+#define __ID_FEAT_CHK(reg)						\
+static bool __maybe_unused						\
+has_##reg##_feature(const struct arm64_cpu_capabilities *entry)		\
+{									\
+	u64 val;							\
+									\
+	val = read_cpuid(reg##_el1);					\
+	return feature_matches(val, entry);				\
+}
+
+__ID_FEAT_CHK(id_aa64pfr0);
+__ID_FEAT_CHK(id_aa64mmfr1);
+__ID_FEAT_CHK(id_aa64isar0);
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
 		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
 		.matches = has_id_aa64pfr0_feature,
-		.register_mask = (0xf << 24),
-		.register_value = (1 << 24),
+		.field_pos = 24,
+		.min_field_value = 1,
 	},
+#ifdef CONFIG_ARM64_PAN
+	{
+		.desc = "Privileged Access Never",
+		.capability = ARM64_HAS_PAN,
+		.matches = has_id_aa64mmfr1_feature,
+		.field_pos = 20,
+		.min_field_value = 1,
+		.enable = cpu_enable_pan,
+	},
+#endif /* CONFIG_ARM64_PAN */
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+	{
+		.desc = "LSE atomic instructions",
+		.capability = ARM64_HAS_LSE_ATOMICS,
+		.matches = has_id_aa64isar0_feature,
+		.field_pos = 20,
+		.min_field_value = 2,
+	},
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
 	{},
 };
 
@@ -55,9 +88,15 @@
 			pr_info("%s %s\n", info, caps[i].desc);
 		cpus_set_cap(caps[i].capability);
 	}
+
+	/* second pass allows enable() to consider interacting capabilities */
+	for (i = 0; caps[i].desc; i++) {
+		if (cpus_have_cap(caps[i].capability) && caps[i].enable)
+			caps[i].enable();
+	}
 }
 
 void check_local_cpu_features(void)
 {
-	check_cpu_capabilities(arm64_features, "detected feature");
+	check_cpu_capabilities(arm64_features, "detected feature:");
 }
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index b056369..9b3b62a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -82,7 +82,7 @@
 static DEFINE_PER_CPU(int, mde_ref_count);
 static DEFINE_PER_CPU(int, kde_ref_count);
 
-void enable_debug_monitors(enum debug_el el)
+void enable_debug_monitors(enum dbg_active_el el)
 {
 	u32 mdscr, enable = 0;
 
@@ -102,7 +102,7 @@
 	}
 }
 
-void disable_debug_monitors(enum debug_el el)
+void disable_debug_monitors(enum dbg_active_el el)
 {
 	u32 mdscr, disable = 0;
 
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index f537406..816120e 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,7 +13,7 @@
 #include <asm/efi.h>
 #include <asm/sections.h>
 
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 					unsigned long *image_addr,
 					unsigned long *image_size,
 					unsigned long *reserve_addr,
@@ -23,21 +23,44 @@
 {
 	efi_status_t status;
 	unsigned long kernel_size, kernel_memsize = 0;
+	unsigned long nr_pages;
+	void *old_image_addr = (void *)*image_addr;
 
 	/* Relocate the image, if required. */
 	kernel_size = _edata - _text;
 	if (*image_addr != (dram_base + TEXT_OFFSET)) {
 		kernel_memsize = kernel_size + (_end - _edata);
-		status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
-				       SZ_2M, reserve_addr);
+
+		/*
+		 * First, try a straight allocation at the preferred offset.
+		 * This will work around the issue where, if dram_base == 0x0,
+		 * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
+		 * address of the allocation from being mistaken for a FAIL
+		 * return value or a NULL pointer). It will also ensure that, on
+		 * platforms where the [dram_base, dram_base + TEXT_OFFSET)
+		 * interval is partially occupied by the firmware (like on APM
+		 * Mustang), we can still place the kernel at the address
+		 * 'dram_base + TEXT_OFFSET'.
+		 */
+		*image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+		nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
+			   EFI_PAGE_SIZE;
+		status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+					EFI_LOADER_DATA, nr_pages,
+					(efi_physical_addr_t *)reserve_addr);
 		if (status != EFI_SUCCESS) {
-			pr_efi_err(sys_table, "Failed to relocate kernel\n");
-			return status;
+			kernel_memsize += TEXT_OFFSET;
+			status = efi_low_alloc(sys_table_arg, kernel_memsize,
+					       SZ_2M, reserve_addr);
+
+			if (status != EFI_SUCCESS) {
+				pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+				return status;
+			}
+			*image_addr = *reserve_addr + TEXT_OFFSET;
 		}
-		memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
-		       kernel_size);
-		*image_addr = *reserve_addr + TEXT_OFFSET;
-		*reserve_size = kernel_memsize + TEXT_OFFSET;
+		memcpy((void *)*image_addr, old_image_addr, kernel_size);
+		*reserve_size = kernel_memsize;
 	}
 
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e163518..4306c93 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -116,41 +116,34 @@
 	*/
 	.endm
 
-	.macro	kernel_exit, el, ret = 0
+	.macro	kernel_exit, el
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
-
 #ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+	nop
+	nop
 #ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG	"nop ; nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
-#else
-
-#define SEQUENCE_ORG	"nop ; nop"
-#define SEQUENCE_ALT	"tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+	nop
 #endif
-
-	alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+alternative_else
+	tbz	x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+	mrs	x29, contextidr_el1
+	msr	contextidr_el1, x29
+#else
+	msr contextidr_el1, xzr
+#endif
+1:
+alternative_endif
 #endif
 	.endif
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
-	.if	\ret
-	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
-	.else
 	ldp	x0, x1, [sp, #16 * 0]
-	.endif
 	ldp	x2, x3, [sp, #16 * 1]
 	ldp	x4, x5, [sp, #16 * 2]
 	ldp	x6, x7, [sp, #16 * 3]
@@ -613,22 +606,21 @@
  */
 ret_fast_syscall:
 	disable_irq				// disable interrupts
+	str	x0, [sp, #S_X0]			// returned x0
 	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
 	and	x2, x1, #_TIF_SYSCALL_WORK
 	cbnz	x2, ret_fast_syscall_trace
 	and	x2, x1, #_TIF_WORK_MASK
-	cbnz	x2, fast_work_pending
+	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
-	kernel_exit 0, ret = 1
+	kernel_exit 0
 ret_fast_syscall_trace:
 	enable_irq				// enable interrupts
-	b	__sys_trace_return
+	b	__sys_trace_return_skipped	// we already saved x0
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
-fast_work_pending:
-	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -652,7 +644,7 @@
 	cbnz	x2, work_pending
 	enable_step_tsk x1, x2
 no_work_pending:
-	kernel_exit 0, ret = 0
+	kernel_exit 0
 ENDPROC(ret_to_user)
 
 /*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 44d6f75..c56956a 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -158,6 +158,7 @@
 void fpsimd_flush_thread(void)
 {
 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+	fpsimd_flush_task_state(current);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c0ff3ce..a055be6 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -62,13 +62,8 @@
 /*
  * Initial memory map attributes.
  */
-#ifndef CONFIG_SMP
-#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
-#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
-#else
 #define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
 #define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
-#endif
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
@@ -574,7 +569,6 @@
 	.long	BOOT_CPU_MODE_EL1
 	.popsection
 
-#ifdef CONFIG_SMP
 	/*
 	 * This provides a "holding pen" for platforms to hold all secondary
 	 * cores until we're ready for them to initialise.
@@ -622,7 +616,6 @@
 	mov	x29, #0
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
-#endif	/* CONFIG_SMP */
 
 /*
  * Enable the MMU.
@@ -641,5 +634,13 @@
 	isb
 	msr	sctlr_el1, x0
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x27
 ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 7a1a5da..003bc3d 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -156,7 +156,7 @@
  * Convert a breakpoint privilege level to the corresponding exception
  * level.
  */
-static enum debug_el debug_exception_level(int privilege)
+static enum dbg_active_el debug_exception_level(int privilege)
 {
 	switch (privilege) {
 	case AARCH64_BREAKPOINT_EL0:
@@ -230,7 +230,7 @@
 	struct perf_event **slots;
 	struct debug_info *debug_info = &current->thread.debug;
 	int i, max_slots, ctrl_reg, val_reg, reg_enable;
-	enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
+	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
 	u32 ctrl;
 
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -537,7 +537,7 @@
  * exception level at the register level.
  * This is used when single-stepping after a breakpoint exception.
  */
-static void toggle_bp_registers(int reg, enum debug_el el, int enable)
+static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
 {
 	int i, max_slots, privilege;
 	u32 ctrl;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index dd9671c..f341866 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -101,9 +101,8 @@
 		return addr;
 
 	BUG_ON(!page);
-	set_fixmap(fixmap, page_to_phys(page));
-
-	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+			(uintaddr & ~PAGE_MASK));
 }
 
 static void __kprobes patch_unmap(int fixmap)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 463fa2e..11dc3fd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -33,9 +33,7 @@
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
-#ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
-#endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 4f1fec7..c2dd1ad 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -28,7 +28,7 @@
 	void *addr = (void *)entry->code;
 	u32 insn;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		insn = aarch64_insn_gen_branch_imm(entry->code,
 						   entry->target,
 						   AARCH64_INSN_BRANCH_NOLINK);
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index a0d10c5..bcac81e 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -235,13 +235,13 @@
 
 static struct break_hook kgdb_brkpt_hook = {
 	.esr_mask	= 0xffffffff,
-	.esr_val	= DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
+	.esr_val	= (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
 	.fn		= kgdb_brk_fn
 };
 
 static struct break_hook kgdb_compiled_brkpt_hook = {
 	.esr_mask	= 0xffffffff,
-	.esr_val	= DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
+	.esr_val	= (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
 	.fn		= kgdb_compiled_brk_fn
 };
 
@@ -328,9 +328,9 @@
  */
 struct kgdb_arch arch_kgdb_ops = {
 	.gdb_bpt_instr = {
-		KGDB_DYN_BRK_INS_BYTE0,
-		KGDB_DYN_BRK_INS_BYTE1,
-		KGDB_DYN_BRK_INS_BYTE2,
-		KGDB_DYN_BRK_INS_BYTE3,
+		KGDB_DYN_BRK_INS_BYTE(0),
+		KGDB_DYN_BRK_INS_BYTE(1),
+		KGDB_DYN_BRK_INS_BYTE(2),
+		KGDB_DYN_BRK_INS_BYTE(3),
 	}
 };
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4095379..b3d098b 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -38,6 +38,19 @@
 	return res->start;
 }
 
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ * @mask: bitmask of BARs to enable
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	if (pci_has_flag(PCI_PROBE_ONLY))
+		return 0;
+
+	return pci_enable_resources(dev, mask);
+}
+
 /*
  * Try to assign the IRQ number from DT when adding a new device
  */
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
new file mode 100644
index 0000000..3aa7483
--- /dev/null
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -0,0 +1,196 @@
+/*
+ * arm64 callchain support
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+struct frame_tail {
+	struct frame_tail	__user *fp;
+	unsigned long		lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+	       struct perf_callchain_entry *entry)
+{
+	struct frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail >= buftail.fp)
+		return NULL;
+
+	return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
+	u32		sp;
+	u32		lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+		      struct perf_callchain_entry *entry)
+{
+	struct compat_frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail + 1 >= (struct compat_frame_tail __user *)
+			compat_ptr(buftail.fp))
+		return NULL;
+
+	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+			 struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	perf_callchain_store(entry, regs->pc);
+
+	if (!compat_user_mode(regs)) {
+		/* AARCH64 mode */
+		struct frame_tail __user *tail;
+
+		tail = (struct frame_tail __user *)regs->regs[29];
+
+		while (entry->nr < PERF_MAX_STACK_DEPTH &&
+		       tail && !((unsigned long)tail & 0xf))
+			tail = user_backtrace(tail, entry);
+	} else {
+#ifdef CONFIG_COMPAT
+		/* AARCH32 compat mode */
+		struct compat_frame_tail __user *tail;
+
+		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+			tail && !((unsigned long)tail & 0x3))
+			tail = compat_user_backtrace(tail, entry);
+#endif
+	}
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+	struct perf_callchain_entry *entry = data;
+	perf_callchain_store(entry, frame->pc);
+	return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
+{
+	struct stackframe frame;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		/* We don't support guest os callchain now */
+		return;
+	}
+
+	frame.fp = regs->regs[29];
+	frame.sp = regs->sp;
+	frame.pc = regs->pc;
+
+	walk_stackframe(&frame, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+		return perf_guest_cbs->get_guest_ip();
+
+	return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+	int misc = 0;
+
+	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+		if (perf_guest_cbs->is_user_mode())
+			misc |= PERF_RECORD_MISC_GUEST_USER;
+		else
+			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+	} else {
+		if (user_mode(regs))
+			misc |= PERF_RECORD_MISC_USER;
+		else
+			misc |= PERF_RECORD_MISC_KERNEL;
+	}
+
+	return misc;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index b31e9a4..f9a74d4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,7 +25,7 @@
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/perf_event.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -36,7 +36,6 @@
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
-#include <asm/stacktrace.h>
 
 /*
  * ARMv8 supports a maximum of 32 events.
@@ -78,6 +77,16 @@
 
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
+	},								\
+}
+
 static int
 armpmu_map_cache_event(const unsigned (*cache_map)
 				      [PERF_COUNT_HW_CACHE_MAX]
@@ -435,10 +444,8 @@
 	unsigned int i, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;
 
-	if (!pmu_device) {
-		pr_err("no PMU device registered\n");
+	if (!pmu_device)
 		return -ENODEV;
-	}
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 	if (!irqs) {
@@ -703,118 +710,28 @@
 
 /* PMUv3 HW events mapping. */
 static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
 	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
 	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
 	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= HW_OP_UNSUPPORTED,
 	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-	[PERF_COUNT_HW_BUS_CYCLES]		= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= HW_OP_UNSUPPORTED,
-	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= HW_OP_UNSUPPORTED,
 };
 
 static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 						[PERF_COUNT_HW_CACHE_OP_MAX]
 						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-	[C(L1D)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(L1I)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(LL)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(DTLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(ITLB)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(BPU)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-			[C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
-	[C(NODE)] = {
-		[C(OP_READ)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_WRITE)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-		[C(OP_PREFETCH)] = {
-			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
-			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
-		},
-	},
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
 };
 
 /*
@@ -1337,7 +1254,7 @@
 		}
 
 		for_each_possible_cpu(cpu)
-			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+			if (dn == of_cpu_device_node_get(cpu))
 				break;
 
 		if (cpu >= nr_cpu_ids) {
@@ -1415,180 +1332,3 @@
 }
 early_initcall(init_hw_perf_events);
 
-/*
- * Callchain handling code.
- */
-struct frame_tail {
-	struct frame_tail	__user *fp;
-	unsigned long		lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
-	       struct perf_callchain_entry *entry)
-{
-	struct frame_tail buftail;
-	unsigned long err;
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-
-	pagefault_disable();
-	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-	pagefault_enable();
-
-	if (err)
-		return NULL;
-
-	perf_callchain_store(entry, buftail.lr);
-
-	/*
-	 * Frame pointers should strictly progress back up the stack
-	 * (towards higher addresses).
-	 */
-	if (tail >= buftail.fp)
-		return NULL;
-
-	return buftail.fp;
-}
-
-#ifdef CONFIG_COMPAT
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct compat_frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct compat_frame_tail {
-	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
-	u32		sp;
-	u32		lr;
-} __attribute__((packed));
-
-static struct compat_frame_tail __user *
-compat_user_backtrace(struct compat_frame_tail __user *tail,
-		      struct perf_callchain_entry *entry)
-{
-	struct compat_frame_tail buftail;
-	unsigned long err;
-
-	/* Also check accessibility of one struct frame_tail beyond */
-	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
-		return NULL;
-
-	pagefault_disable();
-	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
-	pagefault_enable();
-
-	if (err)
-		return NULL;
-
-	perf_callchain_store(entry, buftail.lr);
-
-	/*
-	 * Frame pointers should strictly progress back up the stack
-	 * (towards higher addresses).
-	 */
-	if (tail + 1 >= (struct compat_frame_tail __user *)
-			compat_ptr(buftail.fp))
-		return NULL;
-
-	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
-}
-#endif /* CONFIG_COMPAT */
-
-void perf_callchain_user(struct perf_callchain_entry *entry,
-			 struct pt_regs *regs)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* We don't support guest os callchain now */
-		return;
-	}
-
-	perf_callchain_store(entry, regs->pc);
-
-	if (!compat_user_mode(regs)) {
-		/* AARCH64 mode */
-		struct frame_tail __user *tail;
-
-		tail = (struct frame_tail __user *)regs->regs[29];
-
-		while (entry->nr < PERF_MAX_STACK_DEPTH &&
-		       tail && !((unsigned long)tail & 0xf))
-			tail = user_backtrace(tail, entry);
-	} else {
-#ifdef CONFIG_COMPAT
-		/* AARCH32 compat mode */
-		struct compat_frame_tail __user *tail;
-
-		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
-
-		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
-			tail && !((unsigned long)tail & 0x3))
-			tail = compat_user_backtrace(tail, entry);
-#endif
-	}
-}
-
-/*
- * Gets called by walk_stackframe() for every stackframe. This will be called
- * whist unwinding the stackframe and is like a subroutine return so we use
- * the PC.
- */
-static int callchain_trace(struct stackframe *frame, void *data)
-{
-	struct perf_callchain_entry *entry = data;
-	perf_callchain_store(entry, frame->pc);
-	return 0;
-}
-
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
-			   struct pt_regs *regs)
-{
-	struct stackframe frame;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		/* We don't support guest os callchain now */
-		return;
-	}
-
-	frame.fp = regs->regs[29];
-	frame.sp = regs->sp;
-	frame.pc = regs->pc;
-
-	walk_stackframe(&frame, callchain_trace, entry);
-}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
-		return perf_guest_cbs->get_guest_ip();
-
-	return instruction_pointer(regs);
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
-	int misc = 0;
-
-	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-		if (perf_guest_cbs->is_user_mode())
-			misc |= PERF_RECORD_MISC_GUEST_USER;
-		else
-			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
-	} else {
-		if (user_mode(regs))
-			misc |= PERF_RECORD_MISC_USER;
-		else
-			misc |= PERF_RECORD_MISC_KERNEL;
-	}
-
-	return misc;
-}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 869f202..aa94a88 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,23 +18,17 @@
 #include <linux/init.h>
 #include <linux/of.h>
 #include <linux/smp.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
 #include <linux/delay.h>
+#include <linux/psci.h>
 #include <linux/slab.h>
+
 #include <uapi/linux/psci.h>
 
 #include <asm/compiler.h>
-#include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
-#include <asm/psci.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
-#include <asm/system_misc.h>
-
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
 
 static bool psci_power_state_loses_context(u32 state)
 {
@@ -50,122 +44,8 @@
 	return !(state & ~valid_mask);
 }
 
-/*
- * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
- * calls to its resident CPU, so we must avoid issuing those. We never migrate
- * a Trusted OS even if it claims to be capable of migration -- doing so will
- * require cooperation with a Trusted OS driver.
- */
-static int resident_cpu = -1;
-
-struct psci_operations {
-	int (*cpu_suspend)(u32 state, unsigned long entry_point);
-	int (*cpu_off)(u32 state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-static struct psci_operations psci_ops;
-
-typedef unsigned long (psci_fn)(unsigned long, unsigned long,
-				unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
-static psci_fn *invoke_psci_fn;
-
-enum psci_function {
-	PSCI_FN_CPU_SUSPEND,
-	PSCI_FN_CPU_ON,
-	PSCI_FN_CPU_OFF,
-	PSCI_FN_MIGRATE,
-	PSCI_FN_MAX,
-};
-
 static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
 
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
-	switch (errno) {
-	case PSCI_RET_SUCCESS:
-		return 0;
-	case PSCI_RET_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case PSCI_RET_INVALID_PARAMS:
-		return -EINVAL;
-	case PSCI_RET_DENIED:
-		return -EPERM;
-	};
-
-	return -EINVAL;
-}
-
-static u32 psci_get_version(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-}
-
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
-	err = invoke_psci_fn(fn, state, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(u32 state)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_OFF];
-	err = invoke_psci_fn(fn, state, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_CPU_ON];
-	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
-	int err;
-	u32 fn;
-
-	fn = psci_function_id[PSCI_FN_MIGRATE];
-	err = invoke_psci_fn(fn, cpuid, 0, 0);
-	return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
-		unsigned long lowest_affinity_level)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
-			      lowest_affinity_level, 0);
-}
-
-static int psci_migrate_info_type(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
-}
-
-static unsigned long psci_migrate_info_up_cpu(void)
-{
-	return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
-}
-
 static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
 {
 	int i, ret, count = 0;
@@ -230,240 +110,6 @@
 	return ret;
 }
 
-static int get_set_conduit_method(struct device_node *np)
-{
-	const char *method;
-
-	pr_info("probing for conduit method from DT.\n");
-
-	if (of_property_read_string(np, "method", &method)) {
-		pr_warn("missing \"method\" property\n");
-		return -ENXIO;
-	}
-
-	if (!strcmp("hvc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	} else if (!strcmp("smc", method)) {
-		invoke_psci_fn = __invoke_psci_fn_smc;
-	} else {
-		pr_warn("invalid \"method\" property: %s\n", method);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
-	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
- * return DENIED (which would be fatal).
- */
-static void __init psci_init_migrate(void)
-{
-	unsigned long cpuid;
-	int type, cpu;
-
-	type = psci_ops.migrate_info_type();
-
-	if (type == PSCI_0_2_TOS_MP) {
-		pr_info("Trusted OS migration not required\n");
-		return;
-	}
-
-	if (type == PSCI_RET_NOT_SUPPORTED) {
-		pr_info("MIGRATE_INFO_TYPE not supported.\n");
-		return;
-	}
-
-	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
-	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
-		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
-		return;
-	}
-
-	cpuid = psci_migrate_info_up_cpu();
-	if (cpuid & ~MPIDR_HWID_BITMASK) {
-		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
-			cpuid);
-		return;
-	}
-
-	cpu = get_logical_index(cpuid);
-	resident_cpu = cpu >= 0 ? cpu : -1;
-
-	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
-}
-
-static void __init psci_0_2_set_functions(void)
-{
-	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
-	psci_ops.cpu_suspend = psci_cpu_suspend;
-
-	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
-	psci_ops.cpu_off = psci_cpu_off;
-
-	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
-	psci_ops.cpu_on = psci_cpu_on;
-
-	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
-	psci_ops.migrate = psci_migrate;
-
-	psci_ops.affinity_info = psci_affinity_info;
-
-	psci_ops.migrate_info_type = psci_migrate_info_type;
-
-	arm_pm_restart = psci_sys_reset;
-
-	pm_power_off = psci_sys_poweroff;
-}
-
-/*
- * Probe function for PSCI firmware versions >= 0.2
- */
-static int __init psci_probe(void)
-{
-	u32 ver = psci_get_version();
-
-	pr_info("PSCIv%d.%d detected in firmware.\n",
-			PSCI_VERSION_MAJOR(ver),
-			PSCI_VERSION_MINOR(ver));
-
-	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
-		pr_err("Conflicting PSCI version detected.\n");
-		return -EINVAL;
-	}
-
-	psci_0_2_set_functions();
-
-	psci_init_migrate();
-
-	return 0;
-}
-
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-/*
- * PSCI init function for PSCI versions >=0.2
- *
- * Probe based on PSCI PSCI_VERSION function
- */
-static int __init psci_0_2_init(struct device_node *np)
-{
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-	/*
-	 * Starting with v0.2, the PSCI specification introduced a call
-	 * (PSCI_VERSION) that allows probing the firmware version, so
-	 * that PSCI function IDs and version specific initialization
-	 * can be carried out according to the specific version reported
-	 * by firmware
-	 */
-	err = psci_probe();
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int __init psci_0_1_init(struct device_node *np)
-{
-	u32 id;
-	int err;
-
-	err = get_set_conduit_method(np);
-
-	if (err)
-		goto out_put_node;
-
-	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
-	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
-		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
-		psci_ops.cpu_suspend = psci_cpu_suspend;
-	}
-
-	if (!of_property_read_u32(np, "cpu_off", &id)) {
-		psci_function_id[PSCI_FN_CPU_OFF] = id;
-		psci_ops.cpu_off = psci_cpu_off;
-	}
-
-	if (!of_property_read_u32(np, "cpu_on", &id)) {
-		psci_function_id[PSCI_FN_CPU_ON] = id;
-		psci_ops.cpu_on = psci_cpu_on;
-	}
-
-	if (!of_property_read_u32(np, "migrate", &id)) {
-		psci_function_id[PSCI_FN_MIGRATE] = id;
-		psci_ops.migrate = psci_migrate;
-	}
-
-out_put_node:
-	of_node_put(np);
-	return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
-	{ .compatible = "arm,psci",	.data = psci_0_1_init},
-	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
-	{},
-};
-
-int __init psci_dt_init(void)
-{
-	struct device_node *np;
-	const struct of_device_id *matched_np;
-	psci_initcall_t init_fn;
-
-	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-
-	if (!np)
-		return -ENODEV;
-
-	init_fn = (psci_initcall_t)matched_np->data;
-	return init_fn(np);
-}
-
-#ifdef CONFIG_ACPI
-/*
- * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
- * explicitly clarified in SBBR
- */
-int __init psci_acpi_init(void)
-{
-	if (!acpi_psci_present()) {
-		pr_info("is not implemented in ACPI.\n");
-		return -EOPNOTSUPP;
-	}
-
-	pr_info("probing for conduit method from ACPI.\n");
-
-	if (acpi_psci_use_hvc())
-		invoke_psci_fn = __invoke_psci_fn_hvc;
-	else
-		invoke_psci_fn = __invoke_psci_fn_smc;
-
-	return psci_probe();
-}
-#endif
-
-#ifdef CONFIG_SMP
-
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
 	return 0;
@@ -489,11 +135,6 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static bool psci_tos_resident_on(int cpu)
-{
-	return cpu == resident_cpu;
-}
-
 static int cpu_psci_cpu_disable(unsigned int cpu)
 {
 	/* Fail early if we don't have CPU_OFF support */
@@ -550,7 +191,6 @@
 	return -ETIMEDOUT;
 }
 #endif
-#endif
 
 static int psci_suspend_finisher(unsigned long index)
 {
@@ -585,7 +225,6 @@
 	.cpu_init_idle	= cpu_psci_cpu_init_idle,
 	.cpu_suspend	= cpu_psci_cpu_suspend,
 #endif
-#ifdef CONFIG_SMP
 	.cpu_init	= cpu_psci_cpu_init,
 	.cpu_prepare	= cpu_psci_cpu_prepare,
 	.cpu_boot	= cpu_psci_cpu_boot,
@@ -594,6 +233,5 @@
 	.cpu_die	= cpu_psci_cpu_die,
 	.cpu_kill	= cpu_psci_cpu_kill,
 #endif
-#endif
 };
 
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d882b83..1971f49 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -826,6 +826,30 @@
 	return ret;
 }
 
+static int compat_tls_get(struct task_struct *target,
+			  const struct user_regset *regset, unsigned int pos,
+			  unsigned int count, void *kbuf, void __user *ubuf)
+{
+	compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+
+static int compat_tls_set(struct task_struct *target,
+			  const struct user_regset *regset, unsigned int pos,
+			  unsigned int count, const void *kbuf,
+			  const void __user *ubuf)
+{
+	int ret;
+	compat_ulong_t tls;
+
+	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+	if (ret)
+		return ret;
+
+	target->thread.tp_value = tls;
+	return ret;
+}
+
 static const struct user_regset aarch32_regsets[] = {
 	[REGSET_COMPAT_GPR] = {
 		.core_note_type = NT_PRSTATUS,
@@ -850,6 +874,64 @@
 	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
 };
 
+static const struct user_regset aarch32_ptrace_regsets[] = {
+	[REGSET_GPR] = {
+		.core_note_type = NT_PRSTATUS,
+		.n = COMPAT_ELF_NGREG,
+		.size = sizeof(compat_elf_greg_t),
+		.align = sizeof(compat_elf_greg_t),
+		.get = compat_gpr_get,
+		.set = compat_gpr_set
+	},
+	[REGSET_FPR] = {
+		.core_note_type = NT_ARM_VFP,
+		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+		.size = sizeof(compat_ulong_t),
+		.align = sizeof(compat_ulong_t),
+		.get = compat_vfp_get,
+		.set = compat_vfp_set
+	},
+	[REGSET_TLS] = {
+		.core_note_type = NT_ARM_TLS,
+		.n = 1,
+		.size = sizeof(compat_ulong_t),
+		.align = sizeof(compat_ulong_t),
+		.get = compat_tls_get,
+		.set = compat_tls_set,
+	},
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	[REGSET_HW_BREAK] = {
+		.core_note_type = NT_ARM_HW_BREAK,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+	[REGSET_HW_WATCH] = {
+		.core_note_type = NT_ARM_HW_WATCH,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+#endif
+	[REGSET_SYSTEM_CALL] = {
+		.core_note_type = NT_ARM_SYSTEM_CALL,
+		.n = 1,
+		.size = sizeof(int),
+		.align = sizeof(int),
+		.get = system_call_get,
+		.set = system_call_set,
+	},
+};
+
+static const struct user_regset_view user_aarch32_ptrace_view = {
+	.name = "aarch32", .e_machine = EM_ARM,
+	.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
+};
+
 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
 				   compat_ulong_t __user *ret)
 {
@@ -1109,8 +1191,16 @@
 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 {
 #ifdef CONFIG_COMPAT
-	if (is_compat_thread(task_thread_info(task)))
+	/*
+	 * Core dumping of 32-bit tasks or compat ptrace requests must use the
+	 * user_aarch32_view compatible with arm32. Native ptrace requests on
+	 * 32-bit children use an extended user_aarch32_ptrace_view to allow
+	 * access to the TLS register.
+	 */
+	if (is_compat_task())
 		return &user_aarch32_view;
+	else if (is_compat_thread(task_thread_info(task)))
+		return &user_aarch32_ptrace_view;
 #endif
 	return &user_aarch64_view;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f3067d4..8884788 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -34,7 +34,6 @@
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 #include <linux/root_dev.h>
-#include <linux/clk-provider.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/smp.h>
@@ -46,6 +45,7 @@
 #include <linux/of_platform.h>
 #include <linux/efi.h>
 #include <linux/personality.h>
+#include <linux/psci.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -61,9 +61,7 @@
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
 #include <asm/memblock.h>
-#include <asm/psci.h>
 #include <asm/efi.h>
-#include <asm/virt.h>
 #include <asm/xen/hypervisor.h>
 
 unsigned long elf_hwcap __read_mostly;
@@ -131,7 +129,6 @@
 }
 
 struct mpidr_hash mpidr_hash;
-#ifdef CONFIG_SMP
 /**
  * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
  *			  level in order to build a linear index from an
@@ -197,35 +194,11 @@
 		pr_warn("Large number of MPIDR hash buckets detected\n");
 	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
-#endif
-
-static void __init hyp_mode_check(void)
-{
-	if (is_hyp_mode_available())
-		pr_info("CPU: All CPU(s) started at EL2\n");
-	else if (is_hyp_mode_mismatched())
-		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
-			   "CPU: CPUs started in inconsistent modes");
-	else
-		pr_info("CPU: All CPU(s) started at EL1\n");
-}
-
-void __init do_post_cpus_up_work(void)
-{
-	hyp_mode_check();
-	apply_alternatives_all();
-}
-
-#ifdef CONFIG_UP_LATE_INIT
-void __init up_late_init(void)
-{
-	do_post_cpus_up_work();
-}
-#endif /* CONFIG_UP_LATE_INIT */
 
 static void __init setup_processor(void)
 {
-	u64 features, block;
+	u64 features;
+	s64 block;
 	u32 cwg;
 	int cls;
 
@@ -255,8 +228,8 @@
 	 * for non-negative values. Negative values are reserved.
 	 */
 	features = read_cpuid(ID_AA64ISAR0_EL1);
-	block = (features >> 4) & 0xf;
-	if (!(block & 0x8)) {
+	block = cpuid_feature_extract_field(features, 4);
+	if (block > 0) {
 		switch (block) {
 		default:
 		case 2:
@@ -268,26 +241,36 @@
 		}
 	}
 
-	block = (features >> 8) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 8) > 0)
 		elf_hwcap |= HWCAP_SHA1;
 
-	block = (features >> 12) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 12) > 0)
 		elf_hwcap |= HWCAP_SHA2;
 
-	block = (features >> 16) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 16) > 0)
 		elf_hwcap |= HWCAP_CRC32;
 
+	block = cpuid_feature_extract_field(features, 20);
+	if (block > 0) {
+		switch (block) {
+		default:
+		case 2:
+			elf_hwcap |= HWCAP_ATOMICS;
+		case 1:
+			/* RESERVED */
+		case 0:
+			break;
+		}
+	}
+
 #ifdef CONFIG_COMPAT
 	/*
 	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
-	 * the Aarch32 32-bit execution state.
+	 * the AArch32 32-bit execution state.
 	 */
 	features = read_cpuid(ID_ISAR5_EL1);
-	block = (features >> 4) & 0xf;
-	if (!(block & 0x8)) {
+	block = cpuid_feature_extract_field(features, 4);
+	if (block > 0) {
 		switch (block) {
 		default:
 		case 2:
@@ -299,16 +282,13 @@
 		}
 	}
 
-	block = (features >> 8) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 8) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
 
-	block = (features >> 12) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 12) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
 
-	block = (features >> 16) & 0xf;
-	if (block && !(block & 0x8))
+	if (cpuid_feature_extract_field(features, 16) > 0)
 		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
 #endif
 }
@@ -405,10 +385,8 @@
 	xen_early_init();
 
 	cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
 	smp_init_cpus();
 	smp_build_mpidr_hash();
-#endif
 
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
@@ -427,8 +405,13 @@
 
 static int __init arm64_device_init(void)
 {
-	of_iommu_init();
-	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+	if (of_have_populated_dt()) {
+		of_iommu_init();
+		of_platform_populate(NULL, of_default_bus_match_table,
+				     NULL, NULL);
+	} else if (acpi_disabled) {
+		pr_crit("Device tree not populated\n");
+	}
 	return 0;
 }
 arch_initcall_sync(arm64_device_init);
@@ -456,6 +439,7 @@
 	"sha1",
 	"sha2",
 	"crc32",
+	"atomics",
 	NULL
 };
 
@@ -508,9 +492,7 @@
 		 * online processors, looking for lines beginning with
 		 * "processor".  Give glibc what it expects.
 		 */
-#ifdef CONFIG_SMP
 		seq_printf(m, "processor\t: %d\n", i);
-#endif
 
 		/*
 		 * Dump out the common processor features in a single line.
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 803cfea..f586f7c 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -82,7 +82,6 @@
 	str	x2, [x0, #CPU_CTX_SP]
 	ldr	x1, =sleep_save_sp
 	ldr	x1, [x1, #SLEEP_SAVE_SP_VIRT]
-#ifdef CONFIG_SMP
 	mrs	x7, mpidr_el1
 	ldr	x9, =mpidr_hash
 	ldr	x10, [x9, #MPIDR_HASH_MASK]
@@ -94,7 +93,6 @@
 	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
 	add	x1, x1, x8, lsl #3
-#endif
 	bl	__cpu_suspend_save
 	/*
 	 * Grab suspend finisher in x20 and its argument in x19
@@ -135,6 +133,14 @@
 	ldr	x3, =cpu_resume_after_mmu
 	msr	sctlr_el1, x0		// restore sctlr_el1
 	isb
+	/*
+	 * Invalidate the local I-cache so that any instructions fetched
+	 * speculatively from the PoC are discarded, since they may have
+	 * been dynamically patched at the PoU.
+	 */
+	ic	iallu
+	dsb	nsh
+	isb
 	br	x3			// global jump to virtual address
 ENDPROC(cpu_resume_mmu)
 	.popsection
@@ -151,7 +157,6 @@
 
 ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
-#ifdef CONFIG_SMP
 	mrs	x1, mpidr_el1
 	adrp	x8, mpidr_hash
 	add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -161,9 +166,6 @@
 	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
 	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
         /* x7 contains hash index, let's use it to grab context pointer */
-#else
-	mov	x7, xzr
-#endif
 	ldr_l	x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
 	ldr	x0, [x0, x7, lsl #3]
 	/* load sp from context */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 50fb469..dbdaacd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -52,6 +52,7 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/virt.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
@@ -310,10 +311,22 @@
 }
 #endif
 
+static void __init hyp_mode_check(void)
+{
+	if (is_hyp_mode_available())
+		pr_info("CPU: All CPU(s) started at EL2\n");
+	else if (is_hyp_mode_mismatched())
+		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+			   "CPU: CPUs started in inconsistent modes");
+	else
+		pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	do_post_cpus_up_work();
+	hyp_mode_check();
+	apply_alternatives_all();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 42f9195..149151f 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -42,7 +42,6 @@
 #include <asm/thread_info.h>
 #include <asm/stacktrace.h>
 
-#ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
 	struct stackframe frame;
@@ -62,7 +61,6 @@
 	return frame.pc;
 }
 EXPORT_SYMBOL(profile_pc);
-#endif
 
 void __init time_init(void)
 {
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index fcb8f7b..694f6de 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -300,6 +300,6 @@
 	 * Discard anything that was parsed if we hit an error so we
 	 * don't use partial information.
 	 */
-	if (parse_dt_topology())
+	if (of_have_populated_dt() && parse_dt_topology())
 		reset_cpu_topology();
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 566bc4c..f93aae5 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/bug.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
@@ -32,8 +33,10 @@
 #include <linux/syscalls.h>
 
 #include <asm/atomic.h>
+#include <asm/bug.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/insn.h>
 #include <asm/traps.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
@@ -52,11 +55,12 @@
  * Dump out the contents of some memory nicely...
  */
 static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
-		     unsigned long top)
+		     unsigned long top, bool compat)
 {
 	unsigned long first;
 	mm_segment_t fs;
 	int i;
+	unsigned int width = compat ? 4 : 8;
 
 	/*
 	 * We need to switch to kernel mode so that we can use __get_user
@@ -75,13 +79,22 @@
 		memset(str, ' ', sizeof(str));
 		str[sizeof(str) - 1] = '\0';
 
-		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+		for (p = first, i = 0; i < (32 / width)
+					&& p < top; i++, p += width) {
 			if (p >= bottom && p < top) {
-				unsigned int val;
-				if (__get_user(val, (unsigned int *)p) == 0)
-					sprintf(str + i * 9, " %08x", val);
-				else
-					sprintf(str + i * 9, " ????????");
+				unsigned long val;
+
+				if (width == 8) {
+					if (__get_user(val, (unsigned long *)p) == 0)
+						sprintf(str + i * 17, " %016lx", val);
+					else
+						sprintf(str + i * 17, " ????????????????");
+				} else {
+					if (__get_user(val, (unsigned int *)p) == 0)
+						sprintf(str + i * 9, " %08lx", val);
+					else
+						sprintf(str + i * 9, " ????????");
+				}
 			}
 		}
 		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
@@ -95,7 +108,7 @@
 	print_ip_sym(where);
 	if (in_exception_text(where))
 		dump_mem("", "Exception stack", stack,
-			 stack + sizeof(struct pt_regs));
+			 stack + sizeof(struct pt_regs), false);
 }
 
 static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -179,11 +192,7 @@
 #else
 #define S_PREEMPT ""
 #endif
-#ifdef CONFIG_SMP
 #define S_SMP " SMP"
-#else
-#define S_SMP ""
-#endif
 
 static int __die(const char *str, int err, struct thread_info *thread,
 		 struct pt_regs *regs)
@@ -207,7 +216,8 @@
 
 	if (!user_mode(regs) || in_interrupt()) {
 		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
-			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+			 THREAD_SIZE + (unsigned long)task_stack_page(tsk),
+			 compat_user_mode(regs));
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
@@ -459,7 +469,63 @@
 	pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
+/* GENERIC_BUG traps */
+
+int is_valid_bugaddr(unsigned long addr)
+{
+	/*
+	 * bug_handler() is only called for BRK #BUG_BRK_IMM.
+	 * So the answer is trivial -- any spurious instances with no
+	 * bug table entry will be rejected by report_bug() and passed
+	 * back to the debug-monitors code and handled as a fatal
+	 * unexpected debug exception.
+	 */
+	return 1;
+}
+
+static int bug_handler(struct pt_regs *regs, unsigned int esr)
+{
+	if (user_mode(regs))
+		return DBG_HOOK_ERROR;
+
+	switch (report_bug(regs->pc, regs)) {
+	case BUG_TRAP_TYPE_BUG:
+		die("Oops - BUG", regs, 0);
+		break;
+
+	case BUG_TRAP_TYPE_WARN:
+		/* Ideally, report_bug() should backtrace for us... but no. */
+		dump_backtrace(regs, NULL);
+		break;
+
+	default:
+		/* unknown/unrecognised bug trap type */
+		return DBG_HOOK_ERROR;
+	}
+
+	/* If thread survives, skip over the BUG instruction and continue: */
+	regs->pc += AARCH64_INSN_SIZE;	/* skip BRK and resume */
+	return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook bug_break_hook = {
+	.esr_val = 0xf2000000 | BUG_BRK_IMM,
+	.esr_mask = 0xffffffff,
+	.fn = bug_handler,
+};
+
+/*
+ * Initial handler for AArch64 BRK exceptions.
+ * This handler is only used until debug_traps_init().
+ */
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		struct pt_regs *regs)
+{
+	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
+}
+
+/* This registration must happen early, before debug_traps_init(). */
 void __init trap_init(void)
 {
-	return;
+	register_break_hook(&bug_break_hook);
 }
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb1..10915aa 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -810,7 +810,11 @@
  * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__save_vgic_v2_state
+alternative_else
+	bl	__save_vgic_v3_state
+alternative_endif
 	mrs	x24, hcr_el2
 	mov	x25, #HCR_INT_OVERRIDE
 	neg	x25, x25
@@ -827,7 +831,11 @@
 	orr	x24, x24, #HCR_INT_OVERRIDE
 	orr	x24, x24, x25
 	msr	hcr_el2, x24
-	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+	bl	__restore_vgic_v2_state
+alternative_else
+	bl	__restore_vgic_v3_state
+alternative_endif
 .endm
 
 .macro save_timer_state
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index f02530e..85c5715 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -168,8 +168,8 @@
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_abt32(vcpu, false, addr);
-
-	inject_abt64(vcpu, false, addr);
+	else
+		inject_abt64(vcpu, false, addr);
 }
 
 /**
@@ -184,8 +184,8 @@
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_abt32(vcpu, true, addr);
-
-	inject_abt64(vcpu, true, addr);
+	else
+		inject_abt64(vcpu, true, addr);
 }
 
 /**
@@ -198,6 +198,6 @@
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
 		inject_undef32(vcpu);
-
-	inject_undef64(vcpu);
+	else
+		inject_undef64(vcpu);
 }
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d98d3e3..1a811ec 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -3,3 +3,16 @@
 		   clear_page.o memchr.o memcpy.o memmove.o memset.o	\
 		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
 		   strchr.o strrchr.o
+
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
+lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+CFLAGS_atomic_ll_sc.o	:= -fcall-used-x0 -ffixed-x1 -ffixed-x2		\
+		   -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6		\
+		   -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9		\
+		   -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12	\
+		   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15	\
+		   -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c
new file mode 100644
index 0000000..b0c538b
--- /dev/null
+++ b/arch/arm64/lib/atomic_ll_sc.c
@@ -0,0 +1,3 @@
+#include <asm/atomic.h>
+#define __ARM64_IN_ATOMIC_IMPL
+#include <asm/atomic_ll_sc.h>
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371..43ac736 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -18,52 +18,59 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/lse.h>
 
 /*
  * x0: bits 5:0  bit offset
  *     bits 31:6 word offset
  * x1: address
  */
-	.macro	bitop, name, instr
+	.macro	bitop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse "	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x3, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	\instr	x2, x2, x3
-	stxr	w0, x2, [x1]
-	cbnz	w0, 1b
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x3, [x1]"
+alt_lse	"	\llsc	x2, x2, x3",		"nop"
+alt_lse	"	stxr	w0, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w0, 1b",		"nop"
+
 	ret
 ENDPROC(\name	)
 	.endm
 
-	.macro	testop, name, instr
+	.macro	testop, name, llsc, lse
 ENTRY(	\name	)
 	and	w3, w0, #63		// Get bit offset
 	eor	w0, w0, w3		// Clear low bits
 	mov	x2, #1
 	add	x1, x1, x0, lsr #3	// Get word offset
+alt_lse "	prfm	pstl1strm, [x1]",	"nop"
 	lsl	x4, x2, x3		// Create mask
-1:	ldxr	x2, [x1]
-	lsr	x0, x2, x3		// Save old value of bit
-	\instr	x2, x2, x4		// toggle bit
-	stlxr	w5, x2, [x1]
-	cbnz	w5, 1b
-	dmb	ish
+
+alt_lse	"1:	ldxr	x2, [x1]",		"\lse	x4, x2, [x1]"
+	lsr	x0, x2, x3
+alt_lse	"	\llsc	x2, x2, x4",		"nop"
+alt_lse	"	stlxr	w5, x2, [x1]",		"nop"
+alt_lse	"	cbnz	w5, 1b",		"nop"
+alt_lse	"	dmb	ish",			"nop"
+
 	and	x0, x0, #1
-3:	ret
+	ret
 ENDPROC(\name	)
 	.endm
 
 /*
  * Atomic bit operations.
  */
-	bitop	change_bit, eor
-	bitop	clear_bit, bic
-	bitop	set_bit, orr
+	bitop	change_bit, eor, steor
+	bitop	clear_bit, bic, stclr
+	bitop	set_bit, orr, stset
 
-	testop	test_and_change_bit, eor
-	testop	test_and_clear_bit, bic
-	testop	test_and_set_bit, orr
+	testop	test_and_change_bit, eor, ldeoral
+	testop	test_and_clear_bit, bic, ldclral
+	testop	test_and_set_bit, orr, ldsetal
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967f..a9723c7 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 	.text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -48,6 +54,8 @@
 	b.mi	5f
 USER(9f, strb	wzr, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 5e27add..1be9ef2 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
-	add	x4, x1, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x1, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+	stp	x3, x4, [x0], #16
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 	str	x3, [x0], #8
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -51,12 +62,14 @@
 USER(9f, ldrb	w3, [x1]	)
 	strb	w3, [x0]
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_from_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x2, x4, x1
+9:	sub	x2, x5, x1
 	mov	x3, x2
 10:	strb	wzr, [x0], #1			// zero remaining buffer space
 	subs	x3, x3, #1
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 84b6c9b..1b94661e 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,14 +34,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+USER(9f, ldp	x3, x4, [x1], #16)
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 USER(9f, ldr	x3, [x1], #8	)
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 USER(9f, ldr	w3, [x1], #4	)
@@ -53,11 +64,13 @@
 USER(9f, ldrb	w3, [x1]	)
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_in_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a0aeeb9..a257b47 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
  *	x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
-	add	x4, x0, x2			// upper user buffer boundary
-	subs	x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
+	add	x5, x0, x2			// upper user buffer boundary
+	subs	x2, x2, #16
+	b.mi	1f
+0:
+	ldp	x3, x4, [x1], #16
+	subs	x2, x2, #16
+USER(9f, stp	x3, x4, [x0], #16)
+	b.pl	0b
+1:	adds	x2, x2, #8
 	b.mi	2f
-1:
 	ldr	x3, [x1], #8
-	subs	x2, x2, #8
+	sub	x2, x2, #8
 USER(9f, str	x3, [x0], #8	)
-	b.pl	1b
 2:	adds	x2, x2, #4
 	b.mi	3f
 	ldr	w3, [x1], #4
@@ -51,11 +62,13 @@
 	ldrb	w3, [x1]
 USER(9f, strb	w3, [x0]	)
 5:	mov	x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+	    CONFIG_ARM64_PAN)
 	ret
 ENDPROC(__copy_to_user)
 
 	.section .fixup,"ax"
 	.align	2
-9:	sub	x0, x4, x0			// bytes not copied
+9:	sub	x0, x5, x0			// bytes not copied
 	ret
 	.previous
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d3..eb48d5d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
 	bic	x0, x0, x3
-1:	alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+	dc	cvac, x0
+alternative_else
+	dc	civac, x0
+alternative_endif
 	add	x0, x0, x2
 	cmp	x0, x1
 	b.lo	1b
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6c..d70ff14 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@
 		__flush_icache_all();
 }
 
-#ifdef CONFIG_SMP
-
 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 {
 	unsigned long flags;
@@ -110,23 +108,12 @@
 	cpu_switch_mm(mm->pgd, mm);
 }
 
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
-	mm->context.id = asid;
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 	unsigned int bits = asid_bits();
 
 	raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from another
 	 * CPU before we acquired the lock.
@@ -136,7 +123,6 @@
 		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
-#endif
 	/*
 	 * At this point, it is guaranteed that the current mm (with an old
 	 * ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@
 			cpu_last_asid = ASID_FIRST_VERSION;
 		asid = cpu_last_asid + smp_processor_id();
 		flush_context();
-#ifdef CONFIG_SMP
 		smp_wmb();
 		smp_call_function(reset_context, NULL, 1);
-#endif
 		cpu_last_asid += NR_CPUS - 1;
 	}
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d16a1ce..0bcc4bc 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -144,6 +144,7 @@
 	struct page *page;
 	void *ptr, *coherent_ptr;
 	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	size = PAGE_ALIGN(size);
 
@@ -171,9 +172,7 @@
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
 	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-				__get_dma_pgprot(attrs,
-					__pgprot(PROT_NORMAL_NC), false),
-					NULL);
+						   prot, NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -303,9 +302,10 @@
 				       sg->length, dir);
 }
 
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  struct dma_attrs *attrs)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +314,9 @@
 	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
@@ -327,20 +330,24 @@
 	return ret;
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  struct dma_attrs *attrs)
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 struct dma_attrs *attrs)
 {
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+	if (!ret)
+		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+			    PAGE_ALIGN(size), 0);
+
+	return ret;
 }
 
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,
+	.get_sgtable = __swiotlb_get_sgtable,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd..aba9ead 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -224,6 +226,13 @@
 	}
 
 	/*
+	 * PAN bit set implies the fault happened in kernel space, but not
+	 * in the arch's user access functions.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+		goto no_context;
+
+	/*
 	 * As per x86, we may deadlock here. However, since the kernel only
 	 * validly references user space from well defined areas of the code,
 	 * we can bug out early if this is from code which shouldn't.
@@ -492,14 +501,22 @@
 	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
 }
 
-static struct fault_info debug_fault_info[] = {
+int __init early_brk64(unsigned long addr, unsigned int esr,
+		       struct pt_regs *regs);
+
+/*
+ * __refdata because early_brk64 is __init, but the reference to it is
+ * clobbered at arch_initcall time.
+ * See traps.c and debug-monitors.c:debug_traps_init().
+ */
+static struct fault_info __refdata debug_fault_info[] = {
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
 	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
 	{ do_bad,	SIGBUS,		0,		"unknown 3"		},
 	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
 	{ do_bad,	SIGTRAP,	0,		"aarch32 vector catch"	},
-	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
+	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
 	{ do_bad,	SIGBUS,		0,		"unknown 7"		},
 };
 
@@ -536,3 +553,10 @@
 
 	return 0;
 }
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 4dfa397..c26b804 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -60,14 +60,10 @@
 		       unsigned long uaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-#ifdef CONFIG_SMP
 	preempt_disable();
-#endif
 	memcpy(dst, src, len);
 	flush_ptrace_access(vma, page, uaddr, dst, len);
-#ifdef CONFIG_SMP
 	preempt_enable();
-#endif
 }
 
 void __sync_icache_dcache(pte_t pte, unsigned long addr)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 831ec53..383b03f 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -13,10 +13,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
 #include <linux/init.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad87ce8..f5c0680 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -358,9 +358,9 @@
 
 #ifdef CONFIG_BLK_DEV_INITRD
 
-static int keep_initrd;
+static int keep_initrd __initdata;
 
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
 		free_reserved_area((void *)start, (void *)end, 0, "initrd");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a4ede4e..9211b85 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -267,7 +267,7 @@
 	return ptr;
 }
 
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
@@ -461,17 +461,6 @@
 }
 
 /*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	cpu_set_idmap_tcr_t0sz();
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
  * Check whether a kernel address is valid (derived from arch/x86/).
  */
 int kern_addr_valid(unsigned long addr)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 39139a3..e4ee7bd 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -34,11 +34,7 @@
 #define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
 #endif
 
-#ifdef CONFIG_SMP
 #define TCR_SMP_FLAGS	TCR_SHARED
-#else
-#define TCR_SMP_FLAGS	0
-#endif
 
 /* PTWs cacheable, inner/outer WBWA */
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -150,13 +146,13 @@
  *	value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-	ic	iallu				// I+BTB cache invalidate
 	tlbi	vmalle1is			// invalidate I + D TLBs
 	dsb	ish
 
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	msr	mdscr_el1, xzr			// Reset mdscr_el1
+	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x0			// access to the DCC from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
@@ -196,6 +192,19 @@
 	 */
 	mrs	x9, ID_AA64MMFR0_EL1
 	bfi	x10, x9, #32, #3
+#ifdef CONFIG_ARM64_HW_AFDBM
+	/*
+	 * Hardware update of the Access and Dirty bits.
+	 */
+	mrs	x9, ID_AA64MMFR1_EL1
+	and	x9, x9, #0xf
+	cbz	x9, 2f
+	cmp	x9, #2
+	b.lt	1f
+	orr	x10, x10, #TCR_HD		// hardware Dirty flag update
+1:	orr	x10, x10, #TCR_HA		// hardware Access flag update
+2:
+#endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 2d07ce1..97c9bdf 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -44,6 +44,18 @@
 ATOMIC_OP_RETURN(sub, sub, rKs21)
 ATOMIC_OP_RETURN(add, add, r)
 
+#define ATOMIC_OP(op, asm_op)						\
+ATOMIC_OP_RETURN(op, asm_op, r)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	(void)__atomic_##op##_return(i, v);				\
+}
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OP
 #undef ATOMIC_OP_RETURN
 
 /*
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index e998ff5..f855646 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -297,6 +297,7 @@
 
 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
+#define ioremap_uc ioremap_nocache
 
 #define cached(addr) P1SEGADDR(addr)
 #define uncached(addr) P2SEGADDR(addr)
diff --git a/arch/avr32/include/asm/switch_to.h b/arch/avr32/include/asm/switch_to.h
index 9a8e9d5..6f00581 100644
--- a/arch/avr32/include/asm/switch_to.h
+++ b/arch/avr32/include/asm/switch_to.h
@@ -15,11 +15,13 @@
  */
 #ifdef CONFIG_OWNERSHIP_TRACE
 #include <asm/ocd.h>
-#define finish_arch_switch(prev)			\
+#define ocd_switch(prev, next)				\
 	do {						\
 		ocd_write(PID, prev->pid);		\
-		ocd_write(PID, current->pid);		\
+		ocd_write(PID, next->pid);		\
 	} while(0)
+#else
+#define ocd_switch(prev, next)
 #endif
 
 /*
@@ -38,6 +40,7 @@
 				       struct cpu_context *);
 #define switch_to(prev, next, last)					\
 	do {								\
+		ocd_switch(prev, next);					\
 		last = __switch_to(prev, &prev->thread.cpu_context + 1,	\
 				   &next->thread.cpu_context);		\
 	} while (0)
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index 2d48b6a..d51ff8f 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -128,9 +128,9 @@
 
 	irqd_set_trigger_type(d, flow_type);
 	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index 903c7d8..157a5e0 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -286,7 +286,7 @@
 	struct pio_device	*pio = irq_desc_get_chip_data(desc);
 	unsigned		gpio_irq;
 
-	gpio_irq = (unsigned) irq_get_handler_data(irq);
+	gpio_irq = (unsigned) irq_desc_get_handler_data(desc);
 	for (;;) {
 		u32		isr;
 
@@ -312,7 +312,6 @@
 	unsigned	i;
 
 	irq_set_chip_data(irq, pio);
-	irq_set_handler_data(irq, (void *)gpio_irq);
 
 	for (i = 0; i < 32; i++, gpio_irq++) {
 		irq_set_chip_data(gpio_irq, pio);
@@ -320,7 +319,8 @@
 					 handle_simple_irq);
 	}
 
-	irq_set_chained_handler(irq, gpio_irq_handler);
+	irq_set_chained_handler_and_data(irq, gpio_irq_handler,
+					 (void *)gpio_irq);
 }
 
 /*--------------------------------------------------------------------------*/
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index a107a98..1c1c423 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -16,19 +16,21 @@
 #include <linux/types.h>
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
-asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_clear_asm(volatile int *ptr, int value);
-asmlinkage int __raw_atomic_set_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+
+asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_xor_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 
 #define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
 
-#define atomic_add_return(i, v) __raw_atomic_update_asm(&(v)->counter, i)
-#define atomic_sub_return(i, v) __raw_atomic_update_asm(&(v)->counter, -(i))
+#define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
+#define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
-#define atomic_clear_mask(m, v) __raw_atomic_clear_asm(&(v)->counter, m)
-#define atomic_set_mask(m, v)   __raw_atomic_set_asm(&(v)->counter, m)
+#define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
 #endif
 
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index c446591..a401c27 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -83,11 +83,12 @@
 EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(__raw_atomic_update_asm);
-EXPORT_SYMBOL(__raw_atomic_clear_asm);
-EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_and_asm);
+EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
 EXPORT_SYMBOL(__raw_atomic_test_asm);
+
 EXPORT_SYMBOL(__raw_xchg_1_asm);
 EXPORT_SYMBOL(__raw_xchg_2_asm);
 EXPORT_SYMBOL(__raw_xchg_4_asm);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index cb0a484..fb9e95f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -136,44 +136,44 @@
 	return 0;
 }
 
-static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int bfin_gptmr0_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC: {
 #ifndef CONFIG_BF60x
-		set_gptimer_config(TIMER0_id, \
-			TIMER_OUT_DIS | TIMER_IRQ_ENA | \
-			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
+	set_gptimer_config(TIMER0_id,
+			   TIMER_OUT_DIS | TIMER_IRQ_ENA |
+			   TIMER_PERIOD_CNT | TIMER_MODE_PWM);
 #else
-		set_gptimer_config(TIMER0_id,  TIMER_OUT_DIS
-			| TIMER_MODE_PWM_CONT | TIMER_PULSE_HI | TIMER_IRQ_PER);
+	set_gptimer_config(TIMER0_id,
+			   TIMER_OUT_DIS | TIMER_MODE_PWM_CONT |
+			   TIMER_PULSE_HI | TIMER_IRQ_PER);
 #endif
 
-		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
-		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
-		enable_gptimers(TIMER0bit);
-		break;
-	}
-	case CLOCK_EVT_MODE_ONESHOT:
-		disable_gptimers(TIMER0bit);
+	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
+	set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
+	enable_gptimers(TIMER0bit);
+	return 0;
+}
+
+static int bfin_gptmr0_set_oneshot(struct clock_event_device *evt)
+{
+	disable_gptimers(TIMER0bit);
 #ifndef CONFIG_BF60x
-		set_gptimer_config(TIMER0_id, \
-			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
+	set_gptimer_config(TIMER0_id,
+			   TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
 #else
-		set_gptimer_config(TIMER0_id, TIMER_OUT_DIS | TIMER_MODE_PWM
-			| TIMER_PULSE_HI | TIMER_IRQ_WID_DLY);
+	set_gptimer_config(TIMER0_id,
+			   TIMER_OUT_DIS | TIMER_MODE_PWM | TIMER_PULSE_HI |
+			   TIMER_IRQ_WID_DLY);
 #endif
 
-		set_gptimer_period(TIMER0_id, 0);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		disable_gptimers(TIMER0bit);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	set_gptimer_period(TIMER0_id, 0);
+	return 0;
+}
+
+static int bfin_gptmr0_shutdown(struct clock_event_device *evt)
+{
+	disable_gptimers(TIMER0bit);
+	return 0;
 }
 
 static void bfin_gptmr0_ack(void)
@@ -211,13 +211,16 @@
 };
 
 static struct clock_event_device clockevent_gptmr0 = {
-	.name		= "bfin_gptimer0",
-	.rating		= 300,
-	.irq		= IRQ_TIMER0,
-	.shift		= 32,
-	.features 	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event = bfin_gptmr0_set_next_event,
-	.set_mode	= bfin_gptmr0_set_mode,
+	.name			= "bfin_gptimer0",
+	.rating			= 300,
+	.irq			= IRQ_TIMER0,
+	.shift			= 32,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event		= bfin_gptmr0_set_next_event,
+	.set_state_shutdown	= bfin_gptmr0_shutdown,
+	.set_state_periodic	= bfin_gptmr0_set_periodic,
+	.set_state_oneshot	= bfin_gptmr0_set_oneshot,
 };
 
 static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
@@ -250,36 +253,35 @@
 	return 0;
 }
 
-static void bfin_coretmr_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int bfin_coretmr_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC: {
-		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
-		bfin_write_TCNTL(TMPWR);
-		CSYNC();
-		bfin_write_TSCALE(TIME_SCALE - 1);
-		bfin_write_TPERIOD(tcount);
-		bfin_write_TCOUNT(tcount);
-		CSYNC();
-		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
-		break;
-	}
-	case CLOCK_EVT_MODE_ONESHOT:
-		bfin_write_TCNTL(TMPWR);
-		CSYNC();
-		bfin_write_TSCALE(TIME_SCALE - 1);
-		bfin_write_TPERIOD(0);
-		bfin_write_TCOUNT(0);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		bfin_write_TCNTL(0);
-		CSYNC();
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
+
+	bfin_write_TCNTL(TMPWR);
+	CSYNC();
+	bfin_write_TSCALE(TIME_SCALE - 1);
+	bfin_write_TPERIOD(tcount);
+	bfin_write_TCOUNT(tcount);
+	CSYNC();
+	bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
+	return 0;
+}
+
+static int bfin_coretmr_set_oneshot(struct clock_event_device *evt)
+{
+	bfin_write_TCNTL(TMPWR);
+	CSYNC();
+	bfin_write_TSCALE(TIME_SCALE - 1);
+	bfin_write_TPERIOD(0);
+	bfin_write_TCOUNT(0);
+	return 0;
+}
+
+static int bfin_coretmr_shutdown(struct clock_event_device *evt)
+{
+	bfin_write_TCNTL(0);
+	CSYNC();
+	return 0;
 }
 
 void bfin_coretmr_init(void)
@@ -335,7 +337,9 @@
 	evt->shift = 32;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	evt->set_next_event = bfin_coretmr_set_next_event;
-	evt->set_mode = bfin_coretmr_set_mode;
+	evt->set_state_shutdown = bfin_coretmr_shutdown;
+	evt->set_state_periodic = bfin_coretmr_set_periodic;
+	evt->set_state_oneshot = bfin_coretmr_set_oneshot;
 
 	clock_tick = get_cclk() / TIME_SCALE;
 	evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
diff --git a/arch/blackfin/mach-bf537/ints-priority.c b/arch/blackfin/mach-bf537/ints-priority.c
index 2137a20..14b2f74 100644
--- a/arch/blackfin/mach-bf537/ints-priority.c
+++ b/arch/blackfin/mach-bf537/ints-priority.c
@@ -182,9 +182,11 @@
 	.irq_unmask = bf537_mac_rx_unmask_irq,
 };
 
-static void bf537_demux_mac_rx_irq(unsigned int int_irq,
+static void bf537_demux_mac_rx_irq(unsigned int __int_irq,
 				   struct irq_desc *desc)
 {
+	unsigned int int_irq = irq_desc_get_irq(desc);
+
 	if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR))
 		bfin_handle_irq(IRQ_MAC_RX);
 	else
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index 2a08df8..26fccb5 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -587,10 +587,10 @@
  * r0 = ptr
  * r1 = value
  *
- * Add a signed value to a 32bit word and return the new value atomically.
+ * ADD a signed value to a 32bit word and return the new value atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_update_asm)
+ENTRY(___raw_atomic_add_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -603,19 +603,19 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_update_asm)
+ENDPROC(___raw_atomic_add_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Clear the mask bits from a 32bit word and return the old 32bit value
+ * AND the mask bits from a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_clear_asm)
+ENTRY(___raw_atomic_and_asm)
 	p1 = r0;
-	r3 = ~r1;
+	r3 = r1;
 	[--sp] = rets;
 	call _get_core_lock;
 	r2 = [p1];
@@ -627,17 +627,17 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_clear_asm)
+ENDPROC(___raw_atomic_and_asm)
 
 /*
  * r0 = ptr
  * r1 = mask
  *
- * Set the mask bits into a 32bit word and return the old 32bit value
+ * OR the mask bits into a 32bit word and return the old 32bit value
  * atomically.
  * Clobbers: r3:0, p1:0
  */
-ENTRY(___raw_atomic_set_asm)
+ENTRY(___raw_atomic_or_asm)
 	p1 = r0;
 	r3 = r1;
 	[--sp] = rets;
@@ -651,7 +651,7 @@
 	r0 = r3;
 	rets = [sp++];
 	rts;
-ENDPROC(___raw_atomic_set_asm)
+ENDPROC(___raw_atomic_or_asm)
 
 /*
  * r0 = ptr
@@ -787,7 +787,7 @@
 	r2 = r1;
 	r1 = 1;
 	r1 <<= r2;
-	jump ___raw_atomic_set_asm
+	jump ___raw_atomic_or_asm
 ENDPROC(___raw_bit_set_asm)
 
 /*
@@ -798,10 +798,10 @@
  * Clobbers: r3:0, p1:0
  */
 ENTRY(___raw_bit_clear_asm)
-	r2 = r1;
-	r1 = 1;
-	r1 <<= r2;
-	jump ___raw_atomic_clear_asm
+	r2 = 1;
+	r2 <<= r1;
+	r1 = ~r2;
+	jump ___raw_atomic_and_asm
 ENDPROC(___raw_bit_clear_asm)
 
 /*
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 7236bdf..a6d1b03 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -194,7 +194,8 @@
 #ifdef CONFIG_SMP
 static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 {
-	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
+	bfin_internal_unmask_irq_affinity(d->irq,
+					  irq_data_get_affinity_mask(d));
 }
 
 static int bfin_internal_set_affinity(struct irq_data *d,
@@ -685,12 +686,12 @@
 }
 #endif
 
-static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
+static inline void bfin_set_irq_handler(struct irq_data *d, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
 	handle = handle_level_irq;
 #endif
-	__irq_set_handler_locked(irq, handle);
+	irq_set_handler_locked(d, handle);
 }
 
 #ifdef CONFIG_GPIO_ADI
@@ -802,9 +803,9 @@
 	}
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
-		bfin_set_irq_handler(irq, handle_edge_irq);
+		bfin_set_irq_handler(d, handle_edge_irq);
 	else
-		bfin_set_irq_handler(irq, handle_level_irq);
+		bfin_set_irq_handler(d, handle_level_irq);
 
 	return 0;
 }
@@ -824,9 +825,9 @@
 	}
 }
 
-void bfin_demux_gpio_irq(unsigned int inta_irq,
-			struct irq_desc *desc)
+void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc)
 {
+	unsigned int inta_irq = irq_desc_get_irq(desc);
 	unsigned int irq;
 
 	switch (inta_irq) {
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 1c72595..0030e21 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -195,7 +195,7 @@
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index 74e3371..d487698 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -93,10 +93,11 @@
 	.irq_unmask	= unmask_megamod,
 };
 
-static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc)
 {
 	struct megamod_cascade_data *cascade;
 	struct megamod_pic *pic;
+	unsigned int irq;
 	u32 events;
 	int n, idx;
 
@@ -282,8 +283,8 @@
 		soc_writel(~0, &pic->regs->evtmask[i]);
 		soc_writel(~0, &pic->regs->evtclr[i]);
 
-		irq_set_handler_data(irq, &cascade_data[i]);
-		irq_set_chained_handler(irq, megamod_irq_cascade);
+		irq_set_chained_handler_and_data(irq, megamod_irq_cascade,
+						 &cascade_data[i]);
 	}
 
 	/* Finally, set up the MUX registers */
diff --git a/arch/c6x/platforms/timer64.c b/arch/c6x/platforms/timer64.c
index 3c73d74..c19901e 100644
--- a/arch/c6x/platforms/timer64.c
+++ b/arch/c6x/platforms/timer64.c
@@ -126,35 +126,37 @@
 	return 0;
 }
 
-static void set_clock_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+static int set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		timer64_enable();
-		timer64_mode = TIMER64_MODE_PERIODIC;
-		timer64_config(TIMER64_RATE / HZ);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		timer64_enable();
-		timer64_mode = TIMER64_MODE_ONE_SHOT;
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		timer64_mode = TIMER64_MODE_DISABLED;
-		timer64_disable();
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	timer64_enable();
+	timer64_mode = TIMER64_MODE_PERIODIC;
+	timer64_config(TIMER64_RATE / HZ);
+	return 0;
+}
+
+static int set_oneshot(struct clock_event_device *evt)
+{
+	timer64_enable();
+	timer64_mode = TIMER64_MODE_ONE_SHOT;
+	return 0;
+}
+
+static int shutdown(struct clock_event_device *evt)
+{
+	timer64_mode = TIMER64_MODE_DISABLED;
+	timer64_disable();
+	return 0;
 }
 
 static struct clock_event_device t64_clockevent_device = {
-	.name		= "TIMER64_EVT32_TIMER",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.rating		= 200,
-	.set_mode	= set_clock_mode,
-	.set_next_event	= next_event,
+	.name			= "TIMER64_EVT32_TIMER",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 200,
+	.set_state_shutdown	= shutdown,
+	.set_state_periodic	= set_periodic,
+	.set_state_oneshot	= set_oneshot,
+	.set_next_event		= next_event,
 };
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index 4fce9f1..d2a8440 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -172,8 +172,7 @@
 extern void cris_profile_sample(struct pt_regs *regs);
 static void __iomem *timer_base;
 
-static void crisv32_clkevt_mode(enum clock_event_mode mode,
-				struct clock_event_device *dev)
+static int crisv32_clkevt_switch_state(struct clock_event_device *dev)
 {
 	reg_timer_rw_tmr0_ctrl ctrl = {
 		.op = regk_timer_hold,
@@ -181,6 +180,7 @@
 	};
 
 	REG_WR(timer, timer_base, rw_tmr0_ctrl, ctrl);
+	return 0;
 }
 
 static int crisv32_clkevt_next_event(unsigned long evt,
@@ -231,7 +231,9 @@
 	.name = "crisv32-timer",
 	.rating = 300,
 	.features = CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = crisv32_clkevt_mode,
+	.set_state_oneshot = crisv32_clkevt_switch_state,
+	.set_state_shutdown = crisv32_clkevt_switch_state,
+	.tick_resume = crisv32_clkevt_switch_state,
 	.set_next_event = crisv32_clkevt_next_event,
 };
 
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 102190a..0da689d 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -15,7 +15,6 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/types.h>
-#include <asm/spr-regs.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
@@ -23,6 +22,8 @@
 #error not SMP safe
 #endif
 
+#include <asm/atomic_defs.h>
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
@@ -34,56 +35,26 @@
 #define atomic_read(v)		ACCESS_ONCE((v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = (i))
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+static inline int atomic_inc_return(atomic_t *v)
+{
+	return __atomic_add_return(1, &v->counter);
+}
+
+static inline int atomic_dec_return(atomic_t *v)
+{
+	return __atomic_sub_return(1, &v->counter);
+}
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	add%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_add_return(i, &v->counter);
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
-	unsigned long val;
-
-	asm("0:						\n"
-	    "	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-	    "	ckeq		icc3,cc7		\n"
-	    "	ld.p		%M0,%1			\n"	/* LD.P/ORCR must be atomic */
-	    "	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-	    "	sub%I2		%1,%2,%1		\n"
-	    "	cst.p		%1,%M0		,cc3,#1	\n"
-	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* clear ICC3.Z if store happens */
-	    "	beq		icc3,#0,0b		\n"
-	    : "+U"(v->counter), "=&r"(val)
-	    : "NPr"(i)
-	    : "memory", "cc7", "cc3", "icc3"
-	    );
-
-	return val;
+	return __atomic_sub_return(i, &v->counter);
 }
 
-#else
-
-extern int atomic_add_return(int i, atomic_t *v);
-extern int atomic_sub_return(int i, atomic_t *v);
-
-#endif
-
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	return atomic_add_return(i, v) < 0;
@@ -101,17 +72,14 @@
 
 static inline void atomic_inc(atomic_t *v)
 {
-	atomic_add_return(1, v);
+	atomic_inc_return(v);
 }
 
 static inline void atomic_dec(atomic_t *v)
 {
-	atomic_sub_return(1, v);
+	atomic_dec_return(v);
 }
 
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
 #define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
@@ -120,18 +88,19 @@
  * 64-bit atomic ops
  */
 typedef struct {
-	volatile long long counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-static inline long long atomic64_read(atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	long long counter;
 
 	asm("ldd%I1 %M1,%0"
 	    : "=e"(counter)
 	    : "m"(v->counter));
+
 	return counter;
 }
 
@@ -142,10 +111,25 @@
 		     : "e"(i));
 }
 
-extern long long atomic64_inc_return(atomic64_t *v);
-extern long long atomic64_dec_return(atomic64_t *v);
-extern long long atomic64_add_return(long long i, atomic64_t *v);
-extern long long atomic64_sub_return(long long i, atomic64_t *v);
+static inline long long atomic64_inc_return(atomic64_t *v)
+{
+	return __atomic64_add_return(1, &v->counter);
+}
+
+static inline long long atomic64_dec_return(atomic64_t *v)
+{
+	return __atomic64_sub_return(1, &v->counter);
+}
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	return __atomic64_add_return(i, &v->counter);
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	return __atomic64_sub_return(i, &v->counter);
+}
 
 static inline long long atomic64_add_negative(long long i, atomic64_t *v)
 {
@@ -176,6 +160,7 @@
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)	(atomic64_inc_return((v)) == 0)
 
+
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)		(xchg(&(v)->counter, new))
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
@@ -196,5 +181,21 @@
 	return c;
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	(void)__atomic32_fetch_##op(i, &v->counter);			\
+}									\
+									\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	(void)__atomic64_fetch_##op(i, &v->counter);			\
+}
+
+ATOMIC_OP(or)
+ATOMIC_OP(and)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
 
 #endif /* _ASM_ATOMIC_H */
diff --git a/arch/frv/include/asm/atomic_defs.h b/arch/frv/include/asm/atomic_defs.h
new file mode 100644
index 0000000..36e126d
--- /dev/null
+++ b/arch/frv/include/asm/atomic_defs.h
@@ -0,0 +1,172 @@
+
+#include <asm/spr-regs.h>
+
+#ifdef __ATOMIC_LIB__
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_QUALS
+#define ATOMIC_EXPORT(x)	EXPORT_SYMBOL(x)
+
+#else /* !OUTOFLINE && LIB */
+
+#define ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)
+
+#endif /* OUTOFLINE */
+
+#else /* !__ATOMIC_LIB__ */
+
+#define ATOMIC_EXPORT(x)
+
+#ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define ATOMIC_OP_RETURN(op)						\
+extern int __atomic_##op##_return(int i, int *v);			\
+extern long long __atomic64_##op##_return(long long i, long long *v);
+
+#define ATOMIC_FETCH_OP(op)						\
+extern int __atomic32_fetch_##op(int i, int *v);			\
+extern long long __atomic64_fetch_##op(long long i, long long *v);
+
+#else /* !OUTOFLINE && !LIB */
+
+#define ATOMIC_QUALS	static inline
+
+#endif /* OUTOFLINE */
+#endif /* __ATOMIC_LIB__ */
+
+
+/*
+ * Note on the 64 bit inline asm variants...
+ *
+ * CSTD is a conditional instruction and needs a constrained memory reference.
+ * Normally 'U' provides the correct constraints for conditional instructions
+ * and this is used for the 32 bit version, however 'U' does not appear to work
+ * for 64 bit values (gcc-4.9)
+ *
+ * The exact constraint is that conditional instructions cannot deal with an
+ * immediate displacement in the memory reference, so what we do is we read the
+ * address through a volatile cast into a local variable in order to insure we
+ * _have_ to compute the correct address without displacement. This allows us
+ * to use the regular 'm' for the memory address.
+ *
+ * Furthermore, the %Ln operand, which prints the low word register (r+1),
+ * really only works for registers, this means we cannot allow immediate values
+ * for the 64 bit versions -- like we do for the 32 bit ones.
+ *
+ */
+
+#ifndef ATOMIC_OP_RETURN
+#define ATOMIC_OP_RETURN(op)						\
+ATOMIC_QUALS int __atomic_##op##_return(int i, int *v)			\
+{									\
+	int val;							\
+									\
+	asm volatile(							\
+	    "0:						\n"		\
+	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
+	    "	ckeq		icc3,cc7		\n"		\
+	    "	ld.p		%M0,%1			\n"		\
+	    "	orcr		cc7,cc7,cc3		\n"		\
+	    "   "#op"%I2	%1,%2,%1		\n"		\
+	    "	cst.p		%1,%M0		,cc3,#1	\n"		\
+	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
+	    "	beq		icc3,#0,0b		\n"		\
+	    : "+U"(*v), "=&r"(val)					\
+	    : "NPr"(i)							\
+	    : "memory", "cc7", "cc3", "icc3"				\
+	    );								\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic_##op##_return);					\
+									\
+ATOMIC_QUALS long long __atomic64_##op##_return(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long val;							\
+									\
+	asm volatile(							\
+	    "0:						\n"		\
+	    "	orcc		gr0,gr0,gr0,icc3	\n"		\
+	    "	ckeq		icc3,cc7		\n"		\
+	    "	ldd.p		%M0,%1			\n"		\
+	    "	orcr		cc7,cc7,cc3		\n"		\
+	    "   "#op"cc		%L1,%L2,%L1,icc0	\n"		\
+	    "   "#op"x		%1,%2,%1,icc0		\n"		\
+	    "	cstd.p		%1,%M0		,cc3,#1	\n"		\
+	    "	corcc		gr29,gr29,gr0	,cc3,#1	\n"		\
+	    "	beq		icc3,#0,0b		\n"		\
+	    : "+m"(*__v), "=&e"(val)					\
+	    : "e"(i)							\
+	    : "memory", "cc7", "cc3", "icc0", "icc3"			\
+	    );								\
+									\
+	return val;							\
+}									\
+ATOMIC_EXPORT(__atomic64_##op##_return);
+#endif
+
+#ifndef ATOMIC_FETCH_OP
+#define ATOMIC_FETCH_OP(op)						\
+ATOMIC_QUALS int __atomic32_fetch_##op(int i, int *v)			\
+{									\
+	int old, tmp;							\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ld.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"%I3	%1,%3,%2		\n"	\
+		"	cst.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+U"(*v), "=&r"(old), "=r"(tmp)			\
+		: "NPr"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic32_fetch_##op);					\
+									\
+ATOMIC_QUALS long long __atomic64_fetch_##op(long long i, long long *v)	\
+{									\
+	long long *__v = READ_ONCE(v);					\
+	long long old, tmp;						\
+									\
+	asm volatile(							\
+		"0:						\n"	\
+		"	orcc		gr0,gr0,gr0,icc3	\n"	\
+		"	ckeq		icc3,cc7		\n"	\
+		"	ldd.p		%M0,%1			\n"	\
+		"	orcr		cc7,cc7,cc3		\n"	\
+		"	"#op"		%L1,%L3,%L2		\n"	\
+		"	"#op"		%1,%3,%2		\n"	\
+		"	cstd.p		%2,%M0		,cc3,#1	\n"	\
+		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	\
+		"	beq		icc3,#0,0b		\n"	\
+		: "+m"(*__v), "=&e"(old), "=e"(tmp)			\
+		: "e"(i)						\
+		: "memory", "cc7", "cc3", "icc3"			\
+		);							\
+									\
+	return old;							\
+}									\
+ATOMIC_EXPORT(__atomic64_fetch_##op);
+#endif
+
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_QUALS
+#undef ATOMIC_EXPORT
diff --git a/arch/frv/include/asm/bitops.h b/arch/frv/include/asm/bitops.h
index 96de220..0df8e95 100644
--- a/arch/frv/include/asm/bitops.h
+++ b/arch/frv/include/asm/bitops.h
@@ -25,109 +25,30 @@
 
 #include <asm-generic/bitops/ffz.h>
 
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-static inline
-unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	and%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(~mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	or%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-static inline
-unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v)
-{
-	unsigned long old, tmp;
-
-	asm volatile(
-		"0:						\n"
-		"	orcc		gr0,gr0,gr0,icc3	\n"	/* set ICC3.Z */
-		"	ckeq		icc3,cc7		\n"
-		"	ld.p		%M0,%1			\n"	/* LD.P/ORCR are atomic */
-		"	orcr		cc7,cc7,cc3		\n"	/* set CC3 to true */
-		"	xor%I3		%1,%3,%2		\n"
-		"	cst.p		%2,%M0		,cc3,#1	\n"	/* if store happens... */
-		"	corcc		gr29,gr29,gr0	,cc3,#1	\n"	/* ... clear ICC3.Z */
-		"	beq		icc3,#0,0b		\n"
-		: "+U"(*v), "=&r"(old), "=r"(tmp)
-		: "NPr"(mask)
-		: "memory", "cc7", "cc3", "icc3"
-		);
-
-	return old;
-}
-
-#else
-
-extern unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-
-#endif
-
-#define atomic_clear_mask(mask, v)	atomic_test_and_ANDNOT_mask((mask), (v))
-#define atomic_set_mask(mask, v)	atomic_test_and_OR_mask((mask), (v))
+#include <asm/atomic.h>
 
 static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_and(~mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_or(mask, ptr) & mask) != 0;
 }
 
 static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
-	volatile unsigned long *ptr = addr;
-	unsigned long mask = 1UL << (nr & 31);
+	unsigned int *ptr = (void *)addr;
+	unsigned int mask = 1UL << (nr & 31);
 	ptr += nr >> 5;
-	return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
+	return (__atomic32_fetch_xor(mask, ptr) & mask) != 0;
 }
 
 static inline void clear_bit(unsigned long nr, volatile void *addr)
diff --git a/arch/frv/include/asm/io.h b/arch/frv/include/asm/io.h
index a31b63e..70dfbea 100644
--- a/arch/frv/include/asm/io.h
+++ b/arch/frv/include/asm/io.h
@@ -278,6 +278,7 @@
 }
 
 #define ioremap_wc ioremap_nocache
+#define ioremap_uc ioremap_nocache
 
 extern void iounmap(void volatile __iomem *addr);
 
diff --git a/arch/frv/kernel/dma.c b/arch/frv/kernel/dma.c
index 156184e..370dc9f 100644
--- a/arch/frv/kernel/dma.c
+++ b/arch/frv/kernel/dma.c
@@ -109,13 +109,13 @@
 
 static DEFINE_RWLOCK(frv_dma_channels_lock);
 
-unsigned long frv_dma_inprogress;
+unsigned int frv_dma_inprogress;
 
 #define frv_clear_dma_inprogress(channel) \
-	atomic_clear_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_and(~(1 << (channel)), &frv_dma_inprogress);
 
 #define frv_set_dma_inprogress(channel) \
-	atomic_set_mask(1 << (channel), &frv_dma_inprogress);
+	(void)__atomic32_fetch_or(1 << (channel), &frv_dma_inprogress);
 
 /*****************************************************************************/
 /*
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index 86c516d..cdb4ce9 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -58,11 +58,6 @@
 EXPORT_SYMBOL(__insl_ns);
 
 #ifdef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-EXPORT_SYMBOL(atomic_test_and_ANDNOT_mask);
-EXPORT_SYMBOL(atomic_test_and_OR_mask);
-EXPORT_SYMBOL(atomic_test_and_XOR_mask);
-EXPORT_SYMBOL(atomic_add_return);
-EXPORT_SYMBOL(atomic_sub_return);
 EXPORT_SYMBOL(__xchg_32);
 EXPORT_SYMBOL(__cmpxchg_32);
 #endif
diff --git a/arch/frv/lib/Makefile b/arch/frv/lib/Makefile
index 4ff2fb1..970e8b4 100644
--- a/arch/frv/lib/Makefile
+++ b/arch/frv/lib/Makefile
@@ -5,4 +5,4 @@
 lib-y := \
 	__ashldi3.o __lshrdi3.o __muldi3.o __ashrdi3.o __negdi2.o __ucmpdi2.o \
 	checksum.o memcpy.o memset.o atomic-ops.o atomic64-ops.o \
-	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o
+	outsl_ns.o outsl_sw.o insl_ns.o insl_sw.o cache.o atomic-lib.o
diff --git a/arch/frv/lib/atomic-lib.c b/arch/frv/lib/atomic-lib.c
new file mode 100644
index 0000000..4d1b887
--- /dev/null
+++ b/arch/frv/lib/atomic-lib.c
@@ -0,0 +1,7 @@
+
+#include <linux/export.h>
+#include <asm/atomic.h>
+
+#define __ATOMIC_LIB__
+
+#include <asm/atomic_defs.h>
diff --git a/arch/frv/lib/atomic-ops.S b/arch/frv/lib/atomic-ops.S
index 5e9e6ab..b7439a9 100644
--- a/arch/frv/lib/atomic-ops.S
+++ b/arch/frv/lib/atomic-ops.S
@@ -19,116 +19,6 @@
 
 ###############################################################################
 #
-# unsigned long atomic_test_and_ANDNOT_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_ANDNOT_mask
-        .type		atomic_test_and_ANDNOT_mask,@function
-atomic_test_and_ANDNOT_mask:
-	not.p		gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	and		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_ANDNOT_mask, .-atomic_test_and_ANDNOT_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_OR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_OR_mask
-        .type		atomic_test_and_OR_mask,@function
-atomic_test_and_OR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	or		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_OR_mask, .-atomic_test_and_OR_mask
-
-###############################################################################
-#
-# unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsigned long *v);
-#
-###############################################################################
-	.globl		atomic_test_and_XOR_mask
-        .type		atomic_test_and_XOR_mask,@function
-atomic_test_and_XOR_mask:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	xor		gr8,gr10,gr11
-	cst.p		gr11,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_test_and_XOR_mask, .-atomic_test_and_XOR_mask
-
-###############################################################################
-#
-# int atomic_add_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_add_return
-        .type		atomic_add_return,@function
-atomic_add_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	add		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_add_return, .-atomic_add_return
-
-###############################################################################
-#
-# int atomic_sub_return(int i, atomic_t *v)
-#
-###############################################################################
-	.globl		atomic_sub_return
-        .type		atomic_sub_return,@function
-atomic_sub_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ld.p		@(gr9,gr0),gr8			/* LD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	sub		gr8,gr10,gr8
-	cst.p		gr8,@(gr9,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic_sub_return, .-atomic_sub_return
-
-###############################################################################
-#
 # uint32_t __xchg_32(uint32_t i, uint32_t *v)
 #
 ###############################################################################
diff --git a/arch/frv/lib/atomic64-ops.S b/arch/frv/lib/atomic64-ops.S
index b6194ee..c4c4723 100644
--- a/arch/frv/lib/atomic64-ops.S
+++ b/arch/frv/lib/atomic64-ops.S
@@ -20,100 +20,6 @@
 
 ###############################################################################
 #
-# long long atomic64_inc_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_inc_return
-        .type		atomic64_inc_return,@function
-atomic64_inc_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addicc		gr9,#1,gr9,icc0
-	addxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_inc_return, .-atomic64_inc_return
-
-###############################################################################
-#
-# long long atomic64_dec_return(atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_dec_return
-        .type		atomic64_dec_return,@function
-atomic64_dec_return:
-	or.p		gr8,gr8,gr10
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subicc		gr9,#1,gr9,icc0
-	subxi		gr8,#0,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_dec_return, .-atomic64_dec_return
-
-###############################################################################
-#
-# long long atomic64_add_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_add_return
-        .type		atomic64_add_return,@function
-atomic64_add_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	addcc		gr9,gr5,gr9,icc0
-	addx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_add_return, .-atomic64_add_return
-
-###############################################################################
-#
-# long long atomic64_sub_return(long long i, atomic64_t *v)
-#
-###############################################################################
-	.globl		atomic64_sub_return
-        .type		atomic64_sub_return,@function
-atomic64_sub_return:
-	or.p		gr8,gr8,gr4
-	or		gr9,gr9,gr5
-0:
-	orcc		gr0,gr0,gr0,icc3		/* set ICC3.Z */
-	ckeq		icc3,cc7
-	ldd.p		@(gr10,gr0),gr8			/* LDD.P/ORCR must be atomic */
-	orcr		cc7,cc7,cc3			/* set CC3 to true */
-	subcc		gr9,gr5,gr9,icc0
-	subx		gr8,gr4,gr8,icc0
-	cstd.p		gr8,@(gr10,gr0)		,cc3,#1
-	corcc		gr29,gr29,gr0		,cc3,#1	/* clear ICC3.Z if store happens */
-	beq		icc3,#0,0b
-	bralr
-
-	.size		atomic64_sub_return, .-atomic64_sub_return
-
-###############################################################################
-#
 # uint64_t __xchg_64(uint64_t i, uint64_t *v)
 #
 ###############################################################################
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 0635bd6..34bb4b1 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -175,14 +175,6 @@
 			if (!r->start && r->end)
 				pci_assign_resource(dev, idx);
 		}
-
-		if (pci_probe & PCI_ASSIGN_ROMS) {
-			r = &dev->resource[PCI_ROM_RESOURCE];
-			r->end -= r->start;
-			r->start = 0;
-			if (r->end)
-				pci_assign_resource(dev, PCI_ROM_RESOURCE);
-		}
 	}
 }
 
diff --git a/arch/frv/mb93090-mb00/pci-frv.h b/arch/frv/mb93090-mb00/pci-frv.h
index a7e487fe..d51992f 100644
--- a/arch/frv/mb93090-mb00/pci-frv.h
+++ b/arch/frv/mb93090-mb00/pci-frv.h
@@ -14,14 +14,6 @@
 #define DBG(x...)
 #endif
 
-#define PCI_PROBE_BIOS		0x0001
-#define PCI_PROBE_CONF1		0x0002
-#define PCI_PROBE_CONF2		0x0004
-#define PCI_NO_CHECKS		0x0400
-#define PCI_ASSIGN_ROMS		0x1000
-#define PCI_BIOS_IRQ_SCAN	0x2000
-#define PCI_ASSIGN_ALL_BUSSES	0x4000
-
 extern unsigned int __nongpreldata pci_probe;
 
 /* pci-frv.c */
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index f211839..f9c86c4 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -294,8 +294,6 @@
 	printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
 #endif
 
-	pci_read_bridge_bases(bus);
-
 	if (bus->number == 0) {
 		struct pci_dev *dev;
 		list_for_each_entry(dev, &bus->devices, bus_list) {
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 7ca73f8..702ee53 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -16,83 +16,52 @@
 
 #include <linux/kernel.h>
 
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	ret = v->counter += i;
-	arch_local_irq_restore(flags);
-	return ret;
+#define ATOMIC_OP_RETURN(op, c_op)				\
+static inline int atomic_##op##_return(int i, atomic_t *v)	\
+{								\
+	h8300flags flags;					\
+	int ret;						\
+								\
+	flags = arch_local_irq_save();				\
+	ret = v->counter c_op i;				\
+	arch_local_irq_restore(flags);				\
+	return ret;						\
 }
 
-#define atomic_add(i, v) atomic_add_return(i, v)
+#define ATOMIC_OP(op, c_op)					\
+static inline void atomic_##op(int i, atomic_t *v)		\
+{								\
+	h8300flags flags;					\
+								\
+	flags = arch_local_irq_save();				\
+	v->counter c_op i;					\
+	arch_local_irq_restore(flags);				\
+}
+
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP_RETURN(sub, -=)
+
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or,  |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add(i, v)		(void)atomic_add_return(i, v)
 #define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
+#define atomic_sub(i, v)		(void)atomic_sub_return(i, v)
+#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)
 
-	flags = arch_local_irq_save();
-	ret = v->counter -= i;
-	arch_local_irq_restore(flags);
-	return ret;
-}
+#define atomic_inc_return(v)		atomic_add_return(1, v)
+#define atomic_dec_return(v)		atomic_sub_return(1, v)
 
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic_inc(v)			(void)atomic_inc_return(v)
+#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
 
-static inline int atomic_inc_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	v->counter++;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static inline int atomic_dec_return(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static inline int atomic_dec_and_test(atomic_t *v)
-{
-	h8300flags flags;
-	int ret;
-
-	flags = arch_local_irq_save();
-	--v->counter;
-	ret = v->counter;
-	arch_local_irq_restore(flags);
-	return ret == 0;
-}
+#define atomic_dec(v)			(void)atomic_dec_return(v)
+#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -120,40 +89,4 @@
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "and.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	unsigned char ccr;
-	unsigned long tmp;
-
-	__asm__ __volatile__("stc ccr,%w3\n\t"
-			     "orc #0x80,ccr\n\t"
-			     "mov.l %0,%1\n\t"
-			     "or.l %2,%1\n\t"
-			     "mov.l %1,%0\n\t"
-			     "ldc %w3,ccr"
-			     : "=m"(*v), "=r"(tmp)
-			     : "g"(~(mask)), "r"(ccr));
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* __ARCH_H8300_ATOMIC __ */
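
The new ATOMIC_OP_RETURN()/ATOMIC_OP() generators replace the hand-rolled add/sub/inc/dec helpers above. For reference, ATOMIC_OP_RETURN(add, +=) expands to roughly the following (names taken straight from the macro bodies in this hunk):

	static inline int atomic_add_return(int i, atomic_t *v)
	{
		h8300flags flags;
		int ret;

		flags = arch_local_irq_save();	/* UP-only: IRQ-off is sufficient serialization */
		ret = v->counter += i;		/* apply the op and keep the result */
		arch_local_irq_restore(flags);
		return ret;
	}
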
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 93d0702..811d61f 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -132,6 +132,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 0bf0350..be4beeb 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -45,8 +45,6 @@
 ATOMIC_OP(add, +)
 ATOMIC_OP(sub, -)
 
-#undef ATOMIC_OP
-
 #define atomic_add_return(i,v)						\
 ({									\
 	int __ia64_aar_i = (i);						\
@@ -71,6 +69,16 @@
 		: ia64_atomic_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC_OP(and, &)
+ATOMIC_OP(or, |)
+ATOMIC_OP(xor, ^)
+
+#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
+#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
+#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)
+
+#undef ATOMIC_OP
+
 #define ATOMIC64_OP(op, c_op)						\
 static __inline__ long							\
 ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
@@ -89,8 +97,6 @@
 ATOMIC64_OP(add, +)
 ATOMIC64_OP(sub, -)
 
-#undef ATOMIC64_OP
-
 #define atomic64_add_return(i,v)					\
 ({									\
 	long __ia64_aar_i = (i);					\
@@ -115,6 +121,16 @@
 		: ia64_atomic64_sub(__ia64_asr_i, v);			\
 })
 
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
+#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
+#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)
+
+#undef ATOMIC64_OP
+
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 843ba43..df896a1 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -66,12 +66,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
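
Only the accessors change here; the acquire/release semantics are untouched. A minimal publish/consume sketch of what these barriers provide (illustrative only; flag, data, compute() and use() are made-up names):

	/* producer */
	data = compute();
	smp_store_release(&flag, 1);	/* all prior stores are visible before flag */

	/* consumer */
	if (smp_load_acquire(&flag))	/* later loads cannot be hoisted above this */
		use(data);
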
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index bc9501e..d2fae05 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -610,9 +610,9 @@
 			       chip->name, irq_type->name);
 		chip = irq_type;
 	}
-	__irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ?
-					   handle_edge_irq : handle_level_irq,
-					   NULL);
+	irq_set_chip_handler_name_locked(irq_get_irq_data(irq), chip,
+		trigger == IOSAPIC_EDGE ? handle_edge_irq : handle_level_irq,
+		NULL);
 	return 0;
 }
 
@@ -838,7 +838,7 @@
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpumask_setall(irq_get_irq_data(irq)->affinity);
+		cpumask_setall(irq_get_affinity_mask(irq));
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 812a1e6..de4fc00 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -67,7 +67,7 @@
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(irq_get_irq_data(irq)->affinity,
+		cpumask_copy(irq_get_affinity_mask(irq),
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -119,8 +119,8 @@
 		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (cpumask_any_and(data->affinity, cpu_online_mask)
-		    >= nr_cpu_ids) {
+		if (cpumask_any_and(irq_data_get_affinity_mask(data),
+				    cpu_online_mask) >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
 			 */
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index d70bf15..af4eaec 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -23,7 +23,7 @@
 	if (irq_prepare_move(irq, cpu))
 		return -1;
 
-	__get_cached_msi_msg(idata->msi_desc, &msg);
+	__get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg);
 
 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DEST_ID_MASK;
@@ -36,7 +36,7 @@
 	msg.data = data;
 
 	pci_write_msi_msg(irq, &msg);
-	cpumask_copy(idata->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu));
 
 	return 0;
 }
@@ -148,7 +148,7 @@
 	msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	cpumask_copy(data->affinity, mask);
+	cpumask_copy(irq_data_get_affinity_mask(data), mask);
 
 	return 0;
 }
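
The ia64 interrupt changes are all the same mechanical substitution: open-coded irq_data field accesses become the accessor helpers. The generic shape (sketch; irq, cpu and msg are placeholders):

	struct irq_data *d = irq_get_irq_data(irq);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));	/* was d->affinity */
	__get_cached_msi_msg(irq_data_get_msi_desc(d), &msg);		/* was d->msi_desc */
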
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ed61297..46ecc5d 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -444,7 +444,7 @@
 			if (p->pte & 0x1)
 				if (is_tr_overlap(p, va, log_size)) {
 					printk(KERN_DEBUG "Overlapped Entry"
-						"Inserted for TR Reigster!!\n");
+						"Inserted for TR Register!!\n");
 					goto out;
 			}
 		}
@@ -456,7 +456,7 @@
 			if (p->pte & 0x1)
 				if (is_tr_overlap(p, va, log_size)) {
 					printk(KERN_DEBUG "Overlapped Entry"
-						"Inserted for TR Reigster!!\n");
+						"Inserted for TR Register!!\n");
 					goto out;
 				}
 		}
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7cc3be9..d89b601 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -533,10 +533,9 @@
 {
 	struct pci_dev *dev;
 
-	if (b->self) {
-		pci_read_bridge_bases(b);
+	if (b->self)
 		pcibios_fixup_bridge_resources(b->self);
-	}
+
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
 	platform_pci_fixup_bus(b);
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index a0eb27b..fb25065 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -175,7 +175,7 @@
 	 * Release XIO resources for the old MSI PCI address
 	 */
 
-	__get_cached_msi_msg(data->msi_desc, &msg);
+	__get_cached_msi_msg(irq_data_get_msi_desc(data), &msg);
 	sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
 	pdev = sn_pdev->pdi_linux_pcidev;
 	provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -206,7 +206,7 @@
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	pci_write_msi_msg(irq, &msg);
-	cpumask_copy(data->affinity, cpu_mask);
+	cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask);
 
 	return 0;
 }
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 31bb74a..025e2a1 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -94,6 +94,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -239,45 +243,4 @@
 	return c;
 }
 
-
-static __inline__ void atomic_clear_mask(unsigned long  mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_clear_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"and	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (~mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
-static __inline__ void atomic_set_mask(unsigned long  mask, atomic_t *addr)
-{
-	unsigned long flags;
-	unsigned long tmp;
-
-	local_irq_save(flags);
-	__asm__ __volatile__ (
-		"# atomic_set_mask		\n\t"
-		DCACHE_CLEAR("%0", "r5", "%1")
-		M32R_LOCK" %0, @%1;		\n\t"
-		"or	%0, %2;			\n\t"
-		M32R_UNLOCK" %0, @%1;		\n\t"
-		: "=&r" (tmp)
-		: "r" (addr), "r" (mask)
-		: "memory"
-		__ATOMIC_CLOBBER
-	);
-	local_irq_restore(flags);
-}
-
 #endif	/* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index f8de767..61b8931 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -69,6 +69,7 @@
 #define ioremap_nocache(off,size) ioremap(off,size)
 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
+#define ioremap_uc ioremap_nocache
 
 /*
  * IO bus memory addresses are also 1:1 with the physical address
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c18ddc7..62d6961 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -156,7 +156,7 @@
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to
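
atomic_or() is the generic replacement for the removed atomic_set_mask(); the conversion is mechanical. The clear_mask direction is shown by analogy only, it is not exercised in this hunk:

	/* old */				/* new */
	atomic_set_mask(mask, v);	->	atomic_or(mask, v);
	atomic_clear_mask(mask, v);	->	atomic_and(~mask, v);
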
diff --git a/arch/m68k/coldfire/intc-5272.c b/arch/m68k/coldfire/intc-5272.c
index d1e2fba..47371de 100644
--- a/arch/m68k/coldfire/intc-5272.c
+++ b/arch/m68k/coldfire/intc-5272.c
@@ -143,8 +143,10 @@
  * We need to be careful with the masking/acking due to the side effects
  * of masking an interrupt.
  */
-static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
+static void intc_external_irq(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
+
 	irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
 	handle_simple_irq(irq, desc);
 }
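
Demultiplexing flow handlers are being weaned off the passed-in irq number; until the handler prototype itself changes, the number is re-derived from the descriptor. The generic shape of the conversion (my_demux_irq is a made-up name):

	static void my_demux_irq(unsigned int __irq, struct irq_desc *desc)
	{
		unsigned int irq = irq_desc_get_irq(desc);	/* authoritative irq number */

		irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
		handle_simple_irq(irq, desc);
	}
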
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index b15219e..c525e4c 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -126,7 +126,7 @@
 static int __init init_BSP(void)
 {
 	m5272_uarts_init();
-	fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
+	fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status, -1);
 	return 0;
 }
 
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 753a623..0b6b40d 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -57,7 +57,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -91,6 +91,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -287,7 +288,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -320,7 +320,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -345,6 +344,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -355,6 +355,7 @@
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
@@ -363,6 +364,7 @@
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -448,6 +450,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -536,6 +539,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -543,6 +547,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -571,14 +576,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
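
The defconfig churn repeated in every m68k config below follows one pattern: the GENEVE tunnel core keeps its module but is now called GENEVE_CORE, while CONFIG_GENEVE refers to the new geneve net device selected next to VXLAN. That reading follows from the paired -/+ lines; the Kconfig rename itself is outside these hunks:

	CONFIG_GENEVE_CORE=m	# was CONFIG_GENEVE, the IP tunnel core
	CONFIG_GENEVE=m		# new: geneve net device, alongside CONFIG_VXLAN
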
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 1f93dca..eeb3a89 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -55,7 +55,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -89,6 +89,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -279,7 +280,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -302,7 +302,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -327,17 +326,20 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -406,6 +408,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -494,6 +497,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -501,6 +505,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -529,14 +534,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 831b8b8..3a70066 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -55,7 +55,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -89,6 +89,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -283,7 +284,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -311,7 +311,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -336,6 +335,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -343,11 +343,13 @@
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -428,6 +430,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -516,6 +519,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -523,6 +527,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -551,14 +556,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 91fd187..0586b32 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -53,7 +53,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -87,6 +87,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -277,7 +278,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -301,7 +301,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -326,17 +325,20 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -399,6 +401,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -487,6 +490,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -494,6 +498,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -522,14 +527,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 9d4934f..ad1dbce 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -55,7 +55,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -89,6 +89,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -279,7 +280,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -302,7 +302,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -327,6 +326,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -334,11 +334,13 @@
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -408,6 +410,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -496,6 +499,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -503,6 +507,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -531,14 +536,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 72bc187..b44acac 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -54,7 +54,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -88,6 +88,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -282,7 +283,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -311,7 +311,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -343,6 +342,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -351,12 +351,14 @@
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
 CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -430,6 +432,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -518,6 +521,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -525,6 +529,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -553,14 +558,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 8fb6553..8afca37 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -64,7 +64,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -98,6 +98,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -301,7 +302,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -344,7 +344,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -376,6 +375,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -391,6 +391,7 @@
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 CONFIG_MAC89x0=y
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HP is not set
 CONFIG_BVME6000_NET=y
 CONFIG_MVME16x_NET=y
@@ -403,6 +404,7 @@
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -510,6 +512,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -598,6 +601,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -605,6 +609,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -633,14 +638,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index f34491e..ef00875 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -52,7 +52,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -86,6 +86,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -276,7 +277,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -300,7 +300,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -325,6 +324,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -332,11 +332,13 @@
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -399,6 +401,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -487,6 +490,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -494,6 +498,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -522,14 +527,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 3d3614d..387c2bd 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -53,7 +53,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -87,6 +87,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -277,7 +278,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -301,7 +301,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -326,17 +325,20 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -399,6 +401,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -487,6 +490,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -494,6 +498,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -522,14 +527,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 643e9c9..35355c1 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -53,7 +53,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -87,6 +87,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -280,7 +281,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -307,7 +307,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -332,6 +331,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -341,12 +341,14 @@
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -421,6 +423,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -509,6 +512,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -516,6 +520,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -544,14 +549,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 8fecc5a..8442d26 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -50,7 +50,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -84,6 +84,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -274,7 +275,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -298,7 +298,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -323,17 +322,20 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -400,6 +402,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -487,6 +490,7 @@
 CONFIG_TEST_FIRMWARE=m
 CONFIG_TEST_UDELAY=m
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -494,6 +498,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -522,14 +527,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 9902c5b..0e1b542 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -50,7 +50,7 @@
 CONFIG_NET_IPGRE=m
 CONFIG_NET_IPVTI=m
 CONFIG_NET_FOU_IP_TUNNELS=y
-CONFIG_GENEVE=m
+CONFIG_GENEVE_CORE=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
@@ -84,6 +84,7 @@
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_INET=m
+CONFIG_NF_TABLES_NETDEV=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
@@ -274,7 +275,6 @@
 CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_PMEM=m
 CONFIG_CDROM_PKTCDVD=m
 CONFIG_ATA_OVER_ETH=m
 CONFIG_DUMMY_IRQ=m
@@ -298,7 +298,6 @@
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
-CONFIG_DM_CACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -323,6 +322,7 @@
 CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
+CONFIG_GENEVE=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
 CONFIG_VETH=m
@@ -330,11 +330,13 @@
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
 # CONFIG_NET_VENDOR_SAMSUNG is not set
 # CONFIG_NET_VENDOR_SEEQ is not set
@@ -400,6 +402,7 @@
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
 CONFIG_TMPFS=y
 CONFIG_AFFS_FS=m
 CONFIG_ECRYPT_FS=m
@@ -488,6 +491,7 @@
 CONFIG_TEST_UDELAY=m
 CONFIG_EARLY_PRINTK=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_MANAGER=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_CRYPTD=m
@@ -495,6 +499,7 @@
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -523,14 +528,15 @@
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_ZLIB=m
 CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DRBG_MENU=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 # CONFIG_CRYPTO_HW is not set
 CONFIG_XZ_DEC_TEST=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 2d75ae2..f2a00c5 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -76,7 +76,7 @@
 				bvec_to_phys(&bvec));
 		sec += len;
 	}
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
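
bio_endio() loses its error argument this cycle, so a plain success completion is just bio_endio(bio). The error path converts as in this sketch, assuming the new bi_error field carries what used to be the second argument:

	/* old */
	bio_endio(bio, -EIO);

	/* new */
	bio->bi_error = -EIO;
	bio_endio(bio);
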
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index e85f047..039fac1 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -77,6 +77,10 @@
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, eor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -170,16 +174,6 @@
 	return c != 0;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
-}
-
-static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
-}
-
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index f55cad5..c98ac81 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -468,6 +468,7 @@
 {
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
+#define ioremap_uc ioremap_nocache
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
 					 unsigned long size)
 {
diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c
index 7ee853e..2a33a96 100644
--- a/arch/m68k/kernel/bootinfo_proc.c
+++ b/arch/m68k/kernel/bootinfo_proc.c
@@ -62,12 +62,10 @@
 	if (!bootinfo_size)
 		return -EINVAL;
 
-	bootinfo_copy = kmalloc(bootinfo_size, GFP_KERNEL);
+	bootinfo_copy = kmemdup(bootinfo_tmp, bootinfo_size, GFP_KERNEL);
 	if (!bootinfo_copy)
 		return -ENOMEM;
 
-	memcpy(bootinfo_copy, bootinfo_tmp, bootinfo_size);
-
 	pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL);
 	if (!pde) {
 		kfree(bootinfo_copy);
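
kmemdup() folds the allocate-then-copy pair into a single call; the general pattern (sketch):

	/* before */
	p = kmalloc(len, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	memcpy(p, src, len);

	/* after */
	p = kmemdup(src, len, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
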
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index bb11dce..191610d 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -63,13 +63,15 @@
  * Handle miscellaneous OSS interrupts.
  */
 
-static void oss_irq(unsigned int irq, struct irq_desc *desc)
+static void oss_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	int events = oss->irq_pending &
-	             (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
+		(OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM);
 
 #ifdef DEBUG_IRQS
 	if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) {
+		unsigned int irq = irq_desc_get_irq(desc);
+
 		printk("oss_irq: irq %u events = 0x%04X\n", irq,
 			(int) oss->irq_pending);
 	}
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index 272dde4..3b9e302 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -113,9 +113,10 @@
  * PSC interrupt handler. It's a lot like the VIA interrupt handler.
  */
 
-static void psc_irq(unsigned int irq, struct irq_desc *desc)
+static void psc_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	int pIFR	= pIFRbase + offset;
 	int pIER	= pIERbase + offset;
 	int irq_num;
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 948d868..21c4c26 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -74,44 +74,14 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1:	LNKGETD %0, [%1]\n"
-		"	AND	%0, %0, %2\n"
-		"	LNKSETD	[%1] %0\n"
-		"	DEFR	%0, TXSTAT\n"
-		"	ANDT	%0, %0, #HI(0x3f000000)\n"
-		"	CMPT	%0, #HI(0x02000000)\n"
-		"	BNZ	1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (~mask)
-		: "cc");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int temp;
-
-	asm volatile (
-		"1:	LNKGETD %0, [%1]\n"
-		"	OR	%0, %0, %2\n"
-		"	LNKSETD	[%1], %0\n"
-		"	DEFR	%0, TXSTAT\n"
-		"	ANDT	%0, %0, #HI(0x3f000000)\n"
-		"	CMPT	%0, #HI(0x02000000)\n"
-		"	BNZ	1b\n"
-		: "=&d" (temp)
-		: "da" (&v->counter), "bd" (mask)
-		: "cc");
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int result, temp;
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f5d5898..f8efe38 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -68,31 +68,14 @@
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter &= ~mask;
-	__global_unlock1(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	__global_lock1(flags);
-	fence();
-	v->counter |= mask;
-	__global_unlock1(flags);
-}
-
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	int ret;
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 5a696e5..172b7e5 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -90,12 +90,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
diff --git a/arch/metag/include/asm/elf.h b/arch/metag/include/asm/elf.h
index d2baf69..87b0cf1 100644
--- a/arch/metag/include/asm/elf.h
+++ b/arch/metag/include/asm/elf.h
@@ -11,7 +11,7 @@
 #define R_METAG_RELBRANCH                4
 #define R_METAG_GETSETOFF                5
 
-/* Backward compatability */
+/* Backward compatibility */
 #define R_METAG_REG32OP1                 6
 #define R_METAG_REG32OP2                 7
 #define R_METAG_REG32OP3                 8
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index bf4dec2..c89da63 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,8 +61,7 @@
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
-							sg->length, direction);
+		__dma_sync(sg_phys(sg), sg->length, direction);
 	}
 
 	return nents;
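
sg_phys() is simply the helper form of the expression being removed; roughly (the real definition lives in <linux/scatterlist.h>):

	static inline dma_addr_t sg_phys(struct scatterlist *sg)
	{
		return page_to_phys(sg_page(sg)) + sg->offset;
	}
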
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 719feee..90bec7d 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -11,12 +11,11 @@
 
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of_address.h>
 #include <linux/io.h>
 #include <linux/bug.h>
 
-#include "../../drivers/irqchip/irqchip.h"
-
 static void __iomem *intc_baseaddr;
 
 /* No one else should require these constants, so define them locally here. */
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index c897745..67e2ef4 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -122,37 +122,29 @@
 	return 0;
 }
 
-static void xilinx_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int xilinx_timer_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("%s: periodic\n", __func__);
-		xilinx_timer0_start_periodic(freq_div_hz);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("%s: oneshot\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-		pr_info("%s: unused\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		pr_info("%s: shutdown\n", __func__);
-		xilinx_timer0_stop();
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		pr_info("%s: resume\n", __func__);
-		break;
-	}
+	pr_info("%s\n", __func__);
+	xilinx_timer0_stop();
+	return 0;
+}
+
+static int xilinx_timer_set_periodic(struct clock_event_device *evt)
+{
+	pr_info("%s\n", __func__);
+	xilinx_timer0_start_periodic(freq_div_hz);
+	return 0;
 }
 
 static struct clock_event_device clockevent_xilinx_timer = {
-	.name		= "xilinx_clockevent",
-	.features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.shift		= 8,
-	.rating		= 300,
-	.set_next_event	= xilinx_timer_set_next_event,
-	.set_mode	= xilinx_timer_set_mode,
+	.name			= "xilinx_clockevent",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.shift			= 8,
+	.rating			= 300,
+	.set_next_event		= xilinx_timer_set_next_event,
+	.set_state_shutdown	= xilinx_timer_shutdown,
+	.set_state_periodic	= xilinx_timer_set_periodic,
 };
 
 static inline void timer_ack(void)
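
The clockevents set_mode() multiplexer is being replaced tree-wide by per-state callbacks that return int; the old modes map onto the new hooks roughly as follows (only shutdown and periodic are needed by this driver):

	/* CLOCK_EVT_MODE_SHUTDOWN, _UNUSED -> .set_state_shutdown() */
	/* CLOCK_EVT_MODE_PERIODIC          -> .set_state_periodic() */
	/* CLOCK_EVT_MODE_ONESHOT           -> .set_state_oneshot()  */
	/* CLOCK_EVT_MODE_RESUME            -> .tick_resume()        */
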
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index ae838ed..6b8b752 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -863,14 +863,7 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	/* When called from the generic PCI probe, read PCI<->PCI bridge
-	 * bases. This is -not- called when generating the PCI tree from
-	 * the OF device-tree.
-	 */
-	if (bus->self != NULL)
-		pci_read_bridge_bases(bus);
-
-	/* Now fixup the bus bus */
+	/* Fixup the bus */
 	pcibios_setup_bus_self(bus);
 
 	/* Now fixup devices on that bus */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 199a835..752acca 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1,8 +1,10 @@
 config MIPS
 	bool
 	default y
+	select ARCH_SUPPORTS_UPROBES
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
@@ -13,7 +15,6 @@
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_BPF_JIT if !CPU_MICROMIPS
-	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -118,6 +119,7 @@
 
 config ATH79
 	bool "Atheros AR71XX/AR724X/AR913X based boards"
+	select ARCH_HAS_RESET_CONTROLLER
 	select ARCH_REQUIRE_GPIOLIB
 	select BOOT_RAW
 	select CEVT_R4K
@@ -408,6 +410,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
 	select DMA_MAYBE_COHERENT
 	select GENERIC_ISA_DMA
 	select HAVE_PCSPKR_PLATFORM
@@ -458,6 +461,7 @@
 	select CEVT_R4K
 	select CSRC_R4K
 	select CLKSRC_MIPS_GIC
+	select COMMON_CLK
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select DMA_NONCOHERENT
@@ -898,6 +902,7 @@
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select ARCH_PHYS_ADDR_T_64BIT
+	select ARCH_REQUIRE_GPIOLIB
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
@@ -947,6 +952,7 @@
 source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
+source "arch/mips/pistachio/Kconfig"
 source "arch/mips/pmcs-msp71xx/Kconfig"
 source "arch/mips/ralink/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
@@ -1040,6 +1046,9 @@
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT
 
+config ARCH_SUPPORTS_UPROBES
+	bool
+
 config DMA_MAYBE_COHERENT
 	select DMA_NONCOHERENT
 	bool
@@ -1070,10 +1079,6 @@
 config SYS_SUPPORTS_HOTPLUG_CPU
 	bool
 
-config I8259
-	bool
-	select IRQ_DOMAIN
-
 config MIPS_BONITO64
 	bool
 
@@ -1367,7 +1372,7 @@
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
 config CPU_MIPS32_R6
-	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	bool "MIPS32 Release 6"
 	depends on SYS_HAS_CPU_MIPS32_R6
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
@@ -1418,7 +1423,7 @@
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
 config CPU_MIPS64_R6
-	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	bool "MIPS64 Release 6"
 	depends on SYS_HAS_CPU_MIPS64_R6
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
@@ -1968,6 +1973,7 @@
 	select TRAD_SIGNALS
 	help
 	  Select this option if you want to build a 32-bit kernel.
+
 config 64BIT
 	bool "64-bit kernel"
 	depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
@@ -2113,7 +2119,7 @@
 
 config MIPS_MT_SMP
 	bool "MIPS MT SMP support (1 TC on each available VPE)"
-	depends on SYS_SUPPORTS_MULTITHREADING
+	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
@@ -2214,7 +2220,7 @@
 
 config MIPS_CMP
 	bool "MIPS CMP framework support (DEPRECATED)"
-	depends on SYS_SUPPORTS_MIPS_CMP
+	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
 	select MIPS_GIC_IPI
 	select SMP
 	select SYNC_R4K
@@ -2231,7 +2237,7 @@
 
 config MIPS_CPS
 	bool "MIPS Coherent Processing System support"
-	depends on SYS_SUPPORTS_MIPS_CPS
+	depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
 	select MIPS_CM
 	select MIPS_CPC
 	select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2306,7 +2312,7 @@
 endchoice
 
 config CPU_HAS_MSA
-	bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
+	bool "Support for the MIPS SIMD Architecture"
 	depends on CPU_SUPPORTS_MSA
 	depends on 64BIT || MIPS_O32_FP64_SUPPORT
 	help
@@ -2646,7 +2652,7 @@
 	  If unsure, say Y. Only embedded should say N here.
 
 config MIPS_O32_FP64_SUPPORT
-	bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
+	bool "Support for O32 binaries using 64-bit FP"
 	depends on 32BIT || MIPS32_O32
 	help
 	  When this is enabled, the kernel will support use of 64-bit floating
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 3a2b775..e250524 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -87,15 +87,6 @@
 	  Select compile flags that produce code that can be processed by the
 	  Corelis mksym utility and UDB Emulator.
 
-config RUNTIME_DEBUG
-	bool "Enable run-time debugging"
-	depends on DEBUG_KERNEL
-	help
-	  If you say Y here, some debugging macros will do run-time checking.
-	  If you say N here, those macros will mostly turn to no-ops.  See
-	  arch/mips/include/asm/debug.h for debugging macros.
-	  If unsure, say N.
-
 config DEBUG_ZBOOT
 	bool "Enable compressed kernel support debugging"
 	depends on DEBUG_KERNEL && SYS_SUPPORTS_ZBOOT
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index b962898..7fa2488 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -6,13 +6,6 @@
 config ALCHEMY_GPIOINT_AU1300
 	bool
 
-# select this in your board config if you don't want to use the gpio
-# namespace as documented in the manuals.  In this case however you need
-# to create the necessary gpio_* functions in your board code/headers!
-# see arch/mips/include/asm/mach-au1x00/gpio.h   for more information.
-config ALCHEMY_GPIO_INDIRECT
-	def_bool n
-
 choice
 	prompt "Machine type"
 	depends on MIPS_ALCHEMY
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index acf9a2a..79efe4c 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -34,6 +34,7 @@
 #include <asm/idle.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <prom.h>
 
 const char *get_system_type(void)
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 1e3b102..85bb756 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -32,6 +32,7 @@
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
 #include <prom.h>
 
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
index f64744f..23800b8 100644
--- a/arch/mips/alchemy/common/Makefile
+++ b/arch/mips/alchemy/common/Makefile
@@ -5,10 +5,5 @@
 # Makefile for the Alchemy Au1xx0 CPUs, generic files.
 #
 
-obj-y += prom.o time.o clock.o platform.o power.o \
+obj-y += prom.o time.o clock.o platform.o power.o gpiolib.o \
 	 setup.o sleeper.o dma.o dbdma.o vss.o irq.o usb.o
-
-# optional gpiolib support
-ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),)
- obj-$(CONFIG_GPIOLIB) += gpiolib.o
-endif
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index 6e46abe..bd34f40 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -35,6 +35,7 @@
 
 #include <linux/init.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/slab.h>
@@ -389,12 +390,11 @@
 	return div1;
 }
 
-static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk,
-					int scale, int maxdiv)
+static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
+				 struct clk_rate_request *req,
+				 int scale, int maxdiv)
 {
-	struct clk *pc, *bpc, *free;
+	struct clk_hw *pc, *bpc, *free;
 	long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
 	int j;
 
@@ -408,7 +408,7 @@
 	 * the one that gets closest to but not over the requested rate.
 	 */
 	for (j = 0; j < 7; j++) {
-		pc = clk_get_parent_by_index(hw->clk, j);
+		pc = clk_hw_get_parent_by_index(hw, j);
 		if (!pc)
 			break;
 
@@ -416,20 +416,20 @@
 		 * XXX: we would actually want clk_has_active_children()
 		 * but this is a good-enough approximation for now.
 		 */
-		if (!__clk_is_prepared(pc)) {
+		if (!clk_hw_is_prepared(pc)) {
 			if (!free)
 				free = pc;
 		}
 
-		pr = clk_get_rate(pc);
-		if (pr < rate)
+		pr = clk_hw_get_rate(pc);
+		if (pr < req->rate)
 			continue;
 
 		/* what can hardware actually provide */
-		tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL);
+		tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
 		nr = pr / tdv;
-		diff = rate - nr;
-		if (nr > rate)
+		diff = req->rate - nr;
+		if (nr > req->rate)
 			continue;
 
 		if (diff < lastdiff) {
@@ -448,15 +448,16 @@
 	 */
 	if (lastdiff && free) {
 		for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
-			tpr = rate * j;
+			tpr = req->rate * j;
 			if (tpr < 0)
 				break;
-			pr = clk_round_rate(free, tpr);
+			pr = clk_hw_round_rate(free, tpr);
 
-			tdv = alchemy_calc_div(rate, pr, scale, maxdiv, NULL);
+			tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
+					       NULL);
 			nr = pr / tdv;
-			diff = rate - nr;
-			if (nr > rate)
+			diff = req->rate - nr;
+			if (nr > req->rate)
 				continue;
 			if (diff < lastdiff) {
 				lastdiff = diff;
@@ -469,9 +470,14 @@
 		}
 	}
 
-	*best_parent_rate = bpr;
-	*best_parent_clk = __clk_get_hw(bpc);
-	return br;
+	if (br < 0)
+		return br;
+
+	req->best_parent_rate = bpr;
+	req->best_parent_hw = bpc;
+	req->rate = br;
+
+	return 0;
 }
 
 static int alchemy_clk_fgv1_en(struct clk_hw *hw)
@@ -562,14 +568,10 @@
 	return parent_rate / v;
 }
 
-static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk)
+static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
+				 struct clk_rate_request *req)
 {
-	return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
-				     best_parent_clk, 2, 512);
+	return alchemy_clk_fgcs_detr(hw, req, 2, 512);
 }
 
 /* Au1000, Au1100, Au15x0, Au12x0 */
@@ -696,11 +698,8 @@
 	return t;
 }
 
-static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk)
+static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
+				 struct clk_rate_request *req)
 {
 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
 	int scale, maxdiv;
@@ -713,8 +712,7 @@
 		maxdiv = 512;
 	}
 
-	return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
-				     best_parent_clk, scale, maxdiv);
+	return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
 }
 
 /* Au1300 larger input mux, no separate disable bit, flexible divider */
@@ -917,17 +915,13 @@
 	return 0;
 }
 
-static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk)
+static int alchemy_clk_csrc_detr(struct clk_hw *hw,
+				 struct clk_rate_request *req)
 {
 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
 	int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
 
-	return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
-				     best_parent_clk, scale, 4);
+	return alchemy_clk_fgcs_detr(hw, req, scale, 4);
 }
 
 static struct clk_ops alchemy_clkops_csrc = {
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 6cb60ab..4c496c5 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -491,7 +491,7 @@
 	default:
 		ret = -EINVAL;
 	}
-	__irq_set_chip_handler_name_locked(d->irq, chip, handler, name);
+	irq_set_chip_handler_name_locked(d, chip, handler, name);
 
 	wmb();
 
@@ -703,7 +703,7 @@
 		return -EINVAL;
 	}
 
-	__irq_set_chip_handler_name_locked(d->irq, &au1300_gpic, hdl, name);
+	irq_set_chip_handler_name_locked(d, &au1300_gpic, hdl, name);
 
 	au1300_gpic_chgcfg(d->irq - ALCHEMY_GPIC_INT_BASE, GPIC_CFG_IC_MASK, s);
 
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 50e17e1..f99d3ec 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -69,11 +69,6 @@
 	return 0;
 }
 
-static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *cd)
-{
-}
-
 static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
 {
 	struct clock_event_device *cd = dev_id;
@@ -86,7 +81,6 @@
 	.features	= CLOCK_EVT_FEAT_ONESHOT,
 	.rating		= 1500,
 	.set_next_event = au1x_rtcmatch2_set_next_event,
-	.set_mode	= au1x_rtcmatch2_set_mode,
 	.cpumask	= cpu_all_mask,
 };
 
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index c98c9ea..324ad72 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/irq.h>
@@ -88,10 +89,11 @@
 static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+	struct irq_chip *chip = irq_desc_get_chip(d);
 
-	disable_irq_nosync(irq);
+	chained_irq_enter(chip, d);
 	generic_handle_irq(bcsr_csc_base + __ffs(bisr));
-	enable_irq(irq);
+	chained_irq_exit(chip, d);
 }
 
 static void bcsr_irq_mask(struct irq_data *d)
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 001102e..bdeed9d 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -33,6 +33,7 @@
 #include <linux/spi/spi_gpio.h>
 #include <linux/spi/ads7846.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1000_dma.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-db1x00/bcsr.h>
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 1c64fdb..b580770 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -24,6 +24,7 @@
 #include <linux/wm97xx.h>
 
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1300.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 0fd5177..5740bcf 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -20,6 +20,7 @@
 #include <linux/spi/flash.h>
 #include <asm/bootinfo.h>
 #include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-au1x00/au1xxx_eth.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index bfeb8f3..93024dc 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -9,7 +9,7 @@
 #include <linux/suspend.h>
 #include <linux/sysfs.h>
 #include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/gpio.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
 #include <asm/mach-db1x00/bcsr.h>
 
 /*
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
index d8dbd8f..f493045 100644
--- a/arch/mips/ar7/gpio.c
+++ b/arch/mips/ar7/gpio.c
@@ -21,7 +21,10 @@
 #include <linux/module.h>
 #include <linux/gpio.h>
 
-#include <asm/mach-ar7/gpio.h>
+#include <asm/mach-ar7/ar7.h>
+
+#define AR7_GPIO_MAX 32
+#define TITAN_GPIO_MAX 51
 
 struct ar7_gpio_chip {
 	void __iomem		*regs;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index be9ff16..58fca9a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -39,7 +39,6 @@
 
 #include <asm/addrspace.h>
 #include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/gpio.h>
 #include <asm/mach-ar7/prom.h>
 
 /*****************************************************************************
@@ -679,7 +678,8 @@
 	}
 
 	if (ar7_has_high_cpmac()) {
-		res = fixed_phy_add(PHY_POLL, cpmac_high.id, &fixed_phy_status);
+		res = fixed_phy_add(PHY_POLL, cpmac_high.id,
+				    &fixed_phy_status, -1);
 		if (!res) {
 			cpmac_get_mac(1, cpmac_high_data.dev_addr);
 
@@ -692,7 +692,7 @@
 	} else
 		cpmac_low_data.phy_mask = 0xffffffff;
 
-	res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
+	res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status, -1);
 	if (!res) {
 		cpmac_get_mac(0, cpmac_low_data.dev_addr);
 		res = platform_device_register(&cpmac_low);
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
index 820b7a3..7bb9a67 100644
--- a/arch/mips/ar7/setup.c
+++ b/arch/mips/ar7/setup.c
@@ -23,7 +23,6 @@
 #include <asm/reboot.h>
 #include <asm/mach-ar7/ar7.h>
 #include <asm/mach-ar7/prom.h>
-#include <asm/mach-ar7/gpio.h>
 
 static void ar7_machine_restart(char *command)
 {
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 5c9ff69..fcc382c 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -8,7 +8,7 @@
 # under the terms of the GNU General Public License version 2 as published
 # by the Free Software Foundation.
 
-obj-y	:= prom.o setup.o irq.o common.o clock.o gpio.o
+obj-y	:= prom.o setup.o irq.o common.o clock.o
 
 obj-$(CONFIG_EARLY_PRINTK)		+= early_printk.o
 obj-$(CONFIG_PCI)			+= pci.o
diff --git a/arch/mips/ath79/common.h b/arch/mips/ath79/common.h
index e5ea712..ca7cc19 100644
--- a/arch/mips/ath79/common.h
+++ b/arch/mips/ath79/common.h
@@ -25,9 +25,6 @@
 void ath79_ddr_ctrl_init(void);
 void ath79_ddr_wb_flush(unsigned int reg);
 
-void ath79_gpio_function_enable(u32 mask);
-void ath79_gpio_function_disable(u32 mask);
-void ath79_gpio_function_setup(u32 set, u32 clear);
 void ath79_gpio_init(void);
 
 #endif /* __ATH79_COMMON_H */
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
deleted file mode 100644
index f59ccb2..0000000
--- a/arch/mips/ath79/gpio.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X/AR913X GPIO API support
- *
- *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
- *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/gpio-ath79.h>
-#include <linux/of_device.h>
-
-#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ath79.h>
-#include "common.h"
-
-static void __iomem *ath79_gpio_base;
-static u32 ath79_gpio_count;
-static DEFINE_SPINLOCK(ath79_gpio_lock);
-
-static void __ath79_gpio_set_value(unsigned gpio, int value)
-{
-	void __iomem *base = ath79_gpio_base;
-
-	if (value)
-		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
-}
-
-static int __ath79_gpio_get_value(unsigned gpio)
-{
-	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
-}
-
-static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
-	return __ath79_gpio_get_value(offset);
-}
-
-static void ath79_gpio_set_value(struct gpio_chip *chip,
-				  unsigned offset, int value)
-{
-	__ath79_gpio_set_value(offset, value);
-}
-
-static int ath79_gpio_direction_input(struct gpio_chip *chip,
-				       unsigned offset)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ath79_gpio_direction_output(struct gpio_chip *chip,
-					unsigned offset, int value)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	if (value)
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
-					int value)
-{
-	void __iomem *base = ath79_gpio_base;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	if (value)
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
-	else
-		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
-
-	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
-		     base + AR71XX_GPIO_REG_OE);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-
-	return 0;
-}
-
-static struct gpio_chip ath79_gpio_chip = {
-	.label			= "ath79",
-	.get			= ath79_gpio_get_value,
-	.set			= ath79_gpio_set_value,
-	.direction_input	= ath79_gpio_direction_input,
-	.direction_output	= ath79_gpio_direction_output,
-	.base			= 0,
-};
-
-static void __iomem *ath79_gpio_get_function_reg(void)
-{
-	u32 reg = 0;
-
-	if (soc_is_ar71xx() ||
-	    soc_is_ar724x() ||
-	    soc_is_ar913x() ||
-	    soc_is_ar933x())
-		reg = AR71XX_GPIO_REG_FUNC;
-	else if (soc_is_ar934x())
-		reg = AR934X_GPIO_REG_FUNC;
-	else
-		BUG();
-
-	return ath79_gpio_base + reg;
-}
-
-void ath79_gpio_function_setup(u32 set, u32 clear)
-{
-	void __iomem *reg = ath79_gpio_get_function_reg();
-	unsigned long flags;
-
-	spin_lock_irqsave(&ath79_gpio_lock, flags);
-
-	__raw_writel((__raw_readl(reg) & ~clear) | set, reg);
-	/* flush write */
-	__raw_readl(reg);
-
-	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
-}
-
-void ath79_gpio_function_enable(u32 mask)
-{
-	ath79_gpio_function_setup(mask, 0);
-}
-
-void ath79_gpio_function_disable(u32 mask)
-{
-	ath79_gpio_function_setup(0, mask);
-}
-
-static const struct of_device_id ath79_gpio_of_match[] = {
-	{ .compatible = "qca,ar7100-gpio" },
-	{ .compatible = "qca,ar9340-gpio" },
-	{},
-};
-
-static int ath79_gpio_probe(struct platform_device *pdev)
-{
-	struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
-	struct device_node *np = pdev->dev.of_node;
-	struct resource *res;
-	bool oe_inverted;
-	int err;
-
-	if (np) {
-		err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
-		if (err) {
-			dev_err(&pdev->dev, "ngpios property is not valid\n");
-			return err;
-		}
-		if (ath79_gpio_count >= 32) {
-			dev_err(&pdev->dev, "ngpios must be less than 32\n");
-			return -EINVAL;
-		}
-		oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
-	} else if (pdata) {
-		ath79_gpio_count = pdata->ngpios;
-		oe_inverted = pdata->oe_inverted;
-	} else {
-		dev_err(&pdev->dev, "No DT node or platform data found\n");
-		return -EINVAL;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ath79_gpio_base = devm_ioremap_nocache(
-		&pdev->dev, res->start, resource_size(res));
-	if (!ath79_gpio_base)
-		return -ENOMEM;
-
-	ath79_gpio_chip.dev = &pdev->dev;
-	ath79_gpio_chip.ngpio = ath79_gpio_count;
-	if (oe_inverted) {
-		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
-		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
-	}
-
-	err = gpiochip_add(&ath79_gpio_chip);
-	if (err) {
-		dev_err(&pdev->dev,
-			"cannot add AR71xx GPIO chip, error=%d", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static struct platform_driver ath79_gpio_driver = {
-	.driver = {
-		.name = "ath79-gpio",
-		.of_match_table	= ath79_gpio_of_match,
-	},
-	.probe = ath79_gpio_probe,
-};
-
-module_platform_driver(ath79_gpio_driver);
-
-int gpio_get_value(unsigned gpio)
-{
-	if (gpio < ath79_gpio_count)
-		return __ath79_gpio_get_value(gpio);
-
-	return __gpio_get_value(gpio);
-}
-EXPORT_SYMBOL(gpio_get_value);
-
-void gpio_set_value(unsigned gpio, int value)
-{
-	if (gpio < ath79_gpio_count)
-		__ath79_gpio_set_value(gpio, value);
-	else
-		__gpio_set_value(gpio, value);
-}
-EXPORT_SYMBOL(gpio_set_value);
-
-int gpio_to_irq(unsigned gpio)
-{
-	/* FIXME */
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	/* FIXME */
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index afb0096..807132b 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -17,7 +17,6 @@
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <linux/of_irq.h>
-#include "../../../drivers/irqchip/irqchip.h"
 
 #include <asm/irq_cpu.h>
 #include <asm/mipsregs.h>
@@ -124,8 +123,6 @@
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
 
 	if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
@@ -137,8 +134,6 @@
 	} else {
 		spurious_interrupt();
 	}
-
-	enable_irq(irq);
 }
 
 static void ar934x_ip2_irq_init(void)
@@ -157,14 +152,12 @@
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
 	status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
 
 	if (status == 0) {
 		spurious_interrupt();
-		goto enable;
+		return;
 	}
 
 	if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
@@ -176,17 +169,12 @@
 		/* TODO: flush DDR? */
 		generic_handle_irq(ATH79_IP2_IRQ(1));
 	}
-
-enable:
-	enable_irq(irq);
 }
 
 static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
 {
 	u32 status;
 
-	disable_irq_nosync(irq);
-
 	status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
 	status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
 		  QCA955X_EXT_INT_USB1 |
@@ -194,7 +182,7 @@
 
 	if (status == 0) {
 		spurious_interrupt();
-		goto enable;
+		return;
 	}
 
 	if (status & QCA955X_EXT_INT_USB1) {
@@ -211,9 +199,6 @@
 		/* TODO: flush DDR? */
 		generic_handle_irq(ATH79_IP3_IRQ(2));
 	}
-
-enable:
-	enable_irq(irq);
 }
 
 static void qca955x_irq_init(void)
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 08a4abf..52caa75 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -396,10 +396,9 @@
 {
 	size_t size = nbuttons * sizeof(*buttons);
 
-	bcm47xx_button_pdata.buttons = kmalloc(size, GFP_KERNEL);
+	bcm47xx_button_pdata.buttons = kmemdup(buttons, size, GFP_KERNEL);
 	if (!bcm47xx_button_pdata.buttons)
 		return -ENOMEM;
-	memcpy(bcm47xx_button_pdata.buttons, buttons, size);
 	bcm47xx_button_pdata.nbuttons = nbuttons;
 
 	return 0;
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 98c075f..17503a0 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -263,7 +263,7 @@
 	bcm47xx_leds_register();
 	bcm47xx_workarounds();
 
-	fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
+	fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status, -1);
 	return 0;
 }
 device_initcall(bcm47xx_register_bus_complete);
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index e3e808a..1a47ec2 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -60,7 +60,7 @@
 	if (m)
 		enable &= cpumask_test_cpu(cpu, m);
 	else if (irqd_affinity_was_set(d))
-		enable &= cpumask_test_cpu(cpu, d->affinity);
+		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
 #endif
 	return enable;
 }
@@ -365,9 +365,9 @@
 
 	irqd_set_trigger_type(d, flow_type);
 	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
diff --git a/arch/mips/bmips/irq.c b/arch/mips/bmips/irq.c
index 14552e5..e7fc6f934 100644
--- a/arch/mips/bmips/irq.c
+++ b/arch/mips/bmips/irq.c
@@ -34,5 +34,5 @@
 	irqchip_init();
 }
 
-OF_DECLARE_2(irqchip, mips_cpu_intc, "mti,cpu-interrupt-controller",
+IRQCHIP_DECLARE(mips_cpu_intc, "mti,cpu-interrupt-controller",
 	     mips_cpu_irq_of_init);
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index dc91bde..d5bdee1 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -78,7 +78,7 @@
 
 vmlinuzobjs-y += $(obj)/piggy.o
 
-quiet_cmd_zld = LD	$@
+quiet_cmd_zld = LD      $@
       cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
 quiet_cmd_strip = STRIP	  $@
       cmd_strip = $(STRIP) -s $@
diff --git a/arch/mips/boot/dts/netlogic/xlp_evp.dts b/arch/mips/boot/dts/netlogic/xlp_evp.dts
index 89ad048..ec16ec2 100644
--- a/arch/mips/boot/dts/netlogic/xlp_evp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_evp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp832-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_fvp.dts b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
index 63e62b7..4bcebe6 100644
--- a/arch/mips/boot/dts/netlogic/xlp_fvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_fvp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp208-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_gvp.dts b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
index bb4ecd1..b3ccb82 100644
--- a/arch/mips/boot/dts/netlogic/xlp_gvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_gvp.dts
@@ -69,6 +69,17 @@
 			};
 		};
 
+		gpio: xlp_gpio@114100 {
+			compatible = "netlogic,xlp980-gpio";
+			reg = <0 0x114100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_rvp.dts b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
index 7188aed..3783639a 100644
--- a/arch/mips/boot/dts/netlogic/xlp_rvp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_rvp.dts
@@ -69,6 +69,17 @@
 			};
 		};
 
+		gpio: xlp_gpio@114100 {
+			compatible = "netlogic,xlp532-gpio";
+			reg = <0 0x114100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/netlogic/xlp_svp.dts b/arch/mips/boot/dts/netlogic/xlp_svp.dts
index 1ebd00ed..44d6640 100644
--- a/arch/mips/boot/dts/netlogic/xlp_svp.dts
+++ b/arch/mips/boot/dts/netlogic/xlp_svp.dts
@@ -110,6 +110,18 @@
 				read-only;
 			};
 		};
+
+		gpio: xlp_gpio@34100 {
+			compatible = "netlogic,xlp316-gpio";
+			reg = <0 0x34100 0x1000>;
+			#gpio-cells = <2>;
+			gpio-controller;
+
+			#interrupt-cells = <2>;
+			interrupt-parent = <&pic>;
+			interrupts = <39>;
+			interrupt-controller;
+		};
 	};
 
 	chosen {
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 4759cff..fb7734e 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -115,6 +115,14 @@
 				interrupt-controller;
 				#interrupt-cells = <1>;
 			};
+
+			rst: reset-controller@1806001c {
+				compatible = "qca,ar9132-reset",
+						"qca,ar7100-reset";
+				reg = <0x1806001c 0x4>;
+
+				#reset-cells = <1>;
+			};
 		};
 
 		spi@1f000000 {
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 9eb0fee..36e30d6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -195,6 +195,12 @@
 			return 8;
 		else
 			return -1;
+	case CVMX_BOARD_TYPE_KONTRON_S1901:
+		if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
+			return 1;
+		else
+			return -1;
+
 	}
 
 	/* Some unknown board. Somebody forgot to update this function... */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index 453d7f6..b45b297 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -95,9 +95,9 @@
 	uint8_t *data_address;
 	uint8_t *end_of_data;
 
-	cvmx_dprintf("Packet Length:   %u\n", work->len);
-	cvmx_dprintf("	  Input Port:  %u\n", work->ipprt);
-	cvmx_dprintf("	  QoS:	       %u\n", work->qos);
+	cvmx_dprintf("Packet Length:   %u\n", work->word1.len);
+	cvmx_dprintf("	  Input Port:  %u\n", cvmx_wqe_get_port(work));
+	cvmx_dprintf("	  QoS:	       %u\n", cvmx_wqe_get_qos(work));
 	cvmx_dprintf("	  Buffers:     %u\n", work->word2.s.bufs);
 
 	if (work->word2.s.bufs == 0) {
@@ -127,7 +127,7 @@
 		}
 	} else
 		buffer_ptr = work->packet_ptr;
-	remaining_bytes = work->len;
+	remaining_bytes = work->word1.len;
 
 	while (remaining_bytes) {
 		start_of_buffer =
@@ -382,6 +382,10 @@
 		return port + 32;
 	case 3:
 		return port + 36;
+	case 4:
+		return port + 40;
+	case 5:
+		return port + 44;
 	}
 	return -1;
 }
@@ -404,6 +408,10 @@
 		return 2;
 	else if (ipd_port < 40)
 		return 3;
+	else if (ipd_port < 44)
+		return 4;
+	else if (ipd_port < 48)
+		return 5;
 	else
 		cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
 			     "port number\n");
@@ -428,6 +436,10 @@
 		return ipd_port & 3;
 	else if (ipd_port < 40)
 		return ipd_port & 3;
+	else if (ipd_port < 44)
+		return ipd_port & 3;
+	else if (ipd_port < 48)
+		return ipd_port & 3;
 	else
 		cvmx_dprintf("cvmx_helper_get_interface_index_num: "
 			     "Illegal IPD port number\n");
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 7653b7e..a56ee59 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -124,6 +124,13 @@
 	union cvmx_gmxx_tx_int_en gmx_tx_int_en;
 	union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
 
+	/* Setup PKND */
+	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
+		gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
+		gmx_cfg.s.pknd = cvmx_helper_get_ipd_port(interface, 0);
+		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
+	}
+
 	/* (1) Interface has already been enabled. */
 
 	/* (2) Disable GMX. */
@@ -151,7 +158,12 @@
 	/* (4)c Apply reset sequence */
 	xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
 	xauiCtl.s.lo_pwr = 0;
-	xauiCtl.s.reset = 1;
+
+	/* Issuing a reset here seems to hang some CN68XX chips. */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X) &&
+	    !OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X))
+		xauiCtl.s.reset = 1;
+
 	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
 
 	/* Wait for PCS to come out of reset */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 7e5cf7a..376701f 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -83,6 +83,8 @@
  */
 int cvmx_helper_get_number_of_interfaces(void)
 {
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return 9;
 	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
 		return 4;
 	else
@@ -656,6 +658,21 @@
 	fau_to.s.tout_val = 0xfff;
 	fau_to.s.tout_enb = 0;
 	cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_pko_reg_min_pkt min_pkt;
+
+		min_pkt.u64 = 0;
+		min_pkt.s.size1 = 59;
+		min_pkt.s.size2 = 59;
+		min_pkt.s.size3 = 59;
+		min_pkt.s.size4 = 59;
+		min_pkt.s.size5 = 59;
+		min_pkt.s.size6 = 59;
+		min_pkt.s.size7 = 59;
+		cvmx_write_csr(CVMX_PKO_REG_MIN_PKT, min_pkt.u64);
+	}
+
 	return 0;
 }
 
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 008b881..87be167 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -39,6 +39,143 @@
  * Internal state of packet output
  */
 
+static int __cvmx_pko_int(int interface, int index)
+{
+	switch (interface) {
+	case 0:
+		return index;
+	case 1:
+		return 4;
+	case 2:
+		return index + 0x08;
+	case 3:
+		return index + 0x0c;
+	case 4:
+		return index + 0x10;
+	case 5:
+		return 0x1c;
+	case 6:
+		return 0x1d;
+	case 7:
+		return 0x1e;
+	case 8:
+		return 0x1f;
+	default:
+		return -1;
+	}
+}
+
+static void __cvmx_pko_iport_config(int pko_port)
+{
+	int queue;
+	const int num_queues = 1;
+	const int base_queue = pko_port;
+	const int static_priority_end = 1;
+	const int static_priority_base = 1;
+
+	for (queue = 0; queue < num_queues; queue++) {
+		union cvmx_pko_mem_iqueue_ptrs config;
+		cvmx_cmd_queue_result_t cmd_res;
+		uint64_t *buf_ptr;
+
+		config.u64		= 0;
+		config.s.index		= queue;
+		config.s.qid		= base_queue + queue;
+		config.s.ipid		= pko_port;
+		config.s.tail		= (queue == (num_queues - 1));
+		config.s.s_tail		= (queue == static_priority_end);
+		config.s.static_p	= (static_priority_base >= 0);
+		config.s.static_q	= (queue <= static_priority_end);
+		config.s.qos_mask	= 0xff;
+
+		cmd_res = cvmx_cmd_queue_initialize(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue),
+				CVMX_PKO_MAX_QUEUE_DEPTH,
+				CVMX_FPA_OUTPUT_BUFFER_POOL,
+				(CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE -
+				 CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8));
+
+		WARN(cmd_res,
+		     "%s: cmd_res=%d pko_port=%d base_queue=%d num_queues=%d queue=%d\n",
+			__func__, (int)cmd_res, pko_port, base_queue,
+			num_queues, queue);
+
+		buf_ptr = (uint64_t *)cvmx_cmd_queue_buffer(
+				CVMX_CMD_QUEUE_PKO(base_queue + queue));
+		config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr) >> 7;
+		CVMX_SYNCWS;
+		cvmx_write_csr(CVMX_PKO_MEM_IQUEUE_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_queue_alloc_o68(void)
+{
+	int port;
+
+	for (port = 0; port < 48; port++)
+		__cvmx_pko_iport_config(port);
+}
+
+static void __cvmx_pko_port_map_o68(void)
+{
+	int port;
+	int interface, index;
+	cvmx_helper_interface_mode_t mode;
+	union cvmx_pko_mem_iport_ptrs config;
+
+	/*
+	 * Initialize every iport with the invalid eid.
+	 */
+	config.u64 = 0;
+	config.s.eid = 31; /* Invalid */
+	for (port = 0; port < 128; port++) {
+		config.s.ipid = port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+
+	/*
+	 * Set up PKO_MEM_IPORT_PTRS
+	 */
+	for (port = 0; port < 48; port++) {
+		interface = cvmx_helper_get_interface_num(port);
+		index = cvmx_helper_get_interface_index_num(port);
+		mode = cvmx_helper_interface_get_mode(interface);
+		if (mode == CVMX_HELPER_INTERFACE_MODE_DISABLED)
+			continue;
+
+		config.s.ipid = port;
+		config.s.qos_mask = 0xff;
+		config.s.crc = 1;
+		config.s.min_pkt = 1;
+		config.s.intr = __cvmx_pko_int(interface, index);
+		config.s.eid = config.s.intr;
+		config.s.pipe = (mode == CVMX_HELPER_INTERFACE_MODE_LOOP) ?
+			index : port;
+		cvmx_write_csr(CVMX_PKO_MEM_IPORT_PTRS, config.u64);
+	}
+}
+
+static void __cvmx_pko_chip_init(void)
+{
+	int i;
+
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		__cvmx_pko_port_map_o68();
+		__cvmx_pko_queue_alloc_o68();
+		return;
+	}
+
+	/*
+	 * Initialize queues
+	 */
+	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) {
+		const uint64_t priority = 8;
+
+		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
+				     &priority);
+	}
+}
+
 /**
  * Call before any other calls to initialize the packet
  * output system.  This does chip global config, and should only be
@@ -47,8 +184,6 @@
 
 void cvmx_pko_initialize_global(void)
 {
-	int i;
-	uint64_t priority = 8;
 	union cvmx_pko_reg_cmd_buf config;
 
 	/*
@@ -62,9 +197,10 @@
 
 	cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
 
-	for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
-		cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
-				     &priority);
+	/*
+	 * Chip-specific setup.
+	 */
+	__cvmx_pko_chip_init();
 
 	/*
 	 * If we aren't using all of the queues optimize PKO's
@@ -212,6 +348,9 @@
 	int static_priority_base = -1;
 	int static_priority_end = -1;
 
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return CVMX_PKO_SUCCESS;
+
 	if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
 	    && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
 		cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index d8124a3..f26c3c6 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -225,13 +225,14 @@
 
 #ifdef CONFIG_SMP
 	int cpu;
-	int weight = cpumask_weight(data->affinity);
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
+	int weight = cpumask_weight(mask);
 	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
 	if (weight > 1) {
 		cpu = cd->current_cpu;
 		for (;;) {
-			cpu = cpumask_next(cpu, data->affinity);
+			cpu = cpumask_next(cpu, mask);
 			if (cpu >= nr_cpu_ids) {
 				cpu = -1;
 				continue;
@@ -240,7 +241,7 @@
 			}
 		}
 	} else if (weight == 1) {
-		cpu = cpumask_first(data->affinity);
+		cpu = cpumask_first(mask);
 	} else {
 		cpu = smp_processor_id();
 	}
@@ -662,6 +663,11 @@
 	irqd_set_trigger_type(data, t);
 	octeon_irq_gpio_setup(data);
 
+	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(data, handle_edge_irq);
+	else
+		irq_set_handler_locked(data, handle_level_irq);
+
 	return IRQ_SET_MASK_OK;
 }
 
@@ -696,32 +702,23 @@
 	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_data *data = irq_desc_get_irq_data(desc);
-
-	if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
-		handle_edge_irq(irq, desc);
-	else
-		handle_level_irq(irq, desc);
-}
-
 #ifdef CONFIG_SMP
 
 static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
 {
 	int cpu = smp_processor_id();
 	cpumask_t new_affinity;
+	struct cpumask *mask = irq_data_get_affinity_mask(data);
 
-	if (!cpumask_test_cpu(cpu, data->affinity))
+	if (!cpumask_test_cpu(cpu, mask))
 		return;
 
-	if (cpumask_weight(data->affinity) > 1) {
+	if (cpumask_weight(mask) > 1) {
 		/*
 		 * It has multi CPU affinity, just remove this CPU
 		 * from the affinity set.
 		 */
-		cpumask_copy(&new_affinity, data->affinity);
+		cpumask_copy(&new_affinity, mask);
 		cpumask_clear_cpu(cpu, &new_affinity);
 	} else {
 		/* Otherwise, put it on lowest numbered online CPU. */
@@ -1227,8 +1224,13 @@
 		octeon_irq_ciu_to_irq[line][bit] != 0)
 		return -EINVAL;
 
+	/*
+	 * Default to handle_level_irq. If the DT contains a different
+	 * trigger type, it will call the irq_set_type callback and
+	 * the handler gets updated.
+	 */
 	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-		octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+				       octeon_irq_gpio_chip, handle_level_irq);
 	return r;
 }
 
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 1f85460..40ec4ca 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -16,6 +16,5 @@
 generic-y += segment.h
 generic-y += serial.h
 generic-y += trace_clock.h
-generic-y += ucontext.h
 generic-y += user.h
 generic-y += xor.h
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 7186bb5..37f8407 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -20,6 +20,10 @@
 				     struct pt_regs *regs, sigset_t *set);
 	const unsigned long	rt_signal_return_offset;
 	const unsigned long	restart;
+
+	unsigned	off_sc_fpregs;
+	unsigned	off_sc_fpc_csr;
+	unsigned	off_sc_used_math;
 };
 
 #endif /* _ASM_ABI_H */
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 76317a7..867f924 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -232,6 +232,30 @@
 	.set	pop
 	.endm
 
+	.macro	ld_b	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.b	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	ld_h	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.h	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	ld_w	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.w	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -241,6 +265,30 @@
 	.set	pop
 	.endm
 
+	.macro	st_b	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.b	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_h	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.h	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_w	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.w	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
 	.macro	st_d	wd, off, base
 	.set	push
 	.set	mips32r2
@@ -290,7 +338,13 @@
 #ifdef CONFIG_CPU_MICROMIPS
 #define CFC_MSA_INSN		0x587e0056
 #define CTC_MSA_INSN		0x583e0816
+#define LDB_MSA_INSN		0x58000807
+#define LDH_MSA_INSN		0x58000817
+#define LDW_MSA_INSN		0x58000827
 #define LDD_MSA_INSN		0x58000837
+#define STB_MSA_INSN		0x5800080f
+#define STH_MSA_INSN		0x5800081f
+#define STW_MSA_INSN		0x5800082f
 #define STD_MSA_INSN		0x5800083f
 #define COPY_UW_MSA_INSN	0x58f00056
 #define COPY_UD_MSA_INSN	0x58f80056
@@ -299,7 +353,13 @@
 #else
 #define CFC_MSA_INSN		0x787e0059
 #define CTC_MSA_INSN		0x783e0819
+#define LDB_MSA_INSN		0x78000820
+#define LDH_MSA_INSN		0x78000821
+#define LDW_MSA_INSN		0x78000822
 #define LDD_MSA_INSN		0x78000823
+#define STB_MSA_INSN		0x78000824
+#define STH_MSA_INSN		0x78000825
+#define STW_MSA_INSN		0x78000826
 #define STD_MSA_INSN		0x78000827
 #define COPY_UW_MSA_INSN	0x78f00059
 #define COPY_UD_MSA_INSN	0x78f80059
@@ -329,6 +389,33 @@
 	.set	pop
 	.endm
 
+	.macro	ld_b	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDB_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_h	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDH_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_w	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	LDW_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
 	.macro	ld_d	wd, off, base
 	.set	push
 	.set	noat
@@ -338,6 +425,33 @@
 	.set	pop
 	.endm
 
+	.macro	st_b	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STB_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_h	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STH_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_w	wd, off, base
+	.set	push
+	.set	noat
+	SET_HARDFLOAT
+	addu	$1, \base, \off
+	.word	STW_MSA_INSN | (\wd << 6)
+	.set	pop
+	.endm
+
 	.macro	st_d	wd, off, base
 	.set	push
 	.set	noat
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 26d4363..4c42fd9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -137,6 +137,10 @@
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
+ATOMIC_OP(and, &=, and)
+ATOMIC_OP(or, |=, or)
+ATOMIC_OP(xor, ^=, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -416,6 +420,9 @@
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
+ATOMIC64_OP(and, &=, and)
+ATOMIC64_OP(or, |=, or)
+ATOMIC64_OP(xor, ^=, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 7ecba84..752e0b8 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -133,12 +133,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
diff --git a/arch/mips/include/asm/cdmm.h b/arch/mips/include/asm/cdmm.h
index 16e22ce..bece206 100644
--- a/arch/mips/include/asm/cdmm.h
+++ b/arch/mips/include/asm/cdmm.h
@@ -53,7 +53,7 @@
  * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
  *
  * Picking a suitable physical address at which to map the CDMM region is
- * platform specific, so this weak function can be defined by platform code to
+ * platform specific, so this function can be defined by platform code to
  * pick a suitable value if none is configured by the bootloader.
  *
  * This address must be 32kB aligned, and the region occupies a maximum of 32kB
@@ -61,7 +61,7 @@
  *
  * Returns:	Physical base address for CDMM region, or 0 on failure.
  */
-phys_addr_t __weak mips_cdmm_phys_base(void);
+phys_addr_t mips_cdmm_phys_base(void);
 
 extern struct bus_type mips_cdmm_bustype;
 void __iomem *mips_cdmm_early_probe(unsigned int dev_type);
diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index f0edf6f..2e13a03 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -21,7 +21,6 @@
 
 void mips_event_handler(struct clock_event_device *dev);
 int c0_compare_int_usable(void);
-void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
 irqreturn_t c0_compare_interrupt(int, void *);
 
 extern struct irqaction c0_compare_irqaction;
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index f25de77..9801ac9 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -411,4 +411,8 @@
 # define cpu_has_cdmm		(cpu_data[0].options & MIPS_CPU_CDMM)
 #endif
 
+#ifndef cpu_has_small_pages
+# define cpu_has_small_pages	(cpu_data[0].options & MIPS_CPU_SP)
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index d41e8e2..abee2bf 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -77,6 +77,10 @@
 	 */
 #endif
 
+#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
+	case CPU_I6400:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_R3000
 	case CPU_R2000:
 	case CPU_R3000:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index e46e406..cd89e98 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -120,6 +120,7 @@
 #define PRID_IMP_PROAPTIV_MP	0xa300
 #define PRID_IMP_M5150		0xa700
 #define PRID_IMP_P5600		0xa800
+#define PRID_IMP_I6400		0xa900
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -307,6 +308,7 @@
 	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
 	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
 	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
+	CPU_I6400,
 
 	/*
 	 * MIPS64 class processors
@@ -382,6 +384,7 @@
 #define MIPS_CPU_XPA		0x2000000000ull /* CPU supports Extended Physical Addressing */
 #define MIPS_CPU_CDMM		0x4000000000ull	/* CPU has Common Device Memory Map */
 #define MIPS_CPU_BP_GHIST	0x8000000000ull /* R12K+ Branch Prediction Global History */
+#define MIPS_CPU_SP		0x10000000000ull /* Small (1KB) page support */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/debug.h b/arch/mips/include/asm/debug.h
deleted file mode 100644
index 1fd5a2b..0000000
--- a/arch/mips/include/asm/debug.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Debug macros for run-time debugging.
- * Turned on/off with CONFIG_RUNTIME_DEBUG option.
- *
- * Copyright (C) 2001 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- *
- */
-
-#ifndef _ASM_DEBUG_H
-#define _ASM_DEBUG_H
-
-
-/*
- * run-time macros for catching spurious errors.  Eable CONFIG_RUNTIME_DEBUG in
- * kernel hacking config menu to use them.
- *
- * Use them as run-time debugging aid.  NEVER USE THEM AS ERROR HANDLING CODE!!!
- */
-
-#ifdef CONFIG_RUNTIME_DEBUG
-
-#include <linux/kernel.h>
-
-#define db_assert(x)  if (!(x)) { \
-	panic("assertion failed at %s:%d: %s", __FILE__, __LINE__, #x); }
-#define db_warn(x)  if (!(x)) { \
-	printk(KERN_WARNING "warning at %s:%d: %s", __FILE__, __LINE__, #x); }
-#define db_verify(x, y) db_assert(x y)
-#define db_verify_warn(x, y) db_warn(x y)
-#define db_run(x)  do { x; } while (0)
-
-#else
-
-#define db_assert(x)
-#define db_warn(x)
-#define db_verify(x, y) x
-#define db_verify_warn(x, y) x
-#define db_run(x)
-
-#endif
-
-#endif /* _ASM_DEBUG_H */
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index f19e890..53b2693 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -382,7 +382,9 @@
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
 
-#define ELF_HWCAP	(0)
+#define ELF_HWCAP	(elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
 
 /*
  * This yields a string that ld.so will use to load implementation
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 1b06251..9cbf383 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -164,25 +164,30 @@
 	return ret;
 }
 
-static inline void lose_fpu(int save)
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
 {
-	preempt_disable();
 	if (is_msa_enabled()) {
 		if (save) {
-			save_msa(current);
-			current->thread.fpu.fcr31 =
+			save_msa(tsk);
+			tsk->thread.fpu.fcr31 =
 					read_32bit_cp1_register(CP1_STATUS);
 		}
 		disable_msa();
-		clear_thread_flag(TIF_USEDMSA);
+		clear_tsk_thread_flag(tsk, TIF_USEDMSA);
 		__disable_fpu();
 	} else if (is_fpu_owner()) {
 		if (save)
-			_save_fp(current);
+			_save_fp(tsk);
 		__disable_fpu();
 	}
-	KSTK_STATUS(current) &= ~ST0_CU1;
-	clear_thread_flag(TIF_USEDFPU);
+	KSTK_STATUS(tsk) &= ~ST0_CU1;
+	clear_tsk_thread_flag(tsk, TIF_USEDFPU);
+}
+
+static inline void lose_fpu(int save)
+{
+	preempt_disable();
+	lose_fpu_inatomic(save, current);
 	preempt_enable();
 }
 
diff --git a/arch/mips/include/asm/gpio.h b/arch/mips/include/asm/gpio.h
deleted file mode 100644
index 06e46fa..0000000
--- a/arch/mips/include/asm/gpio.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_MIPS_GPIO_H
-#define __ASM_MIPS_GPIO_H
-
-#include <gpio.h>
-
-#endif /* __ASM_MIPS_GPIO_H */
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f0db99f..15e0fec 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -49,7 +49,7 @@
 extern int cp0_perfcount_irq;
 extern int cp0_fdc_irq;
 
-extern int __weak get_c0_fdc_int(void);
+extern int get_c0_fdc_int(void);
 
 void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 608aa57..e776725 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -26,14 +26,29 @@
 #define NOP_INSN "nop"
 #endif
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\t" NOP_INSN "\n\t"
 		"nop\n\t"
 		".pushsection __jump_table,  \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
 		".popsection\n\t"
-		: :  "i" (key) : : l_yes);
+		: :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\tj %l[l_yes]\n\t"
+		"nop\n\t"
+		".pushsection __jump_table,  \"aw\"\n\t"
+		WORD_INSN " 1b, %l[l_yes], %0\n\t"
+		".popsection\n\t"
+		: :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/mips/include/asm/kdebug.h b/arch/mips/include/asm/kdebug.h
index cba22ab..8e3d08e 100644
--- a/arch/mips/include/asm/kdebug.h
+++ b/arch/mips/include/asm/kdebug.h
@@ -11,7 +11,9 @@
 	DIE_PAGE_FAULT,
 	DIE_BREAK,
 	DIE_SSTEPBP,
-	DIE_MSAFP
+	DIE_MSAFP,
+	DIE_UPROBE,
+	DIE_UPROBE_XOL,
 };
 
 #endif /* _ASM_MIPS_KDEBUG_H */
diff --git a/arch/mips/include/asm/linkage.h b/arch/mips/include/asm/linkage.h
index 2767dda..99651b0 100644
--- a/arch/mips/include/asm/linkage.h
+++ b/arch/mips/include/asm/linkage.h
@@ -5,7 +5,6 @@
 #include <asm/asm.h>
 #endif
 
-#define __weak __attribute__((weak))
 #define cond_syscall(x) asm(".weak\t" #x "\n" #x "\t=\tsys_ni_syscall")
 #define SYSCALL_ALIAS(alias, name)					\
 	asm ( #alias " = " #name "\n\t.globl " #alias)
diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h
index 6c62b0f..b02891f 100644
--- a/arch/mips/include/asm/maar.h
+++ b/arch/mips/include/asm/maar.h
@@ -26,7 +26,7 @@
  *
  * Return:	The number of MAAR pairs configured.
  */
-unsigned __weak platform_maar_init(unsigned num_pairs);
+unsigned platform_maar_init(unsigned num_pairs);
 
 /**
  * write_maar_pair() - write to a pair of MAARs
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index a47ea0c..468cbd6 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -203,4 +203,8 @@
 int __init ar7_gpio_init(void);
 void __init ar7_init_clocks(void);
 
+/* Board specific GPIO functions */
+int ar7_gpio_enable(unsigned gpio);
+int ar7_gpio_disable(unsigned gpio);
+
 #endif /* __AR7_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/gpio.h b/arch/mips/include/asm/mach-ar7/gpio.h
deleted file mode 100644
index c177cd1..0000000
--- a/arch/mips/include/asm/mach-ar7/gpio.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Florian Fainelli <florian@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef __AR7_GPIO_H__
-#define __AR7_GPIO_H__
-
-#include <asm/mach-ar7/ar7.h>
-
-#define AR7_GPIO_MAX 32
-#define TITAN_GPIO_MAX	51
-#define NR_BUILTIN_GPIO TITAN_GPIO_MAX
-
-#define gpio_to_irq(gpio)	-1
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-/* Board specific GPIO functions */
-int ar7_gpio_enable(unsigned gpio);
-int ar7_gpio_disable(unsigned gpio);
-
-#include <asm-generic/gpio.h>
-
-#endif
diff --git a/arch/mips/include/asm/mach-ath25/gpio.h b/arch/mips/include/asm/mach-ath25/gpio.h
deleted file mode 100644
index 713564b..0000000
--- a/arch/mips/include/asm/mach-ath25/gpio.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __ASM_MACH_ATH25_GPIO_H
-#define __ASM_MACH_ATH25_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-static inline int irq_to_gpio(unsigned irq)
-{
-	return -EINVAL;
-}
-
-#endif	/* __ASM_MACH_ATH25_GPIO_H */
diff --git a/arch/mips/include/asm/mach-ath79/gpio.h b/arch/mips/include/asm/mach-ath79/gpio.h
deleted file mode 100644
index 60dcb62..0000000
--- a/arch/mips/include/asm/mach-ath79/gpio.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *  Atheros AR71XX/AR724X/AR913X GPIO API definitions
- *
- *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
- *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- */
-
-#ifndef __ASM_MACH_ATH79_GPIO_H
-#define __ASM_MACH_ATH79_GPIO_H
-
-#define ARCH_NR_GPIOS	64
-#include <asm-generic/gpio.h>
-
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-
-#define gpio_cansleep	__gpio_cansleep
-
-#endif /* __ASM_MACH_ATH79_GPIO_H */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 9785e4e..adde1fa 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -266,6 +266,17 @@
 	return -ENXIO;
 }
 
+/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
+ * SYS_PININPUTEN is written to at least once.  On Au1550/Au1200/Au1300 this
+ * register enables use of GPIOs as wake source.
+ */
+static inline void alchemy_gpio1_input_enable(void)
+{
+	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
+	__raw_writel(0, base + 0x110);		/* the write op is key */
+	wmb();
+}
+
 /*
  * GPIO2 block macros for common linux GPIO functions. The 'gpio'
  * parameter must be in range of ALCHEMY_GPIO2_BASE..ALCHEMY_GPIO2_MAX.
@@ -518,141 +529,4 @@
 	return -ENXIO;
 }
 
-/**********************************************************************/
-
-/* Linux gpio framework integration.
- *
- * 4 use cases of Au1000-Au1200 GPIOS:
- *(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y:
- *	Board must register gpiochips.
- *(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n:
- *	2 (1 for Au1000) gpio_chips are registered.
- *
- *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
- *	the boards' gpio.h must provide the linux gpio wrapper functions,
- *
- *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
- *	inlinable gpio functions are provided which enable access to the
- *	Au1000 gpios only by using the numbers straight out of the data-
- *	sheets.
-
- * Cases 1 and 3 are intended for boards which want to provide their own
- * GPIO namespace and -operations (i.e. for example you have 8 GPIOs
- * which are in part provided by spare Au1000 GPIO pins and in part by
- * an external FPGA but you still want them to be accssible in linux
- * as gpio0-7. The board can of course use the alchemy_gpioX_* functions
- * as required).
- */
-
-#ifndef CONFIG_GPIOLIB
-
-#ifdef CONFIG_ALCHEMY_GPIOINT_AU1000
-
-#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT	/* case (4) */
-
-static inline int gpio_direction_input(int gpio)
-{
-	return alchemy_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(int gpio, int v)
-{
-	return alchemy_gpio_direction_output(gpio, v);
-}
-
-static inline int gpio_get_value(int gpio)
-{
-	return alchemy_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(int gpio, int v)
-{
-	alchemy_gpio_set_value(gpio, v);
-}
-
-static inline int gpio_get_value_cansleep(unsigned gpio)
-{
-	return gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value_cansleep(unsigned gpio, int value)
-{
-	gpio_set_value(gpio, value);
-}
-
-static inline int gpio_is_valid(int gpio)
-{
-	return alchemy_gpio_is_valid(gpio);
-}
-
-static inline int gpio_cansleep(int gpio)
-{
-	return alchemy_gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(int gpio)
-{
-	return alchemy_gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(int irq)
-{
-	return alchemy_irq_to_gpio(irq);
-}
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-	return 0;
-}
-
-static inline int gpio_request_one(unsigned gpio,
-					unsigned long flags, const char *label)
-{
-	return 0;
-}
-
-static inline int gpio_request_array(struct gpio *array, size_t num)
-{
-	return 0;
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-}
-
-static inline void gpio_free_array(struct gpio *array, size_t num)
-{
-}
-
-static inline int gpio_set_debounce(unsigned gpio, unsigned debounce)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_export(unsigned gpio, bool direction_may_change)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_export_link(struct device *dev, const char *name,
-				   unsigned gpio)
-{
-	return -ENOSYS;
-}
-
-static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
-{
-	return -ENOSYS;
-}
-
-static inline void gpio_unexport(unsigned gpio)
-{
-}
-
-#endif	/* !CONFIG_ALCHEMY_GPIO_INDIRECT */
-
-#endif	/* CONFIG_ALCHEMY_GPIOINT_AU1000 */
-
-#endif	/* !CONFIG_GPIOLIB */
-
 #endif /* _ALCHEMY_GPIO_AU1000_H_ */
diff --git a/arch/mips/include/asm/mach-au1x00/gpio.h b/arch/mips/include/asm/mach-au1x00/gpio.h
deleted file mode 100644
index 22e7ff1..0000000
--- a/arch/mips/include/asm/mach-au1x00/gpio.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Alchemy GPIO support.
- *
- * With CONFIG_GPIOLIB=y different types of on-chip GPIO can be supported within
- *  the same kernel image.
- * With CONFIG_GPIOLIB=n, your board must select ALCHEMY_GPIOINT_AU1XXX for the
- *  appropriate CPU type (AU1000 currently).
- */
-
-#ifndef _ALCHEMY_GPIO_H_
-#define _ALCHEMY_GPIO_H_
-
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/gpio-au1000.h>
-#include <asm/mach-au1x00/gpio-au1300.h>
-
-/* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before
- * SYS_PININPUTEN is written to at least once.  On Au1550/Au1200/Au1300 this
- * register enables use of GPIOs as wake source.
- */
-static inline void alchemy_gpio1_input_enable(void)
-{
-	void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR);
-	__raw_writel(0, base + 0x110);		/* the write op is key */
-	wmb();
-}
-
-
-/* Linux gpio framework integration.
-*
-* 4 use cases of Alchemy GPIOS:
-*(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y:
-*	Board must register gpiochips.
-*(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n:
-*	A gpiochip for the 75 GPIOs is registered.
-*
-*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
-*	the boards' gpio.h must provide	the linux gpio wrapper functions,
-*
-*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
-*	inlinable gpio functions are provided which enable access to the
-*	Au1300 gpios only by using the numbers straight out of the data-
-*	sheets.
-
-* Cases 1 and 3 are intended for boards which want to provide their own
-* GPIO namespace and -operations (i.e. for example you have 8 GPIOs
-* which are in part provided by spare Au1300 GPIO pins and in part by
-* an external FPGA but you still want them to be accssible in linux
-* as gpio0-7. The board can of course use the alchemy_gpioX_* functions
-* as required).
-*/
-
-#ifdef CONFIG_GPIOLIB
-
-/* wraps the cpu-dependent irq_to_gpio functions */
-/* FIXME: gpiolib needs an irq_to_gpio hook */
-static inline int __au_irq_to_gpio(unsigned int irq)
-{
-	switch (alchemy_get_cputype()) {
-	case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
-		return alchemy_irq_to_gpio(irq);
-	case ALCHEMY_CPU_AU1300:
-		return au1300_irq_to_gpio(irq);
-	}
-	return -EINVAL;
-}
-
-
-/* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */
-#ifndef CONFIG_ALCHEMY_GPIO_INDIRECT	/* case (2) */
-
-/* get everything through gpiolib */
-#define gpio_to_irq	__gpio_to_irq
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#define irq_to_gpio	__au_irq_to_gpio
-
-#include <asm-generic/gpio.h>
-
-#endif	/* !CONFIG_ALCHEMY_GPIO_INDIRECT */
-
-
-#endif	/* CONFIG_GPIOLIB */
-
-#endif	/* _ALCHEMY_GPIO_H_ */
diff --git a/arch/mips/include/asm/mach-bcm47xx/gpio.h b/arch/mips/include/asm/mach-bcm47xx/gpio.h
deleted file mode 100644
index 90daefa..0000000
--- a/arch/mips/include/asm/mach-bcm47xx/gpio.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H
-#define __ASM_MIPS_MACH_BCM47XX_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-#define gpio_to_irq __gpio_to_irq
-
-static inline int irq_to_gpio(unsigned int irq)
-{
-	return -EINVAL;
-}
-
-#endif
diff --git a/arch/mips/include/asm/mach-bcm63xx/gpio.h b/arch/mips/include/asm/mach-bcm63xx/gpio.h
deleted file mode 100644
index 1eb534d..0000000
--- a/arch/mips/include/asm/mach-bcm63xx/gpio.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_MIPS_MACH_BCM63XX_GPIO_H
-#define __ASM_MIPS_MACH_BCM63XX_GPIO_H
-
-#include <bcm63xx_gpio.h>
-
-#define gpio_to_irq(gpio)	-1
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-#include <asm-generic/gpio.h>
-
-#endif /* __ASM_MIPS_MACH_BCM63XX_GPIO_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/gpio.h b/arch/mips/include/asm/mach-cavium-octeon/gpio.h
deleted file mode 100644
index 34e9f7a..0000000
--- a/arch/mips/include/asm/mach-cavium-octeon/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_MACH_CAVIUM_OCTEON_GPIO_H
-#define __ASM_MACH_CAVIUM_OCTEON_GPIO_H
-
-#ifdef CONFIG_GPIOLIB
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#else
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-#endif
-
-#include <asm-generic/gpio.h>
-
-#define gpio_to_irq	__gpio_to_irq
-
-#endif /* __ASM_MACH_GENERIC_GPIO_H */
diff --git a/arch/mips/include/asm/mach-generic/gpio.h b/arch/mips/include/asm/mach-generic/gpio.h
deleted file mode 100644
index b4e7020..0000000
--- a/arch/mips/include/asm/mach-generic/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __ASM_MACH_GENERIC_GPIO_H
-#define __ASM_MACH_GENERIC_GPIO_H
-
-#ifdef CONFIG_GPIOLIB
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#else
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-#endif
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h>		/* cansleep wrappers */
-
-#endif /* __ASM_MACH_GENERIC_GPIO_H */
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index eaacba7..bf8c3e1 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -73,8 +73,6 @@
 void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask);
 uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
 
-#include <asm/mach-generic/gpio.h>
-
 #define JZ_GPIO_PORTA(x) ((x) + 32 * 0)
 #define JZ_GPIO_PORTB(x) ((x) + 32 * 1)
 #define JZ_GPIO_PORTC(x) ((x) + 32 * 2)
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
deleted file mode 100644
index 9ba1cae..0000000
--- a/arch/mips/include/asm/mach-lantiq/gpio.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
-#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
-
-#define gpio_to_irq __gpio_to_irq
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-
-#define gpio_cansleep __gpio_cansleep
-
-#include <asm-generic/gpio.h>
-
-#endif
diff --git a/arch/mips/include/asm/mach-loongson64/gpio.h b/arch/mips/include/asm/mach-loongson64/gpio.h
deleted file mode 100644
index b3b2169..0000000
--- a/arch/mips/include/asm/mach-loongson64/gpio.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Loongson GPIO Support
- *
- * Copyright (c) 2008  Richard Liu, STMicroelectronics <richard.liu@st.com>
- * Copyright (c) 2008-2010  Arnaud Patard <apatard@mandriva.com>
- * Copyright (c) 2014  Huacai Chen <chenhc@lemote.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LOONGSON_GPIO_H
-#define __LOONGSON_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value __gpio_get_value
-#define gpio_set_value __gpio_set_value
-#define gpio_cansleep __gpio_cansleep
-
-/* The chip can do interrupt
- * but it has not been tested and doc not clear
- */
-static inline int gpio_to_irq(int gpio)
-{
-	return -EINVAL;
-}
-
-static inline int irq_to_gpio(int gpio)
-{
-	return -EINVAL;
-}
-
-#endif	/* __LOONGSON_GPIO_H */
diff --git a/arch/mips/include/asm/mach-pistachio/gpio.h b/arch/mips/include/asm/mach-pistachio/gpio.h
deleted file mode 100644
index 6c1649c..0000000
--- a/arch/mips/include/asm/mach-pistachio/gpio.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Pistachio IRQ setup
- *
- * Copyright (C) 2014 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_PISTACHIO_GPIO_H
-#define __ASM_MACH_PISTACHIO_GPIO_H
-
-#include <asm-generic/gpio.h>
-
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-#define gpio_to_irq	__gpio_to_irq
-
-#endif /* __ASM_MACH_PISTACHIO_GPIO_H */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index 4dee0a3..db21121 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -13,18 +13,6 @@
 #ifndef _RC32434_GPIO_H_
 #define _RC32434_GPIO_H_
 
-#include <linux/types.h>
-#include <asm-generic/gpio.h>
-
-#define NR_BUILTIN_GPIO		32
-
-#define gpio_get_value	__gpio_get_value
-#define gpio_set_value	__gpio_set_value
-#define gpio_cansleep	__gpio_cansleep
-
-#define gpio_to_irq(gpio)	(8 + 4 * 32 + gpio)
-#define irq_to_gpio(irq)	(irq - (8 + 4 * 32))
-
 struct rb532_gpio_reg {
 	u32   gpiofunc;	  /* GPIO Function Register
 			   * gpiofunc[x]==0 bit = gpio
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index edc7ee9..d75b75e 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -33,6 +33,29 @@
  */
 extern phys_addr_t __mips_cm_phys_base(void);
 
+/*
+ * mips_cm_is64 - determine CM register width
+ *
+ * The CM register width is processor and CM specific. A 64-bit processor
+ * usually has a 64-bit CM and a 32-bit one has a 32-bit CM but a 64-bit
+ * processor could come with a 32-bit CM. Moreover, accesses on 64-bit CMs
+ * can be done either using regular 64-bit load/store instructions, or 32-bit
+ * load/store instruction on 32-bit register pairs. We opt for using 64-bit
+ * accesses on 64-bit CMs and kernels and 32-bit in any other case.
+ *
+ * It's set to 0 for 32-bit accesses and 1 for 64-bit accesses.
+ */
+extern int mips_cm_is64;
+
+/**
+ * mips_cm_error_report - Report CM cache errors
+ */
+#ifdef CONFIG_MIPS_CM
+extern void mips_cm_error_report(void);
+#else
+static inline void mips_cm_error_report(void) {}
+#endif
+
 /**
  * mips_cm_probe - probe for a Coherence Manager
  *
@@ -90,20 +113,46 @@
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CM_R_(name, off)					\
-static inline u32 __iomem *addr_gcr_##name(void)		\
+static inline unsigned long __iomem *addr_gcr_##name(void)	\
 {								\
-	return (u32 __iomem *)(mips_cm_base + (off));		\
+	return (unsigned long __iomem *)(mips_cm_base + (off));	\
 }								\
 								\
-static inline u32 read_gcr_##name(void)				\
+static inline u32 read32_gcr_##name(void)			\
 {								\
 	return __raw_readl(addr_gcr_##name());			\
+}								\
+								\
+static inline u64 read64_gcr_##name(void)			\
+{								\
+	return __raw_readq(addr_gcr_##name());			\
+}								\
+								\
+static inline unsigned long read_gcr_##name(void)		\
+{								\
+	if (mips_cm_is64)					\
+		return read64_gcr_##name();			\
+	else							\
+		return read32_gcr_##name();			\
 }
 
 #define BUILD_CM__W(name, off)					\
-static inline void write_gcr_##name(u32 value)			\
+static inline void write32_gcr_##name(u32 value)		\
 {								\
 	__raw_writel(value, addr_gcr_##name());			\
+}								\
+								\
+static inline void write64_gcr_##name(u64 value)		\
+{								\
+	__raw_writeq(value, addr_gcr_##name());			\
+}								\
+								\
+static inline void write_gcr_##name(unsigned long value)	\
+{								\
+	if (mips_cm_is64)					\
+		write64_gcr_##name(value);			\
+	else							\
+		write32_gcr_##name(value);			\
 }
 
 #define BUILD_CM_RW(name, off)					\
@@ -144,6 +193,7 @@
 BUILD_CM_RW(reg3_mask,		MIPS_CM_GCB_OFS + 0xc8)
 BUILD_CM_R_(gic_status,		MIPS_CM_GCB_OFS + 0xd0)
 BUILD_CM_R_(cpc_status,		MIPS_CM_GCB_OFS + 0xf0)
+BUILD_CM_RW(l2_config,		MIPS_CM_GCB_OFS + 0x130)
 
 /* Core Local & Core Other register accessor functions */
 BUILD_CM_Cx_RW(reset_release,	0x00)
@@ -189,6 +239,13 @@
 #define CM_GCR_REV_MINOR_SHF			0
 #define CM_GCR_REV_MINOR_MSK			(_ULCAST_(0xff) << 0)
 
+#define CM_ENCODE_REV(major, minor) \
+		(((major) << CM_GCR_REV_MAJOR_SHF) | \
+		 ((minor) << CM_GCR_REV_MINOR_SHF))
+
+#define CM_REV_CM2				CM_ENCODE_REV(6, 0)
+#define CM_REV_CM3				CM_ENCODE_REV(8, 0)
+
 /* GCR_ERROR_CAUSE register fields */
 #define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF		27
 #define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK		(_ULCAST_(0x1f) << 27)
@@ -249,6 +306,16 @@
 #define CM_GCR_CPC_STATUS_EX_SHF		0
 #define CM_GCR_CPC_STATUS_EX_MSK		(_ULCAST_(0x1) << 0)
 
+/* GCR_L2_CONFIG register fields */
+#define CM_GCR_L2_CONFIG_BYPASS_SHF		20
+#define CM_GCR_L2_CONFIG_BYPASS_MSK		(_ULCAST_(0x1) << 20)
+#define CM_GCR_L2_CONFIG_SET_SIZE_SHF		12
+#define CM_GCR_L2_CONFIG_SET_SIZE_MSK		(_ULCAST_(0xf) << 12)
+#define CM_GCR_L2_CONFIG_LINE_SIZE_SHF		8
+#define CM_GCR_L2_CONFIG_LINE_SIZE_MSK		(_ULCAST_(0xf) << 8)
+#define CM_GCR_L2_CONFIG_ASSOC_SHF		0
+#define CM_GCR_L2_CONFIG_ASSOC_MSK		(_ULCAST_(0xff) << 0)
+
 /* GCR_Cx_COHERENCE register fields */
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF	0
 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK	(_ULCAST_(0xff) << 0)
@@ -324,4 +391,18 @@
 	return 0;
 }
 
+/**
+ * mips_cm_revision() - return CM revision
+ *
+ * Return: The revision of the CM, from GCR_REV, or 0 if no CM is present. The
+ * return value should be checked against the CM_REV_* macros.
+ */
+static inline int mips_cm_revision(void)
+{
+	if (!mips_cm_present())
+		return 0;
+
+	return read_gcr_rev();
+}
+
 #endif /* __MIPS_ASM_MIPS_CM_H__ */
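For orientation, a hedged usage sketch (not part of the patch) of the width-agnostic accessors added above: read_gcr_l2_config()/write_gcr_l2_config() come from the BUILD_CM_RW(l2_config, ...) line and pick 32- or 64-bit MMIO based on mips_cm_is64.

/*
 * Sketch: bypass the L2 via GCR_L2_CONFIG on CM3-class hardware.
 * Illustrative only; uses only accessors and masks defined above.
 */
static void l2_bypass_sketch(void)
{
	unsigned long cfg;

	if (mips_cm_revision() < CM_REV_CM3)
		return;				/* no CM, or too old a CM */

	cfg = read_gcr_l2_config();		/* 32- or 64-bit access per mips_cm_is64 */
	cfg |= CM_GCR_L2_CONFIG_BYPASS_MSK;
	write_gcr_l2_config(cfg);
}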
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index 1cebe8c..f386f32 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -28,16 +28,6 @@
 extern phys_addr_t mips_cpc_default_phys_base(void);
 
 /**
- * mips_cpc_phys_base - retrieve the physical base address of the CPC
- *
- * This function returns the physical base address of the Cluster Power
- * Controller memory mapped registers, or 0 if no Cluster Power Controller
- * is present. It may be overriden by individual platforms which determine
- * this address in a different way.
- */
-extern phys_addr_t __weak mips_cpc_phys_base(void);
-
-/**
  * mips_cpc_probe - probe for a Cluster Power Controller
  *
  * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index c5b0956..d3cd8ea 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -112,6 +112,30 @@
 #define CP0_TX39_CACHE	$7
 
 
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_G		(_ULCAST_(1) << 0)
+#define ENTRYLO_V		(_ULCAST_(1) << 1)
+#define ENTRYLO_D		(_ULCAST_(1) << 2)
+#define ENTRYLO_C_SHIFT		3
+#define ENTRYLO_C		(_ULCAST_(7) << ENTRYLO_C_SHIFT)
+
+/* R3000 EntryLo bit definitions */
+#define R3K_ENTRYLO_G		(_ULCAST_(1) << 8)
+#define R3K_ENTRYLO_V		(_ULCAST_(1) << 9)
+#define R3K_ENTRYLO_D		(_ULCAST_(1) << 10)
+#define R3K_ENTRYLO_N		(_ULCAST_(1) << 11)
+
+/* MIPS32/64 EntryLo bit definitions */
+#ifdef CONFIG_64BIT
+/* as read by dmfc0 */
+#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 62)
+#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 63)
+#else
+/* as read by mfc0 */
+#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 30)
+#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 31)
+#endif
+
 /*
  * Values for PageMask register
  */
@@ -203,6 +227,9 @@
 #define PG_ESP		(_ULCAST_(1) <<	 28)
 #define PG_IEC		(_ULCAST_(1) <<  27)
 
+/* MIPS32/64 EntryHI bit definitions */
+#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
+
 /*
  * R4x00 interrupt enable / cause bits
  */
@@ -579,6 +606,8 @@
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
+/* FTLB probability bits for R6 */
+#define MIPS_CONF7_FTLBP_SHIFT	(18)
 
 /* MAAR bit definitions */
 #define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
@@ -586,31 +615,6 @@
 #define MIPS_MAAR_S		(_ULCAST_(1) << 1)
 #define MIPS_MAAR_V		(_ULCAST_(1) << 0)
 
-/*  EntryHI bit definition */
-#define MIPS_ENTRYHI_EHINV	(_ULCAST_(1) << 10)
-
-/* R3000 EntryLo bit definitions */
-#define R3K_ENTRYLO_G		(_ULCAST_(1) << 8)
-#define R3K_ENTRYLO_V		(_ULCAST_(1) << 9)
-#define R3K_ENTRYLO_D		(_ULCAST_(1) << 10)
-#define R3K_ENTRYLO_N		(_ULCAST_(1) << 11)
-
-/* R4000 compatible EntryLo bit definitions */
-#define MIPS_ENTRYLO_G		(_ULCAST_(1) << 0)
-#define MIPS_ENTRYLO_V		(_ULCAST_(1) << 1)
-#define MIPS_ENTRYLO_D		(_ULCAST_(1) << 2)
-#define MIPS_ENTRYLO_C_SHIFT	3
-#define MIPS_ENTRYLO_C		(_ULCAST_(7) << MIPS_ENTRYLO_C_SHIFT)
-#ifdef CONFIG_64BIT
-/* as read by dmfc0 */
-#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 62)
-#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 63)
-#else
-/* as read by mfc0 */
-#define MIPS_ENTRYLO_XI		(_ULCAST_(1) << 30)
-#define MIPS_ENTRYLO_RI		(_ULCAST_(1) << 31)
-#endif
-
 /* CMGCRBase bit definitions */
 #define MIPS_CMGCRB_BASE	11
 #define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
@@ -932,7 +936,7 @@
  */
 
 #define __read_32bit_c0_register(source, sel)				\
-({ int __res;								\
+({ unsigned int __res;							\
 	if (sel == 0)							\
 		__asm__ __volatile__(					\
 			"mfc0\t%0, " #source "\n\t"			\
@@ -1014,7 +1018,7 @@
  * On RM7000/RM9000 these are uses to access cop0 set 1 registers
  */
 #define __read_32bit_c0_ctrl_register(source)				\
-({ int __res;								\
+({ unsigned int __res;							\
 	__asm__ __volatile__(						\
 		"cfc0\t%0, " #source "\n\t"				\
 		: "=r" (__res));					\
@@ -1471,7 +1475,7 @@
  */
 #define _read_32bit_cp1_register(source, gas_hardfloat)			\
 ({									\
-	int __res;							\
+	unsigned int __res;						\
 									\
 	__asm__ __volatile__(						\
 	"	.set	push					\n"	\
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index af5638b..bbb85fe 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -14,10 +14,90 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/inst.h>
+
 extern void _save_msa(struct task_struct *);
 extern void _restore_msa(struct task_struct *);
 extern void _init_msa_upper(void);
 
+extern void read_msa_wr_b(unsigned idx, union fpureg *to);
+extern void read_msa_wr_h(unsigned idx, union fpureg *to);
+extern void read_msa_wr_w(unsigned idx, union fpureg *to);
+extern void read_msa_wr_d(unsigned idx, union fpureg *to);
+
+/**
+ * read_msa_wr() - Read a single MSA vector register
+ * @idx:	The index of the vector register to read
+ * @to:		The FPU register union to store the register's value in
+ * @fmt:	The format of the data in the vector register
+ *
+ * Read the value of MSA vector register idx into the FPU register
+ * union to, using the format fmt.
+ */
+static inline void read_msa_wr(unsigned idx, union fpureg *to,
+			       enum msa_2b_fmt fmt)
+{
+	switch (fmt) {
+	case msa_fmt_b:
+		read_msa_wr_b(idx, to);
+		break;
+
+	case msa_fmt_h:
+		read_msa_wr_h(idx, to);
+		break;
+
+	case msa_fmt_w:
+		read_msa_wr_w(idx, to);
+		break;
+
+	case msa_fmt_d:
+		read_msa_wr_d(idx, to);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+extern void write_msa_wr_b(unsigned idx, union fpureg *from);
+extern void write_msa_wr_h(unsigned idx, union fpureg *from);
+extern void write_msa_wr_w(unsigned idx, union fpureg *from);
+extern void write_msa_wr_d(unsigned idx, union fpureg *from);
+
+/**
+ * write_msa_wr() - Write a single MSA vector register
+ * @idx:	The index of the vector register to write
+ * @from:	The FPU register union to take the register's value from
+ * @fmt:	The format of the data in the vector register
+ *
+ * Write the value from the FPU register union from into MSA vector
+ * register idx, using the format fmt.
+ */
+static inline void write_msa_wr(unsigned idx, union fpureg *from,
+				enum msa_2b_fmt fmt)
+{
+	switch (fmt) {
+	case msa_fmt_b:
+		write_msa_wr_b(idx, from);
+		break;
+
+	case msa_fmt_h:
+		write_msa_wr_h(idx, from);
+		break;
+
+	case msa_fmt_w:
+		write_msa_wr_w(idx, from);
+		break;
+
+	case msa_fmt_d:
+		write_msa_wr_d(idx, from);
+		break;
+
+	default:
+		BUG();
+	}
+}
+
 static inline void enable_msa(void)
 {
 	if (cpu_has_msa) {
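A short, hedged example of the format-dispatching helpers above; the val32[] member of union fpureg is an assumption about that union's layout, and MSA is assumed live for the current context.

/*
 * Sketch (illustrative only): dump the low 128 bits of MSA register w5.
 */
static void dump_w5_sketch(void)
{
	union fpureg val;

	read_msa_wr(5, &val, msa_fmt_w);	/* dispatches to read_msa_wr_w() */
	pr_info("w5 = %08x %08x %08x %08x\n",
		val.val32[0], val.val32[1], val.val32[2], val.val32[3]);
}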
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index c373d95..d92cf59 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -284,6 +284,7 @@
 	CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
 	CVMX_BOARD_TYPE_UBNT_E100 = 20002,
 	CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
+	CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
 	CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
 
 	/* The remaining range is reserved for future use. */
@@ -384,6 +385,7 @@
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N)
+		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
 		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
 	}
 	return "Unsupported Board";
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index df69bfd..c210154 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -37,7 +37,7 @@
 #include <asm/octeon/cvmx-fpa.h>
 #include <asm/octeon/cvmx-pip-defs.h>
 
-#define CVMX_PIP_NUM_INPUT_PORTS		40
+#define CVMX_PIP_NUM_INPUT_PORTS		48
 #define CVMX_PIP_NUM_WATCHERS			4
 
 /*
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index 3da59bb..5f47f76 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -542,6 +542,9 @@
  */
 static inline int cvmx_pko_get_base_queue(int port)
 {
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		return port;
+
 	return cvmx_pko_get_base_queue_per_core(port, 0);
 }
 
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
index 9020ef4..6a3db4b 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -52,6 +52,12 @@
 #define CVMX_POW_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000000080ull) + ((offset) & 15) * 8)
 #define CVMX_POW_WS_PCX(offset) (CVMX_ADD_IO_SEG(0x0001670000000280ull) + ((offset) & 15) * 8)
 
+#define CVMX_SSO_WQ_INT (CVMX_ADD_IO_SEG(0x0001670000001000ull))
+#define CVMX_SSO_WQ_IQ_DIS (CVMX_ADD_IO_SEG(0x0001670000001010ull))
+#define CVMX_SSO_WQ_INT_PC (CVMX_ADD_IO_SEG(0x0001670000001020ull))
+#define CVMX_SSO_PPX_GRP_MSK(offset) (CVMX_ADD_IO_SEG(0x0001670000006000ull) + ((offset) & 31) * 8)
+#define CVMX_SSO_WQ_INT_THRX(offset) (CVMX_ADD_IO_SEG(0x0001670000007000ull) + ((offset) & 63) * 8)
+
 union cvmx_pow_bist_stat {
 	uint64_t u64;
 	struct cvmx_pow_bist_stat_s {
@@ -1286,4 +1292,27 @@
 	struct cvmx_pow_ws_pcx_s cnf71xx;
 };
 
+union cvmx_sso_wq_int_thrx {
+	uint64_t u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t reserved_33_63:31;
+		uint64_t tc_en:1;
+		uint64_t tc_thr:4;
+		uint64_t reserved_26_27:2;
+		uint64_t ds_thr:12;
+		uint64_t reserved_12_13:2;
+		uint64_t iq_thr:12;
+#else
+		uint64_t iq_thr:12;
+		uint64_t reserved_12_13:2;
+		uint64_t ds_thr:12;
+		uint64_t reserved_26_27:2;
+		uint64_t tc_thr:4;
+		uint64_t tc_en:1;
+		uint64_t reserved_33_63:31;
+#endif
+	} s;
+};
+
 #endif
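A hedged sketch of how the new SSO definitions pair up; cvmx_write_csr() is the usual Octeon CSR write helper and is assumed to be available here.

/*
 * Sketch: raise the work-queue interrupt once more than 100 entries
 * are input-queued for group 0.
 */
static void sso_iq_threshold_sketch(void)
{
	union cvmx_sso_wq_int_thrx thr;

	thr.u64 = 0;
	thr.s.iq_thr = 100;
	cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(0), thr.u64);
}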
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index d5565d7..5153156 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -1810,10 +1810,11 @@
 	cvmx_addr_t ptr;
 	cvmx_pow_tag_req_t tag_req;
 
-	wqp->qos = qos;
-	wqp->tag = tag;
-	wqp->tag_type = tag_type;
-	wqp->grp = grp;
+	wqp->word1.tag = tag;
+	wqp->word1.tag_type = tag_type;
+
+	cvmx_wqe_set_qos(wqp, qos);
+	cvmx_wqe_set_grp(wqp, grp);
 
 	tag_req.u64 = 0;
 	tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index 2d6d0c7..0d697aa 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -193,6 +193,53 @@
 	        uint64_t bufs:8;
 #endif
 	} s;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t bufs:8;
+		uint64_t ip_offset:8;
+		uint64_t vlan_valid:1;
+		uint64_t vlan_stacked:1;
+		uint64_t unassigned:1;
+		uint64_t vlan_cfi:1;
+		uint64_t vlan_id:12;
+		uint64_t port:12;		/* MAC/PIP port number. */
+		uint64_t dec_ipcomp:1;
+		uint64_t tcp_or_udp:1;
+		uint64_t dec_ipsec:1;
+		uint64_t is_v6:1;
+		uint64_t software:1;
+		uint64_t L4_error:1;
+		uint64_t is_frag:1;
+		uint64_t IP_exc:1;
+		uint64_t is_bcast:1;
+		uint64_t is_mcast:1;
+		uint64_t not_IP:1;
+		uint64_t rcv_error:1;
+		uint64_t err_code:8;
+#else
+		uint64_t err_code:8;
+		uint64_t rcv_error:1;
+		uint64_t not_IP:1;
+		uint64_t is_mcast:1;
+		uint64_t is_bcast:1;
+		uint64_t IP_exc:1;
+		uint64_t is_frag:1;
+		uint64_t L4_error:1;
+		uint64_t software:1;
+		uint64_t is_v6:1;
+		uint64_t dec_ipsec:1;
+		uint64_t tcp_or_udp:1;
+		uint64_t dec_ipcomp:1;
+		uint64_t port:12;
+		uint64_t vlan_id:12;
+		uint64_t vlan_cfi:1;
+		uint64_t unassigned:1;
+		uint64_t vlan_stacked:1;
+		uint64_t vlan_valid:1;
+		uint64_t ip_offset:8;
+		uint64_t bufs:8;
+#endif
+	} s_cn68xx;
 
 	/* use this to get at the 16 vlan bits */
 	struct {
@@ -355,6 +402,146 @@
 
 } cvmx_pip_wqe_word2;
 
+union cvmx_pip_wqe_word0 {
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/**
+		 * raw chksum result generated by the HW
+		 */
+		uint16_t hw_chksum;
+		/**
+		 * Field unused by hardware - available for software
+		 */
+		uint8_t unused;
+		/**
+		 * Next pointer used by hardware for list maintenance.
+		 * May be written/read by HW before the work queue
+		 * entry is scheduled to a PP (Only 36 bits used in
+		 * Octeon 1)
+		 */
+		uint64_t next_ptr:40;
+#else
+		uint64_t next_ptr:40;
+		uint8_t unused;
+		uint16_t hw_chksum;
+#endif
+	} cn38xx;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t l4ptr:8;       /* 56..63 */
+		uint64_t unused0:8;     /* 48..55 */
+		uint64_t l3ptr:8;       /* 40..47 */
+		uint64_t l2ptr:8;       /* 32..39 */
+		uint64_t unused1:18;    /* 14..31 */
+		uint64_t bpid:6;        /* 8..13 */
+		uint64_t unused2:2;     /* 6..7 */
+		uint64_t pknd:6;        /* 0..5 */
+#else
+		uint64_t pknd:6;        /* 0..5 */
+		uint64_t unused2:2;     /* 6..7 */
+		uint64_t bpid:6;        /* 8..13 */
+		uint64_t unused1:18;    /* 14..31 */
+		uint64_t l2ptr:8;       /* 32..39 */
+		uint64_t l3ptr:8;       /* 40..47 */
+		uint64_t unused0:8;     /* 48..55 */
+		uint64_t l4ptr:8;       /* 56..63 */
+#endif
+	} cn68xx;
+};
+
+union cvmx_wqe_word0 {
+	uint64_t u64;
+	union cvmx_pip_wqe_word0 pip;
+};
+
+union cvmx_wqe_word1 {
+	uint64_t u64;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t len:16;
+		uint64_t varies:14;
+		/**
+		 * the type of the tag (ORDERED, ATOMIC, NULL)
+		 */
+		uint64_t tag_type:2;
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t varies:14;
+		uint64_t len:16;
+#endif
+	};
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		uint64_t len:16;
+		uint64_t zero_0:1;
+		/**
+		 * HW sets this to what it thought the priority of
+		 * the input packet was
+		 */
+		uint64_t qos:3;
+
+		uint64_t zero_1:1;
+		/**
+		 * the group that the work queue entry will be scheduled to
+		 */
+		uint64_t grp:6;
+		uint64_t zero_2:3;
+		uint64_t tag_type:2;
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t zero_2:3;
+		uint64_t grp:6;
+		uint64_t zero_1:1;
+		uint64_t qos:3;
+		uint64_t zero_0:1;
+		uint64_t len:16;
+#endif
+	} cn68xx;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		/**
+		 * HW sets to the total number of bytes in the packet
+		 */
+		uint64_t len:16;
+		/**
+		 * HW sets this to input physical port
+		 */
+		uint64_t ipprt:6;
+
+		/**
+		 * HW sets this to what it thought the priority of
+		 * the input packet was
+		 */
+		uint64_t qos:3;
+
+		/**
+		 * the group that the work queue entry will be scheduled to
+		 */
+		uint64_t grp:4;
+		/**
+		 * the type of the tag (ORDERED, ATOMIC, NULL)
+		 */
+		uint64_t tag_type:3;
+		/**
+		 * the synchronization/ordering tag
+		 */
+		uint64_t tag:32;
+#else
+		uint64_t tag:32;
+		uint64_t tag_type:2;
+		uint64_t zero_2:1;
+		uint64_t grp:4;
+		uint64_t qos:3;
+		uint64_t ipprt:6;
+		uint64_t len:16;
+#endif
+	} cn38xx;
+};
+
 /**
  * Work queue entry format
  *
@@ -366,70 +553,13 @@
      * WORD 0
      *	HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-    /**
-     * raw chksum result generated by the HW
-     */
-	uint16_t hw_chksum;
-    /**
-     * Field unused by hardware - available for software
-     */
-	uint8_t unused;
-    /**
-     * Next pointer used by hardware for list maintenance.
-     * May be written/read by HW before the work queue
-     *		 entry is scheduled to a PP
-     * (Only 36 bits used in Octeon 1)
-     */
-	uint64_t next_ptr:40;
-#else
-	uint64_t next_ptr:40;
-	uint8_t unused;
-	uint16_t hw_chksum;
-#endif
+	union cvmx_wqe_word0 word0;
 
     /*****************************************************************
      * WORD 1
      *	HW WRITE: the following 64 bits are filled by HW when a packet arrives
      */
-
-#ifdef __BIG_ENDIAN_BITFIELD
-    /**
-     * HW sets to the total number of bytes in the packet
-     */
-	uint64_t len:16;
-    /**
-     * HW sets this to input physical port
-     */
-	uint64_t ipprt:6;
-
-    /**
-     * HW sets this to what it thought the priority of the input packet was
-     */
-	uint64_t qos:3;
-
-    /**
-     * the group that the work queue entry will be scheduled to
-     */
-	uint64_t grp:4;
-    /**
-     * the type of the tag (ORDERED, ATOMIC, NULL)
-     */
-	uint64_t tag_type:3;
-    /**
-     * the synchronization/ordering tag
-     */
-	uint64_t tag:32;
-#else
-	uint64_t tag:32;
-	uint64_t tag_type:2;
-	uint64_t zero_2:1;
-	uint64_t grp:4;
-	uint64_t qos:3;
-	uint64_t ipprt:6;
-	uint64_t len:16;
-#endif
+	union cvmx_wqe_word1 word1;
 
     /**
      * WORD 2 HW WRITE: the following 64-bits are filled in by
@@ -465,4 +595,64 @@
 
 } CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
 
+static inline int cvmx_wqe_get_port(cvmx_wqe_t *work)
+{
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		port = work->word2.s_cn68xx.port;
+	else
+		port = work->word1.cn38xx.ipprt;
+
+	return port;
+}
+
+static inline void cvmx_wqe_set_port(cvmx_wqe_t *work, int port)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word2.s_cn68xx.port = port;
+	else
+		work->word1.cn38xx.ipprt = port;
+}
+
+static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
+{
+	int grp;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		grp = work->word1.cn68xx.grp;
+	else
+		grp = work->word1.cn38xx.grp;
+
+	return grp;
+}
+
+static inline void cvmx_wqe_set_grp(cvmx_wqe_t *work, int grp)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word1.cn68xx.grp = grp;
+	else
+		work->word1.cn38xx.grp = grp;
+}
+
+static inline int cvmx_wqe_get_qos(cvmx_wqe_t *work)
+{
+	int qos;
+
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		qos = work->word1.cn68xx.qos;
+	else
+		qos = work->word1.cn38xx.qos;
+
+	return qos;
+}
+
+static inline void cvmx_wqe_set_qos(cvmx_wqe_t *work, int qos)
+{
+	if (octeon_has_feature(OCTEON_FEATURE_CN68XX_WQE))
+		work->word1.cn68xx.qos = qos;
+	else
+		work->word1.cn38xx.qos = qos;
+}
+
 #endif /* __CVMX_WQE_H__ */
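The accessors above hide the layout difference between CN68XX (port in word2, group/QoS in word1.cn68xx) and older chips, so callers need no OCTEON_FEATURE checks of their own; a minimal, hedged sketch:

/*
 * Sketch (not from the patch): steer all work arriving on port 16 to
 * group 3, whatever the underlying WQE layout.
 */
static void steer_port16_sketch(cvmx_wqe_t *work)
{
	if (cvmx_wqe_get_port(work) == 16)
		cvmx_wqe_set_grp(work, 3);
}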
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index c28a849..ff7ad91 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -133,20 +133,13 @@
 #define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)
-
-/* Only R2 or newer cores have the XI bit */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
-#else
-#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#endif	/* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
-
 #endif	/* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
 #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 /* XI - page cannot be executed */
-#ifndef _PAGE_NO_EXEC_SHIFT
+#ifdef _PAGE_SPLITTING_SHIFT
+#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
+#else
 #define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
 #endif
 #define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
@@ -156,14 +149,16 @@
 #define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
 #define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
 #define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
+#endif	/* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
 
+#if defined(_PAGE_NO_READ_SHIFT)
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
-#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-
-#else	/* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+#elif defined(_PAGE_SPLITTING_SHIFT)
+#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
+#else
 #define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
+#endif
 #define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
-#endif	/* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
@@ -249,7 +244,7 @@
 #define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */
 #define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */
 
-#elif defined(CONFIG_MACH_JZ4740)
+#elif defined(CONFIG_MACH_INGENIC)
 
 /* Ingenic uses the WA bit to achieve write-combine memory writes */
 #define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index ae85694..8957f15 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -393,6 +393,8 @@
 	return __pgprot(prot);
 }
 
+#define pgprot_writecombine pgprot_writecombine
+
 static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 {
 	unsigned long prot = pgprot_val(_prot);
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 9b3b48e..59ee6dc 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -275,6 +275,7 @@
 	unsigned long cp0_badvaddr;	/* Last user fault */
 	unsigned long cp0_baduaddr;	/* Last kernel fault accessing USEG */
 	unsigned long error_code;
+	unsigned long trap_nr;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 	struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
 	struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
@@ -341,6 +342,7 @@
 	.cp0_badvaddr		= 0,				\
 	.cp0_baduaddr		= 0,				\
 	.error_code		= 0,				\
+	.trap_nr		= 0,				\
 	/*							\
 	 * Platform specific cop2 registers(null if no COP2)	\
 	 */							\
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index ffc3203..f6fc6aa 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -14,11 +14,16 @@
 #include <linux/linkage.h>
 #include <linux/types.h>
 #include <asm/isadep.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
 #include <uapi/asm/ptrace.h>
 
 /*
  * This struct defines the way the registers are stored on the stack during a
  * system call/exception. As usual the registers k0/k1 aren't being saved.
+ *
+ * If you add a register here, also add it to regoffset_table[] in
+ * arch/mips/kernel/ptrace.c.
  */
 struct pt_regs {
 #ifdef CONFIG_32BIT
@@ -43,8 +48,83 @@
 	unsigned long long mpl[6];        /* MTM{0-5} */
 	unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
+	unsigned long __last[0];
 } __aligned(8);
 
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->regs[31];
+}
+
+/*
+ * Don't use asm-generic/ptrace.h; it defines FP accessors that don't make
+ * sense on MIPS.  We would rather get an error if they are invoked.
+ */
+
+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                           unsigned long val)
+{
+	regs->cp0_epc = val;
+}
+
+/* Query offset/name of register from its name/offset */
+extern int regs_query_register_offset(const char *name);
+#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:       pt_regs from which register value is gotten.
+ * @offset:     offset number of the register.
+ *
+ * regs_get_register returns the value of a register. The @offset is the
+ * offset of the register in the struct pt_regs which is specified by @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+                                              unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_within_kernel_stack() - check the address in the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @addr:       address which is checked.
+ *
+ * regs_within_kernel_stack() checks whether @addr is within the kernel stack
+ * page(s). If @addr is within the kernel stack, it returns true; otherwise false.
+ */
+static inline int regs_within_kernel_stack(struct pt_regs *regs,
+                                           unsigned long addr)
+{
+	return ((addr & ~(THREAD_SIZE - 1))  ==
+		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs:       pt_regs which contains kernel stack pointer.
+ * @n:          stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns the @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+                                                      unsigned int n)
+{
+	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
+
+	addr += n;
+	if (regs_within_kernel_stack(regs, (unsigned long)addr))
+		return *addr;
+	else
+		return 0;
+}
+
 struct task_struct;
 
 extern int ptrace_getregs(struct task_struct *child,
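A hedged sketch of the intended consumers (kprobes/uprobes-style fetch methods) of the new register and stack accessors. The register name string passed to regs_query_register_offset() is an assumption; the real names live in regoffset_table[] in arch/mips/kernel/ptrace.c.

/*
 * Sketch (illustrative only): read $a0 and the first stack slot from a
 * trapped context.
 */
static void show_trap_context_sketch(struct pt_regs *regs)
{
	int off = regs_query_register_offset("a0");	/* name is an assumption */
	unsigned long a0 = 0;

	if (off >= 0)
		a0 = regs_get_register(regs, off);

	pr_info("a0=%lx [sp+0]=%lx\n",
		a0, regs_get_kernel_stack_nth(regs, 0));
}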
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 8efe5a9..003e273 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -23,4 +23,7 @@
 
 #define __ARCH_HAS_IRIX_SIGACTION
 
+extern int protected_save_fp_context(void __user *sc);
+extern int protected_restore_fp_context(void __user *sc);
+
 #endif /* _ASM_SIGNAL_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 9de4ba4..40196be 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -42,6 +42,11 @@
 	return ((counters >> 16) ^ counters) & 0xffff;
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.h.serving_now == lock.h.ticket;
+}
+
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_unlock_wait(x) \
 	while (arch_spin_is_locked(x)) { cpu_relax(); }
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 7163cd7..28b5d84a 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -16,29 +16,21 @@
 #include <asm/watch.h>
 #include <asm/dsp.h>
 #include <asm/cop2.h>
-#include <asm/msa.h>
+#include <asm/fpu.h>
 
 struct task_struct;
 
-enum {
-	FP_SAVE_NONE	= 0,
-	FP_SAVE_VECTOR	= -1,
-	FP_SAVE_SCALAR	= 1,
-};
-
 /**
  * resume - resume execution of a task
  * @prev:	The task previously executed.
  * @next:	The task to begin executing.
  * @next_ti:	task_thread_info(next).
- * @fp_save:	Which, if any, FP context to save for prev.
  *
  * This function is used whilst scheduling to save the context of prev & load
  * the context of next. Returns prev.
  */
 extern asmlinkage struct task_struct *resume(struct task_struct *prev,
-		struct task_struct *next, struct thread_info *next_ti,
-		s32 fp_save);
+		struct task_struct *next, struct thread_info *next_ti);
 
 extern unsigned int ll_bit;
 extern struct task_struct *ll_task;
@@ -83,45 +75,38 @@
 	}								\
 } while (0)
 
+/*
+ * For newly created kernel threads, switch_to() will return to
+ * ret_from_kernel_thread, and newly created user threads to ret_from_fork.
+ * That is, everything following resume() will be skipped for new threads.
+ * So everything that matters to new threads should be placed before resume().
+ */
 #define switch_to(prev, next, last)					\
 do {									\
-	u32 __c0_stat;							\
-	s32 __fpsave = FP_SAVE_NONE;					\
 	__mips_mt_fpaff_switch_to(prev);				\
-	if (cpu_has_dsp)						\
+	lose_fpu_inatomic(1, prev);					\
+	if (cpu_has_dsp) {						\
 		__save_dsp(prev);					\
-	if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {		\
-		if (cop2_lazy_restore)					\
-			KSTK_STATUS(prev) &= ~ST0_CU2;			\
-		__c0_stat = read_c0_status();				\
-		write_c0_status(__c0_stat | ST0_CU2);			\
-		cop2_save(prev);					\
-		write_c0_status(__c0_stat & ~ST0_CU2);			\
+		__restore_dsp(next);					\
+	}								\
+	if (cop2_present) {						\
+		set_c0_status(ST0_CU2);					\
+		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
+			if (cop2_lazy_restore)				\
+				KSTK_STATUS(prev) &= ~ST0_CU2;		\
+			cop2_save(prev);				\
+		}							\
+		if (KSTK_STATUS(next) & ST0_CU2 &&			\
+		    !cop2_lazy_restore) {				\
+			cop2_restore(next);				\
+		}							\
+		clear_c0_status(ST0_CU2);				\
 	}								\
 	__clear_software_ll_bit();					\
-	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))		\
-		__fpsave = FP_SAVE_SCALAR;				\
-	if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))		\
-		__fpsave = FP_SAVE_VECTOR;				\
-	(last) = resume(prev, next, task_thread_info(next), __fpsave);	\
-} while (0)
-
-#define finish_arch_switch(prev)					\
-do {									\
-	u32 __c0_stat;							\
-	if (cop2_present && !cop2_lazy_restore &&			\
-			(KSTK_STATUS(current) & ST0_CU2)) {		\
-		__c0_stat = read_c0_status();				\
-		write_c0_status(__c0_stat | ST0_CU2);			\
-		cop2_restore(current);					\
-		write_c0_status(__c0_stat & ~ST0_CU2);			\
-	}								\
-	if (cpu_has_dsp)						\
-		__restore_dsp(current);					\
 	if (cpu_has_userlocal)						\
-		write_c0_userlocal(current_thread_info()->tp_value);	\
+		write_c0_userlocal(task_thread_info(next)->tp_value);	\
 	__restore_watch();						\
-	disable_msa();							\
+	(last) = resume(prev, next, task_thread_info(next));		\
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 9c0014e..e309d8f 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -99,6 +99,7 @@
 #define TIF_SYSCALL_AUDIT	3	/* syscall auditing active */
 #define TIF_SECCOMP		4	/* secure computing */
 #define TIF_NOTIFY_RESUME	5	/* callback before returning to user */
+#define TIF_UPROBE		6	/* breakpointed or singlestepping */
 #define TIF_RESTORE_SIGMASK	9	/* restore signal mask in do_signal() */
 #define TIF_USEDFPU		16	/* FPU was used by this task this quantum (SMP) */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
@@ -122,6 +123,7 @@
 #define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 #define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_UPROBE		(1<<TIF_UPROBE)
 #define _TIF_USEDFPU		(1<<TIF_USEDFPU)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
 #define _TIF_FIXADE		(1<<TIF_FIXADE)
@@ -146,7 +148,8 @@
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		\
-	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
+	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME |	\
+	 _TIF_UPROBE)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(_TIF_NOHZ | _TIF_WORK_MASK |		\
 				 _TIF_WORK_SYSCALL_EXIT |		\
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 8ab2874..17d4cd2 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -51,7 +51,7 @@
 /*
  * Initialize the calling CPU's compare interrupt as clockevent device
  */
-extern unsigned int __weak get_c0_compare_int(void);
+extern unsigned int get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
 
 static inline int mips_clockevent_init(void)
diff --git a/arch/mips/include/asm/tlbdebug.h b/arch/mips/include/asm/tlbdebug.h
index bb8f5c2..3a25a87 100644
--- a/arch/mips/include/asm/tlbdebug.h
+++ b/arch/mips/include/asm/tlbdebug.h
@@ -11,6 +11,7 @@
 /*
  * TLB debugging functions:
  */
+extern void dump_tlb_regs(void);
 extern void dump_tlb_all(void);
 
 #endif /* __ASM_TLBDEBUG_H */
diff --git a/arch/mips/include/asm/uprobes.h b/arch/mips/include/asm/uprobes.h
new file mode 100644
index 0000000..34c325c
--- /dev/null
+++ b/arch/mips/include/asm/uprobes.h
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_UPROBES_H
+#define __ASM_UPROBES_H
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/*
+ * We want this to be defined as union mips_instruction but that makes the
+ * generic code blow up.
+ */
+typedef u32 uprobe_opcode_t;
+
+/*
+ * Classic MIPS (note this implementation doesn't consider microMIPS yet)
+ * instructions are always 4 bytes but in order to deal with branches and
+ * their delay slots, we treat instructions as having 8 bytes maximum.
+ */
+#define MAX_UINSN_BYTES			8
+#define UPROBE_XOL_SLOT_BYTES		128	/* Max. cache line size */
+
+#define UPROBE_BRK_UPROBE		0x000d000d	/* break 13 */
+#define UPROBE_BRK_UPROBE_XOL		0x000e000d	/* break 14 */
+
+#define UPROBE_SWBP_INSN		UPROBE_BRK_UPROBE
+#define UPROBE_SWBP_INSN_SIZE		4
+
+struct arch_uprobe {
+	unsigned long	resume_epc;
+	u32	insn[2];
+	u32	ixol[2];
+	union	mips_instruction orig_inst[MAX_UINSN_BYTES / 4];
+};
+
+struct arch_uprobe_task {
+	unsigned long saved_trap_nr;
+};
+
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+	struct mm_struct *mm, unsigned long addr);
+extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
+extern int arch_uprobe_exception_notify(struct notifier_block *self,
+	unsigned long val, void *data);
+extern void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+	struct pt_regs *regs);
+extern unsigned long arch_uretprobe_hijack_return_addr(
+	unsigned long trampoline_vaddr, struct pt_regs *regs);
+
+#endif /* __ASM_UPROBES_H */
diff --git a/arch/mips/include/asm/vpe.h b/arch/mips/include/asm/vpe.h
index 7849f39..80e70db 100644
--- a/arch/mips/include/asm/vpe.h
+++ b/arch/mips/include/asm/vpe.h
@@ -122,7 +122,7 @@
 void *alloc_progmem(unsigned long len);
 void release_progmem(void *ptr);
 
-int __weak vpe_run(struct vpe *v);
+int vpe_run(struct vpe *v);
 void cleanup_tc(struct tc *tc);
 
 int __init vpe_module_init(void);
diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 002c39e..9c4265c 100644
--- a/arch/mips/include/uapi/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
@@ -21,6 +21,8 @@
 #define BRK_DIVZERO	7	/* Divide by zero check */
 #define BRK_RANGE	8	/* Range error check */
 #define BRK_BUG		12	/* Used by BUG() */
+#define BRK_UPROBE	13	/* See <asm/uprobes.h> */
+#define BRK_UPROBE_XOL	14	/* See <asm/uprobes.h> */
 #define BRK_MEMU	514	/* Used by FPU emulator */
 #define BRK_KPROBE_BP	515	/* Kprobe break */
 #define BRK_KPROBE_SSTEPBP 516	/* Kprobe single step software implementation */
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
new file mode 100644
index 0000000..c7484a7
--- /dev/null
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -0,0 +1,8 @@
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_MIPS_R6		(1 << 0)
+#define HWCAP_MIPS_MSA		(1 << 1)
+
+#endif /* _UAPI_ASM_HWCAP_H */
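These bits surface through the ELF auxiliary vector; a hedged userland sketch of the feature test (header path as installed by the uapi export):

/* Sketch (userland, illustrative): test for MSA via AT_HWCAP. */
#include <sys/auxv.h>
#include <asm/hwcap.h>

int have_msa(void)
{
	return (getauxval(AT_HWCAP) & HWCAP_MIPS_MSA) != 0;
}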
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index fc0cf5a..9b44d5a 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -26,7 +26,7 @@
 	cop0_op, cop1_op, cop2_op, cop1x_op,
 	beql_op, bnel_op, blezl_op, bgtzl_op,
 	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
-	spec2_op, jalx_op, mdmx_op, spec3_op,
+	spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
 	lb_op, lh_op, lwl_op, lw_op,
 	lbu_op, lhu_op, lwr_op, lwu_op,
 	sb_op, sh_op, swl_op, sw_op,
@@ -167,8 +167,13 @@
 	fround_op    =	0x0c, ftrunc_op	   =  0x0d,
 	fceil_op     =	0x0e, ffloor_op	   =  0x0f,
 	fmovc_op     =	0x11, fmovz_op	   =  0x12,
-	fmovn_op     =	0x13, frecip_op	   =  0x15,
-	frsqrt_op    =	0x16, fcvts_op	   =  0x20,
+	fmovn_op     =	0x13, fseleqz_op   =  0x14,
+	frecip_op    =  0x15, frsqrt_op    =  0x16,
+	fselnez_op   =  0x17, fmaddf_op    =  0x18,
+	fmsubf_op    =  0x19, frint_op     =  0x1a,
+	fclass_op    =  0x1b, fmin_op      =  0x1c,
+	fmina_op     =  0x1d, fmax_op      =  0x1e,
+	fmaxa_op     =  0x1f, fcvts_op     =  0x20,
 	fcvtd_op     =	0x21, fcvte_op	   =  0x22,
 	fcvtw_op     =	0x24, fcvtl_op	   =  0x25,
 	fcmp_op	     =	0x30
@@ -221,6 +226,24 @@
 };
 
 /*
+ * func field for MSA MI10 format.
+ */
+enum msa_mi10_func {
+	msa_ld_op = 8,
+	msa_st_op = 9,
+};
+
+/*
+ * MSA 2 bit format fields.
+ */
+enum msa_2b_fmt {
+	msa_fmt_b = 0,
+	msa_fmt_h = 1,
+	msa_fmt_w = 2,
+	msa_fmt_d = 3,
+};
+
+/*
  * (microMIPS) Major opcodes.
  */
 enum mm_major_op {
@@ -611,6 +634,16 @@
 	;)))))))
 };
 
+struct msa_mi10_format {		/* MSA MI10 */
+	__BITFIELD_FIELD(unsigned int opcode : 6,
+	__BITFIELD_FIELD(signed int s10 : 10,
+	__BITFIELD_FIELD(unsigned int rs : 5,
+	__BITFIELD_FIELD(unsigned int wd : 5,
+	__BITFIELD_FIELD(unsigned int func : 4,
+	__BITFIELD_FIELD(unsigned int df : 2,
+	;))))))
+};
+
 struct spec3_format {   /* SPEC3 */
 	__BITFIELD_FIELD(unsigned int opcode:6,
 	__BITFIELD_FIELD(unsigned int rs:5,
@@ -888,6 +921,7 @@
 	struct p_format p_format;
 	struct f_format f_format;
 	struct ma_format ma_format;
+	struct msa_mi10_format msa_mi10_format;
 	struct b_format b_format;
 	struct ps_format ps_format;
 	struct v_format v_format;
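A hedged decoder sketch using the new MI10 format, msa_op alias and func/fmt enums; the .word member of union mips_instruction is the existing raw-encoding view.

/*
 * Sketch (illustrative only): recognise an MSA ld.<fmt>/st.<fmt> and pull
 * out its fields.
 */
static int decode_msa_ls_sketch(u32 word, unsigned int *wd, unsigned int *rs,
				int *offset)
{
	union mips_instruction insn = { .word = word };

	if (insn.msa_mi10_format.opcode != msa_op)
		return 0;
	if (insn.msa_mi10_format.func != msa_ld_op &&
	    insn.msa_mi10_format.func != msa_st_op)
		return 0;

	*wd = insn.msa_mi10_format.wd;
	*rs = insn.msa_mi10_format.rs;
	*offset = insn.msa_mi10_format.s10;	/* signed 10-bit element offset */
	return 1;
}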
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 9081d88..5cbd9ae 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,6 +12,18 @@
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
+/* scalar FP context was used */
+#define USED_FP			(1 << 0)
+
+/* the value of Status.FR when context was saved */
+#define USED_FR1		(1 << 1)
+
+/* FR=1, but with odd singles in bits 63:32 of preceding even double */
+#define USED_HYBRID_FPRS	(1 << 2)
+
+/* extended context was used, see struct extcontext for details */
+#define USED_EXTCONTEXT		(1 << 3)
+
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index 8f2d184..c4ddc4f 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -16,11 +16,13 @@
 #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) ||		\
     defined(_MIPS_ARCH_LOONGSON3A)
 
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u16 __arch_swab16(__u16 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
+	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	.set	pop			\n"
 	: "=r" (x)
@@ -30,11 +32,13 @@
 }
 #define __arch_swab16 __arch_swab16
 
-static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u32 __arch_swab32(__u32 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips32r2		\n"
+	"	.set	nomips16		\n"
 	"	wsbh	%0, %1			\n"
 	"	rotr	%0, %0, 16		\n"
 	"	.set	pop			\n"
@@ -50,11 +54,13 @@
  * 64-bit kernel on r2 CPUs.
  */
 #ifdef __mips64
-static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+static inline __attribute__((nomips16)) __attribute_const__
+		__u64 __arch_swab64(__u64 x)
 {
 	__asm__(
 	"	.set	push			\n"
 	"	.set	arch=mips64r2		\n"
+	"	.set	nomips16		\n"
 	"	dsbh	%0, %1			\n"
 	"	dshd	%0, %0			\n"
 	"	.set	pop			\n"
diff --git a/arch/mips/include/uapi/asm/ucontext.h b/arch/mips/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000..2320144
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ucontext.h
@@ -0,0 +1,65 @@
+#ifndef __MIPS_UAPI_ASM_UCONTEXT_H
+#define __MIPS_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct extcontext - extended context header structure
+ * @magic:	magic value identifying the type of extended context
+ * @size:	the size in bytes of the enclosing structure
+ *
+ * Extended context structures provide context which does not fit within struct
+ * sigcontext. They are placed sequentially in memory at the end of struct
+ * ucontext and struct sigframe, with each extended context structure beginning
+ * with a header defined by this struct. The type of context represented is
+ * indicated by the magic field. Userland may check each extended context
+ * structure against magic values that it recognises. The size field allows any
+ * unrecognised context to be skipped, allowing for future expansion. The end
+ * of the extended context data is indicated by the magic value
+ * END_EXTCONTEXT_MAGIC.
+ */
+struct extcontext {
+	unsigned int		magic;
+	unsigned int		size;
+};
+
+/**
+ * struct msa_extcontext - MSA extended context structure
+ * @ext:	the extended context header, with magic == MSA_EXTCONTEXT_MAGIC
+ * @wr:		the most significant 64 bits of each MSA vector register
+ * @csr:	the value of the MSA control & status register
+ *
+ * If MSA context is live for a task at the time a signal is delivered to it,
+ * this structure will hold the MSA context of the task as it was prior to the
+ * signal delivery.
+ */
+struct msa_extcontext {
+	struct extcontext	ext;
+#define MSA_EXTCONTEXT_MAGIC	0x784d5341	/* xMSA */
+
+	unsigned long long	wr[32];
+	unsigned int		csr;
+};
+
+#define END_EXTCONTEXT_MAGIC	0x78454e44	/* xEND */
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags:		historic flags field, as in asm-generic
+ * @uc_link:		context to be resumed when this one returns
+ * @uc_stack:		the stack used by this context
+ * @uc_mcontext:	holds basic processor state
+ * @uc_sigmask:		the set of signals blocked while in this context
+ * @uc_extcontext:	holds extended processor state
+ */
+struct ucontext {
+	/* Historic fields matching asm-generic */
+	unsigned long		uc_flags;
+	struct ucontext		*uc_link;
+	stack_t			uc_stack;
+	struct sigcontext	uc_mcontext;
+	sigset_t		uc_sigmask;
+
+	/* Extended context structures may follow ucontext */
+	unsigned long long	uc_extcontext[0];
+};
+
+#endif /* __MIPS_UAPI_ASM_UCONTEXT_H */
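
For userland, the layout defined above means the memory following struct
ucontext is a sequence of records, each headed by a (magic, size) pair and
terminated by END_EXTCONTEXT_MAGIC. A minimal sketch of how a signal handler
installed with SA_SIGINFO could locate the MSA record (the helper name is
illustrative, not part of this patch):

static struct msa_extcontext *find_msa_extcontext(struct ucontext *uc)
{
	struct extcontext *ext = (struct extcontext *)uc->uc_extcontext;

	while (ext->magic != END_EXTCONTEXT_MAGIC) {
		if (ext->magic == MSA_EXTCONTEXT_MAGIC)
			return (struct msa_extcontext *)ext;
		/*
		 * size covers the whole enclosing structure, so records
		 * with an unrecognised magic can simply be skipped
		 */
		ext = (struct extcontext *)((char *)ext + ext->size);
	}
	return NULL;
}
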
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index e1ea4f6..5d6828b 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -110,18 +110,11 @@
 	}
 }
 
-static void r4030_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 struct clock_event_device r4030_clockevent = {
 	.name		= "r4030",
 	.features	= CLOCK_EVT_FEAT_PERIODIC,
 	.rating		= 300,
 	.irq		= JAZZ_TIMER_IRQ,
-	.set_mode	= r4030_set_mode,
 };
 
 static irqreturn_t r4030_timer_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 54c80d4..6cd69fd 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -231,6 +231,13 @@
 	return 0;
 }
 
+static int jz_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	struct jz_gpio_chip *jz_gpio = gpio_chip_to_jz_gpio_chip(chip);
+
+	return jz_gpio->irq_base + gpio;
+}
+
 int jz_gpio_port_direction_input(int port, uint32_t mask)
 {
 	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));
@@ -262,18 +269,6 @@
 }
 EXPORT_SYMBOL(jz_gpio_port_get_value);
 
-int gpio_to_irq(unsigned gpio)
-{
-	return JZ4740_IRQ_GPIO(0) + gpio;
-}
-EXPORT_SYMBOL_GPL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	return irq - JZ4740_IRQ_GPIO(0);
-}
-EXPORT_SYMBOL_GPL(irq_to_gpio);
-
 #define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
 
 static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
@@ -403,6 +398,7 @@
 		.get = jz_gpio_get_value, \
 		.direction_output = jz_gpio_direction_output, \
 		.direction_input = jz_gpio_direction_input, \
+		.to_irq = jz_gpio_to_irq, \
 		.base = JZ4740_GPIO_BASE_ ## _bank, \
 		.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
 	}, \
@@ -423,8 +419,8 @@
 	chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);
 
 	chip->irq = JZ4740_IRQ_INTC_GPIO(id);
-	irq_set_handler_data(chip->irq, chip);
-	irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
+	irq_set_chained_handler_and_data(chip->irq,
+					 jz_gpio_irq_demux_handler, chip);
 
 	gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
 		chip->base, handle_level_irq);
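
With the .to_irq callback wired into the gpio_chip above, the lookup that the
removed gpio_to_irq()/irq_to_gpio() exports used to provide now goes through
gpiolib. A minimal consumer sketch (handler and device names are
hypothetical):

	int irq = gpio_to_irq(gpio);	/* resolved via chip->to_irq() */

	if (irq < 0)
		return irq;
	return request_irq(irq, my_handler, IRQF_TRIGGER_RISING,
			   "my-device", dev);
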
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 7ab47fe..1f7ca2c 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -58,7 +58,7 @@
 
 	jz4740_timer_ack_full(TIMER_CLOCKEVENT);
 
-	if (cd->mode != CLOCK_EVT_MODE_PERIODIC)
+	if (!clockevent_state_periodic(cd))
 		jz4740_timer_disable(TIMER_CLOCKEVENT);
 
 	cd->event_handler(cd);
@@ -66,24 +66,29 @@
 	return IRQ_HANDLED;
 }
 
-static void jz4740_clockevent_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *cd)
+static int jz4740_clockevent_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
-		jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
-	case CLOCK_EVT_MODE_RESUME:
-		jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
-		jz4740_timer_enable(TIMER_CLOCKEVENT);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		jz4740_timer_disable(TIMER_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
+	jz4740_timer_set_count(TIMER_CLOCKEVENT, 0);
+	jz4740_timer_set_period(TIMER_CLOCKEVENT, jz4740_jiffies_per_tick);
+	jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
+	jz4740_timer_enable(TIMER_CLOCKEVENT);
+
+	return 0;
+}
+
+static int jz4740_clockevent_resume(struct clock_event_device *evt)
+{
+	jz4740_timer_irq_full_enable(TIMER_CLOCKEVENT);
+	jz4740_timer_enable(TIMER_CLOCKEVENT);
+
+	return 0;
+}
+
+static int jz4740_clockevent_shutdown(struct clock_event_device *evt)
+{
+	jz4740_timer_disable(TIMER_CLOCKEVENT);
+
+	return 0;
 }
 
 static int jz4740_clockevent_set_next(unsigned long evt,
@@ -100,7 +105,10 @@
 	.name = "jz4740-timer",
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event = jz4740_clockevent_set_next,
-	.set_mode = jz4740_clockevent_set_mode,
+	.set_state_shutdown = jz4740_clockevent_shutdown,
+	.set_state_periodic = jz4740_clockevent_set_periodic,
+	.set_state_oneshot = jz4740_clockevent_shutdown,
+	.tick_resume = jz4740_clockevent_resume,
 	.rating = 200,
 #ifdef CONFIG_MACH_JZ4740
 	.irq = JZ4740_IRQ_TCU0,
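
The change above is one of many clockevent conversions in this pull: the
multiplexed set_mode(enum clock_event_mode, ...) callback is replaced by
per-state callbacks (set_state_shutdown/set_state_periodic/set_state_oneshot
and tick_resume) which return 0 on success. A generic sketch of the resulting
shape, for illustration only (the foo_* names and foo_hw_* hardware helpers
are hypothetical):

static int foo_timer_shutdown(struct clock_event_device *evt)
{
	foo_hw_stop();			/* hypothetical hardware helper */
	return 0;
}

static int foo_timer_set_periodic(struct clock_event_device *evt)
{
	foo_hw_start_periodic(HZ);	/* hypothetical hardware helper */
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 200,
	.set_state_shutdown	= foo_timer_shutdown,
	.set_state_periodic	= foo_timer_set_periodic,
	.tick_resume		= foo_timer_shutdown,
};
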
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 3f5cf8a..d982be1 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -61,7 +61,6 @@
 obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
 obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
 
-obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
 obj-$(CONFIG_MIPS_MSC)		+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
@@ -100,6 +99,7 @@
 obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event_mipsxx.o
 
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
+obj-$(CONFIG_UPROBES)		+= uprobes.o
 
 obj-$(CONFIG_MIPS_CM)		+= mips-cm.o
 obj-$(CONFIG_MIPS_CPC)		+= mips-cpc.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 072fab1..154e203 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -128,6 +128,7 @@
 	       thread.cp0_baduaddr);
 	OFFSET(THREAD_ECODE, task_struct, \
 	       thread.error_code);
+	OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
 	BLANK();
 }
 
@@ -245,17 +246,6 @@
 }
 #endif
 
-#ifdef CONFIG_MIPS32_COMPAT
-void output_sc32_defines(void)
-{
-	COMMENT("Linux 32-bit sigcontext offsets.");
-	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
-	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
-	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-	BLANK();
-}
-#endif
-
 void output_signal_defined(void)
 {
 	COMMENT("Linux signal numbers.");
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 7976457..940ac00 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -40,8 +40,8 @@
  * The general purpose timer ticks at 1MHz independent of
  * the rest of the system
  */
-static void sibyte_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
 {
 	unsigned int cpu = smp_processor_id();
 	void __iomem *cfg, *init;
@@ -49,24 +49,22 @@
 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writeq(0, cfg);
-		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
-		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
-			     cfg);
-		break;
+	__raw_writeq(0, cfg);
+	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Stop the timer until we actually program a shot */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writeq(0, cfg);
-		break;
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+	unsigned int cpu = smp_processor_id();
+	void __iomem *cfg;
 
-	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
-	case CLOCK_EVT_MODE_RESUME:
-		;
-	}
+	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+	/* Stop the timer until we actually program a shot */
+	__raw_writeq(0, cfg);
+	return 0;
 }
 
 static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -91,7 +89,7 @@
 	void __iomem *cfg;
 	unsigned long tmode;
 
-	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(cd))
 		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
 	else
 		tmode = 0;
@@ -130,7 +128,9 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
-	cd->set_mode		= sibyte_set_mode;
+	cd->set_state_shutdown	= sibyte_shutdown;
+	cd->set_state_periodic	= sibyte_set_periodic;
+	cd->set_state_oneshot	= sibyte_shutdown;
 	clockevents_register_device(cd);
 
 	bcm1480_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ff1f01b..77a5ddf 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -59,27 +59,32 @@
 	return -EINVAL;
 }
 
-static void ds1287_set_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int ds1287_shutdown(struct clock_event_device *evt)
 {
 	u8 val;
 
 	spin_lock(&rtc_lock);
 
 	val = CMOS_READ(RTC_REG_B);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		val |= RTC_PIE;
-		break;
-	default:
-		val &= ~RTC_PIE;
-		break;
-	}
-
+	val &= ~RTC_PIE;
 	CMOS_WRITE(val, RTC_REG_B);
 
 	spin_unlock(&rtc_lock);
+	return 0;
+}
+
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+	u8 val;
+
+	spin_lock(&rtc_lock);
+
+	val = CMOS_READ(RTC_REG_B);
+	val |= RTC_PIE;
+	CMOS_WRITE(val, RTC_REG_B);
+
+	spin_unlock(&rtc_lock);
+	return 0;
 }
 
 static void ds1287_event_handler(struct clock_event_device *dev)
@@ -87,11 +92,13 @@
 }
 
 static struct clock_event_device ds1287_clockevent = {
-	.name		= "ds1287",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.set_next_event = ds1287_set_next_event,
-	.set_mode	= ds1287_set_mode,
-	.event_handler	= ds1287_event_handler,
+	.name			= "ds1287",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.set_next_event		= ds1287_set_next_event,
+	.set_state_shutdown	= ds1287_shutdown,
+	.set_state_periodic	= ds1287_set_periodic,
+	.tick_resume		= ds1287_shutdown,
+	.event_handler		= ds1287_event_handler,
 };
 
 static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index f069460..6604005 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -64,8 +64,7 @@
 	return 0;
 }
 
-static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
 {
 	u32 ctrl;
 
@@ -73,21 +72,39 @@
 
 	ctrl = GT_READ(GT_TC_CONTROL_OFS);
 	ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		ctrl |= GT_TC_CONTROL_ENTC0_MSK;
-		break;
-	default:
-		break;
-	}
-
 	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
 
 	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+	u32 ctrl;
+
+	raw_spin_lock(&gt641xx_timer_lock);
+
+	ctrl = GT_READ(GT_TC_CONTROL_OFS);
+	ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+	ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
+}
+
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+	u32 ctrl;
+
+	raw_spin_lock(&gt641xx_timer_lock);
+
+	ctrl = GT_READ(GT_TC_CONTROL_OFS);
+	ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+	GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+	raw_spin_unlock(&gt641xx_timer_lock);
+	return 0;
 }
 
 static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
@@ -95,12 +112,16 @@
 }
 
 static struct clock_event_device gt641xx_timer0_clockevent = {
-	.name		= "gt641xx-timer0",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.irq		= GT641XX_TIMER0_IRQ,
-	.set_next_event = gt641xx_timer0_set_next_event,
-	.set_mode	= gt641xx_timer0_set_mode,
-	.event_handler	= gt641xx_timer0_event_handler,
+	.name			= "gt641xx-timer0",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.irq			= GT641XX_TIMER0_IRQ,
+	.set_next_event		= gt641xx_timer0_set_next_event,
+	.set_state_shutdown	= gt641xx_timer0_shutdown,
+	.set_state_periodic	= gt641xx_timer0_set_periodic,
+	.set_state_oneshot	= gt641xx_timer0_set_oneshot,
+	.tick_resume		= gt641xx_timer0_shutdown,
+	.event_handler		= gt641xx_timer0_event_handler,
 };
 
 static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index d70c4d8..8dfe6a6 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -28,12 +28,6 @@
 	return res;
 }
 
-void mips_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 int cp0_timer_irq_installed;
 
@@ -174,6 +168,11 @@
 	return 1;
 }
 
+unsigned int __weak get_c0_compare_int(void)
+{
+	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
 int r4k_clockevent_init(void)
 {
 	unsigned int cpu = smp_processor_id();
@@ -189,11 +188,9 @@
 	/*
 	 * With vectored interrupts things are getting platform specific.
 	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of it's liking.
+	 * interrupt number of its liking.
 	 */
-	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
-	if (get_c0_compare_int)
-		irq = get_c0_compare_int();
+	irq = get_c0_compare_int();
 
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
@@ -212,7 +209,6 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
 	clockevents_register_device(cd);
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 5ea6d6b..3d860ef 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -38,8 +38,20 @@
  * The general purpose timer ticks at 1MHz independent of
  * the rest of the system
  */
-static void sibyte_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+	void __iomem *cfg;
+
+	cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+	/* Stop the timer until we actually program a shot */
+	__raw_writeq(0, cfg);
+
+	return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
 {
 	unsigned int cpu = smp_processor_id();
 	void __iomem *cfg, *init;
@@ -47,24 +59,11 @@
 	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
 	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writeq(0, cfg);
-		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
-		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
-			     cfg);
-		break;
+	__raw_writeq(0, cfg);
+	__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+	__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Stop the timer until we actually program a shot */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writeq(0, cfg);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
-	case CLOCK_EVT_MODE_RESUME:
-		;
-	}
+	return 0;
 }
 
 static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
@@ -89,7 +88,7 @@
 	void __iomem *cfg;
 	unsigned long tmode;
 
-	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(cd))
 		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
 	else
 		tmode = 0;
@@ -129,7 +128,9 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= sibyte_next_event;
-	cd->set_mode		= sibyte_set_mode;
+	cd->set_state_shutdown	= sibyte_shutdown;
+	cd->set_state_periodic	= sibyte_set_periodic;
+	cd->set_state_oneshot	= sibyte_shutdown;
 	clockevents_register_device(cd);
 
 	sb1250_mask_irq(cpu, irq);
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 7239324..537eefd 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -85,36 +85,54 @@
 	__raw_writel(0, &tmrptr->tisr);
 }
 
-static void txx9tmr_set_mode(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
 {
 	struct txx9_clock_event_device *txx9_cd =
 		container_of(evt, struct txx9_clock_event_device, cd);
 	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
 
 	txx9tmr_stop_and_clear(tmrptr);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
-			     &tmrptr->itmr);
-		/* start timer */
-		__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
-			     evt->shift,
-			     &tmrptr->cpra);
-		__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		__raw_writel(0, &tmrptr->itmr);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		__raw_writel(TIMER_CCD, &tmrptr->ccdr);
-		__raw_writel(0, &tmrptr->itmr);
-		break;
-	}
+
+	__raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+	/* start timer */
+	__raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+		     &tmrptr->cpra);
+	__raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+	return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+	return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(0, &tmrptr->itmr);
+	return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+	struct txx9_clock_event_device *txx9_cd =
+		container_of(evt, struct txx9_clock_event_device, cd);
+	struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+	txx9tmr_stop_and_clear(tmrptr);
+	__raw_writel(TIMER_CCD, &tmrptr->ccdr);
+	__raw_writel(0, &tmrptr->itmr);
+	return 0;
 }
 
 static int txx9tmr_set_next_event(unsigned long delta,
@@ -133,12 +151,15 @@
 
 static struct txx9_clock_event_device txx9_clock_event_device = {
 	.cd = {
-		.name		= "TXx9",
-		.features	= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
-		.rating		= 200,
-		.set_mode	= txx9tmr_set_mode,
-		.set_next_event = txx9tmr_set_next_event,
+		.name			= "TXx9",
+		.features		= CLOCK_EVT_FEAT_PERIODIC |
+					  CLOCK_EVT_FEAT_ONESHOT,
+		.rating			= 200,
+		.set_state_shutdown	= txx9tmr_set_state_shutdown,
+		.set_state_periodic	= txx9tmr_set_state_periodic,
+		.set_state_oneshot	= txx9tmr_set_state_oneshot,
+		.tick_resume		= txx9tmr_tick_resume,
+		.set_next_event		= txx9tmr_set_next_event,
 	},
 };
 
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 1b6ca63..9f71c06 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -152,7 +152,7 @@
 
 	/* Enter the coherent domain */
 	li	t0, 0xff
-	PTR_S	t0, GCR_CL_COHERENCE_OFS(v1)
+	sw	t0, GCR_CL_COHERENCE_OFS(v1)
 	ehb
 
 	/* Jump to kseg0 */
@@ -302,7 +302,7 @@
 	PTR_L	t0, 0(t0)
 
 	/* Calculate a pointer to this core's struct core_boot_config */
-	PTR_L	t0, GCR_CL_ID_OFS(t0)
+	lw	t0, GCR_CL_ID_OFS(t0)
 	li	t1, COREBOOTCFG_SIZE
 	mul	t0, t0, t1
 	PTR_LA	t1, mips_cps_core_bootcfg
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dbe0792..571a8e6 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -32,6 +32,9 @@
 #include <asm/spram.h>
 #include <asm/uaccess.h>
 
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+
 /*
  * Get the FPU Implementation/Revision.
  */
@@ -188,7 +191,7 @@
 static int mips_ftlb_disabled;
 static int mips_has_ftlb_configured;
 
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable);
 
 static int __init ftlb_disable(char *s)
 {
@@ -202,7 +205,10 @@
 		return 1;
 
 	/* Disable it in the boot cpu */
-	set_ftlb_enable(&cpu_data[0], 0);
+	if (set_ftlb_enable(&cpu_data[0], 0)) {
+		pr_warn("Can't turn FTLB off\n");
+		return 1;
+	}
 
 	back_to_back_c0_hazard();
 
@@ -364,30 +370,41 @@
 		return 3;
 }
 
-static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
-	unsigned int config6;
+	unsigned int config;
 
 	/* It's implementation dependent how the FTLB can be enabled */
 	switch (c->cputype) {
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 		/* proAptiv & related cores use Config6 to enable the FTLB */
-		config6 = read_c0_config6();
+		config = read_c0_config6();
 		/* Clear the old probability value */
-		config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
+		config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
 		if (enable)
 			/* Enable FTLB */
-			write_c0_config6(config6 |
+			write_c0_config6(config |
 					 (calculate_ftlb_probability(c)
 					  << MIPS_CONF6_FTLBP_SHIFT)
 					 | MIPS_CONF6_FTLBEN);
 		else
 			/* Disable FTLB */
-			write_c0_config6(config6 &  ~MIPS_CONF6_FTLBEN);
-		back_to_back_c0_hazard();
+			write_c0_config6(config &  ~MIPS_CONF6_FTLBEN);
 		break;
+	case CPU_I6400:
+		/* I6400 & related cores use Config7 to configure FTLB */
+		config = read_c0_config7();
+		/* Clear the old probability value */
+		config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT);
+		write_c0_config7(config | (calculate_ftlb_probability(c)
+					   << MIPS_CONF7_FTLBP_SHIFT));
+		break;
+	default:
+		return 1;
 	}
+
+	return 0;
 }
 
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -524,6 +541,8 @@
 	}
 	if (config3 & MIPS_CONF3_CDMM)
 		c->options |= MIPS_CPU_CDMM;
+	if (config3 & MIPS_CONF3_SP)
+		c->options |= MIPS_CPU_SP;
 
 	return config3 & MIPS_CONF_M;
 }
@@ -540,7 +559,16 @@
 	if (cpu_has_tlb) {
 		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
 			c->options |= MIPS_CPU_TLBINV;
-		mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		/*
+		 * This is a bit ugly. R6 has dropped that field from
+		 * config4 and the only valid configuration is VTLB+FTLB so
+		 * set a good value for mmuextdef for that case.
+		 */
+		if (cpu_has_mips_r6)
+			mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+		else
+			mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+
 		switch (mmuextdef) {
 		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
 			c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
@@ -1121,6 +1149,10 @@
 		c->cputype = CPU_P5600;
 		__cpu_name[cpu] = "MIPS P5600";
 		break;
+	case PRID_IMP_I6400:
+		c->cputype = CPU_I6400;
+		__cpu_name[cpu] = "MIPS I6400";
+		break;
 	case PRID_IMP_M5150:
 		c->cputype = CPU_M5150;
 		__cpu_name[cpu] = "MIPS M5150";
@@ -1492,10 +1524,14 @@
 	else
 		c->srsets = 1;
 
+	if (cpu_has_mips_r6)
+		elf_hwcap |= HWCAP_MIPS_R6;
+
 	if (cpu_has_msa) {
 		c->msa_id = cpu_get_msa_id();
 		WARN(c->msa_id & MSA_IR_WRPF,
 		     "Vector register partitioning unimplemented!");
+		elf_hwcap |= HWCAP_MIPS_MSA;
 	}
 
 	cpu_probe_vmbits(c);
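
Since cpu_probe() now populates elf_hwcap, userland can detect these features
through the ELF auxiliary vector instead of parsing /proc/cpuinfo. A small
sketch assuming the HWCAP_MIPS_* constants from the uapi hwcap header (the
use_*_routines() helpers are hypothetical):

#include <sys/auxv.h>
#include <asm/hwcap.h>		/* HWCAP_MIPS_R6, HWCAP_MIPS_MSA */

static void select_code_paths(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_MIPS_MSA)
		use_msa_routines();	/* hypothetical */
	if (hwcap & HWCAP_MIPS_R6)
		use_r6_routines();	/* hypothetical */
}
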
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
deleted file mode 100644
index 74f6752..0000000
--- a/arch/mips/kernel/i8259.c
+++ /dev/null
@@ -1,384 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Code to handle x86 style IRQs plus some generic interrupt stuff.
- *
- * Copyright (C) 1992 Linus Torvalds
- * Copyright (C) 1994 - 2000 Ralf Baechle
- */
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/kernel.h>
-#include <linux/of_irq.h>
-#include <linux/spinlock.h>
-#include <linux/syscore_ops.h>
-#include <linux/irq.h>
-
-#include <asm/i8259.h>
-#include <asm/io.h>
-
-#include "../../drivers/irqchip/irqchip.h"
-
-/*
- * This is the 'legacy' 8259A Programmable Interrupt Controller,
- * present in the majority of PC/AT boxes.
- * plus some generic x86 specific things if generic specifics makes
- * any sense at all.
- * this file should become arch/i386/kernel/irq.c when the old irq.c
- * moves to arch independent land
- */
-
-static int i8259A_auto_eoi = -1;
-DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(struct irq_data *d);
-static void enable_8259A_irq(struct irq_data *d);
-static void mask_and_ack_8259A(struct irq_data *d);
-static void init_8259A(int auto_eoi);
-
-static struct irq_chip i8259A_chip = {
-	.name			= "XT-PIC",
-	.irq_mask		= disable_8259A_irq,
-	.irq_disable		= disable_8259A_irq,
-	.irq_unmask		= enable_8259A_irq,
-	.irq_mask_ack		= mask_and_ack_8259A,
-};
-
-/*
- * 8259A PIC functions to handle ISA devices:
- */
-
-/*
- * This contains the irq mask for both 8259A irq controllers,
- */
-static unsigned int cached_irq_mask = 0xffff;
-
-#define cached_master_mask	(cached_irq_mask)
-#define cached_slave_mask	(cached_irq_mask >> 8)
-
-static void disable_8259A_irq(struct irq_data *d)
-{
-	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
-	unsigned long flags;
-
-	mask = 1 << irq;
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-	cached_irq_mask |= mask;
-	if (irq & 8)
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-	else
-		outb(cached_master_mask, PIC_MASTER_IMR);
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-static void enable_8259A_irq(struct irq_data *d)
-{
-	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
-	unsigned long flags;
-
-	mask = ~(1 << irq);
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-	cached_irq_mask &= mask;
-	if (irq & 8)
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-	else
-		outb(cached_master_mask, PIC_MASTER_IMR);
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-int i8259A_irq_pending(unsigned int irq)
-{
-	unsigned int mask;
-	unsigned long flags;
-	int ret;
-
-	irq -= I8259A_IRQ_BASE;
-	mask = 1 << irq;
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-	if (irq < 8)
-		ret = inb(PIC_MASTER_CMD) & mask;
-	else
-		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
-	return ret;
-}
-
-void make_8259A_irq(unsigned int irq)
-{
-	disable_irq_nosync(irq);
-	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
-	enable_irq(irq);
-}
-
-/*
- * This function assumes to be called rarely. Switching between
- * 8259A registers is slow.
- * This has to be protected by the irq controller spinlock
- * before being called.
- */
-static inline int i8259A_irq_real(unsigned int irq)
-{
-	int value;
-	int irqmask = 1 << irq;
-
-	if (irq < 8) {
-		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
-		value = inb(PIC_MASTER_CMD) & irqmask;
-		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
-		return value;
-	}
-	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
-	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
-	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
-	return value;
-}
-
-/*
- * Careful! The 8259A is a fragile beast, it pretty
- * much _has_ to be done exactly like this (mask it
- * first, _then_ send the EOI, and the order of EOI
- * to the two 8259s is important!
- */
-static void mask_and_ack_8259A(struct irq_data *d)
-{
-	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
-	unsigned long flags;
-
-	irqmask = 1 << irq;
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-	/*
-	 * Lightweight spurious IRQ detection. We do not want
-	 * to overdo spurious IRQ handling - it's usually a sign
-	 * of hardware problems, so we only do the checks we can
-	 * do without slowing down good hardware unnecessarily.
-	 *
-	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
-	 * usually resulting from the 8259A-1|2 PICs) occur
-	 * even if the IRQ is masked in the 8259A. Thus we
-	 * can check spurious 8259A IRQs without doing the
-	 * quite slow i8259A_irq_real() call for every IRQ.
-	 * This does not cover 100% of spurious interrupts,
-	 * but should be enough to warn the user that there
-	 * is something bad going on ...
-	 */
-	if (cached_irq_mask & irqmask)
-		goto spurious_8259A_irq;
-	cached_irq_mask |= irqmask;
-
-handle_real_irq:
-	if (irq & 8) {
-		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
-		outb(cached_slave_mask, PIC_SLAVE_IMR);
-		outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
-		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
-	} else {
-		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
-		outb(cached_master_mask, PIC_MASTER_IMR);
-		outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
-	}
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-	return;
-
-spurious_8259A_irq:
-	/*
-	 * this is the slow path - should happen rarely.
-	 */
-	if (i8259A_irq_real(irq))
-		/*
-		 * oops, the IRQ _is_ in service according to the
-		 * 8259A - not spurious, go handle it.
-		 */
-		goto handle_real_irq;
-
-	{
-		static int spurious_irq_mask;
-		/*
-		 * At this point we can be sure the IRQ is spurious,
-		 * lets ACK and report it. [once per IRQ]
-		 */
-		if (!(spurious_irq_mask & irqmask)) {
-			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
-			spurious_irq_mask |= irqmask;
-		}
-		atomic_inc(&irq_err_count);
-		/*
-		 * Theoretically we do not have to handle this IRQ,
-		 * but in Linux this does not cause problems and is
-		 * simpler for us.
-		 */
-		goto handle_real_irq;
-	}
-}
-
-static void i8259A_resume(void)
-{
-	if (i8259A_auto_eoi >= 0)
-		init_8259A(i8259A_auto_eoi);
-}
-
-static void i8259A_shutdown(void)
-{
-	/* Put the i8259A into a quiescent state that
-	 * the kernel initialization code can get it
-	 * out of.
-	 */
-	if (i8259A_auto_eoi >= 0) {
-		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
-	}
-}
-
-static struct syscore_ops i8259_syscore_ops = {
-	.resume = i8259A_resume,
-	.shutdown = i8259A_shutdown,
-};
-
-static int __init i8259A_init_sysfs(void)
-{
-	register_syscore_ops(&i8259_syscore_ops);
-	return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
-static void init_8259A(int auto_eoi)
-{
-	unsigned long flags;
-
-	i8259A_auto_eoi = auto_eoi;
-
-	raw_spin_lock_irqsave(&i8259A_lock, flags);
-
-	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
-
-	/*
-	 * outb_p - this has to work on a wide range of PC hardware.
-	 */
-	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
-	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
-	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
-	if (auto_eoi)	/* master does Auto EOI */
-		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
-	else		/* master expects normal EOI */
-		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
-
-	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
-	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
-	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
-	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
-	if (auto_eoi)
-		/*
-		 * In AEOI mode we just have to mask the interrupt
-		 * when acking.
-		 */
-		i8259A_chip.irq_mask_ack = disable_8259A_irq;
-	else
-		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
-
-	udelay(100);		/* wait for 8259A to initialize */
-
-	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
-	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
-
-	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-	.handler = no_action,
-	.name = "cascade",
-	.flags = IRQF_NO_THREAD,
-};
-
-static struct resource pic1_io_resource = {
-	.name = "pic1",
-	.start = PIC_MASTER_CMD,
-	.end = PIC_MASTER_IMR,
-	.flags = IORESOURCE_BUSY
-};
-
-static struct resource pic2_io_resource = {
-	.name = "pic2",
-	.start = PIC_SLAVE_CMD,
-	.end = PIC_SLAVE_IMR,
-	.flags = IORESOURCE_BUSY
-};
-
-static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
-				 irq_hw_number_t hw)
-{
-	irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
-	irq_set_probe(virq);
-	return 0;
-}
-
-static struct irq_domain_ops i8259A_ops = {
-	.map = i8259A_irq_domain_map,
-	.xlate = irq_domain_xlate_onecell,
-};
-
-/*
- * On systems with i8259-style interrupt controllers we assume for
- * driver compatibility reasons interrupts 0 - 15 to be the i8259
- * interrupts even if the hardware uses a different interrupt numbering.
- */
-struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
-{
-	struct irq_domain *domain;
-
-	insert_resource(&ioport_resource, &pic1_io_resource);
-	insert_resource(&ioport_resource, &pic2_io_resource);
-
-	init_8259A(0);
-
-	domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
-				       &i8259A_ops, NULL);
-	if (!domain)
-		panic("Failed to add i8259 IRQ domain");
-
-	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
-	return domain;
-}
-
-void __init init_i8259_irqs(void)
-{
-	__init_i8259_irqs(NULL);
-}
-
-static void i8259_irq_dispatch(unsigned int irq, struct irq_desc *desc)
-{
-	struct irq_domain *domain = irq_get_handler_data(irq);
-	int hwirq = i8259_irq();
-
-	if (hwirq < 0)
-		return;
-
-	irq = irq_linear_revmap(domain, hwirq);
-	generic_handle_irq(irq);
-}
-
-int __init i8259_of_init(struct device_node *node, struct device_node *parent)
-{
-	struct irq_domain *domain;
-	unsigned int parent_irq;
-
-	parent_irq = irq_of_parse_and_map(node, 0);
-	if (!parent_irq) {
-		pr_err("Failed to map i8259 parent IRQ\n");
-		return -ENODEV;
-	}
-
-	domain = __init_i8259_irqs(node);
-	irq_set_handler_data(parent_irq, domain);
-	irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
-	return 0;
-}
-IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index e4f62b7..ab1478d 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -196,6 +196,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index dda800e..3e586da 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -51,7 +51,7 @@
 	/* Target must have the right alignment and ISA must be preserved. */
 	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
 		insn.j_format.target = e->target >> J_RANGE_SHIFT;
 	} else {
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 85bbe9b..b8ceee5 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -15,11 +15,131 @@
 
 void __iomem *mips_cm_base;
 void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+	"mem",	"gcr",	"gic",	"mmio",
+	"0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transaction type */
+static char *cm3_tr[16] = {
+	[0x0] = "ReqNoData",
+	[0x1] = "0x1",
+	[0x2] = "ReqWData",
+	[0x3] = "0x3",
+	[0x4] = "IReqNoResp",
+	[0x5] = "IReqWResp",
+	[0x6] = "IReqNoRespDat",
+	[0x7] = "IReqWRespDat",
+	[0x8] = "RespNoData",
+	[0x9] = "RespDataFol",
+	[0xa] = "RespWData",
+	[0xb] = "RespDataOnly",
+	[0xc] = "IRespNoData",
+	[0xd] = "IRespDataFol",
+	[0xe] = "IRespWData",
+	[0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+	[0x00] = "0x00",
+	[0x01] = "Legacy Write",
+	[0x02] = "Legacy Read",
+	[0x03] = "0x03",
+	[0x04] = "0x04",
+	[0x05] = "0x05",
+	[0x06] = "0x06",
+	[0x07] = "0x07",
+	[0x08] = "Coherent Read Own",
+	[0x09] = "Coherent Read Share",
+	[0x0a] = "Coherent Read Discard",
+	[0x0b] = "Coherent Ready Share Always",
+	[0x0c] = "Coherent Upgrade",
+	[0x0d] = "Coherent Writeback",
+	[0x0e] = "0x0e",
+	[0x0f] = "0x0f",
+	[0x10] = "Coherent Copyback",
+	[0x11] = "Coherent Copyback Invalidate",
+	[0x12] = "Coherent Invalidate",
+	[0x13] = "Coherent Write Invalidate",
+	[0x14] = "Coherent Completion Sync",
+	[0x15] = "0x15",
+	[0x16] = "0x16",
+	[0x17] = "0x17",
+	[0x18] = "0x18",
+	[0x19] = "0x19",
+	[0x1a] = "0x1a",
+	[0x1b] = "0x1b",
+	[0x1c] = "0x1c",
+	[0x1d] = "0x1d",
+	[0x1e] = "0x1e",
+	[0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+	[0x0] = "Legacy Read",
+	[0x1] = "Legacy Write",
+	[0x2] = "Coherent Read Own",
+	[0x3] = "Coherent Read Share",
+	[0x4] = "Coherent Read Discard",
+	[0x5] = "Coherent Evicted",
+	[0x6] = "Coherent Upgrade",
+	[0x7] = "Coherent Upgrade for Store Conditional",
+	[0x8] = "Coherent Writeback",
+	[0x9] = "Coherent Write Invalidate",
+	[0xa] = "0xa",
+	[0xb] = "0xb",
+	[0xc] = "0xc",
+	[0xd] = "0xd",
+	[0xe] = "0xe",
+	[0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+	[0x0] = "Normal",
+	[0x1] = "Registers",
+	[0x2] = "TLB",
+	[0x3] = "0x3",
+	[0x4] = "L1I",
+	[0x5] = "L1D",
+	[0x6] = "L3",
+	[0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+	"Invalid/OK",	"Invalid/Data",
+	"Shared/OK",	"Shared/Data",
+	"Modified/OK",	"Modified/Data",
+	"Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_causes[32] = {
+	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+	"0x08", "0x09", "0x0a", "0x0b",
+	"0x0c", "0x0d", "0x0e", "0x0f",
+	"0x10", "0x11", "0x12", "0x13",
+	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
+	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+	"0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+	"0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+	"MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+	"CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+	"0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+	"0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+	"0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
 
 phys_addr_t __mips_cm_phys_base(void)
 {
 	u32 config3 = read_c0_config3();
-	u32 cmgcr;
+	unsigned long cmgcr;
 
 	/* Check the CMGCRBase register is implemented */
 	if (!(config3 & MIPS_CONF3_CMGCR))
@@ -81,6 +201,13 @@
 	phys_addr_t addr;
 	u32 base_reg;
 
+	/*
+	 * No need to probe again if we have already been
+	 * here before.
+	 */
+	if (mips_cm_base)
+		return 0;
+
 	addr = mips_cm_phys_base();
 	BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
 	if (!addr)
@@ -117,5 +244,133 @@
 	/* probe for an L2-only sync region */
 	mips_cm_probe_l2sync();
 
+	/* determine register width for this CM */
+	mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+
 	return 0;
 }
+
+void mips_cm_error_report(void)
+{
+	unsigned long revision = mips_cm_revision();
+	/*
+	 * CM3 has a 64-bit Error cause register with 0:57 containing the error
+	 * info and 63:58 the error type. For old CMs, everything is contained
+	 * in a single 32-bit register (0:26 and 31:27 respectively). Even
+	 * though the cm_error is u64, we will simply ignore the upper word
+	 * for CM2.
+	 */
+	u64 cm_error = read_gcr_error_cause();
+	int cm_error_cause_sft = CM_GCR_ERROR_CAUSE_ERRTYPE_SHF +
+				 ((revision >= CM_REV_CM3) ? 31 : 0);
+	unsigned long cm_addr = read_gcr_error_addr();
+	unsigned long cm_other = read_gcr_error_mult();
+	int ocause, cause;
+	char buf[256];
+
+	if (!mips_cm_present())
+		return;
+
+	cause = cm_error >> cm_error_cause_sft;
+
+	if (!cause)
+		/* All good */
+		return;
+
+	ocause = cm_other >> CM_GCR_ERROR_MULT_ERR2ND_SHF;
+	if (revision < CM_REV_CM3) { /* CM2 */
+		if (cause < 16) {
+			unsigned long cca_bits = (cm_error >> 15) & 7;
+			unsigned long tr_bits = (cm_error >> 12) & 7;
+			unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+			unsigned long stag_bits = (cm_error >> 3) & 15;
+			unsigned long sport_bits = (cm_error >> 0) & 7;
+
+			snprintf(buf, sizeof(buf),
+				 "CCA=%lu TR=%s MCmd=%s STag=%lu "
+				 "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+				 cm2_cmd[cmd_bits], stag_bits, sport_bits);
+		} else {
+			/* glob state & sresp together */
+			unsigned long c3_bits = (cm_error >> 18) & 7;
+			unsigned long c2_bits = (cm_error >> 15) & 7;
+			unsigned long c1_bits = (cm_error >> 12) & 7;
+			unsigned long c0_bits = (cm_error >> 9) & 7;
+			unsigned long sc_bit = (cm_error >> 8) & 1;
+			unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+			unsigned long sport_bits = (cm_error >> 0) & 7;
+
+			snprintf(buf, sizeof(buf),
+				 "C3=%s C2=%s C1=%s C0=%s SC=%s "
+				 "MCmd=%s SPort=%lu\n",
+				 cm2_core[c3_bits], cm2_core[c2_bits],
+				 cm2_core[c1_bits], cm2_core[c0_bits],
+				 sc_bit ? "True" : "False",
+				 cm2_cmd[cmd_bits], sport_bits);
+		}
+		pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+		       cm2_causes[cause], buf);
+		pr_err("CM_ADDR =%08lx\n", cm_addr);
+		pr_err("CM_OTHER=%08lx %s\n", cm_other, cm2_causes[ocause]);
+	} else { /* CM3 */
+		/* Used by cause == {1,2,3} */
+		unsigned long core_id_bits = (cm_error >> 22) & 0xf;
+		unsigned long vp_id_bits = (cm_error >> 18) & 0xf;
+		unsigned long cmd_bits = (cm_error >> 14) & 0xf;
+		unsigned long cmd_group_bits = (cm_error >> 11) & 0xf;
+		unsigned long cm3_cca_bits = (cm_error >> 8) & 7;
+		unsigned long mcp_bits = (cm_error >> 5) & 0xf;
+		unsigned long cm3_tr_bits = (cm_error >> 1) & 0xf;
+		unsigned long sched_bit = cm_error & 0x1;
+
+		if (cause == 1 || cause == 3) { /* Tag ECC */
+			unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+			unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+			unsigned long dword_bits = (cm_error >> 49) & 0xff;
+			unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+			unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+			unsigned long bank_bit = (cm_error >> 28) & 0x1;
+			snprintf(buf, sizeof(buf),
+				 "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu)"
+				 " Bank=%lu CoreID=%lu VPID=%lu Command=%s"
+				 " Command Group=%s CCA=%lu MCP=%d"
+				 " Transaction type=%s Scheduler=%lu\n",
+				 tag_ecc ? "TAG" : "DATA",
+				 tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+				 data_way_bits, bank_bit, dword_bits,
+				 data_sets_bits,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+		} else if (cause == 2) {
+			unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+			unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+			unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+			unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+			snprintf(buf, sizeof(buf),
+				 "Decode Request Error: Type=%lu, Command=%lu"
+				 " Command Group=%lu Destination ID=%lu"
+				 " CoreID=%lu VPID=%lu Command=%s"
+				 " Command Group=%s CCA=%lu MCP=%d"
+				 " Transaction type=%s Scheduler=%lu\n",
+				 data_error_type, data_decode_cmd,
+				 data_decode_group, data_decode_destination_id,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+		}
+
+		pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+		       cm3_causes[cause], buf);
+		pr_err("CM_ADDR =%lx\n", cm_addr);
+		pr_err("CM_OTHER=%lx %s\n", cm_other, cm3_causes[ocause]);
+	}
+
+	/* reprime cause register */
+	write_gcr_error_cause(0);
+}
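
The decode at the top of mips_cm_error_report() is what the layout comment
describes: CM_GCR_ERROR_CAUSE_ERRTYPE_SHF selects the CM2 error type in bits
31:27, and adding 31 moves the shift up to bit 58 for CM3's 64-bit register.
A worked sketch of just that step (assuming the shift value of 27 implied by
the 31:27 layout):

	u64 cm_error = read_gcr_error_cause();
	int shift = CM_GCR_ERROR_CAUSE_ERRTYPE_SHF +		/* 27 for CM2 */
		    (mips_cm_revision() >= CM_REV_CM3 ? 31 : 0);/* 58 for CM3 */
	int cause = cm_error >> shift;	/* index into cm2_causes[]/cm3_causes[] */
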
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 1196450..8af4d62 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,9 +21,16 @@
 
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
-phys_addr_t __weak mips_cpc_phys_base(void)
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
 {
-	u32 cpc_base;
+	unsigned long cpc_base;
 
 	if (!mips_cm_present())
 		return 0;
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index cc1b6fa..d7b8dd4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1556,6 +1556,7 @@
 #endif
 		break;
 	case CPU_P5600:
+	case CPU_I6400:
 		/* 8-bit event numbers */
 		raw_id = config & 0x1ff;
 		base_id = raw_id & 0xff;
@@ -1717,6 +1718,11 @@
 		mipspmu.general_event_map = &mipsxxcore_event_map2;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
 		break;
+	case CPU_I6400:
+		mipspmu.name = "mips/I6400";
+		mipspmu.general_event_map = &mipsxxcore_event_map2;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+		break;
 	case CPU_1004K:
 		mipspmu.name = "mips/1004K";
 		mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 0614717..f63a289 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -267,6 +267,7 @@
 
 	/* CPUs which do not require the workaround */
 	case CPU_P5600:
+	case CPU_I6400:
 		return 0;
 
 	default:
@@ -671,6 +672,7 @@
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_P5600:
+	case CPU_I6400:
 		stype_intervention = 0x2;
 		stype_memory = 0x3;
 		stype_ordering = 0x10;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index e933a30..4f0ac78 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -25,6 +25,7 @@
 #include <linux/regset.h>
 #include <linux/smp.h>
 #include <linux/security.h>
+#include <linux/stddef.h>
 #include <linux/tracehook.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
@@ -490,6 +491,93 @@
 	REGSET_FPR,
 };
 
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) {					\
+	.name = #reg,							\
+	.offset = offsetof(struct pt_regs, r)				\
+}
+
+#define REG_OFFSET_END {						\
+	.name = NULL,							\
+	.offset = 0							\
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	REG_OFFSET_NAME(r0, regs[0]),
+	REG_OFFSET_NAME(r1, regs[1]),
+	REG_OFFSET_NAME(r2, regs[2]),
+	REG_OFFSET_NAME(r3, regs[3]),
+	REG_OFFSET_NAME(r4, regs[4]),
+	REG_OFFSET_NAME(r5, regs[5]),
+	REG_OFFSET_NAME(r6, regs[6]),
+	REG_OFFSET_NAME(r7, regs[7]),
+	REG_OFFSET_NAME(r8, regs[8]),
+	REG_OFFSET_NAME(r9, regs[9]),
+	REG_OFFSET_NAME(r10, regs[10]),
+	REG_OFFSET_NAME(r11, regs[11]),
+	REG_OFFSET_NAME(r12, regs[12]),
+	REG_OFFSET_NAME(r13, regs[13]),
+	REG_OFFSET_NAME(r14, regs[14]),
+	REG_OFFSET_NAME(r15, regs[15]),
+	REG_OFFSET_NAME(r16, regs[16]),
+	REG_OFFSET_NAME(r17, regs[17]),
+	REG_OFFSET_NAME(r18, regs[18]),
+	REG_OFFSET_NAME(r19, regs[19]),
+	REG_OFFSET_NAME(r20, regs[20]),
+	REG_OFFSET_NAME(r21, regs[21]),
+	REG_OFFSET_NAME(r22, regs[22]),
+	REG_OFFSET_NAME(r23, regs[23]),
+	REG_OFFSET_NAME(r24, regs[24]),
+	REG_OFFSET_NAME(r25, regs[25]),
+	REG_OFFSET_NAME(r26, regs[26]),
+	REG_OFFSET_NAME(r27, regs[27]),
+	REG_OFFSET_NAME(r28, regs[28]),
+	REG_OFFSET_NAME(r29, regs[29]),
+	REG_OFFSET_NAME(r30, regs[30]),
+	REG_OFFSET_NAME(r31, regs[31]),
+	REG_OFFSET_NAME(c0_status, cp0_status),
+	REG_OFFSET_NAME(hi, hi),
+	REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+	REG_OFFSET_NAME(acx, acx),
+#endif
+	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+	REG_OFFSET_NAME(c0_cause, cp0_cause),
+	REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_MIPS_MT_SMTC
+	REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
+#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	REG_OFFSET_NAME(mpl0, mpl[0]),
+	REG_OFFSET_NAME(mpl1, mpl[1]),
+	REG_OFFSET_NAME(mpl2, mpl[2]),
+	REG_OFFSET_NAME(mtp0, mtp[0]),
+	REG_OFFSET_NAME(mtp1, mtp[1]),
+	REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name:       the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
 #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 
 static const struct user_regset mips_regsets[] = {
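
regs_query_register_offset() is the name-to-offset half of the register
access API; consumers such as kprobes event parsing pair it with
regs_get_register() to read a register by name. A minimal sketch, given a
struct pt_regs *regs and assuming regs_get_register() is provided by the
asm/ptrace.h side of this API:

	int off = regs_query_register_offset("c0_epc");

	if (off >= 0) {
		unsigned long epc = regs_get_register(regs, off);
		/* use epc ... */
	}
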
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 1d88af2..f09546e 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -35,6 +36,14 @@
 
 	.set	noreorder
 
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
 LEAF(_save_fp_context)
 	.set	push
 	SET_HARDFLOAT
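
The kernel-doc above captures the new calling convention: the helper now
takes the fpregs array and the fpc_csr word as two separate pointers (a0 and
a1) rather than assuming a sigcontext layout, which is why the SC_FPREGS and
SC_FPC_CSR offsets disappear from the stores below. Roughly, on the C side a
caller would look like this, given a struct sigcontext __user *sc (prototype
shown for illustration; the exact declaration lives in the signal code):

extern asmlinkage int _save_fp_context(void __user *fpregs, void __user *csr);

	err = _save_fp_context(sc->sc_fpregs, &sc->sc_fpc_csr);
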
@@ -54,117 +63,60 @@
 	 nop
 #endif
 	/* Store the 16 odd double precision registers */
-	EX	sdc1 $f1, SC_FPREGS+8(a0)
-	EX	sdc1 $f3, SC_FPREGS+24(a0)
-	EX	sdc1 $f5, SC_FPREGS+40(a0)
-	EX	sdc1 $f7, SC_FPREGS+56(a0)
-	EX	sdc1 $f9, SC_FPREGS+72(a0)
-	EX	sdc1 $f11, SC_FPREGS+88(a0)
-	EX	sdc1 $f13, SC_FPREGS+104(a0)
-	EX	sdc1 $f15, SC_FPREGS+120(a0)
-	EX	sdc1 $f17, SC_FPREGS+136(a0)
-	EX	sdc1 $f19, SC_FPREGS+152(a0)
-	EX	sdc1 $f21, SC_FPREGS+168(a0)
-	EX	sdc1 $f23, SC_FPREGS+184(a0)
-	EX	sdc1 $f25, SC_FPREGS+200(a0)
-	EX	sdc1 $f27, SC_FPREGS+216(a0)
-	EX	sdc1 $f29, SC_FPREGS+232(a0)
-	EX	sdc1 $f31, SC_FPREGS+248(a0)
+	EX	sdc1 $f1, 8(a0)
+	EX	sdc1 $f3, 24(a0)
+	EX	sdc1 $f5, 40(a0)
+	EX	sdc1 $f7, 56(a0)
+	EX	sdc1 $f9, 72(a0)
+	EX	sdc1 $f11, 88(a0)
+	EX	sdc1 $f13, 104(a0)
+	EX	sdc1 $f15, 120(a0)
+	EX	sdc1 $f17, 136(a0)
+	EX	sdc1 $f19, 152(a0)
+	EX	sdc1 $f21, 168(a0)
+	EX	sdc1 $f23, 184(a0)
+	EX	sdc1 $f25, 200(a0)
+	EX	sdc1 $f27, 216(a0)
+	EX	sdc1 $f29, 232(a0)
+	EX	sdc1 $f31, 248(a0)
 1:	.set	pop
 #endif
 
 	.set push
 	SET_HARDFLOAT
 	/* Store the 16 even double precision registers */
-	EX	sdc1 $f0, SC_FPREGS+0(a0)
-	EX	sdc1 $f2, SC_FPREGS+16(a0)
-	EX	sdc1 $f4, SC_FPREGS+32(a0)
-	EX	sdc1 $f6, SC_FPREGS+48(a0)
-	EX	sdc1 $f8, SC_FPREGS+64(a0)
-	EX	sdc1 $f10, SC_FPREGS+80(a0)
-	EX	sdc1 $f12, SC_FPREGS+96(a0)
-	EX	sdc1 $f14, SC_FPREGS+112(a0)
-	EX	sdc1 $f16, SC_FPREGS+128(a0)
-	EX	sdc1 $f18, SC_FPREGS+144(a0)
-	EX	sdc1 $f20, SC_FPREGS+160(a0)
-	EX	sdc1 $f22, SC_FPREGS+176(a0)
-	EX	sdc1 $f24, SC_FPREGS+192(a0)
-	EX	sdc1 $f26, SC_FPREGS+208(a0)
-	EX	sdc1 $f28, SC_FPREGS+224(a0)
-	EX	sdc1 $f30, SC_FPREGS+240(a0)
-	EX	sw t1, SC_FPC_CSR(a0)
+	EX	sdc1 $f0, 0(a0)
+	EX	sdc1 $f2, 16(a0)
+	EX	sdc1 $f4, 32(a0)
+	EX	sdc1 $f6, 48(a0)
+	EX	sdc1 $f8, 64(a0)
+	EX	sdc1 $f10, 80(a0)
+	EX	sdc1 $f12, 96(a0)
+	EX	sdc1 $f14, 112(a0)
+	EX	sdc1 $f16, 128(a0)
+	EX	sdc1 $f18, 144(a0)
+	EX	sdc1 $f20, 160(a0)
+	EX	sdc1 $f22, 176(a0)
+	EX	sdc1 $f24, 192(a0)
+	EX	sdc1 $f26, 208(a0)
+	EX	sdc1 $f28, 224(a0)
+	EX	sdc1 $f30, 240(a0)
+	EX	sw t1, 0(a1)
 	jr	ra
 	 li	v0, 0					# success
 	.set pop
 	END(_save_fp_context)
 
-#ifdef CONFIG_MIPS32_COMPAT
-	/* Save 32-bit process floating point context */
-LEAF(_save_fp_context32)
-	.set push
-	.set MIPS_ISA_ARCH_LEVEL_RAW
-	SET_HARDFLOAT
-	cfc1	t1, fcr31
-
-#ifndef CONFIG_CPU_MIPS64_R6
-	mfc0	t0, CP0_STATUS
-	sll	t0, t0, 5
-	bgez	t0, 1f			# skip storing odd if FR=0
-	 nop
-#endif
-
-	/* Store the 16 odd double precision registers */
-	EX      sdc1 $f1, SC32_FPREGS+8(a0)
-	EX      sdc1 $f3, SC32_FPREGS+24(a0)
-	EX      sdc1 $f5, SC32_FPREGS+40(a0)
-	EX      sdc1 $f7, SC32_FPREGS+56(a0)
-	EX      sdc1 $f9, SC32_FPREGS+72(a0)
-	EX      sdc1 $f11, SC32_FPREGS+88(a0)
-	EX      sdc1 $f13, SC32_FPREGS+104(a0)
-	EX      sdc1 $f15, SC32_FPREGS+120(a0)
-	EX      sdc1 $f17, SC32_FPREGS+136(a0)
-	EX      sdc1 $f19, SC32_FPREGS+152(a0)
-	EX      sdc1 $f21, SC32_FPREGS+168(a0)
-	EX      sdc1 $f23, SC32_FPREGS+184(a0)
-	EX      sdc1 $f25, SC32_FPREGS+200(a0)
-	EX      sdc1 $f27, SC32_FPREGS+216(a0)
-	EX      sdc1 $f29, SC32_FPREGS+232(a0)
-	EX      sdc1 $f31, SC32_FPREGS+248(a0)
-
-	/* Store the 16 even double precision registers */
-1:	EX	sdc1 $f0, SC32_FPREGS+0(a0)
-	EX	sdc1 $f2, SC32_FPREGS+16(a0)
-	EX	sdc1 $f4, SC32_FPREGS+32(a0)
-	EX	sdc1 $f6, SC32_FPREGS+48(a0)
-	EX	sdc1 $f8, SC32_FPREGS+64(a0)
-	EX	sdc1 $f10, SC32_FPREGS+80(a0)
-	EX	sdc1 $f12, SC32_FPREGS+96(a0)
-	EX	sdc1 $f14, SC32_FPREGS+112(a0)
-	EX	sdc1 $f16, SC32_FPREGS+128(a0)
-	EX	sdc1 $f18, SC32_FPREGS+144(a0)
-	EX	sdc1 $f20, SC32_FPREGS+160(a0)
-	EX	sdc1 $f22, SC32_FPREGS+176(a0)
-	EX	sdc1 $f24, SC32_FPREGS+192(a0)
-	EX	sdc1 $f26, SC32_FPREGS+208(a0)
-	EX	sdc1 $f28, SC32_FPREGS+224(a0)
-	EX	sdc1 $f30, SC32_FPREGS+240(a0)
-	EX	sw t1, SC32_FPC_CSR(a0)
-	cfc1	t0, $0				# implementation/version
-	EX	sw t0, SC32_FPC_EIR(a0)
-	.set pop
-
-	jr	ra
-	 li	v0, 0					# success
-	END(_save_fp_context32)
-#endif
-
-/*
- * Restore FPU state:
- *  - fp gp registers
- *  - cp1 status/control register
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
  */
 LEAF(_restore_fp_context)
-	EX	lw t1, SC_FPC_CSR(a0)
+	EX	lw t1, 0(a1)
 
 #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
 		defined(CONFIG_CPU_MIPS32_R6)
@@ -178,101 +130,231 @@
 	bgez	t0, 1f			# skip loading odd if FR=0
 	 nop
 #endif
-	EX	ldc1 $f1, SC_FPREGS+8(a0)
-	EX	ldc1 $f3, SC_FPREGS+24(a0)
-	EX	ldc1 $f5, SC_FPREGS+40(a0)
-	EX	ldc1 $f7, SC_FPREGS+56(a0)
-	EX	ldc1 $f9, SC_FPREGS+72(a0)
-	EX	ldc1 $f11, SC_FPREGS+88(a0)
-	EX	ldc1 $f13, SC_FPREGS+104(a0)
-	EX	ldc1 $f15, SC_FPREGS+120(a0)
-	EX	ldc1 $f17, SC_FPREGS+136(a0)
-	EX	ldc1 $f19, SC_FPREGS+152(a0)
-	EX	ldc1 $f21, SC_FPREGS+168(a0)
-	EX	ldc1 $f23, SC_FPREGS+184(a0)
-	EX	ldc1 $f25, SC_FPREGS+200(a0)
-	EX	ldc1 $f27, SC_FPREGS+216(a0)
-	EX	ldc1 $f29, SC_FPREGS+232(a0)
-	EX	ldc1 $f31, SC_FPREGS+248(a0)
+	EX	ldc1 $f1, 8(a0)
+	EX	ldc1 $f3, 24(a0)
+	EX	ldc1 $f5, 40(a0)
+	EX	ldc1 $f7, 56(a0)
+	EX	ldc1 $f9, 72(a0)
+	EX	ldc1 $f11, 88(a0)
+	EX	ldc1 $f13, 104(a0)
+	EX	ldc1 $f15, 120(a0)
+	EX	ldc1 $f17, 136(a0)
+	EX	ldc1 $f19, 152(a0)
+	EX	ldc1 $f21, 168(a0)
+	EX	ldc1 $f23, 184(a0)
+	EX	ldc1 $f25, 200(a0)
+	EX	ldc1 $f27, 216(a0)
+	EX	ldc1 $f29, 232(a0)
+	EX	ldc1 $f31, 248(a0)
 1:	.set pop
 #endif
 	.set push
 	SET_HARDFLOAT
-	EX	ldc1 $f0, SC_FPREGS+0(a0)
-	EX	ldc1 $f2, SC_FPREGS+16(a0)
-	EX	ldc1 $f4, SC_FPREGS+32(a0)
-	EX	ldc1 $f6, SC_FPREGS+48(a0)
-	EX	ldc1 $f8, SC_FPREGS+64(a0)
-	EX	ldc1 $f10, SC_FPREGS+80(a0)
-	EX	ldc1 $f12, SC_FPREGS+96(a0)
-	EX	ldc1 $f14, SC_FPREGS+112(a0)
-	EX	ldc1 $f16, SC_FPREGS+128(a0)
-	EX	ldc1 $f18, SC_FPREGS+144(a0)
-	EX	ldc1 $f20, SC_FPREGS+160(a0)
-	EX	ldc1 $f22, SC_FPREGS+176(a0)
-	EX	ldc1 $f24, SC_FPREGS+192(a0)
-	EX	ldc1 $f26, SC_FPREGS+208(a0)
-	EX	ldc1 $f28, SC_FPREGS+224(a0)
-	EX	ldc1 $f30, SC_FPREGS+240(a0)
+	EX	ldc1 $f0, 0(a0)
+	EX	ldc1 $f2, 16(a0)
+	EX	ldc1 $f4, 32(a0)
+	EX	ldc1 $f6, 48(a0)
+	EX	ldc1 $f8, 64(a0)
+	EX	ldc1 $f10, 80(a0)
+	EX	ldc1 $f12, 96(a0)
+	EX	ldc1 $f14, 112(a0)
+	EX	ldc1 $f16, 128(a0)
+	EX	ldc1 $f18, 144(a0)
+	EX	ldc1 $f20, 160(a0)
+	EX	ldc1 $f22, 176(a0)
+	EX	ldc1 $f24, 192(a0)
+	EX	ldc1 $f26, 208(a0)
+	EX	ldc1 $f28, 224(a0)
+	EX	ldc1 $f30, 240(a0)
 	ctc1	t1, fcr31
 	.set pop
 	jr	ra
 	 li	v0, 0					# success
 	END(_restore_fp_context)
 
-#ifdef CONFIG_MIPS32_COMPAT
-LEAF(_restore_fp_context32)
-	/* Restore an o32 sigcontext.  */
-	.set push
-	SET_HARDFLOAT
-	EX	lw t1, SC32_FPC_CSR(a0)
+#ifdef CONFIG_CPU_HAS_MSA
 
-#ifndef CONFIG_CPU_MIPS64_R6
-	mfc0	t0, CP0_STATUS
-	sll	t0, t0, 5
-	bgez	t0, 1f			# skip loading odd if FR=0
-	 nop
-#endif
-
-	EX      ldc1 $f1, SC32_FPREGS+8(a0)
-	EX      ldc1 $f3, SC32_FPREGS+24(a0)
-	EX      ldc1 $f5, SC32_FPREGS+40(a0)
-	EX      ldc1 $f7, SC32_FPREGS+56(a0)
-	EX      ldc1 $f9, SC32_FPREGS+72(a0)
-	EX      ldc1 $f11, SC32_FPREGS+88(a0)
-	EX      ldc1 $f13, SC32_FPREGS+104(a0)
-	EX      ldc1 $f15, SC32_FPREGS+120(a0)
-	EX      ldc1 $f17, SC32_FPREGS+136(a0)
-	EX      ldc1 $f19, SC32_FPREGS+152(a0)
-	EX      ldc1 $f21, SC32_FPREGS+168(a0)
-	EX      ldc1 $f23, SC32_FPREGS+184(a0)
-	EX      ldc1 $f25, SC32_FPREGS+200(a0)
-	EX      ldc1 $f27, SC32_FPREGS+216(a0)
-	EX      ldc1 $f29, SC32_FPREGS+232(a0)
-	EX      ldc1 $f31, SC32_FPREGS+248(a0)
-
-1:	EX	ldc1 $f0, SC32_FPREGS+0(a0)
-	EX	ldc1 $f2, SC32_FPREGS+16(a0)
-	EX	ldc1 $f4, SC32_FPREGS+32(a0)
-	EX	ldc1 $f6, SC32_FPREGS+48(a0)
-	EX	ldc1 $f8, SC32_FPREGS+64(a0)
-	EX	ldc1 $f10, SC32_FPREGS+80(a0)
-	EX	ldc1 $f12, SC32_FPREGS+96(a0)
-	EX	ldc1 $f14, SC32_FPREGS+112(a0)
-	EX	ldc1 $f16, SC32_FPREGS+128(a0)
-	EX	ldc1 $f18, SC32_FPREGS+144(a0)
-	EX	ldc1 $f20, SC32_FPREGS+160(a0)
-	EX	ldc1 $f22, SC32_FPREGS+176(a0)
-	EX	ldc1 $f24, SC32_FPREGS+192(a0)
-	EX	ldc1 $f26, SC32_FPREGS+208(a0)
-	EX	ldc1 $f28, SC32_FPREGS+224(a0)
-	EX	ldc1 $f30, SC32_FPREGS+240(a0)
-	ctc1	t1, fcr31
+	.macro	op_one_wr	op, idx, base
+	.align	4
+\idx:	\op	\idx, 0, \base
 	jr	ra
-	 li	v0, 0					# success
-	.set pop
-	END(_restore_fp_context32)
+	 nop
+	.endm
+
+	.macro	op_msa_wr	name, op
+LEAF(\name)
+	.set		push
+	.set		noreorder
+	sll		t0, a0, 4
+	PTR_LA		t1, 0f
+	PTR_ADDU	t0, t0, t1
+	jr		t0
+	  nop
+	op_one_wr	\op, 0, a1
+	op_one_wr	\op, 1, a1
+	op_one_wr	\op, 2, a1
+	op_one_wr	\op, 3, a1
+	op_one_wr	\op, 4, a1
+	op_one_wr	\op, 5, a1
+	op_one_wr	\op, 6, a1
+	op_one_wr	\op, 7, a1
+	op_one_wr	\op, 8, a1
+	op_one_wr	\op, 9, a1
+	op_one_wr	\op, 10, a1
+	op_one_wr	\op, 11, a1
+	op_one_wr	\op, 12, a1
+	op_one_wr	\op, 13, a1
+	op_one_wr	\op, 14, a1
+	op_one_wr	\op, 15, a1
+	op_one_wr	\op, 16, a1
+	op_one_wr	\op, 17, a1
+	op_one_wr	\op, 18, a1
+	op_one_wr	\op, 19, a1
+	op_one_wr	\op, 20, a1
+	op_one_wr	\op, 21, a1
+	op_one_wr	\op, 22, a1
+	op_one_wr	\op, 23, a1
+	op_one_wr	\op, 24, a1
+	op_one_wr	\op, 25, a1
+	op_one_wr	\op, 26, a1
+	op_one_wr	\op, 27, a1
+	op_one_wr	\op, 28, a1
+	op_one_wr	\op, 29, a1
+	op_one_wr	\op, 30, a1
+	op_one_wr	\op, 31, a1
+	.set		pop
+	END(\name)
+	.endm
+
+	op_msa_wr	read_msa_wr_b, st_b
+	op_msa_wr	read_msa_wr_h, st_h
+	op_msa_wr	read_msa_wr_w, st_w
+	op_msa_wr	read_msa_wr_d, st_d
+
+	op_msa_wr	write_msa_wr_b, ld_b
+	op_msa_wr	write_msa_wr_h, ld_h
+	op_msa_wr	write_msa_wr_w, ld_w
+	op_msa_wr	write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+	.macro	save_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	copy_u_d \wr, 1
+	EX sd	$1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	copy_u_w \wr, 2
+	EX sw	$1, \off(\base)
+	copy_u_w \wr, 3
+	EX sw	$1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	copy_u_w \wr, 2
+	EX sw	$1, (\off+4)(\base)
+	copy_u_w \wr, 3
+	EX sw	$1, \off(\base)
 #endif
+	.set	pop
+	.endm
+
+LEAF(_save_msa_all_upper)
+	save_msa_upper	0, 0x00, a0
+	save_msa_upper	1, 0x08, a0
+	save_msa_upper	2, 0x10, a0
+	save_msa_upper	3, 0x18, a0
+	save_msa_upper	4, 0x20, a0
+	save_msa_upper	5, 0x28, a0
+	save_msa_upper	6, 0x30, a0
+	save_msa_upper	7, 0x38, a0
+	save_msa_upper	8, 0x40, a0
+	save_msa_upper	9, 0x48, a0
+	save_msa_upper	10, 0x50, a0
+	save_msa_upper	11, 0x58, a0
+	save_msa_upper	12, 0x60, a0
+	save_msa_upper	13, 0x68, a0
+	save_msa_upper	14, 0x70, a0
+	save_msa_upper	15, 0x78, a0
+	save_msa_upper	16, 0x80, a0
+	save_msa_upper	17, 0x88, a0
+	save_msa_upper	18, 0x90, a0
+	save_msa_upper	19, 0x98, a0
+	save_msa_upper	20, 0xa0, a0
+	save_msa_upper	21, 0xa8, a0
+	save_msa_upper	22, 0xb0, a0
+	save_msa_upper	23, 0xb8, a0
+	save_msa_upper	24, 0xc0, a0
+	save_msa_upper	25, 0xc8, a0
+	save_msa_upper	26, 0xd0, a0
+	save_msa_upper	27, 0xd8, a0
+	save_msa_upper	28, 0xe0, a0
+	save_msa_upper	29, 0xe8, a0
+	save_msa_upper	30, 0xf0, a0
+	save_msa_upper	31, 0xf8, a0
+	jr	ra
+	 li	v0, 0
+	END(_save_msa_all_upper)
+
+	.macro	restore_msa_upper	wr, off, base
+	.set	push
+	.set	noat
+#ifdef CONFIG_64BIT
+	EX ld	$1, \off(\base)
+	insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+	EX lw	$1, \off(\base)
+	insert_w \wr, 2
+	EX lw	$1, (\off+4)(\base)
+	insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+	EX lw	$1, (\off+4)(\base)
+	insert_w \wr, 2
+	EX lw	$1, \off(\base)
+	insert_w \wr, 3
+#endif
+	.set	pop
+	.endm
+
+LEAF(_restore_msa_all_upper)
+	restore_msa_upper	0, 0x00, a0
+	restore_msa_upper	1, 0x08, a0
+	restore_msa_upper	2, 0x10, a0
+	restore_msa_upper	3, 0x18, a0
+	restore_msa_upper	4, 0x20, a0
+	restore_msa_upper	5, 0x28, a0
+	restore_msa_upper	6, 0x30, a0
+	restore_msa_upper	7, 0x38, a0
+	restore_msa_upper	8, 0x40, a0
+	restore_msa_upper	9, 0x48, a0
+	restore_msa_upper	10, 0x50, a0
+	restore_msa_upper	11, 0x58, a0
+	restore_msa_upper	12, 0x60, a0
+	restore_msa_upper	13, 0x68, a0
+	restore_msa_upper	14, 0x70, a0
+	restore_msa_upper	15, 0x78, a0
+	restore_msa_upper	16, 0x80, a0
+	restore_msa_upper	17, 0x88, a0
+	restore_msa_upper	18, 0x90, a0
+	restore_msa_upper	19, 0x98, a0
+	restore_msa_upper	20, 0xa0, a0
+	restore_msa_upper	21, 0xa8, a0
+	restore_msa_upper	22, 0xb0, a0
+	restore_msa_upper	23, 0xb8, a0
+	restore_msa_upper	24, 0xc0, a0
+	restore_msa_upper	25, 0xc8, a0
+	restore_msa_upper	26, 0xd0, a0
+	restore_msa_upper	27, 0xd8, a0
+	restore_msa_upper	28, 0xe0, a0
+	restore_msa_upper	29, 0xe8, a0
+	restore_msa_upper	30, 0xf0, a0
+	restore_msa_upper	31, 0xf8, a0
+	jr	ra
+	 li	v0, 0
+	END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
 
 	.set	reorder
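
A minimal sketch of the layout produced by the save_msa_upper macro above on
32-bit kernels, where the upper 64 bits of each vector register are written as
two 32-bit words. The helper below is illustrative only (the real code extracts
the words with copy_u.w); it shows that the word order is chosen so the
doubleword stored at p matches the layout the CONFIG_64BIT path produces with a
single sd of the upper half.

#include <stdint.h>

static void save_upper64_sketch(uint64_t upper, uint32_t *p, int big_endian)
{
	if (big_endian) {
		p[0] = (uint32_t)(upper >> 32);	/* bits 96..127 of the register */
		p[1] = (uint32_t)upper;		/* bits 64..95 */
	} else {
		p[0] = (uint32_t)upper;		/* bits 64..95 */
		p[1] = (uint32_t)(upper >> 32);	/* bits 96..127 */
	}
}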
 
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 04cbbde3..92cd051 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -34,7 +34,7 @@
 #ifndef USE_ALTERNATE_RESUME_IMPL
 /*
  * task_struct *resume(task_struct *prev, task_struct *next,
- *		       struct thread_info *next_ti, s32 fp_save)
+ *		       struct thread_info *next_ti)
  */
 	.align	5
 	LEAF(resume)
@@ -43,45 +43,6 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
 
-	/*
-	 * Check whether we need to save any FP context. FP context is saved
-	 * iff the process has used the context with the scalar FPU or the MSA
-	 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
-	 * _TIF_USEDMSA respectively. switch_to will have set fp_save
-	 * accordingly to an FP_SAVE_ enum value.
-	 */
-	beqz	a3, 2f
-
-	/*
-	 * We do. Clear the saved CU1 bit for prev, such that next time it is
-	 * scheduled it will start in userland with the FPU disabled. If the
-	 * task uses the FPU then it will be enabled again via the do_cpu trap.
-	 * This allows us to lazily restore the FP context.
-	 */
-	PTR_L	t3, TASK_THREAD_INFO(a0)
-	LONG_L	t0, ST_OFF(t3)
-	li	t1, ~ST0_CU1
-	and	t0, t0, t1
-	LONG_S	t0, ST_OFF(t3)
-
-	/* Check whether we're saving scalar or vector context. */
-	bgtz	a3, 1f
-
-	/* Save 128b MSA vector context + scalar FP control & status. */
-	.set push
-	SET_HARDFLOAT
-	cfc1	t1, fcr31
-	msa_save_all	a0
-	.set pop	/* SET_HARDFLOAT */
-
-	sw	t1, THREAD_FCR31(a0)
-	b	2f
-
-1:	/* Save 32b/64b scalar FP context. */
-	fpu_save_double a0 t0 t1		# c0_status passed in t0
-						# clobbers t1
-2:
-
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 008b337..35b8316 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -476,7 +476,7 @@
  *  o bootmem_init()
  *  o sparse_init()
  *  o paging_init()
- *  o dma_continguous_reserve()
+ *  o dma_contiguous_reserve()
  *
  * At this stage the bootmem allocator is ready to use.
  *
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 0b85f82..f50d484 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -31,4 +31,13 @@
 #define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
 #define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
 
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
 #endif	/* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6a28c79..2fec67b 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -21,6 +21,7 @@
 #include <linux/wait.h>
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
+#include <linux/uprobes.h>
 #include <linux/compiler.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
@@ -38,20 +39,21 @@
 #include <asm/vdso.h>
 #include <asm/dsp.h>
 #include <asm/inst.h>
+#include <asm/msa.h>
 
 #include "signal-common.h"
 
-static int (*save_fp_context)(struct sigcontext __user *sc);
-static int (*restore_fp_context)(struct sigcontext __user *sc);
-
-extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
 
 struct sigframe {
 	u32 sf_ass[4];		/* argument save space for o32 */
 	u32 sf_pad[2];		/* Was: signal trampoline */
+
+	/* Matches struct ucontext from its uc_mcontext field onwards */
 	struct sigcontext sf_sc;
 	sigset_t sf_mask;
+	unsigned long long sf_extcontext[0];
 };
 
 struct rt_sigframe {
@@ -65,43 +67,255 @@
  * Thread saved context copy to/from a signal context presumed to be on the
  * user stack, and therefore accessed with appropriate macros from uaccess.h.
  */
-static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_to_sigcontext(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
+	for (i = 0; i < NUM_FPU_REGS; i += inc) {
 		err |=
 		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
-			       &sc->sc_fpregs[i]);
+			       &fpregs[i]);
 	}
-	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+	err |= __put_user(current->thread.fpu.fcr31, csr);
 
 	return err;
 }
 
-static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
+static int copy_fp_from_sigcontext(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
 	int i;
 	int err = 0;
+	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
 	u64 fpr_val;
 
-	for (i = 0; i < NUM_FPU_REGS; i++) {
-		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+	for (i = 0; i < NUM_FPU_REGS; i += inc) {
+		err |= __get_user(fpr_val, &fpregs[i]);
 		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
 	}
-	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+	err |= __get_user(current->thread.fpu.fcr31, csr);
 
 	return err;
 }
 
 /*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+	return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+	return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+	struct ucontext __user *uc;
+
+	/*
+	 * We can just pretend the sigcontext is always embedded in a struct
+	 * ucontext here, because the offset from sigcontext to extended
+	 * context is the same in the struct sigframe case.
+	 */
+	uc = container_of(sc, struct ucontext, uc_mcontext);
+	return &uc->uc_extcontext;
+}
+
+static int save_msa_extcontext(void __user *buf)
+{
+	struct msa_extcontext __user *msa = buf;
+	uint64_t val;
+	int i, err;
+
+	if (!thread_msa_context_live())
+		return 0;
+
+	/*
+	 * Ensure that we can't lose the live MSA context between checking
+	 * for it & writing it to memory.
+	 */
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be saved to kernel memory
+		 * and then copied to user memory. The save to kernel memory
+		 * should already have been done when handling scalar FP
+		 * context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		err = __put_user(read_msa_csr(), &msa->csr);
+		err |= _save_msa_all_upper(&msa->wr);
+
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+			err |= __put_user(val, &msa->wr[i]);
+		}
+	}
+
+	err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+	err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+	return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+	struct msa_extcontext __user *msa = buf;
+	unsigned long long val;
+	unsigned int csr;
+	int i, err;
+
+	if (size != sizeof(*msa))
+		return -EINVAL;
+
+	err = get_user(csr, &msa->csr);
+	if (err)
+		return err;
+
+	preempt_disable();
+
+	if (is_msa_enabled()) {
+		/*
+		 * There are no EVA versions of the vector register load/store
+		 * instructions, so MSA context has to be copied to kernel
+		 * memory and later loaded to registers. The same is true of
+		 * scalar FP context, so FPU & MSA should have already been
+		 * disabled whilst handling scalar FP context.
+		 */
+		BUG_ON(config_enabled(CONFIG_EVA));
+
+		write_msa_csr(csr);
+		err |= _restore_msa_all_upper(&msa->wr);
+		preempt_enable();
+	} else {
+		preempt_enable();
+
+		current->thread.fpu.msacsr = csr;
+
+		for (i = 0; i < NUM_FPU_REGS; i++) {
+			err |= __get_user(val, &msa->wr[i]);
+			set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+		}
+	}
+
+	return err;
+}
+
+static int save_extcontext(void __user *buf)
+{
+	int sz;
+
+	sz = save_msa_extcontext(buf);
+	if (sz < 0)
+		return sz;
+	buf += sz;
+
+	/* If no context was saved then trivially return */
+	if (!sz)
+		return 0;
+
+	/* Write the end marker */
+	if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+		return -EFAULT;
+
+	sz += sizeof(((struct extcontext *)NULL)->magic);
+	return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+	struct extcontext ext;
+	int err;
+
+	while (1) {
+		err = __get_user(ext.magic, (unsigned int *)buf);
+		if (err)
+			return err;
+
+		if (ext.magic == END_EXTCONTEXT_MAGIC)
+			return 0;
+
+		err = __get_user(ext.size, (unsigned int *)(buf
+			+ offsetof(struct extcontext, size)));
+		if (err)
+			return err;
+
+		switch (ext.magic) {
+		case MSA_EXTCONTEXT_MAGIC:
+			err = restore_msa_extcontext(buf, ext.size);
+			break;
+
+		default:
+			err = -EINVAL;
+			break;
+		}
+
+		if (err)
+			return err;
+
+		buf += ext.size;
+	}
+}
+
+/*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc)
+int protected_save_fp_context(void __user *sc)
 {
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+	uint32_t __user *used_math = sc + abi->off_sc_used_math;
+	unsigned int used, ext_sz;
 	int err;
-#ifndef CONFIG_EVA
+
+	used = used_math() ? USED_FP : 0;
+	if (!used)
+		goto fp_done;
+
+	if (!test_thread_flag(TIF_32BIT_FPREGS))
+		used |= USED_FR1;
+	if (test_thread_flag(TIF_HYBRID_FPREGS))
+		used |= USED_HYBRID_FPRS;
+
+	/*
+	 * EVA does not have userland equivalents of ldc1 or sdc1, so
+	 * save to the kernel FP context & copy that to userland below.
+	 */
+	if (config_enabled(CONFIG_EVA))
+		lose_fpu(1);
+
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
@@ -114,27 +328,57 @@
 		if (likely(!err))
 			break;
 		/* touch the sigcontext and try again */
-		err = __put_user(0, &sc->sc_fpregs[0]) |
-			__put_user(0, &sc->sc_fpregs[31]) |
-			__put_user(0, &sc->sc_fpc_csr);
+		err = __put_user(0, &fpregs[0]) |
+			__put_user(0, &fpregs[31]) |
+			__put_user(0, csr);
 		if (err)
-			break;	/* really bad sigcontext */
+			return err;	/* really bad sigcontext */
 	}
-#else
-	/*
-	 * EVA does not have FPU EVA instructions so saving fpu context directly
-	 * does not work.
-	 */
-	lose_fpu(1);
-	err = save_fp_context(sc); /* this might fail */
-#endif
-	return err;
+
+fp_done:
+	ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+	if (err < 0)
+		return err;
+	used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+	return __put_user(used, used_math);
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+int protected_restore_fp_context(void __user *sc)
 {
-	int err, tmp __maybe_unused;
-#ifndef CONFIG_EVA
+	struct mips_abi *abi = current->thread.abi;
+	uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+	uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+	uint32_t __user *used_math = sc + abi->off_sc_used_math;
+	unsigned int used;
+	int err, sig = 0, tmp __maybe_unused;
+
+	err = __get_user(used, used_math);
+	conditional_used_math(used & USED_FP);
+
+	/*
+	 * The signal handler may have used FPU; give it up if the program
+	 * doesn't want it following sigreturn.
+	 */
+	if (err || !(used & USED_FP))
+		lose_fpu(0);
+	if (err)
+		return err;
+	if (!(used & USED_FP))
+		goto fp_done;
+
+	err = sig = fpcsr_pending(csr);
+	if (err < 0)
+		return err;
+
+	/*
+	 * EVA does not have userland equivalents of ldc1 or sdc1, so we
+	 * disable the FPU here such that the code below simply copies to
+	 * the kernel FP context.
+	 */
+	if (config_enabled(CONFIG_EVA))
+		lose_fpu(0);
+
 	while (1) {
 		lock_fpu_owner();
 		if (is_fpu_owner()) {
@@ -147,28 +391,24 @@
 		if (likely(!err))
 			break;
 		/* touch the sigcontext and try again */
-		err = __get_user(tmp, &sc->sc_fpregs[0]) |
-			__get_user(tmp, &sc->sc_fpregs[31]) |
-			__get_user(tmp, &sc->sc_fpc_csr);
+		err = __get_user(tmp, &fpregs[0]) |
+			__get_user(tmp, &fpregs[31]) |
+			__get_user(tmp, csr);
 		if (err)
 			break;	/* really bad sigcontext */
 	}
-#else
-	/*
-	 * EVA does not have FPU EVA instructions so restoring fpu context
-	 * directly does not work.
-	 */
-	lose_fpu(0);
-	err = restore_fp_context(sc); /* this might fail */
-#endif
-	return err;
+
+fp_done:
+	if (used & USED_EXTCONTEXT)
+		err |= restore_extcontext(sc_to_extcontext(sc));
+
+	return err ?: sig;
 }
 
 int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
 	int err = 0;
 	int i;
-	unsigned int used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -191,19 +431,38 @@
 		err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
 	}
 
-	used_math = !!used_math();
-	err |= __put_user(used_math, &sc->sc_used_math);
 
-	if (used_math) {
-		/*
-		 * Save FPU state to signal context. Signal handler
-		 * will "inherit" current FPU state.
-		 */
-		err |= protected_save_fp_context(sc);
-	}
+	/*
+	 * Save FPU state to signal context. Signal handler
+	 * will "inherit" current FPU state.
+	 */
+	err |= protected_save_fp_context(sc);
+
 	return err;
 }
 
+static size_t extcontext_max_size(void)
+{
+	size_t sz = 0;
+
+	/*
+	 * The assumption here is that between this point & the point at which
+	 * the extended context is saved the size of the context should only
+	 * ever be able to shrink (if the task is preempted), but never grow.
+	 * That is, what this function returns is an upper bound on the size of
+	 * the extended context for the current task at the current time.
+	 */
+
+	if (thread_msa_context_live())
+		sz += sizeof(struct msa_extcontext);
+
+	/* If any context is saved then we'll append the end marker */
+	if (sz)
+		sz += sizeof(((struct extcontext *)NULL)->magic);
+
+	return sz;
+}
+
 int fpcsr_pending(unsigned int __user *fpcsr)
 {
 	int err, sig = 0;
@@ -223,21 +482,8 @@
 	return err ?: sig;
 }
 
-static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
-{
-	int err, sig;
-
-	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
-	if (err > 0)
-		err = 0;
-	err |= protected_restore_fp_context(sc);
-	return err ?: sig;
-}
-
 int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 {
-	unsigned int used_math;
 	unsigned long treg;
 	int err = 0;
 	int i;
@@ -265,19 +511,7 @@
 	for (i = 1; i < 32; i++)
 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
 
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-
-	if (used_math) {
-		/* restore fpu context if we have used it before */
-		if (!err)
-			err = check_and_restore_fp_context(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu(0);
-	}
-
-	return err;
+	return err ?: protected_restore_fp_context(sc);
 }
 
 void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
@@ -285,6 +519,9 @@
 {
 	unsigned long sp;
 
+	/* Leave space for potential extended context */
+	frame_size += extcontext_max_size();
+
 	/* Default to using normal stack */
 	sp = regs->regs[29];
 
@@ -520,7 +757,11 @@
 	.setup_rt_frame = setup_rt_frame,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, rt_signal_trampoline),
-	.restart	= __NR_restart_syscall
+	.restart	= __NR_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
 };
 
 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
@@ -616,6 +857,9 @@
 
 	user_exit();
 
+	if (thread_info_flags & _TIF_UPROBE)
+		uprobe_notify_resume(regs);
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
@@ -629,43 +873,46 @@
 }
 
 #ifdef CONFIG_SMP
-#ifndef CONFIG_EVA
-static int smp_save_fp_context(struct sigcontext __user *sc)
+static int smp_save_fp_context(void __user *sc)
 {
 	return raw_cpu_has_fpu
-	       ? _save_fp_context(sc)
+	       ? save_hw_fp_context(sc)
 	       : copy_fp_to_sigcontext(sc);
 }
 
-static int smp_restore_fp_context(struct sigcontext __user *sc)
+static int smp_restore_fp_context(void __user *sc)
 {
 	return raw_cpu_has_fpu
-	       ? _restore_fp_context(sc)
+	       ? restore_hw_fp_context(sc)
 	       : copy_fp_from_sigcontext(sc);
 }
-#endif /* CONFIG_EVA */
 #endif
 
 static int signal_setup(void)
 {
-#ifndef CONFIG_EVA
+	/*
+	 * The offset from sigcontext to extended context should be the same
+	 * regardless of the type of signal, such that userland can always know
+	 * where to look if it wishes to find the extended context structures.
+	 */
+	BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+		      offsetof(struct sigframe, sf_sc)) !=
+		     (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+		      offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
 #ifdef CONFIG_SMP
 	/* For now just do the cpu_has_fpu check when the functions are invoked */
 	save_fp_context = smp_save_fp_context;
 	restore_fp_context = smp_restore_fp_context;
 #else
 	if (cpu_has_fpu) {
-		save_fp_context = _save_fp_context;
-		restore_fp_context = _restore_fp_context;
+		save_fp_context = save_hw_fp_context;
+		restore_fp_context = restore_hw_fp_context;
 	} else {
 		save_fp_context = copy_fp_to_sigcontext;
 		restore_fp_context = copy_fp_from_sigcontext;
 	}
 #endif /* CONFIG_SMP */
-#else
-	save_fp_context = copy_fp_to_sigcontext;
-	restore_fp_context = copy_fp_from_sigcontext;
-#endif
 
 	return 0;
 }
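
The extended context area appended after the sigcontext by this patch is a
sequence of records, each starting with a { magic, size } header (currently
only the MSA record exists), terminated by a bare END_EXTCONTEXT_MAGIC word;
see save_extcontext()/restore_extcontext() above. A hedged, user-side sketch of
walking that area (the struct and helper names below are illustrative, not
kernel definitions):

#include <stddef.h>
#include <stdint.h>

struct ext_hdr {
	uint32_t magic;
	uint32_t size;		/* whole record, header included */
};

static const struct ext_hdr *find_extcontext(const void *ext, uint32_t want,
					     uint32_t end_magic)
{
	const struct ext_hdr *h = ext;

	/* the terminator carries only a magic word, so check it first */
	while (h->magic != end_magic) {
		if (h->magic == want)
			return h;
		h = (const void *)((const char *)h + h->size);
	}
	return NULL;
}
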
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 5d7f263..f7e89524 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -36,12 +36,6 @@
 
 #include "signal-common.h"
 
-static int (*save_fp_context32)(struct sigcontext32 __user *sc);
-static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
-
-extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
  */
@@ -74,99 +68,11 @@
 	struct ucontext32 rs_uc;
 };
 
-/*
- * Thread saved context copy to/from a signal context presumed to be on the
- * user stack, and therefore accessed with appropriate macros from uaccess.h.
- */
-static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-
-	for (i = 0; i < NUM_FPU_REGS; i += inc) {
-		err |=
-		    __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
-			       &sc->sc_fpregs[i]);
-	}
-	err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-	return err;
-}
-
-static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-	int i;
-	int err = 0;
-	int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
-	u64 fpr_val;
-
-	for (i = 0; i < NUM_FPU_REGS; i += inc) {
-		err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
-		set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
-	}
-	err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
-	return err;
-}
-
-/*
- * sigcontext handlers
- */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err;
-	while (1) {
-		lock_fpu_owner();
-		if (is_fpu_owner()) {
-			err = save_fp_context32(sc);
-			unlock_fpu_owner();
-		} else {
-			unlock_fpu_owner();
-			err = copy_fp_to_sigcontext32(sc);
-		}
-		if (likely(!err))
-			break;
-		/* touch the sigcontext and try again */
-		err = __put_user(0, &sc->sc_fpregs[0]) |
-			__put_user(0, &sc->sc_fpregs[31]) |
-			__put_user(0, &sc->sc_fpc_csr);
-		if (err)
-			break;	/* really bad sigcontext */
-	}
-	return err;
-}
-
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err, tmp __maybe_unused;
-	while (1) {
-		lock_fpu_owner();
-		if (is_fpu_owner()) {
-			err = restore_fp_context32(sc);
-			unlock_fpu_owner();
-		} else {
-			unlock_fpu_owner();
-			err = copy_fp_from_sigcontext32(sc);
-		}
-		if (likely(!err))
-			break;
-		/* touch the sigcontext and try again */
-		err = __get_user(tmp, &sc->sc_fpregs[0]) |
-			__get_user(tmp, &sc->sc_fpregs[31]) |
-			__get_user(tmp, &sc->sc_fpc_csr);
-		if (err)
-			break;	/* really bad sigcontext */
-	}
-	return err;
-}
-
 static int setup_sigcontext32(struct pt_regs *regs,
 			      struct sigcontext32 __user *sc)
 {
 	int err = 0;
 	int i;
-	u32 used_math;
 
 	err |= __put_user(regs->cp0_epc, &sc->sc_pc);
 
@@ -186,35 +92,18 @@
 		err |= __put_user(mflo3(), &sc->sc_lo3);
 	}
 
-	used_math = !!used_math();
-	err |= __put_user(used_math, &sc->sc_used_math);
+	/*
+	 * Save FPU state to signal context.  Signal handler
+	 * will "inherit" current FPU state.
+	 */
+	err |= protected_save_fp_context(sc);
 
-	if (used_math) {
-		/*
-		 * Save FPU state to signal context.  Signal handler
-		 * will "inherit" current FPU state.
-		 */
-		err |= protected_save_fp_context32(sc);
-	}
 	return err;
 }
 
-static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
-{
-	int err, sig;
-
-	err = sig = fpcsr_pending(&sc->sc_fpc_csr);
-	if (err > 0)
-		err = 0;
-	err |= protected_restore_fp_context32(sc);
-	return err ?: sig;
-}
-
 static int restore_sigcontext32(struct pt_regs *regs,
 				struct sigcontext32 __user *sc)
 {
-	u32 used_math;
 	int err = 0;
 	s32 treg;
 	int i;
@@ -238,70 +127,7 @@
 	for (i = 1; i < 32; i++)
 		err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
 
-	err |= __get_user(used_math, &sc->sc_used_math);
-	conditional_used_math(used_math);
-
-	if (used_math) {
-		/* restore fpu context if we have used it before */
-		if (!err)
-			err = check_and_restore_fp_context32(sc);
-	} else {
-		/* signal handler may have used FPU.  Give it up. */
-		lose_fpu(0);
-	}
-
-	return err;
-}
-
-/*
- *
- */
-extern void __put_sigset_unknown_nsig(void);
-extern void __get_sigset_unknown_nsig(void);
-
-static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
-{
-	int err = 0;
-
-	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	switch (_NSIG_WORDS) {
-	default:
-		__put_sigset_unknown_nsig();
-	case 2:
-		err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
-		err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
-	case 1:
-		err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
-		err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
-	}
-
-	return err;
-}
-
-static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
-{
-	int err = 0;
-	unsigned long sig[4];
-
-	if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
-		return -EFAULT;
-
-	switch (_NSIG_WORDS) {
-	default:
-		__get_sigset_unknown_nsig();
-	case 2:
-		err |= __get_user(sig[3], &ubuf->sig[3]);
-		err |= __get_user(sig[2], &ubuf->sig[2]);
-		kbuf->sig[1] = sig[2] | (sig[3] << 32);
-	case 1:
-		err |= __get_user(sig[1], &ubuf->sig[1]);
-		err |= __get_user(sig[0], &ubuf->sig[0]);
-		kbuf->sig[0] = sig[0] | (sig[1] << 32);
-	}
-
-	return err;
+	return err ?: protected_restore_fp_context(sc);
 }
 
 /*
@@ -585,20 +411,9 @@
 	.setup_rt_frame = setup_rt_frame_32,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, o32_rt_signal_trampoline),
-	.restart	= __NR_O32_restart_syscall
+	.restart	= __NR_O32_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
 };
-
-static int signal32_init(void)
-{
-	if (cpu_has_fpu) {
-		save_fp_context32 = _save_fp_context32;
-		restore_fp_context32 = _restore_fp_context32;
-	} else {
-		save_fp_context32 = copy_fp_to_sigcontext32;
-		restore_fp_context32 = copy_fp_from_sigcontext32;
-	}
-
-	return 0;
-}
-
-arch_initcall(signal32_init);
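
With the O32-specific save/restore paths removed, both ABIs now go through the
shared protected_{save,restore}_fp_context() helpers in signal.c; the off_sc_*
offsets added to struct mips_abi above are what let those helpers locate the FP
fields in either sigcontext layout. A minimal sketch of that access pattern,
mirroring copy_fp_to_sigcontext() (the helper names here are illustrative):

static inline uint64_t __user *sc_fpregs_ptr(void __user *sc)
{
	return sc + current->thread.abi->off_sc_fpregs;
}

static inline uint32_t __user *sc_fpc_csr_ptr(void __user *sc)
{
	return sc + current->thread.abi->off_sc_fpc_csr;
}
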
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index f1d4751..0d017fd 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -153,5 +153,9 @@
 	.setup_rt_frame = setup_rt_frame_n32,
 	.rt_signal_return_offset =
 		offsetof(struct mips_vdso, n32_rt_signal_trampoline),
-	.restart	= __NR_N32_restart_syscall
+	.restart	= __NR_N32_restart_syscall,
+
+	.off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+	.off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+	.off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
 };
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index d1168d7..8489c88 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -209,6 +209,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		config0 = read_c0_config();
 		/* FIXME: addresses are Malta specific */
 		if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index 5b539f5..5f05539 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -21,24 +21,12 @@
 
 static void sysrq_tlbdump_single(void *dummy)
 {
-	const int field = 2 * sizeof(unsigned long);
 	unsigned long flags;
 
 	spin_lock_irqsave(&show_lock, flags);
 
 	pr_info("CPU%d:\n", smp_processor_id());
-	pr_info("Index	: %0x\n", read_c0_index());
-	pr_info("Pagemask: %0x\n", read_c0_pagemask());
-	pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
-	pr_info("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
-	pr_info("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
-	pr_info("Wired   : %0x\n", read_c0_wired());
-	pr_info("Pagegrain: %0x\n", read_c0_pagegrain());
-	if (cpu_has_htw) {
-		pr_info("PWField : %0*lx\n", field, read_c0_pwfield());
-		pr_info("PWSize  : %0*lx\n", field, read_c0_pwsize());
-		pr_info("PWCtl   : %0x\n", read_c0_pwctl());
-	}
+	dump_tlb_regs();
 	pr_info("\n");
 	dump_tlb_all();
 	pr_info("\n");
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 8ea28e6..fdb392b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -370,11 +370,6 @@
 	set_fs(old_fs);
 }
 
-static int regs_to_trapnr(struct pt_regs *regs)
-{
-	return (regs->cp0_cause >> 2) & 0x1f;
-}
-
 static DEFINE_RAW_SPINLOCK(die_lock);
 
 void __noreturn die(const char *str, struct pt_regs *regs)
@@ -384,7 +379,7 @@
 
 	oops_enter();
 
-	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 		       SIGSEGV) == NOTIFY_STOP)
 		sig = 0;
 
@@ -470,7 +465,7 @@
 	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 	       data ? "Data" : "Instruction",
 	       field, regs->cp0_epc, field, regs->regs[31]);
-	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 		       SIGBUS) == NOTIFY_STOP)
 		goto out;
 
@@ -826,7 +821,7 @@
 	int sig;
 
 	prev_state = exception_enter();
-	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 		       SIGFPE) == NOTIFY_STOP)
 		goto out;
 
@@ -882,11 +877,12 @@
 	char b[40];
 
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+			 SIGTRAP) == NOTIFY_STOP)
 		return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 		       SIGTRAP) == NOTIFY_STOP)
 		return;
 
@@ -948,6 +944,7 @@
 		set_fs(KERNEL_DS);
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (get_isa16_mode(regs->cp0_epc)) {
 		u16 instr[2];
 
@@ -987,15 +984,27 @@
 	 * pertain to them.
 	 */
 	switch (bcode) {
+	case BRK_UPROBE:
+		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+			goto out;
+		else
+			break;
+	case BRK_UPROBE_XOL:
+		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+			goto out;
+		else
+			break;
 	case BRK_KPROBE_BP:
 		if (notify_die(DIE_BREAK, "debug", regs, bcode,
-			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 			goto out;
 		else
 			break;
 	case BRK_KPROBE_SSTEPBP:
 		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
-			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 			goto out;
 		else
 			break;
@@ -1028,6 +1037,7 @@
 		set_fs(get_ds());
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (get_isa16_mode(regs->cp0_epc)) {
 		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
 		    __get_user(instr[1], (u16 __user *)(epc + 2)))
@@ -1094,8 +1104,9 @@
 no_r2_instr:
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 
-	if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
 		       SIGILL) == NOTIFY_STOP)
 		goto out;
 
@@ -1444,8 +1455,9 @@
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
-		       regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
 		goto out;
 
 	/* Clear MSACSR.Cause before enabling interrupts */
@@ -1523,7 +1535,6 @@
 
 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
-	const int field = 2 * sizeof(unsigned long);
 	int multi_match = regs->cp0_status & ST0_TS;
 	enum ctx_state prev_state;
 	mm_segment_t old_fs = get_fs();
@@ -1532,19 +1543,8 @@
 	show_regs(regs);
 
 	if (multi_match) {
-		pr_err("Index	: %0x\n", read_c0_index());
-		pr_err("Pagemask: %0x\n", read_c0_pagemask());
-		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
-		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
-		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
-		pr_err("Wired   : %0x\n", read_c0_wired());
-		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
-		if (cpu_has_htw) {
-			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
-			pr_err("PWSize  : %0*lx\n", field, read_c0_pwsize());
-			pr_err("PWCtl   : %0x\n", read_c0_pwctl());
-		}
-		pr_err("\n");
+		dump_tlb_regs();
+		pr_info("\n");
 		dump_tlb_all();
 	}
 
@@ -1651,6 +1651,7 @@
 	case CPU_PROAPTIV:
 	case CPU_P5600:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		{
 #define ERRCTL_PE	0x80000000
 #define ERRCTL_L2P	0x00800000
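
The regs_to_trapnr() helper is gone because the exception handlers above now
record the trap number in current->thread.trap_nr as soon as they run, so the
die notifiers (and the new uprobes code) can read it later. The value is the
ExcCode field of the CP0 Cause register; a standalone sketch of that
extraction, matching the expression used in the hunks above:

#include <stdint.h>

/* CP0 Cause.ExcCode occupies bits 6..2 */
static inline unsigned int cause_exccode(uint32_t cause)
{
	return (cause >> 2) & 0x1f;
}
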
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index eb3efd1..990354d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -891,6 +891,9 @@
 #ifdef	CONFIG_EVA
 	mm_segment_t seg;
 #endif
+	union fpureg *fpr;
+	enum msa_2b_fmt df;
+	unsigned int wd;
 	origpc = (unsigned long)pc;
 	orig31 = regs->regs[31];
 
@@ -1202,6 +1205,75 @@
 			break;
 		return;
 
+	case msa_op:
+		if (!cpu_has_msa)
+			goto sigill;
+
+		/*
+		 * If we've reached this point then userland should have taken
+		 * the MSA disabled exception & initialised vector context at
+		 * some point in the past.
+		 */
+		BUG_ON(!thread_msa_context_live());
+
+		df = insn.msa_mi10_format.df;
+		wd = insn.msa_mi10_format.wd;
+		fpr = &current->thread.fpu.fpr[wd];
+
+		switch (insn.msa_mi10_format.func) {
+		case msa_ld_op:
+			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Disable preemption to avoid a race between copying
+			 * state from userland, migrating to another CPU and
+			 * updating the hardware vector register below.
+			 */
+			preempt_disable();
+
+			res = __copy_from_user_inatomic(fpr, addr,
+							sizeof(*fpr));
+			if (res)
+				goto fault;
+
+			/*
+			 * Update the hardware register if it is in use by the
+			 * task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			if (test_thread_flag(TIF_USEDMSA))
+				write_msa_wr(wd, fpr, df);
+
+			preempt_enable();
+			break;
+
+		case msa_st_op:
+			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
+				goto sigbus;
+
+			/*
+			 * Update from the hardware register if it is in use by
+			 * the task in this quantum, in order to avoid having to
+			 * save & restore the whole vector context.
+			 */
+			preempt_disable();
+			if (test_thread_flag(TIF_USEDMSA))
+				read_msa_wr(wd, fpr, df);
+			preempt_enable();
+
+			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+			if (res)
+				goto fault;
+			break;
+
+		default:
+			goto sigbus;
+		}
+
+		compute_return_epc(regs);
+		break;
+
 #ifndef CONFIG_CPU_MIPSR6
 	/*
 	 * COP2 is available to implementor for application specific use.
@@ -2240,5 +2312,5 @@
 		return -ENOMEM;
 	return 0;
 }
-__initcall(debugfs_unaligned);
+arch_initcall(debugfs_unaligned);
 #endif
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
new file mode 100644
index 0000000..8452d93
--- /dev/null
+++ b/arch/mips/kernel/uprobes.c
@@ -0,0 +1,341 @@
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+#include <asm/inst.h>
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+	switch (insn.i_format.opcode) {
+	/*
+	 * jr and jalr are in r_format format.
+	 */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+		case jr_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * This group contains:
+	 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+	 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+	 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+		case bgez_op:
+		case bgezl_op:
+		case bltzal_op:
+		case bltzall_op:
+		case bgezal_op:
+		case bgezall_op:
+		case bposge32_op:
+			return 1;
+		}
+		break;
+
+	/*
+	 * These are unconditional and in j_format.
+	 */
+	case jal_op:
+	case j_op:
+	case beq_op:
+	case beql_op:
+	case bne_op:
+	case bnel_op:
+	case blez_op: /* not really i_format */
+	case blezl_op:
+	case bgtz_op:
+	case bgtzl_op:
+		return 1;
+
+	/*
+	 * And now the FPA/cp1 branch instructions.
+	 */
+	case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+	case lwc2_op: /* This is bbit0 on Octeon */
+	case ldc2_op: /* This is bbit032 on Octeon */
+	case swc2_op: /* This is bbit1 on Octeon */
+	case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @mm: the probed address space.
+ * @arch_uprobe: the probepoint information.
+ * @addr: virtual address at which to install the probepoint
+ * Return 0 on success or a -ve number on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+	struct mm_struct *mm, unsigned long addr)
+{
+	union mips_instruction inst;
+
+	/*
+	 * For the time being this also blocks attempts to use uprobes with
+	 * MIPS16 and microMIPS.
+	 */
+	if (addr & 0x03)
+		return -EINVAL;
+
+	inst.word = aup->insn[0];
+	aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+	aup->ixol[1] = UPROBE_BRK_UPROBE_XOL;		/* NOP  */
+
+	return 0;
+}
+
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c,
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS).  We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+	union mips_instruction inst;
+
+	inst.word = *insn;
+
+	switch (inst.i_format.opcode) {
+	case spec_op:
+		switch (inst.r_format.func) {
+		case break_op:
+		case teq_op:
+		case tge_op:
+		case tgeu_op:
+		case tlt_op:
+		case tltu_op:
+		case tne_op:
+			return 1;
+		}
+		break;
+
+	case bcond_op:	/* Yes, really ...  */
+		switch (inst.u_format.rt) {
+		case teqi_op:
+		case tgei_op:
+		case tgeiu_op:
+		case tlti_op:
+		case tltiu_op:
+		case tnei_op:
+			return 1;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+#define UPROBE_TRAP_NR	ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @auprobe: the probepoint information.
+ * @regs: reflects the saved user state of current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+	union mips_instruction insn;
+
+	/*
+	 * Now find the EPC where to resume after the breakpoint has been
+	 * dealt with.  This may require emulation of a branch.
+	 */
+	aup->resume_epc = regs->cp0_epc + 4;
+	if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+		unsigned long epc;
+
+		epc = regs->cp0_epc;
+		insn.word = aup->insn[0];
+		__compute_return_epc_for_insn(regs, insn);
+		aup->resume_epc = regs->cp0_epc;
+	}
+
+	utask->autask.saved_trap_nr = current->thread.trap_nr;
+	current->thread.trap_nr = UPROBE_TRAP_NR;
+	regs->cp0_epc = current->utask->xol_vaddr;
+
+	return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	current->thread.trap_nr = utask->autask.saved_trap_nr;
+	regs->cp0_epc = aup->resume_epc;
+
+	return 0;
+}
+
+/*
+ * If the XOL instruction itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
+ * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+	if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+		return true;
+
+	return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+	unsigned long val, void *data)
+{
+	struct die_args *args = data;
+	struct pt_regs *regs = args->regs;
+
+	/* regs == NULL is a kernel bug */
+	if (WARN_ON(!regs))
+		return NOTIFY_DONE;
+
+	/* We are only interested in userspace traps */
+	if (!user_mode(regs))
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case DIE_BREAK:
+		if (uprobe_pre_sstep_notifier(regs))
+			return NOTIFY_STOP;
+		break;
+	case DIE_UPROBE_XOL:
+		if (uprobe_post_sstep_notifier(regs))
+			return NOTIFY_STOP;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+	struct pt_regs *regs)
+{
+	struct uprobe_task *utask = current->utask;
+
+	instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+	unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+	unsigned long ra;
+
+	ra = regs->regs[31];
+
+	/* Replace the return address with the trampoline address */
+	regs->regs[31] = trampoline_vaddr;
+
+	return ra;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c.
+ * It is required to handle MIPS16 and microMIPS.
+ */
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+	unsigned long vaddr)
+{
+	return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+/**
+ * set_orig_insn - Restore the original instruction.
+ * @mm: the probed process address space.
+ * @auprobe: arch specific probepoint information.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, restore the original instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+		 unsigned long vaddr)
+{
+	return uprobe_write_opcode(mm, vaddr,
+			*(uprobe_opcode_t *)&auprobe->orig_inst[0].word);
+}
+
+void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+				  void *src, unsigned long len)
+{
+	void *kaddr;
+
+	/* Initialize the slot */
+	kaddr = kmap_atomic(page);
+	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
+	kunmap_atomic(kaddr);
+
+	/*
+	 * The MIPS version of flush_icache_range will operate safely on
+	 * user space addresses and more importantly, it doesn't require a
+	 * VMA argument.
+	 */
+	flush_icache_range(vaddr, vaddr + len);
+}
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+	return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate, so this function simply returns false (0).
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	return 0;
+}
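
The execute-out-of-line (XOL) handling above never single-steps a branch:
arch_uprobe_analyze_insn() copies either the probed instruction or, when it has
a delay slot, the instruction sitting in that slot into the XOL area, and
arch_uprobe_pre_xol() emulates the branch itself via
__compute_return_epc_for_insn() before pointing EPC at the slot. A standalone
sketch of just the instruction-selection step (function name illustrative):

#include <stdbool.h>
#include <stdint.h>

/* mirrors the ixol[0] assignment in arch_uprobe_analyze_insn() */
static uint32_t pick_xol_insn(const uint32_t insn[2], bool has_delay_slot)
{
	return insn[has_delay_slot ? 1 : 0];
}
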
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 11da314..9067b65 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -817,6 +817,7 @@
 
 static int vpe_release(struct inode *inode, struct file *filp)
 {
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
 	struct vpe *v;
 	Elf_Ehdr *hdr;
 	int ret = 0;
@@ -827,7 +828,7 @@
 
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
-		if ((vpe_elfload(v) >= 0) && vpe_run) {
+		if (vpe_elfload(v) >= 0) {
 			vpe_run(v);
 		} else {
 			pr_warn("VPE loader: ELF load failed.\n");
@@ -850,6 +851,10 @@
 	v->plen = 0;
 
 	return ret;
+#else
+	pr_warn("VPE loader: ELF load failed.\n");
+	return -ENOEXEC;
+#endif
 }
 
 static ssize_t vpe_write(struct file *file, const char __user *buffer,
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index a57959e..c710d96 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -270,4 +270,4 @@
 	return 0;
 }
 
-__initcall(lasat_register_sysctl);
+arch_initcall(lasat_register_sysctl);
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 167f3563..92a3731 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -13,6 +13,33 @@
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+void dump_tlb_regs(void)
+{
+	const int field = 2 * sizeof(unsigned long);
+
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("PageMask : %0x\n", read_c0_pagemask());
+	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
+	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
+	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
+	pr_info("Wired    : %0x\n", read_c0_wired());
+	switch (current_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_R14000:
+	case CPU_R16000:
+		pr_info("FrameMask: %0x\n", read_c0_framemask());
+		break;
+	}
+	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
+		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
+	if (cpu_has_htw) {
+		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
+		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
+		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
+	}
+}
+
 static inline const char *msk2str(unsigned int mask)
 {
 	switch (mask) {
@@ -87,7 +114,7 @@
 		 * leave only a single G bit set after a machine check exception
 		 * due to duplicate TLB entry.
 		 */
-		if (!((entrylo0 | entrylo1) & MIPS_ENTRYLO_G) &&
+		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
 		    (entryhi & 0xff) != asid)
 			continue;
 
@@ -96,8 +123,8 @@
 		 */
 		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
 
-		c0 = (entrylo0 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
-		c1 = (entrylo1 & MIPS_ENTRYLO_C) >> MIPS_ENTRYLO_C_SHIFT;
+		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
+		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
 
 		printk("va=%0*lx asid=%02lx\n",
 		       vwidth, (entryhi & ~0x1fffUL),
@@ -114,9 +141,9 @@
 			       (entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
 		printk("pa=%0*llx c=%d d=%d v=%d g=%d] [",
 		       pwidth, pa, c0,
-		       (entrylo0 & MIPS_ENTRYLO_D) ? 1 : 0,
-		       (entrylo0 & MIPS_ENTRYLO_V) ? 1 : 0,
-		       (entrylo0 & MIPS_ENTRYLO_G) ? 1 : 0);
+		       (entrylo0 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo0 & ENTRYLO_G) ? 1 : 0);
 		/* RI/XI are in awkward places, so mask them off separately */
 		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
 		if (xpa)
@@ -128,9 +155,9 @@
 			       (entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
 		printk("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
 		       pwidth, pa, c1,
-		       (entrylo1 & MIPS_ENTRYLO_D) ? 1 : 0,
-		       (entrylo1 & MIPS_ENTRYLO_V) ? 1 : 0,
-		       (entrylo1 & MIPS_ENTRYLO_G) ? 1 : 0);
+		       (entrylo1 & ENTRYLO_D) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_V) ? 1 : 0,
+		       (entrylo1 & ENTRYLO_G) ? 1 : 0);
 	}
 	printk("\n");
 
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 8e0d3cf..cfcbb52 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -14,6 +14,17 @@
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
 
+extern int r3k_have_wired_reg;
+
+void dump_tlb_regs(void)
+{
+	pr_info("Index    : %0x\n", read_c0_index());
+	pr_info("EntryHi  : %0lx\n", read_c0_entryhi());
+	pr_info("EntryLo  : %0lx\n", read_c0_entrylo0());
+	if (r3k_have_wired_reg)
+		pr_info("Wired    : %0x\n", read_c0_wired());
+}
+
 static void dump_tlb(int first, int last)
 {
 	int	i;
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index df0f850..0996b02 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -126,26 +126,34 @@
 	return IRQ_HANDLED;
 }
 
-static void ls1x_clockevent_set_mode(enum clock_event_mode mode,
-				     struct clock_event_device *cd)
+static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
 {
 	raw_spin_lock(&ls1x_timer_lock);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
-		ls1x_pwmtimer_restart();
-	case CLOCK_EVT_MODE_RESUME:
-		__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
-			     timer_base + PWM_CTRL);
-		break;
-	default:
-		break;
-	}
+	ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
+	ls1x_pwmtimer_restart();
+	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
 	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
+}
+
+static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
+{
+	raw_spin_lock(&ls1x_timer_lock);
+	__raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
+}
+
+static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
+{
+	raw_spin_lock(&ls1x_timer_lock);
+	__raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
+		     timer_base + PWM_CTRL);
+	raw_spin_unlock(&ls1x_timer_lock);
+
+	return 0;
 }
 
 static int ls1x_clockevent_set_next(unsigned long evt,
@@ -160,12 +168,15 @@
 }
 
 static struct clock_event_device ls1x_clockevent = {
-	.name		= "ls1x-pwmtimer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.rating		= 300,
-	.irq		= LS1X_TIMER_IRQ,
-	.set_next_event	= ls1x_clockevent_set_next,
-	.set_mode	= ls1x_clockevent_set_mode,
+	.name			= "ls1x-pwmtimer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.rating			= 300,
+	.irq			= LS1X_TIMER_IRQ,
+	.set_next_event		= ls1x_clockevent_set_next,
+	.set_state_shutdown	= ls1x_clockevent_set_state_shutdown,
+	.set_state_periodic	= ls1x_clockevent_set_state_periodic,
+	.set_state_oneshot	= ls1x_clockevent_set_state_shutdown,
+	.tick_resume		= ls1x_clockevent_tick_resume,
 };
 
 static struct irqaction ls1x_pwmtimer_irqaction = {
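
The ls1x conversion above and the mfgpt/hpet conversions that follow all apply
the same clockevents change: the single set_mode() callback and its
enum clock_event_mode switch are split into per-state callbacks returning 0 on
success, wired up through the set_state_periodic, set_state_shutdown,
set_state_oneshot and tick_resume fields. A minimal sketch of the resulting
shape (driver name and hardware programming are placeholders):

static int example_timer_set_periodic(struct clock_event_device *cd)
{
	/* program the hardware for periodic interrupts */
	return 0;
}

static int example_timer_shutdown(struct clock_event_device *cd)
{
	/* stop the counter / mask the timer interrupt */
	return 0;
}

static struct clock_event_device example_timer_clockevent = {
	.name			= "example-timer",
	.features		= CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_state_periodic	= example_timer_set_periodic,
	.set_state_shutdown	= example_timer_shutdown,
};
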
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 8750370..da77d41 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -51,40 +51,36 @@
 }
 EXPORT_SYMBOL(enable_mfgpt0_counter);
 
-static void init_mfgpt_timer(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int mfgpt_timer_set_periodic(struct clock_event_device *evt)
 {
 	raw_spin_lock(&mfgpt_lock);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		outw(COMPARE, MFGPT0_CMP2);	/* set comparator2 */
-		outw(0, MFGPT0_CNT);	/* set counter to 0 */
-		enable_mfgpt0_counter();
-		break;
+	outw(COMPARE, MFGPT0_CMP2);	/* set comparator2 */
+	outw(0, MFGPT0_CNT);		/* set counter to 0 */
+	enable_mfgpt0_counter();
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
-		    evt->mode == CLOCK_EVT_MODE_ONESHOT)
-			disable_mfgpt0_counter();
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* The oneshot mode have very high deviation, Not use it! */
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
 	raw_spin_unlock(&mfgpt_lock);
+	return 0;
+}
+
+static int mfgpt_timer_shutdown(struct clock_event_device *evt)
+{
+	if (clockevent_state_periodic(evt) || clockevent_state_oneshot(evt)) {
+		raw_spin_lock(&mfgpt_lock);
+		disable_mfgpt0_counter();
+		raw_spin_unlock(&mfgpt_lock);
+	}
+
+	return 0;
 }
 
 static struct clock_event_device mfgpt_clockevent = {
 	.name = "mfgpt",
 	.features = CLOCK_EVT_FEAT_PERIODIC,
-	.set_mode = init_mfgpt_timer,
+
+	/* The oneshot mode has very high deviation, don't use it! */
+	.set_state_shutdown = mfgpt_timer_shutdown,
+	.set_state_periodic = mfgpt_timer_set_periodic,
 	.irq = CS5536_MFGPT_INTR,
 };
 
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 5c21cd3..bf9f1a7 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -78,55 +78,77 @@
 	/* Do nothing on Loongson-3 */
 }
 
-static void hpet_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int hpet_set_state_periodic(struct clock_event_device *evt)
 {
-	int cfg = 0;
+	int cfg;
 
 	spin_lock(&hpet_lock);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("set clock event to periodic mode!\n");
-		/* stop counter */
-		hpet_stop_counter();
 
-		/* enables the timer0 to generate a periodic interrupt */
-		cfg = hpet_read(HPET_T0_CFG);
-		cfg &= ~HPET_TN_LEVEL;
-		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
-				HPET_TN_SETVAL | HPET_TN_32BIT;
-		hpet_write(HPET_T0_CFG, cfg);
+	pr_info("set clock event to periodic mode!\n");
+	/* stop counter */
+	hpet_stop_counter();
 
-		/* set the comparator */
-		hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
-		udelay(1);
-		hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+	/* enables the timer0 to generate a periodic interrupt */
+	cfg = hpet_read(HPET_T0_CFG);
+	cfg &= ~HPET_TN_LEVEL;
+	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
+		HPET_TN_32BIT;
+	hpet_write(HPET_T0_CFG, cfg);
 
-		/* start counter */
-		hpet_start_counter();
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		cfg = hpet_read(HPET_T0_CFG);
-		cfg &= ~HPET_TN_ENABLE;
-		hpet_write(HPET_T0_CFG, cfg);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("set clock event to one shot mode!\n");
-		cfg = hpet_read(HPET_T0_CFG);
-		/* set timer0 type
-		 * 1 : periodic interrupt
-		 * 0 : non-periodic(oneshot) interrupt
-		 */
-		cfg &= ~HPET_TN_PERIODIC;
-		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
-		hpet_write(HPET_T0_CFG, cfg);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		hpet_enable_legacy_int();
-		break;
-	}
+	/* set the comparator */
+	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+	udelay(1);
+	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
+
+	/* start counter */
+	hpet_start_counter();
+
 	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_set_state_shutdown(struct clock_event_device *evt)
+{
+	int cfg;
+
+	spin_lock(&hpet_lock);
+
+	cfg = hpet_read(HPET_T0_CFG);
+	cfg &= ~HPET_TN_ENABLE;
+	hpet_write(HPET_T0_CFG, cfg);
+
+	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_set_state_oneshot(struct clock_event_device *evt)
+{
+	int cfg;
+
+	spin_lock(&hpet_lock);
+
+	pr_info("set clock event to one shot mode!\n");
+	cfg = hpet_read(HPET_T0_CFG);
+	/*
+	 * set timer0 type
+	 * 1 : periodic interrupt
+	 * 0 : non-periodic(oneshot) interrupt
+	 */
+	cfg &= ~HPET_TN_PERIODIC;
+	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+	hpet_write(HPET_T0_CFG, cfg);
+
+	spin_unlock(&hpet_lock);
+	return 0;
+}
+
+static int hpet_tick_resume(struct clock_event_device *evt)
+{
+	spin_lock(&hpet_lock);
+	hpet_enable_legacy_int();
+	spin_unlock(&hpet_lock);
+
+	return 0;
 }
 
 static int hpet_next_event(unsigned long delta,
@@ -206,7 +228,10 @@
 	cd->name = "hpet";
 	cd->rating = 320;
 	cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	cd->set_mode = hpet_set_mode;
+	cd->set_state_shutdown = hpet_set_state_shutdown;
+	cd->set_state_periodic = hpet_set_state_periodic;
+	cd->set_state_oneshot = hpet_set_state_oneshot;
+	cd->tick_resume = hpet_tick_resume;
 	cd->set_next_event = hpet_next_event;
 	cd->irq = HPET_T0_IRQ;
 	cd->cpumask = cpumask_of(cpu);
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 2e5f962..a19641d 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -4,9 +4,9 @@
 
 obj-y	+= cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
 	   dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
-	   dp_tint.o dp_fint.o \
+	   dp_tint.o dp_fint.o dp_maddf.o dp_msubf.o dp_2008class.o dp_fmin.o dp_fmax.o \
 	   sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
-	   sp_tint.o sp_fint.o \
+	   sp_tint.o sp_fint.o sp_maddf.o sp_msubf.o sp_2008class.o sp_fmin.o sp_fmax.o \
 	   dsemul.o
 
 lib-y	+= ieee754d.o \
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 712f17a..32f0e19 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1137,7 +1137,7 @@
 			break;
 
 		case mfhc_op:
-			if (!cpu_has_mips_r2)
+			if (!cpu_has_mips_r2_r6)
 				goto sigill;
 
 			/* copregister rd -> gpr[rt] */
@@ -1148,7 +1148,7 @@
 			break;
 
 		case mthc_op:
-			if (!cpu_has_mips_r2)
+			if (!cpu_has_mips_r2_r6)
 				goto sigill;
 
 			/* copregister rd <- gpr[rt] */
@@ -1181,6 +1181,24 @@
 			}
 			break;
 
+		case bc1eqz_op:
+		case bc1nez_op:
+			if (!cpu_has_mips_r6 || delay_slot(xcp))
+				return SIGILL;
+
+			cond = likely = 0;
+			switch (MIPSInst_RS(ir)) {
+			case bc1eqz_op:
+				if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
+				    cond = 1;
+				break;
+			case bc1nez_op:
+				if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
+				    cond = 1;
+				break;
+			}
+			goto branch_common;
+
 		case bc_op:
 			if (delay_slot(xcp))
 				return SIGILL;
@@ -1207,7 +1225,7 @@
 			case bct_op:
 				break;
 			}
-
+branch_common:
 			set_delay_slot(xcp);
 			if (cond) {
 				/*
@@ -1376,6 +1394,14 @@
 	IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN,	/* cmp_ule (sig) cmp_ngt */
 };
 
+static const unsigned char negative_cmptab[8] = {
+	0, /* Reserved */
+	IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
+	IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
+	IEEE754_CLT | IEEE754_CGT,
+	/* Reserved */
+};
+
 
 /*
  * Additional MIPS4 instructions
@@ -1717,6 +1743,126 @@
 			SPFROMREG(rv.s, MIPSInst_FS(ir));
 			break;
 
+		case fseleqz_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(rv.s, MIPSInst_FT(ir));
+			if (rv.w & 0x1)
+				rv.w = 0;
+			else
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			break;
+
+		case fselnez_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(rv.s, MIPSInst_FT(ir));
+			if (rv.w & 0x1)
+				SPFROMREG(rv.s, MIPSInst_FS(ir));
+			else
+				rv.w = 0;
+			break;
+
+		case fmaddf_op: {
+			union ieee754sp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			rv.s = ieee754sp_maddf(fd, fs, ft);
+			break;
+		}
+
+		case fmsubf_op: {
+			union ieee754sp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(fd, MIPSInst_FD(ir));
+			rv.s = ieee754sp_msubf(fd, fs, ft);
+			break;
+		}
+
+		case frint_op: {
+			union ieee754sp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.l = ieee754sp_tlong(fs);
+			rv.s = ieee754sp_flong(rv.l);
+			goto copcsr;
+		}
+
+		case fclass_op: {
+			union ieee754sp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.w = ieee754sp_2008class(fs);
+			rfmt = w_fmt;
+			break;
+		}
+
+		case fmin_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmin(fs, ft);
+			break;
+		}
+
+		case fmina_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmina(fs, ft);
+			break;
+		}
+
+		case fmax_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmax(fs, ft);
+			break;
+		}
+
+		case fmaxa_op: {
+			union ieee754sp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			SPFROMREG(ft, MIPSInst_FT(ir));
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			rv.s = ieee754sp_fmaxa(fs, ft);
+			break;
+		}
+
 		case fabs_op:
 			handler.u = ieee754sp_abs;
 			goto scopuop;
@@ -1820,7 +1966,7 @@
 			goto copcsr;
 
 		default:
-			if (MIPSInst_FUNC(ir) >= fcmp_op) {
+			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
 				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 				union ieee754sp fs, ft;
 
@@ -1914,6 +2060,127 @@
 				return 0;
 			DPFROMREG(rv.d, MIPSInst_FS(ir));
 			break;
+
+		case fseleqz_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(rv.d, MIPSInst_FT(ir));
+			if (rv.l & 0x1)
+				rv.l = 0;
+			else
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			break;
+
+		case fselnez_op:
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(rv.d, MIPSInst_FT(ir));
+			if (rv.l & 0x1)
+				DPFROMREG(rv.d, MIPSInst_FS(ir));
+			else
+				rv.l = 0;
+			break;
+
+		case fmaddf_op: {
+			union ieee754dp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			rv.d = ieee754dp_maddf(fd, fs, ft);
+			break;
+		}
+
+		case fmsubf_op: {
+			union ieee754dp ft, fs, fd;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(fd, MIPSInst_FD(ir));
+			rv.d = ieee754dp_msubf(fd, fs, ft);
+			break;
+		}
+
+		case frint_op: {
+			union ieee754dp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.l = ieee754dp_tlong(fs);
+			rv.d = ieee754dp_flong(rv.l);
+			goto copcsr;
+		}
+
+		case fclass_op: {
+			union ieee754dp fs;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.w = ieee754dp_2008class(fs);
+			rfmt = w_fmt;
+			break;
+		}
+
+		case fmin_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmin(fs, ft);
+			break;
+		}
+
+		case fmina_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmina(fs, ft);
+			break;
+		}
+
+		case fmax_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmax(fs, ft);
+			break;
+		}
+
+		case fmaxa_op: {
+			union ieee754dp fs, ft;
+
+			if (!cpu_has_mips_r6)
+				return SIGILL;
+
+			DPFROMREG(ft, MIPSInst_FT(ir));
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			rv.d = ieee754dp_fmaxa(fs, ft);
+			break;
+		}
+
 		case fabs_op:
 			handler.u = ieee754dp_abs;
 			goto dcopuop;
@@ -1997,7 +2264,7 @@
 			goto copcsr;
 
 		default:
-			if (MIPSInst_FUNC(ir) >= fcmp_op) {
+			if (!NO_R6EMU && MIPSInst_FUNC(ir) >= fcmp_op) {
 				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 				union ieee754dp fs, ft;
 
@@ -2021,8 +2288,11 @@
 			break;
 		}
 		break;
+	}
 
-	case w_fmt:
+	case w_fmt: {
+		union ieee754dp fs;
+
 		switch (MIPSInst_FUNC(ir)) {
 		case fcvts_op:
 			/* convert word to single precision real */
@@ -2036,10 +2306,65 @@
 			rv.d = ieee754dp_fint(fs.bits);
 			rfmt = d_fmt;
 			goto copcsr;
-		default:
-			return SIGILL;
+		default: {
+			/* Emulating the new CMP.condn.fmt R6 instruction */
+#define CMPOP_MASK	0x7
+#define SIGN_BIT	(0x1 << 3)
+#define PREDICATE_BIT	(0x1 << 4)
+
+			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+			union ieee754sp fs, ft;
+
+			/* This is an R6 only instruction */
+			if (!cpu_has_mips_r6 ||
+			    (MIPSInst_FUNC(ir) & 0x20))
+				return SIGILL;
+
+			/* fmt is w_fmt for single precision so fix it */
+			rfmt = s_fmt;
+			/* default to false */
+			rv.w = 0;
+
+			/* CMP.condn.S */
+			SPFROMREG(fs, MIPSInst_FS(ir));
+			SPFROMREG(ft, MIPSInst_FT(ir));
+
+			/* positive predicates */
+			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+				if (ieee754sp_cmp(fs, ft, cmptab[cmpop],
+						  sig))
+				    rv.w = -1; /* true, all 1s */
+				if ((sig) &&
+				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+				else
+					goto copcsr;
+			} else {
+				/* negative predicates */
+				switch (cmpop) {
+				case 1:
+				case 2:
+				case 3:
+					if (ieee754sp_cmp(fs, ft,
+							  negative_cmptab[cmpop],
+							  sig))
+						rv.w = -1; /* true, all 1s */
+					if (sig &&
+					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+					else
+						goto copcsr;
+					break;
+				default:
+					/* Reserved R6 ops */
+					pr_err("Reserved MIPS R6 CMP.condn.S operation\n");
+					return SIGILL;
+				}
+			}
+			break;
+			}
 		}
-		break;
 	}
 
 	case l_fmt:
@@ -2060,11 +2385,60 @@
 			rv.d = ieee754dp_flong(bits);
 			rfmt = d_fmt;
 			goto copcsr;
-		default:
-			return SIGILL;
-		}
-		break;
+		default: {
+			/* Emulating the new CMP.condn.fmt R6 instruction */
+			int cmpop = MIPSInst_FUNC(ir) & CMPOP_MASK;
+			int sig = MIPSInst_FUNC(ir) & SIGN_BIT;
+			union ieee754dp fs, ft;
 
+			if (!cpu_has_mips_r6 ||
+			    (MIPSInst_FUNC(ir) & 0x20))
+				return SIGILL;
+
+			/* fmt is l_fmt for double precision so fix it */
+			rfmt = d_fmt;
+			/* default to false */
+			rv.l = 0;
+
+			/* CMP.condn.D */
+			DPFROMREG(fs, MIPSInst_FS(ir));
+			DPFROMREG(ft, MIPSInst_FT(ir));
+
+			/* positive predicates */
+			if (!(MIPSInst_FUNC(ir) & PREDICATE_BIT)) {
+				if (ieee754dp_cmp(fs, ft,
+						  cmptab[cmpop], sig))
+				    rv.l = -1LL; /* true, all 1s */
+				if (sig &&
+				    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+				else
+					goto copcsr;
+			} else {
+				/* negative predicates */
+				switch (cmpop) {
+				case 1:
+				case 2:
+				case 3:
+					if (ieee754dp_cmp(fs, ft,
+							  negative_cmptab[cmpop],
+							  sig))
+						rv.l = -1LL; /* true, all 1s */
+					if (sig &&
+					    ieee754_cxtest(IEEE754_INVALID_OPERATION))
+						rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+					else
+						goto copcsr;
+					break;
+				default:
+					/* Reserved R6 ops */
+					pr_err("Reserved MIPS R6 CMP.condn.D operation\n");
+					return SIGILL;
+				}
+			}
+			break;
+			}
+		}
 	default:
 		return SIGILL;
 	}
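
A worked decode of the new CMP.condn.fmt emulation above, using only the masks it defines (CMPOP_MASK = 0x7, SIGN_BIT = 1 << 3, PREDICATE_BIT = 1 << 4, with any encoding that has bit 0x20 set rejected as SIGILL): FUNC = 0x0c (0b01100) gives cmpop = 4 with the signalling bit set and the predicate bit clear, so the positive-predicate path compares with cmptab[4] and, if the comparison raises the invalid-operation exception, latches FPU_CSR_INV_X | FPU_CSR_INV_S; FUNC = 0x11 (0b10001) has the predicate bit set and cmpop = 1, so the negated-predicate path uses negative_cmptab[1]. Negated encodings with cmpop outside 1-3 are reserved and return SIGILL. (Values chosen for illustration; they are not taken from the patch.)
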
diff --git a/arch/mips/math-emu/dp_2008class.c b/arch/mips/math-emu/dp_2008class.c
new file mode 100644
index 0000000..9dc39fc
--- /dev/null
+++ b/arch/mips/math-emu/dp_2008class.c
@@ -0,0 +1,55 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+int ieee754dp_2008class(union ieee754dp x)
+{
+	COMPXDP;
+
+	EXPLODEXDP;
+
+	/*
+	 * 10 bit mask as follows:
+	 *
+	 * bit0 = SNAN
+	 * bit1 = QNAN
+	 * bit2 = -INF
+	 * bit3 = -NORM
+	 * bit4 = -DNORM
+	 * bit5 = -ZERO
+	 * bit6 = INF
+	 * bit7 = NORM
+	 * bit8 = DNORM
+	 * bit9 = ZERO
+	 */
+
+	switch(xc) {
+	case IEEE754_CLASS_SNAN:
+		return 0x01;
+	case IEEE754_CLASS_QNAN:
+		return 0x02;
+	case IEEE754_CLASS_INF:
+		return 0x04 << (xs ? 0 : 4);
+	case IEEE754_CLASS_NORM:
+		return 0x08 << (xs ? 0 : 4);
+	case IEEE754_CLASS_DNORM:
+		return 0x10 << (xs ? 0 : 4);
+	case IEEE754_CLASS_ZERO:
+		return 0x20 << (xs ? 0 : 4);
+	default:
+		pr_err("Unknown class: %d\n", xc);
+		return 0;
+	}
+}
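
Two illustrative inputs for the class encoder above (a hypothetical snippet, using the union's .bits view the same way the cp1emu.c hunk does for fs.bits):

	union ieee754dp v;

	v.bits = 0xFFF0000000000000ULL;	/* -Inf: sign set, class INF */
	/* ieee754dp_2008class(v) == 0x04        -> bit 2 (-INF) */

	v.bits = 0x0000000000000000ULL;	/* +0.0: sign clear, class ZERO */
	/* ieee754dp_2008class(v) == 0x20 << 4 == 0x200 -> bit 9 (ZERO) */
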
diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
new file mode 100644
index 0000000..fd71b8d
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmax.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.D : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.D: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return y;
+	else if (xs < ys)
+		return x;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
+
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
new file mode 100644
index 0000000..c1072b0
--- /dev/null
+++ b/arch/mips/math-emu/dp_fmin.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.D : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.D: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return x;
+	else if (xs < ys)
+		return y;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
+
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+{
+	COMPXDP;
+	COMPYDP;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+
+	FLUSHXDP;
+	FLUSHYDP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754dp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		DPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
new file mode 100644
index 0000000..119eda9
--- /dev/null
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -0,0 +1,265 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	int re;
+	int rs;
+	u64 rm;
+	unsigned lxm;
+	unsigned hxm;
+	unsigned lym;
+	unsigned hym;
+	u64 lrm;
+	u64 hrm;
+	u64 t;
+	u64 at;
+	int s;
+
+	COMPXDP;
+	COMPYDP;
+
+	u64 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+	EXPLODEDP(z, zc, zs, ze, zm)
+
+	FLUSHXDP;
+	FLUSHYDP;
+	FLUSHDP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		DPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754dp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 64 - (DP_FBITS + 1);
+	ym <<= 64 - (DP_FBITS + 1);
+
+	/*
+	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
+	 */
+
+	/* 32 * 32 => 64 */
+#define DPXMULT(x, y)	((u64)(x) * (u64)y)
+
+	lxm = xm;
+	hxm = xm >> 32;
+	lym = ym;
+	hym = ym >> 32;
+
+	lrm = DPXMULT(lxm, lym);
+	hrm = DPXMULT(hxm, hym);
+
+	t = DPXMULT(lxm, hym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	t = DPXMULT(hxm, lym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((s64) rm < 0) {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+		     ((rm << (DP_FBITS + 1 + 3)) != 0);
+			re++;
+	} else {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (DP_HIDDEN_BIT << 3));
+
+	/* And now the addition */
+	assert(zm & DP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift y fraction right to align.
+		 */
+		s = ze - re;
+		rm = XDPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift x fraction right to align.
+		 */
+		s = re - ze;
+		zm = XDPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= DP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving the result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+			zm = XDPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize to rounding precision.
+		 */
+		while ((zm >> (DP_FBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+	}
+
+	return ieee754dp_format(zs, ze, zm);
+}
diff --git a/arch/mips/math-emu/dp_msubf.c b/arch/mips/math-emu/dp_msubf.c
new file mode 100644
index 0000000..1224126
--- /dev/null
+++ b/arch/mips/math-emu/dp_msubf.c
@@ -0,0 +1,269 @@
+/*
+ * IEEE754 floating point arithmetic
+ * double precision: MSUBF.f (Fused Multiply Subtract)
+ * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754dp.h"
+
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y)
+{
+	int re;
+	int rs;
+	u64 rm;
+	unsigned lxm;
+	unsigned hxm;
+	unsigned lym;
+	unsigned hym;
+	u64 lrm;
+	u64 hrm;
+	u64 t;
+	u64 at;
+	int s;
+
+	COMPXDP;
+	COMPYDP;
+
+	u64 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXDP;
+	EXPLODEYDP;
+	EXPLODEDP(z, zc, zs, ze, zm)
+
+	FLUSHXDP;
+	FLUSHYDP;
+	FLUSHDP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		DPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754dp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754dp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754dp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754dp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		DPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		DPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754dp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+	assert(xm & DP_HIDDEN_BIT);
+	assert(ym & DP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 64 - (DP_FBITS + 1);
+	ym <<= 64 - (DP_FBITS + 1);
+
+	/*
+	 * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
+	 */
+
+	/* 32 * 32 => 64 */
+#define DPXMULT(x, y)	((u64)(x) * (u64)y)
+
+	lxm = xm;
+	hxm = xm >> 32;
+	lym = ym;
+	hym = ym >> 32;
+
+	lrm = DPXMULT(lxm, lym);
+	hrm = DPXMULT(hxm, hym);
+
+	t = DPXMULT(lxm, hym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	t = DPXMULT(hxm, lym);
+
+	at = lrm + (t << 32);
+	hrm += at < lrm;
+	lrm = at;
+
+	hrm = hrm + (t >> 32);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((s64) rm < 0) {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+		     ((rm << (DP_FBITS + 1 + 3)) != 0);
+			re++;
+	} else {
+		rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (DP_HIDDEN_BIT << 3));
+
+	/* And now the subtraction */
+
+	/* flip sign of r and handle as add */
+	rs ^= 1;
+
+	assert(zm & DP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift y fraction right to align.
+		 */
+		s = ze - re;
+		rm = XDPSRS(rm, s);
+		re += s;
+	} else if (re > ze) {
+		/*
+		 * Have to shift x fraction right to align.
+		 */
+		s = re - ze;
+		zm = XDPSRS(zm, s);
+		ze += s;
+	}
+	assert(ze == re);
+	assert(ze <= DP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving the result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+			zm = XDPSRS1(zm);
+			ze++;
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize to rounding precision.
+		 */
+		while ((zm >> (DP_FBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+	}
+
+	return ieee754dp_format(zs, ze, zm);
+}
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index e0b5cc2..cbb36c1 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -33,7 +33,6 @@
 
 int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
 {
-	extern asmlinkage void handle_dsemulret(void);
 	struct emuframe __user *fr;
 	int err;
 
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index a5ca108..df94720 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -75,6 +75,16 @@
 
 union ieee754sp ieee754sp_sqrt(union ieee754sp x);
 
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y);
+int ieee754sp_2008class(union ieee754sp x);
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y);
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y);
+
 /*
  * double precision (often aka double)
 */
@@ -99,6 +109,15 @@
 
 union ieee754dp ieee754dp_sqrt(union ieee754dp x);
 
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+				union ieee754dp y);
+int ieee754dp_2008class(union ieee754dp x);
+union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y);
+union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y);
 
 
 /* 5 types of floating point number
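
Worth noting when reading the declarations above: the fused helpers take the accumulator as their first argument, matching the architectural definition in the new dp_maddf.c/dp_msubf.c headers and the cp1emu.c call sites, e.g. (illustrative only):

	rv.d = ieee754dp_maddf(fd, fs, ft);	/* FPR[fd] + (FPR[fs] x FPR[ft]) */
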
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 05389d5..6383e2c 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -65,8 +65,8 @@
 			vc = IEEE754_CLASS_INF;				\
 		else if (vm & SP_MBIT(SP_FBITS-1))			\
 			vc = IEEE754_CLASS_SNAN;			\
-	else								\
-		vc = IEEE754_CLASS_QNAN;				\
+		else							\
+			vc = IEEE754_CLASS_QNAN;			\
 	} else if (ve == SP_EMIN-1+SP_EBIAS) {				\
 		if (vm) {						\
 			ve = SP_EMIN;					\
@@ -105,8 +105,8 @@
 		if (vm) {						\
 			ve = DP_EMIN;					\
 			vc = IEEE754_CLASS_DNORM;			\
-	} else								\
-		vc = IEEE754_CLASS_ZERO;				\
+		} else							\
+			vc = IEEE754_CLASS_ZERO;			\
 	} else {							\
 		ve -= DP_EBIAS;						\
 		vm |= DP_HIDDEN_BIT;					\
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index f308e0f..506a67a 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -65,4 +65,4 @@
 
 	return 0;
 }
-__initcall(debugfs_fpuemu);
+arch_initcall(debugfs_fpuemu);
diff --git a/arch/mips/math-emu/sp_2008class.c b/arch/mips/math-emu/sp_2008class.c
new file mode 100644
index 0000000..ff62606
--- /dev/null
+++ b/arch/mips/math-emu/sp_2008class.c
@@ -0,0 +1,55 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: CLASS.f
+ * FPR[fd] = class(FPR[fs])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+int ieee754sp_2008class(union ieee754sp x)
+{
+	COMPXSP;
+
+	EXPLODEXSP;
+
+	/*
+	 * 10 bit mask as follows:
+	 *
+	 * bit0 = SNAN
+	 * bit1 = QNAN
+	 * bit2 = -INF
+	 * bit3 = -NORM
+	 * bit4 = -DNORM
+	 * bit5 = -ZERO
+	 * bit6 = INF
+	 * bit7 = NORM
+	 * bit8 = DNORM
+	 * bit9 = ZERO
+	 */
+
+	switch(xc) {
+	case IEEE754_CLASS_SNAN:
+		return 0x01;
+	case IEEE754_CLASS_QNAN:
+		return 0x02;
+	case IEEE754_CLASS_INF:
+		return 0x04 << (xs ? 0 : 4);
+	case IEEE754_CLASS_NORM:
+		return 0x08 << (xs ? 0 : 4);
+	case IEEE754_CLASS_DNORM:
+		return 0x10 << (xs ? 0 : 4);
+	case IEEE754_CLASS_ZERO:
+		return 0x20 << (xs ? 0 : 4);
+	default:
+		pr_err("Unknown class: %d\n", xc);
+		return 0;
+	}
+}
diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
new file mode 100644
index 0000000..4d00084
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmax.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MAX{,A}.f
+ * MAX : Scalar Floating-Point Maximum
+ * MAXA: Scalar Floating-Point argument with Maximum Absolute Value
+ *
+ * MAX.S : FPR[fd] = maxNum(FPR[fs],FPR[ft])
+ * MAXA.S: FPR[fd] = maxNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return y;
+	else if (xs < ys)
+		return x;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
+
+union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return x;
+	else if (xe < ye)
+		return y;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return y;
+	return x;
+}
diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
new file mode 100644
index 0000000..4eb1bb9
--- /dev/null
+++ b/arch/mips/math-emu/sp_fmin.c
@@ -0,0 +1,213 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MIN{,A}.f
+ * MIN : Scalar Floating-Point Minimum
+ * MINA: Scalar Floating-Point argument with Minimum Absolute Value
+ *
+ * MIN.S : FPR[fd] = minNum(FPR[fs],FPR[ft])
+ * MINA.S: FPR[fd] = minNumMag(FPR[fs],FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return xs ? x : y;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return ys ? y : x;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare signs */
+	if (xs > ys)
+		return x;
+	else if (xs < ys)
+		return y;
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
+
+union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+{
+	COMPXSP;
+	COMPYSP;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+
+	FLUSHXSP;
+	FLUSHYSP;
+
+	ieee754_clearcx();
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	/* numbers are preferred to NaNs */
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return y;
+
+	/*
+	 * Infinity and zero handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		return x;
+
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+		if (xs == ys)
+			return x;
+		return ieee754sp_zero(1);
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		SPDNORMX;
+	}
+
+	/* Finally get to do some computation */
+
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	/* Compare exponent */
+	if (xe > ye)
+		return y;
+	else if (xe < ye)
+		return x;
+
+	/* Compare mantissa */
+	if (xm <= ym)
+		return x;
+	return y;
+}
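
The fmin/fmina paths above only reach the final sign/exponent/mantissa comparison after both operands have been exploded into (class, sign, exponent, mantissa) tuples and flushed. The following standalone sketch shows what that per-operand classification amounts to for IEEE 754 single precision; the helper names and the NaN quiet-bit convention used here (2008-style, where a set fraction MSB means quiet) are illustrative only, not the kernel's COMPXSP/EXPLODEXSP machinery.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum sp_class { SP_ZERO, SP_DNORM, SP_NORM, SP_INF, SP_QNAN, SP_SNAN };

static enum sp_class sp_classify(float f, int *sign, int *bexp, uint32_t *mant)
{
	uint32_t bits;

	memcpy(&bits, &f, sizeof(bits));
	*sign = bits >> 31;
	*bexp = (bits >> 23) & 0xff;
	*mant = bits & 0x7fffff;

	if (*bexp == 0xff) {
		if (*mant == 0)
			return SP_INF;
		/* 2008-style NaN encoding: fraction MSB set means quiet.
		 * Legacy MIPS NaN encoding inverts this bit. */
		return (*mant & 0x400000) ? SP_QNAN : SP_SNAN;
	}
	if (*bexp == 0)
		return *mant ? SP_DNORM : SP_ZERO;
	return SP_NORM;
}

int main(void)
{
	int s, e;
	uint32_t m;

	printf("class(1.5f)   = %d\n", sp_classify(1.5f, &s, &e, &m));
	printf("class(0.0f)   = %d\n", sp_classify(0.0f, &s, &e, &m));
	printf("class(1e-45f) = %d\n", sp_classify(1e-45f, &s, &e, &m));
	return 0;
}
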
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
new file mode 100644
index 0000000..dd1dd83
--- /dev/null
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -0,0 +1,255 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MADDF.f (Fused Multiply Add)
+ * MADDF.fmt: FPR[fd] = FPR[fd] + (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	int re;
+	int rs;
+	unsigned rm;
+	unsigned short lxm;
+	unsigned short hxm;
+	unsigned short lym;
+	unsigned short hym;
+	unsigned lrm;
+	unsigned hrm;
+	unsigned t;
+	unsigned at;
+	int s;
+
+	COMPXSP;
+	COMPYSP;
+	u32 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+	EXPLODESP(z, zc, zs, ze, zm)
+
+	FLUSHXSP;
+	FLUSHYSP;
+	FLUSHSP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		SPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754sp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+
+	/* rm = xm * ym, re = xe+ye basically */
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 32 - (SP_FBITS + 1);
+	ym <<= 32 - (SP_FBITS + 1);
+
+	/*
+	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+	 */
+	lxm = xm & 0xffff;
+	hxm = xm >> 16;
+	lym = ym & 0xffff;
+	hym = ym >> 16;
+
+	lrm = lxm * lym;	/* 16 * 16 => 32 */
+	hrm = hxm * hym;	/* 16 * 16 => 32 */
+
+	t = lxm * hym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	t = hxm * lym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((int) rm < 0) {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+		    ((rm << (SP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (SP_HIDDEN_BIT << 3));
+
+	/* And now the addition */
+
+	assert(zm & SP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift y fraction right to align.
+		 */
+		s = ze - re;
+		SPXSRSYn(s);
+	} else if (re > ze) {
+		/*
+		 * Have to shift x fraction right to align.
+		 */
+		s = re - ze;
+		SPXSRSYn(s);
+	}
+	assert(ze == re);
+	assert(ze <= SP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+			SPXSRSX1();
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize in extended single precision
+		 */
+		while ((zm >> (SP_MBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+
+	}
+	return ieee754sp_format(zs, ze, zm);
+}
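
The mantissa product in ieee754sp_maddf() is assembled from four 16x16 partial products with explicit carry propagation, since the emulator sticks to 32-bit arithmetic. Below is a standalone sketch of that sequence, checked against a native 64-bit multiply; the variable names mirror the function above and the sample operands are arbitrary shifted 24-bit mantissas, nothing taken from the patch itself.

#include <stdint.h>
#include <stdio.h>

static uint64_t mul32_by_halves(uint32_t xm, uint32_t ym)
{
	uint32_t lxm = xm & 0xffff, hxm = xm >> 16;
	uint32_t lym = ym & 0xffff, hym = ym >> 16;
	uint32_t lrm = lxm * lym;	/* low  x low  -> low 32 bits  */
	uint32_t hrm = hxm * hym;	/* high x high -> high 32 bits */
	uint32_t t, at;

	t = lxm * hym;			/* low x high straddles the middle */
	at = lrm + (t << 16);
	hrm += at < lrm;		/* carry out of the low word */
	lrm = at;
	hrm += t >> 16;

	t = hxm * lym;			/* high x low */
	at = lrm + (t << 16);
	hrm += at < lrm;
	lrm = at;
	hrm += t >> 16;

	return ((uint64_t)hrm << 32) | lrm;
}

int main(void)
{
	uint32_t x = 0x00ffffffu << 8;	/* shifted 24-bit mantissa */
	uint32_t y = 0x00c90fdbu << 8;
	uint64_t ref = (uint64_t)x * y;

	printf("halves: %016llx\n", (unsigned long long)mul32_by_halves(x, y));
	printf("native: %016llx\n", (unsigned long long)ref);
	return 0;
}
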
diff --git a/arch/mips/math-emu/sp_msubf.c b/arch/mips/math-emu/sp_msubf.c
new file mode 100644
index 0000000..81c38b980
--- /dev/null
+++ b/arch/mips/math-emu/sp_msubf.c
@@ -0,0 +1,258 @@
+/*
+ * IEEE754 floating point arithmetic
+ * single precision: MSUBF.f (Fused Multiply Subtract)
+ * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
+ *
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; version 2 of the License.
+ */
+
+#include "ieee754sp.h"
+
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+				union ieee754sp y)
+{
+	int re;
+	int rs;
+	unsigned rm;
+	unsigned short lxm;
+	unsigned short hxm;
+	unsigned short lym;
+	unsigned short hym;
+	unsigned lrm;
+	unsigned hrm;
+	unsigned t;
+	unsigned at;
+	int s;
+
+	COMPXSP;
+	COMPYSP;
+	u32 zm; int ze; int zs __maybe_unused; int zc;
+
+	EXPLODEXSP;
+	EXPLODEYSP;
+	EXPLODESP(z, zc, zs, ze, zm)
+
+	FLUSHXSP;
+	FLUSHYSP;
+	FLUSHSP(z, zc, zs, ze, zm);
+
+	ieee754_clearcx();
+
+	switch (zc) {
+	case IEEE754_CLASS_SNAN:
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(z);
+	case IEEE754_CLASS_DNORM:
+		SPDNORMx(zm, ze);
+	/* QNAN is handled separately below */
+	}
+
+	switch (CLPAIR(xc, yc)) {
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+		return ieee754sp_nanxcpt(y);
+
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+		return ieee754sp_nanxcpt(x);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+		return y;
+
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+		return x;
+
+	/*
+	 * Infinity handling
+	 */
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		ieee754_setcx(IEEE754_INVALID_OPERATION);
+		return ieee754sp_indef();
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		return ieee754sp_inf(xs ^ ys);
+
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+	case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+		if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* Multiplication is 0 so just return z */
+		return z;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+		SPDNORMX;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMY;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		SPDNORMX;
+		break;
+
+	case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+		if (zc == IEEE754_CLASS_QNAN)
+			return z;
+		else if (zc == IEEE754_CLASS_INF)
+			return ieee754sp_inf(zs);
+		/* fall through to real computations */
+	}
+
+	/* Finally get to do some computation */
+
+	/*
+	 * Do the multiplication bit first
+	 *
+	 * rm = xm * ym, re = xe + ye basically
+	 *
+	 * At this point xm and ym should have been normalized.
+	 */
+
+	/* rm = xm * ym, re = xe+ye basically */
+	assert(xm & SP_HIDDEN_BIT);
+	assert(ym & SP_HIDDEN_BIT);
+
+	re = xe + ye;
+	rs = xs ^ ys;
+
+	/* shunt to top of word */
+	xm <<= 32 - (SP_FBITS + 1);
+	ym <<= 32 - (SP_FBITS + 1);
+
+	/*
+	 * Multiply 32 bits xm, ym to give high 32 bits rm with stickiness.
+	 */
+	lxm = xm & 0xffff;
+	hxm = xm >> 16;
+	lym = ym & 0xffff;
+	hym = ym >> 16;
+
+	lrm = lxm * lym;	/* 16 * 16 => 32 */
+	hrm = hxm * hym;	/* 16 * 16 => 32 */
+
+	t = lxm * hym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	t = hxm * lym; /* 16 * 16 => 32 */
+	at = lrm + (t << 16);
+	hrm += at < lrm;
+	lrm = at;
+	hrm = hrm + (t >> 16);
+
+	rm = hrm | (lrm != 0);
+
+	/*
+	 * Sticky shift down to normal rounding precision.
+	 */
+	if ((int) rm < 0) {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+		    ((rm << (SP_FBITS + 1 + 3)) != 0);
+		re++;
+	} else {
+		rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+		     ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+	}
+	assert(rm & (SP_HIDDEN_BIT << 3));
+
+	/* And now the subtraction */
+
+	/* Flip sign of r and handle as add */
+	rs ^= 1;
+
+	assert(zm & SP_HIDDEN_BIT);
+
+	/*
+	 * Provide guard, round and sticky bit space.
+	 */
+	zm <<= 3;
+
+	if (ze > re) {
+		/*
+		 * Have to shift y fraction right to align.
+		 */
+		s = ze - re;
+		SPXSRSYn(s);
+	} else if (re > ze) {
+		/*
+		 * Have to shift x fraction right to align.
+		 */
+		s = re - ze;
+		SPXSRSYn(s);
+	}
+	assert(ze == re);
+	assert(ze <= SP_EMAX);
+
+	if (zs == rs) {
+		/*
+		 * Generate 28 bit result of adding two 27 bit numbers
+		 * leaving result in zm, zs and ze.
+		 */
+		zm = zm + rm;
+
+		if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+			SPXSRSX1(); /* shift preserving sticky */
+		}
+	} else {
+		if (zm >= rm) {
+			zm = zm - rm;
+		} else {
+			zm = rm - zm;
+			zs = rs;
+		}
+		if (zm == 0)
+			return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+		/*
+		 * Normalize in extended single precision
+		 */
+		while ((zm >> (SP_MBITS + 3)) == 0) {
+			zm <<= 1;
+			ze--;
+		}
+
+	}
+	return ieee754sp_format(zs, ze, zm);
+}
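
Both fused helpers then narrow the product back to rounding precision with a sticky right shift: any bit that falls off the bottom is OR-ed into the least significant bit so that rounding still sees the result was inexact. A minimal standalone illustration follows; the shift count here is arbitrary, not the SP_FBITS-derived value used above, and the kernel writes the lost-bit test as (rm << (32 - n)) != 0, which is equivalent for 0 < n < 32.

#include <stdint.h>
#include <stdio.h>

static uint32_t sticky_shift_right(uint32_t v, unsigned n)	/* 0 < n < 32 */
{
	uint32_t lost = v & ((1u << n) - 1);	/* bits about to be discarded */

	return (v >> n) | (lost != 0);
}

int main(void)
{
	/* a set bit falls off the bottom, so the sticky LSB ends up 1 */
	printf("%#x\n", (unsigned)sticky_shift_right(0x80000010u, 5)); /* 0x4000001 */
	/* nothing is lost, so the result stays exact */
	printf("%#x\n", (unsigned)sticky_shift_right(0x80000000u, 5)); /* 0x4000000 */
	return 0;
}
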
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fbea443..5d3a25e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1276,6 +1276,7 @@
 	case CPU_PROAPTIV:
 	case CPU_M5150:
 	case CPU_QEMU_GENERIC:
+	case CPU_I6400:
 		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
 		    (c->icache.waysize > PAGE_SIZE))
 			c->icache.flags |= MIPS_CACHE_ALIASES;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index eeaf024..8f23cf0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -194,6 +194,40 @@
 		__free_pages(page, get_order(size));
 }
 
+static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+	void *cpu_addr, dma_addr_t dma_addr, size_t size,
+	struct dma_attrs *attrs)
+{
+	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long addr = (unsigned long)cpu_addr;
+	unsigned long off = vma->vm_pgoff;
+	unsigned long pfn;
+	int ret = -ENXIO;
+
+	if (!plat_device_is_coherent(dev) && !hw_coherentio)
+		addr = CAC_ADDR(addr);
+
+	pfn = page_to_pfn(virt_to_page((void *)addr));
+
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	else
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (off < count && user_count <= (count - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      user_count << PAGE_SHIFT,
+				      vma->vm_page_prot);
+	}
+
+	return ret;
+}
+
 static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
@@ -380,6 +414,7 @@
 static struct dma_map_ops mips_default_dma_map_ops = {
 	.alloc = mips_dma_alloc_coherent,
 	.free = mips_dma_free_coherent,
+	.mmap = mips_dma_mmap,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
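
With the new .mmap operation in mips_default_dma_map_ops, a driver can export a dma_alloc_coherent() buffer to user space through dma_mmap_coherent() from its own mmap handler; the application then sees it as an ordinary mmap() of the device node. A hedged userspace sketch follows: the device path and mapping size are assumptions for illustration, not anything defined by this patch.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 1 << 20;	/* illustrative 1 MiB mapping */
	int fd = open("/dev/example-dma", O_RDWR);	/* hypothetical node */
	void *buf;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	/* The pages behind this mapping come from the driver's coherent
	 * allocation, remapped by the .mmap operation added above. */
	((volatile unsigned char *)buf)[0] = 0xaa;

	munmap(buf, len);
	close(fd);
	return EXIT_SUCCESS;
}
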
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 852a41c..4b88fa03 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -57,12 +57,10 @@
 
 #ifdef CONFIG_KPROBES
 	/*
-	 * This is to notify the fault handler of the kprobes.	The
-	 * exception code is redundant as it is also carried in REGS,
-	 * but we pass it anyhow.
+	 * This is to notify the fault handler of the kprobes.
 	 */
 	if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
-		       (regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
+		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
 		return;
 #endif
 
@@ -224,6 +222,7 @@
 			print_vma_addr(" ", regs->regs[31]);
 			pr_info("\n");
 		}
+		current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 		info.si_signo = SIGSEGV;
 		info.si_errno = 0;
 		/* info.si_code has been set above */
@@ -282,6 +281,7 @@
 		       field, (unsigned long) regs->cp0_epc,
 		       field, (unsigned long) regs->regs[31]);
 #endif
+	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 	tsk->thread.cp0_badvaddr = address;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 198a314..66d0f49 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/dma.h>
 #include <asm/kmap_types.h>
+#include <asm/maar.h>
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -333,9 +334,40 @@
 #endif
 }
 
-unsigned __weak platform_maar_init(unsigned num_maars)
+unsigned __weak platform_maar_init(unsigned num_pairs)
 {
-	return 0;
+	struct maar_config cfg[BOOT_MEM_MAP_MAX];
+	unsigned i, num_configured, num_cfg = 0;
+	phys_addr_t skip;
+
+	for (i = 0; i < boot_mem_map.nr_map; i++) {
+		switch (boot_mem_map.map[i].type) {
+		case BOOT_MEM_RAM:
+		case BOOT_MEM_INIT_RAM:
+			break;
+		default:
+			continue;
+		}
+
+		skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
+
+		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
+		cfg[num_cfg].lower += skip;
+
+		cfg[num_cfg].upper = cfg[num_cfg].lower;
+		cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
+		cfg[num_cfg].upper -= skip;
+
+		cfg[num_cfg].attrs = MIPS_MAAR_S;
+		num_cfg++;
+	}
+
+	num_configured = maar_config(cfg, num_cfg, num_pairs);
+	if (num_configured < num_cfg)
+		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
+			num_pairs, num_cfg);
+
+	return num_configured;
 }
 
 static void maar_init(void)
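
The generic platform_maar_init() above walks the bootmem map and derives one MIPS_MAAR_S range per usable region, nudging the lower bound up to the next 64 KiB boundary and carrying that adjustment through to the inclusive upper bound. The standalone sketch below mirrors that arithmetic for one made-up region; the base address and size are invented for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x00001000;	/* made-up region start */
	uint64_t size = 0x10000000;	/* made-up region size  */

	/* lower bound moves up to the next 64 KiB boundary */
	uint64_t skip  = 0x10000 - (addr & 0xffff);
	uint64_t lower = addr + skip;
	/* upper works out to the original inclusive end, addr + size - 1 */
	uint64_t upper = lower + size - 1 - skip;

	printf("lower = 0x%" PRIx64 "\n", lower);	/* 0x10000    */
	printf("upper = 0x%" PRIx64 "\n", upper);	/* 0x10000fff */
	return 0;
}
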
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 4ceafd1..53ea839 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -14,6 +14,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/r4kcache.h>
+#include <asm/mips-cm.h>
 
 /*
  * MIPS32/MIPS64 L2 cache handling
@@ -94,6 +95,38 @@
 	return 1;
 }
 
+static int __init mips_sc_probe_cm3(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned long cfg = read_gcr_l2_config();
+	unsigned long sets, line_sz, assoc;
+
+	if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
+		return 0;
+
+	sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
+	sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
+	c->scache.sets = 64 << sets;
+
+	line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
+	line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
+	c->scache.linesz = 2 << line_sz;
+
+	assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
+	assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
+	c->scache.ways = assoc + 1;
+	c->scache.waysize = c->scache.sets * c->scache.linesz;
+	c->scache.waybit = __ffs(c->scache.waysize);
+
+	c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+	return 1;
+}
+
+void __weak platform_early_l2_init(void)
+{
+}
+
 static inline int __init mips_sc_probe(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -103,6 +136,15 @@
 	/* Mark as not present until probe completed */
 	c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
 
+	/*
+	 * Do we need some platform specific probing before
+	 * we configure L2?
+	 */
+	platform_early_l2_init();
+
+	if (mips_cm_revision() >= CM_REV_CM3)
+		return mips_sc_probe_cm3();
+
 	/* Ignore anything but MIPSxx processors */
 	if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
 			      MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
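
mips_sc_probe_cm3() reads the L2 geometry straight out of GCR_L2_CONFIG instead of Config2: sets per way are 64 << field, the line size is 2 << field, and associativity is field + 1. The standalone sketch below performs that decode on sample field values; the values are made up and the raw register masking is skipped.

#include <stdio.h>

int main(void)
{
	unsigned sets_field  = 7;	/* sets per way = 64 << 7 = 8192 */
	unsigned line_field  = 5;	/* line size    = 2 << 5  = 64 B */
	unsigned assoc_field = 7;	/* ways         = 7 + 1   = 8    */

	unsigned sets    = 64 << sets_field;
	unsigned linesz  = 2 << line_field;
	unsigned ways    = assoc_field + 1;
	unsigned waysize = sets * linesz;

	printf("L2: %u ways x %u KiB/way (%u-byte lines), %u KiB total\n",
	       ways, waysize >> 10, linesz, (waysize * ways) >> 10);
	return 0;
}
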
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 2b75b8f..b4f366f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -36,7 +36,7 @@
 		"nop\n\t"		\
 		".set	pop\n\t")
 
-static int r3k_have_wired_reg;			/* Should be in cpu_data? */
+int r3k_have_wired_reg;			/* Should be in cpu_data? */
 
 /* TLB operations. */
 static void local_flush_tlb_from(int entry)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index cec3e18..53c2478 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -303,3 +303,10 @@
 	if (!register_vsmp_smp_ops())
 		return;
 }
+
+void platform_early_l2_init(void)
+{
+	/* L2 configuration lives in the CM3 */
+	if (mips_cm_revision() >= CM_REV_CM3)
+		mips_cm_probe();
+}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index fa8f591..c6a6c7a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -382,122 +382,12 @@
 	/* Could change CM error mask register. */
 }
 
-
-static char *tr[8] = {
-	"mem",	"gcr",	"gic",	"mmio",
-	"0x04", "0x05", "0x06", "0x07"
-};
-
-static char *mcmd[32] = {
-	[0x00] = "0x00",
-	[0x01] = "Legacy Write",
-	[0x02] = "Legacy Read",
-	[0x03] = "0x03",
-	[0x04] = "0x04",
-	[0x05] = "0x05",
-	[0x06] = "0x06",
-	[0x07] = "0x07",
-	[0x08] = "Coherent Read Own",
-	[0x09] = "Coherent Read Share",
-	[0x0a] = "Coherent Read Discard",
-	[0x0b] = "Coherent Ready Share Always",
-	[0x0c] = "Coherent Upgrade",
-	[0x0d] = "Coherent Writeback",
-	[0x0e] = "0x0e",
-	[0x0f] = "0x0f",
-	[0x10] = "Coherent Copyback",
-	[0x11] = "Coherent Copyback Invalidate",
-	[0x12] = "Coherent Invalidate",
-	[0x13] = "Coherent Write Invalidate",
-	[0x14] = "Coherent Completion Sync",
-	[0x15] = "0x15",
-	[0x16] = "0x16",
-	[0x17] = "0x17",
-	[0x18] = "0x18",
-	[0x19] = "0x19",
-	[0x1a] = "0x1a",
-	[0x1b] = "0x1b",
-	[0x1c] = "0x1c",
-	[0x1d] = "0x1d",
-	[0x1e] = "0x1e",
-	[0x1f] = "0x1f"
-};
-
-static char *core[8] = {
-	"Invalid/OK",	"Invalid/Data",
-	"Shared/OK",	"Shared/Data",
-	"Modified/OK",	"Modified/Data",
-	"Exclusive/OK", "Exclusive/Data"
-};
-
-static char *causes[32] = {
-	"None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
-	"COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
-	"0x08", "0x09", "0x0a", "0x0b",
-	"0x0c", "0x0d", "0x0e", "0x0f",
-	"0x10", "0x11", "0x12", "0x13",
-	"0x14", "0x15", "0x16", "INTVN_WR_ERR",
-	"INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
-	"0x1c", "0x1d", "0x1e", "0x1f"
-};
-
 int malta_be_handler(struct pt_regs *regs, int is_fixup)
 {
 	/* This duplicates the handling in do_be which seems wrong */
 	int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 
-	if (mips_cm_present()) {
-		unsigned long cm_error = read_gcr_error_cause();
-		unsigned long cm_addr = read_gcr_error_addr();
-		unsigned long cm_other = read_gcr_error_mult();
-		unsigned long cause, ocause;
-		char buf[256];
-
-		cause = cm_error & CM_GCR_ERROR_CAUSE_ERRTYPE_MSK;
-		if (cause != 0) {
-			cause >>= CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
-			if (cause < 16) {
-				unsigned long cca_bits = (cm_error >> 15) & 7;
-				unsigned long tr_bits = (cm_error >> 12) & 7;
-				unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
-				unsigned long stag_bits = (cm_error >> 3) & 15;
-				unsigned long sport_bits = (cm_error >> 0) & 7;
-
-				snprintf(buf, sizeof(buf),
-					 "CCA=%lu TR=%s MCmd=%s STag=%lu "
-					 "SPort=%lu\n",
-					 cca_bits, tr[tr_bits], mcmd[cmd_bits],
-					 stag_bits, sport_bits);
-			} else {
-				/* glob state & sresp together */
-				unsigned long c3_bits = (cm_error >> 18) & 7;
-				unsigned long c2_bits = (cm_error >> 15) & 7;
-				unsigned long c1_bits = (cm_error >> 12) & 7;
-				unsigned long c0_bits = (cm_error >> 9) & 7;
-				unsigned long sc_bit = (cm_error >> 8) & 1;
-				unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
-				unsigned long sport_bits = (cm_error >> 0) & 7;
-				snprintf(buf, sizeof(buf),
-					 "C3=%s C2=%s C1=%s C0=%s SC=%s "
-					 "MCmd=%s SPort=%lu\n",
-					 core[c3_bits], core[c2_bits],
-					 core[c1_bits], core[c0_bits],
-					 sc_bit ? "True" : "False",
-					 mcmd[cmd_bits], sport_bits);
-			}
-
-			ocause = (cm_other & CM_GCR_ERROR_MULT_ERR2ND_MSK) >>
-				 CM_GCR_ERROR_MULT_ERR2ND_SHF;
-
-			pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
-			       causes[cause], buf);
-			pr_err("CM_ADDR =%08lx\n", cm_addr);
-			pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
-
-			/* reprime cause register */
-			write_gcr_error_cause(0);
-		}
-	}
+	mips_cm_error_report();
 
 	return retval;
 }
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index b769657..dadeb83 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -179,31 +179,6 @@
 	}
 }
 
-unsigned platform_maar_init(unsigned num_pairs)
-{
-	phys_addr_t mem_end = (physical_memsize & ~0xffffull) - 1;
-	struct maar_config cfg[] = {
-		/* DRAM preceding I/O */
-		{ 0x00000000, 0x0fffffff, MIPS_MAAR_S },
-
-		/* DRAM following I/O */
-		{ 0x20000000, mem_end, MIPS_MAAR_S },
-
-		/* DRAM alias in upper half of physical */
-		{ 0x80000000, 0x80000000 + mem_end, MIPS_MAAR_S },
-	};
-	unsigned i, num_cfg = ARRAY_SIZE(cfg);
-
-	/* If DRAM fits before I/O, drop the region following it */
-	if (physical_memsize <= 0x10000000) {
-		num_cfg--;
-		for (i = 1; i < num_cfg; i++)
-			cfg[i] = cfg[i + 1];
-	}
-
-	return maar_config(cfg, num_cfg, num_pairs);
-}
-
 phys_addr_t mips_cdmm_phys_base(void)
 {
 	/* This address is "typically unused" */
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 5f5d18b..3660dc6 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -87,7 +87,7 @@
 static void xlp_pic_enable(struct irq_data *d)
 {
 	unsigned long flags;
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	BUG_ON(!pd);
 	spin_lock_irqsave(&pd->node->piclock, flags);
@@ -97,7 +97,7 @@
 
 static void xlp_pic_disable(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 
 	BUG_ON(!pd);
@@ -108,7 +108,7 @@
 
 static void xlp_pic_mask_ack(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	clear_c0_eimr(pd->picirq);
 	ack_c0_eirr(pd->picirq);
@@ -116,7 +116,7 @@
 
 static void xlp_pic_unmask(struct irq_data *d)
 {
-	struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
 
 	BUG_ON(!pd);
 
@@ -193,7 +193,7 @@
 	pic_data->picirq = picirq;
 	pic_data->node = nlm_get_node(node);
 	irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
-	irq_set_handler_data(xirq, pic_data);
+	irq_set_chip_data(xirq, pic_data);
 }
 
 void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
@@ -202,7 +202,7 @@
 	int xirq;
 
 	xirq = nlm_irq_to_xirq(node, irq);
-	pic_data = irq_get_handler_data(xirq);
+	pic_data = irq_get_chip_data(xirq);
 	if (WARN_ON(!pic_data))
 		return;
 	pic_data->extra_ack = xack;
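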
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index f5fff22..0136b4f 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -82,8 +82,9 @@
 }
 
 /* IRQ_IPI_SMP_FUNCTION Handler */
-void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
+void nlm_smp_function_ipi_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
 	generic_smp_call_function_interrupt();
@@ -91,8 +92,9 @@
 }
 
 /* IRQ_IPI_SMP_RESCHEDULE  handler */
-void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
+void nlm_smp_resched_ipi_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	clear_c0_eimr(irq);
 	ack_c0_eirr(irq);
 	scheduler_ipi();
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
index 7b066a4..c11b9c7 100644
--- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -152,7 +152,7 @@
 	0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00
 };
 
-/* SATA PHY config for register block 2 0x0x8065 .. 0x0x80A4 */
+/* SATA PHY config for register block 2 0x8065 .. 0x80A4 */
 static const u8 sata_phy_config2[]  = {
 	0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18,
 	0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF,
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index a8f4144..80ec929 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -91,6 +91,8 @@
 		return 134;
 	case PIC_SATA_IRQ:
 		return 143;
+	case PIC_NAND_IRQ:
+		return 151;
 	case PIC_SPI_IRQ:
 		return 152;
 	case PIC_MMC_IRQ:
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 81f5895..3c9ec3d 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -91,6 +91,7 @@
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
 	case CPU_P5600:
+	case CPU_I6400:
 	case CPU_M5150:
 	case CPU_LOONGSON1:
 	case CPU_SB1:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 6a6e2cc..8f988a6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -392,6 +392,10 @@
 		op_model_mipsxx_ops.cpu_type = "mips/P5600";
 		break;
 
+	case CPU_I6400:
+		op_model_mipsxx_ops.cpu_type = "mips/I6400";
+		break;
+
 	case CPU_M5150:
 		op_model_mipsxx_ops.cpu_type = "mips/M5150";
 		break;
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index cffaaf4..2a5bb84 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -200,7 +200,7 @@
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		ret = arch_setup_msi_irq(dev, entry);
 		if (ret < 0)
 			return ret;
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index 3407495..bb14335 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -131,7 +131,7 @@
  */
 static void xlp_msi_enable(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 	int vec;
 
@@ -148,7 +148,7 @@
 
 static void xlp_msi_disable(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	unsigned long flags;
 	int vec;
 
@@ -165,7 +165,7 @@
 
 static void xlp_msi_mask_ack(struct irq_data *d)
 {
-	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
+	struct xlp_msi_data *md = irq_data_get_irq_chip_data(d);
 	int link, vec;
 
 	link = nlm_irq_msilink(d->irq);
@@ -211,7 +211,7 @@
 	msixvec = nlm_irq_msixvec(d->irq);
 	link = nlm_irq_msixlink(msixvec);
 	pci_msi_mask_irq(d);
-	md = irq_data_get_irq_handler_data(d);
+	md = irq_data_get_irq_chip_data(d);
 
 	/* Ack MSI on bridge */
 	if (cpu_is_xlp9xx()) {
@@ -302,7 +302,7 @@
 	/* Get MSI data for the link */
 	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
 	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
-	md = irq_get_handler_data(xirq);
+	md = irq_get_chip_data(xirq);
 	msiaddr = MSI_LINK_ADDR(node, link);
 
 	spin_lock_irqsave(&md->msi_lock, flags);
@@ -409,7 +409,7 @@
 	/* Get MSI data for the link */
 	lirq = PIC_PCIE_MSIX_IRQ(link);
 	xirq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
-	md = irq_get_handler_data(xirq);
+	md = irq_get_chip_data(xirq);
 	msixaddr = MSIX_LINK_ADDR(node, link);
 
 	spin_lock_irqsave(&md->msi_lock, flags);
@@ -485,7 +485,7 @@
 	irq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
 	for (i = irq; i < irq + XLP_MSIVEC_PER_LINK; i++) {
 		irq_set_chip_and_handler(i, &xlp_msi_chip, handle_level_irq);
-		irq_set_handler_data(i, md);
+		irq_set_chip_data(i, md);
 	}
 
 	for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) {
@@ -508,7 +508,7 @@
 		/* Initialize MSI-X extended irq space for the link  */
 		irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i));
 		irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq);
-		irq_set_handler_data(irq, md);
+		irq_set_chip_data(irq, md);
 	}
 }
 
@@ -520,7 +520,7 @@
 
 	link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE;
 	irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
-	md = irq_get_handler_data(irqbase);
+	md = irq_get_chip_data(irqbase);
 	if (cpu_is_xlp9xx())
 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) &
 						md->msi_enabled_mask;
@@ -550,7 +550,7 @@
 
 	link = lirq - PIC_PCIE_MSIX_IRQ_BASE;
 	irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0));
-	md = irq_get_handler_data(irqbase);
+	md = irq_get_chip_data(irqbase);
 	if (cpu_is_xlp9xx())
 		status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link));
 	else
diff --git a/arch/mips/pci/ops-emma2rh.c b/arch/mips/pci/ops-emma2rh.c
index 710aef5..2dc97c4 100644
--- a/arch/mips/pci/ops-emma2rh.c
+++ b/arch/mips/pci/ops-emma2rh.c
@@ -25,7 +25,6 @@
 #include <linux/types.h>
 
 #include <asm/addrspace.h>
-#include <asm/debug.h>
 
 #include <asm/emma/emma2rh.h>
 
@@ -40,10 +39,9 @@
 static int check_args(struct pci_bus *bus, u32 devfn, u32 * bus_num)
 {
 	/* check if the bus is top-level */
-	if (bus->parent != NULL) {
+	if (bus->parent != NULL)
 		*bus_num = bus->number;
-		db_assert(bus_num != NULL);
-	} else
+	else
 		*bus_num = 0;
 
 	if (*bus_num == 0) {
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 283157f..ad35a5e 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -312,8 +312,8 @@
 		irq_set_chip_data(i, apc);
 	}
 
-	irq_set_handler_data(apc->irq, apc);
-	irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler);
+	irq_set_chained_handler_and_data(apc->irq, ar71xx_pci_irq_handler,
+					 apc);
 }
 
 static void ar71xx_pci_reset(void)
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 0af362b..907d11d 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -321,8 +321,8 @@
 		irq_set_chip_data(i, apc);
 	}
 
-	irq_set_handler_data(apc->irq, apc);
-	irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
+	irq_set_chained_handler_and_data(apc->irq, ar724x_pci_irq_handler,
+					 apc);
 }
 
 static int ar724x_pci_probe(struct platform_device *pdev)
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index c5347d9..6a15dbd 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -20,7 +20,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_pci.h>
 
-#include <asm/gpio.h>
 #include <asm/addrspace.h>
 
 #include <lantiq_soc.h>
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 80fafe6..53c8efa 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -129,7 +129,7 @@
 	rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
 }
 
-static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void rt3883_pci_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
 	struct rt3883_pci_controller *rpc;
 	u32 pending;
@@ -145,7 +145,7 @@
 	}
 
 	while (pending) {
-		unsigned bit = __ffs(pending);
+		unsigned irq, bit = __ffs(pending);
 
 		irq = irq_find_mapping(rpc->irq_domain, bit);
 		generic_handle_irq(irq);
@@ -225,8 +225,7 @@
 		return -ENODEV;
 	}
 
-	irq_set_handler_data(irq, rpc);
-	irq_set_chained_handler(irq, rt3883_pci_irq_handler);
+	irq_set_chained_handler_and_data(irq, rt3883_pci_irq_handler, rpc);
 
 	return 0;
 }
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index b8a0bf5..c6996cf 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -311,12 +311,6 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	struct pci_dev *dev = bus->self;
-
-	if (pci_has_flag(PCI_PROBE_ONLY) && dev &&
-	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
-		pci_read_bridge_bases(bus);
-	}
 }
 
 EXPORT_SYMBOL(PCIBIOS_MIN_IO);
diff --git a/arch/mips/pistachio/Kconfig b/arch/mips/pistachio/Kconfig
new file mode 100644
index 0000000..97731ea
--- /dev/null
+++ b/arch/mips/pistachio/Kconfig
@@ -0,0 +1,13 @@
+config PISTACHIO_GPTIMER_CLKSRC
+	bool "Enable General Purpose Timer based clocksource"
+	depends on MACH_PISTACHIO
+	select CLKSRC_PISTACHIO
+	select MIPS_EXTERNAL_TIMER
+	help
+	  This option enables a clocksource driver based on a Pistachio
+	  SoC General Purpose external timer.
+
+	  If you want to enable CPUFreq, you need to enable
+	  this option.
+
+	  If you don't want to enable CPUFreq, you can leave this disabled.
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index 1207ec4..8b9cf64 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -88,7 +88,8 @@
 	* Make sure we have IRQ affinity.  It may have changed while
 	* we were processing the IRQ.
 	*/
-	if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
+	if (!cpumask_test_cpu(smp_processor_id(),
+			      irq_data_get_affinity_mask(d)))
 		return;
 #endif
 
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 24bf057..a8e70a9 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -36,8 +36,8 @@
 	int freq_scale;
 };
 
-static void systick_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt);
+static int systick_set_oneshot(struct clock_event_device *evt);
+static int systick_shutdown(struct clock_event_device *evt);
 
 static int systick_next_event(unsigned long delta,
 				struct clock_event_device *evt)
@@ -73,11 +73,12 @@
 		 * cevt-r4k uses 300, make sure systick
 		 * gets used if available
 		 */
-		.rating		= 310,
-		.features	= CLOCK_EVT_FEAT_ONESHOT,
-		.set_next_event	= systick_next_event,
-		.set_mode	= systick_set_clock_mode,
-		.event_handler	= systick_event_handler,
+		.rating			= 310,
+		.features		= CLOCK_EVT_FEAT_ONESHOT,
+		.set_next_event		= systick_next_event,
+		.set_state_shutdown	= systick_shutdown,
+		.set_state_oneshot	= systick_set_oneshot,
+		.event_handler		= systick_event_handler,
 	},
 };
 
@@ -87,33 +88,33 @@
 	.dev_id = &systick.dev,
 };
 
-static void systick_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int systick_shutdown(struct clock_event_device *evt)
 {
 	struct systick_device *sdev;
 
 	sdev = container_of(evt, struct systick_device, dev);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		if (!sdev->irq_requested)
-			setup_irq(systick.dev.irq, &systick_irqaction);
-		sdev->irq_requested = 1;
-		iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
-				systick.membase + SYSTICK_CONFIG);
-		break;
+	if (sdev->irq_requested)
+		free_irq(systick.dev.irq, &systick_irqaction);
+	sdev->irq_requested = 0;
+	iowrite32(0, systick.membase + SYSTICK_CONFIG);
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		if (sdev->irq_requested)
-			free_irq(systick.dev.irq, &systick_irqaction);
-		sdev->irq_requested = 0;
-		iowrite32(0, systick.membase + SYSTICK_CONFIG);
-		break;
+	return 0;
+}
 
-	default:
-		pr_err("%s: Unhandeled mips clock_mode\n", systick.dev.name);
-		break;
-	}
+static int systick_set_oneshot(struct clock_event_device *evt)
+{
+	struct systick_device *sdev;
+
+	sdev = container_of(evt, struct systick_device, dev);
+
+	if (!sdev->irq_requested)
+		setup_irq(systick.dev.irq, &systick_irqaction);
+	sdev->irq_requested = 1;
+	iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
+		  systick.membase + SYSTICK_CONFIG);
+
+	return 0;
 }
 
 static void __init ralink_systick_init(struct device_node *np)
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index e31e8cd..9bd7a2d 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -23,6 +23,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/serial_8250.h>
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 5aa3df8..650d5d3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -140,6 +140,11 @@
 	return 0;
 }
 
+static int rb532_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	return 8 + 4 * 32 + gpio;
+}
+
 static struct rb532_gpio_chip rb532_gpio_chip[] = {
 	[0] = {
 		.chip = {
@@ -148,6 +153,7 @@
 			.direction_output	= rb532_gpio_direction_output,
 			.get			= rb532_gpio_get,
 			.set			= rb532_gpio_set,
+			.to_irq			= rb532_gpio_to_irq,
 			.base			= 0,
 			.ngpio			= 32,
 		},
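
The new .to_irq callback lets gpiolib users translate an RB532 GPIO number into its interrupt line; the mapping is just the pin offset into the interrupt group that starts at 8 + 4 * 32. A trivial standalone check of that mapping (nothing here beyond the arithmetic in the patch):

#include <stdio.h>

static int rb532_gpio_to_irq(unsigned gpio)
{
	return 8 + 4 * 32 + gpio;	/* same expression as the callback above */
}

int main(void)
{
	printf("gpio 0  -> irq %d\n", rb532_gpio_to_irq(0));	/* 136 */
	printf("gpio 31 -> irq %d\n", rb532_gpio_to_irq(31));	/* 167 */
	return 0;
}
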
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index a6d10f6..42d6cb9 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -64,12 +64,6 @@
 	return LOCAL_HUB_L(PI_RT_COUNT) >= cnt ? -ETIME : 0;
 }
 
-static void rt_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 unsigned int rt_timer_irq;
 
 static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent);
@@ -124,7 +118,6 @@
 	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= rt_next_event;
-	cd->set_mode		= rt_set_mode;
 	clockevents_register_device(cd);
 }
 
diff --git a/arch/mips/sibyte/common/bus_watcher.c b/arch/mips/sibyte/common/bus_watcher.c
index 41a1d22..a4e55999 100644
--- a/arch/mips/sibyte/common/bus_watcher.c
+++ b/arch/mips/sibyte/common/bus_watcher.c
@@ -250,4 +250,4 @@
 	return 0;
 }
 
-__initcall(sibyte_bus_watcher);
+device_initcall(sibyte_bus_watcher);
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index cf8ec56..fb4b352 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -14,44 +14,33 @@
 #define SNI_COUNTER2_DIV	64
 #define SNI_COUNTER0_DIV	((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
 
-static void a20r_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int a20r_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  0) = SNI_COUNTER0_DIV;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  0) = SNI_COUNTER0_DIV >> 8;
-		wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0x34;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 0) = SNI_COUNTER0_DIV >> 8;
+	wmb();
 
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV;
-		wmb();
-		*(volatile u8 *)(A20R_PT_CLOCK_BASE +  8) = SNI_COUNTER2_DIV >> 8;
-		wmb();
-
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 12) = 0xb4;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV;
+	wmb();
+	*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
+	wmb();
+	return 0;
 }
 
 static struct clock_event_device a20r_clockevent_device = {
-	.name		= "a20r-timer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
+	.name			= "a20r-timer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
 
 	/* .mult, .shift, .max_delta_ns and .min_delta_ns left uninitialized */
 
-	.rating		= 300,
-	.irq		= SNI_A20R_IRQ_TIMER,
-	.set_mode	= a20r_set_mode,
+	.rating			= 300,
+	.irq			= SNI_A20R_IRQ_TIMER,
+	.set_state_periodic	= a20r_set_periodic,
 };
 
 static irqreturn_t a20r_interrupt(int irq, void *dev_id)
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 2791b86..9d9962a 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -117,22 +117,6 @@
 }
 EXPORT_SYMBOL(clk_put);
 
-/* GPIO support */
-
-#ifdef CONFIG_GPIOLIB
-int gpio_to_irq(unsigned gpio)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(gpio_to_irq);
-
-int irq_to_gpio(unsigned irq)
-{
-	return -EINVAL;
-}
-EXPORT_SYMBOL(irq_to_gpio);
-#endif
-
 #define BOARD_VEC(board)	extern struct txx9_board_vec board;
 #include <asm/txx9/boards.h>
 #undef BOARD_VEC
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 5be655e..375e591 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -89,6 +89,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -127,73 +131,6 @@
 #define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
 #define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
 
-/**
- * atomic_clear_mask - Atomically clear bits in memory
- * @mask: Mask of the bits to be cleared
- * @v: pointer to word in memory
- *
- * Atomically clears the bits set in mask from the memory word specified.
- */
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	and	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	mask = ~mask;
-	flags = arch_local_cli_save();
-	*addr &= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
-/**
- * atomic_set_mask - Atomically set bits in memory
- * @mask: Mask of the bits to be set
- * @v: pointer to word in memory
- *
- * Atomically sets the bits set in mask from the memory word specified.
- */
-static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
-{
-#ifdef CONFIG_SMP
-	int status;
-
-	asm volatile(
-		"1:	mov	%3,(_AAR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"
-		"	or	%4,%0		\n"
-		"	mov	%0,(_ADR,%2)	\n"
-		"	mov	(_ADR,%2),%0	\n"	/* flush */
-		"	mov	(_ASR,%2),%0	\n"
-		"	or	%0,%0		\n"
-		"	bne	1b		\n"
-		: "=&r"(status), "=m"(*addr)
-		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
-		: "memory", "cc");
-#else
-	unsigned long flags;
-
-	flags = arch_local_cli_save();
-	*addr |= mask;
-	arch_local_irq_restore(flags);
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* CONFIG_SMP */
 #endif /* _ASM_ATOMIC_H */
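
The hand-rolled atomic_set_mask()/atomic_clear_mask() helpers are dropped in favour of the generated atomic_or()/atomic_and() operations (the mn10300 tlb-smp.c hunk further down shows one caller converted). Outside the kernel the same set-mask/clear-mask idea can be written with C11 atomics; the sketch below uses atomic_fetch_or()/atomic_fetch_and() as stand-ins for the kernel primitives.

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint flags = 0;

	atomic_fetch_or(&flags, 0x5);		/* set bits 0 and 2 */
	atomic_fetch_and(&flags, ~0x4u);	/* clear bit 2      */

	printf("flags = %#x\n", atomic_load(&flags));	/* 0x1 */
	return 0;
}
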
diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h
index 07c5b4a..6218935 100644
--- a/arch/mn10300/include/asm/io.h
+++ b/arch/mn10300/include/asm/io.h
@@ -283,6 +283,7 @@
 
 #define ioremap_wc ioremap_nocache
 #define ioremap_wt ioremap_nocache
+#define ioremap_uc ioremap_nocache
 
 static inline void iounmap(void __iomem *addr)
 {
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
index 60f64ca..d9b34dd 100644
--- a/arch/mn10300/kernel/cevt-mn10300.c
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -41,12 +41,6 @@
 	return 0;
 }
 
-static void set_clock_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
 static DEFINE_PER_CPU(struct irqaction, timer_irq);
 
@@ -108,7 +102,6 @@
 
 	cd->rating		= 200;
 	cd->cpumask		= cpumask_of(smp_processor_id());
-	cd->set_mode		= set_clock_mode;
 	cd->event_handler	= event_handler;
 	cd->set_next_event	= next_event;
 
@@ -123,7 +116,7 @@
 	{
 		struct irq_data *data;
 		data = irq_get_irq_data(cd->irq);
-		cpumask_copy(data->affinity, cpumask_of(cpu));
+		cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu));
 		iact->flags |= IRQF_NOBALANCING;
 	}
 #endif
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 480de70..c716437 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -87,7 +87,8 @@
 		tmp2 = GxICR(irq);
 
 		irq_affinity_online[irq] =
-			cpumask_any_and(d->affinity, cpu_online_mask);
+			cpumask_any_and(irq_data_get_affinity_mask(d),
+					cpu_online_mask);
 		CROSS_GxICR(irq, irq_affinity_online[irq]) =
 			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
 		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
@@ -124,7 +125,7 @@
 	} else {
 		tmp = GxICR(irq);
 
-		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
+		irq_affinity_online[irq] = cpumask_any_and(irq_data_get_affinity_mask(d),
 							   cpu_online_mask);
 		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
 		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
@@ -316,15 +317,16 @@
 	self = smp_processor_id();
 	for (irq = 0; irq < NR_IRQS; irq++) {
 		struct irq_data *data = irq_get_irq_data(irq);
+		struct cpumask *mask = irq_data_get_affinity_mask(data);
 
 		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (cpumask_test_cpu(self, data->affinity) &&
+		if (cpumask_test_cpu(self, mask) &&
 		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
 			int cpu_id;
 			cpu_id = cpumask_first(cpu_online_mask);
-			cpumask_set_cpu(cpu_id, data->affinity);
+			cpumask_set_cpu(cpu_id, mask);
 		}
 		/* We need to operate irq_affinity_online atomically. */
 		arch_local_cli_save(flags);
@@ -335,8 +337,7 @@
 			GxICR(irq) = x & GxICR_LEVEL;
 			tmp = GxICR(irq);
 
-			new = cpumask_any_and(data->affinity,
-					      cpu_online_mask);
+			new = cpumask_any_and(mask, cpu_online_mask);
 			irq_affinity_online[irq] = new;
 
 			CROSS_GxICR(irq, new) =
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
index e5d0ef7..9a39ea9 100644
--- a/arch/mn10300/mm/tlb-smp.c
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -119,7 +119,7 @@
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index b5b036f..b7ab837 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -183,18 +183,16 @@
 	struct pci_dev *dev = NULL;
 	struct resource *r;
 
-	if (!(pci_probe & PCI_ASSIGN_ROMS)) {
-		/* Try to use BIOS settings for ROMs, otherwise let
-		   pci_assign_unassigned_resources() allocate the new
-		   addresses. */
-		for_each_pci_dev(dev) {
-			r = &dev->resource[PCI_ROM_RESOURCE];
-			if (!r->flags || !r->start)
-				continue;
-			if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
-				r->end -= r->start;
-				r->start = 0;
-			}
+	/* Try to use BIOS settings for ROMs, otherwise let
+	   pci_assign_unassigned_resources() allocate the new
+	   addresses. */
+	for_each_pci_dev(dev) {
+		r = &dev->resource[PCI_ROM_RESOURCE];
+		if (!r->flags || !r->start)
+			continue;
+		if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
+			r->end -= r->start;
+			r->start = 0;
 		}
 	}
 
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h
index 9e17aca..96c484b 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.h
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.h
@@ -20,13 +20,6 @@
 #define DBG(x...)
 #endif
 
-#define PCI_PROBE_BIOS 1
-#define PCI_PROBE_CONF1 2
-#define PCI_PROBE_CONF2 4
-#define PCI_NO_CHECKS 0x400
-#define PCI_ASSIGN_ROMS 0x1000
-#define PCI_BIOS_IRQ_SCAN 0x2000
-
 extern unsigned int pci_probe;
 
 /* pci-asb2305.c */
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 3dfe2d3..deaa893 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -324,7 +324,6 @@
 	struct pci_dev *dev;
 
 	if (bus->self) {
-		pci_read_bridge_bases(bus);
 		pcibios_fixup_bridge_resources(bus->self);
 	}
 
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 7c52e94..50e9701 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -48,29 +48,6 @@
 	return 0;
 }
 
-static void openrisc_timer_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
-{
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_debug(KERN_INFO "%s: periodic\n", __func__);
-		BUG();
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_debug(KERN_INFO "%s: oneshot\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-		pr_debug(KERN_INFO "%s: unused\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		pr_debug(KERN_INFO "%s: shutdown\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		pr_debug(KERN_INFO "%s: resume\n", __func__);
-		break;
-	}
-}
-
 /* This is the clock event device based on the OR1K tick timer.
  * As the timer is being used as a continuous clock-source (required for HR
  * timers) we cannot enable the PERIODIC feature.  The tick timer can run using
@@ -82,7 +59,6 @@
 	.features = CLOCK_EVT_FEAT_ONESHOT,
 	.rating = 300,
 	.set_next_event = openrisc_timer_set_next_event,
-	.set_mode = openrisc_timer_set_mode,
 };
 
 static inline void timer_ack(void)
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 269c23d..1a8f6f95 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -242,7 +242,6 @@
 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_BLOCK_EXT_DEVT=y
 CONFIG_LATENCYTOP=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index 33b148f..0ffb08f 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -295,7 +295,6 @@
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_RT_MUTEX_TESTER=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_RCU_CPU_STALL_INFO=y
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 226f8ca9..2536965 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -126,6 +126,10 @@
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -185,6 +189,9 @@
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index f3191db..413ec3c 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -131,7 +131,7 @@
 	if (cpu_dest < 0)
 		return -1;
 
-	cpumask_copy(d->affinity, dest);
+	cpumask_copy(irq_data_get_affinity_mask(d), dest);
 
 	return 0;
 }
@@ -339,7 +339,7 @@
 {
 #ifdef CONFIG_SMP
 	struct irq_data *d = irq_get_irq_data(irq);
-	cpumask_copy(d->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -508,7 +508,7 @@
 	unsigned long eirr_val;
 	int irq, cpu = smp_processor_id();
 #ifdef CONFIG_SMP
-	struct irq_desc *desc;
+	struct irq_data *irq_data;
 	cpumask_t dest;
 #endif
 
@@ -522,9 +522,9 @@
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	desc = irq_to_desc(irq);
-	cpumask_copy(&dest, desc->irq_data.affinity);
-	if (irqd_is_per_cpu(&desc->irq_data) &&
+	irq_data = irq_get_irq_data(irq);
+	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
+	if (irqd_is_per_cpu(irq_data) &&
 	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
 		int cpu = cpumask_first(&dest);
 
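
The accessor these hunks switch to hides where the affinity mask actually lives, so arch code stops poking at irq_desc/irq_data internals. A hedged sketch of its shape (the mask moved into struct irq_common_data around this series, so the exact member shown is an assumption):

	static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
	{
		return d->common->affinity;	/* assumed post-move layout */
	}
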
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 5ef2711..b447918 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -82,6 +82,9 @@
 	bool
 	default y
 
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+        bool
+
 config PPC
 	bool
 	default y
@@ -155,6 +158,8 @@
 	select HAVE_PERF_EVENTS_NMI if PPC64
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
+	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select HAVE_ARCH_SECCOMP_FILTER
 
 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -514,11 +519,6 @@
 	def_bool y
 	depends on NEED_MULTIPLE_NODES
 
-config PPC_HAS_HASH_64K
-	bool
-	depends on PPC64
-	default n
-
 config STDBINUTILS
 	bool "Using standard binutils settings"
 	depends on 44x
@@ -560,16 +560,16 @@
 	bool "4k page size"
 
 config PPC_16K_PAGES
-	bool "16k page size" if 44x || PPC_8xx
+	bool "16k page size"
+	depends on 44x || PPC_8xx
 
 config PPC_64K_PAGES
-	bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
-	depends on !PPC_FSL_BOOK3E
-	select PPC_HAS_HASH_64K if PPC_STD_MMU_64
+	bool "64k page size"
+	depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
 
 config PPC_256K_PAGES
-	bool "256k page size" if 44x
-	depends on !STDBINUTILS
+	bool "256k page size"
+	depends on 44x && !STDBINUTILS
 	help
 	  Make the page size 256k.
 
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 05f464e..4ca54fd 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -288,6 +288,26 @@
 pseries_le_defconfig:
 	$(call merge_into_defconfig,pseries_defconfig,le)
 
+PHONY += mpc85xx_defconfig
+mpc85xx_defconfig:
+	$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+		85xx-32bit 85xx-hw fsl-emb-nonhw)
+
+PHONY += mpc85xx_smp_defconfig
+mpc85xx_smp_defconfig:
+	$(call merge_into_defconfig,mpc85xx_basic_defconfig,\
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+
+PHONY += corenet32_smp_defconfig
+corenet32_smp_defconfig:
+	$(call merge_into_defconfig,corenet_basic_defconfig,\
+		85xx-32bit 85xx-smp 85xx-hw fsl-emb-nonhw)
+
+PHONY += corenet64_smp_defconfig
+corenet64_smp_defconfig:
+	$(call merge_into_defconfig,corenet_basic_defconfig,\
+		85xx-64bit 85xx-smp altivec 85xx-hw fsl-emb-nonhw)
+
 define archhelp
   @echo '* zImage          - Build default images selected by kernel config'
   @echo '  zImage.*        - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index ebf2022..426bf41 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -175,7 +175,7 @@
 
 /include/ "pq3-gpio-0.dtsi"
 
-	display@10000 {
+	display: display@10000 {
 		compatible = "fsl,diu", "fsl,p1022-diu";
 		reg = <0x10000 1000>;
 		interrupts = <64 2 0 0>;
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
index 1956dea..de76ae8 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-pre.dtsi
@@ -50,6 +50,8 @@
 		pci0 = &pci0;
 		pci1 = &pci1;
 		pci2 = &pci2;
+		vga = &display;
+		display = &display;
 	};
 
 	cpus {
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 9e9f7e2..9770d02 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -484,6 +484,11 @@
 		reg	   = <0xea000 0x4000>;
 	};
 
+	scfg: global-utilities@fc000 {
+		compatible = "fsl,t1040-scfg";
+		reg = <0xfc000 0x1000>;
+	};
+
 /include/ "elo3-dma-0.dtsi"
 /include/ "elo3-dma-1.dtsi"
 /include/ "qoriq-espi-0.dtsi"
diff --git a/arch/powerpc/boot/dts/t1023rdb.dts b/arch/powerpc/boot/dts/t1023rdb.dts
index 06b090a..d3fa829 100644
--- a/arch/powerpc/boot/dts/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/t1023rdb.dts
@@ -60,7 +60,7 @@
 			#address-cells = <1>;
 			#size-cells = <1>;
 			compatible = "fsl,ifc-nand";
-			reg = <0x2 0x0 0x10000>;
+			reg = <0x1 0x0 0x10000>;
 		};
 	};
 
@@ -99,6 +99,17 @@
 		};
 
 		i2c@118100 {
+			current-sensor@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
+			current-sensor@41 {
+				compatible = "ti,ina220";
+				reg = <0x41>;
+				shunt-resistor = <1000>;
+			};
 		};
 	};
 
diff --git a/arch/powerpc/boot/dts/t1024rdb.dts b/arch/powerpc/boot/dts/t1024rdb.dts
index 733e723..bf05e32 100644
--- a/arch/powerpc/boot/dts/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/t1024rdb.dts
@@ -114,6 +114,12 @@
 				reg = <0x4c>;
 			};
 
+			current-sensor@40 {
+				compatible = "ti,ina220";
+				reg = <0x40>;
+				shunt-resistor = <1000>;
+			};
+
 			eeprom@50 {
 				compatible = "atmel,24c256";
 				reg = <0x50>;
diff --git a/arch/powerpc/boot/dts/t1040d4rdb.dts b/arch/powerpc/boot/dts/t1040d4rdb.dts
new file mode 100644
index 0000000..2d1315a
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1040d4rdb.dts
@@ -0,0 +1,46 @@
+/*
+ * T1040D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xd4rdb.dtsi"
+
+/ {
+	model = "fsl,T1040D4RDB";
+	compatible = "fsl,T1040D4RDB";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t1042d4rdb.dts b/arch/powerpc/boot/dts/t1042d4rdb.dts
new file mode 100644
index 0000000..846f8c8
--- /dev/null
+++ b/arch/powerpc/boot/dts/t1042d4rdb.dts
@@ -0,0 +1,53 @@
+/*
+ * T1042D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/t104xsi-pre.dtsi"
+/include/ "t104xd4rdb.dtsi"
+
+/ {
+	model = "fsl,T1042D4RDB";
+	compatible = "fsl,T1042D4RDB";
+	#address-cells = <2>;
+	#size-cells = <2>;
+	interrupt-parent = <&mpic>;
+
+	ifc: localbus@ffe124000 {
+		cpld@3,0 {
+			compatible = "fsl,t1040d4rdb-cpld",
+					"fsl,deepsleep-cpld";
+		};
+	};
+};
+
+/include/ "fsl/t1040si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/t104xd4rdb.dtsi b/arch/powerpc/boot/dts/t104xd4rdb.dtsi
new file mode 100644
index 0000000..491367b
--- /dev/null
+++ b/arch/powerpc/boot/dts/t104xd4rdb.dtsi
@@ -0,0 +1,205 @@
+/*
+ * T1040D4RDB/T1042D4RDB Device Tree Source
+ *
+ * Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *	 notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *	 notice, this list of conditions and the following disclaimer in the
+ *	 documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Freescale Semiconductor nor the
+ *	 names of its contributors may be used to endorse or promote products
+ *	 derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		bman_fbpr: bman-fbpr {
+			size = <0 0x1000000>;
+			alignment = <0 0x1000000>;
+		};
+		qman_fqd: qman-fqd {
+			size = <0 0x400000>;
+			alignment = <0 0x400000>;
+		};
+		qman_pfdr: qman-pfdr {
+			size = <0 0x2000000>;
+			alignment = <0 0x2000000>;
+		};
+	};
+
+	ifc: localbus@ffe124000 {
+		reg = <0xf 0xfe124000 0 0x2000>;
+		ranges = <0 0 0xf 0xe8000000 0x08000000
+			  2 0 0xf 0xff800000 0x00010000
+			  3 0 0xf 0xffdf0000 0x00008000>;
+
+		nor@0,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "cfi-flash";
+			reg = <0x0 0x0 0x8000000>;
+			bank-width = <2>;
+			device-width = <1>;
+		};
+
+		nand@2,0 {
+			#address-cells = <1>;
+			#size-cells = <1>;
+			compatible = "fsl,ifc-nand";
+			reg = <0x2 0x0 0x10000>;
+		};
+
+		cpld@3,0 {
+			compatible = "fsl,t1040d4rdb-cpld";
+			reg = <3 0 0x300>;
+		};
+	};
+
+	memory {
+		device_type = "memory";
+	};
+
+	dcsr: dcsr@f00000000 {
+		ranges = <0x00000000 0xf 0x00000000 0x01072000>;
+	};
+
+	bportals: bman-portals@ff4000000 {
+		ranges = <0x0 0xf 0xf4000000 0x2000000>;
+	};
+
+	qportals: qman-portals@ff6000000 {
+		ranges = <0x0 0xf 0xf6000000 0x2000000>;
+	};
+
+	soc: soc@ffe000000 {
+		ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
+		reg = <0xf 0xfe000000 0 0x00001000>;
+
+		spi@110000 {
+			flash@0 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				compatible = "micron,n25q512ax3";
+				reg = <0>;
+				/* input clock */
+				spi-max-frequency = <10000000>;
+			};
+		};
+		i2c@118000 {
+			hwmon@4c {
+				compatible = "adi,adt7461";
+				reg = <0x4c>;
+			};
+
+			rtc@68 {
+				compatible = "dallas,ds1337";
+				reg = <0x68>;
+				interrupts = <0x2 0x1 0 0>;
+			};
+		};
+
+		i2c@118100 {
+			mux@77 {
+				/*
+				 * Child nodes of mux depend on which i2c
+				 * devices are connected via the mini PCI
+				 * connector slot1, the mini PCI connector
+				 * slot2, the HDMI connector, and the PEX
+				 * slot. Systems with such devices attached
+				 * should provide a wrapper .dts file that
+				 * includes this one, and adds those nodes
+				 */
+				compatible = "nxp,pca9546";
+				reg = <0x77>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+			};
+		};
+
+	};
+
+	pci0: pcie@ffe240000 {
+		reg = <0xf 0xfe240000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x0 0x0 0x10000000
+			  0x01000000 0 0x0 0xf 0xf8000000 0x0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci1: pcie@ffe250000 {
+		reg = <0xf 0xfe250000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x10000000 0 0x10000000
+			  0x01000000 0 0 0xf 0xf8010000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci2: pcie@ffe260000 {
+		reg = <0xf 0xfe260000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x20000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xf8020000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+
+	pci3: pcie@ffe270000 {
+		reg = <0xf 0xfe270000 0 0x10000>;
+		ranges = <0x02000000 0 0xe0000000 0xc 0x30000000 0 0x10000000
+			  0x01000000 0 0x00000000 0xf 0xf8030000 0 0x00010000>;
+		pcie@0 {
+			ranges = <0x02000000 0 0xe0000000
+				  0x02000000 0 0xe0000000
+				  0 0x10000000
+
+				  0x01000000 0 0x00000000
+				  0x01000000 0 0x00000000
+				  0 0x00010000>;
+		};
+	};
+};
diff --git a/arch/powerpc/configs/85xx-32bit.config b/arch/powerpc/configs/85xx-32bit.config
new file mode 100644
index 0000000..6b8894d
--- /dev/null
+++ b/arch/powerpc/configs/85xx-32bit.config
@@ -0,0 +1,5 @@
+CONFIG_HIGHMEM=y
+CONFIG_KEXEC=y
+CONFIG_PPC_85xx=y
+CONFIG_PROC_KCORE=y
+CONFIG_PHYS_64BIT=y
diff --git a/arch/powerpc/configs/85xx-64bit.config b/arch/powerpc/configs/85xx-64bit.config
new file mode 100644
index 0000000..4aba812
--- /dev/null
+++ b/arch/powerpc/configs/85xx-64bit.config
@@ -0,0 +1,4 @@
+CONFIG_MATH_EMULATION=y
+CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
+CONFIG_PPC64=y
+CONFIG_PPC_BOOK3E_64=y
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
new file mode 100644
index 0000000..528ff0e
--- /dev/null
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -0,0 +1,142 @@
+CONFIG_AQUANTIA_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_ATA=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_C293_PCIE=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_CICADA_PHY=y
+CONFIG_CLK_QORIQ=y
+CONFIG_CRYPTO_DEV_FSL_CAAM=y
+CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_DMADEVICES=y
+CONFIG_E1000E=y
+CONFIG_E1000=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_MPC85XX=y
+CONFIG_EDAC=y
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_LEGACY=y
+CONFIG_FB_FSL_DIU=y
+CONFIG_FS_ENET=y
+CONFIG_FSL_CORENET_CF=y
+CONFIG_FSL_DMA=y
+CONFIG_FSL_HV_MANAGER=y
+CONFIG_FSL_PQ_MDIO=y
+CONFIG_FSL_RIO=y
+CONFIG_FSL_XGMAC_MDIO=y
+CONFIG_GIANFAR=y
+CONFIG_GPIO_MPC8XXX=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_CPM=m
+CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C=y
+CONFIG_IGB=y
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_NETDEVICES=y
+CONFIG_NVRAM=y
+CONFIG_PATA_ALI=y
+CONFIG_PATA_SIL680=y
+CONFIG_PATA_VIA=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI=y
+CONFIG_PPC_EPAPR_HV_BYTECHAN=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_QE_GPIO=y
+CONFIG_QUICC_ENGINE=y
+CONFIG_RAPIDIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_DS1374=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_SIL=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SENSORS_INA2XX=y
+CONFIG_SENSORS_LM90=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DETECT_IRQ=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_NR_UARTS=6
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_QE=m
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+CONFIG_SND_POWERPC_SOC=y
+# CONFIG_SND_PPC is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_USB is not set
+CONFIG_SND=y
+CONFIG_SOUND=y
+CONFIG_SPI_FSL_ESPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI=y
+CONFIG_TERANETICS_PHY=y
+CONFIG_UCC_GETH=y
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_HID=m
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VITESSE_PHY=y
diff --git a/arch/powerpc/configs/85xx-smp.config b/arch/powerpc/configs/85xx-smp.config
new file mode 100644
index 0000000..3b4d1e5
--- /dev/null
+++ b/arch/powerpc/configs/85xx-smp.config
@@ -0,0 +1,2 @@
+CONFIG_NR_CPUS=24
+CONFIG_SMP=y
diff --git a/arch/powerpc/configs/altivec.config b/arch/powerpc/configs/altivec.config
new file mode 100644
index 0000000..58a697c
--- /dev/null
+++ b/arch/powerpc/configs/altivec.config
@@ -0,0 +1 @@
+CONFIG_ALTIVEC=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
deleted file mode 100644
index 3765993..0000000
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ /dev/null
@@ -1,185 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_EMBEDDED=y
-CONFIG_PERF_EVENTS=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_CORENET_GENERIC=y
-CONFIG_HIGHMEM=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_KEXEC=y
-CONFIG_FORCE_MAX_ZONEORDER=13
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_FSL_RIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_XFRM_SUB_POLICY=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_SATA_SIL=y
-CONFIG_PATA_SIL680=y
-CONFIG_NETDEVICES=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_AT803X_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BUS_MUX_GPIO=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_PPC_EPAPR_HV_BYTECHAN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MPC=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_SENSORS_LM90=y
-CONFIG_SENSORS_INA2XX=y
-CONFIG_USB_HID=m
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_MPC85XX=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_UIO=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_FSL_HV_MANAGER=y
-CONFIG_STAGING=y
-CONFIG_FSL_CORENET_CF=y
-CONFIG_CLK_QORIQ=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_RCU_TRACE=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
deleted file mode 100644
index 33cd1df..0000000
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ /dev/null
@@ -1,176 +0,0 @@
-CONFIG_PPC64=y
-CONFIG_PPC_BOOK3E_64=y
-CONFIG_ALTIVEC=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=24
-CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CGROUPS=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_CORENET_GENERIC=y
-# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_FSL_RIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_ATA=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FSL_PQ_MDIO=y
-CONFIG_FSL_XGMAC_MDIO=y
-CONFIG_E1000E=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_MDIO_BUS_MUX_GPIO=y
-CONFIG_MDIO_BUS_MUX_MMIOREG=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_PPC_EPAPR_HV_BYTECHAN=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MPC=y
-CONFIG_I2C_MUX=y
-CONFIG_I2C_MUX_PCA954x=y
-CONFIG_SPI=y
-CONFIG_SPI_GPIO=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_SENSORS_LM90=y
-CONFIG_SENSORS_INA2XX=y
-CONFIG_USB_HID=m
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_VIRT_DRIVERS=y
-CONFIG_FSL_HV_MANAGER=y
-CONFIG_CLK_QORIQ=y
-CONFIG_FSL_CORENET_CF=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_INFO=y
-CONFIG_FRAME_WARN=1024
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_SHIRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
diff --git a/arch/powerpc/configs/corenet_basic_defconfig b/arch/powerpc/configs/corenet_basic_defconfig
new file mode 100644
index 0000000..b568d46
--- /dev/null
+++ b/arch/powerpc/configs/corenet_basic_defconfig
@@ -0,0 +1 @@
+CONFIG_CORENET_GENERIC=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
new file mode 100644
index 0000000..41e4d35
--- /dev/null
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -0,0 +1,126 @@
+CONFIG_ADFS_FS=m
+CONFIG_AFFS_FS=m
+CONFIG_AUDIT=y
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_BINFMT_MISC=m
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM_SIZE=131072
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CRC_T10DIF=y
+CONFIG_CPUSETS=y
+CONFIG_CRAMFS=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DEVTMPFS=y
+CONFIG_DUMMY=y
+CONFIG_EFS_FS=m
+CONFIG_EXPERT=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS=y
+CONFIG_FB=y
+CONFIG_FHANDLE=y
+CONFIG_FIXED_PHY=y
+CONFIG_FONT_8x16=y
+CONFIG_FONT_8x8=y
+CONFIG_FONTS=y
+CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAME_WARN=1024
+CONFIG_FTL=y
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_HPFS_FS=m
+CONFIG_HUGETLBFS=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKCONFIG=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_PNP=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_SCTP=m
+CONFIG_IPV6=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_ISO9660_FS=m
+CONFIG_JFFS2_FS_DEBUG=1
+CONFIG_JFFS2_FS=y
+CONFIG_JOLIET=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_MAC_PARTITION=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MSDOS_FS=m
+CONFIG_MTD_UBI=y
+CONFIG_MTD=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_NET_KEY=y
+CONFIG_NET=y
+CONFIG_NFSD=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=m
+CONFIG_NO_HZ=y
+CONFIG_NTFS_FS=y
+CONFIG_PACKET=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_PERF_EVENTS=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_QNX4FS_FS=m
+CONFIG_RCU_TRACE=y
+CONFIG_ROOT_NFS=y
+CONFIG_SYSV_FS=m
+CONFIG_SYSVIPC=y
+CONFIG_TMPFS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UDF_FS=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_UFS_FS=m
+CONFIG_UIO=y
+CONFIG_UNIX=y
+CONFIG_VFAT_FS=y
+CONFIG_VXFS_FS=m
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_USER=y
+CONFIG_ZISOFS=y
diff --git a/arch/powerpc/configs/mpc85xx_basic_defconfig b/arch/powerpc/configs/mpc85xx_basic_defconfig
new file mode 100644
index 0000000..850bd19
--- /dev/null
+++ b/arch/powerpc/configs/mpc85xx_basic_defconfig
@@ -0,0 +1,23 @@
+CONFIG_MATH_EMULATION=y
+CONFIG_MPC8536_DS=y
+CONFIG_MPC8540_ADS=y
+CONFIG_MPC8560_ADS=y
+CONFIG_MPC85xx_CDS=y
+CONFIG_MPC85xx_DS=y
+CONFIG_MPC85xx_MDS=y
+CONFIG_MPC85xx_RDB=y
+CONFIG_KSI8560=y
+CONFIG_MVME2500=y
+CONFIG_P1010_RDB=y
+CONFIG_P1022_DS=y
+CONFIG_P1022_RDK=y
+CONFIG_P1023_RDB=y
+CONFIG_SBC8548=y
+CONFIG_SOCRATES=y
+CONFIG_STX_GP3=y
+CONFIG_TQM8540=y
+CONFIG_TQM8541=y
+CONFIG_TQM8548=y
+CONFIG_TQM8555=y
+CONFIG_TQM8560=y
+CONFIG_XES_MPC85xx=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
deleted file mode 100644
index 6ecf7bd..0000000
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ /dev/null
@@ -1,252 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_C293_PCIE=y
-CONFIG_MPC8540_ADS=y
-CONFIG_MPC8560_ADS=y
-CONFIG_MPC85xx_CDS=y
-CONFIG_MPC85xx_MDS=y
-CONFIG_MPC8536_DS=y
-CONFIG_MPC85xx_DS=y
-CONFIG_MPC85xx_RDB=y
-CONFIG_P1010_RDB=y
-CONFIG_P1022_DS=y
-CONFIG_P1022_RDK=y
-CONFIG_P1023_RDB=y
-CONFIG_SOCRATES=y
-CONFIG_KSI8560=y
-CONFIG_XES_MPC85xx=y
-CONFIG_STX_GP3=y
-CONFIG_TQM8540=y
-CONFIG_TQM8541=y
-CONFIG_TQM8548=y
-CONFIG_TQM8555=y
-CONFIG_TQM8560=y
-CONFIG_SBC8548=y
-CONFIG_MVME2500=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_HIGHMEM=y
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIEASPM is not set
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_M25P80=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_PATA_ALI=y
-CONFIG_PATA_VIA=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_UCC_GETH=y
-CONFIG_GIANFAR=y
-CONFIG_E1000=y
-CONFIG_E1000E=y
-CONFIG_IGB=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=6
-CONFIG_SERIAL_8250_RUNTIME_UARTS=6
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
-CONFIG_NVRAM=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_SENSORS_LM90=y
-CONFIG_FB=y
-CONFIG_FB_FSL_DIU=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_DRIVERS is not set
-CONFIG_SND_INTEL8X0=y
-# CONFIG_SND_PPC is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_POWERPC_SOC=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_MPC85XX=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
deleted file mode 100644
index b6c7111..0000000
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ /dev/null
@@ -1,244 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_PHYS_64BIT=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=8
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_AUDIT=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_C293_PCIE=y
-CONFIG_MPC8540_ADS=y
-CONFIG_MPC8560_ADS=y
-CONFIG_MPC85xx_CDS=y
-CONFIG_MPC85xx_MDS=y
-CONFIG_MPC8536_DS=y
-CONFIG_MPC85xx_DS=y
-CONFIG_MPC85xx_RDB=y
-CONFIG_P1010_RDB=y
-CONFIG_P1022_DS=y
-CONFIG_P1022_RDK=y
-CONFIG_P1023_RDB=y
-CONFIG_SOCRATES=y
-CONFIG_KSI8560=y
-CONFIG_XES_MPC85xx=y
-CONFIG_STX_GP3=y
-CONFIG_TQM8540=y
-CONFIG_TQM8541=y
-CONFIG_TQM8548=y
-CONFIG_TQM8555=y
-CONFIG_TQM8560=y
-CONFIG_SBC8548=y
-CONFIG_QUICC_ENGINE=y
-CONFIG_QE_GPIO=y
-CONFIG_HIGHMEM=y
-CONFIG_BINFMT_MISC=m
-CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=12
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_RAPIDIO=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NET_IPIP=y
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_INET_ESP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-CONFIG_IPV6=y
-CONFIG_IP_SCTP=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_FTL=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_FSL_ELBC=y
-CONFIG_MTD_NAND_FSL_IFC=y
-CONFIG_MTD_SPI_NOR=y
-CONFIG_MTD_UBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_EEPROM_AT24=y
-CONFIG_EEPROM_LEGACY=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_FSL=y
-CONFIG_SATA_SIL24=y
-CONFIG_PATA_ALI=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-CONFIG_FS_ENET=y
-CONFIG_UCC_GETH=y
-CONFIG_GIANFAR=y
-CONFIG_E1000E=y
-CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_FIXED_PHY=y
-CONFIG_INPUT_FF_MEMLESS=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_DETECT_IRQ=y
-CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
-CONFIG_NVRAM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_CPM=m
-CONFIG_I2C_MPC=y
-CONFIG_SPI=y
-CONFIG_SPI_FSL_SPI=y
-CONFIG_SPI_FSL_ESPI=y
-CONFIG_GPIO_MPC8XXX=y
-CONFIG_SENSORS_LM90=y
-CONFIG_FB=y
-CONFIG_FB_FSL_DIU=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_SUPPORT_OLD_API is not set
-# CONFIG_SND_DRIVERS is not set
-CONFIG_SND_INTEL8X0=y
-# CONFIG_SND_PPC is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_POWERPC_SOC=y
-CONFIG_HID_A4TECH=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_FSL=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_USB_STORAGE=y
-CONFIG_MMC=y
-CONFIG_MMC_SDHCI=y
-CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SDHCI_OF_ESDHC=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_DS1374=y
-CONFIG_RTC_DRV_DS3232=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_DMADEVICES=y
-CONFIG_FSL_DMA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=y
-CONFIG_NTFS_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_ADFS_FS=m
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_UBIFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_VXFS_FS=m
-CONFIG_HPFS_FS=m
-CONFIG_QNX4FS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_850=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_CRC_T10DIF=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_SHA512=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRYPTO_DEV_FSL_CAAM=y
-CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index a97efc2..6bc0ee4 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -355,3 +355,6 @@
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 0d9efce..7991f37 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -190,7 +190,8 @@
 CONFIG_HVCS=m
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_IBM_BSR=m
-CONFIG_GEN_RTC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=y
 CONFIG_RAW_DRIVER=y
 CONFIG_MAX_RAW_DEVS=1024
 CONFIG_FB=y
@@ -319,3 +320,6 @@
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_POWERNV=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 050712e..ab9f4e0 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -6,5 +6,4 @@
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
-generic-y += trace_clock.h
 generic-y += vtime.h
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 0cc6eed..85e88f7 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -7,14 +7,23 @@
 
 static inline int arch_get_random_long(unsigned long *v)
 {
-	if (ppc_md.get_random_long)
-		return ppc_md.get_random_long(v);
-
 	return 0;
 }
 
 static inline int arch_get_random_int(unsigned int *v)
 {
+	return 0;
+}
+
+static inline int arch_get_random_seed_long(unsigned long *v)
+{
+	if (ppc_md.get_random_seed)
+		return ppc_md.get_random_seed(v);
+
+	return 0;
+}
+static inline int arch_get_random_seed_int(unsigned int *v)
+{
 	unsigned long val;
 	int rc;
 
@@ -27,22 +36,13 @@
 
 static inline int arch_has_random(void)
 {
-	return !!ppc_md.get_random_long;
+	return 0;
 }
 
-static inline int arch_get_random_seed_long(unsigned long *v)
-{
-	return 0;
-}
-static inline int arch_get_random_seed_int(unsigned int *v)
-{
-	return 0;
-}
 static inline int arch_has_random_seed(void)
 {
-	return 0;
+	return !!ppc_md.get_random_seed;
 }
-
 #endif /* CONFIG_ARCH_RANDOM */
 
 #ifdef CONFIG_PPC_POWERNV
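
With this split the platform hook only feeds the seed path: arch_get_random_long()/arch_get_random_int() now always report failure, while arch_get_random_seed_long() forwards to ppc_md.get_random_seed. A hedged usage sketch of how a consumer would poll the seed source (mix_seed() is a hypothetical helper, not a kernel API):

	static void seed_from_platform(void)
	{
		unsigned long seed;

		/* illustrative only; mix_seed() is a made-up consumer */
		if (arch_has_random_seed() && arch_get_random_seed_long(&seed))
			mix_seed(seed);
	}
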
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 512d278..55f106e 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -67,6 +67,10 @@
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, subf)
 
+ATOMIC_OP(and, and)
+ATOMIC_OP(or, or)
+ATOMIC_OP(xor, xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -304,6 +308,9 @@
 
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, subf)
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(or, or)
+ATOMIC64_OP(xor, xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 51ccc72..0eca6ef 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -76,12 +76,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_lwsync();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_lwsync();							\
 	___p1;								\
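
READ_ONCE()/WRITE_ONCE() are the current spellings of ACCESS_ONCE() and also cope with non-scalar types; the acquire/release semantics are unchanged. An illustrative producer/consumer handoff (payload, ready, compute() and consume() are made-up names):

	/* producer */
	payload = compute();			/* plain store */
	smp_store_release(&ready, 1);		/* publishes payload */

	/* consumer */
	if (smp_load_acquire(&ready))		/* pairs with the release above */
		consume(payload);		/* guaranteed to observe the payload store */
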
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 30b35ff..6229e6b 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -40,7 +40,12 @@
 extern void flush_dcache_icache_page(struct page *page);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
 extern void __flush_dcache_icache_phys(unsigned long physaddr);
-#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
+#else
+static inline void __flush_dcache_icache_phys(unsigned long physaddr)
+{
+	BUG();
+}
+#endif
 
 extern void flush_dcache_range(unsigned long start, unsigned long stop);
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 8251a3b..e8d9ef4 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -20,15 +20,6 @@
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-extern __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-					unsigned short len,
-					unsigned short proto,
-					__wsum sum);
-
-/*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
@@ -127,6 +118,34 @@
 #endif
 }
 
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+#define HAVE_ARCH_CSUM_ADD
+static inline __wsum csum_add(__wsum csum, __wsum addend)
+{
+#ifdef __powerpc64__
+	u64 res = (__force u64)csum;
+
+	res += (__force u64)addend;
+	return (__force __wsum)((u32)res + (res >> 32));
+#else
+	asm("addc %0,%0,%1;"
+	    "addze %0,%0;"
+	    : "+r" (csum) : "r" (addend));
+	return csum;
+#endif
+}
+
 #endif
 #endif /* __KERNEL__ */
 #endif
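
The now-inline csum_tcpudp_magic() simply folds the pseudo-header sum produced by csum_tcpudp_nofold(). An illustrative use when finalising a UDP checksum over an already-summed payload (uh, payload, len, saddr and daddr are made-up names for this sketch):

	__wsum csum = csum_partial(payload, len, 0);	/* 32-bit partial sum */
	uh->check = csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;	/* UDP treats 0 as "no checksum" */
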
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index b142b8e..4f2df58 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -174,6 +174,13 @@
 			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
 			int _fd;
 		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			unsigned int _call_addr; /* calling insn */
+			int _syscall;		 /* triggering system call number */
+			unsigned int _arch;	 /* AUDIT_ARCH_* of syscall */
+		} _sigsys;
 	} _sifields;
 } compat_siginfo_t;
 
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index e9bdda8..406c2b1 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -10,6 +10,7 @@
 struct device_node;
 #ifdef CONFIG_PPC64
 struct pci_dn;
+struct iommu_table;
 #endif
 
 /*
@@ -23,13 +24,15 @@
 	struct dma_map_ops	*dma_ops;
 
 	/*
-	 * When an iommu is in use, dma_data is used as a ptr to the base of the
-	 * iommu_table.  Otherwise, it is a simple numerical offset.
+	 * These two used to be a union. However, with the hybrid ops we need
+	 * both so here we store both a DMA offset for direct mappings and
+	 * an iommu_table for remapped DMA.
 	 */
-	union {
-		dma_addr_t	dma_offset;
-		void		*iommu_table_base;
-	} dma_data;
+	dma_addr_t		dma_offset;
+
+#ifdef CONFIG_PPC64
+	struct iommu_table	*iommu_table_base;
+#endif
 
 #ifdef CONFIG_IOMMU_API
 	void			*iommu_domain;
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 9103687..710f60e 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -21,12 +21,12 @@
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 /* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag,
+extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+					 dma_addr_t *dma_handle, gfp_t flag,
+					 struct dma_attrs *attrs);
+extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_handle,
 				       struct dma_attrs *attrs);
-extern void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle,
-				     struct dma_attrs *attrs);
 extern int dma_direct_mmap_coherent(struct device *dev,
 				    struct vm_area_struct *vma,
 				    void *cpu_addr, dma_addr_t handle,
@@ -106,7 +106,7 @@
 static inline dma_addr_t get_dma_offset(struct device *dev)
 {
 	if (dev)
-		return dev->archdata.dma_data.dma_offset;
+		return dev->archdata.dma_offset;
 
 	return PCI_DRAM_OFFSET;
 }
@@ -114,7 +114,7 @@
 static inline void set_dma_offset(struct device *dev, dma_addr_t off)
 {
 	if (dev)
-		dev->archdata.dma_data.dma_offset = off;
+		dev->archdata.dma_offset = off;
 }
 
 /* this will be removed soon */
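
get_dma_offset()/set_dma_offset() now touch a dedicated archdata field instead of the old union, so the direct-DMA path keeps its simple address arithmetic even when an iommu_table is also attached. A hedged sketch of that arithmetic (not the real helper, which also takes direction and attrs):

	/* sketch of the usual powerpc direct-DMA mapping math */
	static dma_addr_t dma_direct_map_page_sketch(struct device *dev,
						     struct page *page,
						     unsigned long offset,
						     size_t size)
	{
		return page_to_phys(page) + offset + get_dma_offset(dev);
	}
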
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index a8d2ef3..5879fde 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -721,6 +721,7 @@
 				  unsigned long flags);
 extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
 #define ioremap_nocache(addr, size)	ioremap((addr), (size))
+#define ioremap_uc(addr, size)		ioremap((addr), (size))
 
 extern void iounmap(volatile void __iomem *addr);
 
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ca18cff..7b87bab 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -2,17 +2,17 @@
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
  * Rewrite, cleanup:
  * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
- * 
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- * 
+ *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- * 
+ *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
@@ -131,16 +131,21 @@
 
 struct scatterlist;
 
-static inline void set_iommu_table_base(struct device *dev, void *base)
+#ifdef CONFIG_PPC64
+
+static inline void set_iommu_table_base(struct device *dev,
+					struct iommu_table *base)
 {
-	dev->archdata.dma_data.iommu_table_base = base;
+	dev->archdata.iommu_table_base = base;
 }
 
 static inline void *get_iommu_table_base(struct device *dev)
 {
-	return dev->archdata.dma_data.iommu_table_base;
+	return dev->archdata.iommu_table_base;
 }
 
+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
+
 /* Frees table for an individual device node */
 extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
 
@@ -225,6 +230,20 @@
 }
 #endif /* !CONFIG_IOMMU_API */
 
+#else
+
+static inline void *get_iommu_table_base(struct device *dev)
+{
+	return NULL;
+}
+
+static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
 extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			    struct scatterlist *sglist, int nelems,
 			    unsigned long mask,
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index efbf9a3..47e155f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -18,14 +18,29 @@
 #define JUMP_ENTRY_TYPE		stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE	4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table,  \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
 		 ".popsection \n\t"
-		 : :  "i" (key) : : l_yes);
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes]\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
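
These two hooks back the 4.3 static-branch rework, where a key's test site may default to either a NOP or an unconditional branch. An illustrative in-kernel use of the generic wrappers built on top of them (my_feature and slow_path() are hypothetical names):

	DEFINE_STATIC_KEY_FALSE(my_feature);	/* test site starts out as a NOP */

	void hot_path(void)
	{
		if (static_branch_unlikely(&my_feature))	/* patched live when enabled */
			slow_path();
	}

	/* elsewhere, at feature-enable time */
	static_branch_enable(&my_feature);
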
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 952579f..cab6753 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -249,7 +249,7 @@
 #endif
 
 #ifdef CONFIG_ARCH_RANDOM
-	int (*get_random_long)(unsigned long *v);
+	int (*get_random_seed)(unsigned long *v);
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/mpc52xx_psc.h b/arch/powerpc/include/asm/mpc52xx_psc.h
index d0ece25..04c7e8f 100644
--- a/arch/powerpc/include/asm/mpc52xx_psc.h
+++ b/arch/powerpc/include/asm/mpc52xx_psc.h
@@ -150,7 +150,10 @@
 
 /* Structure of the hardware registers */
 struct mpc52xx_psc {
-	u8		mode;		/* PSC + 0x00 */
+	union {
+		u8	mode;		/* PSC + 0x00 */
+		u8	mr2;
+	};
 	u8		reserved0[3];
 	union {				/* PSC + 0x04 */
 		u16	status;
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index e9e4c52..8374afe 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -154,7 +154,10 @@
 #define OPAL_FLASH_WRITE			111
 #define OPAL_FLASH_ERASE			112
 #define OPAL_PRD_MSG				113
-#define OPAL_LAST				113
+#define OPAL_LEDS_GET_INDICATOR			114
+#define OPAL_LEDS_SET_INDICATOR			115
+#define OPAL_CEC_REBOOT2			116
+#define OPAL_LAST				116
 
 /* Device tree flags */
 
@@ -340,6 +343,18 @@
 	OPAL_ASSERT_RESET   = 1
 };
 
+enum OpalSlotLedType {
+	OPAL_SLOT_LED_TYPE_ID = 0,	/* IDENTIFY LED */
+	OPAL_SLOT_LED_TYPE_FAULT = 1,	/* FAULT LED */
+	OPAL_SLOT_LED_TYPE_ATTN = 2,	/* System Attention LED */
+	OPAL_SLOT_LED_TYPE_MAX = 3
+};
+
+enum OpalSlotLedState {
+	OPAL_SLOT_LED_STATE_OFF = 0,	/* LED is OFF */
+	OPAL_SLOT_LED_STATE_ON = 1	/* LED is ON */
+};
+
 /*
  * Address cycle types for LPC accesses. These also correspond
  * to the content of the first cell of the "reg" property for
@@ -361,6 +376,7 @@
 	OPAL_MSG_HMI_EVT,
 	OPAL_MSG_DPO,
 	OPAL_MSG_PRD,
+	OPAL_MSG_OCC,
 	OPAL_MSG_TYPE_MAX,
 };
 
@@ -437,6 +453,7 @@
 /* HMI interrupt event */
 enum OpalHMI_Version {
 	OpalHMIEvt_V1 = 1,
+	OpalHMIEvt_V2 = 2,
 };
 
 enum OpalHMI_Severity {
@@ -467,6 +484,49 @@
 	OpalHMI_ERROR_CAPP_RECOVERY,
 };
 
+enum OpalHMI_XstopType {
+	CHECKSTOP_TYPE_UNKNOWN	=	0,
+	CHECKSTOP_TYPE_CORE	=	1,
+	CHECKSTOP_TYPE_NX	=	2,
+};
+
+enum OpalHMI_CoreXstopReason {
+	CORE_CHECKSTOP_IFU_REGFILE		= 0x00000001,
+	CORE_CHECKSTOP_IFU_LOGIC		= 0x00000002,
+	CORE_CHECKSTOP_PC_DURING_RECOV		= 0x00000004,
+	CORE_CHECKSTOP_ISU_REGFILE		= 0x00000008,
+	CORE_CHECKSTOP_ISU_LOGIC		= 0x00000010,
+	CORE_CHECKSTOP_FXU_LOGIC		= 0x00000020,
+	CORE_CHECKSTOP_VSU_LOGIC		= 0x00000040,
+	CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE	= 0x00000080,
+	CORE_CHECKSTOP_LSU_REGFILE		= 0x00000100,
+	CORE_CHECKSTOP_PC_FWD_PROGRESS		= 0x00000200,
+	CORE_CHECKSTOP_LSU_LOGIC		= 0x00000400,
+	CORE_CHECKSTOP_PC_LOGIC			= 0x00000800,
+	CORE_CHECKSTOP_PC_HYP_RESOURCE		= 0x00001000,
+	CORE_CHECKSTOP_PC_HANG_RECOV_FAILED	= 0x00002000,
+	CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED	= 0x00004000,
+	CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ	= 0x00008000,
+	CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ	= 0x00010000,
+};
+
+enum OpalHMI_NestAccelXstopReason {
+	NX_CHECKSTOP_SHM_INVAL_STATE_ERR	= 0x00000001,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1	= 0x00000002,
+	NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2	= 0x00000004,
+	NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR	= 0x00000008,
+	NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR	= 0x00000010,
+	NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR	= 0x00000020,
+	NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR	= 0x00000040,
+	NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR	= 0x00000080,
+	NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR	= 0x00000100,
+	NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR	= 0x00000200,
+	NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR	= 0x00000400,
+	NX_CHECKSTOP_DMA_CRB_UE			= 0x00000800,
+	NX_CHECKSTOP_DMA_CRB_SUE		= 0x00001000,
+	NX_CHECKSTOP_PBI_ISN_UE			= 0x00002000,
+};
+
 struct OpalHMIEvent {
 	uint8_t		version;	/* 0x00 */
 	uint8_t		severity;	/* 0x01 */
@@ -477,6 +537,23 @@
 	__be64		hmer;
 	/* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
 	__be64		tfmr;
+
+	/* version 2 and later */
+	union {
+		/*
+		 * checkstop info (Core/NX).
+		 * Valid for OpalHMI_ERROR_MALFUNC_ALERT.
+		 */
+		struct {
+			uint8_t	xstop_type;	/* enum OpalHMI_XstopType */
+			uint8_t reserved_1[3];
+			__be32  xstop_reason;
+			union {
+				__be32 pir;	/* for CHECKSTOP_TYPE_CORE */
+				__be32 chip_id;	/* for CHECKSTOP_TYPE_NX */
+			} u;
+		} xstop_error;
+	} u;
 };
 
 enum {
@@ -700,6 +777,17 @@
 
 struct opal_prd_msg;
 
+#define OCC_RESET                       0
+#define OCC_LOAD                        1
+#define OCC_THROTTLE                    2
+#define OCC_MAX_THROTTLE_STATUS         5
+
+struct opal_occ_msg {
+	__be64 type;
+	__be64 chip;
+	__be64 throttle_status;
+};
+
 /*
  * SG entries
  *
@@ -756,6 +844,52 @@
 	__be64 buffer_ra;		/* Buffer real address */
 };
 
+/*
+ * EPOW status sharing (OPAL and the host)
+ *
+ * The host passes OPAL a buffer of length OPAL_SYSEPOW_MAX, with
+ * individual elements being 16 bits wide, to fetch the system-wide
+ * EPOW status. Each element in the buffer contains the EPOW status
+ * in its bit representation for a particular EPOW sub class as
+ * defined here. So multiple detailed EPOW status bits specific to
+ * any sub class can be represented in a single buffer element as
+ * its bit representation.
+ */
+
+/* System EPOW type */
+enum OpalSysEpow {
+	OPAL_SYSEPOW_POWER	= 0,	/* Power EPOW */
+	OPAL_SYSEPOW_TEMP	= 1,	/* Temperature EPOW */
+	OPAL_SYSEPOW_COOLING	= 2,	/* Cooling EPOW */
+	OPAL_SYSEPOW_MAX	= 3,	/* Max EPOW categories */
+};
+
+/* Power EPOW */
+enum OpalSysPower {
+	OPAL_SYSPOWER_UPS	= 0x0001, /* System on UPS power */
+	OPAL_SYSPOWER_CHNG	= 0x0002, /* System power config change */
+	OPAL_SYSPOWER_FAIL	= 0x0004, /* System impending power failure */
+	OPAL_SYSPOWER_INCL	= 0x0008, /* System incomplete power */
+};
+
+/* Temperature EPOW */
+enum OpalSysTemp {
+	OPAL_SYSTEMP_AMB	= 0x0001, /* System over ambient temperature */
+	OPAL_SYSTEMP_INT	= 0x0002, /* System over internal temperature */
+	OPAL_SYSTEMP_HMD	= 0x0004, /* System over ambient humidity */
+};
+
+/* Cooling EPOW */
+enum OpalSysCooling {
+	OPAL_SYSCOOL_INSF	= 0x0001, /* System insufficient cooling */
+};
+
+/* Argument to OPAL_CEC_REBOOT2() */
+enum {
+	OPAL_REBOOT_NORMAL		= 0,
+	OPAL_REBOOT_PLATFORM_ERROR	= 1,
+};
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_API_H */
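
The EPOW comment above describes a caller-supplied array of up to OPAL_SYSEPOW_MAX 16-bit words, one per sub class, with the per-class bits given by the enums that follow it. Purely as an illustration of consuming such a buffer (standalone C; constants copied from this header, report_epow() is a made-up helper, not kernel or OPAL API):

#include <stdint.h>
#include <stdio.h>

#define OPAL_SYSEPOW_POWER	0
#define OPAL_SYSEPOW_TEMP	1
#define OPAL_SYSEPOW_MAX	3

#define OPAL_SYSPOWER_UPS	0x0001
#define OPAL_SYSTEMP_AMB	0x0001

static void report_epow(const uint16_t status[], int classes)
{
	if (classes > OPAL_SYSEPOW_POWER &&
	    (status[OPAL_SYSEPOW_POWER] & OPAL_SYSPOWER_UPS))
		printf("system running on UPS power\n");

	if (classes > OPAL_SYSEPOW_TEMP &&
	    (status[OPAL_SYSEPOW_TEMP] & OPAL_SYSTEMP_AMB))
		printf("system over ambient temperature\n");
}

int main(void)
{
	/* One 16-bit word per EPOW sub class, bits set as defined above */
	uint16_t status[OPAL_SYSEPOW_MAX] = {
		[OPAL_SYSEPOW_POWER] = OPAL_SYSPOWER_UPS,
	};

	report_epow(status, OPAL_SYSEPOW_MAX);
	return 0;
}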
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 958e941..8001159 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -44,6 +44,7 @@
 		       uint32_t hour_min);
 int64_t opal_cec_power_down(uint64_t request);
 int64_t opal_cec_reboot(void);
+int64_t opal_cec_reboot2(uint32_t reboot_type, char *diag);
 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
@@ -141,7 +142,8 @@
 int64_t opal_pci_reinit(uint64_t phb_id, uint64_t reinit_scope, uint64_t data);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(__be64 *status);
+int64_t opal_get_epow_status(__be16 *epow_status, __be16 *num_epow_classes);
+int64_t opal_get_dpo_status(__be64 *dpo_timeout);
 int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
 			    __be16 *pci_error_type, __be16 *severity);
@@ -195,6 +197,10 @@
 int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
 			 struct opal_i2c_request *oreq);
 int64_t opal_prd_msg(struct opal_prd_msg *msg);
+int64_t opal_leds_get_ind(char *loc_code, __be64 *led_mask,
+			  __be64 *led_value, __be64 *max_led_type);
+int64_t opal_leds_set_ind(uint64_t token, char *loc_code, const u64 led_mask,
+			  const u64 led_value, __be64 *max_led_type);
 
 int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
 		uint64_t size, uint64_t token);
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 712add5..37fc535 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -42,6 +42,7 @@
 #endif
 
 	int             (*dma_set_mask)(struct pci_dev *dev, u64 dma_mask);
+	u64		(*dma_get_required_mask)(struct pci_dev *dev);
 
 	void		(*shutdown)(struct pci_controller *);
 };
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 3bb7488..fa1dfb7 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -134,11 +134,11 @@
 
 #define pte_iterate_hashed_end() } while(0)
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
-#else
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
 #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
-#endif
 
 #endif /* __real_pte */
 
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 11a3863..0717693 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -169,6 +169,17 @@
 	 * cases, and 32-bit non-hash with 32-bit PTEs.
 	 */
 	*ptep = pte;
+
+#ifdef CONFIG_PPC_BOOK3E_64
+	/*
+	 * With hardware tablewalk, a sync is needed to ensure that
+	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
+	 * mappings, we can't tolerate spurious faults, so make sure
+	 * the new PTE will be seen the first time.
+	 */
+	if (is_kernel_addr(addr))
+		mb();
+#endif
 #endif
 }
 
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 4122a86..ca0c5bf 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -61,6 +61,7 @@
 int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
 void eeh_pe_state_mark(struct eeh_pe *pe, int state);
 void eeh_pe_state_clear(struct eeh_pe *pe, int state);
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
 void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
 
 void eeh_sysfs_add_device(struct pci_dev *pdev);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 28ded5d..5afea36 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -264,7 +264,6 @@
 	u64		tm_tfhar;	/* Transaction fail handler addr */
 	u64		tm_texasr;	/* Transaction exception & summary */
 	u64		tm_tfiar;	/* Transaction fail instr address reg */
-	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
 
 	unsigned long	tm_tar;
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index b7c8d07..71537a3 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -109,7 +109,8 @@
  * the processor might need it for DMA coherency.
  */
 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \
+	defined(CONFIG_PPC_E500MC)
 #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
 #else
 #define _PAGE_BASE	(_PAGE_BASE_NC)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index af56b5c..aa1cc5f0 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1193,8 +1193,7 @@
 #ifdef CONFIG_PPC_BOOK3S_64
 #define __mtmsrd(v, l)	asm volatile("mtmsrd %0," __stringify(l) \
 				     : : "r" (v) : "memory")
-#define mtmsrd(v)	__mtmsrd((v), 0)
-#define mtmsr(v)	mtmsrd(v)
+#define mtmsr(v)	__mtmsrd((v), 0)
 #else
 #define mtmsr(v)	asm volatile("mtmsr %0" : \
 				     : "r" ((unsigned long)(v)) \
@@ -1281,6 +1280,15 @@
 
 extern void ppc_save_regs(struct pt_regs *regs);
 
+static inline void update_power8_hid0(unsigned long hid0)
+{
+	/*
+	 *  The HID0 update on Power8 should at the very least be
+	 *  preceded by a SYNC instruction followed by an ISYNC
+	 *  instruction.
+	 */
+	asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 7a4ede1..b77ef36 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -343,6 +343,7 @@
 extern void rtas_halt(void);
 extern void rtas_os_term(char *str);
 extern int rtas_get_sensor(int sensor, int index, int *state);
+extern int rtas_get_sensor_fast(int sensor, int index, int *state);
 extern int rtas_get_power_level(int powerdomain, int *level);
 extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
 extern bool rtas_indicator_present(int token, int *maxindex);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 4dbe072..523673d 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -28,8 +28,6 @@
 #include <asm/synch.h>
 #include <asm/ppc-opcode.h>
 
-#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
-
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
 #ifdef __BIG_ENDIAN__
diff --git a/arch/powerpc/include/asm/spu_csa.h b/arch/powerpc/include/asm/spu_csa.h
index a40fd49..51f80b4 100644
--- a/arch/powerpc/include/asm/spu_csa.h
+++ b/arch/powerpc/include/asm/spu_csa.h
@@ -241,12 +241,6 @@
  */
 struct spu_state {
 	struct spu_lscsa *lscsa;
-#ifdef CONFIG_SPU_FS_64K_LS
-	int		use_big_pages;
-	/* One struct page per 64k page */
-#define SPU_LSCSA_NUM_BIG_PAGES	(sizeof(struct spu_lscsa) / 0x10000)
-	struct page	*lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
-#endif
 	struct spu_problem_collapsed prob;
 	struct spu_priv1_collapsed priv1;
 	struct spu_priv2_collapsed priv2;
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 58abeda..15cca17 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -29,6 +29,7 @@
 
 extern void enable_kernel_fp(void);
 extern void enable_kernel_altivec(void);
+extern void enable_kernel_vsx(void);
 extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index ff21b7a..ab9f3f0 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -22,10 +22,15 @@
 extern const unsigned long sys_call_table[];
 #endif /* CONFIG_FTRACE_SYSCALLS */
 
-static inline long syscall_get_nr(struct task_struct *task,
-				  struct pt_regs *regs)
+static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
 {
-	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1L;
+	/*
+	 * Note that we are returning an int here. That means 0xffffffff, ie.
+	 * 32-bit negative 1, will be interpreted as -1 on a 64-bit kernel.
+	 * This is important for seccomp so that compat tasks can set r0 = -1
+	 * to reject the syscall.
+	 */
+	return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
@@ -34,12 +39,6 @@
 	regs->gpr[3] = regs->orig_gpr3;
 }
 
-static inline long syscall_get_error(struct task_struct *task,
-				     struct pt_regs *regs)
-{
-	return (regs->ccr & 0x10000000) ? -regs->gpr[3] : 0;
-}
-
 static inline long syscall_get_return_value(struct task_struct *task,
 					    struct pt_regs *regs)
 {
@@ -50,9 +49,15 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
+	/*
+	 * In the general case it's not obvious that we must deal with CCR
+	 * here, as the syscall exit path will also do that for us. However
+	 * there are some places, eg. the signal code, which check ccr to
+	 * decide if the value in r3 is actually an error.
+	 */
 	if (error) {
 		regs->ccr |= 0x10000000L;
-		regs->gpr[3] = -error;
+		regs->gpr[3] = error;
 	} else {
 		regs->ccr &= ~0x10000000L;
 		regs->gpr[3] = val;
@@ -64,19 +69,22 @@
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
+	unsigned long val, mask = -1UL;
+
 	BUG_ON(i + n > 6);
-#ifdef CONFIG_PPC64
-	if (test_tsk_thread_flag(task, TIF_32BIT)) {
-		/*
-		 * Zero-extend 32-bit argument values.  The high bits are
-		 * garbage ignored by the actual syscall dispatch.
-		 */
-		while (n-- > 0)
-			args[n] = (u32) regs->gpr[3 + i + n];
-		return;
-	}
+
+#ifdef CONFIG_COMPAT
+	if (test_tsk_thread_flag(task, TIF_32BIT))
+		mask = 0xffffffff;
 #endif
-	memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+	while (n--) {
+		if (n == 0 && i == 0)
+			val = regs->orig_gpr3;
+		else
+			val = regs->gpr[3 + i + n];
+
+		args[n] = val & mask;
+	}
 }
 
 static inline void syscall_set_arguments(struct task_struct *task,
@@ -86,6 +94,10 @@
 {
 	BUG_ON(i + n > 6);
 	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+
+	/* Also copy the first argument into orig_gpr3 */
+	if (i == 0 && n > 0)
+		regs->orig_gpr3 = args[0];
 }
 
 static inline int syscall_get_arch(void)
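
The rewritten syscall_get_arguments() above does two things: it zero-extends every value to 32 bits for compat tasks, and it takes argument 0 from orig_gpr3 so tracers see the first argument as it was at syscall entry (r3 itself is later clobbered by the return value). A standalone restatement of just that loop, as an illustration only (the arrays below stand in for pt_regs and a 64-bit host is assumed):

#include <assert.h>
#include <stdbool.h>

static void get_args(unsigned long *args, const unsigned long *gpr3_to_8,
		     unsigned long orig_gpr3, unsigned int i, unsigned int n,
		     bool compat)
{
	unsigned long mask = compat ? 0xffffffffUL : ~0UL;

	while (n--) {
		/* argument 0 comes from orig_gpr3, the rest from gpr[3 + i + n] */
		unsigned long val = (n == 0 && i == 0) ? orig_gpr3
						       : gpr3_to_8[i + n];

		args[n] = val & mask;
	}
}

int main(void)
{
	unsigned long gpr[6] = { 0, 0xdeadbeefcafef00dUL, 2, 3, 4, 5 };
	unsigned long args[6];

	get_args(args, gpr, 0x1234, 0, 6, true);
	assert(args[0] == 0x1234);		/* taken from orig_gpr3 */
	assert(args[1] == 0xcafef00dUL);	/* zero-extended for compat */
	return 0;
}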
diff --git a/arch/powerpc/include/asm/trace_clock.h b/arch/powerpc/include/asm/trace_clock.h
new file mode 100644
index 0000000..cf1ee75
--- /dev/null
+++ b/arch/powerpc/include/asm/trace_clock.h
@@ -0,0 +1,19 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#ifndef _ASM_PPC_TRACE_CLOCK_H
+#define _ASM_PPC_TRACE_CLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+extern u64 notrace trace_clock_ppc_tb(void);
+
+#define ARCH_TRACE_CLOCKS { trace_clock_ppc_tb, "ppc-tb", 0 },
+
+#endif  /* _ASM_PPC_TRACE_CLOCK_H */
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index f44a0278..dab3717 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -6,6 +6,7 @@
 header-y += bootx.h
 header-y += byteorder.h
 header-y += cputable.h
+header-y += eeh.h
 header-y += elf.h
 header-y += epapr_hcalls.h
 header-y += errno.h
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
index 8c145fd..e8b6b5f 100644
--- a/arch/powerpc/include/uapi/asm/errno.h
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -6,6 +6,4 @@
 #undef	EDEADLOCK
 #define	EDEADLOCK	58	/* File locking deadlock error */
 
-#define _LAST_ERRNO	516
-
 #endif	/* _ASM_POWERPC_ERRNO_H */
diff --git a/arch/powerpc/include/uapi/asm/sigcontext.h b/arch/powerpc/include/uapi/asm/sigcontext.h
index 9c1f24f..3ad0c7f 100644
--- a/arch/powerpc/include/uapi/asm/sigcontext.h
+++ b/arch/powerpc/include/uapi/asm/sigcontext.h
@@ -28,7 +28,7 @@
 /*
  * To maintain compatibility with current implementations the sigcontext is
  * extended by appending a pointer (v_regs) to a quadword type (elf_vrreg_t)
- * followed by an unstructured (vmx_reserve) field of 69 doublewords.  This
+ * followed by an unstructured (vmx_reserve) field of 101 doublewords. This
  * allows the array of vector registers to be quadword aligned independent of
  * the alignment of the containing sigcontext or ucontext. It is the
  * responsibility of the code setting the sigcontext to set this pointer to
@@ -80,7 +80,7 @@
  * registers and vscr/vrsave.
  */
 	elf_vrreg_t	__user *v_regs;
-	long		vmx_reserve[ELF_NVRREG+ELF_NVRREG+32+1];
+	long		vmx_reserve[ELF_NVRREG + ELF_NVRREG + 1 + 32];
 #endif
 };
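
For reference, with ELF_NVRREG = 34 on ppc64 the reordered expression above still evaluates to 34 + 34 + 1 + 32 = 101, which is what the corrected comment earlier in this hunk now says (the old text claimed 69). A trivial compile-time restatement, assuming that ELF_NVRREG value:

/* Sanity check of the comment/array-size arithmetic above. */
#define ELF_NVRREG 34	/* assumption: value from asm/elf.h on ppc64 */
_Static_assert(ELF_NVRREG + ELF_NVRREG + 1 + 32 == 101,
	       "vmx_reserve spans 101 doublewords");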
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 12868b1..ba33693 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -118,6 +118,7 @@
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
+obj-$(CONFIG_TRACING)		+= trace_clock.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
 obj-y				+= iomap.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9823057..810f433 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -213,7 +213,6 @@
 		offsetof(struct tlb_core_data, esel_max));
 	DEFINE(TCD_ESEL_FIRST,
 		offsetof(struct tlb_core_data, esel_first));
-	DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock));
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PPC_STD_MMU_64
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 4c68bfe..41a7d9d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -73,7 +73,7 @@
 }
 
 /* We support DMA to/from any memory page via the iommu */
-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
+int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
 	struct iommu_table *tbl = get_iommu_table_base(dev);
 
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 6e8d764..c6689f6 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -47,8 +47,8 @@
  * for everything else.
  */
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc = dma_direct_alloc_coherent,
-	.free = dma_direct_free_coherent,
+	.alloc = __dma_direct_alloc_coherent,
+	.free = __dma_direct_free_coherent,
 	.mmap = dma_direct_mmap_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 35e4dcc..59503ed 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -16,6 +16,7 @@
 #include <asm/bug.h>
 #include <asm/machdep.h>
 #include <asm/swiotlb.h>
+#include <asm/iommu.h>
 
 /*
  * Generic direct DMA implementation
@@ -39,9 +40,31 @@
 	return pfn;
 }
 
-void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag,
-				struct dma_attrs *attrs)
+static int dma_direct_dma_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_PPC64
+	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
+
+	/* Limit fits in the mask, we are good */
+	if (mask >= limit)
+		return 1;
+
+#ifdef CONFIG_FSL_SOC
+	/* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
+	 * that will have to be refined if/when they support iommus
+	 */
+	return 1;
+#endif
+	/* Sorry ... */
+	return 0;
+#else
+	return 1;
+#endif
+}
+
+void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *dma_handle, gfp_t flag,
+				  struct dma_attrs *attrs)
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -96,9 +119,9 @@
 #endif
 }
 
-void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle,
-			      struct dma_attrs *attrs)
+void __dma_direct_free_coherent(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle,
+				struct dma_attrs *attrs)
 {
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	__dma_free_coherent(size, vaddr);
@@ -107,6 +130,51 @@
 #endif
 }
 
+static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
+{
+	struct iommu_table *iommu;
+
+	/* The coherent mask may be smaller than the real mask; check if
+	 * we can really use the direct ops
+	 */
+	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_direct_alloc_coherent(dev, size, dma_handle,
+						   flag, attrs);
+
+	/* Ok we can't ... do we have an iommu ? If not, fail */
+	iommu = get_iommu_table_base(dev);
+	if (!iommu)
+		return NULL;
+
+	/* Try to use the iommu */
+	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
+				    dev->coherent_dma_mask, flag,
+				    dev_to_node(dev));
+}
+
+static void dma_direct_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
+{
+	struct iommu_table *iommu;
+
+	/* See comments in dma_direct_alloc_coherent() */
+	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+						  attrs);
+	/* Maybe we used an iommu ... */
+	iommu = get_iommu_table_base(dev);
+
+	/* If we hit this, we should never have allocated in the first
+	 * place, so why are we freeing?
+	 */
+	if (WARN_ON(!iommu))
+		return;
+	iommu_free_coherent(iommu, size, vaddr, dma_handle);
+}
+
 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			     void *cpu_addr, dma_addr_t handle, size_t size,
 			     struct dma_attrs *attrs)
@@ -147,18 +215,6 @@
 {
 }
 
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_PPC64
-	/* Could be improved so platforms can set the limit in case
-	 * they have limited DMA windows
-	 */
-	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
-#else
-	return 1;
-#endif
-}
-
 static u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 end, mask;
@@ -230,6 +286,25 @@
 };
 EXPORT_SYMBOL(dma_direct_ops);
 
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+	if (!dma_supported(dev, mask)) {
+		/*
+		 * We need to special case the direct DMA ops which can
+		 * support a fallback for coherent allocations. There
+		 * is no dma_op->set_coherent_mask() so we have to do
+		 * things the hard way:
+		 */
+		if (get_dma_ops(dev) != &dma_direct_ops ||
+		    get_iommu_table_base(dev) == NULL ||
+		    !dma_iommu_dma_supported(dev, mask))
+			return -EIO;
+	}
+	dev->coherent_dma_mask = mask;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dma_set_coherent_mask);
+
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
 
 int __dma_set_mask(struct device *dev, u64 dma_mask)
@@ -278,6 +353,13 @@
 	if (ppc_md.dma_get_required_mask)
 		return ppc_md.dma_get_required_mask(dev);
 
+	if (dev_is_pci(dev)) {
+		struct pci_dev *pdev = to_pci_dev(dev);
+		struct pci_controller *phb = pci_bus_to_host(pdev->bus);
+		if (phb->controller_ops.dma_get_required_mask)
+			return phb->controller_ops.dma_get_required_mask(pdev);
+	}
+
 	return __dma_get_required_mask(dev);
 }
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
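
Taken together, the dma.c hunks above let a device with a narrow coherent DMA mask still get coherent memory: dma_direct_alloc_coherent() first asks dma_direct_dma_supported() whether the coherent mask covers all of RAM plus the DMA offset, and only if it does not, falls back to iommu_alloc_coherent() on the device's IOMMU table. The core test, reduced to plain arithmetic (standalone sketch, not kernel code; ram_top and dma_offset stand in for memblock_end_of_DRAM() and get_dma_offset()):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool direct_dma_ok(uint64_t mask, uint64_t dma_offset, uint64_t ram_top)
{
	uint64_t limit = dma_offset + (ram_top - 1);

	return mask >= limit;	/* otherwise fall back to the IOMMU path */
}

int main(void)
{
	/* 4GB of RAM, no offset: a 32-bit mask is just enough */
	assert(direct_dma_ok(0xffffffffULL, 0, 1ULL << 32));
	/* but not if the DMA offset pushes RAM above 4GB */
	assert(!direct_dma_ok(0xffffffffULL, 1ULL << 30, 1ULL << 32));
	return 0;
}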
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index af9b597..e968533 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -308,11 +308,26 @@
 	if (!(pe->type & EEH_PE_PHB)) {
 		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
-		eeh_ops->configure_bridge(pe);
-		eeh_pe_restore_bars(pe);
 
-		pci_regs_buf[0] = 0;
-		eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+		/*
+		 * The config space of some PCI devices can't be accessed
+		 * when their PEs are in the frozen state. Otherwise, a fenced
+		 * PHB might be seen. Those PEs are identified with the flag
+		 * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED
+		 * is set automatically when the PE is put to EEH_PE_ISOLATED.
+		 *
+		 * Restoring BARs possibly triggers PCI config access in
+		 * (OPAL) firmware and can then cause a fenced PHB. If the
+		 * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
+		 * pointless to restore BARs and dump config space.
+		 */
+		eeh_ops->configure_bridge(pe);
+		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
+			eeh_pe_restore_bars(pe);
+
+			pci_regs_buf[0] = 0;
+			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
+		}
 	}
 
 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
@@ -750,14 +765,14 @@
 		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
 		break;
 	case pcie_hot_reset:
-		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 		eeh_ops->reset(pe, EEH_RESET_HOT);
 		break;
 	case pcie_warm_reset:
-		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+		eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 		eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
 		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
@@ -1116,9 +1131,6 @@
 		return;
 	}
 
-	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
-		eeh_ops->probe(pdn, NULL);
-
 	/*
 	 * The EEH cache might not be removed correctly because of
 	 * unbalanced kref to the device during unplug time, which
@@ -1142,6 +1154,9 @@
 		dev->dev.archdata.edev = NULL;
 	}
 
+	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+		eeh_ops->probe(pdn, NULL);
+
 	edev->pdev = dev;
 	dev->dev.archdata.edev = edev;
 
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 35f0b62..8654cb1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -657,6 +657,28 @@
 	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
 }
 
+/**
+ * eeh_pe_state_mark_with_cfg - Mark PE state with unblocked config space
+ * @pe: PE
+ * @state: PE state to be set
+ *
+ * Set the specified flag on the PE and its child PEs. The PCI config
+ * space of some PEs is blocked automatically when EEH_PE_ISOLATED is
+ * set, which isn't needed in some situations. This function sets the
+ * specified flag on the indicated PEs without blocking their PCI
+ * config space.
+ */
+void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state)
+{
+	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
+	if (!(state & EEH_PE_ISOLATED))
+		return;
+
+	/* Clear EEH_PE_CFG_BLOCKED, which might be set just now */
+	state = EEH_PE_CFG_BLOCKED;
+	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
+}
+
 /*
  * Some PCI bridges (e.g. PLX bridges) have primary/secondary
  * buses assigned explicitly by firmware, and we probably have
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 46fc0f4..2405631 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -20,6 +20,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/sys.h>
 #include <linux/threads.h>
 #include <asm/reg.h>
@@ -354,7 +355,7 @@
 	SYNC
 	MTMSRD(r10)
 	lwz	r9,TI_FLAGS(r12)
-	li	r8,-_LAST_ERRNO
+	li	r8,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmplw	0,r3,r8
@@ -457,6 +458,10 @@
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
 	REST_NVGPRS(r1)
+
+	cmplwi	r0,NR_syscalls
+	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
+	bge-	ret_from_syscall
 	b	syscall_dotrace_cont
 
 syscall_exit_work:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 579e0f9..a94f155 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -19,6 +19,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -150,8 +151,7 @@
 	CURRENT_THREAD_INFO(r11, r1)
 	ld	r10,TI_FLAGS(r11)
 	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
-	bne	syscall_dotrace
-.Lsyscall_dotrace_cont:
+	bne	syscall_dotrace		/* does not return */
 	cmpldi	0,r0,NR_syscalls
 	bge-	syscall_enosys
 
@@ -207,7 +207,7 @@
 #endif /* CONFIG_PPC_BOOK3E */
 
 	ld	r9,TI_FLAGS(r12)
-	li	r11,-_LAST_ERRNO
+	li	r11,-MAX_ERRNO
 	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 	bne-	syscall_exit_work
 	cmpld	r3,r11
@@ -245,22 +245,34 @@
 	bl	save_nvgprs
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_syscall_trace_enter
+
 	/*
-	 * Restore argument registers possibly just changed.
-	 * We use the return value of do_syscall_trace_enter
-	 * for the call number to look up in the table (r0).
+	 * We use the return value of do_syscall_trace_enter() as the syscall
+	 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
+	 * returns an invalid syscall number and the test below against
+	 * NR_syscalls will fail.
 	 */
 	mr	r0,r3
+
+	/* Restore argument registers just clobbered and/or possibly changed. */
 	ld	r3,GPR3(r1)
 	ld	r4,GPR4(r1)
 	ld	r5,GPR5(r1)
 	ld	r6,GPR6(r1)
 	ld	r7,GPR7(r1)
 	ld	r8,GPR8(r1)
+
+	/* Repopulate r9 and r10 for the system_call path */
 	addi	r9,r1,STACK_FRAME_OVERHEAD
 	CURRENT_THREAD_INFO(r10, r1)
 	ld	r10,TI_FLAGS(r10)
-	b	.Lsyscall_dotrace_cont
+
+	cmpldi	r0,NR_syscalls
+	blt+	system_call
+
+	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
+	b	.Lsyscall_exit
+
 
 syscall_enosys:
 	li	r3,-ENOSYS
@@ -277,7 +289,7 @@
 	beq+	0f
 	REST_NVGPRS(r1)
 	b	2f
-0:	cmpld	r3,r11		/* r10 is -LAST_ERRNO */
+0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
 	blt+	1f
 	andi.	r0,r9,_TIF_NOERROR
 	bne-	1f
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 3e68d1c..f3bd5e7 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1313,11 +1313,14 @@
 	sync
 	isync
 
-/* The mapping only needs to be cache-coherent on SMP */
-#ifdef CONFIG_SMP
-#define M_IF_SMP	MAS2_M
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED	MAS2_M
 #else
-#define M_IF_SMP	0
+#define M_IF_NEEDED	0
 #endif
 
 /* 6. Setup KERNELBASE mapping in TLB[0]
@@ -1332,7 +1335,7 @@
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
 	mtspr	SPRN_MAS1,r6
 
-	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
+	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
 	mtspr	SPRN_MAS2,r6
 
 	rlwinm	r5,r5,0,0,25
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
index f22e7e4..83dd0f6 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/fsl_booke_entry_mapping.S
@@ -152,11 +152,14 @@
 	tlbivax 0,r9
 	TLBSYNC
 
-/* The mapping only needs to be cache-coherent on SMP */
-#ifdef CONFIG_SMP
-#define M_IF_SMP	MAS2_M
+/*
+ * The mapping only needs to be cache-coherent on SMP, except on
+ * Freescale e500mc derivatives where it's also needed for coherent DMA.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define M_IF_NEEDED	MAS2_M
 #else
-#define M_IF_SMP	0
+#define M_IF_NEEDED	0
 #endif
 
 #if defined(ENTRY_MAPPING_BOOT_SETUP)
@@ -167,8 +170,8 @@
 	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
 	mtspr	SPRN_MAS1,r6
-	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
-	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
+	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@h
+	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_NEEDED)@l
 	mtspr	SPRN_MAS2,r6
 	mtspr	SPRN_MAS3,r8
 	tlbwe
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index a1ed8a8..6472472 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -17,7 +17,7 @@
 {
 	u32 *addr = (u32 *)(unsigned long)entry->code;
 
-	if (type == JUMP_LABEL_ENABLE)
+	if (type == JUMP_LABEL_JMP)
 		patch_branch(addr, entry->target, 0);
 	else
 		patch_instruction(addr, PPC_INST_NOP);
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 33aa4dd..9ad37f8 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -649,7 +649,6 @@
 			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
 		}
 		break;
-		break;
 #endif
 	}
 
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7c6bb4b..ed3ab50 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -596,25 +596,6 @@
 	b	2b
 
 /*
- * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
- * void atomic_set_mask(atomic_t mask, atomic_t *addr);
- */
-_GLOBAL(atomic_clear_mask)
-10:	lwarx	r5,0,r4
-	andc	r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx.	r5,0,r4
-	bne-	10b
-	blr
-_GLOBAL(atomic_set_mask)
-10:	lwarx	r5,0,r4
-	or	r5,r5,r3
-	PPC405_ERR77(0,r4)
-	stwcx.	r5,0,r4
-	bne-	10b
-	blr
-
-/*
  * Extended precision shifts.
  *
  * Updated to be valid for shift counts from 0 to 63 inclusive.
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 4e314b9..6e4168c 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -475,9 +475,18 @@
 #ifdef CONFIG_KEXEC		/* use no memory without kexec */
 	lwz	r4,0(r5)
 	cmpwi	0,r4,0
-	bnea	0x60
+	beq	99b
+#ifdef CONFIG_PPC_BOOK3S_64
+	li	r10,0x60
+	mfmsr	r11
+	clrrdi	r11,r11,1	/* Clear MSR_LE */
+	mtsrr0	r10
+	mtsrr1	r11
+	rfid
+#else
+	ba	0x60
 #endif
-	b	99b
+#endif
 
 /* this can be in text because we won't change it until we are
  * running in real anyways
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 1e703f8..98ba106 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -541,10 +541,9 @@
 			time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
 			time->tv_nsec = 0;
 		}
-		*buf = kmalloc(length, GFP_KERNEL);
+		*buf = kmemdup(buff + hdr_size, length, GFP_KERNEL);
 		if (*buf == NULL)
 			return -ENOMEM;
-		memcpy(*buf, buff + hdr_size, length);
 		kfree(buff);
 
 		if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
@@ -582,9 +581,10 @@
 	spin_lock_init(&nvram_pstore_info.buf_lock);
 
 	rc = pstore_register(&nvram_pstore_info);
-	if (rc != 0)
-		pr_err("nvram: pstore_register() failed, defaults to "
-				"kmsg_dump; returned %d\n", rc);
+	if (rc && (rc != -EPERM))
+		/* Print error only when pstore.backend == nvram */
+		pr_err("nvram: pstore_register() failed, returned %d. "
+				"Defaults to kmsg_dump\n", rc);
 
 	return rc;
 }
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index b9de34d..a1d0632 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -823,23 +823,15 @@
 		    (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
 			/* Only print message if not re-assigning */
 			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
-				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
-					 "is unassigned\n",
-					 pci_name(dev), i,
-					 (unsigned long long)res->start,
-					 (unsigned long long)res->end,
-					 (unsigned int)res->flags);
+				pr_debug("PCI:%s Resource %d %pR is unassigned\n",
+					 pci_name(dev), i, res);
 			res->end -= res->start;
 			res->start = 0;
 			res->flags |= IORESOURCE_UNSET;
 			continue;
 		}
 
-		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
-			 pci_name(dev), i,
-			 (unsigned long long)res->start,\
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags);
+		pr_debug("PCI:%s Resource %d %pR\n", pci_name(dev), i, res);
 	}
 
 	/* Call machine specific resource fixup */
@@ -943,11 +935,7 @@
 			continue;
 		}
 
-		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
-			 pci_name(dev), i,
-			 (unsigned long long)res->start,\
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags);
+		pr_debug("PCI:%s Bus rsrc %d %pR\n", pci_name(dev), i, res);
 
 		/* Try to detect uninitialized P2P bridge resources,
 		 * and clear them out so they get re-assigned later
@@ -1044,13 +1032,7 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	/* When called from the generic PCI probe, read PCI<->PCI bridge
-	 * bases. This is -not- called when generating the PCI tree from
-	 * the OF device-tree.
-	 */
-	pci_read_bridge_bases(bus);
-
-	/* Now fixup the bus bus */
+	/* Fixup the bus */
 	pcibios_setup_bus_self(bus);
 
 	/* Now fixup devices on that bus */
@@ -1132,10 +1114,8 @@
 	*pp = NULL;
 	for (p = res->child; p != NULL; p = p->sibling) {
 		p->parent = res;
-		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
-			 p->name,
-			 (unsigned long long)p->start,
-			 (unsigned long long)p->end, res->name);
+		pr_debug("PCI: Reparented %s %pR under %s\n",
+			 p->name, p, res->name);
 	}
 	return 0;
 }
@@ -1204,14 +1184,9 @@
 			}
 		}
 
-		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
-			 "[0x%x], parent %p (%s)\n",
-			 bus->self ? pci_name(bus->self) : "PHB",
-			 bus->number, i,
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned int)res->flags,
-			 pr, (pr && pr->name) ? pr->name : "nil");
+		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %pR, parent %p (%s)\n",
+			 bus->self ? pci_name(bus->self) : "PHB", bus->number,
+			 i, res, pr, (pr && pr->name) ? pr->name : "nil");
 
 		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
 			struct pci_dev *dev = bus->self;
@@ -1253,11 +1228,8 @@
 {
 	struct resource *pr, *r = &dev->resource[idx];
 
-	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
-		 pci_name(dev), idx,
-		 (unsigned long long)r->start,
-		 (unsigned long long)r->end,
-		 (unsigned int)r->flags);
+	pr_debug("PCI: Allocating %s: Resource %d: %pR\n",
+		 pci_name(dev), idx, r);
 
 	pr = pci_find_parent_resource(dev, r);
 	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
@@ -1265,11 +1237,7 @@
 		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
 		       " of device %s, will remap\n", idx, pci_name(dev));
 		if (pr)
-			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
-				 pr,
-				 (unsigned long long)pr->start,
-				 (unsigned long long)pr->end,
-				 (unsigned int)pr->flags);
+			pr_debug("PCI:  parent is %p: %pR\n", pr, pr);
 		/* We'll assign a new address later */
 		r->flags |= IORESOURCE_UNSET;
 		r->end -= r->start;
@@ -1431,12 +1399,8 @@
 			if (r->parent || !r->start || !r->flags)
 				continue;
 
-			pr_debug("PCI: Claiming %s: "
-				 "Resource %d: %016llx..%016llx [%x]\n",
-				 pci_name(dev), i,
-				 (unsigned long long)r->start,
-				 (unsigned long long)r->end,
-				 (unsigned int)r->flags);
+			pr_debug("PCI: Claiming %s: Resource %d: %pR\n",
+				 pci_name(dev), i, r);
 
 			if (pci_claim_resource(dev, i) == 0)
 				continue;
@@ -1520,11 +1484,8 @@
 	} else {
 		offset = pcibios_io_space_offset(hose);
 
-		pr_debug("PCI: PHB IO resource    = %08llx-%08llx [%lx] off 0x%08llx\n",
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned long)res->flags,
-			 (unsigned long long)offset);
+		pr_debug("PCI: PHB IO resource    = %pR off 0x%08llx\n",
+			 res, (unsigned long long)offset);
 		pci_add_resource_offset(resources, res, offset);
 	}
 
@@ -1541,11 +1502,8 @@
 		offset = hose->mem_offset[i];
 
 
-		pr_debug("PCI: PHB MEM resource %d = %08llx-%08llx [%lx] off 0x%08llx\n", i,
-			 (unsigned long long)res->start,
-			 (unsigned long long)res->end,
-			 (unsigned long)res->flags,
-			 (unsigned long long)offset);
+		pr_debug("PCI: PHB MEM resource %d = %pR off 0x%08llx\n", i,
+			 res, (unsigned long long)offset);
 
 		pci_add_resource_offset(resources, res, offset);
 	}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 42e02a2..c8c62c7 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -126,7 +126,6 @@
 {
 	struct pci_dev *dev;
 	const char *type;
-	struct pci_slot *slot;
 
 	dev = pci_alloc_dev(bus);
 	if (!dev)
@@ -145,10 +144,7 @@
 	dev->needs_freset = 0;		/* pcie fundamental reset required */
 	set_pcie_port_type(dev);
 
-	list_for_each_entry(slot, &dev->bus->slots, list)
-		if (PCI_SLOT(dev->devfn) == slot->number)
-			dev->slot = slot;
-
+	pci_dev_assign_slot(dev);
 	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
 	dev->device = get_int_prop(node, "device-id", 0xffff);
 	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
@@ -191,6 +187,9 @@
 
 	pci_device_add(dev, bus);
 
+	/* Setup MSI caps & disable MSI/MSI-X interrupts */
+	pci_msi_setup_pci_dev(dev);
+
 	return dev;
 }
 EXPORT_SYMBOL(of_create_pci_dev);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8005e18..75b6676 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -86,7 +86,7 @@
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 
@@ -104,7 +104,7 @@
 	if (tsk == current && tsk->thread.regs &&
 	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
 	    !test_thread_flag(TIF_RESTORE_TM)) {
-		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
+		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
 		set_thread_flag(TIF_RESTORE_TM);
 	}
 
@@ -204,8 +204,6 @@
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-#if 0
-/* not currently used, but some crazy RAID module might want to later */
 void enable_kernel_vsx(void)
 {
 	WARN_ON(preemptible());
@@ -220,7 +218,6 @@
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
-#endif
 
 void giveup_vsx(struct task_struct *tsk)
 {
@@ -543,7 +540,7 @@
 	 * the thread will no longer be transactional.
 	 */
 	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
-		msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
+		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
 		if (msr_diff & MSR_FP)
 			memcpy(&thr->transact_fp, &thr->fp_state,
 			       sizeof(struct thread_fp_state));
@@ -594,10 +591,10 @@
 	/* Stash the original thread MSR, as giveup_fpu et al will
 	 * modify it.  We hold onto it to see whether the task used
 	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
-	 * tm_orig_msr is already set.
+	 * ckpt_regs.msr is already set.
 	 */
 	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
-		thr->tm_orig_msr = thr->regs->msr;
+		thr->ckpt_regs.msr = thr->regs->msr;
 
 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
@@ -666,7 +663,7 @@
 		tm_restore_sprs(&new->thread);
 		return;
 	}
-	msr = new->thread.tm_orig_msr;
+	msr = new->thread.ckpt_regs.msr;
 	/* Recheckpoint to restore original checkpointed register state. */
 	TM_DEBUG("*** tm_recheckpoint of pid %d "
 		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
@@ -726,7 +723,7 @@
 	if (!MSR_TM_ACTIVE(regs->msr))
 		return;
 
-	msr_diff = current->thread.tm_orig_msr & ~regs->msr;
+	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 	if (msr_diff & MSR_FP) {
 		fp_enable();
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b888b1..bef76c5 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -218,22 +218,18 @@
 }
 
 #ifdef CONFIG_PPC_STD_MMU_64
-static void __init check_cpu_slb_size(unsigned long node)
+static void __init init_mmu_slb_size(unsigned long node)
 {
 	const __be32 *slb_size_ptr;
 
-	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
-	if (slb_size_ptr != NULL) {
+	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
+			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
+
+	if (slb_size_ptr)
 		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-		return;
-	}
-	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
-	if (slb_size_ptr != NULL) {
-		mmu_slb_size = be32_to_cpup(slb_size_ptr);
-	}
 }
 #else
-#define check_cpu_slb_size(node) do { } while(0)
+#define init_mmu_slb_size(node) do { } while(0)
 #endif
 
 static struct feature_property {
@@ -380,7 +376,7 @@
 
 	check_cpu_feature_properties(node);
 	check_cpu_pa_features(node);
-	check_cpu_slb_size(node);
+	init_mmu_slb_size(node);
 
 #ifdef CONFIG_PPC64
 	if (nthreads > 1)
@@ -476,9 +472,10 @@
 		flags = of_read_number(&dm[3], 1);
 		/* skip DRC index, pad, assoc. list index, flags */
 		dm += 4;
-		/* skip this block if the reserved bit is set in flags (0x80)
-		   or if the block is not assigned to this partition (0x8) */
-		if ((flags & 0x80) || !(flags & 0x8))
+		/* skip this block if the reserved bit is set in flags
+		   or if the block is not assigned to this partition */
+		if ((flags & DRCONF_MEM_RESERVED) ||
+				!(flags & DRCONF_MEM_ASSIGNED))
 			continue;
 		size = memblock_size;
 		rngs = 1;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index fcca807..15099c4 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -641,6 +641,15 @@
 #define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
 		((x) >> 8) & 0xff, (x) & 0xff
 
+/* Firmware expects the value to be n - 1, where n is the # of vectors */
+#define NUM_VECTORS(n)		((n) - 1)
+
+/*
+ * Firmware expects 1 + n - 2, where n is the length of the option vector in
+ * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
+ */
+#define VECTOR_LENGTH(n)	(1 + (n) - 2)
+
 unsigned char ibm_architecture_vec[] = {
 	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
@@ -651,16 +660,16 @@
 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
 	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
 	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
-	6 - 1,				/* 6 option vectors */
+	NUM_VECTORS(6),			/* 6 option vectors */
 
 	/* option vector 1: processor architectures supported */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
 	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
 
 	/* option vector 2: Open Firmware options supported */
-	34 - 2,				/* length */
+	VECTOR_LENGTH(33),		/* length */
 	OV2_REAL_MODE,
 	0, 0,
 	W(0xffffffff),			/* real_base */
@@ -674,17 +683,17 @@
 	48,				/* max log_2(hash table size) */
 
 	/* option vector 3: processor options supported */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV3_FP | OV3_VMX | OV3_DFP,
 
 	/* option vector 4: IBM PAPR implementation */
-	3 - 2,				/* length */
+	VECTOR_LENGTH(2),		/* length */
 	0,				/* don't halt */
 	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
 
 	/* option vector 5: PAPR/OF options */
-	19 - 2,				/* length */
+	VECTOR_LENGTH(18),		/* length */
 	0,				/* don't ignore, don't halt */
 	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
 	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
@@ -717,12 +726,12 @@
 	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
 	OV5_FEAT(OV5_PFO_HW_842),
 	OV5_FEAT(OV5_SUB_PROCESSORS),
+
 	/* option vector 6: IBM PAPR hints */
-	4 - 2,				/* length */
+	VECTOR_LENGTH(3),		/* length */
 	0,
 	0,
 	OV6_LINUX,
-
 };
 
 /* Old method - ELF header with PT_NOTE sections only works on BE */
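
NUM_VECTORS() and VECTOR_LENGTH() only re-express the magic arithmetic that was already there; both reduce to n - 1, so for example the old 34 - 2 for option vector 2 equals VECTOR_LENGTH(33). A compile-time restatement of that equivalence (macros copied from the hunk above):

/* Checks that the new macros encode the same bytes as the old literals. */
#define NUM_VECTORS(n)		((n) - 1)
#define VECTOR_LENGTH(n)	(1 + (n) - 2)

_Static_assert(NUM_VECTORS(6) == 6 - 1, "option vector count unchanged");
_Static_assert(VECTOR_LENGTH(2) == 3 - 2, "vector 1 length unchanged");
_Static_assert(VECTOR_LENGTH(33) == 34 - 2, "vector 2 length unchanged");
_Static_assert(VECTOR_LENGTH(18) == 19 - 2, "vector 5 length unchanged");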
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index f21897b..737c0d0 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1762,26 +1762,81 @@
 	return ret;
 }
 
-/*
- * We must return the syscall number to actually look up in the table.
- * This can be -1L to skip running any syscall at all.
+#ifdef CONFIG_SECCOMP
+static int do_seccomp(struct pt_regs *regs)
+{
+	if (!test_thread_flag(TIF_SECCOMP))
+		return 0;
+
+	/*
+	 * The ABI we present to seccomp tracers is that r3 contains
+	 * the syscall return value and orig_gpr3 contains the first
+	 * syscall parameter. This is different to the ptrace ABI where
+	 * both r3 and orig_gpr3 contain the first syscall parameter.
+	 */
+	regs->gpr[3] = -ENOSYS;
+
+	/*
+	 * We use the __ version here because we have already checked
+	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
+	 * have already loaded -ENOSYS into r3, or seccomp has put
+	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+	 */
+	if (__secure_computing())
+		return -1;
+
+	/*
+	 * The syscall was allowed by seccomp, restore the register
+	 * state to what ptrace and audit expect.
+	 * Note that we use orig_gpr3, which means a seccomp tracer can
+	 * modify the first syscall parameter (in orig_gpr3) and also
+	 * allow the syscall to proceed.
+	 */
+	regs->gpr[3] = regs->orig_gpr3;
+
+	return 0;
+}
+#else
+static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_SECCOMP */
+
+/**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+ *
+ * Performs various types of tracing on syscall entry. This includes seccomp,
+ * ptrace, syscall tracepoints and audit.
+ *
+ * The pt_regs are potentially visible to userspace via ptrace, so their
+ * contents are ABI.
+ *
+ * One or more of the tracers may modify the contents of pt_regs, in particular
+ * to modify arguments or even the syscall number itself.
+ *
+ * It's also possible that a tracer can choose to reject the system call. In
+ * that case this function will return an illegal syscall number, and will put
+ * an appropriate return value in regs->r3.
+ *
+ * Return: the (possibly changed) syscall number.
  */
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
-	long ret = 0;
+	bool abort = false;
 
 	user_exit();
 
-	secure_computing_strict(regs->gpr[0]);
+	if (do_seccomp(regs))
+		return -1;
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-	    tracehook_report_syscall_entry(regs))
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
 		/*
-		 * Tracing decided this syscall should not happen.
-		 * We'll return a bogus call number to get an ENOSYS
-		 * error, but leave the original number in regs->gpr[0].
+		 * The tracer may decide to abort the syscall; if so, tracehook
+		 * will return !0. Note that the tracer may also just change
+		 * regs->gpr[0] to an invalid syscall number, that is handled
+		 * below on the exit path.
 		 */
-		ret = -1L;
+		abort = tracehook_report_syscall_entry(regs) != 0;
+	}
 
 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
 		trace_sys_enter(regs, regs->gpr[0]);
@@ -1798,7 +1853,17 @@
 				    regs->gpr[5] & 0xffffffff,
 				    regs->gpr[6] & 0xffffffff);
 
-	return ret ?: regs->gpr[0];
+	if (abort || regs->gpr[0] >= NR_syscalls) {
+		/*
+		 * If we are aborting explicitly, or if the syscall number is
+		 * now invalid, set the return value to -ENOSYS.
+		 */
+		regs->gpr[3] = -ENOSYS;
+		return -1;
+	}
+
+	/* Return the possibly modified but valid syscall number */
+	return regs->gpr[0];
 }
 
 void do_syscall_trace_leave(struct pt_regs *regs)
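
With do_seccomp() wired into the entry path, a seccomp filter can both reject a syscall (the SECCOMP_RET_ERRNO value ends up in r3) and, via orig_gpr3, inspect or rewrite its first argument. Purely as an illustration of the rejection case, a minimal userspace filter that fails getpid() with ENOSYS might look like the sketch below (generic seccomp/BPF uapi, nothing powerpc-specific, and not part of this patch):

/*
 * Hedged example: install a seccomp-BPF filter that rejects getpid()
 * with ENOSYS and allows everything else. Error handling is minimal;
 * a real filter would also check seccomp_data.arch before trusting
 * the syscall number.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter filter[] = {
		/* Load the syscall number from seccomp_data */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		/* If it is getpid, return ENOSYS, otherwise allow */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};
	long ret;

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
		return 1;

	errno = 0;
	ret = syscall(__NR_getpid);
	printf("getpid() returned %ld, errno %d\n", ret, errno);
	return 0;
}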
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7a488c1..84bf934 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -478,8 +478,9 @@
 
 	if (status == RTAS_BUSY) {
 		ms = 1;
-	} else if (status >= 9900 && status <= 9905) {
-		order = status - 9900;
+	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
+		   status <= RTAS_EXTENDED_DELAY_MAX) {
+		order = status - RTAS_EXTENDED_DELAY_MIN;
 		for (ms = 1; order > 0; order--)
 			ms *= 10;
 	}
@@ -584,6 +585,23 @@
 }
 EXPORT_SYMBOL(rtas_get_sensor);
 
+int rtas_get_sensor_fast(int sensor, int index, int *state)
+{
+	int token = rtas_token("get-sensor-state");
+	int rc;
+
+	if (token == RTAS_UNKNOWN_SERVICE)
+		return -ENOENT;
+
+	rc = rtas_call(token, 2, 2, state, sensor, index);
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
+
+	if (rc < 0)
+		return rtas_error_rc(rc);
+	return rc;
+}
+
 bool rtas_indicator_present(int token, int *maxindex)
 {
 	int proplen, count, i;
@@ -641,7 +659,8 @@
 
 	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
 
-	WARN_ON(rc == -2 || (rc >= 9900 && rc <= 9905));
+	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
+				    rc <= RTAS_EXTENDED_DELAY_MAX));
 
 	if (rc < 0)
 		return rtas_error_rc(rc);
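
RTAS_BUSY and RTAS_EXTENDED_DELAY_MIN/MAX replace the bare -2 and 9900..9905 literals; an extended delay status asks the caller to wait 10^(status - 9900) milliseconds before retrying. The decoding shown in the first rtas.c hunk above, restated as a standalone sketch (constant values assumed to match the literals they replace):

#include <assert.h>

#define RTAS_BUSY			-2
#define RTAS_EXTENDED_DELAY_MIN		9900
#define RTAS_EXTENDED_DELAY_MAX		9905

static unsigned int rtas_busy_delay_ms(int status)
{
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		int order = status - RTAS_EXTENDED_DELAY_MIN;

		for (ms = 1; order > 0; order--)
			ms *= 10;
	}
	return ms;
}

int main(void)
{
	assert(rtas_busy_delay_ms(RTAS_BUSY) == 1);
	assert(rtas_busy_delay_ms(9902) == 100);	/* 10^2 ms */
	assert(rtas_busy_delay_ms(9905) == 100000);	/* 10^5 ms */
	return 0;
}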
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index da50e0c..0dbee46 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -949,6 +949,11 @@
 		err |= __put_user(s->si_overrun, &d->si_overrun);
 		err |= __put_user(s->si_int, &d->si_int);
 		break;
+	case __SI_SYS >> 16:
+		err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
+		err |= __put_user(s->si_syscall, &d->si_syscall);
+		err |= __put_user(s->si_arch, &d->si_arch);
+		break;
 	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
 	case __SI_MESGQ >> 16:
 		err |= __put_user(s->si_int, &d->si_int);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index c7c24d2..20756df 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -74,6 +74,19 @@
 	"%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";
 
 /*
+ * This computes a quad word aligned pointer inside the vmx_reserve array
+ * element. For historical reasons sigcontext might not be quad word aligned,
+ * but the location we write the VMX regs to must be. See the comment in
+ * sigcontext for more detail.
+ */
+#ifdef CONFIG_ALTIVEC
+static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
+{
+	return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+}
+#endif
+
+/*
  * Set up the sigcontext for the signal frame.
  */
 
@@ -90,7 +103,7 @@
 	 * v_regs pointer or not
 	 */
 #ifdef CONFIG_ALTIVEC
-	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
@@ -181,10 +194,8 @@
 	 * v_regs pointer or not.
 	 */
 #ifdef CONFIG_ALTIVEC
-	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
-		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
-	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
-		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
+	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
+	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 4392250..1be1092 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -99,16 +99,17 @@
 
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev);
-static void decrementer_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *dev);
+static int decrementer_shutdown(struct clock_event_device *evt);
 
 struct clock_event_device decrementer_clockevent = {
-	.name           = "decrementer",
-	.rating         = 200,
-	.irq            = 0,
-	.set_next_event = decrementer_set_next_event,
-	.set_mode       = decrementer_set_mode,
-	.features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
+	.name			= "decrementer",
+	.rating			= 200,
+	.irq			= 0,
+	.set_next_event		= decrementer_set_next_event,
+	.set_state_shutdown	= decrementer_shutdown,
+	.tick_resume		= decrementer_shutdown,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_C3STOP,
 };
 EXPORT_SYMBOL(decrementer_clockevent);
 
@@ -862,11 +863,10 @@
 	return 0;
 }
 
-static void decrementer_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *dev)
+static int decrementer_shutdown(struct clock_event_device *dev)
 {
-	if (mode != CLOCK_EVT_MODE_ONESHOT)
-		decrementer_set_next_event(DECREMENTER_MAX, dev);
+	decrementer_set_next_event(DECREMENTER_MAX, dev);
+	return 0;
 }
 
 /* Interrupt handler for the timer broadcast IPI */
diff --git a/arch/powerpc/kernel/trace_clock.c b/arch/powerpc/kernel/trace_clock.c
new file mode 100644
index 0000000..4917069
--- /dev/null
+++ b/arch/powerpc/kernel/trace_clock.c
@@ -0,0 +1,15 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include <asm/trace_clock.h>
+#include <asm/time.h>
+
+u64 notrace trace_clock_ppc_tb(void)
+{
+	return get_tb();
+}
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 05ea8fc..6d6398f 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -902,7 +902,7 @@
 {
 	/*
 	 * We always return 0 for book3s. We check
-	 * for compatability while loading the HV
+	 * for compatibility while loading the HV
 	 * or PR module
 	 */
 	return 0;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 68d067a..a9f753f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2178,7 +2178,7 @@
 		vc->runner = vcpu;
 		if (n_ceded == vc->n_runnable) {
 			kvmppc_vcore_blocked(vc);
-		} else if (should_resched()) {
+		} else if (need_resched()) {
 			vc->vcore_state = VCORE_PREEMPT;
 			/* Let something else run */
 			cond_resched_lock(&vc->lock);
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index 7874e8a..6d67e05 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -41,22 +41,6 @@
 	blr
 
 /*
- * Compute checksum of TCP or UDP pseudo-header:
- *   csum_tcpudp_magic(saddr, daddr, len, proto, sum)
- */	
-_GLOBAL(csum_tcpudp_magic)
-	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
-	addc	r0,r3,r4	/* add 4 32-bit words together */
-	adde	r0,r0,r5
-	adde	r0,r0,r7
-	addze	r0,r0		/* add in final carry */
-	rlwinm	r3,r0,16,0,31	/* fold two halves together */
-	add	r3,r0,r3
-	not	r3,r3
-	srwi	r3,r3,16
-	blr
-
-/*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 57a0720..f3ef354 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -45,27 +45,6 @@
 	blr
 
 /*
- * Compute checksum of TCP or UDP pseudo-header:
- *   csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
- * No real gain trying to do this specially for 64 bit, but
- * the 32 bit addition may spill into the upper bits of
- * the doubleword so we still must fold it down from 64.
- */	
-_GLOBAL(csum_tcpudp_magic)
-	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
-	addc	r0,r3,r4	/* add 4 32-bit words together */
-	adde	r0,r0,r5
-	adde	r0,r0,r7
-        rldicl  r4,r0,32,0      /* fold 64 bit value */
-        add     r0,r4,r0
-        srdi    r0,r0,32
-	rlwinm	r3,r0,16,0,31	/* fold two halves together */
-	add	r3,r0,r3
-	not	r3,r3
-	srwi	r3,r3,16
-	blr
-
-/*
  * Computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit).
  *
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 6813f80..2ef50c6 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -69,9 +69,15 @@
 LG_CACHELINE_BYTES = L1_CACHE_SHIFT
 CACHELINE_MASK = (L1_CACHE_BYTES-1)
 
+/*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero.  This requires that the destination
+ * area is cacheable.  -- paulus
+ */
 _GLOBAL(memset)
 	rlwimi	r4,r4,8,16,23
 	rlwimi	r4,r4,16,0,15
+
 	addi	r6,r3,-4
 	cmplwi	0,r5,4
 	blt	7f
@@ -80,7 +86,29 @@
 	andi.	r0,r6,3
 	add	r5,r0,r5
 	subf	r6,r0,r6
-	srwi	r0,r5,2
+	cmplwi	0,r4,0
+	bne	2f	/* Use normal procedure if r4 is not zero */
+
+	clrlwi	r7,r6,32-LG_CACHELINE_BYTES
+	add	r8,r7,r5
+	srwi	r9,r8,LG_CACHELINE_BYTES
+	addic.	r9,r9,-1	/* total number of complete cachelines */
+	ble	2f
+	xori	r0,r7,CACHELINE_MASK & ~3
+	srwi.	r0,r0,2
+	beq	3f
+	mtctr	r0
+4:	stwu	r4,4(r6)
+	bdnz	4b
+3:	mtctr	r9
+	li	r7,4
+10:	dcbz	r7,r6
+	addi	r6,r6,CACHELINE_BYTES
+	bdnz	10b
+	clrlwi	r5,r8,32-LG_CACHELINE_BYTES
+	addi	r5,r5,4
+
+2:	srwi	r0,r5,2
 	mtctr	r0
 	bdz	6f
 1:	stwu	r4,4(r6)
@@ -94,12 +122,91 @@
 	bdnz	8b
 	blr
 
+/*
+ * This version uses dcbz on the complete cache lines in the
+ * destination area to reduce memory traffic.  This requires that
+ * the destination area is cacheable.
+ * We only use this version if the source and dest don't overlap.
+ * -- paulus.
+ */
 _GLOBAL(memmove)
 	cmplw	0,r3,r4
 	bgt	backwards_memcpy
 	/* fall through */
 
 _GLOBAL(memcpy)
+	add	r7,r3,r5		/* test if the src & dst overlap */
+	add	r8,r4,r5
+	cmplw	0,r4,r7
+	cmplw	1,r3,r8
+	crand	0,0,4			/* cr0.lt &= cr1.lt */
+	blt	generic_memcpy		/* if regions overlap */
+
+	addi	r4,r4,-4
+	addi	r6,r3,-4
+	neg	r0,r3
+	andi.	r0,r0,CACHELINE_MASK	/* # bytes to start of cache line */
+	beq	58f
+
+	cmplw	0,r5,r0			/* is this more than total to do? */
+	blt	63f			/* if not much to do */
+	andi.	r8,r0,3			/* get it word-aligned first */
+	subf	r5,r0,r5
+	mtctr	r8
+	beq+	61f
+70:	lbz	r9,4(r4)		/* do some bytes */
+	addi	r4,r4,1
+	addi	r6,r6,1
+	stb	r9,3(r6)
+	bdnz	70b
+61:	srwi.	r0,r0,2
+	mtctr	r0
+	beq	58f
+72:	lwzu	r9,4(r4)		/* do some words */
+	stwu	r9,4(r6)
+	bdnz	72b
+
+58:	srwi.	r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
+	clrlwi	r5,r5,32-LG_CACHELINE_BYTES
+	li	r11,4
+	mtctr	r0
+	beq	63f
+53:
+	dcbz	r11,r6
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 32
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 64
+	COPY_16_BYTES
+	COPY_16_BYTES
+#if L1_CACHE_BYTES >= 128
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+	COPY_16_BYTES
+#endif
+#endif
+#endif
+	bdnz	53b
+
+63:	srwi.	r0,r5,2
+	mtctr	r0
+	beq	64f
+30:	lwzu	r0,4(r4)
+	stwu	r0,4(r6)
+	bdnz	30b
+
+64:	andi.	r0,r5,3
+	mtctr	r0
+	beq+	65f
+	addi	r4,r4,3
+	addi	r6,r6,3
+40:	lbzu	r0,1(r4)
+	stbu	r0,1(r6)
+	bdnz	40b
+65:	blr
+
+_GLOBAL(generic_memcpy)
 	srwi.	r7,r5,3
 	addi	r6,r3,-4
 	addi	r4,r4,-4
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 9c90e66..354ba3c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -112,7 +112,7 @@
 
 	tsize = __ilog2(size) - 10;
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
 	if ((flags & _PAGE_NO_CACHE) == 0)
 		flags |= _PAGE_COHERENT;
 #endif
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 463174a..3b49e32 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -701,7 +701,7 @@
 
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 
 /*****************************************************************************
  *                                                                           *
@@ -993,7 +993,7 @@
 	b	ht64_bail
 
 
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 
 /*****************************************************************************
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5ec987f..aee7017 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -640,7 +640,7 @@
 
 static void __init htab_finish_init(void)
 {
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	patch_branch(ht64_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
 		BRANCH_SET_LINK);
@@ -653,7 +653,7 @@
 	patch_branch(ht64_call_hpte_updatepp,
 		ppc_function_entry(ppc_md.hpte_updatepp),
 		BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	patch_branch(htab_call_hpte_insert1,
 		ppc_function_entry(ppc_md.hpte_insert),
@@ -1151,12 +1151,12 @@
 		check_paca_psize(ea, mm, psize, user_region);
 #endif /* CONFIG_PPC_64K_PAGES */
 
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 	{
 		int spp = subpage_protection(mm, ea);
 		if (access & spp)
@@ -1264,12 +1264,12 @@
 		update_flags |= HPTE_LOCAL_UPDATE;
 
 	/* Hash it in */
-#ifdef CONFIG_PPC_HAS_HASH_64K
+#ifdef CONFIG_PPC_64K_PAGES
 	if (mm->context.user_psize == MMU_PAGE_64K)
 		rc = __hash_page_64K(ea, access, vsid, ptep, trap,
 				     update_flags, ssize);
 	else
-#endif /* CONFIG_PPC_HAS_HASH_64K */
+#endif /* CONFIG_PPC_64K_PAGES */
 		rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
 				    ssize, subpage_protection(mm, ea));
 
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index bb0bd70..06c1452 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -808,14 +808,6 @@
 	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
 		return -EINVAL;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	/* Disable support for 64K huge pages when 64K SPU local store
-	 * support is enabled as the current implementation conflicts.
-	 */
-	if (shift == PAGE_SHIFT_64K)
-		return -EINVAL;
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
 
 	/* Return if huge page size has already been setup */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0f11819..e1fe333 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -414,17 +414,17 @@
 		return;
 	}
 #endif
-#ifdef CONFIG_BOOKE
-	{
+#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
+	/* On 8xx there is no need to kmap since highmem is not supported */
+	__flush_dcache_icache(page_address(page));
+#else
+	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
 		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
 		kunmap_atomic(start);
+	} else {
+		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 	}
-#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
-	/* On 8xx there is no need to kmap since highmem is not supported */
-	__flush_dcache_icache(page_address(page)); 
-#else
-	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
 }
 EXPORT_SYMBOL(flush_dcache_icache_page);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5e80621..8b9502a 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -225,7 +225,7 @@
 	for (i = 0; i < distance_ref_points_depth; i++) {
 		const __be32 *entry;
 
-		entry = &associativity[be32_to_cpu(distance_ref_points[i])];
+		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
 		distance_lookup_table[nid][i] = of_read_number(entry, 1);
 	}
 }
@@ -248,8 +248,12 @@
 		nid = -1;
 
 	if (nid > 0 &&
-	    of_read_number(associativity, 1) >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, associativity);
+		of_read_number(associativity, 1) >= distance_ref_points_depth) {
+		/*
+		 * Skip the length field and pass the start of the associativity array
+		 */
+		initialize_distance_lookup_table(nid, associativity + 1);
+	}
 
 out:
 	return nid;
@@ -507,6 +511,12 @@
 
 		if (nid == 0xffff || nid >= MAX_NUMNODES)
 			nid = default_nid;
+
+		if (nid > 0) {
+			index = drmem->aa_index * aa->array_sz;
+			initialize_distance_lookup_table(nid,
+							&aa->arrays[index]);
+		}
 	}
 
 	return nid;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 876232d..e92cb21 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -149,17 +149,7 @@
 #endif /* !CONFIG_PPC_MMU_NOHASH */
 	}
 
-#ifdef CONFIG_PPC_BOOK3E_64
-	/*
-	 * With hardware tablewalk, a sync is needed to ensure that
-	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
-	 * mappings, we can't tolerate spurious faults, so make sure
-	 * the new PTE will be seen the first time.
-	 */
-	mb();
-#else
 	smp_wmb();
-#endif
 	return 0;
 }
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 6e450ca..8a32a2b 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -41,9 +41,9 @@
 	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
 
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
-					 unsigned long slot)
+					 unsigned long entry)
 {
-	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | entry;
 }
 
 static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
@@ -249,11 +249,24 @@
 static inline void patch_slb_encoding(unsigned int *insn_addr,
 				      unsigned int immed)
 {
-	int insn = (*insn_addr & 0xffff0000) | immed;
+
+	/*
+	 * This function patches either an li or a cmpldi instruction with
+	 * a new immediate value. This relies on the fact that both li
+	 * (which is actually addi) and cmpldi take a 16-bit immediate
+	 * value, and that it sits in the same location in both instructions,
+	 * i.e. bits 16-31 (big-endian bit order), the lower 16 bits.
+	 * The signedness of the immediate operand differs between the two
+	 * instructions; however, this code only ever patches a small value,
+	 * much less than 1 << 15, so we can get away with it.
+	 * To patch the value we read the existing instruction, clear the
+	 * immediate field, OR in our new value, then write the instruction
+	 * back.
+	 */
+	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
 	patch_instruction(insn_addr, insn);
 }
 
-extern u32 slb_compare_rr_to_size[];
 extern u32 slb_miss_kernel_load_linear[];
 extern u32 slb_miss_kernel_load_io[];
 extern u32 slb_compare_rr_to_size[];
@@ -309,12 +322,11 @@
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
-	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
+	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
 	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);
-
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
 	/* For the boot cpu, we're running on the stack in init_thread_union,
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 765b419..e418558 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -308,11 +308,11 @@
 	 *
 	 * MAS6:IND should be already set based on MAS4
 	 */
-1:	lbarx	r15,0,r11
 	lhz	r10,PACAPACAINDEX(r13)
-	cmpdi	r15,0
-	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
 	addi	r10,r10,1
+	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
+1:	lbarx	r15,0,r11
+	cmpdi	r15,0
 	bne	2f
 	stbcx.	r10,0,r11
 	bne	1b
@@ -320,9 +320,9 @@
 	.subsection 1
 2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
 	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
-	lbz	r15,0(r11)
+10:	lbz	r15,0(r11)
 	cmpdi	r15,0
-	bne	2b
+	bne	10b
 	b	1b
 	.previous
 
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 962fe7b..4b32e94 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -207,7 +207,7 @@
 	unsigned int mmcr0;
 
 	/* set the PMM bit (see comment below) */
-	mtmsrd(mfmsr() | MSR_PMM);
+	mtmsr(mfmsr() | MSR_PMM);
 
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		if (ctr[i].enabled) {
@@ -377,7 +377,7 @@
 	is_kernel = get_kernel(pc, mmcra);
 
 	/* set the PMM bit (see comment below) */
-	mtmsrd(mfmsr() | MSR_PMM);
+	mtmsr(mfmsr() | MSR_PMM);
 
 	/* Check that the SIAR  valid bit in MMCRA is set to 1. */
 	if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index d90893b..b0382f3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -53,7 +53,7 @@
 
 	/* BHRB bits */
 	u64				bhrb_filter;	/* BHRB HW branch filter */
-	int				bhrb_users;
+	unsigned int			bhrb_users;
 	void				*bhrb_context;
 	struct	perf_branch_stack	bhrb_stack;
 	struct	perf_branch_entry	bhrb_entries[BHRB_MAX_ENTRIES];
@@ -369,8 +369,8 @@
 	if (!ppmu->bhrb_nr)
 		return;
 
+	WARN_ON_ONCE(!cpuhw->bhrb_users);
 	cpuhw->bhrb_users--;
-	WARN_ON_ONCE(cpuhw->bhrb_users < 0);
 	perf_sched_cb_dec(event->ctx->pmu);
 
 	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index df95629..527c8b9 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -416,7 +416,7 @@
 }
 
 static struct attribute *event_to_desc_attr(struct hv_24x7_event_data *event,
-				int nonce)
+					    int nonce)
 {
 	int nl, dl;
 	char *name = event_name(event, &nl);
@@ -444,7 +444,7 @@
 }
 
 static ssize_t event_data_to_attrs(unsigned ix, struct attribute **attrs,
-		struct hv_24x7_event_data *event, int nonce)
+				   struct hv_24x7_event_data *event, int nonce)
 {
 	unsigned i;
 
@@ -512,7 +512,7 @@
 }
 
 static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
-					size_t s2, unsigned d2)
+		       size_t s2, unsigned d2)
 {
 	int r = memord(v1, s1, v2, s2);
 
@@ -526,7 +526,7 @@
 }
 
 static int event_uniq_add(struct rb_root *root, const char *name, int nl,
-				unsigned domain)
+			  unsigned domain)
 {
 	struct rb_node **new = &(root->rb_node), *parent = NULL;
 	struct event_uniq *data;
@@ -650,8 +650,8 @@
 #define MAX_4K (SIZE_MAX / 4096)
 
 static int create_events_from_catalog(struct attribute ***events_,
-		struct attribute ***event_descs_,
-		struct attribute ***event_long_descs_)
+				      struct attribute ***event_descs_,
+				      struct attribute ***event_long_descs_)
 {
 	unsigned long hret;
 	size_t catalog_len, catalog_page_len, event_entry_count,
@@ -1008,8 +1008,8 @@
 };
 
 static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer,
-			unsigned long ret)
+			   struct hv_24x7_data_result_buffer *result_buffer,
+			   unsigned long ret)
 {
 	struct hv_24x7_request *req;
 
@@ -1026,7 +1026,7 @@
  * Start the process for a new H_GET_24x7_DATA hcall.
  */
 static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer)
+			      struct hv_24x7_data_result_buffer *result_buffer)
 {
 
 	memset(request_buffer, 0, 4096);
@@ -1041,7 +1041,7 @@
  * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
  */
 static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
-			struct hv_24x7_data_result_buffer *result_buffer)
+			     struct hv_24x7_data_result_buffer *result_buffer)
 {
 	unsigned long ret;
 
@@ -1104,7 +1104,6 @@
 	unsigned long ret;
 	struct hv_24x7_request_buffer *request_buffer;
 	struct hv_24x7_data_result_buffer *result_buffer;
-	struct hv_24x7_result *resb;
 
 	BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
 	BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1125,8 +1124,7 @@
 	}
 
 	/* process result from hcall */
-	resb = &result_buffer->results[0];
-	*count = be64_to_cpu(resb->elements[0].element_data[0]);
+	*count = be64_to_cpu(result_buffer->results[0].elements[0].element_data[0]);
 
 out:
 	put_cpu_var(hv_24x7_reqb);
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 5aa3f4b..48bf38d 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -7,8 +7,8 @@
 	select PPC_PCI_CHOICE
 	select FSL_PCI if PCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
-	select USB_EHCI_BIG_ENDIAN_MMIO
-	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO if USB_EHCI_HCD
+	select USB_EHCI_BIG_ENDIAN_DESC if USB_EHCI_HCD
 
 config MPC5121_ADS
 	bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index f691bca..c50ea76 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/device.h>
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index ca3a062..11090ab 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -123,7 +123,8 @@
 }
 
 static int
-cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
+cpld_pic_host_match(struct irq_domain *h, struct device_node *node,
+		    enum irq_domain_bus_token bus_token)
 {
 	return cpld_pic_node == node;
 }
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 84476b6..61bc851 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -66,10 +66,6 @@
 	.probe			= c293_pcie_probe,
 	.setup_arch		= c293_pcie_setup_arch,
 	.init_IRQ		= c293_pcie_pic_init,
-#ifdef CONFIG_PCI
-	.pcibios_fixup_bus	= fsl_pcibios_fixup_bus,
-	.pcibios_fixup_phb      = fsl_pcibios_fixup_phb,
-#endif
 	.get_irq		= mpic_get_irq,
 	.restart		= fsl_rstcr_restart,
 	.calibrate_decr		= generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index bd839dc..b395571 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -153,6 +153,8 @@
 	"fsl,T1023RDB",
 	"fsl,T1024QDS",
 	"fsl,T1024RDB",
+	"fsl,T1040D4RDB",
+	"fsl,T1042D4RDB",
 	"fsl,T1040QDS",
 	"fsl,T1042QDS",
 	"fsl,T1040RDB",
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2f23133..b0ac177 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -57,21 +57,6 @@
 	  Units on machines implementing the Broadband Processor
 	  Architecture.
 
-config SPU_FS_64K_LS
-	bool "Use 64K pages to map SPE local  store"
-	# we depend on PPC_MM_SLICES for now rather than selecting
-	# it because we depend on hugetlbfs hooks being present. We
-	# will fix that when the generic code has been improved to
-	# not require hijacking hugetlbfs hooks.
-	depends on SPU_FS && PPC_MM_SLICES && !PPC_64K_PAGES
-	default y
-	select PPC_HAS_HASH_64K
-	help
-	  This option causes SPE local stores to be mapped in process
-	  address spaces using 64K pages while the rest of the kernel
-	  uses 4K pages. This can improve performances of applications
-	  using multiple SPEs by lowering the TLB pressure on them.
-
 config SPU_BASE
 	bool
 	default n
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index fe51de4..306888a 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -213,7 +213,7 @@
 		return -ENODEV;
 	}
 
-	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
+	entry = first_pci_msi_entry(dev);
 
 	for (; dn; dn = of_get_next_parent(dn)) {
 		if (entry->msi_attrib.is_64) {
@@ -269,7 +269,7 @@
 	if (rc)
 		return rc;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
 			dev_warn(&dev->dev,
@@ -292,7 +292,7 @@
 
 	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 3af8324..a15f1ef 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -222,7 +222,8 @@
 #endif /* CONFIG_SMP */
 
 
-static int iic_host_match(struct irq_domain *h, struct device_node *node)
+static int iic_host_match(struct irq_domain *h, struct device_node *node,
+			  enum irq_domain_bus_token bus_token)
 {
 	return of_device_is_compatible(node,
 				    "IBM,CBEA-Internal-Interrupt-Controller");
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index d966bbe..5038fd5 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -239,23 +239,6 @@
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	unsigned long pfn, offset;
 
-#ifdef CONFIG_SPU_FS_64K_LS
-	struct spu_state *csa = &ctx->csa;
-	int psize;
-
-	/* Check what page size we are using */
-	psize = get_slice_psize(vma->vm_mm, address);
-
-	/* Some sanity checking */
-	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));
-
-	/* Wow, 64K, cool, we need to align the address though */
-	if (csa->use_big_pages) {
-		BUG_ON(vma->vm_start & 0xffff);
-		address &= ~0xfffful;
-	}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	offset = vmf->pgoff << PAGE_SHIFT;
 	if (offset >= LS_SIZE)
 		return VM_FAULT_SIGBUS;
@@ -310,22 +293,6 @@
 
 static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
 {
-#ifdef CONFIG_SPU_FS_64K_LS
-	struct spu_context	*ctx = file->private_data;
-	struct spu_state	*csa = &ctx->csa;
-
-	/* Sanity check VMA alignment */
-	if (csa->use_big_pages) {
-		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
-			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
-			 vma->vm_pgoff);
-		if (vma->vm_start & 0xffff)
-			return -EINVAL;
-		if (vma->vm_pgoff & 0xf)
-			return -EINVAL;
-	}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
@@ -336,25 +303,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_SPU_FS_64K_LS
-static unsigned long spufs_get_unmapped_area(struct file *file,
-		unsigned long addr, unsigned long len, unsigned long pgoff,
-		unsigned long flags)
-{
-	struct spu_context	*ctx = file->private_data;
-	struct spu_state	*csa = &ctx->csa;
-
-	/* If not using big pages, fallback to normal MM g_u_a */
-	if (!csa->use_big_pages)
-		return current->mm->get_unmapped_area(file, addr, len,
-						      pgoff, flags);
-
-	/* Else, try to obtain a 64K pages slice */
-	return slice_get_unmapped_area(addr, len, flags,
-				       MMU_PAGE_64K, 1);
-}
-#endif /* CONFIG_SPU_FS_64K_LS */
-
 static const struct file_operations spufs_mem_fops = {
 	.open			= spufs_mem_open,
 	.release		= spufs_mem_release,
@@ -362,9 +310,6 @@
 	.write			= spufs_mem_write,
 	.llseek			= generic_file_llseek,
 	.mmap			= spufs_mem_mmap,
-#ifdef CONFIG_SPU_FS_64K_LS
-	.get_unmapped_area	= spufs_get_unmapped_area,
-#endif
 };
 
 static int spufs_ps_fault(struct vm_area_struct *vma,
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index 1470699..b847e94 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -31,7 +31,7 @@
 
 #include "spufs.h"
 
-static int spu_alloc_lscsa_std(struct spu_state *csa)
+int spu_alloc_lscsa(struct spu_state *csa)
 {
 	struct spu_lscsa *lscsa;
 	unsigned char *p;
@@ -48,7 +48,7 @@
 	return 0;
 }
 
-static void spu_free_lscsa_std(struct spu_state *csa)
+void spu_free_lscsa(struct spu_state *csa)
 {
 	/* Clear reserved bit before vfree. */
 	unsigned char *p;
@@ -61,123 +61,3 @@
 
 	vfree(csa->lscsa);
 }
-
-#ifdef CONFIG_SPU_FS_64K_LS
-
-#define SPU_64K_PAGE_SHIFT	16
-#define SPU_64K_PAGE_ORDER	(SPU_64K_PAGE_SHIFT - PAGE_SHIFT)
-#define SPU_64K_PAGE_COUNT	(1ul << SPU_64K_PAGE_ORDER)
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
-	struct page	**pgarray;
-	unsigned char	*p;
-	int		i, j, n_4k;
-
-	/* Check availability of 64K pages */
-	if (!spu_64k_pages_available())
-		goto fail;
-
-	csa->use_big_pages = 1;
-
-	pr_debug("spu_alloc_lscsa(csa=0x%p), trying to allocate 64K pages\n",
-		 csa);
-
-	/* First try to allocate our 64K pages. We need 5 of them
-	 * with the current implementation. In the future, we should try
-	 * to separate the lscsa with the actual local store image, thus
-	 * allowing us to require only 4 64K pages per context
-	 */
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) {
-		/* XXX This is likely to fail, we should use a special pool
-		 *     similar to what hugetlbfs does.
-		 */
-		csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL,
-						  SPU_64K_PAGE_ORDER);
-		if (csa->lscsa_pages[i] == NULL)
-			goto fail;
-	}
-
-	pr_debug(" success ! creating vmap...\n");
-
-	/* Now we need to create a vmalloc mapping of these for the kernel
-	 * and SPU context switch code to use. Currently, we stick to a
-	 * normal kernel vmalloc mapping, which in our case will be 4K
-	 */
-	n_4k = SPU_64K_PAGE_COUNT * SPU_LSCSA_NUM_BIG_PAGES;
-	pgarray = kmalloc(sizeof(struct page *) * n_4k, GFP_KERNEL);
-	if (pgarray == NULL)
-		goto fail;
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
-		for (j = 0; j < SPU_64K_PAGE_COUNT; j++)
-			/* We assume all the struct page's are contiguous
-			 * which should be hopefully the case for an order 4
-			 * allocation..
-			 */
-			pgarray[i * SPU_64K_PAGE_COUNT + j] =
-				csa->lscsa_pages[i] + j;
-	csa->lscsa = vmap(pgarray, n_4k, VM_USERMAP, PAGE_KERNEL);
-	kfree(pgarray);
-	if (csa->lscsa == NULL)
-		goto fail;
-
-	memset(csa->lscsa, 0, sizeof(struct spu_lscsa));
-
-	/* Set LS pages reserved to allow for user-space mapping.
-	 *
-	 * XXX isn't that a bit obsolete ? I think we should just
-	 * make sure the page count is high enough. Anyway, won't harm
-	 * for now
-	 */
-	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
-		SetPageReserved(vmalloc_to_page(p));
-
-	pr_debug(" all good !\n");
-
-	return 0;
-fail:
-	pr_debug("spufs: failed to allocate lscsa 64K pages, falling back\n");
-	spu_free_lscsa(csa);
-	return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
-	unsigned char *p;
-	int i;
-
-	if (!csa->use_big_pages) {
-		spu_free_lscsa_std(csa);
-		return;
-	}
-	csa->use_big_pages = 0;
-
-	if (csa->lscsa == NULL)
-		goto free_pages;
-
-	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
-		ClearPageReserved(vmalloc_to_page(p));
-
-	vunmap(csa->lscsa);
-	csa->lscsa = NULL;
-
- free_pages:
-
-	for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++)
-		if (csa->lscsa_pages[i])
-			__free_pages(csa->lscsa_pages[i], SPU_64K_PAGE_ORDER);
-}
-
-#else /* CONFIG_SPU_FS_64K_LS */
-
-int spu_alloc_lscsa(struct spu_state *csa)
-{
-	return spu_alloc_lscsa_std(csa);
-}
-
-void spu_free_lscsa(struct spu_state *csa)
-{
-	spu_free_lscsa_std(csa);
-}
-
-#endif /* !defined(CONFIG_SPU_FS_64K_LS) */
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index 4cde8e7..b7866e0 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -108,7 +108,8 @@
 	return 0;
 }
 
-static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
+static int flipper_pic_match(struct irq_domain *h, struct device_node *np,
+			     enum irq_domain_bus_token bus_token)
 {
 	return 1;
 }
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index 27f2b18..e66ef19 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -66,7 +66,7 @@
 
 	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
@@ -94,7 +94,7 @@
 	msg.address_hi = 0;
 	msg.address_lo = PASEMI_MSI_ADDR;
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		/* Allocate 16 interrupts for now, since that's the grouping for
 		 * affinity. This can be changed later if it turns out 32 is too
 		 * few MSIs for someone, but restrictions will apply to how the
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 59cfc9d..6f4f8b0 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -268,7 +268,8 @@
 	.name		= "cascade",
 };
 
-static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
+			       enum irq_domain_bus_token bus_token)
 {
 	/* We match all, we don't always have a node anyway */
 	return 1;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 7cf0df8..3bb6acb 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1394,11 +1394,19 @@
 			 */
 			if (pnv_eeh_get_pe(hose,
 				be64_to_cpu(frozen_pe_no), pe)) {
-				/* Try best to clear it */
 				pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
-					hose->global_number, frozen_pe_no);
+					hose->global_number, be64_to_cpu(frozen_pe_no));
 				pr_info("EEH: PHB location: %s\n",
 					eeh_pe_loc_get(phb_pe));
+
+				/* Dump PHB diag-data */
+				rc = opal_pci_get_phb_diag_data2(phb->opal_id,
+					phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
+				if (rc == OPAL_SUCCESS)
+					pnv_pci_dump_phb_diag_data(hose,
+							phb->diag.blob);
+
+				/* Try best to clear it */
 				opal_pci_eeh_freeze_clear(phb->opal_id,
 					frozen_pe_no,
 					OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index a8f49d3..d000f4e 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -35,9 +35,134 @@
 	struct list_head list;
 	struct OpalHMIEvent hmi_evt;
 };
+
+struct xstop_reason {
+	uint32_t xstop_reason;
+	const char *unit_failed;
+	const char *description;
+};
+
 static LIST_HEAD(opal_hmi_evt_list);
 static DEFINE_SPINLOCK(opal_hmi_evt_lock);
 
+static void print_core_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	int i;
+	static const struct xstop_reason xstop_reason[] = {
+		{ CORE_CHECKSTOP_IFU_REGFILE, "IFU",
+				"RegFile core check stop" },
+		{ CORE_CHECKSTOP_IFU_LOGIC, "IFU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_DURING_RECOV, "PC",
+				"Core checkstop during recovery" },
+		{ CORE_CHECKSTOP_ISU_REGFILE, "ISU",
+				"RegFile core check stop (mapper error)" },
+		{ CORE_CHECKSTOP_ISU_LOGIC, "ISU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_FXU_LOGIC, "FXU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_VSU_LOGIC, "VSU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE, "PC",
+				"Recovery in maintenance mode" },
+		{ CORE_CHECKSTOP_LSU_REGFILE, "LSU",
+				"RegFile core check stop" },
+		{ CORE_CHECKSTOP_PC_FWD_PROGRESS, "PC",
+				"Forward Progress Error" },
+		{ CORE_CHECKSTOP_LSU_LOGIC, "LSU", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_LOGIC, "PC", "Logic core check stop" },
+		{ CORE_CHECKSTOP_PC_HYP_RESOURCE, "PC",
+				"Hypervisor Resource error - core check stop" },
+		{ CORE_CHECKSTOP_PC_HANG_RECOV_FAILED, "PC",
+				"Hang Recovery Failed (core check stop)" },
+		{ CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED, "PC",
+				"Ambiguous Hang Detected (unknown source)" },
+		{ CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ, "PC",
+				"Debug Trigger Error inject" },
+		{ CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ, "PC",
+				"Hypervisor check stop via SPRC/SPRD" },
+	};
+
+	/* Validity check */
+	if (!hmi_evt->u.xstop_error.xstop_reason) {
+		printk("%s	Unknown Core check stop.\n", level);
+		return;
+	}
+
+	printk("%s	CPU PIR: %08x\n", level,
+			be32_to_cpu(hmi_evt->u.xstop_error.u.pir));
+	for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+		if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+					xstop_reason[i].xstop_reason)
+			printk("%s	[Unit: %-3s] %s\n", level,
+					xstop_reason[i].unit_failed,
+					xstop_reason[i].description);
+}
+
+static void print_nx_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	int i;
+	static const struct xstop_reason xstop_reason[] = {
+		{ NX_CHECKSTOP_SHM_INVAL_STATE_ERR, "DMA & Engine",
+					"SHM invalid state error" },
+		{ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1, "DMA & Engine",
+					"DMA invalid state error bit 15" },
+		{ NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2, "DMA & Engine",
+					"DMA invalid state error bit 16" },
+		{ NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 0 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 1 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 2 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 3 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 4 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 5 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 6 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR, "DMA & Engine",
+					"Channel 7 invalid state error" },
+		{ NX_CHECKSTOP_DMA_CRB_UE, "DMA & Engine",
+					"UE error on CRB(CSB address, CCB)" },
+		{ NX_CHECKSTOP_DMA_CRB_SUE, "DMA & Engine",
+					"SUE error on CRB(CSB address, CCB)" },
+		{ NX_CHECKSTOP_PBI_ISN_UE, "PowerBus Interface",
+		"CRB Kill ISN received while holding ISN with UE error" },
+	};
+
+	/* Validity check */
+	if (!hmi_evt->u.xstop_error.xstop_reason) {
+		printk("%s	Unknown NX check stop.\n", level);
+		return;
+	}
+
+	printk("%s	NX checkstop on CHIP ID: %x\n", level,
+			be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id));
+	for (i = 0; i < ARRAY_SIZE(xstop_reason); i++)
+		if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) &
+					xstop_reason[i].xstop_reason)
+			printk("%s	[Unit: %-3s] %s\n", level,
+					xstop_reason[i].unit_failed,
+					xstop_reason[i].description);
+}
+
+static void print_checkstop_reason(const char *level,
+					struct OpalHMIEvent *hmi_evt)
+{
+	switch (hmi_evt->u.xstop_error.xstop_type) {
+	case CHECKSTOP_TYPE_CORE:
+		print_core_checkstop_reason(level, hmi_evt);
+		break;
+	case CHECKSTOP_TYPE_NX:
+		print_nx_checkstop_reason(level, hmi_evt);
+		break;
+	case CHECKSTOP_TYPE_UNKNOWN:
+		printk("%s	Unknown Malfunction Alert.\n", level);
+		break;
+	}
+}
+
 static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
 {
 	const char *level, *sevstr, *error_info;
@@ -95,6 +220,13 @@
 		(hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
 		printk("%s	TFMR: %016llx\n", level,
 						be64_to_cpu(hmi_evt->tfmr));
+
+	if (hmi_evt->version < OpalHMIEvt_V2)
+		return;
+
+	/* OpalHMIEvt_V2 and above provides reason for malfunction alert. */
+	if (hmi_evt->type == OpalHMI_ERROR_MALFUNC_ALERT)
+		print_checkstop_reason(level, hmi_evt);
 }
 
 static void hmi_event_handler(struct work_struct *work)
@@ -103,6 +235,8 @@
 	struct OpalHMIEvent *hmi_evt;
 	struct OpalHmiEvtNode *msg_node;
 	uint8_t disposition;
+	struct opal_msg msg;
+	int unrecoverable = 0;
 
 	spin_lock_irqsave(&opal_hmi_evt_lock, flags);
 	while (!list_empty(&opal_hmi_evt_list)) {
@@ -118,14 +252,53 @@
 
 		/*
 		 * Check if HMI event has been recovered or not. If not
-		 * then we can't continue, invoke panic.
+		 * then the kernel can't continue and we need to panic.
+		 * But before we do that, display all the HMI events
+		 * available on the list and set the unrecoverable flag to 1.
 		 */
 		if (disposition != OpalHMI_DISPOSITION_RECOVERED)
-			panic("Unrecoverable HMI exception");
+			unrecoverable = 1;
 
 		spin_lock_irqsave(&opal_hmi_evt_lock, flags);
 	}
 	spin_unlock_irqrestore(&opal_hmi_evt_lock, flags);
+
+	if (unrecoverable) {
+		int ret;
+
+		/* Pull all HMI events from OPAL before we panic. */
+		while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) {
+			u32 type;
+
+			type = be32_to_cpu(msg.msg_type);
+
+			/* skip if not HMI event */
+			if (type != OPAL_MSG_HMI_EVT)
+				continue;
+
+			/* HMI event info starts from param[0] */
+			hmi_evt = (struct OpalHMIEvent *)&msg.params[0];
+			print_hmi_event_info(hmi_evt);
+		}
+
+		/*
+		 * Unrecoverable HMI exception. We need to inform BMC/OCC
+		 * about this error so that it can collect relevant data
+		 * for error analysis before rebooting.
+		 */
+		ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+			"Unrecoverable HMI exception");
+		if (ret == OPAL_UNSUPPORTED) {
+			pr_emerg("Reboot type %d not supported\n",
+						OPAL_REBOOT_PLATFORM_ERROR);
+		}
+
+		/*
+		 * Fall through and panic if opal_cec_reboot2() returns
+		 * OPAL_UNSUPPORTED.
+		 */
+		panic("Unrecoverable HMI exception");
+	}
 }
 
 static DECLARE_WORK(hmi_event_work, hmi_event_handler);
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index e2e7d75..2c91ee7 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -134,7 +134,8 @@
 	opal_handle_events(be64_to_cpu(last_outstanding_events));
 }
 
-static int opal_event_match(struct irq_domain *h, struct device_node *node)
+static int opal_event_match(struct irq_domain *h, struct device_node *node,
+			    enum irq_domain_bus_token bus_token)
 {
 	return h->of_node == node;
 }
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index ac46c2c..58dc330 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -9,9 +9,12 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt)	"opal-power: "	fmt
+
 #include <linux/kernel.h>
 #include <linux/reboot.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
 
 #include <asm/opal.h>
 #include <asm/machdep.h>
@@ -19,30 +22,116 @@
 #define SOFT_OFF 0x00
 #define SOFT_REBOOT 0x01
 
-static int opal_power_control_event(struct notifier_block *nb,
-				    unsigned long msg_type, void *msg)
+/* Detect EPOW event */
+static bool detect_epow(void)
 {
-	struct opal_msg *power_msg = msg;
+	u16 epow;
+	int i, rc;
+	__be16 epow_classes;
+	__be16 opal_epow_status[OPAL_SYSEPOW_MAX] = {0};
+
+	/*
+	 * Check for an EPOW event. The kernel sends the supported EPOW classes
+	 * info to OPAL. OPAL returns EPOW info along with the classes present.
+	 */
+	epow_classes = cpu_to_be16(OPAL_SYSEPOW_MAX);
+	rc = opal_get_epow_status(opal_epow_status, &epow_classes);
+	if (rc != OPAL_SUCCESS) {
+		pr_err("Failed to get EPOW event information\n");
+		return false;
+	}
+
+	/* Look for EPOW events present */
+	for (i = 0; i < be16_to_cpu(epow_classes); i++) {
+		epow = be16_to_cpu(opal_epow_status[i]);
+
+		/* Filter events which do not need shutdown. */
+		if (i == OPAL_SYSEPOW_POWER)
+			epow &= ~(OPAL_SYSPOWER_CHNG | OPAL_SYSPOWER_FAIL |
+					OPAL_SYSPOWER_INCL);
+		if (epow)
+			return true;
+	}
+
+	return false;
+}
+
+/* Check for existing EPOW, DPO events */
+static bool poweroff_pending(void)
+{
+	int rc;
+	__be64 opal_dpo_timeout;
+
+	/* Check for DPO event */
+	rc = opal_get_dpo_status(&opal_dpo_timeout);
+	if (rc == OPAL_SUCCESS) {
+		pr_info("Existing DPO event detected.\n");
+		return true;
+	}
+
+	/* Check for EPOW event */
+	if (detect_epow()) {
+		pr_info("Existing EPOW event detected.\n");
+		return true;
+	}
+
+	return false;
+}
+
+/* OPAL power-control events notifier */
+static int opal_power_control_event(struct notifier_block *nb,
+					unsigned long msg_type, void *msg)
+{
 	uint64_t type;
 
-	type = be64_to_cpu(power_msg->params[0]);
-
-	switch (type) {
-	case SOFT_REBOOT:
-		pr_info("OPAL: reboot requested\n");
-		orderly_reboot();
+	switch (msg_type) {
+	case OPAL_MSG_EPOW:
+		if (detect_epow()) {
+			pr_info("EPOW msg received. Powering off system\n");
+			orderly_poweroff(true);
+		}
 		break;
-	case SOFT_OFF:
-		pr_info("OPAL: poweroff requested\n");
+	case OPAL_MSG_DPO:
+		pr_info("DPO msg received. Powering off system\n");
 		orderly_poweroff(true);
 		break;
+	case OPAL_MSG_SHUTDOWN:
+		type = be64_to_cpu(((struct opal_msg *)msg)->params[0]);
+		switch (type) {
+		case SOFT_REBOOT:
+			pr_info("Reboot requested\n");
+			orderly_reboot();
+			break;
+		case SOFT_OFF:
+			pr_info("Poweroff requested\n");
+			orderly_poweroff(true);
+			break;
+		default:
+			pr_err("Unknown power-control type %llu\n", type);
+		}
+		break;
 	default:
-		pr_err("OPAL: power control type unexpected %016llx\n", type);
+		pr_err("Unknown OPAL message type %lu\n", msg_type);
 	}
 
 	return 0;
 }
 
+/* OPAL EPOW event notifier block */
+static struct notifier_block opal_epow_nb = {
+	.notifier_call	= opal_power_control_event,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+/* OPAL DPO event notifier block */
+static struct notifier_block opal_dpo_nb = {
+	.notifier_call	= opal_power_control_event,
+	.next		= NULL,
+	.priority	= 0,
+};
+
+/* OPAL power-control event notifier block */
 static struct notifier_block opal_power_control_nb = {
 	.notifier_call	= opal_power_control_event,
 	.next		= NULL,
@@ -51,16 +140,40 @@
 
 static int __init opal_power_control_init(void)
 {
-	int ret;
+	int ret, supported = 0;
+	struct device_node *np;
 
+	/* Register OPAL power-control events notifier */
 	ret = opal_message_notifier_register(OPAL_MSG_SHUTDOWN,
-					     &opal_power_control_nb);
-	if (ret) {
-		pr_err("%s: Can't register OPAL event notifier (%d)\n",
-				__func__, ret);
-		return ret;
+						&opal_power_control_nb);
+	if (ret)
+		pr_err("Failed to register SHUTDOWN notifier, ret = %d\n", ret);
+
+	/* Determine OPAL EPOW, DPO support */
+	np = of_find_node_by_path("/ibm,opal/epow");
+	if (np) {
+		supported = of_device_is_compatible(np, "ibm,opal-v3-epow");
+		of_node_put(np);
 	}
 
+	if (!supported)
+		return 0;
+	pr_info("OPAL EPOW, DPO support detected.\n");
+
+	/* Register EPOW event notifier */
+	ret = opal_message_notifier_register(OPAL_MSG_EPOW, &opal_epow_nb);
+	if (ret)
+		pr_err("Failed to register EPOW notifier, ret = %d\n", ret);
+
+	/* Register DPO event notifier */
+	ret = opal_message_notifier_register(OPAL_MSG_DPO, &opal_dpo_nb);
+	if (ret)
+		pr_err("Failed to register DPO notifier, ret = %d\n", ret);
+
+	/* Check for any pending EPOW or DPO events. */
+	if (poweroff_pending())
+		orderly_poweroff(true);
+
 	return 0;
 }
 machine_subsys_initcall(powernv, opal_power_control_init);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index d6a7b82..b7a464f 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -202,6 +202,7 @@
 OPAL_CALL(opal_rtc_write,			OPAL_RTC_WRITE);
 OPAL_CALL(opal_cec_power_down,			OPAL_CEC_POWER_DOWN);
 OPAL_CALL(opal_cec_reboot,			OPAL_CEC_REBOOT);
+OPAL_CALL(opal_cec_reboot2,			OPAL_CEC_REBOOT2);
 OPAL_CALL(opal_read_nvram,			OPAL_READ_NVRAM);
 OPAL_CALL(opal_write_nvram,			OPAL_WRITE_NVRAM);
 OPAL_CALL(opal_handle_interrupt,		OPAL_HANDLE_INTERRUPT);
@@ -249,6 +250,7 @@
 OPAL_CALL(opal_pci_mask_pe_error,		OPAL_PCI_MASK_PE_ERROR);
 OPAL_CALL(opal_set_slot_led_status,		OPAL_SET_SLOT_LED_STATUS);
 OPAL_CALL(opal_get_epow_status,			OPAL_GET_EPOW_STATUS);
+OPAL_CALL(opal_get_dpo_status,			OPAL_GET_DPO_STATUS);
 OPAL_CALL(opal_set_system_attention_led,	OPAL_SET_SYSTEM_ATTENTION_LED);
 OPAL_CALL(opal_pci_next_error,			OPAL_PCI_NEXT_ERROR);
 OPAL_CALL(opal_pci_poll,			OPAL_PCI_POLL);
@@ -297,3 +299,5 @@
 OPAL_CALL(opal_flash_write,			OPAL_FLASH_WRITE);
 OPAL_CALL(opal_flash_erase,			OPAL_FLASH_ERASE);
 OPAL_CALL(opal_prd_msg,				OPAL_PRD_MSG);
+OPAL_CALL(opal_leds_get_ind,			OPAL_LEDS_GET_INDICATOR);
+OPAL_CALL(opal_leds_set_ind,			OPAL_LEDS_SET_INDICATOR);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index f084afa..230f3a7 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -441,6 +441,7 @@
 int opal_machine_check(struct pt_regs *regs)
 {
 	struct machine_check_event evt;
+	int ret;
 
 	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
 		return 0;
@@ -455,6 +456,40 @@
 
 	if (opal_recover_mce(regs, &evt))
 		return 1;
+
+	/*
+	 * Unrecovered machine check; we are heading down the panic path.
+	 *
+	 * We may have hit this MCE at a very early stage of kernel
+	 * initialization, even before opal-prd has started running. If
+	 * that is the case, this MCE error may go unnoticed or
+	 * unanalyzed if we go down the panic path. We need to inform the
+	 * BMC/OCC about this error so that they can collect relevant
+	 * data for error analysis before rebooting.
+	 * Use opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR) to do so.
+	 * This function may not return on BMC-based systems.
+	 */
+	ret = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR,
+			"Unrecoverable Machine Check exception");
+	if (ret == OPAL_UNSUPPORTED) {
+		pr_emerg("Reboot type %d not supported\n",
+					OPAL_REBOOT_PLATFORM_ERROR);
+	}
+
+	/*
+	 * If we reached here, there are three possibilities:
+	 * 1. We are running on a firmware level that does not support
+	 *    opal_cec_reboot2().
+	 * 2. We are running on a firmware level that does not support
+	 *    the OPAL_REBOOT_PLATFORM_ERROR reboot type.
+	 * 3. We are running on an FSP-based system that does not need OPAL
+	 *    to trigger a checkstop explicitly for error analysis. The FSP
+	 *    PRD component would already have been notified about this
+	 *    error through other channels.
+	 *
+	 * In any case, just fall through; we are heading down the
+	 * panic path anyway.
+	 */
 	return 0;
 }
 
@@ -648,7 +683,7 @@
 
 static int __init opal_init(void)
 {
-	struct device_node *np, *consoles;
+	struct device_node *np, *consoles, *leds;
 	int rc;
 
 	opal_node = of_find_node_by_path("/ibm,opal");
@@ -689,6 +724,13 @@
 	/* Setup a heatbeat thread if requested by OPAL */
 	opal_init_heartbeat();
 
+	/* Create leds platform devices */
+	leds = of_find_node_by_path("/ibm,opal/leds");
+	if (leds) {
+		of_platform_device_create(leds, "opal_leds", NULL);
+		of_node_put(leds);
+	}
+
 	/* Create "opal" kobject under /sys/firmware */
 	rc = opal_sysfs_init();
 	if (rc == 0) {
@@ -841,3 +883,6 @@
 EXPORT_SYMBOL_GPL(opal_tpo_read);
 EXPORT_SYMBOL_GPL(opal_tpo_write);
 EXPORT_SYMBOL_GPL(opal_i2c_request);
+/* Export these symbols for PowerNV LED class driver */
+EXPORT_SYMBOL_GPL(opal_leds_get_ind);
+EXPORT_SYMBOL_GPL(opal_leds_set_ind);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 85cbc96..2927cd5 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -140,11 +140,9 @@
 		return;
 	}
 
-	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
-		pr_warn("%s: PE %d was assigned on PHB#%x\n",
-			__func__, pe_no, phb->hose->global_number);
-		return;
-	}
+	if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
+		pr_debug("%s: PE %d was reserved on PHB#%x\n",
+			 __func__, pe_no, phb->hose->global_number);
 
 	phb->ioda.pe_array[pe_no].phb = phb;
 	phb->ioda.pe_array[pe_no].pe_number = pe_no;
@@ -231,61 +229,60 @@
 	return -EIO;
 }
 
-static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
+static void pnv_ioda2_reserve_dev_m64_pe(struct pci_dev *pdev,
+					 unsigned long *pe_bitmap)
 {
-	resource_size_t sgsz = phb->ioda.m64_segsize;
-	struct pci_dev *pdev;
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct resource *r;
-	int base, step, i;
+	resource_size_t base, sgsz, start, end;
+	int segno, i;
 
-	/*
-	 * Root bus always has full M64 range and root port has
-	 * M64 range used in reality. So we're checking root port
-	 * instead of root bus.
-	 */
-	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
-		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
-			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
-			if (!r->parent ||
-			    !pnv_pci_is_mem_pref_64(r->flags))
-				continue;
+	base = phb->ioda.m64_base;
+	sgsz = phb->ioda.m64_segsize;
+	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
+		r = &pdev->resource[i];
+		if (!r->parent || !pnv_pci_is_mem_pref_64(r->flags))
+			continue;
 
-			base = (r->start - phb->ioda.m64_base) / sgsz;
-			for (step = 0; step < resource_size(r) / sgsz; step++)
-				pnv_ioda_reserve_pe(phb, base + step);
+		start = _ALIGN_DOWN(r->start - base, sgsz);
+		end = _ALIGN_UP(r->end - base, sgsz);
+		for (segno = start / sgsz; segno < end / sgsz; segno++) {
+			if (pe_bitmap)
+				set_bit(segno, pe_bitmap);
+			else
+				pnv_ioda_reserve_pe(phb, segno);
 		}
 	}
 }
 
-static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
-				 struct pci_bus *bus, int all)
+static void pnv_ioda2_reserve_m64_pe(struct pci_bus *bus,
+				     unsigned long *pe_bitmap,
+				     bool all)
 {
-	resource_size_t segsz = phb->ioda.m64_segsize;
 	struct pci_dev *pdev;
-	struct resource *r;
+
+	list_for_each_entry(pdev, &bus->devices, bus_list) {
+		pnv_ioda2_reserve_dev_m64_pe(pdev, pe_bitmap);
+
+		if (all && pdev->subordinate)
+			pnv_ioda2_reserve_m64_pe(pdev->subordinate,
+						 pe_bitmap, all);
+	}
+}
+
+static int pnv_ioda2_pick_m64_pe(struct pci_bus *bus, bool all)
+{
+	struct pci_controller *hose = pci_bus_to_host(bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pnv_ioda_pe *master_pe, *pe;
 	unsigned long size, *pe_alloc;
-	bool found;
-	int start, i, j;
+	int i;
 
 	/* Root bus shouldn't use M64 */
 	if (pci_is_root_bus(bus))
 		return IODA_INVALID_PE;
 
-	/* We support only one M64 window on each bus */
-	found = false;
-	pci_bus_for_each_resource(bus, r, i) {
-		if (r && r->parent &&
-		    pnv_pci_is_mem_pref_64(r->flags)) {
-			found = true;
-			break;
-		}
-	}
-
-	/* No M64 window found ? */
-	if (!found)
-		return IODA_INVALID_PE;
-
 	/* Allocate bitmap */
 	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
 	pe_alloc = kzalloc(size, GFP_KERNEL);
@@ -295,35 +292,8 @@
 		return IODA_INVALID_PE;
 	}
 
-	/*
-	 * Figure out reserved PE numbers by the PE
-	 * the its child PEs.
-	 */
-	start = (r->start - phb->ioda.m64_base) / segsz;
-	for (i = 0; i < resource_size(r) / segsz; i++)
-		set_bit(start + i, pe_alloc);
-
-	if (all)
-		goto done;
-
-	/*
-	 * If the PE doesn't cover all subordinate buses,
-	 * we need subtract from reserved PEs for children.
-	 */
-	list_for_each_entry(pdev, &bus->devices, bus_list) {
-		if (!pdev->subordinate)
-			continue;
-
-		pci_bus_for_each_resource(pdev->subordinate, r, i) {
-			if (!r || !r->parent ||
-			    !pnv_pci_is_mem_pref_64(r->flags))
-				continue;
-
-			start = (r->start - phb->ioda.m64_base) / segsz;
-			for (j = 0; j < resource_size(r) / segsz ; j++)
-				clear_bit(start + j, pe_alloc);
-                }
-        }
+	/* Figure out reserved PE numbers by the PE */
+	pnv_ioda2_reserve_m64_pe(bus, pe_alloc, all);
 
 	/*
 	 * the current bus might not own M64 window and that's all
@@ -339,7 +309,6 @@
 	 * Figure out the master PE and put all slave PEs to master
 	 * PE's list to form compound PE.
 	 */
-done:
 	master_pe = NULL;
 	i = -1;
 	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
@@ -653,7 +622,7 @@
 		pdev = pe->pdev->bus->self;
 #ifdef CONFIG_PCI_IOV
 	else if (pe->flags & PNV_IODA_PE_VF)
-		pdev = pe->parent_dev->bus->self;
+		pdev = pe->parent_dev;
 #endif /* CONFIG_PCI_IOV */
 	while (pdev) {
 		struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -732,7 +701,7 @@
 		parent = parent->bus->self;
 	}
 
-	opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
 				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
 
 	/* Disassociate PE in PELT */
@@ -946,8 +915,9 @@
 		res2 = *res;
 		res->start += size * offset;
 
-		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
-			 i, &res2, res, num_vfs, offset);
+		dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
+			 i, &res2, res, (offset > 0) ? "En" : "Dis",
+			 num_vfs, offset);
 		pci_update_resource(dev, i + PCI_IOV_RESOURCES);
 	}
 	return 0;
@@ -1050,7 +1020,7 @@
  * subordinate PCI devices and buses. The second type of PE is normally
  * orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports.
  */
-static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
+static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
 {
 	struct pci_controller *hose = pci_bus_to_host(bus);
 	struct pnv_phb *phb = hose->private_data;
@@ -1059,7 +1029,7 @@
 
 	/* Check if PE is determined by M64 */
 	if (phb->pick_m64_pe)
-		pe_num = phb->pick_m64_pe(phb, bus, all);
+		pe_num = phb->pick_m64_pe(bus, all);
 
 	/* The PE number isn't pinned by M64 */
 	if (pe_num == IODA_INVALID_PE)
@@ -1117,12 +1087,12 @@
 {
 	struct pci_dev *dev;
 
-	pnv_ioda_setup_bus_PE(bus, 0);
+	pnv_ioda_setup_bus_PE(bus, false);
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		if (dev->subordinate) {
 			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
-				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
+				pnv_ioda_setup_bus_PE(dev->subordinate, true);
 			else
 				pnv_ioda_setup_PEs(dev->subordinate);
 		}
@@ -1147,7 +1117,7 @@
 
 		/* M64 layout might affect PE allocation */
 		if (phb->reserve_m64_pe)
-			phb->reserve_m64_pe(phb);
+			phb->reserve_m64_pe(hose->bus, NULL, true);
 
 		pnv_ioda_setup_PEs(hose->bus);
 	}
@@ -1590,6 +1560,7 @@
 
 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
+	set_dma_offset(&pdev->dev, pe->tce_bypass_base);
 	set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	/*
 	 * Note: iommu_add_device() will fail here as
@@ -1620,19 +1591,18 @@
 	if (bypass) {
 		dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
 		set_dma_ops(&pdev->dev, &dma_direct_ops);
-		set_dma_offset(&pdev->dev, pe->tce_bypass_base);
 	} else {
 		dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
 		set_dma_ops(&pdev->dev, &dma_iommu_ops);
-		set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
 	}
 	*pdev->dev.dma_mask = dma_mask;
 	return 0;
 }
 
-static u64 pnv_pci_ioda_dma_get_required_mask(struct pnv_phb *phb,
-					      struct pci_dev *pdev)
+static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 {
+	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+	struct pnv_phb *phb = hose->private_data;
 	struct pci_dn *pdn = pci_get_pdn(pdev);
 	struct pnv_ioda_pe *pe;
 	u64 end, mask;
@@ -1659,6 +1629,7 @@
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
+		set_dma_offset(&dev->dev, pe->tce_bypass_base);
 		iommu_add_device(&dev->dev);
 
 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -3057,6 +3028,7 @@
        .window_alignment = pnv_pci_window_alignment,
        .reset_secondary_bus = pnv_pci_reset_secondary_bus,
        .dma_set_mask = pnv_pci_ioda_dma_set_mask,
+       .dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask,
        .shutdown = pnv_pci_ioda_shutdown,
 };
 
@@ -3203,7 +3175,6 @@
 
 	/* Setup TCEs */
 	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
-	phb->dma_get_required_mask = pnv_pci_ioda_dma_get_required_mask;
 
 	/* Setup MSI support */
 	pnv_pci_init_ioda_msis(phb);
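Note on the pci-ioda.c changes above: M64 PE reservation is reworked so that each device's BARs are walked and every M64 segment they overlap is either reserved immediately or recorded in a caller-supplied bitmap, instead of scanning only root-port bridge windows. A minimal sketch of the segment arithmetic, assuming the BAR lies inside the M64 window; ALIGN_DOWN_SZ/ALIGN_UP_SZ are stand-ins for the powerpc _ALIGN_DOWN/_ALIGN_UP helpers:

	#include <linux/bitops.h>

	/* Stand-in alignment helpers (segment size is a power of two). */
	#define ALIGN_DOWN_SZ(x, a)	((x) & ~((a) - 1))
	#define ALIGN_UP_SZ(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	static void reserve_segments(unsigned long rstart, unsigned long rend,
				     unsigned long m64_base, unsigned long sgsz,
				     unsigned long *pe_bitmap)
	{
		unsigned long start = ALIGN_DOWN_SZ(rstart - m64_base, sgsz);
		unsigned long end   = ALIGN_UP_SZ(rend - m64_base, sgsz);
		unsigned long segno;

		/* e.g. base 0, sgsz 256MB, BAR [0x30000000, 0x4fffffff]
		 * covers segments 3 and 4 */
		for (segno = start / sgsz; segno < end / sgsz; segno++)
			set_bit(segno, pe_bitmap);
	}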
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 765d8ed..9b2480b 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -61,7 +61,7 @@
 	if (pdev->no_64bit_msi && !phb->msi32_support)
 		return -ENODEV;
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
 			pr_warn("%s: Supports only 64-bit MSIs\n",
 				pci_name(pdev));
@@ -103,7 +103,7 @@
 	if (WARN_ON(!phb))
 		return;
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 		irq_set_msi_desc(entry->irq, NULL);
@@ -761,17 +761,6 @@
 		phb->dma_dev_setup(phb, pdev);
 }
 
-u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
-	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
-	struct pnv_phb *phb = hose->private_data;
-
-	if (phb && phb->dma_get_required_mask)
-		return phb->dma_get_required_mask(phb, pdev);
-
-	return __dma_get_required_mask(&pdev->dev);
-}
-
 void pnv_pci_shutdown(void)
 {
 	struct pci_controller *hose;
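Several hunks in this file and in the pseries/fsl/mpic/ppc4xx MSI code below replace open-coded walks of pdev->msi_list with the for_each_pci_msi_entry() helper. A sketch of the teardown pattern the drivers follow; the helper names used here are the ones visible in the hunks:

	#include <linux/msi.h>
	#include <linux/pci.h>
	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	static void teardown_msi_sketch(struct pci_dev *pdev)
	{
		struct msi_desc *entry;

		/* iterate every MSI/MSI-X descriptor of the device without
		 * touching pdev->msi_list directly */
		for_each_pci_msi_entry(entry, pdev) {
			if (entry->irq == NO_IRQ)	/* vector never mapped */
				continue;
			irq_set_msi_desc(entry->irq, NULL);
			irq_dispose_mapping(entry->irq);
		}
	}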
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8ef2d28..c8ff50e 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -105,13 +105,12 @@
 			 unsigned int hwirq, unsigned int virq,
 			 unsigned int is_64, struct msi_msg *msg);
 	void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
-	u64 (*dma_get_required_mask)(struct pnv_phb *phb,
-				     struct pci_dev *pdev);
 	void (*fixup_phb)(struct pci_controller *hose);
 	u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
 	int (*init_m64)(struct pnv_phb *phb);
-	void (*reserve_m64_pe)(struct pnv_phb *phb);
-	int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
+	void (*reserve_m64_pe)(struct pci_bus *bus,
+			       unsigned long *pe_bitmap, bool all);
+	int (*pick_m64_pe)(struct pci_bus *bus, bool all);
 	int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
 	void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
 	int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 9269e30..6dbc0a1 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -12,15 +12,9 @@
 #ifdef CONFIG_PCI
 extern void pnv_pci_init(void);
 extern void pnv_pci_shutdown(void);
-extern u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev);
 #else
 static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
-
-static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
-{
-	return 0;
-}
 #endif
 
 extern u32 pnv_get_supported_cpuidle_states(void);
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 6eb808f..5dcbdea 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -128,7 +128,7 @@
 
 	pr_info_once("Registering arch random hook.\n");
 
-	ppc_md.get_random_long = powernv_get_random_long;
+	ppc_md.get_random_seed = powernv_get_random_long;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 53737e0..685b3cb 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -165,14 +165,6 @@
 {
 }
 
-static u64 pnv_dma_get_required_mask(struct device *dev)
-{
-	if (dev_is_pci(dev))
-		return pnv_pci_dma_get_required_mask(to_pci_dev(dev));
-
-	return __dma_get_required_mask(dev);
-}
-
 static void pnv_shutdown(void)
 {
 	/* Let the PCI code clear up IODA tables */
@@ -243,6 +235,13 @@
 	} else {
 		/* Primary waits for the secondaries to have reached OPAL */
 		pnv_kexec_wait_secondaries_down();
+
+		/*
+		 * We might be running as little-endian - now that interrupts
+		 * are disabled, reset the HILE bit to big-endian so we don't
+		 * take interrupts in the wrong endian later
+		 */
+		opal_reinit_cpus(OPAL_REINIT_CPUS_HILE_BE);
 	}
 }
 #endif /* CONFIG_KEXEC */
@@ -314,7 +313,6 @@
 	.machine_shutdown	= pnv_shutdown,
 	.power_save             = power7_idle,
 	.calibrate_decr		= generic_calibrate_decr,
-	.dma_get_required_mask	= pnv_dma_get_required_mask,
 #ifdef CONFIG_KEXEC
 	.kexec_cpu_down		= pnv_kexec_cpu_down,
 #endif
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index f60f80a..503a73f 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -190,7 +190,7 @@
 
 	hid0 = mfspr(SPRN_HID0);
 	hid0 &= ~HID0_POWER8_DYNLPARDIS;
-	mtspr(SPRN_HID0, hid0);
+	update_power8_hid0(hid0);
 	update_hid_in_slw(hid0);
 
 	while (mfspr(SPRN_HID0) & mask)
@@ -227,7 +227,7 @@
 	/* Write new mode */
 	hid0  = mfspr(SPRN_HID0);
 	hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
-	mtspr(SPRN_HID0, hid0);
+	update_power8_hid0(hid0);
 	update_hid_in_slw(hid0);
 
 	/* Wait for it to happen */
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index a6c42f3..638c406 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -678,7 +678,8 @@
 	return 0;
 }
 
-static int ps3_host_match(struct irq_domain *h, struct device_node *np)
+static int ps3_host_match(struct irq_domain *h, struct device_node *np,
+			  enum irq_domain_bus_token bus_token)
 {
 	/* Match all */
 	return 1;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 0ced387..e9ff44c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -92,13 +92,12 @@
 		return NULL;
 
 	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
-	new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+	new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
 	if (!new_prop->name || !new_prop->value) {
 		dlpar_free_drconf_property(new_prop);
 		return NULL;
 	}
 
-	memcpy(new_prop->value, prop->value, prop->length);
 	new_prop->length = prop->length;
 
 	/* Convert the property to cpu endian-ness */
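The hotplug-memory.c hunk collapses a kmalloc()+memcpy() pair into kmemdup(), which allocates and copies in one call with the same failure semantics. The before/after pattern:

	/* before: allocate, then copy */
	new_prop->value = kmalloc(prop->length, GFP_KERNEL);
	if (new_prop->value)
		memcpy(new_prop->value, prop->value, prop->length);

	/* after: one call does both; returns NULL on allocation failure */
	new_prop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);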
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 10510de..0946b98 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1253,11 +1253,10 @@
 		}
 	}
 
-	/* fall back on iommu ops, restore table pointer with ops */
+	/* fall back on iommu ops */
 	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
 		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
-		pci_dma_dev_setup_pSeriesLP(pdev);
 	}
 
 check_mask:
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index c22bb64..272e9ec 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -118,7 +118,7 @@
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
@@ -350,7 +350,7 @@
 	 * So we must reject such requests. */
 
 	expected = 0;
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->msi_attrib.entry_nr != expected) {
 			pr_debug("rtas_msi: bad MSI-X entries.\n");
 			return -EINVAL;
@@ -462,7 +462,7 @@
 	}
 
 	i = 0;
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		hwirq = rtas_query_irq_number(pdn, i++);
 		if (hwirq < 0) {
 			pr_debug("rtas_msi: error (%d) getting hwirq\n", rc);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 02e4a17..3b6647e 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -189,7 +189,8 @@
 	int state;
 	int critical;
 
-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
+	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
+				      &state);
 
 	if (state > 3)
 		critical = 1;		/* Time Critical */
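The ras.c change swaps rtas_get_sensor() for rtas_get_sensor_fast() in the EPOW path; the rationale inferred from the call site (not stated in the hunk) is that the plain variant may sleep while retrying a busy RTAS call, which is not allowed in interrupt context. A hedged sketch of reading the sensor from a hard-irq handler:

	#include <linux/interrupt.h>
	#include <asm/rtas.h>

	static irqreturn_t epow_irq_sketch(int irq, void *dev_id)
	{
		int state;

		/* the _fast variant does not sleep, so it is usable here */
		if (rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
					 &state) < 0)
			return IRQ_HANDLED;

		/* act on 'state' ... */
		return IRQ_HANDLED;
	}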
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index e096087..31ca557 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -38,7 +38,7 @@
 
 	pr_info("Registering arch random hook.\n");
 
-	ppc_md.get_random_long = pseries_get_random_long;
+	ppc_md.get_random_seed = pseries_get_random_long;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index df6a704..39a74fa 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -254,19 +254,26 @@
 static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct of_reconfig_data *rd = data;
-	struct device_node *np = rd->dn;
-	struct pci_dn *pci = NULL;
+	struct device_node *parent, *np = rd->dn;
+	struct pci_dn *pdn;
 	int err = NOTIFY_OK;
 
 	switch (action) {
 	case OF_RECONFIG_ATTACH_NODE:
-		pci = np->parent->data;
-		if (pci) {
-			update_dn_pci_info(np, pci->phb);
-
-			/* Create EEH device for the OF node */
-			eeh_dev_init(PCI_DN(np), pci->phb);
+		parent = of_get_parent(np);
+		pdn = parent ? PCI_DN(parent) : NULL;
+		if (pdn) {
+			/* Create pdn and EEH device */
+			update_dn_pci_info(np, pdn->phb);
+			eeh_dev_init(PCI_DN(np), pdn->phb);
 		}
+
+		of_node_put(parent);
+		break;
+	case OF_RECONFIG_DETACH_NODE:
+		pdn = PCI_DN(np);
+		if (pdn)
+			list_del(&pdn->list);
 		break;
 	default:
 		err = NOTIFY_DONE;
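The reconfig notifier above now looks up the parent with of_get_parent(), which takes a reference that must be dropped with of_node_put() on every path, and it also unlinks the pci_dn when a node is detached. A brief sketch of the get/put pairing the hunk relies on:

	struct device_node *parent;

	parent = of_get_parent(np);	/* takes a reference, may return NULL */
	if (parent) {
		/* ... use PCI_DN(parent) ... */
	}
	of_node_put(parent);		/* of_node_put(NULL) is a no-op */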
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index ee90db1..f86250c 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -132,7 +132,7 @@
 		phys_mem += vec.bv_len;
 		transfered += vec.bv_len;
 	}
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 /**
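The axonram hunk reflects the block-layer change in this merge window: bio_endio() lost its error argument, and completion status now travels in the bio itself (bio->bi_error in this kernel, stated here as an assumption based on the surrounding series). Sketch of the two paths under that assumption:

	/* success */
	bio_endio(bio);

	/* failure: record the error on the bio before completing it */
	bio->bi_error = -EIO;
	bio_endio(bio);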
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 4f78695..e2ea519 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -147,7 +147,7 @@
 	spin_lock_irqsave(&cpm_muram_lock, flags);
 	cpm_muram_info.alignment = align;
 	start = rh_alloc(&cpm_muram_info, size, "commproc");
-	memset(cpm_muram_addr(start), 0, size);
+	memset_io(cpm_muram_addr(start), 0, size);
 	spin_unlock_irqrestore(&cpm_muram_lock, flags);
 
 	return start;
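The muram allocator clears ioremapped memory, so plain memset() is replaced with memset_io(), the accessor for __iomem mappings:

	void __iomem *p = cpm_muram_addr(start);

	memset_io(p, 0, size);	/* __iomem memory must not be touched with memset() */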
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 90bcdfe..b734863 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -313,20 +313,11 @@
 	set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
 }
 
-static void dma_dev_setup_dart(struct device *dev)
-{
-	/* We only have one iommu table on the mac for now, which makes
-	 * things simple. Setup all PCI devices to point to this table
-	 */
-	if (get_dma_ops(dev) == &dma_direct_ops)
-		set_dma_offset(dev, DART_U4_BYPASS_BASE);
-	else
-		set_iommu_table_base(dev, &iommu_table_dart);
-}
-
 static void pci_dma_dev_setup_dart(struct pci_dev *dev)
 {
-	dma_dev_setup_dart(&dev->dev);
+	if (dart_is_u4)
+		set_dma_offset(&dev->dev, DART_U4_BYPASS_BASE);
+	set_iommu_table_base(&dev->dev, &iommu_table_dart);
 }
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
@@ -370,7 +361,6 @@
 		dev_info(dev, "Using 32-bit DMA via iommu\n");
 		set_dma_ops(dev, &dma_iommu_ops);
 	}
-	dma_dev_setup_dart(dev);
 
 	*dev->dma_mask = dma_mask;
 	return 0;
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 2d20f10..eca0b00 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -177,7 +177,8 @@
 	return irq_linear_revmap(global_ehv_pic->irqhost, irq);
 }
 
-static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
+static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
+			      enum irq_domain_bus_token bus_token)
 {
 	/* Exact match, unless ehv_pic node is NULL */
 	return h->of_node == NULL || h->of_node == node;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 5236e54..5916da1 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -129,7 +129,7 @@
 	struct msi_desc *entry;
 	struct fsl_msi *msi_data;
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 		msi_data = irq_get_chip_data(entry->irq);
@@ -219,7 +219,7 @@
 		}
 	}
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		/*
 		 * Loop over all the MSI devices until we find one that has an
 		 * available interrupt.
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 9a8fcf0..ebc1f412 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1113,7 +1113,7 @@
 			IRQF_SHARED,
 			"[PCI] PME", hose);
 	if (res < 0) {
-		dev_err(&dev->dev, "Unable to requiest irq %d for PME\n", pme_irq);
+		dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
 		irq_dispose_mapping(pme_irq);
 
 		return -ENODEV;
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index 31c3347..e1a9c2c 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -162,7 +162,8 @@
 	.flags = IORESOURCE_BUSY,
 };
 
-static int i8259_host_match(struct irq_domain *h, struct device_node *node)
+static int i8259_host_match(struct irq_domain *h, struct device_node *node,
+			    enum irq_domain_bus_token bus_token)
 {
 	return h->of_node == NULL || h->of_node == node;
 }
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index d78f136..6b2b689 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -671,7 +671,8 @@
 	.irq_set_type	= ipic_set_irq_type,
 };
 
-static int ipic_host_match(struct irq_domain *h, struct device_node *node)
+static int ipic_host_match(struct irq_domain *h, struct device_node *node,
+			   enum irq_domain_bus_token bus_token)
 {
 	/* Exact match, unless ipic node is NULL */
 	return h->of_node == NULL || h->of_node == node;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index c8e7333..97a8ae8 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1007,7 +1007,8 @@
 #endif /* CONFIG_MPIC_U3_HT_IRQS */
 
 
-static int mpic_host_match(struct irq_domain *h, struct device_node *node)
+static int mpic_host_match(struct irq_domain *h, struct device_node *node,
+			   enum irq_domain_bus_token bus_token)
 {
 	/* Exact match, unless mpic node is NULL */
 	return h->of_node == NULL || h->of_node == node;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index fc46ef3..70fbd56 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -108,7 +108,7 @@
 {
 	struct msi_desc *entry;
 
-        list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
@@ -140,7 +140,7 @@
 		return -ENXIO;
 	}
 
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
 		if (hwirq < 0) {
 			pr_debug("u3msi: failed allocating hwirq\n");
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 87f9623..52a93dc 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -51,7 +51,7 @@
 		return -EINVAL;
 	}
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
 		if (irq < 0) {
 			pr_debug("%s: Failed to allocate msi interrupt\n",
@@ -109,7 +109,7 @@
 	struct msi_desc *entry;
 	int irq;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 
@@ -132,7 +132,7 @@
 	struct pci_controller *phb;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (IS_ERR(mem)) {
+	if (!mem) {
 		dev_err(dev, "Unable to get mmio space\n");
 		return -EINVAL;
 	}
@@ -157,7 +157,7 @@
 		goto out;
 
 	ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
-	if (IS_ERR(ppc4xx_hsta_msi.irq_map)) {
+	if (!ppc4xx_hsta_msi.irq_map) {
 		ret = -ENOMEM;
 		goto out1;
 	}
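Both fixes in this file correct error checks: platform_get_resource() and kmalloc() return NULL on failure rather than an ERR_PTR, so the old IS_ERR() tests could never fire. The corrected pattern:

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)		/* NULL on failure, never ERR_PTR */
		return -EINVAL;

	buf = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
	if (!buf)		/* likewise NULL, so IS_ERR() was wrong here */
		return -ENOMEM;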
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 6eb21f2..24d0470 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -93,7 +93,7 @@
 	if (!msi_data->msi_virqs)
 		return -ENOMEM;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
 		if (int_no >= 0)
 			break;
@@ -127,7 +127,7 @@
 
 	dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		if (entry->irq == NO_IRQ)
 			continue;
 		irq_set_msi_desc(entry->irq, NULL);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 6512cd8..47b352e 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -244,7 +244,8 @@
 	.irq_mask_ack = qe_ic_mask_irq,
 };
 
-static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
+			    enum irq_domain_bus_token bus_token)
 {
 	/* Exact match, unless qe_ic node is NULL */
 	return h->of_node == NULL || h->of_node == node;
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 68c7e5c..11ac964 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -72,7 +72,7 @@
 	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
 	 * at that level, so we do it here by hand.
 	 */
-	if (d->msi_desc)
+	if (irq_data_get_msi_desc(d))
 		pci_msi_unmask_irq(d);
 #endif
 
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index 0af97de..d1c625c 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -75,7 +75,7 @@
 	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
 	 * at that level, so we do it here by hand.
 	 */
-	if (d->msi_desc)
+	if (irq_data_get_msi_desc(d))
 		pci_msi_unmask_irq(d);
 #endif
 	/* unmask it */
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 08c248e..47e43b7 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -298,7 +298,8 @@
 }
 #endif /* CONFIG_SMP */
 
-static int xics_host_match(struct irq_domain *h, struct device_node *node)
+static int xics_host_match(struct irq_domain *h, struct device_node *node,
+			   enum irq_domain_bus_token bus_token)
 {
 	struct ics *ics;
 
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index e599259..6ef1231 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1987,7 +1987,6 @@
 			case '^':
 				adrs -= size;
 				break;
-				break;
 			case '/':
 				if (nslash > 0)
 					adrs -= 1 << nslash;
@@ -2731,7 +2730,7 @@
 void dump_segments(void)
 {
 	int i;
-	unsigned long esid,vsid,valid;
+	unsigned long esid,vsid;
 	unsigned long llp;
 
 	printf("SLB contents of cpu 0x%x\n", smp_processor_id());
@@ -2739,10 +2738,9 @@
 	for (i = 0; i < mmu_slb_size; i++) {
 		asm volatile("slbmfee  %0,%1" : "=r" (esid) : "r" (i));
 		asm volatile("slbmfev  %0,%1" : "=r" (vsid) : "r" (i));
-		valid = (esid & SLB_ESID_V);
-		if (valid | esid | vsid) {
+		if (esid || vsid) {
 			printf("%02d %016lx %016lx", i, esid, vsid);
-			if (valid) {
+			if (esid & SLB_ESID_V) {
 				llp = vsid & SLB_VSID_LLP;
 				if (vsid & SLB_VSID_B_1T) {
 					printf("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx \n",
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index 2938934..e256592 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -6,3 +6,4 @@
 obj-$(CONFIG_APPLDATA_BASE)	+= appldata/
 obj-y				+= net/
 obj-$(CONFIG_PCI)		+= pci/
+obj-$(CONFIG_NUMA)		+= numa/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b06dc38..4827870 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,18 +99,22 @@
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS2
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select GENERIC_CLOCKEVENTS
+	select GENERIC_CPU_AUTOPROBE
 	select GENERIC_CPU_DEVICES if !SMP
 	select GENERIC_FIND_FIRST_BIT
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HAVE_ALIGNED_STRUCT_PAGE if SLUB
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_EARLY_PFN_TO_NID
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
@@ -153,6 +157,7 @@
 	select VIRT_CPU_ACCOUNTING
 	select VIRT_TO_BUS
 
+
 config SCHED_OMIT_FRAME_POINTER
 	def_bool y
 
@@ -385,6 +390,76 @@
 config SCHED_SMT
 	def_bool n
 
+# Some NUMA nodes have memory ranges that span
+# other nodes.	Even though a pfn is valid and
+# between a node's start and end pfns, it may not
+# reside on that node.	See memmap_init_zone()
+# for details. <- They meant memory holes!
+config NODES_SPAN_OTHER_NODES
+	def_bool NUMA
+
+config NUMA
+	bool "NUMA support"
+	depends on SMP && 64BIT && SCHED_TOPOLOGY
+	default n
+	help
+	  Enable NUMA support
+
+	  This option adds NUMA support to the kernel.
+
+	  An operation mode can be selected by appending
+	  numa=<method> to the kernel command line.
+
+	  The default behaviour is identical to appending numa=plain to
+	  the command line. This will create just one node with all
+	  available memory and all CPUs in it.
+
+config NODES_SHIFT
+	int "Maximum NUMA nodes (as a power of 2)"
+	range 1 10
+	depends on NUMA
+	default "4"
+	help
+	  Specify the maximum number of NUMA nodes available on the target
+	  system. Increases memory reserved to accommodate various tables.
+
+menu "Select NUMA modes"
+	depends on NUMA
+
+config NUMA_EMU
+	bool "NUMA emulation"
+	default y
+	help
+	  Numa emulation mode will split the available system memory into
+	  equal chunks which then are distributed over the configured number
+	  of nodes in a round-robin manner.
+
+	  The number of fake nodes is limited by the number of available memory
+	  chunks (i.e. memory size / fake size) and the number of supported
+	  nodes in the kernel.
+
+	  The CPUs are assigned to the nodes in a way that partially respects
+	  the original machine topology (if supported by the machine).
+	  Fair distribution of the CPUs is not guaranteed.
+
+config EMU_SIZE
+	hex "NUMA emulation memory chunk size"
+	default 0x10000000
+	range 0x400000 0x100000000
+	depends on NUMA_EMU
+	help
+	  Select the default size by which the memory is chopped and then
+	  assigned to emulated NUMA nodes.
+
+	  This can be overridden by specifying
+
+	  emu_size=<n>
+
+	  on the kernel command line where also suffixes K, M, G, and T are
+	  supported.
+
+endmenu
+
 config SCHED_MC
 	def_bool n
 
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 667b1bc..e8d4423 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -33,6 +33,8 @@
 mflags-$(CONFIG_MARCH_ZEC12)  := -march=zEC12
 mflags-$(CONFIG_MARCH_Z13)   := -march=z13
 
+export CC_FLAGS_MARCH := $(mflags-y)
+
 aflags-y += $(mflags-y)
 cflags-y += $(mflags-y)
 
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 940cbdd..0c98f15 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -13,6 +13,7 @@
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
@@ -50,6 +51,7 @@
 CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index d793fec..82083e1 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -13,6 +13,7 @@
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
@@ -49,6 +50,7 @@
 CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 38a77e9..c05c9e0 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -13,6 +13,8 @@
 CONFIG_RCU_FAST_NO_HZ=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
@@ -48,6 +50,7 @@
 CONFIG_MARCH_Z196=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 5566ce8..0b9b95f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -24,6 +24,7 @@
 #include <crypto/algapi.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/cpufeature.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include "crypt_s390.h"
@@ -976,7 +977,7 @@
 	crypto_unregister_alg(&aes_alg);
 }
 
-module_init(aes_s390_init);
+module_cpu_feature_match(MSA, aes_s390_init);
 module_exit(aes_s390_fini);
 
 MODULE_ALIAS_CRYPTO("aes-all");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 9e05cc4..fba1c10 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -16,6 +16,7 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <crypto/algapi.h>
 #include <crypto/des.h>
@@ -616,7 +617,7 @@
 	crypto_unregister_alg(&des_alg);
 }
 
-module_init(des_s390_init);
+module_cpu_feature_match(MSA, des_s390_init);
 module_exit(des_s390_exit);
 
 MODULE_ALIAS_CRYPTO("des");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index b258110..26e14ef 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -9,6 +9,7 @@
 
 #include <crypto/internal/hash.h>
 #include <linux/module.h>
+#include <linux/cpufeature.h>
 
 #include "crypt_s390.h"
 
@@ -158,7 +159,7 @@
 	crypto_unregister_shash(&ghash_alg);
 }
 
-module_init(ghash_mod_init);
+module_cpu_feature_match(MSA, ghash_mod_init);
 module_exit(ghash_mod_exit);
 
 MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 9d5192c..b8045b9 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
+#include <linux/cpufeature.h>
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <asm/debug.h>
@@ -914,6 +915,5 @@
 	}
 }
 
-
-module_init(prng_init);
+module_cpu_feature_match(MSA, prng_init);
 module_exit(prng_exit);
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index 5b2bee3..9208ead 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -26,6 +26,7 @@
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/cpufeature.h>
 #include <crypto/sha.h>
 
 #include "crypt_s390.h"
@@ -100,7 +101,7 @@
 	crypto_unregister_shash(&alg);
 }
 
-module_init(sha1_s390_init);
+module_cpu_feature_match(MSA, sha1_s390_init);
 module_exit(sha1_s390_fini);
 
 MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index b74ff15..667888f 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -16,6 +16,7 @@
 #include <crypto/internal/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/cpufeature.h>
 #include <crypto/sha.h>
 
 #include "crypt_s390.h"
@@ -140,7 +141,7 @@
 	crypto_unregister_shash(&sha256_alg);
 }
 
-module_init(sha256_s390_init);
+module_cpu_feature_match(MSA, sha256_s390_init);
 module_exit(sha256_s390_fini);
 
 MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 0c36989..2ba66b1 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/cpufeature.h>
 
 #include "sha.h"
 #include "crypt_s390.h"
@@ -148,7 +149,7 @@
 	crypto_unregister_shash(&sha384_alg);
 }
 
-module_init(init);
+module_cpu_feature_match(MSA, init);
 module_exit(fini);
 
 MODULE_LICENSE("GPL");
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index adbe380..117fa5c 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -27,6 +27,7 @@
 #define __ATOMIC_OR	"lao"
 #define __ATOMIC_AND	"lan"
 #define __ATOMIC_ADD	"laa"
+#define __ATOMIC_XOR	"lax"
 #define __ATOMIC_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -49,6 +50,7 @@
 #define __ATOMIC_OR	"or"
 #define __ATOMIC_AND	"nr"
 #define __ATOMIC_ADD	"ar"
+#define __ATOMIC_XOR	"xr"
 #define __ATOMIC_BARRIER "\n"
 
 #define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -118,15 +120,17 @@
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
+#define ATOMIC_OP(op, OP)						\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
 }
 
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
-}
+ATOMIC_OP(and, AND)
+ATOMIC_OP(or, OR)
+ATOMIC_OP(xor, XOR)
+
+#undef ATOMIC_OP
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -167,6 +171,7 @@
 #define __ATOMIC64_OR	"laog"
 #define __ATOMIC64_AND	"lang"
 #define __ATOMIC64_ADD	"laag"
+#define __ATOMIC64_XOR	"laxg"
 #define __ATOMIC64_BARRIER "bcr	14,0\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -189,6 +194,7 @@
 #define __ATOMIC64_OR	"ogr"
 #define __ATOMIC64_AND	"ngr"
 #define __ATOMIC64_ADD	"agr"
+#define __ATOMIC64_XOR	"xgr"
 #define __ATOMIC64_BARRIER "\n"
 
 #define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
@@ -247,16 +253,6 @@
 	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
 }
 
-static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
-}
-
-static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
-{
-	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
-}
-
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline long long atomic64_cmpxchg(atomic64_t *v,
@@ -270,6 +266,17 @@
 	return old;
 }
 
+#define ATOMIC64_OP(op, OP)						\
+static inline void atomic64_##op(long i, atomic64_t *v)			\
+{									\
+	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
+}
+
+ATOMIC64_OP(and, AND)
+ATOMIC64_OP(or, OR)
+ATOMIC64_OP(xor, XOR)
+
+#undef ATOMIC64_OP
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
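The atomic.h hunks drop atomic_set_mask()/atomic_clear_mask() in favour of generated atomic_{and,or,xor}() and their 64-bit counterparts, part of a tree-wide move to the bitwise atomic ops. Roughly, the generator expands as below, and existing callers migrate as shown in the comments:

	/* ATOMIC_OP(or, OR) expands to approximately: */
	static inline void atomic_or(int i, atomic_t *v)
	{
		__ATOMIC_LOOP(v, i, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
	}

	/* migration for existing users:
	 *   atomic_set_mask(mask, v)   ->  atomic_or(mask, v);
	 *   atomic_clear_mask(mask, v) ->  atomic_and(~mask, v);
	 */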
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index e6f8615..d48fe01 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -42,12 +42,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
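The barrier.h hunk replaces ACCESS_ONCE() with READ_ONCE()/WRITE_ONCE(); the older macro relies on a volatile scalar cast and does not work reliably on non-scalar types, which the newer helpers handle. Migration pattern:

	WRITE_ONCE(*p, v);		/* was: ACCESS_ONCE(*p) = (v);  */
	val = READ_ONCE(*p);		/* was: val = ACCESS_ONCE(*p);  */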
diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h
new file mode 100644
index 0000000..fa7e69b
--- /dev/null
+++ b/arch/s390/include/asm/cpufeature.h
@@ -0,0 +1,29 @@
+/*
+ * Module interface for CPU features
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_S390_CPUFEATURE_H
+#define __ASM_S390_CPUFEATURE_H
+
+#include <asm/elf.h>
+
+/* Hardware features on Linux on z Systems are indicated by facility bits that
+ * are mapped to the so-called machine flags.  Particular machine flags are
+ * then used to define ELF hardware capabilities; most notably hardware flags
+ * that are essential for user space / glibc.
+ *
+ * Restrict the set of exposed CPU features to ELF hardware capabilities for
+ * now.  Additional machine flags can be indicated by values larger than
+ * MAX_ELF_HWCAP_FEATURES.
+ */
+#define MAX_ELF_HWCAP_FEATURES	(8 * sizeof(elf_hwcap))
+#define MAX_CPU_FEATURES	MAX_ELF_HWCAP_FEATURES
+
+#define cpu_feature(feat)	ilog2(HWCAP_S390_ ## feat)
+
+int cpu_have_feature(unsigned int nr);
+
+#endif /* __ASM_S390_CPUFEATURE_H */
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab..17a3735 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -46,6 +46,8 @@
 	__ctl_load(reg, cr, cr);
 }
 
+void __ctl_set_vx(void);
+
 void smp_ctl_set_bit(int cr, int bit);
 void smp_ctl_clear_bit(int cr, int bit);
 
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 629b79a..f7e5c36 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -214,6 +214,9 @@
 void etr_switch_to_local(void);
 void etr_sync_check(void);
 
+/* notifier for syncs */
+extern struct atomic_notifier_head s390_epoch_delta_notifier;
+
 /* STP interruption parameter */
 struct stp_irq_parm {
 	unsigned int _pad0	: 14;
diff --git a/arch/s390/include/asm/fpu-internal.h b/arch/s390/include/asm/fpu-internal.h
new file mode 100644
index 0000000..55dc2c0
--- /dev/null
+++ b/arch/s390/include/asm/fpu-internal.h
@@ -0,0 +1,110 @@
+/*
+ * General floating pointer and vector register helpers
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef _ASM_S390_FPU_INTERNAL_H
+#define _ASM_S390_FPU_INTERNAL_H
+
+#define FPU_USE_VX		1	/* Vector extension is active */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <asm/linkage.h>
+#include <asm/ctl_reg.h>
+#include <asm/sigcontext.h>
+
+struct fpu {
+	__u32 fpc;			/* Floating-point control */
+	__u32 flags;
+	union {
+		void *regs;
+		freg_t *fprs;		/* Floating-point register save area */
+		__vector128 *vxrs;	/* Vector register save area */
+	};
+};
+
+void save_fpu_regs(void);
+
+#define is_vx_fpu(fpu) (!!((fpu)->flags & FPU_USE_VX))
+#define is_vx_task(tsk) (!!((tsk)->thread.fpu.flags & FPU_USE_VX))
+
+/* VX array structure for address operand constraints in inline assemblies */
+struct vx_array { __vector128 _[__NUM_VXRS]; };
+
+static inline int test_fp_ctl(u32 fpc)
+{
+	u32 orig_fpc;
+	int rc;
+
+	asm volatile(
+		"	efpc    %1\n"
+		"	sfpc	%2\n"
+		"0:	sfpc	%1\n"
+		"	la	%0,0\n"
+		"1:\n"
+		EX_TABLE(0b,1b)
+		: "=d" (rc), "=d" (orig_fpc)
+		: "d" (fpc), "0" (-EINVAL));
+	return rc;
+}
+
+static inline void save_vx_regs_safe(__vector128 *vxrs)
+{
+	unsigned long cr0, flags;
+
+	flags = arch_local_irq_save();
+	__ctl_store(cr0, 0, 0);
+	__ctl_set_bit(0, 17);
+	__ctl_set_bit(0, 18);
+	asm volatile(
+		"	la	1,%0\n"
+		"	.word	0xe70f,0x1000,0x003e\n"	/* vstm 0,15,0(1) */
+		"	.word	0xe70f,0x1100,0x0c3e\n"	/* vstm 16,31,256(1) */
+		: "=Q" (*(struct vx_array *) vxrs) : : "1");
+	__ctl_load(cr0, 0, 0);
+	arch_local_irq_restore(flags);
+}
+
+static inline void convert_vx_to_fp(freg_t *fprs, __vector128 *vxrs)
+{
+	int i;
+
+	for (i = 0; i < __NUM_FPRS; i++)
+		fprs[i] = *(freg_t *)(vxrs + i);
+}
+
+static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
+{
+	int i;
+
+	for (i = 0; i < __NUM_FPRS; i++)
+		*(freg_t *)(vxrs + i) = fprs[i];
+}
+
+static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
+{
+	fpregs->pad = 0;
+	if (is_vx_fpu(fpu))
+		convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
+	else
+		memcpy((freg_t *)&fpregs->fprs, fpu->fprs,
+		       sizeof(fpregs->fprs));
+}
+
+static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
+{
+	if (is_vx_fpu(fpu))
+		convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
+	else
+		memcpy(fpu->fprs, (freg_t *)&fpregs->fprs,
+		       sizeof(fpregs->fprs));
+}
+
+#endif
+
+#endif /* _ASM_S390_FPU_INTERNAL_H */
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index cb5fdf3..437e9af 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -57,6 +57,8 @@
  */
 #define pci_iomap pci_iomap
 #define pci_iounmap pci_iounmap
+#define pci_iomap_wc pci_iomap
+#define pci_iomap_wc_range pci_iomap_range
 
 #define memcpy_fromio(dst, src, count)	zpci_memcpy_fromio(dst, src, count)
 #define memcpy_toio(dst, src, count)	zpci_memcpy_toio(dst, src, count)
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 69972b7..7f9fd5e 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -12,14 +12,29 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("0:	brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
 		".pushsection __jump_table, \"aw\"\n"
 		".balign 8\n"
 		".quad 0b, %l[label], %0\n"
 		".popsection\n"
-		: : "X" (key) : : label);
+		: : "X" (&((char *)key)[branch]) : : label);
+
+	return false;
+label:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("0:	brcl 15, %l[label]\n"
+		".pushsection __jump_table, \"aw\"\n"
+		".balign 8\n"
+		".quad 0b, %l[label], %0\n"
+		".popsection\n"
+		: : "X" (&((char *)key)[branch]) : : label);
+
 	return false;
 label:
 	return true;
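The jump-label hunk adds arch_static_branch_jump() and a 'branch' argument, which appears to back the generic static-key rework in this cycle: one variant emits a nop that can be patched to a jump, the other an unconditional jump that can be patched to a nop, and the branch polarity is folded into the low bit of the key pointer stored in __jump_table (the &((char *)key)[branch] trick). A hedged sketch of the generic API these hooks serve, assuming the new DEFINE_STATIC_KEY_* helpers are available:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(use_fast_path);

	int pick_path(void)
	{
		if (static_branch_unlikely(&use_fast_path))
			return 1;	/* compiled as a patched jump, not a load+test */
		return 0;
	}

	/* flipped at runtime, e.g. static_branch_enable(&use_fast_path); */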
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3024acb..3d012e0 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -22,6 +22,7 @@
 #include <linux/kvm.h>
 #include <asm/debug.h>
 #include <asm/cpu.h>
+#include <asm/fpu-internal.h>
 #include <asm/isc.h>
 
 #define KVM_MAX_VCPUS 64
@@ -258,6 +259,9 @@
 	u32 diagnose_10;
 	u32 diagnose_44;
 	u32 diagnose_9c;
+	u32 diagnose_258;
+	u32 diagnose_308;
+	u32 diagnose_500;
 };
 
 #define PGM_OPERATION			0x01
@@ -498,10 +502,9 @@
 
 struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
-	s390_fp_regs      host_fpregs;
 	unsigned int      host_acrs[NUM_ACRS];
-	s390_fp_regs      guest_fpregs;
-	struct kvm_s390_vregs	*host_vregs;
+	struct fpu	  host_fpregs;
+	struct fpu	  guest_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer    ckc_timer;
 	struct kvm_s390_pgm_info pgm;
@@ -630,7 +633,6 @@
 
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_check_processor_compat(void *rtn) {}
-static inline void kvm_arch_exit(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
diff --git a/arch/s390/include/asm/linkage.h b/arch/s390/include/asm/linkage.h
index fc8a828..27da78c 100644
--- a/arch/s390/include/asm/linkage.h
+++ b/arch/s390/include/asm/linkage.h
@@ -6,4 +6,26 @@
 #define __ALIGN .align 4, 0x07
 #define __ALIGN_STR __stringify(__ALIGN)
 
+#ifndef __ASSEMBLY__
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target)	\
+	".section __ex_table,\"a\"\n"	\
+	".align	4\n"			\
+	".long	(" #_fault ") - .\n"	\
+	".long	(" #_target ") - .\n"	\
+	".previous\n"
+
+#else /* __ASSEMBLY__ */
+
+#define EX_TABLE(_fault, _target)	\
+	.section __ex_table,"a"	;	\
+	.align	4 ;			\
+	.long	(_fault) - . ;		\
+	.long	(_target) - . ;		\
+	.previous
+
+#endif /* __ASSEMBLY__ */
 #endif
diff --git a/arch/s390/include/asm/mmzone.h b/arch/s390/include/asm/mmzone.h
new file mode 100644
index 0000000..a9e834e
--- /dev/null
+++ b/arch/s390/include/asm/mmzone.h
@@ -0,0 +1,16 @@
+/*
+ * NUMA support for s390
+ *
+ * Copyright IBM Corp. 2015
+ */
+
+#ifndef _ASM_S390_MMZONE_H
+#define _ASM_S390_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[nid])
+
+#endif /* CONFIG_NUMA */
+#endif /* _ASM_S390_MMZONE_H */
diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
new file mode 100644
index 0000000..2a0efc6
--- /dev/null
+++ b/arch/s390/include/asm/numa.h
@@ -0,0 +1,35 @@
+/*
+ * NUMA support for s390
+ *
+ * Declare the NUMA core code structures and functions.
+ *
+ * Copyright IBM Corp. 2015
+ */
+
+#ifndef _ASM_S390_NUMA_H
+#define _ASM_S390_NUMA_H
+
+#ifdef CONFIG_NUMA
+
+#include <linux/numa.h>
+#include <linux/cpumask.h>
+
+void numa_setup(void);
+int numa_pfn_to_nid(unsigned long pfn);
+int __node_distance(int a, int b);
+void numa_update_cpu_topology(void);
+
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern int numa_debug_enabled;
+
+#else
+
+static inline void numa_setup(void) { }
+static inline void numa_update_cpu_topology(void) { }
+static inline int numa_pfn_to_nid(unsigned long pfn)
+{
+	return 0;
+}
+
+#endif /* CONFIG_NUMA */
+#endif /* _ASM_S390_NUMA_H */
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index a648338..34d9603 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -170,7 +170,11 @@
 #endif /* CONFIG_HOTPLUG_PCI_S390 */
 
 /* Helpers */
-struct zpci_dev *get_zdev(struct pci_dev *);
+static inline struct zpci_dev *to_zpci(struct pci_dev *pdev)
+{
+	return pdev->sysdata;
+}
+
 struct zpci_dev *get_zdev_by_fid(u32);
 
 /* DMA */
@@ -188,4 +192,20 @@
 void zpci_debug_exit_device(struct zpci_dev *);
 void zpci_debug_info(struct zpci_dev *, struct seq_file *);
 
+#ifdef CONFIG_NUMA
+
+/* Returns the node based on PCI bus */
+static inline int __pcibus_to_node(const struct pci_bus *bus)
+{
+	return NUMA_NO_NODE;
+}
+
+static inline const struct cpumask *
+cpumask_of_pcibus(const struct pci_bus *bus)
+{
+	return cpu_online_mask;
+}
+
+#endif /* CONFIG_NUMA */
+
 #endif
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index f66d827..bdb2f51 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -576,6 +576,19 @@
 	return pte_val(a) == pte_val(b);
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pte_protnone(pte_t pte)
+{
+	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+	/* pmd_large(pmd) implies pmd_present(pmd) */
+	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
+}
+#endif
+
 static inline pgste_t pgste_get_lock(pte_t *ptep)
 {
 	unsigned long new = 0;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index dedb621..085fb0d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -14,10 +14,12 @@
 #define CIF_MCCK_PENDING	0	/* machine check handling is pending */
 #define CIF_ASCE		1	/* user asce needs fixup / uaccess */
 #define CIF_NOHZ_DELAY		2	/* delay HZ disable for a tick */
+#define CIF_FPU			3	/* restore vector registers */
 
 #define _CIF_MCCK_PENDING	(1<<CIF_MCCK_PENDING)
 #define _CIF_ASCE		(1<<CIF_ASCE)
 #define _CIF_NOHZ_DELAY		(1<<CIF_NOHZ_DELAY)
+#define _CIF_FPU		(1<<CIF_FPU)
 
 #ifndef __ASSEMBLY__
 
@@ -28,6 +30,7 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/runtime_instr.h>
+#include <asm/fpu-internal.h>
 
 static inline void set_cpu_flag(int flag)
 {
@@ -85,7 +88,7 @@
  * Thread structure
  */
 struct thread_struct {
-	s390_fp_regs fp_regs;
+	struct fpu fpu;			/* FP and VX register save area */
 	unsigned int  acrs[NUM_ACRS];
         unsigned long ksp;              /* kernel stack pointer             */
 	mm_segment_t mm_segment;
@@ -101,7 +104,6 @@
 	struct runtime_instr_cb *ri_cb;
 	int ri_signum;
 	unsigned char trap_tdb[256];	/* Transaction abort diagnose block */
-	__vector128 *vxrs;		/* Vector register save area */
 };
 
 /* Flag to disable transactions. */
@@ -231,6 +233,17 @@
 }
 
 /*
+ * Extract current PSW mask
+ */
+static inline unsigned long __extract_psw(void)
+{
+	unsigned int reg1, reg2;
+
+	asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
+	return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
+}
+
+/*
  * Rewind PSW instruction address by specified number of bytes.
  */
 static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
@@ -336,25 +349,6 @@
 	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));	\
 }
 
-/*
- * Helper macro for exception table entries
- */
-#define EX_TABLE(_fault, _target)	\
-	".section __ex_table,\"a\"\n"	\
-	".align	4\n"			\
-	".long	(" #_fault ") - .\n"	\
-	".long	(" #_target ") - .\n"	\
-	".previous\n"
-
-#else /* __ASSEMBLY__ */
-
-#define EX_TABLE(_fault, _target)	\
-	.section __ex_table,"a"	;	\
-	.align	4 ;			\
-	.long	(_fault) - . ;		\
-	.long	(_target) - . ;		\
-	.previous
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index f6ff060..821dde5 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -79,6 +79,6 @@
 int sclp_pci_deconfigure(u32 fid);
 int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 void sclp_early_detect(void);
-long _sclp_print_early(const char *);
+int _sclp_print_early(const char *);
 
 #endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index d62e7a6..dcadfde 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -8,139 +8,12 @@
 #define __ASM_SWITCH_TO_H
 
 #include <linux/thread_info.h>
+#include <asm/fpu-internal.h>
 #include <asm/ptrace.h>
 
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_cr_regs(struct task_struct *task);
 
-static inline int test_fp_ctl(u32 fpc)
-{
-	u32 orig_fpc;
-	int rc;
-
-	asm volatile(
-		"	efpc    %1\n"
-		"	sfpc	%2\n"
-		"0:	sfpc	%1\n"
-		"	la	%0,0\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "=d" (rc), "=d" (orig_fpc)
-		: "d" (fpc), "0" (-EINVAL));
-	return rc;
-}
-
-static inline void save_fp_ctl(u32 *fpc)
-{
-	asm volatile(
-		"       stfpc   %0\n"
-		: "+Q" (*fpc));
-}
-
-static inline int restore_fp_ctl(u32 *fpc)
-{
-	int rc;
-
-	asm volatile(
-		"	lfpc    %1\n"
-		"0:	la	%0,0\n"
-		"1:\n"
-		EX_TABLE(0b,1b)
-		: "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
-	return rc;
-}
-
-static inline void save_fp_regs(freg_t *fprs)
-{
-	asm volatile("std 0,%0" : "=Q" (fprs[0]));
-	asm volatile("std 2,%0" : "=Q" (fprs[2]));
-	asm volatile("std 4,%0" : "=Q" (fprs[4]));
-	asm volatile("std 6,%0" : "=Q" (fprs[6]));
-	asm volatile("std 1,%0" : "=Q" (fprs[1]));
-	asm volatile("std 3,%0" : "=Q" (fprs[3]));
-	asm volatile("std 5,%0" : "=Q" (fprs[5]));
-	asm volatile("std 7,%0" : "=Q" (fprs[7]));
-	asm volatile("std 8,%0" : "=Q" (fprs[8]));
-	asm volatile("std 9,%0" : "=Q" (fprs[9]));
-	asm volatile("std 10,%0" : "=Q" (fprs[10]));
-	asm volatile("std 11,%0" : "=Q" (fprs[11]));
-	asm volatile("std 12,%0" : "=Q" (fprs[12]));
-	asm volatile("std 13,%0" : "=Q" (fprs[13]));
-	asm volatile("std 14,%0" : "=Q" (fprs[14]));
-	asm volatile("std 15,%0" : "=Q" (fprs[15]));
-}
-
-static inline void restore_fp_regs(freg_t *fprs)
-{
-	asm volatile("ld 0,%0" : : "Q" (fprs[0]));
-	asm volatile("ld 2,%0" : : "Q" (fprs[2]));
-	asm volatile("ld 4,%0" : : "Q" (fprs[4]));
-	asm volatile("ld 6,%0" : : "Q" (fprs[6]));
-	asm volatile("ld 1,%0" : : "Q" (fprs[1]));
-	asm volatile("ld 3,%0" : : "Q" (fprs[3]));
-	asm volatile("ld 5,%0" : : "Q" (fprs[5]));
-	asm volatile("ld 7,%0" : : "Q" (fprs[7]));
-	asm volatile("ld 8,%0" : : "Q" (fprs[8]));
-	asm volatile("ld 9,%0" : : "Q" (fprs[9]));
-	asm volatile("ld 10,%0" : : "Q" (fprs[10]));
-	asm volatile("ld 11,%0" : : "Q" (fprs[11]));
-	asm volatile("ld 12,%0" : : "Q" (fprs[12]));
-	asm volatile("ld 13,%0" : : "Q" (fprs[13]));
-	asm volatile("ld 14,%0" : : "Q" (fprs[14]));
-	asm volatile("ld 15,%0" : : "Q" (fprs[15]));
-}
-
-static inline void save_vx_regs(__vector128 *vxrs)
-{
-	typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
-
-	asm volatile(
-		"	la	1,%0\n"
-		"	.word	0xe70f,0x1000,0x003e\n"	/* vstm 0,15,0(1) */
-		"	.word	0xe70f,0x1100,0x0c3e\n"	/* vstm 16,31,256(1) */
-		: "=Q" (*(addrtype *) vxrs) : : "1");
-}
-
-static inline void save_vx_regs_safe(__vector128 *vxrs)
-{
-	unsigned long cr0, flags;
-
-	flags = arch_local_irq_save();
-	__ctl_store(cr0, 0, 0);
-	__ctl_set_bit(0, 17);
-	__ctl_set_bit(0, 18);
-	save_vx_regs(vxrs);
-	__ctl_load(cr0, 0, 0);
-	arch_local_irq_restore(flags);
-}
-
-static inline void restore_vx_regs(__vector128 *vxrs)
-{
-	typedef struct { __vector128 _[__NUM_VXRS]; } addrtype;
-
-	asm volatile(
-		"	la	1,%0\n"
-		"	.word	0xe70f,0x1000,0x0036\n"	/* vlm 0,15,0(1) */
-		"	.word	0xe70f,0x1100,0x0c36\n"	/* vlm 16,31,256(1) */
-		: : "Q" (*(addrtype *) vxrs) : "1");
-}
-
-static inline void save_fp_vx_regs(struct task_struct *task)
-{
-	if (task->thread.vxrs)
-		save_vx_regs(task->thread.vxrs);
-	else
-		save_fp_regs(task->thread.fp_regs.fprs);
-}
-
-static inline void restore_fp_vx_regs(struct task_struct *task)
-{
-	if (task->thread.vxrs)
-		restore_vx_regs(task->thread.vxrs);
-	else
-		restore_fp_regs(task->thread.fp_regs.fprs);
-}
-
 static inline void save_access_regs(unsigned int *acrs)
 {
 	typedef struct { int _[NUM_ACRS]; } acrstype;
@@ -157,15 +30,13 @@
 
 #define switch_to(prev,next,last) do {					\
 	if (prev->mm) {							\
-		save_fp_ctl(&prev->thread.fp_regs.fpc);			\
-		save_fp_vx_regs(prev);					\
+		save_fpu_regs();					\
 		save_access_regs(&prev->thread.acrs[0]);		\
 		save_ri_cb(prev->thread.ri_cb);				\
 	}								\
 	if (next->mm) {							\
 		update_cr_regs(next);					\
-		restore_fp_ctl(&next->thread.fp_regs.fpc);		\
-		restore_fp_vx_regs(next);				\
+		set_cpu_flag(CIF_FPU);					\
 		restore_access_regs(&next->thread.acrs[0]);		\
 		restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);	\
 	}								\
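The switch_to() rework replaces eager FP/VX save and restore with a lazy scheme: the outgoing task's state is spilled by save_fpu_regs(), and the incoming task merely gets CIF_FPU set, deferring the vector-register reload. The comments below restate the flow; treat the exact restore point (return to user space) as an assumption, since the entry-path code is not part of this hunk:

	/* switch-out: spill prev's FP or VX registers into prev->thread.fpu */
	save_fpu_regs();

	/* switch-in: do not reload registers now, just mark them stale */
	set_cpu_flag(CIF_FPU);

	/* later, presumably on the return-to-user path, the flag triggers the
	 * actual reload of next->thread.fpu into the hardware registers */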
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 4990f6c..27ebde6 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -2,6 +2,7 @@
 #define _ASM_S390_TOPOLOGY_H
 
 #include <linux/cpumask.h>
+#include <asm/numa.h>
 
 struct sysinfo_15_1_x;
 struct cpu;
@@ -13,6 +14,7 @@
 	unsigned short core_id;
 	unsigned short socket_id;
 	unsigned short book_id;
+	unsigned short node_id;
 	cpumask_t thread_mask;
 	cpumask_t core_mask;
 	cpumask_t book_mask;
@@ -52,6 +54,43 @@
 #define POLARIZATION_VM		(2)
 #define POLARIZATION_VH		(3)
 
+#define SD_BOOK_INIT	SD_CPU_INIT
+
+#ifdef CONFIG_NUMA
+
+#define cpu_to_node cpu_to_node
+static inline int cpu_to_node(int cpu)
+{
+	return per_cpu(cpu_topology, cpu).node_id;
+}
+
+/* Returns a pointer to the cpumask of CPUs on node 'node'. */
+#define cpumask_of_node cpumask_of_node
+static inline const struct cpumask *cpumask_of_node(int node)
+{
+	return node_to_cpumask_map[node];
+}
+
+/*
+ * Returns the number of the node containing node 'node'. This
+ * architecture is flat, so it is a pretty simple function!
+ */
+#define parent_node(node) (node)
+
+#define pcibus_to_node(bus) __pcibus_to_node(bus)
+
+#define node_distance(a, b) __node_distance(a, b)
+
+#else /* !CONFIG_NUMA */
+
+#define numa_node_id numa_node_id
+static inline int numa_node_id(void)
+{
+	return 0;
+}
+
+#endif /* CONFIG_NUMA */
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
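
The new NUMA hooks map a CPU to its node via the per-CPU topology entry and keep a per-node CPU mask as the reverse mapping. A small userspace sketch of that pairing (array sizes and the round-robin node assignment are illustrative, not the kernel's):

#include <stdio.h>

#define NR_CPUS	  8
#define MAX_NODES 2

static int cpu_node[NR_CPUS];			/* cpu_topology[cpu].node_id */
static unsigned long node_cpumask[MAX_NODES];	/* node_to_cpumask_map[node] */

static int cpu_to_node(int cpu)
{
	return cpu_node[cpu];
}

static unsigned long cpumask_of_node(int node)
{
	return node_cpumask[node];
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		cpu_node[cpu] = cpu / (NR_CPUS / MAX_NODES);
		node_cpumask[cpu_node[cpu]] |= 1UL << cpu;
	}
	printf("cpu 5 -> node %d, node 1 mask 0x%lx\n",
	       cpu_to_node(5), cpumask_of_node(1));
	return 0;
}
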
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 91f56b1..525cef7 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -11,16 +11,24 @@
 
 #define __IGNORE_time
 
-/* Ignore NUMA system calls. Not wired up on s390. */
-#define __IGNORE_mbind
-#define __IGNORE_get_mempolicy
-#define __IGNORE_set_mempolicy
-#define __IGNORE_migrate_pages
-#define __IGNORE_move_pages
-
-/* Ignore system calls that are also reachable via sys_socket */
+/* Ignore system calls that are also reachable via sys_socketcall */
 #define __IGNORE_recvmmsg
 #define __IGNORE_sendmmsg
+#define __IGNORE_socket
+#define __IGNORE_socketpair
+#define __IGNORE_bind
+#define __IGNORE_connect
+#define __IGNORE_listen
+#define __IGNORE_accept4
+#define __IGNORE_getsockopt
+#define __IGNORE_setsockopt
+#define __IGNORE_getsockname
+#define __IGNORE_getpeername
+#define __IGNORE_sendto
+#define __IGNORE_sendmsg
+#define __IGNORE_recvfrom
+#define __IGNORE_recvmsg
+#define __IGNORE_shutdown
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_SYS_ALARM
diff --git a/arch/s390/include/asm/vx-insn.h b/arch/s390/include/asm/vx-insn.h
new file mode 100644
index 0000000..4a31356
--- /dev/null
+++ b/arch/s390/include/asm/vx-insn.h
@@ -0,0 +1,480 @@
+/*
+ * Support for Vector Instructions
+ *
+ * Assembler macros to generate .byte/.word code for particular
+ * vector instructions that are supported by recent binutils (>= 2.26) only.
+ *
+ * Copyright IBM Corp. 2015
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+
+#ifndef __ASM_S390_VX_INSN_H
+#define __ASM_S390_VX_INSN_H
+
+#ifdef __ASSEMBLY__
+
+
+/* Macros to generate vector instruction byte code */
+
+#define REG_NUM_INVALID	       255
+
+/* GR_NUM - Retrieve general-purpose register number
+ *
+ * @opd:	Operand to store the register number
+ * @gr:		String designation of the register in the format "%rN"
+ */
+.macro	GR_NUM	opd gr
+	\opd = REG_NUM_INVALID
+	.ifc \gr,%r0
+		\opd = 0
+	.endif
+	.ifc \gr,%r1
+		\opd = 1
+	.endif
+	.ifc \gr,%r2
+		\opd = 2
+	.endif
+	.ifc \gr,%r3
+		\opd = 3
+	.endif
+	.ifc \gr,%r4
+		\opd = 4
+	.endif
+	.ifc \gr,%r5
+		\opd = 5
+	.endif
+	.ifc \gr,%r6
+		\opd = 6
+	.endif
+	.ifc \gr,%r7
+		\opd = 7
+	.endif
+	.ifc \gr,%r8
+		\opd = 8
+	.endif
+	.ifc \gr,%r9
+		\opd = 9
+	.endif
+	.ifc \gr,%r10
+		\opd = 10
+	.endif
+	.ifc \gr,%r11
+		\opd = 11
+	.endif
+	.ifc \gr,%r12
+		\opd = 12
+	.endif
+	.ifc \gr,%r13
+		\opd = 13
+	.endif
+	.ifc \gr,%r14
+		\opd = 14
+	.endif
+	.ifc \gr,%r15
+		\opd = 15
+	.endif
+	.if \opd == REG_NUM_INVALID
+		.error "Invalid general-purpose register designation: \gr"
+	.endif
+.endm
+
+/* VX_R() - Macro to encode the VX_NUM into the instruction */
+#define VX_R(v)		(v & 0x0F)
+
+/* VX_NUM - Retrieve vector register number
+ *
+ * @opd:	Operand to store the register number
+ * @vxr:	String designation of the register in the format "%vN"
+ *
+ * The vector register number is used as an input to the instruction
+ * encoding and, in addition, to compute the RXB field of the
+ * instruction.  Use the VX_R(v) macro to extract the four low-order
+ * bits of the register number for the opcode fields.
+ */
+.macro	VX_NUM	opd vxr
+	\opd = REG_NUM_INVALID
+	.ifc \vxr,%v0
+		\opd = 0
+	.endif
+	.ifc \vxr,%v1
+		\opd = 1
+	.endif
+	.ifc \vxr,%v2
+		\opd = 2
+	.endif
+	.ifc \vxr,%v3
+		\opd = 3
+	.endif
+	.ifc \vxr,%v4
+		\opd = 4
+	.endif
+	.ifc \vxr,%v5
+		\opd = 5
+	.endif
+	.ifc \vxr,%v6
+		\opd = 6
+	.endif
+	.ifc \vxr,%v7
+		\opd = 7
+	.endif
+	.ifc \vxr,%v8
+		\opd = 8
+	.endif
+	.ifc \vxr,%v9
+		\opd = 9
+	.endif
+	.ifc \vxr,%v10
+		\opd = 10
+	.endif
+	.ifc \vxr,%v11
+		\opd = 11
+	.endif
+	.ifc \vxr,%v12
+		\opd = 12
+	.endif
+	.ifc \vxr,%v13
+		\opd = 13
+	.endif
+	.ifc \vxr,%v14
+		\opd = 14
+	.endif
+	.ifc \vxr,%v15
+		\opd = 15
+	.endif
+	.ifc \vxr,%v16
+		\opd = 16
+	.endif
+	.ifc \vxr,%v17
+		\opd = 17
+	.endif
+	.ifc \vxr,%v18
+		\opd = 18
+	.endif
+	.ifc \vxr,%v19
+		\opd = 19
+	.endif
+	.ifc \vxr,%v20
+		\opd = 20
+	.endif
+	.ifc \vxr,%v21
+		\opd = 21
+	.endif
+	.ifc \vxr,%v22
+		\opd = 22
+	.endif
+	.ifc \vxr,%v23
+		\opd = 23
+	.endif
+	.ifc \vxr,%v24
+		\opd = 24
+	.endif
+	.ifc \vxr,%v25
+		\opd = 25
+	.endif
+	.ifc \vxr,%v26
+		\opd = 26
+	.endif
+	.ifc \vxr,%v27
+		\opd = 27
+	.endif
+	.ifc \vxr,%v28
+		\opd = 28
+	.endif
+	.ifc \vxr,%v29
+		\opd = 29
+	.endif
+	.ifc \vxr,%v30
+		\opd = 30
+	.endif
+	.ifc \vxr,%v31
+		\opd = 31
+	.endif
+	.if \opd == REG_NUM_INVALID
+		.error "Invalid vector register designation: \vxr"
+	.endif
+.endm
+
+/* RXB - Compute the RXB field from the vector registers' most significant bits
+ *
+ * @rxb:	Operand to store computed RXB value
+ * @v1:		First vector register designated operand
+ * @v2:		Second vector register designated operand
+ * @v3:		Third vector register designated operand
+ * @v4:		Fourth vector register designated operand
+ */
+.macro	RXB	rxb v1 v2=0 v3=0 v4=0
+	\rxb = 0
+	.if \v1 & 0x10
+		\rxb = \rxb | 0x08
+	.endif
+	.if \v2 & 0x10
+		\rxb = \rxb | 0x04
+	.endif
+	.if \v3 & 0x10
+		\rxb = \rxb | 0x02
+	.endif
+	.if \v4 & 0x10
+		\rxb = \rxb | 0x01
+	.endif
+.endm
+
+/* MRXB - Generate Element Size Control and RXB value
+ *
+ * @m:		Element size control
+ * @v1:		First vector register designated operand (for RXB)
+ * @v2:		Second vector register designated operand (for RXB)
+ * @v3:		Third vector register designated operand (for RXB)
+ * @v4:		Fourth vector register designated operand (for RXB)
+ */
+.macro	MRXB	m v1 v2=0 v3=0 v4=0
+	rxb = 0
+	RXB	rxb, \v1, \v2, \v3, \v4
+	.byte	(\m << 4) | rxb
+.endm
+
+/* MRXBOPC - Generate Element Size Control, RXB, and final Opcode fields
+ *
+ * @m:		Element size control
+ * @opc:	Opcode
+ * @v1:		First vector register designated operand (for RXB)
+ * @v2:		Second vector register designated operand (for RXB)
+ * @v3:		Third vector register designated operand (for RXB)
+ * @v4:		Fourth vector register designated operand (for RXB)
+ */
+.macro	MRXBOPC	m opc v1 v2=0 v3=0 v4=0
+	MRXB	\m, \v1, \v2, \v3, \v4
+	.byte	\opc
+.endm
+
+/* Vector support instructions */
+
+/* VECTOR GENERATE BYTE MASK */
+.macro	VGBM	vr imm2
+	VX_NUM	v1, \vr
+	.word	(0xE700 | (VX_R(v1) << 4))
+	.word	\imm2
+	MRXBOPC	0, 0x44, v1
+.endm
+.macro	VZERO	vxr
+	VGBM	\vxr, 0
+.endm
+.macro	VONE	vxr
+	VGBM	\vxr, 0xFFFF
+.endm
+
+/* VECTOR LOAD VR ELEMENT FROM GR */
+.macro	VLVG	v, gr, disp, m
+	VX_NUM	v1, \v
+	GR_NUM	b2, "%r0"
+	GR_NUM	r3, \gr
+	.word	0xE700 | (VX_R(v1) << 4) | r3
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	\m, 0x22, v1
+.endm
+.macro	VLVGB	v, gr, index
+	VLVG	\v, \gr, \index, 0
+.endm
+.macro	VLVGH	v, gr, index
+	VLVG	\v, \gr, \index, 1
+.endm
+.macro	VLVGF	v, gr, index
+	VLVG	\v, \gr, \index, 2
+.endm
+.macro	VLVGG	v, gr, index
+	VLVG	\v, \gr, \index, 3
+.endm
+
+/* VECTOR LOAD */
+.macro	VL	v, disp, index="%r0", base
+	VX_NUM	v1, \v
+	GR_NUM	x2, \index
+	GR_NUM	b2, \base
+	.word	0xE700 | (VX_R(v1) << 4) | x2
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC 0, 0x06, v1
+.endm
+
+/* VECTOR LOAD ELEMENT */
+.macro	VLEx	vr1, disp, index="%r0", base, m3, opc
+	VX_NUM	v1, \vr1
+	GR_NUM	x2, \index
+	GR_NUM	b2, \base
+	.word	0xE700 | (VX_R(v1) << 4) | x2
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	\m3, \opc, v1
+.endm
+.macro	VLEB	vr1, disp, index="%r0", base, m3
+	VLEx	\vr1, \disp, \index, \base, \m3, 0x00
+.endm
+.macro	VLEH	vr1, disp, index="%r0", base, m3
+	VLEx	\vr1, \disp, \index, \base, \m3, 0x01
+.endm
+.macro	VLEF	vr1, disp, index="%r0", base, m3
+	VLEx	\vr1, \disp, \index, \base, \m3, 0x03
+.endm
+.macro	VLEG	vr1, disp, index="%r0", base, m3
+	VLEx	\vr1, \disp, \index, \base, \m3, 0x02
+.endm
+
+/* VECTOR LOAD ELEMENT IMMEDIATE */
+.macro	VLEIx	vr1, imm2, m3, opc
+	VX_NUM	v1, \vr1
+	.word	0xE700 | (VX_R(v1) << 4)
+	.word	\imm2
+	MRXBOPC	\m3, \opc, v1
+.endm
+.macro	VLEIB	vr1, imm2, index
+	VLEIx	\vr1, \imm2, \index, 0x40
+.endm
+.macro	VLEIH	vr1, imm2, index
+	VLEIx	\vr1, \imm2, \index, 0x41
+.endm
+.macro	VLEIF	vr1, imm2, index
+	VLEIx	\vr1, \imm2, \index, 0x43
+.endm
+.macro	VLEIG	vr1, imm2, index
+	VLEIx	\vr1, \imm2, \index, 0x42
+.endm
+
+/* VECTOR LOAD GR FROM VR ELEMENT */
+.macro	VLGV	gr, vr, disp, base="%r0", m
+	GR_NUM	r1, \gr
+	GR_NUM	b2, \base
+	VX_NUM	v3, \vr
+	.word	0xE700 | (r1 << 4) | VX_R(v3)
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	\m, 0x21, v3
+.endm
+.macro	VLGVB	gr, vr, disp, base="%r0"
+	VLGV	\gr, \vr, \disp, \base, 0
+.endm
+.macro	VLGVH	gr, vr, disp, base="%r0"
+	VLGV	\gr, \vr, \disp, \base, 1
+.endm
+.macro	VLGVF	gr, vr, disp, base="%r0"
+	VLGV	\gr, \vr, \disp, \base, 2
+.endm
+.macro	VLGVG	gr, vr, disp, base="%r0"
+	VLGV	\gr, \vr, \disp, \base, 3
+.endm
+
+/* VECTOR LOAD MULTIPLE */
+.macro	VLM	vfrom, vto, disp, base
+	VX_NUM	v1, \vfrom
+	VX_NUM	v3, \vto
+	GR_NUM	b2, \base	    /* Base register */
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	0, 0x36, v1, v3
+.endm
+
+/* VECTOR STORE MULTIPLE */
+.macro	VSTM	vfrom, vto, disp, base
+	VX_NUM	v1, \vfrom
+	VX_NUM	v3, \vto
+	GR_NUM	b2, \base	    /* Base register */
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v3)
+	.word	(b2 << 12) | (\disp)
+	MRXBOPC	0, 0x3E, v1, v3
+.endm
+
+/* VECTOR PERMUTE */
+.macro	VPERM	vr1, vr2, vr3, vr4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	VX_NUM	v4, \vr4
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	(VX_R(v3) << 12)
+	MRXBOPC	VX_R(v4), 0x8C, v1, v2, v3, v4
+.endm
+
+/* VECTOR UNPACK LOGICAL LOW */
+.macro	VUPLL	vr1, vr2, m3
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	0x0000
+	MRXBOPC	\m3, 0xD4, v1, v2
+.endm
+.macro	VUPLLB	vr1, vr2
+	VUPLL	\vr1, \vr2, 0
+.endm
+.macro	VUPLLH	vr1, vr2
+	VUPLL	\vr1, \vr2, 1
+.endm
+.macro	VUPLLF	vr1, vr2
+	VUPLL	\vr1, \vr2, 2
+.endm
+
+
+/* Vector integer instructions */
+
+/* VECTOR EXCLUSIVE OR */
+.macro	VX	vr1, vr2, vr3
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	(VX_R(v3) << 12)
+	MRXBOPC	0, 0x6D, v1, v2, v3
+.endm
+
+/* VECTOR GALOIS FIELD MULTIPLY SUM */
+.macro	VGFM	vr1, vr2, vr3, m4
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	(VX_R(v3) << 12)
+	MRXBOPC	\m4, 0xB4, v1, v2, v3
+.endm
+.macro	VGFMB	vr1, vr2, vr3
+	VGFM	\vr1, \vr2, \vr3, 0
+.endm
+.macro	VGFMH	vr1, vr2, vr3
+	VGFM	\vr1, \vr2, \vr3, 1
+.endm
+.macro	VGFMF	vr1, vr2, vr3
+	VGFM	\vr1, \vr2, \vr3, 2
+.endm
+.macro	VGFMG	vr1, vr2, vr3
+	VGFM	\vr1, \vr2, \vr3, 3
+.endm
+
+/* VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */
+.macro	VGFMA	vr1, vr2, vr3, vr4, m5
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	VX_NUM	v4, \vr4
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	(VX_R(v3) << 12) | (\m5 << 8)
+	MRXBOPC	VX_R(v4), 0xBC, v1, v2, v3, v4
+.endm
+.macro	VGFMAB	vr1, vr2, vr3, vr4
+	VGFMA	\vr1, \vr2, \vr3, \vr4, 0
+.endm
+.macro	VGFMAH	vr1, vr2, vr3, vr4
+	VGFMA	\vr1, \vr2, \vr3, \vr4, 1
+.endm
+.macro	VGFMAF	vr1, vr2, vr3, vr4
+	VGFMA	\vr1, \vr2, \vr3, \vr4, 2
+.endm
+.macro	VGFMAG	vr1, vr2, vr3, vr4
+	VGFMA	\vr1, \vr2, \vr3, \vr4, 3
+.endm
+
+/* VECTOR SHIFT RIGHT LOGICAL BY BYTE */
+.macro	VSRLB	vr1, vr2, vr3
+	VX_NUM	v1, \vr1
+	VX_NUM	v2, \vr2
+	VX_NUM	v3, \vr3
+	.word	0xE700 | (VX_R(v1) << 4) | VX_R(v2)
+	.word	(VX_R(v3) << 12)
+	MRXBOPC	0, 0x7D, v1, v2, v3
+.endm
+
+
+#endif	/* __ASSEMBLY__ */
+#endif	/* __ASM_S390_VX_INSN_H */
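
The macros in this header hand-assemble vector instructions because older binutils cannot: VX_R() keeps the four low-order bits of a register number for the opcode fields, while RXB collects each operand's fifth bit and MRXB packs the element-size control next to it. A small C sketch of that computation (plain C, outside the kernel; the example values reproduce the 0x0c3e word of the removed save_vx_regs() vstm 16,31 encoding seen earlier in this diff):

#include <stdio.h>

static unsigned int rxb(unsigned int v1, unsigned int v2,
			unsigned int v3, unsigned int v4)
{
	unsigned int r = 0;

	if (v1 & 0x10) r |= 0x08;	/* MSB of the first operand  */
	if (v2 & 0x10) r |= 0x04;	/* ... second operand        */
	if (v3 & 0x10) r |= 0x02;	/* ... third operand         */
	if (v4 & 0x10) r |= 0x01;	/* ... fourth operand        */
	return r;
}

static unsigned int mrxb(unsigned int m, unsigned int v1, unsigned int v2,
			 unsigned int v3, unsigned int v4)
{
	return (m << 4) | rxb(v1, v2, v3, v4);	/* .byte (\m << 4) | rxb */
}

int main(void)
{
	/* VSTM %v16,%v31: both register numbers >= 16, element size 0 */
	printf("MRXB byte: 0x%02x\n", mrxb(0, 16, 31, 0, 0));	/* prints 0x0c */
	return 0;
}
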
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 67878af..59d2bb4 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -204,9 +204,9 @@
 #define __NR_statfs64		265
 #define __NR_fstatfs64		266
 #define __NR_remap_file_pages	267
-/* Number 268 is reserved for new sys_mbind */
-/* Number 269 is reserved for new sys_get_mempolicy */
-/* Number 270 is reserved for new sys_set_mempolicy */
+#define __NR_mbind		268
+#define __NR_get_mempolicy	269
+#define __NR_set_mempolicy	270
 #define __NR_mq_open		271
 #define __NR_mq_unlink		272
 #define __NR_mq_timedsend	273
@@ -223,7 +223,7 @@
 #define __NR_inotify_init	284
 #define __NR_inotify_add_watch	285
 #define __NR_inotify_rm_watch	286
-/* Number 287 is reserved for new sys_migrate_pages */
+#define __NR_migrate_pages	287
 #define __NR_openat		288
 #define __NR_mkdirat		289
 #define __NR_mknodat		290
@@ -245,7 +245,7 @@
 #define __NR_sync_file_range	307
 #define __NR_tee		308
 #define __NR_vmsplice		309
-/* Number 310 is reserved for new sys_move_pages */
+#define __NR_move_pages		310
 #define __NR_getcpu		311
 #define __NR_epoll_pwait	312
 #define __NR_utimes		313
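
With the previously reserved numbers above now assigned, the NUMA policy system calls become reachable on s390. A hedged userspace sketch that invokes one of them directly through syscall(2), since a libc built before this change has no wrapper for the new numbers; MPOL_DEFAULT is the standard "drop any policy" mode from the NUMA ABI:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_set_mempolicy_s390	270	/* __NR_set_mempolicy from the hunk above */
#define MPOL_DEFAULT		0	/* reset the task memory policy */

int main(void)
{
	long rc = syscall(NR_set_mempolicy_s390, MPOL_DEFAULT, NULL, 0UL);

	printf("set_mempolicy: %ld\n", rc);
	return rc ? 1 : 0;
}
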
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index ffb8761..b756c63 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -28,6 +28,17 @@
 
 CFLAGS_sysinfo.o += -w
 
+#
+# Use -march=z900 for sclp.c to be able to print an error message if
+# the kernel is started on a machine which is too old
+#
+CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
+ifneq ($(CC_FLAGS_MARCH),-march=z900)
+CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
+CFLAGS_sclp.o	+= -march=z900
+endif
+GCOV_PROFILE_sclp.o := n
+
 obj-y	:= traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index a2da259..48c9af7 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -28,6 +28,9 @@
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
 	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+	DEFINE(__THREAD_FPU_fpc, offsetof(struct thread_struct, fpu.fpc));
+	DEFINE(__THREAD_FPU_flags, offsetof(struct thread_struct, fpu.flags));
+	DEFINE(__THREAD_FPU_regs, offsetof(struct thread_struct, fpu.regs));
 	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
 	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
 	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index fe8d692..eb46642 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -153,33 +153,14 @@
 /* Store registers needed to create the signal frame */
 static void store_sigregs(void)
 {
-	int i;
-
 	save_access_regs(current->thread.acrs);
-	save_fp_ctl(&current->thread.fp_regs.fpc);
-	if (current->thread.vxrs) {
-		save_vx_regs(current->thread.vxrs);
-		for (i = 0; i < __NUM_FPRS; i++)
-			current->thread.fp_regs.fprs[i] =
-				*(freg_t *)(current->thread.vxrs + i);
-	} else
-		save_fp_regs(current->thread.fp_regs.fprs);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
 static void load_sigregs(void)
 {
-	int i;
-
 	restore_access_regs(current->thread.acrs);
-	/* restore_fp_ctl is done in restore_sigregs */
-	if (current->thread.vxrs) {
-		for (i = 0; i < __NUM_FPRS; i++)
-			*(freg_t *)(current->thread.vxrs + i) =
-				current->thread.fp_regs.fprs[i];
-		restore_vx_regs(current->thread.vxrs);
-	} else
-		restore_fp_regs(current->thread.fp_regs.fprs);
 }
 
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
@@ -196,8 +177,7 @@
 		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
 	       sizeof(user_sregs.regs.acrs));
-	memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
-	       sizeof(user_sregs.fpregs));
+	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
 		return -EFAULT;
 	return 0;
@@ -217,8 +197,8 @@
 	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
 		return -EINVAL;
 
-	/* Loading the floating-point-control word can fail. Do that first. */
-	if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+	/* Test the floating-point-control word. */
+	if (test_fp_ctl(user_sregs.fpregs.fpc))
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
@@ -235,9 +215,7 @@
 		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
 	       sizeof(current->thread.acrs));
-
-	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
-	       sizeof(current->thread.fp_regs));
+	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
 
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -258,13 +236,13 @@
 		return -EFAULT;
 
 	/* Save vector registers to signal stack */
-	if (current->thread.vxrs) {
+	if (is_vx_task(current)) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
+			vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
 				   sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.vxrs + __NUM_VXRS_LOW,
+				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
 				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -286,15 +264,15 @@
 		*(__u32 *)&regs->gprs[i] = gprs_high[i];
 
 	/* Restore vector registers from signal stack */
-	if (current->thread.vxrs) {
+	if (is_vx_task(current)) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
 				     &sregs_ext->vxrs_high,
 				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
+			*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
 	}
 	return 0;
 }
@@ -308,6 +286,7 @@
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
 		goto badframe;
 	set_current_blocked(&set);
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
@@ -330,6 +309,7 @@
 	set_current_blocked(&set);
 	if (compat_restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
+	save_fpu_regs();
 	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
@@ -472,7 +452,7 @@
 	 */
 	uc_flags = UC_GPRS_HIGH;
 	if (MACHINE_HAS_VX) {
-		if (current->thread.vxrs)
+		if (is_vx_task(current))
 			uc_flags |= UC_VXRS;
 	} else
 		frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 84062e7..247b7aa 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -20,6 +20,8 @@
 #include <asm/page.h>
 #include <asm/sigp.h>
 #include <asm/irq.h>
+#include <asm/fpu-internal.h>
+#include <asm/vx-insn.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 8
@@ -46,10 +48,10 @@
 		   _TIF_UPROBE)
 _TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		   _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
+_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
 _PIF_WORK	= (_PIF_PER_TRAP)
 
-#define BASED(name) name-system_call(%r13)
+#define BASED(name) name-cleanup_critical(%r13)
 
 	.macro	TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -73,38 +75,6 @@
 #endif
 	.endm
 
-	.macro LPP newpp
-#if IS_ENABLED(CONFIG_KVM)
-	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
-	jz	.+8
-	.insn	s,0xb2800000,\newpp
-#endif
-	.endm
-
-	.macro	HANDLE_SIE_INTERCEPT scratch,reason
-#if IS_ENABLED(CONFIG_KVM)
-	tmhh	%r8,0x0001		# interrupting from user ?
-	jnz	.+62
-	lgr	\scratch,%r9
-	slg	\scratch,BASED(.Lsie_critical)
-	clg	\scratch,BASED(.Lsie_critical_length)
-	.if	\reason==1
-	# Some program interrupts are suppressing (e.g. protection).
-	# We must also check the instruction after SIE in that case.
-	# do_protection_exception will rewind to .Lrewind_pad
-	jh	.+42
-	.else
-	jhe	.+42
-	.endif
-	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
-	LPP	__SF_EMPTY+16(%r15)		# set host id
-	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
-	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	larl	%r9,sie_exit			# skip forward to sie_exit
-	mvi	__SF_EMPTY+31(%r15),\reason	# set exit reason
-#endif
-	.endm
-
 	.macro	CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
 	tml	%r15,\stacksize - CONFIG_STACK_GUARD
@@ -113,7 +83,7 @@
 #endif
 	.endm
 
-	.macro	SWITCH_ASYNC savearea,stack,shift
+	.macro	SWITCH_ASYNC savearea,timer
 	tmhh	%r8,0x0001		# interrupting from user ?
 	jnz	1f
 	lgr	%r14,%r9
@@ -124,26 +94,28 @@
 	brasl	%r14,cleanup_critical
 	tmhh	%r8,0x0001		# retest problem state after cleanup
 	jnz	1f
-0:	lg	%r14,\stack		# are we already on the target stack?
+0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
 	slgr	%r14,%r15
-	srag	%r14,%r14,\shift
-	jnz	1f
-	CHECK_STACK 1<<\shift,\savearea
+	srag	%r14,%r14,STACK_SHIFT
+	jnz	2f
+	CHECK_STACK 1<<STACK_SHIFT,\savearea
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	2f
-1:	lg	%r15,\stack		# load target stack
-2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	j	3f
+1:	LAST_BREAK %r14
+	UPDATE_VTIME %r14,%r15,\timer
+2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
+3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
 
-	.macro UPDATE_VTIME scratch,enter_timer
-	lg	\scratch,__LC_EXIT_TIMER
-	slg	\scratch,\enter_timer
-	alg	\scratch,__LC_USER_TIMER
-	stg	\scratch,__LC_USER_TIMER
-	lg	\scratch,__LC_LAST_UPDATE_TIMER
-	slg	\scratch,__LC_EXIT_TIMER
-	alg	\scratch,__LC_SYSTEM_TIMER
-	stg	\scratch,__LC_SYSTEM_TIMER
+	.macro UPDATE_VTIME w1,w2,enter_timer
+	lg	\w1,__LC_EXIT_TIMER
+	lg	\w2,__LC_LAST_UPDATE_TIMER
+	slg	\w1,\enter_timer
+	slg	\w2,__LC_EXIT_TIMER
+	alg	\w1,__LC_USER_TIMER
+	alg	\w2,__LC_SYSTEM_TIMER
+	stg	\w1,__LC_USER_TIMER
+	stg	\w2,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 	.endm
 
@@ -197,6 +169,69 @@
 	br	%r14
 
 .L__critical_start:
+
+#if IS_ENABLED(CONFIG_KVM)
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ENTRY(sie64a)
+	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
+	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
+	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
+	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
+	jno	.Lsie_load_guest_gprs
+	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
+.Lsie_load_guest_gprs:
+	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
+	lg	%r14,__LC_GMAP			# get gmap pointer
+	ltgr	%r14,%r14
+	jz	.Lsie_gmap
+	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
+.Lsie_gmap:
+	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
+	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
+	tm	__SIE_PROG20+3(%r14),3		# last exit...
+	jnz	.Lsie_skip
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	jo	.Lsie_skip			# exit if fp/vx regs changed
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
+	jz	.Lsie_enter
+	.insn	s,0xb2800000,__LC_CURRENT_PID	# set guest id to pid
+.Lsie_enter:
+	sie	0(%r14)
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
+	jz	.Lsie_skip
+	.insn	s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
+.Lsie_skip:
+	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+.Lsie_done:
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# See also .Lcleanup_sie
+.Lrewind_pad:
+	nop	0
+	.globl sie_exit
+sie_exit:
+	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
+	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
+	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
+	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
+	br	%r14
+.Lsie_fault:
+	lghi	%r14,-EFAULT
+	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
+	j	sie_exit
+
+	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(sie_exit,.Lsie_fault)
+#endif
+
 /*
  * SVC interrupt handler routine. System calls are synchronous events and
  * are executed with interrupts enabled.
@@ -212,9 +247,9 @@
 .Lsysc_per:
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
-.Lsysc_vtime:
-	UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
 	LAST_BREAK %r13
+.Lsysc_vtime:
+	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
@@ -244,8 +279,6 @@
 .Lsysc_return:
 	LOCKDEP_SYS_EXIT
 .Lsysc_tif:
-	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
-	jno	.Lsysc_restore
 	tm	__PT_FLAGS+7(%r11),_PIF_WORK
 	jnz	.Lsysc_work
 	tm	__TI_flags+7(%r12),_TIF_WORK
@@ -280,6 +313,8 @@
 	jo	.Lsysc_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	.Lsysc_notify_resume
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	jo	.Lsysc_vxrs
 	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
 	jo	.Lsysc_uaccess
 	j	.Lsysc_return		# beware of critical section cleanup
@@ -307,6 +342,13 @@
 	j	.Lsysc_return
 
 #
+# CIF_FPU is set, restore floating-point controls and floating-point registers.
+#
+.Lsysc_vxrs:
+	larl	%r14,.Lsysc_return
+	jg	load_fpu_regs
+
+#
 # _TIF_SIGPENDING is set, call do_signal
 #
 .Lsysc_sigpending:
@@ -405,28 +447,35 @@
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,1
 	tmhh	%r8,0x0001		# test problem state bit
-	jnz	1f			# -> fault in user space
-	tmhh	%r8,0x4000		# PER bit set in old PSW ?
-	jnz	0f			# -> enabled, can't be a double fault
+	jnz	2f			# -> fault in user space
+#if IS_ENABLED(CONFIG_KVM)
+	# cleanup critical section for sie64a
+	lgr	%r14,%r9
+	slg	%r14,BASED(.Lsie_critical_start)
+	clg	%r14,BASED(.Lsie_critical_length)
+	jhe	0f
+	brasl	%r14,.Lcleanup_sie
+#endif
+0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
+	jnz	1f			# -> enabled, can't be a double fault
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jnz	.Lpgm_svcper		# -> single stepped svc
-0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	2f
-1:	UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
-	LAST_BREAK %r14
+	j	3f
+2:	LAST_BREAK %r14
+	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
-	jz	2f
+	jz	3f
 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
-2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -435,24 +484,28 @@
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	stg	%r10,__PT_ARGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
-	jz	0f
+	jz	4f
 	tmhh	%r8,0x0001		# kernel per event ?
 	jz	.Lpgm_kprobe
 	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-0:	REENABLE_IRQS
+4:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	larl	%r1,pgm_check_table
 	llgh	%r10,__PT_INT_CODE+2(%r11)
 	nill	%r10,0x007f
 	sll	%r10,2
-	je	.Lsysc_return
+	je	.Lpgm_return
 	lgf	%r1,0(%r10,%r1)		# load address of handler routine
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-	j	.Lsysc_return
+.Lpgm_return:
+	LOCKDEP_SYS_EXIT
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	.Lsysc_restore
+	j	.Lsysc_tif
 
 #
 # PER event in supervisor state, must be kprobes
@@ -462,7 +515,7 @@
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,do_per_trap
-	j	.Lsysc_return
+	j	.Lpgm_return
 
 #
 # single stepped system call
@@ -483,15 +536,9 @@
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,2
-	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
-	tmhh	%r8,0x0001		# interrupting from user?
-	jz	.Lio_skip
-	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
-	LAST_BREAK %r14
-.Lio_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -587,6 +634,8 @@
 	jo	.Lio_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	.Lio_notify_resume
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	jo	.Lio_vxrs
 	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
 	jo	.Lio_uaccess
 	j	.Lio_return		# beware of critical section cleanup
@@ -609,6 +658,13 @@
 	j	.Lio_return
 
 #
+# CIF_FPU is set, restore floating-point controls and floating-point registers.
+#
+.Lio_vxrs:
+	larl	%r14,.Lio_return
+	jg	load_fpu_regs
+
+#
 # _TIF_NEED_RESCHED is set, call schedule
 #
 .Lio_reschedule:
@@ -652,15 +708,9 @@
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,3
-	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
-	tmhh	%r8,0x0001		# interrupting from user ?
-	jz	.Lext_skip
-	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
-	LAST_BREAK %r14
-.Lext_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -690,6 +740,122 @@
 	br	%r14
 .Lpsw_idle_end:
 
+/* Store the floating-point controls and the floating-point or vector
+ * extension registers.  A critical section cleanup assures that the
+ * registers are stored even if interrupted for some other work.  The
+ * register %r2 designates a struct fpu to store the register contents.
+ * If the specified structure does not contain a register save area,
+ * the register store is omitted (see also the comments in
+ * arch_dup_task_struct()).
+ *
+ * The CIF_FPU flag is set in any case.  Setting CIF_FPU triggers a lazy
+ * restore of the register contents on the next system call or I/O return.
+ */
+ENTRY(save_fpu_regs)
+	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bor	%r14
+	stfpc	__THREAD_FPU_fpc(%r2)
+.Lsave_fpu_regs_fpc_end:
+	lg	%r3,__THREAD_FPU_regs(%r2)
+	ltgr	%r3,%r3
+	jz	.Lsave_fpu_regs_done	  # no save area -> set CIF_FPU
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
+	jz	.Lsave_fpu_regs_fp	  # no -> store FP regs
+.Lsave_fpu_regs_vx_low:
+	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
+.Lsave_fpu_regs_vx_high:
+	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
+	j	.Lsave_fpu_regs_done	  # -> set CIF_FPU flag
+.Lsave_fpu_regs_fp:
+	std	0,0(%r3)
+	std	1,8(%r3)
+	std	2,16(%r3)
+	std	3,24(%r3)
+	std	4,32(%r3)
+	std	5,40(%r3)
+	std	6,48(%r3)
+	std	7,56(%r3)
+	std	8,64(%r3)
+	std	9,72(%r3)
+	std	10,80(%r3)
+	std	11,88(%r3)
+	std	12,96(%r3)
+	std	13,104(%r3)
+	std	14,112(%r3)
+	std	15,120(%r3)
+.Lsave_fpu_regs_done:
+	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+	br	%r14
+.Lsave_fpu_regs_end:
+
+/* Load floating-point controls and floating-point or vector extension
+ * registers.  A critical section cleanup assures that the register contents
+ * are loaded even if interrupted for some other work.	Depending on the saved
+ * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
+ *
+ * There are special calling conventions to fit into sysc and io return work:
+ *	%r15:	<kernel stack>
+ * The function uses %r4 and the stack slot at __SF_EMPTY+32(%r15) as
+ * scratch space.
+ */
+load_fpu_regs:
+	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bnor	%r14
+	lfpc	__THREAD_FPU_fpc(%r4)
+	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
+.Lload_fpu_regs_vx_ctl:
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jo	.Lload_fpu_regs_vx
+	oi	__SF_EMPTY+32+5(%r15),2		# set VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+.Lload_fpu_regs_vx:
+	VLM	%v0,%v15,0,%r4
+.Lload_fpu_regs_vx_high:
+	VLM	%v16,%v31,256,%r4
+	j	.Lload_fpu_regs_done
+.Lload_fpu_regs_fp_ctl:
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jz	.Lload_fpu_regs_fp
+	ni	__SF_EMPTY+32+5(%r15),253	# clear VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+.Lload_fpu_regs_fp:
+	ld	0,0(%r4)
+	ld	1,8(%r4)
+	ld	2,16(%r4)
+	ld	3,24(%r4)
+	ld	4,32(%r4)
+	ld	5,40(%r4)
+	ld	6,48(%r4)
+	ld	7,56(%r4)
+	ld	8,64(%r4)
+	ld	9,72(%r4)
+	ld	10,80(%r4)
+	ld	11,88(%r4)
+	ld	12,96(%r4)
+	ld	13,104(%r4)
+	ld	14,112(%r4)
+	ld	15,120(%r4)
+.Lload_fpu_regs_done:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+	br	%r14
+.Lload_fpu_regs_end:
+
+/* Test and set the vector enablement control in CR0.46 */
+ENTRY(__ctl_set_vx)
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	tm	__SF_EMPTY+5(%r15),2
+	bor	%r14
+	oi	__SF_EMPTY+5(%r15),2
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+	br	%r14
+.L__ctl_set_vx_end:
+
 .L__critical_end:
 
 /*
@@ -702,9 +868,8 @@
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,4
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
@@ -725,11 +890,7 @@
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 	jno	.Lmcck_panic		# no -> skip cleanup critical
-	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
-	tm	%r8,0x0001		# interrupting from user ?
-	jz	.Lmcck_skip
-	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
-	LAST_BREAK %r14
+	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
 .Lmcck_skip:
 	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 	stmg	%r0,%r7,__PT_R0(%r11)
@@ -764,12 +925,8 @@
 	lpswe	__LC_RETURN_MCCK_PSW
 
 .Lmcck_panic:
-	lg	%r14,__LC_PANIC_STACK
-	slgr	%r14,%r15
-	srag	%r14,%r14,PAGE_SHIFT
-	jz	0f
 	lg	%r15,__LC_PANIC_STACK
-0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	.Lmcck_skip
 
 #
@@ -819,20 +976,13 @@
 	jg	kernel_stack_overflow
 #endif
 
-	.align	8
-.Lcleanup_table:
-	.quad	system_call
-	.quad	.Lsysc_do_svc
-	.quad	.Lsysc_tif
-	.quad	.Lsysc_restore
-	.quad	.Lsysc_done
-	.quad	.Lio_tif
-	.quad	.Lio_restore
-	.quad	.Lio_done
-	.quad	psw_idle
-	.quad	.Lpsw_idle_end
-
 cleanup_critical:
+#if IS_ENABLED(CONFIG_KVM)
+	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
+	jl	.Lcleanup_sie
+#endif
 	clg	%r9,BASED(.Lcleanup_table)	# system_call
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
@@ -853,8 +1003,54 @@
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
 	jl	.Lcleanup_idle
+	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
+	jl	.Lcleanup_save_fpu_regs
+	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
+	jl	.Lcleanup_load_fpu_regs
+	clg	%r9,BASED(.Lcleanup_table+112)	# __ctl_set_vx
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table+120)	# .L__ctl_set_vx_end
+	jl	.Lcleanup___ctl_set_vx
 0:	br	%r14
 
+	.align	8
+.Lcleanup_table:
+	.quad	system_call
+	.quad	.Lsysc_do_svc
+	.quad	.Lsysc_tif
+	.quad	.Lsysc_restore
+	.quad	.Lsysc_done
+	.quad	.Lio_tif
+	.quad	.Lio_restore
+	.quad	.Lio_done
+	.quad	psw_idle
+	.quad	.Lpsw_idle_end
+	.quad	save_fpu_regs
+	.quad	.Lsave_fpu_regs_end
+	.quad	load_fpu_regs
+	.quad	.Lload_fpu_regs_end
+	.quad	__ctl_set_vx
+	.quad	.L__ctl_set_vx_end
+
+#if IS_ENABLED(CONFIG_KVM)
+.Lcleanup_table_sie:
+	.quad	.Lsie_gmap
+	.quad	.Lsie_done
+
+.Lcleanup_sie:
+	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
+	jz	0f
+	.insn	s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
+0:	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+	larl	%r9,sie_exit			# skip forward to sie_exit
+	br	%r14
+#endif
 
 .Lcleanup_system_call:
 	# check if stpt has been executed
@@ -915,7 +1111,7 @@
 	.quad	system_call
 	.quad	.Lsysc_stmg
 	.quad	.Lsysc_per
-	.quad	.Lsysc_vtime+18
+	.quad	.Lsysc_vtime+36
 	.quad	.Lsysc_vtime+42
 
 .Lcleanup_sysc_tif:
@@ -981,6 +1177,145 @@
 .Lcleanup_idle_insn:
 	.quad	.Lpsw_idle_lpsw
 
+.Lcleanup_save_fpu_regs:
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bor	%r14
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
+	jhe	5f
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
+	jhe	4f
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
+	jhe	3f
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
+	jhe	2f
+	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
+	jhe	1f
+	lg	%r2,__LC_CURRENT
+0:	# Store floating-point controls
+	stfpc	__THREAD_FPU_fpc(%r2)
+1:	# Load register save area and check if VX is active
+	lg	%r3,__THREAD_FPU_regs(%r2)
+	ltgr	%r3,%r3
+	jz	5f			  # no save area -> set CIF_FPU
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
+	jz	4f			  # no VX -> store FP regs
+2:	# Store vector registers (V0-V15)
+	VSTM	%v0,%v15,0,%r3		  # vstm 0,15,0(3)
+3:	# Store vector registers (V16-V31)
+	VSTM	%v16,%v31,256,%r3	  # vstm 16,31,256(3)
+	j	5f			  # -> done, set CIF_FPU flag
+4:	# Store floating-point registers
+	std	0,0(%r3)
+	std	1,8(%r3)
+	std	2,16(%r3)
+	std	3,24(%r3)
+	std	4,32(%r3)
+	std	5,40(%r3)
+	std	6,48(%r3)
+	std	7,56(%r3)
+	std	8,64(%r3)
+	std	9,72(%r3)
+	std	10,80(%r3)
+	std	11,88(%r3)
+	std	12,96(%r3)
+	std	13,104(%r3)
+	std	14,112(%r3)
+	std	15,120(%r3)
+5:	# Set CIF_FPU flag
+	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+	lg	%r9,48(%r11)		# return from save_fpu_regs
+	br	%r14
+.Lcleanup_save_fpu_fpc_end:
+	.quad	.Lsave_fpu_regs_fpc_end
+.Lcleanup_save_fpu_regs_vx_low:
+	.quad	.Lsave_fpu_regs_vx_low
+.Lcleanup_save_fpu_regs_vx_high:
+	.quad	.Lsave_fpu_regs_vx_high
+.Lcleanup_save_fpu_regs_fp:
+	.quad	.Lsave_fpu_regs_fp
+.Lcleanup_save_fpu_regs_done:
+	.quad	.Lsave_fpu_regs_done
+
+.Lcleanup_load_fpu_regs:
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bnor	%r14
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
+	jhe	1f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
+	jhe	2f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl)
+	jhe	3f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
+	jhe	4f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
+	jhe	5f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
+	jhe	6f
+	lg	%r4,__LC_CURRENT
+	lfpc	__THREAD_FPU_fpc(%r4)
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+	jz	3f				# -> no VX, load FP regs
+6:	# Set VX-enablement control
+	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jo	5f
+	oi	__SF_EMPTY+32+5(%r15),2		# set VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+5:	# Load V0 ..V15 registers
+	VLM	%v0,%v15,0,%r4
+4:	# Load V16..V31 registers
+	VLM	%v16,%v31,256,%r4
+	j	1f
+3:	# Clear VX-enablement control for FP
+	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jz	2f
+	ni	__SF_EMPTY+32+5(%r15),253	# clear VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+2:	# Load floating-point registers
+	ld	0,0(%r4)
+	ld	1,8(%r4)
+	ld	2,16(%r4)
+	ld	3,24(%r4)
+	ld	4,32(%r4)
+	ld	5,40(%r4)
+	ld	6,48(%r4)
+	ld	7,56(%r4)
+	ld	8,64(%r4)
+	ld	9,72(%r4)
+	ld	10,80(%r4)
+	ld	11,88(%r4)
+	ld	12,96(%r4)
+	ld	13,104(%r4)
+	ld	14,112(%r4)
+	ld	15,120(%r4)
+1:	# Clear CIF_FPU bit
+	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+	lg	%r9,48(%r11)		# return from load_fpu_regs
+	br	%r14
+.Lcleanup_load_fpu_regs_vx_ctl:
+	.quad	.Lload_fpu_regs_vx_ctl
+.Lcleanup_load_fpu_regs_vx:
+	.quad	.Lload_fpu_regs_vx
+.Lcleanup_load_fpu_regs_vx_high:
+	.quad	.Lload_fpu_regs_vx_high
+.Lcleanup_load_fpu_regs_fp_ctl:
+	.quad	.Lload_fpu_regs_fp_ctl
+.Lcleanup_load_fpu_regs_fp:
+	.quad	.Lload_fpu_regs_fp
+.Lcleanup_load_fpu_regs_done:
+	.quad	.Lload_fpu_regs_done
+
+.Lcleanup___ctl_set_vx:
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	tm	__SF_EMPTY+5(%r15),2
+	bor	%r14
+	oi	__SF_EMPTY+5(%r15),2
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+	lg	%r9,48(%r11)		# return from __ctl_set_vx
+	br	%r14
+
 /*
  * Integer constants
  */
@@ -989,62 +1324,11 @@
 	.quad	.L__critical_start
 .Lcritical_length:
 	.quad	.L__critical_end - .L__critical_start
-
-
 #if IS_ENABLED(CONFIG_KVM)
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-ENTRY(sie64a)
-	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
-	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
-	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
-	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
-	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
-	lg	%r14,__LC_GMAP			# get gmap pointer
-	ltgr	%r14,%r14
-	jz	.Lsie_gmap
-	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
-.Lsie_gmap:
-	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
-	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
-	tm	__SIE_PROG20+3(%r14),3		# last exit...
-	jnz	.Lsie_done
-	LPP	__SF_EMPTY(%r15)		# set guest id
-	sie	0(%r14)
-.Lsie_done:
-	LPP	__SF_EMPTY+16(%r15)		# set host id
-	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
-	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-.Lrewind_pad:
-	nop	0
-	.globl sie_exit
-sie_exit:
-	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
-	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
-	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
-	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
-	br	%r14
-.Lsie_fault:
-	lghi	%r14,-EFAULT
-	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
-	j	sie_exit
-
-	.align	8
-.Lsie_critical:
+.Lsie_critical_start:
 	.quad	.Lsie_gmap
 .Lsie_critical_length:
 	.quad	.Lsie_done - .Lsie_gmap
-
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
-	EX_TABLE(sie_exit,.Lsie_fault)
 #endif
 
 	.section .rodata, "a"
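
The cleanup_critical changes above extend the resume logic to save_fpu_regs, load_fpu_regs and __ctl_set_vx: the interrupted address in %r9 is compared against the ordered label table and the remaining steps of the routine are replayed from the matching point. A small C sketch of that classification idea (addresses and step names are illustrative stand-ins for the assembler labels):

#include <stdio.h>

/* Step names follow the .Lsave_fpu_regs_* labels in the cleanup table above. */
enum resume_step {
	REDO_AREA_CHECK,	/* >= fpc_end:  stfpc done, recheck save area  */
	REDO_VSTM_LOW,		/* >= vx_low:   redo vstm of V0-V15 onwards    */
	REDO_VSTM_HIGH,		/* >= vx_high:  redo vstm of V16-V31 onwards   */
	REDO_FP_STORES,		/* >= fp:       redo the 16 std instructions   */
	SET_CIF_FPU_ONLY,	/* >= done:     stores finished, just set flag */
};

static int classify(unsigned long addr, const unsigned long label[5])
{
	int i;

	for (i = 4; i >= 0; i--)	/* highest label at or below addr wins */
		if (addr >= label[i])
			return i;
	return -1;			/* before the first label: restart with stfpc */
}

int main(void)
{
	const unsigned long label[5] = { 0x100, 0x110, 0x120, 0x130, 0x140 };

	printf("interrupted at 0x125 -> resume step %d (REDO_VSTM_HIGH)\n",
	       classify(0x125, label));
	return 0;
}
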
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 59b7c64..1255c6c 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -370,6 +370,7 @@
 	xc	0x200(256),0x200	# partially clear lowcore
 	xc	0x300(256),0x300
 	xc	0xe00(256),0xe00
+	lctlg	%c0,%c15,0x200(%r0)	# initialize control registers
 	stck	__LC_LAST_UPDATE_CLOCK
 	spt	6f-.LPG0(%r13)
 	mvc	__LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
@@ -413,9 +414,9 @@
 # followed by the facility words.
 
 #if defined(CONFIG_MARCH_Z13)
-	.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
+	.long 2, 0xc100eff2, 0xf46cc800
 #elif defined(CONFIG_MARCH_ZEC12)
-	.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
+	.long 2, 0xc100eff2, 0xf46cc800
 #elif defined(CONFIG_MARCH_Z196)
 	.long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index a902996..083b05f 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -44,12 +44,9 @@
 	unsigned char *ipn = (unsigned char *)new;
 
 	pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
-	pr_emerg("Found:    %02x %02x %02x %02x %02x %02x\n",
-		 ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
-	pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
-		 ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
-	pr_emerg("New:      %02x %02x %02x %02x %02x %02x\n",
-		 ipn[0], ipn[1], ipn[2], ipn[3], ipn[4], ipn[5]);
+	pr_emerg("Found:    %6ph\n", ipc);
+	pr_emerg("Expected: %6ph\n", ipe);
+	pr_emerg("New:      %6ph\n", ipn);
 	panic("Corrupted kernel text");
 }
 
@@ -64,7 +61,7 @@
 {
 	struct insn old, new;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		jump_label_make_nop(entry, &old);
 		jump_label_make_branch(entry, &new);
 	} else {
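
The jump-label mismatch dump now relies on the %*ph printk extension, where the field width selects the byte count and each byte is printed as two hex digits separated by blanks (see Documentation/printk-formats.txt). A userspace approximation of the output that "%6ph" produces (the byte values are illustrative; the s390 jump-label nop is a 6-byte brcl):

#include <stdio.h>

static void print_6ph(const char *prefix, const unsigned char *p)
{
	printf("%s%02x %02x %02x %02x %02x %02x\n",
	       prefix, p[0], p[1], p[2], p[3], p[4], p[5]);
}

int main(void)
{
	unsigned char insn[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };

	print_6ph("Found:    ", insn);	/* "Found:    c0 04 00 00 00 00" */
	return 0;
}
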
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 56b5508..0ae6f8e 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -21,6 +21,7 @@
 #include <asm/nmi.h>
 #include <asm/crw.h>
 #include <asm/switch_to.h>
+#include <asm/fpu-internal.h>
 #include <asm/ctl_reg.h>
 
 struct mcck_struct {
@@ -164,8 +165,12 @@
 		cr0.val = S390_lowcore.cregs_save_area[0];
 		cr0.afp = cr0.vx = 1;
 		__ctl_load(cr0.val, 0, 0);
-		restore_vx_regs((__vector128 *)
-				&S390_lowcore.vector_save_area);
+		asm volatile(
+			"	la	1,%0\n"
+			"	.word	0xe70f,0x1000,0x0036\n"	/* vlm 0,15,0(1) */
+			"	.word	0xe70f,0x1100,0x0c36\n"	/* vlm 16,31,256(1) */
+			: : "Q" (*(struct vx_array *)
+				 &S390_lowcore.vector_save_area) : "1");
 		__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
 	}
 	/* Revalidate access registers */
@@ -358,4 +363,4 @@
 	ctl_set_bit(14, 24);	/* enable warning MCH */
 	return 0;
 }
-arch_initcall(machine_check_init);
+early_initcall(machine_check_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index afe05bf..b973972 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1019,12 +1019,9 @@
 		break;
 	}
 
-	/* The host-program-parameter (hpp) contains the sie control
-	 * block that is set by sie64a() in entry64.S.	Check if hpp
-	 * refers to a valid control block and set sde_regs flags
-	 * accordingly.  This would allow to use hpp values for other
-	 * purposes too.
-	 * For now, simply use a non-zero value as guest indicator.
+	/* The host-program-parameter (hpp) contains the pid of
+	 * the CPU thread as set by sie64a() in entry.S.
+	 * If non-zero, assume a guest sample.
 	 */
 	if (sfr->basic.hpp)
 		sde_regs->in_guest = 1;
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 8f587d8..f2dac9f 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -81,8 +81,38 @@
 
 void arch_release_task_struct(struct task_struct *tsk)
 {
-	if (tsk->thread.vxrs)
-		kfree(tsk->thread.vxrs);
+	/* Free either the floating-point or the vector register save area */
+	kfree(tsk->thread.fpu.regs);
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	*dst = *src;
+
+	/* Set up a new floating-point register save area */
+	dst->thread.fpu.fpc = 0;
+	dst->thread.fpu.flags = 0;	/* Always start with VX disabled */
+	dst->thread.fpu.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
+				       GFP_KERNEL|__GFP_REPEAT);
+	if (!dst->thread.fpu.fprs)
+		return -ENOMEM;
+
+	/*
+	 * Save the floating-point or vector register state of the current
+	 * task.  The state is not saved for early kernel threads, for example,
+	 * the init_task, which do not have an allocated save area.
+	 * The CIF_FPU flag is set in any case to lazily clear or restore a saved
+	 * state when switching to a different task or returning to user space.
+	 */
+	save_fpu_regs();
+	dst->thread.fpu.fpc = current->thread.fpu.fpc;
+	if (is_vx_task(current))
+		convert_vx_to_fp(dst->thread.fpu.fprs,
+				 current->thread.fpu.vxrs);
+	else
+		memcpy(dst->thread.fpu.fprs, current->thread.fpu.fprs,
+		       sizeof(freg_t) * __NUM_FPRS);
+	return 0;
 }
 
 int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
@@ -142,11 +172,6 @@
 	p->thread.ri_signum = 0;
 	frame->childregs.psw.mask &= ~PSW_MASK_RI;
 
-	/* Save the fpu registers to new thread structure. */
-	save_fp_ctl(&p->thread.fp_regs.fpc);
-	save_fp_regs(p->thread.fp_regs.fprs);
-	p->thread.fp_regs.pad = 0;
-	p->thread.vxrs = NULL;
 	/* Set a new TLS ?  */
 	if (clone_flags & CLONE_SETTLS) {
 		unsigned long tls = frame->childregs.gprs[6];
@@ -162,7 +187,7 @@
 
 asmlinkage void execve_tail(void)
 {
-	current->thread.fp_regs.fpc = 0;
+	current->thread.fpu.fpc = 0;
 	asm volatile("sfpc %0" : : "d" (0));
 }
 
@@ -171,8 +196,15 @@
  */
 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
 {
-	save_fp_ctl(&fpregs->fpc);
-	save_fp_regs(fpregs->fprs);
+	save_fpu_regs();
+	fpregs->fpc = current->thread.fpu.fpc;
+	fpregs->pad = 0;
+	if (is_vx_task(current))
+		convert_vx_to_fp((freg_t *)&fpregs->fprs,
+				 current->thread.fpu.vxrs);
+	else
+		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
+		       sizeof(fpregs->fprs));
 	return 1;
 }
 EXPORT_SYMBOL(dump_fpu);
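
arch_dup_task_struct above always gives the child a plain floating-point save area; when the parent is a vector task, the FP values are taken from the vector registers, because FPR 0-15 overlay the leftmost doubleword of V0-V15. A userspace sketch of that conversion, mirroring the per-register copy that the removed compat_signal.c loops performed (types are simplified stand-ins for freg_t and __vector128):

#include <stdio.h>
#include <stdint.h>

#define NUM_FPRS 16

struct vreg { uint64_t high, low; };		/* simplified __vector128 */

static void convert_vx_to_fp(uint64_t *fprs, const struct vreg *vxrs)
{
	int i;

	for (i = 0; i < NUM_FPRS; i++)
		fprs[i] = vxrs[i].high;		/* leftmost doubleword of V0..V15 */
}

int main(void)
{
	struct vreg vxrs[32] = { [2] = { .high = 0x4010000000000000ULL } };	/* 4.0 */
	uint64_t fprs[NUM_FPRS];

	convert_vx_to_fp(fprs, vxrs);
	printf("fpr2 = 0x%016llx\n", (unsigned long long)fprs[2]);
	return 0;
}
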
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index dc488e1..e6e077a 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -41,6 +41,15 @@
 }
 
 /*
+ * cpu_have_feature - Test CPU features on module initialization
+ */
+int cpu_have_feature(unsigned int num)
+{
+	return elf_hwcap & (1UL << num);
+}
+EXPORT_SYMBOL(cpu_have_feature);
+
+/*
  * show_cpuinfo - Get information on one CPU for use by procfs.
  */
 static int show_cpuinfo(struct seq_file *m, void *v)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index d363c9c..8b1c8e3 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -45,39 +45,27 @@
 	struct per_regs old, new;
 
 	/* Take care of the enable/disable of transactional execution. */
-	if (MACHINE_HAS_TE || MACHINE_HAS_VX) {
+	if (MACHINE_HAS_TE) {
 		unsigned long cr, cr_new;
 
 		__ctl_store(cr, 0, 0);
-		cr_new = cr;
-		if (MACHINE_HAS_TE) {
-			/* Set or clear transaction execution TXC bit 8. */
-			cr_new |= (1UL << 55);
-			if (task->thread.per_flags & PER_FLAG_NO_TE)
-				cr_new &= ~(1UL << 55);
-		}
-		if (MACHINE_HAS_VX) {
-			/* Enable/disable of vector extension */
-			cr_new &= ~(1UL << 17);
-			if (task->thread.vxrs)
-				cr_new |= (1UL << 17);
-		}
+		/* Set or clear transaction execution TXC bit 8. */
+		cr_new = cr | (1UL << 55);
+		if (task->thread.per_flags & PER_FLAG_NO_TE)
+			cr_new &= ~(1UL << 55);
 		if (cr_new != cr)
 			__ctl_load(cr_new, 0, 0);
-		if (MACHINE_HAS_TE) {
-			/* Set/clear transaction execution TDC bits 62/63. */
-			__ctl_store(cr, 2, 2);
-			cr_new = cr & ~3UL;
-			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
-				if (task->thread.per_flags &
-				    PER_FLAG_TE_ABORT_RAND_TEND)
-					cr_new |= 1UL;
-				else
-					cr_new |= 2UL;
-			}
-			if (cr_new != cr)
-				__ctl_load(cr_new, 2, 2);
+		/* Set or clear transaction execution TDC bits 62 and 63. */
+		__ctl_store(cr, 2, 2);
+		cr_new = cr & ~3UL;
+		if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
+			if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
+				cr_new |= 1UL;
+			else
+				cr_new |= 2UL;
 		}
+		if (cr_new != cr)
+			__ctl_load(cr_new, 2, 2);
 	}
 	/* Copy user specified PER registers */
 	new.control = thread->per_user.control;
@@ -242,21 +230,21 @@
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fp_regs.fpc;
+		tmp = child->thread.fpu.fpc;
 		tmp <<= BITS_PER_LONG - 32;
 
 	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
 		/*
-		 * floating point regs. are either in child->thread.fp_regs
-		 * or the child->thread.vxrs array
+		 * floating point regs. are either in child->thread.fpu
+		 * or the child->thread.fpu.vxrs array
 		 */
 		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-		if (child->thread.vxrs)
+		if (is_vx_task(child))
 			tmp = *(addr_t *)
-			       ((addr_t) child->thread.vxrs + 2*offset);
+			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
 		else
 			tmp = *(addr_t *)
-			       ((addr_t) &child->thread.fp_regs.fprs + offset);
+			       ((addr_t) &child->thread.fpu.fprs + offset);
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
@@ -387,20 +375,20 @@
 		if ((unsigned int) data != 0 ||
 		    test_fp_ctl(data >> (BITS_PER_LONG - 32)))
 			return -EINVAL;
-		child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);
+		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
 
 	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
 		/*
-		 * floating point regs. are either in child->thread.fp_regs
-		 * or the child->thread.vxrs array
+		 * floating point regs. are either in child->thread.fpu
+		 * or the child->thread.fpu.vxrs array
 		 */
 		offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
-		if (child->thread.vxrs)
+		if (is_vx_task(child))
 			*(addr_t *)((addr_t)
-				child->thread.vxrs + 2*offset) = data;
+				child->thread.fpu.vxrs + 2*offset) = data;
 		else
 			*(addr_t *)((addr_t)
-				&child->thread.fp_regs.fprs + offset) = data;
+				&child->thread.fpu.fprs + offset) = data;
 
 	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
 		/*
@@ -621,20 +609,20 @@
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fp_regs.fpc;
+		tmp = child->thread.fpu.fpc;
 
 	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
 		/*
-		 * floating point regs. are either in child->thread.fp_regs
-		 * or the child->thread.vxrs array
+		 * floating point regs. are either in child->thread.fpu
+		 * or the child->thread.fpu.vxrs array
 		 */
 		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-		if (child->thread.vxrs)
+		if (is_vx_task(child))
 			tmp = *(__u32 *)
-			       ((addr_t) child->thread.vxrs + 2*offset);
+			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
 		else
 			tmp = *(__u32 *)
-			       ((addr_t) &child->thread.fp_regs.fprs + offset);
+			       ((addr_t) &child->thread.fpu.fprs + offset);
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
@@ -746,20 +734,20 @@
 		 */
 		if (test_fp_ctl(tmp))
 			return -EINVAL;
-		child->thread.fp_regs.fpc = data;
+		child->thread.fpu.fpc = data;
 
 	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
 		/*
-		 * floating point regs. are either in child->thread.fp_regs
-		 * or the child->thread.vxrs array
+		 * floating point regs. are either in child->thread.fpu
+		 * or the child->thread.fpu.vxrs array
 		 */
 		offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
-		if (child->thread.vxrs)
+		if (is_vx_task(child))
 			*(__u32 *)((addr_t)
-				child->thread.vxrs + 2*offset) = tmp;
+				child->thread.fpu.vxrs + 2*offset) = tmp;
 		else
 			*(__u32 *)((addr_t)
-				&child->thread.fp_regs.fprs + offset) = tmp;
+				&child->thread.fpu.fprs + offset) = tmp;
 
 	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
 		/*
@@ -952,18 +940,16 @@
 			   const struct user_regset *regset, unsigned int pos,
 			   unsigned int count, void *kbuf, void __user *ubuf)
 {
-	if (target == current) {
-		save_fp_ctl(&target->thread.fp_regs.fpc);
-		save_fp_regs(target->thread.fp_regs.fprs);
-	} else if (target->thread.vxrs) {
-		int i;
+	_s390_fp_regs fp_regs;
 
-		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			target->thread.fp_regs.fprs[i] =
-				*(freg_t *)(target->thread.vxrs + i);
-	}
+	if (target == current)
+		save_fpu_regs();
+
+	fp_regs.fpc = target->thread.fpu.fpc;
+	fpregs_store(&fp_regs, &target->thread.fpu);
+
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fp_regs, 0, -1);
+				   &fp_regs, 0, -1);
 }
 
 static int s390_fpregs_set(struct task_struct *target,
@@ -972,41 +958,33 @@
 			   const void __user *ubuf)
 {
 	int rc = 0;
+	freg_t fprs[__NUM_FPRS];
 
-	if (target == current) {
-		save_fp_ctl(&target->thread.fp_regs.fpc);
-		save_fp_regs(target->thread.fp_regs.fprs);
-	}
+	if (target == current)
+		save_fpu_regs();
 
 	/* If setting FPC, must validate it first. */
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
-		u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
+		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
 		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
 					0, offsetof(s390_fp_regs, fprs));
 		if (rc)
 			return rc;
 		if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
 			return -EINVAL;
-		target->thread.fp_regs.fpc = ufpc[0];
+		target->thread.fpu.fpc = ufpc[0];
 	}
 
 	if (rc == 0 && count > 0)
 		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					target->thread.fp_regs.fprs,
-					offsetof(s390_fp_regs, fprs), -1);
+					fprs, offsetof(s390_fp_regs, fprs), -1);
+	if (rc)
+		return rc;
 
-	if (rc == 0) {
-		if (target == current) {
-			restore_fp_ctl(&target->thread.fp_regs.fpc);
-			restore_fp_regs(target->thread.fp_regs.fprs);
-		} else if (target->thread.vxrs) {
-			int i;
-
-			for (i = 0; i < __NUM_VXRS_LOW; i++)
-				*(freg_t *)(target->thread.vxrs + i) =
-					target->thread.fp_regs.fprs[i];
-		}
-	}
+	if (is_vx_task(target))
+		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
+	else
+		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
 
 	return rc;
 }
@@ -1069,11 +1047,11 @@
 
 	if (!MACHINE_HAS_VX)
 		return -ENODEV;
-	if (target->thread.vxrs) {
+	if (is_vx_task(target)) {
 		if (target == current)
-			save_vx_regs(target->thread.vxrs);
+			save_fpu_regs();
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = *((__u64 *)(target->thread.vxrs + i) + 1);
+			vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
 	} else
 		memset(vxrs, 0, sizeof(vxrs));
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
@@ -1089,20 +1067,17 @@
 
 	if (!MACHINE_HAS_VX)
 		return -ENODEV;
-	if (!target->thread.vxrs) {
+	if (!is_vx_task(target)) {
 		rc = alloc_vector_registers(target);
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_vx_regs(target->thread.vxrs);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
-	if (rc == 0) {
+	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			*((__u64 *)(target->thread.vxrs + i) + 1) = vxrs[i];
-		if (target == current)
-			restore_vx_regs(target->thread.vxrs);
-	}
+			*((__u64 *)(target->thread.fpu.vxrs + i) + 1) = vxrs[i];
 
 	return rc;
 }
@@ -1116,10 +1091,10 @@
 
 	if (!MACHINE_HAS_VX)
 		return -ENODEV;
-	if (target->thread.vxrs) {
+	if (is_vx_task(target)) {
 		if (target == current)
-			save_vx_regs(target->thread.vxrs);
-		memcpy(vxrs, target->thread.vxrs + __NUM_VXRS_LOW,
+			save_fpu_regs();
+		memcpy(vxrs, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
 		       sizeof(vxrs));
 	} else
 		memset(vxrs, 0, sizeof(vxrs));
@@ -1135,18 +1110,15 @@
 
 	if (!MACHINE_HAS_VX)
 		return -ENODEV;
-	if (!target->thread.vxrs) {
+	if (!is_vx_task(target)) {
 		rc = alloc_vector_registers(target);
 		if (rc)
 			return rc;
 	} else if (target == current)
-		save_vx_regs(target->thread.vxrs);
+		save_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				target->thread.vxrs + __NUM_VXRS_LOW, 0, -1);
-	if (rc == 0 && target == current)
-		restore_vx_regs(target->thread.vxrs);
-
+				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
 	return rc;
 }
 
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 9f60467..5090d3d 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/kvm_host.h>
+#include <asm/fpu-internal.h>
 #include <asm/ftrace.h>
 
 #ifdef CONFIG_FUNCTION_TRACER
@@ -8,6 +9,8 @@
 #if IS_ENABLED(CONFIG_KVM)
 EXPORT_SYMBOL(sie64a);
 EXPORT_SYMBOL(sie_exit);
+EXPORT_SYMBOL(save_fpu_regs);
+EXPORT_SYMBOL(__ctl_set_vx);
 #endif
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
deleted file mode 100644
index ada0c07..0000000
--- a/arch/s390/kernel/sclp.S
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Mini SCLP driver.
- *
- * Copyright IBM Corp. 2004, 2009
- *
- *   Author(s):	Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>,
- *		Heiko Carstens <heiko.carstens@de.ibm.com>,
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/irq.h>
-
-LC_EXT_NEW_PSW		= 0x58			# addr of ext int handler
-LC_EXT_NEW_PSW_64	= 0x1b0			# addr of ext int handler 64 bit
-LC_EXT_INT_PARAM	= 0x80			# addr of ext int parameter
-LC_EXT_INT_CODE		= 0x86			# addr of ext int code
-LC_AR_MODE_ID		= 0xa3
-
-#
-# Subroutine which waits synchronously until either an external interruption
-# or a timeout occurs.
-#
-# Parameters:
-#   R2	= 0 for no timeout, non-zero for timeout in (approximated) seconds
-#
-# Returns:
-#   R2	= 0 on interrupt, 2 on timeout
-#   R3	= external interruption parameter if R2=0
-#
-
-_sclp_wait_int:
-	stm	%r6,%r15,24(%r15)		# save registers
-	basr	%r13,0				# get base register
-.LbaseS1:
-	ahi	%r15,-96			# create stack frame
-	la	%r8,LC_EXT_NEW_PSW		# register int handler
-	la	%r9,.LextpswS1-.LbaseS1(%r13)
-	tm	LC_AR_MODE_ID,1
-	jno	.Lesa1
-	la	%r8,LC_EXT_NEW_PSW_64		# register int handler 64 bit
-	la	%r9,.LextpswS1_64-.LbaseS1(%r13)
-.Lesa1:
-	mvc	.LoldpswS1-.LbaseS1(16,%r13),0(%r8)
-	mvc	0(16,%r8),0(%r9)
-	epsw	%r6,%r7				# set current addressing mode
-	nill	%r6,0x1				# in new psw (31 or 64 bit mode)
-	nilh	%r7,0x8000
-	stm	%r6,%r7,0(%r8)
-	lhi	%r6,0x0200			# cr mask for ext int (cr0.54)
-	ltr	%r2,%r2
-	jz	.LsetctS1
-	ahi	%r6,0x0800			# cr mask for clock int (cr0.52)
-	stck	.LtimeS1-.LbaseS1(%r13)		# initiate timeout
-	al	%r2,.LtimeS1-.LbaseS1(%r13)
-	st	%r2,.LtimeS1-.LbaseS1(%r13)
-	sckc	.LtimeS1-.LbaseS1(%r13)
-
-.LsetctS1:
-	stctl	%c0,%c0,.LctlS1-.LbaseS1(%r13)	# enable required interrupts
-	l	%r0,.LctlS1-.LbaseS1(%r13)
-	lhi	%r1,~(0x200 | 0x800)		# clear old values
-	nr	%r1,%r0
-	or	%r1,%r6				# set new value
-	st	%r1,.LctlS1-.LbaseS1(%r13)
-	lctl	%c0,%c0,.LctlS1-.LbaseS1(%r13)
-	st	%r0,.LctlS1-.LbaseS1(%r13)
-	lhi	%r2,2				# return code for timeout
-.LloopS1:
-	lpsw	.LwaitpswS1-.LbaseS1(%r13)	# wait until interrupt
-.LwaitS1:
-	lh	%r7,LC_EXT_INT_CODE
-	chi	%r7,EXT_IRQ_CLK_COMP		# timeout?
-	je	.LtimeoutS1
-	chi	%r7,EXT_IRQ_SERVICE_SIG		# service int?
-	jne	.LloopS1
-	sr	%r2,%r2
-	l	%r3,LC_EXT_INT_PARAM
-.LtimeoutS1:
-	lctl	%c0,%c0,.LctlS1-.LbaseS1(%r13)	# restore interrupt setting
-	# restore old handler
-	mvc	0(16,%r8),.LoldpswS1-.LbaseS1(%r13)
-	lm	%r6,%r15,120(%r15)		# restore registers
-	br	%r14				# return to caller
-
-	.align	8
-.LoldpswS1:
-	.long	0, 0, 0, 0			# old ext int PSW
-.LextpswS1:
-	.long	0x00080000, 0x80000000+.LwaitS1	# PSW to handle ext int
-.LextpswS1_64:
-	.quad	0, .LwaitS1			# PSW to handle ext int, 64 bit
-.LwaitpswS1:
-	.long	0x010a0000, 0x00000000+.LloopS1	# PSW to wait for ext int
-.LtimeS1:
-	.quad	0				# current time
-.LctlS1:
-	.long	0				# CT0 contents
-
-#
-# Subroutine to synchronously issue a service call.
-#
-# Parameters:
-#   R2	= command word
-#   R3	= sccb address
-#
-# Returns:
-#   R2	= 0 on success, 1 on failure
-#   R3	= sccb response code if R2 = 0
-#
-
-_sclp_servc:
-	stm	%r6,%r15,24(%r15)		# save registers
-	ahi	%r15,-96			# create stack frame
-	lr	%r6,%r2				# save command word
-	lr	%r7,%r3				# save sccb address
-.LretryS2:
-	lhi	%r2,1				# error return code
-	.insn	rre,0xb2200000,%r6,%r7		# servc
-	brc	1,.LendS2			# exit if not operational
-	brc	8,.LnotbusyS2			# go on if not busy
-	sr	%r2,%r2				# wait until no longer busy
-	bras	%r14,_sclp_wait_int
-	j	.LretryS2			# retry
-.LnotbusyS2:
-	sr	%r2,%r2				# wait until result
-	bras	%r14,_sclp_wait_int
-	sr	%r2,%r2
-	lh	%r3,6(%r7)
-.LendS2:
-	lm	%r6,%r15,120(%r15)		# restore registers
-	br	%r14
-
-#
-# Subroutine to set up the SCLP interface.
-#
-# Parameters:
-#   R2	= 0 to activate, non-zero to deactivate
-#
-# Returns:
-#   R2	= 0 on success, non-zero on failure
-#
-
-_sclp_setup:
-	stm	%r6,%r15,24(%r15)		# save registers
-	ahi	%r15,-96			# create stack frame
-	basr	%r13,0				# get base register
-.LbaseS3:
-	l	%r6,.LsccbS0-.LbaseS3(%r13)	# prepare init mask sccb
-	mvc	0(.LinitendS3-.LinitsccbS3,%r6),.LinitsccbS3-.LbaseS3(%r13)
-	ltr	%r2,%r2				# initialization?
-	jz	.LdoinitS3			# go ahead
-	# clear masks
-	xc	.LinitmaskS3-.LinitsccbS3(8,%r6),.LinitmaskS3-.LinitsccbS3(%r6)
-.LdoinitS3:
-	l	%r2,.LwritemaskS3-.LbaseS3(%r13)# get command word
-	lr	%r3,%r6				# get sccb address
-	bras	%r14,_sclp_servc		# issue service call
-	ltr	%r2,%r2				# servc successful?
-	jnz	.LerrorS3
-	chi	%r3,0x20			# write mask successful?
-	jne	.LerrorS3
-	# check masks
-	la	%r2,.LinitmaskS3-.LinitsccbS3(%r6)
-	l	%r1,0(%r2)			# receive mask ok?
-	n	%r1,12(%r2)
-	cl	%r1,0(%r2)
-	jne	.LerrorS3
-	l	%r1,4(%r2)			# send mask ok?
-	n	%r1,8(%r2)
-	cl	%r1,4(%r2)
-	sr	%r2,%r2
-	je	.LendS3
-.LerrorS3:
-	lhi	%r2,1				# error return code
-.LendS3:
-	lm	%r6,%r15,120(%r15)		# restore registers
-	br	%r14
-.LwritemaskS3:
-	.long	0x00780005			# SCLP command for write mask
-.LinitsccbS3:
-	.word	.LinitendS3-.LinitsccbS3
-	.byte	0,0,0,0
-	.word	0
-	.word	0
-	.word	4
-.LinitmaskS3:
-	.long	0x80000000
-	.long	0x40000000
-	.long	0
-	.long	0
-.LinitendS3:
-
-#
-# Subroutine which prints a given text to the SCLP console.
-#
-# Parameters:
-#   R2	= address of nil-terminated ASCII text
-#
-# Returns:
-#   R2	= 0 on success, 1 on failure
-#
-
-_sclp_print:
-	stm	%r6,%r15,24(%r15)		# save registers
-	ahi	%r15,-96			# create stack frame
-	basr	%r13,0				# get base register
-.LbaseS4:
-	l	%r8,.LsccbS0-.LbaseS4(%r13)	# prepare write data sccb
-	mvc	0(.LmtoS4-.LwritesccbS4,%r8),.LwritesccbS4-.LbaseS4(%r13)
-	la	%r7,.LmtoS4-.LwritesccbS4(%r8)	# current mto addr
-	sr	%r0,%r0
-	l	%r10,.Lascebc-.LbaseS4(%r13)	# address of translation table
-.LinitmtoS4:
-	# initialize mto
-	mvc	0(.LmtoendS4-.LmtoS4,%r7),.LmtoS4-.LbaseS4(%r13)
-	lhi	%r6,.LmtoendS4-.LmtoS4		# current mto length
-.LloopS4:
-	ic	%r0,0(%r2)			# get character
-	ahi	%r2,1
-	ltr	%r0,%r0				# end of string?
-	jz	.LfinalizemtoS4
-	chi	%r0,0x0a			# end of line (NL)?
-	jz	.LfinalizemtoS4
-	stc	%r0,0(%r6,%r7)			# copy to mto
-	la	%r11,0(%r6,%r7)
-	tr	0(1,%r11),0(%r10)		# translate to EBCDIC
-	ahi	%r6,1
-	j	.LloopS4
-.LfinalizemtoS4:
-	sth	%r6,0(%r7)			# update mto length
-	lh	%r9,.LmdbS4-.LwritesccbS4(%r8)	# update mdb length
-	ar	%r9,%r6
-	sth	%r9,.LmdbS4-.LwritesccbS4(%r8)
-	lh	%r9,.LevbufS4-.LwritesccbS4(%r8)# update evbuf length
-	ar	%r9,%r6
-	sth	%r9,.LevbufS4-.LwritesccbS4(%r8)
-	lh	%r9,0(%r8)			# update sccb length
-	ar	%r9,%r6
-	sth	%r9,0(%r8)
-	ar	%r7,%r6				# update current mto address
-	ltr	%r0,%r0				# more characters?
-	jnz	.LinitmtoS4
-	l	%r2,.LwritedataS4-.LbaseS4(%r13)# write data
-	lr	%r3,%r8
-	bras	%r14,_sclp_servc
-	ltr	%r2,%r2				# servc successful?
-	jnz	.LendS4
-	chi	%r3,0x20			# write data successful?
-	je	.LendS4
-	lhi	%r2,1				# error return code
-.LendS4:
-	lm	%r6,%r15,120(%r15)		# restore registers
-	br	%r14
-
-#
-# Function which prints a given text to the SCLP console.
-#
-# Parameters:
-#   R2	= address of nil-terminated ASCII text
-#
-# Returns:
-#   R2	= 0 on success, 1 on failure
-#
-
-ENTRY(_sclp_print_early)
-	stm	%r6,%r15,24(%r15)		# save registers
-	ahi	%r15,-96			# create stack frame
-	tm	LC_AR_MODE_ID,1
-	jno	.Lesa2
-	ahi	%r15,-80
-	stmh	%r6,%r15,96(%r15)		# store upper register halves
-	basr	%r13,0
-	lmh	%r0,%r15,.Lzeroes-.(%r13)	# clear upper register halves
-.Lesa2:
-	lr	%r10,%r2			# save string pointer
-	lhi	%r2,0
-	bras	%r14,_sclp_setup		# enable console
-	ltr	%r2,%r2
-	jnz	.LendS5
-	lr	%r2,%r10
-	bras	%r14,_sclp_print		# print string
-	ltr	%r2,%r2
-	jnz	.LendS5
-	lhi	%r2,1
-	bras	%r14,_sclp_setup		# disable console
-.LendS5:
-	tm	LC_AR_MODE_ID,1
-	jno	.Lesa3
-	lgfr	%r2,%r2				# sign extend return value
-	lmh	%r6,%r15,96(%r15)		# restore upper register halves
-	ahi	%r15,80
-.Lesa3:
-	lm	%r6,%r15,120(%r15)		# restore registers
-	br	%r14
-.Lzeroes:
-	.fill	64,4,0
-
-.LwritedataS4:
-	.long	0x00760005			# SCLP command for write data
-.LwritesccbS4:
-	# sccb
-	.word	.LmtoS4-.LwritesccbS4
-	.byte	0
-	.byte	0,0,0
-	.word	0
-
-	# evbuf
-.LevbufS4:
-	.word	.LmtoS4-.LevbufS4
-	.byte	0x02
-	.byte	0
-	.word	0
-
-.LmdbS4:
-	# mdb
-	.word	.LmtoS4-.LmdbS4
-	.word	1
-	.long	0xd4c4c240
-	.long	1
-
-	# go
-.LgoS4:
-	.word	.LmtoS4-.LgoS4
-	.word	1
-	.long	0
-	.byte	0,0,0,0,0,0,0,0
-	.byte	0,0,0
-	.byte	0
-	.byte	0,0,0,0,0,0,0
-	.byte	0
-	.word	0
-	.byte	0,0,0,0,0,0,0,0,0,0
-	.byte	0,0,0,0,0,0,0,0
-	.byte	0,0,0,0,0,0,0,0
-
-.LmtoS4:
-	.word	.LmtoendS4-.LmtoS4
-	.word	4
-	.word	0x1000
-	.byte	0
-	.byte	0,0,0
-.LmtoendS4:
-
-	# Global constants
-.LsccbS0:
-	.long	_sclp_work_area
-.Lascebc:
-	.long	_ascebc
-
-.section .data,"aw",@progbits
-	.balign 4096
-_sclp_work_area:
-	.fill	4096
-.previous
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
new file mode 100644
index 0000000..fa0bdff
--- /dev/null
+++ b/arch/s390/kernel/sclp.c
@@ -0,0 +1,160 @@
+/*
+ *    Copyright IBM Corp. 2015
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <asm/ebcdic.h>
+#include <asm/irq.h>
+#include <asm/lowcore.h>
+#include <asm/processor.h>
+#include <asm/sclp.h>
+
+static char _sclp_work_area[4096] __aligned(PAGE_SIZE);
+
+static void _sclp_wait_int(void)
+{
+	unsigned long cr0, cr0_new, psw_mask, addr;
+	psw_t psw_ext_save, psw_wait;
+
+	__ctl_store(cr0, 0, 0);
+	cr0_new = cr0 | 0x200;
+	__ctl_load(cr0_new, 0, 0);
+
+	psw_ext_save = S390_lowcore.external_new_psw;
+	psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA);
+	S390_lowcore.external_new_psw.mask = psw_mask;
+	psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
+	S390_lowcore.ext_int_code = 0;
+
+	do {
+		asm volatile(
+			"	larl	%[addr],0f\n"
+			"	stg	%[addr],%[psw_wait_addr]\n"
+			"	stg	%[addr],%[psw_ext_addr]\n"
+			"	lpswe	%[psw_wait]\n"
+			"0:\n"
+			: [addr] "=&d" (addr),
+			  [psw_wait_addr] "=Q" (psw_wait.addr),
+			  [psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
+			: [psw_wait] "Q" (psw_wait)
+			: "cc", "memory");
+	} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
+
+	__ctl_load(cr0, 0, 0);
+	S390_lowcore.external_new_psw = psw_ext_save;
+}
+
+static int _sclp_servc(unsigned int cmd, char *sccb)
+{
+	unsigned int cc;
+
+	do {
+		asm volatile(
+			"	.insn	rre,0xb2200000,%1,%2\n"
+			"	ipm	%0\n"
+			: "=d" (cc) : "d" (cmd), "a" (sccb)
+			: "cc", "memory");
+		cc >>= 28;
+		if (cc == 3)
+			return -EINVAL;
+		_sclp_wait_int();
+	} while (cc != 0);
+	return (*(unsigned short *)(sccb + 6) == 0x20) ? 0 : -EIO;
+}
+
+static int _sclp_setup(int disable)
+{
+	static unsigned char init_sccb[] = {
+		0x00, 0x1c,
+		0x00, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00,
+		0x00, 0x04,
+		0x80, 0x00, 0x00, 0x00,	0x40, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00
+	};
+	unsigned int *masks;
+	int rc;
+
+	memcpy(_sclp_work_area, init_sccb, 28);
+	masks = (unsigned int *)(_sclp_work_area + 12);
+	if (disable)
+		memset(masks, 0, 16);
+	/* SCLP write mask */
+	rc = _sclp_servc(0x00780005, _sclp_work_area);
+	if (rc)
+		return rc;
+	if ((masks[0] & masks[3]) != masks[0] ||
+	    (masks[1] & masks[2]) != masks[1])
+		return -EIO;
+	return 0;
+}
+
+static int _sclp_print(const char *str)
+{
+	static unsigned char write_head[] = {
+		/* sccb header */
+		0x00, 0x52,					/* 0 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,		/* 2 */
+		/* evbuf */
+		0x00, 0x4a,					/* 8 */
+		0x02, 0x00, 0x00, 0x00,				/* 10 */
+		/* mdb */
+		0x00, 0x44,					/* 14 */
+		0x00, 0x01,					/* 16 */
+		0xd4, 0xc4, 0xc2, 0x40,				/* 18 */
+		0x00, 0x00, 0x00, 0x01,				/* 22 */
+		/* go */
+		0x00, 0x38,					/* 26 */
+		0x00, 0x01,					/* 28 */
+		0x00, 0x00, 0x00, 0x00,				/* 30 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 34 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 42 */
+		0x00, 0x00, 0x00, 0x00,				/* 50 */
+		0x00, 0x00,					/* 54 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 56 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 64 */
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* 72 */
+		0x00, 0x00,					/* 80 */
+	};
+	static unsigned char write_mto[] = {
+		/* mto	*/
+		0x00, 0x0a,					/* 0 */
+		0x00, 0x04,					/* 2 */
+		0x10, 0x00,					/* 4 */
+		0x00, 0x00, 0x00, 0x00				/* 6 */
+	};
+	unsigned char *ptr, ch;
+	unsigned int count;
+
+	memcpy(_sclp_work_area, write_head, sizeof(write_head));
+	ptr = _sclp_work_area + sizeof(write_head);
+	do {
+		memcpy(ptr, write_mto, sizeof(write_mto));
+		for (count = sizeof(write_mto); (ch = *str++) != 0; count++) {
+			if (ch == 0x0a)
+				break;
+			ptr[count] = _ascebc[ch];
+		}
+		/* Update length fields in mto, mdb, evbuf and sccb */
+		*(unsigned short *) ptr = count;
+		*(unsigned short *)(_sclp_work_area + 14) += count;
+		*(unsigned short *)(_sclp_work_area + 8) += count;
+		*(unsigned short *)(_sclp_work_area + 0) += count;
+		ptr += count;
+	} while (ch != 0);
+
+	/* SCLP write data */
+	return _sclp_servc(0x00760005, _sclp_work_area);
+}
+
+int _sclp_print_early(const char *str)
+{
+	int rc;
+
+	rc = _sclp_setup(0);
+	if (rc)
+		return rc;
+	rc = _sclp_print(str);
+	if (rc)
+		return rc;
+	return _sclp_setup(1);
+}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ca070d2..ce0cbd6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -62,6 +62,7 @@
 #include <asm/os_info.h>
 #include <asm/sclp.h>
 #include <asm/sysinfo.h>
+#include <asm/numa.h>
 #include "entry.h"
 
 /*
@@ -76,7 +77,7 @@
 unsigned int console_irq = -1;
 EXPORT_SYMBOL(console_irq);
 
-unsigned long elf_hwcap = 0;
+unsigned long elf_hwcap __read_mostly = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
 
 int __initdata memory_end_set;
@@ -688,7 +689,7 @@
 /*
  * Setup hardware capabilities.
  */
-static void __init setup_hwcaps(void)
+static int __init setup_hwcaps(void)
 {
 	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
 	struct cpuid cpu_id;
@@ -754,9 +755,11 @@
 		elf_hwcap |= HWCAP_S390_TE;
 
 	/*
-	 * Vector extension HWCAP_S390_VXRS is bit 11.
+	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
+	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
+	 * instead of facility bit 129.
 	 */
-	if (test_facility(129))
+	if (MACHINE_HAS_VX)
 		elf_hwcap |= HWCAP_S390_VXRS;
 	get_cpu_id(&cpu_id);
 	add_device_randomness(&cpu_id, sizeof(cpu_id));
@@ -793,7 +796,9 @@
 		strcpy(elf_platform, "z13");
 		break;
 	}
+	return 0;
 }
+arch_initcall(setup_hwcaps);
 
 /*
  * Add system information as device randomness
@@ -879,11 +884,7 @@
 	setup_lowcore();
 	smp_fill_possible_mask();
         cpu_init();
-
-	/*
-	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
-	 */
-	setup_hwcaps();
+	numa_setup();
 
 	/*
 	 * Create kernel page tables and switch to virtual addressing.
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index c551f22..9549af1 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -105,32 +105,13 @@
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fp_ctl(&current->thread.fp_regs.fpc);
-	if (current->thread.vxrs) {
-		int i;
-
-		save_vx_regs(current->thread.vxrs);
-		for (i = 0; i < __NUM_FPRS; i++)
-			current->thread.fp_regs.fprs[i] =
-				*(freg_t *)(current->thread.vxrs + i);
-	} else
-		save_fp_regs(current->thread.fp_regs.fprs);
+	save_fpu_regs();
 }
 
 /* Load registers after signal return */
 static void load_sigregs(void)
 {
 	restore_access_regs(current->thread.acrs);
-	/* restore_fp_ctl is done in restore_sigregs */
-	if (current->thread.vxrs) {
-		int i;
-
-		for (i = 0; i < __NUM_FPRS; i++)
-			*(freg_t *)(current->thread.vxrs + i) =
-				current->thread.fp_regs.fprs[i];
-		restore_vx_regs(current->thread.vxrs);
-	} else
-		restore_fp_regs(current->thread.fp_regs.fprs);
 }
 
 /* Returns non-zero on fault. */
@@ -146,8 +127,7 @@
 	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
 	       sizeof(user_sregs.regs.acrs));
-	memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
-	       sizeof(user_sregs.fpregs));
+	fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
 		return -EFAULT;
 	return 0;
@@ -166,8 +146,8 @@
 	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
 		return -EINVAL;
 
-	/* Loading the floating-point-control word can fail. Do that first. */
-	if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+	/* Test the floating-point-control word. */
+	if (test_fp_ctl(user_sregs.fpregs.fpc))
 		return -EINVAL;
 
 	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
@@ -185,8 +165,7 @@
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
 	       sizeof(current->thread.acrs));
 
-	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
-	       sizeof(current->thread.fp_regs));
+	fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
 
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -200,13 +179,13 @@
 	int i;
 
 	/* Save vector registers to signal stack */
-	if (current->thread.vxrs) {
+	if (is_vx_task(current)) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = *((__u64 *)(current->thread.vxrs + i) + 1);
+			vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
 				   sizeof(sregs_ext->vxrs_low)) ||
 		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.vxrs + __NUM_VXRS_LOW,
+				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
 				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -220,15 +199,15 @@
 	int i;
 
 	/* Restore vector registers from signal stack */
-	if (current->thread.vxrs) {
+	if (is_vx_task(current)) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
 				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
 				     &sregs_ext->vxrs_high,
 				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			*((__u64 *)(current->thread.vxrs + i) + 1) = vxrs[i];
+			*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
 	}
 	return 0;
 }
@@ -243,6 +222,7 @@
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -266,6 +246,7 @@
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
+	save_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
@@ -400,7 +381,7 @@
 	uc_flags = 0;
 	if (MACHINE_HAS_VX) {
 		frame_size += sizeof(_sigregs_ext);
-		if (current->thread.vxrs)
+		if (is_vx_task(current))
 			uc_flags |= UC_VXRS;
 	}
 	frame = get_sigframe(&ksig->ka, regs, frame_size);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6f54c17..c6355e6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -532,8 +532,8 @@
 
 #ifdef CONFIG_CRASH_DUMP
 
-static void __smp_store_cpu_state(struct save_area_ext *sa_ext, u16 address,
-				  int is_boot_cpu)
+static void __init __smp_store_cpu_state(struct save_area_ext *sa_ext,
+					 u16 address, int is_boot_cpu)
 {
 	void *lc = (void *)(unsigned long) store_prefix();
 	unsigned long vx_sa;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 1acad02..f3f4a13 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -276,9 +276,9 @@
 SYSCALL(sys_statfs64,compat_sys_statfs64)
 SYSCALL(sys_fstatfs64,compat_sys_fstatfs64)
 SYSCALL(sys_remap_file_pages,compat_sys_remap_file_pages)
-NI_SYSCALL						/* 268 sys_mbind */
-NI_SYSCALL						/* 269 sys_get_mempolicy */
-NI_SYSCALL						/* 270 sys_set_mempolicy */
+SYSCALL(sys_mbind,compat_sys_mbind)
+SYSCALL(sys_get_mempolicy,compat_sys_get_mempolicy)
+SYSCALL(sys_set_mempolicy,compat_sys_set_mempolicy)
 SYSCALL(sys_mq_open,compat_sys_mq_open)
 SYSCALL(sys_mq_unlink,compat_sys_mq_unlink)
 SYSCALL(sys_mq_timedsend,compat_sys_mq_timedsend)
@@ -295,7 +295,7 @@
 SYSCALL(sys_inotify_init,sys_inotify_init)
 SYSCALL(sys_inotify_add_watch,compat_sys_inotify_add_watch)	/* 285 */
 SYSCALL(sys_inotify_rm_watch,compat_sys_inotify_rm_watch)
-NI_SYSCALL						/* 287 sys_migrate_pages */
+SYSCALL(sys_migrate_pages,compat_sys_migrate_pages)
 SYSCALL(sys_openat,compat_sys_openat)
 SYSCALL(sys_mkdirat,compat_sys_mkdirat)
 SYSCALL(sys_mknodat,compat_sys_mknodat)			/* 290 */
@@ -318,7 +318,7 @@
 SYSCALL(sys_sync_file_range,compat_sys_s390_sync_file_range)
 SYSCALL(sys_tee,compat_sys_tee)
 SYSCALL(sys_vmsplice,compat_sys_vmsplice)
-NI_SYSCALL						/* 310 sys_move_pages */
+SYSCALL(sys_move_pages,compat_sys_move_pages)
 SYSCALL(sys_getcpu,compat_sys_getcpu)
 SYSCALL(sys_epoll_pwait,compat_sys_epoll_pwait)
 SYSCALL(sys_utimes,compat_sys_utimes)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 9e733d9..017c3a9 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -58,6 +58,9 @@
 
 static DEFINE_PER_CPU(struct clock_event_device, comparators);
 
+ATOMIC_NOTIFIER_HEAD(s390_epoch_delta_notifier);
+EXPORT_SYMBOL(s390_epoch_delta_notifier);
+
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
@@ -117,11 +120,6 @@
 	return 0;
 }
 
-static void s390_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
 /*
  * Set up lowcore and control register of the current cpu to
  * enable TOD clock and clock comparator interrupts.
@@ -145,7 +143,6 @@
 	cd->rating		= 400;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= s390_next_event;
-	cd->set_mode		= s390_set_mode;
 
 	clockevents_register_device(cd);
 
@@ -381,7 +378,7 @@
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -392,7 +389,7 @@
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*
@@ -752,7 +749,7 @@
 static int etr_sync_clock(void *data)
 {
 	static int first;
-	unsigned long long clock, old_clock, delay, delta;
+	unsigned long long clock, old_clock, clock_delta, delay, delta;
 	struct clock_sync_data *etr_sync;
 	struct etr_aib *sync_port, *aib;
 	int port;
@@ -789,6 +786,9 @@
 		delay = (unsigned long long)
 			(aib->edf2.etv - sync_port->edf2.etv) << 32;
 		delta = adjust_time(old_clock, clock, delay);
+		clock_delta = clock - old_clock;
+		atomic_notifier_call_chain(&s390_epoch_delta_notifier, 0,
+					   &clock_delta);
 		etr_sync->fixup_cc = delta;
 		fixup_clock_comparator(delta);
 		/* Verify that the clock is properly set. */
@@ -1526,7 +1526,7 @@
 static int stp_sync_clock(void *data)
 {
 	static int first;
-	unsigned long long old_clock, delta;
+	unsigned long long old_clock, delta, new_clock, clock_delta;
 	struct clock_sync_data *stp_sync;
 	int rc;
 
@@ -1551,7 +1551,11 @@
 		old_clock = get_tod_clock();
 		rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
 		if (rc == 0) {
-			delta = adjust_time(old_clock, get_tod_clock(), 0);
+			new_clock = get_tod_clock();
+			delta = adjust_time(old_clock, new_clock, 0);
+			clock_delta = new_clock - old_clock;
+			atomic_notifier_call_chain(&s390_epoch_delta_notifier,
+						   0, &clock_delta);
 			fixup_clock_comparator(delta);
 			rc = chsc_sstpi(stp_page, &stp_info,
 					sizeof(struct stp_sstpi));
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 5728c5b..bf05e7f 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -18,7 +18,10 @@
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/nodemask.h>
+#include <linux/node.h>
 #include <asm/sysinfo.h>
+#include <asm/numa.h>
 
 #define PTF_HORIZONTAL	(0UL)
 #define PTF_VERTICAL	(1UL)
@@ -37,8 +40,10 @@
 static int topology_enabled = 1;
 static DECLARE_WORK(topology_work, topology_work_fn);
 
-/* topology_lock protects the socket and book linked lists */
-static DEFINE_SPINLOCK(topology_lock);
+/*
+ * Socket/Book linked lists and per_cpu(cpu_topology) updates are
+ * protected by "sched_domains_mutex".
+ */
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
@@ -188,7 +193,6 @@
 {
 	struct cpuid cpu_id;
 
-	spin_lock_irq(&topology_lock);
 	get_cpu_id(&cpu_id);
 	clear_masks();
 	switch (cpu_id.machine) {
@@ -199,7 +203,6 @@
 	default:
 		__tl_to_masks_generic(info);
 	}
-	spin_unlock_irq(&topology_lock);
 }
 
 static void topology_update_polarization_simple(void)
@@ -244,10 +247,8 @@
 
 static void update_cpu_masks(void)
 {
-	unsigned long flags;
 	int cpu;
 
-	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
 		per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
 		per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
@@ -259,7 +260,7 @@
 			per_cpu(cpu_topology, cpu).book_id = cpu;
 		}
 	}
-	spin_unlock_irqrestore(&topology_lock, flags);
+	numa_update_cpu_topology();
 }
 
 void store_topology(struct sysinfo_15_1_x *info)
@@ -274,21 +275,21 @@
 {
 	struct sysinfo_15_1_x *info = tl_info;
 	struct device *dev;
-	int cpu;
+	int cpu, rc = 0;
 
-	if (!MACHINE_HAS_TOPOLOGY) {
-		update_cpu_masks();
-		topology_update_polarization_simple();
-		return 0;
+	if (MACHINE_HAS_TOPOLOGY) {
+		rc = 1;
+		store_topology(info);
+		tl_to_masks(info);
 	}
-	store_topology(info);
-	tl_to_masks(info);
 	update_cpu_masks();
+	if (!MACHINE_HAS_TOPOLOGY)
+		topology_update_polarization_simple();
 	for_each_online_cpu(cpu) {
 		dev = get_cpu_device(cpu);
 		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
-	return 1;
+	return rc;
 }
 
 static void topology_work_fn(struct work_struct *work)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 7bea81d..9861613 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -19,7 +19,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <asm/switch_to.h>
+#include <asm/fpu-internal.h>
 #include "entry.h"
 
 int show_unhandled_signals = 1;
@@ -151,7 +151,7 @@
 DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
 	      "transaction constraint exception")
 
-static inline void do_fp_trap(struct pt_regs *regs, int fpc)
+static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
 {
 	int si_code = 0;
 	/* FPC[2] is Data Exception Code */
@@ -227,7 +227,7 @@
 int alloc_vector_registers(struct task_struct *tsk)
 {
 	__vector128 *vxrs;
-	int i;
+	freg_t *fprs;
 
 	/* Allocate vector register save area. */
 	vxrs = kzalloc(sizeof(__vector128) * __NUM_VXRS,
@@ -236,15 +236,13 @@
 		return -ENOMEM;
 	preempt_disable();
 	if (tsk == current)
-		save_fp_regs(tsk->thread.fp_regs.fprs);
+		save_fpu_regs();
 	/* Copy the 16 floating point registers */
-	for (i = 0; i < 16; i++)
-		*(freg_t *) &vxrs[i] = tsk->thread.fp_regs.fprs[i];
-	tsk->thread.vxrs = vxrs;
-	if (tsk == current) {
-		__ctl_set_bit(0, 17);
-		restore_vx_regs(vxrs);
-	}
+	convert_fp_to_vx(vxrs, tsk->thread.fpu.fprs);
+	fprs = tsk->thread.fpu.fprs;
+	tsk->thread.fpu.vxrs = vxrs;
+	tsk->thread.fpu.flags |= FPU_USE_VX;
+	kfree(fprs);
 	preempt_enable();
 	return 0;
 }
@@ -259,8 +257,8 @@
 	}
 
 	/* get vector interrupt code from fpc */
-	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
-	vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
+	save_fpu_regs();
+	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
 		si_code = FPE_FLTINV;
@@ -297,22 +295,22 @@
 
 	location = get_trap_ip(regs);
 
-	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
+	save_fpu_regs();
 	/* Check for vector register enablement */
-	if (MACHINE_HAS_VX && !current->thread.vxrs &&
-	    (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
+	if (MACHINE_HAS_VX && !is_vx_task(current) &&
+	    (current->thread.fpu.fpc & FPC_DXC_MASK) == 0xfe00) {
 		alloc_vector_registers(current);
 		/* Vector data exception is suppressing, rewind psw. */
 		regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
 		clear_pt_regs_flag(regs, PIF_PER_TRAP);
 		return;
 	}
-	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
+	if (current->thread.fpu.fpc & FPC_DXC_MASK)
 		signal = SIGFPE;
 	else
 		signal = SIGILL;
 	if (signal == SIGFPE)
-		do_fp_trap(regs, current->thread.fp_regs.fpc);
+		do_fp_trap(regs, current->thread.fpu.fpc);
 	else if (signal)
 		do_trap(regs, signal, ILL_ILLOPN, "data exception");
 }
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index 8ad2b34..ee8a18e 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -13,7 +13,7 @@
 KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin
 KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \
-			$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			$(call cc-ldoption, -Wl$(comma)--hash-style=both)
 
 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31)
 $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31)
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 2a8ddfd..c4b03f9 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -13,7 +13,7 @@
 KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
 KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
-			$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			$(call cc-ldoption, -Wl$(comma)--hash-style=both)
 
 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
 $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e53d359..b9ce650 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -28,6 +28,7 @@
 static DEFINE_PER_CPU(u64, mt_cycles[32]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
+static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
 
 static inline u64 get_vtimer(void)
 {
@@ -85,7 +86,8 @@
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
 	/* Do MT utilization calculation */
-	if (smp_cpu_mtid) {
+	if (smp_cpu_mtid &&
+	    time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
 		u64 cycles_new[32], *cycles_old;
 		u64 delta, mult, div;
 
@@ -105,6 +107,7 @@
 				       sizeof(u64) * (smp_cpu_mtid + 1));
 			}
 		}
+		__this_cpu_write(mt_scaling_jiffies, jiffies_64);
 	}
 
 	user = S390_lowcore.user_timer - ti->user_timer;
@@ -376,4 +379,11 @@
 {
 	/* set initial cpu timer */
 	set_vtimer(VTIMER_MAX_SLICE);
+	/* Setup initial MT scaling values */
+	if (smp_cpu_mtid) {
+		__this_cpu_write(mt_scaling_jiffies, jiffies);
+		__this_cpu_write(mt_scaling_mult, 1);
+		__this_cpu_write(mt_scaling_div, 1);
+		stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
+	}
 }
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index fc7ec95..5fbfb88 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -27,13 +27,13 @@
 
 	start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
 	end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + 4096;
+	vcpu->stat.diagnose_10++;
 
 	if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
 	    || start < 2 * PAGE_SIZE)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
 	VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end);
-	vcpu->stat.diagnose_10++;
 
 	/*
 	 * We checked for start >= end above, so lets check for the
@@ -75,6 +75,9 @@
 	u16 rx = (vcpu->arch.sie_block->ipa & 0xf0) >> 4;
 	u16 ry = (vcpu->arch.sie_block->ipa & 0x0f);
 
+	VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
+		   vcpu->run->s.regs.gprs[rx]);
+	vcpu->stat.diagnose_258++;
 	if (vcpu->run->s.regs.gprs[rx] & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 	rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -85,6 +88,9 @@
 
 	switch (parm.subcode) {
 	case 0: /* TOKEN */
+		VCPU_EVENT(vcpu, 3, "pageref token addr 0x%llx "
+			   "select mask 0x%llx compare mask 0x%llx",
+			   parm.token_addr, parm.select_mask, parm.compare_mask);
 		if (vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID) {
 			/*
 			 * If the pagefault handshake is already activated,
@@ -114,6 +120,7 @@
 		 * the cancel, therefore to reduce code complexity, we assume
 		 * all outstanding tokens are already pending.
 		 */
+		VCPU_EVENT(vcpu, 3, "pageref cancel addr 0x%llx", parm.token_addr);
 		if (parm.token_addr || parm.select_mask ||
 		    parm.compare_mask || parm.zarch)
 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -174,7 +181,8 @@
 	unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
 	unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;
 
-	VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
+	VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
+	vcpu->stat.diagnose_308++;
 	switch (subcode) {
 	case 3:
 		vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -202,6 +210,7 @@
 {
 	int ret;
 
+	vcpu->stat.diagnose_500++;
 	/* No virtio-ccw notification? Get out quickly. */
 	if (!vcpu->kvm->arch.css_support ||
 	    (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c
index e97b345..47518a3 100644
--- a/arch/s390/kvm/guestdbg.c
+++ b/arch/s390/kvm/guestdbg.c
@@ -473,10 +473,45 @@
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
 }
 
+#define pssec(vcpu) (vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
+#define hssec(vcpu) (vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
+#define old_ssec(vcpu) ((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
+#define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
+
 void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 {
+	int new_as;
+
 	if (debug_exit_required(vcpu))
 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
 
 	filter_guest_per_event(vcpu);
+
+	/*
+	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
+	 * a space-switch event. PER events enforce space-switch events
+	 * for these instructions. So if no PER event for the guest is left,
+	 * we might have to filter the space-switch element out, too.
+	 */
+	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
+		vcpu->arch.sie_block->iprcc = 0;
+		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;
+
+		/*
+		 * If the AS changed from / to home, we had an RP, SAC or SACF
+		 * instruction. Check primary and home space-switch-event
+		 * controls. (theoretically home -> home produced no event)
+		 */
+		if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) &&
+		     (pssec(vcpu) || hssec(vcpu)))
+			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
+
+		/*
+		 * PT, PTI, PR, PC instructions operate on the primary AS only. Check
+		 * if the primary-space-switch-event control was or got set.
+		 */
+		if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) &&
+		    (pssec(vcpu) || old_ssec(vcpu)))
+			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
+	}
 }
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c98d897..5c2c169 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -30,7 +30,6 @@
 #define IOINT_SCHID_MASK 0x0000ffff
 #define IOINT_SSID_MASK 0x00030000
 #define IOINT_CSSID_MASK 0x03fc0000
-#define IOINT_AI_MASK 0x04000000
 #define PFAULT_INIT 0x0600
 #define PFAULT_DONE 0x0680
 #define VIRTIO_PARAM 0x0d00
@@ -72,9 +71,13 @@
 
 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
+	preempt_disable();
 	if (!(vcpu->arch.sie_block->ckc <
-	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
+	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
+		preempt_enable();
 		return 0;
+	}
+	preempt_enable();
 	return ckc_interrupts_enabled(vcpu);
 }
 
@@ -170,20 +173,20 @@
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		    &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -196,7 +199,7 @@
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -311,8 +314,8 @@
 	li->irq.ext.ext_params2 = 0;
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
-		   0, ext.ext_params2);
+	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
+		   ext.ext_params2);
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_INT_PFAULT_INIT,
 					 0, ext.ext_params2);
@@ -368,7 +371,7 @@
 	spin_unlock(&fi->lock);
 
 	if (deliver) {
-		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
+		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
 			   mchk.mcic);
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 						 KVM_S390_MCHK,
@@ -403,7 +406,7 @@
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc;
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
 	vcpu->stat.deliver_restart_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
 
@@ -427,7 +430,6 @@
 	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
 	vcpu->stat.deliver_prefix_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_SIGP_SET_PREFIX,
@@ -450,7 +452,7 @@
 		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
 	vcpu->stat.deliver_emergency_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
 					 cpu_addr, 0);
@@ -477,7 +479,7 @@
 	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
+	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
 	vcpu->stat.deliver_external_call++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 					 KVM_S390_INT_EXTERNAL_CALL,
@@ -506,7 +508,7 @@
 	memset(&li->irq.pgm, 0, sizeof(pgm_info));
 	spin_unlock(&li->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
 		   pgm_info.code, ilc);
 	vcpu->stat.deliver_program_int++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
@@ -622,7 +624,7 @@
 	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
 	spin_unlock(&fi->lock);
 
-	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
 		   ext.ext_params);
 	vcpu->stat.deliver_service_signal++;
 	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
@@ -651,9 +653,6 @@
 					struct kvm_s390_interrupt_info,
 					list);
 	if (inti) {
-		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
-				KVM_S390_INT_PFAULT_DONE, 0,
-				inti->ext.ext_params2);
 		list_del(&inti->list);
 		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
 	}
@@ -662,6 +661,12 @@
 	spin_unlock(&fi->lock);
 
 	if (inti) {
+		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+						 KVM_S390_INT_PFAULT_DONE, 0,
+						 inti->ext.ext_params2);
+		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
+			   inti->ext.ext_params2);
+
 		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
 				(u16 *)__LC_EXT_INT_CODE);
 		rc |= put_guest_lc(vcpu, PFAULT_DONE,
@@ -691,7 +696,7 @@
 					list);
 	if (inti) {
 		VCPU_EVENT(vcpu, 4,
-			   "interrupt: virtio parm:%x,parm64:%llx",
+			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
 			   inti->ext.ext_params, inti->ext.ext_params2);
 		vcpu->stat.deliver_virtio_interrupt++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
@@ -741,7 +746,7 @@
 					struct kvm_s390_interrupt_info,
 					list);
 	if (inti) {
-		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
+		VCPU_EVENT(vcpu, 4, "deliver: I/O 0x%llx", inti->type);
 		vcpu->stat.deliver_io_int++;
 		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
 				inti->type,
@@ -855,7 +860,9 @@
 		goto no_timer;
 	}
 
+	preempt_disable();
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	preempt_enable();
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/* underflow */
@@ -864,7 +871,7 @@
 
 	__set_cpu_idle(vcpu);
 	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
-	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
+	VCPU_EVENT(vcpu, 4, "enabled wait via clock comparator: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 	kvm_vcpu_block(vcpu);
@@ -894,7 +901,9 @@
 	u64 now, sltime;
 
 	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
+	preempt_disable();
 	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
+	preempt_enable();
 	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
 
 	/*
@@ -919,7 +928,7 @@
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -968,6 +977,10 @@
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
+	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
+				   irq->u.pgm.code, 0);
+
 	li->irq.pgm = irq->u.pgm;
 	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
 	return 0;
@@ -978,9 +991,6 @@
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_irq irq;
 
-	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
-				   0, 1);
 	spin_lock(&li->lock);
 	irq.u.pgm.code = code;
 	__inject_prog(vcpu, &irq);
@@ -996,10 +1006,6 @@
 	struct kvm_s390_irq irq;
 	int rc;
 
-	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
-		   pgm_info->code);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
-				   pgm_info->code, 0, 1);
 	spin_lock(&li->lock);
 	irq.u.pgm = *pgm_info;
 	rc = __inject_prog(vcpu, &irq);
@@ -1012,15 +1018,15 @@
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
-		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
+	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
+		   irq->u.ext.ext_params2);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
 				   irq->u.ext.ext_params,
-				   irq->u.ext.ext_params2, 2);
+				   irq->u.ext.ext_params2);
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1035,7 +1041,7 @@
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1045,10 +1051,10 @@
 	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
 	uint16_t src_id = irq->u.extcall.code;
 
-	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
+	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
 		   src_id);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
-				   src_id, 0, 2);
+				   src_id, 0);
 
 	/* sending vcpu invalid */
 	if (src_id >= KVM_MAX_VCPUS ||
@@ -1061,7 +1067,7 @@
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1070,10 +1076,10 @@
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;
 
-	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
+	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
 		   irq->u.prefix.address);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
-				   irq->u.prefix.address, 0, 2);
+				   irq->u.prefix.address, 0);
 
 	if (!is_vcpu_stopped(vcpu))
 		return -EBUSY;
@@ -1090,7 +1096,7 @@
 	struct kvm_s390_stop_info *stop = &li->irq.stop;
 	int rc = 0;
 
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);
 
 	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
 		return -EINVAL;
@@ -1114,8 +1120,8 @@
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
-	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
+	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);
 
 	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
 	return 0;
@@ -1126,14 +1132,14 @@
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
+	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
 		   irq->u.emerg.code);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
-				   irq->u.emerg.code, 0, 2);
+				   irq->u.emerg.code, 0);
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1142,10 +1148,10 @@
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;
 
-	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
+	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
 		   irq->u.mchk.mcic);
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
-				   irq->u.mchk.mcic, 2);
+				   irq->u.mchk.mcic);
 
 	/*
 	 * Because repressible machine checks can be indicated along with
@@ -1172,12 +1178,12 @@
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
-				   0, 0, 2);
+				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1185,12 +1191,12 @@
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 
-	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
+	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
-				   0, 0, 2);
+				   0, 0);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1369,13 +1375,13 @@
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);
@@ -1435,20 +1441,20 @@
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_INT_SERVICE:
-		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+		VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
 		inti->ext.ext_params = s390int->parm;
 		break;
 	case KVM_S390_INT_PFAULT_DONE:
 		inti->ext.ext_params2 = s390int->parm64;
 		break;
 	case KVM_S390_MCHK:
-		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
+		VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
 			 s390int->parm64);
 		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
 		inti->mchk.mcic = s390int->parm64;
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		if (inti->type & IOINT_AI_MASK)
+		if (inti->type & KVM_S390_INT_IO_AI_MASK)
 			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
 		else
 			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
@@ -1535,8 +1541,6 @@
 
 	switch (irq->type) {
 	case KVM_S390_PROGRAM_INT:
-		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
-			   irq->u.pgm.code);
 		rc = __inject_prog(vcpu, irq);
 		break;
 	case KVM_S390_SIGP_SET_PREFIX:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f32f843..c91eb94 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <asm/asm-offsets.h>
 #include <asm/lowcore.h>
+#include <asm/etr.h>
 #include <asm/pgtable.h>
 #include <asm/nmi.h>
 #include <asm/switch_to.h>
@@ -108,6 +109,9 @@
 	{ "diagnose_10", VCPU_STAT(diagnose_10) },
 	{ "diagnose_44", VCPU_STAT(diagnose_44) },
 	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
+	{ "diagnose_258", VCPU_STAT(diagnose_258) },
+	{ "diagnose_308", VCPU_STAT(diagnose_308) },
+	{ "diagnose_500", VCPU_STAT(diagnose_500) },
 	{ NULL }
 };
 
@@ -124,6 +128,7 @@
 }
 
 static struct gmap_notifier gmap_notifier;
+debug_info_t *kvm_s390_dbf;
 
 /* Section: not file related */
 int kvm_arch_hardware_enable(void)
@@ -134,24 +139,69 @@
 
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
 
+/*
+ * This callback is executed during stop_machine(). All CPUs are therefore
+ * temporarily stopped. In order not to change guest behavior, we have to
+ * disable preemption whenever we touch the epoch of kvm and the VCPUs,
+ * so a CPU won't be stopped while calculating with the epoch.
+ */
+static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
+			  void *v)
+{
+	struct kvm *kvm;
+	struct kvm_vcpu *vcpu;
+	int i;
+	unsigned long long *delta = v;
+
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		kvm->arch.epoch -= *delta;
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			vcpu->arch.sie_block->epoch -= *delta;
+		}
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_clock_notifier = {
+	.notifier_call = kvm_clock_sync,
+};
+
 int kvm_arch_hardware_setup(void)
 {
 	gmap_notifier.notifier_call = kvm_gmap_notifier;
 	gmap_register_ipte_notifier(&gmap_notifier);
+	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
+				       &kvm_clock_notifier);
 	return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
 {
 	gmap_unregister_ipte_notifier(&gmap_notifier);
+	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
+					 &kvm_clock_notifier);
 }
 
 int kvm_arch_init(void *opaque)
 {
+	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
+	if (!kvm_s390_dbf)
+		return -ENOMEM;
+
+	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
+		debug_unregister(kvm_s390_dbf);
+		return -ENOMEM;
+	}
+
 	/* Register floating interrupt controller interface. */
 	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
 }
 
+void kvm_arch_exit(void)
+{
+	debug_unregister(kvm_s390_dbf);
+}
+
 /* Section: device related */
 long kvm_arch_dev_ioctl(struct file *filp,
 			unsigned int ioctl, unsigned long arg)
@@ -281,10 +331,12 @@
 
 	switch (cap->cap) {
 	case KVM_CAP_S390_IRQCHIP:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
 		kvm->arch.use_irqchip = 1;
 		r = 0;
 		break;
 	case KVM_CAP_S390_USER_SIGP:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
 		kvm->arch.user_sigp = 1;
 		r = 0;
 		break;
@@ -295,8 +347,11 @@
 			r = 0;
 		} else
 			r = -EINVAL;
+		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
+			 r ? "(not available)" : "(success)");
 		break;
 	case KVM_CAP_S390_USER_STSI:
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
 		kvm->arch.user_stsi = 1;
 		r = 0;
 		break;
@@ -314,6 +369,8 @@
 	switch (attr->attr) {
 	case KVM_S390_VM_MEM_LIMIT_SIZE:
 		ret = 0;
+		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
+			 kvm->arch.gmap->asce_end);
 		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
 			ret = -EFAULT;
 		break;
@@ -330,7 +387,13 @@
 	unsigned int idx;
 	switch (attr->attr) {
 	case KVM_S390_VM_MEM_ENABLE_CMMA:
+		/* enable CMMA only for z10 and later (EDAT_1) */
+		ret = -EINVAL;
+		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
+			break;
+
 		ret = -EBUSY;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
 		mutex_lock(&kvm->lock);
 		if (atomic_read(&kvm->online_vcpus) == 0) {
 			kvm->arch.use_cmma = 1;
@@ -339,6 +402,11 @@
 		mutex_unlock(&kvm->lock);
 		break;
 	case KVM_S390_VM_MEM_CLR_CMMA:
+		ret = -EINVAL;
+		if (!kvm->arch.use_cmma)
+			break;
+
+		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
 		mutex_lock(&kvm->lock);
 		idx = srcu_read_lock(&kvm->srcu);
 		s390_reset_cmma(kvm->arch.gmap->mm);
@@ -374,6 +442,7 @@
 			}
 		}
 		mutex_unlock(&kvm->lock);
+		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
 		break;
 	}
 	default:
@@ -400,22 +469,26 @@
 			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
 		kvm->arch.crypto.aes_kw = 1;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
 		get_random_bytes(
 			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
 		kvm->arch.crypto.dea_kw = 1;
+		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
 		kvm->arch.crypto.aes_kw = 0;
 		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
 			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
+		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
 		break;
 	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
 		kvm->arch.crypto.dea_kw = 0;
 		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
 			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
+		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
 		break;
 	default:
 		mutex_unlock(&kvm->lock);
@@ -440,6 +513,7 @@
 
 	if (gtod_high != 0)
 		return -EINVAL;
+	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
 
 	return 0;
 }
@@ -459,12 +533,15 @@
 		return r;
 
 	mutex_lock(&kvm->lock);
+	preempt_disable();
 	kvm->arch.epoch = gtod - host_tod;
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
 		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
 	kvm_s390_vcpu_unblock_all(kvm);
+	preempt_enable();
 	mutex_unlock(&kvm->lock);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
 	return 0;
 }
 
@@ -496,6 +573,7 @@
 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
 					 sizeof(gtod_high)))
 		return -EFAULT;
+	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
 
 	return 0;
 }
@@ -509,9 +587,12 @@
 	if (r)
 		return r;
 
+	preempt_disable();
 	gtod = host_tod + kvm->arch.epoch;
+	preempt_enable();
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
+	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
 
 	return 0;
 }
@@ -821,7 +902,9 @@
 	}
 
 	/* Enable storage key handling for the guest */
-	s390_enable_skey();
+	r = s390_enable_skey();
+	if (r)
+		goto out;
 
 	for (i = 0; i < args->count; i++) {
 		hva = gfn_to_hva(kvm, args->start_gfn + i);
@@ -879,8 +962,7 @@
 		if (kvm->arch.use_irqchip) {
 			/* Set up dummy routing. */
 			memset(&routing, 0, sizeof(routing));
-			kvm_set_irq_routing(kvm, &routing, 0, 0);
-			r = 0;
+			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
 		}
 		break;
 	}
@@ -1043,7 +1125,7 @@
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
-	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
+	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
 	if (!kvm->arch.dbf)
 		goto out_err;
 
@@ -1086,7 +1168,7 @@
 	mutex_init(&kvm->arch.ipte_mutex);
 
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
-	VM_EVENT(kvm, 3, "%s", "vm created");
+	VM_EVENT(kvm, 3, "vm created with type %lu", type);
 
 	if (type & KVM_VM_S390_UCONTROL) {
 		kvm->arch.gmap = NULL;
@@ -1103,6 +1185,7 @@
 	kvm->arch.epoch = 0;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
+	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
 
 	return 0;
 out_err:
@@ -1110,6 +1193,7 @@
 	free_page((unsigned long)kvm->arch.model.fac);
 	debug_unregister(kvm->arch.dbf);
 	free_page((unsigned long)(kvm->arch.sca));
+	KVM_EVENT(3, "creation of vm failed: %d", rc);
 	return rc;
 }
 
@@ -1131,7 +1215,7 @@
 	if (kvm_is_ucontrol(vcpu->kvm))
 		gmap_free(vcpu->arch.gmap);
 
-	if (kvm_s390_cmma_enabled(vcpu->kvm))
+	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
 	free_page((unsigned long)(vcpu->arch.sie_block));
 
@@ -1166,6 +1250,7 @@
 		gmap_free(kvm->arch.gmap);
 	kvm_s390_destroy_adapters(kvm);
 	kvm_s390_clear_float_irqs(kvm);
+	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
 }
 
 /* Section: vcpu related */
@@ -1198,43 +1283,79 @@
 	return 0;
 }
 
+/*
+ * Backs up the current FP/VX register save area on a particular
+ * destination.  Used to switch between different register save
+ * areas.
+ */
+static inline void save_fpu_to(struct fpu *dst)
+{
+	dst->fpc = current->thread.fpu.fpc;
+	dst->flags = current->thread.fpu.flags;
+	dst->regs = current->thread.fpu.regs;
+}
+
+/*
+ * Switches the FP/VX register save area from which to lazy
+ * restore register contents.
+ */
+static inline void load_fpu_from(struct fpu *from)
+{
+	current->thread.fpu.fpc = from->fpc;
+	current->thread.fpu.flags = from->flags;
+	current->thread.fpu.regs = from->regs;
+}
+
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
-	if (test_kvm_facility(vcpu->kvm, 129))
-		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
-	else
-		save_fp_regs(vcpu->arch.host_fpregs.fprs);
-	save_access_regs(vcpu->arch.host_acrs);
+	/* Save host register state */
+	save_fpu_regs();
+	save_fpu_to(&vcpu->arch.host_fpregs);
+
 	if (test_kvm_facility(vcpu->kvm, 129)) {
-		restore_fp_ctl(&vcpu->run->s.regs.fpc);
-		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	} else {
-		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
-	}
+		current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+		current->thread.fpu.flags = FPU_USE_VX;
+		/*
+		 * Use the register save area in the SIE-control block
+		 * for register restore and save in kvm_arch_vcpu_put()
+		 */
+		current->thread.fpu.vxrs =
+			(__vector128 *)&vcpu->run->s.regs.vrs;
+		/* Always enable the vector extension for KVM */
+		__ctl_set_vx();
+	} else
+		load_fpu_from(&vcpu->arch.guest_fpregs);
+
+	if (test_fp_ctl(current->thread.fpu.fpc))
+		/* User space provided an invalid FPC, let's clear it */
+		current->thread.fpu.fpc = 0;
+
+	save_access_regs(vcpu->arch.host_acrs);
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
-	if (test_kvm_facility(vcpu->kvm, 129)) {
-		save_fp_ctl(&vcpu->run->s.regs.fpc);
-		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
-	} else {
-		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
-	}
-	save_access_regs(vcpu->run->s.regs.acrs);
-	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+
+	save_fpu_regs();
+
 	if (test_kvm_facility(vcpu->kvm, 129))
-		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+		/*
+		 * kvm_arch_vcpu_load() set up the register save area to
+		 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
+		 * are already saved.  Only the floating-point control must be
+		 * copied.
+		 */
+		vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 	else
-		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+		save_fpu_to(&vcpu->arch.guest_fpregs);
+	load_fpu_from(&vcpu->arch.host_fpregs);
+
+	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_access_regs(vcpu->arch.host_acrs);
 }
 
@@ -1264,7 +1385,9 @@
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	mutex_lock(&vcpu->kvm->lock);
+	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm))
 		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
@@ -1320,9 +1443,9 @@
 						    CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1342,7 +1465,7 @@
 	}
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
-	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
+	if (vcpu->kvm->arch.use_cmma) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
 		if (rc)
 			return rc;
@@ -1377,7 +1500,6 @@
 
 	vcpu->arch.sie_block = &sie_page->sie_block;
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
-	vcpu->arch.host_vregs = &sie_page->vregs;
 
 	vcpu->arch.sie_block->icpua = id;
 	if (!kvm_is_ucontrol(kvm)) {
@@ -1399,6 +1521,19 @@
 	vcpu->arch.local_int.wq = &vcpu->wq;
 	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 
+	/*
+	 * Allocate a save area for floating-point registers.  If the vector
+	 * extension is available, register contents are saved in the SIE
+	 * control block.  The allocated save area is still required in
+	 * particular places, for example, in kvm_s390_vcpu_store_status().
+	 */
+	vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
+					       GFP_KERNEL);
+	if (!vcpu->arch.guest_fpregs.fprs) {
+		rc = -ENOMEM;
+		goto out_free_sie_block;
+	}
+
 	rc = kvm_vcpu_init(vcpu, kvm, id);
 	if (rc)
 		goto out_free_sie_block;
@@ -1422,24 +1557,24 @@
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1448,7 +1583,7 @@
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1621,16 +1756,16 @@
 {
 	if (test_fp_ctl(fpu->fpc))
 		return -EINVAL;
-	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+	memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
 	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	save_fpu_regs();
+	load_fpu_from(&vcpu->arch.guest_fpregs);
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+	memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
 	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
 	return 0;
 }
@@ -1672,19 +1807,19 @@
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1723,18 +1858,6 @@
 	return rc;
 }
 
-bool kvm_s390_cmma_enabled(struct kvm *kvm)
-{
-	if (!MACHINE_IS_LPAR)
-		return false;
-	/* only enable for z10 and later */
-	if (!MACHINE_HAS_EDAT1)
-		return false;
-	if (!kvm->arch.use_cmma)
-		return false;
-	return true;
-}
-
 static bool ibs_enabled(struct kvm_vcpu *vcpu)
 {
 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
@@ -1771,7 +1894,7 @@
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
+			atomic_or(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1780,7 +1903,7 @@
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
+			atomic_andnot(CPUSTAT_IBS,
 					  &vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2193,8 +2316,21 @@
 	 * copying in vcpu load/put. Lets update our copies before we save
 	 * it into the save area
 	 */
-	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	save_fpu_regs();
+	if (test_kvm_facility(vcpu->kvm, 129)) {
+		/*
+		 * If the vector extension is available, the vector registers
+		 * which overlap with the floating-point registers are saved in
+		 * the SIE-control block.  Hence, extract the floating-point
+		 * registers and the FPC value and store them in the
+		 * guest_fpregs structure.
+		 */
+		WARN_ON(!is_vx_task(current));	  /* XXX remove later */
+		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
+		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
+				 current->thread.fpu.vxrs);
+	} else
+		save_fpu_to(&vcpu->arch.guest_fpregs);
 	save_access_regs(vcpu->run->s.regs.acrs);
 
 	return kvm_s390_store_status_unloaded(vcpu, addr);
@@ -2221,10 +2357,13 @@
 
 	/*
 	 * The guest VXRS are in the host VXRs due to the lazy
-	 * copying in vcpu load/put. Let's update our copies before we save
-	 * it into the save area.
+	 * copying in vcpu load/put. We can simply call save_fpu_regs()
+	 * to save the current register state because we are in the
+	 * middle of a load/put cycle.
+	 *
+	 * Let's update our copies before we save it into the save area.
 	 */
-	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	save_fpu_regs();
 
 	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
 }
@@ -2280,7 +2419,7 @@
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2445,7 @@
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {
@@ -2340,6 +2479,7 @@
 	case KVM_CAP_S390_CSS_SUPPORT:
 		if (!vcpu->kvm->arch.css_support) {
 			vcpu->kvm->arch.css_support = 1;
+			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
 			trace_kvm_s390_enable_css(vcpu->kvm);
 		}
 		r = 0;
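
The kvm-s390.c changes above do two independent things: they wire KVM into the
s390 debug feature (the new kvm_s390_dbf feed plus the VM_EVENT/KVM_EVENT
instrumentation) and they convert all cpuflags/prog20 bit manipulation from the
old s390-private atomic_set_mask()/atomic_clear_mask() helpers to the generic
atomic_or()/atomic_andnot() operations. A minimal sketch of that conversion
pattern, assuming only <linux/atomic.h> (the flag names below are illustrative,
not taken from the patch); note that the clearing side has to use
atomic_andnot(), since atomic_or() can only set bits:

	#include <linux/atomic.h>

	#define FLAG_RUNNING	0x01	/* illustrative flag bits */
	#define FLAG_STOPPED	0x02

	static atomic_t cpuflags = ATOMIC_INIT(0);

	static void mark_running(void)
	{
		/* was: atomic_set_mask(FLAG_RUNNING, &cpuflags); */
		atomic_or(FLAG_RUNNING, &cpuflags);
	}

	static void mark_stopped(void)
	{
		/* was: atomic_clear_mask(FLAG_RUNNING, &cpuflags); */
		atomic_andnot(FLAG_RUNNING, &cpuflags);
		atomic_or(FLAG_STOPPED, &cpuflags);
	}
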
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c570478..c446aab 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -27,6 +27,13 @@
 #define TDB_FORMAT1		1
 #define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
 
+extern debug_info_t *kvm_s390_dbf;
+#define KVM_EVENT(d_loglevel, d_string, d_args...)\
+do { \
+	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
+	  d_args); \
+} while (0)
+
 #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
 do { \
 	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
@@ -65,6 +72,8 @@
 
 static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
 {
+	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
+		   prefix);
 	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
@@ -217,8 +226,6 @@
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
-/* is cmma enabled */
-bool kvm_s390_cmma_enabled(struct kvm *kvm);
 unsigned long kvm_s390_fac_list_mask_size(void);
 extern unsigned long kvm_s390_fac_list_mask[];
 
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index ad42422..4d21dc4 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -53,11 +53,14 @@
 		kvm_s390_set_psw_cc(vcpu, 3);
 		return 0;
 	}
+	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
 	val = (val - hostclk) & ~0x3fUL;
 
 	mutex_lock(&vcpu->kvm->lock);
+	preempt_disable();
 	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
 		cpup->arch.sie_block->epoch = val;
+	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 
 	kvm_s390_set_psw_cc(vcpu, 0);
@@ -98,8 +101,6 @@
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 
 	kvm_s390_set_prefix(vcpu, address);
-
-	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
 	trace_kvm_s390_handle_prefix(vcpu, 1, address);
 	return 0;
 }
@@ -129,7 +130,7 @@
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
 	trace_kvm_s390_handle_prefix(vcpu, 0, address);
 	return 0;
 }
@@ -155,7 +156,7 @@
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
+	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
 	trace_kvm_s390_handle_stap(vcpu, ga);
 	return 0;
 }
@@ -167,6 +168,7 @@
 		return rc;
 
 	rc = s390_enable_skey();
+	VCPU_EVENT(vcpu, 3, "%s", "enabling storage keys for guest");
 	trace_kvm_s390_skey_related_inst(vcpu);
 	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
 	return rc;
@@ -370,7 +372,7 @@
 			    &fac, sizeof(fac));
 	if (rc)
 		return rc;
-	VCPU_EVENT(vcpu, 5, "store facility list value %x", fac);
+	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
 	trace_kvm_s390_handle_stfl(vcpu, fac);
 	return 0;
 }
@@ -468,7 +470,7 @@
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);
 
-	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
 	return 0;
 }
 
@@ -521,7 +523,7 @@
 	ar_t ar;
 
 	vcpu->stat.instruction_stsi++;
-	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -758,10 +760,10 @@
 	struct gmap *gmap;
 	int i;
 
-	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
+	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
 	gmap = vcpu->arch.gmap;
 	vcpu->stat.instruction_essa++;
-	if (!kvm_s390_cmma_enabled(vcpu->kvm))
+	if (!vcpu->kvm->arch.use_cmma)
 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -829,7 +831,7 @@
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -868,7 +870,7 @@
 	if (ga & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);
 
 	reg = reg1;
@@ -902,7 +904,7 @@
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);
 
 	nr_regs = ((reg3 - reg1) & 0xf) + 1;
@@ -940,7 +942,7 @@
 	if (ga & 7)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
+	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
 	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);
 
 	reg = reg1;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 72e58bd..da690b6 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -205,9 +205,6 @@
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
 		return SIGP_CC_STATUS_STORED;
-	} else if (rc == 0) {
-		VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
-			   dst_vcpu->vcpu_id, irq.u.prefix.address);
 	}
 
 	return rc;
@@ -371,7 +368,8 @@
 	return rc;
 }
 
-static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code,
+					   u16 cpu_addr)
 {
 	if (!vcpu->kvm->arch.user_sigp)
 		return 0;
@@ -414,9 +412,8 @@
 	default:
 		vcpu->stat.instruction_sigp_unknown++;
 	}
-
-	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
-		   order_code);
+	VCPU_EVENT(vcpu, 3, "SIGP: order %u for CPU %d handled in userspace",
+		   order_code, cpu_addr);
 
 	return 1;
 }
@@ -435,7 +432,7 @@
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
 	order_code = kvm_s390_get_base_disp_rs(vcpu, NULL);
-	if (handle_sigp_order_in_user_space(vcpu, order_code))
+	if (handle_sigp_order_in_user_space(vcpu, order_code, cpu_addr))
 		return -EOPNOTSUPP;
 
 	if (r1 % 2)
diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h
index 3208d33..cc1d6c6 100644
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -105,11 +105,22 @@
 	{KVM_S390_PROGRAM_INT, "program interrupt"},			\
 	{KVM_S390_SIGP_SET_PREFIX, "sigp set prefix"},			\
 	{KVM_S390_RESTART, "sigp restart"},				\
+	{KVM_S390_INT_PFAULT_INIT, "pfault init"},			\
+	{KVM_S390_INT_PFAULT_DONE, "pfault done"},			\
+	{KVM_S390_MCHK, "machine check"},				\
+	{KVM_S390_INT_CLOCK_COMP, "clock comparator"},			\
+	{KVM_S390_INT_CPU_TIMER, "cpu timer"},				\
 	{KVM_S390_INT_VIRTIO, "virtio interrupt"},			\
 	{KVM_S390_INT_SERVICE, "sclp interrupt"},			\
 	{KVM_S390_INT_EMERGENCY, "sigp emergency"},			\
 	{KVM_S390_INT_EXTERNAL_CALL, "sigp ext call"}
 
+#define get_irq_name(__type) \
+	(__type > KVM_S390_INT_IO_MAX ? \
+	__print_symbolic(__type, kvm_s390_int_type) : \
+		(__type & KVM_S390_INT_IO_AI_MASK ? \
+		 "adapter I/O interrupt" : "subchannel I/O interrupt"))
+
 TRACE_EVENT(kvm_s390_inject_vm,
 	    TP_PROTO(__u64 type, __u32 parm, __u64 parm64, int who),
 	    TP_ARGS(type, parm, parm64, who),
@@ -131,22 +142,19 @@
 	    TP_printk("inject%s: type:%x (%s) parm:%x parm64:%llx",
 		      (__entry->who == 1) ? " (from kernel)" :
 		      (__entry->who == 2) ? " (from user)" : "",
-		      __entry->inttype,
-		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
+		      __entry->inttype, get_irq_name(__entry->inttype),
 		      __entry->parm, __entry->parm64)
 	);
 
 TRACE_EVENT(kvm_s390_inject_vcpu,
-	    TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64, \
-		     int who),
-	    TP_ARGS(id, type, parm, parm64, who),
+	    TP_PROTO(unsigned int id, __u64 type, __u32 parm, __u64 parm64),
+	    TP_ARGS(id, type, parm, parm64),
 
 	    TP_STRUCT__entry(
 		    __field(int, id)
 		    __field(__u32, inttype)
 		    __field(__u32, parm)
 		    __field(__u64, parm64)
-		    __field(int, who)
 		    ),
 
 	    TP_fast_assign(
@@ -154,15 +162,12 @@
 		    __entry->inttype = type & 0x00000000ffffffff;
 		    __entry->parm = parm;
 		    __entry->parm64 = parm64;
-		    __entry->who = who;
 		    ),
 
-	    TP_printk("inject%s (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
-		      (__entry->who == 1) ? " (from kernel)" :
-		      (__entry->who == 2) ? " (from user)" : "",
+	    TP_printk("inject (vcpu %d): type:%x (%s) parm:%x parm64:%llx",
 		      __entry->id, __entry->inttype,
-		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
-		      __entry->parm, __entry->parm64)
+		      get_irq_name(__entry->inttype), __entry->parm,
+		      __entry->parm64)
 	);
 
 /*
@@ -189,8 +194,8 @@
 	    TP_printk("deliver interrupt (vcpu %d): type:%x (%s) "	\
 		      "data:%08llx %016llx",
 		      __entry->id, __entry->inttype,
-		      __print_symbolic(__entry->inttype, kvm_s390_int_type),
-		      __entry->data0, __entry->data1)
+		      get_irq_name(__entry->inttype), __entry->data0,
+		      __entry->data1)
 	);
 
 /*
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 16dc42d..246a7eb 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -26,6 +26,7 @@
          */
 	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
 }
+EXPORT_SYMBOL(__delay);
 
 static void __udelay_disabled(unsigned long long usecs)
 {
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 4614d41..ae4de55 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,7 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/facility.h>
 
-static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+static DEFINE_STATIC_KEY_FALSE(have_mvcos);
 
 static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
 						 unsigned long size)
@@ -104,7 +104,7 @@
 
 unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
 }
@@ -177,7 +177,7 @@
 
 unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
 }
@@ -240,7 +240,7 @@
 
 unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 		return copy_in_user_mvcos(to, from, n);
 	return copy_in_user_mvc(to, from, n);
 }
@@ -312,7 +312,7 @@
 
 unsigned long __clear_user(void __user *to, unsigned long size)
 {
-	if (static_key_false(&have_mvcos))
+	if (static_branch_likely(&have_mvcos))
 			return clear_user_mvcos(to, size);
 	return clear_user_xc(to, size);
 }
@@ -370,23 +370,10 @@
 }
 EXPORT_SYMBOL(__strncpy_from_user);
 
-/*
- * The "old" uaccess variant without mvcos can be enforced with the
- * uaccess_primary kernel parameter. This is mainly for debugging purposes.
- */
-static int uaccess_primary __initdata;
-
-static int __init parse_uaccess_pt(char *__unused)
-{
-	uaccess_primary = 1;
-	return 0;
-}
-early_param("uaccess_primary", parse_uaccess_pt);
-
 static int __init uaccess_init(void)
 {
-	if (!uaccess_primary && test_facility(27))
-		static_key_slow_inc(&have_mvcos);
+	if (test_facility(27))
+		static_branch_enable(&have_mvcos);
 	return 0;
 }
 early_initcall(uaccess_init);
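
The uaccess.c hunks above drop the "uaccess_primary" debug parameter and move
from the low-level static_key interface to the newer static-branch API, so the
mvcos/mvcp decision costs no conditional test on the hot path. A minimal sketch
of the idiom, assuming <linux/jump_label.h> and the facility test already used
in this file; copy_fast()/copy_slow() are illustrative stubs, not functions
from the patch:

	#include <linux/jump_label.h>
	#include <linux/init.h>
	#include <asm/facility.h>

	/* The key defaults to false: the slow path is taken until enabled. */
	static DEFINE_STATIC_KEY_FALSE(have_fast_copy);

	static unsigned long copy_fast(void *to, const void *from, unsigned long n)
	{
		return 0;	/* illustrative stub */
	}

	static unsigned long copy_slow(void *to, const void *from, unsigned long n)
	{
		return 0;	/* illustrative stub */
	}

	unsigned long do_copy(void *to, const void *from, unsigned long n)
	{
		/* Resolved by runtime code patching (jump labels), not a test. */
		if (static_branch_likely(&have_fast_copy))
			return copy_fast(to, from, n);
		return copy_slow(to, from, n);
	}

	static int __init copy_init(void)
	{
		if (test_facility(27))	/* MVCOS facility installed */
			static_branch_enable(&have_fast_copy);
		return 0;
	}
	early_initcall(copy_init);
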
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 4c8f5d7..f985856 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -646,7 +646,7 @@
 		return;
 	inc_irq_stat(IRQEXT_PFL);
 	/* Get the token (= pid of the affected task). */
-	pid = sizeof(void *) == 4 ? param32 : param64;
+	pid = param64;
 	rcu_read_lock();
 	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
 	if (tsk)
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1eb41bb..12bbf0e 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -30,6 +30,9 @@
 	do {
 		pte = *ptep;
 		barrier();
+		/* Similar to the PMD case, NUMA hinting must take slow path */
+		if (pte_protnone(pte))
+			return 0;
 		if ((pte_val(pte) & mask) != 0)
 			return 0;
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -125,6 +128,13 @@
 		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 			return 0;
 		if (unlikely(pmd_large(pmd))) {
+			/*
+			 * NUMA hinting faults need to be handled in the GUP
+			 * slowpath for accounting purposes and so that they
+			 * can be serialised against THP migration.
+			 */
+			if (pmd_protnone(pmd))
+				return 0;
 			if (!gup_huge_pmd(pmdp, pmd, addr, next,
 					  write, pages, nr))
 				return 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 76e8737..2963b56 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -27,6 +27,7 @@
 #include <linux/initrd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -138,7 +139,7 @@
 	cpumask_set_cpu(0, mm_cpumask(&init_mm));
 	atomic_set(&init_mm.context.attach_count, 1);
 
-        max_mapnr = max_low_pfn;
+	set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 	/* Setup guest page hinting */
@@ -170,37 +171,36 @@
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
-	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
-	struct zone *zone;
-	int rc;
+	unsigned long nr_pages;
+	int rc, zone_enum;
 
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
-	for_each_zone(zone) {
-		if (zone_idx(zone) != ZONE_MOVABLE) {
-			/* Add range within existing zone limits */
-			zone_start_pfn = zone->zone_start_pfn;
-			zone_end_pfn = zone->zone_start_pfn +
-				       zone->spanned_pages;
+
+	while (size_pages > 0) {
+		if (start_pfn < dma_end_pfn) {
+			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
+				   dma_end_pfn - start_pfn : size_pages;
+			zone_enum = ZONE_DMA;
+		} else if (start_pfn < normal_end_pfn) {
+			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
+				   normal_end_pfn - start_pfn : size_pages;
+			zone_enum = ZONE_NORMAL;
 		} else {
-			/* Add remaining range to ZONE_MOVABLE */
-			zone_start_pfn = start_pfn;
-			zone_end_pfn = start_pfn + size_pages;
+			nr_pages = size_pages;
+			zone_enum = ZONE_MOVABLE;
 		}
-		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
-			continue;
-		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
-			   zone_end_pfn - start_pfn : size_pages;
-		rc = __add_pages(nid, zone, start_pfn, nr_pages);
+		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
+				 start_pfn, nr_pages);
 		if (rc)
 			break;
 		start_pfn += nr_pages;
 		size_pages -= nr_pages;
-		if (!size_pages)
-			break;
 	}
 	if (rc)
 		vmem_remove_mapping(start, size);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b33f661..54ef3bc 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -10,11 +10,7 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/smp.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
 #include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/quicklist.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/swapops.h>
@@ -28,12 +24,9 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-#define ALLOC_ORDER	2
-#define FRAG_MASK	0x03
-
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
-	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	struct page *page = alloc_pages(GFP_KERNEL, 2);
 
 	if (!page)
 		return NULL;
@@ -42,7 +35,7 @@
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
 {
-	free_pages((unsigned long) table, ALLOC_ORDER);
+	free_pages((unsigned long) table, 2);
 }
 
 static void __crst_table_upgrade(void *arg)
@@ -176,7 +169,7 @@
 	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
 	spin_lock_init(&gmap->guest_table_lock);
 	gmap->mm = mm;
-	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL, 2);
 	if (!page)
 		goto out_free;
 	page->index = 0;
@@ -247,7 +240,7 @@
 
 	/* Free all segment & region tables. */
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
-		__free_pages(page, ALLOC_ORDER);
+		__free_pages(page, 2);
 	gmap_radix_tree_free(&gmap->guest_to_host);
 	gmap_radix_tree_free(&gmap->host_to_guest);
 	down_write(&gmap->mm->mmap_sem);
@@ -287,7 +280,7 @@
 	unsigned long *new;
 
 	/* since we dont free the gmap table until gmap_free we can unlock */
-	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
+	page = alloc_pages(GFP_KERNEL, 2);
 	if (!page)
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
@@ -302,7 +295,7 @@
 	}
 	spin_unlock(&gmap->mm->page_table_lock);
 	if (page)
-		__free_pages(page, ALLOC_ORDER);
+		__free_pages(page, 2);
 	return 0;
 }
 
@@ -795,40 +788,6 @@
 }
 EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
 
-static inline int page_table_with_pgste(struct page *page)
-{
-	return atomic_read(&page->_mapcount) == 0;
-}
-
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
-{
-	struct page *page;
-	unsigned long *table;
-
-	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
-	if (!page)
-		return NULL;
-	if (!pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	atomic_set(&page->_mapcount, 0);
-	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
-	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
-	return table;
-}
-
-static inline void page_table_free_pgste(unsigned long *table)
-{
-	struct page *page;
-
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	pgtable_page_dtor(page);
-	atomic_set(&page->_mapcount, -1);
-	__free_page(page);
-}
-
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq)
 {
@@ -957,20 +916,6 @@
 
 #else /* CONFIG_PGSTE */
 
-static inline int page_table_with_pgste(struct page *page)
-{
-	return 0;
-}
-
-static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
-{
-	return NULL;
-}
-
-static inline void page_table_free_pgste(unsigned long *table)
-{
-}
-
 static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
 			unsigned long vmaddr)
 {
@@ -994,44 +939,55 @@
  */
 unsigned long *page_table_alloc(struct mm_struct *mm)
 {
-	unsigned long *uninitialized_var(table);
-	struct page *uninitialized_var(page);
+	unsigned long *table;
+	struct page *page;
 	unsigned int mask, bit;
 
-	if (mm_alloc_pgste(mm))
-		return page_table_alloc_pgste(mm);
-	/* Allocate fragments of a 4K page as 1K/2K page table */
-	spin_lock_bh(&mm->context.list_lock);
-	mask = FRAG_MASK;
-	if (!list_empty(&mm->context.pgtable_list)) {
-		page = list_first_entry(&mm->context.pgtable_list,
-					struct page, lru);
-		table = (unsigned long *) page_to_phys(page);
-		mask = atomic_read(&page->_mapcount);
-		mask = mask | (mask >> 4);
-	}
-	if ((mask & FRAG_MASK) == FRAG_MASK) {
-		spin_unlock_bh(&mm->context.list_lock);
-		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
-		if (!page)
-			return NULL;
-		if (!pgtable_page_ctor(page)) {
-			__free_page(page);
-			return NULL;
+	/* Try to get a fragment of a 4K page as a 2K page table */
+	if (!mm_alloc_pgste(mm)) {
+		table = NULL;
+		spin_lock_bh(&mm->context.list_lock);
+		if (!list_empty(&mm->context.pgtable_list)) {
+			page = list_first_entry(&mm->context.pgtable_list,
+						struct page, lru);
+			mask = atomic_read(&page->_mapcount);
+			mask = (mask | (mask >> 4)) & 3;
+			if (mask != 3) {
+				table = (unsigned long *) page_to_phys(page);
+				bit = mask & 1;		/* =1 -> second 2K */
+				if (bit)
+					table += PTRS_PER_PTE;
+				atomic_xor_bits(&page->_mapcount, 1U << bit);
+				list_del(&page->lru);
+			}
 		}
+		spin_unlock_bh(&mm->context.list_lock);
+		if (table)
+			return table;
+	}
+	/* Allocate a fresh page */
+	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+	if (!page)
+		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
+	/* Initialize page table */
+	table = (unsigned long *) page_to_phys(page);
+	if (mm_alloc_pgste(mm)) {
+		/* Return 4K page table with PGSTEs */
+		atomic_set(&page->_mapcount, 3);
+		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+	} else {
+		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
-		table = (unsigned long *) page_to_phys(page);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
 		spin_lock_bh(&mm->context.list_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
-	} else {
-		for (bit = 1; mask & bit; bit <<= 1)
-			table += PTRS_PER_PTE;
-		mask = atomic_xor_bits(&page->_mapcount, bit);
-		if ((mask & FRAG_MASK) == FRAG_MASK)
-			list_del(&page->lru);
+		spin_unlock_bh(&mm->context.list_lock);
 	}
-	spin_unlock_bh(&mm->context.list_lock);
 	return table;
 }
 
@@ -1041,37 +997,23 @@
 	unsigned int bit, mask;
 
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	if (page_table_with_pgste(page))
-		return page_table_free_pgste(table);
-	/* Free 1K/2K page table fragment of a 4K page */
-	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
-	spin_lock_bh(&mm->context.list_lock);
-	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
-		list_del(&page->lru);
-	mask = atomic_xor_bits(&page->_mapcount, bit);
-	if (mask & FRAG_MASK)
-		list_add(&page->lru, &mm->context.pgtable_list);
-	spin_unlock_bh(&mm->context.list_lock);
-	if (mask == 0) {
-		pgtable_page_dtor(page);
-		atomic_set(&page->_mapcount, -1);
-		__free_page(page);
+	if (!mm_alloc_pgste(mm)) {
+		/* Free 2K page table fragment of a 4K page */
+		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
+		spin_lock_bh(&mm->context.list_lock);
+		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
+		if (mask & 3)
+			list_add(&page->lru, &mm->context.pgtable_list);
+		else
+			list_del(&page->lru);
+		spin_unlock_bh(&mm->context.list_lock);
+		if (mask != 0)
+			return;
 	}
-}
 
-static void __page_table_free_rcu(void *table, unsigned bit)
-{
-	struct page *page;
-
-	if (bit == FRAG_MASK)
-		return page_table_free_pgste(table);
-	/* Free 1K/2K page table fragment of a 4K page */
-	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
-		pgtable_page_dtor(page);
-		atomic_set(&page->_mapcount, -1);
-		__free_page(page);
-	}
+	pgtable_page_dtor(page);
+	atomic_set(&page->_mapcount, -1);
+	__free_page(page);
 }
 
 void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
@@ -1083,34 +1025,45 @@
 
 	mm = tlb->mm;
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
-	if (page_table_with_pgste(page)) {
+	if (mm_alloc_pgste(mm)) {
 		gmap_unlink(mm, table, vmaddr);
-		table = (unsigned long *) (__pa(table) | FRAG_MASK);
+		table = (unsigned long *) (__pa(table) | 3);
 		tlb_remove_table(tlb, table);
 		return;
 	}
-	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
+	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
 	spin_lock_bh(&mm->context.list_lock);
-	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
-		list_del(&page->lru);
-	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
-	if (mask & FRAG_MASK)
+	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
+	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
+	else
+		list_del(&page->lru);
 	spin_unlock_bh(&mm->context.list_lock);
-	table = (unsigned long *) (__pa(table) | (bit << 4));
+	table = (unsigned long *) (__pa(table) | (1U << bit));
 	tlb_remove_table(tlb, table);
 }
 
 static void __tlb_remove_table(void *_table)
 {
-	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
-	void *table = (void *)((unsigned long) _table & ~mask);
-	unsigned type = (unsigned long) _table & mask;
+	unsigned int mask = (unsigned long) _table & 3;
+	void *table = (void *)((unsigned long) _table ^ mask);
+	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 
-	if (type)
-		__page_table_free_rcu(table, type);
-	else
-		free_pages((unsigned long) table, ALLOC_ORDER);
+	switch (mask) {
+	case 0:		/* pmd or pud */
+		free_pages((unsigned long) table, 2);
+		break;
+	case 1:		/* lower 2K of a 4K page table */
+	case 2:		/* higher 2K of a 4K page table */
+		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
+			break;
+		/* fallthrough */
+	case 3:		/* 4K page table with pgstes */
+		pgtable_page_dtor(page);
+		atomic_set(&page->_mapcount, -1);
+		__free_page(page);
+		break;
+	}
 }
 
 static void tlb_remove_table_smp_sync(void *arg)
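
The pgtable.c rework above retires FRAG_MASK together with the separate pgste
alloc/free paths: the two 2K halves of a 4K page-table page are now tracked in
bits 0-1 of page->_mapcount (bits 4-5 mark halves pending RCU removal), and
page_table_free_rcu() encodes the same state in the low bits of the aligned
table pointer so that __tlb_remove_table() knows, after the grace period,
whether to free an order-2 crst table, release a single 2K half, or free a
full 4K table with pgstes. A small illustration of that low-bit tagging; the
helper names are hypothetical, not part of the patch:

	/*
	 * A table pointer is at least 2K aligned, so its two low bits are
	 * free to carry a tag:
	 *   0 - crst table (pmd/pud), freed as an order-2 allocation
	 *   1 - lower 2K half of a 4K page-table page
	 *   2 - upper 2K half of a 4K page-table page
	 *   3 - full 4K page table with pgstes
	 */
	static void *table_tag(unsigned long *table, unsigned int tag)
	{
		return (void *)((unsigned long) table | tag);
	}

	static unsigned long *table_untag(void *tagged, unsigned int *tag)
	{
		*tag = (unsigned long) tagged & 3;
		return (unsigned long *)((unsigned long) tagged ^ *tag);
	}
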
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f6498ee..f010c93 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -36,6 +36,8 @@
  *	      |   BPF stack   |     |
  *	      |		      |     |
  *	      +---------------+     |
+ *	      | 8 byte skbp   |     |
+ * R15+176 -> +---------------+     |
  *	      | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *	      | 4 byte align  |     |
@@ -51,11 +53,12 @@
  * We get 160 bytes stack space from calling function, but only use
  * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
  */
-#define STK_SPACE	(MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_SPACE	(MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
 #define STK_160_UNUSED	(160 - 12 * 8)
 #define STK_OFF		(STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP	160	/* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN	168	/* Offset of SKB header length on stack */
+#define STK_OFF_SKBP	176	/* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6	(160 - 11 * 8)	/* Offset of r6 on stack */
 #define STK_OFF_TCCNT	(160 - 12 * 8)	/* Offset of tail_call_cnt on stack */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 8d2e516..eeda051 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@
 	int labels[1];		/* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX	4096	/* Max size for program */
+#define BPF_SIZE_MAX	0x7ffff	/* Max size for program (20 bit signed displ) */
 
 #define SEEN_SKB	1	/* skb access */
 #define SEEN_MEM	2	/* use mem[] for temporary storage */
@@ -53,6 +53,7 @@
 #define SEEN_LITERAL	8	/* code uses literals */
 #define SEEN_FUNC	16	/* calls C functions */
 #define SEEN_TAIL_CALL	32	/* code uses tail calls */
+#define SEEN_SKB_CHANGE	64	/* code changes skb data */
 #define SEEN_STACK	(SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -203,19 +204,11 @@
 	_EMIT6(op1 | __disp, op2);				\
 })
 
-#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)			\
-({								\
-	_EMIT6_DISP(op1 | reg(b1, b2) << 16 |			\
-		    reg_high(b3) << 8, op2, disp);		\
-	REG_SET_SEEN(b1);					\
-	REG_SET_SEEN(b2);					\
-	REG_SET_SEEN(b3);					\
-})
-
 #define _EMIT6_DISP_LH(op1, op2, disp)				\
 ({								\
-	unsigned int __disp_h = ((u32)disp) & 0xff000;		\
-	unsigned int __disp_l = ((u32)disp) & 0x00fff;		\
+	u32 _disp = (u32) disp;					\
+	unsigned int __disp_h = _disp & 0xff000;		\
+	unsigned int __disp_l = _disp & 0x00fff;		\
 	_EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);		\
 })
 
@@ -390,12 +383,32 @@
 }
 
 /*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+static void emit_load_skb_data_hlen(struct bpf_jit *jit)
+{
+	/* Header length: llgf %w1,<len>(%b1) */
+	EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+		      offsetof(struct sk_buff, len));
+	/* s %w1,<data_len>(%b1) */
+	EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+		   offsetof(struct sk_buff, data_len));
+	/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+	EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
+	/* lg %skb_data,data_off(%b1) */
+	EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+		      BPF_REG_1, offsetof(struct sk_buff, data));
+}
+
+/*
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
 * See stack frame layout description in "bpf_jit.h"!
  */
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
 {
 	if (jit->seen & SEEN_TAIL_CALL) {
 		/* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -429,32 +442,21 @@
 			EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
 				      REG_15, 152);
 	}
-	/*
-	 * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
-	 * we store the SKB header length on the stack and the SKB data
-	 * pointer in REG_SKB_DATA.
-	 */
-	if (jit->seen & SEEN_SKB) {
-		/* Header length: llgf %w1,<len>(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
-			      offsetof(struct sk_buff, len));
-		/* s %w1,<data_len>(%b1) */
-		EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
-			   offsetof(struct sk_buff, data_len));
-		/* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+	if (jit->seen & SEEN_SKB)
+		emit_load_skb_data_hlen(jit);
+	if (jit->seen & SEEN_SKB_CHANGE)
+		/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
 		EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
-			      STK_OFF_HLEN);
-		/* lg %skb_data,data_off(%b1) */
-		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-			      BPF_REG_1, offsetof(struct sk_buff, data));
+			      STK_OFF_SKBP);
+	/* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+	if (is_classic) {
+		if (REG_SEEN(BPF_REG_A))
+			/* lghi %ba,0 */
+			EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+		if (REG_SEEN(BPF_REG_X))
+			/* lghi %bx,0 */
+			EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 	}
-	/* BPF compatibility: clear A (%b0) and X (%b7) registers */
-	if (REG_SEEN(BPF_REG_A))
-		/* lghi %ba,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
-	if (REG_SEEN(BPF_REG_X))
-		/* lghi %bx,0 */
-		EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 }
 
 /*
@@ -976,12 +978,19 @@
 		REG_SET_SEEN(BPF_REG_5);
 		jit->seen |= SEEN_FUNC;
 		/* lg %w1,<d(imm)>(%l) */
-		EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
-			   EMIT_CONST_U64(func));
+		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+			      EMIT_CONST_U64(func));
 		/* basr %r14,%w1 */
 		EMIT2(0x0d00, REG_14, REG_W1);
 		/* lgr %b0,%r2: load return value into %b0 */
 		EMIT4(0xb9040000, BPF_REG_0, REG_2);
+		if (bpf_helper_changes_skb_data((void *)func)) {
+			jit->seen |= SEEN_SKB_CHANGE;
+			/* lg %b1,ST_OFF_SKBP(%r15) */
+			EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
+				      REG_15, STK_OFF_SKBP);
+			emit_load_skb_data_hlen(jit);
+		}
 		break;
 	}
 	case BPF_JMP | BPF_CALL | BPF_X:
@@ -1023,7 +1032,7 @@
 				      MAX_TAIL_CALL_CNT, 0, 0x2);
 
 		/*
-		 * prog = array->prog[index];
+		 * prog = array->ptrs[index];
 		 * if (prog == NULL)
 		 *         goto out;
 		 */
@@ -1032,7 +1041,7 @@
 		EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
 		/* lg %r1,prog(%b2,%r1) */
 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
-			      REG_1, offsetof(struct bpf_array, prog));
+			      REG_1, offsetof(struct bpf_array, ptrs));
 		/* clgij %r1,0,0x8,label0 */
 		EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007d, REG_1, 0, 0, 0x8);
 
@@ -1236,7 +1245,7 @@
 	jit->lit = jit->lit_start;
 	jit->prg = 0;
 
-	bpf_jit_prologue(jit);
+	bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
 	for (i = 0; i < fp->len; i += insn_count) {
 		insn_count = bpf_jit_insn(jit, fp, i);
 		if (insn_count < 0)
diff --git a/arch/s390/numa/Makefile b/arch/s390/numa/Makefile
new file mode 100644
index 0000000..f94ecaf
--- /dev/null
+++ b/arch/s390/numa/Makefile
@@ -0,0 +1,3 @@
+obj-y			+= numa.o
+obj-y			+= toptree.o
+obj-$(CONFIG_NUMA_EMU)	+= mode_emu.o
diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c
new file mode 100644
index 0000000..7de4e2f
--- /dev/null
+++ b/arch/s390/numa/mode_emu.c
@@ -0,0 +1,530 @@
+/*
+ * NUMA support for s390
+ *
+ * NUMA emulation (aka fake NUMA) distributes the available memory to nodes
+ * without using real topology information about the physical memory of the
+ * machine.
+ *
+ * It distributes the available CPUs to nodes while respecting the original
+ * machine topology information. This is done by trying to avoid separating
+ * CPUs which reside on the same book or even on the same MC.
+ *
+ * Because the current Linux scheduler code requires a stable cpu to node
+ * mapping, cores are pinned to nodes when the first CPU thread is set online.
+ *
+ * Copyright IBM Corp. 2015
+ */
+
+#define KMSG_COMPONENT "numa_emu"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/memblock.h>
+#include <linux/node.h>
+#include <linux/memory.h>
+#include <linux/slab.h>
+#include <asm/smp.h>
+#include <asm/topology.h>
+#include "numa_mode.h"
+#include "toptree.h"
+
+/* Distances between the different system components */
+#define DIST_EMPTY	0
+#define DIST_CORE	1
+#define DIST_MC		2
+#define DIST_BOOK	3
+#define DIST_MAX	4
+
+/* Node distance reported to common code */
+#define EMU_NODE_DIST	10
+
+/* Node ID for free (not yet pinned) cores */
+#define NODE_ID_FREE	-1
+
+/* Different levels of toptree */
+enum toptree_level {CORE, MC, BOOK, NODE, TOPOLOGY};
+
+/* The two toptree IDs */
+enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};
+
+/* Number of NUMA nodes */
+static int emu_nodes = 1;
+/* NUMA stripe size */
+static unsigned long emu_size;
+
+/*
+ * Node to core pinning information updates are protected by
+ * "sched_domains_mutex".
+ */
+static struct {
+	s32 to_node_id[CONFIG_NR_CPUS];	/* Pinned core to node mapping */
+	int total;			/* Total number of pinned cores */
+	int per_node_target;		/* Cores per node without extra cores */
+	int per_node[MAX_NUMNODES];	/* Number of cores pinned to node */
+} *emu_cores;
+
+/*
+ * Pin a core to a node
+ */
+static void pin_core_to_node(int core_id, int node_id)
+{
+	if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) {
+		emu_cores->per_node[node_id]++;
+		emu_cores->to_node_id[core_id] = node_id;
+		emu_cores->total++;
+	} else {
+		WARN_ON(emu_cores->to_node_id[core_id] != node_id);
+	}
+}
+
+/*
+ * Number of pinned cores of a node
+ */
+static int cores_pinned(struct toptree *node)
+{
+	return emu_cores->per_node[node->id];
+}
+
+/*
+ * ID of the node where the core is pinned (or NODE_ID_FREE)
+ */
+static int core_pinned_to_node_id(struct toptree *core)
+{
+	return emu_cores->to_node_id[core->id];
+}
+
+/*
+ * Number of cores in the tree that are not yet pinned
+ */
+static int cores_free(struct toptree *tree)
+{
+	struct toptree *core;
+	int count = 0;
+
+	toptree_for_each(core, tree, CORE) {
+		if (core_pinned_to_node_id(core) == NODE_ID_FREE)
+			count++;
+	}
+	return count;
+}
+
+/*
+ * Return node of core
+ */
+static struct toptree *core_node(struct toptree *core)
+{
+	return core->parent->parent->parent;
+}
+
+/*
+ * Return book of core
+ */
+static struct toptree *core_book(struct toptree *core)
+{
+	return core->parent->parent;
+}
+
+/*
+ * Return mc of core
+ */
+static struct toptree *core_mc(struct toptree *core)
+{
+	return core->parent;
+}
+
+/*
+ * Distance between two cores
+ */
+static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
+{
+	if (core_book(core1)->id != core_book(core2)->id)
+		return DIST_BOOK;
+	if (core_mc(core1)->id != core_mc(core2)->id)
+		return DIST_MC;
+	/* Same core or sibling on same MC */
+	return DIST_CORE;
+}
+
+/*
+ * Distance of a node to a core
+ */
+static int dist_node_to_core(struct toptree *node, struct toptree *core)
+{
+	struct toptree *core_node;
+	int dist_min = DIST_MAX;
+
+	toptree_for_each(core_node, node, CORE)
+		dist_min = min(dist_min, dist_core_to_core(core_node, core));
+	return dist_min == DIST_MAX ? DIST_EMPTY : dist_min;
+}
+
+/*
+ * Unify will delete empty nodes, therefore recreate nodes.
+ */
+static void toptree_unify_tree(struct toptree *tree)
+{
+	int nid;
+
+	toptree_unify(tree);
+	for (nid = 0; nid < emu_nodes; nid++)
+		toptree_get_child(tree, nid);
+}
+
+/*
+ * Find the best/nearest node for a given core and ensure that no node
+ * gets more than "emu_cores->per_node_target + extra" cores.
+ */
+static struct toptree *node_for_core(struct toptree *numa, struct toptree *core,
+				     int extra)
+{
+	struct toptree *node, *node_best = NULL;
+	int dist_cur, dist_best, cores_target;
+
+	cores_target = emu_cores->per_node_target + extra;
+	dist_best = DIST_MAX;
+	node_best = NULL;
+	toptree_for_each(node, numa, NODE) {
+		/* Already pinned cores must use their nodes */
+		if (core_pinned_to_node_id(core) == node->id) {
+			node_best = node;
+			break;
+		}
+		/* Skip nodes that already have enough cores */
+		if (cores_pinned(node) >= cores_target)
+			continue;
+		dist_cur = dist_node_to_core(node, core);
+		if (dist_cur < dist_best) {
+			dist_best = dist_cur;
+			node_best = node;
+		}
+	}
+	return node_best;
+}
+
+/*
+ * Find the best node for each core with respect to "extra" core count
+ */
+static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys,
+				   int extra)
+{
+	struct toptree *node, *core, *tmp;
+
+	toptree_for_each_safe(core, tmp, phys, CORE) {
+		node = node_for_core(numa, core, extra);
+		if (!node)
+			return;
+		toptree_move(core, node);
+		pin_core_to_node(core->id, node->id);
+	}
+}
+
+/*
+ * Move structures of given level to specified NUMA node
+ */
+static void move_level_to_numa_node(struct toptree *node, struct toptree *phys,
+				    enum toptree_level level, bool perfect)
+{
+	int cores_free, cores_target = emu_cores->per_node_target;
+	struct toptree *cur, *tmp;
+
+	toptree_for_each_safe(cur, tmp, phys, level) {
+		cores_free = cores_target - toptree_count(node, CORE);
+		if (perfect) {
+			if (cores_free == toptree_count(cur, CORE))
+				toptree_move(cur, node);
+		} else {
+			if (cores_free >= toptree_count(cur, CORE))
+				toptree_move(cur, node);
+		}
+	}
+}
+
+/*
+ * Move structures of a given level to NUMA nodes. If "perfect" is specified
+ * move only perfectly fitting structures. Otherwise also move structures
+ * that are smaller than needed.
+ */
+static void move_level_to_numa(struct toptree *numa, struct toptree *phys,
+			       enum toptree_level level, bool perfect)
+{
+	struct toptree *node;
+
+	toptree_for_each(node, numa, NODE)
+		move_level_to_numa_node(node, phys, level, perfect);
+}
+
+/*
+ * For the first run try to move the big structures
+ */
+static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys)
+{
+	struct toptree *core;
+
+	/* Always try to move perfectly fitting structures first */
+	move_level_to_numa(numa, phys, BOOK, true);
+	move_level_to_numa(numa, phys, BOOK, false);
+	move_level_to_numa(numa, phys, MC, true);
+	move_level_to_numa(numa, phys, MC, false);
+	/* Now pin all the moved cores */
+	toptree_for_each(core, numa, CORE)
+		pin_core_to_node(core->id, core_node(core)->id);
+}
+
+/*
+ * Allocate new topology and create required nodes
+ */
+static struct toptree *toptree_new(int id, int nodes)
+{
+	struct toptree *tree;
+	int nid;
+
+	tree = toptree_alloc(TOPOLOGY, id);
+	if (!tree)
+		goto fail;
+	for (nid = 0; nid < nodes; nid++) {
+		if (!toptree_get_child(tree, nid))
+			goto fail;
+	}
+	return tree;
+fail:
+	panic("NUMA emulation could not allocate topology");
+}
+
+/*
+ * Allocate and initialize core to node mapping
+ */
+static void create_core_to_node_map(void)
+{
+	int i;
+
+	emu_cores = kzalloc(sizeof(*emu_cores), GFP_KERNEL);
+	if (emu_cores == NULL)
+		panic("Could not allocate cores to node memory");
+	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
+		emu_cores->to_node_id[i] = NODE_ID_FREE;
+}
+
+/*
+ * Move cores from physical topology into NUMA target topology
+ * and try to keep as much of the physical topology as possible.
+ */
+static struct toptree *toptree_to_numa(struct toptree *phys)
+{
+	static int first = 1;
+	struct toptree *numa;
+	int cores_total;
+
+	cores_total = emu_cores->total + cores_free(phys);
+	emu_cores->per_node_target = cores_total / emu_nodes;
+	numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes);
+	if (first) {
+		toptree_to_numa_first(numa, phys);
+		first = 0;
+	}
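+	/*
+	 * Distribute the remaining cores: first fill each node up to the
+	 * per-node target, then allow one extra core per node for any
+	 * remainder that does not divide evenly.
+	 */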
+	toptree_to_numa_single(numa, phys, 0);
+	toptree_to_numa_single(numa, phys, 1);
+	toptree_unify_tree(numa);
+
+	WARN_ON(cpumask_weight(&phys->mask));
+	return numa;
+}
+
+/*
+ * Create a toptree out of the physical topology that we got from the hypervisor
+ */
+static struct toptree *toptree_from_topology(void)
+{
+	struct toptree *phys, *node, *book, *mc, *core;
+	struct cpu_topology_s390 *top;
+	int cpu;
+
+	phys = toptree_new(TOPTREE_ID_PHYS, 1);
+
+	for_each_online_cpu(cpu) {
+		top = &per_cpu(cpu_topology, cpu);
+		node = toptree_get_child(phys, 0);
+		book = toptree_get_child(node, top->book_id);
+		mc = toptree_get_child(book, top->socket_id);
+		core = toptree_get_child(mc, top->core_id);
+		if (!book || !mc || !core)
+			panic("NUMA emulation could not allocate memory");
+		cpumask_set_cpu(cpu, &core->mask);
+		toptree_update_mask(mc);
+	}
+	return phys;
+}
+
+/*
+ * Add toptree core to topology and create correct CPU masks
+ */
+static void topology_add_core(struct toptree *core)
+{
+	struct cpu_topology_s390 *top;
+	int cpu;
+
+	for_each_cpu(cpu, &core->mask) {
+		top = &per_cpu(cpu_topology, cpu);
+		cpumask_copy(&top->thread_mask, &core->mask);
+		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
+		cpumask_copy(&top->book_mask, &core_book(core)->mask);
+		cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+		top->node_id = core_node(core)->id;
+	}
+}
+
+/*
+ * Apply toptree to topology and create CPU masks
+ */
+static void toptree_to_topology(struct toptree *numa)
+{
+	struct toptree *core;
+	int i;
+
+	/* Clear all node masks */
+	for (i = 0; i < MAX_NUMNODES; i++)
+		cpumask_clear(node_to_cpumask_map[i]);
+
+	/* Rebuild all masks */
+	toptree_for_each(core, numa, CORE)
+		topology_add_core(core);
+}
+
+/*
+ * Show the node to core mapping
+ */
+static void print_node_to_core_map(void)
+{
+	int nid, cid;
+
+	if (!numa_debug_enabled)
+		return;
+	printk(KERN_DEBUG "NUMA node to core mapping\n");
+	for (nid = 0; nid < emu_nodes; nid++) {
+		printk(KERN_DEBUG "  node %3d: ", nid);
+		for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) {
+			if (emu_cores->to_node_id[cid] == nid)
+				printk(KERN_CONT "%d ", cid);
+		}
+		printk(KERN_CONT "\n");
+	}
+}
+
+/*
+ * Transfer physical topology into a NUMA topology and modify CPU masks
+ * according to the NUMA topology.
+ *
+ * Must be called with "sched_domains_mutex" lock held.
+ */
+static void emu_update_cpu_topology(void)
+{
+	struct toptree *phys, *numa;
+
+	if (emu_cores == NULL)
+		create_core_to_node_map();
+	phys = toptree_from_topology();
+	numa = toptree_to_numa(phys);
+	toptree_free(phys);
+	toptree_to_topology(numa);
+	toptree_free(numa);
+	print_node_to_core_map();
+}
+
+/*
+ * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum
+ * alignment (needed for memory hotplug).
+ */
+static unsigned long emu_setup_size_adjust(unsigned long size)
+{
+	size = size ? : CONFIG_EMU_SIZE;
+	size = roundup(size, memory_block_size_bytes());
+	return size;
+}
+
+/*
+ * If there is not enough memory for the specified nodes, reduce the node count.
+ */
+static int emu_setup_nodes_adjust(int nodes)
+{
+	int nodes_max;
+
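+	/* Each node needs at least one full memory stripe of emu_size */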
+	nodes_max = memblock.memory.total_size / emu_size;
+	nodes_max = max(nodes_max, 1);
+	if (nodes_max >= nodes)
+		return nodes;
+	pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes);
+	return nodes_max;
+}
+
+/*
+ * Early emu setup
+ */
+static void emu_setup(void)
+{
+	emu_size = emu_setup_size_adjust(emu_size);
+	emu_nodes = emu_setup_nodes_adjust(emu_nodes);
+	pr_info("Creating %d nodes with memory stripe size %ld MB\n",
+		emu_nodes, emu_size >> 20);
+}
+
+/*
+ * Return node id for given page number
+ */
+static int emu_pfn_to_nid(unsigned long pfn)
+{
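+	/* Memory is striped across the emulated nodes in emu_size chunks */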
+	return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes;
+}
+
+/*
+ * Return stripe size
+ */
+static unsigned long emu_align(void)
+{
+	return emu_size;
+}
+
+/*
+ * Return distance between two nodes
+ */
+static int emu_distance(int node1, int node2)
+{
+	return (node1 != node2) * EMU_NODE_DIST;
+}
+
+/*
+ * Define callbacks for generic s390 NUMA infrastructure
+ */
+const struct numa_mode numa_mode_emu = {
+	.name = "emu",
+	.setup = emu_setup,
+	.update_cpu_topology = emu_update_cpu_topology,
+	.__pfn_to_nid = emu_pfn_to_nid,
+	.align = emu_align,
+	.distance = emu_distance,
+};
+
+/*
+ * Kernel parameter: emu_nodes=<n>
+ */
+static int __init early_parse_emu_nodes(char *p)
+{
+	int count;
+
+	if (kstrtoint(p, 0, &count) != 0 || count <= 0)
+		return 0;
+	emu_nodes = min(count, MAX_NUMNODES);
+	return 0;
+}
+early_param("emu_nodes", early_parse_emu_nodes);
+
+/*
+ * Kernel parameter: emu_size=[<n>[k|M|G|T]]
+ */
+static int __init early_parse_emu_size(char *p)
+{
+	emu_size = memparse(p, NULL);
+	return 0;
+}
+early_param("emu_size", early_parse_emu_size);
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
new file mode 100644
index 0000000..09b1d23
--- /dev/null
+++ b/arch/s390/numa/numa.c
@@ -0,0 +1,184 @@
+/*
+ * NUMA support for s390
+ *
+ * Implement NUMA core code.
+ *
+ * Copyright IBM Corp. 2015
+ */
+
+#define KMSG_COMPONENT "numa"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/cpumask.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/node.h>
+
+#include <asm/numa.h>
+#include "numa_mode.h"
+
+pg_data_t *node_data[MAX_NUMNODES];
+EXPORT_SYMBOL(node_data);
+
+cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+EXPORT_SYMBOL(node_to_cpumask_map);
+
+const struct numa_mode numa_mode_plain = {
+	.name = "plain",
+};
+
+static const struct numa_mode *mode = &numa_mode_plain;
+
+int numa_pfn_to_nid(unsigned long pfn)
+{
+	return mode->__pfn_to_nid ? mode->__pfn_to_nid(pfn) : 0;
+}
+
+void numa_update_cpu_topology(void)
+{
+	if (mode->update_cpu_topology)
+		mode->update_cpu_topology();
+}
+
+int __node_distance(int a, int b)
+{
+	return mode->distance ? mode->distance(a, b) : 0;
+}
+
+int numa_debug_enabled;
+
+/*
+ * alloc_node_data() - Allocate node data
+ */
+static __init pg_data_t *alloc_node_data(void)
+{
+	pg_data_t *res;
+
+	res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1);
+	if (!res)
+		panic("Could not allocate memory for node data!\n");
+	memset(res, 0, sizeof(pg_data_t));
+	return res;
+}
+
+/*
+ * numa_setup_memory() - Assign bootmem to nodes
+ *
+ * The memory is first added to memblock without any respect to nodes.
+ * This is fixed before remaining memblock memory is handed over to the
+ * buddy allocator.
+ * An important side effect is that large bootmem allocations might easily
+ * cross node boundaries, which can be needed for large allocations with
+ * smaller memory stripes in each node (i.e. when using NUMA emulation).
+ *
+ * Memory defines nodes: therefore this routine also sets online those
+ * nodes that actually have memory assigned.
+ */
+static void __init numa_setup_memory(void)
+{
+	unsigned long cur_base, align, end_of_dram;
+	int nid = 0;
+
+	end_of_dram = memblock_end_of_DRAM();
+	align = mode->align ? mode->align() : ULONG_MAX;
+
+	/*
+	 * Step through all available memory and assign it to the nodes
+	 * indicated by the mode implementation.
+	 * All nodes which are seen here will be set online.
+	 */
+	cur_base = 0;
+	do {
+		nid = numa_pfn_to_nid(PFN_DOWN(cur_base));
+		node_set_online(nid);
+		memblock_set_node(cur_base, align, &memblock.memory, nid);
+		cur_base += align;
+	} while (cur_base < end_of_dram);
+
+	/* Allocate and fill out node_data */
+	for (nid = 0; nid < MAX_NUMNODES; nid++)
+		NODE_DATA(nid) = alloc_node_data();
+
+	for_each_online_node(nid) {
+		unsigned long start_pfn, end_pfn;
+		unsigned long t_start, t_end;
+		int i;
+
+		start_pfn = ULONG_MAX;
+		end_pfn = 0;
+		for_each_mem_pfn_range(i, nid, &t_start, &t_end, NULL) {
+			if (t_start < start_pfn)
+				start_pfn = t_start;
+			if (t_end > end_pfn)
+				end_pfn = t_end;
+		}
+		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
+		NODE_DATA(nid)->node_id = nid;
+	}
+}
+
+/*
+ * numa_setup() - Earliest initialization
+ *
+ * Assign the mode and call the mode's setup routine.
+ */
+void __init numa_setup(void)
+{
+	pr_info("NUMA mode: %s\n", mode->name);
+	if (mode->setup)
+		mode->setup();
+	numa_setup_memory();
+	memblock_dump_all();
+}
+
+/*
+ * numa_init_early() - Initialization initcall
+ *
+ * This runs when only one CPU is online and before the first
+ * topology update is called for by the scheduler.
+ */
+static int __init numa_init_early(void)
+{
+	/* Attach all possible CPUs to node 0 for now. */
+	cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+	return 0;
+}
+early_initcall(numa_init_early);
+
+/*
+ * numa_init_late() - Initialization initcall
+ *
+ * Register NUMA nodes.
+ */
+static int __init numa_init_late(void)
+{
+	int nid;
+
+	for_each_online_node(nid)
+		register_one_node(nid);
+	return 0;
+}
+device_initcall(numa_init_late);
+
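+/*
+ * Kernel parameter: numa_debug
+ */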
+static int __init parse_debug(char *parm)
+{
+	numa_debug_enabled = 1;
+	return 0;
+}
+early_param("numa_debug", parse_debug);
+
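+/*
+ * Kernel parameter: numa=<mode> ("plain" or "emu")
+ */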
+static int __init parse_numa(char *parm)
+{
+	if (strcmp(parm, numa_mode_plain.name) == 0)
+		mode = &numa_mode_plain;
+#ifdef CONFIG_NUMA_EMU
+	if (strcmp(parm, numa_mode_emu.name) == 0)
+		mode = &numa_mode_emu;
+#endif
+	return 0;
+}
+early_param("numa", parse_numa);
diff --git a/arch/s390/numa/numa_mode.h b/arch/s390/numa/numa_mode.h
new file mode 100644
index 0000000..08953b0b
--- /dev/null
+++ b/arch/s390/numa/numa_mode.h
@@ -0,0 +1,24 @@
+/*
+ * NUMA support for s390
+ *
+ * Define declarations used for communication between NUMA mode
+ * implementations and NUMA core functionality.
+ *
+ * Copyright IBM Corp. 2015
+ */
+#ifndef __S390_NUMA_MODE_H
+#define __S390_NUMA_MODE_H
+
+struct numa_mode {
+	char *name;				/* Name of mode */
+	void (*setup)(void);			/* Initialize mode */
+	void (*update_cpu_topology)(void);	/* Called by topology code */
+	int (*__pfn_to_nid)(unsigned long pfn);	/* PFN to node ID */
+	unsigned long (*align)(void);		/* Minimum node alignment */
+	int (*distance)(int a, int b);		/* Distance between two nodes */
+};
+
+extern const struct numa_mode numa_mode_plain;
+extern const struct numa_mode numa_mode_emu;
+
+#endif /* __S390_NUMA_MODE_H */
diff --git a/arch/s390/numa/toptree.c b/arch/s390/numa/toptree.c
new file mode 100644
index 0000000..902d350
--- /dev/null
+++ b/arch/s390/numa/toptree.c
@@ -0,0 +1,342 @@
+/*
+ * NUMA support for s390
+ *
+ * A tree structure used for machine topology mangling
+ *
+ * Copyright IBM Corp. 2015
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/slab.h>
+#include <asm/numa.h>
+
+#include "toptree.h"
+
+/**
+ * toptree_alloc - Allocate and initialize a new tree node.
+ * @level: The node's vertical level; level 0 contains the leaves.
+ * @id: ID number, explicitly not unique beyond scope of node's siblings
+ *
+ * Allocate a new tree node and initialize it.
+ *
+ * RETURNS:
+ * Pointer to the new tree node or NULL on error
+ */
+struct toptree *toptree_alloc(int level, int id)
+{
+	struct toptree *res = kzalloc(sizeof(struct toptree), GFP_KERNEL);
+
+	if (!res)
+		return res;
+
+	INIT_LIST_HEAD(&res->children);
+	INIT_LIST_HEAD(&res->sibling);
+	cpumask_clear(&res->mask);
+	res->level = level;
+	res->id = id;
+	return res;
+}
+
+/**
+ * toptree_remove - Remove a tree node from a tree
+ * @cand: Pointer to the node to remove
+ *
+ * The node is detached from its parent node. The parent node's
+ * masks will be updated to reflect the loss of the child.
+ */
+static void toptree_remove(struct toptree *cand)
+{
+	struct toptree *oldparent;
+
+	list_del_init(&cand->sibling);
+	oldparent = cand->parent;
+	cand->parent = NULL;
+	toptree_update_mask(oldparent);
+}
+
+/**
+ * toptree_free - discard a tree node
+ * @cand: Pointer to the tree node to discard
+ *
+ * If @cand is attached to a parent node, it is detached cleanly using
+ * toptree_remove. Any children are freed recursively, and finally
+ * @cand itself is freed.
+ */
+void toptree_free(struct toptree *cand)
+{
+	struct toptree *child, *tmp;
+
+	if (cand->parent)
+		toptree_remove(cand);
+	toptree_for_each_child_safe(child, tmp, cand)
+		toptree_free(child);
+	kfree(cand);
+}
+
+/**
+ * toptree_update_mask - Update node bitmasks
+ * @cand: Pointer to a tree node
+ *
+ * The node's cpumask will be updated by combining all children's
+ * masks. Then toptree_update_mask is called recursively for the
+ * parent if applicable.
+ *
+ * NOTE:
+ * This must not be called on leaves. If called on a leaf, its
+ * CPU mask is cleared and lost.
+ */
+void toptree_update_mask(struct toptree *cand)
+{
+	struct toptree *child;
+
+	cpumask_clear(&cand->mask);
+	list_for_each_entry(child, &cand->children, sibling)
+		cpumask_or(&cand->mask, &cand->mask, &child->mask);
+	if (cand->parent)
+		toptree_update_mask(cand->parent);
+}
+
+/**
+ * toptree_insert - Insert a tree node into tree
+ * @cand: Pointer to the node to insert
+ * @target: Pointer to the node to which @cand will be added as a child
+ *
+ * Insert a tree node into a tree. Masks will be updated automatically.
+ *
+ * RETURNS:
+ * 0 on success, -1 if NULL is passed as argument or the node levels
+ * don't fit.
+ */
+static int toptree_insert(struct toptree *cand, struct toptree *target)
+{
+	if (!cand || !target)
+		return -1;
+	if (target->level != (cand->level + 1))
+		return -1;
+	list_add_tail(&cand->sibling, &target->children);
+	cand->parent = target;
+	toptree_update_mask(target);
+	return 0;
+}
+
+/**
+ * toptree_move_children - Move all child nodes of a node to a new place
+ * @cand: Pointer to the node whose children are to be moved
+ * @target: Pointer to the node to which @cand's children will be attached
+ *
+ * Take all child nodes of @cand and move them using toptree_move.
+ */
+static void toptree_move_children(struct toptree *cand, struct toptree *target)
+{
+	struct toptree *child, *tmp;
+
+	toptree_for_each_child_safe(child, tmp, cand)
+		toptree_move(child, target);
+}
+
+/**
+ * toptree_unify - Merge children with same ID
+ * @cand: Pointer to node whose direct children should be made unique
+ *
+ * When mangling the tree it is possible that a node has two or more children
+ * which have the same ID. This routine merges these children into one and
+ * moves all children of the merged nodes into the unified node.
+ */
+void toptree_unify(struct toptree *cand)
+{
+	struct toptree *child, *tmp, *cand_copy;
+
+	/* Threads cannot be split, cores are not split */
+	if (cand->level < 2)
+		return;
+
+	cand_copy = toptree_alloc(cand->level, 0);
+	toptree_for_each_child_safe(child, tmp, cand) {
+		struct toptree *tmpchild;
+
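+		/* Merge non-empty children by ID; empty children are dropped */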
+		if (!cpumask_empty(&child->mask)) {
+			tmpchild = toptree_get_child(cand_copy, child->id);
+			toptree_move_children(child, tmpchild);
+		}
+		toptree_free(child);
+	}
+	toptree_move_children(cand_copy, cand);
+	toptree_free(cand_copy);
+
+	toptree_for_each_child(child, cand)
+		toptree_unify(child);
+}
+
+/**
+ * toptree_move - Move a node to another context
+ * @cand: Pointer to the node to move
+ * @target: Pointer to the node where @cand should go
+ *
+ * In the easiest case @cand is exactly on the level below @target
+ * and will be immediately moved to the target.
+ *
+ * If @target's level is not the direct parent level of @cand,
+ * nodes for the missing levels are created and put between
+ * @cand and @target. The "stacking" nodes' IDs are taken from
+ * @cand's parents.
+ *
+ * After this it is likely to have redundant nodes in the tree
+ * which are addressed by means of toptree_unify.
+ */
+void toptree_move(struct toptree *cand, struct toptree *target)
+{
+	struct toptree *stack_target, *real_insert_point, *ptr, *tmp;
+
+	if (cand->level + 1 == target->level) {
+		toptree_remove(cand);
+		toptree_insert(cand, target);
+		return;
+	}
+
+	real_insert_point = NULL;
+	ptr = cand;
+	stack_target = NULL;
+
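+	/*
+	 * Build the chain of intermediate nodes between @cand and @target,
+	 * reusing the IDs of @cand's ancestors for the new levels.
+	 */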
+	do {
+		tmp = stack_target;
+		stack_target = toptree_alloc(ptr->level + 1,
+					     ptr->parent->id);
+		toptree_insert(tmp, stack_target);
+		if (!real_insert_point)
+			real_insert_point = stack_target;
+		ptr = ptr->parent;
+	} while (stack_target->level < (target->level - 1));
+
+	toptree_remove(cand);
+	toptree_insert(cand, real_insert_point);
+	toptree_insert(stack_target, target);
+}
+
+/**
+ * toptree_get_child - Access a tree node's child by its ID
+ * @cand: Pointer to the tree node whose child is to be accessed
+ * @id: The desired child's ID
+ *
+ * @cand's children are searched for a child with matching ID.
+ * If no match can be found, a new child with the desired ID
+ * is created and returned.
+ */
+struct toptree *toptree_get_child(struct toptree *cand, int id)
+{
+	struct toptree *child;
+
+	toptree_for_each_child(child, cand)
+		if (child->id == id)
+			return child;
+	child = toptree_alloc(cand->level-1, id);
+	toptree_insert(child, cand);
+	return child;
+}
+
+/**
+ * toptree_first - Find the first descendant on specified level
+ * @context: Pointer to tree node whose descendants are to be used
+ * @level: The level of interest
+ *
+ * RETURNS:
+ * @context's first descendant on the specified level, or NULL
+ * if there is no matching descendant
+ */
+struct toptree *toptree_first(struct toptree *context, int level)
+{
+	struct toptree *child, *tmp;
+
+	if (context->level == level)
+		return context;
+
+	if (!list_empty(&context->children)) {
+		list_for_each_entry(child, &context->children, sibling) {
+			tmp = toptree_first(child, level);
+			if (tmp)
+				return tmp;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * toptree_next_sibling - Return next sibling
+ * @cur: Pointer to a tree node
+ *
+ * RETURNS:
+ * If @cur has a parent and is not the last entry in the parent's children
+ * list, the next sibling is returned; otherwise NULL is returned.
+ */
+static struct toptree *toptree_next_sibling(struct toptree *cur)
+{
+	if (cur->parent == NULL)
+		return NULL;
+
+	if (cur == list_last_entry(&cur->parent->children,
+				   struct toptree, sibling))
+		return NULL;
+	return (struct toptree *) list_next_entry(cur, sibling);
+}
+
+/**
+ * toptree_next - Tree traversal function
+ * @cur: Pointer to current element
+ * @context: Pointer to the root node of the tree or subtree to
+ * be traversed.
+ * @level: The level of interest.
+ *
+ * RETURNS:
+ * Pointer to the next node on level @level
+ * or NULL when there is no next node.
+ */
+struct toptree *toptree_next(struct toptree *cur, struct toptree *context,
+			     int level)
+{
+	struct toptree *cur_context, *tmp;
+
+	if (!cur)
+		return NULL;
+
+	if (context->level == level)
+		return NULL;
+
+	tmp = toptree_next_sibling(cur);
+	if (tmp != NULL)
+		return tmp;
+
+	cur_context = cur;
+	while (cur_context->level < context->level - 1) {
+		/* Step up */
+		cur_context = cur_context->parent;
+		/* Step aside */
+		tmp = toptree_next_sibling(cur_context);
+		if (tmp != NULL) {
+			/* Step down */
+			tmp = toptree_first(tmp, level);
+			if (tmp != NULL)
+				return tmp;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * toptree_count - Count descendants on specified level
+ * @context: Pointer to node whose descendants are to be considered
+ * @level: Only descendants on the specified level will be counted
+ *
+ * RETURNS:
+ * Number of descendants on the specified level
+ */
+int toptree_count(struct toptree *context, int level)
+{
+	struct toptree *cur;
+	int cnt = 0;
+
+	toptree_for_each(cur, context, level)
+		cnt++;
+	return cnt;
+}
diff --git a/arch/s390/numa/toptree.h b/arch/s390/numa/toptree.h
new file mode 100644
index 0000000..bdf5020
--- /dev/null
+++ b/arch/s390/numa/toptree.h
@@ -0,0 +1,60 @@
+/*
+ * NUMA support for s390
+ *
+ * A tree structure used for machine topology mangling
+ *
+ * Copyright IBM Corp. 2015
+ */
+#ifndef S390_TOPTREE_H
+#define S390_TOPTREE_H
+
+#include <linux/cpumask.h>
+#include <linux/list.h>
+
+struct toptree {
+	int level;
+	int id;
+	cpumask_t mask;
+	struct toptree *parent;
+	struct list_head sibling;
+	struct list_head children;
+};
+
+struct toptree *toptree_alloc(int level, int id);
+void toptree_free(struct toptree *cand);
+void toptree_update_mask(struct toptree *cand);
+void toptree_unify(struct toptree *cand);
+struct toptree *toptree_get_child(struct toptree *cand, int id);
+void toptree_move(struct toptree *cand, struct toptree *target);
+int toptree_count(struct toptree *context, int level);
+
+struct toptree *toptree_first(struct toptree *context, int level);
+struct toptree *toptree_next(struct toptree *cur, struct toptree *context,
+			     int level);
+
+#define toptree_for_each_child(child, ptree)				\
+	list_for_each_entry(child,  &ptree->children, sibling)
+
+#define toptree_for_each_child_safe(child, ptmp, ptree)			\
+	list_for_each_entry_safe(child, ptmp, &ptree->children, sibling)
+
+#define toptree_is_last(ptree)					\
+	((ptree->parent == NULL) ||				\
+	 (ptree->parent->children.prev == &ptree->sibling))
+
+#define toptree_for_each(ptree, cont, ttype)		\
+	for (ptree = toptree_first(cont, ttype);	\
+	     ptree != NULL;				\
+	     ptree = toptree_next(ptree, cont, ttype))
+
+#define toptree_for_each_safe(ptree, tmp, cont, ttype)		\
+	for (ptree = toptree_first(cont, ttype),		\
+		     tmp = toptree_next(ptree, cont, ttype);	\
+	     ptree != NULL;					\
+	     ptree = tmp,					\
+		     tmp = toptree_next(ptree, cont, ttype))
+
+#define toptree_for_each_sibling(ptree, start)			\
+	toptree_for_each(ptree, start->parent, start->level)
+
+#endif /* S390_TOPTREE_H */
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 598f023..7ef12a3 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -76,11 +76,6 @@
 
 static struct kmem_cache *zdev_fmb_cache;
 
-struct zpci_dev *get_zdev(struct pci_dev *pdev)
-{
-	return (struct zpci_dev *) pdev->sysdata;
-}
-
 struct zpci_dev *get_zdev_by_fid(u32 fid)
 {
 	struct zpci_dev *tmp, *zdev = NULL;
@@ -269,7 +264,7 @@
 			      unsigned long offset,
 			      unsigned long max)
 {
-	struct zpci_dev *zdev =	get_zdev(pdev);
+	struct zpci_dev *zdev =	to_zpci(pdev);
 	u64 addr;
 	int idx;
 
@@ -385,7 +380,7 @@
 
 int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
 {
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 	unsigned int hwirq, msi_vecs;
 	unsigned long aisb;
 	struct msi_desc *msi;
@@ -414,7 +409,7 @@
 
 	/* Request MSI interrupts */
 	hwirq = 0;
-	list_for_each_entry(msi, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(msi, pdev) {
 		rc = -EIO;
 		irq = irq_alloc_desc(0);	/* Alloc irq on node 0 */
 		if (irq < 0)
@@ -440,7 +435,7 @@
 	return (msi_vecs == nvec) ? 0 : msi_vecs;
 
 out_msi:
-	list_for_each_entry(msi, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(msi, pdev) {
 		if (hwirq-- == 0)
 			break;
 		irq_set_msi_desc(msi->irq, NULL);
@@ -460,7 +455,7 @@
 
 void arch_teardown_msi_irqs(struct pci_dev *pdev)
 {
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 	struct msi_desc *msi;
 	int rc;
 
@@ -470,7 +465,7 @@
 		return;
 
 	/* Release MSI interrupts */
-	list_for_each_entry(msi, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(msi, pdev) {
 		if (msi->msi_attrib.is_msix)
 			__pci_msix_desc_mask_irq(msi, 1);
 		else
@@ -637,7 +632,7 @@
 	int i;
 
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
-		if (!zdev->bars[i].size)
+		if (!zdev->bars[i].size || !zdev->bars[i].res)
 			continue;
 
 		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
@@ -648,7 +643,7 @@
 
 int pcibios_add_device(struct pci_dev *pdev)
 {
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 	struct resource *res;
 	int i;
 
@@ -673,7 +668,7 @@
 
 int pcibios_enable_device(struct pci_dev *pdev, int mask)
 {
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 
 	zdev->pdev = pdev;
 	zpci_debug_init_device(zdev);
@@ -684,7 +679,7 @@
 
 void pcibios_disable_device(struct pci_dev *pdev)
 {
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 
 	zpci_fmb_disable_device(zdev);
 	zpci_debug_exit_device(zdev);
@@ -695,7 +690,7 @@
 static int zpci_restore(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 	int ret = 0;
 
 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
@@ -717,7 +712,7 @@
 static int zpci_freeze(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 
 	if (zdev->state != ZPCI_FN_STATE_ONLINE)
 		return 0;
@@ -777,17 +772,22 @@
 
 	ret = zpci_setup_bus_resources(zdev, &resources);
 	if (ret)
-		return ret;
+		goto error;
 
 	zdev->bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
 				      zdev, &resources);
 	if (!zdev->bus) {
-		zpci_cleanup_bus_resources(zdev);
-		return -EIO;
+		ret = -EIO;
+		goto error;
 	}
 	zdev->bus->max_bus_speed = zdev->max_bus_speed;
 	pci_bus_add_devices(zdev->bus);
 	return 0;
+
+error:
+	zpci_cleanup_bus_resources(zdev);
+	pci_free_resource_list(&resources);
+	return ret;
 }
 
 int zpci_enable_device(struct zpci_dev *zdev)
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 6fd8d58..42b7658 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -277,7 +277,7 @@
 				     enum dma_data_direction direction,
 				     struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long nr_pages, iommu_page_index;
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
@@ -316,7 +316,7 @@
 				 size_t size, enum dma_data_direction direction,
 				 struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long iommu_page_index;
 	int npages;
 
@@ -337,7 +337,7 @@
 			    dma_addr_t *dma_handle, gfp_t flag,
 			    struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	struct page *page;
 	unsigned long pa;
 	dma_addr_t map;
@@ -367,7 +367,7 @@
 			  void *pa, dma_addr_t dma_handle,
 			  struct dma_attrs *attrs)
 {
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 
 	size = PAGE_ALIGN(size);
 	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index ed2394d..369a3e0 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -46,15 +46,13 @@
 static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 {
 	struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+	struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
 
 	zpci_err("error CCDF:\n");
 	zpci_err_hex(ccdf, sizeof(*ccdf));
 
-	if (!zdev)
-		return;
-
 	pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
-	       pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
+	       pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
 }
 
 void zpci_event_error(void *data)
@@ -89,7 +87,9 @@
 		ret = zpci_enable_device(zdev);
 		if (ret)
 			break;
+		pci_lock_rescan_remove();
 		pci_rescan_bus(zdev->bus);
+		pci_unlock_rescan_remove();
 		break;
 	case 0x0302: /* Reserved -> Standby */
 		if (!zdev)
@@ -97,7 +97,7 @@
 		break;
 	case 0x0303: /* Deconfiguration requested */
 		if (pdev)
-			pci_stop_and_remove_bus_device(pdev);
+			pci_stop_and_remove_bus_device_locked(pdev);
 
 		ret = zpci_disable_device(zdev);
 		if (ret)
@@ -114,7 +114,7 @@
 			/* Give the driver a hint that the function is
 			 * already unusable. */
 			pdev->error_state = pci_channel_io_perm_failure;
-			pci_stop_and_remove_bus_device(pdev);
+			pci_stop_and_remove_bus_device_locked(pdev);
 		}
 
 		zdev->fh = ccdf->fh;
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 85267c0..dcc2634 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -8,10 +8,23 @@
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <asm/pci_insn.h>
+#include <asm/pci_debug.h>
 #include <asm/processor.h>
 
 #define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */
 
+static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
+{
+	struct {
+		u8 cc;
+		u8 status;
+		u64 req;
+		u64 offset;
+	} data = {cc, status, req, offset};
+
+	zpci_err_hex(&data, sizeof(data));
+}
+
 /* Modify PCI Function Controls */
 static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
 {
@@ -38,8 +51,8 @@
 	} while (cc == 2);
 
 	if (cc)
-		printk_once(KERN_ERR "%s: error cc: %d  status: %d\n",
-			     __func__, cc, status);
+		zpci_err_insn(cc, status, req, 0);
+
 	return (cc) ? -EIO : 0;
 }
 
@@ -72,8 +85,8 @@
 	} while (cc == 2);
 
 	if (cc)
-		printk_once(KERN_ERR "%s: error cc: %d  status: %d  dma_addr: %Lx  size: %Lx\n",
-			    __func__, cc, status, addr, range);
+		zpci_err_insn(cc, status, addr, range);
+
 	return (cc) ? -EIO : 0;
 }
 
@@ -121,8 +134,8 @@
 	} while (cc == 2);
 
 	if (cc)
-		printk_once(KERN_ERR "%s: error cc: %d  status: %d  req: %Lx  offset: %Lx\n",
-			    __func__, cc, status, req, offset);
+		zpci_err_insn(cc, status, req, offset);
+
 	return (cc > 0) ? -EIO : cc;
 }
 EXPORT_SYMBOL_GPL(zpci_load);
@@ -159,8 +172,8 @@
 	} while (cc == 2);
 
 	if (cc)
-		printk_once(KERN_ERR "%s: error cc: %d  status: %d  req: %Lx  offset: %Lx\n",
-			__func__, cc, status, req, offset);
+		zpci_err_insn(cc, status, req, offset);
+
 	return (cc > 0) ? -EIO : cc;
 }
 EXPORT_SYMBOL_GPL(zpci_store);
@@ -195,8 +208,8 @@
 	} while (cc == 2);
 
 	if (cc)
-		printk_once(KERN_ERR "%s: error cc: %d  status: %d  req: %Lx  offset: %Lx\n",
-			    __func__, cc, status, req, offset);
+		zpci_err_insn(cc, status, req, offset);
+
 	return (cc > 0) ? -EIO : cc;
 }
 EXPORT_SYMBOL_GPL(zpci_store_block);
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index fa3ce89..f37a580 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -16,7 +16,7 @@
 static ssize_t name##_show(struct device *dev,				\
 			   struct device_attribute *attr, char *buf)	\
 {									\
-	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));		\
+	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));		\
 									\
 	return sprintf(buf, fmt, zdev->member);				\
 }									\
@@ -38,23 +38,30 @@
 			     const char *buf, size_t count)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 	int ret;
 
 	if (!device_remove_file_self(dev, attr))
 		return count;
 
+	pci_lock_rescan_remove();
 	pci_stop_and_remove_bus_device(pdev);
 	ret = zpci_disable_device(zdev);
 	if (ret)
-		return ret;
+		goto error;
 
 	ret = zpci_enable_device(zdev);
 	if (ret)
-		return ret;
+		goto error;
 
 	pci_rescan_bus(zdev->bus);
+	pci_unlock_rescan_remove();
+
 	return count;
+
+error:
+	pci_unlock_rescan_remove();
+	return ret;
 }
 static DEVICE_ATTR_WO(recover);
 
@@ -64,7 +71,7 @@
 {
 	struct device *dev = kobj_to_dev(kobj);
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct zpci_dev *zdev = get_zdev(pdev);
+	struct zpci_dev *zdev = to_zpci(pdev);
 
 	return memory_read_from_buffer(buf, count, &off, zdev->util_str,
 				       sizeof(zdev->util_str));
diff --git a/arch/score/include/asm/switch_to.h b/arch/score/include/asm/switch_to.h
index 031756b..fda3f83 100644
--- a/arch/score/include/asm/switch_to.h
+++ b/arch/score/include/asm/switch_to.h
@@ -8,6 +8,4 @@
 	(last) = resume(prev, next, task_thread_info(next));	\
 } while (0)
 
-#define finish_arch_switch(prev)	do {} while (0)
-
 #endif /* _ASM_SCORE_SWITCH_TO_H */
diff --git a/arch/score/kernel/time.c b/arch/score/kernel/time.c
index 24770cd..679b8d7 100644
--- a/arch/score/kernel/time.c
+++ b/arch/score/kernel/time.c
@@ -55,31 +55,20 @@
 	return 0;
 }
 
-static void score_timer_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evdev)
+static int score_timer_set_periodic(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL);
-		outl(SYSTEM_CLOCK/HZ, P_TIMER0_PRELOAD);
-		outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_UNUSED:
-		break;
-	default:
-		BUG();
-	}
+	outl((TMR_M_PERIODIC | TMR_IE_ENABLE), P_TIMER0_CTRL);
+	outl(SYSTEM_CLOCK / HZ, P_TIMER0_PRELOAD);
+	outl(inl(P_TIMER0_CTRL) | TMR_ENABLE, P_TIMER0_CTRL);
+	return 0;
 }
 
 static struct clock_event_device score_clockevent = {
-	.name		= "score_clockevent",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.shift		= 16,
-	.set_next_event	= score_timer_set_next_event,
-	.set_mode	= score_timer_set_mode,
+	.name			= "score_clockevent",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.shift			= 16,
+	.set_next_event		= score_timer_set_next_event,
+	.set_state_periodic	= score_timer_set_periodic,
 };
 
 void __init time_init(void)
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 1087dba..6f97a8f 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -31,7 +31,7 @@
 
 static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	unsigned long mask;
 	int bit;
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index 00e6992..60aebd1 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -30,7 +30,7 @@
 
 static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	unsigned long mask;
 	int bit;
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index 5d1d3ec..9f20338 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -92,8 +92,9 @@
 	.irq_unmask	= enable_se7724_irq,
 };
 
-static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct fpga_irq set = get_fpga_irq(irq);
 	unsigned short intv = __raw_readw(set.sraddr);
 	unsigned int ext_irq = set.base;
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index f035a7a..24555c3 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -62,7 +62,7 @@
 
 static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	unsigned long mask;
 	int pin;
diff --git a/arch/sh/drivers/pci/pci-sh4.h b/arch/sh/drivers/pci/pci-sh4.h
index cbf763b..0288efc 100644
--- a/arch/sh/drivers/pci/pci-sh4.h
+++ b/arch/sh/drivers/pci/pci-sh4.h
@@ -11,14 +11,6 @@
 
 #include <asm/io.h>
 
-/* startup values */
-#define PCI_PROBE_BIOS		1
-#define PCI_PROBE_CONF1		2
-#define PCI_PROBE_CONF2		4
-#define PCI_NO_CHECKS		0x400
-#define PCI_ASSIGN_ROMS		0x1000
-#define PCI_BIOS_IRQ_SCAN	0x2000
-
 #define SH4_PCICR		0x100		/* PCI Control Register */
   #define SH4_PCICR_PREFIX	  0xA5000000	/* CR prefix for write */
   #define SH4_PCICR_FTO		  0x00000400	/* TRDY/IRDY Enable */
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 97a5fda..b94df40 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -48,47 +48,12 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	int tmp;
-	unsigned int _mask = ~mask;
-
-	__asm__ __volatile__ (
-		"   .align 2              \n\t"
-		"   mova    1f,   r0      \n\t" /* r0 = end point */
-		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
-		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-		"   mov.l  @%1,   %0      \n\t" /* load  old value */
-		"   and     %2,   %0      \n\t" /* add */
-		"   mov.l   %0,   @%1     \n\t" /* store new value */
-		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r"  (v)
-		: "r"   (_mask)
-		: "memory" , "r0", "r1");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	int tmp;
-
-	__asm__ __volatile__ (
-		"   .align 2              \n\t"
-		"   mova    1f,   r0      \n\t" /* r0 = end point */
-		"   mov    r15,   r1      \n\t" /* r1 = saved sp */
-		"   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */
-		"   mov.l  @%1,   %0      \n\t" /* load  old value */
-		"   or      %2,   %0      \n\t" /* or */
-		"   mov.l   %0,   @%1     \n\t" /* store new value */
-		"1: mov     r1,   r15     \n\t" /* LOGOUT */
-		: "=&r" (tmp),
-		  "+r"  (v)
-		: "r"   (mask)
-		: "memory" , "r0", "r1");
-}
-
 #endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index 61d1075..23fcdad 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -37,27 +37,12 @@
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	v->counter |= mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __ASM_SH_ATOMIC_IRQ_H */
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 8575dcc..33d34b1 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -52,37 +52,12 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
-"	and	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-	: "=&z" (tmp)
-	: "r" (~mask), "r" (&v->counter)
-	: "t");
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__ (
-"1:	movli.l @%2, %0		! atomic_set_mask	\n"
-"	or	%1, %0					\n"
-"	movco.l	%0, @%2					\n"
-"	bf	1b					\n"
-	: "=&z" (tmp)
-	: "r" (mask), "r" (&v->counter)
-	: "t");
-}
-
 #endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 728c4c5..93ec906 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -368,6 +368,7 @@
 #endif
 
 #define ioremap_nocache	ioremap
+#define ioremap_uc	ioremap
 #define iounmap		__iounmap
 
 /*
diff --git a/arch/sh/include/asm/switch_to_32.h b/arch/sh/include/asm/switch_to_32.h
index 0c06551..7661b4ba 100644
--- a/arch/sh/include/asm/switch_to_32.h
+++ b/arch/sh/include/asm/switch_to_32.h
@@ -78,6 +78,8 @@
 								\
 	if (is_dsp_enabled(prev))				\
 		__save_dsp(prev);				\
+	if (is_dsp_enabled(next))				\
+		__restore_dsp(next);				\
 								\
 	__ts1 = (u32 *)&prev->thread.sp;			\
 	__ts2 = (u32 *)&prev->thread.pc;			\
@@ -125,10 +127,4 @@
 	last = __last;						\
 } while (0)
 
-#define finish_arch_switch(prev)				\
-do {								\
-	if (is_dsp_enabled(prev))				\
-		__restore_dsp(prev);				\
-} while (0)
-
 #endif /* __ASM_SH_SWITCH_TO_32_H */
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index 0a47bd3..4ca78ed 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -355,13 +355,12 @@
 	return error;
 }
 
-static int sq_dev_remove(struct device *dev, struct subsys_interface *sif)
+static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
 	struct kobject *kobj = sq_kobject[cpu];
 
 	kobject_put(kobj);
-	return 0;
 }
 
 static struct subsys_interface sq_interface = {
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index eb10ff8..6c0378c 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -227,16 +227,17 @@
 	for_each_active_irq(irq) {
 		struct irq_data *data = irq_get_irq_data(irq);
 
-		if (data->node == cpu) {
-			unsigned int newcpu = cpumask_any_and(data->affinity,
+		if (irq_data_get_node(data) == cpu) {
+			struct cpumask *mask = irq_data_get_affinity_mask(data);
+			unsigned int newcpu = cpumask_any_and(mask,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
 				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 						    irq, cpu);
 
-				cpumask_setall(data->affinity);
+				cpumask_setall(mask);
 			}
-			irq_set_affinity(irq, data->affinity);
+			irq_set_affinity(irq, mask);
 		}
 	}
 }
diff --git a/arch/sh/kernel/localtimer.c b/arch/sh/kernel/localtimer.c
index b880a7e..cbb7d46 100644
--- a/arch/sh/kernel/localtimer.c
+++ b/arch/sh/kernel/localtimer.c
@@ -39,11 +39,6 @@
 	irq_exit();
 }
 
-static void dummy_timer_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *clk)
-{
-}
-
 void local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -54,7 +49,6 @@
 				  CLOCK_EVT_FEAT_DUMMY;
 	clk->rating		= 400;
 	clk->mult		= 1;
-	clk->set_mode		= dummy_timer_set_mode;
 	clk->broadcast		= smp_timer_broadcast;
 	clk->cpumask		= cpumask_of(cpu);
 
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 0e69b7e..7dcbebb 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -17,10 +17,12 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
-
 #define ATOMIC_INIT(i)  { (i) }
 
 int atomic_add_return(int, atomic_t *);
+void atomic_and(int, atomic_t *);
+void atomic_or(int, atomic_t *);
+void atomic_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 4082749..917084a 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -33,6 +33,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 809941e..14a9286 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -60,12 +60,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index cc9b04a..62d0354 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -7,16 +7,33 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-		asm_volatile_goto("1:\n\t"
-			 "nop\n\t"
-			 "nop\n\t"
-			 ".pushsection __jump_table,  \"aw\"\n\t"
-			 ".align 4\n\t"
-			 ".word 1b, %l[l_yes], %c0\n\t"
-			 ".popsection \n\t"
-			 : :  "i" (key) : : l_yes);
+	asm_volatile_goto("1:\n\t"
+		 "nop\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 4\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+		 "b %l[l_yes]\n\t"
+		 "nop\n\t"
+		 ".pushsection __jump_table,  \"aw\"\n\t"
+		 ".align 4\n\t"
+		 ".word 1b, %l[l_yes], %c0\n\t"
+		 ".popsection \n\t"
+		 : :  "i" (&((char *)key)[branch]) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/sparc/include/uapi/asm/pstate.h b/arch/sparc/include/uapi/asm/pstate.h
index 4b6b998..cf832e1 100644
--- a/arch/sparc/include/uapi/asm/pstate.h
+++ b/arch/sparc/include/uapi/asm/pstate.h
@@ -88,7 +88,7 @@
 #define VERS_MAXTL	_AC(0x000000000000ff00,UL) /* Max Trap Level.	*/
 #define VERS_MAXWIN	_AC(0x000000000000001f,UL) /* Max RegWindow Idx.*/
 
-/* Compatability Feature Register (%asr26), SPARC-T4 and later  */
+/* Compatibility Feature Register (%asr26), SPARC-T4 and later  */
 #define CFR_AES		_AC(0x0000000000000001,UL) /* Supports AES opcodes     */
 #define CFR_DES		_AC(0x0000000000000002,UL) /* Supports DES opcodes     */
 #define CFR_KASUMI	_AC(0x0000000000000004,UL) /* Supports KASUMI opcodes  */
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4033c23..e22416c 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -210,21 +210,21 @@
 
 static inline unsigned int irq_data_to_handle(struct irq_data *data)
 {
-	struct irq_handler_data *ihd = data->handler_data;
+	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
 
 	return ihd->dev_handle;
 }
 
 static inline unsigned int irq_data_to_ino(struct irq_data *data)
 {
-	struct irq_handler_data *ihd = data->handler_data;
+	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
 
 	return ihd->dev_ino;
 }
 
 static inline unsigned long irq_data_to_sysino(struct irq_data *data)
 {
-	struct irq_handler_data *ihd = data->handler_data;
+	struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data);
 
 	return ihd->sysino;
 }
@@ -370,13 +370,15 @@
 
 static void sun4u_irq_enable(struct irq_data *data)
 {
-	struct irq_handler_data *handler_data = data->handler_data;
+	struct irq_handler_data *handler_data;
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (likely(handler_data)) {
 		unsigned long cpuid, imap, val;
 		unsigned int tid;
 
-		cpuid = irq_choose_cpu(data->irq, data->affinity);
+		cpuid = irq_choose_cpu(data->irq,
+				       irq_data_get_affinity_mask(data));
 		imap = handler_data->imap;
 
 		tid = sun4u_compute_tid(imap, cpuid);
@@ -393,8 +395,9 @@
 static int sun4u_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask, bool force)
 {
-	struct irq_handler_data *handler_data = data->handler_data;
+	struct irq_handler_data *handler_data;
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (likely(handler_data)) {
 		unsigned long cpuid, imap, val;
 		unsigned int tid;
@@ -438,15 +441,17 @@
 
 static void sun4u_irq_eoi(struct irq_data *data)
 {
-	struct irq_handler_data *handler_data = data->handler_data;
+	struct irq_handler_data *handler_data;
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (likely(handler_data))
 		upa_writeq(ICLR_IDLE, handler_data->iclr);
 }
 
 static void sun4v_irq_enable(struct irq_data *data)
 {
-	unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
+	unsigned long cpuid = irq_choose_cpu(data->irq,
+					     irq_data_get_affinity_mask(data));
 	unsigned int ino = irq_data_to_sysino(data);
 	int err;
 
@@ -508,7 +513,7 @@
 	unsigned long cpuid;
 	int err;
 
-	cpuid = irq_choose_cpu(data->irq, data->affinity);
+	cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data));
 
 	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
 	if (err != HV_EOK)
@@ -881,8 +886,8 @@
 		if (desc->action && !irqd_is_per_cpu(data)) {
 			if (data->chip->irq_set_affinity)
 				data->chip->irq_set_affinity(data,
-							     data->affinity,
-							     false);
+					irq_data_get_affinity_mask(data),
+					false);
 		}
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index 48565c1..59bbeff 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -16,7 +16,7 @@
 	u32 val;
 	u32 *insn = (u32 *) (unsigned long) entry->code;
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		s32 off = (s32)entry->target - (s32)entry->code;
 
 #ifdef CONFIG_SPARC64
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 9bbb8f2..0299f05 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -126,7 +126,7 @@
 	int oldcpu, newcpu;
 
 	mask = (unsigned long)data->chip_data;
-	oldcpu = irq_choose_cpu(data->affinity);
+	oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
 	newcpu = irq_choose_cpu(dest);
 
 	if (oldcpu == newcpu)
@@ -149,7 +149,7 @@
 	int cpu;
 
 	mask = (unsigned long)data->chip_data;
-	cpu = irq_choose_cpu(data->affinity);
+	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
 	spin_lock_irqsave(&leon_irq_lock, flags);
 	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
 	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
@@ -162,7 +162,7 @@
 	int cpu;
 
 	mask = (unsigned long)data->chip_data;
-	cpu = irq_choose_cpu(data->affinity);
+	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
 	spin_lock_irqsave(&leon_irq_lock, flags);
 	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
 	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index c928bc6..3a14a35 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -249,7 +249,6 @@
 					 struct pci_bus *bus, int devfn)
 {
 	struct dev_archdata *sd;
-	struct pci_slot *slot;
 	struct platform_device *op;
 	struct pci_dev *dev;
 	const char *type;
@@ -290,10 +289,7 @@
 	dev->multifunction = 0;		/* maybe a lie? */
 	set_pcie_port_type(dev);
 
-	list_for_each_entry(slot, &dev->bus->slots, list)
-		if (PCI_SLOT(dev->devfn) == slot->number)
-			dev->slot = slot;
-
+	pci_dev_assign_slot(dev);
 	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
 	dev->device = of_getintprop_default(node, "device-id", 0xffff);
 	dev->subsystem_vendor =
@@ -918,7 +914,7 @@
 void arch_teardown_msi_irq(unsigned int irq)
 {
 	struct msi_desc *entry = irq_get_msi_desc(irq);
-	struct pci_dev *pdev = entry->dev;
+	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
 	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 
 	if (pbm->teardown_msi_irq)
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 50e7b62..c5113c7 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -333,11 +333,11 @@
 	childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
 
 	/*
-	 * A new process must start with interrupts closed in 2.5,
-	 * because this is how Mingo's scheduler works (see schedule_tail
-	 * and finish_arch_switch). If we do not do it, a timer interrupt hits
-	 * before we unlock, attempts to re-take the rq->lock, and then we die.
-	 * Thus, kpsr|=PSR_PIL.
+	 * A new process must start with interrupts disabled, see schedule_tail()
+	 * and finish_task_switch(). (If we do not do it and if a timer interrupt
+	 * hits before we unlock and attempts to take the rq->lock, we deadlock.)
+	 *
+	 * Thus, kpsr |= PSR_PIL.
 	 */
 	ti->ksp = (unsigned long) new_stack;
 	p->thread.kregs = childregs;
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index a1bb267..a87d0e4 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -188,7 +188,7 @@
 
 static void sun4d_mask_irq(struct irq_data *data)
 {
-	struct sun4d_handler_data *handler_data = data->handler_data;
+	struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
 	unsigned int real_irq;
 #ifdef CONFIG_SMP
 	int cpuid = handler_data->cpuid;
@@ -206,7 +206,7 @@
 
 static void sun4d_unmask_irq(struct irq_data *data)
 {
-	struct sun4d_handler_data *handler_data = data->handler_data;
+	struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data);
 	unsigned int real_irq;
 #ifdef CONFIG_SMP
 	int cpuid = handler_data->cpuid;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 8bb3b3f..da737c7 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -188,9 +188,10 @@
 
 static void sun4m_mask_irq(struct irq_data *data)
 {
-	struct sun4m_handler_data *handler_data = data->handler_data;
+	struct sun4m_handler_data *handler_data;
 	int cpu = smp_processor_id();
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (handler_data->mask) {
 		unsigned long flags;
 
@@ -206,9 +207,10 @@
 
 static void sun4m_unmask_irq(struct irq_data *data)
 {
-	struct sun4m_handler_data *handler_data = data->handler_data;
+	struct sun4m_handler_data *handler_data;
 	int cpu = smp_processor_id();
 
+	handler_data = irq_data_get_irq_handler_data(data);
 	if (handler_data->mask) {
 		unsigned long flags;
 
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index d3408e7..278c40a 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -247,7 +247,7 @@
 
 	ce = &per_cpu(sparc32_clockevent, cpu);
 
-	if (ce->mode & CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(ce))
 		sun4m_clear_profile_irq(cpu);
 	else
 		sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index c9692f3..1affabc 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -101,21 +101,18 @@
 	return IRQ_HANDLED;
 }
 
-static void timer_ce_set_mode(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int timer_ce_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-		case CLOCK_EVT_MODE_PERIODIC:
-		case CLOCK_EVT_MODE_RESUME:
-			timer_ce_enabled = 1;
-			break;
-		case CLOCK_EVT_MODE_SHUTDOWN:
-			timer_ce_enabled = 0;
-			break;
-		default:
-			break;
-	}
+	timer_ce_enabled = 0;
 	smp_mb();
+	return 0;
+}
+
+static int timer_ce_set_periodic(struct clock_event_device *evt)
+{
+	timer_ce_enabled = 1;
+	smp_mb();
+	return 0;
 }
 
 static __init void setup_timer_ce(void)
@@ -127,7 +124,9 @@
 	ce->name     = "timer_ce";
 	ce->rating   = 100;
 	ce->features = CLOCK_EVT_FEAT_PERIODIC;
-	ce->set_mode = timer_ce_set_mode;
+	ce->set_state_shutdown = timer_ce_shutdown;
+	ce->set_state_periodic = timer_ce_set_periodic;
+	ce->tick_resume = timer_ce_set_periodic;
 	ce->cpumask  = cpu_possible_mask;
 	ce->shift    = 32;
 	ce->mult     = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
@@ -183,24 +182,20 @@
 }
 
 #ifdef CONFIG_SMP
-static void percpu_ce_setup(enum clock_event_mode mode,
-			struct clock_event_device *evt)
+static int percpu_ce_shutdown(struct clock_event_device *evt)
 {
 	int cpu = cpumask_first(evt->cpumask);
 
-	switch (mode) {
-		case CLOCK_EVT_MODE_PERIODIC:
-			sparc_config.load_profile_irq(cpu,
-						      SBUS_CLOCK_RATE / HZ);
-			break;
-		case CLOCK_EVT_MODE_ONESHOT:
-		case CLOCK_EVT_MODE_SHUTDOWN:
-		case CLOCK_EVT_MODE_UNUSED:
-			sparc_config.load_profile_irq(cpu, 0);
-			break;
-		default:
-			break;
-	}
+	sparc_config.load_profile_irq(cpu, 0);
+	return 0;
+}
+
+static int percpu_ce_set_periodic(struct clock_event_device *evt)
+{
+	int cpu = cpumask_first(evt->cpumask);
+
+	sparc_config.load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
+	return 0;
 }
 
 static int percpu_ce_set_next_event(unsigned long delta,
@@ -224,7 +219,9 @@
 	ce->name           = "percpu_ce";
 	ce->rating         = 200;
 	ce->features       = features;
-	ce->set_mode       = percpu_ce_setup;
+	ce->set_state_shutdown = percpu_ce_shutdown;
+	ce->set_state_periodic = percpu_ce_set_periodic;
+	ce->set_state_oneshot = percpu_ce_shutdown;
 	ce->set_next_event = percpu_ce_set_next_event;
 	ce->cpumask        = cpumask_of(cpu);
 	ce->shift          = 32;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2e6035c..c69b21e 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -674,32 +674,19 @@
 	return tick_ops->add_compare(delta) ? -ETIME : 0;
 }
 
-static void sparc64_timer_setup(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int sparc64_timer_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		tick_ops->disable_irq();
-		break;
-
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_UNUSED:
-		WARN_ON(1);
-		break;
-	}
+	tick_ops->disable_irq();
+	return 0;
 }
 
 static struct clock_event_device sparc64_clockevent = {
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= sparc64_timer_setup,
-	.set_next_event	= sparc64_next_event,
-	.rating		= 100,
-	.shift		= 30,
-	.irq		= -1,
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= sparc64_timer_shutdown,
+	.set_next_event		= sparc64_next_event,
+	.rating			= 100,
+	.shift			= 30,
+	.irq			= -1,
 };
 static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 71cd65a..b9d63c0 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,22 +27,38 @@
 
 #endif /* SMP */
 
-#define ATOMIC_OP(op, cop)						\
+#define ATOMIC_OP_RETURN(op, c_op)					\
 int atomic_##op##_return(int i, atomic_t *v)				\
 {									\
 	int ret;							\
 	unsigned long flags;						\
 	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
 									\
-	ret = (v->counter cop i);					\
+	ret = (v->counter c_op i);					\
 									\
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
 	return ret;							\
 }									\
 EXPORT_SYMBOL(atomic_##op##_return);
 
-ATOMIC_OP(add, +=)
+#define ATOMIC_OP(op, c_op)						\
+void atomic_##op(int i, atomic_t *v)					\
+{									\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	v->counter c_op i;						\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+}									\
+EXPORT_SYMBOL(atomic_##op);
 
+ATOMIC_OP_RETURN(add, +=)
+ATOMIC_OP(and, &=)
+ATOMIC_OP(or, |=)
+ATOMIC_OP(xor, ^=)
+
+#undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
 int atomic_xchg(atomic_t *v, int new)
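
For reference, the ATOMIC_OP() macro added above emits one void helper per operation, all serialised by the ATOMIC_HASH() spinlock array defined earlier in this file; as an illustrative sketch (not an extra patch hunk), ATOMIC_OP(or, |=) would expand to roughly:

	/* Expansion of ATOMIC_OP(or, |=): sparc32 has no atomic RMW
	 * instructions, so the update runs under a per-address hashed
	 * spinlock instead. */
	void atomic_or(int i, atomic_t *v)
	{
		unsigned long flags;

		spin_lock_irqsave(ATOMIC_HASH(v), flags);
		v->counter |= i;
		spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
	}
	EXPORT_SYMBOL(atomic_or);
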
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 05dac43..d6b0363 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -47,6 +47,9 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
@@ -84,6 +87,9 @@
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 8069ce1..8eb454c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -111,6 +111,9 @@
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 7931eee..f8b9f71 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -807,7 +807,7 @@
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(flen, proglen, pass, image);
+		bpf_jit_dump(flen, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(image, image + proglen);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 9def1f5..2ba12d7 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -32,6 +32,7 @@
 	select EDAC_SUPPORT
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_ARCH_SECCOMP_FILTER
 
 # FIXME: investigate whether we need/want these options.
 #	select HAVE_IOREMAP_PROT
@@ -221,6 +222,22 @@
 	  If enabled, the kernel will support running TILE-Gx binaries
 	  that were built with the -m32 option.
 
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl, it cannot be disabled and the task is only
+	  allowed to execute a few safe syscalls defined by each seccomp
+	  mode.
+
+	  If unsure, say N.
+
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
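
The help text above describes strict-mode seccomp from the application's side; a minimal user-space sketch of opting in through the standard prctl(2) interface (PR_SET_SECCOMP/SECCOMP_MODE_STRICT, not tile-specific) could look like this:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/prctl.h>
	#include <linux/seccomp.h>

	int main(void)
	{
		/* After this call only read(), write(), _exit() and sigreturn()
		 * are permitted; any other syscall terminates the task. */
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
			perror("prctl(PR_SET_SECCOMP)");
			return 1;
		}
		write(STDOUT_FILENO, "sandboxed\n", 10);
		_exit(0);
	}
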
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d8a8431..ba35c41 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -28,6 +28,7 @@
 generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += seccomp.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fa..d320ce2 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op)							\
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	_atomic_##op((unsigned long *)&v->counter, i);			\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op)						\
+long long _atomic64_##op(long long *v, long long n);		\
+static inline void atomic64_##op(long long i, atomic64_t *v)	\
+{								\
+	_atomic64_##op(&v->counter, i);				\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970..096a56d 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 41d9878..c505d77 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -22,6 +22,7 @@
 #include <arch/chip.h>
 
 #include <linux/ptrace.h>
+#include <linux/elf-em.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
@@ -30,9 +31,6 @@
 #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-#define EM_TILEPRO 188
-#define EM_TILEGX  191
-
 /* Provide a nominal data structure. */
 #define ELF_NFPREG	0
 typedef double elf_fpreg_t;
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index dc61de1..322b5fe 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -55,6 +55,7 @@
 #define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_wt(physaddr, size)		ioremap(physaddr, size)
+#define ioremap_uc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)
 
 #define mmiowb()
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
index b8f888c..34ee727 100644
--- a/arch/tile/include/asm/switch_to.h
+++ b/arch/tile/include/asm/switch_to.h
@@ -53,15 +53,13 @@
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
  * threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
  */
-#define finish_arch_switch(prev) do {                                     \
-	if (unlikely((prev)->state == TASK_DEAD))                         \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
+#define finish_arch_post_lock_switch() do {                               \
 	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
 		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
 	if (current->mm == NULL && !kstack_hash &&                        \
-	    current_thread_info()->homecache_cpu != smp_processor_id())   \
+	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread();                              \
 } while (0)
 
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
index 9644b88..373d730 100644
--- a/arch/tile/include/asm/syscall.h
+++ b/arch/tile/include/asm/syscall.h
@@ -20,6 +20,8 @@
 
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/audit.h>
+#include <linux/compat.h>
 #include <arch/abi.h>
 
 /* The array of function pointers for syscalls. */
@@ -61,7 +63,15 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->regs[0] = (long) error ?: val;
+	if (error) {
+		/* R0 is the passed-in negative error, R1 is positive. */
+		regs->regs[0] = error;
+		regs->regs[1] = -error;
+	} else {
+		/* R1 set to zero to indicate no error. */
+		regs->regs[0] = val;
+		regs->regs[1] = 0;
+	}
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
@@ -82,4 +92,20 @@
 	memcpy(&regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * tile has the same system calls on both little- and big-endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_TILEGX32;
+
+#ifdef CONFIG_TILEGX
+	return AUDIT_ARCH_TILEGX;
+#else
+	return AUDIT_ARCH_TILEPRO;
+#endif
+}
+
 #endif	/* _ASM_TILE_SYSCALL_H */
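
The new return-value convention above leaves the negative error in R0 and its positive counterpart in R1, with R1 cleared on success. A hypothetical caller-side check, mirroring what a libc syscall stub might do (the helper name is illustrative only), is sketched below:

	#include <errno.h>

	/* Illustrative decode of the tile R0/R1 syscall return pair:
	 * R1 == 0 means success and R0 carries the return value;
	 * R1 != 0 means failure and R1 carries the positive errno. */
	static inline long tile_syscall_result(long r0, long r1)
	{
		if (r1 != 0) {
			errno = (int)r1;
			return -1;
		}
		return r0;
	}
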
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
index d76ff2d..9e46eaa 100644
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -830,11 +830,11 @@
   ADDX_RRR_0_OPCODE_X0 = 2,
   ADDX_RRR_0_OPCODE_X1 = 2,
   ADDX_RRR_0_OPCODE_Y0 = 0,
-  ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+  ADDX_RRR_0_OPCODE_Y1 = 0,
   ADD_RRR_0_OPCODE_X0 = 3,
   ADD_RRR_0_OPCODE_X1 = 3,
   ADD_RRR_0_OPCODE_Y0 = 1,
-  ADD_SPECIAL_0_OPCODE_Y1 = 1,
+  ADD_RRR_0_OPCODE_Y1 = 1,
   ANDI_IMM8_OPCODE_X0 = 3,
   ANDI_IMM8_OPCODE_X1 = 3,
   ANDI_OPCODE_Y0 = 2,
@@ -995,6 +995,7 @@
   LD4U_ADD_IMM8_OPCODE_X1 = 12,
   LD4U_OPCODE_Y2 = 2,
   LD4U_UNARY_OPCODE_X1 = 20,
+  LDNA_ADD_IMM8_OPCODE_X1 = 21,
   LDNA_UNARY_OPCODE_X1 = 21,
   LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
   LDNT1S_UNARY_OPCODE_X1 = 22,
@@ -1015,7 +1016,6 @@
   LD_UNARY_OPCODE_X1 = 29,
   LNK_UNARY_OPCODE_X1 = 30,
   LNK_UNARY_OPCODE_Y1 = 14,
-  LWNA_ADD_IMM8_OPCODE_X1 = 21,
   MFSPR_IMM8_OPCODE_X1 = 22,
   MF_UNARY_OPCODE_X1 = 31,
   MM_BF_OPCODE_X0 = 7,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index cdbda45..fbbe2ea 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1224,6 +1224,7 @@
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	blz     r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 800b91d..58964d2 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1247,6 +1247,7 @@
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	bltz    r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index b1df847..b3f73fd 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -304,11 +304,12 @@
  * to Linux which just calls handle_level_irq() after clearing the
  * MAC INTx Assert status bit associated with this interrupt.
  */
-static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct pci_controller *controller = irq_desc_get_handler_data(desc);
 	gxio_trio_context_t *trio_context = controller->trio;
 	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	int mac = controller->mac;
 	unsigned int reg_offset;
 	uint64_t level_mask;
@@ -1442,7 +1443,7 @@
 /* MSI support starts here. */
 static unsigned int tilegx_msi_startup(struct irq_data *d)
 {
-	if (d->msi_desc)
+	if (irq_data_get_msi_desc(d))
 		pci_msi_unmask_irq(d);
 
 	return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index a452137..7d57693 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -446,6 +446,11 @@
 	hardwall_switch_tasks(prev, next);
 #endif
 
+	/* Notify the simulator of task exit. */
+	if (unlikely(prev->state == TASK_DEAD))
+		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+			     (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+
 	/*
 	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index f84eed8..bdc126f 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -262,6 +262,9 @@
 	if (work & _TIF_NOHZ)
 		user_exit();
 
+	if (secure_computing() == -1)
+		return -1;
+
 	if (work & _TIF_SYSCALL_TRACE) {
 		if (tracehook_report_syscall_entry(regs))
 			regs->regs[TREG_SYSCALL_NR] = -1;
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index a3ed12f..825867c 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -198,16 +198,13 @@
 	return err;
 }
 
-static int hv_stats_device_remove(struct device *dev,
-				  struct subsys_interface *sif)
+static void hv_stats_device_remove(struct device *dev,
+				   struct subsys_interface *sif)
 {
 	int cpu = dev->id;
 
-	if (!cpu_online(cpu))
-		return 0;
-
-	sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
-	return 0;
+	if (cpu_online(cpu))
+		sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
 }
 
 
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 00178ec..178989e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -140,10 +140,10 @@
  * Whenever anyone tries to change modes, we just mask interrupts
  * and wait for the next event to get set.
  */
-static void tile_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int tile_timer_shutdown(struct clock_event_device *evt)
 {
 	arch_local_irq_mask_now(INT_TILE_TIMER);
+	return 0;
 }
 
 /*
@@ -157,7 +157,9 @@
 	.rating = 100,
 	.irq = -1,
 	.set_next_event = tile_timer_set_next_event,
-	.set_mode = tile_timer_set_mode,
+	.set_state_shutdown = tile_timer_shutdown,
+	.set_state_oneshot = tile_timer_shutdown,
+	.tick_resume = tile_timer_shutdown,
 };
 
 void setup_tile_timer(void)
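
The tile timer above follows the same clockevents conversion as the sparc, um and unicore32 timers earlier in this series: the single set_mode() callback and its switch on enum clock_event_mode are replaced by per-state callbacks that return int. A minimal sketch of the new-style registration, with hypothetical my_timer_* hardware hooks standing in for the driver-specific mask/program code:

	#include <linux/clockchips.h>

	/* Hypothetical hardware hooks; real drivers mask a timer interrupt
	 * or program a period register here. */
	static void my_timer_hw_stop(void) { }
	static void my_timer_hw_start_periodic(void) { }

	static int my_timer_shutdown(struct clock_event_device *evt)
	{
		my_timer_hw_stop();		/* was CLOCK_EVT_MODE_SHUTDOWN */
		return 0;
	}

	static int my_timer_set_periodic(struct clock_event_device *evt)
	{
		my_timer_hw_start_periodic();	/* was CLOCK_EVT_MODE_PERIODIC */
		return 0;
	}

	static struct clock_event_device my_timer_clockevent = {
		.name			= "my_timer",
		.features		= CLOCK_EVT_FEAT_PERIODIC,
		.rating			= 100,
		/* One callback per state instead of a mode switch. */
		.set_state_shutdown	= my_timer_shutdown,
		.set_state_periodic	= my_timer_set_periodic,
		.tick_resume		= my_timer_set_periodic,
	};
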
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
index a025f63..c54fff3 100644
--- a/arch/tile/kernel/vdso/Makefile
+++ b/arch/tile/kernel/vdso/Makefile
@@ -54,7 +54,7 @@
 $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-                            $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+                            $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 SYSCFLAGS_vdso_syms.o = -r
 $(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
 	$(call if_changed,vdsold)
@@ -113,6 +113,6 @@
 $(obj)/vdso32.o: $(obj)/vdso32.so
 
 SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
-			    $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			    $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 $(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
 	$(call if_changed,vdsold)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211..298df1e 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda313..f611265 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp     lr              /* happy backtracer */
 
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 83a91f9..35ab97e 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -22,7 +22,8 @@
 extern unsigned long alloc_stack(int order, int atomic);
 extern void free_stack(unsigned long stack, int order);
 
-extern int do_signal(void);
+struct pt_regs;
+extern void do_signal(struct pt_regs *regs);
 extern void interrupt_end(void);
 extern void relay_signal(int sig, struct siginfo *si, struct uml_pt_regs *regs);
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 68b9119..a6d9226 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -90,12 +90,14 @@
 
 void interrupt_end(void)
 {
+	struct pt_regs *regs = &current->thread.regs;
+
 	if (need_resched())
 		schedule();
 	if (test_thread_flag(TIF_SIGPENDING))
-		do_signal();
+		do_signal(regs);
 	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
-		tracehook_notify_resume(&current->thread.regs);
+		tracehook_notify_resume(regs);
 }
 
 void exit_thread(void)
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 4f60e4a..57acbd6 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -64,7 +64,7 @@
 	signal_setup_done(err, ksig, singlestep);
 }
 
-static int kern_do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
 {
 	struct ksignal ksig;
 	int handled_sig = 0;
@@ -110,10 +110,4 @@
 	 */
 	if (!handled_sig)
 		restore_saved_sigmask();
-	return handled_sig;
-}
-
-int do_signal(void)
-{
-	return kern_do_signal(&current->thread.regs);
 }
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 117568d..5af441e 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -22,23 +22,16 @@
 	local_irq_restore(flags);
 }
 
-static void itimer_set_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evt)
+static int itimer_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		set_interval();
-		break;
+	disable_timer();
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_ONESHOT:
-		disable_timer();
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+static int itimer_set_periodic(struct clock_event_device *evt)
+{
+	set_interval();
+	return 0;
 }
 
 static int itimer_next_event(unsigned long delta,
@@ -48,14 +41,17 @@
 }
 
 static struct clock_event_device itimer_clockevent = {
-	.name		= "itimer",
-	.rating		= 250,
-	.cpumask	= cpu_all_mask,
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= itimer_set_mode,
-	.set_next_event = itimer_next_event,
-	.shift		= 32,
-	.irq		= 0,
+	.name			= "itimer",
+	.rating			= 250,
+	.cpumask		= cpu_all_mask,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= itimer_shutdown,
+	.set_state_periodic	= itimer_set_periodic,
+	.set_state_oneshot	= itimer_shutdown,
+	.set_next_event		= itimer_next_event,
+	.shift			= 32,
+	.irq			= 0,
 };
 
 static irqreturn_t um_timer(int irq, void *dev)
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index f1b3eb1..2077248 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -291,7 +291,7 @@
 		/* We are under mmap_sem, release it such that current can terminate */
 		up_write(&current->mm->mmap_sem);
 		force_sig(SIGKILL, current);
-		do_signal();
+		do_signal(&current->thread.regs);
 	}
 }
 
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 557232f..d8a9fce 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -173,7 +173,7 @@
 void fatal_sigsegv(void)
 {
 	force_sigsegv(SIGSEGV, current);
-	do_signal();
+	do_signal(&current->thread.regs);
 	/*
 	 * This is to tell gcc that we're not returning - do_signal
 	 * can, in general, return, but in this case, it's not, since
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c
index 0be5ccd..c53729d 100644
--- a/arch/unicore32/kernel/irq.c
+++ b/arch/unicore32/kernel/irq.c
@@ -112,10 +112,9 @@
  * irq_controller_lock held, and IRQs disabled.  Decode the IRQ
  * and call the handler.
  */
-static void
-puv3_gpio_handler(unsigned int irq, struct irq_desc *desc)
+static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	unsigned int mask;
+	unsigned int mask, irq;
 
 	mask = readl(GPIO_GEDR);
 	do {
diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c
index d3824b2..ac4c544 100644
--- a/arch/unicore32/kernel/time.c
+++ b/arch/unicore32/kernel/time.c
@@ -46,29 +46,20 @@
 	return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
 }
 
-static void
-puv3_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c)
+static int puv3_osmr0_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		writel(readl(OST_OIER) & ~OST_OIER_E0, OST_OIER);
-		writel(readl(OST_OSSR) & ~OST_OSSR_M0, OST_OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	}
+	writel(readl(OST_OIER) & ~OST_OIER_E0, OST_OIER);
+	writel(readl(OST_OSSR) & ~OST_OSSR_M0, OST_OSSR);
+	return 0;
 }
 
 static struct clock_event_device ckevt_puv3_osmr0 = {
-	.name		= "osmr0",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.set_next_event	= puv3_osmr0_set_next_event,
-	.set_mode	= puv3_osmr0_set_mode,
+	.name			= "osmr0",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.set_next_event		= puv3_osmr0_set_next_event,
+	.set_state_shutdown	= puv3_osmr0_shutdown,
+	.set_state_oneshot	= puv3_osmr0_shutdown,
 };
 
 static cycle_t puv3_read_oscr(struct clocksource *cs)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b3a1a5d..48f7433 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -133,7 +133,7 @@
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_UID16			if X86_32
+	select HAVE_UID16			if X86_32 || IA32_EMULATION
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_USER_RETURN_NOTIFIER
 	select IRQ_FORCED_THREADING
@@ -955,6 +955,7 @@
 
 config X86_MCE
 	bool "Machine Check / overheating reporting"
+	select GENERIC_ALLOCATOR
 	default y
 	---help---
 	  Machine Check support allows the processor to notify the
@@ -1002,19 +1003,41 @@
 	def_bool y
 	depends on X86_MCE_INTEL
 
-config VM86
-	bool "Enable VM86 support" if EXPERT
-	default y
+config X86_LEGACY_VM86
+	bool "Legacy VM86 support (obsolete)"
+	default n
 	depends on X86_32
 	---help---
-	  This option is required by programs like DOSEMU to run
-	  16-bit real mode legacy code on x86 processors. It also may
-	  be needed by software like XFree86 to initialize some video
-	  cards via BIOS. Disabling this option saves about 6K.
+	  This option allows user programs to put the CPU into V8086
+	  mode, which is an 80286-era approximation of 16-bit real mode.
+
+	  Some very old versions of X and/or vbetool require this option
+	  for user mode setting.  Similarly, DOSEMU will use it if
+	  available to accelerate real mode DOS programs.  However, any
+	  recent version of DOSEMU, X, or vbetool should be fully
+	  functional even without kernel VM86 support, as they will all
+	  fall back to (pretty well performing) software emulation.
+
+	  Anything that works on a 64-bit kernel is unlikely to need
+	  this option, as 64-bit kernels don't, and can't, support V8086
+	  mode.  This option is also unrelated to 16-bit protected mode
+	  and is not needed to run most 16-bit programs under Wine.
+
+	  Enabling this option adds considerable attack surface to the
+	  kernel and slows down system calls and exception handling.
+
+	  Unless you use very old userspace or need the last drop of
+	  performance in your real mode DOS games and can't use KVM,
+	  say N here.
+
+config VM86
+	bool
+	default X86_LEGACY_VM86
 
 config X86_16BIT
 	bool "Enable support for 16-bit segments" if EXPERT
 	default y
+	depends on MODIFY_LDT_SYSCALL
 	---help---
 	  This option is required by programs like Wine to run 16-bit
 	  protected mode legacy code on x86 processors.  Disabling
@@ -1509,6 +1532,7 @@
 
 config MATH_EMULATION
 	bool
+	depends on MODIFY_LDT_SYSCALL
 	prompt "Math emulation" if X86_32
 	---help---
 	  Linux can emulate a math coprocessor (used for floating point
@@ -2053,6 +2077,22 @@
 	  This is used to work around broken boot loaders.  This should
 	  be set to 'N' under normal conditions.
 
+config MODIFY_LDT_SYSCALL
+	bool "Enable the LDT (local descriptor table)" if EXPERT
+	default y
+	---help---
+	  Linux can allow user programs to install a per-process x86
+	  Local Descriptor Table (LDT) using the modify_ldt(2) system
+	  call.  This is required to run 16-bit or segmented code such as
+	  DOSEMU or some Wine programs.  It is also used by some very old
+	  threading libraries.
+
+	  Enabling this feature adds a small amount of overhead to
+	  context switches and increases the low-level kernel attack
+	  surface.  Disabling it removes the modify_ldt(2) system call.
+
+	  Saying 'N' here may make sense for embedded or server kernels.
+
 source "kernel/livepatch/Kconfig"
 
 endmenu
@@ -2522,7 +2562,7 @@
 	depends on X86_64
 	select BINFMT_ELF
 	select COMPAT_BINFMT_ELF
-	select HAVE_UID16
+	select ARCH_WANT_OLD_COMPAT_IPC
 	---help---
 	  Include code to run legacy 32-bit programs under a
 	  64-bit kernel. You should likely turn this on, unless you're
@@ -2536,7 +2576,7 @@
 
 config X86_X32
 	bool "x32 ABI for 64-bit mode"
-	depends on X86_64 && IA32_EMULATION
+	depends on X86_64
 	---help---
 	  Include code to run binaries for the x32 native 32-bit ABI
 	  for 64-bit processors.  An x32 process gets access to the
@@ -2550,7 +2590,6 @@
 config COMPAT
 	def_bool y
 	depends on IA32_EMULATION || X86_X32
-	select ARCH_WANT_OLD_COMPAT_IPC
 
 if COMPAT
 config COMPAT_FOR_U64_ALIGNMENT
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 118e6de..747860c 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -39,6 +39,16 @@
         LDFLAGS_vmlinux := --emit-relocs
 endif
 
+#
+# Prevent GCC from generating any FP code by mistake.
+#
+# This must happen before we try the -mpreferred-stack-boundary, see:
+#
+#    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
+#
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
+KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+
 ifeq ($(CONFIG_X86_32),y)
         BITS := 32
         UTS_MACHINE := i386
@@ -167,9 +177,6 @@
 KBUILD_CFLAGS += -Wno-sign-compare
 #
 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
-# prevent gcc from generating any FP code by mistake
-KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
-KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
 
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
@@ -212,6 +219,8 @@
 
 drivers-$(CONFIG_FB) += arch/x86/video/
 
+drivers-$(CONFIG_RAS) += arch/x86/ras/
+
 ####
 # boot loader support. Several targets are kept for legacy purposes
 
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 57bbf2f..0d553e5 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -23,7 +23,7 @@
 subdir-		:= compressed
 
 setup-y		+= a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
-setup-y		+= early_serial_console.o edd.o header.o main.o mca.o memory.o
+setup-y		+= early_serial_console.o edd.o header.o main.o memory.o
 setup-y		+= pm.o pmjump.o printf.o regs.o string.o tty.o video.o
 setup-y		+= video-mode.o version.o
 setup-$(CONFIG_X86_APM_BOOT) += apm.o
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index bd49ec6..0033e96 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -307,9 +307,6 @@
 /* header.S */
 void __attribute__((noreturn)) die(void);
 
-/* mca.c */
-int query_mca(void);
-
 /* memory.c */
 int detect_memory(void);
 
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index d7b1f65..6a9b96b 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -82,7 +82,7 @@
 
 	if (has_cpuflag(X86_FEATURE_TSC)) {
 		debug_putstr(" RDTSC");
-		rdtscll(raw);
+		raw = rdtsc();
 
 		random ^= raw;
 		use_i8254 = false;
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 7d69afd..ee1b6d3 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1041,7 +1041,6 @@
 struct boot_params *make_boot_params(struct efi_config *c)
 {
 	struct boot_params *boot_params;
-	struct sys_desc_table *sdt;
 	struct apm_bios_info *bi;
 	struct setup_header *hdr;
 	struct efi_info *efi;
@@ -1089,7 +1088,6 @@
 	hdr = &boot_params->hdr;
 	efi = &boot_params->efi_info;
 	bi = &boot_params->apm_bios_info;
-	sdt = &boot_params->sys_desc_table;
 
 	/* Copy the second sector to boot_params */
 	memcpy(&hdr->jump, image->image_base + 512, 512);
@@ -1118,8 +1116,6 @@
 	/* Clear APM BIOS info */
 	memset(bi, 0, sizeof(*bi));
 
-	memset(sdt, 0, sizeof(*sdt));
-
 	status = efi_parse_options(cmdline_ptr);
 	if (status != EFI_SUCCESS)
 		goto fail2;
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index a107b93..f637979 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -220,6 +220,23 @@
 	outb(0xff & (pos >> 1), vidport+1);
 }
 
+void __puthex(unsigned long value)
+{
+	char alpha[2] = "0";
+	int bits;
+
+	for (bits = sizeof(value) * 8 - 4; bits >= 0; bits -= 4) {
+		unsigned long digit = (value >> bits) & 0xf;
+
+		if (digit < 0xA)
+			alpha[0] = '0' + digit;
+		else
+			alpha[0] = 'a' + (digit - 0xA);
+
+		__putstr(alpha);
+	}
+}
+
 static void error(char *x)
 {
 	error_putstr("\n\n");
@@ -399,6 +416,13 @@
 	free_mem_ptr     = heap;	/* Heap */
 	free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
+	/* Report initial kernel position details. */
+	debug_putaddr(input_data);
+	debug_putaddr(input_len);
+	debug_putaddr(output);
+	debug_putaddr(output_len);
+	debug_putaddr(run_size);
+
 	/*
 	 * The memory hole needed for the kernel is the larger of either
 	 * the entire decompressed kernel plus relocation table, or the
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 805d25c..3783dc3 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -34,16 +34,27 @@
 extern memptr free_mem_end_ptr;
 extern struct boot_params *real_mode;		/* Pointer to real-mode data */
 void __putstr(const char *s);
+void __puthex(unsigned long value);
 #define error_putstr(__x)  __putstr(__x)
+#define error_puthex(__x)  __puthex(__x)
 
 #ifdef CONFIG_X86_VERBOSE_BOOTUP
 
 #define debug_putstr(__x)  __putstr(__x)
+#define debug_puthex(__x)  __puthex(__x)
+#define debug_putaddr(__x) { \
+		debug_putstr(#__x ": 0x"); \
+		debug_puthex((unsigned long)(__x)); \
+		debug_putstr("\n"); \
+	}
 
 #else
 
 static inline void debug_putstr(const char *s)
 { }
+static inline void debug_puthex(unsigned long value)
+{ }
+#define debug_putaddr(x) /* */
 
 #endif
 
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index fd6c9f2..9bcea38 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -161,9 +161,6 @@
 	/* Set keyboard repeat rate (why?) and query the lock flags */
 	keyboard_init();
 
-	/* Query MCA information */
-	query_mca();
-
 	/* Query Intel SpeedStep (IST) information */
 	query_ist();
 
diff --git a/arch/x86/boot/mca.c b/arch/x86/boot/mca.c
deleted file mode 100644
index a95a531..0000000
--- a/arch/x86/boot/mca.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- *   Copyright (C) 1991, 1992 Linus Torvalds
- *   Copyright 2007 rPath, Inc. - All Rights Reserved
- *   Copyright 2009 Intel Corporation; author H. Peter Anvin
- *
- *   This file is part of the Linux kernel, and is made available under
- *   the terms of the GNU General Public License version 2.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * Get the MCA system description table
- */
-
-#include "boot.h"
-
-int query_mca(void)
-{
-	struct biosregs ireg, oreg;
-	u16 len;
-
-	initregs(&ireg);
-	ireg.ah = 0xc0;
-	intcall(0x15, &ireg, &oreg);
-
-	if (oreg.eflags & X86_EFLAGS_CF)
-		return -1;	/* No MCA present */
-
-	set_fs(oreg.es);
-	len = rdfs16(oreg.bx);
-
-	if (len > sizeof(boot_params.sys_desc_table))
-		len = sizeof(boot_params.sys_desc_table);
-
-	copy_from_fs(&boot_params.sys_desc_table, oreg.bx, len);
-	return 0;
-}
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index aaa1118..028be48 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -23,6 +23,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 05630df..cb5b3ab 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -22,6 +22,7 @@
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 5a4a089..9a2838c 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
 obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
 obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
 obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -30,6 +31,7 @@
 obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
 obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
+obj-$(CONFIG_CRYPTO_POLY1305_X86_64) += poly1305-x86_64.o
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
@@ -60,6 +62,7 @@
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
 twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
 salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
+chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
 serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
 
 ifeq ($(avx_supported),yes)
@@ -75,6 +78,7 @@
 
 ifeq ($(avx2_supported),yes)
 	camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
+	chacha20-x86_64-y += chacha20-avx2-x86_64.o
 	serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
 endif
 
@@ -82,8 +86,10 @@
 aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
+poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o
 ifeq ($(avx2_supported),yes)
 sha1-ssse3-y += sha1_avx2_x86_64_asm.o
+poly1305-x86_64-y += poly1305-avx2-x86_64.o
 endif
 crc32c-intel-y := crc32c-intel_glue.o
 crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index dccad38..3633ad6 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -803,10 +803,7 @@
 		return PTR_ERR(cryptd_tfm);
 
 	*ctx = cryptd_tfm;
-	crypto_aead_set_reqsize(
-		aead,
-		sizeof(struct aead_request) +
-		crypto_aead_reqsize(&cryptd_tfm->base));
+	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
 	return 0;
 }
 
@@ -955,8 +952,8 @@
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
 	/* sequence numbers We need to have the AAD length equal */
-	/* to 8 or 12 bytes */
-	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+	/* to 16 or 20 bytes */
+	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 		return -EINVAL;
 
 	/* IV below built */
@@ -992,9 +989,9 @@
 	}
 
 	kernel_fpu_begin();
-	aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
-		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
-		+ ((unsigned long)req->cryptlen), auth_tag_len);
+	aesni_gcm_enc_tfm(aes_ctx, dst, src, req->cryptlen, iv,
+			  ctx->hash_subkey, assoc, req->assoclen - 8,
+			  dst + req->cryptlen, auth_tag_len);
 	kernel_fpu_end();
 
 	/* The authTag (aka the Integrity Check Value) needs to be written
@@ -1033,12 +1030,12 @@
 	struct scatter_walk dst_sg_walk;
 	unsigned int i;
 
-	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+	if (unlikely(req->assoclen != 16 && req->assoclen != 20))
 		return -EINVAL;
 
 	/* Assuming we are supporting rfc4106 64-bit extended */
 	/* sequence numbers We need to have the AAD length */
-	/* equal to 8 or 12 bytes */
+	/* equal to 16 or 20 bytes */
 
 	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
 	/* IV below built */
@@ -1075,8 +1072,8 @@
 
 	kernel_fpu_begin();
 	aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
-		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
-		authTag, auth_tag_len);
+			  ctx->hash_subkey, assoc, req->assoclen - 8,
+			  authTag, auth_tag_len);
 	kernel_fpu_end();
 
 	/* Compare generated tag with passed in tag. */
@@ -1105,19 +1102,12 @@
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
-	struct aead_request *subreq = aead_request_ctx(req);
 
-	aead_request_set_tfm(subreq, irq_fpu_usable() ?
-				     cryptd_aead_child(cryptd_tfm) :
-				     &cryptd_tfm->base);
+	aead_request_set_tfm(req, irq_fpu_usable() ?
+				  cryptd_aead_child(cryptd_tfm) :
+				  &cryptd_tfm->base);
 
-	aead_request_set_callback(subreq, req->base.flags,
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst,
-			       req->cryptlen, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	return crypto_aead_encrypt(subreq);
+	return crypto_aead_encrypt(req);
 }
 
 static int rfc4106_decrypt(struct aead_request *req)
@@ -1125,19 +1115,12 @@
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
 	struct cryptd_aead *cryptd_tfm = *ctx;
-	struct aead_request *subreq = aead_request_ctx(req);
 
-	aead_request_set_tfm(subreq, irq_fpu_usable() ?
-				     cryptd_aead_child(cryptd_tfm) :
-				     &cryptd_tfm->base);
+	aead_request_set_tfm(req, irq_fpu_usable() ?
+				  cryptd_aead_child(cryptd_tfm) :
+				  &cryptd_tfm->base);
 
-	aead_request_set_callback(subreq, req->base.flags,
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst,
-			       req->cryptlen, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	return crypto_aead_decrypt(subreq);
+	return crypto_aead_decrypt(req);
 }
 #endif
 
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
new file mode 100644
index 0000000..16694e6
--- /dev/null
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -0,0 +1,443 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 32
+
+ROT8:	.octa 0x0e0d0c0f0a09080b0605040702010003
+	.octa 0x0e0d0c0f0a09080b0605040702010003
+ROT16:	.octa 0x0d0c0f0e09080b0a0504070601000302
+	.octa 0x0d0c0f0e09080b0a0504070601000302
+CTRINC:	.octa 0x00000003000000020000000100000000
+	.octa 0x00000007000000060000000500000004
+
+.text
+
+ENTRY(chacha20_8block_xor_avx2)
+	# %rdi: Input state matrix, s
+	# %rsi: 8 data blocks output, o
+	# %rdx: 8 data blocks input, i
+
+	# This function encrypts eight consecutive ChaCha20 blocks by loading
+	# the state matrix in AVX registers eight times. As we need some
+	# scratch registers, we save the first four registers on the stack. The
+	# algorithm performs each operation on the corresponding word of each
+	# state matrix, hence requires no word shuffling. For the final XOR step
+	# we transpose the matrix by interleaving 32-, 64- and then 128-bit
+	# words, which allows us to do XOR in AVX registers. 8/16-bit word
+	# rotation is done with the slightly better performing byte shuffling,
+	# 7/12-bit word rotation uses traditional shift+OR.
+
+	vzeroupper
+	# 4 * 32 byte stack, 32-byte aligned
+	mov		%rsp, %r8
+	and		$~31, %rsp
+	sub		$0x80, %rsp
+
+	# x0..15[0-7] = s[0..15]
+	vpbroadcastd	0x00(%rdi),%ymm0
+	vpbroadcastd	0x04(%rdi),%ymm1
+	vpbroadcastd	0x08(%rdi),%ymm2
+	vpbroadcastd	0x0c(%rdi),%ymm3
+	vpbroadcastd	0x10(%rdi),%ymm4
+	vpbroadcastd	0x14(%rdi),%ymm5
+	vpbroadcastd	0x18(%rdi),%ymm6
+	vpbroadcastd	0x1c(%rdi),%ymm7
+	vpbroadcastd	0x20(%rdi),%ymm8
+	vpbroadcastd	0x24(%rdi),%ymm9
+	vpbroadcastd	0x28(%rdi),%ymm10
+	vpbroadcastd	0x2c(%rdi),%ymm11
+	vpbroadcastd	0x30(%rdi),%ymm12
+	vpbroadcastd	0x34(%rdi),%ymm13
+	vpbroadcastd	0x38(%rdi),%ymm14
+	vpbroadcastd	0x3c(%rdi),%ymm15
+	# x0..3 on stack
+	vmovdqa		%ymm0,0x00(%rsp)
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		%ymm2,0x40(%rsp)
+	vmovdqa		%ymm3,0x60(%rsp)
+
+	vmovdqa		CTRINC(%rip),%ymm1
+	vmovdqa		ROT8(%rip),%ymm2
+	vmovdqa		ROT16(%rip),%ymm3
+
+	# x12 += counter values 0-3
+	vpaddd		%ymm1,%ymm12,%ymm12
+
+	mov		$10,%ecx
+
+.Ldoubleround8:
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+	vpaddd		0x00(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm3,%ymm12,%ymm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+	vpaddd		0x20(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm3,%ymm13,%ymm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+	vpaddd		0x40(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm3,%ymm14,%ymm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+	vpaddd		0x60(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm3,%ymm15,%ymm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+	vpaddd		%ymm12,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm4,%ymm4
+	vpslld		$12,%ymm4,%ymm0
+	vpsrld		$20,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+	vpaddd		%ymm13,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm5,%ymm5
+	vpslld		$12,%ymm5,%ymm0
+	vpsrld		$20,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+	vpaddd		%ymm14,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm6,%ymm6
+	vpslld		$12,%ymm6,%ymm0
+	vpsrld		$20,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+	vpaddd		%ymm15,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm7,%ymm7
+	vpslld		$12,%ymm7,%ymm0
+	vpsrld		$20,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+	vpaddd		0x00(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm2,%ymm12,%ymm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+	vpaddd		0x20(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm2,%ymm13,%ymm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+	vpaddd		0x40(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm2,%ymm14,%ymm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+	vpaddd		0x60(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm2,%ymm15,%ymm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+	vpaddd		%ymm12,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm4,%ymm4
+	vpslld		$7,%ymm4,%ymm0
+	vpsrld		$25,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+	vpaddd		%ymm13,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm5,%ymm5
+	vpslld		$7,%ymm5,%ymm0
+	vpsrld		$25,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+	vpaddd		%ymm14,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm6,%ymm6
+	vpslld		$7,%ymm6,%ymm0
+	vpsrld		$25,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+	vpaddd		%ymm15,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm7,%ymm7
+	vpslld		$7,%ymm7,%ymm0
+	vpsrld		$25,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+	vpaddd		0x00(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm3,%ymm15,%ymm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+	vpaddd		0x20(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm3,%ymm12,%ymm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+	vpaddd		0x40(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm3,%ymm13,%ymm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+	vpaddd		0x60(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm3,%ymm14,%ymm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+	vpaddd		%ymm15,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm5,%ymm5
+	vpslld		$12,%ymm5,%ymm0
+	vpsrld		$20,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+	vpaddd		%ymm12,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm6,%ymm6
+	vpslld		$12,%ymm6,%ymm0
+	vpsrld		$20,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+	vpaddd		%ymm13,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm7,%ymm7
+	vpslld		$12,%ymm7,%ymm0
+	vpsrld		$20,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+	vpaddd		%ymm14,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm4,%ymm4
+	vpslld		$12,%ymm4,%ymm0
+	vpsrld		$20,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+	vpaddd		0x00(%rsp),%ymm5,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpxor		%ymm0,%ymm15,%ymm15
+	vpshufb		%ymm2,%ymm15,%ymm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+	vpaddd		0x20(%rsp),%ymm6,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpxor		%ymm0,%ymm12,%ymm12
+	vpshufb		%ymm2,%ymm12,%ymm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+	vpaddd		0x40(%rsp),%ymm7,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpxor		%ymm0,%ymm13,%ymm13
+	vpshufb		%ymm2,%ymm13,%ymm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+	vpaddd		0x60(%rsp),%ymm4,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpxor		%ymm0,%ymm14,%ymm14
+	vpshufb		%ymm2,%ymm14,%ymm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+	vpaddd		%ymm15,%ymm10,%ymm10
+	vpxor		%ymm10,%ymm5,%ymm5
+	vpslld		$7,%ymm5,%ymm0
+	vpsrld		$25,%ymm5,%ymm5
+	vpor		%ymm0,%ymm5,%ymm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+	vpaddd		%ymm12,%ymm11,%ymm11
+	vpxor		%ymm11,%ymm6,%ymm6
+	vpslld		$7,%ymm6,%ymm0
+	vpsrld		$25,%ymm6,%ymm6
+	vpor		%ymm0,%ymm6,%ymm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+	vpaddd		%ymm13,%ymm8,%ymm8
+	vpxor		%ymm8,%ymm7,%ymm7
+	vpslld		$7,%ymm7,%ymm0
+	vpsrld		$25,%ymm7,%ymm7
+	vpor		%ymm0,%ymm7,%ymm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+	vpaddd		%ymm14,%ymm9,%ymm9
+	vpxor		%ymm9,%ymm4,%ymm4
+	vpslld		$7,%ymm4,%ymm0
+	vpsrld		$25,%ymm4,%ymm4
+	vpor		%ymm0,%ymm4,%ymm4
+
+	dec		%ecx
+	jnz		.Ldoubleround8
+
+	# x0..15[0-3] += s[0..15]
+	vpbroadcastd	0x00(%rdi),%ymm0
+	vpaddd		0x00(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x00(%rsp)
+	vpbroadcastd	0x04(%rdi),%ymm0
+	vpaddd		0x20(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x20(%rsp)
+	vpbroadcastd	0x08(%rdi),%ymm0
+	vpaddd		0x40(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x40(%rsp)
+	vpbroadcastd	0x0c(%rdi),%ymm0
+	vpaddd		0x60(%rsp),%ymm0,%ymm0
+	vmovdqa		%ymm0,0x60(%rsp)
+	vpbroadcastd	0x10(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm4,%ymm4
+	vpbroadcastd	0x14(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm5,%ymm5
+	vpbroadcastd	0x18(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm6,%ymm6
+	vpbroadcastd	0x1c(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm7,%ymm7
+	vpbroadcastd	0x20(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm8,%ymm8
+	vpbroadcastd	0x24(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm9,%ymm9
+	vpbroadcastd	0x28(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm10,%ymm10
+	vpbroadcastd	0x2c(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm11,%ymm11
+	vpbroadcastd	0x30(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm12,%ymm12
+	vpbroadcastd	0x34(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm13,%ymm13
+	vpbroadcastd	0x38(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm14,%ymm14
+	vpbroadcastd	0x3c(%rdi),%ymm0
+	vpaddd		%ymm0,%ymm15,%ymm15
+
+	# x12 += counter values 0-3
+	vpaddd		%ymm1,%ymm12,%ymm12
+
+	# interleave 32-bit words in state n, n+1
+	vmovdqa		0x00(%rsp),%ymm0
+	vmovdqa		0x20(%rsp),%ymm1
+	vpunpckldq	%ymm1,%ymm0,%ymm2
+	vpunpckhdq	%ymm1,%ymm0,%ymm1
+	vmovdqa		%ymm2,0x00(%rsp)
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		0x40(%rsp),%ymm0
+	vmovdqa		0x60(%rsp),%ymm1
+	vpunpckldq	%ymm1,%ymm0,%ymm2
+	vpunpckhdq	%ymm1,%ymm0,%ymm1
+	vmovdqa		%ymm2,0x40(%rsp)
+	vmovdqa		%ymm1,0x60(%rsp)
+	vmovdqa		%ymm4,%ymm0
+	vpunpckldq	%ymm5,%ymm0,%ymm4
+	vpunpckhdq	%ymm5,%ymm0,%ymm5
+	vmovdqa		%ymm6,%ymm0
+	vpunpckldq	%ymm7,%ymm0,%ymm6
+	vpunpckhdq	%ymm7,%ymm0,%ymm7
+	vmovdqa		%ymm8,%ymm0
+	vpunpckldq	%ymm9,%ymm0,%ymm8
+	vpunpckhdq	%ymm9,%ymm0,%ymm9
+	vmovdqa		%ymm10,%ymm0
+	vpunpckldq	%ymm11,%ymm0,%ymm10
+	vpunpckhdq	%ymm11,%ymm0,%ymm11
+	vmovdqa		%ymm12,%ymm0
+	vpunpckldq	%ymm13,%ymm0,%ymm12
+	vpunpckhdq	%ymm13,%ymm0,%ymm13
+	vmovdqa		%ymm14,%ymm0
+	vpunpckldq	%ymm15,%ymm0,%ymm14
+	vpunpckhdq	%ymm15,%ymm0,%ymm15
+
+	# interleave 64-bit words in state n, n+2
+	vmovdqa		0x00(%rsp),%ymm0
+	vmovdqa		0x40(%rsp),%ymm2
+	vpunpcklqdq	%ymm2,%ymm0,%ymm1
+	vpunpckhqdq	%ymm2,%ymm0,%ymm2
+	vmovdqa		%ymm1,0x00(%rsp)
+	vmovdqa		%ymm2,0x40(%rsp)
+	vmovdqa		0x20(%rsp),%ymm0
+	vmovdqa		0x60(%rsp),%ymm2
+	vpunpcklqdq	%ymm2,%ymm0,%ymm1
+	vpunpckhqdq	%ymm2,%ymm0,%ymm2
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		%ymm2,0x60(%rsp)
+	vmovdqa		%ymm4,%ymm0
+	vpunpcklqdq	%ymm6,%ymm0,%ymm4
+	vpunpckhqdq	%ymm6,%ymm0,%ymm6
+	vmovdqa		%ymm5,%ymm0
+	vpunpcklqdq	%ymm7,%ymm0,%ymm5
+	vpunpckhqdq	%ymm7,%ymm0,%ymm7
+	vmovdqa		%ymm8,%ymm0
+	vpunpcklqdq	%ymm10,%ymm0,%ymm8
+	vpunpckhqdq	%ymm10,%ymm0,%ymm10
+	vmovdqa		%ymm9,%ymm0
+	vpunpcklqdq	%ymm11,%ymm0,%ymm9
+	vpunpckhqdq	%ymm11,%ymm0,%ymm11
+	vmovdqa		%ymm12,%ymm0
+	vpunpcklqdq	%ymm14,%ymm0,%ymm12
+	vpunpckhqdq	%ymm14,%ymm0,%ymm14
+	vmovdqa		%ymm13,%ymm0
+	vpunpcklqdq	%ymm15,%ymm0,%ymm13
+	vpunpckhqdq	%ymm15,%ymm0,%ymm15
+
+	# interleave 128-bit words in state n, n+4
+	vmovdqa		0x00(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm4,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm4,%ymm0,%ymm4
+	vmovdqa		%ymm1,0x00(%rsp)
+	vmovdqa		0x20(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm5,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm5,%ymm0,%ymm5
+	vmovdqa		%ymm1,0x20(%rsp)
+	vmovdqa		0x40(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm6,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm6,%ymm0,%ymm6
+	vmovdqa		%ymm1,0x40(%rsp)
+	vmovdqa		0x60(%rsp),%ymm0
+	vperm2i128	$0x20,%ymm7,%ymm0,%ymm1
+	vperm2i128	$0x31,%ymm7,%ymm0,%ymm7
+	vmovdqa		%ymm1,0x60(%rsp)
+	vperm2i128	$0x20,%ymm12,%ymm8,%ymm0
+	vperm2i128	$0x31,%ymm12,%ymm8,%ymm12
+	vmovdqa		%ymm0,%ymm8
+	vperm2i128	$0x20,%ymm13,%ymm9,%ymm0
+	vperm2i128	$0x31,%ymm13,%ymm9,%ymm13
+	vmovdqa		%ymm0,%ymm9
+	vperm2i128	$0x20,%ymm14,%ymm10,%ymm0
+	vperm2i128	$0x31,%ymm14,%ymm10,%ymm14
+	vmovdqa		%ymm0,%ymm10
+	vperm2i128	$0x20,%ymm15,%ymm11,%ymm0
+	vperm2i128	$0x31,%ymm15,%ymm11,%ymm15
+	vmovdqa		%ymm0,%ymm11
+
+	# xor with corresponding input, write to output
+	vmovdqa		0x00(%rsp),%ymm0
+	vpxor		0x0000(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0000(%rsi)
+	vmovdqa		0x20(%rsp),%ymm0
+	vpxor		0x0080(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0080(%rsi)
+	vmovdqa		0x40(%rsp),%ymm0
+	vpxor		0x0040(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x0040(%rsi)
+	vmovdqa		0x60(%rsp),%ymm0
+	vpxor		0x00c0(%rdx),%ymm0,%ymm0
+	vmovdqu		%ymm0,0x00c0(%rsi)
+	vpxor		0x0100(%rdx),%ymm4,%ymm4
+	vmovdqu		%ymm4,0x0100(%rsi)
+	vpxor		0x0180(%rdx),%ymm5,%ymm5
+	vmovdqu		%ymm5,0x0180(%rsi)
+	vpxor		0x0140(%rdx),%ymm6,%ymm6
+	vmovdqu		%ymm6,0x0140(%rsi)
+	vpxor		0x01c0(%rdx),%ymm7,%ymm7
+	vmovdqu		%ymm7,0x01c0(%rsi)
+	vpxor		0x0020(%rdx),%ymm8,%ymm8
+	vmovdqu		%ymm8,0x0020(%rsi)
+	vpxor		0x00a0(%rdx),%ymm9,%ymm9
+	vmovdqu		%ymm9,0x00a0(%rsi)
+	vpxor		0x0060(%rdx),%ymm10,%ymm10
+	vmovdqu		%ymm10,0x0060(%rsi)
+	vpxor		0x00e0(%rdx),%ymm11,%ymm11
+	vmovdqu		%ymm11,0x00e0(%rsi)
+	vpxor		0x0120(%rdx),%ymm12,%ymm12
+	vmovdqu		%ymm12,0x0120(%rsi)
+	vpxor		0x01a0(%rdx),%ymm13,%ymm13
+	vmovdqu		%ymm13,0x01a0(%rsi)
+	vpxor		0x0160(%rdx),%ymm14,%ymm14
+	vmovdqu		%ymm14,0x0160(%rsi)
+	vpxor		0x01e0(%rdx),%ymm15,%ymm15
+	vmovdqu		%ymm15,0x01e0(%rsi)
+
+	vzeroupper
+	mov		%r8,%rsp
+	ret
+ENDPROC(chacha20_8block_xor_avx2)
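
The rotation amounts mentioned in the comments (16, 12, 8 and 7 bits) come from the ChaCha20 quarter round of RFC 7539. For orientation, a plain C rendering of one quarter round, which the AVX2 code above and the SSSE3 code below apply to eight or four independent blocks at a time, might look like this (rotl32 is written out here for completeness and is not part of the patch):

	#include <stdint.h>

	static inline uint32_t rotl32(uint32_t v, int n)
	{
		return (v << n) | (v >> (32 - n));
	}

	/* One ChaCha20 quarter round on four state words; the vector code uses
	 * byte shuffles for the 16- and 8-bit rotations and shift+OR for the
	 * 12- and 7-bit ones. */
	static void chacha20_quarterround(uint32_t *a, uint32_t *b,
					  uint32_t *c, uint32_t *d)
	{
		*a += *b; *d ^= *a; *d = rotl32(*d, 16);
		*c += *d; *b ^= *c; *b = rotl32(*b, 12);
		*a += *b; *d ^= *a; *d = rotl32(*d, 8);
		*c += *d; *b ^= *c; *b = rotl32(*b, 7);
	}
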
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
new file mode 100644
index 0000000..712b130
--- /dev/null
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -0,0 +1,625 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 16
+
+ROT8:	.octa 0x0e0d0c0f0a09080b0605040702010003
+ROT16:	.octa 0x0d0c0f0e09080b0a0504070601000302
+CTRINC:	.octa 0x00000003000000020000000100000000
+
+.text
+
+ENTRY(chacha20_block_xor_ssse3)
+	# %rdi: Input state matrix, s
+	# %rsi: 1 data block output, o
+	# %rdx: 1 data block input, i
+
+	# This function encrypts one ChaCha20 block by loading the state matrix
+	# in four SSE registers. It performs matrix operations on four words in
+	# parallel, but requires shuffling to rearrange the words after each
+	# round. 8/16-bit word rotation is done with the slightly better
+	# performing SSSE3 byte shuffling, 7/12-bit word rotation uses
+	# traditional shift+OR.
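+	#
+	# For reference, the vector ops below carry out the scalar ChaCha20
+	# quarter-round from RFC 7539 on all four columns at once (and, after
+	# the pshufd lane rotations, on the four diagonals):
+	#
+	#	a += b; d = rotl32(d ^ a, 16);
+	#	c += d; b = rotl32(b ^ c, 12);
+	#	a += b; d = rotl32(d ^ a,  8);
+	#	c += d; b = rotl32(b ^ c,  7);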
+
+	# x0..3 = s0..3
+	movdqa		0x00(%rdi),%xmm0
+	movdqa		0x10(%rdi),%xmm1
+	movdqa		0x20(%rdi),%xmm2
+	movdqa		0x30(%rdi),%xmm3
+	movdqa		%xmm0,%xmm8
+	movdqa		%xmm1,%xmm9
+	movdqa		%xmm2,%xmm10
+	movdqa		%xmm3,%xmm11
+
+	movdqa		ROT8(%rip),%xmm4
+	movdqa		ROT16(%rip),%xmm5
+
+	mov	$10,%ecx
+
+.Ldoubleround:
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm5,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm6
+	pslld		$12,%xmm6
+	psrld		$20,%xmm1
+	por		%xmm6,%xmm1
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm4,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm7
+	pslld		$7,%xmm7
+	psrld		$25,%xmm1
+	por		%xmm7,%xmm1
+
+	# x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+	pshufd		$0x39,%xmm1,%xmm1
+	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+	pshufd		$0x4e,%xmm2,%xmm2
+	# x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+	pshufd		$0x93,%xmm3,%xmm3
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm5,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm6
+	pslld		$12,%xmm6
+	psrld		$20,%xmm1
+	por		%xmm6,%xmm1
+
+	# x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+	paddd		%xmm1,%xmm0
+	pxor		%xmm0,%xmm3
+	pshufb		%xmm4,%xmm3
+
+	# x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+	paddd		%xmm3,%xmm2
+	pxor		%xmm2,%xmm1
+	movdqa		%xmm1,%xmm7
+	pslld		$7,%xmm7
+	psrld		$25,%xmm1
+	por		%xmm7,%xmm1
+
+	# x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+	pshufd		$0x93,%xmm1,%xmm1
+	# x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+	pshufd		$0x4e,%xmm2,%xmm2
+	# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+	pshufd		$0x39,%xmm3,%xmm3
+
+	dec		%ecx
+	jnz		.Ldoubleround
+
+	# o0 = i0 ^ (x0 + s0)
+	movdqu		0x00(%rdx),%xmm4
+	paddd		%xmm8,%xmm0
+	pxor		%xmm4,%xmm0
+	movdqu		%xmm0,0x00(%rsi)
+	# o1 = i1 ^ (x1 + s1)
+	movdqu		0x10(%rdx),%xmm5
+	paddd		%xmm9,%xmm1
+	pxor		%xmm5,%xmm1
+	movdqu		%xmm1,0x10(%rsi)
+	# o2 = i2 ^ (x2 + s2)
+	movdqu		0x20(%rdx),%xmm6
+	paddd		%xmm10,%xmm2
+	pxor		%xmm6,%xmm2
+	movdqu		%xmm2,0x20(%rsi)
+	# o3 = i3 ^ (x3 + s3)
+	movdqu		0x30(%rdx),%xmm7
+	paddd		%xmm11,%xmm3
+	pxor		%xmm7,%xmm3
+	movdqu		%xmm3,0x30(%rsi)
+
+	ret
+ENDPROC(chacha20_block_xor_ssse3)
+
+ENTRY(chacha20_4block_xor_ssse3)
+	# %rdi: Input state matrix, s
+	# %rsi: 4 data blocks output, o
+	# %rdx: 4 data blocks input, i
+
+	# This function encrypts four consecutive ChaCha20 blocks by loading the
+	# state matrix in SSE registers four times. As we need some scratch
+	# registers, we save the first four registers on the stack. The
+	# algorithm performs each operation on the corresponding word of each
+	# state matrix, hence requires no word shuffling. For the final XOR step
+	# we transpose the matrix by interleaving 32- and then 64-bit words,
+	# which allows us to do XOR in SSE registers. 8/16-bit word rotation is
+	# done with the slightly better performing SSSE3 byte shuffling,
+	# 7/12-bit word rotation uses traditional shift+OR.
+
+	sub		$0x40,%rsp
+
+	# x0..15[0-3] = s0..3[0..3]
+	movq		0x00(%rdi),%xmm1
+	pshufd		$0x00,%xmm1,%xmm0
+	pshufd		$0x55,%xmm1,%xmm1
+	movq		0x08(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	movq		0x10(%rdi),%xmm5
+	pshufd		$0x00,%xmm5,%xmm4
+	pshufd		$0x55,%xmm5,%xmm5
+	movq		0x18(%rdi),%xmm7
+	pshufd		$0x00,%xmm7,%xmm6
+	pshufd		$0x55,%xmm7,%xmm7
+	movq		0x20(%rdi),%xmm9
+	pshufd		$0x00,%xmm9,%xmm8
+	pshufd		$0x55,%xmm9,%xmm9
+	movq		0x28(%rdi),%xmm11
+	pshufd		$0x00,%xmm11,%xmm10
+	pshufd		$0x55,%xmm11,%xmm11
+	movq		0x30(%rdi),%xmm13
+	pshufd		$0x00,%xmm13,%xmm12
+	pshufd		$0x55,%xmm13,%xmm13
+	movq		0x38(%rdi),%xmm15
+	pshufd		$0x00,%xmm15,%xmm14
+	pshufd		$0x55,%xmm15,%xmm15
+	# x0..3 on stack
+	movdqa		%xmm0,0x00(%rsp)
+	movdqa		%xmm1,0x10(%rsp)
+	movdqa		%xmm2,0x20(%rsp)
+	movdqa		%xmm3,0x30(%rsp)
+
+	movdqa		CTRINC(%rip),%xmm1
+	movdqa		ROT8(%rip),%xmm2
+	movdqa		ROT16(%rip),%xmm3
+
+	# x12 += counter values 0-3
+	paddd		%xmm1,%xmm12
+
+	mov		$10,%ecx
+
+.Ldoubleround4:
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm3,%xmm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm3,%xmm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm3,%xmm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm3,%xmm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+	paddd		%xmm12,%xmm8
+	pxor		%xmm8,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm4
+	por		%xmm0,%xmm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+	paddd		%xmm13,%xmm9
+	pxor		%xmm9,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm5
+	por		%xmm0,%xmm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+	paddd		%xmm14,%xmm10
+	pxor		%xmm10,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm6
+	por		%xmm0,%xmm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+	paddd		%xmm15,%xmm11
+	pxor		%xmm11,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm7
+	por		%xmm0,%xmm7
+
+	# x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm2,%xmm12
+	# x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm2,%xmm13
+	# x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm2,%xmm14
+	# x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm2,%xmm15
+
+	# x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+	paddd		%xmm12,%xmm8
+	pxor		%xmm8,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm4
+	por		%xmm0,%xmm4
+	# x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+	paddd		%xmm13,%xmm9
+	pxor		%xmm9,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm5
+	por		%xmm0,%xmm5
+	# x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+	paddd		%xmm14,%xmm10
+	pxor		%xmm10,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm6
+	por		%xmm0,%xmm6
+	# x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+	paddd		%xmm15,%xmm11
+	pxor		%xmm11,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm7
+	por		%xmm0,%xmm7
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm3,%xmm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm3,%xmm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm3,%xmm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm3,%xmm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+	paddd		%xmm15,%xmm10
+	pxor		%xmm10,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm5
+	por		%xmm0,%xmm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+	paddd		%xmm12,%xmm11
+	pxor		%xmm11,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm6
+	por		%xmm0,%xmm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+	paddd		%xmm13,%xmm8
+	pxor		%xmm8,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm7
+	por		%xmm0,%xmm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+	paddd		%xmm14,%xmm9
+	pxor		%xmm9,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$12,%xmm0
+	psrld		$20,%xmm4
+	por		%xmm0,%xmm4
+
+	# x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+	movdqa		0x00(%rsp),%xmm0
+	paddd		%xmm5,%xmm0
+	movdqa		%xmm0,0x00(%rsp)
+	pxor		%xmm0,%xmm15
+	pshufb		%xmm2,%xmm15
+	# x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+	movdqa		0x10(%rsp),%xmm0
+	paddd		%xmm6,%xmm0
+	movdqa		%xmm0,0x10(%rsp)
+	pxor		%xmm0,%xmm12
+	pshufb		%xmm2,%xmm12
+	# x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+	movdqa		0x20(%rsp),%xmm0
+	paddd		%xmm7,%xmm0
+	movdqa		%xmm0,0x20(%rsp)
+	pxor		%xmm0,%xmm13
+	pshufb		%xmm2,%xmm13
+	# x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+	movdqa		0x30(%rsp),%xmm0
+	paddd		%xmm4,%xmm0
+	movdqa		%xmm0,0x30(%rsp)
+	pxor		%xmm0,%xmm14
+	pshufb		%xmm2,%xmm14
+
+	# x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+	paddd		%xmm15,%xmm10
+	pxor		%xmm10,%xmm5
+	movdqa		%xmm5,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm5
+	por		%xmm0,%xmm5
+	# x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+	paddd		%xmm12,%xmm11
+	pxor		%xmm11,%xmm6
+	movdqa		%xmm6,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm6
+	por		%xmm0,%xmm6
+	# x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+	paddd		%xmm13,%xmm8
+	pxor		%xmm8,%xmm7
+	movdqa		%xmm7,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm7
+	por		%xmm0,%xmm7
+	# x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+	paddd		%xmm14,%xmm9
+	pxor		%xmm9,%xmm4
+	movdqa		%xmm4,%xmm0
+	pslld		$7,%xmm0
+	psrld		$25,%xmm4
+	por		%xmm0,%xmm4
+
+	dec		%ecx
+	jnz		.Ldoubleround4
+
+	# x0[0-3] += s0[0]
+	# x1[0-3] += s0[1]
+	movq		0x00(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		0x00(%rsp),%xmm2
+	movdqa		%xmm2,0x00(%rsp)
+	paddd		0x10(%rsp),%xmm3
+	movdqa		%xmm3,0x10(%rsp)
+	# x2[0-3] += s0[2]
+	# x3[0-3] += s0[3]
+	movq		0x08(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		0x20(%rsp),%xmm2
+	movdqa		%xmm2,0x20(%rsp)
+	paddd		0x30(%rsp),%xmm3
+	movdqa		%xmm3,0x30(%rsp)
+
+	# x4[0-3] += s1[0]
+	# x5[0-3] += s1[1]
+	movq		0x10(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm4
+	paddd		%xmm3,%xmm5
+	# x6[0-3] += s1[2]
+	# x7[0-3] += s1[3]
+	movq		0x18(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm6
+	paddd		%xmm3,%xmm7
+
+	# x8[0-3] += s2[0]
+	# x9[0-3] += s2[1]
+	movq		0x20(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm8
+	paddd		%xmm3,%xmm9
+	# x10[0-3] += s2[2]
+	# x11[0-3] += s2[3]
+	movq		0x28(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm10
+	paddd		%xmm3,%xmm11
+
+	# x12[0-3] += s3[0]
+	# x13[0-3] += s3[1]
+	movq		0x30(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm12
+	paddd		%xmm3,%xmm13
+	# x14[0-3] += s3[2]
+	# x15[0-3] += s3[3]
+	movq		0x38(%rdi),%xmm3
+	pshufd		$0x00,%xmm3,%xmm2
+	pshufd		$0x55,%xmm3,%xmm3
+	paddd		%xmm2,%xmm14
+	paddd		%xmm3,%xmm15
+
+	# x12 += counter values 0-3
+	paddd		%xmm1,%xmm12
+
+	# interleave 32-bit words in state n, n+1
+	movdqa		0x00(%rsp),%xmm0
+	movdqa		0x10(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpckldq	%xmm1,%xmm2
+	punpckhdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x00(%rsp)
+	movdqa		%xmm0,0x10(%rsp)
+	movdqa		0x20(%rsp),%xmm0
+	movdqa		0x30(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpckldq	%xmm1,%xmm2
+	punpckhdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x20(%rsp)
+	movdqa		%xmm0,0x30(%rsp)
+	movdqa		%xmm4,%xmm0
+	punpckldq	%xmm5,%xmm4
+	punpckhdq	%xmm5,%xmm0
+	movdqa		%xmm0,%xmm5
+	movdqa		%xmm6,%xmm0
+	punpckldq	%xmm7,%xmm6
+	punpckhdq	%xmm7,%xmm0
+	movdqa		%xmm0,%xmm7
+	movdqa		%xmm8,%xmm0
+	punpckldq	%xmm9,%xmm8
+	punpckhdq	%xmm9,%xmm0
+	movdqa		%xmm0,%xmm9
+	movdqa		%xmm10,%xmm0
+	punpckldq	%xmm11,%xmm10
+	punpckhdq	%xmm11,%xmm0
+	movdqa		%xmm0,%xmm11
+	movdqa		%xmm12,%xmm0
+	punpckldq	%xmm13,%xmm12
+	punpckhdq	%xmm13,%xmm0
+	movdqa		%xmm0,%xmm13
+	movdqa		%xmm14,%xmm0
+	punpckldq	%xmm15,%xmm14
+	punpckhdq	%xmm15,%xmm0
+	movdqa		%xmm0,%xmm15
+
+	# interleave 64-bit words in state n, n+2
+	movdqa		0x00(%rsp),%xmm0
+	movdqa		0x20(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpcklqdq	%xmm1,%xmm2
+	punpckhqdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x00(%rsp)
+	movdqa		%xmm0,0x20(%rsp)
+	movdqa		0x10(%rsp),%xmm0
+	movdqa		0x30(%rsp),%xmm1
+	movdqa		%xmm0,%xmm2
+	punpcklqdq	%xmm1,%xmm2
+	punpckhqdq	%xmm1,%xmm0
+	movdqa		%xmm2,0x10(%rsp)
+	movdqa		%xmm0,0x30(%rsp)
+	movdqa		%xmm4,%xmm0
+	punpcklqdq	%xmm6,%xmm4
+	punpckhqdq	%xmm6,%xmm0
+	movdqa		%xmm0,%xmm6
+	movdqa		%xmm5,%xmm0
+	punpcklqdq	%xmm7,%xmm5
+	punpckhqdq	%xmm7,%xmm0
+	movdqa		%xmm0,%xmm7
+	movdqa		%xmm8,%xmm0
+	punpcklqdq	%xmm10,%xmm8
+	punpckhqdq	%xmm10,%xmm0
+	movdqa		%xmm0,%xmm10
+	movdqa		%xmm9,%xmm0
+	punpcklqdq	%xmm11,%xmm9
+	punpckhqdq	%xmm11,%xmm0
+	movdqa		%xmm0,%xmm11
+	movdqa		%xmm12,%xmm0
+	punpcklqdq	%xmm14,%xmm12
+	punpckhqdq	%xmm14,%xmm0
+	movdqa		%xmm0,%xmm14
+	movdqa		%xmm13,%xmm0
+	punpcklqdq	%xmm15,%xmm13
+	punpckhqdq	%xmm15,%xmm0
+	movdqa		%xmm0,%xmm15
+
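+	# Note: the two interleave passes above leave whole blocks in the
+	# registers/stack slots, but in the order 0, 2, 1, 3; the scattered
+	# output offsets below (0x00, 0x80, 0x40, 0xc0, ...) put each 16-byte
+	# chunk back at its proper place in the output stream.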
+	# xor with corresponding input, write to output
+	movdqa		0x00(%rsp),%xmm0
+	movdqu		0x00(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0x00(%rsi)
+	movdqa		0x10(%rsp),%xmm0
+	movdqu		0x80(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0x80(%rsi)
+	movdqa		0x20(%rsp),%xmm0
+	movdqu		0x40(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0x40(%rsi)
+	movdqa		0x30(%rsp),%xmm0
+	movdqu		0xc0(%rdx),%xmm1
+	pxor		%xmm1,%xmm0
+	movdqu		%xmm0,0xc0(%rsi)
+	movdqu		0x10(%rdx),%xmm1
+	pxor		%xmm1,%xmm4
+	movdqu		%xmm4,0x10(%rsi)
+	movdqu		0x90(%rdx),%xmm1
+	pxor		%xmm1,%xmm5
+	movdqu		%xmm5,0x90(%rsi)
+	movdqu		0x50(%rdx),%xmm1
+	pxor		%xmm1,%xmm6
+	movdqu		%xmm6,0x50(%rsi)
+	movdqu		0xd0(%rdx),%xmm1
+	pxor		%xmm1,%xmm7
+	movdqu		%xmm7,0xd0(%rsi)
+	movdqu		0x20(%rdx),%xmm1
+	pxor		%xmm1,%xmm8
+	movdqu		%xmm8,0x20(%rsi)
+	movdqu		0xa0(%rdx),%xmm1
+	pxor		%xmm1,%xmm9
+	movdqu		%xmm9,0xa0(%rsi)
+	movdqu		0x60(%rdx),%xmm1
+	pxor		%xmm1,%xmm10
+	movdqu		%xmm10,0x60(%rsi)
+	movdqu		0xe0(%rdx),%xmm1
+	pxor		%xmm1,%xmm11
+	movdqu		%xmm11,0xe0(%rsi)
+	movdqu		0x30(%rdx),%xmm1
+	pxor		%xmm1,%xmm12
+	movdqu		%xmm12,0x30(%rsi)
+	movdqu		0xb0(%rdx),%xmm1
+	pxor		%xmm1,%xmm13
+	movdqu		%xmm13,0xb0(%rsi)
+	movdqu		0x70(%rdx),%xmm1
+	pxor		%xmm1,%xmm14
+	movdqu		%xmm14,0x70(%rsi)
+	movdqu		0xf0(%rdx),%xmm1
+	pxor		%xmm1,%xmm15
+	movdqu		%xmm15,0xf0(%rsi)
+
+	add		$0x40,%rsp
+	ret
+ENDPROC(chacha20_4block_xor_ssse3)
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
new file mode 100644
index 0000000..effe216
--- /dev/null
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -0,0 +1,150 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha20.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+#define CHACHA20_STATE_ALIGN 16
+
+asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
+asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
+static bool chacha20_use_avx2;
+#endif
+
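+/*
+ * Process as many whole blocks as possible with the widest routine the CPU
+ * supports: eight blocks per call with AVX2 when available, then four and
+ * finally single blocks with SSSE3.  state[12] is the block counter and is
+ * advanced by the number of blocks consumed.  A trailing partial block is
+ * bounced through a stack buffer so the asm routines only see full blocks.
+ */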
+static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
+			    unsigned int bytes)
+{
+	u8 buf[CHACHA20_BLOCK_SIZE];
+
+#ifdef CONFIG_AS_AVX2
+	if (chacha20_use_avx2) {
+		while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
+			chacha20_8block_xor_avx2(state, dst, src);
+			bytes -= CHACHA20_BLOCK_SIZE * 8;
+			src += CHACHA20_BLOCK_SIZE * 8;
+			dst += CHACHA20_BLOCK_SIZE * 8;
+			state[12] += 8;
+		}
+	}
+#endif
+	while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
+		chacha20_4block_xor_ssse3(state, dst, src);
+		bytes -= CHACHA20_BLOCK_SIZE * 4;
+		src += CHACHA20_BLOCK_SIZE * 4;
+		dst += CHACHA20_BLOCK_SIZE * 4;
+		state[12] += 4;
+	}
+	while (bytes >= CHACHA20_BLOCK_SIZE) {
+		chacha20_block_xor_ssse3(state, dst, src);
+		bytes -= CHACHA20_BLOCK_SIZE;
+		src += CHACHA20_BLOCK_SIZE;
+		dst += CHACHA20_BLOCK_SIZE;
+		state[12]++;
+	}
+	if (bytes) {
+		memcpy(buf, src, bytes);
+		chacha20_block_xor_ssse3(state, buf, buf);
+		memcpy(dst, buf, bytes);
+	}
+}
+
+static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
+			 struct scatterlist *src, unsigned int nbytes)
+{
+	u32 *state, state_buf[16 + (CHACHA20_STATE_ALIGN / sizeof(u32)) - 1];
+	struct blkcipher_walk walk;
+	int err;
+
+	if (!may_use_simd())
+		return crypto_chacha20_crypt(desc, dst, src, nbytes);
+
+	state = (u32 *)roundup((uintptr_t)state_buf, CHACHA20_STATE_ALIGN);
+
+	blkcipher_walk_init(&walk, dst, src, nbytes);
+	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+
+	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+
+	kernel_fpu_begin();
+
+	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
+		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+				rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
+		err = blkcipher_walk_done(desc, &walk,
+					  walk.nbytes % CHACHA20_BLOCK_SIZE);
+	}
+
+	if (walk.nbytes) {
+		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
+				walk.nbytes);
+		err = blkcipher_walk_done(desc, &walk, 0);
+	}
+
+	kernel_fpu_end();
+
+	return err;
+}
+
+static struct crypto_alg alg = {
+	.cra_name		= "chacha20",
+	.cra_driver_name	= "chacha20-simd",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
+	.cra_blocksize		= 1,
+	.cra_type		= &crypto_blkcipher_type,
+	.cra_ctxsize		= sizeof(struct chacha20_ctx),
+	.cra_alignmask		= sizeof(u32) - 1,
+	.cra_module		= THIS_MODULE,
+	.cra_u			= {
+		.blkcipher = {
+			.min_keysize	= CHACHA20_KEY_SIZE,
+			.max_keysize	= CHACHA20_KEY_SIZE,
+			.ivsize		= CHACHA20_IV_SIZE,
+			.geniv		= "seqiv",
+			.setkey		= crypto_chacha20_setkey,
+			.encrypt	= chacha20_simd,
+			.decrypt	= chacha20_simd,
+		},
+	},
+};
+
+static int __init chacha20_simd_mod_init(void)
+{
+	if (!cpu_has_ssse3)
+		return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+	chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+			    cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+#endif
+	return crypto_register_alg(&alg);
+}
+
+static void __exit chacha20_simd_mod_fini(void)
+{
+	crypto_unregister_alg(&alg);
+}
+
+module_init(chacha20_simd_mod_init);
+module_exit(chacha20_simd_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-simd");
diff --git a/arch/x86/crypto/poly1305-avx2-x86_64.S b/arch/x86/crypto/poly1305-avx2-x86_64.S
new file mode 100644
index 0000000..eff2f41
--- /dev/null
+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S
@@ -0,0 +1,386 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 32
+
+ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
+	.octa 0x0000000003ffffff0000000003ffffff
+ORMASK:	.octa 0x00000000010000000000000001000000
+	.octa 0x00000000010000000000000001000000
+
+.text
+
+#define h0 0x00(%rdi)
+#define h1 0x04(%rdi)
+#define h2 0x08(%rdi)
+#define h3 0x0c(%rdi)
+#define h4 0x10(%rdi)
+#define r0 0x00(%rdx)
+#define r1 0x04(%rdx)
+#define r2 0x08(%rdx)
+#define r3 0x0c(%rdx)
+#define r4 0x10(%rdx)
+#define u0 0x00(%r8)
+#define u1 0x04(%r8)
+#define u2 0x08(%r8)
+#define u3 0x0c(%r8)
+#define u4 0x10(%r8)
+#define w0 0x14(%r8)
+#define w1 0x18(%r8)
+#define w2 0x1c(%r8)
+#define w3 0x20(%r8)
+#define w4 0x24(%r8)
+#define y0 0x28(%r8)
+#define y1 0x2c(%r8)
+#define y2 0x30(%r8)
+#define y3 0x34(%r8)
+#define y4 0x38(%r8)
+#define m %rsi
+#define hc0 %ymm0
+#define hc1 %ymm1
+#define hc2 %ymm2
+#define hc3 %ymm3
+#define hc4 %ymm4
+#define hc0x %xmm0
+#define hc1x %xmm1
+#define hc2x %xmm2
+#define hc3x %xmm3
+#define hc4x %xmm4
+#define t1 %ymm5
+#define t2 %ymm6
+#define t1x %xmm5
+#define t2x %xmm6
+#define ruwy0 %ymm7
+#define ruwy1 %ymm8
+#define ruwy2 %ymm9
+#define ruwy3 %ymm10
+#define ruwy4 %ymm11
+#define ruwy0x %xmm7
+#define ruwy1x %xmm8
+#define ruwy2x %xmm9
+#define ruwy3x %xmm10
+#define ruwy4x %xmm11
+#define svxz1 %ymm12
+#define svxz2 %ymm13
+#define svxz3 %ymm14
+#define svxz4 %ymm15
+#define d0 %r9
+#define d1 %r10
+#define d2 %r11
+#define d3 %r12
+#define d4 %r13
+
+ENTRY(poly1305_4block_avx2)
+	# %rdi: Accumulator h[5]
+	# %rsi: 64 byte input block m
+	# %rdx: Poly1305 key r[5]
+	# %rcx: Quadblock count
+	# %r8:  Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5]
+
+	# This four-block variant uses loop unrolled block processing. It
+	# requires 4 Poly1305 keys: r, r^2, r^3 and r^4:
+	# h = (h + m) * r  =>  h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r
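+	#
+	# The formula above is h = (h + m) * r applied to four blocks at once
+	# (Horner's rule), so besides r the routine needs the precomputed
+	# powers r^2 (u), r^3 (w) and r^4 (y), each as five 26-bit limbs like
+	# r itself.  The s/v/x/z values built below are those limbs times 5,
+	# used for the wrap-around reduction (2^130 == 5 mod 2^130 - 5).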
+
+	vzeroupper
+	push		%rbx
+	push		%r12
+	push		%r13
+
+	# combine r0,u0,w0,y0
+	vmovd		y0,ruwy0x
+	vmovd		w0,t1x
+	vpunpcklqdq	t1,ruwy0,ruwy0
+	vmovd		u0,t1x
+	vmovd		r0,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy0,ruwy0
+
+	# combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5
+	vmovd		y1,ruwy1x
+	vmovd		w1,t1x
+	vpunpcklqdq	t1,ruwy1,ruwy1
+	vmovd		u1,t1x
+	vmovd		r1,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy1,ruwy1
+	vpslld		$2,ruwy1,svxz1
+	vpaddd		ruwy1,svxz1,svxz1
+
+	# combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5
+	vmovd		y2,ruwy2x
+	vmovd		w2,t1x
+	vpunpcklqdq	t1,ruwy2,ruwy2
+	vmovd		u2,t1x
+	vmovd		r2,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy2,ruwy2
+	vpslld		$2,ruwy2,svxz2
+	vpaddd		ruwy2,svxz2,svxz2
+
+	# combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5
+	vmovd		y3,ruwy3x
+	vmovd		w3,t1x
+	vpunpcklqdq	t1,ruwy3,ruwy3
+	vmovd		u3,t1x
+	vmovd		r3,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy3,ruwy3
+	vpslld		$2,ruwy3,svxz3
+	vpaddd		ruwy3,svxz3,svxz3
+
+	# combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5
+	vmovd		y4,ruwy4x
+	vmovd		w4,t1x
+	vpunpcklqdq	t1,ruwy4,ruwy4
+	vmovd		u4,t1x
+	vmovd		r4,t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,ruwy4,ruwy4
+	vpslld		$2,ruwy4,svxz4
+	vpaddd		ruwy4,svxz4,svxz4
+
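+	# Each iteration consumes four 16-byte message blocks.  The byte-offset
+	# loads below (0x00/0x03/0x06/0x09/0x0c within each block), together
+	# with the shifts and the 0x3ffffff ANMASK, split every block into five
+	# 26-bit limbs; ORMASK sets the 2^128 padding bit in the top limb, and
+	# the current accumulator h is added into the limbs of the first block
+	# only.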
+.Ldoblock4:
+	# hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff,
+	#	 m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0]
+	vmovd		0x00(m),hc0x
+	vmovd		0x10(m),t1x
+	vpunpcklqdq	t1,hc0,hc0
+	vmovd		0x20(m),t1x
+	vmovd		0x30(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc0,hc0
+	vpand		ANMASK(%rip),hc0,hc0
+	vmovd		h0,t1x
+	vpaddd		t1,hc0,hc0
+	# hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff,
+	#	 (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1]
+	vmovd		0x03(m),hc1x
+	vmovd		0x13(m),t1x
+	vpunpcklqdq	t1,hc1,hc1
+	vmovd		0x23(m),t1x
+	vmovd		0x33(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc1,hc1
+	vpsrld		$2,hc1,hc1
+	vpand		ANMASK(%rip),hc1,hc1
+	vmovd		h1,t1x
+	vpaddd		t1,hc1,hc1
+	# hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff,
+	#	 (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2]
+	vmovd		0x06(m),hc2x
+	vmovd		0x16(m),t1x
+	vpunpcklqdq	t1,hc2,hc2
+	vmovd		0x26(m),t1x
+	vmovd		0x36(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc2,hc2
+	vpsrld		$4,hc2,hc2
+	vpand		ANMASK(%rip),hc2,hc2
+	vmovd		h2,t1x
+	vpaddd		t1,hc2,hc2
+	# hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff,
+	#	 (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3]
+	vmovd		0x09(m),hc3x
+	vmovd		0x19(m),t1x
+	vpunpcklqdq	t1,hc3,hc3
+	vmovd		0x29(m),t1x
+	vmovd		0x39(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc3,hc3
+	vpsrld		$6,hc3,hc3
+	vpand		ANMASK(%rip),hc3,hc3
+	vmovd		h3,t1x
+	vpaddd		t1,hc3,hc3
+	# hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24),
+	#	 (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4]
+	vmovd		0x0c(m),hc4x
+	vmovd		0x1c(m),t1x
+	vpunpcklqdq	t1,hc4,hc4
+	vmovd		0x2c(m),t1x
+	vmovd		0x3c(m),t2x
+	vpunpcklqdq	t2,t1,t1
+	vperm2i128	$0x20,t1,hc4,hc4
+	vpsrld		$8,hc4,hc4
+	vpor		ORMASK(%rip),hc4,hc4
+	vmovd		h4,t1x
+	vpaddd		t1,hc4,hc4
+
+	# t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ]
+	vpmuludq	hc0,ruwy0,t1
+	# t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ]
+	vpmuludq	hc1,svxz4,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ]
+	vpmuludq	hc2,svxz3,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ]
+	vpmuludq	hc3,svxz2,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ]
+	vpmuludq	hc4,svxz1,t2
+	vpaddq		t2,t1,t1
+	# d0 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d0
+
+	# t1 = [ hc0[3] * r1, hc0[2] * u1, hc0[1] * w1, hc0[0] * y1 ]
+	vpmuludq	hc0,ruwy1,t1
+	# t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ]
+	vpmuludq	hc1,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ]
+	vpmuludq	hc2,svxz4,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ]
+	vpmuludq	hc3,svxz3,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ]
+	vpmuludq	hc4,svxz2,t2
+	vpaddq		t2,t1,t1
+	# d1 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d1
+
+	# t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ]
+	vpmuludq	hc0,ruwy2,t1
+	# t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ]
+	vpmuludq	hc1,ruwy1,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ]
+	vpmuludq	hc2,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ]
+	vpmuludq	hc3,svxz4,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ]
+	vpmuludq	hc4,svxz3,t2
+	vpaddq		t2,t1,t1
+	# d2 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d2
+
+	# t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ]
+	vpmuludq	hc0,ruwy3,t1
+	# t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ]
+	vpmuludq	hc1,ruwy2,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ]
+	vpmuludq	hc2,ruwy1,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ]
+	vpmuludq	hc3,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ]
+	vpmuludq	hc4,svxz4,t2
+	vpaddq		t2,t1,t1
+	# d3 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d3
+
+	# t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ]
+	vpmuludq	hc0,ruwy4,t1
+	# t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ]
+	vpmuludq	hc1,ruwy3,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ]
+	vpmuludq	hc2,ruwy2,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ]
+	vpmuludq	hc3,ruwy1,t2
+	vpaddq		t2,t1,t1
+	# t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ]
+	vpmuludq	hc4,ruwy0,t2
+	vpaddq		t2,t1,t1
+	# d4 = t1[0] + t1[1] + t1[2] + t1[3]
+	vpermq		$0xee,t1,t2
+	vpaddq		t2,t1,t1
+	vpsrldq		$8,t1,t2
+	vpaddq		t2,t1,t1
+	vmovq		t1x,d4
+
+	# d1 += d0 >> 26
+	mov		d0,%rax
+	shr		$26,%rax
+	add		%rax,d1
+	# h0 = d0 & 0x3ffffff
+	mov		d0,%rbx
+	and		$0x3ffffff,%ebx
+
+	# d2 += d1 >> 26
+	mov		d1,%rax
+	shr		$26,%rax
+	add		%rax,d2
+	# h1 = d1 & 0x3ffffff
+	mov		d1,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h1
+
+	# d3 += d2 >> 26
+	mov		d2,%rax
+	shr		$26,%rax
+	add		%rax,d3
+	# h2 = d2 & 0x3ffffff
+	mov		d2,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h2
+
+	# d4 += d3 >> 26
+	mov		d3,%rax
+	shr		$26,%rax
+	add		%rax,d4
+	# h3 = d3 & 0x3ffffff
+	mov		d3,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h3
+
+	# h0 += (d4 >> 26) * 5
+	mov		d4,%rax
+	shr		$26,%rax
+	lea		(%eax,%eax,4),%eax
+	add		%eax,%ebx
+	# h4 = d4 & 0x3ffffff
+	mov		d4,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h4
+
+	# h1 += h0 >> 26
+	mov		%ebx,%eax
+	shr		$26,%eax
+	add		%eax,h1
+	# h0 = h0 & 0x3ffffff
+	andl		$0x3ffffff,%ebx
+	mov		%ebx,h0
+
+	add		$0x40,m
+	dec		%rcx
+	jnz		.Ldoblock4
+
+	vzeroupper
+	pop		%r13
+	pop		%r12
+	pop		%rbx
+	ret
+ENDPROC(poly1305_4block_avx2)
diff --git a/arch/x86/crypto/poly1305-sse2-x86_64.S b/arch/x86/crypto/poly1305-sse2-x86_64.S
new file mode 100644
index 0000000..338c748
--- /dev/null
+++ b/arch/x86/crypto/poly1305-sse2-x86_64.S
@@ -0,0 +1,582 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.data
+.align 16
+
+ANMASK:	.octa 0x0000000003ffffff0000000003ffffff
+ORMASK:	.octa 0x00000000010000000000000001000000
+
+.text
+
+#define h0 0x00(%rdi)
+#define h1 0x04(%rdi)
+#define h2 0x08(%rdi)
+#define h3 0x0c(%rdi)
+#define h4 0x10(%rdi)
+#define r0 0x00(%rdx)
+#define r1 0x04(%rdx)
+#define r2 0x08(%rdx)
+#define r3 0x0c(%rdx)
+#define r4 0x10(%rdx)
+#define s1 0x00(%rsp)
+#define s2 0x04(%rsp)
+#define s3 0x08(%rsp)
+#define s4 0x0c(%rsp)
+#define m %rsi
+#define h01 %xmm0
+#define h23 %xmm1
+#define h44 %xmm2
+#define t1 %xmm3
+#define t2 %xmm4
+#define t3 %xmm5
+#define t4 %xmm6
+#define mask %xmm7
+#define d0 %r8
+#define d1 %r9
+#define d2 %r10
+#define d3 %r11
+#define d4 %r12
+
+ENTRY(poly1305_block_sse2)
+	# %rdi: Accumulator h[5]
+	# %rsi: 16 byte input block m
+	# %rdx: Poly1305 key r[5]
+	# %rcx: Block count
+
+	# This single block variant tries to improve performance by doing two
+	# multiplications in parallel using SSE instructions. There is quite
+	# some quadword packing involved, hence the speedup is marginal.
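+	#
+	# h and r are kept as five 26-bit limbs (radix 2^26) so that limb
+	# products and their sums fit in 64 bits.  Because 2^130 == 5 modulo
+	# 2^130 - 5, limbs that carry out past 2^130 are folded back in
+	# multiplied by 5, which is why s1..s4 = r1..r4 * 5 are precomputed.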
+
+	push		%rbx
+	push		%r12
+	sub		$0x10,%rsp
+
+	# s1..s4 = r1..r4 * 5
+	mov		r1,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s1
+	mov		r2,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s2
+	mov		r3,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s3
+	mov		r4,%eax
+	lea		(%eax,%eax,4),%eax
+	mov		%eax,s4
+
+	movdqa		ANMASK(%rip),mask
+
+.Ldoblock:
+	# h01 = [0, h1, 0, h0]
+	# h23 = [0, h3, 0, h2]
+	# h44 = [0, h4, 0, h4]
+	movd		h0,h01
+	movd		h1,t1
+	movd		h2,h23
+	movd		h3,t2
+	movd		h4,h44
+	punpcklqdq	t1,h01
+	punpcklqdq	t2,h23
+	punpcklqdq	h44,h44
+
+	# h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ]
+	movd		0x00(m),t1
+	movd		0x03(m),t2
+	psrld		$2,t2
+	punpcklqdq	t2,t1
+	pand		mask,t1
+	paddd		t1,h01
+	# h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ]
+	movd		0x06(m),t1
+	movd		0x09(m),t2
+	psrld		$4,t1
+	psrld		$6,t2
+	punpcklqdq	t2,t1
+	pand		mask,t1
+	paddd		t1,h23
+	# h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ]
+	mov		0x0c(m),%eax
+	shr		$8,%eax
+	or		$0x01000000,%eax
+	movd		%eax,t1
+	pshufd		$0xc4,t1,t1
+	paddd		t1,h44
+
+	# t1[0] = h0 * r0 + h2 * s3
+	# t1[1] = h1 * s4 + h3 * s2
+	movd		r0,t1
+	movd		s4,t2
+	punpcklqdq	t2,t1
+	pmuludq		h01,t1
+	movd		s3,t2
+	movd		s2,t3
+	punpcklqdq	t3,t2
+	pmuludq		h23,t2
+	paddq		t2,t1
+	# t2[0] = h0 * r1 + h2 * s4
+	# t2[1] = h1 * r0 + h3 * s3
+	movd		r1,t2
+	movd		r0,t3
+	punpcklqdq	t3,t2
+	pmuludq		h01,t2
+	movd		s4,t3
+	movd		s3,t4
+	punpcklqdq	t4,t3
+	pmuludq		h23,t3
+	paddq		t3,t2
+	# t3[0] = h4 * s1
+	# t3[1] = h4 * s2
+	movd		s1,t3
+	movd		s2,t4
+	punpcklqdq	t4,t3
+	pmuludq		h44,t3
+	# d0 = t1[0] + t1[1] + t3[0]
+	# d1 = t2[0] + t2[1] + t3[1]
+	movdqa		t1,t4
+	punpcklqdq	t2,t4
+	punpckhqdq	t2,t1
+	paddq		t4,t1
+	paddq		t3,t1
+	movq		t1,d0
+	psrldq		$8,t1
+	movq		t1,d1
+
+	# t1[0] = h0 * r2 + h2 * r0
+	# t1[1] = h1 * r1 + h3 * s4
+	movd		r2,t1
+	movd		r1,t2
+	punpcklqdq 	t2,t1
+	pmuludq		h01,t1
+	movd		r0,t2
+	movd		s4,t3
+	punpcklqdq	t3,t2
+	pmuludq		h23,t2
+	paddq		t2,t1
+	# t2[0] = h0 * r3 + h2 * r1
+	# t2[1] = h1 * r2 + h3 * r0
+	movd		r3,t2
+	movd		r2,t3
+	punpcklqdq	t3,t2
+	pmuludq		h01,t2
+	movd		r1,t3
+	movd		r0,t4
+	punpcklqdq	t4,t3
+	pmuludq		h23,t3
+	paddq		t3,t2
+	# t3[0] = h4 * s3
+	# t3[1] = h4 * s4
+	movd		s3,t3
+	movd		s4,t4
+	punpcklqdq	t4,t3
+	pmuludq		h44,t3
+	# d2 = t1[0] + t1[1] + t3[0]
+	# d3 = t2[0] + t2[1] + t3[1]
+	movdqa		t1,t4
+	punpcklqdq	t2,t4
+	punpckhqdq	t2,t1
+	paddq		t4,t1
+	paddq		t3,t1
+	movq		t1,d2
+	psrldq		$8,t1
+	movq		t1,d3
+
+	# t1[0] = h0 * r4 + h2 * r2
+	# t1[1] = h1 * r3 + h3 * r1
+	movd		r4,t1
+	movd		r3,t2
+	punpcklqdq	t2,t1
+	pmuludq		h01,t1
+	movd		r2,t2
+	movd		r1,t3
+	punpcklqdq	t3,t2
+	pmuludq		h23,t2
+	paddq		t2,t1
+	# t3[0] = h4 * r0
+	movd		r0,t3
+	pmuludq		h44,t3
+	# d4 = t1[0] + t1[1] + t3[0]
+	movdqa		t1,t4
+	psrldq		$8,t4
+	paddq		t4,t1
+	paddq		t3,t1
+	movq		t1,d4
+
+	# d1 += d0 >> 26
+	mov		d0,%rax
+	shr		$26,%rax
+	add		%rax,d1
+	# h0 = d0 & 0x3ffffff
+	mov		d0,%rbx
+	and		$0x3ffffff,%ebx
+
+	# d2 += d1 >> 26
+	mov		d1,%rax
+	shr		$26,%rax
+	add		%rax,d2
+	# h1 = d1 & 0x3ffffff
+	mov		d1,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h1
+
+	# d3 += d2 >> 26
+	mov		d2,%rax
+	shr		$26,%rax
+	add		%rax,d3
+	# h2 = d2 & 0x3ffffff
+	mov		d2,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h2
+
+	# d4 += d3 >> 26
+	mov		d3,%rax
+	shr		$26,%rax
+	add		%rax,d4
+	# h3 = d3 & 0x3ffffff
+	mov		d3,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h3
+
+	# h0 += (d4 >> 26) * 5
+	mov		d4,%rax
+	shr		$26,%rax
+	lea		(%eax,%eax,4),%eax
+	add		%eax,%ebx
+	# h4 = d4 & 0x3ffffff
+	mov		d4,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h4
+
+	# h1 += h0 >> 26
+	mov		%ebx,%eax
+	shr		$26,%eax
+	add		%eax,h1
+	# h0 = h0 & 0x3ffffff
+	andl		$0x3ffffff,%ebx
+	mov		%ebx,h0
+
+	add		$0x10,m
+	dec		%rcx
+	jnz		.Ldoblock
+
+	add		$0x10,%rsp
+	pop		%r12
+	pop		%rbx
+	ret
+ENDPROC(poly1305_block_sse2)
+
+
+#define u0 0x00(%r8)
+#define u1 0x04(%r8)
+#define u2 0x08(%r8)
+#define u3 0x0c(%r8)
+#define u4 0x10(%r8)
+#define hc0 %xmm0
+#define hc1 %xmm1
+#define hc2 %xmm2
+#define hc3 %xmm5
+#define hc4 %xmm6
+#define ru0 %xmm7
+#define ru1 %xmm8
+#define ru2 %xmm9
+#define ru3 %xmm10
+#define ru4 %xmm11
+#define sv1 %xmm12
+#define sv2 %xmm13
+#define sv3 %xmm14
+#define sv4 %xmm15
+#undef d0
+#define d0 %r13
+
+ENTRY(poly1305_2block_sse2)
+	# %rdi: Accumulator h[5]
+	# %rsi: 16 byte input block m
+	# %rdx: Poly1305 key r[5]
+	# %rcx: Doubleblock count
+	# %r8:  Poly1305 derived key r^2 u[5]
+
+	# This two-block variant further improves performance by using loop
+	# unrolled block processing. This is more straightforward and does
+	# less byte shuffling, but requires a second Poly1305 key r^2:
+	# h = (h + m) * r    =>    h = (h + m1) * r^2 + m2 * r
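+	# (expanding h = (h + m) * r over two blocks:
+	#  ((h + m1) * r + m2) * r = (h + m1) * r^2 + m2 * r)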
+
+	push		%rbx
+	push		%r12
+	push		%r13
+
+	# combine r0,u0
+	movd		u0,ru0
+	movd		r0,t1
+	punpcklqdq	t1,ru0
+
+	# combine r1,u1 and s1=r1*5,v1=u1*5
+	movd		u1,ru1
+	movd		r1,t1
+	punpcklqdq	t1,ru1
+	movdqa		ru1,sv1
+	pslld		$2,sv1
+	paddd		ru1,sv1
+
+	# combine r2,u2 and s2=r2*5,v2=u2*5
+	movd		u2,ru2
+	movd		r2,t1
+	punpcklqdq	t1,ru2
+	movdqa		ru2,sv2
+	pslld		$2,sv2
+	paddd		ru2,sv2
+
+	# combine r3,u3 and s3=r3*5,v3=u3*5
+	movd		u3,ru3
+	movd		r3,t1
+	punpcklqdq	t1,ru3
+	movdqa		ru3,sv3
+	pslld		$2,sv3
+	paddd		ru3,sv3
+
+	# combine r4,u4 and s4=r4*5,v4=u4*5
+	movd		u4,ru4
+	movd		r4,t1
+	punpcklqdq	t1,ru4
+	movdqa		ru4,sv4
+	pslld		$2,sv4
+	paddd		ru4,sv4
+
+.Ldoblock2:
+	# hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ]
+	movd		0x00(m),hc0
+	movd		0x10(m),t1
+	punpcklqdq	t1,hc0
+	pand		ANMASK(%rip),hc0
+	movd		h0,t1
+	paddd		t1,hc0
+	# hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ]
+	movd		0x03(m),hc1
+	movd		0x13(m),t1
+	punpcklqdq	t1,hc1
+	psrld		$2,hc1
+	pand		ANMASK(%rip),hc1
+	movd		h1,t1
+	paddd		t1,hc1
+	# hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ]
+	movd		0x06(m),hc2
+	movd		0x16(m),t1
+	punpcklqdq	t1,hc2
+	psrld		$4,hc2
+	pand		ANMASK(%rip),hc2
+	movd		h2,t1
+	paddd		t1,hc2
+	# hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ]
+	movd		0x09(m),hc3
+	movd		0x19(m),t1
+	punpcklqdq	t1,hc3
+	psrld		$6,hc3
+	pand		ANMASK(%rip),hc3
+	movd		h3,t1
+	paddd		t1,hc3
+	# hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ]
+	movd		0x0c(m),hc4
+	movd		0x1c(m),t1
+	punpcklqdq	t1,hc4
+	psrld		$8,hc4
+	por		ORMASK(%rip),hc4
+	movd		h4,t1
+	paddd		t1,hc4
+
+	# t1 = [ hc0[1] * r0, hc0[0] * u0 ]
+	movdqa		ru0,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * s4, hc1[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * s3, hc2[0] * v3 ]
+	movdqa		sv3,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * s2, hc3[0] * v2 ]
+	movdqa		sv2,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s1, hc4[0] * v1 ]
+	movdqa		sv1,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d0 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d0
+
+	# t1 = [ hc0[1] * r1, hc0[0] * u1 ]
+	movdqa		ru1,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r0, hc1[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * s4, hc2[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * s3, hc3[0] * v3 ]
+	movdqa		sv3,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s2, hc4[0] * v2 ]
+	movdqa		sv2,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d1 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d1
+
+	# t1 = [ hc0[1] * r2, hc0[0] * u2 ]
+	movdqa		ru2,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r1, hc1[0] * u1 ]
+	movdqa		ru1,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * r0, hc2[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * s4, hc3[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s3, hc4[0] * v3 ]
+	movdqa		sv3,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d2 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d2
+
+	# t1 = [ hc0[1] * r3, hc0[0] * u3 ]
+	movdqa		ru3,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r2, hc1[0] * u2 ]
+	movdqa		ru2,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * r1, hc2[0] * u1 ]
+	movdqa		ru1,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * r0, hc3[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * s4, hc4[0] * v4 ]
+	movdqa		sv4,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d3 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d3
+
+	# t1 = [ hc0[1] * r4, hc0[0] * u4 ]
+	movdqa		ru4,t1
+	pmuludq		hc0,t1
+	# t1 += [ hc1[1] * r3, hc1[0] * u3 ]
+	movdqa		ru3,t2
+	pmuludq		hc1,t2
+	paddq		t2,t1
+	# t1 += [ hc2[1] * r2, hc2[0] * u2 ]
+	movdqa		ru2,t2
+	pmuludq		hc2,t2
+	paddq		t2,t1
+	# t1 += [ hc3[1] * r1, hc3[0] * u1 ]
+	movdqa		ru1,t2
+	pmuludq		hc3,t2
+	paddq		t2,t1
+	# t1 += [ hc4[1] * r0, hc4[0] * u0 ]
+	movdqa		ru0,t2
+	pmuludq		hc4,t2
+	paddq		t2,t1
+	# d4 = t1[0] + t1[1]
+	movdqa		t1,t2
+	psrldq		$8,t2
+	paddq		t2,t1
+	movq		t1,d4
+
+	# d1 += d0 >> 26
+	mov		d0,%rax
+	shr		$26,%rax
+	add		%rax,d1
+	# h0 = d0 & 0x3ffffff
+	mov		d0,%rbx
+	and		$0x3ffffff,%ebx
+
+	# d2 += d1 >> 26
+	mov		d1,%rax
+	shr		$26,%rax
+	add		%rax,d2
+	# h1 = d1 & 0x3ffffff
+	mov		d1,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h1
+
+	# d3 += d2 >> 26
+	mov		d2,%rax
+	shr		$26,%rax
+	add		%rax,d3
+	# h2 = d2 & 0x3ffffff
+	mov		d2,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h2
+
+	# d4 += d3 >> 26
+	mov		d3,%rax
+	shr		$26,%rax
+	add		%rax,d4
+	# h3 = d3 & 0x3ffffff
+	mov		d3,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h3
+
+	# h0 += (d4 >> 26) * 5
+	mov		d4,%rax
+	shr		$26,%rax
+	lea		(%eax,%eax,4),%eax
+	add		%eax,%ebx
+	# h4 = d4 & 0x3ffffff
+	mov		d4,%rax
+	and		$0x3ffffff,%eax
+	mov		%eax,h4
+
+	# h1 += h0 >> 26
+	mov		%ebx,%eax
+	shr		$26,%eax
+	add		%eax,h1
+	# h0 = h0 & 0x3ffffff
+	andl		$0x3ffffff,%ebx
+	mov		%ebx,h0
+
+	add		$0x20,m
+	dec		%rcx
+	jnz		.Ldoblock2
+
+	pop		%r13
+	pop		%r12
+	pop		%rbx
+	ret
+ENDPROC(poly1305_2block_sse2)
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
new file mode 100644
index 0000000..f7170d7
--- /dev/null
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -0,0 +1,207 @@
+/*
+ * Poly1305 authenticator algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/poly1305.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+struct poly1305_simd_desc_ctx {
+	struct poly1305_desc_ctx base;
+	/* derived key u set? */
+	bool uset;
+#ifdef CONFIG_AS_AVX2
+	/* derived keys r^3, r^4 set? */
+	bool wset;
+#endif
+	/* derived Poly1305 key r^2 */
+	u32 u[5];
+	/* ... silently appended r^3 and r^4 when using AVX2 */
+};
+
+asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src,
+				    const u32 *r, unsigned int blocks);
+asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r,
+				     unsigned int blocks, const u32 *u);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r,
+				     unsigned int blocks, const u32 *u);
+static bool poly1305_use_avx2;
+#endif
+
+static int poly1305_simd_init(struct shash_desc *desc)
+{
+	struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc);
+
+	sctx->uset = false;
+#ifdef CONFIG_AS_AVX2
+	sctx->wset = false;
+#endif
+
+	return crypto_poly1305_init(desc);
+}
+
+static void poly1305_simd_mult(u32 *a, const u32 *b)
+{
+	u8 m[POLY1305_BLOCK_SIZE];
+
+	memset(m, 0, sizeof(m));
+	/* The poly1305 block function adds a hi-bit to the accumulator which
+	 * we don't need for key multiplication; compensate for it. */
+	a[4] -= 1 << 24;
+	poly1305_block_sse2(a, m, b, 1);
+}
+
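+/*
+ * Feed whole blocks to the widest routine the data and CPU allow: four
+ * blocks per call with AVX2, two with SSE2, then single blocks.  The key
+ * powers r^2 (u) and, for AVX2, r^3 and r^4 are derived lazily on first use
+ * via poly1305_simd_mult().  Returns the number of leftover bytes, which is
+ * less than one block.
+ */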
+static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
+					 const u8 *src, unsigned int srclen)
+{
+	struct poly1305_simd_desc_ctx *sctx;
+	unsigned int blocks, datalen;
+
+	BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base));
+	sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base);
+
+	if (unlikely(!dctx->sset)) {
+		datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+		src += srclen - datalen;
+		srclen = datalen;
+	}
+
+#ifdef CONFIG_AS_AVX2
+	if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
+		if (unlikely(!sctx->wset)) {
+			if (!sctx->uset) {
+				memcpy(sctx->u, dctx->r, sizeof(sctx->u));
+				poly1305_simd_mult(sctx->u, dctx->r);
+				sctx->uset = true;
+			}
+			memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
+			poly1305_simd_mult(sctx->u + 5, dctx->r);
+			memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
+			poly1305_simd_mult(sctx->u + 10, dctx->r);
+			sctx->wset = true;
+		}
+		blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
+		poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);
+		src += POLY1305_BLOCK_SIZE * 4 * blocks;
+		srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
+	}
+#endif
+	if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
+		if (unlikely(!sctx->uset)) {
+			memcpy(sctx->u, dctx->r, sizeof(sctx->u));
+			poly1305_simd_mult(sctx->u, dctx->r);
+			sctx->uset = true;
+		}
+		blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
+		poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);
+		src += POLY1305_BLOCK_SIZE * 2 * blocks;
+		srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
+	}
+	if (srclen >= POLY1305_BLOCK_SIZE) {
+		poly1305_block_sse2(dctx->h, src, dctx->r, 1);
+		srclen -= POLY1305_BLOCK_SIZE;
+	}
+	return srclen;
+}
+
+static int poly1305_simd_update(struct shash_desc *desc,
+				const u8 *src, unsigned int srclen)
+{
+	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+	unsigned int bytes;
+
+	/* kernel_fpu_begin/end is costly, use fallback for small updates */
+	if (srclen <= 288 || !may_use_simd())
+		return crypto_poly1305_update(desc, src, srclen);
+
+	kernel_fpu_begin();
+
+	if (unlikely(dctx->buflen)) {
+		bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
+		memcpy(dctx->buf + dctx->buflen, src, bytes);
+		src += bytes;
+		srclen -= bytes;
+		dctx->buflen += bytes;
+
+		if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+			poly1305_simd_blocks(dctx, dctx->buf,
+					     POLY1305_BLOCK_SIZE);
+			dctx->buflen = 0;
+		}
+	}
+
+	if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+		bytes = poly1305_simd_blocks(dctx, src, srclen);
+		src += srclen - bytes;
+		srclen = bytes;
+	}
+
+	kernel_fpu_end();
+
+	if (unlikely(srclen)) {
+		dctx->buflen = srclen;
+		memcpy(dctx->buf, src, srclen);
+	}
+
+	return 0;
+}
+
+static struct shash_alg alg = {
+	.digestsize	= POLY1305_DIGEST_SIZE,
+	.init		= poly1305_simd_init,
+	.update		= poly1305_simd_update,
+	.final		= crypto_poly1305_final,
+	.setkey		= crypto_poly1305_setkey,
+	.descsize	= sizeof(struct poly1305_simd_desc_ctx),
+	.base		= {
+		.cra_name		= "poly1305",
+		.cra_driver_name	= "poly1305-simd",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_alignmask		= sizeof(u32) - 1,
+		.cra_blocksize		= POLY1305_BLOCK_SIZE,
+		.cra_module		= THIS_MODULE,
+	},
+};
+
+static int __init poly1305_simd_mod_init(void)
+{
+	if (!cpu_has_xmm2)
+		return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+	poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
+			    cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+	alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
+	if (poly1305_use_avx2)
+		alg.descsize += 10 * sizeof(u32);
+#endif
+	return crypto_register_shash(&alg);
+}
+
+static void __exit poly1305_simd_mod_exit(void)
+{
+	crypto_unregister_shash(&alg);
+}
+
+module_init(poly1305_simd_mod_init);
+module_exit(poly1305_simd_mod_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("Poly1305 authenticator");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-simd");
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 7a14497..bd55ded 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the x86 low level entry code
 #
 obj-y				:= entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+obj-y				+= common.o
 
 obj-y				+= vdso/
 obj-y				+= vsyscall/
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index f4e6308..3c71dd9 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -135,9 +135,6 @@
 	movq %rbp, 4*8+\offset(%rsp)
 	movq %rbx, 5*8+\offset(%rsp)
 	.endm
-	.macro SAVE_EXTRA_REGS_RBP offset=0
-	movq %rbp, 4*8+\offset(%rsp)
-	.endm
 
 	.macro RESTORE_EXTRA_REGS offset=0
 	movq 0*8+\offset(%rsp), %r15
@@ -193,12 +190,6 @@
 	.macro RESTORE_C_REGS_EXCEPT_RCX_R11
 	RESTORE_C_REGS_HELPER 1,0,0,1,1
 	.endm
-	.macro RESTORE_RSI_RDI
-	RESTORE_C_REGS_HELPER 0,0,0,0,0
-	.endm
-	.macro RESTORE_RSI_RDI_RDX
-	RESTORE_C_REGS_HELPER 0,0,0,0,1
-	.endm
 
 	.macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
 	subq $-(15*8+\addskip), %rsp
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
new file mode 100644
index 0000000..80dcc92
--- /dev/null
+++ b/arch/x86/entry/common.c
@@ -0,0 +1,318 @@
+/*
+ * common.c - C code for kernel entry and exit
+ * Copyright (c) 2015 Andrew Lutomirski
+ * GPL v2
+ *
+ * Based on asm and ptrace code by many authors.  The code here originated
+ * in ptrace.c and signal.c.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/signal.h>
+#include <linux/export.h>
+#include <linux/context_tracking.h>
+#include <linux/user-return-notifier.h>
+#include <linux/uprobes.h>
+
+#include <asm/desc.h>
+#include <asm/traps.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+#ifdef CONFIG_CONTEXT_TRACKING
+/* Called on entry from user mode with IRQs off. */
+__visible void enter_from_user_mode(void)
+{
+	CT_WARN_ON(ct_state() != CONTEXT_USER);
+	user_exit();
+}
+#endif
+
+static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
+{
+#ifdef CONFIG_X86_64
+	if (arch == AUDIT_ARCH_X86_64) {
+		audit_syscall_entry(regs->orig_ax, regs->di,
+				    regs->si, regs->dx, regs->r10);
+	} else
+#endif
+	{
+		audit_syscall_entry(regs->orig_ax, regs->bx,
+				    regs->cx, regs->dx, regs->si);
+	}
+}
+
+/*
+ * We can return 0 to resume the syscall or anything else to go to phase
+ * 2.  If we resume the syscall, we need to put something appropriate in
+ * regs->orig_ax.
+ *
+ * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
+ * are fully functional.
+ *
+ * For phase 2's benefit, our return value is:
+ * 0:			resume the syscall
+ * 1:			go to phase 2; no seccomp phase 2 needed
+ * anything else:	go to phase 2; pass return value to seccomp
+ */
+unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
+{
+	unsigned long ret = 0;
+	u32 work;
+
+	BUG_ON(regs != task_pt_regs(current));
+
+	work = ACCESS_ONCE(current_thread_info()->flags) &
+		_TIF_WORK_SYSCALL_ENTRY;
+
+#ifdef CONFIG_CONTEXT_TRACKING
+	/*
+	 * If TIF_NOHZ is set, we are required to call user_exit() before
+	 * doing anything that could touch RCU.
+	 */
+	if (work & _TIF_NOHZ) {
+		enter_from_user_mode();
+		work &= ~_TIF_NOHZ;
+	}
+#endif
+
+#ifdef CONFIG_SECCOMP
+	/*
+	 * Do seccomp first -- it should minimize exposure of other
+	 * code, and keeping seccomp fast is probably more valuable
+	 * than the rest of this.
+	 */
+	if (work & _TIF_SECCOMP) {
+		struct seccomp_data sd;
+
+		sd.arch = arch;
+		sd.nr = regs->orig_ax;
+		sd.instruction_pointer = regs->ip;
+#ifdef CONFIG_X86_64
+		if (arch == AUDIT_ARCH_X86_64) {
+			sd.args[0] = regs->di;
+			sd.args[1] = regs->si;
+			sd.args[2] = regs->dx;
+			sd.args[3] = regs->r10;
+			sd.args[4] = regs->r8;
+			sd.args[5] = regs->r9;
+		} else
+#endif
+		{
+			sd.args[0] = regs->bx;
+			sd.args[1] = regs->cx;
+			sd.args[2] = regs->dx;
+			sd.args[3] = regs->si;
+			sd.args[4] = regs->di;
+			sd.args[5] = regs->bp;
+		}
+
+		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
+		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);
+
+		ret = seccomp_phase1(&sd);
+		if (ret == SECCOMP_PHASE1_SKIP) {
+			regs->orig_ax = -1;
+			ret = 0;
+		} else if (ret != SECCOMP_PHASE1_OK) {
+			return ret;  /* Go directly to phase 2 */
+		}
+
+		work &= ~_TIF_SECCOMP;
+	}
+#endif
+
+	/* Do our best to finish without phase 2. */
+	if (work == 0)
+		return ret;  /* seccomp and/or nohz only (ret == 0 here) */
+
+#ifdef CONFIG_AUDITSYSCALL
+	if (work == _TIF_SYSCALL_AUDIT) {
+		/*
+		 * If there is no more work to be done except auditing,
+		 * then audit in phase 1.  Phase 2 always audits, so, if
+		 * we audit here, then we can't go on to phase 2.
+		 */
+		do_audit_syscall_entry(regs, arch);
+		return 0;
+	}
+#endif
+
+	return 1;  /* Something is enabled that we can't handle in phase 1 */
+}
+
+/* Returns the syscall nr to run (which should match regs->orig_ax). */
+long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
+				unsigned long phase1_result)
+{
+	long ret = 0;
+	u32 work = ACCESS_ONCE(current_thread_info()->flags) &
+		_TIF_WORK_SYSCALL_ENTRY;
+
+	BUG_ON(regs != task_pt_regs(current));
+
+	/*
+	 * If we stepped into a sysenter/syscall insn, it trapped in
+	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
+	 * If user-mode had set TF itself, then it's still clear from
+	 * do_debug() and we need to set it again to restore the user
+	 * state.  If we entered on the slow path, TF was already set.
+	 */
+	if (work & _TIF_SINGLESTEP)
+		regs->flags |= X86_EFLAGS_TF;
+
+#ifdef CONFIG_SECCOMP
+	/*
+	 * Call seccomp_phase2 before running the other hooks so that
+	 * they can see any changes made by a seccomp tracer.
+	 */
+	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
+		/* seccomp failures shouldn't expose any additional code. */
+		return -1;
+	}
+#endif
+
+	if (unlikely(work & _TIF_SYSCALL_EMU))
+		ret = -1L;
+
+	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
+	    tracehook_report_syscall_entry(regs))
+		ret = -1L;
+
+	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+		trace_sys_enter(regs, regs->orig_ax);
+
+	do_audit_syscall_entry(regs, arch);
+
+	return ret ?: regs->orig_ax;
+}
+
+long syscall_trace_enter(struct pt_regs *regs)
+{
+	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
+	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
+
+	if (phase1_result == 0)
+		return regs->orig_ax;
+	else
+		return syscall_trace_enter_phase2(regs, arch, phase1_result);
+}
+
+static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
+{
+	unsigned long top_of_stack =
+		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
+	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
+}
+
+/* Called with IRQs disabled. */
+__visible void prepare_exit_to_usermode(struct pt_regs *regs)
+{
+	if (WARN_ON(!irqs_disabled()))
+		local_irq_disable();
+
+	/*
+	 * In order to return to user mode, we need to have IRQs off with
+	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
+	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set.  Several of these flags
+	 * can be set at any time on preemptable kernels if we have IRQs on,
+	 * so we need to loop.  Disabling preemption wouldn't help: doing the
+	 * work to clear some of the flags can sleep.
+	 */
+	while (true) {
+		u32 cached_flags =
+			READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+
+		if (!(cached_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |
+				      _TIF_UPROBE | _TIF_NEED_RESCHED |
+				      _TIF_USER_RETURN_NOTIFY)))
+			break;
+
+		/* We have work to do. */
+		local_irq_enable();
+
+		if (cached_flags & _TIF_NEED_RESCHED)
+			schedule();
+
+		if (cached_flags & _TIF_UPROBE)
+			uprobe_notify_resume(regs);
+
+		/* deal with pending signal delivery */
+		if (cached_flags & _TIF_SIGPENDING)
+			do_signal(regs);
+
+		if (cached_flags & _TIF_NOTIFY_RESUME) {
+			clear_thread_flag(TIF_NOTIFY_RESUME);
+			tracehook_notify_resume(regs);
+		}
+
+		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
+			fire_user_return_notifiers();
+
+		/* Disable IRQs and retry */
+		local_irq_disable();
+	}
+
+	user_enter();
+}
+
+/*
+ * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
+ * state such that we can immediately switch to user mode.
+ */
+__visible void syscall_return_slowpath(struct pt_regs *regs)
+{
+	struct thread_info *ti = pt_regs_to_thread_info(regs);
+	u32 cached_flags = READ_ONCE(ti->flags);
+	bool step;
+
+	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+
+	if (WARN(irqs_disabled(), "syscall %ld left IRQs disabled",
+		 regs->orig_ax))
+		local_irq_enable();
+
+	/*
+	 * First do one-time work.  If these work items are enabled, we
+	 * want to run them exactly once per syscall exit with IRQs on.
+	 */
+	if (cached_flags & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |
+			    _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)) {
+		audit_syscall_exit(regs);
+
+		if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
+			trace_sys_exit(regs, regs->ax);
+
+		/*
+		 * If TIF_SYSCALL_EMU is set, we only get here because of
+		 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
+		 * We already reported this syscall instruction in
+		 * syscall_trace_enter().
+		 */
+		step = unlikely(
+			(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
+			== _TIF_SINGLESTEP);
+		if (step || cached_flags & _TIF_SYSCALL_TRACE)
+			tracehook_report_syscall_exit(regs, step);
+	}
+
+#ifdef CONFIG_COMPAT
+	/*
+	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
+	 * returning to user mode.
+	 */
+	ti->status &= ~TS_COMPAT;
+#endif
+
+	local_irq_disable();
+	prepare_exit_to_usermode(regs);
+}
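The comment at the top of prepare_exit_to_usermode() above is the crux of the rewrite: the exit-work flags must be re-checked with IRQs off, otherwise an interrupt could queue new work (set TIF_NEED_RESCHED, deliver a signal) after the check and the kernel would return to user mode without handling it. A rough sketch of that pattern, with EXIT_WORK_FLAGS and handle_exit_work() as hypothetical stand-ins for the flag mask and per-flag handlers shown above, and ti the task's thread_info:

	local_irq_disable();
	while (READ_ONCE(ti->flags) & EXIT_WORK_FLAGS) {
		local_irq_enable();		/* the handlers may sleep */
		handle_exit_work(regs);		/* schedule(), do_signal(), ... */
		local_irq_disable();		/* re-check atomically w.r.t. the return */
	}
	/* IRQs stay off from here until the actual switch to user mode */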
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 21dc60a..b2909bf 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -45,16 +45,6 @@
 #include <asm/asm.h>
 #include <asm/smap.h>
 
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_LE		0x40000000
-
-#ifndef CONFIG_AUDITSYSCALL
-# define sysenter_audit		syscall_trace_entry
-# define sysexit_audit		syscall_exit_work
-#endif
-
 	.section .entry.text, "ax"
 
 /*
@@ -266,14 +256,10 @@
 
 ENTRY(resume_userspace)
 	LOCKDEP_SYS_EXIT
-	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
-						# setting need_resched or sigpending
-						# between sampling and the iret
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	movl	TI_flags(%ebp), %ecx
-	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done on
-						# int/exception return?
-	jne	work_pending
+	movl	%esp, %eax
+	call	prepare_exit_to_usermode
 	jmp	restore_all
 END(ret_from_exception)
 
@@ -339,7 +325,7 @@
 	GET_THREAD_INFO(%ebp)
 
 	testl	$_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
-	jnz	sysenter_audit
+	jnz	syscall_trace_entry
 sysenter_do_call:
 	cmpl	$(NR_syscalls), %eax
 	jae	sysenter_badsys
@@ -351,7 +337,7 @@
 	TRACE_IRQS_OFF
 	movl	TI_flags(%ebp), %ecx
 	testl	$_TIF_ALLWORK_MASK, %ecx
-	jnz	sysexit_audit
+	jnz	syscall_exit_work_irqs_off
 sysenter_exit:
 /* if something modifies registers it must also disable sysexit */
 	movl	PT_EIP(%esp), %edx
@@ -362,40 +348,6 @@
 	PTGS_TO_GS
 	ENABLE_INTERRUPTS_SYSEXIT
 
-#ifdef CONFIG_AUDITSYSCALL
-sysenter_audit:
-	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
-	jnz	syscall_trace_entry
-	/* movl	PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
-	movl	PT_EBX(%esp), %edx		/* ebx/a0: 2nd arg to audit */
-	/* movl	PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
-	pushl	PT_ESI(%esp)			/* a3: 5th arg */
-	pushl	PT_EDX+4(%esp)			/* a2: 4th arg */
-	call	__audit_syscall_entry
-	popl	%ecx				/* get that remapped edx off the stack */
-	popl	%ecx				/* get that remapped esi off the stack */
-	movl	PT_EAX(%esp), %eax		/* reload syscall number */
-	jmp	sysenter_do_call
-
-sysexit_audit:
-	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
-	jnz	syscall_exit_work
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_ANY)
-	movl	%eax, %edx			/* second arg, syscall return value */
-	cmpl	$-MAX_ERRNO, %eax		/* is it an error ? */
-	setbe %al				/* 1 if so, 0 if not */
-	movzbl %al, %eax			/* zero-extend that */
-	call	__audit_syscall_exit
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	TRACE_IRQS_OFF
-	movl	TI_flags(%ebp), %ecx
-	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
-	jnz	syscall_exit_work
-	movl	PT_EAX(%esp), %eax		/* reload syscall return value */
-	jmp	sysenter_exit
-#endif
-
 .pushsection .fixup, "ax"
 2:	movl	$0, PT_FS(%esp)
 	jmp	1b
@@ -421,13 +373,7 @@
 	movl	%eax, PT_EAX(%esp)		# store the return value
 syscall_exit:
 	LOCKDEP_SYS_EXIT
-	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
-						# setting need_resched or sigpending
-						# between sampling and the iret
-	TRACE_IRQS_OFF
-	movl	TI_flags(%ebp), %ecx
-	testl	$_TIF_ALLWORK_MASK, %ecx	# current->work
-	jnz	syscall_exit_work
+	jmp	syscall_exit_work
 
 restore_all:
 	TRACE_IRQS_IRET
@@ -504,57 +450,6 @@
 #endif
 ENDPROC(entry_INT80_32)
 
-	# perform work that needs to be done immediately before resumption
-	ALIGN
-work_pending:
-	testb	$_TIF_NEED_RESCHED, %cl
-	jz	work_notifysig
-work_resched:
-	call	schedule
-	LOCKDEP_SYS_EXIT
-	DISABLE_INTERRUPTS(CLBR_ANY)		# make sure we don't miss an interrupt
-						# setting need_resched or sigpending
-						# between sampling and the iret
-	TRACE_IRQS_OFF
-	movl	TI_flags(%ebp), %ecx
-	andl	$_TIF_WORK_MASK, %ecx		# is there any work to be done other
-						# than syscall tracing?
-	jz	restore_all
-	testb	$_TIF_NEED_RESCHED, %cl
-	jnz	work_resched
-
-work_notifysig:					# deal with pending signals and
-						# notify-resume requests
-#ifdef CONFIG_VM86
-	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esp)
-	movl	%esp, %eax
-	jnz	work_notifysig_v86		# returning to kernel-space or
-						# vm86-space
-1:
-#else
-	movl	%esp, %eax
-#endif
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	movb	PT_CS(%esp), %bl
-	andb	$SEGMENT_RPL_MASK, %bl
-	cmpb	$USER_RPL, %bl
-	jb	resume_kernel
-	xorl	%edx, %edx
-	call	do_notify_resume
-	jmp	resume_userspace
-
-#ifdef CONFIG_VM86
-	ALIGN
-work_notifysig_v86:
-	pushl	%ecx				# save ti_flags for do_notify_resume
-	call	save_v86_state			# %eax contains pt_regs pointer
-	popl	%ecx
-	movl	%eax, %esp
-	jmp	1b
-#endif
-END(work_pending)
-
 	# perform syscall exit tracing
 	ALIGN
 syscall_trace_entry:
@@ -569,15 +464,14 @@
 
 	# perform syscall exit tracing
 	ALIGN
-syscall_exit_work:
-	testl	$_TIF_WORK_SYSCALL_EXIT, %ecx
-	jz	work_pending
+syscall_exit_work_irqs_off:
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_ANY)		# could let syscall_trace_leave() call
-						# schedule() instead
+	ENABLE_INTERRUPTS(CLBR_ANY)
+
+syscall_exit_work:
 	movl	%esp, %eax
-	call	syscall_trace_leave
-	jmp	resume_userspace
+	call	syscall_return_slowpath
+	jmp	restore_all
 END(syscall_exit_work)
 
 syscall_fault:
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8cb3e43..d303318 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -33,7 +33,6 @@
 #include <asm/paravirt.h>
 #include <asm/percpu.h>
 #include <asm/asm.h>
-#include <asm/context_tracking.h>
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <linux/err.h>
@@ -229,6 +228,11 @@
 	 */
 	USERGS_SYSRET64
 
+GLOBAL(int_ret_from_sys_call_irqs_off)
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	jmp int_ret_from_sys_call
+
 	/* Do syscall entry tracing */
 tracesys:
 	movq	%rsp, %rdi
@@ -272,69 +276,11 @@
  * Has correct iret frame.
  */
 GLOBAL(int_ret_from_sys_call)
-	DISABLE_INTERRUPTS(CLBR_NONE)
-int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
-	TRACE_IRQS_OFF
-	movl	$_TIF_ALLWORK_MASK, %edi
-	/* edi:	mask to check */
-GLOBAL(int_with_check)
-	LOCKDEP_SYS_EXIT_IRQ
-	GET_THREAD_INFO(%rcx)
-	movl	TI_flags(%rcx), %edx
-	andl	%edi, %edx
-	jnz	int_careful
-	andl	$~TS_COMPAT, TI_status(%rcx)
-	jmp	syscall_return
-
-	/*
-	 * Either reschedule or signal or syscall exit tracking needed.
-	 * First do a reschedule test.
-	 * edx:	work, edi: workmask
-	 */
-int_careful:
-	bt	$TIF_NEED_RESCHED, %edx
-	jnc	int_very_careful
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq	%rdi
-	SCHEDULE_USER
-	popq	%rdi
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	jmp	int_with_check
-
-	/* handle signals and tracing -- both require a full pt_regs */
-int_very_careful:
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_EXTRA_REGS
-	/* Check for syscall exit trace */
-	testl	$_TIF_WORK_SYSCALL_EXIT, %edx
-	jz	int_signal
-	pushq	%rdi
-	leaq	8(%rsp), %rdi			/* &ptregs -> arg1 */
-	call	syscall_trace_leave
-	popq	%rdi
-	andl	$~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi
-	jmp	int_restore_rest
-
-int_signal:
-	testl	$_TIF_DO_NOTIFY_MASK, %edx
-	jz	1f
-	movq	%rsp, %rdi			/* &ptregs -> arg1 */
-	xorl	%esi, %esi			/* oldset -> arg2 */
-	call	do_notify_resume
-1:	movl	$_TIF_WORK_MASK, %edi
-int_restore_rest:
+	movq	%rsp, %rdi
+	call	syscall_return_slowpath	/* returns with IRQs disabled */
 	RESTORE_EXTRA_REGS
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	jmp	int_with_check
-
-syscall_return:
-	/* The IRETQ could re-enable interrupts: */
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	TRACE_IRQS_IRETQ
+	TRACE_IRQS_IRETQ		/* we're about to change IF */
 
 	/*
 	 * Try to use SYSRET instead of IRET if we're returning to
@@ -555,23 +501,22 @@
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
 	cld
-	/*
-	 * Since nothing in interrupt handling code touches r12...r15 members
-	 * of "struct pt_regs", and since interrupts can nest, we can save
-	 * four stack slots and simultaneously provide
-	 * an unwind-friendly stack layout by saving "truncated" pt_regs
-	 * exactly up to rbp slot, without these members.
-	 */
-	ALLOC_PT_GPREGS_ON_STACK -RBP
-	SAVE_C_REGS -RBP
-	/* this goes to 0(%rsp) for unwinder, not for saving the value: */
-	SAVE_EXTRA_REGS_RBP -RBP
+	ALLOC_PT_GPREGS_ON_STACK
+	SAVE_C_REGS
+	SAVE_EXTRA_REGS
 
-	leaq	-RBP(%rsp), %rdi		/* arg1 for \func (pointer to pt_regs) */
-
-	testb	$3, CS-RBP(%rsp)
+	testb	$3, CS(%rsp)
 	jz	1f
+
+	/*
+	 * IRQ from user mode.  Switch to kernel gsbase and inform context
+	 * tracking that we're in kernel mode.
+	 */
 	SWAPGS
+#ifdef CONFIG_CONTEXT_TRACKING
+	call enter_from_user_mode
+#endif
+
 1:
 	/*
 	 * Save previous stack pointer, optionally switch to interrupt stack.
@@ -580,14 +525,14 @@
 	 * a little cheaper to use a separate counter in the PDA (short of
 	 * moving irq_enter into assembly, which would be too much work)
 	 */
-	movq	%rsp, %rsi
+	movq	%rsp, %rdi
 	incl	PER_CPU_VAR(irq_count)
 	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
-	pushq	%rsi
+	pushq	%rdi
 	/* We entered an interrupt context - irqs are off: */
 	TRACE_IRQS_OFF
 
-	call	\func
+	call	\func	/* rdi points to pt_regs */
 	.endm
 
 	/*
@@ -606,34 +551,19 @@
 	decl	PER_CPU_VAR(irq_count)
 
 	/* Restore saved previous stack */
-	popq	%rsi
-	/* return code expects complete pt_regs - adjust rsp accordingly: */
-	leaq	-RBP(%rsi), %rsp
+	popq	%rsp
 
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
+
 	/* Interrupt came from user space */
-retint_user:
-	GET_THREAD_INFO(%rcx)
-
-	/* %rcx: thread info. Interrupts are off. */
-retint_with_reschedule:
-	movl	$_TIF_WORK_MASK, %edi
-retint_check:
 	LOCKDEP_SYS_EXIT_IRQ
-	movl	TI_flags(%rcx), %edx
-	andl	%edi, %edx
-	jnz	retint_careful
-
-retint_swapgs:					/* return to user-space */
-	/*
-	 * The iretq could re-enable interrupts:
-	 */
-	DISABLE_INTERRUPTS(CLBR_ANY)
+GLOBAL(retint_user)
+	mov	%rsp,%rdi
+	call	prepare_exit_to_usermode
 	TRACE_IRQS_IRETQ
-
 	SWAPGS
-	jmp	restore_c_regs_and_iret
+	jmp	restore_regs_and_iret
 
 /* Returning to kernel space */
 retint_kernel:
@@ -657,6 +587,8 @@
  * At this label, code paths which return to kernel and to user,
  * which come from interrupts/exception and from syscalls, merge.
  */
+restore_regs_and_iret:
+	RESTORE_EXTRA_REGS
 restore_c_regs_and_iret:
 	RESTORE_C_REGS
 	REMOVE_PT_GPREGS_FROM_STACK 8
@@ -707,37 +639,6 @@
 	popq	%rax
 	jmp	native_irq_return_iret
 #endif
-
-	/* edi: workmask, edx: work */
-retint_careful:
-	bt	$TIF_NEED_RESCHED, %edx
-	jnc	retint_signal
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	pushq	%rdi
-	SCHEDULE_USER
-	popq	%rdi
-	GET_THREAD_INFO(%rcx)
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	jmp	retint_check
-
-retint_signal:
-	testl	$_TIF_DO_NOTIFY_MASK, %edx
-	jz	retint_swapgs
-	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	SAVE_EXTRA_REGS
-	movq	$-1, ORIG_RAX(%rsp)
-	xorl	%esi, %esi			/* oldset */
-	movq	%rsp, %rdi			/* &pt_regs */
-	call	do_notify_resume
-	RESTORE_EXTRA_REGS
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	GET_THREAD_INFO(%rcx)
-	jmp	retint_with_reschedule
-
 END(common_interrupt)
 
 /*
@@ -1143,12 +1044,22 @@
 	SAVE_EXTRA_REGS 8
 	xorl	%ebx, %ebx
 	testb	$3, CS+8(%rsp)
-	jz	error_kernelspace
+	jz	.Lerror_kernelspace
 
-	/* We entered from user mode */
+.Lerror_entry_from_usermode_swapgs:
+	/*
+	 * We entered from user mode or we're pretending to have entered
+	 * from user mode due to an IRET fault.
+	 */
 	SWAPGS
 
-error_entry_done:
+.Lerror_entry_from_usermode_after_swapgs:
+#ifdef CONFIG_CONTEXT_TRACKING
+	call enter_from_user_mode
+#endif
+
+.Lerror_entry_done:
+
 	TRACE_IRQS_OFF
 	ret
 
@@ -1158,31 +1069,30 @@
 	 * truncated RIP for IRET exceptions returning to compat mode. Check
 	 * for these here too.
 	 */
-error_kernelspace:
+.Lerror_kernelspace:
 	incl	%ebx
 	leaq	native_irq_return_iret(%rip), %rcx
 	cmpq	%rcx, RIP+8(%rsp)
-	je	error_bad_iret
+	je	.Lerror_bad_iret
 	movl	%ecx, %eax			/* zero extend */
 	cmpq	%rax, RIP+8(%rsp)
-	je	bstep_iret
+	je	.Lbstep_iret
 	cmpq	$gs_change, RIP+8(%rsp)
-	jne	error_entry_done
+	jne	.Lerror_entry_done
 
 	/*
 	 * hack: gs_change can fail with user gsbase.  If this happens, fix up
 	 * gsbase and proceed.  We'll fix up the exception and land in
 	 * gs_change's error handler with kernel gsbase.
 	 */
-	SWAPGS
-	jmp	error_entry_done
+	jmp	.Lerror_entry_from_usermode_swapgs
 
-bstep_iret:
+.Lbstep_iret:
 	/* Fix truncated RIP */
 	movq	%rcx, RIP+8(%rsp)
 	/* fall through */
 
-error_bad_iret:
+.Lerror_bad_iret:
 	/*
 	 * We came from an IRET to user mode, so we have user gsbase.
 	 * Switch to kernel gsbase:
@@ -1198,7 +1108,7 @@
 	call	fixup_bad_iret
 	mov	%rax, %rsp
 	decl	%ebx
-	jmp	error_entry_done
+	jmp	.Lerror_entry_from_usermode_after_swapgs
 END(error_entry)
 
 
@@ -1209,7 +1119,6 @@
  */
 ENTRY(error_exit)
 	movl	%ebx, %eax
-	RESTORE_EXTRA_REGS
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl	%eax, %eax
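For orientation, the rewritten 64-bit return-to-user path above can be read as C-ish pseudocode (a summary of the asm flow, not literal code):

	/* retint_user (IRQs off) or int_ret_from_sys_call (IRQs on) */
	prepare_exit_to_usermode(regs);		/* or syscall_return_slowpath(regs) */
	/* both return with IRQs off and no exit work left pending */
	/* then TRACE_IRQS_IRETQ, SWAPGS, restore_regs_and_iret: pop regs, IRET */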
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index a7e257d..a9360d4 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -22,8 +22,8 @@
 #define __AUDIT_ARCH_LE		0x40000000
 
 #ifndef CONFIG_AUDITSYSCALL
-# define sysexit_audit		ia32_ret_from_sys_call
-# define sysretl_audit		ia32_ret_from_sys_call
+# define sysexit_audit		ia32_ret_from_sys_call_irqs_off
+# define sysretl_audit		ia32_ret_from_sys_call_irqs_off
 #endif
 
 	.section .entry.text, "ax"
@@ -141,7 +141,8 @@
 	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
 	movl	RIP(%rsp), %ecx		/* User %eip */
 	movq    RAX(%rsp), %rax
-	RESTORE_RSI_RDI
+	movl	RSI(%rsp), %esi
+	movl	RDI(%rsp), %edi
 	xorl	%edx, %edx		/* Do not leak kernel information */
 	xorq	%r8, %r8
 	xorq	%r9, %r9
@@ -209,10 +210,10 @@
 	.endm
 
 	.macro auditsys_exit exit
-	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-	jnz	ia32_ret_from_sys_call
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
+	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+	jnz	ia32_ret_from_sys_call
 	movl	%eax, %esi		/* second arg, syscall return value */
 	cmpl	$-MAX_ERRNO, %eax	/* is it an error ? */
 	jbe	1f
@@ -230,7 +231,7 @@
 	movq	%rax, R10(%rsp)
 	movq	%rax, R9(%rsp)
 	movq	%rax, R8(%rsp)
-	jmp	int_with_check
+	jmp	int_ret_from_sys_call_irqs_off
 	.endm
 
 sysenter_auditsys:
@@ -365,7 +366,9 @@
 
 sysretl_from_sys_call:
 	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-	RESTORE_RSI_RDI_RDX
+	movl	RDX(%rsp), %edx
+	movl	RSI(%rsp), %esi
+	movl	RDI(%rsp), %edi
 	movl	RIP(%rsp), %ecx
 	movl	EFLAGS(%rsp), %r11d
 	movq    RAX(%rsp), %rax
@@ -430,8 +433,48 @@
 END(entry_SYSCALL_compat)
 
 ia32_badarg:
-	ASM_CLAC
-	movq	$-EFAULT, RAX(%rsp)
+	/*
+	 * So far, we've entered kernel mode, set AC, turned on IRQs, and
+	 * saved C regs except r8-r11.  We haven't done any of the other
+	 * standard entry work, though.  We want to bail, but we shouldn't
+	 * treat this as a syscall entry since we don't even know what the
+	 * args are.  Instead, treat this as a non-syscall entry, finish
+	 * the entry work, and immediately exit after setting AX = -EFAULT.
+	 *
+	 * We're really just being polite here.  Killing the task outright
+	 * would be a reasonable action, too.  Given that the only valid
+	 * way to have gotten here is through the vDSO, and we already know
+	 * that the stack pointer is bad, the task isn't going to survive
+	 * for long no matter what we do.
+	 */
+
+	ASM_CLAC			/* undo STAC */
+	movq	$-EFAULT, RAX(%rsp)	/* return -EFAULT if possible */
+
+	/* Fill in the rest of pt_regs */
+	xorl	%eax, %eax
+	movq	%rax, R11(%rsp)
+	movq	%rax, R10(%rsp)
+	movq	%rax, R9(%rsp)
+	movq	%rax, R8(%rsp)
+	SAVE_EXTRA_REGS
+
+	/* Turn IRQs back off. */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+
+	/* Now finish entering normal kernel mode. */
+#ifdef CONFIG_CONTEXT_TRACKING
+	call enter_from_user_mode
+#endif
+
+	/* And exit again. */
+	jmp retint_user
+
+ia32_ret_from_sys_call_irqs_off:
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
+
 ia32_ret_from_sys_call:
 	xorl	%eax, %eax		/* Do not leak kernel information */
 	movq	%rax, R11(%rsp)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index ef8187f..25e3cf1 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -365,3 +365,18 @@
 356	i386	memfd_create		sys_memfd_create
 357	i386	bpf			sys_bpf
 358	i386	execveat		sys_execveat			stub32_execveat
+359	i386	socket			sys_socket
+360	i386	socketpair		sys_socketpair
+361	i386	bind			sys_bind
+362	i386	connect			sys_connect
+363	i386	listen			sys_listen
+364	i386	accept4			sys_accept4
+365	i386	getsockopt		sys_getsockopt			compat_sys_getsockopt
+366	i386	setsockopt		sys_setsockopt			compat_sys_setsockopt
+367	i386	getsockname		sys_getsockname
+368	i386	getpeername		sys_getpeername
+369	i386	sendto			sys_sendto
+370	i386	sendmsg			sys_sendmsg			compat_sys_sendmsg
+371	i386	recvfrom		sys_recvfrom			compat_sys_recvfrom
+372	i386	recvmsg			sys_recvmsg			compat_sys_recvmsg
+373	i386	shutdown		sys_shutdown
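These rows give 32-bit userspace direct socket system calls instead of multiplexing them through socketcall(). Purely as an illustration (the number 359 comes from the sys_socket row above; a libc targeting this ABI would normally wrap it), a 32-bit test program could invoke the new entry directly:

#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	/* 359 = i386 sys_socket above; AF_INET is 2, SOCK_STREAM is 1 */
	long fd = syscall(359, 2, 1, 0);

	return fd < 0 ? 1 : 0;
}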
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index e970320..a3d0767 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -8,7 +8,7 @@
 VDSO64-$(CONFIG_X86_64)		:= y
 VDSOX32-$(CONFIG_X86_X32_ABI)	:= y
 VDSO32-$(CONFIG_X86_32)		:= y
-VDSO32-$(CONFIG_COMPAT)		:= y
+VDSO32-$(CONFIG_IA32_EMULATION)	:= y
 
 # files to link into the vdso
 vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
@@ -20,7 +20,7 @@
 vdso_img-$(VDSO64-y)		+= 64
 vdso_img-$(VDSOX32-y)		+= x32
 vdso_img-$(VDSO32-y)		+= 32-int80
-vdso_img-$(CONFIG_COMPAT)	+= 32-syscall
+vdso_img-$(CONFIG_IA32_EMULATION)	+= 32-syscall
 vdso_img-$(VDSO32-y)		+= 32-sysenter
 
 obj-$(VDSO32-y)			+= vdso32-setup.o
@@ -126,7 +126,7 @@
 # Build multiple 32-bit vDSO images to choose from at boot time.
 #
 vdso32.so-$(VDSO32-y)		+= int80
-vdso32.so-$(CONFIG_COMPAT)	+= syscall
+vdso32.so-$(CONFIG_IA32_EMULATION)	+= syscall
 vdso32.so-$(VDSO32-y)		+= sysenter
 
 vdso32-images			= $(vdso32.so-y:%=vdso32-%.so)
@@ -175,7 +175,7 @@
 		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
 		 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
 	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 9793322..ca94fa6 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -175,20 +175,8 @@
 
 notrace static cycle_t vread_tsc(void)
 {
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads.  The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)__native_read_tsc();
-
-	last = gtod->cycle_last;
+	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 last = gtod->cycle_last;
 
 	if (likely(ret >= last))
 		return ret;
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1c9f750..4345431 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -177,7 +177,7 @@
 	return ret;
 }
 
-#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static int load_vdso32(void)
 {
 	int ret;
@@ -219,8 +219,11 @@
 		return map_vdso(&vdso_image_x32, true);
 	}
 #endif
-
+#ifdef CONFIG_IA32_EMULATION
 	return load_vdso32();
+#else
+	return 0;
+#endif
 }
 #endif
 #else
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 2dcc6ff..26a46f4 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -290,7 +290,7 @@
 
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
-#ifdef CONFIG_IA32_EMULATION
+#ifdef CONFIG_COMPAT
 	if (!mm || mm->context.ia32_compat)
 		return NULL;
 #endif
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index ae3a29a..a0a19b7 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -34,99 +34,6 @@
 #include <asm/sys_ia32.h>
 #include <asm/smap.h>
 
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
-{
-	int err = 0;
-	bool ia32 = test_thread_flag(TIF_IA32);
-
-	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
-		return -EFAULT;
-
-	put_user_try {
-		/* If you change siginfo_t structure, please make sure that
-		   this code is fixed accordingly.
-		   It should never copy any pad contained in the structure
-		   to avoid security leaks, but must copy the generic
-		   3 ints plus the relevant union member.  */
-		put_user_ex(from->si_signo, &to->si_signo);
-		put_user_ex(from->si_errno, &to->si_errno);
-		put_user_ex((short)from->si_code, &to->si_code);
-
-		if (from->si_code < 0) {
-			put_user_ex(from->si_pid, &to->si_pid);
-			put_user_ex(from->si_uid, &to->si_uid);
-			put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
-		} else {
-			/*
-			 * First 32bits of unions are always present:
-			 * si_pid === si_band === si_tid === si_addr(LS half)
-			 */
-			put_user_ex(from->_sifields._pad[0],
-					  &to->_sifields._pad[0]);
-			switch (from->si_code >> 16) {
-			case __SI_FAULT >> 16:
-				break;
-			case __SI_SYS >> 16:
-				put_user_ex(from->si_syscall, &to->si_syscall);
-				put_user_ex(from->si_arch, &to->si_arch);
-				break;
-			case __SI_CHLD >> 16:
-				if (ia32) {
-					put_user_ex(from->si_utime, &to->si_utime);
-					put_user_ex(from->si_stime, &to->si_stime);
-				} else {
-					put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
-					put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
-				}
-				put_user_ex(from->si_status, &to->si_status);
-				/* FALL THROUGH */
-			default:
-			case __SI_KILL >> 16:
-				put_user_ex(from->si_uid, &to->si_uid);
-				break;
-			case __SI_POLL >> 16:
-				put_user_ex(from->si_fd, &to->si_fd);
-				break;
-			case __SI_TIMER >> 16:
-				put_user_ex(from->si_overrun, &to->si_overrun);
-				put_user_ex(ptr_to_compat(from->si_ptr),
-					    &to->si_ptr);
-				break;
-				 /* This is not generated by the kernel as of now.  */
-			case __SI_RT >> 16:
-			case __SI_MESGQ >> 16:
-				put_user_ex(from->si_uid, &to->si_uid);
-				put_user_ex(from->si_int, &to->si_int);
-				break;
-			}
-		}
-	} put_user_catch(err);
-
-	return err;
-}
-
-int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
-{
-	int err = 0;
-	u32 ptr32;
-
-	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
-		return -EFAULT;
-
-	get_user_try {
-		get_user_ex(to->si_signo, &from->si_signo);
-		get_user_ex(to->si_errno, &from->si_errno);
-		get_user_ex(to->si_code, &from->si_code);
-
-		get_user_ex(to->si_pid, &from->si_pid);
-		get_user_ex(to->si_uid, &from->si_uid);
-		get_user_ex(ptr32, &from->si_ptr);
-		to->si_ptr = compat_ptr(ptr32);
-	} get_user_catch(err);
-
-	return err;
-}
-
 /*
  * Do a signal return; undo the signal stack.
  */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index c839363..ebf6d5e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -313,7 +313,6 @@
 	/* wakeup_secondary_cpu */
 	int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
 
-	bool wait_for_init_deassert;
 	void (*inquire_remote_apic)(int apicid);
 
 	/* apic ops */
@@ -378,7 +377,6 @@
  * APIC functionality to boot other CPUs - only used on SMP:
  */
 #ifdef CONFIG_SMP
-extern atomic_t init_deasserted;
 extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
 #endif
 
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 9686c3d..259a7c1 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -21,7 +21,7 @@
  * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
  * compiler switches.
  */
-static inline unsigned int __arch_hweight32(unsigned int w)
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
 {
 	unsigned int res = 0;
 
@@ -42,20 +42,23 @@
 	return __arch_hweight32(w & 0xff);
 }
 
+#ifdef CONFIG_X86_32
 static inline unsigned long __arch_hweight64(__u64 w)
 {
+	return  __arch_hweight32((u32)w) +
+		__arch_hweight32((u32)(w >> 32));
+}
+#else
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
 	unsigned long res = 0;
 
-#ifdef CONFIG_X86_32
-	return  __arch_hweight32((u32)w) +
-		__arch_hweight32((u32)(w >> 32));
-#else
 	asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
 		     : "="REG_OUT (res)
 		     : REG_IN (w));
-#endif /* CONFIG_X86_32 */
 
 	return res;
 }
+#endif /* CONFIG_X86_32 */
 
 #endif
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index e916895..fb52aa6 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,6 +182,21 @@
 	return xchg(&v->counter, new);
 }
 
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
+			: "+m" (v->counter)				\
+			: "ir" (i)					\
+			: "memory");					\
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * __atomic_add_unless - add unless the number is already a given value
  * @v: pointer of type atomic_t
@@ -219,16 +234,6 @@
 	return *v;
 }
 
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "andl %0,%1"			\
-		     : : "r" (~(mask)), "m" (*(addr)) : "memory")
-
-#define atomic_set_mask(mask, addr)				\
-	asm volatile(LOCK_PREFIX "orl %0,%1"			\
-		     : : "r" ((unsigned)(mask)), "m" (*(addr))	\
-		     : "memory")
-
 #ifdef CONFIG_X86_32
 # include <asm/atomic64_32.h>
 #else
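The removed atomic_clear_mask()/atomic_set_mask() helpers map directly onto the generated bitwise ops; a caller migrating from them would end up with something like the following (SOME_FLAG is a placeholder bit):

	atomic_t flags = ATOMIC_INIT(0);

	atomic_or(SOME_FLAG, &flags);	/* replaces atomic_set_mask()   */
	atomic_and(~SOME_FLAG, &flags);	/* replaces atomic_clear_mask() */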
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index b154de7..a11c30b 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -313,4 +313,18 @@
 #undef alternative_atomic64
 #undef __alternative_atomic64
 
+#define ATOMIC64_OP(op, c_op)						\
+static inline void atomic64_##op(long long i, atomic64_t *v)		\
+{									\
+	long long old, c = 0;						\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
+		c = old;						\
+}
+
+ATOMIC64_OP(and, &)
+ATOMIC64_OP(or, |)
+ATOMIC64_OP(xor, ^)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_32_H */
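32-bit x86 has no lock-prefixed 64-bit AND/OR/XOR, so ATOMIC64_OP() above falls back to a cmpxchg loop; expanded by hand for the `or' case it reads:

	static inline void atomic64_or(long long i, atomic64_t *v)
	{
		long long old, c = 0;

		/* retry until the cmpxchg installs c | i over an unchanged c */
		while ((old = atomic64_cmpxchg(v, c, c | i)) != c)
			c = old;
	}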
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index b965f9e..50e33ef 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -220,4 +220,19 @@
 	return dec;
 }
 
+#define ATOMIC64_OP(op)							\
+static inline void atomic64_##op(long i, atomic64_t *v)			\
+{									\
+	asm volatile(LOCK_PREFIX #op"q %1,%0"				\
+			: "+m" (v->counter)				\
+			: "er" (i)					\
+			: "memory");					\
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
+#undef ATOMIC64_OP
+
 #endif /* _ASM_X86_ATOMIC64_64_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index e51a8f8..0681d25 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -57,12 +57,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
@@ -74,12 +74,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	___p1;								\
@@ -91,15 +91,4 @@
 #define smp_mb__before_atomic()	barrier()
 #define smp_mb__after_atomic()	barrier()
 
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- */
-static __always_inline void rdtsc_barrier(void)
-{
-	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
-			  "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
 #endif /* _ASM_X86_BARRIER_H */
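rdtsc_barrier() is not replaced one-for-one; the fence is folded into rdtsc_ordered() in <asm/msr.h> (see the msr.h hunk below), so the open-coded pairs elsewhere in this series collapse to a single call, as in the vread_tsc() hunk above:

	/* before */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	/* after */
	ret = (cycle_t)rdtsc_ordered();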
diff --git a/arch/x86/include/asm/context_tracking.h b/arch/x86/include/asm/context_tracking.h
deleted file mode 100644
index 1fe4970..0000000
--- a/arch/x86/include/asm/context_tracking.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X86_CONTEXT_TRACKING_H
-#define _ASM_X86_CONTEXT_TRACKING_H
-
-#ifdef CONFIG_CONTEXT_TRACKING
-# define SCHEDULE_USER call schedule_user
-#else
-# define SCHEDULE_USER call schedule
-#endif
-
-#endif
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 3d6606f..477fc28 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -119,6 +119,7 @@
 #define X86_FEATURE_TM2		( 4*32+ 8) /* Thermal Monitor 2 */
 #define X86_FEATURE_SSSE3	( 4*32+ 9) /* Supplemental SSE-3 */
 #define X86_FEATURE_CID		( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG	( 4*32+11) /* Silicon Debug */
 #define X86_FEATURE_FMA		( 4*32+12) /* Fused multiply-add */
 #define X86_FEATURE_CX16	( 4*32+13) /* CMPXCHG16B */
 #define X86_FEATURE_XTPR	( 4*32+14) /* Send Task Priority Messages */
@@ -176,6 +177,7 @@
 #define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
 #define X86_FEATURE_BPEXT	(6*32+26) /* data breakpoint extension */
 #define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_MWAITX	( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h
index 9b3b4f2..36a760b 100644
--- a/arch/x86/include/asm/delay.h
+++ b/arch/x86/include/asm/delay.h
@@ -4,5 +4,6 @@
 #include <asm-generic/delay.h>
 
 void use_tsc_delay(void);
+void use_mwaitx_delay(void);
 
 #endif /* _ASM_X86_DELAY_H */
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index f161c18..141c561 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -78,7 +78,7 @@
 #ifdef CONFIG_X86_64
 extern unsigned int vdso64_enabled;
 #endif
-#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 extern unsigned int vdso32_enabled;
 #endif
 
@@ -187,8 +187,8 @@
 #define	COMPAT_ELF_PLAT_INIT(regs, load_addr)		\
 	elf_common_init(&current->thread, regs, __USER_DS)
 
-void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp);
-#define compat_start_thread start_thread_ia32
+void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp);
+#define compat_start_thread compat_start_thread
 
 void set_personality_ia32(bool);
 #define COMPAT_SET_PERSONALITY(ex)			\
@@ -344,14 +344,9 @@
  */
 static inline int mmap_is_ia32(void)
 {
-#ifdef CONFIG_X86_32
-	return 1;
-#endif
-#ifdef CONFIG_IA32_EMULATION
-	if (test_thread_flag(TIF_ADDR32))
-		return 1;
-#endif
-	return 0;
+	return config_enabled(CONFIG_X86_32) ||
+	       (config_enabled(CONFIG_COMPAT) &&
+		test_thread_flag(TIF_ADDR32));
 }
 
 /* Do not change the values. See get_align_mask() */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 6615032..1e3408e 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -182,10 +182,10 @@
 #define trace_irq_entries_start irq_entries_start
 #endif
 
-#define VECTOR_UNDEFINED	(-1)
-#define VECTOR_RETRIGGERED	(-2)
+#define VECTOR_UNUSED		NULL
+#define VECTOR_RETRIGGERED	((void *)~0UL)
 
-typedef int vector_irq_t[NR_VECTORS];
+typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
 
 #endif /* !ASSEMBLY_ */
diff --git a/arch/x86/include/asm/ia32.h b/arch/x86/include/asm/ia32.h
index d0e8e01..2801976 100644
--- a/arch/x86/include/asm/ia32.h
+++ b/arch/x86/include/asm/ia32.h
@@ -22,15 +22,6 @@
 	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
 };
 
-struct ucontext_x32 {
-	unsigned int	  uc_flags;
-	unsigned int 	  uc_link;
-	compat_stack_t	  uc_stack;
-	unsigned int	  uc__pad0;     /* needed for alignment */
-	struct sigcontext uc_mcontext;  /* the 64-bit sigcontext type */
-	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
-};
-
 /* This matches struct stat64 in glibc2.2, hence the absolutely
  * insane amounts of padding around dev_t's.
  */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index cc9c61b..7cfc085 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -180,6 +180,8 @@
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
+#define ioremap_uc ioremap_uc
+
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
 				unsigned long prot_val);
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
index 57995f0..b72ad0f 100644
--- a/arch/x86/include/asm/iosf_mbi.h
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -52,20 +52,20 @@
 
 /* Quark available units */
 #define QRK_MBI_UNIT_HBA	0x00
-#define QRK_MBI_UNIT_HB	0x03
+#define QRK_MBI_UNIT_HB		0x03
 #define QRK_MBI_UNIT_RMU	0x04
-#define QRK_MBI_UNIT_MM	0x05
+#define QRK_MBI_UNIT_MM		0x05
 #define QRK_MBI_UNIT_MMESRAM	0x05
 #define QRK_MBI_UNIT_SOC	0x31
 
 /* Quark read/write opcodes */
 #define QRK_MBI_HBA_READ	0x10
 #define QRK_MBI_HBA_WRITE	0x11
-#define QRK_MBI_HB_READ	0x10
+#define QRK_MBI_HB_READ		0x10
 #define QRK_MBI_HB_WRITE	0x11
 #define QRK_MBI_RMU_READ	0x10
 #define QRK_MBI_RMU_WRITE	0x11
-#define QRK_MBI_MM_READ	0x10
+#define QRK_MBI_MM_READ		0x10
 #define QRK_MBI_MM_WRITE	0x11
 #define QRK_MBI_MMESRAM_READ	0x12
 #define QRK_MBI_MMESRAM_WRITE	0x13
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 8008d06..881b476 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -36,7 +36,9 @@
 
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
-extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+struct irq_desc;
+extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
 
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4c2d2eb..6ca9fd6 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -117,16 +117,6 @@
 
 #define FPU_IRQ				  13
 
-#define	FIRST_VM86_IRQ			   3
-#define LAST_VM86_IRQ			  15
-
-#ifndef __ASSEMBLY__
-static inline int invalid_vm86_irq(int irq)
-{
-	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
-}
-#endif
-
 /*
  * Size the maximum number of interrupts.
  *
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a4c1cf7..5daeca3 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -16,15 +16,32 @@
 # define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
 #endif
 
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
 	asm_volatile_goto("1:"
 		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
 		".pushsection __jump_table,  \"aw\" \n\t"
 		_ASM_ALIGN "\n\t"
-		_ASM_PTR "1b, %l[l_yes], %c0 \n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
 		".popsection \n\t"
-		: :  "i" (key) : : l_yes);
+		: :  "i" (key), "i" (branch) : : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+	asm_volatile_goto("1:"
+		".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+		"2:\n\t"
+		".pushsection __jump_table,  \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		_ASM_PTR "1b, %l[l_yes], %c0 + %c1 \n\t"
+		".popsection \n\t"
+		: :  "i" (key), "i" (branch) : : l_yes);
+
 	return false;
 l_yes:
 	return true;
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 74a2a8d..1410b56 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -1,6 +1,9 @@
 #ifndef _ASM_X86_KASAN_H
 #define _ASM_X86_KASAN_H
 
+#include <linux/const.h>
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
 /*
  * Compiler uses shadow offset assuming that addresses start
  * from 0. Kernel addresses don't start from 0, so shadow
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49ec903..c12e845 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -252,6 +252,11 @@
 	int size;
 };
 
+struct rsvd_bits_validate {
+	u64 rsvd_bits_mask[2][4];
+	u64 bad_mt_xwr;
+};
+
 /*
  * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
  * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
@@ -289,8 +294,15 @@
 
 	u64 *pae_root;
 	u64 *lm_root;
-	u64 rsvd_bits_mask[2][4];
-	u64 bad_mt_xwr;
+
+	/*
+	 * Check zero bits on shadow page table entries; these bits
+	 * include not only hardware-reserved bits but also bits that
+	 * the SPTE never uses.
+	 */
+	struct rsvd_bits_validate shadow_zero_check;
+
+	struct rsvd_bits_validate guest_rsvd_check;
 
 	/*
 	 * Bitmap: bit set = last pte in walk
@@ -358,6 +370,11 @@
 	struct list_head head;
 };
 
+/* Hyper-V per vcpu emulation context */
+struct kvm_vcpu_hv {
+	u64 hv_vapic;
+};
+
 struct kvm_vcpu_arch {
 	/*
 	 * rip and regs accesses must go through
@@ -514,8 +531,7 @@
 	/* used for guest single stepping over the given code position */
 	unsigned long singlestep_rip;
 
-	/* fields used by HYPER-V emulation */
-	u64 hv_vapic;
+	struct kvm_vcpu_hv hyperv;
 
 	cpumask_var_t wbinvd_dirty_mask;
 
@@ -586,6 +602,17 @@
 	struct kvm_lapic *logical_map[16][16];
 };
 
+/* Hyper-V emulation context */
+struct kvm_hv {
+	u64 hv_guest_os_id;
+	u64 hv_hypercall;
+	u64 hv_tsc_page;
+
+	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
+	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
+	u64 hv_crash_ctl;
+};
+
 struct kvm_arch {
 	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
@@ -645,16 +672,14 @@
 	/* reads protected by irq_srcu, writes by irq_lock */
 	struct hlist_head mask_notifier_list;
 
-	/* fields used by HYPER-V emulation */
-	u64 hv_guest_os_id;
-	u64 hv_hypercall;
-	u64 hv_tsc_page;
+	struct kvm_hv hyperv;
 
 	#ifdef CONFIG_KVM_MMU_AUDIT
 	int audit_point;
 	#endif
 
 	bool boot_vcpu_runs_old_kvmclock;
+	u32 bsp_vcpu_id;
 
 	u64 disabled_quirks;
 };
@@ -1203,5 +1228,7 @@
 			    const struct kvm_userspace_memory_region *mem);
 int x86_set_memory_region(struct kvm *kvm,
 			  const struct kvm_userspace_memory_region *mem);
+bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/math_emu.h b/arch/x86/include/asm/math_emu.h
index 031f626..0d9b14f 100644
--- a/arch/x86/include/asm/math_emu.h
+++ b/arch/x86/include/asm/math_emu.h
@@ -2,7 +2,6 @@
 #define _ASM_X86_MATH_EMU_H
 
 #include <asm/ptrace.h>
-#include <asm/vm86.h>
 
 /* This structure matches the layout of the data saved to the stack
    following a device-not-present interrupt, part of it saved
@@ -10,9 +9,6 @@
    */
 struct math_emu_info {
 	long ___orig_eip;
-	union {
-		struct pt_regs *regs;
-		struct kernel_vm86_regs *vm86;
-	};
+	struct pt_regs *regs;
 };
 #endif /* _ASM_X86_MATH_EMU_H */
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 982dfc3..2dbc0bf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -151,10 +151,12 @@
 #ifdef CONFIG_X86_MCE
 int mcheck_init(void);
 void mcheck_cpu_init(struct cpuinfo_x86 *c);
+void mcheck_cpu_clear(struct cpuinfo_x86 *c);
 void mcheck_vendor_init_severity(void);
 #else
 static inline int mcheck_init(void) { return 0; }
 static inline void mcheck_cpu_init(struct cpuinfo_x86 *c) {}
+static inline void mcheck_cpu_clear(struct cpuinfo_x86 *c) {}
 static inline void mcheck_vendor_init_severity(void) {}
 #endif
 
@@ -181,20 +183,18 @@
 
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void mce_intel_feature_clear(struct cpuinfo_x86 *c);
 void cmci_clear(void);
 void cmci_reenable(void);
 void cmci_rediscover(void);
 void cmci_recheck(void);
-void lmce_clear(void);
-void lmce_enable(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void mce_intel_feature_clear(struct cpuinfo_x86 *c) { }
 static inline void cmci_clear(void) {}
 static inline void cmci_reenable(void) {}
 static inline void cmci_rediscover(void) {}
 static inline void cmci_recheck(void) {}
-static inline void lmce_clear(void) {}
-static inline void lmce_enable(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 364d274..55234d5 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,7 +9,9 @@
  * we put the segment information here.
  */
 typedef struct {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
+#endif
 
 #ifdef CONFIG_X86_64
 	/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 984abfe..379cd36 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -33,6 +33,7 @@
 static inline void load_mm_cr4(struct mm_struct *mm) {}
 #endif
 
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 /*
  * ldt_structs can be allocated, used, and freed, but they are never
  * modified while live.
@@ -48,8 +49,23 @@
 	int size;
 };
 
+/*
+ * Used for LDT copy/destruction.
+ */
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
+#else	/* CONFIG_MODIFY_LDT_SYSCALL */
+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
+{
+	return 0;
+}
+static inline void destroy_context(struct mm_struct *mm) {}
+#endif
+
 static inline void load_mm_ldt(struct mm_struct *mm)
 {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
 
 	/* lockless_dereference synchronizes with smp_store_release */
@@ -73,17 +89,13 @@
 		set_ldt(ldt->entries, ldt->size);
 	else
 		clear_LDT();
+#else
+	clear_LDT();
+#endif
 
 	DEBUG_LOCKS_WARN_ON(preemptible());
 }
 
-/*
- * Used for LDT copy/destruction.
- */
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
-
-
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 #ifdef CONFIG_SMP
@@ -114,6 +126,7 @@
 		/* Load per-mm CR4 state */
 		load_mm_cr4(next);
 
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 		/*
 		 * Load the LDT, if the LDT is different.
 		 *
@@ -128,6 +141,7 @@
 		 */
 		if (unlikely(prev->context.ldt != next->context.ldt))
 			load_mm_ldt(next);
+#endif
 	}
 #ifdef CONFIG_SMP
 	  else {
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index c163215..aaf59b7 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -7,6 +7,7 @@
 
 struct ms_hyperv_info {
 	u32 features;
+	u32 misc_features;
 	u32 hints;
 };
 
@@ -20,4 +21,8 @@
 void hv_setup_vmbus_irq(void (*handler)(void));
 void hv_remove_vmbus_irq(void);
 
+void hv_setup_kexec_handler(void (*handler)(void));
+void hv_remove_kexec_handler(void);
+void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
+void hv_remove_crash_handler(void);
 #endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9ebc3d0..c1c0a1c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -73,6 +73,12 @@
 #define MSR_LBR_CORE_FROM		0x00000040
 #define MSR_LBR_CORE_TO			0x00000060
 
+#define MSR_LBR_INFO_0			0x00000dc0 /* ... 0xddf for _31 */
+#define LBR_INFO_MISPRED		BIT_ULL(63)
+#define LBR_INFO_IN_TX			BIT_ULL(62)
+#define LBR_INFO_ABORT			BIT_ULL(61)
+#define LBR_INFO_CYCLES			0xffff
+
 #define MSR_IA32_PEBS_ENABLE		0x000003f1
 #define MSR_IA32_DS_AREA		0x00000600
 #define MSR_IA32_PERF_CAPABILITIES	0x00000345
@@ -80,13 +86,21 @@
 
 #define MSR_IA32_RTIT_CTL		0x00000570
 #define RTIT_CTL_TRACEEN		BIT(0)
+#define RTIT_CTL_CYCLEACC		BIT(1)
 #define RTIT_CTL_OS			BIT(2)
 #define RTIT_CTL_USR			BIT(3)
 #define RTIT_CTL_CR3EN			BIT(7)
 #define RTIT_CTL_TOPA			BIT(8)
+#define RTIT_CTL_MTC_EN			BIT(9)
 #define RTIT_CTL_TSC_EN			BIT(10)
 #define RTIT_CTL_DISRETC		BIT(11)
 #define RTIT_CTL_BRANCH_EN		BIT(13)
+#define RTIT_CTL_MTC_RANGE_OFFSET	14
+#define RTIT_CTL_MTC_RANGE		(0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
+#define RTIT_CTL_CYC_THRESH_OFFSET	19
+#define RTIT_CTL_CYC_THRESH		(0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
+#define RTIT_CTL_PSB_FREQ_OFFSET	24
+#define RTIT_CTL_PSB_FREQ		(0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
 #define MSR_IA32_RTIT_STATUS		0x00000571
 #define RTIT_STATUS_CONTEXTEN		BIT(1)
 #define RTIT_STATUS_TRIGGEREN		BIT(2)
@@ -170,6 +184,12 @@
 #define MSR_PP1_ENERGY_STATUS		0x00000641
 #define MSR_PP1_POLICY			0x00000642
 
+#define MSR_CONFIG_TDP_NOMINAL		0x00000648
+#define MSR_CONFIG_TDP_LEVEL_1		0x00000649
+#define MSR_CONFIG_TDP_LEVEL_2		0x0000064A
+#define MSR_CONFIG_TDP_CONTROL		0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO	0x0000064C
+
 #define MSR_PKG_WEIGHTED_CORE_C0_RES	0x00000658
 #define MSR_PKG_ANY_CORE_C0_RES		0x00000659
 #define MSR_PKG_ANY_GFXE_C0_RES		0x0000065A
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e6a707e..77d8b28 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -47,14 +47,13 @@
  * it means rax *or* rdx.
  */
 #ifdef CONFIG_X86_64
-#define DECLARE_ARGS(val, low, high)	unsigned low, high
-#define EAX_EDX_VAL(val, low, high)	((low) | ((u64)(high) << 32))
-#define EAX_EDX_ARGS(val, low, high)	"a" (low), "d" (high)
+/* Using 64-bit values saves one instruction clearing the high half of low */
+#define DECLARE_ARGS(val, low, high)	unsigned long low, high
+#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
 #define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
 #else
 #define DECLARE_ARGS(val, low, high)	unsigned long long val
 #define EAX_EDX_VAL(val, low, high)	(val)
-#define EAX_EDX_ARGS(val, low, high)	"A" (val)
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
 
@@ -106,12 +105,19 @@
 	return err;
 }
 
-extern unsigned long long native_read_tsc(void);
-
 extern int rdmsr_safe_regs(u32 regs[8]);
 extern int wrmsr_safe_regs(u32 regs[8]);
 
-static __always_inline unsigned long long __native_read_tsc(void)
+/**
+ * rdtsc() - returns the current TSC without ordering constraints
+ *
+ * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
+ * only ordering constraint it supplies is the ordering implied by
+ * "asm volatile": it will put the RDTSC in the place you expect.  The
+ * CPU can and will speculatively execute that RDTSC, though, so the
+ * results can be non-monotonic if compared on different CPUs.
+ */
+static __always_inline unsigned long long rdtsc(void)
 {
 	DECLARE_ARGS(val, low, high);
 
@@ -120,6 +126,35 @@
 	return EAX_EDX_VAL(val, low, high);
 }
 
+/**
+ * rdtsc_ordered() - read the current TSC in program order
+ *
+ * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
+ * It is ordered like a load to a global in-memory counter.  It should
+ * be impossible to observe non-monotonic rdtsc_ordered() behavior
+ * across multiple CPUs as long as the TSC is synced.
+ */
+static __always_inline unsigned long long rdtsc_ordered(void)
+{
+	/*
+	 * The RDTSC instruction is not ordered relative to memory
+	 * access.  The Intel SDM and the AMD APM are both vague on this
+	 * point, but empirically an RDTSC instruction can be
+	 * speculatively executed before prior loads.  An RDTSC
+	 * immediately after an appropriate barrier appears to be
+	 * ordered as a normal load, that is, it provides the same
+	 * ordering guarantees as reading from a global memory location
+	 * that some other imaginary CPU is updating continuously with a
+	 * time stamp.
+	 */
+	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+			  "lfence", X86_FEATURE_LFENCE_RDTSC);
+	return rdtsc();
+}
+
+/* Deprecated, keep it for a cycle for easier merging: */
+#define rdtscll(now)	do { (now) = rdtsc_ordered(); } while (0)
+
 static inline unsigned long long native_read_pmc(int counter)
 {
 	DECLARE_ARGS(val, low, high);
@@ -153,8 +188,10 @@
 #define rdmsrl(msr, val)			\
 	((val) = native_read_msr((msr)))
 
-#define wrmsrl(msr, val)						\
-	native_write_msr((msr), (u32)((u64)(val)), (u32)((u64)(val) >> 32))
+static inline void wrmsrl(unsigned msr, u64 val)
+{
+	native_write_msr(msr, (u32)val, (u32)(val >> 32));
+}
 
 /* wrmsr with exception handling */
 static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
@@ -180,12 +217,6 @@
 	return err;
 }
 
-#define rdtscl(low)						\
-	((low) = (u32)__native_read_tsc())
-
-#define rdtscll(val)						\
-	((val) = __native_read_tsc())
-
 #define rdpmc(counter, low, high)			\
 do {							\
 	u64 _l = native_read_pmc((counter));		\
@@ -195,15 +226,6 @@
 
 #define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
 
-#define rdtscp(low, high, aux)					\
-do {                                                            \
-	unsigned long long _val = native_read_tscp(&(aux));     \
-	(low) = (u32)_val;                                      \
-	(high) = (u32)(_val >> 32);                             \
-} while (0)
-
-#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))
-
 #endif	/* !CONFIG_PARAVIRT */
 
 /*
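A usage sketch of the two readers documented above (do_work() is a hypothetical function being measured, and the delta is in TSC cycles rather than nanoseconds):

	static void time_do_work(void)
	{
		u64 start, end;

		start = rdtsc_ordered();	/* not speculated ahead of earlier loads */
		do_work();
		end = rdtsc_ordered();

		pr_info("do_work: %llu cycles\n", end - start);
	}

Plain rdtsc() is cheaper and fine for statistical sampling, but individual reads may be speculated and can compare non-monotonically across CPUs.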
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 653dfa7..c70689b 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -14,6 +14,9 @@
 #define CPUID5_ECX_INTERRUPT_BREAK	0x2
 
 #define MWAIT_ECX_INTERRUPT_BREAK	0x1
+#define MWAITX_ECX_TIMER_ENABLE		BIT(1)
+#define MWAITX_MAX_LOOPS		((u32)-1)
+#define MWAITX_DISABLE_CSTATES		0xf
 
 static inline void __monitor(const void *eax, unsigned long ecx,
 			     unsigned long edx)
@@ -23,6 +26,14 @@
 		     :: "a" (eax), "c" (ecx), "d"(edx));
 }
 
+static inline void __monitorx(const void *eax, unsigned long ecx,
+			      unsigned long edx)
+{
+	/* "monitorx %eax, %ecx, %edx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xfa;"
+		     :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
 static inline void __mwait(unsigned long eax, unsigned long ecx)
 {
 	/* "mwait %eax, %ecx;" */
@@ -30,6 +41,40 @@
 		     :: "a" (eax), "c" (ecx));
 }
 
+/*
+ * MWAITX allows for a timer expiration to get the core out of a wait state in
+ * addition to the default MWAIT exit condition of a store appearing at a
+ * monitored virtual address.
+ *
+ * Registers:
+ *
+ * MWAITX ECX[1]: enable timer if set
+ * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
+ * frequency is the same as the TSC frequency.
+ *
+ * Below is a comparison between MWAIT and MWAITX on AMD processors:
+ *
+ *                 MWAIT                           MWAITX
+ * opcode          0f 01 c9           |            0f 01 fb
+ * ECX[0]                  value of RFLAGS.IF seen by instruction
+ * ECX[1]          unused/#GP if set  |            enable timer if set
+ * ECX[31:2]                     unused/#GP if set
+ * EAX                           unused (reserved for hint)
+ * EBX[31:0]       unused             |            max wait time (P0 clocks)
+ *
+ *                 MONITOR                         MONITORX
+ * opcode          0f 01 c8           |            0f 01 fa
+ * EAX                     (logical) address to monitor
+ * ECX                     #GP if not zero
+ */
+static inline void __mwaitx(unsigned long eax, unsigned long ebx,
+			    unsigned long ecx)
+{
+	/* "mwaitx %eax, %ebx, %ecx;" */
+	asm volatile(".byte 0x0f, 0x01, 0xfb;"
+		     :: "a" (eax), "b" (ebx), "c" (ecx));
+}
+
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
 	trace_hardirqs_on();
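Putting the comparison table above into code: a hedged sketch of a bounded wait that arms the MWAITX timer (the monitored flag, its caller, and the loop count are placeholders, not an API this patch defines):

	static void wait_for_flag_or_timeout(int *flag, u32 loops)
	{
		/* arm the monitor on the flag's cache line (EAX=address, ECX=0, EDX=0) */
		__monitorx(flag, 0, 0);

		if (!READ_ONCE(*flag))
			/*
			 * EAX: MWAITX_DISABLE_CSTATES hint, EBX: max wait in SW P0
			 * clocks, ECX: MWAITX_ECX_TIMER_ENABLE so the timer can end
			 * the wait even if no store to *flag ever arrives.
			 */
			__mwaitx(MWAITX_DISABLE_CSTATES, loops, MWAITX_ECX_TIMER_ENABLE);
	}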
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index d143bfa..10d0596 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -153,7 +153,11 @@
 	val = paravirt_read_msr(msr, &_err);	\
 } while (0)
 
-#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
+static inline void wrmsrl(unsigned msr, u64 val)
+{
+	wrmsr(msr, (u32)val, (u32)(val>>32));
+}
+
 #define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)
 
 /* rdmsr with exception handling */
@@ -174,19 +178,6 @@
 	return err;
 }
 
-static inline u64 paravirt_read_tsc(void)
-{
-	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
-}
-
-#define rdtscl(low)				\
-do {						\
-	u64 _l = paravirt_read_tsc();		\
-	low = (int)_l;				\
-} while (0)
-
-#define rdtscll(val) (val = paravirt_read_tsc())
-
 static inline unsigned long long paravirt_sched_clock(void)
 {
 	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
@@ -215,27 +206,6 @@
 
 #define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
 
-static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
-{
-	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
-}
-
-#define rdtscp(low, high, aux)				\
-do {							\
-	int __aux;					\
-	unsigned long __val = paravirt_rdtscp(&__aux);	\
-	(low) = (u32)__val;				\
-	(high) = (u32)(__val >> 32);			\
-	(aux) = __aux;					\
-} while (0)
-
-#define rdtscpll(val, aux)				\
-do {							\
-	unsigned long __aux; 				\
-	val = paravirt_rdtscp(&__aux);			\
-	(aux) = __aux;					\
-} while (0)
-
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
 	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index a6b8f9f..ce029e4 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -156,9 +156,7 @@
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
 
-	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(int counter);
-	unsigned long long (*read_tscp)(unsigned int *aux);
 
 #ifdef CONFIG_X86_32
 	/*
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 164e3f8..fa1195d 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,8 +93,6 @@
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
-extern bool mp_should_keep_irq(struct device *dev);
-
 struct pci_raw_ops {
 	int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
 						int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index dc0f6ed..7bcb861 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -159,6 +159,13 @@
  */
 #define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)
 
+#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
+#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
+#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
+#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
+#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
+#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
+
 /*
  * IBS cpuid feature detection
  */
diff --git a/arch/x86/include/asm/pmc_atom.h b/arch/x86/include/asm/pmc_atom.h
index bc0fc08..aa8744c 100644
--- a/arch/x86/include/asm/pmc_atom.h
+++ b/arch/x86/include/asm/pmc_atom.h
@@ -18,6 +18,8 @@
 
 /* ValleyView Power Control Unit PCI Device ID */
 #define	PCI_DEVICE_ID_VLV_PMC	0x0F1C
+/* CherryTrail Power Control Unit PCI Device ID */
+#define	PCI_DEVICE_ID_CHT_PMC	0x229C
 
 /* PMC Memory mapped IO registers */
 #define	PMC_BASE_ADDR_OFFSET	0x44
@@ -29,6 +31,10 @@
 #define	PMC_FUNC_DIS		0x34
 #define	PMC_FUNC_DIS_2		0x38
 
+/* CHT specific bits in FUNC_DIS2 register */
+#define	BIT_FD_GMM		BIT(3)
+#define	BIT_FD_ISH		BIT(4)
+
 /* S0ix wake event control */
 #define	PMC_S0IX_WAKE_EN	0x3C
 
@@ -75,6 +81,21 @@
 #define PMC_PSS_BIT_USB			BIT(16)
 #define PMC_PSS_BIT_USB_SUS		BIT(17)
 
+/* CHT specific bits in PSS register */
+#define	PMC_PSS_BIT_CHT_UFS		BIT(7)
+#define	PMC_PSS_BIT_CHT_UXD		BIT(11)
+#define	PMC_PSS_BIT_CHT_UXD_FD		BIT(12)
+#define	PMC_PSS_BIT_CHT_UX_ENG		BIT(15)
+#define	PMC_PSS_BIT_CHT_USB_SUS		BIT(16)
+#define	PMC_PSS_BIT_CHT_GMM		BIT(17)
+#define	PMC_PSS_BIT_CHT_ISH		BIT(18)
+#define	PMC_PSS_BIT_CHT_DFX_MASTER	BIT(26)
+#define	PMC_PSS_BIT_CHT_DFX_CLUSTER1	BIT(27)
+#define	PMC_PSS_BIT_CHT_DFX_CLUSTER2	BIT(28)
+#define	PMC_PSS_BIT_CHT_DFX_CLUSTER3	BIT(29)
+#define	PMC_PSS_BIT_CHT_DFX_CLUSTER4	BIT(30)
+#define	PMC_PSS_BIT_CHT_DFX_CLUSTER5	BIT(31)
+
 /* These registers reflect D3 status of functions */
 #define	PMC_D3_STS_0		0xA0
 
@@ -117,6 +138,10 @@
 #define	BIT_USH_SS_PHY		BIT(2)
 #define	BIT_DFX			BIT(3)
 
+/* CHT specific bits in PMC_D3_STS_1 register */
+#define	BIT_STS_GMM		BIT(1)
+#define	BIT_STS_ISH		BIT(2)
+
 /* PMC I/O Registers */
 #define	ACPI_BASE_ADDR_OFFSET	0x40
 #define	ACPI_BASE_ADDR_MASK	0xFFFFFE00
@@ -126,4 +151,8 @@
 #define	SLEEP_TYPE_MASK		0xFFFFECFF
 #define	SLEEP_TYPE_S5		0x1C00
 #define	SLEEP_ENABLE		0x2000
+
+extern int pmc_atom_read(int offset, u32 *value);
+extern int pmc_atom_write(int offset, u32 value);
+
 #endif /* PMC_ATOM_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index dca7171..b12f810 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!raw_cpu_read_4(__preempt_count));
+	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
 }
 
 #ifdef CONFIG_PREEMPT
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 944f178..19577dd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -6,8 +6,8 @@
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
+struct vm86;
 
-#include <asm/vm86.h>
 #include <asm/math_emu.h>
 #include <asm/segment.h>
 #include <asm/types.h>
@@ -400,15 +400,9 @@
 	unsigned long		cr2;
 	unsigned long		trap_nr;
 	unsigned long		error_code;
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_VM86
 	/* Virtual 86 mode info */
-	struct vm86_struct __user *vm86_info;
-	unsigned long		screen_bitmap;
-	unsigned long		v86flags;
-	unsigned long		v86mask;
-	unsigned long		saved_sp0;
-	unsigned int		saved_fs;
-	unsigned int		saved_gs;
+	struct vm86		*vm86;
 #endif
 	/* IO permissions: */
 	unsigned long		*io_bitmap_ptr;
@@ -651,14 +645,6 @@
 
 extern void set_task_blockstep(struct task_struct *task, bool on);
 
-/*
- * from system description table in BIOS. Mostly for MCA use, but
- * others may find it useful:
- */
-extern unsigned int		machine_id;
-extern unsigned int		machine_submodel_id;
-extern unsigned int		BIOS_revision;
-
 /* Boot loader type from the setup header: */
 extern int			bootloader_type;
 extern int			bootloader_version;
@@ -720,7 +706,6 @@
 
 #define INIT_THREAD  {							  \
 	.sp0			= TOP_OF_INIT_STACK,			  \
-	.vm86_info		= NULL,					  \
 	.sysenter_cs		= __KERNEL_CS,				  \
 	.io_bitmap_ptr		= NULL,					  \
 }
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 5fabf13..6271281 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -88,7 +88,6 @@
 				       unsigned long phase1_result);
 
 extern long syscall_trace_enter(struct pt_regs *);
-extern void syscall_trace_leave(struct pt_regs *);
 
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 628954c..7a6bed5 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -62,7 +62,7 @@
 static __always_inline
 u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
 {
-	u64 delta = __native_read_tsc() - src->tsc_timestamp;
+	u64 delta = rdtsc_ordered() - src->tsc_timestamp;
 	return pvclock_scale_delta(delta, src->tsc_to_system_mul,
 				   src->tsc_shift);
 }
@@ -76,13 +76,7 @@
 	u8 ret_flags;
 
 	version = src->version;
-	/* Note: emulated platforms which do not advertise SSE2 support
-	 * result in kvmclock not using the necessary RDTSC barriers.
-	 * Without barriers, it is possible that RDTSC instruction reads from
-	 * the time stamp counter outside rdtsc_barrier protected section
-	 * below, resulting in violation of monotonicity.
-	 */
-	rdtsc_barrier();
+
 	offset = pvclock_get_nsec_offset(src);
 	ret = src->system_time + offset;
 	ret_flags = src->flags;
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h
index ae0e241..c537cbb 100644
--- a/arch/x86/include/asm/qrwlock.h
+++ b/arch/x86/include/asm/qrwlock.h
@@ -2,16 +2,6 @@
 #define _ASM_X86_QRWLOCK_H
 
 #include <asm-generic/qrwlock_types.h>
-
-#ifndef CONFIG_X86_PPRO_FENCE
-#define queue_write_unlock queue_write_unlock
-static inline void queue_write_unlock(struct qrwlock *lock)
-{
-        barrier();
-        ACCESS_ONCE(*(u8 *)&lock->cnts) = 0;
-}
-#endif
-
 #include <asm-generic/qrwlock.h>
 
 #endif /* _ASM_X86_QRWLOCK_H */
diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h
index 7c7c27c..1f3175b 100644
--- a/arch/x86/include/asm/sigframe.h
+++ b/arch/x86/include/asm/sigframe.h
@@ -4,6 +4,7 @@
 #include <asm/sigcontext.h>
 #include <asm/siginfo.h>
 #include <asm/ucontext.h>
+#include <linux/compat.h>
 
 #ifdef CONFIG_X86_32
 #define sigframe_ia32		sigframe
@@ -69,6 +70,15 @@
 
 #ifdef CONFIG_X86_X32_ABI
 
+struct ucontext_x32 {
+	unsigned int	  uc_flags;
+	unsigned int 	  uc_link;
+	compat_stack_t	  uc_stack;
+	unsigned int	  uc__pad0;     /* needed for alignment */
+	struct sigcontext uc_mcontext;  /* the 64-bit sigcontext type */
+	compat_sigset_t	  uc_sigmask;	/* mask last for extensibility */
+};
+
 struct rt_sigframe_x32 {
 	u64 pretcode;
 	struct ucontext_x32 uc;
diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h
index 31eab86..c481be7 100644
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -30,7 +30,7 @@
 #endif /* __ASSEMBLY__ */
 #include <uapi/asm/signal.h>
 #ifndef __ASSEMBLY__
-extern void do_notify_resume(struct pt_regs *, void *, __u32);
+extern void do_signal(struct pt_regs *regs);
 
 #define __ARCH_HAS_SA_RESTORER
 
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index c2e00bb..58505f0 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -72,7 +72,7 @@
 	 * on during the bootup the random pool has true entropy too.
 	 */
 	get_random_bytes(&canary, sizeof(canary));
-	tsc = __native_read_tsc();
+	tsc = rdtsc();
 	canary += tsc + (tsc << 32UL);
 
 	current->stack_canary = canary;
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 592a6a6..91dfcaf 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -37,6 +37,7 @@
 asmlinkage unsigned long sys_sigreturn(void);
 
 /* kernel/vm86_32.c */
+struct vm86_struct;
 asmlinkage long sys_vm86old(struct vm86_struct __user *);
 asmlinkage long sys_vm86(unsigned long, unsigned long);
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 225ee54..8afdc3e 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -27,14 +27,17 @@
  * Without this offset, that can result in a page fault.  (We are
  * careful that, in this case, the value we read doesn't matter.)
  *
- * In vm86 mode, the hardware frame is much longer still, but we neither
- * access the extra members from NMI context, nor do we write such a
- * frame at sp0 at all.
+ * In vm86 mode, the hardware frame is much longer still, so add 16
+ * bytes to make room for the real-mode segments.
  *
  * x86_64 has a fixed-length stack frame.
  */
 #ifdef CONFIG_X86_32
-# define TOP_OF_KERNEL_STACK_PADDING 8
+# ifdef CONFIG_VM86
+#  define TOP_OF_KERNEL_STACK_PADDING 16
+# else
+#  define TOP_OF_KERNEL_STACK_PADDING 8
+# endif
 #else
 # define TOP_OF_KERNEL_STACK_PADDING 0
 #endif
@@ -140,27 +143,11 @@
 	 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT |	\
 	 _TIF_NOHZ)
 
-/* work to do in syscall_trace_leave() */
-#define _TIF_WORK_SYSCALL_EXIT	\
-	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP |	\
-	 _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
-
-/* work to do on interrupt/exception return */
-#define _TIF_WORK_MASK							\
-	(0x0000FFFF &							\
-	 ~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|			\
-	   _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU))
-
 /* work to do on any return to user space */
 #define _TIF_ALLWORK_MASK						\
 	((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT |	\
 	_TIF_NOHZ)
 
-/* Only used for 64 bit */
-#define _TIF_DO_NOTIFY_MASK						\
-	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |				\
-	 _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
-
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
 	(_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index c5380be..c3496619 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -112,8 +112,8 @@
 asmlinkage void smp_deferred_error_interrupt(void);
 #endif
 
-extern enum ctx_state ist_enter(struct pt_regs *regs);
-extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state);
+extern void ist_enter(struct pt_regs *regs);
+extern void ist_exit(struct pt_regs *regs);
 extern void ist_begin_non_atomic(struct pt_regs *regs);
 extern void ist_end_non_atomic(void);
 
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 94605c0..6d7c547 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -21,28 +21,12 @@
 
 static inline cycles_t get_cycles(void)
 {
-	unsigned long long ret = 0;
-
 #ifndef CONFIG_X86_TSC
 	if (!cpu_has_tsc)
 		return 0;
 #endif
-	rdtscll(ret);
 
-	return ret;
-}
-
-static __always_inline cycles_t vget_cycles(void)
-{
-	/*
-	 * We only do VDSOs on TSC capable CPUs, so this shouldn't
-	 * access boot_cpu_data (which is not VDSO-safe):
-	 */
-#ifndef CONFIG_X86_TSC
-	if (!cpu_has_tsc)
-		return 0;
-#endif
-	return (cycles_t)__native_read_tsc();
+	return rdtsc();
 }
 
 extern void tsc_init(void);
@@ -51,6 +35,7 @@
 extern int check_tsc_unstable(void);
 extern int check_tsc_disabled(void);
 extern unsigned long native_calibrate_tsc(void);
+extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
 
 extern int tsc_clocksource_reliable;
 
diff --git a/arch/x86/include/asm/vm86.h b/arch/x86/include/asm/vm86.h
index 1d8de3f..1e491f3 100644
--- a/arch/x86/include/asm/vm86.h
+++ b/arch/x86/include/asm/vm86.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_VM86_H
 #define _ASM_X86_VM86_H
 
-
 #include <asm/ptrace.h>
 #include <uapi/asm/vm86.h>
 
@@ -28,43 +27,49 @@
 	unsigned short gs, __gsh;
 };
 
-struct kernel_vm86_struct {
-	struct kernel_vm86_regs regs;
-/*
- * the below part remains on the kernel stack while we are in VM86 mode.
- * 'tss.esp0' then contains the address of VM86_TSS_ESP0 below, and when we
- * get forced back from VM86, the CPU and "SAVE_ALL" will restore the above
- * 'struct kernel_vm86_regs' with the then actual values.
- * Therefore, pt_regs in fact points to a complete 'kernel_vm86_struct'
- * in kernelspace, hence we need not reget the data from userspace.
- */
-#define VM86_TSS_ESP0 flags
+struct vm86 {
+	struct vm86plus_struct __user *user_vm86;
+	struct pt_regs regs32;
+	unsigned long veflags;
+	unsigned long veflags_mask;
+	unsigned long saved_sp0;
+
 	unsigned long flags;
 	unsigned long screen_bitmap;
 	unsigned long cpu_type;
 	struct revectored_struct int_revectored;
 	struct revectored_struct int21_revectored;
 	struct vm86plus_info_struct vm86plus;
-	struct pt_regs *regs32;   /* here we save the pointer to the old regs */
-/*
- * The below is not part of the structure, but the stack layout continues
- * this way. In front of 'return-eip' may be some data, depending on
- * compilation, so we don't rely on this and save the pointer to 'oldregs'
- * in 'regs32' above.
- * However, with GCC-2.7.2 and the current CFLAGS you see exactly this:
-
-	long return-eip;        from call to vm86()
-	struct pt_regs oldregs;  user space registers as saved by syscall
- */
 };
 
 #ifdef CONFIG_VM86
 
 void handle_vm86_fault(struct kernel_vm86_regs *, long);
 int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
-struct pt_regs *save_v86_state(struct kernel_vm86_regs *);
+void save_v86_state(struct kernel_vm86_regs *, int);
 
 struct task_struct;
+
+#define free_vm86(t) do {				\
+	struct thread_struct *__t = (t);		\
+	if (__t->vm86 != NULL) {			\
+		kfree(__t->vm86);			\
+		__t->vm86 = NULL;			\
+	}						\
+} while (0)
+
+/*
+ * Support for VM86 programs to request interrupts for
+ * real mode hardware drivers:
+ */
+#define FIRST_VM86_IRQ		 3
+#define LAST_VM86_IRQ		15
+
+static inline int invalid_vm86_irq(int irq)
+{
+	return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
+}
+
 void release_vm86_irqs(struct task_struct *);
 
 #else
@@ -77,6 +82,10 @@
 	return 0;
 }
 
+static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }
+
+#define free_vm86(t) do { } while(0)
+
 #endif /* CONFIG_VM86 */
 
 #endif /* _ASM_X86_VM86_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index da772ed..448b7ca 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -47,6 +47,7 @@
 #define CPU_BASED_MOV_DR_EXITING                0x00800000
 #define CPU_BASED_UNCOND_IO_EXITING             0x01000000
 #define CPU_BASED_USE_IO_BITMAPS                0x02000000
+#define CPU_BASED_MONITOR_TRAP_FLAG             0x08000000
 #define CPU_BASED_USE_MSR_BITMAPS               0x10000000
 #define CPU_BASED_MONITOR_EXITING               0x20000000
 #define CPU_BASED_PAUSE_EXITING                 0x40000000
@@ -367,29 +368,29 @@
 #define TYPE_PHYSICAL_APIC_EVENT        (10 << 12)
 #define TYPE_PHYSICAL_APIC_INST         (15 << 12)
 
-/* segment AR */
-#define SEGMENT_AR_L_MASK (1 << 13)
+/* segment AR in VMCS -- these are different from what LAR reports */
+#define VMX_SEGMENT_AR_L_MASK (1 << 13)
 
-#define AR_TYPE_ACCESSES_MASK 1
-#define AR_TYPE_READABLE_MASK (1 << 1)
-#define AR_TYPE_WRITEABLE_MASK (1 << 2)
-#define AR_TYPE_CODE_MASK (1 << 3)
-#define AR_TYPE_MASK 0x0f
-#define AR_TYPE_BUSY_64_TSS 11
-#define AR_TYPE_BUSY_32_TSS 11
-#define AR_TYPE_BUSY_16_TSS 3
-#define AR_TYPE_LDT 2
+#define VMX_AR_TYPE_ACCESSES_MASK 1
+#define VMX_AR_TYPE_READABLE_MASK (1 << 1)
+#define VMX_AR_TYPE_WRITEABLE_MASK (1 << 2)
+#define VMX_AR_TYPE_CODE_MASK (1 << 3)
+#define VMX_AR_TYPE_MASK 0x0f
+#define VMX_AR_TYPE_BUSY_64_TSS 11
+#define VMX_AR_TYPE_BUSY_32_TSS 11
+#define VMX_AR_TYPE_BUSY_16_TSS 3
+#define VMX_AR_TYPE_LDT 2
 
-#define AR_UNUSABLE_MASK (1 << 16)
-#define AR_S_MASK (1 << 4)
-#define AR_P_MASK (1 << 7)
-#define AR_L_MASK (1 << 13)
-#define AR_DB_MASK (1 << 14)
-#define AR_G_MASK (1 << 15)
-#define AR_DPL_SHIFT 5
-#define AR_DPL(ar) (((ar) >> AR_DPL_SHIFT) & 3)
+#define VMX_AR_UNUSABLE_MASK (1 << 16)
+#define VMX_AR_S_MASK (1 << 4)
+#define VMX_AR_P_MASK (1 << 7)
+#define VMX_AR_L_MASK (1 << 13)
+#define VMX_AR_DB_MASK (1 << 14)
+#define VMX_AR_G_MASK (1 << 15)
+#define VMX_AR_DPL_SHIFT 5
+#define VMX_AR_DPL(ar) (((ar) >> VMX_AR_DPL_SHIFT) & 3)
 
-#define AR_RESERVD_MASK 0xfffe0f00
+#define VMX_AR_RESERVD_MASK 0xfffe0f00
 
 #define TSS_PRIVATE_MEMSLOT			(KVM_USER_MEM_SLOTS + 0)
 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT	(KVM_USER_MEM_SLOTS + 1)
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index ab456dc..3292543 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -120,7 +120,7 @@
 	__u8  _pad3[16];				/* 0x070 */
 	__u8  hd0_info[16];	/* obsolete! */		/* 0x080 */
 	__u8  hd1_info[16];	/* obsolete! */		/* 0x090 */
-	struct sys_desc_table sys_desc_table;		/* 0x0a0 */
+	struct sys_desc_table sys_desc_table; /* obsolete! */	/* 0x0a0 */
 	struct olpc_ofw_header olpc_ofw_header;		/* 0x0b0 */
 	__u32 ext_ramdisk_image;			/* 0x0c0 */
 	__u32 ext_ramdisk_size;				/* 0x0c4 */
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index f36d56b..f0412c5 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -27,6 +27,8 @@
 #define HV_X64_MSR_VP_RUNTIME_AVAILABLE		(1 << 0)
 /* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
 #define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE	(1 << 1)
+/* Partition reference TSC MSR is available */
+#define HV_X64_MSR_REFERENCE_TSC_AVAILABLE              (1 << 9)
 
 /* A partition's reference time stamp counter (TSC) page */
 #define HV_X64_MSR_REFERENCE_TSC		0x40000021
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index a0eab85..76880ed 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -15,7 +15,8 @@
 	__u64 time;	/* wall time_t when error was detected */
 	__u8  cpuvendor;	/* cpu vendor as encoded in system.h */
 	__u8  inject_flags;	/* software inject flags */
-	__u16  pad;
+	__u8  severity;
+	__u8  usable_addr;
 	__u32 cpuid;	/* CPUID 1 EAX */
 	__u8  cs;		/* code segment */
 	__u8  bank;	/* machine check bank */
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 180a0c3..79887ab 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -37,8 +37,6 @@
 #define X86_EFLAGS_VM		_BITUL(X86_EFLAGS_VM_BIT)
 #define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
 #define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
-#define X86_EFLAGS_AC_BIT	18 /* Alignment Check/Access Control */
-#define X86_EFLAGS_AC		_BITUL(X86_EFLAGS_AC_BIT)
 #define X86_EFLAGS_VIF_BIT	19 /* Virtual Interrupt Flag */
 #define X86_EFLAGS_VIF		_BITUL(X86_EFLAGS_VIF_BIT)
 #define X86_EFLAGS_VIP_BIT	20 /* Virtual Interrupt Pending */
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 1fe9218..37fee27 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -58,6 +58,7 @@
 #define EXIT_REASON_INVALID_STATE       33
 #define EXIT_REASON_MSR_LOAD_FAIL       34
 #define EXIT_REASON_MWAIT_INSTRUCTION   36
+#define EXIT_REASON_MONITOR_TRAP_FLAG   37
 #define EXIT_REASON_MONITOR_INSTRUCTION 39
 #define EXIT_REASON_PAUSE_INSTRUCTION   40
 #define EXIT_REASON_MCE_DURING_VMENTRY  41
@@ -106,6 +107,7 @@
 	{ EXIT_REASON_MSR_READ,              "MSR_READ" }, \
 	{ EXIT_REASON_MSR_WRITE,             "MSR_WRITE" }, \
 	{ EXIT_REASON_MWAIT_INSTRUCTION,     "MWAIT_INSTRUCTION" }, \
+	{ EXIT_REASON_MONITOR_TRAP_FLAG,     "MONITOR_TRAP_FLAG" }, \
 	{ EXIT_REASON_MONITOR_INSTRUCTION,   "MONITOR_INSTRUCTION" }, \
 	{ EXIT_REASON_PAUSE_INSTRUCTION,     "PAUSE_INSTRUCTION" }, \
 	{ EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 0f15af4..3c36221 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -23,8 +23,10 @@
 CFLAGS_irq.o := -I$(src)/../include/asm/trace
 
 obj-y			:= process_$(BITS).o signal.o
+obj-$(CONFIG_COMPAT)	+= signal_compat.o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y			+= time.o ioport.o ldt.o dumpstack.o nmi.o
+obj-y			+= time.o ioport.o dumpstack.o nmi.o
+obj-$(CONFIG_MODIFY_LDT_SYSCALL)	+= ldt.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y			+= probe_roms.o
@@ -107,8 +109,6 @@
 
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
 obj-$(CONFIG_TRACING)			+= tracepoint.o
-obj-$(CONFIG_IOSF_MBI)			+= iosf_mbi.o
-obj-$(CONFIG_PMC_ATOM)			+= pmc_atom.o
 
 ###
 # 64 bit specific files
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e49ee24..ded848c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -445,6 +445,7 @@
 		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
 
 	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+	acpi_penalize_sci_irq(bus_irq, trigger, polarity);
 
 	/*
 	 * stash over-ride to indicate we've been here
@@ -710,7 +711,7 @@
 #endif
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
 {
 	int cpu;
 
@@ -726,12 +727,6 @@
 	*pcpu = cpu;
 	return 0;
 }
-
-/* wrapper to silence section mismatch warning */
-int __ref acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu)
-{
-	return _acpi_map_lsapic(handle, physid, pcpu);
-}
 EXPORT_SYMBOL(acpi_map_cpu);
 
 int acpi_unmap_cpu(int cpu)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index ede92c3..222a570 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -263,7 +263,7 @@
 
 	/* Verify whether apbt counter works */
 	t1 = dw_apb_clocksource_read(clocksource_apbt);
-	rdtscll(start);
+	start = rdtsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -273,7 +273,7 @@
 	 */
 	do {
 		rep_nop();
-		rdtscll(now);
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 
 	/* APBT is the only always on clocksource, it has to work! */
@@ -390,13 +390,13 @@
 	old = dw_apb_clocksource_read(clocksource_apbt);
 	old += loop;
 
-	t1 = __native_read_tsc();
+	t1 = rdtsc();
 
 	do {
 		new = dw_apb_clocksource_read(clocksource_apbt);
 	} while (new < old);
 
-	t2 = __native_read_tsc();
+	t2 = rdtsc();
 
 	shift = 5;
 	if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dcb5285..3ca3e46 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -457,45 +457,45 @@
 {
 	u64 tsc;
 
-	rdtscll(tsc);
+	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
 	return 0;
 }
 
-/*
- * Setup the lapic timer in periodic or oneshot mode
- */
-static void lapic_timer_setup(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int lapic_timer_shutdown(struct clock_event_device *evt)
 {
-	unsigned long flags;
 	unsigned int v;
 
 	/* Lapic used as dummy for broadcast ? */
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
-		return;
+		return 0;
 
-	local_irq_save(flags);
+	v = apic_read(APIC_LVTT);
+	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+	apic_write(APIC_LVTT, v);
+	apic_write(APIC_TMICT, 0);
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-		__setup_APIC_LVTT(lapic_timer_frequency,
-				  mode != CLOCK_EVT_MODE_PERIODIC, 1);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		v = apic_read(APIC_LVTT);
-		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-		apic_write(APIC_LVTT, v);
-		apic_write(APIC_TMICT, 0);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
+static inline int
+lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
+{
+	/* Lapic used as dummy for broadcast ? */
+	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
+		return 0;
 
-	local_irq_restore(flags);
+	__setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1);
+	return 0;
+}
+
+static int lapic_timer_set_periodic(struct clock_event_device *evt)
+{
+	return lapic_timer_set_periodic_oneshot(evt, false);
+}
+
+static int lapic_timer_set_oneshot(struct clock_event_device *evt)
+{
+	return lapic_timer_set_periodic_oneshot(evt, true);
 }
 
 /*
@@ -513,15 +513,18 @@
  * The local apic timer can be used for any function which is CPU local.
  */
 static struct clock_event_device lapic_clockevent = {
-	.name		= "lapic",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
-			| CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
-	.shift		= 32,
-	.set_mode	= lapic_timer_setup,
-	.set_next_event	= lapic_next_event,
-	.broadcast	= lapic_timer_broadcast,
-	.rating		= 100,
-	.irq		= -1,
+	.name			= "lapic",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
+				  | CLOCK_EVT_FEAT_DUMMY,
+	.shift			= 32,
+	.set_state_shutdown	= lapic_timer_shutdown,
+	.set_state_periodic	= lapic_timer_set_periodic,
+	.set_state_oneshot	= lapic_timer_set_oneshot,
+	.set_next_event		= lapic_next_event,
+	.broadcast		= lapic_timer_broadcast,
+	.rating			= 100,
+	.irq			= -1,
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
@@ -592,7 +595,7 @@
 	unsigned long pm = acpi_pm_read_early();
 
 	if (cpu_has_tsc)
-		rdtscll(tsc);
+		tsc = rdtsc();
 
 	switch (lapic_cal_loops++) {
 	case 0:
@@ -778,7 +781,7 @@
 		 * Setup the apic timer manually
 		 */
 		levt->event_handler = lapic_cal_handler;
-		lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
+		lapic_timer_set_periodic(levt);
 		lapic_cal_loops = -1;
 
 		/* Let the interrupts run */
@@ -788,7 +791,8 @@
 			cpu_relax();
 
 		/* Stop the lapic timer */
-		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
+		local_irq_disable();
+		lapic_timer_shutdown(levt);
 
 		/* Jiffies delta */
 		deltaj = lapic_cal_j2 - lapic_cal_j1;
@@ -799,8 +803,8 @@
 			apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
 		else
 			levt->features |= CLOCK_EVT_FEAT_DUMMY;
-	} else
-		local_irq_enable();
+	}
+	local_irq_enable();
 
 	if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
 		pr_warning("APIC timer disabled due to verification failure\n");
@@ -878,7 +882,7 @@
 	if (!evt->event_handler) {
 		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
 		/* Switch it off */
-		lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+		lapic_timer_shutdown(evt);
 		return;
 	}
 
@@ -1209,7 +1213,7 @@
 	long long max_loops = cpu_khz ? cpu_khz : 1000000;
 
 	if (cpu_has_tsc)
-		rdtscll(tsc);
+		tsc = rdtsc();
 
 	if (disable_apic) {
 		disable_ioapic_support();
@@ -1293,7 +1297,7 @@
 		}
 		if (queued) {
 			if (cpu_has_tsc && cpu_khz) {
-				rdtscll(ntsc);
+				ntsc = rdtsc();
 				max_loops = (cpu_khz << 10) - (ntsc - tsc);
 			} else
 				max_loops--;
@@ -1424,7 +1428,7 @@
 {
 	u64 msr;
 
-	if (cpu_has_apic)
+	if (!cpu_has_apic)
 		return;
 
 	rdmsrl(MSR_IA32_APICBASE, msr);
@@ -1483,10 +1487,13 @@
 
 static __init void x2apic_disable(void)
 {
-	u32 x2apic_id;
+	u32 x2apic_id, state = x2apic_state;
 
-	if (x2apic_state != X2APIC_ON)
-		goto out;
+	x2apic_mode = 0;
+	x2apic_state = X2APIC_DISABLED;
+
+	if (state != X2APIC_ON)
+		return;
 
 	x2apic_id = read_apic_id();
 	if (x2apic_id >= 255)
@@ -1494,9 +1501,6 @@
 
 	__x2apic_disable();
 	register_lapic_address(mp_lapic_addr);
-out:
-	x2apic_state = X2APIC_DISABLED;
-	x2apic_mode = 0;
 }
 
 static __init void x2apic_enable(void)
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index de918c4..f92ab36 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -191,7 +191,6 @@
 	.send_IPI_all			= flat_send_IPI_all,
 	.send_IPI_self			= apic_send_IPI_self,
 
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
@@ -299,7 +298,6 @@
 	.send_IPI_all			= physflat_send_IPI_all,
 	.send_IPI_self			= apic_send_IPI_self,
 
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index b205cdb..0d96749 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -152,7 +152,6 @@
 
 	.wakeup_secondary_cpu		= noop_wakeup_secondary_cpu,
 
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= NULL,
 
 	.read				= noop_apic_read,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 017149c..b548fd3 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -92,7 +92,6 @@
 
 	write_lcsr(CSR_G3_EXT_IRQ_GEN, int_gen.v);
 
-	atomic_set(&init_deasserted, 1);
 	return 0;
 }
 
@@ -235,7 +234,6 @@
 	.send_IPI_self			= numachip_send_IPI_self,
 
 	.wakeup_secondary_cpu		= numachip_wakeup_secondary,
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= NULL, /* REMRD not supported */
 
 	.read				= native_apic_mem_read,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index c4a8d63..971cf88 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -186,7 +186,6 @@
 	.send_IPI_all			= bigsmp_send_IPI_all,
 	.send_IPI_self			= default_send_IPI_self,
 
-	.wait_for_init_deassert		= true,
 	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 206052e..38a76f8 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2541,7 +2541,7 @@
 		 * Honour affinities which have been set in early boot
 		 */
 		if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
-			mask = idata->affinity;
+			mask = irq_data_get_affinity_mask(idata);
 		else
 			mask = apic->target_cpus();
 
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 1a9d735..5f1feb6 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -264,7 +264,7 @@
 
 static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
 {
-	hpet_msi_write(data->handler_data, msg);
+	hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
 }
 
 static struct irq_chip hpet_msi_controller = {
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index bda4886..7694ae6 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -111,7 +111,6 @@
 	.send_IPI_all			= default_send_IPI_all,
 	.send_IPI_self			= default_send_IPI_self,
 
-	.wait_for_init_deassert		= true,
 	.inquire_remote_apic		= default_inquire_remote_apic,
 
 	.read				= native_apic_mem_read,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 2683f36..1bbd0fe 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -169,8 +169,7 @@
 			goto next;
 
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
-			if (per_cpu(vector_irq, new_cpu)[vector] >
-			    VECTOR_UNDEFINED)
+			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
 				goto next;
 		}
 		/* Found one! */
@@ -182,7 +181,7 @@
 			   cpumask_intersects(d->old_domain, cpu_online_mask);
 		}
 		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
-			per_cpu(vector_irq, new_cpu)[vector] = irq;
+			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
 		d->cfg.vector = vector;
 		cpumask_copy(d->domain, vector_cpumask);
 		err = 0;
@@ -224,15 +223,16 @@
 
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
-	int cpu, vector;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int cpu, vector;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);
 
 	vector = data->cfg.vector;
 	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
-		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
@@ -242,12 +242,13 @@
 		return;
 	}
 
+	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
-			if (per_cpu(vector_irq, cpu)[vector] != irq)
+			if (per_cpu(vector_irq, cpu)[vector] != desc)
 				continue;
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 			break;
 		}
 	}
@@ -296,7 +297,7 @@
 	struct irq_alloc_info *info = arg;
 	struct apic_chip_data *data;
 	struct irq_data *irq_data;
-	int i, err;
+	int i, err, node;
 
 	if (disable_apic)
 		return -ENXIO;
@@ -308,12 +309,13 @@
 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		BUG_ON(!irq_data);
+		node = irq_data_get_node(irq_data);
 #ifdef	CONFIG_X86_IO_APIC
 		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
 			data = legacy_irq_data[virq + i];
 		else
 #endif
-			data = alloc_apic_chip_data(irq_data->node);
+			data = alloc_apic_chip_data(node);
 		if (!data) {
 			err = -ENOMEM;
 			goto error;
@@ -322,8 +324,7 @@
 		irq_data->chip = &lapic_controller;
 		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector_policy(virq + i, irq_data->node, data,
-					       info);
+		err = assign_irq_vector_policy(virq + i, node, data, info);
 		if (err)
 			goto error;
 	}
@@ -403,32 +404,32 @@
 	return arch_early_ioapic_init();
 }
 
+/* Initialize vector_irq on a new cpu */
 static void __setup_vector_irq(int cpu)
 {
-	/* Initialize vector_irq on a new cpu */
-	int irq, vector;
 	struct apic_chip_data *data;
+	struct irq_desc *desc;
+	int irq, vector;
 
 	/* Mark the inuse vectors */
-	for_each_active_irq(irq) {
-		data = apic_chip_data(irq_get_irq_data(irq));
-		if (!data)
-			continue;
+	for_each_irq_desc(irq, desc) {
+		struct irq_data *idata = irq_desc_get_irq_data(desc);
 
-		if (!cpumask_test_cpu(cpu, data->domain))
+		data = apic_chip_data(idata);
+		if (!data || !cpumask_test_cpu(cpu, data->domain))
 			continue;
 		vector = data->cfg.vector;
-		per_cpu(vector_irq, cpu)[vector] = irq;
+		per_cpu(vector_irq, cpu)[vector] = desc;
 	}
 	/* Mark the free vectors */
 	for (vector = 0; vector < NR_VECTORS; ++vector) {
-		irq = per_cpu(vector_irq, cpu)[vector];
-		if (irq <= VECTOR_UNDEFINED)
+		desc = per_cpu(vector_irq, cpu)[vector];
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		data = apic_chip_data(irq_get_irq_data(irq));
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!cpumask_test_cpu(cpu, data->domain))
-			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
 	}
 }
 
@@ -448,7 +449,7 @@
 	 * legacy vector to irq mapping:
 	 */
 	for (irq = 0; irq < nr_legacy_irqs(); irq++)
-		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
+		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);
 
 	__setup_vector_irq(cpu);
 }
@@ -490,7 +491,8 @@
 	if (err) {
 		struct irq_data *top = irq_get_irq_data(irq);
 
-		if (assign_irq_vector(irq, data, top->affinity))
+		if (assign_irq_vector(irq, data,
+				      irq_data_get_affinity_mask(top)))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
@@ -538,27 +540,30 @@
 
 	entering_ack_irq();
 
+	/* Prevent vectors vanishing under us */
+	raw_spin_lock(&vector_lock);
+
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		int irq;
-		unsigned int irr;
-		struct irq_desc *desc;
 		struct apic_chip_data *data;
+		struct irq_desc *desc;
+		unsigned int irr;
 
-		irq = __this_cpu_read(vector_irq[vector]);
-
-		if (irq <= VECTOR_UNDEFINED)
+	retry:
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
 			continue;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
+		if (!raw_spin_trylock(&desc->lock)) {
+			raw_spin_unlock(&vector_lock);
+			cpu_relax();
+			raw_spin_lock(&vector_lock);
+			goto retry;
+		}
 
-		data = apic_chip_data(&desc->irq_data);
+		data = apic_chip_data(irq_desc_get_irq_data(desc));
 		if (!data)
-			continue;
-
-		raw_spin_lock(&desc->lock);
+			goto unlock;
 
 		/*
 		 * Check if the irq migration is in progress. If so, we
@@ -583,11 +588,13 @@
 			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
 			goto unlock;
 		}
-		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
 
+	raw_spin_unlock(&vector_lock);
+
 	exiting_irq();
 }
 
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index ab3219b..cc8311c 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -182,7 +182,7 @@
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata x2apic_cpu_notifier = {
+static struct notifier_block x2apic_cpu_notifier = {
 	.notifier_call = update_clusterinfo,
 };
 
@@ -272,7 +272,6 @@
 	.send_IPI_all			= x2apic_send_IPI_all,
 	.send_IPI_self			= x2apic_send_IPI_self,
 
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= NULL,
 
 	.read				= native_apic_msr_read,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 3ffd925..662e915 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -128,7 +128,6 @@
 	.send_IPI_all			= x2apic_send_IPI_all,
 	.send_IPI_self			= x2apic_send_IPI_self,
 
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= NULL,
 
 	.read				= native_apic_msr_read,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c8d9295..4a13946 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -248,7 +248,6 @@
 	    APIC_DM_STARTUP;
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 
-	atomic_set(&init_deasserted, 1);
 	return 0;
 }
 
@@ -414,7 +413,6 @@
 	.send_IPI_self			= uv_send_IPI_self,
 
 	.wakeup_secondary_cpu		= uv_wakeup_secondary,
-	.wait_for_init_deassert		= false,
 	.inquire_remote_apic		= NULL,
 
 	.read				= native_apic_msr_read,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 927ec92..052c9c3 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -919,7 +919,7 @@
 	} else if (jiffies_since_last_check > idle_period) {
 		unsigned int idle_percentage;
 
-		idle_percentage = stime - last_stime;
+		idle_percentage = cputime_to_jiffies(stime - last_stime);
 		idle_percentage *= 100;
 		idle_percentage /= jiffies_since_last_check;
 		use_apm_idle = (idle_percentage > idle_threshold);
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 58118e2..145863d 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -1,4 +1,4 @@
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
@@ -163,6 +163,5 @@
 	schedule_delayed_work(&bios_check_work, 0);
 	return 0;
 }
-
-module_init(start_periodic_check_for_corruption);
+device_initcall(start_periodic_check_for_corruption);
 
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 9bff687..4eb065c 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -46,6 +46,8 @@
 					   perf_event_intel_uncore_snb.o \
 					   perf_event_intel_uncore_snbep.o \
 					   perf_event_intel_uncore_nhmex.o
+obj-$(CONFIG_CPU_SUP_INTEL)		+= perf_event_msr.o
+obj-$(CONFIG_CPU_SUP_AMD)		+= perf_event_msr.o
 endif
 
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dd3a4ba..4a70fc6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -11,6 +11,7 @@
 #include <asm/cpu.h>
 #include <asm/smp.h>
 #include <asm/pci-direct.h>
+#include <asm/delay.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/mmconfig.h>
@@ -114,7 +115,7 @@
 		const int K6_BUG_LOOP = 1000000;
 		int n;
 		void (*f_vide)(void);
-		unsigned long d, d2;
+		u64 d, d2;
 
 		printk(KERN_INFO "AMD K6 stepping B detected - ");
 
@@ -125,10 +126,10 @@
 
 		n = K6_BUG_LOOP;
 		f_vide = vide;
-		rdtscl(d);
+		d = rdtsc();
 		while (n--)
 			f_vide();
-		rdtscl(d2);
+		d2 = rdtsc();
 		d = d2-d;
 
 		if (d > 20*K6_BUG_LOOP)
@@ -506,6 +507,9 @@
 		/* A random value per boot for bit slice [12:upper_bit) */
 		va_align.bits = get_random_int() & va_align.mask;
 	}
+
+	if (cpu_has(c, X86_FEATURE_MWAITX))
+		use_mwaitx_delay();
 }
 
 static void early_init_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index cb9e5df..07ce52c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
 #include <linux/kgdb.h>
 #include <linux/smp.h>
 #include <linux/io.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/stackprotector.h>
 #include <asm/perf_event.h>
@@ -1185,10 +1186,10 @@
 	 * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
 	 */
 	wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
-	wrmsrl(MSR_LSTAR, entry_SYSCALL_64);
+	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
-	wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
+	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
 	/*
 	 * This only works on Intel CPUs.
 	 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1199,7 +1200,7 @@
 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
-	wrmsrl(MSR_CSTAR, ignore_sysret);
+	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
@@ -1488,3 +1489,20 @@
 	return boot_cpu_has(bit);
 }
 EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
+
+static void bsp_resume(void)
+{
+	if (this_cpu->c_bsp_resume)
+		this_cpu->c_bsp_resume(&boot_cpu_data);
+}
+
+static struct syscore_ops cpu_syscore_ops = {
+	.resume		= bsp_resume,
+};
+
+static int __init init_cpu_syscore(void)
+{
+	register_syscore_ops(&cpu_syscore_ops);
+	return 0;
+}
+core_initcall(init_cpu_syscore);
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index c37dc37..2584265 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -13,6 +13,7 @@
 	void		(*c_init)(struct cpuinfo_x86 *);
 	void		(*c_identify)(struct cpuinfo_x86 *);
 	void		(*c_detect_tlb)(struct cpuinfo_x86 *);
+	void		(*c_bsp_resume)(struct cpuinfo_x86 *);
 	int		c_x86_vendor;
 #ifdef CONFIG_X86_32
 	/* Optional vendor specific routine to obtain the cache size. */
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 50163fa..98a13db 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -371,6 +371,36 @@
 	}
 }
 
+static void init_intel_energy_perf(struct cpuinfo_x86 *c)
+{
+	u64 epb;
+
+	/*
+	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
+	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
+	 */
+	if (!cpu_has(c, X86_FEATURE_EPB))
+		return;
+
+	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
+		return;
+
+	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
+	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
+	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
+}
+
+static void intel_bsp_resume(struct cpuinfo_x86 *c)
+{
+	/*
+	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
+	 * so reinitialize it properly like during bootup:
+	 */
+	init_intel_energy_perf(c);
+}
+
 static void init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
@@ -478,21 +508,7 @@
 	if (cpu_has(c, X86_FEATURE_VMX))
 		detect_vmx_virtcap(c);
 
-	/*
-	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
-	 * x86_energy_perf_policy(8) is available to change it at run-time
-	 */
-	if (cpu_has(c, X86_FEATURE_EPB)) {
-		u64 epb;
-
-		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
-			pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
-			pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
-			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
-			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
-		}
-	}
+	init_intel_energy_perf(c);
 }
 
 #ifdef CONFIG_X86_32
@@ -747,6 +763,7 @@
 	.c_detect_tlb	= intel_detect_tlb,
 	.c_early_init   = early_init_intel,
 	.c_init		= init_intel,
+	.c_bsp_resume	= intel_bsp_resume,
 	.c_x86_vendor	= X86_VENDOR_INTEL,
 };
 
diff --git a/arch/x86/kernel/cpu/intel_pt.h b/arch/x86/kernel/cpu/intel_pt.h
index 1c338b0..336878a 100644
--- a/arch/x86/kernel/cpu/intel_pt.h
+++ b/arch/x86/kernel/cpu/intel_pt.h
@@ -25,32 +25,11 @@
  */
 #define TOPA_PMI_MARGIN 512
 
-/*
- * Table of Physical Addresses bits
- */
-enum topa_sz {
-	TOPA_4K	= 0,
-	TOPA_8K,
-	TOPA_16K,
-	TOPA_32K,
-	TOPA_64K,
-	TOPA_128K,
-	TOPA_256K,
-	TOPA_512K,
-	TOPA_1MB,
-	TOPA_2MB,
-	TOPA_4MB,
-	TOPA_8MB,
-	TOPA_16MB,
-	TOPA_32MB,
-	TOPA_64MB,
-	TOPA_128MB,
-	TOPA_SZ_END,
-};
+#define TOPA_SHIFT 12
 
-static inline unsigned int sizes(enum topa_sz tsz)
+static inline unsigned int sizes(unsigned int tsz)
 {
-	return 1 << (tsz + 12);
+	return 1 << (tsz + TOPA_SHIFT);
 };
 
 struct topa_entry {
@@ -66,20 +45,26 @@
 	u64	rsvd4	: 16;
 };
 
-#define TOPA_SHIFT 12
-#define PT_CPUID_LEAVES 2
+#define PT_CPUID_LEAVES		2
+#define PT_CPUID_REGS_NUM	4 /* number of registers (eax, ebx, ecx, edx) */
 
 enum pt_capabilities {
 	PT_CAP_max_subleaf = 0,
 	PT_CAP_cr3_filtering,
+	PT_CAP_psb_cyc,
+	PT_CAP_mtc,
 	PT_CAP_topa_output,
 	PT_CAP_topa_multiple_entries,
+	PT_CAP_single_range_output,
 	PT_CAP_payloads_lip,
+	PT_CAP_mtc_periods,
+	PT_CAP_cycle_thresholds,
+	PT_CAP_psb_periods,
 };
 
 struct pt_pmu {
 	struct pmu		pmu;
-	u32			caps[4 * PT_CPUID_LEAVES];
+	u32			caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
 };
 
 /**
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index bb34b03..a3311c8 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,4 +1,4 @@
-obj-y				=  mce.o mce-severity.o
+obj-y				=  mce.o mce-severity.o mce-genpool.o
 
 obj-$(CONFIG_X86_ANCIENT_MCE)	+= winchip.o p5.o
 obj-$(CONFIG_X86_MCE_INTEL)	+= mce_intel.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index a1aef95..34c89a3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -57,7 +57,6 @@
 
 	m.addr = mem_err->physical_addr;
 	mce_log(&m);
-	mce_notify_irq();
 }
 EXPORT_SYMBOL_GPL(apei_mce_report_mem_error);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
new file mode 100644
index 0000000..0a85010
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -0,0 +1,99 @@
+/*
+ * MCE event pool management in MCE context
+ *
+ * Copyright (C) 2015 Intel Corp.
+ * Author: Chen, Gong <gong.chen@linux.intel.com>
+ *
+ * This file is licensed under GPLv2.
+ */
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/genalloc.h>
+#include <linux/llist.h>
+#include "mce-internal.h"
+
+/*
+ * printk() is not safe in MCE context. This is a lock-less memory allocator
+ * used to save error information organized in a lock-less list.
+ *
+ * This memory pool is only to be used to save MCE records in MCE context.
+ * MCE events are rare, so a fixed size memory pool should be enough. Use
+ * 2 pages to save MCE events for now (~80 MCE records at most).
+ */
+#define MCE_POOLSZ	(2 * PAGE_SIZE)
+
+static struct gen_pool *mce_evt_pool;
+static LLIST_HEAD(mce_event_llist);
+static char gen_pool_buf[MCE_POOLSZ];
+
+void mce_gen_pool_process(void)
+{
+	struct llist_node *head;
+	struct mce_evt_llist *node;
+	struct mce *mce;
+
+	head = llist_del_all(&mce_event_llist);
+	if (!head)
+		return;
+
+	head = llist_reverse_order(head);
+	llist_for_each_entry(node, head, llnode) {
+		mce = &node->mce;
+		atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+		gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
+	}
+}
+
+bool mce_gen_pool_empty(void)
+{
+	return llist_empty(&mce_event_llist);
+}
+
+int mce_gen_pool_add(struct mce *mce)
+{
+	struct mce_evt_llist *node;
+
+	if (!mce_evt_pool)
+		return -EINVAL;
+
+	node = (void *)gen_pool_alloc(mce_evt_pool, sizeof(*node));
+	if (!node) {
+		pr_warn_ratelimited("MCE records pool full!\n");
+		return -ENOMEM;
+	}
+
+	memcpy(&node->mce, mce, sizeof(*mce));
+	llist_add(&node->llnode, &mce_event_llist);
+
+	return 0;
+}
+
+static int mce_gen_pool_create(void)
+{
+	struct gen_pool *tmpp;
+	int ret = -ENOMEM;
+
+	tmpp = gen_pool_create(ilog2(sizeof(struct mce_evt_llist)), -1);
+	if (!tmpp)
+		goto out;
+
+	ret = gen_pool_add(tmpp, (unsigned long)gen_pool_buf, MCE_POOLSZ, -1);
+	if (ret) {
+		gen_pool_destroy(tmpp);
+		goto out;
+	}
+
+	mce_evt_pool = tmpp;
+
+out:
+	return ret;
+}
+
+int mce_gen_pool_init(void)
+{
+	/* Just init mce_gen_pool once. */
+	if (mce_evt_pool)
+		return 0;
+
+	return mce_gen_pool_create();
+}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index fe32074..547720e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -13,6 +13,8 @@
 	MCE_PANIC_SEVERITY,
 };
 
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
 #define ATTR_LEN		16
 #define INITIAL_CHECK_INTERVAL	5 * 60 /* 5 minutes */
 
@@ -24,6 +26,16 @@
 	char			attrname[ATTR_LEN];	/* attribute name */
 };
 
+struct mce_evt_llist {
+	struct llist_node llnode;
+	struct mce mce;
+};
+
+void mce_gen_pool_process(void);
+bool mce_gen_pool_empty(void);
+int mce_gen_pool_add(struct mce *mce);
+int mce_gen_pool_init(void);
+
 extern int (*mce_severity)(struct mce *a, int tolerant, char **msg, bool is_excp);
 struct dentry *mce_get_debugfs_dir(void);
 
@@ -67,3 +79,5 @@
 	return -EINVAL;
 }
 #endif
+
+void mce_inject_log(struct mce *m);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index df919ff..9d014b82 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -52,11 +52,11 @@
 
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
-#define rcu_dereference_check_mce(p) \
+#define mce_log_get_idx_check(p) \
 ({ \
-	rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-			   lockdep_is_held(&mce_chrdev_read_mutex), \
-			   "suspicious rcu_dereference_check_mce() usage"); \
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+			 !lockdep_is_held(&mce_chrdev_read_mutex), \
+			 "suspicious mce_log_get_idx_check() usage"); \
 	smp_load_acquire(&(p)); \
 })
 
@@ -110,22 +110,24 @@
  */
 mce_banks_t mce_banks_ce_disabled;
 
-static DEFINE_PER_CPU(struct work_struct, mce_work);
+static struct work_struct mce_work;
+static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
+static int mce_usable_address(struct mce *m);
 
 /*
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
 {
 	memset(m, 0, sizeof(struct mce));
 	m->cpu = m->extcpu = smp_processor_id();
-	rdtscll(m->tsc);
+	m->tsc = rdtsc();
 	/* We hope get_seconds stays lockless */
 	m->time = get_seconds();
 	m->cpuvendor = boot_cpu_data.x86_vendor;
@@ -157,12 +159,13 @@
 	/* Emit the trace record: */
 	trace_mce_record(mce);
 
-	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
+	if (!mce_gen_pool_add(mce))
+		irq_work_queue(&mce_irq_work);
 
 	mce->finished = 0;
 	wmb();
 	for (;;) {
-		entry = rcu_dereference_check_mce(mcelog.next);
+		entry = mce_log_get_idx_check(mcelog.next);
 		for (;;) {
 
 			/*
@@ -196,48 +199,23 @@
 	set_bit(0, &mce_need_notify);
 }
 
-static void drain_mcelog_buffer(void)
+void mce_inject_log(struct mce *m)
 {
-	unsigned int next, i, prev = 0;
-
-	next = ACCESS_ONCE(mcelog.next);
-
-	do {
-		struct mce *m;
-
-		/* drain what was logged during boot */
-		for (i = prev; i < next; i++) {
-			unsigned long start = jiffies;
-			unsigned retries = 1;
-
-			m = &mcelog.entry[i];
-
-			while (!m->finished) {
-				if (time_after_eq(jiffies, start + 2*retries))
-					retries++;
-
-				cpu_relax();
-
-				if (!m->finished && retries >= 4) {
-					pr_err("skipping error being logged currently!\n");
-					break;
-				}
-			}
-			smp_rmb();
-			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
-		}
-
-		memset(mcelog.entry + prev, 0, (next - prev) * sizeof(*m));
-		prev = next;
-		next = cmpxchg(&mcelog.next, prev, 0);
-	} while (next != prev);
+	mutex_lock(&mce_chrdev_read_mutex);
+	mce_log(m);
+	mutex_unlock(&mce_chrdev_read_mutex);
 }
+EXPORT_SYMBOL_GPL(mce_inject_log);
 
+static struct notifier_block mce_srao_nb;
 
 void mce_register_decode_chain(struct notifier_block *nb)
 {
+	/* Ensure SRAO notifier has the highest priority in the decode chain. */
+	if (nb != &mce_srao_nb && nb->priority == INT_MAX)
+		nb->priority -= 1;
+
 	atomic_notifier_chain_register(&x86_mce_decoder_chain, nb);
-	drain_mcelog_buffer();
 }
 EXPORT_SYMBOL_GPL(mce_register_decode_chain);
 
@@ -461,61 +439,6 @@
 	}
 }
 
-/*
- * Simple lockless ring to communicate PFNs from the exception handler with the
- * process context work function. This is vastly simplified because there's
- * only a single reader and a single writer.
- */
-#define MCE_RING_SIZE 16	/* we use one entry less */
-
-struct mce_ring {
-	unsigned short start;
-	unsigned short end;
-	unsigned long ring[MCE_RING_SIZE];
-};
-static DEFINE_PER_CPU(struct mce_ring, mce_ring);
-
-/* Runs with CPU affinity in workqueue */
-static int mce_ring_empty(void)
-{
-	struct mce_ring *r = this_cpu_ptr(&mce_ring);
-
-	return r->start == r->end;
-}
-
-static int mce_ring_get(unsigned long *pfn)
-{
-	struct mce_ring *r;
-	int ret = 0;
-
-	*pfn = 0;
-	get_cpu();
-	r = this_cpu_ptr(&mce_ring);
-	if (r->start == r->end)
-		goto out;
-	*pfn = r->ring[r->start];
-	r->start = (r->start + 1) % MCE_RING_SIZE;
-	ret = 1;
-out:
-	put_cpu();
-	return ret;
-}
-
-/* Always runs in MCE context with preempt off */
-static int mce_ring_add(unsigned long pfn)
-{
-	struct mce_ring *r = this_cpu_ptr(&mce_ring);
-	unsigned next;
-
-	next = (r->end + 1) % MCE_RING_SIZE;
-	if (next == r->start)
-		return -1;
-	r->ring[r->end] = pfn;
-	wmb();
-	r->end = next;
-	return 0;
-}
-
 int mce_available(struct cpuinfo_x86 *c)
 {
 	if (mca_cfg.disabled)
@@ -525,12 +448,10 @@
 
 static void mce_schedule_work(void)
 {
-	if (!mce_ring_empty())
-		schedule_work(this_cpu_ptr(&mce_work));
+	if (!mce_gen_pool_empty() && keventd_up())
+		schedule_work(&mce_work);
 }
 
-static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
-
 static void mce_irq_work_cb(struct irq_work *entry)
 {
 	mce_notify_irq();
@@ -551,9 +472,30 @@
 		return;
 	}
 
-	irq_work_queue(this_cpu_ptr(&mce_irq_work));
+	irq_work_queue(&mce_irq_work);
 }
 
+static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
+				void *data)
+{
+	struct mce *mce = (struct mce *)data;
+	unsigned long pfn;
+
+	if (!mce)
+		return NOTIFY_DONE;
+
+	if (mce->usable_addr && (mce->severity == MCE_AO_SEVERITY)) {
+		pfn = mce->addr >> PAGE_SHIFT;
+		memory_failure(pfn, MCE_VECTOR, 0);
+	}
+
+	return NOTIFY_OK;
+}
+static struct notifier_block mce_srao_nb = {
+	.notifier_call	= srao_decode_notifier,
+	.priority = INT_MAX,
+};
+
 /*
  * Read ADDR and MISC registers.
  */
@@ -672,8 +614,11 @@
 		 */
 		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) {
 			if (m.status & MCI_STATUS_ADDRV) {
-				mce_ring_add(m.addr >> PAGE_SHIFT);
-				mce_schedule_work();
+				m.severity = severity;
+				m.usable_addr = mce_usable_address(&m);
+
+				if (!mce_gen_pool_add(&m))
+					mce_schedule_work();
 			}
 		}
 
@@ -1029,7 +974,6 @@
 {
 	struct mca_config *cfg = &mca_cfg;
 	struct mce m, *final;
-	enum ctx_state prev_state;
 	int i;
 	int worst = 0;
 	int severity;
@@ -1055,7 +999,7 @@
 	int flags = MF_ACTION_REQUIRED;
 	int lmce = 0;
 
-	prev_state = ist_enter(regs);
+	ist_enter(regs);
 
 	this_cpu_inc(mce_exception_count);
 
@@ -1143,15 +1087,9 @@
 
 		mce_read_aux(&m, i);
 
-		/*
-		 * Action optional error. Queue address for later processing.
-		 * When the ring overflows we just ignore the AO error.
-		 * RED-PEN add some logging mechanism when
-		 * usable_address or mce_add_ring fails.
-		 * RED-PEN don't ignore overflow for mca_cfg.tolerant == 0
-		 */
-		if (severity == MCE_AO_SEVERITY && mce_usable_address(&m))
-			mce_ring_add(m.addr >> PAGE_SHIFT);
+		/* The severity computed above is assumed to be valid and non-zero. */
+		m.severity = severity;
+		m.usable_addr = mce_usable_address(&m);
 
 		mce_log(&m);
 
@@ -1227,7 +1165,7 @@
 	local_irq_disable();
 	ist_end_non_atomic();
 done:
-	ist_exit(regs, prev_state);
+	ist_exit(regs);
 }
 EXPORT_SYMBOL_GPL(do_machine_check);
 
@@ -1247,14 +1185,11 @@
 /*
  * Action optional processing happens here (picking up
  * from the list of faulting pages that do_machine_check()
- * placed into the "ring").
+ * placed into the genpool).
  */
 static void mce_process_work(struct work_struct *dummy)
 {
-	unsigned long pfn;
-
-	while (mce_ring_get(&pfn))
-		memory_failure(pfn, MCE_VECTOR, 0);
+	mce_gen_pool_process();
 }
 
 #ifdef CONFIG_X86_MCE_INTEL
@@ -1678,6 +1613,17 @@
 	}
 }
 
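+/*
+ * Vendor-specific counterpart of __mcheck_cpu_init_vendor(); called from
+ * mcheck_cpu_clear() when a CPU's machine-check opt-ins are torn down.
+ */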
+static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
+{
+	switch (c->x86_vendor) {
+	case X86_VENDOR_INTEL:
+		mce_intel_feature_clear(c);
+		break;
+	default:
+		break;
+	}
+}
+
 static void mce_start_timer(unsigned int cpu, struct timer_list *t)
 {
 	unsigned long iv = check_interval * HZ;
@@ -1731,13 +1677,36 @@
 		return;
 	}
 
+	if (mce_gen_pool_init()) {
+		mca_cfg.disabled = true;
+		pr_emerg("Couldn't allocate MCE records pool!\n");
+		return;
+	}
+
 	machine_check_vector = do_machine_check;
 
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_timer();
-	INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
-	init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
+}
+
+/*
+ * Called for each booted CPU to clear some machine check opt-ins
+ */
+void mcheck_cpu_clear(struct cpuinfo_x86 *c)
+{
+	if (mca_cfg.disabled)
+		return;
+
+	if (!mce_available(c))
+		return;
+
+	/*
+	 * A generic x86-wide clear could be added here if it is ever
+	 * needed: __mcheck_cpu_clear_generic(c);
+	 */
+	__mcheck_cpu_clear_vendor(c);
 }
 
 /*
@@ -1784,7 +1753,7 @@
 {
 	unsigned long *cpu_tsc = (unsigned long *)data;
 
-	rdtscll(cpu_tsc[smp_processor_id()]);
+	cpu_tsc[smp_processor_id()] = rdtsc();
 }
 
 static int mce_apei_read_done;
@@ -1850,7 +1819,7 @@
 			goto out;
 	}
 
-	next = rcu_dereference_check_mce(mcelog.next);
+	next = mce_log_get_idx_check(mcelog.next);
 
 	/* Only supports full reads right now */
 	err = -EINVAL;
@@ -2056,8 +2025,12 @@
 int __init mcheck_init(void)
 {
 	mcheck_intel_therm_init();
+	mce_register_decode_chain(&mce_srao_nb);
 	mcheck_vendor_init_severity();
 
+	INIT_WORK(&mce_work, mce_process_work);
+	init_irq_work(&mce_irq_work, mce_irq_work_cb);
+
 	return 0;
 }
 
@@ -2591,5 +2564,20 @@
 
 	return 0;
 }
-late_initcall(mcheck_debugfs_init);
+#else
+static int __init mcheck_debugfs_init(void) { return -EINVAL; }
 #endif
+
+static int __init mcheck_late_init(void)
+{
+	mcheck_debugfs_init();
+
+	/*
+	 * Flush out everything that has been logged during early boot, now that
+	 * everything has been initialized (workqueues, decoders, ...).
+	 */
+	mce_schedule_work();
+
+	return 0;
+}
+late_initcall(mcheck_late_init);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 844f56c..1e8bb6c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -146,6 +146,27 @@
 	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
 }
 
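+/*
+ * Enable or disable CMCI on every bank owned by this CPU; used to switch
+ * between interrupt and poll mode while a CMCI storm is being handled.
+ */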
+static void cmci_toggle_interrupt_mode(bool on)
+{
+	unsigned long flags, *owned;
+	int bank;
+	u64 val;
+
+	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	owned = this_cpu_ptr(mce_banks_owned);
+	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+
+		if (on)
+			val |= MCI_CTL2_CMCI_EN;
+		else
+			val &= ~MCI_CTL2_CMCI_EN;
+
+		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+	}
+	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 unsigned long cmci_intel_adjust_timer(unsigned long interval)
 {
 	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
@@ -175,7 +196,7 @@
 		 */
 		if (!atomic_read(&cmci_storm_on_cpus)) {
 			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
-			cmci_reenable();
+			cmci_toggle_interrupt_mode(true);
 			cmci_recheck();
 		}
 		return CMCI_POLL_INTERVAL;
@@ -186,22 +207,6 @@
 	}
 }
 
-static void cmci_storm_disable_banks(void)
-{
-	unsigned long flags, *owned;
-	int bank;
-	u64 val;
-
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
-	owned = this_cpu_ptr(mce_banks_owned);
-	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
-		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
-		val &= ~MCI_CTL2_CMCI_EN;
-		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
-	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
-}
-
 static bool cmci_storm_detect(void)
 {
 	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -223,7 +228,7 @@
 	if (cnt <= CMCI_STORM_THRESHOLD)
 		return false;
 
-	cmci_storm_disable_banks();
+	cmci_toggle_interrupt_mode(false);
 	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
 	r = atomic_add_return(1, &cmci_storm_on_cpus);
 	mce_timer_kick(CMCI_STORM_INTERVAL);
@@ -246,7 +251,6 @@
 		return;
 
 	machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
-	mce_notify_irq();
 }
 
 /*
@@ -435,7 +439,7 @@
 	cmci_recheck();
 }
 
-void intel_init_lmce(void)
+static void intel_init_lmce(void)
 {
 	u64 val;
 
@@ -448,9 +452,26 @@
 		wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
 }
 
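+/* Counterpart of intel_init_lmce(): clear the LMCE enable bit again. */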
+static void intel_clear_lmce(void)
+{
+	u64 val;
+
+	if (!lmce_supported())
+		return;
+
+	rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+	val &= ~MCG_EXT_CTL_LMCE_EN;
+	wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
 	intel_init_cmci();
 	intel_init_lmce();
 }
+
+void mce_intel_feature_clear(struct cpuinfo_x86 *c)
+{
+	intel_clear_lmce();
+}
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 737b0ad..12402e1 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -19,10 +19,9 @@
 /* Machine check handler for Pentium class Intel CPUs: */
 static void pentium_machine_check(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state;
 	u32 loaddr, hi, lotype;
 
-	prev_state = ist_enter(regs);
+	ist_enter(regs);
 
 	rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
 	rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
@@ -39,7 +38,7 @@
 
 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 
-	ist_exit(regs, prev_state);
+	ist_exit(regs);
 }
 
 /* Set up machine check reporting for processors with Intel style MCE: */
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index 44f1382..01dd870 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -15,12 +15,12 @@
 /* Machine check handler for WinChip C6: */
 static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state = ist_enter(regs);
+	ist_enter(regs);
 
 	printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
 	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 
-	ist_exit(regs, prev_state);
+	ist_exit(regs);
 }
 
 /* Set up machine check reporting on the Winchip C6 series */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 6236a54..9e3f3c7 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -377,17 +377,16 @@
 	return err;
 }
 
-static int mc_device_remove(struct device *dev, struct subsys_interface *sif)
+static void mc_device_remove(struct device *dev, struct subsys_interface *sif)
 {
 	int cpu = dev->id;
 
 	if (!cpu_online(cpu))
-		return 0;
+		return;
 
 	pr_debug("CPU%d removed\n", cpu);
 	microcode_fini_cpu(cpu);
 	sysfs_remove_group(&dev->kobj, &mc_attr_group);
-	return 0;
 }
 
 static struct subsys_interface mc_cpu_interface = {
@@ -460,7 +459,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata mc_cpu_notifier = {
+static struct notifier_block mc_cpu_notifier = {
 	.notifier_call	= mc_cpu_callback,
 };
 
diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index 8187b72..37ea89c 100644
--- a/arch/x86/kernel/cpu/microcode/intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -390,7 +390,7 @@
 }
 
 #ifdef DEBUG
-static void __ref show_saved_mc(void)
+static void show_saved_mc(void)
 {
 	int i, j;
 	unsigned int sig, pf, rev, total_size, data_size, date;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index aad4bd8..381c8b9 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -18,6 +18,7 @@
 #include <linux/efi.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/kexec.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv.h>
@@ -28,10 +29,14 @@
 #include <asm/i8259.h>
 #include <asm/apic.h>
 #include <asm/timer.h>
+#include <asm/reboot.h>
 
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);
 
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);
+
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
 
@@ -67,8 +72,47 @@
 }
 EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
 EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+
+void hv_setup_kexec_handler(void (*handler)(void))
+{
+	hv_kexec_handler = handler;
+}
+EXPORT_SYMBOL_GPL(hv_setup_kexec_handler);
+
+void hv_remove_kexec_handler(void)
+{
+	hv_kexec_handler = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_remove_kexec_handler);
+
+void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs))
+{
+	hv_crash_handler = handler;
+}
+EXPORT_SYMBOL_GPL(hv_setup_crash_handler);
+
+void hv_remove_crash_handler(void)
+{
+	hv_crash_handler = NULL;
+}
+EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
 #endif
 
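+/*
+ * Invoke the registered Hyper-V kexec/crash handlers, if any, before the
+ * native shutdown and crash paths run.
+ */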
+static void hv_machine_shutdown(void)
+{
+	if (kexec_in_progress && hv_kexec_handler)
+		hv_kexec_handler();
+	native_machine_shutdown();
+}
+
+static void hv_machine_crash_shutdown(struct pt_regs *regs)
+{
+	if (hv_crash_handler)
+		hv_crash_handler(regs);
+	native_machine_crash_shutdown(regs);
+}
+
 static uint32_t  __init ms_hyperv_platform(void)
 {
 	u32 eax;
@@ -114,6 +158,7 @@
 	 * Extract the features and hints
 	 */
 	ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
+	ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
 	ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
 	printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
@@ -141,6 +186,9 @@
 	no_timer_check = 1;
 #endif
 
+	machine_ops.shutdown = hv_machine_shutdown;
+	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+	mark_tsc_unstable("running on Hyper-V");
 }
 
 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index e7ed0d8..f891b47 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -448,7 +448,6 @@
 	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
 			     increment);
 }
-EXPORT_SYMBOL(mtrr_add);
 
 /**
  * mtrr_del_page - delete a memory type region
@@ -537,7 +536,6 @@
 		return -EINVAL;
 	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 }
-EXPORT_SYMBOL(mtrr_del);
 
 /**
  * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9469dfa..66dd3fe9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1551,7 +1551,7 @@
 }
 
 /* Merge two pointer arrays */
-static __init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
+__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
 {
 	struct attribute **new;
 	int j, i;
@@ -2179,6 +2179,7 @@
 	int idx = segment >> 3;
 
 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 		struct ldt_struct *ldt;
 
 		if (idx > LDT_ENTRIES)
@@ -2190,6 +2191,9 @@
 			return 0;
 
 		desc = &ldt->entries[idx];
+#else
+		return 0;
+#endif
 	} else {
 		if (idx > GDT_ENTRIES)
 			return 0;
@@ -2200,7 +2204,7 @@
 	return get_desc_base(desc);
 }
 
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_IA32_EMULATION
 
 #include <asm/compat.h>
 
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 3e7fd27..5edf6d8 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -165,7 +165,7 @@
 	unsigned	core_id;	/* per-core: core id */
 };
 
-#define MAX_LBR_ENTRIES		16
+#define MAX_LBR_ENTRIES		32
 
 enum {
 	X86_PERF_KFREE_SHARED = 0,
@@ -594,6 +594,7 @@
 	struct event_constraint *pebs_constraints;
 	void		(*pebs_aliases)(struct perf_event *event);
 	int 		max_pebs_events;
+	unsigned long	free_running_flags;
 
 	/*
 	 * Intel LBR
@@ -624,6 +625,7 @@
 struct x86_perf_task_context {
 	u64 lbr_from[MAX_LBR_ENTRIES];
 	u64 lbr_to[MAX_LBR_ENTRIES];
+	u64 lbr_info[MAX_LBR_ENTRIES];
 	int lbr_callstack_users;
 	int lbr_stack_state;
 };
@@ -793,6 +795,8 @@
 ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event);
 ssize_t intel_event_sysfs_show(char *page, u64 config);
 
+struct attribute **merge_attr(struct attribute **a, struct attribute **b);
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
@@ -808,20 +812,6 @@
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
-{
-	/* user explicitly requested branch sampling */
-	if (has_branch_stack(event))
-		return true;
-
-	/* implicit branch sampling to correct PEBS skid */
-	if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
-	    x86_pmu.intel_cap.pebs_format < 2)
-		return true;
-
-	return false;
-}
-
 static inline bool intel_pmu_has_bts(struct perf_event *event)
 {
 	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
@@ -873,6 +863,8 @@
 
 extern struct event_constraint intel_hsw_pebs_event_constraints[];
 
+extern struct event_constraint intel_skl_pebs_event_constraints[];
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
 
 void intel_pmu_pebs_enable(struct perf_event *event);
@@ -911,6 +903,8 @@
 
 void intel_pmu_lbr_init_hsw(void);
 
+void intel_pmu_lbr_init_skl(void);
+
 int intel_pmu_setup_lbr_filter(struct perf_event *event);
 
 void intel_pt_interrupt(void);
@@ -934,6 +928,7 @@
 {
 	return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
 }
+
 #else /* CONFIG_CPU_SUP_INTEL */
 
 static inline void reserve_ds_buffers(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6326ae2..3f124d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -177,6 +177,14 @@
 	EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_skl_event_constraints[] = {
+	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
+	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
+	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */
+	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
+	EVENT_CONSTRAINT_END
+};
+
 static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
@@ -193,6 +201,13 @@
 	EVENT_EXTRA_END
 };
 
+static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
+	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+	EVENT_EXTRA_END
+};
+
 EVENT_ATTR_STR(mem-loads,	mem_ld_nhm,	"event=0x0b,umask=0x10,ldlat=3");
 EVENT_ATTR_STR(mem-loads,	mem_ld_snb,	"event=0xcd,umask=0x1,ldlat=3");
 EVENT_ATTR_STR(mem-stores,	mem_st_snb,	"event=0xcd,umask=0x2");
@@ -244,6 +259,200 @@
 	return intel_perfmon_event_map[hw_event];
 }
 
+/*
+ * Notes on the events:
+ * - data reads do not include code reads (comparable to earlier tables)
+ * - data counts include speculative execution (except L1 write, dtlb, bpu)
+ * - remote node access includes remote memory, remote cache, remote mmio.
+ * - prefetches are not included in the counts.
+ * - icache miss does not include decoded icache
+ */
+
+#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
+#define SKL_DEMAND_RFO			BIT_ULL(1)
+#define SKL_ANY_RESPONSE		BIT_ULL(16)
+#define SKL_SUPPLIER_NONE		BIT_ULL(17)
+#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
+#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
+#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
+#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
+#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
+					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
+					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
+					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
+#define SKL_SPL_HIT			BIT_ULL(30)
+#define SKL_SNOOP_NONE			BIT_ULL(31)
+#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
+#define SKL_SNOOP_MISS			BIT_ULL(33)
+#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
+#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
+#define SKL_SNOOP_HITM			BIT_ULL(36)
+#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
+#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
+					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
+					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
+					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
+#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
+#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
+					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
+					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
+					 SKL_SNOOP_HITM|SKL_SPL_HIT)
+#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
+#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
+#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
+					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
+					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
+
+static __initconst const u64 skl_hw_cache_event_ids
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
+		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(L1I ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x283,	/* ICACHE_64B.MISS */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
+		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
+		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(DTLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_INST_RETIRED.ALL_LOADS */
+		[ C(RESULT_MISS)   ] = 0x608,	/* DTLB_LOAD_MISSES.WALK_COMPLETED */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_INST_RETIRED.ALL_STORES */
+		[ C(RESULT_MISS)   ] = 0x649,	/* DTLB_STORE_MISSES.WALK_COMPLETED */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(ITLB) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x2085,	/* ITLB_MISSES.STLB_HIT */
+		[ C(RESULT_MISS)   ] = 0xe85,	/* ITLB_MISSES.WALK_COMPLETED */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+ [ C(BPU ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
+		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = -1,
+		[ C(RESULT_MISS)   ] = -1,
+	},
+ },
+ [ C(NODE) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
+		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
+		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+};
+
+static __initconst const u64 skl_hw_cache_extra_regs
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL  ) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
+				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
+		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
+				       SKL_L3_MISS|SKL_ANY_SNOOP|
+				       SKL_SUPPLIER_NONE,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
+				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
+		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
+				       SKL_L3_MISS|SKL_ANY_SNOOP|
+				       SKL_SUPPLIER_NONE,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+ [ C(NODE) ] = {
+	[ C(OP_READ) ] = {
+		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
+				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
+		[ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
+				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
+	},
+	[ C(OP_WRITE) ] = {
+		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
+				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
+		[ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
+				       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
+	},
+	[ C(OP_PREFETCH) ] = {
+		[ C(RESULT_ACCESS) ] = 0x0,
+		[ C(RESULT_MISS)   ] = 0x0,
+	},
+ },
+};
+
 #define SNB_DMND_DATA_RD	(1ULL << 0)
 #define SNB_DMND_RFO		(1ULL << 1)
 #define SNB_DMND_IFETCH		(1ULL << 2)
@@ -1114,7 +1323,7 @@
 {
 	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
-	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
+	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
 	EVENT_EXTRA_END
 };
 
@@ -1594,6 +1803,7 @@
 
 	loops = 0;
 again:
+	intel_pmu_lbr_read();
 	intel_pmu_ack_status(status);
 	if (++loops > 100) {
 		static bool warned = false;
@@ -1608,16 +1818,16 @@
 
 	inc_irq_stat(apic_perf_irqs);
 
-	intel_pmu_lbr_read();
 
 	/*
-	 * CondChgd bit 63 doesn't mean any overflow status. Ignore
-	 * and clear the bit.
+	 * Ignore a range of extra bits in status that do not indicate
+	 * overflow by themselves.
 	 */
-	if (__test_and_clear_bit(63, (unsigned long *)&status)) {
-		if (!status)
-			goto done;
-	}
+	status &= ~(GLOBAL_STATUS_COND_CHG |
+		    GLOBAL_STATUS_ASIF |
+		    GLOBAL_STATUS_LBRS_FROZEN);
+	if (!status)
+		goto done;
 
 	/*
 	 * PEBS overflow sets bit 62 in the global status register
@@ -1699,18 +1909,22 @@
 	return NULL;
 }
 
-static int intel_alt_er(int idx)
+static int intel_alt_er(int idx, u64 config)
 {
+	int alt_idx = idx;
+
 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
 		return idx;
 
 	if (idx == EXTRA_REG_RSP_0)
-		return EXTRA_REG_RSP_1;
+		alt_idx = EXTRA_REG_RSP_1;
 
 	if (idx == EXTRA_REG_RSP_1)
-		return EXTRA_REG_RSP_0;
+		alt_idx = EXTRA_REG_RSP_0;
 
-	return idx;
+	if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
+		return idx;
+
+	return alt_idx;
 }
 
 static void intel_fixup_er(struct perf_event *event, int idx)
@@ -1799,7 +2013,7 @@
 		 */
 		c = NULL;
 	} else {
-		idx = intel_alt_er(idx);
+		idx = intel_alt_er(idx, reg->config);
 		if (idx != reg->idx) {
 			raw_spin_unlock_irqrestore(&era->lock, flags);
 			goto again;
@@ -2253,6 +2467,15 @@
 	}
 }
 
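+/*
+ * Sample flags that free-running PEBS can supply on its own.  PERF_SAMPLE_TIME
+ * is dropped for events using a non-default clockid, since the auto-filled
+ * time stamp is only valid for the default clock.
+ */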
+static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
+{
+	unsigned long flags = x86_pmu.free_running_flags;
+
+	if (event->attr.use_clockid)
+		flags &= ~PERF_SAMPLE_TIME;
+	return flags;
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
 	int ret = x86_pmu_hw_config(event);
@@ -2263,7 +2486,8 @@
 	if (event->attr.precise_ip) {
 		if (!event->attr.freq) {
 			event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
-			if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
+			if (!(event->attr.sample_type &
+			      ~intel_pmu_free_running_flags(event)))
 				event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
 		}
 		if (x86_pmu.pebs_aliases)
@@ -2694,6 +2918,8 @@
 	.event_map		= intel_pmu_event_map,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
+	.free_running_flags	= PEBS_FREERUNNING_FLAGS,
+
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32-bit width,
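+/*
+ * Decode-chain notifier (registered with top priority) that hands the page
+ * behind an action-optional (SRAO) memory error to memory_failure().
+ */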
 	 * so we install an artificial 1<<31 period regardless of
@@ -2732,6 +2958,7 @@
 	.event_map		= intel_pmu_event_map,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
 	.apic			= 1,
+	.free_running_flags	= PEBS_FREERUNNING_FLAGS,
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -3269,6 +3496,29 @@
 		pr_cont("Broadwell events, ");
 		break;
 
+	case 78: /* 14nm Skylake Mobile */
+	case 94: /* 14nm Skylake Desktop */
+		x86_pmu.late_ack = true;
+		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+		intel_pmu_lbr_init_skl();
+
+		x86_pmu.event_constraints = intel_skl_event_constraints;
+		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
+		x86_pmu.extra_regs = intel_skl_extra_regs;
+		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+		/* all extra regs are per-cpu when HT is on */
+		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+
+		x86_pmu.hw_config = hsw_hw_config;
+		x86_pmu.get_event_constraints = hsw_get_event_constraints;
+		x86_pmu.cpu_events = hsw_events_attrs;
+		WARN_ON(!x86_pmu.format_attrs);
+		pr_cont("Skylake events, ");
+		break;
+
 	default:
 		switch (x86_pmu.version) {
 		case 1:
@@ -3338,7 +3588,7 @@
 	 */
 	if (x86_pmu.extra_regs) {
 		for (er = x86_pmu.extra_regs; er->msr; er++) {
-			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+			er->extra_msr_access = check_msr(er->msr, 0x11UL);
 			/* Disable LBR select mapping */
 			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
 				x86_pmu.lbr_sel_map = NULL;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
index 43dd672..54690e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
@@ -62,9 +62,6 @@
 
 struct pmu bts_pmu;
 
-void intel_pmu_enable_bts(u64 config);
-void intel_pmu_disable_bts(void);
-
 static size_t buf_size(struct page *page)
 {
 	return 1 << (PAGE_SHIFT + page_private(page));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 71fc402..84f236a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -224,6 +224,19 @@
 
 #define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
 
+/* Same as HSW, plus TSC */
+
+struct pebs_record_skl {
+	u64 flags, ip;
+	u64 ax, bx, cx, dx;
+	u64 si, di, bp, sp;
+	u64 r8,  r9,  r10, r11;
+	u64 r12, r13, r14, r15;
+	u64 status, dla, dse, lat;
+	u64 real_ip, tsx_tuning;
+	u64 tsc;
+};
+
 void init_debug_store_on_cpu(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
@@ -675,6 +688,28 @@
 	EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_skl_pebs_event_constraints[] = {
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
+	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
+	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
+	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
+	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
+	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
+	/* Allow all events as PEBS with no flags */
+	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
+	EVENT_CONSTRAINT_END
+};
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
 	struct event_constraint *c;
@@ -754,6 +789,11 @@
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	struct debug_store *ds = cpuc->ds;
+	bool large_pebs = ds->pebs_interrupt_threshold >
+		ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+
+	if (large_pebs)
+		intel_pmu_drain_pebs_buffer();
 
 	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
@@ -762,12 +802,8 @@
 	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
 		cpuc->pebs_enabled &= ~(1ULL << 63);
 
-	if (ds->pebs_interrupt_threshold >
-	    ds->pebs_buffer_base + x86_pmu.pebs_record_size) {
-		intel_pmu_drain_pebs_buffer();
-		if (!pebs_is_enabled(cpuc))
-			perf_sched_cb_dec(event->ctx->pmu);
-	}
+	if (large_pebs && !pebs_is_enabled(cpuc))
+		perf_sched_cb_dec(event->ctx->pmu);
 
 	if (cpuc->enabled)
 		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -885,7 +921,7 @@
 	return 0;
 }
 
-static inline u64 intel_hsw_weight(struct pebs_record_hsw *pebs)
+static inline u64 intel_hsw_weight(struct pebs_record_skl *pebs)
 {
 	if (pebs->tsx_tuning) {
 		union hsw_tsx_tuning tsx = { .value = pebs->tsx_tuning };
@@ -894,7 +930,7 @@
 	return 0;
 }
 
-static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
+static inline u64 intel_hsw_transaction(struct pebs_record_skl *pebs)
 {
 	u64 txn = (pebs->tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
 
@@ -918,7 +954,7 @@
 	 * unconditionally access the 'extra' entries.
 	 */
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	struct pebs_record_hsw *pebs = __pebs;
+	struct pebs_record_skl *pebs = __pebs;
 	u64 sample_type;
 	int fll, fst, dsrc;
 	int fl = event->hw.flags;
@@ -1016,6 +1052,16 @@
 			data->txn = intel_hsw_transaction(pebs);
 	}
 
+	/*
+	 * v3 supplies an accurate time stamp, so we use that
+	 * for the time stamp.
+	 *
+	 * We can only do this for the default trace clock.
+	 */
+	if (x86_pmu.intel_cap.pebs_format >= 3 &&
+		event->attr.use_clockid == 0)
+		data->time = native_sched_clock_from_tsc(pebs->tsc);
+
 	if (has_branch_stack(event))
 		data->br_stack = &cpuc->lbr_stack;
 }
@@ -1142,6 +1188,7 @@
 
 	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
 		struct pebs_record_nhm *p = at;
+		u64 pebs_status;
 
 		/* PEBS v3 has accurate status bits */
 		if (x86_pmu.intel_cap.pebs_format >= 3) {
@@ -1152,12 +1199,17 @@
 			continue;
 		}
 
-		bit = find_first_bit((unsigned long *)&p->status,
+		pebs_status = p->status & cpuc->pebs_enabled;
+		pebs_status &= (1ULL << x86_pmu.max_pebs_events) - 1;
+
+		bit = find_first_bit((unsigned long *)&pebs_status,
 					x86_pmu.max_pebs_events);
-		if (bit >= x86_pmu.max_pebs_events)
+		if (WARN(bit >= x86_pmu.max_pebs_events,
+			 "PEBS record without PEBS event! status=%Lx pebs_enabled=%Lx active_mask=%Lx",
+			 (unsigned long long)p->status, (unsigned long long)cpuc->pebs_enabled,
+			 *(unsigned long long *)cpuc->active_mask))
 			continue;
-		if (!test_bit(bit, cpuc->active_mask))
-			continue;
+
 		/*
 		 * The PEBS hardware does not deal well with the situation
 		 * when events happen near to each other and multiple bits
@@ -1172,27 +1224,21 @@
 		 * one, and it's not possible to reconstruct all events
 		 * that caused the PEBS record. It's called collision.
 		 * If collision happened, the record will be dropped.
-		 *
 		 */
-		if (p->status != (1 << bit)) {
-			u64 pebs_status;
-
-			/* slow path */
-			pebs_status = p->status & cpuc->pebs_enabled;
-			pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1;
-			if (pebs_status != (1 << bit)) {
-				for_each_set_bit(i, (unsigned long *)&pebs_status,
-						 MAX_PEBS_EVENTS)
-					error[i]++;
-				continue;
-			}
+		if (p->status != (1ULL << bit)) {
+			for_each_set_bit(i, (unsigned long *)&pebs_status,
+					 x86_pmu.max_pebs_events)
+				error[i]++;
+			continue;
 		}
+
 		counts[bit]++;
 	}
 
 	for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
 		if ((counts[bit] == 0) && (error[bit] == 0))
 			continue;
+
 		event = cpuc->events[bit];
 		WARN_ON_ONCE(!event);
 		WARN_ON_ONCE(!event->attr.precise_ip);
@@ -1245,6 +1291,14 @@
 			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
 			break;
 
+		case 3:
+			pr_cont("PEBS fmt3%c, ", pebs_type);
+			x86_pmu.pebs_record_size =
+						sizeof(struct pebs_record_skl);
+			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
+			x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
+			break;
+
 		default:
 			printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
 			x86_pmu.pebs = 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 452a7bd..b2c9475 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -13,7 +13,8 @@
 	LBR_FORMAT_EIP		= 0x02,
 	LBR_FORMAT_EIP_FLAGS	= 0x03,
 	LBR_FORMAT_EIP_FLAGS2	= 0x04,
-	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_EIP_FLAGS2,
+	LBR_FORMAT_INFO		= 0x05,
+	LBR_FORMAT_MAX_KNOWN    = LBR_FORMAT_INFO,
 };
 
 static enum {
@@ -140,6 +141,13 @@
 	u64 debugctl, lbr_select = 0, orig_debugctl;
 
 	/*
+	 * No need to unfreeze manually, as v4 can do that as part
+	 * of the GLOBAL_STATUS ack.
+	 */
+	if (pmi && x86_pmu.version >= 4)
+		return;
+
+	/*
 	 * No need to reprogram LBR_SELECT in a PMI, as it
 	 * did not change.
 	 */
@@ -186,6 +194,8 @@
 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
 		wrmsrl(x86_pmu.lbr_from + i, 0);
 		wrmsrl(x86_pmu.lbr_to   + i, 0);
+		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+			wrmsrl(MSR_LBR_INFO_0 + i, 0);
 	}
 }
 
@@ -230,10 +240,12 @@
 
 	mask = x86_pmu.lbr_nr - 1;
 	tos = intel_pmu_lbr_tos();
-	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
 		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
 	task_ctx->lbr_stack_state = LBR_NONE;
 }
@@ -251,10 +263,12 @@
 
 	mask = x86_pmu.lbr_nr - 1;
 	tos = intel_pmu_lbr_tos();
-	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
 		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
 		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
 	}
 	task_ctx->lbr_stack_state = LBR_VALID;
 }
@@ -411,16 +425,31 @@
 	u64 tos = intel_pmu_lbr_tos();
 	int i;
 	int out = 0;
+	int num = x86_pmu.lbr_nr;
 
-	for (i = 0; i < x86_pmu.lbr_nr; i++) {
+	if (cpuc->lbr_sel->config & LBR_CALL_STACK)
+		num = tos;
+
+	for (i = 0; i < num; i++) {
 		unsigned long lbr_idx = (tos - i) & mask;
 		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
 		int skip = 0;
+		u16 cycles = 0;
 		int lbr_flags = lbr_desc[lbr_format];
 
 		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
 		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
 
+		if (lbr_format == LBR_FORMAT_INFO) {
+			u64 info;
+
+			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
+			mis = !!(info & LBR_INFO_MISPRED);
+			pred = !mis;
+			in_tx = !!(info & LBR_INFO_IN_TX);
+			abort = !!(info & LBR_INFO_ABORT);
+			cycles = (info & LBR_INFO_CYCLES);
+		}
 		if (lbr_flags & LBR_EIP_FLAGS) {
 			mis = !!(from & LBR_FROM_FLAG_MISPRED);
 			pred = !mis;
@@ -450,6 +479,7 @@
 		cpuc->lbr_entries[out].predicted = pred;
 		cpuc->lbr_entries[out].in_tx	 = in_tx;
 		cpuc->lbr_entries[out].abort	 = abort;
+		cpuc->lbr_entries[out].cycles	 = cycles;
 		cpuc->lbr_entries[out].reserved	 = 0;
 		out++;
 	}
@@ -947,6 +977,26 @@
 	pr_cont("16-deep LBR, ");
 }
 
+/* skylake */
+__init void intel_pmu_lbr_init_skl(void)
+{
+	x86_pmu.lbr_nr	 = 32;
+	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
+	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
+	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;
+
+	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
+	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
+
+	/*
+	 * SW branch filter usage:
+	 * - support syscall, sysret capture.
+	 *   That requires LBR_FAR, but that means far
+	 *   jmps need to be filtered out.
+	 */
+	pr_cont("32-deep LBR, ");
+}
+
 /* atom */
 void __init intel_pmu_lbr_init_atom(void)
 {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
index 183de71..4216928 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
@@ -65,15 +65,21 @@
 } pt_caps[] = {
 	PT_CAP(max_subleaf,		0, CR_EAX, 0xffffffff),
 	PT_CAP(cr3_filtering,		0, CR_EBX, BIT(0)),
+	PT_CAP(psb_cyc,			0, CR_EBX, BIT(1)),
+	PT_CAP(mtc,			0, CR_EBX, BIT(3)),
 	PT_CAP(topa_output,		0, CR_ECX, BIT(0)),
 	PT_CAP(topa_multiple_entries,	0, CR_ECX, BIT(1)),
+	PT_CAP(single_range_output,	0, CR_ECX, BIT(2)),
 	PT_CAP(payloads_lip,		0, CR_ECX, BIT(31)),
+	PT_CAP(mtc_periods,		1, CR_EAX, 0xffff0000),
+	PT_CAP(cycle_thresholds,	1, CR_EBX, 0xffff),
+	PT_CAP(psb_periods,		1, CR_EBX, 0xffff0000),
 };
 
 static u32 pt_cap_get(enum pt_capabilities cap)
 {
 	struct pt_cap_desc *cd = &pt_caps[cap];
-	u32 c = pt_pmu.caps[cd->leaf * 4 + cd->reg];
+	u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
 	unsigned int shift = __ffs(cd->mask);
 
 	return (c & cd->mask) >> shift;
@@ -94,12 +100,22 @@
 	.name	= "caps",
 };
 
+PMU_FORMAT_ATTR(cyc,		"config:1"	);
+PMU_FORMAT_ATTR(mtc,		"config:9"	);
 PMU_FORMAT_ATTR(tsc,		"config:10"	);
 PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
+PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
+PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
+PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);
 
 static struct attribute *pt_formats_attr[] = {
+	&format_attr_cyc.attr,
+	&format_attr_mtc.attr,
 	&format_attr_tsc.attr,
 	&format_attr_noretcomp.attr,
+	&format_attr_mtc_period.attr,
+	&format_attr_cyc_thresh.attr,
+	&format_attr_psb_period.attr,
 	NULL,
 };
 
@@ -129,10 +145,10 @@
 
 	for (i = 0; i < PT_CPUID_LEAVES; i++) {
 		cpuid_count(20, i,
-			    &pt_pmu.caps[CR_EAX + i*4],
-			    &pt_pmu.caps[CR_EBX + i*4],
-			    &pt_pmu.caps[CR_ECX + i*4],
-			    &pt_pmu.caps[CR_EDX + i*4]);
+			    &pt_pmu.caps[CR_EAX + i*PT_CPUID_REGS_NUM],
+			    &pt_pmu.caps[CR_EBX + i*PT_CPUID_REGS_NUM],
+			    &pt_pmu.caps[CR_ECX + i*PT_CPUID_REGS_NUM],
+			    &pt_pmu.caps[CR_EDX + i*PT_CPUID_REGS_NUM]);
 	}
 
 	ret = -ENOMEM;
@@ -170,15 +186,65 @@
 	return ret;
 }
 
-#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC)
+#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
+			  RTIT_CTL_CYC_THRESH	| \
+			  RTIT_CTL_PSB_FREQ)
+
+#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
+			 RTIT_CTL_MTC_RANGE)
+
+#define PT_CONFIG_MASK (RTIT_CTL_TSC_EN		| \
+			RTIT_CTL_DISRETC	| \
+			RTIT_CTL_CYC_PSB	| \
+			RTIT_CTL_MTC)
 
 static bool pt_event_valid(struct perf_event *event)
 {
 	u64 config = event->attr.config;
+	u64 allowed, requested;
 
 	if ((config & PT_CONFIG_MASK) != config)
 		return false;
 
+	if (config & RTIT_CTL_CYC_PSB) {
+		if (!pt_cap_get(PT_CAP_psb_cyc))
+			return false;
+
+		allowed = pt_cap_get(PT_CAP_psb_periods);
+		requested = (config & RTIT_CTL_PSB_FREQ) >>
+			RTIT_CTL_PSB_FREQ_OFFSET;
+		if (requested && (!(allowed & BIT(requested))))
+			return false;
+
+		allowed = pt_cap_get(PT_CAP_cycle_thresholds);
+		requested = (config & RTIT_CTL_CYC_THRESH) >>
+			RTIT_CTL_CYC_THRESH_OFFSET;
+		if (requested && (!(allowed & BIT(requested))))
+			return false;
+	}
+
+	if (config & RTIT_CTL_MTC) {
+		/*
+		 * In the unlikely case that CPUID lists valid mtc periods,
+		 * but not the mtc capability, drop out here.
+		 *
+		 * Spec says that setting mtc period bits while mtc bit in
+		 * CPUID is 0 will #GP, so better safe than sorry.
+		 */
+		if (!pt_cap_get(PT_CAP_mtc))
+			return false;
+
+		allowed = pt_cap_get(PT_CAP_mtc_periods);
+		if (!allowed)
+			return false;
+
+		requested = (config & RTIT_CTL_MTC_RANGE) >>
+			RTIT_CTL_MTC_RANGE_OFFSET;
+
+		if (!(allowed & BIT(requested)))
+			return false;
+	}
+
 	return true;
 }
 
@@ -191,6 +257,11 @@
 {
 	u64 reg;
 
+	if (!event->hw.itrace_started) {
+		event->hw.itrace_started = 1;
+		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+	}
+
 	reg = RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN;
 
 	if (!event->attr.exclude_kernel)
@@ -910,7 +981,6 @@
 
 		pt_config_buffer(buf->cur->table, buf->cur_idx,
 				 buf->output_off);
-		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
 		pt_config(event);
 	}
 }
@@ -934,7 +1004,6 @@
 
 	pt_config_buffer(buf->cur->table, buf->cur_idx,
 			 buf->output_off);
-	wrmsrl(MSR_IA32_RTIT_STATUS, 0);
 	pt_config(event);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 5cbd4e6..81431c0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -86,6 +86,10 @@
 			 1<<RAPL_IDX_RAM_NRG_STAT|\
 			 1<<RAPL_IDX_PP1_NRG_STAT)
 
+/* Knights Landing has PKG, RAM */
+#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
+			 1<<RAPL_IDX_RAM_NRG_STAT)
+
 /*
  * event code: LSB 8 bits, passed in attr->config
  * any other bit is reserved
@@ -486,6 +490,18 @@
 	NULL,
 };
 
+static struct attribute *rapl_events_knl_attr[] = {
+	EVENT_PTR(rapl_pkg),
+	EVENT_PTR(rapl_ram),
+
+	EVENT_PTR(rapl_pkg_unit),
+	EVENT_PTR(rapl_ram_unit),
+
+	EVENT_PTR(rapl_pkg_scale),
+	EVENT_PTR(rapl_ram_scale),
+	NULL,
+};
+
 static struct attribute_group rapl_pmu_events_group = {
 	.name = "events",
 	.attrs = NULL, /* patched at runtime */
@@ -730,6 +746,10 @@
 		rapl_cntr_mask = RAPL_IDX_SRV;
 		rapl_pmu_events_group.attrs = rapl_events_srv_attr;
 		break;
+	case 87: /* Knights Landing */
+		rapl_add_quirk(rapl_hsw_server_quirk);
+		rapl_cntr_mask = RAPL_IDX_KNL;
+		rapl_pmu_events_group.attrs = rapl_events_knl_attr;
+		break;
 
 	default:
 		/* unsupported */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 21b5e38..560e525 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -911,6 +911,9 @@
 	case 63: /* Haswell-EP */
 		ret = hswep_uncore_pci_init();
 		break;
+	case 86: /* BDX-DE */
+		ret = bdx_uncore_pci_init();
+		break;
 	case 42: /* Sandy Bridge */
 		ret = snb_uncore_pci_init();
 		break;
@@ -1209,6 +1212,11 @@
 		break;
 	case 42: /* Sandy Bridge */
 	case 58: /* Ivy Bridge */
+	case 60: /* Haswell */
+	case 69: /* Haswell */
+	case 70: /* Haswell */
+	case 61: /* Broadwell */
+	case 71: /* Broadwell */
 		snb_uncore_cpu_init();
 		break;
 	case 45: /* Sandy Bridge-EP */
@@ -1224,6 +1232,9 @@
 	case 63: /* Haswell-EP */
 		hswep_uncore_cpu_init();
 		break;
+	case 86: /* BDX-DE */
+		bdx_uncore_cpu_init();
+		break;
 	default:
 		return 0;
 	}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 0f77f0a..72c54c2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -336,6 +336,8 @@
 void ivbep_uncore_cpu_init(void);
 int hswep_uncore_pci_init(void);
 void hswep_uncore_cpu_init(void);
+int bdx_uncore_pci_init(void);
+void bdx_uncore_cpu_init(void);
 
 /* perf_event_intel_uncore_nhmex.c */
 void nhmex_uncore_cpu_init(void);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
index b005a78..f78574b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -45,6 +45,11 @@
 #define SNB_UNC_CBO_0_PER_CTR0                  0x706
 #define SNB_UNC_CBO_MSR_OFFSET                  0x10
 
+/* SNB ARB register */
+#define SNB_UNC_ARB_PER_CTR0			0x3b0
+#define SNB_UNC_ARB_PERFEVTSEL0			0x3b2
+#define SNB_UNC_ARB_MSR_OFFSET			0x10
+
 /* NHM global control register */
 #define NHM_UNC_PERF_GLOBAL_CTL                 0x391
 #define NHM_UNC_FIXED_CTR                       0x394
@@ -115,7 +120,7 @@
 	.read_counter	= uncore_msr_read_counter,
 };
 
-static struct event_constraint snb_uncore_cbox_constraints[] = {
+static struct event_constraint snb_uncore_arb_constraints[] = {
 	UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
 	UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
 	EVENT_CONSTRAINT_END
@@ -134,14 +139,28 @@
 	.single_fixed	= 1,
 	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
 	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
-	.constraints	= snb_uncore_cbox_constraints,
 	.ops		= &snb_uncore_msr_ops,
 	.format_group	= &snb_uncore_format_group,
 	.event_descs	= snb_uncore_events,
 };
 
+static struct intel_uncore_type snb_uncore_arb = {
+	.name		= "arb",
+	.num_counters   = 2,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 44,
+	.perf_ctr	= SNB_UNC_ARB_PER_CTR0,
+	.event_ctl	= SNB_UNC_ARB_PERFEVTSEL0,
+	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
+	.msr_offset	= SNB_UNC_ARB_MSR_OFFSET,
+	.constraints	= snb_uncore_arb_constraints,
+	.ops		= &snb_uncore_msr_ops,
+	.format_group	= &snb_uncore_format_group,
+};
+
 static struct intel_uncore_type *snb_msr_uncores[] = {
 	&snb_uncore_cbox,
+	&snb_uncore_arb,
 	NULL,
 };
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 6d6e85d..694510a8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -2215,7 +2215,7 @@
 	NULL,
 };
 
-static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
+static const struct pci_device_id hswep_uncore_pci_ids[] = {
 	{ /* Home Agent 0 */
 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2f30),
 		.driver_data = UNCORE_PCI_DEV_DATA(HSWEP_PCI_UNCORE_HA, 0),
@@ -2321,3 +2321,167 @@
 	return 0;
 }
 /* end of Haswell-EP uncore support */
+
+/* BDX-DE uncore support */
+
+static struct intel_uncore_type bdx_uncore_ubox = {
+	.name			= "ubox",
+	.num_counters		= 2,
+	.num_boxes		= 1,
+	.perf_ctr_bits		= 48,
+	.fixed_ctr_bits		= 48,
+	.perf_ctr		= HSWEP_U_MSR_PMON_CTR0,
+	.event_ctl		= HSWEP_U_MSR_PMON_CTL0,
+	.event_mask		= SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+	.fixed_ctr		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTR,
+	.fixed_ctl		= HSWEP_U_MSR_PMON_UCLK_FIXED_CTL,
+	.num_shared_regs	= 1,
+	.ops			= &ivbep_uncore_msr_ops,
+	.format_group		= &ivbep_uncore_ubox_format_group,
+};
+
+static struct event_constraint bdx_uncore_cbox_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x09, 0x3),
+	UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type bdx_uncore_cbox = {
+	.name			= "cbox",
+	.num_counters		= 4,
+	.num_boxes		= 8,
+	.perf_ctr_bits		= 48,
+	.event_ctl		= HSWEP_C0_MSR_PMON_CTL0,
+	.perf_ctr		= HSWEP_C0_MSR_PMON_CTR0,
+	.event_mask		= SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+	.box_ctl		= HSWEP_C0_MSR_PMON_BOX_CTL,
+	.msr_offset		= HSWEP_CBO_MSR_OFFSET,
+	.num_shared_regs	= 1,
+	.constraints		= bdx_uncore_cbox_constraints,
+	.ops			= &hswep_uncore_cbox_ops,
+	.format_group		= &hswep_uncore_cbox_format_group,
+};
+
+static struct intel_uncore_type *bdx_msr_uncores[] = {
+	&bdx_uncore_ubox,
+	&bdx_uncore_cbox,
+	&hswep_uncore_pcu,
+	NULL,
+};
+
+void bdx_uncore_cpu_init(void)
+{
+	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	uncore_msr_uncores = bdx_msr_uncores;
+}
+
+static struct intel_uncore_type bdx_uncore_ha = {
+	.name		= "ha",
+	.num_counters   = 4,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 48,
+	SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type bdx_uncore_imc = {
+	.name		= "imc",
+	.num_counters   = 5,
+	.num_boxes	= 2,
+	.perf_ctr_bits	= 48,
+	.fixed_ctr_bits	= 48,
+	.fixed_ctr	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+	.fixed_ctl	= SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+	.event_descs	= hswep_uncore_imc_events,
+	SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type bdx_uncore_irp = {
+	.name			= "irp",
+	.num_counters		= 4,
+	.num_boxes		= 1,
+	.perf_ctr_bits		= 48,
+	.event_mask		= SNBEP_PMON_RAW_EVENT_MASK,
+	.box_ctl		= SNBEP_PCI_PMON_BOX_CTL,
+	.ops			= &hswep_uncore_irp_ops,
+	.format_group		= &snbep_uncore_format_group,
+};
+
+static struct event_constraint bdx_uncore_r2pcie_constraints[] = {
+	UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+	UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+	UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x23, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x25, 0x1),
+	UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+	UNCORE_EVENT_CONSTRAINT(0x2d, 0x3),
+	EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type bdx_uncore_r2pcie = {
+	.name		= "r2pcie",
+	.num_counters   = 4,
+	.num_boxes	= 1,
+	.perf_ctr_bits	= 48,
+	.constraints	= bdx_uncore_r2pcie_constraints,
+	SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+enum {
+	BDX_PCI_UNCORE_HA,
+	BDX_PCI_UNCORE_IMC,
+	BDX_PCI_UNCORE_IRP,
+	BDX_PCI_UNCORE_R2PCIE,
+};
+
+static struct intel_uncore_type *bdx_pci_uncores[] = {
+	[BDX_PCI_UNCORE_HA]	= &bdx_uncore_ha,
+	[BDX_PCI_UNCORE_IMC]	= &bdx_uncore_imc,
+	[BDX_PCI_UNCORE_IRP]	= &bdx_uncore_irp,
+	[BDX_PCI_UNCORE_R2PCIE]	= &bdx_uncore_r2pcie,
+	NULL,
+};
+
+static const struct pci_device_id bdx_uncore_pci_ids[] = {
+	{ /* Home Agent 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f30),
+		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_HA, 0),
+	},
+	{ /* MC0 Channel 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb0),
+		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 0),
+	},
+	{ /* MC0 Channel 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fb1),
+		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IMC, 1),
+	},
+	{ /* IRP */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f39),
+		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_IRP, 0),
+	},
+	{ /* R2PCIe */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f34),
+		.driver_data = UNCORE_PCI_DEV_DATA(BDX_PCI_UNCORE_R2PCIE, 0),
+	},
+	{ /* end: all zeroes */ }
+};
+
+static struct pci_driver bdx_uncore_pci_driver = {
+	.name		= "bdx_uncore",
+	.id_table	= bdx_uncore_pci_ids,
+};
+
+int bdx_uncore_pci_init(void)
+{
+	int ret = snbep_pci2phy_map_init(0x6f1e);
+
+	if (ret)
+		return ret;
+	uncore_pci_uncores = bdx_pci_uncores;
+	uncore_pci_driver = &bdx_uncore_pci_driver;
+	return 0;
+}
+
+/* end of BDX-DE uncore support */
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
new file mode 100644
index 0000000..086b12e
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -0,0 +1,242 @@
+#include <linux/perf_event.h>
+
+enum perf_msr_id {
+	PERF_MSR_TSC			= 0,
+	PERF_MSR_APERF			= 1,
+	PERF_MSR_MPERF			= 2,
+	PERF_MSR_PPERF			= 3,
+	PERF_MSR_SMI			= 4,
+
+	PERF_MSR_EVENT_MAX,
+};
+
+static bool test_aperfmperf(int idx)
+{
+	return boot_cpu_has(X86_FEATURE_APERFMPERF);
+}
+
+static bool test_intel(int idx)
+{
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+	    boot_cpu_data.x86 != 6)
+		return false;
+
+	switch (boot_cpu_data.x86_model) {
+	case 30: /* 45nm Nehalem    */
+	case 26: /* 45nm Nehalem-EP */
+	case 46: /* 45nm Nehalem-EX */
+
+	case 37: /* 32nm Westmere    */
+	case 44: /* 32nm Westmere-EP */
+	case 47: /* 32nm Westmere-EX */
+
+	case 42: /* 32nm SandyBridge         */
+	case 45: /* 32nm SandyBridge-E/EN/EP */
+
+	case 58: /* 22nm IvyBridge       */
+	case 62: /* 22nm IvyBridge-EP/EX */
+
+	case 60: /* 22nm Haswell Core */
+	case 63: /* 22nm Haswell Server */
+	case 69: /* 22nm Haswell ULT */
+	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+
+	case 61: /* 14nm Broadwell Core-M */
+	case 86: /* 14nm Broadwell Xeon D */
+	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+	case 79: /* 14nm Broadwell Server */
+
+	case 55: /* 22nm Atom "Silvermont"                */
+	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+	case 76: /* 14nm Atom "Airmont"                   */
+		if (idx == PERF_MSR_SMI)
+			return true;
+		break;
+
+	case 78: /* 14nm Skylake Mobile */
+	case 94: /* 14nm Skylake Desktop */
+		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
+			return true;
+		break;
+	}
+
+	return false;
+}
+
+struct perf_msr {
+	u64	msr;
+	struct	perf_pmu_events_attr *attr;
+	bool	(*test)(int idx);
+};
+
+PMU_EVENT_ATTR_STRING(tsc,   evattr_tsc,   "event=0x00");
+PMU_EVENT_ATTR_STRING(aperf, evattr_aperf, "event=0x01");
+PMU_EVENT_ATTR_STRING(mperf, evattr_mperf, "event=0x02");
+PMU_EVENT_ATTR_STRING(pperf, evattr_pperf, "event=0x03");
+PMU_EVENT_ATTR_STRING(smi,   evattr_smi,   "event=0x04");
+
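+/*
+ * One entry per perf_msr_id: the MSR to read, its sysfs event attribute and
+ * an optional availability test.
+ */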
+static struct perf_msr msr[] = {
+	[PERF_MSR_TSC]   = { 0,			&evattr_tsc,	NULL,		 },
+	[PERF_MSR_APERF] = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
+	[PERF_MSR_MPERF] = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
+	[PERF_MSR_PPERF] = { MSR_PPERF,		&evattr_pperf,	test_intel,	 },
+	[PERF_MSR_SMI]   = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
+};
+
+static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
+	NULL,
+};
+
+static struct attribute_group events_attr_group = {
+	.name = "events",
+	.attrs = events_attrs,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-63");
+static struct attribute *format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+static struct attribute_group format_attr_group = {
+	.name = "format",
+	.attrs = format_attrs,
+};
+
+static const struct attribute_group *attr_groups[] = {
+	&events_attr_group,
+	&format_attr_group,
+	NULL,
+};
+
+static int msr_event_init(struct perf_event *event)
+{
+	u64 cfg = event->attr.config;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if (cfg >= PERF_MSR_EVENT_MAX)
+		return -EINVAL;
+
+	/* unsupported modes and filters */
+	if (event->attr.exclude_user   ||
+	    event->attr.exclude_kernel ||
+	    event->attr.exclude_hv     ||
+	    event->attr.exclude_idle   ||
+	    event->attr.exclude_host   ||
+	    event->attr.exclude_guest  ||
+	    event->attr.sample_period) /* no sampling */
+		return -EINVAL;
+
+	if (!msr[cfg].attr)
+		return -EINVAL;
+
+	event->hw.idx = -1;
+	event->hw.event_base = msr[cfg].msr;
+	event->hw.config = cfg;
+
+	return 0;
+}
+
+static inline u64 msr_read_counter(struct perf_event *event)
+{
+	u64 now;
+
+	if (event->hw.event_base)
+		rdmsrl(event->hw.event_base, now);
+	else
+		rdtscll(now);
+
+	return now;
+}
+static void msr_event_update(struct perf_event *event)
+{
+	u64 prev, now;
+	s64 delta;
+
+	/* Careful, an NMI might modify the previous event value. */
+again:
+	prev = local64_read(&event->hw.prev_count);
+	now = msr_read_counter(event);
+
+	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
+		goto again;
+
+	delta = now - prev;
+	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
+		delta <<= 32;
+		delta >>= 32; /* sign extend */
+	}
+	local64_add(delta, &event->count);
+}
+
+static void msr_event_start(struct perf_event *event, int flags)
+{
+	u64 now;
+
+	now = msr_read_counter(event);
+	local64_set(&event->hw.prev_count, now);
+}
+
+static void msr_event_stop(struct perf_event *event, int flags)
+{
+	msr_event_update(event);
+}
+
+static void msr_event_del(struct perf_event *event, int flags)
+{
+	msr_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int msr_event_add(struct perf_event *event, int flags)
+{
+	if (flags & PERF_EF_START)
+		msr_event_start(event, flags);
+
+	return 0;
+}
+
+static struct pmu pmu_msr = {
+	.task_ctx_nr	= perf_sw_context,
+	.attr_groups	= attr_groups,
+	.event_init	= msr_event_init,
+	.add		= msr_event_add,
+	.del		= msr_event_del,
+	.start		= msr_event_start,
+	.stop		= msr_event_stop,
+	.read		= msr_event_update,
+	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
+};
+
+static int __init msr_init(void)
+{
+	int i, j = 0;
+
+	if (!boot_cpu_has(X86_FEATURE_TSC)) {
+		pr_cont("no MSR PMU driver.\n");
+		return 0;
+	}
+
+	/* Probe the MSRs. */
+	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
+		u64 val;
+
+		/*
+		 * Virt sucks arse; you cannot tell if an R/O MSR is present :/
+		 */
+		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
+			msr[i].attr = NULL;
+	}
+
+	/* List remaining MSRs in the sysfs attrs. */
+	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
+		if (msr[i].attr)
+			events_attrs[j++] = &msr[i].attr->attr.attr;
+	}
+	events_attrs[j] = NULL;
+
+	perf_pmu_register(&pmu_msr, "msr", -1);
+
+	return 0;
+}
+device_initcall(msr_init);
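
Note: the PMU registered above is counting-only (PERF_PMU_CAP_NO_INTERRUPT, sampling rejected in msr_event_init), so its events are consumed by reading a perf fd. The following is a hypothetical userspace sketch, not part of the patch; it assumes the dynamic PMU type is exported at the usual sysfs path and that aperf is config 0x01, as the event attr strings suggest.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	/* Discover the dynamic PMU type assigned by perf_pmu_register(). */
	f = fopen("/sys/bus/event_source/devices/msr/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x01;	/* aperf, per the attr strings above (assumed) */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* Burn some cycles so the task-bound counter actually advances. */
	for (volatile unsigned long i = 0; i < 100000000UL; i++)
		;

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("aperf while spinning: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
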
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 83741a7..bd3507d 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -170,7 +170,7 @@
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block __refdata cpuid_class_cpu_notifier =
+static struct notifier_block cpuid_class_cpu_notifier =
 {
 	.notifier_call = cpuid_class_cpu_callback,
 };
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index ce95676..4d38416 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -110,7 +110,7 @@
 	 */
 	if (!arch_get_random_long(&rand)) {
 		/* The constant is an arbitrary large prime */
-		rdtscll(rand);
+		rand = rdtsc();
 		rand *= 0xc345c6b72fd16123UL;
 	}
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 10757d0..88b4da3 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -226,22 +226,7 @@
  */
 static unsigned long hpet_freq;
 
-static void hpet_legacy_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt);
-static int hpet_legacy_next_event(unsigned long delta,
-			   struct clock_event_device *evt);
-
-/*
- * The hpet clock event device
- */
-static struct clock_event_device hpet_clockevent = {
-	.name		= "hpet",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= hpet_legacy_set_mode,
-	.set_next_event = hpet_legacy_next_event,
-	.irq		= 0,
-	.rating		= 50,
-};
+static struct clock_event_device hpet_clockevent;
 
 static void hpet_stop_counter(void)
 {
@@ -306,64 +291,74 @@
 	printk(KERN_DEBUG "hpet clockevent registered\n");
 }
 
-static void hpet_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt, int timer)
+static int hpet_set_periodic(struct clock_event_device *evt, int timer)
 {
 	unsigned int cfg, cmp, now;
 	uint64_t delta;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		hpet_stop_counter();
-		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
-		delta >>= evt->shift;
-		now = hpet_readl(HPET_COUNTER);
-		cmp = now + (unsigned int) delta;
-		cfg = hpet_readl(HPET_Tn_CFG(timer));
-		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
-		       HPET_TN_SETVAL | HPET_TN_32BIT;
-		hpet_writel(cfg, HPET_Tn_CFG(timer));
-		hpet_writel(cmp, HPET_Tn_CMP(timer));
-		udelay(1);
-		/*
-		 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
-		 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
-		 * bit is automatically cleared after the first write.
-		 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
-		 * Publication # 24674)
-		 */
-		hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer));
-		hpet_start_counter();
-		hpet_print_config();
-		break;
+	hpet_stop_counter();
+	delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
+	delta >>= evt->shift;
+	now = hpet_readl(HPET_COUNTER);
+	cmp = now + (unsigned int)delta;
+	cfg = hpet_readl(HPET_Tn_CFG(timer));
+	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
+	       HPET_TN_32BIT;
+	hpet_writel(cfg, HPET_Tn_CFG(timer));
+	hpet_writel(cmp, HPET_Tn_CMP(timer));
+	udelay(1);
+	/*
+	 * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
+	 * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
+	 * bit is automatically cleared after the first write.
+	 * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
+	 * Publication # 24674)
+	 */
+	hpet_writel((unsigned int)delta, HPET_Tn_CMP(timer));
+	hpet_start_counter();
+	hpet_print_config();
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		cfg = hpet_readl(HPET_Tn_CFG(timer));
-		cfg &= ~HPET_TN_PERIODIC;
-		cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
-		hpet_writel(cfg, HPET_Tn_CFG(timer));
-		break;
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		cfg = hpet_readl(HPET_Tn_CFG(timer));
-		cfg &= ~HPET_TN_ENABLE;
-		hpet_writel(cfg, HPET_Tn_CFG(timer));
-		break;
+static int hpet_set_oneshot(struct clock_event_device *evt, int timer)
+{
+	unsigned int cfg;
 
-	case CLOCK_EVT_MODE_RESUME:
-		if (timer == 0) {
-			hpet_enable_legacy_int();
-		} else {
-			struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
-			irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
-			disable_irq(hdev->irq);
-			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
-			enable_irq(hdev->irq);
-		}
-		hpet_print_config();
-		break;
+	cfg = hpet_readl(HPET_Tn_CFG(timer));
+	cfg &= ~HPET_TN_PERIODIC;
+	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
+	hpet_writel(cfg, HPET_Tn_CFG(timer));
+
+	return 0;
+}
+
+static int hpet_shutdown(struct clock_event_device *evt, int timer)
+{
+	unsigned int cfg;
+
+	cfg = hpet_readl(HPET_Tn_CFG(timer));
+	cfg &= ~HPET_TN_ENABLE;
+	hpet_writel(cfg, HPET_Tn_CFG(timer));
+
+	return 0;
+}
+
+static int hpet_resume(struct clock_event_device *evt, int timer)
+{
+	if (!timer) {
+		hpet_enable_legacy_int();
+	} else {
+		struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+
+		irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
+		disable_irq(hdev->irq);
+		irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
+		enable_irq(hdev->irq);
 	}
+	hpet_print_config();
+
+	return 0;
 }
 
 static int hpet_next_event(unsigned long delta,
@@ -403,10 +398,24 @@
 	return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
-static void hpet_legacy_set_mode(enum clock_event_mode mode,
-			struct clock_event_device *evt)
+static int hpet_legacy_shutdown(struct clock_event_device *evt)
 {
-	hpet_set_mode(mode, evt, 0);
+	return hpet_shutdown(evt, 0);
+}
+
+static int hpet_legacy_set_oneshot(struct clock_event_device *evt)
+{
+	return hpet_set_oneshot(evt, 0);
+}
+
+static int hpet_legacy_set_periodic(struct clock_event_device *evt)
+{
+	return hpet_set_periodic(evt, 0);
+}
+
+static int hpet_legacy_resume(struct clock_event_device *evt)
+{
+	return hpet_resume(evt, 0);
 }
 
 static int hpet_legacy_next_event(unsigned long delta,
@@ -416,6 +425,22 @@
 }
 
 /*
+ * The hpet clock event device
+ */
+static struct clock_event_device hpet_clockevent = {
+	.name			= "hpet",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_periodic	= hpet_legacy_set_periodic,
+	.set_state_oneshot	= hpet_legacy_set_oneshot,
+	.set_state_shutdown	= hpet_legacy_shutdown,
+	.tick_resume		= hpet_legacy_resume,
+	.set_next_event		= hpet_legacy_next_event,
+	.irq			= 0,
+	.rating			= 50,
+};
+
+/*
  * HPET MSI Support
  */
 #ifdef CONFIG_PCI_MSI
@@ -426,7 +451,7 @@
 
 void hpet_msi_unmask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = data->handler_data;
+	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
 	unsigned int cfg;
 
 	/* unmask it */
@@ -437,7 +462,7 @@
 
 void hpet_msi_mask(struct irq_data *data)
 {
-	struct hpet_dev *hdev = data->handler_data;
+	struct hpet_dev *hdev = irq_data_get_irq_handler_data(data);
 	unsigned int cfg;
 
 	/* mask it */
@@ -459,11 +484,32 @@
 	msg->address_hi = 0;
 }
 
-static void hpet_msi_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int hpet_msi_shutdown(struct clock_event_device *evt)
 {
 	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
-	hpet_set_mode(mode, evt, hdev->num);
+
+	return hpet_shutdown(evt, hdev->num);
+}
+
+static int hpet_msi_set_oneshot(struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+
+	return hpet_set_oneshot(evt, hdev->num);
+}
+
+static int hpet_msi_set_periodic(struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+
+	return hpet_set_periodic(evt, hdev->num);
+}
+
+static int hpet_msi_resume(struct clock_event_device *evt)
+{
+	struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
+
+	return hpet_resume(evt, hdev->num);
 }
 
 static int hpet_msi_next_event(unsigned long delta,
@@ -523,10 +569,14 @@
 
 	evt->rating = 110;
 	evt->features = CLOCK_EVT_FEAT_ONESHOT;
-	if (hdev->flags & HPET_DEV_PERI_CAP)
+	if (hdev->flags & HPET_DEV_PERI_CAP) {
 		evt->features |= CLOCK_EVT_FEAT_PERIODIC;
+		evt->set_state_periodic = hpet_msi_set_periodic;
+	}
 
-	evt->set_mode = hpet_msi_set_mode;
+	evt->set_state_shutdown = hpet_msi_shutdown;
+	evt->set_state_oneshot = hpet_msi_set_oneshot;
+	evt->tick_resume = hpet_msi_resume;
 	evt->set_next_event = hpet_msi_next_event;
 	evt->cpumask = cpumask_of(hdev->cpu);
 
@@ -735,7 +785,7 @@
 
 	/* Verify whether hpet counter works */
 	t1 = hpet_readl(HPET_COUNTER);
-	rdtscll(start);
+	start = rdtsc();
 
 	/*
 	 * We don't know the TSC frequency yet, but waiting for
@@ -745,7 +795,7 @@
 	 */
 	do {
 		rep_nop();
-		rdtscll(now);
+		now = rdtsc();
 	} while ((now - start) < 200000UL);
 
 	if (t1 == hpet_readl(HPET_COUNTER)) {
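
The HPET changes above are part of the tree-wide move from the single set_mode(enum clock_event_mode) callback to discrete, int-returning set_state_*() hooks. For orientation, a generic sketch of the new callback shape follows; the names are made up and not from this patch.

static int foo_timer_shutdown(struct clock_event_device *evt)
{
	/* stop the hardware counter */
	return 0;
}

static int foo_timer_set_periodic(struct clock_event_device *evt)
{
	/* program a period derived from evt->mult and evt->shift */
	return 0;
}

static struct clock_event_device foo_clockevent = {
	.name			= "foo",
	.features		= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= foo_timer_shutdown,
	.set_state_periodic	= foo_timer_set_periodic,
	/* .set_state_oneshot, .tick_resume and .set_next_event as needed */
};
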
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 7114ba2..50a3fad 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -32,6 +32,7 @@
 #include <linux/irqflags.h>
 #include <linux/notifier.h>
 #include <linux/kallsyms.h>
+#include <linux/kprobes.h>
 #include <linux/percpu.h>
 #include <linux/kdebug.h>
 #include <linux/kernel.h>
@@ -179,7 +180,11 @@
 	va = info->address;
 	len = bp->attr.bp_len;
 
-	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+	/*
+	 * We don't need to worry about va + len - 1 overflowing:
+	 * we already require that va is aligned to a multiple of len.
+	 */
+	return (va >= TASK_SIZE_MAX) || ((va + len - 1) >= TASK_SIZE_MAX);
 }
 
 int arch_bp_generic_fields(int x86_len, int x86_type,
@@ -243,6 +248,20 @@
 		info->type = X86_BREAKPOINT_RW;
 		break;
 	case HW_BREAKPOINT_X:
+		/*
+		 * We don't allow kernel breakpoints in places that are not
+		 * acceptable for kprobes.  On non-kprobes kernels, we don't
+		 * allow kernel breakpoints at all.
+		 */
+		if (bp->attr.bp_addr >= TASK_SIZE_MAX) {
+#ifdef CONFIG_KPROBES
+			if (within_kprobe_blacklist(bp->attr.bp_addr))
+				return -EINVAL;
+#else
+			return -EINVAL;
+#endif
+		}
+
 		info->type = X86_BREAKPOINT_EXECUTE;
 		/*
 		 * x86 inst breakpoints need to have a specific undefined len.
@@ -276,8 +295,18 @@
 		break;
 #endif
 	default:
+		/* AMD range breakpoint */
 		if (!is_power_of_2(bp->attr.bp_len))
 			return -EINVAL;
+		if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
+			return -EINVAL;
+		/*
+		 * It's impossible to use a range breakpoint to fake out
+		 * user vs kernel detection because bp_len - 1 can't
+		 * have the high bit set.  If we ever allow range instruction
+		 * breakpoints, then we'll have to check for kprobe-blacklisted
+		 * addresses anywhere in the range.
+		 */
 		if (!cpu_has_bpext)
 			return -EOPNOTSUPP;
 		info->mask = bp->attr.bp_len - 1;
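
The AMD range-breakpoint constraints enforced above can be summarised as: a power-of-two length plus a length-aligned base address, which together guarantee addr + len - 1 cannot wrap. An illustrative standalone helper (not part of the patch) expressing the same check:

#include <stdbool.h>
#include <stdint.h>

/* A length-aligned, power-of-two range cannot wrap around the address space. */
static bool range_bp_valid(uint64_t addr, uint64_t len)
{
	if (len == 0 || (len & (len - 1)))	/* must be a power of two */
		return false;
	if (addr & (len - 1))			/* must be aligned to len */
		return false;
	return true;
}
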
diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c
index f2b96de..efb82f0 100644
--- a/arch/x86/kernel/i8253.c
+++ b/arch/x86/kernel/i8253.c
@@ -34,7 +34,7 @@
 	  * - when local APIC timer is active (PIT is switched off)
 	  */
 	if (num_possible_cpus() > 1 || is_hpet_enabled() ||
-	    i8253_clockevent.mode != CLOCK_EVT_MODE_PERIODIC)
+	    !clockevent_state_periodic(&i8253_clockevent))
 		return 0;
 
 	return clocksource_i8253_init();
diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
deleted file mode 100644
index 82f8d02..0000000
--- a/arch/x86/kernel/iosf_mbi.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * IOSF-SB MailBox Interface Driver
- * Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- *
- * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
- * mailbox interface (MBI) to communicate with multiple devices. This
- * driver implements access to this interface for those platforms that can
- * enumerate the device using PCI.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/debugfs.h>
-#include <linux/capability.h>
-
-#include <asm/iosf_mbi.h>
-
-#define PCI_DEVICE_ID_BAYTRAIL		0x0F00
-#define PCI_DEVICE_ID_BRASWELL		0x2280
-#define PCI_DEVICE_ID_QUARK_X1000	0x0958
-
-static DEFINE_SPINLOCK(iosf_mbi_lock);
-
-static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
-{
-	return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
-}
-
-static struct pci_dev *mbi_pdev;	/* one mbi device */
-
-static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
-{
-	int result;
-
-	if (!mbi_pdev)
-		return -ENODEV;
-
-	if (mcrx) {
-		result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
-						mcrx);
-		if (result < 0)
-			goto fail_read;
-	}
-
-	result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
-	if (result < 0)
-		goto fail_read;
-
-	result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
-	if (result < 0)
-		goto fail_read;
-
-	return 0;
-
-fail_read:
-	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
-	return result;
-}
-
-static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
-{
-	int result;
-
-	if (!mbi_pdev)
-		return -ENODEV;
-
-	result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
-	if (result < 0)
-		goto fail_write;
-
-	if (mcrx) {
-		result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
-						mcrx);
-		if (result < 0)
-			goto fail_write;
-	}
-
-	result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
-	if (result < 0)
-		goto fail_write;
-
-	return 0;
-
-fail_write:
-	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
-	return result;
-}
-
-int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
-{
-	u32 mcr, mcrx;
-	unsigned long flags;
-	int ret;
-
-	/*Access to the GFX unit is handled by GPU code */
-	if (port == BT_MBI_UNIT_GFX) {
-		WARN_ON(1);
-		return -EPERM;
-	}
-
-	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
-	mcrx = offset & MBI_MASK_HI;
-
-	spin_lock_irqsave(&iosf_mbi_lock, flags);
-	ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
-	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(iosf_mbi_read);
-
-int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
-{
-	u32 mcr, mcrx;
-	unsigned long flags;
-	int ret;
-
-	/*Access to the GFX unit is handled by GPU code */
-	if (port == BT_MBI_UNIT_GFX) {
-		WARN_ON(1);
-		return -EPERM;
-	}
-
-	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
-	mcrx = offset & MBI_MASK_HI;
-
-	spin_lock_irqsave(&iosf_mbi_lock, flags);
-	ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
-	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(iosf_mbi_write);
-
-int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
-{
-	u32 mcr, mcrx;
-	u32 value;
-	unsigned long flags;
-	int ret;
-
-	/*Access to the GFX unit is handled by GPU code */
-	if (port == BT_MBI_UNIT_GFX) {
-		WARN_ON(1);
-		return -EPERM;
-	}
-
-	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
-	mcrx = offset & MBI_MASK_HI;
-
-	spin_lock_irqsave(&iosf_mbi_lock, flags);
-
-	/* Read current mdr value */
-	ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
-	if (ret < 0) {
-		spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-		return ret;
-	}
-
-	/* Apply mask */
-	value &= ~mask;
-	mdr &= mask;
-	value |= mdr;
-
-	/* Write back */
-	ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
-
-	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(iosf_mbi_modify);
-
-bool iosf_mbi_available(void)
-{
-	/* Mbi isn't hot-pluggable. No remove routine is provided */
-	return mbi_pdev;
-}
-EXPORT_SYMBOL(iosf_mbi_available);
-
-#ifdef CONFIG_IOSF_MBI_DEBUG
-static u32	dbg_mdr;
-static u32	dbg_mcr;
-static u32	dbg_mcrx;
-
-static int mcr_get(void *data, u64 *val)
-{
-	*val = *(u32 *)data;
-	return 0;
-}
-
-static int mcr_set(void *data, u64 val)
-{
-	u8 command = ((u32)val & 0xFF000000) >> 24,
-	   port	   = ((u32)val & 0x00FF0000) >> 16,
-	   offset  = ((u32)val & 0x0000FF00) >> 8;
-	int err;
-
-	*(u32 *)data = val;
-
-	if (!capable(CAP_SYS_RAWIO))
-		return -EACCES;
-
-	if (command & 1u)
-		err = iosf_mbi_write(port,
-			       command,
-			       dbg_mcrx | offset,
-			       dbg_mdr);
-	else
-		err = iosf_mbi_read(port,
-			      command,
-			      dbg_mcrx | offset,
-			      &dbg_mdr);
-
-	return err;
-}
-DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set , "%llx\n");
-
-static struct dentry *iosf_dbg;
-
-static void iosf_sideband_debug_init(void)
-{
-	struct dentry *d;
-
-	iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
-	if (IS_ERR_OR_NULL(iosf_dbg))
-		return;
-
-	/* mdr */
-	d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
-	if (IS_ERR_OR_NULL(d))
-		goto cleanup;
-
-	/* mcrx */
-	debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
-	if (IS_ERR_OR_NULL(d))
-		goto cleanup;
-
-	/* mcr - initiates mailbox transaction */
-	debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
-	if (IS_ERR_OR_NULL(d))
-		goto cleanup;
-
-	return;
-
-cleanup:
-	debugfs_remove_recursive(d);
-}
-
-static void iosf_debugfs_init(void)
-{
-	iosf_sideband_debug_init();
-}
-
-static void iosf_debugfs_remove(void)
-{
-	debugfs_remove_recursive(iosf_dbg);
-}
-#else
-static inline void iosf_debugfs_init(void) { }
-static inline void iosf_debugfs_remove(void) { }
-#endif /* CONFIG_IOSF_MBI_DEBUG */
-
-static int iosf_mbi_probe(struct pci_dev *pdev,
-			  const struct pci_device_id *unused)
-{
-	int ret;
-
-	ret = pci_enable_device(pdev);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "error: could not enable device\n");
-		return ret;
-	}
-
-	mbi_pdev = pci_dev_get(pdev);
-	return 0;
-}
-
-static const struct pci_device_id iosf_mbi_pci_ids[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
-	{ 0, },
-};
-MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
-
-static struct pci_driver iosf_mbi_pci_driver = {
-	.name		= "iosf_mbi_pci",
-	.probe		= iosf_mbi_probe,
-	.id_table	= iosf_mbi_pci_ids,
-};
-
-static int __init iosf_mbi_init(void)
-{
-	iosf_debugfs_init();
-
-	return pci_register_driver(&iosf_mbi_pci_driver);
-}
-
-static void __exit iosf_mbi_exit(void)
-{
-	iosf_debugfs_remove();
-
-	pci_unregister_driver(&iosf_mbi_pci_driver);
-	if (mbi_pdev) {
-		pci_dev_put(mbi_pdev);
-		mbi_pdev = NULL;
-	}
-}
-
-module_init(iosf_mbi_init);
-module_exit(iosf_mbi_exit);
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index c7dfe1b..f8062aa 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -139,10 +139,13 @@
 	seq_puts(p, "  Machine check polls\n");
 #endif
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
-	seq_printf(p, "%*s: ", prec, "HYP");
-	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
-	seq_puts(p, "  Hypervisor callback interrupts\n");
+	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
+		seq_printf(p, "%*s: ", prec, "HYP");
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ",
+				   irq_stats(j)->irq_hv_callback_count);
+		seq_puts(p, "  Hypervisor callback interrupts\n");
+	}
 #endif
 	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
@@ -211,24 +214,38 @@
 __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-
+	struct irq_desc *desc;
 	/* high bit used in ret_from_ code  */
 	unsigned vector = ~regs->orig_ax;
-	unsigned irq;
+
+	/*
+	 * NB: Unlike exception entries, IRQ entries do not reliably
+	 * handle context tracking in the low-level entry code.  This is
+	 * because syscall entries execute briefly with IRQs on before
+	 * updating context tracking state, so we can take an IRQ from
+	 * kernel mode with CONTEXT_USER.  The low-level entry code only
+	 * updates the context if we came from user mode, so we won't
+	 * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
+	 * code is cleaned up enough that we can cleanly defer enabling
+	 * IRQs.
+	 */
 
 	entering_irq();
 
-	irq = __this_cpu_read(vector_irq[vector]);
+	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
 
-	if (!handle_irq(irq, regs)) {
+	desc = __this_cpu_read(vector_irq[vector]);
+
+	if (!handle_irq(desc, regs)) {
 		ack_APIC_irq();
 
-		if (irq != VECTOR_RETRIGGERED) {
-			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
+		if (desc != VECTOR_RETRIGGERED) {
+			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
 					     __func__, smp_processor_id(),
-					     vector, irq);
+					     vector);
 		} else {
-			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 		}
 	}
 
@@ -330,10 +347,10 @@
  */
 int check_irq_vectors_for_cpu_disable(void)
 {
-	int irq, cpu;
 	unsigned int this_cpu, vector, this_count, count;
 	struct irq_desc *desc;
 	struct irq_data *data;
+	int cpu;
 
 	this_cpu = smp_processor_id();
 	cpumask_copy(&online_new, cpu_online_mask);
@@ -341,47 +358,43 @@
 
 	this_count = 0;
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-		irq = __this_cpu_read(vector_irq[vector]);
-		if (irq >= 0) {
-			desc = irq_to_desc(irq);
-			if (!desc)
-				continue;
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (IS_ERR_OR_NULL(desc))
+			continue;
+		/*
+		 * Protect against concurrent action removal, affinity
+		 * changes etc.
+		 */
+		raw_spin_lock(&desc->lock);
+		data = irq_desc_get_irq_data(desc);
+		cpumask_copy(&affinity_new,
+			     irq_data_get_affinity_mask(data));
+		cpumask_clear_cpu(this_cpu, &affinity_new);
 
-			/*
-			 * Protect against concurrent action removal,
-			 * affinity changes etc.
-			 */
-			raw_spin_lock(&desc->lock);
-			data = irq_desc_get_irq_data(desc);
-			cpumask_copy(&affinity_new, data->affinity);
-			cpumask_clear_cpu(this_cpu, &affinity_new);
-
-			/* Do not count inactive or per-cpu irqs. */
-			if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
-				raw_spin_unlock(&desc->lock);
-				continue;
-			}
-
+		/* Do not count inactive or per-cpu irqs. */
+		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
 			raw_spin_unlock(&desc->lock);
-			/*
-			 * A single irq may be mapped to multiple
-			 * cpu's vector_irq[] (for example IOAPIC cluster
-			 * mode).  In this case we have two
-			 * possibilities:
-			 *
-			 * 1) the resulting affinity mask is empty; that is
-			 * this the down'd cpu is the last cpu in the irq's
-			 * affinity mask, or
-			 *
-			 * 2) the resulting affinity mask is no longer
-			 * a subset of the online cpus but the affinity
-			 * mask is not zero; that is the down'd cpu is the
-			 * last online cpu in a user set affinity mask.
-			 */
-			if (cpumask_empty(&affinity_new) ||
-			    !cpumask_subset(&affinity_new, &online_new))
-				this_count++;
+			continue;
 		}
+
+		raw_spin_unlock(&desc->lock);
+		/*
+		 * A single irq may be mapped to multiple cpu's
+		 * vector_irq[] (for example IOAPIC cluster mode).  In
+		 * this case we have two possibilities:
+		 *
+		 * 1) the resulting affinity mask is empty; that is
+		 * this the down'd cpu is the last cpu in the irq's
+		 * affinity mask, or
+		 *
+		 * 2) the resulting affinity mask is no longer a
+		 * subset of the online cpus but the affinity mask is
+		 * not zero; that is the down'd cpu is the last online
+		 * cpu in a user set affinity mask.
+		 */
+		if (cpumask_empty(&affinity_new) ||
+		    !cpumask_subset(&affinity_new, &online_new))
+			this_count++;
 	}
 
 	count = 0;
@@ -400,8 +413,8 @@
 		for (vector = FIRST_EXTERNAL_VECTOR;
 		     vector < first_system_vector; vector++) {
 			if (!test_bit(vector, used_vectors) &&
-			    per_cpu(vector_irq, cpu)[vector] < 0)
-					count++;
+			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
+			    count++;
 		}
 	}
 
@@ -437,7 +450,7 @@
 		raw_spin_lock(&desc->lock);
 
 		data = irq_desc_get_irq_data(desc);
-		affinity = data->affinity;
+		affinity = irq_data_get_affinity_mask(data);
 		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
@@ -505,14 +518,13 @@
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
 		unsigned int irr;
 
-		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
+		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
 			continue;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
 		if (irr  & (1 << (vector % 32))) {
-			irq = __this_cpu_read(vector_irq[vector]);
+			desc = __this_cpu_read(vector_irq[vector]);
 
-			desc = irq_to_desc(irq);
 			raw_spin_lock(&desc->lock);
 			data = irq_desc_get_irq_data(desc);
 			chip = irq_data_get_irq_chip(data);
@@ -523,7 +535,7 @@
 			raw_spin_unlock(&desc->lock);
 		}
 		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
-			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
 	}
 }
 #endif
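
The common thread in the irq.c hunks above: the per-CPU vector_irq[] table now holds struct irq_desc pointers instead of irq numbers, and the special VECTOR_UNUSED / VECTOR_RETRIGGERED markers are apparently encoded so that a single IS_ERR_OR_NULL() test rejects both. A condensed sketch of the resulting lookup pattern (kernel context assumed, demo_dispatch is a made-up name):

static void demo_dispatch(unsigned int vector)
{
	struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);

	/* Unused or retriggered slots are encoded as NULL/error pointers. */
	if (IS_ERR_OR_NULL(desc))
		return;

	generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
}
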
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index cd74f59..c80cf66 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -148,21 +148,21 @@
 	call_on_stack(__do_softirq, isp);
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	struct irq_desc *desc;
+	unsigned int irq;
 	int overflow;
 
 	overflow = check_stack_overflow();
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc))
+	if (IS_ERR_OR_NULL(desc))
 		return false;
 
+	irq = irq_desc_get_irq(desc);
 	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
 		if (unlikely(overflow))
 			print_stack_overflow();
-		desc->handle_irq(irq, desc);
+		generic_handle_irq_desc(irq, desc);
 	}
 
 	return true;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index bc4604e..ff16ccb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -68,16 +68,13 @@
 #endif
 }
 
-bool handle_irq(unsigned irq, struct pt_regs *regs)
+bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
 {
-	struct irq_desc *desc;
-
 	stack_overflow_check(regs);
 
-	desc = irq_to_desc(irq);
-	if (unlikely(!desc))
+	if (unlikely(IS_ERR_OR_NULL(desc)))
 		return false;
 
-	generic_handle_irq_desc(irq, desc);
+	generic_handle_irq_desc(irq_desc_get_irq(desc), desc);
 	return true;
 }
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index a3a5e15..1423ab1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -52,7 +52,7 @@
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED,
+	[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)
@@ -60,7 +60,7 @@
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED)
+		if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
 			return 1;
 	}
 
@@ -94,7 +94,7 @@
 	 * irq's migrate etc.
 	 */
 	for (i = 0; i < nr_legacy_irqs(); i++)
-		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i;
+		per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);
 
 	x86_init.irqs.intr_init();
 }
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 26d5a55..e565e0e 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -45,7 +45,7 @@
 	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
-	if (type == JUMP_LABEL_ENABLE) {
+	if (type == JUMP_LABEL_JMP) {
 		if (init) {
 			/*
 			 * Jump label is enabled for the first time.
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index ca83f7ac..961e51e 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -223,9 +223,6 @@
 	memset(&params->hd0_info, 0, sizeof(params->hd0_info));
 	memset(&params->hd1_info, 0, sizeof(params->hd1_info));
 
-	/* Default sysdesc table */
-	params->sys_desc_table.length = 0;
-
 	if (image->type == KEXEC_TYPE_CRASH) {
 		ret = crash_setup_memmap_entries(image, params);
 		if (ret)
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index d05bd2e..697f90d 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -110,7 +110,7 @@
 		a->handler, whole_msecs, decimal_msecs);
 }
 
-static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
+static int nmi_handle(unsigned int type, struct pt_regs *regs)
 {
 	struct nmi_desc *desc = nmi_to_desc(type);
 	struct nmiaction *a;
@@ -213,7 +213,7 @@
 pci_serr_error(unsigned char reason, struct pt_regs *regs)
 {
 	/* check to see if anyone registered against these types of errors */
-	if (nmi_handle(NMI_SERR, regs, false))
+	if (nmi_handle(NMI_SERR, regs))
 		return;
 
 	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
@@ -247,7 +247,7 @@
 	unsigned long i;
 
 	/* check to see if anyone registered against these types of errors */
-	if (nmi_handle(NMI_IO_CHECK, regs, false))
+	if (nmi_handle(NMI_IO_CHECK, regs))
 		return;
 
 	pr_emerg(
@@ -284,7 +284,7 @@
 	 * as only the first one is ever run (unless it can actually determine
 	 * if it caused the NMI)
 	 */
-	handled = nmi_handle(NMI_UNKNOWN, regs, false);
+	handled = nmi_handle(NMI_UNKNOWN, regs);
 	if (handled) {
 		__this_cpu_add(nmi_stats.unknown, handled);
 		return;
@@ -332,7 +332,7 @@
 
 	__this_cpu_write(last_nmi_rip, regs->ip);
 
-	handled = nmi_handle(NMI_LOCAL, regs, b2b);
+	handled = nmi_handle(NMI_LOCAL, regs);
 	__this_cpu_add(nmi_stats.normal, handled);
 	if (handled) {
 		/*
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 58bcfb6..f68e48f 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -351,9 +351,7 @@
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
 	.write_msr = native_write_msr_safe,
-	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
-	.read_tscp = native_read_tscp,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
 	.load_gdt = native_load_gdt,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index e1b0136..c89f50a 100644
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -10,7 +10,6 @@
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
-DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
@@ -52,7 +51,6 @@
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
 		PATCH_SITE(pv_cpu_ops, clts);
-		PATCH_SITE(pv_cpu_ops, read_tsc);
 #if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
 		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
 			if (pv_is_native_spin_unlock()) {
diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c
deleted file mode 100644
index d66a4fe..0000000
--- a/arch/x86/kernel/pmc_atom.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
- * Intel Atom SOC Power Management Controller Driver
- * Copyright (c) 2014, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/io.h>
-
-#include <asm/pmc_atom.h>
-
-struct pmc_dev {
-	u32 base_addr;
-	void __iomem *regmap;
-#ifdef CONFIG_DEBUG_FS
-	struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
-};
-
-static struct pmc_dev pmc_device;
-static u32 acpi_base_addr;
-
-struct pmc_bit_map {
-	const char *name;
-	u32 bit_mask;
-};
-
-static const struct pmc_bit_map dev_map[] = {
-	{"0  - LPSS1_F0_DMA",		BIT_LPSS1_F0_DMA},
-	{"1  - LPSS1_F1_PWM1",		BIT_LPSS1_F1_PWM1},
-	{"2  - LPSS1_F2_PWM2",		BIT_LPSS1_F2_PWM2},
-	{"3  - LPSS1_F3_HSUART1",	BIT_LPSS1_F3_HSUART1},
-	{"4  - LPSS1_F4_HSUART2",	BIT_LPSS1_F4_HSUART2},
-	{"5  - LPSS1_F5_SPI",		BIT_LPSS1_F5_SPI},
-	{"6  - LPSS1_F6_Reserved",	BIT_LPSS1_F6_XXX},
-	{"7  - LPSS1_F7_Reserved",	BIT_LPSS1_F7_XXX},
-	{"8  - SCC_EMMC",		BIT_SCC_EMMC},
-	{"9  - SCC_SDIO",		BIT_SCC_SDIO},
-	{"10 - SCC_SDCARD",		BIT_SCC_SDCARD},
-	{"11 - SCC_MIPI",		BIT_SCC_MIPI},
-	{"12 - HDA",			BIT_HDA},
-	{"13 - LPE",			BIT_LPE},
-	{"14 - OTG",			BIT_OTG},
-	{"15 - USH",			BIT_USH},
-	{"16 - GBE",			BIT_GBE},
-	{"17 - SATA",			BIT_SATA},
-	{"18 - USB_EHCI",		BIT_USB_EHCI},
-	{"19 - SEC",			BIT_SEC},
-	{"20 - PCIE_PORT0",		BIT_PCIE_PORT0},
-	{"21 - PCIE_PORT1",		BIT_PCIE_PORT1},
-	{"22 - PCIE_PORT2",		BIT_PCIE_PORT2},
-	{"23 - PCIE_PORT3",		BIT_PCIE_PORT3},
-	{"24 - LPSS2_F0_DMA",		BIT_LPSS2_F0_DMA},
-	{"25 - LPSS2_F1_I2C1",		BIT_LPSS2_F1_I2C1},
-	{"26 - LPSS2_F2_I2C2",		BIT_LPSS2_F2_I2C2},
-	{"27 - LPSS2_F3_I2C3",		BIT_LPSS2_F3_I2C3},
-	{"28 - LPSS2_F3_I2C4",		BIT_LPSS2_F4_I2C4},
-	{"29 - LPSS2_F5_I2C5",		BIT_LPSS2_F5_I2C5},
-	{"30 - LPSS2_F6_I2C6",		BIT_LPSS2_F6_I2C6},
-	{"31 - LPSS2_F7_I2C7",		BIT_LPSS2_F7_I2C7},
-	{"32 - SMB",			BIT_SMB},
-	{"33 - OTG_SS_PHY",		BIT_OTG_SS_PHY},
-	{"34 - USH_SS_PHY",		BIT_USH_SS_PHY},
-	{"35 - DFX",			BIT_DFX},
-};
-
-static const struct pmc_bit_map pss_map[] = {
-	{"0  - GBE",			PMC_PSS_BIT_GBE},
-	{"1  - SATA",			PMC_PSS_BIT_SATA},
-	{"2  - HDA",			PMC_PSS_BIT_HDA},
-	{"3  - SEC",			PMC_PSS_BIT_SEC},
-	{"4  - PCIE",			PMC_PSS_BIT_PCIE},
-	{"5  - LPSS",			PMC_PSS_BIT_LPSS},
-	{"6  - LPE",			PMC_PSS_BIT_LPE},
-	{"7  - DFX",			PMC_PSS_BIT_DFX},
-	{"8  - USH_CTRL",		PMC_PSS_BIT_USH_CTRL},
-	{"9  - USH_SUS",		PMC_PSS_BIT_USH_SUS},
-	{"10 - USH_VCCS",		PMC_PSS_BIT_USH_VCCS},
-	{"11 - USH_VCCA",		PMC_PSS_BIT_USH_VCCA},
-	{"12 - OTG_CTRL",		PMC_PSS_BIT_OTG_CTRL},
-	{"13 - OTG_VCCS",		PMC_PSS_BIT_OTG_VCCS},
-	{"14 - OTG_VCCA_CLK",		PMC_PSS_BIT_OTG_VCCA_CLK},
-	{"15 - OTG_VCCA",		PMC_PSS_BIT_OTG_VCCA},
-	{"16 - USB",			PMC_PSS_BIT_USB},
-	{"17 - USB_SUS",		PMC_PSS_BIT_USB_SUS},
-};
-
-static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
-{
-	return readl(pmc->regmap + reg_offset);
-}
-
-static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
-{
-	writel(val, pmc->regmap + reg_offset);
-}
-
-static void pmc_power_off(void)
-{
-	u16	pm1_cnt_port;
-	u32	pm1_cnt_value;
-
-	pr_info("Preparing to enter system sleep state S5\n");
-
-	pm1_cnt_port = acpi_base_addr + PM1_CNT;
-
-	pm1_cnt_value = inl(pm1_cnt_port);
-	pm1_cnt_value &= SLEEP_TYPE_MASK;
-	pm1_cnt_value |= SLEEP_TYPE_S5;
-	pm1_cnt_value |= SLEEP_ENABLE;
-
-	outl(pm1_cnt_value, pm1_cnt_port);
-}
-
-static void pmc_hw_reg_setup(struct pmc_dev *pmc)
-{
-	/*
-	 * Disable PMC S0IX_WAKE_EN events coming from:
-	 * - LPC clock run
-	 * - GPIO_SUS ored dedicated IRQs
-	 * - GPIO_SCORE ored dedicated IRQs
-	 * - GPIO_SUS shared IRQ
-	 * - GPIO_SCORE shared IRQ
-	 */
-	pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int pmc_dev_state_show(struct seq_file *s, void *unused)
-{
-	struct pmc_dev *pmc = s->private;
-	u32 func_dis, func_dis_2, func_dis_index;
-	u32 d3_sts_0, d3_sts_1, d3_sts_index;
-	int dev_num, dev_index, reg_index;
-
-	func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
-	func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
-	d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
-	d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
-
-	dev_num = ARRAY_SIZE(dev_map);
-
-	for (dev_index = 0; dev_index < dev_num; dev_index++) {
-		reg_index = dev_index / PMC_REG_BIT_WIDTH;
-		if (reg_index) {
-			func_dis_index = func_dis_2;
-			d3_sts_index = d3_sts_1;
-		} else {
-			func_dis_index = func_dis;
-			d3_sts_index = d3_sts_0;
-		}
-
-		seq_printf(s, "Dev: %-32s\tState: %s [%s]\n",
-			dev_map[dev_index].name,
-			dev_map[dev_index].bit_mask & func_dis_index ?
-			"Disabled" : "Enabled ",
-			dev_map[dev_index].bit_mask & d3_sts_index ?
-			"D3" : "D0");
-	}
-	return 0;
-}
-
-static int pmc_dev_state_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, pmc_dev_state_show, inode->i_private);
-}
-
-static const struct file_operations pmc_dev_state_ops = {
-	.open		= pmc_dev_state_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int pmc_pss_state_show(struct seq_file *s, void *unused)
-{
-	struct pmc_dev *pmc = s->private;
-	u32 pss = pmc_reg_read(pmc, PMC_PSS);
-	int pss_index;
-
-	for (pss_index = 0; pss_index < ARRAY_SIZE(pss_map); pss_index++) {
-		seq_printf(s, "Island: %-32s\tState: %s\n",
-			pss_map[pss_index].name,
-			pss_map[pss_index].bit_mask & pss ? "Off" : "On");
-	}
-	return 0;
-}
-
-static int pmc_pss_state_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, pmc_pss_state_show, inode->i_private);
-}
-
-static const struct file_operations pmc_pss_state_ops = {
-	.open		= pmc_pss_state_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
-{
-	struct pmc_dev *pmc = s->private;
-	u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr;
-
-	s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
-	s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
-	s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
-	s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
-	s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
-
-	seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr);
-	seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr);
-	seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr);
-	seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr);
-	seq_printf(s, "S0   Residency:\t%lldus\n", s0_tmr);
-	return 0;
-}
-
-static int pmc_sleep_tmr_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, pmc_sleep_tmr_show, inode->i_private);
-}
-
-static const struct file_operations pmc_sleep_tmr_ops = {
-	.open		= pmc_sleep_tmr_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-static void pmc_dbgfs_unregister(struct pmc_dev *pmc)
-{
-	debugfs_remove_recursive(pmc->dbgfs_dir);
-}
-
-static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
-{
-	struct dentry *dir, *f;
-
-	dir = debugfs_create_dir("pmc_atom", NULL);
-	if (!dir)
-		return -ENOMEM;
-
-	pmc->dbgfs_dir = dir;
-
-	f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
-				dir, pmc, &pmc_dev_state_ops);
-	if (!f) {
-		dev_err(&pdev->dev, "dev_state register failed\n");
-		goto err;
-	}
-
-	f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO,
-				dir, pmc, &pmc_pss_state_ops);
-	if (!f) {
-		dev_err(&pdev->dev, "pss_state register failed\n");
-		goto err;
-	}
-
-	f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO,
-				dir, pmc, &pmc_sleep_tmr_ops);
-	if (!f) {
-		dev_err(&pdev->dev, "sleep_state register failed\n");
-		goto err;
-	}
-
-	return 0;
-err:
-	pmc_dbgfs_unregister(pmc);
-	return -ENODEV;
-}
-#else
-static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev)
-{
-	return 0;
-}
-#endif /* CONFIG_DEBUG_FS */
-
-static int pmc_setup_dev(struct pci_dev *pdev)
-{
-	struct pmc_dev *pmc = &pmc_device;
-	int ret;
-
-	/* Obtain ACPI base address */
-	pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
-	acpi_base_addr &= ACPI_BASE_ADDR_MASK;
-
-	/* Install power off function */
-	if (acpi_base_addr != 0 && pm_power_off == NULL)
-		pm_power_off = pmc_power_off;
-
-	pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
-	pmc->base_addr &= PMC_BASE_ADDR_MASK;
-
-	pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
-	if (!pmc->regmap) {
-		dev_err(&pdev->dev, "error: ioremap failed\n");
-		return -ENOMEM;
-	}
-
-	/* PMC hardware registers setup */
-	pmc_hw_reg_setup(pmc);
-
-	ret = pmc_dbgfs_register(pmc, pdev);
-	if (ret) {
-		iounmap(pmc->regmap);
-	}
-
-	return ret;
-}
-
-/*
- * Data for PCI driver interface
- *
- * This data only exists for exporting the supported
- * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
- * register a pci_driver, because lpc_ich will register
- * a driver on the same PCI id.
- */
-static const struct pci_device_id pmc_pci_ids[] = {
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_VLV_PMC) },
-	{ 0, },
-};
-
-MODULE_DEVICE_TABLE(pci, pmc_pci_ids);
-
-static int __init pmc_atom_init(void)
-{
-	struct pci_dev *pdev = NULL;
-	const struct pci_device_id *ent;
-
-	/* We look for our device - PCU PMC
-	 * we assume that there is max. one device.
-	 *
-	 * We can't use plain pci_driver mechanism,
-	 * as the device is really a multiple function device,
-	 * main driver that binds to the pci_device is lpc_ich
-	 * and have to find & bind to the device this way.
-	 */
-	for_each_pci_dev(pdev) {
-		ent = pci_match_id(pmc_pci_ids, pdev);
-		if (ent)
-			return pmc_setup_dev(pdev);
-	}
-	/* Device not found. */
-	return -ENODEV;
-}
-
-module_init(pmc_atom_init);
-/* no module_exit, this driver shouldn't be unloaded */
-
-MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c27cad7..6d0e62a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,8 @@
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 #include <asm/tlbflush.h>
+#include <asm/mce.h>
+#include <asm/vm86.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -110,6 +112,8 @@
 		kfree(bp);
 	}
 
+	free_vm86(t);
+
 	fpu__drop(fpu);
 }
 
@@ -319,6 +323,7 @@
 	 */
 	set_cpu_online(smp_processor_id(), false);
 	disable_local_APIC();
+	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 
 	for (;;)
 		halt();
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f73c962..c13df2c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -53,6 +53,7 @@
 #include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
+#include <asm/vm86.h>
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f6b9163..3c1bbcf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,6 +121,7 @@
 void release_thread(struct task_struct *dead_task)
 {
 	if (dead_task->mm) {
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 		if (dead_task->mm->context.ldt) {
 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
 				dead_task->comm,
@@ -128,6 +129,7 @@
 				dead_task->mm->context.ldt->size);
 			BUG();
 		}
+#endif
 	}
 }
 
@@ -248,8 +250,8 @@
 			    __USER_CS, __USER_DS, 0);
 }
 
-#ifdef CONFIG_IA32_EMULATION
-void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
+#ifdef CONFIG_COMPAT
+void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
 {
 	start_thread_common(regs, new_ip, new_sp,
 			    test_thread_flag(TIF_X32)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 9be72bc..558f50e 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -37,12 +37,10 @@
 #include <asm/proto.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/traps.h>
+#include <asm/syscall.h>
 
 #include "tls.h"
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
 enum x86_regset {
 	REGSET_GENERAL,
 	REGSET_FP,
@@ -1123,6 +1121,73 @@
 	return ret;
 }
 
+static long ia32_arch_ptrace(struct task_struct *child, compat_long_t request,
+			     compat_ulong_t caddr, compat_ulong_t cdata)
+{
+	unsigned long addr = caddr;
+	unsigned long data = cdata;
+	void __user *datap = compat_ptr(data);
+	int ret;
+	__u32 val;
+
+	switch (request) {
+	case PTRACE_PEEKUSR:
+		ret = getreg32(child, addr, &val);
+		if (ret == 0)
+			ret = put_user(val, (__u32 __user *)datap);
+		break;
+
+	case PTRACE_POKEUSR:
+		ret = putreg32(child, addr, data);
+		break;
+
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		return copy_regset_to_user(child, &user_x86_32_view,
+					   REGSET_GENERAL,
+					   0, sizeof(struct user_regs_struct32),
+					   datap);
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		return copy_regset_from_user(child, &user_x86_32_view,
+					     REGSET_GENERAL, 0,
+					     sizeof(struct user_regs_struct32),
+					     datap);
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		return copy_regset_to_user(child, &user_x86_32_view,
+					   REGSET_FP, 0,
+					   sizeof(struct user_i387_ia32_struct),
+					   datap);
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		return copy_regset_from_user(
+			child, &user_x86_32_view, REGSET_FP,
+			0, sizeof(struct user_i387_ia32_struct), datap);
+
+	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
+		return copy_regset_to_user(child, &user_x86_32_view,
+					   REGSET_XFP, 0,
+					   sizeof(struct user32_fxsr_struct),
+					   datap);
+
+	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
+		return copy_regset_from_user(child, &user_x86_32_view,
+					     REGSET_XFP, 0,
+					     sizeof(struct user32_fxsr_struct),
+					     datap);
+
+	case PTRACE_GET_THREAD_AREA:
+	case PTRACE_SET_THREAD_AREA:
+		return arch_ptrace(child, request, addr, data);
+
+	default:
+		return compat_ptrace_request(child, request, addr, data);
+	}
+
+	return ret;
+}
+#endif /* CONFIG_IA32_EMULATION */
+
 #ifdef CONFIG_X86_X32_ABI
 static long x32_arch_ptrace(struct task_struct *child,
 			    compat_long_t request, compat_ulong_t caddr,
@@ -1211,78 +1276,21 @@
 }
 #endif
 
+#ifdef CONFIG_COMPAT
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			compat_ulong_t caddr, compat_ulong_t cdata)
 {
-	unsigned long addr = caddr;
-	unsigned long data = cdata;
-	void __user *datap = compat_ptr(data);
-	int ret;
-	__u32 val;
-
 #ifdef CONFIG_X86_X32_ABI
 	if (!is_ia32_task())
 		return x32_arch_ptrace(child, request, caddr, cdata);
 #endif
-
-	switch (request) {
-	case PTRACE_PEEKUSR:
-		ret = getreg32(child, addr, &val);
-		if (ret == 0)
-			ret = put_user(val, (__u32 __user *)datap);
-		break;
-
-	case PTRACE_POKEUSR:
-		ret = putreg32(child, addr, data);
-		break;
-
-	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
-		return copy_regset_to_user(child, &user_x86_32_view,
-					   REGSET_GENERAL,
-					   0, sizeof(struct user_regs_struct32),
-					   datap);
-
-	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
-		return copy_regset_from_user(child, &user_x86_32_view,
-					     REGSET_GENERAL, 0,
-					     sizeof(struct user_regs_struct32),
-					     datap);
-
-	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
-		return copy_regset_to_user(child, &user_x86_32_view,
-					   REGSET_FP, 0,
-					   sizeof(struct user_i387_ia32_struct),
-					   datap);
-
-	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
-		return copy_regset_from_user(
-			child, &user_x86_32_view, REGSET_FP,
-			0, sizeof(struct user_i387_ia32_struct), datap);
-
-	case PTRACE_GETFPXREGS:	/* Get the child extended FPU state. */
-		return copy_regset_to_user(child, &user_x86_32_view,
-					   REGSET_XFP, 0,
-					   sizeof(struct user32_fxsr_struct),
-					   datap);
-
-	case PTRACE_SETFPXREGS:	/* Set the child extended FPU state. */
-		return copy_regset_from_user(child, &user_x86_32_view,
-					     REGSET_XFP, 0,
-					     sizeof(struct user32_fxsr_struct),
-					     datap);
-
-	case PTRACE_GET_THREAD_AREA:
-	case PTRACE_SET_THREAD_AREA:
-		return arch_ptrace(child, request, addr, data);
-
-	default:
-		return compat_ptrace_request(child, request, addr, data);
-	}
-
-	return ret;
+#ifdef CONFIG_IA32_EMULATION
+	return ia32_arch_ptrace(child, request, caddr, cdata);
+#else
+	return 0;
+#endif
 }
-
-#endif	/* CONFIG_IA32_EMULATION */
+#endif	/* CONFIG_COMPAT */
 
 #ifdef CONFIG_X86_64
 
@@ -1434,201 +1442,3 @@
 	/* Send us the fake SIGTRAP */
 	force_sig_info(SIGTRAP, &info, tsk);
 }
-
-static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
-{
-#ifdef CONFIG_X86_64
-	if (arch == AUDIT_ARCH_X86_64) {
-		audit_syscall_entry(regs->orig_ax, regs->di,
-				    regs->si, regs->dx, regs->r10);
-	} else
-#endif
-	{
-		audit_syscall_entry(regs->orig_ax, regs->bx,
-				    regs->cx, regs->dx, regs->si);
-	}
-}
-
-/*
- * We can return 0 to resume the syscall or anything else to go to phase
- * 2.  If we resume the syscall, we need to put something appropriate in
- * regs->orig_ax.
- *
- * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
- * are fully functional.
- *
- * For phase 2's benefit, our return value is:
- * 0:			resume the syscall
- * 1:			go to phase 2; no seccomp phase 2 needed
- * anything else:	go to phase 2; pass return value to seccomp
- */
-unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
-{
-	unsigned long ret = 0;
-	u32 work;
-
-	BUG_ON(regs != task_pt_regs(current));
-
-	work = ACCESS_ONCE(current_thread_info()->flags) &
-		_TIF_WORK_SYSCALL_ENTRY;
-
-	/*
-	 * If TIF_NOHZ is set, we are required to call user_exit() before
-	 * doing anything that could touch RCU.
-	 */
-	if (work & _TIF_NOHZ) {
-		user_exit();
-		work &= ~_TIF_NOHZ;
-	}
-
-#ifdef CONFIG_SECCOMP
-	/*
-	 * Do seccomp first -- it should minimize exposure of other
-	 * code, and keeping seccomp fast is probably more valuable
-	 * than the rest of this.
-	 */
-	if (work & _TIF_SECCOMP) {
-		struct seccomp_data sd;
-
-		sd.arch = arch;
-		sd.nr = regs->orig_ax;
-		sd.instruction_pointer = regs->ip;
-#ifdef CONFIG_X86_64
-		if (arch == AUDIT_ARCH_X86_64) {
-			sd.args[0] = regs->di;
-			sd.args[1] = regs->si;
-			sd.args[2] = regs->dx;
-			sd.args[3] = regs->r10;
-			sd.args[4] = regs->r8;
-			sd.args[5] = regs->r9;
-		} else
-#endif
-		{
-			sd.args[0] = regs->bx;
-			sd.args[1] = regs->cx;
-			sd.args[2] = regs->dx;
-			sd.args[3] = regs->si;
-			sd.args[4] = regs->di;
-			sd.args[5] = regs->bp;
-		}
-
-		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
-		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);
-
-		ret = seccomp_phase1(&sd);
-		if (ret == SECCOMP_PHASE1_SKIP) {
-			regs->orig_ax = -1;
-			ret = 0;
-		} else if (ret != SECCOMP_PHASE1_OK) {
-			return ret;  /* Go directly to phase 2 */
-		}
-
-		work &= ~_TIF_SECCOMP;
-	}
-#endif
-
-	/* Do our best to finish without phase 2. */
-	if (work == 0)
-		return ret;  /* seccomp and/or nohz only (ret == 0 here) */
-
-#ifdef CONFIG_AUDITSYSCALL
-	if (work == _TIF_SYSCALL_AUDIT) {
-		/*
-		 * If there is no more work to be done except auditing,
-		 * then audit in phase 1.  Phase 2 always audits, so, if
-		 * we audit here, then we can't go on to phase 2.
-		 */
-		do_audit_syscall_entry(regs, arch);
-		return 0;
-	}
-#endif
-
-	return 1;  /* Something is enabled that we can't handle in phase 1 */
-}
-
-/* Returns the syscall nr to run (which should match regs->orig_ax). */
-long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
-				unsigned long phase1_result)
-{
-	long ret = 0;
-	u32 work = ACCESS_ONCE(current_thread_info()->flags) &
-		_TIF_WORK_SYSCALL_ENTRY;
-
-	BUG_ON(regs != task_pt_regs(current));
-
-	/*
-	 * If we stepped into a sysenter/syscall insn, it trapped in
-	 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
-	 * If user-mode had set TF itself, then it's still clear from
-	 * do_debug() and we need to set it again to restore the user
-	 * state.  If we entered on the slow path, TF was already set.
-	 */
-	if (work & _TIF_SINGLESTEP)
-		regs->flags |= X86_EFLAGS_TF;
-
-#ifdef CONFIG_SECCOMP
-	/*
-	 * Call seccomp_phase2 before running the other hooks so that
-	 * they can see any changes made by a seccomp tracer.
-	 */
-	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
-		/* seccomp failures shouldn't expose any additional code. */
-		return -1;
-	}
-#endif
-
-	if (unlikely(work & _TIF_SYSCALL_EMU))
-		ret = -1L;
-
-	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
-	    tracehook_report_syscall_entry(regs))
-		ret = -1L;
-
-	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-		trace_sys_enter(regs, regs->orig_ax);
-
-	do_audit_syscall_entry(regs, arch);
-
-	return ret ?: regs->orig_ax;
-}
-
-long syscall_trace_enter(struct pt_regs *regs)
-{
-	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
-	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
-
-	if (phase1_result == 0)
-		return regs->orig_ax;
-	else
-		return syscall_trace_enter_phase2(regs, arch, phase1_result);
-}
-
-void syscall_trace_leave(struct pt_regs *regs)
-{
-	bool step;
-
-	/*
-	 * We may come here right after calling schedule_user()
-	 * or do_notify_resume(), in which case we can be in RCU
-	 * user mode.
-	 */
-	user_exit();
-
-	audit_syscall_exit(regs);
-
-	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-		trace_sys_exit(regs, regs->ax);
-
-	/*
-	 * If TIF_SYSCALL_EMU is set, we only get here because of
-	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
-	 * We already reported this syscall instruction in
-	 * syscall_trace_enter().
-	 */
-	step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
-			!test_thread_flag(TIF_SYSCALL_EMU);
-	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
-		tracehook_report_syscall_exit(regs, step);
-
-	user_enter();
-}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 80f874b..b143c2d 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -916,11 +916,6 @@
 #ifdef CONFIG_X86_32
 	apm_info.bios = boot_params.apm_bios_info;
 	ist_info = boot_params.ist_info;
-	if (boot_params.sys_desc_table.length != 0) {
-		machine_id = boot_params.sys_desc_table.table[0];
-		machine_submodel_id = boot_params.sys_desc_table.table[1];
-		BIOS_revision = boot_params.sys_desc_table.table[2];
-	}
 #endif
 	saved_video_mode = boot_params.hdr.vid_mode;
 	bootloader_type = boot_params.hdr.type_of_loader;
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 71820c4..da52e6b 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -31,11 +31,11 @@
 #include <asm/vdso.h>
 #include <asm/mce.h>
 #include <asm/sighandling.h>
+#include <asm/vm86.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/proto.h>
 #include <asm/ia32_unistd.h>
-#include <asm/sys_ia32.h>
 #endif /* CONFIG_X86_64 */
 
 #include <asm/syscall.h>
@@ -632,6 +632,9 @@
 	bool stepping, failed;
 	struct fpu *fpu = &current->thread.fpu;
 
+	if (v8086_mode(regs))
+		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
+
 	/* Are we from a system call? */
 	if (syscall_get_nr(current, regs) >= 0) {
 		/* If so, check system call restarting.. */
@@ -697,7 +700,7 @@
  * want to handle. Thus you cannot kill init even with a SIGKILL even by
  * mistake.
  */
-static void do_signal(struct pt_regs *regs)
+void do_signal(struct pt_regs *regs)
 {
 	struct ksignal ksig;
 
@@ -732,32 +735,6 @@
 	restore_saved_sigmask();
 }
 
-/*
- * notification of userspace execution resumption
- * - triggered by the TIF_WORK_MASK flags
- */
-__visible void
-do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
-{
-	user_exit();
-
-	if (thread_info_flags & _TIF_UPROBE)
-		uprobe_notify_resume(regs);
-
-	/* deal with pending signal delivery */
-	if (thread_info_flags & _TIF_SIGPENDING)
-		do_signal(regs);
-
-	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-		clear_thread_flag(TIF_NOTIFY_RESUME);
-		tracehook_notify_resume(regs);
-	}
-	if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
-		fire_user_return_notifiers();
-
-	user_enter();
-}
-
 void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
 {
 	struct task_struct *me = current;
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
new file mode 100644
index 0000000..dc3c0b1
--- /dev/null
+++ b/arch/x86/kernel/signal_compat.c
@@ -0,0 +1,95 @@
+#include <linux/compat.h>
+#include <linux/uaccess.h>
+
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+{
+	int err = 0;
+	bool ia32 = test_thread_flag(TIF_IA32);
+
+	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	put_user_try {
+		/* If you change siginfo_t structure, please make sure that
+		   this code is fixed accordingly.
+		   It should never copy any pad contained in the structure
+		   to avoid security leaks, but must copy the generic
+		   3 ints plus the relevant union member.  */
+		put_user_ex(from->si_signo, &to->si_signo);
+		put_user_ex(from->si_errno, &to->si_errno);
+		put_user_ex((short)from->si_code, &to->si_code);
+
+		if (from->si_code < 0) {
+			put_user_ex(from->si_pid, &to->si_pid);
+			put_user_ex(from->si_uid, &to->si_uid);
+			put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
+		} else {
+			/*
+			 * First 32bits of unions are always present:
+			 * si_pid === si_band === si_tid === si_addr(LS half)
+			 */
+			put_user_ex(from->_sifields._pad[0],
+					  &to->_sifields._pad[0]);
+			switch (from->si_code >> 16) {
+			case __SI_FAULT >> 16:
+				break;
+			case __SI_SYS >> 16:
+				put_user_ex(from->si_syscall, &to->si_syscall);
+				put_user_ex(from->si_arch, &to->si_arch);
+				break;
+			case __SI_CHLD >> 16:
+				if (ia32) {
+					put_user_ex(from->si_utime, &to->si_utime);
+					put_user_ex(from->si_stime, &to->si_stime);
+				} else {
+					put_user_ex(from->si_utime, &to->_sifields._sigchld_x32._utime);
+					put_user_ex(from->si_stime, &to->_sifields._sigchld_x32._stime);
+				}
+				put_user_ex(from->si_status, &to->si_status);
+				/* FALL THROUGH */
+			default:
+			case __SI_KILL >> 16:
+				put_user_ex(from->si_uid, &to->si_uid);
+				break;
+			case __SI_POLL >> 16:
+				put_user_ex(from->si_fd, &to->si_fd);
+				break;
+			case __SI_TIMER >> 16:
+				put_user_ex(from->si_overrun, &to->si_overrun);
+				put_user_ex(ptr_to_compat(from->si_ptr),
+					    &to->si_ptr);
+				break;
+				 /* This is not generated by the kernel as of now.  */
+			case __SI_RT >> 16:
+			case __SI_MESGQ >> 16:
+				put_user_ex(from->si_uid, &to->si_uid);
+				put_user_ex(from->si_int, &to->si_int);
+				break;
+			}
+		}
+	} put_user_catch(err);
+
+	return err;
+}
+
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+	int err = 0;
+	u32 ptr32;
+
+	if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
+		return -EFAULT;
+
+	get_user_try {
+		get_user_ex(to->si_signo, &from->si_signo);
+		get_user_ex(to->si_errno, &from->si_errno);
+		get_user_ex(to->si_code, &from->si_code);
+
+		get_user_ex(to->si_pid, &from->si_pid);
+		get_user_ex(to->si_uid, &from->si_uid);
+		get_user_ex(ptr32, &from->si_ptr);
+		to->si_ptr = compat_ptr(ptr32);
+	} get_user_catch(err);
+
+	return err;
+}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 15aaa69..12c8286 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -30,6 +30,7 @@
 #include <asm/proto.h>
 #include <asm/apic.h>
 #include <asm/nmi.h>
+#include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
 /*
  *	Some notes on x86 processor bugs affecting SMP operation:
@@ -243,6 +244,7 @@
 finish:
 	local_irq_save(flags);
 	disable_local_APIC();
+	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 	local_irq_restore(flags);
 }
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index b1f3ed9c..e0c198e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,8 +97,6 @@
 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-atomic_t init_deasserted;
-
 static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 {
 	unsigned long flags;
@@ -146,16 +144,11 @@
 
 	/*
 	 * If waken up by an INIT in an 82489DX configuration
-	 * we may get here before an INIT-deassert IPI reaches
-	 * our local APIC.  We have to wait for the IPI or we'll
-	 * lock up on an APIC access.
-	 *
-	 * Since CPU0 is not wakened up by INIT, it doesn't wait for the IPI.
+	 * cpu_callout_mask guarantees we don't get here before
+	 * an INIT_deassert IPI reaches our local APIC, so it is
+	 * now safe to touch our local APIC.
 	 */
 	cpuid = smp_processor_id();
-	if (apic->wait_for_init_deassert && cpuid)
-		while (!atomic_read(&init_deasserted))
-			cpu_relax();
 
 	/*
 	 * (This works even if the APIC is not enabled.)
@@ -620,7 +613,6 @@
 	send_status = safe_apic_wait_icr_idle();
 
 	mb();
-	atomic_set(&init_deasserted, 1);
 
 	/*
 	 * Should we send STARTUP IPIs ?
@@ -665,7 +657,8 @@
 		/*
 		 * Give the other CPU some time to accept the IPI.
 		 */
-		udelay(300);
+		if (init_udelay)
+			udelay(300);
 
 		pr_debug("Startup point 1\n");
 
@@ -675,7 +668,8 @@
 		/*
 		 * Give the other CPU some time to accept the IPI.
 		 */
-		udelay(200);
+		if (init_udelay)
+			udelay(200);
 
 		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
 			apic_write(APIC_ESR, 0);
@@ -859,8 +853,6 @@
 	 * the targeted processor.
 	 */
 
-	atomic_set(&init_deasserted, 0);
-
 	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
 
 		pr_debug("Setting warm reset code and vector.\n");
@@ -898,7 +890,7 @@
 
 	if (!boot_error) {
 		/*
-		 * Wait 10s total for a response from AP
+		 * Wait 10s total for first sign of life from AP
 		 */
 		boot_error = -1;
 		timeout = jiffies + 10*HZ;
@@ -911,7 +903,6 @@
 				boot_error = 0;
 				break;
 			}
-			udelay(100);
 			schedule();
 		}
 	}
@@ -927,7 +918,6 @@
 			 * for the MTRR work(triggered by the AP coming online)
 			 * to be completed in the stop machine context.
 			 */
-			udelay(100);
 			schedule();
 		}
 	}
@@ -1358,7 +1348,7 @@
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
 }
 
-static void __ref remove_cpu_from_maps(int cpu)
+static void remove_cpu_from_maps(int cpu)
 {
 	set_cpu_online(cpu, false);
 	cpumask_clear_cpu(cpu, cpu_callout_mask);
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 0ccb53a..c9a0738 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -18,6 +18,7 @@
 		return addr;
 	}
 
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
 	/*
 	 * We'll assume that the code segments in the GDT
 	 * are all zero-based. That is largely true: the
@@ -45,6 +46,7 @@
 		}
 		mutex_unlock(&child->mm->context.lock);
 	}
+#endif
 
 	return addr;
 }
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 649b010..12cbe2b 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -57,7 +57,7 @@
  *
  * This is only called for debugging CPU offline/online feature.
  */
-int __ref _debug_hotplug_cpu(int cpu, int action)
+int _debug_hotplug_cpu(int cpu, int action)
 {
 	struct device *dev = get_cpu_device(cpu);
 	int ret;
@@ -104,7 +104,7 @@
 late_initcall_sync(debug_hotplug_cpu);
 #endif /* CONFIG_DEBUG_HOTPLUG_CPU0 */
 
-int __ref arch_register_cpu(int num)
+int arch_register_cpu(int num)
 {
 	struct cpuinfo_x86 *c = &cpu_data(num);
 
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index 25b9937..80bb24d 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -12,10 +12,5 @@
  */
 u64 notrace trace_clock_x86_tsc(void)
 {
-	u64 ret;
-
-	rdtsc_barrier();
-	rdtscll(ret);
-
-	return ret;
+	return rdtsc_ordered();
 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index f579192..346eec7 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -62,6 +62,7 @@
 #include <asm/fpu/xstate.h>
 #include <asm/trace/mpx.h>
 #include <asm/mpx.h>
+#include <asm/vm86.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/x86_init.h>
@@ -108,13 +109,10 @@
 	preempt_count_dec();
 }
 
-enum ctx_state ist_enter(struct pt_regs *regs)
+void ist_enter(struct pt_regs *regs)
 {
-	enum ctx_state prev_state;
-
 	if (user_mode(regs)) {
-		/* Other than that, we're just an exception. */
-		prev_state = exception_enter();
+		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	} else {
 		/*
 		 * We might have interrupted pretty much anything.  In
@@ -123,32 +121,25 @@
 		 * but we need to notify RCU.
 		 */
 		rcu_nmi_enter();
-		prev_state = CONTEXT_KERNEL;  /* the value is irrelevant. */
 	}
 
 	/*
-	 * We are atomic because we're on the IST stack (or we're on x86_32,
-	 * in which case we still shouldn't schedule).
-	 *
-	 * This must be after exception_enter(), because exception_enter()
-	 * won't do anything if in_interrupt() returns true.
+	 * We are atomic because we're on the IST stack; or we're on
+	 * x86_32, in which case we still shouldn't schedule; or we're
+	 * on x86_64 and entered from user mode, in which case we're
+	 * still atomic unless ist_begin_non_atomic is called.
 	 */
 	preempt_count_add(HARDIRQ_OFFSET);
 
 	/* This code is a bit fragile.  Test it. */
-	rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
-
-	return prev_state;
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
 }
 
-void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
+void ist_exit(struct pt_regs *regs)
 {
-	/* Must be before exception_exit. */
 	preempt_count_sub(HARDIRQ_OFFSET);
 
-	if (user_mode(regs))
-		return exception_exit(prev_state);
-	else
+	if (!user_mode(regs))
 		rcu_nmi_exit();
 }
 
@@ -162,7 +153,7 @@
  * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
  * begins a non-atomic section within an ist_enter()/ist_exit() region.
  * Callers are responsible for enabling interrupts themselves inside
- * the non-atomic section, and callers must call is_end_non_atomic()
+ * the non-atomic section, and callers must call ist_end_non_atomic()
  * before ist_exit().
  */
 void ist_begin_non_atomic(struct pt_regs *regs)
@@ -289,17 +280,16 @@
 static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
 			  unsigned long trapnr, int signr)
 {
-	enum ctx_state prev_state = exception_enter();
 	siginfo_t info;
 
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 			NOTIFY_STOP) {
 		conditional_sti(regs);
 		do_trap(trapnr, signr, str, regs, error_code,
 			fill_trap_info(regs, signr, trapnr, &info));
 	}
-
-	exception_exit(prev_state);
 }
 
 #define DO_ERROR(trapnr, signr, str, name)				\
@@ -351,7 +341,7 @@
 	}
 #endif
 
-	ist_enter(regs);  /* Discard prev_state because we won't return. */
+	ist_enter(regs);
 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
 	tsk->thread.error_code = error_code;
@@ -371,14 +361,13 @@
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state;
 	const struct bndcsr *bndcsr;
 	siginfo_t *info;
 
-	prev_state = exception_enter();
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
 			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
-		goto exit;
+		return;
 	conditional_sti(regs);
 
 	if (!user_mode(regs))
@@ -435,9 +424,8 @@
 		die("bounds", regs, error_code);
 	}
 
-exit:
-	exception_exit(prev_state);
 	return;
+
 exit_trap:
 	/*
 	 * This path out is for all the cases where we could not
@@ -447,35 +435,33 @@
 	 * time..
 	 */
 	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
-	exception_exit(prev_state);
 }
 
 dotraplinkage void
 do_general_protection(struct pt_regs *regs, long error_code)
 {
 	struct task_struct *tsk;
-	enum ctx_state prev_state;
 
-	prev_state = exception_enter();
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	conditional_sti(regs);
 
 	if (v8086_mode(regs)) {
 		local_irq_enable();
 		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-		goto exit;
+		return;
 	}
 
 	tsk = current;
 	if (!user_mode(regs)) {
 		if (fixup_exception(regs))
-			goto exit;
+			return;
 
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_nr = X86_TRAP_GP;
 		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
 			die("general protection fault", regs, error_code);
-		goto exit;
+		return;
 	}
 
 	tsk->thread.error_code = error_code;
@@ -491,16 +477,12 @@
 	}
 
 	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-exit:
-	exception_exit(prev_state);
 }
 NOKPROBE_SYMBOL(do_general_protection);
 
 /* May run on IST stack. */
 dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state;
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/*
 	 * ftrace must be first, everything else may cause a recursive crash.
@@ -513,7 +495,8 @@
 	if (poke_int3_handler(regs))
 		return;
 
-	prev_state = ist_enter(regs);
+	ist_enter(regs);
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 				SIGTRAP) == NOTIFY_STOP)
@@ -539,7 +522,7 @@
 	preempt_conditional_cli(regs);
 	debug_stack_usage_dec();
 exit:
-	ist_exit(regs, prev_state);
+	ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_int3);
 
@@ -615,12 +598,11 @@
 dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 {
 	struct task_struct *tsk = current;
-	enum ctx_state prev_state;
 	int user_icebp = 0;
 	unsigned long dr6;
 	int si_code;
 
-	prev_state = ist_enter(regs);
+	ist_enter(regs);
 
 	get_debugreg(dr6, 6);
 
@@ -695,7 +677,7 @@
 	debug_stack_usage_dec();
 
 exit:
-	ist_exit(regs, prev_state);
+	ist_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug);
 
@@ -747,21 +729,15 @@
 
 dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state;
-
-	prev_state = exception_enter();
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	math_error(regs, error_code, X86_TRAP_MF);
-	exception_exit(prev_state);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state;
-
-	prev_state = exception_enter();
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	math_error(regs, error_code, X86_TRAP_XF);
-	exception_exit(prev_state);
 }
 
 dotraplinkage void
@@ -773,9 +749,7 @@
 dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
-	enum ctx_state prev_state;
-
-	prev_state = exception_enter();
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	BUG_ON(use_eager_fpu());
 
 #ifdef CONFIG_MATH_EMULATION
@@ -786,7 +760,6 @@
 
 		info.regs = regs;
 		math_emulate(&info);
-		exception_exit(prev_state);
 		return;
 	}
 #endif
@@ -794,7 +767,6 @@
 #ifdef CONFIG_X86_32
 	conditional_sti(regs);
 #endif
-	exception_exit(prev_state);
 }
 NOKPROBE_SYMBOL(do_device_not_available);
 
@@ -802,9 +774,8 @@
 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 {
 	siginfo_t info;
-	enum ctx_state prev_state;
 
-	prev_state = exception_enter();
+	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 	local_irq_enable();
 
 	info.si_signo = SIGILL;
@@ -816,7 +787,6 @@
 		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
 			&info);
 	}
-	exception_exit(prev_state);
 }
 #endif
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7437b41..c8d52cb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -38,7 +38,7 @@
    erroneous rdtsc usage on !cpu_has_tsc processors */
 static int __read_mostly tsc_disabled = -1;
 
-static struct static_key __use_tsc = STATIC_KEY_INIT;
+static DEFINE_STATIC_KEY_FALSE(__use_tsc);
 
 int tsc_clocksource_reliable;
 
@@ -248,7 +248,7 @@
 
 	data = cyc2ns_write_begin(cpu);
 
-	rdtscll(tsc_now);
+	tsc_now = rdtsc();
 	ns_now = cycles_2_ns(tsc_now);
 
 	/*
@@ -274,7 +274,12 @@
  */
 u64 native_sched_clock(void)
 {
-	u64 tsc_now;
+	if (static_branch_likely(&__use_tsc)) {
+		u64 tsc_now = rdtsc();
+
+		/* return the value in ns */
+		return cycles_2_ns(tsc_now);
+	}
 
 	/*
 	 * Fall back to jiffies if there's no TSC available:
@@ -284,16 +289,17 @@
 	 *   very important for it to be as fast as the platform
 	 *   can achieve it. )
 	 */
-	if (!static_key_false(&__use_tsc)) {
-		/* No locking but a rare wrong value is not a big deal: */
-		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
-	}
 
-	/* read the Time Stamp Counter: */
-	rdtscll(tsc_now);
+	/* No locking but a rare wrong value is not a big deal: */
+	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
+}
 
-	/* return the value in ns */
-	return cycles_2_ns(tsc_now);
+/*
+ * Generate a sched_clock if you already have a TSC value.
+ */
+u64 native_sched_clock_from_tsc(u64 tsc)
+{
+	return cycles_2_ns(tsc);
 }
 
 /* We need to define a real function for sched_clock, to override the
@@ -308,12 +314,6 @@
 sched_clock(void) __attribute__((alias("native_sched_clock")));
 #endif
 
-unsigned long long native_read_tsc(void)
-{
-	return __native_read_tsc();
-}
-EXPORT_SYMBOL(native_read_tsc);
-
 int check_tsc_unstable(void)
 {
 	return tsc_unstable;
@@ -976,7 +976,7 @@
  */
 static cycle_t read_tsc(struct clocksource *cs)
 {
-	return (cycle_t)get_cycles();
+	return (cycle_t)rdtsc_ordered();
 }
 
 /*
@@ -1210,7 +1210,7 @@
 	/* now allow native_sched_clock() to use rdtsc */
 
 	tsc_disabled = 0;
-	static_key_slow_inc(&__use_tsc);
+	static_branch_enable(&__use_tsc);
 
 	if (!no_sched_irq_time)
 		enable_sched_clock_irqtime();
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index dd8d079..78083bf 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -39,16 +39,15 @@
 static int nr_warps;
 
 /*
- * TSC-warp measurement loop running on both CPUs:
+ * TSC-warp measurement loop running on both CPUs.  This is not called
+ * if there is no TSC.
  */
 static void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
 	int i;
 
-	rdtsc_barrier();
-	start = get_cycles();
-	rdtsc_barrier();
+	start = rdtsc_ordered();
 	/*
 	 * The measurement runs for 'timeout' msecs:
 	 */
@@ -63,9 +62,7 @@
 		 */
 		arch_spin_lock(&sync_lock);
 		prev = last_tsc;
-		rdtsc_barrier();
-		now = get_cycles();
-		rdtsc_barrier();
+		now = rdtsc_ordered();
 		last_tsc = now;
 		arch_spin_unlock(&sync_lock);
 
@@ -126,7 +123,7 @@
 
 	/*
 	 * No need to check if we already know that the TSC is not
-	 * synchronized:
+	 * synchronized or if we have no TSC.
 	 */
 	if (unsynchronized_tsc())
 		return;
@@ -190,6 +187,7 @@
 {
 	int cpus = 2;
 
+	/* Also aborts if there is no TSC. */
 	if (unsynchronized_tsc() || tsc_clocksource_reliable)
 		return;
 
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 6647624..bf4db6e 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -985,3 +985,12 @@
 
 	return -1;
 }
+
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+				struct pt_regs *regs)
+{
+	if (ctx == RP_CHECK_CALL) /* sp was just decremented by "call" insn */
+		return regs->sp < ret->stack;
+	else
+		return regs->sp <= ret->stack;
+}
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index fc9db6e..abd8b856 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -44,11 +44,14 @@
 #include <linux/ptrace.h>
 #include <linux/audit.h>
 #include <linux/stddef.h>
+#include <linux/slab.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/vm86.h>
 
 /*
  * Known problems:
@@ -66,10 +69,6 @@
  */
 
 
-#define KVM86	((struct kernel_vm86_struct *)regs)
-#define VMPI	KVM86->vm86plus
-
-
 /*
  * 8- and 16-bit register defines..
  */
@@ -81,8 +80,8 @@
 /*
  * virtual flags (16 and 32-bit versions)
  */
-#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
-#define VEFLAGS	(current->thread.v86flags)
+#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->veflags))
+#define VEFLAGS	(current->thread.vm86->veflags)
 
 #define set_flags(X, new, mask) \
 ((X) = ((X) & ~(mask)) | ((new) & (mask)))
@@ -90,46 +89,13 @@
 #define SAFE_MASK	(0xDD5)
 #define RETURN_MASK	(0xDFF)
 
-/* convert kernel_vm86_regs to vm86_regs */
-static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
-				  const struct kernel_vm86_regs *regs)
-{
-	int ret = 0;
-
-	/*
-	 * kernel_vm86_regs is missing gs, so copy everything up to
-	 * (but not including) orig_eax, and then rest including orig_eax.
-	 */
-	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
-	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
-			    sizeof(struct kernel_vm86_regs) -
-			    offsetof(struct kernel_vm86_regs, pt.orig_ax));
-
-	return ret;
-}
-
-/* convert vm86_regs to kernel_vm86_regs */
-static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
-				    const struct vm86_regs __user *user,
-				    unsigned extra)
-{
-	int ret = 0;
-
-	/* copy ax-fs inclusive */
-	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
-	/* copy orig_ax-__gsh+extra */
-	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
-			      sizeof(struct kernel_vm86_regs) -
-			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
-			      extra);
-	return ret;
-}
-
-struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+void save_v86_state(struct kernel_vm86_regs *regs, int retval)
 {
 	struct tss_struct *tss;
-	struct pt_regs *ret;
-	unsigned long tmp;
+	struct task_struct *tsk = current;
+	struct vm86plus_struct __user *user;
+	struct vm86 *vm86 = current->thread.vm86;
+	long err = 0;
 
 	/*
 	 * This gets called from entry.S with interrupts disabled, but
@@ -138,31 +104,57 @@
 	 */
 	local_irq_enable();
 
-	if (!current->thread.vm86_info) {
-		pr_alert("no vm86_info: BAD\n");
+	if (!vm86 || !vm86->user_vm86) {
+		pr_alert("no user_vm86: BAD\n");
 		do_exit(SIGSEGV);
 	}
-	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
-	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
-	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
-	if (tmp) {
-		pr_alert("could not access userspace vm86_info\n");
+	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask);
+	user = vm86->user_vm86;
+
+	if (!access_ok(VERIFY_WRITE, user, vm86->vm86plus.is_vm86pus ?
+		       sizeof(struct vm86plus_struct) :
+		       sizeof(struct vm86_struct))) {
+		pr_alert("could not access userspace vm86 info\n");
+		do_exit(SIGSEGV);
+	}
+
+	put_user_try {
+		put_user_ex(regs->pt.bx, &user->regs.ebx);
+		put_user_ex(regs->pt.cx, &user->regs.ecx);
+		put_user_ex(regs->pt.dx, &user->regs.edx);
+		put_user_ex(regs->pt.si, &user->regs.esi);
+		put_user_ex(regs->pt.di, &user->regs.edi);
+		put_user_ex(regs->pt.bp, &user->regs.ebp);
+		put_user_ex(regs->pt.ax, &user->regs.eax);
+		put_user_ex(regs->pt.ip, &user->regs.eip);
+		put_user_ex(regs->pt.cs, &user->regs.cs);
+		put_user_ex(regs->pt.flags, &user->regs.eflags);
+		put_user_ex(regs->pt.sp, &user->regs.esp);
+		put_user_ex(regs->pt.ss, &user->regs.ss);
+		put_user_ex(regs->es, &user->regs.es);
+		put_user_ex(regs->ds, &user->regs.ds);
+		put_user_ex(regs->fs, &user->regs.fs);
+		put_user_ex(regs->gs, &user->regs.gs);
+
+		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
+	} put_user_catch(err);
+	if (err) {
+		pr_alert("could not access userspace vm86 info\n");
 		do_exit(SIGSEGV);
 	}
 
 	tss = &per_cpu(cpu_tss, get_cpu());
-	current->thread.sp0 = current->thread.saved_sp0;
-	current->thread.sysenter_cs = __KERNEL_CS;
-	load_sp0(tss, &current->thread);
-	current->thread.saved_sp0 = 0;
+	tsk->thread.sp0 = vm86->saved_sp0;
+	tsk->thread.sysenter_cs = __KERNEL_CS;
+	load_sp0(tss, &tsk->thread);
+	vm86->saved_sp0 = 0;
 	put_cpu();
 
-	ret = KVM86->regs32;
+	memcpy(&regs->pt, &vm86->regs32, sizeof(struct pt_regs));
 
-	ret->fs = current->thread.saved_fs;
-	set_user_gs(ret, current->thread.saved_gs);
+	lazy_load_gs(vm86->regs32.gs);
 
-	return ret;
+	regs->pt.ax = retval;
 }
 
 static void mark_screen_rdonly(struct mm_struct *mm)
@@ -200,45 +192,16 @@
 
 
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
-static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
+static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus);
 
-SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
+SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, user_vm86)
 {
-	struct kernel_vm86_struct info; /* declare this _on top_,
-					 * this avoids wasting of stack space.
-					 * This remains on the stack until we
-					 * return to 32 bit user space.
-					 */
-	struct task_struct *tsk = current;
-	int tmp;
-
-	if (tsk->thread.saved_sp0)
-		return -EPERM;
-	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
-				       offsetof(struct kernel_vm86_struct, vm86plus) -
-				       sizeof(info.regs));
-	if (tmp)
-		return -EFAULT;
-	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
-	info.regs32 = current_pt_regs();
-	tsk->thread.vm86_info = v86;
-	do_sys_vm86(&info, tsk);
-	return 0;	/* we never return here */
+	return do_sys_vm86((struct vm86plus_struct __user *) user_vm86, false);
 }
 
 
 SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
 {
-	struct kernel_vm86_struct info; /* declare this _on top_,
-					 * this avoids wasting of stack space.
-					 * This remains on the stack until we
-					 * return to 32 bit user space.
-					 */
-	struct task_struct *tsk;
-	int tmp;
-	struct vm86plus_struct __user *v86;
-
-	tsk = current;
 	switch (cmd) {
 	case VM86_REQUEST_IRQ:
 	case VM86_FREE_IRQ:
@@ -256,114 +219,133 @@
 	}
 
 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
-	if (tsk->thread.saved_sp0)
-		return -EPERM;
-	v86 = (struct vm86plus_struct __user *)arg;
-	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
-				       offsetof(struct kernel_vm86_struct, regs32) -
-				       sizeof(info.regs));
-	if (tmp)
-		return -EFAULT;
-	info.regs32 = current_pt_regs();
-	info.vm86plus.is_vm86pus = 1;
-	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
-	do_sys_vm86(&info, tsk);
-	return 0;	/* we never return here */
+	return do_sys_vm86((struct vm86plus_struct __user *) arg, true);
 }
 
 
-static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
 {
 	struct tss_struct *tss;
-/*
- * make sure the vm86() system call doesn't try to do anything silly
- */
-	info->regs.pt.ds = 0;
-	info->regs.pt.es = 0;
-	info->regs.pt.fs = 0;
-#ifndef CONFIG_X86_32_LAZY_GS
-	info->regs.pt.gs = 0;
-#endif
+	struct task_struct *tsk = current;
+	struct vm86 *vm86 = tsk->thread.vm86;
+	struct kernel_vm86_regs vm86regs;
+	struct pt_regs *regs = current_pt_regs();
+	unsigned long err = 0;
+
+	if (!vm86) {
+		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
+			return -ENOMEM;
+		tsk->thread.vm86 = vm86;
+	}
+	if (vm86->saved_sp0)
+		return -EPERM;
+
+	if (!access_ok(VERIFY_READ, user_vm86, plus ?
+		       sizeof(struct vm86_struct) :
+		       sizeof(struct vm86plus_struct)))
+		return -EFAULT;
+
+	memset(&vm86regs, 0, sizeof(vm86regs));
+	get_user_try {
+		unsigned short seg;
+		get_user_ex(vm86regs.pt.bx, &user_vm86->regs.ebx);
+		get_user_ex(vm86regs.pt.cx, &user_vm86->regs.ecx);
+		get_user_ex(vm86regs.pt.dx, &user_vm86->regs.edx);
+		get_user_ex(vm86regs.pt.si, &user_vm86->regs.esi);
+		get_user_ex(vm86regs.pt.di, &user_vm86->regs.edi);
+		get_user_ex(vm86regs.pt.bp, &user_vm86->regs.ebp);
+		get_user_ex(vm86regs.pt.ax, &user_vm86->regs.eax);
+		get_user_ex(vm86regs.pt.ip, &user_vm86->regs.eip);
+		get_user_ex(seg, &user_vm86->regs.cs);
+		vm86regs.pt.cs = seg;
+		get_user_ex(vm86regs.pt.flags, &user_vm86->regs.eflags);
+		get_user_ex(vm86regs.pt.sp, &user_vm86->regs.esp);
+		get_user_ex(seg, &user_vm86->regs.ss);
+		vm86regs.pt.ss = seg;
+		get_user_ex(vm86regs.es, &user_vm86->regs.es);
+		get_user_ex(vm86regs.ds, &user_vm86->regs.ds);
+		get_user_ex(vm86regs.fs, &user_vm86->regs.fs);
+		get_user_ex(vm86regs.gs, &user_vm86->regs.gs);
+
+		get_user_ex(vm86->flags, &user_vm86->flags);
+		get_user_ex(vm86->screen_bitmap, &user_vm86->screen_bitmap);
+		get_user_ex(vm86->cpu_type, &user_vm86->cpu_type);
+	} get_user_catch(err);
+	if (err)
+		return err;
+
+	if (copy_from_user(&vm86->int_revectored,
+			   &user_vm86->int_revectored,
+			   sizeof(struct revectored_struct)))
+		return -EFAULT;
+	if (copy_from_user(&vm86->int21_revectored,
+			   &user_vm86->int21_revectored,
+			   sizeof(struct revectored_struct)))
+		return -EFAULT;
+	if (plus) {
+		if (copy_from_user(&vm86->vm86plus, &user_vm86->vm86plus,
+				   sizeof(struct vm86plus_info_struct)))
+			return -EFAULT;
+		vm86->vm86plus.is_vm86pus = 1;
+	} else
+		memset(&vm86->vm86plus, 0,
+		       sizeof(struct vm86plus_info_struct));
+
+	memcpy(&vm86->regs32, regs, sizeof(struct pt_regs));
+	vm86->user_vm86 = user_vm86;
 
 /*
  * The flags register is also special: we cannot trust that the user
  * has set it up safely, so this makes sure interrupt etc flags are
  * inherited from protected mode.
  */
-	VEFLAGS = info->regs.pt.flags;
-	info->regs.pt.flags &= SAFE_MASK;
-	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
-	info->regs.pt.flags |= X86_VM_MASK;
+	VEFLAGS = vm86regs.pt.flags;
+	vm86regs.pt.flags &= SAFE_MASK;
+	vm86regs.pt.flags |= regs->flags & ~SAFE_MASK;
+	vm86regs.pt.flags |= X86_VM_MASK;
 
-	switch (info->cpu_type) {
+	vm86regs.pt.orig_ax = regs->orig_ax;
+
+	switch (vm86->cpu_type) {
 	case CPU_286:
-		tsk->thread.v86mask = 0;
+		vm86->veflags_mask = 0;
 		break;
 	case CPU_386:
-		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+		vm86->veflags_mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
 		break;
 	case CPU_486:
-		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+		vm86->veflags_mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
 		break;
 	default:
-		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
+		vm86->veflags_mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
 		break;
 	}
 
 /*
- * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
+ * Save old state
  */
-	info->regs32->ax = VM86_SIGNAL;
-	tsk->thread.saved_sp0 = tsk->thread.sp0;
-	tsk->thread.saved_fs = info->regs32->fs;
-	tsk->thread.saved_gs = get_user_gs(info->regs32);
+	vm86->saved_sp0 = tsk->thread.sp0;
+	lazy_save_gs(vm86->regs32.gs);
 
 	tss = &per_cpu(cpu_tss, get_cpu());
-	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+	/* make room for real-mode segments */
+	tsk->thread.sp0 += 16;
 	if (cpu_has_sep)
 		tsk->thread.sysenter_cs = 0;
 	load_sp0(tss, &tsk->thread);
 	put_cpu();
 
-	tsk->thread.screen_bitmap = info->screen_bitmap;
-	if (info->flags & VM86_SCREEN_BITMAP)
+	if (vm86->flags & VM86_SCREEN_BITMAP)
 		mark_screen_rdonly(tsk->mm);
 
-	/*call __audit_syscall_exit since we do not exit via the normal paths */
-#ifdef CONFIG_AUDITSYSCALL
-	if (unlikely(current->audit_context))
-		__audit_syscall_exit(1, 0);
-#endif
-
-	__asm__ __volatile__(
-		"movl %0,%%esp\n\t"
-		"movl %1,%%ebp\n\t"
-#ifdef CONFIG_X86_32_LAZY_GS
-		"mov  %2, %%gs\n\t"
-#endif
-		"jmp resume_userspace"
-		: /* no outputs */
-		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
-	/* we never return here */
-}
-
-static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
-{
-	struct pt_regs *regs32;
-
-	regs32 = save_v86_state(regs16);
-	regs32->ax = retval;
-	__asm__ __volatile__("movl %0,%%esp\n\t"
-		"movl %1,%%ebp\n\t"
-		"jmp resume_userspace"
-		: : "r" (regs32), "r" (current_thread_info()));
+	memcpy((struct kernel_vm86_regs *)regs, &vm86regs, sizeof(vm86regs));
+	force_iret();
+	return regs->ax;
 }
 
 static inline void set_IF(struct kernel_vm86_regs *regs)
 {
 	VEFLAGS |= X86_EFLAGS_VIF;
-	if (VEFLAGS & X86_EFLAGS_VIP)
-		return_to_32bit(regs, VM86_STI);
 }
 
 static inline void clear_IF(struct kernel_vm86_regs *regs)
@@ -395,7 +377,7 @@
 
 static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
 {
-	set_flags(VEFLAGS, flags, current->thread.v86mask);
+	set_flags(VEFLAGS, flags, current->thread.vm86->veflags_mask);
 	set_flags(regs->pt.flags, flags, SAFE_MASK);
 	if (flags & X86_EFLAGS_IF)
 		set_IF(regs);
@@ -405,7 +387,7 @@
 
 static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
 {
-	set_flags(VFLAGS, flags, current->thread.v86mask);
+	set_flags(VFLAGS, flags, current->thread.vm86->veflags_mask);
 	set_flags(regs->pt.flags, flags, SAFE_MASK);
 	if (flags & X86_EFLAGS_IF)
 		set_IF(regs);
@@ -420,7 +402,7 @@
 	if (VEFLAGS & X86_EFLAGS_VIF)
 		flags |= X86_EFLAGS_IF;
 	flags |= X86_EFLAGS_IOPL;
-	return flags | (VEFLAGS & current->thread.v86mask);
+	return flags | (VEFLAGS & current->thread.vm86->veflags_mask);
 }
 
 static inline int is_revectored(int nr, struct revectored_struct *bitmap)
@@ -518,12 +500,13 @@
 {
 	unsigned long __user *intr_ptr;
 	unsigned long segoffs;
+	struct vm86 *vm86 = current->thread.vm86;
 
 	if (regs->pt.cs == BIOSSEG)
 		goto cannot_handle;
-	if (is_revectored(i, &KVM86->int_revectored))
+	if (is_revectored(i, &vm86->int_revectored))
 		goto cannot_handle;
-	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
+	if (i == 0x21 && is_revectored(AH(regs), &vm86->int21_revectored))
 		goto cannot_handle;
 	intr_ptr = (unsigned long __user *) (i << 2);
 	if (get_user(segoffs, intr_ptr))
@@ -542,18 +525,16 @@
 	return;
 
 cannot_handle:
-	return_to_32bit(regs, VM86_INTx + (i << 8));
+	save_v86_state(regs, VM86_INTx + (i << 8));
 }
 
 int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
 {
-	if (VMPI.is_vm86pus) {
+	struct vm86 *vm86 = current->thread.vm86;
+
+	if (vm86->vm86plus.is_vm86pus) {
 		if ((trapno == 3) || (trapno == 1)) {
-			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
-			/* setting this flag forces the code in entry_32.S to
-			   the path where we call save_v86_state() and change
-			   the stack pointer to KVM86->regs32 */
-			set_thread_flag(TIF_NOTIFY_RESUME);
+			save_v86_state(regs, VM86_TRAP + (trapno << 8));
 			return 0;
 		}
 		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
@@ -574,16 +555,11 @@
 	unsigned char __user *ssp;
 	unsigned short ip, sp, orig_flags;
 	int data32, pref_done;
+	struct vm86plus_info_struct *vmpi = &current->thread.vm86->vm86plus;
 
 #define CHECK_IF_IN_TRAP \
-	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
+	if (vmpi->vm86dbg_active && vmpi->vm86dbg_TFpendig) \
 		newflags |= X86_EFLAGS_TF
-#define VM86_FAULT_RETURN do { \
-	if (VMPI.force_return_for_pic  && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
-		return_to_32bit(regs, VM86_PICRETURN); \
-	if (orig_flags & X86_EFLAGS_TF) \
-		handle_vm86_trap(regs, 0, 1); \
-	return; } while (0)
 
 	orig_flags = *(unsigned short *)&regs->pt.flags;
 
@@ -622,7 +598,7 @@
 			SP(regs) -= 2;
 		}
 		IP(regs) = ip;
-		VM86_FAULT_RETURN;
+		goto vm86_fault_return;
 
 	/* popf */
 	case 0x9d:
@@ -642,16 +618,18 @@
 		else
 			set_vflags_short(newflags, regs);
 
-		VM86_FAULT_RETURN;
+		goto check_vip;
 		}
 
 	/* int xx */
 	case 0xcd: {
 		int intno = popb(csp, ip, simulate_sigsegv);
 		IP(regs) = ip;
-		if (VMPI.vm86dbg_active) {
-			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
-				return_to_32bit(regs, VM86_INTx + (intno << 8));
+		if (vmpi->vm86dbg_active) {
+			if ((1 << (intno & 7)) & vmpi->vm86dbg_intxxtab[intno >> 3]) {
+				save_v86_state(regs, VM86_INTx + (intno << 8));
+				return;
+			}
 		}
 		do_int(regs, intno, ssp, sp);
 		return;
@@ -682,14 +660,14 @@
 		} else {
 			set_vflags_short(newflags, regs);
 		}
-		VM86_FAULT_RETURN;
+		goto check_vip;
 		}
 
 	/* cli */
 	case 0xfa:
 		IP(regs) = ip;
 		clear_IF(regs);
-		VM86_FAULT_RETURN;
+		goto vm86_fault_return;
 
 	/* sti */
 	/*
@@ -701,14 +679,29 @@
 	case 0xfb:
 		IP(regs) = ip;
 		set_IF(regs);
-		VM86_FAULT_RETURN;
+		goto check_vip;
 
 	default:
-		return_to_32bit(regs, VM86_UNKNOWN);
+		save_v86_state(regs, VM86_UNKNOWN);
 	}
 
 	return;
 
+check_vip:
+	if (VEFLAGS & X86_EFLAGS_VIP) {
+		save_v86_state(regs, VM86_STI);
+		return;
+	}
+
+vm86_fault_return:
+	if (vmpi->force_return_for_pic  && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) {
+		save_v86_state(regs, VM86_PICRETURN);
+		return;
+	}
+	if (orig_flags & X86_EFLAGS_TF)
+		handle_vm86_trap(regs, 0, X86_TRAP_DB);
+	return;
+
 simulate_sigsegv:
 	/* FIXME: After a long discussion with Stas we finally
 	 *        agreed, that this is wrong. Here we should
@@ -720,7 +713,7 @@
 	 *        should be a mixture of the two, but how do we
 	 *        get the information? [KD]
 	 */
-	return_to_32bit(regs, VM86_UNKNOWN);
+	save_v86_state(regs, VM86_UNKNOWN);
 }
 
 /* ---------------- vm86 special IRQ passing stuff ----------------- */
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 67d215c..a1ff508 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,9 @@
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o
+			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
+			   hyperv.o
+
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)	+= assigned-dev.o iommu.o
 kvm-intel-y		+= vmx.o pmu_intel.o
 kvm-amd-y		+= svm.o pmu_amd.o
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
new file mode 100644
index 0000000..a8160d2
--- /dev/null
+++ b/arch/x86/kvm/hyperv.c
@@ -0,0 +1,377 @@
+/*
+ * KVM Microsoft Hyper-V emulation
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright (C) 2008 Qumranet, Inc.
+ * Copyright IBM Corporation, 2008
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ *   Avi Kivity   <avi@qumranet.com>
+ *   Yaniv Kamay  <yaniv@qumranet.com>
+ *   Amit Shah    <amit.shah@qumranet.com>
+ *   Ben-Ami Yassour <benami@il.ibm.com>
+ *   Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "x86.h"
+#include "lapic.h"
+#include "hyperv.h"
+
+#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
+
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+	bool r = false;
+
+	switch (msr) {
+	case HV_X64_MSR_GUEST_OS_ID:
+	case HV_X64_MSR_HYPERCALL:
+	case HV_X64_MSR_REFERENCE_TSC:
+	case HV_X64_MSR_TIME_REF_COUNT:
+	case HV_X64_MSR_CRASH_CTL:
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+		r = true;
+		break;
+	}
+
+	return r;
+}
+
+static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
+				     u32 index, u64 *pdata)
+{
+	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+		return -EINVAL;
+
+	*pdata = hv->hv_crash_param[index];
+	return 0;
+}
+
+static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+{
+	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+	*pdata = hv->hv_crash_ctl;
+	return 0;
+}
+
+static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
+{
+	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+	if (host)
+		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
+
+	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+
+		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
+			  hv->hv_crash_param[0],
+			  hv->hv_crash_param[1],
+			  hv->hv_crash_param[2],
+			  hv->hv_crash_param[3],
+			  hv->hv_crash_param[4]);
+
+		/* Send notification about crash to user space */
+		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+	}
+
+	return 0;
+}
+
+static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
+				     u32 index, u64 data)
+{
+	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+		return -EINVAL;
+
+	hv->hv_crash_param[index] = data;
+	return 0;
+}
+
+static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
+			     bool host)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_hv *hv = &kvm->arch.hyperv;
+
+	switch (msr) {
+	case HV_X64_MSR_GUEST_OS_ID:
+		hv->hv_guest_os_id = data;
+		/* setting guest os id to zero disables hypercall page */
+		if (!hv->hv_guest_os_id)
+			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+		break;
+	case HV_X64_MSR_HYPERCALL: {
+		u64 gfn;
+		unsigned long addr;
+		u8 instructions[4];
+
+		/* if guest os id is not set hypercall should remain disabled */
+		if (!hv->hv_guest_os_id)
+			break;
+		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+			hv->hv_hypercall = data;
+			break;
+		}
+		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+		addr = gfn_to_hva(kvm, gfn);
+		if (kvm_is_error_hva(addr))
+			return 1;
+		kvm_x86_ops->patch_hypercall(vcpu, instructions);
+		((unsigned char *)instructions)[3] = 0xc3; /* ret */
+		if (__copy_to_user((void __user *)addr, instructions, 4))
+			return 1;
+		hv->hv_hypercall = data;
+		mark_page_dirty(kvm, gfn);
+		break;
+	}
+	case HV_X64_MSR_REFERENCE_TSC: {
+		u64 gfn;
+		HV_REFERENCE_TSC_PAGE tsc_ref;
+
+		memset(&tsc_ref, 0, sizeof(tsc_ref));
+		hv->hv_tsc_page = data;
+		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+			break;
+		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+		if (kvm_write_guest(
+				kvm,
+				gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
+				&tsc_ref, sizeof(tsc_ref)))
+			return 1;
+		mark_page_dirty(kvm, gfn);
+		break;
+	}
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+		return kvm_hv_msr_set_crash_data(vcpu,
+						 msr - HV_X64_MSR_CRASH_P0,
+						 data);
+	case HV_X64_MSR_CRASH_CTL:
+		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
+	default:
+		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
+			    msr, data);
+		return 1;
+	}
+	return 0;
+}
+
+static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+
+	switch (msr) {
+	case HV_X64_MSR_APIC_ASSIST_PAGE: {
+		u64 gfn;
+		unsigned long addr;
+
+		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+			hv->hv_vapic = data;
+			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+				return 1;
+			break;
+		}
+		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
+		if (kvm_is_error_hva(addr))
+			return 1;
+		if (__clear_user((void __user *)addr, PAGE_SIZE))
+			return 1;
+		hv->hv_vapic = data;
+		kvm_vcpu_mark_page_dirty(vcpu, gfn);
+		if (kvm_lapic_enable_pv_eoi(vcpu,
+					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+			return 1;
+		break;
+	}
+	case HV_X64_MSR_EOI:
+		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+	case HV_X64_MSR_ICR:
+		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+	case HV_X64_MSR_TPR:
+		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+	default:
+		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
+			    msr, data);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+	u64 data = 0;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_hv *hv = &kvm->arch.hyperv;
+
+	switch (msr) {
+	case HV_X64_MSR_GUEST_OS_ID:
+		data = hv->hv_guest_os_id;
+		break;
+	case HV_X64_MSR_HYPERCALL:
+		data = hv->hv_hypercall;
+		break;
+	case HV_X64_MSR_TIME_REF_COUNT: {
+		data =
+		     div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+		break;
+	}
+	case HV_X64_MSR_REFERENCE_TSC:
+		data = hv->hv_tsc_page;
+		break;
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+		return kvm_hv_msr_get_crash_data(vcpu,
+						 msr - HV_X64_MSR_CRASH_P0,
+						 pdata);
+	case HV_X64_MSR_CRASH_CTL:
+		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
+	default:
+		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+		return 1;
+	}
+
+	*pdata = data;
+	return 0;
+}
+
+static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+	u64 data = 0;
+	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+
+	switch (msr) {
+	case HV_X64_MSR_VP_INDEX: {
+		int r;
+		struct kvm_vcpu *v;
+
+		kvm_for_each_vcpu(r, v, vcpu->kvm) {
+			if (v == vcpu) {
+				data = r;
+				break;
+			}
+		}
+		break;
+	}
+	case HV_X64_MSR_EOI:
+		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+	case HV_X64_MSR_ICR:
+		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+	case HV_X64_MSR_TPR:
+		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+	case HV_X64_MSR_APIC_ASSIST_PAGE:
+		data = hv->hv_vapic;
+		break;
+	default:
+		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+		return 1;
+	}
+	*pdata = data;
+	return 0;
+}
+
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
+{
+	if (kvm_hv_msr_partition_wide(msr)) {
+		int r;
+
+		mutex_lock(&vcpu->kvm->lock);
+		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
+		mutex_unlock(&vcpu->kvm->lock);
+		return r;
+	} else
+		return kvm_hv_set_msr(vcpu, msr, data);
+}
+
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+	if (kvm_hv_msr_partition_wide(msr)) {
+		int r;
+
+		mutex_lock(&vcpu->kvm->lock);
+		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
+		mutex_unlock(&vcpu->kvm->lock);
+		return r;
+	} else
+		return kvm_hv_get_msr(vcpu, msr, pdata);
+}
+
+bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+	u64 param, ingpa, outgpa, ret;
+	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
+	bool fast, longmode;
+
+	/*
+	 * hypercall generates UD from non zero cpl and real mode
+	 * per HYPER-V spec
+	 */
+	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 0;
+	}
+
+	longmode = is_64_bit_mode(vcpu);
+
+	if (!longmode) {
+		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
+			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
+		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
+			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
+		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
+			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+	}
+#ifdef CONFIG_X86_64
+	else {
+		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
+		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
+		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+	}
+#endif
+
+	code = param & 0xffff;
+	fast = (param >> 16) & 0x1;
+	rep_cnt = (param >> 32) & 0xfff;
+	rep_idx = (param >> 48) & 0xfff;
+
+	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+
+	switch (code) {
+	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+		kvm_vcpu_on_spin(vcpu);
+		break;
+	default:
+		res = HV_STATUS_INVALID_HYPERCALL_CODE;
+		break;
+	}
+
+	ret = res | (((u64)rep_done & 0xfff) << 32);
+	if (longmode) {
+		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+	} else {
+		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
+		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
+	}
+
+	return 1;
+}
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
new file mode 100644
index 0000000..c7bce55
--- /dev/null
+++ b/arch/x86/kvm/hyperv.h
@@ -0,0 +1,32 @@
+/*
+ * KVM Microsoft Hyper-V emulation
+ *
+ * derived from arch/x86/kvm/x86.c
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright (C) 2008 Qumranet, Inc.
+ * Copyright IBM Corporation, 2008
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ *   Avi Kivity   <avi@qumranet.com>
+ *   Yaniv Kamay  <yaniv@qumranet.com>
+ *   Amit Shah    <amit.shah@qumranet.com>
+ *   Ben-Ami Yassour <benami@il.ibm.com>
+ *   Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef __ARCH_X86_KVM_HYPERV_H__
+#define __ARCH_X86_KVM_HYPERV_H__
+
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+bool kvm_hv_hypercall_enabled(struct kvm *kvm);
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index fef922f..7cc2360 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -651,15 +651,10 @@
 	return NULL;
 }
 
-void kvm_destroy_pic(struct kvm *kvm)
+void kvm_destroy_pic(struct kvm_pic *vpic)
 {
-	struct kvm_pic *vpic = kvm->arch.vpic;
-
-	if (vpic) {
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_master);
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_slave);
-		kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &vpic->dev_eclr);
-		kvm->arch.vpic = NULL;
-		kfree(vpic);
-	}
+	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master);
+	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave);
+	kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr);
+	kfree(vpic);
 }
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index ad68c73..3d782a2 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -74,7 +74,7 @@
 };
 
 struct kvm_pic *kvm_create_pic(struct kvm *kvm);
-void kvm_destroy_pic(struct kvm *kvm);
+void kvm_destroy_pic(struct kvm_pic *vpic);
 int kvm_pic_read_irq(struct kvm *kvm);
 void kvm_pic_update_irq(struct kvm_pic *s);
 
@@ -85,11 +85,11 @@
 
 static inline int irqchip_in_kernel(struct kvm *kvm)
 {
-	int ret;
+	struct kvm_pic *vpic = pic_irqchip(kvm);
 
-	ret = (pic_irqchip(kvm) != NULL);
+	/* Read vpic before kvm->irq_routing.  */
 	smp_rmb();
-	return ret;
+	return vpic != NULL;
 }
 
 void kvm_pic_reset(struct kvm_kpic_state *s);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2a5ca97..8d9013c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1172,7 +1172,7 @@
 
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
-	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+	guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 	trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
 
 	/* __delay is delay_tsc whenever the hardware has TSC, thus always.  */
@@ -1240,7 +1240,7 @@
 		local_irq_save(flags);
 
 		now = apic->lapic_timer.timer.base->get_time();
-		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, native_read_tsc());
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu, rdtsc());
 		if (likely(tscdeadline > guest_tsc)) {
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
@@ -1900,8 +1900,9 @@
 	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
 		return;
 
-	kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
-				sizeof(u32));
+	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+				  sizeof(u32)))
+		return;
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 7195274..7640379 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -91,7 +91,7 @@
 
 static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
+	return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
 }
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4417146..fb16a8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -357,12 +357,6 @@
 {
 	return ACCESS_ONCE(*sptep);
 }
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
-	/* It is valid if the spte is zapped. */
-	return spte == 0ull;
-}
 #else
 union split_spte {
 	struct {
@@ -478,23 +472,6 @@
 
 	return spte.spte;
 }
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
-	union split_spte sspte = (union split_spte)spte;
-	u32 high_mmio_mask = shadow_mmio_mask >> 32;
-
-	/* It is valid if the spte is zapped. */
-	if (spte == 0ull)
-		return true;
-
-	/* It is valid if the spte is being zapped. */
-	if (sspte.spte_low == 0ull &&
-	    (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
-		return true;
-
-	return false;
-}
 #endif
 
 static bool spte_is_locklessly_modifiable(u64 spte)
@@ -3291,6 +3268,25 @@
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
 }
 
+static bool
+__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
+{
+	int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
+
+	return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
+		((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
+}
+
+static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+	return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
+}
+
+static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
+{
+	return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
+}
+
 static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	if (direct)
@@ -3299,46 +3295,62 @@
 	return vcpu_match_mmio_gva(vcpu, addr);
 }
 
-
-/*
- * On direct hosts, the last spte is only allows two states
- * for mmio page fault:
- *   - It is the mmio spte
- *   - It is zapped or it is being zapped.
- *
- * This function completely checks the spte when the last spte
- * is not the mmio spte.
- */
-static bool check_direct_spte_mmio_pf(u64 spte)
-{
-	return __check_direct_spte_mmio_pf(spte);
-}
-
-static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+/* return true if reserved bit is detected on spte. */
+static bool
+walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	u64 spte = 0ull;
+	u64 sptes[PT64_ROOT_LEVEL], spte = 0ull;
+	int root, leaf;
+	bool reserved = false;
 
 	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		return spte;
+		goto exit;
 
 	walk_shadow_page_lockless_begin(vcpu);
-	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
+
+	for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+	     shadow_walk_okay(&iterator);
+	     __shadow_walk_next(&iterator, spte)) {
+		leaf = iterator.level;
+		spte = mmu_spte_get_lockless(iterator.sptep);
+
+		sptes[leaf - 1] = spte;
+
 		if (!is_shadow_present_pte(spte))
 			break;
+
+		reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+						    leaf);
+	}
+
 	walk_shadow_page_lockless_end(vcpu);
 
-	return spte;
+	if (reserved) {
+		pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
+		       __func__, addr);
+		while (root >= leaf) {
+			pr_err("------ spte 0x%llx level %d.\n",
+			       sptes[root - 1], root);
+			root--;
+		}
+	}
+exit:
+	*sptep = spte;
+	return reserved;
 }
 
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
+	bool reserved;
 
 	if (quickly_check_mmio_pf(vcpu, addr, direct))
 		return RET_MMIO_PF_EMULATE;
 
-	spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
+	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
+	if (unlikely(reserved))
+		return RET_MMIO_PF_BUG;
 
 	if (is_mmio_spte(spte)) {
 		gfn_t gfn = get_mmio_spte_gfn(spte);
@@ -3356,13 +3368,6 @@
 	}
 
 	/*
-	 * It's ok if the gva is remapped by other cpus on shadow guest,
-	 * it's a BUG if the gfn is not a mmio page.
-	 */
-	if (direct && !check_direct_spte_mmio_pf(spte))
-		return RET_MMIO_PF_BUG;
-
-	/*
 	 * If the page table is zapped by other cpus, let CPU fault again on
 	 * the address.
 	 */
@@ -3604,19 +3609,21 @@
 #include "paging_tmpl.h"
 #undef PTTYPE
 
-static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu *context)
+static void
+__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+			struct rsvd_bits_validate *rsvd_check,
+			int maxphyaddr, int level, bool nx, bool gbpages,
+			bool pse)
 {
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 	u64 exb_bit_rsvd = 0;
 	u64 gbpages_bit_rsvd = 0;
 	u64 nonleaf_bit8_rsvd = 0;
 
-	context->bad_mt_xwr = 0;
+	rsvd_check->bad_mt_xwr = 0;
 
-	if (!context->nx)
+	if (!nx)
 		exb_bit_rsvd = rsvd_bits(63, 63);
-	if (!guest_cpuid_has_gbpages(vcpu))
+	if (!gbpages)
 		gbpages_bit_rsvd = rsvd_bits(7, 7);
 
 	/*
@@ -3626,80 +3633,95 @@
 	if (guest_cpuid_is_amd(vcpu))
 		nonleaf_bit8_rsvd = rsvd_bits(8, 8);
 
-	switch (context->root_level) {
+	switch (level) {
 	case PT32_ROOT_LEVEL:
 		/* no rsvd bits for 2 level 4K page table entries */
-		context->rsvd_bits_mask[0][1] = 0;
-		context->rsvd_bits_mask[0][0] = 0;
-		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+		rsvd_check->rsvd_bits_mask[0][1] = 0;
+		rsvd_check->rsvd_bits_mask[0][0] = 0;
+		rsvd_check->rsvd_bits_mask[1][0] =
+			rsvd_check->rsvd_bits_mask[0][0];
 
-		if (!is_pse(vcpu)) {
-			context->rsvd_bits_mask[1][1] = 0;
+		if (!pse) {
+			rsvd_check->rsvd_bits_mask[1][1] = 0;
 			break;
 		}
 
 		if (is_cpuid_PSE36())
 			/* 36bits PSE 4MB page */
-			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
+			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
 		else
 			/* 32 bits PSE 4MB page */
-			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
+			rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
 		break;
 	case PT32E_ROOT_LEVEL:
-		context->rsvd_bits_mask[0][2] =
+		rsvd_check->rsvd_bits_mask[0][2] =
 			rsvd_bits(maxphyaddr, 63) |
 			rsvd_bits(5, 8) | rsvd_bits(1, 2);	/* PDPTE */
-		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 62);	/* PDE */
-		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 62); 	/* PTE */
-		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 62) |
 			rsvd_bits(13, 20);		/* large page */
-		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+		rsvd_check->rsvd_bits_mask[1][0] =
+			rsvd_check->rsvd_bits_mask[0][0];
 		break;
 	case PT64_ROOT_LEVEL:
-		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
-			nonleaf_bit8_rsvd | rsvd_bits(7, 7) | rsvd_bits(maxphyaddr, 51);
-		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-			nonleaf_bit8_rsvd | gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51);
-		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
+			nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
 			rsvd_bits(maxphyaddr, 51);
-		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
+			nonleaf_bit8_rsvd | gbpages_bit_rsvd |
 			rsvd_bits(maxphyaddr, 51);
-		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
-		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51);
+		rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
+			rsvd_bits(maxphyaddr, 51);
+		rsvd_check->rsvd_bits_mask[1][3] =
+			rsvd_check->rsvd_bits_mask[0][3];
+		rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
 			gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
 			rsvd_bits(13, 29);
-		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
+		rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
 			rsvd_bits(maxphyaddr, 51) |
 			rsvd_bits(13, 20);		/* large page */
-		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+		rsvd_check->rsvd_bits_mask[1][0] =
+			rsvd_check->rsvd_bits_mask[0][0];
 		break;
 	}
 }
 
-static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
-		struct kvm_mmu *context, bool execonly)
+static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
+				  struct kvm_mmu *context)
 {
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
+	__reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
+				cpuid_maxphyaddr(vcpu), context->root_level,
+				context->nx, guest_cpuid_has_gbpages(vcpu),
+				is_pse(vcpu));
+}
+
+static void
+__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
+			    int maxphyaddr, bool execonly)
+{
 	int pte;
 
-	context->rsvd_bits_mask[0][3] =
+	rsvd_check->rsvd_bits_mask[0][3] =
 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
-	context->rsvd_bits_mask[0][2] =
+	rsvd_check->rsvd_bits_mask[0][2] =
 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
-	context->rsvd_bits_mask[0][1] =
+	rsvd_check->rsvd_bits_mask[0][1] =
 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
-	context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
+	rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
 
 	/* large page */
-	context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
-	context->rsvd_bits_mask[1][2] =
+	rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
+	rsvd_check->rsvd_bits_mask[1][2] =
 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
-	context->rsvd_bits_mask[1][1] =
+	rsvd_check->rsvd_bits_mask[1][1] =
 		rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
-	context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+	rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
 
 	for (pte = 0; pte < 64; pte++) {
 		int rwx_bits = pte & 7;
@@ -3707,10 +3729,64 @@
 		if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||
 				rwx_bits == 0x2 || rwx_bits == 0x6 ||
 				(rwx_bits == 0x4 && !execonly))
-			context->bad_mt_xwr |= (1ull << pte);
+			rsvd_check->bad_mt_xwr |= (1ull << pte);
 	}
 }
 
+static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
+		struct kvm_mmu *context, bool execonly)
+{
+	__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
+				    cpuid_maxphyaddr(vcpu), execonly);
+}
+
+/*
+ * The page table on the host is the shadow page table for the page
+ * table in the guest or AMD nested guest; its mmu features completely
+ * follow the features of the guest.
+ */
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+{
+	__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+				boot_cpu_data.x86_phys_bits,
+				context->shadow_root_level, context->nx,
+				guest_cpuid_has_gbpages(vcpu), is_pse(vcpu));
+}
+EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
+
+/*
+ * For the direct page table on the host, use as many mmu features as
+ * possible; however, kvm currently does not do execution-protection.
+ */
+static void
+reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+				struct kvm_mmu *context)
+{
+	if (guest_cpuid_is_amd(vcpu))
+		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
+					boot_cpu_data.x86_phys_bits,
+					context->shadow_root_level, false,
+					cpu_has_gbpages, true);
+	else
+		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+					    boot_cpu_data.x86_phys_bits,
+					    false);
+
+}
+
+/*
+ * Same as the comment on reset_shadow_zero_bits_mask(), except that this
+ * is the shadow page table for an Intel nested guest.
+ */
+static void
+reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+				struct kvm_mmu *context, bool execonly)
+{
+	__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
+				    boot_cpu_data.x86_phys_bits, execonly);
+}
+
 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 				      struct kvm_mmu *mmu, bool ept)
 {
@@ -3889,6 +3965,7 @@
 
 	update_permission_bitmask(vcpu, context, false);
 	update_last_pte_bitmap(vcpu, context);
+	reset_tdp_shadow_zero_bits_mask(vcpu, context);
 }
 
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
@@ -3916,6 +3993,7 @@
 	context->base_role.smap_andnot_wp
 		= smap && !is_write_protection(vcpu);
 	context->base_role.smm = is_smm(vcpu);
+	reset_shadow_zero_bits_mask(vcpu, context);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -3939,6 +4017,7 @@
 
 	update_permission_bitmask(vcpu, context, true);
 	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 
@@ -4860,28 +4939,6 @@
 	return nr_mmu_pages;
 }
 
-int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
-{
-	struct kvm_shadow_walk_iterator iterator;
-	u64 spte;
-	int nr_sptes = 0;
-
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
-		return nr_sptes;
-
-	walk_shadow_page_lockless_begin(vcpu);
-	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
-		sptes[iterator.level-1] = spte;
-		nr_sptes++;
-		if (!is_shadow_present_pte(spte))
-			break;
-	}
-	walk_shadow_page_lockless_end(vcpu);
-
-	return nr_sptes;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
-
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_unload(vcpu);
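
The reserved-bit checks consolidated above all reduce to a mask lookup keyed on bit 7 of the entry and the paging level, plus the EPT bad-memtype/XWR table. Below is a minimal standalone sketch of that test, assuming the same [2][4] mask layout the kvm_mmu fields use; the struct and function names in the sketch are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Same formula as rsvd_bits() in mmu.h: bits s..e (inclusive) set to 1. */
    static uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

    struct rsvd_bits_validate {
            uint64_t rsvd_bits_mask[2][4];  /* [bit 7 of entry][level - 1] */
            uint64_t bad_mt_xwr;            /* EPT: disallowed memtype/XWR combos */
    };

    /* Mirrors the centralized check: a reserved bit is set, or (EPT only)
     * the low 6 bits of the entry encode a bad memtype/XWR combination. */
    static bool entry_has_rsvd_bits(const struct rsvd_bits_validate *rc,
                                    uint64_t pte, int level)
    {
            int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;

            return (pte & rc->rsvd_bits_mask[bit7][level - 1]) ||
                   (rc->bad_mt_xwr & (1ULL << low6));
    }

For example, rsvd_bits(13, 20) is 0x1fe000, and rsvd_bits(maxphyaddr, 51) flags any physical-address bit above the reported width, which is why the guest masks are built from cpuid_maxphyaddr() while the new shadow_zero_check uses boot_cpu_data.x86_phys_bits.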
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 398d21c..e4202e4 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -50,9 +50,11 @@
 	return ((1ULL << (e - s + 1)) - 1) << s;
 }
 
-int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
 
+void
+reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+
 /*
  * Return values of handle_mmio_page_fault_common:
  * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 0f67d7e..736e6ab 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -128,14 +128,6 @@
 	*access &= mask;
 }
 
-static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
-{
-	int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f;
-
-	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) |
-		((mmu->bad_mt_xwr & (1ull << low6)) != 0);
-}
-
 static inline int FNAME(is_present_gpte)(unsigned long pte)
 {
 #if PTTYPE != PTTYPE_EPT
@@ -172,7 +164,7 @@
 				  struct kvm_mmu_page *sp, u64 *spte,
 				  u64 gpte)
 {
-	if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
 		goto no_present;
 
 	if (!FNAME(is_present_gpte)(gpte))
@@ -353,8 +345,7 @@
 		if (unlikely(!FNAME(is_present_gpte)(pte)))
 			goto error;
 
-		if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
-					             walker->level))) {
+		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
 			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
 			goto error;
 		}
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 886aa25..39b9112 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -133,8 +133,6 @@
 	/* MSR_K7_PERFCTRn */
 	pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);
 	if (pmc) {
-		if (!msr_info->host_initiated)
-			data = (s64)data;
 		pmc->counter += data - pmc_read_counter(pmc);
 		return 0;
 	}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8e0c084..fdb8cb6 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1139,7 +1139,7 @@
 {
 	u64 tsc;
 
-	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+	tsc = svm_scale_tsc(vcpu, rdtsc());
 
 	return target_tsc - tsc;
 }
@@ -1173,6 +1173,10 @@
 	if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
 		return 0;
 
+	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) &&
+	    kvm_read_cr0(vcpu) & X86_CR0_CD)
+		return _PAGE_NOCACHE;
+
 	mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
 	return mtrr2protval[mtrr];
 }
@@ -1667,13 +1671,10 @@
 
 	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-	/*
-	 * re-enable caching here because the QEMU bios
-	 * does not do it - this results in some delay at
-	 * reboot
-	 */
-	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
-		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
+	/* These are emulated via page tables.  */
+	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
 	svm->vmcb->save.cr0 = cr0;
 	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
@@ -2106,6 +2107,7 @@
 	vcpu->arch.mmu.get_pdptr         = nested_svm_get_tdp_pdptr;
 	vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
 	vcpu->arch.mmu.shadow_root_level = get_npt_level();
+	reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
 	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
 }
 
@@ -3172,7 +3174,7 @@
 	switch (msr_info->index) {
 	case MSR_IA32_TSC: {
 		msr_info->data = svm->vmcb->control.tsc_offset +
-			svm_scale_tsc(vcpu, native_read_tsc());
+			svm_scale_tsc(vcpu, rdtsc());
 
 		break;
 	}
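
The two svm_scale_tsc(vcpu, rdtsc()) sites in this file are the two halves of one invariant: the guest-visible TSC is the scaled host TSC plus the VMCB offset, so the offset computed above is exactly what makes the next guest read land on target_tsc. A tiny sketch of that relationship (names illustrative):

    #include <stdint.h>

    /* guest_tsc = scale(host_tsc) + tsc_offset, so writing
     * tsc_offset = target_tsc - scale(rdtsc()) makes the guest read back
     * roughly target_tsc immediately afterwards. */
    static inline uint64_t guest_tsc_read(uint64_t scaled_host_tsc,
                                          uint64_t tsc_offset)
    {
            return scaled_host_tsc + tsc_offset;
    }

    static inline uint64_t compute_tsc_offset(uint64_t target_tsc,
                                              uint64_t scaled_host_tsc_now)
    {
            return target_tsc - scaled_host_tsc_now;
    }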
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 83b7b5c..4a4eec30 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2236,7 +2236,7 @@
 {
 	u64 host_tsc, tsc_offset;
 
-	rdtscll(host_tsc);
+	host_tsc = rdtsc();
 	tsc_offset = vmcs_read64(TSC_OFFSET);
 	return host_tsc + tsc_offset;
 }
@@ -2317,7 +2317,7 @@
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
-	return target_tsc - native_read_tsc();
+	return target_tsc - rdtsc();
 }
 
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
@@ -2443,10 +2443,10 @@
 		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
 #endif
 		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
-		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
-		CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
-		CPU_BASED_PAUSE_EXITING | CPU_BASED_TPR_SHADOW |
-		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
+		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
+		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
+		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
 	/*
 	 * We can allow some features even when not supported by the
 	 * hardware. For example, L1 can specify an MSR bitmap - and we
@@ -3423,12 +3423,12 @@
 	vmx_segment_cache_clear(to_vmx(vcpu));
 
 	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
-	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
+	if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
 		pr_debug_ratelimited("%s: tss fixup for long mode. \n",
 				     __func__);
 		vmcs_write32(GUEST_TR_AR_BYTES,
-			     (guest_tr_ar & ~AR_TYPE_MASK)
-			     | AR_TYPE_BUSY_64_TSS);
+			     (guest_tr_ar & ~VMX_AR_TYPE_MASK)
+			     | VMX_AR_TYPE_BUSY_64_TSS);
 	}
 	vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
 }
@@ -3719,7 +3719,7 @@
 		return 0;
 	else {
 		int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
-		return AR_DPL(ar);
+		return VMX_AR_DPL(ar);
 	}
 }
 
@@ -3847,11 +3847,11 @@
 
 	if (cs.unusable)
 		return false;
-	if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
+	if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
 		return false;
 	if (!cs.s)
 		return false;
-	if (cs.type & AR_TYPE_WRITEABLE_MASK) {
+	if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
 		if (cs.dpl > cs_rpl)
 			return false;
 	} else {
@@ -3901,7 +3901,7 @@
 		return false;
 	if (!var.present)
 		return false;
-	if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
+	if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
 		if (var.dpl < rpl) /* DPL < RPL */
 			return false;
 	}
@@ -5759,73 +5759,9 @@
 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
 }
 
-static u64 ept_rsvd_mask(u64 spte, int level)
-{
-	int i;
-	u64 mask = 0;
-
-	for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
-		mask |= (1ULL << i);
-
-	if (level == 4)
-		/* bits 7:3 reserved */
-		mask |= 0xf8;
-	else if (spte & (1ULL << 7))
-		/*
-		 * 1GB/2MB page, bits 29:12 or 20:12 reserved respectively,
-		 * level == 1 if the hypervisor is using the ignored bit 7.
-		 */
-		mask |= (PAGE_SIZE << ((level - 1) * 9)) - PAGE_SIZE;
-	else if (level > 1)
-		/* bits 6:3 reserved */
-		mask |= 0x78;
-
-	return mask;
-}
-
-static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
-				       int level)
-{
-	printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
-
-	/* 010b (write-only) */
-	WARN_ON((spte & 0x7) == 0x2);
-
-	/* 110b (write/execute) */
-	WARN_ON((spte & 0x7) == 0x6);
-
-	/* 100b (execute-only) and value not supported by logical processor */
-	if (!cpu_has_vmx_ept_execute_only())
-		WARN_ON((spte & 0x7) == 0x4);
-
-	/* not 000b */
-	if ((spte & 0x7)) {
-		u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
-
-		if (rsvd_bits != 0) {
-			printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
-					 __func__, rsvd_bits);
-			WARN_ON(1);
-		}
-
-		/* bits 5:3 are _not_ reserved for large page or leaf page */
-		if ((rsvd_bits & 0x38) == 0) {
-			u64 ept_mem_type = (spte & 0x38) >> 3;
-
-			if (ept_mem_type == 2 || ept_mem_type == 3 ||
-			    ept_mem_type == 7) {
-				printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
-						__func__, ept_mem_type);
-				WARN_ON(1);
-			}
-		}
-	}
-}
-
 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
-	u64 sptes[4];
-	int nr_sptes, i, ret;
+	int ret;
 	gpa_t gpa;
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
@@ -5846,13 +5782,7 @@
 		return 1;
 
 	/* It is the real ept misconfig */
-	printk(KERN_ERR "EPT: Misconfiguration.\n");
-	printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
-
-	nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
-
-	for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
-		ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
+	WARN_ON(1);
 
 	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
 	vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
@@ -6246,6 +6176,11 @@
 	return handle_nop(vcpu);
 }
 
+static int handle_monitor_trap(struct kvm_vcpu *vcpu)
+{
+	return 1;
+}
+
 static int handle_monitor(struct kvm_vcpu *vcpu)
 {
 	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
@@ -6408,8 +6343,12 @@
  */
 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 				 unsigned long exit_qualification,
-				 u32 vmx_instruction_info, gva_t *ret)
+				 u32 vmx_instruction_info, bool wr, gva_t *ret)
 {
+	gva_t off;
+	bool exn;
+	struct kvm_segment s;
+
 	/*
 	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
 	 * Execution", on an exit, vmx_instruction_info holds most of the
@@ -6434,22 +6373,63 @@
 
 	/* Addr = segment_base + offset */
 	/* offset = base + [index * scale] + displacement */
-	*ret = vmx_get_segment_base(vcpu, seg_reg);
+	off = exit_qualification; /* holds the displacement */
 	if (base_is_valid)
-		*ret += kvm_register_read(vcpu, base_reg);
+		off += kvm_register_read(vcpu, base_reg);
 	if (index_is_valid)
-		*ret += kvm_register_read(vcpu, index_reg)<<scaling;
-	*ret += exit_qualification; /* holds the displacement */
+		off += kvm_register_read(vcpu, index_reg)<<scaling;
+	vmx_get_segment(vcpu, &s, seg_reg);
+	*ret = s.base + off;
 
 	if (addr_size == 1) /* 32 bit */
 		*ret &= 0xffffffff;
 
-	/*
-	 * TODO: throw #GP (and return 1) in various cases that the VM*
-	 * instructions require it - e.g., offset beyond segment limit,
-	 * unusable or unreadable/unwritable segment, non-canonical 64-bit
-	 * address, and so on. Currently these are not checked.
-	 */
+	/* Checks for #GP/#SS exceptions. */
+	exn = false;
+	if (is_protmode(vcpu)) {
+		/* Protected mode: apply checks for segment validity in the
+		 * following order:
+		 * - segment type check (#GP(0) may be thrown)
+		 * - usability check (#GP(0)/#SS(0))
+		 * - limit check (#GP(0)/#SS(0))
+		 */
+		if (wr)
+			/* #GP(0) if the destination operand is located in a
+			 * read-only data segment or any code segment.
+			 */
+			exn = ((s.type & 0xa) == 0 || (s.type & 8));
+		else
+			/* #GP(0) if the source operand is located in an
+			 * execute-only code segment
+			 */
+			exn = ((s.type & 0xa) == 8);
+	}
+	if (exn) {
+		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+		return 1;
+	}
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check done in long mode.
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
+		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+		 */
+		exn = (s.unusable != 0);
+		/* Protected mode: #GP(0)/#SS(0) if the memory
+		 * operand is outside the segment limit.
+		 */
+		exn = exn || (off + sizeof(u64) > s.limit);
+	}
+	if (exn) {
+		kvm_queue_exception_e(vcpu,
+				      seg_reg == VCPU_SREG_SS ?
+						SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+
 	return 0;
 }
 
@@ -6471,7 +6451,7 @@
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
@@ -6999,7 +6979,7 @@
 			field_value);
 	} else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, &gva))
+				vmx_instruction_info, true, &gva))
 			return 1;
 		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
@@ -7036,7 +7016,7 @@
 			(((vmx_instruction_info) >> 3) & 0xf));
 	else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, &gva))
+				vmx_instruction_info, false, &gva))
 			return 1;
 		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
 			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
@@ -7128,7 +7108,7 @@
 		return 1;
 
 	if (get_vmx_mem_address(vcpu, exit_qualification,
-			vmx_instruction_info, &vmcs_gva))
+			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
 	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
@@ -7184,7 +7164,7 @@
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmx_instruction_info, &gva))
+			vmx_instruction_info, false, &gva))
 		return 1;
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
 				sizeof(operand), &e)) {
@@ -7282,6 +7262,7 @@
 	[EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
 	[EXIT_REASON_MWAIT_INSTRUCTION]	      = handle_mwait,
+	[EXIT_REASON_MONITOR_TRAP_FLAG]       = handle_monitor_trap,
 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_monitor,
 	[EXIT_REASON_INVEPT]                  = handle_invept,
 	[EXIT_REASON_INVVPID]                 = handle_invvpid,
@@ -7542,6 +7523,8 @@
 		return true;
 	case EXIT_REASON_MWAIT_INSTRUCTION:
 		return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+	case EXIT_REASON_MONITOR_TRAP_FLAG:
+		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
 	case EXIT_REASON_MONITOR_INSTRUCTION:
 		return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
 	case EXIT_REASON_PAUSE_INSTRUCTION:
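
get_vmx_mem_address() now both computes the operand address and validates it. Separated from the segment/register plumbing, the arithmetic alone looks roughly like the sketch below (a simplified model, not kernel code); the new #GP/#SS checks are then applied to this address and to the raw offset:

    #include <stdbool.h>
    #include <stdint.h>

    /* Operand address of a VMX instruction, per the decoding above:
     * displacement (exit qualification) + optional base + optional scaled
     * index, relative to the segment base, truncated when the address
     * size is 32-bit. */
    static uint64_t vmx_operand_addr(uint64_t displacement, uint64_t seg_base,
                                     bool base_valid, uint64_t base_reg,
                                     bool index_valid, uint64_t index_reg,
                                     unsigned int scaling, bool addr_size_32)
    {
            uint64_t off = displacement;
            uint64_t addr;

            if (base_valid)
                    off += base_reg;
            if (index_valid)
                    off += index_reg << scaling;    /* scaling is 0..3 */

            addr = seg_base + off;
            if (addr_size_32)
                    addr &= 0xffffffff;
            return addr;
    }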
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f0f6ec..1e7e76e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -29,6 +29,7 @@
 #include "cpuid.h"
 #include "assigned-dev.h"
 #include "pmu.h"
+#include "hyperv.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -221,11 +222,9 @@
 void kvm_define_shared_msr(unsigned slot, u32 msr)
 {
 	BUG_ON(slot >= KVM_NR_SHARED_MSRS);
+	shared_msrs_global.msrs[slot] = msr;
 	if (slot >= shared_msrs_global.nr)
 		shared_msrs_global.nr = slot + 1;
-	shared_msrs_global.msrs[slot] = msr;
-	/* we need ensured the shared_msr_global have been updated */
-	smp_wmb();
 }
 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
 
@@ -526,7 +525,8 @@
 	}
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
 		if (is_present_gpte(pdpte[i]) &&
-		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
+		    (pdpte[i] &
+		     vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
 			ret = 0;
 			goto out;
 		}
@@ -949,6 +949,8 @@
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
 	HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+	HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+	HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
 	MSR_KVM_PV_EOI_EN,
 
@@ -1217,11 +1219,6 @@
 		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
 }
 
-static inline u64 get_kernel_ns(void)
-{
-	return ktime_get_boot_ns();
-}
-
 #ifdef CONFIG_X86_64
 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
 #endif
@@ -1444,20 +1441,8 @@
 
 static cycle_t read_tsc(void)
 {
-	cycle_t ret;
-	u64 last;
-
-	/*
-	 * Empirically, a fence (of type that depends on the CPU)
-	 * before rdtsc is enough to ensure that rdtsc is ordered
-	 * with respect to loads.  The various CPU manuals are unclear
-	 * as to whether rdtsc can be reordered with later loads,
-	 * but no one has ever seen it happen.
-	 */
-	rdtsc_barrier();
-	ret = (cycle_t)vget_cycles();
-
-	last = pvclock_gtod_data.clock.cycle_last;
+	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 last = pvclock_gtod_data.clock.cycle_last;
 
 	if (likely(ret >= last))
 		return ret;
@@ -1646,7 +1631,7 @@
 		return 1;
 	}
 	if (!use_master_clock) {
-		host_tsc = native_read_tsc();
+		host_tsc = rdtsc();
 		kernel_ns = get_kernel_ns();
 	}
 
@@ -1869,123 +1854,6 @@
 	return r;
 }
 
-static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
-{
-	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
-}
-
-static bool kvm_hv_msr_partition_wide(u32 msr)
-{
-	bool r = false;
-	switch (msr) {
-	case HV_X64_MSR_GUEST_OS_ID:
-	case HV_X64_MSR_HYPERCALL:
-	case HV_X64_MSR_REFERENCE_TSC:
-	case HV_X64_MSR_TIME_REF_COUNT:
-		r = true;
-		break;
-	}
-
-	return r;
-}
-
-static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
-{
-	struct kvm *kvm = vcpu->kvm;
-
-	switch (msr) {
-	case HV_X64_MSR_GUEST_OS_ID:
-		kvm->arch.hv_guest_os_id = data;
-		/* setting guest os id to zero disables hypercall page */
-		if (!kvm->arch.hv_guest_os_id)
-			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
-		break;
-	case HV_X64_MSR_HYPERCALL: {
-		u64 gfn;
-		unsigned long addr;
-		u8 instructions[4];
-
-		/* if guest os id is not set hypercall should remain disabled */
-		if (!kvm->arch.hv_guest_os_id)
-			break;
-		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
-			kvm->arch.hv_hypercall = data;
-			break;
-		}
-		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
-		addr = gfn_to_hva(kvm, gfn);
-		if (kvm_is_error_hva(addr))
-			return 1;
-		kvm_x86_ops->patch_hypercall(vcpu, instructions);
-		((unsigned char *)instructions)[3] = 0xc3; /* ret */
-		if (__copy_to_user((void __user *)addr, instructions, 4))
-			return 1;
-		kvm->arch.hv_hypercall = data;
-		mark_page_dirty(kvm, gfn);
-		break;
-	}
-	case HV_X64_MSR_REFERENCE_TSC: {
-		u64 gfn;
-		HV_REFERENCE_TSC_PAGE tsc_ref;
-		memset(&tsc_ref, 0, sizeof(tsc_ref));
-		kvm->arch.hv_tsc_page = data;
-		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
-			break;
-		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-		if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
-			&tsc_ref, sizeof(tsc_ref)))
-			return 1;
-		mark_page_dirty(kvm, gfn);
-		break;
-	}
-	default:
-		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
-			    "data 0x%llx\n", msr, data);
-		return 1;
-	}
-	return 0;
-}
-
-static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
-{
-	switch (msr) {
-	case HV_X64_MSR_APIC_ASSIST_PAGE: {
-		u64 gfn;
-		unsigned long addr;
-
-		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
-			vcpu->arch.hv_vapic = data;
-			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
-				return 1;
-			break;
-		}
-		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
-		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
-		if (kvm_is_error_hva(addr))
-			return 1;
-		if (__clear_user((void __user *)addr, PAGE_SIZE))
-			return 1;
-		vcpu->arch.hv_vapic = data;
-		kvm_vcpu_mark_page_dirty(vcpu, gfn);
-		if (kvm_lapic_enable_pv_eoi(vcpu, gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
-			return 1;
-		break;
-	}
-	case HV_X64_MSR_EOI:
-		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
-	case HV_X64_MSR_ICR:
-		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
-	case HV_X64_MSR_TPR:
-		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
-	default:
-		vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
-			    "data 0x%llx\n", msr, data);
-		return 1;
-	}
-
-	return 0;
-}
-
 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
 	gpa_t gpa = data & ~0x3f;
@@ -2224,15 +2092,10 @@
 		 */
 		break;
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
-		if (kvm_hv_msr_partition_wide(msr)) {
-			int r;
-			mutex_lock(&vcpu->kvm->lock);
-			r = set_msr_hyperv_pw(vcpu, msr, data);
-			mutex_unlock(&vcpu->kvm->lock);
-			return r;
-		} else
-			return set_msr_hyperv(vcpu, msr, data);
-		break;
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+	case HV_X64_MSR_CRASH_CTL:
+		return kvm_hv_set_msr_common(vcpu, msr, data,
+					     msr_info->host_initiated);
 	case MSR_IA32_BBL_CR_CTL3:
 		/* Drop writes to this legacy MSR -- see rdmsr
 		 * counterpart for further detail.
@@ -2315,68 +2178,6 @@
 	return 0;
 }
 
-static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
-{
-	u64 data = 0;
-	struct kvm *kvm = vcpu->kvm;
-
-	switch (msr) {
-	case HV_X64_MSR_GUEST_OS_ID:
-		data = kvm->arch.hv_guest_os_id;
-		break;
-	case HV_X64_MSR_HYPERCALL:
-		data = kvm->arch.hv_hypercall;
-		break;
-	case HV_X64_MSR_TIME_REF_COUNT: {
-		data =
-		     div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
-		break;
-	}
-	case HV_X64_MSR_REFERENCE_TSC:
-		data = kvm->arch.hv_tsc_page;
-		break;
-	default:
-		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
-		return 1;
-	}
-
-	*pdata = data;
-	return 0;
-}
-
-static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
-{
-	u64 data = 0;
-
-	switch (msr) {
-	case HV_X64_MSR_VP_INDEX: {
-		int r;
-		struct kvm_vcpu *v;
-		kvm_for_each_vcpu(r, v, vcpu->kvm) {
-			if (v == vcpu) {
-				data = r;
-				break;
-			}
-		}
-		break;
-	}
-	case HV_X64_MSR_EOI:
-		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
-	case HV_X64_MSR_ICR:
-		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
-	case HV_X64_MSR_TPR:
-		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
-	case HV_X64_MSR_APIC_ASSIST_PAGE:
-		data = vcpu->arch.hv_vapic;
-		break;
-	default:
-		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
-		return 1;
-	}
-	*pdata = data;
-	return 0;
-}
-
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	switch (msr_info->index) {
@@ -2493,14 +2294,10 @@
 		msr_info->data = 0x20000000;
 		break;
 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
-		if (kvm_hv_msr_partition_wide(msr_info->index)) {
-			int r;
-			mutex_lock(&vcpu->kvm->lock);
-			r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
-			mutex_unlock(&vcpu->kvm->lock);
-			return r;
-		} else
-			return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
+	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+	case HV_X64_MSR_CRASH_CTL:
+		return kvm_hv_get_msr_common(vcpu,
+					     msr_info->index, &msr_info->data);
 		break;
 	case MSR_IA32_BBL_CR_CTL3:
 		/* This legacy MSR exists but isn't fully documented in current
@@ -2651,6 +2448,7 @@
 	case KVM_CAP_TSC_DEADLINE_TIMER:
 	case KVM_CAP_ENABLE_CAP_VM:
 	case KVM_CAP_DISABLE_QUIRKS:
+	case KVM_CAP_SET_BOOT_CPU_ID:
 #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
 	case KVM_CAP_ASSIGN_DEV_IRQ:
 	case KVM_CAP_PCI_2_3:
@@ -2810,7 +2608,7 @@
 
 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
-				native_read_tsc() - vcpu->arch.last_host_tsc;
+				rdtsc() - vcpu->arch.last_host_tsc;
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
 		if (check_tsc_unstable()) {
@@ -2838,7 +2636,7 @@
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
-	vcpu->arch.last_host_tsc = native_read_tsc();
+	vcpu->arch.last_host_tsc = rdtsc();
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
@@ -3817,30 +3615,25 @@
 			r = kvm_ioapic_init(kvm);
 			if (r) {
 				mutex_lock(&kvm->slots_lock);
-				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
-							  &vpic->dev_master);
-				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
-							  &vpic->dev_slave);
-				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
-							  &vpic->dev_eclr);
+				kvm_destroy_pic(vpic);
 				mutex_unlock(&kvm->slots_lock);
-				kfree(vpic);
 				goto create_irqchip_unlock;
 			}
 		} else
 			goto create_irqchip_unlock;
-		smp_wmb();
-		kvm->arch.vpic = vpic;
-		smp_wmb();
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
 			mutex_lock(&kvm->slots_lock);
 			mutex_lock(&kvm->irq_lock);
 			kvm_ioapic_destroy(kvm);
-			kvm_destroy_pic(kvm);
+			kvm_destroy_pic(vpic);
 			mutex_unlock(&kvm->irq_lock);
 			mutex_unlock(&kvm->slots_lock);
+			goto create_irqchip_unlock;
 		}
+		/* Write kvm->irq_routing before kvm->arch.vpic.  */
+		smp_wmb();
+		kvm->arch.vpic = vpic;
 	create_irqchip_unlock:
 		mutex_unlock(&kvm->lock);
 		break;
@@ -3967,6 +3760,15 @@
 		r = kvm_vm_ioctl_reinject(kvm, &control);
 		break;
 	}
+	case KVM_SET_BOOT_CPU_ID:
+		r = 0;
+		mutex_lock(&kvm->lock);
+		if (atomic_read(&kvm->online_vcpus) != 0)
+			r = -EBUSY;
+		else
+			kvm->arch.bsp_vcpu_id = arg;
+		mutex_unlock(&kvm->lock);
+		break;
 	case KVM_XEN_HVM_CONFIG: {
 		r = -EFAULT;
 		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
@@ -5882,66 +5684,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
-int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
-{
-	u64 param, ingpa, outgpa, ret;
-	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
-	bool fast, longmode;
-
-	/*
-	 * hypercall generates UD from non zero cpl and real mode
-	 * per HYPER-V spec
-	 */
-	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
-		kvm_queue_exception(vcpu, UD_VECTOR);
-		return 0;
-	}
-
-	longmode = is_64_bit_mode(vcpu);
-
-	if (!longmode) {
-		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
-			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
-		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
-			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
-		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
-			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
-	}
-#ifdef CONFIG_X86_64
-	else {
-		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
-		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
-		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
-	}
-#endif
-
-	code = param & 0xffff;
-	fast = (param >> 16) & 0x1;
-	rep_cnt = (param >> 32) & 0xfff;
-	rep_idx = (param >> 48) & 0xfff;
-
-	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
-
-	switch (code) {
-	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu);
-		break;
-	default:
-		res = HV_STATUS_INVALID_HYPERCALL_CODE;
-		break;
-	}
-
-	ret = res | (((u64)rep_done & 0xfff) << 32);
-	if (longmode) {
-		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
-	} else {
-		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
-		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
-	}
-
-	return 1;
-}
-
 /*
  * kvm_pv_kick_cpu_op:  Kick a vcpu.
  *
@@ -6518,6 +6260,12 @@
 			vcpu_scan_ioapic(vcpu);
 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
 			kvm_vcpu_reload_apic_access_page(vcpu);
+		if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
+			vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+			vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
+			r = 0;
+			goto out;
+		}
 	}
 
 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
@@ -6627,7 +6375,7 @@
 		hw_breakpoint_restore();
 
 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
-							   native_read_tsc());
+							   rdtsc());
 
 	vcpu->mode = OUTSIDE_GUEST_MODE;
 	smp_wmb();
@@ -7436,7 +7184,7 @@
 	if (ret != 0)
 		return ret;
 
-	local_tsc = native_read_tsc();
+	local_tsc = rdtsc();
 	stable = !check_tsc_unstable();
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -7540,6 +7288,17 @@
 	kvm_x86_ops->check_processor_compatibility(rtn);
 }
 
+bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
+{
+	return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
+
+bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
+}
+
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 {
 	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
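
With KVM_CAP_SET_BOOT_CPU_ID advertised and the ioctl handled above, userspace can designate a non-zero vcpu as the BSP. A minimal userspace sketch, assuming the usual KVM fd conventions (vm_fd from KVM_CREATE_VM, error handling elided; the capability check could equally be done on the /dev/kvm fd):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Must be issued before any vcpus are created: the handler above
     * returns -EBUSY once online_vcpus is non-zero. */
    static int set_boot_cpu(int vm_fd, unsigned long bsp_vcpu_id)
    {
            if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SET_BOOT_CPU_ID) <= 0)
                    return -1;      /* capability not available on this kernel */
            return ioctl(vm_fd, KVM_SET_BOOT_CPU_ID, bsp_vcpu_id);
    }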
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 0ca2f3e..2f822cd 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@
 	return kvm_register_write(vcpu, reg, val);
 }
 
+static inline u64 get_kernel_ns(void)
+{
+	return ktime_get_boot_ns();
+}
+
 static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
 {
 	return !(kvm->arch.disabled_quirks & quirk);
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index f2dc08c..161804d 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -835,16 +835,46 @@
 	.irq_unmask	= enable_lguest_irq,
 };
 
+/*
+ * Interrupt descriptors are allocated as-needed, but low-numbered ones are
+ * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
+ * tells us the irq is already used: other errors (ie. ENOMEM) we take
+ * seriously.
+ */
+static int lguest_setup_irq(unsigned int irq)
+{
+	struct irq_desc *desc;
+	int err;
+
+	/* Returns -ve error or vector number. */
+	err = irq_alloc_desc_at(irq, 0);
+	if (err < 0 && err != -EEXIST)
+		return err;
+
+	/*
+	 * Tell the Linux infrastructure that the interrupt is
+	 * controlled by our level-based lguest interrupt controller.
+	 */
+	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
+				      handle_level_irq, "level");
+
+	/* Some systems map "vectors" to interrupts weirdly.  Not us! */
+	desc = irq_to_desc(irq);
+	__this_cpu_write(vector_irq[FIRST_EXTERNAL_VECTOR + irq], desc);
+	return 0;
+}
+
 static int lguest_enable_irq(struct pci_dev *dev)
 {
+	int err;
 	u8 line = 0;
 
 	/* We literally use the PCI interrupt line as the irq number. */
 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
-	irq_set_chip_and_handler_name(line, &lguest_irq_controller,
-				      handle_level_irq, "level");
-	dev->irq = line;
-	return 0;
+	err = lguest_setup_irq(line);
+	if (!err)
+		dev->irq = line;
+	return err;
 }
 
 /* We don't do hotplug PCI, so this shouldn't be called. */
@@ -855,17 +885,13 @@
 
 /*
  * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
- * interrupt (except 128, which is used for system calls), and then tells the
- * Linux infrastructure that each interrupt is controlled by our level-based
- * lguest interrupt controller.
+ * interrupt (except 128, which is used for system calls).
  */
 static void __init lguest_init_IRQ(void)
 {
 	unsigned int i;
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
-		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
-		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != IA32_SYSCALL_VECTOR)
 			set_intr_gate(i, irq_entries_start +
 					8 * (i - FIRST_EXTERNAL_VECTOR));
@@ -879,26 +905,6 @@
 }
 
 /*
- * Interrupt descriptors are allocated as-needed, but low-numbered ones are
- * reserved by the generic x86 code.  So we ignore irq_alloc_desc_at if it
- * tells us the irq is already used: other errors (ie. ENOMEM) we take
- * seriously.
- */
-int lguest_setup_irq(unsigned int irq)
-{
-	int err;
-
-	/* Returns -ve error or vector number. */
-	err = irq_alloc_desc_at(irq, 0);
-	if (err < 0 && err != -EEXIST)
-		return err;
-
-	irq_set_chip_and_handler_name(irq, &lguest_irq_controller,
-				      handle_level_irq, "level");
-	return 0;
-}
-
-/*
  * Time.
  *
  * It would be far better for everyone if the Guest had its own clock, but
@@ -985,23 +991,11 @@
 	return 0;
 }
 
-static void lguest_clockevent_set_mode(enum clock_event_mode mode,
-                                      struct clock_event_device *evt)
+static int lguest_clockevent_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* A 0 argument shuts the clock down. */
-		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* This is what we expect. */
-		break;
-	case CLOCK_EVT_MODE_PERIODIC:
-		BUG();
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	/* A 0 argument shuts the clock down. */
+	hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0);
+	return 0;
 }
 
 /* This describes our primitive timer chip. */
@@ -1009,7 +1003,7 @@
 	.name                   = "lguest",
 	.features               = CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event         = lguest_clockevent_set_next_event,
-	.set_mode               = lguest_clockevent_set_mode,
+	.set_state_shutdown	= lguest_clockevent_shutdown,
 	.rating                 = INT_MAX,
 	.mult                   = 1,
 	.shift                  = 0,
@@ -1040,7 +1034,8 @@
 static void lguest_time_init(void)
 {
 	/* Set up the timer interrupt (0) to go to our simple timer routine */
-	lguest_setup_irq(0);
+	if (lguest_setup_irq(0) != 0)
+		panic("Could not set up timer irq");
 	irq_set_handler(0, lguest_time_irq);
 
 	clocksource_register_hz(&lguest_clock, NSEC_PER_SEC);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 39d6a3d..e912b2f 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -20,6 +20,7 @@
 #include <asm/processor.h>
 #include <asm/delay.h>
 #include <asm/timer.h>
+#include <asm/mwait.h>
 
 #ifdef CONFIG_SMP
 # include <asm/smp.h>
@@ -49,16 +50,14 @@
 /* TSC based delay: */
 static void delay_tsc(unsigned long __loops)
 {
-	u32 bclock, now, loops = __loops;
+	u64 bclock, now, loops = __loops;
 	int cpu;
 
 	preempt_disable();
 	cpu = smp_processor_id();
-	rdtsc_barrier();
-	rdtscl(bclock);
+	bclock = rdtsc_ordered();
 	for (;;) {
-		rdtsc_barrier();
-		rdtscl(now);
+		now = rdtsc_ordered();
 		if ((now - bclock) >= loops)
 			break;
 
@@ -79,14 +78,51 @@
 		if (unlikely(cpu != smp_processor_id())) {
 			loops -= (now - bclock);
 			cpu = smp_processor_id();
-			rdtsc_barrier();
-			rdtscl(bclock);
+			bclock = rdtsc_ordered();
 		}
 	}
 	preempt_enable();
 }
 
 /*
+ * On some AMD platforms, MWAITX has a configurable 32-bit timer that
+ * counts at TSC frequency. The input value sets the timer count, and the
+ * wait exits when the timer expires.
+ */
+static void delay_mwaitx(unsigned long __loops)
+{
+	u64 start, end, delay, loops = __loops;
+
+	start = rdtsc_ordered();
+
+	for (;;) {
+		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
+
+		/*
+		 * Use cpu_tss as a cacheline-aligned, seldom-accessed
+		 * per-cpu variable as the monitor target.
+		 */
+		__monitorx(this_cpu_ptr(&cpu_tss), 0, 0);
+
+		/*
+		 * AMD, like Intel, supports the EAX hint; EAX=0xf means
+		 * "do not enter any deep C-state", and we use it here in
+		 * delay() to minimize wakeup latency.
+		 */
+		__mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
+
+		end = rdtsc_ordered();
+
+		if (loops <= end - start)
+			break;
+
+		loops -= end - start;
+
+		start = end;
+	}
+}
+
+/*
  * Since we calibrate only once at boot, this
  * function should be set once at boot and not changed
  */
@@ -94,13 +130,19 @@
 
 void use_tsc_delay(void)
 {
-	delay_fn = delay_tsc;
+	if (delay_fn == delay_loop)
+		delay_fn = delay_tsc;
+}
+
+void use_mwaitx_delay(void)
+{
+	delay_fn = delay_mwaitx;
 }
 
 int read_current_timer(unsigned long *timer_val)
 {
 	if (delay_fn == delay_tsc) {
-		rdtscll(*timer_val);
+		*timer_val = rdtsc();
 		return 0;
 	}
 	return -1;
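
The MWAITX path follows a general pattern: wait in hardware-sized chunks and keep re-checking the TSC, since the MWAITX timer is only 32 bits wide and the instruction may wake early (interrupts, stores to the monitored line). A sketch of that control flow with the MONITORX/MWAITX pair hidden behind a hypothetical helper:

    #include <stdint.h>

    /* wait_up_to() stands in for __monitorx()+__mwaitx() with the timer
     * enabled; it may return before 'chunk' TSC cycles have elapsed. */
    static void chunked_tsc_delay(uint64_t loops, uint64_t max_chunk,
                                  uint64_t (*read_tsc)(void),
                                  void (*wait_up_to)(uint64_t chunk))
    {
            uint64_t start = read_tsc(), end;

            for (;;) {
                    wait_up_to(loops < max_chunk ? loops : max_chunk);

                    end = read_tsc();
                    if (end - start >= loops)
                            break;                  /* full delay elapsed */

                    loops -= end - start;           /* account for the partial wait */
                    start = end;
            }
    }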
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 8300db7..8db2659 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,6 +20,7 @@
 #include <linux/stddef.h>
 
 #include <asm/uaccess.h>
+#include <asm/vm86.h>
 
 #include "fpu_system.h"
 #include "exception.h"
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9dc9098..eef44d9 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -20,6 +20,7 @@
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
 #include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
 #include <asm/vsyscall.h>		/* emulate_vsyscall		*/
+#include <asm/vm86.h>			/* struct vm86			*/
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
@@ -301,14 +302,16 @@
 check_v8086_mode(struct pt_regs *regs, unsigned long address,
 		 struct task_struct *tsk)
 {
+#ifdef CONFIG_VM86
 	unsigned long bit;
 
-	if (!v8086_mode(regs))
+	if (!v8086_mode(regs) || !tsk->thread.vm86)
 		return;
 
 	bit = (address - 0xA0000) >> PAGE_SHIFT;
 	if (bit < 32)
-		tsk->thread.screen_bitmap |= 1 << bit;
+		tsk->thread.vm86->screen_bitmap |= 1 << bit;
+#endif
 }
 
 static bool low_pfn(unsigned long pfn)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8533b46..1d8a83d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -30,8 +30,11 @@
 /*
  * Tables translating between page_cache_type_t and pte encoding.
  *
- * Minimal supported modes are defined statically, they are modified
- * during bootup if more supported cache modes are available.
+ * The default values are defined statically as the minimal supported mode;
+ * WC and WT fall back to UC-.  pat_init() updates these values to support
+ * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
+ * for the details.  Note that __early_ioremap(), used during early boot,
+ * takes pgprot_t (pte encoding) and does not use these tables.
  *
  *   Index into __cachemode2pte_tbl[] is the cachemode.
  *
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8340e45..68aec42 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -137,6 +137,7 @@
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
+	pmd_idx = pmd_index(vaddr);
 
 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e1840f3..9ce5da2 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,20 +12,6 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_X_MAX];
 
-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
-static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
-static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
-
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
-
 static int __init map_range(struct range *range)
 {
 	unsigned long start;
@@ -62,106 +48,6 @@
 	}
 }
 
-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
-				unsigned long end)
-{
-	pte_t *pte = pte_offset_kernel(pmd, addr);
-
-	while (addr + PAGE_SIZE <= end) {
-		WARN_ON(!pte_none(*pte));
-		set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
-					| __PAGE_KERNEL_RO));
-		addr += PAGE_SIZE;
-		pte = pte_offset_kernel(pmd, addr);
-	}
-	return 0;
-}
-
-static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
-				unsigned long end)
-{
-	int ret = 0;
-	pmd_t *pmd = pmd_offset(pud, addr);
-
-	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
-		WARN_ON(!pmd_none(*pmd));
-		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
-					| _KERNPG_TABLE));
-		addr += PMD_SIZE;
-		pmd = pmd_offset(pud, addr);
-	}
-	if (addr < end) {
-		if (pmd_none(*pmd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-			if (!p)
-				return -ENOMEM;
-			set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
-		}
-		ret = zero_pte_populate(pmd, addr, end);
-	}
-	return ret;
-}
-
-
-static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
-				unsigned long end)
-{
-	int ret = 0;
-	pud_t *pud = pud_offset(pgd, addr);
-
-	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
-		WARN_ON(!pud_none(*pud));
-		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
-					| _KERNPG_TABLE));
-		addr += PUD_SIZE;
-		pud = pud_offset(pgd, addr);
-	}
-
-	if (addr < end) {
-		if (pud_none(*pud)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-			if (!p)
-				return -ENOMEM;
-			set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
-		}
-		ret = zero_pmd_populate(pud, addr, end);
-	}
-	return ret;
-}
-
-static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
-{
-	int ret = 0;
-	pgd_t *pgd = pgd_offset_k(addr);
-
-	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
-		WARN_ON(!pgd_none(*pgd));
-		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
-					| _KERNPG_TABLE));
-		addr += PGDIR_SIZE;
-		pgd = pgd_offset_k(addr);
-	}
-
-	if (addr < end) {
-		if (pgd_none(*pgd)) {
-			void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
-			if (!p)
-				return -ENOMEM;
-			set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
-		}
-		ret = zero_pud_populate(pgd, addr, end);
-	}
-	return ret;
-}
-
-
-static void __init populate_zero_shadow(const void *start, const void *end)
-{
-	if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
-		panic("kasan: unable to map zero shadow!");
-}
-
-
 #ifdef CONFIG_KASAN_INLINE
 static int kasan_die_handler(struct notifier_block *self,
 			     unsigned long val,
@@ -213,7 +99,7 @@
 
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
-	populate_zero_shadow((void *)KASAN_SHADOW_START,
+	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_X_MAX; i++) {
@@ -223,14 +109,15 @@
 		if (map_range(&pfn_mapped[i]))
 			panic("kasan: unable to allocate shadow!");
 	}
-	populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
-			kasan_mem_to_shadow((void *)__START_KERNEL_map));
+	kasan_populate_zero_shadow(
+		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+		kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
 	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
 			(unsigned long)kasan_mem_to_shadow(_end),
 			NUMA_NO_NODE);
 
-	populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
 
 	memset(kasan_zero_page, 0, PAGE_SIZE);
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 8ff686a..5f169d5 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -8,6 +8,7 @@
 #include <linux/kthread.h>
 #include <linux/random.h>
 #include <linux/kernel.h>
+#include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 
@@ -256,5 +257,4 @@
 
 	return 0;
 }
-
-module_init(start_pageattr_test);
+device_initcall(start_pageattr_test);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 727158c..2c44c07 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -4,7 +4,6 @@
  */
 #include <linux/highmem.h>
 #include <linux/bootmem.h>
-#include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index be2e7a2..70efcd0 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -246,7 +246,7 @@
  *     goto out;
  *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
  *     goto out;
- *   prog = array->prog[index];
+ *   prog = array->ptrs[index];
  *   if (prog == NULL)
  *     goto out;
  *   goto *(prog->bpf_func + prologue_size);
@@ -284,9 +284,9 @@
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
 	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
 
-	/* prog = array->prog[index]; */
+	/* prog = array->ptrs[index]; */
 	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
-		    offsetof(struct bpf_array, prog));
+		    offsetof(struct bpf_array, ptrs));
 	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */
 
 	/* if (prog == NULL)
@@ -315,6 +315,26 @@
 	*pprog = prog;
 }
 
+
+static void emit_load_skb_data_hlen(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	/* r9d = skb->len - skb->data_len (headlen)
+	 * r10 = skb->data
+	 */
+	/* mov %r9d, off32(%rdi) */
+	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
+
+	/* sub %r9d, off32(%rdi) */
+	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
+
+	/* mov %r10, off32(%rdi) */
+	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
+	*pprog = prog;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
@@ -329,36 +349,8 @@
 
 	emit_prologue(&prog);
 
-	if (seen_ld_abs) {
-		/* r9d : skb->len - skb->data_len (headlen)
-		 * r10 : skb->data
-		 */
-		if (is_imm8(offsetof(struct sk_buff, len)))
-			/* mov %r9d, off8(%rdi) */
-			EMIT4(0x44, 0x8b, 0x4f,
-			      offsetof(struct sk_buff, len));
-		else
-			/* mov %r9d, off32(%rdi) */
-			EMIT3_off32(0x44, 0x8b, 0x8f,
-				    offsetof(struct sk_buff, len));
-
-		if (is_imm8(offsetof(struct sk_buff, data_len)))
-			/* sub %r9d, off8(%rdi) */
-			EMIT4(0x44, 0x2b, 0x4f,
-			      offsetof(struct sk_buff, data_len));
-		else
-			EMIT3_off32(0x44, 0x2b, 0x8f,
-				    offsetof(struct sk_buff, data_len));
-
-		if (is_imm8(offsetof(struct sk_buff, data)))
-			/* mov %r10, off8(%rdi) */
-			EMIT4(0x4c, 0x8b, 0x57,
-			      offsetof(struct sk_buff, data));
-		else
-			/* mov %r10, off32(%rdi) */
-			EMIT3_off32(0x4c, 0x8b, 0x97,
-				    offsetof(struct sk_buff, data));
-	}
+	if (seen_ld_abs)
+		emit_load_skb_data_hlen(&prog);
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
@@ -367,6 +359,7 @@
 		u8 b1 = 0, b2 = 0, b3 = 0;
 		s64 jmp_offset;
 		u8 jmp_cond;
+		bool reload_skb_data;
 		int ilen;
 		u8 *func;
 
@@ -818,12 +811,18 @@
 			func = (u8 *) __bpf_call_base + imm32;
 			jmp_offset = func - (image + addrs[i]);
 			if (seen_ld_abs) {
-				EMIT2(0x41, 0x52); /* push %r10 */
-				EMIT2(0x41, 0x51); /* push %r9 */
-				/* need to adjust jmp offset, since
-				 * pop %r9, pop %r10 take 4 bytes after call insn
-				 */
-				jmp_offset += 4;
+				reload_skb_data = bpf_helper_changes_skb_data(func);
+				if (reload_skb_data) {
+					EMIT1(0x57); /* push %rdi */
+					jmp_offset += 22; /* pop, mov, sub, mov */
+				} else {
+					EMIT2(0x41, 0x52); /* push %r10 */
+					EMIT2(0x41, 0x51); /* push %r9 */
+					/* need to adjust jmp offset, since
+					 * pop %r9, pop %r10 take 4 bytes after call insn
+					 */
+					jmp_offset += 4;
+				}
 			}
 			if (!imm32 || !is_simm32(jmp_offset)) {
 				pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -832,8 +831,13 @@
 			}
 			EMIT1_off32(0xE8, jmp_offset);
 			if (seen_ld_abs) {
-				EMIT2(0x41, 0x59); /* pop %r9 */
-				EMIT2(0x41, 0x5A); /* pop %r10 */
+				if (reload_skb_data) {
+					EMIT1(0x5F); /* pop %rdi */
+					emit_load_skb_data_hlen(&prog);
+				} else {
+					EMIT2(0x41, 0x59); /* pop %r9 */
+					EMIT2(0x41, 0x5A); /* pop %r10 */
+				}
 			}
 			break;
 
@@ -1099,7 +1103,7 @@
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(prog->len, proglen, 0, image);
+		bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
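
The tail-call comment above (now pointing at array->ptrs[]) describes the sequence the JIT emits; written out as plain C it is roughly the following, illustrative only — the emitted code jumps straight to prog->bpf_func + prologue_size rather than returning a pointer:

    /* Returns the program to tail-call into, or NULL for "goto out"
     * (fall through to the instruction after the call). */
    static struct bpf_prog *tail_call_target(struct bpf_array *array, u32 index,
                                             u32 *tail_call_cnt)
    {
            if (index >= array->map.max_entries)
                    return NULL;                    /* out-of-range index */
            if (++(*tail_call_cnt) > MAX_TAIL_CALL_CNT)
                    return NULL;                    /* bound chained tail calls */

            return READ_ONCE(array->ptrs[index]);   /* NULL means empty slot */
    }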
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 8fd6f44..09d3afc 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -166,7 +166,6 @@
 {
 	struct pci_dev *dev;
 
-	pci_read_bridge_bases(b);
 	list_for_each_entry(dev, &b->devices, bus_list)
 		pcibios_fixup_device_resources(dev);
 }
@@ -673,24 +672,22 @@
 	return 0;
 }
 
-int pcibios_enable_device(struct pci_dev *dev, int mask)
+int pcibios_alloc_irq(struct pci_dev *dev)
 {
-	int err;
-
-	if ((err = pci_enable_resources(dev, mask)) < 0)
-		return err;
-
-	if (!pci_dev_msi_enabled(dev))
-		return pcibios_enable_irq(dev);
-	return 0;
+	return pcibios_enable_irq(dev);
 }
 
-void pcibios_disable_device (struct pci_dev *dev)
+void pcibios_free_irq(struct pci_dev *dev)
 {
-	if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+	if (pcibios_disable_irq)
 		pcibios_disable_irq(dev);
 }
 
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	return pci_enable_resources(dev, mask);
+}
+
 int pci_ext_cfg_avail(void)
 {
 	if (raw_pci_ext_ops)
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 9a2b710..e585655 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -62,19 +62,6 @@
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
 
-static void pci_fixup_ncr53c810(struct pci_dev *d)
-{
-	/*
-	 * NCR 53C810 returns class code 0 (at least on some systems).
-	 * Fix class to be PCI_CLASS_STORAGE_SCSI
-	 */
-	if (!d->class) {
-		dev_warn(&d->dev, "Fixing NCR 53C810 class code\n");
-		d->class = PCI_CLASS_STORAGE_SCSI << 8;
-	}
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810);
-
 static void pci_fixup_latency(struct pci_dev *d)
 {
 	/*
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 2706230..0d24e7c 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -35,6 +35,9 @@
 
 #define PCIE_CAP_OFFSET	0x100
 
+/* Quirks for the listed devices */
+#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190
+
 /* Fixed BAR fields */
 #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00	/* Fixed BAR (TBD) */
 #define PCI_FIXED_BAR_0_SIZE	0x04
@@ -210,22 +213,41 @@
 {
 	struct irq_alloc_info info;
 	int polarity;
+	int ret;
 
-	if (dev->irq_managed && dev->irq > 0)
+	if (pci_has_managed_irq(dev))
 		return 0;
 
-	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
-		polarity = 0; /* active high */
-	else
-		polarity = 1; /* active low */
+	switch (intel_mid_identify_cpu()) {
+	case INTEL_MID_CPU_CHIP_TANGIER:
+		polarity = IOAPIC_POL_HIGH;
+
+		/* Special treatment for IRQ0 */
+		if (dev->irq == 0) {
+			/*
+			 * TNG has IRQ0 assigned to the eMMC controller, but
+			 * there are also other devices with bogus PCI
+			 * configurations that have IRQ0 assigned. This check
+			 * ensures that the eMMC controller gets it.
+			 */
+			if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
+				return -EBUSY;
+		}
+		break;
+	default:
+		polarity = IOAPIC_POL_LOW;
+		break;
+	}
+
 	ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);
 
 	/*
 	 * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
 	 * IOAPIC RTE entries, so we just enable RTE for the device.
 	 */
-	if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info) < 0)
-		return -EBUSY;
+	ret = mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info);
+	if (ret < 0)
+		return ret;
 
 	dev->irq_managed = 1;
 
@@ -234,14 +256,17 @@
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-	if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
-	    dev->irq > 0) {
+	if (pci_has_managed_irq(dev)) {
 		mp_unmap_irq(dev->irq);
 		dev->irq_managed = 0;
+		/*
+		 * Don't reset dev->irq here, otherwise
+		 * intel_mid_pci_irq_enable() will fail on the next call.
+		 */
 	}
 }
 
-struct pci_ops intel_mid_pci_ops = {
+static struct pci_ops intel_mid_pci_ops = {
 	.read = pci_read,
 	.write = pci_write,
 };
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 9bd1154..32e7034 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1202,7 +1202,7 @@
 			struct pci_dev *temp_dev;
 			int irq;
 
-			if (dev->irq_managed && dev->irq > 0)
+			if (pci_has_managed_irq(dev))
 				return 0;
 
 			irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,8 +1230,7 @@
 			}
 			dev = temp_dev;
 			if (irq >= 0) {
-				dev->irq_managed = 1;
-				dev->irq = irq;
+				pci_set_managed_irq(dev, irq);
 				dev_info(&dev->dev, "PCI->APIC IRQ transform: "
 					 "INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
 				return 0;
@@ -1257,24 +1256,10 @@
 	return 0;
 }
 
-bool mp_should_keep_irq(struct device *dev)
-{
-	if (dev->power.is_prepared)
-		return true;
-#ifdef CONFIG_PM
-	if (dev->power.runtime_status == RPM_SUSPENDING)
-		return true;
-#endif
-
-	return false;
-}
-
 static void pirq_disable_irq(struct pci_dev *dev)
 {
-	if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
-	    dev->irq_managed && dev->irq) {
+	if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) {
 		mp_unmap_irq(dev->irq);
-		dev->irq = 0;
-		dev->irq_managed = 0;
+		pci_reset_managed_irq(dev);
 	}
 }
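
The hunks above replace the open-coded dev->irq/dev->irq_managed bookkeeping with
pci_has_managed_irq(), pci_set_managed_irq() and pci_reset_managed_irq(). The helper
definitions are not part of this hunk; a sketch of what they amount to, reconstructed
from the removed call sites (illustrative only, not the authoritative definitions):

	/* Sketch, inferred from the removed open-coded sequences above. */
	static inline void pci_set_managed_irq(struct pci_dev *pdev, int irq)
	{
		pdev->irq = irq;
		pdev->irq_managed = 1;
	}

	static inline void pci_reset_managed_irq(struct pci_dev *pdev)
	{
		pdev->irq = 0;
		pdev->irq_managed = 0;
	}

	static inline bool pci_has_managed_irq(struct pci_dev *pdev)
	{
		return pdev->irq_managed && pdev->irq > 0;
	}
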
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index d22f4b5..ff31ab4 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -179,7 +179,7 @@
 	if (ret)
 		goto error;
 	i = 0;
-	list_for_each_entry(msidesc, &dev->msi_list, list) {
+	for_each_pci_msi_entry(msidesc, dev) {
 		irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
 					       (type == PCI_CAP_ID_MSI) ? nvec : 1,
 					       (type == PCI_CAP_ID_MSIX) ?
@@ -230,7 +230,7 @@
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 
-	list_for_each_entry(msidesc, &dev->msi_list, list) {
+	for_each_pci_msi_entry(msidesc, dev) {
 		__pci_read_msi_msg(msidesc, &msg);
 		pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
 			((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
@@ -274,7 +274,7 @@
 	int ret = 0;
 	struct msi_desc *msidesc;
 
-	list_for_each_entry(msidesc, &dev->msi_list, list) {
+	for_each_pci_msi_entry(msidesc, dev) {
 		struct physdev_map_pirq map_irq;
 		domid_t domid;
 
@@ -386,7 +386,7 @@
 {
 	struct msi_desc *msidesc;
 
-	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+	msidesc = first_pci_msi_entry(dev);
 	if (msidesc->msi_attrib.is_msix)
 		xen_pci_frontend_disable_msix(dev);
 	else
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index f1a6c8e..184842e 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -5,6 +5,7 @@
 obj-y	+= geode/
 obj-y	+= goldfish/
 obj-y	+= iris/
+obj-y	+= intel/
 obj-y	+= intel-mid/
 obj-y	+= intel-quark/
 obj-y	+= olpc/
diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile
index 0a3a40c..40983f5 100644
--- a/arch/x86/platform/atom/Makefile
+++ b/arch/x86/platform/atom/Makefile
@@ -1 +1,2 @@
-obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
+obj-$(CONFIG_PMC_ATOM)		+= pmc_atom.o
+obj-$(CONFIG_PUNIT_ATOM_DEBUG)	+= punit_atom_debug.o
diff --git a/arch/x86/platform/atom/pmc_atom.c b/arch/x86/platform/atom/pmc_atom.c
new file mode 100644
index 0000000..964ff4f
--- /dev/null
+++ b/arch/x86/platform/atom/pmc_atom.c
@@ -0,0 +1,460 @@
+/*
+ * Intel Atom SOC Power Management Controller Driver
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+
+#include <asm/pmc_atom.h>
+
+struct pmc_bit_map {
+	const char *name;
+	u32 bit_mask;
+};
+
+struct pmc_reg_map {
+	const struct pmc_bit_map *d3_sts_0;
+	const struct pmc_bit_map *d3_sts_1;
+	const struct pmc_bit_map *func_dis;
+	const struct pmc_bit_map *func_dis_2;
+	const struct pmc_bit_map *pss;
+};
+
+struct pmc_dev {
+	u32 base_addr;
+	void __iomem *regmap;
+	const struct pmc_reg_map *map;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+	bool init;
+};
+
+static struct pmc_dev pmc_device;
+static u32 acpi_base_addr;
+
+static const struct pmc_bit_map d3_sts_0_map[] = {
+	{"LPSS1_F0_DMA",	BIT_LPSS1_F0_DMA},
+	{"LPSS1_F1_PWM1",	BIT_LPSS1_F1_PWM1},
+	{"LPSS1_F2_PWM2",	BIT_LPSS1_F2_PWM2},
+	{"LPSS1_F3_HSUART1",	BIT_LPSS1_F3_HSUART1},
+	{"LPSS1_F4_HSUART2",	BIT_LPSS1_F4_HSUART2},
+	{"LPSS1_F5_SPI",	BIT_LPSS1_F5_SPI},
+	{"LPSS1_F6_Reserved",	BIT_LPSS1_F6_XXX},
+	{"LPSS1_F7_Reserved",	BIT_LPSS1_F7_XXX},
+	{"SCC_EMMC",		BIT_SCC_EMMC},
+	{"SCC_SDIO",		BIT_SCC_SDIO},
+	{"SCC_SDCARD",		BIT_SCC_SDCARD},
+	{"SCC_MIPI",		BIT_SCC_MIPI},
+	{"HDA",			BIT_HDA},
+	{"LPE",			BIT_LPE},
+	{"OTG",			BIT_OTG},
+	{"USH",			BIT_USH},
+	{"GBE",			BIT_GBE},
+	{"SATA",		BIT_SATA},
+	{"USB_EHCI",		BIT_USB_EHCI},
+	{"SEC",			BIT_SEC},
+	{"PCIE_PORT0",		BIT_PCIE_PORT0},
+	{"PCIE_PORT1",		BIT_PCIE_PORT1},
+	{"PCIE_PORT2",		BIT_PCIE_PORT2},
+	{"PCIE_PORT3",		BIT_PCIE_PORT3},
+	{"LPSS2_F0_DMA",	BIT_LPSS2_F0_DMA},
+	{"LPSS2_F1_I2C1",	BIT_LPSS2_F1_I2C1},
+	{"LPSS2_F2_I2C2",	BIT_LPSS2_F2_I2C2},
+	{"LPSS2_F3_I2C3",	BIT_LPSS2_F3_I2C3},
+	{"LPSS2_F4_I2C4",	BIT_LPSS2_F4_I2C4},
+	{"LPSS2_F5_I2C5",	BIT_LPSS2_F5_I2C5},
+	{"LPSS2_F6_I2C6",	BIT_LPSS2_F6_I2C6},
+	{"LPSS2_F7_I2C7",	BIT_LPSS2_F7_I2C7},
+	{},
+};
+
+static struct pmc_bit_map byt_d3_sts_1_map[] = {
+	{"SMB",			BIT_SMB},
+	{"OTG_SS_PHY",		BIT_OTG_SS_PHY},
+	{"USH_SS_PHY",		BIT_USH_SS_PHY},
+	{"DFX",			BIT_DFX},
+	{},
+};
+
+static struct pmc_bit_map cht_d3_sts_1_map[] = {
+	{"SMB",			BIT_SMB},
+	{"GMM",			BIT_STS_GMM},
+	{"ISH",			BIT_STS_ISH},
+	{},
+};
+
+static struct pmc_bit_map cht_func_dis_2_map[] = {
+	{"SMB",			BIT_SMB},
+	{"GMM",			BIT_FD_GMM},
+	{"ISH",			BIT_FD_ISH},
+	{},
+};
+
+static const struct pmc_bit_map byt_pss_map[] = {
+	{"GBE",			PMC_PSS_BIT_GBE},
+	{"SATA",		PMC_PSS_BIT_SATA},
+	{"HDA",			PMC_PSS_BIT_HDA},
+	{"SEC",			PMC_PSS_BIT_SEC},
+	{"PCIE",		PMC_PSS_BIT_PCIE},
+	{"LPSS",		PMC_PSS_BIT_LPSS},
+	{"LPE",			PMC_PSS_BIT_LPE},
+	{"DFX",			PMC_PSS_BIT_DFX},
+	{"USH_CTRL",		PMC_PSS_BIT_USH_CTRL},
+	{"USH_SUS",		PMC_PSS_BIT_USH_SUS},
+	{"USH_VCCS",		PMC_PSS_BIT_USH_VCCS},
+	{"USH_VCCA",		PMC_PSS_BIT_USH_VCCA},
+	{"OTG_CTRL",		PMC_PSS_BIT_OTG_CTRL},
+	{"OTG_VCCS",		PMC_PSS_BIT_OTG_VCCS},
+	{"OTG_VCCA_CLK",	PMC_PSS_BIT_OTG_VCCA_CLK},
+	{"OTG_VCCA",		PMC_PSS_BIT_OTG_VCCA},
+	{"USB",			PMC_PSS_BIT_USB},
+	{"USB_SUS",		PMC_PSS_BIT_USB_SUS},
+	{},
+};
+
+static const struct pmc_bit_map cht_pss_map[] = {
+	{"SATA",		PMC_PSS_BIT_SATA},
+	{"HDA",			PMC_PSS_BIT_HDA},
+	{"SEC",			PMC_PSS_BIT_SEC},
+	{"PCIE",		PMC_PSS_BIT_PCIE},
+	{"LPSS",		PMC_PSS_BIT_LPSS},
+	{"LPE",			PMC_PSS_BIT_LPE},
+	{"UFS",			PMC_PSS_BIT_CHT_UFS},
+	{"UXD",			PMC_PSS_BIT_CHT_UXD},
+	{"UXD_FD",		PMC_PSS_BIT_CHT_UXD_FD},
+	{"UX_ENG",		PMC_PSS_BIT_CHT_UX_ENG},
+	{"USB_SUS",		PMC_PSS_BIT_CHT_USB_SUS},
+	{"GMM",			PMC_PSS_BIT_CHT_GMM},
+	{"ISH",			PMC_PSS_BIT_CHT_ISH},
+	{"DFX_MASTER",		PMC_PSS_BIT_CHT_DFX_MASTER},
+	{"DFX_CLUSTER1",	PMC_PSS_BIT_CHT_DFX_CLUSTER1},
+	{"DFX_CLUSTER2",	PMC_PSS_BIT_CHT_DFX_CLUSTER2},
+	{"DFX_CLUSTER3",	PMC_PSS_BIT_CHT_DFX_CLUSTER3},
+	{"DFX_CLUSTER4",	PMC_PSS_BIT_CHT_DFX_CLUSTER4},
+	{"DFX_CLUSTER5",	PMC_PSS_BIT_CHT_DFX_CLUSTER5},
+	{},
+};
+
+static const struct pmc_reg_map byt_reg_map = {
+	.d3_sts_0	= d3_sts_0_map,
+	.d3_sts_1	= byt_d3_sts_1_map,
+	.func_dis	= d3_sts_0_map,
+	.func_dis_2	= byt_d3_sts_1_map,
+	.pss		= byt_pss_map,
+};
+
+static const struct pmc_reg_map cht_reg_map = {
+	.d3_sts_0	= d3_sts_0_map,
+	.d3_sts_1	= cht_d3_sts_1_map,
+	.func_dis	= d3_sts_0_map,
+	.func_dis_2	= cht_func_dis_2_map,
+	.pss		= cht_pss_map,
+};
+
+static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset)
+{
+	return readl(pmc->regmap + reg_offset);
+}
+
+static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val)
+{
+	writel(val, pmc->regmap + reg_offset);
+}
+
+int pmc_atom_read(int offset, u32 *value)
+{
+	struct pmc_dev *pmc = &pmc_device;
+
+	if (!pmc->init)
+		return -ENODEV;
+
+	*value = pmc_reg_read(pmc, offset);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_atom_read);
+
+int pmc_atom_write(int offset, u32 value)
+{
+	struct pmc_dev *pmc = &pmc_device;
+
+	if (!pmc->init)
+		return -ENODEV;
+
+	pmc_reg_write(pmc, offset, value);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_atom_write);
+
+static void pmc_power_off(void)
+{
+	u16	pm1_cnt_port;
+	u32	pm1_cnt_value;
+
+	pr_info("Preparing to enter system sleep state S5\n");
+
+	pm1_cnt_port = acpi_base_addr + PM1_CNT;
+
+	pm1_cnt_value = inl(pm1_cnt_port);
+	pm1_cnt_value &= SLEEP_TYPE_MASK;
+	pm1_cnt_value |= SLEEP_TYPE_S5;
+	pm1_cnt_value |= SLEEP_ENABLE;
+
+	outl(pm1_cnt_value, pm1_cnt_port);
+}
+
+static void pmc_hw_reg_setup(struct pmc_dev *pmc)
+{
+	/*
+	 * Disable PMC S0IX_WAKE_EN events coming from:
+	 * - LPC clock run
+	 * - GPIO_SUS ORed dedicated IRQs
+	 * - GPIO_SCORE ORed dedicated IRQs
+	 * - GPIO_SUS shared IRQ
+	 * - GPIO_SCORE shared IRQ
+	 */
+	pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void pmc_dev_state_print(struct seq_file *s, int reg_index,
+				u32 sts, const struct pmc_bit_map *sts_map,
+				u32 fd, const struct pmc_bit_map *fd_map)
+{
+	int offset = PMC_REG_BIT_WIDTH * reg_index;
+	int index;
+
+	for (index = 0; sts_map[index].name; index++) {
+		seq_printf(s, "Dev: %-2d - %-32s\tState: %s [%s]\n",
+			offset + index, sts_map[index].name,
+			fd_map[index].bit_mask & fd ?  "Disabled" : "Enabled ",
+			sts_map[index].bit_mask & sts ?  "D3" : "D0");
+	}
+}
+
+static int pmc_dev_state_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmc = s->private;
+	const struct pmc_reg_map *m = pmc->map;
+	u32 func_dis, func_dis_2;
+	u32 d3_sts_0, d3_sts_1;
+
+	func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS);
+	func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2);
+	d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0);
+	d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1);
+
+	/* Low part */
+	pmc_dev_state_print(s, 0, d3_sts_0, m->d3_sts_0, func_dis, m->func_dis);
+
+	/* High part */
+	pmc_dev_state_print(s, 1, d3_sts_1, m->d3_sts_1, func_dis_2, m->func_dis_2);
+
+	return 0;
+}
+
+static int pmc_dev_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_dev_state_ops = {
+	.open		= pmc_dev_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pmc_pss_state_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmc = s->private;
+	const struct pmc_bit_map *map = pmc->map->pss;
+	u32 pss = pmc_reg_read(pmc, PMC_PSS);
+	int index;
+
+	for (index = 0; map[index].name; index++) {
+		seq_printf(s, "Island: %-2d - %-32s\tState: %s\n",
+			index, map[index].name,
+			map[index].bit_mask & pss ? "Off" : "On");
+	}
+	return 0;
+}
+
+static int pmc_pss_state_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_pss_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_pss_state_ops = {
+	.open		= pmc_pss_state_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int pmc_sleep_tmr_show(struct seq_file *s, void *unused)
+{
+	struct pmc_dev *pmc = s->private;
+	u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr;
+
+	s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT;
+	s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT;
+	s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT;
+	s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT;
+	s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT;
+
+	seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr);
+	seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr);
+	seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr);
+	seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr);
+	seq_printf(s, "S0   Residency:\t%lldus\n", s0_tmr);
+	return 0;
+}
+
+static int pmc_sleep_tmr_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pmc_sleep_tmr_show, inode->i_private);
+}
+
+static const struct file_operations pmc_sleep_tmr_ops = {
+	.open		= pmc_sleep_tmr_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void pmc_dbgfs_unregister(struct pmc_dev *pmc)
+{
+	debugfs_remove_recursive(pmc->dbgfs_dir);
+}
+
+static int pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+	struct dentry *dir, *f;
+
+	dir = debugfs_create_dir("pmc_atom", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	pmc->dbgfs_dir = dir;
+
+	f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO,
+				dir, pmc, &pmc_dev_state_ops);
+	if (!f)
+		goto err;
+
+	f = debugfs_create_file("pss_state", S_IFREG | S_IRUGO,
+				dir, pmc, &pmc_pss_state_ops);
+	if (!f)
+		goto err;
+
+	f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO,
+				dir, pmc, &pmc_sleep_tmr_ops);
+	if (!f)
+		goto err;
+
+	return 0;
+err:
+	pmc_dbgfs_unregister(pmc);
+	return -ENODEV;
+}
+#else
+static int pmc_dbgfs_register(struct pmc_dev *pmc)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int pmc_setup_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct pmc_dev *pmc = &pmc_device;
+	const struct pmc_reg_map *map = (struct pmc_reg_map *)ent->driver_data;
+	int ret;
+
+	/* Obtain ACPI base address */
+	pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr);
+	acpi_base_addr &= ACPI_BASE_ADDR_MASK;
+
+	/* Install power off function */
+	if (acpi_base_addr != 0 && pm_power_off == NULL)
+		pm_power_off = pmc_power_off;
+
+	pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr);
+	pmc->base_addr &= PMC_BASE_ADDR_MASK;
+
+	pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN);
+	if (!pmc->regmap) {
+		dev_err(&pdev->dev, "error: ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	pmc->map = map;
+
+	/* PMC hardware registers setup */
+	pmc_hw_reg_setup(pmc);
+
+	ret = pmc_dbgfs_register(pmc);
+	if (ret)
+		dev_warn(&pdev->dev, "debugfs register failed\n");
+
+	pmc->init = true;
+	return ret;
+}
+
+/*
+ * Data for the PCI driver interface.
+ *
+ * Used by the pci_match_id() call below.
+ */
+static const struct pci_device_id pmc_pci_ids[] = {
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_VLV_PMC), (kernel_ulong_t)&byt_reg_map },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_CHT_PMC), (kernel_ulong_t)&cht_reg_map },
+	{ 0, },
+};
+
+static int __init pmc_atom_init(void)
+{
+	struct pci_dev *pdev = NULL;
+	const struct pci_device_id *ent;
+
+	/* We look for our device - the PCU PMC - and
+	 * assume that there is at most one such device.
+	 *
+	 * We can't use the plain pci_driver mechanism, as the device is
+	 * really a multi-function device: the main driver that binds to
+	 * the pci_dev is lpc_ich, so we have to find and bind to the
+	 * device this way.
+	 */
+	for_each_pci_dev(pdev) {
+		ent = pci_match_id(pmc_pci_ids, pdev);
+		if (ent)
+			return pmc_setup_dev(pdev, ent);
+	}
+	/* Device not found. */
+	return -ENODEV;
+}
+
+device_initcall(pmc_atom_init);
+
+/*
+MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface");
+MODULE_LICENSE("GPL v2");
+*/
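
pmc_atom_read() and pmc_atom_write() are exported for other kernel code and return
-ENODEV until pmc_setup_dev() has marked the device initialized, so callers must check
the return value. A minimal consumer sketch (the caller is hypothetical; the
PMC_FUNC_DIS offset comes from <asm/pmc_atom.h>):

	/* Hypothetical consumer of the exported PMC accessors (sketch only). */
	#include <linux/types.h>
	#include <asm/pmc_atom.h>

	static int example_read_func_dis(u32 *func_dis)
	{
		/* Fails with -ENODEV if the PMC PCI device was never found. */
		return pmc_atom_read(PMC_FUNC_DIS, func_dis);
	}
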
diff --git a/arch/x86/platform/intel/Makefile b/arch/x86/platform/intel/Makefile
new file mode 100644
index 0000000..b878032
--- /dev/null
+++ b/arch/x86/platform/intel/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_IOSF_MBI)			+= iosf_mbi.o
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
new file mode 100644
index 0000000..edf2c54
--- /dev/null
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -0,0 +1,327 @@
+/*
+ * IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements access to this interface for those platforms that can
+ * enumerate the device using PCI.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/capability.h>
+
+#include <asm/iosf_mbi.h>
+
+#define PCI_DEVICE_ID_BAYTRAIL		0x0F00
+#define PCI_DEVICE_ID_BRASWELL		0x2280
+#define PCI_DEVICE_ID_QUARK_X1000	0x0958
+#define PCI_DEVICE_ID_TANGIER		0x1170
+
+static struct pci_dev *mbi_pdev;
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+	return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
+}
+
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+	int result;
+
+	if (!mbi_pdev)
+		return -ENODEV;
+
+	if (mcrx) {
+		result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+						mcrx);
+		if (result < 0)
+			goto fail_read;
+	}
+
+	result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+	if (result < 0)
+		goto fail_read;
+
+	result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+	if (result < 0)
+		goto fail_read;
+
+	return 0;
+
+fail_read:
+	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+	return result;
+}
+
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+	int result;
+
+	if (!mbi_pdev)
+		return -ENODEV;
+
+	result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+	if (result < 0)
+		goto fail_write;
+
+	if (mcrx) {
+		result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+						mcrx);
+		if (result < 0)
+			goto fail_write;
+	}
+
+	result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+	if (result < 0)
+		goto fail_write;
+
+	return 0;
+
+fail_write:
+	dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+	return result;
+}
+
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+	u32 mcr, mcrx;
+	unsigned long flags;
+	int ret;
+
+	/* Access to the GFX unit is handled by GPU code */
+	if (port == BT_MBI_UNIT_GFX) {
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+	mcrx = offset & MBI_MASK_HI;
+
+	spin_lock_irqsave(&iosf_mbi_lock, flags);
+	ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_read);
+
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+	u32 mcr, mcrx;
+	unsigned long flags;
+	int ret;
+
+	/* Access to the GFX unit is handled by GPU code */
+	if (port == BT_MBI_UNIT_GFX) {
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+	mcrx = offset & MBI_MASK_HI;
+
+	spin_lock_irqsave(&iosf_mbi_lock, flags);
+	ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_write);
+
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+	u32 mcr, mcrx;
+	u32 value;
+	unsigned long flags;
+	int ret;
+
+	/* Access to the GFX unit is handled by GPU code */
+	if (port == BT_MBI_UNIT_GFX) {
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+	mcrx = offset & MBI_MASK_HI;
+
+	spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+	/* Read current mdr value */
+	ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+		return ret;
+	}
+
+	/* Apply mask */
+	value &= ~mask;
+	mdr &= mask;
+	value |= mdr;
+
+	/* Write back */
+	ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
+
+	spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_modify);
+
+bool iosf_mbi_available(void)
+{
+	/* MBI isn't hot-pluggable. No remove routine is provided. */
+	return mbi_pdev;
+}
+EXPORT_SYMBOL(iosf_mbi_available);
+
+#ifdef CONFIG_IOSF_MBI_DEBUG
+static u32	dbg_mdr;
+static u32	dbg_mcr;
+static u32	dbg_mcrx;
+
+static int mcr_get(void *data, u64 *val)
+{
+	*val = *(u32 *)data;
+	return 0;
+}
+
+static int mcr_set(void *data, u64 val)
+{
+	u8 command = ((u32)val & 0xFF000000) >> 24,
+	   port	   = ((u32)val & 0x00FF0000) >> 16,
+	   offset  = ((u32)val & 0x0000FF00) >> 8;
+	int err;
+
+	*(u32 *)data = val;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EACCES;
+
+	if (command & 1u)
+		err = iosf_mbi_write(port,
+			       command,
+			       dbg_mcrx | offset,
+			       dbg_mdr);
+	else
+		err = iosf_mbi_read(port,
+			      command,
+			      dbg_mcrx | offset,
+			      &dbg_mdr);
+
+	return err;
+}
+DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set, "%llx\n");
+
+static struct dentry *iosf_dbg;
+
+static void iosf_sideband_debug_init(void)
+{
+	struct dentry *d;
+
+	iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
+	if (IS_ERR_OR_NULL(iosf_dbg))
+		return;
+
+	/* mdr */
+	d = debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
+	if (!d)
+		goto cleanup;
+
+	/* mcrx */
+	d = debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
+	if (!d)
+		goto cleanup;
+
+	/* mcr - initiates a mailbox transaction */
+	d = debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
+	if (!d)
+		goto cleanup;
+
+	return;
+
+cleanup:
+	debugfs_remove_recursive(d);
+}
+
+static void iosf_debugfs_init(void)
+{
+	iosf_sideband_debug_init();
+}
+
+static void iosf_debugfs_remove(void)
+{
+	debugfs_remove_recursive(iosf_dbg);
+}
+#else
+static inline void iosf_debugfs_init(void) { }
+static inline void iosf_debugfs_remove(void) { }
+#endif /* CONFIG_IOSF_MBI_DEBUG */
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *unused)
+{
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "error: could not enable device\n");
+		return ret;
+	}
+
+	mbi_pdev = pci_dev_get(pdev);
+	return 0;
+}
+
+static const struct pci_device_id iosf_mbi_pci_ids[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_TANGIER) },
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+	.name		= "iosf_mbi_pci",
+	.probe		= iosf_mbi_probe,
+	.id_table	= iosf_mbi_pci_ids,
+};
+
+static int __init iosf_mbi_init(void)
+{
+	iosf_debugfs_init();
+
+	return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit iosf_mbi_exit(void)
+{
+	iosf_debugfs_remove();
+
+	pci_unregister_driver(&iosf_mbi_pci_driver);
+	pci_dev_put(mbi_pdev);
+	mbi_pdev = NULL;
+}
+
+module_init(iosf_mbi_init);
+module_exit(iosf_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
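
iosf_mbi_read(), iosf_mbi_write() and iosf_mbi_modify() take a sideband unit port, an
opcode and a register offset, serialize on iosf_mbi_lock and reject the GFX unit. A
usage sketch, assuming the BT_MBI_UNIT_PMC, MBI_REG_READ and MBI_REG_WRITE definitions
from <asm/iosf_mbi.h>; the 0x3c offset is made up for illustration:

	/* Sketch of a sideband read-modify-write; the register offset is made up. */
	#include <linux/types.h>
	#include <asm/iosf_mbi.h>

	static int example_sideband_set_bit0(void)
	{
		u32 val;
		int ret;

		if (!iosf_mbi_available())
			return -ENODEV;

		ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, 0x3c, &val);
		if (ret)
			return ret;

		/* Set bit 0 and write the register back. */
		return iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, 0x3c,
				      val | 0x1);
	}
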
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index 8570abe..e1c2463 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -89,7 +89,7 @@
 		return -EINVAL;
 
 	chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
-				 irq_data->node);
+				 irq_data_get_node(irq_data));
 	if (!chip_data)
 		return -ENOMEM;
 
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index a244237..2b158a9 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -32,8 +32,7 @@
 
 static cycle_t uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
-static void uv_rtc_timer_setup(enum clock_event_mode,
-				struct clock_event_device *);
+static int uv_rtc_shutdown(struct clock_event_device *evt);
 
 static struct clocksource clocksource_uv = {
 	.name		= RTC_NAME,
@@ -44,14 +43,14 @@
 };
 
 static struct clock_event_device clock_event_device_uv = {
-	.name		= RTC_NAME,
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.shift		= 20,
-	.rating		= 400,
-	.irq		= -1,
-	.set_next_event	= uv_rtc_next_event,
-	.set_mode	= uv_rtc_timer_setup,
-	.event_handler	= NULL,
+	.name			= RTC_NAME,
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.shift			= 20,
+	.rating			= 400,
+	.irq			= -1,
+	.set_next_event		= uv_rtc_next_event,
+	.set_state_shutdown	= uv_rtc_shutdown,
+	.event_handler		= NULL,
 };
 
 static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
@@ -321,24 +320,14 @@
 }
 
 /*
- * Setup the RTC timer in oneshot mode
+ * Shutdown the RTC timer
  */
-static void uv_rtc_timer_setup(enum clock_event_mode mode,
-			       struct clock_event_device *evt)
+static int uv_rtc_shutdown(struct clock_event_device *evt)
 {
 	int ced_cpu = cpumask_first(evt->cpumask);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here yet */
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		uv_rtc_unset_timer(ced_cpu, 1);
-		break;
-	}
+	uv_rtc_unset_timer(ced_cpu, 1);
+	return 0;
 }
 
 static void uv_rtc_interrupt(void)
diff --git a/arch/x86/ras/Kconfig b/arch/x86/ras/Kconfig
new file mode 100644
index 0000000..10fea5f
--- /dev/null
+++ b/arch/x86/ras/Kconfig
@@ -0,0 +1,11 @@
+config AMD_MCE_INJ
+	tristate "Simple MCE injection interface for AMD processors"
+	depends on RAS && EDAC_DECODE_MCE && DEBUG_FS
+	default n
+	help
+	  This is a simple debugfs interface to inject MCEs and test different
+	  aspects of the MCE handling code.
+
+	  WARNING: Do not even assume this interface is staying stable!
+
+
diff --git a/arch/x86/ras/Makefile b/arch/x86/ras/Makefile
new file mode 100644
index 0000000..dd2c98b
--- /dev/null
+++ b/arch/x86/ras/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_AMD_MCE_INJ)		+= mce_amd_inj.o
+
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
new file mode 100644
index 0000000..17e35b5
--- /dev/null
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -0,0 +1,375 @@
+/*
+ * A simple MCE injection facility for testing different aspects of the RAS
+ * code. This driver should be built as a module so that it can be loaded
+ * on production kernels for testing purposes.
+ *
+ * This file may be distributed under the terms of the GNU General Public
+ * License version 2.
+ *
+ * Copyright (c) 2010-15:  Borislav Petkov <bp@alien8.de>
+ *			Advanced Micro Devices Inc.
+ */
+
+#include <linux/kobject.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/mce.h>
+
+#include "../kernel/cpu/mcheck/mce-internal.h"
+
+/*
+ * Collect all the MCi_XXX settings
+ */
+static struct mce i_mce;
+static struct dentry *dfs_inj;
+
+static u8 n_banks;
+
+#define MAX_FLAG_OPT_SIZE	3
+
+enum injection_type {
+	SW_INJ = 0,	/* SW injection, simply decode the error */
+	HW_INJ,		/* Trigger a #MC */
+	N_INJ_TYPES,
+};
+
+static const char * const flags_options[] = {
+	[SW_INJ] = "sw",
+	[HW_INJ] = "hw",
+	NULL
+};
+
+/* Set default injection to SW_INJ */
+static enum injection_type inj_type = SW_INJ;
+
+#define MCE_INJECT_SET(reg)						\
+static int inj_##reg##_set(void *data, u64 val)				\
+{									\
+	struct mce *m = (struct mce *)data;				\
+									\
+	m->reg = val;							\
+	return 0;							\
+}
+
+MCE_INJECT_SET(status);
+MCE_INJECT_SET(misc);
+MCE_INJECT_SET(addr);
+
+#define MCE_INJECT_GET(reg)						\
+static int inj_##reg##_get(void *data, u64 *val)			\
+{									\
+	struct mce *m = (struct mce *)data;				\
+									\
+	*val = m->reg;							\
+	return 0;							\
+}
+
+MCE_INJECT_GET(status);
+MCE_INJECT_GET(misc);
+MCE_INJECT_GET(addr);
+
+DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
+
+/*
+ * The caller needs to make sure this CPU doesn't disappear
+ * from under us, i.e.: get_cpu()/put_cpu().
+ */
+static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
+{
+	u32 l, h;
+	int err;
+
+	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
+	if (err) {
+		pr_err("%s: error reading HWCR\n", __func__);
+		return err;
+	}
+
+	enable ? (l |= BIT(18)) : (l &= ~BIT(18));
+
+	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
+	if (err)
+		pr_err("%s: error writing HWCR\n", __func__);
+
+	return err;
+}
+
+static int __set_inj(const char *buf)
+{
+	int i;
+
+	for (i = 0; i < N_INJ_TYPES; i++) {
+		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
+			inj_type = i;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static ssize_t flags_read(struct file *filp, char __user *ubuf,
+			  size_t cnt, loff_t *ppos)
+{
+	char buf[MAX_FLAG_OPT_SIZE];
+	int n;
+
+	n = sprintf(buf, "%s\n", flags_options[inj_type]);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
+}
+
+static ssize_t flags_write(struct file *filp, const char __user *ubuf,
+			   size_t cnt, loff_t *ppos)
+{
+	char buf[MAX_FLAG_OPT_SIZE], *__buf;
+	int err;
+	size_t ret;
+
+	if (cnt > MAX_FLAG_OPT_SIZE)
+		cnt = MAX_FLAG_OPT_SIZE;
+
+	ret = cnt;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt - 1] = 0;
+
+	/* strip whitespace */
+	__buf = strstrip(buf);
+
+	err = __set_inj(__buf);
+	if (err) {
+		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
+		return err;
+	}
+
+	*ppos += ret;
+
+	return ret;
+}
+
+static const struct file_operations flags_fops = {
+	.read           = flags_read,
+	.write          = flags_write,
+	.llseek         = generic_file_llseek,
+};
+
+/*
+ * On which CPU to inject?
+ */
+MCE_INJECT_GET(extcpu);
+
+static int inj_extcpu_set(void *data, u64 val)
+{
+	struct mce *m = (struct mce *)data;
+
+	if (val >= nr_cpu_ids || !cpu_online(val)) {
+		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
+		return -EINVAL;
+	}
+	m->extcpu = val;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
+
+static void trigger_mce(void *info)
+{
+	asm volatile("int $18");
+}
+
+static void do_inject(void)
+{
+	u64 mcg_status = 0;
+	unsigned int cpu = i_mce.extcpu;
+	u8 b = i_mce.bank;
+
+	if (i_mce.misc)
+		i_mce.status |= MCI_STATUS_MISCV;
+
+	if (inj_type == SW_INJ) {
+		mce_inject_log(&i_mce);
+		return;
+	}
+
+	/* prep MCE global settings for the injection */
+	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
+
+	if (!(i_mce.status & MCI_STATUS_PCC))
+		mcg_status |= MCG_STATUS_RIPV;
+
+	get_online_cpus();
+	if (!cpu_online(cpu))
+		goto err;
+
+	toggle_hw_mce_inject(cpu, true);
+
+	wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
+		     (u32)mcg_status, (u32)(mcg_status >> 32));
+
+	wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
+		     (u32)i_mce.status, (u32)(i_mce.status >> 32));
+
+	wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
+		     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
+
+	wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
+		     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
+
+	toggle_hw_mce_inject(cpu, false);
+
+	smp_call_function_single(cpu, trigger_mce, NULL, 0);
+
+err:
+	put_online_cpus();
+
+}
+
+/*
+ * This denotes into which bank we're injecting and, at the same
+ * time, triggers the injection.
+ */
+static int inj_bank_set(void *data, u64 val)
+{
+	struct mce *m = (struct mce *)data;
+
+	if (val >= n_banks) {
+		pr_err("Non-existent MCE bank: %llu\n", val);
+		return -EINVAL;
+	}
+
+	m->bank = val;
+	do_inject();
+
+	return 0;
+}
+
+MCE_INJECT_GET(bank);
+
+DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
+
+static const char readme_msg[] =
+"Description of the files and their usages:\n"
+"\n"
+"Note1: i refers to the bank number below.\n"
+"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
+"as they mirror the hardware registers.\n"
+"\n"
+"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
+"\t attributes of the error which caused the MCE.\n"
+"\n"
+"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
+"\t used for error thresholding purposes and its validity is indicated by\n"
+"\t MCi_STATUS[MiscV].\n"
+"\n"
+"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
+"\t associated with the error.\n"
+"\n"
+"cpu:\t The CPU to inject the error on.\n"
+"\n"
+"bank:\t Specify the bank you want to inject the error into: the number of\n"
+"\t banks in a processor varies and is family/model-specific, therefore, the\n"
+"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
+"\t injection.\n"
+"\n"
+"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
+"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
+"\t for AMD processors.\n"
+"\n"
+"\t Allowed error injection types:\n"
+"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
+"\t    format only. Safe to use.\n"
+"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
+"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
+"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
+"\t    before injecting.\n"
+"\n";
+
+static ssize_t
+inj_readme_read(struct file *filp, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	return simple_read_from_buffer(ubuf, cnt, ppos,
+					readme_msg, strlen(readme_msg));
+}
+
+static const struct file_operations readme_fops = {
+	.read		= inj_readme_read,
+};
+
+static struct dfs_node {
+	char *name;
+	struct dentry *d;
+	const struct file_operations *fops;
+	umode_t perm;
+} dfs_fls[] = {
+	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
+	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
+	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
+	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
+	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
+};
+
+static int __init init_mce_inject(void)
+{
+	int i;
+	u64 cap;
+
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+	n_banks = cap & MCG_BANKCNT_MASK;
+
+	dfs_inj = debugfs_create_dir("mce-inject", NULL);
+	if (!dfs_inj)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
+		dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
+						    dfs_fls[i].perm,
+						    dfs_inj,
+						    &i_mce,
+						    dfs_fls[i].fops);
+
+		if (!dfs_fls[i].d)
+			goto err_dfs_add;
+	}
+
+	return 0;
+
+err_dfs_add:
+	while (--i >= 0)
+		debugfs_remove(dfs_fls[i].d);
+
+	debugfs_remove(dfs_inj);
+	dfs_inj = NULL;
+
+	return -ENOMEM;
+}
+
+static void __exit exit_mce_inject(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++)
+		debugfs_remove(dfs_fls[i].d);
+
+	memset(&dfs_fls, 0, sizeof(dfs_fls));
+
+	debugfs_remove(dfs_inj);
+	dfs_inj = NULL;
+}
+module_init(init_mce_inject);
+module_exit(exit_mce_inject);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
+MODULE_AUTHOR("AMD Inc.");
+MODULE_DESCRIPTION("MCE injection facility for RAS testing");
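
The README text embedded above documents the debugfs knobs; a userspace sketch of
driving them, assuming debugfs is mounted at /sys/kernel/debug and using made-up
status/bank values (the "sw" flag keeps the injection to decode-only):

	/* Userspace sketch: software-decoded injection; register values are made up. */
	#include <stdio.h>

	static int write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%s\n", val);
		return fclose(f);
	}

	int main(void)
	{
		const char *dir = "/sys/kernel/debug/mce-inject";
		char path[128];

		snprintf(path, sizeof(path), "%s/flags", dir);
		write_str(path, "sw");			/* decode only, safe */
		snprintf(path, sizeof(path), "%s/status", dir);
		write_str(path, "0x9400000000000000");	/* made-up MCi_STATUS */
		snprintf(path, sizeof(path), "%s/bank", dir);
		return write_str(path, "2");		/* writing bank triggers it */
	}
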
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index b9531d3..755481f 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -45,17 +45,4 @@
 #define read_barrier_depends()		do { } while (0)
 #define smp_read_barrier_depends()	do { } while (0)
 
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
-			  "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
 #endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 11d6fb4..d9cfa45 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1215,11 +1215,8 @@
 	.read_msr = xen_read_msr_safe,
 	.write_msr = xen_write_msr_safe,
 
-	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
 
-	.read_tscp = native_read_tscp,
-
 	.iret = xen_iret,
 #ifdef CONFIG_X86_64
 	.usergs_sysret32 = xen_sysret32,
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 55da33b..f1ba6a0 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -274,30 +274,18 @@
 	return xen_clocksource_read() + delta;
 }
 
-static void xen_timerop_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *evt)
+static int xen_timerop_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* unsupported */
-		WARN_ON(1);
-		break;
+	/* cancel timeout */
+	HYPERVISOR_set_timer_op(0);
 
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
-		break;
-	}
+	return 0;
 }
 
 static int xen_timerop_set_next_event(unsigned long delta,
 				      struct clock_event_device *evt)
 {
-	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+	WARN_ON(!clockevent_state_oneshot(evt));
 
 	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
 		BUG();
@@ -310,46 +298,39 @@
 }
 
 static const struct clock_event_device xen_timerop_clockevent = {
-	.name = "xen",
-	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.name			= "xen",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
 
-	.max_delta_ns = 0xffffffff,
-	.min_delta_ns = TIMER_SLOP,
+	.max_delta_ns		= 0xffffffff,
+	.min_delta_ns		= TIMER_SLOP,
 
-	.mult = 1,
-	.shift = 0,
-	.rating = 500,
+	.mult			= 1,
+	.shift			= 0,
+	.rating			= 500,
 
-	.set_mode = xen_timerop_set_mode,
-	.set_next_event = xen_timerop_set_next_event,
+	.set_state_shutdown	= xen_timerop_shutdown,
+	.set_next_event		= xen_timerop_set_next_event,
 };
 
-
-
-static void xen_vcpuop_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int xen_vcpuop_shutdown(struct clock_event_device *evt)
 {
 	int cpu = smp_processor_id();
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		WARN_ON(1);	/* unsupported */
-		break;
+	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
+	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+		BUG();
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
-			BUG();
-		break;
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
-		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
-			BUG();
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
+{
+	int cpu = smp_processor_id();
+
+	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
+		BUG();
+
+	return 0;
 }
 
 static int xen_vcpuop_set_next_event(unsigned long delta,
@@ -359,7 +340,7 @@
 	struct vcpu_set_singleshot_timer single;
 	int ret;
 
-	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+	WARN_ON(!clockevent_state_oneshot(evt));
 
 	single.timeout_abs_ns = get_abs_timeout(delta);
 	single.flags = VCPU_SSHOTTMR_future;
@@ -382,7 +363,8 @@
 	.shift = 0,
 	.rating = 500,
 
-	.set_mode = xen_vcpuop_set_mode,
+	.set_state_shutdown = xen_vcpuop_shutdown,
+	.set_state_oneshot = xen_vcpuop_set_oneshot,
 	.set_next_event = xen_vcpuop_set_next_event,
 };
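
Both the UV RTC above and the Xen timers here are converted from the old set_mode()
multiplexer to the per-state clockevents callbacks (set_state_shutdown,
set_state_oneshot), which return int. A generic sketch of the new shape; the device
and callbacks are placeholders, not an existing driver:

	/* Sketch of a set_state_*() style clockevent (names are made up). */
	#include <linux/kernel.h>
	#include <linux/clockchips.h>

	static int example_shutdown(struct clock_event_device *evt)
	{
		/* Stop the hardware timer here. */
		return 0;
	}

	static int example_set_next_event(unsigned long delta,
					  struct clock_event_device *evt)
	{
		WARN_ON(!clockevent_state_oneshot(evt));
		/* Program a one-shot expiry 'delta' cycles from now. */
		return 0;
	}

	static struct clock_event_device example_clockevent = {
		.name			= "example",
		.features		= CLOCK_EVT_FEAT_ONESHOT,
		.rating			= 100,
		.set_state_shutdown	= example_shutdown,
		.set_next_event		= example_set_next_event,
	};
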
 
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e5b872b..3bd3504 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -14,12 +14,15 @@
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
+	select HAVE_DMA_API_DEBUG
+	select HAVE_DMA_ATTRS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_OPROFILE
 	select HAVE_PERF_EVENTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
+	select PERF_USE_VMALLOC
 	select VIRT_TO_BUS
 	help
 	  Xtensa processors are 32-bit RISC machines designed by Tensilica
@@ -61,9 +64,7 @@
 	def_bool y
 
 config MMU
-	bool
-	default n if !XTENSA_VARIANT_CUSTOM
-	default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM
+	def_bool n
 
 config VARIANT_IRQ_SWITCH
 	def_bool n
@@ -71,9 +72,6 @@
 config HAVE_XTENSA_GPIO32
 	def_bool n
 
-config MAY_HAVE_SMP
-	def_bool n
-
 menu "Processor type and features"
 
 choice
@@ -100,7 +98,6 @@
 
 config XTENSA_VARIANT_CUSTOM
 	bool "Custom Xtensa processor configuration"
-	select MAY_HAVE_SMP
 	select HAVE_XTENSA_GPIO32
 	help
 	  Select this variant to use a custom Xtensa processor configuration.
@@ -126,10 +123,21 @@
 	bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)"
 	depends on XTENSA_VARIANT_CUSTOM
 	default y
+	select MMU
 	help
 	  Build a Conventional Kernel with full MMU support,
 	  ie: it supports a TLB with auto-loading, page protection.
 
+config XTENSA_VARIANT_HAVE_PERF_EVENTS
+	bool "Core variant has Performance Monitor Module"
+	depends on XTENSA_VARIANT_CUSTOM
+	default n
+	help
+	  Enable this if the core variant has a Performance Monitor Module
+	  with an External Registers Interface.
+
+	  If unsure, say N.
+
 config XTENSA_UNALIGNED_USER
 	bool "Unaligned memory access in use space"
 	help
@@ -143,7 +151,7 @@
 
 config HAVE_SMP
 	bool "System Supports SMP (MX)"
-	depends on MAY_HAVE_SMP
+	depends on XTENSA_VARIANT_CUSTOM
 	select XTENSA_MX
 	help
 	  This option is use to indicate that the system-on-a-chip (SOC)
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index e4d193e..f3dfe0d 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -616,7 +616,6 @@
 # CONFIG_SLUB_DEBUG_ON is not set
 # CONFIG_SLUB_STATS is not set
 # CONFIG_DEBUG_RT_MUTEXES is not set
-# CONFIG_RT_MUTEX_TESTER is not set
 # CONFIG_DEBUG_SPINLOCK is not set
 # CONFIG_DEBUG_MUTEXES is not set
 # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 5b478ac..63c223d 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -2,7 +2,6 @@
 generic-y += bug.h
 generic-y += clkdev.h
 generic-y += cputime.h
-generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 00b7d46..93795d0 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -29,7 +29,7 @@
  *
  * Locking interrupts looks like this:
  *
- *    rsil a15, LOCKLEVEL
+ *    rsil a15, TOPLEVEL
  *    <code>
  *    wsr  a15, PS
  *    rsync
@@ -106,7 +106,7 @@
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"\
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"\
 			"       l32i    %0, %2, 0\n"			\
 			"       " #op " %0, %0, %1\n"			\
 			"       s32i    %0, %2, 0\n"			\
@@ -124,7 +124,7 @@
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	\
+			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
 			"       l32i    %0, %2, 0\n"			\
 			"       " #op " %0, %0, %1\n"			\
 			"       s32i    %0, %2, 0\n"			\
@@ -145,6 +145,10 @@
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
@@ -250,75 +254,6 @@
 	return c;
 }
 
-
-static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (~mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int all_f = -1;
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       xor     %1, %4, %3\n"
-			"       and     %0, %0, %4\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval), "=a" (mask)
-			: "a" (v), "a" (all_f), "1" (mask)
-			: "a15", "memory"
-			);
-#endif
-}
-
-static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
-{
-#if XCHAL_HAVE_S32C1I
-	unsigned long tmp;
-	int result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (mask), "a" (v)
-			: "memory"
-			);
-#else
-	unsigned int vval;
-
-	__asm__ __volatile__(
-			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"
-			"       l32i    %0, %2, 0\n"
-			"       or      %0, %0, %1\n"
-			"       s32i    %0, %2, 0\n"
-			"       wsr     a15, ps\n"
-			"       rsync\n"
-			: "=&a" (vval)
-			: "a" (mask), "a" (v)
-			: "a15", "memory"
-			);
-#endif
-}
-
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 370b26f..201e900 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -34,7 +34,7 @@
 	return new;
 #else
 	__asm__ __volatile__(
-			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
 			"       l32i    %0, %1, 0\n"
 			"       bne     %0, %2, 1f\n"
 			"       s32i    %3, %1, 0\n"
@@ -123,7 +123,7 @@
 #else
 	unsigned long tmp;
 	__asm__ __volatile__(
-			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
 			"       l32i    %0, %1, 0\n"
 			"       s32i    %2, %1, 0\n"
 			"       wsr     a15, ps\n"
diff --git a/arch/xtensa/include/asm/device.h b/arch/xtensa/include/asm/device.h
new file mode 100644
index 0000000..fe1f5c8
--- /dev/null
+++ b/arch/xtensa/include/asm/device.h
@@ -0,0 +1,19 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_XTENSA_DEVICE_H
+#define _ASM_XTENSA_DEVICE_H
+
+struct dma_map_ops;
+
+struct dev_archdata {
+	/* DMA operations on that device */
+	struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_XTENSA_DEVICE_H */
diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
index 1f5f6dc..f01cb30 100644
--- a/arch/xtensa/include/asm/dma-mapping.h
+++ b/arch/xtensa/include/asm/dma-mapping.h
@@ -1,11 +1,10 @@
 /*
- * include/asm-xtensa/dma-mapping.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_DMA_MAPPING_H
@@ -13,142 +12,67 @@
 
 #include <asm/cache.h>
 #include <asm/io.h>
+
+#include <asm-generic/dma-coherent.h>
+
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
-/*
- * DMA-consistent mapping functions.
- */
+extern struct dma_map_ops xtensa_dma_map_ops;
 
-extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
-extern void consistent_free(void*, size_t, dma_addr_t);
-extern void consistent_sync(void*, size_t, int);
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle);
-
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	BUG_ON(direction == DMA_NONE);
-	consistent_sync(ptr, size, direction);
-	return virt_to_phys(ptr);
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	else
+		return &xtensa_dma_map_ops;
 }
 
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
+#include <asm-generic/dma-mapping-common.h>
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	BUG_ON(direction == DMA_NONE);
+	void *ret;
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+		return ret;
+
+	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
+
+	return ret;
 }
 
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
-	int i;
-	struct scatterlist *sg;
+	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
 
-	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg->dma_address = sg_phys(sg);
-		consistent_sync(sg_virt(sg), sg->length, direction);
-	}
-
-	return nents;
+	ops->free(dev, size, vaddr, dma_handle, attrs);
+	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
 
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-		enum dma_data_direction direction)
-{
-	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-		           size_t size, enum dma_data_direction direction)
-{
-	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
-{
-
-	consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		      unsigned long offset, size_t size,
-		      enum dma_data_direction direction)
-{
-
-	consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
-}
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
-		 enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		consistent_sync(sg_virt(sg), sg->length, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
-		       int nelems, enum dma_data_direction dir)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sglist, sg, nelems, i)
-		consistent_sync(sg_virt(sg), sg->length, dir);
-}
 static inline int
 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return 0;
+	struct dma_map_ops *ops = get_dma_ops(dev);
+
+	debug_dma_mapping_error(dev, dma_addr);
+	return ops->mapping_error(dev, dma_addr);
 }
 
 static inline int
@@ -168,39 +92,7 @@
 	return 0;
 }
 
-static inline void
-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	       enum dma_data_direction direction)
-{
-	consistent_sync(vaddr, size, direction);
-}
-
-/* Not supported for now */
-static inline int dma_mmap_coherent(struct device *dev,
-				    struct vm_area_struct *vma, void *cpu_addr,
-				    dma_addr_t dma_addr, size_t size)
-{
-	return -EINVAL;
-}
-
-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size)
-{
-	return -EINVAL;
-}
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	return NULL;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-}
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction direction);
 
 #endif	/* _XTENSA_DMA_MAPPING_H */
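
dma_alloc_attrs()/dma_free_attrs() now dispatch through get_dma_ops(), which falls
back to the global xtensa_dma_map_ops unless the device has installed its own
operations in archdata. A sketch of such an override; my_custom_dma_ops is a
hypothetical implementation, not something defined in this patch:

	/* Sketch: per-device DMA ops override (my_custom_dma_ops is hypothetical). */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	extern struct dma_map_ops my_custom_dma_ops;

	static void example_bind_dma_ops(struct device *dev)
	{
		/* Left NULL, archdata.dma_ops falls back to xtensa_dma_map_ops. */
		dev->archdata.dma_ops = &my_custom_dma_ops;
	}
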
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index ea36674..8e090c7 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -6,6 +6,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_IRQFLAGS_H
@@ -23,8 +24,27 @@
 static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	asm volatile("rsil %0, "__stringify(LOCKLEVEL)
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	unsigned long tmp;
+
+	asm volatile("rsr	%0, ps\t\n"
+		     "extui	%1, %0, 0, 4\t\n"
+		     "bgei	%1, "__stringify(LOCKLEVEL)", 1f\t\n"
+		     "rsil	%0, "__stringify(LOCKLEVEL)"\n"
+		     "1:"
+		     : "=a" (flags), "=a" (tmp) :: "memory");
+#else
+	asm volatile("rsr	%0, ps\t\n"
+		     "or	%0, %0, %1\t\n"
+		     "xsr	%0, ps\t\n"
+		     "rsync"
+		     : "=&a" (flags) : "a" (LOCKLEVEL) : "memory");
+#endif
+#else
+	asm volatile("rsil	%0, "__stringify(LOCKLEVEL)
 		     : "=a" (flags) :: "memory");
+#endif
 	return flags;
 }
 
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index b61bdf0..83e2e4bc 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -1,11 +1,10 @@
 /*
- * include/asm-xtensa/processor.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2001 - 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_PROCESSOR_H
@@ -45,6 +44,14 @@
 #define STACK_TOP_MAX	STACK_TOP
 
 /*
+ * General exception cause assigned to fake NMI. Fake NMI needs to be handled
+ * differently from other interrupts, but it uses common kernel entry/exit
+ * code.
+ */
+
+#define EXCCAUSE_MAPPED_NMI	62
+
+/*
  * General exception cause assigned to debug exceptions. Debug exceptions go
  * to their own vector, rather than the general exception vectors (user,
  * kernel, double); and their specific causes are reported via DEBUGCAUSE
@@ -65,10 +72,30 @@
 
 #define VALID_DOUBLE_EXCEPTION_ADDRESS	64
 
+#define XTENSA_INT_LEVEL(intno) _XTENSA_INT_LEVEL(intno)
+#define _XTENSA_INT_LEVEL(intno) XCHAL_INT##intno##_LEVEL
+
+#define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
+#define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
+
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
+
 /* LOCKLEVEL defines the interrupt level that masks all
  * general-purpose interrupts.
  */
+#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
+	defined(XCHAL_PROFILING_INTERRUPT) && \
+	PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+	XCHAL_EXCM_LEVEL > 1 && \
+	IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
+#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#else
 #define LOCKLEVEL XCHAL_EXCM_LEVEL
+#endif
+#define TOPLEVEL XCHAL_EXCM_LEVEL
+#define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
 
 /* WSBITS and WBBITS are the width of the WINDOWSTART and WINDOWBASE
  * registers
diff --git a/arch/xtensa/include/asm/stacktrace.h b/arch/xtensa/include/asm/stacktrace.h
index 6a05fcb..fe06e8e 100644
--- a/arch/xtensa/include/asm/stacktrace.h
+++ b/arch/xtensa/include/asm/stacktrace.h
@@ -33,4 +33,12 @@
 		int (*fn)(struct stackframe *frame, void *data),
 		void *data);
 
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+			     int (*kfn)(struct stackframe *frame, void *data),
+			     int (*ufn)(struct stackframe *frame, void *data),
+			     void *data);
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+			   int (*ufn)(struct stackframe *frame, void *data),
+			   void *data);
+
 #endif /* _XTENSA_STACKTRACE_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 677bfcf..28f33a8 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -25,30 +25,39 @@
 {
 #if XCHAL_NUM_AREGS > 16
 	__asm__ __volatile__ (
-		"	call12	1f\n"
+		"	call8	1f\n"
 		"	_j	2f\n"
 		"	retw\n"
 		"	.align	4\n"
 		"1:\n"
+#if XCHAL_NUM_AREGS == 32
+		"	_entry	a1, 32\n"
+		"	addi	a8, a0, 3\n"
+		"	_entry	a1, 16\n"
+		"	mov	a12, a12\n"
+		"	retw\n"
+#else
 		"	_entry	a1, 48\n"
-		"	addi	a12, a0, 3\n"
-#if XCHAL_NUM_AREGS > 32
-		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+		"	call12	1f\n"
+		"	retw\n"
+		"	.align	4\n"
+		"1:\n"
+		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
 		"	_entry	a1, 48\n"
 		"	mov	a12, a0\n"
 		"	.endr\n"
-#endif
-		"	_entry	a1, 48\n"
+		"	_entry	a1, 16\n"
 #if XCHAL_NUM_AREGS % 12 == 0
-		"	mov	a8, a8\n"
-#elif XCHAL_NUM_AREGS % 12 == 4
 		"	mov	a12, a12\n"
-#elif XCHAL_NUM_AREGS % 12 == 8
+#elif XCHAL_NUM_AREGS % 12 == 4
 		"	mov	a4, a4\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+		"	mov	a8, a8\n"
 #endif
 		"	retw\n"
+#endif
 		"2:\n"
-		: : : "a12", "a13", "memory");
+		: : : "a8", "a9", "memory");
 #else
 	__asm__ __volatile__ (
 		"	mov	a12, a12\n"
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index d3a0f0f..50137bc 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
 obj-$(CONFIG_SMP) += smp.o mxhead.o
+obj-$(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) += perf_event.o
 
 AFLAGS_head.o += -mtext-section-literals
 
@@ -27,10 +28,11 @@
 #
 # Replicate rules in scripts/Makefile.build
 
-sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
-	-e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g'	 \
-	-e 's/\*(\(\.text .*\))/*(.literal \1)/g'			 \
-	-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
+sed-y = -e ':a; s/\*(\([^)]*\)\.text\.unlikely/*(\1.literal.unlikely .{text}.unlikely/; ta; ' \
+	-e ':b; s/\*(\([^)]*\)\.text\(\.[a-z]*\)/*(\1.{text}\2.literal .{text}\2/; tb; ' \
+	-e ':c; s/\*(\([^)]*\)\(\.[a-z]*it\|\.ref\)\.text/*(\1\2.literal \2.{text}/; tc; ' \
+	-e ':d; s/\*(\([^)]\+ \|\)\.text/*(\1.literal .{text}/; td; ' \
+	-e 's/\.{text}/.text/g'
 
 quiet_cmd__cpp_lds_S = LDS     $@
 cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $<    \
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 82bbfa5..5041303 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1,6 +1,4 @@
 /*
- * arch/xtensa/kernel/entry.S
- *
  * Low-level exception handling
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -8,6 +6,7 @@
  * for more details.
  *
  * Copyright (C) 2004 - 2008 by Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  *
  * Chris Zankel <chris@zankel.net>
  *
@@ -75,6 +74,27 @@
 #endif
 	.endm
 
+
+	.macro	irq_save flags tmp
+#if XTENSA_FAKE_NMI
+#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+	rsr	\flags, ps
+	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	\tmp, LOCKLEVEL, 99f
+	rsil	\tmp, LOCKLEVEL
+99:
+#else
+	movi	\tmp, LOCKLEVEL
+	rsr	\flags, ps
+	or	\flags, \flags, \tmp
+	xsr	\flags, ps
+	rsync
+#endif
+#else
+	rsil	\flags, LOCKLEVEL
+#endif
+	.endm
+
 /* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */
 
 /*
@@ -122,6 +142,7 @@
 	/* Save SAR and turn off single stepping */
 
 	movi	a2, 0
+	wsr	a2, depc		# terminate user stack trace with 0
 	rsr	a3, sar
 	xsr	a2, icountlevel
 	s32i	a3, a1, PT_SAR
@@ -301,7 +322,18 @@
 	s32i	a14, a1, PT_AREG14
 	s32i	a15, a1, PT_AREG15
 
+	_bnei	a2, 1, 1f
+
+	/* Copy spill slots of a0 and a1 to imitate movsp
+	 * in order to keep exception stack continuous
+	 */
+	l32i	a3, a1, PT_SIZE
+	l32i	a0, a1, PT_SIZE + 4
+	s32e	a3, a1, -16
+	s32e	a0, a1, -12
 1:
+	l32i	a0, a1, PT_AREG0	# restore saved a0
+	wsr	a0, depc
 
 #ifdef KERNEL_STACK_OVERFLOW_CHECK
 
@@ -340,75 +372,88 @@
 
 	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
 
-	rsr	a0, exccause
+	rsr	a2, exccause
 	movi	a3, 0
-	rsr	a2, excsave1
-	s32i	a0, a1, PT_EXCCAUSE
-	s32i	a3, a2, EXC_TABLE_FIXUP
+	rsr	a0, excsave1
+	s32i	a2, a1, PT_EXCCAUSE
+	s32i	a3, a0, EXC_TABLE_FIXUP
 
-	/* All unrecoverable states are saved on stack, now, and a1 is valid,
-	 * so we can allow exceptions and interrupts (*) again.
-	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
+	/* All unrecoverable states are saved on stack, now, and a1 is valid.
+	 * Now we can allow exceptions again. In case we've got an interrupt,
+	 * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
+	 * otherwise it's left unchanged.
 	 *
-	 * (*) We only allow interrupts if they were previously enabled and
-	 *     we're not handling an IRQ
+	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
 	 */
 
 	rsr	a3, ps
-	addi	a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT
-	movi	a2, LOCKLEVEL
+	s32i	a3, a1, PT_PS		# save ps
+
+#if XTENSA_FAKE_NMI
+	/* Correct PS needs to be saved in the PT_PS:
+	 * - in case of exception or level-1 interrupt it's in the PS,
+	 *   and is already saved.
+	 * - in case of medium level interrupt it's in the excsave2.
+	 */
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	beq	a2, a0, .Lmedium_level_irq
+	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
+	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0
+
+.Lmedium_level_irq:
+	rsr	a0, excsave2
+	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
+	bgei	a3, LOCKLEVEL, .Lexception
+
+.Llevel1_irq:
+	movi	a3, LOCKLEVEL
+
+.Lexception:
+	movi	a0, 1 << PS_WOE_BIT
+	or	a3, a3, a0
+#else
+	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
+	movi	a0, LOCKLEVEL
 	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
 					# a3 = PS.INTLEVEL
-	moveqz	a3, a2, a0		# a3 = LOCKLEVEL iff interrupt
+	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
 	movi	a2, 1 << PS_WOE_BIT
 	or	a3, a3, a2
-	rsr	a0, exccause
-	xsr	a3, ps
+	rsr	a2, exccause
+#endif
 
-	s32i	a3, a1, PT_PS		# save ps
+	/* restore return address (or 0 if return to userspace) */
+	rsr	a0, depc
+	wsr	a3, ps
+	rsync				# PS.WOE => rsync => overflow
 
 	/* Save lbeg, lend */
 
-	rsr	a2, lbeg
+	rsr	a4, lbeg
 	rsr	a3, lend
-	s32i	a2, a1, PT_LBEG
+	s32i	a4, a1, PT_LBEG
 	s32i	a3, a1, PT_LEND
 
 	/* Save SCOMPARE1 */
 
 #if XCHAL_HAVE_S32C1I
-	rsr     a2, scompare1
-	s32i    a2, a1, PT_SCOMPARE1
+	rsr     a3, scompare1
+	s32i    a3, a1, PT_SCOMPARE1
 #endif
 
 	/* Save optional registers. */
 
-	save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
+	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
 	
-#ifdef CONFIG_TRACE_IRQFLAGS
-	l32i	a4, a1, PT_DEPC
-	/* Double exception means we came here with an exception
-	 * while PS.EXCM was set, i.e. interrupts disabled.
-	 */
-	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-	l32i	a4, a1, PT_EXCCAUSE
-	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
-	/* We came here with an interrupt means interrupts were enabled
-	 * and we've just disabled them.
-	 */
-	movi	a4, trace_hardirqs_off
-	callx4	a4
-1:
-#endif
-
 	/* Go to second-level dispatcher. Set up parameters to pass to the
 	 * exception handler and call the exception handler.
 	 */
 
 	rsr	a4, excsave1
 	mov	a6, a1			# pass stack frame
-	mov	a7, a0			# pass EXCCAUSE
-	addx4	a4, a0, a4
+	mov	a7, a2			# pass EXCCAUSE
+	addx4	a4, a2, a4
 	l32i	a4, a4, EXC_TABLE_DEFAULT		# load handler
 
 	/* Call the second-level handler */
@@ -419,8 +464,17 @@
 	.global common_exception_return
 common_exception_return:
 
+#if XTENSA_FAKE_NMI
+	l32i	a2, a1, PT_EXCCAUSE
+	movi	a3, EXCCAUSE_MAPPED_NMI
+	beq	a2, a3, .LNMIexit
+#endif
 1:
-	rsil	a2, LOCKLEVEL
+	irq_save a2, a3
+#ifdef CONFIG_TRACE_IRQFLAGS
+	movi	a4, trace_hardirqs_off
+	callx4	a4
+#endif
 
 	/* Jump if we are returning from kernel exceptions. */
 
@@ -445,6 +499,10 @@
 
 	/* Call do_signal() */
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	movi	a4, trace_hardirqs_on
+	callx4	a4
+#endif
 	rsil	a2, 0
 	movi	a4, do_notify_resume	# int do_notify_resume(struct pt_regs*)
 	mov	a6, a1
@@ -453,6 +511,10 @@
 
 3:	/* Reschedule */
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	movi	a4, trace_hardirqs_on
+	callx4	a4
+#endif
 	rsil	a2, 0
 	movi	a4, schedule	# void schedule (void)
 	callx4	a4
@@ -471,6 +533,12 @@
 	j	1b
 #endif
 
+#if XTENSA_FAKE_NMI
+.LNMIexit:
+	l32i	a3, a1, PT_PS
+	_bbci.l	a3, PS_UM_BIT, 4f
+#endif
+
 5:
 #ifdef CONFIG_DEBUG_TLB_SANITY
 	l32i	a4, a1, PT_DEPC
@@ -481,16 +549,8 @@
 6:
 4:
 #ifdef CONFIG_TRACE_IRQFLAGS
-	l32i	a4, a1, PT_DEPC
-	/* Double exception means we came here with an exception
-	 * while PS.EXCM was set, i.e. interrupts disabled.
-	 */
-	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-	l32i	a4, a1, PT_EXCCAUSE
-	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
-	/* We came here with an interrupt means interrupts were enabled
-	 * and we'll reenable them on return.
-	 */
+	extui	a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
+	bgei	a4, LOCKLEVEL, 1f
 	movi	a4, trace_hardirqs_on
 	callx4	a4
 1:
@@ -568,12 +628,13 @@
 	 *	 (if we have restored WSBITS-1 frames).
 	 */
 
+2:
 #if XCHAL_HAVE_THREADPTR
 	l32i	a3, a1, PT_THREADPTR
 	wur	a3, threadptr
 #endif
 
-2:	j	common_exception_exit
+	j	common_exception_exit
 
 	/* This is the kernel exception exit.
 	 * We avoided to do a MOVSP when we entered the exception, but we
@@ -1561,6 +1622,13 @@
 	rfde
 
 9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
+	bnez	a0, 8b
+
+	/* Even more unlikely case active_mm == 0.
+	 * We can get here with NMI in the middle of context_switch that
+	 * touches vmalloc area.
+	 */
+	movi	a0, init_mm
 	j	8b
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -1820,7 +1888,7 @@
 	mov	a12, a0
 	.endr
 #endif
-	_entry	a1, 48
+	_entry	a1, 16
 #if XCHAL_NUM_AREGS % 12 == 0
 	mov	a8, a8
 #elif XCHAL_NUM_AREGS % 12 == 4
@@ -1844,7 +1912,7 @@
 
 ENTRY(_switch_to)
 
-	entry	a1, 16
+	entry	a1, 48
 
 	mov	a11, a3			# and 'next' (a3)
 
@@ -1864,10 +1932,8 @@
 
 	/* Disable ints while we manipulate the stack pointer. */
 
-	rsil	a14, LOCKLEVEL
-	rsr	a3, excsave1
+	irq_save a14, a3
 	rsync
-	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */
 
 	/* Switch CPENABLE */
 
@@ -1888,9 +1954,7 @@
 	 */
 
 	rsr	a3, excsave1		# exc_table
-	movi	a6, 0
 	addi	a7, a5, PT_REGS_OFFSET
-	s32i	a6, a3, EXC_TABLE_FIXUP
 	s32i	a7, a3, EXC_TABLE_KSTK
 
 	/* restore context of the task 'next' */
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 3eee94f..4ac3d23 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -28,7 +28,7 @@
 #include <asm/uaccess.h>
 #include <asm/platform.h>
 
-atomic_t irq_err_count;
+DECLARE_PER_CPU(unsigned long, nmi_count);
 
 asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
 {
@@ -57,11 +57,16 @@
 
 int arch_show_interrupts(struct seq_file *p, int prec)
 {
+	unsigned cpu __maybe_unused;
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
 #endif
-	seq_printf(p, "%*s: ", prec, "ERR");
-	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
+#if XTENSA_FAKE_NMI
+	seq_printf(p, "%*s:", prec, "NMI");
+	for_each_online_cpu(cpu)
+		seq_printf(p, " %10lu", per_cpu(nmi_count, cpu));
+	seq_puts(p, "   Non-maskable interrupts\n");
+#endif
 	return 0;
 }
 
@@ -106,6 +111,12 @@
 		irq_set_chip_and_handler_name(irq, irq_chip,
 				handle_percpu_irq, "timer");
 		irq_clear_status_flags(irq, IRQ_LEVEL);
+#ifdef XCHAL_INTTYPE_MASK_PROFILING
+	} else if (mask & XCHAL_INTTYPE_MASK_PROFILING) {
+		irq_set_chip_and_handler_name(irq, irq_chip,
+				handle_percpu_irq, "profiling");
+		irq_set_status_flags(irq, IRQ_LEVEL);
+#endif
 	} else {/* XCHAL_INTTYPE_MASK_WRITE_ERROR */
 		/* XCHAL_INTTYPE_MASK_NMI */
 		irq_set_chip_and_handler_name(irq, irq_chip,
@@ -166,23 +177,25 @@
 
 	for_each_active_irq(i) {
 		struct irq_data *data = irq_get_irq_data(i);
+		struct cpumask *mask;
 		unsigned int newcpu;
 
 		if (irqd_is_per_cpu(data))
 			continue;
 
-		if (!cpumask_test_cpu(cpu, data->affinity))
+		mask = irq_data_get_affinity_mask(data);
+		if (!cpumask_test_cpu(cpu, mask))
 			continue;
 
-		newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+		newcpu = cpumask_any_and(mask, cpu_online_mask);
 
 		if (newcpu >= nr_cpu_ids) {
 			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
 					    i, cpu);
 
-			cpumask_setall(data->affinity);
+			cpumask_setall(mask);
 		}
-		irq_set_affinity(i, data->affinity);
+		irq_set_affinity(i, mask);
 	}
 }
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
index e8b76b8..fb75ebf 100644
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -1,6 +1,4 @@
 /*
- * arch/xtensa/kernel/pci-dma.c
- *
  * DMA coherent memory allocation.
  *
  * This program is free software; you can redistribute  it and/or modify it
@@ -9,6 +7,7 @@
  * option) any later version.
  *
  * Copyright (C) 2002 - 2005 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  *
  * Based on version for i386.
  *
@@ -25,13 +24,107 @@
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+		    enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_FROM_DEVICE:
+		__invalidate_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_TO_DEVICE:
+		__flush_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+	}
+}
+EXPORT_SYMBOL(dma_cache_sync);
+
+static void xtensa_sync_single_for_cpu(struct device *dev,
+				       dma_addr_t dma_handle, size_t size,
+				       enum dma_data_direction dir)
+{
+	void *vaddr;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_FROM_DEVICE:
+		vaddr = bus_to_virt(dma_handle);
+		__invalidate_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void xtensa_sync_single_for_device(struct device *dev,
+					  dma_addr_t dma_handle, size_t size,
+					  enum dma_data_direction dir)
+{
+	void *vaddr;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_TO_DEVICE:
+		vaddr = bus_to_virt(dma_handle);
+		__flush_dcache_range((unsigned long)vaddr, size);
+		break;
+
+	case DMA_NONE:
+		BUG();
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void xtensa_sync_sg_for_cpu(struct device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
+					   sg_dma_len(s), dir);
+	}
+}
+
+static void xtensa_sync_sg_for_device(struct device *dev,
+				      struct scatterlist *sg, int nents,
+				      enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		xtensa_sync_single_for_device(dev, sg_dma_address(s),
+					      sg_dma_len(s), dir);
+	}
+}
+
 /*
  * Note: We assume that the full memory space is always mapped to 'kseg'
  *	 Otherwise we have to use page attributes (not implemented).
  */
 
-void *
-dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag)
+static void *xtensa_dma_alloc(struct device *dev, size_t size,
+			      dma_addr_t *handle, gfp_t flag,
+			      struct dma_attrs *attrs)
 {
 	unsigned long ret;
 	unsigned long uncached = 0;
@@ -52,20 +145,15 @@
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
+	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
+	*handle = virt_to_bus((void *)ret);
+	__invalidate_dcache_range(ret, size);
 
-	if (ret != 0) {
-		memset((void*) ret, 0, size);
-		uncached = ret+XCHAL_KSEG_BYPASS_VADDR-XCHAL_KSEG_CACHED_VADDR;
-		*handle = virt_to_bus((void*)ret);
-		__flush_invalidate_dcache_range(ret, size);
-	}
-
-	return (void*)uncached;
+	return (void *)uncached;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *hwdev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
+static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
+			    dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long)vaddr +
 		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
@@ -75,24 +163,79 @@
 
 	free_pages(addr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-
-void consistent_sync(void *vaddr, size_t size, int direction)
+static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
+				  unsigned long offset, size_t size,
+				  enum dma_data_direction dir,
+				  struct dma_attrs *attrs)
 {
-	switch (direction) {
-	case PCI_DMA_NONE:
-		BUG();
-	case PCI_DMA_FROMDEVICE:        /* invalidate only */
-		__invalidate_dcache_range((unsigned long)vaddr,
-				          (unsigned long)size);
-		break;
+	dma_addr_t dma_handle = page_to_phys(page) + offset;
 
-	case PCI_DMA_TODEVICE:          /* writeback only */
-	case PCI_DMA_BIDIRECTIONAL:     /* writeback and invalidate */
-		__flush_invalidate_dcache_range((unsigned long)vaddr,
-				    		(unsigned long)size);
-		break;
+	BUG_ON(PageHighMem(page));
+	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
+	return dma_handle;
+}
+
+static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
+			      size_t size, enum dma_data_direction dir,
+			      struct dma_attrs *attrs)
+{
+	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
+}
+
+static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
+			 int nents, enum dma_data_direction dir,
+			 struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
+						 s->length, dir, attrs);
+	}
+	return nents;
+}
+
+static void xtensa_unmap_sg(struct device *dev,
+			    struct scatterlist *sg, int nents,
+			    enum dma_data_direction dir,
+			    struct dma_attrs *attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		xtensa_unmap_page(dev, sg_dma_address(s),
+				  sg_dma_len(s), dir, attrs);
 	}
 }
-EXPORT_SYMBOL(consistent_sync);
+
+int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return 0;
+}
+
+struct dma_map_ops xtensa_dma_map_ops = {
+	.alloc = xtensa_dma_alloc,
+	.free = xtensa_dma_free,
+	.map_page = xtensa_map_page,
+	.unmap_page = xtensa_unmap_page,
+	.map_sg = xtensa_map_sg,
+	.unmap_sg = xtensa_unmap_sg,
+	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
+	.sync_single_for_device = xtensa_sync_single_for_device,
+	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
+	.sync_sg_for_device = xtensa_sync_sg_for_device,
+	.mapping_error = xtensa_dma_mapping_error,
+};
+EXPORT_SYMBOL(xtensa_dma_map_ops);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+
+static int __init xtensa_dma_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(xtensa_dma_init);
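
The conversion above routes all streaming-DMA traffic through xtensa_dma_map_ops. A minimal driver-side sketch, assuming a hypothetical struct device *dev and an illustrative buffer (names not part of this patch), of how the generic helpers reach these callbacks:

/* Illustrative sketch only: driver-side view of the streaming DMA API,
 * which now dispatches into xtensa_dma_map_ops defined above.
 */
#include <linux/dma-mapping.h>

static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_map_single() ends up in xtensa_map_page(), which calls
	 * xtensa_sync_single_for_device() to write back the cache lines.
	 */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* xtensa_dma_mapping_error() */
		return -ENOMEM;

	/* ... program the hardware with "handle" and wait for completion ... */

	/* dma_unmap_single() ends up in xtensa_unmap_page(), which calls
	 * xtensa_sync_single_for_cpu() (a no-op for DMA_TO_DEVICE).
	 */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
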
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index b848cc3..d27b4dc 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -210,10 +210,6 @@
 
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
-	if (bus->parent) {
-		/* This is a subordinate bridge */
-		pci_read_bridge_bases(bus);
-	}
 }
 
 void pcibios_set_master(struct pci_dev *dev)
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
new file mode 100644
index 0000000..54f0118
--- /dev/null
+++ b/arch/xtensa/kernel/perf_event.c
@@ -0,0 +1,454 @@
+/*
+ * Xtensa Performance Monitor Module driver
+ * See Tensilica Debug User's Guide for PMU registers documentation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
+
+/* Global control/status for all perf counters */
+#define XTENSA_PMU_PMG			0x1000
+/* Perf counter values */
+#define XTENSA_PMU_PM(i)		(0x1080 + (i) * 4)
+/* Perf counter control registers */
+#define XTENSA_PMU_PMCTRL(i)		(0x1100 + (i) * 4)
+/* Perf counter status registers */
+#define XTENSA_PMU_PMSTAT(i)		(0x1180 + (i) * 4)
+
+#define XTENSA_PMU_PMG_PMEN		0x1
+
+#define XTENSA_PMU_COUNTER_MASK		0xffffffffULL
+#define XTENSA_PMU_COUNTER_MAX		0x7fffffff
+
+#define XTENSA_PMU_PMCTRL_INTEN		0x00000001
+#define XTENSA_PMU_PMCTRL_KRNLCNT	0x00000008
+#define XTENSA_PMU_PMCTRL_TRACELEVEL	0x000000f0
+#define XTENSA_PMU_PMCTRL_SELECT_SHIFT	8
+#define XTENSA_PMU_PMCTRL_SELECT	0x00001f00
+#define XTENSA_PMU_PMCTRL_MASK_SHIFT	16
+#define XTENSA_PMU_PMCTRL_MASK		0xffff0000
+
+#define XTENSA_PMU_MASK(select, mask) \
+	(((select) << XTENSA_PMU_PMCTRL_SELECT_SHIFT) | \
+	 ((mask) << XTENSA_PMU_PMCTRL_MASK_SHIFT) | \
+	 XTENSA_PMU_PMCTRL_TRACELEVEL | \
+	 XTENSA_PMU_PMCTRL_INTEN)
+
+#define XTENSA_PMU_PMSTAT_OVFL		0x00000001
+#define XTENSA_PMU_PMSTAT_INTASRT	0x00000010
+
+struct xtensa_pmu_events {
+	/* Array of events currently on this core */
+	struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
+	/* Bitmap of used hardware counters */
+	unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
+};
+static DEFINE_PER_CPU(struct xtensa_pmu_events, xtensa_pmu_events);
+
+static const u32 xtensa_hw_ctl[] = {
+	[PERF_COUNT_HW_CPU_CYCLES]		= XTENSA_PMU_MASK(0, 0x1),
+	[PERF_COUNT_HW_INSTRUCTIONS]		= XTENSA_PMU_MASK(2, 0xffff),
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= XTENSA_PMU_MASK(10, 0x1),
+	[PERF_COUNT_HW_CACHE_MISSES]		= XTENSA_PMU_MASK(12, 0x1),
+	/* Taken and non-taken branches + taken loop ends */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XTENSA_PMU_MASK(2, 0x490),
+	/* Instruction-related + other global stall cycles */
+	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XTENSA_PMU_MASK(4, 0x1ff),
+	/* Data-related global stall cycles */
+	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= XTENSA_PMU_MASK(3, 0x1ff),
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+
+static const u32 xtensa_cache_ctl[][C(OP_MAX)][C(RESULT_MAX)] = {
+	[C(L1D)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(10, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(10, 0x2),
+		},
+		[C(OP_WRITE)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(11, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(11, 0x2),
+		},
+	},
+	[C(L1I)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(8, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(8, 0x2),
+		},
+	},
+	[C(DTLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(9, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(9, 0x8),
+		},
+	},
+	[C(ITLB)] = {
+		[C(OP_READ)] = {
+			[C(RESULT_ACCESS)]	= XTENSA_PMU_MASK(7, 0x1),
+			[C(RESULT_MISS)]	= XTENSA_PMU_MASK(7, 0x8),
+		},
+	},
+};
+
+static int xtensa_pmu_cache_event(u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result;
+	int ret;
+
+	cache_type = (config >>  0) & 0xff;
+	cache_op = (config >>  8) & 0xff;
+	cache_result = (config >> 16) & 0xff;
+
+	if (cache_type >= ARRAY_SIZE(xtensa_cache_ctl) ||
+	    cache_op >= C(OP_MAX) ||
+	    cache_result >= C(RESULT_MAX))
+		return -EINVAL;
+
+	ret = xtensa_cache_ctl[cache_type][cache_op][cache_result];
+
+	if (ret == 0)
+		return -EINVAL;
+
+	return ret;
+}
+
+static inline uint32_t xtensa_pmu_read_counter(int idx)
+{
+	return get_er(XTENSA_PMU_PM(idx));
+}
+
+static inline void xtensa_pmu_write_counter(int idx, uint32_t v)
+{
+	set_er(v, XTENSA_PMU_PM(idx));
+}
+
+static void xtensa_perf_event_update(struct perf_event *event,
+				     struct hw_perf_event *hwc, int idx)
+{
+	uint64_t prev_raw_count, new_raw_count;
+	int64_t delta;
+
+	do {
+		prev_raw_count = local64_read(&hwc->prev_count);
+		new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
+	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+				 new_raw_count) != prev_raw_count);
+
+	delta = (new_raw_count - prev_raw_count) & XTENSA_PMU_COUNTER_MASK;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+}
+
+static bool xtensa_perf_event_set_period(struct perf_event *event,
+					 struct hw_perf_event *hwc, int idx)
+{
+	bool rc = false;
+	s64 left;
+
+	if (!is_sampling_event(event)) {
+		left = XTENSA_PMU_COUNTER_MAX;
+	} else {
+		s64 period = hwc->sample_period;
+
+		left = local64_read(&hwc->period_left);
+		if (left <= -period) {
+			left = period;
+			local64_set(&hwc->period_left, left);
+			hwc->last_period = period;
+			rc = true;
+		} else if (left <= 0) {
+			left += period;
+			local64_set(&hwc->period_left, left);
+			hwc->last_period = period;
+			rc = true;
+		}
+		if (left > XTENSA_PMU_COUNTER_MAX)
+			left = XTENSA_PMU_COUNTER_MAX;
+	}
+
+	local64_set(&hwc->prev_count, -left);
+	xtensa_pmu_write_counter(idx, -left);
+	perf_event_update_userpage(event);
+
+	return rc;
+}
+
+static void xtensa_pmu_enable(struct pmu *pmu)
+{
+	set_er(get_er(XTENSA_PMU_PMG) | XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static void xtensa_pmu_disable(struct pmu *pmu)
+{
+	set_er(get_er(XTENSA_PMU_PMG) & ~XTENSA_PMU_PMG_PMEN, XTENSA_PMU_PMG);
+}
+
+static int xtensa_pmu_event_init(struct perf_event *event)
+{
+	int ret;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
+		    xtensa_hw_ctl[event->attr.config] == 0)
+			return -EINVAL;
+		event->hw.config = xtensa_hw_ctl[event->attr.config];
+		return 0;
+
+	case PERF_TYPE_HW_CACHE:
+		ret = xtensa_pmu_cache_event(event->attr.config);
+		if (ret < 0)
+			return ret;
+		event->hw.config = ret;
+		return 0;
+
+	case PERF_TYPE_RAW:
+		/* Not 'previous counter' select */
+		if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
+		    (1 << XTENSA_PMU_PMCTRL_SELECT_SHIFT))
+			return -EINVAL;
+		event->hw.config = (event->attr.config &
+				    (XTENSA_PMU_PMCTRL_KRNLCNT |
+				     XTENSA_PMU_PMCTRL_TRACELEVEL |
+				     XTENSA_PMU_PMCTRL_SELECT |
+				     XTENSA_PMU_PMCTRL_MASK)) |
+			XTENSA_PMU_PMCTRL_INTEN;
+		return 0;
+
+	default:
+		return -ENOENT;
+	}
+}
+
+/*
+ * Starts/Stops a counter present on the PMU. The PMI handler
+ * should stop the counter when perf_event_overflow() returns
+ * !0. ->start() will be used to continue.
+ */
+static void xtensa_pmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD) {
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+		xtensa_perf_event_set_period(event, hwc, idx);
+	}
+
+	hwc->state = 0;
+
+	set_er(hwc->config, XTENSA_PMU_PMCTRL(idx));
+}
+
+static void xtensa_pmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		set_er(0, XTENSA_PMU_PMCTRL(idx));
+		set_er(get_er(XTENSA_PMU_PMSTAT(idx)),
+		       XTENSA_PMU_PMSTAT(idx));
+		hwc->state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) &&
+	    !(event->hw.state & PERF_HES_UPTODATE)) {
+		xtensa_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+/*
+ * Adds/Removes a counter to/from the PMU, can be done inside
+ * a transaction, see the ->*_txn() methods.
+ */
+static int xtensa_pmu_add(struct perf_event *event, int flags)
+{
+	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (__test_and_set_bit(idx, ev->used_mask)) {
+		idx = find_first_zero_bit(ev->used_mask,
+					  XCHAL_NUM_PERF_COUNTERS);
+		if (idx == XCHAL_NUM_PERF_COUNTERS)
+			return -EAGAIN;
+
+		__set_bit(idx, ev->used_mask);
+		hwc->idx = idx;
+	}
+	ev->event[idx] = event;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	if (flags & PERF_EF_START)
+		xtensa_pmu_start(event, PERF_EF_RELOAD);
+
+	perf_event_update_userpage(event);
+	return 0;
+}
+
+static void xtensa_pmu_del(struct perf_event *event, int flags)
+{
+	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+
+	xtensa_pmu_stop(event, PERF_EF_UPDATE);
+	__clear_bit(event->hw.idx, ev->used_mask);
+	perf_event_update_userpage(event);
+}
+
+static void xtensa_pmu_read(struct perf_event *event)
+{
+	xtensa_perf_event_update(event, &event->hw, event->hw.idx);
+}
+
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+	struct perf_callchain_entry *entry = data;
+
+	perf_callchain_store(entry, frame->pc);
+	return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
+{
+	xtensa_backtrace_kernel(regs, PERF_MAX_STACK_DEPTH,
+				callchain_trace, NULL, entry);
+}
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+			 struct pt_regs *regs)
+{
+	xtensa_backtrace_user(regs, PERF_MAX_STACK_DEPTH,
+			      callchain_trace, entry);
+}
+
+void perf_event_print_debug(void)
+{
+	unsigned long flags;
+	unsigned i;
+
+	local_irq_save(flags);
+	pr_info("CPU#%d: PMG: 0x%08lx\n", smp_processor_id(),
+		get_er(XTENSA_PMU_PMG));
+	for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i)
+		pr_info("PM%d: 0x%08lx, PMCTRL%d: 0x%08lx, PMSTAT%d: 0x%08lx\n",
+			i, get_er(XTENSA_PMU_PM(i)),
+			i, get_er(XTENSA_PMU_PMCTRL(i)),
+			i, get_er(XTENSA_PMU_PMSTAT(i)));
+	local_irq_restore(flags);
+}
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t rc = IRQ_NONE;
+	struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
+	unsigned i;
+
+	for (i = find_first_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS);
+	     i < XCHAL_NUM_PERF_COUNTERS;
+	     i = find_next_bit(ev->used_mask, XCHAL_NUM_PERF_COUNTERS, i + 1)) {
+		uint32_t v = get_er(XTENSA_PMU_PMSTAT(i));
+		struct perf_event *event = ev->event[i];
+		struct hw_perf_event *hwc = &event->hw;
+		u64 last_period;
+
+		if (!(v & XTENSA_PMU_PMSTAT_OVFL))
+			continue;
+
+		set_er(v, XTENSA_PMU_PMSTAT(i));
+		xtensa_perf_event_update(event, hwc, i);
+		last_period = hwc->last_period;
+		if (xtensa_perf_event_set_period(event, hwc, i)) {
+			struct perf_sample_data data;
+			struct pt_regs *regs = get_irq_regs();
+
+			perf_sample_data_init(&data, 0, last_period);
+			if (perf_event_overflow(event, &data, regs))
+				xtensa_pmu_stop(event, 0);
+		}
+
+		rc = IRQ_HANDLED;
+	}
+	return rc;
+}
+
+static struct pmu xtensa_pmu = {
+	.pmu_enable = xtensa_pmu_enable,
+	.pmu_disable = xtensa_pmu_disable,
+	.event_init = xtensa_pmu_event_init,
+	.add = xtensa_pmu_add,
+	.del = xtensa_pmu_del,
+	.start = xtensa_pmu_start,
+	.stop = xtensa_pmu_stop,
+	.read = xtensa_pmu_read,
+};
+
+static void xtensa_pmu_setup(void)
+{
+	unsigned i;
+
+	set_er(0, XTENSA_PMU_PMG);
+	for (i = 0; i < XCHAL_NUM_PERF_COUNTERS; ++i) {
+		set_er(0, XTENSA_PMU_PMCTRL(i));
+		set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
+	}
+}
+
+static int xtensa_pmu_notifier(struct notifier_block *self,
+			       unsigned long action, void *data)
+{
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_STARTING:
+		xtensa_pmu_setup();
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int __init xtensa_pmu_init(void)
+{
+	int ret;
+	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
+
+	perf_cpu_notifier(xtensa_pmu_notifier);
+#if XTENSA_FAKE_NMI
+	enable_irq(irq);
+#else
+	ret = request_irq(irq, xtensa_pmu_irq_handler, IRQF_PERCPU,
+			  "pmu", NULL);
+	if (ret < 0)
+		return ret;
+#endif
+
+	ret = perf_pmu_register(&xtensa_pmu, "cpu", PERF_TYPE_RAW);
+	if (ret)
+		free_irq(irq, NULL);
+
+	return ret;
+}
+early_initcall(xtensa_pmu_init);
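
The PERF_TYPE_RAW branch of xtensa_pmu_event_init() takes the select/mask fields straight from attr.config, using the same bit layout as XTENSA_PMU_MASK(). A hedged userspace sketch; the select/mask pair below is only an example, matching the retired-instruction encoding used for PERF_COUNT_HW_INSTRUCTIONS above:

/* Illustrative sketch: open one raw Xtensa PMU counter from userspace.
 * select lives in config bits 8..12 and mask in bits 16..31, mirroring
 * XTENSA_PMU_PMCTRL_SELECT_SHIFT / XTENSA_PMU_PMCTRL_MASK_SHIFT above.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_xtensa_raw_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = (2ULL << 8) | (0xffffULL << 16);	/* select = 2, mask = 0xffff */
	attr.disabled = 1;

	/* pid = 0 (this thread), cpu = -1 (any CPU), no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}
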
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index 7d2c317..7538d80 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -1,11 +1,12 @@
 /*
- * arch/xtensa/kernel/stacktrace.c
+ * Kernel and userspace stack tracing.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2001 - 2013 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  */
 #include <linux/export.h>
 #include <linux/sched.h>
@@ -13,6 +14,170 @@
 
 #include <asm/stacktrace.h>
 #include <asm/traps.h>
+#include <asm/uaccess.h>
+
+#if IS_ENABLED(CONFIG_OPROFILE) || IS_ENABLED(CONFIG_PERF_EVENTS)
+
+/* Address of common_exception_return, used to check the
+ * transition from kernel to user space.
+ */
+extern int common_exception_return;
+
+/* A struct that maps to the part of the frame containing the a0 and
+ * a1 registers.
+ */
+struct frame_start {
+	unsigned long a0;
+	unsigned long a1;
+};
+
+void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth,
+			   int (*ufn)(struct stackframe *frame, void *data),
+			   void *data)
+{
+	unsigned long windowstart = regs->windowstart;
+	unsigned long windowbase = regs->windowbase;
+	unsigned long a0 = regs->areg[0];
+	unsigned long a1 = regs->areg[1];
+	unsigned long pc = regs->pc;
+	struct stackframe frame;
+	int index;
+
+	if (!depth--)
+		return;
+
+	frame.pc = pc;
+	frame.sp = a1;
+
+	if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+		return;
+
+	/* Two steps:
+	 *
+	 * 1. Look through the register window for the
+	 * previous PCs in the call trace.
+	 *
+	 * 2. Look on the stack.
+	 */
+
+	/* Step 1.  */
+	/* Rotate WINDOWSTART to move the bit corresponding to
+	 * the current window to the bit #0.
+	 */
+	windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
+
+	/* Look for bits that are set, they correspond to
+	 * valid windows.
+	 */
+	for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
+		if (windowstart & (1 << index)) {
+			/* Get the PC from a0 and a1. */
+			pc = MAKE_PC_FROM_RA(a0, pc);
+			/* Read a0 and a1 from the
+			 * corresponding position in AREGs.
+			 */
+			a0 = regs->areg[index * 4];
+			a1 = regs->areg[index * 4 + 1];
+
+			frame.pc = pc;
+			frame.sp = a1;
+
+			if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+				return;
+		}
+
+	/* Step 2. */
+	/* We are done with the register window, we need to
+	 * look through the stack.
+	 */
+	if (!depth)
+		return;
+
+	/* Start from the a1 register. */
+	/* a1 = regs->areg[1]; */
+	while (a0 != 0 && depth--) {
+		struct frame_start frame_start;
+		/* Get the location for a1, a0 for the
+		 * previous frame from the current a1.
+		 */
+		unsigned long *psp = (unsigned long *)a1;
+
+		psp -= 4;
+
+		/* Check if the region is OK to access. */
+		if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
+			return;
+		/* Copy a1, a0 from user space stack frame. */
+		if (__copy_from_user_inatomic(&frame_start, psp,
+					      sizeof(frame_start)))
+			return;
+
+		pc = MAKE_PC_FROM_RA(a0, pc);
+		a0 = frame_start.a0;
+		a1 = frame_start.a1;
+
+		frame.pc = pc;
+		frame.sp = a1;
+
+		if (pc == 0 || pc >= TASK_SIZE || ufn(&frame, data))
+			return;
+	}
+}
+EXPORT_SYMBOL(xtensa_backtrace_user);
+
+void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth,
+			     int (*kfn)(struct stackframe *frame, void *data),
+			     int (*ufn)(struct stackframe *frame, void *data),
+			     void *data)
+{
+	unsigned long pc = regs->depc > VALID_DOUBLE_EXCEPTION_ADDRESS ?
+		regs->depc : regs->pc;
+	unsigned long sp_start, sp_end;
+	unsigned long a0 = regs->areg[0];
+	unsigned long a1 = regs->areg[1];
+
+	sp_start = a1 & ~(THREAD_SIZE - 1);
+	sp_end = sp_start + THREAD_SIZE;
+
+	/* Spill the register window to the stack first. */
+	spill_registers();
+
+	/* Read the stack frames one by one and create the PC
+	 * from the a0 and a1 registers saved there.
+	 */
+	while (a1 > sp_start && a1 < sp_end && depth--) {
+		struct stackframe frame;
+		unsigned long *psp = (unsigned long *)a1;
+
+		frame.pc = pc;
+		frame.sp = a1;
+
+		if (kernel_text_address(pc) && kfn(&frame, data))
+			return;
+
+		if (pc == (unsigned long)&common_exception_return) {
+			regs = (struct pt_regs *)a1;
+			if (user_mode(regs)) {
+				if (ufn == NULL)
+					return;
+				xtensa_backtrace_user(regs, depth, ufn, data);
+				return;
+			}
+			a0 = regs->areg[0];
+			a1 = regs->areg[1];
+			continue;
+		}
+
+		sp_start = a1;
+
+		pc = MAKE_PC_FROM_RA(a0, pc);
+		a0 = *(psp - 4);
+		a1 = *(psp - 3);
+	}
+}
+EXPORT_SYMBOL(xtensa_backtrace_kernel);
+
+#endif
 
 void walk_stackframe(unsigned long *sp,
 		int (*fn)(struct stackframe *frame, void *data),
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 2a1823d..b97767d 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -52,8 +52,6 @@
 
 static int ccount_timer_set_next_event(unsigned long delta,
 		struct clock_event_device *dev);
-static void ccount_timer_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evt);
 struct ccount_timer {
 	struct clock_event_device evt;
 	int irq_enabled;
@@ -77,35 +75,34 @@
 	return ret;
 }
 
-static void ccount_timer_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evt)
+/*
+ * There is no way to disable the timer interrupt at the device level,
+ * only at the intenable register itself. Since enable_irq/disable_irq
+ * calls are nested, we need to make sure that these calls are
+ * balanced.
+ */
+static int ccount_timer_shutdown(struct clock_event_device *evt)
 {
 	struct ccount_timer *timer =
 		container_of(evt, struct ccount_timer, evt);
 
-	/*
-	 * There is no way to disable the timer interrupt at the device level,
-	 * only at the intenable register itself. Since enable_irq/disable_irq
-	 * calls are nested, we need to make sure that these calls are
-	 * balanced.
-	 */
-	switch (mode) {
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		if (timer->irq_enabled) {
-			disable_irq(evt->irq);
-			timer->irq_enabled = 0;
-		}
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_ONESHOT:
-		if (!timer->irq_enabled) {
-			enable_irq(evt->irq);
-			timer->irq_enabled = 1;
-		}
-	default:
-		break;
+	if (timer->irq_enabled) {
+		disable_irq(evt->irq);
+		timer->irq_enabled = 0;
 	}
+	return 0;
+}
+
+static int ccount_timer_set_oneshot(struct clock_event_device *evt)
+{
+	struct ccount_timer *timer =
+		container_of(evt, struct ccount_timer, evt);
+
+	if (!timer->irq_enabled) {
+		enable_irq(evt->irq);
+		timer->irq_enabled = 1;
+	}
+	return 0;
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id);
@@ -126,7 +123,9 @@
 	clockevent->features = CLOCK_EVT_FEAT_ONESHOT;
 	clockevent->rating = 300;
 	clockevent->set_next_event = ccount_timer_set_next_event;
-	clockevent->set_mode = ccount_timer_set_mode;
+	clockevent->set_state_shutdown = ccount_timer_shutdown;
+	clockevent->set_state_oneshot = ccount_timer_set_oneshot;
+	clockevent->tick_resume = ccount_timer_set_oneshot;
 	clockevent->cpumask = cpumask_of(cpu);
 	clockevent->irq = irq_create_mapping(NULL, LINUX_TIMER_INT);
 	if (WARN(!clockevent->irq, "error: can't map timer irq"))
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 9d2f45f..42d441f 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -62,6 +62,7 @@
 
 extern void do_illegal_instruction (struct pt_regs*);
 extern void do_interrupt (struct pt_regs*);
+extern void do_nmi(struct pt_regs *);
 extern void do_unaligned_user (struct pt_regs*);
 extern void do_multihit (struct pt_regs*, unsigned long);
 extern void do_page_fault (struct pt_regs*, unsigned long);
@@ -146,6 +147,9 @@
 #if XTENSA_HAVE_COPROCESSOR(7)
 COPROCESSOR(7),
 #endif
+#if XTENSA_FAKE_NMI
+{ EXCCAUSE_MAPPED_NMI,			0,		do_nmi },
+#endif
 { EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
 { -1, -1, 0 }
 
@@ -199,6 +203,28 @@
 
 extern void do_IRQ(int, struct pt_regs *);
 
+#if XTENSA_FAKE_NMI
+
+irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
+
+DEFINE_PER_CPU(unsigned long, nmi_count);
+
+void do_nmi(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs;
+
+	if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
+		trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
+	nmi_enter();
+	++*this_cpu_ptr(&nmi_count);
+	xtensa_pmu_irq_handler(0, NULL);
+	nmi_exit();
+	set_irq_regs(old_regs);
+}
+#endif
+
 void do_interrupt(struct pt_regs *regs)
 {
 	static const unsigned int_level_mask[] = {
@@ -211,8 +237,11 @@
 		XCHAL_INTLEVEL6_MASK,
 		XCHAL_INTLEVEL7_MASK,
 	};
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct pt_regs *old_regs;
 
+	trace_hardirqs_off();
+
+	old_regs = set_irq_regs(regs);
 	irq_enter();
 
 	for (;;) {
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 1b397a9..abcdb52 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -627,7 +627,11 @@
 	wsr	a0, excsave2
 	rsr	a0, epc\level
 	wsr	a0, epc1
+	.if	\level <= LOCKLEVEL
 	movi	a0, EXCCAUSE_LEVEL1_INTERRUPT
+	.else
+	movi	a0, EXCCAUSE_MAPPED_NMI
+	.endif
 	wsr	a0, exccause
 	rsr	a0, eps\level
 					# branch to user or kernel vector
@@ -682,11 +686,13 @@
 	.align 4
 _SimulateUserKernelVectorException:
 	addi	a0, a0, (1 << PS_EXCM_BIT)
+#if !XTENSA_FAKE_NMI
 	wsr	a0, ps
+#endif
 	bbsi.l	a0, PS_UM_BIT, 1f	# branch if user mode
-	rsr	a0, excsave2		# restore a0
+	xsr	a0, excsave2		# restore a0
 	j	_KernelExceptionVector	# simulate kernel vector exception
-1:	rsr	a0, excsave2		# restore a0
+1:	xsr	a0, excsave2		# restore a0
 	j	_UserExceptionVector	# simulate user vector exception
 #endif
 
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 83a44a3..c9784c1 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/perf_event.h>
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
@@ -142,6 +143,12 @@
 	}
 
 	up_read(&mm->mmap_sem);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+	if (flags & VM_FAULT_MAJOR)
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+	else if (flags & VM_FAULT_MINOR)
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
 	return;
 
 	/* Something tried to access memory that isn't in our memory map..
diff --git a/arch/xtensa/oprofile/backtrace.c b/arch/xtensa/oprofile/backtrace.c
index 5f03a59..8f95203 100644
--- a/arch/xtensa/oprofile/backtrace.c
+++ b/arch/xtensa/oprofile/backtrace.c
@@ -2,168 +2,26 @@
  * @file backtrace.c
  *
  * @remark Copyright 2008 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
  * @remark Read the file COPYING
  *
  */
 
 #include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
 #include <asm/ptrace.h>
-#include <asm/uaccess.h>
-#include <asm/traps.h>
+#include <asm/stacktrace.h>
 
-/* Address of common_exception_return, used to check the
- * transition from kernel to user space.
- */
-extern int common_exception_return;
-
-/* A struct that maps to the part of the frame containing the a0 and
- * a1 registers.
- */
-struct frame_start {
-	unsigned long a0;
-	unsigned long a1;
-};
-
-static void xtensa_backtrace_user(struct pt_regs *regs, unsigned int depth)
+static int xtensa_backtrace_cb(struct stackframe *frame, void *data)
 {
-	unsigned long windowstart = regs->windowstart;
-	unsigned long windowbase = regs->windowbase;
-	unsigned long a0 = regs->areg[0];
-	unsigned long a1 = regs->areg[1];
-	unsigned long pc = MAKE_PC_FROM_RA(a0, regs->pc);
-	int index;
-
-	/* First add the current PC to the trace. */
-	if (pc != 0 && pc <= TASK_SIZE)
-		oprofile_add_trace(pc);
-	else
-		return;
-
-	/* Two steps:
-	 *
-	 * 1. Look through the register window for the
-	 * previous PCs in the call trace.
-	 *
-	 * 2. Look on the stack.
-	 */
-
-	/* Step 1.  */
-	/* Rotate WINDOWSTART to move the bit corresponding to
-	 * the current window to the bit #0.
-	 */
-	windowstart = (windowstart << WSBITS | windowstart) >> windowbase;
-
-	/* Look for bits that are set, they correspond to
-	 * valid windows.
-	 */
-	for (index = WSBITS - 1; (index > 0) && depth; depth--, index--)
-		if (windowstart & (1 << index)) {
-			/* Read a0 and a1 from the
-			 * corresponding position in AREGs.
-			 */
-			a0 = regs->areg[index * 4];
-			a1 = regs->areg[index * 4 + 1];
-			/* Get the PC from a0 and a1. */
-			pc = MAKE_PC_FROM_RA(a0, pc);
-
-			/* Add the PC to the trace. */
-			if (pc != 0 && pc <= TASK_SIZE)
-				oprofile_add_trace(pc);
-			else
-				return;
-		}
-
-	/* Step 2. */
-	/* We are done with the register window, we need to
-	 * look through the stack.
-	 */
-	if (depth > 0) {
-		/* Start from the a1 register. */
-		/* a1 = regs->areg[1]; */
-		while (a0 != 0 && depth--) {
-
-			struct frame_start frame_start;
-			/* Get the location for a1, a0 for the
-			 * previous frame from the current a1.
-			 */
-			unsigned long *psp = (unsigned long *)a1;
-			psp -= 4;
-
-			/* Check if the region is OK to access. */
-			if (!access_ok(VERIFY_READ, psp, sizeof(frame_start)))
-				return;
-			/* Copy a1, a0 from user space stack frame. */
-			if (__copy_from_user_inatomic(&frame_start, psp,
-						sizeof(frame_start)))
-				return;
-
-			a0 = frame_start.a0;
-			a1 = frame_start.a1;
-			pc = MAKE_PC_FROM_RA(a0, pc);
-
-			if (pc != 0 && pc <= TASK_SIZE)
-				oprofile_add_trace(pc);
-			else
-				return;
-		}
-	}
-}
-
-static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth)
-{
-	unsigned long pc = regs->pc;
-	unsigned long *psp;
-	unsigned long sp_start, sp_end;
-	unsigned long a0 = regs->areg[0];
-	unsigned long a1 = regs->areg[1];
-
-	sp_start = a1 & ~(THREAD_SIZE-1);
-	sp_end = sp_start + THREAD_SIZE;
-
-	/* Spill the register window to the stack first. */
-	spill_registers();
-
-	/* Read the stack frames one by one and create the PC
-	 * from the a0 and a1 registers saved there.
-	 */
-	while (a1 > sp_start && a1 < sp_end && depth--) {
-		pc = MAKE_PC_FROM_RA(a0, pc);
-
-		/* Add the PC to the trace. */
-		oprofile_add_trace(pc);
-		if (pc == (unsigned long) &common_exception_return) {
-			regs = (struct pt_regs *)a1;
-			if (user_mode(regs)) {
-				pc = regs->pc;
-				if (pc != 0 && pc <= TASK_SIZE)
-					oprofile_add_trace(pc);
-				else
-					return;
-				return xtensa_backtrace_user(regs, depth);
-			}
-			a0 = regs->areg[0];
-			a1 = regs->areg[1];
-			continue;
-		}
-
-		psp = (unsigned long *)a1;
-
-		a0 = *(psp - 4);
-		a1 = *(psp - 3);
-
-		if (a1 <= (unsigned long)psp)
-			return;
-
-	}
-	return;
+	oprofile_add_trace(frame->pc);
+	return 0;
 }
 
 void xtensa_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
 	if (user_mode(regs))
-		xtensa_backtrace_user(regs, depth);
+		xtensa_backtrace_user(regs, depth, xtensa_backtrace_cb, NULL);
 	else
-		xtensa_backtrace_kernel(regs, depth);
+		xtensa_backtrace_kernel(regs, depth, xtensa_backtrace_cb,
+					xtensa_backtrace_cb, NULL);
 }
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 8ab021b..976a385 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -105,13 +105,17 @@
 
 	va_start(ap, str);
 	while ((arg = va_arg(ap, char**)) != NULL) {
-		if (*str == '\0')
+		if (*str == '\0') {
+			va_end(ap);
 			return NULL;
+		}
 		end = strchr(str, ',');
 		if (end != str)
 			*arg = str;
-		if (end == NULL)
+		if (end == NULL) {
+			va_end(ap);
 			return NULL;
+		}
 		*end++ = '\0';
 		str = end;
 	}
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 48eebac..fa84ca9 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -101,8 +101,9 @@
 	spin_unlock(&dev->lock);
 }
 
-static int simdisk_xfer_bio(struct simdisk *dev, struct bio *bio)
+static void simdisk_make_request(struct request_queue *q, struct bio *bio)
 {
+	struct simdisk *dev = q->queuedata;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 	sector_t sector = bio->bi_iter.bi_sector;
@@ -116,17 +117,10 @@
 		sector += len;
 		__bio_kunmap_atomic(buffer);
 	}
-	return 0;
-}
 
-static void simdisk_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct simdisk *dev = q->queuedata;
-	int status = simdisk_xfer_bio(dev, bio);
-	bio_endio(bio, status);
+	bio_endio(bio);
 }
 
-
 static int simdisk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct simdisk *dev = bdev->bd_disk->private_data;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 719b715..4aecca7 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -355,13 +355,12 @@
 		container_of(work, struct bio_integrity_payload, bip_work);
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-	int error;
 
-	error = bio_integrity_process(bio, bi->verify_fn);
+	bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
 
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
-	bio_endio(bio, error);
+	bio_endio(bio);
 }
 
 /**
@@ -376,7 +375,7 @@
  * in process context.	This function postpones completion
  * accordingly.
  */
-void bio_integrity_endio(struct bio *bio, int error)
+void bio_integrity_endio(struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 
@@ -386,9 +385,9 @@
 	 * integrity metadata.  Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (error) {
+	if (bio->bi_error) {
 		bio->bi_end_io = bip->bip_end_io;
-		bio_endio(bio, error);
+		bio_endio(bio);
 
 		return;
 	}
diff --git a/block/bio.c b/block/bio.c
index d6e5ba3..515b543 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -269,7 +269,6 @@
 void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
-	bio->bi_flags = 1 << BIO_UPTODATE;
 	atomic_set(&bio->__bi_remaining, 1);
 	atomic_set(&bio->__bi_cnt, 1);
 }
@@ -292,14 +291,17 @@
 	__bio_free(bio);
 
 	memset(bio, 0, BIO_RESET_BYTES);
-	bio->bi_flags = flags | (1 << BIO_UPTODATE);
+	bio->bi_flags = flags;
 	atomic_set(&bio->__bi_remaining, 1);
 }
 EXPORT_SYMBOL(bio_reset);
 
-static void bio_chain_endio(struct bio *bio, int error)
+static void bio_chain_endio(struct bio *bio)
 {
-	bio_endio(bio->bi_private, error);
+	struct bio *parent = bio->bi_private;
+
+	parent->bi_error = bio->bi_error;
+	bio_endio(parent);
 	bio_put(bio);
 }
 
@@ -309,7 +311,7 @@
  */
 static inline void bio_inc_remaining(struct bio *bio)
 {
-	bio->bi_flags |= (1 << BIO_CHAIN);
+	bio_set_flag(bio, BIO_CHAIN);
 	smp_mb__before_atomic();
 	atomic_inc(&bio->__bi_remaining);
 }
@@ -493,7 +495,7 @@
 		if (unlikely(!bvl))
 			goto err_free;
 
-		bio->bi_flags |= 1 << BIO_OWNS_VEC;
+		bio_set_flag(bio, BIO_OWNS_VEC);
 	} else if (nr_iovecs) {
 		bvl = bio->bi_inline_vecs;
 	}
@@ -578,7 +580,7 @@
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_flags |= 1 << BIO_CLONED;
+	bio_set_flag(bio, BIO_CLONED);
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
@@ -692,31 +694,22 @@
 EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
- *	bio_get_nr_vecs		- return approx number of vecs
- *	@bdev:  I/O target
+ *	bio_add_pc_page	-	attempt to add page to bio
+ *	@q: the target queue
+ *	@bio: destination bio
+ *	@page: page to add
+ *	@len: vec entry length
+ *	@offset: vec entry offset
  *
- *	Return the approximate number of pages we can send to this target.
- *	There's no guarantee that you will be able to fit this number of pages
- *	into a bio, it does not account for dynamic restrictions that vary
- *	on offset.
+ *	Attempt to add a page to the bio_vec maplist. This can fail for a
+ *	number of reasons, such as the bio being full or target block device
+ *	limitations. The target block device must allow bio's up to PAGE_SIZE,
+ *	so it is always possible to add a single page to an empty bio.
+ *
+ *	This should only be used by REQ_PC bios.
  */
-int bio_get_nr_vecs(struct block_device *bdev)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-	int nr_pages;
-
-	nr_pages = min_t(unsigned,
-		     queue_max_segments(q),
-		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
-
-	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
-
-}
-EXPORT_SYMBOL(bio_get_nr_vecs);
-
-static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset,
-			  unsigned int max_sectors)
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
+		    *page, unsigned int len, unsigned int offset)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -727,7 +720,7 @@
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
+	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
 		return 0;
 
 	/*
@@ -740,28 +733,7 @@
 
 		if (page == prev->bv_page &&
 		    offset == prev->bv_offset + prev->bv_len) {
-			unsigned int prev_bv_len = prev->bv_len;
 			prev->bv_len += len;
-
-			if (q->merge_bvec_fn) {
-				struct bvec_merge_data bvm = {
-					/* prev_bvec is already charged in
-					   bi_size, discharge it in order to
-					   simulate merging updated prev_bvec
-					   as new bvec. */
-					.bi_bdev = bio->bi_bdev,
-					.bi_sector = bio->bi_iter.bi_sector,
-					.bi_size = bio->bi_iter.bi_size -
-						prev_bv_len,
-					.bi_rw = bio->bi_rw,
-				};
-
-				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
-					prev->bv_len -= len;
-					return 0;
-				}
-			}
-
 			bio->bi_iter.bi_size += len;
 			goto done;
 		}
@@ -770,8 +742,7 @@
 		 * If the queue doesn't support SG gaps and adding this
 		 * offset would create a gap, disallow it.
 		 */
-		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
-		    bvec_gap_to_prev(prev, offset))
+		if (bvec_gap_to_prev(q, prev, offset))
 			return 0;
 	}
 
@@ -804,30 +775,9 @@
 		blk_recount_segments(q, bio);
 	}
 
-	/*
-	 * if queue has other restrictions (eg varying max sector size
-	 * depending on offset), it can specify a merge_bvec_fn in the
-	 * queue to get further control
-	 */
-	if (q->merge_bvec_fn) {
-		struct bvec_merge_data bvm = {
-			.bi_bdev = bio->bi_bdev,
-			.bi_sector = bio->bi_iter.bi_sector,
-			.bi_size = bio->bi_iter.bi_size - len,
-			.bi_rw = bio->bi_rw,
-		};
-
-		/*
-		 * merge_bvec_fn() returns number of bytes it can accept
-		 * at this offset
-		 */
-		if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len)
-			goto failed;
-	}
-
 	/* If we may be able to merge these biovecs, force a recount */
 	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
-		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+		bio_clear_flag(bio, BIO_SEG_VALID);
 
  done:
 	return len;
@@ -841,28 +791,6 @@
 	blk_recount_segments(q, bio);
 	return 0;
 }
-
-/**
- *	bio_add_pc_page	-	attempt to add page to bio
- *	@q: the target queue
- *	@bio: destination bio
- *	@page: page to add
- *	@len: vec entry length
- *	@offset: vec entry offset
- *
- *	Attempt to add a page to the bio_vec maplist. This can fail for a
- *	number of reasons, such as the bio being full or target block device
- *	limitations. The target block device must allow bio's up to PAGE_SIZE,
- *	so it is always possible to add a single page to an empty bio.
- *
- *	This should only be used by REQ_PC bios.
- */
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
-		    unsigned int len, unsigned int offset)
-{
-	return __bio_add_page(q, bio, page, len, offset,
-			      queue_max_hw_sectors(q));
-}
 EXPORT_SYMBOL(bio_add_pc_page);
 
 /**
@@ -872,22 +800,47 @@
  *	@len: vec entry length
  *	@offset: vec entry offset
  *
- *	Attempt to add a page to the bio_vec maplist. This can fail for a
- *	number of reasons, such as the bio being full or target block device
- *	limitations. The target block device must allow bio's up to PAGE_SIZE,
- *	so it is always possible to add a single page to an empty bio.
+ *	Attempt to add a page to the bio_vec maplist. This will only fail
+ *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
  */
-int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
-		 unsigned int offset)
+int bio_add_page(struct bio *bio, struct page *page,
+		 unsigned int len, unsigned int offset)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned int max_sectors;
+	struct bio_vec *bv;
 
-	max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
-	if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
-		max_sectors = len >> 9;
+	/*
+	 * cloned bio must not modify vec list
+	 */
+	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+		return 0;
 
-	return __bio_add_page(q, bio, page, len, offset, max_sectors);
+	/*
+	 * For filesystems with a blocksize smaller than the pagesize
+	 * we will often be called with the same page as last time and
+	 * a consecutive offset.  Optimize this special case.
+	 */
+	if (bio->bi_vcnt > 0) {
+		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+		if (page == bv->bv_page &&
+		    offset == bv->bv_offset + bv->bv_len) {
+			bv->bv_len += len;
+			goto done;
+		}
+	}
+
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
+		return 0;
+
+	bv		= &bio->bi_io_vec[bio->bi_vcnt];
+	bv->bv_page	= page;
+	bv->bv_len	= len;
+	bv->bv_offset	= offset;
+
+	bio->bi_vcnt++;
+done:
+	bio->bi_iter.bi_size += len;
+	return len;
 }
 EXPORT_SYMBOL(bio_add_page);
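The rewritten bio_add_page() no longer consults the request queue at all: it fails only when the bio is already full (bi_vcnt == bi_max_vecs) or cloned, since anything that exceeds the queue limits is carved up later by blk_queue_split(). Callers can therefore fill bios opportunistically and submit whenever an add fails. A hypothetical caller-side sketch (my_add_or_submit and its use are made up; only bio_alloc(), bio_add_page() and submit_bio() are real interfaces):

#include <linux/bio.h>
#include <linux/fs.h>

static struct bio *my_add_or_submit(struct bio *bio, struct block_device *bdev,
				    sector_t sector, struct page *page,
				    unsigned int len, unsigned int offset)
{
	if (bio && bio_add_page(bio, page, len, offset))
		return bio;			/* page fitted into the current bio */

	if (bio)
		submit_bio(WRITE, bio);		/* bio is full, send it down */

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, len, offset);	/* an empty bio always has room */
	return bio;
}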
 
@@ -896,11 +849,11 @@
 	int error;
 };
 
-static void submit_bio_wait_endio(struct bio *bio, int error)
+static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;
 
-	ret->error = error;
+	ret->error = bio->bi_error;
 	complete(&ret->event);
 }
 
@@ -1388,7 +1341,7 @@
 	if (iter->type & WRITE)
 		bio->bi_rw |= REQ_WRITE;
 
-	bio->bi_flags |= (1 << BIO_USER_MAPPED);
+	bio_set_flag(bio, BIO_USER_MAPPED);
 
 	/*
 	 * subtle -- if __bio_map_user() ended up bouncing a bio,
@@ -1445,7 +1398,7 @@
 }
 EXPORT_SYMBOL(bio_unmap_user);
 
-static void bio_map_kern_endio(struct bio *bio, int err)
+static void bio_map_kern_endio(struct bio *bio)
 {
 	bio_put(bio);
 }
@@ -1501,13 +1454,13 @@
 }
 EXPORT_SYMBOL(bio_map_kern);
 
-static void bio_copy_kern_endio(struct bio *bio, int err)
+static void bio_copy_kern_endio(struct bio *bio)
 {
 	bio_free_pages(bio);
 	bio_put(bio);
 }
 
-static void bio_copy_kern_endio_read(struct bio *bio, int err)
+static void bio_copy_kern_endio_read(struct bio *bio)
 {
 	char *p = bio->bi_private;
 	struct bio_vec *bvec;
@@ -1518,7 +1471,7 @@
 		p += bvec->bv_len;
 	}
 
-	bio_copy_kern_endio(bio, err);
+	bio_copy_kern_endio(bio);
 }
 
 /**
@@ -1768,7 +1721,7 @@
 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
 
 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
-		clear_bit(BIO_CHAIN, &bio->bi_flags);
+		bio_clear_flag(bio, BIO_CHAIN);
 		return true;
 	}
 
@@ -1778,25 +1731,15 @@
 /**
  * bio_endio - end I/O on a bio
  * @bio:	bio
- * @error:	error, if any
  *
  * Description:
- *   bio_endio() will end I/O on the whole bio. bio_endio() is the
- *   preferred way to end I/O on a bio, it takes care of clearing
- *   BIO_UPTODATE on error. @error is 0 on success, and and one of the
- *   established -Exxxx (-EIO, for instance) error values in case
- *   something went wrong. No one should call bi_end_io() directly on a
- *   bio unless they own it and thus know that it has an end_io
- *   function.
+ *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
+ *   way to end I/O on a bio. No one should call bi_end_io() directly on a
+ *   bio unless they own it and thus know that it has an end_io function.
  **/
-void bio_endio(struct bio *bio, int error)
+void bio_endio(struct bio *bio)
 {
 	while (bio) {
-		if (error)
-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
-		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-			error = -EIO;
-
 		if (unlikely(!bio_remaining_done(bio)))
 			break;
 
@@ -1810,11 +1753,12 @@
 		 */
 		if (bio->bi_end_io == bio_chain_endio) {
 			struct bio *parent = bio->bi_private;
+			parent->bi_error = bio->bi_error;
 			bio_put(bio);
 			bio = parent;
 		} else {
 			if (bio->bi_end_io)
-				bio->bi_end_io(bio, error);
+				bio->bi_end_io(bio);
 			bio = NULL;
 		}
 	}
@@ -1882,7 +1826,7 @@
 	if (offset == 0 && size == bio->bi_iter.bi_size)
 		return;
 
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	bio_advance(bio, offset << 9);
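Taken together, the bio.c changes replace the (error, BIO_UPTODATE) pair with a single bio->bi_error field: bi_end_io callbacks lose their int argument and read the status from the bio, and anyone failing a bio sets bi_error before calling bio_endio(). A minimal driver-side sketch under the new prototypes (struct my_io and both handlers are invented names):

#include <linux/bio.h>
#include <linux/completion.h>

struct my_io {
	struct completion done;
	int error;
};

static void my_end_io(struct bio *bio)
{
	struct my_io *io = bio->bi_private;

	io->error = bio->bi_error;	/* 0 on success, -Exxx on failure */
	complete(&io->done);
	bio_put(bio);
}

static void my_fail_bio(struct bio *bio)
{
	bio->bi_error = -EIO;		/* set the status first ... */
	bio_endio(bio);			/* ... then complete the bio */
}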
 
diff --git a/block/blk-core.c b/block/blk-core.c
index 627ed0c..60912e9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -143,18 +143,16 @@
 			  unsigned int nbytes, int error)
 {
 	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
+		bio->bi_error = error;
 
 	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
+		bio_set_flag(bio, BIO_QUIET);
 
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
 	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
+		bio_endio(bio);
 }
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
@@ -645,6 +643,10 @@
 	if (q->id < 0)
 		goto fail_q;
 
+	q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
+	if (!q->bio_split)
+		goto fail_id;
+
 	q->backing_dev_info.ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
@@ -653,7 +655,7 @@
 
 	err = bdi_init(&q->backing_dev_info);
 	if (err)
-		goto fail_id;
+		goto fail_split;
 
 	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
@@ -695,6 +697,8 @@
 
 fail_bdi:
 	bdi_destroy(&q->backing_dev_info);
+fail_split:
+	bioset_free(q->bio_split);
 fail_id:
 	ida_simple_remove(&blk_queue_ida, q->id);
 fail_q:
@@ -1612,6 +1616,8 @@
 	struct request *req;
 	unsigned int request_count = 0;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1620,7 +1626,8 @@
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_endio(bio, -EIO);
+		bio->bi_error = -EIO;
+		bio_endio(bio);
 		return;
 	}
 
@@ -1673,7 +1680,8 @@
 	 */
 	req = get_request(q, rw_flags, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
-		bio_endio(bio, PTR_ERR(req));	/* @q is dead */
+		bio->bi_error = PTR_ERR(req);
+		bio_endio(bio);
 		goto out_unlock;
 	}
 
@@ -1832,15 +1840,6 @@
 		goto end_io;
 	}
 
-	if (likely(bio_is_rw(bio) &&
-		   nr_sectors > queue_max_hw_sectors(q))) {
-		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-		       bdevname(bio->bi_bdev, b),
-		       bio_sectors(bio),
-		       queue_max_hw_sectors(q));
-		goto end_io;
-	}
-
 	part = bio->bi_bdev->bd_part;
 	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
@@ -1896,7 +1895,8 @@
 	return true;
 
 end_io:
-	bio_endio(bio, err);
+	bio->bi_error = err;
+	bio_endio(bio);
 	return false;
 }
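blk_queue_bio() now runs every incoming bio through blk_queue_split() against the per-queue bio_split bioset allocated in blk_alloc_queue_node() above, which is why the old "bio too big" rejection can simply go away: an oversized bio is split rather than errored. Bio-based drivers are expected to do the same at the top of their make_request function; a minimal sketch (the driver is hypothetical, the blk_queue_split() signature is the one added by this series):

#include <linux/blkdev.h>

static void my_make_request(struct request_queue *q, struct bio *bio)
{
	/* let the core carve the bio down to this queue's limits */
	blk_queue_split(q, &bio, q->bio_split);

	/* ... process 'bio', which now fits the queue ... */
	bio_endio(bio);
}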
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 20badd7..9c423e5 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -73,6 +73,7 @@
 
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-tag.h"
 
 /* FLUSH/FUA sequences */
 enum {
@@ -226,7 +227,12 @@
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
 
 	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+
+		/* release the tag's ownership to the req cloned from */
+		/* release the tag's ownership back to the request it was cloned from */
 		spin_lock_irqsave(&fq->mq_flush_lock, flags);
+		hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu);
+		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
 		flush_rq->tag = -1;
 	}
 
@@ -308,11 +314,18 @@
 
 	/*
 	 * Borrow tag from the first request since they can't
-	 * be in flight at the same time.
+	 * be in flight at the same time, and acquire the tag's
+	 * ownership for the flush request.
 	 */
 	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+
 		flush_rq->mq_ctx = first_rq->mq_ctx;
 		flush_rq->tag = first_rq->tag;
+		fq->orig_rq = first_rq;
+
+		hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu);
+		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 	}
 
 	flush_rq->cmd_type = REQ_TYPE_FS;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 7688ee3..bd40292 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -11,21 +11,28 @@
 
 struct bio_batch {
 	atomic_t		done;
-	unsigned long		flags;
+	int			error;
 	struct completion	*wait;
 };
 
-static void bio_batch_end_io(struct bio *bio, int err)
+static void bio_batch_end_io(struct bio *bio)
 {
 	struct bio_batch *bb = bio->bi_private;
 
-	if (err && (err != -EOPNOTSUPP))
-		clear_bit(BIO_UPTODATE, &bb->flags);
+	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
+		bb->error = bio->bi_error;
 	if (atomic_dec_and_test(&bb->done))
 		complete(bb->wait);
 	bio_put(bio);
 }
 
+/*
+ * Ensure that max discard sectors doesn't overflow bi_size; as long as the
+ * discard granularity is a power of two, it will also be of the proper
+ * granularity.
+ */
+#define MAX_BIO_SECTORS ((1U << 31) >> 9)
+
 /**
  * blkdev_issue_discard - queue a discard
  * @bdev:	blockdev to issue discard for
@@ -43,8 +50,6 @@
 	DECLARE_COMPLETION_ONSTACK(wait);
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio_batch bb;
 	struct bio *bio;
 	int ret = 0;
@@ -56,21 +61,6 @@
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same.  */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
@@ -78,13 +68,13 @@
 	}
 
 	atomic_set(&bb.done, 1);
-	bb.flags = 1 << BIO_UPTODATE;
+	bb.error = 0;
 	bb.wait = &wait;
 
 	blk_start_plug(&plug);
 	while (nr_sects) {
 		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		sector_t end_sect;
 
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
@@ -92,21 +82,8 @@
 			break;
 		}
 
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
-
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
+		req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS);
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
 
 		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
@@ -134,9 +111,8 @@
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion_io(&wait);
 
-	if (!test_bit(BIO_UPTODATE, &bb.flags))
-		ret = -EIO;
-
+	if (bb.error)
+		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
@@ -166,13 +142,11 @@
 	if (!q)
 		return -ENXIO;
 
-	max_write_same_sectors = q->limits.max_write_same_sectors;
-
-	if (max_write_same_sectors == 0)
-		return -EOPNOTSUPP;
+	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
+	max_write_same_sectors = UINT_MAX >> 9;
 
 	atomic_set(&bb.done, 1);
-	bb.flags = 1 << BIO_UPTODATE;
+	bb.error = 0;
 	bb.wait = &wait;
 
 	while (nr_sects) {
@@ -208,9 +182,8 @@
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion_io(&wait);
 
-	if (!test_bit(BIO_UPTODATE, &bb.flags))
-		ret = -ENOTSUPP;
-
+	if (bb.error)
+		return bb.error;
 	return ret;
 }
 EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -236,7 +209,7 @@
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	atomic_set(&bb.done, 1);
-	bb.flags = 1 << BIO_UPTODATE;
+	bb.error = 0;
 	bb.wait = &wait;
 
 	ret = 0;
@@ -270,10 +243,8 @@
 	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion_io(&wait);
 
-	if (!test_bit(BIO_UPTODATE, &bb.flags))
-		/* One of bios in the batch was completed with error.*/
-		ret = -EIO;
-
+	if (bb.error)
+		return bb.error;
 	return ret;
 }
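With the granularity-aware splitting moved into blk_bio_discard_split(), blkdev_issue_discard() only has to keep each bio's byte count within the 32-bit bi_size field, which is all MAX_BIO_SECTORS does. Spelled out (illustration only, not from the patch):

/*
 * (1U << 31) bytes >> 9 = 2^22 = 4194304 sectors, i.e. 2 GiB per discard
 * bio; any power-of-two granularity up to that size divides it evenly, so
 * the later split against the queue limits stays aligned.
 */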
 
diff --git a/block/blk-map.c b/block/blk-map.c
index da310a1..2338416 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -94,7 +94,7 @@
 		return PTR_ERR(bio);
 
 	if (map_data && map_data->null_mapped)
-		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+		bio_set_flag(bio, BIO_NULL_MAPPED);
 
 	if (bio->bi_iter.bi_size != iter->count) {
 		/*
@@ -103,7 +103,7 @@
 		 * normal IO completion path
 		 */
 		bio_get(bio);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		__blk_rq_unmap_user(bio);
 		return -EINVAL;
 	}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 30a0d9f..d088cff 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,12 +9,146 @@
 
 #include "blk.h"
 
+static struct bio *blk_bio_discard_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	unsigned int max_discard_sectors, granularity;
+	int alignment;
+	sector_t tmp;
+	unsigned split_sectors;
+
+	/* Zero-sector (unknown) and one-sector granularities are the same.  */
+	granularity = max(q->limits.discard_granularity >> 9, 1U);
+
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors -= max_discard_sectors % granularity;
+
+	if (unlikely(!max_discard_sectors)) {
+		/* XXX: warn */
+		return NULL;
+	}
+
+	if (bio_sectors(bio) <= max_discard_sectors)
+		return NULL;
+
+	split_sectors = max_discard_sectors;
+
+	/*
+	 * If the next starting sector would be misaligned, stop the discard at
+	 * the previous aligned sector.
+	 */
+	alignment = (q->limits.discard_alignment >> 9) % granularity;
+
+	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+	tmp = sector_div(tmp, granularity);
+
+	if (split_sectors > tmp)
+		split_sectors -= tmp;
+
+	return bio_split(bio, split_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_write_same_split(struct request_queue *q,
+					    struct bio *bio,
+					    struct bio_set *bs)
+{
+	if (!q->limits.max_write_same_sectors)
+		return NULL;
+
+	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
+		return NULL;
+
+	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
+}
+
+static struct bio *blk_bio_segment_split(struct request_queue *q,
+					 struct bio *bio,
+					 struct bio_set *bs)
+{
+	struct bio *split;
+	struct bio_vec bv, bvprv;
+	struct bvec_iter iter;
+	unsigned seg_size = 0, nsegs = 0, sectors = 0;
+	int prev = 0;
+
+	bio_for_each_segment(bv, bio, iter) {
+		sectors += bv.bv_len >> 9;
+
+		if (sectors > queue_max_sectors(q))
+			goto split;
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+			goto split;
+
+		if (prev && blk_queue_cluster(q)) {
+			if (seg_size + bv.bv_len > queue_max_segment_size(q))
+				goto new_segment;
+			if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+				goto new_segment;
+
+			seg_size += bv.bv_len;
+			bvprv = bv;
+			prev = 1;
+			continue;
+		}
+new_segment:
+		if (nsegs == queue_max_segments(q))
+			goto split;
+
+		nsegs++;
+		bvprv = bv;
+		prev = 1;
+		seg_size = bv.bv_len;
+	}
+
+	return NULL;
+split:
+	split = bio_clone_bioset(bio, GFP_NOIO, bs);
+
+	split->bi_iter.bi_size -= iter.bi_size;
+	bio->bi_iter = iter;
+
+	if (bio_integrity(bio)) {
+		bio_integrity_advance(bio, split->bi_iter.bi_size);
+		bio_integrity_trim(split, 0, bio_sectors(split));
+	}
+
+	return split;
+}
+
+void blk_queue_split(struct request_queue *q, struct bio **bio,
+		     struct bio_set *bs)
+{
+	struct bio *split;
+
+	if ((*bio)->bi_rw & REQ_DISCARD)
+		split = blk_bio_discard_split(q, *bio, bs);
+	else if ((*bio)->bi_rw & REQ_WRITE_SAME)
+		split = blk_bio_write_same_split(q, *bio, bs);
+	else
+		split = blk_bio_segment_split(q, *bio, q->bio_split);
+
+	if (split) {
+		bio_chain(split, *bio);
+		generic_make_request(*bio);
+		*bio = split;
+	}
+}
+EXPORT_SYMBOL(blk_queue_split);
+
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio,
 					     bool no_sg_merge)
 {
 	struct bio_vec bv, bvprv = { NULL };
-	int cluster, high, highprv = 1;
+	int cluster, prev = 0;
 	unsigned int seg_size, nr_phys_segs;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
@@ -36,7 +170,6 @@
 	cluster = blk_queue_cluster(q);
 	seg_size = 0;
 	nr_phys_segs = 0;
-	high = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, iter) {
 			/*
@@ -46,13 +179,7 @@
 			if (no_sg_merge)
 				goto new_segment;
 
-			/*
-			 * the trick here is making sure that a high page is
-			 * never considered part of another segment, since
-			 * that might change with the bounce page.
-			 */
-			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
-			if (!high && !highprv && cluster) {
+			if (prev && cluster) {
 				if (seg_size + bv.bv_len
 				    > queue_max_segment_size(q))
 					goto new_segment;
@@ -72,8 +199,8 @@
 
 			nr_phys_segs++;
 			bvprv = bv;
+			prev = 1;
 			seg_size = bv.bv_len;
-			highprv = high;
 		}
 		bbio = bio;
 	}
@@ -116,7 +243,7 @@
 		bio->bi_next = nxt;
 	}
 
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
+	bio_set_flag(bio, BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
 
@@ -266,7 +393,7 @@
 		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-		sg->page_link &= ~0x02;
+		sg_unmark_end(sg);
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
 			    q->dma_drain_size,
@@ -356,12 +483,12 @@
 	return !q->mq_ops && req->special;
 }
 
-static int req_gap_to_prev(struct request *req, struct request *next)
+static int req_gap_to_prev(struct request *req, struct bio *next)
 {
 	struct bio *prev = req->biotail;
 
-	return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
-				next->bio->bi_io_vec[0].bv_offset);
+	return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+			next->bi_io_vec[0].bv_offset);
 }
 
 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
@@ -378,8 +505,7 @@
 	if (req_no_special_merge(req) || req_no_special_merge(next))
 		return 0;
 
-	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
-	    req_gap_to_prev(req, next))
+	if (req_gap_to_prev(req, next->bio))
 		return 0;
 
 	/*
@@ -564,8 +690,6 @@
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
-	struct request_queue *q = rq->q;
-
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
@@ -590,13 +714,8 @@
 		return false;
 
 	/* Only check gaps if the bio carries data */
-	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
-		struct bio_vec *bprev;
-
-		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
-		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
-			return false;
-	}
+	if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
+		return false;
 
 	return true;
 }
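blk_queue_split() and the three *_split() helpers above are the heart of the series: instead of asking the driver up front, through the merge_bvec_fn machinery being removed throughout these patches, whether one more bvec may be added, the block layer now accepts arbitrarily large bios and splits them against the queue limits at submission time. For a stacking driver the practical effect is that remapping becomes a plain hand-off; a hypothetical sketch (the queuedata-to-block_device mapping is assumed, not something this series provides):

#include <linux/blkdev.h>

static void stacked_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *lower = q->queuedata;	/* assumed mapping */

	bio->bi_bdev = lower;
	/* no merge_bvec_fn needed: the lower queue splits the bio itself */
	generic_make_request(bio);
}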
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index b79685e..279c5d6 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -141,15 +141,26 @@
 
 static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
 {
-	char *start_page = page;
 	struct request *rq;
+	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
 
-	page += sprintf(page, "%s:\n", msg);
+	list_for_each_entry(rq, list, queuelist) {
+		const int rq_len = 2 * sizeof(rq) + 2;
 
-	list_for_each_entry(rq, list, queuelist)
-		page += sprintf(page, "\t%p\n", rq);
+		/* if the output will be truncated */
+		if (PAGE_SIZE - 1 < len + rq_len) {
+			/* backspacing if it can't hold '\t...\n' */
+			/* back up over the previous entry if '\t...\n' won't fit */
+			if (PAGE_SIZE - 1 < len + 5)
+				len -= rq_len;
+			len += snprintf(page + len, PAGE_SIZE - 1 - len,
+					"\t...\n");
+			break;
+		}
+		len += snprintf(page + len, PAGE_SIZE - 1 - len,
+				"\t%p\n", rq);
+	}
 
-	return page - start_page;
+	return len;
 }
 
 static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
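The rewritten sysfs_list_show() bounds its output to a single page instead of trusting sprintf(). Worked numbers for 64-bit (an illustration, not part of the patch):

/*
 * sizeof(rq) == 8, so each "\t%p\n" line costs rq_len = 2 * 8 + 2 = 18
 * bytes (16 hex digits plus tab and newline).  When the next line would
 * overflow PAGE_SIZE - 1, "\t...\n" (5 bytes) is emitted instead; if even
 * those 5 bytes don't fit, len is first backed up over the previous
 * 18-byte entry to make room.
 */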
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9b6e288..9115c6d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -429,7 +429,7 @@
 		for (bit = find_first_bit(&bm->word, bm->depth);
 		     bit < bm->depth;
 		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
-		     	rq = blk_mq_tag_to_rq(hctx->tags, off + bit);
+			rq = hctx->tags->rqs[off + bit];
 			if (rq->q == hctx->queue)
 				fn(hctx, rq, data, reserved);
 		}
@@ -453,7 +453,7 @@
 		for (bit = find_first_bit(&bm->word, bm->depth);
 		     bit < bm->depth;
 		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
-			rq = blk_mq_tag_to_rq(tags, off + bit);
+			rq = tags->rqs[off + bit];
 			fn(rq, data, reserved);
 		}
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 75893a3..9eb2cf4 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -89,4 +89,16 @@
 	__blk_mq_tag_idle(hctx);
 }
 
+/*
+ * This helper should only be used by the flush request to share its
+ * tag with the request it was cloned from; the two requests can't be
+ * in flight at the same time. The caller has to make sure the tag
+ * can't be freed.
+ */
+static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
+		unsigned int tag, struct request *rq)
+{
+	hctx->tags->rqs[tag] = rq;
+}
+
 #endif
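This helper pairs with the blk-flush.c changes earlier in the series: on the issue side (blk_kick_flush()) the flush request borrows the data request's tag and takes over its tags->rqs[] slot, and on completion (flush_end_io()) the slot is handed back, which is what lets blk_mq_tag_to_rq() shrink to a plain array lookup in blk-mq.c below. Abbreviated from the hunks above:

/* issue side, in blk_kick_flush(): */
flush_rq->mq_ctx = first_rq->mq_ctx;
flush_rq->tag    = first_rq->tag;
fq->orig_rq      = first_rq;
blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);

/* completion side, in flush_end_io(): */
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;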
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7d842db..f2d67b4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -559,23 +559,9 @@
 }
 EXPORT_SYMBOL(blk_mq_abort_requeue_list);
 
-static inline bool is_flush_request(struct request *rq,
-		struct blk_flush_queue *fq, unsigned int tag)
-{
-	return ((rq->cmd_flags & REQ_FLUSH_SEQ) &&
-			fq->flush_rq->tag == tag);
-}
-
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
-	struct request *rq = tags->rqs[tag];
-	/* mq_ctx of flush rq is always cloned from the corresponding req */
-	struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx);
-
-	if (!is_flush_request(rq, fq, tag))
-		return rq;
-
-	return fq->flush_rq;
+	return tags->rqs[tag];
 }
 EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
@@ -1199,7 +1185,7 @@
 	struct blk_mq_alloc_data alloc_data;
 
 	if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return NULL;
 	}
 
@@ -1283,10 +1269,12 @@
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return;
 	}
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
 		return;
@@ -1368,10 +1356,12 @@
 	blk_queue_bounce(q, &bio);
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return;
 	}
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
 		return;
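The error paths above switch from bio_endio(bio, -EIO) to bio_io_error(bio). Presumably the helper now folds the status assignment and the completion together, along these lines (a sketch of the expected inline, not copied from this series):

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_error = -EIO;
	bio_endio(bio);
}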
diff --git a/block/blk-settings.c b/block/blk-settings.c
index e0057d0..7d8f129 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -53,28 +53,6 @@
 }
 EXPORT_SYMBOL(blk_queue_unprep_rq);
 
-/**
- * blk_queue_merge_bvec - set a merge_bvec function for queue
- * @q:		queue
- * @mbfn:	merge_bvec_fn
- *
- * Usually queues have static limitations on the max sectors or segments that
- * we can put in a request. Stacking drivers may have some settings that
- * are dynamic, and thus we have to query the queue whether it is ok to
- * add a new bio_vec to a bio at a given offset or not. If the block device
- * has such limitations, it needs to register a merge_bvec_fn to control
- * the size of bio's sent to it. Note that a block device *must* allow a
- * single page to be added to an empty bio. The block device driver may want
- * to use the bio_split() function to deal with these bio's. By default
- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
- * honored.
- */
-void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
-{
-	q->merge_bvec_fn = mbfn;
-}
-EXPORT_SYMBOL(blk_queue_merge_bvec);
-
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
@@ -111,11 +89,13 @@
 	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
+	lim->max_hw_discard_sectors = 0;
 	lim->discard_granularity = 0;
 	lim->discard_alignment = 0;
 	lim->discard_misaligned = 0;
@@ -257,7 +237,9 @@
 		       __func__, max_hw_sectors);
 	}
 
-	limits->max_sectors = limits->max_hw_sectors = max_hw_sectors;
+	limits->max_hw_sectors = max_hw_sectors;
+	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+				    BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_limits_max_hw_sectors);
 
@@ -303,6 +285,7 @@
 void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors)
 {
+	q->limits.max_hw_discard_sectors = max_discard_sectors;
 	q->limits.max_discard_sectors = max_discard_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
@@ -550,6 +533,8 @@
 
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
+	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
+					    b->virt_boundary_mask);
 
 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
@@ -641,6 +626,8 @@
 
 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
 						      b->max_discard_sectors);
+		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
+							 b->max_hw_discard_sectors);
 		t->discard_granularity = max(t->discard_granularity,
 					     b->discard_granularity);
 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
@@ -788,6 +775,17 @@
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
+ * blk_queue_virt_boundary - set boundary rules for bio merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
+{
+	q->limits.virt_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_virt_boundary);
+
+/**
  * blk_queue_dma_alignment - set dma length and memory alignment
  * @q:     the request queue for the device
  * @mask:  alignment mask
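blk_queue_virt_boundary() is the queue-limits replacement for the QUEUE_FLAG_SG_GAPS checks dropped from bio.c and blk-merge.c earlier in the series: a device that cannot tolerate a gap inside its virtual boundary now publishes that as a mask, and bvec_gap_to_prev(), which now takes the queue, can test it generically. A hypothetical driver with a 4 KiB boundary would set it roughly like this (assumed usage, not from this hunk):

static void mydev_set_limits(struct request_queue *q)
{
	/* every bvec after the first must start page-aligned and the
	 * previous one must end on a page boundary */
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}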
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6264b38..3e44a9d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -145,12 +145,43 @@
 	return queue_var_show(q->limits.discard_granularity, page);
 }
 
+static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
+{
+	unsigned long long val;
+
+	val = q->limits.max_hw_discard_sectors << 9;
+	return sprintf(page, "%llu\n", val);
+}
+
 static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
 {
 	return sprintf(page, "%llu\n",
 		       (unsigned long long)q->limits.max_discard_sectors << 9);
 }
 
+static ssize_t queue_discard_max_store(struct request_queue *q,
+				       const char *page, size_t count)
+{
+	unsigned long max_discard;
+	ssize_t ret = queue_var_store(&max_discard, page, count);
+
+	if (ret < 0)
+		return ret;
+
+	if (max_discard & (q->limits.discard_granularity - 1))
+		return -EINVAL;
+
+	max_discard >>= 9;
+	if (max_discard > UINT_MAX)
+		return -EINVAL;
+
+	if (max_discard > q->limits.max_hw_discard_sectors)
+		max_discard = q->limits.max_hw_discard_sectors;
+
+	q->limits.max_discard_sectors = max_discard;
+	return ret;
+}
+
 static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(queue_discard_zeroes_data(q), page);
@@ -360,9 +391,15 @@
 	.show = queue_discard_granularity_show,
 };
 
+static struct queue_sysfs_entry queue_discard_max_hw_entry = {
+	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
+	.show = queue_discard_max_hw_show,
+};
+
 static struct queue_sysfs_entry queue_discard_max_entry = {
-	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
+	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_discard_max_show,
+	.store = queue_discard_max_store,
 };
 
 static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
@@ -421,6 +458,7 @@
 	&queue_io_opt_entry.attr,
 	&queue_discard_granularity_entry.attr,
 	&queue_discard_max_entry.attr,
+	&queue_discard_max_hw_entry.attr,
 	&queue_discard_zeroes_data_entry.attr,
 	&queue_write_same_max_entry.attr,
 	&queue_nonrot_entry.attr,
@@ -523,6 +561,9 @@
 
 	blk_trace_shutdown(q);
 
+	if (q->bio_split)
+		bioset_free(q->bio_split);
+
 	ida_simple_remove(&blk_queue_ida, q->id);
 	call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
diff --git a/block/blk.h b/block/blk.h
index 026d959..838188b 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,6 +22,12 @@
 	struct list_head	flush_queue[2];
 	struct list_head	flush_data_in_flight;
 	struct request		*flush_rq;
+
+	/*
+	 * flush_rq shares its tag with this request; the two can't be
+	 * active at the same time
+	 */
+	struct request		*orig_rq;
 	spinlock_t		mq_flush_lock;
 };
 
diff --git a/block/bounce.c b/block/bounce.c
index b173112..0611aea 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -123,7 +123,7 @@
 	}
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
+static void bounce_end_io(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
@@ -141,61 +141,44 @@
 		mempool_free(bvec->bv_page, pool);
 	}
 
-	bio_endio(bio_orig, err);
+	bio_orig->bi_error = bio->bi_error;
+	bio_endio(bio_orig);
 	bio_put(bio);
 }
 
-static void bounce_end_io_write(struct bio *bio, int err)
+static void bounce_end_io_write(struct bio *bio)
 {
-	bounce_end_io(bio, page_pool, err);
+	bounce_end_io(bio, page_pool);
 }
 
-static void bounce_end_io_write_isa(struct bio *bio, int err)
+static void bounce_end_io_write_isa(struct bio *bio)
 {
 
-	bounce_end_io(bio, isa_page_pool, err);
+	bounce_end_io(bio, isa_page_pool);
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
 
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (!bio->bi_error)
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	bounce_end_io(bio, pool, err);
+	bounce_end_io(bio, pool);
 }
 
-static void bounce_end_io_read(struct bio *bio, int err)
+static void bounce_end_io_read(struct bio *bio)
 {
-	__bounce_end_io_read(bio, page_pool, err);
+	__bounce_end_io_read(bio, page_pool);
 }
 
-static void bounce_end_io_read_isa(struct bio *bio, int err)
+static void bounce_end_io_read_isa(struct bio *bio)
 {
-	__bounce_end_io_read(bio, isa_page_pool, err);
+	__bounce_end_io_read(bio, isa_page_pool);
 }
 
-#ifdef CONFIG_NEED_BOUNCE_POOL
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	if (bio_data_dir(bio) != WRITE)
-		return 0;
-
-	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
-		return 0;
-
-	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
-}
-#else
-static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
-{
-	return 0;
-}
-#endif /* CONFIG_NEED_BOUNCE_POOL */
-
 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
-			       mempool_t *pool, int force)
+			       mempool_t *pool)
 {
 	struct bio *bio;
 	int rw = bio_data_dir(*bio_orig);
@@ -203,8 +186,6 @@
 	struct bvec_iter iter;
 	unsigned i;
 
-	if (force)
-		goto bounce;
 	bio_for_each_segment(from, *bio_orig, iter)
 		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
 			goto bounce;
@@ -216,7 +197,7 @@
 	bio_for_each_segment_all(to, bio, i) {
 		struct page *page = to->bv_page;
 
-		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
+		if (page_to_pfn(page) <= queue_bounce_pfn(q))
 			continue;
 
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
@@ -254,7 +235,6 @@
 
 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
 {
-	int must_bounce;
 	mempool_t *pool;
 
 	/*
@@ -263,15 +243,13 @@
 	if (!bio_has_data(*bio_orig))
 		return;
 
-	must_bounce = must_snapshot_stable_pages(q, *bio_orig);
-
 	/*
 	 * for non-isa bounce case, just check if the bounce pfn is equal
 	 * to or bigger than the highest pfn in the system -- in that case,
 	 * don't waste time iterating over bio segments
 	 */
 	if (!(q->bounce_gfp & GFP_DMA)) {
-		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
+		if (queue_bounce_pfn(q) >= blk_max_pfn)
 			return;
 		pool = page_pool;
 	} else {
@@ -282,7 +260,7 @@
 	/*
 	 * slow path
 	 */
-	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
+	__blk_queue_bounce(q, bio_orig, pool);
 }
 
 EXPORT_SYMBOL(blk_queue_bounce);
diff --git a/block/genhd.c b/block/genhd.c
index 59a1395..0c706f3 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1110,8 +1110,7 @@
 	disk_release_events(disk);
 	kfree(disk->random);
 	disk_replace_part_tbl(disk, NULL);
-	free_part_stats(&disk->part0);
-	free_part_info(&disk->part0);
+	hd_free_part(&disk->part0);
 	if (disk->queue)
 		blk_put_queue(disk->queue);
 	kfree(disk);
@@ -1285,7 +1284,11 @@
 		 * converted to make use of bd_mutex and sequence counters.
 		 */
 		seqcount_init(&disk->part0.nr_sects_seq);
-		hd_ref_init(&disk->part0);
+		if (hd_ref_init(&disk->part0)) {
+			hd_free_part(&disk->part0);
+			kfree(disk);
+			return NULL;
+		}
 
 		disk->minors = minors;
 		rand_initialize_disk(disk);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 0d9e5f9..e771113 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -212,8 +212,7 @@
 {
 	struct hd_struct *p = dev_to_part(dev);
 	blk_free_devt(dev->devt);
-	free_part_stats(p);
-	free_part_info(p);
+	hd_free_part(p);
 	kfree(p);
 }
 
@@ -233,8 +232,9 @@
 	put_device(part_to_dev(part));
 }
 
-void __delete_partition(struct hd_struct *part)
+void __delete_partition(struct percpu_ref *ref)
 {
+	struct hd_struct *part = container_of(ref, struct hd_struct, ref);
 	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
 }
 
@@ -255,7 +255,7 @@
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
 
-	hd_struct_put(part);
+	hd_struct_kill(part);
 }
 
 static ssize_t whole_disk_show(struct device *dev,
@@ -356,8 +356,8 @@
 	if (!dev_get_uevent_suppress(ddev))
 		kobject_uevent(&pdev->kobj, KOBJ_ADD);
 
-	hd_ref_init(p);
-	return p;
+	if (!hd_ref_init(p))
+		return p;
 
 out_free_info:
 	free_part_info(p);
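The genhd.c and partition-generic.c hunks move the hd_struct reference count onto a percpu_ref: hd_ref_init() can now fail (hence the new error handling at both call sites), __delete_partition() becomes the percpu_ref release callback, and hd_struct_kill() replaces the final put on the partition-removal path. The helpers are presumably thin wrappers over the percpu_ref API, roughly as below (an assumption based on the signatures visible above, not code copied from the series):

static inline int hd_ref_init(struct hd_struct *part)
{
	return percpu_ref_init(&part->ref, __delete_partition, 0, GFP_KERNEL);
}

static inline void hd_struct_kill(struct hd_struct *part)
{
	percpu_ref_kill(&part->ref);	/* release callback runs after the last reference drops */
}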
diff --git a/crypto/Kconfig b/crypto/Kconfig
index b4cfc57..b582ea7 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -48,6 +48,8 @@
 config CRYPTO_AEAD2
 	tristate
 	select CRYPTO_ALGAPI2
+	select CRYPTO_NULL2
+	select CRYPTO_RNG2
 
 config CRYPTO_BLKCIPHER
 	tristate
@@ -150,12 +152,16 @@
 
 config CRYPTO_NULL
 	tristate "Null algorithms"
-	select CRYPTO_ALGAPI
-	select CRYPTO_BLKCIPHER
-	select CRYPTO_HASH
+	select CRYPTO_NULL2
 	help
 	  These are 'Null' algorithms, used by IPsec, which do nothing.
 
+config CRYPTO_NULL2
+	tristate
+	select CRYPTO_ALGAPI2
+	select CRYPTO_BLKCIPHER2
+	select CRYPTO_HASH2
+
 config CRYPTO_PCRYPT
 	tristate "Parallel crypto engine"
 	depends on SMP
@@ -200,6 +206,7 @@
 	select CRYPTO_BLKCIPHER
 	select CRYPTO_MANAGER
 	select CRYPTO_HASH
+	select CRYPTO_NULL
 	help
 	  Authenc: Combined mode wrapper for IPsec.
 	  This is required for IPSec.
@@ -470,6 +477,18 @@
 	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
 	  in IETF protocols. This is the portable C implementation of Poly1305.
 
+config CRYPTO_POLY1305_X86_64
+	tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)"
+	depends on X86 && 64BIT
+	select CRYPTO_POLY1305
+	help
+	  Poly1305 authenticator algorithm, RFC7539.
+
+	  Poly1305 is an authenticator algorithm designed by Daniel J. Bernstein.
+	  It is used for the ChaCha20-Poly1305 AEAD, specified in RFC7539 for use
+	  in IETF protocols. This is the x86_64 assembler implementation using SIMD
+	  instructions.
+
 config CRYPTO_MD4
 	tristate "MD4 digest algorithm"
 	select CRYPTO_HASH
@@ -1213,6 +1232,21 @@
 	  See also:
 	  <http://cr.yp.to/chacha/chacha-20080128.pdf>
 
+config CRYPTO_CHACHA20_X86_64
+	tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)"
+	depends on X86 && 64BIT
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_CHACHA20
+	help
+	  ChaCha20 cipher algorithm, RFC7539.
+
+	  ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
+	  Bernstein and further specified in RFC7539 for use in IETF protocols.
+	  This is the x86_64 assembler implementation using SIMD instructions.
+
+	  See also:
+	  <http://cr.yp.to/chacha/chacha-20080128.pdf>
+
 config CRYPTO_SEED
 	tristate "SEED cipher algorithm"
 	select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index a16a7e7..e2c5981 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -17,6 +17,7 @@
 
 crypto_blkcipher-y := ablkcipher.o
 crypto_blkcipher-y += blkcipher.o
+crypto_blkcipher-y += skcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
@@ -46,7 +47,7 @@
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
 obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
 obj-$(CONFIG_CRYPTO_XCBC) += xcbc.o
-obj-$(CONFIG_CRYPTO_NULL) += crypto_null.o
+obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
 obj-$(CONFIG_CRYPTO_MD4) += md4.o
 obj-$(CONFIG_CRYPTO_MD5) += md5.o
 obj-$(CONFIG_CRYPTO_RMD128) += rmd128.o
diff --git a/crypto/aead.c b/crypto/aead.c
index 07bf997..9b18a1e 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -3,7 +3,7 @@
  *
  * This file provides API support for AEAD algorithms.
  *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -13,13 +13,14 @@
  */
 
 #include <crypto/internal/geniv.h>
+#include <crypto/internal/rng.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/cryptouser.h>
@@ -27,17 +28,6 @@
 
 #include "internal.h"
 
-struct compat_request_ctx {
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
-	struct scatterlist ivbuf[2];
-	struct scatterlist *ivsg;
-	struct aead_givcrypt_request subreq;
-};
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *req);
-static int aead_null_givdecrypt(struct aead_givcrypt_request *req);
-
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 			    unsigned int keylen)
 {
@@ -53,7 +43,7 @@
 
 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 	memcpy(alignbuffer, key, keylen);
-	ret = tfm->setkey(tfm, alignbuffer, keylen);
+	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
 	memset(alignbuffer, 0, keylen);
 	kfree(buffer);
 	return ret;
@@ -64,12 +54,10 @@
 {
 	unsigned long alignmask = crypto_aead_alignmask(tfm);
 
-	tfm = tfm->child;
-
 	if ((unsigned long)key & alignmask)
 		return setkey_unaligned(tfm, key, keylen);
 
-	return tfm->setkey(tfm, key, keylen);
+	return crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setkey);
 
@@ -80,100 +68,17 @@
 	if (authsize > crypto_aead_maxauthsize(tfm))
 		return -EINVAL;
 
-	if (tfm->setauthsize) {
-		err = tfm->setauthsize(tfm->child, authsize);
+	if (crypto_aead_alg(tfm)->setauthsize) {
+		err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize);
 		if (err)
 			return err;
 	}
 
-	tfm->child->authsize = authsize;
 	tfm->authsize = authsize;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
-struct aead_old_request {
-	struct scatterlist srcbuf[2];
-	struct scatterlist dstbuf[2];
-	struct aead_request subreq;
-};
-
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
-{
-	return tfm->reqsize + sizeof(struct aead_old_request);
-}
-EXPORT_SYMBOL_GPL(crypto_aead_reqsize);
-
-static int old_crypt(struct aead_request *req,
-		     int (*crypt)(struct aead_request *req))
-{
-	struct aead_old_request *nreq = aead_request_ctx(req);
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct scatterlist *src, *dst;
-
-	if (req->old)
-		return crypt(req);
-
-	src = scatterwalk_ffwd(nreq->srcbuf, req->src, req->assoclen);
-	dst = req->src == req->dst ?
-	      src : scatterwalk_ffwd(nreq->dstbuf, req->dst, req->assoclen);
-
-	aead_request_set_tfm(&nreq->subreq, aead);
-	aead_request_set_callback(&nreq->subreq, aead_request_flags(req),
-				  req->base.complete, req->base.data);
-	aead_request_set_crypt(&nreq->subreq, src, dst, req->cryptlen,
-			       req->iv);
-	aead_request_set_assoc(&nreq->subreq, req->src, req->assoclen);
-
-	return crypt(&nreq->subreq);
-}
-
-static int old_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct old_aead_alg *alg = crypto_old_aead_alg(aead);
-
-	return old_crypt(req, alg->encrypt);
-}
-
-static int old_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct old_aead_alg *alg = crypto_old_aead_alg(aead);
-
-	return old_crypt(req, alg->decrypt);
-}
-
-static int no_givcrypt(struct aead_givcrypt_request *req)
-{
-	return -ENOSYS;
-}
-
-static int crypto_old_aead_init_tfm(struct crypto_tfm *tfm)
-{
-	struct old_aead_alg *alg = &tfm->__crt_alg->cra_aead;
-	struct crypto_aead *crt = __crypto_aead_cast(tfm);
-
-	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
-		return -EINVAL;
-
-	crt->setkey = alg->setkey;
-	crt->setauthsize = alg->setauthsize;
-	crt->encrypt = old_encrypt;
-	crt->decrypt = old_decrypt;
-	if (alg->ivsize) {
-		crt->givencrypt = alg->givencrypt ?: no_givcrypt;
-		crt->givdecrypt = alg->givdecrypt ?: no_givcrypt;
-	} else {
-		crt->givencrypt = aead_null_givencrypt;
-		crt->givdecrypt = aead_null_givdecrypt;
-	}
-	crt->child = __crypto_aead_cast(tfm);
-	crt->authsize = alg->maxauthsize;
-
-	return 0;
-}
-
 static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
 {
 	struct crypto_aead *aead = __crypto_aead_cast(tfm);
@@ -187,14 +92,6 @@
 	struct crypto_aead *aead = __crypto_aead_cast(tfm);
 	struct aead_alg *alg = crypto_aead_alg(aead);
 
-	if (crypto_old_aead_alg(aead)->encrypt)
-		return crypto_old_aead_init_tfm(tfm);
-
-	aead->setkey = alg->setkey;
-	aead->setauthsize = alg->setauthsize;
-	aead->encrypt = alg->encrypt;
-	aead->decrypt = alg->decrypt;
-	aead->child = __crypto_aead_cast(tfm);
 	aead->authsize = alg->maxauthsize;
 
 	if (alg->exit)
@@ -207,64 +104,6 @@
 }
 
 #ifdef CONFIG_NET
-static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_report_aead raead;
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	strncpy(raead.type, "aead", sizeof(raead.type));
-	strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
-
-	raead.blocksize = alg->cra_blocksize;
-	raead.maxauthsize = aead->maxauthsize;
-	raead.ivsize = aead->ivsize;
-
-	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
-		    sizeof(struct crypto_report_aead), &raead))
-		goto nla_put_failure;
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-#else
-static int crypto_old_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	return -ENOSYS;
-}
-#endif
-
-static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
-	__attribute__ ((unused));
-static void crypto_old_aead_show(struct seq_file *m, struct crypto_alg *alg)
-{
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	seq_printf(m, "type         : aead\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-					     "yes" : "no");
-	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-	seq_printf(m, "geniv        : %s\n", aead->geniv ?: "<built-in>");
-}
-
-const struct crypto_type crypto_aead_type = {
-	.extsize = crypto_alg_extsize,
-	.init_tfm = crypto_aead_init_tfm,
-#ifdef CONFIG_PROC_FS
-	.show = crypto_old_aead_show,
-#endif
-	.report = crypto_old_aead_report,
-	.lookup = crypto_lookup_aead,
-	.maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
-	.maskset = CRYPTO_ALG_TYPE_MASK,
-	.type = CRYPTO_ALG_TYPE_AEAD,
-	.tfmsize = offsetof(struct crypto_aead, base),
-};
-EXPORT_SYMBOL_GPL(crypto_aead_type);
-
-#ifdef CONFIG_NET
 static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_aead raead;
@@ -307,9 +146,22 @@
 	seq_printf(m, "geniv        : <none>\n");
 }
 
-static const struct crypto_type crypto_new_aead_type = {
+static void crypto_aead_free_instance(struct crypto_instance *inst)
+{
+	struct aead_instance *aead = aead_instance(inst);
+
+	if (!aead->free) {
+		inst->tmpl->free(inst);
+		return;
+	}
+
+	aead->free(aead);
+}
+
+static const struct crypto_type crypto_aead_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_aead_init_tfm,
+	.free = crypto_aead_free_instance,
 #ifdef CONFIG_PROC_FS
 	.show = crypto_aead_show,
 #endif
@@ -320,81 +172,6 @@
 	.tfmsize = offsetof(struct crypto_aead, base),
 };
 
-static int aead_null_givencrypt(struct aead_givcrypt_request *req)
-{
-	return crypto_aead_encrypt(&req->areq);
-}
-
-static int aead_null_givdecrypt(struct aead_givcrypt_request *req)
-{
-	return crypto_aead_decrypt(&req->areq);
-}
-
-#ifdef CONFIG_NET
-static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_report_aead raead;
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	strncpy(raead.type, "nivaead", sizeof(raead.type));
-	strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
-
-	raead.blocksize = alg->cra_blocksize;
-	raead.maxauthsize = aead->maxauthsize;
-	raead.ivsize = aead->ivsize;
-
-	if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
-		    sizeof(struct crypto_report_aead), &raead))
-		goto nla_put_failure;
-	return 0;
-
-nla_put_failure:
-	return -EMSGSIZE;
-}
-#else
-static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	return -ENOSYS;
-}
-#endif
-
-
-static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-	__attribute__ ((unused));
-static void crypto_nivaead_show(struct seq_file *m, struct crypto_alg *alg)
-{
-	struct old_aead_alg *aead = &alg->cra_aead;
-
-	seq_printf(m, "type         : nivaead\n");
-	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
-					     "yes" : "no");
-	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
-	seq_printf(m, "ivsize       : %u\n", aead->ivsize);
-	seq_printf(m, "maxauthsize  : %u\n", aead->maxauthsize);
-	seq_printf(m, "geniv        : %s\n", aead->geniv);
-}
-
-const struct crypto_type crypto_nivaead_type = {
-	.extsize = crypto_alg_extsize,
-	.init_tfm = crypto_aead_init_tfm,
-#ifdef CONFIG_PROC_FS
-	.show = crypto_nivaead_show,
-#endif
-	.report = crypto_nivaead_report,
-	.maskclear = ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV),
-	.maskset = CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV,
-	.type = CRYPTO_ALG_TYPE_AEAD,
-	.tfmsize = offsetof(struct crypto_aead, base),
-};
-EXPORT_SYMBOL_GPL(crypto_nivaead_type);
-
-static int crypto_grab_nivaead(struct crypto_aead_spawn *spawn,
-			       const char *name, u32 type, u32 mask)
-{
-	spawn->base.frontend = &crypto_nivaead_type;
-	return crypto_grab_spawn(&spawn->base, name, type, mask);
-}
-
 static int aead_geniv_setkey(struct crypto_aead *tfm,
 			     const u8 *key, unsigned int keylen)
 {
@@ -411,169 +188,6 @@
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
 
-static void compat_encrypt_complete2(struct aead_request *req, int err)
-{
-	struct compat_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_givcrypt_request *subreq = &rctx->subreq;
-	struct crypto_aead *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = crypto_aead_reqtfm(req);
-	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
-				 crypto_aead_ivsize(geniv), 1);
-
-out:
-	kzfree(subreq->giv);
-}
-
-static void compat_encrypt_complete(struct crypto_async_request *base, int err)
-{
-	struct aead_request *req = base->data;
-
-	compat_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
-static int compat_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct compat_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_givcrypt_request *subreq = &rctx->subreq;
-	unsigned int ivsize = crypto_aead_ivsize(geniv);
-	struct scatterlist *src, *dst;
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	__be64 seq;
-	int err;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
-	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);
-
-	if (!info) {
-		info = kmalloc(ivsize, req->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = compat_encrypt_complete;
-		data = req;
-	}
-
-	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));
-
-	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
-	dst = req->src == req->dst ?
-	      src : scatterwalk_ffwd(rctx->dst, rctx->ivsg, ivsize);
-
-	aead_givcrypt_set_tfm(subreq, ctx->child);
-	aead_givcrypt_set_callback(subreq, req->base.flags,
-				   req->base.complete, req->base.data);
-	aead_givcrypt_set_crypt(subreq, src, dst,
-				req->cryptlen - ivsize, req->iv);
-	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
-	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));
-
-	err = crypto_aead_givencrypt(subreq);
-	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
-		compat_encrypt_complete2(req, err);
-	return err;
-}
-
-static int compat_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct compat_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq.areq;
-	unsigned int ivsize = crypto_aead_ivsize(geniv);
-	struct scatterlist *src, *dst;
-	crypto_completion_t compl;
-	void *data;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->child);
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen + ivsize);
-	dst = req->src == req->dst ?
-	      src : scatterwalk_ffwd(rctx->dst, req->dst,
-				     req->assoclen + ivsize);
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq, src, dst,
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_assoc(subreq, req->src, req->assoclen);
-
-	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-
-	return crypto_aead_decrypt(subreq);
-}
-
-static int compat_encrypt_first(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (geniv->encrypt != compat_encrypt_first)
-		goto unlock;
-
-	geniv->encrypt = compat_encrypt;
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	if (err)
-		return err;
-
-	return compat_encrypt(req);
-}
-
-static int aead_geniv_init_compat(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct compat_request_ctx));
-
-	err = aead_geniv_init(tfm);
-
-	ctx->child = geniv->child;
-	geniv->child = geniv;
-
-	return err;
-}
-
-static void aead_geniv_exit_compat(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
-
-	crypto_free_aead(ctx->child);
-}
-
 struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
 				       struct rtattr **tb, u32 type, u32 mask)
 {
@@ -590,8 +204,7 @@
 	if (IS_ERR(algt))
 		return ERR_CAST(algt);
 
-	if ((algt->type ^ (CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV)) &
-	    algt->mask)
+	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
 		return ERR_PTR(-EINVAL);
 
 	name = crypto_attr_alg_name(tb[1]);
@@ -608,9 +221,7 @@
 	mask |= crypto_requires_sync(algt->type, algt->mask);
 
 	crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
-	err = (algt->mask & CRYPTO_ALG_GENIV) ?
-	      crypto_grab_nivaead(spawn, name, type, mask) :
-	      crypto_grab_aead(spawn, name, type, mask);
+	err = crypto_grab_aead(spawn, name, type, mask);
 	if (err)
 		goto err_free_inst;
 
@@ -623,43 +234,6 @@
 	if (ivsize < sizeof(u64))
 		goto err_drop_alg;
 
-	/*
-	 * This is only true if we're constructing an algorithm with its
-	 * default IV generator.  For the default generator we elide the
-	 * template name and double-check the IV generator.
-	 */
-	if (algt->mask & CRYPTO_ALG_GENIV) {
-		if (!alg->base.cra_aead.encrypt)
-			goto err_drop_alg;
-		if (strcmp(tmpl->name, alg->base.cra_aead.geniv))
-			goto err_drop_alg;
-
-		memcpy(inst->alg.base.cra_name, alg->base.cra_name,
-		       CRYPTO_MAX_ALG_NAME);
-		memcpy(inst->alg.base.cra_driver_name,
-		       alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME);
-
-		inst->alg.base.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					   CRYPTO_ALG_GENIV;
-		inst->alg.base.cra_flags |= alg->base.cra_flags &
-					    CRYPTO_ALG_ASYNC;
-		inst->alg.base.cra_priority = alg->base.cra_priority;
-		inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
-		inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
-		inst->alg.base.cra_type = &crypto_aead_type;
-
-		inst->alg.base.cra_aead.ivsize = ivsize;
-		inst->alg.base.cra_aead.maxauthsize = maxauthsize;
-
-		inst->alg.base.cra_aead.setkey = alg->base.cra_aead.setkey;
-		inst->alg.base.cra_aead.setauthsize =
-			alg->base.cra_aead.setauthsize;
-		inst->alg.base.cra_aead.encrypt = alg->base.cra_aead.encrypt;
-		inst->alg.base.cra_aead.decrypt = alg->base.cra_aead.decrypt;
-
-		goto out;
-	}
-
 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s)", tmpl->name, alg->base.cra_name) >=
@@ -682,12 +256,6 @@
 	inst->alg.ivsize = ivsize;
 	inst->alg.maxauthsize = maxauthsize;
 
-	inst->alg.encrypt = compat_encrypt_first;
-	inst->alg.decrypt = compat_decrypt;
-
-	inst->alg.base.cra_init = aead_geniv_init_compat;
-	inst->alg.base.cra_exit = aead_geniv_exit_compat;
-
 out:
 	return inst;
 
@@ -707,147 +275,58 @@
 }
 EXPORT_SYMBOL_GPL(aead_geniv_free);
 
-int aead_geniv_init(struct crypto_tfm *tfm)
+int aead_init_geniv(struct crypto_aead *aead)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_instance *inst = aead_alg_instance(aead);
 	struct crypto_aead *child;
-	struct crypto_aead *aead;
-
-	aead = __crypto_aead_cast(tfm);
-
-	child = crypto_spawn_aead(crypto_instance_ctx(inst));
-	if (IS_ERR(child))
-		return PTR_ERR(child);
-
-	aead->child = child;
-	aead->reqsize += crypto_aead_reqsize(child);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(aead_geniv_init);
-
-void aead_geniv_exit(struct crypto_tfm *tfm)
-{
-	crypto_free_aead(__crypto_aead_cast(tfm)->child);
-}
-EXPORT_SYMBOL_GPL(aead_geniv_exit);
-
-static int crypto_nivaead_default(struct crypto_alg *alg, u32 type, u32 mask)
-{
-	struct rtattr *tb[3];
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_type data;
-	} ptype;
-	struct {
-		struct rtattr attr;
-		struct crypto_attr_alg data;
-	} palg;
-	struct crypto_template *tmpl;
-	struct crypto_instance *inst;
-	struct crypto_alg *larval;
-	const char *geniv;
 	int err;
 
-	larval = crypto_larval_lookup(alg->cra_driver_name,
-				      CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_GENIV,
-				      CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	err = PTR_ERR(larval);
-	if (IS_ERR(larval))
+	spin_lock_init(&ctx->lock);
+
+	err = crypto_get_default_rng();
+	if (err)
 		goto out;
 
-	err = -EAGAIN;
-	if (!crypto_is_larval(larval))
-		goto drop_larval;
+	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				   crypto_aead_ivsize(aead));
+	crypto_put_default_rng();
+	if (err)
+		goto out;
 
-	ptype.attr.rta_len = sizeof(ptype);
-	ptype.attr.rta_type = CRYPTOA_TYPE;
-	ptype.data.type = type | CRYPTO_ALG_GENIV;
-	/* GENIV tells the template that we're making a default geniv. */
-	ptype.data.mask = mask | CRYPTO_ALG_GENIV;
-	tb[0] = &ptype.attr;
+	ctx->null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(ctx->null);
+	if (IS_ERR(ctx->null))
+		goto out;
 
-	palg.attr.rta_len = sizeof(palg);
-	palg.attr.rta_type = CRYPTOA_ALG;
-	/* Must use the exact name to locate ourselves. */
-	memcpy(palg.data.name, alg->cra_driver_name, CRYPTO_MAX_ALG_NAME);
-	tb[1] = &palg.attr;
+	child = crypto_spawn_aead(aead_instance_ctx(inst));
+	err = PTR_ERR(child);
+	if (IS_ERR(child))
+		goto drop_null;
 
-	tb[2] = NULL;
+	ctx->child = child;
+	crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
+				      sizeof(struct aead_request));
 
-	geniv = alg->cra_aead.geniv;
+	err = 0;
 
-	tmpl = crypto_lookup_template(geniv);
-	err = -ENOENT;
-	if (!tmpl)
-		goto kill_larval;
-
-	if (tmpl->create) {
-		err = tmpl->create(tmpl, tb);
-		if (err)
-			goto put_tmpl;
-		goto ok;
-	}
-
-	inst = tmpl->alloc(tb);
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto put_tmpl;
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err) {
-		tmpl->free(inst);
-		goto put_tmpl;
-	}
-
-ok:
-	/* Redo the lookup to use the instance we just registered. */
-	err = -EAGAIN;
-
-put_tmpl:
-	crypto_tmpl_put(tmpl);
-kill_larval:
-	crypto_larval_kill(larval);
-drop_larval:
-	crypto_mod_put(larval);
 out:
-	crypto_mod_put(alg);
 	return err;
-}
 
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask)
+drop_null:
+	crypto_put_default_null_skcipher();
+	goto out;
+}
+EXPORT_SYMBOL_GPL(aead_init_geniv);
+
+void aead_exit_geniv(struct crypto_aead *tfm)
 {
-	struct crypto_alg *alg;
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
 
-	alg = crypto_alg_mod_lookup(name, type, mask);
-	if (IS_ERR(alg))
-		return alg;
-
-	if (alg->cra_type == &crypto_aead_type)
-		return alg;
-
-	if (!alg->cra_aead.ivsize)
-		return alg;
-
-	crypto_mod_put(alg);
-	alg = crypto_alg_mod_lookup(name, type | CRYPTO_ALG_TESTED,
-				    mask & ~CRYPTO_ALG_TESTED);
-	if (IS_ERR(alg))
-		return alg;
-
-	if (alg->cra_type == &crypto_aead_type) {
-		if (~alg->cra_flags & (type ^ ~mask) & CRYPTO_ALG_TESTED) {
-			crypto_mod_put(alg);
-			alg = ERR_PTR(-ENOENT);
-		}
-		return alg;
-	}
-
-	BUG_ON(!alg->cra_aead.ivsize);
-
-	return ERR_PTR(crypto_nivaead_default(alg, type, mask));
+	crypto_free_aead(ctx->child);
+	crypto_put_default_null_skcipher();
 }
-EXPORT_SYMBOL_GPL(crypto_lookup_aead);
+EXPORT_SYMBOL_GPL(aead_exit_geniv);
 
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
 		     u32 type, u32 mask)
@@ -870,7 +349,7 @@
 	if (max(alg->maxauthsize, alg->ivsize) > PAGE_SIZE / 8)
 		return -EINVAL;
 
-	base->cra_type = &crypto_new_aead_type;
+	base->cra_type = &crypto_aead_type;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
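
A note on the geniv rework above, for orientation: IV-generator templates are
now expected to build their instance with aead_geniv_alloc() and reuse the
aead_init_geniv()/aead_exit_geniv() helpers for per-tfm setup and teardown.
The sketch below is illustrative only and not part of the patch; the template
name "example_geniv_create" is invented, and the real templates (seqiv,
echainiv) additionally supply their own encrypt/decrypt handlers.

#include <crypto/internal/aead.h>
#include <crypto/internal/geniv.h>

static int example_geniv_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);	/* wraps the inner AEAD */
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* room for the geniv context plus the salt aead_init_geniv() fills */
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx) +
				     inst->alg.ivsize;

	/* shared per-tfm hooks added in the hunks above */
	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	/* a real template sets inst->alg.encrypt / inst->alg.decrypt here */

	err = aead_register_instance(tmpl, inst);
	if (err)
		aead_geniv_free(inst);
	return err;
}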
 
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 3c079b7..d130b41 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -67,12 +67,22 @@
 	return crypto_set_driver_name(alg);
 }
 
+static void crypto_free_instance(struct crypto_instance *inst)
+{
+	if (!inst->alg.cra_type->free) {
+		inst->tmpl->free(inst);
+		return;
+	}
+
+	inst->alg.cra_type->free(inst);
+}
+
 static void crypto_destroy_instance(struct crypto_alg *alg)
 {
 	struct crypto_instance *inst = (void *)alg;
 	struct crypto_template *tmpl = inst->tmpl;
 
-	tmpl->free(inst);
+	crypto_free_instance(inst);
 	crypto_tmpl_put(tmpl);
 }
 
@@ -481,7 +491,7 @@
 
 	hlist_for_each_entry_safe(inst, n, list, list) {
 		BUG_ON(atomic_read(&inst->alg.cra_refcnt) != 1);
-		tmpl->free(inst);
+		crypto_free_instance(inst);
 	}
 	crypto_remove_final(&users);
 }
@@ -892,7 +902,7 @@
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
 {
 	struct list_head *request;
 
@@ -907,14 +917,7 @@
 	request = queue->list.next;
 	list_del(request);
 
-	return (char *)list_entry(request, struct crypto_async_request, list) -
-	       offset;
-}
-EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
-
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
-{
-	return __crypto_dequeue_request(queue, 0);
+	return list_entry(request, struct crypto_async_request, list);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
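
The queue helper above now hands back the crypto_async_request directly
instead of detouring through __crypto_dequeue_request() with an offset.  For
reference, a rough sketch of the driver-side pattern it serves; "queue" and
the surrounding locking are assumed to live in the caller and are not part of
the patch:

	struct crypto_async_request *backlog, *async_req;

	/* under the driver's own queue lock */
	backlog = crypto_get_backlog(&queue);
	async_req = crypto_dequeue_request(&queue);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);  /* request left the backlog */

	if (async_req) {
		/* ... process async_req, then call async_req->complete(async_req, err) ... */
	}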
 
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 76fc0b2..6e39d9c 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -248,13 +248,11 @@
 	type = alg->cra_flags;
 
 	/* This piece of crap needs to disappear into per-type test hooks. */
-	if ((!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
-	       CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
-	     ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
-	      CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
-					  alg->cra_ablkcipher.ivsize)) ||
-	    (!((type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK) &&
-	     alg->cra_type == &crypto_nivaead_type && alg->cra_aead.ivsize))
+	if (!((type ^ CRYPTO_ALG_TYPE_BLKCIPHER) &
+	      CRYPTO_ALG_TYPE_BLKCIPHER_MASK) && !(type & CRYPTO_ALG_GENIV) &&
+	    ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	     CRYPTO_ALG_TYPE_BLKCIPHER ? alg->cra_blkcipher.ivsize :
+					 alg->cra_ablkcipher.ivsize))
 		type |= CRYPTO_ALG_TESTED;
 
 	param->type = type;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index e0408a4..0aa6fdf 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -90,6 +90,7 @@
 		put_page(sg_page(sg + i));
 		sg_assign_page(sg + i, NULL);
 	}
+	sg_init_table(sg, ALG_MAX_PAGES);
 	sgl->cur = 0;
 	ctx->used = 0;
 	ctx->more = 0;
@@ -514,8 +515,7 @@
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
 {
-	return crypto_alloc_aead(name, type | CRYPTO_ALG_AEAD_NEW,
-				 mask | CRYPTO_ALG_AEAD_NEW);
+	return crypto_alloc_aead(name, type, mask);
 }
 
 static void aead_release(void *private)
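
aead_bind() above now allocates a plain AEAD transform without requesting the
transitional CRYPTO_ALG_AEAD_NEW marker.  For orientation, a minimal sketch of
driving algif_aead from user space follows; it is not part of the patch, error
handling is omitted, and "gcm(aes)", the key buffer and the 16-byte tag size
are arbitrary example choices:

#include <sys/socket.h>
#include <linux/if_alg.h>

static int example_open_aead(const unsigned char *key, int keylen)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen);
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

	/* each accept()ed fd is one transform instance: feed it via sendmsg()
	 * with ALG_SET_OP/ALG_SET_IV/ALG_SET_AEAD_ASSOCLEN cmsgs and read()
	 * the result back */
	return accept(tfmfd, NULL, 0);
}
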
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 9450752..af31a0e 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -145,7 +145,7 @@
 		sgl->cur = 0;
 
 		if (sg)
-			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
 
 		list_add_tail(&sgl->list, &ctx->tsgl);
 	}
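
The only change above is the switch from the crypto-local
scatterwalk_sg_chain() wrapper to the generic sg_chain().  A minimal
illustration of what that call does, using the same MAX_SGL_ENTS sizing as the
code above; the fragment is illustrative only and not part of the patch:

	struct scatterlist first[MAX_SGL_ENTS + 1], second[MAX_SGL_ENTS + 1];

	sg_init_table(first, MAX_SGL_ENTS + 1);
	sg_init_table(second, MAX_SGL_ENTS + 1);

	/* ... fill the first MAX_SGL_ENTS entries of "first" ... */

	/* turn the spare last slot of "first" into a chain link, so
	 * scatterlist walkers continue seamlessly into "second" */
	sg_chain(first, MAX_SGL_ENTS + 1, second);
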
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 3e85229..55a354d 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -1,7 +1,7 @@
 /*
  * Authenc: Simple AEAD wrapper for IPsec
  *
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -14,6 +14,7 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -23,26 +24,21 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
-
 struct authenc_instance_ctx {
 	struct crypto_ahash_spawn auth;
 	struct crypto_skcipher_spawn enc;
+	unsigned int reqoff;
 };
 
 struct crypto_authenc_ctx {
-	unsigned int reqoff;
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 };
 
 struct authenc_request_ctx {
-	unsigned int cryptlen;
-	struct scatterlist *sg;
-	struct scatterlist asg[2];
-	struct scatterlist cipher[2];
-	crypto_completion_t complete;
-	crypto_completion_t update_complete;
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
 	char tail[];
 };
 
@@ -119,189 +115,35 @@
 	goto out;
 }
 
-static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
-					    int err)
-{
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc), 1);
-
-out:
-	authenc_request_complete(req, err);
-}
-
 static void authenc_geniv_ahash_done(struct crypto_async_request *areq, int err)
 {
 	struct aead_request *req = areq->data;
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct aead_instance *inst = aead_alg_instance(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
 
 	if (err)
 		goto out;
 
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
+	scatterwalk_map_and_copy(ahreq->result, req->dst,
+				 req->assoclen + req->cryptlen,
 				 crypto_aead_authsize(authenc), 1);
 
 out:
 	aead_request_complete(req, err);
 }
 
-static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
-					     int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_request_complete(req, err);
-}
-
-static void authenc_verify_ahash_done(struct crypto_async_request *areq,
-				      int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_request_complete(req, err);
-}
-
-static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags)
+static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct aead_instance *inst = aead_alg_instance(authenc);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct crypto_ahash *auth = ctx->auth;
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	u8 *hash = areq_ctx->tail;
-	int err;
-
-	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1);
-
-	ahash_request_set_tfm(ahreq, auth);
-
-	err = crypto_ahash_init(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->update_complete, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	return hash;
-}
-
-static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct crypto_ahash *auth = ctx->auth;
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
 	u8 *hash = areq_ctx->tail;
 	int err;
 
@@ -309,66 +151,18 @@
 			   crypto_ahash_alignmask(auth) + 1);
 
 	ahash_request_set_tfm(ahreq, auth);
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->complete, req);
+	ahash_request_set_crypt(ahreq, req->dst, hash,
+				req->assoclen + req->cryptlen);
+	ahash_request_set_callback(ahreq, flags,
+				   authenc_geniv_ahash_done, req);
 
 	err = crypto_ahash_digest(ahreq);
 	if (err)
-		return ERR_PTR(err);
+		return err;
 
-	return hash;
-}
-
-static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
-				 unsigned int flags)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *dst = req->dst;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *asg = areq_ctx->asg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc);
-	unsigned int cryptlen = req->cryptlen;
-	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
-	struct page *dstp;
-	u8 *vdst;
-	u8 *hash;
-
-	dstp = sg_page(dst);
-	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
-		dst = cipher;
-		cryptlen += ivsize;
-	}
-
-	if (req->assoclen && sg_is_last(assoc)) {
-		authenc_ahash_fn = crypto_authenc_ahash;
-		sg_init_table(asg, 2);
-		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		scatterwalk_crypto_chain(asg, dst, 0, 2);
-		dst = asg;
-		cryptlen += req->assoclen;
-	}
-
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->sg = dst;
-
-	areq_ctx->complete = authenc_geniv_ahash_done;
-	areq_ctx->update_complete = authenc_geniv_ahash_update_done;
-
-	hash = authenc_ahash_fn(req, flags);
-	if (IS_ERR(hash))
-		return PTR_ERR(hash);
-
-	scatterwalk_map_and_copy(hash, dst, cryptlen,
+	scatterwalk_map_and_copy(hash, req->dst, req->assoclen + req->cryptlen,
 				 crypto_aead_authsize(authenc), 1);
+
 	return 0;
 }
 
@@ -377,180 +171,155 @@
 {
 	struct aead_request *areq = req->data;
 
-	if (!err) {
-		struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-		struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-		struct authenc_request_ctx *areq_ctx = aead_request_ctx(areq);
-		struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-							    + ctx->reqoff);
-		u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(ctx->enc);
+	if (err)
+		goto out;
 
-		err = crypto_authenc_genicv(areq, iv, 0);
-	}
+	err = crypto_authenc_genicv(areq, 0);
 
+out:
 	authenc_request_complete(areq, err);
 }
 
+static int crypto_authenc_copy_assoc(struct aead_request *req)
+{
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct blkcipher_desc desc = {
+		.tfm = ctx->null,
+	};
+
+	return crypto_blkcipher_encrypt(&desc, req->dst, req->src,
+					req->assoclen);
+}
+
 static int crypto_authenc_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct aead_instance *inst = aead_alg_instance(authenc);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
 	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_ablkcipher *enc = ctx->enc;
-	struct scatterlist *dst = req->dst;
 	unsigned int cryptlen = req->cryptlen;
-	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
-						    + ctx->reqoff);
-	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
+						    ictx->reqoff);
+	struct scatterlist *src, *dst;
 	int err;
 
+	sg_init_table(areq_ctx->src, 2);
+	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		err = crypto_authenc_copy_assoc(req);
+		if (err)
+			return err;
+
+		sg_init_table(areq_ctx->dst, 2);
+		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+	}
+
 	ablkcipher_request_set_tfm(abreq, enc);
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					crypto_authenc_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
-
-	memcpy(iv, req->iv, crypto_aead_ivsize(authenc));
+	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
 
 	err = crypto_ablkcipher_encrypt(abreq);
 	if (err)
 		return err;
 
-	return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+	return crypto_authenc_genicv(req, aead_request_flags(req));
 }
 
-static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
-					   int err)
+static int crypto_authenc_decrypt_tail(struct aead_request *req,
+				       unsigned int flags)
 {
-	struct aead_request *areq = req->data;
-
-	if (!err) {
-		struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-
-		err = crypto_authenc_genicv(areq, greq->giv, 0);
-	}
-
-	authenc_request_complete(areq, err);
-}
-
-static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *authenc = aead_givcrypt_reqtfm(req);
+	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	struct aead_instance *inst = aead_alg_instance(authenc);
 	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct aead_request *areq = &req->areq;
-	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-	u8 *iv = req->giv;
-	int err;
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail +
+						    ictx->reqoff);
+	unsigned int authsize = crypto_aead_authsize(authenc);
+	u8 *ihash = ahreq->result + authsize;
+	struct scatterlist *src, *dst;
 
-	skcipher_givcrypt_set_tfm(greq, ctx->enc);
-	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-				       crypto_authenc_givencrypt_done, areq);
-	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-				    areq->iv);
-	skcipher_givcrypt_set_giv(greq, iv, req->seq);
+	scatterwalk_map_and_copy(ihash, req->src, ahreq->nbytes, authsize, 0);
 
-	err = crypto_skcipher_givencrypt(greq);
+	if (crypto_memneq(ihash, ahreq->result, authsize))
+		return -EBADMSG;
+
+	sg_init_table(areq_ctx->src, 2);
+	src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		sg_init_table(areq_ctx->dst, 2);
+		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+	}
+
+	ablkcipher_request_set_tfm(abreq, ctx->enc);
+	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(abreq, src, dst,
+				     req->cryptlen - authsize, req->iv);
+
+	return crypto_ablkcipher_decrypt(abreq);
+}
+
+static void authenc_verify_ahash_done(struct crypto_async_request *areq,
+				      int err)
+{
+	struct aead_request *req = areq->data;
+
 	if (err)
-		return err;
+		goto out;
 
-	return crypto_authenc_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
-}
+	err = crypto_authenc_decrypt_tail(req, 0);
 
-static int crypto_authenc_verify(struct aead_request *req,
-				 authenc_ahash_t authenc_ahash_fn)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	u8 *ohash;
-	u8 *ihash;
-	unsigned int authsize;
-
-	areq_ctx->complete = authenc_verify_ahash_done;
-	areq_ctx->update_complete = authenc_verify_ahash_update_done;
-
-	ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (IS_ERR(ohash))
-		return PTR_ERR(ohash);
-
-	authsize = crypto_aead_authsize(authenc);
-	ihash = ohash + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
-}
-
-static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
-				  unsigned int cryptlen)
-{
-	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *src = req->src;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *asg = areq_ctx->asg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc);
-	authenc_ahash_t authenc_ahash_fn = crypto_authenc_ahash_fb;
-	struct page *srcp;
-	u8 *vsrc;
-
-	srcp = sg_page(src);
-	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
-		src = cipher;
-		cryptlen += ivsize;
-	}
-
-	if (req->assoclen && sg_is_last(assoc)) {
-		authenc_ahash_fn = crypto_authenc_ahash;
-		sg_init_table(asg, 2);
-		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		scatterwalk_crypto_chain(asg, src, 0, 2);
-		src = asg;
-		cryptlen += req->assoclen;
-	}
-
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->sg = src;
-
-	return crypto_authenc_verify(req, authenc_ahash_fn);
+out:
+	authenc_request_complete(req, err);
 }
 
 static int crypto_authenc_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-	struct ablkcipher_request *abreq = aead_request_ctx(req);
-	unsigned int cryptlen = req->cryptlen;
 	unsigned int authsize = crypto_aead_authsize(authenc);
-	u8 *iv = req->iv;
+	struct aead_instance *inst = aead_alg_instance(authenc);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_ahash *auth = ctx->auth;
+	struct authenc_request_ctx *areq_ctx = aead_request_ctx(req);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff);
+	u8 *hash = areq_ctx->tail;
 	int err;
 
-	if (cryptlen < authsize)
-		return -EINVAL;
-	cryptlen -= authsize;
+	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
+			   crypto_ahash_alignmask(auth) + 1);
 
-	err = crypto_authenc_iverify(req, iv, cryptlen);
+	ahash_request_set_tfm(ahreq, auth);
+	ahash_request_set_crypt(ahreq, req->src, hash,
+				req->assoclen + req->cryptlen - authsize);
+	ahash_request_set_callback(ahreq, aead_request_flags(req),
+				   authenc_verify_ahash_done, req);
+
+	err = crypto_ahash_digest(ahreq);
 	if (err)
 		return err;
 
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
-
-	return crypto_ablkcipher_decrypt(abreq);
+	return crypto_authenc_decrypt_tail(req, aead_request_flags(req));
 }
 
-static int crypto_authenc_init_tfm(struct crypto_tfm *tfm)
+static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct authenc_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct authenc_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 	int err;
 
 	auth = crypto_spawn_ahash(&ictx->auth);
@@ -562,42 +331,57 @@
 	if (IS_ERR(enc))
 		goto err_free_ahash;
 
+	null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(null);
+	if (IS_ERR(null))
+		goto err_free_skcipher;
+
 	ctx->auth = auth;
 	ctx->enc = enc;
+	ctx->null = null;
 
-	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-			    crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1) +
-		      crypto_ablkcipher_ivsize(enc);
-
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+	crypto_aead_set_reqsize(
+		tfm,
 		sizeof(struct authenc_request_ctx) +
-		ctx->reqoff +
+		ictx->reqoff +
 		max_t(unsigned int,
-			crypto_ahash_reqsize(auth) +
-			sizeof(struct ahash_request),
-			sizeof(struct skcipher_givcrypt_request) +
-			crypto_ablkcipher_reqsize(enc)));
+		      crypto_ahash_reqsize(auth) +
+		      sizeof(struct ahash_request),
+		      sizeof(struct ablkcipher_request) +
+		      crypto_ablkcipher_reqsize(enc)));
 
 	return 0;
 
+err_free_skcipher:
+	crypto_free_ablkcipher(enc);
 err_free_ahash:
 	crypto_free_ahash(auth);
 	return err;
 }
 
-static void crypto_authenc_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_authenc_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_ahash(ctx->auth);
 	crypto_free_ablkcipher(ctx->enc);
+	crypto_put_default_null_skcipher();
 }
 
-static struct crypto_instance *crypto_authenc_alloc(struct rtattr **tb)
+static void crypto_authenc_free(struct aead_instance *inst)
+{
+	struct authenc_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->enc);
+	crypto_drop_ahash(&ctx->auth);
+	kfree(inst);
+}
+
+static int crypto_authenc_create(struct crypto_template *tmpl,
+				 struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct hash_alg_common *auth;
 	struct crypto_alg *auth_base;
 	struct crypto_alg *enc;
@@ -607,15 +391,15 @@
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(auth))
-		return ERR_CAST(auth);
+		return PTR_ERR(auth);
 
 	auth_base = &auth->base;
 
@@ -629,13 +413,14 @@
 	if (!inst)
 		goto out_put_auth;
 
-	ctx = crypto_instance_ctx(inst);
+	ctx = aead_instance_ctx(inst);
 
-	err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+	err = crypto_init_ahash_spawn(&ctx->auth, auth,
+				      aead_crypto_instance(inst));
 	if (err)
 		goto err_free_inst;
 
-	crypto_set_skcipher_spawn(&ctx->enc, inst);
+	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
@@ -644,41 +429,47 @@
 
 	enc = crypto_skcipher_spawn_alg(&ctx->enc);
 
+	ctx->reqoff = ALIGN(2 * auth->digestsize + auth_base->cra_alignmask,
+			    auth_base->cra_alignmask + 1);
+
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "authenc(%s,%s)", auth_base->cra_name, enc->cra_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "authenc(%s,%s)", auth_base->cra_driver_name,
 		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = enc->cra_priority *
-				 10 + auth_base->cra_priority;
-	inst->alg.cra_blocksize = enc->cra_blocksize;
-	inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
-	inst->alg.cra_type = &crypto_aead_type;
+	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->cra_priority * 10 +
+				      auth_base->cra_priority;
+	inst->alg.base.cra_blocksize = enc->cra_blocksize;
+	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+				       enc->cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
 
-	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-	inst->alg.cra_aead.maxauthsize = auth->digestsize;
+	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+	inst->alg.maxauthsize = auth->digestsize;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_ctx);
+	inst->alg.init = crypto_authenc_init_tfm;
+	inst->alg.exit = crypto_authenc_exit_tfm;
 
-	inst->alg.cra_init = crypto_authenc_init_tfm;
-	inst->alg.cra_exit = crypto_authenc_exit_tfm;
+	inst->alg.setkey = crypto_authenc_setkey;
+	inst->alg.encrypt = crypto_authenc_encrypt;
+	inst->alg.decrypt = crypto_authenc_decrypt;
 
-	inst->alg.cra_aead.setkey = crypto_authenc_setkey;
-	inst->alg.cra_aead.encrypt = crypto_authenc_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_authenc_decrypt;
-	inst->alg.cra_aead.givencrypt = crypto_authenc_givencrypt;
+	inst->free = crypto_authenc_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_enc;
 
 out:
 	crypto_mod_put(auth_base);
-	return inst;
+	return err;
 
 err_drop_enc:
 	crypto_drop_skcipher(&ctx->enc);
@@ -687,23 +478,12 @@
 err_free_inst:
 	kfree(inst);
 out_put_auth:
-	inst = ERR_PTR(err);
 	goto out;
 }
 
-static void crypto_authenc_free(struct crypto_instance *inst)
-{
-	struct authenc_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->enc);
-	crypto_drop_ahash(&ctx->auth);
-	kfree(inst);
-}
-
 static struct crypto_template crypto_authenc_tmpl = {
 	.name = "authenc",
-	.alloc = crypto_authenc_alloc,
-	.free = crypto_authenc_free,
+	.create = crypto_authenc_create,
 	.module = THIS_MODULE,
 };
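
The conversion above moves authenc to the new AEAD calling convention: the
associated data is the leading req->assoclen bytes of req->src/req->dst
(skipped via scatterwalk_ffwd() for the cipher and copied with the default
null skcipher when src != dst), and the ICV is placed directly behind the
ciphertext.  A minimal caller-side sketch of that layout follows; it is not
part of the patch, "example_authenc_encrypt" and its parameters are invented,
and asynchronous (-EINPROGRESS) completion handling is omitted:

#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_authenc_encrypt(struct aead_request *req,
				   void *assoc, unsigned int assoclen,
				   void *data, unsigned int cryptlen,
				   unsigned int taglen, u8 *iv)
{
	/* one in-place buffer pair: [ associated data | plaintext ]
	 * (on-stack scatterlists assume synchronous completion) */
	struct scatterlist sg[2];

	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], assoc, assoclen);		/* AD comes first */
	sg_set_buf(&sg[1], data, cryptlen + taglen);	/* text plus room for the ICV */

	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	return crypto_aead_encrypt(req);	/* ICV lands after the ciphertext */
}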
 
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index b8efe36..0c04688 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -4,6 +4,7 @@
  *
  * Copyright (C) 2010 secunet Security Networks AG
  * Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -16,6 +17,7 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/authenc.h>
+#include <crypto/null.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -34,19 +36,12 @@
 	unsigned int reqoff;
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 };
 
 struct authenc_esn_request_ctx {
-	unsigned int cryptlen;
-	unsigned int headlen;
-	unsigned int trailen;
-	struct scatterlist *sg;
-	struct scatterlist hsg[2];
-	struct scatterlist tsg[1];
-	struct scatterlist cipher[2];
-	crypto_completion_t complete;
-	crypto_completion_t update_complete;
-	crypto_completion_t update_complete2;
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
 	char tail[];
 };
 
@@ -56,6 +51,15 @@
 		aead_request_complete(req, err);
 }
 
+static int crypto_authenc_esn_setauthsize(struct crypto_aead *authenc_esn,
+					  unsigned int authsize)
+{
+	if (authsize > 0 && authsize < 4)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
 				     unsigned int keylen)
 {
@@ -93,349 +97,73 @@
 	goto out;
 }
 
-static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
-						int err)
+static int crypto_authenc_esn_genicv_tail(struct aead_request *req,
+					  unsigned int flags)
 {
-	struct aead_request *req = areq->data;
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			     crypto_ahash_alignmask(auth) + 1);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	unsigned int assoclen = req->assoclen;
+	unsigned int cryptlen = req->cryptlen;
+	struct scatterlist *dst = req->dst;
+	u32 tmp[2];
 
-	if (err)
-		goto out;
+	/* Move high-order bits of sequence number back. */
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
 
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->update_complete2, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-
-out:
-	authenc_esn_request_complete(req, err);
+	scatterwalk_map_and_copy(hash, dst, assoclen + cryptlen, authsize, 1);
+	return 0;
 }
 
-static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
-						 int err)
-{
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-
-out:
-	authenc_esn_request_complete(req, err);
-}
-
-
 static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
 					 int err)
 {
 	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
 
-	if (err)
-		goto out;
-
-	scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
-				 areq_ctx->cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-
-out:
+	err = err ?: crypto_authenc_esn_genicv_tail(req, 0);
 	aead_request_complete(req, err);
 }
 
-
-static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
-						 int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
-				areq_ctx->cryptlen);
-
-	ahash_request_set_callback(ahreq,
-				   aead_request_flags(req) &
-				   CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->update_complete2, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc_esn);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_esn_request_complete(req, err);
-}
-
-static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
-						  int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) &
-					  CRYPTO_TFM_REQ_MAY_SLEEP,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc_esn);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_esn_request_complete(req, err);
-}
-
-
-static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
-					  int err)
-{
-	u8 *ihash;
-	unsigned int authsize;
-	struct ablkcipher_request *abreq;
-	struct aead_request *req = areq->data;
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	unsigned int cryptlen = req->cryptlen;
-
-	if (err)
-		goto out;
-
-	authsize = crypto_aead_authsize(authenc_esn);
-	cryptlen -= authsize;
-	ihash = ahreq->result + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-
-	err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
-	if (err)
-		goto out;
-
-	abreq = aead_request_ctx(req);
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst,
-				     cryptlen, req->iv);
-
-	err = crypto_ablkcipher_decrypt(abreq);
-
-out:
-	authenc_esn_request_complete(req, err);
-}
-
-static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
-				    unsigned int flags)
-{
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct crypto_ahash *auth = ctx->auth;
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
-	u8 *hash = areq_ctx->tail;
-	int err;
-
-	hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1);
-
-	ahash_request_set_tfm(ahreq, auth);
-
-	err = crypto_ahash_init(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->update_complete, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->update_complete2, req);
-
-	err = crypto_ahash_update(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
-				areq_ctx->trailen);
-	ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
-				   areq_ctx->complete, req);
-
-	err = crypto_ahash_finup(ahreq);
-	if (err)
-		return ERR_PTR(err);
-
-	return hash;
-}
-
-static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
+static int crypto_authenc_esn_genicv(struct aead_request *req,
 				     unsigned int flags)
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *dst = req->dst;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *hsg = areq_ctx->hsg;
-	struct scatterlist *tsg = areq_ctx->tsg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *hash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			     crypto_ahash_alignmask(auth) + 1);
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	unsigned int assoclen = req->assoclen;
 	unsigned int cryptlen = req->cryptlen;
-	struct page *dstp;
-	u8 *vdst;
-	u8 *hash;
+	struct scatterlist *dst = req->dst;
+	u32 tmp[2];
 
-	dstp = sg_page(dst);
-	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
+	if (!authsize)
+		return 0;
 
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
-		dst = cipher;
-		cryptlen += ivsize;
-	}
+	/* Move high-order bits of sequence number to the end. */
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
 
-	if (assoc->length < 12)
-		return -EINVAL;
+	sg_init_table(areq_ctx->dst, 2);
+	dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
 
-	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
+	ahash_request_set_tfm(ahreq, auth);
+	ahash_request_set_crypt(ahreq, dst, hash, assoclen + cryptlen);
+	ahash_request_set_callback(ahreq, flags,
+				   authenc_esn_geniv_ahash_done, req);
 
-	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
-
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = 8;
-	areq_ctx->trailen = 4;
-	areq_ctx->sg = dst;
-
-	areq_ctx->complete = authenc_esn_geniv_ahash_done;
-	areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
-	areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
-
-	hash = crypto_authenc_esn_ahash(req, flags);
-	if (IS_ERR(hash))
-		return PTR_ERR(hash);
-
-	scatterwalk_map_and_copy(hash, dst, cryptlen,
-				 crypto_aead_authsize(authenc_esn), 1);
-	return 0;
+	return crypto_ahash_digest(ahreq) ?:
+	       crypto_authenc_esn_genicv_tail(req, aead_request_flags(req));
 }
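
A hedged reading of the sequence-number shuffling above, not spelled out in
the patch itself: the 4 bytes at offset 4 of the associated data (the
high-order half of the extended sequence number) are parked behind the
ciphertext before hashing, so the digest covers them last -- the same order
the old head/trailer scatterlists produced -- and the tail functions copy
them back so the caller's buffer layout is left untouched.  On a flat buffer
the scatterwalk_map_and_copy() pairs amount to roughly this ("buf",
"assoclen" and "cryptlen" are illustrative stand-ins):

	u32 tmp[2];

	memcpy(tmp, buf, 8);				/* save bytes 0..7 */
	memcpy(buf + 4, tmp, 4);			/* bytes 0..3 shift to offset 4 */
	memcpy(buf + assoclen + cryptlen, tmp + 1, 4);	/* seqhi appended behind the text */

	/* the digest is then computed over buf + 4 for assoclen + cryptlen
	 * bytes, and the inverse copies restore the original layout */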
 
 
@@ -444,185 +172,167 @@
 {
 	struct aead_request *areq = req->data;
 
-	if (!err) {
-		struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
-		struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-		struct ablkcipher_request *abreq = aead_request_ctx(areq);
-		u8 *iv = (u8 *)(abreq + 1) +
-			 crypto_ablkcipher_reqsize(ctx->enc);
-
-		err = crypto_authenc_esn_genicv(areq, iv, 0);
-	}
+	if (!err)
+		err = crypto_authenc_esn_genicv(areq, 0);
 
 	authenc_esn_request_complete(areq, err);
 }
 
+static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
+{
+	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+	struct blkcipher_desc desc = {
+		.tfm = ctx->null,
+	};
+
+	return crypto_blkcipher_encrypt(&desc, req->dst, req->src, len);
+}
+
 static int crypto_authenc_esn_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct crypto_ablkcipher *enc = ctx->enc;
-	struct scatterlist *dst = req->dst;
-	unsigned int cryptlen = req->cryptlen;
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
 	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
 						    + ctx->reqoff);
-	u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
+	struct crypto_ablkcipher *enc = ctx->enc;
+	unsigned int assoclen = req->assoclen;
+	unsigned int cryptlen = req->cryptlen;
+	struct scatterlist *src, *dst;
 	int err;
 
+	sg_init_table(areq_ctx->src, 2);
+	src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		err = crypto_authenc_esn_copy(req, assoclen);
+		if (err)
+			return err;
+
+		sg_init_table(areq_ctx->dst, 2);
+		dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
+	}
+
 	ablkcipher_request_set_tfm(abreq, enc);
 	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
 					crypto_authenc_esn_encrypt_done, req);
-	ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
-
-	memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
+	ablkcipher_request_set_crypt(abreq, src, dst, cryptlen, req->iv);
 
 	err = crypto_ablkcipher_encrypt(abreq);
 	if (err)
 		return err;
 
-	return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+	return crypto_authenc_esn_genicv(req, aead_request_flags(req));
 }
 
-static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
-					       int err)
+static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
+					   unsigned int flags)
 {
-	struct aead_request *areq = req->data;
-
-	if (!err) {
-		struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-
-		err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
-	}
-
-	authenc_esn_request_complete(areq, err);
-}
-
-static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
+	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(authenc_esn);
+	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct aead_request *areq = &req->areq;
-	struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
-	u8 *iv = req->giv;
-	int err;
+	struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+						    + ctx->reqoff);
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			      crypto_ahash_alignmask(auth) + 1);
+	unsigned int cryptlen = req->cryptlen - authsize;
+	unsigned int assoclen = req->assoclen;
+	struct scatterlist *dst = req->dst;
+	u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+	u32 tmp[2];
 
-	skcipher_givcrypt_set_tfm(greq, ctx->enc);
-	skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
-				       crypto_authenc_esn_givencrypt_done, areq);
-	skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
-				    areq->iv);
-	skcipher_givcrypt_set_giv(greq, iv, req->seq);
+	/* Move high-order bits of sequence number back. */
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
 
-	err = crypto_skcipher_givencrypt(greq);
-	if (err)
-		return err;
+	if (crypto_memneq(ihash, ohash, authsize))
+		return -EBADMSG;
 
-	return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
+	sg_init_table(areq_ctx->dst, 2);
+	dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
+
+	ablkcipher_request_set_tfm(abreq, ctx->enc);
+	ablkcipher_request_set_callback(abreq, flags,
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(abreq, dst, dst, cryptlen, req->iv);
+
+	return crypto_ablkcipher_decrypt(abreq);
 }
 
-static int crypto_authenc_esn_verify(struct aead_request *req)
+static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
+					  int err)
 {
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	u8 *ohash;
-	u8 *ihash;
-	unsigned int authsize;
+	struct aead_request *req = areq->data;
 
-	areq_ctx->complete = authenc_esn_verify_ahash_done;
-	areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
-
-	ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
-	if (IS_ERR(ohash))
-		return PTR_ERR(ohash);
-
-	authsize = crypto_aead_authsize(authenc_esn);
-	ihash = ohash + authsize;
-	scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
-				 authsize, 0);
-	return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
-}
-
-static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
-				      unsigned int cryptlen)
-{
-	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
-	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
-	struct scatterlist *src = req->src;
-	struct scatterlist *assoc = req->assoc;
-	struct scatterlist *cipher = areq_ctx->cipher;
-	struct scatterlist *hsg = areq_ctx->hsg;
-	struct scatterlist *tsg = areq_ctx->tsg;
-	unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
-	struct page *srcp;
-	u8 *vsrc;
-
-	srcp = sg_page(src);
-	vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
-
-	if (ivsize) {
-		sg_init_table(cipher, 2);
-		sg_set_buf(cipher, iv, ivsize);
-		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
-		src = cipher;
-		cryptlen += ivsize;
-	}
-
-	if (assoc->length < 12)
-		return -EINVAL;
-
-	sg_init_table(hsg, 2);
-	sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
-	sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
-
-	sg_init_table(tsg, 1);
-	sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
-
-	areq_ctx->cryptlen = cryptlen;
-	areq_ctx->headlen = 8;
-	areq_ctx->trailen = 4;
-	areq_ctx->sg = src;
-
-	areq_ctx->complete = authenc_esn_verify_ahash_done;
-	areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
-	areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
-
-	return crypto_authenc_esn_verify(req);
+	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
+	aead_request_complete(req, err);
 }
 
 static int crypto_authenc_esn_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+	struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
 	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
-	struct ablkcipher_request *abreq = aead_request_ctx(req);
-	unsigned int cryptlen = req->cryptlen;
+	struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
 	unsigned int authsize = crypto_aead_authsize(authenc_esn);
-	u8 *iv = req->iv;
+	struct crypto_ahash *auth = ctx->auth;
+	u8 *ohash = PTR_ALIGN((u8 *)areq_ctx->tail,
+			      crypto_ahash_alignmask(auth) + 1);
+	unsigned int assoclen = req->assoclen;
+	unsigned int cryptlen = req->cryptlen;
+	u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+	struct scatterlist *dst = req->dst;
+	u32 tmp[2];
 	int err;
 
-	if (cryptlen < authsize)
-		return -EINVAL;
 	cryptlen -= authsize;
 
-	err = crypto_authenc_esn_iverify(req, iv, cryptlen);
+	if (req->src != dst) {
+		err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
+		if (err)
+			return err;
+	}
+
+	scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+				 authsize, 0);
+
+	if (!authsize)
+		goto tail;
+
+	/* Move high-order bits of sequence number to the end. */
+	scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+	scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+	scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
+
+	sg_init_table(areq_ctx->dst, 2);
+	dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+
+	ahash_request_set_tfm(ahreq, auth);
+	ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
+	ahash_request_set_callback(ahreq, aead_request_flags(req),
+				   authenc_esn_verify_ahash_done, req);
+
+	err = crypto_ahash_digest(ahreq);
 	if (err)
 		return err;
 
-	ablkcipher_request_set_tfm(abreq, ctx->enc);
-	ablkcipher_request_set_callback(abreq, aead_request_flags(req),
-					req->base.complete, req->base.data);
-	ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
-
-	return crypto_ablkcipher_decrypt(abreq);
+tail:
+	return crypto_authenc_esn_decrypt_tail(req, aead_request_flags(req));
 }
 
-static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
+static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
-	struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct authenc_esn_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ahash *auth;
 	struct crypto_ablkcipher *enc;
+	struct crypto_blkcipher *null;
 	int err;
 
 	auth = crypto_spawn_ahash(&ictx->auth);
@@ -634,15 +344,20 @@
 	if (IS_ERR(enc))
 		goto err_free_ahash;
 
+	null = crypto_get_default_null_skcipher();
+	err = PTR_ERR(null);
+	if (IS_ERR(null))
+		goto err_free_skcipher;
+
 	ctx->auth = auth;
 	ctx->enc = enc;
+	ctx->null = null;
 
-	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
-			    crypto_ahash_alignmask(auth),
-			    crypto_ahash_alignmask(auth) + 1) +
-		      crypto_ablkcipher_ivsize(enc);
+	ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
+			    crypto_ahash_alignmask(auth) + 1);
 
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+	crypto_aead_set_reqsize(
+		tfm,
 		sizeof(struct authenc_esn_request_ctx) +
 		ctx->reqoff +
 		max_t(unsigned int,
@@ -653,23 +368,36 @@
 
 	return 0;
 
+err_free_skcipher:
+	crypto_free_ablkcipher(enc);
 err_free_ahash:
 	crypto_free_ahash(auth);
 	return err;
 }
 
-static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_ahash(ctx->auth);
 	crypto_free_ablkcipher(ctx->enc);
+	crypto_put_default_null_skcipher();
 }
 
-static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
+static void crypto_authenc_esn_free(struct aead_instance *inst)
+{
+	struct authenc_esn_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->enc);
+	crypto_drop_ahash(&ctx->auth);
+	kfree(inst);
+}
+
+static int crypto_authenc_esn_create(struct crypto_template *tmpl,
+				     struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct hash_alg_common *auth;
 	struct crypto_alg *auth_base;
 	struct crypto_alg *enc;
@@ -679,15 +407,15 @@
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(auth))
-		return ERR_CAST(auth);
+		return PTR_ERR(auth);
 
 	auth_base = &auth->base;
 
@@ -701,13 +429,14 @@
 	if (!inst)
 		goto out_put_auth;
 
-	ctx = crypto_instance_ctx(inst);
+	ctx = aead_instance_ctx(inst);
 
-	err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
+	err = crypto_init_ahash_spawn(&ctx->auth, auth,
+				      aead_crypto_instance(inst));
 	if (err)
 		goto err_free_inst;
 
-	crypto_set_skcipher_spawn(&ctx->enc, inst);
+	crypto_set_skcipher_spawn(&ctx->enc, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
@@ -717,40 +446,44 @@
 	enc = crypto_skcipher_spawn_alg(&ctx->enc);
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
-	    CRYPTO_MAX_ALG_NAME)
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "authencesn(%s,%s)", auth_base->cra_name,
+		     enc->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "authencesn(%s,%s)", auth_base->cra_driver_name,
 		     enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_enc;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = enc->cra_priority *
-				 10 + auth_base->cra_priority;
-	inst->alg.cra_blocksize = enc->cra_blocksize;
-	inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
-	inst->alg.cra_type = &crypto_aead_type;
+	inst->alg.base.cra_flags = enc->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = enc->cra_priority * 10 +
+				      auth_base->cra_priority;
+	inst->alg.base.cra_blocksize = enc->cra_blocksize;
+	inst->alg.base.cra_alignmask = auth_base->cra_alignmask |
+				       enc->cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
 
-	inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
-	inst->alg.cra_aead.maxauthsize = auth->digestsize;
+	inst->alg.ivsize = enc->cra_ablkcipher.ivsize;
+	inst->alg.maxauthsize = auth->digestsize;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
+	inst->alg.init = crypto_authenc_esn_init_tfm;
+	inst->alg.exit = crypto_authenc_esn_exit_tfm;
 
-	inst->alg.cra_init = crypto_authenc_esn_init_tfm;
-	inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
+	inst->alg.setkey = crypto_authenc_esn_setkey;
+	inst->alg.setauthsize = crypto_authenc_esn_setauthsize;
+	inst->alg.encrypt = crypto_authenc_esn_encrypt;
+	inst->alg.decrypt = crypto_authenc_esn_decrypt;
 
-	inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
-	inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
-	inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
+	inst->free = crypto_authenc_esn_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_enc;
 
 out:
 	crypto_mod_put(auth_base);
-	return inst;
+	return err;
 
 err_drop_enc:
 	crypto_drop_skcipher(&ctx->enc);
@@ -759,23 +492,12 @@
 err_free_inst:
 	kfree(inst);
 out_put_auth:
-	inst = ERR_PTR(err);
 	goto out;
 }
 
-static void crypto_authenc_esn_free(struct crypto_instance *inst)
-{
-	struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->enc);
-	crypto_drop_ahash(&ctx->auth);
-	kfree(inst);
-}
-
 static struct crypto_template crypto_authenc_esn_tmpl = {
 	.name = "authencesn",
-	.alloc = crypto_authenc_esn_alloc,
-	.free = crypto_authenc_esn_free,
+	.create = crypto_authenc_esn_create,
 	.module = THIS_MODULE,
 };
 
diff --git a/crypto/ccm.c b/crypto/ccm.c
index a4d1a5e..cc31ea4 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -36,14 +36,20 @@
 	u8 nonce[3];
 };
 
+struct crypto_rfc4309_req_ctx {
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
+	struct aead_request subreq;
+};
+
 struct crypto_ccm_req_priv_ctx {
 	u8 odata[16];
 	u8 idata[16];
 	u8 auth_tag[16];
 	u32 ilen;
 	u32 flags;
-	struct scatterlist src[2];
-	struct scatterlist dst[2];
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
 	struct ablkcipher_request abreq;
 };
 
@@ -265,7 +271,7 @@
 	/* format associated data and compute into mac */
 	if (assoclen) {
 		pctx->ilen = format_adata(idata, assoclen);
-		get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
+		get_data_to_compute(cipher, pctx, req->src, req->assoclen);
 	} else {
 		pctx->ilen = 0;
 	}
@@ -286,7 +292,8 @@
 	u8 *odata = pctx->odata;
 
 	if (!err)
-		scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
+		scatterwalk_map_and_copy(odata, req->dst,
+					 req->assoclen + req->cryptlen,
 					 crypto_aead_authsize(aead), 1);
 	aead_request_complete(req, err);
 }
@@ -300,6 +307,41 @@
 	return 0;
 }
 
+static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
+{
+	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
+	struct scatterlist *sg;
+	u8 *iv = req->iv;
+	int err;
+
+	err = crypto_ccm_check_iv(iv);
+	if (err)
+		return err;
+
+	pctx->flags = aead_request_flags(req);
+
+	/* Note: rfc 3610 and NIST 800-38C require counter of
+	 * zero to encrypt auth tag.
+	 */
+	memset(iv + 15 - iv[0], 0, iv[0] + 1);
+
+	sg_init_table(pctx->src, 3);
+	sg_set_buf(pctx->src, tag, 16);
+	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
+	if (sg != pctx->src + 1)
+		sg_chain(pctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(pctx->dst, 3);
+		sg_set_buf(pctx->dst, tag, 16);
+		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
+		if (sg != pctx->dst + 1)
+			sg_chain(pctx->dst, 2, sg);
+	}
+
+	return 0;
+}
+
 static int crypto_ccm_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -312,32 +354,17 @@
 	u8 *iv = req->iv;
 	int err;
 
-	err = crypto_ccm_check_iv(iv);
+	err = crypto_ccm_init_crypt(req, odata);
 	if (err)
 		return err;
 
-	pctx->flags = aead_request_flags(req);
-
-	err = crypto_ccm_auth(req, req->src, cryptlen);
+	err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
 	if (err)
 		return err;
 
-	 /* Note: rfc 3610 and NIST 800-38C require counter of
-	 * zero to encrypt auth tag.
-	 */
-	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-	sg_init_table(pctx->src, 2);
-	sg_set_buf(pctx->src, odata, 16);
-	scatterwalk_sg_chain(pctx->src, 2, req->src);
-
 	dst = pctx->src;
-	if (req->src != req->dst) {
-		sg_init_table(pctx->dst, 2);
-		sg_set_buf(pctx->dst, odata, 16);
-		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+	if (req->src != req->dst)
 		dst = pctx->dst;
-	}
 
 	ablkcipher_request_set_tfm(abreq, ctx->ctr);
 	ablkcipher_request_set_callback(abreq, pctx->flags,
@@ -348,7 +375,7 @@
 		return err;
 
 	/* copy authtag to end of dst */
-	scatterwalk_map_and_copy(odata, req->dst, cryptlen,
+	scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
 				 crypto_aead_authsize(aead), 1);
 	return err;
 }
@@ -361,9 +388,14 @@
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	unsigned int authsize = crypto_aead_authsize(aead);
 	unsigned int cryptlen = req->cryptlen - authsize;
+	struct scatterlist *dst;
+
+	pctx->flags = 0;
+
+	dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
 
 	if (!err) {
-		err = crypto_ccm_auth(req, req->dst, cryptlen);
+		err = crypto_ccm_auth(req, dst, cryptlen);
 		if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
 			err = -EBADMSG;
 	}
@@ -384,31 +416,18 @@
 	u8 *iv = req->iv;
 	int err;
 
-	if (cryptlen < authsize)
-		return -EINVAL;
 	cryptlen -= authsize;
 
-	err = crypto_ccm_check_iv(iv);
+	err = crypto_ccm_init_crypt(req, authtag);
 	if (err)
 		return err;
 
-	pctx->flags = aead_request_flags(req);
-
-	scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);
-
-	memset(iv + 15 - iv[0], 0, iv[0] + 1);
-
-	sg_init_table(pctx->src, 2);
-	sg_set_buf(pctx->src, authtag, 16);
-	scatterwalk_sg_chain(pctx->src, 2, req->src);
+	scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
+				 authsize, 0);
 
 	dst = pctx->src;
-	if (req->src != req->dst) {
-		sg_init_table(pctx->dst, 2);
-		sg_set_buf(pctx->dst, authtag, 16);
-		scatterwalk_sg_chain(pctx->dst, 2, req->dst);
+	if (req->src != req->dst)
 		dst = pctx->dst;
-	}
 
 	ablkcipher_request_set_tfm(abreq, ctx->ctr);
 	ablkcipher_request_set_callback(abreq, pctx->flags,
@@ -418,7 +437,7 @@
 	if (err)
 		return err;
 
-	err = crypto_ccm_auth(req, req->dst, cryptlen);
+	err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
 	if (err)
 		return err;
 
@@ -429,11 +448,11 @@
 	return err;
 }
 
-static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
+static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_cipher *cipher;
 	struct crypto_ablkcipher *ctr;
 	unsigned long align;
@@ -451,9 +470,10 @@
 	ctx->cipher = cipher;
 	ctx->ctr = ctr;
 
-	align = crypto_tfm_alg_alignmask(tfm);
+	align = crypto_aead_alignmask(tfm);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
+	crypto_aead_set_reqsize(
+		tfm,
 		align + sizeof(struct crypto_ccm_req_priv_ctx) +
 		crypto_ablkcipher_reqsize(ctr));
 
@@ -464,21 +484,31 @@
 	return err;
 }
 
-static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_cipher(ctx->cipher);
 	crypto_free_ablkcipher(ctx->ctr);
 }
 
-static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
-						       const char *full_name,
-						       const char *ctr_name,
-						       const char *cipher_name)
+static void crypto_ccm_free(struct aead_instance *inst)
+{
+	struct ccm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_spawn(&ctx->cipher);
+	crypto_drop_skcipher(&ctx->ctr);
+	kfree(inst);
+}
+
+static int crypto_ccm_create_common(struct crypto_template *tmpl,
+				    struct rtattr **tb,
+				    const char *full_name,
+				    const char *ctr_name,
+				    const char *cipher_name)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct crypto_alg *ctr;
 	struct crypto_alg *cipher;
 	struct ccm_instance_ctx *ictx;
@@ -486,15 +516,15 @@
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	cipher = crypto_alg_mod_lookup(cipher_name,  CRYPTO_ALG_TYPE_CIPHER,
 				       CRYPTO_ALG_TYPE_MASK);
 	if (IS_ERR(cipher))
-		return ERR_CAST(cipher);
+		return PTR_ERR(cipher);
 
 	err = -EINVAL;
 	if (cipher->cra_blocksize != 16)
@@ -505,14 +535,15 @@
 	if (!inst)
 		goto out_put_cipher;
 
-	ictx = crypto_instance_ctx(inst);
+	ictx = aead_instance_ctx(inst);
 
-	err = crypto_init_spawn(&ictx->cipher, cipher, inst,
+	err = crypto_init_spawn(&ictx->cipher, cipher,
+				aead_crypto_instance(inst),
 				CRYPTO_ALG_TYPE_MASK);
 	if (err)
 		goto err_free_inst;
 
-	crypto_set_skcipher_spawn(&ictx->ctr, inst);
+	crypto_set_skcipher_spawn(&ictx->ctr, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
@@ -531,33 +562,39 @@
 		goto err_drop_ctr;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "ccm_base(%s,%s)", ctr->cra_driver_name,
 		     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;
 
-	memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
+	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
-				  (__alignof__(u32) - 1);
-	inst->alg.cra_type = &crypto_aead_type;
-	inst->alg.cra_aead.ivsize = 16;
-	inst->alg.cra_aead.maxauthsize = 16;
-	inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
-	inst->alg.cra_init = crypto_ccm_init_tfm;
-	inst->alg.cra_exit = crypto_ccm_exit_tfm;
-	inst->alg.cra_aead.setkey = crypto_ccm_setkey;
-	inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
-	inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;
+	inst->alg.base.cra_flags = ctr->cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = (cipher->cra_priority +
+				       ctr->cra_priority) / 2;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = cipher->cra_alignmask |
+				       ctr->cra_alignmask |
+				       (__alignof__(u32) - 1);
+	inst->alg.ivsize = 16;
+	inst->alg.maxauthsize = 16;
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
+	inst->alg.init = crypto_ccm_init_tfm;
+	inst->alg.exit = crypto_ccm_exit_tfm;
+	inst->alg.setkey = crypto_ccm_setkey;
+	inst->alg.setauthsize = crypto_ccm_setauthsize;
+	inst->alg.encrypt = crypto_ccm_encrypt;
+	inst->alg.decrypt = crypto_ccm_decrypt;
 
-out:
+	inst->free = crypto_ccm_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto err_drop_ctr;
+
+out_put_cipher:
 	crypto_mod_put(cipher);
-	return inst;
+	return err;
 
 err_drop_ctr:
 	crypto_drop_skcipher(&ictx->ctr);
@@ -565,12 +602,10 @@
 	crypto_drop_spawn(&ictx->cipher);
 err_free_inst:
 	kfree(inst);
-out_put_cipher:
-	inst = ERR_PTR(err);
-	goto out;
+	goto out_put_cipher;
 }
 
-static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
+static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
@@ -578,36 +613,28 @@
 
 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
-		return ERR_CAST(cipher_name);
+		return PTR_ERR(cipher_name);
 
 	if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-ENAMETOOLONG);
+		return -ENAMETOOLONG;
 
 	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
 	    CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-ENAMETOOLONG);
+		return -ENAMETOOLONG;
 
-	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
-}
-
-static void crypto_ccm_free(struct crypto_instance *inst)
-{
-	struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_spawn(&ctx->cipher);
-	crypto_drop_skcipher(&ctx->ctr);
-	kfree(inst);
+	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+					cipher_name);
 }
 
 static struct crypto_template crypto_ccm_tmpl = {
 	.name = "ccm",
-	.alloc = crypto_ccm_alloc,
-	.free = crypto_ccm_free,
+	.create = crypto_ccm_create,
 	.module = THIS_MODULE,
 };
 
-static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
+static int crypto_ccm_base_create(struct crypto_template *tmpl,
+				  struct rtattr **tb)
 {
 	const char *ctr_name;
 	const char *cipher_name;
@@ -615,23 +642,23 @@
 
 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
-		return ERR_CAST(ctr_name);
+		return PTR_ERR(ctr_name);
 
 	cipher_name = crypto_attr_alg_name(tb[2]);
 	if (IS_ERR(cipher_name))
-		return ERR_CAST(cipher_name);
+		return PTR_ERR(cipher_name);
 
 	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
 		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return ERR_PTR(-ENAMETOOLONG);
+		return -ENAMETOOLONG;
 
-	return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
+	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
+					cipher_name);
 }
 
 static struct crypto_template crypto_ccm_base_tmpl = {
 	.name = "ccm_base",
-	.alloc = crypto_ccm_base_alloc,
-	.free = crypto_ccm_free,
+	.create = crypto_ccm_base_create,
 	.module = THIS_MODULE,
 };
 
@@ -677,10 +704,12 @@
 
 static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
 {
-	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
+	struct aead_request *subreq = &rctx->subreq;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
 	struct crypto_aead *child = ctx->child;
+	struct scatterlist *sg;
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 			   crypto_aead_alignmask(child) + 1);
 
@@ -690,17 +719,38 @@
 	memcpy(iv + 1, ctx->nonce, 3);
 	memcpy(iv + 4, req->iv, 8);
 
+	scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);
+
+	sg_init_table(rctx->src, 3);
+	sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
+	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+	if (sg != rctx->src + 1)
+		sg_chain(rctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 3);
+		sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
+		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+		if (sg != rctx->dst + 1)
+			sg_chain(rctx->dst, 2, sg);
+	}
+
 	aead_request_set_tfm(subreq, child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
 				  req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-	aead_request_set_assoc(subreq, req->assoc, req->assoclen);
+	aead_request_set_crypt(subreq, rctx->src,
+			       req->src == req->dst ? rctx->src : rctx->dst,
+			       req->cryptlen, iv);
+	aead_request_set_ad(subreq, req->assoclen - 8);
 
 	return subreq;
 }
 
 static int crypto_rfc4309_encrypt(struct aead_request *req)
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4309_crypt(req);
 
 	return crypto_aead_encrypt(req);
@@ -708,16 +758,19 @@
 
 static int crypto_rfc4309_decrypt(struct aead_request *req)
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4309_crypt(req);
 
 	return crypto_aead_decrypt(req);
 }
 
-static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
+static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
-	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_aead *aead;
 	unsigned long align;
 
@@ -729,115 +782,118 @@
 
 	align = crypto_aead_alignmask(aead);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-		sizeof(struct aead_request) +
+	crypto_aead_set_reqsize(
+		tfm,
+		sizeof(struct crypto_rfc4309_req_ctx) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-		align + 16);
+		align + 32);
 
 	return 0;
 }
 
-static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
+static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm)
 {
-	struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_aead(ctx->child);
 }
 
-static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
+static void crypto_rfc4309_free(struct aead_instance *inst)
+{
+	crypto_drop_aead(aead_instance_ctx(inst));
+	kfree(inst);
+}
+
+static int crypto_rfc4309_create(struct crypto_template *tmpl,
+				 struct rtattr **tb)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct crypto_aead_spawn *spawn;
-	struct crypto_alg *alg;
+	struct aead_alg *alg;
 	const char *ccm_name;
 	int err;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	ccm_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ccm_name))
-		return ERR_CAST(ccm_name);
+		return PTR_ERR(ccm_name);
 
 	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 	if (!inst)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
-	spawn = crypto_instance_ctx(inst);
-	crypto_set_aead_spawn(spawn, inst);
+	spawn = aead_instance_ctx(inst);
+	crypto_set_aead_spawn(spawn, aead_crypto_instance(inst));
 	err = crypto_grab_aead(spawn, ccm_name, 0,
 			       crypto_requires_sync(algt->type, algt->mask));
 	if (err)
 		goto out_free_inst;
 
-	alg = crypto_aead_spawn_alg(spawn);
+	alg = crypto_spawn_aead_alg(spawn);
 
 	err = -EINVAL;
 
 	/* We only support 16-byte blocks. */
-	if (alg->cra_aead.ivsize != 16)
+	if (crypto_aead_alg_ivsize(alg) != 16)
 		goto out_drop_alg;
 
 	/* Not a stream cipher? */
-	if (alg->cra_blocksize != 1)
+	if (alg->base.cra_blocksize != 1)
 		goto out_drop_alg;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
-	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-		     "rfc4309(%s)", alg->cra_driver_name) >=
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4309(%s)", alg->base.cra_name) >=
+	    CRYPTO_MAX_ALG_NAME ||
+	    snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+		     "rfc4309(%s)", alg->base.cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_drop_alg;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = alg->cra_priority;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = alg->cra_alignmask;
-	inst->alg.cra_type = &crypto_nivaead_type;
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = alg->base.cra_priority;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
 
-	inst->alg.cra_aead.ivsize = 8;
-	inst->alg.cra_aead.maxauthsize = 16;
+	inst->alg.ivsize = 8;
+	inst->alg.maxauthsize = 16;
 
-	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
 
-	inst->alg.cra_init = crypto_rfc4309_init_tfm;
-	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;
+	inst->alg.init = crypto_rfc4309_init_tfm;
+	inst->alg.exit = crypto_rfc4309_exit_tfm;
 
-	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
-	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
-	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
-	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;
+	inst->alg.setkey = crypto_rfc4309_setkey;
+	inst->alg.setauthsize = crypto_rfc4309_setauthsize;
+	inst->alg.encrypt = crypto_rfc4309_encrypt;
+	inst->alg.decrypt = crypto_rfc4309_decrypt;
 
-	inst->alg.cra_aead.geniv = "seqiv";
+	inst->free = crypto_rfc4309_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_alg;
 
 out:
-	return inst;
+	return err;
 
 out_drop_alg:
 	crypto_drop_aead(spawn);
 out_free_inst:
 	kfree(inst);
-	inst = ERR_PTR(err);
 	goto out;
 }
 
-static void crypto_rfc4309_free(struct crypto_instance *inst)
-{
-	crypto_drop_spawn(crypto_instance_ctx(inst));
-	kfree(inst);
-}
-
 static struct crypto_template crypto_rfc4309_tmpl = {
 	.name = "rfc4309",
-	.alloc = crypto_rfc4309_alloc,
-	.free = crypto_rfc4309_free,
+	.create = crypto_rfc4309_create,
 	.module = THIS_MODULE,
 };
 
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index fa42e70..da9c899 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -13,14 +13,7 @@
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-
-#define CHACHA20_NONCE_SIZE 16
-#define CHACHA20_KEY_SIZE   32
-#define CHACHA20_BLOCK_SIZE 64
-
-struct chacha20_ctx {
-	u32 key[8];
-};
+#include <crypto/chacha20.h>
 
 static inline u32 rotl32(u32 v, u8 n)
 {
@@ -108,7 +101,7 @@
 	}
 }
 
-static void chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
 {
 	static const char constant[16] = "expand 32-byte k";
 
@@ -129,8 +122,9 @@
 	state[14] = le32_to_cpuvp(iv +  8);
 	state[15] = le32_to_cpuvp(iv + 12);
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_init);
 
-static int chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
 			   unsigned int keysize)
 {
 	struct chacha20_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -144,8 +138,9 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
 
-static int chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 			  struct scatterlist *src, unsigned int nbytes)
 {
 	struct blkcipher_walk walk;
@@ -155,7 +150,7 @@
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
 
-	chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
+	crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
 
 	while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
 		chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
@@ -172,6 +167,7 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(crypto_chacha20_crypt);
 
 static struct crypto_alg alg = {
 	.cra_name		= "chacha20",
@@ -187,11 +183,11 @@
 		.blkcipher = {
 			.min_keysize	= CHACHA20_KEY_SIZE,
 			.max_keysize	= CHACHA20_KEY_SIZE,
-			.ivsize		= CHACHA20_NONCE_SIZE,
+			.ivsize		= CHACHA20_IV_SIZE,
 			.geniv		= "seqiv",
-			.setkey		= chacha20_setkey,
-			.encrypt	= chacha20_crypt,
-			.decrypt	= chacha20_crypt,
+			.setkey		= crypto_chacha20_setkey,
+			.encrypt	= crypto_chacha20_crypt,
+			.decrypt	= crypto_chacha20_crypt,
 		},
 	},
 };
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 7b46ed7..99c3cce 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -13,6 +13,8 @@
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
+#include <crypto/chacha20.h>
+#include <crypto/poly1305.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -20,11 +22,6 @@
 
 #include "internal.h"
 
-#define POLY1305_BLOCK_SIZE	16
-#define POLY1305_DIGEST_SIZE	16
-#define POLY1305_KEY_SIZE	32
-#define CHACHA20_KEY_SIZE	32
-#define CHACHA20_IV_SIZE	16
 #define CHACHAPOLY_IV_SIZE	12
 
 struct chachapoly_instance_ctx {
@@ -60,12 +57,16 @@
 };
 
 struct chachapoly_req_ctx {
+	struct scatterlist src[2];
+	struct scatterlist dst[2];
 	/* the key we generate for Poly1305 using Chacha20 */
 	u8 key[POLY1305_KEY_SIZE];
 	/* calculated Poly1305 tag */
 	u8 tag[POLY1305_DIGEST_SIZE];
 	/* length of data to en/decrypt, without ICV */
 	unsigned int cryptlen;
+	/* Actual AD, excluding IV */
+	unsigned int assoclen;
 	union {
 		struct poly_req poly;
 		struct chacha_req chacha;
@@ -98,7 +99,9 @@
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	u8 tag[sizeof(rctx->tag)];
 
-	scatterwalk_map_and_copy(tag, req->src, rctx->cryptlen, sizeof(tag), 0);
+	scatterwalk_map_and_copy(tag, req->src,
+				 req->assoclen + rctx->cryptlen,
+				 sizeof(tag), 0);
 	if (crypto_memneq(tag, rctx->tag, sizeof(tag)))
 		return -EBADMSG;
 	return 0;
@@ -108,7 +111,8 @@
 {
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
-	scatterwalk_map_and_copy(rctx->tag, req->dst, rctx->cryptlen,
+	scatterwalk_map_and_copy(rctx->tag, req->dst,
+				 req->assoclen + rctx->cryptlen,
 				 sizeof(rctx->tag), 1);
 	return 0;
 }
@@ -123,14 +127,24 @@
 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct chacha_req *creq = &rctx->u.chacha;
+	struct scatterlist *src, *dst;
 	int err;
 
 	chacha_iv(creq->iv, req, 1);
 
+	sg_init_table(rctx->src, 2);
+	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 2);
+		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+	}
+
 	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
 					chacha_decrypt_done, req);
 	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+	ablkcipher_request_set_crypt(&creq->req, src, dst,
 				     rctx->cryptlen, creq->iv);
 	err = crypto_ablkcipher_decrypt(&creq->req);
 	if (err)
@@ -156,14 +170,15 @@
 
 static int poly_tail(struct aead_request *req)
 {
-	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct poly_req *preq = &rctx->u.poly;
 	__le64 len;
 	int err;
 
 	sg_init_table(preq->src, 1);
-	len = cpu_to_le64(req->assoclen);
+	len = cpu_to_le64(rctx->assoclen);
 	memcpy(&preq->tail.assoclen, &len, sizeof(len));
 	len = cpu_to_le64(rctx->cryptlen);
 	memcpy(&preq->tail.cryptlen, &len, sizeof(len));
@@ -228,6 +243,9 @@
 	if (rctx->cryptlen == req->cryptlen) /* encrypting */
 		crypt = req->dst;
 
+	sg_init_table(rctx->src, 2);
+	crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
+
 	ahash_request_set_callback(&preq->req, aead_request_flags(req),
 				   poly_cipher_done, req);
 	ahash_request_set_tfm(&preq->req, ctx->poly);
@@ -253,7 +271,7 @@
 	unsigned int padlen, bs = POLY1305_BLOCK_SIZE;
 	int err;
 
-	padlen = (bs - (req->assoclen % bs)) % bs;
+	padlen = (bs - (rctx->assoclen % bs)) % bs;
 	memset(preq->pad, 0, sizeof(preq->pad));
 	sg_init_table(preq->src, 1);
 	sg_set_buf(preq->src, preq->pad, padlen);
@@ -285,7 +303,7 @@
 	ahash_request_set_callback(&preq->req, aead_request_flags(req),
 				   poly_ad_done, req);
 	ahash_request_set_tfm(&preq->req, ctx->poly);
-	ahash_request_set_crypt(&preq->req, req->assoc, NULL, req->assoclen);
+	ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
 
 	err = crypto_ahash_update(&preq->req);
 	if (err)
@@ -351,11 +369,20 @@
 
 static int poly_genkey(struct aead_request *req)
 {
-	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct chacha_req *creq = &rctx->u.chacha;
 	int err;
 
+	rctx->assoclen = req->assoclen;
+
+	if (crypto_aead_ivsize(tfm) == 8) {
+		if (rctx->assoclen < 8)
+			return -EINVAL;
+		rctx->assoclen -= 8;
+	}
+
 	sg_init_table(creq->src, 1);
 	memset(rctx->key, 0, sizeof(rctx->key));
 	sg_set_buf(creq->src, rctx->key, sizeof(rctx->key));
@@ -385,14 +412,24 @@
 	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 	struct chacha_req *creq = &rctx->u.chacha;
+	struct scatterlist *src, *dst;
 	int err;
 
 	chacha_iv(creq->iv, req, 1);
 
+	sg_init_table(rctx->src, 2);
+	src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen);
+	dst = src;
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 2);
+		dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
+	}
+
 	ablkcipher_request_set_callback(&creq->req, aead_request_flags(req),
 					chacha_encrypt_done, req);
 	ablkcipher_request_set_tfm(&creq->req, ctx->chacha);
-	ablkcipher_request_set_crypt(&creq->req, req->src, req->dst,
+	ablkcipher_request_set_crypt(&creq->req, src, dst,
 				     req->cryptlen, creq->iv);
 	err = crypto_ablkcipher_encrypt(&creq->req);
 	if (err)
@@ -426,8 +463,6 @@
 {
 	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
 
-	if (req->cryptlen < POLY1305_DIGEST_SIZE)
-		return -EINVAL;
 	rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
 
 	/* decrypt call chain:
@@ -476,11 +511,11 @@
 	return 0;
 }
 
-static int chachapoly_init(struct crypto_tfm *tfm)
+static int chachapoly_init(struct crypto_aead *tfm)
 {
-	struct crypto_instance *inst = (void *)tfm->__crt_alg;
-	struct chachapoly_instance_ctx *ictx = crypto_instance_ctx(inst);
-	struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aead_instance *inst = aead_alg_instance(tfm);
+	struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 	struct crypto_ablkcipher *chacha;
 	struct crypto_ahash *poly;
 	unsigned long align;
@@ -499,77 +534,87 @@
 	ctx->poly = poly;
 	ctx->saltlen = ictx->saltlen;
 
-	align = crypto_tfm_alg_alignmask(tfm);
+	align = crypto_aead_alignmask(tfm);
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				align + offsetof(struct chachapoly_req_ctx, u) +
-				max(offsetof(struct chacha_req, req) +
-				    sizeof(struct ablkcipher_request) +
-				    crypto_ablkcipher_reqsize(chacha),
-				    offsetof(struct poly_req, req) +
-				    sizeof(struct ahash_request) +
-				    crypto_ahash_reqsize(poly)));
+	crypto_aead_set_reqsize(
+		tfm,
+		align + offsetof(struct chachapoly_req_ctx, u) +
+		max(offsetof(struct chacha_req, req) +
+		    sizeof(struct ablkcipher_request) +
+		    crypto_ablkcipher_reqsize(chacha),
+		    offsetof(struct poly_req, req) +
+		    sizeof(struct ahash_request) +
+		    crypto_ahash_reqsize(poly)));
 
 	return 0;
 }
 
-static void chachapoly_exit(struct crypto_tfm *tfm)
+static void chachapoly_exit(struct crypto_aead *tfm)
 {
-	struct chachapoly_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm);
 
 	crypto_free_ahash(ctx->poly);
 	crypto_free_ablkcipher(ctx->chacha);
 }
 
-static struct crypto_instance *chachapoly_alloc(struct rtattr **tb,
-						const char *name,
-						unsigned int ivsize)
+static void chachapoly_free(struct aead_instance *inst)
+{
+	struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->chacha);
+	crypto_drop_ahash(&ctx->poly);
+	kfree(inst);
+}
+
+static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
+			     const char *name, unsigned int ivsize)
 {
 	struct crypto_attr_type *algt;
-	struct crypto_instance *inst;
+	struct aead_instance *inst;
 	struct crypto_alg *chacha;
 	struct crypto_alg *poly;
-	struct ahash_alg *poly_ahash;
+	struct hash_alg_common *poly_hash;
 	struct chachapoly_instance_ctx *ctx;
 	const char *chacha_name, *poly_name;
 	int err;
 
 	if (ivsize > CHACHAPOLY_IV_SIZE)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return ERR_CAST(algt);
+		return PTR_ERR(algt);
 
 	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	chacha_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(chacha_name))
-		return ERR_CAST(chacha_name);
+		return PTR_ERR(chacha_name);
 	poly_name = crypto_attr_alg_name(tb[2]);
 	if (IS_ERR(poly_name))
-		return ERR_CAST(poly_name);
+		return PTR_ERR(poly_name);
 
 	poly = crypto_find_alg(poly_name, &crypto_ahash_type,
 			       CRYPTO_ALG_TYPE_HASH,
 			       CRYPTO_ALG_TYPE_AHASH_MASK);
 	if (IS_ERR(poly))
-		return ERR_CAST(poly);
+		return PTR_ERR(poly);
 
 	err = -ENOMEM;
 	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
 	if (!inst)
 		goto out_put_poly;
 
-	ctx = crypto_instance_ctx(inst);
+	ctx = aead_instance_ctx(inst);
 	ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize;
-	poly_ahash = container_of(poly, struct ahash_alg, halg.base);
-	err = crypto_init_ahash_spawn(&ctx->poly, &poly_ahash->halg, inst);
+	poly_hash = __crypto_hash_alg_common(poly);
+	err = crypto_init_ahash_spawn(&ctx->poly, poly_hash,
+				      aead_crypto_instance(inst));
 	if (err)
 		goto err_free_inst;
 
-	crypto_set_skcipher_spawn(&ctx->chacha, inst);
+	crypto_set_skcipher_spawn(&ctx->chacha, aead_crypto_instance(inst));
 	err = crypto_grab_skcipher(&ctx->chacha, chacha_name, 0,
 				   crypto_requires_sync(algt->type,
 							algt->mask));
@@ -587,37 +632,42 @@
 		goto out_drop_chacha;
 
 	err = -ENAMETOOLONG;
-	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha_name,
 		     poly_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
-	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->cra_driver_name,
 		     poly->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 
-	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
-	inst->alg.cra_flags |= (chacha->cra_flags |
-				poly->cra_flags) & CRYPTO_ALG_ASYNC;
-	inst->alg.cra_priority = (chacha->cra_priority +
-				  poly->cra_priority) / 2;
-	inst->alg.cra_blocksize = 1;
-	inst->alg.cra_alignmask = chacha->cra_alignmask | poly->cra_alignmask;
-	inst->alg.cra_type = &crypto_nivaead_type;
-	inst->alg.cra_aead.ivsize = ivsize;
-	inst->alg.cra_aead.maxauthsize = POLY1305_DIGEST_SIZE;
-	inst->alg.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen;
-	inst->alg.cra_init = chachapoly_init;
-	inst->alg.cra_exit = chachapoly_exit;
-	inst->alg.cra_aead.encrypt = chachapoly_encrypt;
-	inst->alg.cra_aead.decrypt = chachapoly_decrypt;
-	inst->alg.cra_aead.setkey = chachapoly_setkey;
-	inst->alg.cra_aead.setauthsize = chachapoly_setauthsize;
-	inst->alg.cra_aead.geniv = "seqiv";
+	inst->alg.base.cra_flags = (chacha->cra_flags | poly->cra_flags) &
+				   CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_priority = (chacha->cra_priority +
+				       poly->cra_priority) / 2;
+	inst->alg.base.cra_blocksize = 1;
+	inst->alg.base.cra_alignmask = chacha->cra_alignmask |
+				       poly->cra_alignmask;
+	inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) +
+				     ctx->saltlen;
+	inst->alg.ivsize = ivsize;
+	inst->alg.maxauthsize = POLY1305_DIGEST_SIZE;
+	inst->alg.init = chachapoly_init;
+	inst->alg.exit = chachapoly_exit;
+	inst->alg.encrypt = chachapoly_encrypt;
+	inst->alg.decrypt = chachapoly_decrypt;
+	inst->alg.setkey = chachapoly_setkey;
+	inst->alg.setauthsize = chachapoly_setauthsize;
 
-out:
+	inst->free = chachapoly_free;
+
+	err = aead_register_instance(tmpl, inst);
+	if (err)
+		goto out_drop_chacha;
+
+out_put_poly:
 	crypto_mod_put(poly);
-	return inst;
+	return err;
 
 out_drop_chacha:
 	crypto_drop_skcipher(&ctx->chacha);
@@ -625,41 +675,28 @@
 	crypto_drop_ahash(&ctx->poly);
 err_free_inst:
 	kfree(inst);
-out_put_poly:
-	inst = ERR_PTR(err);
-	goto out;
+	goto out_put_poly;
 }
 
-static struct crypto_instance *rfc7539_alloc(struct rtattr **tb)
+static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	return chachapoly_alloc(tb, "rfc7539", 12);
+	return chachapoly_create(tmpl, tb, "rfc7539", 12);
 }
 
-static struct crypto_instance *rfc7539esp_alloc(struct rtattr **tb)
+static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	return chachapoly_alloc(tb, "rfc7539esp", 8);
-}
-
-static void chachapoly_free(struct crypto_instance *inst)
-{
-	struct chachapoly_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->chacha);
-	crypto_drop_ahash(&ctx->poly);
-	kfree(inst);
+	return chachapoly_create(tmpl, tb, "rfc7539esp", 8);
 }
 
 static struct crypto_template rfc7539_tmpl = {
 	.name = "rfc7539",
-	.alloc = rfc7539_alloc,
-	.free = chachapoly_free,
+	.create = rfc7539_create,
 	.module = THIS_MODULE,
 };
 
 static struct crypto_template rfc7539esp_tmpl = {
 	.name = "rfc7539esp",
-	.alloc = rfc7539esp_alloc,
-	.free = chachapoly_free,
+	.create = rfc7539esp_create,
 	.module = THIS_MODULE,
 };
 
@@ -690,6 +727,5 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
 MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD");
-MODULE_ALIAS_CRYPTO("chacha20poly1305");
 MODULE_ALIAS_CRYPTO("rfc7539");
 MODULE_ALIAS_CRYPTO("rfc7539esp");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 22ba81f..c81861b 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -176,10 +176,9 @@
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
 		return;
-	if ((algt->type & CRYPTO_ALG_INTERNAL))
-		*type |= CRYPTO_ALG_INTERNAL;
-	if ((algt->mask & CRYPTO_ALG_INTERNAL))
-		*mask |= CRYPTO_ALG_INTERNAL;
+
+	*type |= algt->type & CRYPTO_ALG_INTERNAL;
+	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
 }
 
 static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
@@ -688,16 +687,18 @@
 			int (*crypt)(struct aead_request *req))
 {
 	struct cryptd_aead_request_ctx *rctx;
+	crypto_completion_t compl;
+
 	rctx = aead_request_ctx(req);
+	compl = rctx->complete;
 
 	if (unlikely(err == -EINPROGRESS))
 		goto out;
 	aead_request_set_tfm(req, child);
 	err = crypt( req );
-	req->base.complete = rctx->complete;
 out:
 	local_bh_disable();
-	rctx->complete(&req->base, err);
+	compl(&req->base, err);
 	local_bh_enable();
 }
 
@@ -708,7 +709,7 @@
 	struct aead_request *req;
 
 	req = container_of(areq, struct aead_request, base);
-	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt);
+	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
 }
 
 static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
@@ -718,7 +719,7 @@
 	struct aead_request *req;
 
 	req = container_of(areq, struct aead_request, base);
-	cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt);
+	cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
 }
 
 static int cryptd_aead_enqueue(struct aead_request *req,
@@ -756,7 +757,9 @@
 		return PTR_ERR(cipher);
 
 	ctx->child = cipher;
-	crypto_aead_set_reqsize(tfm, sizeof(struct cryptd_aead_request_ctx));
+	crypto_aead_set_reqsize(
+		tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
+			 crypto_aead_reqsize(cipher)));
 	return 0;
 }
 
@@ -775,7 +778,7 @@
 	struct aead_alg *alg;
 	const char *name;
 	u32 type = 0;
-	u32 mask = 0;
+	u32 mask = CRYPTO_ALG_ASYNC;
 	int err;
 
 	cryptd_check_internal(tb, &type, &mask);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 08ea286..d94d99f 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -25,7 +25,6 @@
 #include <net/netlink.h>
 #include <linux/security.h>
 #include <net/net_namespace.h>
-#include <crypto/internal/aead.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/internal/rng.h>
 #include <crypto/akcipher.h>
@@ -385,34 +384,6 @@
 	return ERR_PTR(err);
 }
 
-static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
-					       u32 mask)
-{
-	int err;
-	struct crypto_alg *alg;
-
-	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	type |= CRYPTO_ALG_TYPE_AEAD;
-	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
-	mask |= CRYPTO_ALG_TYPE_MASK;
-
-	for (;;) {
-		alg = crypto_lookup_aead(name,  type, mask);
-		if (!IS_ERR(alg))
-			return alg;
-
-		err = PTR_ERR(alg);
-		if (err != -EAGAIN)
-			break;
-		if (signal_pending(current)) {
-			err = -EINTR;
-			break;
-		}
-	}
-
-	return ERR_PTR(err);
-}
-
 static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
 			  struct nlattr **attrs)
 {
@@ -446,9 +417,6 @@
 		name = p->cru_name;
 
 	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
-	case CRYPTO_ALG_TYPE_AEAD:
-		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
-		break;
 	case CRYPTO_ALG_TYPE_GIVCIPHER:
 	case CRYPTO_ALG_TYPE_BLKCIPHER:
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index b6e43dc..b96a8456 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -19,8 +19,6 @@
  */
 
 #include <crypto/internal/geniv.h>
-#include <crypto/null.h>
-#include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -33,13 +31,6 @@
 
 #define MAX_IV_SIZE 16
 
-struct echainiv_ctx {
-	/* aead_geniv_ctx must be first the element */
-	struct aead_geniv_ctx geniv;
-	struct crypto_blkcipher *null;
-	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
 
 /* We don't care if we get preempted and read/write IVs from the next CPU. */
@@ -103,7 +94,7 @@
 static int echainiv_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
@@ -114,7 +105,7 @@
 	if (req->cryptlen < ivsize)
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = echainiv_encrypt_complete;
 	data = req;
@@ -145,8 +136,8 @@
 
 	aead_request_set_callback(subreq, req->base.flags, compl, data);
 	aead_request_set_crypt(subreq, req->dst, req->dst,
-			       req->cryptlen - ivsize, info);
-	aead_request_set_ad(subreq, req->assoclen + ivsize);
+			       req->cryptlen, info);
+	aead_request_set_ad(subreq, req->assoclen);
 
 	crypto_xor(info, ctx->salt, ivsize);
 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
@@ -160,16 +151,16 @@
 static int echainiv_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
 	unsigned int ivsize = crypto_aead_ivsize(geniv);
 
-	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
+	if (req->cryptlen < ivsize)
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = req->base.complete;
 	data = req->base.data;
@@ -180,61 +171,10 @@
 	aead_request_set_ad(subreq, req->assoclen + ivsize);
 
 	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-	if (req->src != req->dst)
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 req->assoclen, ivsize, 1);
 
 	return crypto_aead_decrypt(subreq);
 }
 
-static int echainiv_init(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->geniv.lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
-
-	err = crypto_get_default_rng();
-	if (err)
-		goto out;
-
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-	crypto_put_default_rng();
-	if (err)
-		goto out;
-
-	ctx->null = crypto_get_default_null_skcipher();
-	err = PTR_ERR(ctx->null);
-	if (IS_ERR(ctx->null))
-		goto out;
-
-	err = aead_geniv_init(tfm);
-	if (err)
-		goto drop_null;
-
-	ctx->geniv.child = geniv->child;
-	geniv->child = geniv;
-
-out:
-	return err;
-
-drop_null:
-	crypto_put_default_null_skcipher();
-	goto out;
-}
-
-static void echainiv_exit(struct crypto_tfm *tfm)
-{
-	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_aead(ctx->geniv.child);
-	crypto_put_default_null_skcipher();
-}
-
 static int echainiv_aead_create(struct crypto_template *tmpl,
 				struct rtattr **tb)
 {
@@ -251,9 +191,6 @@
 	spawn = aead_instance_ctx(inst);
 	alg = crypto_spawn_aead_alg(spawn);
 
-	if (alg->base.cra_aead.encrypt)
-		goto done;
-
 	err = -EINVAL;
 	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
 	    inst->alg.ivsize > MAX_IV_SIZE)
@@ -262,14 +199,15 @@
 	inst->alg.encrypt = echainiv_encrypt;
 	inst->alg.decrypt = echainiv_decrypt;
 
-	inst->alg.base.cra_init = echainiv_init;
-	inst->alg.base.cra_exit = echainiv_exit;
+	inst->alg.init = aead_init_geniv;
+	inst->alg.exit = aead_exit_geniv;
 
 	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
-	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
+	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
 	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
-done:
+	inst->free = aead_geniv_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto free_inst;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 7d32d47..bec329b 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -38,6 +38,12 @@
 	u8 nonce[4];
 };
 
+struct crypto_rfc4106_req_ctx {
+	struct scatterlist src[3];
+	struct scatterlist dst[3];
+	struct aead_request subreq;
+};
+
 struct crypto_rfc4543_instance_ctx {
 	struct crypto_aead_spawn aead;
 };
@@ -200,14 +206,14 @@
 	sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
 	sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
 	if (sg != pctx->src + 1)
-		scatterwalk_sg_chain(pctx->src, 2, sg);
+		sg_chain(pctx->src, 2, sg);
 
 	if (req->src != req->dst) {
 		sg_init_table(pctx->dst, 3);
 		sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
 		sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
 		if (sg != pctx->dst + 1)
-			scatterwalk_sg_chain(pctx->dst, 2, sg);
+			sg_chain(pctx->dst, 2, sg);
 	}
 }
 
@@ -601,6 +607,15 @@
 	crypto_free_ablkcipher(ctx->ctr);
 }
 
+static void crypto_gcm_free(struct aead_instance *inst)
+{
+	struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_skcipher(&ctx->ctr);
+	crypto_drop_ahash(&ctx->ghash);
+	kfree(inst);
+}
+
 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
 				    const char *full_name,
@@ -689,6 +704,8 @@
 	inst->alg.encrypt = crypto_gcm_encrypt;
 	inst->alg.decrypt = crypto_gcm_decrypt;
 
+	inst->free = crypto_gcm_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_put_ctr;
@@ -728,19 +745,9 @@
 					ctr_name, "ghash");
 }
 
-static void crypto_gcm_free(struct crypto_instance *inst)
-{
-	struct gcm_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_skcipher(&ctx->ctr);
-	crypto_drop_ahash(&ctx->ghash);
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_gcm_tmpl = {
 	.name = "gcm",
 	.create = crypto_gcm_create,
-	.free = crypto_gcm_free,
 	.module = THIS_MODULE,
 };
 
@@ -770,7 +777,6 @@
 static struct crypto_template crypto_gcm_base_tmpl = {
 	.name = "gcm_base",
 	.create = crypto_gcm_base_create,
-	.free = crypto_gcm_free,
 	.module = THIS_MODULE,
 };
 
@@ -816,27 +822,50 @@
 
 static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
 {
-	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_request *subreq = &rctx->subreq;
 	struct crypto_aead *child = ctx->child;
+	struct scatterlist *sg;
 	u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
 			   crypto_aead_alignmask(child) + 1);
 
+	scatterwalk_map_and_copy(iv + 12, req->src, 0, req->assoclen - 8, 0);
+
 	memcpy(iv, ctx->nonce, 4);
 	memcpy(iv + 4, req->iv, 8);
 
+	sg_init_table(rctx->src, 3);
+	sg_set_buf(rctx->src, iv + 12, req->assoclen - 8);
+	sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
+	if (sg != rctx->src + 1)
+		sg_chain(rctx->src, 2, sg);
+
+	if (req->src != req->dst) {
+		sg_init_table(rctx->dst, 3);
+		sg_set_buf(rctx->dst, iv + 12, req->assoclen - 8);
+		sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
+		if (sg != rctx->dst + 1)
+			sg_chain(rctx->dst, 2, sg);
+	}
+
 	aead_request_set_tfm(subreq, child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
 				  req->base.data);
-	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
-	aead_request_set_ad(subreq, req->assoclen);
+	aead_request_set_crypt(subreq, rctx->src,
+			       req->src == req->dst ? rctx->src : rctx->dst,
+			       req->cryptlen, iv);
+	aead_request_set_ad(subreq, req->assoclen - 8);
 
 	return subreq;
 }
 
 static int crypto_rfc4106_encrypt(struct aead_request *req)
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4106_crypt(req);
 
 	return crypto_aead_encrypt(req);
@@ -844,6 +873,9 @@
 
 static int crypto_rfc4106_decrypt(struct aead_request *req)
 {
+	if (req->assoclen != 16 && req->assoclen != 20)
+		return -EINVAL;
+
 	req = crypto_rfc4106_crypt(req);
 
 	return crypto_aead_decrypt(req);
@@ -867,9 +899,9 @@
 	align &= ~(crypto_tfm_ctx_alignment() - 1);
 	crypto_aead_set_reqsize(
 		tfm,
-		sizeof(struct aead_request) +
+		sizeof(struct crypto_rfc4106_req_ctx) +
 		ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
-		align + 12);
+		align + 24);
 
 	return 0;
 }
@@ -881,6 +913,12 @@
 	crypto_free_aead(ctx->child);
 }
 
+static void crypto_rfc4106_free(struct aead_instance *inst)
+{
+	crypto_drop_aead(aead_instance_ctx(inst));
+	kfree(inst);
+}
+
 static int crypto_rfc4106_create(struct crypto_template *tmpl,
 				 struct rtattr **tb)
 {
@@ -934,7 +972,7 @@
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_drop_alg;
 
-	inst->alg.base.cra_flags |= alg->base.cra_flags & CRYPTO_ALG_ASYNC;
+	inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = alg->base.cra_priority;
 	inst->alg.base.cra_blocksize = 1;
 	inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
@@ -952,6 +990,8 @@
 	inst->alg.encrypt = crypto_rfc4106_encrypt;
 	inst->alg.decrypt = crypto_rfc4106_decrypt;
 
+	inst->free = crypto_rfc4106_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_alg;
@@ -966,16 +1006,9 @@
 	goto out;
 }
 
-static void crypto_rfc4106_free(struct crypto_instance *inst)
-{
-	crypto_drop_aead(crypto_instance_ctx(inst));
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_rfc4106_tmpl = {
 	.name = "rfc4106",
 	.create = crypto_rfc4106_create,
-	.free = crypto_rfc4106_free,
 	.module = THIS_MODULE,
 };
 
@@ -1114,6 +1147,15 @@
 	crypto_put_default_null_skcipher();
 }
 
+static void crypto_rfc4543_free(struct aead_instance *inst)
+{
+	struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst);
+
+	crypto_drop_aead(&ctx->aead);
+
+	kfree(inst);
+}
+
 static int crypto_rfc4543_create(struct crypto_template *tmpl,
 				struct rtattr **tb)
 {
@@ -1187,6 +1229,8 @@
 	inst->alg.encrypt = crypto_rfc4543_encrypt;
 	inst->alg.decrypt = crypto_rfc4543_decrypt;
 
+	inst->free = crypto_rfc4543_free;
+
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto out_drop_alg;
@@ -1201,19 +1245,9 @@
 	goto out;
 }
 
-static void crypto_rfc4543_free(struct crypto_instance *inst)
-{
-	struct crypto_rfc4543_instance_ctx *ctx = crypto_instance_ctx(inst);
-
-	crypto_drop_aead(&ctx->aead);
-
-	kfree(aead_instance(inst));
-}
-
 static struct crypto_template crypto_rfc4543_tmpl = {
 	.name = "rfc4543",
 	.create = crypto_rfc4543_create,
-	.free = crypto_rfc4543_free,
 	.module = THIS_MODULE,
 };
 
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index b32d834..ceea83d 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -79,7 +79,7 @@
 
 void jent_panic(char *s)
 {
-	panic(s);
+	panic("%s", s);
 }
 
 void jent_memcpy(void *dest, const void *src, unsigned int n)
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 45e7d51..ee9cfb9 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -274,11 +274,16 @@
 			      u32 type, u32 mask)
 {
 	struct pcrypt_instance_ctx *ctx;
+	struct crypto_attr_type *algt;
 	struct aead_instance *inst;
 	struct aead_alg *alg;
 	const char *name;
 	int err;
 
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
 	name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(name))
 		return PTR_ERR(name);
@@ -299,6 +304,8 @@
 	if (err)
 		goto out_drop_aead;
 
+	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
+
 	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
 	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
 
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 387b5c8..2df9835d 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -13,31 +13,11 @@
 
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/poly1305.h>
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
-#define POLY1305_BLOCK_SIZE	16
-#define POLY1305_KEY_SIZE	32
-#define POLY1305_DIGEST_SIZE	16
-
-struct poly1305_desc_ctx {
-	/* key */
-	u32 r[5];
-	/* finalize key */
-	u32 s[4];
-	/* accumulator */
-	u32 h[5];
-	/* partial buffer */
-	u8 buf[POLY1305_BLOCK_SIZE];
-	/* bytes used in partial buffer */
-	unsigned int buflen;
-	/* r key has been set */
-	bool rset;
-	/* s key has been set */
-	bool sset;
-};
-
 static inline u64 mlt(u64 a, u64 b)
 {
 	return a * b;
@@ -58,7 +38,7 @@
 	return le32_to_cpup(p);
 }
 
-static int poly1305_init(struct shash_desc *desc)
+int crypto_poly1305_init(struct shash_desc *desc)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
 
@@ -69,8 +49,9 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_init);
 
-static int poly1305_setkey(struct crypto_shash *tfm,
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
 			   const u8 *key, unsigned int keylen)
 {
 	/* Poly1305 requires a unique key for each tag, which implies that
@@ -79,6 +60,7 @@
 	 * the update() call. */
 	return -ENOTSUPP;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_setkey);
 
 static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
 {
@@ -98,16 +80,10 @@
 	dctx->s[3] = le32_to_cpuvp(key + 12);
 }
 
-static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
-				    const u8 *src, unsigned int srclen,
-				    u32 hibit)
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+					const u8 *src, unsigned int srclen)
 {
-	u32 r0, r1, r2, r3, r4;
-	u32 s1, s2, s3, s4;
-	u32 h0, h1, h2, h3, h4;
-	u64 d0, d1, d2, d3, d4;
-
-	if (unlikely(!dctx->sset)) {
+	if (!dctx->sset) {
 		if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
 			poly1305_setrkey(dctx, src);
 			src += POLY1305_BLOCK_SIZE;
@@ -121,6 +97,25 @@
 			dctx->sset = true;
 		}
 	}
+	return srclen;
+}
+EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
+
+static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
+				    const u8 *src, unsigned int srclen,
+				    u32 hibit)
+{
+	u32 r0, r1, r2, r3, r4;
+	u32 s1, s2, s3, s4;
+	u32 h0, h1, h2, h3, h4;
+	u64 d0, d1, d2, d3, d4;
+	unsigned int datalen;
+
+	if (unlikely(!dctx->sset)) {
+		datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+		src += srclen - datalen;
+		srclen = datalen;
+	}
 
 	r0 = dctx->r[0];
 	r1 = dctx->r[1];
@@ -181,7 +176,7 @@
 	return srclen;
 }
 
-static int poly1305_update(struct shash_desc *desc,
+int crypto_poly1305_update(struct shash_desc *desc,
 			   const u8 *src, unsigned int srclen)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
@@ -214,8 +209,9 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_update);
 
-static int poly1305_final(struct shash_desc *desc, u8 *dst)
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
 {
 	struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
 	__le32 *mac = (__le32 *)dst;
@@ -282,13 +278,14 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(crypto_poly1305_final);
 
 static struct shash_alg poly1305_alg = {
 	.digestsize	= POLY1305_DIGEST_SIZE,
-	.init		= poly1305_init,
-	.update		= poly1305_update,
-	.final		= poly1305_final,
-	.setkey		= poly1305_setkey,
+	.init		= crypto_poly1305_init,
+	.update		= crypto_poly1305_update,
+	.final		= crypto_poly1305_final,
+	.setkey		= crypto_poly1305_setkey,
 	.descsize	= sizeof(struct poly1305_desc_ctx),
 	.base		= {
 		.cra_name		= "poly1305",
diff --git a/crypto/rsa.c b/crypto/rsa.c
index 752af06..466003e 100644
--- a/crypto/rsa.c
+++ b/crypto/rsa.c
@@ -267,12 +267,36 @@
 	return ret;
 }
 
+static int rsa_check_key_length(unsigned int len)
+{
+	switch (len) {
+	case 512:
+	case 1024:
+	case 1536:
+	case 2048:
+	case 3072:
+	case 4096:
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static int rsa_setkey(struct crypto_akcipher *tfm, const void *key,
 		      unsigned int keylen)
 {
 	struct rsa_key *pkey = akcipher_tfm_ctx(tfm);
+	int ret;
 
-	return rsa_parse_key(pkey, key, keylen);
+	ret = rsa_parse_key(pkey, key, keylen);
+	if (ret)
+		return ret;
+
+	if (rsa_check_key_length(mpi_get_size(pkey->n) << 3)) {
+		rsa_free_key(pkey);
+		ret = -EINVAL;
+	}
+	return ret;
 }
 
 static void rsa_exit_tfm(struct crypto_akcipher *tfm)
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c
index 3e8e0a9..8d96ce9 100644
--- a/crypto/rsa_helper.c
+++ b/crypto/rsa_helper.c
@@ -28,7 +28,7 @@
 		return -ENOMEM;
 
 	/* In FIPS mode only allow key size 2K & 3K */
-	if (fips_enabled && (mpi_get_size(key->n) != 256 ||
+	if (fips_enabled && (mpi_get_size(key->n) != 256 &&
 			     mpi_get_size(key->n) != 384)) {
 		pr_err("RSA: key size not allowed in FIPS mode\n");
 		mpi_free(key->n);
@@ -62,7 +62,7 @@
 		return -ENOMEM;
 
 	/* In FIPS mode only allow key size 2K & 3K */
-	if (fips_enabled && (mpi_get_size(key->d) != 256 ||
+	if (fips_enabled && (mpi_get_size(key->d) != 256 &&
 			     mpi_get_size(key->d) != 384)) {
 		pr_err("RSA: key size not allowed in FIPS mode\n");
 		mpi_free(key->d);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 122c56e..15a749a 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -15,7 +15,6 @@
 
 #include <crypto/internal/geniv.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/null.h>
 #include <crypto/rng.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
@@ -26,23 +25,11 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
-struct seqniv_request_ctx {
-	struct scatterlist dst[2];
-	struct aead_request subreq;
-};
-
 struct seqiv_ctx {
 	spinlock_t lock;
 	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 };
 
-struct seqiv_aead_ctx {
-	/* aead_geniv_ctx must be first the element */
-	struct aead_geniv_ctx geniv;
-	struct crypto_blkcipher *null;
-	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
-};
-
 static void seqiv_free(struct crypto_instance *inst);
 
 static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
@@ -71,32 +58,6 @@
 	skcipher_givcrypt_complete(req, err);
 }
 
-static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
-{
-	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-	struct crypto_aead *geniv;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	if (err)
-		goto out;
-
-	geniv = aead_givcrypt_reqtfm(req);
-	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));
-
-out:
-	kfree(subreq->iv);
-}
-
-static void seqiv_aead_complete(struct crypto_async_request *base, int err)
-{
-	struct aead_givcrypt_request *req = base->data;
-
-	seqiv_aead_complete2(req, err);
-	aead_givcrypt_complete(req, err);
-}
-
 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
 {
 	struct aead_request *subreq = aead_request_ctx(req);
@@ -124,50 +85,6 @@
 	aead_request_complete(req, err);
 }
 
-static void seqniv_aead_encrypt_complete2(struct aead_request *req, int err)
-{
-	unsigned int ivsize = 8;
-	u8 data[20];
-
-	if (err == -EINPROGRESS)
-		return;
-
-	/* Swap IV and ESP header back to correct order. */
-	scatterwalk_map_and_copy(data, req->dst, 0, req->assoclen + ivsize, 0);
-	scatterwalk_map_and_copy(data + ivsize, req->dst, 0, req->assoclen, 1);
-	scatterwalk_map_and_copy(data, req->dst, req->assoclen, ivsize, 1);
-}
-
-static void seqniv_aead_encrypt_complete(struct crypto_async_request *base,
-					int err)
-{
-	struct aead_request *req = base->data;
-
-	seqniv_aead_encrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
-static void seqniv_aead_decrypt_complete2(struct aead_request *req, int err)
-{
-	u8 data[4];
-
-	if (err == -EINPROGRESS)
-		return;
-
-	/* Move ESP header back to correct location. */
-	scatterwalk_map_and_copy(data, req->dst, 16, req->assoclen - 8, 0);
-	scatterwalk_map_and_copy(data, req->dst, 8, req->assoclen - 8, 1);
-}
-
-static void seqniv_aead_decrypt_complete(struct crypto_async_request *base,
-					 int err)
-{
-	struct aead_request *req = base->data;
-
-	seqniv_aead_decrypt_complete2(req, err);
-	aead_request_complete(req, err);
-}
-
 static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
 			unsigned int ivsize)
 {
@@ -227,112 +144,10 @@
 	return err;
 }
 
-static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
-	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-	struct aead_request *areq = &req->areq;
-	struct aead_request *subreq = aead_givcrypt_reqctx(req);
-	crypto_completion_t compl;
-	void *data;
-	u8 *info;
-	unsigned int ivsize;
-	int err;
-
-	aead_request_set_tfm(subreq, aead_geniv_base(geniv));
-
-	compl = areq->base.complete;
-	data = areq->base.data;
-	info = areq->iv;
-
-	ivsize = crypto_aead_ivsize(geniv);
-
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_aead_alignmask(geniv) + 1))) {
-		info = kmalloc(ivsize, areq->base.flags &
-				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
-								  GFP_ATOMIC);
-		if (!info)
-			return -ENOMEM;
-
-		compl = seqiv_aead_complete;
-		data = req;
-	}
-
-	aead_request_set_callback(subreq, areq->base.flags, compl, data);
-	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
-			       info);
-	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
-
-	seqiv_geniv(ctx, info, req->seq, ivsize);
-	memcpy(req->giv, info, ivsize);
-
-	err = crypto_aead_encrypt(subreq);
-	if (unlikely(info != areq->iv))
-		seqiv_aead_complete2(req, err);
-	return err;
-}
-
-static int seqniv_aead_encrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	struct seqniv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq;
-	struct scatterlist *dst;
-	crypto_completion_t compl;
-	void *data;
-	unsigned int ivsize = 8;
-	u8 buf[20] __attribute__ ((aligned(__alignof__(u32))));
-	int err;
-
-	if (req->cryptlen < ivsize)
-		return -EINVAL;
-
-	/* ESP AD is at most 12 bytes (ESN). */
-	if (req->assoclen > 12)
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->geniv.child);
-
-	compl = seqniv_aead_encrypt_complete;
-	data = req;
-
-	if (req->src != req->dst) {
-		struct blkcipher_desc desc = {
-			.tfm = ctx->null,
-		};
-
-		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-					       req->assoclen + req->cryptlen);
-		if (err)
-			return err;
-	}
-
-	dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq, dst, dst,
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	memcpy(buf, req->iv, ivsize);
-	crypto_xor(buf, ctx->salt, ivsize);
-	memcpy(req->iv, buf, ivsize);
-
-	/* Swap order of IV and ESP AD for ICV generation. */
-	scatterwalk_map_and_copy(buf + ivsize, req->dst, 0, req->assoclen, 0);
-	scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 1);
-
-	err = crypto_aead_encrypt(subreq);
-	seqniv_aead_encrypt_complete2(req, err);
-	return err;
-}
-
 static int seqiv_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
@@ -343,7 +158,7 @@
 	if (req->cryptlen < ivsize)
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = req->base.complete;
 	data = req->base.data;
@@ -387,67 +202,10 @@
 	return err;
 }
 
-static int seqniv_aead_decrypt(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	struct seqniv_request_ctx *rctx = aead_request_ctx(req);
-	struct aead_request *subreq = &rctx->subreq;
-	struct scatterlist *dst;
-	crypto_completion_t compl;
-	void *data;
-	unsigned int ivsize = 8;
-	u8 buf[20];
-	int err;
-
-	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
-		return -EINVAL;
-
-	aead_request_set_tfm(subreq, ctx->geniv.child);
-
-	compl = req->base.complete;
-	data = req->base.data;
-
-	if (req->assoclen > 12)
-		return -EINVAL;
-	else if (req->assoclen > 8) {
-		compl = seqniv_aead_decrypt_complete;
-		data = req;
-	}
-
-	if (req->src != req->dst) {
-		struct blkcipher_desc desc = {
-			.tfm = ctx->null,
-		};
-
-		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
-					       req->assoclen + req->cryptlen);
-		if (err)
-			return err;
-	}
-
-	/* Move ESP AD forward for ICV generation. */
-	scatterwalk_map_and_copy(buf, req->dst, 0, req->assoclen + ivsize, 0);
-	memcpy(req->iv, buf + req->assoclen, ivsize);
-	scatterwalk_map_and_copy(buf, req->dst, ivsize, req->assoclen, 1);
-
-	dst = scatterwalk_ffwd(rctx->dst, req->dst, ivsize);
-
-	aead_request_set_callback(subreq, req->base.flags, compl, data);
-	aead_request_set_crypt(subreq, dst, dst,
-			       req->cryptlen - ivsize, req->iv);
-	aead_request_set_ad(subreq, req->assoclen);
-
-	err = crypto_aead_decrypt(subreq);
-	if (req->assoclen > 8)
-		seqniv_aead_decrypt_complete2(req, err);
-	return err;
-}
-
 static int seqiv_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
+	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
 	void *data;
@@ -456,7 +214,7 @@
 	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
 		return -EINVAL;
 
-	aead_request_set_tfm(subreq, ctx->geniv.child);
+	aead_request_set_tfm(subreq, ctx->child);
 
 	compl = req->base.complete;
 	data = req->base.data;
@@ -467,9 +225,6 @@
 	aead_request_set_ad(subreq, req->assoclen + ivsize);
 
 	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
-	if (req->src != req->dst)
-		scatterwalk_map_and_copy(req->iv, req->dst,
-					 req->assoclen, ivsize, 1);
 
 	return crypto_aead_decrypt(subreq);
 }
@@ -495,85 +250,6 @@
 	return err ?: skcipher_geniv_init(tfm);
 }
 
-static int seqiv_old_aead_init(struct crypto_tfm *tfm)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->lock);
-
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct aead_request));
-	err = 0;
-	if (!crypto_get_default_rng()) {
-		geniv->givencrypt = seqiv_aead_givencrypt;
-		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-					   crypto_aead_ivsize(geniv));
-		crypto_put_default_rng();
-	}
-
-	return err ?: aead_geniv_init(tfm);
-}
-
-static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
-{
-	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	int err;
-
-	spin_lock_init(&ctx->geniv.lock);
-
-	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
-
-	err = crypto_get_default_rng();
-	if (err)
-		goto out;
-
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-	crypto_put_default_rng();
-	if (err)
-		goto out;
-
-	ctx->null = crypto_get_default_null_skcipher();
-	err = PTR_ERR(ctx->null);
-	if (IS_ERR(ctx->null))
-		goto out;
-
-	err = aead_geniv_init(tfm);
-	if (err)
-		goto drop_null;
-
-	ctx->geniv.child = geniv->child;
-	geniv->child = geniv;
-
-out:
-	return err;
-
-drop_null:
-	crypto_put_default_null_skcipher();
-	goto out;
-}
-
-static int seqiv_aead_init(struct crypto_tfm *tfm)
-{
-	return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
-}
-
-static int seqniv_aead_init(struct crypto_tfm *tfm)
-{
-	return seqiv_aead_init_common(tfm, sizeof(struct seqniv_request_ctx));
-}
-
-static void seqiv_aead_exit(struct crypto_tfm *tfm)
-{
-	struct seqiv_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	crypto_free_aead(ctx->geniv.child);
-	crypto_put_default_null_skcipher();
-}
-
 static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
 				   struct rtattr **tb)
 {
@@ -609,33 +285,6 @@
 	goto out;
 }
 
-static int seqiv_old_aead_create(struct crypto_template *tmpl,
-				 struct aead_instance *aead)
-{
-	struct crypto_instance *inst = aead_crypto_instance(aead);
-	int err = -EINVAL;
-
-	if (inst->alg.cra_aead.ivsize < sizeof(u64))
-		goto free_inst;
-
-	inst->alg.cra_init = seqiv_old_aead_init;
-	inst->alg.cra_exit = aead_geniv_exit;
-
-	inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
-	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);
-
-	err = crypto_register_instance(tmpl, inst);
-	if (err)
-		goto free_inst;
-
-out:
-	return err;
-
-free_inst:
-	aead_geniv_free(aead);
-	goto out;
-}
-
 static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	struct aead_instance *inst;
@@ -650,15 +299,9 @@
 
 	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
 
-	if (inst->alg.base.cra_aead.encrypt)
-		return seqiv_old_aead_create(tmpl, inst);
-
 	spawn = aead_instance_ctx(inst);
 	alg = crypto_spawn_aead_alg(spawn);
 
-	if (alg->base.cra_aead.encrypt)
-		goto done;
-
 	err = -EINVAL;
 	if (inst->alg.ivsize != sizeof(u64))
 		goto free_inst;
@@ -666,13 +309,12 @@
 	inst->alg.encrypt = seqiv_aead_encrypt;
 	inst->alg.decrypt = seqiv_aead_decrypt;
 
-	inst->alg.base.cra_init = seqiv_aead_init;
-	inst->alg.base.cra_exit = seqiv_aead_exit;
+	inst->alg.init = aead_init_geniv;
+	inst->alg.exit = aead_exit_geniv;
 
-	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;
+	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
+	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
 
-done:
 	err = aead_register_instance(tmpl, inst);
 	if (err)
 		goto free_inst;
@@ -702,51 +344,6 @@
 	return err;
 }
 
-static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
-	struct aead_instance *inst;
-	struct crypto_aead_spawn *spawn;
-	struct aead_alg *alg;
-	int err;
-
-	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
-	err = PTR_ERR(inst);
-	if (IS_ERR(inst))
-		goto out;
-
-	spawn = aead_instance_ctx(inst);
-	alg = crypto_spawn_aead_alg(spawn);
-
-	if (alg->base.cra_aead.encrypt)
-		goto done;
-
-	err = -EINVAL;
-	if (inst->alg.ivsize != sizeof(u64))
-		goto free_inst;
-
-	inst->alg.encrypt = seqniv_aead_encrypt;
-	inst->alg.decrypt = seqniv_aead_decrypt;
-
-	inst->alg.base.cra_init = seqniv_aead_init;
-	inst->alg.base.cra_exit = seqiv_aead_exit;
-
-	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
-	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
-	inst->alg.base.cra_ctxsize += inst->alg.ivsize;
-
-done:
-	err = aead_register_instance(tmpl, inst);
-	if (err)
-		goto free_inst;
-
-out:
-	return err;
-
-free_inst:
-	aead_geniv_free(inst);
-	goto out;
-}
-
 static void seqiv_free(struct crypto_instance *inst)
 {
 	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
@@ -762,36 +359,13 @@
 	.module = THIS_MODULE,
 };
 
-static struct crypto_template seqniv_tmpl = {
-	.name = "seqniv",
-	.create = seqniv_create,
-	.free = seqiv_free,
-	.module = THIS_MODULE,
-};
-
 static int __init seqiv_module_init(void)
 {
-	int err;
-
-	err = crypto_register_template(&seqiv_tmpl);
-	if (err)
-		goto out;
-
-	err = crypto_register_template(&seqniv_tmpl);
-	if (err)
-		goto out_undo_niv;
-
-out:
-	return err;
-
-out_undo_niv:
-	crypto_unregister_template(&seqiv_tmpl);
-	goto out;
+	return crypto_register_template(&seqiv_tmpl);
 }
 
 static void __exit seqiv_module_exit(void)
 {
-	crypto_unregister_template(&seqniv_tmpl);
 	crypto_unregister_template(&seqiv_tmpl);
 }
 
@@ -801,4 +375,3 @@
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Sequence Number IV Generator");
 MODULE_ALIAS_CRYPTO("seqiv");
-MODULE_ALIAS_CRYPTO("seqniv");
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
new file mode 100644
index 0000000..dd5fc1b
--- /dev/null
+++ b/crypto/skcipher.c
@@ -0,0 +1,245 @@
+/*
+ * Symmetric key cipher operations.
+ *
+ * Generic encrypt/decrypt wrapper for ciphers, handles operations across
+ * multiple page boundaries by using temporary blocks.  In user context,
+ * the kernel is given a chance to schedule us once per page.
+ *
+ * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/skcipher.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
+{
+	if (alg->cra_type == &crypto_blkcipher_type)
+		return sizeof(struct crypto_blkcipher *);
+
+	BUG_ON(alg->cra_type != &crypto_ablkcipher_type &&
+	       alg->cra_type != &crypto_givcipher_type);
+
+	return sizeof(struct crypto_ablkcipher *);
+}
+
+static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
+				     const u8 *key, unsigned int keylen)
+{
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_blkcipher *blkcipher = *ctx;
+	int err;
+
+	crypto_blkcipher_clear_flags(blkcipher, ~0);
+	crypto_blkcipher_set_flags(blkcipher, crypto_skcipher_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_blkcipher_setkey(blkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+				       CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_blkcipher(struct skcipher_request *req,
+				    int (*crypt)(struct blkcipher_desc *,
+						 struct scatterlist *,
+						 struct scatterlist *,
+						 unsigned int))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct blkcipher_desc desc = {
+		.tfm = *ctx,
+		.info = req->iv,
+		.flags = req->base.flags,
+	};
+
+
+	return crypt(&desc, req->dst, req->src, req->cryptlen);
+}
+
+static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	return skcipher_crypt_blkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
+
+	return skcipher_crypt_blkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_blkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_blkcipher *blkcipher;
+	struct crypto_tfm *btfm;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
+					CRYPTO_ALG_TYPE_MASK);
+	if (IS_ERR(btfm)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(btfm);
+	}
+
+	blkcipher = __crypto_blkcipher_cast(btfm);
+	*ctx = blkcipher;
+	tfm->exit = crypto_exit_skcipher_ops_blkcipher;
+
+	skcipher->setkey = skcipher_setkey_blkcipher;
+	skcipher->encrypt = skcipher_encrypt_blkcipher;
+	skcipher->decrypt = skcipher_decrypt_blkcipher;
+
+	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+
+	return 0;
+}
+
+static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
+				      const u8 *key, unsigned int keylen)
+{
+	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct crypto_ablkcipher *ablkcipher = *ctx;
+	int err;
+
+	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
+	crypto_ablkcipher_set_flags(ablkcipher,
+				    crypto_skcipher_get_flags(tfm) &
+				    CRYPTO_TFM_REQ_MASK);
+	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
+	crypto_skcipher_set_flags(tfm,
+				  crypto_ablkcipher_get_flags(ablkcipher) &
+				  CRYPTO_TFM_RES_MASK);
+
+	return err;
+}
+
+static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
+				     int (*crypt)(struct ablkcipher_request *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
+	struct ablkcipher_request *subreq = skcipher_request_ctx(req);
+
+	ablkcipher_request_set_tfm(subreq, *ctx);
+	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
+					req->base.complete, req->base.data);
+	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+				     req->iv);
+
+	return crypt(subreq);
+}
+
+static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+	return skcipher_crypt_ablkcipher(req, alg->encrypt);
+}
+
+static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
+	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
+
+	return skcipher_crypt_ablkcipher(req, alg->decrypt);
+}
+
+static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_ablkcipher(*ctx);
+}
+
+int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *calg = tfm->__crt_alg;
+	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
+	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
+	struct crypto_ablkcipher *ablkcipher;
+	struct crypto_tfm *abtfm;
+
+	if (!crypto_mod_get(calg))
+		return -EAGAIN;
+
+	abtfm = __crypto_alloc_tfm(calg, 0, 0);
+	if (IS_ERR(abtfm)) {
+		crypto_mod_put(calg);
+		return PTR_ERR(abtfm);
+	}
+
+	ablkcipher = __crypto_ablkcipher_cast(abtfm);
+	*ctx = ablkcipher;
+	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;
+
+	skcipher->setkey = skcipher_setkey_ablkcipher;
+	skcipher->encrypt = skcipher_encrypt_ablkcipher;
+	skcipher->decrypt = skcipher_decrypt_ablkcipher;
+
+	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
+			    sizeof(struct ablkcipher_request);
+
+	return 0;
+}
+
+static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
+{
+	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
+		return crypto_init_skcipher_ops_blkcipher(tfm);
+
+	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
+	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);
+
+	return crypto_init_skcipher_ops_ablkcipher(tfm);
+}
+
+static const struct crypto_type crypto_skcipher_type2 = {
+	.extsize = crypto_skcipher_extsize,
+	.init_tfm = crypto_skcipher_init_tfm,
+	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
+	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
+	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
+	.tfmsize = offsetof(struct crypto_skcipher, base),
+};
+
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask)
+{
+	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Symmetric key cipher type");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9f6f10b..2b00b61 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -73,6 +73,22 @@
 	"lzo", "cts", "zlib", NULL
 };
 
+struct tcrypt_result {
+	struct completion completion;
+	int err;
+};
+
+static void tcrypt_complete(struct crypto_async_request *req, int err)
+{
+	struct tcrypt_result *res = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	res->err = err;
+	complete(&res->completion);
+}
+
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
 			       struct scatterlist *sg, int blen, int secs)
 {
@@ -143,6 +159,20 @@
 	return ret;
 }
 
+static inline int do_one_aead_op(struct aead_request *req, int ret)
+{
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		struct tcrypt_result *tr = req->base.data;
+
+		ret = wait_for_completion_interruptible(&tr->completion);
+		if (!ret)
+			ret = tr->err;
+		reinit_completion(&tr->completion);
+	}
+
+	return ret;
+}
+
 static int test_aead_jiffies(struct aead_request *req, int enc,
 				int blen, int secs)
 {
@@ -153,9 +183,9 @@
 	for (start = jiffies, end = start + secs * HZ, bcount = 0;
 	     time_before(jiffies, end); bcount++) {
 		if (enc)
-			ret = crypto_aead_encrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
 		else
-			ret = crypto_aead_decrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 
 		if (ret)
 			return ret;
@@ -177,9 +207,9 @@
 	/* Warm-up run. */
 	for (i = 0; i < 4; i++) {
 		if (enc)
-			ret = crypto_aead_encrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
 		else
-			ret = crypto_aead_decrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 
 		if (ret)
 			goto out;
@@ -191,9 +221,9 @@
 
 		start = get_cycles();
 		if (enc)
-			ret = crypto_aead_encrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_encrypt(req));
 		else
-			ret = crypto_aead_decrypt(req);
+			ret = do_one_aead_op(req, crypto_aead_decrypt(req));
 		end = get_cycles();
 
 		if (ret)
@@ -286,6 +316,7 @@
 	char *axbuf[XBUFSIZE];
 	unsigned int *b_size;
 	unsigned int iv_len;
+	struct tcrypt_result result;
 
 	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
 	if (!iv)
@@ -321,6 +352,7 @@
 		goto out_notfm;
 	}
 
+	init_completion(&result.completion);
 	printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
 			get_driver_name(crypto_aead, tfm), e);
 
@@ -331,6 +363,9 @@
 		goto out_noreq;
 	}
 
+	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				  tcrypt_complete, &result);
+
 	i = 0;
 	do {
 		b_size = aead_sizes;
@@ -749,22 +784,6 @@
 	crypto_free_hash(tfm);
 }
 
-struct tcrypt_result {
-	struct completion completion;
-	int err;
-};
-
-static void tcrypt_complete(struct crypto_async_request *req, int err)
-{
-	struct tcrypt_result *res = req->data;
-
-	if (err == -EINPROGRESS)
-		return;
-
-	res->err = err;
-	complete(&res->completion);
-}
-
 static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 {
 	if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -1759,14 +1778,27 @@
 
 	case 211:
 		test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
+				NULL, 0, 16, 16, aead_speed_template_20);
+		test_aead_speed("gcm(aes)", ENCRYPT, sec,
 				NULL, 0, 16, 8, aead_speed_template_20);
 		break;
 
 	case 212:
 		test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
-				NULL, 0, 16, 8, aead_speed_template_19);
+				NULL, 0, 16, 16, aead_speed_template_19);
 		break;
 
+	case 213:
+		test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
+				NULL, 0, 16, 8, aead_speed_template_36);
+		break;
+
+	case 214:
+		test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
+				  speed_template_32);
+		break;
+
+
 	case 300:
 		if (alg) {
 			test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -1855,6 +1887,10 @@
 		test_hash_speed("crct10dif", sec, generic_hash_speed_template);
 		if (mode > 300 && mode < 400) break;
 
+	case 321:
+		test_hash_speed("poly1305", sec, poly1305_speed_template);
+		if (mode > 300 && mode < 400) break;
+
 	case 399:
 		break;
 
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 6cc1b85..f0bfee1 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -61,12 +61,14 @@
 static u8 speed_template_32_48[] = {32, 48, 0};
 static u8 speed_template_32_48_64[] = {32, 48, 64, 0};
 static u8 speed_template_32_64[] = {32, 64, 0};
+static u8 speed_template_32[] = {32, 0};
 
 /*
  * AEAD speed tests
  */
 static u8 aead_speed_template_19[] = {19, 0};
 static u8 aead_speed_template_20[] = {20, 0};
+static u8 aead_speed_template_36[] = {36, 0};
 
 /*
  * Digest speed tests
@@ -127,4 +129,22 @@
 	{  .blen = 0,	.plen = 0,	.klen = 0, }
 };
 
+static struct hash_speed poly1305_speed_template[] = {
+	{ .blen = 96,	.plen = 16, },
+	{ .blen = 96,	.plen = 32, },
+	{ .blen = 96,	.plen = 96, },
+	{ .blen = 288,	.plen = 16, },
+	{ .blen = 288,	.plen = 32, },
+	{ .blen = 288,	.plen = 288, },
+	{ .blen = 1056,	.plen = 32, },
+	{ .blen = 1056,	.plen = 1056, },
+	{ .blen = 2080,	.plen = 32, },
+	{ .blen = 2080,	.plen = 2080, },
+	{ .blen = 4128,	.plen = 4128, },
+	{ .blen = 8224,	.plen = 8224, },
+
+	/* End marker */
+	{  .blen = 0,	.plen = 0, }
+};
+
 #endif	/* _CRYPTO_TCRYPT_H */
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index d0a42bd..35c2de1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -22,6 +22,7 @@
 
 #include <crypto/aead.h>
 #include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/module.h>
@@ -921,15 +922,15 @@
 	return ret;
 }
 
-static int __test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
 			   struct cipher_testvec *template, unsigned int tcount,
 			   const bool diff_dst, const int align_offset)
 {
 	const char *algo =
-		crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
+		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
 	unsigned int i, j, k, n, temp;
 	char *q;
-	struct ablkcipher_request *req;
+	struct skcipher_request *req;
 	struct scatterlist sg[8];
 	struct scatterlist sgout[8];
 	const char *e, *d;
@@ -958,15 +959,15 @@
 
 	init_completion(&result.completion);
 
-	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 	if (!req) {
 		pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
 		       d, algo);
 		goto out;
 	}
 
-	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-					tcrypt_complete, &result);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      tcrypt_complete, &result);
 
 	j = 0;
 	for (i = 0; i < tcount; i++) {
@@ -987,15 +988,16 @@
 		data += align_offset;
 		memcpy(data, template[i].input, template[i].ilen);
 
-		crypto_ablkcipher_clear_flags(tfm, ~0);
+		crypto_skcipher_clear_flags(tfm, ~0);
 		if (template[i].wk)
-			crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+			crypto_skcipher_set_flags(tfm,
+						  CRYPTO_TFM_REQ_WEAK_KEY);
 
-		ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-					       template[i].klen);
+		ret = crypto_skcipher_setkey(tfm, template[i].key,
+					     template[i].klen);
 		if (!ret == template[i].fail) {
 			pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
-			       d, j, algo, crypto_ablkcipher_get_flags(tfm));
+			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
 		} else if (ret)
 			continue;
@@ -1007,10 +1009,10 @@
 			sg_init_one(&sgout[0], data, template[i].ilen);
 		}
 
-		ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-					     template[i].ilen, iv);
-		ret = enc ? crypto_ablkcipher_encrypt(req) :
-			    crypto_ablkcipher_decrypt(req);
+		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+					   template[i].ilen, iv);
+		ret = enc ? crypto_skcipher_encrypt(req) :
+			    crypto_skcipher_decrypt(req);
 
 		switch (ret) {
 		case 0:
@@ -1054,15 +1056,16 @@
 			memset(iv, 0, MAX_IVLEN);
 
 		j++;
-		crypto_ablkcipher_clear_flags(tfm, ~0);
+		crypto_skcipher_clear_flags(tfm, ~0);
 		if (template[i].wk)
-			crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+			crypto_skcipher_set_flags(tfm,
+						  CRYPTO_TFM_REQ_WEAK_KEY);
 
-		ret = crypto_ablkcipher_setkey(tfm, template[i].key,
-					       template[i].klen);
+		ret = crypto_skcipher_setkey(tfm, template[i].key,
+					     template[i].klen);
 		if (!ret == template[i].fail) {
 			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
-			       d, j, algo, crypto_ablkcipher_get_flags(tfm));
+			       d, j, algo, crypto_skcipher_get_flags(tfm));
 			goto out;
 		} else if (ret)
 			continue;
@@ -1100,11 +1103,11 @@
 			temp += template[i].tap[k];
 		}
 
-		ablkcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
-					     template[i].ilen, iv);
+		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
+					   template[i].ilen, iv);
 
-		ret = enc ? crypto_ablkcipher_encrypt(req) :
-			    crypto_ablkcipher_decrypt(req);
+		ret = enc ? crypto_skcipher_encrypt(req) :
+			    crypto_skcipher_decrypt(req);
 
 		switch (ret) {
 		case 0:
@@ -1157,7 +1160,7 @@
 	ret = 0;
 
 out:
-	ablkcipher_request_free(req);
+	skcipher_request_free(req);
 	if (diff_dst)
 		testmgr_free_buf(xoutbuf);
 out_nooutbuf:
@@ -1166,7 +1169,7 @@
 	return ret;
 }
 
-static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
+static int test_skcipher(struct crypto_skcipher *tfm, int enc,
 			 struct cipher_testvec *template, unsigned int tcount)
 {
 	unsigned int alignmask;
@@ -1578,10 +1581,10 @@
 static int alg_test_skcipher(const struct alg_test_desc *desc,
 			     const char *driver, u32 type, u32 mask)
 {
-	struct crypto_ablkcipher *tfm;
+	struct crypto_skcipher *tfm;
 	int err = 0;
 
-	tfm = crypto_alloc_ablkcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
+	tfm = crypto_alloc_skcipher(driver, type | CRYPTO_ALG_INTERNAL, mask);
 	if (IS_ERR(tfm)) {
 		printk(KERN_ERR "alg: skcipher: Failed to load transform for "
 		       "%s: %ld\n", driver, PTR_ERR(tfm));
@@ -1600,7 +1603,7 @@
 				    desc->suite.cipher.dec.count);
 
 out:
-	crypto_free_ablkcipher(tfm);
+	crypto_free_skcipher(tfm);
 	return err;
 }
 
@@ -2476,6 +2479,7 @@
 		}
 	}, {
 		.alg = "cmac(aes)",
+		.fips_allowed = 1,
 		.test = alg_test_hash,
 		.suite = {
 			.hash = {
@@ -2485,6 +2489,7 @@
 		}
 	}, {
 		.alg = "cmac(des3_ede)",
+		.fips_allowed = 1,
 		.test = alg_test_hash,
 		.suite = {
 			.hash = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 868edf1..64b8a80 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -14504,6 +14504,9 @@
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
 			  "\x27\x08\x94\x2d\xbe\x77\x18\x1a",
 		.rlen   = 16,
+		.also_non_np = 1,
+		.np	= 8,
+		.tap	= { 3, 2, 3, 2, 3, 1, 1, 1 },
 	}, {
 		.key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
 			  "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
@@ -14723,6 +14726,9 @@
 		.ilen   = 16,
 		.result = "Single block msg",
 		.rlen   = 16,
+		.also_non_np = 1,
+		.np	= 8,
+		.tap	= { 3, 2, 3, 2, 3, 1, 1, 1 },
 	}, {
 		.key    = "\xc2\x86\x69\x6d\x88\x7c\x9a\xa0"
 			  "\x61\x1b\xbb\x3e\x20\x25\xa4\x5a",
@@ -15032,6 +15038,9 @@
 		.klen   = 8 + 20 + 16,
 		.iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
 			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.assoc	= "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.alen	= 16,
 		.input  = "Single block msg",
 		.ilen   = 16,
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15057,6 +15066,9 @@
 		.klen   = 8 + 20 + 16,
 		.iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
 			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.assoc	= "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.alen	= 16,
 		.input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15087,6 +15099,9 @@
 		.klen   = 8 + 20 + 16,
 		.iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
 			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.assoc	= "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.alen	= 16,
 		.input  = "This is a 48-byte message (exactly 3 AES blocks)",
 		.ilen   = 48,
 		.result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15116,6 +15131,9 @@
 		.klen   = 8 + 20 + 16,
 		.iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
 			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.assoc	= "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.alen	= 16,
 		.input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
 			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
 			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15154,8 +15172,10 @@
 		.klen   = 8 + 20 + 16,
 		.iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
 			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+		.alen   = 24,
 		.input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
 			  "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15199,6 +15219,9 @@
 		.klen   = 8 + 20 + 24,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen	= 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15239,6 +15262,9 @@
 		.klen   = 8 + 20 + 32,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen	= 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15374,6 +15400,9 @@
 		.klen   = 8 + 32 + 16,
 		.iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
 			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.assoc	= "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.alen	= 16,
 		.input  = "Single block msg",
 		.ilen   = 16,
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15401,6 +15430,9 @@
 		.klen   = 8 + 32 + 16,
 		.iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
 			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.assoc	= "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.alen	= 16,
 		.input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15433,6 +15465,9 @@
 		.klen   = 8 + 32 + 16,
 		.iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
 			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.assoc	= "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.alen	= 16,
 		.input  = "This is a 48-byte message (exactly 3 AES blocks)",
 		.ilen   = 48,
 		.result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15464,6 +15499,9 @@
 		.klen   = 8 + 32 + 16,
 		.iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
 			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.assoc	= "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.alen	= 16,
 		.input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
 			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
 			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15504,8 +15542,10 @@
 		.klen   = 8 + 32 + 16,
 		.iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
 			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+		.alen   = 24,
 		.input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
 			  "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15551,6 +15591,9 @@
 		.klen   = 8 + 32 + 24,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15593,6 +15636,9 @@
 		.klen   = 8 + 32 + 32,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15641,6 +15687,9 @@
 		.klen   = 8 + 64 + 16,
 		.iv     = "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
 			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.assoc	= "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30"
+			  "\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+		.alen   = 16,
 		.input  = "Single block msg",
 		.ilen   = 16,
 		.result = "\xe3\x53\x77\x9c\x10\x79\xae\xb8"
@@ -15676,6 +15725,9 @@
 		.klen   = 8 + 64 + 16,
 		.iv     = "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
 			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.assoc	= "\x56\x2e\x17\x99\x6d\x09\x3d\x28"
+			  "\xdd\xb3\xba\x69\x5a\x2e\x6f\x58",
+		.alen   = 16,
 		.input  = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
 			  "\x10\x11\x12\x13\x14\x15\x16\x17"
@@ -15716,6 +15768,9 @@
 		.klen   = 8 + 64 + 16,
 		.iv     = "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
 			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.assoc	= "\xc7\x82\xdc\x4c\x09\x8c\x66\xcb"
+			  "\xd9\xcd\x27\xd8\x25\x68\x2c\x81",
+		.alen   = 16,
 		.input  = "This is a 48-byte message (exactly 3 AES blocks)",
 		.ilen   = 48,
 		.result = "\xd0\xa0\x2b\x38\x36\x45\x17\x53"
@@ -15755,6 +15810,9 @@
 		.klen   = 8 + 64 + 16,
 		.iv     = "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
 			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.assoc	= "\x8c\xe8\x2e\xef\xbe\xa0\xda\x3c"
+			  "\x44\x69\x9e\xd7\xdb\x51\xb7\xd9",
+		.alen   = 16,
 		.input  = "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
 			  "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
 			  "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
@@ -15803,8 +15861,10 @@
 		.klen   = 8 + 64 + 16,
 		.iv     = "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
 			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\xe9\x6e\x8c\x08\xab\x46\x57\x63"
+			  "\xfd\x09\x8d\x45\xdd\x3f\xf8\x93",
+		.alen   = 24,
 		.input  = "\x08\x00\x0e\xbd\xa7\x0a\x00\x00"
 			  "\x8e\x9c\x08\x3d\xb9\x5b\x07\x00"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
@@ -15858,6 +15918,9 @@
 		.klen   = 8 + 64 + 24,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15908,6 +15971,9 @@
 		.klen   = 8 + 64 + 32,
 		.iv     = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.assoc	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+		.alen   = 16,
 		.input  = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
 			  "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
 			  "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
@@ -15955,8 +16021,9 @@
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 20 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16015,8 +16082,9 @@
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 24 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16076,8 +16144,9 @@
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 32 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16140,8 +16209,9 @@
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 48 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16208,8 +16278,9 @@
 			  "\xE9\xC0\xFF\x2E\x76\x0B\x64\x24",
 		.klen	= 8 + 64 + 8,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16275,8 +16346,9 @@
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 20 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 		  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16337,8 +16409,9 @@
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 24 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16400,8 +16473,9 @@
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 32 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16466,8 +16540,9 @@
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 48 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-	.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -16536,8 +16611,9 @@
 			  "\xEA\xC2\x84\xE8\x14\x95\xDB\xE8",
 		.klen	= 8 + 64 + 24,
 		.iv	= "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
-		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01",
-		.alen   = 8,
+		.assoc  = "\x00\x00\x43\x21\x00\x00\x00\x01"
+			  "\x7D\x33\x88\x93\x0F\x93\xB2\x42",
+		.alen   = 16,
 		.input	= "\x6f\x54\x20\x6f\x61\x4d\x79\x6e"
 			  "\x53\x20\x63\x65\x65\x72\x73\x74"
 			  "\x54\x20\x6f\x6f\x4d\x20\x6e\x61"
@@ -20129,149 +20205,150 @@
 };
 
 static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
-        { /* Generated using Crypto++ */
+	{ /* Generated using Crypto++ */
 		.key    = zeroed_string,
 		.klen	= 20,
-                .iv     = zeroed_string,
-                .input  = zeroed_string,
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+		.iv	= zeroed_string,
+		.input  = zeroed_string,
+		.ilen   = 16,
+		.assoc  = zeroed_string,
+		.alen   = 16,
 		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+			  "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+			  "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+			  "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
 		.rlen	= 32,
-        },{
+	},{
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = zeroed_string,
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input  = zeroed_string,
+		.ilen   = 16,
+		.assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
 		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+			  "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+			  "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+			  "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
 		.rlen	= 32,
 
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = zeroed_string,
-                .alen   = 8,
+		.iv     = zeroed_string,
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 16,
+		.assoc  = zeroed_string,
+		.alen   = 16,
 		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+			  "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
 		.rlen	= 32,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+		.iv     = zeroed_string,
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 16,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen   = 16,
 		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
-                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+			  "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
 		.rlen	= 32,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 16,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 16,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
 		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
-                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+			  "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
 		.rlen	= 32,
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
-                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .ilen   = 64,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen   = 64,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
 		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
-                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
-                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
-                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
-                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
-                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
-                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
-                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+			  "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+			  "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+			  "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+			  "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+			  "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+			  "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+			  "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
 		.rlen	= 80,
-        }, {
+	}, {
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
-                          "\x00\x00\x00\x00",
-                .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff",
-                .ilen   = 192,
-                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
-                          "\xaa\xaa\xaa\xaa",
-                .alen   = 12,
+		.iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
+		.input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.ilen   = 192,
+		.assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen   = 20,
 		.result	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
 			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
 			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
@@ -20316,8 +20393,9 @@
 			  "\x00\x21\x00\x01\x01\x02\x02\x01",
 		.ilen	= 72,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x00",
-		.alen	= 12,
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
 		.result	= "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
 			  "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
 			  "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
@@ -20345,8 +20423,9 @@
 			  "\x65\x72\x63\x69\x74\x79\x02\x64"
 			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
 		.ilen	= 64,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.result	= "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
 			  "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
 			  "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
@@ -20374,8 +20453,9 @@
 			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
 			  "\x01\x02\x02\x01",
 		.ilen	= 52,
-		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02",
-		.alen	= 8,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
 		.result	= "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
 			  "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
 			  "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
@@ -20401,8 +20481,9 @@
 			  "\x75\x76\x77\x61\x62\x63\x64\x65"
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.ilen	= 64,
-		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01",
-		.alen	= 8,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
 		.result	= "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
 			  "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
 			  "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
@@ -20430,8 +20511,9 @@
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.ilen	= 64,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
 			  "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
 			  "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
@@ -20455,8 +20537,9 @@
 			  "\x01\x02\x02\x01",
 		.ilen	= 28,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
 			  "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
 			  "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
@@ -20477,8 +20560,9 @@
 			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
 			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
 		.ilen	= 40,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.result	= "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
 			  "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
 			  "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
@@ -20505,8 +20589,9 @@
 			  "\x23\x01\x01\x01",
 		.ilen	= 76,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.result	= "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
 			  "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
 			  "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
@@ -20535,8 +20620,9 @@
 			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
 		.ilen	= 40,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.result	= "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
 			  "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
 			  "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
@@ -20563,8 +20649,9 @@
 			  "\x15\x01\x01\x01",
 		.ilen	= 76,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
 			  "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
 			  "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
@@ -20597,8 +20684,9 @@
 			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
 		.ilen	= 72,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.result	= "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
 			  "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
 			  "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
@@ -20619,8 +20707,9 @@
 		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
 		.input	= "\x01\x02\x02\x01",
 		.ilen	= 4,
-		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF",
-		.alen	= 8,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
 		.result	= "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
 			  "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
 			  "\x04\xBE\xF2\x70",
@@ -20636,8 +20725,9 @@
 			  "\x62\x65\x00\x01",
 		.ilen	= 20,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.result	= "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
 			  "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
 			  "\x43\x33\x21\x64\x41\x25\x03\x52"
@@ -20661,8 +20751,9 @@
 			  "\x01\x02\x02\x01",
 		.ilen	= 52,
 		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
-			  "\xFF\xFF\xFF\xFF",
-		.alen	= 12,
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
 		.result	= "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
 			  "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
 			  "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
@@ -20688,8 +20779,9 @@
 			  "\x01\x02\x02\x01",
 		.ilen	= 52,
 		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.result	= "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
 			  "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
 			  "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
@@ -20712,8 +20804,9 @@
 			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 		.ilen	= 32,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x07",
-		.alen	= 12,
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
 		.result	= "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
 			  "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
 			  "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
@@ -20725,122 +20818,122 @@
 };
 
 static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
-        { /* Generated using Crypto++ */
+	{ /* Generated using Crypto++ */
 		.key    = zeroed_string,
 		.klen	= 20,
-                .iv     = zeroed_string,
+		.iv     = zeroed_string,
 		.input	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
-                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
-                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
-                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+			  "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+			  "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+			  "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
 		.ilen	= 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = zeroed_string,
-                .rlen   = 16,
+		.assoc  = zeroed_string,
+		.alen   = 16,
+		.result = zeroed_string,
+		.rlen   = 16,
 
-        },{
+	},{
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
 		.input	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
-                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
-                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
-                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+			  "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+			  "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+			  "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
 		.ilen	= 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = zeroed_string,
-                .rlen   = 16,
-        }, {
+		.assoc  = "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
+		.result = zeroed_string,
+		.rlen   = 16,
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
+		.iv     = zeroed_string,
 		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
-                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+			  "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
 		.ilen	= 32,
-                .assoc  = zeroed_string,
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
-        }, {
+		.assoc  = zeroed_string,
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 16,
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = zeroed_string,
+		.iv     = zeroed_string,
 		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
-                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
-                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
-                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+			  "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+			  "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+			  "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
 		.ilen	= 32,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 16,
 
-        }, {
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
 		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
-                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+			  "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
 		.ilen	= 32,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 16,
-        }, {
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 16,
+	}, {
 		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
 			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x00\x00\x00\x00\x00\x01",
 		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
-                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
-                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
-                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
-                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
-                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
-                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
-                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
-                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
-                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+			  "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+			  "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+			  "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+			  "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+			  "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+			  "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+			  "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+			  "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+			  "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
 		.ilen	= 80,
-                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .alen   = 8,
-                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01"
-                          "\x01\x01\x01\x01\x01\x01\x01\x01",
-                .rlen   = 64,
-        }, {
+		.assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen   = 16,
+		.result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen   = 64,
+	}, {
 		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
 			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
-                          "\x00\x00\x00\x00",
+			  "\x00\x00\x00\x00",
 		.klen	= 20,
-                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
-                          "\x00\x00\x00\x00",
+		.iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef",
 		.input	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
 			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
 			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
@@ -20868,34 +20961,35 @@
 			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
 			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
 		.ilen	= 208,
-                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
-                          "\xaa\xaa\xaa\xaa",
-                .alen   = 12,
-                .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff"
-                          "\xff\xff\xff\xff\xff\xff\xff\xff",
-                .rlen   = 192,
+		.assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen   = 20,
+		.result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.rlen   = 192,
 	}, {
 		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
 			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
@@ -20913,8 +21007,9 @@
 			  "\x00\x21\x00\x01\x01\x02\x02\x01",
 		.rlen	= 72,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x00",
-		.alen	= 12,
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
 		.input	= "\xFE\xCF\x53\x7E\x72\x9D\x5B\x07"
 			  "\xDC\x30\xDF\x52\x8D\xD2\x2B\x76"
 			  "\x8D\x1B\x98\x73\x66\x96\xA6\xFD"
@@ -20942,8 +21037,9 @@
 			  "\x65\x72\x63\x69\x74\x79\x02\x64"
 			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
 		.rlen	= 64,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.input	= "\xDE\xB2\x2C\xD9\xB0\x7C\x72\xC1"
 			  "\x6E\x3A\x65\xBE\xEB\x8D\xF3\x04"
 			  "\xA5\xA5\x89\x7D\x33\xAE\x53\x0F"
@@ -20971,8 +21067,9 @@
 			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
 			  "\x01\x02\x02\x01",
 		.rlen	= 52,
-		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02",
-		.alen	= 8,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
 		.input	= "\xFF\x42\x5C\x9B\x72\x45\x99\xDF"
 			  "\x7A\x3B\xCD\x51\x01\x94\xE0\x0D"
 			  "\x6A\x78\x10\x7F\x1B\x0B\x1C\xBF"
@@ -20998,8 +21095,9 @@
 			  "\x75\x76\x77\x61\x62\x63\x64\x65"
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.rlen	= 64,
-		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01",
-		.alen	= 8,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
 		.input	= "\x46\x88\xDA\xF2\xF9\x73\xA3\x92"
 			  "\x73\x29\x09\xC3\x31\xD5\x6D\x60"
 			  "\xF6\x94\xAB\xAA\x41\x4B\x5E\x7F"
@@ -21027,8 +21125,9 @@
 			  "\x66\x67\x68\x69\x01\x02\x02\x01",
 		.rlen	= 64,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\xA4\x85\x3C\xF9\xF0"
 			  "\xF2\x2C\xB1\x0D\x86\xDD\x83\xB0"
 			  "\xFE\xC7\x56\x91\xCF\x1A\x04\xB0"
@@ -21052,8 +21151,9 @@
 			  "\x01\x02\x02\x01",
 		.rlen	= 28,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\x84\x5E\x5D\xF9\xF0"
 			  "\xF2\x2C\x3E\x6E\x86\xDD\x83\x1E"
 			  "\x1F\xC6\x57\x92\xCD\x1A\xF9\x13"
@@ -21074,8 +21174,9 @@
 			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
 			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
 		.rlen	= 40,
-		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A",
-		.alen	= 8,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
 		.input	= "\xA5\xB1\xF8\x06\x60\x29\xAE\xA4"
 			  "\x0E\x59\x8B\x81\x22\xDE\x02\x42"
 			  "\x09\x38\xB3\xAB\x33\xF8\x28\xE6"
@@ -21102,8 +21203,9 @@
 			  "\x23\x01\x01\x01",
 		.rlen	= 76,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.input	= "\x18\xA6\xFD\x42\xF7\x2C\xBF\x4A"
 			  "\xB2\xA2\xEA\x90\x1F\x73\xD8\x14"
 			  "\xE3\xE7\xF2\x43\xD9\x54\x12\xE1"
@@ -21132,8 +21234,9 @@
 			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
 		.rlen	= 40,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.input	= "\xF2\xD6\x9E\xCD\xBD\x5A\x0D\x5B"
 			  "\x8D\x5E\xF3\x8B\xAD\x4D\xA5\x8D"
 			  "\x1F\x27\x8F\xDE\x98\xEF\x67\x54"
@@ -21160,8 +21263,9 @@
 			  "\x15\x01\x01\x01",
 		.rlen	= 76,
 		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\xD1\x2F\xC1\xF9\xF0"
 			  "\x0D\x3C\xEB\xF3\x05\x41\x0D\xB8"
 			  "\x3D\x77\x84\xB6\x07\x32\x3D\x22"
@@ -21194,8 +21298,9 @@
 			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
 		.rlen	= 72,
 		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
-			  "\xDD\x0D\xB9\x9B",
-		.alen	= 12,
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
 		.input	= "\xD4\xB7\xED\x86\xA1\x77\x7F\x2E"
 			  "\xA1\x3D\x69\x73\xD3\x24\xC6\x9E"
 			  "\x7B\x43\xF8\x26\xFB\x56\x83\x12"
@@ -21216,8 +21321,9 @@
 		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
 		.result	= "\x01\x02\x02\x01",
 		.rlen	= 4,
-		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF",
-		.alen	= 8,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
 		.input	= "\x43\x7F\x86\x6B\xCB\x3F\x69\x9F"
 			  "\xE9\xB0\x82\x2B\xAC\x96\x1C\x45"
 			  "\x04\xBE\xF2\x70",
@@ -21233,8 +21339,9 @@
 			  "\x62\x65\x00\x01",
 		.rlen	= 20,
 		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
-			  "\x00\x00\x00\x01",
-		.alen	= 12,
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
 		.input	= "\x29\xC9\xFC\x69\xA1\x97\xD0\x38"
 			  "\xCC\xDD\x14\xE2\xDD\xFC\xAA\x05"
 			  "\x43\x33\x21\x64\x41\x25\x03\x52"
@@ -21258,8 +21365,9 @@
 			  "\x01\x02\x02\x01",
 		.rlen	= 52,
 		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
-			  "\xFF\xFF\xFF\xFF",
-		.alen	= 12,
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
 		.input	= "\xF9\x7A\xB2\xAA\x35\x6D\x8E\xDC"
 			  "\xE1\x76\x44\xAC\x8C\x78\xE2\x5D"
 			  "\xD2\x4D\xED\xBB\x29\xEB\xF1\xB6"
@@ -21285,8 +21393,9 @@
 			  "\x01\x02\x02\x01",
 		.rlen	= 52,
 		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
-			  "\x10\x10\x10\x10",
-		.alen	= 12,
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
 		.input	= "\xFB\xA2\xCA\xA8\xC6\xC5\xF9\xF0"
 			  "\xF2\x2C\xA5\x4A\x06\x12\x10\xAD"
 			  "\x3F\x6E\x57\x91\xCF\x1A\xCA\x21"
@@ -21309,8 +21418,9 @@
 			  "\x71\x72\x73\x74\x01\x02\x02\x01",
 		.rlen	= 32,
 		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
-			  "\x00\x00\x00\x07",
-		.alen	= 12,
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
 		.input	= "\x74\x75\x2E\x8A\xEB\x5D\x87\x3C"
 			  "\xD7\xC0\xF4\xAC\xC3\x6C\x4B\xFF"
 			  "\x84\xB7\xD7\xB9\x8F\x0C\xA8\xB6"
@@ -21538,10 +21648,7 @@
 			  "\xba",
 		.rlen	= 33,
 	}, {
-		/*
-		 * This is the same vector as aes_ccm_rfc4309_enc_tv_template[0]
-		 * below but rewritten to use the ccm algorithm directly.
-		 */
+		/* This is taken from FIPS CAVS. */
 		.key	= "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
 			  "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e",
 		.klen	= 16,
@@ -21559,6 +21666,142 @@
 			  "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
 			  "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
 		.rlen	= 48,
+	}, {
+		.key	= "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
+			  "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3",
+		.klen	= 16,
+		.iv	= "\x03\x4f\xa3\x19\xd3\x01\x5a\xd8"
+			  "\x30\x60\x15\x56\x00\x00\x00\x00",
+		.assoc	= "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
+			  "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
+			  "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
+			  "\x0a\x63\x09\x78\xbc\x2c\x55\xde",
+		.alen	= 32,
+		.input	= "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
+			  "\xa9\x28\x63\xba\x12\xa3\x14\x85"
+			  "\x57\x1e\x06\xc9\x7b\x21\xef\x76"
+			  "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
+		.ilen	= 32,
+		.result	= "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
+			  "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
+			  "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
+			  "\x3b\xb5\xce\x53\xef\xde\xbb\x02"
+			  "\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
+			  "\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
+		.rlen	= 48,
+	}, {
+		.key	= "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
+			  "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
+			  "\x53\x14\x73\x66\x8d\x88\xf6\x80",
+		.klen	= 24,
+		.iv	= "\x03\xa0\x20\x35\x26\xf2\x21\x8d"
+			  "\x50\x20\xda\xe2\x00\x00\x00\x00",
+		.assoc	= "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
+			  "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
+			  "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
+			  "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
+		.alen	= 32,
+		.result	= "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
+			  "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
+		.rlen	= 16,
+	}, {
+		.key	= "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
+			  "\xef\x7a\xd3\xce\xfc\x84\x60\x62"
+			  "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01",
+		.klen	= 24,
+		.iv	= "\x03\xd6\x3c\x8c\x86\x84\xb6\xcd"
+			  "\xef\x09\x2e\x94\x00\x00\x00\x00",
+		.assoc	= "\x02\x65\x78\x3c\xe9\x21\x30\x91"
+			  "\xb1\xb9\xda\x76\x9a\x78\x6d\x95"
+			  "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
+			  "\xe3\x00\x73\x69\x84\x69\x87\x79",
+		.alen	= 32,
+		.input	= "\x9f\xd2\x02\x4b\x52\x49\x31\x3c"
+			  "\x43\x69\x3a\x2d\x8e\x70\xad\x7e"
+			  "\xe0\xe5\x46\x09\x80\x89\x13\xb2"
+			  "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b",
+		.ilen	= 32,
+		.result	= "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62"
+			  "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f"
+			  "\x9b\x6a\x09\x70\xc1\x51\x83\xc2"
+			  "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e"
+			  "\xc7\x79\x11\x58\xe5\x6b\x20\x40"
+			  "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1",
+		.rlen	= 48,
+	}, {
+		.key	= "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
+			  "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
+			  "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e"
+			  "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b",
+		.klen	= 32,
+		.iv	= "\x03\x1e\x29\x91\xad\x8e\xc1\x53"
+			  "\x0a\xcf\x2d\xbe\x00\x00\x00\x00",
+		.assoc	= "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b"
+			  "\x78\x2b\x94\x02\x29\x0f\x42\x27"
+			  "\x6b\x75\xcb\x98\x34\x08\x7e\x79"
+			  "\xe4\x3e\x49\x0d\x84\x8b\x22\x87",
+		.alen	= 32,
+		.input	= "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f"
+			  "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66"
+			  "\xbf\x17\x99\x62\x4a\x39\x27\x1f"
+			  "\x1d\xdc\x24\xae\x19\x2f\x98\x4c",
+		.ilen	= 32,
+		.result	= "\x19\xb8\x61\x33\x45\x2b\x43\x96"
+			  "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6"
+			  "\x26\x3d\xf8\xc9\x65\x16\xa8\x9f"
+			  "\xf0\x62\x17\x34\xf2\x1e\x8d\x75"
+			  "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d",
+		.rlen	= 40,
+	}, {
+		.key	= "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
+			  "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
+			  "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c"
+			  "\x09\x75\x9a\x9b\x3c\x9b\x27\x39",
+		.klen	= 32,
+		.iv	= "\x03\xf9\xd9\x4e\x63\xb5\x3d\x9d"
+			  "\x43\xf6\x1e\x50",
+		.assoc	= "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
+			  "\x13\x02\x01\x0c\x83\x4c\x96\x35"
+			  "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
+			  "\xb0\x39\x36\xe6\x8f\x57\xe0\x13",
+		.alen	= 32,
+		.input	= "\x3b\x6c\x29\x36\xb6\xef\x07\xa6"
+			  "\x83\x72\x07\x4f\xcf\xfa\x66\x89"
+			  "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27"
+			  "\x30\xdb\x75\x09\x93\xd4\x65\xe4",
+		.ilen	= 32,
+		.result	= "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d"
+			  "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d"
+			  "\xcc\x63\x44\x25\x07\x78\x4f\x9e"
+			  "\x96\xb8\x88\xeb\xbc\x48\x1f\x06"
+			  "\x39\xaf\x39\xac\xd8\x4a\x80\x39"
+			  "\x7b\x72\x8a\xf7",
+		.rlen	= 44,
+	}, {
+		.key	= "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
+			  "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
+			  "\xf9\x8f\xad\xe3\x02\x13\x83\x77"
+			  "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b",
+		.klen	= 32,
+		.iv	= "\x03\x24\xa7\x8b\x07\xcb\xcc\x0e"
+			  "\xe6\x33\xbf\xf5\x00\x00\x00\x00",
+		.assoc	= "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f"
+			  "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d"
+			  "\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
+			  "\x16\x0c\x40\x90\x4b\xc7\x36\x73",
+		.alen	= 32,
+		.input	= "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92"
+			  "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d"
+			  "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2"
+			  "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a",
+		.ilen	= 32,
+		.result	= "\x83\x6f\x40\x87\x72\xcf\xc1\x13"
+			  "\xef\xbb\x80\x21\x04\x6c\x58\x09"
+			  "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7"
+			  "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf"
+			  "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
+			  "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
+		.rlen	= 48,
 	}
 };
 
@@ -21688,186 +21931,13 @@
 			  "\x8e\x5e\x67\x01\xc9\x17\x87\x65"
 			  "\x98\x09\xd6\x7d\xbe\xdd\x18",
 		.rlen	= 23,
-	},
-};
-
-/*
- * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all
- * use a 13-byte nonce, we only support an 11-byte nonce. Similarly, all of
- * Special Publication 800-38C's test vectors also use nonce lengths our
- * implementation doesn't support. The following are taken from fips cavs
- * fax files on hand at Red Hat.
- *
- * nb: actual key lengths are (klen - 3), the last 3 bytes are actually
- * part of the nonce which combine w/the iv, but need to be input this way.
- */
-static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
-	{
-		.key	= "\x83\xac\x54\x66\xc2\xeb\xe5\x05"
-			  "\x2e\x01\xd1\xfc\x5d\x82\x66\x2e"
-			  "\x96\xac\x59",
-		.klen	= 19,
-		.iv	= "\x30\x07\xa1\xe2\xa2\xc7\x55\x24",
-		.alen	= 0,
-		.input	= "\x19\xc8\x81\xf6\xe9\x86\xff\x93"
-			  "\x0b\x78\x67\xe5\xbb\xb7\xfc\x6e"
-			  "\x83\x77\xb3\xa6\x0c\x8c\x9f\x9c"
-			  "\x35\x2e\xad\xe0\x62\xf9\x91\xa1",
-		.ilen	= 32,
-		.result	= "\xab\x6f\xe1\x69\x1d\x19\x99\xa8"
-			  "\x92\xa0\xc4\x6f\x7e\xe2\x8b\xb1"
-			  "\x70\xbb\x8c\xa6\x4c\x6e\x97\x8a"
-			  "\x57\x2b\xbe\x5d\x98\xa6\xb1\x32"
-			  "\xda\x24\xea\xd9\xa1\x39\x98\xfd"
-			  "\xa4\xbe\xd9\xf2\x1a\x6d\x22\xa8",
-		.rlen	= 48,
 	}, {
-		.key	= "\x1e\x2c\x7e\x01\x41\x9a\xef\xc0"
-			  "\x0d\x58\x96\x6e\x5c\xa2\x4b\xd3"
-			  "\x4f\xa3\x19",
-		.klen	= 19,
-		.iv	= "\xd3\x01\x5a\xd8\x30\x60\x15\x56",
-		.assoc	= "\xda\xe6\x28\x9c\x45\x2d\xfd\x63"
-			  "\x5e\xda\x4c\xb6\xe6\xfc\xf9\xb7"
-			  "\x0c\x56\xcb\xe4\xe0\x05\x7a\xe1"
-			  "\x0a\x63\x09\x78\xbc\x2c\x55\xde",
-		.alen	= 32,
-		.input	= "\x87\xa3\x36\xfd\x96\xb3\x93\x78"
-			  "\xa9\x28\x63\xba\x12\xa3\x14\x85"
-			  "\x57\x1e\x06\xc9\x7b\x21\xef\x76"
-			  "\x7f\x38\x7e\x8e\x29\xa4\x3e\x7e",
-		.ilen	= 32,
-		.result	= "\x8a\x1e\x11\xf0\x02\x6b\xe2\x19"
-			  "\xfc\x70\xc4\x6d\x8e\xb7\x99\xab"
-			  "\xc5\x4b\xa2\xac\xd3\xf3\x48\xff"
-			  "\x3b\xb5\xce\x53\xef\xde\xbb\x02"
-			  "\xa9\x86\x15\x6c\x13\xfe\xda\x0a"
-			  "\x22\xb8\x29\x3d\xd8\x39\x9a\x23",
-		.rlen	= 48,
-	}, {
-		.key	= "\xf4\x6b\xc2\x75\x62\xfe\xb4\xe1"
-			  "\xa3\xf0\xff\xdd\x4e\x4b\x12\x75"
-			  "\x53\x14\x73\x66\x8d\x88\xf6\x80"
-			  "\xa0\x20\x35",
-		.klen	= 27,
-		.iv	= "\x26\xf2\x21\x8d\x50\x20\xda\xe2",
-		.assoc	= "\x5b\x9e\x13\x67\x02\x5e\xef\xc1"
-			  "\x6c\xf9\xd7\x1e\x52\x8f\x7a\x47"
-			  "\xe9\xd4\xcf\x20\x14\x6e\xf0\x2d"
-			  "\xd8\x9e\x2b\x56\x10\x23\x56\xe7",
-		.alen	= 32,
-		.ilen	= 0,
-		.result	= "\x36\xea\x7a\x70\x08\xdc\x6a\xbc"
-			  "\xad\x0c\x7a\x63\xf6\x61\xfd\x9b",
-		.rlen	= 16,
-	}, {
-		.key	= "\x56\xdf\x5c\x8f\x26\x3f\x0e\x42"
-			  "\xef\x7a\xd3\xce\xfc\x84\x60\x62"
-			  "\xca\xb4\x40\xaf\x5f\xc9\xc9\x01"
-			  "\xd6\x3c\x8c",
-		.klen	= 27,
-		.iv	= "\x86\x84\xb6\xcd\xef\x09\x2e\x94",
-		.assoc	= "\x02\x65\x78\x3c\xe9\x21\x30\x91"
-			  "\xb1\xb9\xda\x76\x9a\x78\x6d\x95"
-			  "\xf2\x88\x32\xa3\xf2\x50\xcb\x4c"
-			  "\xe3\x00\x73\x69\x84\x69\x87\x79",
-		.alen	= 32,
-		.input	= "\x9f\xd2\x02\x4b\x52\x49\x31\x3c"
-			  "\x43\x69\x3a\x2d\x8e\x70\xad\x7e"
-			  "\xe0\xe5\x46\x09\x80\x89\x13\xb2"
-			  "\x8c\x8b\xd9\x3f\x86\xfb\xb5\x6b",
-		.ilen	= 32,
-		.result	= "\x39\xdf\x7c\x3c\x5a\x29\xb9\x62"
-			  "\x5d\x51\xc2\x16\xd8\xbd\x06\x9f"
-			  "\x9b\x6a\x09\x70\xc1\x51\x83\xc2"
-			  "\x66\x88\x1d\x4f\x9a\xda\xe0\x1e"
-			  "\xc7\x79\x11\x58\xe5\x6b\x20\x40"
-			  "\x7a\xea\x46\x42\x8b\xe4\x6f\xe1",
-		.rlen	= 48,
-	}, {
-		.key	= "\xe0\x8d\x99\x71\x60\xd7\x97\x1a"
-			  "\xbd\x01\x99\xd5\x8a\xdf\x71\x3a"
-			  "\xd3\xdf\x24\x4b\x5e\x3d\x4b\x4e"
-			  "\x30\x7a\xb9\xd8\x53\x0a\x5e\x2b"
-			  "\x1e\x29\x91",
-		.klen	= 35,
-		.iv	= "\xad\x8e\xc1\x53\x0a\xcf\x2d\xbe",
-		.assoc	= "\x19\xb6\x1f\x57\xc4\xf3\xf0\x8b"
-			  "\x78\x2b\x94\x02\x29\x0f\x42\x27"
-			  "\x6b\x75\xcb\x98\x34\x08\x7e\x79"
-			  "\xe4\x3e\x49\x0d\x84\x8b\x22\x87",
-		.alen	= 32,
-		.input	= "\xe1\xd9\xd8\x13\xeb\x3a\x75\x3f"
-			  "\x9d\xbd\x5f\x66\xbe\xdc\xbb\x66"
-			  "\xbf\x17\x99\x62\x4a\x39\x27\x1f"
-			  "\x1d\xdc\x24\xae\x19\x2f\x98\x4c",
-		.ilen	= 32,
-		.result	= "\x19\xb8\x61\x33\x45\x2b\x43\x96"
-			  "\x6f\x51\xd0\x20\x30\x7d\x9b\xc6"
-			  "\x26\x3d\xf8\xc9\x65\x16\xa8\x9f"
-			  "\xf0\x62\x17\x34\xf2\x1e\x8d\x75"
-			  "\x4e\x13\xcc\xc0\xc3\x2a\x54\x2d",
-		.rlen	= 40,
-	}, {
-		.key	= "\x7c\xc8\x18\x3b\x8d\x99\xe0\x7c"
-			  "\x45\x41\xb8\xbd\x5c\xa7\xc2\x32"
-			  "\x8a\xb8\x02\x59\xa4\xfe\xa9\x2c"
-			  "\x09\x75\x9a\x9b\x3c\x9b\x27\x39"
-			  "\xf9\xd9\x4e",
-		.klen	= 35,
-		.iv	= "\x63\xb5\x3d\x9d\x43\xf6\x1e\x50",
-		.assoc	= "\x57\xf5\x6b\x8b\x57\x5c\x3d\x3b"
-			  "\x13\x02\x01\x0c\x83\x4c\x96\x35"
-			  "\x8e\xd6\x39\xcf\x7d\x14\x9b\x94"
-			  "\xb0\x39\x36\xe6\x8f\x57\xe0\x13",
-		.alen	= 32,
-		.input	= "\x3b\x6c\x29\x36\xb6\xef\x07\xa6"
-			  "\x83\x72\x07\x4f\xcf\xfa\x66\x89"
-			  "\x5f\xca\xb1\xba\xd5\x8f\x2c\x27"
-			  "\x30\xdb\x75\x09\x93\xd4\x65\xe4",
-		.ilen	= 32,
-		.result	= "\xb0\x88\x5a\x33\xaa\xe5\xc7\x1d"
-			  "\x85\x23\xc7\xc6\x2f\xf4\x1e\x3d"
-			  "\xcc\x63\x44\x25\x07\x78\x4f\x9e"
-			  "\x96\xb8\x88\xeb\xbc\x48\x1f\x06"
-			  "\x39\xaf\x39\xac\xd8\x4a\x80\x39"
-			  "\x7b\x72\x8a\xf7",
-		.rlen	= 44,
-	}, {
-		.key	= "\xab\xd0\xe9\x33\x07\x26\xe5\x83"
-			  "\x8c\x76\x95\xd4\xb6\xdc\xf3\x46"
-			  "\xf9\x8f\xad\xe3\x02\x13\x83\x77"
-			  "\x3f\xb0\xf1\xa1\xa1\x22\x0f\x2b"
-			  "\x24\xa7\x8b",
-		.klen	= 35,
-		.iv	= "\x07\xcb\xcc\x0e\xe6\x33\xbf\xf5",
-		.assoc	= "\xd4\xdb\x30\x1d\x03\xfe\xfd\x5f"
-			  "\x87\xd4\x8c\xb6\xb6\xf1\x7a\x5d"
-			  "\xab\x90\x65\x8d\x8e\xca\x4d\x4f"
-			  "\x16\x0c\x40\x90\x4b\xc7\x36\x73",
-		.alen	= 32,
-		.input	= "\xf5\xc6\x7d\x48\xc1\xb7\xe6\x92"
-			  "\x97\x5a\xca\xc4\xa9\x6d\xf9\x3d"
-			  "\x6c\xde\xbc\xf1\x90\xea\x6a\xb2"
-			  "\x35\x86\x36\xaf\x5c\xfe\x4b\x3a",
-		.ilen	= 32,
-		.result	= "\x83\x6f\x40\x87\x72\xcf\xc1\x13"
-			  "\xef\xbb\x80\x21\x04\x6c\x58\x09"
-			  "\x07\x1b\xfc\xdf\xc0\x3f\x5b\xc7"
-			  "\xe0\x79\xa8\x6e\x71\x7c\x3f\xcf"
-			  "\x5c\xda\xb2\x33\xe5\x13\xe2\x0d"
-			  "\x74\xd1\xef\xb5\x0f\x3a\xb5\xf8",
-		.rlen	= 48,
-	},
-};
-
-static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[] = {
-	{
+		/* This is taken from FIPS CAVS. */
 		.key	= "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
-			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
-			  "\xc6\xfb\x7d",
-		.klen	= 19,
-		.iv	= "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
+			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+		.klen	= 16,
+		.iv	= "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
+			  "\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
 		.alen	= 0,
 		.input	= "\xd5\xe8\x93\x9f\xc7\x89\x2e\x2b",
 		.ilen	= 8,
@@ -21876,10 +21946,10 @@
 		.novrfy	= 1,
 	}, {
 		.key	= "\xab\x2f\x8a\x74\xb7\x1c\xd2\xb1"
-			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9"
-			  "\xaf\x94\x87",
-		.klen	= 19,
-		.iv	= "\x78\x35\x82\x81\x7f\x88\x94\x68",
+			  "\xff\x80\x2e\x48\x7d\x82\xf8\xb9",
+		.klen	= 16,
+		.iv	= "\x03\xaf\x94\x87\x78\x35\x82\x81"
+			  "\x7f\x88\x94\x68\x00\x00\x00\x00",
 		.alen	= 0,
 		.input	= "\x41\x3c\xb8\x87\x73\xcb\xf3\xf3",
 		.ilen	= 8,
@@ -21887,10 +21957,10 @@
 		.rlen	= 0,
 	}, {
 		.key	= "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
-			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
-			  "\xc6\xfb\x7d",
-		.klen	= 19,
-		.iv	= "\x80\x0d\x13\xab\xd8\xa6\xb2\xd8",
+			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
+		.klen	= 16,
+		.iv	= "\x03\xc6\xfb\x7d\x80\x0d\x13\xab"
+			  "\xd8\xa6\xb2\xd8\x00\x00\x00\x00",
 		.assoc	= "\xf3\x94\x87\x78\x35\x82\x81\x7f"
 			  "\x88\x94\x68\xb1\x78\x6b\x2b\xd6"
 			  "\x04\x1f\x4e\xed\x78\xd5\x33\x66"
@@ -21911,10 +21981,10 @@
 		.novrfy	= 1,
 	}, {
 		.key	= "\x61\x0e\x8c\xae\xe3\x23\xb6\x38"
-			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8"
-			  "\x05\xe0\xc9",
-		.klen	= 19,
-		.iv	= "\x0f\xed\x34\xea\x97\xd4\x3b\xdf",
+			  "\x76\x1c\xf6\x3a\x67\xa3\x9c\xd8",
+		.klen	= 16,
+		.iv	= "\x03\x05\xe0\xc9\x0f\xed\x34\xea"
+			  "\x97\xd4\x3b\xdf\x00\x00\x00\x00",
 		.assoc	= "\x49\x5c\x50\x1f\x1d\x94\xcc\x81"
 			  "\xba\xb7\xb6\x03\xaf\xa5\xc1\xa1"
 			  "\xd8\x5c\x42\x68\xe0\x6c\xda\x89"
@@ -21935,10 +22005,10 @@
 	}, {
 		.key	= "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
 			  "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
-			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
-			  "\xee\x49\x83",
-		.klen	= 27,
-		.iv	= "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6",
+		.klen	= 24,
+		.iv	= "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+			  "\x57\xba\xfd\x9e\x00\x00\x00\x00",
 		.assoc	= "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
 			  "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
 			  "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
@@ -21951,10 +22021,10 @@
 	}, {
 		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
 			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-			  "\xee\x49\x83",
-		.klen	= 27,
-		.iv	= "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba",
+		.klen	= 24,
+		.iv	= "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+			  "\x57\xba\xfd\x9e\x00\x00\x00\x00",
 		.assoc	= "\x44\xa6\x2c\x05\xe9\xe1\x43\xb1"
 			  "\x58\x7c\xf2\x5c\x6d\x39\x0a\x64"
 			  "\xa4\xf0\x13\x05\xd1\x77\x99\x67"
@@ -21974,10 +22044,10 @@
 	}, {
 		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
 			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
-			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-			  "\xd1\xfc\x57",
-		.klen	= 27,
-		.iv	= "\x9c\xfe\xb8\x9c\xad\x71\xaa\x1f",
+			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba",
+		.klen	= 24,
+		.iv	= "\x03\xd1\xfc\x57\x9c\xfe\xb8\x9c"
+			  "\xad\x71\xaa\x1f\x00\x00\x00\x00",
 		.assoc	= "\x86\x67\xa5\xa9\x14\x5f\x0d\xc6"
 			  "\xff\x14\xc7\x44\xbf\x6c\x3a\xc3"
 			  "\xff\xb6\x81\xbd\xe2\xd5\x06\xc7"
@@ -22000,10 +22070,10 @@
 		.key	= "\xa4\x4b\x54\x29\x0a\xb8\x6d\x01"
 			  "\x5b\x80\x2a\xcf\x25\xc4\xb7\x5c"
 			  "\x20\x2c\xad\x30\xc2\x2b\x41\xfb"
-			  "\x0e\x85\xbc\x33\xad\x0f\x2b\xff"
-			  "\xee\x49\x83",
-		.klen	= 35,
-		.iv	= "\xe9\xa9\xff\xe9\x57\xba\xfd\x9e",
+			  "\x0e\x85\xbc\x33\xad\x0f\x2b\xff",
+		.klen	= 32,
+		.iv	= "\x03\xee\x49\x83\xe9\xa9\xff\xe9"
+			  "\x57\xba\xfd\x9e\x00\x00\x00\x00",
 		.alen	= 0,
 		.input	= "\x1f\xb8\x8f\xa3\xdd\x54\x00\xf2",
 		.ilen	= 8,
@@ -22013,10 +22083,10 @@
 		.key	= "\x39\xbb\xa7\xbe\x59\x97\x9e\x73"
 			  "\xa2\xbc\x6b\x98\xd7\x75\x7f\xe3"
 			  "\xa4\x48\x93\x39\x26\x71\x4a\xc6"
-			  "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb"
-			  "\x85\x34\x66",
-		.klen	= 35,
-		.iv	= "\x42\xc8\x92\x0f\x36\x58\xe0\x6b",
+			  "\xae\x8f\x11\x4c\xc2\x9c\x4a\xbb",
+		.klen	= 32,
+		.iv	= "\x03\x85\x34\x66\x42\xc8\x92\x0f"
+			  "\x36\x58\xe0\x6b\x00\x00\x00\x00",
 		.alen	= 0,
 		.input	= "\x48\x01\x5e\x02\x24\x04\x66\x47"
 			  "\xa1\xea\x6f\xaf\xe8\xfc\xfb\xdd"
@@ -22035,10 +22105,10 @@
 		.key	= "\x58\x5d\xa0\x96\x65\x1a\x04\xd7"
 			  "\x96\xe5\xc5\x68\xaa\x95\x35\xe0"
 			  "\x29\xa0\xba\x9e\x48\x78\xd1\xba"
-			  "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b"
-			  "\xcf\x76\x3f",
-		.klen	= 35,
-		.iv	= "\xd9\x95\x75\x8f\x44\x89\x40\x7b",
+			  "\x0d\x1a\x53\x3b\xb5\xe3\xf8\x8b",
+		.klen	= 32,
+		.iv	= "\x03\xcf\x76\x3f\xd9\x95\x75\x8f"
+			  "\x44\x89\x40\x7b\x00\x00\x00\x00",
 		.assoc	= "\x8f\x86\x6c\x4d\x1d\xc5\x39\x88"
 			  "\xc8\xf3\x5c\x52\x10\x63\x6f\x2b"
 			  "\x8a\x2a\xc5\x6f\x30\x23\x58\x7b"
@@ -22060,6 +22130,1240 @@
 };
 
 /*
+ * rfc4309 refers to section 8 of rfc3610 for test vectors, but they all
+ * use a 13-byte nonce, while we only support an 11-byte nonce.  Worse,
+ * they use AD lengths which are not valid ESP header lengths.
+ *
+ * These vectors are copied/generated from the ones for rfc4106 with
+ * the key truncated by one byte.
+ */
+static struct aead_testvec aes_ccm_rfc4309_enc_tv_template[] = {
+	{ /* Generated using Crypto++ */
+		.key	= zeroed_string,
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.input	= zeroed_string,
+		.ilen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.result	= "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+			  "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
+			  "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
+			  "\x27\x50\x01\xAC\x03\x33\x39\xFB",
+		.rlen	= 32,
+	},{
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= zeroed_string,
+		.ilen	= 16,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.result	= "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+			  "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
+			  "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
+			  "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
+		.rlen	= 32,
+
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.result	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
+			  "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.result	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
+			  "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.result	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
+			  "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
+		.rlen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.ilen	= 64,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.result	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
+			  "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
+			  "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
+			  "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
+			  "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
+			  "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
+			  "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
+			  "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
+		.rlen	= 80,
+	}, {
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x45\x67\x89\xab\xcd\xef",
+		.input	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.ilen	= 192,
+		.assoc	= "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen	= 20,
+		.result	= "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+			  "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
+			  "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
+			  "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
+			  "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
+			  "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
+			  "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
+			  "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
+			  "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
+			  "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
+			  "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
+			  "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
+			  "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
+			  "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
+			  "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
+			  "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
+			  "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
+			  "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
+			  "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
+			  "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
+			  "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
+			  "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
+			  "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
+			  "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
+			  "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
+			  "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
+		.rlen	= 208,
+	}, { /* From draft-mcgrew-gcm-test-01 */
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x2E\x44\x3B",
+		.klen	= 19,
+		.iv	= "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
+		.input	= "\x45\x00\x00\x48\x69\x9A\x00\x00"
+			  "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
+			  "\x38\xD3\x01\x00\x00\x01\x00\x00"
+			  "\x00\x00\x00\x00\x04\x5F\x73\x69"
+			  "\x70\x04\x5F\x75\x64\x70\x03\x73"
+			  "\x69\x70\x09\x63\x79\x62\x65\x72"
+			  "\x63\x69\x74\x79\x02\x64\x6B\x00"
+			  "\x00\x21\x00\x01\x01\x02\x02\x01",
+		.ilen	= 72,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
+		.result	= "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+			  "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
+			  "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
+			  "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
+			  "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
+			  "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
+			  "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
+			  "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
+			  "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
+			  "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
+			  "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
+		.rlen	= 88,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xCA\xFE\xBA",
+		.klen	= 19,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.input	= "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+			  "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
+			  "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
+			  "\x00\x01\x00\x00\x00\x00\x00\x00"
+			  "\x03\x73\x69\x70\x09\x63\x79\x62"
+			  "\x65\x72\x63\x69\x74\x79\x02\x64"
+			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
+		.ilen	= 64,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.result	= "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+			  "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
+			  "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
+			  "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
+			  "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
+			  "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
+			  "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
+			  "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
+			  "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
+			  "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
+		.rlen	= 80,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x11\x22\x33",
+		.klen	= 35,
+		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.input	= "\x45\x00\x00\x30\x69\xA6\x40\x00"
+			  "\x80\x06\x26\x90\xC0\xA8\x01\x02"
+			  "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
+			  "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
+			  "\x70\x02\x40\x00\x20\xBF\x00\x00"
+			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
+			  "\x01\x02\x02\x01",
+		.ilen	= 52,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
+		.result	= "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+			  "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
+			  "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
+			  "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
+			  "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
+			  "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
+			  "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
+			  "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
+			  "\x5A\x48\x6A\x3E",
+		.rlen	= 68,
+	}, {
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.input	= "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+			  "\x80\x01\xCB\x7A\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x07\x5C"
+			  "\x02\x00\x44\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.ilen	= 64,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.result	= "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+			  "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
+			  "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
+			  "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
+			  "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
+			  "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
+			  "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
+			  "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
+			  "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
+			  "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
+		.rlen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+			  "\x80\x01\xCB\x7C\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x08\x5C"
+			  "\x02\x00\x43\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.ilen	= 64,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+			  "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
+			  "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
+			  "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
+			  "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
+			  "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
+			  "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
+		.rlen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+			  "\x80\x01\x44\x1F\x40\x67\x93\xB6"
+			  "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
+			  "\x01\x02\x02\x01",
+		.ilen	= 28,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+			  "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
+			  "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
+			  "\xA1\xE5\x90\x40\x05\x37\x45\x70"
+			  "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
+			  "\x08\xB4\x22\xE4",
+		.rlen	= 44,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\xCA\xFE\xBA",
+		.klen	= 27,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.input	= "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+			  "\x40\x06\x78\x80\x0A\x01\x03\x8F"
+			  "\x0A\x01\x06\x12\x80\x23\x06\xB8"
+			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
+			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
+		.ilen	= 40,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.result	= "\x05\x22\x15\xD1\x52\x56\x85\x04"
+			  "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
+			  "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
+			  "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
+			  "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
+			  "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
+			  "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
+		.rlen	= 56,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.input	= "\x45\x00\x00\x49\x33\xBA\x00\x00"
+			  "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
+			  "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
+			  "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
+			  "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
+			  "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
+			  "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
+			  "\x23\x01\x01\x01",
+		.ilen	= 76,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.result	= "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+			  "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
+			  "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
+			  "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
+			  "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
+			  "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
+			  "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
+			  "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
+			  "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
+			  "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
+			  "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
+			  "\x12\x25\x0B\xF9",
+		.rlen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
+		.klen	= 35,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.input	= "\x45\x08\x00\x28\x73\x2C\x00\x00"
+			  "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
+			  "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
+			  "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
+			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
+		.ilen	= 40,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.result	= "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+			  "\x2C\x64\x87\x46\x1E\x34\x10\x05"
+			  "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
+			  "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
+			  "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
+			  "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
+			  "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
+		.rlen	= 56,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x49\x33\x3E\x00\x00"
+			  "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xCB\x45\x80\x03\x02\x5B"
+			  "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
+			  "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
+			  "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
+			  "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
+			  "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
+			  "\x15\x01\x01\x01",
+		.ilen	= 76,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+			  "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
+			  "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
+			  "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
+			  "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
+			  "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
+			  "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
+			  "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
+			  "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
+			  "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
+			  "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
+			  "\xCC\xF7\x46\x6F",
+		.rlen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
+		.klen	= 35,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.input	= "\x63\x69\x73\x63\x6F\x01\x72\x75"
+			  "\x6C\x65\x73\x01\x74\x68\x65\x01"
+			  "\x6E\x65\x74\x77\x65\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x01\x74\x68\x65"
+			  "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
+			  "\x67\x69\x65\x73\x01\x74\x68\x61"
+			  "\x74\x77\x69\x6C\x6C\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
+			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
+		.ilen	= 72,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.result	= "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+			  "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
+			  "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
+			  "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
+			  "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
+			  "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
+			  "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
+			  "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
+			  "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
+			  "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
+			  "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
+		.rlen	= 88,
+	}, {
+		.key	= "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
+			  "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
+			  "\xD9\x66\x42",
+		.klen	= 19,
+		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.input	= "\x01\x02\x02\x01",
+		.ilen	= 4,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
+		.result	= "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+			  "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
+			  "\xF7\x61\x24\x62",
+		.rlen	= 20,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.input	= "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+			  "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
+			  "\x62\x65\x00\x01",
+		.ilen	= 20,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.result	= "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+			  "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
+			  "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
+			  "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
+			  "\x17\x17\x65\xAD",
+		.rlen	= 36,
+	}, {
+		.key	= "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
+			  "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
+			  "\x61\x61\x6E\x64\x64\x6F\x69\x74"
+			  "\x62\x65\x66\x6F\x72\x65\x69\x61"
+			  "\x74\x75\x72",
+		.klen	= 35,
+		.iv	= "\x33\x30\x21\x69\x67\x65\x74\x6D",
+		.input	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.ilen	= 52,
+		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
+		.result	= "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+			  "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
+			  "\x48\x59\x80\x18\x1A\x18\x1A\x04"
+			  "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
+			  "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
+			  "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
+			  "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
+			  "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
+			  "\x39\xDB\xC8\xDC",
+		.rlen	= 68,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.input	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.ilen	= 52,
+		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.result	= "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+			  "\x10\x60\x54\x25\xEB\x80\x04\x93"
+			  "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
+			  "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x44\xCC\x90\xBF\x00\x94\x94\x92"
+			  "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
+			  "\xF4\x95\x5D\x4F",
+		.rlen	= 68,
+	}, {
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x22\x43\x3C",
+		.klen	= 19,
+		.iv	= "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
+		.input	= "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+			  "\x61\x62\x63\x64\x65\x66\x67\x68"
+			  "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
+			  "\x71\x72\x73\x74\x01\x02\x02\x01",
+		.ilen	= 32,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
+		.result	= "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+			  "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
+			  "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
+			  "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
+			  "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
+			  "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
+		.rlen	= 48,
+	}
+};
+
+static struct aead_testvec aes_ccm_rfc4309_dec_tv_template[]	= {
+	{ /* Generated using Crypto++ */
+		.key	= zeroed_string,
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.result	= zeroed_string,
+		.rlen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.input	= "\x2E\x9A\xCA\x6B\xDA\x54\xFC\x6F"
+			  "\x12\x50\xE8\xDE\x81\x3C\x63\x08"
+			  "\x1A\x22\xBA\x75\xEE\xD4\xD5\xB5"
+			  "\x27\x50\x01\xAC\x03\x33\x39\xFB",
+		.ilen	= 32,
+	},{
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.result	= zeroed_string,
+		.rlen	= 16,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.input	= "\xCF\xB9\x99\x17\xC8\x86\x0E\x7F"
+			  "\x7E\x76\xF8\xE6\xF8\xCC\x1F\x17"
+			  "\x6A\xE0\x53\x9F\x4B\x73\x7E\xDA"
+			  "\x08\x09\x4E\xC4\x1E\xAD\xC6\xB0",
+		.ilen	= 32,
+
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 16,
+		.assoc	= zeroed_string,
+		.alen	= 16,
+		.input	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\xA1\xE2\xC2\x42\x2B\x81\x70\x40"
+			  "\xFD\x7F\x76\xD1\x03\x07\xBB\x0C",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= zeroed_string,
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.input	= "\x33\xDE\x73\xBC\xA6\xCE\x4E\xA6"
+			  "\x61\xF4\xF5\x41\x03\x4A\xE3\x86"
+			  "\x5B\xC0\x73\xE0\x2B\x73\x68\xC9"
+			  "\x2D\x8C\x58\xC2\x90\x3D\xB0\x3E",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 16,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.input	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x43\x8E\x76\x57\x3B\xB4\x05\xE8"
+			  "\xA9\x9B\xBF\x25\xE0\x4F\xC0\xED",
+		.ilen	= 32,
+	}, {
+		.key	= "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.result	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x01\x01\x01\x01\x01\x01\x01\x01",
+		.rlen	= 64,
+		.assoc	= "\x01\x01\x01\x01\x01\x01\x01\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.alen	= 16,
+		.input	= "\xCE\xB8\x98\x16\xC9\x87\x0F\x7E"
+			  "\x7F\x77\xF9\xE7\xF9\xCD\x1E\x16"
+			  "\x9C\xA4\x97\x83\x3F\x01\xA5\xF4"
+			  "\x43\x09\xE7\xB8\xE9\xD1\xD7\x02"
+			  "\x9B\xAB\x39\x18\xEB\x94\x34\x36"
+			  "\xE6\xC5\xC8\x9B\x00\x81\x9E\x49"
+			  "\x1D\x78\xE1\x48\xE3\xE9\xEA\x8E"
+			  "\x3A\x2B\x67\x5D\x35\x6A\x0F\xDB"
+			  "\x02\x73\xDD\xE7\x30\x4A\x30\x54"
+			  "\x1A\x9D\x09\xCA\xC8\x1C\x32\x5F",
+		.ilen	= 80,
+	}, {
+		.key	= "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x45\x67\x89\xab\xcd\xef",
+		.result	= "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff"
+			  "\xff\xff\xff\xff\xff\xff\xff\xff",
+		.rlen	= 192,
+		.assoc	= "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+			  "\xaa\xaa\xaa\xaa\x00\x00\x45\x67"
+			  "\x89\xab\xcd\xef",
+		.alen	= 20,
+		.input	= "\x64\x17\xDC\x24\x9D\x92\xBA\x5E"
+			  "\x7C\x64\x6D\x33\x46\x77\xAC\xB1"
+			  "\x5C\x9E\xE2\xC7\x27\x11\x3E\x95"
+			  "\x7D\xBE\x28\xC8\xC1\xCA\x5E\x8C"
+			  "\xB4\xE2\xDE\x9F\x53\x59\x26\xDB"
+			  "\x0C\xD4\xE4\x07\x9A\xE6\x3E\x01"
+			  "\x58\x0D\x3E\x3D\xD5\x21\xEB\x04"
+			  "\x06\x9D\x5F\xB9\x02\x49\x1A\x2B"
+			  "\xBA\xF0\x4E\x3B\x85\x50\x5B\x09"
+			  "\xFE\xEC\xFC\x54\xEC\x0C\xE2\x79"
+			  "\x8A\x2F\x5F\xD7\x05\x5D\xF1\x6D"
+			  "\x22\xEB\xD1\x09\x80\x3F\x5A\x70"
+			  "\xB2\xB9\xD3\x63\x99\xC2\x4D\x1B"
+			  "\x36\x12\x00\x89\xAA\x5D\x55\xDA"
+			  "\x1D\x5B\xD8\x3C\x5F\x09\xD2\xE6"
+			  "\x39\x41\x5C\xF0\xBE\x26\x4E\x5F"
+			  "\x2B\x50\x44\x52\xC2\x10\x7D\x38"
+			  "\x82\x64\x83\x0C\xAE\x49\xD0\xE5"
+			  "\x4F\xE5\x66\x4C\x58\x7A\xEE\x43"
+			  "\x3B\x51\xFE\xBA\x24\x8A\xFE\xDC"
+			  "\x19\x6D\x60\x66\x61\xF9\x9A\x3F"
+			  "\x75\xFC\x38\x53\x5B\xB5\xCD\x52"
+			  "\x4F\xE5\xE4\xC9\xFE\x10\xCB\x98"
+			  "\xF0\x06\x5B\x07\xAB\xBB\xF4\x0E"
+			  "\x2D\xC2\xDD\x5D\xDD\x22\x9A\xCC"
+			  "\x39\xAB\x63\xA5\x3D\x9C\x51\x8A",
+		.ilen	= 208,
+	}, { /* From draft-mcgrew-gcm-test-01 */
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x2E\x44\x3B",
+		.klen	= 19,
+		.iv	= "\x49\x56\xED\x7E\x3B\x24\x4C\xFE",
+		.result	= "\x45\x00\x00\x48\x69\x9A\x00\x00"
+			  "\x80\x11\x4D\xB7\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x9B\xF1\x56"
+			  "\x38\xD3\x01\x00\x00\x01\x00\x00"
+			  "\x00\x00\x00\x00\x04\x5F\x73\x69"
+			  "\x70\x04\x5F\x75\x64\x70\x03\x73"
+			  "\x69\x70\x09\x63\x79\x62\x65\x72"
+			  "\x63\x69\x74\x79\x02\x64\x6B\x00"
+			  "\x00\x21\x00\x01\x01\x02\x02\x01",
+		.rlen	= 72,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x00\x49\x56\xED\x7E"
+			  "\x3B\x24\x4C\xFE",
+		.alen	= 20,
+		.input	= "\x89\xBA\x3E\xEF\xE6\xD6\xCF\xDB"
+			  "\x83\x60\xF5\xBA\x3A\x56\x79\xE6"
+			  "\x7E\x0C\x53\xCF\x9E\x87\xE0\x4E"
+			  "\x1A\x26\x01\x24\xC7\x2E\x3D\xBF"
+			  "\x29\x2C\x91\xC1\xB8\xA8\xCF\xE0"
+			  "\x39\xF8\x53\x6D\x31\x22\x2B\xBF"
+			  "\x98\x81\xFC\x34\xEE\x85\x36\xCD"
+			  "\x26\xDB\x6C\x7A\x0C\x77\x8A\x35"
+			  "\x18\x85\x54\xB2\xBC\xDD\x3F\x43"
+			  "\x61\x06\x8A\xDF\x86\x3F\xB4\xAC"
+			  "\x97\xDC\xBD\xFD\x92\x10\xC5\xFF",
+		.ilen	= 88,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xCA\xFE\xBA",
+		.klen	= 19,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.result	= "\x45\x00\x00\x3E\x69\x8F\x00\x00"
+			  "\x80\x11\x4D\xCC\xC0\xA8\x01\x02"
+			  "\xC0\xA8\x01\x01\x0A\x98\x00\x35"
+			  "\x00\x2A\x23\x43\xB2\xD0\x01\x00"
+			  "\x00\x01\x00\x00\x00\x00\x00\x00"
+			  "\x03\x73\x69\x70\x09\x63\x79\x62"
+			  "\x65\x72\x63\x69\x74\x79\x02\x64"
+			  "\x6B\x00\x00\x01\x00\x01\x00\x01",
+		.rlen	= 64,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.input	= "\x4B\xC2\x70\x60\x64\xD2\xF3\xC8"
+			  "\xE5\x26\x8A\xDE\xB8\x7E\x7D\x16"
+			  "\x56\xC7\xD2\x88\xBA\x8D\x58\xAF"
+			  "\xF5\x71\xB6\x37\x84\xA7\xB1\x99"
+			  "\x51\x5C\x0D\xA0\x27\xDE\xE7\x2D"
+			  "\xEF\x25\x88\x1F\x1D\x77\x11\xFF"
+			  "\xDB\xED\xEE\x56\x16\xC5\x5C\x9B"
+			  "\x00\x62\x1F\x68\x4E\x7C\xA0\x97"
+			  "\x10\x72\x7E\x53\x13\x3B\x68\xE4"
+			  "\x30\x99\x91\x79\x09\xEA\xFF\x6A",
+		.ilen	= 80,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x11\x22\x33",
+		.klen	= 35,
+		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.result	= "\x45\x00\x00\x30\x69\xA6\x40\x00"
+			  "\x80\x06\x26\x90\xC0\xA8\x01\x02"
+			  "\x93\x89\x15\x5E\x0A\x9E\x00\x8B"
+			  "\x2D\xC5\x7E\xE0\x00\x00\x00\x00"
+			  "\x70\x02\x40\x00\x20\xBF\x00\x00"
+			  "\x02\x04\x05\xB4\x01\x01\x04\x02"
+			  "\x01\x02\x02\x01",
+		.rlen	= 52,
+		.assoc	= "\x4A\x2C\xBF\xE3\x00\x00\x00\x02"
+			  "\x01\x02\x03\x04\x05\x06\x07\x08",
+		.alen	= 16,
+		.input	= "\xD6\x31\x0D\x2B\x3D\x6F\xBD\x2F"
+			  "\x58\x41\x7E\xFF\x9A\x9E\x09\xB4"
+			  "\x1A\xF7\xF6\x42\x31\xCD\xBF\xAD"
+			  "\x27\x0E\x2C\xF2\xDB\x10\xDF\x55"
+			  "\x8F\x0D\xD7\xAC\x23\xBD\x42\x10"
+			  "\xD0\xB2\xAF\xD8\x37\xAC\x6B\x0B"
+			  "\x11\xD4\x0B\x12\xEC\xB4\xB1\x92"
+			  "\x23\xA6\x10\xB0\x26\xD6\xD9\x26"
+			  "\x5A\x48\x6A\x3E",
+		.ilen	= 68,
+	}, {
+		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00",
+		.klen	= 19,
+		.iv	= "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.result	= "\x45\x00\x00\x3C\x99\xC5\x00\x00"
+			  "\x80\x01\xCB\x7A\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x07\x5C"
+			  "\x02\x00\x44\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.rlen	= 64,
+		.assoc	= "\x00\x00\x00\x00\x00\x00\x00\x01"
+			  "\x00\x00\x00\x00\x00\x00\x00\x00",
+		.alen	= 16,
+		.input	= "\x6B\x9A\xCA\x57\x43\x91\xFC\x6F"
+			  "\x92\x51\x23\xA4\xC1\x5B\xF0\x10"
+			  "\xF3\x13\xF4\xF8\xA1\x9A\xB4\xDC"
+			  "\x89\xC8\xF8\x42\x62\x95\xB7\xCB"
+			  "\xB8\xF5\x0F\x1B\x2E\x94\xA2\xA7"
+			  "\xBF\xFB\x8A\x92\x13\x63\xD1\x3C"
+			  "\x08\xF5\xE8\xA6\xAA\xF6\x34\xF9"
+			  "\x42\x05\xAF\xB3\xE7\x9A\xFC\xEE"
+			  "\x36\x25\xC1\x10\x12\x1C\xCA\x82"
+			  "\xEA\xE6\x63\x5A\x57\x28\xA9\x9A",
+		.ilen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x3C\x99\xC3\x00\x00"
+			  "\x80\x01\xCB\x7C\x40\x67\x93\x18"
+			  "\x01\x01\x01\x01\x08\x00\x08\x5C"
+			  "\x02\x00\x43\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x75\x76\x77\x61\x62\x63\x64\x65"
+			  "\x66\x67\x68\x69\x01\x02\x02\x01",
+		.rlen	= 64,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x2B\x7C\x67\x52\xF6"
+			  "\x10\x60\x40\x62\x6B\x4F\x97\x8E"
+			  "\x0B\xB2\x22\x97\xCB\x21\xE0\x90"
+			  "\xA2\xE7\xD1\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x30\xB8\xE5\xDF\xD7\x12\x56\x75"
+			  "\xD0\x95\xB7\xB8\x91\x42\xF7\xFD"
+			  "\x97\x57\xCA\xC1\x20\xD0\x86\xB9"
+			  "\x66\x9D\xB4\x2B\x96\x22\xAC\x67",
+		.ilen	= 80,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x1C\x42\xA2\x00\x00"
+			  "\x80\x01\x44\x1F\x40\x67\x93\xB6"
+			  "\xE0\x00\x00\x02\x0A\x00\xF5\xFF"
+			  "\x01\x02\x02\x01",
+		.rlen	= 28,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x0B\xA7\x06\x52\xF6"
+			  "\x10\x60\xCF\x01\x6B\x4F\x97\x20"
+			  "\xEA\xB3\x23\x94\xC9\x21\x1D\x33"
+			  "\xA1\xE5\x90\x40\x05\x37\x45\x70"
+			  "\xB5\xD6\x09\x0A\x23\x73\x33\xF9"
+			  "\x08\xB4\x22\xE4",
+		.ilen	= 44,
+	}, {
+		.key	= "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\x6D\x6A\x8F\x94\x67\x30\x83\x08"
+			  "\xFE\xFF\xE9\x92\x86\x65\x73\x1C"
+			  "\xCA\xFE\xBA",
+		.klen	= 27,
+		.iv	= "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.result	= "\x45\x00\x00\x28\xA4\xAD\x40\x00"
+			  "\x40\x06\x78\x80\x0A\x01\x03\x8F"
+			  "\x0A\x01\x06\x12\x80\x23\x06\xB8"
+			  "\xCB\x71\x26\x02\xDD\x6B\xB0\x3E"
+			  "\x50\x10\x16\xD0\x75\x68\x00\x01",
+		.rlen	= 40,
+		.assoc	= "\x00\x00\xA5\xF8\x00\x00\x00\x0A"
+			  "\xFA\xCE\xDB\xAD\xDE\xCA\xF8\x88",
+		.alen	= 16,
+		.input	= "\x05\x22\x15\xD1\x52\x56\x85\x04"
+			  "\xA8\x5C\x5D\x6D\x7E\x6E\xF5\xFA"
+			  "\xEA\x16\x37\x50\xF3\xDF\x84\x3B"
+			  "\x2F\x32\x18\x57\x34\x2A\x8C\x23"
+			  "\x67\xDF\x6D\x35\x7B\x54\x0D\xFB"
+			  "\x34\xA5\x9F\x6C\x48\x30\x1E\x22"
+			  "\xFE\xB1\x22\x17\x17\x8A\xB9\x5B",
+		.ilen	= 56,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.result	= "\x45\x00\x00\x49\x33\xBA\x00\x00"
+			  "\x7F\x11\x91\x06\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xDD\x7B\x80\x03\x02\xD5"
+			  "\x00\x00\x4E\x20\x00\x1E\x8C\x18"
+			  "\xD7\x5B\x81\xDC\x91\xBA\xA0\x47"
+			  "\x6B\x91\xB9\x24\xB2\x80\x38\x9D"
+			  "\x92\xC9\x63\xBA\xC0\x46\xEC\x95"
+			  "\x9B\x62\x66\xC0\x47\x22\xB1\x49"
+			  "\x23\x01\x01\x01",
+		.rlen	= 76,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.input	= "\x92\xD0\x53\x79\x33\x38\xD5\xF3"
+			  "\x7D\xE4\x7A\x8E\x86\x03\xC9\x90"
+			  "\x96\x35\xAB\x9C\xFB\xE8\xA3\x76"
+			  "\xE9\xE9\xE2\xD1\x2E\x11\x0E\x00"
+			  "\xFA\xCE\xB5\x9E\x02\xA7\x7B\xEA"
+			  "\x71\x9A\x58\xFB\xA5\x8A\xE1\xB7"
+			  "\x9C\x39\x9D\xE3\xB5\x6E\x69\xE6"
+			  "\x63\xC9\xDB\x05\x69\x51\x12\xAD"
+			  "\x3E\x00\x32\x73\x86\xF2\xEE\xF5"
+			  "\x0F\xE8\x81\x7E\x84\xD3\xC0\x0D"
+			  "\x76\xD6\x55\xC6\xB4\xC2\x34\xC7"
+			  "\x12\x25\x0B\xF9",
+		.ilen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
+		.klen	= 35,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.result	= "\x45\x08\x00\x28\x73\x2C\x00\x00"
+			  "\x40\x06\xE9\xF9\x0A\x01\x06\x12"
+			  "\x0A\x01\x03\x8F\x06\xB8\x80\x23"
+			  "\xDD\x6B\xAF\xBE\xCB\x71\x26\x02"
+			  "\x50\x10\x1F\x64\x6D\x54\x00\x01",
+		.rlen	= 40,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.input	= "\xCC\x74\xB7\xD3\xB0\x38\x50\x42"
+			  "\x2C\x64\x87\x46\x1E\x34\x10\x05"
+			  "\x29\x6B\xBB\x36\xE9\x69\xAD\x92"
+			  "\x82\xA1\x10\x6A\xEB\x0F\xDC\x7D"
+			  "\x08\xBA\xF3\x91\xCA\xAA\x61\xDA"
+			  "\x62\xF4\x14\x61\x5C\x9D\xB5\xA7"
+			  "\xEE\xD7\xB9\x7E\x87\x99\x9B\x7D",
+		.ilen	= 56,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x49\x33\x3E\x00\x00"
+			  "\x7F\x11\x91\x82\xC3\xFB\x1D\x10"
+			  "\xC2\xB1\xD3\x26\xC0\x28\x31\xCE"
+			  "\x00\x35\xCB\x45\x80\x03\x02\x5B"
+			  "\x00\x00\x01\xE0\x00\x1E\x8C\x18"
+			  "\xD6\x57\x59\xD5\x22\x84\xA0\x35"
+			  "\x2C\x71\x47\x5C\x88\x80\x39\x1C"
+			  "\x76\x4D\x6E\x5E\xE0\x49\x6B\x32"
+			  "\x5A\xE2\x70\xC0\x38\x99\x49\x39"
+			  "\x15\x01\x01\x01",
+		.rlen	= 76,
+		.assoc	= "\x42\xF6\x7E\x3F\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x5E\xD6\x9A\x52\xF6"
+			  "\xEF\x70\x1A\x9C\xE8\xD3\x19\x86"
+			  "\xC8\x02\xF0\xB0\x03\x09\xD9\x02"
+			  "\xA0\xD2\x59\x04\xD1\x85\x2A\x24"
+			  "\x1C\x67\x3E\xD8\x68\x72\x06\x94"
+			  "\x97\xBA\x4F\x76\x8D\xB0\x44\x5B"
+			  "\x69\xBF\xD5\xE2\x3D\xF1\x0B\x0C"
+			  "\xC0\xBF\xB1\x8F\x70\x09\x9E\xCE"
+			  "\xA5\xF2\x55\x58\x84\xFA\xF9\xB5"
+			  "\x23\xF4\x84\x40\x74\x14\x8A\x6B"
+			  "\xDB\xD7\x67\xED\xA4\x93\xF3\x47"
+			  "\xCC\xF7\x46\x6F",
+		.ilen	= 92,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\x73\x61\x6C",
+		.klen	= 35,
+		.iv	= "\x61\x6E\x64\x01\x69\x76\x65\x63",
+		.result	= "\x63\x69\x73\x63\x6F\x01\x72\x75"
+			  "\x6C\x65\x73\x01\x74\x68\x65\x01"
+			  "\x6E\x65\x74\x77\x65\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x01\x74\x68\x65"
+			  "\x74\x65\x63\x68\x6E\x6F\x6C\x6F"
+			  "\x67\x69\x65\x73\x01\x74\x68\x61"
+			  "\x74\x77\x69\x6C\x6C\x01\x64\x65"
+			  "\x66\x69\x6E\x65\x74\x6F\x6D\x6F"
+			  "\x72\x72\x6F\x77\x01\x02\x02\x01",
+		.rlen	= 72,
+		.assoc	= "\x17\x40\x5E\x67\x15\x6F\x31\x26"
+			  "\xDD\x0D\xB9\x9B\x61\x6E\x64\x01"
+			  "\x69\x76\x65\x63",
+		.alen	= 20,
+		.input	= "\xEA\x15\xC4\x98\xAC\x15\x22\x37"
+			  "\x00\x07\x1D\xBE\x60\x5D\x73\x16"
+			  "\x4D\x0F\xCC\xCE\x8A\xD0\x49\xD4"
+			  "\x39\xA3\xD1\xB1\x21\x0A\x92\x1A"
+			  "\x2C\xCF\x8F\x9D\xC9\x91\x0D\xB4"
+			  "\x15\xFC\xBC\xA5\xC5\xBF\x54\xE5"
+			  "\x1C\xC7\x32\x41\x07\x7B\x2C\xB6"
+			  "\x5C\x23\x7C\x93\xEA\xEF\x23\x1C"
+			  "\x73\xF4\xE7\x12\x84\x4C\x37\x0A"
+			  "\x4A\x8F\x06\x37\x48\xF9\xF9\x05"
+			  "\x55\x13\x40\xC3\xD5\x55\x3A\x3D",
+		.ilen	= 88,
+	}, {
+		.key	= "\x7D\x77\x3D\x00\xC1\x44\xC5\x25"
+			  "\xAC\x61\x9D\x18\xC8\x4A\x3F\x47"
+			  "\xD9\x66\x42",
+		.klen	= 19,
+		.iv	= "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.result	= "\x01\x02\x02\x01",
+		.rlen	= 4,
+		.assoc	= "\x33\x54\x67\xAE\xFF\xFF\xFF\xFF"
+			  "\x43\x45\x7E\x91\x82\x44\x3B\xC6",
+		.alen	= 16,
+		.input	= "\x4C\x72\x63\x30\x2F\xE6\x56\xDD"
+			  "\xD0\xD8\x60\x9D\x8B\xEF\x85\x90"
+			  "\xF7\x61\x24\x62",
+		.ilen	= 20,
+	}, {
+		.key	= "\xAB\xBC\xCD\xDE\xF0\x01\x12\x23"
+			  "\x34\x45\x56\x67\x78\x89\x9A\xAB"
+			  "\xDE\xCA\xF8",
+		.klen	= 19,
+		.iv	= "\xCA\xFE\xDE\xBA\xCE\xFA\xCE\x74",
+		.result	= "\x74\x6F\x01\x62\x65\x01\x6F\x72"
+			  "\x01\x6E\x6F\x74\x01\x74\x6F\x01"
+			  "\x62\x65\x00\x01",
+		.rlen	= 20,
+		.assoc	= "\x00\x00\x01\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x01\xCA\xFE\xDE\xBA"
+			  "\xCE\xFA\xCE\x74",
+		.alen	= 20,
+		.input	= "\xA3\xBF\x52\x52\x65\x83\xBA\x81"
+			  "\x03\x9B\x84\xFC\x44\x8C\xBB\x81"
+			  "\x36\xE1\x78\xBB\xA5\x49\x3A\xD0"
+			  "\xF0\x6B\x21\xAF\x98\xC0\x34\xDC"
+			  "\x17\x17\x65\xAD",
+		.ilen	= 36,
+	}, {
+		.key	= "\x6C\x65\x67\x61\x6C\x69\x7A\x65"
+			  "\x6D\x61\x72\x69\x6A\x75\x61\x6E"
+			  "\x61\x61\x6E\x64\x64\x6F\x69\x74"
+			  "\x62\x65\x66\x6F\x72\x65\x69\x61"
+			  "\x74\x75\x72",
+		.klen	= 35,
+		.iv	= "\x33\x30\x21\x69\x67\x65\x74\x6D",
+		.result	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.rlen	= 52,
+		.assoc	= "\x79\x6B\x69\x63\xFF\xFF\xFF\xFF"
+			  "\xFF\xFF\xFF\xFF\x33\x30\x21\x69"
+			  "\x67\x65\x74\x6D",
+		.alen	= 20,
+		.input	= "\x96\xFD\x86\xF8\xD1\x98\xFF\x10"
+			  "\xAB\x8C\xDA\x8A\x5A\x08\x38\x1A"
+			  "\x48\x59\x80\x18\x1A\x18\x1A\x04"
+			  "\xC9\x0D\xE3\xE7\x0E\xA4\x0B\x75"
+			  "\x92\x9C\x52\x5C\x0B\xFB\xF8\xAF"
+			  "\x16\xC3\x35\xA8\xE7\xCE\x84\x04"
+			  "\xEB\x40\x6B\x7A\x8E\x75\xBB\x42"
+			  "\xE0\x63\x4B\x21\x44\xA2\x2B\x2B"
+			  "\x39\xDB\xC8\xDC",
+		.ilen	= 68,
+	}, {
+		.key	= "\x3D\xE0\x98\x74\xB3\x88\xE6\x49"
+			  "\x19\x88\xD0\xC3\x60\x7E\xAE\x1F"
+			  "\x57\x69\x0E",
+		.klen	= 19,
+		.iv	= "\x4E\x28\x00\x00\xA2\xFC\xA1\xA3",
+		.result	= "\x45\x00\x00\x30\xDA\x3A\x00\x00"
+			  "\x80\x01\xDF\x3B\xC0\xA8\x00\x05"
+			  "\xC0\xA8\x00\x01\x08\x00\xC6\xCD"
+			  "\x02\x00\x07\x00\x61\x62\x63\x64"
+			  "\x65\x66\x67\x68\x69\x6A\x6B\x6C"
+			  "\x6D\x6E\x6F\x70\x71\x72\x73\x74"
+			  "\x01\x02\x02\x01",
+		.rlen	= 52,
+		.assoc	= "\x3F\x7E\xF6\x42\x10\x10\x10\x10"
+			  "\x10\x10\x10\x10\x4E\x28\x00\x00"
+			  "\xA2\xFC\xA1\xA3",
+		.alen	= 20,
+		.input	= "\x6A\x6B\x45\x27\x3F\x9E\x52\xF6"
+			  "\x10\x60\x54\x25\xEB\x80\x04\x93"
+			  "\xCA\x1B\x23\x97\xCB\x21\x2E\x01"
+			  "\xA2\xE7\x95\x41\x30\xE4\x4B\x1B"
+			  "\x79\x01\x58\x50\x01\x06\xE1\xE0"
+			  "\x2C\x83\x79\xD3\xDE\x46\x97\x1A"
+			  "\x44\xCC\x90\xBF\x00\x94\x94\x92"
+			  "\x20\x17\x0C\x1B\x55\xDE\x7E\x68"
+			  "\xF4\x95\x5D\x4F",
+		.ilen	= 68,
+	}, {
+		.key	= "\x4C\x80\xCD\xEF\xBB\x5D\x10\xDA"
+			  "\x90\x6A\xC7\x3C\x36\x13\xA6\x34"
+			  "\x22\x43\x3C",
+		.klen	= 19,
+		.iv	= "\x48\x55\xEC\x7D\x3A\x23\x4B\xFD",
+		.result	= "\x08\x00\xC6\xCD\x02\x00\x07\x00"
+			  "\x61\x62\x63\x64\x65\x66\x67\x68"
+			  "\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70"
+			  "\x71\x72\x73\x74\x01\x02\x02\x01",
+		.rlen	= 32,
+		.assoc	= "\x00\x00\x43\x21\x87\x65\x43\x21"
+			  "\x00\x00\x00\x07\x48\x55\xEC\x7D"
+			  "\x3A\x23\x4B\xFD",
+		.alen	= 20,
+		.input	= "\x67\xE9\x28\xB3\x1C\xA4\x6D\x02"
+			  "\xF0\xB5\x37\xB6\x6B\x2F\xF5\x4F"
+			  "\xF8\xA3\x4C\x53\xB8\x12\x09\xBF"
+			  "\x58\x7D\xCF\x29\xA3\x41\x68\x6B"
+			  "\xCE\xE8\x79\x85\x3C\xB0\x3A\x8F"
+			  "\x16\xB0\xA1\x26\xC9\xBC\xBC\xA6",
+		.ilen	= 48,
+	}
+};
+
+/*
  * ChaCha20-Poly1305 AEAD test vectors from RFC7539 2.8.2./A.5.
  */
 #define RFC7539_ENC_TEST_VECTORS 2
@@ -22343,8 +23647,9 @@
 		.klen	= 36,
 		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
 		.assoc	= "\xf3\x33\x88\x86\x00\x00\x00\x00"
-			  "\x00\x00\x4e\x91",
-		.alen	= 12,
+			  "\x00\x00\x4e\x91\x01\x02\x03\x04"
+			  "\x05\x06\x07\x08",
+		.alen	= 20,
 		.input	= "\x49\x6e\x74\x65\x72\x6e\x65\x74"
 			  "\x2d\x44\x72\x61\x66\x74\x73\x20"
 			  "\x61\x72\x65\x20\x64\x72\x61\x66"
@@ -22430,8 +23735,9 @@
 		.klen	= 36,
 		.iv	= "\x01\x02\x03\x04\x05\x06\x07\x08",
 		.assoc	= "\xf3\x33\x88\x86\x00\x00\x00\x00"
-			  "\x00\x00\x4e\x91",
-		.alen	= 12,
+			  "\x00\x00\x4e\x91\x01\x02\x03\x04"
+			  "\x05\x06\x07\x08",
+		.alen	= 20,
 		.input	= "\x64\xa0\x86\x15\x75\x86\x1a\xf4"
 			  "\x60\xf0\x62\xc7\x9b\xe6\x43\xbd"
 			  "\x5e\x80\x5c\xfd\x34\x5c\xf3\x89"
@@ -30174,7 +31480,7 @@
 	},
 };
 
-#define CHACHA20_ENC_TEST_VECTORS 3
+#define CHACHA20_ENC_TEST_VECTORS 4
 static struct cipher_testvec chacha20_enc_tv_template[] = {
 	{ /* RFC7539 A.2. Test Vector #1 */
 		.key	= "\x00\x00\x00\x00\x00\x00\x00\x00"
@@ -30348,6 +31654,338 @@
 			  "\x87\xb5\x8d\xfd\x72\x8a\xfa\x36"
 			  "\x75\x7a\x79\x7a\xc1\x88\xd1",
 		.rlen	= 127,
+	}, { /* Self-made test vector for long data */
+		.key	= "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+			  "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+			  "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+			  "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+		.klen	= 32,
+		.iv     = "\x1c\x00\x00\x00\x00\x00\x00\x00"
+			  "\x00\x00\x00\x00\x00\x00\x00\x01",
+		.input	= "\x49\xee\xe0\xdc\x24\x90\x40\xcd"
+			  "\xc5\x40\x8f\x47\x05\xbc\xdd\x81"
+			  "\x47\xc6\x8d\xe6\xb1\x8f\xd7\xcb"
+			  "\x09\x0e\x6e\x22\x48\x1f\xbf\xb8"
+			  "\x5c\xf7\x1e\x8a\xc1\x23\xf2\xd4"
+			  "\x19\x4b\x01\x0f\x4e\xa4\x43\xce"
+			  "\x01\xc6\x67\xda\x03\x91\x18\x90"
+			  "\xa5\xa4\x8e\x45\x03\xb3\x2d\xac"
+			  "\x74\x92\xd3\x53\x47\xc8\xdd\x25"
+			  "\x53\x6c\x02\x03\x87\x0d\x11\x0c"
+			  "\x58\xe3\x12\x18\xfd\x2a\x5b\x40"
+			  "\x0c\x30\xf0\xb8\x3f\x43\xce\xae"
+			  "\x65\x3a\x7d\x7c\xf4\x54\xaa\xcc"
+			  "\x33\x97\xc3\x77\xba\xc5\x70\xde"
+			  "\xd7\xd5\x13\xa5\x65\xc4\x5f\x0f"
+			  "\x46\x1a\x0d\x97\xb5\xf3\xbb\x3c"
+			  "\x84\x0f\x2b\xc5\xaa\xea\xf2\x6c"
+			  "\xc9\xb5\x0c\xee\x15\xf3\x7d\xbe"
+			  "\x9f\x7b\x5a\xa6\xae\x4f\x83\xb6"
+			  "\x79\x49\x41\xf4\x58\x18\xcb\x86"
+			  "\x7f\x30\x0e\xf8\x7d\x44\x36\xea"
+			  "\x75\xeb\x88\x84\x40\x3c\xad\x4f"
+			  "\x6f\x31\x6b\xaa\x5d\xe5\xa5\xc5"
+			  "\x21\x66\xe9\xa7\xe3\xb2\x15\x88"
+			  "\x78\xf6\x79\xa1\x59\x47\x12\x4e"
+			  "\x9f\x9f\x64\x1a\xa0\x22\x5b\x08"
+			  "\xbe\x7c\x36\xc2\x2b\x66\x33\x1b"
+			  "\xdd\x60\x71\xf7\x47\x8c\x61\xc3"
+			  "\xda\x8a\x78\x1e\x16\xfa\x1e\x86"
+			  "\x81\xa6\x17\x2a\xa7\xb5\xc2\xe7"
+			  "\xa4\xc7\x42\xf1\xcf\x6a\xca\xb4"
+			  "\x45\xcf\xf3\x93\xf0\xe7\xea\xf6"
+			  "\xf4\xe6\x33\x43\x84\x93\xa5\x67"
+			  "\x9b\x16\x58\x58\x80\x0f\x2b\x5c"
+			  "\x24\x74\x75\x7f\x95\x81\xb7\x30"
+			  "\x7a\x33\xa7\xf7\x94\x87\x32\x27"
+			  "\x10\x5d\x14\x4c\x43\x29\xdd\x26"
+			  "\xbd\x3e\x3c\x0e\xfe\x0e\xa5\x10"
+			  "\xea\x6b\x64\xfd\x73\xc6\xed\xec"
+			  "\xa8\xc9\xbf\xb3\xba\x0b\x4d\x07"
+			  "\x70\xfc\x16\xfd\x79\x1e\xd7\xc5"
+			  "\x49\x4e\x1c\x8b\x8d\x79\x1b\xb1"
+			  "\xec\xca\x60\x09\x4c\x6a\xd5\x09"
+			  "\x49\x46\x00\x88\x22\x8d\xce\xea"
+			  "\xb1\x17\x11\xde\x42\xd2\x23\xc1"
+			  "\x72\x11\xf5\x50\x73\x04\x40\x47"
+			  "\xf9\x5d\xe7\xa7\x26\xb1\x7e\xb0"
+			  "\x3f\x58\xc1\x52\xab\x12\x67\x9d"
+			  "\x3f\x43\x4b\x68\xd4\x9c\x68\x38"
+			  "\x07\x8a\x2d\x3e\xf3\xaf\x6a\x4b"
+			  "\xf9\xe5\x31\x69\x22\xf9\xa6\x69"
+			  "\xc6\x9c\x96\x9a\x12\x35\x95\x1d"
+			  "\x95\xd5\xdd\xbe\xbf\x93\x53\x24"
+			  "\xfd\xeb\xc2\x0a\x64\xb0\x77\x00"
+			  "\x6f\x88\xc4\x37\x18\x69\x7c\xd7"
+			  "\x41\x92\x55\x4c\x03\xa1\x9a\x4b"
+			  "\x15\xe5\xdf\x7f\x37\x33\x72\xc1"
+			  "\x8b\x10\x67\xa3\x01\x57\x94\x25"
+			  "\x7b\x38\x71\x7e\xdd\x1e\xcc\x73"
+			  "\x55\xd2\x8e\xeb\x07\xdd\xf1\xda"
+			  "\x58\xb1\x47\x90\xfe\x42\x21\x72"
+			  "\xa3\x54\x7a\xa0\x40\xec\x9f\xdd"
+			  "\xc6\x84\x6e\xca\xae\xe3\x68\xb4"
+			  "\x9d\xe4\x78\xff\x57\xf2\xf8\x1b"
+			  "\x03\xa1\x31\xd9\xde\x8d\xf5\x22"
+			  "\x9c\xdd\x20\xa4\x1e\x27\xb1\x76"
+			  "\x4f\x44\x55\xe2\x9b\xa1\x9c\xfe"
+			  "\x54\xf7\x27\x1b\xf4\xde\x02\xf5"
+			  "\x1b\x55\x48\x5c\xdc\x21\x4b\x9e"
+			  "\x4b\x6e\xed\x46\x23\xdc\x65\xb2"
+			  "\xcf\x79\x5f\x28\xe0\x9e\x8b\xe7"
+			  "\x4c\x9d\x8a\xff\xc1\xa6\x28\xb8"
+			  "\x65\x69\x8a\x45\x29\xef\x74\x85"
+			  "\xde\x79\xc7\x08\xae\x30\xb0\xf4"
+			  "\xa3\x1d\x51\x41\xab\xce\xcb\xf6"
+			  "\xb5\xd8\x6d\xe0\x85\xe1\x98\xb3"
+			  "\x43\xbb\x86\x83\x0a\xa0\xf5\xb7"
+			  "\x04\x0b\xfa\x71\x1f\xb0\xf6\xd9"
+			  "\x13\x00\x15\xf0\xc7\xeb\x0d\x5a"
+			  "\x9f\xd7\xb9\x6c\x65\x14\x22\x45"
+			  "\x6e\x45\x32\x3e\x7e\x60\x1a\x12"
+			  "\x97\x82\x14\xfb\xaa\x04\x22\xfa"
+			  "\xa0\xe5\x7e\x8c\x78\x02\x48\x5d"
+			  "\x78\x33\x5a\x7c\xad\xdb\x29\xce"
+			  "\xbb\x8b\x61\xa4\xb7\x42\xe2\xac"
+			  "\x8b\x1a\xd9\x2f\x0b\x8b\x62\x21"
+			  "\x83\x35\x7e\xad\x73\xc2\xb5\x6c"
+			  "\x10\x26\x38\x07\xe5\xc7\x36\x80"
+			  "\xe2\x23\x12\x61\xf5\x48\x4b\x2b"
+			  "\xc5\xdf\x15\xd9\x87\x01\xaa\xac"
+			  "\x1e\x7c\xad\x73\x78\x18\x63\xe0"
+			  "\x8b\x9f\x81\xd8\x12\x6a\x28\x10"
+			  "\xbe\x04\x68\x8a\x09\x7c\x1b\x1c"
+			  "\x83\x66\x80\x47\x80\xe8\xfd\x35"
+			  "\x1c\x97\x6f\xae\x49\x10\x66\xcc"
+			  "\xc6\xd8\xcc\x3a\x84\x91\x20\x77"
+			  "\x72\xe4\x24\xd2\x37\x9f\xc5\xc9"
+			  "\x25\x94\x10\x5f\x40\x00\x64\x99"
+			  "\xdc\xae\xd7\x21\x09\x78\x50\x15"
+			  "\xac\x5f\xc6\x2c\xa2\x0b\xa9\x39"
+			  "\x87\x6e\x6d\xab\xde\x08\x51\x16"
+			  "\xc7\x13\xe9\xea\xed\x06\x8e\x2c"
+			  "\xf8\x37\x8c\xf0\xa6\x96\x8d\x43"
+			  "\xb6\x98\x37\xb2\x43\xed\xde\xdf"
+			  "\x89\x1a\xe7\xeb\x9d\xa1\x7b\x0b"
+			  "\x77\xb0\xe2\x75\xc0\xf1\x98\xd9"
+			  "\x80\x55\xc9\x34\x91\xd1\x59\xe8"
+			  "\x4b\x0f\xc1\xa9\x4b\x7a\x84\x06"
+			  "\x20\xa8\x5d\xfa\xd1\xde\x70\x56"
+			  "\x2f\x9e\x91\x9c\x20\xb3\x24\xd8"
+			  "\x84\x3d\xe1\x8c\x7e\x62\x52\xe5"
+			  "\x44\x4b\x9f\xc2\x93\x03\xea\x2b"
+			  "\x59\xc5\xfa\x3f\x91\x2b\xbb\x23"
+			  "\xf5\xb2\x7b\xf5\x38\xaf\xb3\xee"
+			  "\x63\xdc\x7b\xd1\xff\xaa\x8b\xab"
+			  "\x82\x6b\x37\x04\xeb\x74\xbe\x79"
+			  "\xb9\x83\x90\xef\x20\x59\x46\xff"
+			  "\xe9\x97\x3e\x2f\xee\xb6\x64\x18"
+			  "\x38\x4c\x7a\x4a\xf9\x61\xe8\x9a"
+			  "\xa1\xb5\x01\xa6\x47\xd3\x11\xd4"
+			  "\xce\xd3\x91\x49\x88\xc7\xb8\x4d"
+			  "\xb1\xb9\x07\x6d\x16\x72\xae\x46"
+			  "\x5e\x03\xa1\x4b\xb6\x02\x30\xa8"
+			  "\x3d\xa9\x07\x2a\x7c\x19\xe7\x62"
+			  "\x87\xe3\x82\x2f\x6f\xe1\x09\xd9"
+			  "\x94\x97\xea\xdd\x58\x9e\xae\x76"
+			  "\x7e\x35\xe5\xb4\xda\x7e\xf4\xde"
+			  "\xf7\x32\x87\xcd\x93\xbf\x11\x56"
+			  "\x11\xbe\x08\x74\xe1\x69\xad\xe2"
+			  "\xd7\xf8\x86\x75\x8a\x3c\xa4\xbe"
+			  "\x70\xa7\x1b\xfc\x0b\x44\x2a\x76"
+			  "\x35\xea\x5d\x85\x81\xaf\x85\xeb"
+			  "\xa0\x1c\x61\xc2\xf7\x4f\xa5\xdc"
+			  "\x02\x7f\xf6\x95\x40\x6e\x8a\x9a"
+			  "\xf3\x5d\x25\x6e\x14\x3a\x22\xc9"
+			  "\x37\x1c\xeb\x46\x54\x3f\xa5\x91"
+			  "\xc2\xb5\x8c\xfe\x53\x08\x97\x32"
+			  "\x1b\xb2\x30\x27\xfe\x25\x5d\xdc"
+			  "\x08\x87\xd0\xe5\x94\x1a\xd4\xf1"
+			  "\xfe\xd6\xb4\xa3\xe6\x74\x81\x3c"
+			  "\x1b\xb7\x31\xa7\x22\xfd\xd4\xdd"
+			  "\x20\x4e\x7c\x51\xb0\x60\x73\xb8"
+			  "\x9c\xac\x91\x90\x7e\x01\xb0\xe1"
+			  "\x8a\x2f\x75\x1c\x53\x2a\x98\x2a"
+			  "\x06\x52\x95\x52\xb2\xe9\x25\x2e"
+			  "\x4c\xe2\x5a\x00\xb2\x13\x81\x03"
+			  "\x77\x66\x0d\xa5\x99\xda\x4e\x8c"
+			  "\xac\xf3\x13\x53\x27\x45\xaf\x64"
+			  "\x46\xdc\xea\x23\xda\x97\xd1\xab"
+			  "\x7d\x6c\x30\x96\x1f\xbc\x06\x34"
+			  "\x18\x0b\x5e\x21\x35\x11\x8d\x4c"
+			  "\xe0\x2d\xe9\x50\x16\x74\x81\xa8"
+			  "\xb4\x34\xb9\x72\x42\xa6\xcc\xbc"
+			  "\xca\x34\x83\x27\x10\x5b\x68\x45"
+			  "\x8f\x52\x22\x0c\x55\x3d\x29\x7c"
+			  "\xe3\xc0\x66\x05\x42\x91\x5f\x58"
+			  "\xfe\x4a\x62\xd9\x8c\xa9\x04\x19"
+			  "\x04\xa9\x08\x4b\x57\xfc\x67\x53"
+			  "\x08\x7c\xbc\x66\x8a\xb0\xb6\x9f"
+			  "\x92\xd6\x41\x7c\x5b\x2a\x00\x79"
+			  "\x72",
+		.ilen	= 1281,
+		.result	= "\x45\xe8\xe0\xb6\x9c\xca\xfd\x87"
+			  "\xe8\x1d\x37\x96\x8a\xe3\x40\x35"
+			  "\xcf\x5e\x3a\x46\x3d\xfb\xd0\x69"
+			  "\xde\xaf\x7a\xd5\x0d\xe9\x52\xec"
+			  "\xc2\x82\xe5\x3e\x7d\xb2\x4a\xd9"
+			  "\xbb\xc3\x9f\xc0\x5d\xac\x93\x8d"
+			  "\x0e\x6f\xd3\xd7\xfb\x6a\x0d\xce"
+			  "\x92\x2c\xf7\xbb\x93\x57\xcc\xee"
+			  "\x42\x72\x6f\xc8\x4b\xd2\x76\xbf"
+			  "\xa0\xe3\x7a\x39\xf9\x5c\x8e\xfd"
+			  "\xa1\x1d\x41\xe5\x08\xc1\x1c\x11"
+			  "\x92\xfd\x39\x5c\x51\xd0\x2f\x66"
+			  "\x33\x4a\x71\x15\xfe\xee\x12\x54"
+			  "\x8c\x8f\x34\xd8\x50\x3c\x18\xa6"
+			  "\xc5\xe1\x46\x8a\xfb\x5f\x7e\x25"
+			  "\x9b\xe2\xc3\x66\x41\x2b\xb3\xa5"
+			  "\x57\x0e\x94\x17\x26\x39\xbb\x54"
+			  "\xae\x2e\x6f\x42\xfb\x4d\x89\x6f"
+			  "\x9d\xf1\x16\x2e\xe3\xe7\xfc\xe3"
+			  "\xb2\x4b\x2b\xa6\x7c\x04\x69\x3a"
+			  "\x70\x5a\xa7\xf1\x31\x64\x19\xca"
+			  "\x45\x79\xd8\x58\x23\x61\xaf\xc2"
+			  "\x52\x05\xc3\x0b\xc1\x64\x7c\x81"
+			  "\xd9\x11\xcf\xff\x02\x3d\x51\x84"
+			  "\x01\xac\xc6\x2e\x34\x2b\x09\x3a"
+			  "\xa8\x5d\x98\x0e\x89\xd9\xef\x8f"
+			  "\xd9\xd7\x7d\xdd\x63\x47\x46\x7d"
+			  "\xa1\xda\x0b\x53\x7d\x79\xcd\xc9"
+			  "\x86\xdd\x6b\x13\xa1\x9a\x70\xdd"
+			  "\x5c\xa1\x69\x3c\xe4\x5d\xe3\x8c"
+			  "\xe5\xf4\x87\x9c\x10\xcf\x0f\x0b"
+			  "\xc8\x43\xdc\xf8\x1d\x62\x5e\x5b"
+			  "\xe2\x03\x06\xc5\x71\xb6\x48\xa5"
+			  "\xf0\x0f\x2d\xd5\xa2\x73\x55\x8f"
+			  "\x01\xa7\x59\x80\x5f\x11\x6c\x40"
+			  "\xff\xb1\xf2\xc6\x7e\x01\xbb\x1c"
+			  "\x69\x9c\xc9\x3f\x71\x5f\x07\x7e"
+			  "\xdf\x6f\x99\xca\x9c\xfd\xf9\xb9"
+			  "\x49\xe7\xcc\x91\xd5\x9b\x8f\x03"
+			  "\xae\xe7\x61\x32\xef\x41\x6c\x75"
+			  "\x84\x9b\x8c\xce\x1d\x6b\x93\x21"
+			  "\x41\xec\xc6\xad\x8e\x0c\x48\xa8"
+			  "\xe2\xf5\x57\xde\xf7\x38\xfd\x4a"
+			  "\x6f\xa7\x4a\xf9\xac\x7d\xb1\x85"
+			  "\x7d\x6c\x95\x0a\x5a\xcf\x68\xd2"
+			  "\xe0\x7a\x26\xd9\xc1\x6d\x3e\xc6"
+			  "\x37\xbd\xbe\x24\x36\x77\x9f\x1b"
+			  "\xc1\x22\xf3\x79\xae\x95\x78\x66"
+			  "\x97\x11\xc0\x1a\xf1\xe8\x0d\x38"
+			  "\x09\xc2\xee\xb7\xd3\x46\x7b\x59"
+			  "\x77\x23\xe8\xb4\x92\x3d\x78\xbe"
+			  "\xe2\x25\x63\xa5\x2a\x06\x70\x92"
+			  "\x32\x63\xf9\x19\x21\x68\xe1\x0b"
+			  "\x9a\xd0\xee\x21\xdb\x1f\xe0\xde"
+			  "\x3e\x64\x02\x4d\x0e\xe0\x0a\xa9"
+			  "\xed\x19\x8c\xa8\xbf\xe3\x2e\x75"
+			  "\x24\x2b\xb0\xe5\x82\x6a\x1e\x6f"
+			  "\x71\x2a\x3a\x60\xed\x06\x0d\x17"
+			  "\xa2\xdb\x29\x1d\xae\xb2\xc4\xfb"
+			  "\x94\x04\xd8\x58\xfc\xc4\x04\x4e"
+			  "\xee\xc7\xc1\x0f\xe9\x9b\x63\x2d"
+			  "\x02\x3e\x02\x67\xe5\xd8\xbb\x79"
+			  "\xdf\xd2\xeb\x50\xe9\x0a\x02\x46"
+			  "\xdf\x68\xcf\xe7\x2b\x0a\x56\xd6"
+			  "\xf7\xbc\x44\xad\xb8\xb5\x5f\xeb"
+			  "\xbc\x74\x6b\xe8\x7e\xb0\x60\xc6"
+			  "\x0d\x96\x09\xbb\x19\xba\xe0\x3c"
+			  "\xc4\x6c\xbf\x0f\x58\xc0\x55\x62"
+			  "\x23\xa0\xff\xb5\x1c\xfd\x18\xe1"
+			  "\xcf\x6d\xd3\x52\xb4\xce\xa6\xfa"
+			  "\xaa\xfb\x1b\x0b\x42\x6d\x79\x42"
+			  "\x48\x70\x5b\x0e\xdd\x3a\xc9\x69"
+			  "\x8b\x73\x67\xf6\x95\xdb\x8c\xfb"
+			  "\xfd\xb5\x08\x47\x42\x84\x9a\xfa"
+			  "\xcc\x67\xb2\x3c\xb6\xfd\xd8\x32"
+			  "\xd6\x04\xb6\x4a\xea\x53\x4b\xf5"
+			  "\x94\x16\xad\xf0\x10\x2e\x2d\xb4"
+			  "\x8b\xab\xe5\x89\xc7\x39\x12\xf3"
+			  "\x8d\xb5\x96\x0b\x87\x5d\xa7\x7c"
+			  "\xb0\xc2\xf6\x2e\x57\x97\x2c\xdc"
+			  "\x54\x1c\x34\x72\xde\x0c\x68\x39"
+			  "\x9d\x32\xa5\x75\x92\x13\x32\xea"
+			  "\x90\x27\xbd\x5b\x1d\xb9\x21\x02"
+			  "\x1c\xcc\xba\x97\x5e\x49\x58\xe8"
+			  "\xac\x8b\xf3\xce\x3c\xf0\x00\xe9"
+			  "\x6c\xae\xe9\x77\xdf\xf4\x02\xcd"
+			  "\x55\x25\x89\x9e\x90\xf3\x6b\x8f"
+			  "\xb7\xd6\x47\x98\x26\x2f\x31\x2f"
+			  "\x8d\xbf\x54\xcd\x99\xeb\x80\xd7"
+			  "\xac\xc3\x08\xc2\xa6\x32\xf1\x24"
+			  "\x76\x7c\x4f\x78\x53\x55\xfb\x00"
+			  "\x8a\xd6\x52\x53\x25\x45\xfb\x0a"
+			  "\x6b\xb9\xbe\x3c\x5e\x11\xcc\x6a"
+			  "\xdd\xfc\xa7\xc4\x79\x4d\xbd\xfb"
+			  "\xce\x3a\xf1\x7a\xda\xeb\xfe\x64"
+			  "\x28\x3d\x0f\xee\x80\xba\x0c\xf8"
+			  "\xe9\x5b\x3a\xd4\xae\xc9\xf3\x0e"
+			  "\xe8\x5d\xc5\x5c\x0b\x20\x20\xee"
+			  "\x40\x0d\xde\x07\xa7\x14\xb4\x90"
+			  "\xb6\xbd\x3b\xae\x7d\x2b\xa7\xc7"
+			  "\xdc\x0b\x4c\x5d\x65\xb0\xd2\xc5"
+			  "\x79\x61\x23\xe0\xa2\x99\x73\x55"
+			  "\xad\xc6\xfb\xc7\x54\xb5\x98\x1f"
+			  "\x8c\x86\xc2\x3f\xbe\x5e\xea\x64"
+			  "\xa3\x60\x18\x9f\x80\xaf\x52\x74"
+			  "\x1a\xfe\x22\xc2\x92\x67\x40\x02"
+			  "\x08\xee\x67\x5b\x67\xe0\x3d\xde"
+			  "\x7a\xaf\x8e\x28\xf3\x5e\x0e\xf4"
+			  "\x48\x56\xaa\x85\x22\xd8\x36\xed"
+			  "\x3b\x3d\x68\x69\x30\xbc\x71\x23"
+			  "\xb1\x6e\x61\x03\x89\x44\x03\xf4"
+			  "\x32\xaa\x4c\x40\x9f\x69\xfb\x70"
+			  "\x91\xcc\x1f\x11\xbd\x76\x67\xe6"
+			  "\x10\x8b\x29\x39\x68\xea\x4e\x6d"
+			  "\xae\xfb\x40\xcf\xe2\xd0\x0d\x8d"
+			  "\x6f\xed\x9b\x8d\x64\x7a\x94\x8e"
+			  "\x32\x38\x78\xeb\x7d\x5f\xf9\x4d"
+			  "\x13\xbe\x21\xea\x16\xe7\x5c\xee"
+			  "\xcd\xf6\x5f\xc6\x45\xb2\x8f\x2b"
+			  "\xb5\x93\x3e\x45\xdb\xfd\xa2\x6a"
+			  "\xec\x83\x92\x99\x87\x47\xe0\x7c"
+			  "\xa2\x7b\xc4\x2a\xcd\xc0\x81\x03"
+			  "\x98\xb0\x87\xb6\x86\x13\x64\x33"
+			  "\x4c\xd7\x99\xbf\xdb\x7b\x6e\xaa"
+			  "\x76\xcc\xa0\x74\x1b\xa3\x6e\x83"
+			  "\xd4\xba\x7a\x84\x9d\x91\x71\xcd"
+			  "\x60\x2d\x56\xfd\x26\x35\xcb\xeb"
+			  "\xac\xe9\xee\xa4\xfc\x18\x5b\x91"
+			  "\xd5\xfe\x84\x45\xe0\xc7\xfd\x11"
+			  "\xe9\x00\xb6\x54\xdf\xe1\x94\xde"
+			  "\x2b\x70\x9f\x94\x7f\x15\x0e\x83"
+			  "\x63\x10\xb3\xf5\xea\xd3\xe8\xd1"
+			  "\xa5\xfc\x17\x19\x68\x9a\xbc\x17"
+			  "\x30\x43\x0a\x1a\x33\x92\xd4\x2a"
+			  "\x2e\x68\x99\xbc\x49\xf0\x68\xe3"
+			  "\xf0\x1f\xcb\xcc\xfa\xbb\x05\x56"
+			  "\x46\x84\x8b\x69\x83\x64\xc5\xe0"
+			  "\xc5\x52\x99\x07\x3c\xa6\x5c\xaf"
+			  "\xa3\xde\xd7\xdb\x43\xe6\xb7\x76"
+			  "\x4e\x4d\xd6\x71\x60\x63\x4a\x0c"
+			  "\x5f\xae\x25\x84\x22\x90\x5f\x26"
+			  "\x61\x4d\x8f\xaf\xc9\x22\xf2\x05"
+			  "\xcf\xc1\xdc\x68\xe5\x57\x8e\x24"
+			  "\x1b\x30\x59\xca\xd7\x0d\xc3\xd3"
+			  "\x52\x9e\x09\x3e\x0e\xaf\xdb\x5f"
+			  "\xc7\x2b\xde\x3a\xfd\xad\x93\x04"
+			  "\x74\x06\x89\x0e\x90\xeb\x85\xff"
+			  "\xe6\x3c\x12\x42\xf4\xfa\x80\x75"
+			  "\x5e\x4e\xd7\x2f\x93\x0b\x34\x41"
+			  "\x02\x85\x68\xd0\x03\x12\xde\x92"
+			  "\x54\x7a\x7e\xfb\x55\xe7\x88\xfb"
+			  "\xa4\xa9\xf2\xd1\xc6\x70\x06\x37"
+			  "\x25\xee\xa7\x6e\xd9\x89\x86\x50"
+			  "\x2e\x07\xdb\xfb\x2a\x86\x45\x0e"
+			  "\x91\xf4\x7c\xbb\x12\x60\xe8\x3f"
+			  "\x71\xbe\x8f\x9d\x26\xef\xd9\x89"
+			  "\xc4\x8f\xd8\xc5\x73\xd8\x84\xaa"
+			  "\x2f\xad\x22\x1e\x7e\xcf\xa2\x08"
+			  "\x23\x45\x89\x42\xa0\x30\xeb\xbf"
+			  "\xa1\xed\xad\xd5\x76\xfa\x24\x8f"
+			  "\x98",
+		.rlen	= 1281,
 	},
 };
 
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 6e973b8..46b4a8e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -176,6 +176,8 @@
 
 source "drivers/mcb/Kconfig"
 
+source "drivers/perf/Kconfig"
+
 source "drivers/ras/Kconfig"
 
 source "drivers/thunderbolt/Kconfig"
@@ -184,4 +186,6 @@
 
 source "drivers/nvdimm/Kconfig"
 
+source "drivers/nvmem/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b64b49f..b250b36 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -11,7 +11,7 @@
 obj-$(CONFIG_GENERIC_PHY)	+= phy/
 
 # GPIO must come after pinctrl as gpios may need to mux pins etc
-obj-y				+= pinctrl/
+obj-$(CONFIG_PINCTRL)		+= pinctrl/
 obj-y				+= gpio/
 obj-y				+= pwm/
 obj-$(CONFIG_PCI)		+= pci/
@@ -161,7 +161,9 @@
 obj-$(CONFIG_FMC)		+= fmc/
 obj-$(CONFIG_POWERCAP)		+= powercap/
 obj-$(CONFIG_MCB)		+= mcb/
+obj-$(CONFIG_PERF_EVENTS)	+= perf/
 obj-$(CONFIG_RAS)		+= ras/
 obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
 obj-$(CONFIG_CORESIGHT)		+= hwtracing/coresight/
 obj-$(CONFIG_ANDROID)		+= android/
+obj-$(CONFIG_NVMEM)		+= nvmem/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 114cf48..54e9729 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -189,17 +189,24 @@
 	  This driver supports ACPI-controlled docking stations and removable
 	  drive bays such as the IBM Ultrabay and the Dell Module Bay.
 
+config ACPI_CPU_FREQ_PSS
+	bool
+	select THERMAL
+
+config ACPI_PROCESSOR_IDLE
+	bool
+	select CPU_IDLE
+
 config ACPI_PROCESSOR
 	tristate "Processor"
-	select THERMAL
-	select CPU_IDLE
 	depends on X86 || IA64
+	select ACPI_PROCESSOR_IDLE
+	select ACPI_CPU_FREQ_PSS
 	default y
 	help
-	  This driver installs ACPI as the idle handler for Linux and uses
-	  ACPI C2 and C3 processor states to save power on systems that
-	  support it.  It is required by several flavors of cpufreq
-	  performance-state drivers.
+	  This driver adds support for the ACPI Processor package. It is required
+	  by several flavors of cpufreq performance-state, thermal, throttling and
+	  idle drivers.
 
 	  To compile this driver as a module, choose M here:
 	  the module will be called processor.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 8321430..b5e7cd8 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@
 # Power management related files
 acpi-y				+= wakeup.o
 acpi-$(CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT) += sleep.o
-acpi-y				+= device_pm.o
+acpi-y				+= device_sysfs.o device_pm.o
 acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o
 
 
@@ -80,8 +80,10 @@
 obj-$(CONFIG_ACPI_BGRT)		+= bgrt.o
 
 # processor has its own "processor." module_param namespace
-processor-y			:= processor_driver.o processor_throttling.o
-processor-y			+= processor_idle.o processor_thermal.o
+processor-y			:= processor_driver.o
+processor-$(CONFIG_ACPI_PROCESSOR_IDLE) += processor_idle.o
+processor-$(CONFIG_ACPI_CPU_FREQ_PSS)	+= processor_throttling.o	\
+	processor_thermal.o
 processor-$(CONFIG_CPU_FREQ)	+= processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 9b5354a..f71b756 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 3984ea9..a450e7a 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -16,7 +16,6 @@
 #include <linux/clkdev.h>
 #include <linux/acpi.h>
 #include <linux/err.h>
-#include <linux/clk.h>
 #include <linux/pm.h>
 
 #include "internal.h"
diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
index ac0f52f..f77956c 100644
--- a/drivers/acpi/acpi_ipmi.c
+++ b/drivers/acpi/acpi_ipmi.c
@@ -17,10 +17,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 46b58ab..f51bd0d 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -11,7 +11,6 @@
  */
 
 #include <linux/acpi.h>
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
@@ -60,6 +59,7 @@
 #define LPSS_CLK_DIVIDER		BIT(2)
 #define LPSS_LTR			BIT(3)
 #define LPSS_SAVE_CTX			BIT(4)
+#define LPSS_NO_D3_DELAY		BIT(5)
 
 struct lpss_private_data;
 
@@ -156,6 +156,10 @@
 	.flags = LPSS_SAVE_CTX,
 };
 
+static const struct lpss_device_desc bsw_pwm_dev_desc = {
+	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+};
+
 static const struct lpss_device_desc byt_uart_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.clk_con_id = "baudclk",
@@ -163,6 +167,14 @@
 	.setup = lpss_uart_setup,
 };
 
+static const struct lpss_device_desc bsw_uart_dev_desc = {
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+			| LPSS_NO_D3_DELAY,
+	.clk_con_id = "baudclk",
+	.prv_offset = 0x800,
+	.setup = lpss_uart_setup,
+};
+
 static const struct lpss_device_desc byt_spi_dev_desc = {
 	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
 	.prv_offset = 0x400,
@@ -178,8 +190,15 @@
 	.setup = byt_i2c_setup,
 };
 
+static const struct lpss_device_desc bsw_i2c_dev_desc = {
+	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+	.prv_offset = 0x800,
+	.setup = byt_i2c_setup,
+};
+
 static struct lpss_device_desc bsw_spi_dev_desc = {
-	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
+			| LPSS_NO_D3_DELAY,
 	.prv_offset = 0x400,
 	.setup = lpss_deassert_reset,
 };
@@ -214,11 +233,12 @@
 	{ "INT33FC", },
 
 	/* Braswell LPSS devices */
-	{ "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
-	{ "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
+	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
+	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
 	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
-	{ "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
+	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
 
+	/* Broadwell LPSS devices */
 	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
 	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
 	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
@@ -558,9 +578,14 @@
 	 * The following delay is needed or the subsequent write operations may
 	 * fail. The LPSS devices are actually PCI devices and the PCI spec
 	 * expects 10ms delay before the device can be accessed after D3 to D0
-	 * transition.
+	 * transition. However, some platforms such as BSW do not need this delay.
 	 */
-	msleep(10);
+	unsigned int delay = 10;	/* default 10ms delay */
+
+	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
+		delay = 0;
+
+	msleep(delay);
 
 	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
 		unsigned long offset = i * sizeof(u32);
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index ee28f4d..6b0d3ef 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -16,11 +16,6 @@
  * NON INFRINGEMENT.  See the GNU General Public License for more
  * details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
  * ACPI based HotPlug driver that supports Memory Hotplug
  * This driver fields notifications from firmware for memory add
  * and remove operations and alerts the VM of the affected memory
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 00b3980..ae307ff 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -12,10 +12,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index ff6d8ad..fb76552 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -153,6 +153,7 @@
 	{"AEI0250"},		/* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
 	{"AEI1240"},		/* Actiontec ISA PNP 56K X2 Fax Modem */
 	{"AKY1021"},		/* Rockwell 56K ACF II Fax+Data+Voice Modem */
+	{"ALI5123"},		/* ALi Fast Infrared Controller */
 	{"AZT4001"},		/* AZT3005 PnP SOUND DEVICE */
 	{"BDP3336"},		/* Best Data Products Inc. Smart One 336F PnP Modem */
 	{"BRI0A49"},		/* Boca Complete Ofc Communicator 14.4 Data-FAX */
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 92a5f73..985b8a8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -485,7 +485,7 @@
 	{ }
 };
 
-static struct acpi_scan_handler __refdata processor_handler = {
+static struct acpi_scan_handler processor_handler = {
 	.ids = processor_device_ids,
 	.attach = acpi_processor_add,
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 8c2fe2f..5778e8e 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -17,10 +17,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index c1a9635..fedcc16 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -11,6 +11,7 @@
 acpi-y :=		\
 	dsargs.o	\
 	dscontrol.o	\
+	dsdebug.o	\
 	dsfield.o	\
 	dsinit.o	\
 	dsmethod.o	\
@@ -164,6 +165,7 @@
 	utmath.o	\
 	utmisc.o	\
 	utmutex.o	\
+	utnonansi.o	\
 	utobject.o	\
 	utosi.o		\
 	utownerid.o	\
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 43685dd..eb2e926 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -67,9 +67,6 @@
 };
 
 #define PARAM_LIST(pl)                  pl
-#define DBTEST_OUTPUT_LEVEL(lvl)        if (acpi_gbl_db_opt_verbose)
-#define VERBOSE_PRINT(fp)               DBTEST_OUTPUT_LEVEL(lvl) {\
-			  acpi_os_printf PARAM_LIST(fp);}
 
 #define EX_NO_SINGLE_STEP               1
 #define EX_SINGLE_STEP                  2
@@ -77,10 +74,6 @@
 /*
  * dbxface - external debugger interfaces
  */
-acpi_status acpi_db_initialize(void);
-
-void acpi_db_terminate(void);
-
 acpi_status
 acpi_db_single_step(struct acpi_walk_state *walk_state,
 		    union acpi_parse_object *op, u32 op_type);
@@ -102,6 +95,8 @@
 
 acpi_status acpi_db_sleep(char *object_arg);
 
+void acpi_db_trace(char *enable_arg, char *method_arg, char *once_arg);
+
 void acpi_db_display_locks(void);
 
 void acpi_db_display_resources(char *object_arg);
@@ -262,6 +257,23 @@
 			     char **next, acpi_object_type * return_type);
 
 /*
+ * dbobject
+ */
+void acpi_db_decode_internal_object(union acpi_operand_object *obj_desc);
+
+void
+acpi_db_display_internal_object(union acpi_operand_object *obj_desc,
+				struct acpi_walk_state *walk_state);
+
+void acpi_db_decode_arguments(struct acpi_walk_state *walk_state);
+
+void acpi_db_decode_locals(struct acpi_walk_state *walk_state);
+
+void
+acpi_db_dump_method_info(acpi_status status,
+			 struct acpi_walk_state *walk_state);
+
+/*
  * dbstats - Generation and display of ACPI table statistics
  */
 void acpi_db_generate_statistics(union acpi_parse_object *root, u8 is_method);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 408f04b..7094dc8 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -354,4 +354,12 @@
 acpi_ds_result_push(union acpi_operand_object *object,
 		    struct acpi_walk_state *walk_state);
 
+/*
+ * dsdebug - parser debugging routines
+ */
+void
+acpi_ds_dump_method_stack(acpi_status status,
+			  struct acpi_walk_state *walk_state,
+			  union acpi_parse_object *op);
+
 #endif				/* _ACDISPAT_H_ */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 53f96a3..09f37b5 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -58,11 +58,12 @@
 
 ACPI_GLOBAL(struct acpi_table_header *, acpi_gbl_DSDT);
 ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX);
 
 #if (!ACPI_REDUCED_HARDWARE)
 ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS);
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs32);
-ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_facs64);
 
 #endif				/* !ACPI_REDUCED_HARDWARE */
 
@@ -235,6 +236,10 @@
 
 ACPI_GLOBAL(struct acpi_thread_state *, acpi_gbl_current_walk_list);
 
+/* Maximum number of While() loop iterations before forced abort */
+
+ACPI_GLOBAL(u16, acpi_gbl_max_loop_iterations);
+
 /* Control method single step flag */
 
 ACPI_GLOBAL(u8, acpi_gbl_cm_single_step);
@@ -290,8 +295,6 @@
 
 ACPI_GLOBAL(u32, acpi_gbl_original_dbg_level);
 ACPI_GLOBAL(u32, acpi_gbl_original_dbg_layer);
-ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_level);
-ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
 
 /*****************************************************************************
  *
@@ -309,9 +312,10 @@
 ACPI_INIT_GLOBAL(u8, acpi_gbl_ignore_noop_operator, FALSE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_cstyle_disassembly, TRUE);
 ACPI_INIT_GLOBAL(u8, acpi_gbl_force_aml_disassembly, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_dm_opt_verbose, TRUE);
 
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_disasm);
-ACPI_GLOBAL(u8, acpi_gbl_db_opt_verbose);
+ACPI_GLOBAL(u8, acpi_gbl_dm_opt_disasm);
+ACPI_GLOBAL(u8, acpi_gbl_dm_opt_listing);
 ACPI_GLOBAL(u8, acpi_gbl_num_external_methods);
 ACPI_GLOBAL(u32, acpi_gbl_resolved_external_methods);
 ACPI_GLOBAL(struct acpi_external_list *, acpi_gbl_external_list);
@@ -346,8 +350,8 @@
 /*
  * Statistic globals
  */
-ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
-ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TYPE_NS_NODE_MAX + 1]);
+ACPI_GLOBAL(u16, acpi_gbl_obj_type_count[ACPI_TOTAL_TYPES]);
+ACPI_GLOBAL(u16, acpi_gbl_node_type_count[ACPI_TOTAL_TYPES]);
 ACPI_GLOBAL(u16, acpi_gbl_obj_type_count_misc);
 ACPI_GLOBAL(u16, acpi_gbl_node_type_count_misc);
 ACPI_GLOBAL(u32, acpi_gbl_num_nodes);
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 7ac9800..e820ed8 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -131,6 +131,28 @@
 acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 			u32 level, u32 index);
 
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+			   union acpi_operand_object *obj_desc,
+			   struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
+			  union acpi_operand_object *obj_desc,
+			  struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_start_trace_opcode(union acpi_parse_object *op,
+			   struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
+			  struct acpi_walk_state *walk_state);
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+		    u8 begin, u8 *aml, char *pathname);
+
 /*
  * exfield - ACPI AML (p-code) execution - field manipulation
  */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index bc60096..6f70826 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -174,8 +174,12 @@
 	 */
 #ifdef ACPI_LARGE_NAMESPACE_NODE
 	union acpi_parse_object *op;
+	void *method_locals;
+	void *method_args;
 	u32 value;
 	u32 length;
+	u8 arg_count;
+
 #endif
 };
 
@@ -209,11 +213,9 @@
 #define ACPI_ROOT_ORIGIN_ALLOCATED      (1)
 #define ACPI_ROOT_ALLOW_RESIZE          (2)
 
-/* Predefined (fixed) table indexes */
+/* Predefined table indexes */
 
-#define ACPI_TABLE_INDEX_DSDT           (0)
-#define ACPI_TABLE_INDEX_FACS           (1)
-#define ACPI_TABLE_INDEX_X_FACS         (2)
+#define ACPI_INVALID_TABLE_INDEX        (0xFFFFFFFF)
 
 struct acpi_find_context {
 	char *search_for;
@@ -404,6 +406,13 @@
 
 #define ACPI_NUM_RTYPES                 5	/* Number of actual object types */
 
+/* Info for running the _REG methods */
+
+struct acpi_reg_walk_info {
+	acpi_adr_space_type space_id;
+	u32 reg_run_count;
+};
+
 /*****************************************************************************
  *
  * Event typedefs and structs
@@ -715,7 +724,7 @@
 	union acpi_parse_object *arg;	/* arguments and contained ops */
 };
 
-#ifdef ACPI_DISASSEMBLER
+#if defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUG_OUTPUT)
 #define ACPI_DISASM_ONLY_MEMBERS(a)     a;
 #else
 #define ACPI_DISASM_ONLY_MEMBERS(a)
@@ -726,7 +735,7 @@
 	u8                              descriptor_type; /* To differentiate various internal objs */\
 	u8                              flags;          /* Type of Op */\
 	u16                             aml_opcode;     /* AML opcode */\
-	u32                             aml_offset;     /* Offset of declaration in AML */\
+	u8                              *aml;           /* Address of declaration in AML */\
 	union acpi_parse_object         *next;          /* Next op */\
 	struct acpi_namespace_node      *node;          /* For use by interpreter */\
 	union acpi_parse_value          value;          /* Value or args associated with the opcode */\
@@ -1103,6 +1112,9 @@
 	 *   Index of current thread inside all them created.
 	 */
 	char init_args;
+#ifdef ACPI_DEBUGGER
+	acpi_object_type arg_types[4];
+#endif
 	char *arguments[4];
 	char num_threads_str[11];
 	char id_of_thread_str[11];
@@ -1119,6 +1131,10 @@
 #define ACPI_DB_CONSOLE_OUTPUT          0x02
 #define ACPI_DB_DUPLICATE_OUTPUT        0x03
 
+struct acpi_object_info {
+	u32 types[ACPI_TOTAL_TYPES];
+};
+
 /*****************************************************************************
  *
  * Debug
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index c240bdf..e85366c 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -220,6 +220,15 @@
 #define ACPI_MUL_32(a)                  _ACPI_MUL(a, 5)
 #define ACPI_MOD_32(a)                  _ACPI_MOD(a, 32)
 
+/* Test for ASCII character */
+
+#define ACPI_IS_ASCII(c)                ((c) < 0x80)
+
+/* Signed integers */
+
+#define ACPI_SIGN_POSITIVE              0
+#define ACPI_SIGN_NEGATIVE              1
+
 /*
  * Rounding macros (Power of two boundaries only)
  */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 0dd0882..ea0d907 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -272,17 +272,20 @@
  */
 u32 acpi_ns_opens_scope(acpi_object_type type);
 
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
-			    acpi_size size, char *name_buffer);
-
 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node);
 
+u32
+acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
+			      char *full_path, u32 path_size, u8 no_trailing);
+
+char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
+				      u8 no_trailing);
+
 char *acpi_ns_name_of_current_scope(struct acpi_walk_state *walk_state);
 
 acpi_status
 acpi_ns_handle_to_pathname(acpi_handle target_handle,
-			   struct acpi_buffer *buffer);
+			   struct acpi_buffer *buffer, u8 no_trailing);
 
 u8
 acpi_ns_pattern_match(struct acpi_namespace_node *obj_node, char *search_for);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index c81d98d..0bd02c4 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -176,6 +176,7 @@
 	u8 param_count;
 	u8 sync_level;
 	union acpi_operand_object *mutex;
+	union acpi_operand_object *node;
 	u8 *aml_start;
 	union {
 		acpi_internal_method implementation;
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 0cdd2fc..6021ccf 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -225,11 +225,11 @@
 /*
  * psutils - parser utilities
  */
-union acpi_parse_object *acpi_ps_create_scope_op(void);
+union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml);
 
 void acpi_ps_init_op(union acpi_parse_object *op, u16 opcode);
 
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode);
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml);
 
 void acpi_ps_free_op(union acpi_parse_object *op);
 
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 44997ca..f9992dc 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -85,7 +85,7 @@
 	u8 namespace_override;	/* Override existing objects */
 	u8 result_size;		/* Total elements for the result stack */
 	u8 result_count;	/* Current number of occupied elements of result stack */
-	u32 aml_offset;
+	u8 *aml;
 	u32 arg_types;
 	u32 method_breakpoint;	/* For single stepping */
 	u32 user_breakpoint;	/* User AML breakpoint */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7e0b6f1..f7731f2 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -154,14 +154,20 @@
 struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
 
 void
-acpi_tb_install_table_with_override(u32 table_index,
-				    struct acpi_table_desc *new_table_desc,
-				    u8 override);
+acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
+				    u8 override, u32 *table_index);
 
 acpi_status
 acpi_tb_install_fixed_table(acpi_physical_address address,
-			    char *signature, u32 table_index);
+			    char *signature, u32 *table_index);
 
 acpi_status acpi_tb_parse_root_table(acpi_physical_address rsdp_address);
 
+u8 acpi_is_valid_signature(char *signature);
+
+/*
+ * tbxfload
+ */
+acpi_status acpi_tb_load_namespace(void);
+
 #endif				/* __ACTABLES_H__ */
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 6de0d35..fb2aa50 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -167,6 +167,17 @@
 #define DB_QWORD_DISPLAY    8
 
 /*
+ * utnonansi - Non-ANSI C library functions
+ */
+void acpi_ut_strupr(char *src_string);
+
+void acpi_ut_strlwr(char *src_string);
+
+int acpi_ut_stricmp(char *string1, char *string2);
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
+
+/*
  * utglobal - Global data structures and procedures
  */
 acpi_status acpi_ut_init_globals(void);
@@ -205,8 +216,6 @@
 
 void acpi_ut_subsystem_shutdown(void);
 
-#define ACPI_IS_ASCII(c)  ((c) < 0x80)
-
 /*
  * utcopy - Object construction and conversion interfaces
  */
@@ -508,7 +517,7 @@
 
 u8 acpi_ut_is_pci_root_bridge(char *id);
 
-#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
 u8 acpi_ut_is_aml_table(struct acpi_table_header *table);
 #endif
 
@@ -567,16 +576,6 @@
 /*
  * utstring - String and character utilities
  */
-void acpi_ut_strupr(char *src_string);
-
-#ifdef ACPI_ASL_COMPILER
-void acpi_ut_strlwr(char *src_string);
-
-int acpi_ut_stricmp(char *string1, char *string2);
-#endif
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
-
 void acpi_ut_print_string(char *string, u16 max_length);
 
 #if defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 3e69897..e2ab59e 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -86,7 +86,7 @@
 
 	/* Allocate a new parser op to be the root of the parsed tree */
 
-	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
 	if (!op) {
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
@@ -129,7 +129,7 @@
 
 	/* Evaluate the deferred arguments */
 
-	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
+	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP, aml_start);
 	if (!op) {
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 39da9da..435fc16 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -212,7 +212,7 @@
 			 */
 			control_state->control.loop_count++;
 			if (control_state->control.loop_count >
-			    ACPI_MAX_LOOP_ITERATIONS) {
+			    acpi_gbl_max_loop_iterations) {
 				status = AE_AML_INFINITE_LOOP;
 				break;
 			}
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
new file mode 100644
index 0000000..309556e
--- /dev/null
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -0,0 +1,231 @@
+/******************************************************************************
+ *
+ * Module Name: dsdebug - Parser/Interpreter interface - debugging
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acdispat.h"
+#include "acnamesp.h"
+#ifdef ACPI_DISASSEMBLER
+#include "acdisasm.h"
+#endif
+#include "acinterp.h"
+
+#define _COMPONENT          ACPI_DISPATCHER
+ACPI_MODULE_NAME("dsdebug")
+
+#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+/* Local prototypes */
+static void
+acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
+			    const char *message);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_print_node_pathname
+ *
+ * PARAMETERS:  node            - Object
+ *              message         - Prefix message
+ *
+ * DESCRIPTION: Print an object's full namespace pathname
+ *              Manages allocation/freeing of a pathname buffer
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_print_node_pathname(struct acpi_namespace_node *node,
+			    const char *message)
+{
+	struct acpi_buffer buffer;
+	acpi_status status;
+
+	ACPI_FUNCTION_TRACE(ds_print_node_pathname);
+
+	if (!node) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[NULL NAME]"));
+		return_VOID;
+	}
+
+	/* Convert handle to full pathname and print it (with supplied message) */
+
+	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
+
+	status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
+	if (ACPI_SUCCESS(status)) {
+		if (message) {
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "%s ",
+					      message));
+		}
+
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "[%s] (Node %p)",
+				      (char *)buffer.pointer, node));
+		ACPI_FREE(buffer.pointer);
+	}
+
+	return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_dump_method_stack
+ *
+ * PARAMETERS:  status          - Method execution status
+ *              walk_state      - Current state of the parse tree walk
+ *              op              - Executing parse op
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Called when a method has been aborted because of an error.
+ *              Dumps the method execution stack.
+ *
+ ******************************************************************************/
+
+void
+acpi_ds_dump_method_stack(acpi_status status,
+			  struct acpi_walk_state *walk_state,
+			  union acpi_parse_object *op)
+{
+	union acpi_parse_object *next;
+	struct acpi_thread_state *thread;
+	struct acpi_walk_state *next_walk_state;
+	struct acpi_namespace_node *previous_method = NULL;
+	union acpi_operand_object *method_desc;
+
+	ACPI_FUNCTION_TRACE(ds_dump_method_stack);
+
+	/* Ignore control codes, they are not errors */
+
+	if ((status & AE_CODE_MASK) == AE_CODE_CONTROL) {
+		return_VOID;
+	}
+
+	/* We may be executing a deferred opcode */
+
+	if (walk_state->deferred_node) {
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "Executing subtree for Buffer/Package/Region\n"));
+		return_VOID;
+	}
+
+	/*
+	 * If there is no Thread, we are not actually executing a method.
+	 * This can happen when the iASL compiler calls the interpreter
+	 * to perform constant folding.
+	 */
+	thread = walk_state->thread;
+	if (!thread) {
+		return_VOID;
+	}
+
+	/* Display exception and method name */
+
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+			  "\n**** Exception %s during execution of method ",
+			  acpi_format_exception(status)));
+	acpi_ds_print_node_pathname(walk_state->method_node, NULL);
+
+	/* Display stack of executing methods */
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
+			      "\n\nMethod Execution Stack:\n"));
+	next_walk_state = thread->walk_state_list;
+
+	/* Walk list of linked walk states */
+
+	while (next_walk_state) {
+		method_desc = next_walk_state->method_desc;
+		if (method_desc) {
+			acpi_ex_stop_trace_method((struct acpi_namespace_node *)
+						  method_desc->method.node,
+						  method_desc, walk_state);
+		}
+
+		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+				  "    Method [%4.4s] executing: ",
+				  acpi_ut_get_node_name(next_walk_state->
+							method_node)));
+
+		/* First method is the currently executing method */
+
+		if (next_walk_state == walk_state) {
+			if (op) {
+
+				/* Display currently executing ASL statement */
+
+				next = op->common.next;
+				op->common.next = NULL;
+
+#ifdef ACPI_DISASSEMBLER
+				acpi_dm_disassemble(next_walk_state, op,
+						    ACPI_UINT32_MAX);
+#endif
+				op->common.next = next;
+			}
+		} else {
+			/*
+			 * This method has called another method
+			 * NOTE: the method call parse subtree is already deleted at this
+			 * point, so we cannot disassemble the method invocation.
+			 */
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH,
+					      "Call to method "));
+			acpi_ds_print_node_pathname(previous_method, NULL);
+		}
+
+		previous_method = next_walk_state->method_node;
+		next_walk_state = next_walk_state->next;
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_DISPATCH, "\n"));
+	}
+
+	return_VOID;
+}
+
+#else
+void
+acpi_ds_dump_method_stack(acpi_status status,
+			  struct acpi_walk_state *walk_state,
+			  union acpi_parse_object *op)
+{
+	return;
+}
+
+#endif
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 95779e8..920f1b1 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -237,12 +237,22 @@
 		return_ACPI_STATUS(status);
 	}
 
+	/* DSDT is always the first AML table */
+
+	if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT)) {
+		ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+				      "\nInitializing Namespace objects:\n"));
+	}
+
+	/* Summary of objects initialized */
+
 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, "
-			      "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n",
-			      table->signature, owner_id, info.object_count,
-			      info.device_count, info.op_region_count,
-			      info.method_count, info.serial_method_count,
+			      "Table [%4.4s:%8.8s] (id %.2X) - %4u Objects with %3u Devices, "
+			      "%3u Regions, %4u Methods (%u/%u/%u Serial/Non/Cvt)\n",
+			      table->signature, table->oem_table_id, owner_id,
+			      info.object_count, info.device_count,
+			      info.op_region_count, info.method_count,
+			      info.serial_method_count,
 			      info.non_serial_method_count,
 			      info.serialized_method_count));
 
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 85bb951..bc32f31 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -46,11 +46,9 @@
 #include "acdispat.h"
 #include "acinterp.h"
 #include "acnamesp.h"
-#ifdef	ACPI_DISASSEMBLER
-#include "acdisasm.h"
-#endif
 #include "acparser.h"
 #include "amlcode.h"
+#include "acdebug.h"
 
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsmethod")
@@ -103,7 +101,7 @@
 
 	/* Create/Init a root op for the method parse tree */
 
-	op = acpi_ps_alloc_op(AML_METHOD_OP);
+	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
 	if (!op) {
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
@@ -205,7 +203,7 @@
  * RETURN:      Status
  *
  * DESCRIPTION: Called on method error. Invoke the global exception handler if
- *              present, dump the method data if the disassembler is configured
+ *              present, dump the method data if the debugger is configured
  *
  *              Note: Allows the exception handler to change the status code
  *
@@ -214,6 +212,8 @@
 acpi_status
 acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
 {
+	u32 aml_offset;
+
 	ACPI_FUNCTION_ENTRY();
 
 	/* Ignore AE_OK and control exception codes */
@@ -234,26 +234,30 @@
 		 * Handler can map the exception code to anything it wants, including
 		 * AE_OK, in which case the executing method will not be aborted.
 		 */
+		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
+						walk_state->parser_state.
+						aml_start);
+
 		status = acpi_gbl_exception_handler(status,
 						    walk_state->method_node ?
 						    walk_state->method_node->
 						    name.integer : 0,
 						    walk_state->opcode,
-						    walk_state->aml_offset,
-						    NULL);
+						    aml_offset, NULL);
 		acpi_ex_enter_interpreter();
 	}
 
 	acpi_ds_clear_implicit_return(walk_state);
 
-#ifdef ACPI_DISASSEMBLER
 	if (ACPI_FAILURE(status)) {
+		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);
 
-		/* Display method locals/args if disassembler is present */
+		/* Display method locals/args if debugger is present */
 
-		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
-	}
+#ifdef ACPI_DEBUGGER
+		acpi_db_dump_method_info(status, walk_state);
 #endif
+	}
 
 	return (status);
 }
@@ -328,6 +332,8 @@
 		return_ACPI_STATUS(AE_NULL_ENTRY);
 	}
 
+	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);
+
 	/* Prevent wraparound of thread count */
 
 	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
@@ -574,9 +580,7 @@
 	/* On error, we must terminate the method properly */
 
 	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
-	if (next_walk_state) {
-		acpi_ds_delete_walk_state(next_walk_state);
-	}
+	acpi_ds_delete_walk_state(next_walk_state);
 
 	return_ACPI_STATUS(status);
 }
@@ -826,5 +830,8 @@
 		}
 	}
 
+	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
+				  method.node, method_desc, walk_state);
+
 	return_VOID;
 }
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index ea0cc4e..81d7b986 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -480,8 +480,8 @@
 	union acpi_operand_object **operand;
 	struct acpi_namespace_node *node;
 	union acpi_parse_object *next_op;
-	u32 table_index;
 	struct acpi_table_header *table;
+	u32 table_index;
 
 	ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
 
@@ -504,6 +504,8 @@
 		return_ACPI_STATUS(status);
 	}
 
+	operand = &walk_state->operands[0];
+
 	/*
 	 * Resolve the Signature string, oem_id string,
 	 * and oem_table_id string operands
@@ -511,32 +513,34 @@
 	status = acpi_ex_resolve_operands(op->common.aml_opcode,
 					  ACPI_WALK_OPERANDS, walk_state);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		goto cleanup;
 	}
 
-	operand = &walk_state->operands[0];
-
 	/* Find the ACPI table */
 
 	status = acpi_tb_find_table(operand[0]->string.pointer,
 				    operand[1]->string.pointer,
 				    operand[2]->string.pointer, &table_index);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		if (status == AE_NOT_FOUND) {
+			ACPI_ERROR((AE_INFO,
+				    "ACPI Table [%4.4s] OEM:(%s, %s) not found in RSDT/XSDT",
+				    operand[0]->string.pointer,
+				    operand[1]->string.pointer,
+				    operand[2]->string.pointer));
+		}
+		goto cleanup;
 	}
 
-	acpi_ut_remove_reference(operand[0]);
-	acpi_ut_remove_reference(operand[1]);
-	acpi_ut_remove_reference(operand[2]);
-
 	status = acpi_get_table_by_index(table_index, &table);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		goto cleanup;
 	}
 
 	obj_desc = acpi_ns_get_attached_object(node);
 	if (!obj_desc) {
-		return_ACPI_STATUS(AE_NOT_EXIST);
+		status = AE_NOT_EXIST;
+		goto cleanup;
 	}
 
 	obj_desc->region.address = ACPI_PTR_TO_PHYSADDR(table);
@@ -551,6 +555,11 @@
 
 	obj_desc->region.flags |= AOPOBJ_DATA_VALID;
 
+cleanup:
+	acpi_ut_remove_reference(operand[0]);
+	acpi_ut_remove_reference(operand[1]);
+	acpi_ut_remove_reference(operand[2]);
+
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 845ff44..097188a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -388,7 +388,7 @@
 
 		/* Create a new op */
 
-		op = acpi_ps_alloc_op(walk_state->opcode);
+		op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
 		if (!op) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index fcaa30c..e2c08cd 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -335,7 +335,7 @@
 
 		/* Create a new op */
 
-		op = acpi_ps_alloc_op(walk_state->opcode);
+		op = acpi_ps_alloc_op(walk_state->opcode, walk_state->aml);
 		if (!op) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 2ba28a6..5ee79a1 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -626,9 +626,17 @@
 			    acpi_adr_space_type space_id)
 {
 	acpi_status status;
+	struct acpi_reg_walk_info info;
 
 	ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
 
+	info.space_id = space_id;
+	info.reg_run_count = 0;
+
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
+			      "    Running _REG methods for SpaceId %s\n",
+			      acpi_ut_get_region_name(info.space_id)));
+
 	/*
 	 * Run all _REG methods for all Operation Regions for this space ID. This
 	 * is a separate walk in order to handle any interdependencies between
@@ -637,7 +645,7 @@
 	 */
 	status = acpi_ns_walk_namespace(ACPI_TYPE_ANY, node, ACPI_UINT32_MAX,
 					ACPI_NS_WALK_UNLOCK, acpi_ev_reg_run,
-					NULL, &space_id, NULL);
+					NULL, &info, NULL);
 
 	/* Special case for EC: handle "orphan" _REG methods with no region */
 
@@ -645,6 +653,11 @@
 		acpi_ev_orphan_ec_reg_method(node);
 	}
 
+	ACPI_DEBUG_PRINT_RAW((ACPI_DB_NAMES,
+			      "    Executed %u _REG methods for SpaceId %s\n",
+			      info.reg_run_count,
+			      acpi_ut_get_region_name(info.space_id)));
+
 	return_ACPI_STATUS(status);
 }
 
@@ -664,10 +677,10 @@
 {
 	union acpi_operand_object *obj_desc;
 	struct acpi_namespace_node *node;
-	acpi_adr_space_type space_id;
 	acpi_status status;
+	struct acpi_reg_walk_info *info;
 
-	space_id = *ACPI_CAST_PTR(acpi_adr_space_type, context);
+	info = ACPI_CAST_PTR(struct acpi_reg_walk_info, context);
 
 	/* Convert and validate the device handle */
 
@@ -696,13 +709,14 @@
 
 	/* Object is a Region */
 
-	if (obj_desc->region.space_id != space_id) {
+	if (obj_desc->region.space_id != info->space_id) {
 
 		/* This region is for a different address space, just ignore it */
 
 		return (AE_OK);
 	}
 
+	info->reg_run_count++;
 	status = acpi_ev_execute_reg_method(obj_desc, ACPI_REG_CONNECT);
 	return (status);
 }
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 24a4c5c..b540913 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -162,14 +162,6 @@
 
 	ACPI_FUNCTION_TRACE(ex_load_table_op);
 
-	/* Validate lengths for the Signature, oem_id, and oem_table_id strings */
-
-	if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
-	    (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
-	    (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
-		return_ACPI_STATUS(AE_AML_STRING_LIMIT);
-	}
-
 	/* Find the ACPI table in the RSDT/XSDT */
 
 	status = acpi_tb_find_table(operand[0]->string.pointer,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index aaeea48..ccb7219 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -486,6 +486,7 @@
 
 	obj_desc->method.aml_start = aml_start;
 	obj_desc->method.aml_length = aml_length;
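+	/* Save the method Node - needed by the method execution tracer */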
+	obj_desc->method.node = operand[0];
 
 	/*
 	 * Disassemble the method flags. Split off the arg_count, Serialized
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 815442b..de92458 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -43,11 +43,21 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
+#include "acnamesp.h"
 #include "acinterp.h"
+#include "acparser.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exdebug")
 
+static union acpi_operand_object *acpi_gbl_trace_method_object = NULL;
+
+/* Local prototypes */
+
+#ifdef ACPI_DEBUG_OUTPUT
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type);
+#endif
+
 #ifndef ACPI_NO_ERROR_MESSAGES
 /*******************************************************************************
  *
@@ -70,6 +80,7 @@
  * enabled if necessary.
  *
  ******************************************************************************/
+
 void
 acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 			u32 level, u32 index)
@@ -308,3 +319,316 @@
 	return_VOID;
 }
 #endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_interpreter_trace_enabled
+ *
+ * PARAMETERS:  name                - Whether the method name should be
+ *                                    matched; this should be checked before
+ *                                    starting the tracer
+ *
+ * RETURN:      TRUE if interpreter trace is enabled.
+ *
+ * DESCRIPTION: Check whether interpreter trace is enabled
+ *
+ ******************************************************************************/
+
+static u8 acpi_ex_interpreter_trace_enabled(char *name)
+{
+
+	/* Check if tracing is enabled */
+
+	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED)) {
+		return (FALSE);
+	}
+
+	/*
+	 * Check if tracing is filtered:
+	 *
+	 * 1. If the tracer is started, acpi_gbl_trace_method_object should have
+	 *    been filled by the trace starter
+	 * 2. If the tracer is not started, acpi_gbl_trace_method_name should be
+	 *    matched if it is specified
+	 * 3. If the tracer is oneshot style, acpi_gbl_trace_method_name should
+	 *    not be cleared by the trace stopper during the first match
+	 */
+	if (acpi_gbl_trace_method_object) {
+		return (TRUE);
+	}
+	if (name &&
+	    (acpi_gbl_trace_method_name &&
+	     strcmp(acpi_gbl_trace_method_name, name))) {
+		return (FALSE);
+	}
+	if ((acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) &&
+	    !acpi_gbl_trace_method_name) {
+		return (FALSE);
+	}
+
+	return (TRUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_get_trace_event_name
+ *
+ * PARAMETERS:  type            - Trace event type
+ *
+ * RETURN:      Trace event name.
+ *
+ * DESCRIPTION: Used to obtain the full trace event name.
+ *
+ ******************************************************************************/
+
+#ifdef ACPI_DEBUG_OUTPUT
+
+static const char *acpi_ex_get_trace_event_name(acpi_trace_event_type type)
+{
+	switch (type) {
+	case ACPI_TRACE_AML_METHOD:
+
+		return "Method";
+
+	case ACPI_TRACE_AML_OPCODE:
+
+		return "Opcode";
+
+	case ACPI_TRACE_AML_REGION:
+
+		return "Region";
+
+	default:
+
+		return "";
+	}
+}
+
+#endif
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_trace_point
+ *
+ * PARAMETERS:  type                - Trace event type
+ *              begin               - TRUE if before execution
+ *              aml                 - Executed AML address
+ *              pathname            - Object path
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Internal interpreter execution trace.
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_trace_point(acpi_trace_event_type type,
+		    u8 begin, u8 *aml, char *pathname)
+{
+
+	ACPI_FUNCTION_NAME(ex_trace_point);
+
+	if (pathname) {
+		ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+				  "%s %s [0x%p:%s] execution.\n",
+				  acpi_ex_get_trace_event_name(type),
+				  begin ? "Begin" : "End", aml, pathname));
+	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
+				  "%s %s [0x%p] execution.\n",
+				  acpi_ex_get_trace_event_name(type),
+				  begin ? "Begin" : "End", aml));
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_start_trace_method
+ *
+ * PARAMETERS:  method_node         - Node of the method
+ *              obj_desc            - The method object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Start control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_method(struct acpi_namespace_node *method_node,
+			   union acpi_operand_object *obj_desc,
+			   struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	char *pathname = NULL;
+	u8 enabled = FALSE;
+
+	ACPI_FUNCTION_NAME(ex_start_trace_method);
+
+	if (method_node) {
+		pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto exit;
+	}
+
+	enabled = acpi_ex_interpreter_trace_enabled(pathname);
+	if (enabled && !acpi_gbl_trace_method_object) {
+		acpi_gbl_trace_method_object = obj_desc;
+		acpi_gbl_original_dbg_level = acpi_dbg_level;
+		acpi_gbl_original_dbg_layer = acpi_dbg_layer;
+		acpi_dbg_level = ACPI_TRACE_LEVEL_ALL;
+		acpi_dbg_layer = ACPI_TRACE_LAYER_ALL;
+
+		if (acpi_gbl_trace_dbg_level) {
+			acpi_dbg_level = acpi_gbl_trace_dbg_level;
+		}
+		if (acpi_gbl_trace_dbg_layer) {
+			acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
+		}
+	}
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit:
+	if (enabled) {
+		ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, TRUE,
+				 obj_desc ? obj_desc->method.aml_start : NULL,
+				 pathname);
+	}
+	if (pathname) {
+		ACPI_FREE(pathname);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_stop_trace_method
+ *
+ * PARAMETERS:  method_node         - Node of the method
+ *              obj_desc            - The method object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Stop control method execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_stop_trace_method(struct acpi_namespace_node *method_node,
+			  union acpi_operand_object *obj_desc,
+			  struct acpi_walk_state *walk_state)
+{
+	acpi_status status;
+	char *pathname = NULL;
+	u8 enabled;
+
+	ACPI_FUNCTION_NAME(ex_stop_trace_method);
+
+	if (method_node) {
+		pathname = acpi_ns_get_normalized_pathname(method_node, TRUE);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto exit_path;
+	}
+
+	enabled = acpi_ex_interpreter_trace_enabled(NULL);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+	if (enabled) {
+		ACPI_TRACE_POINT(ACPI_TRACE_AML_METHOD, FALSE,
+				 obj_desc ? obj_desc->method.aml_start : NULL,
+				 pathname);
+	}
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		goto exit_path;
+	}
+
+	/* Check whether the tracer should be stopped */
+
+	if (acpi_gbl_trace_method_object == obj_desc) {
+
+		/* Disable further tracing if type is one-shot */
+
+		if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT) {
+			acpi_gbl_trace_method_name = NULL;
+		}
+
+		acpi_dbg_level = acpi_gbl_original_dbg_level;
+		acpi_dbg_layer = acpi_gbl_original_dbg_layer;
+		acpi_gbl_trace_method_object = NULL;
+	}
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+exit_path:
+	if (pathname) {
+		ACPI_FREE(pathname);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_start_trace_opcode
+ *
+ * PARAMETERS:  op                  - The parser opcode object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Start opcode execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_start_trace_opcode(union acpi_parse_object *op,
+			   struct acpi_walk_state *walk_state)
+{
+
+	ACPI_FUNCTION_NAME(ex_start_trace_opcode);
+
+	if (acpi_ex_interpreter_trace_enabled(NULL) &&
+	    (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
+		ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, TRUE,
+				 op->common.aml, op->common.aml_op_name);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_stop_trace_opcode
+ *
+ * PARAMETERS:  op                  - The parser opcode object
+ *              walk_state          - current state, NULL if not yet executing
+ *                                    a method.
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Stop opcode execution trace
+ *
+ ******************************************************************************/
+
+void
+acpi_ex_stop_trace_opcode(union acpi_parse_object *op,
+			  struct acpi_walk_state *walk_state)
+{
+
+	ACPI_FUNCTION_NAME(ex_stop_trace_opcode);
+
+	if (acpi_ex_interpreter_trace_enabled(NULL) &&
+	    (acpi_gbl_trace_flags & ACPI_TRACE_OPCODE)) {
+		ACPI_TRACE_POINT(ACPI_TRACE_AML_OPCODE, FALSE,
+				 op->common.aml, op->common.aml_op_name);
+	}
+}
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 401e7ed..d836f88 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -995,9 +995,8 @@
 	if (obj_desc->reference.class == ACPI_REFCLASS_NAME) {
 		acpi_os_printf(" %p ", obj_desc->reference.node);
 
-		status =
-		    acpi_ns_handle_to_pathname(obj_desc->reference.node,
-					       &ret_buf);
+		status = acpi_ns_handle_to_pathname(obj_desc->reference.node,
+						    &ret_buf, TRUE);
 		if (ACPI_FAILURE(status)) {
 			acpi_os_printf(" Could not convert name to pathname\n");
 		} else {
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index c7e3b92..1b372ef 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -126,7 +126,7 @@
 	if (!source_desc) {
 		ACPI_ERROR((AE_INFO, "No object attached to node [%4.4s] %p",
 			    node->name.ascii, node));
-		return_ACPI_STATUS(AE_AML_NO_OPERAND);
+		return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
 	}
 
 	/*
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index b6b7f3a..7b10912 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -337,8 +337,9 @@
 			 acpi_object_type * return_type,
 			 union acpi_operand_object **return_desc)
 {
-	union acpi_operand_object *obj_desc = (void *)operand;
-	struct acpi_namespace_node *node;
+	union acpi_operand_object *obj_desc = ACPI_CAST_PTR(void, operand);
+	struct acpi_namespace_node *node =
+	    ACPI_CAST_PTR(struct acpi_namespace_node, operand);
 	acpi_object_type type;
 	acpi_status status;
 
@@ -355,9 +356,7 @@
 	case ACPI_DESC_TYPE_NAMED:
 
 		type = ((struct acpi_namespace_node *)obj_desc)->type;
-		obj_desc =
-		    acpi_ns_get_attached_object((struct acpi_namespace_node *)
-						obj_desc);
+		obj_desc = acpi_ns_get_attached_object(node);
 
 		/* If we had an Alias node, use the attached object for type info */
 
@@ -368,6 +367,13 @@
 							 acpi_namespace_node *)
 							obj_desc);
 		}
+
+		if (!obj_desc) {
+			ACPI_ERROR((AE_INFO,
+				    "[%4.4s] Node is unresolved or uninitialized",
+				    acpi_ut_get_node_name(node)));
+			return_ACPI_STATUS(AE_AML_UNINITIALIZED_NODE);
+		}
 		break;
 
 	default:
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 52dfd0d..d62a616 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -160,19 +160,8 @@
 
 	ACPI_FUNCTION_TRACE(acpi_set_firmware_waking_vectors);
 
-	/* If Hardware Reduced flag is set, there is no FACS */
-
-	if (acpi_gbl_reduced_hardware) {
-		return_ACPI_STATUS (AE_OK);
-	}
-
-	if (acpi_gbl_facs32) {
-		(void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs32,
-							  physical_address,
-							  physical_address64);
-	}
-	if (acpi_gbl_facs64) {
-		(void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_facs64,
+	if (acpi_gbl_FACS) {
+		(void)acpi_hw_set_firmware_waking_vectors(acpi_gbl_FACS,
 							  physical_address,
 							  physical_address64);
 	}
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 80670cb..7eba578 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -274,6 +274,7 @@
 		acpi_ex_exit_interpreter();
 
 		if (ACPI_FAILURE(status)) {
+			info->return_object = NULL;
 			goto cleanup;
 		}
 
@@ -464,7 +465,8 @@
 
 	status = acpi_ns_evaluate(info);
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INIT, "Executed module-level code at %p\n",
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT_NAMES,
+			  "Executed module-level code at %p\n",
 			  method_obj->method.aml_start));
 
 	/* Delete a possible implicit return value (in slack mode) */
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index bd6cd4a..14ab836 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -111,7 +111,21 @@
 	if (ACPI_SUCCESS(status)) {
 		acpi_tb_set_table_loaded_flag(table_index, TRUE);
 	} else {
-		(void)acpi_tb_release_owner_id(table_index);
+		/*
+		 * On error, delete any namespace objects created by this table.
+		 * We cannot initialize these objects, so delete them. There are
+		 * a couple of especially bad cases:
+		 * AE_ALREADY_EXISTS - namespace collision.
+		 * AE_NOT_FOUND - the target of a Scope operator does not
+		 * exist. This target of Scope must already exist in the
+		 * namespace, as per the ACPI specification.
+		 */
+		(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+		acpi_ns_delete_namespace_by_owner(acpi_gbl_root_table_list.
+						  tables[table_index].owner_id);
+		acpi_tb_release_owner_id(table_index);
+
+		return_ACPI_STATUS(status);
 	}
 
 unlock:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d293d97..8934b4e 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -51,73 +51,6 @@
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_ns_build_external_path
- *
- * PARAMETERS:  node            - NS node whose pathname is needed
- *              size            - Size of the pathname
- *              *name_buffer    - Where to return the pathname
- *
- * RETURN:      Status
- *              Places the pathname into the name_buffer, in external format
- *              (name segments separated by path separators)
- *
- * DESCRIPTION: Generate a full pathaname
- *
- ******************************************************************************/
-acpi_status
-acpi_ns_build_external_path(struct acpi_namespace_node *node,
-			    acpi_size size, char *name_buffer)
-{
-	acpi_size index;
-	struct acpi_namespace_node *parent_node;
-
-	ACPI_FUNCTION_ENTRY();
-
-	/* Special case for root */
-
-	index = size - 1;
-	if (index < ACPI_NAME_SIZE) {
-		name_buffer[0] = AML_ROOT_PREFIX;
-		name_buffer[1] = 0;
-		return (AE_OK);
-	}
-
-	/* Store terminator byte, then build name backwards */
-
-	parent_node = node;
-	name_buffer[index] = 0;
-
-	while ((index > ACPI_NAME_SIZE) && (parent_node != acpi_gbl_root_node)) {
-		index -= ACPI_NAME_SIZE;
-
-		/* Put the name into the buffer */
-
-		ACPI_MOVE_32_TO_32((name_buffer + index), &parent_node->name);
-		parent_node = parent_node->parent;
-
-		/* Prefix name with the path separator */
-
-		index--;
-		name_buffer[index] = ACPI_PATH_SEPARATOR;
-	}
-
-	/* Overwrite final separator with the root prefix character */
-
-	name_buffer[index] = AML_ROOT_PREFIX;
-
-	if (index != 0) {
-		ACPI_ERROR((AE_INFO,
-			    "Could not construct external pathname; index=%u, size=%u, Path=%s",
-			    (u32) index, (u32) size, &name_buffer[size]));
-
-		return (AE_BAD_PARAMETER);
-	}
-
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ns_get_external_pathname
  *
  * PARAMETERS:  node            - Namespace node whose pathname is needed
@@ -130,37 +63,13 @@
  *              for error and debug statements.
  *
  ******************************************************************************/
-
 char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)
 {
-	acpi_status status;
 	char *name_buffer;
-	acpi_size size;
 
 	ACPI_FUNCTION_TRACE_PTR(ns_get_external_pathname, node);
 
-	/* Calculate required buffer size based on depth below root */
-
-	size = acpi_ns_get_pathname_length(node);
-	if (!size) {
-		return_PTR(NULL);
-	}
-
-	/* Allocate a buffer to be returned to caller */
-
-	name_buffer = ACPI_ALLOCATE_ZEROED(size);
-	if (!name_buffer) {
-		ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
-		return_PTR(NULL);
-	}
-
-	/* Build the path in the allocated buffer */
-
-	status = acpi_ns_build_external_path(node, size, name_buffer);
-	if (ACPI_FAILURE(status)) {
-		ACPI_FREE(name_buffer);
-		return_PTR(NULL);
-	}
+	name_buffer = acpi_ns_get_normalized_pathname(node, FALSE);
 
 	return_PTR(name_buffer);
 }
@@ -180,33 +89,12 @@
 acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
 {
 	acpi_size size;
-	struct acpi_namespace_node *next_node;
 
 	ACPI_FUNCTION_ENTRY();
 
-	/*
-	 * Compute length of pathname as 5 * number of name segments.
-	 * Go back up the parent tree to the root
-	 */
-	size = 0;
-	next_node = node;
+	size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
 
-	while (next_node && (next_node != acpi_gbl_root_node)) {
-		if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
-			ACPI_ERROR((AE_INFO,
-				    "Invalid Namespace Node (%p) while traversing namespace",
-				    next_node));
-			return (0);
-		}
-		size += ACPI_PATH_SEGMENT_LENGTH;
-		next_node = next_node->parent;
-	}
-
-	if (!size) {
-		size = 1;	/* Root node case */
-	}
-
-	return (size + 1);	/* +1 for null string terminator */
+	return (size);
 }
 
 /*******************************************************************************
@@ -216,6 +104,8 @@
  * PARAMETERS:  target_handle           - Handle of named object whose name is
  *                                        to be found
  *              buffer                  - Where the pathname is returned
+ *              no_trailing             - Remove trailing '_' for each name
+ *                                        segment
  *
  * RETURN:      Status, Buffer is filled with pathname if status is AE_OK
  *
@@ -225,7 +115,7 @@
 
 acpi_status
 acpi_ns_handle_to_pathname(acpi_handle target_handle,
-			   struct acpi_buffer * buffer)
+			   struct acpi_buffer * buffer, u8 no_trailing)
 {
 	acpi_status status;
 	struct acpi_namespace_node *node;
@@ -240,7 +130,8 @@
 
 	/* Determine size required for the caller buffer */
 
-	required_size = acpi_ns_get_pathname_length(node);
+	required_size =
+	    acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
 	if (!required_size) {
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
@@ -254,8 +145,8 @@
 
 	/* Build the path in the caller buffer */
 
-	status =
-	    acpi_ns_build_external_path(node, required_size, buffer->pointer);
+	(void)acpi_ns_build_normalized_path(node, buffer->pointer,
+					    required_size, no_trailing);
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
@@ -264,3 +155,149 @@
 			  (char *)buffer->pointer, (u32) required_size));
 	return_ACPI_STATUS(AE_OK);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_build_normalized_path
+ *
+ * PARAMETERS:  node        - Namespace node
+ *              full_path   - Where the path name is returned
+ *              path_size   - Size of returned path name buffer
+ *              no_trailing - Remove trailing '_' from each name segment
+ *
+ * RETURN:      1 if the AML path is empty, otherwise the length of the
+ *              pathname + 1 (meaning that 'FullPath' contains a trailing
+ *              null).
+ *
+ * DESCRIPTION: Build and return a full namespace pathname.
+ *              Note that if 'FullPath' is not large enough to contain the
+ *              node's pathname, the actual required buffer length is still
+ *              returned and will be greater than 'PathSize', so callers can
+ *              use the return value to size 'FullPath'.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ns_build_normalized_path(struct acpi_namespace_node *node,
+			      char *full_path, u32 path_size, u8 no_trailing)
+{
+	u32 length = 0, i;
+	char name[ACPI_NAME_SIZE];
+	u8 do_no_trailing;
+	char c, *left, *right;
+	struct acpi_namespace_node *next_node;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_build_normalized_path, node);
+
+#define ACPI_PATH_PUT8(path, size, byte, length)    \
+	do {                                            \
+		if ((length) < (size))                      \
+		{                                           \
+			(path)[(length)] = (byte);              \
+		}                                           \
+		(length)++;                                 \
+	} while (0)
+
+	/*
+	 * Make sure the path_size is correct, so that we don't need to
+	 * validate both full_path and path_size.
+	 */
+	if (!full_path) {
+		path_size = 0;
+	}
+
+	if (!node) {
+		goto build_trailing_null;
+	}
+
+	next_node = node;
+	while (next_node && next_node != acpi_gbl_root_node) {
+		if (next_node != node) {
+			ACPI_PATH_PUT8(full_path, path_size,
+				       AML_DUAL_NAME_PREFIX, length);
+		}
+		ACPI_MOVE_32_TO_32(name, &next_node->name);
+		do_no_trailing = no_trailing;
+		for (i = 0; i < 4; i++) {
+			c = name[4 - i - 1];
+			if (do_no_trailing && c != '_') {
+				do_no_trailing = FALSE;
+			}
+			if (!do_no_trailing) {
+				ACPI_PATH_PUT8(full_path, path_size, c, length);
+			}
+		}
+		next_node = next_node->parent;
+	}
+	ACPI_PATH_PUT8(full_path, path_size, AML_ROOT_PREFIX, length);
+
+	/* Reverse the path string */
+
+	if (length <= path_size) {
+		left = full_path;
+		right = full_path + length - 1;
+		while (left < right) {
+			c = *left;
+			*left++ = *right;
+			*right-- = c;
+		}
+	}
+
+	/* Append the trailing null */
+
+build_trailing_null:
+	ACPI_PATH_PUT8(full_path, path_size, '\0', length);
+
+#undef ACPI_PATH_PUT8
+
+	return_UINT32(length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_normalized_pathname
+ *
+ * PARAMETERS:  node            - Namespace node whose pathname is needed
+ *              no_trailing     - Remove trailing '_' from each name segment
+ *
+ * RETURN:      Pointer to storage containing the fully qualified name of
+ *              the node, in external format (name segments separated by path
+ *              separators).
+ *
+ * DESCRIPTION: Used to obtain the full pathname to a namespace node, usually
+ *              for error and debug statements. All trailing '_' are removed
+ *              from the full pathname if 'NoTrailing' is specified.
+ *
+ ******************************************************************************/
+
+char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node,
+				      u8 no_trailing)
+{
+	char *name_buffer;
+	acpi_size size;
+
+	ACPI_FUNCTION_TRACE_PTR(ns_get_normalized_pathname, node);
+
+	/* Calculate required buffer size based on depth below root */
+
+	size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
+	if (!size) {
+		return_PTR(NULL);
+	}
+
+	/* Allocate a buffer to be returned to caller */
+
+	name_buffer = ACPI_ALLOCATE_ZEROED(size);
+	if (!name_buffer) {
+		ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
+		return_PTR(NULL);
+	}
+
+	/* Build the path in the allocated buffer */
+
+	(void)acpi_ns_build_normalized_path(node, name_buffer, size,
+					    no_trailing);
+
+	return_PTR(name_buffer);
+}
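
The pathname helpers above all use the same two-pass pattern: a measuring call with a NULL buffer returns the required length (including the trailing null), and a second call fills an exactly-sized allocation. A minimal sketch of that pattern, using only functions introduced in this file (the helper name is hypothetical):

	static char *example_node_path(struct acpi_namespace_node *node, u8 no_trailing)
	{
		u32 size;
		char *path;

		/* Pass 1: NULL buffer / zero size only measures the pathname */

		size = acpi_ns_build_normalized_path(node, NULL, 0, no_trailing);
		if (!size) {
			return (NULL);
		}

		/* Pass 2: build the path into a buffer of exactly that size */

		path = ACPI_ALLOCATE_ZEROED(size);
		if (!path) {
			return (NULL);
		}

		(void)acpi_ns_build_normalized_path(node, path, size, no_trailing);
		return (path);
	}

This mirrors acpi_ns_get_normalized_pathname() above; a caller that only needs the length can stop after the first call, as acpi_ns_get_pathname_length() now does.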
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 57a4cfe..3736d43b 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -70,7 +70,7 @@
 {
 	union acpi_parse_object *parse_root;
 	acpi_status status;
-       u32 aml_length;
+	u32 aml_length;
 	u8 *aml_start;
 	struct acpi_walk_state *walk_state;
 	struct acpi_table_header *table;
@@ -78,6 +78,20 @@
 
 	ACPI_FUNCTION_TRACE(ns_one_complete_parse);
 
+	status = acpi_get_table_by_index(table_index, &table);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	/* Table must consist of at least a complete header */
+
+	if (table->length < sizeof(struct acpi_table_header)) {
+		return_ACPI_STATUS(AE_BAD_HEADER);
+	}
+
+	aml_start = (u8 *)table + sizeof(struct acpi_table_header);
+	aml_length = table->length - sizeof(struct acpi_table_header);
+
 	status = acpi_tb_get_owner_id(table_index, &owner_id);
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
@@ -85,7 +99,7 @@
 
 	/* Create and init a Root Node */
 
-	parse_root = acpi_ps_create_scope_op();
+	parse_root = acpi_ps_create_scope_op(aml_start);
 	if (!parse_root) {
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
@@ -98,23 +112,12 @@
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
 
-	status = acpi_get_table_by_index(table_index, &table);
+	status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
+				       aml_start, aml_length, NULL,
+				       (u8)pass_number);
 	if (ACPI_FAILURE(status)) {
 		acpi_ds_delete_walk_state(walk_state);
-		acpi_ps_free_op(parse_root);
-		return_ACPI_STATUS(status);
-	}
-
-	/* Table must consist of at least a complete header */
-
-	if (table->length < sizeof(struct acpi_table_header)) {
-		status = AE_BAD_HEADER;
-	} else {
-		aml_start = (u8 *) table + sizeof(struct acpi_table_header);
-		aml_length = table->length - sizeof(struct acpi_table_header);
-		status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
-					       aml_start, aml_length, NULL,
-					       (u8) pass_number);
+		goto cleanup;
 	}
 
 	/* Found OSDT table, enable the namespace override feature */
@@ -124,11 +127,6 @@
 		walk_state->namespace_override = TRUE;
 	}
 
-	if (ACPI_FAILURE(status)) {
-		acpi_ds_delete_walk_state(walk_state);
-		goto cleanup;
-	}
-
 	/* start_node is the default location to load the table */
 
 	if (start_node && start_node != acpi_gbl_root_node) {
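
The reordering above means acpi_ns_one_complete_parse() now validates the table and derives the AML window before allocating the scope op or walk state, so a bad header can return immediately and the remaining failure paths collapse into a single cleanup label. Roughly, the new sequence looks like this (a condensed sketch of the hunks above, not new API):

	status = acpi_get_table_by_index(table_index, &table);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);	/* Nothing allocated yet */
	}

	if (table->length < sizeof(struct acpi_table_header)) {
		return_ACPI_STATUS(AE_BAD_HEADER);
	}

	/* The AML byte stream begins right after the standard table header */

	aml_start = (u8 *)table + sizeof(struct acpi_table_header);
	aml_length = table->length - sizeof(struct acpi_table_header);

	/* Only now create the scope op, tagged with the start of the AML */

	parse_root = acpi_ps_create_scope_op(aml_start);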
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 8d8104b8..de325ae 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -83,7 +83,7 @@
 
 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
 
-	status = acpi_ns_handle_to_pathname(node, &buffer);
+	status = acpi_ns_handle_to_pathname(node, &buffer, TRUE);
 	if (ACPI_SUCCESS(status)) {
 		if (message) {
 			acpi_os_printf("%s ", message);
@@ -596,6 +596,23 @@
 
 	ACPI_FUNCTION_TRACE(ns_terminate);
 
+#ifdef ACPI_EXEC_APP
+	{
+		union acpi_operand_object *prev;
+		union acpi_operand_object *next;
+
+		/* Delete any module-level code blocks */
+
+		next = acpi_gbl_module_code_list;
+		while (next) {
+			prev = next;
+			next = next->method.mutex;
+			prev->method.mutex = NULL;	/* Clear the Mutex (cheated) field */
+			acpi_ut_remove_reference(prev);
+		}
+	}
+#endif
+
 	/*
 	 * Free the entire namespace -- all nodes and all objects
 	 * attached to the nodes
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 9ff643b..4b4d2f4 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -172,11 +172,15 @@
 		return (status);
 	}
 
-	if (name_type == ACPI_FULL_PATHNAME) {
+	if (name_type == ACPI_FULL_PATHNAME ||
+	    name_type == ACPI_FULL_PATHNAME_NO_TRAILING) {
 
 		/* Get the full pathname (From the namespace root) */
 
-		status = acpi_ns_handle_to_pathname(handle, buffer);
+		status = acpi_ns_handle_to_pathname(handle, buffer,
+						    name_type ==
+						    ACPI_FULL_PATHNAME ? FALSE :
+						    TRUE);
 		return (status);
 	}
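
ACPI_FULL_PATHNAME_NO_TRAILING gives acpi_get_name() callers the normalized form directly. A hedged usage sketch, assuming a caller-supplied device_handle and the usual ACPI_ALLOCATE_BUFFER convention for the output buffer:

	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	acpi_status status;

	/* e.g. "\_SB_.PCI0" is returned as "\_SB.PCI0", trailing '_' stripped */

	status = acpi_get_name(device_handle, ACPI_FULL_PATHNAME_NO_TRAILING, &buffer);
	if (ACPI_SUCCESS(status)) {
		acpi_os_printf("Device path: %s\n", (char *)buffer.pointer);
		ACPI_FREE(buffer.pointer);
	}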
 
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 6d03877..29d8b7b 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -287,7 +287,7 @@
 				  "Control Method - %p Desc %p Path=%p\n", node,
 				  method_desc, path));
 
-		name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+		name_op = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, start);
 		if (!name_op) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
@@ -484,7 +484,7 @@
 static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
 						       *parser_state)
 {
-	u32 aml_offset;
+	u8 *aml;
 	union acpi_parse_object *field;
 	union acpi_parse_object *arg = NULL;
 	u16 opcode;
@@ -498,8 +498,7 @@
 
 	ACPI_FUNCTION_TRACE(ps_get_next_field);
 
-	aml_offset =
-	    (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+	aml = parser_state->aml;
 
 	/* Determine field type */
 
@@ -536,13 +535,11 @@
 
 	/* Allocate a new field op */
 
-	field = acpi_ps_alloc_op(opcode);
+	field = acpi_ps_alloc_op(opcode, aml);
 	if (!field) {
 		return_PTR(NULL);
 	}
 
-	field->common.aml_offset = aml_offset;
-
 	/* Decode the field type */
 
 	switch (opcode) {
@@ -604,6 +601,7 @@
 		 * Argument for Connection operator can be either a Buffer
 		 * (resource descriptor), or a name_string.
 		 */
+		aml = parser_state->aml;
 		if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
 			parser_state->aml++;
 
@@ -616,7 +614,8 @@
 
 				/* Non-empty list */
 
-				arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+				arg =
+				    acpi_ps_alloc_op(AML_INT_BYTELIST_OP, aml);
 				if (!arg) {
 					acpi_ps_free_op(field);
 					return_PTR(NULL);
@@ -665,7 +664,7 @@
 
 			parser_state->aml = pkg_end;
 		} else {
-			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP, aml);
 			if (!arg) {
 				acpi_ps_free_op(field);
 				return_PTR(NULL);
@@ -730,7 +729,7 @@
 
 		/* Constants, strings, and namestrings are all the same size */
 
-		arg = acpi_ps_alloc_op(AML_BYTE_OP);
+		arg = acpi_ps_alloc_op(AML_BYTE_OP, parser_state->aml);
 		if (!arg) {
 			return_ACPI_STATUS(AE_NO_MEMORY);
 		}
@@ -777,7 +776,8 @@
 
 			/* Non-empty list */
 
-			arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+			arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP,
+					       parser_state->aml);
 			if (!arg) {
 				return_ACPI_STATUS(AE_NO_MEMORY);
 			}
@@ -807,7 +807,9 @@
 
 			/* null_name or name_string */
 
-			arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+			arg =
+			    acpi_ps_alloc_op(AML_INT_NAMEPATH_OP,
+					     parser_state->aml);
 			if (!arg) {
 				return_ACPI_STATUS(AE_NO_MEMORY);
 			}
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 9043722..03ac8c9 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -51,6 +51,7 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
+#include "acinterp.h"
 #include "acparser.h"
 #include "acdispat.h"
 #include "amlcode.h"
@@ -125,10 +126,7 @@
 		 */
 		while (GET_CURRENT_ARG_TYPE(walk_state->arg_types)
 		       && !walk_state->arg_count) {
-			walk_state->aml_offset =
-			    (u32) ACPI_PTR_DIFF(walk_state->parser_state.aml,
-						walk_state->parser_state.
-						aml_start);
+			walk_state->aml = walk_state->parser_state.aml;
 
 			status =
 			    acpi_ps_get_next_arg(walk_state,
@@ -140,7 +138,6 @@
 			}
 
 			if (arg) {
-				arg->common.aml_offset = walk_state->aml_offset;
 				acpi_ps_append_arg(op, arg);
 			}
 
@@ -324,6 +321,8 @@
 	union acpi_operand_object *method_obj;
 	struct acpi_namespace_node *parent_node;
 
+	ACPI_FUNCTION_TRACE(ps_link_module_code);
+
 	/* Get the tail of the list */
 
 	prev = next = acpi_gbl_module_code_list;
@@ -343,9 +342,13 @@
 
 		method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
 		if (!method_obj) {
-			return;
+			return_VOID;
 		}
 
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+				  "Create/Link new code block: %p\n",
+				  method_obj));
+
 		if (parent_op->common.node) {
 			parent_node = parent_op->common.node;
 		} else {
@@ -370,8 +373,14 @@
 			prev->method.mutex = method_obj;
 		}
 	} else {
+		ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+				  "Appending to existing code block: %p\n",
+				  prev));
+
 		prev->method.aml_length += aml_length;
 	}
+
+	return_VOID;
 }
 
 /*******************************************************************************
@@ -494,16 +503,7 @@
 				continue;
 			}
 
-			op->common.aml_offset = walk_state->aml_offset;
-
-			if (walk_state->op_info) {
-				ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
-						  "Opcode %4.4X [%s] Op %p Aml %p AmlOffset %5.5X\n",
-						  (u32) op->common.aml_opcode,
-						  walk_state->op_info->name, op,
-						  parser_state->aml,
-						  op->common.aml_offset));
-			}
+			acpi_ex_start_trace_opcode(op, walk_state);
 		}
 
 		/*
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 2f5ddd8..e54bc2a 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -66,12 +66,11 @@
 
 static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
 {
+	u32 aml_offset;
 
 	ACPI_FUNCTION_TRACE_PTR(ps_get_aml_opcode, walk_state);
 
-	walk_state->aml_offset =
-	    (u32)ACPI_PTR_DIFF(walk_state->parser_state.aml,
-			       walk_state->parser_state.aml_start);
+	walk_state->aml = walk_state->parser_state.aml;
 	walk_state->opcode = acpi_ps_peek_opcode(&(walk_state->parser_state));
 
 	/*
@@ -98,10 +97,14 @@
 		/* The opcode is unrecognized. Complain and skip unknown opcodes */
 
 		if (walk_state->pass_number == 2) {
+			aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
+							walk_state->
+							parser_state.aml_start);
+
 			ACPI_ERROR((AE_INFO,
 				    "Unknown opcode 0x%.2X at table offset 0x%.4X, ignoring",
 				    walk_state->opcode,
-				    (u32)(walk_state->aml_offset +
+				    (u32)(aml_offset +
 					  sizeof(struct acpi_table_header))));
 
 			ACPI_DUMP_BUFFER((walk_state->parser_state.aml - 16),
@@ -115,14 +118,14 @@
 			acpi_os_printf
 			    ("/*\nError: Unknown opcode 0x%.2X at table offset 0x%.4X, context:\n",
 			     walk_state->opcode,
-			     (u32)(walk_state->aml_offset +
+			     (u32)(aml_offset +
 				   sizeof(struct acpi_table_header)));
 
 			/* Dump the context surrounding the invalid opcode */
 
 			acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
 					     aml - 16), 48, DB_BYTE_DISPLAY,
-					    (walk_state->aml_offset +
+					    (aml_offset +
 					     sizeof(struct acpi_table_header) -
 					     16));
 			acpi_os_printf(" */\n");
@@ -294,7 +297,7 @@
 	/* Create Op structure and append to parent's argument list */
 
 	walk_state->op_info = acpi_ps_get_opcode_info(walk_state->opcode);
-	op = acpi_ps_alloc_op(walk_state->opcode);
+	op = acpi_ps_alloc_op(walk_state->opcode, aml_op_start);
 	if (!op) {
 		return_ACPI_STATUS(AE_NO_MEMORY);
 	}
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index a555f7f..98001d7 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -147,6 +147,8 @@
 		return_ACPI_STATUS(AE_OK);	/* OK for now */
 	}
 
+	acpi_ex_stop_trace_opcode(op, walk_state);
+
 	/* Delete this op and the subtree below it if asked to */
 
 	if (((walk_state->parse_flags & ACPI_PARSE_TREE_MASK) !=
@@ -185,7 +187,8 @@
 			 * op must be replaced by a placeholder return op
 			 */
 			replacement_op =
-			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+					     op->common.aml);
 			if (!replacement_op) {
 				status = AE_NO_MEMORY;
 			}
@@ -209,7 +212,8 @@
 			    || (op->common.parent->common.aml_opcode ==
 				AML_VAR_PACKAGE_OP)) {
 				replacement_op =
-				    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+				    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+						     op->common.aml);
 				if (!replacement_op) {
 					status = AE_NO_MEMORY;
 				}
@@ -224,7 +228,8 @@
 					AML_VAR_PACKAGE_OP)) {
 					replacement_op =
 					    acpi_ps_alloc_op(op->common.
-							     aml_opcode);
+							     aml_opcode,
+							     op->common.aml);
 					if (!replacement_op) {
 						status = AE_NO_MEMORY;
 					} else {
@@ -240,7 +245,8 @@
 		default:
 
 			replacement_op =
-			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP);
+			    acpi_ps_alloc_op(AML_INT_RETURN_VALUE_OP,
+					     op->common.aml);
 			if (!replacement_op) {
 				status = AE_NO_MEMORY;
 			}
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 3244091..183cc1e 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -60,11 +60,11 @@
  * DESCRIPTION: Create a Scope and associated namepath op with the root name
  *
  ******************************************************************************/
-union acpi_parse_object *acpi_ps_create_scope_op(void)
+union acpi_parse_object *acpi_ps_create_scope_op(u8 *aml)
 {
 	union acpi_parse_object *scope_op;
 
-	scope_op = acpi_ps_alloc_op(AML_SCOPE_OP);
+	scope_op = acpi_ps_alloc_op(AML_SCOPE_OP, aml);
 	if (!scope_op) {
 		return (NULL);
 	}
@@ -103,6 +103,7 @@
  * FUNCTION:    acpi_ps_alloc_op
  *
  * PARAMETERS:  opcode          - Opcode that will be stored in the new Op
+ *              aml             - Address of the opcode
  *
  * RETURN:      Pointer to the new Op, null on failure
  *
@@ -112,7 +113,7 @@
  *
  ******************************************************************************/
 
-union acpi_parse_object *acpi_ps_alloc_op(u16 opcode)
+union acpi_parse_object *acpi_ps_alloc_op(u16 opcode, u8 *aml)
 {
 	union acpi_parse_object *op;
 	const struct acpi_opcode_info *op_info;
@@ -149,6 +150,7 @@
 
 	if (op) {
 		acpi_ps_init_op(op, opcode);
+		op->common.aml = aml;
 		op->common.flags = flags;
 	}
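
Parse ops now carry the address of their opcode (op->common.aml) instead of a precomputed offset, so any consumer that still needs a table-relative offset derives it on demand. A sketch of that derivation, mirroring the error-reporting code in psobject.c above:

	u32 aml_offset;

	/* Offset of this op within the AML byte stream */

	aml_offset = (u32)ACPI_PTR_DIFF(op->common.aml,
					walk_state->parser_state.aml_start);

	/* Offset within the containing table: AML follows the table header */

	aml_offset += sizeof(struct acpi_table_header);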
 
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 841a5ea..4254805 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -47,15 +47,12 @@
 #include "acdispat.h"
 #include "acinterp.h"
 #include "actables.h"
+#include "acnamesp.h"
 
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psxface")
 
 /* Local Prototypes */
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info);
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info);
-
 static void
 acpi_ps_update_parameter_list(struct acpi_evaluate_info *info, u16 action);
 
@@ -76,7 +73,7 @@
  ******************************************************************************/
 
 acpi_status
-acpi_debug_trace(char *name, u32 debug_level, u32 debug_layer, u32 flags)
+acpi_debug_trace(const char *name, u32 debug_level, u32 debug_layer, u32 flags)
 {
 	acpi_status status;
 
@@ -85,108 +82,14 @@
 		return (status);
 	}
 
-	/* TBDs: Validate name, allow full path or just nameseg */
-
-	acpi_gbl_trace_method_name = *ACPI_CAST_PTR(u32, name);
+	acpi_gbl_trace_method_name = name;
 	acpi_gbl_trace_flags = flags;
-
-	if (debug_level) {
-		acpi_gbl_trace_dbg_level = debug_level;
-	}
-	if (debug_layer) {
-		acpi_gbl_trace_dbg_layer = debug_layer;
-	}
+	acpi_gbl_trace_dbg_level = debug_level;
+	acpi_gbl_trace_dbg_layer = debug_layer;
+	status = AE_OK;
 
 	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-	return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_start_trace
- *
- * PARAMETERS:  info        - Method info struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Start control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_start_trace(struct acpi_evaluate_info *info)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return;
-	}
-
-	if ((!acpi_gbl_trace_method_name) ||
-	    (acpi_gbl_trace_method_name != info->node->name.integer)) {
-		goto exit;
-	}
-
-	acpi_gbl_original_dbg_level = acpi_dbg_level;
-	acpi_gbl_original_dbg_layer = acpi_dbg_layer;
-
-	acpi_dbg_level = 0x00FFFFFF;
-	acpi_dbg_layer = ACPI_UINT32_MAX;
-
-	if (acpi_gbl_trace_dbg_level) {
-		acpi_dbg_level = acpi_gbl_trace_dbg_level;
-	}
-	if (acpi_gbl_trace_dbg_layer) {
-		acpi_dbg_layer = acpi_gbl_trace_dbg_layer;
-	}
-
-exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ps_stop_trace
- *
- * PARAMETERS:  info        - Method info struct
- *
- * RETURN:      None
- *
- * DESCRIPTION: Stop control method execution trace
- *
- ******************************************************************************/
-
-static void acpi_ps_stop_trace(struct acpi_evaluate_info *info)
-{
-	acpi_status status;
-
-	ACPI_FUNCTION_ENTRY();
-
-	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
-	if (ACPI_FAILURE(status)) {
-		return;
-	}
-
-	if ((!acpi_gbl_trace_method_name) ||
-	    (acpi_gbl_trace_method_name != info->node->name.integer)) {
-		goto exit;
-	}
-
-	/* Disable further tracing if type is one-shot */
-
-	if (acpi_gbl_trace_flags & 1) {
-		acpi_gbl_trace_method_name = 0;
-		acpi_gbl_trace_dbg_level = 0;
-		acpi_gbl_trace_dbg_layer = 0;
-	}
-
-	acpi_dbg_level = acpi_gbl_original_dbg_level;
-	acpi_dbg_layer = acpi_gbl_original_dbg_layer;
-
-exit:
-	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+	return (status);
 }
 
 /*******************************************************************************
@@ -212,7 +115,7 @@
  *
  ******************************************************************************/
 
-acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
+acpi_status acpi_ps_execute_method(struct acpi_evaluate_info * info)
 {
 	acpi_status status;
 	union acpi_parse_object *op;
@@ -243,10 +146,6 @@
 	 */
 	acpi_ps_update_parameter_list(info, REF_INCREMENT);
 
-	/* Begin tracing if requested */
-
-	acpi_ps_start_trace(info);
-
 	/*
 	 * Execute the method. Performs parse simultaneously
 	 */
@@ -256,7 +155,7 @@
 
 	/* Create and init a Root Node */
 
-	op = acpi_ps_create_scope_op();
+	op = acpi_ps_create_scope_op(info->obj_desc->method.aml_start);
 	if (!op) {
 		status = AE_NO_MEMORY;
 		goto cleanup;
@@ -326,10 +225,6 @@
 cleanup:
 	acpi_ps_delete_parse_tree(op);
 
-	/* End optional tracing */
-
-	acpi_ps_stop_trace(info);
-
 	/* Take away the extra reference that we gave the parameters above */
 
 	acpi_ps_update_parameter_list(info, REF_DECREMENT);
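
With the per-method acpi_ps_start_trace()/acpi_ps_stop_trace() helpers gone, tracing is armed up front through acpi_debug_trace(), which now takes the full method pathname as a string instead of a packed 4-character name segment. A hedged usage sketch (the pathname is illustrative; the level/layer values and the one-shot meaning of flag bit 0 come from the removed trace code above):

	acpi_status status;

	/* Trace the next execution of \_SB.PCI0._INI at full debug output */

	status = acpi_debug_trace("\\_SB.PCI0._INI",
				  0x00FFFFFF,		/* debug_level */
				  ACPI_UINT32_MAX,	/* debug_layer */
				  1);			/* flags: one-shot */
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("Could not enable method tracing\n");
	}

Note that the name is stored by pointer (acpi_gbl_trace_method_name = name above), so the caller's string must remain valid while tracing is enabled.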
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 3fa829e..a534442 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -348,7 +348,8 @@
 				status =
 				    acpi_ns_handle_to_pathname((acpi_handle)
 							       node,
-							       &path_buffer);
+							       &path_buffer,
+							       FALSE);
 
 				/* +1 to include null terminator */
 
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6253001..455a070 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -345,7 +345,7 @@
 	/* Obtain the DSDT and FACS tables via their addresses within the FADT */
 
 	acpi_tb_install_fixed_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
-				    ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
+				    ACPI_SIG_DSDT, &acpi_gbl_dsdt_index);
 
 	/* If Hardware Reduced flag is set, there is no FACS */
 
@@ -354,13 +354,13 @@
 			acpi_tb_install_fixed_table((acpi_physical_address)
 						    acpi_gbl_FADT.facs,
 						    ACPI_SIG_FACS,
-						    ACPI_TABLE_INDEX_FACS);
+						    &acpi_gbl_facs_index);
 		}
 		if (acpi_gbl_FADT.Xfacs) {
 			acpi_tb_install_fixed_table((acpi_physical_address)
 						    acpi_gbl_FADT.Xfacs,
 						    ACPI_SIG_FACS,
-						    ACPI_TABLE_INDEX_X_FACS);
+						    &acpi_gbl_xfacs_index);
 		}
 	}
 }
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 119c84a..405529d 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -68,12 +68,25 @@
 acpi_tb_find_table(char *signature,
 		   char *oem_id, char *oem_table_id, u32 *table_index)
 {
-	u32 i;
 	acpi_status status;
 	struct acpi_table_header header;
+	u32 i;
 
 	ACPI_FUNCTION_TRACE(tb_find_table);
 
+	/* Validate the input table signature */
+
+	if (!acpi_is_valid_signature(signature)) {
+		return_ACPI_STATUS(AE_BAD_SIGNATURE);
+	}
+
+	/* Don't allow the OEM strings to be too long */
+
+	if ((strlen(oem_id) > ACPI_OEM_ID_SIZE) ||
+	    (strlen(oem_table_id) > ACPI_OEM_TABLE_ID_SIZE)) {
+		return_ACPI_STATUS(AE_AML_STRING_LIMIT);
+	}
+
 	/* Normalize the input strings */
 
 	memset(&header, 0, sizeof(struct acpi_table_header));
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 15ea98e..6319b42 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -100,9 +100,9 @@
  *
  * FUNCTION:    acpi_tb_install_table_with_override
  *
- * PARAMETERS:  table_index             - Index into root table array
- *              new_table_desc          - New table descriptor to install
+ * PARAMETERS:  new_table_desc          - New table descriptor to install
  *              override                - Whether override should be performed
+ *              table_index             - Where the table index is returned
  *
  * RETURN:      None
  *
@@ -114,12 +114,14 @@
  ******************************************************************************/
 
 void
-acpi_tb_install_table_with_override(u32 table_index,
-				    struct acpi_table_desc *new_table_desc,
-				    u8 override)
+acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
+				    u8 override, u32 *table_index)
 {
+	u32 i;
+	acpi_status status;
 
-	if (table_index >= acpi_gbl_root_table_list.current_table_count) {
+	status = acpi_tb_get_next_table_descriptor(&i, NULL);
+	if (ACPI_FAILURE(status)) {
 		return;
 	}
 
@@ -134,8 +136,7 @@
 		acpi_tb_override_table(new_table_desc);
 	}
 
-	acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
-				      tables[table_index],
+	acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.tables[i],
 				      new_table_desc->address,
 				      new_table_desc->flags,
 				      new_table_desc->pointer);
@@ -143,9 +144,13 @@
 	acpi_tb_print_table_header(new_table_desc->address,
 				   new_table_desc->pointer);
 
+	/* This synchronizes acpi_gbl_dsdt_index */
+
+	*table_index = i;
+
 	/* Set the global integer width (based upon revision of the DSDT) */
 
-	if (table_index == ACPI_TABLE_INDEX_DSDT) {
+	if (i == acpi_gbl_dsdt_index) {
 		acpi_ut_set_integer_width(new_table_desc->pointer->revision);
 	}
 }
@@ -157,7 +162,7 @@
  * PARAMETERS:  address                 - Physical address of DSDT or FACS
  *              signature               - Table signature, NULL if no need to
  *                                        match
- *              table_index             - Index into root table array
+ *              table_index             - Where the table index is returned
  *
  * RETURN:      Status
  *
@@ -168,7 +173,7 @@
 
 acpi_status
 acpi_tb_install_fixed_table(acpi_physical_address address,
-			    char *signature, u32 table_index)
+			    char *signature, u32 *table_index)
 {
 	struct acpi_table_desc new_table_desc;
 	acpi_status status;
@@ -200,7 +205,9 @@
 		goto release_and_exit;
 	}
 
-	acpi_tb_install_table_with_override(table_index, &new_table_desc, TRUE);
+	/* Add the table to the global root table list */
+
+	acpi_tb_install_table_with_override(&new_table_desc, TRUE, table_index);
 
 release_and_exit:
 
@@ -355,13 +362,8 @@
 
 	/* Add the table to the global root table list */
 
-	status = acpi_tb_get_next_table_descriptor(&i, NULL);
-	if (ACPI_FAILURE(status)) {
-		goto release_and_exit;
-	}
-
-	*table_index = i;
-	acpi_tb_install_table_with_override(i, &new_table_desc, override);
+	acpi_tb_install_table_with_override(&new_table_desc, override,
+					    table_index);
 
 release_and_exit:
 
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 568ac0e..4337990 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -68,28 +68,27 @@
 
 acpi_status acpi_tb_initialize_facs(void)
 {
+	struct acpi_table_facs *facs;
 
 	/* If Hardware Reduced flag is set, there is no FACS */
 
 	if (acpi_gbl_reduced_hardware) {
 		acpi_gbl_FACS = NULL;
 		return (AE_OK);
-	}
-
-	(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-				      ACPI_CAST_INDIRECT_PTR(struct
-							     acpi_table_header,
-							     &acpi_gbl_facs32));
-	(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
-				      ACPI_CAST_INDIRECT_PTR(struct
-							     acpi_table_header,
-							     &acpi_gbl_facs64));
-
-	if (acpi_gbl_facs64
-	    && (!acpi_gbl_facs32 || !acpi_gbl_use32_bit_facs_addresses)) {
-		acpi_gbl_FACS = acpi_gbl_facs64;
-	} else if (acpi_gbl_facs32) {
-		acpi_gbl_FACS = acpi_gbl_facs32;
+	} else if (acpi_gbl_FADT.Xfacs &&
+		   (!acpi_gbl_FADT.facs
+		    || !acpi_gbl_use32_bit_facs_addresses)) {
+		(void)acpi_get_table_by_index(acpi_gbl_xfacs_index,
+					      ACPI_CAST_INDIRECT_PTR(struct
+								     acpi_table_header,
+								     &facs));
+		acpi_gbl_FACS = facs;
+	} else if (acpi_gbl_FADT.facs) {
+		(void)acpi_get_table_by_index(acpi_gbl_facs_index,
+					      ACPI_CAST_INDIRECT_PTR(struct
+								     acpi_table_header,
+								     &facs));
+		acpi_gbl_FACS = facs;
 	}
 
 	/* If there is no FACS, just continue. There was already an error msg */
@@ -192,7 +191,7 @@
 	acpi_tb_uninstall_table(table_desc);
 
 	acpi_tb_init_table_descriptor(&acpi_gbl_root_table_list.
-				      tables[ACPI_TABLE_INDEX_DSDT],
+				      tables[acpi_gbl_dsdt_index],
 				      ACPI_PTR_TO_PHYSADDR(new_table),
 				      ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL,
 				      new_table);
@@ -369,13 +368,6 @@
 			    table_entry_size);
 	table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
 
-	/*
-	 * First three entries in the table array are reserved for the DSDT
-	 * and 32bit/64bit FACS, which are not actually present in the
-	 * RSDT/XSDT - they come from the FADT
-	 */
-	acpi_gbl_root_table_list.current_table_count = 3;
-
 	/* Initialize the root table array from the RSDT/XSDT */
 
 	for (i = 0; i < table_count; i++) {
@@ -412,3 +404,36 @@
 
 	return_ACPI_STATUS(AE_OK);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_is_valid_signature
+ *
+ * PARAMETERS:  signature           - Sig string to be validated
+ *
+ * RETURN:      TRUE if signature is correct length and has valid characters
+ *
+ * DESCRIPTION: Validate an ACPI table signature.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_signature(char *signature)
+{
+	u32 i;
+
+	/* Validate the signature length */
+
+	if (strlen(signature) != ACPI_NAME_SIZE) {
+		return (FALSE);
+	}
+
+	/* Validate each character in the signature */
+
+	for (i = 0; i < ACPI_NAME_SIZE; i++) {
+		if (!acpi_ut_valid_acpi_char(signature[i], i)) {
+			return (FALSE);
+		}
+	}
+
+	return (TRUE);
+}
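
acpi_is_valid_signature() lets acpi_tb_find_table() reject malformed lookups before scanning the root table list. The accepted character set is whatever acpi_ut_valid_acpi_char() allows (normally the ACPI name characters A-Z, 0-9 and '_'), so under that assumption:

	acpi_is_valid_signature("SSDT");	/* TRUE:  four valid name characters */
	acpi_is_valid_signature("DS");		/* FALSE: wrong length */
	acpi_is_valid_signature("DS?T");	/* FALSE: '?' is not a valid name character */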
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index 9682d40..55ee14c 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -51,9 +51,6 @@
 #define _COMPONENT          ACPI_TABLES
 ACPI_MODULE_NAME("tbxfload")
 
-/* Local prototypes */
-static acpi_status acpi_tb_load_namespace(void);
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_load_tables
@@ -65,7 +62,6 @@
  * DESCRIPTION: Load the ACPI tables from the RSDT/XSDT
  *
  ******************************************************************************/
-
 acpi_status __init acpi_load_tables(void)
 {
 	acpi_status status;
@@ -75,6 +71,13 @@
 	/* Load the namespace from the tables */
 
 	status = acpi_tb_load_namespace();
+
+	/* Don't let single failures abort the load */
+
+	if (status == AE_CTRL_TERMINATE) {
+		status = AE_OK;
+	}
+
 	if (ACPI_FAILURE(status)) {
 		ACPI_EXCEPTION((AE_INFO, status,
 				"While loading namespace from ACPI tables"));
@@ -97,11 +100,14 @@
  *              the RSDT/XSDT.
  *
  ******************************************************************************/
-static acpi_status acpi_tb_load_namespace(void)
+acpi_status acpi_tb_load_namespace(void)
 {
 	acpi_status status;
 	u32 i;
 	struct acpi_table_header *new_dsdt;
+	struct acpi_table_desc *table;
+	u32 tables_loaded = 0;
+	u32 tables_failed = 0;
 
 	ACPI_FUNCTION_TRACE(tb_load_namespace);
 
@@ -111,15 +117,11 @@
 	 * Load the namespace. The DSDT is required, but any SSDT and
 	 * PSDT tables are optional. Verify the DSDT.
 	 */
+	table = &acpi_gbl_root_table_list.tables[acpi_gbl_dsdt_index];
+
 	if (!acpi_gbl_root_table_list.current_table_count ||
-	    !ACPI_COMPARE_NAME(&
-			       (acpi_gbl_root_table_list.
-				tables[ACPI_TABLE_INDEX_DSDT].signature),
-			       ACPI_SIG_DSDT)
-	    ||
-	    ACPI_FAILURE(acpi_tb_validate_table
-			 (&acpi_gbl_root_table_list.
-			  tables[ACPI_TABLE_INDEX_DSDT]))) {
+	    !ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_DSDT) ||
+	    ACPI_FAILURE(acpi_tb_validate_table(table))) {
 		status = AE_NO_ACPI_TABLES;
 		goto unlock_and_exit;
 	}
@@ -130,8 +132,7 @@
 	 * array can change dynamically as tables are loaded at run-time. Note:
 	 * .Pointer field is not validated until after call to acpi_tb_validate_table.
 	 */
-	acpi_gbl_DSDT =
-	    acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
+	acpi_gbl_DSDT = table->pointer;
 
 	/*
 	 * Optionally copy the entire DSDT to local memory (instead of simply
@@ -140,7 +141,7 @@
 	 * the DSDT.
 	 */
 	if (acpi_gbl_copy_dsdt_locally) {
-		new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
+		new_dsdt = acpi_tb_copy_dsdt(acpi_gbl_dsdt_index);
 		if (new_dsdt) {
 			acpi_gbl_DSDT = new_dsdt;
 		}
@@ -157,41 +158,65 @@
 
 	/* Load and parse tables */
 
-	status = acpi_ns_load_table(ACPI_TABLE_INDEX_DSDT, acpi_gbl_root_node);
+	status = acpi_ns_load_table(acpi_gbl_dsdt_index, acpi_gbl_root_node);
 	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+		ACPI_EXCEPTION((AE_INFO, status, "[DSDT] table load failed"));
+		tables_failed++;
+	} else {
+		tables_loaded++;
 	}
 
 	/* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
 
 	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 	for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+		table = &acpi_gbl_root_table_list.tables[i];
+
 		if (!acpi_gbl_root_table_list.tables[i].address ||
-		    (!ACPI_COMPARE_NAME
-		     (&(acpi_gbl_root_table_list.tables[i].signature),
-		      ACPI_SIG_SSDT)
-		     &&
-		     !ACPI_COMPARE_NAME(&
-					(acpi_gbl_root_table_list.tables[i].
-					 signature), ACPI_SIG_PSDT)
-		     &&
-		     !ACPI_COMPARE_NAME(&
-					(acpi_gbl_root_table_list.tables[i].
-					 signature), ACPI_SIG_OSDT))
-		    ||
-		    ACPI_FAILURE(acpi_tb_validate_table
-				 (&acpi_gbl_root_table_list.tables[i]))) {
+		    (!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
+		     && !ACPI_COMPARE_NAME(table->signature.ascii,
+					   ACPI_SIG_PSDT)
+		     && !ACPI_COMPARE_NAME(table->signature.ascii,
+					   ACPI_SIG_OSDT))
+		    || ACPI_FAILURE(acpi_tb_validate_table(table))) {
 			continue;
 		}
 
 		/* Ignore errors while loading tables, get as many as possible */
 
 		(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
-		(void)acpi_ns_load_table(i, acpi_gbl_root_node);
+		status = acpi_ns_load_table(i, acpi_gbl_root_node);
+		if (ACPI_FAILURE(status)) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"(%4.4s:%8.8s) while loading table",
+					table->signature.ascii,
+					table->pointer->oem_table_id));
+			tables_failed++;
+
+			ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
+					      "Table [%4.4s:%8.8s] (id FF) - Table namespace load failed\n\n",
+					      table->signature.ascii,
+					      table->pointer->oem_table_id));
+		} else {
+			tables_loaded++;
+		}
+
 		(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 	}
 
-	ACPI_INFO((AE_INFO, "All ACPI Tables successfully acquired"));
+	if (!tables_failed) {
+		ACPI_INFO((AE_INFO,
+			   "%u ACPI AML tables successfully acquired and loaded",
+			   tables_loaded));
+	} else {
+		ACPI_ERROR((AE_INFO,
+			    "%u table load failures, %u successful",
+			    tables_failed, tables_loaded));
+
+		/* Indicate at least one failure */
+
+		status = AE_CTRL_TERMINATE;
+	}
 
 unlock_and_exit:
 	(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
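
acpi_tb_load_namespace() now signals a partial failure with AE_CTRL_TERMINATE, which acpi_load_tables() masks so that boot continues with whatever tables did load. A reduced caller-side sketch, assuming the stock ACPICA table-manager entry points (acpi_initialize_tables() is existing public API, not part of this series):

	acpi_status status;

	status = acpi_initialize_tables(NULL, 16, FALSE);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Loads the DSDT plus any SSDT/PSDT/OSDT tables; individual table
	 * failures are logged and counted but no longer abort the call */

	status = acpi_load_tables();
	if (ACPI_FAILURE(status)) {
		return (status);	/* Only fatal problems still get here */
	}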
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index cd02693..4146229 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -45,6 +45,7 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utdebug")
@@ -560,8 +561,37 @@
 	}
 }
 
-#endif
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_trace_point
+ *
+ * PARAMETERS:  type                - Trace event type
+ *              begin               - TRUE if before execution
+ *              aml                 - Executed AML address
+ *              pathname            - Object path
+ *              pointer             - Pointer to the related object
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Interpreter execution trace.
+ *
+ ******************************************************************************/
 
+void
+acpi_trace_point(acpi_trace_event_type type, u8 begin, u8 *aml, char *pathname)
+{
+
+	ACPI_FUNCTION_ENTRY();
+
+	acpi_ex_trace_point(type, begin, aml, pathname);
+
+#ifdef ACPI_USE_SYSTEM_TRACER
+	acpi_os_trace_point(type, begin, aml, pathname);
+#endif
+}
+
+ACPI_EXPORT_SYMBOL(acpi_trace_point)
+#endif
 #ifdef ACPI_APPLICATION
 /*******************************************************************************
  *
@@ -575,7 +605,6 @@
  * DESCRIPTION: Print error message to the console, used by applications.
  *
  ******************************************************************************/
-
 void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
 {
 	va_list args;
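
acpi_trace_point() fans each interpreter trace event out to acpi_ex_trace_point() and, when ACPI_USE_SYSTEM_TRACER is defined, to a host-supplied acpi_os_trace_point() hook with the same argument list. A hedged sketch of such a hook; the body is a placeholder, and how a host decodes the event type is entirely its own policy:

	void
	acpi_os_trace_point(acpi_trace_event_type type,
			    u8 begin, u8 *aml, char *pathname)
	{
		/* Forward interpreter trace events to the host's tracing facility */

		acpi_os_printf("[acpica-trace] type=%u %s aml=%p path=%s\n",
			       (u32)type, begin ? "begin" : "end",
			       aml, pathname ? pathname : "<none>");
	}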
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 71fce38..1638312 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -209,6 +209,9 @@
 			acpi_ut_delete_object_desc(object->method.mutex);
 			object->method.mutex = NULL;
 		}
+		if (object->method.node) {
+			object->method.node = NULL;
+		}
 		break;
 
 	case ACPI_TYPE_REGION:
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
index 857af82..75a94f5 100644
--- a/drivers/acpi/acpica/utfileio.c
+++ b/drivers/acpi/acpica/utfileio.c
@@ -312,7 +312,7 @@
 	/* Get the entire file */
 
 	fprintf(stderr,
-		"Reading ACPI table from file %10s - Length %.8u (0x%06X)\n",
+		"Reading ACPI table from file %12s - Length %.8u (0x%06X)\n",
 		filename, file_size, file_size);
 
 	status = acpi_ut_read_table(file, table, &table_length);
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index e402e07..28ab3a1 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -204,11 +204,10 @@
 	acpi_gbl_acpi_hardware_present = TRUE;
 	acpi_gbl_last_owner_id_index = 0;
 	acpi_gbl_next_owner_id_offset = 0;
-	acpi_gbl_trace_dbg_level = 0;
-	acpi_gbl_trace_dbg_layer = 0;
 	acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
 	acpi_gbl_osi_mutex = NULL;
 	acpi_gbl_reg_methods_executed = FALSE;
+	acpi_gbl_max_loop_iterations = 0xFFFF;
 
 	/* Hardware oriented */
 
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 71b6653..bd4443b 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -75,7 +75,7 @@
 	return (FALSE);
 }
 
-#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP)
+#if (defined ACPI_ASL_COMPILER || defined ACPI_EXEC_APP || defined ACPI_NAMES_APP)
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_is_aml_table
@@ -376,7 +376,7 @@
 	/* Get the full pathname to the node */
 
 	buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
-	status = acpi_ns_handle_to_pathname(obj_handle, &buffer);
+	status = acpi_ns_handle_to_pathname(obj_handle, &buffer, TRUE);
 	if (ACPI_FAILURE(status)) {
 		return;
 	}
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
new file mode 100644
index 0000000..1d5f6b1
--- /dev/null
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -0,0 +1,380 @@
+/*******************************************************************************
+ *
+ * Module Name: utnonansi - Non-ANSI C library functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utnonansi")
+
+/*
+ * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
+ * version of strtoul.
+ */
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strlwr (strlwr)
+ *
+ * PARAMETERS:  src_string      - The source string to convert
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert a string to lowercase
+ *
+ ******************************************************************************/
+void acpi_ut_strlwr(char *src_string)
+{
+	char *string;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!src_string) {
+		return;
+	}
+
+	/* Walk entire string, lowercasing the letters */
+
+	for (string = src_string; *string; string++) {
+		*string = (char)tolower((int)*string);
+	}
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strupr (strupr)
+ *
+ * PARAMETERS:  src_string      - The source string to convert
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Convert a string to uppercase
+ *
+ ******************************************************************************/
+
+void acpi_ut_strupr(char *src_string)
+{
+	char *string;
+
+	ACPI_FUNCTION_ENTRY();
+
+	if (!src_string) {
+		return;
+	}
+
+	/* Walk entire string, uppercasing the letters */
+
+	for (string = src_string; *string; string++) {
+		*string = (char)toupper((int)*string);
+	}
+}
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_stricmp (stricmp)
+ *
+ * PARAMETERS:  string1             - first string to compare
+ *              string2             - second string to compare
+ *
+ * RETURN:      int that signifies string relationship. Zero means strings
+ *              are equal.
+ *
+ * DESCRIPTION: Case-insensitive string compare. Implementation of the
+ *              non-ANSI stricmp function.
+ *
+ ******************************************************************************/
+
+int acpi_ut_stricmp(char *string1, char *string2)
+{
+	int c1;
+	int c2;
+
+	do {
+		c1 = tolower((int)*string1);
+		c2 = tolower((int)*string2);
+
+		string1++;
+		string2++;
+	}
+	while ((c1 == c2) && (c1));
+
+	return (c1 - c2);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_strtoul64
+ *
+ * PARAMETERS:  string          - Null terminated string
+ *              base            - Radix of the string: 16 or ACPI_ANY_BASE;
+ *                                ACPI_ANY_BASE means 'in behalf of to_integer'
+ *                                ACPI_ANY_BASE means 'on behalf of to_integer'
+ *
+ * RETURN:      Status and Converted value
+ *
+ * DESCRIPTION: Convert a string into an unsigned value. Performs either a
+ *              32-bit or 64-bit conversion, depending on the current mode
+ *              of the interpreter.
+ *
+ * NOTE:        Does not support Octal strings, not needed.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
+{
+	u32 this_digit = 0;
+	u64 return_value = 0;
+	u64 quotient;
+	u64 dividend;
+	u32 to_integer_op = (base == ACPI_ANY_BASE);
+	u32 mode32 = (acpi_gbl_integer_byte_width == 4);
+	u8 valid_digits = 0;
+	u8 sign_of0x = 0;
+	u8 term = 0;
+
+	ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
+
+	switch (base) {
+	case ACPI_ANY_BASE:
+	case 16:
+
+		break;
+
+	default:
+
+		/* Invalid Base */
+
+		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	}
+
+	if (!string) {
+		goto error_exit;
+	}
+
+	/* Skip over any white space in the buffer */
+
+	while ((*string) && (isspace((int)*string) || *string == '\t')) {
+		string++;
+	}
+
+	if (to_integer_op) {
+		/*
+		 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
+		 * We need to determine if it is decimal or hexadecimal.
+		 */
+		if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
+			sign_of0x = 1;
+			base = 16;
+
+			/* Skip over the leading '0x' */
+			string += 2;
+		} else {
+			base = 10;
+		}
+	}
+
+	/* Any string left? Check that '0x' is not followed by white space. */
+
+	if (!(*string) || isspace((int)*string) || *string == '\t') {
+		if (to_integer_op) {
+			goto error_exit;
+		} else {
+			goto all_done;
+		}
+	}
+
+	/*
+	 * Perform a 32-bit or 64-bit conversion, depending upon the current
+	 * execution mode of the interpreter
+	 */
+	dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
+
+	/* Main loop: convert the string to a 32- or 64-bit integer */
+
+	while (*string) {
+		if (isdigit((int)*string)) {
+
+			/* Convert ASCII 0-9 to Decimal value */
+
+			this_digit = ((u8)*string) - '0';
+		} else if (base == 10) {
+
+			/* Digit is out of range; possible in to_integer case only */
+
+			term = 1;
+		} else {
+			this_digit = (u8)toupper((int)*string);
+			if (isxdigit((int)this_digit)) {
+
+				/* Convert ASCII Hex char to value */
+
+				this_digit = this_digit - 'A' + 10;
+			} else {
+				term = 1;
+			}
+		}
+
+		if (term) {
+			if (to_integer_op) {
+				goto error_exit;
+			} else {
+				break;
+			}
+		} else if ((valid_digits == 0) && (this_digit == 0)
+			   && !sign_of0x) {
+
+			/* Skip zeros */
+			string++;
+			continue;
+		}
+
+		valid_digits++;
+
+		if (sign_of0x
+		    && ((valid_digits > 16)
+			|| ((valid_digits > 8) && mode32))) {
+			/*
+			 * This is the to_integer operation case.
+			 * There are no restrictions on string-to-integer
+			 * conversion, see the ACPI spec.
+			 */
+			goto error_exit;
+		}
+
+		/* Divide the digit into the correct position */
+
+		(void)acpi_ut_short_divide((dividend - (u64)this_digit),
+					   base, &quotient, NULL);
+
+		if (return_value > quotient) {
+			if (to_integer_op) {
+				goto error_exit;
+			} else {
+				break;
+			}
+		}
+
+		return_value *= base;
+		return_value += this_digit;
+		string++;
+	}
+
+	/* All done, normal exit */
+
+all_done:
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
+			  ACPI_FORMAT_UINT64(return_value)));
+
+	*ret_integer = return_value;
+	return_ACPI_STATUS(AE_OK);
+
+error_exit:
+	/* Base was set/validated above */
+
+	if (base == 10) {
+		return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
+	} else {
+		return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
+	}
+}
+
+#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
+ *
+ * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
+ *              functions. This is the size of the Destination buffer.
+ *
+ * RETURN:      TRUE if the operation would overflow the destination buffer.
+ *
+ * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
+ *              the result of the operation will not overflow the output string
+ *              buffer.
+ *
+ * NOTE:        These functions are typically only helpful for processing
+ *              user input and command lines. For most ACPICA code, the
+ *              required buffer length is precisely calculated before buffer
+ *              allocation, so the use of these functions is unnecessary.
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
+{
+
+	if (strlen(source) >= dest_size) {
+		return (TRUE);
+	}
+
+	strcpy(dest, source);
+	return (FALSE);
+}
+
+u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
+{
+
+	if ((strlen(dest) + strlen(source)) >= dest_size) {
+		return (TRUE);
+	}
+
+	strcat(dest, source);
+	return (FALSE);
+}
+
+u8
+acpi_ut_safe_strncat(char *dest,
+		     acpi_size dest_size,
+		     char *source, acpi_size max_transfer_length)
+{
+	acpi_size actual_transfer_length;
+
+	actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
+
+	if ((strlen(dest) + actual_transfer_length) >= dest_size) {
+		return (TRUE);
+	}
+
+	strncat(dest, source, max_transfer_length);
+	return (FALSE);
+}
+#endif
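
The safe string helpers return TRUE when an operation would overflow the destination, so the natural calling pattern is to bail out as soon as any step reports truncation. A short sketch using the helpers above (they are only built for the debugger/application case, per the #if above); the buffer size is a hypothetical choice:

	char pathname[ACPI_PATH_SEGMENT_LENGTH * 8];

	if (acpi_ut_safe_strcpy(pathname, sizeof(pathname), "\\_SB_") ||
	    acpi_ut_safe_strcat(pathname, sizeof(pathname), ".PCI0") ||
	    acpi_ut_safe_strcat(pathname, sizeof(pathname), ".LPCB")) {

		/* TRUE from any helper means the result would not have fit */

		return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
	}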
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 8f3c883..4ddd105 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -48,286 +48,6 @@
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utstring")
 
-/*
- * Non-ANSI C library functions - strlwr, strupr, stricmp, and a 64-bit
- * version of strtoul.
- */
-#ifdef ACPI_ASL_COMPILER
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strlwr (strlwr)
- *
- * PARAMETERS:  src_string      - The source string to convert
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert string to lowercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-void acpi_ut_strlwr(char *src_string)
-{
-	char *string;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!src_string) {
-		return;
-	}
-
-	/* Walk entire string, lowercasing the letters */
-
-	for (string = src_string; *string; string++) {
-		*string = (char)tolower((int)*string);
-	}
-
-	return;
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_ut_stricmp (stricmp)
- *
- * PARAMETERS:  string1             - first string to compare
- *              string2             - second string to compare
- *
- * RETURN:      int that signifies string relationship. Zero means strings
- *              are equal.
- *
- * DESCRIPTION: Implementation of the non-ANSI stricmp function (compare
- *              strings with no case sensitivity)
- *
- ******************************************************************************/
-
-int acpi_ut_stricmp(char *string1, char *string2)
-{
-	int c1;
-	int c2;
-
-	do {
-		c1 = tolower((int)*string1);
-		c2 = tolower((int)*string2);
-
-		string1++;
-		string2++;
-	}
-	while ((c1 == c2) && (c1));
-
-	return (c1 - c2);
-}
-#endif
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strupr (strupr)
- *
- * PARAMETERS:  src_string      - The source string to convert
- *
- * RETURN:      None
- *
- * DESCRIPTION: Convert string to uppercase
- *
- * NOTE: This is not a POSIX function, so it appears here, not in utclib.c
- *
- ******************************************************************************/
-
-void acpi_ut_strupr(char *src_string)
-{
-	char *string;
-
-	ACPI_FUNCTION_ENTRY();
-
-	if (!src_string) {
-		return;
-	}
-
-	/* Walk entire string, uppercasing the letters */
-
-	for (string = src_string; *string; string++) {
-		*string = (char)toupper((int)*string);
-	}
-
-	return;
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_strtoul64
- *
- * PARAMETERS:  string          - Null terminated string
- *              base            - Radix of the string: 16 or ACPI_ANY_BASE;
- *                                ACPI_ANY_BASE means 'in behalf of to_integer'
- *              ret_integer     - Where the converted integer is returned
- *
- * RETURN:      Status and Converted value
- *
- * DESCRIPTION: Convert a string into an unsigned value. Performs either a
- *              32-bit or 64-bit conversion, depending on the current mode
- *              of the interpreter.
- *              NOTE: Does not support Octal strings, not needed.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
-{
-	u32 this_digit = 0;
-	u64 return_value = 0;
-	u64 quotient;
-	u64 dividend;
-	u32 to_integer_op = (base == ACPI_ANY_BASE);
-	u32 mode32 = (acpi_gbl_integer_byte_width == 4);
-	u8 valid_digits = 0;
-	u8 sign_of0x = 0;
-	u8 term = 0;
-
-	ACPI_FUNCTION_TRACE_STR(ut_stroul64, string);
-
-	switch (base) {
-	case ACPI_ANY_BASE:
-	case 16:
-
-		break;
-
-	default:
-
-		/* Invalid Base */
-
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
-	if (!string) {
-		goto error_exit;
-	}
-
-	/* Skip over any white space in the buffer */
-
-	while ((*string) && (isspace((int)*string) || *string == '\t')) {
-		string++;
-	}
-
-	if (to_integer_op) {
-		/*
-		 * Base equal to ACPI_ANY_BASE means 'ToInteger operation case'.
-		 * We need to determine if it is decimal or hexadecimal.
-		 */
-		if ((*string == '0') && (tolower((int)*(string + 1)) == 'x')) {
-			sign_of0x = 1;
-			base = 16;
-
-			/* Skip over the leading '0x' */
-			string += 2;
-		} else {
-			base = 10;
-		}
-	}
-
-	/* Any string left? Check that '0x' is not followed by white space. */
-
-	if (!(*string) || isspace((int)*string) || *string == '\t') {
-		if (to_integer_op) {
-			goto error_exit;
-		} else {
-			goto all_done;
-		}
-	}
-
-	/*
-	 * Perform a 32-bit or 64-bit conversion, depending upon the current
-	 * execution mode of the interpreter
-	 */
-	dividend = (mode32) ? ACPI_UINT32_MAX : ACPI_UINT64_MAX;
-
-	/* Main loop: convert the string to a 32- or 64-bit integer */
-
-	while (*string) {
-		if (isdigit((int)*string)) {
-
-			/* Convert ASCII 0-9 to Decimal value */
-
-			this_digit = ((u8)*string) - '0';
-		} else if (base == 10) {
-
-			/* Digit is out of range; possible in to_integer case only */
-
-			term = 1;
-		} else {
-			this_digit = (u8)toupper((int)*string);
-			if (isxdigit((int)this_digit)) {
-
-				/* Convert ASCII Hex char to value */
-
-				this_digit = this_digit - 'A' + 10;
-			} else {
-				term = 1;
-			}
-		}
-
-		if (term) {
-			if (to_integer_op) {
-				goto error_exit;
-			} else {
-				break;
-			}
-		} else if ((valid_digits == 0) && (this_digit == 0)
-			   && !sign_of0x) {
-
-			/* Skip zeros */
-			string++;
-			continue;
-		}
-
-		valid_digits++;
-
-		if (sign_of0x
-		    && ((valid_digits > 16)
-			|| ((valid_digits > 8) && mode32))) {
-			/*
-			 * This is to_integer operation case.
-			 * No any restrictions for string-to-integer conversion,
-			 * see ACPI spec.
-			 */
-			goto error_exit;
-		}
-
-		/* Divide the digit into the correct position */
-
-		(void)acpi_ut_short_divide((dividend - (u64)this_digit),
-					   base, &quotient, NULL);
-
-		if (return_value > quotient) {
-			if (to_integer_op) {
-				goto error_exit;
-			} else {
-				break;
-			}
-		}
-
-		return_value *= base;
-		return_value += this_digit;
-		string++;
-	}
-
-	/* All done, normal exit */
-
-all_done:
-
-	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Converted value: %8.8X%8.8X\n",
-			  ACPI_FORMAT_UINT64(return_value)));
-
-	*ret_integer = return_value;
-	return_ACPI_STATUS(AE_OK);
-
-error_exit:
-	/* Base was set/validated above */
-
-	if (base == 10) {
-		return_ACPI_STATUS(AE_BAD_DECIMAL_CONSTANT);
-	} else {
-		return_ACPI_STATUS(AE_BAD_HEX_CONSTANT);
-	}
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_print_string
@@ -342,7 +62,6 @@
  *              sequences.
  *
  ******************************************************************************/
-
 void acpi_ut_print_string(char *string, u16 max_length)
 {
 	u32 i;
@@ -584,64 +303,3 @@
 	}
 }
 #endif
-
-#if defined (ACPI_DEBUGGER) || defined (ACPI_APPLICATION)
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ut_safe_strcpy, acpi_ut_safe_strcat, acpi_ut_safe_strncat
- *
- * PARAMETERS:  Adds a "DestSize" parameter to each of the standard string
- *              functions. This is the size of the Destination buffer.
- *
- * RETURN:      TRUE if the operation would overflow the destination buffer.
- *
- * DESCRIPTION: Safe versions of standard Clib string functions. Ensure that
- *              the result of the operation will not overflow the output string
- *              buffer.
- *
- * NOTE:        These functions are typically only helpful for processing
- *              user input and command lines. For most ACPICA code, the
- *              required buffer length is precisely calculated before buffer
- *              allocation, so the use of these functions is unnecessary.
- *
- ******************************************************************************/
-
-u8 acpi_ut_safe_strcpy(char *dest, acpi_size dest_size, char *source)
-{
-
-	if (strlen(source) >= dest_size) {
-		return (TRUE);
-	}
-
-	strcpy(dest, source);
-	return (FALSE);
-}
-
-u8 acpi_ut_safe_strcat(char *dest, acpi_size dest_size, char *source)
-{
-
-	if ((strlen(dest) + strlen(source)) >= dest_size) {
-		return (TRUE);
-	}
-
-	strcat(dest, source);
-	return (FALSE);
-}
-
-u8
-acpi_ut_safe_strncat(char *dest,
-		     acpi_size dest_size,
-		     char *source, acpi_size max_transfer_length)
-{
-	acpi_size actual_transfer_length;
-
-	actual_transfer_length = ACPI_MIN(max_transfer_length, strlen(source));
-
-	if ((strlen(dest) + actual_transfer_length) >= dest_size) {
-		return (TRUE);
-	}
-
-	strncat(dest, source, max_transfer_length);
-	return (FALSE);
-}
-#endif
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 51cf52d..4f33281 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -92,13 +92,6 @@
 
 	acpi_ut_mutex_terminate();
 
-#ifdef ACPI_DEBUGGER
-
-	/* Shut down the debugger */
-
-	acpi_db_terminate();
-#endif
-
 	/* Now we can shutdown the OS-dependent layer */
 
 	status = acpi_os_terminate();
@@ -517,7 +510,8 @@
 
 	/* Parameter validation */
 
-	if (!in_buffer || !return_buffer || (length < 16)) {
+	if (!in_buffer || !return_buffer
+	    || (length < ACPI_PLD_REV1_BUFFER_SIZE)) {
 		return (AE_BAD_PARAMETER);
 	}
 
@@ -567,7 +561,7 @@
 	pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword);
 	pld_info->order = ACPI_PLD_GET_ORDER(&dword);
 
-	if (length >= ACPI_PLD_BUFFER_SIZE) {
+	if (length >= ACPI_PLD_REV2_BUFFER_SIZE) {
 
 		/* Fifth 32-bit DWord (Revision 2 of _PLD) */
 
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 42a32a6..a7137ec 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -124,17 +124,6 @@
 		return_ACPI_STATUS(status);
 	}
 
-	/* If configured, initialize the AML debugger */
-
-#ifdef ACPI_DEBUGGER
-	status = acpi_db_initialize();
-	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status,
-				"During Debugger initialization"));
-		return_ACPI_STATUS(status);
-	}
-#endif
-
 	return_ACPI_STATUS(AE_OK);
 }
 
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index a85ac07..a2c8d7a 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -24,10 +24,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index a095d4f..0431883 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -18,10 +18,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 04ab5c9..6330f55 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -17,10 +17,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 3670bba..6682c5d 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -18,10 +18,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 2bfd53c..23981ac 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -23,10 +23,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 06e9b41..20b3fcf 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -21,10 +21,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b3628cc..b719ab3 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -18,10 +18,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 278dc4b..96809cd 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -20,10 +20,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 513e7230e..46506e7 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -15,10 +15,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -423,6 +419,406 @@
 	acpi_evaluate_ost(handle, type, ost_code, NULL);
 }
 
+static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
+{
+	struct acpi_device *device = data;
+
+	device->driver->ops.notify(device, event);
+}
+
+static void acpi_device_notify_fixed(void *data)
+{
+	struct acpi_device *device = data;
+
+	/* Fixed hardware devices have no handles */
+	acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
+}
+
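+/*
+ * Fixed-event handlers run in interrupt context, so defer the notification
+ * to process context via acpi_os_execute().
+ */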
+static u32 acpi_device_fixed_event(void *data)
+{
+	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
+	return ACPI_INTERRUPT_HANDLED;
+}
+
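+/*
+ * Power and sleep buttons are fixed-hardware devices, so they get fixed
+ * event handlers; every other device gets a regular device notify handler.
+ */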
+static int acpi_device_install_notify_handler(struct acpi_device *device)
+{
+	acpi_status status;
+
+	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+		status =
+		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+						     acpi_device_fixed_event,
+						     device);
+	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+		status =
+		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+						     acpi_device_fixed_event,
+						     device);
+	else
+		status = acpi_install_notify_handler(device->handle,
+						     ACPI_DEVICE_NOTIFY,
+						     acpi_device_notify,
+						     device);
+
+	if (ACPI_FAILURE(status))
+		return -EINVAL;
+	return 0;
+}
+
+static void acpi_device_remove_notify_handler(struct acpi_device *device)
+{
+	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
+		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
+						acpi_device_fixed_event);
+	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
+		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
+						acpi_device_fixed_event);
+	else
+		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+					   acpi_device_notify);
+}
+
+/* --------------------------------------------------------------------------
+                             Device Matching
+   -------------------------------------------------------------------------- */
+
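+/*
+ * Return @adev if @dev is the first (primary) physical device attached to
+ * it, or NULL otherwise.
+ */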
+static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev,
+						      const struct device *dev)
+{
+	struct mutex *physical_node_lock = &adev->physical_node_lock;
+
+	mutex_lock(physical_node_lock);
+	if (list_empty(&adev->physical_node_list)) {
+		adev = NULL;
+	} else {
+		const struct acpi_device_physical_node *node;
+
+		node = list_first_entry(&adev->physical_node_list,
+					struct acpi_device_physical_node, node);
+		if (node->dev != dev)
+			adev = NULL;
+	}
+	mutex_unlock(physical_node_lock);
+	return adev;
+}
+
+/**
+ * acpi_device_is_first_physical_node - Is given dev first physical node
+ * @adev: ACPI companion device
+ * @dev: Physical device to check
+ *
+ * Function checks if given @dev is the first physical device attached to
+ * the ACPI companion device. This distinction is needed in some cases
+ * where the same companion device is shared between many physical devices.
+ *
+ * Note that the caller has to provide a valid @adev pointer.
+ */
+bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+					const struct device *dev)
+{
+	return !!acpi_primary_dev_companion(adev, dev);
+}
+
+/*
+ * acpi_companion_match() - Can we match via ACPI companion device
+ * @dev: Device in question
+ *
+ * Check if the given device has an ACPI companion and if that companion has
+ * a valid list of PNP IDs, and if the device is the first (primary) physical
+ * device associated with it.  Return the companion pointer if that's the case
+ * or NULL otherwise.
+ *
+ * If multiple physical devices are attached to a single ACPI companion, we need
+ * to be careful.  The usage scenario for this kind of relationship is that all
+ * of the physical devices in question use resources provided by the ACPI
+ * companion.  A typical case is an MFD device where all the sub-devices share
+ * the parent's ACPI companion.  In such cases we can only allow the primary
+ * (first) physical device to be matched with the help of the companion's PNP
+ * IDs.
+ *
+ * Additional physical devices sharing the ACPI companion can still use
+ * resources available from it but they will be matched normally using functions
+ * provided by their bus types (and analogously for their modalias).
+ */
+struct acpi_device *acpi_companion_match(const struct device *dev)
+{
+	struct acpi_device *adev;
+
+	adev = ACPI_COMPANION(dev);
+	if (!adev)
+		return NULL;
+
+	if (list_empty(&adev->pnp.ids))
+		return NULL;
+
+	return acpi_primary_dev_companion(adev, dev);
+}
+
+/**
+ * acpi_of_match_device - Match device object using the "compatible" property.
+ * @adev: ACPI device object to match.
+ * @of_match_table: List of device IDs to match against.
+ *
+ * If @adev has ACPI_DT_NAMESPACE_HID in its list of
+ * identifiers and a _DSD object with the "compatible" property, use that
+ * property to match against the given list of identifiers.
+ */
+static bool acpi_of_match_device(struct acpi_device *adev,
+				 const struct of_device_id *of_match_table)
+{
+	const union acpi_object *of_compatible, *obj;
+	int i, nval;
+
+	if (!adev)
+		return false;
+
+	of_compatible = adev->data.of_compatible;
+	if (!of_match_table || !of_compatible)
+		return false;
+
+	if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+		nval = of_compatible->package.count;
+		obj = of_compatible->package.elements;
+	} else { /* Must be ACPI_TYPE_STRING. */
+		nval = 1;
+		obj = of_compatible;
+	}
+	/* Now we can look for the driver DT compatible strings */
+	for (i = 0; i < nval; i++, obj++) {
+		const struct of_device_id *id;
+
+		for (id = of_match_table; id->compatible[0]; id++)
+			if (!strcasecmp(obj->string.pointer, id->compatible))
+				return true;
+	}
+
+	return false;
+}
+
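+/*
+ * Match a _CLS-based id: compare each masked byte of the 24-bit class code
+ * against the hex string stored in @hwid.
+ */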
+static bool __acpi_match_device_cls(const struct acpi_device_id *id,
+				    struct acpi_hardware_id *hwid)
+{
+	int i, msk, byte_shift;
+	char buf[3];
+
+	if (!id->cls)
+		return false;
+
+	/* Apply class-code bitmask, before checking each class-code byte */
+	for (i = 1; i <= 3; i++) {
+		byte_shift = 8 * (3 - i);
+		msk = (id->cls_msk >> byte_shift) & 0xFF;
+		if (!msk)
+			continue;
+
+		sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
+		if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
+			return false;
+	}
+	return true;
+}
+
+static const struct acpi_device_id *__acpi_match_device(
+	struct acpi_device *device,
+	const struct acpi_device_id *ids,
+	const struct of_device_id *of_ids)
+{
+	const struct acpi_device_id *id;
+	struct acpi_hardware_id *hwid;
+
+	/*
+	 * If the device is not present, there is no need to load a device
+	 * driver for it.
+	 */
+	if (!device || !device->status.present)
+		return NULL;
+
+	list_for_each_entry(hwid, &device->pnp.ids, list) {
+		/* First, check the ACPI/PNP IDs provided by the caller. */
+		for (id = ids; id->id[0] || id->cls; id++) {
+			if (id->id[0] && !strcmp((char *) id->id, hwid->id))
+				return id;
+			else if (id->cls && __acpi_match_device_cls(id, hwid))
+				return id;
+		}
+
+		/*
+		 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
+		 * "compatible" property if found.
+		 *
+		 * The id returned here is not a valid entry from @ids, but the
+		 * only caller that passes a non-NULL of_ids is just interested
+		 * in whether or not the return value is NULL.
+		 */
+		if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
+		    && acpi_of_match_device(device, of_ids))
+			return id;
+	}
+	return NULL;
+}
+
+/**
+ * acpi_match_device - Match a struct device against a given list of ACPI IDs
+ * @ids: Array of struct acpi_device_id object to match against.
+ * @dev: The device structure to match.
+ *
+ * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
+ * object for that handle and use that object to match against a given list of
+ * device IDs.
+ *
+ * Return a pointer to the first matching ID on success or %NULL on failure.
+ */
+const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
+					       const struct device *dev)
+{
+	return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_match_device);
+
+int acpi_match_device_ids(struct acpi_device *device,
+			  const struct acpi_device_id *ids)
+{
+	return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+}
+EXPORT_SYMBOL(acpi_match_device_ids);
+
+bool acpi_driver_match_device(struct device *dev,
+			      const struct device_driver *drv)
+{
+	if (!drv->acpi_match_table)
+		return acpi_of_match_device(ACPI_COMPANION(dev),
+					    drv->of_match_table);
+
+	return !!__acpi_match_device(acpi_companion_match(dev),
+				     drv->acpi_match_table, drv->of_match_table);
+}
+EXPORT_SYMBOL_GPL(acpi_driver_match_device);
+
+/* --------------------------------------------------------------------------
+                              ACPI Driver Management
+   -------------------------------------------------------------------------- */
+
+/**
+ * acpi_bus_register_driver - register a driver with the ACPI bus
+ * @driver: driver being registered
+ *
+ * Registers a driver with the ACPI bus.  Searches the namespace for all
+ * devices that match the driver's criteria and binds.  Returns zero for
+ * success or a negative error status for failure.
+ */
+int acpi_bus_register_driver(struct acpi_driver *driver)
+{
+	int ret;
+
+	if (acpi_disabled)
+		return -ENODEV;
+	driver->drv.name = driver->name;
+	driver->drv.bus = &acpi_bus_type;
+	driver->drv.owner = driver->owner;
+
+	ret = driver_register(&driver->drv);
+	return ret;
+}
+
+EXPORT_SYMBOL(acpi_bus_register_driver);
+
+/**
+ * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
+ * @driver: driver to unregister
+ *
+ * Unregisters a driver with the ACPI bus.  Searches the namespace for all
+ * devices that match the driver's criteria and unbinds.
+ */
+void acpi_bus_unregister_driver(struct acpi_driver *driver)
+{
+	driver_unregister(&driver->drv);
+}
+
+EXPORT_SYMBOL(acpi_bus_unregister_driver);
+
+/* --------------------------------------------------------------------------
+                              ACPI Bus operations
+   -------------------------------------------------------------------------- */
+
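+/*
+ * A device matches a driver only once it has been flagged as eligible for
+ * driver binding (flags.match_driver) and one of the driver's IDs matches.
+ */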
+static int acpi_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	struct acpi_driver *acpi_drv = to_acpi_driver(drv);
+
+	return acpi_dev->flags.match_driver
+		&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
+}
+
+static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
+}
+
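+/*
+ * Bind an ACPI driver to a device: invoke its .add() callback and, if the
+ * driver provides one, install its notify handler.
+ */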
+static int acpi_device_probe(struct device *dev)
+{
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
+	int ret;
+
+	if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
+		return -EINVAL;
+
+	if (!acpi_drv->ops.add)
+		return -ENOSYS;
+
+	ret = acpi_drv->ops.add(acpi_dev);
+	if (ret)
+		return ret;
+
+	acpi_dev->driver = acpi_drv;
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Driver [%s] successfully bound to device [%s]\n",
+			  acpi_drv->name, acpi_dev->pnp.bus_id));
+
+	if (acpi_drv->ops.notify) {
+		ret = acpi_device_install_notify_handler(acpi_dev);
+		if (ret) {
+			if (acpi_drv->ops.remove)
+				acpi_drv->ops.remove(acpi_dev);
+
+			acpi_dev->driver = NULL;
+			acpi_dev->driver_data = NULL;
+			return ret;
+		}
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
+			  acpi_drv->name, acpi_dev->pnp.bus_id));
+	get_device(dev);
+	return 0;
+}
+
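+/*
+ * Unbind the driver: remove its notify handler (if any), call its .remove()
+ * callback and drop the reference taken at probe time.
+ */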
+static int acpi_device_remove(struct device *dev)
+{
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	struct acpi_driver *acpi_drv = acpi_dev->driver;
+
+	if (acpi_drv) {
+		if (acpi_drv->ops.notify)
+			acpi_device_remove_notify_handler(acpi_dev);
+		if (acpi_drv->ops.remove)
+			acpi_drv->ops.remove(acpi_dev);
+	}
+	acpi_dev->driver = NULL;
+	acpi_dev->driver_data = NULL;
+
+	put_device(dev);
+	return 0;
+}
+
+struct bus_type acpi_bus_type = {
+	.name		= "acpi",
+	.match		= acpi_bus_match,
+	.probe		= acpi_device_probe,
+	.remove		= acpi_device_remove,
+	.uevent		= acpi_device_uevent,
+};
+
 /* --------------------------------------------------------------------------
                              Initialization/Cleanup
    -------------------------------------------------------------------------- */
@@ -661,7 +1057,9 @@
 	 */
 	acpi_root_dir = proc_mkdir(ACPI_BUS_FILE_ROOT, NULL);
 
-	return 0;
+	result = bus_register(&acpi_bus_type);
+	if (!result)
+		return 0;
 
 	/* Mimic structured exception handling */
       error1:
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 6d5d183..5c3b091 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
index 6c9ee68..d0918d4 100644
--- a/drivers/acpi/cm_sbs.c
+++ b/drivers/acpi/cm_sbs.c
@@ -11,10 +11,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index c8ead9f..12c2409 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -20,10 +20,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 #include <linux/acpi.h>
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 6b1919f..68bb305 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -7,6 +7,8 @@
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
 
+#include "internal.h"
+
 #define _COMPONENT		ACPI_SYSTEM_COMPONENT
 ACPI_MODULE_NAME("debugfs");
 
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 88dbbb1..4806b7f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -15,10 +15,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -1123,6 +1119,14 @@
 	if (dev->pm_domain)
 		return -EEXIST;
 
+	/*
+	 * Only attach the power domain to the first device if the
+	 * companion is shared by multiple devices. This prevents doing power
+	 * management twice.
+	 */
+	if (!acpi_device_is_first_physical_node(adev, dev))
+		return -EBUSY;
+
 	acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
 	dev->pm_domain = &acpi_general_pm_domain;
 	if (power_on) {
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
new file mode 100644
index 0000000..4ab4582
--- /dev/null
+++ b/drivers/acpi/device_sysfs.c
@@ -0,0 +1,521 @@
+/*
+ * drivers/acpi/device_sysfs.c - ACPI device sysfs attributes and modalias.
+ *
+ * Copyright (C) 2015, Intel Corp.
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/nls.h>
+
+#include "internal.h"
+
+/**
+ * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Creates hid/cid(s) string needed for modalias and uevent
+ * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * char *modalias: "acpi:IBM0001:ACPI0001"
+ * Return: 0: no _HID and no _CID
+ *         -EINVAL: output error
+ *         -ENOMEM: output is truncated
+ */
+static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+			       int size)
+{
+	int len;
+	int count;
+	struct acpi_hardware_id *id;
+
+	/*
+	 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
+	 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
+	 * device's list.
+	 */
+	count = 0;
+	list_for_each_entry(id, &acpi_dev->pnp.ids, list)
+		if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
+			count++;
+
+	if (!count)
+		return 0;
+
+	len = snprintf(modalias, size, "acpi:");
+	if (len <= 0)
+		return len;
+
+	size -= len;
+
+	list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
+		if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
+			continue;
+
+		count = snprintf(&modalias[len], size, "%s:", id->id);
+		if (count < 0)
+			return -EINVAL;
+
+		if (count >= size)
+			return -ENOMEM;
+
+		len += count;
+		size -= count;
+	}
+	modalias[len] = '\0';
+	return len;
+}
+
+/**
+ * create_of_modalias - Creates DT compatible string for modalias and uevent
+ * @acpi_dev: ACPI device object.
+ * @modalias: Buffer to print into.
+ * @size: Size of the buffer.
+ *
+ * Expose DT compatible modalias as of:NnameTCcompatible.  This function should
+ * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
+ * ACPI/PNP IDs.
+ */
+static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+			      int size)
+{
+	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+	const union acpi_object *of_compatible, *obj;
+	int len, count;
+	int i, nval;
+	char *c;
+
+	acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+	/* DT strings are all in lower case */
+	for (c = buf.pointer; *c != '\0'; c++)
+		*c = tolower(*c);
+
+	len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
+	ACPI_FREE(buf.pointer);
+
+	if (len <= 0)
+		return len;
+
+	of_compatible = acpi_dev->data.of_compatible;
+	if (of_compatible->type == ACPI_TYPE_PACKAGE) {
+		nval = of_compatible->package.count;
+		obj = of_compatible->package.elements;
+	} else { /* Must be ACPI_TYPE_STRING. */
+		nval = 1;
+		obj = of_compatible;
+	}
+	for (i = 0; i < nval; i++, obj++) {
+		count = snprintf(&modalias[len], size, "C%s",
+				 obj->string.pointer);
+		if (count < 0)
+			return -EINVAL;
+
+		if (count >= size)
+			return -ENOMEM;
+
+		len += count;
+		size -= count;
+	}
+	modalias[len] = '\0';
+	return len;
+}
+
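+/*
+ * Emit "MODALIAS=" uevent variables for @adev: the ACPI/PNP form and, when a
+ * "compatible" property is present, the DT form as well.
+ */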
+int __acpi_device_uevent_modalias(struct acpi_device *adev,
+				  struct kobj_uevent_env *env)
+{
+	int len;
+
+	if (!adev)
+		return -ENODEV;
+
+	if (list_empty(&adev->pnp.ids))
+		return 0;
+
+	if (add_uevent_var(env, "MODALIAS="))
+		return -ENOMEM;
+
+	len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+				  sizeof(env->buf) - env->buflen);
+	if (len < 0)
+		return len;
+
+	env->buflen += len;
+	if (!adev->data.of_compatible)
+		return 0;
+
+	if (len > 0 && add_uevent_var(env, "MODALIAS="))
+		return -ENOMEM;
+
+	len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+				 sizeof(env->buf) - env->buflen);
+	if (len < 0)
+		return len;
+
+	env->buflen += len;
+
+	return 0;
+}
+
+/**
+ * acpi_device_uevent_modalias - uevent modalias for ACPI-enumerated devices.
+ *
+ * Create the uevent modalias field for ACPI-enumerated devices.
+ *
+ * Because other buses do not support ACPI HIDs & CIDs, the modalias carries
+ * them directly; e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001".
+ */
+int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+{
+	return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
+}
+EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
+
+static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
+{
+	int len, count;
+
+	if (!adev)
+		return -ENODEV;
+
+	if (list_empty(&adev->pnp.ids))
+		return 0;
+
+	len = create_pnp_modalias(adev, buf, size - 1);
+	if (len < 0) {
+		return len;
+	} else if (len > 0) {
+		buf[len++] = '\n';
+		size -= len;
+	}
+	if (!adev->data.of_compatible)
+		return len;
+
+	count = create_of_modalias(adev, buf + len, size - 1);
+	if (count < 0) {
+		return count;
+	} else if (count > 0) {
+		len += count;
+		buf[len++] = '\n';
+	}
+
+	return len;
+}
+
+/**
+ * acpi_device_modalias - modalias sysfs attribute for ACPI-enumerated devices.
+ *
+ * Create the modalias sysfs attribute for ACPI-enumerated devices.
+ *
+ * Because other buses do not support ACPI HIDs & CIDs, the modalias carries
+ * them directly; e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
+ * "acpi:IBM0001:ACPI0001".
+ */
+int acpi_device_modalias(struct device *dev, char *buf, int size)
+{
+	return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
+}
+EXPORT_SYMBOL_GPL(acpi_device_modalias);
+
+static ssize_t
+acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
+	return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
+}
+static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
+
+static ssize_t real_power_state_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	struct acpi_device *adev = to_acpi_device(dev);
+	int state;
+	int ret;
+
+	ret = acpi_device_get_power(adev, &state);
+	if (ret)
+		return ret;
+
+	return sprintf(buf, "%s\n", acpi_power_state_string(state));
+}
+
+static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
+
+static ssize_t power_state_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct acpi_device *adev = to_acpi_device(dev);
+
+	return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
+}
+
+static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
+
+static ssize_t
+acpi_eject_store(struct device *d, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct acpi_device *acpi_device = to_acpi_device(d);
+	acpi_object_type not_used;
+	acpi_status status;
+
+	if (!count || buf[0] != '1')
+		return -EINVAL;
+
+	if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
+	    && !acpi_device->driver)
+		return -ENODEV;
+
+	status = acpi_get_type(acpi_device->handle, &not_used);
+	if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
+		return -ENODEV;
+
+	get_device(&acpi_device->dev);
+	status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
+	if (ACPI_SUCCESS(status))
+		return count;
+
+	put_device(&acpi_device->dev);
+	acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
+			  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
+	return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
+}
+
+static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
+
+static ssize_t
+acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+	return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
+}
+static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
+
+static ssize_t acpi_device_uid_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+	return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
+}
+static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
+
+static ssize_t acpi_device_adr_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+
+	return sprintf(buf, "0x%08x\n",
+		       (unsigned int)(acpi_dev->pnp.bus_address));
+}
+static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
+
+static ssize_t
+acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
+	int result;
+
+	result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
+	if (result)
+		goto end;
+
+	result = sprintf(buf, "%s\n", (char*)path.pointer);
+	kfree(path.pointer);
+end:
+	return result;
+}
+static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
+
+/* sysfs file that shows description text from the ACPI _STR method */
+static ssize_t description_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf) {
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	int result;
+
+	if (acpi_dev->pnp.str_obj == NULL)
+		return 0;
+
+	/*
+	 * The _STR object contains a Unicode identifier for a device.
+	 * We need to convert to utf-8 so it can be displayed.
+	 */
+	result = utf16s_to_utf8s(
+		(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
+		acpi_dev->pnp.str_obj->buffer.length,
+		UTF16_LITTLE_ENDIAN, buf,
+		PAGE_SIZE);
+
+	buf[result++] = '\n';
+
+	return result;
+}
+static DEVICE_ATTR(description, 0444, description_show, NULL);
+
+static ssize_t
+acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
+		     char *buf) {
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	acpi_status status;
+	unsigned long long sun;
+
+	status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	return sprintf(buf, "%llu\n", sun);
+}
+static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+				char *buf) {
+	struct acpi_device *acpi_dev = to_acpi_device(dev);
+	acpi_status status;
+	unsigned long long sta;
+
+	status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	return sprintf(buf, "%llu\n", sta);
+}
+static DEVICE_ATTR_RO(status);
+
+/**
+ * acpi_device_setup_files - Create sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+int acpi_device_setup_files(struct acpi_device *dev)
+{
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	acpi_status status;
+	int result = 0;
+
+	/*
+	 * Devices enumerated from the FADT don't have a "path" attribute
+	 */
+	if (dev->handle) {
+		result = device_create_file(&dev->dev, &dev_attr_path);
+		if (result)
+			goto end;
+	}
+
+	if (!list_empty(&dev->pnp.ids)) {
+		result = device_create_file(&dev->dev, &dev_attr_hid);
+		if (result)
+			goto end;
+
+		result = device_create_file(&dev->dev, &dev_attr_modalias);
+		if (result)
+			goto end;
+	}
+
+	/*
+	 * If device has _STR, 'description' file is created
+	 */
+	if (acpi_has_method(dev->handle, "_STR")) {
+		status = acpi_evaluate_object(dev->handle, "_STR",
+					NULL, &buffer);
+		if (ACPI_FAILURE(status))
+			buffer.pointer = NULL;
+		dev->pnp.str_obj = buffer.pointer;
+		result = device_create_file(&dev->dev, &dev_attr_description);
+		if (result)
+			goto end;
+	}
+
+	if (dev->pnp.type.bus_address)
+		result = device_create_file(&dev->dev, &dev_attr_adr);
+	if (dev->pnp.unique_id)
+		result = device_create_file(&dev->dev, &dev_attr_uid);
+
+	if (acpi_has_method(dev->handle, "_SUN")) {
+		result = device_create_file(&dev->dev, &dev_attr_sun);
+		if (result)
+			goto end;
+	}
+
+	if (acpi_has_method(dev->handle, "_STA")) {
+		result = device_create_file(&dev->dev, &dev_attr_status);
+		if (result)
+			goto end;
+	}
+
+	/*
+	 * If the device has _EJ0, an 'eject' file is created that is used to
+	 * trigger hot removal from userland.
+	 */
+	if (acpi_has_method(dev->handle, "_EJ0")) {
+		result = device_create_file(&dev->dev, &dev_attr_eject);
+		if (result)
+			return result;
+	}
+
+	if (dev->flags.power_manageable) {
+		result = device_create_file(&dev->dev, &dev_attr_power_state);
+		if (result)
+			return result;
+
+		if (dev->power.flags.power_resources)
+			result = device_create_file(&dev->dev,
+						    &dev_attr_real_power_state);
+	}
+
+end:
+	return result;
+}
+
+/**
+ * acpi_device_remove_files - Remove sysfs attributes of an ACPI device.
+ * @dev: ACPI device object.
+ */
+void acpi_device_remove_files(struct acpi_device *dev)
+{
+	if (dev->flags.power_manageable) {
+		device_remove_file(&dev->dev, &dev_attr_power_state);
+		if (dev->power.flags.power_resources)
+			device_remove_file(&dev->dev,
+					   &dev_attr_real_power_state);
+	}
+
+	/*
+	 * If device has _STR, remove 'description' file
+	 */
+	if (acpi_has_method(dev->handle, "_STR")) {
+		kfree(dev->pnp.str_obj);
+		device_remove_file(&dev->dev, &dev_attr_description);
+	}
+	/*
+	 * If device has _EJ0, remove 'eject' file.
+	 */
+	if (acpi_has_method(dev->handle, "_EJ0"))
+		device_remove_file(&dev->dev, &dev_attr_eject);
+
+	if (acpi_has_method(dev->handle, "_SUN"))
+		device_remove_file(&dev->dev, &dev_attr_sun);
+
+	if (dev->pnp.unique_id)
+		device_remove_file(&dev->dev, &dev_attr_uid);
+	if (dev->pnp.type.bus_address)
+		device_remove_file(&dev->dev, &dev_attr_adr);
+	device_remove_file(&dev->dev, &dev_attr_modalias);
+	device_remove_file(&dev->dev, &dev_attr_hid);
+	if (acpi_has_method(dev->handle, "_STA"))
+		device_remove_file(&dev->dev, &dev_attr_status);
+	if (dev->handle)
+		device_remove_file(&dev->dev, &dev_attr_path);
+}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a688aa2..e8e128d 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -17,10 +17,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 9d4761d..2614a83 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -22,10 +22,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -165,8 +161,16 @@
 	u8 flags;
 };
 
+struct acpi_ec_query {
+	struct transaction transaction;
+	struct work_struct work;
+	struct acpi_ec_query_handler *handler;
+};
+
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
 static void advance_transaction(struct acpi_ec *ec);
+static void acpi_ec_event_handler(struct work_struct *work);
+static void acpi_ec_event_processor(struct work_struct *work);
 
 struct acpi_ec *boot_ec, *first_ec;
 EXPORT_SYMBOL(first_ec);
@@ -978,60 +982,90 @@
 }
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
-static void acpi_ec_run(void *cxt)
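+/*
+ * Allocate a query transaction that reads the _Qxx event number into *pval.
+ */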
+static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
 {
-	struct acpi_ec_query_handler *handler = cxt;
+	struct acpi_ec_query *q;
+	struct transaction *t;
 
-	if (!handler)
-		return;
+	q = kzalloc(sizeof(struct acpi_ec_query), GFP_KERNEL);
+	if (!q)
+		return NULL;
+	INIT_WORK(&q->work, acpi_ec_event_processor);
+	t = &q->transaction;
+	t->command = ACPI_EC_COMMAND_QUERY;
+	t->rdata = pval;
+	t->rlen = 1;
+	return q;
+}
+
+static void acpi_ec_delete_query(struct acpi_ec_query *q)
+{
+	if (q) {
+		if (q->handler)
+			acpi_ec_put_query_handler(q->handler);
+		kfree(q);
+	}
+}
+
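+/*
+ * Work item: run the _Qxx handler for a completed query event, then free the
+ * query object.
+ */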
+static void acpi_ec_event_processor(struct work_struct *work)
+{
+	struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
+	struct acpi_ec_query_handler *handler = q->handler;
+
 	ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
 	if (handler->func)
 		handler->func(handler->data);
 	else if (handler->handle)
 		acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
 	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
-	acpi_ec_put_query_handler(handler);
+	acpi_ec_delete_query(q);
 }
 
 static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
 {
 	u8 value = 0;
 	int result;
-	acpi_status status;
 	struct acpi_ec_query_handler *handler;
-	struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
-				.wdata = NULL, .rdata = &value,
-				.wlen = 0, .rlen = 1};
+	struct acpi_ec_query *q;
+
+	q = acpi_ec_create_query(&value);
+	if (!q)
+		return -ENOMEM;
 
 	/*
 	 * Query the EC to find out which _Qxx method we need to evaluate.
 	 * Note that successful completion of the query causes the ACPI_EC_SCI
 	 * bit to be cleared (and thus clearing the interrupt source).
 	 */
-	result = acpi_ec_transaction(ec, &t);
-	if (result)
-		return result;
-	if (data)
-		*data = value;
+	result = acpi_ec_transaction(ec, &q->transaction);
 	if (!value)
-		return -ENODATA;
+		result = -ENODATA;
+	if (result)
+		goto err_exit;
 
 	mutex_lock(&ec->mutex);
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
-			/* have custom handler for this bit */
-			handler = acpi_ec_get_query_handler(handler);
+			q->handler = acpi_ec_get_query_handler(handler);
 			ec_dbg_evt("Query(0x%02x) scheduled",
-				   handler->query_bit);
-			status = acpi_os_execute((handler->func) ?
-				OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
-				acpi_ec_run, handler);
-			if (ACPI_FAILURE(status))
+				   q->handler->query_bit);
+			/*
+			 * It is reported that _Qxx are evaluated in
+			 * parallel on Windows:
+			 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
+			 */
+			if (!schedule_work(&q->work))
 				result = -EBUSY;
 			break;
 		}
 	}
 	mutex_unlock(&ec->mutex);
+
+err_exit:
+	if (result && q)
+		acpi_ec_delete_query(q);
+	if (data)
+		*data = value;
 	return result;
 }
 
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index bea0bba..e297a48 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
index a322710..5c67a6d 100644
--- a/drivers/acpi/hed.c
+++ b/drivers/acpi/hed.c
@@ -15,10 +15,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4683a96..9e42621 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -13,9 +13,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef _ACPI_INTERNAL_H_
@@ -70,7 +67,7 @@
 
 #ifdef CONFIG_DEBUG_FS
 extern struct dentry *acpi_debugfs_dir;
-int acpi_debugfs_init(void);
+void acpi_debugfs_init(void);
 #else
 static inline void acpi_debugfs_init(void) { return; }
 #endif
@@ -93,10 +90,21 @@
 		    void (*release)(struct device *));
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 			     int type, unsigned long long sta);
+int acpi_device_setup_files(struct acpi_device *dev);
+void acpi_device_remove_files(struct acpi_device *dev);
 void acpi_device_add_finalize(struct acpi_device *device);
 void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
 bool acpi_device_is_present(struct acpi_device *adev);
 bool acpi_device_is_battery(struct acpi_device *adev);
+bool acpi_device_is_first_physical_node(struct acpi_device *adev,
+					const struct device *dev);
+
+/* --------------------------------------------------------------------------
+                     Device Matching and Notification
+   -------------------------------------------------------------------------- */
+struct acpi_device *acpi_companion_match(const struct device *dev);
+int __acpi_device_uevent_modalias(struct acpi_device *adev,
+				  struct kobj_uevent_env *env);
 
 /* --------------------------------------------------------------------------
                                   Power Resource
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 628a42c..cf0fd96 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -702,11 +702,11 @@
 	u16 flags = to_nfit_memdev(dev)->flags;
 
 	return sprintf(buf, "%s%s%s%s%s\n",
-			flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-			flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-			flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-			flags & ACPI_NFIT_MEM_ARMED ? "arm " : "",
-			flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart " : "");
+		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
+		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
+		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
+		flags & ACPI_NFIT_MEM_ARMED ? "not_armed " : "",
+		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "");
 }
 static DEVICE_ATTR_RO(flags);
 
@@ -849,12 +849,12 @@
 		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
 			continue;
 
-		dev_info(acpi_desc->dev, "%s: failed: %s%s%s%s\n",
+		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n",
 				nvdimm_name(nvdimm),
-			mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save " : "",
-			mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore " : "",
-			mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush " : "",
-			mem_flags & ACPI_NFIT_MEM_ARMED ? "arm " : "");
+		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
+		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
+		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
+		  mem_flags & ACPI_NFIT_MEM_ARMED ? " not_armed" : "");
 
 	}
 
@@ -1024,7 +1024,7 @@
 		wmb_pmem();
 }
 
-static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
+static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
 {
 	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
 	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
@@ -1032,7 +1032,7 @@
 	if (mmio->num_lines)
 		offset = to_interleave_offset(offset, mmio);
 
-	return readq(mmio->base + offset);
+	return readl(mmio->base + offset);
 }
 
 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index acaa3b4..72b6e9e 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -15,10 +15,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 3b8963f..739a4a6 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -19,10 +19,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
@@ -47,6 +43,7 @@
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
 
 #include "internal.h"
 
@@ -83,6 +80,7 @@
 static struct workqueue_struct *kacpid_wq;
 static struct workqueue_struct *kacpi_notify_wq;
 static struct workqueue_struct *kacpi_hotplug_wq;
+static bool acpi_os_initialized;
 
 /*
  * This list of permanent mappings is for memory that may be accessed from
@@ -947,21 +945,6 @@
 
 EXPORT_SYMBOL(acpi_os_write_port);
 
-#ifdef readq
-static inline u64 read64(const volatile void __iomem *addr)
-{
-	return readq(addr);
-}
-#else
-static inline u64 read64(const volatile void __iomem *addr)
-{
-	u64 l, h;
-	l = readl(addr);
-	h = readl(addr+4);
-	return l | (h << 32);
-}
-#endif
-
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
 {
@@ -994,7 +977,7 @@
 		*(u32 *) value = readl(virt_addr);
 		break;
 	case 64:
-		*(u64 *) value = read64(virt_addr);
+		*(u64 *) value = readq(virt_addr);
 		break;
 	default:
 		BUG();
@@ -1008,19 +991,6 @@
 	return AE_OK;
 }
 
-#ifdef writeq
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-	writeq(val, addr);
-}
-#else
-static inline void write64(u64 val, volatile void __iomem *addr)
-{
-	writel(val, addr);
-	writel(val>>32, addr+4);
-}
-#endif
-
 acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
 {
@@ -1049,7 +1019,7 @@
 		writel(value, virt_addr);
 		break;
 	case 64:
-		write64(value, virt_addr);
+		writeq(value, virt_addr);
 		break;
 	default:
 		BUG();
@@ -1316,6 +1286,9 @@
 	long jiffies;
 	int ret = 0;
 
+	if (!acpi_os_initialized)
+		return AE_OK;
+
 	if (!sem || (units < 1))
 		return AE_BAD_PARAMETER;
 
@@ -1355,6 +1328,9 @@
 {
 	struct semaphore *sem = (struct semaphore *)handle;
 
+	if (!acpi_os_initialized)
+		return AE_OK;
+
 	if (!sem || (units < 1))
 		return AE_BAD_PARAMETER;
 
@@ -1863,6 +1839,7 @@
 		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
 		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
 	}
+	acpi_os_initialized = true;
 
 	return AE_OK;
 }
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 304eccb..6da0f9b 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -19,10 +19,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -412,7 +408,7 @@
 		return 0;
 	}
 
-	if (dev->irq_managed && dev->irq > 0)
+	if (pci_has_managed_irq(dev))
 		return 0;
 
 	entry = acpi_pci_irq_lookup(dev, pin);
@@ -457,8 +453,7 @@
 		kfree(entry);
 		return rc;
 	}
-	dev->irq = rc;
-	dev->irq_managed = 1;
+	pci_set_managed_irq(dev, rc);
 
 	if (link)
 		snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -481,17 +476,9 @@
 	u8 pin;
 
 	pin = dev->pin;
-	if (!pin || !dev->irq_managed || dev->irq <= 0)
+	if (!pin || !pci_has_managed_irq(dev))
 		return;
 
-	/* Keep IOAPIC pin configuration when suspending */
-	if (dev->dev.power.is_prepared)
-		return;
-#ifdef	CONFIG_PM
-	if (dev->dev.power.runtime_status == RPM_SUSPENDING)
-		return;
-#endif
-
 	entry = acpi_pci_irq_lookup(dev, pin);
 	if (!entry)
 		return;
@@ -511,6 +498,6 @@
 	dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
 	if (gsi >= 0) {
 		acpi_unregister_gsi(gsi);
-		dev->irq_managed = 0;
+		pci_reset_managed_irq(dev);
 	}
 }
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index cfd7581..3b4ea98 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -17,10 +17,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  * TBD: 
@@ -826,6 +822,22 @@
 }
 
 /*
+ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
+ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for
+ * PCI IRQs.
+ */
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
+{
+	if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+		if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+		    polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+		else
+			acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+	}
+}
+
+/*
  * Over-ride default table to reserve additional IRQs for use by ISA
  * e.g. acpi_irq_isa=5
  * Useful for telling ACPI how not to interfere with your ISA sound card.
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1b5569c..393706a 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 139d9e4..7188e53 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -20,10 +20,6 @@
  *  WITHOUT ANY WARRANTY; without even the implied warranty of
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #include <linux/kernel.h>
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 93eac53..fcd4ce6 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -1,8 +1,10 @@
 /*
- *  acpi_power.c - ACPI Bus Power Management ($Revision: 39 $)
+ * drivers/acpi/power.c - ACPI Power Resources management.
  *
- *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001 - 2015 Intel Corp.
+ * Author: Andy Grover <andrew.grover@intel.com>
+ * Author: Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -16,10 +18,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -27,10 +25,11 @@
  * ACPI power-managed devices may be controlled in two ways:
  * 1. via "Device Specific (D-State) Control"
  * 2. via "Power Resource Control".
- * This module is used to manage devices relying on Power Resource Control.
+ * The code below deals with ACPI Power Resources control.
  * 
- * An ACPI "power resource object" describes a software controllable power
- * plane, clock plane, or other resource used by a power managed device.
+ * An ACPI "power resource object" represents a software controllable power
+ * plane, clock plane, or other resource depended on by a device.
+ *
  * A device may rely on multiple power resources, and a power resource
  * may be shared by multiple devices.
  */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index d9f7158..51e658f2 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -21,10 +21,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -159,38 +155,28 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata acpi_cpu_notifier = {
+static struct notifier_block acpi_cpu_notifier = {
 	    .notifier_call = acpi_cpu_soft_notify,
 };
 
-static int __acpi_processor_start(struct acpi_device *device)
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
+static int acpi_pss_perf_init(struct acpi_processor *pr,
+		struct acpi_device *device)
 {
-	struct acpi_processor *pr = acpi_driver_data(device);
-	acpi_status status;
 	int result = 0;
 
-	if (!pr)
-		return -ENODEV;
-
-	if (pr->flags.need_hotplug_init)
-		return 0;
-
-#ifdef CONFIG_CPU_FREQ
 	acpi_processor_ppc_has_changed(pr, 0);
-#endif
+
 	acpi_processor_get_throttling_info(pr);
 
 	if (pr->flags.throttling)
 		pr->flags.limit = 1;
 
-	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-		acpi_processor_power_init(pr);
-
 	pr->cdev = thermal_cooling_device_register("Processor", device,
 						   &processor_cooling_ops);
 	if (IS_ERR(pr->cdev)) {
 		result = PTR_ERR(pr->cdev);
-		goto err_power_exit;
+		return result;
 	}
 
 	dev_dbg(&device->dev, "registered as cooling_device%d\n",
@@ -204,6 +190,7 @@
 			"Failed to create sysfs link 'thermal_cooling'\n");
 		goto err_thermal_unregister;
 	}
+
 	result = sysfs_create_link(&pr->cdev->device.kobj,
 				   &device->dev.kobj,
 				   "device");
@@ -213,17 +200,61 @@
 		goto err_remove_sysfs_thermal;
 	}
 
-	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-					     acpi_processor_notify, device);
-	if (ACPI_SUCCESS(status))
-		return 0;
-
 	sysfs_remove_link(&pr->cdev->device.kobj, "device");
  err_remove_sysfs_thermal:
 	sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
  err_thermal_unregister:
 	thermal_cooling_device_unregister(pr->cdev);
- err_power_exit:
+
+	return result;
+}
+
+static void acpi_pss_perf_exit(struct acpi_processor *pr,
+		struct acpi_device *device)
+{
+	if (pr->cdev) {
+		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+		sysfs_remove_link(&pr->cdev->device.kobj, "device");
+		thermal_cooling_device_unregister(pr->cdev);
+		pr->cdev = NULL;
+	}
+}
+#else
+static inline int acpi_pss_perf_init(struct acpi_processor *pr,
+		struct acpi_device *device)
+{
+	return 0;
+}
+
+static inline void acpi_pss_perf_exit(struct acpi_processor *pr,
+		struct acpi_device *device) {}
+#endif /* CONFIG_ACPI_CPU_FREQ_PSS */
+
+static int __acpi_processor_start(struct acpi_device *device)
+{
+	struct acpi_processor *pr = acpi_driver_data(device);
+	acpi_status status;
+	int result = 0;
+
+	if (!pr)
+		return -ENODEV;
+
+	if (pr->flags.need_hotplug_init)
+		return 0;
+
+	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
+		acpi_processor_power_init(pr);
+
+	result = acpi_pss_perf_init(pr, device);
+	if (result)
+		goto err_power_exit;
+
+	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+					     acpi_processor_notify, device);
+	if (ACPI_SUCCESS(status))
+		return 0;
+
+err_power_exit:
 	acpi_processor_power_exit(pr);
 	return result;
 }
@@ -252,15 +283,10 @@
 	pr = acpi_driver_data(device);
 	if (!pr)
 		return 0;
-
 	acpi_processor_power_exit(pr);
 
-	if (pr->cdev) {
-		sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-		sysfs_remove_link(&pr->cdev->device.kobj, "device");
-		thermal_cooling_device_unregister(pr->cdev);
-		pr->cdev = NULL;
-	}
+	acpi_pss_perf_exit(pr, device);
+
 	return 0;
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d540f42..175c86b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -21,10 +21,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index cfc8aba..bb01dea 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -20,10 +20,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  */
 
 #include <linux/kernel.h>
@@ -87,7 +83,7 @@
 	if (ignore_ppc)
 		return 0;
 
-	if (event != CPUFREQ_INCOMPATIBLE)
+	if (event != CPUFREQ_ADJUST)
 		return 0;
 
 	mutex_lock(&performance_mutex);
@@ -784,9 +780,7 @@
 
 EXPORT_SYMBOL(acpi_processor_register_performance);
 
-void
-acpi_processor_unregister_performance(struct acpi_processor_performance
-				      *performance, unsigned int cpu)
+void acpi_processor_unregister_performance(unsigned int cpu)
 {
 	struct acpi_processor *pr;
 
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e003663..1fed84a 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -19,10 +19,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 84243c3..f170d74 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -19,10 +19,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 7836e2e..6d99450 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -528,13 +528,14 @@
 
 	if (!val)
 		return obj->package.count;
-	else if (nval <= 0)
-		return -EINVAL;
 
 	if (nval > obj->package.count)
 		return -EOVERFLOW;
+	else if (nval <= 0)
+		return -EINVAL;
 
 	items = obj->package.elements;
+
 	switch (proptype) {
 	case DEV_PROP_U8:
 		ret = acpi_copy_property_array_u8(items, (u8 *)val, nval);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index f1c966e..15d22db 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -15,10 +15,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 01504c8..cb3dedb 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -17,10 +17,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ec25635..01136b8 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -115,264 +115,6 @@
 	return 0;
 }
 
-/**
- * create_pnp_modalias - Create hid/cid(s) string for modalias and uevent
- * @acpi_dev: ACPI device object.
- * @modalias: Buffer to print into.
- * @size: Size of the buffer.
- *
- * Creates hid/cid(s) string needed for modalias and uevent
- * e.g. on a device with hid:IBM0001 and cid:ACPI0001 you get:
- * char *modalias: "acpi:IBM0001:ACPI0001"
- * Return: 0: no _HID and no _CID
- *         -EINVAL: output error
- *         -ENOMEM: output is truncated
-*/
-static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
-			       int size)
-{
-	int len;
-	int count;
-	struct acpi_hardware_id *id;
-
-	/*
-	 * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
-	 * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
-	 * device's list.
-	 */
-	count = 0;
-	list_for_each_entry(id, &acpi_dev->pnp.ids, list)
-		if (strcmp(id->id, ACPI_DT_NAMESPACE_HID))
-			count++;
-
-	if (!count)
-		return 0;
-
-	len = snprintf(modalias, size, "acpi:");
-	if (len <= 0)
-		return len;
-
-	size -= len;
-
-	list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
-		if (!strcmp(id->id, ACPI_DT_NAMESPACE_HID))
-			continue;
-
-		count = snprintf(&modalias[len], size, "%s:", id->id);
-		if (count < 0)
-			return -EINVAL;
-
-		if (count >= size)
-			return -ENOMEM;
-
-		len += count;
-		size -= count;
-	}
-	modalias[len] = '\0';
-	return len;
-}
-
-/**
- * create_of_modalias - Creates DT compatible string for modalias and uevent
- * @acpi_dev: ACPI device object.
- * @modalias: Buffer to print into.
- * @size: Size of the buffer.
- *
- * Expose DT compatible modalias as of:NnameTCcompatible.  This function should
- * only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
- * ACPI/PNP IDs.
- */
-static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
-			      int size)
-{
-	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
-	const union acpi_object *of_compatible, *obj;
-	int len, count;
-	int i, nval;
-	char *c;
-
-	acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
-	/* DT strings are all in lower case */
-	for (c = buf.pointer; *c != '\0'; c++)
-		*c = tolower(*c);
-
-	len = snprintf(modalias, size, "of:N%sT", (char *)buf.pointer);
-	ACPI_FREE(buf.pointer);
-
-	if (len <= 0)
-		return len;
-
-	of_compatible = acpi_dev->data.of_compatible;
-	if (of_compatible->type == ACPI_TYPE_PACKAGE) {
-		nval = of_compatible->package.count;
-		obj = of_compatible->package.elements;
-	} else { /* Must be ACPI_TYPE_STRING. */
-		nval = 1;
-		obj = of_compatible;
-	}
-	for (i = 0; i < nval; i++, obj++) {
-		count = snprintf(&modalias[len], size, "C%s",
-				 obj->string.pointer);
-		if (count < 0)
-			return -EINVAL;
-
-		if (count >= size)
-			return -ENOMEM;
-
-		len += count;
-		size -= count;
-	}
-	modalias[len] = '\0';
-	return len;
-}
-
-/*
- * acpi_companion_match() - Can we match via ACPI companion device
- * @dev: Device in question
- *
- * Check if the given device has an ACPI companion and if that companion has
- * a valid list of PNP IDs, and if the device is the first (primary) physical
- * device associated with it.  Return the companion pointer if that's the case
- * or NULL otherwise.
- *
- * If multiple physical devices are attached to a single ACPI companion, we need
- * to be careful.  The usage scenario for this kind of relationship is that all
- * of the physical devices in question use resources provided by the ACPI
- * companion.  A typical case is an MFD device where all the sub-devices share
- * the parent's ACPI companion.  In such cases we can only allow the primary
- * (first) physical device to be matched with the help of the companion's PNP
- * IDs.
- *
- * Additional physical devices sharing the ACPI companion can still use
- * resources available from it but they will be matched normally using functions
- * provided by their bus types (and analogously for their modalias).
- */
-static struct acpi_device *acpi_companion_match(const struct device *dev)
-{
-	struct acpi_device *adev;
-	struct mutex *physical_node_lock;
-
-	adev = ACPI_COMPANION(dev);
-	if (!adev)
-		return NULL;
-
-	if (list_empty(&adev->pnp.ids))
-		return NULL;
-
-	physical_node_lock = &adev->physical_node_lock;
-	mutex_lock(physical_node_lock);
-	if (list_empty(&adev->physical_node_list)) {
-		adev = NULL;
-	} else {
-		const struct acpi_device_physical_node *node;
-
-		node = list_first_entry(&adev->physical_node_list,
-					struct acpi_device_physical_node, node);
-		if (node->dev != dev)
-			adev = NULL;
-	}
-	mutex_unlock(physical_node_lock);
-
-	return adev;
-}
-
-static int __acpi_device_uevent_modalias(struct acpi_device *adev,
-					 struct kobj_uevent_env *env)
-{
-	int len;
-
-	if (!adev)
-		return -ENODEV;
-
-	if (list_empty(&adev->pnp.ids))
-		return 0;
-
-	if (add_uevent_var(env, "MODALIAS="))
-		return -ENOMEM;
-
-	len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
-				  sizeof(env->buf) - env->buflen);
-	if (len < 0)
-		return len;
-
-	env->buflen += len;
-	if (!adev->data.of_compatible)
-		return 0;
-
-	if (len > 0 && add_uevent_var(env, "MODALIAS="))
-		return -ENOMEM;
-
-	len = create_of_modalias(adev, &env->buf[env->buflen - 1],
-				 sizeof(env->buf) - env->buflen);
-	if (len < 0)
-		return len;
-
-	env->buflen += len;
-
-	return 0;
-}
-
-/*
- * Creates uevent modalias field for ACPI enumerated devices.
- * Because the other buses does not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
-{
-	return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
-}
-EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
-
-static int __acpi_device_modalias(struct acpi_device *adev, char *buf, int size)
-{
-	int len, count;
-
-	if (!adev)
-		return -ENODEV;
-
-	if (list_empty(&adev->pnp.ids))
-		return 0;
-
-	len = create_pnp_modalias(adev, buf, size - 1);
-	if (len < 0) {
-		return len;
-	} else if (len > 0) {
-		buf[len++] = '\n';
-		size -= len;
-	}
-	if (!adev->data.of_compatible)
-		return len;
-
-	count = create_of_modalias(adev, buf + len, size - 1);
-	if (count < 0) {
-		return count;
-	} else if (count > 0) {
-		len += count;
-		buf[len++] = '\n';
-	}
-
-	return len;
-}
-
-/*
- * Creates modalias sysfs attribute for ACPI enumerated devices.
- * Because the other buses does not support ACPI HIDs & CIDs.
- * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
- * "acpi:IBM0001:ACPI0001"
- */
-int acpi_device_modalias(struct device *dev, char *buf, int size)
-{
-	return __acpi_device_modalias(acpi_companion_match(dev), buf, size);
-}
-EXPORT_SYMBOL_GPL(acpi_device_modalias);
-
-static ssize_t
-acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf) {
-	return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
-}
-static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
-
 bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
 {
 	struct acpi_device_physical_node *pn;
@@ -701,423 +443,6 @@
 	unlock_device_hotplug();
 }
 
-static ssize_t real_power_state_show(struct device *dev,
-				     struct device_attribute *attr, char *buf)
-{
-	struct acpi_device *adev = to_acpi_device(dev);
-	int state;
-	int ret;
-
-	ret = acpi_device_get_power(adev, &state);
-	if (ret)
-		return ret;
-
-	return sprintf(buf, "%s\n", acpi_power_state_string(state));
-}
-
-static DEVICE_ATTR(real_power_state, 0444, real_power_state_show, NULL);
-
-static ssize_t power_state_show(struct device *dev,
-				struct device_attribute *attr, char *buf)
-{
-	struct acpi_device *adev = to_acpi_device(dev);
-
-	return sprintf(buf, "%s\n", acpi_power_state_string(adev->power.state));
-}
-
-static DEVICE_ATTR(power_state, 0444, power_state_show, NULL);
-
-static ssize_t
-acpi_eject_store(struct device *d, struct device_attribute *attr,
-		const char *buf, size_t count)
-{
-	struct acpi_device *acpi_device = to_acpi_device(d);
-	acpi_object_type not_used;
-	acpi_status status;
-
-	if (!count || buf[0] != '1')
-		return -EINVAL;
-
-	if ((!acpi_device->handler || !acpi_device->handler->hotplug.enabled)
-	    && !acpi_device->driver)
-		return -ENODEV;
-
-	status = acpi_get_type(acpi_device->handle, &not_used);
-	if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
-		return -ENODEV;
-
-	get_device(&acpi_device->dev);
-	status = acpi_hotplug_schedule(acpi_device, ACPI_OST_EC_OSPM_EJECT);
-	if (ACPI_SUCCESS(status))
-		return count;
-
-	put_device(&acpi_device->dev);
-	acpi_evaluate_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
-			  ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
-	return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
-}
-
-static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
-
-static ssize_t
-acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-
-	return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
-}
-static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
-
-static ssize_t acpi_device_uid_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-
-	return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
-}
-static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
-
-static ssize_t acpi_device_adr_show(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-
-	return sprintf(buf, "0x%08x\n",
-		       (unsigned int)(acpi_dev->pnp.bus_address));
-}
-static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
-
-static ssize_t
-acpi_device_path_show(struct device *dev, struct device_attribute *attr, char *buf) {
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
-	int result;
-
-	result = acpi_get_name(acpi_dev->handle, ACPI_FULL_PATHNAME, &path);
-	if (result)
-		goto end;
-
-	result = sprintf(buf, "%s\n", (char*)path.pointer);
-	kfree(path.pointer);
-end:
-	return result;
-}
-static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
-
-/* sysfs file that shows description text from the ACPI _STR method */
-static ssize_t description_show(struct device *dev,
-				struct device_attribute *attr,
-				char *buf) {
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	int result;
-
-	if (acpi_dev->pnp.str_obj == NULL)
-		return 0;
-
-	/*
-	 * The _STR object contains a Unicode identifier for a device.
-	 * We need to convert to utf-8 so it can be displayed.
-	 */
-	result = utf16s_to_utf8s(
-		(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
-		acpi_dev->pnp.str_obj->buffer.length,
-		UTF16_LITTLE_ENDIAN, buf,
-		PAGE_SIZE);
-
-	buf[result++] = '\n';
-
-	return result;
-}
-static DEVICE_ATTR(description, 0444, description_show, NULL);
-
-static ssize_t
-acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
-		     char *buf) {
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	acpi_status status;
-	unsigned long long sun;
-
-	status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	return sprintf(buf, "%llu\n", sun);
-}
-static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
-
-static ssize_t status_show(struct device *dev, struct device_attribute *attr,
-				char *buf) {
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	acpi_status status;
-	unsigned long long sta;
-
-	status = acpi_evaluate_integer(acpi_dev->handle, "_STA", NULL, &sta);
-	if (ACPI_FAILURE(status))
-		return -ENODEV;
-
-	return sprintf(buf, "%llu\n", sta);
-}
-static DEVICE_ATTR_RO(status);
-
-static int acpi_device_setup_files(struct acpi_device *dev)
-{
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	acpi_status status;
-	int result = 0;
-
-	/*
-	 * Devices gotten from FADT don't have a "path" attribute
-	 */
-	if (dev->handle) {
-		result = device_create_file(&dev->dev, &dev_attr_path);
-		if (result)
-			goto end;
-	}
-
-	if (!list_empty(&dev->pnp.ids)) {
-		result = device_create_file(&dev->dev, &dev_attr_hid);
-		if (result)
-			goto end;
-
-		result = device_create_file(&dev->dev, &dev_attr_modalias);
-		if (result)
-			goto end;
-	}
-
-	/*
-	 * If device has _STR, 'description' file is created
-	 */
-	if (acpi_has_method(dev->handle, "_STR")) {
-		status = acpi_evaluate_object(dev->handle, "_STR",
-					NULL, &buffer);
-		if (ACPI_FAILURE(status))
-			buffer.pointer = NULL;
-		dev->pnp.str_obj = buffer.pointer;
-		result = device_create_file(&dev->dev, &dev_attr_description);
-		if (result)
-			goto end;
-	}
-
-	if (dev->pnp.type.bus_address)
-		result = device_create_file(&dev->dev, &dev_attr_adr);
-	if (dev->pnp.unique_id)
-		result = device_create_file(&dev->dev, &dev_attr_uid);
-
-	if (acpi_has_method(dev->handle, "_SUN")) {
-		result = device_create_file(&dev->dev, &dev_attr_sun);
-		if (result)
-			goto end;
-	}
-
-	if (acpi_has_method(dev->handle, "_STA")) {
-		result = device_create_file(&dev->dev, &dev_attr_status);
-		if (result)
-			goto end;
-	}
-
-        /*
-         * If device has _EJ0, 'eject' file is created that is used to trigger
-         * hot-removal function from userland.
-         */
-	if (acpi_has_method(dev->handle, "_EJ0")) {
-		result = device_create_file(&dev->dev, &dev_attr_eject);
-		if (result)
-			return result;
-	}
-
-	if (dev->flags.power_manageable) {
-		result = device_create_file(&dev->dev, &dev_attr_power_state);
-		if (result)
-			return result;
-
-		if (dev->power.flags.power_resources)
-			result = device_create_file(&dev->dev,
-						    &dev_attr_real_power_state);
-	}
-
-end:
-	return result;
-}
-
-static void acpi_device_remove_files(struct acpi_device *dev)
-{
-	if (dev->flags.power_manageable) {
-		device_remove_file(&dev->dev, &dev_attr_power_state);
-		if (dev->power.flags.power_resources)
-			device_remove_file(&dev->dev,
-					   &dev_attr_real_power_state);
-	}
-
-	/*
-	 * If device has _STR, remove 'description' file
-	 */
-	if (acpi_has_method(dev->handle, "_STR")) {
-		kfree(dev->pnp.str_obj);
-		device_remove_file(&dev->dev, &dev_attr_description);
-	}
-	/*
-	 * If device has _EJ0, remove 'eject' file.
-	 */
-	if (acpi_has_method(dev->handle, "_EJ0"))
-		device_remove_file(&dev->dev, &dev_attr_eject);
-
-	if (acpi_has_method(dev->handle, "_SUN"))
-		device_remove_file(&dev->dev, &dev_attr_sun);
-
-	if (dev->pnp.unique_id)
-		device_remove_file(&dev->dev, &dev_attr_uid);
-	if (dev->pnp.type.bus_address)
-		device_remove_file(&dev->dev, &dev_attr_adr);
-	device_remove_file(&dev->dev, &dev_attr_modalias);
-	device_remove_file(&dev->dev, &dev_attr_hid);
-	if (acpi_has_method(dev->handle, "_STA"))
-		device_remove_file(&dev->dev, &dev_attr_status);
-	if (dev->handle)
-		device_remove_file(&dev->dev, &dev_attr_path);
-}
-/* --------------------------------------------------------------------------
-			ACPI Bus operations
-   -------------------------------------------------------------------------- */
-
-/**
- * acpi_of_match_device - Match device object using the "compatible" property.
- * @adev: ACPI device object to match.
- * @of_match_table: List of device IDs to match against.
- *
- * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
- * identifiers and a _DSD object with the "compatible" property, use that
- * property to match against the given list of identifiers.
- */
-static bool acpi_of_match_device(struct acpi_device *adev,
-				 const struct of_device_id *of_match_table)
-{
-	const union acpi_object *of_compatible, *obj;
-	int i, nval;
-
-	if (!adev)
-		return false;
-
-	of_compatible = adev->data.of_compatible;
-	if (!of_match_table || !of_compatible)
-		return false;
-
-	if (of_compatible->type == ACPI_TYPE_PACKAGE) {
-		nval = of_compatible->package.count;
-		obj = of_compatible->package.elements;
-	} else { /* Must be ACPI_TYPE_STRING. */
-		nval = 1;
-		obj = of_compatible;
-	}
-	/* Now we can look for the driver DT compatible strings */
-	for (i = 0; i < nval; i++, obj++) {
-		const struct of_device_id *id;
-
-		for (id = of_match_table; id->compatible[0]; id++)
-			if (!strcasecmp(obj->string.pointer, id->compatible))
-				return true;
-	}
-
-	return false;
-}
-
-static bool __acpi_match_device_cls(const struct acpi_device_id *id,
-				    struct acpi_hardware_id *hwid)
-{
-	int i, msk, byte_shift;
-	char buf[3];
-
-	if (!id->cls)
-		return false;
-
-	/* Apply class-code bitmask, before checking each class-code byte */
-	for (i = 1; i <= 3; i++) {
-		byte_shift = 8 * (3 - i);
-		msk = (id->cls_msk >> byte_shift) & 0xFF;
-		if (!msk)
-			continue;
-
-		sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
-		if (strncmp(buf, &hwid->id[(i - 1) * 2], 2))
-			return false;
-	}
-	return true;
-}
-
-static const struct acpi_device_id *__acpi_match_device(
-	struct acpi_device *device,
-	const struct acpi_device_id *ids,
-	const struct of_device_id *of_ids)
-{
-	const struct acpi_device_id *id;
-	struct acpi_hardware_id *hwid;
-
-	/*
-	 * If the device is not present, it is unnecessary to load device
-	 * driver for it.
-	 */
-	if (!device || !device->status.present)
-		return NULL;
-
-	list_for_each_entry(hwid, &device->pnp.ids, list) {
-		/* First, check the ACPI/PNP IDs provided by the caller. */
-		for (id = ids; id->id[0] || id->cls; id++) {
-			if (id->id[0] && !strcmp((char *) id->id, hwid->id))
-				return id;
-			else if (id->cls && __acpi_match_device_cls(id, hwid))
-				return id;
-		}
-
-		/*
-		 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
-		 * "compatible" property if found.
-		 *
-		 * The id returned by the below is not valid, but the only
-		 * caller passing non-NULL of_ids here is only interested in
-		 * whether or not the return value is NULL.
-		 */
-		if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
-		    && acpi_of_match_device(device, of_ids))
-			return id;
-	}
-	return NULL;
-}
-
-/**
- * acpi_match_device - Match a struct device against a given list of ACPI IDs
- * @ids: Array of struct acpi_device_id object to match against.
- * @dev: The device structure to match.
- *
- * Check if @dev has a valid ACPI handle and if there is a struct acpi_device
- * object for that handle and use that object to match against a given list of
- * device IDs.
- *
- * Return a pointer to the first matching ID on success or %NULL on failure.
- */
-const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
-					       const struct device *dev)
-{
-	return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
-}
-EXPORT_SYMBOL_GPL(acpi_match_device);
-
-int acpi_match_device_ids(struct acpi_device *device,
-			  const struct acpi_device_id *ids)
-{
-	return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
-}
-EXPORT_SYMBOL(acpi_match_device_ids);
-
-bool acpi_driver_match_device(struct device *dev,
-			      const struct device_driver *drv)
-{
-	if (!drv->acpi_match_table)
-		return acpi_of_match_device(ACPI_COMPANION(dev),
-					    drv->of_match_table);
-
-	return !!__acpi_match_device(acpi_companion_match(dev),
-				     drv->acpi_match_table, drv->of_match_table);
-}
-EXPORT_SYMBOL_GPL(acpi_driver_match_device);
-
 static void acpi_free_power_resources_lists(struct acpi_device *device)
 {
 	int i;
@@ -1144,144 +469,6 @@
 	kfree(acpi_dev);
 }
 
-static int acpi_bus_match(struct device *dev, struct device_driver *drv)
-{
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	struct acpi_driver *acpi_drv = to_acpi_driver(drv);
-
-	return acpi_dev->flags.match_driver
-		&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
-}
-
-static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-	return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
-}
-
-static void acpi_device_notify(acpi_handle handle, u32 event, void *data)
-{
-	struct acpi_device *device = data;
-
-	device->driver->ops.notify(device, event);
-}
-
-static void acpi_device_notify_fixed(void *data)
-{
-	struct acpi_device *device = data;
-
-	/* Fixed hardware devices have no handles */
-	acpi_device_notify(NULL, ACPI_FIXED_HARDWARE_EVENT, device);
-}
-
-static u32 acpi_device_fixed_event(void *data)
-{
-	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_device_notify_fixed, data);
-	return ACPI_INTERRUPT_HANDLED;
-}
-
-static int acpi_device_install_notify_handler(struct acpi_device *device)
-{
-	acpi_status status;
-
-	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
-		status =
-		    acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
-						     acpi_device_fixed_event,
-						     device);
-	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
-		status =
-		    acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
-						     acpi_device_fixed_event,
-						     device);
-	else
-		status = acpi_install_notify_handler(device->handle,
-						     ACPI_DEVICE_NOTIFY,
-						     acpi_device_notify,
-						     device);
-
-	if (ACPI_FAILURE(status))
-		return -EINVAL;
-	return 0;
-}
-
-static void acpi_device_remove_notify_handler(struct acpi_device *device)
-{
-	if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
-		acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
-						acpi_device_fixed_event);
-	else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
-		acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
-						acpi_device_fixed_event);
-	else
-		acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-					   acpi_device_notify);
-}
-
-static int acpi_device_probe(struct device *dev)
-{
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	struct acpi_driver *acpi_drv = to_acpi_driver(dev->driver);
-	int ret;
-
-	if (acpi_dev->handler && !acpi_is_pnp_device(acpi_dev))
-		return -EINVAL;
-
-	if (!acpi_drv->ops.add)
-		return -ENOSYS;
-
-	ret = acpi_drv->ops.add(acpi_dev);
-	if (ret)
-		return ret;
-
-	acpi_dev->driver = acpi_drv;
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "Driver [%s] successfully bound to device [%s]\n",
-			  acpi_drv->name, acpi_dev->pnp.bus_id));
-
-	if (acpi_drv->ops.notify) {
-		ret = acpi_device_install_notify_handler(acpi_dev);
-		if (ret) {
-			if (acpi_drv->ops.remove)
-				acpi_drv->ops.remove(acpi_dev);
-
-			acpi_dev->driver = NULL;
-			acpi_dev->driver_data = NULL;
-			return ret;
-		}
-	}
-
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
-			  acpi_drv->name, acpi_dev->pnp.bus_id));
-	get_device(dev);
-	return 0;
-}
-
-static int acpi_device_remove(struct device * dev)
-{
-	struct acpi_device *acpi_dev = to_acpi_device(dev);
-	struct acpi_driver *acpi_drv = acpi_dev->driver;
-
-	if (acpi_drv) {
-		if (acpi_drv->ops.notify)
-			acpi_device_remove_notify_handler(acpi_dev);
-		if (acpi_drv->ops.remove)
-			acpi_drv->ops.remove(acpi_dev);
-	}
-	acpi_dev->driver = NULL;
-	acpi_dev->driver_data = NULL;
-
-	put_device(dev);
-	return 0;
-}
-
-struct bus_type acpi_bus_type = {
-	.name		= "acpi",
-	.match		= acpi_bus_match,
-	.probe		= acpi_device_probe,
-	.remove		= acpi_device_remove,
-	.uevent		= acpi_device_uevent,
-};
-
 static void acpi_device_del(struct acpi_device *device)
 {
 	mutex_lock(&acpi_device_lock);
@@ -1529,47 +716,6 @@
 }
 
 /* --------------------------------------------------------------------------
-                                 Driver Management
-   -------------------------------------------------------------------------- */
-/**
- * acpi_bus_register_driver - register a driver with the ACPI bus
- * @driver: driver being registered
- *
- * Registers a driver with the ACPI bus.  Searches the namespace for all
- * devices that match the driver's criteria and binds.  Returns zero for
- * success or a negative error status for failure.
- */
-int acpi_bus_register_driver(struct acpi_driver *driver)
-{
-	int ret;
-
-	if (acpi_disabled)
-		return -ENODEV;
-	driver->drv.name = driver->name;
-	driver->drv.bus = &acpi_bus_type;
-	driver->drv.owner = driver->owner;
-
-	ret = driver_register(&driver->drv);
-	return ret;
-}
-
-EXPORT_SYMBOL(acpi_bus_register_driver);
-
-/**
- * acpi_bus_unregister_driver - unregisters a driver with the ACPI bus
- * @driver: driver to unregister
- *
- * Unregisters a driver with the ACPI bus.  Searches the namespace for all
- * devices that match the driver's criteria and unbinds.
- */
-void acpi_bus_unregister_driver(struct acpi_driver *driver)
-{
-	driver_unregister(&driver->drv);
-}
-
-EXPORT_SYMBOL(acpi_bus_unregister_driver);
-
-/* --------------------------------------------------------------------------
                                  Device Enumeration
    -------------------------------------------------------------------------- */
 static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
@@ -2744,12 +1890,6 @@
 {
 	int result;
 
-	result = bus_register(&acpi_bus_type);
-	if (result) {
-		/* We don't want to quit even if we failed to add suspend/resume */
-		printk(KERN_ERR PREFIX "Could not register bus type\n");
-	}
-
 	acpi_pci_root_init();
 	acpi_pci_link_init();
 	acpi_processor_init();
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 0876d77b..40a4265 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -69,6 +69,8 @@
 	ACPI_DEBUG_INIT(ACPI_LV_INIT),
 	ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
 	ACPI_DEBUG_INIT(ACPI_LV_INFO),
+	ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
+	ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
 
 	ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
 	ACPI_DEBUG_INIT(ACPI_LV_PARSE),
@@ -162,55 +164,116 @@
 module_param_cb(debug_layer, &param_ops_debug_layer, &acpi_dbg_layer, 0644);
 module_param_cb(debug_level, &param_ops_debug_level, &acpi_dbg_level, 0644);
 
-static char trace_method_name[6];
-module_param_string(trace_method_name, trace_method_name, 6, 0644);
-static unsigned int trace_debug_layer;
-module_param(trace_debug_layer, uint, 0644);
-static unsigned int trace_debug_level;
-module_param(trace_debug_level, uint, 0644);
+static char trace_method_name[1024];
+
+int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
+{
+	u32 saved_flags = 0;
+	bool is_abs_path = true;
+
+	if (*val != '\\')
+		is_abs_path = false;
+
+	if ((is_abs_path && strlen(val) > 1023) ||
+	    (!is_abs_path && strlen(val) > 1022)) {
+		pr_err("%s: string parameter too long\n", kp->name);
+		return -ENOSPC;
+	}
+
+	/*
+	 * It's not safe to update acpi_gbl_trace_method_name without
+	 * having the tracer stopped, so we save the original tracer
+	 * state and disable it.
+	 */
+	saved_flags = acpi_gbl_trace_flags;
+	(void)acpi_debug_trace(NULL,
+			       acpi_gbl_trace_dbg_level,
+			       acpi_gbl_trace_dbg_layer,
+			       0);
+
+	/* This is a hack.  We can't kmalloc in early boot. */
+	if (is_abs_path)
+		strcpy(trace_method_name, val);
+	else {
+		trace_method_name[0] = '\\';
+		strcpy(trace_method_name+1, val);
+	}
+
+	/* Restore the original tracer state */
+	(void)acpi_debug_trace(trace_method_name,
+			       acpi_gbl_trace_dbg_level,
+			       acpi_gbl_trace_dbg_layer,
+			       saved_flags);
+
+	return 0;
+}
+
+static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
+{
+	return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
+}
+
+static const struct kernel_param_ops param_ops_trace_method = {
+	.set = param_set_trace_method_name,
+	.get = param_get_trace_method_name,
+};
+
+static const struct kernel_param_ops param_ops_trace_attrib = {
+	.set = param_set_uint,
+	.get = param_get_uint,
+};
+
+module_param_cb(trace_method_name, &param_ops_trace_method, &trace_method_name, 0644);
+module_param_cb(trace_debug_layer, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
+module_param_cb(trace_debug_level, &param_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
 
 static int param_set_trace_state(const char *val, struct kernel_param *kp)
 {
-	int result = 0;
+	acpi_status status;
+	const char *method = trace_method_name;
+	u32 flags = 0;
 
-	if (!strncmp(val, "enable", sizeof("enable") - 1)) {
-		result = acpi_debug_trace(trace_method_name, trace_debug_level,
-					  trace_debug_layer, 0);
-		if (result)
-			result = -EBUSY;
-		goto exit;
-	}
+/* So "xxx-once" comparison should go prior than "xxx" comparison */
+#define acpi_compare_param(val, key)	\
+	strncmp((val), (key), sizeof(key) - 1)
 
-	if (!strncmp(val, "disable", sizeof("disable") - 1)) {
-		int name = 0;
-		result = acpi_debug_trace((char *)&name, trace_debug_level,
-					  trace_debug_layer, 0);
-		if (result)
-			result = -EBUSY;
-		goto exit;
-	}
+	if (!acpi_compare_param(val, "enable")) {
+		method = NULL;
+		flags = ACPI_TRACE_ENABLED;
+	} else if (!acpi_compare_param(val, "disable"))
+		method = NULL;
+	else if (!acpi_compare_param(val, "method-once"))
+		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
+	else if (!acpi_compare_param(val, "method"))
+		flags = ACPI_TRACE_ENABLED;
+	else if (!acpi_compare_param(val, "opcode-once"))
+		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
+	else if (!acpi_compare_param(val, "opcode"))
+		flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
+	else
+		return -EINVAL;
 
-	if (!strncmp(val, "1", 1)) {
-		result = acpi_debug_trace(trace_method_name, trace_debug_level,
-					  trace_debug_layer, 1);
-		if (result)
-			result = -EBUSY;
-		goto exit;
-	}
+	status = acpi_debug_trace(method,
+				  acpi_gbl_trace_dbg_level,
+				  acpi_gbl_trace_dbg_layer,
+				  flags);
+	if (ACPI_FAILURE(status))
+		return -EBUSY;
 
-	result = -EINVAL;
-exit:
-	return result;
+	return 0;
 }
 
 static int param_get_trace_state(char *buffer, struct kernel_param *kp)
 {
-	if (!acpi_gbl_trace_method_name)
+	if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
 		return sprintf(buffer, "disable");
 	else {
-		if (acpi_gbl_trace_flags & 1)
-			return sprintf(buffer, "1");
-		else
+		if (acpi_gbl_trace_method_name) {
+			if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
+				return sprintf(buffer, "method-once");
+			else
+				return sprintf(buffer, "method");
+		} else
 			return sprintf(buffer, "enable");
 	}
 	return 0;
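
With the reworked parameters, trace_state accepts "enable", "disable", "method", "method-once", "opcode" and "opcode-once", all funnelled into acpi_debug_trace(). A kernel-side sketch of the equivalent call for one-shot method tracing (the AML method path below is only an example, not anything used by this patch):

static int example_start_aml_trace(void)
{
        acpi_status status;

        /* Equivalent of writing "method-once" to the trace_state parameter. */
        status = acpi_debug_trace("\\_SB.PCI0._INI",
                                  acpi_gbl_trace_dbg_level,
                                  acpi_gbl_trace_dbg_layer,
                                  ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);

        return ACPI_FAILURE(status) ? -EBUSY : 0;
}
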
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 2e19189..17a6fa0 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -15,10 +15,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  */
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 6d4e44e..fc28b9f 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  *  This driver fully implements the ACPI thermal policy as described in the
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 67c548a..475c907 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 7e62751..a466602 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -351,6 +351,7 @@
 	/* JMicron 362B and 362C have an AHCI function with IDE class code */
 	{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
 	{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
+	/* May need to update quirk_jmicron_async_suspend() for additions */
 
 	/* ATI */
 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
@@ -1451,18 +1452,6 @@
 	else if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
 		ahci_pci_bar = AHCI_PCI_BAR_CAVIUM;
 
-	/*
-	 * The JMicron chip 361/363 contains one SATA controller and one
-	 * PATA controller,for powering on these both controllers, we must
-	 * follow the sequence one by one, otherwise one of them can not be
-	 * powered on successfully, so here we disable the async suspend
-	 * method for these chips.
-	 */
-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
-		(pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
-		pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
-		device_disable_async_suspend(&pdev->dev);
-
 	/* acquire resources */
 	rc = pcim_enable_device(pdev);
 	if (rc)
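
The async-suspend workaround removed here moves into a PCI quirk; the new comment above points at quirk_jmicron_async_suspend(). That quirk is not part of this hunk, but based on the code being deleted it presumably amounts to something like the following (a sketch; the real version lives in drivers/pci/quirks.c and may differ in how it matches devices):

/*
 * JMB361/JMB363 expose a SATA and a PATA function that must be powered up
 * strictly one after the other, so async suspend is disabled for them.
 */
static void quirk_jmicron_async_suspend(struct pci_dev *dev)
{
        device_disable_async_suspend(&dev->dev);
        dev_info(&dev->dev, "async suspend disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361,
                        quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363,
                        quirk_jmicron_async_suspend);
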
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 19bcb80..b79cb10 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4230,6 +4230,8 @@
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
 	/* devices that don't properly handle TRIM commands */
 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
@@ -4751,6 +4753,7 @@
 /**
  *	ata_qc_new_init - Request an available ATA command, and initialize it
  *	@dev: Device from whom we request an available command structure
+ *	@tag: tag value requested for the new command
  *
  *	LOCKING:
  *	None.
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 5d9ee99..80fe0f6 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -834,7 +834,7 @@
 		return -ENOMEM;
 	}
 
-	acdev->clk = clk_get(&pdev->dev, NULL);
+	acdev->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(acdev->clk)) {
 		dev_warn(&pdev->dev, "Clock not found\n");
 		return PTR_ERR(acdev->clk);
@@ -843,9 +843,8 @@
 	/* allocate host */
 	host = ata_host_alloc(&pdev->dev, 1);
 	if (!host) {
-		ret = -ENOMEM;
 		dev_warn(&pdev->dev, "alloc host fail\n");
-		goto free_clk;
+		return -ENOMEM;
 	}
 
 	ap = host->ports[0];
@@ -894,7 +893,7 @@
 
 	ret = cf_init(acdev);
 	if (ret)
-		goto free_clk;
+		return ret;
 
 	cf_card_detect(acdev, 0);
 
@@ -904,8 +903,7 @@
 		return 0;
 
 	cf_exit(acdev);
-free_clk:
-	clk_put(acdev->clk);
+
 	return ret;
 }
 
@@ -916,7 +914,6 @@
 
 	ata_host_detach(host);
 	cf_exit(acdev);
-	clk_put(acdev->clk);
 
 	return 0;
 }
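
Switching to devm_clk_get() is what lets the free_clk error label and the clk_put() calls go away: the clock reference is released automatically when the device is unbound. The pattern in isolation (an illustrative probe skeleton, not this driver's code):

static int example_probe(struct platform_device *pdev)
{
        struct clk *clk;

        /*
         * Managed lookup: the reference is dropped automatically on unbind,
         * so error paths and .remove() no longer need clk_put().
         */
        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* enabling/disabling the clock still has to be balanced by hand */
        return clk_prepare_enable(clk);
}
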
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index 47e418b..4d1a5d2 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -143,18 +143,6 @@
 	};
 	const struct ata_port_info *ppi[] = { &info, NULL };
 
-	/*
-	 * The JMicron chip 361/363 contains one SATA controller and one
-	 * PATA controller,for powering on these both controllers, we must
-	 * follow the sequence one by one, otherwise one of them can not be
-	 * powered on successfully, so here we disable the async suspend
-	 * method for these chips.
-	 */
-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
-		(pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
-		pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
-		device_disable_async_suspend(&pdev->dev);
-
 	return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
 }
 
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 6d08446..12fe0f3 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -27,12 +27,11 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 
 #include <linux/libata.h>
 #include <scsi/scsi_host.h>
 
-#include <asm/gpio.h>
-
 #define DRV_NAME	"pata-rb532-cf"
 #define DRV_VERSION	"0.1.0"
 #define DRV_DESC	"PATA driver for RouterBOARD 532 Compact Flash"
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index d49a519..8804127 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -861,10 +861,6 @@
 static const struct platform_device_id sata_rcar_id_table[] = {
 	{ "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
 	{ "sata-r8a7779", RCAR_GEN1_SATA },
-	{ "sata-r8a7790", RCAR_GEN2_SATA },
-	{ "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
-	{ "sata-r8a7791", RCAR_GEN2_SATA },
-	{ "sata-r8a7793", RCAR_GEN2_SATA },
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 5b93852..816de9e 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -23,6 +23,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -90,17 +92,19 @@
 
 void ks0108_startline(unsigned char startline)
 {
-	ks0108_writedata(min(startline,(unsigned char)63) | bit(6) | bit(7));
+	ks0108_writedata(min_t(unsigned char, startline, 63) | bit(6) |
+			 bit(7));
 }
 
 void ks0108_address(unsigned char address)
 {
-	ks0108_writedata(min(address,(unsigned char)63) | bit(6));
+	ks0108_writedata(min_t(unsigned char, address, 63) | bit(6));
 }
 
 void ks0108_page(unsigned char page)
 {
-	ks0108_writedata(min(page,(unsigned char)7) | bit(3) | bit(4) | bit(5) | bit(7));
+	ks0108_writedata(min_t(unsigned char, page, 7) | bit(3) | bit(4) |
+			 bit(5) | bit(7));
 }
 
 EXPORT_SYMBOL_GPL(ks0108_writedata);
@@ -121,52 +125,71 @@
 }
 EXPORT_SYMBOL_GPL(ks0108_isinited);
 
+static void ks0108_parport_attach(struct parport *port)
+{
+	struct pardev_cb ks0108_cb;
+
+	if (port->base != ks0108_port)
+		return;
+
+	memset(&ks0108_cb, 0, sizeof(ks0108_cb));
+	ks0108_cb.flags = PARPORT_DEV_EXCL;
+	ks0108_pardevice = parport_register_dev_model(port, KS0108_NAME,
+						      &ks0108_cb, 0);
+	if (!ks0108_pardevice) {
+		pr_err("ERROR: parport didn't register new device\n");
+		return;
+	}
+	if (parport_claim(ks0108_pardevice)) {
+		pr_err("could not claim access to parport %i. Aborting.\n",
+		       ks0108_port);
+		goto err_unreg_device;
+	}
+
+	ks0108_parport = port;
+	ks0108_inited = 1;
+	return;
+
+err_unreg_device:
+	parport_unregister_device(ks0108_pardevice);
+	ks0108_pardevice = NULL;
+}
+
+static void ks0108_parport_detach(struct parport *port)
+{
+	if (port->base != ks0108_port)
+		return;
+
+	if (!ks0108_pardevice) {
+		pr_err("%s: already unregistered.\n", KS0108_NAME);
+		return;
+	}
+
+	parport_release(ks0108_pardevice);
+	parport_unregister_device(ks0108_pardevice);
+	ks0108_pardevice = NULL;
+	ks0108_parport = NULL;
+}
+
 /*
  * Module Init & Exit
  */
 
+static struct parport_driver ks0108_parport_driver = {
+	.name = "ks0108",
+	.match_port = ks0108_parport_attach,
+	.detach = ks0108_parport_detach,
+	.devmodel = true,
+};
+
 static int __init ks0108_init(void)
 {
-	int result;
-	int ret = -EINVAL;
-
-	ks0108_parport = parport_find_base(ks0108_port);
-	if (ks0108_parport == NULL) {
-		printk(KERN_ERR KS0108_NAME ": ERROR: "
-			"parport didn't find %i port\n", ks0108_port);
-		goto none;
-	}
-
-	ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
-		NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
-	if (ks0108_pardevice == NULL) {
-		printk(KERN_ERR KS0108_NAME ": ERROR: "
-			"parport didn't register new device\n");
-		goto none;
-	}
-
-	result = parport_claim(ks0108_pardevice);
-	if (result != 0) {
-		printk(KERN_ERR KS0108_NAME ": ERROR: "
-			"can't claim %i parport, maybe in use\n", ks0108_port);
-		ret = result;
-		goto registered;
-	}
-
-	ks0108_inited = 1;
-	return 0;
-
-registered:
-	parport_unregister_device(ks0108_pardevice);
-
-none:
-	return ret;
+	return parport_register_driver(&ks0108_parport_driver);
 }
 
 static void __exit ks0108_exit(void)
 {
-	parport_release(ks0108_pardevice);
-	parport_unregister_device(ks0108_pardevice);
+	parport_unregister_driver(&ks0108_parport_driver);
 }
 
 module_init(ks0108_init);
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 527d291..6b2a84e 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_SOC_BUS) += soc.o
 obj-$(CONFIG_PINCTRL) += pinctrl.o
 obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
+obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
diff --git a/drivers/base/base.h b/drivers/base/base.h
index fd3347d..1782f3a 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -63,7 +63,7 @@
  *	binding of drivers which were unable to get all the resources needed by
  *	the device; typically because it depends on another driver getting
  *	probed first.
- * @device - pointer back to the struct class that this structure is
+ * @device - pointer back to the struct device that this structure is
  * associated with.
  *
  * Nothing outside of the driver core should ever touch these fields.
@@ -134,6 +134,7 @@
 
 /* /sys/devices directory */
 extern struct kset *devices_kset;
+extern void devices_kset_move_last(struct device *dev);
 
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
 extern void module_add_driver(struct module *mod, struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index dafae6d..334ec7e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -534,6 +534,52 @@
 struct kset *devices_kset;
 
 /**
+ * devices_kset_move_before - Move device in the devices_kset's list.
+ * @deva: Device to move.
+ * @devb: Device @deva should come before.
+ */
+static void devices_kset_move_before(struct device *deva, struct device *devb)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s before %s\n",
+		 dev_name(deva), dev_name(devb));
+	spin_lock(&devices_kset->list_lock);
+	list_move_tail(&deva->kobj.entry, &devb->kobj.entry);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_after - Move device in the devices_kset's list.
+ * @deva: Device to move
+ * @devb: Device @deva should come after.
+ */
+static void devices_kset_move_after(struct device *deva, struct device *devb)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s after %s\n",
+		 dev_name(deva), dev_name(devb));
+	spin_lock(&devices_kset->list_lock);
+	list_move(&deva->kobj.entry, &devb->kobj.entry);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
+ * devices_kset_move_last - move the device to the end of devices_kset's list.
+ * @dev: device to move
+ */
+void devices_kset_move_last(struct device *dev)
+{
+	if (!devices_kset)
+		return;
+	pr_debug("devices_kset: Moving %s to end of list\n", dev_name(dev));
+	spin_lock(&devices_kset->list_lock);
+	list_move_tail(&dev->kobj.entry, &devices_kset->list);
+	spin_unlock(&devices_kset->list_lock);
+}
+
+/**
  * device_create_file - create sysfs attribute file for device.
  * @dev: device.
  * @attr: device attribute descriptor.
@@ -662,6 +708,9 @@
 	INIT_LIST_HEAD(&dev->devres_head);
 	device_pm_init(dev);
 	set_dev_node(dev, -1);
+#ifdef CONFIG_GENERIC_MSI_IRQ
+	INIT_LIST_HEAD(&dev->msi_list);
+#endif
 }
 EXPORT_SYMBOL_GPL(device_initialize);
 
@@ -1252,6 +1301,19 @@
 }
 EXPORT_SYMBOL_GPL(device_unregister);
 
+static struct device *prev_device(struct klist_iter *i)
+{
+	struct klist_node *n = klist_prev(i);
+	struct device *dev = NULL;
+	struct device_private *p;
+
+	if (n) {
+		p = to_device_private_parent(n);
+		dev = p->device;
+	}
+	return dev;
+}
+
 static struct device *next_device(struct klist_iter *i)
 {
 	struct klist_node *n = klist_next(i);
@@ -1341,6 +1403,36 @@
 EXPORT_SYMBOL_GPL(device_for_each_child);
 
 /**
+ * device_for_each_child_reverse - device child iterator in reversed order.
+ * @parent: parent struct device.
+ * @fn: function to be called for each device.
+ * @data: data for the callback.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child_reverse(struct device *parent, void *data,
+				  int (*fn)(struct device *dev, void *data))
+{
+	struct klist_iter i;
+	struct device *child;
+	int error = 0;
+
+	if (!parent->p)
+		return 0;
+
+	klist_iter_init(&parent->p->klist_children, &i);
+	while ((child = prev_device(&i)) && !error)
+		error = fn(child, data);
+	klist_iter_exit(&i);
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
+
+/**
  * device_find_child - device iterator for locating a particular device.
  * @parent: parent struct device
  * @match: Callback function to check device
@@ -1923,12 +2015,15 @@
 		break;
 	case DPM_ORDER_DEV_AFTER_PARENT:
 		device_pm_move_after(dev, new_parent);
+		devices_kset_move_after(dev, new_parent);
 		break;
 	case DPM_ORDER_PARENT_BEFORE_DEV:
 		device_pm_move_before(new_parent, dev);
+		devices_kset_move_before(new_parent, dev);
 		break;
 	case DPM_ORDER_DEV_LAST:
 		device_pm_move_last(dev);
+		devices_kset_move_last(dev);
 		break;
 	}
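Illustration only, not part of this patch: a caller re-parenting a device can now expect devices_kset (and hence shutdown order) to follow the requested PM ordering; dev and new_parent are placeholders.

	err = device_move(dev, new_parent, DPM_ORDER_DEV_AFTER_PARENT);
	if (err)
		dev_warn(dev, "failed to move device: %d\n", err);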
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 78720e7..91bbb19 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -41,7 +41,7 @@
 	cpu->node_id = to_nid;
 }
 
-static int __ref cpu_subsys_online(struct device *dev)
+static int cpu_subsys_online(struct device *dev)
 {
 	struct cpu *cpu = container_of(dev, struct cpu, dev);
 	int cpuid = dev->id;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a638bbb..be0eb46 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -304,6 +304,14 @@
 			goto probe_failed;
 	}
 
+	/*
+	 * Ensure devices are listed in devices_kset in the correct order.
+	 * It's important to move the device to the end of devices_kset
+	 * before calling .probe, because probing can be recursive and a
+	 * parent device should always come before its children.
+	 */
+	devices_kset_move_last(dev);
+
 	if (dev->bus->probe) {
 		ret = dev->bus->probe(dev);
 		if (ret)
@@ -399,6 +407,8 @@
  *
  * This function must be called with @dev lock held.  When called for a
  * USB interface, @dev->parent lock must be held as well.
+ *
+ * If the device has a parent, runtime-resume the parent before driver probing.
  */
 int driver_probe_device(struct device_driver *drv, struct device *dev)
 {
@@ -410,10 +420,16 @@
 	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
 		 drv->bus->name, __func__, dev_name(dev), drv->name);
 
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+
 	pm_runtime_barrier(dev);
 	ret = really_probe(dev, drv);
 	pm_request_idle(dev);
 
+	if (dev->parent)
+		pm_runtime_put(dev->parent);
+
 	return ret;
 }
 
@@ -507,11 +523,17 @@
 
 	device_lock(dev);
 
+	if (dev->parent)
+		pm_runtime_get_sync(dev->parent);
+
 	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
 	dev_dbg(dev, "async probe completed\n");
 
 	pm_request_idle(dev);
 
+	if (dev->parent)
+		pm_runtime_put(dev->parent);
+
 	device_unlock(dev);
 
 	put_device(dev);
@@ -541,6 +563,9 @@
 			.want_async = false,
 		};
 
+		if (dev->parent)
+			pm_runtime_get_sync(dev->parent);
+
 		ret = bus_for_each_drv(dev->bus, NULL, &data,
 					__device_attach_driver);
 		if (!ret && allow_async && data.have_async) {
@@ -557,6 +582,9 @@
 		} else {
 			pm_request_idle(dev);
 		}
+
+		if (dev->parent)
+			pm_runtime_put(dev->parent);
 	}
 out_unlock:
 	device_unlock(dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index c8a53d1..8754646 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -297,10 +297,10 @@
 	if (!dr) {
 		add_dr(dev, &new_dr->node);
 		dr = new_dr;
-		new_dr = NULL;
+		new_res = NULL;
 	}
 	spin_unlock_irqrestore(&dev->devres_lock, flags);
-	devres_free(new_dr);
+	devres_free(new_res);
 
 	return dr->data;
 }
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 894bda1..8524450 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -443,7 +443,7 @@
 		return -ENOMEM;
 	fwn->name = kstrdup_const(name, GFP_KERNEL);
 	if (!fwn->name) {
-		kfree(fwn);
+		devres_free(fwn);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
new file mode 100644
index 0000000..1857a5d
--- /dev/null
+++ b/drivers/base/platform-msi.c
@@ -0,0 +1,282 @@
+/*
+ * MSI framework for platform devices
+ *
+ * Copyright (C) 2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+
+#define DEV_ID_SHIFT	24
+
+/*
+ * Internal data structure containing a (made up, but unique) devid
+ * and the callback to write the MSI message.
+ */
+struct platform_msi_priv_data {
+	irq_write_msi_msg_t	write_msg;
+	int			devid;
+};
+
+/* The devid allocator */
+static DEFINE_IDA(platform_msi_devid_ida);
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+/*
+ * Convert an msi_desc to a globally unique identifier (per-device
+ * devid + msi_desc position in the msi_list).
+ */
+static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
+{
+	u32 devid;
+
+	devid = desc->platform.msi_priv_data->devid;
+
+	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+}
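Worked example, for illustration only: with DEV_ID_SHIFT equal to 24, the devid is shifted into bits 8 and above of the hwirq, so devid 2 with msi_index 5 encodes to (2 << 8) | 5 = 0x205.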
+
+static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = platform_msi_calc_hwirq(desc);
+}
+
+static int platform_msi_init(struct irq_domain *domain,
+			     struct msi_domain_info *info,
+			     unsigned int virq, irq_hw_number_t hwirq,
+			     msi_alloc_info_t *arg)
+{
+	struct irq_data *data;
+
+	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+				      info->chip, info->chip_data);
+
+	/*
+	 * Save the MSI descriptor in handler_data so that the
+	 * irq_write_msi_msg callback can retrieve it (and the
+	 * associated device).
+	 */
+	data = irq_domain_get_irq_data(domain, virq);
+	data->handler_data = arg->desc;
+
+	return 0;
+}
+#else
+#define platform_msi_set_desc		NULL
+#define platform_msi_init		NULL
+#endif
+
+static void platform_msi_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	BUG_ON(!ops);
+
+	if (ops->msi_init == NULL)
+		ops->msi_init = platform_msi_init;
+	if (ops->set_desc == NULL)
+		ops->set_desc = platform_msi_set_desc;
+}
+
+static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+	struct msi_desc *desc = irq_data_get_irq_handler_data(data);
+	struct platform_msi_priv_data *priv_data;
+
+	priv_data = desc->platform.msi_priv_data;
+
+	priv_data->write_msg(desc, msg);
+}
+
+static void platform_msi_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	BUG_ON(!chip);
+	if (!chip->irq_mask)
+		chip->irq_mask = irq_chip_mask_parent;
+	if (!chip->irq_unmask)
+		chip->irq_unmask = irq_chip_unmask_parent;
+	if (!chip->irq_eoi)
+		chip->irq_eoi = irq_chip_eoi_parent;
+	if (!chip->irq_set_affinity)
+		chip->irq_set_affinity = msi_domain_set_affinity;
+	if (!chip->irq_write_msi_msg)
+		chip->irq_write_msi_msg = platform_msi_write_msg;
+}
+
+static void platform_msi_free_descs(struct device *dev)
+{
+	struct msi_desc *desc, *tmp;
+
+	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
+		list_del(&desc->list);
+		free_msi_entry(desc);
+	}
+}
+
+static int platform_msi_alloc_descs(struct device *dev, int nvec,
+				    struct platform_msi_priv_data *data)
+
+{
+	int i;
+
+	for (i = 0; i < nvec; i++) {
+		struct msi_desc *desc;
+
+		desc = alloc_msi_entry(dev);
+		if (!desc)
+			break;
+
+		desc->platform.msi_priv_data = data;
+		desc->platform.msi_index = i;
+		desc->nvec_used = 1;
+
+		list_add_tail(&desc->list, dev_to_msi_list(dev));
+	}
+
+	if (i != nvec) {
+		/* Clean up the mess */
+		platform_msi_free_descs(dev);
+
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
+ * @np:		Optional device-tree node of the interrupt controller
+ * @info:	MSI domain info
+ * @parent:	Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a platform MSI
+ * interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+						  struct msi_domain_info *info,
+						  struct irq_domain *parent)
+{
+	struct irq_domain *domain;
+
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		platform_msi_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		platform_msi_update_chip_ops(info);
+
+	domain = msi_create_irq_domain(np, info, parent);
+	if (domain)
+		domain->bus_token = DOMAIN_BUS_PLATFORM_MSI;
+
+	return domain;
+}
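A sketch of how an irqchip driver might create such a domain (not part of this patch; the chip name, the flags shown and the surrounding variables are assumptions for illustration):

	static struct irq_chip my_msi_irq_chip = {
		.name	= "my-msi",
		/* mask/unmask/eoi fall back to the parent chip via the default chip ops */
	};

	static struct msi_domain_info my_msi_domain_info = {
		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
		.chip	= &my_msi_irq_chip,
	};

	msi_domain = platform_msi_create_irq_domain(node, &my_msi_domain_info,
						    parent_domain);
	if (!msi_domain)
		pr_err("failed to create platform MSI domain\n");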
+
+/**
+ * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
+ * @dev:		The device for which to allocate interrupts
+ * @nvec:		The number of interrupts to allocate
+ * @write_msi_msg:	Callback to write an interrupt message for @dev
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+				   irq_write_msi_msg_t write_msi_msg)
+{
+	struct platform_msi_priv_data *priv_data;
+	int err;
+
+	/*
+	 * Limit the number of interrupts to 256 per device. Should we
+	 * need to bump this up, DEV_ID_SHIFT should be adjusted
+	 * accordingly (which would impact the max number of MSI
+	 * capable devices).
+	 */
+	if (!dev->msi_domain || !write_msi_msg || !nvec ||
+	    nvec > (1 << (32 - DEV_ID_SHIFT)))
+		return -EINVAL;
+
+	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+		dev_err(dev, "Incompatible msi_domain, giving up\n");
+		return -EINVAL;
+	}
+
+	/* Already had a helping of MSI? Greed... */
+	if (!list_empty(dev_to_msi_list(dev)))
+		return -EBUSY;
+
+	priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+	if (!priv_data)
+		return -ENOMEM;
+
+	priv_data->devid = ida_simple_get(&platform_msi_devid_ida,
+					  0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+	if (priv_data->devid < 0) {
+		err = priv_data->devid;
+		goto out_free_data;
+	}
+
+	priv_data->write_msg = write_msi_msg;
+
+	err = platform_msi_alloc_descs(dev, nvec, priv_data);
+	if (err)
+		goto out_free_id;
+
+	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+	if (err)
+		goto out_free_desc;
+
+	return 0;
+
+out_free_desc:
+	platform_msi_free_descs(dev);
+out_free_id:
+	ida_simple_remove(&platform_msi_devid_ida, priv_data->devid);
+out_free_data:
+	kfree(priv_data);
+
+	return err;
+}
+
+/**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+ * @dev:	The device for which to free interrupts
+ */
+void platform_msi_domain_free_irqs(struct device *dev)
+{
+	struct msi_desc *desc;
+
+	desc = first_msi_entry(dev);
+	if (desc) {
+		struct platform_msi_priv_data *data;
+
+		data = desc->platform.msi_priv_data;
+
+		ida_simple_remove(&platform_msi_devid_ida, data->devid);
+		kfree(data);
+	}
+
+	msi_domain_free_irqs(dev->msi_domain, dev);
+	platform_msi_free_descs(dev);
+}
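A consumer-side sketch, not part of this patch (the callback and function names are hypothetical): the write_msi_msg callback programs the doorbell address/data into the device, and the alloc/free calls bracket the device's use of its MSIs.

	static void my_dev_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
	{
		/* program msg->address_hi, msg->address_lo and msg->data
		 * into the device's MSI doorbell registers
		 */
	}

	static int my_dev_init_msi(struct device *dev)
	{
		int err;

		err = platform_msi_domain_alloc_irqs(dev, 4, my_dev_write_msi_msg);
		if (err)
			return err;
		/* request_irq() on the allocated descriptors, then ... */
		return 0;
	}

	static void my_dev_exit_msi(struct device *dev)
	{
		platform_msi_domain_free_irqs(dev);
	}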
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 063f0ab..f80aaaf 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,9 +375,7 @@
 
 	while (--i >= 0) {
 		struct resource *r = &pdev->resource[i];
-		unsigned long type = resource_type(r);
-
-		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+		if (r->parent)
 			release_resource(r);
 	}
 
@@ -408,9 +406,7 @@
 
 		for (i = 0; i < pdev->num_resources; i++) {
 			struct resource *r = &pdev->resource[i];
-			unsigned long type = resource_type(r);
-
-			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+			if (r->parent)
 				release_resource(r);
 		}
 	}
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index acef9f9..652b5a3 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -38,7 +38,7 @@
  * @dev: The device for the given clock
  * @ce: PM clock entry corresponding to the clock.
  */
-static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
 {
 	int ret;
 
@@ -50,8 +50,6 @@
 			dev_err(dev, "%s: failed to enable clk %p, error %d\n",
 				__func__, ce->clk, ret);
 	}
-
-	return ret;
 }
 
 /**
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 0ee43c1..4167201 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -114,8 +114,12 @@
 					stop_latency_ns, "stop");
 }
 
-static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
+			bool timed)
 {
+	if (!timed)
+		return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+
 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
 					start_latency_ns, "start");
 }
@@ -136,41 +140,6 @@
 	smp_mb__after_atomic();
 }
 
-static void genpd_acquire_lock(struct generic_pm_domain *genpd)
-{
-	DEFINE_WAIT(wait);
-
-	mutex_lock(&genpd->lock);
-	/*
-	 * Wait for the domain to transition into either the active,
-	 * or the power off state.
-	 */
-	for (;;) {
-		prepare_to_wait(&genpd->status_wait_queue, &wait,
-				TASK_UNINTERRUPTIBLE);
-		if (genpd->status == GPD_STATE_ACTIVE
-		    || genpd->status == GPD_STATE_POWER_OFF)
-			break;
-		mutex_unlock(&genpd->lock);
-
-		schedule();
-
-		mutex_lock(&genpd->lock);
-	}
-	finish_wait(&genpd->status_wait_queue, &wait);
-}
-
-static void genpd_release_lock(struct generic_pm_domain *genpd)
-{
-	mutex_unlock(&genpd->lock);
-}
-
-static void genpd_set_active(struct generic_pm_domain *genpd)
-{
-	if (genpd->resume_count == 0)
-		genpd->status = GPD_STATE_ACTIVE;
-}
-
 static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
 {
 	s64 usecs64;
@@ -251,35 +220,14 @@
  * resume a device belonging to it.
  */
 static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
-	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct gpd_link *link;
-	DEFINE_WAIT(wait);
 	int ret = 0;
 
-	/* If the domain's master is being waited for, we have to wait too. */
-	for (;;) {
-		prepare_to_wait(&genpd->status_wait_queue, &wait,
-				TASK_UNINTERRUPTIBLE);
-		if (genpd->status != GPD_STATE_WAIT_MASTER)
-			break;
-		mutex_unlock(&genpd->lock);
-
-		schedule();
-
-		mutex_lock(&genpd->lock);
-	}
-	finish_wait(&genpd->status_wait_queue, &wait);
-
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
 		return 0;
 
-	if (genpd->status != GPD_STATE_POWER_OFF) {
-		genpd_set_active(genpd);
-		return 0;
-	}
-
 	if (genpd->cpuidle_data) {
 		cpuidle_pause_and_lock();
 		genpd->cpuidle_data->idle_state->disabled = true;
@@ -294,20 +242,8 @@
 	 */
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_inc(link->master);
-		genpd->status = GPD_STATE_WAIT_MASTER;
-
-		mutex_unlock(&genpd->lock);
 
 		ret = pm_genpd_poweron(link->master);
-
-		mutex_lock(&genpd->lock);
-
-		/*
-		 * The "wait for parent" status is guaranteed not to change
-		 * while the master is powering on.
-		 */
-		genpd->status = GPD_STATE_POWER_OFF;
-		wake_up_all(&genpd->status_wait_queue);
 		if (ret) {
 			genpd_sd_counter_dec(link->master);
 			goto err;
@@ -319,8 +255,7 @@
 		goto err;
 
  out:
-	genpd_set_active(genpd);
-
+	genpd->status = GPD_STATE_ACTIVE;
 	return 0;
 
  err:
@@ -356,20 +291,18 @@
 	return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
 }
 
-static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
-				     struct device *dev)
-{
-	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
-}
-
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
 					save_state_latency_ns, "state save");
 }
 
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+static int genpd_restore_dev(struct generic_pm_domain *genpd,
+			struct device *dev, bool timed)
 {
+	if (!timed)
+		return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
+
 	return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
 					restore_state_latency_ns,
 					"state restore");
@@ -416,89 +349,6 @@
 }
 
 /**
- * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @pdd: Domain data of the device to save the state of.
- * @genpd: PM domain the device belongs to.
- */
-static int __pm_genpd_save_device(struct pm_domain_data *pdd,
-				  struct generic_pm_domain *genpd)
-	__releases(&genpd->lock) __acquires(&genpd->lock)
-{
-	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
-	struct device *dev = pdd->dev;
-	int ret = 0;
-
-	if (gpd_data->need_restore > 0)
-		return 0;
-
-	/*
-	 * If the value of the need_restore flag is still unknown at this point,
-	 * we trust that pm_genpd_poweroff() has verified that the device is
-	 * already runtime PM suspended.
-	 */
-	if (gpd_data->need_restore < 0) {
-		gpd_data->need_restore = 1;
-		return 0;
-	}
-
-	mutex_unlock(&genpd->lock);
-
-	genpd_start_dev(genpd, dev);
-	ret = genpd_save_dev(genpd, dev);
-	genpd_stop_dev(genpd, dev);
-
-	mutex_lock(&genpd->lock);
-
-	if (!ret)
-		gpd_data->need_restore = 1;
-
-	return ret;
-}
-
-/**
- * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @pdd: Domain data of the device to restore the state of.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
-				      struct generic_pm_domain *genpd)
-	__releases(&genpd->lock) __acquires(&genpd->lock)
-{
-	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
-	struct device *dev = pdd->dev;
-	int need_restore = gpd_data->need_restore;
-
-	gpd_data->need_restore = 0;
-	mutex_unlock(&genpd->lock);
-
-	genpd_start_dev(genpd, dev);
-
-	/*
-	 * Call genpd_restore_dev() for recently added devices too (need_restore
-	 * is negative then).
-	 */
-	if (need_restore)
-		genpd_restore_dev(genpd, dev);
-
-	mutex_lock(&genpd->lock);
-}
-
-/**
- * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
- * @genpd: PM domain to check.
- *
- * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
- * a "power off" operation, which means that a "power on" has occured in the
- * meantime, or if its resume_count field is different from zero, which means
- * that one of its devices has been resumed in the meantime.
- */
-static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
-{
-	return genpd->status == GPD_STATE_WAIT_MASTER
-		|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
-}
-
-/**
  * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
  * @genpd: PM domait to power off.
  *
@@ -515,34 +365,26 @@
  * @genpd: PM domain to power down.
  *
  * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, run the runtime suspend callbacks provided by all of
- * the @genpd's devices' drivers and remove power from @genpd.
+ * have been powered down, remove power from @genpd.
  */
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
-	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
 	struct pm_domain_data *pdd;
 	struct gpd_link *link;
-	unsigned int not_suspended;
-	int ret = 0;
+	unsigned int not_suspended = 0;
 
- start:
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) The domain is waiting for its master to power up.
-	 * (3) One of the domain's devices is being resumed right now.
-	 * (4) System suspend is in progress.
+	 * (2) System suspend is in progress.
 	 */
 	if (genpd->status == GPD_STATE_POWER_OFF
-	    || genpd->status == GPD_STATE_WAIT_MASTER
-	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
+	    || genpd->prepared_count > 0)
 		return 0;
 
 	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
-	not_suspended = 0;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
 		enum pm_qos_flags_status stat;
 
@@ -560,41 +402,11 @@
 	if (not_suspended > genpd->in_progress)
 		return -EBUSY;
 
-	if (genpd->poweroff_task) {
-		/*
-		 * Another instance of pm_genpd_poweroff() is executing
-		 * callbacks, so tell it to start over and return.
-		 */
-		genpd->status = GPD_STATE_REPEAT;
-		return 0;
-	}
-
 	if (genpd->gov && genpd->gov->power_down_ok) {
 		if (!genpd->gov->power_down_ok(&genpd->domain))
 			return -EAGAIN;
 	}
 
-	genpd->status = GPD_STATE_BUSY;
-	genpd->poweroff_task = current;
-
-	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
-		ret = atomic_read(&genpd->sd_count) == 0 ?
-			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
-
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
-		if (ret) {
-			genpd_set_active(genpd);
-			goto out;
-		}
-
-		if (genpd->status == GPD_STATE_REPEAT) {
-			genpd->poweroff_task = NULL;
-			goto start;
-		}
-	}
-
 	if (genpd->cpuidle_data) {
 		/*
 		 * If cpuidle_data is set, cpuidle should turn the domain off
@@ -607,14 +419,14 @@
 		cpuidle_pause_and_lock();
 		genpd->cpuidle_data->idle_state->disabled = false;
 		cpuidle_resume_and_unlock();
-		goto out;
+		return 0;
 	}
 
 	if (genpd->power_off) {
-		if (atomic_read(&genpd->sd_count) > 0) {
-			ret = -EBUSY;
-			goto out;
-		}
+		int ret;
+
+		if (atomic_read(&genpd->sd_count) > 0)
+			return -EBUSY;
 
 		/*
 		 * If sd_count > 0 at this point, one of the subdomains hasn't
@@ -625,10 +437,8 @@
 		 * happen very often).
 		 */
 		ret = genpd_power_off(genpd, true);
-		if (ret == -EBUSY) {
-			genpd_set_active(genpd);
-			goto out;
-		}
+		if (ret)
+			return ret;
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
@@ -638,10 +448,7 @@
 		genpd_queue_power_off_work(link->master);
 	}
 
- out:
-	genpd->poweroff_task = NULL;
-	wake_up_all(&genpd->status_wait_queue);
-	return ret;
+	return 0;
 }
 
 /**
@@ -654,9 +461,9 @@
 
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 	pm_genpd_poweroff(genpd);
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 }
 
 /**
@@ -670,7 +477,6 @@
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	struct generic_pm_domain_data *gpd_data;
 	bool (*stop_ok)(struct device *__dev);
 	int ret;
 
@@ -684,10 +490,16 @@
 	if (stop_ok && !stop_ok(dev))
 		return -EBUSY;
 
-	ret = genpd_stop_dev(genpd, dev);
+	ret = genpd_save_dev(genpd, dev);
 	if (ret)
 		return ret;
 
+	ret = genpd_stop_dev(genpd, dev);
+	if (ret) {
+		genpd_restore_dev(genpd, dev, true);
+		return ret;
+	}
+
 	/*
 	 * If power.irq_safe is set, this routine will be run with interrupts
 	 * off, so it can't use mutexes.
@@ -696,16 +508,6 @@
 		return 0;
 
 	mutex_lock(&genpd->lock);
-
-	/*
-	 * If we have an unknown state of the need_restore flag, it means none
-	 * of the runtime PM callbacks has been invoked yet. Let's update the
-	 * flag to reflect that the current state is active.
-	 */
-	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
-	if (gpd_data->need_restore < 0)
-		gpd_data->need_restore = 0;
-
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
 	genpd->in_progress--;
@@ -725,8 +527,8 @@
 static int pm_genpd_runtime_resume(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	DEFINE_WAIT(wait);
 	int ret;
+	bool timed = true;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -735,40 +537,22 @@
 		return -EINVAL;
 
 	/* If power.irq_safe, the PM domain is never powered off. */
-	if (dev->power.irq_safe)
-		return genpd_start_dev_no_timing(genpd, dev);
+	if (dev->power.irq_safe) {
+		timed = false;
+		goto out;
+	}
 
 	mutex_lock(&genpd->lock);
 	ret = __pm_genpd_poweron(genpd);
-	if (ret) {
-		mutex_unlock(&genpd->lock);
-		return ret;
-	}
-	genpd->status = GPD_STATE_BUSY;
-	genpd->resume_count++;
-	for (;;) {
-		prepare_to_wait(&genpd->status_wait_queue, &wait,
-				TASK_UNINTERRUPTIBLE);
-		/*
-		 * If current is the powering off task, we have been called
-		 * reentrantly from one of the device callbacks, so we should
-		 * not wait.
-		 */
-		if (!genpd->poweroff_task || genpd->poweroff_task == current)
-			break;
-		mutex_unlock(&genpd->lock);
-
-		schedule();
-
-		mutex_lock(&genpd->lock);
-	}
-	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
-	genpd->resume_count--;
-	genpd_set_active(genpd);
-	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+	if (ret)
+		return ret;
+
+ out:
+	genpd_start_dev(genpd, dev, timed);
+	genpd_restore_dev(genpd, dev, timed);
+
 	return 0;
 }
 
@@ -883,7 +667,7 @@
 {
 	struct gpd_link *link;
 
-	if (genpd->status != GPD_STATE_POWER_OFF)
+	if (genpd->status == GPD_STATE_ACTIVE)
 		return;
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
@@ -960,14 +744,14 @@
 	if (resume_needed(dev, genpd))
 		pm_runtime_resume(dev);
 
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 
 	if (genpd->prepared_count++ == 0) {
 		genpd->suspended_count = 0;
 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
 	}
 
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 
 	if (genpd->suspend_power_off) {
 		pm_runtime_put_noidle(dev);
@@ -1102,7 +886,7 @@
 	pm_genpd_sync_poweron(genpd, true);
 	genpd->suspended_count--;
 
-	return genpd_start_dev(genpd, dev);
+	return genpd_start_dev(genpd, dev, true);
 }
 
 /**
@@ -1230,7 +1014,7 @@
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
 }
 
 /**
@@ -1324,7 +1108,7 @@
 
 	pm_genpd_sync_poweron(genpd, true);
 
-	return genpd_start_dev(genpd, dev);
+	return genpd_start_dev(genpd, dev, true);
 }
 
 /**
@@ -1440,7 +1224,6 @@
 		gpd_data->td = *td;
 
 	gpd_data->base.dev = dev;
-	gpd_data->need_restore = -1;
 	gpd_data->td.constraint_changed = true;
 	gpd_data->td.effective_constraint_ns = -1;
 	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
@@ -1502,7 +1285,7 @@
 	if (IS_ERR(gpd_data))
 		return PTR_ERR(gpd_data);
 
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 
 	if (genpd->prepared_count > 0) {
 		ret = -EAGAIN;
@@ -1519,7 +1302,7 @@
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
 
  out:
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 
 	if (ret)
 		genpd_free_dev_data(dev, gpd_data);
@@ -1563,7 +1346,7 @@
 	gpd_data = to_gpd_data(pdd);
 	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
 
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 
 	if (genpd->prepared_count > 0) {
 		ret = -EAGAIN;
@@ -1578,14 +1361,14 @@
 
 	list_del_init(&pdd->list_node);
 
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 
 	genpd_free_dev_data(dev, gpd_data);
 
 	return 0;
 
  out:
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 	dev_pm_qos_add_notifier(dev, &gpd_data->nb);
 
 	return ret;
@@ -1606,17 +1389,9 @@
 	    || genpd == subdomain)
 		return -EINVAL;
 
- start:
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 	mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-	if (subdomain->status != GPD_STATE_POWER_OFF
-	    && subdomain->status != GPD_STATE_ACTIVE) {
-		mutex_unlock(&subdomain->lock);
-		genpd_release_lock(genpd);
-		goto start;
-	}
-
 	if (genpd->status == GPD_STATE_POWER_OFF
 	    &&  subdomain->status != GPD_STATE_POWER_OFF) {
 		ret = -EINVAL;
@@ -1644,7 +1419,7 @@
 
  out:
 	mutex_unlock(&subdomain->lock);
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 
 	return ret;
 }
@@ -1692,8 +1467,7 @@
 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
 		return -EINVAL;
 
- start:
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 
 	list_for_each_entry(link, &genpd->master_links, master_node) {
 		if (link->slave != subdomain)
@@ -1701,13 +1475,6 @@
 
 		mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
 
-		if (subdomain->status != GPD_STATE_POWER_OFF
-		    && subdomain->status != GPD_STATE_ACTIVE) {
-			mutex_unlock(&subdomain->lock);
-			genpd_release_lock(genpd);
-			goto start;
-		}
-
 		list_del(&link->master_node);
 		list_del(&link->slave_node);
 		kfree(link);
@@ -1720,7 +1487,7 @@
 		break;
 	}
 
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 
 	return ret;
 }
@@ -1744,7 +1511,7 @@
 	if (IS_ERR_OR_NULL(genpd) || state < 0)
 		return -EINVAL;
 
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 
 	if (genpd->cpuidle_data) {
 		ret = -EEXIST;
@@ -1775,7 +1542,7 @@
 	genpd_recalc_cpu_exit_latency(genpd);
 
  out:
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 	return ret;
 
  err:
@@ -1812,7 +1579,7 @@
 	if (IS_ERR_OR_NULL(genpd))
 		return -EINVAL;
 
-	genpd_acquire_lock(genpd);
+	mutex_lock(&genpd->lock);
 
 	cpuidle_data = genpd->cpuidle_data;
 	if (!cpuidle_data) {
@@ -1830,7 +1597,7 @@
 	kfree(cpuidle_data);
 
  out:
-	genpd_release_lock(genpd);
+	mutex_unlock(&genpd->lock);
 	return ret;
 }
 
@@ -1912,9 +1679,6 @@
 	genpd->in_progress = 0;
 	atomic_set(&genpd->sd_count, 0);
 	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
-	init_waitqueue_head(&genpd->status_wait_queue);
-	genpd->poweroff_task = NULL;
-	genpd->resume_count = 0;
 	genpd->device_count = 0;
 	genpd->max_off_time_ns = -1;
 	genpd->max_off_time_changed = true;
@@ -1952,6 +1716,7 @@
 	list_add(&genpd->gpd_list_node, &gpd_list);
 	mutex_unlock(&gpd_list_lock);
 }
+EXPORT_SYMBOL_GPL(pm_genpd_init);
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
 /*
@@ -2125,7 +1890,7 @@
 
 /**
  * genpd_dev_pm_detach - Detach a device from its PM domain.
- * @dev: Device to attach.
+ * @dev: Device to detach.
  * @power_off: Currently not used
  *
  * Try to locate a corresponding generic PM domain, which the device was
@@ -2183,7 +1948,10 @@
  * Both generic and legacy Samsung-specific DT bindings are supported to keep
  * backwards compatibility with existing DTBs.
  *
- * Returns 0 on successfully attached PM domain or negative error code.
+ * Returns 0 on successfully attached PM domain, or a negative error code. Note
+ * that if a power-domain exists for the device but it cannot be found or
+ * turned on, -EPROBE_DEFER is returned to ensure that the device is not
+ * probed and the attach is retried later.
  */
 int genpd_dev_pm_attach(struct device *dev)
 {
@@ -2220,7 +1988,7 @@
 		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
 			__func__, PTR_ERR(pd));
 		of_node_put(dev->of_node);
-		return PTR_ERR(pd);
+		return -EPROBE_DEFER;
 	}
 
 	dev_dbg(dev, "adding to PM domain %s\n", pd->name);
@@ -2238,14 +2006,15 @@
 		dev_err(dev, "failed to add to PM domain %s: %d",
 			pd->name, ret);
 		of_node_put(dev->of_node);
-		return ret;
+		goto out;
 	}
 
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
-	pm_genpd_poweron(pd);
+	ret = pm_genpd_poweron(pd);
 
-	return 0;
+out:
+	return ret ? -EPROBE_DEFER : 0;
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 #endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
@@ -2293,9 +2062,6 @@
 {
 	static const char * const status_lookup[] = {
 		[GPD_STATE_ACTIVE] = "on",
-		[GPD_STATE_WAIT_MASTER] = "wait-master",
-		[GPD_STATE_BUSY] = "busy",
-		[GPD_STATE_REPEAT] = "off-in-progress",
 		[GPD_STATE_POWER_OFF] = "off"
 	};
 	struct pm_domain_data *pm_data;
@@ -2309,7 +2075,7 @@
 
 	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
 		goto exit;
-	seq_printf(s, "%-30s  %-15s  ", genpd->name, status_lookup[genpd->status]);
+	seq_printf(s, "%-30s  %-15s ", genpd->name, status_lookup[genpd->status]);
 
 	/*
 	 * Modifications on the list require holding locks on both
@@ -2344,8 +2110,8 @@
 	struct generic_pm_domain *genpd;
 	int ret = 0;
 
-	seq_puts(s, "    domain                      status         slaves\n");
-	seq_puts(s, "           /device                                      runtime status\n");
+	seq_puts(s, "domain                          status          slaves\n");
+	seq_puts(s, "    /device                                             runtime status\n");
 	seq_puts(s, "----------------------------------------------------------------------\n");
 
 	ret = mutex_lock_interruptible(&gpd_list_lock);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 30b7bbf..1710c26 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1377,7 +1377,7 @@
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
-			if (pm_runtime_suspended_if_enabled(dev))
+			if (pm_runtime_status_suspended(dev))
 				goto Complete;
 
 			pm_runtime_enable(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 677fb28..eb25449 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -11,6 +11,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -51,10 +52,17 @@
  *		order.
  * @dynamic:	not-created from static DT entries.
  * @available:	true/false - marks if this OPP as available or not
+ * @turbo:	true if turbo (boost) OPP
  * @rate:	Frequency in hertz
- * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
+ * @u_volt:	Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min:	Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max:	Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp:	Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ *		frequency from any other OPP's frequency.
  * @dev_opp:	points back to the device_opp struct this opp belongs to
  * @rcu_head:	RCU callback head used for deferred freeing
+ * @np:		OPP's device node.
  *
  * This structure stores the OPP information for a given device.
  */
@@ -63,11 +71,34 @@
 
 	bool available;
 	bool dynamic;
+	bool turbo;
 	unsigned long rate;
+
 	unsigned long u_volt;
+	unsigned long u_volt_min;
+	unsigned long u_volt_max;
+	unsigned long u_amp;
+	unsigned long clock_latency_ns;
 
 	struct device_opp *dev_opp;
 	struct rcu_head rcu_head;
+
+	struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node:	list node
+ * @dev:	device to which the struct object belongs
+ * @rcu_head:	RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+	struct list_head node;
+	const struct device *dev;
+	struct rcu_head rcu_head;
 };
 
 /**
@@ -77,10 +108,12 @@
  *		list.
  *		RCU usage: nodes are not modified in the list of device_opp,
  *		however addition is possible and is secured by dev_opp_list_lock
- * @dev:	device pointer
  * @srcu_head:	notifier head to notify the OPP availability changes.
  * @rcu_head:	RCU callback head used for deferred freeing
+ * @dev_list:	list of devices that share these OPPs
  * @opp_list:	list of opps
+ * @np:		struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
  *
  * This is an internal data structure maintaining the link to opps attached to
  * a device. This structure is not meant to be shared to users as it is
@@ -93,10 +126,15 @@
 struct device_opp {
 	struct list_head node;
 
-	struct device *dev;
 	struct srcu_notifier_head srcu_head;
 	struct rcu_head rcu_head;
+	struct list_head dev_list;
 	struct list_head opp_list;
+
+	struct device_node *np;
+	unsigned long clock_latency_ns_max;
+	bool shared_opp;
+	struct dev_pm_opp *suspend_opp;
 };
 
 /*
@@ -110,12 +148,44 @@
 
 #define opp_rcu_lockdep_assert()					\
 do {									\
-	rcu_lockdep_assert(rcu_read_lock_held() ||			\
-				lockdep_is_held(&dev_opp_list_lock),	\
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
+				!lockdep_is_held(&dev_opp_list_lock),	\
 			   "Missing rcu_read_lock() or "		\
 			   "dev_opp_list_lock protection");		\
 } while (0)
 
+static struct device_list_opp *_find_list_dev(const struct device *dev,
+					      struct device_opp *dev_opp)
+{
+	struct device_list_opp *list_dev;
+
+	list_for_each_entry(list_dev, &dev_opp->dev_list, node)
+		if (list_dev->dev == dev)
+			return list_dev;
+
+	return NULL;
+}
+
+static struct device_opp *_managed_opp(const struct device_node *np)
+{
+	struct device_opp *dev_opp;
+
+	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
+		if (dev_opp->np == np) {
+			/*
+			 * Multiple devices can point to the same OPP table and
+			 * so will have same node-pointer, np.
+			 *
+			 * But the OPPs will be considered as shared only if the
+			 * OPP table contains a "opp-shared" property.
+			 */
+			return dev_opp->shared_opp ? dev_opp : NULL;
+		}
+	}
+
+	return NULL;
+}
+
 /**
  * _find_device_opp() - find device_opp struct using device pointer
  * @dev:	device pointer used to lookup device OPPs
@@ -132,21 +202,18 @@
  */
 static struct device_opp *_find_device_opp(struct device *dev)
 {
-	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+	struct device_opp *dev_opp;
 
-	if (unlikely(IS_ERR_OR_NULL(dev))) {
+	if (IS_ERR_OR_NULL(dev)) {
 		pr_err("%s: Invalid parameters\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
 
-	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
-		if (tmp_dev_opp->dev == dev) {
-			dev_opp = tmp_dev_opp;
-			break;
-		}
-	}
+	list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
+		if (_find_list_dev(dev, dev_opp))
+			return dev_opp;
 
-	return dev_opp;
+	return ERR_PTR(-ENODEV);
 }
 
 /**
@@ -172,7 +239,7 @@
 	opp_rcu_lockdep_assert();
 
 	tmp_opp = rcu_dereference(opp);
-	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
 		pr_err("%s: Invalid parameters\n", __func__);
 	else
 		v = tmp_opp->u_volt;
@@ -204,7 +271,7 @@
 	opp_rcu_lockdep_assert();
 
 	tmp_opp = rcu_dereference(opp);
-	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
+	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
 		pr_err("%s: Invalid parameters\n", __func__);
 	else
 		f = tmp_opp->rate;
@@ -214,6 +281,66 @@
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 
 /**
+ * dev_pm_opp_is_turbo() - Returns whether opp is a turbo OPP or not
+ * @opp: opp for which turbo mode is being verified
+ *
+ * Turbo OPPs are not for normal use, and can be enabled (under certain
+ * conditions) for short periods of time to finish high-throughput work
+ * quickly. Running on them for longer may overheat the chip.
+ *
+ * Return: true if opp is turbo opp, else false.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is a rcu
+ * protected pointer. This means that opp which could have been fetched by
+ * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
+ * under RCU lock. The pointer returned by the opp_find_freq family must be
+ * used in the same section as the usage of this function with the pointer
+ * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
+ * pointer.
+ */
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+	struct dev_pm_opp *tmp_opp;
+
+	opp_rcu_lockdep_assert();
+
+	tmp_opp = rcu_dereference(opp);
+	if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
+		pr_err("%s: Invalid parameters\n", __func__);
+		return false;
+	}
+
+	return tmp_opp->turbo;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
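Usage sketch, not part of this patch; the device pointer and the frequency are placeholders:

	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(dev, 1200000000, true);
	if (!IS_ERR(opp) && dev_pm_opp_is_turbo(opp))
		dev_info(dev, "1.2 GHz is a turbo OPP\n");
	rcu_read_unlock();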
+
+/**
+ * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
+ * @dev:	device for which we do this operation
+ *
+ * Return: This function returns the max clock latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+	struct device_opp *dev_opp;
+	unsigned long clock_latency_ns;
+
+	rcu_read_lock();
+
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp))
+		clock_latency_ns = 0;
+	else
+		clock_latency_ns = dev_opp->clock_latency_ns_max;
+
+	rcu_read_unlock();
+	return clock_latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
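For illustration only, not part of this patch; cpu_dev is a placeholder:

	/* worst-case DVFS switch latency for this device, in nanoseconds */
	latency_ns = dev_pm_opp_get_max_clock_latency(cpu_dev);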
+
+/**
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:	device for which we do this operation
  *
@@ -407,18 +534,57 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
+/* List-dev Helpers */
+static void _kfree_list_dev_rcu(struct rcu_head *head)
+{
+	struct device_list_opp *list_dev;
+
+	list_dev = container_of(head, struct device_list_opp, rcu_head);
+	kfree_rcu(list_dev, rcu_head);
+}
+
+static void _remove_list_dev(struct device_list_opp *list_dev,
+			     struct device_opp *dev_opp)
+{
+	list_del(&list_dev->node);
+	call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
+		  _kfree_list_dev_rcu);
+}
+
+static struct device_list_opp *_add_list_dev(const struct device *dev,
+					     struct device_opp *dev_opp)
+{
+	struct device_list_opp *list_dev;
+
+	list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
+	if (!list_dev)
+		return NULL;
+
+	/* Initialize list-dev */
+	list_dev->dev = dev;
+	list_add_rcu(&list_dev->node, &dev_opp->dev_list);
+
+	return list_dev;
+}
+
 /**
- * _add_device_opp() - Allocate a new device OPP table
+ * _add_device_opp() - Find device OPP table or allocate a new one
  * @dev:	device for which we do this operation
  *
- * New device node which uses OPPs - used when multiple devices with OPP tables
- * are maintained.
+ * It tries to find an existing table first; if none is found, it
+ * allocates a new OPP table and returns that.
  *
  * Return: valid device_opp pointer if success, else NULL.
  */
 static struct device_opp *_add_device_opp(struct device *dev)
 {
 	struct device_opp *dev_opp;
+	struct device_list_opp *list_dev;
+
+	/* Check for existing list for 'dev' first */
+	dev_opp = _find_device_opp(dev);
+	if (!IS_ERR(dev_opp))
+		return dev_opp;
 
 	/*
 	 * Allocate a new device OPP table. In the infrequent case where a new
@@ -428,7 +594,14 @@
 	if (!dev_opp)
 		return NULL;
 
-	dev_opp->dev = dev;
+	INIT_LIST_HEAD(&dev_opp->dev_list);
+
+	list_dev = _add_list_dev(dev, dev_opp);
+	if (!list_dev) {
+		kfree(dev_opp);
+		return NULL;
+	}
+
 	srcu_init_notifier_head(&dev_opp->srcu_head);
 	INIT_LIST_HEAD(&dev_opp->opp_list);
 
@@ -438,149 +611,6 @@
 }
 
 /**
- * _opp_add_dynamic() - Allocate a dynamic OPP.
- * @dev:	device for which we do this operation
- * @freq:	Frequency in Hz for this OPP
- * @u_volt:	Voltage in uVolts for this OPP
- * @dynamic:	Dynamically added OPPs.
- *
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
- *
- * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
- * freed by of_free_opp_table.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0		On success OR
- *		Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST	Freq are same and volt are different OR
- *		Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM	Memory allocation failure
- */
-static int _opp_add_dynamic(struct device *dev, unsigned long freq,
-			    long u_volt, bool dynamic)
-{
-	struct device_opp *dev_opp = NULL;
-	struct dev_pm_opp *opp, *new_opp;
-	struct list_head *head;
-	int ret;
-
-	/* allocate new OPP node */
-	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
-	if (!new_opp)
-		return -ENOMEM;
-
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
-
-	/* populate the opp table */
-	new_opp->rate = freq;
-	new_opp->u_volt = u_volt;
-	new_opp->available = true;
-	new_opp->dynamic = dynamic;
-
-	/* Check for existing list for 'dev' */
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		dev_opp = _add_device_opp(dev);
-		if (!dev_opp) {
-			ret = -ENOMEM;
-			goto free_opp;
-		}
-
-		head = &dev_opp->opp_list;
-		goto list_add;
-	}
-
-	/*
-	 * Insert new OPP in order of increasing frequency
-	 * and discard if already present
-	 */
-	head = &dev_opp->opp_list;
-	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
-		if (new_opp->rate <= opp->rate)
-			break;
-		else
-			head = &opp->node;
-	}
-
-	/* Duplicate OPPs ? */
-	if (new_opp->rate == opp->rate) {
-		ret = opp->available && new_opp->u_volt == opp->u_volt ?
-			0 : -EEXIST;
-
-		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
-			 __func__, opp->rate, opp->u_volt, opp->available,
-			 new_opp->rate, new_opp->u_volt, new_opp->available);
-		goto free_opp;
-	}
-
-list_add:
-	new_opp->dev_opp = dev_opp;
-	list_add_rcu(&new_opp->node, head);
-	mutex_unlock(&dev_opp_list_lock);
-
-	/*
-	 * Notify the changes in the availability of the operable
-	 * frequency/voltage list.
-	 */
-	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
-	return 0;
-
-free_opp:
-	mutex_unlock(&dev_opp_list_lock);
-	kfree(new_opp);
-	return ret;
-}
-
-/**
- * dev_pm_opp_add()  - Add an OPP table from a table definitions
- * @dev:	device for which we do this operation
- * @freq:	Frequency in Hz for this OPP
- * @u_volt:	Voltage in uVolts for this OPP
- *
- * This function adds an opp definition to the opp list and returns status.
- * The opp is made available by default and it can be controlled using
- * dev_pm_opp_enable/disable functions.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * Hence this function internally uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- *
- * Return:
- * 0		On success OR
- *		Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST	Freq are same and volt are different OR
- *		Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM	Memory allocation failure
- */
-int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
-{
-	return _opp_add_dynamic(dev, freq, u_volt, true);
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_add);
-
-/**
- * _kfree_opp_rcu() - Free OPP RCU handler
- * @head:	RCU head
- */
-static void _kfree_opp_rcu(struct rcu_head *head)
-{
-	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
-
-	kfree_rcu(opp, rcu_head);
-}
-
-/**
  * _kfree_device_rcu() - Free device_opp RCU handler
  * @head:	RCU head
  */
@@ -592,9 +622,47 @@
 }
 
 /**
+ * _remove_device_opp() - Removes a device OPP table
+ * @dev_opp: device OPP table to be removed.
+ *
+ * Removes/frees the device OPP table if it doesn't contain any OPPs.
+ */
+static void _remove_device_opp(struct device_opp *dev_opp)
+{
+	struct device_list_opp *list_dev;
+
+	if (!list_empty(&dev_opp->opp_list))
+		return;
+
+	list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
+				    node);
+
+	_remove_list_dev(list_dev, dev_opp);
+
+	/* dev_list must be empty now */
+	WARN_ON(!list_empty(&dev_opp->dev_list));
+
+	list_del_rcu(&dev_opp->node);
+	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
+		  _kfree_device_rcu);
+}
+
+/**
+ * _kfree_opp_rcu() - Free OPP RCU handler
+ * @head:	RCU head
+ */
+static void _kfree_opp_rcu(struct rcu_head *head)
+{
+	struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
+
+	kfree_rcu(opp, rcu_head);
+}
+
+/**
  * _opp_remove()  - Remove an OPP from a table definition
  * @dev_opp:	points back to the device_opp struct this opp belongs to
  * @opp:	pointer to the OPP to remove
+ * @notify:	OPP_EVENT_REMOVE notification should be sent or not
  *
  * This function removes an opp definition from the opp list.
  *
@@ -603,21 +671,18 @@
  * strategy.
  */
 static void _opp_remove(struct device_opp *dev_opp,
-			struct dev_pm_opp *opp)
+			struct dev_pm_opp *opp, bool notify)
 {
 	/*
 	 * Notify the changes in the availability of the operable
 	 * frequency/voltage list.
 	 */
-	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
+	if (notify)
+		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
 	list_del_rcu(&opp->node);
 	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 
-	if (list_empty(&dev_opp->opp_list)) {
-		list_del_rcu(&dev_opp->node);
-		call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
-			  _kfree_device_rcu);
-	}
+	_remove_device_opp(dev_opp);
 }
 
 /**
@@ -659,12 +724,307 @@
 		goto unlock;
 	}
 
-	_opp_remove(dev_opp, opp);
+	_opp_remove(dev_opp, opp, true);
 unlock:
 	mutex_unlock(&dev_opp_list_lock);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
+static struct dev_pm_opp *_allocate_opp(struct device *dev,
+					struct device_opp **dev_opp)
+{
+	struct dev_pm_opp *opp;
+
+	/* allocate new OPP node */
+	opp = kzalloc(sizeof(*opp), GFP_KERNEL);
+	if (!opp)
+		return NULL;
+
+	INIT_LIST_HEAD(&opp->node);
+
+	*dev_opp = _add_device_opp(dev);
+	if (!*dev_opp) {
+		kfree(opp);
+		return NULL;
+	}
+
+	return opp;
+}
+
+static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+		    struct device_opp *dev_opp)
+{
+	struct dev_pm_opp *opp;
+	struct list_head *head = &dev_opp->opp_list;
+
+	/*
+	 * Insert new OPP in order of increasing frequency and discard if
+	 * already present.
+	 *
+	 * Need to use &dev_opp->opp_list in the condition part of the 'for'
+	 * loop; don't replace it with head, otherwise the loop will never
+	 * terminate.
+	 */
+	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+		if (new_opp->rate > opp->rate) {
+			head = &opp->node;
+			continue;
+		}
+
+		if (new_opp->rate < opp->rate)
+			break;
+
+		/* Duplicate OPPs */
+		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+			 __func__, opp->rate, opp->u_volt, opp->available,
+			 new_opp->rate, new_opp->u_volt, new_opp->available);
+
+		return opp->available && new_opp->u_volt == opp->u_volt ?
+			0 : -EEXIST;
+	}
+
+	new_opp->dev_opp = dev_opp;
+	list_add_rcu(&new_opp->node, head);
+
+	return 0;
+}
+
+/**
+ * _opp_add_dynamic() - Allocate a dynamic OPP.
+ * @dev:	device for which we do this operation
+ * @freq:	Frequency in Hz for this OPP
+ * @u_volt:	Voltage in uVolts for this OPP
+ * @dynamic:	Dynamically added OPPs.
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
+ *
+ * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
+ * freed by of_free_opp_table.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ */
+static int _opp_add_dynamic(struct device *dev, unsigned long freq,
+			    long u_volt, bool dynamic)
+{
+	struct device_opp *dev_opp;
+	struct dev_pm_opp *new_opp;
+	int ret;
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	new_opp = _allocate_opp(dev, &dev_opp);
+	if (!new_opp) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* populate the opp table */
+	new_opp->rate = freq;
+	new_opp->u_volt = u_volt;
+	new_opp->available = true;
+	new_opp->dynamic = dynamic;
+
+	ret = _opp_add(dev, new_opp, dev_opp);
+	if (ret)
+		goto free_opp;
+
+	mutex_unlock(&dev_opp_list_lock);
+
+	/*
+	 * Notify the changes in the availability of the operable
+	 * frequency/voltage list.
+	 */
+	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+	return 0;
+
+free_opp:
+	_opp_remove(dev_opp, new_opp, false);
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+	return ret;
+}
+
+/* TODO: Support multiple regulators */
+static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
+{
+	u32 microvolt[3] = {0};
+	int count, ret;
+
+	count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+	if (!count)
+		return 0;
+
+	/* There can be one or three elements here */
+	if (count != 1 && count != 3) {
+		dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
+			__func__, count);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
+					 count);
+	if (ret) {
+		dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
+			ret);
+		return -EINVAL;
+	}
+
+	opp->u_volt = microvolt[0];
+	opp->u_volt_min = microvolt[1];
+	opp->u_volt_max = microvolt[2];
+
+	return 0;
+}
+
+/**
+ * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
+ * @dev:	device for which we do this operation
+ * @np:		device node
+ *
+ * This function adds an opp definition to the opp list and returns status. The
+ * opp can be controlled using dev_pm_opp_enable/disable functions and may be
+ * removed by dev_pm_opp_remove.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ * -EINVAL	Failed parsing the OPP node
+ */
+static int _opp_add_static_v2(struct device *dev, struct device_node *np)
+{
+	struct device_opp *dev_opp;
+	struct dev_pm_opp *new_opp;
+	u64 rate;
+	u32 val;
+	int ret;
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	new_opp = _allocate_opp(dev, &dev_opp);
+	if (!new_opp) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	ret = of_property_read_u64(np, "opp-hz", &rate);
+	if (ret < 0) {
+		dev_err(dev, "%s: opp-hz not found\n", __func__);
+		goto free_opp;
+	}
+
+	/*
+	 * Rate is defined as an unsigned long in the clk API, so cast it
+	 * explicitly to that type here. This must be revisited once the
+	 * clk API guarantees 64-bit rates.
+	 */
+	new_opp->rate = (unsigned long)rate;
+	new_opp->turbo = of_property_read_bool(np, "turbo-mode");
+
+	new_opp->np = np;
+	new_opp->dynamic = false;
+	new_opp->available = true;
+
+	if (!of_property_read_u32(np, "clock-latency-ns", &val))
+		new_opp->clock_latency_ns = val;
+
+	ret = opp_get_microvolt(new_opp, dev);
+	if (ret)
+		goto free_opp;
+
+	if (!of_property_read_u32(new_opp->np, "opp-microamp", &val))
+		new_opp->u_amp = val;
+
+	ret = _opp_add(dev, new_opp, dev_opp);
+	if (ret)
+		goto free_opp;
+
+	/* OPP to select on device suspend */
+	if (of_property_read_bool(np, "opp-suspend")) {
+		if (dev_opp->suspend_opp)
+			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
+				 __func__, dev_opp->suspend_opp->rate,
+				 new_opp->rate);
+		else
+			dev_opp->suspend_opp = new_opp;
+	}
+
+	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
+		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;
+
+	mutex_unlock(&dev_opp_list_lock);
+
+	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
+		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
+		 new_opp->u_volt_min, new_opp->u_volt_max,
+		 new_opp->clock_latency_ns);
+
+	/*
+	 * Notify the changes in the availability of the operable
+	 * frequency/voltage list.
+	 */
+	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
+	return 0;
+
+free_opp:
+	_opp_remove(dev_opp, new_opp, false);
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+	return ret;
+}
+
+/**
+ * dev_pm_opp_add()  - Add an OPP table from a table definitions
+ * @dev:	device for which we do this operation
+ * @freq:	Frequency in Hz for this OPP
+ * @u_volt:	Voltage in uVolts for this OPP
+ *
+ * This function adds an opp definition to the opp list and returns status.
+ * The opp is made available by default and it can be controlled using
+ * dev_pm_opp_enable/disable functions.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ */
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+{
+	return _opp_add_dynamic(dev, freq, u_volt, true);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
+
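For illustration only (not part of this patch), a minimal caller of the dynamic-add path documented above might look like the sketch below. The function name, device, frequencies and voltages are all hypothetical; dev_pm_opp_remove() is the removal helper mentioned in the kernel-doc above.

#include <linux/pm_opp.h>

/* Hypothetical probe fragment: register two dynamic OPPs for @dev. */
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 1000000);	/* 500 MHz @ 1.0 V */
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1000000000, 1200000);	/* 1 GHz @ 1.2 V */
	if (ret)
		dev_pm_opp_remove(dev, 500000000);	/* undo on failure */

	return ret;
}
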
 /**
  * _opp_set_availability() - helper to set the availability of an opp
  * @dev:		device for which we do this operation
@@ -825,28 +1185,179 @@
 
 #ifdef CONFIG_OF
 /**
- * of_init_opp_table() - Initialize opp table from device tree
+ * of_free_opp_table() - Free OPP table entries created from static DT entries
  * @dev:	device pointer used to lookup device OPPs.
  *
- * Register the initial OPP table with the OPP library for given device.
+ * Free OPPs created using static entries present in DT.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
- *
- * Return:
- * 0		On success OR
- *		Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST	Freq are same and volt are different OR
- *		Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM	Memory allocation failure
- * -ENODEV	when 'operating-points' property is not found or is invalid data
- *		in device node.
- * -ENODATA	when empty 'operating-points' property is found
  */
-int of_init_opp_table(struct device *dev)
+void of_free_opp_table(struct device *dev)
+{
+	struct device_opp *dev_opp;
+	struct dev_pm_opp *opp, *tmp;
+
+	/* Hold our list modification lock here */
+	mutex_lock(&dev_opp_list_lock);
+
+	/* Check for existing list for 'dev' */
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp)) {
+		int error = PTR_ERR(dev_opp);
+
+		if (error != -ENODEV)
+			WARN(1, "%s: dev_opp: %d\n",
+			     IS_ERR_OR_NULL(dev) ?
+					"Invalid device" : dev_name(dev),
+			     error);
+		goto unlock;
+	}
+
+	/* Find if dev_opp manages a single device */
+	if (list_is_singular(&dev_opp->dev_list)) {
+		/* Free static OPPs */
+		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
+			if (!opp->dynamic)
+				_opp_remove(dev_opp, opp, true);
+		}
+	} else {
+		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
+	}
+
+unlock:
+	mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(of_free_opp_table);
+
+void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+	struct device *cpu_dev;
+	int cpu;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		of_free_opp_table(cpu_dev);
+	}
+}
+EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
+
+/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
+static struct device_node *
+_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
+{
+	struct device_node *opp_np;
+
+	opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
+	if (!opp_np) {
+		dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
+			__func__, prop->name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return opp_np;
+}
+
+/* Returns opp descriptor node for a device. Caller must do of_node_put() */
+static struct device_node *_of_get_opp_desc_node(struct device *dev)
+{
+	const struct property *prop;
+
+	prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+	if (!prop)
+		return ERR_PTR(-ENODEV);
+	if (!prop->value)
+		return ERR_PTR(-ENODATA);
+
+	/*
+	 * TODO: Support for multiple OPP tables.
+	 *
+	 * There should be only ONE phandle present in "operating-points-v2"
+	 * property.
+	 */
+	if (prop->length != sizeof(__be32)) {
+		dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return _of_get_opp_desc_node_from_prop(dev, prop);
+}
+
+/* Initializes OPP tables based on new bindings */
+static int _of_init_opp_table_v2(struct device *dev,
+				 const struct property *prop)
+{
+	struct device_node *opp_np, *np;
+	struct device_opp *dev_opp;
+	int ret = 0, count = 0;
+
+	if (!prop->value)
+		return -ENODATA;
+
+	/* Get opp node */
+	opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
+	if (IS_ERR(opp_np))
+		return PTR_ERR(opp_np);
+
+	dev_opp = _managed_opp(opp_np);
+	if (dev_opp) {
+		/* OPPs are already managed */
+		if (!_add_list_dev(dev, dev_opp))
+			ret = -ENOMEM;
+		goto put_opp_np;
+	}
+
+	/* We have opp-list node now, iterate over it and add OPPs */
+	for_each_available_child_of_node(opp_np, np) {
+		count++;
+
+		ret = _opp_add_static_v2(dev, np);
+		if (ret) {
+			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
+				ret);
+			goto free_table;
+		}
+	}
+
+	/* There should be one or more OPPs defined */
+	if (WARN_ON(!count)) {
+		ret = -ENOENT;
+		goto put_opp_np;
+	}
+
+	dev_opp = _find_device_opp(dev);
+	if (WARN_ON(IS_ERR(dev_opp))) {
+		ret = PTR_ERR(dev_opp);
+		goto free_table;
+	}
+
+	dev_opp->np = opp_np;
+	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+
+	of_node_put(opp_np);
+	return 0;
+
+free_table:
+	of_free_opp_table(dev);
+put_opp_np:
+	of_node_put(opp_np);
+
+	return ret;
+}
+
+/* Initializes OPP tables based on old-deprecated bindings */
+static int _of_init_opp_table_v1(struct device *dev)
 {
 	const struct property *prop;
 	const __be32 *val;
@@ -881,47 +1392,177 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(of_init_opp_table);
 
 /**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * of_init_opp_table() - Initialize opp table from device tree
  * @dev:	device pointer used to lookup device OPPs.
  *
- * Free OPPs created using static entries present in DT.
+ * Register the initial OPP table with the OPP library for given device.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU updater strategy with mutex locks
  * to keep the integrity of the internal data structures. Callers should ensure
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
+ *
+ * Return:
+ * 0		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM	Memory allocation failure
+ * -ENODEV	when 'operating-points' property is not found or is invalid data
+ *		in device node.
+ * -ENODATA	when empty 'operating-points' property is found
+ * -EINVAL	when invalid entries are found in opp-v2 table
  */
-void of_free_opp_table(struct device *dev)
+int of_init_opp_table(struct device *dev)
 {
-	struct device_opp *dev_opp;
-	struct dev_pm_opp *opp, *tmp;
+	const struct property *prop;
 
-	/* Check for existing list for 'dev' */
-	dev_opp = _find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		int error = PTR_ERR(dev_opp);
-		if (error != -ENODEV)
-			WARN(1, "%s: dev_opp: %d\n",
-			     IS_ERR_OR_NULL(dev) ?
-					"Invalid device" : dev_name(dev),
-			     error);
-		return;
+	/*
+	 * OPPs have two versions of bindings now. The older one is deprecated,
+	 * try for the new binding first.
+	 */
+	prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
+	if (!prop) {
+		/*
+		 * Try old-deprecated bindings for backward compatibility with
+		 * older dtbs.
+		 */
+		return _of_init_opp_table_v1(dev);
 	}
 
-	/* Hold our list modification lock here */
-	mutex_lock(&dev_opp_list_lock);
-
-	/* Free static OPPs */
-	list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
-		if (!opp->dynamic)
-			_opp_remove(dev_opp, opp);
-	}
-
-	mutex_unlock(&dev_opp_list_lock);
+	return _of_init_opp_table_v2(dev, prop);
 }
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(of_init_opp_table);
+
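A hedged usage sketch, not part of the patch: a driver that only needs the default behaviour can call of_init_opp_table()/of_free_opp_table() from its probe and remove paths and let the code above pick the v1 or v2 parser. The example_probe/example_remove names are made up.

#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical probe fragment: dev->of_node is assumed to carry either an
 * "operating-points" property or an "operating-points-v2" phandle.
 */
static int example_probe(struct device *dev)
{
	int ret = of_init_opp_table(dev);

	if (ret)
		dev_err(dev, "failed to initialize OPP table: %d\n", ret);
	return ret;
}

/* Hypothetical remove fragment: drop the statically created OPPs again. */
static void example_remove(struct device *dev)
{
	of_free_opp_table(dev);
}
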
+int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+	struct device *cpu_dev;
+	int cpu, ret = 0;
+
+	WARN_ON(cpumask_empty(cpumask));
+
+	for_each_cpu(cpu, cpumask) {
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			pr_err("%s: failed to get cpu%d device\n", __func__,
+			       cpu);
+			continue;
+		}
+
+		ret = of_init_opp_table(cpu_dev);
+		if (ret) {
+			pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+			       __func__, cpu, ret);
+
+			/* Free all other OPPs */
+			of_cpumask_free_opp_table(cpumask);
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_list_opp *list_dev;
+	struct device_opp *dev_opp;
+	struct device *dev;
+	int cpu, ret = 0;
+
+	rcu_read_lock();
+
+	dev_opp = _find_device_opp(cpu_dev);
+	if (IS_ERR(dev_opp)) {
+		ret = -EINVAL;
+		goto out_rcu_read_unlock;
+	}
+
+	for_each_cpu(cpu, cpumask) {
+		if (cpu == cpu_dev->id)
+			continue;
+
+		dev = get_cpu_device(cpu);
+		if (!dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
+
+		list_dev = _add_list_dev(dev, dev_opp);
+		if (!list_dev) {
+			dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+				__func__, cpu);
+			continue;
+		}
+	}
+out_rcu_read_unlock:
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should be already set to mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	struct device_node *np, *tmp_np;
+	struct device *tcpu_dev;
+	int cpu, ret = 0;
+
+	/* Get OPP descriptor node */
+	np = _of_get_opp_desc_node(cpu_dev);
+	if (IS_ERR(np)) {
+		dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
+			PTR_ERR(np));
+		return -ENOENT;
+	}
+
+	/* OPPs are shared ? */
+	if (!of_property_read_bool(np, "opp-shared"))
+		goto put_cpu_node;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu == cpu_dev->id)
+			continue;
+
+		tcpu_dev = get_cpu_device(cpu);
+		if (!tcpu_dev) {
+			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+				__func__, cpu);
+			ret = -ENODEV;
+			goto put_cpu_node;
+		}
+
+		/* Get OPP descriptor node */
+		tmp_np = _of_get_opp_desc_node(tcpu_dev);
+		if (IS_ERR(tmp_np)) {
+			dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
+				__func__, PTR_ERR(tmp_np));
+			ret = PTR_ERR(tmp_np);
+			goto put_cpu_node;
+		}
+
+		/* CPUs are sharing opp node */
+		if (np == tmp_np)
+			cpumask_set_cpu(cpu, cpumask);
+
+		of_node_put(tmp_np);
+	}
+
+put_cpu_node:
+	of_node_put(np);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
 #endif
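
As a rough sketch of how the CPU-mask helpers added above fit together (hypothetical example_init_cpu_opps(), and assuming the matching declarations are exported through linux/pm_opp.h elsewhere in this series): build the table for one CPU, then ask which other CPUs share it; per the comment above, -ENOENT from of_get_cpus_sharing_opps() means the DT only carries old v1 bindings.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_init_cpu_opps(struct device *cpu_dev, cpumask_var_t shared)
{
	int ret;

	ret = of_init_opp_table(cpu_dev);
	if (ret)
		return ret;

	/* of_get_cpus_sharing_opps() expects cpu_dev's own bit set already */
	cpumask_set_cpu(cpu_dev->id, shared);

	ret = of_get_cpus_sharing_opps(cpu_dev, shared);
	if (ret == -ENOENT)
		ret = 0;	/* v1 bindings: no sharing info in DT */

	return ret;
}
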
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index f1a5d95..998fa6b 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -73,6 +73,8 @@
 extern void pm_qos_sysfs_remove_resume_latency(struct device *dev);
 extern int pm_qos_sysfs_add_flags(struct device *dev);
 extern void pm_qos_sysfs_remove_flags(struct device *dev);
+extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev);
+extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev);
 
 #else /* CONFIG_PM */
 
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index e56d538..7f3646e 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -883,3 +883,40 @@
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
+
+/**
+ * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
+ * @dev: Device whose latency tolerance to expose
+ */
+int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+{
+	int ret;
+
+	if (!dev->power.set_latency_tolerance)
+		return -EINVAL;
+
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+	ret = pm_qos_sysfs_add_latency_tolerance(dev);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
+
+/**
+ * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
+ * @dev: Device whose latency tolerance to hide
+ */
+void dev_pm_qos_hide_latency_tolerance(struct device *dev)
+{
+	mutex_lock(&dev_pm_qos_sysfs_mtx);
+	pm_qos_sysfs_remove_latency_tolerance(dev);
+	mutex_unlock(&dev_pm_qos_sysfs_mtx);
+
+	/* Remove the request from user space now */
+	pm_runtime_get_sync(dev);
+	dev_pm_qos_update_user_latency_tolerance(dev,
+		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
+	pm_runtime_put(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);
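
A minimal caller sketch (hypothetical, not part of the patch) for the pair of helpers just added: the device's set_latency_tolerance callback must already be populated, otherwise the expose call returns -EINVAL as shown above.

#include <linux/device.h>
#include <linux/pm_qos.h>

static int example_expose_latency_tolerance(struct device *dev)
{
	int ret = dev_pm_qos_expose_latency_tolerance(dev);

	if (ret)
		dev_warn(dev, "latency tolerance sysfs not exposed: %d\n", ret);
	return ret;
}

/* ... and on teardown: dev_pm_qos_hide_latency_tolerance(dev); */
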
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index d2be3f9..a7b4679 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -738,6 +738,17 @@
 	sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
 }
 
+int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
+{
+	return sysfs_merge_group(&dev->kobj,
+				 &pm_qos_latency_tolerance_attr_group);
+}
+
+void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
+{
+	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
+}
+
 void rpm_sysfs_remove(struct device *dev)
 {
 	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f3f6d16..ff03f23 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -16,6 +16,8 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/property.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
 
 /**
  * device_add_property_set - Add a collection of properties to a device object.
@@ -27,9 +29,10 @@
  */
 void device_add_property_set(struct device *dev, struct property_set *pset)
 {
-	if (pset)
-		pset->fwnode.type = FWNODE_PDATA;
+	if (!pset)
+		return;
 
+	pset->fwnode.type = FWNODE_PDATA;
 	set_secondary_fwnode(dev, &pset->fwnode);
 }
 EXPORT_SYMBOL_GPL(device_add_property_set);
@@ -153,6 +156,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u8_array(struct device *dev, const char *propname,
 				  u8 *val, size_t nval)
@@ -177,6 +181,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u16_array(struct device *dev, const char *propname,
 				   u16 *val, size_t nval)
@@ -201,6 +206,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u32_array(struct device *dev, const char *propname,
 				   u32 *val, size_t nval)
@@ -225,6 +231,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO if the property is not an array of numbers,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_u64_array(struct device *dev, const char *propname,
 				   u64 *val, size_t nval)
@@ -249,6 +256,7 @@
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO or %-EILSEQ if the property is not an array of strings,
  *	   %-EOVERFLOW if the size of the property is not as expected.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_string_array(struct device *dev, const char *propname,
 				      const char **val, size_t nval)
@@ -270,6 +278,7 @@
  *	   %-EINVAL if given arguments are not valid,
  *	   %-ENODATA if the property does not have a value,
  *	   %-EPROTO or %-EILSEQ if the property type is not a string.
+ *	   %-ENXIO if no suitable firmware interface is present.
  */
 int device_property_read_string(struct device *dev, const char *propname,
 				const char **val)
@@ -291,9 +300,11 @@
 	else if (is_acpi_node(_fwnode_)) \
 		_ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \
 					   _proptype_, _val_, _nval_); \
-	else \
+	else if (is_pset(_fwnode_)) \
 		_ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \
 					     _proptype_, _val_, _nval_); \
+	else \
+		_ret_ = -ENXIO; \
 	_ret_; \
 })
 
@@ -431,9 +442,10 @@
 	else if (is_acpi_node(fwnode))
 		return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
 					  DEV_PROP_STRING, val, nval);
-
-	return pset_prop_read_array(to_pset(fwnode), propname,
-				    DEV_PROP_STRING, val, nval);
+	else if (is_pset(fwnode))
+		return pset_prop_read_array(to_pset(fwnode), propname,
+					    DEV_PROP_STRING, val, nval);
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
 
@@ -461,7 +473,8 @@
 		return acpi_dev_prop_read(to_acpi_node(fwnode), propname,
 					  DEV_PROP_STRING, val, 1);
 
-	return -ENXIO;
+	return pset_prop_read_array(to_pset(fwnode), propname,
+				    DEV_PROP_STRING, val, 1);
 }
 EXPORT_SYMBOL_GPL(fwnode_property_read_string);
 
@@ -533,3 +546,79 @@
 	return coherent;
 }
 EXPORT_SYMBOL_GPL(device_dma_is_coherent);
+
+/**
+ * device_get_phy_mode - Get phy mode for given device
+ * @dev:	Pointer to the given device
+ *
+ * The function gets the phy interface string from the 'phy-mode' or
+ * 'phy-connection-type' property and returns its index in the phy_modes
+ * table, or an errno on error.
+ */
+int device_get_phy_mode(struct device *dev)
+{
+	const char *pm;
+	int err, i;
+
+	err = device_property_read_string(dev, "phy-mode", &pm);
+	if (err < 0)
+		err = device_property_read_string(dev,
+						  "phy-connection-type", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+		if (!strcasecmp(pm, phy_modes(i)))
+			return i;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(device_get_phy_mode);
+
+static void *device_get_mac_addr(struct device *dev,
+				 const char *name, char *addr,
+				 int alen)
+{
+	int ret = device_property_read_u8_array(dev, name, addr, alen);
+
+	if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
+		return addr;
+	return NULL;
+}
+
+/**
+ * device_get_mac_address - Get the MAC for a given device
+ * @dev:	Pointer to the device
+ * @addr:	Address of buffer to store the MAC in
+ * @alen:	Length of the buffer pointed to by addr, should be ETH_ALEN
+ *
+ * Search the firmware node for the best MAC address to use.  'mac-address' is
+ * checked first, because that is supposed to contain the "most recent" MAC
+ * address. If that isn't set, then 'local-mac-address' is checked next,
+ * because that is the default address.  If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree.
+ *
+ * Note that the 'address' property is supposed to contain a virtual address of
+ * the register set, but some DTS files have redefined that property to be the
+ * MAC address.
+ *
+ * All-zero MAC addresses are rejected, because those could be properties that
+ * exist in the firmware tables, but were not updated by the firmware.  For
+ * example, the DTS could define 'mac-address' and 'local-mac-address', with
+ * zero MAC addresses.  Some older U-Boots only initialized 'local-mac-address'.
+ * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
+ * exists but is all zeros.
+ */
+void *device_get_mac_address(struct device *dev, char *addr, int alen)
+{
+	addr = device_get_mac_addr(dev, "mac-address", addr, alen);
+	if (addr)
+		return addr;
+
+	addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+	if (addr)
+		return addr;
+
+	return device_get_mac_addr(dev, "address", addr, alen);
+}
+EXPORT_SYMBOL(device_get_mac_address);
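
For context, a hypothetical network-driver probe fragment using the two firmware-agnostic helpers just added (they behave the same for DT- and ACPI-described devices). The example_read_net_properties() name is invented, and it is assumed the declarations are exported via linux/property.h elsewhere in this series.

#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/property.h>

static int example_read_net_properties(struct device *dev,
				       struct net_device *ndev)
{
	char mac[ETH_ALEN];
	int phy_mode;

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0)
		return phy_mode;		/* e.g. -ENODEV: unknown mode */

	if (device_get_mac_address(dev, mac, ETH_ALEN))
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(ndev);	/* no usable address in firmware */

	return phy_mode;
}
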
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b2b2849..873ddf9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -136,7 +136,7 @@
 	/* if set, the HW registers are known to match map->reg_defaults */
 	bool no_sync_defaults;
 
-	struct reg_default *patch;
+	struct reg_sequence *patch;
 	int patch_regs;
 
 	/* if set, converts bulk rw to single rw */
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 7111d04..0a849ee 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -34,7 +34,7 @@
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change);
+			       bool *change, bool force_write);
 
 static int _regmap_bus_reg_read(void *context, unsigned int reg,
 				unsigned int *val);
@@ -1178,7 +1178,7 @@
 		ret = _regmap_update_bits(map, range->selector_reg,
 					  range->selector_mask,
 					  win_page << range->selector_shift,
-					  &page_chg);
+					  &page_chg, false);
 
 		map->work_buf = orig_work_buf;
 
@@ -1624,6 +1624,18 @@
 }
 EXPORT_SYMBOL_GPL(regmap_fields_write);
 
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val)
+{
+	if (id >= field->id_size)
+		return -EINVAL;
+
+	return regmap_write_bits(field->regmap,
+				  field->reg + (field->id_offset * id),
+				  field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_force_write);
+
 /**
  * regmap_fields_update_bits():	Perform a read/modify/write cycle
  *                              on the register field
@@ -1743,7 +1755,7 @@
  * relative. The page register has been written if that was neccessary.
  */
 static int _regmap_raw_multi_reg_write(struct regmap *map,
-				       const struct reg_default *regs,
+				       const struct reg_sequence *regs,
 				       size_t num_regs)
 {
 	int ret;
@@ -1800,12 +1812,12 @@
 }
 
 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
-					       struct reg_default *regs,
+					       struct reg_sequence *regs,
 					       size_t num_regs)
 {
 	int ret;
 	int i, n;
-	struct reg_default *base;
+	struct reg_sequence *base;
 	unsigned int this_page = 0;
 	/*
 	 * the set of registers are not neccessarily in order, but
@@ -1843,7 +1855,7 @@
 }
 
 static int _regmap_multi_reg_write(struct regmap *map,
-				   const struct reg_default *regs,
+				   const struct reg_sequence *regs,
 				   size_t num_regs)
 {
 	int i;
@@ -1895,8 +1907,8 @@
 		struct regmap_range_node *range;
 		range = _regmap_range_lookup(map, reg);
 		if (range) {
-			size_t len = sizeof(struct reg_default)*num_regs;
-			struct reg_default *base = kmemdup(regs, len,
+			size_t len = sizeof(struct reg_sequence)*num_regs;
+			struct reg_sequence *base = kmemdup(regs, len,
 							   GFP_KERNEL);
 			if (!base)
 				return -ENOMEM;
@@ -1929,7 +1941,7 @@
  * A value of zero will be returned on success, a negative errno will be
  * returned in error cases.
  */
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			   int num_regs)
 {
 	int ret;
@@ -1962,7 +1974,7 @@
  * be returned in error cases.
  */
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs)
 {
 	int ret;
@@ -2327,7 +2339,7 @@
 
 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
-			       bool *change)
+			       bool *change, bool force_write)
 {
 	int ret;
 	unsigned int tmp, orig;
@@ -2339,7 +2351,7 @@
 	tmp = orig & ~mask;
 	tmp |= val & mask;
 
-	if (tmp != orig) {
+	if (force_write || (tmp != orig)) {
 		ret = _regmap_write(map, reg, tmp);
 		if (change)
 			*change = true;
@@ -2367,7 +2379,7 @@
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 	map->unlock(map->lock_arg);
 
 	return ret;
@@ -2375,6 +2387,29 @@
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
 /**
+ * regmap_write_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		      unsigned int mask, unsigned int val)
+{
+	int ret;
+
+	map->lock(map->lock_arg);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
+	map->unlock(map->lock_arg);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_bits);
+
+/**
  * regmap_update_bits_async: Perform a read/modify/write cycle on the register
  *                           map asynchronously
  *
@@ -2398,7 +2433,7 @@
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, NULL);
+	ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
 
 	map->async = false;
 
@@ -2427,7 +2462,7 @@
 	int ret;
 
 	map->lock(map->lock_arg);
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 	map->unlock(map->lock_arg);
 	return ret;
 }
@@ -2460,7 +2495,7 @@
 
 	map->async = true;
 
-	ret = _regmap_update_bits(map, reg, mask, val, change);
+	ret = _regmap_update_bits(map, reg, mask, val, change, false);
 
 	map->async = false;
 
@@ -2552,10 +2587,10 @@
  * The caller must ensure that this function cannot be called
  * concurrently with either itself or regcache_sync().
  */
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs)
 {
-	struct reg_default *p;
+	struct reg_sequence *p;
 	int ret;
 	bool bypass;
 
@@ -2564,7 +2599,7 @@
 		return 0;
 
 	p = krealloc(map->patch,
-		     sizeof(struct reg_default) * (map->patch_regs + num_regs),
+		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
 		     GFP_KERNEL);
 	if (p) {
 		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
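
To make the reg_default -> reg_sequence rename concrete, here is a hedged sketch of a caller (hypothetical example_chip_init(); register addresses and values are made up): write sequences now use struct reg_sequence, and the new regmap_write_bits() forces the write out even when the cached value already matches.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence example_init_seq[] = {
	{ 0x00, 0x0001 },
	{ 0x04, 0x00ff },
	{ 0x08, 0x8000 },
};

static int example_chip_init(struct regmap *map)
{
	int ret;

	ret = regmap_multi_reg_write(map, example_init_seq,
				     ARRAY_SIZE(example_init_seq));
	if (ret)
		return ret;

	/* Unlike regmap_update_bits(), this writes even if the value is unchanged */
	return regmap_write_bits(map, 0x0c, BIT(0), BIT(0));
}
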
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index be5fffb..023d448 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -92,7 +92,7 @@
 config BCMA_DRIVER_GPIO
 	bool "BCMA GPIO driver"
 	depends on BCMA && GPIOLIB
-	select IRQ_DOMAIN if BCMA_HOST_SOC
+	select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
 	help
 	  Driver to provide access to the GPIO pins of the bcma bus.
 
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 15f2b2e..38f1567 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -34,6 +34,7 @@
 int bcma_bus_suspend(struct bcma_bus *bus);
 int bcma_bus_resume(struct bcma_bus *bus);
 #endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
 
 /* scan.c */
 void bcma_detect_chip(struct bcma_bus *bus);
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 5f6018e..504899a7 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -8,10 +8,8 @@
  * Licensed under the GNU/GPL. See COPYING for details.
  */
 
-#include <linux/gpio.h>
-#include <linux/irq.h>
+#include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
-#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/bcma/bcma.h>
 
@@ -79,19 +77,11 @@
 }
 
 #if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-static int bcma_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
-{
-	struct bcma_drv_cc *cc = bcma_gpio_get_cc(chip);
-
-	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
-		return irq_find_mapping(cc->irq_domain, gpio);
-	else
-		return -EINVAL;
-}
 
 static void bcma_gpio_irq_unmask(struct irq_data *d)
 {
-	struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
 	int gpio = irqd_to_hwirq(d);
 	u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
 
@@ -101,7 +91,8 @@
 
 static void bcma_gpio_irq_mask(struct irq_data *d)
 {
-	struct bcma_drv_cc *cc = irq_data_get_irq_chip_data(d);
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcma_drv_cc *cc = bcma_gpio_get_cc(gc);
 	int gpio = irqd_to_hwirq(d);
 
 	bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
@@ -116,6 +107,7 @@
 static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
 {
 	struct bcma_drv_cc *cc = dev_id;
+	struct gpio_chip *gc = &cc->gpio;
 	u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
 	u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
 	u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
@@ -125,81 +117,58 @@
 	if (!irqs)
 		return IRQ_NONE;
 
-	for_each_set_bit(gpio, &irqs, cc->gpio.ngpio)
-		generic_handle_irq(bcma_gpio_to_irq(&cc->gpio, gpio));
+	for_each_set_bit(gpio, &irqs, gc->ngpio)
+		generic_handle_irq(irq_find_mapping(gc->irqdomain, gpio));
 	bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
 
 	return IRQ_HANDLED;
 }
 
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
 	struct gpio_chip *chip = &cc->gpio;
-	int gpio, hwirq, err;
+	int hwirq, err;
 
 	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
 		return 0;
 
-	cc->irq_domain = irq_domain_add_linear(NULL, chip->ngpio,
-					       &irq_domain_simple_ops, cc);
-	if (!cc->irq_domain) {
-		err = -ENODEV;
-		goto err_irq_domain;
-	}
-	for (gpio = 0; gpio < chip->ngpio; gpio++) {
-		int irq = irq_create_mapping(cc->irq_domain, gpio);
-
-		irq_set_chip_data(irq, cc);
-		irq_set_chip_and_handler(irq, &bcma_gpio_irq_chip,
-					 handle_simple_irq);
-	}
-
 	hwirq = bcma_core_irq(cc->core, 0);
 	err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
 			  cc);
 	if (err)
-		goto err_req_irq;
+		return err;
 
 	bcma_chipco_gpio_intmask(cc, ~0, 0);
 	bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
 
-	return 0;
-
-err_req_irq:
-	for (gpio = 0; gpio < chip->ngpio; gpio++) {
-		int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-		irq_dispose_mapping(irq);
+	err = gpiochip_irqchip_add(chip,
+				    &bcma_gpio_irq_chip,
+				    0,
+				    handle_simple_irq,
+				    IRQ_TYPE_NONE);
+	if (err) {
+		free_irq(hwirq, cc);
+		return err;
 	}
-	irq_domain_remove(cc->irq_domain);
-err_irq_domain:
-	return err;
+
+	return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
-	struct gpio_chip *chip = &cc->gpio;
-	int gpio;
-
 	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
 		return;
 
 	bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
 	free_irq(bcma_core_irq(cc->core, 0), cc);
-	for (gpio = 0; gpio < chip->ngpio; gpio++) {
-		int irq = irq_find_mapping(cc->irq_domain, gpio);
-
-		irq_dispose_mapping(irq);
-	}
-	irq_domain_remove(cc->irq_domain);
 }
 #else
-static int bcma_gpio_irq_domain_init(struct bcma_drv_cc *cc)
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
 	return 0;
 }
 
-static void bcma_gpio_irq_domain_exit(struct bcma_drv_cc *cc)
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
 {
 }
 #endif
@@ -218,9 +187,8 @@
 	chip->set		= bcma_gpio_set_value;
 	chip->direction_input	= bcma_gpio_direction_input;
 	chip->direction_output	= bcma_gpio_direction_output;
-#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
-	chip->to_irq		= bcma_gpio_to_irq;
-#endif
+	chip->owner		= THIS_MODULE;
+	chip->dev		= bcma_bus_get_host_dev(bus);
 #if IS_BUILTIN(CONFIG_OF)
 	if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
 		chip->of_node	= cc->core->dev.of_node;
@@ -248,13 +216,13 @@
 	else
 		chip->base		= -1;
 
-	err = bcma_gpio_irq_domain_init(cc);
+	err = gpiochip_add(chip);
 	if (err)
 		return err;
 
-	err = gpiochip_add(chip);
+	err = bcma_gpio_irq_init(cc);
 	if (err) {
-		bcma_gpio_irq_domain_exit(cc);
+		gpiochip_remove(chip);
 		return err;
 	}
 
@@ -263,7 +231,7 @@
 
 int bcma_gpio_unregister(struct bcma_drv_cc *cc)
 {
-	bcma_gpio_irq_domain_exit(cc);
+	bcma_gpio_irq_exit(cc);
 	gpiochip_remove(&cc->gpio);
 	return 0;
 }
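
The ordering that the bcma conversion above relies on can be summarised in a short, hypothetical sketch (example_gpio_register() is invented): with GPIOLIB_IRQCHIP the gpio_chip has to be registered before gpiochip_irqchip_add(), and everything is unwound in reverse on failure.

#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int example_gpio_register(struct gpio_chip *chip,
				 struct irq_chip *irqchip,
				 int hwirq, irq_handler_t handler, void *data)
{
	int err;

	err = gpiochip_add(chip);
	if (err)
		return err;

	err = request_irq(hwirq, handler, IRQF_SHARED, "example-gpio", data);
	if (err)
		goto err_remove_chip;

	err = gpiochip_irqchip_add(chip, irqchip, 0, handle_simple_irq,
				   IRQ_TYPE_NONE);
	if (err)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(hwirq, data);
err_remove_chip:
	gpiochip_remove(chip);
	return err;
}
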
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9635f10..24882c1 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -7,11 +7,14 @@
 
 #include "bcma_private.h"
 #include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include <linux/bcma/bcma.h>
 #include <linux/slab.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 MODULE_LICENSE("GPL");
@@ -268,6 +271,28 @@
 	}
 }
 
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		if (bus->host_pci)
+			return &bus->host_pci->dev;
+		else
+			return NULL;
+	case BCMA_HOSTTYPE_SOC:
+		if (bus->host_pdev)
+			return &bus->host_pdev->dev;
+		else
+			return NULL;
+	case BCMA_HOSTTYPE_SDIO:
+		if (bus->host_sdio)
+			return &bus->host_sdio->dev;
+		else
+			return NULL;
+	}
+	return NULL;
+}
+
 void bcma_init_bus(struct bcma_bus *bus)
 {
 	mutex_lock(&bcma_buses_mutex);
@@ -387,6 +412,7 @@
 {
 	int err;
 	struct bcma_device *core;
+	struct device *dev;
 
 	/* Scan for devices (cores) */
 	err = bcma_bus_scan(bus);
@@ -409,6 +435,16 @@
 		bcma_core_pci_early_init(&bus->drv_pci[0]);
 	}
 
+	dev = bcma_bus_get_host_dev(bus);
+	/* TODO: remove the IS_BUILTIN(CONFIG_BCMA) check when
+	 * of_default_bus_match_table is exported or in some other way
+	 * accessible. This is just a temporary workaround.
+	 */
+	if (IS_BUILTIN(CONFIG_BCMA) && dev) {
+		of_platform_populate(dev->of_node, of_default_bus_match_table,
+				     NULL, dev);
+	}
+
 	/* Cores providing flash access go before SPROM init */
 	list_for_each_entry(core, &bus->cores, list) {
 		if (bcma_is_core_needed_early(core->id.id))
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 46c282f..dd73e1f 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -395,7 +395,7 @@
 	WARN_ON(d->flags & DEVFL_TKILL);
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
-	blk_queue_max_hw_sectors(q, 1024);
+	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
 	q->backing_dev_info.name = "aoe";
 	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
 	d->bufpool = mp;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 422b7d8..ad80c85 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1110,7 +1110,7 @@
 		d->ip.rq = NULL;
 	do {
 		bio = rq->bio;
-		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
+		bok = !fastfail && !bio->bi_error;
 	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
@@ -1172,7 +1172,7 @@
 			ahout->cmdstat, ahin->cmdstat,
 			d->aoemajor, d->aoeminor);
 noskb:		if (buf)
-			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+			buf->bio->bi_error = -EIO;
 		goto out;
 	}
 
@@ -1185,7 +1185,7 @@
 				"aoe: runt data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 			       skb->len, n);
-			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+			buf->bio->bi_error = -EIO;
 			break;
 		}
 		if (n > f->iter.bi_size) {
@@ -1193,7 +1193,7 @@
 				"aoe: too-large data size in read from",
 				(long) d->aoemajor, d->aoeminor,
 				n, f->iter.bi_size);
-			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+			buf->bio->bi_error = -EIO;
 			break;
 		}
 		bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1695,7 +1695,7 @@
 	if (buf == NULL)
 		return;
 	buf->iter.bi_size = 0;
-	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+	buf->bio->bi_error = -EIO;
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
 }
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index e774c50..ffd1947 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -170,7 +170,7 @@
 	if (rq == NULL)
 		return;
 	while ((bio = d->ip.nxbio)) {
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		bio->bi_error = -EIO;
 		d->ip.nxbio = bio->bi_next;
 		n = (unsigned long) rq->special;
 		rq->special = (void *) --n;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 64ab495..f9ab745 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -331,14 +331,12 @@
 	struct bio_vec bvec;
 	sector_t sector;
 	struct bvec_iter iter;
-	int err = -EIO;
 
 	sector = bio->bi_iter.bi_sector;
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
-		goto out;
+		goto io_error;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-		err = 0;
 		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
 		goto out;
 	}
@@ -349,15 +347,20 @@
 
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
+		int err;
+
 		err = brd_do_bvec(brd, bvec.bv_page, len,
 					bvec.bv_offset, rw, sector);
 		if (err)
-			break;
+			goto io_error;
 		sector += len >> SECTOR_SHIFT;
 	}
 
 out:
-	bio_endio(bio, err);
+	bio_endio(bio);
+	return;
+io_error:
+	bio_io_error(bio);
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
@@ -500,7 +503,7 @@
 	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
 
 	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
-	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
 	brd->brd_queue->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
 
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 1318e32..b3868e7 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -175,11 +175,11 @@
 	atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
 	device->md_io.submit_jif = jiffies;
 	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 	else
 		submit_bio(rw, bio);
 	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-	if (bio_flagged(bio, BIO_UPTODATE))
+	if (!bio->bi_error)
 		err = device->md_io.error;
 
  out:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 434c77d..e5e0f19 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -941,36 +941,27 @@
 }
 
 /* bv_page may be a copy, or may be the original */
-static void drbd_bm_endio(struct bio *bio, int error)
+static void drbd_bm_endio(struct bio *bio)
 {
 	struct drbd_bm_aio_ctx *ctx = bio->bi_private;
 	struct drbd_device *device = ctx->device;
 	struct drbd_bitmap *b = device->bitmap;
 	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-
-	/* strange behavior of some lower level drivers...
-	 * fail the request by clearing the uptodate flag,
-	 * but do not return any error?!
-	 * do we want to WARN() on this? */
-	if (!error && !uptodate)
-		error = -EIO;
 
 	if ((ctx->flags & BM_AIO_COPY_PAGES) == 0 &&
 	    !bm_test_page_unchanged(b->bm_pages[idx]))
 		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
 
-	if (error) {
+	if (bio->bi_error) {
 		/* ctx error will hold the completed-last non-zero error code,
 		 * in case error codes differ. */
-		ctx->error = error;
+		ctx->error = bio->bi_error;
 		bm_set_page_io_err(b->bm_pages[idx]);
 		/* Not identical to on disk version of it.
 		 * Is BM_PAGE_IO_ERROR enough? */
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-					error, idx);
+					bio->bi_error, idx);
 	} else {
 		bm_clear_page_io_err(b->bm_pages[idx]);
 		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
@@ -1031,7 +1022,7 @@
 
 	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
 		bio->bi_rw |= rw;
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 	} else {
 		submit_bio(rw, bio);
 		/* this should not count as user activity and cause the
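
The drbd_bm_endio() conversion above shows the new block-layer completion convention used throughout the remaining hunks: ->bi_end_io() no longer takes an error argument, the status is read from bio->bi_error, and submitters fail a bio with bio_io_error(). A minimal, hypothetical handler under that convention (example_endio() is made up) would look like this:

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/printk.h>

static void example_endio(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	if (bio->bi_error)
		pr_err("example: I/O failed: %d\n", bio->bi_error);

	complete(done);
	bio_put(bio);
}
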
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index efd19c2..015c6e9 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1450,7 +1450,6 @@
 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
 extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
-extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 
 
@@ -1481,9 +1480,9 @@
 
 /* drbd_worker.c */
 /* bi_end_io handlers */
-extern void drbd_md_endio(struct bio *bio, int error);
-extern void drbd_peer_request_endio(struct bio *bio, int error);
-extern void drbd_request_endio(struct bio *bio, int error);
+extern void drbd_md_endio(struct bio *bio);
+extern void drbd_peer_request_endio(struct bio *bio);
+extern void drbd_request_endio(struct bio *bio);
 extern int drbd_worker(struct drbd_thread *thi);
 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
 void drbd_resync_after_changed(struct drbd_device *device);
@@ -1604,12 +1603,13 @@
 	__release(local);
 	if (!bio->bi_bdev) {
 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-		bio_endio(bio, -ENODEV);
+		bio->bi_error = -ENODEV;
+		bio_endio(bio);
 		return;
 	}
 
 	if (drbd_insert_fault(device, fault_type))
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 	else
 		generic_make_request(bio);
 }
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index a151853..74d97f4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2774,7 +2774,6 @@
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
-	blk_queue_merge_bvec(q, drbd_merge_bvec);
 	q->queue_lock = &resource->req_lock;
 
 	device->md_io.page = alloc_page(GFP_KERNEL);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 74df8cf..e80cbef 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1156,14 +1156,14 @@
 			/* For now, don't allow more than one activity log extent worth of data
 			 * to be discarded in one go. We may need to rework drbd_al_begin_io()
 			 * to allow for even larger discard ranges */
-			q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS;
+			blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);
 
 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 			/* REALLY? Is stacking secdiscard "legal"? */
 			if (blk_queue_secdiscard(b))
 				queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
 		} else {
-			q->limits.max_discard_sectors = 0;
+			blk_queue_max_discard_sectors(q, 0);
 			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 			queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q);
 		}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 3907202..2115926 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -201,7 +201,8 @@
 void complete_master_bio(struct drbd_device *device,
 		struct bio_and_error *m)
 {
-	bio_endio(m->bio, m->error);
+	m->bio->bi_error = m->error;
+	bio_endio(m->bio);
 	dec_ap_bio(device);
 }
 
@@ -1153,12 +1154,12 @@
 				      rw == WRITE ? DRBD_FAULT_DT_WR
 				    : rw == READ  ? DRBD_FAULT_DT_RD
 				    :               DRBD_FAULT_DT_RA))
-			bio_endio(bio, -EIO);
+			bio_io_error(bio);
 		else
 			generic_make_request(bio);
 		put_ldev(device);
 	} else
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 }
 
 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
@@ -1191,7 +1192,8 @@
 		/* only pass the error to the upper layers.
 		 * if user cannot handle io errors, that's not our business. */
 		drbd_err(device, "could not kmalloc() req\n");
-		bio_endio(bio, -ENOMEM);
+		bio->bi_error = -ENOMEM;
+		bio_endio(bio);
 		return ERR_PTR(-ENOMEM);
 	}
 	req->start_jif = start_jif;
@@ -1497,6 +1499,8 @@
 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
 	unsigned long start_jif;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	start_jif = jiffies;
 
 	/*
@@ -1508,41 +1512,6 @@
 	__drbd_make_request(device, bio, start_jif);
 }
 
-/* This is called by bio_add_page().
- *
- * q->max_hw_sectors and other global limits are already enforced there.
- *
- * We need to call down to our lower level device,
- * in case it has special restrictions.
- *
- * We also may need to enforce configured max-bio-bvecs limits.
- *
- * As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset, so no need to ask lower levels.
- */
-int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
-{
-	struct drbd_device *device = (struct drbd_device *) q->queuedata;
-	unsigned int bio_size = bvm->bi_size;
-	int limit = DRBD_MAX_BIO_SIZE;
-	int backing_limit;
-
-	if (bio_size && get_ldev(device)) {
-		unsigned int max_hw_sectors = queue_max_hw_sectors(q);
-		struct request_queue * const b =
-			device->ldev->backing_bdev->bd_disk->queue;
-		if (b->merge_bvec_fn) {
-			bvm->bi_bdev = device->ldev->backing_bdev;
-			backing_limit = b->merge_bvec_fn(b, bvm, bvec);
-			limit = min(limit, backing_limit);
-		}
-		put_ldev(device);
-		if ((limit >> 9) > max_hw_sectors)
-			limit = max_hw_sectors << 9;
-	}
-	return limit;
-}
-
 void request_timer_fn(unsigned long data)
 {
 	struct drbd_device *device = (struct drbd_device *) data;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d0fae55..5578c14 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -65,12 +65,12 @@
 /* used for synchronous meta data and bitmap IO
  * submitted by drbd_md_sync_page_io()
  */
-void drbd_md_endio(struct bio *bio, int error)
+void drbd_md_endio(struct bio *bio)
 {
 	struct drbd_device *device;
 
 	device = bio->bi_private;
-	device->md_io.error = error;
+	device->md_io.error = bio->bi_error;
 
 	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
 	 * to timeout on the lower level device, and eventually detach from it.
@@ -170,31 +170,20 @@
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver.
  */
-void drbd_peer_request_endio(struct bio *bio, int error)
+void drbd_peer_request_endio(struct bio *bio)
 {
 	struct drbd_peer_request *peer_req = bio->bi_private;
 	struct drbd_device *device = peer_req->peer_device->device;
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
 	int is_discard = !!(bio->bi_rw & REQ_DISCARD);
 
-	if (error && __ratelimit(&drbd_ratelimit_state))
+	if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
 		drbd_warn(device, "%s: error=%d s=%llus\n",
 				is_write ? (is_discard ? "discard" : "write")
-					: "read", error,
+					: "read", bio->bi_error,
 				(unsigned long long)peer_req->i.sector);
-	if (!error && !uptodate) {
-		if (__ratelimit(&drbd_ratelimit_state))
-			drbd_warn(device, "%s: setting error to -EIO s=%llus\n",
-					is_write ? "write" : "read",
-					(unsigned long long)peer_req->i.sector);
-		/* strange behavior of some lower level drivers...
-		 * fail the request by clearing the uptodate flag,
-		 * but do not return any error?! */
-		error = -EIO;
-	}
 
-	if (error)
+	if (bio->bi_error)
 		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
 	bio_put(bio); /* no need for the bio anymore */
@@ -208,24 +197,13 @@
 
 /* read, readA or write requests on R_PRIMARY coming from drbd_make_request
  */
-void drbd_request_endio(struct bio *bio, int error)
+void drbd_request_endio(struct bio *bio)
 {
 	unsigned long flags;
 	struct drbd_request *req = bio->bi_private;
 	struct drbd_device *device = req->device;
 	struct bio_and_error m;
 	enum drbd_req_event what;
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-	if (!error && !uptodate) {
-		drbd_warn(device, "p %s: setting error to -EIO\n",
-			 bio_data_dir(bio) == WRITE ? "write" : "read");
-		/* strange behavior of some lower level drivers...
-		 * fail the request by clearing the uptodate flag,
-		 * but do not return any error?! */
-		error = -EIO;
-	}
-
 
 	/* If this request was aborted locally before,
 	 * but now was completed "successfully",
@@ -259,14 +237,14 @@
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
 
-		if (!error)
+		if (!bio->bi_error)
 			panic("possible random memory corruption caused by delayed completion of aborted local request\n");
 	}
 
 	/* to avoid recursion in __req_mod */
-	if (unlikely(error)) {
+	if (unlikely(bio->bi_error)) {
 		if (bio->bi_rw & REQ_DISCARD)
-			what = (error == -EOPNOTSUPP)
+			what = (bio->bi_error == -EOPNOTSUPP)
 				? DISCARD_COMPLETED_NOTSUPP
 				: DISCARD_COMPLETED_WITH_ERROR;
 		else
@@ -279,7 +257,7 @@
 		what = COMPLETED_OK;
 
 	bio_put(req->private_bio);
-	req->private_bio = ERR_PTR(error);
+	req->private_bio = ERR_PTR(bio->bi_error);
 
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a08cda9..331363e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3771,13 +3771,14 @@
 	struct completion complete;
 };
 
-static void floppy_rb0_cb(struct bio *bio, int err)
+static void floppy_rb0_cb(struct bio *bio)
 {
 	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
 	int drive = cbdata->drive;
 
-	if (err) {
-		pr_info("floppy: error %d while reading block 0\n", err);
+	if (bio->bi_error) {
+		pr_info("floppy: error %d while reading block 0\n",
+			bio->bi_error);
 		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
 	}
 	complete(&cbdata->complete);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f7a4c9d..f9889b6 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -675,7 +675,7 @@
 	    lo->lo_encrypt_key_size) {
 		q->limits.discard_granularity = 0;
 		q->limits.discard_alignment = 0;
-		q->limits.max_discard_sectors = 0;
+		blk_queue_max_discard_sectors(q, 0);
 		q->limits.discard_zeroes_data = 0;
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 		return;
@@ -683,7 +683,7 @@
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_alignment = 0;
-	q->limits.max_discard_sectors = UINT_MAX >> 9;
+	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 4a2ef09..f504232 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3756,6 +3756,14 @@
 	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 
+	/*
+	 * For flush requests, request_idx starts at the end of the
+	 * tag space.  Since we don't support FLUSH/FUA, simply return
+	 * 0 as there's nothing to be done.
+	 */
+	if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
+		return 0;
+
 	cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
 			&cmd->command_dma, GFP_KERNEL);
 	if (!cmd->command)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 0e385d8..293495a 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -33,6 +33,7 @@
 #include <linux/net.h>
 #include <linux/kthread.h>
 #include <linux/types.h>
+#include <linux/debugfs.h>
 
 #include <asm/uaccess.h>
 #include <asm/types.h>
@@ -40,8 +41,7 @@
 #include <linux/nbd.h>
 
 struct nbd_device {
-	int flags;
-	int harderror;		/* Code of hard error			*/
+	u32 flags;
 	struct socket * sock;	/* If == NULL, device is not ready, yet	*/
 	int magic;
 
@@ -56,11 +56,24 @@
 	struct gendisk *disk;
 	int blksize;
 	loff_t bytesize;
-	pid_t pid; /* pid of nbd-client, if attached */
 	int xmit_timeout;
-	int disconnect; /* a disconnect has been requested by user */
+	bool disconnect; /* a disconnect has been requested by user */
+
+	struct timer_list timeout_timer;
+	struct task_struct *task_recv;
+	struct task_struct *task_send;
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+	struct dentry *dbg_dir;
+#endif
 };
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static struct dentry *nbd_dbg_dir;
+#endif
+
+#define nbd_name(nbd) ((nbd)->disk->disk_name)
+
 #define NBD_MAGIC 0x68797548
 
 static unsigned int nbds_max = 16;
@@ -113,26 +126,36 @@
 /*
  * Forcibly shutdown the socket causing all listeners to error
  */
-static void sock_shutdown(struct nbd_device *nbd, int lock)
+static void sock_shutdown(struct nbd_device *nbd)
 {
-	if (lock)
-		mutex_lock(&nbd->tx_lock);
-	if (nbd->sock) {
-		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
-		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
-		nbd->sock = NULL;
-	}
-	if (lock)
-		mutex_unlock(&nbd->tx_lock);
+	if (!nbd->sock)
+		return;
+
+	dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
+	kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+	nbd->sock = NULL;
+	del_timer_sync(&nbd->timeout_timer);
 }
 
 static void nbd_xmit_timeout(unsigned long arg)
 {
-	struct task_struct *task = (struct task_struct *)arg;
+	struct nbd_device *nbd = (struct nbd_device *)arg;
+	struct task_struct *task;
 
-	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
-		task->comm, task->pid);
-	force_sig(SIGKILL, task);
+	if (list_empty(&nbd->queue_head))
+		return;
+
+	nbd->disconnect = true;
+
+	task = READ_ONCE(nbd->task_recv);
+	if (task)
+		force_sig(SIGKILL, task);
+
+	task = READ_ONCE(nbd->task_send);
+	if (task)
+		force_sig(SIGKILL, task);
+
+	dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n");
 }
 
 /*
@@ -171,33 +194,12 @@
 		msg.msg_controllen = 0;
 		msg.msg_flags = msg_flags | MSG_NOSIGNAL;
 
-		if (send) {
-			struct timer_list ti;
-
-			if (nbd->xmit_timeout) {
-				init_timer(&ti);
-				ti.function = nbd_xmit_timeout;
-				ti.data = (unsigned long)current;
-				ti.expires = jiffies + nbd->xmit_timeout;
-				add_timer(&ti);
-			}
+		if (send)
 			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
-			if (nbd->xmit_timeout)
-				del_timer_sync(&ti);
-		} else
+		else
 			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
 						msg.msg_flags);
 
-		if (signal_pending(current)) {
-			siginfo_t info;
-			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
-				task_pid_nr(current), current->comm,
-				dequeue_signal_lock(current, &current->blocked, &info));
-			result = -EINTR;
-			sock_shutdown(nbd, !send);
-			break;
-		}
-
 		if (result <= 0) {
 			if (result == 0)
 				result = -EPIPE; /* short read */
@@ -210,6 +212,9 @@
 	sigprocmask(SIG_SETMASK, &oldset, NULL);
 	tsk_restore_flags(current, pflags, PF_MEMALLOC);
 
+	if (!send && nbd->xmit_timeout)
+		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
+
 	return result;
 }
 
@@ -333,26 +338,24 @@
 	if (result <= 0) {
 		dev_err(disk_to_dev(nbd->disk),
 			"Receive control failed (result %d)\n", result);
-		goto harderror;
+		return ERR_PTR(result);
 	}
 
 	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
 		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
 				(unsigned long)ntohl(reply.magic));
-		result = -EPROTO;
-		goto harderror;
+		return ERR_PTR(-EPROTO);
 	}
 
 	req = nbd_find_request(nbd, *(struct request **)reply.handle);
 	if (IS_ERR(req)) {
 		result = PTR_ERR(req);
 		if (result != -ENOENT)
-			goto harderror;
+			return ERR_PTR(result);
 
 		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
 			reply.handle);
-		result = -EBADR;
-		goto harderror;
+		return ERR_PTR(-EBADR);
 	}
 
 	if (ntohl(reply.error)) {
@@ -380,18 +383,15 @@
 		}
 	}
 	return req;
-harderror:
-	nbd->harderror = result;
-	return NULL;
 }
 
 static ssize_t pid_show(struct device *dev,
 			struct device_attribute *attr, char *buf)
 {
 	struct gendisk *disk = dev_to_disk(dev);
+	struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
 
-	return sprintf(buf, "%ld\n",
-		(long) ((struct nbd_device *)disk->private_data)->pid);
+	return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
 }
 
 static struct device_attribute pid_attr = {
@@ -399,7 +399,7 @@
 	.show = pid_show,
 };
 
-static int nbd_do_it(struct nbd_device *nbd)
+static int nbd_thread_recv(struct nbd_device *nbd)
 {
 	struct request *req;
 	int ret;
@@ -407,20 +407,43 @@
 	BUG_ON(nbd->magic != NBD_MAGIC);
 
 	sk_set_memalloc(nbd->sock->sk);
-	nbd->pid = task_pid_nr(current);
+
+	nbd->task_recv = current;
+
 	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
 	if (ret) {
 		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
-		nbd->pid = 0;
+		nbd->task_recv = NULL;
 		return ret;
 	}
 
-	while ((req = nbd_read_stat(nbd)) != NULL)
+	while (1) {
+		req = nbd_read_stat(nbd);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			break;
+		}
+
 		nbd_end_request(nbd, req);
+	}
 
 	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
-	nbd->pid = 0;
-	return 0;
+
+	nbd->task_recv = NULL;
+
+	if (signal_pending(current)) {
+		siginfo_t info;
+
+		ret = dequeue_signal_lock(current, &current->blocked, &info);
+		dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
+			 task_pid_nr(current), current->comm, ret);
+		mutex_lock(&nbd->tx_lock);
+		sock_shutdown(nbd);
+		mutex_unlock(&nbd->tx_lock);
+		ret = -ETIMEDOUT;
+	}
+
+	return ret;
 }
 
 static void nbd_clear_que(struct nbd_device *nbd)
@@ -455,6 +478,7 @@
 		req->errors++;
 		nbd_end_request(nbd, req);
 	}
+	dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
 }
 
 
@@ -482,6 +506,9 @@
 
 	nbd->active_req = req;
 
+	if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head))
+		mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout);
+
 	if (nbd_send_req(nbd, req) != 0) {
 		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
 		req->errors++;
@@ -503,11 +530,13 @@
 	nbd_end_request(nbd, req);
 }
 
-static int nbd_thread(void *data)
+static int nbd_thread_send(void *data)
 {
 	struct nbd_device *nbd = data;
 	struct request *req;
 
+	nbd->task_send = current;
+
 	set_user_nice(current, MIN_NICE);
 	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
 		/* wait for something to do */
@@ -515,6 +544,20 @@
 					 kthread_should_stop() ||
 					 !list_empty(&nbd->waiting_queue));
 
+		if (signal_pending(current)) {
+			siginfo_t info;
+			int ret;
+
+			ret = dequeue_signal_lock(current, &current->blocked,
+						  &info);
+			dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
+				 task_pid_nr(current), current->comm, ret);
+			mutex_lock(&nbd->tx_lock);
+			sock_shutdown(nbd);
+			mutex_unlock(&nbd->tx_lock);
+			break;
+		}
+
 		/* extract request */
 		if (list_empty(&nbd->waiting_queue))
 			continue;
@@ -528,6 +571,9 @@
 		/* handle request */
 		nbd_handle_req(nbd, req);
 	}
+
+	nbd->task_send = NULL;
+
 	return 0;
 }
 
@@ -538,7 +584,7 @@
  *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
 
-static void do_nbd_request(struct request_queue *q)
+static void nbd_request_handler(struct request_queue *q)
 		__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct request *req;
@@ -574,6 +620,9 @@
 	}
 }
 
+static int nbd_dev_dbg_init(struct nbd_device *nbd);
+static void nbd_dev_dbg_close(struct nbd_device *nbd);
+
 /* Must be called with tx_lock held */
 
 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
@@ -597,7 +646,7 @@
 		if (!nbd->sock)
 			return -EINVAL;
 
-		nbd->disconnect = 1;
+		nbd->disconnect = true;
 
 		nbd_send_req(nbd, &sreq);
 		return 0;
@@ -625,7 +674,7 @@
 			nbd->sock = sock;
 			if (max_part > 0)
 				bdev->bd_invalidated = 1;
-			nbd->disconnect = 0; /* we're connected now */
+			nbd->disconnect = false; /* we're connected now */
 			return 0;
 		}
 		return -EINVAL;
@@ -648,6 +697,12 @@
 
 	case NBD_SET_TIMEOUT:
 		nbd->xmit_timeout = arg * HZ;
+		if (arg)
+			mod_timer(&nbd->timeout_timer,
+				  jiffies + nbd->xmit_timeout);
+		else
+			del_timer_sync(&nbd->timeout_timer);
+
 		return 0;
 
 	case NBD_SET_FLAGS:
@@ -666,7 +721,7 @@
 		struct socket *sock;
 		int error;
 
-		if (nbd->pid)
+		if (nbd->task_recv)
 			return -EBUSY;
 		if (!nbd->sock)
 			return -EINVAL;
@@ -683,24 +738,24 @@
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_run(nbd_thread, nbd, "%s",
-				     nbd->disk->disk_name);
+		thread = kthread_run(nbd_thread_send, nbd, "%s",
+				     nbd_name(nbd));
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
 		}
 
-		error = nbd_do_it(nbd);
+		nbd_dev_dbg_init(nbd);
+		error = nbd_thread_recv(nbd);
+		nbd_dev_dbg_close(nbd);
 		kthread_stop(thread);
 
 		mutex_lock(&nbd->tx_lock);
-		if (error)
-			return error;
-		sock_shutdown(nbd, 0);
+
+		sock_shutdown(nbd);
 		sock = nbd->sock;
 		nbd->sock = NULL;
 		nbd_clear_que(nbd);
-		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
 		kill_bdev(bdev);
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 		set_device_ro(bdev, false);
@@ -714,7 +769,7 @@
 			blkdev_reread_part(bdev);
 		if (nbd->disconnect) /* user requested, ignore socket errors */
 			return 0;
-		return nbd->harderror;
+		return error;
 	}
 
 	case NBD_CLEAR_QUE:
@@ -758,6 +813,161 @@
 	.ioctl =	nbd_ioctl,
 };
 
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+
+static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
+{
+	struct nbd_device *nbd = s->private;
+
+	if (nbd->task_recv)
+		seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
+	if (nbd->task_send)
+		seq_printf(s, "send: %d\n", task_pid_nr(nbd->task_send));
+
+	return 0;
+}
+
+static int nbd_dbg_tasks_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbd_dbg_tasks_show, inode->i_private);
+}
+
+static const struct file_operations nbd_dbg_tasks_ops = {
+	.open = nbd_dbg_tasks_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
+{
+	struct nbd_device *nbd = s->private;
+	u32 flags = nbd->flags;
+
+	seq_printf(s, "Hex: 0x%08x\n\n", flags);
+
+	seq_puts(s, "Known flags:\n");
+
+	if (flags & NBD_FLAG_HAS_FLAGS)
+		seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
+	if (flags & NBD_FLAG_READ_ONLY)
+		seq_puts(s, "NBD_FLAG_READ_ONLY\n");
+	if (flags & NBD_FLAG_SEND_FLUSH)
+		seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+	if (flags & NBD_FLAG_SEND_TRIM)
+		seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
+
+	return 0;
+}
+
+static int nbd_dbg_flags_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nbd_dbg_flags_show, inode->i_private);
+}
+
+static const struct file_operations nbd_dbg_flags_ops = {
+	.open = nbd_dbg_flags_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int nbd_dev_dbg_init(struct nbd_device *nbd)
+{
+	struct dentry *dir;
+	struct dentry *f;
+
+	dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
+	if (IS_ERR_OR_NULL(dir)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s' (%ld)\n",
+			nbd_name(nbd), PTR_ERR(dir));
+		return PTR_ERR(dir);
+	}
+	nbd->dbg_dir = dir;
+
+	f = debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'tasks', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'size_bytes', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'timeout', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'blocksize', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	f = debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops);
+	if (IS_ERR_OR_NULL(f)) {
+		dev_err(nbd_to_dev(nbd), "Failed to create debugfs file 'flags', %ld\n",
+			PTR_ERR(f));
+		return PTR_ERR(f);
+	}
+
+	return 0;
+}
+
+static void nbd_dev_dbg_close(struct nbd_device *nbd)
+{
+	debugfs_remove_recursive(nbd->dbg_dir);
+}
+
+static int nbd_dbg_init(void)
+{
+	struct dentry *dbg_dir;
+
+	dbg_dir = debugfs_create_dir("nbd", NULL);
+	if (IS_ERR(dbg_dir))
+		return PTR_ERR(dbg_dir);
+
+	nbd_dbg_dir = dbg_dir;
+
+	return 0;
+}
+
+static void nbd_dbg_close(void)
+{
+	debugfs_remove_recursive(nbd_dbg_dir);
+}
+
+#else  /* IS_ENABLED(CONFIG_DEBUG_FS) */
+
+static int nbd_dev_dbg_init(struct nbd_device *nbd)
+{
+	return 0;
+}
+
+static void nbd_dev_dbg_close(struct nbd_device *nbd)
+{
+}
+
+static int nbd_dbg_init(void)
+{
+	return 0;
+}
+
+static void nbd_dbg_close(void)
+{
+}
+
+#endif
+
 /*
  * And here should be modules and kernel interface 
  *  (Just smiley confuses emacs :-)
@@ -811,7 +1021,7 @@
 		 * every gendisk to have its very own request_queue struct.
 		 * These structs are big so we dynamically allocate them.
 		 */
-		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
+		disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock);
 		if (!disk->queue) {
 			put_disk(disk);
 			goto out;
@@ -822,7 +1032,7 @@
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
 		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 		disk->queue->limits.discard_granularity = 512;
-		disk->queue->limits.max_discard_sectors = UINT_MAX;
+		blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
 		disk->queue->limits.discard_zeroes_data = 0;
 		blk_queue_max_hw_sectors(disk->queue, 65536);
 		disk->queue->limits.max_sectors = 256;
@@ -835,6 +1045,8 @@
 
 	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
 
+	nbd_dbg_init();
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
 		nbd_dev[i].magic = NBD_MAGIC;
@@ -842,6 +1054,9 @@
 		spin_lock_init(&nbd_dev[i].queue_lock);
 		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
 		mutex_init(&nbd_dev[i].tx_lock);
+		init_timer(&nbd_dev[i].timeout_timer);
+		nbd_dev[i].timeout_timer.function = nbd_xmit_timeout;
+		nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i];
 		init_waitqueue_head(&nbd_dev[i].active_wq);
 		init_waitqueue_head(&nbd_dev[i].waiting_wq);
 		nbd_dev[i].blksize = 1024;
@@ -868,6 +1083,9 @@
 static void __exit nbd_cleanup(void)
 {
 	int i;
+
+	nbd_dbg_close();
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
 		nbd_dev[i].magic = 0;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3177b24..17269a3 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -222,7 +222,7 @@
 		blk_end_request_all(cmd->rq, 0);
 		break;
 	case NULL_Q_BIO:
-		bio_endio(cmd->bio, 0);
+		bio_endio(cmd->bio);
 		break;
 	}
 
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 7920c27..b97fc3f 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -72,6 +72,10 @@
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
+static bool use_cmb_sqes = true;
+module_param(use_cmb_sqes, bool, 0644);
+MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
+
 static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
@@ -103,6 +107,7 @@
 	char irqname[24];	/* nvme4294967295-65535\0 */
 	spinlock_t q_lock;
 	struct nvme_command *sq_cmds;
+	struct nvme_command __iomem *sq_cmds_io;
 	volatile struct nvme_completion *cqes;
 	struct blk_mq_tags **tags;
 	dma_addr_t sq_dma_addr;
@@ -379,27 +384,28 @@
  *
  * Safe to use from interrupt context
  */
-static int __nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
+						struct nvme_command *cmd)
 {
 	u16 tail = nvmeq->sq_tail;
 
-	memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+	if (nvmeq->sq_cmds_io)
+		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
+	else
+		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+
 	if (++tail == nvmeq->q_depth)
 		tail = 0;
 	writel(tail, nvmeq->q_db);
 	nvmeq->sq_tail = tail;
-
-	return 0;
 }
 
-static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 {
 	unsigned long flags;
-	int ret;
 	spin_lock_irqsave(&nvmeq->q_lock, flags);
-	ret = __nvme_submit_cmd(nvmeq, cmd);
+	__nvme_submit_cmd(nvmeq, cmd);
 	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
-	return ret;
 }
 
 static __le64 **iod_list(struct nvme_iod *iod)
@@ -730,18 +736,16 @@
 static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
 		struct nvme_iod *iod)
 {
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
-	memcpy(cmnd, req->cmd, sizeof(struct nvme_command));
-	cmnd->rw.command_id = req->tag;
+	memcpy(&cmnd, req->cmd, sizeof(cmnd));
+	cmnd.rw.command_id = req->tag;
 	if (req->nr_phys_segments) {
-		cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-		cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
+		cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+		cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
 	}
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 /*
@@ -754,45 +758,41 @@
 {
 	struct nvme_dsm_range *range =
 				(struct nvme_dsm_range *)iod_list(iod)[0];
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
 	range->cattr = cpu_to_le32(0);
 	range->nlb = cpu_to_le32(blk_rq_bytes(req) >> ns->lba_shift);
 	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
 
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->dsm.opcode = nvme_cmd_dsm;
-	cmnd->dsm.command_id = req->tag;
-	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma);
-	cmnd->dsm.nr = 0;
-	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.dsm.opcode = nvme_cmd_dsm;
+	cmnd.dsm.command_id = req->tag;
+	cmnd.dsm.nsid = cpu_to_le32(ns->ns_id);
+	cmnd.dsm.prp1 = cpu_to_le64(iod->first_dma);
+	cmnd.dsm.nr = 0;
+	cmnd.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static void nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 								int cmdid)
 {
-	struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+	struct nvme_command cmnd;
 
-	memset(cmnd, 0, sizeof(*cmnd));
-	cmnd->common.opcode = nvme_cmd_flush;
-	cmnd->common.command_id = cmdid;
-	cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.common.opcode = nvme_cmd_flush;
+	cmnd.common.command_id = cmdid;
+	cmnd.common.nsid = cpu_to_le32(ns->ns_id);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 }
 
 static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 							struct nvme_ns *ns)
 {
 	struct request *req = iod_get_private(iod);
-	struct nvme_command *cmnd;
+	struct nvme_command cmnd;
 	u16 control = 0;
 	u32 dsmgmt = 0;
 
@@ -804,19 +804,16 @@
 	if (req->cmd_flags & REQ_RAHEAD)
 		dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
-	cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-	memset(cmnd, 0, sizeof(*cmnd));
+	memset(&cmnd, 0, sizeof(cmnd));
+	cmnd.rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
+	cmnd.rw.command_id = req->tag;
+	cmnd.rw.nsid = cpu_to_le32(ns->ns_id);
+	cmnd.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+	cmnd.rw.prp2 = cpu_to_le64(iod->first_dma);
+	cmnd.rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	cmnd.rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
 
-	cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
-	cmnd->rw.command_id = req->tag;
-	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
-	cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
-	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
-
-	if (blk_integrity_rq(req)) {
-		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+	if (ns->ms) {
 		switch (ns->pi_type) {
 		case NVME_NS_DPS_PI_TYPE3:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD;
@@ -825,19 +822,21 @@
 		case NVME_NS_DPS_PI_TYPE2:
 			control |= NVME_RW_PRINFO_PRCHK_GUARD |
 					NVME_RW_PRINFO_PRCHK_REF;
-			cmnd->rw.reftag = cpu_to_le32(
+			cmnd.rw.reftag = cpu_to_le32(
 					nvme_block_nr(ns, blk_rq_pos(req)));
 			break;
 		}
-	} else if (ns->ms)
-		control |= NVME_RW_PRINFO_PRACT;
+		if (blk_integrity_rq(req))
+			cmnd.rw.metadata =
+				cpu_to_le64(sg_dma_address(iod->meta_sg));
+		else
+			control |= NVME_RW_PRINFO_PRACT;
+	}
 
-	cmnd->rw.control = cpu_to_le16(control);
-	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+	cmnd.rw.control = cpu_to_le16(control);
+	cmnd.rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-	if (++nvmeq->sq_tail == nvmeq->q_depth)
-		nvmeq->sq_tail = 0;
-	writel(nvmeq->sq_tail, nvmeq->q_db);
+	__nvme_submit_cmd(nvmeq, &cmnd);
 
 	return 0;
 }
@@ -1080,7 +1079,8 @@
 	c.common.command_id = req->tag;
 
 	blk_mq_free_request(req);
-	return __nvme_submit_cmd(nvmeq, &c);
+	__nvme_submit_cmd(nvmeq, &c);
+	return 0;
 }
 
 static int nvme_submit_admin_async_cmd(struct nvme_dev *dev,
@@ -1103,7 +1103,8 @@
 
 	cmd->common.command_id = req->tag;
 
-	return nvme_submit_cmd(nvmeq, cmd);
+	nvme_submit_cmd(nvmeq, cmd);
+	return 0;
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1315,12 +1316,7 @@
 
 	dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", req->tag,
 							nvmeq->qid);
-	if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) {
-		dev_warn(nvmeq->q_dmadev,
-				"Could not abort I/O %d QID %d",
-				req->tag, nvmeq->qid);
-		blk_mq_free_request(abort_req);
-	}
+	nvme_submit_cmd(dev->queues[0], &cmd);
 }
 
 static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved)
@@ -1374,7 +1370,8 @@
 {
 	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-	dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+	if (nvmeq->sq_cmds)
+		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
 					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	kfree(nvmeq);
 }
@@ -1447,6 +1444,47 @@
 	spin_unlock_irq(&nvmeq->q_lock);
 }
 
+static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
+				int entry_size)
+{
+	int q_depth = dev->q_depth;
+	unsigned q_size_aligned = roundup(q_depth * entry_size, dev->page_size);
+
+	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
+		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
+		mem_per_q = round_down(mem_per_q, dev->page_size);
+		q_depth = div_u64(mem_per_q, entry_size);
+
+		/*
+		 * Ensure the reduced q_depth is above some threshold where it
+		 * would be better to map queues in system memory with the
+		 * original depth
+		 */
+		if (q_depth < 64)
+			return -ENOMEM;
+	}
+
+	return q_depth;
+}
+
+static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+				int qid, int depth)
+{
+	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
+		unsigned offset = (qid - 1) *
+					roundup(SQ_SIZE(depth), dev->page_size);
+		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+		nvmeq->sq_cmds_io = dev->cmb + offset;
+	} else {
+		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					&nvmeq->sq_dma_addr, GFP_KERNEL);
+		if (!nvmeq->sq_cmds)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 							int depth)
 {
@@ -1459,9 +1497,7 @@
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-	if (!nvmeq->sq_cmds)
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
 		goto free_cqdma;
 
 	nvmeq->q_dmadev = dev->dev;
@@ -1696,6 +1732,12 @@
 		page_shift = dev_page_max;
 	}
 
+	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
+						NVME_CAP_NSSRC(cap) : 0;
+
+	if (dev->subsystem && (readl(&dev->bar->csts) & NVME_CSTS_NSSRO))
+		writel(NVME_CSTS_NSSRO, &dev->bar->csts);
+
 	result = nvme_disable_ctrl(dev, cap);
 	if (result < 0)
 		return result;
@@ -1856,6 +1898,15 @@
 	return status;
 }
 
+static int nvme_subsys_reset(struct nvme_dev *dev)
+{
+	if (!dev->subsystem)
+		return -ENOTTY;
+
+	writel(0x4E564D65, &dev->bar->nssr); /* "NVMe" */
+	return 0;
+}
+
 static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 							unsigned long arg)
 {
@@ -1935,7 +1986,7 @@
 	ns->queue->limits.discard_zeroes_data = 0;
 	ns->queue->limits.discard_alignment = logical_block_size;
 	ns->queue->limits.discard_granularity = logical_block_size;
-	ns->queue->limits.max_discard_sectors = 0xffffffff;
+	blk_queue_max_discard_sectors(ns->queue, 0xffffffff);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
 }
 
@@ -1989,7 +2040,7 @@
 								!ns->ext)
 		nvme_init_integrity(ns);
 
-	if (ns->ms && !blk_get_integrity(disk))
+	if (ns->ms && !(ns->ms == 8 && ns->pi_type) && !blk_get_integrity(disk))
 		set_capacity(disk, 0);
 	else
 		set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -2020,7 +2071,10 @@
 		spin_lock(&dev_list_lock);
 		list_for_each_entry_safe(dev, next, &dev_list, node) {
 			int i;
-			if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
+			u32 csts = readl(&dev->bar->csts);
+
+			if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
+							csts & NVME_CSTS_CFS) {
 				if (work_busy(&dev->reset_work))
 					continue;
 				list_del_init(&dev->node);
@@ -2067,7 +2121,6 @@
 		goto out_free_ns;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
-	queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, ns->queue);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
 
@@ -2081,12 +2134,16 @@
 	list_add_tail(&ns->list, &dev->namespaces);
 
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	if (dev->max_hw_sectors)
+	if (dev->max_hw_sectors) {
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
+		blk_queue_max_segments(ns->queue,
+			((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+	}
 	if (dev->stripe_size)
 		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
 	if (dev->vwc & NVME_CTRL_VWC_PRESENT)
 		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
+	blk_queue_virt_boundary(ns->queue, dev->page_size - 1);
 
 	disk->major = nvme_major;
 	disk->first_minor = 0;
@@ -2159,6 +2216,58 @@
 	return min(result & 0xffff, result >> 16) + 1;
 }
 
+static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
+{
+	u64 szu, size, offset;
+	u32 cmbloc;
+	resource_size_t bar_size;
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	void __iomem *cmb;
+	dma_addr_t dma_addr;
+
+	if (!use_cmb_sqes)
+		return NULL;
+
+	dev->cmbsz = readl(&dev->bar->cmbsz);
+	if (!(NVME_CMB_SZ(dev->cmbsz)))
+		return NULL;
+
+	cmbloc = readl(&dev->bar->cmbloc);
+
+	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
+	size = szu * NVME_CMB_SZ(dev->cmbsz);
+	offset = szu * NVME_CMB_OFST(cmbloc);
+	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+
+	if (offset > bar_size)
+		return NULL;
+
+	/*
+	 * Controllers may support a CMB size larger than their BAR,
+	 * for example, due to being behind a bridge. Reduce the CMB to
+	 * the reported size of the BAR
+	 */
+	if (size > bar_size - offset)
+		size = bar_size - offset;
+
+	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+	cmb = ioremap_wc(dma_addr, size);
+	if (!cmb)
+		return NULL;
+
+	dev->cmb_dma_addr = dma_addr;
+	dev->cmb_size = size;
+	return cmb;
+}
+
+static inline void nvme_release_cmb(struct nvme_dev *dev)
+{
+	if (dev->cmb) {
+		iounmap(dev->cmb);
+		dev->cmb = NULL;
+	}
+}
+
 static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
 {
 	return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
@@ -2177,6 +2286,15 @@
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
+	if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
+		result = nvme_cmb_qdepth(dev, nr_io_queues,
+				sizeof(struct nvme_command));
+		if (result > 0)
+			dev->q_depth = result;
+		else
+			nvme_release_cmb(dev);
+	}
+
 	size = db_bar_size(dev, nr_io_queues);
 	if (size > 8192) {
 		iounmap(dev->bar);
@@ -2344,7 +2462,6 @@
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	int res;
-	unsigned nn;
 	struct nvme_id_ctrl *ctrl;
 	int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
@@ -2354,7 +2471,6 @@
 		return -EIO;
 	}
 
-	nn = le32_to_cpup(&ctrl->nn);
 	dev->oncs = le16_to_cpup(&ctrl->oncs);
 	dev->abort_limit = ctrl->acl + 1;
 	dev->vwc = ctrl->vwc;
@@ -2440,6 +2556,8 @@
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+	if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
+		dev->cmb = nvme_map_cmb(dev);
 
 	return 0;
 
@@ -2820,6 +2938,8 @@
 	case NVME_IOCTL_RESET:
 		dev_warn(dev->dev, "resetting controller\n");
 		return nvme_reset(dev);
+	case NVME_IOCTL_SUBSYS_RESET:
+		return nvme_subsys_reset(dev);
 	default:
 		return -ENOTTY;
 	}
@@ -3145,6 +3265,7 @@
 	nvme_dev_remove_admin(dev);
 	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 	nvme_free_queues(dev, 0);
+	nvme_release_cmb(dev);
 	nvme_release_prp_pools(dev);
 	kref_put(&dev->kref, nvme_free_dev);
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 4c20c22..7be2375 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -977,7 +977,7 @@
 	}
 }
 
-static void pkt_end_io_read(struct bio *bio, int err)
+static void pkt_end_io_read(struct bio *bio)
 {
 	struct packet_data *pkt = bio->bi_private;
 	struct pktcdvd_device *pd = pkt->pd;
@@ -985,9 +985,9 @@
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_iter.bi_sector, err);
+		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
 
-	if (err)
+	if (bio->bi_error)
 		atomic_inc(&pkt->io_errors);
 	if (atomic_dec_and_test(&pkt->io_wait)) {
 		atomic_inc(&pkt->run_sm);
@@ -996,13 +996,13 @@
 	pkt_bio_finished(pd);
 }
 
-static void pkt_end_io_packet_write(struct bio *bio, int err)
+static void pkt_end_io_packet_write(struct bio *bio)
 {
 	struct packet_data *pkt = bio->bi_private;
 	struct pktcdvd_device *pd = pkt->pd;
 	BUG_ON(!pd);
 
-	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);
+	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
 
 	pd->stats.pkt_ended++;
 
@@ -1340,22 +1340,22 @@
 	pkt_queue_bio(pd, pkt->w_bio);
 }
 
-static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
+static void pkt_finish_packet(struct packet_data *pkt, int error)
 {
 	struct bio *bio;
 
-	if (!uptodate)
+	if (error)
 		pkt->cache_valid = 0;
 
 	/* Finish all bios corresponding to this packet */
-	while ((bio = bio_list_pop(&pkt->orig_bios)))
-		bio_endio(bio, uptodate ? 0 : -EIO);
+	while ((bio = bio_list_pop(&pkt->orig_bios))) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	}
 }
 
 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
 {
-	int uptodate;
-
 	pkt_dbg(2, pd, "pkt %d\n", pkt->id);
 
 	for (;;) {
@@ -1384,7 +1384,7 @@
 			if (atomic_read(&pkt->io_wait) > 0)
 				return;
 
-			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
+			if (!pkt->w_bio->bi_error) {
 				pkt_set_state(pkt, PACKET_FINISHED_STATE);
 			} else {
 				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1401,8 +1401,7 @@
 			break;
 
 		case PACKET_FINISHED_STATE:
-			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
-			pkt_finish_packet(pkt, uptodate);
+			pkt_finish_packet(pkt, pkt->w_bio->bi_error);
 			return;
 
 		default:
@@ -2332,13 +2331,14 @@
 }
 
 
-static void pkt_end_io_read_cloned(struct bio *bio, int err)
+static void pkt_end_io_read_cloned(struct bio *bio)
 {
 	struct packet_stacked_data *psd = bio->bi_private;
 	struct pktcdvd_device *pd = psd->pd;
 
+	psd->bio->bi_error = bio->bi_error;
 	bio_put(bio);
-	bio_endio(psd->bio, err);
+	bio_endio(psd->bio);
 	mempool_free(psd, psd_pool);
 	pkt_bio_finished(pd);
 }
@@ -2447,6 +2447,10 @@
 	char b[BDEVNAME_SIZE];
 	struct bio *split;
 
+	blk_queue_bounce(q, &bio);
+
+	blk_queue_split(q, &bio, q->bio_split);
+
 	pd = q->queuedata;
 	if (!pd) {
 		pr_err("%s incorrect request queue\n",
@@ -2477,8 +2481,6 @@
 		goto end_io;
 	}
 
-	blk_queue_bounce(q, &bio);
-
 	do {
 		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
 		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
@@ -2504,26 +2506,6 @@
 
 
 
-static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
-			  struct bio_vec *bvec)
-{
-	struct pktcdvd_device *pd = q->queuedata;
-	sector_t zone = get_zone(bmd->bi_sector, pd);
-	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
-	int remaining = (pd->settings.size << 9) - used;
-	int remaining2;
-
-	/*
-	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
-	 * boundary, pkt_make_request() will split the bio.
-	 */
-	remaining2 = PAGE_SIZE - bmd->bi_size;
-	remaining = max(remaining, remaining2);
-
-	BUG_ON(remaining < 0);
-	return remaining;
-}
-
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
 	struct request_queue *q = pd->disk->queue;
@@ -2531,7 +2513,6 @@
 	blk_queue_make_request(q, pkt_make_request);
 	blk_queue_logical_block_size(q, CD_FRAMESIZE);
 	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
-	blk_queue_merge_bvec(q, pkt_merge_bvec);
 	q->queuedata = pd;
 }
 
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index b1612eb..d89fcac 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -593,7 +593,8 @@
 	next = bio_list_peek(&priv->list);
 	spin_unlock_irq(&priv->lock);
 
-	bio_endio(bio, error);
+	bio->bi_error = error;
+	bio_endio(bio);
 	return next;
 }
 
@@ -605,6 +606,8 @@
 
 	dev_dbg(&dev->core, "%s\n", __func__);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	spin_lock_irq(&priv->lock);
 	busy = !bio_list_empty(&priv->list);
 	bio_list_add(&priv->list, bio);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bc67a93..698f761 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3474,52 +3474,6 @@
 	return BLK_MQ_RQ_QUEUE_OK;
 }
 
-/*
- * a queue callback. Makes sure that we don't create a bio that spans across
- * multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone_range()
- */
-static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
-			  struct bio_vec *bvec)
-{
-	struct rbd_device *rbd_dev = q->queuedata;
-	sector_t sector_offset;
-	sector_t sectors_per_obj;
-	sector_t obj_sector_offset;
-	int ret;
-
-	/*
-	 * Find how far into its rbd object the partition-relative
-	 * bio start sector is to offset relative to the enclosing
-	 * device.
-	 */
-	sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
-	sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
-	obj_sector_offset = sector_offset & (sectors_per_obj - 1);
-
-	/*
-	 * Compute the number of bytes from that offset to the end
-	 * of the object.  Account for what's already used by the bio.
-	 */
-	ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
-	if (ret > bmd->bi_size)
-		ret -= bmd->bi_size;
-	else
-		ret = 0;
-
-	/*
-	 * Don't send back more than was asked for.  And if the bio
-	 * was empty, let the whole thing through because:  "Note
-	 * that a block device *must* allow a single page to be
-	 * added to an empty bio."
-	 */
-	rbd_assert(bvec->bv_len <= PAGE_SIZE);
-	if (ret > (int) bvec->bv_len || !bmd->bi_size)
-		ret = (int) bvec->bv_len;
-
-	return ret;
-}
-
 static void rbd_free_disk(struct rbd_device *rbd_dev)
 {
 	struct gendisk *disk = rbd_dev->disk;
@@ -3815,10 +3769,9 @@
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	q->limits.discard_granularity = segment_size;
 	q->limits.discard_alignment = segment_size;
-	q->limits.max_discard_sectors = segment_size / SECTOR_SIZE;
+	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.discard_zeroes_data = 1;
 
-	blk_queue_merge_bvec(q, rbd_merge_bvec);
 	disk->queue = q;
 
 	q->queuedata = rbd_dev;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index ac8c62c..3163e4cdc 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -137,7 +137,10 @@
 		if (!card->eeh_state && card->gendisk)
 			disk_stats_complete(card, meta->bio, meta->start_time);
 
-		bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
+		if (atomic_read(&meta->error))
+			bio_io_error(meta->bio);
+		else
+			bio_endio(meta->bio);
 		kmem_cache_free(bio_meta_pool, meta);
 	}
 }
@@ -148,6 +151,8 @@
 	struct rsxx_bio_meta *bio_meta;
 	int st = -EINVAL;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	might_sleep();
 
 	if (!card)
@@ -199,7 +204,9 @@
 queue_err:
 	kmem_cache_free(bio_meta_pool, bio_meta);
 req_err:
-	bio_endio(bio, st);
+	if (st)
+		bio->bi_error = st;
+	bio_endio(bio);
 }
 
 /*----------------- Device Setup -------------------*/
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 1e46eb2..586f916 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -4422,7 +4422,7 @@
 	/* DISCARD Flag initialization. */
 	q->limits.discard_granularity = 8192;
 	q->limits.discard_alignment = 0;
-	q->limits.max_discard_sectors = UINT_MAX >> 9;
+	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4cf81b5..04d6579 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -456,7 +456,7 @@
 				PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
 			/* error */
-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+			bio->bi_error = -EIO;
 			dev_printk(KERN_WARNING, &card->dev->dev,
 				"I/O error on sector %d/%d\n",
 				le32_to_cpu(desc->local_addr)>>9,
@@ -505,7 +505,7 @@
 
 		return_bio = bio->bi_next;
 		bio->bi_next = NULL;
-		bio_endio(bio, 0);
+		bio_endio(bio);
 	}
 }
 
@@ -531,6 +531,8 @@
 		 (unsigned long long)bio->bi_iter.bi_sector,
 		 bio->bi_iter.bi_size);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
 	bio->bi_next = NULL;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 954c002..6a685ae 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1078,9 +1078,9 @@
 /*
  * bio callback.
  */
-static void end_block_io_op(struct bio *bio, int error)
+static void end_block_io_op(struct bio *bio)
 {
-	__end_block_io_op(bio->bi_private, error);
+	__end_block_io_op(bio->bi_private, bio->bi_error);
 	bio_put(bio);
 }
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7a8a73f..5f6b3be 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -82,7 +82,6 @@
 struct split_bio {
 	struct bio *bio;
 	atomic_t pending;
-	int err;
 };
 
 static DEFINE_MUTEX(blkfront_mutex);
@@ -1481,16 +1480,14 @@
 	return 0;
 }
 
-static void split_bio_end(struct bio *bio, int error)
+static void split_bio_end(struct bio *bio)
 {
 	struct split_bio *split_bio = bio->bi_private;
 
-	if (error)
-		split_bio->err = error;
-
 	if (atomic_dec_and_test(&split_bio->pending)) {
 		split_bio->bio->bi_phys_segments = 0;
-		bio_endio(split_bio->bio, split_bio->err);
+		split_bio->bio->bi_error = bio->bi_error;
+		bio_endio(split_bio->bio);
 		kfree(split_bio);
 	}
 	bio_put(bio);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 763301c..9c01f5b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -848,7 +848,7 @@
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		zram_bio_discard(zram, index, offset, bio);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -881,8 +881,7 @@
 		update_position(&index, &offset, &bvec);
 	}
 
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio_endio(bio, 0);
+	bio_endio(bio);
 	return;
 
 out:
@@ -899,6 +898,8 @@
 	if (unlikely(!zram_meta_get(zram)))
 		goto error;
 
+	blk_queue_split(queue, &bio, queue->bio_split);
+
 	if (!valid_io_request(zram, bio->bi_iter.bi_sector,
 					bio->bi_iter.bi_size)) {
 		atomic64_inc(&zram->stats.invalid_io);
@@ -1242,7 +1243,7 @@
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
-	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
+	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
 	 * size is identical with physical block size(PAGE_SIZE). But if it is
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2e77707..0bd88c9 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -13,6 +13,10 @@
 	tristate
 	select FW_LOADER
 
+config BT_QCA
+	tristate
+	select FW_LOADER
+
 config BT_HCIBTUSB
 	tristate "HCI USB driver"
 	depends on USB
@@ -132,6 +136,7 @@
 config BT_HCIUART_INTEL
 	bool "Intel protocol support"
 	depends on BT_HCIUART
+	select BT_HCIUART_H4
 	select BT_INTEL
 	help
 	  The Intel protocol support enables Bluetooth HCI over serial
@@ -150,6 +155,19 @@
 
 	  Say Y here to compile support for Broadcom protocol.
 
+config BT_HCIUART_QCA
+	bool "Qualcomm Atheros protocol support"
+	depends on BT_HCIUART
+	select BT_HCIUART_H4
+	select BT_QCA
+	help
+	  The Qualcomm Atheros protocol supports the HCI In-Band Sleep feature
+	  over the serial port interface (H4) between the controller and host.
+	  This protocol is required for UART clock control on QCA Bluetooth
+	  devices.
+
+	  Say Y here to compile support for QCA protocol.
+
 config BT_HCIBCM203X
 	tristate "HCI BCM203x USB driver"
 	depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f40e194..07c9cf3 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -22,6 +22,7 @@
 obj-$(CONFIG_BT_WILINK)		+= btwilink.o
 obj-$(CONFIG_BT_BCM)		+= btbcm.o
 obj-$(CONFIG_BT_RTL)		+= btrtl.o
+obj-$(CONFIG_BT_QCA)		+= btqca.o
 
 btmrvl-y			:= btmrvl_main.o
 btmrvl-$(CONFIG_DEBUG_FS)	+= btmrvl_debugfs.o
@@ -34,6 +35,7 @@
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)	+= hci_h5.o
 hci_uart-$(CONFIG_BT_HCIUART_INTEL)	+= hci_intel.o
 hci_uart-$(CONFIG_BT_HCIUART_BCM)	+= hci_bcm.o
+hci_uart-$(CONFIG_BT_HCIUART_QCA)	+= hci_qca.o
 hci_uart-objs				:= $(hci_uart-y)
 
 ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index fcfb72e..a5c4d05 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -492,7 +492,7 @@
 	case HCI_SCODATA_PKT:
 		hdev->stat.sco_tx++;
 		break;
-	};
+	}
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7aab654..a00bb82 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -427,7 +427,7 @@
 	case HCI_SCODATA_PKT:
 		hdev->stat.sco_tx++;
 		break;
-	};
+	}
 
 	/* Prepend skb with frame type */
 	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ceb8ac..02ed816 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,6 +34,7 @@
 
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
 #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
 
 int btbcm_check_bdaddr(struct hci_dev *hdev)
 {
@@ -66,9 +67,13 @@
 	 *
 	 * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
 	 * with waiting for configuration state.
+	 *
+	 * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
+	 * with waiting for configuration state.
 	 */
 	if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
-	    !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
+	    !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+	    !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
 		BT_INFO("%s: BCM: Using default device address (%pMR)",
 			hdev->name, &bda->bdaddr);
 		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -241,6 +246,7 @@
 	u16 subver;
 	const char *name;
 } bcm_uart_subver_table[] = {
+	{ 0x4103, "BCM4330B1"	},	/* 002.001.003 */
 	{ 0x410e, "BCM43341B0"	},	/* 002.001.014 */
 	{ 0x4406, "BCM4324B3"	},	/* 002.004.006 */
 	{ 0x610c, "BCM4354"	},	/* 003.001.012 */
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 828f2f8..048423f 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -89,7 +89,89 @@
 }
 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
 
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+	struct sk_buff *skb;
+	u8 type = 0x00;
+
+	BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reset after hardware error failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return;
+	}
+	kfree_skb(skb);
+
+	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return;
+	}
+
+	if (skb->len != 13) {
+		BT_ERR("%s: Exception info size mismatch", hdev->name);
+		kfree_skb(skb);
+		return;
+	}
+
+	BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+	kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+	const char *variant;
+
+	switch (ver->fw_variant) {
+	case 0x06:
+		variant = "Bootloader";
+		break;
+	case 0x23:
+		variant = "Firmware";
+		break;
+	default:
+		return;
+	}
+
+	BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+		variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+		ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+EXPORT_SYMBOL_GPL(btintel_version_info);
+
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+			const void *param)
+{
+	while (plen > 0) {
+		struct sk_buff *skb;
+		u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+		cmd_param[0] = fragment_type;
+		memcpy(cmd_param + 1, param, fragment_len);
+
+		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+				     cmd_param, HCI_INIT_TIMEOUT);
+		if (IS_ERR(skb))
+			return PTR_ERR(skb);
+
+		kfree_skb(skb);
+
+		plen -= fragment_len;
+		param += fragment_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("intel/ibt-11-5.sfi");
+MODULE_FIRMWARE("intel/ibt-11-5.ddc");
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 4bda6ab..b278d14 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -73,6 +73,11 @@
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+			const void *param);
 
 #else
 
@@ -86,4 +91,18 @@
 	return -EOPNOTSUPP;
 }
 
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+				      u32 plen, const void *param)
+{
+	return -EOPNOTSUPP;
+}
+
 #endif
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 086f0ec..27a9aac 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -95,10 +95,10 @@
 	struct btmrvl_device btmrvl_dev;
 	struct btmrvl_adapter *adapter;
 	struct btmrvl_thread main_thread;
-	int (*hw_host_to_card) (struct btmrvl_private *priv,
+	int (*hw_host_to_card)(struct btmrvl_private *priv,
 				u8 *payload, u16 nb);
-	int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
-	int (*hw_process_int_status) (struct btmrvl_private *priv);
+	int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+	int (*hw_process_int_status)(struct btmrvl_private *priv);
 	void (*firmware_dump)(struct btmrvl_private *priv);
 	spinlock_t driver_lock;		/* spinlock used by driver */
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index b9a8119..b9978a7 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1071,8 +1071,6 @@
 		}
 	}
 
-	sdio_release_host(card->func);
-
 	/*
 	 * winner or not, with this test the FW synchronizes when the
 	 * module can continue its initialization
@@ -1082,6 +1080,8 @@
 		return -ETIMEDOUT;
 	}
 
+	sdio_release_host(card->func);
+
 	return 0;
 
 done:
@@ -1376,8 +1376,7 @@
 
 	/* fw_dump_data will be free in device coredump release function
 	   after 5 min*/
-	dev_coredumpv(&priv->btmrvl_dev.hcidev->dev, fw_dump_data,
-		      fw_dump_len, GFP_KERNEL);
+	dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
 	BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
 }
 
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
new file mode 100644
index 0000000..4a62081
--- /dev/null
+++ b/drivers/bluetooth/btqca.c
@@ -0,0 +1,392 @@
+/*
+ *  Bluetooth support for Qualcomm Atheros chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/firmware.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "btqca.h"
+
+#define VERSION "0.1"
+
+static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
+{
+	struct sk_buff *skb;
+	struct edl_event_hdr *edl;
+	struct rome_version *ver;
+	char cmd;
+	int err = 0;
+
+	BT_DBG("%s: ROME Patch Version Request", hdev->name);
+
+	cmd = EDL_PATCH_VER_REQ_CMD;
+	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+				&cmd, HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Failed to read version of ROME (%d)", hdev->name,
+		       err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*edl) + sizeof(*ver)) {
+		BT_ERR("%s: Version size mismatch len %d", hdev->name,
+		       skb->len);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	edl = (struct edl_event_hdr *)(skb->data);
+	if (!edl || !edl->data) {
+		BT_ERR("%s: TLV with no header or no data", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+	    edl->rtype != EDL_APP_VER_RES_EVT) {
+		BT_ERR("%s: Wrong packet received %d %d", hdev->name,
+		       edl->cresp, edl->rtype);
+		err = -EIO;
+		goto out;
+	}
+
+	ver = (struct rome_version *)(edl->data);
+
+	BT_DBG("%s: Product:0x%08x", hdev->name, le32_to_cpu(ver->product_id));
+	BT_DBG("%s: Patch  :0x%08x", hdev->name, le16_to_cpu(ver->patch_ver));
+	BT_DBG("%s: ROM    :0x%08x", hdev->name, le16_to_cpu(ver->rome_ver));
+	BT_DBG("%s: SOC    :0x%08x", hdev->name, le32_to_cpu(ver->soc_id));
+
+	/* The ROME chipset version is derived from the patch and SoC
+	 * versions: the upper 2 bytes come from the SoC version and
+	 * the lower 2 bytes from the patch version.
+	 */
+	*rome_version = (le32_to_cpu(ver->soc_id) << 16) |
+			(le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+
+out:
+	kfree_skb(skb);
+
+	return err;
+}
+
+static int rome_reset(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	int err;
+
+	BT_DBG("%s: ROME HCI_RESET", hdev->name);
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Reset failed (%d)", hdev->name, err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+
+static void rome_tlv_check_data(struct rome_config *config,
+				const struct firmware *fw)
+{
+	const u8 *data;
+	u32 type_len;
+	u16 tag_id, tag_len;
+	int idx, length;
+	struct tlv_type_hdr *tlv;
+	struct tlv_type_patch *tlv_patch;
+	struct tlv_type_nvm *tlv_nvm;
+
+	tlv = (struct tlv_type_hdr *)fw->data;
+
+	type_len = le32_to_cpu(tlv->type_len);
+	length = (type_len >> 8) & 0x00ffffff;
+
+	BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff);
+	BT_DBG("Length\t\t : %d bytes", length);
+
+	switch (config->type) {
+	case TLV_TYPE_PATCH:
+		tlv_patch = (struct tlv_type_patch *)tlv->data;
+		BT_DBG("Total Length\t\t : %d bytes",
+		       le32_to_cpu(tlv_patch->total_size));
+		BT_DBG("Patch Data Length\t : %d bytes",
+		       le32_to_cpu(tlv_patch->data_length));
+		BT_DBG("Signing Format Version : 0x%x",
+		       tlv_patch->format_version);
+		BT_DBG("Signature Algorithm\t : 0x%x",
+		       tlv_patch->signature);
+		BT_DBG("Reserved\t\t : 0x%x",
+		       le16_to_cpu(tlv_patch->reserved1));
+		BT_DBG("Product ID\t\t : 0x%04x",
+		       le16_to_cpu(tlv_patch->product_id));
+		BT_DBG("Rom Build Version\t : 0x%04x",
+		       le16_to_cpu(tlv_patch->rom_build));
+		BT_DBG("Patch Version\t\t : 0x%04x",
+		       le16_to_cpu(tlv_patch->patch_version));
+		BT_DBG("Reserved\t\t : 0x%x",
+		       le16_to_cpu(tlv_patch->reserved2));
+		BT_DBG("Patch Entry Address\t : 0x%x",
+		       le32_to_cpu(tlv_patch->entry));
+		break;
+
+	case TLV_TYPE_NVM:
+		idx = 0;
+		data = tlv->data;
+		while (idx < length) {
+			tlv_nvm = (struct tlv_type_nvm *)(data + idx);
+
+			tag_id = le16_to_cpu(tlv_nvm->tag_id);
+			tag_len = le16_to_cpu(tlv_nvm->tag_len);
+
+			/* Update NVM tags as needed */
+			switch (tag_id) {
+			case EDL_TAG_ID_HCI:
+				/* HCI transport layer parameters,
+				 * enabling software in-band sleep
+				 * on the controller side.
+				 */
+				tlv_nvm->data[0] |= 0x80;
+
+				/* UART Baud Rate */
+				tlv_nvm->data[2] = config->user_baud_rate;
+
+				break;
+
+			case EDL_TAG_ID_DEEP_SLEEP:
+				/* Sleep enable mask,
+				 * enabling the deep sleep feature on the controller.
+				 */
+				tlv_nvm->data[0] |= 0x01;
+
+				break;
+			}
+
+			idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len);
+		}
+		break;
+
+	default:
+		BT_ERR("Unknown TLV type %d", config->type);
+		break;
+	}
+}
+
+static int rome_tlv_send_segment(struct hci_dev *hdev, int idx, int seg_size,
+				 const u8 *data)
+{
+	struct sk_buff *skb;
+	struct edl_event_hdr *edl;
+	struct tlv_seg_resp *tlv_resp;
+	u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2];
+	int err = 0;
+
+	BT_DBG("%s: Download segment #%d size %d", hdev->name, idx, seg_size);
+
+	cmd[0] = EDL_PATCH_TLV_REQ_CMD;
+	cmd[1] = seg_size;
+	memcpy(cmd + 2, data, seg_size);
+
+	skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd,
+				HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Failed to send TLV segment (%d)", hdev->name, err);
+		return err;
+	}
+
+	if (skb->len != sizeof(*edl) + sizeof(*tlv_resp)) {
+		BT_ERR("%s: TLV response size mismatch", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	edl = (struct edl_event_hdr *)(skb->data);
+	if (!edl || !edl->data) {
+		BT_ERR("%s: TLV with no header or no data", hdev->name);
+		err = -EILSEQ;
+		goto out;
+	}
+
+	tlv_resp = (struct tlv_seg_resp *)(edl->data);
+
+	if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+	    edl->rtype != EDL_TVL_DNLD_RES_EVT || tlv_resp->result != 0x00) {
+		BT_ERR("%s: TLV with error stat 0x%x rtype 0x%x (0x%x)",
+		       hdev->name, edl->cresp, edl->rtype, tlv_resp->result);
+		err = -EIO;
+	}
+
+out:
+	kfree_skb(skb);
+
+	return err;
+}
+
+static int rome_tlv_download_request(struct hci_dev *hdev,
+				     const struct firmware *fw)
+{
+	const u8 *buffer, *data;
+	int total_segment, remain_size;
+	int ret, i;
+
+	if (!fw || !fw->data)
+		return -EINVAL;
+
+	total_segment = fw->size / MAX_SIZE_PER_TLV_SEGMENT;
+	remain_size = fw->size % MAX_SIZE_PER_TLV_SEGMENT;
+
+	BT_DBG("%s: Total segment num %d remain size %d total size %zu",
+	       hdev->name, total_segment, remain_size, fw->size);
+
+	data = fw->data;
+	for (i = 0; i < total_segment; i++) {
+		buffer = data + i * MAX_SIZE_PER_TLV_SEGMENT;
+		ret = rome_tlv_send_segment(hdev, i, MAX_SIZE_PER_TLV_SEGMENT,
+					    buffer);
+		if (ret < 0)
+			return -EIO;
+	}
+
+	if (remain_size) {
+		buffer = data + total_segment * MAX_SIZE_PER_TLV_SEGMENT;
+		ret = rome_tlv_send_segment(hdev, total_segment, remain_size,
+					    buffer);
+		if (ret < 0)
+			return -EIO;
+	}
+
+	return 0;
+}
+
+static int rome_download_firmware(struct hci_dev *hdev,
+				  struct rome_config *config)
+{
+	const struct firmware *fw;
+	int ret;
+
+	BT_INFO("%s: ROME Downloading %s", hdev->name, config->fwname);
+
+	ret = request_firmware(&fw, config->fwname, &hdev->dev);
+	if (ret) {
+		BT_ERR("%s: Failed to request file: %s (%d)", hdev->name,
+		       config->fwname, ret);
+		return ret;
+	}
+
+	rome_tlv_check_data(config, fw);
+
+	ret = rome_tlv_download_request(hdev, fw);
+	if (ret) {
+		BT_ERR("%s: Failed to download file: %s (%d)", hdev->name,
+		       config->fwname, ret);
+	}
+
+	release_firmware(fw);
+
+	return ret;
+}
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	struct sk_buff *skb;
+	u8 cmd[9];
+	int err;
+
+	cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+	cmd[1] = 0x02; 			/* TAG ID */
+	cmd[2] = sizeof(bdaddr_t);	/* size */
+	memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+	skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd,
+				HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		BT_ERR("%s: Change address command failed (%d)",
+		       hdev->name, err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
+
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+	u32 rome_ver = 0;
+	struct rome_config config;
+	int err;
+
+	BT_DBG("%s: ROME setup on UART", hdev->name);
+
+	config.user_baud_rate = baudrate;
+
+	/* Get ROME version information */
+	err = rome_patch_ver_req(hdev, &rome_ver);
+	if (err < 0 || rome_ver == 0) {
+		BT_ERR("%s: Failed to get version 0x%x", hdev->name, err);
+		return err;
+	}
+
+	BT_INFO("%s: ROME controller version 0x%08x", hdev->name, rome_ver);
+
+	/* Download rampatch file */
+	config.type = TLV_TYPE_PATCH;
+	snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin",
+		 rome_ver);
+	err = rome_download_firmware(hdev, &config);
+	if (err < 0) {
+		BT_ERR("%s: Failed to download patch (%d)", hdev->name, err);
+		return err;
+	}
+
+	/* Download NVM configuration */
+	config.type = TLV_TYPE_NVM;
+	snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin",
+		 rome_ver);
+	err = rome_download_firmware(hdev, &config);
+	if (err < 0) {
+		BT_ERR("%s: Failed to download NVM (%d)", hdev->name, err);
+		return err;
+	}
+
+	/* Perform HCI reset */
+	err = rome_reset(hdev);
+	if (err < 0) {
+		BT_ERR("%s: Failed to run HCI_RESET (%d)", hdev->name, err);
+		return err;
+	}
+
+	BT_INFO("%s: ROME setup on UART is completed", hdev->name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_uart_setup_rome);
+
+MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
+MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
new file mode 100644
index 0000000..65e994b
--- /dev/null
+++ b/drivers/bluetooth/btqca.h
@@ -0,0 +1,135 @@
+/*
+ *  Bluetooth support for Qualcomm Atheros ROME chips
+ *
+ *  Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#define EDL_PATCH_CMD_OPCODE		(0xFC00)
+#define EDL_NVM_ACCESS_OPCODE		(0xFC0B)
+#define EDL_PATCH_CMD_LEN		(1)
+#define EDL_PATCH_VER_REQ_CMD		(0x19)
+#define EDL_PATCH_TLV_REQ_CMD		(0x1E)
+#define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
+#define MAX_SIZE_PER_TLV_SEGMENT	(243)
+
+#define EDL_CMD_REQ_RES_EVT		(0x00)
+#define EDL_PATCH_VER_RES_EVT		(0x19)
+#define EDL_APP_VER_RES_EVT		(0x02)
+#define EDL_TVL_DNLD_RES_EVT		(0x04)
+#define EDL_CMD_EXE_STATUS_EVT		(0x00)
+#define EDL_SET_BAUDRATE_RSP_EVT	(0x92)
+#define EDL_NVM_ACCESS_CODE_EVT		(0x0B)
+
+#define EDL_TAG_ID_HCI			(17)
+#define EDL_TAG_ID_DEEP_SLEEP		(27)
+
+enum qca_bardrate {
+	QCA_BAUDRATE_115200 	= 0,
+	QCA_BAUDRATE_57600,
+	QCA_BAUDRATE_38400,
+	QCA_BAUDRATE_19200,
+	QCA_BAUDRATE_9600,
+	QCA_BAUDRATE_230400,
+	QCA_BAUDRATE_250000,
+	QCA_BAUDRATE_460800,
+	QCA_BAUDRATE_500000,
+	QCA_BAUDRATE_720000,
+	QCA_BAUDRATE_921600,
+	QCA_BAUDRATE_1000000,
+	QCA_BAUDRATE_1250000,
+	QCA_BAUDRATE_2000000,
+	QCA_BAUDRATE_3000000,
+	QCA_BAUDRATE_4000000,
+	QCA_BAUDRATE_1600000,
+	QCA_BAUDRATE_3200000,
+	QCA_BAUDRATE_3500000,
+	QCA_BAUDRATE_AUTO 	= 0xFE,
+	QCA_BAUDRATE_RESERVED
+};
+
+enum rome_tlv_type {
+	TLV_TYPE_PATCH = 1,
+	TLV_TYPE_NVM
+};
+
+struct rome_config {
+	u8 type;
+	char fwname[64];
+	uint8_t user_baud_rate;
+};
+
+struct edl_event_hdr {
+	__u8 cresp;
+	__u8 rtype;
+	__u8 data[0];
+} __packed;
+
+struct rome_version {
+	__le32 product_id;
+	__le16 patch_ver;
+	__le16 rome_ver;
+	__le32 soc_id;
+} __packed;
+
+struct tlv_seg_resp {
+	__u8 result;
+} __packed;
+
+struct tlv_type_patch {
+	__le32 total_size;
+	__le32 data_length;
+	__u8   format_version;
+	__u8   signature;
+	__le16 reserved1;
+	__le16 product_id;
+	__le16 rom_build;
+	__le16 patch_version;
+	__le16 reserved2;
+	__le32 entry;
+} __packed;
+
+struct tlv_type_nvm {
+	__le16 tag_id;
+	__le16 tag_len;
+	__le32 reserve1;
+	__le32 reserve2;
+	__u8   data[0];
+} __packed;
+
+struct tlv_type_hdr {
+	__le32 type_len;
+	__u8   data[0];
+} __packed;
+
+#if IS_ENABLED(CONFIG_BT_QCA)
+
+int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate);
+
+#else
+
+static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int qca_uart_setup_rome(struct hci_dev *hdev, uint8_t baudrate)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
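
The MAX_SIZE_PER_TLV_SEGMENT define above (243 bytes) is what drives the whole/remainder split in rome_tlv_download_request(): the firmware image is pushed as full-size segments followed by one short trailing segment when the size is not an exact multiple. A standalone sketch of that split, with send_segment() as a hypothetical stand-in for rome_tlv_send_segment():

#include <stddef.h>

#define MAX_SIZE_PER_TLV_SEGMENT	243

/* Illustrative only: same segmentation as rome_tlv_download_request(),
 * with the HCI transfer replaced by a caller-supplied send_segment().
 */
static int send_tlv_segments(const unsigned char *data, size_t size,
			     int (*send_segment)(size_t idx, size_t len,
						 const unsigned char *buf))
{
	size_t total_segment = size / MAX_SIZE_PER_TLV_SEGMENT;
	size_t remain_size = size % MAX_SIZE_PER_TLV_SEGMENT;
	size_t i;

	for (i = 0; i < total_segment; i++)
		if (send_segment(i, MAX_SIZE_PER_TLV_SEGMENT,
				 data + i * MAX_SIZE_PER_TLV_SEGMENT) < 0)
			return -1;

	/* Trailing partial segment, if any */
	if (remain_size)
		return send_segment(total_segment, remain_size,
				    data + total_segment * MAX_SIZE_PER_TLV_SEGMENT);

	return 0;
}
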
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b4cf8d9..b6aceaf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -68,6 +68,9 @@
 	/* Generic Bluetooth AMP device */
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
 
+	/* Generic Bluetooth USB interface */
+	{ USB_INTERFACE_INFO(0xe0, 0x01, 0x01) },
+
 	/* Apple-specific (Broadcom) devices */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
 	  .driver_info = BTUSB_BCM_APPLE },
@@ -319,6 +322,9 @@
 	{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
 
+	/* Silicon Wave based devices */
+	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
+
 	{ }	/* Terminating entry */
 };
 
@@ -1575,7 +1581,7 @@
 
 	/* fw_patch_num indicates the version of patch the device currently
 	 * have. If there is no patch data in the device, it is always 0x00.
-	 * So, if it is other than 0x00, no need to patch the deivce again.
+	 * So, if it is other than 0x00, no need to patch the device again.
 	 */
 	if (ver->fw_patch_num) {
 		BT_INFO("%s: Intel device is already patched. patch num: %02x",
@@ -1878,51 +1884,6 @@
 	return -EILSEQ;
 }
 
-static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
-				   u32 plen, const void *param)
-{
-	while (plen > 0) {
-		struct sk_buff *skb;
-		u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
-
-		cmd_param[0] = fragment_type;
-		memcpy(cmd_param + 1, param, fragment_len);
-
-		skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
-				     cmd_param, HCI_INIT_TIMEOUT);
-		if (IS_ERR(skb))
-			return PTR_ERR(skb);
-
-		kfree_skb(skb);
-
-		plen -= fragment_len;
-		param += fragment_len;
-	}
-
-	return 0;
-}
-
-static void btusb_intel_version_info(struct hci_dev *hdev,
-				     struct intel_version *ver)
-{
-	const char *variant;
-
-	switch (ver->fw_variant) {
-	case 0x06:
-		variant = "Bootloader";
-		break;
-	case 0x23:
-		variant = "Firmware";
-		break;
-	default:
-		return;
-	}
-
-	BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
-		variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
-		ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
-}
-
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
 	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
@@ -1984,7 +1945,7 @@
 		return -EINVAL;
 	}
 
-	btusb_intel_version_info(hdev, ver);
+	btintel_version_info(hdev, ver);
 
 	/* The firmware variant determines if the device is in bootloader
 	 * mode or is running operational firmware. The value 0x06 identifies
@@ -2104,7 +2065,7 @@
 	/* Start the firmware download transaction with the Init fragment
 	 * represented by the 128 bytes of CSS header.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware header (%d)",
 		       hdev->name, err);
@@ -2114,7 +2075,7 @@
 	/* Send the 256 bytes of public key information from the firmware
 	 * as the PKey fragment.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware public key (%d)",
 		       hdev->name, err);
@@ -2124,7 +2085,7 @@
 	/* Send the 256 bytes of signature information from the firmware
 	 * as the Sign fragment.
 	 */
-	err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
 	if (err < 0) {
 		BT_ERR("%s: Failed to send firmware signature (%d)",
 		       hdev->name, err);
@@ -2139,7 +2100,7 @@
 
 		frag_len += sizeof(*cmd) + cmd->plen;
 
-		/* The paramter length of the secure send command requires
+		/* The parameter length of the secure send command requires
 		 * a 4 byte alignment. It happens so that the firmware file
 		 * contains proper Intel_NOP commands to align the fragments
 		 * as needed.
@@ -2148,8 +2109,7 @@
 		 * firmware data buffer as a single Data fragement.
 		 */
 		if (!(frag_len % 4)) {
-			err = btusb_intel_secure_send(hdev, 0x01, frag_len,
-						      fw_ptr);
+			err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
 			if (err < 0) {
 				BT_ERR("%s: Failed to send firmware data (%d)",
 				       hdev->name, err);
@@ -2291,39 +2251,6 @@
 	return 0;
 }
 
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
-	struct sk_buff *skb;
-	u8 type = 0x00;
-
-	BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
-	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Reset after hardware error failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return;
-	}
-	kfree_skb(skb);
-
-	skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
-	if (IS_ERR(skb)) {
-		BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
-		       hdev->name, PTR_ERR(skb));
-		return;
-	}
-
-	if (skb->len != 13) {
-		BT_ERR("%s: Exception info size mismatch", hdev->name);
-		kfree_skb(skb);
-		return;
-	}
-
-	BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
-	kfree_skb(skb);
-}
-
 static int btusb_shutdown_intel(struct hci_dev *hdev)
 {
 	struct sk_buff *skb;
@@ -2783,7 +2710,7 @@
 	if (id->driver_info & BTUSB_INTEL_NEW) {
 		hdev->send = btusb_send_frame_intel;
 		hdev->setup = btusb_setup_intel_new;
-		hdev->hw_error = btusb_hw_error_intel;
+		hdev->hw_error = btintel_hw_error;
 		hdev->set_bdaddr = btintel_set_bdaddr;
 		set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 	}
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 78e10f0..84135c5 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -182,9 +182,9 @@
 	int i;
 
 	printk(KERN_INFO "Bluetooth: Nokia control data =");
-	for (i = 0; i < skb->len; i++) {
+	for (i = 0; i < skb->len; i++)
 		printk(" %02x", skb->data[i]);
-	}
+
 	printk("\n");
 
 	/* transition to active state */
@@ -406,7 +406,7 @@
 		break;
 	default:
 		return -EILSEQ;
-	};
+	}
 
 	nsh.zero = 0;
 	nsh.len = skb->len;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 23523e1..835bfab 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -25,6 +25,12 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/tty.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -32,11 +38,37 @@
 #include "btbcm.h"
 #include "hci_uart.h"
 
-struct bcm_data {
-	struct sk_buff *rx_skb;
-	struct sk_buff_head txq;
+struct bcm_device {
+	struct list_head	list;
+
+	struct platform_device	*pdev;
+
+	const char		*name;
+	struct gpio_desc	*device_wakeup;
+	struct gpio_desc	*shutdown;
+
+	struct clk		*clk;
+	bool			clk_enabled;
+
+	u32			init_speed;
+
+#ifdef CONFIG_PM_SLEEP
+	struct hci_uart		*hu;
+	bool			is_suspended; /* suspend/resume flag */
+#endif
 };
 
+struct bcm_data {
+	struct sk_buff		*rx_skb;
+	struct sk_buff_head	txq;
+
+	struct bcm_device	*dev;
+};
+
+/* List of BCM BT UART devices */
+static DEFINE_SPINLOCK(bcm_device_lock);
+static LIST_HEAD(bcm_device_list);
+
 static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
 {
 	struct hci_dev *hdev = hu->hdev;
@@ -86,9 +118,41 @@
 	return 0;
 }
 
+/* bcm_device_exists should be protected by bcm_device_lock */
+static bool bcm_device_exists(struct bcm_device *device)
+{
+	struct list_head *p;
+
+	list_for_each(p, &bcm_device_list) {
+		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+		if (device == dev)
+			return true;
+	}
+
+	return false;
+}
+
+static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
+{
+	if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
+		clk_enable(dev->clk);
+
+	gpiod_set_value(dev->shutdown, powered);
+	gpiod_set_value(dev->device_wakeup, powered);
+
+	if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
+		clk_disable(dev->clk);
+
+	dev->clk_enabled = powered;
+
+	return 0;
+}
+
 static int bcm_open(struct hci_uart *hu)
 {
 	struct bcm_data *bcm;
+	struct list_head *p;
 
 	BT_DBG("hu %p", hu);
 
@@ -99,6 +163,30 @@
 	skb_queue_head_init(&bcm->txq);
 
 	hu->priv = bcm;
+
+	spin_lock(&bcm_device_lock);
+	list_for_each(p, &bcm_device_list) {
+		struct bcm_device *dev = list_entry(p, struct bcm_device, list);
+
+		/* Retrieve saved bcm_device based on parent of the
+		 * platform device (saved during device probe) and
+		 * parent of tty device used by hci_uart
+		 */
+		if (hu->tty->dev->parent == dev->pdev->dev.parent) {
+			bcm->dev = dev;
+			hu->init_speed = dev->init_speed;
+#ifdef CONFIG_PM_SLEEP
+			dev->hu = hu;
+#endif
+			break;
+		}
+	}
+
+	if (bcm->dev)
+		bcm_gpio_set_power(bcm->dev, true);
+
+	spin_unlock(&bcm_device_lock);
+
 	return 0;
 }
 
@@ -108,6 +196,16 @@
 
 	BT_DBG("hu %p", hu);
 
+	/* Protect bcm->dev against removal of the device or driver */
+	spin_lock(&bcm_device_lock);
+	if (bcm_device_exists(bcm->dev)) {
+		bcm_gpio_set_power(bcm->dev, false);
+#ifdef CONFIG_PM_SLEEP
+		bcm->dev->hu = NULL;
+#endif
+	}
+	spin_unlock(&bcm_device_lock);
+
 	skb_queue_purge(&bcm->txq);
 	kfree_skb(bcm->rx_skb);
 	kfree(bcm);
@@ -232,6 +330,204 @@
 	return skb_dequeue(&bcm->txq);
 }
 
+#ifdef CONFIG_PM_SLEEP
+/* Platform suspend callback */
+static int bcm_suspend(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("suspend (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	spin_lock(&bcm_device_lock);
+
+	if (!bdev->hu)
+		goto unlock;
+
+	if (!bdev->is_suspended) {
+		hci_uart_set_flow_control(bdev->hu, true);
+
+		/* Once this callback returns, driver suspends BT via GPIO */
+		bdev->is_suspended = true;
+	}
+
+	/* Suspend the device */
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, false);
+		BT_DBG("suspend, delaying 15 ms");
+		mdelay(15);
+	}
+
+unlock:
+	spin_unlock(&bcm_device_lock);
+
+	return 0;
+}
+
+/* Platform resume callback */
+static int bcm_resume(struct device *dev)
+{
+	struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev));
+
+	BT_DBG("resume (%p): is_suspended %d", bdev, bdev->is_suspended);
+
+	spin_lock(&bcm_device_lock);
+
+	if (!bdev->hu)
+		goto unlock;
+
+	if (bdev->device_wakeup) {
+		gpiod_set_value(bdev->device_wakeup, true);
+		BT_DBG("resume, delaying 15 ms");
+		mdelay(15);
+	}
+
+	/* When this callback executes, the device has woken up already */
+	if (bdev->is_suspended) {
+		bdev->is_suspended = false;
+
+		hci_uart_set_flow_control(bdev->hu, false);
+	}
+
+unlock:
+	spin_unlock(&bcm_device_lock);
+
+	return 0;
+}
+#endif
+
+static const struct acpi_gpio_params device_wakeup_gpios = { 0, 0, false };
+static const struct acpi_gpio_params shutdown_gpios = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_bcm_default_gpios[] = {
+	{ "device-wakeup-gpios", &device_wakeup_gpios, 1 },
+	{ "shutdown-gpios", &shutdown_gpios, 1 },
+	{ },
+};
+
+#ifdef CONFIG_ACPI
+static int bcm_resource(struct acpi_resource *ares, void *data)
+{
+	struct bcm_device *dev = data;
+
+	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+		struct acpi_resource_uart_serialbus *sb;
+
+		sb = &ares->data.uart_serial_bus;
+		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+			dev->init_speed = sb->default_baud_rate;
+	}
+
+	/* Always tell the ACPI core to skip this resource */
+	return 1;
+}
+
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	struct platform_device *pdev = dev->pdev;
+	const struct acpi_device_id *id;
+	struct acpi_device *adev;
+	LIST_HEAD(resources);
+	int ret;
+
+	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	/* Retrieve GPIO data */
+	dev->name = dev_name(&pdev->dev);
+	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+					acpi_bcm_default_gpios);
+	if (ret)
+		return ret;
+
+	dev->clk = devm_clk_get(&pdev->dev, NULL);
+
+	dev->device_wakeup = devm_gpiod_get_optional(&pdev->dev,
+						     "device-wakeup",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(dev->device_wakeup))
+		return PTR_ERR(dev->device_wakeup);
+
+	dev->shutdown = devm_gpiod_get_optional(&pdev->dev, "shutdown",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(dev->shutdown))
+		return PTR_ERR(dev->shutdown);
+
+	/* Make sure at least one of the GPIOs is defined and that
+	 * a name is specified for this instance
+	 */
+	if ((!dev->device_wakeup && !dev->shutdown) || !dev->name) {
+		dev_err(&pdev->dev, "invalid platform data\n");
+		return -EINVAL;
+	}
+
+	/* Retrieve UART ACPI info */
+	adev = ACPI_COMPANION(&dev->pdev->dev);
+	if (!adev)
+		return 0;
+
+	acpi_dev_get_resources(adev, &resources, bcm_resource, dev);
+
+	return 0;
+}
+#else
+static int bcm_acpi_probe(struct bcm_device *dev)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+static int bcm_probe(struct platform_device *pdev)
+{
+	struct bcm_device *dev;
+	struct acpi_device_id *pdata = pdev->dev.platform_data;
+	int ret;
+
+	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->pdev = pdev;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		ret = bcm_acpi_probe(dev);
+		if (ret)
+			return ret;
+	} else if (pdata) {
+		dev->name = pdata->id;
+	} else {
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	dev_info(&pdev->dev, "%s device registered.\n", dev->name);
+
+	/* Place this instance on the device list */
+	spin_lock(&bcm_device_lock);
+	list_add_tail(&dev->list, &bcm_device_list);
+	spin_unlock(&bcm_device_lock);
+
+	bcm_gpio_set_power(dev, false);
+
+	return 0;
+}
+
+static int bcm_remove(struct platform_device *pdev)
+{
+	struct bcm_device *dev = platform_get_drvdata(pdev);
+
+	spin_lock(&bcm_device_lock);
+	list_del(&dev->list);
+	spin_unlock(&bcm_device_lock);
+
+	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
+
+	dev_info(&pdev->dev, "%s device unregistered.\n", dev->name);
+
+	return 0;
+}
+
 static const struct hci_uart_proto bcm_proto = {
 	.id		= HCI_UART_BCM,
 	.name		= "BCM",
@@ -247,12 +543,38 @@
 	.dequeue	= bcm_dequeue,
 };
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id bcm_acpi_match[] = {
+	{ "BCM2E39", 0 },
+	{ "BCM2E67", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, bcm_acpi_match);
+#endif
+
+/* Platform suspend and resume callbacks */
+static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume);
+
+static struct platform_driver bcm_driver = {
+	.probe = bcm_probe,
+	.remove = bcm_remove,
+	.driver = {
+		.name = "hci_bcm",
+		.acpi_match_table = ACPI_PTR(bcm_acpi_match),
+		.pm = &bcm_pm_ops,
+	},
+};
+
 int __init bcm_init(void)
 {
+	platform_driver_register(&bcm_driver);
+
 	return hci_uart_register_proto(&bcm_proto);
 }
 
 int __exit bcm_deinit(void)
 {
+	platform_driver_unregister(&bcm_driver);
+
 	return hci_uart_unregister_proto(&bcm_proto);
 }
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 57faddc..eec3f28 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -223,8 +223,7 @@
 			switch ((&pkts[i])->lsize) {
 			case 0:
 				/* No variable data length */
-				(&pkts[i])->recv(hdev, skb);
-				skb = NULL;
+				dlen = 0;
 				break;
 			case 1:
 				/* Single octet variable length */
@@ -252,6 +251,12 @@
 				kfree_skb(skb);
 				return ERR_PTR(-EILSEQ);
 			}
+
+			if (!dlen) {
+				/* No more data, complete frame */
+				(&pkts[i])->recv(hdev, skb);
+				skb = NULL;
+			}
 		} else {
 			/* Complete frame */
 			(&pkts[i])->recv(hdev, skb);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 3455cec..b35b238 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -75,7 +75,7 @@
 	size_t			rx_pending;	/* Expecting more bytes */
 	u8			rx_ack;		/* Last ack number received */
 
-	int			(*rx_func) (struct hci_uart *hu, u8 c);
+	int			(*rx_func)(struct hci_uart *hu, u8 c);
 
 	struct timer_list	timer;		/* Retransmission timer */
 
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 5dd07bf..cf07d11 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -24,8 +24,864 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/tty.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/acpi.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
+#include "btintel.h"
+
+#define STATE_BOOTLOADER	0
+#define STATE_DOWNLOADING	1
+#define STATE_FIRMWARE_LOADED	2
+#define STATE_FIRMWARE_FAILED	3
+#define STATE_BOOTING		4
+
+struct intel_device {
+	struct list_head list;
+	struct platform_device *pdev;
+	struct gpio_desc *reset;
+};
+
+static LIST_HEAD(intel_device_list);
+static DEFINE_SPINLOCK(intel_device_list_lock);
+
+struct intel_data {
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	unsigned long flags;
+};
+
+static u8 intel_convert_speed(unsigned int speed)
+{
+	switch (speed) {
+	case 9600:
+		return 0x00;
+	case 19200:
+		return 0x01;
+	case 38400:
+		return 0x02;
+	case 57600:
+		return 0x03;
+	case 115200:
+		return 0x04;
+	case 230400:
+		return 0x05;
+	case 460800:
+		return 0x06;
+	case 921600:
+		return 0x07;
+	case 1843200:
+		return 0x08;
+	case 3250000:
+		return 0x09;
+	case 2000000:
+		return 0x0a;
+	case 3000000:
+		return 0x0b;
+	default:
+		return 0xff;
+	}
+}
+
+static int intel_wait_booting(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	int err;
+
+	err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
+				  TASK_INTERRUPTIBLE,
+				  msecs_to_jiffies(1000));
+
+	if (err == 1) {
+		BT_ERR("%s: Device boot interrupted", hu->hdev->name);
+		return -EINTR;
+	}
+
+	if (err) {
+		BT_ERR("%s: Device boot timeout", hu->hdev->name);
+		return -ETIMEDOUT;
+	}
+
+	return err;
+}
+
+static int intel_set_power(struct hci_uart *hu, bool powered)
+{
+	struct list_head *p;
+	int err = -ENODEV;
+
+	spin_lock(&intel_device_list_lock);
+
+	list_for_each(p, &intel_device_list) {
+		struct intel_device *idev = list_entry(p, struct intel_device,
+						       list);
+
+		/* tty device and pdev device should share the same parent
+		 * which is the UART port.
+		 */
+		if (hu->tty->dev->parent != idev->pdev->dev.parent)
+			continue;
+
+		if (!idev->reset) {
+			err = -ENOTSUPP;
+			break;
+		}
+
+		BT_INFO("hu %p, Switching compatible pm device (%s) to %u",
+			hu, dev_name(&idev->pdev->dev), powered);
+
+		gpiod_set_value(idev->reset, powered);
+	}
+
+	spin_unlock(&intel_device_list_lock);
+
+	return err;
+}
+
+static int intel_open(struct hci_uart *hu)
+{
+	struct intel_data *intel;
+
+	BT_DBG("hu %p", hu);
+
+	intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+	if (!intel)
+		return -ENOMEM;
+
+	skb_queue_head_init(&intel->txq);
+
+	hu->priv = intel;
+
+	if (!intel_set_power(hu, true))
+		set_bit(STATE_BOOTING, &intel->flags);
+
+	return 0;
+}
+
+static int intel_close(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	intel_set_power(hu, false);
+
+	skb_queue_purge(&intel->txq);
+	kfree_skb(intel->rx_skb);
+	kfree(intel);
+
+	hu->priv = NULL;
+	return 0;
+}
+
+static int intel_flush(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+
+	BT_DBG("hu %p", hu);
+
+	skb_queue_purge(&intel->txq);
+
+	return 0;
+}
+
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+	struct sk_buff *skb;
+	struct hci_event_hdr *hdr;
+	struct hci_ev_cmd_complete *evt;
+
+	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+	hdr->evt = HCI_EV_CMD_COMPLETE;
+	hdr->plen = sizeof(*evt) + 1;
+
+	evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+	evt->ncmd = 0x01;
+	evt->opcode = cpu_to_le16(opcode);
+
+	*skb_put(skb, 1) = 0x00;
+
+	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+	return hci_recv_frame(hdev, skb);
+}
+
+static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+	struct intel_data *intel = hu->priv;
+	struct hci_dev *hdev = hu->hdev;
+	u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 };
+	struct sk_buff *skb;
+	int err;
+
+	/* This can be the first command sent to the chip, check
+	 * that the controller is ready.
+	 */
+	err = intel_wait_booting(hu);
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	/* In case of timeout, try to continue anyway */
+	if (err && err != -ETIMEDOUT)
+		return err;
+
+	BT_INFO("%s: Change controller speed to %d", hdev->name, speed);
+
+	speed_cmd[3] = intel_convert_speed(speed);
+	if (speed_cmd[3] == 0xff) {
+		BT_ERR("%s: Unsupported speed", hdev->name);
+		return -EINVAL;
+	}
+
+	/* Device will not accept speed change if Intel version has not been
+	 * previously requested.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel version information failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+	kfree_skb(skb);
+
+	skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL);
+	if (!skb) {
+		BT_ERR("%s: Failed to allocate memory for baudrate packet",
+		       hdev->name);
+		return -ENOMEM;
+	}
+
+	memcpy(skb_put(skb, sizeof(speed_cmd)), speed_cmd, sizeof(speed_cmd));
+	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+	hci_uart_set_flow_control(hu, true);
+
+	skb_queue_tail(&intel->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	/* wait 100ms to change baudrate on controller side */
+	msleep(100);
+
+	hci_uart_set_baudrate(hu, speed);
+	hci_uart_set_flow_control(hu, false);
+
+	return 0;
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
+	static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+					  0x00, 0x08, 0x04, 0x00 };
+	struct intel_data *intel = hu->priv;
+	struct hci_dev *hdev = hu->hdev;
+	struct sk_buff *skb;
+	struct intel_version *ver;
+	struct intel_boot_params *params;
+	const struct firmware *fw;
+	const u8 *fw_ptr;
+	char fwname[64];
+	u32 frag_len;
+	ktime_t calltime, delta, rettime;
+	unsigned long long duration;
+	unsigned int init_speed, oper_speed;
+	int speed_change = 0;
+	int err;
+
+	BT_DBG("%s", hdev->name);
+
+	hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+	calltime = ktime_get();
+
+	if (hu->init_speed)
+		init_speed = hu->init_speed;
+	else
+		init_speed = hu->proto->init_speed;
+
+	if (hu->oper_speed)
+		oper_speed = hu->oper_speed;
+	else
+		oper_speed = hu->proto->oper_speed;
+
+	if (oper_speed && init_speed && oper_speed != init_speed)
+		speed_change = 1;
+
+	/* Check that the controller is ready */
+	err = intel_wait_booting(hu);
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	/* In case of timeout, try to continue anyway */
+	if (err && err != -ETIMEDOUT)
+		return err;
+
+	set_bit(STATE_BOOTLOADER, &intel->flags);
+
+	/* Read the Intel version information to determine if the device
+	 * is in bootloader mode or if it already has operational firmware
+	 * loaded.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel version information failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*ver)) {
+		BT_ERR("%s: Intel version event size mismatch", hdev->name);
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	ver = (struct intel_version *)skb->data;
+	if (ver->status) {
+		BT_ERR("%s: Intel version command failure (%02x)",
+		       hdev->name, ver->status);
+		err = -bt_to_errno(ver->status);
+		kfree_skb(skb);
+		return err;
+	}
+
+	/* The hardware platform number has a fixed value of 0x37 and
+	 * for now only accept this single value.
+	 */
+	if (ver->hw_platform != 0x37) {
+		BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+		       hdev->name, ver->hw_platform);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	/* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+	 * supported by this firmware loading method. This check has been
+	 * put in place to ensure correct forward compatibility options
+	 * when newer hardware variants come along.
+	 */
+	if (ver->hw_variant != 0x0b) {
+		BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+		       hdev->name, ver->hw_variant);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	btintel_version_info(hdev, ver);
+
+	/* The firmware variant determines if the device is in bootloader
+	 * mode or is running operational firmware. The value 0x06 identifies
+	 * the bootloader and the value 0x23 identifies the operational
+	 * firmware.
+	 *
+	 * When the operational firmware is already present, then only
+	 * the check for valid Bluetooth device address is needed. This
+	 * determines if the device will be added as configured or
+	 * unconfigured controller.
+	 *
+	 * It is not possible to use the Secure Boot Parameters in this
+	 * case since that command is only available in bootloader mode.
+	 */
+	if (ver->fw_variant == 0x23) {
+		kfree_skb(skb);
+		clear_bit(STATE_BOOTLOADER, &intel->flags);
+		btintel_check_bdaddr(hdev);
+		return 0;
+	}
+
+	/* If the device is not in bootloader mode, then the only possible
+	 * choice is to return an error and abort the device initialization.
+	 */
+	if (ver->fw_variant != 0x06) {
+		BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+		       hdev->name, ver->fw_variant);
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	kfree_skb(skb);
+
+	/* Read the secure boot parameters to identify the operating
+	 * details of the bootloader.
+	 */
+	skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+		       hdev->name, PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	if (skb->len != sizeof(*params)) {
+		BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+		kfree_skb(skb);
+		return -EILSEQ;
+	}
+
+	params = (struct intel_boot_params *)skb->data;
+	if (params->status) {
+		BT_ERR("%s: Intel boot parameters command failure (%02x)",
+		       hdev->name, params->status);
+		err = -bt_to_errno(params->status);
+		kfree_skb(skb);
+		return err;
+	}
+
+	BT_INFO("%s: Device revision is %u", hdev->name,
+		le16_to_cpu(params->dev_revid));
+
+	BT_INFO("%s: Secure boot is %s", hdev->name,
+		params->secure_boot ? "enabled" : "disabled");
+
+	BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+		params->min_fw_build_nn, params->min_fw_build_cw,
+		2000 + params->min_fw_build_yy);
+
+	/* It is required that every single firmware fragment is acknowledged
+	 * with a command complete event. If the boot parameters indicate
+	 * that this bootloader does not send them, then abort the setup.
+	 */
+	if (params->limited_cce != 0x00) {
+		BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+		       hdev->name, params->limited_cce);
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	/* If the OTP has no valid Bluetooth device address, then there will
+	 * also be no valid address for the operational firmware.
+	 */
+	if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+		BT_INFO("%s: No device address configured", hdev->name);
+		set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+	}
+
+	/* With this Intel bootloader only the hardware variant and device
+	 * revision information are used to select the right firmware.
+	 *
+	 * Currently this bootloader support is limited to hardware variant
+	 * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+	 */
+	snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+		 le16_to_cpu(params->dev_revid));
+
+	err = request_firmware(&fw, fwname, &hdev->dev);
+	if (err < 0) {
+		BT_ERR("%s: Failed to load Intel firmware file (%d)",
+		       hdev->name, err);
+		kfree_skb(skb);
+		return err;
+	}
+
+	BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+	kfree_skb(skb);
+
+	if (fw->size < 644) {
+		BT_ERR("%s: Invalid size of firmware file (%zu)",
+		       hdev->name, fw->size);
+		err = -EBADF;
+		goto done;
+	}
+
+	set_bit(STATE_DOWNLOADING, &intel->flags);
+
+	/* Start the firmware download transaction with the Init fragment
+	 * represented by the 128 bytes of CSS header.
+	 */
+	err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware header (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of public key information from the firmware
+	 * as the PKey fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware public key (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	/* Send the 256 bytes of signature information from the firmware
+	 * as the Sign fragment.
+	 */
+	err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+	if (err < 0) {
+		BT_ERR("%s: Failed to send firmware signature (%d)",
+		       hdev->name, err);
+		goto done;
+	}
+
+	fw_ptr = fw->data + 644;
+	frag_len = 0;
+
+	while (fw_ptr - fw->data < fw->size) {
+		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+		frag_len += sizeof(*cmd) + cmd->plen;
+
+		BT_DBG("%s: patching %td/%zu", hdev->name,
+		       (fw_ptr - fw->data), fw->size);
+
+		/* The parameter length of the secure send command requires
+		 * a 4 byte alignment. It happens so that the firmware file
+		 * contains proper Intel_NOP commands to align the fragments
+		 * as needed.
+		 *
+		 * Send set of commands with 4 byte alignment from the
+		 * firmware data buffer as a single Data fragment.
+		 */
+		if (frag_len % 4)
+			continue;
+
+		/* Send each command from the firmware data buffer as
+		 * a single Data fragment.
+		 */
+		err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+		if (err < 0) {
+			BT_ERR("%s: Failed to send firmware data (%d)",
+			       hdev->name, err);
+			goto done;
+		}
+
+		fw_ptr += frag_len;
+		frag_len = 0;
+	}
+
+	set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+	BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+	/* Before switching the device into operational mode and with that
+	 * booting the loaded firmware, wait for the bootloader notification
+	 * that all fragments have been successfully received.
+	 *
+	 * When the event processing receives the notification, then the
+	 * STATE_DOWNLOADING flag will be cleared.
+	 *
+	 * The firmware loading should not take longer than 5 seconds
+	 * and thus just timeout if that happens and fail the setup
+	 * of this device.
+	 */
+	err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
+				  TASK_INTERRUPTIBLE,
+				  msecs_to_jiffies(5000));
+	if (err == 1) {
+		BT_ERR("%s: Firmware loading interrupted", hdev->name);
+		err = -EINTR;
+		goto done;
+	}
+
+	if (err) {
+		BT_ERR("%s: Firmware loading timeout", hdev->name);
+		err = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
+		BT_ERR("%s: Firmware loading failed", hdev->name);
+		err = -ENOEXEC;
+		goto done;
+	}
+
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+	BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+	release_firmware(fw);
+
+	if (err < 0)
+		return err;
+
+	/* We need to restore the default speed before Intel reset */
+	if (speed_change) {
+		err = intel_set_baudrate(hu, init_speed);
+		if (err)
+			return err;
+	}
+
+	calltime = ktime_get();
+
+	set_bit(STATE_BOOTING, &intel->flags);
+
+	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+			     HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	kfree_skb(skb);
+
+	/* The bootloader will not indicate when the device is ready. This
+	 * is done by the operational firmware sending bootup notification.
+	 *
+	 * Booting into operational firmware should not take longer than
+	 * 1 second. However if that happens, then just fail the setup
+	 * since something went wrong.
+	 */
+	BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+	err = intel_wait_booting(hu);
+	if (err)
+		return err;
+
+	clear_bit(STATE_BOOTING, &intel->flags);
+
+	rettime = ktime_get();
+	delta = ktime_sub(rettime, calltime);
+	duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+	BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+	kfree_skb(skb);
+
+	if (speed_change) {
+		err = intel_set_baudrate(hu, oper_speed);
+		if (err)
+			return err;
+	}
+
+	BT_INFO("%s: Setup complete", hdev->name);
+
+	clear_bit(STATE_BOOTLOADER, &intel->flags);
+
+	return 0;
+}
+
+static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct intel_data *intel = hu->priv;
+	struct hci_event_hdr *hdr;
+
+	if (!test_bit(STATE_BOOTLOADER, &intel->flags) &&
+	    !test_bit(STATE_BOOTING, &intel->flags))
+		goto recv;
+
+	hdr = (void *)skb->data;
+
+	/* When the firmware loading completes the device sends
+	 * out a vendor specific event indicating the result of
+	 * the firmware loading.
+	 */
+	if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+	    skb->data[2] == 0x06) {
+		if (skb->data[3] != 0x00)
+			set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
+
+		if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
+		    test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_DOWNLOADING);
+		}
+
+	/* When switching to the operational firmware the device
+	 * sends a vendor specific event indicating that the bootup
+	 * completed.
+	 */
+	} else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+		   skb->data[2] == 0x02) {
+		if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
+			smp_mb__after_atomic();
+			wake_up_bit(&intel->flags, STATE_BOOTING);
+		}
+	}
+recv:
+	return hci_recv_frame(hdev, skb);
+}
+
+static const struct h4_recv_pkt intel_recv_pkts[] = {
+	{ H4_RECV_ACL,   .recv = hci_recv_frame },
+	{ H4_RECV_SCO,   .recv = hci_recv_frame },
+	{ H4_RECV_EVENT, .recv = intel_recv_event },
+};
+
+static int intel_recv(struct hci_uart *hu, const void *data, int count)
+{
+	struct intel_data *intel = hu->priv;
+
+	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+		return -EUNATCH;
+
+	intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+				    intel_recv_pkts,
+				    ARRAY_SIZE(intel_recv_pkts));
+	if (IS_ERR(intel->rx_skb)) {
+		int err = PTR_ERR(intel->rx_skb);
+		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		intel->rx_skb = NULL;
+		return err;
+	}
+
+	return count;
+}
+
+static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+	struct intel_data *intel = hu->priv;
+
+	BT_DBG("hu %p skb %p", hu, skb);
+
+	skb_queue_tail(&intel->txq, skb);
+
+	return 0;
+}
+
+static struct sk_buff *intel_dequeue(struct hci_uart *hu)
+{
+	struct intel_data *intel = hu->priv;
+	struct sk_buff *skb;
+
+	skb = skb_dequeue(&intel->txq);
+	if (!skb)
+		return skb;
+
+	if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
+	    (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
+		struct hci_command_hdr *cmd = (void *)skb->data;
+		__u16 opcode = le16_to_cpu(cmd->opcode);
+
+		/* When the 0xfc01 command is issued to boot into
+		 * the operational firmware, it will actually not
+		 * send a command complete event. To keep the flow
+		 * control working inject that event here.
+		 */
+		if (opcode == 0xfc01)
+			inject_cmd_complete(hu->hdev, opcode);
+	}
+
+	/* Prepend skb with frame type */
+	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+	return skb;
+}
+
+static const struct hci_uart_proto intel_proto = {
+	.id		= HCI_UART_INTEL,
+	.name		= "Intel",
+	.init_speed	= 115200,
+	.oper_speed	= 3000000,
+	.open		= intel_open,
+	.close		= intel_close,
+	.flush		= intel_flush,
+	.setup		= intel_setup,
+	.set_baudrate	= intel_set_baudrate,
+	.recv		= intel_recv,
+	.enqueue	= intel_enqueue,
+	.dequeue	= intel_dequeue,
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id intel_acpi_match[] = {
+	{ "INT33E1", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
+
+static int intel_acpi_probe(struct intel_device *idev)
+{
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(intel_acpi_match, &idev->pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	return 0;
+}
+#else
+static int intel_acpi_probe(struct intel_device *idev)
+{
+	return -ENODEV;
+}
+#endif
+
+static int intel_probe(struct platform_device *pdev)
+{
+	struct intel_device *idev;
+
+	idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
+	if (!idev)
+		return -ENOMEM;
+
+	idev->pdev = pdev;
+
+	if (ACPI_HANDLE(&pdev->dev)) {
+		int err = intel_acpi_probe(idev);
+		if (err)
+			return err;
+	} else {
+		return -ENODEV;
+	}
+
+	idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset",
+					      GPIOD_OUT_LOW);
+	if (IS_ERR(idev->reset)) {
+		dev_err(&pdev->dev, "Unable to retrieve gpio\n");
+		return PTR_ERR(idev->reset);
+	}
+
+	platform_set_drvdata(pdev, idev);
+
+	/* Place this instance on the device list */
+	spin_lock(&intel_device_list_lock);
+	list_add_tail(&idev->list, &intel_device_list);
+	spin_unlock(&intel_device_list_lock);
+
+	dev_info(&pdev->dev, "registered.\n");
+
+	return 0;
+}
+
+static int intel_remove(struct platform_device *pdev)
+{
+	struct intel_device *idev = platform_get_drvdata(pdev);
+
+	spin_lock(&intel_device_list_lock);
+	list_del(&idev->list);
+	spin_unlock(&intel_device_list_lock);
+
+	dev_info(&pdev->dev, "unregistered.\n");
+
+	return 0;
+}
+
+static struct platform_driver intel_driver = {
+	.probe = intel_probe,
+	.remove = intel_remove,
+	.driver = {
+		.name = "hci_intel",
+		.acpi_match_table = ACPI_PTR(intel_acpi_match),
+	},
+};
+
+int __init intel_init(void)
+{
+	platform_driver_register(&intel_driver);
+
+	return hci_uart_register_proto(&intel_proto);
+}
+
+int __exit intel_deinit(void)
+{
+	platform_driver_unregister(&intel_driver);
+
+	return hci_uart_unregister_proto(&intel_proto);
+}
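
A note on the firmware walk in intel_setup() above: commands from the .sfi file are accumulated until the running fragment length reaches a 4-byte boundary (the file carries Intel_NOP commands to guarantee this), and only then handed to btintel_secure_send() as one Data fragment. A standalone sketch of that accumulation under the same on-disk layout assumption; send_fragment() is a hypothetical stand-in for the secure send call:

#include <stddef.h>

/* 3-byte HCI command header layout used when walking the firmware file */
struct fw_cmd_hdr {
	unsigned short opcode;
	unsigned char plen;
} __attribute__((packed));

/* Illustrative only: accumulate commands until the fragment length is
 * 4-byte aligned, then push it, as intel_setup() does for Data fragments.
 */
static int send_fw_fragments(const unsigned char *data, size_t size,
			     int (*send_fragment)(const unsigned char *buf,
						  size_t len))
{
	const unsigned char *ptr = data;
	size_t frag_len = 0;

	while ((size_t)(ptr - data) < size) {
		const struct fw_cmd_hdr *cmd =
			(const struct fw_cmd_hdr *)(ptr + frag_len);

		frag_len += sizeof(*cmd) + cmd->plen;

		if (frag_len % 4)	/* keep accumulating until aligned */
			continue;

		if (send_fragment(ptr, frag_len) < 0)
			return -1;

		ptr += frag_len;
		frag_len = 0;
	}

	return 0;
}
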
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 177dd69..0d5a05a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -770,7 +770,7 @@
 
 	/* Register the tty discipline */
 
-	memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
+	memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
 	hci_uart_ldisc.magic		= TTY_LDISC_MAGIC;
 	hci_uart_ldisc.name		= "n_hci";
 	hci_uart_ldisc.open		= hci_uart_tty_open;
@@ -804,9 +804,15 @@
 #ifdef CONFIG_BT_HCIUART_3WIRE
 	h5_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+	intel_init();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
 	bcm_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+	qca_init();
+#endif
 
 	return 0;
 }
@@ -830,9 +836,15 @@
 #ifdef CONFIG_BT_HCIUART_3WIRE
 	h5_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+	intel_deinit();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
 	bcm_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_QCA
+	qca_deinit();
+#endif
 
 	/* Release tty registration of line discipline */
 	err = tty_unregister_ldisc(N_HCI);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
new file mode 100644
index 0000000..6b9b912
--- /dev/null
+++ b/drivers/bluetooth/hci_qca.c
@@ -0,0 +1,969 @@
+/*
+ *  Bluetooth Software UART Qualcomm protocol
+ *
+ *  HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
+ *  protocol extension to H4.
+ *
+ *  Copyright (C) 2007 Texas Instruments, Inc.
+ *  Copyright (c) 2010, 2012 The Linux Foundation. All rights reserved.
+ *
+ *  Acknowledgements:
+ *  This file is based on hci_ll.c, which was...
+ *  Written by Ohad Ben-Cohen <ohad@bencohen.org>
+ *  which was in turn based on hci_h4.c, which was written
+ *  by Maxim Krasnyansky and Marcel Holtmann.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+#include "btqca.h"
+
+/* HCI_IBS protocol messages */
+#define HCI_IBS_SLEEP_IND	0xFE
+#define HCI_IBS_WAKE_IND	0xFD
+#define HCI_IBS_WAKE_ACK	0xFC
+#define HCI_MAX_IBS_SIZE 	10
+
+/* Controller states */
+#define STATE_IN_BAND_SLEEP_ENABLED	1
+
+#define IBS_WAKE_RETRANS_TIMEOUT_MS 	100
+#define IBS_TX_IDLE_TIMEOUT_MS 		2000
+#define BAUDRATE_SETTLE_TIMEOUT_MS	300
+
+/* HCI_IBS transmit side sleep protocol states */
+enum tx_ibs_states {
+	HCI_IBS_TX_ASLEEP,
+	HCI_IBS_TX_WAKING,
+	HCI_IBS_TX_AWAKE,
+};
+
+/* HCI_IBS receive side sleep protocol states */
+enum rx_states {
+	HCI_IBS_RX_ASLEEP,
+	HCI_IBS_RX_AWAKE,
+};
+
+/* HCI_IBS transmit and receive side clock state vote */
+enum hci_ibs_clock_state_vote {
+	HCI_IBS_VOTE_STATS_UPDATE,
+	HCI_IBS_TX_VOTE_CLOCK_ON,
+	HCI_IBS_TX_VOTE_CLOCK_OFF,
+	HCI_IBS_RX_VOTE_CLOCK_ON,
+	HCI_IBS_RX_VOTE_CLOCK_OFF,
+};
+
+struct qca_data {
+	struct hci_uart *hu;
+	struct sk_buff *rx_skb;
+	struct sk_buff_head txq;
+	struct sk_buff_head tx_wait_q;	/* HCI_IBS wait queue	*/
+	spinlock_t hci_ibs_lock;	/* HCI_IBS state lock	*/
+	u8 tx_ibs_state;	/* HCI_IBS transmit side power state */
+	u8 rx_ibs_state;	/* HCI_IBS receive side power state */
+	u32 tx_vote;		/* Clock must be on for TX */
+	u32 rx_vote;		/* Clock must be on for RX */
+	struct timer_list tx_idle_timer;
+	u32 tx_idle_delay;
+	struct timer_list wake_retrans_timer;
+	u32 wake_retrans;
+	struct workqueue_struct *workqueue;
+	struct work_struct ws_awake_rx;
+	struct work_struct ws_awake_device;
+	struct work_struct ws_rx_vote_off;
+	struct work_struct ws_tx_vote_off;
+	unsigned long flags;
+
+	/* For debugging purpose */
+	u64 ibs_sent_wacks;
+	u64 ibs_sent_slps;
+	u64 ibs_sent_wakes;
+	u64 ibs_recv_wacks;
+	u64 ibs_recv_slps;
+	u64 ibs_recv_wakes;
+	u64 vote_last_jif;
+	u32 vote_on_ms;
+	u32 vote_off_ms;
+	u64 tx_votes_on;
+	u64 rx_votes_on;
+	u64 tx_votes_off;
+	u64 rx_votes_off;
+	u64 votes_on;
+	u64 votes_off;
+};
+
+static void __serial_clock_on(struct tty_struct *tty)
+{
+	/* TODO: Some chipsets require the UART clock to be enabled on the
+	 * host side to save power, which may need manual work.
+	 * Put code to turn the UART clock on here if needed
+	 */
+}
+
+static void __serial_clock_off(struct tty_struct *tty)
+{
+	/* TODO: Some chipsets require the UART clock to be disabled on the
+	 * host side to save power, which may need manual work.
+	 * Put code to turn the UART clock off here if needed
+	 */
+}
+
+/* serial_clock_vote needs to be called with the ibs lock held */
+static void serial_clock_vote(unsigned long vote, struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+	unsigned int diff;
+
+	bool old_vote = (qca->tx_vote | qca->rx_vote);
+	bool new_vote;
+
+	switch (vote) {
+	case HCI_IBS_VOTE_STATS_UPDATE:
+		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+		if (old_vote)
+			qca->vote_off_ms += diff;
+		else
+			qca->vote_on_ms += diff;
+		return;
+
+	case HCI_IBS_TX_VOTE_CLOCK_ON:
+		qca->tx_vote = true;
+		qca->tx_votes_on++;
+		new_vote = true;
+		break;
+
+	case HCI_IBS_RX_VOTE_CLOCK_ON:
+		qca->rx_vote = true;
+		qca->rx_votes_on++;
+		new_vote = true;
+		break;
+
+	case HCI_IBS_TX_VOTE_CLOCK_OFF:
+		qca->tx_vote = false;
+		qca->tx_votes_off++;
+		new_vote = qca->rx_vote | qca->tx_vote;
+		break;
+
+	case HCI_IBS_RX_VOTE_CLOCK_OFF:
+		qca->rx_vote = false;
+		qca->rx_votes_off++;
+		new_vote = qca->rx_vote | qca->tx_vote;
+		break;
+
+	default:
+		BT_ERR("Voting irregularity");
+		return;
+	}
+
+	if (new_vote != old_vote) {
+		if (new_vote)
+			__serial_clock_on(hu->tty);
+		else
+			__serial_clock_off(hu->tty);
+
+		BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false",
+		       vote ? "true" : "false");
+
+		diff = jiffies_to_msecs(jiffies - qca->vote_last_jif);
+
+		if (new_vote) {
+			qca->votes_on++;
+			qca->vote_off_ms += diff;
+		} else {
+			qca->votes_off++;
+			qca->vote_on_ms += diff;
+		}
+		qca->vote_last_jif = jiffies;
+	}
+}
+
+/* Builds and sends an HCI_IBS command packet.
+ * These are very simple packets with only 1 cmd byte.
+ */
+static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu)
+{
+	int err = 0;
+	struct sk_buff *skb = NULL;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd);
+
+	skb = bt_skb_alloc(1, GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("Failed to allocate memory for HCI_IBS packet");
+		return -ENOMEM;
+	}
+
+	/* Assign HCI_IBS type */
+	*skb_put(skb, 1) = cmd;
+
+	skb_queue_tail(&qca->txq, skb);
+
+	return err;
+}
+
+static void qca_wq_awake_device(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_awake_device);
+	struct hci_uart *hu = qca->hu;
+	unsigned long retrans_delay;
+
+	BT_DBG("hu %p wq awake device", hu);
+
+	/* Vote for serial clock */
+	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
+
+	spin_lock(&qca->hci_ibs_lock);
+
+	/* Send wake indication to device */
+	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
+		BT_ERR("Failed to send WAKE to device");
+
+	qca->ibs_sent_wakes++;
+
+	/* Start retransmit timer */
+	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+
+	spin_unlock(&qca->hci_ibs_lock);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_awake_rx(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_awake_rx);
+	struct hci_uart *hu = qca->hu;
+
+	BT_DBG("hu %p wq awake rx", hu);
+
+	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+
+	spin_lock(&qca->hci_ibs_lock);
+	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
+
+	/* Always acknowledge device wake up,
+	 * sending an IBS message doesn't count as TX ON.
+	 */
+	if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
+		BT_ERR("Failed to acknowledge device wake up");
+
+	qca->ibs_sent_wacks++;
+
+	spin_unlock(&qca->hci_ibs_lock);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_rx_vote_off);
+	struct hci_uart *hu = qca->hu;
+
+	BT_DBG("hu %p rx clock vote off", hu);
+
+	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
+}
+
+static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
+{
+	struct qca_data *qca = container_of(work, struct qca_data,
+					    ws_tx_vote_off);
+	struct hci_uart *hu = qca->hu;
+
+	BT_DBG("hu %p tx clock vote off", hu);
+
+	/* Run HCI tx handling unlocked */
+	hci_uart_tx_wakeup(hu);
+
+	/* Now that the message is queued to the tty driver, vote for the tty
+	 * clocks off. The tty driver holds the clocks off until TX is done.
+	 */
+	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
+}
+
+static void hci_ibs_tx_idle_timeout(unsigned long arg)
+{
+	struct hci_uart *hu = (struct hci_uart *)arg;
+	struct qca_data *qca = hu->priv;
+	unsigned long flags;
+
+	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);
+
+	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+				 flags, SINGLE_DEPTH_NESTING);
+
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		/* TX_IDLE, go to SLEEP */
+		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
+			BT_ERR("Failed to send SLEEP to device");
+			break;
+		}
+		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+		qca->ibs_sent_slps++;
+		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+	case HCI_IBS_TX_WAKING:
+		/* Fall through */
+
+	default:
+		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+static void hci_ibs_wake_retrans_timeout(unsigned long arg)
+{
+	struct hci_uart *hu = (struct hci_uart *)arg;
+	struct qca_data *qca = hu->priv;
+	unsigned long flags, retrans_delay;
+	unsigned long retransmit = 0;
+
+	BT_DBG("hu %p wake retransmit timeout in %d state",
+		hu, qca->tx_ibs_state);
+
+	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
+				 flags, SINGLE_DEPTH_NESTING);
+
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_WAKING:
+		/* No WAKE_ACK, retransmit WAKE */
+		retransmit = 1;
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+			BT_ERR("Failed to retransmit WAKE to device");
+			break;
+		}
+		qca->ibs_sent_wakes++;
+		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
+		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+	case HCI_IBS_TX_AWAKE:
+		/* Fall through */
+
+	default:
+		BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	if (retransmit)
+		hci_uart_tx_wakeup(hu);
+}
+
+/* Initialize protocol */
+static int qca_open(struct hci_uart *hu)
+{
+	struct qca_data *qca;
+
+	BT_DBG("hu %p qca_open", hu);
+
+	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
+	if (!qca)
+		return -ENOMEM;
+
+	skb_queue_head_init(&qca->txq);
+	skb_queue_head_init(&qca->tx_wait_q);
+	spin_lock_init(&qca->hci_ibs_lock);
+	qca->workqueue = create_singlethread_workqueue("qca_wq");
+	if (!qca->workqueue) {
+		BT_ERR("QCA Workqueue not initialized properly");
+		kfree(qca);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
+	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
+	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
+	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
+
+	qca->hu = hu;
+
+	/* Assume we start with both sides asleep -- extra wakes OK */
+	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
+	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+
+	/* clocks actually on, but we start votes off */
+	qca->tx_vote = false;
+	qca->rx_vote = false;
+	qca->flags = 0;
+
+	qca->ibs_sent_wacks = 0;
+	qca->ibs_sent_slps = 0;
+	qca->ibs_sent_wakes = 0;
+	qca->ibs_recv_wacks = 0;
+	qca->ibs_recv_slps = 0;
+	qca->ibs_recv_wakes = 0;
+	qca->vote_last_jif = jiffies;
+	qca->vote_on_ms = 0;
+	qca->vote_off_ms = 0;
+	qca->votes_on = 0;
+	qca->votes_off = 0;
+	qca->tx_votes_on = 0;
+	qca->tx_votes_off = 0;
+	qca->rx_votes_on = 0;
+	qca->rx_votes_off = 0;
+
+	hu->priv = qca;
+
+	init_timer(&qca->wake_retrans_timer);
+	qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
+	qca->wake_retrans_timer.data = (u_long)hu;
+	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
+
+	init_timer(&qca->tx_idle_timer);
+	qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
+	qca->tx_idle_timer.data = (u_long)hu;
+	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
+
+	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
+	       qca->tx_idle_delay, qca->wake_retrans);
+
+	return 0;
+}
+
+static void qca_debugfs_init(struct hci_dev *hdev)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct qca_data *qca = hu->priv;
+	struct dentry *ibs_dir;
+	umode_t mode;
+
+	if (!hdev->debugfs)
+		return;
+
+	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
+
+	/* read only */
+	mode = S_IRUGO;
+	debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state);
+	debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state);
+	debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir,
+			   &qca->ibs_sent_slps);
+	debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir,
+			   &qca->ibs_sent_wakes);
+	debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir,
+			   &qca->ibs_sent_wacks);
+	debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir,
+			   &qca->ibs_recv_slps);
+	debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir,
+			   &qca->ibs_recv_wakes);
+	debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir,
+			   &qca->ibs_recv_wacks);
+	debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote);
+	debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on);
+	debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off);
+	debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote);
+	debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on);
+	debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off);
+	debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on);
+	debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off);
+	debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms);
+	debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms);
+
+	/* read/write */
+	mode = S_IRUGO | S_IWUSR;
+	debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans);
+	debugfs_create_u32("tx_idle_delay", mode, ibs_dir,
+			   &qca->tx_idle_delay);
+}
+
+/* Flush protocol data */
+static int qca_flush(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p qca flush", hu);
+
+	skb_queue_purge(&qca->tx_wait_q);
+	skb_queue_purge(&qca->txq);
+
+	return 0;
+}
+
+/* Close protocol */
+static int qca_close(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p qca close", hu);
+
+	serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu);
+
+	skb_queue_purge(&qca->tx_wait_q);
+	skb_queue_purge(&qca->txq);
+	del_timer(&qca->tx_idle_timer);
+	del_timer(&qca->wake_retrans_timer);
+	destroy_workqueue(qca->workqueue);
+	qca->hu = NULL;
+
+	kfree_skb(qca->rx_skb);
+
+	hu->priv = NULL;
+
+	kfree(qca);
+
+	return 0;
+}
+
+/* Called upon a wake-up-indication from the device.
+ */
+static void device_want_to_wakeup(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p want to wake up", hu);
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	qca->ibs_recv_wakes++;
+
+	switch (qca->rx_ibs_state) {
+	case HCI_IBS_RX_ASLEEP:
+		/* Make sure clock is on - we may have turned the clock off
+		 * since receiving the wake up indicator; wake the rx clock.
+		 */
+		queue_work(qca->workqueue, &qca->ws_awake_rx);
+		spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+		return;
+
+	case HCI_IBS_RX_AWAKE:
+		/* Always acknowledge device wake up;
+		 * sending an IBS message doesn't count as TX ON.
+		 */
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) {
+			BT_ERR("Failed to acknowledge device wake up");
+			break;
+		}
+		qca->ibs_sent_wacks++;
+		break;
+
+	default:
+		/* Any other state is illegal */
+		BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
+		       qca->rx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+/* Called upon a sleep-indication from the device.
+ */
+static void device_want_to_sleep(struct hci_uart *hu)
+{
+	unsigned long flags;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p want to sleep", hu);
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	qca->ibs_recv_slps++;
+
+	switch (qca->rx_ibs_state) {
+	case HCI_IBS_RX_AWAKE:
+		/* Update state */
+		qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
+		/* Vote off rx clock under workqueue */
+		queue_work(qca->workqueue, &qca->ws_rx_vote_off);
+		break;
+
+	case HCI_IBS_RX_ASLEEP:
+		/* Fall through */
+
+	default:
+		/* Any other state is illegal */
+		BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
+		       qca->rx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+}
+
+/* Called upon wake-up-acknowledgement from the device
+ */
+static void device_woke_up(struct hci_uart *hu)
+{
+	unsigned long flags, idle_delay;
+	struct qca_data *qca = hu->priv;
+	struct sk_buff *skb = NULL;
+
+	BT_DBG("hu %p woke up", hu);
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	qca->ibs_recv_wacks++;
+
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		/* Expect one if we send 2 WAKEs */
+		BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
+		       qca->tx_ibs_state);
+		break;
+
+	case HCI_IBS_TX_WAKING:
+		/* Send pending packets */
+		while ((skb = skb_dequeue(&qca->tx_wait_q)))
+			skb_queue_tail(&qca->txq, skb);
+
+		/* Switch timers and change state to HCI_IBS_TX_AWAKE */
+		del_timer(&qca->wake_retrans_timer);
+		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+		qca->tx_ibs_state = HCI_IBS_TX_AWAKE;
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+		/* Fall through */
+
+	default:
+		BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
+		       qca->tx_ibs_state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	/* Actually send the packets */
+	hci_uart_tx_wakeup(hu);
+}
+
+/* Enqueue frame for transmission (padding, crc, etc); may be called from
+ * two simultaneous tasklets.
+ */
+static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+	unsigned long flags = 0, idle_delay;
+	struct qca_data *qca = hu->priv;
+
+	BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
+	       qca->tx_ibs_state);
+
+	/* Prepend skb with frame type */
+	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+	/* Don't go to sleep in the middle of a patch download, or when
+	 * Out-Of-Band (GPIO-controlled) sleep is selected.
+	 */
+	if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+		skb_queue_tail(&qca->txq, skb);
+		return 0;
+	}
+
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
+
+	/* Act according to current state */
+	switch (qca->tx_ibs_state) {
+	case HCI_IBS_TX_AWAKE:
+		BT_DBG("Device awake, sending normally");
+		skb_queue_tail(&qca->txq, skb);
+		idle_delay = msecs_to_jiffies(qca->tx_idle_delay);
+		mod_timer(&qca->tx_idle_timer, jiffies + idle_delay);
+		break;
+
+	case HCI_IBS_TX_ASLEEP:
+		BT_DBG("Device asleep, waking up and queueing packet");
+		/* Save packet for later */
+		skb_queue_tail(&qca->tx_wait_q, skb);
+
+		qca->tx_ibs_state = HCI_IBS_TX_WAKING;
+		/* Schedule a work queue to wake up device */
+		queue_work(qca->workqueue, &qca->ws_awake_device);
+		break;
+
+	case HCI_IBS_TX_WAKING:
+		BT_DBG("Device waking up, queueing packet");
+		/* Transient state; just keep packet for later */
+		skb_queue_tail(&qca->tx_wait_q, skb);
+		break;
+
+	default:
+		BT_ERR("Illegal tx state: %d (losing packet)",
+		       qca->tx_ibs_state);
+		kfree_skb(skb);
+		break;
+	}
+
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
+
+	return 0;
+}
+
+static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+
+	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND);
+
+	device_want_to_sleep(hu);
+
+	kfree_skb(skb);
+	return 0;
+}
+
+static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+
+	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND);
+
+	device_want_to_wakeup(hu);
+
+	kfree_skb(skb);
+	return 0;
+}
+
+static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+
+	BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK);
+
+	device_woke_up(hu);
+
+	kfree_skb(skb);
+	return 0;
+}
+
+#define QCA_IBS_SLEEP_IND_EVENT \
+	.type = HCI_IBS_SLEEP_IND, \
+	.hlen = 0, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_IND_EVENT \
+	.type = HCI_IBS_WAKE_IND, \
+	.hlen = 0, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = HCI_MAX_IBS_SIZE
+
+#define QCA_IBS_WAKE_ACK_EVENT \
+	.type = HCI_IBS_WAKE_ACK, \
+	.hlen = 0, \
+	.loff = 0, \
+	.lsize = 0, \
+	.maxlen = HCI_MAX_IBS_SIZE
+
+static const struct h4_recv_pkt qca_recv_pkts[] = {
+	{ H4_RECV_ACL,             .recv = hci_recv_frame    },
+	{ H4_RECV_SCO,             .recv = hci_recv_frame    },
+	{ H4_RECV_EVENT,           .recv = hci_recv_frame    },
+	{ QCA_IBS_WAKE_IND_EVENT,  .recv = qca_ibs_wake_ind  },
+	{ QCA_IBS_WAKE_ACK_EVENT,  .recv = qca_ibs_wake_ack  },
+	{ QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
+};
+
+static int qca_recv(struct hci_uart *hu, const void *data, int count)
+{
+	struct qca_data *qca = hu->priv;
+
+	if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+		return -EUNATCH;
+
+	qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+				  qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
+	if (IS_ERR(qca->rx_skb)) {
+		int err = PTR_ERR(qca->rx_skb);
+		BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+		qca->rx_skb = NULL;
+		return err;
+	}
+
+	return count;
+}
+
+static struct sk_buff *qca_dequeue(struct hci_uart *hu)
+{
+	struct qca_data *qca = hu->priv;
+
+	return skb_dequeue(&qca->txq);
+}
+
+static uint8_t qca_get_baudrate_value(int speed)
+{
+	switch (speed) {
+	case 9600:
+		return QCA_BAUDRATE_9600;
+	case 19200:
+		return QCA_BAUDRATE_19200;
+	case 38400:
+		return QCA_BAUDRATE_38400;
+	case 57600:
+		return QCA_BAUDRATE_57600;
+	case 115200:
+		return QCA_BAUDRATE_115200;
+	case 230400:
+		return QCA_BAUDRATE_230400;
+	case 460800:
+		return QCA_BAUDRATE_460800;
+	case 500000:
+		return QCA_BAUDRATE_500000;
+	case 921600:
+		return QCA_BAUDRATE_921600;
+	case 1000000:
+		return QCA_BAUDRATE_1000000;
+	case 2000000:
+		return QCA_BAUDRATE_2000000;
+	case 3000000:
+		return QCA_BAUDRATE_3000000;
+	case 3500000:
+		return QCA_BAUDRATE_3500000;
+	default:
+		return QCA_BAUDRATE_115200;
+	}
+}
+
+static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
+{
+	struct hci_uart *hu = hci_get_drvdata(hdev);
+	struct qca_data *qca = hu->priv;
+	struct sk_buff *skb;
+	u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
+
+	if (baudrate > QCA_BAUDRATE_3000000)
+		return -EINVAL;
+
+	cmd[4] = baudrate;
+
+	skb = bt_skb_alloc(sizeof(cmd), GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("Failed to allocate memory for baudrate packet");
+		return -ENOMEM;
+	}
+
+	/* Copy the baudrate change command and mark the skb as an HCI command. */
+	memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
+	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+
+	skb_queue_tail(&qca->txq, skb);
+	hci_uart_tx_wakeup(hu);
+
+	/* Wait 300ms for the controller to change to the new baudrate.
+	 * The controller switches over once it receives this HCI command,
+	 * after which the host can talk to it at the new rate.
+	 */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	return 0;
+}
+
+static int qca_setup(struct hci_uart *hu)
+{
+	struct hci_dev *hdev = hu->hdev;
+	struct qca_data *qca = hu->priv;
+	unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
+	int ret;
+
+	BT_INFO("%s: ROME setup", hdev->name);
+
+	/* Patch downloading has to be done without IBS mode */
+	clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+
+	/* Setup initial baudrate */
+	speed = 0;
+	if (hu->init_speed)
+		speed = hu->init_speed;
+	else if (hu->proto->init_speed)
+		speed = hu->proto->init_speed;
+
+	if (speed)
+		hci_uart_set_baudrate(hu, speed);
+
+	/* Setup user speed if needed */
+	speed = 0;
+	if (hu->oper_speed)
+		speed = hu->oper_speed;
+	else if (hu->proto->oper_speed)
+		speed = hu->proto->oper_speed;
+
+	if (speed) {
+		qca_baudrate = qca_get_baudrate_value(speed);
+
+		BT_INFO("%s: Set UART speed to %d", hdev->name, speed);
+		ret = qca_set_baudrate(hdev, qca_baudrate);
+		if (ret) {
+			BT_ERR("%s: Failed to change the baud rate (%d)",
+			       hdev->name, ret);
+			return ret;
+		}
+		hci_uart_set_baudrate(hu, speed);
+	}
+
+	/* Setup patch / NVM configurations */
+	ret = qca_uart_setup_rome(hdev, qca_baudrate);
+	if (!ret) {
+		set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+		qca_debugfs_init(hdev);
+	}
+
+	/* Setup bdaddr */
+	hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+	return ret;
+}
+
+static struct hci_uart_proto qca_proto = {
+	.id		= HCI_UART_QCA,
+	.name		= "QCA",
+	.init_speed	= 115200,
+	.oper_speed	= 3000000,
+	.open		= qca_open,
+	.close		= qca_close,
+	.flush		= qca_flush,
+	.setup		= qca_setup,
+	.recv		= qca_recv,
+	.enqueue	= qca_enqueue,
+	.dequeue	= qca_dequeue,
+};
+
+int __init qca_init(void)
+{
+	return hci_uart_register_proto(&qca_proto);
+}
+
+int __exit qca_deinit(void)
+{
+	return hci_uart_unregister_proto(&qca_proto);
+}
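
The hci_qca.c code added above implements Qualcomm's In-Band Sleep (IBS)
protocol.  On the transmit side, qca_enqueue(), device_woke_up() and the
tx idle timer form a small three-state machine (ASLEEP -> WAKING -> AWAKE).
The following is a minimal userspace sketch of just that state machine, with
the skb queues and timers stubbed out as prints; it is illustrative only and
not part of the patch.

/* Sketch of the TX-side IBS state machine; queue/timer operations are
 * stand-ins for the driver's skb queues and kernel timers.
 */
#include <stdio.h>

enum tx_ibs_state { TX_ASLEEP, TX_WAKING, TX_AWAKE };

static enum tx_ibs_state tx_state = TX_ASLEEP;

static void tx_enqueue(void)			/* qca_enqueue() equivalent */
{
	switch (tx_state) {
	case TX_AWAKE:		/* send immediately, rearm the idle timer */
		printf("queue to txq, rearm tx_idle_timer\n");
		break;
	case TX_ASLEEP:		/* park the packet, ask the device to wake */
		printf("queue to tx_wait_q, schedule HCI_IBS_WAKE_IND\n");
		tx_state = TX_WAKING;
		break;
	case TX_WAKING:		/* wake already in flight, just park it */
		printf("queue to tx_wait_q\n");
		break;
	}
}

static void wake_ack_received(void)		/* device_woke_up() equivalent */
{
	if (tx_state == TX_WAKING) {
		printf("move tx_wait_q to txq, start tx_idle_timer\n");
		tx_state = TX_AWAKE;
	}
}

static void tx_idle_expired(void)		/* tx idle timeout sends SLEEP_IND */
{
	if (tx_state == TX_AWAKE) {
		printf("send HCI_IBS_SLEEP_IND\n");
		tx_state = TX_ASLEEP;
	}
}

int main(void)
{
	tx_enqueue();		/* ASLEEP -> WAKING */
	wake_ack_received();	/* WAKING -> AWAKE  */
	tx_enqueue();		/* stays AWAKE      */
	tx_idle_expired();	/* AWAKE  -> ASLEEP */
	return 0;
}
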
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index ce9c670..495b9ef 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -35,7 +35,7 @@
 #define HCIUARTGETFLAGS		_IOR('U', 204, int)
 
 /* UART protocols */
-#define HCI_UART_MAX_PROTO	8
+#define HCI_UART_MAX_PROTO	9
 
 #define HCI_UART_H4	0
 #define HCI_UART_BCSP	1
@@ -45,6 +45,7 @@
 #define HCI_UART_ATH3K	5
 #define HCI_UART_INTEL	6
 #define HCI_UART_BCM	7
+#define HCI_UART_QCA	8
 
 #define HCI_UART_RAW_DEVICE	0
 #define HCI_UART_RESET_ON_INIT	1
@@ -167,7 +168,17 @@
 int h5_deinit(void);
 #endif
 
+#ifdef CONFIG_BT_HCIUART_INTEL
+int intel_init(void);
+int intel_deinit(void);
+#endif
+
 #ifdef CONFIG_BT_HCIUART_BCM
 int bcm_init(void);
 int bcm_deinit(void);
 #endif
+
+#ifdef CONFIG_BT_HCIUART_QCA
+int qca_init(void);
+int qca_deinit(void);
+#endif
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index ab3bde1..1c543ef 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -332,6 +332,18 @@
 }
 
 /**
+ * mips_cdmm_phys_base() - Choose a physical base address for CDMM region.
+ *
+ * Picking a suitable physical address at which to map the CDMM region is
+ * platform specific, so this weak function can be overridden by platform
+ * code to pick a suitable value if none is configured by the bootloader.
+ */
+phys_addr_t __weak mips_cdmm_phys_base(void)
+{
+	return 0;
+}
+
+/**
  * mips_cdmm_setup() - Ensure the CDMM bus is initialised and usable.
  * @bus:	Pointer to bus information for current CPU.
  *		IS_ERR(bus) is checked, so no need for caller to check.
@@ -368,7 +380,7 @@
 	if (!bus->phys)
 		bus->phys = mips_cdmm_cur_base();
 	/* Otherwise, ask platform code for suggestions */
-	if (!bus->phys && mips_cdmm_phys_base)
+	if (!bus->phys)
 		bus->phys = mips_cdmm_phys_base();
 	/* Otherwise, copy what other CPUs have done */
 	if (!bus->phys)
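
The mips_cdmm change above adds a __weak default for mips_cdmm_phys_base(),
so the symbol always resolves and the NULL check on the weak reference can go
away; a platform that needs a specific CDMM base simply provides a strong
definition.  A standalone illustration of that weak-symbol override pattern
(the function name and address below are hypothetical):

/* default.c - generic fallback, analogous to the __weak default above */
#include <stdio.h>

unsigned long __attribute__((weak)) phys_base(void)
{
	return 0;	/* no platform preference */
}

int main(void)
{
	printf("phys_base = 0x%lx\n", phys_base());
	return 0;
}

/* platform.c - optional strong override; if linked in, it wins:
 *
 *	unsigned long phys_base(void)
 *	{
 *		return 0x1fc10000;
 *	}
 */
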
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index fdb0f9b..8069b36 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -243,17 +243,15 @@
  *	@misc: device to unregister
  *
  *	Unregister a miscellaneous device that was previously
- *	successfully registered with misc_register(). Success
- *	is indicated by a zero return, a negative errno code
- *	indicates an error.
+ *	successfully registered with misc_register().
  */
 
-int misc_deregister(struct miscdevice *misc)
+void misc_deregister(struct miscdevice *misc)
 {
 	int i = DYNAMIC_MINORS - misc->minor - 1;
 
 	if (WARN_ON(list_empty(&misc->list)))
-		return -EINVAL;
+		return;
 
 	mutex_lock(&misc_mtx);
 	list_del(&misc->list);
@@ -261,7 +259,6 @@
 	if (i < DYNAMIC_MINORS && i >= 0)
 		clear_bit(i, misc_minors);
 	mutex_unlock(&misc_mtx);
-	return 0;
 }
 
 EXPORT_SYMBOL(misc_register);
@@ -281,10 +278,9 @@
 static int __init misc_init(void)
 {
 	int err;
+	struct proc_dir_entry *ret;
 
-#ifdef CONFIG_PROC_FS
-	proc_create("misc", 0, NULL, &misc_proc_fops);
-#endif
+	ret = proc_create("misc", 0, NULL, &misc_proc_fops);
 	misc_class = class_create(THIS_MODULE, "misc");
 	err = PTR_ERR(misc_class);
 	if (IS_ERR(misc_class))
@@ -300,7 +296,8 @@
 	printk("unable to get major %d for misc devices\n", MISC_MAJOR);
 	class_destroy(misc_class);
 fail_remove:
-	remove_proc_entry("misc", NULL);
+	if (ret)
+		remove_proc_entry("misc", NULL);
 	return err;
 }
 subsys_initcall(misc_init);
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index 9df78e2..97c2d8d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -702,7 +702,7 @@
 		seq_printf(seq, "%ds%s\n", nvram[10],
 		    nvram[10] < 8 ? ", no memory test" : "");
 
-	vmode = (nvram[14] << 8) || nvram[15];
+	vmode = (nvram[14] << 8) | nvram[15];
 	seq_printf(seq,
 	    "Video mode       : %s colors, %d columns, %s %s monitor\n",
 	    colors[vmode & 7],
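
The nvram.c hunk fixes an operator typo: with the logical ||, the expression
evaluates to 0 or 1, so the reported video mode was never the combined 16-bit
value of bytes 14 and 15; the bitwise | restores the intended composition.
A short userspace demonstration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned char hi = 0x01, lo = 0x2a;

	/* logical OR collapses to a boolean */
	printf("(hi << 8) || lo = %d\n", (hi << 8) || lo);		/* 1     */
	/* bitwise OR composes the 16-bit value */
	printf("(hi << 8) |  lo = 0x%x\n", (unsigned)((hi << 8) | lo));	/* 0x12a */
	return 0;
}
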
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c
index 014c9d9..f5a45d8 100644
--- a/drivers/char/toshiba.c
+++ b/drivers/char/toshiba.c
@@ -430,7 +430,7 @@
 	int i,major,minor,day,year,month,flag;
 	unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
 	SMMRegisters regs;
-	void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
+	void __iomem *bios = ioremap(0xf0000, 0x10000);
 
 	if (!bios)
 		return -ENOMEM;
diff --git a/drivers/char/xillybus/xillybus_pcie.c b/drivers/char/xillybus/xillybus_pcie.c
index d8266bc..9418300 100644
--- a/drivers/char/xillybus/xillybus_pcie.c
+++ b/drivers/char/xillybus/xillybus_pcie.c
@@ -193,14 +193,16 @@
 	}
 
 	/*
-	 * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
-	 * is the right thing. But some unclever PCIe drivers report it's OK
-	 * when the hardware drops those 64-bit PCIe packets. So trust
-	 * nobody and use 32 bits DMA addressing in any case.
+	 * Some (old and buggy?) hardware drops 64-bit addressed PCIe packets,
+	 * even when the PCIe driver claims that a 64-bit mask is OK. On the
+	 * other hand, on some architectures, 64-bit addressing is mandatory.
+	 * So go for the 64-bit mask only when failing is the other option.
 	 */
 
 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		endpoint->dma_using_dac = 0;
+	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		endpoint->dma_using_dac = 1;
 	} else {
 		dev_err(endpoint->dev, "Failed to set DMA mask. Aborting.\n");
 		return -ENODEV;
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index c4cf075..d08b3e5 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -9,7 +9,7 @@
 obj-$(CONFIG_COMMON_CLK)	+= clk-mux.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-composite.o
 obj-$(CONFIG_COMMON_CLK)	+= clk-fractional-divider.o
-obj-$(CONFIG_COMMON_CLK)	+= clk-gpio-gate.o
+obj-$(CONFIG_COMMON_CLK)	+= clk-gpio.o
 ifeq ($(CONFIG_OF), y)
 obj-$(CONFIG_COMMON_CLK)	+= clk-conf.o
 endif
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index 27dfa96..fd7247d 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -614,17 +614,12 @@
 	int num_parents;
 	unsigned int irq;
 	const char *name = np->name;
-	int i;
 
 	num_parents = of_clk_get_parent_count(np);
 	if (num_parents <= 0 || num_parents > 2)
 		return;
 
-	for (i = 0; i < num_parents; ++i) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	of_property_read_string(np, "clock-output-names", &name);
 
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 5b3ded5..620ea32 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -222,7 +222,6 @@
 {
 	struct clk *clk;
 	int num_parents;
-	int i;
 	unsigned int irq;
 	const char *parent_names[MASTER_SOURCE_MAX];
 	const char *name = np->name;
@@ -232,11 +231,7 @@
 	if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX)
 		return;
 
-	for (i = 0; i < num_parents; ++i) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	of_property_read_string(np, "clock-output-names", &name);
 
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index df2c1af..e4d7b57 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -134,7 +134,7 @@
 
 static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
 {
-	struct clk *parent;
+	struct clk_hw *parent;
 	unsigned long parent_rate;
 	int shift = 0;
 
@@ -142,8 +142,8 @@
 		return;
 
 	if (periph->range.max) {
-		parent = clk_get_parent_by_index(periph->hw.clk, 0);
-		parent_rate = __clk_get_rate(parent);
+		parent = clk_hw_get_parent_by_index(&periph->hw, 0);
+		parent_rate = clk_hw_get_rate(parent);
 		if (!parent_rate)
 			return;
 
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index 8c86c0f7..14b270b 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -54,46 +54,47 @@
 	return parent_rate >> pres;
 }
 
-static long clk_programmable_determine_rate(struct clk_hw *hw,
-					    unsigned long rate,
-					    unsigned long min_rate,
-					    unsigned long max_rate,
-					    unsigned long *best_parent_rate,
-					    struct clk_hw **best_parent_hw)
+static int clk_programmable_determine_rate(struct clk_hw *hw,
+					   struct clk_rate_request *req)
 {
-	struct clk *parent = NULL;
+	struct clk_hw *parent;
 	long best_rate = -EINVAL;
 	unsigned long parent_rate;
 	unsigned long tmp_rate;
 	int shift;
 	int i;
 
-	for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
-		parent = clk_get_parent_by_index(hw->clk, i);
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+		parent = clk_hw_get_parent_by_index(hw, i);
 		if (!parent)
 			continue;
 
-		parent_rate = __clk_get_rate(parent);
+		parent_rate = clk_hw_get_rate(parent);
 		for (shift = 0; shift < PROG_PRES_MASK; shift++) {
 			tmp_rate = parent_rate >> shift;
-			if (tmp_rate <= rate)
+			if (tmp_rate <= req->rate)
 				break;
 		}
 
-		if (tmp_rate > rate)
+		if (tmp_rate > req->rate)
 			continue;
 
-		if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
+		if (best_rate < 0 ||
+		    (req->rate - tmp_rate) < (req->rate - best_rate)) {
 			best_rate = tmp_rate;
-			*best_parent_rate = parent_rate;
-			*best_parent_hw = __clk_get_hw(parent);
+			req->best_parent_rate = parent_rate;
+			req->best_parent_hw = parent;
 		}
 
 		if (!best_rate)
 			break;
 	}
 
-	return best_rate;
+	if (best_rate < 0)
+		return best_rate;
+
+	req->rate = best_rate;
+	return 0;
 }
 
 static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
@@ -230,7 +231,6 @@
 {
 	int num;
 	u32 id;
-	int i;
 	struct clk *clk;
 	int num_parents;
 	const char *parent_names[PROG_SOURCE_MAX];
@@ -241,11 +241,7 @@
 	if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX)
 		return;
 
-	for (i = 0; i < num_parents; ++i) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	num = of_get_child_count(np);
 	if (!num || num > (PROG_ID_MAX + 1))
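
Several clk drivers in this pull (clk-programmable above, plus clk-usb,
bcm kona and clk-composite below) are converted to the reworked
.determine_rate callback: instead of returning the chosen rate as a long and
filling two out-parameters, the callback now fills a struct clk_rate_request
and returns 0 or a negative errno.  A compressed userspace sketch of the two
calling conventions follows; the struct fields mirror the kernel's
clk_rate_request, but the divide-by-two "hardware" is made up.

#include <stdio.h>

struct clk_rate_request {
	unsigned long rate;		/* in: requested rate, out: granted rate */
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned long best_parent_rate;
	void *best_parent_hw;
};

/* Old style: return the chosen rate (or a negative error) and fill
 * best_parent_rate through an out-parameter.
 */
static long old_determine_rate(unsigned long rate, unsigned long parent_rate,
			       unsigned long *best_parent_rate)
{
	(void)rate;
	*best_parent_rate = parent_rate;
	return (long)(parent_rate / 2);
}

/* New style: fill the request and return 0 or a negative errno. */
static int new_determine_rate(struct clk_rate_request *req,
			      unsigned long parent_rate)
{
	unsigned long best = parent_rate / 2;

	if (best > req->max_rate)
		return -22;		/* -EINVAL */

	req->best_parent_rate = parent_rate;
	req->rate = best;
	return 0;
}

int main(void)
{
	struct clk_rate_request req = { .rate = 50, .max_rate = ~0UL };
	unsigned long best_parent = 0;
	long rate = old_determine_rate(50, 100, &best_parent);
	int err = new_determine_rate(&req, 100);

	printf("old: rate=%ld best_parent_rate=%lu\n", rate, best_parent);
	printf("new: err=%d rate=%lu best_parent_rate=%lu\n",
	       err, req.rate, req.best_parent_rate);
	return 0;
}
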
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 98a84a8..d0d5076 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -10,8 +10,10 @@
  *
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/clk/at91_pmc.h>
 #include <linux/delay.h>
 #include <linux/of.h>
@@ -371,17 +373,12 @@
 	const char *parent_names[2];
 	int num_parents;
 	const char *name = np->name;
-	int i;
 
 	num_parents = of_clk_get_parent_count(np);
 	if (num_parents <= 0 || num_parents > 2)
 		return;
 
-	for (i = 0; i < num_parents; ++i) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	of_property_read_string(np, "clock-output-names", &name);
 
@@ -449,17 +446,12 @@
 	const char *parent_names[2];
 	int num_parents;
 	const char *name = np->name;
-	int i;
 
 	num_parents = of_clk_get_parent_count(np);
 	if (num_parents != 2)
 		return;
 
-	for (i = 0; i < num_parents; ++i) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	of_property_read_string(np, "clock-output-names", &name);
 
diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c
index 3817ea8..a7f8501 100644
--- a/drivers/clk/at91/clk-smd.c
+++ b/drivers/clk/at91/clk-smd.c
@@ -145,7 +145,6 @@
 					struct at91_pmc *pmc)
 {
 	struct clk *clk;
-	int i;
 	int num_parents;
 	const char *parent_names[SMD_SOURCE_MAX];
 	const char *name = np->name;
@@ -154,11 +153,7 @@
 	if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX)
 		return;
 
-	for (i = 0; i < num_parents; i++) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	of_property_read_string(np, "clock-output-names", &name);
 
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0cbd2b..8ab8502 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -56,47 +56,43 @@
 	return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
 }
 
-static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
-					      unsigned long rate,
-					      unsigned long min_rate,
-					      unsigned long max_rate,
-					      unsigned long *best_parent_rate,
-					      struct clk_hw **best_parent_hw)
+static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
+					     struct clk_rate_request *req)
 {
-	struct clk *parent = NULL;
+	struct clk_hw *parent;
 	long best_rate = -EINVAL;
 	unsigned long tmp_rate;
 	int best_diff = -1;
 	int tmp_diff;
 	int i;
 
-	for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
 		int div;
 
-		parent = clk_get_parent_by_index(hw->clk, i);
+		parent = clk_hw_get_parent_by_index(hw, i);
 		if (!parent)
 			continue;
 
 		for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
 			unsigned long tmp_parent_rate;
 
-			tmp_parent_rate = rate * div;
-			tmp_parent_rate = __clk_round_rate(parent,
+			tmp_parent_rate = req->rate * div;
+			tmp_parent_rate = clk_hw_round_rate(parent,
 							   tmp_parent_rate);
 			tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
-			if (tmp_rate < rate)
-				tmp_diff = rate - tmp_rate;
+			if (tmp_rate < req->rate)
+				tmp_diff = req->rate - tmp_rate;
 			else
-				tmp_diff = tmp_rate - rate;
+				tmp_diff = tmp_rate - req->rate;
 
 			if (best_diff < 0 || best_diff > tmp_diff) {
 				best_rate = tmp_rate;
 				best_diff = tmp_diff;
-				*best_parent_rate = tmp_parent_rate;
-				*best_parent_hw = __clk_get_hw(parent);
+				req->best_parent_rate = tmp_parent_rate;
+				req->best_parent_hw = parent;
 			}
 
-			if (!best_diff || tmp_rate < rate)
+			if (!best_diff || tmp_rate < req->rate)
 				break;
 		}
 
@@ -104,7 +100,11 @@
 			break;
 	}
 
-	return best_rate;
+	if (best_rate < 0)
+		return best_rate;
+
+	req->rate = best_rate;
+	return 0;
 }
 
 static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -273,7 +273,7 @@
 					  unsigned long *parent_rate)
 {
 	struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
-	struct clk *parent = __clk_get_parent(hw->clk);
+	struct clk_hw *parent = clk_hw_get_parent(hw);
 	unsigned long bestrate = 0;
 	int bestdiff = -1;
 	unsigned long tmprate;
@@ -287,7 +287,7 @@
 			continue;
 
 		tmp_parent_rate = rate * usb->divisors[i];
-		tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
+		tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
 		tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
 		if (tmprate < rate)
 			tmpdiff = rate - tmprate;
@@ -373,7 +373,6 @@
 					struct at91_pmc *pmc)
 {
 	struct clk *clk;
-	int i;
 	int num_parents;
 	const char *parent_names[USB_SOURCE_MAX];
 	const char *name = np->name;
@@ -382,11 +381,7 @@
 	if (num_parents <= 0 || num_parents > USB_SOURCE_MAX)
 		return;
 
-	for (i = 0; i < num_parents; i++) {
-		parent_names[i] = of_clk_get_parent_name(np, i);
-		if (!parent_names[i])
-			return;
-	}
+	of_clk_parent_fill(np, parent_names, num_parents);
 
 	of_property_read_string(np, "clock-output-names", &name);
 
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 39be2be..d1844f1 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -125,7 +125,6 @@
 
 	irq_set_chip_and_handler(virq, &pmc_irq,
 				 handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	irq_set_chip_data(virq, pmc);
 
 	return 0;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index eb8e5dc..8b87771 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -59,71 +59,63 @@
 int of_at91_get_clk_range(struct device_node *np, const char *propname,
 			  struct clk_range *range);
 
-extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
-						 struct at91_pmc *pmc);
+void of_at91sam9260_clk_slow_setup(struct device_node *np,
+				   struct at91_pmc *pmc);
 
-extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np,
-						    struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
-						       struct at91_pmc *pmc);
-extern void __init of_at91rm9200_clk_main_setup(struct device_node *np,
-						struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_main_setup(struct device_node *np,
-						struct at91_pmc *pmc);
+void of_at91rm9200_clk_main_osc_setup(struct device_node *np,
+				      struct at91_pmc *pmc);
+void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np,
+					 struct at91_pmc *pmc);
+void of_at91rm9200_clk_main_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
+void of_at91sam9x5_clk_main_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
 
-extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np,
-					       struct at91_pmc *pmc);
-extern void __init of_at91sam9g45_clk_pll_setup(struct device_node *np,
-						struct at91_pmc *pmc);
-extern void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np,
-						 struct at91_pmc *pmc);
-extern void __init of_sama5d3_clk_pll_setup(struct device_node *np,
-					    struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
-						  struct at91_pmc *pmc);
+void of_at91rm9200_clk_pll_setup(struct device_node *np,
+				 struct at91_pmc *pmc);
+void of_at91sam9g45_clk_pll_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
+void of_at91sam9g20_clk_pllb_setup(struct device_node *np,
+				   struct at91_pmc *pmc);
+void of_sama5d3_clk_pll_setup(struct device_node *np,
+			      struct at91_pmc *pmc);
+void of_at91sam9x5_clk_plldiv_setup(struct device_node *np,
+				    struct at91_pmc *pmc);
 
-extern void __init of_at91rm9200_clk_master_setup(struct device_node *np,
-						  struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_master_setup(struct device_node *np,
-						  struct at91_pmc *pmc);
+void of_at91rm9200_clk_master_setup(struct device_node *np,
+				    struct at91_pmc *pmc);
+void of_at91sam9x5_clk_master_setup(struct device_node *np,
+				    struct at91_pmc *pmc);
 
-extern void __init of_at91rm9200_clk_sys_setup(struct device_node *np,
-					       struct at91_pmc *pmc);
+void of_at91rm9200_clk_sys_setup(struct device_node *np,
+				 struct at91_pmc *pmc);
 
-extern void __init of_at91rm9200_clk_periph_setup(struct device_node *np,
-						  struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_periph_setup(struct device_node *np,
-						  struct at91_pmc *pmc);
+void of_at91rm9200_clk_periph_setup(struct device_node *np,
+				    struct at91_pmc *pmc);
+void of_at91sam9x5_clk_periph_setup(struct device_node *np,
+				    struct at91_pmc *pmc);
 
-extern void __init of_at91rm9200_clk_prog_setup(struct device_node *np,
-						struct at91_pmc *pmc);
-extern void __init of_at91sam9g45_clk_prog_setup(struct device_node *np,
-						 struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_prog_setup(struct device_node *np,
-						struct at91_pmc *pmc);
+void of_at91rm9200_clk_prog_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
+void of_at91sam9g45_clk_prog_setup(struct device_node *np,
+				   struct at91_pmc *pmc);
+void of_at91sam9x5_clk_prog_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
 
-#if defined(CONFIG_HAVE_AT91_UTMI)
-extern void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np,
-						struct at91_pmc *pmc);
-#endif
+void of_at91sam9x5_clk_utmi_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
 
-#if defined(CONFIG_HAVE_AT91_USB_CLK)
-extern void __init of_at91rm9200_clk_usb_setup(struct device_node *np,
-					       struct at91_pmc *pmc);
-extern void __init of_at91sam9x5_clk_usb_setup(struct device_node *np,
-					       struct at91_pmc *pmc);
-extern void __init of_at91sam9n12_clk_usb_setup(struct device_node *np,
-						struct at91_pmc *pmc);
-#endif
+void of_at91rm9200_clk_usb_setup(struct device_node *np,
+				 struct at91_pmc *pmc);
+void of_at91sam9x5_clk_usb_setup(struct device_node *np,
+				 struct at91_pmc *pmc);
+void of_at91sam9n12_clk_usb_setup(struct device_node *np,
+				  struct at91_pmc *pmc);
 
-#if defined(CONFIG_HAVE_AT91_SMD)
-extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
-					       struct at91_pmc *pmc);
-#endif
+void of_at91sam9x5_clk_smd_setup(struct device_node *np,
+				 struct at91_pmc *pmc);
 
-#if defined(CONFIG_HAVE_AT91_H32MX)
-extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
-					      struct at91_pmc *pmc);
-#endif
+void of_sama5d4_clk_h32mx_setup(struct device_node *np,
+				struct at91_pmc *pmc);
 
 #endif /* __PMC_H_ */
diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c
index 79a9850..3a15347 100644
--- a/drivers/clk/bcm/clk-kona.c
+++ b/drivers/clk/bcm/clk-kona.c
@@ -16,6 +16,7 @@
 
 #include <linux/delay.h>
 #include <linux/kernel.h>
+#include <linux/clk.h>
 
 /*
  * "Policies" affect the frequencies of bus clocks provided by a
@@ -1010,25 +1011,23 @@
 	struct bcm_clk_div *div = &bcm_clk->u.peri->div;
 
 	if (!divider_exists(div))
-		return __clk_get_rate(hw->clk);
+		return clk_hw_get_rate(hw);
 
 	/* Quietly avoid a zero rate */
 	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
 				rate ? rate : 1, *parent_rate, NULL);
 }
 
-static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long min_rate,
-		unsigned long max_rate,
-		unsigned long *best_parent_rate, struct clk_hw **best_parent)
+static int kona_peri_clk_determine_rate(struct clk_hw *hw,
+					struct clk_rate_request *req)
 {
 	struct kona_clk *bcm_clk = to_kona_clk(hw);
-	struct clk *clk = hw->clk;
-	struct clk *current_parent;
+	struct clk_hw *current_parent;
 	unsigned long parent_rate;
 	unsigned long best_delta;
 	unsigned long best_rate;
 	u32 parent_count;
+	long rate;
 	u32 which;
 
 	/*
@@ -1037,18 +1036,25 @@
 	 */
 	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
 	parent_count = (u32)bcm_clk->init_data.num_parents;
-	if (parent_count < 2)
-		return kona_peri_clk_round_rate(hw, rate, best_parent_rate);
+	if (parent_count < 2) {
+		rate = kona_peri_clk_round_rate(hw, req->rate,
+						&req->best_parent_rate);
+		if (rate < 0)
+			return rate;
+
+		req->rate = rate;
+		return 0;
+	}
 
 	/* Unless we can do better, stick with current parent */
-	current_parent = clk_get_parent(clk);
-	parent_rate = __clk_get_rate(current_parent);
-	best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
-	best_delta = abs(best_rate - rate);
+	current_parent = clk_hw_get_parent(hw);
+	parent_rate = clk_hw_get_rate(current_parent);
+	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
+	best_delta = abs(best_rate - req->rate);
 
 	/* Check whether any other parent clock can produce a better result */
 	for (which = 0; which < parent_count; which++) {
-		struct clk *parent = clk_get_parent_by_index(clk, which);
+		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
 		unsigned long delta;
 		unsigned long other_rate;
 
@@ -1057,18 +1063,20 @@
 			continue;
 
 		/* We don't support CLK_SET_RATE_PARENT */
-		parent_rate = __clk_get_rate(parent);
-		other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate);
-		delta = abs(other_rate - rate);
+		parent_rate = clk_hw_get_rate(parent);
+		other_rate = kona_peri_clk_round_rate(hw, req->rate,
+						      &parent_rate);
+		delta = abs(other_rate - req->rate);
 		if (delta < best_delta) {
 			best_delta = delta;
 			best_rate = other_rate;
-			*best_parent = __clk_get_hw(parent);
-			*best_parent_rate = parent_rate;
+			req->best_parent_hw = parent;
+			req->best_parent_rate = parent_rate;
 		}
 	}
 
-	return best_rate;
+	req->rate = best_rate;
+	return 0;
 }
 
 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
@@ -1130,7 +1138,7 @@
 	if (parent_rate > (unsigned long)LONG_MAX)
 		return -EINVAL;
 
-	if (rate == __clk_get_rate(hw->clk))
+	if (rate == clk_hw_get_rate(hw))
 		return 0;
 
 	if (!divider_exists(div))
@@ -1249,6 +1257,7 @@
 	unsigned long flags;
 	unsigned int which;
 	struct clk **clks = ccu->clk_data.clks;
+	struct kona_clk *kona_clks = ccu->kona_clks;
 	bool success = true;
 
 	flags = ccu_lock(ccu);
@@ -1259,7 +1268,7 @@
 
 		if (!clks[which])
 			continue;
-		bcm_clk = to_kona_clk(__clk_get_hw(clks[which]));
+		bcm_clk = &kona_clks[which];
 		success &= __kona_clk_init(bcm_clk);
 	}
 
diff --git a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c
index f4b8d32..1c2294d 100644
--- a/drivers/clk/berlin/berlin2-pll.c
+++ b/drivers/clk/berlin/berlin2-pll.c
@@ -61,7 +61,7 @@
 	fbdiv = (val >> map->fbdiv_shift) & FBDIV_MASK;
 	rfdiv = (val >> map->rfdiv_shift) & RFDIV_MASK;
 	if (rfdiv == 0) {
-		pr_warn("%s has zero rfdiv\n", __clk_get_name(hw->clk));
+		pr_warn("%s has zero rfdiv\n", clk_hw_get_name(hw));
 		rfdiv = 1;
 	}
 
@@ -70,7 +70,7 @@
 	vcodiv = map->vcodiv[vcodivsel];
 	if (vcodiv == 0) {
 		pr_warn("%s has zero vcodiv (index %d)\n",
-			__clk_get_name(hw->clk), vcodivsel);
+			clk_hw_get_name(hw), vcodivsel);
 		vcodiv = 1;
 	}
 
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index e619285..3bcd42f 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -10,7 +10,6 @@
 
 #include <linux/platform_device.h>
 #include <linux/clk-provider.h>
-#include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/of.h>
diff --git a/drivers/clk/clk-bcm2835.c b/drivers/clk/clk-bcm2835.c
index 6b950ca..dd295e4 100644
--- a/drivers/clk/clk-bcm2835.c
+++ b/drivers/clk/clk-bcm2835.c
@@ -32,11 +32,6 @@
 	struct clk *clk;
 	int ret;
 
-	clk = clk_register_fixed_rate(NULL, "sys_pclk", NULL, CLK_IS_ROOT,
-					250000000);
-	if (IS_ERR(clk))
-		pr_err("sys_pclk not registered\n");
-
 	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT,
 					126000000);
 	if (IS_ERR(clk))
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index f01164f..01877f6 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
@@ -309,7 +310,7 @@
 	if (!mul)
 		div = CDCE706_DIVIDER_DIVIDER_MAX;
 
-	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
 		unsigned long best_diff = rate;
 		unsigned long best_div = 0;
 		struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index 85fafb4..089bf88 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -10,6 +10,7 @@
  * Copyright (C) 2014, Topic Embedded Products
  * Licenced under GPL
  */
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/module.h>
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
index 715eec1..ff4ef4f 100644
--- a/drivers/clk/clk-clps711x.c
+++ b/drivers/clk/clk-clps711x.c
@@ -9,7 +9,6 @@
  * (at your option) any later version.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 616f5ae..4735de0 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -55,78 +55,77 @@
 	return rate_ops->recalc_rate(rate_hw, parent_rate);
 }
 
-static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_p)
+static int clk_composite_determine_rate(struct clk_hw *hw,
+					struct clk_rate_request *req)
 {
 	struct clk_composite *composite = to_clk_composite(hw);
 	const struct clk_ops *rate_ops = composite->rate_ops;
 	const struct clk_ops *mux_ops = composite->mux_ops;
 	struct clk_hw *rate_hw = composite->rate_hw;
 	struct clk_hw *mux_hw = composite->mux_hw;
-	struct clk *parent;
+	struct clk_hw *parent;
 	unsigned long parent_rate;
 	long tmp_rate, best_rate = 0;
 	unsigned long rate_diff;
 	unsigned long best_rate_diff = ULONG_MAX;
+	long rate;
 	int i;
 
 	if (rate_hw && rate_ops && rate_ops->determine_rate) {
 		__clk_hw_set_clk(rate_hw, hw);
-		return rate_ops->determine_rate(rate_hw, rate, min_rate,
-						max_rate,
-						best_parent_rate,
-						best_parent_p);
+		return rate_ops->determine_rate(rate_hw, req);
 	} else if (rate_hw && rate_ops && rate_ops->round_rate &&
 		   mux_hw && mux_ops && mux_ops->set_parent) {
-		*best_parent_p = NULL;
+		req->best_parent_hw = NULL;
 
-		if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
-			parent = clk_get_parent(mux_hw->clk);
-			*best_parent_p = __clk_get_hw(parent);
-			*best_parent_rate = __clk_get_rate(parent);
+		if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
+			parent = clk_hw_get_parent(mux_hw);
+			req->best_parent_hw = parent;
+			req->best_parent_rate = clk_hw_get_rate(parent);
 
-			return rate_ops->round_rate(rate_hw, rate,
-						    best_parent_rate);
+			rate = rate_ops->round_rate(rate_hw, req->rate,
+						    &req->best_parent_rate);
+			if (rate < 0)
+				return rate;
+
+			req->rate = rate;
+			return 0;
 		}
 
-		for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
-			parent = clk_get_parent_by_index(mux_hw->clk, i);
+		for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
+			parent = clk_hw_get_parent_by_index(mux_hw, i);
 			if (!parent)
 				continue;
 
-			parent_rate = __clk_get_rate(parent);
+			parent_rate = clk_hw_get_rate(parent);
 
-			tmp_rate = rate_ops->round_rate(rate_hw, rate,
+			tmp_rate = rate_ops->round_rate(rate_hw, req->rate,
 							&parent_rate);
 			if (tmp_rate < 0)
 				continue;
 
-			rate_diff = abs(rate - tmp_rate);
+			rate_diff = abs(req->rate - tmp_rate);
 
-			if (!rate_diff || !*best_parent_p
+			if (!rate_diff || !req->best_parent_hw
 				       || best_rate_diff > rate_diff) {
-				*best_parent_p = __clk_get_hw(parent);
-				*best_parent_rate = parent_rate;
+				req->best_parent_hw = parent;
+				req->best_parent_rate = parent_rate;
 				best_rate_diff = rate_diff;
 				best_rate = tmp_rate;
 			}
 
 			if (!rate_diff)
-				return rate;
+				return 0;
 		}
 
-		return best_rate;
+		req->rate = best_rate;
+		return 0;
 	} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
 		__clk_hw_set_clk(mux_hw, hw);
-		return mux_ops->determine_rate(mux_hw, rate, min_rate,
-					       max_rate, best_parent_rate,
-					       best_parent_p);
+		return mux_ops->determine_rate(mux_hw, req);
 	} else {
 		pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
-		return 0;
+		return -EINVAL;
 	}
 }
 
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 706b578..f24d0a1 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -78,12 +78,14 @@
 }
 
 static unsigned int _get_div(const struct clk_div_table *table,
-			     unsigned int val, unsigned long flags)
+			     unsigned int val, unsigned long flags, u8 width)
 {
 	if (flags & CLK_DIVIDER_ONE_BASED)
 		return val;
 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return 1 << val;
+	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
+		return val ? val : div_mask(width) + 1;
 	if (table)
 		return _get_table_div(table, val);
 	return val + 1;
@@ -101,12 +103,14 @@
 }
 
 static unsigned int _get_val(const struct clk_div_table *table,
-			     unsigned int div, unsigned long flags)
+			     unsigned int div, unsigned long flags, u8 width)
 {
 	if (flags & CLK_DIVIDER_ONE_BASED)
 		return div;
 	if (flags & CLK_DIVIDER_POWER_OF_TWO)
 		return __ffs(div);
+	if (flags & CLK_DIVIDER_MAX_AT_ZERO)
+		return (div == div_mask(width) + 1) ? 0 : div;
 	if (table)
 		return  _get_table_val(table, div);
 	return div - 1;
@@ -117,13 +121,14 @@
 				  const struct clk_div_table *table,
 				  unsigned long flags)
 {
+	struct clk_divider *divider = to_clk_divider(hw);
 	unsigned int div;
 
-	div = _get_div(table, val, flags);
+	div = _get_div(table, val, flags, divider->width);
 	if (!div) {
 		WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
 			"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
-			__clk_get_name(hw->clk));
+			clk_hw_get_name(hw));
 		return parent_rate;
 	}
 
@@ -285,7 +290,7 @@
 
 	maxdiv = _get_maxdiv(table, width, flags);
 
-	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
 		parent_rate = *best_parent_rate;
 		bestdiv = _div_round(table, parent_rate, rate, flags);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
@@ -311,7 +316,7 @@
 			*best_parent_rate = parent_rate_saved;
 			return i;
 		}
-		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
 					       rate * i);
 		now = DIV_ROUND_UP(parent_rate, i);
 		if (_is_best_div(rate, now, best, flags)) {
@@ -323,7 +328,7 @@
 
 	if (!bestdiv) {
 		bestdiv = _get_maxdiv(table, width, flags);
-		*best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
+		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
 	}
 
 	return bestdiv;
@@ -351,7 +356,8 @@
 	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
 		bestdiv = readl(divider->reg) >> divider->shift;
 		bestdiv &= div_mask(divider->width);
-		bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+		bestdiv = _get_div(divider->table, bestdiv, divider->flags,
+			divider->width);
 		return DIV_ROUND_UP(*prate, bestdiv);
 	}
 
@@ -370,7 +376,7 @@
 	if (!_is_valid_div(table, div, flags))
 		return -EINVAL;
 
-	value = _get_val(table, div, flags);
+	value = _get_val(table, div, flags, width);
 
 	return min_t(unsigned int, value, div_mask(width));
 }
@@ -389,6 +395,8 @@
 
 	if (divider->lock)
 		spin_lock_irqsave(divider->lock, flags);
+	else
+		__acquire(divider->lock);
 
 	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
 		val = div_mask(divider->width) << (divider->shift + 16);
@@ -401,6 +409,8 @@
 
 	if (divider->lock)
 		spin_unlock_irqrestore(divider->lock, flags);
+	else
+		__release(divider->lock);
 
 	return 0;
 }
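
The clk-divider change introduces a CLK_DIVIDER_MAX_AT_ZERO flag: a register
value of 0 selects the maximum divider (2^width), any non-zero value is used
as the divider directly, and _get_val() applies the inverse mapping when
programming the field.  A small userspace check of that mapping for a 4-bit
field (mirrors the two helpers above):

#include <stdio.h>

#define DIV_MASK(width)	((1u << (width)) - 1)

/* register value -> divider, as in _get_div() with CLK_DIVIDER_MAX_AT_ZERO */
static unsigned int max_at_zero_div(unsigned int val, unsigned int width)
{
	return val ? val : DIV_MASK(width) + 1;
}

/* divider -> register value, as in _get_val() with CLK_DIVIDER_MAX_AT_ZERO */
static unsigned int max_at_zero_val(unsigned int div, unsigned int width)
{
	return (div == DIV_MASK(width) + 1) ? 0 : div;
}

int main(void)
{
	unsigned int width = 4;

	printf("val 0  -> div %u\n", max_at_zero_div(0, width));	/* 16 */
	printf("val 5  -> div %u\n", max_at_zero_div(5, width));	/*  5 */
	printf("div 16 -> val %u\n", max_at_zero_val(16, width));	/*  0 */
	return 0;
}
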
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c
index 73a8d0f..bac4553 100644
--- a/drivers/clk/clk-efm32gg.c
+++ b/drivers/clk/clk-efm32gg.c
@@ -6,7 +6,6 @@
  * the terms of the GNU General Public License version 2 as published by the
  * Free Software Foundation.
  */
-#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index fccabe4..83de57a 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -41,12 +41,11 @@
 {
 	struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
 
-	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
 		unsigned long best_parent;
 
 		best_parent = (rate / fix->mult) * fix->div;
-		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
-				best_parent);
+		*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
 	}
 
 	return (*prate / fix->div) * fix->mult;
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index 140eb58..e85f856 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -27,11 +27,15 @@
 
 	if (fd->lock)
 		spin_lock_irqsave(fd->lock, flags);
+	else
+		__acquire(fd->lock);
 
 	val = clk_readl(fd->reg);
 
 	if (fd->lock)
 		spin_unlock_irqrestore(fd->lock, flags);
+	else
+		__release(fd->lock);
 
 	m = (val & fd->mmask) >> fd->mshift;
 	n = (val & fd->nmask) >> fd->nshift;
@@ -80,6 +84,8 @@
 
 	if (fd->lock)
 		spin_lock_irqsave(fd->lock, flags);
+	else
+		__acquire(fd->lock);
 
 	val = clk_readl(fd->reg);
 	val &= ~(fd->mmask | fd->nmask);
@@ -88,6 +94,8 @@
 
 	if (fd->lock)
 		spin_unlock_irqrestore(fd->lock, flags);
+	else
+		__release(fd->lock);
 
 	return 0;
 }
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 551dd06..de0b322 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -52,6 +52,8 @@
 
 	if (gate->lock)
 		spin_lock_irqsave(gate->lock, flags);
+	else
+		__acquire(gate->lock);
 
 	if (gate->flags & CLK_GATE_HIWORD_MASK) {
 		reg = BIT(gate->bit_idx + 16);
@@ -70,6 +72,8 @@
 
 	if (gate->lock)
 		spin_unlock_irqrestore(gate->lock, flags);
+	else
+		__release(gate->lock);
 }
 
 static int clk_gate_enable(struct clk_hw *hw)
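
The clk-divider, clk-fractional-divider and clk-gate hunks add matching
__acquire()/__release() annotations on the lockless branches, keeping
sparse's lock-context tracking balanced when the optional spinlock is NULL.
A standalone sketch of the pattern with the sparse annotations stubbed out
(in the kernel they come from the compiler headers and the lock is a real
spinlock):

#include <stdio.h>

#define __acquire(x)	(void)(x)	/* sparse no-op stubs */
#define __release(x)	(void)(x)

struct fake_lock { int held; };

static void lock(struct fake_lock *l)   { l->held = 1; }
static void unlock(struct fake_lock *l) { l->held = 0; }

static void update_register(struct fake_lock *optional_lock)
{
	if (optional_lock)
		lock(optional_lock);
	else
		__acquire(optional_lock);	/* tell sparse we "hold" it */

	printf("write hardware register\n");

	if (optional_lock)
		unlock(optional_lock);
	else
		__release(optional_lock);	/* ...and that we released it */
}

int main(void)
{
	struct fake_lock l = { 0 };

	update_register(&l);	/* caller supplied a lock */
	update_register(NULL);	/* lockless variant, annotations stay balanced */
	return 0;
}
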
diff --git a/drivers/clk/clk-gpio-gate.c b/drivers/clk/clk-gpio-gate.c
deleted file mode 100644
index f564e62..0000000
--- a/drivers/clk/clk-gpio-gate.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Gpio gated clock implementation
- */
-
-#include <linux/clk-provider.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
-#include <linux/err.h>
-#include <linux/device.h>
-
-/**
- * DOC: basic gpio gated clock which can be enabled and disabled
- *      with gpio output
- * Traits of this clock:
- * prepare - clk_(un)prepare only ensures parent is (un)prepared
- * enable - clk_enable and clk_disable are functional & control gpio
- * rate - inherits rate from parent.  No clk_set_rate support
- * parent - fixed parent.  No clk_set_parent support
- */
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-static int clk_gpio_gate_enable(struct clk_hw *hw)
-{
-	struct clk_gpio *clk = to_clk_gpio(hw);
-
-	gpiod_set_value(clk->gpiod, 1);
-
-	return 0;
-}
-
-static void clk_gpio_gate_disable(struct clk_hw *hw)
-{
-	struct clk_gpio *clk = to_clk_gpio(hw);
-
-	gpiod_set_value(clk->gpiod, 0);
-}
-
-static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
-{
-	struct clk_gpio *clk = to_clk_gpio(hw);
-
-	return gpiod_get_value(clk->gpiod);
-}
-
-const struct clk_ops clk_gpio_gate_ops = {
-	.enable = clk_gpio_gate_enable,
-	.disable = clk_gpio_gate_disable,
-	.is_enabled = clk_gpio_gate_is_enabled,
-};
-EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
-
-/**
- * clk_register_gpio - register a gpip clock with the clock framework
- * @dev: device that is registering this clock
- * @name: name of this clock
- * @parent_name: name of this clock's parent
- * @gpio: gpio number to gate this clock
- * @active_low: true if gpio should be set to 0 to enable clock
- * @flags: clock flags
- */
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
-		const char *parent_name, unsigned gpio, bool active_low,
-		unsigned long flags)
-{
-	struct clk_gpio *clk_gpio = NULL;
-	struct clk *clk = ERR_PTR(-EINVAL);
-	struct clk_init_data init = { NULL };
-	unsigned long gpio_flags;
-	int err;
-
-	if (active_low)
-		gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
-	else
-		gpio_flags = GPIOF_OUT_INIT_LOW;
-
-	if (dev)
-		err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
-	else
-		err = gpio_request_one(gpio, gpio_flags, name);
-
-	if (err) {
-		pr_err("%s: %s: Error requesting clock control gpio %u\n",
-		       __func__, name, gpio);
-		return ERR_PTR(err);
-	}
-
-	if (dev)
-		clk_gpio = devm_kzalloc(dev, sizeof(struct clk_gpio),
-					GFP_KERNEL);
-	else
-		clk_gpio = kzalloc(sizeof(struct clk_gpio), GFP_KERNEL);
-
-	if (!clk_gpio) {
-		clk = ERR_PTR(-ENOMEM);
-		goto clk_register_gpio_gate_err;
-	}
-
-	init.name = name;
-	init.ops = &clk_gpio_gate_ops;
-	init.flags = flags | CLK_IS_BASIC;
-	init.parent_names = (parent_name ? &parent_name : NULL);
-	init.num_parents = (parent_name ? 1 : 0);
-
-	clk_gpio->gpiod = gpio_to_desc(gpio);
-	clk_gpio->hw.init = &init;
-
-	clk = clk_register(dev, &clk_gpio->hw);
-
-	if (!IS_ERR(clk))
-		return clk;
-
-	if (!dev)
-		kfree(clk_gpio);
-
-clk_register_gpio_gate_err:
-	if (!dev)
-		gpio_free(gpio);
-
-	return clk;
-}
-EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
-
-#ifdef CONFIG_OF
-/**
- * The clk_register_gpio_gate has to be delayed, because the EPROBE_DEFER
- * can not be handled properly at of_clk_init() call time.
- */
-
-struct clk_gpio_gate_delayed_register_data {
-	struct device_node *node;
-	struct mutex lock;
-	struct clk *clk;
-};
-
-static struct clk *of_clk_gpio_gate_delayed_register_get(
-		struct of_phandle_args *clkspec,
-		void *_data)
-{
-	struct clk_gpio_gate_delayed_register_data *data = _data;
-	struct clk *clk;
-	const char *clk_name = data->node->name;
-	const char *parent_name;
-	int gpio;
-	enum of_gpio_flags of_flags;
-
-	mutex_lock(&data->lock);
-
-	if (data->clk) {
-		mutex_unlock(&data->lock);
-		return data->clk;
-	}
-
-	gpio = of_get_named_gpio_flags(data->node, "enable-gpios", 0,
-						&of_flags);
-	if (gpio < 0) {
-		mutex_unlock(&data->lock);
-		if (gpio != -EPROBE_DEFER)
-			pr_err("%s: %s: Can't get 'enable-gpios' DT property\n",
-			       __func__, clk_name);
-		return ERR_PTR(gpio);
-	}
-
-	parent_name = of_clk_get_parent_name(data->node, 0);
-
-	clk = clk_register_gpio_gate(NULL, clk_name, parent_name, gpio,
-					of_flags & OF_GPIO_ACTIVE_LOW, 0);
-	if (IS_ERR(clk)) {
-		mutex_unlock(&data->lock);
-		return clk;
-	}
-
-	data->clk = clk;
-	mutex_unlock(&data->lock);
-
-	return clk;
-}
-
-/**
- * of_gpio_gate_clk_setup() - Setup function for gpio controlled clock
- */
-static void __init of_gpio_gate_clk_setup(struct device_node *node)
-{
-	struct clk_gpio_gate_delayed_register_data *data;
-
-	data = kzalloc(sizeof(struct clk_gpio_gate_delayed_register_data),
-		       GFP_KERNEL);
-	if (!data)
-		return;
-
-	data->node = node;
-	mutex_init(&data->lock);
-
-	of_clk_add_provider(node, of_clk_gpio_gate_delayed_register_get, data);
-}
-CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
-#endif
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
new file mode 100644
index 0000000..10819e2
--- /dev/null
+++ b/drivers/clk/clk-gpio.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors:
+ *    Jyri Sarha <jsarha@ti.com>
+ *    Sergej Sawazki <ce3a@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Gpio controlled clock implementation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/err.h>
+#include <linux/device.h>
+
+/**
+ * DOC: basic gpio gated clock which can be enabled and disabled
+ *      with gpio output
+ * Traits of this clock:
+ * prepare - clk_(un)prepare only ensures parent is (un)prepared
+ * enable - clk_enable and clk_disable are functional & control gpio
+ * rate - inherits rate from parent.  No clk_set_rate support
+ * parent - fixed parent.  No clk_set_parent support
+ */
+
+#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
+
+static int clk_gpio_gate_enable(struct clk_hw *hw)
+{
+	struct clk_gpio *clk = to_clk_gpio(hw);
+
+	gpiod_set_value(clk->gpiod, 1);
+
+	return 0;
+}
+
+static void clk_gpio_gate_disable(struct clk_hw *hw)
+{
+	struct clk_gpio *clk = to_clk_gpio(hw);
+
+	gpiod_set_value(clk->gpiod, 0);
+}
+
+static int clk_gpio_gate_is_enabled(struct clk_hw *hw)
+{
+	struct clk_gpio *clk = to_clk_gpio(hw);
+
+	return gpiod_get_value(clk->gpiod);
+}
+
+const struct clk_ops clk_gpio_gate_ops = {
+	.enable = clk_gpio_gate_enable,
+	.disable = clk_gpio_gate_disable,
+	.is_enabled = clk_gpio_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_gpio_gate_ops);
+
+/**
+ * DOC: basic clock multiplexer which can be controlled with a gpio output
+ * Traits of this clock:
+ * prepare - clk_prepare only ensures that parents are prepared
+ * rate - rate is only affected by parent switching.  No clk_set_rate support
+ * parent - parent is adjustable through clk_set_parent
+ */
+
+static u8 clk_gpio_mux_get_parent(struct clk_hw *hw)
+{
+	struct clk_gpio *clk = to_clk_gpio(hw);
+
+	return gpiod_get_value(clk->gpiod);
+}
+
+static int clk_gpio_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_gpio *clk = to_clk_gpio(hw);
+
+	gpiod_set_value(clk->gpiod, index);
+
+	return 0;
+}
+
+const struct clk_ops clk_gpio_mux_ops = {
+	.get_parent = clk_gpio_mux_get_parent,
+	.set_parent = clk_gpio_mux_set_parent,
+	.determine_rate = __clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gpio_mux_ops);
+
+static struct clk *clk_register_gpio(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents, unsigned gpio,
+		bool active_low, unsigned long flags,
+		const struct clk_ops *clk_gpio_ops)
+{
+	struct clk_gpio *clk_gpio;
+	struct clk *clk;
+	struct clk_init_data init = {};
+	unsigned long gpio_flags;
+	int err;
+
+	if (dev)
+		clk_gpio = devm_kzalloc(dev, sizeof(*clk_gpio),	GFP_KERNEL);
+	else
+		clk_gpio = kzalloc(sizeof(*clk_gpio), GFP_KERNEL);
+
+	if (!clk_gpio)
+		return ERR_PTR(-ENOMEM);
+
+	if (active_low)
+		gpio_flags = GPIOF_ACTIVE_LOW | GPIOF_OUT_INIT_HIGH;
+	else
+		gpio_flags = GPIOF_OUT_INIT_LOW;
+
+	if (dev)
+		err = devm_gpio_request_one(dev, gpio, gpio_flags, name);
+	else
+		err = gpio_request_one(gpio, gpio_flags, name);
+	if (err) {
+		if (err != -EPROBE_DEFER)
+			pr_err("%s: %s: Error requesting clock control gpio %u\n",
+					__func__, name, gpio);
+		if (!dev)
+			kfree(clk_gpio);
+
+		return ERR_PTR(err);
+	}
+
+	init.name = name;
+	init.ops = clk_gpio_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.parent_names = parent_names;
+	init.num_parents = num_parents;
+
+	clk_gpio->gpiod = gpio_to_desc(gpio);
+	clk_gpio->hw.init = &init;
+
+	if (dev)
+		clk = devm_clk_register(dev, &clk_gpio->hw);
+	else
+		clk = clk_register(NULL, &clk_gpio->hw);
+
+	if (!IS_ERR(clk))
+		return clk;
+
+	if (!dev) {
+		gpiod_put(clk_gpio->gpiod);
+		kfree(clk_gpio);
+	}
+
+	return clk;
+}
+
+/**
+ * clk_register_gpio_gate - register a gpio clock gate with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @gpio: gpio number to gate this clock
+ * @active_low: true if gpio should be set to 0 to enable clock
+ * @flags: clock flags
+ */
+struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
+		const char *parent_name, unsigned gpio, bool active_low,
+		unsigned long flags)
+{
+	return clk_register_gpio(dev, name,
+			(parent_name ? &parent_name : NULL),
+			(parent_name ? 1 : 0), gpio, active_low, flags,
+			&clk_gpio_gate_ops);
+}
+EXPORT_SYMBOL_GPL(clk_register_gpio_gate);
+
+/**
+ * clk_register_gpio_mux - register a gpio clock mux with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_names: names of this clock's parents
+ * @num_parents: number of parents listed in @parent_names
+ * @gpio: gpio number to gate this clock
+ * @active_low: true if gpio should be set to 0 to enable clock
+ * @flags: clock flags
+ */
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents, unsigned gpio,
+		bool active_low, unsigned long flags)
+{
+	if (num_parents != 2) {
+		pr_err("mux-clock %s must have 2 parents\n", name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return clk_register_gpio(dev, name, parent_names, num_parents,
+			gpio, active_low, flags, &clk_gpio_mux_ops);
+}
+EXPORT_SYMBOL_GPL(clk_register_gpio_mux);
+
+#ifdef CONFIG_OF
+/*
+ * The clk_register_get() call has to be delayed, because -EPROBE_DEFER
+ * cannot be handled properly at of_clk_init() call time.
+ */
+
+struct clk_gpio_delayed_register_data {
+	const char *gpio_name;
+	struct device_node *node;
+	struct mutex lock;
+	struct clk *clk;
+	struct clk *(*clk_register_get)(const char *name,
+			const char * const *parent_names, u8 num_parents,
+			unsigned gpio, bool active_low);
+};
+
+static struct clk *of_clk_gpio_delayed_register_get(
+		struct of_phandle_args *clkspec, void *_data)
+{
+	struct clk_gpio_delayed_register_data *data = _data;
+	struct clk *clk;
+	const char **parent_names;
+	int i, num_parents;
+	int gpio;
+	enum of_gpio_flags of_flags;
+
+	mutex_lock(&data->lock);
+
+	if (data->clk) {
+		mutex_unlock(&data->lock);
+		return data->clk;
+	}
+
+	gpio = of_get_named_gpio_flags(data->node, data->gpio_name, 0,
+			&of_flags);
+	if (gpio < 0) {
+		mutex_unlock(&data->lock);
+		if (gpio == -EPROBE_DEFER)
+			pr_debug("%s: %s: GPIOs not yet available, retry later\n",
+					data->node->name, __func__);
+		else
+			pr_err("%s: %s: Can't get '%s' DT property\n",
+					data->node->name, __func__,
+					data->gpio_name);
+		return ERR_PTR(gpio);
+	}
+
+	num_parents = of_clk_get_parent_count(data->node);
+
+	parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL);
+	if (!parent_names) {
+		clk = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	for (i = 0; i < num_parents; i++)
+		parent_names[i] = of_clk_get_parent_name(data->node, i);
+
+	clk = data->clk_register_get(data->node->name, parent_names,
+			num_parents, gpio, of_flags & OF_GPIO_ACTIVE_LOW);
+	if (IS_ERR(clk))
+		goto out;
+
+	data->clk = clk;
+out:
+	mutex_unlock(&data->lock);
+	kfree(parent_names);
+
+	return clk;
+}
+
+static struct clk *of_clk_gpio_gate_delayed_register_get(const char *name,
+		const char * const *parent_names, u8 num_parents,
+		unsigned gpio, bool active_low)
+{
+	return clk_register_gpio_gate(NULL, name, parent_names[0],
+			gpio, active_low, 0);
+}
+
+static struct clk *of_clk_gpio_mux_delayed_register_get(const char *name,
+		const char * const *parent_names, u8 num_parents, unsigned gpio,
+		bool active_low)
+{
+	return clk_register_gpio_mux(NULL, name, parent_names, num_parents,
+			gpio, active_low, 0);
+}
+
+static void __init of_gpio_clk_setup(struct device_node *node,
+		const char *gpio_name,
+		struct clk *(*clk_register_get)(const char *name,
+				const char * const *parent_names,
+				u8 num_parents,
+				unsigned gpio, bool active_low))
+{
+	struct clk_gpio_delayed_register_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return;
+
+	data->node = node;
+	data->gpio_name = gpio_name;
+	data->clk_register_get = clk_register_get;
+	mutex_init(&data->lock);
+
+	of_clk_add_provider(node, of_clk_gpio_delayed_register_get, data);
+}
+
+static void __init of_gpio_gate_clk_setup(struct device_node *node)
+{
+	of_gpio_clk_setup(node, "enable-gpios",
+		of_clk_gpio_gate_delayed_register_get);
+}
+CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup);
+
+void __init of_gpio_mux_clk_setup(struct device_node *node)
+{
+	of_gpio_clk_setup(node, "select-gpios",
+		of_clk_gpio_mux_delayed_register_get);
+}
+CLK_OF_DECLARE(gpio_mux_clk, "gpio-mux-clock", of_gpio_mux_clk_setup);
+#endif
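
For orientation, here is a minimal consumer-side sketch of the gate API added in the new clk-gpio.c above; the device, clock names, GPIO number and the clkdev hookup are invented for illustration and are not part of this patch:

/* Hypothetical example, not part of this patch. */
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_register_audio_mclk(struct device *dev)
{
	struct clk *clk;

	/* gate the fixed-rate "osc24m" parent with GPIO 42, active high */
	clk = clk_register_gpio_gate(dev, "audio_mclk", "osc24m",
				     42, false, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* let the consumer find it via clk_get(dev, "mclk") */
	return clk_register_clkdev(clk, "mclk", dev_name(dev));
}
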
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 2e7e9d9..be3a21a 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c
index 5181b89..f37f719 100644
--- a/drivers/clk/clk-moxart.c
+++ b/drivers/clk/clk-moxart.c
@@ -10,6 +10,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 6066a01..7129c86 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -10,7 +10,6 @@
  * Simple multiplexer clock implementation
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -32,7 +31,7 @@
 static u8 clk_mux_get_parent(struct clk_hw *hw)
 {
 	struct clk_mux *mux = to_clk_mux(hw);
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 	u32 val;
 
 	/*
@@ -85,6 +84,8 @@
 
 	if (mux->lock)
 		spin_lock_irqsave(mux->lock, flags);
+	else
+		__acquire(mux->lock);
 
 	if (mux->flags & CLK_MUX_HIWORD_MASK) {
 		val = mux->mask << (mux->shift + 16);
@@ -97,6 +98,8 @@
 
 	if (mux->lock)
 		spin_unlock_irqrestore(mux->lock, flags);
+	else
+		__release(mux->lock);
 
 	return 0;
 }
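
The new else branches above exist purely for sparse: __acquire()/__release() are no-ops at run time and only keep sparse's lock-context counter balanced on the path that does not actually take the optional spinlock. A hedged, generic sketch of the same pattern (the structure and register are invented):

static void example_write_locked(struct example_hw *hw, u32 val)
{
	unsigned long flags = 0;

	if (hw->lock)
		spin_lock_irqsave(hw->lock, flags);
	else
		__acquire(hw->lock);		/* context entered (sparse only) */

	writel(val, hw->reg);

	if (hw->lock)
		spin_unlock_irqrestore(hw->lock, flags);
	else
		__release(hw->lock);		/* context left (sparse only) */
}
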
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index c948717..e4d8a99 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -8,8 +8,7 @@
 #define pr_fmt(fmt) "Nomadik SRC clocks: " fmt
 
 #include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/clk-provider.h>
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index 45a535a..8e3039f 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/mfd/palmas.h>
 #include <linux/module.h>
diff --git a/drivers/clk/clk-rk808.c b/drivers/clk/clk-rk808.c
index 83902b9..0fee2f4 100644
--- a/drivers/clk/clk-rk808.c
+++ b/drivers/clk/clk-rk808.c
@@ -15,7 +15,6 @@
  * more details.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 9b13a30..d266299 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -58,21 +58,17 @@
 static int s2mps11_clk_prepare(struct clk_hw *hw)
 {
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
-	int ret;
 
-	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
+	return regmap_update_bits(s2mps11->iodev->regmap_pmic,
 				 s2mps11->reg,
 				 s2mps11->mask, s2mps11->mask);
-
-	return ret;
 }
 
 static void s2mps11_clk_unprepare(struct clk_hw *hw)
 {
 	struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
-	int ret;
 
-	ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
+	regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
 			   s2mps11->mask, ~s2mps11->mask);
 }
 
@@ -186,15 +182,15 @@
 	struct clk_init_data *clks_init;
 	int i, ret = 0;
 
-	s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
-					S2MPS11_CLKS_NUM, GFP_KERNEL);
+	s2mps11_clks = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
+				sizeof(*s2mps11_clk), GFP_KERNEL);
 	if (!s2mps11_clks)
 		return -ENOMEM;
 
 	s2mps11_clk = s2mps11_clks;
 
-	clk_table = devm_kzalloc(&pdev->dev, sizeof(struct clk *) *
-				 S2MPS11_CLKS_NUM, GFP_KERNEL);
+	clk_table = devm_kcalloc(&pdev->dev, S2MPS11_CLKS_NUM,
+				sizeof(struct clk *), GFP_KERNEL);
 	if (!clk_table)
 		return -ENOMEM;
 
@@ -246,7 +242,7 @@
 					s2mps11_name(s2mps11_clk), NULL);
 		if (!s2mps11_clk->lookup) {
 			ret = -ENOMEM;
-			goto err_lup;
+			goto err_reg;
 		}
 	}
 
@@ -265,16 +261,10 @@
 	platform_set_drvdata(pdev, s2mps11_clks);
 
 	return ret;
-err_lup:
-	devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
+
 err_reg:
-	while (s2mps11_clk > s2mps11_clks) {
-		if (s2mps11_clk->lookup) {
-			clkdev_drop(s2mps11_clk->lookup);
-			devm_clk_unregister(&pdev->dev, s2mps11_clk->clk);
-		}
-		s2mps11_clk--;
-	}
+	while (--i >= 0)
+		clkdev_drop(s2mps11_clks[i].lookup);
 
 	return ret;
 }
@@ -322,7 +312,7 @@
 }
 subsys_initcall(s2mps11_clk_init);
 
-static void __init s2mps11_clk_cleanup(void)
+static void __exit s2mps11_clk_cleanup(void)
 {
 	platform_driver_unregister(&s2mps11_clk_driver);
 }
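
Two points from the s2mps11 probe rework above, restated: devm_kcalloc() checks the count * size multiplication for overflow, which the open-coded devm_kzalloc(sizeof(*x) * N, ...) form did not, and since every clock is now devm-managed the error path only needs to unwind the clkdev lookups. An illustrative before/after of the allocation style (not taken from this patch):

/* old, overflow-prone form */
tbl = devm_kzalloc(dev, num * sizeof(*tbl), GFP_KERNEL);

/* new form: returns NULL cleanly if num * sizeof(*tbl) would overflow */
tbl = devm_kcalloc(dev, num, sizeof(*tbl), GFP_KERNEL);
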
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index e39e1e6..5596c0a 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -18,7 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/clkdev.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -439,7 +439,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk),
+		__func__, clk_hw_get_name(hw),
 		hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
 		parent_rate, (unsigned long)rate);
 
@@ -497,7 +497,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: a = %lu, b = %lu, c = %lu, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk), a, b, c,
+		__func__, clk_hw_get_name(hw), a, b, c,
 		*parent_rate, rate);
 
 	return rate;
@@ -521,7 +521,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk),
+		__func__, clk_hw_get_name(hw),
 		hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
 		parent_rate, rate);
 
@@ -632,7 +632,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, m = %lu, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk),
+		__func__, clk_hw_get_name(hw),
 		hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
 		m, parent_rate, (unsigned long)rate);
 
@@ -663,7 +663,7 @@
 		divby4 = 1;
 
 	/* multisync can set pll */
-	if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) {
+	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
 		/*
 		 * find largest integer divider for max
 		 * vco frequency and given target rate
@@ -745,7 +745,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: a = %lu, b = %lu, c = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk), a, b, c, divby4,
+		__func__, clk_hw_get_name(hw), a, b, c, divby4,
 		*parent_rate, rate);
 
 	return rate;
@@ -777,7 +777,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: p1 = %lu, p2 = %lu, p3 = %lu, divby4 = %d, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk),
+		__func__, clk_hw_get_name(hw),
 		hwdata->params.p1, hwdata->params.p2, hwdata->params.p3,
 		divby4, parent_rate, rate);
 
@@ -1013,7 +1013,7 @@
 		rate = SI5351_CLKOUT_MIN_FREQ;
 
 	/* request frequency if multisync master */
-	if (__clk_get_flags(hwdata->hw.clk) & CLK_SET_RATE_PARENT) {
+	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
 		/* use r divider for frequencies below 1MHz */
 		rdiv = SI5351_OUTPUT_CLK_DIV_1;
 		while (rate < SI5351_MULTISYNTH_MIN_FREQ &&
@@ -1042,7 +1042,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv),
+		__func__, clk_hw_get_name(hw), (1 << rdiv),
 		*parent_rate, rate);
 
 	return rate;
@@ -1093,7 +1093,7 @@
 
 	dev_dbg(&hwdata->drvdata->client->dev,
 		"%s - %s: rdiv = %u, parent_rate = %lu, rate = %lu\n",
-		__func__, __clk_get_name(hwdata->hw.clk), (1 << rdiv),
+		__func__, clk_hw_get_name(hw), (1 << rdiv),
 		parent_rate, rate);
 
 	return 0;
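
The si5351 string conversions above drop the old consumer-pointer accessors (__clk_get_name(hwdata->hw.clk), __clk_get_flags(...)) in favour of the clk_hw based helpers added to the core later in this patch, so provider callbacks no longer reach through the per-user struct clk. A hedged sketch of the resulting style in a generic round_rate callback (names and the pass-through rounding are invented):

static long example_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	pr_debug("%s: rounding %lu Hz\n", clk_hw_get_name(hw), rate);

	/* only let the parent move if the framework allows it */
	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
		*parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate);

	return rate;
}
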
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index 20a5aec..cf478aa 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -19,6 +19,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/module.h>
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 3f6f7ad..fd89e77 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -175,11 +175,10 @@
 	if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx))
 		mult = 2;
 
-	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
 		unsigned long best_parent = rate / mult;
 
-		*prate =
-		    __clk_round_rate(__clk_get_parent(hw->clk), best_parent);
+		*prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
 	}
 
 	return *prate * mult;
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 4a75513..8e5ed64 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -20,7 +20,6 @@
 *
 */
 
-#include <linux/clk.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -91,7 +90,7 @@
 	clkdata->twl6040 = twl6040;
 
 	clkdata->mcpdm_fclk.init = &wm831x_clkout_init;
-	clkdata->clk = clk_register(&pdev->dev, &clkdata->mcpdm_fclk);
+	clkdata->clk = devm_clk_register(&pdev->dev, &clkdata->mcpdm_fclk);
 	if (IS_ERR(clkdata->clk))
 		return PTR_ERR(clkdata->clk);
 
@@ -100,21 +99,11 @@
 	return 0;
 }
 
-static int twl6040_clk_remove(struct platform_device *pdev)
-{
-	struct twl6040_clk *clkdata = platform_get_drvdata(pdev);
-
-	clk_unregister(clkdata->clk);
-
-	return 0;
-}
-
 static struct platform_driver twl6040_clk_driver = {
 	.driver = {
 		.name = "twl6040-clk",
 	},
 	.probe = twl6040_clk_probe,
-	.remove = twl6040_clk_remove,
 };
 
 module_platform_driver(twl6040_clk_driver);
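
The twl6040 change is the usual devm conversion: once devm_clk_register() manages the clock's lifetime, unregistration happens automatically when the device is unbound, so the explicit .remove callback becomes dead weight. A generic, hedged sketch of that probe shape (all names invented):

static int example_clk_probe(struct platform_device *pdev)
{
	struct example_clk *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->hw.init = &example_clk_init;
	priv->clk = devm_clk_register(&pdev->dev, &priv->hw);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	platform_set_drvdata(pdev, priv);
	return 0;	/* no .remove: devm unregisters the clock on unbind */
}
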
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
index 18bf5e5..95d1742 100644
--- a/drivers/clk/clk-u300.c
+++ b/drivers/clk/clk-u300.c
@@ -5,8 +5,8 @@
  * Author: Linus Walleij <linus.walleij@stericsson.com>
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
-#include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/clk-provider.h>
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index ef67719..43f9d15 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -12,7 +12,6 @@
  *
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/module.h>
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index f26b3ac..96a6190 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -60,7 +60,6 @@
 
 struct xgene_clk_pll {
 	struct clk_hw	hw;
-	const char	*name;
 	void __iomem	*reg;
 	spinlock_t	*lock;
 	u32		pll_offset;
@@ -75,7 +74,7 @@
 	u32 data;
 
 	data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
-	pr_debug("%s pll %s\n", pllclk->name,
+	pr_debug("%s pll %s\n", clk_hw_get_name(hw),
 		data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");
 
 	return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
@@ -113,7 +112,7 @@
 		fref = parent_rate / nref;
 		fvco = fref * nfb;
 	}
-	pr_debug("%s pll recalc rate %ld parent %ld\n", pllclk->name,
+	pr_debug("%s pll recalc rate %ld parent %ld\n", clk_hw_get_name(hw),
 		fvco / nout, parent_rate);
 
 	return fvco / nout;
@@ -146,7 +145,6 @@
 	init.parent_names = parent_name ? &parent_name : NULL;
 	init.num_parents = parent_name ? 1 : 0;
 
-	apmclk->name = name;
 	apmclk->reg = reg;
 	apmclk->lock = lock;
 	apmclk->pll_offset = pll_offset;
@@ -210,7 +208,6 @@
 
 struct xgene_clk {
 	struct clk_hw	hw;
-	const char	*name;
 	spinlock_t	*lock;
 	struct xgene_dev_parameters	param;
 };
@@ -228,7 +225,7 @@
 		spin_lock_irqsave(pclk->lock, flags);
 
 	if (pclk->param.csr_reg != NULL) {
-		pr_debug("%s clock enabled\n", pclk->name);
+		pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
 		reg = __pa(pclk->param.csr_reg);
 		/* First enable the clock */
 		data = xgene_clk_read(pclk->param.csr_reg +
@@ -237,7 +234,7 @@
 		xgene_clk_write(data, pclk->param.csr_reg +
 					pclk->param.reg_clk_offset);
 		pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n",
-			pclk->name, &reg,
+			clk_hw_get_name(hw), &reg,
 			pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
 			data);
 
@@ -248,7 +245,7 @@
 		xgene_clk_write(data, pclk->param.csr_reg +
 					pclk->param.reg_csr_offset);
 		pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n",
-			pclk->name, &reg,
+			clk_hw_get_name(hw), &reg,
 			pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
 			data);
 	}
@@ -269,7 +266,7 @@
 		spin_lock_irqsave(pclk->lock, flags);
 
 	if (pclk->param.csr_reg != NULL) {
-		pr_debug("%s clock disabled\n", pclk->name);
+		pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
 		/* First put the CSR in reset */
 		data = xgene_clk_read(pclk->param.csr_reg +
 					pclk->param.reg_csr_offset);
@@ -295,10 +292,10 @@
 	u32 data = 0;
 
 	if (pclk->param.csr_reg != NULL) {
-		pr_debug("%s clock checking\n", pclk->name);
+		pr_debug("%s clock checking\n", clk_hw_get_name(hw));
 		data = xgene_clk_read(pclk->param.csr_reg +
 					pclk->param.reg_clk_offset);
-		pr_debug("%s clock is %s\n", pclk->name,
+		pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
 			data & pclk->param.reg_clk_mask ? "enabled" :
 							"disabled");
 	}
@@ -321,11 +318,13 @@
 		data &= (1 << pclk->param.reg_divider_width) - 1;
 
 		pr_debug("%s clock recalc rate %ld parent %ld\n",
-			pclk->name, parent_rate / data, parent_rate);
+			clk_hw_get_name(hw),
+			parent_rate / data, parent_rate);
+
 		return parent_rate / data;
 	} else {
 		pr_debug("%s clock recalc rate %ld parent %ld\n",
-			pclk->name, parent_rate, parent_rate);
+			clk_hw_get_name(hw), parent_rate, parent_rate);
 		return parent_rate;
 	}
 }
@@ -357,7 +356,7 @@
 		data |= divider;
 		xgene_clk_write(data, pclk->param.divider_reg +
 					pclk->param.reg_divider_offset);
-		pr_debug("%s clock set rate %ld\n", pclk->name,
+		pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
 			parent_rate / divider_save);
 	} else {
 		divider_save = 1;
@@ -419,7 +418,6 @@
 	init.parent_names = parent_name ? &parent_name : NULL;
 	init.num_parents = parent_name ? 1 : 0;
 
-	apmclk->name = name;
 	apmclk->lock = lock;
 	apmclk->hw.init = &init;
 	apmclk->param = *parameters;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index ddb4b54..43e2c3a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -9,6 +9,7 @@
  * Standard functionality for the common clock API.  See Documentation/clk.txt
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clk/clk-conf.h>
 #include <linux/module.h>
@@ -56,8 +57,11 @@
 	struct clk_core		*new_parent;
 	struct clk_core		*new_child;
 	unsigned long		flags;
+	bool			orphan;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
+	unsigned long		min_rate;
+	unsigned long		max_rate;
 	unsigned long		accuracy;
 	int			phase;
 	struct hlist_head	children;
@@ -111,12 +115,14 @@
 }
 
 static unsigned long clk_enable_lock(void)
+	__acquires(enable_lock)
 {
 	unsigned long flags;
 
 	if (!spin_trylock_irqsave(&enable_lock, flags)) {
 		if (enable_owner == current) {
 			enable_refcnt++;
+			__acquire(enable_lock);
 			return flags;
 		}
 		spin_lock_irqsave(&enable_lock, flags);
@@ -129,12 +135,15 @@
 }
 
 static void clk_enable_unlock(unsigned long flags)
+	__releases(enable_lock)
 {
 	WARN_ON_ONCE(enable_owner != current);
 	WARN_ON_ONCE(enable_refcnt == 0);
 
-	if (--enable_refcnt)
+	if (--enable_refcnt) {
+		__release(enable_lock);
 		return;
+	}
 	enable_owner = NULL;
 	spin_unlock_irqrestore(&enable_lock, flags);
 }
@@ -269,27 +278,29 @@
 }
 EXPORT_SYMBOL_GPL(__clk_get_name);
 
+const char *clk_hw_get_name(const struct clk_hw *hw)
+{
+	return hw->core->name;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_name);
+
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
 	return !clk ? NULL : clk->core->hw;
 }
 EXPORT_SYMBOL_GPL(__clk_get_hw);
 
-u8 __clk_get_num_parents(struct clk *clk)
+unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
 {
-	return !clk ? 0 : clk->core->num_parents;
+	return hw->core->num_parents;
 }
-EXPORT_SYMBOL_GPL(__clk_get_num_parents);
+EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
 
-struct clk *__clk_get_parent(struct clk *clk)
+struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
 {
-	if (!clk)
-		return NULL;
-
-	/* TODO: Create a per-user clk and change callers to call clk_put */
-	return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
+	return hw->core->parent ? hw->core->parent->hw : NULL;
 }
-EXPORT_SYMBOL_GPL(__clk_get_parent);
+EXPORT_SYMBOL_GPL(clk_hw_get_parent);
 
 static struct clk_core *__clk_lookup_subtree(const char *name,
 					     struct clk_core *core)
@@ -348,18 +359,16 @@
 		return core->parents[index];
 }
 
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+struct clk_hw *
+clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
 {
 	struct clk_core *parent;
 
-	if (!clk)
-		return NULL;
+	parent = clk_core_get_parent_by_index(hw->core, index);
 
-	parent = clk_core_get_parent_by_index(clk->core, index);
-
-	return !parent ? NULL : parent->hw->clk;
+	return !parent ? NULL : parent->hw;
 }
-EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
+EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
@@ -387,14 +396,11 @@
 	return ret;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
+unsigned long clk_hw_get_rate(const struct clk_hw *hw)
 {
-	if (!clk)
-		return 0;
-
-	return clk_core_get_rate_nolock(clk->core);
+	return clk_core_get_rate_nolock(hw->core);
 }
-EXPORT_SYMBOL_GPL(__clk_get_rate);
+EXPORT_SYMBOL_GPL(clk_hw_get_rate);
 
 static unsigned long __clk_get_accuracy(struct clk_core *core)
 {
@@ -410,12 +416,15 @@
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-bool __clk_is_prepared(struct clk *clk)
+unsigned long clk_hw_get_flags(const struct clk_hw *hw)
 {
-	if (!clk)
-		return false;
+	return hw->core->flags;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_flags);
 
-	return clk_core_is_prepared(clk->core);
+bool clk_hw_is_prepared(const struct clk_hw *hw)
+{
+	return clk_core_is_prepared(hw->core);
 }
 
 bool __clk_is_enabled(struct clk *clk)
@@ -436,28 +445,31 @@
 	return now <= rate && now > best;
 }
 
-static long
-clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
-			     unsigned long min_rate,
-			     unsigned long max_rate,
-			     unsigned long *best_parent_rate,
-			     struct clk_hw **best_parent_p,
+static int
+clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
 			     unsigned long flags)
 {
 	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
-	int i, num_parents;
-	unsigned long parent_rate, best = 0;
+	int i, num_parents, ret;
+	unsigned long best = 0;
+	struct clk_rate_request parent_req = *req;
 
 	/* if NO_REPARENT flag set, pass through to current parent */
 	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
 		parent = core->parent;
-		if (core->flags & CLK_SET_RATE_PARENT)
-			best = __clk_determine_rate(parent ? parent->hw : NULL,
-						    rate, min_rate, max_rate);
-		else if (parent)
+		if (core->flags & CLK_SET_RATE_PARENT) {
+			ret = __clk_determine_rate(parent ? parent->hw : NULL,
+						   &parent_req);
+			if (ret)
+				return ret;
+
+			best = parent_req.rate;
+		} else if (parent) {
 			best = clk_core_get_rate_nolock(parent);
-		else
+		} else {
 			best = clk_core_get_rate_nolock(core);
+		}
+
 		goto out;
 	}
 
@@ -467,24 +479,33 @@
 		parent = clk_core_get_parent_by_index(core, i);
 		if (!parent)
 			continue;
-		if (core->flags & CLK_SET_RATE_PARENT)
-			parent_rate = __clk_determine_rate(parent->hw, rate,
-							   min_rate,
-							   max_rate);
-		else
-			parent_rate = clk_core_get_rate_nolock(parent);
-		if (mux_is_better_rate(rate, parent_rate, best, flags)) {
+
+		if (core->flags & CLK_SET_RATE_PARENT) {
+			parent_req = *req;
+			ret = __clk_determine_rate(parent->hw, &parent_req);
+			if (ret)
+				continue;
+		} else {
+			parent_req.rate = clk_core_get_rate_nolock(parent);
+		}
+
+		if (mux_is_better_rate(req->rate, parent_req.rate,
+				       best, flags)) {
 			best_parent = parent;
-			best = parent_rate;
+			best = parent_req.rate;
 		}
 	}
 
+	if (!best_parent)
+		return -EINVAL;
+
 out:
 	if (best_parent)
-		*best_parent_p = best_parent->hw;
-	*best_parent_rate = best;
+		req->best_parent_hw = best_parent->hw;
+	req->best_parent_rate = best;
+	req->rate = best;
 
-	return best;
+	return 0;
 }
 
 struct clk *__clk_lookup(const char *name)
@@ -500,8 +521,8 @@
 {
 	struct clk *clk_user;
 
-	*min_rate = 0;
-	*max_rate = ULONG_MAX;
+	*min_rate = core->min_rate;
+	*max_rate = core->max_rate;
 
 	hlist_for_each_entry(clk_user, &core->clks, clks_node)
 		*min_rate = max(*min_rate, clk_user->min_rate);
@@ -510,33 +531,30 @@
 		*max_rate = min(*max_rate, clk_user->max_rate);
 }
 
+void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+			   unsigned long max_rate)
+{
+	hw->core->min_rate = min_rate;
+	hw->core->max_rate = max_rate;
+}
+EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
+
 /*
  * Helper for finding best parent to provide a given frequency. This can be used
  * directly as a determine_rate callback (e.g. for a mux), or from a more
  * complex clock that may combine a mux with other operations.
  */
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-			      unsigned long min_rate,
-			      unsigned long max_rate,
-			      unsigned long *best_parent_rate,
-			      struct clk_hw **best_parent_p)
+int __clk_mux_determine_rate(struct clk_hw *hw,
+			     struct clk_rate_request *req)
 {
-	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
-					    best_parent_rate,
-					    best_parent_p, 0);
+	return clk_mux_determine_rate_flags(hw, req, 0);
 }
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
-			      unsigned long min_rate,
-			      unsigned long max_rate,
-			      unsigned long *best_parent_rate,
-			      struct clk_hw **best_parent_p)
+int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+				     struct clk_rate_request *req)
 {
-	return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
-					    best_parent_rate,
-					    best_parent_p,
-					    CLK_MUX_ROUND_CLOSEST);
+	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
 }
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
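
The helper rework above is the visible half of the ->determine_rate() prototype change in this series: instead of returning a rate and filling five by-reference parameters, a callback now receives a struct clk_rate_request carrying rate, min_rate/max_rate and the best_parent_hw/best_parent_rate pair, and returns 0 or a negative error. A hedged sketch of a trivial driver callback in the new style (the divide-by-two behaviour is invented, and a fixed non-NULL parent is assumed):

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	unsigned long parent_rate = clk_hw_get_rate(parent);

	/* this imaginary clock only outputs parent/2, clamped to the request */
	req->best_parent_hw = parent;
	req->best_parent_rate = parent_rate;
	req->rate = clamp(parent_rate / 2, req->min_rate, req->max_rate);

	return 0;
}
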
 
@@ -759,14 +777,11 @@
 }
 EXPORT_SYMBOL_GPL(clk_enable);
 
-static unsigned long clk_core_round_rate_nolock(struct clk_core *core,
-						unsigned long rate,
-						unsigned long min_rate,
-						unsigned long max_rate)
+static int clk_core_round_rate_nolock(struct clk_core *core,
+				      struct clk_rate_request *req)
 {
-	unsigned long parent_rate = 0;
 	struct clk_core *parent;
-	struct clk_hw *parent_hw;
+	long rate;
 
 	lockdep_assert_held(&prepare_lock);
 
@@ -774,21 +789,30 @@
 		return 0;
 
 	parent = core->parent;
-	if (parent)
-		parent_rate = parent->rate;
+	if (parent) {
+		req->best_parent_hw = parent->hw;
+		req->best_parent_rate = parent->rate;
+	} else {
+		req->best_parent_hw = NULL;
+		req->best_parent_rate = 0;
+	}
 
 	if (core->ops->determine_rate) {
-		parent_hw = parent ? parent->hw : NULL;
-		return core->ops->determine_rate(core->hw, rate,
-						min_rate, max_rate,
-						&parent_rate, &parent_hw);
-	} else if (core->ops->round_rate)
-		return core->ops->round_rate(core->hw, rate, &parent_rate);
-	else if (core->flags & CLK_SET_RATE_PARENT)
-		return clk_core_round_rate_nolock(core->parent, rate, min_rate,
-						  max_rate);
-	else
-		return core->rate;
+		return core->ops->determine_rate(core->hw, req);
+	} else if (core->ops->round_rate) {
+		rate = core->ops->round_rate(core->hw, req->rate,
+					     &req->best_parent_rate);
+		if (rate < 0)
+			return rate;
+
+		req->rate = rate;
+	} else if (core->flags & CLK_SET_RATE_PARENT) {
+		return clk_core_round_rate_nolock(parent, req);
+	} else {
+		req->rate = core->rate;
+	}
+
+	return 0;
 }
 
 /**
@@ -800,38 +824,32 @@
  *
  * Useful for clk_ops such as .set_rate and .determine_rate.
  */
-unsigned long __clk_determine_rate(struct clk_hw *hw,
-				   unsigned long rate,
-				   unsigned long min_rate,
-				   unsigned long max_rate)
+int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
 {
-	if (!hw)
+	if (!hw) {
+		req->rate = 0;
 		return 0;
+	}
 
-	return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+	return clk_core_round_rate_nolock(hw->core, req);
 }
 EXPORT_SYMBOL_GPL(__clk_determine_rate);
 
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
 {
-	unsigned long min_rate;
-	unsigned long max_rate;
+	int ret;
+	struct clk_rate_request req;
 
-	if (!clk)
+	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
+	req.rate = rate;
+
+	ret = clk_core_round_rate_nolock(hw->core, &req);
+	if (ret)
 		return 0;
 
-	clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
-
-	return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+	return req.rate;
 }
-EXPORT_SYMBOL_GPL(__clk_round_rate);
+EXPORT_SYMBOL_GPL(clk_hw_round_rate);
 
 /**
  * clk_round_rate - round the given rate for a clk
@@ -844,16 +862,24 @@
  */
 long clk_round_rate(struct clk *clk, unsigned long rate)
 {
-	unsigned long ret;
+	struct clk_rate_request req;
+	int ret;
 
 	if (!clk)
 		return 0;
 
 	clk_prepare_lock();
-	ret = __clk_round_rate(clk, rate);
+
+	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
+	req.rate = rate;
+
+	ret = clk_core_round_rate_nolock(clk->core, &req);
 	clk_prepare_unlock();
 
-	return ret;
+	if (ret)
+		return ret;
+
+	return req.rate;
 }
 EXPORT_SYMBOL_GPL(clk_round_rate);
 
@@ -1064,18 +1090,40 @@
 	return -EINVAL;
 }
 
+/*
+ * Update the orphan status of @core and all its children.
+ */
+static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
+{
+	struct clk_core *child;
+
+	core->orphan = is_orphan;
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_core_update_orphan_status(child, is_orphan);
+}
+
 static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
 {
+	bool was_orphan = core->orphan;
+
 	hlist_del(&core->child_node);
 
 	if (new_parent) {
+		bool becomes_orphan = new_parent->orphan;
+
 		/* avoid duplicate POST_RATE_CHANGE notifications */
 		if (new_parent->new_child == core)
 			new_parent->new_child = NULL;
 
 		hlist_add_head(&core->child_node, &new_parent->children);
+
+		if (was_orphan != becomes_orphan)
+			clk_core_update_orphan_status(core, becomes_orphan);
 	} else {
 		hlist_add_head(&core->child_node, &clk_orphan_list);
+		if (!was_orphan)
+			clk_core_update_orphan_status(core, true);
 	}
 
 	core->parent = new_parent;
@@ -1160,14 +1208,8 @@
 		flags = clk_enable_lock();
 		clk_reparent(core, old_parent);
 		clk_enable_unlock(flags);
+		__clk_set_parent_after(core, old_parent, parent);
 
-		if (core->prepare_count) {
-			flags = clk_enable_lock();
-			clk_core_disable(core);
-			clk_core_disable(parent);
-			clk_enable_unlock(flags);
-			clk_core_unprepare(parent);
-		}
 		return ret;
 	}
 
@@ -1249,7 +1291,6 @@
 {
 	struct clk_core *top = core;
 	struct clk_core *old_parent, *parent;
-	struct clk_hw *parent_hw;
 	unsigned long best_parent_rate = 0;
 	unsigned long new_rate;
 	unsigned long min_rate;
@@ -1270,20 +1311,29 @@
 
 	/* find the closest rate and parent clk/rate */
 	if (core->ops->determine_rate) {
-		parent_hw = parent ? parent->hw : NULL;
-		ret = core->ops->determine_rate(core->hw, rate,
-					       min_rate,
-					       max_rate,
-					       &best_parent_rate,
-					       &parent_hw);
+		struct clk_rate_request req;
+
+		req.rate = rate;
+		req.min_rate = min_rate;
+		req.max_rate = max_rate;
+		if (parent) {
+			req.best_parent_hw = parent->hw;
+			req.best_parent_rate = parent->rate;
+		} else {
+			req.best_parent_hw = NULL;
+			req.best_parent_rate = 0;
+		}
+
+		ret = core->ops->determine_rate(core->hw, &req);
 		if (ret < 0)
 			return NULL;
 
-		new_rate = ret;
-		parent = parent_hw ? parent_hw->core : NULL;
+		best_parent_rate = req.best_parent_rate;
+		new_rate = req.rate;
+		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
 	} else if (core->ops->round_rate) {
 		ret = core->ops->round_rate(core->hw, rate,
-					   &best_parent_rate);
+					    &best_parent_rate);
 		if (ret < 0)
 			return NULL;
 
@@ -1592,8 +1642,12 @@
 {
 	struct clk *parent;
 
+	if (!clk)
+		return NULL;
+
 	clk_prepare_lock();
-	parent = __clk_get_parent(clk);
+	/* TODO: Create a per-user clk and change callers to call clk_put */
+	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
 	clk_prepare_unlock();
 
 	return parent;
@@ -2324,13 +2378,17 @@
 	 * clocks and re-parent any that are children of the clock currently
 	 * being clk_init'd.
 	 */
-	if (core->parent)
+	if (core->parent) {
 		hlist_add_head(&core->child_node,
 				&core->parent->children);
-	else if (core->flags & CLK_IS_ROOT)
+		core->orphan = core->parent->orphan;
+	} else if (core->flags & CLK_IS_ROOT) {
 		hlist_add_head(&core->child_node, &clk_root_list);
-	else
+		core->orphan = false;
+	} else {
 		hlist_add_head(&core->child_node, &clk_orphan_list);
+		core->orphan = true;
+	}
 
 	/*
 	 * Set clk's accuracy.  The preferred method is to use
@@ -2479,6 +2537,8 @@
 	core->hw = hw;
 	core->flags = hw->init->flags;
 	core->num_parents = hw->init->num_parents;
+	core->min_rate = 0;
+	core->max_rate = ULONG_MAX;
 	hw->core = core;
 
 	/* allocate local copy in case parent_names is __initdata */
@@ -3054,8 +3114,6 @@
 	struct list_head node;
 };
 
-static LIST_HEAD(clk_provider_list);
-
 /*
  * This function looks for a parent clock. If there is one, then it
  * checks that the provider for this parent clock was initialized, in
@@ -3106,14 +3164,24 @@
 	struct clock_provider *clk_provider, *next;
 	bool is_init_done;
 	bool force = false;
+	LIST_HEAD(clk_provider_list);
 
 	if (!matches)
 		matches = &__clk_of_table;
 
 	/* First prepare the list of the clocks providers */
 	for_each_matching_node_and_match(np, matches, &match) {
-		struct clock_provider *parent =
-			kzalloc(sizeof(struct clock_provider),	GFP_KERNEL);
+		struct clock_provider *parent;
+
+		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
+		if (!parent) {
+			list_for_each_entry_safe(clk_provider, next,
+						 &clk_provider_list, node) {
+				list_del(&clk_provider->node);
+				kfree(clk_provider);
+			}
+			return;
+		}
 
 		parent->clk_init_cb = match->data;
 		parent->np = np;
diff --git a/drivers/clk/h8300/clk-div.c b/drivers/clk/h8300/clk-div.c
index 56f9eba..1dd5d14 100644
--- a/drivers/clk/h8300/clk-div.c
+++ b/drivers/clk/h8300/clk-div.c
@@ -4,8 +4,6 @@
  * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/of.h>
@@ -15,7 +13,7 @@
 
 static void __init h8300_div_clk_setup(struct device_node *node)
 {
-	unsigned int num_parents;
+	int num_parents;
 	struct clk *clk;
 	const char *clk_name = node->name;
 	const char *parent_name;
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 4701b093..2a38eb4 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -4,8 +4,6 @@
  * Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/device.h>
@@ -28,7 +26,7 @@
 		unsigned long parent_rate)
 {
 	struct pll_clock *pll_clock = to_pll_clock(hw);
-	int mul = 1 << (ctrl_inb((unsigned long)pll_clock->pllcr) & 3);
+	int mul = 1 << (readb(pll_clock->pllcr) & 3);
 
 	return parent_rate * mul;
 }
@@ -65,13 +63,13 @@
 
 	pll = ((rate / parent_rate) / 2) & 0x03;
 	spin_lock_irqsave(&clklock, flags);
-	val = ctrl_inb((unsigned long)pll_clock->sckcr);
+	val = readb(pll_clock->sckcr);
 	val |= 0x08;
-	ctrl_outb(val, (unsigned long)pll_clock->sckcr);
-	val = ctrl_inb((unsigned long)pll_clock->pllcr);
+	writeb(val, pll_clock->sckcr);
+	val = readb(pll_clock->pllcr);
 	val &= ~0x03;
 	val |= pll;
-	ctrl_outb(val, (unsigned long)pll_clock->pllcr);
+	writeb(val, pll_clock->pllcr);
 	spin_unlock_irqrestore(&clklock, flags);
 	return 0;
 }
@@ -84,7 +82,7 @@
 
 static void __init h8s2678_pll_clk_setup(struct device_node *node)
 {
-	unsigned int num_parents;
+	int num_parents;
 	struct clk *clk;
 	const char *clk_name = node->name;
 	const char *parent_name;
@@ -98,11 +96,9 @@
 	}
 
 
-	pll_clock = kzalloc(sizeof(struct pll_clock), GFP_KERNEL);
-	if (!pll_clock) {
-		pr_err("%s: failed to alloc memory", clk_name);
+	pll_clock = kzalloc(sizeof(*pll_clock), GFP_KERNEL);
+	if (!pll_clock)
 		return;
-	}
 
 	pll_clock->sckcr = of_iomap(node, 0);
 	if (pll_clock->sckcr == NULL) {
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index b4165ba..2c16807 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,6 +1,6 @@
 config COMMON_CLK_HI6220
 	bool "Hi6220 Clock Driver"
-	depends on ARCH_HISI || COMPILE_TEST
+	depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX
 	default ARCH_HISI
 	help
 	  Build the Hisilicon Hi6220 clock driver based on the common clock framework.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 48f0116..4a1001a 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,4 +7,4 @@
 obj-$(CONFIG_ARCH_HI3xxx)	+= clk-hi3620.o
 obj-$(CONFIG_ARCH_HIP04)	+= clk-hip04.o
 obj-$(CONFIG_ARCH_HIX5HD2)	+= clk-hix5hd2.o
-obj-$(CONFIG_COMMON_CLK_HI6220)	+= clk-hi6220.o
+obj-$(CONFIG_COMMON_CLK_HI6220)	+= clk-hi6220.o clk-hi6220-stub.o
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 715d34a..7d03fe1 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -25,13 +25,11 @@
 
 #include <linux/kernel.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
-#include <linux/clk.h>
 
 #include <dt-bindings/clock/hi3620-clock.h>
 
@@ -294,34 +292,29 @@
 	}
 }
 
-static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
-			      unsigned long min_rate,
-			      unsigned long max_rate,
-			      unsigned long *best_parent_rate,
-			      struct clk_hw **best_parent_p)
+static int mmc_clk_determine_rate(struct clk_hw *hw,
+				  struct clk_rate_request *req)
 {
 	struct clk_mmc *mclk = to_mmc(hw);
-	unsigned long best = 0;
 
-	if ((rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
-		rate = 13000000;
-		best = 26000000;
-	} else if (rate <= 26000000) {
-		rate = 25000000;
-		best = 180000000;
-	} else if (rate <= 52000000) {
-		rate = 50000000;
-		best = 360000000;
-	} else if (rate <= 100000000) {
-		rate = 100000000;
-		best = 720000000;
+	if ((req->rate <= 13000000) && (mclk->id == HI3620_MMC_CIUCLK1)) {
+		req->rate = 13000000;
+		req->best_parent_rate = 26000000;
+	} else if (req->rate <= 26000000) {
+		req->rate = 25000000;
+		req->best_parent_rate = 180000000;
+	} else if (req->rate <= 52000000) {
+		req->rate = 50000000;
+		req->best_parent_rate = 360000000;
+	} else if (req->rate <= 100000000) {
+		req->rate = 100000000;
+		req->best_parent_rate = 720000000;
 	} else {
 		/* max is 180M */
-		rate = 180000000;
-		best = 1440000000;
+		req->rate = 180000000;
+		req->best_parent_rate = 1440000000;
 	}
-	*best_parent_rate = best;
-	return rate;
+	return -EINVAL;
 }
 
 static u32 mmc_clk_delay(u32 val, u32 para, u32 off, u32 len)
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
new file mode 100644
index 0000000..2c4add1
--- /dev/null
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -0,0 +1,276 @@
+/*
+ * Hi6220 stub clock driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ * Copyright (c) 2015 Linaro Limited.
+ *
+ * Author: Leo Yan <leo.yan@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+/* Stub clocks id */
+#define HI6220_STUB_ACPU0		0
+#define HI6220_STUB_ACPU1		1
+#define HI6220_STUB_GPU			2
+#define HI6220_STUB_DDR			5
+
+/* Mailbox message */
+#define HI6220_MBOX_MSG_LEN		8
+
+#define HI6220_MBOX_FREQ		0xA
+#define HI6220_MBOX_CMD_SET		0x3
+#define HI6220_MBOX_OBJ_AP		0x0
+
+/* CPU dynamic frequency scaling */
+#define ACPU_DFS_FREQ_MAX		0x1724
+#define ACPU_DFS_CUR_FREQ		0x17CC
+#define ACPU_DFS_FLAG			0x1B30
+#define ACPU_DFS_FREQ_REQ		0x1B34
+#define ACPU_DFS_FREQ_LMT		0x1B38
+#define ACPU_DFS_LOCK_FLAG		0xAEAEAEAE
+
+#define to_stub_clk(hw) container_of(hw, struct hi6220_stub_clk, hw)
+
+struct hi6220_stub_clk {
+	u32 id;
+
+	struct device *dev;
+	struct clk_hw hw;
+
+	struct regmap *dfs_map;
+	struct mbox_client cl;
+	struct mbox_chan *mbox;
+};
+
+struct hi6220_mbox_msg {
+	unsigned char type;
+	unsigned char cmd;
+	unsigned char obj;
+	unsigned char src;
+	unsigned char para[4];
+};
+
+union hi6220_mbox_data {
+	unsigned int data[HI6220_MBOX_MSG_LEN];
+	struct hi6220_mbox_msg msg;
+};
+
+static unsigned int hi6220_acpu_get_freq(struct hi6220_stub_clk *stub_clk)
+{
+	unsigned int freq;
+
+	regmap_read(stub_clk->dfs_map, ACPU_DFS_CUR_FREQ, &freq);
+	return freq;
+}
+
+static int hi6220_acpu_set_freq(struct hi6220_stub_clk *stub_clk,
+				unsigned int freq)
+{
+	union hi6220_mbox_data data;
+
+	/* set the frequency in sram */
+	regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, freq);
+
+	/* compose the mailbox message */
+	data.msg.type = HI6220_MBOX_FREQ;
+	data.msg.cmd  = HI6220_MBOX_CMD_SET;
+	data.msg.obj  = HI6220_MBOX_OBJ_AP;
+	data.msg.src  = HI6220_MBOX_OBJ_AP;
+
+	mbox_send_message(stub_clk->mbox, &data);
+	return 0;
+}
+
+static int hi6220_acpu_round_freq(struct hi6220_stub_clk *stub_clk,
+				  unsigned int freq)
+{
+	unsigned int limit_flag, limit_freq = UINT_MAX;
+	unsigned int max_freq;
+
+	/* check the constrained frequency */
+	regmap_read(stub_clk->dfs_map, ACPU_DFS_FLAG, &limit_flag);
+	if (limit_flag == ACPU_DFS_LOCK_FLAG)
+		regmap_read(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, &limit_freq);
+
+	/* check the supported maximum frequency */
+	regmap_read(stub_clk->dfs_map, ACPU_DFS_FREQ_MAX, &max_freq);
+
+	/* calculate the real maximum frequency */
+	max_freq = min(max_freq, limit_freq);
+
+	if (WARN_ON(freq > max_freq))
+		freq = max_freq;
+
+	return freq;
+}
+
+static unsigned long hi6220_stub_clk_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	u32 rate = 0;
+	struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
+
+	switch (stub_clk->id) {
+	case HI6220_STUB_ACPU0:
+		rate = hi6220_acpu_get_freq(stub_clk);
+
+		/* convert from kHz to Hz */
+		rate *= 1000;
+		break;
+
+	default:
+		dev_err(stub_clk->dev, "%s: un-supported clock id %d\n",
+			__func__, stub_clk->id);
+		break;
+	}
+
+	return rate;
+}
+
+static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
+	unsigned long new_rate = rate / 1000;  /* kHz */
+	int ret = 0;
+
+	switch (stub_clk->id) {
+	case HI6220_STUB_ACPU0:
+		ret = hi6220_acpu_set_freq(stub_clk, new_rate);
+		if (ret < 0)
+			return ret;
+
+		break;
+
+	default:
+		dev_err(stub_clk->dev, "%s: un-supported clock id %d\n",
+			__func__, stub_clk->id);
+		break;
+	}
+
+	pr_debug("%s: set rate=%ldkHz\n", __func__, new_rate);
+	return ret;
+}
+
+static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *parent_rate)
+{
+	struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
+	unsigned long new_rate = rate / 1000;  /* kHz */
+
+	switch (stub_clk->id) {
+	case HI6220_STUB_ACPU0:
+		new_rate = hi6220_acpu_round_freq(stub_clk, new_rate);
+
+		/* convert from kHz to Hz */
+		new_rate *= 1000;
+		break;
+
+	default:
+		dev_err(stub_clk->dev, "%s: un-supported clock id %d\n",
+			__func__, stub_clk->id);
+		break;
+	}
+
+	return new_rate;
+}
+
+static const struct clk_ops hi6220_stub_clk_ops = {
+	.recalc_rate	= hi6220_stub_clk_recalc_rate,
+	.round_rate	= hi6220_stub_clk_round_rate,
+	.set_rate	= hi6220_stub_clk_set_rate,
+};
+
+static int hi6220_stub_clk_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct clk_init_data init;
+	struct hi6220_stub_clk *stub_clk;
+	struct clk *clk;
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	stub_clk = devm_kzalloc(dev, sizeof(*stub_clk), GFP_KERNEL);
+	if (!stub_clk)
+		return -ENOMEM;
+
+	stub_clk->dfs_map = syscon_regmap_lookup_by_phandle(np,
+				"hisilicon,hi6220-clk-sram");
+	if (IS_ERR(stub_clk->dfs_map)) {
+		dev_err(dev, "failed to get sram regmap\n");
+		return PTR_ERR(stub_clk->dfs_map);
+	}
+
+	stub_clk->hw.init = &init;
+	stub_clk->dev = dev;
+	stub_clk->id = HI6220_STUB_ACPU0;
+
+	/* Use mailbox client with blocking mode */
+	stub_clk->cl.dev = dev;
+	stub_clk->cl.tx_done = NULL;
+	stub_clk->cl.tx_block = true;
+	stub_clk->cl.tx_tout = 500;
+	stub_clk->cl.knows_txdone = false;
+
+	/* Allocate mailbox channel */
+	stub_clk->mbox = mbox_request_channel(&stub_clk->cl, 0);
+	if (IS_ERR(stub_clk->mbox)) {
+		dev_err(dev, "failed get mailbox channel\n");
+		return PTR_ERR(stub_clk->mbox);
+	}
+
+	init.name = "acpu0";
+	init.ops = &hi6220_stub_clk_ops;
+	init.num_parents = 0;
+	init.flags = CLK_IS_ROOT;
+
+	clk = devm_clk_register(dev, &stub_clk->hw);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+	if (ret) {
+		dev_err(dev, "failed to register OF clock provider\n");
+		return ret;
+	}
+
+	/* initialize buffer to zero */
+	regmap_write(stub_clk->dfs_map, ACPU_DFS_FLAG, 0x0);
+	regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, 0x0);
+	regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_LMT, 0x0);
+
+	dev_dbg(dev, "Registered clock '%s'\n", init.name);
+	return 0;
+}
+
+static const struct of_device_id hi6220_stub_clk_of_match[] = {
+	{ .compatible = "hisilicon,hi6220-stub-clk", },
+	{}
+};
+
+static struct platform_driver hi6220_stub_clk_driver = {
+	.driver	= {
+		.name = "hi6220-stub-clk",
+		.of_match_table = hi6220_stub_clk_of_match,
+	},
+	.probe = hi6220_stub_clk_probe,
+};
+
+static int __init hi6220_stub_clk_init(void)
+{
+	return platform_driver_register(&hi6220_stub_clk_driver);
+}
+subsys_initcall(hi6220_stub_clk_init);
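
For context, the stub clock's job is to let an ordinary consumer (typically a cpufreq driver) drive ACPU DVFS through the normal clk API while the real work happens in firmware behind the mailbox: round_rate consults the limits exported in SRAM, set_rate writes the request and kicks the channel. A hedged consumer-side sketch (the device handle and target frequency are invented):

static int example_set_cpu_freq(struct device *dev, unsigned long hz)
{
	struct clk *cpu_clk;
	long rounded;

	cpu_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	/* ends up in hi6220_stub_clk_round_rate(), bounded by ACPU_DFS_FREQ_MAX */
	rounded = clk_round_rate(cpu_clk, hz);
	if (rounded <= 0)
		return -EINVAL;

	/* ends up in hi6220_stub_clk_set_rate(): SRAM write plus mailbox message */
	return clk_set_rate(cpu_clk, rounded);
}
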
diff --git a/drivers/clk/hisilicon/clk-hip04.c b/drivers/clk/hisilicon/clk-hip04.c
index 132b57a..8ca9673 100644
--- a/drivers/clk/hisilicon/clk-hip04.c
+++ b/drivers/clk/hisilicon/clk-hip04.c
@@ -24,13 +24,11 @@
 
 #include <linux/kernel.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
-#include <linux/clk.h>
 
 #include <dt-bindings/clock/hip04-clock.h>
 
diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c
index c90a897..9f8e766 100644
--- a/drivers/clk/hisilicon/clk.c
+++ b/drivers/clk/hisilicon/clk.c
@@ -24,15 +24,14 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/clk-provider.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
-#include <linux/clk.h>
 
 #include "clk.h"
 
@@ -45,14 +44,9 @@
 	struct clk **clk_table;
 	void __iomem *base;
 
-	if (np) {
-		base = of_iomap(np, 0);
-		if (!base) {
-			pr_err("failed to map Hisilicon clock registers\n");
-			goto err;
-		}
-	} else {
-		pr_err("failed to find Hisilicon clock node in DTS\n");
+	base = of_iomap(np, 0);
+	if (!base) {
+		pr_err("%s: failed to map clock registers\n", __func__);
 		goto err;
 	}
 
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index b03d5a7..a47812f 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -25,10 +25,8 @@
 
 #include <linux/kernel.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/slab.h>
-#include <linux/clk.h>
 
 #include "clk.h"
 
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 75fae16..1ada68a 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -22,5 +22,6 @@
 obj-$(CONFIG_SOC_IMX6Q)  += clk-imx6q.o
 obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o
 obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o
+obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o
 obj-$(CONFIG_SOC_IMX7D)  += clk-imx7d.o
 obj-$(CONFIG_SOC_VF610)  += clk-vf610.o
diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c
index c2647fa..99cf802 100644
--- a/drivers/clk/imx/clk-imx1.c
+++ b/drivers/clk/imx/clk-imx1.c
@@ -15,7 +15,6 @@
  * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
diff --git a/drivers/clk/imx/clk-imx21.c b/drivers/clk/imx/clk-imx21.c
index dba987e..e63188e 100644
--- a/drivers/clk/imx/clk-imx21.c
+++ b/drivers/clk/imx/clk-imx21.c
@@ -9,7 +9,6 @@
  * of the License, or (at your option) any later version.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
diff --git a/drivers/clk/imx/clk-imx31.c b/drivers/clk/imx/clk-imx31.c
index fe66c40..1f83834 100644
--- a/drivers/clk/imx/clk-imx31.c
+++ b/drivers/clk/imx/clk-imx31.c
@@ -147,7 +147,8 @@
 	clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
 	clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
 	clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[rtc_gate], NULL, "imx21-rtc");
+	clk_register_clkdev(clk[ckil], "ref", "imx21-rtc");
+	clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc");
 	clk_register_clkdev(clk[epit1_gate], "epit", NULL);
 	clk_register_clkdev(clk[epit2_gate], "epit", NULL);
 	clk_register_clkdev(clk[nfc], NULL, "imx27-nand.0");
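
On the consumer side, this split means the i.MX21-type RTC driver can now look up its two clocks by connection id instead of receiving a single unnamed one. A hedged sketch of what that lookup looks like (abbreviated, and assumed rather than taken from this patch):

clk_ipg = devm_clk_get(&pdev->dev, "ipg");	/* register/bus clock */
if (IS_ERR(clk_ipg))
	return PTR_ERR(clk_ipg);

clk_ref = devm_clk_get(&pdev->dev, "ref");	/* 32.768 kHz reference */
if (IS_ERR(clk_ref))
	return PTR_ERR(clk_ref);
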
diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
index 69138ba..8623cd4 100644
--- a/drivers/clk/imx/clk-imx35.c
+++ b/drivers/clk/imx/clk-imx35.c
@@ -66,7 +66,7 @@
 static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};
 
 enum mx35_clks {
-	ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
+	ckih, ckil, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
 	arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
 	esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
 	spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
@@ -107,6 +107,7 @@
 	}
 
 	clk[ckih] = imx_clk_fixed("ckih", 24000000);
+	clk[ckil] = imx_clk_fixed("ckih", 32768);
 	clk[mpll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "mpll", "ckih", base + MX35_CCM_MPCTL);
 	clk[ppll] = imx_clk_pllv1(IMX_PLLV1_IMX35, "ppll", "ckih", base + MX35_CCM_PPCTL);
 
@@ -258,6 +259,9 @@
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
 	clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
+	/* i.MX35 has the i.MX21-type RTC */
+	clk_register_clkdev(clk[ckil], "ref", "imx21-rtc");
+	clk_register_clkdev(clk[rtc_gate], "ipg", "imx21-rtc");
 	clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
 	clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
 	clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index d046f8e..b2c1c04 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -381,6 +381,9 @@
 	clk[IMX6QDL_CLK_ASRC]         = imx_clk_gate2_shared("asrc",         "asrc_podf",   base + 0x68, 6, &share_count_asrc);
 	clk[IMX6QDL_CLK_ASRC_IPG]     = imx_clk_gate2_shared("asrc_ipg",     "ahb",         base + 0x68, 6, &share_count_asrc);
 	clk[IMX6QDL_CLK_ASRC_MEM]     = imx_clk_gate2_shared("asrc_mem",     "ahb",         base + 0x68, 6, &share_count_asrc);
+	clk[IMX6QDL_CLK_CAAM_MEM]     = imx_clk_gate2("caam_mem",      "ahb",               base + 0x68, 8);
+	clk[IMX6QDL_CLK_CAAM_ACLK]    = imx_clk_gate2("caam_aclk",     "ahb",               base + 0x68, 10);
+	clk[IMX6QDL_CLK_CAAM_IPG]     = imx_clk_gate2("caam_ipg",      "ipg",               base + 0x68, 12);
 	clk[IMX6QDL_CLK_CAN1_IPG]     = imx_clk_gate2("can1_ipg",      "ipg",               base + 0x68, 14);
 	clk[IMX6QDL_CLK_CAN1_SERIAL]  = imx_clk_gate2("can1_serial",   "can_root",          base + 0x68, 16);
 	clk[IMX6QDL_CLK_CAN2_IPG]     = imx_clk_gate2("can2_ipg",      "ipg",               base + 0x68, 18);
@@ -494,6 +497,10 @@
 		clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
 	}
 
+	clk_set_rate(clk[IMX6QDL_CLK_PLL3_PFD1_540M], 540000000);
+	if (clk_on_imx6dl())
+		clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]);
+
 	clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
 	clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
 	clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
new file mode 100644
index 0000000..aaa3665
--- /dev/null
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <dt-bindings/clock/imx6ul-clock.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/types.h>
+
+#include "clk.h"
+
+#define BM_CCM_CCDR_MMDC_CH0_MASK	(0x2 << 16)
+#define CCDR	0x4
+
+static const char *pll_bypass_src_sels[] = { "osc", "dummy", };
+static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
+static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
+static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
+static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
+static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
+static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
+static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
+static const char *ca7_secondary_sels[] = { "pll2_pfd2_396m", "pll2_bus", };
+static const char *step_sels[] = { "osc", "ca7_secondary_sel", };
+static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+static const char *axi_alt_sels[] = { "pll2_pfd2_396m", "pll3_pfd1_540m", };
+static const char *axi_sels[] = { "periph", "axi_alt_sel", };
+static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+static const char *periph2_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll4_audio_div", };
+static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", };
+static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "osc", };
+static const char *periph_sels[] = { "periph_pre", "periph_clk2", };
+static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", };
+static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *bch_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *gpmi_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *eim_slow_sels[] =  { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd0_720m", };
+static const char *spdif_sels[] = { "pll4_audio_div", "pll3_pfd2_508m", "pll5_video_div", "pll3_usb_otg", };
+static const char *sai_sels[] = { "pll3_pfd2_508m", "pll5_video_div", "pll4_audio_div", };
+static const char *lcdif_pre_sels[] = { "pll2_bus", "pll3_pfd3_454m", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd1_594m", "pll3_pfd1_540m", };
+static const char *sim_pre_sels[] = { "pll2_bus", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd2_508m", };
+static const char *ldb_di0_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_pfd3_594m", "pll2_pfd1_594m", "pll3_pfd3_454m", };
+static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
+static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+static const char *qspi1_sels[] = { "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll2_bus", "pll3_pfd3_454m", "pll3_pfd2_508m", };
+static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", "pll3_pfd3_454m", "dummy", "dummy", "dummy", };
+static const char *can_sels[] = { "pll3_60m", "osc", "pll3_80m", "dummy", };
+static const char *ecspi_sels[] = { "pll3_60m", "osc", };
+static const char *uart_sels[] = { "pll3_80m", "osc", };
+static const char *perclk_sels[] = { "ipg", "osc", };
+static const char *lcdif_sels[] = { "lcdif_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+static const char *csi_sels[] = { "osc", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+static const char *sim_sels[] = { "sim_podf", "ipp_di0", "ipp_di1", "ldb_di0", "ldb_di1", };
+
+static struct clk *clks[IMX6UL_CLK_END];
+static struct clk_onecell_data clk_data;
+
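+/* clocks enabled unconditionally at init: bus bridges, ARM, ROM and the DDR interface */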
+static int const clks_init_on[] __initconst = {
+	IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2, IMX6UL_CLK_AIPSTZ3,
+	IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM,
+	IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
+};
+
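+/* register value -> divider mapping for the PLL6-derived ENET reference clocks */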
+static struct clk_div_table clk_enet_ref_table[] = {
+	{ .val = 0, .div = 20, },
+	{ .val = 1, .div = 10, },
+	{ .val = 2, .div = 5, },
+	{ .val = 3, .div = 4, },
+	{ }
+};
+
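+/* post-divider encodings used for the PLL4 (audio) and PLL5 (video) outputs */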
+static struct clk_div_table post_div_table[] = {
+	{ .val = 2, .div = 1, },
+	{ .val = 1, .div = 2, },
+	{ .val = 0, .div = 4, },
+	{ }
+};
+
+static struct clk_div_table video_div_table[] = {
+	{ .val = 0, .div = 1, },
+	{ .val = 1, .div = 2, },
+	{ .val = 2, .div = 1, },
+	{ .val = 3, .div = 4, },
+	{ }
+};
+
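+/* reference counts for gates that share a single enable bit */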
+static u32 share_count_asrc;
+static u32 share_count_audio;
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+
+static void __init imx6ul_clocks_init(struct device_node *ccm_node)
+{
+	struct device_node *np;
+	void __iomem *base;
+	int i;
+
+	clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+
+	clks[IMX6UL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil");
+	clks[IMX6UL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc");
+
+	/* ipp_di clock is external input */
+	clks[IMX6UL_CLK_IPP_DI0] = of_clk_get_by_name(ccm_node, "ipp_di0");
+	clks[IMX6UL_CLK_IPP_DI1] = of_clk_get_by_name(ccm_node, "ipp_di1");
+
+	np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
+	base = of_iomap(np, 0);
+	WARN_ON(!base);
+
+	clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clks[IMX6UL_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clks[IMX6UL_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", base + 0x10, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clks[IMX6UL_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", base + 0x70, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clks[IMX6UL_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", base + 0xa0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clks[IMX6UL_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", base + 0xe0, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+	clks[IMX6UL_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", base + 0x20, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+
+	clks[IMX6UL_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS,	 "pll1", "pll1_bypass_src", base + 0x00, 0x7f);
+	clks[IMX6UL_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", base + 0x30, 0x1);
+	clks[IMX6UL_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB,	 "pll3", "pll3_bypass_src", base + 0x10, 0x3);
+	clks[IMX6UL_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV,	 "pll4", "pll4_bypass_src", base + 0x70, 0x7f);
+	clks[IMX6UL_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_AV,	 "pll5", "pll5_bypass_src", base + 0xa0, 0x7f);
+	clks[IMX6UL_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_ENET,	 "pll6", "pll6_bypass_src", base + 0xe0, 0x3);
+	clks[IMX6UL_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB,	 "pll7", "pll7_bypass_src", base + 0x20, 0x3);
+
+	clks[IMX6UL_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", base + 0x00, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", base + 0x30, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", base + 0x10, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", base + 0x70, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+	clks[IMX6UL_CLK_CSI_SEL] = imx_clk_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT);
+
+	/* Do not bypass PLLs initially */
+	clk_set_parent(clks[IMX6UL_PLL1_BYPASS], clks[IMX6UL_CLK_PLL1]);
+	clk_set_parent(clks[IMX6UL_PLL2_BYPASS], clks[IMX6UL_CLK_PLL2]);
+	clk_set_parent(clks[IMX6UL_PLL3_BYPASS], clks[IMX6UL_CLK_PLL3]);
+	clk_set_parent(clks[IMX6UL_PLL4_BYPASS], clks[IMX6UL_CLK_PLL4]);
+	clk_set_parent(clks[IMX6UL_PLL5_BYPASS], clks[IMX6UL_CLK_PLL5]);
+	clk_set_parent(clks[IMX6UL_PLL6_BYPASS], clks[IMX6UL_CLK_PLL6]);
+	clk_set_parent(clks[IMX6UL_PLL7_BYPASS], clks[IMX6UL_CLK_PLL7]);
+
+	clks[IMX6UL_CLK_PLL1_SYS]	= imx_clk_fixed_factor("pll1_sys",	"pll1_bypass", 1, 1);
+	clks[IMX6UL_CLK_PLL2_BUS]	= imx_clk_gate("pll2_bus", 	"pll2_bypass", base + 0x30, 13);
+	clks[IMX6UL_CLK_PLL3_USB_OTG]	= imx_clk_gate("pll3_usb_otg", 	"pll3_bypass", base + 0x10, 13);
+	clks[IMX6UL_CLK_PLL4_AUDIO]	= imx_clk_gate("pll4_audio", 	"pll4_bypass", base + 0x70, 13);
+	clks[IMX6UL_CLK_PLL5_VIDEO]	= imx_clk_gate("pll5_video",	"pll5_bypass", base + 0xa0, 13);
+	clks[IMX6UL_CLK_PLL6_ENET]	= imx_clk_gate("pll6_enet",	"pll6_bypass", base + 0xe0, 13);
+	clks[IMX6UL_CLK_PLL7_USB_HOST]	= imx_clk_gate("pll7_usb_host",	"pll7_bypass", base + 0x20, 13);
+
+	/*
+	 * Bit 20 is reserved and read-only, so gating on it:
+	 * - does nothing for usbphy clk_enable/disable
+	 * - still keeps the refcount on usbphy clk_enable/disable, so that
+	 *   the clk framework may enable/disable the usbphy's parent as needed
+	 */
+	clks[IMX6UL_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll3_usb_otg",  base + 0x10, 20);
+	clks[IMX6UL_CLK_USBPHY2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 20);
+
+	/*
+	 * The usbphy*_gate clocks need to stay on after the system boots up;
+	 * software never needs to control them again.
+	 */
+	clks[IMX6UL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6);
+	clks[IMX6UL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6);
+
+	/*					name		   parent_name	   reg		idx */
+	clks[IMX6UL_CLK_PLL2_PFD0] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus",	   base + 0x100, 0);
+	clks[IMX6UL_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus",	   base + 0x100, 1);
+	clks[IMX6UL_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus",	   base + 0x100, 2);
+	clks[IMX6UL_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3_594m", "pll2_bus",	   base + 0x100, 3);
+	clks[IMX6UL_CLK_PLL3_PFD0] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0,  0);
+	clks[IMX6UL_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0,  1);
+	clks[IMX6UL_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0,	 2);
+	clks[IMX6UL_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0,	 3);
+
+	clks[IMX6UL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
+			base + 0xe0, 0, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
+	clks[IMX6UL_CLK_ENET2_REF] = clk_register_divider_table(NULL, "enet2_ref", "pll6_enet", 0,
+			base + 0xe0, 2, 2, 0, clk_enet_ref_table, &imx_ccm_lock);
+
+	clks[IMX6UL_CLK_ENET2_REF_125M] = imx_clk_gate("enet_ref_125m", "enet2_ref", base + 0xe0, 20);
+	clks[IMX6UL_CLK_ENET_PTP_REF] 	= imx_clk_fixed_factor("enet_ptp_ref", "pll6_enet", 1, 20);
+	clks[IMX6UL_CLK_ENET_PTP] 	= imx_clk_gate("enet_ptp", "enet_ptp_ref", base + 0xe0, 21);
+
+	clks[IMX6UL_CLK_PLL4_POST_DIV]  = clk_register_divider_table(NULL, "pll4_post_div", "pll4_audio",
+		 CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x70, 19, 2, 0, post_div_table, &imx_ccm_lock);
+	clks[IMX6UL_CLK_PLL4_AUDIO_DIV] = clk_register_divider(NULL, "pll4_audio_div", "pll4_post_div",
+		 CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 15, 1, 0, &imx_ccm_lock);
+	clks[IMX6UL_CLK_PLL5_POST_DIV]  = clk_register_divider_table(NULL, "pll5_post_div", "pll5_video",
+		 CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0xa0, 19, 2, 0, post_div_table, &imx_ccm_lock);
+	clks[IMX6UL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div",
+		 CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+	/*						   name		parent_name	 mult  div */
+	clks[IMX6UL_CLK_PLL2_198M] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1,	2);
+	clks[IMX6UL_CLK_PLL3_80M]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg",   1, 	6);
+	clks[IMX6UL_CLK_PLL3_60M]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg",   1, 	8);
+	clks[IMX6UL_CLK_GPT_3M]	   = imx_clk_fixed_factor("gpt_3m",	"osc",		 1,	8);
+
+	np = ccm_node;
+	base = of_iomap(np, 0);
+	WARN_ON(!base);
+
+	clks[IMX6UL_CA7_SECONDARY_SEL]	  = imx_clk_mux("ca7_secondary_sel", base + 0xc, 3, 1, ca7_secondary_sels, ARRAY_SIZE(ca7_secondary_sels));
+	clks[IMX6UL_CLK_STEP] 	 	  = imx_clk_mux("step", base + 0x0c, 8, 1, step_sels, ARRAY_SIZE(step_sels));
+	clks[IMX6UL_CLK_PLL1_SW] 	  = imx_clk_mux_flags("pll1_sw",   base + 0x0c, 2,  1, pll1_sw_sels, ARRAY_SIZE(pll1_sw_sels), 0);
+	clks[IMX6UL_CLK_AXI_ALT_SEL]	  = imx_clk_mux("axi_alt_sel",		base + 0x14, 7,  1, axi_alt_sels, ARRAY_SIZE(axi_alt_sels));
+	clks[IMX6UL_CLK_AXI_SEL] 	  = imx_clk_mux_flags("axi_sel", 	base + 0x14, 6,  1, axi_sels, ARRAY_SIZE(axi_sels), 0);
+	clks[IMX6UL_CLK_PERIPH_PRE] 	  = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels, ARRAY_SIZE(periph_pre_sels));
+	clks[IMX6UL_CLK_PERIPH2_PRE] 	  = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph2_pre_sels, ARRAY_SIZE(periph2_pre_sels));
+	clks[IMX6UL_CLK_PERIPH_CLK2_SEL]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 2, periph_clk2_sels, ARRAY_SIZE(periph_clk2_sels));
+	clks[IMX6UL_CLK_PERIPH2_CLK2_SEL] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels, ARRAY_SIZE(periph2_clk2_sels));
+	clks[IMX6UL_CLK_EIM_SLOW_SEL] 	  = imx_clk_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels));
+	clks[IMX6UL_CLK_GPMI_SEL]	  = imx_clk_mux("gpmi_sel",     base + 0x1c, 19, 1, gpmi_sels, ARRAY_SIZE(gpmi_sels));
+	clks[IMX6UL_CLK_BCH_SEL]      	  = imx_clk_mux("bch_sel", 	base + 0x1c, 18, 1, bch_sels, ARRAY_SIZE(bch_sels));
+	clks[IMX6UL_CLK_USDHC2_SEL]	  = imx_clk_mux("usdhc2_sel",   base + 0x1c, 17, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+	clks[IMX6UL_CLK_USDHC1_SEL]	  = imx_clk_mux("usdhc1_sel",   base + 0x1c, 16, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
+	clks[IMX6UL_CLK_SAI3_SEL]     	  = imx_clk_mux("sai3_sel",     base + 0x1c, 14, 2, sai_sels, ARRAY_SIZE(sai_sels));
+	clks[IMX6UL_CLK_SAI2_SEL]         = imx_clk_mux("sai2_sel",     base + 0x1c, 12, 2, sai_sels, ARRAY_SIZE(sai_sels));
+	clks[IMX6UL_CLK_SAI1_SEL]    	  = imx_clk_mux("sai1_sel",     base + 0x1c, 10, 2, sai_sels, ARRAY_SIZE(sai_sels));
+	clks[IMX6UL_CLK_QSPI1_SEL] 	  = imx_clk_mux("qspi1_sel",    base + 0x1c, 7,  3, qspi1_sels, ARRAY_SIZE(qspi1_sels));
+	clks[IMX6UL_CLK_PERCLK_SEL] 	  = imx_clk_mux("perclk_sel",	base + 0x1c, 6,  1, perclk_sels, ARRAY_SIZE(perclk_sels));
+	clks[IMX6UL_CLK_CAN_SEL]      	  = imx_clk_mux("can_sel",	base + 0x20, 8,  2, can_sels, ARRAY_SIZE(can_sels));
+	clks[IMX6UL_CLK_UART_SEL]	  = imx_clk_mux("uart_sel",	base + 0x24, 6,  1, uart_sels, ARRAY_SIZE(uart_sels));
+	clks[IMX6UL_CLK_ENFC_SEL]	  = imx_clk_mux("enfc_sel",	base + 0x2c, 15, 3, enfc_sels, ARRAY_SIZE(enfc_sels));
+	clks[IMX6UL_CLK_LDB_DI0_SEL]	  = imx_clk_mux("ldb_di0_sel",	base + 0x2c, 9,  3, ldb_di0_sels, ARRAY_SIZE(ldb_di0_sels));
+	clks[IMX6UL_CLK_SPDIF_SEL]	  = imx_clk_mux("spdif_sel",	base + 0x30, 20, 2, spdif_sels, ARRAY_SIZE(spdif_sels));
+	clks[IMX6UL_CLK_SIM_PRE_SEL] 	  = imx_clk_mux("sim_pre_sel",	base + 0x34, 15, 3, sim_pre_sels, ARRAY_SIZE(sim_pre_sels));
+	clks[IMX6UL_CLK_SIM_SEL]	  = imx_clk_mux("sim_sel", 	base + 0x34, 9, 3, sim_sels, ARRAY_SIZE(sim_sels));
+	clks[IMX6UL_CLK_ECSPI_SEL]	  = imx_clk_mux("ecspi_sel",	base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
+	clks[IMX6UL_CLK_LCDIF_PRE_SEL]	  = imx_clk_mux("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels));
+	clks[IMX6UL_CLK_LCDIF_SEL]	  = imx_clk_mux("lcdif_sel", 	base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+
+	clks[IMX6UL_CLK_LDB_DI0_DIV_SEL]  = imx_clk_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
+	clks[IMX6UL_CLK_LDB_DI1_DIV_SEL]  = imx_clk_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
+
+	clks[IMX6UL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+	clks[IMX6UL_CLK_LDB_DI0_DIV_7]	 = imx_clk_fixed_factor("ldb_di0_div_7",   "ldb_di0_sel", 1, 7);
+	clks[IMX6UL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "qspi1_sel", 2, 7);
+	clks[IMX6UL_CLK_LDB_DI1_DIV_7]	 = imx_clk_fixed_factor("ldb_di1_div_7",   "qspi1_sel", 1, 7);
+
+	clks[IMX6UL_CLK_PERIPH]  = imx_clk_busy_mux("periph",  base + 0x14, 25, 1, base + 0x48, 5, periph_sels, ARRAY_SIZE(periph_sels));
+	clks[IMX6UL_CLK_PERIPH2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));
+
+	clks[IMX6UL_CLK_PERIPH_CLK2]	= imx_clk_divider("periph_clk2",   "periph_clk2_sel",  	base + 0x14, 27, 3);
+	clks[IMX6UL_CLK_PERIPH2_CLK2]	= imx_clk_divider("periph2_clk2",  "periph2_clk2_sel", 	base + 0x14, 0,  3);
+	clks[IMX6UL_CLK_IPG]		= imx_clk_divider("ipg",	   "ahb",		base + 0x14, 8,	 2);
+	clks[IMX6UL_CLK_LCDIF_PODF]	= imx_clk_divider("lcdif_podf",	   "lcdif_pred",	base + 0x18, 23, 3);
+	clks[IMX6UL_CLK_QSPI1_PDOF] 	= imx_clk_divider("qspi1_podf",	   "qspi1_sel",		base + 0x1c, 26, 3);
+	clks[IMX6UL_CLK_EIM_SLOW_PODF]	= imx_clk_divider("eim_slow_podf", "eim_slow_sel",	base + 0x1c, 23, 3);
+	clks[IMX6UL_CLK_PERCLK]		= imx_clk_divider("perclk",	   "perclk_sel",	base + 0x1c, 0,  6);
+	clks[IMX6UL_CLK_CAN_PODF]	= imx_clk_divider("can_podf",	   "can_sel",		base + 0x20, 2,  6);
+	clks[IMX6UL_CLK_GPMI_PODF]	= imx_clk_divider("gpmi_podf",	   "gpmi_sel",		base + 0x24, 22, 3);
+	clks[IMX6UL_CLK_BCH_PODF]	= imx_clk_divider("bch_podf",	   "bch_sel",		base + 0x24, 19, 3);
+	clks[IMX6UL_CLK_USDHC2_PODF]	= imx_clk_divider("usdhc2_podf",   "usdhc2_sel",	base + 0x24, 16, 3);
+	clks[IMX6UL_CLK_USDHC1_PODF]	= imx_clk_divider("usdhc1_podf",   "usdhc1_sel",	base + 0x24, 11, 3);
+	clks[IMX6UL_CLK_UART_PODF]	= imx_clk_divider("uart_podf",	   "uart_sel",		base + 0x24, 0,  6);
+	clks[IMX6UL_CLK_SAI3_PRED]	= imx_clk_divider("sai3_pred",	   "sai3_sel",		base + 0x28, 22, 3);
+	clks[IMX6UL_CLK_SAI3_PODF]	= imx_clk_divider("sai3_podf",	   "sai3_pred",		base + 0x28, 16, 6);
+	clks[IMX6UL_CLK_SAI1_PRED]	= imx_clk_divider("sai1_pred",	   "sai1_sel",		base + 0x28, 6,	 3);
+	clks[IMX6UL_CLK_SAI1_PODF]	= imx_clk_divider("sai1_podf",	   "sai1_pred",		base + 0x28, 0,	 6);
+	clks[IMX6UL_CLK_ENFC_PRED]	= imx_clk_divider("enfc_pred",	   "enfc_sel",		base + 0x2c, 18, 3);
+	clks[IMX6UL_CLK_ENFC_PODF]	= imx_clk_divider("enfc_podf",	   "enfc_pred",		base + 0x2c, 21, 6);
+	clks[IMX6UL_CLK_SAI2_PRED]	= imx_clk_divider("sai2_pred",	   "sai2_sel",		base + 0x2c, 6,	 3);
+	clks[IMX6UL_CLK_SAI2_PODF]	= imx_clk_divider("sai2_podf",	   "sai2_pred",		base + 0x2c, 0,  6);
+	clks[IMX6UL_CLK_SPDIF_PRED]	= imx_clk_divider("spdif_pred",	   "spdif_sel",		base + 0x30, 25, 3);
+	clks[IMX6UL_CLK_SPDIF_PODF]	= imx_clk_divider("spdif_podf",	   "spdif_pred",	base + 0x30, 22, 3);
+	clks[IMX6UL_CLK_SIM_PODF]	= imx_clk_divider("sim_podf",	   "sim_pre_sel",	base + 0x34, 12, 3);
+	clks[IMX6UL_CLK_ECSPI_PODF]	= imx_clk_divider("ecspi_podf",	   "ecspi_sel",		base + 0x38, 19, 6);
+	clks[IMX6UL_CLK_LCDIF_PRED]	= imx_clk_divider("lcdif_pred",	   "lcdif_pre_sel",	base + 0x38, 12, 3);
+	clks[IMX6UL_CLK_CSI_PODF]       = imx_clk_divider("csi_podf",      "csi_sel",           base + 0x3c, 11, 3);
+
+	clks[IMX6UL_CLK_ARM]		= imx_clk_busy_divider("arm", 	    "pll1_sw",	base +	0x10, 0,  3,  base + 0x48, 16);
+	clks[IMX6UL_CLK_MMDC_PODF]	= imx_clk_busy_divider("mmdc_podf", "periph2",	base +  0x14, 3,  3,  base + 0x48, 2);
+	clks[IMX6UL_CLK_AXI_PODF]	= imx_clk_busy_divider("axi_podf",  "axi_sel",	base +  0x14, 16, 3,  base + 0x48, 0);
+	clks[IMX6UL_CLK_AHB]		= imx_clk_busy_divider("ahb",	    "periph",	base +  0x14, 10, 3,  base + 0x48, 1);
+
+	/* CCGR0 */
+	clks[IMX6UL_CLK_AIPSTZ1]	= imx_clk_gate2("aips_tz1", 	"ahb",		base + 0x68,	0);
+	clks[IMX6UL_CLK_AIPSTZ2]	= imx_clk_gate2("aips_tz2", 	"ahb",		base + 0x68,	2);
+	clks[IMX6UL_CLK_APBHDMA]	= imx_clk_gate2("apbh_dma",	"bch_podf",	base + 0x68,	4);
+	clks[IMX6UL_CLK_ASRC_IPG]	= imx_clk_gate2_shared("asrc_ipg",	"ahb",	base + 0x68,	6, &share_count_asrc);
+	clks[IMX6UL_CLK_ASRC_MEM]	= imx_clk_gate2_shared("asrc_mem",	"ahb",	base + 0x68,	6, &share_count_asrc);
+	clks[IMX6UL_CLK_CAAM_MEM]	= imx_clk_gate2("caam_mem",	"ahb",		base + 0x68,	8);
+	clks[IMX6UL_CLK_CAAM_ACLK]	= imx_clk_gate2("caam_aclk",	"ahb",		base + 0x68,	10);
+	clks[IMX6UL_CLK_CAAM_IPG]	= imx_clk_gate2("caam_ipg",	"ipg",		base + 0x68,	12);
+	clks[IMX6UL_CLK_CAN1_IPG]	= imx_clk_gate2("can1_ipg",	"ipg",		base + 0x68,	14);
+	clks[IMX6UL_CLK_CAN1_SERIAL]	= imx_clk_gate2("can1_serial",	"can_podf",	base + 0x68, 	16);
+	clks[IMX6UL_CLK_CAN2_IPG]	= imx_clk_gate2("can2_ipg",	"ipg",		base + 0x68,	18);
+	clks[IMX6UL_CLK_CAN2_SERIAL]	= imx_clk_gate2("can2_serial",	"can_podf",	base + 0x68,	20);
+	clks[IMX6UL_CLK_GPT2_BUS]	= imx_clk_gate2("gpt_bus",	"perclk",	base + 0x68,	24);
+	clks[IMX6UL_CLK_GPT2_SERIAL]	= imx_clk_gate2("gpt_serial",	"perclk",	base + 0x68,	26);
+	clks[IMX6UL_CLK_UART2_IPG]	= imx_clk_gate2("uart2_ipg",	"ipg",		base + 0x68,	28);
+	clks[IMX6UL_CLK_UART2_SERIAL]	= imx_clk_gate2("uart2_serial",	"uart_podf",	base + 0x68,	28);
+	clks[IMX6UL_CLK_AIPSTZ3]	= imx_clk_gate2("aips_tz3",	"ahb",		base + 0x68,	30);
+
+	/* CCGR1 */
+	clks[IMX6UL_CLK_ECSPI1]		= imx_clk_gate2("ecspi1",	"ecspi_podf",	base + 0x6c,	0);
+	clks[IMX6UL_CLK_ECSPI2]		= imx_clk_gate2("ecspi2",	"ecspi_podf",	base + 0x6c,	2);
+	clks[IMX6UL_CLK_ECSPI3]		= imx_clk_gate2("ecspi3",	"ecspi_podf",	base + 0x6c,	4);
+	clks[IMX6UL_CLK_ECSPI4]		= imx_clk_gate2("ecspi4",	"ecspi_podf",	base + 0x6c,	6);
+	clks[IMX6UL_CLK_ADC2]		= imx_clk_gate2("adc2",		"ipg",		base + 0x6c,	8);
+	clks[IMX6UL_CLK_UART3_IPG]	= imx_clk_gate2("uart3_ipg",	"ipg",		base + 0x6c,	10);
+	clks[IMX6UL_CLK_UART3_SERIAL]	= imx_clk_gate2("uart3_serial",	"uart_podf",	base + 0x6c,	10);
+	clks[IMX6UL_CLK_EPIT1]		= imx_clk_gate2("epit1",	"perclk",	base + 0x6c,	12);
+	clks[IMX6UL_CLK_EPIT2]		= imx_clk_gate2("epit2",	"perclk",	base + 0x6c,	14);
+	clks[IMX6UL_CLK_ADC1]		= imx_clk_gate2("adc1",		"ipg",		base + 0x6c,	16);
+	clks[IMX6UL_CLK_GPT1_BUS]	= imx_clk_gate2("gpt1_bus",	"perclk",	base + 0x6c,	20);
+	clks[IMX6UL_CLK_GPT1_SERIAL]	= imx_clk_gate2("gpt1_serial",	"perclk",	base + 0x6c,	22);
+	clks[IMX6UL_CLK_UART4_IPG]	= imx_clk_gate2("uart4_ipg",	"ipg",		base + 0x6c,	24);
+	clks[IMX6UL_CLK_UART4_SERIAL]	= imx_clk_gate2("uart4_serial",	"uart_podf",	base + 0x6c,	24);
+
+	/* CCGR2 */
+	clks[IMX6UL_CLK_CSI]		= imx_clk_gate2("csi",		"csi_podf",		base + 0x70,	2);
+	clks[IMX6UL_CLK_I2C1]		= imx_clk_gate2("i2c1",		"perclk",	base + 0x70,	6);
+	clks[IMX6UL_CLK_I2C2]		= imx_clk_gate2("i2c2",		"perclk",	base + 0x70,	8);
+	clks[IMX6UL_CLK_I2C3] 		= imx_clk_gate2("i2c3",		"perclk",	base + 0x70,	10);
+	clks[IMX6UL_CLK_OCOTP]		= imx_clk_gate2("ocotp",	"ipg",		base + 0x70,	12);
+	clks[IMX6UL_CLK_IOMUXC]		= imx_clk_gate2("iomuxc",	"lcdif_podf",	base + 0x70,	14);
+	clks[IMX6UL_CLK_LCDIF_APB]	= imx_clk_gate2("lcdif_apb",	"axi",		base + 0x70,	28);
+	clks[IMX6UL_CLK_PXP]		= imx_clk_gate2("pxp",		"axi",		base + 0x70,	30);
+
+	/* CCGR3 */
+	clks[IMX6UL_CLK_UART5_IPG]	= imx_clk_gate2("uart5_ipg",	"ipg",		base + 0x74,	2);
+	clks[IMX6UL_CLK_UART5_SERIAL]	= imx_clk_gate2("uart5_serial",	"uart_podf",	base + 0x74,	2);
+	clks[IMX6UL_CLK_ENET]		= imx_clk_gate2("enet",		"ipg",		base + 0x74,	4);
+	clks[IMX6UL_CLK_ENET_AHB]	= imx_clk_gate2("enet_ahb",	"ahb",		base + 0x74,	4);
+	clks[IMX6UL_CLK_UART6_IPG]	= imx_clk_gate2("uart6_ipg",	"ipg",		base + 0x74,	6);
+	clks[IMX6UL_CLK_UART6_SERIAL]	= imx_clk_gate2("uart6_serial",	"uart_podf",	base + 0x74,	6);
+	clks[IMX6UL_CLK_LCDIF_PIX]	= imx_clk_gate2("lcdif_pix",	"lcdif_podf",	base + 0x74,	10);
+	clks[IMX6UL_CLK_QSPI]		= imx_clk_gate2("qspi1",	"qspi1_podf",	base + 0x74,	14);
+	clks[IMX6UL_CLK_WDOG1]		= imx_clk_gate2("wdog1",	"ipg",		base + 0x74,	16);
+	clks[IMX6UL_CLK_MMDC_P0_FAST]	= imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74,	20);
+	clks[IMX6UL_CLK_MMDC_P0_IPG]	= imx_clk_gate2("mmdc_p0_ipg",	"ipg",		base + 0x74,	24);
+	clks[IMX6UL_CLK_AXI]		= imx_clk_gate("axi",	"axi_podf",	base + 0x74,	28);
+
+	/* CCGR4 */
+	clks[IMX6UL_CLK_PER_BCH]	= imx_clk_gate2("per_bch",	"bch_podf",	base + 0x78,	12);
+	clks[IMX6UL_CLK_PWM1]		= imx_clk_gate2("pwm1",		"perclk",	base + 0x78,	16);
+	clks[IMX6UL_CLK_PWM2]		= imx_clk_gate2("pwm2",		"perclk",	base + 0x78,	18);
+	clks[IMX6UL_CLK_PWM3]		= imx_clk_gate2("pwm3",		"perclk",	base + 0x78,	20);
+	clks[IMX6UL_CLK_PWM4]		= imx_clk_gate2("pwm4",		"perclk",	base + 0x78,	22);
+	clks[IMX6UL_CLK_GPMI_BCH_APB]	= imx_clk_gate2("gpmi_bch_apb",	"bch_podf",	base + 0x78,	24);
+	clks[IMX6UL_CLK_GPMI_BCH]	= imx_clk_gate2("gpmi_bch",	"gpmi_podf",	base + 0x78,	26);
+	clks[IMX6UL_CLK_GPMI_IO]	= imx_clk_gate2("gpmi_io",	"enfc_podf",	base + 0x78,	28);
+	clks[IMX6UL_CLK_GPMI_APB]	= imx_clk_gate2("gpmi_apb",	"bch_podf",	base + 0x78,	30);
+
+	/* CCGR5 */
+	clks[IMX6UL_CLK_ROM]		= imx_clk_gate2("rom",		"ahb",		base + 0x7c,	0);
+	clks[IMX6UL_CLK_SDMA]		= imx_clk_gate2("sdma",		"ahb",		base + 0x7c,	6);
+	clks[IMX6UL_CLK_WDOG2]		= imx_clk_gate2("wdog2",	"ipg",		base + 0x7c,	10);
+	clks[IMX6UL_CLK_SPBA]		= imx_clk_gate2("spba",		"ipg",		base + 0x7c,	12);
+	clks[IMX6UL_CLK_SPDIF]		= imx_clk_gate2_shared("spdif",		"spdif_podf",	base + 0x7c,	14, &share_count_audio);
+	clks[IMX6UL_CLK_SPDIF_GCLK]	= imx_clk_gate2_shared("spdif_gclk",	"ipg",		base + 0x7c,	14, &share_count_audio);
+	clks[IMX6UL_CLK_SAI3]		= imx_clk_gate2_shared("sai3",		"sai3_podf",	base + 0x7c,	22, &share_count_sai3);
+	clks[IMX6UL_CLK_SAI3_IPG]	= imx_clk_gate2_shared("sai3_ipg",	"ipg",		base + 0x7c,	22, &share_count_sai3);
+	clks[IMX6UL_CLK_UART1_IPG]	= imx_clk_gate2("uart1_ipg",	"ipg",		base + 0x7c,	24);
+	clks[IMX6UL_CLK_UART1_SERIAL]	= imx_clk_gate2("uart1_serial",	"uart_podf",	base + 0x7c,	24);
+	clks[IMX6UL_CLK_UART7_IPG]	= imx_clk_gate2("uart7_ipg",	"ipg",		base + 0x7c,	26);
+	clks[IMX6UL_CLK_UART7_SERIAL]	= imx_clk_gate2("uart7_serial",	"uart_podf",	base + 0x7c,	26);
+	clks[IMX6UL_CLK_SAI1]		= imx_clk_gate2_shared("sai1",		"sai1_podf",	base + 0x7c,	28, &share_count_sai1);
+	clks[IMX6UL_CLK_SAI1_IPG]	= imx_clk_gate2_shared("sai1_ipg",	"ipg",		base + 0x7c,	28, &share_count_sai1);
+	clks[IMX6UL_CLK_SAI2]		= imx_clk_gate2_shared("sai2",		"sai2_podf",	base + 0x7c,	30, &share_count_sai2);
+	clks[IMX6UL_CLK_SAI2_IPG]	= imx_clk_gate2_shared("sai2_ipg",	"ipg",		base + 0x7c,	30, &share_count_sai2);
+
+	/* CCGR6 */
+	clks[IMX6UL_CLK_USBOH3]		= imx_clk_gate2("usboh3",	"ipg",		 base + 0x80,	0);
+	clks[IMX6UL_CLK_USDHC1]		= imx_clk_gate2("usdhc1",	"usdhc1_podf",	 base + 0x80,	2);
+	clks[IMX6UL_CLK_USDHC2]		= imx_clk_gate2("usdhc2",	"usdhc2_podf",	 base + 0x80,	4);
+	clks[IMX6UL_CLK_SIM1]		= imx_clk_gate2("sim1",		"sim_sel",	 base + 0x80,	6);
+	clks[IMX6UL_CLK_SIM2]		= imx_clk_gate2("sim2",		"sim_sel",	 base + 0x80,	8);
+	clks[IMX6UL_CLK_EIM]		= imx_clk_gate2("eim",		"eim_slow_podf", base + 0x80,	10);
+	clks[IMX6UL_CLK_PWM8]		= imx_clk_gate2("pwm8",		"perclk",	 base + 0x80,	16);
+	clks[IMX6UL_CLK_UART8_IPG]	= imx_clk_gate2("uart8_ipg",	"ipg",		 base + 0x80,	14);
+	clks[IMX6UL_CLK_UART8_SERIAL]	= imx_clk_gate2("uart8_serial", "uart_podf",	 base + 0x80,	14);
+	clks[IMX6UL_CLK_WDOG3]		= imx_clk_gate2("wdog3",	"ipg",		 base + 0x80,	20);
+	clks[IMX6UL_CLK_I2C4]		= imx_clk_gate2("i2c4",		"perclk", 	 base + 0x80,	24);
+	clks[IMX6UL_CLK_PWM5]		= imx_clk_gate2("pwm5",		"perclk",	 base + 0x80,	26);
+	clks[IMX6UL_CLK_PWM6]		= imx_clk_gate2("pwm6",		"perclk",	 base +	0x80,	28);
+	clks[IMX6UL_CLK_PWM7]		= imx_clk_gate2("pwm7",		"perclk",	 base + 0x80,	30);
+
+	/* mask handshake of mmdc */
+	writel_relaxed(BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
+	for (i = 0; i < ARRAY_SIZE(clks); i++)
+		if (IS_ERR(clks[i]))
+			pr_err("i.MX6UL clk %d: register failed with %ld\n", i, PTR_ERR(clks[i]));
+
+	clk_data.clks = clks;
+	clk_data.clk_num = ARRAY_SIZE(clks);
+	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
+	/* source perclk from OSC */
+	clk_set_parent(clks[IMX6UL_CLK_PERCLK_SEL], clks[IMX6UL_CLK_OSC]);
+
+	clk_set_rate(clks[IMX6UL_CLK_ENET_REF], 50000000);
+	clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000);
+	clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000);
+
+	/* keep all the clks on just for bringup */
+	for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+		clk_prepare_enable(clks[clks_init_on[i]]);
+
+	if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+		clk_prepare_enable(clks[IMX6UL_CLK_USBPHY1_GATE]);
+		clk_prepare_enable(clks[IMX6UL_CLK_USBPHY2_GATE]);
+	}
+
+	clk_set_parent(clks[IMX6UL_CLK_CAN_SEL], clks[IMX6UL_CLK_PLL3_60M]);
+	clk_set_parent(clks[IMX6UL_CLK_SIM_PRE_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
+
+	clk_set_parent(clks[IMX6UL_CLK_ENFC_SEL], clks[IMX6UL_CLK_PLL2_PFD2]);
+}
+
+CLK_OF_DECLARE(imx6ul, "fsl,imx6ul-ccm", imx6ul_clocks_init);
diff --git a/drivers/clk/imx/clk-pfd.c b/drivers/clk/imx/clk-pfd.c
index 0b0f6f6..04a3e78 100644
--- a/drivers/clk/imx/clk-pfd.c
+++ b/drivers/clk/imx/clk-pfd.c
@@ -10,7 +10,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index c34ad8a..8564e43 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -1,4 +1,3 @@
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index f0d15fb..6addf8f 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -10,7 +10,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/io.h>
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index b936cdd..7cfb7b2 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/delay.h>
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 86f1e36..aed5af2 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -10,7 +10,6 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
index 4a375ea..3f553d0 100644
--- a/drivers/clk/keystone/pll.c
+++ b/drivers/clk/keystone/pll.c
@@ -10,7 +10,6 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -309,8 +308,7 @@
 		return;
 	}
 
-	parents[0] = of_clk_get_parent_name(node, 0);
-	parents[1] = of_clk_get_parent_name(node, 1);
+	of_clk_parent_fill(node, parents, 2);
 	if (!parents[0] || !parents[1]) {
 		pr_err("%s: missing parent clocks\n", __func__);
 		return;
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 6b6780b..11e25c9 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -16,9 +16,10 @@
 #define __DRV_CLK_GATE_H
 
 #include <linux/regmap.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
+struct clk;
+
 struct mtk_clk_gate {
 	struct clk_hw	hw;
 	struct regmap	*regmap;
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 08b4b84..07c21e4 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -12,6 +12,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index 8b6523d..90eff85 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -12,6 +12,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
@@ -795,8 +796,9 @@
 
 #define CON0_MT8173_RST_BAR	BIT(24)
 
-#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, \
-			_tuner_reg, _pcw_reg, _pcw_shift) { \
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,	\
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg,	\
+			_pcw_shift, _div_table) {			\
 		.id = _id,						\
 		.name = _name,						\
 		.reg = _reg,						\
@@ -811,14 +813,31 @@
 		.tuner_reg = _tuner_reg,				\
 		.pcw_reg = _pcw_reg,					\
 		.pcw_shift = _pcw_shift,				\
+		.div_table = _div_table,				\
 	}
 
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits,	\
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg,	\
+			_pcw_shift)					\
+		PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+			_pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+			NULL)
+
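+/* per-post-divider (2^div) output frequency ceilings for MMPLL, ordered from highest to lowest */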
+static const struct mtk_pll_div_table mmpll_div_table[] = {
+	{ .div = 0, .freq = MT8173_PLL_FMAX },
+	{ .div = 1, .freq = 1000000000 },
+	{ .div = 2, .freq = 702000000 },
+	{ .div = 3, .freq = 253500000 },
+	{ .div = 4, .freq = 126750000 },
+	{ } /* sentinel */
+};
+
 static const struct mtk_pll_data plls[] = {
 	PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0),
 	PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0),
 	PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0),
 	PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14),
-	PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0),
+	PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table),
 	PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0),
 	PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0),
 	PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0),
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 9dda9d8..c5cbecb 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -17,9 +17,10 @@
 
 #include <linux/regmap.h>
 #include <linux/bitops.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
+struct clk;
+
 #define MAX_MUX_GATE_BIT	31
 #define INVALID_MUX_GATE_BIT	(MAX_MUX_GATE_BIT + 1)
 
@@ -134,6 +135,11 @@
 
 #define HAVE_RST_BAR	BIT(0)
 
+struct mtk_pll_div_table {
+	u32 div;
+	unsigned long freq;
+};
+
 struct mtk_pll_data {
 	int id;
 	const char *name;
@@ -150,6 +156,7 @@
 	int pcwbits;
 	uint32_t pcw_reg;
 	int pcw_shift;
+	const struct mtk_pll_div_table *div_table;
 };
 
 void __init mtk_clk_register_plls(struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 44409e9..622e7b6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -90,20 +90,23 @@
 static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw,
 		int postdiv)
 {
-	u32 con1, pd, val;
+	u32 con1, val;
 	int pll_en;
 
-	/* set postdiv */
-	pd = readl(pll->pd_addr);
-	pd &= ~(POSTDIV_MASK << pll->data->pd_shift);
-	pd |= (ffs(postdiv) - 1) << pll->data->pd_shift;
-	writel(pd, pll->pd_addr);
-
 	pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN;
 
-	/* set pcw */
-	val = readl(pll->pcw_addr);
+	/* set postdiv */
+	val = readl(pll->pd_addr);
+	val &= ~(POSTDIV_MASK << pll->data->pd_shift);
+	val |= (ffs(postdiv) - 1) << pll->data->pd_shift;
 
+	/* postdiv and pcw need to be set at the same time if they share a register */
+	if (pll->pd_addr != pll->pcw_addr) {
+		writel(val, pll->pd_addr);
+		val = readl(pll->pcw_addr);
+	}
+
+	/* set pcw */
 	val &= ~GENMASK(pll->data->pcw_shift + pll->data->pcwbits - 1,
 			pll->data->pcw_shift);
 	val |= pcw << pll->data->pcw_shift;
@@ -135,16 +138,28 @@
 		u32 freq, u32 fin)
 {
 	unsigned long fmin = 1000 * MHZ;
+	const struct mtk_pll_div_table *div_table = pll->data->div_table;
 	u64 _pcw;
 	u32 val;
 
 	if (freq > pll->data->fmax)
 		freq = pll->data->fmax;
 
-	for (val = 0; val < 4; val++) {
+	if (div_table) {
+		if (freq > div_table[0].freq)
+			freq = div_table[0].freq;
+
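+		/* choose the entry whose frequency range contains the target rate */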
+		for (val = 0; div_table[val + 1].freq != 0; val++) {
+			if (freq > div_table[val + 1].freq)
+				break;
+		}
 		*postdiv = 1 << val;
-		if (freq * *postdiv >= fmin)
-			break;
+	} else {
+		for (val = 0; val < 5; val++) {
+			*postdiv = 1 << val;
+			if ((u64)freq * *postdiv >= fmin)
+				break;
+		}
 	}
 
 	/* _pcw = freq * postdiv / fin * 2^pcwfbits */
diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c
index 71ad493..f7c30ea 100644
--- a/drivers/clk/meson/clk-cpu.c
+++ b/drivers/clk/meson/clk-cpu.c
@@ -35,6 +35,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
 #define MESON_CPU_CLK_CNTL1		0x00
diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c
index b8c511c..c83ae13 100644
--- a/drivers/clk/meson/clkc.c
+++ b/drivers/clk/meson/clkc.c
@@ -15,7 +15,6 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/mfd/syscon.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c
index 09d41c7..4c717db 100644
--- a/drivers/clk/mmp/clk-apbc.c
+++ b/drivers/clk/mmp/clk-apbc.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/delay.h>
diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c
index cdcf2d7..47b5542 100644
--- a/drivers/clk/mmp/clk-apmu.c
+++ b/drivers/clk/mmp/clk-apmu.c
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/err.h>
 #include <linux/delay.h>
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
index adbd9d6..d20cd34 100644
--- a/drivers/clk/mmp/clk-gate.c
+++ b/drivers/clk/mmp/clk-gate.c
@@ -27,7 +27,6 @@
 static int mmp_clk_gate_enable(struct clk_hw *hw)
 {
 	struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
-	struct clk *clk = hw->clk;
 	unsigned long flags = 0;
 	unsigned long rate;
 	u32 tmp;
@@ -44,7 +43,7 @@
 		spin_unlock_irqrestore(gate->lock, flags);
 
 	if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
-		rate = __clk_get_rate(clk);
+		rate = clk_hw_get_rate(hw);
 		/* Need delay 2 cycles. */
 		udelay(2000000/rate);
 	}
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
index de6a873..c554833c 100644
--- a/drivers/clk/mmp/clk-mix.c
+++ b/drivers/clk/mmp/clk-mix.c
@@ -63,7 +63,7 @@
 
 static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
 {
-	int num_parents = __clk_get_num_parents(mix->hw.clk);
+	int num_parents = clk_hw_get_num_parents(&mix->hw);
 	int i;
 
 	if (mix->mux_flags & CLK_MUX_INDEX_BIT)
@@ -113,15 +113,15 @@
 {
 	int i;
 	struct mmp_clk_mix_clk_table *item;
-	struct clk *parent, *clk;
+	struct clk_hw *parent, *hw;
 	unsigned long parent_rate;
 
-	clk = mix->hw.clk;
+	hw = &mix->hw;
 
 	for (i = 0; i < table_size; i++) {
 		item = &table[i];
-		parent = clk_get_parent_by_index(clk, item->parent_index);
-		parent_rate = __clk_get_rate(parent);
+		parent = clk_hw_get_parent_by_index(hw, item->parent_index);
+		parent_rate = clk_hw_get_rate(parent);
 		if (parent_rate % item->rate) {
 			item->valid = 0;
 		} else {
@@ -181,7 +181,7 @@
 
 		if (timeout == 0) {
 			pr_err("%s:%s cannot do frequency change\n",
-				__func__, __clk_get_name(mix->hw.clk));
+				__func__, clk_hw_get_name(&mix->hw));
 			ret = -EBUSY;
 			goto error;
 		}
@@ -201,27 +201,22 @@
 	return ret;
 }
 
-static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk)
+static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
+				      struct clk_rate_request *req)
 {
 	struct mmp_clk_mix *mix = to_clk_mix(hw);
 	struct mmp_clk_mix_clk_table *item;
-	struct clk *parent, *parent_best, *mix_clk;
+	struct clk_hw *parent, *parent_best;
 	unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
 	unsigned long gap, gap_best;
 	u32 div_val_max;
 	unsigned int div;
 	int i, j;
 
-	mix_clk = hw->clk;
 
-	parent = NULL;
 	mix_rate_best = 0;
 	parent_rate_best = 0;
-	gap_best = rate;
+	gap_best = ULONG_MAX;
 	parent_best = NULL;
 
 	if (mix->table) {
@@ -229,11 +224,11 @@
 			item = &mix->table[i];
 			if (item->valid == 0)
 				continue;
-			parent = clk_get_parent_by_index(mix_clk,
+			parent = clk_hw_get_parent_by_index(hw,
 							item->parent_index);
-			parent_rate = __clk_get_rate(parent);
+			parent_rate = clk_hw_get_rate(parent);
 			mix_rate = parent_rate / item->divisor;
-			gap = abs(mix_rate - rate);
+			gap = abs(mix_rate - req->rate);
 			if (parent_best == NULL || gap < gap_best) {
 				parent_best = parent;
 				parent_rate_best = parent_rate;
@@ -244,14 +239,14 @@
 			}
 		}
 	} else {
-		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
-			parent = clk_get_parent_by_index(mix_clk, i);
-			parent_rate = __clk_get_rate(parent);
+		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+			parent = clk_hw_get_parent_by_index(hw, i);
+			parent_rate = clk_hw_get_rate(parent);
 			div_val_max = _get_maxdiv(mix);
 			for (j = 0; j < div_val_max; j++) {
 				div = _get_div(mix, j);
 				mix_rate = parent_rate / div;
-				gap = abs(mix_rate - rate);
+				gap = abs(mix_rate - req->rate);
 				if (parent_best == NULL || gap < gap_best) {
 					parent_best = parent;
 					parent_rate_best = parent_rate;
@@ -265,10 +260,14 @@
 	}
 
 found:
-	*best_parent_rate = parent_rate_best;
-	*best_parent_clk = __clk_get_hw(parent_best);
+	if (!parent_best)
+		return -EINVAL;
 
-	return mix_rate_best;
+	req->best_parent_rate = parent_rate_best;
+	req->best_parent_hw = parent_best;
+	req->rate = mix_rate_best;
+
+	return 0;
 }
 
 static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
@@ -381,20 +380,19 @@
 	struct mmp_clk_mix_clk_table *item;
 	unsigned long parent_rate;
 	unsigned int best_divisor;
-	struct clk *mix_clk, *parent;
+	struct clk_hw *parent;
 	int i;
 
 	best_divisor = best_parent_rate / rate;
 
-	mix_clk = hw->clk;
 	if (mix->table) {
 		for (i = 0; i < mix->table_size; i++) {
 			item = &mix->table[i];
 			if (item->valid == 0)
 				continue;
-			parent = clk_get_parent_by_index(mix_clk,
+			parent = clk_hw_get_parent_by_index(hw,
 							item->parent_index);
-			parent_rate = __clk_get_rate(parent);
+			parent_rate = clk_hw_get_rate(parent);
 			if (parent_rate == best_parent_rate
 				&& item->divisor == best_divisor)
 				break;
@@ -407,13 +405,13 @@
 		else
 			return -EINVAL;
 	} else {
-		for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
-			parent = clk_get_parent_by_index(mix_clk, i);
-			parent_rate = __clk_get_rate(parent);
+		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+			parent = clk_hw_get_parent_by_index(hw, i);
+			parent_rate = clk_hw_get_rate(parent);
 			if (parent_rate == best_parent_rate)
 				break;
 		}
-		if (i < __clk_get_num_parents(mix_clk))
+		if (i < clk_hw_get_num_parents(hw))
 			return _set_rate(mix, _get_mux_val(mix, i),
 					_get_div_val(mix, best_divisor), 1, 1);
 		else
@@ -468,20 +466,20 @@
 	memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
 	if (config->table) {
 		table_bytes = sizeof(*config->table) * config->table_size;
-		mix->table = kzalloc(table_bytes, GFP_KERNEL);
+		mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
 		if (!mix->table) {
 			pr_err("%s:%s: could not allocate mmp mix table\n",
 				__func__, name);
 			kfree(mix);
 			return ERR_PTR(-ENOMEM);
 		}
-		memcpy(mix->table, config->table, table_bytes);
 		mix->table_size = config->table_size;
 	}
 
 	if (config->mux_table) {
 		table_bytes = sizeof(u32) * num_parents;
-		mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
+		mix->mux_table = kmemdup(config->mux_table, table_bytes,
+					 GFP_KERNEL);
 		if (!mix->mux_table) {
 			pr_err("%s:%s: could not allocate mmp mix mux-table\n",
 				__func__, name);
@@ -489,7 +487,6 @@
 			kfree(mix);
 			return ERR_PTR(-ENOMEM);
 		}
-		memcpy(mix->mux_table, config->mux_table, table_bytes);
 	}
 
 	mix->div_flags = config->div_flags;
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index cf038ef..61893fe 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -1,7 +1,6 @@
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 3821a880..5837eb8 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -10,7 +10,8 @@
  * warranty of any kind, whether express or implied.
  */
 #include <linux/kernel.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/io.h>
@@ -120,7 +121,7 @@
 	if (!cpuclk->pmu_dfs)
 		return -ENODEV;
 
-	cur_rate = __clk_get_rate(hwclk->clk);
+	cur_rate = clk_hw_get_rate(hwclk);
 
 	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
 	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
@@ -196,7 +197,6 @@
 	for_each_node_by_type(dn, "cpu") {
 		struct clk_init_data init;
 		struct clk *clk;
-		struct clk *parent_clk;
 		char *clk_name = kzalloc(5, GFP_KERNEL);
 		int cpu, err;
 
@@ -208,9 +208,8 @@
 			goto bail_out;
 
 		sprintf(clk_name, "cpu%d", cpu);
-		parent_clk = of_clk_get(node, 0);
 
-		cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
+		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
 		cpuclk[cpu].clk_name = clk_name;
 		cpuclk[cpu].cpu = cpu;
 		cpuclk[cpu].reg_base = clock_complex_base;
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 15b370f..4a22429 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -13,8 +13,8 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 90e1da9..049ee27 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -9,7 +9,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index e6aa6b5..73f0240 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -9,7 +9,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index 32216f9..f01876a 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -9,9 +9,8 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk/mxs.h>
-#include <linux/clkdev.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/init.h>
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index a686708..6b572b7 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -9,9 +9,9 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk/mxs.h>
 #include <linux/clkdev.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/init.h>
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
index fadae41..d4ca79a 100644
--- a/drivers/clk/mxs/clk-pll.c
+++ b/drivers/clk/mxs/clk-pll.c
@@ -9,7 +9,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/err.h>
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
index 4adeed6..495f99b 100644
--- a/drivers/clk/mxs/clk-ref.c
+++ b/drivers/clk/mxs/clk-ref.c
@@ -9,7 +9,6 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/io.h>
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
index f07d821..a459095 100644
--- a/drivers/clk/mxs/clk.h
+++ b/drivers/clk/mxs/clk.h
@@ -12,7 +12,8 @@
 #ifndef __MXS_CLK_H
 #define __MXS_CLK_H
 
-#include <linux/clk.h>
+struct clk;
+
 #include <linux/clk-provider.h>
 #include <linux/spinlock.h>
 
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 81e9e1c..e0a3cb8 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -8,7 +8,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/kernel.h>
diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
index 8c0fe88..c4ceb5e 100644
--- a/drivers/clk/pistachio/clk-pistachio.c
+++ b/drivers/clk/pistachio/clk-pistachio.c
@@ -159,9 +159,15 @@
 		     "wifi_pll_mux", "bt_pll_mux" };
 static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
 
-static unsigned int pistachio_critical_clks[] __initdata = {
-	CLK_MIPS,
-	CLK_PERIPH_SYS,
+static unsigned int pistachio_critical_clks_core[] __initdata = {
+	CLK_MIPS
+};
+
+static unsigned int pistachio_critical_clks_sys[] __initdata = {
+	PERIPH_CLK_SYS,
+	PERIPH_CLK_SYS_BUS,
+	PERIPH_CLK_DDR,
+	PERIPH_CLK_ROM,
 };
 
 static void __init pistachio_clk_init(struct device_node *np)
@@ -193,8 +199,8 @@
 
 	pistachio_clk_register_provider(p);
 
-	pistachio_clk_force_enable(p, pistachio_critical_clks,
-				   ARRAY_SIZE(pistachio_critical_clks));
+	pistachio_clk_force_enable(p, pistachio_critical_clks_core,
+				   ARRAY_SIZE(pistachio_critical_clks_core));
 }
 CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
 
@@ -261,6 +267,9 @@
 				    ARRAY_SIZE(pistachio_periph_gates));
 
 	pistachio_clk_register_provider(p);
+
+	pistachio_clk_force_enable(p, pistachio_critical_clks_sys,
+				   ARRAY_SIZE(pistachio_critical_clks_sys));
 }
 CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
 	       pistachio_clk_periph_init);
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index e17dada..7e8daab 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -65,6 +65,12 @@
 #define MIN_OUTPUT_FRAC			12000000UL
 #define MAX_OUTPUT_FRAC			1600000000UL
 
+/* Fractional PLL operating modes */
+enum pll_mode {
+	PLL_MODE_FRAC,
+	PLL_MODE_INT,
+};
+
 struct pistachio_clk_pll {
 	struct clk_hw hw;
 	void __iomem *base;
@@ -88,12 +94,10 @@
 		cpu_relax();
 }
 
-static inline u32 do_div_round_closest(u64 dividend, u32 divisor)
+static inline u64 do_div_round_closest(u64 dividend, u64 divisor)
 {
 	dividend += divisor / 2;
-	do_div(dividend, divisor);
-
-	return dividend;
+	return div64_u64(dividend, divisor);
 }
 
 static inline struct pistachio_clk_pll *to_pistachio_pll(struct clk_hw *hw)
@@ -101,6 +105,29 @@
 	return container_of(hw, struct pistachio_clk_pll, hw);
 }
 
+static inline enum pll_mode pll_frac_get_mode(struct clk_hw *hw)
+{
+	struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+	u32 val;
+
+	val = pll_readl(pll, PLL_CTRL3) & PLL_FRAC_CTRL3_DSMPD;
+	return val ? PLL_MODE_INT : PLL_MODE_FRAC;
+}
+
+static inline void pll_frac_set_mode(struct clk_hw *hw, enum pll_mode mode)
+{
+	struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
+	u32 val;
+
+	val = pll_readl(pll, PLL_CTRL3);
+	if (mode == PLL_MODE_INT)
+		val |= PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_DACPD;
+	else
+		val &= ~(PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_DACPD);
+
+	pll_writel(pll, val, PLL_CTRL3);
+}
+
 static struct pistachio_pll_rate_table *
 pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
 	       unsigned long fout)
@@ -136,8 +163,7 @@
 	u32 val;
 
 	val = pll_readl(pll, PLL_CTRL3);
-	val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
-		 PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
+	val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
 		 PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
 	pll_writel(pll, val, PLL_CTRL3);
 
@@ -173,8 +199,8 @@
 	struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
 	struct pistachio_pll_rate_table *params;
 	int enabled = pll_gf40lp_frac_is_enabled(hw);
-	u32 val, vco, old_postdiv1, old_postdiv2;
-	const char *name = __clk_get_name(hw->clk);
+	u64 val, vco, old_postdiv1, old_postdiv2;
+	const char *name = clk_hw_get_name(hw);
 
 	if (rate < MIN_OUTPUT_FRAC || rate > MAX_OUTPUT_FRAC)
 		return -EINVAL;
@@ -183,17 +209,21 @@
 	if (!params || !params->refdiv)
 		return -EINVAL;
 
-	vco = params->fref * params->fbdiv / params->refdiv;
+	/* vco = fref * (fbdiv + frac / 2^24) / refdiv, computed in 64 bits */
+	vco = params->fref;
+	vco *= (params->fbdiv << 24) + params->frac;
+	vco = div64_u64(vco, params->refdiv << 24);
+
 	if (vco < MIN_VCO_FRAC_FRAC || vco > MAX_VCO_FRAC_FRAC)
-		pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco,
+		pr_warn("%s: VCO %llu is out of range %lu..%lu\n", name, vco,
 			MIN_VCO_FRAC_FRAC, MAX_VCO_FRAC_FRAC);
 
-	val = params->fref / params->refdiv;
+	val = div64_u64(params->fref, params->refdiv);
 	if (val < MIN_PFD)
-		pr_warn("%s: PFD %u is too low (min %lu)\n",
+		pr_warn("%s: PFD %llu is too low (min %lu)\n",
 			name, val, MIN_PFD);
 	if (val > vco / 16)
-		pr_warn("%s: PFD %u is too high (max %u)\n",
+		pr_warn("%s: PFD %llu is too high (max %llu)\n",
 			name, val, vco / 16);
 
 	val = pll_readl(pll, PLL_CTRL1);
@@ -227,6 +257,12 @@
 		(params->postdiv2 << PLL_FRAC_CTRL2_POSTDIV2_SHIFT);
 	pll_writel(pll, val, PLL_CTRL2);
 
+	/* set operating mode */
+	if (params->frac)
+		pll_frac_set_mode(hw, PLL_MODE_FRAC);
+	else
+		pll_frac_set_mode(hw, PLL_MODE_INT);
+
 	if (enabled)
 		pll_lock(pll);
 
@@ -237,8 +273,7 @@
 						 unsigned long parent_rate)
 {
 	struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
-	u32 val, prediv, fbdiv, frac, postdiv1, postdiv2;
-	u64 rate = parent_rate;
+	u64 val, prediv, fbdiv, frac, postdiv1, postdiv2, rate;
 
 	val = pll_readl(pll, PLL_CTRL1);
 	prediv = (val >> PLL_CTRL1_REFDIV_SHIFT) & PLL_CTRL1_REFDIV_MASK;
@@ -251,7 +286,13 @@
 		PLL_FRAC_CTRL2_POSTDIV2_MASK;
 	frac = (val >> PLL_FRAC_CTRL2_FRAC_SHIFT) & PLL_FRAC_CTRL2_FRAC_MASK;
 
-	rate *= (fbdiv << 24) + frac;
+	/* get operating mode (int/frac) and calculate rate accordingly */
+	rate = parent_rate;
+	if (pll_frac_get_mode(hw) == PLL_MODE_FRAC)
+		rate *= (fbdiv << 24) + frac;
+	else
+		rate *= (fbdiv << 24);
+
 	rate = do_div_round_closest(rate, (prediv * postdiv1 * postdiv2) << 24);
 
 	return rate;
@@ -279,7 +320,7 @@
 	u32 val;
 
 	val = pll_readl(pll, PLL_CTRL1);
-	val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
+	val &= ~(PLL_INT_CTRL1_PD |
 		 PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
 	pll_writel(pll, val, PLL_CTRL1);
 
@@ -316,7 +357,7 @@
 	struct pistachio_pll_rate_table *params;
 	int enabled = pll_gf40lp_laint_is_enabled(hw);
 	u32 val, vco, old_postdiv1, old_postdiv2;
-	const char *name = __clk_get_name(hw->clk);
+	const char *name = clk_hw_get_name(hw);
 
 	if (rate < MIN_OUTPUT_LA || rate > MAX_OUTPUT_LA)
 		return -EINVAL;
@@ -325,12 +366,12 @@
 	if (!params || !params->refdiv)
 		return -EINVAL;
 
-	vco = params->fref * params->fbdiv / params->refdiv;
+	vco = div_u64(params->fref * params->fbdiv, params->refdiv);
 	if (vco < MIN_VCO_LA || vco > MAX_VCO_LA)
 		pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco,
 			MIN_VCO_LA, MAX_VCO_LA);
 
-	val = params->fref / params->refdiv;
+	val = div_u64(params->fref, params->refdiv);
 	if (val < MIN_PFD)
 		pr_warn("%s: PFD %u is too low (min %lu)\n",
 			name, val, MIN_PFD);
diff --git a/drivers/clk/pistachio/clk.c b/drivers/clk/pistachio/clk.c
index 85faa83..698cad4 100644
--- a/drivers/clk/pistachio/clk.c
+++ b/drivers/clk/pistachio/clk.c
@@ -6,6 +6,7 @@
  * version 2, as published by the Free Software Foundation.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
diff --git a/drivers/clk/pistachio/clk.h b/drivers/clk/pistachio/clk.h
index 52fabbc..8d45178 100644
--- a/drivers/clk/pistachio/clk.h
+++ b/drivers/clk/pistachio/clk.h
@@ -95,13 +95,13 @@
 	}
 
 struct pistachio_pll_rate_table {
-	unsigned long fref;
-	unsigned long fout;
-	unsigned int refdiv;
-	unsigned int fbdiv;
-	unsigned int postdiv1;
-	unsigned int postdiv2;
-	unsigned int frac;
+	unsigned long long fref;
+	unsigned long long fout;
+	unsigned long long refdiv;
+	unsigned long long fbdiv;
+	unsigned long long postdiv1;
+	unsigned long long postdiv2;
+	unsigned long long frac;
 };
 
 enum pistachio_pll_type {
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
index 6cd88d9..542e45e 100644
--- a/drivers/clk/pxa/clk-pxa25x.c
+++ b/drivers/clk/pxa/clk-pxa25x.c
@@ -79,7 +79,7 @@
 			clks[3] / 1000000, (clks[3] % 1000000) / 10000);
 	}
 
-	return (unsigned int)clks[0];
+	return (unsigned int)clks[0] / KHz;
 }
 
 static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index 9a31b77..5b82d30 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -80,7 +80,7 @@
 		pr_info("System bus clock: %ld.%02ldMHz\n",
 			clks[4] / 1000000, (clks[4] % 1000000) / 10000);
 	}
-	return (unsigned int)clks[0];
+	return (unsigned int)clks[0] / KHz;
 }
 
 bool pxa27x_is_ppll_disabled(void)
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index ac03ba4..4af4eed 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -78,7 +78,7 @@
 		pr_info("System bus clock: %ld.%02ldMHz\n",
 			clks[4] / 1000000, (clks[4] % 1000000) / 10000);
 	}
-	return (unsigned int)clks[0];
+	return (unsigned int)clks[0] / KHz;
 }
 
 static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 6b4d2bc..26f7af31 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -75,7 +75,7 @@
 		bool (check_halt)(const struct clk_branch *, bool))
 {
 	bool voted = br->halt_check & BRANCH_VOTED;
-	const char *name = __clk_get_name(br->clkr.hw.clk);
+	const char *name = clk_hw_get_name(&br->clkr.hw);
 
 	/* Skip checking halt bit if the clock is in hardware gated mode */
 	if (clk_branch_in_hwcg_mode(br))
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 245d506..5b940d6 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -135,19 +135,19 @@
 	return NULL;
 }
 
-static long
-clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
-		       unsigned long min_rate, unsigned long max_rate,
-		       unsigned long *p_rate, struct clk_hw **p)
+static int
+clk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
 {
 	struct clk_pll *pll = to_clk_pll(hw);
 	const struct pll_freq_tbl *f;
 
-	f = find_freq(pll->freq_tbl, rate);
+	f = find_freq(pll->freq_tbl, req->rate);
 	if (!f)
-		return clk_pll_recalc_rate(hw, *p_rate);
+		req->rate = clk_pll_recalc_rate(hw, req->best_parent_rate);
+	else
+		req->rate = f->freq;
 
-	return f->freq;
+	return 0;
 }
 
 static int
@@ -194,7 +194,7 @@
 	u32 val;
 	int count;
 	int ret;
-	const char *name = __clk_get_name(pll->clkr.hw.clk);
+	const char *name = clk_hw_get_name(&pll->clkr.hw);
 
 	/* Wait for pll to enable. */
 	for (count = 200; count > 0; count--) {
@@ -213,7 +213,7 @@
 static int clk_pll_vote_enable(struct clk_hw *hw)
 {
 	int ret;
-	struct clk_pll *p = to_clk_pll(__clk_get_hw(__clk_get_parent(hw->clk)));
+	struct clk_pll *p = to_clk_pll(clk_hw_get_parent(hw));
 
 	ret = clk_enable_regmap(hw);
 	if (ret)
@@ -292,3 +292,78 @@
 		clk_pll_set_fsm_mode(pll, regmap, 0);
 }
 EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
+
+static int clk_pll_sr2_enable(struct clk_hw *hw)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+	int ret;
+	u32 mode;
+
+	ret = regmap_read(pll->clkr.regmap, pll->mode_reg, &mode);
+	if (ret)
+		return ret;
+
+	/* Disable PLL bypass mode. */
+	ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_BYPASSNL,
+				 PLL_BYPASSNL);
+	if (ret)
+		return ret;
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	ret = regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_RESET_N,
+				 PLL_RESET_N);
+	if (ret)
+		return ret;
+
+	ret = wait_for_pll(pll);
+	if (ret)
+		return ret;
+
+	/* Enable PLL output. */
+	return regmap_update_bits(pll->clkr.regmap, pll->mode_reg, PLL_OUTCTRL,
+				 PLL_OUTCTRL);
+}
+
+static int
+clk_pll_sr2_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+	struct clk_pll *pll = to_clk_pll(hw);
+	const struct pll_freq_tbl *f;
+	bool enabled;
+	u32 mode;
+	u32 enable_mask = PLL_OUTCTRL | PLL_BYPASSNL | PLL_RESET_N;
+
+	f = find_freq(pll->freq_tbl, rate);
+	if (!f)
+		return -EINVAL;
+
+	regmap_read(pll->clkr.regmap, pll->mode_reg, &mode);
+	enabled = (mode & enable_mask) == enable_mask;
+
+	if (enabled)
+		clk_pll_disable(hw);
+
+	regmap_update_bits(pll->clkr.regmap, pll->l_reg, 0x3ff, f->l);
+	regmap_update_bits(pll->clkr.regmap, pll->m_reg, 0x7ffff, f->m);
+	regmap_update_bits(pll->clkr.regmap, pll->n_reg, 0x7ffff, f->n);
+
+	if (enabled)
+		clk_pll_sr2_enable(hw);
+
+	return 0;
+}
+
+const struct clk_ops clk_pll_sr2_ops = {
+	.enable = clk_pll_sr2_enable,
+	.disable = clk_pll_disable,
+	.set_rate = clk_pll_sr2_set_rate,
+	.recalc_rate = clk_pll_recalc_rate,
+	.determine_rate = clk_pll_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pll_sr2_ops);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index c9c0cda..ffd0c63 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -62,6 +62,7 @@
 
 extern const struct clk_ops clk_pll_ops;
 extern const struct clk_ops clk_pll_vote_ops;
+extern const struct clk_ops clk_pll_sr2_ops;
 
 #define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr)
 
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 7b3d626..bccedc4 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -45,7 +45,7 @@
 static u8 clk_rcg_get_parent(struct clk_hw *hw)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 	u32 ns;
 	int i, ret;
 
@@ -59,7 +59,7 @@
 
 err:
 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
-		 __func__, __clk_get_name(hw->clk));
+		 __func__, clk_hw_get_name(hw));
 	return 0;
 }
 
@@ -72,7 +72,7 @@
 static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
 {
 	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 	u32 ns, reg;
 	int bank;
 	int i, ret;
@@ -95,7 +95,7 @@
 
 err:
 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
-		 __func__, __clk_get_name(hw->clk));
+		 __func__, clk_hw_get_name(hw));
 	return 0;
 }
 
@@ -404,14 +404,12 @@
 	return calc_rate(parent_rate, m, n, mode, pre_div);
 }
 
-static long _freq_tbl_determine_rate(struct clk_hw *hw,
-		const struct freq_tbl *f, unsigned long rate,
-		unsigned long min_rate, unsigned long max_rate,
-		unsigned long *p_rate, struct clk_hw **p_hw,
+static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
+		struct clk_rate_request *req,
 		const struct parent_map *parent_map)
 {
-	unsigned long clk_flags;
-	struct clk *p;
+	unsigned long clk_flags, rate = req->rate;
+	struct clk_hw *p;
 	int index;
 
 	f = qcom_find_freq(f, rate);
@@ -422,8 +420,8 @@
 	if (index < 0)
 		return index;
 
-	clk_flags = __clk_get_flags(hw->clk);
-	p = clk_get_parent_by_index(hw->clk, index);
+	clk_flags = clk_hw_get_flags(hw);
+	p = clk_hw_get_parent_by_index(hw, index);
 	if (clk_flags & CLK_SET_RATE_PARENT) {
 		rate = rate * f->pre_div;
 		if (f->n) {
@@ -433,27 +431,26 @@
 			rate = tmp;
 		}
 	} else {
-		rate =  __clk_get_rate(p);
+		rate =  clk_hw_get_rate(p);
 	}
-	*p_hw = __clk_get_hw(p);
-	*p_rate = rate;
+	req->best_parent_hw = p;
+	req->best_parent_rate = rate;
+	req->rate = f->freq;
 
-	return f->freq;
+	return 0;
 }
 
-static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long min_rate, unsigned long max_rate,
-		unsigned long *p_rate, struct clk_hw **p)
+static int clk_rcg_determine_rate(struct clk_hw *hw,
+				  struct clk_rate_request *req)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
-			max_rate, p_rate, p, rcg->s.parent_map);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req,
+					rcg->s.parent_map);
 }
 
-static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long min_rate, unsigned long max_rate,
-		unsigned long *p_rate, struct clk_hw **p)
+static int clk_dyn_rcg_determine_rate(struct clk_hw *hw,
+				      struct clk_rate_request *req)
 {
 	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 	u32 reg;
@@ -464,24 +461,22 @@
 	bank = reg_to_bank(rcg, reg);
 	s = &rcg->s[bank];
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
-			max_rate, p_rate, p, s->parent_map);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map);
 }
 
-static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long min_rate, unsigned long max_rate,
-		unsigned long *p_rate, struct clk_hw **p_hw)
+static int clk_rcg_bypass_determine_rate(struct clk_hw *hw,
+					 struct clk_rate_request *req)
 {
 	struct clk_rcg *rcg = to_clk_rcg(hw);
 	const struct freq_tbl *f = rcg->freq_tbl;
-	struct clk *p;
+	struct clk_hw *p;
 	int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src);
 
-	p = clk_get_parent_by_index(hw->clk, index);
-	*p_hw = __clk_get_hw(p);
-	*p_rate = __clk_round_rate(p, rate);
+	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+	req->best_parent_rate = clk_hw_round_rate(p, req->rate);
+	req->rate = req->best_parent_rate;
 
-	return *p_rate;
+	return 0;
 }
 
 static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 92936f0..9aec176 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -63,7 +63,7 @@
 static u8 clk_rcg2_get_parent(struct clk_hw *hw)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 	u32 cfg;
 	int i, ret;
 
@@ -80,7 +80,7 @@
 
 err:
 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
-		 __func__, __clk_get_name(hw->clk));
+		 __func__, clk_hw_get_name(hw));
 	return 0;
 }
 
@@ -89,7 +89,7 @@
 	int count, ret;
 	u32 cmd;
 	struct clk_hw *hw = &rcg->clkr.hw;
-	const char *name = __clk_get_name(hw->clk);
+	const char *name = clk_hw_get_name(hw);
 
 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
 				 CMD_UPDATE, CMD_UPDATE);
@@ -176,12 +176,11 @@
 	return calc_rate(parent_rate, m, n, mode, hid_div);
 }
 
-static long _freq_tbl_determine_rate(struct clk_hw *hw,
-		const struct freq_tbl *f, unsigned long rate,
-		unsigned long *p_rate, struct clk_hw **p_hw)
+static int _freq_tbl_determine_rate(struct clk_hw *hw,
+		const struct freq_tbl *f, struct clk_rate_request *req)
 {
-	unsigned long clk_flags;
-	struct clk *p;
+	unsigned long clk_flags, rate = req->rate;
+	struct clk_hw *p;
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	int index;
 
@@ -193,8 +192,8 @@
 	if (index < 0)
 		return index;
 
-	clk_flags = __clk_get_flags(hw->clk);
-	p = clk_get_parent_by_index(hw->clk, index);
+	clk_flags = clk_hw_get_flags(hw);
+	p = clk_hw_get_parent_by_index(hw, index);
 	if (clk_flags & CLK_SET_RATE_PARENT) {
 		if (f->pre_div) {
 			rate /= 2;
@@ -208,21 +207,21 @@
 			rate = tmp;
 		}
 	} else {
-		rate =  __clk_get_rate(p);
+		rate =  clk_hw_get_rate(p);
 	}
-	*p_hw = __clk_get_hw(p);
-	*p_rate = rate;
+	req->best_parent_hw = p;
+	req->best_parent_rate = rate;
+	req->rate = f->freq;
 
-	return f->freq;
+	return 0;
 }
 
-static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long min_rate, unsigned long max_rate,
-		unsigned long *p_rate, struct clk_hw **p)
+static int clk_rcg2_determine_rate(struct clk_hw *hw,
+				   struct clk_rate_request *req)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 
-	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req);
 }
 
 static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
@@ -374,35 +373,33 @@
 	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
 }
 
-static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
-				 unsigned long min_rate,
-				 unsigned long max_rate,
-				 unsigned long *p_rate, struct clk_hw **p)
+static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
+					struct clk_rate_request *req)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	const struct freq_tbl *f = rcg->freq_tbl;
 	const struct frac_entry *frac;
 	int delta = 100000;
-	s64 src_rate = *p_rate;
 	s64 request;
 	u32 mask = BIT(rcg->hid_width) - 1;
 	u32 hid_div;
 	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 
 	/* Force the correct parent */
-	*p = __clk_get_hw(clk_get_parent_by_index(hw->clk, index));
+	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
+	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);
 
-	if (src_rate == 810000000)
+	if (req->best_parent_rate == 810000000)
 		frac = frac_table_810m;
 	else
 		frac = frac_table_675m;
 
 	for (; frac->num; frac++) {
-		request = rate;
+		request = req->rate;
 		request *= frac->den;
 		request = div_s64(request, frac->num);
-		if ((src_rate < (request - delta)) ||
-		    (src_rate > (request + delta)))
+		if ((req->best_parent_rate < (request - delta)) ||
+		    (req->best_parent_rate > (request + delta)))
 			continue;
 
 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
@@ -410,8 +407,10 @@
 		hid_div >>= CFG_SRC_DIV_SHIFT;
 		hid_div &= mask;
 
-		return calc_rate(src_rate, frac->num, frac->den, !!frac->den,
-				 hid_div);
+		req->rate = calc_rate(req->best_parent_rate,
+				      frac->num, frac->den,
+				      !!frac->den, hid_div);
+		return 0;
 	}
 
 	return -EINVAL;
@@ -428,28 +427,28 @@
 };
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
-static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
-			 unsigned long min_rate, unsigned long max_rate,
-			 unsigned long *p_rate, struct clk_hw **p_hw)
+static int clk_byte_determine_rate(struct clk_hw *hw,
+				   struct clk_rate_request *req)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	const struct freq_tbl *f = rcg->freq_tbl;
 	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
 	unsigned long parent_rate, div;
 	u32 mask = BIT(rcg->hid_width) - 1;
-	struct clk *p;
+	struct clk_hw *p;
 
-	if (rate == 0)
+	if (req->rate == 0)
 		return -EINVAL;
 
-	p = clk_get_parent_by_index(hw->clk, index);
-	*p_hw = __clk_get_hw(p);
-	*p_rate = parent_rate = __clk_round_rate(p, rate);
+	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
+	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
 
-	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
 	div = min_t(u32, div, mask);
 
-	return calc_rate(parent_rate, 0, 0, 0, div);
+	req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+	return 0;
 }
 
 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -494,10 +493,8 @@
 	{ }
 };
 
-static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
-				 unsigned long min_rate,
-				 unsigned long max_rate,
-				 unsigned long *p_rate, struct clk_hw **p)
+static int clk_pixel_determine_rate(struct clk_hw *hw,
+				    struct clk_rate_request *req)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	unsigned long request, src_rate;
@@ -505,20 +502,20 @@
 	const struct freq_tbl *f = rcg->freq_tbl;
 	const struct frac_entry *frac = frac_table_pixel;
 	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
-	struct clk *parent = clk_get_parent_by_index(hw->clk, index);
 
-	*p = __clk_get_hw(parent);
+	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
 
 	for (; frac->num; frac++) {
-		request = (rate * frac->den) / frac->num;
+		request = (req->rate * frac->den) / frac->num;
 
-		src_rate = __clk_round_rate(parent, request);
+		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
 		if ((src_rate < (request - delta)) ||
 			(src_rate > (request + delta)))
 			continue;
 
-		*p_rate = src_rate;
-		return (src_rate * frac->num) / frac->den;
+		req->best_parent_rate = src_rate;
+		req->rate = (src_rate * frac->num) / frac->den;
+		return 0;
 	}
 
 	return -EINVAL;
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index f7101e3..2dedcee 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/module.h>
 #include <linux/regmap.h>
 #include <linux/platform_device.h>
 #include <linux/clk-provider.h>
@@ -45,7 +46,7 @@
 
 int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
 {
-	int i, num_parents = __clk_get_num_parents(hw->clk);
+	int i, num_parents = clk_hw_get_num_parents(hw);
 
 	for (i = 0; i < num_parents; i++)
 		if (src == map[i].src)
@@ -144,3 +145,5 @@
 	reset_controller_unregister(platform_get_drvdata(pdev));
 }
 EXPORT_SYMBOL_GPL(qcom_cc_remove);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 54a756b9..3563019 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -48,7 +48,7 @@
 	{ P_GPLL0, 1 }
 };
 
-static const char *gcc_xo_gpll0[] = {
+static const char * const gcc_xo_gpll0[] = {
 	"xo",
 	"gpll0_vote",
 };
@@ -59,7 +59,7 @@
 	{ P_GPLL4, 5 }
 };
 
-static const char *gcc_xo_gpll0_gpll4[] = {
+static const char * const gcc_xo_gpll0_gpll4[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll4_vote",
@@ -70,7 +70,7 @@
 	{ P_SATA_ASIC0_CLK, 2 }
 };
 
-static const char *gcc_xo_sata_asic0[] = {
+static const char * const gcc_xo_sata_asic0[] = {
 	"xo",
 	"sata_asic0_clk",
 };
@@ -80,7 +80,7 @@
 	{ P_SATA_RX_CLK, 2}
 };
 
-static const char *gcc_xo_sata_rx[] = {
+static const char * const gcc_xo_sata_rx[] = {
 	"xo",
 	"sata_rx_clk",
 };
@@ -90,7 +90,7 @@
 	{ P_PCIE_0_1_PIPE_CLK, 2 }
 };
 
-static const char *gcc_xo_pcie[] = {
+static const char * const gcc_xo_pcie[] = {
 	"xo",
 	"pcie_pipe",
 };
@@ -100,7 +100,7 @@
 	{ P_SLEEP_CLK, 6 }
 };
 
-static const char *gcc_xo_pcie_sleep[] = {
+static const char * const gcc_xo_pcie_sleep[] = {
 	"xo",
 	"sleep_clk_src",
 };
@@ -2105,6 +2105,7 @@
 				"ce1_clk_src",
 			},
 			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
 			.ops = &clk_branch2_ops,
 		},
 	},
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 5639699..40e4802 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -188,7 +188,7 @@
 	{ P_PLL8, 3 }
 };
 
-static const char *gcc_pxo_pll8[] = {
+static const char * const gcc_pxo_pll8[] = {
 	"pxo",
 	"pll8_vote",
 };
@@ -199,7 +199,7 @@
 	{ P_CXO, 5 }
 };
 
-static const char *gcc_pxo_pll8_cxo[] = {
+static const char * const gcc_pxo_pll8_cxo[] = {
 	"pxo",
 	"pll8_vote",
 	"cxo",
@@ -215,7 +215,7 @@
 	{ P_PLL3, 6 }
 };
 
-static const char *gcc_pxo_pll3[] = {
+static const char * const gcc_pxo_pll3[] = {
 	"pxo",
 	"pll3",
 };
@@ -226,7 +226,7 @@
 	{ P_PLL0, 2 }
 };
 
-static const char *gcc_pxo_pll8_pll0_map[] = {
+static const char * const gcc_pxo_pll8_pll0_map[] = {
 	"pxo",
 	"pll8_vote",
 	"pll0_vote",
@@ -240,7 +240,7 @@
 	{ P_PLL18, 1 }
 };
 
-static const char *gcc_pxo_pll8_pll14_pll18_pll0[] = {
+static const char * const gcc_pxo_pll8_pll14_pll18_pll0[] = {
 	"pxo",
 	"pll8_vote",
 	"pll0_vote",
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index fc6b12d..b02826e 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -70,7 +70,7 @@
 	{ P_PLL8, 3 }
 };
 
-static const char *gcc_pxo_pll8[] = {
+static const char * const gcc_pxo_pll8[] = {
 	"pxo",
 	"pll8_vote",
 };
@@ -81,7 +81,7 @@
 	{ P_CXO, 5 }
 };
 
-static const char *gcc_pxo_pll8_cxo[] = {
+static const char * const gcc_pxo_pll8_cxo[] = {
 	"pxo",
 	"pll8_vote",
 	"cxo",
@@ -1917,7 +1917,7 @@
 	}
 };
 
-static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
 
 static struct clk_branch usb_fs1_xcvr_fs_clk = {
 	.halt_reg = 0x2fcc,
@@ -1984,7 +1984,7 @@
 	}
 };
 
-static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
 
 static struct clk_branch usb_fs2_xcvr_fs_clk = {
 	.halt_reg = 0x2fcc,
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index c66f7bc..22a4e1e 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -51,7 +51,7 @@
 	{ P_GPLL0, 1 },
 };
 
-static const char *gcc_xo_gpll0[] = {
+static const char * const gcc_xo_gpll0[] = {
 	"xo",
 	"gpll0_vote",
 };
@@ -62,7 +62,7 @@
 	{ P_BIMC, 2 },
 };
 
-static const char *gcc_xo_gpll0_bimc[] = {
+static const char * const gcc_xo_gpll0_bimc[] = {
 	"xo",
 	"gpll0_vote",
 	"bimc_pll_vote",
@@ -75,7 +75,7 @@
 	{ P_GPLL2_AUX, 2 },
 };
 
-static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
+static const char * const gcc_xo_gpll0a_gpll1_gpll2a[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll1_vote",
@@ -88,7 +88,7 @@
 	{ P_GPLL2, 2 },
 };
 
-static const char *gcc_xo_gpll0_gpll2[] = {
+static const char * const gcc_xo_gpll0_gpll2[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll2_vote",
@@ -99,7 +99,7 @@
 	{ P_GPLL0_AUX, 2 },
 };
 
-static const char *gcc_xo_gpll0a[] = {
+static const char * const gcc_xo_gpll0a[] = {
 	"xo",
 	"gpll0_vote",
 };
@@ -111,7 +111,7 @@
 	{ P_SLEEP_CLK, 6 },
 };
 
-static const char *gcc_xo_gpll0_gpll1a_sleep[] = {
+static const char * const gcc_xo_gpll0_gpll1a_sleep[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll1_vote",
@@ -124,7 +124,7 @@
 	{ P_GPLL1_AUX, 2 },
 };
 
-static const char *gcc_xo_gpll0_gpll1a[] = {
+static const char * const gcc_xo_gpll0_gpll1a[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll1_vote",
@@ -135,7 +135,7 @@
 	{ P_DSI0_PHYPLL_BYTE, 2 },
 };
 
-static const char *gcc_xo_dsibyte[] = {
+static const char * const gcc_xo_dsibyte[] = {
 	"xo",
 	"dsi0pllbyte",
 };
@@ -146,7 +146,7 @@
 	{ P_DSI0_PHYPLL_BYTE, 1 },
 };
 
-static const char *gcc_xo_gpll0a_dsibyte[] = {
+static const char * const gcc_xo_gpll0a_dsibyte[] = {
 	"xo",
 	"gpll0_vote",
 	"dsi0pllbyte",
@@ -158,7 +158,7 @@
 	{ P_DSI0_PHYPLL_DSI, 2 },
 };
 
-static const char *gcc_xo_gpll0_dsiphy[] = {
+static const char * const gcc_xo_gpll0_dsiphy[] = {
 	"xo",
 	"gpll0_vote",
 	"dsi0pll",
@@ -170,7 +170,7 @@
 	{ P_DSI0_PHYPLL_DSI, 1 },
 };
 
-static const char *gcc_xo_gpll0a_dsiphy[] = {
+static const char * const gcc_xo_gpll0a_dsiphy[] = {
 	"xo",
 	"gpll0_vote",
 	"dsi0pll",
@@ -183,7 +183,7 @@
 	{ P_GPLL2, 2 },
 };
 
-static const char *gcc_xo_gpll0a_gpll1_gpll2[] = {
+static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll1_vote",
@@ -2278,7 +2278,7 @@
 	.halt_check = BRANCH_HALT_VOTED,
 	.clkr = {
 		.enable_reg = 0x45004,
-		.enable_mask = BIT(0),
+		.enable_mask = BIT(8),
 		.hw.init = &(struct clk_init_data){
 			.name = "gcc_prng_ahb_clk",
 			.parent_names = (const char *[]){
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index eb6a4f9..aa294b1 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -125,7 +125,7 @@
 	{ P_PLL8, 3 }
 };
 
-static const char *gcc_pxo_pll8[] = {
+static const char * const gcc_pxo_pll8[] = {
 	"pxo",
 	"pll8_vote",
 };
@@ -136,7 +136,7 @@
 	{ P_CXO, 5 }
 };
 
-static const char *gcc_pxo_pll8_cxo[] = {
+static const char * const gcc_pxo_pll8_cxo[] = {
 	"pxo",
 	"pll8_vote",
 	"cxo",
@@ -148,7 +148,7 @@
 	{ P_PLL3, 6 }
 };
 
-static const char *gcc_pxo_pll8_pll3[] = {
+static const char * const gcc_pxo_pll8_pll3[] = {
 	"pxo",
 	"pll8_vote",
 	"pll3",
@@ -2085,7 +2085,7 @@
 	}
 };
 
-static const char *usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
+static const char * const usb_hsic_xcvr_fs_src_p[] = { "usb_hsic_xcvr_fs_src" };
 
 static struct clk_branch usb_hsic_xcvr_fs_clk = {
 	.halt_reg = 0x2fc8,
@@ -2181,7 +2181,7 @@
 	}
 };
 
-static const char *usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
+static const char * const usb_fs1_xcvr_fs_src_p[] = { "usb_fs1_xcvr_fs_src" };
 
 static struct clk_branch usb_fs1_xcvr_fs_clk = {
 	.halt_reg = 0x2fcc,
@@ -2248,7 +2248,7 @@
 	}
 };
 
-static const char *usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
+static const char * const usb_fs2_xcvr_fs_src_p[] = { "usb_fs2_xcvr_fs_src" };
 
 static struct clk_branch usb_fs2_xcvr_fs_clk = {
 	.halt_reg = 0x2fcc,
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index c39d098..2bcf875 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -44,7 +44,7 @@
 	{ P_GPLL0, 1 }
 };
 
-static const char *gcc_xo_gpll0[] = {
+static const char * const gcc_xo_gpll0[] = {
 	"xo",
 	"gpll0_vote",
 };
@@ -55,7 +55,7 @@
 	{ P_GPLL4, 5 }
 };
 
-static const char *gcc_xo_gpll0_gpll4[] = {
+static const char * const gcc_xo_gpll0_gpll4[] = {
 	"xo",
 	"gpll0_vote",
 	"gpll4_vote",
@@ -1783,6 +1783,7 @@
 				"ce1_clk_src",
 			},
 			.num_parents = 1,
+			.flags = CLK_SET_RATE_PARENT,
 			.ops = &clk_branch2_ops,
 		},
 	},
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 47f0ac1..93ad42b 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -71,7 +71,7 @@
 	{ P_PLL4, 2 }
 };
 
-static const char *lcc_pxo_pll4[] = {
+static const char * const lcc_pxo_pll4[] = {
 	"pxo",
 	"pll4_vote",
 };
@@ -146,7 +146,7 @@
 	},
 };
 
-static const char *lcc_mi2s_parents[] = {
+static const char * const lcc_mi2s_parents[] = {
 	"mi2s_osr_src",
 };
 
@@ -340,7 +340,7 @@
 	},
 };
 
-static const char *lcc_spdif_parents[] = {
+static const char * const lcc_spdif_parents[] = {
 	"spdif_src",
 };
 
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index d0df9d5..ecb96c2 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -57,7 +57,7 @@
 	{ P_PLL4, 2 }
 };
 
-static const char *lcc_pxo_pll4[] = {
+static const char * const lcc_pxo_pll4[] = {
 	"pxo",
 	"pll4_vote",
 };
@@ -127,7 +127,7 @@
 	},
 };
 
-static const char *lcc_mi2s_parents[] = {
+static const char * const lcc_mi2s_parents[] = {
 	"mi2s_osr_src",
 };
 
@@ -233,7 +233,7 @@
 	},							\
 };								\
 								\
-static const char *lcc_##prefix##_parents[] = {			\
+static const char * const lcc_##prefix##_parents[] = {		\
 	#prefix "_osr_src",					\
 };								\
 								\
@@ -445,7 +445,7 @@
 	},
 };
 
-static const char *lcc_slimbus_parents[] = {
+static const char * const lcc_slimbus_parents[] = {
 	"slimbus_src",
 };
 
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1b17df2..f0ee6bd 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -53,7 +53,7 @@
 	{ P_GPLL0, 5 }
 };
 
-static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -69,7 +69,7 @@
 	{ P_DSI1PLL, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"hdmipll",
@@ -86,7 +86,7 @@
 	{ P_MMPLL2, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -102,7 +102,7 @@
 	{ P_MMPLL3, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -119,7 +119,7 @@
 	{ P_DSI1PLL, 2 }
 };
 
-static const char *mmcc_xo_dsi_hdmi_edp[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp[] = {
 	"xo",
 	"edp_link_clk",
 	"hdmipll",
@@ -137,7 +137,7 @@
 	{ P_DSI1PLL, 2 }
 };
 
-static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = {
 	"xo",
 	"edp_link_clk",
 	"hdmipll",
@@ -155,7 +155,7 @@
 	{ P_DSI1PLL_BYTE, 2 }
 };
 
-static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
 	"xo",
 	"edp_link_clk",
 	"hdmipll",
@@ -172,7 +172,7 @@
 	{ P_MMPLL4, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_1_4_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_4_gpll0[] = {
 	"xo",
 	"mmpll0",
 	"mmpll1",
@@ -189,7 +189,7 @@
 	{ P_GPLL1, 4 }
 };
 
-static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = {
+static const char * const mmcc_xo_mmpll0_1_4_gpll1_0[] = {
 	"xo",
 	"mmpll0",
 	"mmpll1",
@@ -208,7 +208,7 @@
 	{ P_MMSLEEP, 6 }
 };
 
-static const char *mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
+static const char * const mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
 	"xo",
 	"mmpll0",
 	"mmpll1",
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 9711bca..bad02ae 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/regmap.h>
 #include <linux/reset-controller.h>
@@ -50,7 +51,7 @@
 	{ P_PLL2, 1 }
 };
 
-static const char *mmcc_pxo_pll8_pll2[] = {
+static const char * const mmcc_pxo_pll8_pll2[] = {
 	"pxo",
 	"pll8_vote",
 	"pll2",
@@ -63,7 +64,7 @@
 	{ P_PLL3, 3 }
 };
 
-static const char *mmcc_pxo_pll8_pll2_pll15[] = {
+static const char * const mmcc_pxo_pll8_pll2_pll15[] = {
 	"pxo",
 	"pll8_vote",
 	"pll2",
@@ -77,7 +78,7 @@
 	{ P_PLL15, 3 }
 };
 
-static const char *mmcc_pxo_pll8_pll2_pll3[] = {
+static const char * const mmcc_pxo_pll8_pll2_pll3[] = {
 	"pxo",
 	"pll8_vote",
 	"pll2",
@@ -508,8 +509,7 @@
 	int ret = 0;
 	u32 val;
 	struct clk_pix_rdi *rdi = to_clk_pix_rdi(hw);
-	struct clk *clk = hw->clk;
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 
 	/*
 	 * These clocks select three inputs via two muxes. One mux selects
@@ -520,7 +520,8 @@
 	 * needs to be on at what time.
 	 */
 	for (i = 0; i < num_parents; i++) {
-		ret = clk_prepare_enable(clk_get_parent_by_index(clk, i));
+		struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+		ret = clk_prepare_enable(p->clk);
 		if (ret)
 			goto err;
 	}
@@ -548,8 +549,10 @@
 	udelay(1);
 
 err:
-	for (i--; i >= 0; i--)
-		clk_disable_unprepare(clk_get_parent_by_index(clk, i));
+	for (i--; i >= 0; i--) {
+		struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+		clk_disable_unprepare(p->clk);
+	}
 
 	return ret;
 }
@@ -579,7 +582,7 @@
 	.determine_rate = __clk_mux_determine_rate,
 };
 
-static const char *pix_rdi_parents[] = {
+static const char * const pix_rdi_parents[] = {
 	"csi0_clk",
 	"csi1_clk",
 	"csi2_clk",
@@ -709,7 +712,7 @@
 	},
 };
 
-static const char *csixphy_timer_src[] = { "csiphytimer_src" };
+static const char * const csixphy_timer_src[] = { "csiphytimer_src" };
 
 static struct clk_branch csiphy0_timer_clk = {
 	.halt_reg = 0x01e8,
@@ -1385,7 +1388,7 @@
 	{ P_HDMI_PLL, 3 }
 };
 
-static const char *mmcc_pxo_hdmi[] = {
+static const char * const mmcc_pxo_hdmi[] = {
 	"pxo",
 	"hdmi_pll",
 };
@@ -1428,7 +1431,7 @@
 	},
 };
 
-static const char *tv_src_name[] = { "tv_src" };
+static const char * const tv_src_name[] = { "tv_src" };
 
 static struct clk_branch tv_enc_clk = {
 	.halt_reg = 0x01d4,
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 07f4cc1..0987bf4 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -56,7 +56,7 @@
 	{ P_GPLL0, 5 }
 };
 
-static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_mmpll1_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -72,7 +72,7 @@
 	{ P_DSI1PLL, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"hdmipll",
@@ -89,7 +89,7 @@
 	{ P_MMPLL2, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_2_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -105,7 +105,7 @@
 	{ P_MMPLL3, 3 }
 };
 
-static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+static const char * const mmcc_xo_mmpll0_1_3_gpll0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -121,7 +121,7 @@
 	{ P_GPLL1, 4 }
 };
 
-static const char *mmcc_xo_mmpll0_1_gpll1_0[] = {
+static const char * const mmcc_xo_mmpll0_1_gpll1_0[] = {
 	"xo",
 	"mmpll0_vote",
 	"mmpll1_vote",
@@ -138,7 +138,7 @@
 	{ P_DSI1PLL, 2 }
 };
 
-static const char *mmcc_xo_dsi_hdmi_edp[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp[] = {
 	"xo",
 	"edp_link_clk",
 	"hdmipll",
@@ -156,7 +156,7 @@
 	{ P_DSI1PLL, 2 }
 };
 
-static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsi_hdmi_edp_gpll0[] = {
 	"xo",
 	"edp_link_clk",
 	"hdmipll",
@@ -174,7 +174,7 @@
 	{ P_DSI1PLL_BYTE, 2 }
 };
 
-static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+static const char * const mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
 	"xo",
 	"edp_link_clk",
 	"hdmipll",
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index 2714097..b27edd6 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -6,8 +6,10 @@
 obj-y	+= clk.o
 obj-y	+= clk-pll.o
 obj-y	+= clk-cpu.o
+obj-y	+= clk-inverter.o
 obj-y	+= clk-mmc-phase.o
 obj-$(CONFIG_RESET_CONTROLLER)	+= softrst.o
 
 obj-y	+= clk-rk3188.o
 obj-y	+= clk-rk3288.o
+obj-y	+= clk-rk3368.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index fb7721b..330870a 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -35,6 +35,7 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include "clk.h"
 
diff --git a/drivers/clk/rockchip/clk-inverter.c b/drivers/clk/rockchip/clk-inverter.c
new file mode 100644
index 0000000..7cbf43b
--- /dev/null
+++ b/drivers/clk/rockchip/clk-inverter.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include "clk.h"
+
+struct rockchip_inv_clock {
+	struct clk_hw	hw;
+	void __iomem	*reg;
+	int		shift;
+	int		flags;
+	spinlock_t	*lock;
+};
+
+#define to_inv_clock(_hw) container_of(_hw, struct rockchip_inv_clock, hw)
+
+#define INVERTER_MASK 0x1
+
+static int rockchip_inv_get_phase(struct clk_hw *hw)
+{
+	struct rockchip_inv_clock *inv_clock = to_inv_clock(hw);
+	u32 val;
+
+	val = readl(inv_clock->reg) >> inv_clock->shift;
+	val &= INVERTER_MASK;
+	return val ? 180 : 0;
+}
+
+static int rockchip_inv_set_phase(struct clk_hw *hw, int degrees)
+{
+	struct rockchip_inv_clock *inv_clock = to_inv_clock(hw);
+	u32 val;
+
+	if (degrees % 180 == 0) {
+		val = !!degrees;
+	} else {
+		pr_err("%s: unsupported phase %d for %s\n",
+		       __func__, degrees, clk_hw_get_name(hw));
+		return -EINVAL;
+	}
+
+	if (inv_clock->flags & ROCKCHIP_INVERTER_HIWORD_MASK) {
+		writel(HIWORD_UPDATE(val, INVERTER_MASK, inv_clock->shift),
+		       inv_clock->reg);
+	} else {
+		unsigned long flags;
+		u32 reg;
+
+		spin_lock_irqsave(inv_clock->lock, flags);
+
+		reg = readl(inv_clock->reg);
+		reg &= ~BIT(inv_clock->shift);
+		reg |= val;
+		writel(reg, inv_clock->reg);
+
+		spin_unlock_irqrestore(inv_clock->lock, flags);
+	}
+
+	return 0;
+}
+
+static const struct clk_ops rockchip_inv_clk_ops = {
+	.get_phase	= rockchip_inv_get_phase,
+	.set_phase	= rockchip_inv_set_phase,
+};
+
+struct clk *rockchip_clk_register_inverter(const char *name,
+				const char *const *parent_names, u8 num_parents,
+				void __iomem *reg, int shift, int flags,
+				spinlock_t *lock)
+{
+	struct clk_init_data init;
+	struct rockchip_inv_clock *inv_clock;
+	struct clk *clk;
+
+	inv_clock = kmalloc(sizeof(*inv_clock), GFP_KERNEL);
+	if (!inv_clock)
+		return NULL;
+
+	init.name = name;
+	init.num_parents = num_parents;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.parent_names = parent_names;
+	init.ops = &rockchip_inv_clk_ops;
+
+	inv_clock->hw.init = &init;
+	inv_clock->reg = reg;
+	inv_clock->shift = shift;
+	inv_clock->flags = flags;
+	inv_clock->lock = lock;
+
+	clk = clk_register(NULL, &inv_clock->hw);
+	if (IS_ERR(clk))
+		goto err_free;
+
+	return clk;
+
+err_free:
+	kfree(inv_clock);
+	return NULL;
+}
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
index e9f8df32..9b61342 100644
--- a/drivers/clk/rockchip/clk-mmc-phase.c
+++ b/drivers/clk/rockchip/clk-mmc-phase.c
@@ -14,7 +14,10 @@
  */
 
 #include <linux/slab.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include "clk.h"
 
 struct rockchip_mmc_clock {
@@ -105,7 +108,7 @@
 	writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
 
 	pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
-		__clk_get_name(hw->clk), degrees, delay_num,
+		clk_hw_get_name(hw), degrees, delay_num,
 		mmc_clock->reg, raw_value>>(mmc_clock->shift),
 		rockchip_mmc_get_phase(hw)
 	);
@@ -131,6 +134,7 @@
 	if (!mmc_clock)
 		return NULL;
 
+	init.name = name;
 	init.num_parents = num_parents;
 	init.parent_names = parent_names;
 	init.ops = &rockchip_mmc_clk_ops;
@@ -139,9 +143,6 @@
 	mmc_clock->reg = reg;
 	mmc_clock->shift = shift;
 
-	if (name)
-		init.name = name;
-
 	clk = clk_register(NULL, &mmc_clock->hw);
 	if (IS_ERR(clk))
 		goto err_free;
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index 7602726..7737a1d 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -17,7 +17,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/delay.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/regmap.h>
 #include "clk.h"
@@ -121,8 +120,8 @@
 #define RK3066_PLLCON0_NR_SHIFT		8
 #define RK3066_PLLCON1_NF_MASK		0x1fff
 #define RK3066_PLLCON1_NF_SHIFT		0
-#define RK3066_PLLCON2_BWADJ_MASK	0xfff
-#define RK3066_PLLCON2_BWADJ_SHIFT	0
+#define RK3066_PLLCON2_NB_MASK		0xfff
+#define RK3066_PLLCON2_NB_SHIFT		0
 #define RK3066_PLLCON3_RESET		(1 << 5)
 #define RK3066_PLLCON3_PWRDOWN		(1 << 1)
 #define RK3066_PLLCON3_BYPASS		(1 << 0)
@@ -137,7 +136,7 @@
 	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
 	if (pllcon & RK3066_PLLCON3_BYPASS) {
 		pr_debug("%s: pll %s is bypassed\n", __func__,
-			__clk_get_name(hw->clk));
+			clk_hw_get_name(hw));
 		return prate;
 	}
 
@@ -175,13 +174,13 @@
 	}
 
 	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
-		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
+		 __func__, clk_hw_get_name(hw), old_rate, drate, prate);
 
 	/* Get required rate settings from table */
 	rate = rockchip_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -208,8 +207,8 @@
 	writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
 						   RK3066_PLLCON1_NF_SHIFT),
 		       pll->reg_base + RK3066_PLLCON(1));
-	writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
-						  RK3066_PLLCON2_BWADJ_SHIFT),
+	writel_relaxed(HIWORD_UPDATE(rate->nb - 1, RK3066_PLLCON2_NB_MASK,
+						   RK3066_PLLCON2_NB_SHIFT),
 		       pll->reg_base + RK3066_PLLCON(2));
 
 	/* leave reset and wait the reset_delay */
@@ -262,14 +261,14 @@
 {
 	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
 	const struct rockchip_pll_rate_table *rate;
-	unsigned int nf, nr, no, bwadj;
+	unsigned int nf, nr, no, nb;
 	unsigned long drate;
 	u32 pllcon;
 
 	if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
 		return;
 
-	drate = __clk_get_rate(hw->clk);
+	drate = clk_hw_get_rate(hw);
 	rate = rockchip_get_pll_settings(pll, drate);
 
 	/* when no rate setting for the current rate, rely on clk_set_rate */
@@ -284,25 +283,25 @@
 	nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
 
 	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
-	bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
+	nb = ((pllcon >> RK3066_PLLCON2_NB_SHIFT) & RK3066_PLLCON2_NB_MASK) + 1;
 
-	pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
-		 __func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
-		rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
+	pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), nb(%d:%d)\n",
+		 __func__, clk_hw_get_name(hw), drate, rate->nr, nr,
+		rate->no, no, rate->nf, nf, rate->nb, nb);
 	if (rate->nr != nr || rate->no != no || rate->nf != nf
-					     || rate->bwadj != bwadj) {
-		struct clk *parent = __clk_get_parent(hw->clk);
+					     || rate->nb != nb) {
+		struct clk_hw *parent = clk_hw_get_parent(hw);
 		unsigned long prate;
 
 		if (!parent) {
 			pr_warn("%s: parent of %s not available\n",
-				__func__, __clk_get_name(hw->clk));
+				__func__, clk_hw_get_name(hw));
 			return;
 		}
 
 		pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
-			 __func__, __clk_get_name(hw->clk));
-		prate = __clk_get_rate(parent);
+			 __func__, clk_hw_get_name(hw));
+		prate = clk_hw_get_rate(parent);
 		rockchip_rk3066_pll_set_rate(hw, drate, prate);
 	}
 }
@@ -354,6 +353,35 @@
 	if (!pll)
 		return ERR_PTR(-ENOMEM);
 
+	/* create the mux on top of the real pll */
+	pll->pll_mux_ops = &clk_mux_ops;
+	pll_mux = &pll->pll_mux;
+	pll_mux->reg = base + mode_offset;
+	pll_mux->shift = mode_shift;
+	pll_mux->mask = PLL_MODE_MASK;
+	pll_mux->flags = 0;
+	pll_mux->lock = lock;
+	pll_mux->hw.init = &init;
+
+	if (pll_type == pll_rk3066)
+		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
+	/* the actual muxing is xin24m, pll-output, xin32k */
+	pll_parents[0] = parent_names[0];
+	pll_parents[1] = pll_name;
+	pll_parents[2] = parent_names[1];
+
+	init.name = name;
+	init.flags = CLK_SET_RATE_PARENT;
+	init.ops = pll->pll_mux_ops;
+	init.parent_names = pll_parents;
+	init.num_parents = ARRAY_SIZE(pll_parents);
+
+	mux_clk = clk_register(NULL, &pll_mux->hw);
+	if (IS_ERR(mux_clk))
+		goto err_mux;
+
+	/* now create the actual pll */
 	init.name = pll_name;
 
 	/* keep all plls untouched for now */
@@ -399,47 +427,19 @@
 	pll->flags = clk_pll_flags;
 	pll->lock = lock;
 
-	/* create the mux on top of the real pll */
-	pll->pll_mux_ops = &clk_mux_ops;
-	pll_mux = &pll->pll_mux;
-	pll_mux->reg = base + mode_offset;
-	pll_mux->shift = mode_shift;
-	pll_mux->mask = PLL_MODE_MASK;
-	pll_mux->flags = 0;
-	pll_mux->lock = lock;
-	pll_mux->hw.init = &init;
-
-	if (pll_type == pll_rk3066)
-		pll_mux->flags |= CLK_MUX_HIWORD_MASK;
-
 	pll_clk = clk_register(NULL, &pll->hw);
 	if (IS_ERR(pll_clk)) {
 		pr_err("%s: failed to register pll clock %s : %ld\n",
 			__func__, name, PTR_ERR(pll_clk));
-		mux_clk = pll_clk;
 		goto err_pll;
 	}
 
-	/* the actual muxing is xin24m, pll-output, xin32k */
-	pll_parents[0] = parent_names[0];
-	pll_parents[1] = pll_name;
-	pll_parents[2] = parent_names[1];
-
-	init.name = name;
-	init.flags = CLK_SET_RATE_PARENT;
-	init.ops = pll->pll_mux_ops;
-	init.parent_names = pll_parents;
-	init.num_parents = ARRAY_SIZE(pll_parents);
-
-	mux_clk = clk_register(NULL, &pll_mux->hw);
-	if (IS_ERR(mux_clk))
-		goto err_mux;
-
 	return mux_clk;
 
-err_mux:
-	clk_unregister(pll_clk);
 err_pll:
+	clk_unregister(mux_clk);
+	mux_clk = pll_clk;
+err_mux:
 	kfree(pll);
 	return mux_clk;
 }
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index e4f9d47..ed02bbc 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -13,6 +13,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -201,7 +202,7 @@
 PNAME(mux_aclk_cpu_p)		= { "apll", "gpll" };
 PNAME(mux_sclk_cif0_p)		= { "cif0_pre", "xin24m" };
 PNAME(mux_sclk_i2s0_p)		= { "i2s0_pre", "i2s0_frac", "xin12m" };
-PNAME(mux_sclk_spdif_p)		= { "spdif_src", "spdif_frac", "xin12m" };
+PNAME(mux_sclk_spdif_p)		= { "spdif_pre", "spdif_frac", "xin12m" };
 PNAME(mux_sclk_uart0_p)		= { "uart0_pre", "uart0_frac", "xin24m" };
 PNAME(mux_sclk_uart1_p)		= { "uart1_pre", "uart1_frac", "xin24m" };
 PNAME(mux_sclk_uart2_p)		= { "uart2_pre", "uart2_frac", "xin24m" };
@@ -235,6 +236,7 @@
 #define MFLAGS CLK_MUX_HIWORD_MASK
 #define DFLAGS CLK_DIVIDER_HIWORD_MASK
 #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
 
 /* 2 ^ (val + 1) */
 static struct clk_div_table div_core_peri_t[] = {
@@ -310,6 +312,8 @@
 
 	GATE(0, "pclkin_cif0", "ext_cif0", 0,
 			RK2928_CLKGATE_CON(3), 3, GFLAGS),
+	INVERTER(0, "pclk_cif0", "pclkin_cif0",
+			RK2928_CLKSEL_CON(30), 8, IFLAGS),
 
 	/*
 	 * the 480m are generated inside the usb block from these clocks,
@@ -334,8 +338,10 @@
 	COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
 			RK2928_CLKSEL_CON(23), 0,
 			RK2928_CLKGATE_CON(2), 7, GFLAGS),
-	MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
+	MUX(0, "sclk_hsadc_out", mux_sclk_hsadc_p, 0,
 			RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
+	INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+			RK2928_CLKSEL_CON(22), 7, IFLAGS),
 
 	COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
 			RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
@@ -344,10 +350,10 @@
 	COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
 			RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
 			RK2928_CLKGATE_CON(0), 13, GFLAGS),
-	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+	COMPOSITE_FRAC(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(9), 0,
 			RK2928_CLKGATE_CON(0), 14, GFLAGS),
-	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+	MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, CLK_SET_RATE_PARENT,
 			RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
 
 	/*
@@ -557,6 +563,8 @@
 
 	GATE(0, "pclkin_cif1", "ext_cif1", 0,
 			RK2928_CLKGATE_CON(3), 4, GFLAGS),
+	INVERTER(0, "pclk_cif1", "pclkin_cif1",
+			RK2928_CLKSEL_CON(30), 12, IFLAGS),
 
 	COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
 			RK2928_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 5, DFLAGS,
@@ -809,7 +817,7 @@
 
 		rate = pll->rate_table;
 		while (rate->rate > 0) {
-			rate->bwadj = 0;
+			rate->nb = 1;
 			rate++;
 		}
 	}
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 4f817ed..9040878 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -84,7 +84,7 @@
 	RK3066_PLL_RATE( 742500000, 8, 495, 2),
 	RK3066_PLL_RATE( 696000000, 1, 58, 2),
 	RK3066_PLL_RATE( 600000000, 1, 50, 2),
-	RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
+	RK3066_PLL_RATE_NB(594000000, 1, 198, 8, 1),
 	RK3066_PLL_RATE( 552000000, 1, 46, 2),
 	RK3066_PLL_RATE( 504000000, 1, 84, 4),
 	RK3066_PLL_RATE( 500000000, 3, 125, 2),
@@ -189,7 +189,7 @@
 PNAME(mux_uart2_p)	= { "uart2_src", "uart2_frac", "xin24m" };
 PNAME(mux_uart3_p)	= { "uart3_src", "uart3_frac", "xin24m" };
 PNAME(mux_uart4_p)	= { "uart4_src", "uart4_frac", "xin24m" };
-PNAME(mux_cif_out_p)	= { "cif_src", "xin24m" };
+PNAME(mux_vip_out_p)	= { "vip_src", "xin24m" };
 PNAME(mux_mac_p)	= { "mac_pll_src", "ext_gmac" };
 PNAME(mux_hsadcout_p)	= { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)	= { "ext_edp_24m", "xin24m" };
@@ -223,6 +223,7 @@
 #define MFLAGS CLK_MUX_HIWORD_MASK
 #define DFLAGS CLK_DIVIDER_HIWORD_MASK
 #define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
 
 static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 	/*
@@ -434,7 +435,7 @@
 	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(26), 8, 1, MFLAGS,
 			RK3288_CLKGATE_CON(3), 7, GFLAGS),
-	COMPOSITE_NOGATE(0, "sclk_vip_out", mux_cif_out_p, 0,
+	COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
 			RK3288_CLKSEL_CON(26), 15, 1, MFLAGS, 9, 5, DFLAGS),
 
 	DIV(0, "pclk_pd_alive", "gpll", 0,
@@ -578,7 +579,7 @@
 	COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
 			RK3288_CLKGATE_CON(2), 5, GFLAGS),
-	MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
+	MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
 			RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
 	GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
 			RK3288_CLKGATE_CON(5), 3, GFLAGS),
@@ -592,8 +593,10 @@
 	COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
 			RK3288_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
 			RK3288_CLKGATE_CON(2), 6, GFLAGS),
-	MUX(SCLK_HSADC, "sclk_hsadc_out", mux_hsadcout_p, 0,
+	MUX(0, "sclk_hsadc_out", mux_hsadcout_p, 0,
 			RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+	INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
+			RK3288_CLKSEL_CON(22), 7, IFLAGS),
 
 	GATE(0, "jtag", "ext_jtag", 0,
 			RK3288_CLKGATE_CON(4), 14, GFLAGS),
@@ -768,13 +771,16 @@
 	 */
 
 	GATE(0, "pclk_vip_in", "ext_vip", 0, RK3288_CLKGATE_CON(16), 0, GFLAGS),
+	INVERTER(0, "pclk_vip", "pclk_vip_in", RK3288_CLKSEL_CON(29), 4, IFLAGS),
 	GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
+	INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS),
 };
 
 static const char *const rk3288_critical_clocks[] __initconst = {
 	"aclk_cpu",
 	"aclk_peri",
 	"hclk_peri",
+	"pclk_pd_pmu",
 };
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
new file mode 100644
index 0000000..9c5d61e
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -0,0 +1,881 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/rk3368-cru.h>
+#include "clk.h"
+
+#define RK3368_GRF_SOC_STATUS0	0x480
+
+enum rk3368_plls {
+	apllb, aplll, dpll, cpll, gpll, npll,
+};
+
+static struct rockchip_pll_rate_table rk3368_pll_rates[] = {
+	RK3066_PLL_RATE(2208000000, 1, 92, 1),
+	RK3066_PLL_RATE(2184000000, 1, 91, 1),
+	RK3066_PLL_RATE(2160000000, 1, 90, 1),
+	RK3066_PLL_RATE(2136000000, 1, 89, 1),
+	RK3066_PLL_RATE(2112000000, 1, 88, 1),
+	RK3066_PLL_RATE(2088000000, 1, 87, 1),
+	RK3066_PLL_RATE(2064000000, 1, 86, 1),
+	RK3066_PLL_RATE(2040000000, 1, 85, 1),
+	RK3066_PLL_RATE(2016000000, 1, 84, 1),
+	RK3066_PLL_RATE(1992000000, 1, 83, 1),
+	RK3066_PLL_RATE(1968000000, 1, 82, 1),
+	RK3066_PLL_RATE(1944000000, 1, 81, 1),
+	RK3066_PLL_RATE(1920000000, 1, 80, 1),
+	RK3066_PLL_RATE(1896000000, 1, 79, 1),
+	RK3066_PLL_RATE(1872000000, 1, 78, 1),
+	RK3066_PLL_RATE(1848000000, 1, 77, 1),
+	RK3066_PLL_RATE(1824000000, 1, 76, 1),
+	RK3066_PLL_RATE(1800000000, 1, 75, 1),
+	RK3066_PLL_RATE(1776000000, 1, 74, 1),
+	RK3066_PLL_RATE(1752000000, 1, 73, 1),
+	RK3066_PLL_RATE(1728000000, 1, 72, 1),
+	RK3066_PLL_RATE(1704000000, 1, 71, 1),
+	RK3066_PLL_RATE(1680000000, 1, 70, 1),
+	RK3066_PLL_RATE(1656000000, 1, 69, 1),
+	RK3066_PLL_RATE(1632000000, 1, 68, 1),
+	RK3066_PLL_RATE(1608000000, 1, 67, 1),
+	RK3066_PLL_RATE(1560000000, 1, 65, 1),
+	RK3066_PLL_RATE(1512000000, 1, 63, 1),
+	RK3066_PLL_RATE(1488000000, 1, 62, 1),
+	RK3066_PLL_RATE(1464000000, 1, 61, 1),
+	RK3066_PLL_RATE(1440000000, 1, 60, 1),
+	RK3066_PLL_RATE(1416000000, 1, 59, 1),
+	RK3066_PLL_RATE(1392000000, 1, 58, 1),
+	RK3066_PLL_RATE(1368000000, 1, 57, 1),
+	RK3066_PLL_RATE(1344000000, 1, 56, 1),
+	RK3066_PLL_RATE(1320000000, 1, 55, 1),
+	RK3066_PLL_RATE(1296000000, 1, 54, 1),
+	RK3066_PLL_RATE(1272000000, 1, 53, 1),
+	RK3066_PLL_RATE(1248000000, 1, 52, 1),
+	RK3066_PLL_RATE(1224000000, 1, 51, 1),
+	RK3066_PLL_RATE(1200000000, 1, 50, 1),
+	RK3066_PLL_RATE(1176000000, 1, 49, 1),
+	RK3066_PLL_RATE(1128000000, 1, 47, 1),
+	RK3066_PLL_RATE(1104000000, 1, 46, 1),
+	RK3066_PLL_RATE(1008000000, 1, 84, 2),
+	RK3066_PLL_RATE( 912000000, 1, 76, 2),
+	RK3066_PLL_RATE( 888000000, 1, 74, 2),
+	RK3066_PLL_RATE( 816000000, 1, 68, 2),
+	RK3066_PLL_RATE( 792000000, 1, 66, 2),
+	RK3066_PLL_RATE( 696000000, 1, 58, 2),
+	RK3066_PLL_RATE( 672000000, 1, 56, 2),
+	RK3066_PLL_RATE( 648000000, 1, 54, 2),
+	RK3066_PLL_RATE( 624000000, 1, 52, 2),
+	RK3066_PLL_RATE( 600000000, 1, 50, 2),
+	RK3066_PLL_RATE( 576000000, 1, 48, 2),
+	RK3066_PLL_RATE( 552000000, 1, 46, 2),
+	RK3066_PLL_RATE( 528000000, 1, 88, 4),
+	RK3066_PLL_RATE( 504000000, 1, 84, 4),
+	RK3066_PLL_RATE( 480000000, 1, 80, 4),
+	RK3066_PLL_RATE( 456000000, 1, 76, 4),
+	RK3066_PLL_RATE( 408000000, 1, 68, 4),
+	RK3066_PLL_RATE( 312000000, 1, 52, 4),
+	RK3066_PLL_RATE( 252000000, 1, 84, 8),
+	RK3066_PLL_RATE( 216000000, 1, 72, 8),
+	RK3066_PLL_RATE( 126000000, 2, 84, 8),
+	RK3066_PLL_RATE(  48000000, 2, 32, 8),
+	{ /* sentinel */ },
+};
+
+PNAME(mux_pll_p)		= { "xin24m", "xin32k" };
+PNAME(mux_armclkb_p)		= { "apllb_core", "gpllb_core" };
+PNAME(mux_armclkl_p)		= { "aplll_core", "gplll_core" };
+PNAME(mux_ddrphy_p)		= { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_cs_src_p)		= { "apllb_cs", "aplll_cs", "gpll_cs"};
+PNAME(mux_aclk_bus_src_p)	= { "cpll_aclk_bus", "gpll_aclk_bus" };
+
+PNAME(mux_pll_src_cpll_gpll_p)		= { "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_npll_p)	= { "cpll", "gpll", "npll" };
+PNAME(mux_pll_src_npll_cpll_gpll_p)	= { "npll", "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_usb_p)	= { "cpll", "gpll", "usbphy_480m" };
+PNAME(mux_pll_src_cpll_gpll_usb_usb_p)	= { "cpll", "gpll", "usbphy_480m",
+					    "usbphy_480m" };
+PNAME(mux_pll_src_cpll_gpll_usb_npll_p)	= { "cpll", "gpll", "usbphy_480m",
+					    "npll" };
+PNAME(mux_pll_src_cpll_gpll_npll_npll_p) = { "cpll", "gpll", "npll", "npll" };
+PNAME(mux_pll_src_cpll_gpll_npll_usb_p) = { "cpll", "gpll", "npll",
+					    "usbphy_480m" };
+
+PNAME(mux_i2s_8ch_pre_p)	= { "i2s_8ch_src", "i2s_8ch_frac",
+				    "ext_i2s", "xin12m" };
+PNAME(mux_i2s_8ch_clkout_p)	= { "i2s_8ch_pre", "xin12m" };
+PNAME(mux_i2s_2ch_p)		= { "i2s_2ch_src", "i2s_2ch_frac",
+				    "dummy", "xin12m" };
+PNAME(mux_spdif_8ch_p)		= { "spdif_8ch_pre", "spdif_8ch_frac",
+				    "ext_i2s", "xin12m" };
+PNAME(mux_edp_24m_p)		= { "dummy", "xin24m" };
+PNAME(mux_vip_out_p)		= { "vip_src", "xin24m" };
+PNAME(mux_usbphy480m_p)		= { "usbotg_out", "xin24m" };
+PNAME(mux_hsic_usbphy480m_p)	= { "usbotg_out", "dummy" };
+PNAME(mux_hsicphy480m_p)	= { "cpll", "gpll", "usbphy_480m" };
+PNAME(mux_uart0_p)		= { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p)		= { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p)		= { "uart2_src", "xin24m" };
+PNAME(mux_uart3_p)		= { "uart3_src", "uart3_frac", "xin24m" };
+PNAME(mux_uart4_p)		= { "uart4_src", "uart4_frac", "xin24m" };
+PNAME(mux_mac_p)		= { "mac_pll_src", "ext_gmac" };
+PNAME(mux_mmc_src_p)		= { "cpll", "gpll", "usbphy_480m", "xin24m" };
+
+static struct rockchip_pll_clock rk3368_pll_clks[] __initdata = {
+	[apllb] = PLL(pll_rk3066, PLL_APLLB, "apllb", mux_pll_p, 0, RK3368_PLL_CON(0),
+		     RK3368_PLL_CON(3), 8, 1, 0, rk3368_pll_rates),
+	[aplll] = PLL(pll_rk3066, PLL_APLLL, "aplll", mux_pll_p, 0, RK3368_PLL_CON(4),
+		     RK3368_PLL_CON(7), 8, 0, 0, rk3368_pll_rates),
+	[dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3368_PLL_CON(8),
+		     RK3368_PLL_CON(11), 8, 2, 0, NULL),
+	[cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3368_PLL_CON(12),
+		     RK3368_PLL_CON(15), 8, 3, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates),
+	[gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3368_PLL_CON(16),
+		     RK3368_PLL_CON(19), 8, 4, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates),
+	[npll] = PLL(pll_rk3066, PLL_NPLL, "npll",  mux_pll_p, 0, RK3368_PLL_CON(20),
+		     RK3368_PLL_CON(23), 8, 5, ROCKCHIP_PLL_SYNC_RATE, rk3368_pll_rates),
+};
+
+static struct clk_div_table div_ddrphy_t[] = {
+	{ .val = 0, .div = 1 },
+	{ .val = 1, .div = 2 },
+	{ .val = 3, .div = 4 },
+	{ /* sentinel */ },
+};
+
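+/*
+ * MFLAGS/DFLAGS/GFLAGS/IFLAGS select the hiword-mask register scheme of the
+ * Rockchip CRU: the upper 16 bits of each write act as a write-enable mask
+ * for the lower 16 bits, so fields can be changed without read-modify-write.
+ * GFLAGS additionally marks the gate bits as "set to disable".
+ */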
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+#define IFLAGS ROCKCHIP_INVERTER_HIWORD_MASK
+
+static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
+	.core_reg = RK3368_CLKSEL_CON(0),
+	.div_core_shift = 0,
+	.div_core_mask = 0x1f,
+	.mux_core_shift = 15,
+};
+
+static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
+	.core_reg = RK3368_CLKSEL_CON(2),
+	.div_core_shift = 0,
+	.div_core_mask = 0x1f,
+	.mux_core_shift = 7,
+};
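+
+/*
+ * The two reg_data blocks above only describe where each cluster's core
+ * divider and parent mux live; the common cpuclk code uses them to park
+ * armclkb/armclkl on gpll while the respective apll relocks during rate
+ * changes.
+ */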
+
+#define RK3368_DIV_ACLKM_MASK		0x1f
+#define RK3368_DIV_ACLKM_SHIFT		8
+#define RK3368_DIV_ATCLK_MASK		0x1f
+#define RK3368_DIV_ATCLK_SHIFT		0
+#define RK3368_DIV_PCLK_DBG_MASK	0x1f
+#define RK3368_DIV_PCLK_DBG_SHIFT	8
+
+#define RK3368_CLKSEL0(_offs, _aclkm)					\
+	{								\
+		.reg = RK3368_CLKSEL_CON(0 + _offs),			\
+		.val = HIWORD_UPDATE(_aclkm, RK3368_DIV_ACLKM_MASK,	\
+				RK3368_DIV_ACLKM_SHIFT),		\
+	}
+#define RK3368_CLKSEL1(_offs, _atclk, _pdbg)				\
+	{								\
+		.reg = RK3368_CLKSEL_CON(1 + _offs),			\
+		.val = HIWORD_UPDATE(_atclk, RK3368_DIV_ATCLK_MASK,	\
+				RK3368_DIV_ATCLK_SHIFT) |		\
+		       HIWORD_UPDATE(_pdbg, RK3368_DIV_PCLK_DBG_MASK,	\
+				RK3368_DIV_PCLK_DBG_SHIFT),		\
+	}
+
+/* cluster_b: aclkm in clksel0, rest in clksel1 */
+#define RK3368_CPUCLKB_RATE(_prate, _aclkm, _atclk, _pdbg)		\
+	{								\
+		.prate = _prate,					\
+		.divs = {						\
+			RK3368_CLKSEL0(0, _aclkm),			\
+			RK3368_CLKSEL1(0, _atclk, _pdbg),		\
+		},							\
+	}
+
+/* cluster_l: aclkm in clksel2, rest in clksel3 */
+#define RK3368_CPUCLKL_RATE(_prate, _aclkm, _atclk, _pdbg)		\
+	{								\
+		.prate = _prate,					\
+		.divs = {						\
+			RK3368_CLKSEL0(2, _aclkm),			\
+			RK3368_CLKSEL1(2, _atclk, _pdbg),		\
+		},							\
+	}
+
+static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
+	RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
+	RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
+	RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
+	RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
+	RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
+	RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
+	RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
+	RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
+	RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
+	RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+};
+
+static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
+	RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
+	RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
+	RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
+	RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
+	RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
+	RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
+	RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
+	RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
+	RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
+	RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+};
+
+static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
+	/*
+	 * Clock-Architecture Diagram 2
+	 */
+
+	MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(13), 8, 1, MFLAGS),
+
+	GATE(0, "apllb_core", "apllb", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 0, GFLAGS),
+	GATE(0, "gpllb_core", "gpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 1, GFLAGS),
+
+	GATE(0, "aplll_core", "aplll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 4, GFLAGS),
+	GATE(0, "gplll_core", "gpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 5, GFLAGS),
+
+	DIV(0, "aclkm_core_b", "armclkb", 0,
+			RK3368_CLKSEL_CON(0), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+	DIV(0, "atclk_core_b", "armclkb", 0,
+			RK3368_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+	DIV(0, "pclk_dbg_b", "armclkb", 0,
+			RK3368_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+
+	DIV(0, "aclkm_core_l", "armclkl", 0,
+			RK3368_CLKSEL_CON(2), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+	DIV(0, "atclk_core_l", "armclkl", 0,
+			RK3368_CLKSEL_CON(3), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+	DIV(0, "pclk_dbg_l", "armclkl", 0,
+			RK3368_CLKSEL_CON(3), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY),
+
+	GATE(0, "apllb_cs", "apllb", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 9, GFLAGS),
+	GATE(0, "aplll_cs", "aplll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 10, GFLAGS),
+	GATE(0, "gpll_cs", "gpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(0), 8, GFLAGS),
+	COMPOSITE_NOGATE(0, "sclk_cs_pre", mux_cs_src_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(4), 6, 2, MFLAGS, 0, 5, DFLAGS),
+	COMPOSITE_NOMUX(0, "clkin_trace", "sclk_cs_pre", CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(4), 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(0), 13, GFLAGS),
+
+	COMPOSITE(0, "aclk_cci_pre", mux_pll_src_cpll_gpll_usb_npll_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(5), 6, 2, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(0), 12, GFLAGS),
+	GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3368_CLKGATE_CON(7), 10, GFLAGS),
+
+	GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(1), 8, GFLAGS),
+	GATE(0, "gpll_ddr", "gpll", 0,
+			RK3368_CLKGATE_CON(1), 9, GFLAGS),
+	COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t),
+
+	GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(6), 14, GFLAGS),
+	GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(6), 15, GFLAGS),
+
+	GATE(0, "gpll_aclk_bus", "gpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(1), 10, GFLAGS),
+	GATE(0, "cpll_aclk_bus", "cpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(1), 11, GFLAGS),
+	COMPOSITE_NOGATE(0, "aclk_bus_src", mux_aclk_bus_src_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(8), 7, 1, MFLAGS, 0, 5, DFLAGS),
+
+	GATE(ACLK_BUS, "aclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(1), 0, GFLAGS),
+	COMPOSITE_NOMUX(PCLK_BUS, "pclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(8), 12, 3, DFLAGS,
+			RK3368_CLKGATE_CON(1), 2, GFLAGS),
+	COMPOSITE_NOMUX(HCLK_BUS, "hclk_bus", "aclk_bus_src", CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(8), 8, 2, DFLAGS,
+			RK3368_CLKGATE_CON(1), 1, GFLAGS),
+	COMPOSITE_NOMUX(0, "sclk_crypto", "aclk_bus_src", 0,
+			RK3368_CLKSEL_CON(10), 14, 2, DFLAGS,
+			RK3368_CLKGATE_CON(7), 2, GFLAGS),
+
+	COMPOSITE(0, "fclk_mcu_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(12), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(1), 3, GFLAGS),
+	/*
+	 * stclk_mcu is listed as a child of fclk_mcu_src in diagram 5,
+	 * but stclk_mcu has an additional divider of its own in diagram 2.
+	 */
+	COMPOSITE_NOMUX(0, "stclk_mcu", "fclk_mcu_src", 0,
+			RK3368_CLKSEL_CON(12), 8, 3, DFLAGS,
+			RK3368_CLKGATE_CON(13), 13, GFLAGS),
+
+	COMPOSITE(0, "i2s_8ch_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(27), 12, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(6), 1, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s_8ch_frac", "i2s_8ch_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(28), 0,
+			RK3368_CLKGATE_CON(6), 2, GFLAGS),
+	MUX(0, "i2s_8ch_pre", mux_i2s_8ch_pre_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(27), 8, 2, MFLAGS),
+	COMPOSITE_NODIV(SCLK_I2S_8CH_OUT, "i2s_8ch_clkout", mux_i2s_8ch_clkout_p, 0,
+			RK3368_CLKSEL_CON(27), 15, 1, MFLAGS,
+			RK3368_CLKGATE_CON(6), 0, GFLAGS),
+	GATE(SCLK_I2S_8CH, "sclk_i2s_8ch", "i2s_8ch_pre", CLK_SET_RATE_PARENT,
+			RK3368_CLKGATE_CON(6), 3, GFLAGS),
+	COMPOSITE(0, "spdif_8ch_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(31), 12, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(6), 4, GFLAGS),
+	COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(32), 0,
+			RK3368_CLKGATE_CON(6), 5, GFLAGS),
+	COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+			RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
+			RK3368_CLKGATE_CON(6), 6, GFLAGS),
+	COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(53), 12, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(5), 13, GFLAGS),
+	COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(54), 0,
+			RK3368_CLKGATE_CON(5), 14, GFLAGS),
+	COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
+			RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
+			RK3368_CLKGATE_CON(5), 15, GFLAGS),
+
+	COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3368_CLKSEL_CON(46), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(6), 12, GFLAGS),
+	GATE(0, "sclk_hsadc_tsp", "ext_hsadc_tsp", 0,
+			RK3368_CLKGATE_CON(13), 7, GFLAGS),
+
+	MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(35), 12, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
+			RK3368_CLKSEL_CON(37), 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(2), 4, GFLAGS),
+	MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(37), 8, 1, MFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 3
+	 */
+
+	COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 6, GFLAGS),
+	COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 7, GFLAGS),
+
+	/*
+	 * We introduce a virtual node, hclk_video_pre_v, to split one clock
+	 * struct with a gate and a fixed divider into two nodes in software.
+	 */
+	GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+		RK3368_CLKGATE_CON(4), 8, GFLAGS),
+
+	COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+			RK3368_CLKSEL_CON(17), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(5), 1, GFLAGS),
+	COMPOSITE(0, "sclk_hevc_core_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
+			RK3368_CLKSEL_CON(17), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(5), 2, GFLAGS),
+
+	COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(19), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 0, GFLAGS),
+	DIV(0, "hclk_vio", "aclk_vio0", 0,
+			RK3368_CLKSEL_CON(21), 0, 5, DFLAGS),
+
+	COMPOSITE(0, "aclk_rga_pre", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(18), 14, 2, MFLAGS, 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 3, GFLAGS),
+	COMPOSITE(SCLK_RGA, "sclk_rga", mux_pll_src_cpll_gpll_usb_p, 0,
+			RK3368_CLKSEL_CON(18), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 4, GFLAGS),
+
+	COMPOSITE(DCLK_VOP, "dclk_vop", mux_pll_src_cpll_gpll_npll_p, 0,
+			RK3368_CLKSEL_CON(20), 8, 2, MFLAGS, 0, 8, DFLAGS,
+			RK3368_CLKGATE_CON(4), 1, GFLAGS),
+
+	GATE(SCLK_VOP0_PWM, "sclk_vop0_pwm", "xin24m", 0,
+			RK3368_CLKGATE_CON(4), 2, GFLAGS),
+
+	COMPOSITE(SCLK_ISP, "sclk_isp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+			RK3368_CLKSEL_CON(22), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3368_CLKGATE_CON(4), 9, GFLAGS),
+
+	GATE(0, "pclk_isp_in", "ext_isp", 0,
+			RK3368_CLKGATE_CON(17), 2, GFLAGS),
+	INVERTER(PCLK_ISP, "pclk_isp", "pclk_isp_in",
+			RK3368_CLKSEL_CON(21), 6, IFLAGS),
+
+	GATE(0, "pclk_vip_in", "ext_vip", 0,
+			RK3368_CLKGATE_CON(16), 13, GFLAGS),
+	INVERTER(PCLK_VIP, "pclk_vip", "pclk_vip_in",
+			RK3368_CLKSEL_CON(21), 13, IFLAGS),
+
+	GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
+			RK3368_CLKGATE_CON(4), 13, GFLAGS),
+	GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
+			RK3368_CLKGATE_CON(5), 12, GFLAGS),
+
+	COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
+			RK3368_CLKGATE_CON(4), 5, GFLAGS),
+	COMPOSITE_NOGATE(0, "sclk_vip_out", mux_vip_out_p, 0,
+			RK3368_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS),
+
+	COMPOSITE_NODIV(SCLK_EDP_24M, "sclk_edp_24m", mux_edp_24m_p, 0,
+			RK3368_CLKSEL_CON(23), 8, 1, MFLAGS,
+			RK3368_CLKGATE_CON(5), 4, GFLAGS),
+	COMPOSITE(SCLK_EDP, "sclk_edp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+			RK3368_CLKSEL_CON(23), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3368_CLKGATE_CON(5), 3, GFLAGS),
+
+	COMPOSITE(SCLK_HDCP, "sclk_hdcp", mux_pll_src_cpll_gpll_npll_npll_p, 0,
+			RK3368_CLKSEL_CON(55), 6, 2, MFLAGS, 0, 6, DFLAGS,
+			RK3368_CLKGATE_CON(5), 5, GFLAGS),
+
+	DIV(0, "pclk_pd_alive", "gpll", 0,
+			RK3368_CLKSEL_CON(10), 8, 5, DFLAGS),
+
+	/* sclk_timer has a gate in the sgrf */
+
+	COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(10), 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(7), 9, GFLAGS),
+	GATE(SCLK_PVTM_PMU, "sclk_pvtm_pmu", "xin24m", 0,
+			RK3368_CLKGATE_CON(7), 3, GFLAGS),
+	COMPOSITE(0, "sclk_gpu_core_src", mux_pll_src_cpll_gpll_usb_npll_p, 0,
+			RK3368_CLKSEL_CON(14), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(4), 11, GFLAGS),
+	MUX(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(14), 14, 1, MFLAGS),
+	COMPOSITE_NOMUX(0, "aclk_gpu_mem_pre", "aclk_gpu_src", 0,
+			RK3368_CLKSEL_CON(14), 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(5), 8, GFLAGS),
+	COMPOSITE_NOMUX(0, "aclk_gpu_cfg_pre", "aclk_gpu_src", 0,
+			RK3368_CLKSEL_CON(16), 8, 5, DFLAGS,
+			RK3368_CLKGATE_CON(5), 9, GFLAGS),
+	GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0,
+			RK3368_CLKGATE_CON(7), 11, GFLAGS),
+
+	COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(9), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(3), 0, GFLAGS),
+	COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
+			RK3368_CLKSEL_CON(9), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK3368_CLKGATE_CON(3), 3, GFLAGS),
+	COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
+			RK3368_CLKSEL_CON(9), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+			RK3368_CLKGATE_CON(3), 2, GFLAGS),
+	GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(3), 1, GFLAGS),
+
+	GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 4
+	 */
+
+	COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(45), 7, 1, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(3), 7, GFLAGS),
+	COMPOSITE(SCLK_SPI1, "sclk_spi1", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(45), 15, 1, MFLAGS, 8, 7, DFLAGS,
+			RK3368_CLKGATE_CON(3), 8, GFLAGS),
+	COMPOSITE(SCLK_SPI2, "sclk_spi2", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(46), 15, 1, MFLAGS, 8, 7, DFLAGS,
+			RK3368_CLKGATE_CON(3), 9, GFLAGS),
+
+	COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+			RK3368_CLKSEL_CON(50), 8, 2, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(7), 12, GFLAGS),
+	COMPOSITE(SCLK_SDIO0, "sclk_sdio0", mux_mmc_src_p, 0,
+			RK3368_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(7), 13, GFLAGS),
+	COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0,
+			RK3368_CLKSEL_CON(51), 8, 2, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(7), 15, GFLAGS),
+
+	MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3368_SDMMC_CON0, 1),
+	MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3368_SDMMC_CON1, 0),
+
+	MMC(SCLK_SDIO0_DRV,    "sdio0_drv",    "sclk_sdio0", RK3368_SDIO0_CON0, 1),
+	MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3368_SDIO0_CON1, 0),
+
+	MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3368_EMMC_CON0,  1),
+	MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3368_EMMC_CON1,  0),
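+
+	/*
+	 * The *_drv/*_sample clocks above are the phase-adjustable outputs of
+	 * the MMC controllers; the dw_mmc driver uses them for drive/sample
+	 * phase tuning.
+	 */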
+
+	GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(8), 1, GFLAGS),
+
+	/* pmu_grf_soc_con0[6] allows selecting between xin32k and pvtm_pmu */
+	GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
+			RK3368_CLKGATE_CON(8), 4, GFLAGS),
+
+	/* pmu_grf_soc_con0[6] allows selecting between xin32k and pvtm_pmu */
+	COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
+			RK3368_CLKSEL_CON(25), 0, 6, DFLAGS,
+			RK3368_CLKGATE_CON(3), 5, GFLAGS),
+
+	COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+			RK3368_CLKSEL_CON(25), 8, 8, DFLAGS,
+			RK3368_CLKGATE_CON(3), 6, GFLAGS),
+
+	COMPOSITE(SCLK_NANDC0, "sclk_nandc0", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(47), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(7), 8, GFLAGS),
+
+	COMPOSITE(SCLK_SFC, "sclk_sfc", mux_pll_src_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(52), 7, 1, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(6), 7, GFLAGS),
+
+	COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gpll_usb_usb_p, 0,
+			RK3368_CLKSEL_CON(33), 12, 2, MFLAGS, 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(2), 0, GFLAGS),
+	COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(34), 0,
+			RK3368_CLKGATE_CON(2), 1, GFLAGS),
+	MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(33), 8, 2, MFLAGS),
+
+	COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
+			RK3368_CLKSEL_CON(35), 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(2), 2, GFLAGS),
+	COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(36), 0,
+			RK3368_CLKGATE_CON(2), 3, GFLAGS),
+	MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(35), 8, 2, MFLAGS),
+
+	COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
+			RK3368_CLKSEL_CON(39), 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(2), 6, GFLAGS),
+	COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(40), 0,
+			RK3368_CLKGATE_CON(2), 7, GFLAGS),
+	MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(39), 8, 2, MFLAGS),
+
+	COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
+			RK3368_CLKSEL_CON(41), 0, 7, DFLAGS,
+			RK3368_CLKGATE_CON(2), 8, GFLAGS),
+	COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(42), 0,
+			RK3368_CLKGATE_CON(2), 9, GFLAGS),
+	MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(41), 8, 2, MFLAGS),
+
+	COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
+			RK3368_CLKSEL_CON(43), 6, 2, MFLAGS, 0, 5, DFLAGS,
+			RK3368_CLKGATE_CON(3), 4, GFLAGS),
+	MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
+			RK3368_CLKSEL_CON(43), 8, 1, MFLAGS),
+	GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
+			RK3368_CLKGATE_CON(7), 7, GFLAGS),
+	GATE(SCLK_MACREF, "sclk_macref", "mac_clk", 0,
+			RK3368_CLKGATE_CON(7), 6, GFLAGS),
+	GATE(SCLK_MAC_RX, "sclk_mac_rx", "mac_clk", 0,
+			RK3368_CLKGATE_CON(7), 4, GFLAGS),
+	GATE(SCLK_MAC_TX, "sclk_mac_tx", "mac_clk", 0,
+			RK3368_CLKGATE_CON(7), 5, GFLAGS),
+
+	GATE(0, "jtag", "ext_jtag", 0,
+			RK3368_CLKGATE_CON(7), 0, GFLAGS),
+
+	COMPOSITE_NODIV(0, "hsic_usbphy_480m", mux_hsic_usbphy480m_p, 0,
+			RK3368_CLKSEL_CON(26), 8, 2, MFLAGS,
+			RK3368_CLKGATE_CON(8), 0, GFLAGS),
+	COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
+			RK3368_CLKSEL_CON(26), 12, 2, MFLAGS,
+			RK3368_CLKGATE_CON(8), 7, GFLAGS),
+	GATE(SCLK_HSICPHY12M, "sclk_hsicphy12m", "xin12m", 0,
+			RK3368_CLKGATE_CON(8), 6, GFLAGS),
+
+	/*
+	 * Clock-Architecture Diagram 5
+	 */
+
+	/* aclk_cci_pre gates */
+	GATE(0, "aclk_core_niu_cpup", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 4, GFLAGS),
+	GATE(0, "aclk_core_niu_cci", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 3, GFLAGS),
+	GATE(0, "aclk_cci400", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 2, GFLAGS),
+	GATE(0, "aclk_adb400m_pd_core_b", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 1, GFLAGS),
+	GATE(0, "aclk_adb400m_pd_core_l", "aclk_cci_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 0, GFLAGS),
+
+	/* aclkm_core_* gates */
+	GATE(0, "aclk_adb400s_pd_core_b", "aclkm_core_b", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(10), 0, GFLAGS),
+	GATE(0, "aclk_adb400s_pd_core_l", "aclkm_core_l", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(9), 0, GFLAGS),
+
+	/* armclk* gates */
+	GATE(0, "sclk_dbg_pd_core_b", "armclkb", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(10), 1, GFLAGS),
+	GATE(0, "sclk_dbg_pd_core_l", "armclkl", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(9), 1, GFLAGS),
+
+	/* sclk_cs_pre gates */
+	GATE(0, "sclk_dbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 7, GFLAGS),
+	GATE(0, "pclk_core_niu_sdbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 6, GFLAGS),
+	GATE(0, "hclk_core_niu_dbg", "sclk_cs_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(11), 5, GFLAGS),
+
+	/* aclk_bus gates */
+	GATE(0, "aclk_strc_sys", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 12, GFLAGS),
+	GATE(ACLK_DMAC_BUS, "aclk_dmac_bus", "aclk_bus", 0, RK3368_CLKGATE_CON(12), 11, GFLAGS),
+	GATE(0, "sclk_intmem1", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 6, GFLAGS),
+	GATE(0, "sclk_intmem0", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 5, GFLAGS),
+	GATE(0, "aclk_intmem", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 4, GFLAGS),
+	GATE(0, "aclk_gic400", "aclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(13), 9, GFLAGS),
+
+	/* sclk_ddr gates */
+	GATE(0, "nclk_ddrupctl", "sclk_ddr", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(13), 2, GFLAGS),
+
+	/* clk_hsadc_tsp is part of diagram2 */
+
+	/* fclk_mcu_src gates */
+	GATE(0, "hclk_noc_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 14, GFLAGS),
+	GATE(0, "fclk_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 12, GFLAGS),
+	GATE(0, "hclk_mcu", "fclk_mcu_src", 0, RK3368_CLKGATE_CON(13), 11, GFLAGS),
+
+	/* hclk_cpu gates */
+	GATE(HCLK_SPDIF, "hclk_spdif", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 10, GFLAGS),
+	GATE(HCLK_ROM, "hclk_rom", "hclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 9, GFLAGS),
+	GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 8, GFLAGS),
+	GATE(HCLK_I2S_8CH, "hclk_i2s_8ch", "hclk_bus", 0, RK3368_CLKGATE_CON(12), 7, GFLAGS),
+	GATE(HCLK_TSP, "hclk_tsp", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 10, GFLAGS),
+	GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 4, GFLAGS),
+	GATE(MCLK_CRYPTO, "mclk_crypto", "hclk_bus", 0, RK3368_CLKGATE_CON(13), 3, GFLAGS),
+
+	/* pclk_cpu gates */
+	GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 14, GFLAGS),
+	GATE(PCLK_DDRUPCTL, "pclk_ddrupctl", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 13, GFLAGS),
+	GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 3, GFLAGS),
+	GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 2, GFLAGS),
+	GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus", 0, RK3368_CLKGATE_CON(12), 1, GFLAGS),
+	GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(12), 0, GFLAGS),
+	GATE(PCLK_SIM, "pclk_sim", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 8, GFLAGS),
+	GATE(PCLK_PWM1, "pclk_pwm1", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 6, GFLAGS),
+	GATE(PCLK_UART2, "pclk_uart2", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 5, GFLAGS),
+	GATE(0, "pclk_efuse_256", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 1, GFLAGS),
+	GATE(0, "pclk_efuse_1024", "pclk_bus", 0, RK3368_CLKGATE_CON(13), 0, GFLAGS),
+
+	/*
+	 * video clk gates
+	 * aclk_video(_pre) can actually select between the parents of
+	 * aclk_vdpu and aclk_vepu by setting bit GRF_SOC_CON0[7].
+	 */
+	GATE(ACLK_VIDEO, "aclk_video", "aclk_vdpu", 0, RK3368_CLKGATE_CON(15), 0, GFLAGS),
+	GATE(SCLK_HEVC_CABAC, "sclk_hevc_cabac", "sclk_hevc_cabac_src", 0, RK3368_CLKGATE_CON(15), 3, GFLAGS),
+	GATE(SCLK_HEVC_CORE, "sclk_hevc_core", "sclk_hevc_core_src", 0, RK3368_CLKGATE_CON(15), 2, GFLAGS),
+	GATE(HCLK_VIDEO, "hclk_video", "hclk_video_pre", 0, RK3368_CLKGATE_CON(15), 1, GFLAGS),
+
+	/* aclk_rga_pre gates */
+	GATE(ACLK_VIO1_NOC, "aclk_vio1_noc", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 10, GFLAGS),
+	GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3368_CLKGATE_CON(16), 0, GFLAGS),
+	GATE(ACLK_HDCP, "aclk_hdcp", "aclk_rga_pre", 0, RK3368_CLKGATE_CON(17), 10, GFLAGS),
+
+	/* aclk_vio0 gates */
+	GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 11, GFLAGS),
+	GATE(ACLK_VIO0_NOC, "aclk_vio0_noc", "aclk_vio0", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 9, GFLAGS),
+	GATE(ACLK_VOP, "aclk_vop", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 5, GFLAGS),
+	GATE(ACLK_VOP_IEP, "aclk_vop_iep", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 4, GFLAGS),
+	GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3368_CLKGATE_CON(16), 2, GFLAGS),
+
+	/* sclk_isp gates */
+	GATE(HCLK_ISP, "hclk_isp", "sclk_isp", 0, RK3368_CLKGATE_CON(16), 14, GFLAGS),
+	GATE(ACLK_ISP, "aclk_isp", "sclk_isp", 0, RK3368_CLKGATE_CON(17), 0, GFLAGS),
+
+	/* hclk_vio gates */
+	GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 12, GFLAGS),
+	GATE(HCLK_VIO_NOC, "hclk_vio_noc", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 8, GFLAGS),
+	GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(16), 7, GFLAGS),
+	GATE(HCLK_VOP, "hclk_vop", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 6, GFLAGS),
+	GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 3, GFLAGS),
+	GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3368_CLKGATE_CON(16), 1, GFLAGS),
+	GATE(HCLK_VIO_HDCPMMU, "hclk_hdcpmmu", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 12, GFLAGS),
+	GATE(HCLK_VIO_H2P, "hclk_vio_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 7, GFLAGS),
+
+	/*
+	 * pclk_vio gates
+	 * pclk_vio comes from exactly the same source as hclk_vio
+	 */
+	GATE(PCLK_HDCP, "pclk_hdcp", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 11, GFLAGS),
+	GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 9, GFLAGS),
+	GATE(PCLK_VIO_H2P, "pclk_vio_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 8, GFLAGS),
+	GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 6, GFLAGS),
+	GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS),
+	GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3368_CLKGATE_CON(17), 3, GFLAGS),
+
+	/* ext_vip gates in diagram3 */
+
+	/* gpu gates */
+	GATE(SCLK_GPU_CORE, "sclk_gpu_core", "sclk_gpu_core_src", 0, RK3368_CLKGATE_CON(18), 2, GFLAGS),
+	GATE(ACLK_GPU_MEM, "aclk_gpu_mem", "aclk_gpu_mem_pre", 0, RK3368_CLKGATE_CON(18), 1, GFLAGS),
+	GATE(ACLK_GPU_CFG, "aclk_gpu_cfg", "aclk_gpu_cfg_pre", 0, RK3368_CLKGATE_CON(18), 0, GFLAGS),
+
+	/* aclk_peri gates */
+	GATE(ACLK_DMAC_PERI, "aclk_dmac_peri", "aclk_peri", 0, RK3368_CLKGATE_CON(19), 3, GFLAGS),
+	GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 2, GFLAGS),
+	GATE(HCLK_SFC, "hclk_sfc", "aclk_peri", 0, RK3368_CLKGATE_CON(20), 15, GFLAGS),
+	GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3368_CLKGATE_CON(20), 13, GFLAGS),
+	GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 8, GFLAGS),
+	GATE(ACLK_PERI_MMU, "aclk_peri_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(21), 4, GFLAGS),
+
+	/* hclk_peri gates */
+	GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 0, GFLAGS),
+	GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 11, GFLAGS),
+	GATE(0, "hclk_mmc_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 10, GFLAGS),
+	GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 9, GFLAGS),
+	GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 7, GFLAGS),
+	GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 6, GFLAGS),
+	GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 5, GFLAGS),
+	GATE(HCLK_HOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 4, GFLAGS),
+	GATE(HCLK_HOST0, "hclk_host0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 3, GFLAGS),
+	GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3368_CLKGATE_CON(20), 2, GFLAGS),
+	GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(20), 1, GFLAGS),
+	GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 3, GFLAGS),
+	GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 2, GFLAGS),
+	GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 1, GFLAGS),
+	GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3368_CLKGATE_CON(21), 0, GFLAGS),
+
+	/* pclk_peri gates */
+	GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 15, GFLAGS),
+	GATE(PCLK_I2C5, "pclk_i2c5", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 14, GFLAGS),
+	GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 13, GFLAGS),
+	GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 12, GFLAGS),
+	GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 11, GFLAGS),
+	GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 10, GFLAGS),
+	GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 9, GFLAGS),
+	GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 8, GFLAGS),
+	GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 7, GFLAGS),
+	GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 6, GFLAGS),
+	GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 5, GFLAGS),
+	GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3368_CLKGATE_CON(19), 4, GFLAGS),
+	GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(19), 1, GFLAGS),
+	GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 14, GFLAGS),
+	GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
+
+	/* pclk_pd_alive gates */
+	GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+	GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS),
+	GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS),
+	GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS),
+	GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS),
+	GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS),
+	GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS),
+
+	/*
+	 * pclk_vio gates
+	 * pclk_vio comes from exactly the same source as hclk_vio
+	 */
+	GATE(0, "pclk_dphyrx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+	GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
+
+	/* pclk_pd_pmu gates */
+	GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS),
+	GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS),
+	GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS),
+	GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
+	GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS),
+	GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
+
+	/* timer gates */
+	GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
+	GATE(0, "sclk_timer14", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 10, GFLAGS),
+	GATE(0, "sclk_timer13", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 9, GFLAGS),
+	GATE(0, "sclk_timer12", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 8, GFLAGS),
+	GATE(0, "sclk_timer11", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 7, GFLAGS),
+	GATE(0, "sclk_timer10", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 6, GFLAGS),
+	GATE(0, "sclk_timer05", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 5, GFLAGS),
+	GATE(0, "sclk_timer04", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 4, GFLAGS),
+	GATE(0, "sclk_timer03", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 3, GFLAGS),
+	GATE(0, "sclk_timer02", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 2, GFLAGS),
+	GATE(0, "sclk_timer01", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 1, GFLAGS),
+	GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS),
+};
+
+static void __init rk3368_clk_init(struct device_node *np)
+{
+	void __iomem *reg_base;
+	struct clk *clk;
+
+	reg_base = of_iomap(np, 0);
+	if (!reg_base) {
+		pr_err("%s: could not map cru region\n", __func__);
+		return;
+	}
+
+	rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+
+	/* xin12m is created by a cru-internal divider */
+	clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock xin12m: %ld\n",
+			__func__, PTR_ERR(clk));
+
+	/* ddrphy_div4 is created by a cru-internal divider */
+	clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock ddrphy_div4: %ld\n",
+			__func__, PTR_ERR(clk));
+
+	clk = clk_register_fixed_factor(NULL, "hclk_video_pre",
+					"hclk_video_pre_v", 0, 1, 4);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock hclk_video_pre: %ld\n",
+			__func__, PTR_ERR(clk));
+
+	/* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
+	clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+	if (IS_ERR(clk))
+		pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+			__func__, PTR_ERR(clk));
+	else
+		rockchip_clk_add_lookup(clk, PCLK_WDT);
+
+	rockchip_clk_register_plls(rk3368_pll_clks,
+				   ARRAY_SIZE(rk3368_pll_clks),
+				   RK3368_GRF_SOC_STATUS0);
+	rockchip_clk_register_branches(rk3368_clk_branches,
+				  ARRAY_SIZE(rk3368_clk_branches));
+
+	rockchip_clk_register_armclk(ARMCLKB, "armclkb",
+			mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
+			&rk3368_cpuclkb_data, rk3368_cpuclkb_rates,
+			ARRAY_SIZE(rk3368_cpuclkb_rates));
+
+	rockchip_clk_register_armclk(ARMCLKL, "armclkl",
+			mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
+			&rk3368_cpuclkl_data, rk3368_cpuclkl_rates,
+			ARRAY_SIZE(rk3368_cpuclkl_rates));
+
+	rockchip_register_softrst(np, 15, reg_base + RK3368_SOFTRST_CON(0),
+				  ROCKCHIP_SOFTRST_HIWORD_MASK);
+
+	rockchip_register_restart_notifier(RK3368_GLB_SRST_FST);
+}
+CLK_OF_DECLARE(rk3368_cru, "rockchip,rk3368-cru", rk3368_clk_init);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 052b94d..2493881 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -277,6 +277,13 @@
 				list->div_shift
 			);
 			break;
+		case branch_inverter:
+			clk = rockchip_clk_register_inverter(
+				list->name, list->parent_names,
+				list->num_parents,
+				reg_base + list->muxdiv_offset,
+				list->div_shift, list->div_flags, &clk_lock);
+			break;
 		}
 
 		/* none of the cases above matched */
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 6b09267..dc8ecb2 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -24,29 +24,29 @@
 #define CLK_ROCKCHIP_CLK_H
 
 #include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
+
+struct clk;
 
 #define HIWORD_UPDATE(val, mask, shift) \
 		((val) << (shift) | (mask) << ((shift) + 16))
 
 /* register positions shared by RK2928, RK3066 and RK3188 */
-#define RK2928_PLL_CON(x)		(x * 0x4)
+#define RK2928_PLL_CON(x)		((x) * 0x4)
 #define RK2928_MODE_CON		0x40
-#define RK2928_CLKSEL_CON(x)	(x * 0x4 + 0x44)
-#define RK2928_CLKGATE_CON(x)	(x * 0x4 + 0xd0)
+#define RK2928_CLKSEL_CON(x)	((x) * 0x4 + 0x44)
+#define RK2928_CLKGATE_CON(x)	((x) * 0x4 + 0xd0)
 #define RK2928_GLB_SRST_FST		0x100
 #define RK2928_GLB_SRST_SND		0x104
-#define RK2928_SOFTRST_CON(x)	(x * 0x4 + 0x110)
+#define RK2928_SOFTRST_CON(x)	((x) * 0x4 + 0x110)
 #define RK2928_MISC_CON		0x134
 
 #define RK3288_PLL_CON(x)		RK2928_PLL_CON(x)
 #define RK3288_MODE_CON			0x50
-#define RK3288_CLKSEL_CON(x)		(x * 0x4 + 0x60)
-#define RK3288_CLKGATE_CON(x)		(x * 0x4 + 0x160)
+#define RK3288_CLKSEL_CON(x)		((x) * 0x4 + 0x60)
+#define RK3288_CLKGATE_CON(x)		((x) * 0x4 + 0x160)
 #define RK3288_GLB_SRST_FST		0x1b0
 #define RK3288_GLB_SRST_SND		0x1b4
-#define RK3288_SOFTRST_CON(x)		(x * 0x4 + 0x1b8)
+#define RK3288_SOFTRST_CON(x)		((x) * 0x4 + 0x1b8)
 #define RK3288_MISC_CON			0x1e8
 #define RK3288_SDMMC_CON0		0x200
 #define RK3288_SDMMC_CON1		0x204
@@ -57,6 +57,22 @@
 #define RK3288_EMMC_CON0		0x218
 #define RK3288_EMMC_CON1		0x21c
 
+#define RK3368_PLL_CON(x)		RK2928_PLL_CON(x)
+#define RK3368_CLKSEL_CON(x)		((x) * 0x4 + 0x100)
+#define RK3368_CLKGATE_CON(x)		((x) * 0x4 + 0x200)
+#define RK3368_GLB_SRST_FST		0x280
+#define RK3368_GLB_SRST_SND		0x284
+#define RK3368_SOFTRST_CON(x)		((x) * 0x4 + 0x300)
+#define RK3368_MISC_CON			0x380
+#define RK3368_SDMMC_CON0		0x400
+#define RK3368_SDMMC_CON1		0x404
+#define RK3368_SDIO0_CON0		0x408
+#define RK3368_SDIO0_CON1		0x40c
+#define RK3368_SDIO1_CON0		0x410
+#define RK3368_SDIO1_CON1		0x414
+#define RK3368_EMMC_CON0		0x418
+#define RK3368_EMMC_CON1		0x41c
+
 enum rockchip_pll_type {
 	pll_rk3066,
 };
@@ -67,16 +83,16 @@
 	.nr = _nr,				\
 	.nf = _nf,				\
 	.no = _no,				\
-	.bwadj = (_nf >> 1),			\
+	.nb = ((_nf) < 2) ? 1 : (_nf) >> 1,	\
 }
 
-#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw)	\
+#define RK3066_PLL_RATE_NB(_rate, _nr, _nf, _no, _nb)		\
 {								\
 	.rate	= _rate##U,					\
 	.nr = _nr,						\
 	.nf = _nf,						\
 	.no = _no,						\
-	.bwadj = _bw,						\
+	.nb = _nb,						\
 }
 
 struct rockchip_pll_rate_table {
@@ -84,7 +100,7 @@
 	unsigned int nr;
 	unsigned int nf;
 	unsigned int no;
-	unsigned int bwadj;
+	unsigned int nb;
 };
 
 /**
@@ -182,6 +198,13 @@
 				const char *const *parent_names, u8 num_parents,
 				void __iomem *reg, int shift);
 
+#define ROCKCHIP_INVERTER_HIWORD_MASK	BIT(0)
+
+struct clk *rockchip_clk_register_inverter(const char *name,
+				const char *const *parent_names, u8 num_parents,
+				void __iomem *reg, int shift, int flags,
+				spinlock_t *lock);
+
 #define PNAME(x) static const char *const x[] __initconst
 
 enum rockchip_clk_branch_type {
@@ -191,6 +214,7 @@
 	branch_fraction_divider,
 	branch_gate,
 	branch_mmc,
+	branch_inverter,
 };
 
 struct rockchip_clk_branch {
@@ -308,6 +332,26 @@
 		.gate_offset	= -1,				\
 	}
 
+#define COMPOSITE_NOGATE_DIVTBL(_id, cname, pnames, f, mo, ms,	\
+				mw, mf, ds, dw, df, dt)		\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_composite,		\
+		.name		= cname,			\
+		.parent_names	= pnames,			\
+		.num_parents	= ARRAY_SIZE(pnames),		\
+		.flags		= f,				\
+		.muxdiv_offset	= mo,				\
+		.mux_shift	= ms,				\
+		.mux_width	= mw,				\
+		.mux_flags	= mf,				\
+		.div_shift	= ds,				\
+		.div_width	= dw,				\
+		.div_flags	= df,				\
+		.div_table	= dt,				\
+		.gate_offset	= -1,				\
+	}
+
 #define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\
 	{							\
 		.id		= _id,				\
@@ -394,6 +438,18 @@
 		.div_shift	= shift,			\
 	}
 
+#define INVERTER(_id, cname, pname, io, is, if)			\
+	{							\
+		.id		= _id,				\
+		.branch_type	= branch_inverter,		\
+		.name		= cname,			\
+		.parent_names	= (const char *[]){ pname },	\
+		.num_parents	= 1,				\
+		.muxdiv_offset	= io,				\
+		.div_shift	= is,				\
+		.div_flags	= if,				\
+	}
+
 void rockchip_clk_init(struct device_node *np, void __iomem *base,
 		       unsigned long nr_clks);
 struct regmap *rockchip_clk_get_grf(void);
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 3a1fe07..7c1e1f5 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -33,6 +33,9 @@
 */
 
 #include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include "clk-cpu.h"
 
 #define E4210_SRC_CPU		0x0
@@ -97,8 +100,8 @@
 static long exynos_cpuclk_round_rate(struct clk_hw *hw,
 			unsigned long drate, unsigned long *prate)
 {
-	struct clk *parent = __clk_get_parent(hw->clk);
-	*prate = __clk_round_rate(parent, drate);
+	struct clk_hw *parent = clk_hw_get_parent(hw);
+	*prate = clk_hw_round_rate(parent, drate);
 	return *prate;
 }
 
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 454b02a..4e9584d 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -9,8 +9,9 @@
  * Common Clock Framework support for Audio Subsystem Clock Controller.
 */
 
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 03a5222..7cd02ff 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -9,8 +9,8 @@
  * Clock driver for Exynos clock output
  */
 
+#include <linux/slab.h>
 #include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 538de66..fdd41b1 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -8,8 +8,6 @@
  * Common Clock Framework support for Exynos3250 SoC.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -19,6 +17,7 @@
 #include <dt-bindings/clock/exynos3250.h>
 
 #include "clk.h"
+#include "clk-cpu.h"
 #include "clk-pll.h"
 
 #define SRC_LEFTBUS		0x4200
@@ -319,8 +318,10 @@
 	MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
 	    SRC_CPU, 24, 1),
 	MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
-	MUX(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1),
-	MUX(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
+	MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1,
+			CLK_SET_RATE_PARENT, 0),
+	MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+			CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_div_clock div_clks[] __initdata = {
@@ -772,6 +773,26 @@
 	.nr_clk_regs		= ARRAY_SIZE(exynos3250_cmu_clk_regs),
 };
 
+#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem)			\
+		(((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) |	\
+		((corem) << 4))
+#define E3250_CPU_DIV1(hpm, copy)					\
+		(((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
+	{ 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), },
+	{  900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), },
+	{  100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), },
+	{  0 },
+};
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
 	struct samsung_clk_provider *ctx;
@@ -780,6 +801,11 @@
 	if (!ctx)
 		return;
 
+	exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+			mout_core_p[0], mout_core_p[1], 0x14200,
+			e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d),
+			CLK_CPU_HAS_DIV1);
+
 	exynos3_core_down_clock(ctx->reg_base);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index cae2c04..251f48d 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -11,8 +11,8 @@
 */
 
 #include <dt-bindings/clock/exynos4.h>
+#include <linux/slab.h>
 #include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -86,6 +86,7 @@
 #define DIV_PERIL4		0xc560
 #define DIV_PERIL5		0xc564
 #define E4X12_DIV_CAM1		0xc568
+#define E4X12_GATE_BUS_FSYS1	0xc744
 #define GATE_SCLK_CAM		0xc820
 #define GATE_IP_CAM		0xc920
 #define GATE_IP_TV		0xc924
@@ -1097,6 +1098,7 @@
 		0),
 	GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
 		0),
+	GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
 	GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
 	GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
 	GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 6c78b09..92c39f6e 100644
--- a/drivers/clk/samsung/clk-exynos4415.c
+++ b/drivers/clk/samsung/clk-exynos4415.c
@@ -9,8 +9,6 @@
  * Common Clock Framework support for Exynos4415 SoC.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 70ec3d2..55b83c7 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -11,14 +11,13 @@
 */
 
 #include <dt-bindings/clock/exynos5250.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/syscore_ops.h>
 
 #include "clk.h"
+#include "clk-cpu.h"
 
 #define APLL_LOCK		0x0
 #define APLL_CON0		0x100
@@ -748,6 +747,32 @@
 		VPLL_LOCK, VPLL_CON0, NULL),
 };
 
+#define E5250_CPU_DIV0(apll, pclk_dbg, atb, periph, acp, cpud)		\
+		((((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) |	\
+		 ((periph) << 12) | ((acp) << 8) | ((cpud) << 4)))
+#define E5250_CPU_DIV1(hpm, copy)					\
+		(((hpm) << 4) | (copy))
+
+static const struct exynos_cpuclk_cfg_data exynos5250_armclk_d[] __initconst = {
+	{ 1700000, E5250_CPU_DIV0(5, 3, 7, 7, 7, 3), E5250_CPU_DIV1(2, 0), },
+	{ 1600000, E5250_CPU_DIV0(4, 1, 7, 7, 7, 3), E5250_CPU_DIV1(2, 0), },
+	{ 1500000, E5250_CPU_DIV0(4, 1, 7, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+	{ 1400000, E5250_CPU_DIV0(4, 1, 6, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+	{ 1300000, E5250_CPU_DIV0(3, 1, 6, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+	{ 1200000, E5250_CPU_DIV0(3, 1, 5, 7, 7, 2), E5250_CPU_DIV1(2, 0), },
+	{ 1100000, E5250_CPU_DIV0(3, 1, 5, 7, 7, 3), E5250_CPU_DIV1(2, 0), },
+	{ 1000000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  900000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  800000, E5250_CPU_DIV0(2, 1, 4, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  700000, E5250_CPU_DIV0(1, 1, 3, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  600000, E5250_CPU_DIV0(1, 1, 3, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  500000, E5250_CPU_DIV0(1, 1, 2, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  400000, E5250_CPU_DIV0(1, 1, 2, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  300000, E5250_CPU_DIV0(1, 1, 1, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  200000, E5250_CPU_DIV0(1, 1, 1, 7, 7, 1), E5250_CPU_DIV1(2, 0), },
+	{  0 },
+};
+
 static const struct of_device_id ext_clk_match[] __initconst = {
 	{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
 	{ },
@@ -797,6 +822,10 @@
 			ARRAY_SIZE(exynos5250_div_clks));
 	samsung_clk_register_gate(ctx, exynos5250_gate_clks,
 			ARRAY_SIZE(exynos5250_gate_clks));
+	exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+			mout_cpu_p[0], mout_cpu_p[1], 0x200,
+			exynos5250_armclk_d, ARRAY_SIZE(exynos5250_armclk_d),
+			CLK_CPU_HAS_DIV1);
 
 	/*
 	 * Enable arm clock down (in idle) and set arm divider
diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c
index 06f96eb..d1a29f6 100644
--- a/drivers/clk/samsung/clk-exynos5260.c
+++ b/drivers/clk/samsung/clk-exynos5260.c
@@ -9,8 +9,6 @@
  * Common Clock Framework support for Exynos5260 SoC.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 
diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c
index 231475b..d5d5dca 100644
--- a/drivers/clk/samsung/clk-exynos5410.c
+++ b/drivers/clk/samsung/clk-exynos5410.c
@@ -11,8 +11,6 @@
 
 #include <dt-bindings/clock/exynos5410.h>
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index a1d731c..389af3c 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -11,8 +11,7 @@
 */
 
 #include <dt-bindings/clock/exynos5420.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index 39c9564..cee062c 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -9,8 +9,6 @@
  * Common Clock Framework support for Exynos5443 SoC.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 
diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c
index 979e813..5908138 100644
--- a/drivers/clk/samsung/clk-exynos5440.c
+++ b/drivers/clk/samsung/clk-exynos5440.c
@@ -10,8 +10,6 @@
 */
 
 #include <dt-bindings/clock/exynos5440.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index 03d36e8..8524e66 100644
--- a/drivers/clk/samsung/clk-exynos7.c
+++ b/drivers/clk/samsung/clk-exynos7.c
@@ -8,8 +8,6 @@
  *
 */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index bebc61b..b7dd396 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -12,6 +12,8 @@
 #include <linux/errno.h>
 #include <linux/hrtimer.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clkdev.h>
 #include "clk.h"
 #include "clk-pll.h"
 
@@ -180,7 +182,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -288,7 +290,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -403,7 +405,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -455,7 +457,7 @@
 
 		if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
 			pr_err("%s: could not lock PLL %s\n",
-					__func__, __clk_get_name(hw->clk));
+					__func__, clk_hw_get_name(hw));
 			return -EFAULT;
 		}
 
@@ -554,7 +556,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -614,7 +616,7 @@
 
 		if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
 			pr_err("%s: could not lock PLL %s\n",
-					__func__, __clk_get_name(hw->clk));
+					__func__, clk_hw_get_name(hw));
 			return -EFAULT;
 		}
 
@@ -772,7 +774,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -1013,7 +1015,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
@@ -1111,7 +1113,7 @@
 	rate = samsung_get_pll_settings(pll, drate);
 	if (!rate) {
 		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
-			drate, __clk_get_name(hw->clk));
+			drate, clk_hw_get_name(hw));
 		return -EINVAL;
 	}
 
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index e56df50..e9eb935 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -8,6 +8,10 @@
  * Common Clock Framework support for s3c24xx external clock output.
  */
 
+#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include "clk.h"
@@ -57,7 +61,7 @@
 static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
 {
 	struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 	u32 val;
 
 	val = readl_relaxed(S3C24XX_MISCCR) >> clkout->shift;
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 5d2f034..0945a88 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -8,8 +8,6 @@
  * Common Clock Framework support for S3C2410 and following SoCs.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
index 2ceedaf..44d6a9f 100644
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -8,8 +8,6 @@
  * Common Clock Framework support for S3C2412 and S3C2413.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
index 0c3c182..2c0a1ea 100644
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ b/drivers/clk/samsung/clk-s3c2443.c
@@ -8,8 +8,6 @@
  * Common Clock Framework support for S3C2443 and following SoCs.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 0f590e5..d325ed1 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -8,8 +8,7 @@
  * Common Clock Framework support for all S3C64xx SoCs.
 */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index de4455b..eefb84b 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -13,8 +13,8 @@
  * Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs.
 */
 
-#include <linux/clkdev.h>
 #include <linux/io.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/syscore_ops.h>
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index cf7e8fa..759aaf3 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -11,8 +11,6 @@
  * Common Clock Framework support for all S5PC110/S5PV210 SoCs.
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -828,6 +826,8 @@
 
 	s5pv210_clk_sleep_init();
 
+	samsung_clk_of_add_provider(np, ctx);
+
 	pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
 		"\tmout_epll = %ld, mout_vpll = %ld\n",
 		is_s5p6442 ? "S5P6442" : "S5PV210",
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 0117238..f38a6c4 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -11,6 +11,10 @@
  * clock framework for Samsung platforms.
 */
 
+#include <linux/slab.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/syscore_ops.h>
 
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index b775fc2..aa872d2 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -13,10 +13,11 @@
 #ifndef __SAMSUNG_CLK_H
 #define __SAMSUNG_CLK_H
 
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include "clk-pll.h"
 
+struct clk;
+
 /**
  * struct samsung_clk_provider: information about clock provider
  * @reg_base: virtual address for the register base.
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index 036a692..b4c8d67 100644
--- a/drivers/clk/shmobile/clk-div6.c
+++ b/drivers/clk/shmobile/clk-div6.c
@@ -11,12 +11,12 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 #define CPG_DIV6_CKSTP		BIT(8)
 #define CPG_DIV6_DIV(d)		((d) & 0x3f)
@@ -133,13 +133,13 @@
 
 	hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
 		   (BIT(clock->src_width) - 1);
-	for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
 		if (clock->parents[i] == hw_index)
 			return i;
 	}
 
 	pr_err("%s: %s DIV6 clock set to invalid parent %u\n",
-	       __func__, __clk_get_name(hw->clk), hw_index);
+	       __func__, clk_hw_get_name(hw), hw_index);
 	return 0;
 }
 
@@ -149,7 +149,7 @@
 	u8 hw_index;
 	u32 mask;
 
-	if (index >= __clk_get_num_parents(hw->clk))
+	if (index >= clk_hw_get_num_parents(hw))
 		return -EINVAL;
 
 	mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index 2d2fe77..b1df7b2 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -2,6 +2,7 @@
  * R-Car MSTP clocks
  *
  * Copyright (C) 2013 Ideas On Board SPRL
+ * Copyright (C) 2015 Glider bvba
  *
  * Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  *
@@ -10,11 +11,16 @@
  * the Free Software Foundation; version 2 of the License.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/device.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_domain.h>
 #include <linux/spinlock.h>
 
 /*
@@ -236,3 +242,84 @@
 	of_clk_add_provider(np, of_clk_src_onecell_get, &group->data);
 }
 CLK_OF_DECLARE(cpg_mstp_clks, "renesas,cpg-mstp-clocks", cpg_mstp_clocks_init);
+
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct of_phandle_args clkspec;
+	struct clk *clk;
+	int i = 0;
+	int error;
+
+	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+					   &clkspec)) {
+		if (of_device_is_compatible(clkspec.np,
+					    "renesas,cpg-mstp-clocks"))
+			goto found;
+
+		of_node_put(clkspec.np);
+		i++;
+	}
+
+	return 0;
+
+found:
+	clk = of_clk_get_from_provider(&clkspec);
+	of_node_put(clkspec.np);
+
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	error = pm_clk_create(dev);
+	if (error) {
+		dev_err(dev, "pm_clk_create failed %d\n", error);
+		goto fail_put;
+	}
+
+	error = pm_clk_add_clk(dev, clk);
+	if (error) {
+		dev_err(dev, "pm_clk_add_clk %pC failed %d\n", clk, error);
+		goto fail_destroy;
+	}
+
+	return 0;
+
+fail_destroy:
+	pm_clk_destroy(dev);
+fail_put:
+	clk_put(clk);
+	return error;
+}
+
+void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev)
+{
+	if (!list_empty(&dev->power.subsys_data->clock_list))
+		pm_clk_destroy(dev);
+}
+
+void __init cpg_mstp_add_clk_domain(struct device_node *np)
+{
+	struct generic_pm_domain *pd;
+	u32 ncells;
+
+	if (of_property_read_u32(np, "#power-domain-cells", &ncells)) {
+		pr_warn("%s lacks #power-domain-cells\n", np->full_name);
+		return;
+	}
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return;
+
+	pd->name = np->name;
+
+	pd->flags = GENPD_FLAG_PM_CLK;
+	pm_genpd_init(pd, &simple_qos_governor, false);
+	pd->attach_dev = cpg_mstp_attach_dev;
+	pd->detach_dev = cpg_mstp_detach_dev;
+
+	of_genpd_add_provider_simple(np, pd);
+}
+#endif /* !CONFIG_PM_GENERIC_DOMAINS_OF */
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
index 29b9a0b0..9326204 100644
--- a/drivers/clk/shmobile/clk-r8a73a4.c
+++ b/drivers/clk/shmobile/clk-r8a73a4.c
@@ -9,10 +9,10 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/clk/shmobile.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/spinlock.h>
diff --git a/drivers/clk/shmobile/clk-r8a7740.c b/drivers/clk/shmobile/clk-r8a7740.c
index 1e2eaae..1e6b1da 100644
--- a/drivers/clk/shmobile/clk-r8a7740.c
+++ b/drivers/clk/shmobile/clk-r8a7740.c
@@ -9,10 +9,10 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/clk/shmobile.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/spinlock.h>
diff --git a/drivers/clk/shmobile/clk-r8a7778.c b/drivers/clk/shmobile/clk-r8a7778.c
index cb33b57..87c1d2f 100644
--- a/drivers/clk/shmobile/clk-r8a7778.c
+++ b/drivers/clk/shmobile/clk-r8a7778.c
@@ -9,9 +9,9 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/clk/shmobile.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 struct r8a7778_cpg {
 	struct clk_onecell_data data;
@@ -124,6 +124,8 @@
 	}
 
 	of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+	cpg_mstp_add_clk_domain(np);
 }
 
 CLK_OF_DECLARE(r8a7778_cpg_clks, "renesas,r8a7778-cpg-clocks",
diff --git a/drivers/clk/shmobile/clk-r8a7779.c b/drivers/clk/shmobile/clk-r8a7779.c
index 652ecac..92275c5f 100644
--- a/drivers/clk/shmobile/clk-r8a7779.c
+++ b/drivers/clk/shmobile/clk-r8a7779.c
@@ -11,12 +11,12 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/clk/shmobile.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #include <dt-bindings/clock/r8a7779-clock.h>
@@ -168,6 +168,8 @@
 	}
 
 	of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+	cpg_mstp_add_clk_domain(np);
 }
 CLK_OF_DECLARE(r8a7779_cpg_clks, "renesas,r8a7779-cpg-clocks",
 	       r8a7779_cpg_clocks_init);
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index acfb6d7..745496f 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -11,13 +11,13 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/clk/shmobile.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/math64.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 struct rcar_gen2_cpg {
@@ -415,6 +415,8 @@
 	}
 
 	of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+	cpg_mstp_add_clk_domain(np);
 }
 CLK_OF_DECLARE(rcar_gen2_cpg_clks, "renesas,rcar-gen2-cpg-clocks",
 	       rcar_gen2_cpg_clocks_init);
diff --git a/drivers/clk/shmobile/clk-rz.c b/drivers/clk/shmobile/clk-rz.c
index 7e68e86..9766e3c 100644
--- a/drivers/clk/shmobile/clk-rz.c
+++ b/drivers/clk/shmobile/clk-rz.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/clk/shmobile.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
@@ -99,5 +100,7 @@
 	}
 
 	of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+
+	cpg_mstp_add_clk_domain(np);
 }
 CLK_OF_DECLARE(rz_cpg_clks, "renesas,rz-cpg-clocks", rz_cpg_clocks_init);
diff --git a/drivers/clk/shmobile/clk-sh73a0.c b/drivers/clk/shmobile/clk-sh73a0.c
index cd529cf..8966f8b 100644
--- a/drivers/clk/shmobile/clk-sh73a0.c
+++ b/drivers/clk/shmobile/clk-sh73a0.c
@@ -9,12 +9,12 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/clk/shmobile.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 struct sh73a0_cpg {
diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c
index d63b76c..c5eaa9d 100644
--- a/drivers/clk/sirf/clk-atlas6.c
+++ b/drivers/clk/sirf/clk-atlas6.c
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c
index db8ab69..a98e21f 100644
--- a/drivers/clk/sirf/clk-atlas7.c
+++ b/drivers/clk/sirf/clk-atlas7.c
@@ -358,6 +358,7 @@
 	if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) {
 		rate = fin;
 		rate *= 1 << 24;
+		do_div(rate, nr);
 		do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth)
 			+ (ssmod << ssdepth)));
 	} else {
@@ -465,6 +466,9 @@
  *  double resolution mode:fout = fin * finc / 2^29
  *  normal mode:fout = fin * finc / 2^28
  */
+#define DTO_RESL_DOUBLE	(1ULL << 29)
+#define DTO_RESL_NORMAL	(1ULL << 28)
+
 static int dto_clk_is_enabled(struct clk_hw *hw)
 {
 	struct clk_dto *clk = to_dtoclk(hw);
@@ -509,9 +513,9 @@
 	rate *= finc;
 	if (droff & BIT(0))
 		/* Double resolution off */
-		do_div(rate, 1 << 28);
+		do_div(rate, DTO_RESL_NORMAL);
 	else
-		do_div(rate, 1 << 29);
+		do_div(rate, DTO_RESL_DOUBLE);
 
 	return rate;
 }
@@ -519,11 +523,11 @@
 static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 	unsigned long *parent_rate)
 {
-	u64 dividend = rate * (1 << 29);
+	u64 dividend = rate * DTO_RESL_DOUBLE;
 
 	do_div(dividend, *parent_rate);
 	dividend *= *parent_rate;
-	do_div(dividend, 1 << 29);
+	do_div(dividend, DTO_RESL_DOUBLE);
 
 	return dividend;
 }
@@ -531,7 +535,7 @@
 static int dto_clk_set_rate(struct clk_hw *hw, unsigned long rate,
 	unsigned long parent_rate)
 {
-	u64 dividend = rate * (1 << 29);
+	u64 dividend = rate * DTO_RESL_DOUBLE;
 	struct clk_dto *clk = to_dtoclk(hw);
 
 	do_div(dividend, parent_rate);
@@ -1161,7 +1165,7 @@
 	{ 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, &leaf6_gate_lock },
 	{ 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, &leaf6_gate_lock },
 	{ 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, &leaf6_gate_lock },
-	{ 125, "thcpum_cpudiv4", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock },
+	{ 125, "coresight_tpiu", "cpum_tpiu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock },
 	{ 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, &leaf7_gate_lock },
 	{ 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, &leaf7_gate_lock },
 	{ 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, &leaf7_gate_lock },
@@ -1174,9 +1178,13 @@
 	{ 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, &leaf8_gate_lock },
 	{ 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, &root1_gate_lock },
 	{ 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, &leaf8_gate_lock },
+	{ 138, "pwm_io", "io_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 0, &leaf0_gate_lock },
+	{ 139, "pwm_xin", "xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 1, &leaf0_gate_lock },
+	{ 140, "pwm_xinw", "xinw", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 2, &leaf0_gate_lock },
+	{ 141, "thcgum_sys", "sys_mux", 0, SIRFSOC_CLKC_LEAF_CLK_EN0_SET, 3, &leaf0_gate_lock },
 };
 
-static struct clk *atlas7_clks[ARRAY_SIZE(unit_list)];
+static struct clk *atlas7_clks[ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list)];
 
 static int unit_clk_is_enabled(struct clk_hw *hw)
 {
@@ -1609,6 +1617,7 @@
 			       sirfsoc_clk_vbase + mux->mux_offset,
 			       mux->shift, mux->width,
 			       mux->mux_flags, NULL);
+		atlas7_clks[ARRAY_SIZE(unit_list) + i] = clk;
 		BUG_ON(!clk);
 	}
 
@@ -1620,7 +1629,7 @@
 	}
 
 	clk_data.clks = atlas7_clks;
-	clk_data.clk_num = ARRAY_SIZE(unit_list);
+	clk_data.clk_num = ARRAY_SIZE(unit_list) + ARRAY_SIZE(mux_list);
 
 	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 	BUG_ON(ret);
diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c
index 9fc285d..77e1e24 100644
--- a/drivers/clk/sirf/clk-common.c
+++ b/drivers/clk/sirf/clk-common.c
@@ -7,6 +7,8 @@
  * Licensed under GPLv2 or later.
  */
 
+#include <linux/clk.h>
+
 #define KHZ     1000
 #define MHZ     (KHZ * KHZ)
 
@@ -165,10 +167,10 @@
 	 * SiRF SoC has not cpu clock control,
 	 * So bypass to it's parent pll.
 	 */
-	struct clk *parent_clk = clk_get_parent(hw->clk);
-	struct clk *pll_parent_clk = clk_get_parent(parent_clk);
-	unsigned long pll_parent_rate = clk_get_rate(pll_parent_clk);
-	return pll_clk_round_rate(__clk_get_hw(parent_clk), rate, &pll_parent_rate);
+	struct clk_hw *parent_clk = clk_hw_get_parent(hw);
+	struct clk_hw *pll_parent_clk = clk_hw_get_parent(parent_clk);
+	unsigned long pll_parent_rate = clk_hw_get_rate(pll_parent_clk);
+	return pll_clk_round_rate(parent_clk, rate, &pll_parent_rate);
 }
 
 static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
@@ -178,8 +180,8 @@
 	 * SiRF SoC has not cpu clock control,
 	 * So return the parent pll rate.
 	 */
-	struct clk *parent_clk = clk_get_parent(hw->clk);
-	return __clk_get_rate(parent_clk);
+	struct clk_hw *parent_clk = clk_hw_get_parent(hw);
+	return clk_hw_get_rate(parent_clk);
 }
 
 static struct clk_ops std_pll_ops = {
diff --git a/drivers/clk/sirf/clk-prima2.c b/drivers/clk/sirf/clk-prima2.c
index 6968e2e..f92c402 100644
--- a/drivers/clk/sirf/clk-prima2.c
+++ b/drivers/clk/sirf/clk-prima2.c
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c
index 83c6780..1cebf25 100644
--- a/drivers/clk/socfpga/clk-gate-a10.c
+++ b/drivers/clk/socfpga/clk-gate-a10.c
@@ -13,6 +13,7 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
@@ -38,7 +39,7 @@
 		div = socfpgaclk->fixed_div;
 	else if (socfpgaclk->div_reg) {
 		val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
-		val &= div_mask(socfpgaclk->width);
+		val &= GENMASK(socfpgaclk->width - 1, 0);
 		div = (1 << val);
 	}
 
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 82449cd..aa7a6e6 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -15,8 +15,7 @@
  * Based from clk-highbank.c
  *
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/mfd/syscon.h>
@@ -106,7 +105,7 @@
 		div = socfpgaclk->fixed_div;
 	else if (socfpgaclk->div_reg) {
 		val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
-		val &= div_mask(socfpgaclk->width);
+		val &= GENMASK(socfpgaclk->width - 1, 0);
 		/* Check for GPIO_DB_CLK by its offset */
 		if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
 			div = val + 1;
diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c
index 9d0181b..1f397cb 100644
--- a/drivers/clk/socfpga/clk-periph-a10.c
+++ b/drivers/clk/socfpga/clk-periph-a10.c
@@ -13,6 +13,7 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -37,7 +38,7 @@
 		div = socfpgaclk->fixed_div;
 	} else if (socfpgaclk->div_reg) {
 		div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
-		div &= div_mask(socfpgaclk->width);
+		div &= GENMASK(socfpgaclk->width - 1, 0);
 		div += 1;
 	} else {
 		div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c
index 83aeaa2..52c883e 100644
--- a/drivers/clk/socfpga/clk-periph.c
+++ b/drivers/clk/socfpga/clk-periph.c
@@ -15,8 +15,7 @@
  * Based from clk-highbank.c
  *
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -36,7 +35,7 @@
 	} else {
 		if (socfpgaclk->div_reg) {
 			val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
-			val &= div_mask(socfpgaclk->width);
+			val &= GENMASK(socfpgaclk->width - 1, 0);
 			parent_rate /= (val + 1);
 		}
 		div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
@@ -45,8 +44,17 @@
 	return parent_rate / div;
 }
 
+static u8 clk_periclk_get_parent(struct clk_hw *hwclk)
+{
+	u32 clk_src;
+
+	clk_src = readl(clk_mgr_base_addr + CLKMGR_DBCTRL);
+	return clk_src & 0x1;
+}
+
 static const struct clk_ops periclk_ops = {
 	.recalc_rate = clk_periclk_recalc_rate,
+	.get_parent = clk_periclk_get_parent,
 };
 
 static __init void __socfpga_periph_init(struct device_node *node,
@@ -56,7 +64,7 @@
 	struct clk *clk;
 	struct socfpga_periph_clk *periph_clk;
 	const char *clk_name = node->name;
-	const char *parent_name;
+	const char *parent_name[SOCFPGA_MAX_PARENTS];
 	struct clk_init_data init;
 	int rc;
 	u32 fixed_div;
@@ -90,9 +98,10 @@
 	init.name = clk_name;
 	init.ops = ops;
 	init.flags = 0;
-	parent_name = of_clk_get_parent_name(node, 0);
-	init.parent_names = &parent_name;
-	init.num_parents = 1;
+
+	init.num_parents = of_clk_parent_fill(node, parent_name,
+					      SOCFPGA_MAX_PARENTS);
+	init.parent_names = parent_name;
 
 	periph_clk->hw.hw.init = &init;
 
diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
index 1178b11..402d630 100644
--- a/drivers/clk/socfpga/clk-pll-a10.c
+++ b/drivers/clk/socfpga/clk-pll-a10.c
@@ -13,6 +13,7 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
index 8f26b52..c7f4631 100644
--- a/drivers/clk/socfpga/clk-pll.c
+++ b/drivers/clk/socfpga/clk-pll.c
@@ -15,8 +15,7 @@
  * Based from clk-highbank.c
  *
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/of.h>
diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h
index 603973a..814c724 100644
--- a/drivers/clk/socfpga/clk.h
+++ b/drivers/clk/socfpga/clk.h
@@ -18,16 +18,15 @@
 #define __SOCFPGA_CLK_H
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 
 /* Clock Manager offsets */
 #define CLKMGR_CTRL		0x0
 #define CLKMGR_BYPASS		0x4
+#define CLKMGR_DBCTRL		0x10
 #define CLKMGR_L4SRC		0x70
 #define CLKMGR_PERPLL_SRC	0xAC
 
 #define SOCFPGA_MAX_PARENTS		5
-#define div_mask(width) ((1 << (width)) - 1)
 
 #define streq(a, b) (strcmp((a), (b)) == 0)
 #define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 5ebddc5..dc21ca4 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -87,7 +87,7 @@
 	struct clk_pll *pll = to_clk_pll(hw);
 	unsigned long prev_rate, vco_prev_rate, rate = 0;
 	unsigned long vco_parent_rate =
-		__clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));
+		clk_hw_get_rate(clk_hw_get_parent(clk_hw_get_parent(hw)));
 
 	if (!prate) {
 		pr_err("%s: prate is must for pll clk\n", __func__);
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 222ce108..009bd14 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -11,7 +11,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/io.h>
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index 973c9d3..9c7abfd 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -11,7 +11,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/io.h>
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 231061f..e24f85c 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -9,7 +9,6 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/spinlock_types.h>
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 8dd8cce..bd355ee 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -5,6 +5,7 @@
  * Author:  Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics.
  * License terms:  GNU General Public License (GPL), version 2  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -44,7 +45,7 @@
 
 	clk_gate_ops.enable(fgate_hw);
 
-	pr_debug("%s: flexgen output enabled\n", __clk_get_name(hw->clk));
+	pr_debug("%s: flexgen output enabled\n", clk_hw_get_name(hw));
 	return 0;
 }
 
@@ -58,7 +59,7 @@
 
 	clk_gate_ops.disable(fgate_hw);
 
-	pr_debug("%s: flexgen output disabled\n", __clk_get_name(hw->clk));
+	pr_debug("%s: flexgen output disabled\n", clk_hw_get_name(hw));
 }
 
 static int flexgen_is_enabled(struct clk_hw *hw)
@@ -108,7 +109,7 @@
 	/* Round div according to exact prate and wished rate */
 	div = clk_best_div(*prate, rate);
 
-	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
 		*prate = rate * div;
 		return rate;
 	}
@@ -243,7 +244,7 @@
 						       int *num_parents)
 {
 	const char **parents;
-	int nparents, i;
+	int nparents;
 
 	nparents = of_clk_get_parent_count(np);
 	if (WARN_ON(nparents <= 0))
@@ -253,10 +254,8 @@
 	if (!parents)
 		return NULL;
 
-	for (i = 0; i < nparents; i++)
-		parents[i] = of_clk_get_parent_name(np, i);
+	*num_parents = of_clk_parent_fill(np, parents, nparents);
 
-	*num_parents = nparents;
 	return parents;
 }
 
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index d9eb2e1..83ccf14 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -15,6 +15,7 @@
 
 #include <linux/slab.h>
 #include <linux/of_address.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
 #include "clkgen.h"
@@ -512,7 +513,7 @@
 	params.ndiv = CLKGEN_READ(pll, ndiv);
 	if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
 		pr_err("%s:%s error calculating rate\n",
-		       __clk_get_name(hw->clk), __func__);
+		       clk_hw_get_name(hw), __func__);
 
 	pll->ndiv = params.ndiv;
 
@@ -557,7 +558,7 @@
 		clk_fs660c32_vco_get_rate(*prate, &params, &rate);
 
 	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
-		 __func__, __clk_get_name(hw->clk),
+		 __func__, clk_hw_get_name(hw),
 		 rate, (unsigned int)params.sdiv,
 		 (unsigned int)params.mdiv,
 		 (unsigned int)params.pe, (unsigned int)params.nsdiv);
@@ -580,7 +581,7 @@
 		clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);
 
 	pr_debug("%s: %s new rate %ld [ndiv=0x%x]\n",
-		 __func__, __clk_get_name(hw->clk),
+		 __func__, clk_hw_get_name(hw),
 		 hwrate, (unsigned int)params.ndiv);
 
 	if (!hwrate)
@@ -744,7 +745,7 @@
 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
 	unsigned long flags = 0;
 
-	pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
+	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
 
 	quadfs_fsynth_program_rate(fs);
 
@@ -769,7 +770,7 @@
 	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
 	unsigned long flags = 0;
 
-	pr_debug("%s: %s\n", __func__, __clk_get_name(hw->clk));
+	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));
 
 	if (fs->lock)
 		spin_lock_irqsave(fs->lock, flags);
@@ -786,7 +787,7 @@
 	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);
 
 	pr_debug("%s: %s enable bit = 0x%x\n",
-		 __func__, __clk_get_name(hw->clk), nsb);
+		 __func__, clk_hw_get_name(hw), nsb);
 
 	return fs->data->standby_polarity ? !nsb : !!nsb;
 }
@@ -945,10 +946,10 @@
 
 	if (clk_fs_get_rate(parent_rate, &params, &rate)) {
 		pr_err("%s:%s error calculating rate\n",
-		       __clk_get_name(hw->clk), __func__);
+		       clk_hw_get_name(hw), __func__);
 	}
 
-	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
 
 	return rate;
 }
@@ -961,7 +962,7 @@
 	rate = quadfs_find_best_rate(hw, rate, *prate, &params);
 
 	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
-		 __func__, __clk_get_name(hw->clk),
+		 __func__, clk_hw_get_name(hw),
 		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
 			 (unsigned int)params.pe, (unsigned int)params.nsdiv);
 
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index 717c4a9..4f7f6c0 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -15,6 +15,7 @@
 
 #include <linux/slab.h>
 #include <linux/of_address.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
 static DEFINE_SPINLOCK(clkgena_divmux_lock);
@@ -24,20 +25,17 @@
 						       int *num_parents)
 {
 	const char **parents;
-	int nparents, i;
+	int nparents;
 
 	nparents = of_clk_get_parent_count(np);
 	if (WARN_ON(nparents <= 0))
 		return ERR_PTR(-EINVAL);
 
-	parents = kzalloc(nparents * sizeof(const char *), GFP_KERNEL);
+	parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
 	if (!parents)
 		return ERR_PTR(-ENOMEM);
 
-	for (i = 0; i < nparents; i++)
-		parents[i] = of_clk_get_parent_name(np, i);
-
-	*num_parents = nparents;
+	*num_parents = of_clk_parent_fill(np, parents, nparents);
 	return parents;
 }
 
@@ -141,7 +139,7 @@
 	genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
 	if ((s8)genamux->muxsel < 0) {
 		pr_debug("%s: %s: Invalid parent, setting to default.\n",
-		      __func__, __clk_get_name(hw->clk));
+		      __func__, clk_hw_get_name(hw));
 		genamux->muxsel = 0;
 	}
 
@@ -215,7 +213,7 @@
 /**
  * clk_register_genamux - register a genamux clock with the clock framework
  */
-static struct clk *clk_register_genamux(const char *name,
+static struct clk * __init clk_register_genamux(const char *name,
 				const char **parent_names, u8 num_parents,
 				void __iomem *reg,
 				const struct clkgena_divmux_data *muxdata,
@@ -369,11 +367,10 @@
 	{}
 };
 
-static void __iomem * __init clkgen_get_register_base(
-				struct device_node *np)
+static void __iomem * __init clkgen_get_register_base(struct device_node *np)
 {
 	struct device_node *pnode;
-	void __iomem *reg = NULL;
+	void __iomem *reg;
 
 	pnode = of_get_parent(np);
 	if (!pnode)
@@ -398,7 +395,7 @@
 	if (WARN_ON(!match))
 		return;
 
-	data = (struct clkgena_divmux_data *)match->data;
+	data = match->data;
 
 	reg = clkgen_get_register_base(np);
 	if (!reg)
@@ -406,18 +403,18 @@
 
 	parents = clkgen_mux_get_parents(np, &num_parents);
 	if (IS_ERR(parents))
-		return;
+		goto err_parents;
 
 	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
 	if (!clk_data)
-		goto err;
+		goto err_alloc;
 
 	clk_data->clk_num = data->num_outputs;
-	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
 				 GFP_KERNEL);
 
 	if (!clk_data->clks)
-		goto err;
+		goto err_alloc_clks;
 
 	for (i = 0; i < clk_data->clk_num; i++) {
 		struct clk *clk;
@@ -447,11 +444,13 @@
 	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
 	return;
 err:
-	if (clk_data)
-		kfree(clk_data->clks);
-
+	kfree(clk_data->clks);
+err_alloc_clks:
 	kfree(clk_data);
+err_alloc:
 	kfree(parents);
+err_parents:
+	iounmap(reg);
 }
 CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);
 
@@ -491,7 +490,7 @@
 	void __iomem *reg;
 	const char *parent_name, *clk_name;
 	struct clk *clk;
-	struct clkgena_prediv_data *data;
+	const struct clkgena_prediv_data *data;
 
 	match = of_match_node(clkgena_prediv_of_match, np);
 	if (!match) {
@@ -499,7 +498,7 @@
 		return;
 	}
 
-	data = (struct clkgena_prediv_data *)match->data;
+	data = match->data;
 
 	reg = clkgen_get_register_base(np);
 	if (!reg)
@@ -507,18 +506,18 @@
 
 	parent_name = of_clk_get_parent_name(np, 0);
 	if (!parent_name)
-		return;
+		goto err;
 
 	if (of_property_read_string_index(np, "clock-output-names",
 					  0, &clk_name))
-		return;
+		goto err;
 
 	clk = clk_register_divider_table(NULL, clk_name, parent_name,
 					 CLK_GET_RATE_NOCACHE,
 					 reg + data->offset, data->shift, 1,
 					 0, data->table, NULL);
 	if (IS_ERR(clk))
-		return;
+		goto err;
 
 	of_clk_add_provider(np, of_clk_src_simple_get, clk);
 	pr_debug("%s: parent %s rate %u\n",
@@ -527,6 +526,8 @@
 		(unsigned int)clk_get_rate(clk));
 
 	return;
+err:
+	iounmap(reg);
 }
 CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
 
@@ -630,7 +631,7 @@
 	void __iomem *reg;
 	const char **parents;
 	int num_parents;
-	struct clkgen_mux_data *data;
+	const struct clkgen_mux_data *data;
 
 	match = of_match_node(mux_of_match, np);
 	if (!match) {
@@ -638,7 +639,7 @@
 		return;
 	}
 
-	data = (struct clkgen_mux_data *)match->data;
+	data = match->data;
 
 	reg = of_iomap(np, 0);
 	if (!reg) {
@@ -650,7 +651,7 @@
 	if (IS_ERR(parents)) {
 		pr_err("%s: Failed to get parents (%ld)\n",
 				__func__, PTR_ERR(parents));
-		return;
+		goto err_parents;
 	}
 
 	clk = clk_register_mux(NULL, np->name, parents, num_parents,
@@ -666,12 +667,14 @@
 			__clk_get_name(clk_get_parent(clk)),
 			(unsigned int)clk_get_rate(clk));
 
+	kfree(parents);
 	of_clk_add_provider(np, of_clk_src_simple_get, clk);
+	return;
 
 err:
 	kfree(parents);
-
-	return;
+err_parents:
+	iounmap(reg);
 }
 CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
 
@@ -707,12 +710,12 @@
 	const char **parents;
 	int num_parents, i;
 	struct clk_onecell_data *clk_data;
-	struct clkgen_vcc_data *data;
+	const struct clkgen_vcc_data *data;
 
 	match = of_match_node(vcc_of_match, np);
 	if (WARN_ON(!match))
 		return;
-	data = (struct clkgen_vcc_data *)match->data;
+	data = match->data;
 
 	reg = of_iomap(np, 0);
 	if (!reg)
@@ -720,18 +723,18 @@
 
 	parents = clkgen_mux_get_parents(np, &num_parents);
 	if (IS_ERR(parents))
-		return;
+		goto err_parents;
 
 	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
 	if (!clk_data)
-		goto err;
+		goto err_alloc;
 
 	clk_data->clk_num = VCC_MAX_CHANNELS;
-	clk_data->clks = kzalloc(clk_data->clk_num * sizeof(struct clk *),
+	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
 				 GFP_KERNEL);
 
 	if (!clk_data->clks)
-		goto err;
+		goto err_alloc_clks;
 
 	for (i = 0; i < clk_data->clk_num; i++) {
 		struct clk *clk;
@@ -750,21 +753,21 @@
 		if (*clk_name == '\0')
 			continue;
 
-		gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
+		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
 		if (!gate)
-			break;
+			goto err;
 
-		div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
+		div = kzalloc(sizeof(*div), GFP_KERNEL);
 		if (!div) {
 			kfree(gate);
-			break;
+			goto err;
 		}
 
-		mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
 		if (!mux) {
 			kfree(gate);
 			kfree(div);
-			break;
+			goto err;
 		}
 
 		gate->reg = reg + VCC_GATE_OFFSET;
@@ -823,10 +826,12 @@
 		kfree(container_of(composite->mux_hw, struct clk_mux, hw));
 	}
 
-	if (clk_data)
-		kfree(clk_data->clks);
-
+	kfree(clk_data->clks);
+err_alloc_clks:
 	kfree(clk_data);
+err_alloc:
 	kfree(parents);
+err_parents:
+	iounmap(reg);
 }
 CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 72d1c27..47a38a9 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -16,6 +16,7 @@
 
 #include <linux/slab.h>
 #include <linux/of_address.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
 #include "clkgen.h"
@@ -291,7 +292,7 @@
 	res = (uint64_t)2 * (uint64_t)parent_rate * (uint64_t)ndiv;
 	rate = (unsigned long)div64_u64(res, mdiv * (1 << pdiv));
 
-	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
 
 	return rate;
 
@@ -316,7 +317,7 @@
 	/* Note: input is divided by 1000 to avoid overflow */
 	rate = ((2 * (parent_rate / 1000) * ndiv) / mdiv) * 1000;
 
-	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
 
 	return rate;
 }
@@ -338,7 +339,7 @@
 		/* Note: input is divided to avoid overflow */
 		rate = ((2 * (parent_rate/1000) * ndiv) / idf) * 1000;
 
-	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
 
 	return rate;
 }
@@ -365,7 +366,7 @@
 	/* Note: input is divided by 1000 to avoid overflow */
 	rate = (((parent_rate / 1000) * ldf) / (odf * idf)) * 1000;
 
-	pr_debug("%s:%s rate %lu\n", __clk_get_name(hw->clk), __func__, rate);
+	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);
 
 	return rate;
 }
diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile
index 058f273..f5a35b8 100644
--- a/drivers/clk/sunxi/Makefile
+++ b/drivers/clk/sunxi/Makefile
@@ -6,6 +6,7 @@
 obj-y += clk-a10-hosc.o
 obj-y += clk-a20-gmac.o
 obj-y += clk-mod0.o
+obj-y += clk-simple-gates.o
 obj-y += clk-sun8i-mbus.o
 obj-y += clk-sun9i-core.o
 obj-y += clk-sun9i-mmc.o
diff --git a/drivers/clk/sunxi/clk-a20-gmac.c b/drivers/clk/sunxi/clk-a20-gmac.c
index 0dcf4f2..1611b03 100644
--- a/drivers/clk/sunxi/clk-a20-gmac.c
+++ b/drivers/clk/sunxi/clk-a20-gmac.c
@@ -80,9 +80,7 @@
 		goto free_mux;
 
 	/* gmac clock requires exactly 2 parents */
-	parents[0] = of_clk_get_parent_name(node, 0);
-	parents[1] = of_clk_get_parent_name(node, 1);
-	if (!parents[0] || !parents[1])
+	if (of_clk_parent_fill(node, parents, 2) != 2)
 		goto free_gate;
 
 	reg = of_iomap(node, 0);
diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c
index 8c20190..59428db 100644
--- a/drivers/clk/sunxi/clk-factors.c
+++ b/drivers/clk/sunxi/clk-factors.c
@@ -79,41 +79,42 @@
 	return rate;
 }
 
-static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
-				       unsigned long min_rate,
-				       unsigned long max_rate,
-				       unsigned long *best_parent_rate,
-				       struct clk_hw **best_parent_p)
+static int clk_factors_determine_rate(struct clk_hw *hw,
+				      struct clk_rate_request *req)
 {
-	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+	struct clk_hw *parent, *best_parent = NULL;
 	int i, num_parents;
 	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
 
 	/* find the parent that can help provide the fastest rate <= rate */
-	num_parents = __clk_get_num_parents(clk);
+	num_parents = clk_hw_get_num_parents(hw);
 	for (i = 0; i < num_parents; i++) {
-		parent = clk_get_parent_by_index(clk, i);
+		parent = clk_hw_get_parent_by_index(hw, i);
 		if (!parent)
 			continue;
-		if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
-			parent_rate = __clk_round_rate(parent, rate);
+		if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+			parent_rate = clk_hw_round_rate(parent, req->rate);
 		else
-			parent_rate = __clk_get_rate(parent);
+			parent_rate = clk_hw_get_rate(parent);
 
-		child_rate = clk_factors_round_rate(hw, rate, &parent_rate);
+		child_rate = clk_factors_round_rate(hw, req->rate,
+						    &parent_rate);
 
-		if (child_rate <= rate && child_rate > best_child_rate) {
+		if (child_rate <= req->rate && child_rate > best_child_rate) {
 			best_parent = parent;
 			best = parent_rate;
 			best_child_rate = child_rate;
 		}
 	}
 
-	if (best_parent)
-		*best_parent_p = __clk_get_hw(best_parent);
-	*best_parent_rate = best;
+	if (!best_parent)
+		return -EINVAL;
 
-	return best_child_rate;
+	req->best_parent_hw = best_parent;
+	req->best_parent_rate = best;
+	req->rate = best_child_rate;
+
+	return 0;
 }
 
 static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -174,9 +175,7 @@
 	int i = 0;
 
 	/* if we have a mux, we will have >1 parents */
-	while (i < FACTORS_MAX_PARENTS &&
-	       (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
-		i++;
+	i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
 
 	/*
 	 * some factor clocks, such as pll5 and pll6, may have multiple
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index 9d028ae..d167e1e 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -14,10 +14,11 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 
 #include "clk-factors.h"
 
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
new file mode 100644
index 0000000..6ce9118
--- /dev/null
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2015 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+static DEFINE_SPINLOCK(gates_lock);
+
+static void __init sunxi_simple_gates_setup(struct device_node *node,
+					    const int protected[],
+					    int nprotected)
+{
+	struct clk_onecell_data *clk_data;
+	const char *clk_parent, *clk_name;
+	struct property *prop;
+	struct resource res;
+	void __iomem *clk_reg;
+	void __iomem *reg;
+	const __be32 *p;
+	int number, i = 0, j;
+	u8 clk_bit;
+	u32 index;
+
+	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+	if (IS_ERR(reg))
+		return;
+
+	clk_parent = of_clk_get_parent_name(node, 0);
+
+	clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
+	if (!clk_data)
+		goto err_unmap;
+
+	number = of_property_count_u32_elems(node, "clock-indices");
+	of_property_read_u32_index(node, "clock-indices", number - 1, &number);
+
+	clk_data->clks = kcalloc(number + 1, sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		goto err_free_data;
+
+	of_property_for_each_u32(node, "clock-indices", prop, p, index) {
+		of_property_read_string_index(node, "clock-output-names",
+					      i, &clk_name);
+
+		clk_reg = reg + 4 * (index / 32);
+		clk_bit = index % 32;
+
+		clk_data->clks[index] = clk_register_gate(NULL, clk_name,
+							  clk_parent, 0,
+							  clk_reg,
+							  clk_bit,
+							  0, &gates_lock);
+		i++;
+
+		if (IS_ERR(clk_data->clks[index])) {
+			WARN_ON(true);
+			continue;
+		}
+
+		for (j = 0; j < nprotected; j++)
+			if (protected[j] == index)
+				clk_prepare_enable(clk_data->clks[index]);
+
+	}
+
+	clk_data->clk_num = number + 1;
+	of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+	return;
+
+err_free_data:
+	kfree(clk_data);
+err_unmap:
+	iounmap(reg);
+	of_address_to_resource(node, 0, &res);
+	release_mem_region(res.start, resource_size(&res));
+}
+
+static void __init sunxi_simple_gates_init(struct device_node *node)
+{
+	sunxi_simple_gates_setup(node, NULL, 0);
+}
+
+CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun4i_a10_axi, "allwinner,sun4i-a10-axi-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a10s_apb0, "allwinner,sun5i-a10s-apb0-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a10s_apb1, "allwinner,sun5i-a10s-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a13_apb0, "allwinner,sun5i-a13-apb0-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun5i_a13_apb1, "allwinner,sun5i-a13-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun6i_a31_apb1, "allwinner,sun6i-a31-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun6i_a31_apb2, "allwinner,sun6i-a31-apb2-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun7i_a20_apb0, "allwinner,sun7i-a20-apb0-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun7i_a20_apb1, "allwinner,sun7i-a20-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a23_ahb1, "allwinner,sun8i-a23-ahb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a23_apb1, "allwinner,sun8i-a23-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_ahb2, "allwinner,sun9i-a80-ahb2-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-gates-clk",
+	       sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-gates-clk",
+	       sunxi_simple_gates_init);
+
+static const int sun4i_a10_ahb_critical_clocks[] __initconst = {
+	14,	/* ahb_sdram */
+};
+
+static void __init sun4i_a10_ahb_init(struct device_node *node)
+{
+	sunxi_simple_gates_setup(node, sun4i_a10_ahb_critical_clocks,
+				 ARRAY_SIZE(sun4i_a10_ahb_critical_clocks));
+}
+CLK_OF_DECLARE(sun4i_a10_ahb, "allwinner,sun4i-a10-ahb-gates-clk",
+	       sun4i_a10_ahb_init);
+CLK_OF_DECLARE(sun5i_a10s_ahb, "allwinner,sun5i-a10s-ahb-gates-clk",
+	       sun4i_a10_ahb_init);
+CLK_OF_DECLARE(sun5i_a13_ahb, "allwinner,sun5i-a13-ahb-gates-clk",
+	       sun4i_a10_ahb_init);
+CLK_OF_DECLARE(sun7i_a20_ahb, "allwinner,sun7i-a20-ahb-gates-clk",
+	       sun4i_a10_ahb_init);
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 63cf149..806fd01 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -44,28 +44,25 @@
 	return (parent_rate >> shift) / (div + 1);
 }
 
-static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
-				 unsigned long min_rate,
-				 unsigned long max_rate,
-				 unsigned long *best_parent_rate,
-				 struct clk_hw **best_parent_clk)
+static int ar100_determine_rate(struct clk_hw *hw,
+				struct clk_rate_request *req)
 {
-	int nparents = __clk_get_num_parents(hw->clk);
+	int nparents = clk_hw_get_num_parents(hw);
 	long best_rate = -EINVAL;
 	int i;
 
-	*best_parent_clk = NULL;
+	req->best_parent_hw = NULL;
 
 	for (i = 0; i < nparents; i++) {
 		unsigned long parent_rate;
 		unsigned long tmp_rate;
-		struct clk *parent;
+		struct clk_hw *parent;
 		unsigned long div;
 		int shift;
 
-		parent = clk_get_parent_by_index(hw->clk, i);
-		parent_rate = __clk_get_rate(parent);
-		div = DIV_ROUND_UP(parent_rate, rate);
+		parent = clk_hw_get_parent_by_index(hw, i);
+		parent_rate = clk_hw_get_rate(parent);
+		div = DIV_ROUND_UP(parent_rate, req->rate);
 
 		/*
 		 * The AR100 clk contains 2 divisors:
@@ -101,14 +98,19 @@
 			continue;
 
 		tmp_rate = (parent_rate >> shift) / div;
-		if (!*best_parent_clk || tmp_rate > best_rate) {
-			*best_parent_clk = __clk_get_hw(parent);
-			*best_parent_rate = parent_rate;
+		if (!req->best_parent_hw || tmp_rate > best_rate) {
+			req->best_parent_hw = parent;
+			req->best_parent_rate = parent_rate;
 			best_rate = tmp_rate;
 		}
 	}
 
-	return best_rate;
+	if (best_rate < 0)
+		return best_rate;
+
+	req->rate = best_rate;
+
+	return 0;
 }
 
 static int ar100_set_parent(struct clk_hw *hw, u8 index)
@@ -180,7 +182,6 @@
 	struct resource *r;
 	struct clk *clk;
 	int nparents;
-	int i;
 
 	ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
 	if (!ar100)
@@ -195,8 +196,7 @@
 	if (nparents > SUN6I_AR100_MAX_PARENTS)
 		nparents = SUN6I_AR100_MAX_PARENTS;
 
-	for (i = 0; i < nparents; i++)
-		parents[i] = of_clk_get_parent_name(np, i);
+	of_clk_parent_fill(np, parents, nparents);
 
 	of_property_read_string(np, "clock-output-names", &clk_name);
 
diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c
index 14cd026..bf117a6 100644
--- a/drivers/clk/sunxi/clk-sun8i-mbus.c
+++ b/drivers/clk/sunxi/clk-sun8i-mbus.c
@@ -14,8 +14,8 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of_address.h>
 
 #include "clk-factors.h"
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
index 887f4ea..6c4c983 100644
--- a/drivers/clk/sunxi/clk-sun9i-core.c
+++ b/drivers/clk/sunxi/clk-sun9i-core.c
@@ -14,8 +14,8 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/log2.h>
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
index 710c273..3436a948 100644
--- a/drivers/clk/sunxi/clk-sun9i-mmc.c
+++ b/drivers/clk/sunxi/clk-sun9i-mmc.c
@@ -14,14 +14,15 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/reset.h>
 #include <linux/platform_device.h>
 #include <linux/reset-controller.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 #define SUN9I_MMC_WIDTH		4
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c
index abf7b37..413070d 100644
--- a/drivers/clk/sunxi/clk-sunxi.c
+++ b/drivers/clk/sunxi/clk-sunxi.c
@@ -14,11 +14,13 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/log2.h>
 
@@ -118,42 +120,42 @@
 	return (parent_rate / calcm) >> calcp;
 }
 
-static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
-					  unsigned long min_rate,
-					  unsigned long max_rate,
-					  unsigned long *best_parent_rate,
-					  struct clk_hw **best_parent_clk)
+static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw,
+					 struct clk_rate_request *req)
 {
-	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+	struct clk_hw *parent, *best_parent = NULL;
 	int i, num_parents;
 	unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
 
 	/* find the parent that can help provide the fastest rate <= rate */
-	num_parents = __clk_get_num_parents(clk);
+	num_parents = clk_hw_get_num_parents(hw);
 	for (i = 0; i < num_parents; i++) {
-		parent = clk_get_parent_by_index(clk, i);
+		parent = clk_hw_get_parent_by_index(hw, i);
 		if (!parent)
 			continue;
-		if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
-			parent_rate = __clk_round_rate(parent, rate);
+		if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
+			parent_rate = clk_hw_round_rate(parent, req->rate);
 		else
-			parent_rate = __clk_get_rate(parent);
+			parent_rate = clk_hw_get_rate(parent);
 
-		child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
+		child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i,
 						  parent_rate);
 
-		if (child_rate <= rate && child_rate > best_child_rate) {
+		if (child_rate <= req->rate && child_rate > best_child_rate) {
 			best_parent = parent;
 			best = parent_rate;
 			best_child_rate = child_rate;
 		}
 	}
 
-	if (best_parent)
-		*best_parent_clk = __clk_get_hw(best_parent);
-	*best_parent_rate = best;
+	if (!best_parent)
+		return -EINVAL;
 
-	return best_child_rate;
+	req->best_parent_hw = best_parent;
+	req->best_parent_rate = best;
+	req->rate = best_child_rate;
+
+	return 0;
 }
 
 static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -195,17 +197,14 @@
 	const char *clk_name = node->name;
 	const char *parents[SUN6I_AHB1_MAX_PARENTS];
 	void __iomem *reg;
-	int i = 0;
+	int i;
 
 	reg = of_io_request_and_map(node, 0, of_node_full_name(node));
 	if (IS_ERR(reg))
 		return;
 
 	/* we have a mux, we will have >1 parents */
-	while (i < SUN6I_AHB1_MAX_PARENTS &&
-	       (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
-		i++;
-
+	i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS);
 	of_property_read_string(node, "clock-output-names", &clk_name);
 
 	ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
@@ -786,14 +785,11 @@
 	const char *clk_name = node->name;
 	const char *parents[SUNXI_MAX_PARENTS];
 	void __iomem *reg;
-	int i = 0;
+	int i;
 
 	reg = of_iomap(node, 0);
 
-	while (i < SUNXI_MAX_PARENTS &&
-	       (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
-		i++;
-
+	i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
 	of_property_read_string(node, "clock-output-names", &clk_name);
 
 	clk = clk_register_mux(NULL, clk_name, parents, i,
@@ -900,150 +896,6 @@
 	DECLARE_BITMAP(mask, SUNXI_GATES_MAX_SIZE);
 };
 
-static const struct gates_data sun4i_axi_gates_data __initconst = {
-	.mask = {1},
-};
-
-static const struct gates_data sun4i_ahb_gates_data __initconst = {
-	.mask = {0x7F77FFF, 0x14FB3F},
-};
-
-static const struct gates_data sun5i_a10s_ahb_gates_data __initconst = {
-	.mask = {0x147667e7, 0x185915},
-};
-
-static const struct gates_data sun5i_a13_ahb_gates_data __initconst = {
-	.mask = {0x107067e7, 0x185111},
-};
-
-static const struct gates_data sun6i_a31_ahb1_gates_data __initconst = {
-	.mask = {0xEDFE7F62, 0x794F931},
-};
-
-static const struct gates_data sun7i_a20_ahb_gates_data __initconst = {
-	.mask = { 0x12f77fff, 0x16ff3f },
-};
-
-static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
-	.mask = {0x25386742, 0x2505111},
-};
-
-static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = {
-	.mask = {0xF5F12B},
-};
-
-static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = {
-	.mask = {0x1E20003},
-};
-
-static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = {
-	.mask = {0x9B7},
-};
-
-static const struct gates_data sun4i_apb0_gates_data __initconst = {
-	.mask = {0x4EF},
-};
-
-static const struct gates_data sun5i_a10s_apb0_gates_data __initconst = {
-	.mask = {0x469},
-};
-
-static const struct gates_data sun5i_a13_apb0_gates_data __initconst = {
-	.mask = {0x61},
-};
-
-static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
-	.mask = { 0x4ff },
-};
-
-static const struct gates_data sun9i_a80_apb0_gates_data __initconst = {
-	.mask = {0xEB822},
-};
-
-static const struct gates_data sun4i_apb1_gates_data __initconst = {
-	.mask = {0xFF00F7},
-};
-
-static const struct gates_data sun5i_a10s_apb1_gates_data __initconst = {
-	.mask = {0xf0007},
-};
-
-static const struct gates_data sun5i_a13_apb1_gates_data __initconst = {
-	.mask = {0xa0007},
-};
-
-static const struct gates_data sun6i_a31_apb1_gates_data __initconst = {
-	.mask = {0x3031},
-};
-
-static const struct gates_data sun8i_a23_apb1_gates_data __initconst = {
-	.mask = {0x3021},
-};
-
-static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
-	.mask = {0x3F000F},
-};
-
-static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
-	.mask = { 0xff80ff },
-};
-
-static const struct gates_data sun9i_a80_apb1_gates_data __initconst = {
-	.mask = {0x3F001F},
-};
-
-static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
-	.mask = {0x1F0007},
-};
-
-static void __init sunxi_gates_clk_setup(struct device_node *node,
-					 struct gates_data *data)
-{
-	struct clk_onecell_data *clk_data;
-	const char *clk_parent;
-	const char *clk_name;
-	void __iomem *reg;
-	int qty;
-	int i = 0;
-	int j = 0;
-
-	reg = of_iomap(node, 0);
-
-	clk_parent = of_clk_get_parent_name(node, 0);
-
-	/* Worst-case size approximation and memory allocation */
-	qty = find_last_bit(data->mask, SUNXI_GATES_MAX_SIZE);
-	clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
-	if (!clk_data)
-		return;
-	clk_data->clks = kzalloc((qty+1) * sizeof(struct clk *), GFP_KERNEL);
-	if (!clk_data->clks) {
-		kfree(clk_data);
-		return;
-	}
-
-	for_each_set_bit(i, data->mask, SUNXI_GATES_MAX_SIZE) {
-		of_property_read_string_index(node, "clock-output-names",
-					      j, &clk_name);
-
-		clk_data->clks[i] = clk_register_gate(NULL, clk_name,
-						      clk_parent, 0,
-						      reg + 4 * (i/32), i % 32,
-						      0, &clk_lock);
-		WARN_ON(IS_ERR(clk_data->clks[i]));
-		clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
-
-		j++;
-	}
-
-	/* Adjust to the real max */
-	clk_data->clk_num = i;
-
-	of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
-}
-
-
-
 /**
  * sunxi_divs_clk_setup() helper data
  */
@@ -1281,34 +1133,6 @@
 	{}
 };
 
-/* Matches for gate clocks */
-static const struct of_device_id clk_gates_match[] __initconst = {
-	{.compatible = "allwinner,sun4i-a10-axi-gates-clk", .data = &sun4i_axi_gates_data,},
-	{.compatible = "allwinner,sun4i-a10-ahb-gates-clk", .data = &sun4i_ahb_gates_data,},
-	{.compatible = "allwinner,sun5i-a10s-ahb-gates-clk", .data = &sun5i_a10s_ahb_gates_data,},
-	{.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
-	{.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
-	{.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
-	{.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
-	{.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,},
-	{.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,},
-	{.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,},
-	{.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
-	{.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
-	{.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
-	{.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
-	{.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,},
-	{.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
-	{.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
-	{.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
-	{.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
-	{.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
-	{.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
-	{.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
-	{.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
-	{.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
-	{}
-};
 
 static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
 					      void *function)
@@ -1340,9 +1164,6 @@
 	/* Register mux clocks */
 	of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
 
-	/* Register gate clocks */
-	of_sunxi_table_clock_setup(clk_gates_match, sunxi_gates_clk_setup);
-
 	/* Protect the clocks that needs to stay on */
 	for (i = 0; i < nclocks; i++) {
 		struct clk *clk = clk_get(NULL, clocks[i]);
@@ -1354,7 +1175,6 @@
 
 static const char *sun4i_a10_critical_clocks[] __initdata = {
 	"pll5_ddr",
-	"ahb_sdram",
 };
 
 static void __init sun4i_a10_init_clocks(struct device_node *node)
@@ -1367,7 +1187,6 @@
 static const char *sun5i_critical_clocks[] __initdata = {
 	"cpu",
 	"pll5_ddr",
-	"ahb_sdram",
 };
 
 static void __init sun5i_init_clocks(struct device_node *node)
diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c
index 3a25f95..1a72cd6 100644
--- a/drivers/clk/sunxi/clk-usb.c
+++ b/drivers/clk/sunxi/clk-usb.c
@@ -14,11 +14,12 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 
 
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index aec862b..826c325 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -1,5 +1,6 @@
 obj-y					+= clk.o
 obj-y					+= clk-audio-sync.o
+obj-y					+= clk-dfll.o
 obj-y					+= clk-divider.o
 obj-y					+= clk-periph.o
 obj-y					+= clk-periph-gate.o
@@ -16,4 +17,6 @@
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)	+= clk-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)	+= clk-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC)	+= clk-tegra124-dfll-fcpu.o
 obj-$(CONFIG_ARCH_TEGRA_132_SOC)	+= clk-tegra124.o
+obj-y					+= cvb.o
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
new file mode 100644
index 0000000..c2ff859
--- /dev/null
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -0,0 +1,1757 @@
+/*
+ * clk-dfll.c - Tegra DFLL clock source common code
+ *
+ * Copyright (C) 2012-2014 NVIDIA Corporation. All rights reserved.
+ *
+ * Aleksandr Frid <afrid@nvidia.com>
+ * Paul Walmsley <pwalmsley@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * This library is for the DVCO and DFLL IP blocks on the Tegra124
+ * SoC. These IP blocks together are also known at NVIDIA as
+ * "CL-DVFS". To try to avoid confusion, this code refers to them
+ * collectively as the "DFLL."
+ *
+ * The DFLL is a root clocksource which tolerates some amount of
+ * supply voltage noise. Tegra124 uses it to clock the fast CPU
+ * complex when the target CPU speed is above a particular rate. The
+ * DFLL can be operated in either open-loop mode or closed-loop mode.
+ * In open-loop mode, the DFLL generates an output clock appropriate
+ * to the supply voltage. In closed-loop mode, when configured with a
+ * target frequency, the DFLL minimizes supply voltage while
+ * delivering an average frequency equal to the target.
+ *
+ * Devices clocked by the DFLL must be able to tolerate frequency
+ * variation. In the case of the CPU, it's important to note that the
+ * CPU cycle time will vary. This has implications for
+ * performance-measurement code and any code that relies on the CPU
+ * cycle time to delay for a certain length of time.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+
+#include "clk-dfll.h"
+
+/*
+ * DFLL control registers - access via dfll_{readl,writel}
+ */
+
+/* DFLL_CTRL: DFLL control register */
+#define DFLL_CTRL			0x00
+#define DFLL_CTRL_MODE_MASK		0x03
+
+/* DFLL_CONFIG: DFLL sample rate control */
+#define DFLL_CONFIG			0x04
+#define DFLL_CONFIG_DIV_MASK		0xff
+#define DFLL_CONFIG_DIV_PRESCALE	32
+
+/* DFLL_PARAMS: tuning coefficients for closed loop integrator */
+#define DFLL_PARAMS			0x08
+#define DFLL_PARAMS_CG_SCALE		(0x1 << 24)
+#define DFLL_PARAMS_FORCE_MODE_SHIFT	22
+#define DFLL_PARAMS_FORCE_MODE_MASK	(0x3 << DFLL_PARAMS_FORCE_MODE_SHIFT)
+#define DFLL_PARAMS_CF_PARAM_SHIFT	16
+#define DFLL_PARAMS_CF_PARAM_MASK	(0x3f << DFLL_PARAMS_CF_PARAM_SHIFT)
+#define DFLL_PARAMS_CI_PARAM_SHIFT	8
+#define DFLL_PARAMS_CI_PARAM_MASK	(0x7 << DFLL_PARAMS_CI_PARAM_SHIFT)
+#define DFLL_PARAMS_CG_PARAM_SHIFT	0
+#define DFLL_PARAMS_CG_PARAM_MASK	(0xff << DFLL_PARAMS_CG_PARAM_SHIFT)
+
+/* DFLL_TUNE0: delay line configuration register 0 */
+#define DFLL_TUNE0			0x0c
+
+/* DFLL_TUNE1: delay line configuration register 1 */
+#define DFLL_TUNE1			0x10
+
+/* DFLL_FREQ_REQ: target DFLL frequency control */
+#define DFLL_FREQ_REQ			0x14
+#define DFLL_FREQ_REQ_FORCE_ENABLE	(0x1 << 28)
+#define DFLL_FREQ_REQ_FORCE_SHIFT	16
+#define DFLL_FREQ_REQ_FORCE_MASK	(0xfff << DFLL_FREQ_REQ_FORCE_SHIFT)
+#define FORCE_MAX			2047
+#define FORCE_MIN			-2048
+#define DFLL_FREQ_REQ_SCALE_SHIFT	8
+#define DFLL_FREQ_REQ_SCALE_MASK	(0xff << DFLL_FREQ_REQ_SCALE_SHIFT)
+#define DFLL_FREQ_REQ_SCALE_MAX		256
+#define DFLL_FREQ_REQ_FREQ_VALID	(0x1 << 7)
+#define DFLL_FREQ_REQ_MULT_SHIFT	0
+#define DFLL_FREQ_REG_MULT_MASK		(0x7f << DFLL_FREQ_REQ_MULT_SHIFT)
+#define FREQ_MAX			127
+
+/* DFLL_DROOP_CTRL: droop prevention control */
+#define DFLL_DROOP_CTRL			0x1c
+
+/* DFLL_OUTPUT_CFG: closed loop mode control registers */
+/* NOTE: access via dfll_i2c_{readl,writel} */
+#define DFLL_OUTPUT_CFG			0x20
+#define DFLL_OUTPUT_CFG_I2C_ENABLE	(0x1 << 30)
+#define OUT_MASK			0x3f
+#define DFLL_OUTPUT_CFG_SAFE_SHIFT	24
+#define DFLL_OUTPUT_CFG_SAFE_MASK	\
+		(OUT_MASK << DFLL_OUTPUT_CFG_SAFE_SHIFT)
+#define DFLL_OUTPUT_CFG_MAX_SHIFT	16
+#define DFLL_OUTPUT_CFG_MAX_MASK	\
+		(OUT_MASK << DFLL_OUTPUT_CFG_MAX_SHIFT)
+#define DFLL_OUTPUT_CFG_MIN_SHIFT	8
+#define DFLL_OUTPUT_CFG_MIN_MASK	\
+		(OUT_MASK << DFLL_OUTPUT_CFG_MIN_SHIFT)
+#define DFLL_OUTPUT_CFG_PWM_DELTA	(0x1 << 7)
+#define DFLL_OUTPUT_CFG_PWM_ENABLE	(0x1 << 6)
+#define DFLL_OUTPUT_CFG_PWM_DIV_SHIFT	0
+#define DFLL_OUTPUT_CFG_PWM_DIV_MASK	\
+		(OUT_MASK << DFLL_OUTPUT_CFG_PWM_DIV_SHIFT)
+
+/* DFLL_OUTPUT_FORCE: closed loop mode voltage forcing control */
+#define DFLL_OUTPUT_FORCE		0x24
+#define DFLL_OUTPUT_FORCE_ENABLE	(0x1 << 6)
+#define DFLL_OUTPUT_FORCE_VALUE_SHIFT	0
+#define DFLL_OUTPUT_FORCE_VALUE_MASK	\
+		(OUT_MASK << DFLL_OUTPUT_FORCE_VALUE_SHIFT)
+
+/* DFLL_MONITOR_CTRL: internal monitor data source control */
+#define DFLL_MONITOR_CTRL		0x28
+#define DFLL_MONITOR_CTRL_FREQ		6
+
+/* DFLL_MONITOR_DATA: internal monitor data output */
+#define DFLL_MONITOR_DATA		0x2c
+#define DFLL_MONITOR_DATA_NEW_MASK	(0x1 << 16)
+#define DFLL_MONITOR_DATA_VAL_SHIFT	0
+#define DFLL_MONITOR_DATA_VAL_MASK	(0xFFFF << DFLL_MONITOR_DATA_VAL_SHIFT)
+
+/*
+ * I2C output control registers - access via dfll_i2c_{readl,writel}
+ */
+
+/* DFLL_I2C_CFG: I2C controller configuration register */
+#define DFLL_I2C_CFG			0x40
+#define DFLL_I2C_CFG_ARB_ENABLE		(0x1 << 20)
+#define DFLL_I2C_CFG_HS_CODE_SHIFT	16
+#define DFLL_I2C_CFG_HS_CODE_MASK	(0x7 << DFLL_I2C_CFG_HS_CODE_SHIFT)
+#define DFLL_I2C_CFG_PACKET_ENABLE	(0x1 << 15)
+#define DFLL_I2C_CFG_SIZE_SHIFT		12
+#define DFLL_I2C_CFG_SIZE_MASK		(0x7 << DFLL_I2C_CFG_SIZE_SHIFT)
+#define DFLL_I2C_CFG_SLAVE_ADDR_10	(0x1 << 10)
+#define DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_7BIT	1
+#define DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_10BIT	0
+
+/* DFLL_I2C_VDD_REG_ADDR: PMIC I2C address for closed loop mode */
+#define DFLL_I2C_VDD_REG_ADDR		0x44
+
+/* DFLL_I2C_STS: I2C controller status */
+#define DFLL_I2C_STS			0x48
+#define DFLL_I2C_STS_I2C_LAST_SHIFT	1
+#define DFLL_I2C_STS_I2C_REQ_PENDING	0x1
+
+/* DFLL_INTR_STS: DFLL interrupt status register */
+#define DFLL_INTR_STS			0x5c
+
+/* DFLL_INTR_EN: DFLL interrupt enable register */
+#define DFLL_INTR_EN			0x60
+#define DFLL_INTR_MIN_MASK		0x1
+#define DFLL_INTR_MAX_MASK		0x2
+
+/*
+ * Integrated I2C controller registers - relative to td->i2c_controller_base
+ */
+
+/* DFLL_I2C_CLK_DIVISOR: I2C controller clock divisor */
+#define DFLL_I2C_CLK_DIVISOR		0x6c
+#define DFLL_I2C_CLK_DIVISOR_MASK	0xffff
+#define DFLL_I2C_CLK_DIVISOR_FS_SHIFT	16
+#define DFLL_I2C_CLK_DIVISOR_HS_SHIFT	0
+#define DFLL_I2C_CLK_DIVISOR_PREDIV	8
+#define DFLL_I2C_CLK_DIVISOR_HSMODE_PREDIV	12
+
+/*
+ * Other constants
+ */
+
+/* MAX_DFLL_VOLTAGES: number of LUT entries in the DFLL IP block */
+#define MAX_DFLL_VOLTAGES		33
+
+/*
+ * REF_CLK_CYC_PER_DVCO_SAMPLE: the number of ref_clk cycles that the hardware
+ *    integrates the DVCO counter over - used for debug rate monitoring and
+ *    droop control
+ */
+#define REF_CLK_CYC_PER_DVCO_SAMPLE	4
+
+/*
+ * REF_CLOCK_RATE: the DFLL reference clock rate currently supported by this
+ * driver, in Hz
+ */
+#define REF_CLOCK_RATE			51000000UL
+
+#define DVCO_RATE_TO_MULT(rate, ref_rate)	((rate) / ((ref_rate) / 2))
+#define MULT_TO_DVCO_RATE(mult, ref_rate)	((mult) * ((ref_rate) / 2))
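+
+/*
+ * Worked example (illustrative arithmetic only): with the 51 MHz reference
+ * clock above, one MULT step corresponds to ref_rate / 2 = 25.5 MHz, so
+ * DVCO_RATE_TO_MULT(1020000000, 51000000) evaluates to 40 and
+ * MULT_TO_DVCO_RATE(40, 51000000) maps back to 1.02 GHz.
+ */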
+
+/**
+ * enum dfll_ctrl_mode - DFLL hardware operating mode
+ * @DFLL_UNINITIALIZED: (uninitialized state - not in hardware bitfield)
+ * @DFLL_DISABLED: DFLL not generating an output clock
+ * @DFLL_OPEN_LOOP: DVCO running, but DFLL not adjusting voltage
+ * @DFLL_CLOSED_LOOP: DVCO running, and DFLL adjusting voltage to match
+ *		      the requested rate
+ *
+ * The integer corresponding to the last three states, minus one, is
+ * written to the DFLL hardware to change operating modes.
+ */
+enum dfll_ctrl_mode {
+	DFLL_UNINITIALIZED = 0,
+	DFLL_DISABLED = 1,
+	DFLL_OPEN_LOOP = 2,
+	DFLL_CLOSED_LOOP = 3,
+};
+
+/**
+ * enum dfll_tune_range - voltage range that the driver believes it's in
+ * @DFLL_TUNE_UNINITIALIZED: DFLL tuning not yet programmed
+ * @DFLL_TUNE_LOW: DFLL in the low-voltage range (or open-loop mode)
+ *
+ * Some DFLL tuning parameters may need to change depending on the
+ * DVCO's voltage; these states represent the ranges that the driver
+ * supports. These are software states; these values are never
+ * written into registers.
+ */
+enum dfll_tune_range {
+	DFLL_TUNE_UNINITIALIZED = 0,
+	DFLL_TUNE_LOW = 1,
+};
+
+/**
+ * struct dfll_rate_req - target DFLL rate request data
+ * @rate: target frequency, after the postscaling
+ * @dvco_target_rate: target DVCO frequency, before the postscaling
+ * @lut_index: LUT index of the voltage at which dvco_target_rate will be reached
+ * @mult_bits: value to program to the MULT bits of the DFLL_FREQ_REQ register
+ * @scale_bits: value to program to the SCALE bits of the DFLL_FREQ_REQ register
+ */
+struct dfll_rate_req {
+	unsigned long rate;
+	unsigned long dvco_target_rate;
+	int lut_index;
+	u8 mult_bits;
+	u8 scale_bits;
+};
+
+struct tegra_dfll {
+	struct device			*dev;
+	struct tegra_dfll_soc_data	*soc;
+
+	void __iomem			*base;
+	void __iomem			*i2c_base;
+	void __iomem			*i2c_controller_base;
+	void __iomem			*lut_base;
+
+	struct regulator		*vdd_reg;
+	struct clk			*soc_clk;
+	struct clk			*ref_clk;
+	struct clk			*i2c_clk;
+	struct clk			*dfll_clk;
+	struct reset_control		*dvco_rst;
+	unsigned long			ref_rate;
+	unsigned long			i2c_clk_rate;
+	unsigned long			dvco_rate_min;
+
+	enum dfll_ctrl_mode		mode;
+	enum dfll_tune_range		tune_range;
+	struct dentry			*debugfs_dir;
+	struct clk_hw			dfll_clk_hw;
+	const char			*output_clock_name;
+	struct dfll_rate_req		last_req;
+	unsigned long			last_unrounded_rate;
+
+	/* Parameters from DT */
+	u32				droop_ctrl;
+	u32				sample_rate;
+	u32				force_mode;
+	u32				cf;
+	u32				ci;
+	u32				cg;
+	bool				cg_scale;
+
+	/* I2C interface parameters */
+	u32				i2c_fs_rate;
+	u32				i2c_reg;
+	u32				i2c_slave_addr;
+
+	/* i2c_lut array entries are regulator framework selectors */
+	unsigned			i2c_lut[MAX_DFLL_VOLTAGES];
+	int				i2c_lut_size;
+	u8				lut_min, lut_max, lut_safe;
+};
+
+#define clk_hw_to_dfll(_hw) container_of(_hw, struct tegra_dfll, dfll_clk_hw)
+
+/* mode_name: map numeric DFLL modes to names for friendly console messages */
+static const char * const mode_name[] = {
+	[DFLL_UNINITIALIZED] = "uninitialized",
+	[DFLL_DISABLED] = "disabled",
+	[DFLL_OPEN_LOOP] = "open_loop",
+	[DFLL_CLOSED_LOOP] = "closed_loop",
+};
+
+/*
+ * Register accessors
+ */
+
+static inline u32 dfll_readl(struct tegra_dfll *td, u32 offs)
+{
+	return __raw_readl(td->base + offs);
+}
+
+static inline void dfll_writel(struct tegra_dfll *td, u32 val, u32 offs)
+{
+	WARN_ON(offs >= DFLL_I2C_CFG);
+	__raw_writel(val, td->base + offs);
+}
+
+static inline void dfll_wmb(struct tegra_dfll *td)
+{
+	dfll_readl(td, DFLL_CTRL);
+}
+
+/* I2C output control registers - for addresses above DFLL_I2C_CFG */
+
+static inline u32 dfll_i2c_readl(struct tegra_dfll *td, u32 offs)
+{
+	return __raw_readl(td->i2c_base + offs);
+}
+
+static inline void dfll_i2c_writel(struct tegra_dfll *td, u32 val, u32 offs)
+{
+	__raw_writel(val, td->i2c_base + offs);
+}
+
+static inline void dfll_i2c_wmb(struct tegra_dfll *td)
+{
+	dfll_i2c_readl(td, DFLL_I2C_CFG);
+}
+
+/**
+ * dfll_is_running - is the DFLL currently generating a clock?
+ * @td: DFLL instance
+ *
+ * If the DFLL is currently generating an output clock signal, return
+ * true; otherwise return false.
+ */
+static bool dfll_is_running(struct tegra_dfll *td)
+{
+	return td->mode >= DFLL_OPEN_LOOP;
+}
+
+/*
+ * Runtime PM suspend/resume callbacks
+ */
+
+/**
+ * tegra_dfll_runtime_resume - enable all clocks needed by the DFLL
+ * @dev: DFLL device *
+ *
+ * Enable all clocks needed by the DFLL. Assumes that clk_prepare()
+ * has already been called on all the clocks.
+ *
+ * XXX Should also handle context restore when returning from off.
+ */
+int tegra_dfll_runtime_resume(struct device *dev)
+{
+	struct tegra_dfll *td = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_enable(td->ref_clk);
+	if (ret) {
+		dev_err(dev, "could not enable ref clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_enable(td->soc_clk);
+	if (ret) {
+		dev_err(dev, "could not enable register clock: %d\n", ret);
+		clk_disable(td->ref_clk);
+		return ret;
+	}
+
+	ret = clk_enable(td->i2c_clk);
+	if (ret) {
+		dev_err(dev, "could not enable i2c clock: %d\n", ret);
+		clk_disable(td->soc_clk);
+		clk_disable(td->ref_clk);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_runtime_resume);
+
+/**
+ * tegra_dfll_runtime_suspend - disable all clocks needed by the DFLL
+ * @dev: DFLL device *
+ *
+ * Disable all clocks needed by the DFLL. Assumes that other code
+ * will later call clk_unprepare().
+ */
+int tegra_dfll_runtime_suspend(struct device *dev)
+{
+	struct tegra_dfll *td = dev_get_drvdata(dev);
+
+	clk_disable(td->ref_clk);
+	clk_disable(td->soc_clk);
+	clk_disable(td->i2c_clk);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_runtime_suspend);
+
+/*
+ * DFLL tuning operations (per-voltage-range tuning settings)
+ */
+
+/**
+ * dfll_tune_low - apply the DFLL and CPU tuning settings valid for any voltage
+ * @td: DFLL instance
+ *
+ * Tune the DFLL oscillator parameters and the CPU clock shaper for
+ * the low-voltage range. These settings are valid for any voltage,
+ * but may not be optimal.
+ */
+static void dfll_tune_low(struct tegra_dfll *td)
+{
+	td->tune_range = DFLL_TUNE_LOW;
+
+	dfll_writel(td, td->soc->tune0_low, DFLL_TUNE0);
+	dfll_writel(td, td->soc->tune1, DFLL_TUNE1);
+	dfll_wmb(td);
+
+	if (td->soc->set_clock_trimmers_low)
+		td->soc->set_clock_trimmers_low();
+}
+
+/*
+ * Output clock scaler helpers
+ */
+
+/**
+ * dfll_scale_dvco_rate - calculate scaled rate from the DVCO rate
+ * @scale_bits: clock scaler value (bits in the DFLL_FREQ_REQ_SCALE field)
+ * @dvco_rate: the DVCO rate
+ *
+ * Apply the same scaling formula that the DFLL hardware uses to scale
+ * the DVCO rate.
+ */
+static unsigned long dfll_scale_dvco_rate(int scale_bits,
+					  unsigned long dvco_rate)
+{
+	return (u64)dvco_rate * (scale_bits + 1) / DFLL_FREQ_REQ_SCALE_MAX;
+}
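+
+/*
+ * Worked example (illustrative arithmetic only): with
+ * DFLL_FREQ_REQ_SCALE_MAX = 256, a scale_bits value of 127 multiplies the
+ * DVCO rate by 128/256, so a 1 GHz DVCO rate scales to 500 MHz at the output.
+ */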
+
+/*
+ * Monitor control
+ */
+
+/**
+ * dfll_calc_monitored_rate - convert DFLL_MONITOR_DATA_VAL rate into real freq
+ * @monitor_data: value read from the DFLL_MONITOR_DATA_VAL bitfield
+ * @ref_rate: DFLL reference clock rate
+ *
+ * Convert @monitor_data from DFLL_MONITOR_DATA_VAL units into cycles
+ * per second. Returns the converted value.
+ */
+static u64 dfll_calc_monitored_rate(u32 monitor_data,
+				    unsigned long ref_rate)
+{
+	return monitor_data * (ref_rate / REF_CLK_CYC_PER_DVCO_SAMPLE);
+}
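+
+/*
+ * Worked example (illustrative arithmetic only): with ref_rate = 51 MHz and
+ * REF_CLK_CYC_PER_DVCO_SAMPLE = 4, each unit of DFLL_MONITOR_DATA_VAL
+ * contributes 51000000 / 4 = 12.75 MHz, so a monitor value of 80 converts
+ * to a pre-scaler rate of 1.02 GHz.
+ */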
+
+/**
+ * dfll_read_monitor_rate - return the DFLL's output rate from internal monitor
+ * @td: DFLL instance
+ *
+ * If the DFLL is enabled, return the last rate reported by the DFLL's
+ * internal monitoring hardware. This works in both open-loop and
+ * closed-loop mode, and takes the output scaler setting into account.
+ * Assumes that the monitor was programmed to monitor frequency before
+ * the sample period started. If the driver believes that the DFLL is
+ * currently uninitialized or disabled, it will return 0, since
+ * otherwise the DFLL monitor data register will return the last
+ * measured rate from when the DFLL was active.
+ */
+static u64 dfll_read_monitor_rate(struct tegra_dfll *td)
+{
+	u32 v, s;
+	u64 pre_scaler_rate, post_scaler_rate;
+
+	if (!dfll_is_running(td))
+		return 0;
+
+	v = dfll_readl(td, DFLL_MONITOR_DATA);
+	v = (v & DFLL_MONITOR_DATA_VAL_MASK) >> DFLL_MONITOR_DATA_VAL_SHIFT;
+	pre_scaler_rate = dfll_calc_monitored_rate(v, td->ref_rate);
+
+	s = dfll_readl(td, DFLL_FREQ_REQ);
+	s = (s & DFLL_FREQ_REQ_SCALE_MASK) >> DFLL_FREQ_REQ_SCALE_SHIFT;
+	post_scaler_rate = dfll_scale_dvco_rate(s, pre_scaler_rate);
+
+	return post_scaler_rate;
+}
+
+/*
+ * DFLL mode switching
+ */
+
+/**
+ * dfll_set_mode - change the DFLL control mode
+ * @td: DFLL instance
+ * @mode: DFLL control mode (see enum dfll_ctrl_mode)
+ *
+ * Change the DFLL's operating mode between disabled, open-loop mode,
+ * and closed-loop mode, or vice versa.
+ */
+static void dfll_set_mode(struct tegra_dfll *td,
+			  enum dfll_ctrl_mode mode)
+{
+	td->mode = mode;
+	dfll_writel(td, mode - 1, DFLL_CTRL);
+	dfll_wmb(td);
+}
+
+/*
+ * DFLL-to-I2C controller interface
+ */
+
+/**
+ * dfll_i2c_set_output_enabled - enable/disable I2C PMIC voltage requests
+ * @td: DFLL instance
+ * @enable: whether to enable or disable the I2C voltage requests
+ *
+ * Set the master enable control for I2C control value updates. If disabled,
+ * then I2C control messages are inhibited, regardless of the DFLL mode.
+ */
+static int dfll_i2c_set_output_enabled(struct tegra_dfll *td, bool enable)
+{
+	u32 val;
+
+	val = dfll_i2c_readl(td, DFLL_OUTPUT_CFG);
+
+	if (enable)
+		val |= DFLL_OUTPUT_CFG_I2C_ENABLE;
+	else
+		val &= ~DFLL_OUTPUT_CFG_I2C_ENABLE;
+
+	dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
+	dfll_i2c_wmb(td);
+
+	return 0;
+}
+
+/**
+ * dfll_load_i2c_lut - load the voltage lookup table
+ * @td: DFLL instance
+ *
+ * Load the voltage-to-PMIC register value lookup table into the DFLL
+ * IP block memory. Look-up tables can be loaded at any time.
+ */
+static void dfll_load_i2c_lut(struct tegra_dfll *td)
+{
+	int i, lut_index;
+	u32 val;
+
+	for (i = 0; i < MAX_DFLL_VOLTAGES; i++) {
+		if (i < td->lut_min)
+			lut_index = td->lut_min;
+		else if (i > td->lut_max)
+			lut_index = td->lut_max;
+		else
+			lut_index = i;
+
+		val = regulator_list_hardware_vsel(td->vdd_reg,
+						     td->i2c_lut[lut_index]);
+		__raw_writel(val, td->lut_base + i * 4);
+	}
+
+	dfll_i2c_wmb(td);
+}
+
+/**
+ * dfll_init_i2c_if - set up the DFLL's DFLL-I2C interface
+ * @td: DFLL instance
+ *
+ * During DFLL driver initialization, program the DFLL-I2C interface
+ * with the PMU slave address, vdd register offset, and transfer mode.
+ * This data is used by the DFLL to automatically construct I2C
+ * voltage-set commands, which are then passed to the DFLL's internal
+ * I2C controller.
+ */
+static void dfll_init_i2c_if(struct tegra_dfll *td)
+{
+	u32 val;
+
+	if (td->i2c_slave_addr > 0x7f) {
+		val = td->i2c_slave_addr << DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_10BIT;
+		val |= DFLL_I2C_CFG_SLAVE_ADDR_10;
+	} else {
+		val = td->i2c_slave_addr << DFLL_I2C_CFG_SLAVE_ADDR_SHIFT_7BIT;
+	}
+	val |= DFLL_I2C_CFG_SIZE_MASK;
+	val |= DFLL_I2C_CFG_ARB_ENABLE;
+	dfll_i2c_writel(td, val, DFLL_I2C_CFG);
+
+	dfll_i2c_writel(td, td->i2c_reg, DFLL_I2C_VDD_REG_ADDR);
+
+	val = DIV_ROUND_UP(td->i2c_clk_rate, td->i2c_fs_rate * 8);
+	BUG_ON(!val || (val > DFLL_I2C_CLK_DIVISOR_MASK));
+	val = (val - 1) << DFLL_I2C_CLK_DIVISOR_FS_SHIFT;
+
+	/* default hs divisor just in case */
+	val |= 1 << DFLL_I2C_CLK_DIVISOR_HS_SHIFT;
+	__raw_writel(val, td->i2c_controller_base + DFLL_I2C_CLK_DIVISOR);
+	dfll_i2c_wmb(td);
+}
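+
+/*
+ * Worked example (illustrative; the clock rates are assumptions, not values
+ * taken from this driver): for an i2c_clk_rate of 136 MHz and an i2c_fs_rate
+ * of 400 kHz, DIV_ROUND_UP(136000000, 400000 * 8) = 43, and (43 - 1) is
+ * programmed into the FS field of DFLL_I2C_CLK_DIVISOR.
+ */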
+
+/**
+ * dfll_init_out_if - prepare DFLL-to-PMIC interface
+ * @td: DFLL instance
+ *
+ * During DFLL driver initialization or resume from context loss,
+ * disable the I2C command output to the PMIC, set safe voltage and
+ * output limits, and disable and clear limit interrupts.
+ */
+static void dfll_init_out_if(struct tegra_dfll *td)
+{
+	u32 val;
+
+	td->lut_min = 0;
+	td->lut_max = td->i2c_lut_size - 1;
+	td->lut_safe = td->lut_min + 1;
+
+	dfll_i2c_writel(td, 0, DFLL_OUTPUT_CFG);
+	val = (td->lut_safe << DFLL_OUTPUT_CFG_SAFE_SHIFT) |
+		(td->lut_max << DFLL_OUTPUT_CFG_MAX_SHIFT) |
+		(td->lut_min << DFLL_OUTPUT_CFG_MIN_SHIFT);
+	dfll_i2c_writel(td, val, DFLL_OUTPUT_CFG);
+	dfll_i2c_wmb(td);
+
+	dfll_writel(td, 0, DFLL_OUTPUT_FORCE);
+	dfll_i2c_writel(td, 0, DFLL_INTR_EN);
+	dfll_i2c_writel(td, DFLL_INTR_MAX_MASK | DFLL_INTR_MIN_MASK,
+			DFLL_INTR_STS);
+
+	dfll_load_i2c_lut(td);
+	dfll_init_i2c_if(td);
+}
+
+/*
+ * Set/get the DFLL's targeted output clock rate
+ */
+
+/**
+ * find_lut_index_for_rate - determine I2C LUT index for given DFLL rate
+ * @td: DFLL instance
+ * @rate: clock rate
+ *
+ * Determines the index of an I2C LUT entry for a voltage that approximately
+ * produces the given DFLL clock rate. This is used when forcing a value
+ * to the integrator during rate changes. Returns -ENOENT if a suitable
+ * LUT index is not found.
+ */
+static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
+{
+	struct dev_pm_opp *opp;
+	int i, uv;
+
+	opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+	uv = dev_pm_opp_get_voltage(opp);
+
+	for (i = 0; i < td->i2c_lut_size; i++) {
+		if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
+			return i;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * dfll_calculate_rate_request - calculate DFLL parameters for a given rate
+ * @td: DFLL instance
+ * @req: DFLL-rate-request structure
+ * @rate: the desired DFLL rate
+ *
+ * Populate the scale_bits and mult_bits fields of the DFLL-rate-request
+ * record @req, based on the target input rate. Returns 0 upon
+ * success, or -EINVAL if the requested rate in req->rate is too high
+ * or low for the DFLL to generate.
+ */
+static int dfll_calculate_rate_request(struct tegra_dfll *td,
+				       struct dfll_rate_req *req,
+				       unsigned long rate)
+{
+	u32 val;
+
+	/*
+	 * If the requested rate is below the minimum DVCO rate, activate the scaler.
+	 * In the future the DVCO minimum voltage should be selected based on
+	 * chip temperature and the actual minimum rate should be calibrated
+	 * at runtime.
+	 */
+	req->scale_bits = DFLL_FREQ_REQ_SCALE_MAX - 1;
+	if (rate < td->dvco_rate_min) {
+		int scale;
+
+		scale = DIV_ROUND_CLOSEST(rate / 1000 * DFLL_FREQ_REQ_SCALE_MAX,
+					  td->dvco_rate_min / 1000);
+		if (!scale) {
+			dev_err(td->dev, "%s: Rate %lu is too low\n",
+				__func__, rate);
+			return -EINVAL;
+		}
+		req->scale_bits = scale - 1;
+		rate = td->dvco_rate_min;
+	}
+
+	/* Convert requested rate into frequency request and scale settings */
+	val = DVCO_RATE_TO_MULT(rate, td->ref_rate);
+	if (val > FREQ_MAX) {
+		dev_err(td->dev, "%s: Rate %lu is above dfll range\n",
+			__func__, rate);
+		return -EINVAL;
+	}
+	req->mult_bits = val;
+	req->dvco_target_rate = MULT_TO_DVCO_RATE(req->mult_bits, td->ref_rate);
+	req->rate = dfll_scale_dvco_rate(req->scale_bits,
+					 req->dvco_target_rate);
+	req->lut_index = find_lut_index_for_rate(td, req->dvco_target_rate);
+	if (req->lut_index < 0)
+		return req->lut_index;
+
+	return 0;
+}
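+
+/*
+ * Worked example (illustrative; the 408 MHz dvco_rate_min is an assumption):
+ * a 204 MHz request below dvco_rate_min yields
+ * scale = DIV_ROUND_CLOSEST(204000 * 256, 408000) = 128, i.e. scale_bits 127;
+ * the DVCO then targets 408 MHz (mult_bits = 408 MHz / 25.5 MHz = 16), and
+ * the output scaler divides that back down to the requested 204 MHz.
+ */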
+
+/**
+ * dfll_set_frequency_request - start the frequency change operation
+ * @td: DFLL instance
+ * @req: rate request structure
+ *
+ * Tell the DFLL to try to change its output frequency to the
+ * frequency represented by @req. DFLL must be in closed-loop mode.
+ */
+static void dfll_set_frequency_request(struct tegra_dfll *td,
+				       struct dfll_rate_req *req)
+{
+	u32 val = 0;
+	int force_val;
+	int coef = 128; /* FIXME: td->cg_scale? */
+
+	force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
+	force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
+
+	val |= req->mult_bits << DFLL_FREQ_REQ_MULT_SHIFT;
+	val |= req->scale_bits << DFLL_FREQ_REQ_SCALE_SHIFT;
+	val |= ((u32)force_val << DFLL_FREQ_REQ_FORCE_SHIFT) &
+		DFLL_FREQ_REQ_FORCE_MASK;
+	val |= DFLL_FREQ_REQ_FREQ_VALID | DFLL_FREQ_REQ_FORCE_ENABLE;
+
+	dfll_writel(td, val, DFLL_FREQ_REQ);
+	dfll_wmb(td);
+}
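+
+/*
+ * Worked example (illustrative; cg = 2 and the LUT indices are assumptions):
+ * for lut_index = 10, lut_safe = 1 and cg = 2,
+ * force_val = (10 - 1) * 128 / 2 = 576, which lies within
+ * [FORCE_MIN, FORCE_MAX] and is therefore programmed into the FORCE field
+ * of DFLL_FREQ_REQ unclamped.
+ */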
+
+/**
+ * dfll_request_rate - set the next rate for the DFLL to tune to
+ * @td: DFLL instance
+ * @rate: clock rate to target
+ *
+ * Convert the requested clock rate @rate into the DFLL control logic
+ * settings. In closed-loop mode, update new settings immediately to
+ * adjust DFLL output rate accordingly. Otherwise, just save them
+ * until the next switch to closed loop. Returns 0 upon success,
+ * -EPERM if the DFLL driver has not yet been initialized, or -EINVAL
+ * if @rate is outside the DFLL's tunable range.
+ */
+static int dfll_request_rate(struct tegra_dfll *td, unsigned long rate)
+{
+	int ret;
+	struct dfll_rate_req req;
+
+	if (td->mode == DFLL_UNINITIALIZED) {
+		dev_err(td->dev, "%s: Cannot set DFLL rate in %s mode\n",
+			__func__, mode_name[td->mode]);
+		return -EPERM;
+	}
+
+	ret = dfll_calculate_rate_request(td, &req, rate);
+	if (ret)
+		return ret;
+
+	td->last_unrounded_rate = rate;
+	td->last_req = req;
+
+	if (td->mode == DFLL_CLOSED_LOOP)
+		dfll_set_frequency_request(td, &td->last_req);
+
+	return 0;
+}
+
+/*
+ * DFLL enable/disable & open-loop <-> closed-loop transitions
+ */
+
+/**
+ * dfll_disable - switch from open-loop mode to disabled mode
+ * @td: DFLL instance
+ *
+ * Switch from OPEN_LOOP state to DISABLED state. Returns 0 upon success
+ * or -EINVAL if the DFLL is not currently in open-loop mode.
+ */
+static int dfll_disable(struct tegra_dfll *td)
+{
+	if (td->mode != DFLL_OPEN_LOOP) {
+		dev_err(td->dev, "cannot disable DFLL in %s mode\n",
+			mode_name[td->mode]);
+		return -EINVAL;
+	}
+
+	dfll_set_mode(td, DFLL_DISABLED);
+	pm_runtime_put_sync(td->dev);
+
+	return 0;
+}
+
+/**
+ * dfll_enable - switch a disabled DFLL to open-loop mode
+ * @td: DFLL instance
+ *
+ * Switch from DISABLED state to OPEN_LOOP state. Returns 0 upon success
+ * or -EPERM if the DFLL is not currently disabled.
+ */
+static int dfll_enable(struct tegra_dfll *td)
+{
+	if (td->mode != DFLL_DISABLED) {
+		dev_err(td->dev, "cannot enable DFLL in %s mode\n",
+			mode_name[td->mode]);
+		return -EPERM;
+	}
+
+	pm_runtime_get_sync(td->dev);
+	dfll_set_mode(td, DFLL_OPEN_LOOP);
+
+	return 0;
+}
+
+/**
+ * dfll_set_open_loop_config - prepare to switch to open-loop mode
+ * @td: DFLL instance
+ *
+ * Prepare to switch the DFLL to open-loop mode. This switches the
+ * DFLL to the low-voltage tuning range, ensures that I2C output
+ * forcing is disabled, and disables the output clock rate scaler.
+ * The DFLL's low-voltage tuning range parameters must be
+ * characterized to keep the downstream device stable at any DVCO
+ * input voltage. No return value.
+ */
+static void dfll_set_open_loop_config(struct tegra_dfll *td)
+{
+	u32 val;
+
+	/* always tune low (safe) in open loop */
+	if (td->tune_range != DFLL_TUNE_LOW)
+		dfll_tune_low(td);
+
+	val = dfll_readl(td, DFLL_FREQ_REQ);
+	val |= DFLL_FREQ_REQ_SCALE_MASK;
+	val &= ~DFLL_FREQ_REQ_FORCE_ENABLE;
+	dfll_writel(td, val, DFLL_FREQ_REQ);
+	dfll_wmb(td);
+}
+
+/**
+ * dfll_lock - switch from open-loop to closed-loop mode
+ * @td: DFLL instance
+ *
+ * Switch from OPEN_LOOP state to CLOSED_LOOP state. Returns 0 upon success,
+ * -EINVAL if the DFLL's target rate hasn't been set yet, or -EPERM if the
+ * DFLL is not currently in open-loop mode.
+ */
+static int dfll_lock(struct tegra_dfll *td)
+{
+	struct dfll_rate_req *req = &td->last_req;
+
+	switch (td->mode) {
+	case DFLL_CLOSED_LOOP:
+		return 0;
+
+	case DFLL_OPEN_LOOP:
+		if (req->rate == 0) {
+			dev_err(td->dev, "%s: Cannot lock DFLL at rate 0\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		dfll_i2c_set_output_enabled(td, true);
+		dfll_set_mode(td, DFLL_CLOSED_LOOP);
+		dfll_set_frequency_request(td, req);
+		return 0;
+
+	default:
+		BUG_ON(td->mode > DFLL_CLOSED_LOOP);
+		dev_err(td->dev, "%s: Cannot lock DFLL in %s mode\n",
+			__func__, mode_name[td->mode]);
+		return -EPERM;
+	}
+}
+
+/**
+ * dfll_unlock - switch from closed-loop to open-loop mode
+ * @td: DFLL instance
+ *
+ * Switch from CLOSED_LOOP state to OPEN_LOOP state. Returns 0 upon success,
+ * or -EPERM if the DFLL is currently disabled or uninitialized.
+ */
+static int dfll_unlock(struct tegra_dfll *td)
+{
+	switch (td->mode) {
+	case DFLL_CLOSED_LOOP:
+		dfll_set_open_loop_config(td);
+		dfll_set_mode(td, DFLL_OPEN_LOOP);
+		dfll_i2c_set_output_enabled(td, false);
+		return 0;
+
+	case DFLL_OPEN_LOOP:
+		return 0;
+
+	default:
+		BUG_ON(td->mode > DFLL_CLOSED_LOOP);
+		dev_err(td->dev, "%s: Cannot unlock DFLL in %s mode\n",
+			__func__, mode_name[td->mode]);
+		return -EPERM;
+	}
+}
+
+/*
+ * Clock framework integration
+ *
+ * When the DFLL is being controlled by the CCF, always enter closed loop
+ * mode when the clk is enabled. This requires that a DFLL rate request
+ * has been set beforehand, which implies that a clk_set_rate() call is
+ * always required before a clk_enable().
+ */
+
+static int dfll_clk_is_enabled(struct clk_hw *hw)
+{
+	struct tegra_dfll *td = clk_hw_to_dfll(hw);
+
+	return dfll_is_running(td);
+}
+
+static int dfll_clk_enable(struct clk_hw *hw)
+{
+	struct tegra_dfll *td = clk_hw_to_dfll(hw);
+	int ret;
+
+	ret = dfll_enable(td);
+	if (ret)
+		return ret;
+
+	ret = dfll_lock(td);
+	if (ret)
+		dfll_disable(td);
+
+	return ret;
+}
+
+static void dfll_clk_disable(struct clk_hw *hw)
+{
+	struct tegra_dfll *td = clk_hw_to_dfll(hw);
+	int ret;
+
+	ret = dfll_unlock(td);
+	if (!ret)
+		dfll_disable(td);
+}
+
+static unsigned long dfll_clk_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct tegra_dfll *td = clk_hw_to_dfll(hw);
+
+	return td->last_unrounded_rate;
+}
+
+static long dfll_clk_round_rate(struct clk_hw *hw,
+				unsigned long rate,
+				unsigned long *parent_rate)
+{
+	struct tegra_dfll *td = clk_hw_to_dfll(hw);
+	struct dfll_rate_req req;
+	int ret;
+
+	ret = dfll_calculate_rate_request(td, &req, rate);
+	if (ret)
+		return ret;
+
+	/*
+	 * Don't return the rounded rate, since it doesn't really matter as
+	 * the output rate will be voltage controlled anyway, and cpufreq
+	 * freaks out if any rounding happens.
+	 */
+	return rate;
+}
+
+static int dfll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate)
+{
+	struct tegra_dfll *td = clk_hw_to_dfll(hw);
+
+	return dfll_request_rate(td, rate);
+}
+
+static const struct clk_ops dfll_clk_ops = {
+	.is_enabled	= dfll_clk_is_enabled,
+	.enable		= dfll_clk_enable,
+	.disable	= dfll_clk_disable,
+	.recalc_rate	= dfll_clk_recalc_rate,
+	.round_rate	= dfll_clk_round_rate,
+	.set_rate	= dfll_clk_set_rate,
+};
+
+static struct clk_init_data dfll_clk_init_data = {
+	.flags		= CLK_IS_ROOT,
+	.ops		= &dfll_clk_ops,
+	.num_parents	= 0,
+};
+
+/**
+ * dfll_register_clk - register the DFLL output clock with the clock framework
+ * @td: DFLL instance
+ *
+ * Register the DFLL's output clock with the Linux clock framework and register
+ * the DFLL driver as an OF clock provider. Returns 0 upon success or -EINVAL
+ * or -ENOMEM upon failure.
+ */
+static int dfll_register_clk(struct tegra_dfll *td)
+{
+	int ret;
+
+	dfll_clk_init_data.name = td->output_clock_name;
+	td->dfll_clk_hw.init = &dfll_clk_init_data;
+
+	td->dfll_clk = clk_register(td->dev, &td->dfll_clk_hw);
+	if (IS_ERR(td->dfll_clk)) {
+		dev_err(td->dev, "DFLL clock registration error\n");
+		return -EINVAL;
+	}
+
+	ret = of_clk_add_provider(td->dev->of_node, of_clk_src_simple_get,
+				  td->dfll_clk);
+	if (ret) {
+		dev_err(td->dev, "of_clk_add_provider() failed\n");
+
+		clk_unregister(td->dfll_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * dfll_unregister_clk - unregister the DFLL output clock
+ * @td: DFLL instance
+ *
+ * Unregister the DFLL's output clock from the Linux clock framework
+ * and remove the OF clock provider. No return value.
+ */
+static void dfll_unregister_clk(struct tegra_dfll *td)
+{
+	of_clk_del_provider(td->dev->of_node);
+	clk_unregister(td->dfll_clk);
+	td->dfll_clk = NULL;
+}
+
+/*
+ * Debugfs interface
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static int attr_enable_get(void *data, u64 *val)
+{
+	struct tegra_dfll *td = data;
+
+	*val = dfll_is_running(td);
+
+	return 0;
+}
+static int attr_enable_set(void *data, u64 val)
+{
+	struct tegra_dfll *td = data;
+
+	return val ? dfll_enable(td) : dfll_disable(td);
+}
+DEFINE_SIMPLE_ATTRIBUTE(enable_fops, attr_enable_get, attr_enable_set,
+			"%llu\n");
+
+static int attr_lock_get(void *data, u64 *val)
+{
+	struct tegra_dfll *td = data;
+
+	*val = (td->mode == DFLL_CLOSED_LOOP);
+
+	return 0;
+}
+static int attr_lock_set(void *data, u64 val)
+{
+	struct tegra_dfll *td = data;
+
+	return val ? dfll_lock(td) : dfll_unlock(td);
+}
+DEFINE_SIMPLE_ATTRIBUTE(lock_fops, attr_lock_get, attr_lock_set,
+			"%llu\n");
+
+static int attr_rate_get(void *data, u64 *val)
+{
+	struct tegra_dfll *td = data;
+
+	*val = dfll_read_monitor_rate(td);
+
+	return 0;
+}
+
+static int attr_rate_set(void *data, u64 val)
+{
+	struct tegra_dfll *td = data;
+
+	return dfll_request_rate(td, val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(rate_fops, attr_rate_get, attr_rate_set, "%llu\n");
+
+static int attr_registers_show(struct seq_file *s, void *data)
+{
+	u32 val, offs;
+	struct tegra_dfll *td = s->private;
+
+	seq_puts(s, "CONTROL REGISTERS:\n");
+	for (offs = 0; offs <= DFLL_MONITOR_DATA; offs += 4) {
+		if (offs == DFLL_OUTPUT_CFG)
+			val = dfll_i2c_readl(td, offs);
+		else
+			val = dfll_readl(td, offs);
+		seq_printf(s, "[0x%02x] = 0x%08x\n", offs, val);
+	}
+
+	seq_puts(s, "\nI2C and INTR REGISTERS:\n");
+	for (offs = DFLL_I2C_CFG; offs <= DFLL_I2C_STS; offs += 4)
+		seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+			   dfll_i2c_readl(td, offs));
+	for (offs = DFLL_INTR_STS; offs <= DFLL_INTR_EN; offs += 4)
+		seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+			   dfll_i2c_readl(td, offs));
+
+	seq_puts(s, "\nINTEGRATED I2C CONTROLLER REGISTERS:\n");
+	offs = DFLL_I2C_CLK_DIVISOR;
+	seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+		   __raw_readl(td->i2c_controller_base + offs));
+
+	seq_puts(s, "\nLUT:\n");
+	for (offs = 0; offs < 4 * MAX_DFLL_VOLTAGES; offs += 4)
+		seq_printf(s, "[0x%02x] = 0x%08x\n", offs,
+			   __raw_readl(td->lut_base + offs));
+
+	return 0;
+}
+
+static int attr_registers_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, attr_registers_show, inode->i_private);
+}
+
+static const struct file_operations attr_registers_fops = {
+	.open		= attr_registers_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dfll_debug_init(struct tegra_dfll *td)
+{
+	int ret;
+
+	if (!td || (td->mode == DFLL_UNINITIALIZED))
+		return 0;
+
+	td->debugfs_dir = debugfs_create_dir("tegra_dfll_fcpu", NULL);
+	if (!td->debugfs_dir)
+		return -ENOMEM;
+
+	ret = -ENOMEM;
+
+	if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR,
+				 td->debugfs_dir, td, &enable_fops))
+		goto err_out;
+
+	if (!debugfs_create_file("lock", S_IRUGO,
+				 td->debugfs_dir, td, &lock_fops))
+		goto err_out;
+
+	if (!debugfs_create_file("rate", S_IRUGO,
+				 td->debugfs_dir, td, &rate_fops))
+		goto err_out;
+
+	if (!debugfs_create_file("registers", S_IRUGO,
+				 td->debugfs_dir, td, &attr_registers_fops))
+		goto err_out;
+
+	return 0;
+
+err_out:
+	debugfs_remove_recursive(td->debugfs_dir);
+	return ret;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * DFLL initialization
+ */
+
+/**
+ * dfll_set_default_params - program non-output related DFLL parameters
+ * @td: DFLL instance
+ *
+ * During DFLL driver initialization or resume from context loss,
+ * program parameters for the closed loop integrator, DVCO tuning,
+ * voltage droop control and monitor control.
+ */
+static void dfll_set_default_params(struct tegra_dfll *td)
+{
+	u32 val;
+
+	val = DIV_ROUND_UP(td->ref_rate, td->sample_rate * 32);
+	BUG_ON(val > DFLL_CONFIG_DIV_MASK);
+	dfll_writel(td, val, DFLL_CONFIG);
+
+	val = (td->force_mode << DFLL_PARAMS_FORCE_MODE_SHIFT) |
+		(td->cf << DFLL_PARAMS_CF_PARAM_SHIFT) |
+		(td->ci << DFLL_PARAMS_CI_PARAM_SHIFT) |
+		(td->cg << DFLL_PARAMS_CG_PARAM_SHIFT) |
+		(td->cg_scale ? DFLL_PARAMS_CG_SCALE : 0);
+	dfll_writel(td, val, DFLL_PARAMS);
+
+	dfll_tune_low(td);
+	dfll_writel(td, td->droop_ctrl, DFLL_DROOP_CTRL);
+	dfll_writel(td, DFLL_MONITOR_CTRL_FREQ, DFLL_MONITOR_CTRL);
+}
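+
+/*
+ * Worked example (illustrative; the 12.5 kHz sample rate is an assumption):
+ * DIV_ROUND_UP(51000000, 12500 * 32) = 128, which fits within
+ * DFLL_CONFIG_DIV_MASK and is written to DFLL_CONFIG above.
+ */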
+
+/**
+ * dfll_init_clks - clk_get() the DFLL source clocks
+ * @td: DFLL instance
+ *
+ * Call clk_get() on the DFLL source clocks and save the pointers for later
+ * use. Returns 0 upon success, or the error from devm_clk_get() if one or
+ * more of the clocks couldn't be looked up.
+ */
+static int dfll_init_clks(struct tegra_dfll *td)
+{
+	td->ref_clk = devm_clk_get(td->dev, "ref");
+	if (IS_ERR(td->ref_clk)) {
+		dev_err(td->dev, "missing ref clock\n");
+		return PTR_ERR(td->ref_clk);
+	}
+
+	td->soc_clk = devm_clk_get(td->dev, "soc");
+	if (IS_ERR(td->soc_clk)) {
+		dev_err(td->dev, "missing soc clock\n");
+		return PTR_ERR(td->soc_clk);
+	}
+
+	td->i2c_clk = devm_clk_get(td->dev, "i2c");
+	if (IS_ERR(td->i2c_clk)) {
+		dev_err(td->dev, "missing i2c clock\n");
+		return PTR_ERR(td->i2c_clk);
+	}
+	td->i2c_clk_rate = clk_get_rate(td->i2c_clk);
+
+	return 0;
+}
+
+/**
+ * dfll_init - Prepare the DFLL IP block for use
+ * @td: DFLL instance
+ *
+ * Do everything necessary to prepare the DFLL IP block for use. The
+ * DFLL will be left in DISABLED state. Called by dfll_probe().
+ * Returns 0 upon success, or passes along the error from whatever
+ * function returned it.
+ */
+static int dfll_init(struct tegra_dfll *td)
+{
+	int ret;
+
+	td->ref_rate = clk_get_rate(td->ref_clk);
+	if (td->ref_rate != REF_CLOCK_RATE) {
+		dev_err(td->dev, "unexpected ref clk rate %lu, expecting %lu",
+			td->ref_rate, REF_CLOCK_RATE);
+		return -EINVAL;
+	}
+
+	reset_control_deassert(td->dvco_rst);
+
+	ret = clk_prepare(td->ref_clk);
+	if (ret) {
+		dev_err(td->dev, "failed to prepare ref_clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare(td->soc_clk);
+	if (ret) {
+		dev_err(td->dev, "failed to prepare soc_clk\n");
+		goto di_err1;
+	}
+
+	ret = clk_prepare(td->i2c_clk);
+	if (ret) {
+		dev_err(td->dev, "failed to prepare i2c_clk\n");
+		goto di_err2;
+	}
+
+	td->last_unrounded_rate = 0;
+
+	pm_runtime_enable(td->dev);
+	pm_runtime_get_sync(td->dev);
+
+	dfll_set_mode(td, DFLL_DISABLED);
+	dfll_set_default_params(td);
+
+	if (td->soc->init_clock_trimmers)
+		td->soc->init_clock_trimmers();
+
+	dfll_set_open_loop_config(td);
+
+	dfll_init_out_if(td);
+
+	pm_runtime_put_sync(td->dev);
+
+	return 0;
+
+di_err2:
+	clk_unprepare(td->soc_clk);
+di_err1:
+	clk_unprepare(td->ref_clk);
+
+	reset_control_assert(td->dvco_rst);
+
+	return ret;
+}
+
+/*
+ * DT data fetch
+ */
+
+/*
+ * Find a PMIC voltage register-to-voltage mapping for the given voltage.
+ * An exact voltage match is required.
+ */
+static int find_vdd_map_entry_exact(struct tegra_dfll *td, int uV)
+{
+	int i, n_voltages, reg_uV;
+
+	n_voltages = regulator_count_voltages(td->vdd_reg);
+	for (i = 0; i < n_voltages; i++) {
+		reg_uV = regulator_list_voltage(td->vdd_reg, i);
+		if (reg_uV < 0)
+			break;
+
+		if (uV == reg_uV)
+			return i;
+	}
+
+	dev_err(td->dev, "no voltage map entry for %d uV\n", uV);
+	return -EINVAL;
+}
+
+/*
+ * Find a PMIC voltage register-to-voltage mapping for the given voltage,
+ * rounding up to the closest supported voltage.
+ */
+static int find_vdd_map_entry_min(struct tegra_dfll *td, int uV)
+{
+	int i, n_voltages, reg_uV;
+
+	n_voltages = regulator_count_voltages(td->vdd_reg);
+	for (i = 0; i < n_voltages; i++) {
+		reg_uV = regulator_list_voltage(td->vdd_reg, i);
+		if (reg_uV < 0)
+			break;
+
+		if (uV <= reg_uV)
+			return i;
+	}
+
+	dev_err(td->dev, "no voltage map entry rounding to %d uV\n", uV);
+	return -EINVAL;
+}
+
+/**
+ * dfll_build_i2c_lut - build the I2C voltage register lookup table
+ * @td: DFLL instance
+ *
+ * The DFLL hardware has 33 bytes of look-up table RAM that must be filled with
+ * PMIC voltage register values that span the entire DFLL operating range.
+ * This function builds the look-up table based on the OPP table provided by
+ * the SoC-specific platform driver (td->soc->dev) and the PMIC
+ * register-to-voltage mapping queried from the regulator framework.
+ *
+ * On success, fills in td->i2c_lut and returns 0, or -err on failure.
+ */
+static int dfll_build_i2c_lut(struct tegra_dfll *td)
+{
+	int ret = -EINVAL;
+	int j, v, v_max, v_opp;
+	int selector;
+	unsigned long rate;
+	struct dev_pm_opp *opp;
+	int lut;
+
+	rcu_read_lock();
+
+	rate = ULONG_MAX;
+	opp = dev_pm_opp_find_freq_floor(td->soc->dev, &rate);
+	if (IS_ERR(opp)) {
+		dev_err(td->dev, "couldn't get vmax opp, empty opp table?\n");
+		goto out;
+	}
+	v_max = dev_pm_opp_get_voltage(opp);
+
+	v = td->soc->min_millivolts * 1000;
+	lut = find_vdd_map_entry_exact(td, v);
+	if (lut < 0)
+		goto out;
+	td->i2c_lut[0] = lut;
+
+	for (j = 1, rate = 0; ; rate++) {
+		opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
+		if (IS_ERR(opp))
+			break;
+		v_opp = dev_pm_opp_get_voltage(opp);
+
+		if (v_opp <= td->soc->min_millivolts * 1000)
+			td->dvco_rate_min = dev_pm_opp_get_freq(opp);
+
+		for (;;) {
+			v += max(1, (v_max - v) / (MAX_DFLL_VOLTAGES - j));
+			if (v >= v_opp)
+				break;
+
+			selector = find_vdd_map_entry_min(td, v);
+			if (selector < 0)
+				goto out;
+			if (selector != td->i2c_lut[j - 1])
+				td->i2c_lut[j++] = selector;
+		}
+
+		v = (j == MAX_DFLL_VOLTAGES - 1) ? v_max : v_opp;
+		selector = find_vdd_map_entry_exact(td, v);
+		if (selector < 0)
+			goto out;
+		if (selector != td->i2c_lut[j - 1])
+			td->i2c_lut[j++] = selector;
+
+		if (v >= v_max)
+			break;
+	}
+	td->i2c_lut_size = j;
+
+	if (!td->dvco_rate_min)
+		dev_err(td->dev, "no opp above DFLL minimum voltage %d mV\n",
+			td->soc->min_millivolts);
+	else
+		ret = 0;
+
+out:
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * read_dt_param - helper function for reading required parameters from the DT
+ * @td: DFLL instance
+ * @param: DT property name
+ * @dest: output pointer for the value read
+ *
+ * Read a required numeric parameter from the DFLL device node, or complain
+ * if the property doesn't exist. Returns a boolean indicating success for
+ * easy chaining of multiple calls to this function.
+ */
+static bool read_dt_param(struct tegra_dfll *td, const char *param, u32 *dest)
+{
+	int err = of_property_read_u32(td->dev->of_node, param, dest);
+
+	if (err < 0) {
+		dev_err(td->dev, "failed to read DT parameter %s: %d\n",
+			param, err);
+		return false;
+	}
+
+	return true;
+}
+
+/**
+ * dfll_fetch_i2c_params - query PMIC I2C params from DT & regulator subsystem
+ * @td: DFLL instance
+ *
+ * Read all the parameters required for operation in I2C mode. The parameters
+ * can originate from the device tree or the regulator subsystem.
+ * Returns 0 on success or -err on failure.
+ */
+static int dfll_fetch_i2c_params(struct tegra_dfll *td)
+{
+	struct regmap *regmap;
+	struct device *i2c_dev;
+	struct i2c_client *i2c_client;
+	int vsel_reg, vsel_mask;
+	int ret;
+
+	if (!read_dt_param(td, "nvidia,i2c-fs-rate", &td->i2c_fs_rate))
+		return -EINVAL;
+
+	regmap = regulator_get_regmap(td->vdd_reg);
+	i2c_dev = regmap_get_device(regmap);
+	i2c_client = to_i2c_client(i2c_dev);
+
+	td->i2c_slave_addr = i2c_client->addr;
+
+	ret = regulator_get_hardware_vsel_register(td->vdd_reg,
+						   &vsel_reg,
+						   &vsel_mask);
+	if (ret < 0) {
+		dev_err(td->dev,
+			"regulator unsuitable for DFLL I2C operation\n");
+		return -EINVAL;
+	}
+	td->i2c_reg = vsel_reg;
+
+	ret = dfll_build_i2c_lut(td);
+	if (ret) {
+		dev_err(td->dev, "couldn't build I2C LUT\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * dfll_fetch_common_params - read DFLL parameters from the device tree
+ * @td: DFLL instance
+ *
+ * Read all the DT parameters that are common to both I2C and PWM operation.
+ * Returns 0 on success or -EINVAL on any failure.
+ */
+static int dfll_fetch_common_params(struct tegra_dfll *td)
+{
+	bool ok = true;
+
+	ok &= read_dt_param(td, "nvidia,droop-ctrl", &td->droop_ctrl);
+	ok &= read_dt_param(td, "nvidia,sample-rate", &td->sample_rate);
+	ok &= read_dt_param(td, "nvidia,force-mode", &td->force_mode);
+	ok &= read_dt_param(td, "nvidia,cf", &td->cf);
+	ok &= read_dt_param(td, "nvidia,ci", &td->ci);
+	ok &= read_dt_param(td, "nvidia,cg", &td->cg);
+	td->cg_scale = of_property_read_bool(td->dev->of_node,
+					     "nvidia,cg-scale");
+
+	if (of_property_read_string(td->dev->of_node, "clock-output-names",
+				    &td->output_clock_name)) {
+		dev_err(td->dev, "missing clock-output-names property\n");
+		ok = false;
+	}
+
+	return ok ? 0 : -EINVAL;
+}
+
+/*
+ * API exported to per-SoC platform drivers
+ */
+
+/**
+ * tegra_dfll_register - probe a Tegra DFLL device
+ * @pdev: DFLL platform_device *
+ * @soc: Per-SoC integration and characterization data for this DFLL instance
+ *
+ * Probe and initialize a DFLL device instance. Intended to be called
+ * by a SoC-specific shim driver that passes in per-SoC integration
+ * and configuration data via @soc. Returns 0 on success or -err on failure.
+ */
+int tegra_dfll_register(struct platform_device *pdev,
+			struct tegra_dfll_soc_data *soc)
+{
+	struct resource *mem;
+	struct tegra_dfll *td;
+	int ret;
+
+	if (!soc) {
+		dev_err(&pdev->dev, "no tegra_dfll_soc_data provided\n");
+		return -EINVAL;
+	}
+
+	td = devm_kzalloc(&pdev->dev, sizeof(*td), GFP_KERNEL);
+	if (!td)
+		return -ENOMEM;
+	td->dev = &pdev->dev;
+	platform_set_drvdata(pdev, td);
+
+	td->soc = soc;
+
+	td->vdd_reg = devm_regulator_get(td->dev, "vdd-cpu");
+	if (IS_ERR(td->vdd_reg)) {
+		dev_err(td->dev, "couldn't get vdd_cpu regulator\n");
+		return PTR_ERR(td->vdd_reg);
+	}
+
+	td->dvco_rst = devm_reset_control_get(td->dev, "dvco");
+	if (IS_ERR(td->dvco_rst)) {
+		dev_err(td->dev, "couldn't get dvco reset\n");
+		return PTR_ERR(td->dvco_rst);
+	}
+
+	ret = dfll_fetch_common_params(td);
+	if (ret) {
+		dev_err(td->dev, "couldn't parse device tree parameters\n");
+		return ret;
+	}
+
+	ret = dfll_fetch_i2c_params(td);
+	if (ret)
+		return ret;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(td->dev, "no control register resource\n");
+		return -ENODEV;
+	}
+
+	td->base = devm_ioremap(td->dev, mem->start, resource_size(mem));
+	if (!td->base) {
+		dev_err(td->dev, "couldn't ioremap DFLL control registers\n");
+		return -ENODEV;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem) {
+		dev_err(td->dev, "no i2c_base resource\n");
+		return -ENODEV;
+	}
+
+	td->i2c_base = devm_ioremap(td->dev, mem->start, resource_size(mem));
+	if (!td->i2c_base) {
+		dev_err(td->dev, "couldn't ioremap i2c_base resource\n");
+		return -ENODEV;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!mem) {
+		dev_err(td->dev, "no i2c_controller_base resource\n");
+		return -ENODEV;
+	}
+
+	td->i2c_controller_base = devm_ioremap(td->dev, mem->start,
+					       resource_size(mem));
+	if (!td->i2c_controller_base) {
+		dev_err(td->dev,
+			"couldn't ioremap i2c_controller_base resource\n");
+		return -ENODEV;
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (!mem) {
+		dev_err(td->dev, "no lut_base resource\n");
+		return -ENODEV;
+	}
+
+	td->lut_base = devm_ioremap(td->dev, mem->start, resource_size(mem));
+	if (!td->lut_base) {
+		dev_err(td->dev,
+			"couldn't ioremap lut_base resource\n");
+		return -ENODEV;
+	}
+
+	ret = dfll_init_clks(td);
+	if (ret) {
+		dev_err(&pdev->dev, "DFLL clock init error\n");
+		return ret;
+	}
+
+	/* Enable the clocks and set the device up */
+	ret = dfll_init(td);
+	if (ret)
+		return ret;
+
+	ret = dfll_register_clk(td);
+	if (ret) {
+		dev_err(&pdev->dev, "DFLL clk registration failed\n");
+		return ret;
+	}
+
+#ifdef CONFIG_DEBUG_FS
+	dfll_debug_init(td);
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_register);
+
+/**
+ * tegra_dfll_unregister - release all of the DFLL driver resources for a device
+ * @pdev: DFLL platform_device *
+ *
+ * Unbind this driver from the DFLL hardware device represented by
+ * @pdev. The DFLL must be disabled for this to succeed. Returns 0
+ * upon success or -EBUSY if the DFLL is still active.
+ */
+int tegra_dfll_unregister(struct platform_device *pdev)
+{
+	struct tegra_dfll *td = platform_get_drvdata(pdev);
+
+	/* Try to prevent removal while the DFLL is active */
+	if (td->mode != DFLL_DISABLED) {
+		dev_err(&pdev->dev,
+			"must disable DFLL before removing driver\n");
+		return -EBUSY;
+	}
+
+	debugfs_remove_recursive(td->debugfs_dir);
+
+	dfll_unregister_clk(td);
+	pm_runtime_disable(&pdev->dev);
+
+	clk_unprepare(td->ref_clk);
+	clk_unprepare(td->soc_clk);
+	clk_unprepare(td->i2c_clk);
+
+	reset_control_assert(td->dvco_rst);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_dfll_unregister);
diff --git a/drivers/clk/tegra/clk-dfll.h b/drivers/clk/tegra/clk-dfll.h
new file mode 100644
index 0000000..2e4c077
--- /dev/null
+++ b/drivers/clk/tegra/clk-dfll.h
@@ -0,0 +1,54 @@
+/*
+ * clk-dfll.h - prototypes and macros for the Tegra DFLL clocksource driver
+ * Copyright (C) 2013 NVIDIA Corporation.  All rights reserved.
+ *
+ * Aleksandr Frid <afrid@nvidia.com>
+ * Paul Walmsley <pwalmsley@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __DRIVERS_CLK_TEGRA_CLK_DFLL_H
+#define __DRIVERS_CLK_TEGRA_CLK_DFLL_H
+
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/types.h>
+
+/**
+ * struct tegra_dfll_soc_data - SoC-specific hooks/integration for the DFLL driver
+ * @dev: struct device * that holds the OPP table for the DFLL
+ * @min_millivolts: minimum voltage (in mV) that the DFLL can operate at
+ * @tune0_low: DFLL tuning register 0 (low voltage range)
+ * @tune0_high: DFLL tuning register 0 (high voltage range)
+ * @tune1: DFLL tuning register 1
+ * @init_clock_trimmers: fn ptr to initialize the CPU clock trimmers
+ * @set_clock_trimmers_high: fn ptr to tune clock trimmers for high voltage
+ * @set_clock_trimmers_low: fn ptr to tune clock trimmers for low voltage
+ */
+struct tegra_dfll_soc_data {
+	struct device *dev;
+	unsigned int min_millivolts;
+	u32 tune0_low;
+	u32 tune0_high;
+	u32 tune1;
+	void (*init_clock_trimmers)(void);
+	void (*set_clock_trimmers_high)(void);
+	void (*set_clock_trimmers_low)(void);
+};
+
+int tegra_dfll_register(struct platform_device *pdev,
+			struct tegra_dfll_soc_data *soc);
+int tegra_dfll_unregister(struct platform_device *pdev);
+int tegra_dfll_runtime_suspend(struct device *dev);
+int tegra_dfll_runtime_resume(struct device *dev);
+
+#endif /* __DRIVERS_CLK_TEGRA_CLK_DFLL_H */
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 59a5714..48c83ef 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -19,7 +19,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/clk-provider.h>
-#include <linux/clk.h>
 
 #include "clk.h"
 
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index 7649685..138a94b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -103,7 +103,7 @@
 	 * CCF wrongly assumes that the parent won't change during set_rate,
 	 * so get the parent rate explicitly.
 	 */
-	parent_rate = __clk_get_rate(__clk_get_parent(hw->clk));
+	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
 	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
 	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;
@@ -116,11 +116,7 @@
  * safer since things have EMC rate floors. Also don't touch parent_rate
  * since we don't want the CCF to play with our parent clocks.
  */
-static long emc_determine_rate(struct clk_hw *hw, unsigned long rate,
-			       unsigned long min_rate,
-			       unsigned long max_rate,
-			       unsigned long *best_parent_rate,
-			       struct clk_hw **best_parent_hw)
+static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
 {
 	struct tegra_clk_emc *tegra;
 	u8 ram_code = tegra_read_ram_code();
@@ -135,22 +131,28 @@
 
 		timing = tegra->timings + i;
 
-		if (timing->rate > max_rate) {
+		if (timing->rate > req->max_rate) {
 			i = min(i, 1);
-			return tegra->timings[i - 1].rate;
+			req->rate = tegra->timings[i - 1].rate;
+			return 0;
 		}
 
-		if (timing->rate < min_rate)
+		if (timing->rate < req->min_rate)
 			continue;
 
-		if (timing->rate >= rate)
-			return timing->rate;
+		if (timing->rate >= req->rate) {
+			req->rate = timing->rate;
+			return 0;
+		}
 	}
 
-	if (timing)
-		return timing->rate;
+	if (timing) {
+		req->rate = timing->rate;
+		return 0;
+	}
 
-	return __clk_get_rate(hw->clk);
+	req->rate = clk_hw_get_rate(hw);
+	return 0;
 }
 
 static u8 emc_get_parent(struct clk_hw *hw)
@@ -312,7 +314,7 @@
 
 	tegra = container_of(hw, struct tegra_clk_emc, hw);
 
-	if (__clk_get_rate(hw->clk) == rate)
+	if (clk_hw_get_rate(hw) == rate)
 		return 0;
 
 	/*
@@ -525,8 +527,8 @@
 	if (IS_ERR(clk))
 		return clk;
 
-	tegra->prev_parent = clk_get_parent_by_index(
-		tegra->hw.clk, emc_get_parent(&tegra->hw));
+	tegra->prev_parent = clk_hw_get_parent_by_index(
+		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
 	tegra->changing_timing = false;
 
 	/* Allow debugging tools to see the EMC clock */
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index 0aa8830..d28d6e9 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -14,7 +14,6 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/slab.h>
 #include <linux/io.h>
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index d84ae49..ec5b611 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -14,7 +14,6 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/export.h>
 #include <linux/slab.h>
diff --git a/drivers/clk/tegra/clk-pll-out.c b/drivers/clk/tegra/clk-pll-out.c
index 3598987..257cae0 100644
--- a/drivers/clk/tegra/clk-pll-out.c
+++ b/drivers/clk/tegra/clk-pll-out.c
@@ -20,7 +20,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/clk-provider.h>
-#include <linux/clk.h>
 
 #include "clk.h"
 
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 05c6d08..d6d4ecb 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -18,8 +18,8 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/err.h>
-#include <linux/clk-provider.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 
 #include "clk.h"
 
@@ -264,7 +264,7 @@
 	}
 
 	pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
-	       __clk_get_name(pll->hw.clk));
+	       clk_hw_get_name(&pll->hw));
 
 	return -1;
 }
@@ -595,7 +595,7 @@
 	if (pll->params->flags & TEGRA_PLL_FIXED) {
 		if (rate != pll->params->fixed_rate) {
 			pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
-				__func__, __clk_get_name(hw->clk),
+				__func__, clk_hw_get_name(hw),
 				pll->params->fixed_rate, rate);
 			return -EINVAL;
 		}
@@ -605,7 +605,7 @@
 	if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
 	    _calc_rate(hw, &cfg, rate, parent_rate)) {
 		pr_err("%s: Failed to set %s rate %lu\n", __func__,
-		       __clk_get_name(hw->clk), rate);
+		       clk_hw_get_name(hw), rate);
 		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -634,7 +634,7 @@
 
 	/* PLLM is used for memory; we do not change rate */
 	if (pll->params->flags & TEGRA_PLLM)
-		return __clk_get_rate(hw->clk);
+		return clk_hw_get_rate(hw);
 
 	if (_get_table_rate(hw, &cfg, rate, *prate) &&
 	    _calc_rate(hw, &cfg, rate, *prate))
@@ -663,7 +663,7 @@
 		if (_get_table_rate(hw, &sel, pll->params->fixed_rate,
 					parent_rate)) {
 			pr_err("Clock %s has unknown fixed frequency\n",
-			       __clk_get_name(hw->clk));
+			       clk_hw_get_name(hw));
 			BUG();
 		}
 		return pll->params->fixed_rate;
@@ -1577,7 +1577,7 @@
 	if (!pll_params->pdiv_tohw)
 		return ERR_PTR(-EINVAL);
 
-	parent_rate = __clk_get_rate(parent);
+	parent_rate = clk_get_rate(parent);
 
 	pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
 
@@ -1674,7 +1674,7 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	parent_rate = __clk_get_rate(parent);
+	parent_rate = clk_get_rate(parent);
 
 	pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
 
@@ -1715,7 +1715,7 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	parent_rate = __clk_get_rate(parent);
+	parent_rate = clk_get_rate(parent);
 
 	pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
 
@@ -1848,7 +1848,7 @@
 	val &= ~PLLSS_REF_SRC_SEL_MASK;
 	pll_writel_base(val, pll);
 
-	parent_rate = __clk_get_rate(parent);
+	parent_rate = clk_get_rate(parent);
 
 	pll_params->vco_min = _clip_vco_min(pll_params->vco_min, parent_rate);
 
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 2fd924d..131d1b50 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -20,7 +20,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/clk-provider.h>
-#include <linux/clk.h>
 
 #include "clk.h"
 
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
index 5c38aab..11e3ad7 100644
--- a/drivers/clk/tegra/clk-tegra-audio.c
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c
index 605676d..da0b594 100644
--- a/drivers/clk/tegra/clk-tegra-fixed.c
+++ b/drivers/clk/tegra/clk-tegra-fixed.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 46af924..cb6ab83 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c
index 08b21c1..91377ab 100644
--- a/drivers/clk/tegra/clk-tegra-pmc.c
+++ b/drivers/clk/tegra/clk-tegra-pmc.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index feb3201..5b1d723 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -44,7 +43,9 @@
 
 static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
 					"pll_p", "pll_p_out4", "unused",
-					"unused", "pll_x" };
+					"unused", "pll_x", "unused", "unused",
+					"unused", "unused", "unused", "unused",
+					"dfllCPU_out" };
 
 static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
 					 "pll_p", "pll_p_out4", "unused",
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 8237d16..db58715 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -15,9 +15,7 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/delay.h>
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
new file mode 100644
index 0000000..6125333
--- /dev/null
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -0,0 +1,166 @@
+/*
+ * Tegra124 DFLL FCPU clock source driver
+ *
+ * Copyright (C) 2012-2014 NVIDIA Corporation.  All rights reserved.
+ *
+ * Aleksandr Frid <afrid@nvidia.com>
+ * Paul Walmsley <pwalmsley@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include "clk.h"
+#include "clk-dfll.h"
+#include "cvb.h"
+
+/* Maximum CPU frequency, indexed by CPU speedo id */
+static const unsigned long cpu_max_freq_table[] = {
+	[0] = 2014500000UL,
+	[1] = 2320500000UL,
+	[2] = 2116500000UL,
+	[3] = 2524500000UL,
+};
+
+static const struct cvb_table tegra124_cpu_cvb_tables[] = {
+	{
+		.speedo_id = -1,
+		.process_id = -1,
+		.min_millivolts = 900,
+		.max_millivolts = 1260,
+		.alignment = {
+			.step_uv = 10000, /* 10mV */
+		},
+		.speedo_scale = 100,
+		.voltage_scale = 1000,
+		.cvb_table = {
+			{204000000UL,   {1112619, -29295, 402} },
+			{306000000UL,	{1150460, -30585, 402} },
+			{408000000UL,	{1190122, -31865, 402} },
+			{510000000UL,	{1231606, -33155, 402} },
+			{612000000UL,	{1274912, -34435, 402} },
+			{714000000UL,	{1320040, -35725, 402} },
+			{816000000UL,	{1366990, -37005, 402} },
+			{918000000UL,	{1415762, -38295, 402} },
+			{1020000000UL,	{1466355, -39575, 402} },
+			{1122000000UL,	{1518771, -40865, 402} },
+			{1224000000UL,	{1573009, -42145, 402} },
+			{1326000000UL,	{1629068, -43435, 402} },
+			{1428000000UL,	{1686950, -44715, 402} },
+			{1530000000UL,	{1746653, -46005, 402} },
+			{1632000000UL,	{1808179, -47285, 402} },
+			{1734000000UL,	{1871526, -48575, 402} },
+			{1836000000UL,	{1936696, -49855, 402} },
+			{1938000000UL,	{2003687, -51145, 402} },
+			{2014500000UL,	{2054787, -52095, 402} },
+			{2116500000UL,	{2124957, -53385, 402} },
+			{2218500000UL,	{2196950, -54665, 402} },
+			{2320500000UL,	{2270765, -55955, 402} },
+			{2422500000UL,	{2346401, -57235, 402} },
+			{2524500000UL,	{2437299, -58535, 402} },
+			{0,		{      0,      0,   0} },
+		},
+		.cpu_dfll_data = {
+			.tune0_low = 0x005020ff,
+			.tune0_high = 0x005040ff,
+			.tune1 = 0x00000060,
+		}
+	},
+};
+
+static int tegra124_dfll_fcpu_probe(struct platform_device *pdev)
+{
+	int process_id, speedo_id, speedo_value;
+	struct tegra_dfll_soc_data *soc;
+	const struct cvb_table *cvb;
+
+	process_id = tegra_sku_info.cpu_process_id;
+	speedo_id = tegra_sku_info.cpu_speedo_id;
+	speedo_value = tegra_sku_info.cpu_speedo_value;
+
+	if (speedo_id >= ARRAY_SIZE(cpu_max_freq_table)) {
+		dev_err(&pdev->dev, "unknown max CPU freq for speedo_id=%d\n",
+			speedo_id);
+		return -ENODEV;
+	}
+
+	soc = devm_kzalloc(&pdev->dev, sizeof(*soc), GFP_KERNEL);
+	if (!soc)
+		return -ENOMEM;
+
+	soc->dev = get_cpu_device(0);
+	if (!soc->dev) {
+		dev_err(&pdev->dev, "no CPU0 device\n");
+		return -ENODEV;
+	}
+
+	cvb = tegra_cvb_build_opp_table(tegra124_cpu_cvb_tables,
+					ARRAY_SIZE(tegra124_cpu_cvb_tables),
+					process_id, speedo_id, speedo_value,
+					cpu_max_freq_table[speedo_id],
+					soc->dev);
+	if (IS_ERR(cvb)) {
+		dev_err(&pdev->dev, "couldn't build OPP table: %ld\n",
+			PTR_ERR(cvb));
+		return PTR_ERR(cvb);
+	}
+
+	soc->min_millivolts = cvb->min_millivolts;
+	soc->tune0_low = cvb->cpu_dfll_data.tune0_low;
+	soc->tune0_high = cvb->cpu_dfll_data.tune0_high;
+	soc->tune1 = cvb->cpu_dfll_data.tune1;
+
+	return tegra_dfll_register(pdev, soc);
+}
+
+static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
+	{ .compatible = "nvidia,tegra124-dfll", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, tegra124_dfll_fcpu_of_match);
+
+static const struct dev_pm_ops tegra124_dfll_pm_ops = {
+	SET_RUNTIME_PM_OPS(tegra_dfll_runtime_suspend,
+			   tegra_dfll_runtime_resume, NULL)
+};
+
+static struct platform_driver tegra124_dfll_fcpu_driver = {
+	.probe = tegra124_dfll_fcpu_probe,
+	.remove = tegra_dfll_unregister,
+	.driver = {
+		.name = "tegra124-dfll",
+		.of_match_table = tegra124_dfll_fcpu_of_match,
+		.pm = &tegra124_dfll_pm_ops,
+	},
+};
+
+static int __init tegra124_dfll_fcpu_init(void)
+{
+	return platform_driver_register(&tegra124_dfll_fcpu_driver);
+}
+module_init(tegra124_dfll_fcpu_init);
+
+static void __exit tegra124_dfll_fcpu_exit(void)
+{
+	platform_driver_unregister(&tegra124_dfll_fcpu_driver);
+}
+module_exit(tegra124_dfll_fcpu_exit);
+
+MODULE_DESCRIPTION("Tegra124 DFLL clock source driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Aleksandr Frid <afrid@nvidia.com>");
+MODULE_AUTHOR("Paul Walmsley <pwalmsley@nvidia.com>");
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index e8cca3e..824d758 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
@@ -24,6 +23,7 @@
 #include <linux/export.h>
 #include <linux/clk/tegra.h>
 #include <dt-bindings/clock/tegra124-car.h>
+#include <dt-bindings/reset/tegra124-car.h>
 
 #include "clk.h"
 #include "clk-id.h"
@@ -39,6 +39,9 @@
 #define CLK_SOURCE_CSITE 0x1d4
 #define CLK_SOURCE_EMC 0x19c
 
+#define RST_DFLL_DVCO			0x2f4
+#define DVFS_DFLL_RESET_SHIFT		0
+
 #define PLLC_BASE 0x80
 #define PLLC_OUT 0x84
 #define PLLC_MISC2 0x88
@@ -94,6 +97,8 @@
 #define PMC_PLLM_WB0_OVERRIDE 0x1dc
 #define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
 
+#define CCLKG_BURST_POLICY 0x368
+
 #define UTMIP_PLL_CFG2 0x488
 #define UTMIP_PLL_CFG2_STABLE_COUNT(x) (((x) & 0xffff) << 6)
 #define UTMIP_PLL_CFG2_ACTIVE_DLY_COUNT(x) (((x) & 0x3f) << 18)
@@ -126,6 +131,8 @@
 #ifdef CONFIG_PM_SLEEP
 static struct cpu_clk_suspend_context {
 	u32 clk_csite_src;
+	u32 cclkg_burst;
+	u32 cclkg_divider;
 } tegra124_cpu_clk_sctx;
 #endif
 
@@ -1319,12 +1326,22 @@
 	tegra124_cpu_clk_sctx.clk_csite_src =
 				readl(clk_base + CLK_SOURCE_CSITE);
 	writel(3 << 30, clk_base + CLK_SOURCE_CSITE);
+
+	tegra124_cpu_clk_sctx.cclkg_burst =
+				readl(clk_base + CCLKG_BURST_POLICY);
+	tegra124_cpu_clk_sctx.cclkg_divider =
+				readl(clk_base + CCLKG_BURST_POLICY + 4);
 }
 
 static void tegra124_cpu_clock_resume(void)
 {
 	writel(tegra124_cpu_clk_sctx.clk_csite_src,
 				clk_base + CLK_SOURCE_CSITE);
+
+	writel(tegra124_cpu_clk_sctx.cclkg_burst,
+					clk_base + CCLKG_BURST_POLICY);
+	writel(tegra124_cpu_clk_sctx.cclkg_divider,
+					clk_base + CCLKG_BURST_POLICY + 4);
 }
 #endif
 
@@ -1415,6 +1432,68 @@
 }
 
 /**
+ * tegra124_car_barrier - wait for pending writes to the CAR to complete
+ *
+ * Wait for any outstanding writes to the CAR MMIO space from this CPU
+ * to complete before continuing execution.  No return value.
+ */
+static void tegra124_car_barrier(void)
+{
+	readl_relaxed(clk_base + RST_DFLL_DVCO);
+}
+
+/**
+ * tegra124_clock_assert_dfll_dvco_reset - assert the DFLL's DVCO reset
+ *
+ * Assert the reset line of the DFLL's DVCO.  No return value.
+ */
+static void tegra124_clock_assert_dfll_dvco_reset(void)
+{
+	u32 v;
+
+	v = readl_relaxed(clk_base + RST_DFLL_DVCO);
+	v |= (1 << DVFS_DFLL_RESET_SHIFT);
+	writel_relaxed(v, clk_base + RST_DFLL_DVCO);
+	tegra124_car_barrier();
+}
+
+/**
+ * tegra124_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset
+ *
+ * Deassert the reset line of the DFLL's DVCO, allowing the DVCO to
+ * operate.  No return value.
+ */
+static void tegra124_clock_deassert_dfll_dvco_reset(void)
+{
+	u32 v;
+
+	v = readl_relaxed(clk_base + RST_DFLL_DVCO);
+	v &= ~(1 << DVFS_DFLL_RESET_SHIFT);
+	writel_relaxed(v, clk_base + RST_DFLL_DVCO);
+	tegra124_car_barrier();
+}
+
+static int tegra124_reset_assert(unsigned long id)
+{
+	if (id == TEGRA124_RST_DFLL_DVCO)
+		tegra124_clock_assert_dfll_dvco_reset();
+	else
+		return -EINVAL;
+
+	return 0;
+}
+
+static int tegra124_reset_deassert(unsigned long id)
+{
+	if (id == TEGRA124_RST_DFLL_DVCO)
+		tegra124_clock_deassert_dfll_dvco_reset();
+	else
+		return -EINVAL;
+
+	return 0;
+}
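The assert/deassert callbacks above are only reachable through the generic reset framework once they are hooked up via tegra_init_special_resets() later in this file. As a hedged sketch of the consumer side (this is not the in-tree DFLL code; the "dvco" reset name and the surrounding function are assumptions for illustration), a driver whose DT node references this reset line would deassert it roughly like so:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Hypothetical consumer; assumes a node with
 * resets = <&tegra_car TEGRA124_RST_DFLL_DVCO> and reset-names = "dvco".
 */
static int example_dvco_out_of_reset(struct device *dev)
{
	struct reset_control *dvco_rst;

	dvco_rst = devm_reset_control_get(dev, "dvco");
	if (IS_ERR(dvco_rst))
		return PTR_ERR(dvco_rst);

	/* routed through tegra_clk_rst_deassert() to the callback above */
	return reset_control_deassert(dvco_rst);
}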
+
+/**
  * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
  *
  * Program an initial clock rate and enable or disable clocks needed
@@ -1499,6 +1578,8 @@
 {
 	tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
 				  &pll_x_params);
+	tegra_init_special_resets(1, tegra124_reset_assert,
+				  tegra124_reset_deassert);
 	tegra_add_of_provider(np);
 
 	clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np,
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 41272dc..bf004f0 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/io.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 0af3e83..fad561a 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -16,7 +16,6 @@
 
 #include <linux/io.h>
 #include <linux/delay.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of.h>
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 41cd87c..2a3a4fe 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -14,6 +14,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/clkdev.h>
 #include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
@@ -49,7 +50,6 @@
 #define RST_DEVICES_L			0x004
 #define RST_DEVICES_H			0x008
 #define RST_DEVICES_U			0x00C
-#define RST_DFLL_DVCO			0x2F4
 #define RST_DEVICES_V			0x358
 #define RST_DEVICES_W			0x35C
 #define RST_DEVICES_X			0x28C
@@ -79,6 +79,11 @@
 static int clk_num;
 static struct clk_onecell_data clk_data;
 
+/* Handlers for SoC-specific reset lines */
+static int (*special_reset_assert)(unsigned long);
+static int (*special_reset_deassert)(unsigned long);
+static unsigned int num_special_reset;
+
 static struct tegra_clk_periph_regs periph_regs[] = {
 	[0] = {
 		.enb_reg = CLK_OUT_ENB_L,
@@ -152,19 +157,29 @@
 	 */
 	tegra_read_chipid();
 
-	writel_relaxed(BIT(id % 32),
-			clk_base + periph_regs[id / 32].rst_set_reg);
+	if (id < periph_banks * 32) {
+		writel_relaxed(BIT(id % 32),
+			       clk_base + periph_regs[id / 32].rst_set_reg);
+		return 0;
+	} else if (id < periph_banks * 32 + num_special_reset) {
+		return special_reset_assert(id);
+	}
 
-	return 0;
+	return -EINVAL;
 }
 
 static int tegra_clk_rst_deassert(struct reset_controller_dev *rcdev,
 		unsigned long id)
 {
-	writel_relaxed(BIT(id % 32),
-			clk_base + periph_regs[id / 32].rst_clr_reg);
+	if (id < periph_banks * 32) {
+		writel_relaxed(BIT(id % 32),
+			       clk_base + periph_regs[id / 32].rst_clr_reg);
+		return 0;
+	} else if (id < periph_banks * 32 + num_special_reset) {
+		return special_reset_deassert(id);
+	}
 
-	return 0;
+	return -EINVAL;
 }
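The reset id space handled above is flat: ids below periph_banks * 32 select a bank register and bit, the next num_special_reset ids are forwarded to the SoC callbacks registered with tegra_init_special_resets(), and anything beyond that is rejected. A user-space toy model of that dispatch (the bank count of 6 and the sample ids are illustrative assumptions, not values taken from this patch):

#include <stdio.h>

#define PERIPH_BANKS		6	/* assumed bank count, for illustration only */
#define NUM_SPECIAL_RESET	1

static void classify(unsigned long id)
{
	if (id < PERIPH_BANKS * 32)
		printf("id %3lu -> bank %lu, bit %lu\n", id, id / 32, id % 32);
	else if (id < PERIPH_BANKS * 32 + NUM_SPECIAL_RESET)
		printf("id %3lu -> handed to the special reset callback\n", id);
	else
		printf("id %3lu -> -EINVAL\n", id);
}

int main(void)
{
	classify(14);	/* peripheral reset: bank 0, bit 14 */
	classify(192);	/* first id past the banks: special reset */
	classify(193);	/* past the end: rejected */
	return 0;
}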
 
 struct tegra_clk_periph_regs *get_reg_bank(int clkid)
@@ -286,10 +301,19 @@
 	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 
 	rst_ctlr.of_node = np;
-	rst_ctlr.nr_resets = periph_banks * 32;
+	rst_ctlr.nr_resets = periph_banks * 32 + num_special_reset;
 	reset_controller_register(&rst_ctlr);
 }
 
+void __init tegra_init_special_resets(unsigned int num,
+				      int (*assert)(unsigned long),
+				      int (*deassert)(unsigned long))
+{
+	num_special_reset = num;
+	special_reset_assert = assert;
+	special_reset_deassert = deassert;
+}
+
 void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
 {
 	int i;
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 75ddc8ff..0621887 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -591,6 +591,9 @@
 	char		*con_id;
 };
 
+void tegra_init_special_resets(unsigned int num, int (*assert)(unsigned long),
+			       int (*deassert)(unsigned long));
+
 void tegra_init_from_table(struct tegra_clk_init_table *tbl,
 		struct clk *clks[], int clk_max);
 
diff --git a/drivers/clk/tegra/cvb.c b/drivers/clk/tegra/cvb.c
new file mode 100644
index 0000000..0204e08
--- /dev/null
+++ b/drivers/clk/tegra/cvb.c
@@ -0,0 +1,140 @@
+/*
+ * Utility functions for parsing Tegra CVB voltage tables
+ *
+ * Copyright (C) 2012-2014 NVIDIA Corporation.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/pm_opp.h>
+
+#include "cvb.h"
+
+/* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) */
+static inline int get_cvb_voltage(int speedo, int s_scale,
+				  const struct cvb_coefficients *cvb)
+{
+	int mv;
+
+	/* apply only speedo scale: output mv = cvb_mv * v_scale */
+	mv = DIV_ROUND_CLOSEST(cvb->c2 * speedo, s_scale);
+	mv = DIV_ROUND_CLOSEST((mv + cvb->c1) * speedo, s_scale) + cvb->c0;
+	return mv;
+}
+
+static int round_cvb_voltage(int mv, int v_scale,
+			     const struct rail_alignment *align)
+{
+	/* combined: apply voltage scale and round to cvb alignment step */
+	int uv;
+	int step = (align->step_uv ? : 1000) * v_scale;
+	int offset = align->offset_uv * v_scale;
+
+	uv = max(mv * 1000, offset) - offset;
+	uv = DIV_ROUND_UP(uv, step) * align->step_uv + align->offset_uv;
+	return uv / 1000;
+}
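To make the two helpers above concrete, here is a stand-alone walk-through of the 204 MHz entry from the Tegra124 table earlier in this patch (c0 1112619, c1 -29295, c2 402, speedo_scale 100, voltage_scale 1000, 10 mV rail alignment). The speedo value of 2100 is invented for the example, and since both divisions by s_scale happen to be exact here, plain division stands in for DIV_ROUND_CLOSEST:

#include <stdio.h>

int main(void)
{
	const long c0 = 1112619, c1 = -29295, c2 = 402;
	const long speedo = 2100;			/* hypothetical fuse value */
	const long s_scale = 100, v_scale = 1000;
	const long step_uv = 10000, offset_uv = 0;	/* 10 mV alignment */
	long mv, uv, step;

	/* cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) */
	mv = c2 * speedo / s_scale;			/* 8442 */
	mv = (mv + c1) * speedo / s_scale + c0;		/* 674706, i.e. ~674.7 mV * v_scale */

	/* round up to the rail alignment step, as round_cvb_voltage() does */
	step = step_uv * v_scale;
	uv = mv * 1000 - offset_uv * v_scale;
	uv = (uv + step - 1) / step * step_uv + offset_uv;	/* 680000 uV */

	/* prints 680; build_opp_table() would then clamp this up to min_millivolts (900) */
	printf("%ld mV\n", uv / 1000);
	return 0;
}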
+
+enum {
+	DOWN,
+	UP
+};
+
+static int round_voltage(int mv, const struct rail_alignment *align, int up)
+{
+	if (align->step_uv) {
+		int uv;
+
+		uv = max(mv * 1000, align->offset_uv) - align->offset_uv;
+		uv = (uv + (up ? align->step_uv - 1 : 0)) / align->step_uv;
+		return (uv * align->step_uv + align->offset_uv) / 1000;
+	}
+	return mv;
+}
+
+static int build_opp_table(const struct cvb_table *d,
+			   int speedo_value,
+			   unsigned long max_freq,
+			   struct device *opp_dev)
+{
+	int i, ret, dfll_mv, min_mv, max_mv;
+	const struct cvb_table_freq_entry *table = NULL;
+	const struct rail_alignment *align = &d->alignment;
+
+	min_mv = round_voltage(d->min_millivolts, align, UP);
+	max_mv = round_voltage(d->max_millivolts, align, DOWN);
+
+	for (i = 0; i < MAX_DVFS_FREQS; i++) {
+		table = &d->cvb_table[i];
+		if (!table->freq || (table->freq > max_freq))
+			break;
+
+		/*
+		 * FIXME after clk_round_rate/clk_determine_rate prototypes
+		 * have been updated
+		 */
+		if (table->freq & (1<<31))
+			continue;
+
+		dfll_mv = get_cvb_voltage(
+			speedo_value, d->speedo_scale, &table->coefficients);
+		dfll_mv = round_cvb_voltage(dfll_mv, d->voltage_scale, align);
+		dfll_mv = clamp(dfll_mv, min_mv, max_mv);
+
+		ret = dev_pm_opp_add(opp_dev, table->freq, dfll_mv * 1000);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * tegra_cvb_build_opp_table - build OPP table from Tegra CVB tables
+ * @cvb_tables: array of CVB tables
+ * @sz: number of entries in @cvb_tables
+ * @process_id: process id of the HW module
+ * @speedo_id: speedo id of the HW module
+ * @speedo_value: speedo value of the HW module
+ * @max_rate: highest safe clock rate
+ * @opp_dev: the struct device * for which the OPP table is built
+ *
+ * On Tegra, a CVB table encodes the relationship between operating voltage
+ * and safe maximal frequency for a given module (e.g. GPU or CPU). This
+ * function calculates the optimal voltage-frequency operating points
+ * for the given arguments and exports them via the OPP library for the
+ * given @opp_dev. Returns a pointer to the struct cvb_table that matched
+ * or an ERR_PTR on failure.
+ */
+const struct cvb_table *tegra_cvb_build_opp_table(
+		const struct cvb_table *cvb_tables,
+		size_t sz, int process_id,
+		int speedo_id, int speedo_value,
+		unsigned long max_rate,
+		struct device *opp_dev)
+{
+	int i, ret;
+
+	for (i = 0; i < sz; i++) {
+		const struct cvb_table *d = &cvb_tables[i];
+
+		if (d->speedo_id != -1 && d->speedo_id != speedo_id)
+			continue;
+		if (d->process_id != -1 && d->process_id != process_id)
+			continue;
+
+		ret = build_opp_table(d, speedo_value, max_rate, opp_dev);
+		return ret ? ERR_PTR(ret) : d;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
diff --git a/drivers/clk/tegra/cvb.h b/drivers/clk/tegra/cvb.h
new file mode 100644
index 0000000..f62cdc4
--- /dev/null
+++ b/drivers/clk/tegra/cvb.h
@@ -0,0 +1,67 @@
+/*
+ * Utility functions for parsing Tegra CVB voltage tables
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_TEGRA_CVB_H
+#define __DRIVERS_CLK_TEGRA_CVB_H
+
+#include <linux/types.h>
+
+struct device;
+
+#define MAX_DVFS_FREQS	40
+
+struct rail_alignment {
+	int offset_uv;
+	int step_uv;
+};
+
+struct cvb_coefficients {
+	int c0;
+	int c1;
+	int c2;
+};
+
+struct cvb_table_freq_entry {
+	unsigned long freq;
+	struct cvb_coefficients coefficients;
+};
+
+struct cvb_cpu_dfll_data {
+	u32 tune0_low;
+	u32 tune0_high;
+	u32 tune1;
+};
+
+struct cvb_table {
+	int speedo_id;
+	int process_id;
+
+	int min_millivolts;
+	int max_millivolts;
+	struct rail_alignment alignment;
+
+	int speedo_scale;
+	int voltage_scale;
+	struct cvb_table_freq_entry cvb_table[MAX_DVFS_FREQS];
+	struct cvb_cpu_dfll_data cpu_dfll_data;
+};
+
+const struct cvb_table *tegra_cvb_build_opp_table(
+		const struct cvb_table *cvb_tables,
+		size_t sz, int process_id,
+		int speedo_id, int speedo_value,
+		unsigned long max_rate,
+		struct device *opp_dev);
+
+#endif
diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile
index 105ffd0..d4ac960 100644
--- a/drivers/clk/ti/Makefile
+++ b/drivers/clk/ti/Makefile
@@ -1,16 +1,19 @@
 obj-y					+= clk.o autoidle.o clockdomain.o
 clk-common				= dpll.o composite.o divider.o gate.o \
-					  fixed-factor.o mux.o apll.o
-obj-$(CONFIG_SOC_AM33XX)		+= $(clk-common) clk-33xx.o
-obj-$(CONFIG_SOC_TI81XX)		+= $(clk-common) fapll.o clk-816x.o
+					  fixed-factor.o mux.o apll.o \
+					  clkt_dpll.o clkt_iclk.o clkt_dflt.o
+obj-$(CONFIG_SOC_AM33XX)		+= $(clk-common) clk-33xx.o dpll3xxx.o
+obj-$(CONFIG_SOC_TI81XX)		+= $(clk-common) fapll.o clk-814x.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)		+= $(clk-common) interface.o clk-2xxx.o
 obj-$(CONFIG_ARCH_OMAP3)		+= $(clk-common) interface.o \
-					   clk-3xxx.o
-obj-$(CONFIG_ARCH_OMAP4)		+= $(clk-common) clk-44xx.o
-obj-$(CONFIG_SOC_OMAP5)			+= $(clk-common) clk-54xx.o
+					   clk-3xxx.o dpll3xxx.o
+obj-$(CONFIG_ARCH_OMAP4)		+= $(clk-common) clk-44xx.o \
+					   dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_OMAP5)			+= $(clk-common) clk-54xx.o \
+					   dpll3xxx.o dpll44xx.o
 obj-$(CONFIG_SOC_DRA7XX)		+= $(clk-common) clk-7xx.o \
-					   clk-dra7-atl.o
-obj-$(CONFIG_SOC_AM43XX)		+= $(clk-common) clk-43xx.o
+					   clk-dra7-atl.o dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_AM43XX)		+= $(clk-common) dpll3xxx.o clk-43xx.o
 
 ifdef CONFIG_ATAGS
 obj-$(CONFIG_ARCH_OMAP3)                += clk-3xxx-legacy.o
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 49baf38..f3eab6e 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -15,6 +15,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -27,6 +28,8 @@
 #include <linux/clk/ti.h>
 #include <linux/delay.h>
 
+#include "clock.h"
+
 #define APLL_FORCE_LOCK 0x1
 #define APLL_AUTO_IDLE	0x2
 #define MAX_APLL_WAIT_TRIES		1000000
@@ -47,7 +50,7 @@
 	if (!ad)
 		return -EINVAL;
 
-	clk_name = __clk_get_name(clk->hw.clk);
+	clk_name = clk_hw_get_name(&clk->hw);
 
 	state <<= __ffs(ad->idlest_mask);
 
@@ -170,7 +173,6 @@
 	struct clk_hw_omap *clk_hw = NULL;
 	struct clk_init_data *init = NULL;
 	const char **parent_names = NULL;
-	int i;
 
 	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
 	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
@@ -195,8 +197,7 @@
 	if (!parent_names)
 		goto cleanup;
 
-	for (i = 0; i < init->num_parents; i++)
-		parent_names[i] = of_clk_get_parent_name(node, i);
+	of_clk_parent_fill(node, parent_names, init->num_parents);
 
 	init->parent_names = parent_names;
 
@@ -272,7 +273,7 @@
 
 	if (i == MAX_APLL_WAIT_TRIES) {
 		pr_warn("%s failed to transition to locked\n",
-			__clk_get_name(clk->hw.clk));
+			clk_hw_get_name(&clk->hw));
 		return -EBUSY;
 	}
 
diff --git a/drivers/clk/ti/autoidle.c b/drivers/clk/ti/autoidle.c
index e75c64c9..345af43 100644
--- a/drivers/clk/ti/autoidle.c
+++ b/drivers/clk/ti/autoidle.c
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 struct clk_ti_autoidle {
 	void __iomem		*reg;
 	u8			shift;
@@ -33,8 +35,41 @@
 #define AUTOIDLE_LOW		0x1
 
 static LIST_HEAD(autoidle_clks);
+static LIST_HEAD(clk_hw_omap_clocks);
 
-static void ti_allow_autoidle(struct clk_ti_autoidle *clk)
+/**
+ * omap2_clk_deny_idle - disable autoidle on an OMAP clock
+ * @clk: struct clk * to disable autoidle for
+ *
+ * Disable autoidle on an OMAP clock.
+ */
+int omap2_clk_deny_idle(struct clk *clk)
+{
+	struct clk_hw_omap *c;
+
+	c = to_clk_hw_omap(__clk_get_hw(clk));
+	if (c->ops && c->ops->deny_idle)
+		c->ops->deny_idle(c);
+	return 0;
+}
+
+/**
+ * omap2_clk_allow_idle - enable autoidle on an OMAP clock
+ * @clk: struct clk * to enable autoidle for
+ *
+ * Enable autoidle on an OMAP clock.
+ */
+int omap2_clk_allow_idle(struct clk *clk)
+{
+	struct clk_hw_omap *c;
+
+	c = to_clk_hw_omap(__clk_get_hw(clk));
+	if (c->ops && c->ops->allow_idle)
+		c->ops->allow_idle(c);
+	return 0;
+}
+
+static void _allow_autoidle(struct clk_ti_autoidle *clk)
 {
 	u32 val;
 
@@ -48,7 +83,7 @@
 	ti_clk_ll_ops->clk_writel(val, clk->reg);
 }
 
-static void ti_deny_autoidle(struct clk_ti_autoidle *clk)
+static void _deny_autoidle(struct clk_ti_autoidle *clk)
 {
 	u32 val;
 
@@ -63,31 +98,31 @@
 }
 
 /**
- * of_ti_clk_allow_autoidle_all - enable autoidle for all clocks
+ * _clk_generic_allow_autoidle_all - enable autoidle for all clocks
  *
  * Enables hardware autoidle for all registered DT clocks that have
  * the feature.
  */
-void of_ti_clk_allow_autoidle_all(void)
+static void _clk_generic_allow_autoidle_all(void)
 {
 	struct clk_ti_autoidle *c;
 
 	list_for_each_entry(c, &autoidle_clks, node)
-		ti_allow_autoidle(c);
+		_allow_autoidle(c);
 }
 
 /**
- * of_ti_clk_deny_autoidle_all - disable autoidle for all clocks
+ * _clk_generic_deny_autoidle_all - disable autoidle for all clocks
  *
  * Disables hardware autoidle for all registered DT clocks that have
  * the feature.
  */
-void of_ti_clk_deny_autoidle_all(void)
+static void _clk_generic_deny_autoidle_all(void)
 {
 	struct clk_ti_autoidle *c;
 
 	list_for_each_entry(c, &autoidle_clks, node)
-		ti_deny_autoidle(c);
+		_deny_autoidle(c);
 }
 
 /**
@@ -131,3 +166,67 @@
 
 	return 0;
 }
+
+/**
+ * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
+ * @hw: struct clk_hw * to initialize
+ *
+ * Add the OMAP clock @hw to the internal list of OMAP clocks.  Used
+ * temporarily for autoidle handling, until this support can be
+ * integrated into the common clock framework code in some way.  No
+ * return value.
+ */
+void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
+{
+	struct clk_hw_omap *c;
+
+	if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
+		return;
+
+	c = to_clk_hw_omap(hw);
+	list_add(&c->node, &clk_hw_omap_clocks);
+}
+
+/**
+ * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Enable clock autoidle on all OMAP clocks that have allow_idle
+ * function pointers associated with them.  This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code.  Returns 0.
+ */
+int omap2_clk_enable_autoidle_all(void)
+{
+	struct clk_hw_omap *c;
+
+	list_for_each_entry(c, &clk_hw_omap_clocks, node)
+		if (c->ops && c->ops->allow_idle)
+			c->ops->allow_idle(c);
+
+	_clk_generic_allow_autoidle_all();
+
+	return 0;
+}
+
+/**
+ * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
+ * support it
+ *
+ * Disable clock autoidle on all OMAP clocks that have deny_idle
+ * function pointers associated with them.  This function is intended
+ * to be temporary until support for this is added to the common clock
+ * code.  Returns 0.
+ */
+int omap2_clk_disable_autoidle_all(void)
+{
+	struct clk_hw_omap *c;
+
+	list_for_each_entry(c, &clk_hw_omap_clocks, node)
+		if (c->ops && c->ops->deny_idle)
+			c->ops->deny_idle(c);
+
+	_clk_generic_deny_autoidle_all();
+
+	return 0;
+}
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
index c808ab3..657c4fe 100644
--- a/drivers/clk/ti/clk-2xxx.c
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -16,9 +16,11 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-provider.h>
+#include <linux/clk.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 static struct ti_dt_clk omap2xxx_clks[] = {
 	DT_CLK(NULL, "func_32k_ck", "func_32k_ck"),
 	DT_CLK(NULL, "secure_32k_ck", "secure_32k_ck"),
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 028b337..ef2ec64 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -16,9 +16,12 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 static struct ti_dt_clk am33xx_clks[] = {
 	DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
 	DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
index 0b61548..0fbf8a9 100644
--- a/drivers/clk/ti/clk-3xxx-legacy.c
+++ b/drivers/clk/ti/clk-3xxx-legacy.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 757636d..676ee8f 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -16,9 +16,220 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
+/*
+ * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define DPLL5_FREQ_FOR_USBHOST		120000000
+
+#define OMAP3430ES2_ST_DSS_IDLE_SHIFT			1
+#define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT		5
+#define OMAP3430ES2_ST_SSI_IDLE_SHIFT			8
+
+#define OMAP34XX_CM_IDLEST_VAL				1
+
+/*
+ * In AM35xx IPSS, the {ICK,FCK} enable bits for modules are exported
+ * in the same register at a bit offset of 0x8. The EN_ACK for ICK is
+ * at an offset of 4 from ICK enable bit.
+ */
+#define AM35XX_IPSS_ICK_MASK			0xF
+#define AM35XX_IPSS_ICK_EN_ACK_OFFSET		0x4
+#define AM35XX_IPSS_ICK_FCK_OFFSET		0x8
+#define AM35XX_IPSS_CLK_IDLEST_VAL		0
+
+#define AM35XX_ST_IPSS_SHIFT			5
+
+/**
+ * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
+ * from the CM_{I,F}CLKEN bit.  Pass back the correct info via
+ * @idlest_reg and @idlest_bit.  No return value.
+ */
+static void omap3430es2_clk_ssi_find_idlest(struct clk_hw_omap *clk,
+					    void __iomem **idlest_reg,
+					    u8 *idlest_bit,
+					    u8 *idlest_val)
+{
+	u32 r;
+
+	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+	*idlest_reg = (__force void __iomem *)r;
+	*idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
+	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait = {
+	.allow_idle	= omap2_clkt_iclk_allow_idle,
+	.deny_idle	= omap2_clkt_iclk_deny_idle,
+	.find_idlest	= omap3430es2_clk_ssi_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
+ * target IDLEST bits.  For our purposes, we are concerned with the
+ * target IDLEST bits, which exist at a different bit position than
+ * the *CLKEN bit position for these modules (DSS and USBHOST) (The
+ * default find_idlest code assumes that they are at the same
+ * position.)  No return value.
+ */
+static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk_hw_omap *clk,
+						    void __iomem **idlest_reg,
+						    u8 *idlest_bit,
+						    u8 *idlest_val)
+{
+	u32 r;
+
+	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+	*idlest_reg = (__force void __iomem *)r;
+	/* USBHOST_IDLE has same shift */
+	*idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
+	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait = {
+	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait = {
+	.allow_idle	= omap2_clkt_iclk_allow_idle,
+	.deny_idle	= omap2_clkt_iclk_deny_idle,
+	.find_idlest	= omap3430es2_clk_dss_usbhost_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
+
+/**
+ * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
+ * shift from the CM_{I,F}CLKEN bit.  Pass back the correct info via
+ * @idlest_reg and @idlest_bit.  No return value.
+ */
+static void omap3430es2_clk_hsotgusb_find_idlest(struct clk_hw_omap *clk,
+						 void __iomem **idlest_reg,
+						 u8 *idlest_bit,
+						 u8 *idlest_val)
+{
+	u32 r;
+
+	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+	*idlest_reg = (__force void __iomem *)r;
+	*idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
+	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait = {
+	.allow_idle	= omap2_clkt_iclk_allow_idle,
+	.deny_idle	= omap2_clkt_iclk_deny_idle,
+	.find_idlest	= omap3430es2_clk_hsotgusb_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
+
+/**
+ * am35xx_clk_find_idlest - return clock ACK info for AM35XX IPSS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The interface clocks on the AM35xx IPSS reflect the clock idle status
+ * in the enable register itself, at a bit offset of 4 from the enable
+ * bit. A value of 1 indicates that the clock is enabled.
+ */
+static void am35xx_clk_find_idlest(struct clk_hw_omap *clk,
+				   void __iomem **idlest_reg,
+				   u8 *idlest_bit,
+				   u8 *idlest_val)
+{
+	*idlest_reg = (__force void __iomem *)(clk->enable_reg);
+	*idlest_bit = clk->enable_bit + AM35XX_IPSS_ICK_EN_ACK_OFFSET;
+	*idlest_val = AM35XX_IPSS_CLK_IDLEST_VAL;
+}
+
+/**
+ * am35xx_clk_find_companion - find companion clock to @clk
+ * @clk: struct clk * to find the companion clock of
+ * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
+ * @other_bit: u8 ** to return the companion clock bit shift in
+ *
+ * Some clocks don't have companion clocks.  For example, modules with
+ * only an interface clock (such as HECC) don't have a companion
+ * clock.  Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active.  Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts.  No return value.
+ */
+static void am35xx_clk_find_companion(struct clk_hw_omap *clk,
+				      void __iomem **other_reg,
+				      u8 *other_bit)
+{
+	*other_reg = (__force void __iomem *)(clk->enable_reg);
+	if (clk->enable_bit & AM35XX_IPSS_ICK_MASK)
+		*other_bit = clk->enable_bit + AM35XX_IPSS_ICK_FCK_OFFSET;
+	else
+		*other_bit = clk->enable_bit - AM35XX_IPSS_ICK_FCK_OFFSET;
+}
+
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait = {
+	.find_idlest	= am35xx_clk_find_idlest,
+	.find_companion	= am35xx_clk_find_companion,
+};
+
+/**
+ * am35xx_clk_ipss_find_idlest - return CM_IDLEST info for IPSS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * The IPSS target CM_IDLEST bit is at a different shift from the
+ * CM_{I,F}CLKEN bit.  Pass back the correct info via @idlest_reg
+ * and @idlest_bit.  No return value.
+ */
+static void am35xx_clk_ipss_find_idlest(struct clk_hw_omap *clk,
+					void __iomem **idlest_reg,
+					u8 *idlest_bit,
+					u8 *idlest_val)
+{
+	u32 r;
+
+	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+	*idlest_reg = (__force void __iomem *)r;
+	*idlest_bit = AM35XX_ST_IPSS_SHIFT;
+	*idlest_val = OMAP34XX_CM_IDLEST_VAL;
+}
+
+const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait = {
+	.allow_idle	= omap2_clkt_iclk_allow_idle,
+	.deny_idle	= omap2_clkt_iclk_deny_idle,
+	.find_idlest	= am35xx_clk_ipss_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
 
 static struct ti_dt_clk omap3xxx_clks[] = {
 	DT_CLK(NULL, "apb_pclk", "dummy_apb_pclk"),
@@ -324,6 +535,30 @@
 	OMAP3_SOC_OMAP3630,
 };
 
+/**
+ * omap3_clk_lock_dpll5 - locks DPLL5
+ *
+ * Locks DPLL5 to a pre-defined frequency. This is required for proper
+ * operation of USB.
+ */
+void __init omap3_clk_lock_dpll5(void)
+{
+	struct clk *dpll5_clk;
+	struct clk *dpll5_m2_clk;
+
+	dpll5_clk = clk_get(NULL, "dpll5_ck");
+	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+	clk_prepare_enable(dpll5_clk);
+
+	/* Program dpll5_m2_clk divider for no division */
+	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
+	clk_prepare_enable(dpll5_m2_clk);
+	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+
+	clk_disable_unprepare(dpll5_m2_clk);
+	clk_disable_unprepare(dpll5_clk);
+}
+
 static int __init omap3xxx_dt_clk_init(int soc_type)
 {
 	if (soc_type == OMAP3_SOC_AM35XX || soc_type == OMAP3_SOC_OMAP3630 ||
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 3795fce..097fc90 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -16,9 +16,12 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 static struct ti_dt_clk am43xx_clks[] = {
 	DT_CLK(NULL, "clk_32768_ck", "clk_32768_ck"),
 	DT_CLK(NULL, "clk_rc32k_ck", "clk_rc32k_ck"),
@@ -71,6 +74,7 @@
 	DT_CLK(NULL, "clk_24mhz", "clk_24mhz"),
 	DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
 	DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+	DT_CLK(NULL, "dpll_clksel_mac_clk", "dpll_clksel_mac_clk"),
 	DT_CLK(NULL, "gpio0_dbclk_mux_ck", "gpio0_dbclk_mux_ck"),
 	DT_CLK(NULL, "gpio0_dbclk", "gpio0_dbclk"),
 	DT_CLK(NULL, "gpio1_dbclk", "gpio1_dbclk"),
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index 581db77..7a8b51b 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -16,6 +16,8 @@
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 /*
  * OMAP4 ABE DPLL default frequency. In OMAP4460 TRM version V, section
  * "3.6.3.2.3 CM1_ABE Clock Generator" states that the "DPLL_ABE_X2_CLK
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 96c69a3..59ce2fa 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -17,6 +17,8 @@
 #include <linux/io.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #define OMAP5_DPLL_ABE_DEFFREQ				98304000
 
 /*
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 63b8323..9b5b289 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -16,11 +16,12 @@
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #define DRA7_DPLL_ABE_DEFFREQ				180633600
 #define DRA7_DPLL_GMAC_DEFFREQ				1000000000
 #define DRA7_DPLL_USB_DEFFREQ				960000000
 
-
 static struct ti_dt_clk dra7xx_clks[] = {
 	DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"),
 	DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"),
diff --git a/drivers/clk/ti/clk-814x.c b/drivers/clk/ti/clk-814x.c
new file mode 100644
index 0000000..e172920
--- /dev/null
+++ b/drivers/clk/ti/clk-814x.c
@@ -0,0 +1,33 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_dt_clk dm814_clks[] = {
+	DT_CLK(NULL, "devosc_ck", "devosc_ck"),
+	DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+	DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
+	DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
+	DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
+	DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
+	DT_CLK(NULL, "timer_sys_ck", "devosc_ck"),
+	DT_CLK(NULL, "cpsw_125mhz_gclk", "cpsw_125mhz_gclk"),
+	DT_CLK(NULL, "cpsw_cpts_rft_clk", "cpsw_cpts_rft_clk"),
+	{ .node_name = NULL },
+};
+
+int __init dm814x_dt_clk_init(void)
+{
+	ti_dt_clocks_register(dm814_clks);
+	omap2_clk_disable_autoidle_all();
+	omap2_clk_enable_init_clocks(NULL, 0);
+
+	return 0;
+}
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
index 9451e65..1dfad0c 100644
--- a/drivers/clk/ti/clk-816x.c
+++ b/drivers/clk/ti/clk-816x.c
@@ -14,6 +14,8 @@
 #include <linux/clk-provider.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 static struct ti_dt_clk dm816x_clks[] = {
 	DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
 	DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
@@ -42,7 +44,7 @@
 	"ddr_pll_clk3",
 };
 
-int __init ti81xx_dt_clk_init(void)
+int __init dm816x_dt_clk_init(void)
 {
 	ti_dt_clocks_register(dm816x_clks);
 	omap2_clk_disable_autoidle_all();
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 19e543a..2e14dfb 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/slab.h>
 #include <linux/io.h>
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 64bb5e8..b5bcd77 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -15,12 +15,15 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/list.h>
+#include <linux/regmap.h>
+#include <linux/bootmem.h>
 
 #include "clock.h"
 
@@ -30,6 +33,63 @@
 struct ti_clk_ll_ops *ti_clk_ll_ops;
 static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];
 
+static struct ti_clk_features ti_clk_features;
+
+struct clk_iomap {
+	struct regmap *regmap;
+	void __iomem *mem;
+};
+
+static struct clk_iomap *clk_memmaps[CLK_MAX_MEMMAPS];
+
+static void clk_memmap_writel(u32 val, void __iomem *reg)
+{
+	struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+	struct clk_iomap *io = clk_memmaps[r->index];
+
+	if (io->regmap)
+		regmap_write(io->regmap, r->offset, val);
+	else
+		writel_relaxed(val, io->mem + r->offset);
+}
+
+static u32 clk_memmap_readl(void __iomem *reg)
+{
+	u32 val;
+	struct clk_omap_reg *r = (struct clk_omap_reg *)&reg;
+	struct clk_iomap *io = clk_memmaps[r->index];
+
+	if (io->regmap)
+		regmap_read(io->regmap, r->offset, &val);
+	else
+		val = readl_relaxed(io->mem + r->offset);
+
+	return val;
+}
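The void __iomem * these helpers receive is not a dereferenceable address: the code that builds these register "addresses" (seen further down in this file, where reg->offset is filled in and the value is cast back to void __iomem *) packs a memmap index plus a register offset into the pointer value, and the accessors above unpack it to pick either a regmap or a plain MMIO mapping from clk_memmaps[]. A user-space toy of the packing trick follows; the u16/u16 layout is an assumption for illustration, not a statement about the real struct clk_omap_reg:

#include <stdint.h>
#include <stdio.h>

struct fake_omap_reg {		/* assumed layout, small enough to fit in a pointer */
	uint16_t offset;
	uint16_t index;
};

int main(void)
{
	void *reg = NULL;	/* the "address" that gets handed around */
	struct fake_omap_reg *r = (struct fake_omap_reg *)&reg;

	r->index = 2;		/* which clk_memmaps[] slot to use */
	r->offset = 0x1dc;	/* register offset within that provider */

	/* a reader reinterprets the pointer value exactly the same way */
	printf("index %u, offset 0x%x\n",
	       ((struct fake_omap_reg *)&reg)->index,
	       ((struct fake_omap_reg *)&reg)->offset);
	return 0;
}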
+
+/**
+ * ti_clk_setup_ll_ops - setup low level clock operations
+ * @ops: low level clock ops descriptor
+ *
+ * Sets up the low-level clock operations for the TI clock driver. This
+ * is used to provide various callbacks for the clock driver towards
+ * platform-specific code. Returns 0 on success, -EBUSY if ll_ops have been
+ * registered already.
+ */
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
+{
+	if (ti_clk_ll_ops) {
+		pr_err("Attempt to register ll_ops multiple times.\n");
+		return -EBUSY;
+	}
+
+	ti_clk_ll_ops = ops;
+	ops->clk_readl = clk_memmap_readl;
+	ops->clk_writel = clk_memmap_writel;
+
+	return 0;
+}
+
 /**
  * ti_dt_clocks_register - register DT alias clocks during boot
  * @oclks: list of clocks to register
@@ -134,32 +194,67 @@
 
 	reg->offset = val;
 
-	return (void __iomem *)tmp;
+	return (__force void __iomem *)tmp;
 }
 
 /**
- * ti_dt_clk_init_provider - init master clock provider
+ * omap2_clk_provider_init - init master clock provider
  * @parent: master node
  * @index: internal index for clk_reg_ops
+ * @syscon: syscon regmap pointer for accessing clock registers
+ * @mem: iomem pointer for the clock provider memory area, only used if
+ *       syscon is not provided
  *
  * Initializes a master clock IP block. This basically sets up the
  * mapping from clocks node to the memory map index. All the clocks
  * are then initialized through the common of_clk_init call, and the
  * clocks will access their memory maps based on the node layout.
+ * Returns 0 on success.
  */
-void ti_dt_clk_init_provider(struct device_node *parent, int index)
+int __init omap2_clk_provider_init(struct device_node *parent, int index,
+				   struct regmap *syscon, void __iomem *mem)
 {
 	struct device_node *clocks;
+	struct clk_iomap *io;
 
 	/* get clocks for this parent */
 	clocks = of_get_child_by_name(parent, "clocks");
 	if (!clocks) {
 		pr_err("%s missing 'clocks' child node.\n", parent->name);
-		return;
+		return -EINVAL;
 	}
 
 	/* add clocks node info */
 	clocks_node_ptr[index] = clocks;
+
+	io = kzalloc(sizeof(*io), GFP_KERNEL);
+	if (!io)
+		return -ENOMEM;
+
+	io->regmap = syscon;
+	io->mem = mem;
+
+	clk_memmaps[index] = io;
+
+	return 0;
+}
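As a hedged caller-side sketch of the provider registration above (the node arguments, index values and function name are invented; no such caller appears in this patch), a plain MMIO provider passes an ioremapped pointer with a NULL regmap, while a syscon-backed one passes the regmap with a NULL memory pointer:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "clock.h"	/* omap2_clk_provider_init() prototype */

static int __init example_register_providers(struct device_node *prcm_np,
					     struct device_node *scm_np)
{
	struct regmap *syscon;
	void __iomem *mem;
	int ret;

	/* provider 0: plain MMIO, accessed with readl/writel_relaxed */
	mem = of_iomap(prcm_np, 0);
	if (!mem)
		return -ENOMEM;

	ret = omap2_clk_provider_init(prcm_np, 0, NULL, mem);
	if (ret)
		return ret;

	/* provider 1: syscon-backed, accessed through regmap */
	syscon = syscon_node_to_regmap(scm_np);
	if (IS_ERR(syscon))
		return PTR_ERR(syscon);

	return omap2_clk_provider_init(scm_np, 1, syscon, NULL);
}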
+
+/**
+ * omap2_clk_legacy_provider_init - initialize a legacy clock provider
+ * @index: index for the clock provider
+ * @mem: iomem pointer for the clock provider memory area
+ *
+ * Initializes a legacy clock provider memory mapping.
+ */
+void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem)
+{
+	struct clk_iomap *io;
+
+	io = memblock_virt_alloc(sizeof(*io), 0);
+
+	io->mem = mem;
+
+	clk_memmaps[index] = io;
 }
 
 /**
@@ -244,11 +339,11 @@
 	if (!IS_ERR(clk)) {
 		setup->clk = clk;
 		if (setup->clkdm_name) {
-			if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+			clk_hw = __clk_get_hw(clk);
+			if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
 				pr_warn("can't setup clkdm for basic clk %s\n",
 					setup->name);
 			} else {
-				clk_hw = __clk_get_hw(clk);
 				to_clk_hw_omap(clk_hw)->clkdm_name =
 					setup->clkdm_name;
 				omap2_init_clk_clkdm(clk_hw);
@@ -311,3 +406,50 @@
 	return 0;
 }
 #endif
+
+/**
+ * ti_clk_setup_features - setup clock features flags
+ * @features: features definition to use
+ *
+ * Initializes the clock driver feature flags from platform-provided
+ * data. No return value.
+ */
+void __init ti_clk_setup_features(struct ti_clk_features *features)
+{
+	memcpy(&ti_clk_features, features, sizeof(*features));
+}
+
+/**
+ * ti_clk_get_features - get clock driver features flags
+ *
+ * Get TI clock driver features description. Returns a pointer
+ * to the current feature setup.
+ */
+const struct ti_clk_features *ti_clk_get_features(void)
+{
+	return &ti_clk_features;
+}
+
+/**
+ * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
+ * @clk_names: ptr to an array of strings of clock names to enable
+ * @num_clocks: number of clock names in @clk_names
+ *
+ * Prepare and enable a list of clocks, named by @clk_names.  No
+ * return value. XXX Deprecated; only needed until these clocks are
+ * properly claimed and enabled by the drivers or core code that uses
+ * them.  XXX What code disables & calls clk_put on these clocks?
+ */
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
+{
+	struct clk *init_clk;
+	int i;
+
+	for (i = 0; i < num_clocks; i++) {
+		init_clk = clk_get(NULL, clk_names[i]);
+		if (WARN(IS_ERR(init_clk), "could not find init clock %s\n",
+			 clk_names[i]))
+			continue;
+		clk_prepare_enable(init_clk);
+	}
+}
diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c
new file mode 100644
index 0000000..90d7d8a
--- /dev/null
+++ b/drivers/clk/ti/clkt_dflt.c
@@ -0,0 +1,316 @@
+/*
+ * Default clock type
+ *
+ * Copyright (C) 2005-2008, 2015 Texas Instruments, Inc.
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ * Tero Kristo <t-kristo@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+#include <linux/delay.h>
+
+#include "clock.h"
+
+/*
+ * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
+ * for a module to indicate that it is no longer in idle
+ */
+#define MAX_MODULE_ENABLE_WAIT		100000
+
+/*
+ * CM module register offsets, used for calculating the companion
+ * register addresses.
+ */
+#define CM_FCLKEN			0x0000
+#define CM_ICLKEN			0x0010
+
+/**
+ * _wait_idlest_generic - wait for a module to leave the idle state
+ * @clk: module clock to wait for (needed for register offsets)
+ * @reg: virtual address of module IDLEST register
+ * @mask: value to mask against to determine if the module is active
+ * @idlest: idle state indicator (0 or 1) for the clock
+ * @name: name of the clock (for printk)
+ *
+ * Wait for a module to leave idle, where its idle-status register is
+ * not inside the CM module.  Returns 1 if the module left idle
+ * promptly, or 0 if the module did not leave idle before the timeout
+ * elapsed.  XXX Deprecated - should be moved into drivers for the
+ * individual IP block that the IDLEST register exists in.
+ */
+static int _wait_idlest_generic(struct clk_hw_omap *clk, void __iomem *reg,
+				u32 mask, u8 idlest, const char *name)
+{
+	int i = 0, ena = 0;
+
+	ena = (idlest) ? 0 : mask;
+
+	/* Wait until module enters enabled state */
+	for (i = 0; i < MAX_MODULE_ENABLE_WAIT; i++) {
+		if ((ti_clk_ll_ops->clk_readl(reg) & mask) == ena)
+			break;
+		udelay(1);
+	}
+
+	if (i < MAX_MODULE_ENABLE_WAIT)
+		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
+			 name, i);
+	else
+		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
+		       name, MAX_MODULE_ENABLE_WAIT);
+
+	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
+}
+
+/**
+ * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
+ * @clk: struct clk * belonging to the module
+ *
+ * If the necessary clocks for the OMAP hardware IP block that
+ * corresponds to clock @clk are enabled, then wait for the module to
+ * indicate readiness (i.e., to leave IDLE).  This code does not
+ * belong in the clock code and will be moved in the medium term to
+ * module-dependent code.  No return value.
+ */
+static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
+{
+	void __iomem *companion_reg, *idlest_reg;
+	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
+	s16 prcm_mod;
+	int r;
+
+	/* Not all modules have multiple clocks that their IDLEST depends on */
+	if (clk->ops->find_companion) {
+		clk->ops->find_companion(clk, &companion_reg, &other_bit);
+		if (!(ti_clk_ll_ops->clk_readl(companion_reg) &
+		      (1 << other_bit)))
+			return;
+	}
+
+	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
+	r = ti_clk_ll_ops->cm_split_idlest_reg(idlest_reg, &prcm_mod,
+					       &idlest_reg_id);
+	if (r) {
+		/* IDLEST register not in the CM module */
+		_wait_idlest_generic(clk, idlest_reg, (1 << idlest_bit),
+				     idlest_val, clk_hw_get_name(&clk->hw));
+	} else {
+		ti_clk_ll_ops->cm_wait_module_ready(0, prcm_mod, idlest_reg_id,
+						    idlest_bit);
+	}
+}
+
+/**
+ * omap2_clk_dflt_find_companion - find companion clock to @clk
+ * @clk: struct clk * to find the companion clock of
+ * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
+ * @other_bit: u8 ** to return the companion clock bit shift in
+ *
+ * Note: We don't need special code here for INVERT_ENABLE for the
+ * time being since INVERT_ENABLE only applies to clocks enabled by
+ * CM_CLKEN_PLL
+ *
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
+ * just a matter of XORing the bits.
+ *
+ * Some clocks don't have companion clocks.  For example, modules with
+ * only an interface clock (such as MAILBOXES) don't have a companion
+ * clock.  Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active.  Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts.  No return value.
+ */
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+				   void __iomem **other_reg, u8 *other_bit)
+{
+	u32 r;
+
+	/*
+	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
+	 * it's just a matter of XORing the bits.
+	 */
+	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
+
+	*other_reg = (__force void __iomem *)r;
+	*other_bit = clk->enable_bit;
+}
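+
+/*
+ * Worked example (illustrative offsets): CM_FCLKEN is at offset 0x00
+ * and CM_ICLKEN at offset 0x10 within a CM module, so XORing an
+ * enable register address with (CM_FCLKEN ^ CM_ICLKEN) = 0x10 flips
+ * between the functional-clock and interface-clock enable registers.
+ */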
+
+/**
+ * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
+ * @clk: struct clk * to find IDLEST info for
+ * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
+ * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
+ * @idlest_val: u8 * to return the idle status indicator
+ *
+ * Return the CM_IDLEST register address and bit shift corresponding
+ * to the module that "owns" this clock.  This default code assumes
+ * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
+ * the IDLEST register address ID corresponds to the CM_*CLKEN
+ * register address ID (e.g., that CM_FCLKEN2 corresponds to
+ * CM_IDLEST2).  This is not true for all modules.  No return value.
+ */
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+				void __iomem **idlest_reg, u8 *idlest_bit,
+				u8 *idlest_val)
+{
+	u32 r;
+
+	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+	*idlest_reg = (__force void __iomem *)r;
+	*idlest_bit = clk->enable_bit;
+
+	/*
+	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
+	 * 34xx reverses this, just to keep us on our toes.
+	 * AM35xx uses both, depending on the module.
+	 */
+	*idlest_val = ti_clk_get_features()->cm_idlest_val;
+}
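+
+/*
+ * Worked example (illustrative offsets): (enable_reg & ~0xf0) | 0x20
+ * maps CM_FCLKEN1 (offset 0x00) or CM_ICLKEN1 (offset 0x10) to
+ * CM_IDLEST1 (offset 0x20), and CM_FCLKEN2 (offset 0x04) or
+ * CM_ICLKEN2 (offset 0x14) to CM_IDLEST2 (offset 0x24).
+ */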
+
+/**
+ * omap2_dflt_clk_enable - enable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to enable
+ *
+ * Enable the clock @hw in the hardware.  We first call into the OMAP
+ * clockdomain code to "enable" the corresponding clockdomain if this
+ * is the first enabled user of the clockdomain.  Then program the
+ * hardware to enable the clock.  Then wait for the IP block that uses
+ * this clock to leave idle (if applicable).  Returns the error value
+ * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
+ * if @hw has a null clock enable_reg, or zero upon success.
+ */
+int omap2_dflt_clk_enable(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk;
+	u32 v;
+	int ret = 0;
+	bool clkdm_control;
+
+	if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL)
+		clkdm_control = false;
+	else
+		clkdm_control = true;
+
+	clk = to_clk_hw_omap(hw);
+
+	if (clkdm_control && clk->clkdm) {
+		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+		if (ret) {
+			WARN(1,
+			     "%s: could not enable %s's clockdomain %s: %d\n",
+			     __func__, clk_hw_get_name(hw),
+			     clk->clkdm_name, ret);
+			return ret;
+		}
+	}
+
+	if (unlikely(!clk->enable_reg)) {
+		pr_err("%s: %s missing enable_reg\n", __func__,
+		       clk_hw_get_name(hw));
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* FIXME should not have INVERT_ENABLE bit here */
+	v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
+	if (clk->flags & INVERT_ENABLE)
+		v &= ~(1 << clk->enable_bit);
+	else
+		v |= (1 << clk->enable_bit);
+	ti_clk_ll_ops->clk_writel(v, clk->enable_reg);
+	v = ti_clk_ll_ops->clk_readl(clk->enable_reg); /* OCP barrier */
+
+	if (clk->ops && clk->ops->find_idlest)
+		_omap2_module_wait_ready(clk);
+
+	return 0;
+
+err:
+	if (clkdm_control && clk->clkdm)
+		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+	return ret;
+}
+
+/**
+ * omap2_dflt_clk_disable - disable a clock in the hardware
+ * @hw: struct clk_hw * of the clock to disable
+ *
+ * Disable the clock @hw in the hardware, and call into the OMAP
+ * clockdomain code to "disable" the corresponding clockdomain if all
+ * clocks/hwmods in that clockdomain are now disabled.  No return
+ * value.
+ */
+void omap2_dflt_clk_disable(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk;
+	u32 v;
+
+	clk = to_clk_hw_omap(hw);
+	if (!clk->enable_reg) {
+		/*
+		 * 'independent' here refers to a clock which is not
+		 * controlled by its parent.
+		 */
+		pr_err("%s: independent clock %s has no enable_reg\n",
+		       __func__, clk_hw_get_name(hw));
+		return;
+	}
+
+	v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
+	if (clk->flags & INVERT_ENABLE)
+		v |= (1 << clk->enable_bit);
+	else
+		v &= ~(1 << clk->enable_bit);
+	ti_clk_ll_ops->clk_writel(v, clk->enable_reg);
+	/* No OCP barrier needed here since it is a disable operation */
+
+	if (!(ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) &&
+	    clk->clkdm)
+		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/**
+ * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
+ * @hw: struct clk_hw * to check
+ *
+ * Return 1 if the clock represented by @hw is enabled in the
+ * hardware, or 0 otherwise.  Intended for use in the struct
+ * clk_ops.is_enabled function pointer.
+ */
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	u32 v;
+
+	v = ti_clk_ll_ops->clk_readl(clk->enable_reg);
+
+	if (clk->flags & INVERT_ENABLE)
+		v ^= BIT(clk->enable_bit);
+
+	v &= BIT(clk->enable_bit);
+
+	return v ? 1 : 0;
+}
+
+const struct clk_hw_omap_ops clkhwops_wait = {
+	.find_idlest	= omap2_clk_dflt_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
new file mode 100644
index 0000000..9023ca9
--- /dev/null
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -0,0 +1,370 @@
+/*
+ * OMAP2/3/4 DPLL clock functions
+ *
+ * Copyright (C) 2005-2008 Texas Instruments, Inc.
+ * Copyright (C) 2004-2010 Nokia Corporation
+ *
+ * Contacts:
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#include <asm/div64.h>
+
+#include "clock.h"
+
+/* DPLL rate rounding: minimum DPLL multiplier, divider values */
+#define DPLL_MIN_MULTIPLIER		2
+#define DPLL_MIN_DIVIDER		1
+
+/* Possible error results from _dpll_test_mult */
+#define DPLL_MULT_UNDERFLOW		-1
+
+/*
+ * Scale factor to mitigate roundoff errors in DPLL rate rounding.
+ * The higher the scale factor, the greater the risk of arithmetic overflow,
+ * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
+ * must be a power of DPLL_SCALE_BASE.
+ */
+#define DPLL_SCALE_FACTOR		64
+#define DPLL_SCALE_BASE			2
+#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
+					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))
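+
+/*
+ * Rounding example (assumed values): with DPLL_SCALE_FACTOR = 64 and
+ * DPLL_ROUNDING_VAL = 32, a prescaled multiplier of 890 unscales to
+ * 890 / 64 = 13 remainder 58; since 58 >= 32, _dpll_test_mult()
+ * rounds the unscaled multiplier up to m = 14.
+ */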
+
+/*
+ * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
+ * From device data manual section 4.3 "DPLL and DLL Specifications".
+ */
+#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN	500000
+#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX	2500000
+
+/* _dpll_test_fint() return codes */
+#define DPLL_FINT_UNDERFLOW		-1
+#define DPLL_FINT_INVALID		-2
+
+/* Private functions */
+
+/*
+ * _dpll_test_fint - test whether an Fint value is valid for the DPLL
+ * @clk: DPLL struct clk to test
+ * @n: divider value (N) to test
+ *
+ * Tests whether a particular divider @n will result in a valid DPLL
+ * internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter
+ * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
+ * (assuming that it is counting N upwards), or -2 if the enclosing loop
+ * should skip to the next iteration (again assuming N is increasing).
+ */
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
+{
+	struct dpll_data *dd;
+	long fint, fint_min, fint_max;
+	int ret = 0;
+
+	dd = clk->dpll_data;
+
+	/* DPLL divider must result in a valid jitter correction val */
+	fint = clk_hw_get_rate(clk_hw_get_parent(&clk->hw)) / n;
+
+	if (dd->flags & DPLL_J_TYPE) {
+		fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
+		fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
+	} else {
+		fint_min = ti_clk_get_features()->fint_min;
+		fint_max = ti_clk_get_features()->fint_max;
+	}
+
+	if (!fint_min || !fint_max) {
+		WARN(1, "No fint limits available!\n");
+		return DPLL_FINT_INVALID;
+	}
+
+	if (fint < fint_min) {
+		pr_debug("rejecting n=%d due to Fint failure, lowering max_divider\n",
+			 n);
+		dd->max_divider = n;
+		ret = DPLL_FINT_UNDERFLOW;
+	} else if (fint > fint_max) {
+		pr_debug("rejecting n=%d due to Fint failure, boosting min_divider\n",
+			 n);
+		dd->min_divider = n;
+		ret = DPLL_FINT_INVALID;
+	} else if (fint > ti_clk_get_features()->fint_band1_max &&
+		   fint < ti_clk_get_features()->fint_band2_min) {
+		pr_debug("rejecting n=%d due to Fint failure\n", n);
+		ret = DPLL_FINT_INVALID;
+	}
+
+	return ret;
+}
+
+static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
+					    unsigned int m, unsigned int n)
+{
+	unsigned long long num;
+
+	num = (unsigned long long)parent_rate * m;
+	do_div(num, n);
+	return num;
+}
+
+/*
+ * _dpll_test_mult - test a DPLL multiplier value
+ * @m: pointer to the DPLL m (multiplier) value under test
+ * @n: current DPLL n (divider) value under test
+ * @new_rate: pointer to storage for the resulting rounded rate
+ * @target_rate: the desired DPLL rate
+ * @parent_rate: the DPLL's parent clock rate
+ *
+ * This code tests a DPLL multiplier value, ensuring that the
+ * resulting rate will not be higher than the target_rate, and that
+ * the multiplier value itself is valid for the DPLL.  Initially, the
+ * integer pointed to by the m argument should be prescaled by
+ * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
+ * a non-scaled m upon return.  This non-scaled m will result in a
+ * new_rate as close as possible to target_rate (but not greater than
+ * target_rate) given the current (parent_rate, n, prescaled m)
+ * triple. Returns DPLL_MULT_UNDERFLOW in the event that the
+ * non-scaled m attempted to underflow, which can allow the calling
+ * function to bail out early; or 0 upon success.
+ */
+static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
+			   unsigned long target_rate,
+			   unsigned long parent_rate)
+{
+	int r = 0, carry = 0;
+
+	/* Unscale m and round if necessary */
+	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
+		carry = 1;
+	*m = (*m / DPLL_SCALE_FACTOR) + carry;
+
+	/*
+	 * The new rate must be <= the target rate to avoid programming
+	 * a rate that is impossible for the hardware to handle
+	 */
+	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
+	if (*new_rate > target_rate) {
+		(*m)--;
+		*new_rate = 0;
+	}
+
+	/* Guard against m underflow */
+	if (*m < DPLL_MIN_MULTIPLIER) {
+		*m = DPLL_MIN_MULTIPLIER;
+		*new_rate = 0;
+		r = DPLL_MULT_UNDERFLOW;
+	}
+
+	if (*new_rate == 0)
+		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
+
+	return r;
+}
+
+/**
+ * _omap2_dpll_is_in_bypass - check if DPLL is in bypass mode or not
+ * @v: bitfield value of the DPLL enable
+ *
+ * Checks given DPLL enable bitfield to see whether the DPLL is in bypass
+ * mode or not. Returns 1 if the DPLL is in bypass, 0 otherwise.
+ */
+static int _omap2_dpll_is_in_bypass(u32 v)
+{
+	u8 mask, val;
+
+	mask = ti_clk_get_features()->dpll_bypass_vals;
+
+	/*
+	 * Each set bit in the mask corresponds to a bypass value equal
+	 * to that bit's position.  Walk the set bits in the mask and
+	 * compare each against the given register value.
+	 */
+	while (mask) {
+		val = __ffs(mask);
+		mask ^= (1 << val);
+		if (v == val)
+			return 1;
+	}
+
+	return 0;
+}
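+
+/*
+ * Example (hypothetical mask): with dpll_bypass_vals = 0x60 (bits 5
+ * and 6 set), enable-field values of 5 or 6 are treated as bypass,
+ * while any other value (e.g. 7, "locked") is not.
+ */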
+
+/* Public functions */
+u8 omap2_init_dpll_parent(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	u32 v;
+	struct dpll_data *dd;
+
+	dd = clk->dpll_data;
+	if (!dd)
+		return -EINVAL;
+
+	v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+	v &= dd->enable_mask;
+	v >>= __ffs(dd->enable_mask);
+
+	/* Reparent the struct clk in case the dpll is in bypass */
+	if (_omap2_dpll_is_in_bypass(v))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
+ * @clk: struct clk * of a DPLL
+ *
+ * DPLLs can be locked or bypassed - basically, enabled or disabled.
+ * When locked, the DPLL output depends on the M and N values.  When
+ * bypassed, on OMAP2xxx, the output rate is either the 32 kHz clock
+ * or sys_clk.  Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
+ * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
+ * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
+ * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
+ * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
+ * if the clock @clk is not a DPLL.
+ */
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
+{
+	long long dpll_clk;
+	u32 dpll_mult, dpll_div, v;
+	struct dpll_data *dd;
+
+	dd = clk->dpll_data;
+	if (!dd)
+		return 0;
+
+	/* Return bypass rate if DPLL is bypassed */
+	v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+	v &= dd->enable_mask;
+	v >>= __ffs(dd->enable_mask);
+
+	if (_omap2_dpll_is_in_bypass(v))
+		return clk_get_rate(dd->clk_bypass);
+
+	v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
+	dpll_mult = v & dd->mult_mask;
+	dpll_mult >>= __ffs(dd->mult_mask);
+	dpll_div = v & dd->div1_mask;
+	dpll_div >>= __ffs(dd->div1_mask);
+
+	dpll_clk = (long long)clk_get_rate(dd->clk_ref) * dpll_mult;
+	do_div(dpll_clk, dpll_div + 1);
+
+	return dpll_clk;
+}
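+
+/*
+ * Worked example (assumed register values): with a 19.2 MHz reference,
+ * dpll_mult = 125 and dpll_div = 11, CLKOUT is
+ * 19,200,000 * 125 / (11 + 1) = 200,000,000 Hz.
+ */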
+
+/* DPLL rate rounding code */
+
+/**
+ * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * @hw: struct clk_hw * for a DPLL
+ * @target_rate: desired DPLL clock rate
+ * @parent_rate: pointer to the parent clock rate (unused by this function)
+ *
+ * Given a DPLL and a desired target rate, round the target rate to a
+ * possible, programmable rate for this DPLL.  Attempts to select the
+ * minimum possible n.  Stores the computed (m, n) in the DPLL's
+ * dpll_data structure so set_rate() will not need to call this
+ * (expensive) function again.  Returns ~0 if the target rate cannot
+ * be rounded, or the rounded rate upon success.
+ */
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+			   unsigned long *parent_rate)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	int m, n, r, scaled_max_m;
+	int min_delta_m = INT_MAX, min_delta_n = INT_MAX;
+	unsigned long scaled_rt_rp;
+	unsigned long new_rate = 0;
+	struct dpll_data *dd;
+	unsigned long ref_rate;
+	long delta;
+	long prev_min_delta = LONG_MAX;
+	const char *clk_name;
+
+	if (!clk || !clk->dpll_data)
+		return ~0;
+
+	dd = clk->dpll_data;
+
+	ref_rate = clk_get_rate(dd->clk_ref);
+	clk_name = clk_hw_get_name(hw);
+	pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
+		 clk_name, target_rate);
+
+	scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
+
+	dd->last_rounded_rate = 0;
+
+	for (n = dd->min_divider; n <= dd->max_divider; n++) {
+		/* Is the (input clk, divider) pair valid for the DPLL? */
+		r = _dpll_test_fint(clk, n);
+		if (r == DPLL_FINT_UNDERFLOW)
+			break;
+		else if (r == DPLL_FINT_INVALID)
+			continue;
+
+		/* Compute the scaled DPLL multiplier, based on the divider */
+		m = scaled_rt_rp * n;
+
+		/*
+		 * Since we're counting n up, a m overflow means we
+		 * can bail out completely (since as n increases in
+		 * the next iteration, there's no way that m can
+		 * increase beyond the current m)
+		 */
+		if (m > scaled_max_m)
+			break;
+
+		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+				    ref_rate);
+
+		/* m can't be set low enough for this n - try with a larger n */
+		if (r == DPLL_MULT_UNDERFLOW)
+			continue;
+
+		/* skip rates above our target rate */
+		delta = target_rate - new_rate;
+		if (delta < 0)
+			continue;
+
+		if (delta < prev_min_delta) {
+			prev_min_delta = delta;
+			min_delta_m = m;
+			min_delta_n = n;
+		}
+
+		pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n",
+			 clk_name, m, n, new_rate);
+
+		if (delta == 0)
+			break;
+	}
+
+	if (prev_min_delta == LONG_MAX) {
+		pr_debug("clock: %s: cannot round to rate %lu\n",
+			 clk_name, target_rate);
+		return ~0;
+	}
+
+	dd->last_rounded_m = min_delta_m;
+	dd->last_rounded_n = min_delta_n;
+	dd->last_rounded_rate = target_rate - prev_min_delta;
+
+	return dd->last_rounded_rate;
+}
diff --git a/drivers/clk/ti/clkt_iclk.c b/drivers/clk/ti/clkt_iclk.c
new file mode 100644
index 0000000..38c3690
--- /dev/null
+++ b/drivers/clk/ti/clkt_iclk.c
@@ -0,0 +1,101 @@
+/*
+ * OMAP2/3 interface clock control
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ * Paul Walmsley
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* Register offsets */
+#define OMAP24XX_CM_FCLKEN2		0x04
+#define CM_AUTOIDLE			0x30
+#define CM_ICLKEN			0x10
+#define CM_IDLEST			0x20
+
+#define OMAP24XX_CM_IDLEST_VAL		0
+
+/* Private functions */
+
+/* Set the clock's autoidle bit in the corresponding CM_AUTOIDLE register */
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk)
+{
+	u32 v;
+	void __iomem *r;
+
+	r = (__force void __iomem *)
+		((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
+
+	v = ti_clk_ll_ops->clk_readl(r);
+	v |= (1 << clk->enable_bit);
+	ti_clk_ll_ops->clk_writel(v, r);
+}
+
+/* Clear the clock's autoidle bit in the corresponding CM_AUTOIDLE register */
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk)
+{
+	u32 v;
+	void __iomem *r;
+
+	r = (__force void __iomem *)
+		((__force u32)clk->enable_reg ^ (CM_AUTOIDLE ^ CM_ICLKEN));
+
+	v = ti_clk_ll_ops->clk_readl(r);
+	v &= ~(1 << clk->enable_bit);
+	ti_clk_ll_ops->clk_writel(v, r);
+}
+
+/**
+ * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ * @idlest_val: pointer to a u8 to store the CM_IDLEST indicator
+ *
+ * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
+ * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE.  This custom function
+ * passes back the correct CM_IDLEST register address for I2CHS
+ * modules.  No return value.
+ */
+static void omap2430_clk_i2chs_find_idlest(struct clk_hw_omap *clk,
+					   void __iomem **idlest_reg,
+					   u8 *idlest_bit,
+					   u8 *idlest_val)
+{
+	u32 r;
+
+	r = ((__force u32)clk->enable_reg ^ (OMAP24XX_CM_FCLKEN2 ^ CM_IDLEST));
+	*idlest_reg = (__force void __iomem *)r;
+	*idlest_bit = clk->enable_bit;
+	*idlest_val = OMAP24XX_CM_IDLEST_VAL;
+}
+
+/* Public data */
+
+const struct clk_hw_omap_ops clkhwops_iclk = {
+	.allow_idle	= omap2_clkt_iclk_allow_idle,
+	.deny_idle	= omap2_clkt_iclk_deny_idle,
+};
+
+const struct clk_hw_omap_ops clkhwops_iclk_wait = {
+	.allow_idle	= omap2_clkt_iclk_allow_idle,
+	.deny_idle	= omap2_clkt_iclk_deny_idle,
+	.find_idlest	= omap2_clk_dflt_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
+
+/* 2430 I2CHS has non-standard IDLEST register */
+const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait = {
+	.find_idlest	= omap2430_clk_i2chs_find_idlest,
+	.find_companion	= omap2_clk_dflt_find_companion,
+};
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 404158d..90f3f47 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -154,6 +154,35 @@
 	u8 recal_st_bit;
 };
 
+/* Composite clock component types */
+enum {
+	CLK_COMPONENT_TYPE_GATE = 0,
+	CLK_COMPONENT_TYPE_DIVIDER,
+	CLK_COMPONENT_TYPE_MUX,
+	CLK_COMPONENT_TYPE_MAX,
+};
+
+/**
+ * struct ti_dt_clk - OMAP DT clock alias declarations
+ * @lk: clock lookup definition
+ * @node_name: clock DT node to map to
+ */
+struct ti_dt_clk {
+	struct clk_lookup		lk;
+	char				*node_name;
+};
+
+#define DT_CLK(dev, con, name)		\
+	{				\
+		.lk = {			\
+			.dev_id = dev,	\
+			.con_id = con,	\
+		},			\
+		.node_name = name,	\
+	}
+
+typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
+
 struct clk *ti_clk_register_gate(struct ti_clk *setup);
 struct clk *ti_clk_register_interface(struct ti_clk *setup);
 struct clk *ti_clk_register_mux(struct ti_clk *setup);
@@ -169,4 +198,80 @@
 struct clk *ti_clk_register_clk(struct ti_clk *setup);
 int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
 
+void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
+void ti_dt_clocks_register(struct ti_dt_clk *oclks);
+int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
+		      ti_of_clk_init_cb_t func);
+int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+
+void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw);
+int of_ti_clk_autoidle_setup(struct device_node *node);
+void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
+
+extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
+extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
+extern const struct clk_hw_omap_ops clkhwops_wait;
+extern const struct clk_hw_omap_ops clkhwops_iclk;
+extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
+extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
+
+extern const struct clk_ops ti_clk_divider_ops;
+extern const struct clk_ops ti_clk_mux_ops;
+
+int omap2_clkops_enable_clkdm(struct clk_hw *hw);
+void omap2_clkops_disable_clkdm(struct clk_hw *hw);
+
+int omap2_dflt_clk_enable(struct clk_hw *hw);
+void omap2_dflt_clk_disable(struct clk_hw *hw);
+int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
+void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
+				   void __iomem **other_reg,
+				   u8 *other_bit);
+void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
+				void __iomem **idlest_reg,
+				u8 *idlest_bit, u8 *idlest_val);
+
+void omap2_clkt_iclk_allow_idle(struct clk_hw_omap *clk);
+void omap2_clkt_iclk_deny_idle(struct clk_hw_omap *clk);
+
+u8 omap2_init_dpll_parent(struct clk_hw *hw);
+int omap3_noncore_dpll_enable(struct clk_hw *hw);
+void omap3_noncore_dpll_disable(struct clk_hw *hw);
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+					   unsigned long rate,
+					   unsigned long parent_rate,
+					   u8 index);
+int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+				      struct clk_rate_request *req);
+long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
+			   unsigned long *parent_rate);
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+				    unsigned long parent_rate);
+
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
+int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
+			 unsigned long parent_rate);
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate, u8 index);
+void omap3_clk_lock_dpll5(void);
+
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+					 unsigned long parent_rate);
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+				    unsigned long target_rate,
+				    unsigned long *parent_rate);
+int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+				       struct clk_rate_request *req);
+
+extern struct ti_clk_ll_ops *ti_clk_ll_ops;
+
 #endif
diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c
index b82ef07..b9bc3b8 100644
--- a/drivers/clk/ti/clockdomain.c
+++ b/drivers/clk/ti/clockdomain.c
@@ -15,15 +15,94 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
+/**
+ * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being enabled
+ *
+ * Increment the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 1, the clockdomain will be "enabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_enable() as
+ * their enable function pointer.  Passes along the return value of
+ * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
+ * clockdomain, or 0 if clock framework-based clockdomain control is
+ * not implemented.
+ */
+int omap2_clkops_enable_clkdm(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk;
+	int ret = 0;
+
+	clk = to_clk_hw_omap(hw);
+
+	if (unlikely(!clk->clkdm)) {
+		pr_err("%s: %s: no clkdm set ?!\n", __func__,
+		       clk_hw_get_name(hw));
+		return -EINVAL;
+	}
+
+	if (unlikely(clk->enable_reg))
+		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
+		       clk_hw_get_name(hw));
+
+	if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) {
+		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+		       __func__, clk_hw_get_name(hw));
+		return 0;
+	}
+
+	ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
+	     __func__, clk_hw_get_name(hw), clk->clkdm_name, ret);
+
+	return ret;
+}
+
+/**
+ * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
+ * @hw: struct clk_hw * of the clock being disabled
+ *
+ * Decrement the usecount of the clockdomain of the clock pointed to
+ * by @hw; if the usecount is 0, the clockdomain will be "disabled."
+ * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
+ * disable function pointer.  No return value.
+ */
+void omap2_clkops_disable_clkdm(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk;
+
+	clk = to_clk_hw_omap(hw);
+
+	if (unlikely(!clk->clkdm)) {
+		pr_err("%s: %s: no clkdm set ?!\n", __func__,
+		       clk_hw_get_name(hw));
+		return;
+	}
+
+	if (unlikely(clk->enable_reg))
+		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
+		       clk_hw_get_name(hw));
+
+	if (ti_clk_get_features()->flags & TI_CLK_DISABLE_CLKDM_CONTROL) {
+		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
+		       __func__, clk_hw_get_name(hw));
+		return;
+	}
+
+	ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
 static void __init of_ti_clockdomain_setup(struct device_node *node)
 {
 	struct clk *clk;
@@ -41,12 +120,12 @@
 			       __func__, node->full_name, i, PTR_ERR(clk));
 			continue;
 		}
-		if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+		clk_hw = __clk_get_hw(clk);
+		if (clk_hw_get_flags(clk_hw) & CLK_IS_BASIC) {
 			pr_warn("can't setup clkdm for basic clk %s\n",
 				__clk_get_name(clk));
 			continue;
 		}
-		clk_hw = __clk_get_hw(clk);
 		to_clk_hw_omap(clk_hw)->clkdm_name = clkdm_name;
 		omap2_init_clk_clkdm(clk_hw);
 	}
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index 96f83ce..dbef218 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -276,7 +276,6 @@
 	int num_parents;
 	const char **parent_names;
 	struct component_clk *clk;
-	int i;
 
 	num_parents = of_clk_get_parent_count(node);
 
@@ -289,8 +288,7 @@
 	if (!parent_names)
 		return -ENOMEM;
 
-	for (i = 0; i < num_parents; i++)
-		parent_names[i] = of_clk_get_parent_name(node, i);
+	of_clk_parent_fill(node, parent_names, num_parents);
 
 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
 	if (!clk) {
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ff5f117..5b17268 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -109,7 +109,7 @@
 	if (!div) {
 		WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
 		     "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
-		     __clk_get_name(hw->clk));
+		     clk_hw_get_name(hw));
 		return parent_rate;
 	}
 
@@ -155,7 +155,7 @@
 
 	maxdiv = _get_maxdiv(divider);
 
-	if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
+	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
 		parent_rate = *best_parent_rate;
 		bestdiv = DIV_ROUND_UP(parent_rate, rate);
 		bestdiv = bestdiv == 0 ? 1 : bestdiv;
@@ -181,7 +181,7 @@
 			*best_parent_rate = parent_rate_saved;
 			return i;
 		}
-		parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
+		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
 				MULT_ROUND_UP(rate, i));
 		now = DIV_ROUND_UP(parent_rate, i);
 		if (now <= rate && now > best) {
@@ -194,7 +194,7 @@
 	if (!bestdiv) {
 		bestdiv = _get_maxdiv(divider);
 		*best_parent_rate =
-			__clk_round_rate(__clk_get_parent(hw->clk), 1);
+			clk_hw_round_rate(clk_hw_get_parent(hw), 1);
 	}
 
 	return bestdiv;
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 2aacf7a..5519b38 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -15,6 +15,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -162,7 +163,7 @@
 	clk = clk_register(NULL, &clk_hw->hw);
 
 	if (!IS_ERR(clk)) {
-		omap2_init_clk_hw_omap_clocks(clk);
+		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 		kfree(clk_hw->hw.init->parent_names);
 		kfree(clk_hw->hw.init);
@@ -319,7 +320,7 @@
 	if (IS_ERR(clk)) {
 		kfree(clk_hw);
 	} else {
-		omap2_init_clk_hw_omap_clocks(clk);
+		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
 		of_clk_add_provider(node, of_clk_src_simple_get, clk);
 	}
 }
@@ -341,7 +342,6 @@
 	struct clk_init_data *init = NULL;
 	const char **parent_names = NULL;
 	struct dpll_data *dd = NULL;
-	int i;
 	u8 dpll_mode = 0;
 
 	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
@@ -370,8 +370,7 @@
 	if (!parent_names)
 		goto cleanup;
 
-	for (i = 0; i < init->num_parents; i++)
-		parent_names[i] = of_clk_get_parent_name(node, i);
+	of_clk_parent_fill(node, parent_names, init->num_parents);
 
 	init->parent_names = parent_names;
 
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
new file mode 100644
index 0000000..f4dec00
--- /dev/null
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -0,0 +1,817 @@
+/*
+ * OMAP3/4 - specific DPLL control functions
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ * Copyright (C) 2009-2010 Nokia Corporation
+ *
+ * Written by Paul Walmsley
+ * Testing and integration fixes by Jouni Högander
+ *
+ * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
+ * Menon
+ *
+ * Parts of this code are based on code written by
+ * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/clkdev.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
+#define DPLL_AUTOIDLE_DISABLE			0x0
+#define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1
+
+#define MAX_DPLL_WAIT_TRIES		1000000
+
+#define OMAP3XXX_EN_DPLL_LOCKED		0x7
+
+/* Forward declarations */
+static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
+static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
+static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);
+
+/* Private functions */
+
+/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
+static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
+{
+	const struct dpll_data *dd;
+	u32 v;
+
+	dd = clk->dpll_data;
+
+	v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+	v &= ~dd->enable_mask;
+	v |= clken_bits << __ffs(dd->enable_mask);
+	ti_clk_ll_ops->clk_writel(v, dd->control_reg);
+}
+
+/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
+static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
+{
+	const struct dpll_data *dd;
+	int i = 0;
+	int ret = -EINVAL;
+	const char *clk_name;
+
+	dd = clk->dpll_data;
+	clk_name = clk_hw_get_name(&clk->hw);
+
+	state <<= __ffs(dd->idlest_mask);
+
+	while (((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask)
+		!= state) && i < MAX_DPLL_WAIT_TRIES) {
+		i++;
+		udelay(1);
+	}
+
+	if (i == MAX_DPLL_WAIT_TRIES) {
+		pr_err("clock: %s failed transition to '%s'\n",
+		       clk_name, (state) ? "locked" : "bypassed");
+	} else {
+		pr_debug("clock: %s transition to '%s' in %d loops\n",
+			 clk_name, (state) ? "locked" : "bypassed", i);
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* From 3430 TRM ES2 4.7.6.2 */
+static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
+{
+	unsigned long fint;
+	u16 f = 0;
+
+	fint = clk_get_rate(clk->dpll_data->clk_ref) / n;
+
+	pr_debug("clock: fint is %lu\n", fint);
+
+	if (fint >= 750000 && fint <= 1000000)
+		f = 0x3;
+	else if (fint > 1000000 && fint <= 1250000)
+		f = 0x4;
+	else if (fint > 1250000 && fint <= 1500000)
+		f = 0x5;
+	else if (fint > 1500000 && fint <= 1750000)
+		f = 0x6;
+	else if (fint > 1750000 && fint <= 2100000)
+		f = 0x7;
+	else if (fint > 7500000 && fint <= 10000000)
+		f = 0xB;
+	else if (fint > 10000000 && fint <= 12500000)
+		f = 0xC;
+	else if (fint > 12500000 && fint <= 15000000)
+		f = 0xD;
+	else if (fint > 15000000 && fint <= 17500000)
+		f = 0xE;
+	else if (fint > 17500000 && fint <= 21000000)
+		f = 0xF;
+	else
+		pr_debug("clock: unknown freqsel setting for %d\n", n);
+
+	return f;
+}
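+
+/*
+ * Example (assumed rates): a 19.2 MHz reference divided by n = 12
+ * gives fint = 1.6 MHz, which falls into the 1.5-1.75 MHz band above
+ * and therefore selects freqsel = 0x6.
+ */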
+
+/*
+ * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
+ * readiness before returning.  Will save and restore the DPLL's
+ * autoidle state across the enable, per the CDP code.  If the DPLL
+ * locked successfully, return 0; if the DPLL did not lock in the time
+ * allotted, or DPLL3 was passed in, return -EINVAL.
+ */
+static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
+{
+	const struct dpll_data *dd;
+	u8 ai;
+	u8 state = 1;
+	int r = 0;
+
+	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));
+
+	dd = clk->dpll_data;
+	state <<= __ffs(dd->idlest_mask);
+
+	/* Check if already locked */
+	if ((ti_clk_ll_ops->clk_readl(dd->idlest_reg) & dd->idlest_mask) ==
+	    state)
+		goto done;
+
+	ai = omap3_dpll_autoidle_read(clk);
+
+	if (ai)
+		omap3_dpll_deny_idle(clk);
+
+	_omap3_dpll_write_clken(clk, DPLL_LOCKED);
+
+	r = _omap3_wait_dpll_status(clk, 1);
+
+	if (ai)
+		omap3_dpll_allow_idle(clk);
+
+done:
+	return r;
+}
+
+/*
+ * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
+ * bypass mode, the DPLL's rate is set equal to its parent clock's
+ * rate.  Waits for the DPLL to report readiness before returning.
+ * Will save and restore the DPLL's autoidle state across the enable,
+ * per the CDP code.  If the DPLL entered bypass mode successfully,
+ * return 0; if the DPLL did not enter bypass in the time allotted, or
+ * DPLL3 was passed in, or the DPLL does not support low-power bypass,
+ * return -EINVAL.
+ */
+static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
+{
+	int r;
+	u8 ai;
+
+	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
+		return -EINVAL;
+
+	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
+		 clk_hw_get_name(&clk->hw));
+
+	ai = omap3_dpll_autoidle_read(clk);
+
+	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);
+
+	r = _omap3_wait_dpll_status(clk, 0);
+
+	if (ai)
+		omap3_dpll_allow_idle(clk);
+
+	return r;
+}
+
+/*
+ * _omap3_noncore_dpll_stop - instruct a DPLL to stop
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop. Will save and
+ * restore the DPLL's autoidle state across the stop, per the CDP
+ * code.  If DPLL3 was passed in, or the DPLL does not support
+ * low-power stop, return -EINVAL; otherwise, return 0.
+ */
+static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
+{
+	u8 ai;
+
+	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
+		return -EINVAL;
+
+	pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));
+
+	ai = omap3_dpll_autoidle_read(clk);
+
+	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);
+
+	if (ai)
+		omap3_dpll_allow_idle(clk);
+
+	return 0;
+}
+
+/**
+ * _lookup_dco - Lookup DCO used by j-type DPLL
+ * @clk: pointer to a DPLL struct clk
+ * @dco: digital control oscillator selector
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
+{
+	unsigned long fint, clkinp; /* watch out for overflow */
+
+	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
+	fint = (clkinp / n) * m;
+
+	if (fint < 1000000000)
+		*dco = 2;
+	else
+		*dco = 4;
+}
+
+/**
+ * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
+ * @clk: pointer to a DPLL struct clk
+ * @sd_div: target sigma-delta divider
+ * @m: DPLL multiplier to set
+ * @n: DPLL divider to set
+ *
+ * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
+ *
+ * XXX This code is not needed for 3430/AM35xx; can it be optimized
+ * out in non-multi-OMAP builds for those chips?
+ */
+static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
+{
+	unsigned long clkinp, sd; /* watch out for overflow */
+	int mod1, mod2;
+
+	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
+
+	/*
+	 * target sigma-delta to near 250MHz
+	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
+	 */
+	clkinp /= 100000; /* shift from MHz to 10*Hz for 38.4 and 19.2 */
+	mod1 = (clkinp * m) % (250 * n);
+	sd = (clkinp * m) / (250 * n);
+	mod2 = sd % 10;
+	sd /= 10;
+
+	if (mod1 || mod2)
+		sd++;
+	*sd_div = sd;
+}
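+
+/*
+ * Worked example (assumed values): clkinp = 38.4 MHz, m = 360, n = 4.
+ * clkinp scales to 384, so sd = (384 * 360) / (250 * 4) = 138 with a
+ * nonzero remainder, then 138 / 10 = 13 with a nonzero remainder, and
+ * the final round-up yields sd_div = 14, i.e. ceil(90 * 38.4 / 250).
+ */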
+
+/*
+ * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
+ * @clk:	struct clk * of DPLL to set
+ * @freqsel:	FREQSEL value to set
+ *
+ * Program the DPLL with the last M, N values calculated, and wait for
+ * the DPLL to lock. Returns -EINVAL upon error, or 0 upon success.
+ */
+static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
+{
+	struct dpll_data *dd = clk->dpll_data;
+	u8 dco, sd_div;
+	u32 v;
+
+	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
+	_omap3_noncore_dpll_bypass(clk);
+
+	/*
+	 * Set jitter correction. Jitter correction applicable for OMAP343X
+	 * only since freqsel field is no longer present on other devices.
+	 */
+	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
+		v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+		v &= ~dd->freqsel_mask;
+		v |= freqsel << __ffs(dd->freqsel_mask);
+		ti_clk_ll_ops->clk_writel(v, dd->control_reg);
+	}
+
+	/* Set DPLL multiplier, divider */
+	v = ti_clk_ll_ops->clk_readl(dd->mult_div1_reg);
+
+	/* Handle Duty Cycle Correction */
+	if (dd->dcc_mask) {
+		if (dd->last_rounded_rate >= dd->dcc_rate)
+			v |= dd->dcc_mask; /* Enable DCC */
+		else
+			v &= ~dd->dcc_mask; /* Disable DCC */
+	}
+
+	v &= ~(dd->mult_mask | dd->div1_mask);
+	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
+	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
+
+	/* Configure dco and sd_div for dplls that have these fields */
+	if (dd->dco_mask) {
+		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
+		v &= ~(dd->dco_mask);
+		v |= dco << __ffs(dd->dco_mask);
+	}
+	if (dd->sddiv_mask) {
+		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
+			      dd->last_rounded_n);
+		v &= ~(dd->sddiv_mask);
+		v |= sd_div << __ffs(dd->sddiv_mask);
+	}
+
+	ti_clk_ll_ops->clk_writel(v, dd->mult_div1_reg);
+
+	/* Set 4X multiplier and low-power mode */
+	if (dd->m4xen_mask || dd->lpmode_mask) {
+		v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+
+		if (dd->m4xen_mask) {
+			if (dd->last_rounded_m4xen)
+				v |= dd->m4xen_mask;
+			else
+				v &= ~dd->m4xen_mask;
+		}
+
+		if (dd->lpmode_mask) {
+			if (dd->last_rounded_lpmode)
+				v |= dd->lpmode_mask;
+			else
+				v &= ~dd->lpmode_mask;
+		}
+
+		ti_clk_ll_ops->clk_writel(v, dd->control_reg);
+	}
+
+	/* We let the clock framework set the other output dividers later */
+
+	/* REVISIT: Set ramp-up delay? */
+
+	_omap3_noncore_dpll_lock(clk);
+
+	return 0;
+}
+
+/* Public functions */
+
+/**
+ * omap3_dpll_recalc - recalculate DPLL rate
+ * @hw: struct clk_hw of the DPLL to recalculate the rate for
+ * @parent_rate: rate of the parent clock
+ *
+ * Recalculate and propagate the DPLL rate.
+ */
+unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+	return omap2_get_dpll_rate(clk);
+}
+
+/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */
+
+/**
+ * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
+ * The choice of modes depends on the DPLL's programmed rate: if it is
+ * the same as the DPLL's parent clock, it will enter bypass;
+ * otherwise, it will enter lock.  This code will wait for the DPLL to
+ * indicate readiness before returning, unless the DPLL takes too long
+ * to enter the target state.  Intended to be used as the struct clk's
+ * enable function.  If DPLL3 was passed in, or the DPLL does not
+ * support low-power stop, or if the DPLL took too long to enter
+ * bypass or lock, return -EINVAL; otherwise, return 0.
+ */
+int omap3_noncore_dpll_enable(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	int r;
+	struct dpll_data *dd;
+	struct clk_hw *parent;
+
+	dd = clk->dpll_data;
+	if (!dd)
+		return -EINVAL;
+
+	if (clk->clkdm) {
+		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
+		if (r) {
+			WARN(1,
+			     "%s: could not enable %s's clockdomain %s: %d\n",
+			     __func__, clk_hw_get_name(hw),
+			     clk->clkdm_name, r);
+			return r;
+		}
+	}
+
+	parent = clk_hw_get_parent(hw);
+
+	if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
+		WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
+		r = _omap3_noncore_dpll_bypass(clk);
+	} else {
+		WARN_ON(parent != __clk_get_hw(dd->clk_ref));
+		r = _omap3_noncore_dpll_lock(clk);
+	}
+
+	return r;
+}
+
+/**
+ * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
+ * @clk: pointer to a DPLL struct clk
+ *
+ * Instructs a non-CORE DPLL to enter low-power stop.  This function is
+ * intended for use in struct clkops.  No return value.
+ */
+void omap3_noncore_dpll_disable(struct clk_hw *hw)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+
+	_omap3_noncore_dpll_stop(clk);
+	if (clk->clkdm)
+		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
+}
+
+/* Non-CORE DPLL rate set code */
+
+/**
+ * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
+ * @hw: pointer to the clock to determine rate for
+ * @req: target rate request
+ *
+ * Determines which DPLL mode to use for reaching a desired target rate.
+ * Checks whether the DPLL shall be in bypass or locked mode, and if
+ * locked, calculates the M,N values for the DPLL via round-rate.
+ * Returns 0 on success or a negative error value on failure.
+ */
+int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
+				      struct clk_rate_request *req)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	struct dpll_data *dd;
+
+	if (!req->rate)
+		return -EINVAL;
+
+	dd = clk->dpll_data;
+	if (!dd)
+		return -EINVAL;
+
+	if (clk_get_rate(dd->clk_bypass) == req->rate &&
+	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+		req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+	} else {
+		req->rate = omap2_dpll_round_rate(hw, req->rate,
+					  &req->best_parent_rate);
+		req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+	}
+
+	req->best_parent_rate = req->rate;
+
+	return 0;
+}
+
+/**
+ * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
+ * @hw: pointer to the clock to set parent for
+ * @index: parent index to select
+ *
+ * Sets parent for a DPLL clock. This sets the DPLL into bypass or
+ * locked mode. Returns 0 with success, negative error value otherwise.
+ */
+int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	int ret;
+
+	if (!hw)
+		return -EINVAL;
+
+	if (index)
+		ret = _omap3_noncore_dpll_bypass(clk);
+	else
+		ret = _omap3_noncore_dpll_lock(clk);
+
+	return ret;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
+ * @hw: pointer to the clock to set parent for
+ * @rate: target rate for the clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Sets the rate for a DPLL clock.  First checks that the clock's parent
+ * is the reference clock (the rate cannot be changed while the DPLL is
+ * in bypass mode), then performs the rate change.  Returns 0 on success
+ * or a negative error value otherwise.
+ */
+int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	struct dpll_data *dd;
+	u16 freqsel = 0;
+	int ret;
+
+	if (!hw || !rate)
+		return -EINVAL;
+
+	dd = clk->dpll_data;
+	if (!dd)
+		return -EINVAL;
+
+	if (clk_hw_get_parent(hw) != __clk_get_hw(dd->clk_ref))
+		return -EINVAL;
+
+	if (dd->last_rounded_rate == 0)
+		return -EINVAL;
+
+	/* Freqsel is available only on OMAP343X devices */
+	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
+		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
+		WARN_ON(!freqsel);
+	}
+
+	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
+		 clk_hw_get_name(hw), rate);
+
+	ret = omap3_noncore_dpll_program(clk, freqsel);
+
+	return ret;
+}
+
+/**
+ * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
+ * @hw: pointer to the clock to set rate and parent for
+ * @rate: target rate for the DPLL
+ * @parent_rate: clock rate of the DPLL parent
+ * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
+ *
+ * Sets the rate and parent for a DPLL clock.  If the new parent is the
+ * bypass clock, only the parent is selected.  Otherwise a rate change
+ * is performed, which effectively also changes the parent because the
+ * DPLL is put into locked mode.  Returns 0 on success or a negative
+ * error value otherwise.
+ */
+int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
+					   unsigned long rate,
+					   unsigned long parent_rate,
+					   u8 index)
+{
+	int ret;
+
+	if (!hw || !rate)
+		return -EINVAL;
+
+	/*
+	 * If the new parent is clk-ref (index 0), we only need to set the
+	 * rate; the parent is changed automatically as part of the lock
+	 * sequence.  With clk-bypass as the new parent, we only need to
+	 * change the parent.
+	 */
+	if (index)
+		ret = omap3_noncore_dpll_set_parent(hw, index);
+	else
+		ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+
+	return ret;
+}
+
+/* DPLL autoidle read/set code */
+
+/**
+ * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
+ * @clk: struct clk * of the DPLL to read
+ *
+ * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
+ * -EINVAL if passed a null pointer or if the struct clk does not
+ * appear to refer to a DPLL.
+ */
+static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
+{
+	const struct dpll_data *dd;
+	u32 v;
+
+	if (!clk || !clk->dpll_data)
+		return -EINVAL;
+
+	dd = clk->dpll_data;
+
+	if (!dd->autoidle_reg)
+		return -EINVAL;
+
+	v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
+	v &= dd->autoidle_mask;
+	v >>= __ffs(dd->autoidle_mask);
+
+	return v;
+}
+
+/**
+ * omap3_dpll_allow_idle - enable DPLL autoidle bits
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Enable DPLL automatic idle control.  This automatic idle mode
+ * switching takes effect only when the DPLL is locked, at least on
+ * OMAP3430.  The DPLL will enter low-power stop when its downstream
+ * clocks are gated.  No return value.
+ */
+static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
+{
+	const struct dpll_data *dd;
+	u32 v;
+
+	if (!clk || !clk->dpll_data)
+		return;
+
+	dd = clk->dpll_data;
+
+	if (!dd->autoidle_reg)
+		return;
+
+	/*
+	 * REVISIT: CORE DPLL can optionally enter low-power bypass
+	 * by writing 0x5 instead of 0x1.  Add some mechanism to
+	 * optionally enter this mode.
+	 */
+	v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
+	v &= ~dd->autoidle_mask;
+	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
+	ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg);
+}
+
+/**
+ * omap3_dpll_deny_idle - prevent DPLL from automatically idling
+ * @clk: struct clk * of the DPLL to operate on
+ *
+ * Disable DPLL automatic idle control.  No return value.
+ */
+static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
+{
+	const struct dpll_data *dd;
+	u32 v;
+
+	if (!clk || !clk->dpll_data)
+		return;
+
+	dd = clk->dpll_data;
+
+	if (!dd->autoidle_reg)
+		return;
+
+	v = ti_clk_ll_ops->clk_readl(dd->autoidle_reg);
+	v &= ~dd->autoidle_mask;
+	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
+	ti_clk_ll_ops->clk_writel(v, dd->autoidle_reg);
+}
+
+/* Clock control for DPLL outputs */
+
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
+{
+	struct clk_hw_omap *pclk = NULL;
+
+	/* Walk up the parents of clk, looking for a DPLL */
+	do {
+		do {
+			hw = clk_hw_get_parent(hw);
+		} while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
+		if (!hw)
+			break;
+		pclk = to_clk_hw_omap(hw);
+	} while (pclk && !pclk->dpll_data);
+
+	/* clk does not have a DPLL as a parent?  error in the clock data */
+	if (!pclk) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return pclk;
+}
+
+/**
+ * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
+ * @clk: DPLL output struct clk
+ *
+ * Using parent clock DPLL data, look up DPLL state.  If locked, set our
+ * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
+ */
+unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
+				    unsigned long parent_rate)
+{
+	const struct dpll_data *dd;
+	unsigned long rate;
+	u32 v;
+	struct clk_hw_omap *pclk = NULL;
+
+	if (!parent_rate)
+		return 0;
+
+	pclk = omap3_find_clkoutx2_dpll(hw);
+
+	if (!pclk)
+		return 0;
+
+	dd = pclk->dpll_data;
+
+	WARN_ON(!dd->enable_mask);
+
+	v = ti_clk_ll_ops->clk_readl(dd->control_reg) & dd->enable_mask;
+	v >>= __ffs(dd->enable_mask);
+	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
+		rate = parent_rate;
+	else
+		rate = parent_rate * 2;
+	return rate;
+}
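+
+/*
+ * Example (assumed rates): if the parent DPLL is locked at 400 MHz and
+ * is not a J-type DPLL, CLKOUTX2 is reported as 800 MHz; in bypass (or
+ * for J-type DPLLs) the parent rate is passed through unchanged.
+ */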
+
+/* OMAP3/4 non-CORE DPLL clkops */
+const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
+	.allow_idle	= omap3_dpll_allow_idle,
+	.deny_idle	= omap3_dpll_deny_idle,
+};
+
+/**
+ * omap3_dpll4_set_rate - set rate for omap3 per-dpll
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Check if the current SoC supports the per-dpll reprogram operation
+ * or not, and then do the rate change if supported. Returns -EINVAL
+ * if not supported, 0 for success, and potential error codes from the
+ * clock rate change.
+ */
+int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
+			 unsigned long parent_rate)
+{
+	/*
+	 * According to the 12-5 CDP code from TI, "Limitation 2.5"
+	 * on 3430ES1 prevents us from changing DPLL multipliers or dividers
+	 * on DPLL4.
+	 */
+	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
+		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
+		return -EINVAL;
+	}
+
+	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
+
+/**
+ * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ * @index: parent index, 0 - reference clock, 1 - bypass clock
+ *
+ * Check if the current SoC supports the per-dpll reprogram operation
+ * or not, and then do the rate + parent change if supported. Returns
+ * -EINVAL if not supported, 0 for success, and potential error codes
+ * from the clock rate change.
+ */
+int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+				    unsigned long parent_rate, u8 index)
+{
+	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
+		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
+		return -EINVAL;
+	}
+
+	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
+						      index);
+}
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
new file mode 100644
index 0000000..660d743
--- /dev/null
+++ b/drivers/clk/ti/dpll44xx.c
@@ -0,0 +1,227 @@
+/*
+ * OMAP4-specific DPLL control functions
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+/*
+ * Maximum DPLL input frequency (FINT) and output frequency (FOUT) that
+ * can supported when using the DPLL low-power mode. Frequencies are
+ * defined in OMAP4430/60 Public TRM section 3.6.3.3.2 "Enable Control,
+ * Status, and Low-Power Operation Mode".
+ */
+#define OMAP4_DPLL_LP_FINT_MAX	1000000
+#define OMAP4_DPLL_LP_FOUT_MAX	100000000
+
+/*
+ * Bitfield declarations
+ */
+#define OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK		BIT(8)
+#define OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK		BIT(10)
+#define OMAP4430_DPLL_REGM4XEN_MASK			BIT(11)
+
+/* Static rate multiplier for OMAP4 REGM4XEN clocks */
+#define OMAP4430_REGM4XEN_MULT				4
+
+static void omap4_dpllmx_allow_gatectrl(struct clk_hw_omap *clk)
+{
+	u32 v;
+	u32 mask;
+
+	if (!clk || !clk->clksel_reg)
+		return;
+
+	mask = clk->flags & CLOCK_CLKOUTX2 ?
+			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
+			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
+
+	v = ti_clk_ll_ops->clk_readl(clk->clksel_reg);
+	/* Clear the bit to allow gatectrl */
+	v &= ~mask;
+	ti_clk_ll_ops->clk_writel(v, clk->clksel_reg);
+}
+
+static void omap4_dpllmx_deny_gatectrl(struct clk_hw_omap *clk)
+{
+	u32 v;
+	u32 mask;
+
+	if (!clk || !clk->clksel_reg)
+		return;
+
+	mask = clk->flags & CLOCK_CLKOUTX2 ?
+			OMAP4430_DPLL_CLKOUTX2_GATE_CTRL_MASK :
+			OMAP4430_DPLL_CLKOUT_GATE_CTRL_MASK;
+
+	v = ti_clk_ll_ops->clk_readl(clk->clksel_reg);
+	/* Set the bit to deny gatectrl */
+	v |= mask;
+	ti_clk_ll_ops->clk_writel(v, clk->clksel_reg);
+}
+
+const struct clk_hw_omap_ops clkhwops_omap4_dpllmx = {
+	.allow_idle	= omap4_dpllmx_allow_gatectrl,
+	.deny_idle      = omap4_dpllmx_deny_gatectrl,
+};
+
+/**
+ * omap4_dpll_lpmode_recalc - compute DPLL low-power setting
+ * @dd: pointer to the dpll data structure
+ *
+ * Calculates if low-power mode can be enabled based upon the last
+ * multiplier and divider values calculated. If low-power mode can be
+ * enabled, then the bit to enable low-power mode is stored in the
+ * last_rounded_lpmode variable. This implementation is based upon the
+ * criteria for enabling low-power mode as described in the OMAP4430/60
+ * Public TRM section 3.6.3.3.2 "Enable Control, Status, and Low-Power
+ * Operation Mode".
+ */
+static void omap4_dpll_lpmode_recalc(struct dpll_data *dd)
+{
+	long fint, fout;
+
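+	/* FINT = refclk / (N + 1), FOUT = FINT * M (rate before the M-dividers) */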
+	fint = clk_get_rate(dd->clk_ref) / (dd->last_rounded_n + 1);
+	fout = fint * dd->last_rounded_m;
+
+	if ((fint < OMAP4_DPLL_LP_FINT_MAX) && (fout < OMAP4_DPLL_LP_FOUT_MAX))
+		dd->last_rounded_lpmode = 1;
+	else
+		dd->last_rounded_lpmode = 0;
+}
+
+/**
+ * omap4_dpll_regm4xen_recalc - compute DPLL rate, considering REGM4XEN bit
+ * @clk: struct clk * of the DPLL to compute the rate for
+ *
+ * Compute the output rate for the OMAP4 DPLL represented by @clk.
+ * Takes the REGM4XEN bit into consideration, which is needed for the
+ * OMAP4 ABE DPLL.  Returns the DPLL's output rate (before M-dividers)
+ * upon success, or 0 upon error.
+ */
+unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
+					 unsigned long parent_rate)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	u32 v;
+	unsigned long rate;
+	struct dpll_data *dd;
+
+	if (!clk || !clk->dpll_data)
+		return 0;
+
+	dd = clk->dpll_data;
+
+	rate = omap2_get_dpll_rate(clk);
+
+	/* regm4xen adds a multiplier of 4 to DPLL calculations */
+	v = ti_clk_ll_ops->clk_readl(dd->control_reg);
+	if (v & OMAP4430_DPLL_REGM4XEN_MASK)
+		rate *= OMAP4430_REGM4XEN_MULT;
+
+	return rate;
+}
+
+/**
+ * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
+ * @clk: struct clk * of the DPLL to round a rate for
+ * @target_rate: the desired rate of the DPLL
+ *
+ * Compute the rate that would be programmed into the DPLL hardware
+ * for @clk if set_rate() were to be provided with the rate
+ * @target_rate.  Takes the REGM4XEN bit into consideration, which is
+ * needed for the OMAP4 ABE DPLL.  Returns the rounded rate (before
+ * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
+ * ~0 if an error occurred in omap2_dpll_round_rate().
+ */
+long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
+				    unsigned long target_rate,
+				    unsigned long *parent_rate)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	struct dpll_data *dd;
+	long r;
+
+	if (!clk || !clk->dpll_data)
+		return -EINVAL;
+
+	dd = clk->dpll_data;
+
+	dd->last_rounded_m4xen = 0;
+
+	/*
+	 * First try to compute the DPLL configuration for
+	 * target rate without using the 4X multiplier.
+	 */
+	r = omap2_dpll_round_rate(hw, target_rate, NULL);
+	if (r != ~0)
+		goto out;
+
+	/*
+	 * If we did not find a valid DPLL configuration, try again, but
+	 * this time see if using the 4X multiplier can help. Enabling the
+	 * 4X multiplier is equivalent to dividing the target rate by 4.
+	 */
+	r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
+				  NULL);
+	if (r == ~0)
+		return r;
+
+	dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+	dd->last_rounded_m4xen = 1;
+
+out:
+	omap4_dpll_lpmode_recalc(dd);
+
+	return dd->last_rounded_rate;
+}
+
+/**
+ * omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
+ * @hw: pointer to the clock to determine rate for
+ * @req: target rate request
+ *
+ * Determines which DPLL mode to use for reaching a desired rate.
+ * Checks whether the DPLL shall be in bypass or locked mode, and if
+ * locked, calculates the M,N values for the DPLL via round-rate.
+ * Returns 0 on success and a negative error value otherwise.
+ */
+int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
+				       struct clk_rate_request *req)
+{
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	struct dpll_data *dd;
+
+	if (!req->rate)
+		return -EINVAL;
+
+	dd = clk->dpll_data;
+	if (!dd)
+		return -EINVAL;
+
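+	/*
+	 * Use the bypass parent when it already runs at the requested rate
+	 * and low-power bypass mode is allowed; otherwise round the rate
+	 * and lock the DPLL to the reference clock.
+	 */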
+	if (clk_get_rate(dd->clk_bypass) == req->rate &&
+	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
+		req->best_parent_hw = __clk_get_hw(dd->clk_bypass);
+	} else {
+		req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
+						&req->best_parent_rate);
+		req->best_parent_hw = __clk_get_hw(dd->clk_ref);
+	}
+
+	req->best_parent_rate = req->rate;
+
+	return 0;
+}
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 730aa62..f4b2e98 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -9,6 +9,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -558,8 +559,7 @@
 		goto free;
 	}
 
-	parent_name[0] = of_clk_get_parent_name(node, 0);
-	parent_name[1] = of_clk_get_parent_name(node, 1);
+	of_clk_parent_fill(node, parent_name, 2);
 	init->parent_names = parent_name;
 
 	fd->clk_ref = of_clk_get(node, 0);
diff --git a/drivers/clk/ti/fixed-factor.c b/drivers/clk/ti/fixed-factor.c
index c2c8a28..3cd4067 100644
--- a/drivers/clk/ti/fixed-factor.c
+++ b/drivers/clk/ti/fixed-factor.c
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 0c6fdfc..5429d35 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -62,7 +62,7 @@
  * (Any other value different from the Read value) to the
  * corresponding CM_CLKSEL register will refresh the dividers.
  */
-static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
+static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *hw)
 {
 	struct clk_divider *parent;
 	struct clk_hw *parent_hw;
@@ -70,10 +70,10 @@
 	int ret;
 
 	/* Clear PWRDN bit of HSDIVIDER */
-	ret = omap2_dflt_clk_enable(clk);
+	ret = omap2_dflt_clk_enable(hw);
 
 	/* Parent is the x2 node, get parent of parent for the m2 div */
-	parent_hw = __clk_get_hw(__clk_get_parent(__clk_get_parent(clk->clk)));
+	parent_hw = clk_hw_get_parent(clk_hw_get_parent(hw));
 	parent = to_clk_divider(parent_hw);
 
 	/* Restore the dividers */
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index c76230d..e505e6f 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -63,7 +63,7 @@
 	if (IS_ERR(clk))
 		kfree(clk_hw);
 	else
-		omap2_init_clk_hw_omap_clocks(clk);
+		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
 
 	return clk;
 }
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 5cdeed5..69f08a1 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -31,7 +31,7 @@
 static u8 ti_clk_mux_get_parent(struct clk_hw *hw)
 {
 	struct clk_mux *mux = to_clk_mux(hw);
-	int num_parents = __clk_get_num_parents(hw->clk);
+	int num_parents = clk_hw_get_num_parents(hw);
 	u32 val;
 
 	/*
@@ -190,7 +190,6 @@
 	void __iomem *reg;
 	int num_parents;
 	const char **parent_names;
-	int i;
 	u8 clk_mux_flags = 0;
 	u32 mask = 0;
 	u32 shift = 0;
@@ -205,8 +204,7 @@
 	if (!parent_names)
 		goto cleanup;
 
-	for (i = 0; i < num_parents; i++)
-		parent_names[i] = of_clk_get_parent_name(node, i);
+	of_clk_parent_fill(node, parent_names, num_parents);
 
 	reg = ti_clk_get_reg_addr(node, 0);
 
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index 521483f..f3baef2 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -9,7 +9,6 @@
 
 # Clock definitions
 obj-y += u8500_of_clk.o
-obj-y += u8500_clk.o
 obj-y += u9540_clk.o
 obj-y += u8540_clk.o
 
diff --git a/drivers/clk/ux500/abx500-clk.c b/drivers/clk/ux500/abx500-clk.c
index 3e5e051..222425d 100644
--- a/drivers/clk/ux500/abx500-clk.c
+++ b/drivers/clk/ux500/abx500-clk.c
@@ -13,7 +13,6 @@
 #include <linux/platform_device.h>
 #include <linux/mfd/abx500/ab8500.h>
 #include <linux/mfd/abx500/ab8500-sysctrl.h>
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/mfd/dbx500-prcmu.h>
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index bf63c96..7f34382 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -43,7 +43,7 @@
 	struct clk_prcmu *clk = to_clk_prcmu(hw);
 	if (prcmu_request_clock(clk->cg_sel, false))
 		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
-			__clk_get_name(hw->clk));
+			clk_hw_get_name(hw));
 	else
 		clk->is_prepared = 0;
 }
@@ -101,11 +101,11 @@
 
 	if (!clk->opp_requested) {
 		err = prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP,
-						(char *)__clk_get_name(hw->clk),
+						(char *)clk_hw_get_name(hw),
 						100);
 		if (err) {
 			pr_err("clk_prcmu: %s fail req APE OPP for %s.\n",
-				__func__, __clk_get_name(hw->clk));
+				__func__, clk_hw_get_name(hw));
 			return err;
 		}
 		clk->opp_requested = 1;
@@ -114,7 +114,7 @@
 	err = prcmu_request_clock(clk->cg_sel, true);
 	if (err) {
 		prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
-					(char *)__clk_get_name(hw->clk));
+					(char *)clk_hw_get_name(hw));
 		clk->opp_requested = 0;
 		return err;
 	}
@@ -129,13 +129,13 @@
 
 	if (prcmu_request_clock(clk->cg_sel, false)) {
 		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
-			__clk_get_name(hw->clk));
+			clk_hw_get_name(hw));
 		return;
 	}
 
 	if (clk->opp_requested) {
 		prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP,
-					(char *)__clk_get_name(hw->clk));
+					(char *)clk_hw_get_name(hw));
 		clk->opp_requested = 0;
 	}
 
@@ -151,7 +151,7 @@
 		err = prcmu_request_ape_opp_100_voltage(true);
 		if (err) {
 			pr_err("clk_prcmu: %s fail req APE OPP VOLT for %s.\n",
-				__func__, __clk_get_name(hw->clk));
+				__func__, clk_hw_get_name(hw));
 			return err;
 		}
 		clk->opp_requested = 1;
@@ -174,7 +174,7 @@
 
 	if (prcmu_request_clock(clk->cg_sel, false)) {
 		pr_err("clk_prcmu: %s failed to disable %s.\n", __func__,
-			__clk_get_name(hw->clk));
+			clk_hw_get_name(hw));
 		return;
 	}
 
diff --git a/drivers/clk/ux500/clk-sysctrl.c b/drivers/clk/ux500/clk-sysctrl.c
index e364c9d..266ddea 100644
--- a/drivers/clk/ux500/clk-sysctrl.c
+++ b/drivers/clk/ux500/clk-sysctrl.c
@@ -52,7 +52,7 @@
 	struct clk_sysctrl *clk = to_clk_sysctrl(hw);
 	if (ab8500_sysctrl_clear(clk->reg_sel[0], clk->reg_mask[0]))
 		dev_err(clk->dev, "clk_sysctrl: %s fail to clear %s.\n",
-			__func__, __clk_get_name(hw->clk));
+			__func__, clk_hw_get_name(hw));
 }
 
 static unsigned long clk_sysctrl_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/ux500/clk.h b/drivers/clk/ux500/clk.h
index a2bb92d..b42485d 100644
--- a/drivers/clk/ux500/clk.h
+++ b/drivers/clk/ux500/clk.h
@@ -10,10 +10,11 @@
 #ifndef __UX500_CLK_H
 #define __UX500_CLK_H
 
-#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/types.h>
 
+struct clk;
+
 struct clk *clk_reg_prcc_pclk(const char *name,
 			      const char *parent_name,
 			      resource_size_t phy_base,
diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c
deleted file mode 100644
index 4626b97..0000000
--- a/drivers/clk/ux500/u8500_clk.c
+++ /dev/null
@@ -1,526 +0,0 @@
-/*
- * Clock definitions for u8500 platform.
- *
- * Copyright (C) 2012 ST-Ericsson SA
- * Author: Ulf Hansson <ulf.hansson@linaro.org>
- *
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/mfd/dbx500-prcmu.h>
-#include <linux/platform_data/clk-ux500.h>
-#include "clk.h"
-
-void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		    u32 clkrst5_base, u32 clkrst6_base)
-{
-	struct prcmu_fw_version *fw_version;
-	const char *sgaclk_parent = NULL;
-	struct clk *clk;
-
-	/* Clock sources */
-	clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
-				CLK_IS_ROOT|CLK_IGNORE_UNUSED);
-	clk_register_clkdev(clk, "soc0_pll", NULL);
-
-	clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1,
-				CLK_IS_ROOT|CLK_IGNORE_UNUSED);
-	clk_register_clkdev(clk, "soc1_pll", NULL);
-
-	clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR,
-				CLK_IS_ROOT|CLK_IGNORE_UNUSED);
-	clk_register_clkdev(clk, "ddr_pll", NULL);
-
-	/* FIXME: Add sys, ulp and int clocks here. */
-
-	clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL",
-				CLK_IS_ROOT|CLK_IGNORE_UNUSED,
-				32768);
-	clk_register_clkdev(clk, "clk32k", NULL);
-	clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
-
-	/* PRCMU clocks */
-	fw_version = prcmu_get_fw_version();
-	if (fw_version != NULL) {
-		switch (fw_version->project) {
-		case PRCMU_FW_PROJECT_U8500_C2:
-		case PRCMU_FW_PROJECT_U8520:
-		case PRCMU_FW_PROJECT_U8420:
-			sgaclk_parent = "soc0_pll";
-			break;
-		default:
-			break;
-		}
-	}
-
-	if (sgaclk_parent)
-		clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent,
-					PRCMU_SGACLK, 0);
-	else
-		clk = clk_reg_prcmu_gate("sgclk", NULL,
-					PRCMU_SGACLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "mali");
-
-	clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "UART");
-
-	clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "MSP02");
-
-	clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "MSP1");
-
-	clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "I2C");
-
-	clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "slim");
-
-	clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "PERIPH1");
-
-	clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "PERIPH2");
-
-	clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "PERIPH3");
-
-	clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "PERIPH5");
-
-	clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "PERIPH6");
-
-	clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "PERIPH7");
-
-	clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0,
-				CLK_IS_ROOT|CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "lcd");
-	clk_register_clkdev(clk, "lcd", "mcde");
-
-	clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "bml");
-
-	clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0,
-				CLK_IS_ROOT|CLK_SET_RATE_GATE);
-
-	clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0,
-				CLK_IS_ROOT|CLK_SET_RATE_GATE);
-
-	clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0,
-				CLK_IS_ROOT|CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "hdmi");
-	clk_register_clkdev(clk, "hdmi", "mcde");
-
-	clk = clk_reg_prcmu_scalable("apeatclk", NULL, PRCMU_APEATCLK, 0,
-				     CLK_IS_ROOT|CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "apeat");
-
-	clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0,
-				CLK_IS_ROOT|CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "apetrace");
-
-	clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "mcde");
-	clk_register_clkdev(clk, "mcde", "mcde");
-	clk_register_clkdev(clk, "dsisys", "dsilink.0");
-	clk_register_clkdev(clk, "dsisys", "dsilink.1");
-	clk_register_clkdev(clk, "dsisys", "dsilink.2");
-
-	clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK,
-				CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "ipi2");
-
-	clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK,
-				CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "dsialt");
-
-	clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "dma40.0");
-
-	clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "b2r2");
-	clk_register_clkdev(clk, NULL, "b2r2_core");
-	clk_register_clkdev(clk, NULL, "U8500-B2R2.0");
-
-	clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0,
-				CLK_IS_ROOT|CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "tv");
-	clk_register_clkdev(clk, "tv", "mcde");
-
-	clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "SSP");
-
-	clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "rngclk");
-
-	clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "uicc");
-
-	clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, CLK_IS_ROOT);
-	clk_register_clkdev(clk, NULL, "mtu0");
-	clk_register_clkdev(clk, NULL, "mtu1");
-
-	clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK,
-					100000000,
-					CLK_IS_ROOT|CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdmmc");
-
-	clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk",
-				PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, "dsihs2", "mcde");
-	clk_register_clkdev(clk, "dsihs2", "dsilink.2");
-
-
-	clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll",
-				PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, "dsihs0", "mcde");
-	clk_register_clkdev(clk, "dsihs0", "dsilink.0");
-
-	clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll",
-				PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, "dsihs1", "mcde");
-	clk_register_clkdev(clk, "dsihs1", "dsilink.1");
-
-	clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk",
-				PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, "dsilp0", "dsilink.0");
-	clk_register_clkdev(clk, "dsilp0", "mcde");
-
-	clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk",
-				PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, "dsilp1", "dsilink.1");
-	clk_register_clkdev(clk, "dsilp1", "mcde");
-
-	clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk",
-				PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, "dsilp2", "dsilink.2");
-	clk_register_clkdev(clk, "dsilp2", "mcde");
-
-	clk = clk_reg_prcmu_scalable_rate("armss", NULL,
-				PRCMU_ARMSS, 0, CLK_IS_ROOT|CLK_IGNORE_UNUSED);
-	clk_register_clkdev(clk, "armss", NULL);
-
-	clk = clk_register_fixed_factor(NULL, "smp_twd", "armss",
-				CLK_IGNORE_UNUSED, 1, 2);
-	clk_register_clkdev(clk, NULL, "smp_twd");
-
-	/*
-	 * FIXME: Add special handled PRCMU clocks here:
-	 * 1. clkout0yuv, use PRCMU as parent + need regulator + pinctrl.
-	 * 2. ab9540_clkout1yuv, see clkout0yuv
-	 */
-
-	/* PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
-				BIT(0), 0);
-	clk_register_clkdev(clk, "apb_pclk", "uart0");
-
-	clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
-				BIT(1), 0);
-	clk_register_clkdev(clk, "apb_pclk", "uart1");
-
-	clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
-				BIT(2), 0);
-	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
-
-	clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
-				BIT(3), 0);
-	clk_register_clkdev(clk, "apb_pclk", "msp0");
-	clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
-
-	clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
-				BIT(4), 0);
-	clk_register_clkdev(clk, "apb_pclk", "msp1");
-	clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
-
-	clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
-				BIT(5), 0);
-	clk_register_clkdev(clk, "apb_pclk", "sdi0");
-
-	clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
-				BIT(6), 0);
-	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
-
-	clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
-				BIT(7), 0);
-	clk_register_clkdev(clk, NULL, "spi3");
-
-	clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
-				BIT(8), 0);
-	clk_register_clkdev(clk, "apb_pclk", "slimbus0");
-
-	clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
-				BIT(9), 0);
-	clk_register_clkdev(clk, NULL, "gpio.0");
-	clk_register_clkdev(clk, NULL, "gpio.1");
-	clk_register_clkdev(clk, NULL, "gpioblock0");
-
-	clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
-				BIT(10), 0);
-	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
-
-	clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
-				BIT(11), 0);
-	clk_register_clkdev(clk, "apb_pclk", "msp3");
-	clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
-
-	clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
-				BIT(0), 0);
-	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
-
-	clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
-				BIT(1), 0);
-	clk_register_clkdev(clk, NULL, "spi2");
-
-	clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
-				BIT(2), 0);
-	clk_register_clkdev(clk, NULL, "spi1");
-
-	clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
-				BIT(3), 0);
-	clk_register_clkdev(clk, NULL, "pwl");
-
-	clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
-				BIT(4), 0);
-	clk_register_clkdev(clk, "apb_pclk", "sdi4");
-
-	clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
-				BIT(5), 0);
-	clk_register_clkdev(clk, "apb_pclk", "msp2");
-	clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
-
-	clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
-				BIT(6), 0);
-	clk_register_clkdev(clk, "apb_pclk", "sdi1");
-
-	clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
-				BIT(7), 0);
-	clk_register_clkdev(clk, "apb_pclk", "sdi3");
-
-	clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
-				BIT(8), 0);
-	clk_register_clkdev(clk, NULL, "spi0");
-
-	clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
-				BIT(9), 0);
-	clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
-
-	clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
-				BIT(10), 0);
-	clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
-
-	clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
-				BIT(11), 0);
-	clk_register_clkdev(clk, NULL, "gpio.6");
-	clk_register_clkdev(clk, NULL, "gpio.7");
-	clk_register_clkdev(clk, NULL, "gpioblock1");
-
-	clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
-				BIT(12), 0);
-
-	clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
-				BIT(0), 0);
-	clk_register_clkdev(clk, "fsmc", NULL);
-	clk_register_clkdev(clk, NULL, "smsc911x.0");
-
-	clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
-				BIT(1), 0);
-	clk_register_clkdev(clk, "apb_pclk", "ssp0");
-
-	clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
-				BIT(2), 0);
-	clk_register_clkdev(clk, "apb_pclk", "ssp1");
-
-	clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
-				BIT(3), 0);
-	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
-
-	clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
-				BIT(4), 0);
-	clk_register_clkdev(clk, "apb_pclk", "sdi2");
-
-	clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
-				BIT(5), 0);
-	clk_register_clkdev(clk, "apb_pclk", "ske");
-	clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
-
-	clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
-				BIT(6), 0);
-	clk_register_clkdev(clk, "apb_pclk", "uart2");
-
-	clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
-				BIT(7), 0);
-	clk_register_clkdev(clk, "apb_pclk", "sdi5");
-
-	clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
-				BIT(8), 0);
-	clk_register_clkdev(clk, NULL, "gpio.2");
-	clk_register_clkdev(clk, NULL, "gpio.3");
-	clk_register_clkdev(clk, NULL, "gpio.4");
-	clk_register_clkdev(clk, NULL, "gpio.5");
-	clk_register_clkdev(clk, NULL, "gpioblock2");
-
-	clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
-				BIT(0), 0);
-	clk_register_clkdev(clk, "usb", "musb-ux500.0");
-
-	clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
-				BIT(1), 0);
-	clk_register_clkdev(clk, NULL, "gpio.8");
-	clk_register_clkdev(clk, NULL, "gpioblock3");
-
-	clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
-				BIT(0), 0);
-	clk_register_clkdev(clk, "apb_pclk", "rng");
-
-	clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
-				BIT(1), 0);
-	clk_register_clkdev(clk, NULL, "cryp0");
-	clk_register_clkdev(clk, NULL, "cryp1");
-
-	clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
-				BIT(2), 0);
-	clk_register_clkdev(clk, NULL, "hash0");
-
-	clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
-				BIT(3), 0);
-	clk_register_clkdev(clk, NULL, "pka");
-
-	clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
-				BIT(4), 0);
-	clk_register_clkdev(clk, NULL, "hash1");
-
-	clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
-				BIT(5), 0);
-	clk_register_clkdev(clk, NULL, "cfgreg");
-
-	clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
-				BIT(6), 0);
-	clk_register_clkdev(clk, "apb_pclk", "mtu0");
-
-	clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
-				BIT(7), 0);
-	clk_register_clkdev(clk, "apb_pclk", "mtu1");
-
-	/* PRCC K-clocks
-	 *
-	 * FIXME: Some drivers requires PERPIH[n| to be automatically enabled
-	 * by enabling just the K-clock, even if it is not a valid parent to
-	 * the K-clock. Until drivers get fixed we might need some kind of
-	 * "parent muxed join".
-	 */
-
-	/* Periph1 */
-	clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
-			clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "uart0");
-
-	clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
-			clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "uart1");
-
-	clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
-			clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "nmk-i2c.1");
-
-	clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
-			clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "msp0");
-	clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
-
-	clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
-			clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "msp1");
-	clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
-
-	clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
-			clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdi0");
-
-	clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
-			clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "nmk-i2c.2");
-
-	clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
-			clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "slimbus0");
-
-	clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
-			clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "nmk-i2c.4");
-
-	clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
-			clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "msp3");
-	clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
-
-	/* Periph2 */
-	clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
-			clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "nmk-i2c.3");
-
-	clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
-			clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdi4");
-
-	clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
-			clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "msp2");
-	clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
-
-	clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
-			clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdi1");
-
-	clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
-			clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdi3");
-
-	/* Note that rate is received from parent. */
-	clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
-			clkrst2_base, BIT(6),
-			CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
-	clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
-			clkrst2_base, BIT(7),
-			CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
-
-	/* Periph3 */
-	clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
-			clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "ssp0");
-
-	clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
-			clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "ssp1");
-
-	clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
-			clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "nmk-i2c.0");
-
-	clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
-			clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdi2");
-
-	clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
-			clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "ske");
-	clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
-
-	clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
-			clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "uart2");
-
-	clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
-			clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "sdi5");
-
-	/* Periph6 */
-	clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
-			clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
-	clk_register_clkdev(clk, NULL, "rng");
-}
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index e319ef9..271c096 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -8,8 +8,7 @@
  */
 
 #include <linux/of.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/of_address.h>
 #include <linux/clk-provider.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/platform_data/clk-ux500.h>
@@ -54,14 +53,25 @@
 	{ },
 };
 
-void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		       u32 clkrst5_base, u32 clkrst6_base)
+/* CLKRST4 is missing, making it hard to index things */
+enum clkrst_index {
+	CLKRST1_INDEX = 0,
+	CLKRST2_INDEX,
+	CLKRST3_INDEX,
+	CLKRST5_INDEX,
+	CLKRST6_INDEX,
+	CLKRST_MAX,
+};
+
+void u8500_clk_init(void)
 {
 	struct prcmu_fw_version *fw_version;
 	struct device_node *np = NULL;
 	struct device_node *child = NULL;
 	const char *sgaclk_parent = NULL;
 	struct clk *clk, *rtc_clk, *twd_clk;
+	u32 bases[CLKRST_MAX];
+	int i;
 
 	if (of_have_populated_dt())
 		np = of_find_matching_node(NULL, u8500_clk_of_match);
@@ -69,6 +79,15 @@
 		pr_err("Either DT or U8500 Clock node not found\n");
 		return;
 	}
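+	/* Read the CLKRST1/2/3/5/6 register base addresses from the DT node */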
+	for (i = 0; i < ARRAY_SIZE(bases); i++) {
+		struct resource r;
+
+		if (of_address_to_resource(np, i, &r))
+			/* Not much choice but to continue */
+			pr_err("failed to get CLKRST %d base address\n",
+			       i + 1);
+		bases[i] = r.start;
+	}
 
 	/* Clock sources */
 	clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0,
@@ -246,179 +265,179 @@
 	 */
 
 	/* PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
 				BIT(0), 0);
 	PRCC_PCLK_STORE(clk, 1, 0);
 
-	clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX],
 				BIT(1), 0);
 	PRCC_PCLK_STORE(clk, 1, 1);
 
-	clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX],
 				BIT(2), 0);
 	PRCC_PCLK_STORE(clk, 1, 2);
 
-	clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX],
 				BIT(3), 0);
 	PRCC_PCLK_STORE(clk, 1, 3);
 
-	clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX],
 				BIT(4), 0);
 	PRCC_PCLK_STORE(clk, 1, 4);
 
-	clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX],
 				BIT(5), 0);
 	PRCC_PCLK_STORE(clk, 1, 5);
 
-	clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX],
 				BIT(6), 0);
 	PRCC_PCLK_STORE(clk, 1, 6);
 
-	clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX],
 				BIT(7), 0);
 	PRCC_PCLK_STORE(clk, 1, 7);
 
-	clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX],
 				BIT(8), 0);
 	PRCC_PCLK_STORE(clk, 1, 8);
 
-	clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX],
 				BIT(9), 0);
 	PRCC_PCLK_STORE(clk, 1, 9);
 
-	clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX],
 				BIT(10), 0);
 	PRCC_PCLK_STORE(clk, 1, 10);
 
-	clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX],
 				BIT(11), 0);
 	PRCC_PCLK_STORE(clk, 1, 11);
 
-	clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX],
 				BIT(0), 0);
 	PRCC_PCLK_STORE(clk, 2, 0);
 
-	clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX],
 				BIT(1), 0);
 	PRCC_PCLK_STORE(clk, 2, 1);
 
-	clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX],
 				BIT(2), 0);
 	PRCC_PCLK_STORE(clk, 2, 2);
 
-	clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX],
 				BIT(3), 0);
 	PRCC_PCLK_STORE(clk, 2, 3);
 
-	clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX],
 				BIT(4), 0);
 	PRCC_PCLK_STORE(clk, 2, 4);
 
-	clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX],
 				BIT(5), 0);
 	PRCC_PCLK_STORE(clk, 2, 5);
 
-	clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX],
 				BIT(6), 0);
 	PRCC_PCLK_STORE(clk, 2, 6);
 
-	clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX],
 				BIT(7), 0);
 	PRCC_PCLK_STORE(clk, 2, 7);
 
-	clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX],
 				BIT(8), 0);
 	PRCC_PCLK_STORE(clk, 2, 8);
 
-	clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX],
 				BIT(9), 0);
 	PRCC_PCLK_STORE(clk, 2, 9);
 
-	clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX],
 				BIT(10), 0);
 	PRCC_PCLK_STORE(clk, 2, 10);
 
-	clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX],
 				BIT(11), 0);
 	PRCC_PCLK_STORE(clk, 2, 11);
 
-	clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX],
 				BIT(12), 0);
 	PRCC_PCLK_STORE(clk, 2, 12);
 
-	clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX],
 				BIT(0), 0);
 	PRCC_PCLK_STORE(clk, 3, 0);
 
-	clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX],
 				BIT(1), 0);
 	PRCC_PCLK_STORE(clk, 3, 1);
 
-	clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX],
 				BIT(2), 0);
 	PRCC_PCLK_STORE(clk, 3, 2);
 
-	clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX],
 				BIT(3), 0);
 	PRCC_PCLK_STORE(clk, 3, 3);
 
-	clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX],
 				BIT(4), 0);
 	PRCC_PCLK_STORE(clk, 3, 4);
 
-	clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX],
 				BIT(5), 0);
 	PRCC_PCLK_STORE(clk, 3, 5);
 
-	clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX],
 				BIT(6), 0);
 	PRCC_PCLK_STORE(clk, 3, 6);
 
-	clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX],
 				BIT(7), 0);
 	PRCC_PCLK_STORE(clk, 3, 7);
 
-	clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX],
 				BIT(8), 0);
 	PRCC_PCLK_STORE(clk, 3, 8);
 
-	clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
+	clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX],
 				BIT(0), 0);
 	PRCC_PCLK_STORE(clk, 5, 0);
 
-	clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
+	clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX],
 				BIT(1), 0);
 	PRCC_PCLK_STORE(clk, 5, 1);
 
-	clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX],
 				BIT(0), 0);
 	PRCC_PCLK_STORE(clk, 6, 0);
 
-	clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX],
 				BIT(1), 0);
 	PRCC_PCLK_STORE(clk, 6, 1);
 
-	clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX],
 				BIT(2), 0);
 	PRCC_PCLK_STORE(clk, 6, 2);
 
-	clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX],
 				BIT(3), 0);
 	PRCC_PCLK_STORE(clk, 6, 3);
 
-	clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX],
 				BIT(4), 0);
 	PRCC_PCLK_STORE(clk, 6, 4);
 
-	clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX],
 				BIT(5), 0);
 	PRCC_PCLK_STORE(clk, 6, 5);
 
-	clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX],
 				BIT(6), 0);
 	PRCC_PCLK_STORE(clk, 6, 6);
 
-	clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX],
 				BIT(7), 0);
 	PRCC_PCLK_STORE(clk, 6, 7);
 
@@ -432,109 +451,109 @@
 
 	/* Periph1 */
 	clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
-			clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 0);
 
 	clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
-			clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 1);
 
 	clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
-			clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 2);
 
 	clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
-			clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 3);
 
 	clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
-			clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 4);
 
 	clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
-			clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 5);
 
 	clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
-			clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 6);
 
 	clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
-			clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 8);
 
 	clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
-			clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 9);
 
 	clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
-			clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 1, 10);
 
 	/* Periph2 */
 	clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
-			clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 2, 0);
 
 	clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
-			clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 2, 2);
 
 	clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
-			clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 2, 3);
 
 	clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
-			clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 2, 4);
 
 	clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
-			clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 2, 5);
 
 	/* Note that rate is received from parent. */
 	clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
-			clkrst2_base, BIT(6),
+			bases[CLKRST2_INDEX], BIT(6),
 			CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
 	PRCC_KCLK_STORE(clk, 2, 6);
 
 	clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
-			clkrst2_base, BIT(7),
+			bases[CLKRST2_INDEX], BIT(7),
 			CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
 	PRCC_KCLK_STORE(clk, 2, 7);
 
 	/* Periph3 */
 	clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
-			clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 1);
 
 	clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
-			clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 2);
 
 	clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
-			clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 3);
 
 	clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
-			clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 4);
 
 	clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
-			clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 5);
 
 	clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
-			clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 6);
 
 	clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
-			clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 3, 7);
 
 	/* Periph6 */
 	clk = clk_reg_prcc_kclk("p3_rng_kclk", "rngclk",
-			clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
+			bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
 	PRCC_KCLK_STORE(clk, 6, 0);
 
 	for_each_child_of_node(np, child) {
diff --git a/drivers/clk/ux500/u8540_clk.c b/drivers/clk/ux500/u8540_clk.c
index 20c8add..d7bcb7a 100644
--- a/drivers/clk/ux500/u8540_clk.c
+++ b/drivers/clk/ux500/u8540_clk.c
@@ -7,17 +7,51 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
-#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/platform_data/clk-ux500.h>
 #include "clk.h"
 
-void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		    u32 clkrst5_base, u32 clkrst6_base)
+static const struct of_device_id u8540_clk_of_match[] = {
+	{ .compatible = "stericsson,u8540-clks", },
+	{ }
+};
+
+/* CLKRST4 is missing, making it hard to index things */
+enum clkrst_index {
+	CLKRST1_INDEX = 0,
+	CLKRST2_INDEX,
+	CLKRST3_INDEX,
+	CLKRST5_INDEX,
+	CLKRST6_INDEX,
+	CLKRST_MAX,
+};
+
+void u8540_clk_init(void)
 {
 	struct clk *clk;
+	struct device_node *np = NULL;
+	u32 bases[CLKRST_MAX];
+	int i;
+
+	if (of_have_populated_dt())
+		np = of_find_matching_node(NULL, u8540_clk_of_match);
+	if (!np) {
+		pr_err("Either DT or U8540 Clock node not found\n");
+		return;
+	}
+	for (i = 0; i < ARRAY_SIZE(bases); i++) {
+		struct resource r;
+
+		if (of_address_to_resource(np, i, &r))
+			/* Not much choice but to continue */
+			pr_err("failed to get CLKRST %d base address\n",
+			       i + 1);
+		bases[i] = r.start;
+	}
 
 	/* Clock sources. */
 	/* Fixed ClockGen */
@@ -219,151 +253,151 @@
 
 	/* PRCC P-clocks */
 	/* Peripheral 1 : PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX],
 				BIT(0), 0);
 	clk_register_clkdev(clk, "apb_pclk", "uart0");
 
-	clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk1", "per1clk", bases[CLKRST1_INDEX],
 				BIT(1), 0);
 	clk_register_clkdev(clk, "apb_pclk", "uart1");
 
-	clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", bases[CLKRST1_INDEX],
 				BIT(2), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
 
-	clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", bases[CLKRST1_INDEX],
 				BIT(3), 0);
 	clk_register_clkdev(clk, "apb_pclk", "msp0");
 	clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.0");
 
-	clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", bases[CLKRST1_INDEX],
 				BIT(4), 0);
 	clk_register_clkdev(clk, "apb_pclk", "msp1");
 	clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.1");
 
-	clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", bases[CLKRST1_INDEX],
 				BIT(5), 0);
 	clk_register_clkdev(clk, "apb_pclk", "sdi0");
 
-	clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", bases[CLKRST1_INDEX],
 				BIT(6), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
 
-	clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", bases[CLKRST1_INDEX],
 				BIT(7), 0);
 	clk_register_clkdev(clk, NULL, "spi3");
 
-	clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", bases[CLKRST1_INDEX],
 				BIT(8), 0);
 	clk_register_clkdev(clk, "apb_pclk", "slimbus0");
 
-	clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", bases[CLKRST1_INDEX],
 				BIT(9), 0);
 	clk_register_clkdev(clk, NULL, "gpio.0");
 	clk_register_clkdev(clk, NULL, "gpio.1");
 	clk_register_clkdev(clk, NULL, "gpioblock0");
 	clk_register_clkdev(clk, "apb_pclk", "ab85xx-codec.0");
 
-	clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", bases[CLKRST1_INDEX],
 				BIT(10), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
 
-	clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", clkrst1_base,
+	clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", bases[CLKRST1_INDEX],
 				BIT(11), 0);
 	clk_register_clkdev(clk, "apb_pclk", "msp3");
 	clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.3");
 
 	/* Peripheral 2 : PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", bases[CLKRST2_INDEX],
 				BIT(0), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
 
-	clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", bases[CLKRST2_INDEX],
 				BIT(1), 0);
 	clk_register_clkdev(clk, NULL, "spi2");
 
-	clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk2", "per2clk", bases[CLKRST2_INDEX],
 				BIT(2), 0);
 	clk_register_clkdev(clk, NULL, "spi1");
 
-	clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk3", "per2clk", bases[CLKRST2_INDEX],
 				BIT(3), 0);
 	clk_register_clkdev(clk, NULL, "pwl");
 
-	clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk4", "per2clk", bases[CLKRST2_INDEX],
 				BIT(4), 0);
 	clk_register_clkdev(clk, "apb_pclk", "sdi4");
 
-	clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", bases[CLKRST2_INDEX],
 				BIT(5), 0);
 	clk_register_clkdev(clk, "apb_pclk", "msp2");
 	clk_register_clkdev(clk, "apb_pclk", "dbx5x0-msp-i2s.2");
 
-	clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", bases[CLKRST2_INDEX],
 				BIT(6), 0);
 	clk_register_clkdev(clk, "apb_pclk", "sdi1");
 
-	clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", bases[CLKRST2_INDEX],
 				BIT(7), 0);
 	clk_register_clkdev(clk, "apb_pclk", "sdi3");
 
-	clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk8", "per2clk", bases[CLKRST2_INDEX],
 				BIT(8), 0);
 	clk_register_clkdev(clk, NULL, "spi0");
 
-	clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk9", "per2clk", bases[CLKRST2_INDEX],
 				BIT(9), 0);
 	clk_register_clkdev(clk, "hsir_hclk", "ste_hsi.0");
 
-	clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk10", "per2clk", bases[CLKRST2_INDEX],
 				BIT(10), 0);
 	clk_register_clkdev(clk, "hsit_hclk", "ste_hsi.0");
 
-	clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk11", "per2clk", bases[CLKRST2_INDEX],
 				BIT(11), 0);
 	clk_register_clkdev(clk, NULL, "gpio.6");
 	clk_register_clkdev(clk, NULL, "gpio.7");
 	clk_register_clkdev(clk, NULL, "gpioblock1");
 
-	clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", clkrst2_base,
+	clk = clk_reg_prcc_pclk("p2_pclk12", "per2clk", bases[CLKRST2_INDEX],
 				BIT(12), 0);
 	clk_register_clkdev(clk, "msp4-pclk", "ab85xx-codec.0");
 
 	/* Peripheral 3 : PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk0", "per3clk", bases[CLKRST3_INDEX],
 				BIT(0), 0);
 	clk_register_clkdev(clk, NULL, "fsmc");
 
-	clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", bases[CLKRST3_INDEX],
 				BIT(1), 0);
 	clk_register_clkdev(clk, "apb_pclk", "ssp0");
 
-	clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", bases[CLKRST3_INDEX],
 				BIT(2), 0);
 	clk_register_clkdev(clk, "apb_pclk", "ssp1");
 
-	clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", bases[CLKRST3_INDEX],
 				BIT(3), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
 
-	clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", bases[CLKRST3_INDEX],
 				BIT(4), 0);
 	clk_register_clkdev(clk, "apb_pclk", "sdi2");
 
-	clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk5", "per3clk", bases[CLKRST3_INDEX],
 				BIT(5), 0);
 	clk_register_clkdev(clk, "apb_pclk", "ske");
 	clk_register_clkdev(clk, "apb_pclk", "nmk-ske-keypad");
 
-	clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk6", "per3clk", bases[CLKRST3_INDEX],
 				BIT(6), 0);
 	clk_register_clkdev(clk, "apb_pclk", "uart2");
 
-	clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk7", "per3clk", bases[CLKRST3_INDEX],
 				BIT(7), 0);
 	clk_register_clkdev(clk, "apb_pclk", "sdi5");
 
-	clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk8", "per3clk", bases[CLKRST3_INDEX],
 				BIT(8), 0);
 	clk_register_clkdev(clk, NULL, "gpio.2");
 	clk_register_clkdev(clk, NULL, "gpio.3");
@@ -371,64 +405,64 @@
 	clk_register_clkdev(clk, NULL, "gpio.5");
 	clk_register_clkdev(clk, NULL, "gpioblock2");
 
-	clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk9", "per3clk", bases[CLKRST3_INDEX],
 				BIT(9), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.5");
 
-	clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk10", "per3clk", bases[CLKRST3_INDEX],
 				BIT(10), 0);
 	clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.6");
 
-	clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk11", "per3clk", bases[CLKRST3_INDEX],
 				BIT(11), 0);
 	clk_register_clkdev(clk, "apb_pclk", "uart3");
 
-	clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", clkrst3_base,
+	clk = clk_reg_prcc_pclk("p3_pclk12", "per3clk", bases[CLKRST3_INDEX],
 				BIT(12), 0);
 	clk_register_clkdev(clk, "apb_pclk", "uart4");
 
 	/* Peripheral 5 : PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", clkrst5_base,
+	clk = clk_reg_prcc_pclk("p5_pclk0", "per5clk", bases[CLKRST5_INDEX],
 				BIT(0), 0);
 	clk_register_clkdev(clk, "usb", "musb-ux500.0");
 	clk_register_clkdev(clk, "usbclk", "ab-iddet.0");
 
-	clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", clkrst5_base,
+	clk = clk_reg_prcc_pclk("p5_pclk1", "per5clk", bases[CLKRST5_INDEX],
 				BIT(1), 0);
 	clk_register_clkdev(clk, NULL, "gpio.8");
 	clk_register_clkdev(clk, NULL, "gpioblock3");
 
 	/* Peripheral 6 : PRCC P-clocks */
-	clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk0", "per6clk", bases[CLKRST6_INDEX],
 				BIT(0), 0);
 	clk_register_clkdev(clk, "apb_pclk", "rng");
 
-	clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk1", "per6clk", bases[CLKRST6_INDEX],
 				BIT(1), 0);
 	clk_register_clkdev(clk, NULL, "cryp0");
 	clk_register_clkdev(clk, NULL, "cryp1");
 
-	clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk2", "per6clk", bases[CLKRST6_INDEX],
 				BIT(2), 0);
 	clk_register_clkdev(clk, NULL, "hash0");
 
-	clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk3", "per6clk", bases[CLKRST6_INDEX],
 				BIT(3), 0);
 	clk_register_clkdev(clk, NULL, "pka");
 
-	clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk4", "per6clk", bases[CLKRST6_INDEX],
 				BIT(4), 0);
 	clk_register_clkdev(clk, NULL, "db8540-hash1");
 
-	clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk5", "per6clk", bases[CLKRST6_INDEX],
 				BIT(5), 0);
 	clk_register_clkdev(clk, NULL, "cfgreg");
 
-	clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk6", "per6clk", bases[CLKRST6_INDEX],
 				BIT(6), 0);
 	clk_register_clkdev(clk, "apb_pclk", "mtu0");
 
-	clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", clkrst6_base,
+	clk = clk_reg_prcc_pclk("p6_pclk7", "per6clk", bases[CLKRST6_INDEX],
 				BIT(7), 0);
 	clk_register_clkdev(clk, "apb_pclk", "mtu1");
 
@@ -442,138 +476,138 @@
 
 	/* Peripheral 1 : PRCC K-clocks */
 	clk = clk_reg_prcc_kclk("p1_uart0_kclk", "uartclk",
-			clkrst1_base, BIT(0), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(0), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "uart0");
 
 	clk = clk_reg_prcc_kclk("p1_uart1_kclk", "uartclk",
-			clkrst1_base, BIT(1), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(1), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "uart1");
 
 	clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
-			clkrst1_base, BIT(2), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(2), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.1");
 
 	clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
-			clkrst1_base, BIT(3), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(3), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "msp0");
 	clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.0");
 
 	clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
-			clkrst1_base, BIT(4), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(4), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "msp1");
 	clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.1");
 
 	clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmchclk",
-			clkrst1_base, BIT(5), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(5), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "sdi0");
 
 	clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
-			clkrst1_base, BIT(6), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(6), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.2");
 
 	clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
-			clkrst1_base, BIT(8), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(8), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "slimbus0");
 
 	clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
-			clkrst1_base, BIT(9), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(9), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.4");
 
 	clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
-			clkrst1_base, BIT(10), CLK_SET_RATE_GATE);
+			bases[CLKRST1_INDEX], BIT(10), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "msp3");
 	clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.3");
 
 	/* Peripheral 2 : PRCC K-clocks */
 	clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
-			clkrst2_base, BIT(0), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(0), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.3");
 
 	clk = clk_reg_prcc_kclk("p2_pwl_kclk", "rtc32k",
-			clkrst2_base, BIT(1), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(1), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "pwl");
 
 	clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmchclk",
-			clkrst2_base, BIT(2), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(2), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "sdi4");
 
 	clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
-			clkrst2_base, BIT(3), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(3), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "msp2");
 	clk_register_clkdev(clk, NULL, "dbx5x0-msp-i2s.2");
 
 	clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmchclk",
-			clkrst2_base, BIT(4), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(4), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "sdi1");
 
 	clk = clk_reg_prcc_kclk("p2_sdi3_kclk", "sdmmcclk",
-			clkrst2_base, BIT(5), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(5), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "sdi3");
 
 	clk = clk_reg_prcc_kclk("p2_ssirx_kclk", "hsirxclk",
-			clkrst2_base, BIT(6),
+			bases[CLKRST2_INDEX], BIT(6),
 			CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
 	clk_register_clkdev(clk, "hsir_hsirxclk", "ste_hsi.0");
 
 	clk = clk_reg_prcc_kclk("p2_ssitx_kclk", "hsitxclk",
-			clkrst2_base, BIT(7),
+			bases[CLKRST2_INDEX], BIT(7),
 			CLK_SET_RATE_GATE|CLK_SET_RATE_PARENT);
 	clk_register_clkdev(clk, "hsit_hsitxclk", "ste_hsi.0");
 
 	/* Should only be 9540, but might be added for 85xx as well */
 	clk = clk_reg_prcc_kclk("p2_msp4_kclk", "msp02clk",
-			clkrst2_base, BIT(9), CLK_SET_RATE_GATE);
+			bases[CLKRST2_INDEX], BIT(9), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "msp4");
 	clk_register_clkdev(clk, "msp4", "ab85xx-codec.0");
 
 	/* Peripheral 3 : PRCC K-clocks */
 	clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
-			clkrst3_base, BIT(1), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(1), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "ssp0");
 
 	clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
-			clkrst3_base, BIT(2), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(2), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "ssp1");
 
 	clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
-			clkrst3_base, BIT(3), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(3), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.0");
 
 	clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmchclk",
-			clkrst3_base, BIT(4), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(4), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "sdi2");
 
 	clk = clk_reg_prcc_kclk("p3_ske_kclk", "rtc32k",
-			clkrst3_base, BIT(5), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(5), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "ske");
 	clk_register_clkdev(clk, NULL, "nmk-ske-keypad");
 
 	clk = clk_reg_prcc_kclk("p3_uart2_kclk", "uartclk",
-			clkrst3_base, BIT(6), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(6), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "uart2");
 
 	clk = clk_reg_prcc_kclk("p3_sdi5_kclk", "sdmmcclk",
-			clkrst3_base, BIT(7), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(7), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "sdi5");
 
 	clk = clk_reg_prcc_kclk("p3_i2c5_kclk", "i2cclk",
-			clkrst3_base, BIT(8), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(8), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.5");
 
 	clk = clk_reg_prcc_kclk("p3_i2c6_kclk", "i2cclk",
-			clkrst3_base, BIT(9), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(9), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "nmk-i2c.6");
 
 	clk = clk_reg_prcc_kclk("p3_uart3_kclk", "uartclk",
-			clkrst3_base, BIT(10), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(10), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "uart3");
 
 	clk = clk_reg_prcc_kclk("p3_uart4_kclk", "uartclk",
-			clkrst3_base, BIT(11), CLK_SET_RATE_GATE);
+			bases[CLKRST3_INDEX], BIT(11), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "uart4");
 
 	/* Peripheral 6 : PRCC K-clocks */
 	clk = clk_reg_prcc_kclk("p6_rng_kclk", "rngclk",
-			clkrst6_base, BIT(0), CLK_SET_RATE_GATE);
+			bases[CLKRST6_INDEX], BIT(0), CLK_SET_RATE_GATE);
 	clk_register_clkdev(clk, NULL, "rng");
 }
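
For readers skimming the ux500 hunks above: the change replaces the separate clkrstN_base parameters with a single bases[] array indexed by CLKRSTn_INDEX constants, so every PRCC clock registration pulls its base address from one table. A minimal userland sketch of that indexing pattern follows; the enum values and addresses here are illustrative stand-ins, not the driver's real ones.

/* Toy model of the bases[CLKRSTn_INDEX] lookup used by the u8540 patch. */
#include <stdio.h>

enum { CLKRST1_INDEX, CLKRST2_INDEX, CLKRST3_INDEX, CLKRST5_INDEX,
       CLKRST6_INDEX, CLKRST_MAX };

static void register_pclk(const char *name, unsigned long base, unsigned int bit)
{
	printf("%-10s base=0x%08lx bit=%u\n", name, base, bit);
}

int main(void)
{
	/* Hypothetical base addresses; the real ones come from DT/ioremap. */
	unsigned long bases[CLKRST_MAX] = {
		[CLKRST1_INDEX] = 0x8012f000,
		[CLKRST3_INDEX] = 0x8000f000,
		[CLKRST6_INDEX] = 0xa03ff000,
	};

	/* One table lookup instead of several clkrstN_base variables. */
	register_pclk("p3_pclk12", bases[CLKRST3_INDEX], 12);
	register_pclk("p6_pclk0", bases[CLKRST6_INDEX], 0);
	return 0;
}
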
diff --git a/drivers/clk/ux500/u9540_clk.c b/drivers/clk/ux500/u9540_clk.c
index 4479478..2138a4c 100644
--- a/drivers/clk/ux500/u9540_clk.c
+++ b/drivers/clk/ux500/u9540_clk.c
@@ -7,15 +7,12 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/platform_data/clk-ux500.h>
 #include "clk.h"
 
-void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		    u32 clkrst5_base, u32 clkrst6_base)
+void u9540_clk_init(void)
 {
 	/* register clocks here */
 }
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index bc96f10..a3893ea 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -13,8 +13,9 @@
  * ICST clock code from the ARM tree should probably be merged into this
  * file.
  */
-#include <linux/clk.h>
-#include <linux/clkdev.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
 #include <linux/err.h>
 #include <linux/clk-provider.h>
 #include <linux/io.h>
diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c
index 1cc1330..65c842a 100644
--- a/drivers/clk/versatile/clk-impd1.c
+++ b/drivers/clk/versatile/clk-impd1.c
@@ -7,7 +7,6 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk-provider.h>
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/io.h>
diff --git a/drivers/clk/versatile/clk-realview.c b/drivers/clk/versatile/clk-realview.c
index c8b5231..86f7099 100644
--- a/drivers/clk/versatile/clk-realview.c
+++ b/drivers/clk/versatile/clk-realview.c
@@ -6,7 +6,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -33,13 +32,13 @@
 	.idx2s		= icst307_idx2s,
 };
 
-static const struct clk_icst_desc __initdata realview_osc0_desc = {
+static const struct clk_icst_desc realview_osc0_desc __initconst = {
 	.params = &realview_oscvco_params,
 	.vco_offset = REALVIEW_SYS_OSC0_OFFSET,
 	.lock_offset = REALVIEW_SYS_LOCK_OFFSET,
 };
 
-static const struct clk_icst_desc __initdata realview_osc4_desc = {
+static const struct clk_icst_desc realview_osc4_desc __initconst = {
 	.params = &realview_oscvco_params,
 	.vco_offset = REALVIEW_SYS_OSC4_OFFSET,
 	.lock_offset = REALVIEW_SYS_LOCK_OFFSET,
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index a96dd8e..a1cdef6 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -12,7 +12,8 @@
  */
 
 #include <linux/amba/sp810.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/of.h>
@@ -32,12 +33,9 @@
 
 struct clk_sp810 {
 	struct device_node *node;
-	int refclk_index, timclk_index;
 	void __iomem *base;
 	spinlock_t lock;
 	struct clk_sp810_timerclken timerclken[4];
-	struct clk *refclk;
-	struct clk *timclk;
 };
 
 static u8 clk_sp810_timerclken_get_parent(struct clk_hw *hw)
@@ -70,55 +68,7 @@
 	return 0;
 }
 
-/*
- * FIXME - setting the parent every time .prepare is invoked is inefficient.
- * This is better handled by a dedicated clock tree configuration mechanism at
- * init-time.  Revisit this later when such a mechanism exists
- */
-static int clk_sp810_timerclken_prepare(struct clk_hw *hw)
-{
-	struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
-	struct clk_sp810 *sp810 = timerclken->sp810;
-	struct clk *old_parent = __clk_get_parent(hw->clk);
-	struct clk *new_parent;
-
-	if (!sp810->refclk)
-		sp810->refclk = of_clk_get(sp810->node, sp810->refclk_index);
-
-	if (!sp810->timclk)
-		sp810->timclk = of_clk_get(sp810->node, sp810->timclk_index);
-
-	if (WARN_ON(IS_ERR(sp810->refclk) || IS_ERR(sp810->timclk)))
-		return -ENOENT;
-
-	/* Select fastest parent */
-	if (clk_get_rate(sp810->refclk) > clk_get_rate(sp810->timclk))
-		new_parent = sp810->refclk;
-	else
-		new_parent = sp810->timclk;
-
-	/* Switch the parent if necessary */
-	if (old_parent != new_parent) {
-		clk_prepare(new_parent);
-		clk_set_parent(hw->clk, new_parent);
-		clk_unprepare(old_parent);
-	}
-
-	return 0;
-}
-
-static void clk_sp810_timerclken_unprepare(struct clk_hw *hw)
-{
-	struct clk_sp810_timerclken *timerclken = to_clk_sp810_timerclken(hw);
-	struct clk_sp810 *sp810 = timerclken->sp810;
-
-	clk_put(sp810->timclk);
-	clk_put(sp810->refclk);
-}
-
 static const struct clk_ops clk_sp810_timerclken_ops = {
-	.prepare = clk_sp810_timerclken_prepare,
-	.unprepare = clk_sp810_timerclken_unprepare,
 	.get_parent = clk_sp810_timerclken_get_parent,
 	.set_parent = clk_sp810_timerclken_set_parent,
 };
@@ -128,8 +78,8 @@
 {
 	struct clk_sp810 *sp810 = data;
 
-	if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
-			ARRAY_SIZE(sp810->timerclken)))
+	if (WARN_ON(clkspec->args_count != 1 ||
+		    clkspec->args[0] >=	ARRAY_SIZE(sp810->timerclken)))
 		return NULL;
 
 	return sp810->timerclken[clkspec->args[0]].clk;
@@ -139,24 +89,18 @@
 {
 	struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL);
 	const char *parent_names[2];
+	int num = ARRAY_SIZE(parent_names);
 	char name[12];
 	struct clk_init_data init;
 	int i;
+	bool deprecated;
 
 	if (!sp810) {
 		pr_err("Failed to allocate memory for SP810!\n");
 		return;
 	}
 
-	sp810->refclk_index = of_property_match_string(node, "clock-names",
-			"refclk");
-	parent_names[0] = of_clk_get_parent_name(node, sp810->refclk_index);
-
-	sp810->timclk_index = of_property_match_string(node, "clock-names",
-			"timclk");
-	parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index);
-
-	if (!parent_names[0] || !parent_names[1]) {
+	if (of_clk_parent_fill(node, parent_names, num) != num) {
 		pr_warn("Failed to obtain parent clocks for SP810!\n");
 		return;
 	}
@@ -169,7 +113,9 @@
 	init.ops = &clk_sp810_timerclken_ops;
 	init.flags = CLK_IS_BASIC;
 	init.parent_names = parent_names;
-	init.num_parents = ARRAY_SIZE(parent_names);
+	init.num_parents = num;
+
+	deprecated = !of_find_property(node, "assigned-clock-parents", NULL);
 
 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
 		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
@@ -178,6 +124,15 @@
 		sp810->timerclken[i].channel = i;
 		sp810->timerclken[i].hw.init = &init;
 
+		/*
+		 * If DT isn't setting the parent, force it to be
+		 * the 1 MHz clock without going through the framework.
+		 * We do this before clk_register() so that it can determine
+		 * the parent and set up the tree properly.
+		 */
+		if (deprecated)
+			init.ops->set_parent(&sp810->timerclken[i].hw, 1);
+
 		sp810->timerclken[i].clk = clk_register(NULL,
 				&sp810->timerclken[i].hw);
 		WARN_ON(IS_ERR(sp810->timerclken[i].clk));
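
The SP810 rework above does two things: it gathers both parent names with a single of_clk_parent_fill() call instead of matching "clock-names" entries by hand, and, for DTs that lack an "assigned-clock-parents" override, it forces each timerclken mux to parent 1 (the fixed 1 MHz clock mentioned in the comment) before registration. A rough standalone model of that flow, with fill_parents() standing in for the OF helper and the override flag hard-coded:

/* Rough model, not kernel code; names mirror the patch for readability. */
#include <stdio.h>

#define NUM_PARENTS 2

/* Stand-in for of_clk_parent_fill(): returns how many names were filled. */
static int fill_parents(const char *const src[], int src_cnt,
			const char *dst[], int max)
{
	int i;

	for (i = 0; i < src_cnt && i < max; i++)
		dst[i] = src[i];
	return i;
}

int main(void)
{
	const char *const dt_parents[] = { "refclk", "timclk" };
	const char *parents[NUM_PARENTS];
	int dt_has_override = 0;	/* deprecated DT: no override given */

	if (fill_parents(dt_parents, 2, parents, NUM_PARENTS) != NUM_PARENTS) {
		fprintf(stderr, "Failed to obtain parent clocks for SP810!\n");
		return 1;
	}

	/* Old DTs get the fixed 1 MHz timclk forced as the initial parent. */
	if (!dt_has_override)
		printf("initial parent: %s (index 1)\n", parents[1]);

	return 0;
}
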
diff --git a/drivers/clk/versatile/clk-versatile.c b/drivers/clk/versatile/clk-versatile.c
index 7a4f863..a89a927 100644
--- a/drivers/clk/versatile/clk-versatile.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
  */
 #include <linux/clk-provider.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
 #include <linux/err.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -35,7 +33,7 @@
 	.idx2s		= icst525_idx2s,
 };
 
-static const struct clk_icst_desc __initdata cm_auxosc_desc = {
+static const struct clk_icst_desc cm_auxosc_desc __initconst = {
 	.params = &cp_auxosc_params,
 	.vco_offset = 0x1c,
 	.lock_offset = INTEGRATOR_HDR_LOCK_OFFSET,
diff --git a/drivers/clk/zte/Makefile b/drivers/clk/zte/Makefile
index 95b707c..74005aa 100644
--- a/drivers/clk/zte/Makefile
+++ b/drivers/clk/zte/Makefile
@@ -1,2 +1,2 @@
-obj-y := clk-pll.o
+obj-y := clk.o
 obj-$(CONFIG_SOC_ZX296702) += clk-zx296702.o
diff --git a/drivers/clk/zte/clk-pll.c b/drivers/clk/zte/clk-pll.c
deleted file mode 100644
index c3b221a..0000000
--- a/drivers/clk/zte/clk-pll.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright 2014 Linaro Ltd.
- * Copyright (C) 2014 ZTE Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include "clk.h"
-
-#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw)
-
-#define CFG0_CFG1_OFFSET 4
-#define LOCK_FLAG BIT(30)
-#define POWER_DOWN BIT(31)
-
-static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate)
-{
-	const struct zx_pll_config *config = zx_pll->lookup_table;
-	int i;
-
-	for (i = 0; i < zx_pll->count; i++) {
-		if (config[i].rate > rate)
-			return i > 0 ? i - 1 : 0;
-
-		if (config[i].rate == rate)
-			return i;
-	}
-
-	return i - 1;
-}
-
-static int hw_to_idx(struct clk_zx_pll *zx_pll)
-{
-	const struct zx_pll_config *config = zx_pll->lookup_table;
-	u32 hw_cfg0, hw_cfg1;
-	int i;
-
-	hw_cfg0 = readl_relaxed(zx_pll->reg_base);
-	hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET);
-
-	/* For matching the value in lookup table */
-	hw_cfg0 &= ~LOCK_FLAG;
-	hw_cfg0 |= POWER_DOWN;
-
-	for (i = 0; i < zx_pll->count; i++) {
-		if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1)
-			return i;
-	}
-
-	return -EINVAL;
-}
-
-static unsigned long zx_pll_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
-{
-	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
-	int idx;
-
-	idx = hw_to_idx(zx_pll);
-	if (unlikely(idx == -EINVAL))
-		return 0;
-
-	return zx_pll->lookup_table[idx].rate;
-}
-
-static long zx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-			      unsigned long *prate)
-{
-	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
-	int idx;
-
-	idx = rate_to_idx(zx_pll, rate);
-
-	return zx_pll->lookup_table[idx].rate;
-}
-
-static int zx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-			   unsigned long parent_rate)
-{
-	/* Assume current cpu is not running on current PLL */
-	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
-	const struct zx_pll_config *config;
-	int idx;
-
-	idx = rate_to_idx(zx_pll, rate);
-	config = &zx_pll->lookup_table[idx];
-
-	writel_relaxed(config->cfg0, zx_pll->reg_base);
-	writel_relaxed(config->cfg1, zx_pll->reg_base + CFG0_CFG1_OFFSET);
-
-	return 0;
-}
-
-static int zx_pll_enable(struct clk_hw *hw)
-{
-	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
-	u32 reg;
-
-	reg = readl_relaxed(zx_pll->reg_base);
-	writel_relaxed(reg & ~POWER_DOWN, zx_pll->reg_base);
-
-	return readl_relaxed_poll_timeout(zx_pll->reg_base, reg,
-					  reg & LOCK_FLAG, 0, 100);
-}
-
-static void zx_pll_disable(struct clk_hw *hw)
-{
-	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
-	u32 reg;
-
-	reg = readl_relaxed(zx_pll->reg_base);
-	writel_relaxed(reg | POWER_DOWN, zx_pll->reg_base);
-}
-
-static int zx_pll_is_enabled(struct clk_hw *hw)
-{
-	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
-	u32 reg;
-
-	reg = readl_relaxed(zx_pll->reg_base);
-
-	return !(reg & POWER_DOWN);
-}
-
-static const struct clk_ops zx_pll_ops = {
-	.recalc_rate = zx_pll_recalc_rate,
-	.round_rate = zx_pll_round_rate,
-	.set_rate = zx_pll_set_rate,
-	.enable = zx_pll_enable,
-	.disable = zx_pll_disable,
-	.is_enabled = zx_pll_is_enabled,
-};
-
-struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
-	unsigned long flags, void __iomem *reg_base,
-	const struct zx_pll_config *lookup_table, int count, spinlock_t *lock)
-{
-	struct clk_zx_pll *zx_pll;
-	struct clk *clk;
-	struct clk_init_data init;
-
-	zx_pll = kzalloc(sizeof(*zx_pll), GFP_KERNEL);
-	if (!zx_pll)
-		return ERR_PTR(-ENOMEM);
-
-	init.name = name;
-	init.ops = &zx_pll_ops;
-	init.flags = flags;
-	init.parent_names = parent_name ? &parent_name : NULL;
-	init.num_parents = parent_name ? 1 : 0;
-
-	zx_pll->reg_base = reg_base;
-	zx_pll->lookup_table = lookup_table;
-	zx_pll->count = count;
-	zx_pll->lock = lock;
-	zx_pll->hw.init = &init;
-
-	clk = clk_register(NULL, &zx_pll->hw);
-	if (IS_ERR(clk))
-		kfree(zx_pll);
-
-	return clk;
-}
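
clk-pll.c is not dropped functionally; the same PLL code reappears verbatim in the consolidated clk.c added later in this series, alongside the new audio divider. The rate selection it implements is a plain lookup-table search that picks the closest table entry not above the request, clamped to the first entry. A self-contained sketch of that search follows; only the 700 MHz line mirrors the real pll_a9_config, the other entries are made-up placeholders.

/* Standalone sketch of the ZX PLL lookup-table rate selection. */
#include <stdio.h>

struct pll_cfg { unsigned long rate; unsigned int cfg0, cfg1; };

static const struct pll_cfg table[] = {
	{ 700000000, 0x800405d1, 0x04555555 },	/* from pll_a9_config */
	{ 800000000, 0x80040000, 0x04000000 },	/* placeholder values */
	{ 900000000, 0x80040001, 0x04000001 },	/* placeholder values */
};

/* Same policy as rate_to_idx(): nearest entry not above the request. */
static int rate_to_idx(unsigned long rate)
{
	int i;

	for (i = 0; i < (int)(sizeof(table) / sizeof(table[0])); i++) {
		if (table[i].rate > rate)
			return i > 0 ? i - 1 : 0;
		if (table[i].rate == rate)
			return i;
	}
	return i - 1;
}

int main(void)
{
	unsigned long req = 850000000;
	int idx = rate_to_idx(req);

	printf("%lu Hz -> entry %d (%lu Hz, cfg0=%#x cfg1=%#x)\n",
	       req, idx, table[idx].rate, table[idx].cfg0, table[idx].cfg1);
	return 0;
}
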
diff --git a/drivers/clk/zte/clk-zx296702.c b/drivers/clk/zte/clk-zx296702.c
index 929d033..ebd20d8 100644
--- a/drivers/clk/zte/clk-zx296702.c
+++ b/drivers/clk/zte/clk-zx296702.c
@@ -36,10 +36,21 @@
 #define CLK_MUX1		(topcrm_base + 0x8c)
 
 #define CLK_SDMMC1		(lsp0crpm_base + 0x0c)
+#define CLK_GPIO		(lsp0crpm_base + 0x2c)
+#define CLK_SPDIF0		(lsp0crpm_base + 0x10)
+#define SPDIF0_DIV		(lsp0crpm_base + 0x14)
+#define CLK_I2S0		(lsp0crpm_base + 0x18)
+#define I2S0_DIV		(lsp0crpm_base + 0x1c)
+#define CLK_I2S1		(lsp0crpm_base + 0x20)
+#define I2S1_DIV		(lsp0crpm_base + 0x24)
+#define CLK_I2S2		(lsp0crpm_base + 0x34)
+#define I2S2_DIV		(lsp0crpm_base + 0x38)
 
 #define CLK_UART0		(lsp1crpm_base + 0x20)
 #define CLK_UART1		(lsp1crpm_base + 0x24)
 #define CLK_SDMMC0		(lsp1crpm_base + 0x2c)
+#define CLK_SPDIF1		(lsp1crpm_base + 0x30)
+#define SPDIF1_DIV		(lsp1crpm_base + 0x34)
 
 static const struct zx_pll_config pll_a9_config[] = {
 	{ .rate = 700000000, .cfg0 = 0x800405d1, .cfg1 = 0x04555555 },
@@ -72,104 +83,119 @@
 	{ /* sentinel */ }
 };
 
-static const char * matrix_aclk_sel[] = {
+static const char * const matrix_aclk_sel[] = {
 	"pll_mm0_198M",
 	"osc",
 	"clk_148M5",
 	"pll_lsp_104M",
 };
 
-static const char * a9_wclk_sel[] = {
+static const char * const a9_wclk_sel[] = {
 	"pll_a9",
 	"osc",
 	"clk_500",
 	"clk_250",
 };
 
-static const char * a9_as1_aclk_sel[] = {
+static const char * const a9_as1_aclk_sel[] = {
 	"clk_250",
 	"osc",
 	"pll_mm0_396M",
 	"pll_mac_333M",
 };
 
-static const char * a9_trace_clkin_sel[] = {
+static const char * const a9_trace_clkin_sel[] = {
 	"clk_74M25",
 	"pll_mm1_108M",
 	"clk_125",
 	"clk_148M5",
 };
 
-static const char * decppu_aclk_sel[] = {
+static const char * const decppu_aclk_sel[] = {
 	"clk_250",
 	"pll_mm0_198M",
 	"pll_lsp_104M",
 	"pll_audio_294M912",
 };
 
-static const char * vou_main_wclk_sel[] = {
+static const char * const vou_main_wclk_sel[] = {
 	"clk_148M5",
 	"clk_74M25",
 	"clk_27",
 	"pll_mm1_54M",
 };
 
-static const char * vou_scaler_wclk_sel[] = {
+static const char * const vou_scaler_wclk_sel[] = {
 	"clk_250",
 	"pll_mac_333M",
 	"pll_audio_294M912",
 	"pll_mm0_198M",
 };
 
-static const char * r2d_wclk_sel[] = {
+static const char * const r2d_wclk_sel[] = {
 	"pll_audio_294M912",
 	"pll_mac_333M",
 	"pll_a9_350M",
 	"pll_mm0_396M",
 };
 
-static const char * ddr_wclk_sel[] = {
+static const char * const ddr_wclk_sel[] = {
 	"pll_mac_333M",
 	"pll_ddr_266M",
 	"pll_audio_294M912",
 	"pll_mm0_198M",
 };
 
-static const char * nand_wclk_sel[] = {
+static const char * const nand_wclk_sel[] = {
 	"pll_lsp_104M",
 	"osc",
 };
 
-static const char * lsp_26_wclk_sel[] = {
+static const char * const lsp_26_wclk_sel[] = {
 	"pll_lsp_26M",
 	"osc",
 };
 
-static const char * vl0_sel[] = {
+static const char * const vl0_sel[] = {
 	"vou_main_channel_div",
 	"vou_aux_channel_div",
 };
 
-static const char * hdmi_sel[] = {
+static const char * const hdmi_sel[] = {
 	"vou_main_channel_wclk",
 	"vou_aux_channel_wclk",
 };
 
-static const char * sdmmc0_wclk_sel[] = {
+static const char * const sdmmc0_wclk_sel[] = {
 	"lsp1_104M_wclk",
 	"lsp1_26M_wclk",
 };
 
-static const char * sdmmc1_wclk_sel[] = {
+static const char * const sdmmc1_wclk_sel[] = {
 	"lsp0_104M_wclk",
 	"lsp0_26M_wclk",
 };
 
-static const char * uart_wclk_sel[] = {
+static const char * const uart_wclk_sel[] = {
 	"lsp1_104M_wclk",
 	"lsp1_26M_wclk",
 };
 
+static const char * const spdif0_wclk_sel[] = {
+	"lsp0_104M_wclk",
+	"lsp0_26M_wclk",
+};
+
+static const char * const spdif1_wclk_sel[] = {
+	"lsp1_104M_wclk",
+	"lsp1_26M_wclk",
+};
+
+static const char * const i2s_wclk_sel[] = {
+	"lsp0_104M_wclk",
+	"lsp0_26M_wclk",
+};
+
 static inline struct clk *zx_divtbl(const char *name, const char *parent,
 				    void __iomem *reg, u8 shift, u8 width,
 				    const struct clk_div_table *table)
@@ -185,7 +211,7 @@
 				    reg, shift, width, 0, &reg_lock);
 }
 
-static inline struct clk *zx_mux(const char *name, const char **parents,
+static inline struct clk *zx_mux(const char *name, const char * const *parents,
 		int num_parents, void __iomem *reg, u8 shift, u8 width)
 {
 	return clk_register_mux(NULL, name, parents, num_parents,
@@ -196,7 +222,7 @@
 				  void __iomem *reg, u8 shift)
 {
 	return clk_register_gate(NULL, name, parent, CLK_IGNORE_UNUSED,
-				 reg, shift, 0, &reg_lock);
+				 reg, shift, CLK_SET_RATE_PARENT, &reg_lock);
 }
 
 static void __init zx296702_top_clocks_init(struct device_node *np)
@@ -585,7 +611,57 @@
 	clk[ZX296702_SDMMC1_WCLK] =
 		zx_gate("sdmmc1_wclk", "sdmmc1_wclk_div", CLK_SDMMC1, 1);
 	clk[ZX296702_SDMMC1_PCLK] =
-		zx_gate("sdmmc1_pclk", "lsp1_apb_pclk", CLK_SDMMC1, 0);
+		zx_gate("sdmmc1_pclk", "lsp0_apb_pclk", CLK_SDMMC1, 0);
+
+	clk[ZX296702_GPIO_CLK] =
+		zx_gate("gpio_clk", "lsp0_apb_pclk", CLK_GPIO, 0);
+
+	/* SPDIF */
+	clk[ZX296702_SPDIF0_WCLK_MUX] =
+		zx_mux("spdif0_wclk_mux", spdif0_wclk_sel,
+				ARRAY_SIZE(spdif0_wclk_sel), CLK_SPDIF0, 4, 1);
+	clk[ZX296702_SPDIF0_WCLK] =
+		zx_gate("spdif0_wclk", "spdif0_wclk_mux", CLK_SPDIF0, 1);
+	clk[ZX296702_SPDIF0_PCLK] =
+		zx_gate("spdif0_pclk", "lsp0_apb_pclk", CLK_SPDIF0, 0);
+
+	clk[ZX296702_SPDIF0_DIV] =
+		clk_register_zx_audio("spdif0_div", "spdif0_wclk", 0,
+				SPDIF0_DIV);
+
+	/* I2S */
+	clk[ZX296702_I2S0_WCLK_MUX] =
+		zx_mux("i2s0_wclk_mux", i2s_wclk_sel,
+				ARRAY_SIZE(i2s_wclk_sel), CLK_I2S0, 4, 1);
+	clk[ZX296702_I2S0_WCLK] =
+		zx_gate("i2s0_wclk", "i2s0_wclk_mux", CLK_I2S0, 1);
+	clk[ZX296702_I2S0_PCLK] =
+		zx_gate("i2s0_pclk", "lsp0_apb_pclk", CLK_I2S0, 0);
+
+	clk[ZX296702_I2S0_DIV] =
+		clk_register_zx_audio("i2s0_div", "i2s0_wclk", 0, I2S0_DIV);
+
+	clk[ZX296702_I2S1_WCLK_MUX] =
+		zx_mux("i2s1_wclk_mux", i2s_wclk_sel,
+				ARRAY_SIZE(i2s_wclk_sel), CLK_I2S1, 4, 1);
+	clk[ZX296702_I2S1_WCLK] =
+		zx_gate("i2s1_wclk", "i2s1_wclk_mux", CLK_I2S1, 1);
+	clk[ZX296702_I2S1_PCLK] =
+		zx_gate("i2s1_pclk", "lsp0_apb_pclk", CLK_I2S1, 0);
+
+	clk[ZX296702_I2S1_DIV] =
+		clk_register_zx_audio("i2s1_div", "i2s1_wclk", 0, I2S1_DIV);
+
+	clk[ZX296702_I2S2_WCLK_MUX] =
+		zx_mux("i2s2_wclk_mux", i2s_wclk_sel,
+				ARRAY_SIZE(i2s_wclk_sel), CLK_I2S2, 4, 1);
+	clk[ZX296702_I2S2_WCLK] =
+		zx_gate("i2s2_wclk", "i2s2_wclk_mux", CLK_I2S2, 1);
+	clk[ZX296702_I2S2_PCLK] =
+		zx_gate("i2s2_pclk", "lsp0_apb_pclk", CLK_I2S2, 0);
+
+	clk[ZX296702_I2S2_DIV] =
+		clk_register_zx_audio("i2s2_div", "i2s2_wclk", 0, I2S2_DIV);
 
 	for (i = 0; i < ARRAY_SIZE(lsp0clk); i++) {
 		if (IS_ERR(clk[i])) {
@@ -641,6 +717,18 @@
 	clk[ZX296702_SDMMC0_PCLK] =
 		zx_gate("sdmmc0_pclk", "lsp1_apb_pclk", CLK_SDMMC0, 0);
 
+	clk[ZX296702_SPDIF1_WCLK_MUX] =
+		zx_mux("spdif1_wclk_mux", spdif1_wclk_sel,
+				ARRAY_SIZE(spdif1_wclk_sel), CLK_SPDIF1, 4, 1);
+	clk[ZX296702_SPDIF1_WCLK] =
+		zx_gate("spdif1_wclk", "spdif1_wclk_mux", CLK_SPDIF1, 1);
+	clk[ZX296702_SPDIF1_PCLK] =
+		zx_gate("spdif1_pclk", "lsp1_apb_pclk", CLK_SPDIF1, 0);
+
+	clk[ZX296702_SPDIF1_DIV] =
+		clk_register_zx_audio("spdif1_div", "spdif1_wclk", 0,
+				SPDIF1_DIV);
+
 	for (i = 0; i < ARRAY_SIZE(lsp1clk); i++) {
 		if (IS_ERR(clk[i])) {
 			pr_err("zx296702 clk %d: register failed with %ld\n",
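
Most of the churn in this file is constification: the parent-name tables gain a second const so the pointer arrays themselves are read-only and match the const char * const * parameter taken by the updated zx_mux() helper. A tiny compile-and-run demo of what that buys; the names are copied from the patch, the helper is only illustrative.

/* Userland demo of "const char * const" parent tables. */
#include <stdio.h>

static const char * const uart_wclk_sel[] = {
	"lsp1_104M_wclk",
	"lsp1_26M_wclk",
};

static void print_parents(const char * const *parents, int n)
{
	for (int i = 0; i < n; i++)
		printf("parent[%d] = %s\n", i, parents[i]);
}

int main(void)
{
	/* uart_wclk_sel[0] = "other"; would now fail to compile. */
	print_parents(uart_wclk_sel, 2);
	return 0;
}
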
diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c
new file mode 100644
index 0000000..7c73c53
--- /dev/null
+++ b/drivers/clk/zte/clk.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2014 Linaro Ltd.
+ * Copyright (C) 2014 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/div64.h>
+
+#include "clk.h"
+
+#define to_clk_zx_pll(_hw) container_of(_hw, struct clk_zx_pll, hw)
+#define to_clk_zx_audio(_hw) container_of(_hw, struct clk_zx_audio, hw)
+
+#define CFG0_CFG1_OFFSET 4
+#define LOCK_FLAG BIT(30)
+#define POWER_DOWN BIT(31)
+
+static int rate_to_idx(struct clk_zx_pll *zx_pll, unsigned long rate)
+{
+	const struct zx_pll_config *config = zx_pll->lookup_table;
+	int i;
+
+	for (i = 0; i < zx_pll->count; i++) {
+		if (config[i].rate > rate)
+			return i > 0 ? i - 1 : 0;
+
+		if (config[i].rate == rate)
+			return i;
+	}
+
+	return i - 1;
+}
+
+static int hw_to_idx(struct clk_zx_pll *zx_pll)
+{
+	const struct zx_pll_config *config = zx_pll->lookup_table;
+	u32 hw_cfg0, hw_cfg1;
+	int i;
+
+	hw_cfg0 = readl_relaxed(zx_pll->reg_base);
+	hw_cfg1 = readl_relaxed(zx_pll->reg_base + CFG0_CFG1_OFFSET);
+
+	/* For matching the value in lookup table */
+	hw_cfg0 &= ~LOCK_FLAG;
+	hw_cfg0 |= POWER_DOWN;
+
+	for (i = 0; i < zx_pll->count; i++) {
+		if (hw_cfg0 == config[i].cfg0 && hw_cfg1 == config[i].cfg1)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+static unsigned long zx_pll_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
+	int idx;
+
+	idx = hw_to_idx(zx_pll);
+	if (unlikely(idx == -EINVAL))
+		return 0;
+
+	return zx_pll->lookup_table[idx].rate;
+}
+
+static long zx_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+			      unsigned long *prate)
+{
+	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
+	int idx;
+
+	idx = rate_to_idx(zx_pll, rate);
+
+	return zx_pll->lookup_table[idx].rate;
+}
+
+static int zx_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+			   unsigned long parent_rate)
+{
+	/* Assume current cpu is not running on current PLL */
+	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
+	const struct zx_pll_config *config;
+	int idx;
+
+	idx = rate_to_idx(zx_pll, rate);
+	config = &zx_pll->lookup_table[idx];
+
+	writel_relaxed(config->cfg0, zx_pll->reg_base);
+	writel_relaxed(config->cfg1, zx_pll->reg_base + CFG0_CFG1_OFFSET);
+
+	return 0;
+}
+
+static int zx_pll_enable(struct clk_hw *hw)
+{
+	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
+	u32 reg;
+
+	reg = readl_relaxed(zx_pll->reg_base);
+	writel_relaxed(reg & ~POWER_DOWN, zx_pll->reg_base);
+
+	return readl_relaxed_poll_timeout(zx_pll->reg_base, reg,
+					  reg & LOCK_FLAG, 0, 100);
+}
+
+static void zx_pll_disable(struct clk_hw *hw)
+{
+	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
+	u32 reg;
+
+	reg = readl_relaxed(zx_pll->reg_base);
+	writel_relaxed(reg | POWER_DOWN, zx_pll->reg_base);
+}
+
+static int zx_pll_is_enabled(struct clk_hw *hw)
+{
+	struct clk_zx_pll *zx_pll = to_clk_zx_pll(hw);
+	u32 reg;
+
+	reg = readl_relaxed(zx_pll->reg_base);
+
+	return !(reg & POWER_DOWN);
+}
+
+static const struct clk_ops zx_pll_ops = {
+	.recalc_rate = zx_pll_recalc_rate,
+	.round_rate = zx_pll_round_rate,
+	.set_rate = zx_pll_set_rate,
+	.enable = zx_pll_enable,
+	.disable = zx_pll_disable,
+	.is_enabled = zx_pll_is_enabled,
+};
+
+struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
+				unsigned long flags, void __iomem *reg_base,
+				const struct zx_pll_config *lookup_table,
+				int count, spinlock_t *lock)
+{
+	struct clk_zx_pll *zx_pll;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	zx_pll = kzalloc(sizeof(*zx_pll), GFP_KERNEL);
+	if (!zx_pll)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &zx_pll_ops;
+	init.flags = flags;
+	init.parent_names = parent_name ? &parent_name : NULL;
+	init.num_parents = parent_name ? 1 : 0;
+
+	zx_pll->reg_base = reg_base;
+	zx_pll->lookup_table = lookup_table;
+	zx_pll->count = count;
+	zx_pll->lock = lock;
+	zx_pll->hw.init = &init;
+
+	clk = clk_register(NULL, &zx_pll->hw);
+	if (IS_ERR(clk))
+		kfree(zx_pll);
+
+	return clk;
+}
+
+#define BPAR 1000000
+static u32 calc_reg(u32 parent_rate, u32 rate)
+{
+	u32 sel, integ, fra_div, tmp;
+	u64 tmp64 = (u64)parent_rate * BPAR;
+
+	do_div(tmp64, rate);
+	integ = (u32)tmp64 / BPAR;
+	integ = integ >> 1;
+
+	tmp = (u32)tmp64 % BPAR;
+	sel = tmp / BPAR;
+
+	tmp = tmp % BPAR;
+	fra_div = tmp * 0xff / BPAR;
+	tmp = (sel << 24) | (integ << 16) | (0xff << 8) | fra_div;
+
+	/* Set the I2S integer divider to 1. This bit is reserved for SPDIF
+	 * and does no harm there.
+	 */
+	tmp |= BIT(28);
+	return tmp;
+}
+
+static u32 calc_rate(u32 reg, u32 parent_rate)
+{
+	u32 sel, integ, fra_div, tmp;
+	u64 tmp64 = (u64)parent_rate * BPAR;
+
+	tmp = reg;
+	sel = (tmp >> 24) & BIT(0);
+	integ = (tmp >> 16) & 0xff;
+	fra_div = tmp & 0xff;
+
+	tmp = fra_div * BPAR;
+	tmp = tmp / 0xff;
+	tmp += sel * BPAR;
+	tmp += 2 * integ * BPAR;
+	do_div(tmp64, tmp);
+
+	return (u32)tmp64;
+}
+
+static unsigned long zx_audio_recalc_rate(struct clk_hw *hw,
+					  unsigned long parent_rate)
+{
+	struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+	u32 reg;
+
+	reg = readl_relaxed(zx_audio->reg_base);
+	return calc_rate(reg, parent_rate);
+}
+
+static long zx_audio_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *prate)
+{
+	u32 reg;
+
+	if (rate * 2 > *prate)
+		return -EINVAL;
+
+	reg = calc_reg(*prate, rate);
+	return calc_rate(reg, *prate);
+}
+
+static int zx_audio_set_rate(struct clk_hw *hw, unsigned long rate,
+			     unsigned long parent_rate)
+{
+	struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+	u32 reg;
+
+	reg = calc_reg(parent_rate, rate);
+	writel_relaxed(reg, zx_audio->reg_base);
+
+	return 0;
+}
+
+#define ZX_AUDIO_EN BIT(25)
+static int zx_audio_enable(struct clk_hw *hw)
+{
+	struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+	u32 reg;
+
+	reg = readl_relaxed(zx_audio->reg_base);
+	writel_relaxed(reg & ~ZX_AUDIO_EN, zx_audio->reg_base);
+	return 0;
+}
+
+static void zx_audio_disable(struct clk_hw *hw)
+{
+	struct clk_zx_audio *zx_audio = to_clk_zx_audio(hw);
+	u32 reg;
+
+	reg = readl_relaxed(zx_audio->reg_base);
+	writel_relaxed(reg | ZX_AUDIO_EN, zx_audio->reg_base);
+}
+
+static const struct clk_ops zx_audio_ops = {
+	.recalc_rate = zx_audio_recalc_rate,
+	.round_rate = zx_audio_round_rate,
+	.set_rate = zx_audio_set_rate,
+	.enable = zx_audio_enable,
+	.disable = zx_audio_disable,
+};
+
+struct clk *clk_register_zx_audio(const char *name,
+				  const char * const parent_name,
+				  unsigned long flags,
+				  void __iomem *reg_base)
+{
+	struct clk_zx_audio *zx_audio;
+	struct clk *clk;
+	struct clk_init_data init;
+
+	zx_audio = kzalloc(sizeof(*zx_audio), GFP_KERNEL);
+	if (!zx_audio)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &zx_audio_ops;
+	init.flags = flags;
+	init.parent_names = parent_name ? &parent_name : NULL;
+	init.num_parents = parent_name ? 1 : 0;
+
+	zx_audio->reg_base = reg_base;
+	zx_audio->hw.init = &init;
+
+	clk = clk_register(NULL, &zx_audio->hw);
+	if (IS_ERR(clk))
+		kfree(zx_audio);
+
+	return clk;
+}
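
The new clk_register_zx_audio() support is essentially a fractional divider: calc_reg()/calc_rate() encode a ratio of roughly parent / (2*integ + sel + fra/255), with bit 28 pinning the I2S integer divider to 1. A standalone sketch of that arithmetic follows; the field layout mirrors the patch, sel is left at zero as it effectively is in calc_reg(), and the 104 MHz parent and 12.288 MHz target are only illustrative.

/* Userland sketch of the ZX audio divider register arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define BPAR 1000000u

static uint32_t calc_reg(uint32_t parent, uint32_t rate)
{
	uint64_t scaled = (uint64_t)parent * BPAR / rate;
	uint32_t integ = (uint32_t)(scaled / BPAR) >> 1;
	uint32_t frac = (uint32_t)(scaled % BPAR);
	uint32_t fra_div = frac * 0xff / BPAR;

	/* bit 28 = I2S integer divider forced to 1, as in the patch */
	return (1u << 28) | (integ << 16) | (0xff << 8) | fra_div;
}

static uint32_t calc_rate(uint32_t reg, uint32_t parent)
{
	uint32_t sel = (reg >> 24) & 1;
	uint32_t integ = (reg >> 16) & 0xff;
	uint32_t fra_div = reg & 0xff;
	uint64_t div = (uint64_t)fra_div * BPAR / 0xff +
		       (uint64_t)sel * BPAR + 2ull * integ * BPAR;

	return (uint32_t)((uint64_t)parent * BPAR / div);
}

int main(void)
{
	uint32_t parent = 104000000;	/* e.g. lsp0_104M_wclk */
	uint32_t want = 12288000;	/* 256 * 48 kHz, illustrative */
	uint32_t reg = calc_reg(parent, want);

	printf("reg=%#010x -> %u Hz (asked %u Hz)\n", (unsigned)reg,
	       (unsigned)calc_rate(reg, parent), (unsigned)want);
	return 0;
}
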
diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h
index 0914a82..65ae08b 100644
--- a/drivers/clk/zte/clk.h
+++ b/drivers/clk/zte/clk.h
@@ -29,4 +29,13 @@
 struct clk *clk_register_zx_pll(const char *name, const char *parent_name,
 	unsigned long flags, void __iomem *reg_base,
 	const struct zx_pll_config *lookup_table, int count, spinlock_t *lock);
+
+struct clk_zx_audio {
+	struct clk_hw hw;
+	void __iomem *reg_base;
+};
+
+struct clk *clk_register_zx_audio(const char *name,
+				  const char * const parent_name,
+				  unsigned long flags, void __iomem *reg_base);
 #endif
diff --git a/drivers/clk/zynq/Makefile b/drivers/clk/zynq/Makefile
index 156d923..0afc2e7 100644
--- a/drivers/clk/zynq/Makefile
+++ b/drivers/clk/zynq/Makefile
@@ -1,3 +1,3 @@
 # Zynq clock specific Makefile
 
-obj-$(CONFIG_ARCH_ZYNQ)	+= clkc.o pll.o
+obj-y	+= clkc.o pll.o
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index de61438..38a65c3 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/clk/zynq.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4e57730..a7726db 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -111,6 +111,10 @@
 	select CLKSRC_MMIO
 	select CLKSRC_OF
 
+config CLKSRC_PISTACHIO
+	bool
+	select CLKSRC_OF
+
 config CLKSRC_STM32
 	bool "Clocksource for STM32 SoCs" if !ARCH_STM32
 	depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
@@ -277,7 +281,7 @@
 
 config CLKSRC_PXA
 	def_bool y if ARCH_PXA || ARCH_SA1100
-	select CLKSRC_OF if USE_OF
+	select CLKSRC_OF if OF
 	help
 	  This enables OST0 support available on PXA and SA-11x0
 	  platforms.
@@ -293,4 +297,12 @@
 	depends on ARM && CLKDEV_LOOKUP
 	select CLKSRC_MMIO
 
+config CLKSRC_ST_LPC
+	bool
+	depends on ARCH_STI
+	select CLKSRC_OF if OF
+	help
+	  Enable this option to use the Low Power Controller timer
+	  as a clocksource.
+
 endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f228354..5c00863 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -44,6 +44,7 @@
 obj-$(CONFIG_VF_PIT_TIMER)	+= vf_pit_timer.o
 obj-$(CONFIG_CLKSRC_QCOM)	+= qcom-timer.o
 obj-$(CONFIG_MTK_TIMER)		+= mtk_timer.o
+obj-$(CONFIG_CLKSRC_PISTACHIO)	+= time-pistachio.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)		+= arm_global_timer.o
@@ -60,3 +61,4 @@
 obj-$(CONFIG_H8300)			+= h8300_timer8.o
 obj-$(CONFIG_H8300_TMR16)		+= h8300_timer16.o
 obj-$(CONFIG_H8300_TPU)			+= h8300_tpu.o
+obj-$(CONFIG_CLKSRC_ST_LPC)		+= clksrc_st_lpc.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 0aa135d..d6e3e49 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -181,44 +181,36 @@
 	return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
 }
 
-static __always_inline void timer_set_mode(const int access, int mode,
-				  struct clock_event_device *clk)
+static __always_inline int timer_shutdown(const int access,
+					  struct clock_event_device *clk)
 {
 	unsigned long ctrl;
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-		break;
-	default:
-		break;
-	}
+
+	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+	ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+
+	return 0;
 }
 
-static void arch_timer_set_mode_virt(enum clock_event_mode mode,
-				     struct clock_event_device *clk)
+static int arch_timer_shutdown_virt(struct clock_event_device *clk)
 {
-	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
+	return timer_shutdown(ARCH_TIMER_VIRT_ACCESS, clk);
 }
 
-static void arch_timer_set_mode_phys(enum clock_event_mode mode,
-				     struct clock_event_device *clk)
+static int arch_timer_shutdown_phys(struct clock_event_device *clk)
 {
-	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
+	return timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
 }
 
-static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
-					 struct clock_event_device *clk)
+static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
 {
-	timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
+	return timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
 }
 
-static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
-					 struct clock_event_device *clk)
+static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
 {
-	timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
+	return timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
 }
 
 static __always_inline void set_next_event(const int access, unsigned long evt,
@@ -273,11 +265,11 @@
 		clk->cpumask = cpumask_of(smp_processor_id());
 		if (arch_timer_use_virtual) {
 			clk->irq = arch_timer_ppi[VIRT_PPI];
-			clk->set_mode = arch_timer_set_mode_virt;
+			clk->set_state_shutdown = arch_timer_shutdown_virt;
 			clk->set_next_event = arch_timer_set_next_event_virt;
 		} else {
 			clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
-			clk->set_mode = arch_timer_set_mode_phys;
+			clk->set_state_shutdown = arch_timer_shutdown_phys;
 			clk->set_next_event = arch_timer_set_next_event_phys;
 		}
 	} else {
@@ -286,17 +278,17 @@
 		clk->rating = 400;
 		clk->cpumask = cpu_all_mask;
 		if (arch_timer_mem_use_virtual) {
-			clk->set_mode = arch_timer_set_mode_virt_mem;
+			clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
 			clk->set_next_event =
 				arch_timer_set_next_event_virt_mem;
 		} else {
-			clk->set_mode = arch_timer_set_mode_phys_mem;
+			clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
 			clk->set_next_event =
 				arch_timer_set_next_event_phys_mem;
 		}
 	}
 
-	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
+	clk->set_state_shutdown(clk);
 
 	clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
 }
@@ -506,7 +498,7 @@
 			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
 	}
 
-	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	clk->set_state_shutdown(clk);
 }
 
 static int arch_timer_cpu_notify(struct notifier_block *self,
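
This and the following clocksource patches all apply the same conversion: the multiplexing set_mode(mode) callback is replaced by per-state callbacks (set_state_shutdown/periodic/oneshot, tick_resume) that return int, and drivers like the arch timer that only ever need to stop the counter implement just the shutdown path. A simplified, self-contained model of the new callback shape; this is not the kernel's struct clock_event_device, just a mock.

/* Mock of the per-state clockevent callback pattern, userland only. */
#include <stdio.h>

struct clock_event_dev {
	const char *name;
	int (*set_state_shutdown)(struct clock_event_dev *dev);
	int (*set_state_periodic)(struct clock_event_dev *dev);
	int (*set_state_oneshot)(struct clock_event_dev *dev);
	int (*tick_resume)(struct clock_event_dev *dev);
};

static int arch_timer_shutdown(struct clock_event_dev *dev)
{
	printf("%s: timer enable bit cleared\n", dev->name);
	return 0;
}

int main(void)
{
	struct clock_event_dev dev = {
		.name = "arch_sys_timer",
		.set_state_shutdown = arch_timer_shutdown,
		/* periodic/oneshot callbacks are optional in the new API */
	};

	dev.set_state_shutdown(&dev);	/* what the setup/stop paths now call */
	return 0;
}
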
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index e683377..29ea50a 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -107,26 +107,21 @@
 	writel(ctrl, gt_base + GT_CONTROL);
 }
 
-static void gt_clockevent_set_mode(enum clock_event_mode mode,
-				   struct clock_event_device *clk)
+static int gt_clockevent_shutdown(struct clock_event_device *evt)
 {
 	unsigned long ctrl;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		ctrl = readl(gt_base + GT_CONTROL);
-		ctrl &= ~(GT_CONTROL_COMP_ENABLE |
-				GT_CONTROL_IRQ_ENABLE | GT_CONTROL_AUTO_INC);
-		writel(ctrl, gt_base + GT_CONTROL);
-		break;
-	default:
-		break;
-	}
+	ctrl = readl(gt_base + GT_CONTROL);
+	ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
+		  GT_CONTROL_AUTO_INC);
+	writel(ctrl, gt_base + GT_CONTROL);
+	return 0;
+}
+
+static int gt_clockevent_set_periodic(struct clock_event_device *evt)
+{
+	gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
+	return 0;
 }
 
 static int gt_clockevent_set_next_event(unsigned long evt,
@@ -155,7 +150,7 @@
 	 *	the Global Timer flag _after_ having incremented
 	 *	the Comparator register	value to a higher value.
 	 */
-	if (evt->mode == CLOCK_EVT_MODE_ONESHOT)
+	if (clockevent_state_oneshot(evt))
 		gt_compare_set(ULONG_MAX, 0);
 
 	writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
@@ -171,7 +166,9 @@
 	clk->name = "arm_global_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
 		CLOCK_EVT_FEAT_PERCPU;
-	clk->set_mode = gt_clockevent_set_mode;
+	clk->set_state_shutdown = gt_clockevent_shutdown;
+	clk->set_state_periodic = gt_clockevent_set_periodic;
+	clk->set_state_oneshot = gt_clockevent_shutdown;
 	clk->set_next_event = gt_clockevent_set_next_event;
 	clk->cpumask = cpumask_of(cpu);
 	clk->rating = 300;
@@ -184,7 +181,7 @@
 
 static void gt_clockevents_stop(struct clock_event_device *clk)
 {
-	gt_clockevent_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	gt_clockevent_shutdown(clk);
 	disable_percpu_irq(clk->irq);
 }
 
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 4c2ba59..217438d 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -120,38 +120,52 @@
 	return 0;
 }
 
-static void asm9260_timer_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static inline void __asm9260_timer_shutdown(struct clock_event_device *evt)
 {
 	/* stop timer0 */
 	writel_relaxed(BM_C0_EN, priv.base + HW_TCR + CLR_REG);
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* disable reset and stop on match */
-		writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
-				priv.base + HW_MCR + CLR_REG);
-		/* configure match count for TC0 */
-		writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0);
-		/* enable TC0 */
-		writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* enable reset and stop on match */
-		writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
-				priv.base + HW_MCR + SET_REG);
-		break;
-	default:
-		break;
-	}
+static int asm9260_timer_shutdown(struct clock_event_device *evt)
+{
+	__asm9260_timer_shutdown(evt);
+	return 0;
+}
+
+static int asm9260_timer_set_oneshot(struct clock_event_device *evt)
+{
+	__asm9260_timer_shutdown(evt);
+
+	/* enable reset and stop on match */
+	writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
+		       priv.base + HW_MCR + SET_REG);
+	return 0;
+}
+
+static int asm9260_timer_set_periodic(struct clock_event_device *evt)
+{
+	__asm9260_timer_shutdown(evt);
+
+	/* disable reset and stop on match */
+	writel_relaxed(BM_MCR_RES_EN(0) | BM_MCR_STOP_EN(0),
+		       priv.base + HW_MCR + CLR_REG);
+	/* configure match count for TC0 */
+	writel_relaxed(priv.ticks_per_jiffy, priv.base + HW_MR0);
+	/* enable TC0 */
+	writel_relaxed(BM_C0_EN, priv.base + HW_TCR + SET_REG);
+	return 0;
 }
 
 static struct clock_event_device event_dev = {
-	.name		= DRIVER_NAME,
-	.rating		= 200,
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_next_event	= asm9260_timer_set_next_event,
-	.set_mode	= asm9260_timer_set_mode,
+	.name			= DRIVER_NAME,
+	.rating			= 200,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_next_event		= asm9260_timer_set_next_event,
+	.set_state_shutdown	= asm9260_timer_shutdown,
+	.set_state_periodic	= asm9260_timer_set_periodic,
+	.set_state_oneshot	= asm9260_timer_set_oneshot,
+	.tick_resume		= asm9260_timer_shutdown,
 };
 
 static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 26ed331..6f28229 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -54,21 +54,6 @@
 	return readl_relaxed(system_clock);
 }
 
-static void bcm2835_time_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *evt_dev)
-{
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	default:
-		WARN(1, "%s: unhandled event mode %d\n", __func__, mode);
-		break;
-	}
-}
-
 static int bcm2835_time_set_next_event(unsigned long event,
 	struct clock_event_device *evt_dev)
 {
@@ -129,7 +114,6 @@
 	timer->evt.name = node->name;
 	timer->evt.rating = 300;
 	timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
-	timer->evt.set_mode = bcm2835_time_set_mode;
 	timer->evt.set_next_event = bcm2835_time_set_next_event;
 	timer->evt.cpumask = cpumask_of(0);
 	timer->act.name = node->name;
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index f1e33d0..e717e87 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -127,25 +127,18 @@
 	return 0;
 }
 
-static void kona_timer_set_mode(enum clock_event_mode mode,
-			     struct clock_event_device *unused)
+static int kona_timer_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* by default mode is one shot don't do any thing */
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		kona_timer_disable_and_clear(timers.tmr_regs);
-	}
+	kona_timer_disable_and_clear(timers.tmr_regs);
+	return 0;
 }
 
 static struct clock_event_device kona_clockevent_timer = {
 	.name = "timer 1",
 	.features = CLOCK_EVT_FEAT_ONESHOT,
 	.set_next_event = kona_timer_set_next_event,
-	.set_mode = kona_timer_set_mode
+	.set_state_shutdown = kona_timer_shutdown,
+	.tick_resume = kona_timer_shutdown,
 };
 
 static void __init kona_timer_clockevents_init(void)
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 510c8a1..9be6018 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -16,7 +16,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/clk-provider.h>
 #include <linux/interrupt.h>
 #include <linux/clockchips.h>
 #include <linux/of_address.h>
@@ -191,40 +190,42 @@
 }
 
 /**
- * ttc_set_mode - Sets the mode of timer
+ * ttc_set_{shutdown|oneshot|periodic} - Sets the state of the timer
  *
- * @mode:	Mode to be set
  * @evt:	Address of clock event instance
  **/
-static void ttc_set_mode(enum clock_event_mode mode,
-					struct clock_event_device *evt)
+static int ttc_shutdown(struct clock_event_device *evt)
 {
 	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
 	struct ttc_timer *timer = &ttce->ttc;
 	u32 ctrl_reg;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ttc_set_interval(timer, DIV_ROUND_CLOSEST(ttce->ttc.freq,
-						PRESCALE * HZ));
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		ctrl_reg = readl_relaxed(timer->base_addr +
-					TTC_CNT_CNTRL_OFFSET);
-		ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
-		writel_relaxed(ctrl_reg,
-				timer->base_addr + TTC_CNT_CNTRL_OFFSET);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		ctrl_reg = readl_relaxed(timer->base_addr +
-					TTC_CNT_CNTRL_OFFSET);
-		ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
-		writel_relaxed(ctrl_reg,
-				timer->base_addr + TTC_CNT_CNTRL_OFFSET);
-		break;
-	}
+	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+	ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK;
+	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+	return 0;
+}
+
+static int ttc_set_periodic(struct clock_event_device *evt)
+{
+	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+	struct ttc_timer *timer = &ttce->ttc;
+
+	ttc_set_interval(timer,
+			 DIV_ROUND_CLOSEST(ttce->ttc.freq, PRESCALE * HZ));
+	return 0;
+}
+
+static int ttc_resume(struct clock_event_device *evt)
+{
+	struct ttc_timer_clockevent *ttce = to_ttc_timer_clkevent(evt);
+	struct ttc_timer *timer = &ttce->ttc;
+	u32 ctrl_reg;
+
+	ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+	ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK;
+	writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET);
+	return 0;
 }
 
 static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
@@ -430,7 +431,10 @@
 	ttcce->ce.name = "ttc_clockevent";
 	ttcce->ce.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	ttcce->ce.set_next_event = ttc_set_next_event;
-	ttcce->ce.set_mode = ttc_set_mode;
+	ttcce->ce.set_state_shutdown = ttc_shutdown;
+	ttcce->ce.set_state_periodic = ttc_set_periodic;
+	ttcce->ce.set_state_oneshot = ttc_shutdown;
+	ttcce->ce.tick_resume = ttc_resume;
 	ttcce->ce.rating = 200;
 	ttcce->ce.irq = irq;
 	ttcce->ce.cpumask = cpu_possible_mask;
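
The TTC conversion splits the old switch into three small handlers: shutdown sets the per-counter disable bit, resume clears it, and periodic programs an interval of freq / (PRESCALE * HZ) rounded to nearest. A toy model of those three operations follows; the register, PRESCALE and the 133 MHz rate are stand-ins, not the driver's values.

/* Toy model of the TTC shutdown/resume/periodic split. */
#include <stdio.h>

#define DISABLE_MASK 0x01u
#define PRESCALE 32u		/* hypothetical prescaler */
#define HZ 100u

static unsigned int cnt_cntrl;	/* models TTC_CNT_CNTRL */

static int ttc_shutdown(void) { cnt_cntrl |= DISABLE_MASK; return 0; }
static int ttc_resume(void)   { cnt_cntrl &= ~DISABLE_MASK; return 0; }

static int ttc_set_periodic(unsigned long freq)
{
	/* DIV_ROUND_CLOSEST(freq, PRESCALE * HZ) */
	unsigned long interval = (freq + (PRESCALE * HZ) / 2) / (PRESCALE * HZ);

	printf("periodic interval: %lu ticks\n", interval);
	return 0;
}

int main(void)
{
	ttc_set_periodic(133000000);	/* hypothetical 133 MHz timer clock */
	ttc_shutdown();
	printf("ctrl after shutdown: %#x\n", cnt_cntrl);
	ttc_resume();
	printf("ctrl after resume:   %#x\n", cnt_cntrl);
	return 0;
}
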
diff --git a/drivers/clocksource/clksrc_st_lpc.c b/drivers/clocksource/clksrc_st_lpc.c
new file mode 100644
index 0000000..65ec467
--- /dev/null
+++ b/drivers/clocksource/clksrc_st_lpc.c
@@ -0,0 +1,131 @@
+/*
+ * Clocksource using the Low Power Timer found in the Low Power Controller (LPC)
+ *
+ * Copyright (C) 2015 STMicroelectronics – All Rights Reserved
+ *
+ * Author(s): Francesco Virlinzi <francesco.virlinzi@st.com>
+ *	      Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mfd/st-lpc.h>
+
+/* Low Power Timer */
+#define LPC_LPT_LSB_OFF		0x400
+#define LPC_LPT_MSB_OFF		0x404
+#define LPC_LPT_START_OFF	0x408
+
+static struct st_clksrc_ddata {
+	struct clk		*clk;
+	void __iomem		*base;
+} ddata;
+
+static void __init st_clksrc_reset(void)
+{
+	writel_relaxed(0, ddata.base + LPC_LPT_START_OFF);
+	writel_relaxed(0, ddata.base + LPC_LPT_MSB_OFF);
+	writel_relaxed(0, ddata.base + LPC_LPT_LSB_OFF);
+	writel_relaxed(1, ddata.base + LPC_LPT_START_OFF);
+}
+
+static u64 notrace st_clksrc_sched_clock_read(void)
+{
+	return (u64)readl_relaxed(ddata.base + LPC_LPT_LSB_OFF);
+}
+
+static int __init st_clksrc_init(void)
+{
+	unsigned long rate;
+	int ret;
+
+	st_clksrc_reset();
+
+	rate = clk_get_rate(ddata.clk);
+
+	sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
+
+	ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
+				    "clksrc-st-lpc", rate, 300, 32,
+				    clocksource_mmio_readl_up);
+	if (ret) {
+		pr_err("clksrc-st-lpc: Failed to register clocksource\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int __init st_clksrc_setup_clk(struct device_node *np)
+{
+	struct clk *clk;
+
+	clk = of_clk_get(np, 0);
+	if (IS_ERR(clk)) {
+		pr_err("clksrc-st-lpc: Failed to get LPC clock\n");
+		return PTR_ERR(clk);
+	}
+
+	if (clk_prepare_enable(clk)) {
+		pr_err("clksrc-st-lpc: Failed to enable LPC clock\n");
+		return -EINVAL;
+	}
+
+	if (!clk_get_rate(clk)) {
+		pr_err("clksrc-st-lpc: Failed to get LPC clock rate\n");
+		clk_disable_unprepare(clk);
+		return -EINVAL;
+	}
+
+	ddata.clk = clk;
+
+	return 0;
+}
+
+static void __init st_clksrc_of_register(struct device_node *np)
+{
+	int ret;
+	uint32_t mode;
+
+	ret = of_property_read_u32(np, "st,lpc-mode", &mode);
+	if (ret) {
+		pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
+		return;
+	}
+
+	/* LPC can either run as a Clocksource or in RTC or WDT mode */
+	if (mode != ST_LPC_MODE_CLKSRC)
+		return;
+
+	ddata.base = of_iomap(np, 0);
+	if (!ddata.base) {
+		pr_err("clksrc-st-lpc: Unable to map iomem\n");
+		return;
+	}
+
+	if (st_clksrc_setup_clk(np)) {
+		iounmap(ddata.base);
+		return;
+	}
+
+	if (st_clksrc_init()) {
+		clk_disable_unprepare(ddata.clk);
+		clk_put(ddata.clk);
+		iounmap(ddata.base);
+		return;
+	}
+
+	pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
+		clk_get_rate(ddata.clk));
+}
+CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
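
The new clksrc-st-lpc driver exposes the LPC's free-running 32-bit up-counter as a clocksource and sched_clock. What the core then does with such a counter is scale reads into nanoseconds via a mult/shift pair derived from the clock rate. A minimal userland sketch of that scaling follows; the mult derivation is a simplified stand-in for the kernel's clocks_calc_mult_shift(), and the 10 MHz rate is hypothetical.

/* Userland sketch: counter ticks -> nanoseconds via mult/shift. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 10000000;	/* hypothetical counter rate, Hz */
	uint32_t shift = 24;
	uint32_t mult = (uint32_t)((1000000000ull << shift) / rate);
	uint32_t counter = 10000000;	/* pretend we read one second of ticks */

	uint64_t ns = ((uint64_t)counter * mult) >> shift;

	printf("mult=%u shift=%u -> %llu ns\n", (unsigned)mult, (unsigned)shift,
	       (unsigned long long)ns);
	return 0;
}
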
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index d83ec1f..cdd86e3 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -61,11 +61,6 @@
 	return IRQ_HANDLED;
 }
 
-static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
-					 struct clock_event_device *evt)
-{
-}
-
 static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
 					unsigned int irq)
 {
@@ -91,7 +86,6 @@
 	clkevt->name = "clps711x-clockevent";
 	clkevt->rating = 300;
 	clkevt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_C3STOP;
-	clkevt->set_mode = clps711x_clockevent_set_mode;
 	clkevt->cpumask = cpumask_of(0);
 	clockevents_config_and_register(clkevt, HZ, 0, 0);
 
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index db21052..9a7e37c 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -42,7 +42,6 @@
  *  256         128   .125            512.000
  */
 
-static unsigned int cs5535_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
 static struct cs5535_mfgpt_timer *cs5535_event_clock;
 
 /* Selected from the table above */
@@ -77,15 +76,17 @@
 			MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
 }
 
-static void mfgpt_set_mode(enum clock_event_mode mode,
-		struct clock_event_device *evt)
+static int mfgpt_shutdown(struct clock_event_device *evt)
 {
 	disable_timer(cs5535_event_clock);
+	return 0;
+}
 
-	if (mode == CLOCK_EVT_MODE_PERIODIC)
-		start_timer(cs5535_event_clock, MFGPT_PERIODIC);
-
-	cs5535_tick_mode = mode;
+static int mfgpt_set_periodic(struct clock_event_device *evt)
+{
+	disable_timer(cs5535_event_clock);
+	start_timer(cs5535_event_clock, MFGPT_PERIODIC);
+	return 0;
 }
 
 static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
@@ -97,7 +98,10 @@
 static struct clock_event_device cs5535_clockevent = {
 	.name = DRV_NAME,
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = mfgpt_set_mode,
+	.set_state_shutdown = mfgpt_shutdown,
+	.set_state_periodic = mfgpt_set_periodic,
+	.set_state_oneshot = mfgpt_shutdown,
+	.tick_resume = mfgpt_shutdown,
 	.set_next_event = mfgpt_next_event,
 	.rating = 250,
 };
@@ -113,7 +117,7 @@
 	/* Turn off the clock (and clear the event) */
 	disable_timer(cs5535_event_clock);
 
-	if (cs5535_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
+	if (clockevent_state_shutdown(&cs5535_clockevent))
 		return IRQ_HANDLED;
 
 	/* Clear the counter */
@@ -121,7 +125,7 @@
 
 	/* Restart the clock in periodic mode */
 
-	if (cs5535_tick_mode == CLOCK_EVT_MODE_PERIODIC)
+	if (clockevent_state_periodic(&cs5535_clockevent))
 		cs5535_mfgpt_write(cs5535_event_clock, MFGPT_REG_SETUP,
 				MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
 
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 3199060..776b6c8 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -16,15 +16,6 @@
 
 static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
 
-static void dummy_timer_set_mode(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
-{
-	/*
-	 * Core clockevents code will call this when exchanging timer devices.
-	 * We don't need to do anything here.
-	 */
-}
-
 static void dummy_timer_setup(void)
 {
 	int cpu = smp_processor_id();
@@ -35,7 +26,6 @@
 			  CLOCK_EVT_FEAT_ONESHOT |
 			  CLOCK_EVT_FEAT_DUMMY;
 	evt->rating	= 100;
-	evt->set_mode	= dummy_timer_set_mode;
 	evt->cpumask	= cpumask_of(cpu);
 
 	clockevents_register_device(evt);
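
The dummy timer change is the degenerate case of the conversion: since the per-state callbacks are optional in the new API, an empty set_mode stub can simply be deleted. A tiny illustration of the optional-callback pattern, again a simplified model rather than the clockevents core.

/* Simplified model: optional state callbacks are only called if present. */
#include <stdio.h>

struct clock_event_dev {
	int (*set_state_shutdown)(struct clock_event_dev *dev);
};

static int switch_to_shutdown(struct clock_event_dev *dev)
{
	if (!dev->set_state_shutdown)	/* dummy timer: nothing to do */
		return 0;
	return dev->set_state_shutdown(dev);
}

int main(void)
{
	struct clock_event_dev dummy = { .set_state_shutdown = NULL };

	printf("shutdown returned %d\n", switch_to_shutdown(&dummy));
	return 0;
}
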
diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index 35a8809..c76c750 100644
--- a/drivers/clocksource/dw_apb_timer.c
+++ b/drivers/clocksource/dw_apb_timer.c
@@ -110,71 +110,87 @@
 	apbt_writel(timer, ctrl, APBTMR_N_CONTROL);
 }
 
-static void apbt_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int apbt_shutdown(struct clock_event_device *evt)
 {
+	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
 	unsigned long ctrl;
-	unsigned long period;
+
+	pr_debug("%s CPU %d state=shutdown\n", __func__,
+		 cpumask_first(evt->cpumask));
+
+	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+	ctrl &= ~APBTMR_CONTROL_ENABLE;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	return 0;
+}
+
+static int apbt_set_oneshot(struct clock_event_device *evt)
+{
+	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+	unsigned long ctrl;
+
+	pr_debug("%s CPU %d state=oneshot\n", __func__,
+		 cpumask_first(evt->cpumask));
+
+	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+	/*
+	 * set free running mode, this mode will let timer reload max
+	 * timeout which will give time (3min on 25MHz clock) to rearm
+	 * the next event, therefore emulate the one-shot mode.
+	 */
+	ctrl &= ~APBTMR_CONTROL_ENABLE;
+	ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
+
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	/* write again to set free running mode */
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+
+	/*
+	 * DW APB p. 46, load counter with all 1s before starting free
+	 * running mode.
+	 */
+	apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
+	ctrl &= ~APBTMR_CONTROL_INT;
+	ctrl |= APBTMR_CONTROL_ENABLE;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	return 0;
+}
+
+static int apbt_set_periodic(struct clock_event_device *evt)
+{
+	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
+	unsigned long period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
+	unsigned long ctrl;
+
+	pr_debug("%s CPU %d state=periodic\n", __func__,
+		 cpumask_first(evt->cpumask));
+
+	ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
+	ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	/*
+	 * DW APB p. 46, have to disable timer before load counter,
+	 * may cause sync problem.
+	 */
+	ctrl &= ~APBTMR_CONTROL_ENABLE;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	udelay(1);
+	pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
+	apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
+	ctrl |= APBTMR_CONTROL_ENABLE;
+	apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
+	return 0;
+}
+
+static int apbt_resume(struct clock_event_device *evt)
+{
 	struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
 
-	pr_debug("%s CPU %d mode=%d\n", __func__,
-		 cpumask_first(evt->cpumask),
-		 mode);
+	pr_debug("%s CPU %d state=resume\n", __func__,
+		 cpumask_first(evt->cpumask));
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		period = DIV_ROUND_UP(dw_ced->timer.freq, HZ);
-		ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
-		ctrl |= APBTMR_CONTROL_MODE_PERIODIC;
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-		/*
-		 * DW APB p. 46, have to disable timer before load counter,
-		 * may cause sync problem.
-		 */
-		ctrl &= ~APBTMR_CONTROL_ENABLE;
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-		udelay(1);
-		pr_debug("Setting clock period %lu for HZ %d\n", period, HZ);
-		apbt_writel(&dw_ced->timer, period, APBTMR_N_LOAD_COUNT);
-		ctrl |= APBTMR_CONTROL_ENABLE;
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
-		/*
-		 * set free running mode, this mode will let timer reload max
-		 * timeout which will give time (3min on 25MHz clock) to rearm
-		 * the next event, therefore emulate the one-shot mode.
-		 */
-		ctrl &= ~APBTMR_CONTROL_ENABLE;
-		ctrl &= ~APBTMR_CONTROL_MODE_PERIODIC;
-
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-		/* write again to set free running mode */
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-
-		/*
-		 * DW APB p. 46, load counter with all 1s before starting free
-		 * running mode.
-		 */
-		apbt_writel(&dw_ced->timer, ~0, APBTMR_N_LOAD_COUNT);
-		ctrl &= ~APBTMR_CONTROL_INT;
-		ctrl |= APBTMR_CONTROL_ENABLE;
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		ctrl = apbt_readl(&dw_ced->timer, APBTMR_N_CONTROL);
-		ctrl &= ~APBTMR_CONTROL_ENABLE;
-		apbt_writel(&dw_ced->timer, ctrl, APBTMR_N_CONTROL);
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		apbt_enable_int(&dw_ced->timer);
-		break;
-	}
+	apbt_enable_int(&dw_ced->timer);
+	return 0;
 }
 
 static int apbt_next_event(unsigned long delta,
@@ -232,8 +248,12 @@
 						       &dw_ced->ced);
 	dw_ced->ced.min_delta_ns = clockevent_delta2ns(5000, &dw_ced->ced);
 	dw_ced->ced.cpumask = cpumask_of(cpu);
-	dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	dw_ced->ced.set_mode = apbt_set_mode;
+	dw_ced->ced.features = CLOCK_EVT_FEAT_PERIODIC |
+				CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
+	dw_ced->ced.set_state_shutdown = apbt_shutdown;
+	dw_ced->ced.set_state_periodic = apbt_set_periodic;
+	dw_ced->ced.set_state_oneshot = apbt_set_oneshot;
+	dw_ced->ced.tick_resume = apbt_resume;
 	dw_ced->ced.set_next_event = apbt_next_event;
 	dw_ced->ced.irq = dw_ced->timer.irq;
 	dw_ced->ced.rating = rating;
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index dc3c6ee..7a97a34 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -251,33 +251,21 @@
 	return container_of(ced, struct em_sti_priv, ced);
 }
 
-static void em_sti_clock_event_mode(enum clock_event_mode mode,
-				    struct clock_event_device *ced)
+static int em_sti_clock_event_shutdown(struct clock_event_device *ced)
+{
+	struct em_sti_priv *p = ced_to_em_sti(ced);
+	em_sti_stop(p, USER_CLOCKEVENT);
+	return 0;
+}
+
+static int em_sti_clock_event_set_oneshot(struct clock_event_device *ced)
 {
 	struct em_sti_priv *p = ced_to_em_sti(ced);
 
-	/* deal with old setting first */
-	switch (ced->mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		em_sti_stop(p, USER_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
-		em_sti_start(p, USER_CLOCKEVENT);
-		clockevents_config(&p->ced, p->rate);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		em_sti_stop(p, USER_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
+	dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+	em_sti_start(p, USER_CLOCKEVENT);
+	clockevents_config(&p->ced, p->rate);
+	return 0;
 }
 
 static int em_sti_clock_event_next(unsigned long delta,
@@ -303,11 +291,12 @@
 	ced->rating = 200;
 	ced->cpumask = cpu_possible_mask;
 	ced->set_next_event = em_sti_clock_event_next;
-	ced->set_mode = em_sti_clock_event_mode;
+	ced->set_state_shutdown = em_sti_clock_event_shutdown;
+	ced->set_state_oneshot = em_sti_clock_event_set_oneshot;
 
 	dev_info(&p->pdev->dev, "used for clock events\n");
 
-	/* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+	/* Register with dummy 1 Hz value, gets updated in ->set_state_oneshot() */
 	clockevents_config_and_register(ced, 1, 2, 0xffffffff);
 }
 
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9064ff7..029f96a 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -257,15 +257,14 @@
 	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
 }
 
-static void exynos4_mct_comp0_start(enum clock_event_mode mode,
-				    unsigned long cycles)
+static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles)
 {
 	unsigned int tcon;
 	cycle_t comp_cycle;
 
 	tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON);
 
-	if (mode == CLOCK_EVT_MODE_PERIODIC) {
+	if (periodic) {
 		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
 		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
 	}
@@ -283,38 +282,38 @@
 static int exynos4_comp_set_next_event(unsigned long cycles,
 				       struct clock_event_device *evt)
 {
-	exynos4_mct_comp0_start(evt->mode, cycles);
+	exynos4_mct_comp0_start(false, cycles);
 
 	return 0;
 }
 
-static void exynos4_comp_set_mode(enum clock_event_mode mode,
-				  struct clock_event_device *evt)
+static int mct_set_state_shutdown(struct clock_event_device *evt)
+{
+	exynos4_mct_comp0_stop();
+	return 0;
+}
+
+static int mct_set_state_periodic(struct clock_event_device *evt)
 {
 	unsigned long cycles_per_jiffy;
+
+	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
+			    >> evt->shift);
 	exynos4_mct_comp0_stop();
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		cycles_per_jiffy =
-			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
-		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	exynos4_mct_comp0_start(true, cycles_per_jiffy);
+	return 0;
 }
 
 static struct clock_event_device mct_comp_device = {
-	.name		= "mct-comp",
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 250,
-	.set_next_event	= exynos4_comp_set_next_event,
-	.set_mode	= exynos4_comp_set_mode,
+	.name			= "mct-comp",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 250,
+	.set_next_event		= exynos4_comp_set_next_event,
+	.set_state_periodic	= mct_set_state_periodic,
+	.set_state_shutdown	= mct_set_state_shutdown,
+	.set_state_oneshot	= mct_set_state_shutdown,
+	.tick_resume		= mct_set_state_shutdown,
 };
 
 static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
@@ -390,39 +389,32 @@
 	return 0;
 }
 
-static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
-					 struct clock_event_device *evt)
+static int set_state_shutdown(struct clock_event_device *evt)
+{
+	exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick));
+	return 0;
+}
+
+static int set_state_periodic(struct clock_event_device *evt)
 {
 	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
 	unsigned long cycles_per_jiffy;
 
+	cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
+			    >> evt->shift);
 	exynos4_mct_tick_stop(mevt);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		cycles_per_jiffy =
-			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
-		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	exynos4_mct_tick_start(cycles_per_jiffy, mevt);
+	return 0;
 }
 
 static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 {
-	struct clock_event_device *evt = &mevt->evt;
-
 	/*
 	 * This is for supporting oneshot mode.
 	 * Mct would generate interrupt periodically
 	 * without explicit stopping.
 	 */
-	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
+	if (!clockevent_state_periodic(&mevt->evt))
 		exynos4_mct_tick_stop(mevt);
 
 	/* Clear the MCT tick interrupt */
@@ -442,20 +434,21 @@
 	return IRQ_HANDLED;
 }
 
-static int exynos4_local_timer_setup(struct clock_event_device *evt)
+static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
 {
-	struct mct_clock_event_device *mevt;
+	struct clock_event_device *evt = &mevt->evt;
 	unsigned int cpu = smp_processor_id();
 
-	mevt = container_of(evt, struct mct_clock_event_device, evt);
-
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
 	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
 
 	evt->name = mevt->name;
 	evt->cpumask = cpumask_of(cpu);
 	evt->set_next_event = exynos4_tick_set_next_event;
-	evt->set_mode = exynos4_tick_set_mode;
+	evt->set_state_periodic = set_state_periodic;
+	evt->set_state_shutdown = set_state_shutdown;
+	evt->set_state_oneshot = set_state_shutdown;
+	evt->tick_resume = set_state_shutdown;
 	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = 450;
 
@@ -477,9 +470,11 @@
 	return 0;
 }
 
-static void exynos4_local_timer_stop(struct clock_event_device *evt)
+static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
 {
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	struct clock_event_device *evt = &mevt->evt;
+
+	evt->set_state_shutdown(evt);
 	if (mct_int_type == MCT_INT_SPI) {
 		if (evt->irq != -1)
 			disable_irq_nosync(evt->irq);
@@ -500,11 +495,11 @@
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_STARTING:
 		mevt = this_cpu_ptr(&percpu_mct_tick);
-		exynos4_local_timer_setup(&mevt->evt);
+		exynos4_local_timer_setup(mevt);
 		break;
 	case CPU_DYING:
 		mevt = this_cpu_ptr(&percpu_mct_tick);
-		exynos4_local_timer_stop(&mevt->evt);
+		exynos4_local_timer_stop(mevt);
 		break;
 	}
 
@@ -570,7 +565,7 @@
 		goto out_irq;
 
 	/* Immediately configure the timer on the boot CPU */
-	exynos4_local_timer_setup(&mevt->evt);
+	exynos4_local_timer_setup(mevt);
 	return;
 
 out_irq:
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c
index 454227d..ef43469 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/fsl_ftm_timer.c
@@ -153,19 +153,16 @@
 	return 0;
 }
 
-static void ftm_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int ftm_set_oneshot(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		ftm_set_next_event(priv->periodic_cyc, evt);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		ftm_counter_disable(priv->clkevt_base);
-		break;
-	default:
-		return;
-	}
+	ftm_counter_disable(priv->clkevt_base);
+	return 0;
+}
+
+static int ftm_set_periodic(struct clock_event_device *evt)
+{
+	ftm_set_next_event(priv->periodic_cyc, evt);
+	return 0;
 }
 
 static irqreturn_t ftm_evt_interrupt(int irq, void *dev_id)
@@ -174,7 +171,7 @@
 
 	ftm_irq_acknowledge(priv->clkevt_base);
 
-	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT)) {
+	if (likely(clockevent_state_oneshot(evt))) {
 		ftm_irq_disable(priv->clkevt_base);
 		ftm_counter_disable(priv->clkevt_base);
 	}
@@ -185,11 +182,13 @@
 }
 
 static struct clock_event_device ftm_clockevent = {
-	.name		= "Freescale ftm timer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= ftm_set_mode,
-	.set_next_event	= ftm_set_next_event,
-	.rating		= 300,
+	.name			= "Freescale ftm timer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_periodic	= ftm_set_periodic,
+	.set_state_oneshot	= ftm_set_oneshot,
+	.set_next_event		= ftm_set_next_event,
+	.rating			= 300,
 };
 
 static struct irqaction ftm_timer_irq = {
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index 0214cb3..f9b3b70 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -81,7 +81,7 @@
 	p->flags |= FLAG_IRQCONTEXT;
 	ctrl_outw(p->tcora, p->mapbase + TCORA);
 	if (!(p->flags & FLAG_SKIPEVENT)) {
-		if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+		if (clockevent_state_oneshot(&p->ced))
 			ctrl_outw(0x0000, p->mapbase + _8TCR);
 		p->ced.event_handler(&p->ced);
 	}
@@ -169,29 +169,32 @@
 	timer8_set_next(p, periodic?(p->rate + HZ/2) / HZ:0x10000);
 }
 
-static void timer8_clock_event_mode(enum clock_event_mode mode,
-				    struct clock_event_device *ced)
+static int timer8_clock_event_shutdown(struct clock_event_device *ced)
+{
+	timer8_stop(ced_to_priv(ced));
+	return 0;
+}
+
+static int timer8_clock_event_periodic(struct clock_event_device *ced)
 {
 	struct timer8_priv *p = ced_to_priv(ced);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		dev_info(&p->pdev->dev, "used for periodic clock events\n");
-		timer8_stop(p);
-		timer8_clock_event_start(p, PERIODIC);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
-		timer8_stop(p);
-		timer8_clock_event_start(p, ONESHOT);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		timer8_stop(p);
-		break;
-	default:
-		break;
-	}
+	dev_info(&p->pdev->dev, "used for periodic clock events\n");
+	timer8_stop(p);
+	timer8_clock_event_start(p, PERIODIC);
+
+	return 0;
+}
+
+static int timer8_clock_event_oneshot(struct clock_event_device *ced)
+{
+	struct timer8_priv *p = ced_to_priv(ced);
+
+	dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+	timer8_stop(p);
+	timer8_clock_event_start(p, ONESHOT);
+
+	return 0;
 }
 
 static int timer8_clock_event_next(unsigned long delta,
@@ -199,7 +202,7 @@
 {
 	struct timer8_priv *p = ced_to_priv(ced);
 
-	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+	BUG_ON(!clockevent_state_oneshot(ced));
 	timer8_set_next(p, delta - 1);
 
 	return 0;
@@ -246,7 +249,9 @@
 	p->ced.rating = 200;
 	p->ced.cpumask = cpumask_of(0);
 	p->ced.set_next_event = timer8_clock_event_next;
-	p->ced.set_mode = timer8_clock_event_mode;
+	p->ced.set_state_shutdown = timer8_clock_event_shutdown;
+	p->ced.set_state_periodic = timer8_clock_event_periodic;
+	p->ced.set_state_oneshot = timer8_clock_event_oneshot;
 
 	ret = setup_irq(irq, &p->irqaction);
 	if (ret < 0) {
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 14ee3ef..0efd36e 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -100,44 +100,40 @@
 #endif
 
 #ifdef CONFIG_CLKEVT_I8253
-/*
- * Initialize the PIT timer.
- *
- * This is also called after resume to bring the PIT into operation again.
- */
-static void init_pit_timer(enum clock_event_mode mode,
-			   struct clock_event_device *evt)
+static int pit_shutdown(struct clock_event_device *evt)
+{
+	if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt))
+		return 0;
+
+	raw_spin_lock(&i8253_lock);
+
+	outb_p(0x30, PIT_MODE);
+	outb_p(0, PIT_CH0);
+	outb_p(0, PIT_CH0);
+
+	raw_spin_unlock(&i8253_lock);
+	return 0;
+}
+
+static int pit_set_oneshot(struct clock_event_device *evt)
+{
+	raw_spin_lock(&i8253_lock);
+	outb_p(0x38, PIT_MODE);
+	raw_spin_unlock(&i8253_lock);
+	return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *evt)
 {
 	raw_spin_lock(&i8253_lock);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* binary, mode 2, LSB/MSB, ch 0 */
-		outb_p(0x34, PIT_MODE);
-		outb_p(PIT_LATCH & 0xff , PIT_CH0);	/* LSB */
-		outb_p(PIT_LATCH >> 8 , PIT_CH0);		/* MSB */
-		break;
+	/* binary, mode 2, LSB/MSB, ch 0 */
+	outb_p(0x34, PIT_MODE);
+	outb_p(PIT_LATCH & 0xff, PIT_CH0);	/* LSB */
+	outb_p(PIT_LATCH >> 8, PIT_CH0);	/* MSB */
 
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
-		    evt->mode == CLOCK_EVT_MODE_ONESHOT) {
-			outb_p(0x30, PIT_MODE);
-			outb_p(0, PIT_CH0);
-			outb_p(0, PIT_CH0);
-		}
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* One shot setup */
-		outb_p(0x38, PIT_MODE);
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-		/* Nothing to do here */
-		break;
-	}
 	raw_spin_unlock(&i8253_lock);
+	return 0;
 }
 
 /*
@@ -160,10 +156,11 @@
  * it can be solely used for the global tick.
  */
 struct clock_event_device i8253_clockevent = {
-	.name		= "pit",
-	.features	= CLOCK_EVT_FEAT_PERIODIC,
-	.set_mode	= init_pit_timer,
-	.set_next_event = pit_next_event,
+	.name			= "pit",
+	.features		= CLOCK_EVT_FEAT_PERIODIC,
+	.set_state_shutdown	= pit_shutdown,
+	.set_state_periodic	= pit_set_periodic,
+	.set_next_event		= pit_next_event,
 };
 
 /*
@@ -172,8 +169,10 @@
  */
 void __init clockevent_i8253_init(bool oneshot)
 {
-	if (oneshot)
+	if (oneshot) {
 		i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
+		i8253_clockevent.set_state_oneshot = pit_set_oneshot;
+	}
 	/*
 	 * Start pit with the boot cpu mask. x86 might make it global
 	 * when it is used as broadcast device later.
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
index 5c15cba..1fa22c4 100644
--- a/drivers/clocksource/meson6_timer.c
+++ b/drivers/clocksource/meson6_timer.c
@@ -67,25 +67,25 @@
 	writel(val | TIMER_ENABLE_BIT(timer), timer_base + TIMER_ISA_MUX);
 }
 
-static void meson6_clkevt_mode(enum clock_event_mode mode,
-			       struct clock_event_device *clk)
+static int meson6_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		meson6_clkevt_time_stop(CED_ID);
-		meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC/HZ - 1);
-		meson6_clkevt_time_start(CED_ID, true);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		meson6_clkevt_time_stop(CED_ID);
-		meson6_clkevt_time_start(CED_ID, false);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		meson6_clkevt_time_stop(CED_ID);
-		break;
-	}
+	meson6_clkevt_time_stop(CED_ID);
+	return 0;
+}
+
+static int meson6_set_oneshot(struct clock_event_device *evt)
+{
+	meson6_clkevt_time_stop(CED_ID);
+	meson6_clkevt_time_start(CED_ID, false);
+	return 0;
+}
+
+static int meson6_set_periodic(struct clock_event_device *evt)
+{
+	meson6_clkevt_time_stop(CED_ID);
+	meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC / HZ - 1);
+	meson6_clkevt_time_start(CED_ID, true);
+	return 0;
 }
 
 static int meson6_clkevt_next_event(unsigned long evt,
@@ -99,11 +99,15 @@
 }
 
 static struct clock_event_device meson6_clockevent = {
-	.name		= "meson6_tick",
-	.rating		= 400,
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= meson6_clkevt_mode,
-	.set_next_event	= meson6_clkevt_next_event,
+	.name			= "meson6_tick",
+	.rating			= 400,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= meson6_shutdown,
+	.set_state_periodic	= meson6_set_periodic,
+	.set_state_oneshot	= meson6_set_oneshot,
+	.tick_resume		= meson6_shutdown,
+	.set_next_event		= meson6_clkevt_next_event,
 };
 
 static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index b7384b8..bcd5c0d 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -56,25 +56,6 @@
 	return 0;
 }
 
-static void metag_timer_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *evt)
-{
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* We should disable the IRQ here */
-		break;
-
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_UNUSED:
-		WARN_ON(1);
-		break;
-	};
-}
-
 static cycle_t metag_clocksource_read(struct clocksource *cs)
 {
 	return __core_reg_get(TXTIMER);
@@ -129,7 +110,6 @@
 	clk->rating = 200,
 	clk->shift = 12,
 	clk->irq = tbisig_map(TBID_SIGNUM_TRT),
-	clk->set_mode = metag_timer_set_mode,
 	clk->set_next_event = metag_timer_set_next_event,
 
 	clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index b81ed1a..02a1945 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -33,12 +33,6 @@
 	return res;
 }
 
-static void gic_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
 static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
 {
 	struct clock_event_device *cd = dev_id;
@@ -67,7 +61,6 @@
 	cd->irq			= gic_timer_irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= gic_next_event;
-	cd->set_mode		= gic_set_clock_mode;
 
 	clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff);
 
@@ -79,6 +72,13 @@
 	disable_percpu_irq(gic_timer_irq);
 }
 
+static void gic_update_frequency(void *data)
+{
+	unsigned long rate = (unsigned long)data;
+
+	clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
+}
+
 static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
 				void *data)
 {
@@ -94,18 +94,40 @@
 	return NOTIFY_OK;
 }
 
+static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
+			    void *data)
+{
+	struct clk_notifier_data *cnd = data;
+
+	if (action == POST_RATE_CHANGE)
+		on_each_cpu(gic_update_frequency, (void *)cnd->new_rate, 1);
+
+	return NOTIFY_OK;
+}
+
+
 static struct notifier_block gic_cpu_nb = {
 	.notifier_call = gic_cpu_notifier,
 };
 
+static struct notifier_block gic_clk_nb = {
+	.notifier_call = gic_clk_notifier,
+};
+
 static int gic_clockevent_init(void)
 {
+	int ret;
+
 	if (!cpu_has_counter || !gic_frequency)
 		return -ENXIO;
 
-	setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	if (ret < 0)
+		return ret;
 
-	register_cpu_notifier(&gic_cpu_nb);
+	ret = register_cpu_notifier(&gic_cpu_nb);
+	if (ret < 0)
+		pr_warn("GIC: Unable to register CPU notifier\n");
 
 	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
 
@@ -125,18 +147,17 @@
 
 static void __init __gic_clocksource_init(void)
 {
+	int ret;
+
 	/* Set clocksource mask. */
 	gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
 
 	/* Calculate a somewhat reasonable rating value. */
 	gic_clocksource.rating = 200 + gic_frequency / 10000000;
 
-	clocksource_register_hz(&gic_clocksource, gic_frequency);
-
-	gic_clockevent_init();
-
-	/* And finally start the counter */
-	gic_start_count();
+	ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
+	if (ret < 0)
+		pr_warn("GIC: Unable to register clocksource\n");
 }
 
 void __init gic_clocksource_init(unsigned int frequency)
@@ -146,11 +167,16 @@
 		GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE);
 
 	__gic_clocksource_init();
+	gic_clockevent_init();
+
+	/* And finally start the counter */
+	gic_start_count();
 }
 
 static void __init gic_clocksource_of_init(struct device_node *node)
 {
 	struct clk *clk;
+	int ret;
 
 	if (WARN_ON(!gic_present || !node->parent ||
 		    !of_device_is_compatible(node->parent, "mti,gic")))
@@ -158,8 +184,13 @@
 
 	clk = of_clk_get(node, 0);
 	if (!IS_ERR(clk)) {
+		if (clk_prepare_enable(clk) < 0) {
+			pr_err("GIC failed to enable clock\n");
+			clk_put(clk);
+			return;
+		}
+
 		gic_frequency = clk_get_rate(clk);
-		clk_put(clk);
 	} else if (of_property_read_u32(node, "clock-frequency",
 					&gic_frequency)) {
 		pr_err("GIC frequency not specified.\n");
@@ -172,6 +203,15 @@
 	}
 
 	__gic_clocksource_init();
+
+	ret = gic_clockevent_init();
+	if (!ret && !IS_ERR(clk)) {
+		if (clk_notifier_register(clk, &gic_clk_nb) < 0)
+			pr_warn("GIC: Unable to register clock notifier\n");
+	}
+
+	/* And finally start the counter */
+	gic_start_count();
 }
 CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
 		       gic_clocksource_of_init);
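
Editorial note: besides the set-state conversion, the mips-gic-timer change above ties the clockevent frequency to the common clock framework: a clk notifier catches POST_RATE_CHANGE and pushes the new rate to every CPU's clockevent via clockevents_update_freq(). A condensed sketch of that pattern follows; the foo_* identifiers are hypothetical and the surrounding probe/init code is omitted.

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct clock_event_device, foo_clockevent);

/* Runs on every CPU: clockevents_update_freq() adjusts the local device. */
static void foo_update_frequency(void *data)
{
	unsigned long rate = (unsigned long)data;

	clockevents_update_freq(this_cpu_ptr(&foo_clockevent), rate);
}

static int foo_clk_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct clk_notifier_data *cnd = data;

	/* Only the post-change rate matters; pre-change events are ignored. */
	if (action == POST_RATE_CHANGE)
		on_each_cpu(foo_update_frequency, (void *)cnd->new_rate, 1);

	return NOTIFY_OK;
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notifier,
};

static int foo_register_clk_notifier(struct clk *clk)
{
	/* Call after clk_prepare_enable() and clockevent registration. */
	return clk_notifier_register(clk, &foo_clk_nb);
}
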
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
index 5eb2c35..19857af 100644
--- a/drivers/clocksource/moxart_timer.c
+++ b/drivers/clocksource/moxart_timer.c
@@ -58,25 +58,24 @@
 static void __iomem *base;
 static unsigned int clock_count_per_tick;
 
-static void moxart_clkevt_mode(enum clock_event_mode mode,
-			       struct clock_event_device *clk)
+static int moxart_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_ONESHOT:
-		writel(TIMER1_DISABLE, base + TIMER_CR);
-		writel(~0, base + TIMER1_BASE + REG_LOAD);
-		break;
-	case CLOCK_EVT_MODE_PERIODIC:
-		writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD);
-		writel(TIMER1_ENABLE, base + TIMER_CR);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		writel(TIMER1_DISABLE, base + TIMER_CR);
-		break;
-	}
+	writel(TIMER1_DISABLE, base + TIMER_CR);
+	return 0;
+}
+
+static int moxart_set_oneshot(struct clock_event_device *evt)
+{
+	writel(TIMER1_DISABLE, base + TIMER_CR);
+	writel(~0, base + TIMER1_BASE + REG_LOAD);
+	return 0;
+}
+
+static int moxart_set_periodic(struct clock_event_device *evt)
+{
+	writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD);
+	writel(TIMER1_ENABLE, base + TIMER_CR);
+	return 0;
 }
 
 static int moxart_clkevt_next_event(unsigned long cycles,
@@ -95,11 +94,15 @@
 }
 
 static struct clock_event_device moxart_clockevent = {
-	.name		= "moxart_timer",
-	.rating		= 200,
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= moxart_clkevt_mode,
-	.set_next_event	= moxart_clkevt_next_event,
+	.name			= "moxart_timer",
+	.rating			= 200,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= moxart_shutdown,
+	.set_state_periodic	= moxart_set_periodic,
+	.set_state_oneshot	= moxart_set_oneshot,
+	.tick_resume		= moxart_set_oneshot,
+	.set_next_event		= moxart_clkevt_next_event,
 };
 
 static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 68ab423..50f0641 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -102,27 +102,20 @@
 	       evt->gpt_base + TIMER_CTRL_REG(timer));
 }
 
-static void mtk_clkevt_mode(enum clock_event_mode mode,
-				struct clock_event_device *clk)
+static int mtk_clkevt_shutdown(struct clock_event_device *clk)
+{
+	mtk_clkevt_time_stop(to_mtk_clk(clk), GPT_CLK_EVT);
+	return 0;
+}
+
+static int mtk_clkevt_set_periodic(struct clock_event_device *clk)
 {
 	struct mtk_clock_event_device *evt = to_mtk_clk(clk);
 
 	mtk_clkevt_time_stop(evt, GPT_CLK_EVT);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
-		mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Timer is enabled in set_next_event */
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		/* No more interrupts will occur as source is disabled */
-		break;
-	}
+	mtk_clkevt_time_setup(evt, evt->ticks_per_jiffy, GPT_CLK_EVT);
+	mtk_clkevt_time_start(evt, true, GPT_CLK_EVT);
+	return 0;
 }
 
 static int mtk_clkevt_next_event(unsigned long event,
@@ -196,7 +189,10 @@
 	evt->dev.name = "mtk_tick";
 	evt->dev.rating = 300;
 	evt->dev.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-	evt->dev.set_mode = mtk_clkevt_mode;
+	evt->dev.set_state_shutdown = mtk_clkevt_shutdown;
+	evt->dev.set_state_periodic = mtk_clkevt_set_periodic;
+	evt->dev.set_state_oneshot = mtk_clkevt_shutdown;
+	evt->dev.tick_resume = mtk_clkevt_shutdown;
 	evt->dev.set_next_event = mtk_clkevt_next_event;
 	evt->dev.cpumask = cpu_possible_mask;
 
diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
index 445b68a..f5ce296 100644
--- a/drivers/clocksource/mxs_timer.c
+++ b/drivers/clocksource/mxs_timer.c
@@ -77,7 +77,6 @@
 #define BV_TIMROTv2_TIMCTRLn_SELECT__TICK_ALWAYS	0xf
 
 static struct clock_event_device mxs_clockevent_device;
-static enum clock_event_mode mxs_clockevent_mode = CLOCK_EVT_MODE_UNUSED;
 
 static void __iomem *mxs_timrot_base;
 static u32 timrot_major_version;
@@ -141,64 +140,49 @@
 	.handler	= mxs_timer_interrupt,
 };
 
-#ifdef DEBUG
-static const char *clock_event_mode_label[] const = {
-	[CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
-	[CLOCK_EVT_MODE_ONESHOT]  = "CLOCK_EVT_MODE_ONESHOT",
-	[CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
-	[CLOCK_EVT_MODE_UNUSED]   = "CLOCK_EVT_MODE_UNUSED"
-};
-#endif /* DEBUG */
-
-static void mxs_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static void mxs_irq_clear(char *state)
 {
 	/* Disable interrupt in timer module */
 	timrot_irq_disable();
 
-	if (mode != mxs_clockevent_mode) {
-		/* Set event time into the furthest future */
-		if (timrot_is_v1())
-			__raw_writel(0xffff,
-				mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
-		else
-			__raw_writel(0xffffffff,
-				mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
+	/* Set event time into the furthest future */
+	if (timrot_is_v1())
+		__raw_writel(0xffff, mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
+	else
+		__raw_writel(0xffffffff,
+			     mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
 
-		/* Clear pending interrupt */
-		timrot_irq_acknowledge();
-	}
+	/* Clear pending interrupt */
+	timrot_irq_acknowledge();
 
 #ifdef DEBUG
-	pr_info("%s: changing mode from %s to %s\n", __func__,
-		clock_event_mode_label[mxs_clockevent_mode],
-		clock_event_mode_label[mode]);
+	pr_info("%s: changing mode to %s\n", __func__, state);
 #endif /* DEBUG */
+}
 
-	/* Remember timer mode */
-	mxs_clockevent_mode = mode;
+static int mxs_shutdown(struct clock_event_device *evt)
+{
+	mxs_irq_clear("shutdown");
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pr_err("%s: Periodic mode is not implemented\n", __func__);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		timrot_irq_enable();
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		/* Left event sources disabled, no more interrupts appear */
-		break;
-	}
+	return 0;
+}
+
+static int mxs_set_oneshot(struct clock_event_device *evt)
+{
+	if (clockevent_state_oneshot(evt))
+		mxs_irq_clear("oneshot");
+	timrot_irq_enable();
+	return 0;
 }
 
 static struct clock_event_device mxs_clockevent_device = {
-	.name		= "mxs_timrot",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= mxs_set_mode,
-	.set_next_event	= timrotv2_set_next_event,
-	.rating		= 200,
+	.name			= "mxs_timrot",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= mxs_shutdown,
+	.set_state_oneshot	= mxs_set_oneshot,
+	.tick_resume		= mxs_shutdown,
+	.set_next_event		= timrotv2_set_next_event,
+	.rating			= 200,
 };
 
 static int __init mxs_clockevent_init(struct clk *timer_clk)
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index a709cfa..bc8dd44 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -119,28 +119,27 @@
 	}
 }
 
-static void nmdk_clkevt_mode(enum clock_event_mode mode,
-			     struct clock_event_device *dev)
+static int nmdk_clkevt_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		clkevt_periodic = true;
-		nmdk_clkevt_reset();
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		clkevt_periodic = false;
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		writel(0, mtu_base + MTU_IMSC);
-		/* disable timer */
-		writel(0, mtu_base + MTU_CR(1));
-		/* load some high default value */
-		writel(0xffffffff, mtu_base + MTU_LR(1));
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	writel(0, mtu_base + MTU_IMSC);
+	/* disable timer */
+	writel(0, mtu_base + MTU_CR(1));
+	/* load some high default value */
+	writel(0xffffffff, mtu_base + MTU_LR(1));
+	return 0;
+}
+
+static int nmdk_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+	clkevt_periodic = false;
+	return 0;
+}
+
+static int nmdk_clkevt_set_periodic(struct clock_event_device *evt)
+{
+	clkevt_periodic = true;
+	nmdk_clkevt_reset();
+	return 0;
 }
 
 static void nmdk_clksrc_reset(void)
@@ -163,13 +162,16 @@
 }
 
 static struct clock_event_device nmdk_clkevt = {
-	.name		= "mtu_1",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
-	                  CLOCK_EVT_FEAT_DYNIRQ,
-	.rating		= 200,
-	.set_mode	= nmdk_clkevt_mode,
-	.set_next_event	= nmdk_clkevt_next,
-	.resume		= nmdk_clkevt_resume,
+	.name			= "mtu_1",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_DYNIRQ,
+	.rating			= 200,
+	.set_state_shutdown	= nmdk_clkevt_shutdown,
+	.set_state_periodic	= nmdk_clkevt_set_periodic,
+	.set_state_oneshot	= nmdk_clkevt_set_oneshot,
+	.set_next_event		= nmdk_clkevt_next,
+	.resume			= nmdk_clkevt_resume,
 };
 
 /*
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index d9438af..45b6a49 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -88,26 +88,12 @@
 	return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
 }
 
-static void
-pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static int pxa_osmr0_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
-		timer_writel(OSSR_M0, OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* initializing, released, or preparing for suspend */
-		timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
-		timer_writel(OSSR_M0, OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	}
+	/* initializing, released, or preparing for suspend */
+	timer_writel(timer_readl(OIER) & ~OIER_E0, OIER);
+	timer_writel(OSSR_M0, OSSR);
+	return 0;
 }
 
 #ifdef CONFIG_PM
@@ -147,13 +133,14 @@
 #endif
 
 static struct clock_event_device ckevt_pxa_osmr0 = {
-	.name		= "osmr0",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.set_next_event	= pxa_osmr0_set_next_event,
-	.set_mode	= pxa_osmr0_set_mode,
-	.suspend	= pxa_timer_suspend,
-	.resume		= pxa_timer_resume,
+	.name			= "osmr0",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.set_next_event		= pxa_osmr0_set_next_event,
+	.set_state_shutdown	= pxa_osmr0_shutdown,
+	.set_state_oneshot	= pxa_osmr0_shutdown,
+	.suspend		= pxa_timer_suspend,
+	.resume			= pxa_timer_resume,
 };
 
 static struct irqaction pxa_ost0_irq = {
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index cba2d01..f8e09f9 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -47,7 +47,7 @@
 {
 	struct clock_event_device *evt = dev_id;
 	/* Stop the timer tick */
-	if (evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+	if (clockevent_state_oneshot(evt)) {
 		u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 		ctrl &= ~TIMER_ENABLE_EN;
 		writel_relaxed(ctrl, event_base + TIMER_ENABLE);
@@ -75,26 +75,14 @@
 	return 0;
 }
 
-static void msm_timer_set_mode(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int msm_timer_shutdown(struct clock_event_device *evt)
 {
 	u32 ctrl;
 
 	ctrl = readl_relaxed(event_base + TIMER_ENABLE);
 	ctrl &= ~(TIMER_ENABLE_EN | TIMER_ENABLE_CLR_ON_MATCH_EN);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Timer is enabled in set_next_event */
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		break;
-	}
 	writel_relaxed(ctrl, event_base + TIMER_ENABLE);
+	return 0;
 }
 
 static struct clock_event_device __percpu *msm_evt;
@@ -126,7 +114,9 @@
 	evt->name = "msm_timer";
 	evt->features = CLOCK_EVT_FEAT_ONESHOT;
 	evt->rating = 200;
-	evt->set_mode = msm_timer_set_mode;
+	evt->set_state_shutdown = msm_timer_shutdown;
+	evt->set_state_oneshot = msm_timer_shutdown;
+	evt->tick_resume = msm_timer_shutdown;
 	evt->set_next_event = msm_timer_set_next_event;
 	evt->cpumask = cpumask_of(cpu);
 
@@ -147,7 +137,7 @@
 
 static void msm_local_timer_stop(struct clock_event_device *evt)
 {
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
 }
 
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index a35993b..bb2c2b0 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -82,23 +82,18 @@
 	return 0;
 }
 
-static inline void rk_timer_set_mode(enum clock_event_mode mode,
-				     struct clock_event_device *ce)
+static int rk_timer_shutdown(struct clock_event_device *ce)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		rk_timer_disable(ce);
-		rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce);
-		rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		rk_timer_disable(ce);
-		break;
-	}
+	rk_timer_disable(ce);
+	return 0;
+}
+
+static int rk_timer_set_periodic(struct clock_event_device *ce)
+{
+	rk_timer_disable(ce);
+	rk_timer_update_counter(rk_timer(ce)->freq / HZ - 1, ce);
+	rk_timer_enable(ce, TIMER_MODE_FREE_RUNNING);
+	return 0;
 }
 
 static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
@@ -107,7 +102,7 @@
 
 	rk_timer_interrupt_clear(ce);
 
-	if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+	if (clockevent_state_oneshot(ce))
 		rk_timer_disable(ce);
 
 	ce->event_handler(ce);
@@ -161,7 +156,8 @@
 	ce->name = TIMER_NAME;
 	ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	ce->set_next_event = rk_timer_set_next_event;
-	ce->set_mode = rk_timer_set_mode;
+	ce->set_state_shutdown = rk_timer_shutdown;
+	ce->set_state_periodic = rk_timer_set_periodic;
 	ce->irq = irq;
 	ce->cpumask = cpumask_of(0);
 	ce->rating = 250;
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index 5645cfc..bc90e13 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -207,25 +207,18 @@
 	return 0;
 }
 
-static void samsung_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int samsung_shutdown(struct clock_event_device *evt)
 {
 	samsung_time_stop(pwm.event_id);
+	return 0;
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
-		samsung_time_start(pwm.event_id, true);
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+static int samsung_set_periodic(struct clock_event_device *evt)
+{
+	samsung_time_stop(pwm.event_id);
+	samsung_time_setup(pwm.event_id, pwm.clock_count_per_tick - 1);
+	samsung_time_start(pwm.event_id, true);
+	return 0;
 }
 
 static void samsung_clockevent_resume(struct clock_event_device *cev)
@@ -240,12 +233,16 @@
 }
 
 static struct clock_event_device time_event_device = {
-	.name		= "samsung_event_timer",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.set_next_event	= samsung_set_next_event,
-	.set_mode	= samsung_set_mode,
-	.resume		= samsung_clockevent_resume,
+	.name			= "samsung_event_timer",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.set_next_event		= samsung_set_next_event,
+	.set_state_shutdown	= samsung_shutdown,
+	.set_state_periodic	= samsung_set_periodic,
+	.set_state_oneshot	= samsung_shutdown,
+	.tick_resume		= samsung_shutdown,
+	.resume			= samsung_clockevent_resume,
 };
 
 static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index c96de14..ba73a6e 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -538,7 +538,7 @@
 
 	if (ch->flags & FLAG_CLOCKEVENT) {
 		if (!(ch->flags & FLAG_SKIPEVENT)) {
-			if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
+			if (clockevent_state_oneshot(&ch->ced)) {
 				ch->next_match_value = ch->max_match_value;
 				ch->flags |= FLAG_REPROGRAM;
 			}
@@ -554,7 +554,7 @@
 		sh_cmt_clock_event_program_verify(ch, 1);
 
 		if (ch->flags & FLAG_CLOCKEVENT)
-			if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
+			if ((clockevent_state_shutdown(&ch->ced))
 			    || (ch->match_value == ch->next_match_value))
 				ch->flags &= ~FLAG_REPROGRAM;
 	}
@@ -726,39 +726,37 @@
 		sh_cmt_set_next(ch, ch->max_match_value);
 }
 
-static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
-				    struct clock_event_device *ced)
+static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
+{
+	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
+
+	sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+	return 0;
+}
+
+static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
+					int periodic)
 {
 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 
 	/* deal with old setting first */
-	switch (ced->mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
+	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
 		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		dev_info(&ch->cmt->pdev->dev,
-			 "ch%u: used for periodic clock events\n", ch->index);
-		sh_cmt_clock_event_start(ch, 1);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		dev_info(&ch->cmt->pdev->dev,
-			 "ch%u: used for oneshot clock events\n", ch->index);
-		sh_cmt_clock_event_start(ch, 0);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		sh_cmt_stop(ch, FLAG_CLOCKEVENT);
-		break;
-	default:
-		break;
-	}
+	dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
+		 ch->index, periodic ? "periodic" : "oneshot");
+	sh_cmt_clock_event_start(ch, periodic);
+	return 0;
+}
+
+static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
+{
+	return sh_cmt_clock_event_set_state(ced, 0);
+}
+
+static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
+{
+	return sh_cmt_clock_event_set_state(ced, 1);
 }
 
 static int sh_cmt_clock_event_next(unsigned long delta,
@@ -766,7 +764,7 @@
 {
 	struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
 
-	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+	BUG_ON(!clockevent_state_oneshot(ced));
 	if (likely(ch->flags & FLAG_IRQCONTEXT))
 		ch->next_match_value = delta - 1;
 	else
@@ -820,7 +818,9 @@
 	ced->rating = 125;
 	ced->cpumask = cpu_possible_mask;
 	ced->set_next_event = sh_cmt_clock_event_next;
-	ced->set_mode = sh_cmt_clock_event_mode;
+	ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
+	ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
+	ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
 	ced->suspend = sh_cmt_clock_event_suspend;
 	ced->resume = sh_cmt_clock_event_resume;
 
@@ -935,9 +935,6 @@
 static const struct platform_device_id sh_cmt_id_table[] = {
 	{ "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
 	{ "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
-	{ "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] },
-	{ "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] },
-	{ "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] },
 	{ }
 };
 MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 3d88698..f1985da 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -276,36 +276,25 @@
 	return container_of(ced, struct sh_mtu2_channel, ced);
 }
 
-static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
-				    struct clock_event_device *ced)
+static int sh_mtu2_clock_event_shutdown(struct clock_event_device *ced)
 {
 	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
-	int disabled = 0;
 
-	/* deal with old setting first */
-	switch (ced->mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
+	sh_mtu2_disable(ch);
+	return 0;
+}
+
+static int sh_mtu2_clock_event_set_periodic(struct clock_event_device *ced)
+{
+	struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced);
+
+	if (clockevent_state_periodic(ced))
 		sh_mtu2_disable(ch);
-		disabled = 1;
-		break;
-	default:
-		break;
-	}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		dev_info(&ch->mtu->pdev->dev,
-			 "ch%u: used for periodic clock events\n", ch->index);
-		sh_mtu2_enable(ch);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-		if (!disabled)
-			sh_mtu2_disable(ch);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		break;
-	}
+	dev_info(&ch->mtu->pdev->dev, "ch%u: used for periodic clock events\n",
+		 ch->index);
+	sh_mtu2_enable(ch);
+	return 0;
 }
 
 static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
@@ -327,7 +316,8 @@
 	ced->features = CLOCK_EVT_FEAT_PERIODIC;
 	ced->rating = 200;
 	ced->cpumask = cpu_possible_mask;
-	ced->set_mode = sh_mtu2_clock_event_mode;
+	ced->set_state_shutdown = sh_mtu2_clock_event_shutdown;
+	ced->set_state_periodic = sh_mtu2_clock_event_set_periodic;
 	ced->suspend = sh_mtu2_clock_event_suspend;
 	ced->resume = sh_mtu2_clock_event_resume;
 
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index b6b8fa3..469e776 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -240,7 +240,7 @@
 	struct sh_tmu_channel *ch = dev_id;
 
 	/* disable or acknowledge interrupt */
-	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
+	if (clockevent_state_oneshot(&ch->ced))
 		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
 	else
 		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);
@@ -358,42 +358,38 @@
 	}
 }
 
-static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
-				    struct clock_event_device *ced)
+static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
 {
 	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
-	int disabled = 0;
+
+	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
+		sh_tmu_disable(ch);
+	return 0;
+}
+
+static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
+					int periodic)
+{
+	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
 
 	/* deal with old setting first */
-	switch (ced->mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-	case CLOCK_EVT_MODE_ONESHOT:
+	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
 		sh_tmu_disable(ch);
-		disabled = 1;
-		break;
-	default:
-		break;
-	}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		dev_info(&ch->tmu->pdev->dev,
-			 "ch%u: used for periodic clock events\n", ch->index);
-		sh_tmu_clock_event_start(ch, 1);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		dev_info(&ch->tmu->pdev->dev,
-			 "ch%u: used for oneshot clock events\n", ch->index);
-		sh_tmu_clock_event_start(ch, 0);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-		if (!disabled)
-			sh_tmu_disable(ch);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		break;
-	}
+	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
+		 ch->index, periodic ? "periodic" : "oneshot");
+	sh_tmu_clock_event_start(ch, periodic);
+	return 0;
+}
+
+static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
+{
+	return sh_tmu_clock_event_set_state(ced, 0);
+}
+
+static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
+{
+	return sh_tmu_clock_event_set_state(ced, 1);
 }
 
 static int sh_tmu_clock_event_next(unsigned long delta,
@@ -401,7 +397,7 @@
 {
 	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
 
-	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
+	BUG_ON(!clockevent_state_oneshot(ced));
 
 	/* program new delta value */
 	sh_tmu_set_next(ch, delta, 0);
@@ -430,7 +426,9 @@
 	ced->rating = 200;
 	ced->cpumask = cpu_possible_mask;
 	ced->set_next_event = sh_tmu_clock_event_next;
-	ced->set_mode = sh_tmu_clock_event_mode;
+	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
+	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
+	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
 	ced->suspend = sh_tmu_clock_event_suspend;
 	ced->resume = sh_tmu_clock_event_resume;
 
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 1928a89..6f3719d 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -81,25 +81,25 @@
 	       timer_base + TIMER_CTL_REG(timer));
 }
 
-static void sun4i_clkevt_mode(enum clock_event_mode mode,
-			      struct clock_event_device *clk)
+static int sun4i_clkevt_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		sun4i_clkevt_time_stop(0);
-		sun4i_clkevt_time_setup(0, ticks_per_jiffy);
-		sun4i_clkevt_time_start(0, true);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		sun4i_clkevt_time_stop(0);
-		sun4i_clkevt_time_start(0, false);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		sun4i_clkevt_time_stop(0);
-		break;
-	}
+	sun4i_clkevt_time_stop(0);
+	return 0;
+}
+
+static int sun4i_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+	sun4i_clkevt_time_stop(0);
+	sun4i_clkevt_time_start(0, false);
+	return 0;
+}
+
+static int sun4i_clkevt_set_periodic(struct clock_event_device *evt)
+{
+	sun4i_clkevt_time_stop(0);
+	sun4i_clkevt_time_setup(0, ticks_per_jiffy);
+	sun4i_clkevt_time_start(0, true);
+	return 0;
 }
 
 static int sun4i_clkevt_next_event(unsigned long evt,
@@ -116,7 +116,10 @@
 	.name = "sun4i_tick",
 	.rating = 350,
 	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = sun4i_clkevt_mode,
+	.set_state_shutdown = sun4i_clkevt_shutdown,
+	.set_state_periodic = sun4i_clkevt_set_periodic,
+	.set_state_oneshot = sun4i_clkevt_set_oneshot,
+	.tick_resume = sun4i_clkevt_shutdown,
 	.set_next_event = sun4i_clkevt_next_event,
 };
 
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 8bdbc45..d28d2fe 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -91,55 +91,62 @@
  */
 static u32 timer_clock;
 
-static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+static int tc_shutdown(struct clock_event_device *d)
 {
 	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
 	void __iomem		*regs = tcd->regs;
 
-	if (tcd->clkevt.mode == CLOCK_EVT_MODE_PERIODIC
-			|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
-		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
-		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-		clk_disable(tcd->clk);
-	}
+	__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+	__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+	clk_disable(tcd->clk);
 
-	switch (m) {
+	return 0;
+}
+
+static int tc_set_oneshot(struct clock_event_device *d)
+{
+	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+	void __iomem		*regs = tcd->regs;
+
+	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+		tc_shutdown(d);
+
+	clk_enable(tcd->clk);
+
+	/* slow clock, count up to RC, then irq and stop */
+	__raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+		     ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
+	__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+
+	/* set_next_event() configures and starts the timer */
+	return 0;
+}
+
+static int tc_set_periodic(struct clock_event_device *d)
+{
+	struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+	void __iomem		*regs = tcd->regs;
+
+	if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+		tc_shutdown(d);
 
 	/* By not making the gentime core emulate periodic mode on top
 	 * of oneshot, we get lower overhead and improved accuracy.
 	 */
-	case CLOCK_EVT_MODE_PERIODIC:
-		clk_enable(tcd->clk);
+	clk_enable(tcd->clk);
 
-		/* slow clock, count up to RC, then irq and restart */
-		__raw_writel(timer_clock
-				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-				regs + ATMEL_TC_REG(2, CMR));
-		__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+	/* slow clock, count up to RC, then irq and restart */
+	__raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+		     regs + ATMEL_TC_REG(2, CMR));
+	__raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
 
-		/* Enable clock and interrupts on RC compare */
-		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+	/* Enable clock and interrupts on RC compare */
+	__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
 
-		/* go go gadget! */
-		__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
-				regs + ATMEL_TC_REG(2, CCR));
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		clk_enable(tcd->clk);
-
-		/* slow clock, count up to RC, then irq and stop */
-		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
-				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-				regs + ATMEL_TC_REG(2, CMR));
-		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-
-		/* set_next_event() configures and starts the timer */
-		break;
-
-	default:
-		break;
-	}
+	/* go go gadget! */
+	__raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
+		     ATMEL_TC_REG(2, CCR));
+	return 0;
 }
 
 static int tc_next_event(unsigned long delta, struct clock_event_device *d)
@@ -154,13 +161,15 @@
 
 static struct tc_clkevt_device clkevt = {
 	.clkevt	= {
-		.name		= "tc_clkevt",
-		.features	= CLOCK_EVT_FEAT_PERIODIC
-					| CLOCK_EVT_FEAT_ONESHOT,
+		.name			= "tc_clkevt",
+		.features		= CLOCK_EVT_FEAT_PERIODIC |
+					  CLOCK_EVT_FEAT_ONESHOT,
 		/* Should be lower than at91rm9200's system timer */
-		.rating		= 125,
-		.set_next_event	= tc_next_event,
-		.set_mode	= tc_mode,
+		.rating			= 125,
+		.set_next_event		= tc_next_event,
+		.set_state_shutdown	= tc_shutdown,
+		.set_state_periodic	= tc_set_periodic,
+		.set_state_oneshot	= tc_set_oneshot,
 	},
 };
 
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index 5a112d7..6ebda11 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -72,33 +72,36 @@
 	return 0;
 }
 
-static void tegra_timer_set_mode(enum clock_event_mode mode,
-				    struct clock_event_device *evt)
+static inline void timer_shutdown(struct clock_event_device *evt)
 {
-	u32 reg;
-
 	timer_writel(0, TIMER3_BASE + TIMER_PTV);
+}
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		reg = 0xC0000000 | ((1000000/HZ)-1);
-		timer_writel(reg, TIMER3_BASE + TIMER_PTV);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+static int tegra_timer_shutdown(struct clock_event_device *evt)
+{
+	timer_shutdown(evt);
+	return 0;
+}
+
+static int tegra_timer_set_periodic(struct clock_event_device *evt)
+{
+	u32 reg = 0xC0000000 | ((1000000 / HZ) - 1);
+
+	timer_shutdown(evt);
+	timer_writel(reg, TIMER3_BASE + TIMER_PTV);
+	return 0;
 }
 
 static struct clock_event_device tegra_clockevent = {
-	.name		= "timer0",
-	.rating		= 300,
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.set_next_event	= tegra_timer_set_next_event,
-	.set_mode	= tegra_timer_set_mode,
+	.name			= "timer0",
+	.rating			= 300,
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.set_next_event		= tegra_timer_set_next_event,
+	.set_state_shutdown	= tegra_timer_shutdown,
+	.set_state_periodic	= tegra_timer_set_periodic,
+	.set_state_oneshot	= tegra_timer_shutdown,
+	.tick_resume		= tegra_timer_shutdown,
 };
 
 static u64 notrace tegra_read_sched_clock(void)
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 0c8c5e3..2162796f 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -121,33 +121,33 @@
 	return 0;
 }
 
-static void
-armada_370_xp_clkevt_mode(enum clock_event_mode mode,
-			  struct clock_event_device *dev)
+static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt)
 {
-	if (mode == CLOCK_EVT_MODE_PERIODIC) {
+	/*
+	 * Disable timer.
+	 */
+	local_timer_ctrl_clrset(TIMER0_EN, 0);
 
-		/*
-		 * Setup timer to fire at 1/HZ intervals.
-		 */
-		writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
-		writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
+	/*
+	 * ACK pending timer interrupt.
+	 */
+	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
+	return 0;
+}
 
-		/*
-		 * Enable timer.
-		 */
-		local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
-	} else {
-		/*
-		 * Disable timer.
-		 */
-		local_timer_ctrl_clrset(TIMER0_EN, 0);
+static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt)
+{
+	/*
+	 * Setup timer to fire at 1/HZ intervals.
+	 */
+	writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
+	writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
 
-		/*
-		 * ACK pending timer interrupt.
-		 */
-		writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
-	}
+	/*
+	 * Enable timer.
+	 */
+	local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
+	return 0;
 }
 
 static int armada_370_xp_clkevt_irq;
@@ -185,7 +185,10 @@
 	evt->shift		= 32,
 	evt->rating		= 300,
 	evt->set_next_event	= armada_370_xp_clkevt_next_event,
-	evt->set_mode		= armada_370_xp_clkevt_mode,
+	evt->set_state_shutdown	= armada_370_xp_clkevt_shutdown;
+	evt->set_state_periodic	= armada_370_xp_clkevt_set_periodic;
+	evt->set_state_oneshot	= armada_370_xp_clkevt_shutdown;
+	evt->tick_resume	= armada_370_xp_clkevt_shutdown;
 	evt->irq		= armada_370_xp_clkevt_irq;
 	evt->cpumask		= cpumask_of(cpu);
 
@@ -197,7 +200,7 @@
 
 static void armada_370_xp_timer_stop(struct clock_event_device *evt)
 {
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
 }
 
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/time-efm32.c
index 5b6e3d5..b06e4c2 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/time-efm32.c
@@ -48,40 +48,42 @@
 	unsigned periodic_top;
 };
 
-static void efm32_clock_event_set_mode(enum clock_event_mode mode,
-				       struct clock_event_device *evtdev)
+static int efm32_clock_event_shutdown(struct clock_event_device *evtdev)
 {
 	struct efm32_clock_event_ddata *ddata =
 		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
-		writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
-		writel_relaxed(TIMERn_CTRL_PRESC_1024 |
-			       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
-			       TIMERn_CTRL_MODE_DOWN,
-			       ddata->base + TIMERn_CTRL);
-		writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
-		break;
+	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
-		writel_relaxed(TIMERn_CTRL_PRESC_1024 |
-			       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
-			       TIMERn_CTRL_OSMEN |
-			       TIMERn_CTRL_MODE_DOWN,
-			       ddata->base + TIMERn_CTRL);
-		break;
+static int efm32_clock_event_set_oneshot(struct clock_event_device *evtdev)
+{
+	struct efm32_clock_event_ddata *ddata =
+		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
 
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
-		break;
+	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+	writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+		       TIMERn_CTRL_OSMEN |
+		       TIMERn_CTRL_MODE_DOWN,
+		       ddata->base + TIMERn_CTRL);
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+static int efm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+{
+	struct efm32_clock_event_ddata *ddata =
+		container_of(evtdev, struct efm32_clock_event_ddata, evtdev);
+
+	writel_relaxed(TIMERn_CMD_STOP, ddata->base + TIMERn_CMD);
+	writel_relaxed(ddata->periodic_top, ddata->base + TIMERn_TOP);
+	writel_relaxed(TIMERn_CTRL_PRESC_1024 |
+		       TIMERn_CTRL_CLKSEL_PRESCHFPERCLK |
+		       TIMERn_CTRL_MODE_DOWN,
+		       ddata->base + TIMERn_CTRL);
+	writel_relaxed(TIMERn_CMD_START, ddata->base + TIMERn_CMD);
+	return 0;
 }
 
 static int efm32_clock_event_set_next_event(unsigned long evt,
@@ -112,7 +114,9 @@
 	.evtdev = {
 		.name = "efm32 clockevent",
 		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-		.set_mode = efm32_clock_event_set_mode,
+		.set_state_shutdown = efm32_clock_event_shutdown,
+		.set_state_periodic = efm32_clock_event_set_periodic,
+		.set_state_oneshot = efm32_clock_event_set_oneshot,
 		.set_next_event = efm32_clock_event_set_next_event,
 		.rating = 200,
 	},
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index 0b3ce03..0ece742 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -60,30 +60,36 @@
 	return 0;
 }
 
-static void orion_clkevt_mode(enum clock_event_mode mode,
-			      struct clock_event_device *dev)
+static int orion_clkevt_shutdown(struct clock_event_device *dev)
 {
-	if (mode == CLOCK_EVT_MODE_PERIODIC) {
-		/* setup and enable periodic timer at 1/HZ intervals */
-		writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
-		writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
-		atomic_io_modify(timer_base + TIMER_CTRL,
-			TIMER1_RELOAD_EN | TIMER1_EN,
-			TIMER1_RELOAD_EN | TIMER1_EN);
-	} else {
-		/* disable timer */
-		atomic_io_modify(timer_base + TIMER_CTRL,
-			TIMER1_RELOAD_EN | TIMER1_EN, 0);
-	}
+	/* disable timer */
+	atomic_io_modify(timer_base + TIMER_CTRL,
+			 TIMER1_RELOAD_EN | TIMER1_EN, 0);
+	return 0;
+}
+
+static int orion_clkevt_set_periodic(struct clock_event_device *dev)
+{
+	/* setup and enable periodic timer at 1/HZ intervals */
+	writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD);
+	writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL);
+	atomic_io_modify(timer_base + TIMER_CTRL,
+			 TIMER1_RELOAD_EN | TIMER1_EN,
+			 TIMER1_RELOAD_EN | TIMER1_EN);
+	return 0;
 }
 
 static struct clock_event_device orion_clkevt = {
-	.name		= "orion_event",
-	.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-	.shift		= 32,
-	.rating		= 300,
-	.set_next_event	= orion_clkevt_next_event,
-	.set_mode	= orion_clkevt_mode,
+	.name			= "orion_event",
+	.features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PERIODIC,
+	.shift			= 32,
+	.rating			= 300,
+	.set_next_event		= orion_clkevt_next_event,
+	.set_state_shutdown	= orion_clkevt_shutdown,
+	.set_state_periodic	= orion_clkevt_set_periodic,
+	.set_state_oneshot	= orion_clkevt_shutdown,
+	.tick_resume		= orion_clkevt_shutdown,
 };
 
 static irqreturn_t orion_clkevt_irq_handler(int irq, void *dev_id)
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c
new file mode 100644
index 0000000..18d4266
--- /dev/null
+++ b/drivers/clocksource/time-pistachio.c
@@ -0,0 +1,217 @@
+/*
+ * Pistachio clocksource based on general-purpose timers
+ *
+ * Copyright (C) 2015 Imagination Technologies
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched_clock.h>
+#include <linux/time.h>
+
+/* Top level reg */
+#define CR_TIMER_CTRL_CFG		0x00
+#define TIMER_ME_GLOBAL			BIT(0)
+#define CR_TIMER_REV			0x10
+
+/* Timer specific registers */
+#define TIMER_CFG			0x20
+#define TIMER_ME_LOCAL			BIT(0)
+#define TIMER_RELOAD_VALUE		0x24
+#define TIMER_CURRENT_VALUE		0x28
+#define TIMER_CURRENT_OVERFLOW_VALUE	0x2C
+#define TIMER_IRQ_STATUS		0x30
+#define TIMER_IRQ_CLEAR			0x34
+#define TIMER_IRQ_MASK			0x38
+
+#define PERIP_TIMER_CONTROL		0x90
+
+/* Timer specific configuration Values */
+#define RELOAD_VALUE			0xffffffff
+
+struct pistachio_clocksource {
+	void __iomem *base;
+	raw_spinlock_t lock;
+	struct clocksource cs;
+};
+
+static struct pistachio_clocksource pcs_gpt;
+
+#define to_pistachio_clocksource(cs)	\
+	container_of(cs, struct pistachio_clocksource, cs)
+
+static inline u32 gpt_readl(void __iomem *base, u32 offset, u32 gpt_id)
+{
+	return readl(base + 0x20 * gpt_id + offset);
+}
+
+static inline void gpt_writel(void __iomem *base, u32 value, u32 offset,
+		u32 gpt_id)
+{
+	writel(value, base + 0x20 * gpt_id + offset);
+}
+
+static cycle_t pistachio_clocksource_read_cycles(struct clocksource *cs)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+	u32 counter, overflw;
+	unsigned long flags;
+
+	/*
+	 * The counter value is only refreshed after the overflow value is read,
+	 * and the two must be read in that strict order, hence the raw spinlock.
+	 */
+
+	raw_spin_lock_irqsave(&pcs->lock, flags);
+	overflw = gpt_readl(pcs->base, TIMER_CURRENT_OVERFLOW_VALUE, 0);
+	counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0);
+	raw_spin_unlock_irqrestore(&pcs->lock, flags);
+
+	return ~(cycle_t)counter;
+}
+
+static u64 notrace pistachio_read_sched_clock(void)
+{
+	return pistachio_clocksource_read_cycles(&pcs_gpt.cs);
+}
+
+static void pistachio_clksrc_set_mode(struct clocksource *cs, int timeridx,
+			int enable)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+	u32 val;
+
+	val = gpt_readl(pcs->base, TIMER_CFG, timeridx);
+	if (enable)
+		val |= TIMER_ME_LOCAL;
+	else
+		val &= ~TIMER_ME_LOCAL;
+
+	gpt_writel(pcs->base, val, TIMER_CFG, timeridx);
+}
+
+static void pistachio_clksrc_enable(struct clocksource *cs, int timeridx)
+{
+	struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
+
+	/* Disable GPT local before loading reload value */
+	pistachio_clksrc_set_mode(cs, timeridx, false);
+	gpt_writel(pcs->base, RELOAD_VALUE, TIMER_RELOAD_VALUE, timeridx);
+	pistachio_clksrc_set_mode(cs, timeridx, true);
+}
+
+static void pistachio_clksrc_disable(struct clocksource *cs, int timeridx)
+{
+	/* Disable GPT local */
+	pistachio_clksrc_set_mode(cs, timeridx, false);
+}
+
+static int pistachio_clocksource_enable(struct clocksource *cs)
+{
+	pistachio_clksrc_enable(cs, 0);
+	return 0;
+}
+
+static void pistachio_clocksource_disable(struct clocksource *cs)
+{
+	pistachio_clksrc_disable(cs, 0);
+}
+
+/* Desirable clock source for pistachio platform */
+static struct pistachio_clocksource pcs_gpt = {
+	.cs =	{
+		.name		= "gptimer",
+		.rating		= 300,
+		.enable		= pistachio_clocksource_enable,
+		.disable	= pistachio_clocksource_disable,
+		.read		= pistachio_clocksource_read_cycles,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
+				  CLOCK_SOURCE_SUSPEND_NONSTOP,
+		},
+};
+
+static void __init pistachio_clksrc_of_init(struct device_node *node)
+{
+	struct clk *sys_clk, *fast_clk;
+	struct regmap *periph_regs;
+	unsigned long rate;
+	int ret;
+
+	pcs_gpt.base = of_iomap(node, 0);
+	if (!pcs_gpt.base) {
+		pr_err("cannot iomap\n");
+		return;
+	}
+
+	periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
+	if (IS_ERR(periph_regs)) {
+		pr_err("cannot get peripheral regmap (%lu)\n",
+		       PTR_ERR(periph_regs));
+		return;
+	}
+
+	/* Switch to using the fast counter clock */
+	ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
+				 0xf, 0x0);
+	if (ret)
+		return;
+
+	sys_clk = of_clk_get_by_name(node, "sys");
+	if (IS_ERR(sys_clk)) {
+		pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk));
+		return;
+	}
+
+	fast_clk = of_clk_get_by_name(node, "fast");
+	if (IS_ERR(fast_clk)) {
+		pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
+		return;
+	}
+
+	ret = clk_prepare_enable(sys_clk);
+	if (ret < 0) {
+		pr_err("failed to enable clock (%d)\n", ret);
+		return;
+	}
+
+	ret = clk_prepare_enable(fast_clk);
+	if (ret < 0) {
+		pr_err("failed to enable clock (%d)\n", ret);
+		clk_disable_unprepare(sys_clk);
+		return;
+	}
+
+	rate = clk_get_rate(fast_clk);
+
+	/* Disable IRQs for clocksource usage */
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
+	gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
+
+	/* Enable timer block */
+	writel(TIMER_ME_GLOBAL, pcs_gpt.base);
+
+	raw_spin_lock_init(&pcs_gpt.lock);
+	sched_clock_register(pistachio_read_sched_clock, 32, rate);
+	clocksource_register_hz(&pcs_gpt.cs, rate);
+}
+CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
+		       pistachio_clksrc_of_init);
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 60f9de3..27fa136 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -76,7 +76,7 @@
 	/* clear timer interrupt */
 	writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);
 
-	if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
+	if (clockevent_state_oneshot(ce))
 		sirfsoc_timer_count_disable(cpu);
 
 	ce->event_handler(ce);
@@ -117,18 +117,11 @@
 	return 0;
 }
 
-static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *ce)
+/* Oneshot is enabled in set_next_event */
+static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* enable in set_next_event */
-		break;
-	default:
-		break;
-	}
-
 	sirfsoc_timer_count_disable(smp_processor_id());
+	return 0;
 }
 
 static void sirfsoc_clocksource_suspend(struct clocksource *cs)
@@ -193,7 +186,9 @@
 	ce->name = "local_timer";
 	ce->features = CLOCK_EVT_FEAT_ONESHOT;
 	ce->rating = 200;
-	ce->set_mode = sirfsoc_timer_set_mode;
+	ce->set_state_shutdown = sirfsoc_timer_shutdown;
+	ce->set_state_oneshot = sirfsoc_timer_shutdown;
+	ce->tick_resume = sirfsoc_timer_shutdown;
 	ce->set_next_event = sirfsoc_timer_set_next_event;
 	clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60);
 	ce->max_delta_ns = clockevent_delta2ns(-2, ce);
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index c0304ff..d911c5d 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -90,33 +90,27 @@
 	return elapsed;
 }
 
-/*
- * Clockevent device:  interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
-static void
-pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static int pit_clkevt_shutdown(struct clock_event_device *dev)
 {
 	struct pit_data *data = clkevt_to_pit_data(dev);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* update clocksource counter */
-		data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
-		pit_write(data->base, AT91_PIT_MR,
-			  (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		BUG();
-		/* FALLTHROUGH */
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		/* disable irq, leaving the clocksource active */
-		pit_write(data->base, AT91_PIT_MR,
-			  (data->cycle - 1) | AT91_PIT_PITEN);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	/* disable irq, leaving the clocksource active */
+	pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
+	return 0;
+}
+
+/*
+ * Clockevent device:  interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+static int pit_clkevt_set_periodic(struct clock_event_device *dev)
+{
+	struct pit_data *data = clkevt_to_pit_data(dev);
+
+	/* update clocksource counter */
+	data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+	pit_write(data->base, AT91_PIT_MR,
+		  (data->cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN);
+	return 0;
 }
 
 static void at91sam926x_pit_suspend(struct clock_event_device *cedev)
@@ -162,7 +156,7 @@
 	WARN_ON_ONCE(!irqs_disabled());
 
 	/* The PIT interrupt may be disabled, and is shared */
-	if ((data->clkevt.mode == CLOCK_EVT_MODE_PERIODIC) &&
+	if (clockevent_state_periodic(&data->clkevt) &&
 	    (pit_read(data->base, AT91_PIT_SR) & AT91_PIT_PITS)) {
 		unsigned nr_ticks;
 
@@ -208,8 +202,8 @@
 	data->clksrc.mask = CLOCKSOURCE_MASK(bits);
 	data->clksrc.name = "pit";
 	data->clksrc.rating = 175;
-	data->clksrc.read = read_pit_clk,
-	data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	data->clksrc.read = read_pit_clk;
+	data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
 	clocksource_register_hz(&data->clksrc, pit_rate);
 
 	/* Set up irq handler */
@@ -227,7 +221,8 @@
 	data->clkevt.rating = 100;
 	data->clkevt.cpumask = cpumask_of(0);
 
-	data->clkevt.set_mode = pit_clkevt_mode;
+	data->clkevt.set_state_shutdown = pit_clkevt_shutdown;
+	data->clkevt.set_state_periodic = pit_clkevt_set_periodic;
 	data->clkevt.resume = at91sam926x_pit_resume;
 	data->clkevt.suspend = at91sam926x_pit_suspend;
 	clockevents_register_device(&data->clkevt);
diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c
index 1692e17..41b7b6d 100644
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
@@ -106,36 +106,47 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void
-clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
+static void clkdev32k_disable_and_flush_irq(void)
 {
 	unsigned int val;
 
 	/* Disable and flush pending timer interrupts */
 	regmap_write(regmap_st, AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
 	regmap_read(regmap_st, AT91_ST_SR, &val);
-
 	last_crtr = read_CRTR();
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* PIT for periodic irqs; fixed rate of 1/HZ */
-		irqmask = AT91_ST_PITS;
-		regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* ALM for oneshot irqs, set by next_event()
-		 * before 32 seconds have passed
-		 */
-		irqmask = AT91_ST_ALMS;
-		regmap_write(regmap_st, AT91_ST_RTAR, last_crtr);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		irqmask = 0;
-		break;
-	}
+}
+
+static int clkevt32k_shutdown(struct clock_event_device *evt)
+{
+	clkdev32k_disable_and_flush_irq();
+	irqmask = 0;
 	regmap_write(regmap_st, AT91_ST_IER, irqmask);
+	return 0;
+}
+
+static int clkevt32k_set_oneshot(struct clock_event_device *dev)
+{
+	clkdev32k_disable_and_flush_irq();
+
+	/*
+	 * ALM for oneshot irqs, set by next_event()
+	 * before 32 seconds have passed.
+	 */
+	irqmask = AT91_ST_ALMS;
+	regmap_write(regmap_st, AT91_ST_RTAR, last_crtr);
+	regmap_write(regmap_st, AT91_ST_IER, irqmask);
+	return 0;
+}
+
+static int clkevt32k_set_periodic(struct clock_event_device *dev)
+{
+	clkdev32k_disable_and_flush_irq();
+
+	/* PIT for periodic irqs; fixed rate of 1/HZ */
+	irqmask = AT91_ST_PITS;
+	regmap_write(regmap_st, AT91_ST_PIMR, RM9200_TIMER_LATCH);
+	regmap_write(regmap_st, AT91_ST_IER, irqmask);
+	return 0;
 }
 
 static int
@@ -170,11 +181,15 @@
 }
 
 static struct clock_event_device clkevt = {
-	.name		= "at91_tick",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 150,
-	.set_next_event	= clkevt32k_next_event,
-	.set_mode	= clkevt32k_mode,
+	.name			= "at91_tick",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 150,
+	.set_next_event		= clkevt32k_next_event,
+	.set_state_shutdown	= clkevt32k_shutdown,
+	.set_state_periodic	= clkevt32k_set_periodic,
+	.set_state_oneshot	= clkevt32k_set_oneshot,
+	.tick_resume		= clkevt32k_shutdown,
 };
 
 /*
diff --git a/drivers/clocksource/timer-digicolor.c b/drivers/clocksource/timer-digicolor.c
index 7f8388c..e73947f0f 100644
--- a/drivers/clocksource/timer-digicolor.c
+++ b/drivers/clocksource/timer-digicolor.c
@@ -87,27 +87,27 @@
 	writel(count, dt->base + COUNT(dt->timer_id));
 }
 
-static void digicolor_clkevt_mode(enum clock_event_mode mode,
-				  struct clock_event_device *ce)
+static int digicolor_clkevt_shutdown(struct clock_event_device *ce)
+{
+	dc_timer_disable(ce);
+	return 0;
+}
+
+static int digicolor_clkevt_set_oneshot(struct clock_event_device *ce)
+{
+	dc_timer_disable(ce);
+	dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
+	return 0;
+}
+
+static int digicolor_clkevt_set_periodic(struct clock_event_device *ce)
 {
 	struct digicolor_timer *dt = dc_timer(ce);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		dc_timer_disable(ce);
-		dc_timer_set_count(ce, dt->ticks_per_jiffy);
-		dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		dc_timer_disable(ce);
-		dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		dc_timer_disable(ce);
-		break;
-	}
+	dc_timer_disable(ce);
+	dc_timer_set_count(ce, dt->ticks_per_jiffy);
+	dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
+	return 0;
 }
 
 static int digicolor_clkevt_next_event(unsigned long evt,
@@ -125,7 +125,10 @@
 		.name = "digicolor_tick",
 		.rating = 340,
 		.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-		.set_mode = digicolor_clkevt_mode,
+		.set_state_shutdown = digicolor_clkevt_shutdown,
+		.set_state_periodic = digicolor_clkevt_set_periodic,
+		.set_state_oneshot = digicolor_clkevt_set_oneshot,
+		.tick_resume = digicolor_clkevt_shutdown,
 		.set_next_event = digicolor_clkevt_next_event,
 	},
 	.timer_id = TIMER_C,
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 86c7eb6..839aba9 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -83,7 +83,6 @@
 	struct clk *clk_ipg;
 	const struct imx_gpt_data *gpt;
 	struct clock_event_device ced;
-	enum clock_event_mode cem;
 	struct irqaction act;
 };
 
@@ -212,18 +211,38 @@
 				-ETIME : 0;
 }
 
+static int mxc_shutdown(struct clock_event_device *ced)
+{
+	struct imx_timer *imxtm = to_imx_timer(ced);
+	unsigned long flags;
+	u32 tcn;
+
+	/*
+	 * The timer interrupt generation is disabled at least
+	 * for enough time to call mxc_set_next_event()
+	 */
+	local_irq_save(flags);
+
+	/* Disable interrupt in GPT module */
+	imxtm->gpt->gpt_irq_disable(imxtm);
+
+	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
+	/* Set event time into far-far future */
+	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);
+
+	/* Clear pending interrupt */
+	imxtm->gpt->gpt_irq_acknowledge(imxtm);
+
 #ifdef DEBUG
-static const char *clock_event_mode_label[] = {
-	[CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
-	[CLOCK_EVT_MODE_ONESHOT]  = "CLOCK_EVT_MODE_ONESHOT",
-	[CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
-	[CLOCK_EVT_MODE_UNUSED]   = "CLOCK_EVT_MODE_UNUSED",
-	[CLOCK_EVT_MODE_RESUME]   = "CLOCK_EVT_MODE_RESUME",
-};
+	printk(KERN_INFO "%s: changing mode\n", __func__);
 #endif /* DEBUG */
 
-static void mxc_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *ced)
+	local_irq_restore(flags);
+
+	return 0;
+}
+
+static int mxc_set_oneshot(struct clock_event_device *ced)
 {
 	struct imx_timer *imxtm = to_imx_timer(ced);
 	unsigned long flags;
@@ -237,7 +256,7 @@
 	/* Disable interrupt in GPT module */
 	imxtm->gpt->gpt_irq_disable(imxtm);
 
-	if (mode != imxtm->cem) {
+	if (!clockevent_state_oneshot(ced)) {
 		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
 		/* Set event time into far-far future */
 		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);
@@ -247,37 +266,19 @@
 	}
 
 #ifdef DEBUG
-	printk(KERN_INFO "mxc_set_mode: changing mode from %s to %s\n",
-		clock_event_mode_label[imxtm->cem],
-		clock_event_mode_label[mode]);
+	printk(KERN_INFO "%s: changing mode\n", __func__);
 #endif /* DEBUG */
 
-	/* Remember timer mode */
-	imxtm->cem = mode;
-	local_irq_restore(flags);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		printk(KERN_ERR"mxc_set_mode: Periodic mode is not "
-				"supported for i.MX\n");
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
 	/*
 	 * Do not put overhead of interrupt enable/disable into
 	 * mxc_set_next_event(), the core has about 4 minutes
 	 * to call mxc_set_next_event() or shutdown clock after
 	 * mode switching
 	 */
-		local_irq_save(flags);
-		imxtm->gpt->gpt_irq_enable(imxtm);
-		local_irq_restore(flags);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		/* Left event sources disabled, no more interrupts appear */
-		break;
-	}
+	imxtm->gpt->gpt_irq_enable(imxtm);
+	local_irq_restore(flags);
+
+	return 0;
 }
 
 /*
@@ -303,11 +304,11 @@
 	struct clock_event_device *ced = &imxtm->ced;
 	struct irqaction *act = &imxtm->act;
 
-	imxtm->cem = CLOCK_EVT_MODE_UNUSED;
-
 	ced->name = "mxc_timer1";
 	ced->features = CLOCK_EVT_FEAT_ONESHOT;
-	ced->set_mode = mxc_set_mode;
+	ced->set_state_shutdown = mxc_shutdown;
+	ced->set_state_oneshot = mxc_set_oneshot;
+	ced->tick_resume = mxc_shutdown;
 	ced->set_next_event = imxtm->gpt->set_next_event;
 	ced->rating = 200;
 	ced->cpumask = cpumask_of(0);
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index a68866e..3f59ac2 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -75,33 +75,37 @@
 	return IRQ_HANDLED;
 }
 
-static void clkevt_set_mode(enum clock_event_mode mode, struct clock_event_device *evt)
+static int clkevt_shutdown(struct clock_event_device *evt)
+{
+	u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
+
+	/* Disable timer */
+	writel(ctrl, clkevt_base + TIMER_CTRL);
+	return 0;
+}
+
+static int clkevt_set_oneshot(struct clock_event_device *evt)
+{
+	u32 ctrl = readl(clkevt_base + TIMER_CTRL) &
+		   ~(TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC);
+
+	/* Leave the timer disabled, .set_next_event will enable it */
+	writel(ctrl, clkevt_base + TIMER_CTRL);
+	return 0;
+}
+
+static int clkevt_set_periodic(struct clock_event_device *evt)
 {
 	u32 ctrl = readl(clkevt_base + TIMER_CTRL) & ~TIMER_CTRL_ENABLE;
 
 	/* Disable timer */
 	writel(ctrl, clkevt_base + TIMER_CTRL);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* Enable the timer and start the periodic tick */
-		writel(timer_reload, clkevt_base + TIMER_LOAD);
-		ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
-		writel(ctrl, clkevt_base + TIMER_CTRL);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Leave the timer disabled, .set_next_event will enable it */
-		ctrl &= ~TIMER_CTRL_PERIODIC;
-		writel(ctrl, clkevt_base + TIMER_CTRL);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_RESUME:
-	default:
-		/* Just leave in disabled state */
-		break;
-	}
-
+	/* Enable the timer and start the periodic tick */
+	writel(timer_reload, clkevt_base + TIMER_LOAD);
+	ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+	writel(ctrl, clkevt_base + TIMER_CTRL);
+	return 0;
 }
 
 static int clkevt_set_next_event(unsigned long next, struct clock_event_device *evt)
@@ -116,11 +120,15 @@
 }
 
 static struct clock_event_device integrator_clockevent = {
-	.name		= "timer1",
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= clkevt_set_mode,
-	.set_next_event	= clkevt_set_next_event,
-	.rating		= 300,
+	.name			= "timer1",
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT,
+	.set_state_shutdown	= clkevt_shutdown,
+	.set_state_periodic	= clkevt_set_periodic,
+	.set_state_oneshot	= clkevt_set_oneshot,
+	.tick_resume		= clkevt_shutdown,
+	.set_next_event		= clkevt_set_next_event,
+	.rating			= 300,
 };
 
 static struct irqaction integrator_timer_irq = {
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index 0250354..edacf39 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -72,10 +72,10 @@
 
 /**
  * keystone_timer_config: configures timer to work in oneshot/periodic modes.
- * @ mode: mode to configure
+ * @ mask: mask of the mode to configure
  * @ period: cycles number to configure for
  */
-static int keystone_timer_config(u64 period, enum clock_event_mode mode)
+static int keystone_timer_config(u64 period, int mask)
 {
 	u32 tcr;
 	u32 off;
@@ -84,16 +84,7 @@
 	off = tcr & ~(TCR_ENAMODE_MASK);
 
 	/* set enable mode */
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		tcr |= TCR_ENAMODE_ONESHOT_MASK;
-		break;
-	case CLOCK_EVT_MODE_PERIODIC:
-		tcr |= TCR_ENAMODE_PERIODIC_MASK;
-		break;
-	default:
-		return -1;
-	}
+	tcr |= mask;
 
 	/* disable timer */
 	keystone_timer_writel(off, TCR);
@@ -138,24 +129,19 @@
 static int keystone_set_next_event(unsigned long cycles,
 				  struct clock_event_device *evt)
 {
-	return keystone_timer_config(cycles, evt->mode);
+	return keystone_timer_config(cycles, TCR_ENAMODE_ONESHOT_MASK);
 }
 
-static void keystone_set_mode(enum clock_event_mode mode,
-			     struct clock_event_device *evt)
+static int keystone_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		keystone_timer_config(timer.hz_period, CLOCK_EVT_MODE_PERIODIC);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_ONESHOT:
-		keystone_timer_disable();
-		break;
-	default:
-		break;
-	}
+	keystone_timer_disable();
+	return 0;
+}
+
+static int keystone_set_periodic(struct clock_event_device *evt)
+{
+	keystone_timer_config(timer.hz_period, TCR_ENAMODE_PERIODIC_MASK);
+	return 0;
 }
 
 static void __init keystone_timer_init(struct device_node *np)
@@ -222,7 +208,9 @@
 	/* setup clockevent */
 	event_dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	event_dev->set_next_event = keystone_set_next_event;
-	event_dev->set_mode = keystone_set_mode;
+	event_dev->set_state_shutdown = keystone_shutdown;
+	event_dev->set_state_periodic = keystone_set_periodic;
+	event_dev->set_state_oneshot = keystone_shutdown;
 	event_dev->cpumask = cpu_all_mask;
 	event_dev->owner = THIS_MODULE;
 	event_dev->name = TIMER_NAME;
diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c
index ce18d57..78de982 100644
--- a/drivers/clocksource/timer-prima2.c
+++ b/drivers/clocksource/timer-prima2.c
@@ -104,26 +104,21 @@
 	return next - now > delta ? -ETIME : 0;
 }
 
-static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *ce)
+static int sirfsoc_timer_shutdown(struct clock_event_device *evt)
 {
 	u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		WARN_ON(1);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		writel_relaxed(val | BIT(0),
-			sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		writel_relaxed(val & ~BIT(0),
-			sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+
+	writel_relaxed(val & ~BIT(0),
+		       sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+	return 0;
+}
+
+static int sirfsoc_timer_set_oneshot(struct clock_event_device *evt)
+{
+	u32 val = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+
+	writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN);
+	return 0;
 }
 
 static void sirfsoc_clocksource_suspend(struct clocksource *cs)
@@ -157,7 +152,8 @@
 	.name = "sirfsoc_clockevent",
 	.rating = 200,
 	.features = CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode = sirfsoc_timer_set_mode,
+	.set_state_shutdown = sirfsoc_timer_shutdown,
+	.set_state_oneshot = sirfsoc_timer_set_oneshot,
 	.set_next_event = sirfsoc_timer_set_next_event,
 };
 
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index ca02503..5f45b9a 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -133,50 +133,50 @@
 	return IRQ_HANDLED;
 }
 
-static void sp804_set_mode(enum clock_event_mode mode,
-	struct clock_event_device *evt)
+static inline void timer_shutdown(struct clock_event_device *evt)
 {
-	unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
+	writel(0, clkevt_base + TIMER_CTRL);
+}
 
+static int sp804_shutdown(struct clock_event_device *evt)
+{
+	timer_shutdown(evt);
+	return 0;
+}
+
+static int sp804_set_periodic(struct clock_event_device *evt)
+{
+	unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
+			     TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
+
+	timer_shutdown(evt);
+	writel(clkevt_reload, clkevt_base + TIMER_LOAD);
 	writel(ctrl, clkevt_base + TIMER_CTRL);
-
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		writel(clkevt_reload, clkevt_base + TIMER_LOAD);
-		ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
-		break;
-
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* period set, and timer enabled in 'next_event' hook */
-		ctrl |= TIMER_CTRL_ONESHOT;
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		break;
-	}
-
-	writel(ctrl, clkevt_base + TIMER_CTRL);
+	return 0;
 }
 
 static int sp804_set_next_event(unsigned long next,
 	struct clock_event_device *evt)
 {
-	unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
+	unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE |
+			     TIMER_CTRL_ONESHOT | TIMER_CTRL_ENABLE;
 
 	writel(next, clkevt_base + TIMER_LOAD);
-	writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
+	writel(ctrl, clkevt_base + TIMER_CTRL);
 
 	return 0;
 }
 
 static struct clock_event_device sp804_clockevent = {
-	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-		CLOCK_EVT_FEAT_DYNIRQ,
-	.set_mode	= sp804_set_mode,
-	.set_next_event	= sp804_set_next_event,
-	.rating		= 300,
+	.features		= CLOCK_EVT_FEAT_PERIODIC |
+				  CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_DYNIRQ,
+	.set_state_shutdown	= sp804_shutdown,
+	.set_state_periodic	= sp804_set_periodic,
+	.set_state_oneshot	= sp804_shutdown,
+	.tick_resume		= sp804_shutdown,
+	.set_next_event		= sp804_set_next_event,
+	.rating			= 300,
 };
 
 static struct irqaction sp804_timer_irq = {
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index a97e8b5..f3dcb76 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -40,24 +40,25 @@
 	void __iomem *base;
 };
 
-static void stm32_clock_event_set_mode(enum clock_event_mode mode,
-				       struct clock_event_device *evtdev)
+static int stm32_clock_event_shutdown(struct clock_event_device *evtdev)
 {
 	struct stm32_clock_event_ddata *data =
 		container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
 	void *base = data->base;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		writel_relaxed(data->periodic_top, base + TIM_ARR);
-		writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
-		break;
+	writel_relaxed(0, base + TIM_CR1);
+	return 0;
+}
 
-	case CLOCK_EVT_MODE_ONESHOT:
-	default:
-		writel_relaxed(0, base + TIM_CR1);
-		break;
-	}
+static int stm32_clock_event_set_periodic(struct clock_event_device *evtdev)
+{
+	struct stm32_clock_event_ddata *data =
+		container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+	void *base = data->base;
+
+	writel_relaxed(data->periodic_top, base + TIM_ARR);
+	writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
+	return 0;
 }
 
 static int stm32_clock_event_set_next_event(unsigned long evt,
@@ -88,7 +89,10 @@
 	.evtdev = {
 		.name = "stm32 clockevent",
 		.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-		.set_mode = stm32_clock_event_set_mode,
+		.set_state_shutdown = stm32_clock_event_shutdown,
+		.set_state_periodic = stm32_clock_event_set_periodic,
+		.set_state_oneshot = stm32_clock_event_shutdown,
+		.tick_resume = stm32_clock_event_shutdown,
 		.set_next_event = stm32_clock_event_set_next_event,
 		.rating = 200,
 	},
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 0ffb4ea..bca9573 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -103,27 +103,31 @@
 	       ce->timer.base + TIMER_CTL_REG(timer));
 }
 
-static void sun5i_clkevt_mode(enum clock_event_mode mode,
-			      struct clock_event_device *clkevt)
+static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
 {
 	struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		sun5i_clkevt_time_stop(ce, 0);
-		sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy);
-		sun5i_clkevt_time_start(ce, 0, true);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		sun5i_clkevt_time_stop(ce, 0);
-		sun5i_clkevt_time_start(ce, 0, false);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	default:
-		sun5i_clkevt_time_stop(ce, 0);
-		break;
-	}
+	sun5i_clkevt_time_stop(ce, 0);
+	return 0;
+}
+
+static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
+{
+	struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+
+	sun5i_clkevt_time_stop(ce, 0);
+	sun5i_clkevt_time_start(ce, 0, false);
+	return 0;
+}
+
+static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
+{
+	struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+
+	sun5i_clkevt_time_stop(ce, 0);
+	sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy);
+	sun5i_clkevt_time_start(ce, 0, true);
+	return 0;
 }
 
 static int sun5i_clkevt_next_event(unsigned long evt,
@@ -286,7 +290,10 @@
 	ce->clkevt.name = node->name;
 	ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 	ce->clkevt.set_next_event = sun5i_clkevt_next_event;
-	ce->clkevt.set_mode = sun5i_clkevt_mode;
+	ce->clkevt.set_state_shutdown = sun5i_clkevt_shutdown;
+	ce->clkevt.set_state_periodic = sun5i_clkevt_set_periodic;
+	ce->clkevt.set_state_oneshot = sun5i_clkevt_set_oneshot;
+	ce->clkevt.tick_resume = sun5i_clkevt_shutdown;
 	ce->clkevt.rating = 340;
 	ce->clkevt.irq = irq;
 	ce->clkevt.cpumask = cpu_possible_mask;
diff --git a/drivers/clocksource/timer-u300.c b/drivers/clocksource/timer-u300.c
index 5dcf756..1744b24 100644
--- a/drivers/clocksource/timer-u300.c
+++ b/drivers/clocksource/timer-u300.c
@@ -187,85 +187,82 @@
 	unsigned ticks_per_jiffy;
 };
 
+static int u300_shutdown(struct clock_event_device *evt)
+{
+	/* Disable interrupts on GP1 */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Disable GP1 */
+	writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_DGPT1);
+	return 0;
+}
+
 /*
- * The u300_set_mode() function is always called first, if we
- * have oneshot timer active, the oneshot scheduling function
+ * If we have oneshot timer active, the oneshot scheduling function
  * u300_set_next_event() is called immediately after.
  */
-static void u300_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int u300_set_oneshot(struct clock_event_device *evt)
+{
+	/* Just return; here? */
+	/*
+	 * The actual event will be programmed by the next event hook,
+	 * so we just set a dummy value somewhere at the end of the
+	 * universe here.
+	 */
+	/* Disable interrupts on GPT1 */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Disable GP1 while we're reprogramming it. */
+	writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_DGPT1);
+	/*
+	 * Expire far in the future, u300_set_next_event() will be
+	 * called soon...
+	 */
+	writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
+	/* We run one shot per tick here! */
+	writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
+	       u300_timer_base + U300_TIMER_APP_SGPT1M);
+	/* Enable interrupts for this timer */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Enable timer */
+	writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+	       u300_timer_base + U300_TIMER_APP_EGPT1);
+	return 0;
+}
+
+static int u300_set_periodic(struct clock_event_device *evt)
 {
 	struct u300_clockevent_data *cevdata =
 		container_of(evt, struct u300_clockevent_data, cevd);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* Disable interrupts on GPT1 */
-		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
-		       u300_timer_base + U300_TIMER_APP_GPT1IE);
-		/* Disable GP1 while we're reprogramming it. */
-		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
-		       u300_timer_base + U300_TIMER_APP_DGPT1);
-		/*
-		 * Set the periodic mode to a certain number of ticks per
-		 * jiffy.
-		 */
-		writel(cevdata->ticks_per_jiffy,
-		       u300_timer_base + U300_TIMER_APP_GPT1TC);
-		/*
-		 * Set continuous mode, so the timer keeps triggering
-		 * interrupts.
-		 */
-		writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
-		       u300_timer_base + U300_TIMER_APP_SGPT1M);
-		/* Enable timer interrupts */
-		writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
-		       u300_timer_base + U300_TIMER_APP_GPT1IE);
-		/* Then enable the OS timer again */
-		writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
-		       u300_timer_base + U300_TIMER_APP_EGPT1);
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Just break; here? */
-		/*
-		 * The actual event will be programmed by the next event hook,
-		 * so we just set a dummy value somewhere at the end of the
-		 * universe here.
-		 */
-		/* Disable interrupts on GPT1 */
-		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
-		       u300_timer_base + U300_TIMER_APP_GPT1IE);
-		/* Disable GP1 while we're reprogramming it. */
-		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
-		       u300_timer_base + U300_TIMER_APP_DGPT1);
-		/*
-		 * Expire far in the future, u300_set_next_event() will be
-		 * called soon...
-		 */
-		writel(0xFFFFFFFF, u300_timer_base + U300_TIMER_APP_GPT1TC);
-		/* We run one shot per tick here! */
-		writel(U300_TIMER_APP_SGPT1M_MODE_ONE_SHOT,
-		       u300_timer_base + U300_TIMER_APP_SGPT1M);
-		/* Enable interrupts for this timer */
-		writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
-		       u300_timer_base + U300_TIMER_APP_GPT1IE);
-		/* Enable timer */
-		writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
-		       u300_timer_base + U300_TIMER_APP_EGPT1);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* Disable interrupts on GP1 */
-		writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
-		       u300_timer_base + U300_TIMER_APP_GPT1IE);
-		/* Disable GP1 */
-		writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
-		       u300_timer_base + U300_TIMER_APP_DGPT1);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		/* Ignore this call */
-		break;
-	}
+	/* Disable interrupts on GPT1 */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Disable GP1 while we're reprogramming it. */
+	writel(U300_TIMER_APP_DGPT1_TIMER_DISABLE,
+	       u300_timer_base + U300_TIMER_APP_DGPT1);
+	/*
+	 * Set the periodic mode to a certain number of ticks per
+	 * jiffy.
+	 */
+	writel(cevdata->ticks_per_jiffy,
+	       u300_timer_base + U300_TIMER_APP_GPT1TC);
+	/*
+	 * Set continuous mode, so the timer keeps triggering
+	 * interrupts.
+	 */
+	writel(U300_TIMER_APP_SGPT1M_MODE_CONTINUOUS,
+	       u300_timer_base + U300_TIMER_APP_SGPT1M);
+	/* Enable timer interrupts */
+	writel(U300_TIMER_APP_GPT1IE_IRQ_ENABLE,
+	       u300_timer_base + U300_TIMER_APP_GPT1IE);
+	/* Then enable the OS timer again */
+	writel(U300_TIMER_APP_EGPT1_TIMER_ENABLE,
+	       u300_timer_base + U300_TIMER_APP_EGPT1);
+	return 0;
 }
 
 /*
@@ -309,13 +306,15 @@
 static struct u300_clockevent_data u300_clockevent_data = {
 	/* Use general purpose timer 1 as clock event */
 	.cevd = {
-		.name		= "GPT1",
+		.name			= "GPT1",
 		/* Reasonably fast and accurate clock event */
-		.rating		= 300,
-		.features	= CLOCK_EVT_FEAT_PERIODIC |
-			CLOCK_EVT_FEAT_ONESHOT,
-		.set_next_event	= u300_set_next_event,
-		.set_mode	= u300_set_mode,
+		.rating			= 300,
+		.features		= CLOCK_EVT_FEAT_PERIODIC |
+					  CLOCK_EVT_FEAT_ONESHOT,
+		.set_next_event		= u300_set_next_event,
+		.set_state_shutdown	= u300_shutdown,
+		.set_state_periodic	= u300_set_periodic,
+		.set_state_oneshot	= u300_set_oneshot,
 	},
 };
 
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index b45ac62..f07ba99 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -86,20 +86,16 @@
 	return 0;
 }
 
-static void pit_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int pit_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		pit_set_next_event(cycle_per_jiffy, evt);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		pit_timer_disable();
-		break;
-	default:
-		break;
-	}
+	pit_timer_disable();
+	return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *evt)
+{
+	pit_set_next_event(cycle_per_jiffy, evt);
+	return 0;
 }
 
 static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
@@ -114,7 +110,7 @@
 	 * and start the counter again. So software need to disable the timer
 	 * to stop the counter loop in ONESHOT mode.
 	 */
-	if (likely(evt->mode == CLOCK_EVT_MODE_ONESHOT))
+	if (likely(clockevent_state_oneshot(evt)))
 		pit_timer_disable();
 
 	evt->event_handler(evt);
@@ -125,7 +121,8 @@
 static struct clock_event_device clockevent_pit = {
 	.name		= "VF pit timer",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
-	.set_mode	= pit_set_mode,
+	.set_state_shutdown = pit_shutdown,
+	.set_state_periodic = pit_set_periodic,
 	.set_next_event	= pit_set_next_event,
 	.rating		= 300,
 };
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
index 1098ed3..a92e94b 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/vt8500_timer.c
@@ -88,29 +88,20 @@
 	return 0;
 }
 
-static void vt8500_timer_set_mode(enum clock_event_mode mode,
-			      struct clock_event_device *evt)
+static int vt8500_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	case CLOCK_EVT_MODE_ONESHOT:
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		writel(readl(regbase + TIMER_CTRL_VAL) | 1,
-			regbase + TIMER_CTRL_VAL);
-		writel(0, regbase + TIMER_IER_VAL);
-		break;
-	}
+	writel(readl(regbase + TIMER_CTRL_VAL) | 1, regbase + TIMER_CTRL_VAL);
+	writel(0, regbase + TIMER_IER_VAL);
+	return 0;
 }
 
 static struct clock_event_device clockevent = {
-	.name           = "vt8500_timer",
-	.features       = CLOCK_EVT_FEAT_ONESHOT,
-	.rating         = 200,
-	.set_next_event = vt8500_timer_set_next_event,
-	.set_mode       = vt8500_timer_set_mode,
+	.name			= "vt8500_timer",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.rating			= 200,
+	.set_next_event		= vt8500_timer_set_next_event,
+	.set_state_shutdown	= vt8500_shutdown,
+	.set_state_oneshot	= vt8500_shutdown,
 };
 
 static irqreturn_t vt8500_timer_interrupt(int irq, void *dev_id)
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index 7ce4421..ceaa613 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -76,32 +76,28 @@
 	return 0;
 }
 
-static void zevio_timer_set_mode(enum clock_event_mode mode,
-				 struct clock_event_device *dev)
+static int zevio_timer_shutdown(struct clock_event_device *dev)
 {
 	struct zevio_timer *timer = container_of(dev, struct zevio_timer,
 						 clkevt);
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_ONESHOT:
-		/* Enable timer interrupts */
-		writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
-		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
-		break;
-	case CLOCK_EVT_MODE_SHUTDOWN:
-	case CLOCK_EVT_MODE_UNUSED:
-		/* Disable timer interrupts */
-		writel(0, timer->interrupt_regs + IO_INTR_MSK);
-		writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
-		/* Stop timer */
-		writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
-		break;
-	case CLOCK_EVT_MODE_PERIODIC:
-	default:
-		/* Unsupported */
-		break;
-	}
+	/* Disable timer interrupts */
+	writel(0, timer->interrupt_regs + IO_INTR_MSK);
+	writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+	/* Stop timer */
+	writel(CNTL_STOP_TIMER, timer->timer1 + IO_CONTROL);
+	return 0;
+}
+
+static int zevio_timer_set_oneshot(struct clock_event_device *dev)
+{
+	struct zevio_timer *timer = container_of(dev, struct zevio_timer,
+						 clkevt);
+
+	/* Enable timer interrupts */
+	writel(TIMER_INTR_MSK, timer->interrupt_regs + IO_INTR_MSK);
+	writel(TIMER_INTR_ALL, timer->interrupt_regs + IO_INTR_ACK);
+	return 0;
 }
 
 static irqreturn_t zevio_timer_interrupt(int irq, void *dev_id)
@@ -162,7 +158,9 @@
 	if (timer->interrupt_regs && irqnr) {
 		timer->clkevt.name		= timer->clockevent_name;
 		timer->clkevt.set_next_event	= zevio_timer_set_event;
-		timer->clkevt.set_mode		= zevio_timer_set_mode;
+		timer->clkevt.set_state_shutdown = zevio_timer_shutdown;
+		timer->clkevt.set_state_oneshot = zevio_timer_set_oneshot;
+		timer->clkevt.tick_resume	= zevio_timer_set_oneshot;
 		timer->clkevt.rating		= 200;
 		timer->clkevt.cpumask		= cpu_all_mask;
 		timer->clkevt.features		= CLOCK_EVT_FEAT_ONESHOT;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index cc8a71c..77aa34e 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -130,6 +130,13 @@
 	  This adds the CPUFreq driver for Marvell Kirkwood
 	  SoCs.
 
+config ARM_MT8173_CPUFREQ
+	bool "Mediatek MT8173 CPUFreq support"
+	depends on ARCH_MEDIATEK && REGULATOR
+	select PM_OPP
+	help
+	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.
+
 config ARM_OMAP2PLUS_CPUFREQ
 	bool "TI OMAP2+"
 	depends on ARCH_OMAP2PLUS
@@ -247,12 +254,19 @@
 	help
 	  This adds the CPUFreq driver support for SPEAr SOCs.
 
-config ARM_TEGRA_CPUFREQ
-	bool "TEGRA CPUFreq support"
+config ARM_TEGRA20_CPUFREQ
+	bool "Tegra20 CPUFreq support"
 	depends on ARCH_TEGRA
 	default y
 	help
-	  This adds the CPUFreq driver support for TEGRA SOCs.
+	  This adds the CPUFreq driver support for Tegra20 SOCs.
+
+config ARM_TEGRA124_CPUFREQ
+	tristate "Tegra124 CPUFreq support"
+	depends on ARCH_TEGRA && CPUFREQ_DT
+	default y
+	help
+	  This adds the CPUFreq driver support for Tegra124 SOCs.
 
 config ARM_PXA2xx_CPUFREQ
 	tristate "Intel PXA2xx CPUfreq driver"
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 2169bf7..60a57ca 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -62,6 +62,7 @@
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
 obj-$(CONFIG_ARM_INTEGRATOR)		+= integrator-cpufreq.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_MT8173_CPUFREQ)	+= mt8173-cpufreq.o
 obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_PXA2xx_CPUFREQ)	+= pxa2xx-cpufreq.o
 obj-$(CONFIG_PXA3xx)			+= pxa3xx-cpufreq.o
@@ -76,7 +77,8 @@
 obj-$(CONFIG_ARM_SA1100_CPUFREQ)	+= sa1100-cpufreq.o
 obj-$(CONFIG_ARM_SA1110_CPUFREQ)	+= sa1110-cpufreq.o
 obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
-obj-$(CONFIG_ARM_TEGRA_CPUFREQ)		+= tegra-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA20_CPUFREQ)	+= tegra20-cpufreq.o
+obj-$(CONFIG_ARM_TEGRA124_CPUFREQ)	+= tegra124-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ)	+= vexpress-spc-cpufreq.o
 
 ##################################################################################
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 0136dfc..15b921a 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -65,18 +65,21 @@
 #define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
 struct acpi_cpufreq_data {
-	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int resume;
 	unsigned int cpu_feature;
+	unsigned int acpi_perf_cpu;
 	cpumask_var_t freqdomain_cpus;
 };
 
-static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
-
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
+static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
+{
+	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
+}
+
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
@@ -144,7 +147,7 @@
 
 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 
 	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
 }
@@ -202,7 +205,7 @@
 	struct acpi_processor_performance *perf;
 	int i;
 
-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 
 	for (i = 0; i < perf->state_count; i++) {
 		if (value == perf->states[i].status)
@@ -221,7 +224,7 @@
 	else
 		msr &= INTEL_MSR_RANGE;
 
-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 
 	cpufreq_for_each_entry(pos, data->freq_table)
 		if (msr == perf->states[pos->driver_data].status)
@@ -327,7 +330,8 @@
 	put_cpu();
 }
 
-static u32 get_cur_val(const struct cpumask *mask)
+static u32
+get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
 {
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
@@ -335,7 +339,7 @@
 	if (unlikely(cpumask_empty(mask)))
 		return 0;
 
-	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
+	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
@@ -346,7 +350,7 @@
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
-		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
+		perf = to_perf_data(data);
 		cmd.addr.io.port = perf->control_register.address;
 		cmd.addr.io.bit_width = perf->control_register.bit_width;
 		break;
@@ -364,19 +368,24 @@
 
 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
+	struct acpi_cpufreq_data *data;
+	struct cpufreq_policy *policy;
 	unsigned int freq;
 	unsigned int cached_freq;
 
 	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
 
-	if (unlikely(data == NULL ||
-		     data->acpi_data == NULL || data->freq_table == NULL)) {
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(!policy))
 		return 0;
-	}
 
-	cached_freq = data->freq_table[data->acpi_data->state].frequency;
-	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
+	data = policy->driver_data;
+	cpufreq_cpu_put(policy);
+	if (unlikely(!data || !data->freq_table))
+		return 0;
+
+	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
+	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
 	if (freq != cached_freq) {
 		/*
 		 * The dreaded BIOS frequency change behind our back.
@@ -397,7 +406,7 @@
 	unsigned int i;
 
 	for (i = 0; i < 100; i++) {
-		cur_freq = extract_freq(get_cur_val(mask), data);
+		cur_freq = extract_freq(get_cur_val(mask, data), data);
 		if (cur_freq == freq)
 			return 1;
 		udelay(10);
@@ -408,18 +417,17 @@
 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 			       unsigned int index)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 	struct acpi_processor_performance *perf;
 	struct drv_cmd cmd;
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	int result = 0;
 
-	if (unlikely(data == NULL ||
-	     data->acpi_data == NULL || data->freq_table == NULL)) {
+	if (unlikely(data == NULL || data->freq_table == NULL)) {
 		return -ENODEV;
 	}
 
-	perf = data->acpi_data;
+	perf = to_perf_data(data);
 	next_perf_state = data->freq_table[index].driver_data;
 	if (perf->state == next_perf_state) {
 		if (unlikely(data->resume)) {
@@ -482,8 +490,9 @@
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
-	struct acpi_processor_performance *perf = data->acpi_data;
+	struct acpi_processor_performance *perf;
 
+	perf = to_perf_data(data);
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
 		unsigned int i;
@@ -672,17 +681,17 @@
 		goto err_free;
 	}
 
-	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
-	per_cpu(acfreq_data, cpu) = data;
+	perf = per_cpu_ptr(acpi_perf_data, cpu);
+	data->acpi_perf_cpu = cpu;
+	policy->driver_data = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 
-	result = acpi_processor_register_performance(data->acpi_data, cpu);
+	result = acpi_processor_register_performance(perf, cpu);
 	if (result)
 		goto err_free_mask;
 
-	perf = data->acpi_data;
 	policy->shared_type = perf->shared_type;
 
 	/*
@@ -838,26 +847,25 @@
 err_freqfree:
 	kfree(data->freq_table);
 err_unreg:
-	acpi_processor_unregister_performance(perf, cpu);
+	acpi_processor_unregister_performance(cpu);
 err_free_mask:
 	free_cpumask_var(data->freqdomain_cpus);
 err_free:
 	kfree(data);
-	per_cpu(acfreq_data, cpu) = NULL;
+	policy->driver_data = NULL;
 
 	return result;
 }
 
 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 
 	pr_debug("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
-		per_cpu(acfreq_data, policy->cpu) = NULL;
-		acpi_processor_unregister_performance(data->acpi_data,
-						      policy->cpu);
+		policy->driver_data = NULL;
+		acpi_processor_unregister_performance(data->acpi_perf_cpu);
 		free_cpumask_var(data->freqdomain_cpus);
 		kfree(data->freq_table);
 		kfree(data);
@@ -868,7 +876,7 @@
 
 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
 
 	pr_debug("acpi_cpufreq_resume\n");
 
@@ -880,7 +888,9 @@
 static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	&freqdomain_cpus,
-	NULL,	/* this is a placeholder for cpb, do not remove */
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	&cpb,
+#endif
 	NULL,
 };
 
@@ -953,17 +963,16 @@
 	 * only if configured. This is considered legacy code, which
 	 * will probably be removed at some point in the future.
 	 */
-	if (check_amd_hwpstate_cpu(0)) {
-		struct freq_attr **iter;
+	if (!check_amd_hwpstate_cpu(0)) {
+		struct freq_attr **attr;
 
-		pr_debug("adding sysfs entry for cpb\n");
+		pr_debug("CPB unsupported, do not expose it\n");
 
-		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
-			;
-
-		/* make sure there is a terminator behind it */
-		if (iter[1] == NULL)
-			*iter = &cpb;
+		for (attr = acpi_cpufreq_attr; *attr; attr++)
+			if (*attr == &cpb) {
+				*attr = NULL;
+				break;
+			}
 	}
 #endif
 	acpi_cpufreq_boost_init();
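
As an aside on the acpi-cpufreq hunks above: the driver-private state now lives
in policy->driver_data, and the per-CPU ACPI performance data is reached through
to_perf_data() via a stored CPU number instead of the old global per-CPU pointer.
Below is a minimal user-space sketch of that pattern; all names here
(fake_policy, fake_perf_data, to_perf, ...) are invented for illustration and are
not the kernel structures.

    #include <stdio.h>

    struct perf_state { unsigned int khz; };

    /* Stand-in for the per-CPU acpi_processor_performance data. */
    static struct perf_state fake_perf_data[4] = {
            { 800000 }, { 800000 }, { 1200000 }, { 1200000 },
    };

    struct fake_driver_data {
            unsigned int perf_cpu;      /* plays the role of acpi_perf_cpu */
    };

    struct fake_policy {
            unsigned int cpu;
            void *driver_data;          /* plays the role of policy->driver_data */
    };

    /* Analogue of to_perf_data(): resolve the per-CPU entry on demand. */
    static struct perf_state *to_perf(struct fake_driver_data *data)
    {
            return &fake_perf_data[data->perf_cpu];
    }

    int main(void)
    {
            struct fake_driver_data data = { .perf_cpu = 2 };
            struct fake_policy policy = { .cpu = 2, .driver_data = &data };
            struct fake_driver_data *d = policy.driver_data;

            /* Callbacks get everything from the policy, not a per-CPU variable. */
            printf("cpu%u perf state: %u kHz\n", policy.cpu, to_perf(d)->khz);
            return 0;
    }

The point, as in the patch, is that the lookup key travels with the policy, so a
callback like get_cur_freq_on_cpu() no longer depends on a valid per-CPU slot.
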
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 528a82bf..c3583cd 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -36,6 +36,12 @@
 	unsigned int voltage_tolerance; /* in percentage */
 };
 
+static struct freq_attr *cpufreq_dt_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,   /* Extra space for boost-attr if required */
+	NULL,
+};
+
 static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct dev_pm_opp *opp;
@@ -184,7 +190,6 @@
 
 static int cpufreq_init(struct cpufreq_policy *policy)
 {
-	struct cpufreq_dt_platform_data *pd;
 	struct cpufreq_frequency_table *freq_table;
 	struct device_node *np;
 	struct private_data *priv;
@@ -193,6 +198,7 @@
 	struct clk *cpu_clk;
 	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
+	bool need_update = false;
 	int ret;
 
 	ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
@@ -208,8 +214,47 @@
 		goto out_put_reg_clk;
 	}
 
-	/* OPPs might be populated at runtime, don't check for error here */
-	of_init_opp_table(cpu_dev);
+	/* Get OPP-sharing information from "operating-points-v2" bindings */
+	ret = of_get_cpus_sharing_opps(cpu_dev, policy->cpus);
+	if (ret) {
+		/*
+		 * operating-points-v2 not supported, fall back to the old
+		 * method of finding shared OPPs for backward compatibility.
+		 */
+		if (ret == -ENOENT)
+			need_update = true;
+		else
+			goto out_node_put;
+	}
+
+	/*
+	 * Initialize OPP tables for all policy->cpus. They will be shared by
+	 * all CPUs that are marked as sharing OPPs via the OPP bindings.
+	 *
+	 * For platforms not using operating-points-v2 bindings, we do this
+	 * before updating policy->cpus. Otherwise, we will end up creating
+	 * duplicate OPPs for policy->cpus.
+	 *
+	 * OPPs might be populated at runtime, don't check for error here
+	 */
+	of_cpumask_init_opp_table(policy->cpus);
+
+	if (need_update) {
+		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
+
+		if (!pd || !pd->independent_clocks)
+			cpumask_setall(policy->cpus);
+
+		/*
+		 * OPP tables are initialized only for policy->cpu, do it for
+		 * others as well.
+		 */
+		set_cpus_sharing_opps(cpu_dev, policy->cpus);
+
+		of_property_read_u32(np, "clock-latency", &transition_latency);
+	} else {
+		transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
+	}
 
 	/*
 	 * But we need OPP table to function so if it is not there let's
@@ -230,7 +275,7 @@
 
 	of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
 
-	if (of_property_read_u32(np, "clock-latency", &transition_latency))
+	if (!transition_latency)
 		transition_latency = CPUFREQ_ETERNAL;
 
 	if (!IS_ERR(cpu_reg)) {
@@ -291,11 +336,16 @@
 		goto out_free_cpufreq_table;
 	}
 
-	policy->cpuinfo.transition_latency = transition_latency;
+	/* Support turbo/boost mode */
+	if (policy_has_boost_freq(policy)) {
+		/* This gets disabled by core on driver unregister */
+		ret = cpufreq_enable_boost_support();
+		if (ret)
+			goto out_free_cpufreq_table;
+		cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+	}
 
-	pd = cpufreq_get_driver_data();
-	if (!pd || !pd->independent_clocks)
-		cpumask_setall(policy->cpus);
+	policy->cpuinfo.transition_latency = transition_latency;
 
 	of_node_put(np);
 
@@ -306,7 +356,8 @@
 out_free_priv:
 	kfree(priv);
 out_free_opp:
-	of_free_opp_table(cpu_dev);
+	of_cpumask_free_opp_table(policy->cpus);
+out_node_put:
 	of_node_put(np);
 out_put_reg_clk:
 	clk_put(cpu_clk);
@@ -322,7 +373,7 @@
 
 	cpufreq_cooling_unregister(priv->cdev);
 	dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-	of_free_opp_table(priv->cpu_dev);
+	of_cpumask_free_opp_table(policy->related_cpus);
 	clk_put(policy->clk);
 	if (!IS_ERR(priv->cpu_reg))
 		regulator_put(priv->cpu_reg);
@@ -367,7 +418,7 @@
 	.exit = cpufreq_exit,
 	.ready = cpufreq_ready,
 	.name = "cpufreq-dt",
-	.attr = cpufreq_generic_attr,
+	.attr = cpufreq_dt_attr,
 };
 
 static int dt_cpufreq_probe(struct platform_device *pdev)
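
A small illustration of the cpufreq-dt attribute handling above: the driver's
freq_attr array keeps one spare NULL slot that is filled with the boost
attribute only when the OPP table contains turbo entries. The sketch below is a
user-space analogue with made-up attribute strings, not the kernel sysfs
machinery.

    #include <stdio.h>
    #include <stdbool.h>

    /* One spare slot before the terminator, like cpufreq_dt_attr[]. */
    static const char *dt_attrs[] = {
            "scaling_available_frequencies",
            NULL,   /* extra space for the boost attribute, if required */
            NULL,
    };

    int main(void)
    {
            bool has_boost = true;  /* stands in for policy_has_boost_freq() */

            if (has_boost)
                    dt_attrs[1] = "scaling_boost_frequencies";

            for (const char **a = dt_attrs; *a; a++)
                    printf("exposing sysfs attr: %s\n", *a);
            return 0;
    }

Filling the reserved slot at init time keeps the array NULL-terminated in both
cases, which is why the extra NULL is declared up front.
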
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7a3c30c..b3d9368 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -112,12 +112,6 @@
 	return cpufreq_driver->target_index || cpufreq_driver->target;
 }
 
-/*
- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
- * sections
- */
-static DECLARE_RWSEM(cpufreq_rwsem);
-
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
 		unsigned int event);
@@ -277,10 +271,6 @@
  * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
  * freed as that depends on the kobj count.
  *
- * It also takes a read-lock of 'cpufreq_rwsem' and doesn't put it back if a
- * valid policy is found. This is done to make sure the driver doesn't get
- * unregistered while the policy is being used.
- *
  * Return: A valid policy on success, otherwise NULL on failure.
  */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
@@ -291,9 +281,6 @@
 	if (WARN_ON(cpu >= nr_cpu_ids))
 		return NULL;
 
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return NULL;
-
 	/* get the cpufreq driver */
 	read_lock_irqsave(&cpufreq_driver_lock, flags);
 
@@ -306,9 +293,6 @@
 
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (!policy)
-		up_read(&cpufreq_rwsem);
-
 	return policy;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -320,13 +304,10 @@
  *
  * This decrements the kobject reference count incremented earlier by calling
  * cpufreq_cpu_get().
- *
- * It also drops the read-lock of 'cpufreq_rwsem' taken at cpufreq_cpu_get().
  */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
 	kobject_put(&policy->kobj);
-	up_read(&cpufreq_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
@@ -539,9 +520,6 @@
 {
 	int err = -EINVAL;
 
-	if (!cpufreq_driver)
-		goto out;
-
 	if (cpufreq_driver->setpolicy) {
 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
 			*policy = CPUFREQ_POLICY_PERFORMANCE;
@@ -576,7 +554,6 @@
 
 		mutex_unlock(&cpufreq_governor_mutex);
 	}
-out:
 	return err;
 }
 
@@ -625,9 +602,7 @@
 	int ret, temp;							\
 	struct cpufreq_policy new_policy;				\
 									\
-	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
-	if (ret)							\
-		return -EINVAL;						\
+	memcpy(&new_policy, policy, sizeof(*policy));			\
 									\
 	ret = sscanf(buf, "%u", &new_policy.object);			\
 	if (ret != 1)							\
@@ -681,9 +656,7 @@
 	char	str_governor[16];
 	struct cpufreq_policy new_policy;
 
-	ret = cpufreq_get_policy(&new_policy, policy->cpu);
-	if (ret)
-		return ret;
+	memcpy(&new_policy, policy, sizeof(*policy));
 
 	ret = sscanf(buf, "%15s", str_governor);
 	if (ret != 1)
@@ -694,14 +667,7 @@
 		return -EINVAL;
 
 	ret = cpufreq_set_policy(policy, &new_policy);
-
-	policy->user_policy.policy = policy->policy;
-	policy->user_policy.governor = policy->governor;
-
-	if (ret)
-		return ret;
-	else
-		return count;
+	return ret ? ret : count;
 }
 
 /**
@@ -851,9 +817,6 @@
 	struct freq_attr *fattr = to_attr(attr);
 	ssize_t ret;
 
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return -EINVAL;
-
 	down_read(&policy->rwsem);
 
 	if (fattr->show)
@@ -862,7 +825,6 @@
 		ret = -EIO;
 
 	up_read(&policy->rwsem);
-	up_read(&cpufreq_rwsem);
 
 	return ret;
 }
@@ -879,9 +841,6 @@
 	if (!cpu_online(policy->cpu))
 		goto unlock;
 
-	if (!down_read_trylock(&cpufreq_rwsem))
-		goto unlock;
-
 	down_write(&policy->rwsem);
 
 	/* Updating inactive policies is invalid, so avoid doing that. */
@@ -897,8 +856,6 @@
 
 unlock_policy_rwsem:
 	up_write(&policy->rwsem);
-
-	up_read(&cpufreq_rwsem);
 unlock:
 	put_online_cpus();
 
@@ -1027,8 +984,7 @@
 	}
 }
 
-static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
-				     struct device *dev)
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
 {
 	struct freq_attr **drv_attr;
 	int ret = 0;
@@ -1060,11 +1016,10 @@
 	return cpufreq_add_dev_symlink(policy);
 }
 
-static void cpufreq_init_policy(struct cpufreq_policy *policy)
+static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
 	struct cpufreq_governor *gov = NULL;
 	struct cpufreq_policy new_policy;
-	int ret = 0;
 
 	memcpy(&new_policy, policy, sizeof(*policy));
 
@@ -1083,16 +1038,10 @@
 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
 
 	/* set default policy */
-	ret = cpufreq_set_policy(policy, &new_policy);
-	if (ret) {
-		pr_debug("setting policy failed\n");
-		if (cpufreq_driver->exit)
-			cpufreq_driver->exit(policy);
-	}
+	return cpufreq_set_policy(policy, &new_policy);
 }
 
-static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
-				  unsigned int cpu, struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;
 
@@ -1126,33 +1075,15 @@
 	return 0;
 }
 
-static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
 {
-	struct cpufreq_policy *policy;
-	unsigned long flags;
-
-	read_lock_irqsave(&cpufreq_driver_lock, flags);
-	policy = per_cpu(cpufreq_cpu_data, cpu);
-	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-	if (likely(policy)) {
-		/* Policy should be inactive here */
-		WARN_ON(!policy_is_inactive(policy));
-
-		down_write(&policy->rwsem);
-		policy->cpu = cpu;
-		policy->governor = NULL;
-		up_write(&policy->rwsem);
-	}
-
-	return policy;
-}
-
-static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
-{
+	struct device *dev = get_cpu_device(cpu);
 	struct cpufreq_policy *policy;
 	int ret;
 
+	if (WARN_ON(!dev))
+		return NULL;
+
 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
 	if (!policy)
 		return NULL;
@@ -1180,10 +1111,10 @@
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
-	policy->cpu = dev->id;
+	policy->cpu = cpu;
 
 	/* Set this once on allocation */
-	policy->kobj_cpu = dev->id;
+	policy->kobj_cpu = cpu;
 
 	return policy;
 
@@ -1245,59 +1176,34 @@
 	kfree(policy);
 }
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static int cpufreq_online(unsigned int cpu)
 {
-	unsigned int j, cpu = dev->id;
-	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
+	bool new_policy;
 	unsigned long flags;
-	bool recover_policy = !sif;
+	unsigned int j;
+	int ret;
 
-	pr_debug("adding CPU %u\n", cpu);
-
-	if (cpu_is_offline(cpu)) {
-		/*
-		 * Only possible if we are here from the subsys_interface add
-		 * callback.  A hotplug notifier will follow and we will handle
-		 * it as CPU online then.  For now, just create the sysfs link,
-		 * unless there is no policy or the link is already present.
-		 */
-		policy = per_cpu(cpufreq_cpu_data, cpu);
-		return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
-			? add_cpu_dev_symlink(policy, cpu) : 0;
-	}
-
-	if (!down_read_trylock(&cpufreq_rwsem))
-		return 0;
+	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
 
 	/* Check if this CPU already has a policy to manage it */
 	policy = per_cpu(cpufreq_cpu_data, cpu);
-	if (policy && !policy_is_inactive(policy)) {
+	if (policy) {
 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
-		ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-		up_read(&cpufreq_rwsem);
-		return ret;
-	}
+		if (!policy_is_inactive(policy))
+			return cpufreq_add_policy_cpu(policy, cpu);
 
-	/*
-	 * Restore the saved policy when doing light-weight init and fall back
-	 * to the full init if that fails.
-	 */
-	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
-	if (!policy) {
-		recover_policy = false;
-		policy = cpufreq_policy_alloc(dev);
+		/* This is the only online CPU for the policy.  Start over. */
+		new_policy = false;
+		down_write(&policy->rwsem);
+		policy->cpu = cpu;
+		policy->governor = NULL;
+		up_write(&policy->rwsem);
+	} else {
+		new_policy = true;
+		policy = cpufreq_policy_alloc(cpu);
 		if (!policy)
-			goto nomem_out;
+			return -ENOMEM;
 	}
 
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1308,17 +1214,17 @@
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		pr_debug("initialization failed\n");
-		goto err_set_policy_cpu;
+		goto out_free_policy;
 	}
 
 	down_write(&policy->rwsem);
 
-	/* related cpus should atleast have policy->cpus */
-	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
-	/* Remember which CPUs have been present at the policy creation time. */
-	if (!recover_policy)
+	if (new_policy) {
+		/* related_cpus should at least include policy->cpus. */
+		cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+		/* Remember CPUs present at the policy creation time. */
 		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
+	}
 
 	/*
 	 * affected cpus must always be the one, which are online. We aren't
@@ -1326,7 +1232,7 @@
 	 */
 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
-	if (!recover_policy) {
+	if (new_policy) {
 		policy->user_policy.min = policy->min;
 		policy->user_policy.max = policy->max;
 
@@ -1340,7 +1246,7 @@
 		policy->cur = cpufreq_driver->get(policy->cpu);
 		if (!policy->cur) {
 			pr_err("%s: ->get() failed\n", __func__);
-			goto err_get_freq;
+			goto out_exit_policy;
 		}
 	}
 
@@ -1387,10 +1293,10 @@
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	if (!recover_policy) {
-		ret = cpufreq_add_dev_interface(policy, dev);
+	if (new_policy) {
+		ret = cpufreq_add_dev_interface(policy);
 		if (ret)
-			goto err_out_unregister;
+			goto out_exit_policy;
 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				CPUFREQ_CREATE_POLICY, policy);
 
@@ -1399,18 +1305,19 @@
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	}
 
-	cpufreq_init_policy(policy);
-
-	if (!recover_policy) {
-		policy->user_policy.policy = policy->policy;
-		policy->user_policy.governor = policy->governor;
+	ret = cpufreq_init_policy(policy);
+	if (ret) {
+		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
+		       __func__, cpu, ret);
+		/* cpufreq_policy_free() will notify based on this */
+		new_policy = false;
+		goto out_exit_policy;
 	}
+
 	up_write(&policy->rwsem);
 
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 
-	up_read(&cpufreq_rwsem);
-
 	/* Callback for handling stuff after policy is ready */
 	if (cpufreq_driver->ready)
 		cpufreq_driver->ready(policy);
@@ -1419,24 +1326,47 @@
 
 	return 0;
 
-err_out_unregister:
-err_get_freq:
+out_exit_policy:
 	up_write(&policy->rwsem);
 
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
-err_set_policy_cpu:
-	cpufreq_policy_free(policy, recover_policy);
-nomem_out:
-	up_read(&cpufreq_rwsem);
+out_free_policy:
+	cpufreq_policy_free(policy, !new_policy);
+	return ret;
+}
+
+/**
+ * cpufreq_add_dev - add the cpufreq interface for a CPU device.
+ * @dev: CPU device.
+ * @sif: Subsystem interface structure pointer (not used)
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+	unsigned cpu = dev->id;
+	int ret;
+
+	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
+
+	if (cpu_online(cpu)) {
+		ret = cpufreq_online(cpu);
+	} else {
+		/*
+		 * A hotplug notifier will follow and we will handle it as CPU
+		 * online then.  For now, just create the sysfs link, unless
+		 * there is no policy or the link is already present.
+		 */
+		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+
+		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
+			? add_cpu_dev_symlink(policy, cpu) : 0;
+	}
 
 	return ret;
 }
 
-static int __cpufreq_remove_dev_prepare(struct device *dev)
+static void cpufreq_offline_prepare(unsigned int cpu)
 {
-	unsigned int cpu = dev->id;
-	int ret = 0;
 	struct cpufreq_policy *policy;
 
 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
@@ -1444,11 +1374,11 @@
 	policy = cpufreq_cpu_get_raw(cpu);
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
-		return -EINVAL;
+		return;
 	}
 
 	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 		if (ret)
 			pr_err("%s: Failed to stop governor\n", __func__);
 	}
@@ -1469,7 +1399,7 @@
 	/* Start governor again for active policy */
 	if (!policy_is_inactive(policy)) {
 		if (has_target()) {
-			ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
 			if (!ret)
 				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 
@@ -1479,28 +1409,24 @@
 	} else if (cpufreq_driver->stop_cpu) {
 		cpufreq_driver->stop_cpu(policy);
 	}
-
-	return ret;
 }
 
-static int __cpufreq_remove_dev_finish(struct device *dev)
+static void cpufreq_offline_finish(unsigned int cpu)
 {
-	unsigned int cpu = dev->id;
-	int ret;
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy) {
 		pr_debug("%s: No cpu_data found\n", __func__);
-		return -EINVAL;
+		return;
 	}
 
 	/* Only proceed for inactive policies */
 	if (!policy_is_inactive(policy))
-		return 0;
+		return;
 
 	/* If cpu is last user of policy, free policy */
 	if (has_target()) {
-		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		if (ret)
 			pr_err("%s: Failed to exit governor\n", __func__);
 	}
@@ -1512,8 +1438,6 @@
 	 */
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(policy);
-
-	return 0;
 }
 
 /**
@@ -1521,24 +1445,24 @@
  *
  * Removes the cpufreq interface for a CPU device.
  */
-static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
+static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id;
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
 
 	if (!policy)
-		return 0;
+		return;
 
 	if (cpu_online(cpu)) {
-		__cpufreq_remove_dev_prepare(dev);
-		__cpufreq_remove_dev_finish(dev);
+		cpufreq_offline_prepare(cpu);
+		cpufreq_offline_finish(cpu);
 	}
 
 	cpumask_clear_cpu(cpu, policy->real_cpus);
 
 	if (cpumask_empty(policy->real_cpus)) {
 		cpufreq_policy_free(policy, true);
-		return 0;
+		return;
 	}
 
 	if (cpu != policy->kobj_cpu) {
@@ -1557,8 +1481,6 @@
 		policy->kobj_cpu = new_cpu;
 		WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
 	}
-
-	return 0;
 }
 
 static void handle_update(struct work_struct *work)
@@ -2247,7 +2169,11 @@
 
 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
 
-	if (new_policy->min > policy->max || new_policy->max < policy->min)
+	/*
+	 * This check works well when we store new min/max freq attributes,
+	 * because new_policy is a copy of policy with one field updated.
+	 */
+	if (new_policy->min > new_policy->max)
 		return -EINVAL;
 
 	/* verify the cpu speed can be set within this limit */
@@ -2259,10 +2185,6 @@
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_ADJUST, new_policy);
 
-	/* adjust if necessary - hardware incompatibility*/
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-			CPUFREQ_INCOMPATIBLE, new_policy);
-
 	/*
 	 * verify the cpu speed can be set within this limit, which might be
 	 * different to the first one
@@ -2296,16 +2218,31 @@
 	old_gov = policy->governor;
 	/* end old governor */
 	if (old_gov) {
-		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+		if (ret) {
+			/* This can happen due to a race with other operations */
+			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
+				 __func__, old_gov->name, ret);
+			return ret;
+		}
+
 		up_write(&policy->rwsem);
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
 		down_write(&policy->rwsem);
+
+		if (ret) {
+			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
+			       __func__, old_gov->name, ret);
+			return ret;
+		}
 	}
 
 	/* start new governor */
 	policy->governor = new_policy->governor;
-	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
-		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
+	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
+	if (!ret) {
+		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (!ret)
 			goto out;
 
 		up_write(&policy->rwsem);
@@ -2317,11 +2254,13 @@
 	pr_debug("starting governor %s failed\n", policy->governor->name);
 	if (old_gov) {
 		policy->governor = old_gov;
-		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
-		__cpufreq_governor(policy, CPUFREQ_GOV_START);
+		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
+			policy->governor = NULL;
+		else
+			__cpufreq_governor(policy, CPUFREQ_GOV_START);
 	}
 
-	return -EINVAL;
+	return ret;
 
  out:
 	pr_debug("governor: change or update limits\n");
@@ -2350,8 +2289,6 @@
 	memcpy(&new_policy, policy, sizeof(*policy));
 	new_policy.min = policy->user_policy.min;
 	new_policy.max = policy->user_policy.max;
-	new_policy.policy = policy->user_policy.policy;
-	new_policy.governor = policy->user_policy.governor;
 
 	/*
 	 * BIOS might change freq behind our back
@@ -2387,27 +2324,23 @@
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct device *dev;
 
-	dev = get_cpu_device(cpu);
-	if (dev) {
-		switch (action & ~CPU_TASKS_FROZEN) {
-		case CPU_ONLINE:
-			cpufreq_add_dev(dev, NULL);
-			break;
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		cpufreq_online(cpu);
+		break;
 
-		case CPU_DOWN_PREPARE:
-			__cpufreq_remove_dev_prepare(dev);
-			break;
+	case CPU_DOWN_PREPARE:
+		cpufreq_offline_prepare(cpu);
+		break;
 
-		case CPU_POST_DEAD:
-			__cpufreq_remove_dev_finish(dev);
-			break;
+	case CPU_POST_DEAD:
+		cpufreq_offline_finish(cpu);
+		break;
 
-		case CPU_DOWN_FAILED:
-			cpufreq_add_dev(dev, NULL);
-			break;
-		}
+	case CPU_DOWN_FAILED:
+		cpufreq_online(cpu);
+		break;
 	}
 	return NOTIFY_OK;
 }
@@ -2477,6 +2410,49 @@
 }
 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
 
+static int create_boost_sysfs_file(void)
+{
+	int ret;
+
+	if (!cpufreq_boost_supported())
+		return 0;
+
+	/*
+	 * Check if driver provides function to enable boost -
+	 * if not, use cpufreq_boost_set_sw as default
+	 */
+	if (!cpufreq_driver->set_boost)
+		cpufreq_driver->set_boost = cpufreq_boost_set_sw;
+
+	ret = cpufreq_sysfs_create_file(&boost.attr);
+	if (ret)
+		pr_err("%s: cannot register global BOOST sysfs file\n",
+		       __func__);
+
+	return ret;
+}
+
+static void remove_boost_sysfs_file(void)
+{
+	if (cpufreq_boost_supported())
+		cpufreq_sysfs_remove_file(&boost.attr);
+}
+
+int cpufreq_enable_boost_support(void)
+{
+	if (!cpufreq_driver)
+		return -EINVAL;
+
+	if (cpufreq_boost_supported())
+		return 0;
+
+	cpufreq_driver->boost_supported = true;
+
+	/* This will get removed on driver unregister */
+	return create_boost_sysfs_file();
+}
+EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
+
 int cpufreq_boost_enabled(void)
 {
 	return cpufreq_driver->boost_enabled;
@@ -2515,10 +2491,14 @@
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
 
+	/* Protect against concurrent CPU online/offline. */
+	get_online_cpus();
+
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	if (cpufreq_driver) {
 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		return -EEXIST;
+		ret = -EEXIST;
+		goto out;
 	}
 	cpufreq_driver = driver_data;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2526,21 +2506,9 @@
 	if (driver_data->setpolicy)
 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
 
-	if (cpufreq_boost_supported()) {
-		/*
-		 * Check if driver provides function to enable boost -
-		 * if not, use cpufreq_boost_set_sw as default
-		 */
-		if (!cpufreq_driver->set_boost)
-			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
-
-		ret = cpufreq_sysfs_create_file(&boost.attr);
-		if (ret) {
-			pr_err("%s: cannot register global BOOST sysfs file\n",
-			       __func__);
-			goto err_null_driver;
-		}
-	}
+	ret = create_boost_sysfs_file();
+	if (ret)
+		goto err_null_driver;
 
 	ret = subsys_interface_register(&cpufreq_interface);
 	if (ret)
@@ -2557,17 +2525,19 @@
 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
 	pr_debug("driver %s up and running\n", driver_data->name);
 
-	return 0;
+out:
+	put_online_cpus();
+	return ret;
+
 err_if_unreg:
 	subsys_interface_unregister(&cpufreq_interface);
 err_boost_unreg:
-	if (cpufreq_boost_supported())
-		cpufreq_sysfs_remove_file(&boost.attr);
+	remove_boost_sysfs_file();
 err_null_driver:
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 	cpufreq_driver = NULL;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	return ret;
+	goto out;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
 
@@ -2588,19 +2558,18 @@
 
 	pr_debug("unregistering driver %s\n", driver->name);
 
+	/* Protect against concurrent cpu hotplug */
+	get_online_cpus();
 	subsys_interface_unregister(&cpufreq_interface);
-	if (cpufreq_boost_supported())
-		cpufreq_sysfs_remove_file(&boost.attr);
-
+	remove_boost_sysfs_file();
 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
 
-	down_write(&cpufreq_rwsem);
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
 
 	cpufreq_driver = NULL;
 
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-	up_write(&cpufreq_rwsem);
+	put_online_cpus();
 
 	return 0;
 }
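
For reference, the hotplug path in the core now dispatches on the CPU number
directly (cpufreq_online(), cpufreq_offline_prepare(), cpufreq_offline_finish())
rather than resolving a struct device first. A compact user-space sketch of that
dispatch shape follows; the handler names and action values are placeholders,
not the real notifier API.

    #include <stdio.h>

    enum { CPU_ONLINE, CPU_DOWN_PREPARE, CPU_POST_DEAD, CPU_DOWN_FAILED };

    static void online(unsigned int cpu)          { printf("online cpu%u\n", cpu); }
    static void offline_prepare(unsigned int cpu) { printf("prepare cpu%u\n", cpu); }
    static void offline_finish(unsigned int cpu)  { printf("finish cpu%u\n", cpu); }

    static int notifier(unsigned long action, unsigned int cpu)
    {
            switch (action) {
            case CPU_ONLINE:
            case CPU_DOWN_FAILED:   /* a failed offline is handled as online */
                    online(cpu);
                    break;
            case CPU_DOWN_PREPARE:
                    offline_prepare(cpu);
                    break;
            case CPU_POST_DEAD:
                    offline_finish(cpu);
                    break;
            }
            return 0;
    }

    int main(void)
    {
            notifier(CPU_DOWN_PREPARE, 3);
            notifier(CPU_POST_DEAD, 3);
            notifier(CPU_DOWN_FAILED, 3);
            return 0;
    }
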
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c86a10c..84a1506 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -47,7 +47,7 @@
 static void cs_check_cpu(int cpu, unsigned int load)
 {
 	struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
 	struct dbs_data *dbs_data = policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -102,26 +102,15 @@
 	}
 }
 
-static void cs_dbs_timer(struct work_struct *work)
+static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
 {
-	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
-			struct cs_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
-			cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	int delay = delay_for_sampling_rate(cs_tuners->sampling_rate);
-	bool modify_all = true;
 
-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, cs_tuners->sampling_rate))
-		modify_all = false;
-	else
-		dbs_check_cpu(dbs_data, cpu);
+	if (modify_all)
+		dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
 
-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay_for_sampling_rate(cs_tuners->sampling_rate);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -135,7 +124,7 @@
 	if (!dbs_info->enable)
 		return 0;
 
-	policy = dbs_info->cdbs.cur_policy;
+	policy = dbs_info->cdbs.shared->policy;
 
 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 57a39f8..939197f 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -32,10 +32,10 @@
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	struct cpufreq_policy *policy;
+	struct cpufreq_policy *policy = cdbs->shared->policy;
 	unsigned int sampling_rate;
 	unsigned int max_load = 0;
 	unsigned int ignore_nice;
@@ -60,11 +60,9 @@
 		ignore_nice = cs_tuners->ignore_nice_load;
 	}
 
-	policy = cdbs->cur_policy;
-
 	/* Get Absolute Load */
 	for_each_cpu(j, policy->cpus) {
-		struct cpu_dbs_common_info *j_cdbs;
+		struct cpu_dbs_info *j_cdbs;
 		u64 cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 		unsigned int load;
@@ -163,9 +161,9 @@
 static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
 		unsigned int delay)
 {
-	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
 
-	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
+	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
 }
 
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
@@ -199,33 +197,63 @@
 static inline void gov_cancel_work(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy)
 {
-	struct cpu_dbs_common_info *cdbs;
+	struct cpu_dbs_info *cdbs;
 	int i;
 
 	for_each_cpu(i, policy->cpus) {
 		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
-		cancel_delayed_work_sync(&cdbs->work);
+		cancel_delayed_work_sync(&cdbs->dwork);
 	}
 }
 
 /* Will return if we need to evaluate cpu load again or not */
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-		unsigned int sampling_rate)
+static bool need_load_eval(struct cpu_common_dbs_info *shared,
+			   unsigned int sampling_rate)
 {
-	if (policy_is_shared(cdbs->cur_policy)) {
+	if (policy_is_shared(shared->policy)) {
 		ktime_t time_now = ktime_get();
-		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+		s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);
 
 		/* Do nothing if we recently have sampled */
 		if (delta_us < (s64)(sampling_rate / 2))
 			return false;
 		else
-			cdbs->time_stamp = time_now;
+			shared->time_stamp = time_now;
 	}
 
 	return true;
 }
-EXPORT_SYMBOL_GPL(need_load_eval);
+
+static void dbs_timer(struct work_struct *work)
+{
+	struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
+						 dwork.work);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
+	struct cpufreq_policy *policy = shared->policy;
+	struct dbs_data *dbs_data = policy->governor_data;
+	unsigned int sampling_rate, delay;
+	bool modify_all = true;
+
+	mutex_lock(&shared->timer_mutex);
+
+	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+
+		sampling_rate = cs_tuners->sampling_rate;
+	} else {
+		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+		sampling_rate = od_tuners->sampling_rate;
+	}
+
+	if (!need_load_eval(cdbs->shared, sampling_rate))
+		modify_all = false;
+
+	delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
+	gov_queue_work(dbs_data, policy, delay, modify_all);
+
+	mutex_unlock(&shared->timer_mutex);
+}
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
 		unsigned int sampling_rate)
@@ -239,6 +267,37 @@
 	}
 }
 
+static int alloc_common_dbs_info(struct cpufreq_policy *policy,
+				 struct common_dbs_data *cdata)
+{
+	struct cpu_common_dbs_info *shared;
+	int j;
+
+	/* Allocate memory for the common information for policy->cpus */
+	shared = kzalloc(sizeof(*shared), GFP_KERNEL);
+	if (!shared)
+		return -ENOMEM;
+
+	/* Set shared for all CPUs, online+offline */
+	for_each_cpu(j, policy->related_cpus)
+		cdata->get_cpu_cdbs(j)->shared = shared;
+
+	return 0;
+}
+
+static void free_common_dbs_info(struct cpufreq_policy *policy,
+				 struct common_dbs_data *cdata)
+{
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
+	int j;
+
+	for_each_cpu(j, policy->cpus)
+		cdata->get_cpu_cdbs(j)->shared = NULL;
+
+	kfree(shared);
+}
+
 static int cpufreq_governor_init(struct cpufreq_policy *policy,
 				 struct dbs_data *dbs_data,
 				 struct common_dbs_data *cdata)
@@ -246,9 +305,18 @@
 	unsigned int latency;
 	int ret;
 
+	/* State should be equivalent to EXIT */
+	if (policy->governor_data)
+		return -EBUSY;
+
 	if (dbs_data) {
 		if (WARN_ON(have_governor_per_policy()))
 			return -EINVAL;
+
+		ret = alloc_common_dbs_info(policy, cdata);
+		if (ret)
+			return ret;
+
 		dbs_data->usage_count++;
 		policy->governor_data = dbs_data;
 		return 0;
@@ -258,12 +326,16 @@
 	if (!dbs_data)
 		return -ENOMEM;
 
+	ret = alloc_common_dbs_info(policy, cdata);
+	if (ret)
+		goto free_dbs_data;
+
 	dbs_data->cdata = cdata;
 	dbs_data->usage_count = 1;
 
 	ret = cdata->init(dbs_data, !policy->governor->initialized);
 	if (ret)
-		goto free_dbs_data;
+		goto free_common_dbs_info;
 
 	/* policy latency is in ns. Convert it to us first */
 	latency = policy->cpuinfo.transition_latency / 1000;
@@ -300,15 +372,22 @@
 	}
 cdata_exit:
 	cdata->exit(dbs_data, !policy->governor->initialized);
+free_common_dbs_info:
+	free_common_dbs_info(policy, cdata);
 free_dbs_data:
 	kfree(dbs_data);
 	return ret;
 }
 
-static void cpufreq_governor_exit(struct cpufreq_policy *policy,
-				  struct dbs_data *dbs_data)
+static int cpufreq_governor_exit(struct cpufreq_policy *policy,
+				 struct dbs_data *dbs_data)
 {
 	struct common_dbs_data *cdata = dbs_data->cdata;
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
+
+	/* State should be equivalent to INIT */
+	if (!cdbs->shared || cdbs->shared->policy)
+		return -EBUSY;
 
 	policy->governor_data = NULL;
 	if (!--dbs_data->usage_count) {
@@ -323,6 +402,9 @@
 		cdata->exit(dbs_data, policy->governor->initialized == 1);
 		kfree(dbs_data);
 	}
+
+	free_common_dbs_info(policy, cdata);
+	return 0;
 }
 
 static int cpufreq_governor_start(struct cpufreq_policy *policy,
@@ -330,12 +412,17 @@
 {
 	struct common_dbs_data *cdata = dbs_data->cdata;
 	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
-	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
 	int io_busy = 0;
 
 	if (!policy->cur)
 		return -EINVAL;
 
+	/* State should be equivalent to INIT */
+	if (!shared || shared->policy)
+		return -EBUSY;
+
 	if (cdata->governor == GOV_CONSERVATIVE) {
 		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
@@ -349,12 +436,14 @@
 		io_busy = od_tuners->io_is_busy;
 	}
 
+	shared->policy = policy;
+	shared->time_stamp = ktime_get();
+	mutex_init(&shared->timer_mutex);
+
 	for_each_cpu(j, policy->cpus) {
-		struct cpu_dbs_common_info *j_cdbs = cdata->get_cpu_cdbs(j);
+		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
 		unsigned int prev_load;
 
-		j_cdbs->cpu = j;
-		j_cdbs->cur_policy = policy;
 		j_cdbs->prev_cpu_idle =
 			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);
 
@@ -366,8 +455,7 @@
 		if (ignore_nice)
 			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-		mutex_init(&j_cdbs->timer_mutex);
-		INIT_DEFERRABLE_WORK(&j_cdbs->work, cdata->gov_dbs_timer);
+		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, dbs_timer);
 	}
 
 	if (cdata->governor == GOV_CONSERVATIVE) {
@@ -386,20 +474,24 @@
 		od_ops->powersave_bias_init_cpu(cpu);
 	}
 
-	/* Initiate timer time stamp */
-	cpu_cdbs->time_stamp = ktime_get();
-
 	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
 		       true);
 	return 0;
 }
 
-static void cpufreq_governor_stop(struct cpufreq_policy *policy,
-				  struct dbs_data *dbs_data)
+static int cpufreq_governor_stop(struct cpufreq_policy *policy,
+				 struct dbs_data *dbs_data)
 {
 	struct common_dbs_data *cdata = dbs_data->cdata;
 	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_common_dbs_info *shared = cdbs->shared;
+
+	/* State should be equivalent to START */
+	if (!shared || !shared->policy)
+		return -EBUSY;
+
+	gov_cancel_work(dbs_data, policy);
 
 	if (cdata->governor == GOV_CONSERVATIVE) {
 		struct cs_cpu_dbs_info_s *cs_dbs_info =
@@ -408,38 +500,40 @@
 		cs_dbs_info->enable = 0;
 	}
 
-	gov_cancel_work(dbs_data, policy);
-
-	mutex_destroy(&cpu_cdbs->timer_mutex);
-	cpu_cdbs->cur_policy = NULL;
+	shared->policy = NULL;
+	mutex_destroy(&shared->timer_mutex);
+	return 0;
 }
 
-static void cpufreq_governor_limits(struct cpufreq_policy *policy,
-				    struct dbs_data *dbs_data)
+static int cpufreq_governor_limits(struct cpufreq_policy *policy,
+				   struct dbs_data *dbs_data)
 {
 	struct common_dbs_data *cdata = dbs_data->cdata;
 	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_common_info *cpu_cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
 
-	if (!cpu_cdbs->cur_policy)
-		return;
+	/* State should be equivalent to START */
+	if (!cdbs->shared || !cdbs->shared->policy)
+		return -EBUSY;
 
-	mutex_lock(&cpu_cdbs->timer_mutex);
-	if (policy->max < cpu_cdbs->cur_policy->cur)
-		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->max,
+	mutex_lock(&cdbs->shared->timer_mutex);
+	if (policy->max < cdbs->shared->policy->cur)
+		__cpufreq_driver_target(cdbs->shared->policy, policy->max,
 					CPUFREQ_RELATION_H);
-	else if (policy->min > cpu_cdbs->cur_policy->cur)
-		__cpufreq_driver_target(cpu_cdbs->cur_policy, policy->min,
+	else if (policy->min > cdbs->shared->policy->cur)
+		__cpufreq_driver_target(cdbs->shared->policy, policy->min,
 					CPUFREQ_RELATION_L);
 	dbs_check_cpu(dbs_data, cpu);
-	mutex_unlock(&cpu_cdbs->timer_mutex);
+	mutex_unlock(&cdbs->shared->timer_mutex);
+
+	return 0;
 }
 
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			 struct common_dbs_data *cdata, unsigned int event)
 {
 	struct dbs_data *dbs_data;
-	int ret = 0;
+	int ret;
 
 	/* Lock governor to block concurrent initialization of governor */
 	mutex_lock(&cdata->mutex);
@@ -449,7 +543,7 @@
 	else
 		dbs_data = cdata->gdbs_data;
 
-	if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
+	if (!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)) {
 		ret = -EINVAL;
 		goto unlock;
 	}
@@ -459,17 +553,19 @@
 		ret = cpufreq_governor_init(policy, dbs_data, cdata);
 		break;
 	case CPUFREQ_GOV_POLICY_EXIT:
-		cpufreq_governor_exit(policy, dbs_data);
+		ret = cpufreq_governor_exit(policy, dbs_data);
 		break;
 	case CPUFREQ_GOV_START:
 		ret = cpufreq_governor_start(policy, dbs_data);
 		break;
 	case CPUFREQ_GOV_STOP:
-		cpufreq_governor_stop(policy, dbs_data);
+		ret = cpufreq_governor_stop(policy, dbs_data);
 		break;
 	case CPUFREQ_GOV_LIMITS:
-		cpufreq_governor_limits(policy, dbs_data);
+		ret = cpufreq_governor_limits(policy, dbs_data);
 		break;
+	default:
+		ret = -EINVAL;
 	}
 
 unlock:
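
The governor rework above hangs all policy-wide state (the policy pointer,
timer_mutex and time_stamp) off one cpu_common_dbs_info and points every
per-CPU cpu_dbs_info at it. The following is a minimal user-space sketch of
that wiring, roughly what alloc_common_dbs_info() sets up; the struct and
function names here are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    #define NCPUS 4

    struct common_info {                /* like cpu_common_dbs_info */
            int policy_cpu;
            /* in the kernel: policy pointer, timer_mutex, time_stamp */
    };

    struct percpu_info {                /* like cpu_dbs_info */
            struct common_info *shared;
    };

    static struct percpu_info cpu_info[NCPUS];

    static int alloc_common(const int cpus[], int n, int policy_cpu)
    {
            struct common_info *shared = calloc(1, sizeof(*shared));

            if (!shared)
                    return -1;
            shared->policy_cpu = policy_cpu;

            /* every CPU of the policy shares the same object */
            for (int i = 0; i < n; i++)
                    cpu_info[cpus[i]].shared = shared;
            return 0;
    }

    int main(void)
    {
            int cluster[] = { 0, 1 };   /* CPUs governed by one policy */

            if (alloc_common(cluster, 2, 0))
                    return 1;

            printf("cpu1 shares policy of cpu%d\n",
                   cpu_info[1].shared->policy_cpu);
            free(cpu_info[0].shared);
            return 0;
    }
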
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 34736f5..50f1717 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -109,7 +109,7 @@
 
 /* create helper routines */
 #define define_get_cpu_dbs_routines(_dbs_info)				\
-static struct cpu_dbs_common_info *get_cpu_cdbs(int cpu)		\
+static struct cpu_dbs_info *get_cpu_cdbs(int cpu)			\
 {									\
 	return &per_cpu(_dbs_info, cpu).cdbs;				\
 }									\
@@ -128,9 +128,20 @@
  * cs_*: Conservative governor
  */
 
+/* Common to all CPUs of a policy */
+struct cpu_common_dbs_info {
+	struct cpufreq_policy *policy;
+	/*
+	 * Per-policy mutex that serializes governor limit change with dbs_timer
+	 * invocation. We do not want dbs_timer to run when user is changing
+	 * the governor or limits.
+	 */
+	struct mutex timer_mutex;
+	ktime_t time_stamp;
+};
+
 /* Per cpu structures */
-struct cpu_dbs_common_info {
-	int cpu;
+struct cpu_dbs_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;
@@ -141,19 +152,12 @@
 	 * wake-up from idle.
 	 */
 	unsigned int prev_load;
-	struct cpufreq_policy *cur_policy;
-	struct delayed_work work;
-	/*
-	 * percpu mutex that serializes governor limit change with gov_dbs_timer
-	 * invocation. We do not want gov_dbs_timer to run when user is changing
-	 * the governor or limits.
-	 */
-	struct mutex timer_mutex;
-	ktime_t time_stamp;
+	struct delayed_work dwork;
+	struct cpu_common_dbs_info *shared;
 };
 
 struct od_cpu_dbs_info_s {
-	struct cpu_dbs_common_info cdbs;
+	struct cpu_dbs_info cdbs;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
@@ -163,7 +167,7 @@
 };
 
 struct cs_cpu_dbs_info_s {
-	struct cpu_dbs_common_info cdbs;
+	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
 	unsigned int enable:1;
@@ -204,9 +208,11 @@
 	 */
 	struct dbs_data *gdbs_data;
 
-	struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
+	struct cpu_dbs_info *(*get_cpu_cdbs)(int cpu);
 	void *(*get_cpu_dbs_info_s)(int cpu);
-	void (*gov_dbs_timer)(struct work_struct *work);
+	unsigned int (*gov_dbs_timer)(struct cpu_dbs_info *cdbs,
+				      struct dbs_data *dbs_data,
+				      bool modify_all);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
 	int (*init)(struct dbs_data *dbs_data, bool notify);
 	void (*exit)(struct dbs_data *dbs_data, bool notify);
@@ -265,8 +271,6 @@
 extern struct mutex cpufreq_governor_lock;
 
 void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
-bool need_load_eval(struct cpu_dbs_common_info *cdbs,
-		unsigned int sampling_rate);
 int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		struct common_dbs_data *cdata, unsigned int event);
 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
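
The header change above also shows the new gov_dbs_timer contract: the
per-governor callback only computes the next sampling delay, while the common
dbs_timer() owns the shared mutex and re-queues the work. A hedged sketch of
that split, with invented names and the locking reduced to comments:

    #include <stdio.h>

    struct governor_ops {
            /* returns the delay (in "jiffies") until the next sample */
            unsigned int (*timer)(int cpu, int modify_all);
    };

    static unsigned int ondemand_timer(int cpu, int modify_all)
    {
            if (modify_all)
                    printf("evaluating load on cpu%d\n", cpu);
            return 10;                  /* next sample in 10 ticks */
    }

    static void common_dbs_timer(const struct governor_ops *ops, int cpu)
    {
            /* the common code takes the shared timer mutex here ... */
            unsigned int delay = ops->timer(cpu, 1);

            printf("re-queue work in %u ticks\n", delay);
            /* ... drops the mutex, then gov_queue_work(..., delay, ...) */
    }

    int main(void)
    {
            struct governor_ops od = { .timer = ondemand_timer };

            common_dbs_timer(&od, 0);
            return 0;
    }
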
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3c1e10f..1fa9088 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -155,7 +155,7 @@
 static void od_check_cpu(int cpu, unsigned int load)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
 	struct dbs_data *dbs_data = policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
@@ -191,46 +191,40 @@
 	}
 }
 
-static void od_dbs_timer(struct work_struct *work)
+static unsigned int od_dbs_timer(struct cpu_dbs_info *cdbs,
+				 struct dbs_data *dbs_data, bool modify_all)
 {
-	struct od_cpu_dbs_info_s *dbs_info =
-		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
-	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
+	struct cpufreq_policy *policy = cdbs->shared->policy;
+	unsigned int cpu = policy->cpu;
+	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
 			cpu);
-	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-	int delay = 0, sample_type = core_dbs_info->sample_type;
-	bool modify_all = true;
+	int delay = 0, sample_type = dbs_info->sample_type;
 
-	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
-	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
-		modify_all = false;
+	if (!modify_all)
 		goto max_delay;
-	}
 
 	/* Common NORMAL_SAMPLE setup */
-	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
+	dbs_info->sample_type = OD_NORMAL_SAMPLE;
 	if (sample_type == OD_SUB_SAMPLE) {
-		delay = core_dbs_info->freq_lo_jiffies;
-		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
-				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
+		delay = dbs_info->freq_lo_jiffies;
+		__cpufreq_driver_target(policy, dbs_info->freq_lo,
+					CPUFREQ_RELATION_H);
 	} else {
 		dbs_check_cpu(dbs_data, cpu);
-		if (core_dbs_info->freq_lo) {
+		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
-			core_dbs_info->sample_type = OD_SUB_SAMPLE;
-			delay = core_dbs_info->freq_hi_jiffies;
+			dbs_info->sample_type = OD_SUB_SAMPLE;
+			delay = dbs_info->freq_hi_jiffies;
 		}
 	}
 
 max_delay:
 	if (!delay)
 		delay = delay_for_sampling_rate(od_tuners->sampling_rate
-				* core_dbs_info->rate_mult);
+				* dbs_info->rate_mult);
 
-	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
-	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
+	return delay;
 }
 
 /************************** sysfs interface ************************/
@@ -273,27 +267,27 @@
 		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 		cpufreq_cpu_put(policy);
 
-		mutex_lock(&dbs_info->cdbs.timer_mutex);
+		mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
-		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
-			mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		if (!delayed_work_pending(&dbs_info->cdbs.dwork)) {
+			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 			continue;
 		}
 
 		next_sampling = jiffies + usecs_to_jiffies(new_rate);
-		appointed_at = dbs_info->cdbs.work.timer.expires;
+		appointed_at = dbs_info->cdbs.dwork.timer.expires;
 
 		if (time_before(next_sampling, appointed_at)) {
 
-			mutex_unlock(&dbs_info->cdbs.timer_mutex);
-			cancel_delayed_work_sync(&dbs_info->cdbs.work);
-			mutex_lock(&dbs_info->cdbs.timer_mutex);
+			mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
+			cancel_delayed_work_sync(&dbs_info->cdbs.dwork);
+			mutex_lock(&dbs_info->cdbs.shared->timer_mutex);
 
-			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
-					usecs_to_jiffies(new_rate), true);
+			gov_queue_work(dbs_data, policy,
+				       usecs_to_jiffies(new_rate), true);
 
 		}
-		mutex_unlock(&dbs_info->cdbs.timer_mutex);
+		mutex_unlock(&dbs_info->cdbs.shared->timer_mutex);
 	}
 }
 
@@ -556,13 +550,16 @@
 
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
+		struct cpu_common_dbs_info *shared;
+
 		if (cpumask_test_cpu(cpu, &done))
 			continue;
 
-		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-		if (!policy)
+		shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared;
+		if (!shared)
 			continue;
 
+		policy = shared->policy;
 		cpumask_or(&done, &done, policy->cpus);
 
 		if (policy->governor != &cpufreq_gov_ondemand)
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c
index 773bcde..0f5e6d5 100644
--- a/drivers/cpufreq/cpufreq_opp.c
+++ b/drivers/cpufreq/cpufreq_opp.c
@@ -75,6 +75,10 @@
 		}
 		freq_table[i].driver_data = i;
 		freq_table[i].frequency = rate / 1000;
+
+		/* Is this a boost/turbo OPP? */
+		if (dev_pm_opp_is_turbo(opp))
+			freq_table[i].flags = CPUFREQ_BOOST_FREQ;
 	}
 
 	freq_table[i].driver_data = i;
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index a0d2a42..4085244c 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -78,7 +78,7 @@
 static int eps_acpi_exit(struct cpufreq_policy *policy)
 {
 	if (eps_acpi_cpu_perf) {
-		acpi_processor_unregister_performance(eps_acpi_cpu_perf, 0);
+		acpi_processor_unregister_performance(0);
 		free_cpumask_var(eps_acpi_cpu_perf->shared_cpu_map);
 		kfree(eps_acpi_cpu_perf);
 		eps_acpi_cpu_perf = NULL;
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index dfbbf981..a8f1daf 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -18,6 +18,21 @@
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
 
+bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+
+	if (!table)
+		return false;
+
+	cpufreq_for_each_valid_entry(pos, table)
+		if (pos->flags & CPUFREQ_BOOST_FREQ)
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL_GPL(policy_has_boost_freq);
+
 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 				    struct cpufreq_frequency_table *table)
 {
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index c30aaa6..0202429 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -29,7 +29,6 @@
 
 struct cpufreq_acpi_io {
 	struct acpi_processor_performance	acpi_data;
-	struct cpufreq_frequency_table		*freq_table;
 	unsigned int				resume;
 };
 
@@ -221,6 +220,7 @@
 	unsigned int		cpu = policy->cpu;
 	struct cpufreq_acpi_io	*data;
 	unsigned int		result = 0;
+	struct cpufreq_frequency_table *freq_table;
 
 	pr_debug("acpi_cpufreq_cpu_init\n");
 
@@ -254,10 +254,10 @@
 	}
 
 	/* alloc freq_table */
-	data->freq_table = kzalloc(sizeof(*data->freq_table) *
+	freq_table = kzalloc(sizeof(*freq_table) *
 	                           (data->acpi_data.state_count + 1),
 	                           GFP_KERNEL);
-	if (!data->freq_table) {
+	if (!freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
 	}
@@ -276,14 +276,14 @@
 	for (i = 0; i <= data->acpi_data.state_count; i++)
 	{
 		if (i < data->acpi_data.state_count) {
-			data->freq_table[i].frequency =
+			freq_table[i].frequency =
 			      data->acpi_data.states[i].core_frequency * 1000;
 		} else {
-			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+			freq_table[i].frequency = CPUFREQ_TABLE_END;
 		}
 	}
 
-	result = cpufreq_table_validate_and_show(policy, data->freq_table);
+	result = cpufreq_table_validate_and_show(policy, freq_table);
 	if (result) {
 		goto err_freqfree;
 	}
@@ -311,9 +311,9 @@
 	return (result);
 
  err_freqfree:
-	kfree(data->freq_table);
+	kfree(freq_table);
  err_unreg:
-	acpi_processor_unregister_performance(&data->acpi_data, cpu);
+	acpi_processor_unregister_performance(cpu);
  err_free:
 	kfree(data);
 	acpi_io_data[cpu] = NULL;
@@ -332,8 +332,8 @@
 
 	if (data) {
 		acpi_io_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(&data->acpi_data,
-		                                      policy->cpu);
+		acpi_processor_unregister_performance(policy->cpu);
+		kfree(policy->freq_table);
 		kfree(data);
 	}
 
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 129e266..2faa421 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -98,11 +98,10 @@
 	/* get current setting */
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
 
-	if (machine_is_integrator()) {
+	if (machine_is_integrator())
 		vco.s = (cm_osc >> 8) & 7;
-	} else if (machine_is_cintegrator()) {
+	else if (machine_is_cintegrator())
 		vco.s = 1;
-	}
 	vco.v = cm_osc & 255;
 	vco.r = 22;
 	freqs.old = icst_hz(&cclk_params, vco) / 1000;
@@ -163,11 +162,10 @@
 	/* detect memory etc. */
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
 
-	if (machine_is_integrator()) {
+	if (machine_is_integrator())
 		vco.s = (cm_osc >> 8) & 7;
-	} else {
+	else
 		vco.s = 1;
-	}
 	vco.v = cm_osc & 255;
 	vco.r = 22;
 
@@ -203,7 +201,7 @@
 	struct resource *res;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        if (!res)
+	if (!res)
 		return -ENODEV;
 
 	cm_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
@@ -234,6 +232,6 @@
 module_platform_driver_probe(integrator_cpufreq_driver,
 			     integrator_cpufreq_probe);
 
-MODULE_AUTHOR ("Russell M. King");
-MODULE_DESCRIPTION ("cpufreq driver for ARM Integrator CPUs");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Russell M. King");
+MODULE_DESCRIPTION("cpufreq driver for ARM Integrator CPUs");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fcb929e..cddc619 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -484,12 +484,11 @@
 }
 /************************** sysfs end ************************/
 
-static void intel_pstate_hwp_enable(void)
+static void intel_pstate_hwp_enable(struct cpudata *cpudata)
 {
-	hwp_active++;
 	pr_info("intel_pstate: HWP enabled\n");
 
-	wrmsrl( MSR_PM_ENABLE, 0x1);
+	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
 static int byt_get_min_pstate(void)
@@ -522,7 +521,7 @@
 	int32_t vid_fp;
 	u32 vid;
 
-	val = pstate << 8;
+	val = (u64)pstate << 8;
 	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;
 
@@ -611,7 +610,7 @@
 {
 	u64 val;
 
-	val = pstate << 8;
+	val = (u64)pstate << 8;
 	if (limits.no_turbo && !limits.turbo_disabled)
 		val |= (u64)1 << 32;
 
@@ -766,7 +765,7 @@
 	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-	tsc = native_read_tsc();
+	tsc = rdtsc();
 	local_irq_restore(flags);
 
 	cpu->last_sample_time = cpu->sample.time;
@@ -909,6 +908,7 @@
 	ICPU(0x4c, byt_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
+	ICPU(0x5e, core_params),
 	ICPU(0x56, core_params),
 	ICPU(0x57, knl_params),
 	{}
@@ -933,6 +933,10 @@
 	cpu = all_cpu_data[cpunum];
 
 	cpu->cpu = cpunum;
+
+	if (hwp_active)
+		intel_pstate_hwp_enable(cpu);
+
 	intel_pstate_get_cpu_pstates(cpu);
 
 	init_timer_deferrable(&cpu->timer);
@@ -1170,6 +1174,10 @@
 	{1, "ORACLE", "X4270M3 ", PPC},
 	{1, "ORACLE", "X4270M2 ", PPC},
 	{1, "ORACLE", "X4170M2 ", PPC},
+	{1, "ORACLE", "X4170 M3", PPC},
+	{1, "ORACLE", "X4275 M3", PPC},
+	{1, "ORACLE", "X6-2    ", PPC},
+	{1, "ORACLE", "Sudbury ", PPC},
 	{0, "", ""},
 };
 
@@ -1246,7 +1254,7 @@
 		return -ENOMEM;
 
 	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
-		intel_pstate_hwp_enable();
+		hwp_active++;
 
 	if (!hwp_active && hwp_only)
 		goto out;
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
new file mode 100644
index 0000000..49caed2
--- /dev/null
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2015 Linaro Ltd.
+ * Author: Pi-Cheng Chen <pi-cheng.chen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define MIN_VOLT_SHIFT		(100000)
+#define MAX_VOLT_SHIFT		(200000)
+#define MAX_VOLT_LIMIT		(1150000)
+#define VOLT_TOL		(10000)
+
+/*
+ * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS
+ * on each CPU power/clock domain of Mediatek SoCs. Each CPU cluster in
+ * Mediatek SoCs has two voltage inputs, Vproc and Vsram. In some cases the two
+ * voltage inputs need to be controlled under a hardware limitation:
+ * 100mV < Vsram - Vproc < 200mV
+ *
+ * When scaling the clock frequency of a CPU clock domain, the clock source
+ * needs to be switched to another stable PLL clock temporarily until
+ * the original PLL becomes stable at target frequency.
+ */
+struct mtk_cpu_dvfs_info {
+	struct device *cpu_dev;
+	struct regulator *proc_reg;
+	struct regulator *sram_reg;
+	struct clk *cpu_clk;
+	struct clk *inter_clk;
+	struct thermal_cooling_device *cdev;
+	int intermediate_voltage;
+	bool need_voltage_tracking;
+};
+
+static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
+					int new_vproc)
+{
+	struct regulator *proc_reg = info->proc_reg;
+	struct regulator *sram_reg = info->sram_reg;
+	int old_vproc, old_vsram, new_vsram, vsram, vproc, ret;
+
+	old_vproc = regulator_get_voltage(proc_reg);
+	old_vsram = regulator_get_voltage(sram_reg);
+	/* Vsram should not exceed the maximum allowed voltage of SoC. */
+	new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT);
+
+	if (old_vproc < new_vproc) {
+		/*
+		 * When scaling up voltages, Vsram and Vproc scale up step
+		 * by step. At each step, set Vsram to (Vproc + 200mV) first,
+		 * then set Vproc to (Vsram - 100mV).
+		 * Keep doing it until Vsram and Vproc hit target voltages.
+		 */
+		do {
+			old_vsram = regulator_get_voltage(sram_reg);
+			old_vproc = regulator_get_voltage(proc_reg);
+
+			vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT);
+
+			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
+				vsram = MAX_VOLT_LIMIT;
+
+				/*
+				 * If the target Vsram hits the maximum voltage,
+				 * try to set the exact voltage value first.
+				 */
+				ret = regulator_set_voltage(sram_reg, vsram,
+							    vsram);
+				if (ret)
+					ret = regulator_set_voltage(sram_reg,
+							vsram - VOLT_TOL,
+							vsram);
+
+				vproc = new_vproc;
+			} else {
+				ret = regulator_set_voltage(sram_reg, vsram,
+							    vsram + VOLT_TOL);
+
+				vproc = vsram - MIN_VOLT_SHIFT;
+			}
+			if (ret)
+				return ret;
+
+			ret = regulator_set_voltage(proc_reg, vproc,
+						    vproc + VOLT_TOL);
+			if (ret) {
+				regulator_set_voltage(sram_reg, old_vsram,
+						      old_vsram);
+				return ret;
+			}
+		} while (vproc < new_vproc || vsram < new_vsram);
+	} else if (old_vproc > new_vproc) {
+		/*
+		 * When scaling down voltages, Vsram and Vproc scale down step
+		 * by step. At each step, set Vproc to (Vsram - 200mV) first,
+		 * then set Vsram to (Vproc + 100mV).
+		 * Keep doing it until Vsram and Vproc hit target voltages.
+		 */
+		do {
+			old_vproc = regulator_get_voltage(proc_reg);
+			old_vsram = regulator_get_voltage(sram_reg);
+
+			vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT);
+			ret = regulator_set_voltage(proc_reg, vproc,
+						    vproc + VOLT_TOL);
+			if (ret)
+				return ret;
+
+			if (vproc == new_vproc)
+				vsram = new_vsram;
+			else
+				vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT);
+
+			if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) {
+				vsram = MAX_VOLT_LIMIT;
+
+				/*
+				 * If the target Vsram hits the maximum voltage,
+				 * try to set the exact voltage value first.
+				 */
+				ret = regulator_set_voltage(sram_reg, vsram,
+							    vsram);
+				if (ret)
+					ret = regulator_set_voltage(sram_reg,
+							vsram - VOLT_TOL,
+							vsram);
+			} else {
+				ret = regulator_set_voltage(sram_reg, vsram,
+							    vsram + VOLT_TOL);
+			}
+
+			if (ret) {
+				regulator_set_voltage(proc_reg, old_vproc,
+						      old_vproc);
+				return ret;
+			}
+		} while (vproc > new_vproc + VOLT_TOL ||
+			 vsram > new_vsram + VOLT_TOL);
+	}
+
+	return 0;
+}
+
+static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc)
+{
+	if (info->need_voltage_tracking)
+		return mtk_cpufreq_voltage_tracking(info, vproc);
+	else
+		return regulator_set_voltage(info->proc_reg, vproc,
+					     vproc + VOLT_TOL);
+}
+
+static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
+				  unsigned int index)
+{
+	struct cpufreq_frequency_table *freq_table = policy->freq_table;
+	struct clk *cpu_clk = policy->clk;
+	struct clk *armpll = clk_get_parent(cpu_clk);
+	struct mtk_cpu_dvfs_info *info = policy->driver_data;
+	struct device *cpu_dev = info->cpu_dev;
+	struct dev_pm_opp *opp;
+	long freq_hz, old_freq_hz;
+	int vproc, old_vproc, inter_vproc, target_vproc, ret;
+
+	inter_vproc = info->intermediate_voltage;
+
+	old_freq_hz = clk_get_rate(cpu_clk);
+	old_vproc = regulator_get_voltage(info->proc_reg);
+
+	freq_hz = freq_table[index].frequency * 1000;
+
+	rcu_read_lock();
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
+	if (IS_ERR(opp)) {
+		rcu_read_unlock();
+		pr_err("cpu%d: failed to find OPP for %ld\n",
+		       policy->cpu, freq_hz);
+		return PTR_ERR(opp);
+	}
+	vproc = dev_pm_opp_get_voltage(opp);
+	rcu_read_unlock();
+
+	/*
+	 * If the new voltage or the intermediate voltage is higher than the
+	 * current voltage, scale up voltage first.
+	 */
+	target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc;
+	if (old_vproc < target_vproc) {
+		ret = mtk_cpufreq_set_voltage(info, target_vproc);
+		if (ret) {
+			pr_err("cpu%d: failed to scale up voltage!\n",
+			       policy->cpu);
+			mtk_cpufreq_set_voltage(info, old_vproc);
+			return ret;
+		}
+	}
+
+	/* Reparent the CPU clock to intermediate clock. */
+	ret = clk_set_parent(cpu_clk, info->inter_clk);
+	if (ret) {
+		pr_err("cpu%d: failed to re-parent cpu clock!\n",
+		       policy->cpu);
+		mtk_cpufreq_set_voltage(info, old_vproc);
+		WARN_ON(1);
+		return ret;
+	}
+
+	/* Set the original PLL to target rate. */
+	ret = clk_set_rate(armpll, freq_hz);
+	if (ret) {
+		pr_err("cpu%d: failed to scale cpu clock rate!\n",
+		       policy->cpu);
+		clk_set_parent(cpu_clk, armpll);
+		mtk_cpufreq_set_voltage(info, old_vproc);
+		return ret;
+	}
+
+	/* Set parent of CPU clock back to the original PLL. */
+	ret = clk_set_parent(cpu_clk, armpll);
+	if (ret) {
+		pr_err("cpu%d: failed to re-parent cpu clock!\n",
+		       policy->cpu);
+		mtk_cpufreq_set_voltage(info, inter_vproc);
+		WARN_ON(1);
+		return ret;
+	}
+
+	/*
+	 * If the new voltage is lower than the intermediate voltage or the
+	 * original voltage, scale down to the new voltage.
+	 */
+	if (vproc < inter_vproc || vproc < old_vproc) {
+		ret = mtk_cpufreq_set_voltage(info, vproc);
+		if (ret) {
+			pr_err("cpu%d: failed to scale down voltage!\n",
+			       policy->cpu);
+			clk_set_parent(cpu_clk, info->inter_clk);
+			clk_set_rate(armpll, old_freq_hz);
+			clk_set_parent(cpu_clk, armpll);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	struct mtk_cpu_dvfs_info *info = policy->driver_data;
+	struct device_node *np = of_node_get(info->cpu_dev->of_node);
+
+	if (WARN_ON(!np))
+		return;
+
+	if (of_find_property(np, "#cooling-cells", NULL)) {
+		info->cdev = of_cpufreq_cooling_register(np,
+							 policy->related_cpus);
+
+		if (IS_ERR(info->cdev)) {
+			dev_err(info->cpu_dev,
+				"running cpufreq without cooling device: %ld\n",
+				PTR_ERR(info->cdev));
+
+			info->cdev = NULL;
+		}
+	}
+
+	of_node_put(np);
+}
+
+static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
+{
+	struct device *cpu_dev;
+	struct regulator *proc_reg = ERR_PTR(-ENODEV);
+	struct regulator *sram_reg = ERR_PTR(-ENODEV);
+	struct clk *cpu_clk = ERR_PTR(-ENODEV);
+	struct clk *inter_clk = ERR_PTR(-ENODEV);
+	struct dev_pm_opp *opp;
+	unsigned long rate;
+	int ret;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev) {
+		pr_err("failed to get cpu%d device\n", cpu);
+		return -ENODEV;
+	}
+
+	cpu_clk = clk_get(cpu_dev, "cpu");
+	if (IS_ERR(cpu_clk)) {
+		if (PTR_ERR(cpu_clk) == -EPROBE_DEFER)
+			pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu);
+		else
+			pr_err("failed to get cpu clk for cpu%d\n", cpu);
+
+		ret = PTR_ERR(cpu_clk);
+		return ret;
+	}
+
+	inter_clk = clk_get(cpu_dev, "intermediate");
+	if (IS_ERR(inter_clk)) {
+		if (PTR_ERR(inter_clk) == -EPROBE_DEFER)
+			pr_warn("intermediate clk for cpu%d not ready, retry.\n",
+				cpu);
+		else
+			pr_err("failed to get intermediate clk for cpu%d\n",
+			       cpu);
+
+		ret = PTR_ERR(inter_clk);
+		goto out_free_resources;
+	}
+
+	proc_reg = regulator_get_exclusive(cpu_dev, "proc");
+	if (IS_ERR(proc_reg)) {
+		if (PTR_ERR(proc_reg) == -EPROBE_DEFER)
+			pr_warn("proc regulator for cpu%d not ready, retry.\n",
+				cpu);
+		else
+			pr_err("failed to get proc regulator for cpu%d\n",
+			       cpu);
+
+		ret = PTR_ERR(proc_reg);
+		goto out_free_resources;
+	}
+
+	/* Both presence and absence of sram regulator are valid cases. */
+	sram_reg = regulator_get_exclusive(cpu_dev, "sram");
+
+	ret = of_init_opp_table(cpu_dev);
+	if (ret) {
+		pr_warn("no OPP table for cpu%d\n", cpu);
+		goto out_free_resources;
+	}
+
+	/* Search a safe voltage for intermediate frequency. */
+	rate = clk_get_rate(inter_clk);
+	rcu_read_lock();
+	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+	if (IS_ERR(opp)) {
+		rcu_read_unlock();
+		pr_err("failed to get intermediate opp for cpu%d\n", cpu);
+		ret = PTR_ERR(opp);
+		goto out_free_opp_table;
+	}
+	info->intermediate_voltage = dev_pm_opp_get_voltage(opp);
+	rcu_read_unlock();
+
+	info->cpu_dev = cpu_dev;
+	info->proc_reg = proc_reg;
+	info->sram_reg = IS_ERR(sram_reg) ? NULL : sram_reg;
+	info->cpu_clk = cpu_clk;
+	info->inter_clk = inter_clk;
+
+	/*
+	 * If SRAM regulator is present, software "voltage tracking" is needed
+	 * for this CPU power domain.
+	 */
+	info->need_voltage_tracking = !IS_ERR(sram_reg);
+
+	return 0;
+
+out_free_opp_table:
+	of_free_opp_table(cpu_dev);
+
+out_free_resources:
+	if (!IS_ERR(proc_reg))
+		regulator_put(proc_reg);
+	if (!IS_ERR(sram_reg))
+		regulator_put(sram_reg);
+	if (!IS_ERR(cpu_clk))
+		clk_put(cpu_clk);
+	if (!IS_ERR(inter_clk))
+		clk_put(inter_clk);
+
+	return ret;
+}
+
+static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
+{
+	if (!IS_ERR(info->proc_reg))
+		regulator_put(info->proc_reg);
+	if (!IS_ERR(info->sram_reg))
+		regulator_put(info->sram_reg);
+	if (!IS_ERR(info->cpu_clk))
+		clk_put(info->cpu_clk);
+	if (!IS_ERR(info->inter_clk))
+		clk_put(info->inter_clk);
+
+	of_free_opp_table(info->cpu_dev);
+}
+
+static int mtk_cpufreq_init(struct cpufreq_policy *policy)
+{
+	struct mtk_cpu_dvfs_info *info;
+	struct cpufreq_frequency_table *freq_table;
+	int ret;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	ret = mtk_cpu_dvfs_info_init(info, policy->cpu);
+	if (ret) {
+		pr_err("%s failed to initialize dvfs info for cpu%d\n",
+		       __func__, policy->cpu);
+		goto out_free_dvfs_info;
+	}
+
+	ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table);
+	if (ret) {
+		pr_err("failed to init cpufreq table for cpu%d: %d\n",
+		       policy->cpu, ret);
+		goto out_release_dvfs_info;
+	}
+
+	ret = cpufreq_table_validate_and_show(policy, freq_table);
+	if (ret) {
+		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+		goto out_free_cpufreq_table;
+	}
+
+	/* CPUs in the same cluster share a clock and power domain. */
+	cpumask_copy(policy->cpus, &cpu_topology[policy->cpu].core_sibling);
+	policy->driver_data = info;
+	policy->clk = info->cpu_clk;
+
+	return 0;
+
+out_free_cpufreq_table:
+	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &freq_table);
+
+out_release_dvfs_info:
+	mtk_cpu_dvfs_info_release(info);
+
+out_free_dvfs_info:
+	kfree(info);
+
+	return ret;
+}
+
+static int mtk_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	struct mtk_cpu_dvfs_info *info = policy->driver_data;
+
+	cpufreq_cooling_unregister(info->cdev);
+	dev_pm_opp_free_cpufreq_table(info->cpu_dev, &policy->freq_table);
+	mtk_cpu_dvfs_info_release(info);
+	kfree(info);
+
+	return 0;
+}
+
+static struct cpufreq_driver mt8173_cpufreq_driver = {
+	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify = cpufreq_generic_frequency_table_verify,
+	.target_index = mtk_cpufreq_set_target,
+	.get = cpufreq_generic_get,
+	.init = mtk_cpufreq_init,
+	.exit = mtk_cpufreq_exit,
+	.ready = mtk_cpufreq_ready,
+	.name = "mtk-cpufreq",
+	.attr = cpufreq_generic_attr,
+};
+
+static int mt8173_cpufreq_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = cpufreq_register_driver(&mt8173_cpufreq_driver);
+	if (ret)
+		pr_err("failed to register mtk cpufreq driver\n");
+
+	return ret;
+}
+
+static struct platform_driver mt8173_cpufreq_platdrv = {
+	.driver = {
+		.name	= "mt8173-cpufreq",
+	},
+	.probe		= mt8173_cpufreq_probe,
+};
+
+static int mt8173_cpufreq_driver_init(void)
+{
+	struct platform_device *pdev;
+	int err;
+
+	if (!of_machine_is_compatible("mediatek,mt8173"))
+		return -ENODEV;
+
+	err = platform_driver_register(&mt8173_cpufreq_platdrv);
+	if (err)
+		return err;
+
+	/*
+	 * Since there's no place to hold the device registration code and no
+	 * device-tree-based way to match the cpufreq driver yet, both the
+	 * driver and the device registration code are put here to handle
+	 * deferred probing.
+	pdev = platform_device_register_simple("mt8173-cpufreq", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		pr_err("failed to register mtk-cpufreq platform device\n");
+		return PTR_ERR(pdev);
+	}
+
+	return 0;
+}
+device_initcall(mt8173_cpufreq_driver_init);
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 37c5742..c1ae199 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -421,7 +421,7 @@
 	return 0;
 
 err2:
-	acpi_processor_unregister_performance(acpi_processor_perf, 0);
+	acpi_processor_unregister_performance(0);
 err1:
 	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 err05:
@@ -661,7 +661,7 @@
 {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
-		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		acpi_processor_unregister_performance(0);
 		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 5c035d0..0b5bf13 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -795,7 +795,7 @@
 	kfree(powernow_table);
 
 err_out:
-	acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
+	acpi_processor_unregister_performance(data->cpu);
 
 	/* data->acpi_data.state_count informs us at ->exit()
 	 * whether ACPI was used */
@@ -863,8 +863,7 @@
 static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
 	if (data->acpi_data.state_count)
-		acpi_processor_unregister_performance(&data->acpi_data,
-				data->cpu);
+		acpi_processor_unregister_performance(data->cpu);
 	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index ebef0d8..64994e1 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -27,20 +27,31 @@
 #include <linux/smp.h>
 #include <linux/of.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 
 #include <asm/cputhreads.h>
 #include <asm/firmware.h>
 #include <asm/reg.h>
 #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
+#include <asm/opal.h>
 
 #define POWERNV_MAX_PSTATES	256
 #define PMSR_PSAFE_ENABLE	(1UL << 30)
 #define PMSR_SPR_EM_DISABLE	(1UL << 31)
 #define PMSR_MAX(x)		((x >> 32) & 0xFF)
-#define PMSR_LP(x)		((x >> 48) & 0xFF)
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
-static bool rebooting, throttled;
+static bool rebooting, throttled, occ_reset;
+
+static struct chip {
+	unsigned int id;
+	bool throttled;
+	cpumask_t mask;
+	struct work_struct throttle;
+	bool restore;
+} *chips;
+
+static int nr_chips;
 
 /*
  * Note: The set of pstates consists of contiguous integers, the
@@ -298,28 +309,35 @@
 	return powernv_pstate_info.max - powernv_pstate_info.nominal;
 }
 
-static void powernv_cpufreq_throttle_check(unsigned int cpu)
+static void powernv_cpufreq_throttle_check(void *data)
 {
+	unsigned int cpu = smp_processor_id();
 	unsigned long pmsr;
-	int pmsr_pmax, pmsr_lp;
+	int pmsr_pmax, i;
 
 	pmsr = get_pmspr(SPRN_PMSR);
 
+	for (i = 0; i < nr_chips; i++)
+		if (chips[i].id == cpu_to_chip_id(cpu))
+			break;
+
 	/* Check for Pmax Capping */
 	pmsr_pmax = (s8)PMSR_MAX(pmsr);
 	if (pmsr_pmax != powernv_pstate_info.max) {
-		throttled = true;
-		pr_info("CPU %d Pmax is reduced to %d\n", cpu, pmsr_pmax);
-		pr_info("Max allowed Pstate is capped\n");
+		if (chips[i].throttled)
+			goto next;
+		chips[i].throttled = true;
+		pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
+			chips[i].id, pmsr_pmax);
+	} else if (chips[i].throttled) {
+		chips[i].throttled = false;
+		pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
+			chips[i].id, pmsr_pmax);
 	}
 
-	/*
-	 * Check for Psafe by reading LocalPstate
-	 * or check if Psafe_mode_active is set in PMSR.
-	 */
-	pmsr_lp = (s8)PMSR_LP(pmsr);
-	if ((pmsr_lp < powernv_pstate_info.min) ||
-				(pmsr & PMSR_PSAFE_ENABLE)) {
+	/* Check if Psafe_mode_active is set in PMSR. */
+next:
+	if (pmsr & PMSR_PSAFE_ENABLE) {
 		throttled = true;
 		pr_info("Pstate set to safe frequency\n");
 	}
@@ -350,7 +368,7 @@
 		return 0;
 
 	if (!throttled)
-		powernv_cpufreq_throttle_check(smp_processor_id());
+		powernv_cpufreq_throttle_check(NULL);
 
 	freq_data.pstate_id = powernv_freqs[new_index].driver_data;
 
@@ -395,6 +413,119 @@
 	.notifier_call = powernv_cpufreq_reboot_notifier,
 };
 
+void powernv_cpufreq_work_fn(struct work_struct *work)
+{
+	struct chip *chip = container_of(work, struct chip, throttle);
+	unsigned int cpu;
+	cpumask_var_t mask;
+
+	smp_call_function_any(&chip->mask,
+			      powernv_cpufreq_throttle_check, NULL, 0);
+
+	if (!chip->restore)
+		return;
+
+	chip->restore = false;
+	cpumask_copy(mask, &chip->mask);
+	for_each_cpu_and(cpu, mask, cpu_online_mask) {
+		int index, tcpu;
+		struct cpufreq_policy policy;
+
+		cpufreq_get_policy(&policy, cpu);
+		cpufreq_frequency_table_target(&policy, policy.freq_table,
+					       policy.cur,
+					       CPUFREQ_RELATION_C, &index);
+		powernv_cpufreq_target_index(&policy, index);
+		for_each_cpu(tcpu, policy.cpus)
+			cpumask_clear_cpu(tcpu, mask);
+	}
+}
+
+static char throttle_reason[][30] = {
+					"No throttling",
+					"Power Cap",
+					"Processor Over Temperature",
+					"Power Supply Failure",
+					"Over Current",
+					"OCC Reset"
+				     };
+
+static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
+				   unsigned long msg_type, void *_msg)
+{
+	struct opal_msg *msg = _msg;
+	struct opal_occ_msg omsg;
+	int i;
+
+	if (msg_type != OPAL_MSG_OCC)
+		return 0;
+
+	omsg.type = be64_to_cpu(msg->params[0]);
+
+	switch (omsg.type) {
+	case OCC_RESET:
+		occ_reset = true;
+		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
+		/*
+		 * powernv_cpufreq_throttle_check() is called in
+		 * target() callback which can detect the throttle state
+		 * for governors like ondemand.
+		 * But static governors will not call target() often thus
+		 * report throttling here.
+		 */
+		if (!throttled) {
+			throttled = true;
+			pr_crit("CPU frequency is throttled for duration\n");
+		}
+
+		break;
+	case OCC_LOAD:
+		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
+		break;
+	case OCC_THROTTLE:
+		omsg.chip = be64_to_cpu(msg->params[1]);
+		omsg.throttle_status = be64_to_cpu(msg->params[2]);
+
+		if (occ_reset) {
+			occ_reset = false;
+			throttled = false;
+			pr_info("OCC Active, CPU frequency is no longer throttled\n");
+
+			for (i = 0; i < nr_chips; i++) {
+				chips[i].restore = true;
+				schedule_work(&chips[i].throttle);
+			}
+
+			return 0;
+		}
+
+		if (omsg.throttle_status &&
+		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
+			pr_info("OCC: Chip %u Pmax reduced due to %s\n",
+				(unsigned int)omsg.chip,
+				throttle_reason[omsg.throttle_status]);
+		else if (!omsg.throttle_status)
+			pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
+				throttle_reason[omsg.throttle_status]);
+		else
+			return 0;
+
+		for (i = 0; i < nr_chips; i++)
+			if (chips[i].id == omsg.chip) {
+				if (!omsg.throttle_status)
+					chips[i].restore = true;
+				schedule_work(&chips[i].throttle);
+			}
+	}
+	return 0;
+}
+
+static struct notifier_block powernv_cpufreq_opal_nb = {
+	.notifier_call	= powernv_cpufreq_occ_msg,
+	.next		= NULL,
+	.priority	= 0,
+};
+
 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
 	struct powernv_smp_call_data freq_data;
@@ -414,6 +545,36 @@
 	.attr		= powernv_cpu_freq_attr,
 };
 
+static int init_chip_info(void)
+{
+	unsigned int chip[256];
+	unsigned int cpu, i;
+	unsigned int prev_chip_id = UINT_MAX;
+
+	for_each_possible_cpu(cpu) {
+		unsigned int id = cpu_to_chip_id(cpu);
+
+		if (prev_chip_id != id) {
+			prev_chip_id = id;
+			chip[nr_chips++] = id;
+		}
+	}
+
+	chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+	if (!chips)
+		return -ENOMEM;
+
+	for (i = 0; i < nr_chips; i++) {
+		chips[i].id = chip[i];
+		chips[i].throttled = false;
+		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
+		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
+		chips[i].restore = false;
+	}
+
+	return 0;
+}
+
 static int __init powernv_cpufreq_init(void)
 {
 	int rc = 0;
@@ -429,7 +590,13 @@
 		return rc;
 	}
 
+	/* Populate chip info */
+	rc = init_chip_info();
+	if (rc)
+		return rc;
+
 	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
+	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
 	return cpufreq_register_driver(&powernv_cpufreq_driver);
 }
 module_init(powernv_cpufreq_init);
@@ -437,6 +604,8 @@
 static void __exit powernv_cpufreq_exit(void)
 {
 	unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
+	opal_message_notifier_unregister(OPAL_MSG_OCC,
+					 &powernv_cpufreq_opal_nb);
 	cpufreq_unregister_driver(&powernv_cpufreq_driver);
 }
 module_exit(powernv_cpufreq_exit);
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
index d29e8da..7969f76 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
@@ -97,8 +97,8 @@
 	struct cpufreq_frequency_table *cbe_freqs;
 	u8 node;
 
-	/* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE
-	 * and CPUFREQ_NOTIFY policy events?)
+	/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
+	 * policy events?
 	 */
 	if (event == CPUFREQ_START)
 		return 0;
diff --git a/drivers/cpufreq/sfi-cpufreq.c b/drivers/cpufreq/sfi-cpufreq.c
index ffa3389..992ce6f 100644
--- a/drivers/cpufreq/sfi-cpufreq.c
+++ b/drivers/cpufreq/sfi-cpufreq.c
@@ -45,12 +45,10 @@
 	pentry = (struct sfi_freq_table_entry *)sb->pentry;
 	totallen = num_freq_table_entries * sizeof(*pentry);
 
-	sfi_cpufreq_array = kzalloc(totallen, GFP_KERNEL);
+	sfi_cpufreq_array = kmemdup(pentry, totallen, GFP_KERNEL);
 	if (!sfi_cpufreq_array)
 		return -ENOMEM;
 
-	memcpy(sfi_cpufreq_array, pentry, totallen);
-
 	return 0;
 }
 
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 4ab7a21..15d3214 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -386,7 +386,7 @@
 	unsigned int prev_speed;
 	unsigned int ret = 0;
 	unsigned long flags;
-	struct timeval tv1, tv2;
+	ktime_t tv1, tv2;
 
 	if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
 		return -EINVAL;
@@ -415,14 +415,14 @@
 
 	/* start latency measurement */
 	if (transition_latency)
-		do_gettimeofday(&tv1);
+		tv1 = ktime_get();
 
 	/* switch to high state */
 	set_state(SPEEDSTEP_HIGH);
 
 	/* end latency measurement */
 	if (transition_latency)
-		do_gettimeofday(&tv2);
+		tv2 = ktime_get();
 
 	*high_speed = speedstep_get_frequency(processor);
 	if (!*high_speed) {
@@ -442,8 +442,7 @@
 		set_state(SPEEDSTEP_LOW);
 
 	if (transition_latency) {
-		*transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC +
-			tv2.tv_usec - tv1.tv_usec;
+		*transition_latency = ktime_to_us(ktime_sub(tv2, tv1));
 		pr_debug("transition latency is %u uSec\n", *transition_latency);
 
 		/* convert uSec to nSec and add 20% for safety reasons */
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
new file mode 100644
index 0000000..20bcceb
--- /dev/null
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -0,0 +1,214 @@
+/*
+ * Tegra 124 cpufreq driver
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq-dt.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+struct tegra124_cpufreq_priv {
+	struct regulator *vdd_cpu_reg;
+	struct clk *cpu_clk;
+	struct clk *pllp_clk;
+	struct clk *pllx_clk;
+	struct clk *dfll_clk;
+	struct platform_device *cpufreq_dt_pdev;
+};
+
+static int tegra124_cpu_switch_to_dfll(struct tegra124_cpufreq_priv *priv)
+{
+	struct clk *orig_parent;
+	int ret;
+
+	ret = clk_set_rate(priv->dfll_clk, clk_get_rate(priv->cpu_clk));
+	if (ret)
+		return ret;
+
+	orig_parent = clk_get_parent(priv->cpu_clk);
+	clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+
+	ret = clk_prepare_enable(priv->dfll_clk);
+	if (ret)
+		goto out;
+
+	clk_set_parent(priv->cpu_clk, priv->dfll_clk);
+
+	return 0;
+
+out:
+	clk_set_parent(priv->cpu_clk, orig_parent);
+
+	return ret;
+}
+
+static void tegra124_cpu_switch_to_pllx(struct tegra124_cpufreq_priv *priv)
+{
+	clk_set_parent(priv->cpu_clk, priv->pllp_clk);
+	clk_disable_unprepare(priv->dfll_clk);
+	regulator_sync_voltage(priv->vdd_cpu_reg);
+	clk_set_parent(priv->cpu_clk, priv->pllx_clk);
+}
+
+static struct cpufreq_dt_platform_data cpufreq_dt_pd = {
+	.independent_clocks = false,
+};
+
+static int tegra124_cpufreq_probe(struct platform_device *pdev)
+{
+	struct tegra124_cpufreq_priv *priv;
+	struct device_node *np;
+	struct device *cpu_dev;
+	struct platform_device_info cpufreq_dt_devinfo = {};
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	cpu_dev = get_cpu_device(0);
+	if (!cpu_dev)
+		return -ENODEV;
+
+	np = of_cpu_device_node_get(0);
+	if (!np)
+		return -ENODEV;
+
+	priv->vdd_cpu_reg = regulator_get(cpu_dev, "vdd-cpu");
+	if (IS_ERR(priv->vdd_cpu_reg)) {
+		ret = PTR_ERR(priv->vdd_cpu_reg);
+		goto out_put_np;
+	}
+
+	priv->cpu_clk = of_clk_get_by_name(np, "cpu_g");
+	if (IS_ERR(priv->cpu_clk)) {
+		ret = PTR_ERR(priv->cpu_clk);
+		goto out_put_vdd_cpu_reg;
+	}
+
+	priv->dfll_clk = of_clk_get_by_name(np, "dfll");
+	if (IS_ERR(priv->dfll_clk)) {
+		ret = PTR_ERR(priv->dfll_clk);
+		goto out_put_cpu_clk;
+	}
+
+	priv->pllx_clk = of_clk_get_by_name(np, "pll_x");
+	if (IS_ERR(priv->pllx_clk)) {
+		ret = PTR_ERR(priv->pllx_clk);
+		goto out_put_dfll_clk;
+	}
+
+	priv->pllp_clk = of_clk_get_by_name(np, "pll_p");
+	if (IS_ERR(priv->pllp_clk)) {
+		ret = PTR_ERR(priv->pllp_clk);
+		goto out_put_pllx_clk;
+	}
+
+	ret = tegra124_cpu_switch_to_dfll(priv);
+	if (ret)
+		goto out_put_pllp_clk;
+
+	cpufreq_dt_devinfo.name = "cpufreq-dt";
+	cpufreq_dt_devinfo.parent = &pdev->dev;
+	cpufreq_dt_devinfo.data = &cpufreq_dt_pd;
+	cpufreq_dt_devinfo.size_data = sizeof(cpufreq_dt_pd);
+
+	priv->cpufreq_dt_pdev =
+		platform_device_register_full(&cpufreq_dt_devinfo);
+	if (IS_ERR(priv->cpufreq_dt_pdev)) {
+		ret = PTR_ERR(priv->cpufreq_dt_pdev);
+		goto out_switch_to_pllx;
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+
+out_switch_to_pllx:
+	tegra124_cpu_switch_to_pllx(priv);
+out_put_pllp_clk:
+	clk_put(priv->pllp_clk);
+out_put_pllx_clk:
+	clk_put(priv->pllx_clk);
+out_put_dfll_clk:
+	clk_put(priv->dfll_clk);
+out_put_cpu_clk:
+	clk_put(priv->cpu_clk);
+out_put_vdd_cpu_reg:
+	regulator_put(priv->vdd_cpu_reg);
+out_put_np:
+	of_node_put(np);
+
+	return ret;
+}
+
+static int tegra124_cpufreq_remove(struct platform_device *pdev)
+{
+	struct tegra124_cpufreq_priv *priv = platform_get_drvdata(pdev);
+
+	platform_device_unregister(priv->cpufreq_dt_pdev);
+	tegra124_cpu_switch_to_pllx(priv);
+
+	clk_put(priv->pllp_clk);
+	clk_put(priv->pllx_clk);
+	clk_put(priv->dfll_clk);
+	clk_put(priv->cpu_clk);
+	regulator_put(priv->vdd_cpu_reg);
+
+	return 0;
+}
+
+static struct platform_driver tegra124_cpufreq_platdrv = {
+	.driver.name	= "cpufreq-tegra124",
+	.probe		= tegra124_cpufreq_probe,
+	.remove		= tegra124_cpufreq_remove,
+};
+
+static int __init tegra_cpufreq_init(void)
+{
+	int ret;
+	struct platform_device *pdev;
+
+	if (!of_machine_is_compatible("nvidia,tegra124"))
+		return -ENODEV;
+
+	/*
+	 * Platform driver+device required for handling EPROBE_DEFER with
+	 * the regulator and the DFLL clock
+	 */
+	ret = platform_driver_register(&tegra124_cpufreq_platdrv);
+	if (ret)
+		return ret;
+
+	pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+	if (IS_ERR(pdev)) {
+		platform_driver_unregister(&tegra124_cpufreq_platdrv);
+		return PTR_ERR(pdev);
+	}
+
+	return 0;
+}
+module_init(tegra_cpufreq_init);
+
+MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
+MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c
similarity index 100%
rename from drivers/cpufreq/tegra-cpufreq.c
rename to drivers/cpufreq/tegra20-cpufreq.c
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 7936dce..1523e2d 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -176,14 +176,12 @@
 
 /**
  * cpuidle_state_is_coupled - check if a state is part of a coupled set
- * @dev: struct cpuidle_device for the current cpu
  * @drv: struct cpuidle_driver for the platform
  * @state: index of the target state in drv->states
  *
  * Returns true if the target state is coupled with cpus besides this one
  */
-bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
-	struct cpuidle_driver *drv, int state)
+bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
 {
 	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
 }
@@ -473,7 +471,7 @@
 			return entered_state;
 		}
 		entered_state = cpuidle_enter_state(dev, drv,
-			dev->safe_state_index);
+			drv->safe_state_index);
 		local_irq_disable();
 	}
 
@@ -521,7 +519,7 @@
 		}
 
 		entered_state = cpuidle_enter_state(dev, drv,
-			dev->safe_state_index);
+			drv->safe_state_index);
 		local_irq_disable();
 	}
 
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index c13feec..ea9728f 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -25,16 +25,21 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/platform_device.h>
+#include <linux/psci.h>
+
 #include <asm/cpuidle.h>
 #include <asm/suspend.h>
-#include <asm/psci.h>
+
+#include <uapi/linux/psci.h>
+
+#define CALXEDA_IDLE_PARAM \
+	((0 << PSCI_0_2_POWER_STATE_ID_SHIFT) | \
+	 (0 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) | \
+	 (PSCI_POWER_STATE_TYPE_POWER_DOWN << PSCI_0_2_POWER_STATE_TYPE_SHIFT))
 
 static int calxeda_idle_finish(unsigned long val)
 {
-	const struct psci_power_state ps = {
-		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
-	};
-	return psci_ops.cpu_suspend(ps, __pa(cpu_resume));
+	return psci_ops.cpu_suspend(CALXEDA_IDLE_PARAM, __pa(cpu_resume));
 }
 
 static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 48b7228..17a6dc0 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -123,6 +123,7 @@
 	 * cpuidle mechanism enables interrupts and doing that with timekeeping
 	 * suspended is generally unsafe.
 	 */
+	stop_critical_timings();
 	drv->states[index].enter_freeze(dev, drv, index);
 	WARN_ON(!irqs_disabled());
 	/*
@@ -131,6 +132,7 @@
 	 * critical sections, so tell RCU about that.
 	 */
 	RCU_NONIDLE(tick_unfreeze());
+	start_critical_timings();
 }
 
 /**
@@ -195,7 +197,9 @@
 	trace_cpu_idle_rcuidle(index, dev->cpu);
 	time_start = ktime_get();
 
+	stop_critical_timings();
 	entered_state = target_state->enter(dev, drv, index);
+	start_critical_timings();
 
 	time_end = ktime_get();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
@@ -210,7 +214,7 @@
 		tick_broadcast_exit();
 	}
 
-	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+	if (!cpuidle_state_is_coupled(drv, entered_state))
 		local_irq_enable();
 
 	diff = ktime_to_us(ktime_sub(time_end, time_start));
@@ -259,7 +263,7 @@
 int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 		  int index)
 {
-	if (cpuidle_state_is_coupled(dev, drv, index))
+	if (cpuidle_state_is_coupled(drv, index))
 		return cpuidle_enter_state_coupled(dev, drv, index);
 	return cpuidle_enter_state(dev, drv, index);
 }
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index ee97e96..178c5ad 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -34,15 +34,14 @@
 extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
-bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int state);
+bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
 int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int next_state);
 int cpuidle_coupled_register_device(struct cpuidle_device *dev);
 void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
 #else
-static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
-		struct cpuidle_driver *drv, int state)
+static inline
+bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
 {
 	return false;
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4044125..07bc7aa 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -480,4 +480,21 @@
 	  hardware hash accelerator. Supporting MD5/SHA1/SHA224/SHA256
 	  hashing algorithms.
 
+config CRYPTO_DEV_SUN4I_SS
+	tristate "Support for Allwinner Security System cryptographic accelerator"
+	depends on ARCH_SUNXI
+	select CRYPTO_MD5
+	select CRYPTO_SHA1
+	select CRYPTO_AES
+	select CRYPTO_DES
+	select CRYPTO_BLKCIPHER
+	help
+	  Some Allwinner SoCs have a crypto accelerator named
+	  Security System. Select this if you want to use it.
+	  The Security System handles AES/DES/3DES ciphers in CBC mode
+	  and the SHA1 and MD5 hash algorithms.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sun4i-ss.
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index e35c07a..c3ced6fb 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -28,3 +28,4 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 3b28e8c..192a8fa 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1113,7 +1113,7 @@
 	struct device *dev = (struct device *)data;
 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
 
-	if (core_dev->dev->ce_base == 0)
+	if (!core_dev->dev->ce_base)
 		return 0;
 
 	writel(PPC4XX_INTERRUPT_CLR,
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index d9af940..2f0b333 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -370,8 +370,7 @@
 			sg_init_table(ctx->bufsl, nsg);
 			sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
 			if (nsg > 1)
-				scatterwalk_sg_chain(ctx->bufsl, nsg,
-						req->src);
+				sg_chain(ctx->bufsl, nsg, req->src);
 			ctx->sg = ctx->bufsl;
 		} else
 			ctx->sg = req->src;
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e286e28..5652a53 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,6 @@
 config CRYPTO_DEV_FSL_CAAM
 	tristate "Freescale CAAM-Multicore driver backend"
-	depends on FSL_SOC
+	depends on FSL_SOC || ARCH_MXC
 	help
 	  Enables the driver module for Freescale's Cryptographic Accelerator
 	  and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -112,6 +112,14 @@
 	  To compile this as a module, choose M here: the module
 	  will be called caamrng.
 
+config CRYPTO_DEV_FSL_CAAM_IMX
+	def_bool SOC_IMX6 || SOC_IMX7D
+	depends on CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_LE
+	def_bool CRYPTO_DEV_FSL_CAAM_IMX || SOC_LS1021A
+	depends on CRYPTO_DEV_FSL_CAAM
+
 config CRYPTO_DEV_FSL_CAAM_DEBUG
 	bool "Enable debug output in CAAM driver"
 	depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index daca933..ba79d63 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -68,27 +68,29 @@
 #define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
 #define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
 					 CAAM_CMD_SZ * 4)
+#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
+					 CAAM_CMD_SZ * 5)
 
 /* length of descriptors text */
 #define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
-#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)
 
 /* Note: Nonce is counted in enckeylen */
-#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)
+#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)
 
 #define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
-#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)
 
 #define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
 #define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
 #define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)
 
 #define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
-#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
-#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 10 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
 
 #define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
@@ -111,6 +113,20 @@
 #endif
 static struct list_head alg_list;
 
+struct caam_alg_entry {
+	int class1_alg_type;
+	int class2_alg_type;
+	int alg_op;
+	bool rfc3686;
+	bool geniv;
+};
+
+struct caam_aead_alg {
+	struct aead_alg aead;
+	struct caam_alg_entry caam;
+	bool registered;
+};
+
 /* Set DK bit in class 1 operation if shared */
 static inline void append_dec_op1(u32 *desc, u32 type)
 {
@@ -145,18 +161,6 @@
 }
 
 /*
- * For aead encrypt and decrypt, read iv for both classes
- */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
-{
-	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
-			LDST_SRCDST_BYTE_CONTEXT |
-			(ivoffset << LDST_OFFSET_SHIFT));
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
-		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
-}
-
-/*
  * For ablkcipher encrypt and decrypt, read from req->src and
  * write to req->dst
  */
@@ -170,13 +174,6 @@
 }
 
 /*
- * If all data, including src (with assoc and iv) or dst (with iv only) are
- * contiguous
- */
-#define GIV_SRC_CONTIG		1
-#define GIV_DST_CONTIG		(1 << 1)
-
-/*
  * per-session context
  */
 struct caam_ctx {
@@ -259,7 +256,6 @@
 
 static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
-	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
 	bool keys_fit_inline = false;
@@ -270,11 +266,11 @@
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
-	/* old_aead_encrypt shared descriptor */
+	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 
 	init_sh_desc(desc, HDR_SHARE_SERIAL);
@@ -291,20 +287,10 @@
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+	/* assoclen + cryptlen = seqinlen */
+	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
 
-	/*
-	 * NULL encryption; IV is zero
-	 * assoclen = (assoclen + cryptlen) - cryptlen
-	 */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
-
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-
-	/* Prepare to read and write cryptlen bytes */
+	/* Prepare to read and write cryptlen + assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
@@ -363,7 +349,7 @@
 
 	desc = ctx->sh_desc_dec;
 
-	/* old_aead_decrypt shared descriptor */
+	/* aead_decrypt shared descriptor */
 	init_sh_desc(desc, HDR_SHARE_SERIAL);
 
 	/* Skip if already shared */
@@ -382,18 +368,10 @@
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-				ctx->authsize + ivsize);
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	/* assoclen + cryptlen = seqoutlen */
 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
 
-	/* read assoc before reading payload */
-	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-
-	/* Prepare to read and write cryptlen bytes */
+	/* Prepare to read and write cryptlen + assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
 
@@ -450,10 +428,10 @@
 
 static int aead_set_sh_desc(struct crypto_aead *aead)
 {
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
-	const char *alg_name = crypto_tfm_alg_name(ctfm);
 	struct device *jrdev = ctx->jrdev;
 	bool keys_fit_inline;
 	u32 geniv, moveiv;
@@ -461,11 +439,7 @@
 	u32 *desc;
 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
-	const bool is_rfc3686 = (ctr_mode &&
-				 (strstr(alg_name, "rfc3686") != NULL));
-
-	if (!ctx->authsize)
-		return 0;
+	const bool is_rfc3686 = alg->caam.rfc3686;
 
 	/* NULL encryption / decryption */
 	if (!ctx->enckeylen)
@@ -486,18 +460,21 @@
 	if (is_rfc3686)
 		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
 
+	if (alg->caam.geniv)
+		goto skip_enc;
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	keys_fit_inline = false;
-	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
-	/* old_aead_encrypt shared descriptor */
+	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 
 	/* Note: Context registers are saved. */
@@ -507,19 +484,16 @@
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-	/* cryptlen = seqoutlen - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-	/* assoclen + cryptlen = seqinlen - ivsize */
-	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, ivsize);
-
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
-	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-			     KEY_VLF);
-	aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
+				      FIFOLDST_VLF);
 
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
@@ -534,8 +508,8 @@
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
 	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
 
 	/* Write ICV */
@@ -555,18 +529,19 @@
 		       desc_bytes(desc), 1);
 #endif
 
+skip_enc:
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	keys_fit_inline = false;
-	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
-	/* old_aead_decrypt shared descriptor */
+	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 
 	/* Note: Context registers are saved. */
@@ -576,19 +551,17 @@
 	append_operation(desc, ctx->class2_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
-	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
-				ctx->authsize + ivsize);
-	/* assoclen = (assoclen + cryptlen) - cryptlen */
-	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
-	aead_append_ld_iv(desc, ivsize, ctx1_iv_off);
-
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686)
 		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
@@ -605,8 +578,8 @@
 		append_dec_op1(desc, ctx->class1_alg_type);
 
 	/* Read and write cryptlen bytes */
-	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
-	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
 
 	/* Load ICV */
@@ -626,12 +599,15 @@
 		       desc_bytes(desc), 1);
 #endif
 
+	if (!alg->caam.geniv)
+		goto skip_givenc;
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	keys_fit_inline = false;
-	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
 	    ctx->split_key_pad_len + ctx->enckeylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
@@ -643,6 +619,9 @@
 	/* Note: Context registers are saved. */
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
+	if (is_rfc3686)
+		goto copy_iv;
+
 	/* Generate IV */
 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
 		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
@@ -656,6 +635,7 @@
 		    (ivsize << MOVE_LEN_SHIFT));
 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
 
+copy_iv:
 	/* Copy IV to class 1 context */
 	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
 		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
@@ -668,8 +648,12 @@
 	/* ivsize + cryptlen = seqoutlen - authsize */
 	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
 
-	/* assoclen = seqinlen - (ivsize + cryptlen) */
-	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+	/* Read and write assoclen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
 
 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
@@ -710,9 +694,9 @@
 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
 			 LDST_SRCDST_BYTE_CONTEXT);
 
-	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-						 desc_bytes(desc),
-						 DMA_TO_DEVICE);
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
@@ -723,6 +707,7 @@
 		       desc_bytes(desc), 1);
 #endif
 
+skip_givenc:
 	return 0;
 }
 
@@ -976,22 +961,28 @@
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
 	/* Read assoc data */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
 
-	/* cryptlen = seqoutlen - assoclen */
-	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	/* Skip IV */
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
 	/* Will read cryptlen bytes */
 	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
 
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* cryptlen = seqoutlen - assoclen */
+	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);
+
 	/* Write encrypted data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
 
@@ -1044,21 +1035,27 @@
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-	/* Skip assoc data */
-	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
-
 	/* Read assoc data */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
 			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
 
-	/* Will write cryptlen bytes */
-	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	/* Skip IV */
+	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
 	/* Will read cryptlen bytes */
-	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);
+
+	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);
+
+	/* Skip assoc data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+
+	/* Will write cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
 
 	/* Store payload data */
 	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
@@ -1793,22 +1790,6 @@
 		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void old_aead_unmap(struct device *dev,
-			   struct aead_edesc *edesc,
-			   struct aead_request *req)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
-			     DMA_TO_DEVICE, edesc->assoc_chained);
-
-	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
-		   edesc->dst_chained, edesc->iv_dma, ivsize,
-		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
-}
-
 static void ablkcipher_unmap(struct device *dev,
 			     struct ablkcipher_edesc *edesc,
 			     struct ablkcipher_request *req)
@@ -1844,45 +1825,6 @@
 	aead_request_complete(req, err);
 }
 
-static void old_aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				  void *context)
-{
-	struct aead_request *req = context;
-	struct aead_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = (struct aead_edesc *)((char *)desc -
-		 offsetof(struct aead_edesc, hw_desc));
-
-	if (err)
-		caam_jr_strstatus(jrdev, err);
-
-	old_aead_unmap(jrdev, edesc, req);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
-		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       edesc->src_nents ? 100 : req->cryptlen +
-		       ctx->authsize + 4, 1);
-#endif
-
-	kfree(edesc);
-
-	aead_request_complete(req, err);
-}
-
 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				   void *context)
 {
@@ -1911,62 +1853,6 @@
 	aead_request_complete(req, err);
 }
 
-static void old_aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-				  void *context)
-{
-	struct aead_request *req = context;
-	struct aead_edesc *edesc;
-#ifdef DEBUG
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-
-	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-#endif
-
-	edesc = (struct aead_edesc *)((char *)desc -
-		 offsetof(struct aead_edesc, hw_desc));
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-		       ivsize, 1);
-	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
-		       req->cryptlen - ctx->authsize, 1);
-#endif
-
-	if (err)
-		caam_jr_strstatus(jrdev, err);
-
-	old_aead_unmap(jrdev, edesc, req);
-
-	/*
-	 * verify hw auth check passed else return -EBADMSG
-	 */
-	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
-		err = -EBADMSG;
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4,
-		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
-		       sizeof(struct iphdr) + req->assoclen +
-		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
-		       ctx->authsize + 36, 1);
-	if (!err && edesc->sec4_sg_bytes) {
-		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
-		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
-			sg->length + ctx->authsize + 16, 1);
-	}
-#endif
-
-	kfree(edesc);
-
-	aead_request_complete(req, err);
-}
-
 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 				   void *context)
 {
@@ -2035,91 +1921,6 @@
 /*
  * Fill in aead job descriptor
  */
-static void old_init_aead_job(u32 *sh_desc, dma_addr_t ptr,
-			      struct aead_edesc *edesc,
-			      struct aead_request *req,
-			      bool all_contig, bool encrypt)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-	int authsize = ctx->authsize;
-	u32 *desc = edesc->hw_desc;
-	u32 out_options = 0, in_options;
-	dma_addr_t dst_dma, src_dma;
-	int len, sec4_sg_index = 0;
-	bool is_gcm = false;
-
-#ifdef DEBUG
-	debug("assoclen %d cryptlen %d authsize %d\n",
-	      req->assoclen, req->cryptlen, authsize);
-	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-		       edesc->src_nents ? 100 : ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-			edesc->src_nents ? 100 : req->cryptlen, 1);
-	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
-		       desc_bytes(sh_desc), 1);
-#endif
-
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
-
-	len = desc_len(sh_desc);
-	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
-	if (all_contig) {
-		if (is_gcm)
-			src_dma = edesc->iv_dma;
-		else
-			src_dma = sg_dma_address(req->assoc);
-		in_options = 0;
-	} else {
-		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
-				 (edesc->src_nents ? : 1);
-		in_options = LDST_SGF;
-	}
-
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
-			  in_options);
-
-	if (likely(req->src == req->dst)) {
-		if (all_contig) {
-			dst_dma = sg_dma_address(req->src);
-		} else {
-			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-				  ((edesc->assoc_nents ? : 1) + 1);
-			out_options = LDST_SGF;
-		}
-	} else {
-		if (!edesc->dst_nents) {
-			dst_dma = sg_dma_address(req->dst);
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				  sec4_sg_index *
-				  sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
-	}
-	if (encrypt)
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
-				   out_options);
-	else
-		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
-				   out_options);
-}
-
-/*
- * Fill in aead job descriptor
- */
 static void init_aead_job(struct aead_request *req,
 			  struct aead_edesc *edesc,
 			  bool all_contig, bool encrypt)
@@ -2208,80 +2009,43 @@
 	/* End of blank commands */
 }
 
-/*
- * Fill in aead givencrypt job descriptor
- */
-static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
-			      struct aead_edesc *edesc,
-			      struct aead_request *req,
-			      int contig)
+static void init_authenc_job(struct aead_request *req,
+			     struct aead_edesc *edesc,
+			     bool all_contig, bool encrypt)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+						 struct caam_aead_alg, aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	int ivsize = crypto_aead_ivsize(aead);
-	int authsize = ctx->authsize;
+	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = alg->caam.rfc3686;
 	u32 *desc = edesc->hw_desc;
-	u32 out_options = 0, in_options;
-	dma_addr_t dst_dma, src_dma;
-	int len, sec4_sg_index = 0;
-	bool is_gcm = false;
+	u32 ivoffset = 0;
 
-#ifdef DEBUG
-	debug("assoclen %d cryptlen %d authsize %d\n",
-	      req->assoclen, req->cryptlen, authsize);
-	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
-		       req->assoclen , 1);
-	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
-	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
-	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
-		       desc_bytes(sh_desc), 1);
-#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ivoffset = 16;
 
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686)
+		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;
 
-	len = desc_len(sh_desc);
-	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+	init_aead_job(req, edesc, all_contig, encrypt);
 
-	if (contig & GIV_SRC_CONTIG) {
-		if (is_gcm)
-			src_dma = edesc->iv_dma;
-		else
-			src_dma = sg_dma_address(req->assoc);
-		in_options = 0;
-	} else {
-		src_dma = edesc->sec4_sg_dma;
-		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
-		in_options = LDST_SGF;
-	}
-	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
-			  in_options);
-
-	if (contig & GIV_DST_CONTIG) {
-		dst_dma = edesc->iv_dma;
-	} else {
-		if (likely(req->src == req->dst)) {
-			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-				  (edesc->assoc_nents +
-				   (is_gcm ? 1 + edesc->src_nents : 0));
-			out_options = LDST_SGF;
-		} else {
-			dst_dma = edesc->sec4_sg_dma +
-				  sec4_sg_index *
-				  sizeof(struct sec4_sg_entry);
-			out_options = LDST_SGF;
-		}
-	}
-
-	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
-			   out_options);
+	if (ivsize && (is_rfc3686 || !(alg->caam.geniv && encrypt)))
+		append_load_as_imm(desc, req->iv, ivsize,
+				   LDST_CLASS_1_CCB |
+				   LDST_SRCDST_BYTE_CONTEXT |
+				   (ivoffset << LDST_OFFSET_SHIFT));
 }
 
 /*
@@ -2392,150 +2156,6 @@
 /*
  * allocate and map the aead extended descriptor
  */
-static struct aead_edesc *old_aead_edesc_alloc(struct aead_request *req,
-					       int desc_bytes,
-					       bool *all_contig_ptr,
-					       bool encrypt)
-{
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	int assoc_nents, src_nents, dst_nents = 0;
-	struct aead_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	int sgc;
-	bool all_contig = true;
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
-	int ivsize = crypto_aead_ivsize(aead);
-	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-	unsigned int authsize = ctx->authsize;
-	bool is_gcm = false;
-
-	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-
-	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
-		dst_nents = sg_count(req->dst,
-				     req->cryptlen +
-					(encrypt ? authsize : (-authsize)),
-				     &dst_chained);
-	} else {
-		src_nents = sg_count(req->src,
-				     req->cryptlen +
-					(encrypt ? authsize : 0),
-				     &src_chained);
-	}
-
-	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-				 DMA_TO_DEVICE, assoc_chained);
-	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
-	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
-	}
-
-	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
-
-	/*
-	 * Check if data are contiguous.
-	 * GCM expected input sequence: IV, AAD, text
-	 * All other - expected input sequence: AAD, IV, text
-	 */
-	if (is_gcm)
-		all_contig = (!assoc_nents &&
-			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
-			      !src_nents && sg_dma_address(req->assoc) +
-			      req->assoclen == sg_dma_address(req->src));
-	else
-		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
-			      req->assoclen == iv_dma && !src_nents &&
-			      iv_dma + ivsize == sg_dma_address(req->src));
-	if (!all_contig) {
-		assoc_nents = assoc_nents ? : 1;
-		src_nents = src_nents ? : 1;
-		sec4_sg_len = assoc_nents + 1 + src_nents;
-	}
-
-	sec4_sg_len += dst_nents;
-
-	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	edesc->assoc_nents = assoc_nents;
-	edesc->assoc_chained = assoc_chained;
-	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
-	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
-	edesc->iv_dma = iv_dma;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
-			 desc_bytes;
-	*all_contig_ptr = all_contig;
-
-	sec4_sg_index = 0;
-	if (!all_contig) {
-		if (!is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-
-		if (is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		sg_to_sec4_sg_last(req->src,
-				   src_nents,
-				   edesc->sec4_sg +
-				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents;
-	}
-	if (dst_nents) {
-		sg_to_sec4_sg_last(req->dst, dst_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
-	}
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return edesc;
-}
-
-/*
- * allocate and map the aead extended descriptor
- */
 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 					   int desc_bytes, bool *all_contig_ptr,
 					   bool encrypt)
@@ -2579,8 +2199,8 @@
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
@@ -2685,7 +2305,15 @@
 	return ret;
 }
 
-static int old_aead_encrypt(struct aead_request *req)
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_encrypt(req);
+}
+
+static int aead_encrypt(struct aead_request *req)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2696,14 +2324,13 @@
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &all_contig, true);
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, true);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 	/* Create and submit job descriptor */
-	old_init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
-			  all_contig, true);
+	init_authenc_job(req, edesc, all_contig, true);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2711,11 +2338,11 @@
 #endif
 
 	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		old_aead_unmap(jrdev, edesc, req);
+		aead_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}
 
@@ -2757,7 +2384,15 @@
 	return ret;
 }
 
-static int old_aead_decrypt(struct aead_request *req)
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_decrypt(req);
+}
+
+static int aead_decrypt(struct aead_request *req)
 {
 	struct aead_edesc *edesc;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -2768,20 +2403,19 @@
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = old_aead_edesc_alloc(req, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &all_contig, false);
+	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
+				 &all_contig, false);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
+		       req->assoclen + req->cryptlen, 1);
 #endif
 
 	/* Create and submit job descriptor*/
-	old_init_aead_job(ctx->sh_desc_dec,
-			  ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+	init_authenc_job(req, edesc, all_contig, false);
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
@@ -2789,232 +2423,29 @@
 #endif
 
 	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, old_aead_decrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		old_aead_unmap(jrdev, edesc, req);
+		aead_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}
 
 	return ret;
 }
 
-/*
- * allocate and map the aead extended descriptor for aead givencrypt
- */
-static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
-					       *greq, int desc_bytes,
-					       u32 *contig_ptr)
+static int aead_givdecrypt(struct aead_request *req)
 {
-	struct aead_request *req = &greq->areq;
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
-		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
-	int assoc_nents, src_nents, dst_nents = 0;
-	struct aead_edesc *edesc;
-	dma_addr_t iv_dma = 0;
-	int sgc;
-	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
-	int ivsize = crypto_aead_ivsize(aead);
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
-	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
-	bool is_gcm = false;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
 
-	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
-	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+	if (req->cryptlen < ivsize)
+		return -EINVAL;
 
-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
-				     &dst_chained);
+	req->cryptlen -= ivsize;
+	req->assoclen += ivsize;
 
-	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
-				 DMA_TO_DEVICE, assoc_chained);
-	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_BIDIRECTIONAL, src_chained);
-	} else {
-		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
-					 DMA_TO_DEVICE, src_chained);
-		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
-					 DMA_FROM_DEVICE, dst_chained);
-	}
-
-	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, iv_dma)) {
-		dev_err(jrdev, "unable to map IV\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
-	      OP_ALG_ALGSEL_AES) &&
-	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
-		is_gcm = true;
-
-	/*
-	 * Check if data are contiguous.
-	 * GCM expected input sequence: IV, AAD, text
-	 * All other - expected input sequence: AAD, IV, text
-	 */
-
-	if (is_gcm) {
-		if (assoc_nents || iv_dma + ivsize !=
-		    sg_dma_address(req->assoc) || src_nents ||
-		    sg_dma_address(req->assoc) + req->assoclen !=
-		    sg_dma_address(req->src))
-			contig &= ~GIV_SRC_CONTIG;
-	} else {
-		if (assoc_nents ||
-		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
-		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
-			contig &= ~GIV_SRC_CONTIG;
-	}
-
-	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
-		contig &= ~GIV_DST_CONTIG;
-
-	if (!(contig & GIV_SRC_CONTIG)) {
-		assoc_nents = assoc_nents ? : 1;
-		src_nents = src_nents ? : 1;
-		sec4_sg_len += assoc_nents + 1 + src_nents;
-		if (req->src == req->dst &&
-		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
-			contig &= ~GIV_DST_CONTIG;
-	}
-
-	/*
-	 * Add new sg entries for GCM output sequence.
-	 * Expected output sequence: IV, encrypted text.
-	 */
-	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
-		sec4_sg_len += 1 + src_nents;
-
-	if (unlikely(req->src != req->dst)) {
-		dst_nents = dst_nents ? : 1;
-		sec4_sg_len += 1 + dst_nents;
-	}
-
-	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
-
-	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
-	if (!edesc) {
-		dev_err(jrdev, "could not allocate extended descriptor\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	edesc->assoc_nents = assoc_nents;
-	edesc->assoc_chained = assoc_chained;
-	edesc->src_nents = src_nents;
-	edesc->src_chained = src_chained;
-	edesc->dst_nents = dst_nents;
-	edesc->dst_chained = dst_chained;
-	edesc->iv_dma = iv_dma;
-	edesc->sec4_sg_bytes = sec4_sg_bytes;
-	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
-			 desc_bytes;
-	*contig_ptr = contig;
-
-	sec4_sg_index = 0;
-	if (!(contig & GIV_SRC_CONTIG)) {
-		if (!is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-
-		if (is_gcm) {
-			sg_to_sec4_sg_len(req->assoc, req->assoclen,
-					  edesc->sec4_sg + sec4_sg_index);
-			sec4_sg_index += assoc_nents;
-		}
-
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg +
-				   sec4_sg_index, 0);
-		sec4_sg_index += src_nents;
-	}
-
-	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
-	}
-
-	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
-		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
-				   iv_dma, ivsize, 0);
-		sec4_sg_index += 1;
-		sg_to_sec4_sg_last(req->dst, dst_nents,
-				   edesc->sec4_sg + sec4_sg_index, 0);
-	}
-	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-					    sec4_sg_bytes, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
-		dev_err(jrdev, "unable to map S/G table\n");
-		return ERR_PTR(-ENOMEM);
-	}
-
-	return edesc;
-}
-
-static int old_aead_givencrypt(struct aead_givcrypt_request *areq)
-{
-	struct aead_request *req = &areq->areq;
-	struct aead_edesc *edesc;
-	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	struct device *jrdev = ctx->jrdev;
-	u32 contig;
-	u32 *desc;
-	int ret = 0;
-
-	/* allocate extended descriptor */
-	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &contig);
-
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
-
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
-#endif
-
-	/* Create and submit job descriptor*/
-	init_aead_giv_job(ctx->sh_desc_givenc,
-			  ctx->sh_desc_givenc_dma, edesc, req, contig);
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
-
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, old_aead_encrypt_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		old_aead_unmap(jrdev, edesc, req);
-		kfree(edesc);
-	}
-
-	return ret;
-}
-
-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
-{
-	return old_aead_encrypt(&areq->areq);
+	return aead_decrypt(req);
 }
 
 /*
@@ -3072,8 +2503,8 @@
 			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
@@ -3251,8 +2682,8 @@
 			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return ERR_PTR(-ENOMEM);
@@ -3347,7 +2778,6 @@
 	u32 type;
 	union {
 		struct ablkcipher_alg ablkcipher;
-		struct old_aead_alg aead;
 	} template_u;
 	u32 class1_alg_type;
 	u32 class2_alg_type;
@@ -3355,598 +2785,6 @@
 };
 
 static struct caam_alg_template driver_algs[] = {
-	/* single-pass ipsec_esp descriptor */
-	{
-		.name = "authenc(hmac(md5),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha1),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha224),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha256),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha384),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha512),ecb(cipher_null))",
-		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
-		.blocksize = NULL_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = aead_null_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = NULL_IV_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = 0,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(md5),cbc(aes))",
-		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha1),cbc(aes))",
-		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha224),cbc(aes))",
-		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha256),cbc(aes))",
-		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha384),cbc(aes))",
-		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
-	},
-
-	{
-		.name = "authenc(hmac(sha512),cbc(aes))",
-		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
-		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(md5),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha1),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha224),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha256),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha384),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha512),cbc(des3_ede))",
-		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
-		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES3_EDE_BLOCK_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(md5),cbc(des))",
-		.driver_name = "authenc-hmac-md5-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES_BLOCK_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha1),cbc(des))",
-		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha224),cbc(des))",
-		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES_BLOCK_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha256),cbc(des))",
-		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha384),cbc(des))",
-		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES_BLOCK_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha512),cbc(des))",
-		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
-		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = DES_BLOCK_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = MD5_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA224_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA384_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
-	},
-	{
-		.name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
-		.driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
-		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_AEAD,
-		.template_aead = {
-			.setkey = aead_setkey,
-			.setauthsize = aead_setauthsize,
-			.encrypt = old_aead_encrypt,
-			.decrypt = old_aead_decrypt,
-			.givencrypt = old_aead_givencrypt,
-			.geniv = "<built-in>",
-			.ivsize = CTR_RFC3686_IV_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-			},
-		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
-		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
-				   OP_ALG_AAI_HMAC_PRECOMP,
-		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
-	},
 	/* ablkcipher descriptor */
 	{
 		.name = "cbc(aes)",
@@ -4036,18 +2874,6 @@
 	}
 };
 
-struct caam_alg_entry {
-	int class1_alg_type;
-	int class2_alg_type;
-	int alg_op;
-};
-
-struct caam_aead_alg {
-	struct aead_alg aead;
-	struct caam_alg_entry caam;
-	bool registered;
-};
-
 static struct caam_aead_alg driver_aeads[] = {
 	{
 		.aead = {
@@ -4058,8 +2884,8 @@
 			},
 			.setkey = rfc4106_setkey,
 			.setauthsize = rfc4106_setauthsize,
-			.encrypt = gcm_encrypt,
-			.decrypt = gcm_decrypt,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
 			.ivsize = 8,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
@@ -4076,8 +2902,8 @@
 			},
 			.setkey = rfc4543_setkey,
 			.setauthsize = rfc4543_setauthsize,
-			.encrypt = gcm_encrypt,
-			.decrypt = gcm_decrypt,
+			.encrypt = ipsec_gcm_encrypt,
+			.decrypt = ipsec_gcm_decrypt,
 			.ivsize = 8,
 			.maxauthsize = AES_BLOCK_SIZE,
 		},
@@ -4104,6 +2930,1283 @@
 			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
 		},
 	},
+	/* single-pass ipsec_esp descriptor */
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "ecb(cipher_null))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "ecb-cipher_null-caam",
+				.cra_blocksize = NULL_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = NULL_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(aes)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-aes-caam",
+				.cra_blocksize = AES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		}
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des3_ede)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-"
+						   "cbc-des3_ede-caam",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(md5),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-hmac-md5-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha1),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha1-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha224),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha224-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha256),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha256-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha384),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha384-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(des))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "echainiv(authenc(hmac(sha512),"
+					    "cbc(des)))",
+				.cra_driver_name = "echainiv-authenc-"
+						   "hmac-sha512-cbc-des-caam",
+				.cra_blocksize = DES_BLOCK_SIZE,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = DES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(md5),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-md5-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha1),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha1-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc("
+					    "hmac(sha224),rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha224-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha256),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha256-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha384),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha384-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes)))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "seqiv(authenc(hmac(sha512),"
+					    "rfc3686(ctr(aes))))",
+				.cra_driver_name = "seqiv-authenc-hmac-sha512-"
+						   "rfc3686-ctr-aes-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_givdecrypt,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_AES |
+					   OP_ALG_AAI_CTR_MOD128,
+			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+					   OP_ALG_AAI_HMAC_PRECOMP,
+			.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+			.rfc3686 = true,
+			.geniv = true,
+		},
+	},
 };
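For orientation only (not part of the patch): once registered, these templates are reached through the generic kernel AEAD API. A minimal, hypothetical caller for one of the names in the table above might look like this, with key material omitted:

	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(des3_ede))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* crypto_aead_setkey()/crypto_aead_setauthsize() land in the
	 * aead_setkey()/aead_setauthsize() callbacks registered above.
	 */
	crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);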
 
 struct caam_crypto_alg {
@@ -4211,7 +4314,7 @@
 	struct caam_crypto_alg *t_alg;
 	struct crypto_alg *alg;
 
-	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	if (!t_alg) {
 		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
@@ -4240,10 +4343,6 @@
 		alg->cra_type = &crypto_ablkcipher_type;
 		alg->cra_ablkcipher = template->template_ablkcipher;
 		break;
-	case CRYPTO_ALG_TYPE_AEAD:
-		alg->cra_type = &crypto_aead_type;
-		alg->cra_aead = template->template_aead;
-		break;
 	}
 
 	t_alg->caam.class1_alg_type = template->class1_alg_type;
@@ -4271,8 +4370,10 @@
 	struct device_node *dev_node;
 	struct platform_device *pdev;
 	struct device *ctrldev;
-	void *priv;
+	struct caam_drv_private *priv;
 	int i = 0, err = 0;
+	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -4302,16 +4403,39 @@
 
 	INIT_LIST_HEAD(&alg_list);
 
-	/* register crypto algorithms the device supports */
-	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
-		/* TODO: check if h/w supports alg */
-		struct caam_crypto_alg *t_alg;
+	/*
+	 * Register crypto algorithms the device supports.
+	 * First, detect presence and attributes of DES, AES, and MD blocks.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
 
-		t_alg = caam_alg_alloc(&driver_algs[i]);
+	/* If MD is present, limit digest size based on LP256 */
+	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+		md_limit = SHA256_DIGEST_SIZE;
+
+	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+		struct caam_crypto_alg *t_alg;
+		struct caam_alg_template *alg = driver_algs + i;
+		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		t_alg = caam_alg_alloc(alg);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_algs[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
 
@@ -4329,6 +4453,37 @@
 
 	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
 		struct caam_aead_alg *t_alg = driver_aeads + i;
+		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+				 OP_ALG_ALGSEL_MASK;
+		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
+
+		/* Skip DES algorithms if not supported by device */
+		if (!des_inst &&
+		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
+		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
+			continue;
+
+		/* Skip AES algorithms if not supported by device */
+		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+			continue;
+
+		/*
+		 * Check support for AES algorithms not available
+		 * on LP devices.
+		 */
+		if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+			if (alg_aai == OP_ALG_AAI_GCM)
+				continue;
+
+		/*
+		 * Skip algorithms requiring message digests
+		 * if MD or MD size is not supported by device.
+		 */
+		if (c2_alg_sel &&
+		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+			continue;
 
 		caam_aead_alg_init(t_alg);
 
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index f9c7875..94433b9 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -127,7 +127,7 @@
 	int buflen_0;
 	u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
 	int buflen_1;
-	u8 caam_ctx[MAX_CTX_LEN];
+	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
 	int (*update)(struct ahash_request *req);
 	int (*final)(struct ahash_request *req);
 	int (*finup)(struct ahash_request *req);
@@ -807,7 +807,7 @@
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
 				sec4_sg_bytes, GFP_DMA | flags);
 		if (!edesc) {
 			dev_err(jrdev,
@@ -829,7 +829,7 @@
 		state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
 							edesc->sec4_sg + 1,
 							buf, state->buf_dma,
-							*buflen, last_buflen);
+							*next_buflen, *buflen);
 
 		if (src_nents) {
 			src_map_to_sec4_sg(jrdev, req->src, src_nents,
@@ -919,8 +919,8 @@
 	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1006,8 +1006,8 @@
 			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1092,8 +1092,8 @@
 	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
-			DESC_JOB_IO_LEN, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + sec4_sg_bytes + DESC_JOB_IO_LEN,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1166,8 +1166,7 @@
 	int sh_len;
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
-			GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1246,7 +1245,7 @@
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
 				sec4_sg_bytes, GFP_DMA | flags);
 		if (!edesc) {
 			dev_err(jrdev,
@@ -1354,8 +1353,8 @@
 			 sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
-			sec4_sg_bytes, GFP_DMA | flags);
+	edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes,
+			GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
 		return -ENOMEM;
@@ -1449,7 +1448,7 @@
 		 * allocate space for base edesc and hw desc commands,
 		 * link tables
 		 */
-		edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+		edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN +
 				sec4_sg_bytes, GFP_DMA | flags);
 		if (!edesc) {
 			dev_err(jrdev,
@@ -1843,7 +1842,7 @@
 	struct ahash_alg *halg;
 	struct crypto_alg *alg;
 
-	t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
+	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
 	if (!t_alg) {
 		pr_err("failed to allocate t_alg\n");
 		return ERR_PTR(-ENOMEM);
@@ -1885,8 +1884,10 @@
 	struct device_node *dev_node;
 	struct platform_device *pdev;
 	struct device *ctrldev;
-	void *priv;
 	int i = 0, err = 0;
+	struct caam_drv_private *priv;
+	unsigned int md_limit = SHA512_DIGEST_SIZE;
+	u32 cha_inst, cha_vid;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
 	if (!dev_node) {
@@ -1912,19 +1913,40 @@
 	if (!priv)
 		return -ENODEV;
 
+	/*
+	 * Register crypto algorithms the device supports.  First, identify
+	 * presence and attributes of MD block.
+	 */
+	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+
+	/*
+	 * Skip registration of any hashing algorithms if MD block
+	 * is not present.
+	 */
+	if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+		return -ENODEV;
+
+	/* Limit digest size based on LP256 */
+	if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+		md_limit = SHA256_DIGEST_SIZE;
+
 	INIT_LIST_HEAD(&hash_list);
 
 	/* register crypto algorithms the device supports */
 	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
-		/* TODO: check if h/w supports alg */
 		struct caam_hash_alg *t_alg;
+		struct caam_hash_template *alg = driver_hash + i;
+
+		/* If MD size is not supported by device, skip registration */
+		if (alg->template_ahash.halg.digestsize > md_limit)
+			continue;
 
 		/* register hmac version */
-		t_alg = caam_hash_alloc(&driver_hash[i], true);
+		t_alg = caam_hash_alloc(alg, true);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_hash[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
 
@@ -1937,11 +1959,10 @@
 			list_add_tail(&t_alg->entry, &hash_list);
 
 		/* register unkeyed version */
-		t_alg = caam_hash_alloc(&driver_hash[i], false);
+		t_alg = caam_hash_alloc(alg, false);
 		if (IS_ERR(t_alg)) {
 			err = PTR_ERR(t_alg);
-			pr_warn("%s alg allocation failed\n",
-				driver_hash[i].driver_name);
+			pr_warn("%s alg allocation failed\n", alg->driver_name);
 			continue;
 		}
 
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 5095337..9b92af2 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -108,6 +108,10 @@
 
 	atomic_set(&bd->empty, BUF_NOT_EMPTY);
 	complete(&bd->filled);
+
+	/* Buffer refilled, invalidate cache */
+	dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
+
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
@@ -311,7 +315,7 @@
 	struct device_node *dev_node;
 	struct platform_device *pdev;
 	struct device *ctrldev;
-	void *priv;
+	struct caam_drv_private *priv;
 	int err;
 
 	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -338,20 +342,32 @@
 	if (!priv)
 		return -ENODEV;
 
+	/* Check for an instantiated RNG before registration */
+	if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+		return -ENODEV;
+
 	dev = caam_jr_alloc();
 	if (IS_ERR(dev)) {
 		pr_err("Job Ring Device allocation for transform failed\n");
 		return PTR_ERR(dev);
 	}
-	rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
-	if (!rng_ctx)
-		return -ENOMEM;
+	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
+	if (!rng_ctx) {
+		err = -ENOMEM;
+		goto free_caam_alloc;
+	}
 	err = caam_init_rng(rng_ctx, dev);
 	if (err)
-		return err;
+		goto free_rng_ctx;
 
 	dev_info(dev, "registering rng-caam\n");
 	return hwrng_register(&caam_rng);
+
+free_rng_ctx:
+	kfree(rng_ctx);
+free_caam_alloc:
+	caam_jr_free(dev);
+	return err;
 }
 
 module_init(caam_rng_init);
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f57f395..b6955ec 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -23,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 #include <linux/circ_buf.h>
+#include <linux/clk.h>
 #include <net/xfrm.h>
 
 #include <crypto/algapi.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index efacab7..8abb4bc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -16,6 +16,24 @@
 #include "error.h"
 
 /*
+ * i.MX targets tend to have clock control subsystems that can
+ * enable/disable clocking to our device.
+ */
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+						char *clk_name)
+{
+	return devm_clk_get(dev, clk_name);
+}
+#else
+static inline struct clk *caam_drv_identify_clk(struct device *dev,
+						char *clk_name)
+{
+	return NULL;
+}
+#endif
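Note (an observation, not in the patch text): returning NULL on non-i.MX parts is safe because the common clock API treats a NULL clk as a dummy clock, so the probe path below works unmodified:

	clk = caam_drv_identify_clk(&pdev->dev, "ipg");	/* NULL on non-i.MX */
	ret = clk_prepare_enable(clk);			/* returns 0 for a NULL clk */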
+
+/*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
  * load the JDKEK, TDKEK and TDSK registers
  */
@@ -121,7 +139,7 @@
 		flags |= DECO_JQCR_FOUR;
 
 	/* Instruct the DECO to execute it */
-	wr_reg32(&deco->jr_ctl_hi, flags);
+	setbits32(&deco->jr_ctl_hi, flags);
 
 	timeout = 10000000;
 	do {
@@ -175,7 +193,7 @@
 {
 	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
 	struct caam_ctrl __iomem *ctrl;
-	u32 *desc, status, rdsta_val;
+	u32 *desc, status = 0, rdsta_val;
 	int ret = 0, sh_idx;
 
 	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
@@ -207,7 +225,8 @@
 		 * CAAM eras), then try again.
 		 */
 		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
-		if (status || !(rdsta_val & (1 << sh_idx)))
+		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
+		    !(rdsta_val & (1 << sh_idx)))
 			ret = -EAGAIN;
 		if (ret)
 			break;
@@ -279,7 +298,7 @@
 	struct device *ctrldev;
 	struct caam_drv_private *ctrlpriv;
 	struct caam_ctrl __iomem *ctrl;
-	int ring, ret = 0;
+	int ring;
 
 	ctrldev = &pdev->dev;
 	ctrlpriv = dev_get_drvdata(ctrldev);
@@ -303,7 +322,13 @@
 	/* Unmap controller region */
 	iounmap(ctrl);
 
-	return ret;
+	/* shut clocks off before finalizing shutdown */
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	clk_disable_unprepare(ctrlpriv->caam_mem);
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+
+	return 0;
 }
 
 /*
@@ -370,14 +395,14 @@
 int caam_get_era(void)
 {
 	struct device_node *caam_node;
-	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
-		const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
-				"fsl,sec-era",
-				NULL);
-		return prop ? *prop : -ENOTSUPP;
-	}
+	int ret;
+	u32 prop;
 
-	return -ENOTSUPP;
+	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
+	of_node_put(caam_node);
+
+	return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
 }
 EXPORT_SYMBOL(caam_get_era);
 
@@ -390,6 +415,7 @@
 	struct device_node *nprop, *np;
 	struct caam_ctrl __iomem *ctrl;
 	struct caam_drv_private *ctrlpriv;
+	struct clk *clk;
 #ifdef CONFIG_DEBUG_FS
 	struct caam_perfmon *perfmon;
 #endif
@@ -398,8 +424,7 @@
 	int pg_size;
 	int BLOCK_OFFSET = 0;
 
-	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
-				GFP_KERNEL);
+	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
 	if (!ctrlpriv)
 		return -ENOMEM;
 
@@ -408,12 +433,76 @@
 	ctrlpriv->pdev = pdev;
 	nprop = pdev->dev.of_node;
 
+	/* Enable clocking */
+	clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM ipg clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_ipg = clk;
+
+	clk = caam_drv_identify_clk(&pdev->dev, "mem");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM mem clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_mem = clk;
+
+	clk = caam_drv_identify_clk(&pdev->dev, "aclk");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM aclk clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_aclk = clk;
+
+	clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+	if (IS_ERR(clk)) {
+		ret = PTR_ERR(clk);
+		dev_err(&pdev->dev,
+			"can't identify CAAM emi_slow clk: %d\n", ret);
+		return ret;
+	}
+	ctrlpriv->caam_emi_slow = clk;
+
+	ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_mem);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
+			ret);
+		goto disable_caam_ipg;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_aclk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
+		goto disable_caam_mem;
+	}
+
+	ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+			ret);
+		goto disable_caam_aclk;
+	}
+
 	/* Get configuration properties from device tree */
 	/* First, get register page */
 	ctrl = of_iomap(nprop, 0);
 	if (ctrl == NULL) {
 		dev_err(dev, "caam: of_iomap() failed\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto disable_caam_emi_slow;
 	}
 	/* Finding the page size for using the CTPR_MS register */
 	comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
@@ -444,8 +533,9 @@
 	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
 	 * long pointers in master configuration register
 	 */
-	setbits32(&ctrl->mcr, MCFGR_WDENABLE |
-		  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+	clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
+		      MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
+					MCFGR_LONG_PTR : 0));
 
 	/*
 	 *  Read the Compile Time parameters and SCFGR to determine
@@ -492,12 +582,11 @@
 		    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
 			rspec++;
 
-	ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
-					sizeof(struct platform_device *) * rspec,
-					GFP_KERNEL);
+	ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
+					sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
 	if (ctrlpriv->jrpdev == NULL) {
-		iounmap(ctrl);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto iounmap_ctrl;
 	}
 
 	ring = 0;
@@ -537,8 +626,8 @@
 	/* If no QI and no rings specified, quit and go home */
 	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
 		dev_err(dev, "no queues configured, terminating\n");
-		caam_remove(pdev);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto caam_remove;
 	}
 
 	cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
@@ -595,8 +684,7 @@
 		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
 		if (ret) {
 			dev_err(dev, "failed to instantiate RNG");
-			caam_remove(pdev);
-			return ret;
+			goto caam_remove;
 		}
 		/*
 		 * Set handles init'ed by this module as the complement of the
@@ -700,6 +788,20 @@
 						 &ctrlpriv->ctl_tdsk_wrap);
 #endif
 	return 0;
+
+caam_remove:
+	caam_remove(pdev);
+iounmap_ctrl:
+	iounmap(ctrl);
+disable_caam_emi_slow:
+	clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+disable_caam_aclk:
+	clk_disable_unprepare(ctrlpriv->caam_aclk);
+disable_caam_mem:
+	clk_disable_unprepare(ctrlpriv->caam_mem);
+disable_caam_ipg:
+	clk_disable_unprepare(ctrlpriv->caam_ipg);
+	return ret;
 }
 
 static struct of_device_id caam_match[] = {
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index d397ff9..983d663 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -8,12 +8,29 @@
 #ifndef DESC_H
 #define DESC_H
 
+/*
+ * 16-byte hardware scatter/gather table
+ * An 8-byte table exists in the hardware spec, but has never been
+ * implemented to date. The 8/16 option is selected at RTL compile time,
+ * and this selection is visible in the Compile Time Parameters Register.
+ */
+
+#define SEC4_SG_LEN_EXT		0x80000000	/* Entry points to table */
+#define SEC4_SG_LEN_FIN		0x40000000	/* Last ent in table */
+#define SEC4_SG_BPID_MASK	0x000000ff
+#define SEC4_SG_BPID_SHIFT	16
+#define SEC4_SG_LEN_MASK	0x3fffffff	/* Excludes EXT and FINAL */
+#define SEC4_SG_OFFS_MASK	0x00001fff
+
 struct sec4_sg_entry {
+#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
+	u32 rsvd1;
+	dma_addr_t ptr;
+#else
 	u64 ptr;
-#define SEC4_SG_LEN_FIN 0x40000000
-#define SEC4_SG_LEN_EXT 0x80000000
+#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_IMX */
 	u32 len;
-	u8 reserved;
+	u8 rsvd2;
 	u8 buf_pool_id;
 	u16 offset;
 };
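As an illustration of the new length/flag split (these helpers are hypothetical, not part of the patch):

static inline void sec4_sg_mark_final(struct sec4_sg_entry *e)
{
	e->len |= SEC4_SG_LEN_FIN;		/* last entry in the table */
}

static inline u32 sec4_sg_entry_len(const struct sec4_sg_entry *e)
{
	return e->len & SEC4_SG_LEN_MASK;	/* strip the EXT/FIN flag bits */
}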
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 9f79fd7..98d07de 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -367,7 +367,7 @@
 	if (upper) \
 		append_u64(desc, data); \
 	else \
-		append_u32(desc, data); \
+		append_u32(desc, lower_32_bits(data)); \
 } while (0)
 
 #define append_math_add_imm_u64(desc, dest, src0, src1, data) \
diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h
index 89b94cc..e2bcacc 100644
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
@@ -91,6 +91,11 @@
 				   Handles of the RNG4 block are initialized
 				   by this driver */
 
+	struct clk *caam_ipg;
+	struct clk *caam_mem;
+	struct clk *caam_aclk;
+	struct clk *caam_emi_slow;
+
 	/*
 	 * debugfs entries for developer view into driver/device
 	 * variables at runtime.
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b8b5d47..f7e0d8d 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -202,6 +202,13 @@
 		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
 		userstatus = jrp->outring[hw_idx].jrstatus;
 
+		/*
+		 * Make sure all information from the job has been obtained
+		 * before telling CAAM that the job has been removed from the
+		 * output ring.
+		 */
+		mb();
+
 		/* set done */
 		wr_reg32(&jrp->rregs->outring_rmvd, 1);
 
@@ -351,12 +358,23 @@
 
 	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
 
+	/*
+	 * Guarantee that the descriptor's DMA address has been written to
+	 * the next slot in the ring before the write index is updated, since
+	 * other cores may update this index independently.
+	 */
 	smp_wmb();
 
 	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
 				    (JOBR_DEPTH - 1);
 	jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
+	/*
+	 * Ensure that all job information has been written before
+	 * notifying CAAM that a new job was added to the input ring.
+	 */
+	wmb();
+
 	wr_reg32(&jrp->rregs->inpring_jobadd, 1);
 
 	spin_unlock_bh(&jrp->inplock);
@@ -392,18 +410,17 @@
 		goto out_free_irq;
 
 	error = -ENOMEM;
-	jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
-					  &inpbusaddr, GFP_KERNEL);
+	jrp->inpring = dma_alloc_coherent(dev, sizeof(*jrp->inpring) *
+					  JOBR_DEPTH, &inpbusaddr, GFP_KERNEL);
 	if (!jrp->inpring)
 		goto out_free_irq;
 
-	jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+	jrp->outring = dma_alloc_coherent(dev, sizeof(*jrp->outring) *
 					  JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
 	if (!jrp->outring)
 		goto out_free_inpring;
 
-	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
-			       GFP_KERNEL);
+	jrp->entinfo = kcalloc(JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL);
 	if (!jrp->entinfo)
 		goto out_free_outring;
 
@@ -461,8 +478,7 @@
 	int error;
 
 	jrdev = &pdev->dev;
-	jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
-			      GFP_KERNEL);
+	jrpriv = devm_kmalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
 	if (!jrpriv)
 		return -ENOMEM;
 
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 672c974..a8a7997 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -65,9 +65,31 @@
  *
  */
 
+#ifdef CONFIG_ARM
+/* These are common macros for Power, put here for ARM */
+#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
+#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
+
+#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
+#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))
+
+#define out_le32(a, v)	out_arch(l, le32, a, v)
+#define in_le32(a)	in_arch(l, le32, a)
+
+#define out_be32(a, v)	out_arch(l, be32, a, v)
+#define in_be32(a)	in_arch(l, be32, a)
+
+#define clrsetbits(type, addr, clear, set) \
+	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))
+
+#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)
+#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)
+#endif
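For reference (expansion shown for illustration, not added by the patch), the read-modify-write helper defined above behaves as follows:

	/* clrsetbits_be32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH)
	 * expands to:
	 */
	out_be32(&ctrl->mcr,
		 (in_be32(&ctrl->mcr) & ~MCFGR_AWCACHE_MASK) | MCFGR_AWCACHE_CACH);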
+
 #ifdef __BIG_ENDIAN
 #define wr_reg32(reg, data) out_be32(reg, data)
 #define rd_reg32(reg) in_be32(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_be32(addr, clear, set)
 #ifdef CONFIG_64BIT
 #define wr_reg64(reg, data) out_be64(reg, data)
 #define rd_reg64(reg) in_be64(reg)
@@ -76,6 +98,7 @@
 #ifdef __LITTLE_ENDIAN
 #define wr_reg32(reg, data) __raw_writel(data, reg)
 #define rd_reg32(reg) __raw_readl(reg)
+#define clrsetbits_32(addr, clear, set) clrsetbits_le32(addr, clear, set)
 #ifdef CONFIG_64BIT
 #define wr_reg64(reg, data) __raw_writeq(data, reg)
 #define rd_reg64(reg) __raw_readq(reg)
@@ -85,20 +108,31 @@
 
 /*
  * The only user of these wr/rd_reg64 functions is the Job Ring (JR).
- * The DMA address registers in the JR are a pair of 32-bit registers.
- * The layout is:
+ * The DMA address registers in the JR are handled differently depending on
+ * platform:
+ *
+ * 1. All BE CAAM platforms and i.MX platforms (LE CAAM):
  *
  *    base + 0x0000 : most-significant 32 bits
  *    base + 0x0004 : least-significant 32 bits
  *
  * The 32-bit version of this core therefore has to write to base + 0x0004
- * to set the 32-bit wide DMA address. This seems to be independent of the
- * endianness of the written/read data.
+ * to set the 32-bit wide DMA address.
+ *
+ * 2. All other LE CAAM platforms (LS1021A etc.)
+ *    base + 0x0000 : least-significant 32 bits
+ *    base + 0x0004 : most-significant 32 bits
  */
 
 #ifndef CONFIG_64BIT
+#if !defined(CONFIG_CRYPTO_DEV_FSL_CAAM_LE) || \
+	defined(CONFIG_CRYPTO_DEV_FSL_CAAM_IMX)
 #define REG64_MS32(reg) ((u32 __iomem *)(reg))
 #define REG64_LS32(reg) ((u32 __iomem *)(reg) + 1)
+#else
+#define REG64_MS32(reg) ((u32 __iomem *)(reg) + 1)
+#define REG64_LS32(reg) ((u32 __iomem *)(reg))
+#endif
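On 32-bit builds the (unchanged) wr_reg64() below consumes these selectors roughly as follows -- a sketch for orientation, not the verbatim function body:

	wr_reg32(REG64_MS32(reg), upper_32_bits(data));
	wr_reg32(REG64_LS32(reg), lower_32_bits(data));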
 
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
@@ -133,18 +167,28 @@
 #define CHA_NUM_MS_DECONUM_SHIFT	24
 #define CHA_NUM_MS_DECONUM_MASK	(0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
-/* CHA Version IDs */
+/*
+ * CHA version IDs / instantiation bitfields
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
 #define CHA_ID_LS_AES_SHIFT	0
-#define CHA_ID_LS_AES_MASK		(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_MASK	(0xfull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_LP	(0x3ull << CHA_ID_LS_AES_SHIFT)
+#define CHA_ID_LS_AES_HP	(0x4ull << CHA_ID_LS_AES_SHIFT)
 
 #define CHA_ID_LS_DES_SHIFT	4
-#define CHA_ID_LS_DES_MASK		(0xfull << CHA_ID_LS_DES_SHIFT)
+#define CHA_ID_LS_DES_MASK	(0xfull << CHA_ID_LS_DES_SHIFT)
 
 #define CHA_ID_LS_ARC4_SHIFT	8
 #define CHA_ID_LS_ARC4_MASK	(0xfull << CHA_ID_LS_ARC4_SHIFT)
 
 #define CHA_ID_LS_MD_SHIFT	12
 #define CHA_ID_LS_MD_MASK	(0xfull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP256	(0x0ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_LP512	(0x1ull << CHA_ID_LS_MD_SHIFT)
+#define CHA_ID_LS_MD_HP		(0x2ull << CHA_ID_LS_MD_SHIFT)
 
 #define CHA_ID_LS_RNG_SHIFT	16
 #define CHA_ID_LS_RNG_MASK	(0xfull << CHA_ID_LS_RNG_SHIFT)
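The same shift/mask selectors read instantiation counts out of cha_num_ls, mirroring the probe code earlier in this patch:

	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst  = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;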
@@ -395,10 +439,16 @@
 /* AXI read cache control */
 #define MCFGR_ARCACHE_SHIFT	12
 #define MCFGR_ARCACHE_MASK	(0xf << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_BUFF	(0x1 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_CACH	(0x2 << MCFGR_ARCACHE_SHIFT)
+#define MCFGR_ARCACHE_RALL	(0x4 << MCFGR_ARCACHE_SHIFT)
 
 /* AXI write cache control */
 #define MCFGR_AWCACHE_SHIFT	8
 #define MCFGR_AWCACHE_MASK	(0xf << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_BUFF	(0x1 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_CACH	(0x2 << MCFGR_AWCACHE_SHIFT)
+#define MCFGR_AWCACHE_WALL	(0x8 << MCFGR_AWCACHE_SHIFT)
 
 /* AXI pipeline depth */
 #define MCFGR_AXIPIPE_SHIFT	4
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index b68b74c..18cd6d1 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -15,7 +15,6 @@
 {
 	sec4_sg_ptr->ptr = dma;
 	sec4_sg_ptr->len = len;
-	sec4_sg_ptr->reserved = 0;
 	sec4_sg_ptr->buf_pool_id = 0;
 	sec4_sg_ptr->offset = offset;
 #ifdef DEBUG
@@ -106,9 +105,15 @@
 {
 	if (unlikely(chained)) {
 		int i;
+		struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
+		 */
 		for (i = 0; i < nents; i++) {
-			dma_unmap_sg(dev, sg, 1, dir);
-			sg = sg_next(sg);
+			dma_unmap_sg(dev, tsg, 1, dir);
+			tsg = sg_next(tsg);
 		}
 	} else if (nents) {
 		dma_unmap_sg(dev, sg, nents, dir);
@@ -119,19 +124,23 @@
 	struct device *dev, struct scatterlist *sg, unsigned int nents,
 	enum dma_data_direction dir, bool chained)
 {
-	struct scatterlist *first = sg;
-
 	if (unlikely(chained)) {
 		int i;
+		struct scatterlist *tsg = sg;
+
+		/*
+		 * Use a local copy of the sg pointer to avoid moving the
+		 * head of the list pointed to by sg as we walk the list.
+		 */
 		for (i = 0; i < nents; i++) {
-			if (!dma_map_sg(dev, sg, 1, dir)) {
-				dma_unmap_sg_chained(dev, first, i, dir,
+			if (!dma_map_sg(dev, tsg, 1, dir)) {
+				dma_unmap_sg_chained(dev, sg, i, dir,
 						     chained);
 				nents = 0;
 				break;
 			}
 
-			sg = sg_next(sg);
+			tsg = sg_next(tsg);
 		}
 	} else
 		nents = dma_map_sg(dev, sg, nents, dir);
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
index f2e6de3..bb241c3 100644
--- a/drivers/crypto/ccp/ccp-platform.c
+++ b/drivers/crypto/ccp/ccp-platform.c
@@ -216,6 +216,7 @@
 	{ "AMDI0C00", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
 #endif
 
 #ifdef CONFIG_OF
@@ -223,6 +224,7 @@
 	{ .compatible = "amd,ccp-seattle-v1a" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, ccp_of_match);
 #endif
 
 static struct platform_driver ccp_platform_driver = {
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index ad47d0d..68e8aa9 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -334,7 +334,7 @@
 
 	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
 	if (!hdev->dma_lch) {
-		dev_err(hdev->dev, "Couldn't aquire a slave DMA channel.\n");
+		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 		return -EBUSY;
 	}
 	dma_conf.direction = DMA_MEM_TO_DEV;
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 402631a..8f27903 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -156,7 +156,8 @@
 };
 
 struct aead_ctx {
-	struct buffer_desc *buffer;
+	struct buffer_desc *src;
+	struct buffer_desc *dst;
 	struct scatterlist ivlist;
 	/* used when the hmac is not on one sg entry */
 	u8 *hmac_virt;
@@ -198,6 +199,15 @@
 	int registered;
 };
 
+struct ixp_aead_alg {
+	struct aead_alg crypto;
+	const struct ix_hash_algo *hash;
+	u32 cfg_enc;
+	u32 cfg_dec;
+
+	int registered;
+};
+
 static const struct ix_hash_algo hash_alg_md5 = {
 	.cfgword	= 0xAA010004,
 	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
@@ -339,11 +349,11 @@
 	struct aead_ctx *req_ctx = aead_request_ctx(req);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	int authsize = crypto_aead_authsize(tfm);
-	int decryptlen = req->cryptlen - authsize;
+	int decryptlen = req->assoclen + req->cryptlen - authsize;
 
 	if (req_ctx->encrypt) {
 		scatterwalk_map_and_copy(req_ctx->hmac_virt,
-			req->src, decryptlen, authsize, 1);
+			req->dst, decryptlen, authsize, 1);
 	}
 	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 }
@@ -364,7 +374,8 @@
 		struct aead_request *req = crypt->data.aead_req;
 		struct aead_ctx *req_ctx = aead_request_ctx(req);
 
-		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 		if (req_ctx->hmac_virt) {
 			finish_scattered_hmac(crypt);
 		}
@@ -573,11 +584,10 @@
 	return init_tfm(tfm);
 }
 
-static int init_tfm_aead(struct crypto_tfm *tfm)
+static int init_tfm_aead(struct crypto_aead *tfm)
 {
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct aead_ctx));
-	return init_tfm(tfm);
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+	return init_tfm(crypto_aead_tfm(tfm));
 }
 
 static void exit_tfm(struct crypto_tfm *tfm)
@@ -587,6 +597,11 @@
 	free_sa_dir(&ctx->decrypt);
 }
 
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+	exit_tfm(crypto_aead_tfm(tfm));
+}
+
 static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 		int init_len, u32 ctx_addr, const u8 *key, int key_len)
 {
@@ -969,24 +984,6 @@
 	return ret;
 }
 
-static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
-		unsigned int nbytes)
-{
-	int offset = 0;
-
-	if (!nbytes)
-		return 0;
-
-	for (;;) {
-		if (start < offset + sg->length)
-			break;
-
-		offset += sg->length;
-		sg = sg_next(sg);
-	}
-	return (start + nbytes > offset + sg->length);
-}
-
 static int aead_perform(struct aead_request *req, int encrypt,
 		int cryptoffset, int eff_cryptlen, u8 *iv)
 {
@@ -1002,6 +999,8 @@
 	struct device *dev = &pdev->dev;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 				GFP_KERNEL : GFP_ATOMIC;
+	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+	unsigned int lastlen;
 
 	if (qmgr_stat_full(SEND_QID))
 		return -EAGAIN;
@@ -1030,35 +1029,55 @@
 	crypt->crypt_len = eff_cryptlen;
 
 	crypt->auth_offs = 0;
-	crypt->auth_len = req->assoclen + ivsize + cryptlen;
+	crypt->auth_len = req->assoclen + cryptlen;
 	BUG_ON(ivsize && !req->iv);
 	memcpy(crypt->iv, req->iv, ivsize);
 
+	req_ctx->dst = NULL;
+
 	if (req->src != req->dst) {
-		BUG(); /* -ENOTSUP because of my laziness */
+		struct buffer_desc dst_hook;
+
+		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+		src_direction = DMA_TO_DEVICE;
+
+		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+				      &dst_hook, flags, DMA_FROM_DEVICE);
+		req_ctx->dst = dst_hook.next;
+		crypt->dst_buf = dst_hook.phys_next;
+
+		if (!buf)
+			goto free_buf_dst;
+
+		if (encrypt) {
+			lastlen = buf->buf_len;
+			if (lastlen >= authsize)
+				crypt->icv_rev_aes = buf->phys_addr +
+						     buf->buf_len - authsize;
+		}
 	}
 
-	/* ASSOC data */
-	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
-		flags, DMA_TO_DEVICE);
-	req_ctx->buffer = src_hook.next;
+	buf = chainup_buffers(dev, req->src, crypt->auth_len,
+			      &src_hook, flags, src_direction);
+	req_ctx->src = src_hook.next;
 	crypt->src_buf = src_hook.phys_next;
 	if (!buf)
-		goto out;
-	/* IV */
-	sg_init_table(&req_ctx->ivlist, 1);
-	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
-	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
-			DMA_BIDIRECTIONAL);
-	if (!buf)
-		goto free_chain;
-	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
+		goto free_buf_src;
+
+	if (!encrypt || !req_ctx->dst) {
+		lastlen = buf->buf_len;
+		if (lastlen >= authsize)
+			crypt->icv_rev_aes = buf->phys_addr +
+					     buf->buf_len - authsize;
+	}
+
+	if (unlikely(lastlen < authsize)) {
 		/* The 12 hmac bytes are scattered,
 		 * we need to copy them into a safe buffer */
 		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 				&crypt->icv_rev_aes);
 		if (unlikely(!req_ctx->hmac_virt))
-			goto free_chain;
+			goto free_buf_src;
 		if (!encrypt) {
 			scatterwalk_map_and_copy(req_ctx->hmac_virt,
 				req->src, cryptlen, authsize, 0);
@@ -1067,27 +1086,16 @@
 	} else {
 		req_ctx->hmac_virt = NULL;
 	}
-	/* Crypt */
-	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
-			DMA_BIDIRECTIONAL);
-	if (!buf)
-		goto free_hmac_virt;
-	if (!req_ctx->hmac_virt) {
-		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
-	}
 
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
 	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 	BUG_ON(qmgr_stat_overflow(SEND_QID));
 	return -EINPROGRESS;
-free_hmac_virt:
-	if (req_ctx->hmac_virt) {
-		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
-				crypt->icv_rev_aes);
-	}
-free_chain:
-	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
-out:
+
+free_buf_src:
+	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dst:
+	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 	crypt->ctl_flags = CTL_FLAG_UNUSED;
 	return -ENOMEM;
 }
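Note: with the old interface gone there is no separate req->assoc scatterlist and no IV chained into the data; in the new AEAD API the source scatterlist carries assoclen bytes of associated data followed by the payload, the IV stays in req->iv, and for decryption req->cryptlen includes the trailing ICV. That is why crypt->auth_len drops the ivsize term and a single chainup_buffers() call per direction suffices. A small illustrative helper (not from the patch) showing where the ICV of a decrypt request starts, matching the decryptlen expression used in finish_scattered_hmac() above:

	#include <crypto/aead.h>

	/* offset of the ICV inside req->src for a decrypt request:
	 * associated data || ciphertext || ICV, with the ICV counted
	 * in req->cryptlen.
	 */
	static unsigned int sketch_icv_offset(struct aead_request *req)
	{
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);

		return req->assoclen + req->cryptlen -
		       crypto_aead_authsize(tfm);
	}
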
@@ -1173,40 +1181,12 @@
 
 static int aead_encrypt(struct aead_request *req)
 {
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	return aead_perform(req, 1, req->assoclen + ivsize,
-			req->cryptlen, req->iv);
+	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
-	return aead_perform(req, 0, req->assoclen + ivsize,
-			req->cryptlen, req->iv);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-	unsigned len, ivsize = crypto_aead_ivsize(tfm);
-	__be64 seq;
-
-	/* copied from eseqiv.c */
-	if (!ctx->salted) {
-		get_random_bytes(ctx->salt, ivsize);
-		ctx->salted = 1;
-	}
-	memcpy(req->areq.iv, ctx->salt, ivsize);
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-	return aead_perform(&req->areq, 1, req->areq.assoclen,
-			req->areq.cryptlen +ivsize, req->giv);
+	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
 }
 
 static struct ixp_alg ixp4xx_algos[] = {
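Note: aead_encrypt()/aead_decrypt() collapse to plain aead_perform() calls because IV generation (the removed givencrypt path) is now handled by generic IV templates layered on top of the driver. A minimal caller-side sketch of driving one of the converted authenc transforms through the new API; this is illustrative only, the names are hypothetical, the key blob is assumed to be packed in the authenc() key format already, and a caller of an async implementation such as this driver would set a completion callback rather than requesting a synchronous algorithm as done here:

	#include <crypto/aead.h>
	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	static int sketch_authenc_encrypt(const u8 *key, unsigned int keylen,
					  u8 *buf, unsigned int assoclen,
					  unsigned int ptlen, u8 *iv)
	{
		struct crypto_aead *tfm;
		struct aead_request *req;
		struct scatterlist sg;
		int ret;

		/* mask CRYPTO_ALG_ASYNC: pick a synchronous implementation */
		tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0,
					CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_aead_setkey(tfm, key, keylen);
		if (ret)
			goto out_tfm;

		req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_tfm;
		}

		/* buf: assoclen bytes of AD || ptlen bytes of plaintext ||
		 * room for the ICV that encryption appends in place
		 */
		sg_init_one(&sg, buf,
			    assoclen + ptlen + crypto_aead_authsize(tfm));
		aead_request_set_ad(req, assoclen);
		aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

		ret = crypto_aead_encrypt(req);

		aead_request_free(req);
	out_tfm:
		crypto_free_aead(tfm);
		return ret;
	}
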
@@ -1319,80 +1299,77 @@
 	},
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
 	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(des))",
-		.cra_blocksize	= DES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
-		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES3_EDE_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(des))",
-		.cra_blocksize	= DES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des))",
+			.cra_blocksize	= DES_BLOCK_SIZE,
+		},
 			.ivsize		= DES_BLOCK_SIZE,
 			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
-		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= DES3_EDE_BLOCK_SIZE,
-			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
 	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(md5),cbc(aes))",
-		.cra_blocksize	= AES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= AES_BLOCK_SIZE,
-			.maxauthsize	= MD5_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(md5),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= MD5_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_md5,
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
 	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
 }, {
 	.crypto	= {
-		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
-		.cra_blocksize	= AES_BLOCK_SIZE,
-		.cra_u		= { .aead = {
-			.ivsize		= AES_BLOCK_SIZE,
-			.maxauthsize	= SHA1_DIGEST_SIZE,
-			}
-		}
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+			.cra_blocksize	= AES_BLOCK_SIZE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
 	},
 	.hash = &hash_alg_sha1,
 	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
@@ -1436,32 +1413,20 @@
 		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
 			continue;
 		}
-		if (!ixp4xx_algos[i].hash) {
-			/* block ciphers */
-			cra->cra_type = &crypto_ablkcipher_type;
-			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
-					 CRYPTO_ALG_KERN_DRIVER_ONLY |
-					 CRYPTO_ALG_ASYNC;
-			if (!cra->cra_ablkcipher.setkey)
-				cra->cra_ablkcipher.setkey = ablk_setkey;
-			if (!cra->cra_ablkcipher.encrypt)
-				cra->cra_ablkcipher.encrypt = ablk_encrypt;
-			if (!cra->cra_ablkcipher.decrypt)
-				cra->cra_ablkcipher.decrypt = ablk_decrypt;
-			cra->cra_init = init_tfm_ablk;
-		} else {
-			/* authenc */
-			cra->cra_type = &crypto_aead_type;
-			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					 CRYPTO_ALG_KERN_DRIVER_ONLY |
-					 CRYPTO_ALG_ASYNC;
-			cra->cra_aead.setkey = aead_setkey;
-			cra->cra_aead.setauthsize = aead_setauthsize;
-			cra->cra_aead.encrypt = aead_encrypt;
-			cra->cra_aead.decrypt = aead_decrypt;
-			cra->cra_aead.givencrypt = aead_givencrypt;
-			cra->cra_init = init_tfm_aead;
-		}
+
+		/* block ciphers */
+		cra->cra_type = &crypto_ablkcipher_type;
+		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+				 CRYPTO_ALG_KERN_DRIVER_ONLY |
+				 CRYPTO_ALG_ASYNC;
+		if (!cra->cra_ablkcipher.setkey)
+			cra->cra_ablkcipher.setkey = ablk_setkey;
+		if (!cra->cra_ablkcipher.encrypt)
+			cra->cra_ablkcipher.encrypt = ablk_encrypt;
+		if (!cra->cra_ablkcipher.decrypt)
+			cra->cra_ablkcipher.decrypt = ablk_decrypt;
+		cra->cra_init = init_tfm_ablk;
+
 		cra->cra_ctxsize = sizeof(struct ixp_ctx);
 		cra->cra_module = THIS_MODULE;
 		cra->cra_alignmask = 3;
@@ -1473,6 +1438,38 @@
 		else
 			ixp4xx_algos[i].registered = 1;
 	}
+
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
+		    CRYPTO_MAX_ALG_NAME)
+			continue;
+		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+			continue;
+
+		/* authenc */
+		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+				      CRYPTO_ALG_ASYNC;
+		cra->setkey = aead_setkey;
+		cra->setauthsize = aead_setauthsize;
+		cra->encrypt = aead_encrypt;
+		cra->decrypt = aead_decrypt;
+		cra->init = init_tfm_aead;
+		cra->exit = exit_tfm_aead;
+
+		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+		cra->base.cra_module = THIS_MODULE;
+		cra->base.cra_alignmask = 3;
+		cra->base.cra_priority = 300;
+
+		if (crypto_register_aead(cra))
+			printk(KERN_ERR "Failed to register '%s'\n",
+				cra->base.cra_driver_name);
+		else
+			ixp4xx_aeads[i].registered = 1;
+	}
 	return 0;
 }
 
@@ -1481,6 +1478,11 @@
 	int num = ARRAY_SIZE(ixp4xx_algos);
 	int i;
 
+	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+		if (ixp4xx_aeads[i].registered)
+			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+	}
+
 	for (i=0; i< num; i++) {
 		if (ixp4xx_algos[i].registered)
 			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 1c6f98d..0643e33 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -533,7 +533,6 @@
 	.probe		= mv_cesa_probe,
 	.remove		= mv_cesa_remove,
 	.driver		= {
-		.owner	= THIS_MODULE,
 		.name	= "marvell-cesa",
 		.of_match_table = mv_cesa_of_match_table,
 	},
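Note: the .owner assignment can go because platform_driver_register() is a macro that already passes THIS_MODULE to __platform_driver_register(), so the core fills in the owner for every platform driver. A minimal sketch with hypothetical names, illustrative only:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int sketch_probe(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver sketch_driver = {
		.probe	= sketch_probe,
		.driver	= {
			.name = "sketch-device",	/* no .owner needed */
		},
	};
	module_platform_driver(sketch_driver);

	MODULE_LICENSE("GPL");
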
diff --git a/drivers/crypto/nx/Kconfig b/drivers/crypto/nx/Kconfig
index e421c96..ad7552a 100644
--- a/drivers/crypto/nx/Kconfig
+++ b/drivers/crypto/nx/Kconfig
@@ -14,11 +14,14 @@
 config CRYPTO_DEV_NX_COMPRESS
 	tristate "Compression acceleration support"
 	default y
+	select CRYPTO_ALGAPI
+	select 842_DECOMPRESS
 	help
 	  Support for PowerPC Nest (NX) compression acceleration. This
 	  module supports acceleration for compressing memory with the 842
-	  algorithm.  One of the platform drivers must be selected also.
-	  If you choose 'M' here, this module will be called nx_compress.
+	  algorithm using the cryptographic API.  One of the platform
+	  drivers must be selected also.  If you choose 'M' here, this
+	  module will be called nx_compress.
 
 if CRYPTO_DEV_NX_COMPRESS
 
@@ -42,14 +45,4 @@
 	  algorithm.  This supports NX hardware on the PowerNV platform.
 	  If you choose 'M' here, this module will be called nx_compress_powernv.
 
-config CRYPTO_DEV_NX_COMPRESS_CRYPTO
-	tristate "Compression acceleration cryptographic interface"
-	select CRYPTO_ALGAPI
-	select 842_DECOMPRESS
-	default y
-	help
-	  Support for PowerPC Nest (NX) accelerators using the cryptographic
-	  API.  If you choose 'M' here, this module will be called
-	  nx_compress_crypto.
-
 endif
diff --git a/drivers/crypto/nx/Makefile b/drivers/crypto/nx/Makefile
index e1684f5..b727821 100644
--- a/drivers/crypto/nx/Makefile
+++ b/drivers/crypto/nx/Makefile
@@ -10,12 +10,8 @@
 		  nx-sha256.o \
 		  nx-sha512.o
 
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS) += nx-compress.o nx-compress-platform.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o
-obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_CRYPTO) += nx-compress-crypto.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_PSERIES) += nx-compress-pseries.o nx-compress.o
+obj-$(CONFIG_CRYPTO_DEV_NX_COMPRESS_POWERNV) += nx-compress-powernv.o nx-compress.o
 nx-compress-objs := nx-842.o
-nx-compress-platform-objs := nx-842-platform.o
 nx-compress-pseries-objs := nx-842-pseries.o
 nx-compress-powernv-objs := nx-842-powernv.o
-nx-compress-crypto-objs := nx-842-crypto.o
diff --git a/drivers/crypto/nx/nx-842-crypto.c b/drivers/crypto/nx/nx-842-crypto.c
deleted file mode 100644
index d53a1dc..0000000
--- a/drivers/crypto/nx/nx-842-crypto.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Cryptographic API for the NX-842 hardware compression.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) IBM Corporation, 2011-2015
- *
- * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
- *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
- *
- * Rewrite: Dan Streetman <ddstreet@ieee.org>
- *
- * This is an interface to the NX-842 compression hardware in PowerPC
- * processors.  Most of the complexity of this drvier is due to the fact that
- * the NX-842 compression hardware requires the input and output data buffers
- * to be specifically aligned, to be a specific multiple in length, and within
- * specific minimum and maximum lengths.  Those restrictions, provided by the
- * nx-842 driver via nx842_constraints, mean this driver must use bounce
- * buffers and headers to correct misaligned in or out buffers, and to split
- * input buffers that are too large.
- *
- * This driver will fall back to software decompression if the hardware
- * decompression fails, so this driver's decompression should never fail as
- * long as the provided compressed buffer is valid.  Any compressed buffer
- * created by this driver will have a header (except ones where the input
- * perfectly matches the constraints); so users of this driver cannot simply
- * pass a compressed buffer created by this driver over to the 842 software
- * decompression library.  Instead, users must use this driver to decompress;
- * if the hardware fails or is unavailable, the compressed buffer will be
- * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
- * software decompression library.
- *
- * This does not fall back to software compression, however, since the caller
- * of this function is specifically requesting hardware compression; if the
- * hardware compression fails, the caller can fall back to software
- * compression, and the raw 842 compressed buffer that the software compressor
- * creates can be passed to this driver for hardware decompression; any
- * buffer without our specific header magic is assumed to be a raw 842 buffer
- * and passed directly to the hardware.  Note that the software compression
- * library will produce a compressed buffer that is incompatible with the
- * hardware decompressor if the original input buffer length is not a multiple
- * of 8; if such a compressed buffer is passed to this driver for
- * decompression, the hardware will reject it and this driver will then pass
- * it over to the software library for decompression.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/vmalloc.h>
-#include <linux/sw842.h>
-#include <linux/ratelimit.h>
-
-#include "nx-842.h"
-
-/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
- * template (see lib/842/842.h), so this magic number will never appear at
- * the start of a raw 842 compressed buffer.  That is important, as any buffer
- * passed to us without this magic is assumed to be a raw 842 compressed
- * buffer, and passed directly to the hardware to decompress.
- */
-#define NX842_CRYPTO_MAGIC	(0xf842)
-#define NX842_CRYPTO_GROUP_MAX	(0x20)
-#define NX842_CRYPTO_HEADER_SIZE(g)				\
-	(sizeof(struct nx842_crypto_header) +			\
-	 sizeof(struct nx842_crypto_header_group) * (g))
-#define NX842_CRYPTO_HEADER_MAX_SIZE				\
-	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
-
-/* bounce buffer size */
-#define BOUNCE_BUFFER_ORDER	(2)
-#define BOUNCE_BUFFER_SIZE					\
-	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
-
-/* try longer on comp because we can fallback to sw decomp if hw is busy */
-#define COMP_BUSY_TIMEOUT	(250) /* ms */
-#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
-
-struct nx842_crypto_header_group {
-	__be16 padding;			/* unused bytes at start of group */
-	__be32 compressed_length;	/* compressed bytes in group */
-	__be32 uncompressed_length;	/* bytes after decompression */
-} __packed;
-
-struct nx842_crypto_header {
-	__be16 magic;		/* NX842_CRYPTO_MAGIC */
-	__be16 ignore;		/* decompressed end bytes to ignore */
-	u8 groups;		/* total groups in this header */
-	struct nx842_crypto_header_group group[];
-} __packed;
-
-struct nx842_crypto_param {
-	u8 *in;
-	unsigned int iremain;
-	u8 *out;
-	unsigned int oremain;
-	unsigned int ototal;
-};
-
-static int update_param(struct nx842_crypto_param *p,
-			unsigned int slen, unsigned int dlen)
-{
-	if (p->iremain < slen)
-		return -EOVERFLOW;
-	if (p->oremain < dlen)
-		return -ENOSPC;
-
-	p->in += slen;
-	p->iremain -= slen;
-	p->out += dlen;
-	p->oremain -= dlen;
-	p->ototal += dlen;
-
-	return 0;
-}
-
-struct nx842_crypto_ctx {
-	u8 *wmem;
-	u8 *sbounce, *dbounce;
-
-	struct nx842_crypto_header header;
-	struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
-};
-
-static int nx842_crypto_init(struct crypto_tfm *tfm)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	ctx->wmem = kmalloc(nx842_workmem_size(), GFP_KERNEL);
-	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
-	if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
-		kfree(ctx->wmem);
-		free_page((unsigned long)ctx->sbounce);
-		free_page((unsigned long)ctx->dbounce);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void nx842_crypto_exit(struct crypto_tfm *tfm)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	kfree(ctx->wmem);
-	free_page((unsigned long)ctx->sbounce);
-	free_page((unsigned long)ctx->dbounce);
-}
-
-static int read_constraints(struct nx842_constraints *c)
-{
-	int ret;
-
-	ret = nx842_constraints(c);
-	if (ret) {
-		pr_err_ratelimited("could not get nx842 constraints : %d\n",
-				   ret);
-		return ret;
-	}
-
-	/* limit maximum, to always have enough bounce buffer to decompress */
-	if (c->maximum > BOUNCE_BUFFER_SIZE) {
-		c->maximum = BOUNCE_BUFFER_SIZE;
-		pr_info_once("limiting nx842 maximum to %x\n", c->maximum);
-	}
-
-	return 0;
-}
-
-static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
-{
-	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-
-	/* compress should have added space for header */
-	if (s > be16_to_cpu(hdr->group[0].padding)) {
-		pr_err("Internal error: no space for header\n");
-		return -EINVAL;
-	}
-
-	memcpy(buf, hdr, s);
-
-	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
-
-	return 0;
-}
-
-static int compress(struct nx842_crypto_ctx *ctx,
-		    struct nx842_crypto_param *p,
-		    struct nx842_crypto_header_group *g,
-		    struct nx842_constraints *c,
-		    u16 *ignore,
-		    unsigned int hdrsize)
-{
-	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
-	unsigned int adj_slen = slen;
-	u8 *src = p->in, *dst = p->out;
-	int ret, dskip = 0;
-	ktime_t timeout;
-
-	if (p->iremain == 0)
-		return -EOVERFLOW;
-
-	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
-		return -ENOSPC;
-
-	if (slen % c->multiple)
-		adj_slen = round_up(slen, c->multiple);
-	if (slen < c->minimum)
-		adj_slen = c->minimum;
-	if (slen > c->maximum)
-		adj_slen = slen = c->maximum;
-	if (adj_slen > slen || (u64)src % c->alignment) {
-		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
-		slen = min(slen, BOUNCE_BUFFER_SIZE);
-		if (adj_slen > slen)
-			memset(ctx->sbounce + slen, 0, adj_slen - slen);
-		memcpy(ctx->sbounce, src, slen);
-		src = ctx->sbounce;
-		slen = adj_slen;
-		pr_debug("using comp sbounce buffer, len %x\n", slen);
-	}
-
-	dst += hdrsize;
-	dlen -= hdrsize;
-
-	if ((u64)dst % c->alignment) {
-		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
-		dst += dskip;
-		dlen -= dskip;
-	}
-	if (dlen % c->multiple)
-		dlen = round_down(dlen, c->multiple);
-	if (dlen < c->minimum) {
-nospc:
-		dst = ctx->dbounce;
-		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
-		dlen = round_down(dlen, c->multiple);
-		dskip = 0;
-		pr_debug("using comp dbounce buffer, len %x\n", dlen);
-	}
-	if (dlen > c->maximum)
-		dlen = c->maximum;
-
-	tmplen = dlen;
-	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
-	do {
-		dlen = tmplen; /* reset dlen, if we're retrying */
-		ret = nx842_compress(src, slen, dst, &dlen, ctx->wmem);
-		/* possibly we should reduce the slen here, instead of
-		 * retrying with the dbounce buffer?
-		 */
-		if (ret == -ENOSPC && dst != ctx->dbounce)
-			goto nospc;
-	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-	if (ret)
-		return ret;
-
-	dskip += hdrsize;
-
-	if (dst == ctx->dbounce)
-		memcpy(p->out + dskip, dst, dlen);
-
-	g->padding = cpu_to_be16(dskip);
-	g->compressed_length = cpu_to_be32(dlen);
-	g->uncompressed_length = cpu_to_be32(slen);
-
-	if (p->iremain < slen) {
-		*ignore = slen - p->iremain;
-		slen = p->iremain;
-	}
-
-	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
-		 slen, *ignore, dlen, dskip);
-
-	return update_param(p, slen, dskip + dlen);
-}
-
-static int nx842_crypto_compress(struct crypto_tfm *tfm,
-				 const u8 *src, unsigned int slen,
-				 u8 *dst, unsigned int *dlen)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct nx842_crypto_header *hdr = &ctx->header;
-	struct nx842_crypto_param p;
-	struct nx842_constraints c;
-	unsigned int groups, hdrsize, h;
-	int ret, n;
-	bool add_header;
-	u16 ignore = 0;
-
-	p.in = (u8 *)src;
-	p.iremain = slen;
-	p.out = dst;
-	p.oremain = *dlen;
-	p.ototal = 0;
-
-	*dlen = 0;
-
-	ret = read_constraints(&c);
-	if (ret)
-		return ret;
-
-	groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
-		       DIV_ROUND_UP(p.iremain, c.maximum));
-	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
-
-	/* skip adding header if the buffers meet all constraints */
-	add_header = (p.iremain % c.multiple	||
-		      p.iremain < c.minimum	||
-		      p.iremain > c.maximum	||
-		      (u64)p.in % c.alignment	||
-		      p.oremain % c.multiple	||
-		      p.oremain < c.minimum	||
-		      p.oremain > c.maximum	||
-		      (u64)p.out % c.alignment);
-
-	hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
-	hdr->groups = 0;
-	hdr->ignore = 0;
-
-	while (p.iremain > 0) {
-		n = hdr->groups++;
-		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
-			return -ENOSPC;
-
-		/* header goes before first group */
-		h = !n && add_header ? hdrsize : 0;
-
-		if (ignore)
-			pr_warn("interal error, ignore is set %x\n", ignore);
-
-		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
-		if (ret)
-			return ret;
-	}
-
-	if (!add_header && hdr->groups > 1) {
-		pr_err("Internal error: No header but multiple groups\n");
-		return -EINVAL;
-	}
-
-	/* ignore indicates the input stream needed to be padded */
-	hdr->ignore = cpu_to_be16(ignore);
-	if (ignore)
-		pr_debug("marked %d bytes as ignore\n", ignore);
-
-	if (add_header)
-		ret = nx842_crypto_add_header(hdr, dst);
-	if (ret)
-		return ret;
-
-	*dlen = p.ototal;
-
-	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
-
-	return 0;
-}
-
-static int decompress(struct nx842_crypto_ctx *ctx,
-		      struct nx842_crypto_param *p,
-		      struct nx842_crypto_header_group *g,
-		      struct nx842_constraints *c,
-		      u16 ignore,
-		      bool usehw)
-{
-	unsigned int slen = be32_to_cpu(g->compressed_length);
-	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
-	unsigned int dlen = p->oremain, tmplen;
-	unsigned int adj_slen = slen;
-	u8 *src = p->in, *dst = p->out;
-	u16 padding = be16_to_cpu(g->padding);
-	int ret, spadding = 0, dpadding = 0;
-	ktime_t timeout;
-
-	if (!slen || !required_len)
-		return -EINVAL;
-
-	if (p->iremain <= 0 || padding + slen > p->iremain)
-		return -EOVERFLOW;
-
-	if (p->oremain <= 0 || required_len - ignore > p->oremain)
-		return -ENOSPC;
-
-	src += padding;
-
-	if (!usehw)
-		goto usesw;
-
-	if (slen % c->multiple)
-		adj_slen = round_up(slen, c->multiple);
-	if (slen < c->minimum)
-		adj_slen = c->minimum;
-	if (slen > c->maximum)
-		goto usesw;
-	if (slen < adj_slen || (u64)src % c->alignment) {
-		/* we can append padding bytes because the 842 format defines
-		 * an "end" template (see lib/842/842_decompress.c) and will
-		 * ignore any bytes following it.
-		 */
-		if (slen < adj_slen)
-			memset(ctx->sbounce + slen, 0, adj_slen - slen);
-		memcpy(ctx->sbounce, src, slen);
-		src = ctx->sbounce;
-		spadding = adj_slen - slen;
-		slen = adj_slen;
-		pr_debug("using decomp sbounce buffer, len %x\n", slen);
-	}
-
-	if (dlen % c->multiple)
-		dlen = round_down(dlen, c->multiple);
-	if (dlen < required_len || (u64)dst % c->alignment) {
-		dst = ctx->dbounce;
-		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
-		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
-	}
-	if (dlen < c->minimum)
-		goto usesw;
-	if (dlen > c->maximum)
-		dlen = c->maximum;
-
-	tmplen = dlen;
-	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
-	do {
-		dlen = tmplen; /* reset dlen, if we're retrying */
-		ret = nx842_decompress(src, slen, dst, &dlen, ctx->wmem);
-	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
-	if (ret) {
-usesw:
-		/* reset everything, sw doesn't have constraints */
-		src = p->in + padding;
-		slen = be32_to_cpu(g->compressed_length);
-		spadding = 0;
-		dst = p->out;
-		dlen = p->oremain;
-		dpadding = 0;
-		if (dlen < required_len) { /* have ignore bytes */
-			dst = ctx->dbounce;
-			dlen = BOUNCE_BUFFER_SIZE;
-		}
-		pr_info_ratelimited("using software 842 decompression\n");
-		ret = sw842_decompress(src, slen, dst, &dlen);
-	}
-	if (ret)
-		return ret;
-
-	slen -= spadding;
-
-	dlen -= ignore;
-	if (ignore)
-		pr_debug("ignoring last %x bytes\n", ignore);
-
-	if (dst == ctx->dbounce)
-		memcpy(p->out, dst, dlen);
-
-	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
-		 slen, padding, dlen, ignore);
-
-	return update_param(p, slen + padding, dlen);
-}
-
-static int nx842_crypto_decompress(struct crypto_tfm *tfm,
-				   const u8 *src, unsigned int slen,
-				   u8 *dst, unsigned int *dlen)
-{
-	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct nx842_crypto_header *hdr;
-	struct nx842_crypto_param p;
-	struct nx842_constraints c;
-	int n, ret, hdr_len;
-	u16 ignore = 0;
-	bool usehw = true;
-
-	p.in = (u8 *)src;
-	p.iremain = slen;
-	p.out = dst;
-	p.oremain = *dlen;
-	p.ototal = 0;
-
-	*dlen = 0;
-
-	if (read_constraints(&c))
-		usehw = false;
-
-	hdr = (struct nx842_crypto_header *)src;
-
-	/* If it doesn't start with our header magic number, assume it's a raw
-	 * 842 compressed buffer and pass it directly to the hardware driver
-	 */
-	if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
-		struct nx842_crypto_header_group g = {
-			.padding =		0,
-			.compressed_length =	cpu_to_be32(p.iremain),
-			.uncompressed_length =	cpu_to_be32(p.oremain),
-		};
-
-		ret = decompress(ctx, &p, &g, &c, 0, usehw);
-		if (ret)
-			return ret;
-
-		*dlen = p.ototal;
-
-		return 0;
-	}
-
-	if (!hdr->groups) {
-		pr_err("header has no groups\n");
-		return -EINVAL;
-	}
-	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
-		pr_err("header has too many groups %x, max %x\n",
-		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
-		return -EINVAL;
-	}
-
-	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
-	if (hdr_len > slen)
-		return -EOVERFLOW;
-
-	memcpy(&ctx->header, src, hdr_len);
-	hdr = &ctx->header;
-
-	for (n = 0; n < hdr->groups; n++) {
-		/* ignore applies to last group */
-		if (n + 1 == hdr->groups)
-			ignore = be16_to_cpu(hdr->ignore);
-
-		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore, usehw);
-		if (ret)
-			return ret;
-	}
-
-	*dlen = p.ototal;
-
-	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
-
-	return 0;
-}
-
-static struct crypto_alg alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-nx",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= nx842_crypto_init,
-	.cra_exit		= nx842_crypto_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= nx842_crypto_compress,
-	.coa_decompress		= nx842_crypto_decompress } }
-};
-
-static int __init nx842_crypto_mod_init(void)
-{
-	return crypto_register_alg(&alg);
-}
-module_init(nx842_crypto_mod_init);
-
-static void __exit nx842_crypto_mod_exit(void)
-{
-	crypto_unregister_alg(&alg);
-}
-module_exit(nx842_crypto_mod_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Interface");
-MODULE_ALIAS_CRYPTO("842");
-MODULE_ALIAS_CRYPTO("842-nx");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
diff --git a/drivers/crypto/nx/nx-842-platform.c b/drivers/crypto/nx/nx-842-platform.c
deleted file mode 100644
index 664f13d..0000000
--- a/drivers/crypto/nx/nx-842-platform.c
+++ /dev/null
@@ -1,84 +0,0 @@
-
-#include "nx-842.h"
-
-/* this is needed, separate from the main nx-842.c driver, because that main
- * driver loads the platform drivers during its init(), and it expects one
- * (or none) of the platform drivers to set this pointer to its driver.
- * That means this pointer can't be in the main nx-842 driver, because it
- * wouldn't be accessible until after the main driver loaded, which wouldn't
- * be possible as it's waiting for the platform driver to load.  So place it
- * here.
- */
-static struct nx842_driver *driver;
-static DEFINE_SPINLOCK(driver_lock);
-
-struct nx842_driver *nx842_platform_driver(void)
-{
-	return driver;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver);
-
-bool nx842_platform_driver_set(struct nx842_driver *_driver)
-{
-	bool ret = false;
-
-	spin_lock(&driver_lock);
-
-	if (!driver) {
-		driver = _driver;
-		ret = true;
-	} else
-		WARN(1, "can't set platform driver, already set to %s\n",
-		     driver->name);
-
-	spin_unlock(&driver_lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_set);
-
-/* only call this from the platform driver exit function */
-void nx842_platform_driver_unset(struct nx842_driver *_driver)
-{
-	spin_lock(&driver_lock);
-
-	if (driver == _driver)
-		driver = NULL;
-	else if (driver)
-		WARN(1, "can't unset platform driver %s, currently set to %s\n",
-		     _driver->name, driver->name);
-	else
-		WARN(1, "can't unset platform driver, already unset\n");
-
-	spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_unset);
-
-bool nx842_platform_driver_get(void)
-{
-	bool ret = false;
-
-	spin_lock(&driver_lock);
-
-	if (driver)
-		ret = try_module_get(driver->owner);
-
-	spin_unlock(&driver_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_get);
-
-void nx842_platform_driver_put(void)
-{
-	spin_lock(&driver_lock);
-
-	if (driver)
-		module_put(driver->owner);
-
-	spin_unlock(&driver_lock);
-}
-EXPORT_SYMBOL_GPL(nx842_platform_driver_put);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression platform driver");
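Note: nx-842-platform.c existed only so the shared frontend could find whichever platform module had loaded and pin it with try_module_get(). With each platform module now registering the "842"/"842-nx" crypto_alg itself (see the powernv and pseries hunks below), the crypto core's normal module refcounting covers this, and consumers just allocate the algorithm by name. A sketch of such a consumer, illustrative only:

	#include <linux/crypto.h>
	#include <linux/err.h>

	/* whichever platform module registered "842" is pinned by the
	 * crypto core for the lifetime of the tfm
	 */
	static int sketch_compress_842(const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
	{
		struct crypto_comp *tfm;
		int ret;

		tfm = crypto_alloc_comp("842", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		crypto_free_comp(tfm);
		return ret;
	}
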
diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c
index 33b3b0a..3750e13 100644
--- a/drivers/crypto/nx/nx-842-powernv.c
+++ b/drivers/crypto/nx/nx-842-powernv.c
@@ -26,6 +26,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM PowerNV processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 #define WORKMEM_ALIGN	(CRB_ALIGN)
 #define CSB_WAIT_MAX	(5000) /* ms */
@@ -344,7 +346,8 @@
 	}
 
 	/* successful completion */
-	pr_debug_ratelimited("Processed %u bytes in %lu us\n", csb->count,
+	pr_debug_ratelimited("Processed %u bytes in %lu us\n",
+			     be32_to_cpu(csb->count),
 			     (unsigned long)ktime_us_delta(now, start));
 
 	return 0;
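Note: the coprocessor status block fields are big-endian on the wire, so csb->count has to be converted before it is printed or used in arithmetic on a little-endian host, which is what this hunk adds. A tiny illustrative sketch with a hypothetical field:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* wire_count stands in for a __be32 CSB field such as csb->count */
	static inline u32 sketch_csb_count(const __be32 *wire_count)
	{
		return be32_to_cpu(*wire_count);
	}
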
@@ -581,9 +584,29 @@
 	.decompress =	nx842_powernv_decompress,
 };
 
+static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_powernv_driver);
+}
+
+static struct crypto_alg nx842_powernv_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_powernv_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
 static __init int nx842_powernv_init(void)
 {
 	struct device_node *dn;
+	int ret;
 
 	/* verify workmem size/align restrictions */
 	BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN);
@@ -594,17 +617,14 @@
 	BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT);
 	BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT);
 
-	pr_info("loading\n");
-
 	for_each_compatible_node(dn, NULL, "ibm,power-nx")
 		nx842_powernv_probe(dn);
 
-	if (!nx842_ct) {
-		pr_err("no coprocessors found\n");
+	if (!nx842_ct)
 		return -ENODEV;
-	}
 
-	if (!nx842_platform_driver_set(&nx842_powernv_driver)) {
+	ret = crypto_register_alg(&nx842_powernv_alg);
+	if (ret) {
 		struct nx842_coproc *coproc, *n;
 
 		list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
@@ -612,11 +632,9 @@
 			kfree(coproc);
 		}
 
-		return -EEXIST;
+		return ret;
 	}
 
-	pr_info("loaded\n");
-
 	return 0;
 }
 module_init(nx842_powernv_init);
@@ -625,13 +643,11 @@
 {
 	struct nx842_coproc *coproc, *n;
 
-	nx842_platform_driver_unset(&nx842_powernv_driver);
+	crypto_unregister_alg(&nx842_powernv_alg);
 
 	list_for_each_entry_safe(coproc, n, &nx842_coprocs, list) {
 		list_del(&coproc->list);
 		kfree(coproc);
 	}
-
-	pr_info("unloaded\n");
 }
 module_exit(nx842_powernv_exit);
diff --git a/drivers/crypto/nx/nx-842-pseries.c b/drivers/crypto/nx/nx-842-pseries.c
index 3040a60..f4cbde0 100644
--- a/drivers/crypto/nx/nx-842-pseries.c
+++ b/drivers/crypto/nx/nx-842-pseries.c
@@ -29,6 +29,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
+MODULE_ALIAS_CRYPTO("842");
+MODULE_ALIAS_CRYPTO("842-nx");
 
 static struct nx842_constraints nx842_pseries_constraints = {
 	.alignment =	DDE_BUFFER_ALIGN,
@@ -99,11 +101,6 @@
 #define NX842_HW_PAGE_SIZE	(4096)
 #define NX842_HW_PAGE_MASK	(~(NX842_HW_PAGE_SIZE-1))
 
-enum nx842_status {
-	UNAVAILABLE,
-	AVAILABLE
-};
-
 struct ibm_nx842_counters {
 	atomic64_t comp_complete;
 	atomic64_t comp_failed;
@@ -121,7 +118,6 @@
 	unsigned int max_sg_len;
 	unsigned int max_sync_size;
 	unsigned int max_sync_sg;
-	enum nx842_status status;
 } __rcu *devdata;
 static DEFINE_SPINLOCK(devdata_mutex);
 
@@ -230,9 +226,12 @@
 	switch (csb->completion_code) {
 	case 0:	/* Completed without error */
 		break;
-	case 64: /* Target bytes > Source bytes during compression */
+	case 64: /* Compression ok, but output larger than input */
+		dev_dbg(dev, "%s: output size larger than input size\n",
+					__func__);
+		break;
 	case 13: /* Output buffer too small */
-		dev_dbg(dev, "%s: Compression output larger than input\n",
+		dev_dbg(dev, "%s: Out of space in output buffer\n",
 					__func__);
 		return -ENOSPC;
 	case 66: /* Input data contains an illegal template field */
@@ -537,41 +536,36 @@
 		devdata->max_sync_size = 0;
 		devdata->max_sync_sg = 0;
 		devdata->max_sg_len = 0;
-		devdata->status = UNAVAILABLE;
 		return 0;
 	} else
 		return -ENOENT;
 }
 
 /**
- * nx842_OF_upd_status -- Update the device info from OF status prop
+ * nx842_OF_upd_status -- Check the device info from OF status prop
  *
  * The status property indicates if the accelerator is enabled.  If the
  * device is in the OF tree it indicates that the hardware is present.
  * The status field indicates if the device is enabled when the status
  * is 'okay'.  Otherwise the device driver will be disabled.
  *
- * @devdata - struct nx842_devdata to update
  * @prop - struct property point containing the maxsyncop for the update
  *
  * Returns:
  *  0 - Device is available
- *  -EINVAL - Device is not available
+ *  -ENODEV - Device is not available
  */
-static int nx842_OF_upd_status(struct nx842_devdata *devdata,
-					struct property *prop) {
-	int ret = 0;
+static int nx842_OF_upd_status(struct property *prop)
+{
 	const char *status = (const char *)prop->value;
 
-	if (!strncmp(status, "okay", (size_t)prop->length)) {
-		devdata->status = AVAILABLE;
-	} else {
-		dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
-				__func__, status);
-		devdata->status = UNAVAILABLE;
-	}
+	if (!strncmp(status, "okay", (size_t)prop->length))
+		return 0;
+	if (!strncmp(status, "disabled", (size_t)prop->length))
+		return -ENODEV;
+	dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status);
 
-	return ret;
+	return -EINVAL;
 }
 
 /**
@@ -735,6 +729,10 @@
 	int ret = 0;
 	unsigned long flags;
 
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -744,16 +742,10 @@
 	if (!old_devdata || !of_node) {
 		pr_err("%s: device is not available\n", __func__);
 		spin_unlock_irqrestore(&devdata_mutex, flags);
+		kfree(new_devdata);
 		return -ENODEV;
 	}
 
-	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-	if (!new_devdata) {
-		dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
-		ret = -ENOMEM;
-		goto error_out;
-	}
-
 	memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
 	new_devdata->counters = old_devdata->counters;
 
@@ -777,7 +769,7 @@
 		goto out;
 
 	/* Perform property updates */
-	ret = nx842_OF_upd_status(new_devdata, status);
+	ret = nx842_OF_upd_status(status);
 	if (ret)
 		goto error_out;
 
@@ -970,13 +962,43 @@
 	.decompress =	nx842_pseries_decompress,
 };
 
-static int __init nx842_probe(struct vio_dev *viodev,
-				  const struct vio_device_id *id)
+static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+{
+	return nx842_crypto_init(tfm, &nx842_pseries_driver);
+}
+
+static struct crypto_alg nx842_pseries_alg = {
+	.cra_name		= "842",
+	.cra_driver_name	= "842-nx",
+	.cra_priority		= 300,
+	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
+	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_init		= nx842_pseries_crypto_init,
+	.cra_exit		= nx842_crypto_exit,
+	.cra_u			= { .compress = {
+	.coa_compress		= nx842_crypto_compress,
+	.coa_decompress		= nx842_crypto_decompress } }
+};
+
+static int nx842_probe(struct vio_dev *viodev,
+		       const struct vio_device_id *id)
 {
 	struct nx842_devdata *old_devdata, *new_devdata = NULL;
 	unsigned long flags;
 	int ret = 0;
 
+	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
+	if (!new_devdata)
+		return -ENOMEM;
+
+	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
+			GFP_NOFS);
+	if (!new_devdata->counters) {
+		kfree(new_devdata);
+		return -ENOMEM;
+	}
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -989,21 +1011,6 @@
 
 	dev_set_drvdata(&viodev->dev, NULL);
 
-	new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
-	if (!new_devdata) {
-		dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
-		ret = -ENOMEM;
-		goto error_unlock;
-	}
-
-	new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
-			GFP_NOFS);
-	if (!new_devdata->counters) {
-		dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
-		ret = -ENOMEM;
-		goto error_unlock;
-	}
-
 	new_devdata->vdev = viodev;
 	new_devdata->dev = &viodev->dev;
 	nx842_OF_set_defaults(new_devdata);
@@ -1016,9 +1023,12 @@
 	of_reconfig_notifier_register(&nx842_of_nb);
 
 	ret = nx842_OF_upd(NULL);
-	if (ret && ret != -ENODEV) {
-		dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
-		ret = -1;
+	if (ret)
+		goto error;
+
+	ret = crypto_register_alg(&nx842_pseries_alg);
+	if (ret) {
+		dev_err(&viodev->dev, "could not register comp alg: %d\n", ret);
 		goto error;
 	}
 
@@ -1043,7 +1053,7 @@
 	return ret;
 }
 
-static int __exit nx842_remove(struct vio_dev *viodev)
+static int nx842_remove(struct vio_dev *viodev)
 {
 	struct nx842_devdata *old_devdata;
 	unsigned long flags;
@@ -1051,6 +1061,8 @@
 	pr_info("Removing IBM Power 842 compression device\n");
 	sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
 
+	crypto_unregister_alg(&nx842_pseries_alg);
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -1074,18 +1086,16 @@
 static struct vio_driver nx842_vio_driver = {
 	.name = KBUILD_MODNAME,
 	.probe = nx842_probe,
-	.remove = __exit_p(nx842_remove),
+	.remove = nx842_remove,
 	.get_desired_dma = nx842_get_desired_dma,
 	.id_table = nx842_vio_driver_ids,
 };
 
-static int __init nx842_init(void)
+static int __init nx842_pseries_init(void)
 {
 	struct nx842_devdata *new_devdata;
 	int ret;
 
-	pr_info("Registering IBM Power 842 compression driver\n");
-
 	if (!of_find_compatible_node(NULL, NULL, "ibm,compression"))
 		return -ENODEV;
 
@@ -1095,7 +1105,6 @@
 		pr_err("Could not allocate memory for device data\n");
 		return -ENOMEM;
 	}
-	new_devdata->status = UNAVAILABLE;
 	RCU_INIT_POINTER(devdata, new_devdata);
 
 	ret = vio_register_driver(&nx842_vio_driver);
@@ -1106,24 +1115,18 @@
 		return ret;
 	}
 
-	if (!nx842_platform_driver_set(&nx842_pseries_driver)) {
-		vio_unregister_driver(&nx842_vio_driver);
-		kfree(new_devdata);
-		return -EEXIST;
-	}
-
 	return 0;
 }
 
-module_init(nx842_init);
+module_init(nx842_pseries_init);
 
-static void __exit nx842_exit(void)
+static void __exit nx842_pseries_exit(void)
 {
 	struct nx842_devdata *old_devdata;
 	unsigned long flags;
 
-	pr_info("Exiting IBM Power 842 compression driver\n");
-	nx842_platform_driver_unset(&nx842_pseries_driver);
+	crypto_unregister_alg(&nx842_pseries_alg);
+
 	spin_lock_irqsave(&devdata_mutex, flags);
 	old_devdata = rcu_dereference_check(devdata,
 			lockdep_is_held(&devdata_mutex));
@@ -1136,5 +1139,5 @@
 	vio_unregister_driver(&nx842_vio_driver);
 }
 
-module_exit(nx842_exit);
+module_exit(nx842_pseries_exit);
 
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6e5e0d6..046c1c4 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -1,10 +1,5 @@
 /*
- * Driver frontend for IBM Power 842 compression accelerator
- *
- * Copyright (C) 2015 Dan Streetman, IBM Corp
- *
- * Designer of the Power data compression engine:
- *   Bulent Abali <abali@us.ibm.com>
+ * Cryptographic API for the NX-842 hardware compression.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -15,89 +10,522 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
+ *
+ * Copyright (C) IBM Corporation, 2011-2015
+ *
+ * Designer of the Power data compression engine:
+ *   Bulent Abali <abali@us.ibm.com>
+ *
+ * Original Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
+ *                   Seth Jennings <sjenning@linux.vnet.ibm.com>
+ *
+ * Rewrite: Dan Streetman <ddstreet@ieee.org>
+ *
+ * This is an interface to the NX-842 compression hardware in PowerPC
+ * processors.  Most of the complexity of this drvier is due to the fact that
+ * processors.  Most of the complexity of this driver is due to the fact that
+ * the NX-842 compression hardware requires the input and output data buffers
+ * to be specifically aligned, to be a specific multiple in length, and within
+ * specific minimum and maximum lengths.  Those restrictions, provided by the
+ * nx-842 driver via nx842_constraints, mean this driver must use bounce
+ * buffers and headers to correct misaligned in or out buffers, and to split
+ * input buffers that are too large.
+ *
+ * This driver will fall back to software decompression if the hardware
+ * decompression fails, so this driver's decompression should never fail as
+ * long as the provided compressed buffer is valid.  Any compressed buffer
+ * created by this driver will have a header (except ones where the input
+ * perfectly matches the constraints); so users of this driver cannot simply
+ * pass a compressed buffer created by this driver over to the 842 software
+ * decompression library.  Instead, users must use this driver to decompress;
+ * if the hardware fails or is unavailable, the compressed buffer will be
+ * parsed and the header removed, and the raw 842 buffer(s) passed to the 842
+ * software decompression library.
+ *
+ * This does not fall back to software compression, however, since the caller
+ * of this function is specifically requesting hardware compression; if the
+ * hardware compression fails, the caller can fall back to software
+ * compression, and the raw 842 compressed buffer that the software compressor
+ * creates can be passed to this driver for hardware decompression; any
+ * buffer without our specific header magic is assumed to be a raw 842 buffer
+ * and passed directly to the hardware.  Note that the software compression
+ * library will produce a compressed buffer that is incompatible with the
+ * hardware decompressor if the original input buffer length is not a multiple
+ * of 8; if such a compressed buffer is passed to this driver for
+ * decompression, the hardware will reject it and this driver will then pass
+ * it over to the software library for decompression.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/vmalloc.h>
+#include <linux/sw842.h>
+#include <linux/spinlock.h>
+
 #include "nx-842.h"
 
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
-MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
-
-/**
- * nx842_constraints
- *
- * This provides the driver's constraints.  Different nx842 implementations
- * may have varying requirements.  The constraints are:
- *   @alignment:	All buffers should be aligned to this
- *   @multiple:		All buffer lengths should be a multiple of this
- *   @minimum:		Buffer lengths must not be less than this amount
- *   @maximum:		Buffer lengths must not be more than this amount
- *
- * The constraints apply to all buffers and lengths, both input and output,
- * for both compression and decompression, except for the minimum which
- * only applies to compression input and decompression output; the
- * compressed data can be less than the minimum constraint.  It can be
- * assumed that compressed data will always adhere to the multiple
- * constraint.
- *
- * The driver may succeed even if these constraints are violated;
- * however the driver can return failure or suffer reduced performance
- * if any constraint is not met.
+/* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit
+ * template (see lib/842/842.h), so this magic number will never appear at
+ * the start of a raw 842 compressed buffer.  That is important, as any buffer
+ * passed to us without this magic is assumed to be a raw 842 compressed
+ * buffer, and passed directly to the hardware to decompress.
  */
-int nx842_constraints(struct nx842_constraints *c)
+#define NX842_CRYPTO_MAGIC	(0xf842)
+#define NX842_CRYPTO_HEADER_SIZE(g)				\
+	(sizeof(struct nx842_crypto_header) +			\
+	 sizeof(struct nx842_crypto_header_group) * (g))
+#define NX842_CRYPTO_HEADER_MAX_SIZE				\
+	NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX)
+
+/* bounce buffer size */
+#define BOUNCE_BUFFER_ORDER	(2)
+#define BOUNCE_BUFFER_SIZE					\
+	((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER))
+
+/* try longer on comp because we can fallback to sw decomp if hw is busy */
+#define COMP_BUSY_TIMEOUT	(250) /* ms */
+#define DECOMP_BUSY_TIMEOUT	(50) /* ms */
+
+struct nx842_crypto_param {
+	u8 *in;
+	unsigned int iremain;
+	u8 *out;
+	unsigned int oremain;
+	unsigned int ototal;
+};
+
+static int update_param(struct nx842_crypto_param *p,
+			unsigned int slen, unsigned int dlen)
 {
-	memcpy(c, nx842_platform_driver()->constraints, sizeof(*c));
+	if (p->iremain < slen)
+		return -EOVERFLOW;
+	if (p->oremain < dlen)
+		return -ENOSPC;
+
+	p->in += slen;
+	p->iremain -= slen;
+	p->out += dlen;
+	p->oremain -= dlen;
+	p->ototal += dlen;
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(nx842_constraints);
 
-/**
- * nx842_workmem_size
- *
- * Get the amount of working memory the driver requires.
- */
-size_t nx842_workmem_size(void)
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver)
 {
-	return nx842_platform_driver()->workmem_size;
-}
-EXPORT_SYMBOL_GPL(nx842_workmem_size);
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
 
-int nx842_compress(const unsigned char *in, unsigned int ilen,
-		   unsigned char *out, unsigned int *olen, void *wmem)
-{
-	return nx842_platform_driver()->compress(in, ilen, out, olen, wmem);
-}
-EXPORT_SYMBOL_GPL(nx842_compress);
-
-int nx842_decompress(const unsigned char *in, unsigned int ilen,
-		     unsigned char *out, unsigned int *olen, void *wmem)
-{
-	return nx842_platform_driver()->decompress(in, ilen, out, olen, wmem);
-}
-EXPORT_SYMBOL_GPL(nx842_decompress);
-
-static __init int nx842_init(void)
-{
-	request_module("nx-compress-powernv");
-	request_module("nx-compress-pseries");
-
-	/* we prevent loading if there's no platform driver, and we get the
-	 * module that set it so it won't unload, so we don't need to check
-	 * if it's set in any of the above functions
-	 */
-	if (!nx842_platform_driver_get()) {
-		pr_err("no nx842 driver found.\n");
-		return -ENODEV;
+	spin_lock_init(&ctx->lock);
+	ctx->driver = driver;
+	ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL);
+	ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+	ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER);
+	if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) {
+		kfree(ctx->wmem);
+		free_page((unsigned long)ctx->sbounce);
+		free_page((unsigned long)ctx->dbounce);
+		return -ENOMEM;
 	}
 
 	return 0;
 }
-module_init(nx842_init);
+EXPORT_SYMBOL_GPL(nx842_crypto_init);
 
-static void __exit nx842_exit(void)
+void nx842_crypto_exit(struct crypto_tfm *tfm)
 {
-	nx842_platform_driver_put();
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	kfree(ctx->wmem);
+	free_page((unsigned long)ctx->sbounce);
+	free_page((unsigned long)ctx->dbounce);
 }
-module_exit(nx842_exit);
+EXPORT_SYMBOL_GPL(nx842_crypto_exit);
+
+static void check_constraints(struct nx842_constraints *c)
+{
+	/* limit maximum, to always have enough bounce buffer to decompress */
+	if (c->maximum > BOUNCE_BUFFER_SIZE)
+		c->maximum = BOUNCE_BUFFER_SIZE;
+}
+
+static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf)
+{
+	int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+
+	/* compress should have added space for header */
+	if (s > be16_to_cpu(hdr->group[0].padding)) {
+		pr_err("Internal error: no space for header\n");
+		return -EINVAL;
+	}
+
+	memcpy(buf, hdr, s);
+
+	print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0);
+
+	return 0;
+}
+
+static int compress(struct nx842_crypto_ctx *ctx,
+		    struct nx842_crypto_param *p,
+		    struct nx842_crypto_header_group *g,
+		    struct nx842_constraints *c,
+		    u16 *ignore,
+		    unsigned int hdrsize)
+{
+	unsigned int slen = p->iremain, dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	int ret, dskip = 0;
+	ktime_t timeout;
+
+	if (p->iremain == 0)
+		return -EOVERFLOW;
+
+	if (p->oremain == 0 || hdrsize + c->minimum > dlen)
+		return -ENOSPC;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		adj_slen = slen = c->maximum;
+	if (adj_slen > slen || (u64)src % c->alignment) {
+		adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE);
+		slen = min(slen, BOUNCE_BUFFER_SIZE);
+		if (adj_slen > slen)
+			memset(ctx->sbounce + slen, 0, adj_slen - slen);
+		memcpy(ctx->sbounce, src, slen);
+		src = ctx->sbounce;
+		slen = adj_slen;
+		pr_debug("using comp sbounce buffer, len %x\n", slen);
+	}
+
+	dst += hdrsize;
+	dlen -= hdrsize;
+
+	if ((u64)dst % c->alignment) {
+		dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst);
+		dst += dskip;
+		dlen -= dskip;
+	}
+	if (dlen % c->multiple)
+		dlen = round_down(dlen, c->multiple);
+	if (dlen < c->minimum) {
+nospc:
+		dst = ctx->dbounce;
+		dlen = min(p->oremain, BOUNCE_BUFFER_SIZE);
+		dlen = round_down(dlen, c->multiple);
+		dskip = 0;
+		pr_debug("using comp dbounce buffer, len %x\n", dlen);
+	}
+	if (dlen > c->maximum)
+		dlen = c->maximum;
+
+	tmplen = dlen;
+	timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT);
+	do {
+		dlen = tmplen; /* reset dlen, if we're retrying */
+		ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem);
+		/* possibly we should reduce the slen here, instead of
+		 * retrying with the dbounce buffer?
+		 */
+		if (ret == -ENOSPC && dst != ctx->dbounce)
+			goto nospc;
+	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+	if (ret)
+		return ret;
+
+	dskip += hdrsize;
+
+	if (dst == ctx->dbounce)
+		memcpy(p->out + dskip, dst, dlen);
+
+	g->padding = cpu_to_be16(dskip);
+	g->compressed_length = cpu_to_be32(dlen);
+	g->uncompressed_length = cpu_to_be32(slen);
+
+	if (p->iremain < slen) {
+		*ignore = slen - p->iremain;
+		slen = p->iremain;
+	}
+
+	pr_debug("compress slen %x ignore %x dlen %x padding %x\n",
+		 slen, *ignore, dlen, dskip);
+
+	return update_param(p, slen, dskip + dlen);
+}
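Note: compress() has to satisfy four constraints supplied by the platform driver (alignment, length multiple, minimum and maximum) before handing buffers to the hardware, falling back to the bounce buffers when it cannot. The source-length adjustment it applies, restated as a standalone helper for clarity (illustrative only; the real code additionally bounces the data whenever the adjusted length differs from slen or the pointer is misaligned):

	#include <linux/kernel.h>
	#include "nx-842.h"

	/* round up to the constraint multiple, enforce the minimum,
	 * clamp to the maximum
	 */
	static unsigned int sketch_adjust_slen(unsigned int slen,
					       const struct nx842_constraints *c)
	{
		unsigned int adj = slen;

		if (slen % c->multiple)
			adj = round_up(slen, c->multiple);
		if (slen < c->minimum)
			adj = c->minimum;
		if (slen > c->maximum)
			adj = c->maximum;
		return adj;
	}
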
+
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+			  const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct nx842_crypto_header *hdr = &ctx->header;
+	struct nx842_crypto_param p;
+	struct nx842_constraints c = *ctx->driver->constraints;
+	unsigned int groups, hdrsize, h;
+	int ret, n;
+	bool add_header;
+	u16 ignore = 0;
+
+	check_constraints(&c);
+
+	p.in = (u8 *)src;
+	p.iremain = slen;
+	p.out = dst;
+	p.oremain = *dlen;
+	p.ototal = 0;
+
+	*dlen = 0;
+
+	groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX,
+		       DIV_ROUND_UP(p.iremain, c.maximum));
+	hdrsize = NX842_CRYPTO_HEADER_SIZE(groups);
+
+	spin_lock_bh(&ctx->lock);
+
+	/* skip adding header if the buffers meet all constraints */
+	add_header = (p.iremain % c.multiple	||
+		      p.iremain < c.minimum	||
+		      p.iremain > c.maximum	||
+		      (u64)p.in % c.alignment	||
+		      p.oremain % c.multiple	||
+		      p.oremain < c.minimum	||
+		      p.oremain > c.maximum	||
+		      (u64)p.out % c.alignment);
+
+	hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC);
+	hdr->groups = 0;
+	hdr->ignore = 0;
+
+	while (p.iremain > 0) {
+		n = hdr->groups++;
+		ret = -ENOSPC;
+		if (hdr->groups > NX842_CRYPTO_GROUP_MAX)
+			goto unlock;
+
+		/* header goes before first group */
+		h = !n && add_header ? hdrsize : 0;
+
+		if (ignore)
+			pr_warn("interal error, ignore is set %x\n", ignore);
+			pr_warn("internal error, ignore is set %x\n", ignore);
+
+		ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
+		if (ret)
+			goto unlock;
+	}
+
+	if (!add_header && hdr->groups > 1) {
+		pr_err("Internal error: No header but multiple groups\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* ignore indicates the input stream needed to be padded */
+	hdr->ignore = cpu_to_be16(ignore);
+	if (ignore)
+		pr_debug("marked %d bytes as ignore\n", ignore);
+
+	if (add_header)
+		ret = nx842_crypto_add_header(hdr, dst);
+	if (ret)
+		goto unlock;
+
+	*dlen = p.ototal;
+
+	pr_debug("compress total slen %x dlen %x\n", slen, *dlen);
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_compress);
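Note: a worked example for the grouping arithmetic above, taking c.maximum = 16 KiB for the sake of the example (with 4 KiB pages, BOUNCE_BUFFER_SIZE is 16 KiB and check_constraints() clamps the maximum to that): a 100 KiB input gives groups = min(NX842_CRYPTO_GROUP_MAX, DIV_ROUND_UP(102400, 16384)) = 7, and, when a header is needed at all, hdrsize = NX842_CRYPTO_HEADER_SIZE(7) = 5 + 7 * 10 = 75 bytes, using the packed header layouts shown in the removed nx-842-crypto.c above (now shared via nx-842.h). compress() reserves that space in front of the first group's output through the hdrsize/dskip handling.
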
+
+static int decompress(struct nx842_crypto_ctx *ctx,
+		      struct nx842_crypto_param *p,
+		      struct nx842_crypto_header_group *g,
+		      struct nx842_constraints *c,
+		      u16 ignore)
+{
+	unsigned int slen = be32_to_cpu(g->compressed_length);
+	unsigned int required_len = be32_to_cpu(g->uncompressed_length);
+	unsigned int dlen = p->oremain, tmplen;
+	unsigned int adj_slen = slen;
+	u8 *src = p->in, *dst = p->out;
+	u16 padding = be16_to_cpu(g->padding);
+	int ret, spadding = 0, dpadding = 0;
+	ktime_t timeout;
+
+	if (!slen || !required_len)
+		return -EINVAL;
+
+	if (p->iremain <= 0 || padding + slen > p->iremain)
+		return -EOVERFLOW;
+
+	if (p->oremain <= 0 || required_len - ignore > p->oremain)
+		return -ENOSPC;
+
+	src += padding;
+
+	if (slen % c->multiple)
+		adj_slen = round_up(slen, c->multiple);
+	if (slen < c->minimum)
+		adj_slen = c->minimum;
+	if (slen > c->maximum)
+		goto usesw;
+	if (slen < adj_slen || (u64)src % c->alignment) {
+		/* we can append padding bytes because the 842 format defines
+		 * an "end" template (see lib/842/842_decompress.c) and will
+		 * ignore any bytes following it.
+		 */
+		if (slen < adj_slen)
+			memset(ctx->sbounce + slen, 0, adj_slen - slen);
+		memcpy(ctx->sbounce, src, slen);
+		src = ctx->sbounce;
+		spadding = adj_slen - slen;
+		slen = adj_slen;
+		pr_debug("using decomp sbounce buffer, len %x\n", slen);
+	}
+
+	if (dlen % c->multiple)
+		dlen = round_down(dlen, c->multiple);
+	if (dlen < required_len || (u64)dst % c->alignment) {
+		dst = ctx->dbounce;
+		dlen = min(required_len, BOUNCE_BUFFER_SIZE);
+		pr_debug("using decomp dbounce buffer, len %x\n", dlen);
+	}
+	if (dlen < c->minimum)
+		goto usesw;
+	if (dlen > c->maximum)
+		dlen = c->maximum;
+
+	tmplen = dlen;
+	timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT);
+	do {
+		dlen = tmplen; /* reset dlen, if we're retrying */
+		ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem);
+	} while (ret == -EBUSY && ktime_before(ktime_get(), timeout));
+	if (ret) {
+usesw:
+		/* reset everything, sw doesn't have constraints */
+		src = p->in + padding;
+		slen = be32_to_cpu(g->compressed_length);
+		spadding = 0;
+		dst = p->out;
+		dlen = p->oremain;
+		dpadding = 0;
+		if (dlen < required_len) { /* have ignore bytes */
+			dst = ctx->dbounce;
+			dlen = BOUNCE_BUFFER_SIZE;
+		}
+		pr_info_ratelimited("using software 842 decompression\n");
+		ret = sw842_decompress(src, slen, dst, &dlen);
+	}
+	if (ret)
+		return ret;
+
+	slen -= spadding;
+
+	dlen -= ignore;
+	if (ignore)
+		pr_debug("ignoring last %x bytes\n", ignore);
+
+	if (dst == ctx->dbounce)
+		memcpy(p->out, dst, dlen);
+
+	pr_debug("decompress slen %x padding %x dlen %x ignore %x\n",
+		 slen, padding, dlen, ignore);
+
+	return update_param(p, slen + padding, dlen);
+}
+
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen)
+{
+	struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct nx842_crypto_header *hdr;
+	struct nx842_crypto_param p;
+	struct nx842_constraints c = *ctx->driver->constraints;
+	int n, ret, hdr_len;
+	u16 ignore = 0;
+
+	check_constraints(&c);
+
+	p.in = (u8 *)src;
+	p.iremain = slen;
+	p.out = dst;
+	p.oremain = *dlen;
+	p.ototal = 0;
+
+	*dlen = 0;
+
+	hdr = (struct nx842_crypto_header *)src;
+
+	spin_lock_bh(&ctx->lock);
+
+	/* If it doesn't start with our header magic number, assume it's a raw
+	 * 842 compressed buffer and pass it directly to the hardware driver
+	 */
+	if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) {
+		struct nx842_crypto_header_group g = {
+			.padding =		0,
+			.compressed_length =	cpu_to_be32(p.iremain),
+			.uncompressed_length =	cpu_to_be32(p.oremain),
+		};
+
+		ret = decompress(ctx, &p, &g, &c, 0);
+		if (ret)
+			goto unlock;
+
+		goto success;
+	}
+
+	if (!hdr->groups) {
+		pr_err("header has no groups\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (hdr->groups > NX842_CRYPTO_GROUP_MAX) {
+		pr_err("header has too many groups %x, max %x\n",
+		       hdr->groups, NX842_CRYPTO_GROUP_MAX);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups);
+	if (hdr_len > slen) {
+		ret = -EOVERFLOW;
+		goto unlock;
+	}
+
+	memcpy(&ctx->header, src, hdr_len);
+	hdr = &ctx->header;
+
+	for (n = 0; n < hdr->groups; n++) {
+		/* ignore applies to last group */
+		if (n + 1 == hdr->groups)
+			ignore = be16_to_cpu(hdr->ignore);
+
+		ret = decompress(ctx, &p, &hdr->group[n], &c, ignore);
+		if (ret)
+			goto unlock;
+	}
+
+success:
+	*dlen = p.ototal;
+
+	pr_debug("decompress total slen %x dlen %x\n", slen, *dlen);
+
+	ret = 0;
+
+unlock:
+	spin_unlock_bh(&ctx->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nx842_crypto_decompress);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver");
+MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
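
The compress path above packs its output as an optional header followed by up to NX842_CRYPTO_GROUP_MAX groups; each group records its padding, compressed length, and uncompressed length in big-endian fields, and the header's ignore count trims the padding appended to the last group. A minimal userspace-style sketch of walking that layout (struct layouts mirror nx842_crypto_header{,_group} declared in nx-842.h below; the helper name is hypothetical):

/* Sketch only, not driver code: reading the header emitted by
 * nx842_crypto_compress().  Layout mirrors the structs in nx-842.h. */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>			/* ntohs()/ntohl() for the __be16/__be32 fields */

struct hdr_group {
	uint16_t padding;		/* unused bytes before the group */
	uint32_t compressed_length;
	uint32_t uncompressed_length;
} __attribute__((packed));

struct hdr {
	uint16_t magic;			/* NX842_CRYPTO_MAGIC */
	uint16_t ignore;		/* decompressed tail bytes to drop */
	uint8_t groups;
	struct hdr_group group[];
} __attribute__((packed));

/* Total decompressed size described by a compressed buffer. */
static size_t total_uncompressed(const struct hdr *h)
{
	size_t total = 0;

	for (unsigned int i = 0; i < h->groups; i++)
		total += ntohl(h->group[i].uncompressed_length);
	return total - ntohs(h->ignore);	/* ignore applies to the last group */
}
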
diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h
index ac0ea79..a4eee3b 100644
--- a/drivers/crypto/nx/nx-842.h
+++ b/drivers/crypto/nx/nx-842.h
@@ -3,8 +3,9 @@
 #define __NX_842_H__
 
 #include <linux/kernel.h>
+#include <linux/init.h>
 #include <linux/module.h>
-#include <linux/sw842.h>
+#include <linux/crypto.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -104,6 +105,25 @@
 #define GET_FIELD(v, m)		(((v) & (m)) >> MASK_LSH(m))
 #define SET_FIELD(v, m, val)	(((v) & ~(m)) | (((val) << MASK_LSH(m)) & (m)))
 
+/**
+ * This provides the driver's constraints.  Different nx842 implementations
+ * may have varying requirements.  The constraints are:
+ *   @alignment:	All buffers should be aligned to this
+ *   @multiple:		All buffer lengths should be a multiple of this
+ *   @minimum:		Buffer lengths must not be less than this amount
+ *   @maximum:		Buffer lengths must not be more than this amount
+ *
+ * The constraints apply to all buffers and lengths, both input and output,
+ * for both compression and decompression, except for the minimum which
+ * only applies to compression input and decompression output; the
+ * compressed data can be less than the minimum constraint.  It can be
+ * assumed that compressed data will always adhere to the multiple
+ * constraint.
+ *
+ * The driver may succeed even if these constraints are violated;
+ * however, the driver can return failure or suffer reduced performance
+ * if any constraint is not met.
+ */
 struct nx842_constraints {
 	int alignment;
 	int multiple;
@@ -126,19 +146,40 @@
 			  void *wrkmem);
 };
 
-struct nx842_driver *nx842_platform_driver(void);
-bool nx842_platform_driver_set(struct nx842_driver *driver);
-void nx842_platform_driver_unset(struct nx842_driver *driver);
-bool nx842_platform_driver_get(void);
-void nx842_platform_driver_put(void);
+struct nx842_crypto_header_group {
+	__be16 padding;			/* unused bytes at start of group */
+	__be32 compressed_length;	/* compressed bytes in group */
+	__be32 uncompressed_length;	/* bytes after decompression */
+} __packed;
 
-size_t nx842_workmem_size(void);
+struct nx842_crypto_header {
+	__be16 magic;		/* NX842_CRYPTO_MAGIC */
+	__be16 ignore;		/* decompressed end bytes to ignore */
+	u8 groups;		/* total groups in this header */
+	struct nx842_crypto_header_group group[];
+} __packed;
 
-int nx842_constraints(struct nx842_constraints *constraints);
+#define NX842_CRYPTO_GROUP_MAX	(0x20)
 
-int nx842_compress(const unsigned char *in, unsigned int in_len,
-		   unsigned char *out, unsigned int *out_len, void *wrkmem);
-int nx842_decompress(const unsigned char *in, unsigned int in_len,
-		     unsigned char *out, unsigned int *out_len, void *wrkmem);
+struct nx842_crypto_ctx {
+	spinlock_t lock;
+
+	u8 *wmem;
+	u8 *sbounce, *dbounce;
+
+	struct nx842_crypto_header header;
+	struct nx842_crypto_header_group group[NX842_CRYPTO_GROUP_MAX];
+
+	struct nx842_driver *driver;
+};
+
+int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver);
+void nx842_crypto_exit(struct crypto_tfm *tfm);
+int nx842_crypto_compress(struct crypto_tfm *tfm,
+			  const u8 *src, unsigned int slen,
+			  u8 *dst, unsigned int *dlen);
+int nx842_crypto_decompress(struct crypto_tfm *tfm,
+			    const u8 *src, unsigned int slen,
+			    u8 *dst, unsigned int *dlen);
 
 #endif /* __NX_842_H__ */
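
The constraints comment above is the contract the compress() and decompress() helpers in nx-842-crypto.c code against: lengths are rounded to the multiple, capped at the maximum, and routed through the bounce buffers when the minimum or alignment cannot be met. A small standalone sketch of that clamping step, with invented constraint values:

/* Sketch: clamping an output length against the constraints, in the spirit
 * of the compress() helper (round_down to the multiple, cap at the maximum).
 * The constraint values in the usage comment are invented examples. */
struct constraints {
	int alignment;
	int multiple;
	int minimum;
	int maximum;
};

static unsigned int clamp_output_len(unsigned int len, const struct constraints *c)
{
	if (len % c->multiple)
		len -= len % c->multiple;	/* round_down(len, c->multiple) */
	if (len > c->maximum)
		len = c->maximum;
	/* if len is still below c->minimum, the driver switches to its
	 * bounce buffer rather than handing the short buffer to hardware */
	return len;
}

/* e.g. with {.multiple = 8, .minimum = 128, .maximum = 65536}:
 * clamp_output_len(1001, &c) == 1000, clamp_output_len(70000, &c) == 65536 */
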
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index e4311ce..73ef499 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -94,8 +94,6 @@
 		return -EINVAL;
 	}
 
-	crypto_aead_crt(tfm)->authsize = authsize;
-
 	return 0;
 }
 
@@ -111,8 +109,6 @@
 		return -EINVAL;
 	}
 
-	crypto_aead_crt(tfm)->authsize = authsize;
-
 	return 0;
 }
 
@@ -174,6 +170,7 @@
 			struct nx_crypto_ctx *nx_ctx,
 			unsigned int          authsize,
 			unsigned int          nbytes,
+			unsigned int	      assoclen,
 			u8                   *out)
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -200,16 +197,16 @@
 	 * greater than 2^32.
 	 */
 
-	if (!req->assoclen) {
+	if (!assoclen) {
 		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
-	} else if (req->assoclen <= 14) {
+	} else if (assoclen <= 14) {
 		/* if associated data is 14 bytes or less, we do 1 GCM
 		 * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1,
 		 * which is fed in through the source buffers here */
 		b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
 		b1 = nx_ctx->priv.ccm.iauth_tag;
-		iauth_len = req->assoclen;
-	} else if (req->assoclen <= 65280) {
+		iauth_len = assoclen;
+	} else if (assoclen <= 65280) {
 		/* if associated data is less than (2^16 - 2^8), we construct
 		 * B1 differently and feed in the associated data to a CCA
 		 * operation */
@@ -223,7 +220,7 @@
 	}
 
 	/* generate B0 */
-	rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+	rc = generate_b0(iv, assoclen, authsize, nbytes, b0);
 	if (rc)
 		return rc;
 
@@ -233,22 +230,22 @@
 	 */
 	if (b1) {
 		memset(b1, 0, 16);
-		if (req->assoclen <= 65280) {
-			*(u16 *)b1 = (u16)req->assoclen;
-			scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+		if (assoclen <= 65280) {
+			*(u16 *)b1 = assoclen;
+			scatterwalk_map_and_copy(b1 + 2, req->src, 0,
 					 iauth_len, SCATTERWALK_FROM_SG);
 		} else {
 			*(u16 *)b1 = (u16)(0xfffe);
-			*(u32 *)&b1[2] = (u32)req->assoclen;
-			scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+			*(u32 *)&b1[2] = assoclen;
+			scatterwalk_map_and_copy(b1 + 6, req->src, 0,
 					 iauth_len, SCATTERWALK_FROM_SG);
 		}
 	}
 
 	/* now copy any remaining AAD to scatterlist and call nx... */
-	if (!req->assoclen) {
+	if (!assoclen) {
 		return rc;
-	} else if (req->assoclen <= 14) {
+	} else if (assoclen <= 14) {
 		unsigned int len = 16;
 
 		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
@@ -280,7 +277,7 @@
 			return rc;
 
 		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 
 	} else {
 		unsigned int processed = 0, to_process;
@@ -294,15 +291,15 @@
 				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 		do {
-			to_process = min_t(u32, req->assoclen - processed,
+			to_process = min_t(u32, assoclen - processed,
 					   nx_ctx->ap->databytelen);
 
 			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
 						    nx_ctx->ap->sglen,
-						    req->assoc, processed,
+						    req->src, processed,
 						    &to_process);
 
-			if ((to_process + processed) < req->assoclen) {
+			if ((to_process + processed) < assoclen) {
 				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
 					NX_FDM_INTERMEDIATE;
 			} else {
@@ -328,11 +325,10 @@
 			NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
 
 			atomic_inc(&(nx_ctx->stats->aes_ops));
-			atomic64_add(req->assoclen,
-					&(nx_ctx->stats->aes_bytes));
+			atomic64_add(assoclen, &nx_ctx->stats->aes_bytes);
 
 			processed += to_process;
-		} while (processed < req->assoclen);
+		} while (processed < assoclen);
 
 		result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
 	}
@@ -343,7 +339,8 @@
 }
 
 static int ccm_nx_decrypt(struct aead_request   *req,
-			  struct blkcipher_desc *desc)
+			  struct blkcipher_desc *desc,
+			  unsigned int assoclen)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -360,10 +357,10 @@
 
 	/* copy out the auth tag to compare with later */
 	scatterwalk_map_and_copy(priv->oauth_tag,
-				 req->src, nbytes, authsize,
+				 req->src, nbytes + req->assoclen, authsize,
 				 SCATTERWALK_FROM_SG);
 
-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
@@ -383,8 +380,8 @@
 		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-					&to_process, processed,
-					csbcpb->cpb.aes_ccm.iv_or_ctr);
+				       &to_process, processed + req->assoclen,
+				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
 			goto out;
 
@@ -420,7 +417,8 @@
 }
 
 static int ccm_nx_encrypt(struct aead_request   *req,
-			  struct blkcipher_desc *desc)
+			  struct blkcipher_desc *desc,
+			  unsigned int assoclen)
 {
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
@@ -432,7 +430,7 @@
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
+	rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, assoclen,
 			  csbcpb->cpb.aes_ccm.in_pat_or_b0);
 	if (rc)
 		goto out;
@@ -451,7 +449,7 @@
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 
 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-					&to_process, processed,
+				       &to_process, processed + req->assoclen,
 				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
 			goto out;
@@ -483,7 +481,7 @@
 
 	/* copy out the auth tag */
 	scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
-				 req->dst, nbytes, authsize,
+				 req->dst, nbytes + req->assoclen, authsize,
 				 SCATTERWALK_TO_SG);
 
 out:
@@ -503,9 +501,8 @@
 	memcpy(iv + 4, req->iv, 8);
 
 	desc.info = iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
-	return ccm_nx_encrypt(req, &desc);
+	return ccm_nx_encrypt(req, &desc, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_encrypt(struct aead_request *req)
@@ -514,13 +511,12 @@
 	int rc;
 
 	desc.info = req->iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
 	rc = crypto_ccm_check_iv(desc.info);
 	if (rc)
 		return rc;
 
-	return ccm_nx_encrypt(req, &desc);
+	return ccm_nx_encrypt(req, &desc, req->assoclen);
 }
 
 static int ccm4309_aes_nx_decrypt(struct aead_request *req)
@@ -535,9 +531,8 @@
 	memcpy(iv + 4, req->iv, 8);
 
 	desc.info = iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
-	return ccm_nx_decrypt(req, &desc);
+	return ccm_nx_decrypt(req, &desc, req->assoclen - 8);
 }
 
 static int ccm_aes_nx_decrypt(struct aead_request *req)
@@ -546,13 +541,12 @@
 	int rc;
 
 	desc.info = req->iv;
-	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
 
 	rc = crypto_ccm_check_iv(desc.info);
 	if (rc)
 		return rc;
 
-	return ccm_nx_decrypt(req, &desc);
+	return ccm_nx_decrypt(req, &desc, req->assoclen);
 }
 
 /* tell the block cipher walk routines that this is a stream cipher by
@@ -560,47 +554,42 @@
  * during encrypt/decrypt doesn't solve this problem, because it calls
  * blkcipher_walk_done under the covers, which doesn't use walk->blocksize,
  * but instead uses this tfm->blocksize. */
-struct crypto_alg nx_ccm_aes_alg = {
-	.cra_name        = "ccm(aes)",
-	.cra_driver_name = "ccm-aes-nx",
-	.cra_priority    = 300,
-	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
-			   CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize   = 1,
-	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-	.cra_type        = &crypto_aead_type,
-	.cra_module      = THIS_MODULE,
-	.cra_init        = nx_crypto_ctx_aes_ccm_init,
-	.cra_exit        = nx_crypto_ctx_exit,
-	.cra_aead = {
-		.ivsize      = AES_BLOCK_SIZE,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setkey      = ccm_aes_nx_set_key,
-		.setauthsize = ccm_aes_nx_setauthsize,
-		.encrypt     = ccm_aes_nx_encrypt,
-		.decrypt     = ccm_aes_nx_decrypt,
-	}
+struct aead_alg nx_ccm_aes_alg = {
+	.base = {
+		.cra_name        = "ccm(aes)",
+		.cra_driver_name = "ccm-aes-nx",
+		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_ccm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = AES_BLOCK_SIZE,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = ccm_aes_nx_set_key,
+	.setauthsize = ccm_aes_nx_setauthsize,
+	.encrypt     = ccm_aes_nx_encrypt,
+	.decrypt     = ccm_aes_nx_decrypt,
 };
 
-struct crypto_alg nx_ccm4309_aes_alg = {
-	.cra_name        = "rfc4309(ccm(aes))",
-	.cra_driver_name = "rfc4309-ccm-aes-nx",
-	.cra_priority    = 300,
-	.cra_flags       = CRYPTO_ALG_TYPE_AEAD |
-			   CRYPTO_ALG_NEED_FALLBACK,
-	.cra_blocksize   = 1,
-	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-	.cra_type        = &crypto_nivaead_type,
-	.cra_module      = THIS_MODULE,
-	.cra_init        = nx_crypto_ctx_aes_ccm_init,
-	.cra_exit        = nx_crypto_ctx_exit,
-	.cra_aead = {
-		.ivsize      = 8,
-		.maxauthsize = AES_BLOCK_SIZE,
-		.setkey      = ccm4309_aes_nx_set_key,
-		.setauthsize = ccm4309_aes_nx_setauthsize,
-		.encrypt     = ccm4309_aes_nx_encrypt,
-		.decrypt     = ccm4309_aes_nx_decrypt,
-		.geniv       = "seqiv",
-	}
+struct aead_alg nx_ccm4309_aes_alg = {
+	.base = {
+		.cra_name        = "rfc4309(ccm(aes))",
+		.cra_driver_name = "rfc4309-ccm-aes-nx",
+		.cra_priority    = 300,
+		.cra_flags       = CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize   = 1,
+		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+		.cra_module      = THIS_MODULE,
+	},
+	.init        = nx_crypto_ctx_aes_ccm_init,
+	.exit        = nx_crypto_ctx_aead_exit,
+	.ivsize      = 8,
+	.maxauthsize = AES_BLOCK_SIZE,
+	.setkey      = ccm4309_aes_nx_set_key,
+	.setauthsize = ccm4309_aes_nx_setauthsize,
+	.encrypt     = ccm4309_aes_nx_encrypt,
+	.decrypt     = ccm4309_aes_nx_decrypt,
 };
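
The two algorithms above move from the legacy crypto_alg/cra_aead layout to struct aead_alg, so they are now registered through the aead-specific API rather than crypto_register_alg(); nx_register_aead() in nx.c (changed further down) is the driver's wrapper around that. A hedged sketch of the bare registration calls, assuming the 4.2-era aead interface:

/* Sketch only: minimal registration of one converted aead_alg.  The nx
 * driver does this via nx_register_aead()/nx_unregister_aead(), which add
 * their own NX feature checks. */
#include <linux/module.h>
#include <crypto/internal/aead.h>

extern struct aead_alg nx_ccm_aes_alg;		/* defined above */

static int __init aead_demo_init(void)
{
	return crypto_register_aead(&nx_ccm_aes_alg);
}

static void __exit aead_demo_exit(void)
{
	crypto_unregister_aead(&nx_ccm_aes_alg);
}

module_init(aead_demo_init);
module_exit(aead_demo_exit);
MODULE_LICENSE("GPL");
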
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index dd7e9f3..898c0a2 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -144,27 +144,6 @@
 	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
 }
 
-struct crypto_alg nx_ctr_aes_alg = {
-	.cra_name        = "ctr(aes)",
-	.cra_driver_name = "ctr-aes-nx",
-	.cra_priority    = 300,
-	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize   = 1,
-	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
-	.cra_type        = &crypto_blkcipher_type,
-	.cra_module      = THIS_MODULE,
-	.cra_init        = nx_crypto_ctx_aes_ctr_init,
-	.cra_exit        = nx_crypto_ctx_exit,
-	.cra_blkcipher = {
-		.min_keysize = AES_MIN_KEY_SIZE,
-		.max_keysize = AES_MAX_KEY_SIZE,
-		.ivsize      = AES_BLOCK_SIZE,
-		.setkey      = ctr_aes_nx_set_key,
-		.encrypt     = ctr_aes_nx_crypt,
-		.decrypt     = ctr_aes_nx_crypt,
-	}
-};
-
 struct crypto_alg nx_ctr3686_aes_alg = {
 	.cra_name        = "rfc3686(ctr(aes))",
 	.cra_driver_name = "rfc3686-ctr-aes-nx",
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 92c993f..eee624f 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -21,11 +21,9 @@
 
 #include <crypto/internal/aead.h>
 #include <crypto/aes.h>
-#include <crypto/algapi.h>
 #include <crypto/scatterwalk.h>
 #include <linux/module.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <asm/vio.h>
 
 #include "nx_csbcpb.h"
@@ -36,7 +34,7 @@
 			      const u8           *in_key,
 			      unsigned int        key_len)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 
@@ -75,7 +73,7 @@
 				  const u8           *in_key,
 				  unsigned int        key_len)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base);
+	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
 	char *nonce = nx_ctx->priv.gcm.nonce;
 	int rc;
 
@@ -110,13 +108,14 @@
 
 static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 		  struct aead_request   *req,
-		  u8                    *out)
+		  u8                    *out,
+		  unsigned int assoclen)
 {
 	int rc;
 	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
 	struct scatter_walk walk;
 	struct nx_sg *nx_sg = nx_ctx->in_sg;
-	unsigned int nbytes = req->assoclen;
+	unsigned int nbytes = assoclen;
 	unsigned int processed = 0, to_process;
 	unsigned int max_sg_len;
 
@@ -167,7 +166,7 @@
 		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
 
 		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
 		processed += to_process;
 	} while (processed < nbytes);
@@ -177,13 +176,15 @@
 	return rc;
 }
 
-static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc,
+		unsigned int assoclen)
 {
 	int rc;
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *nx_sg;
-	unsigned int nbytes = req->assoclen;
+	unsigned int nbytes = assoclen;
 	unsigned int processed = 0, to_process;
 	unsigned int max_sg_len;
 
@@ -238,7 +239,7 @@
 		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		atomic_inc(&(nx_ctx->stats->aes_ops));
-		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));
 
 		processed += to_process;
 	} while (processed < nbytes);
@@ -253,7 +254,8 @@
 		     int enc)
 {
 	int rc;
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	char out[AES_BLOCK_SIZE];
 	struct nx_sg *in_sg, *out_sg;
@@ -314,9 +316,11 @@
 	return rc;
 }
 
-static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
+			    unsigned int assoclen)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct blkcipher_desc desc;
@@ -332,10 +336,10 @@
 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
 
 	if (nbytes == 0) {
-		if (req->assoclen == 0)
+		if (assoclen == 0)
 			rc = gcm_empty(req, &desc, enc);
 		else
-			rc = gmac(req, &desc);
+			rc = gmac(req, &desc, assoclen);
 		if (rc)
 			goto out;
 		else
@@ -343,9 +347,10 @@
 	}
 
 	/* Process associated data */
-	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
-	if (req->assoclen) {
-		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
+	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
+	if (assoclen) {
+		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
+			    assoclen);
 		if (rc)
 			goto out;
 	}
@@ -363,7 +368,6 @@
 		to_process = nbytes - processed;
 
 		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
-		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
 		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
 				       req->src, &to_process,
 				       processed + req->assoclen,
@@ -430,7 +434,7 @@
 
 	memcpy(iv, req->iv, 12);
 
-	return gcm_aes_nx_crypt(req, 1);
+	return gcm_aes_nx_crypt(req, 1, req->assoclen);
 }
 
 static int gcm_aes_nx_decrypt(struct aead_request *req)
@@ -440,12 +444,13 @@
 
 	memcpy(iv, req->iv, 12);
 
-	return gcm_aes_nx_crypt(req, 0);
+	return gcm_aes_nx_crypt(req, 0, req->assoclen);
 }
 
 static int gcm4106_aes_nx_encrypt(struct aead_request *req)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 	char *nonce = nx_ctx->priv.gcm.nonce;
@@ -453,12 +458,16 @@
 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
-	return gcm_aes_nx_crypt(req, 1);
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
 }
 
 static int gcm4106_aes_nx_decrypt(struct aead_request *req)
 {
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct nx_crypto_ctx *nx_ctx =
+		crypto_aead_ctx(crypto_aead_reqtfm(req));
 	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
 	char *iv = rctx->iv;
 	char *nonce = nx_ctx->priv.gcm.nonce;
@@ -466,7 +475,10 @@
 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
 	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);
 
-	return gcm_aes_nx_crypt(req, 0);
+	if (req->assoclen < 8)
+		return -EINVAL;
+
+	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
 }
 
 /* tell the block cipher walk routines that this is a stream cipher by
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 4369713..0794f1c 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -596,13 +596,9 @@
 	if (rc)
 		goto out_unreg_ecb;
 
-	rc = nx_register_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-	if (rc)
-		goto out_unreg_cbc;
-
 	rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
 	if (rc)
-		goto out_unreg_ctr;
+		goto out_unreg_cbc;
 
 	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
 	if (rc)
@@ -612,11 +608,11 @@
 	if (rc)
 		goto out_unreg_gcm;
 
-	rc = nx_register_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 	if (rc)
 		goto out_unreg_gcm4106;
 
-	rc = nx_register_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 	if (rc)
 		goto out_unreg_ccm;
 
@@ -644,17 +640,15 @@
 	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
 			    NX_PROPS_SHA256);
 out_unreg_ccm4309:
-	nx_unregister_alg(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 out_unreg_ccm:
-	nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 out_unreg_gcm4106:
 	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
 out_unreg_gcm:
 	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
 out_unreg_ctr3686:
 	nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
-out_unreg_ctr:
-	nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
 out_unreg_cbc:
 	nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
 out_unreg_ecb:
@@ -711,11 +705,10 @@
 }
 
 /* entry points from the crypto tfm initializers */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
 {
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct nx_ccm_rctx));
-	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
+	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
 				  NX_MODE_AES_CCM);
 }
 
@@ -813,16 +806,15 @@
 				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
 		nx_unregister_shash(&nx_shash_sha256_alg,
 				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
-		nx_unregister_alg(&nx_ccm4309_aes_alg,
-				  NX_FC_AES, NX_MODE_AES_CCM);
-		nx_unregister_alg(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
+		nx_unregister_aead(&nx_ccm4309_aes_alg,
+				   NX_FC_AES, NX_MODE_AES_CCM);
+		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
 		nx_unregister_aead(&nx_gcm4106_aes_alg,
 				   NX_FC_AES, NX_MODE_AES_GCM);
 		nx_unregister_aead(&nx_gcm_aes_alg,
 				   NX_FC_AES, NX_MODE_AES_GCM);
 		nx_unregister_alg(&nx_ctr3686_aes_alg,
 				  NX_FC_AES, NX_MODE_AES_CTR);
-		nx_unregister_alg(&nx_ctr_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
 		nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
 		nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
 	}
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index cdff03a..9347878 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -149,8 +149,10 @@
 	} priv;
 };
 
+struct crypto_aead;
+
 /* prototypes */
-int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm);
+int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm);
 int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm);
 int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm);
 int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm);
@@ -187,10 +189,9 @@
 extern struct crypto_alg nx_ecb_aes_alg;
 extern struct aead_alg nx_gcm_aes_alg;
 extern struct aead_alg nx_gcm4106_aes_alg;
-extern struct crypto_alg nx_ctr_aes_alg;
 extern struct crypto_alg nx_ctr3686_aes_alg;
-extern struct crypto_alg nx_ccm_aes_alg;
-extern struct crypto_alg nx_ccm4309_aes_alg;
+extern struct aead_alg nx_ccm_aes_alg;
+extern struct aead_alg nx_ccm4309_aes_alg;
 extern struct shash_alg nx_shash_aes_xcbc_alg;
 extern struct shash_alg nx_shash_sha512_alg;
 extern struct shash_alg nx_shash_sha256_alg;
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9a28b7e..eba2314 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -52,29 +52,30 @@
 #define AES_REG_IV(dd, x)		((dd)->pdata->iv_ofs + ((x) * 0x04))
 
 #define AES_REG_CTRL(dd)		((dd)->pdata->ctrl_ofs)
-#define AES_REG_CTRL_CTR_WIDTH_MASK	(3 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_32		(0 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_64		(1 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_96		(2 << 7)
-#define AES_REG_CTRL_CTR_WIDTH_128		(3 << 7)
-#define AES_REG_CTRL_CTR		(1 << 6)
-#define AES_REG_CTRL_CBC		(1 << 5)
-#define AES_REG_CTRL_KEY_SIZE		(3 << 3)
-#define AES_REG_CTRL_DIRECTION		(1 << 2)
-#define AES_REG_CTRL_INPUT_READY	(1 << 1)
-#define AES_REG_CTRL_OUTPUT_READY	(1 << 0)
+#define AES_REG_CTRL_CTR_WIDTH_MASK	GENMASK(8, 7)
+#define AES_REG_CTRL_CTR_WIDTH_32	0
+#define AES_REG_CTRL_CTR_WIDTH_64	BIT(7)
+#define AES_REG_CTRL_CTR_WIDTH_96	BIT(8)
+#define AES_REG_CTRL_CTR_WIDTH_128	GENMASK(8, 7)
+#define AES_REG_CTRL_CTR		BIT(6)
+#define AES_REG_CTRL_CBC		BIT(5)
+#define AES_REG_CTRL_KEY_SIZE		GENMASK(4, 3)
+#define AES_REG_CTRL_DIRECTION		BIT(2)
+#define AES_REG_CTRL_INPUT_READY	BIT(1)
+#define AES_REG_CTRL_OUTPUT_READY	BIT(0)
+#define AES_REG_CTRL_MASK		GENMASK(24, 2)
 
 #define AES_REG_DATA_N(dd, x)		((dd)->pdata->data_ofs + ((x) * 0x04))
 
 #define AES_REG_REV(dd)			((dd)->pdata->rev_ofs)
 
 #define AES_REG_MASK(dd)		((dd)->pdata->mask_ofs)
-#define AES_REG_MASK_SIDLE		(1 << 6)
-#define AES_REG_MASK_START		(1 << 5)
-#define AES_REG_MASK_DMA_OUT_EN		(1 << 3)
-#define AES_REG_MASK_DMA_IN_EN		(1 << 2)
-#define AES_REG_MASK_SOFTRESET		(1 << 1)
-#define AES_REG_AUTOIDLE		(1 << 0)
+#define AES_REG_MASK_SIDLE		BIT(6)
+#define AES_REG_MASK_START		BIT(5)
+#define AES_REG_MASK_DMA_OUT_EN		BIT(3)
+#define AES_REG_MASK_DMA_IN_EN		BIT(2)
+#define AES_REG_MASK_SOFTRESET		BIT(1)
+#define AES_REG_AUTOIDLE		BIT(0)
 
 #define AES_REG_LENGTH_N(x)		(0x54 + ((x) * 0x04))
 
@@ -254,7 +255,7 @@
 {
 	unsigned int key32;
 	int i, err;
-	u32 val, mask = 0;
+	u32 val;
 
 	err = omap_aes_hw_init(dd);
 	if (err)
@@ -274,17 +275,13 @@
 	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
 	if (dd->flags & FLAGS_CBC)
 		val |= AES_REG_CTRL_CBC;
-	if (dd->flags & FLAGS_CTR) {
+	if (dd->flags & FLAGS_CTR)
 		val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;
-		mask = AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_MASK;
-	}
+
 	if (dd->flags & FLAGS_ENCRYPT)
 		val |= AES_REG_CTRL_DIRECTION;
 
-	mask |= AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
-			AES_REG_CTRL_KEY_SIZE;
-
-	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, mask);
+	omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
 
 	return 0;
 }
@@ -558,6 +555,9 @@
 {
 	int len = 0;
 
+	if (!IS_ALIGNED(total, AES_BLOCK_SIZE))
+		return -EINVAL;
+
 	while (sg) {
 		if (!IS_ALIGNED(sg->offset, 4))
 			return -1;
@@ -577,9 +577,10 @@
 static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 {
 	void *buf_in, *buf_out;
-	int pages;
+	int pages, total;
 
-	pages = get_order(dd->total);
+	total = ALIGN(dd->total, AES_BLOCK_SIZE);
+	pages = get_order(total);
 
 	buf_in = (void *)__get_free_pages(GFP_ATOMIC, pages);
 	buf_out = (void *)__get_free_pages(GFP_ATOMIC, pages);
@@ -594,11 +595,11 @@
 	sg_copy_buf(buf_in, dd->in_sg, 0, dd->total, 0);
 
 	sg_init_table(&dd->in_sgl, 1);
-	sg_set_buf(&dd->in_sgl, buf_in, dd->total);
+	sg_set_buf(&dd->in_sgl, buf_in, total);
 	dd->in_sg = &dd->in_sgl;
 
 	sg_init_table(&dd->out_sgl, 1);
-	sg_set_buf(&dd->out_sgl, buf_out, dd->total);
+	sg_set_buf(&dd->out_sgl, buf_out, total);
 	dd->out_sg = &dd->out_sgl;
 
 	return 0;
@@ -611,7 +612,7 @@
 	struct omap_aes_ctx *ctx;
 	struct omap_aes_reqctx *rctx;
 	unsigned long flags;
-	int err, ret = 0;
+	int err, ret = 0, len;
 
 	spin_lock_irqsave(&dd->lock, flags);
 	if (req)
@@ -650,8 +651,9 @@
 		dd->sgs_copied = 0;
 	}
 
-	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, dd->total);
-	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, dd->total);
+	len = ALIGN(dd->total, AES_BLOCK_SIZE);
+	dd->in_sg_len = scatterwalk_bytes_sglen(dd->in_sg, len);
+	dd->out_sg_len = scatterwalk_bytes_sglen(dd->out_sg, len);
 	BUG_ON(dd->in_sg_len < 0 || dd->out_sg_len < 0);
 
 	rctx = ablkcipher_request_ctx(req);
@@ -678,7 +680,7 @@
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
 	void *buf_in, *buf_out;
-	int pages;
+	int pages, len;
 
 	pr_debug("enter done_task\n");
 
@@ -697,7 +699,8 @@
 
 		sg_copy_buf(buf_out, dd->orig_out, 0, dd->total_save, 1);
 
-		pages = get_order(dd->total_save);
+		len = ALIGN(dd->total_save, AES_BLOCK_SIZE);
+		pages = get_order(len);
 		free_pages((unsigned long)buf_in, pages);
 		free_pages((unsigned long)buf_out, pages);
 	}
@@ -726,11 +729,6 @@
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-		pr_err("request size is not exact amount of AES blocks\n");
-		return -EINVAL;
-	}
-
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
@@ -833,7 +831,7 @@
 {
 	.cra_name		= "ecb(aes)",
 	.cra_driver_name	= "ecb-aes-omap",
-	.cra_priority		= 100,
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_ASYNC,
@@ -855,7 +853,7 @@
 {
 	.cra_name		= "cbc(aes)",
 	.cra_driver_name	= "cbc-aes-omap",
-	.cra_priority		= 100,
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_ASYNC,
@@ -881,7 +879,7 @@
 {
 	.cra_name		= "ctr(aes)",
 	.cra_driver_name	= "ctr-aes-omap",
-	.cra_priority		= 100,
+	.cra_priority		= 300,
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 				  CRYPTO_ALG_KERN_DRIVER_ONLY |
 				  CRYPTO_ALG_ASYNC,
@@ -1046,9 +1044,7 @@
 			}
 		}
 
-		dd->total -= AES_BLOCK_SIZE;
-
-		BUG_ON(dd->total < 0);
+		dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);
 
 		/* Clear IRQ status */
 		status &= ~AES_REG_IRQ_DATA_OUT;
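
The register-bit rewrite at the top of this file is purely cosmetic: BIT(n) is (1 << n) and GENMASK(h, l) sets bits h..l inclusive, so every converted mask keeps its old value. A quick standalone check of the equivalences used above (the macro definitions mirror the kernel's):

/* Sketch: confirm the BIT()/GENMASK() conversions in omap-aes are
 * value-preserving. */
#include <assert.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

int main(void)
{
	assert(GENMASK(8, 7) == (3UL << 7));	/* CTR_WIDTH_MASK / CTR_WIDTH_128 */
	assert(BIT(8) == (2UL << 7));		/* CTR_WIDTH_96 */
	assert(GENMASK(4, 3) == (3UL << 3));	/* KEY_SIZE */
	assert(BIT(6) == (1UL << 6));		/* CTR */
	return 0;
}
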
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index b2024c95..48adb2a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -588,7 +588,7 @@
 		 * the dmaengine may try to DMA the incorrect amount of data.
 		 */
 		sg_init_table(&ctx->sgl, 1);
-		ctx->sgl.page_link = ctx->sg->page_link;
+		sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
 		ctx->sgl.offset = ctx->sg->offset;
 		sg_dma_len(&ctx->sgl) = len32;
 		sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
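
The omap-sham change above stops copying page_link wholesale: the low bits of page_link encode the chain/end markers, and sg_assign_page() swaps in the new page while leaving the destination entry's markers (set up by sg_init_table()) intact. A simplified model of that behaviour, not the kernel implementation:

/* Simplified sketch of sg_assign_page() semantics: keep the destination's
 * marker bits, replace only the page pointer. */
#include <assert.h>
#include <stdint.h>

#define SG_MARKER_MASK	0x3UL		/* chain + end bits live in page_link */

static void assign_page(uintptr_t *dst_page_link, uintptr_t page)
{
	assert((page & SG_MARKER_MASK) == 0);	/* page pointers are aligned */
	*dst_page_link = (*dst_page_link & SG_MARKER_MASK) | page;
}
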
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 4f56f36..da36de2 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -99,11 +99,16 @@
 	dma_addr_t			src_addr, dst_addr;
 	struct spacc_ddt		*src_ddt, *dst_ddt;
 	void				(*complete)(struct spacc_req *req);
+};
 
-	/* AEAD specific bits. */
-	u8				*giv;
-	size_t				giv_len;
-	dma_addr_t			giv_pa;
+struct spacc_aead {
+	unsigned long			ctrl_default;
+	unsigned long			type;
+	struct aead_alg			alg;
+	struct spacc_engine		*engine;
+	struct list_head		entry;
+	int				key_offs;
+	int				iv_offs;
 };
 
 struct spacc_engine {
@@ -121,6 +126,9 @@
 	struct spacc_alg		*algs;
 	unsigned			num_algs;
 	struct list_head		registered_algs;
+	struct spacc_aead		*aeads;
+	unsigned			num_aeads;
+	struct list_head		registered_aeads;
 	size_t				cipher_pg_sz;
 	size_t				hash_pg_sz;
 	const char			*name;
@@ -174,8 +182,6 @@
 	u8				cipher_key_len;
 	u8				hash_key_len;
 	struct crypto_aead		*sw_cipher;
-	size_t				auth_size;
-	u8				salt[AES_BLOCK_SIZE];
 };
 
 static int spacc_ablk_submit(struct spacc_req *req);
@@ -185,6 +191,11 @@
 	return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
 }
 
+static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
+{
+	return container_of(alg, struct spacc_aead, alg);
+}
+
 static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
 {
 	u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);
@@ -310,120 +321,117 @@
 	return NULL;
 }
 
-static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv)
+static int spacc_aead_make_ddts(struct aead_request *areq)
 {
-	struct aead_request *areq = container_of(req->req, struct aead_request,
-						 base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct spacc_req *req = aead_request_ctx(areq);
 	struct spacc_engine *engine = req->engine;
 	struct spacc_ddt *src_ddt, *dst_ddt;
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(areq));
-	unsigned nents = sg_count(areq->src, areq->cryptlen);
 	unsigned total;
-	dma_addr_t iv_addr;
+	unsigned int src_nents, dst_nents;
 	struct scatterlist *cur;
-	int i, dst_ents, src_ents, assoc_ents;
-	u8 *iv = giv ? giv : areq->iv;
+	int i, dst_ents, src_ents;
+
+	total = areq->assoclen + areq->cryptlen;
+	if (req->is_encrypt)
+		total += crypto_aead_authsize(aead);
+
+	src_nents = sg_count(areq->src, total);
+	if (src_nents + 1 > MAX_DDT_LEN)
+		return -E2BIG;
+
+	dst_nents = 0;
+	if (areq->src != areq->dst) {
+		dst_nents = sg_count(areq->dst, total);
+		if (dst_nents + 1 > MAX_DDT_LEN)
+			return -E2BIG;
+	}
 
 	src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
 	if (!src_ddt)
-		return -ENOMEM;
+		goto err;
 
 	dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
-	if (!dst_ddt) {
-		dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
-		return -ENOMEM;
-	}
+	if (!dst_ddt)
+		goto err_free_src;
 
 	req->src_ddt = src_ddt;
 	req->dst_ddt = dst_ddt;
 
-	assoc_ents = dma_map_sg(engine->dev, areq->assoc,
-		sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-	if (areq->src != areq->dst) {
-		src_ents = dma_map_sg(engine->dev, areq->src, nents,
+	if (dst_nents) {
+		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
 				      DMA_TO_DEVICE);
-		dst_ents = dma_map_sg(engine->dev, areq->dst, nents,
+		if (!src_ents)
+			goto err_free_dst;
+
+		dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
 				      DMA_FROM_DEVICE);
+
+		if (!dst_ents) {
+			dma_unmap_sg(engine->dev, areq->src, src_nents,
+				     DMA_TO_DEVICE);
+			goto err_free_dst;
+		}
 	} else {
-		src_ents = dma_map_sg(engine->dev, areq->src, nents,
+		src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
 				      DMA_BIDIRECTIONAL);
-		dst_ents = 0;
+		if (!src_ents)
+			goto err_free_dst;
+		dst_ents = src_ents;
 	}
 
 	/*
-	 * Map the IV/GIV. For the GIV it needs to be bidirectional as it is
-	 * formed by the crypto block and sent as the ESP IV for IPSEC.
-	 */
-	iv_addr = dma_map_single(engine->dev, iv, ivsize,
-				 giv ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
-	req->giv_pa = iv_addr;
-
-	/*
-	 * Map the associated data. For decryption we don't copy the
-	 * associated data.
-	 */
-	total = areq->assoclen;
-	for_each_sg(areq->assoc, cur, assoc_ents, i) {
-		unsigned len = sg_dma_len(cur);
-
-		if (len > total)
-			len = total;
-
-		total -= len;
-
-		ddt_set(src_ddt++, sg_dma_address(cur), len);
-		if (req->is_encrypt)
-			ddt_set(dst_ddt++, sg_dma_address(cur), len);
-	}
-	ddt_set(src_ddt++, iv_addr, ivsize);
-
-	if (giv || req->is_encrypt)
-		ddt_set(dst_ddt++, iv_addr, ivsize);
-
-	/*
 	 * Now map in the payload for the source and destination and terminate
 	 * with the NULL pointers.
 	 */
-	for_each_sg(areq->src, cur, src_ents, i) {
+	for_each_sg(areq->src, cur, src_ents, i)
 		ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));
-		if (areq->src == areq->dst)
-			ddt_set(dst_ddt++, sg_dma_address(cur),
-				sg_dma_len(cur));
-	}
 
-	for_each_sg(areq->dst, cur, dst_ents, i)
-		ddt_set(dst_ddt++, sg_dma_address(cur),
-			sg_dma_len(cur));
+	/* For decryption we need to skip the associated data. */
+	total = req->is_encrypt ? 0 : areq->assoclen;
+	for_each_sg(areq->dst, cur, dst_ents, i) {
+		unsigned len = sg_dma_len(cur);
+
+		if (len <= total) {
+			total -= len;
+			continue;
+		}
+
+		ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
+	}
 
 	ddt_set(src_ddt, 0, 0);
 	ddt_set(dst_ddt, 0, 0);
 
 	return 0;
+
+err_free_dst:
+	dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
+err_free_src:
+	dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
+err:
+	return -ENOMEM;
 }
 
 static void spacc_aead_free_ddts(struct spacc_req *req)
 {
 	struct aead_request *areq = container_of(req->req, struct aead_request,
 						 base);
-	struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg);
-	struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	unsigned total = areq->assoclen + areq->cryptlen +
+			 (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
+	struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
 	struct spacc_engine *engine = aead_ctx->generic.engine;
-	unsigned ivsize = alg->alg.cra_aead.ivsize;
-	unsigned nents = sg_count(areq->src, areq->cryptlen);
+	unsigned nents = sg_count(areq->src, total);
 
 	if (areq->src != areq->dst) {
 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
 		dma_unmap_sg(engine->dev, areq->dst,
-			     sg_count(areq->dst, areq->cryptlen),
+			     sg_count(areq->dst, total),
 			     DMA_FROM_DEVICE);
 	} else
 		dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);
 
-	dma_unmap_sg(engine->dev, areq->assoc,
-		     sg_count(areq->assoc, areq->assoclen), DMA_TO_DEVICE);
-
-	dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL);
-
 	dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
 	dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
 }
@@ -438,65 +446,22 @@
 	dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
 }
 
-/*
- * Set key for a DES operation in an AEAD cipher. This also performs weak key
- * checking if required.
- */
-static int spacc_aead_des_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int len)
-{
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	u32 tmp[DES_EXPKEY_WORDS];
-
-	if (unlikely(!des_ekey(tmp, key)) &&
-	    (crypto_aead_get_flags(aead)) & CRYPTO_TFM_REQ_WEAK_KEY) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
-		return -EINVAL;
-	}
-
-	memcpy(ctx->cipher_key, key, len);
-	ctx->cipher_key_len = len;
-
-	return 0;
-}
-
-/* Set the key for the AES block cipher component of the AEAD transform. */
-static int spacc_aead_aes_setkey(struct crypto_aead *aead, const u8 *key,
-				 unsigned int len)
-{
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	/*
-	 * IPSec engine only supports 128 and 256 bit AES keys. If we get a
-	 * request for any other size (192 bits) then we need to do a software
-	 * fallback.
-	 */
-	if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
-		/*
-		 * Set the fallback transform to use the same request flags as
-		 * the hardware transform.
-		 */
-		ctx->sw_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-		ctx->sw_cipher->base.crt_flags |=
-			tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		return crypto_aead_setkey(ctx->sw_cipher, key, len);
-	}
-
-	memcpy(ctx->cipher_key, key, len);
-	ctx->cipher_key_len = len;
-
-	return 0;
-}
-
 static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			     unsigned int keylen)
 {
 	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
 	struct crypto_authenc_keys keys;
-	int err = -EINVAL;
+	int err;
+
+	crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
+	crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
+					      CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
+	crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
+	crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
+				   CRYPTO_TFM_RES_MASK);
+	if (err)
+		return err;
 
 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 		goto badkey;
@@ -507,14 +472,8 @@
 	if (keys.authkeylen > sizeof(ctx->hash_ctx))
 		goto badkey;
 
-	if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
-	    SPA_CTRL_CIPH_ALG_AES)
-		err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
-	else
-		err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
-
-	if (err)
-		goto badkey;
+	memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
+	ctx->cipher_key_len = keys.enckeylen;
 
 	memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
 	ctx->hash_key_len = keys.authkeylen;
@@ -531,9 +490,7 @@
 {
 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
 
-	ctx->auth_size = authsize;
-
-	return 0;
+	return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
 }
 
 /*
@@ -541,15 +498,13 @@
  * be completed in hardware because the hardware may not support certain key
  * sizes. In these cases we need to complete the request in software.
  */
-static int spacc_aead_need_fallback(struct spacc_req *req)
+static int spacc_aead_need_fallback(struct aead_request *aead_req)
 {
-	struct aead_request *aead_req;
-	struct crypto_tfm *tfm = req->req->tfm;
-	struct crypto_alg *alg = req->req->tfm->__crt_alg;
-	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
 
-	aead_req = container_of(req->req, struct aead_request, base);
 	/*
 	 * If we have a non-supported key-length, then we need to do a
 	 * software fallback.
@@ -568,22 +523,17 @@
 {
 	struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
 	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
-	int err;
+	struct aead_request *subreq = aead_request_ctx(req);
 
-	if (ctx->sw_cipher) {
-		/*
-		 * Change the request to use the software fallback transform,
-		 * and once the ciphering has completed, put the old transform
-		 * back into the request.
-		 */
-		aead_request_set_tfm(req, ctx->sw_cipher);
-		err = is_encrypt ? crypto_aead_encrypt(req) :
-		    crypto_aead_decrypt(req);
-		aead_request_set_tfm(req, __crypto_aead_cast(old_tfm));
-	} else
-		err = -EINVAL;
+	aead_request_set_tfm(subreq, ctx->sw_cipher);
+	aead_request_set_callback(subreq, req->base.flags,
+				  req->base.complete, req->base.data);
+	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+			       req->iv);
+	aead_request_set_ad(subreq, req->assoclen);
 
-	return err;
+	return is_encrypt ? crypto_aead_encrypt(subreq) :
+			    crypto_aead_decrypt(subreq);
 }
 
 static void spacc_aead_complete(struct spacc_req *req)
@@ -594,18 +544,19 @@
 
 static int spacc_aead_submit(struct spacc_req *req)
 {
-	struct crypto_tfm *tfm = req->req->tfm;
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_alg *alg = req->req->tfm->__crt_alg;
-	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
-	struct spacc_engine *engine = ctx->generic.engine;
-	u32 ctrl, proc_len, assoc_len;
 	struct aead_request *aead_req =
 		container_of(req->req, struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+	unsigned int authsize = crypto_aead_authsize(aead);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
+	struct spacc_engine *engine = ctx->generic.engine;
+	u32 ctrl, proc_len, assoc_len;
 
 	req->result = -EINPROGRESS;
 	req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
-		ctx->cipher_key_len, aead_req->iv, alg->cra_aead.ivsize,
+		ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
 		ctx->hash_ctx, ctx->hash_key_len);
 
 	/* Set the source and destination DDT pointers. */
@@ -617,25 +568,15 @@
 	proc_len = aead_req->cryptlen + assoc_len;
 
 	/*
-	 * If we aren't generating an IV, then we need to include the IV in the
-	 * associated data so that it is included in the hash.
-	 */
-	if (!req->giv) {
-		assoc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
-		proc_len += crypto_aead_ivsize(crypto_aead_reqtfm(aead_req));
-	} else
-		proc_len += req->giv_len;
-
-	/*
 	 * If we are decrypting, we need to take the length of the ICV out of
 	 * the processing length.
 	 */
 	if (!req->is_encrypt)
-		proc_len -= ctx->auth_size;
+		proc_len -= authsize;
 
 	writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
 	writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
-	writel(ctx->auth_size, engine->regs + SPA_ICV_LEN_REG_OFFSET);
+	writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
 	writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
 	writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
 
@@ -674,32 +615,29 @@
 /*
  * Setup an AEAD request for processing. This will configure the engine, load
  * the context and then start the packet processing.
- *
- * @giv Pointer to destination address for a generated IV. If the
- *	request does not need to generate an IV then this should be set to NULL.
  */
-static int spacc_aead_setup(struct aead_request *req, u8 *giv,
+static int spacc_aead_setup(struct aead_request *req,
 			    unsigned alg_type, bool is_encrypt)
 {
-	struct crypto_alg *alg = req->base.tfm->__crt_alg;
-	struct spacc_engine *engine = to_spacc_alg(alg)->engine;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct spacc_engine *engine = to_spacc_aead(alg)->engine;
 	struct spacc_req *dev_req = aead_request_ctx(req);
-	int err = -EINPROGRESS;
+	int err;
 	unsigned long flags;
-	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
 
-	dev_req->giv		= giv;
-	dev_req->giv_len	= ivsize;
 	dev_req->req		= &req->base;
 	dev_req->is_encrypt	= is_encrypt;
 	dev_req->result		= -EBUSY;
 	dev_req->engine		= engine;
 	dev_req->complete	= spacc_aead_complete;
 
-	if (unlikely(spacc_aead_need_fallback(dev_req)))
+	if (unlikely(spacc_aead_need_fallback(req) ||
+		     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
 		return spacc_aead_do_fallback(req, alg_type, is_encrypt);
 
-	spacc_aead_make_ddts(dev_req, dev_req->giv);
+	if (err)
+		goto out;
 
 	err = -EINPROGRESS;
 	spin_lock_irqsave(&engine->hw_lock, flags);
@@ -728,70 +666,44 @@
 static int spacc_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+	struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));
 
-	return spacc_aead_setup(req, NULL, alg->type, 1);
-}
-
-static int spacc_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
-	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	size_t ivsize = crypto_aead_ivsize(tfm);
-	struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
-	unsigned len;
-	__be64 seq;
-
-	memcpy(req->areq.iv, ctx->salt, ivsize);
-	len = ivsize;
-	if (ivsize > sizeof(u64)) {
-		memset(req->giv, 0, ivsize - sizeof(u64));
-		len = sizeof(u64);
-	}
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + ivsize - len, &seq, len);
-
-	return spacc_aead_setup(&req->areq, req->giv, alg->type, 1);
+	return spacc_aead_setup(req, alg->type, 1);
 }
 
 static int spacc_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
-	struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);
+	struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));
 
-	return spacc_aead_setup(req, NULL, alg->type, 0);
+	return spacc_aead_setup(req, alg->type, 0);
 }
 
 /*
  * Initialise a new AEAD context. This is responsible for allocating the
  * fallback cipher and initialising the context.
  */
-static int spacc_aead_cra_init(struct crypto_tfm *tfm)
+static int spacc_aead_cra_init(struct crypto_aead *tfm)
 {
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct crypto_alg *alg = tfm->__crt_alg;
-	struct spacc_alg *spacc_alg = to_spacc_alg(alg);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *alg = crypto_aead_alg(tfm);
+	struct spacc_aead *spacc_alg = to_spacc_aead(alg);
 	struct spacc_engine *engine = spacc_alg->engine;
 
 	ctx->generic.flags = spacc_alg->type;
 	ctx->generic.engine = engine;
-	ctx->sw_cipher = crypto_alloc_aead(alg->cra_name, 0,
-					   CRYPTO_ALG_ASYNC |
+	ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
 					   CRYPTO_ALG_NEED_FALLBACK);
-	if (IS_ERR(ctx->sw_cipher)) {
-		dev_warn(engine->dev, "failed to allocate fallback for %s\n",
-			 alg->cra_name);
-		ctx->sw_cipher = NULL;
-	}
+	if (IS_ERR(ctx->sw_cipher))
+		return PTR_ERR(ctx->sw_cipher);
 	ctx->generic.key_offs = spacc_alg->key_offs;
 	ctx->generic.iv_offs = spacc_alg->iv_offs;
 
-	get_random_bytes(ctx->salt, sizeof(ctx->salt));
-
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct spacc_req));
+	crypto_aead_set_reqsize(
+		tfm,
+		max(sizeof(struct spacc_req),
+		    sizeof(struct aead_request) +
+		    crypto_aead_reqsize(ctx->sw_cipher)));
 
 	return 0;
 }
@@ -800,13 +712,11 @@
  * Destructor for an AEAD context. This is called when the transform is freed
  * and must free the fallback cipher.
  */
-static void spacc_aead_cra_exit(struct crypto_tfm *tfm)
+static void spacc_aead_cra_exit(struct crypto_aead *tfm)
 {
-	struct spacc_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
-	if (ctx->sw_cipher)
-		crypto_free_aead(ctx->sw_cipher);
-	ctx->sw_cipher = NULL;
+	crypto_free_aead(ctx->sw_cipher);
 }
 
 /*
@@ -1458,180 +1368,188 @@
 			.cra_exit = spacc_ablk_cra_exit,
 		},
 	},
+};
+
+static struct spacc_aead ipsec_engine_aeads[] = {
 	{
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.key_offs = 0,
 		.iv_offs = AES_MAX_KEY_SIZE,
 		.alg = {
-			.cra_name = "authenc(hmac(sha1),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA1_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
 				SPA_CTRL_HASH_ALG_SHA256 |
 				SPA_CTRL_HASH_MODE_HMAC,
 		.key_offs = 0,
 		.iv_offs = AES_MAX_KEY_SIZE,
 		.alg = {
-			.cra_name = "authenc(hmac(sha256),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA256_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = 0,
 		.iv_offs = AES_MAX_KEY_SIZE,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_MD5 |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(md5),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-md5-cbc-aes-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = MD5_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = DES_BLOCK_SIZE,
 		.iv_offs = 0,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_SHA | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_SHA |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA1_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = DES_BLOCK_SIZE,
 		.iv_offs = 0,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC |
+		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
+				SPA_CTRL_CIPH_MODE_CBC |
 				SPA_CTRL_HASH_ALG_SHA256 |
 				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA256_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 	{
 		.key_offs = DES_BLOCK_SIZE,
 		.iv_offs = 0,
-		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC |
-				SPA_CTRL_HASH_ALG_MD5 | SPA_CTRL_HASH_MODE_HMAC,
+		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
+				SPA_CTRL_CIPH_MODE_CBC |
+				SPA_CTRL_HASH_ALG_MD5 |
+				SPA_CTRL_HASH_MODE_HMAC,
 		.alg = {
-			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-md5-cbc-3des-picoxcell",
-			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD |
-					CRYPTO_ALG_ASYNC |
-					CRYPTO_ALG_KERN_DRIVER_ONLY,
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_ctxsize = sizeof(struct spacc_aead_ctx),
-			.cra_type = &crypto_aead_type,
-			.cra_module = THIS_MODULE,
-			.cra_aead = {
-				.setkey = spacc_aead_setkey,
-				.setauthsize = spacc_aead_setauthsize,
-				.encrypt = spacc_aead_encrypt,
-				.decrypt = spacc_aead_decrypt,
-				.givencrypt = spacc_aead_givencrypt,
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = MD5_DIGEST_SIZE,
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-3des-picoxcell",
+				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
+				.cra_flags = CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_NEED_FALLBACK |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
+				.cra_module = THIS_MODULE,
 			},
-			.cra_init = spacc_aead_cra_init,
-			.cra_exit = spacc_aead_cra_exit,
+			.setkey = spacc_aead_setkey,
+			.setauthsize = spacc_aead_setauthsize,
+			.encrypt = spacc_aead_encrypt,
+			.decrypt = spacc_aead_decrypt,
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			.init = spacc_aead_cra_init,
+			.exit = spacc_aead_cra_exit,
 		},
 	},
 };
@@ -1707,6 +1625,8 @@
 		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
 		engine->algs		= ipsec_engine_algs;
 		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
+		engine->aeads		= ipsec_engine_aeads;
+		engine->num_aeads	= ARRAY_SIZE(ipsec_engine_aeads);
 	} else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
 		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
 		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
@@ -1815,17 +1735,40 @@
 				engine->algs[i].alg.cra_name);
 	}
 
+	INIT_LIST_HEAD(&engine->registered_aeads);
+	for (i = 0; i < engine->num_aeads; ++i) {
+		engine->aeads[i].engine = engine;
+		err = crypto_register_aead(&engine->aeads[i].alg);
+		if (!err) {
+			list_add_tail(&engine->aeads[i].entry,
+				      &engine->registered_aeads);
+			ret = 0;
+		}
+		if (err)
+			dev_err(engine->dev, "failed to register alg \"%s\"\n",
+				engine->aeads[i].alg.base.cra_name);
+		else
+			dev_dbg(engine->dev, "registered alg \"%s\"\n",
+				engine->aeads[i].alg.base.cra_name);
+	}
+
 	return ret;
 }
 
 static int spacc_remove(struct platform_device *pdev)
 {
+	struct spacc_aead *aead, *an;
 	struct spacc_alg *alg, *next;
 	struct spacc_engine *engine = platform_get_drvdata(pdev);
 
 	del_timer_sync(&engine->packet_timeout);
 	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
 
+	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
+		list_del(&aead->entry);
+		crypto_unregister_aead(&aead->alg);
+	}
+
 	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
 		list_del(&alg->entry);
 		crypto_unregister_alg(&alg->alg);
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index 6fdb9e8..eefccf7 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -3,11 +3,13 @@
 	select CRYPTO_AEAD
 	select CRYPTO_AUTHENC
 	select CRYPTO_BLKCIPHER
+	select CRYPTO_AKCIPHER
 	select CRYPTO_HMAC
 	select CRYPTO_SHA1
 	select CRYPTO_SHA256
 	select CRYPTO_SHA512
 	select FW_LOADER
+	select ASN1
 
 config CRYPTO_DEV_QAT_DH895xCC
 	tristate "Support for Intel(R) DH895xCC"
@@ -19,3 +21,16 @@
 
 	  To compile this as a module, choose M here: the module
 	  will be called qat_dh895xcc.
+
+config CRYPTO_DEV_QAT_DH895xCCVF
+	tristate "Support for Intel(R) DH895xCC Virtual Function"
+	depends on X86 && PCI
+	select PCI_IOV
+	select CRYPTO_DEV_QAT
+
+	help
+	  Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+	  Virtual Function for accelerating crypto and compression workloads.
+
+	  To compile this as a module, choose M here: the module
+	  will be called qat_dh895xccvf.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index d11481b..a3ce0b7 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
diff --git a/drivers/crypto/qat/qat_common/.gitignore b/drivers/crypto/qat/qat_common/.gitignore
new file mode 100644
index 0000000..ee328374
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/.gitignore
@@ -0,0 +1 @@
+*-asn1.[ch]
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index e0424dc..df20a9d 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -1,3 +1,6 @@
+$(obj)/qat_rsakey-asn1.o: $(obj)/qat_rsakey-asn1.c $(obj)/qat_rsakey-asn1.h
+clean-files += qat_rsakey-asn1.c qat_rsakey-asn1.h
+
 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
 intel_qat-objs := adf_cfg.o \
 	adf_ctl_drv.o \
@@ -6,9 +9,14 @@
 	adf_accel_engine.o \
 	adf_aer.o \
 	adf_transport.o \
+	adf_admin.o \
+	adf_hw_arbiter.o \
 	qat_crypto.o \
 	qat_algs.o \
+	qat_rsakey-asn1.o \
+	qat_asym_algs.o \
 	qat_uclo.o \
 	qat_hal.o
 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
+intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_pf2vf_msg.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 5fe9029..ca853d5 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -46,13 +46,17 @@
 */
 #ifndef ADF_ACCEL_DEVICES_H_
 #define ADF_ACCEL_DEVICES_H_
+#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/io.h>
+#include <linux/ratelimit.h>
 #include "adf_cfg_common.h"
 
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCCIOV_PCI_DEVICE_ID 0x443
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
@@ -79,6 +83,7 @@
 struct adf_accel_msix {
 	struct msix_entry *entries;
 	char **names;
+	u32 num_entries;
 } __packed;
 
 struct adf_accel_pci {
@@ -99,6 +104,7 @@
 	DEV_SKU_2,
 	DEV_SKU_3,
 	DEV_SKU_4,
+	DEV_SKU_VF,
 	DEV_SKU_UNKNOWN,
 };
 
@@ -113,6 +119,8 @@
 		return "SKU3";
 	case DEV_SKU_4:
 		return "SKU4";
+	case DEV_SKU_VF:
+		return "SKUVF";
 	case DEV_SKU_UNKNOWN:
 	default:
 		break;
@@ -135,23 +143,29 @@
 	struct adf_hw_device_class *dev_class;
 	uint32_t (*get_accel_mask)(uint32_t fuse);
 	uint32_t (*get_ae_mask)(uint32_t fuse);
+	uint32_t (*get_sram_bar_id)(struct adf_hw_device_data *self);
 	uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
 	uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
 	uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
 	uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+	uint32_t (*get_pf2vf_offset)(uint32_t i);
+	uint32_t (*get_vintmsk_offset)(uint32_t i);
 	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
-	void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
-	void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
 	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
 	void (*free_irq)(struct adf_accel_dev *accel_dev);
 	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
 	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
 	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
 	int (*init_arb)(struct adf_accel_dev *accel_dev);
 	void (*exit_arb)(struct adf_accel_dev *accel_dev);
+	void (*get_arb_mapping)(struct adf_accel_dev *accel_dev,
+				const uint32_t **cfg);
+	void (*disable_iov)(struct adf_accel_dev *accel_dev);
 	void (*enable_ints)(struct adf_accel_dev *accel_dev);
+	int (*enable_vf2pf_comms)(struct adf_accel_dev *accel_dev);
 	const char *fw_name;
-	uint32_t pci_dev_id;
+	const char *fw_mmp_name;
 	uint32_t fuses;
 	uint32_t accel_capabilities_mask;
 	uint16_t accel_mask;
@@ -163,6 +177,7 @@
 	uint8_t num_accel;
 	uint8_t num_logical_accel;
 	uint8_t num_engines;
+	uint8_t min_iov_compat_ver;
 } __packed;
 
 /* CSR write macro */
@@ -184,6 +199,16 @@
 struct adf_fw_loader_data {
 	struct icp_qat_fw_loader_handle *fw_loader;
 	const struct firmware *uof_fw;
+	const struct firmware *mmp_fw;
+};
+
+struct adf_accel_vf_info {
+	struct adf_accel_dev *accel_dev;
+	struct tasklet_struct vf2pf_bh_tasklet;
+	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
+	struct ratelimit_state vf2pf_ratelimit;
+	u32 vf_nr;
+	bool init;
 };
 
 struct adf_accel_dev {
@@ -199,6 +224,21 @@
 	struct list_head list;
 	struct module *owner;
 	struct adf_accel_pci accel_pci_dev;
+	union {
+		struct {
+			/* vf_info is non-zero when SR-IOV is init'ed */
+			struct adf_accel_vf_info *vf_info;
+		} pf;
+		struct {
+			char *irq_name;
+			struct tasklet_struct pf2vf_bh_tasklet;
+			struct mutex vf2pf_lock; /* protect CSR access */
+			struct completion iov_msg_completion;
+			uint8_t compatible;
+			uint8_t pf_version;
+		} vf;
+	};
+	bool is_vf;
 	uint8_t accel_id;
 } __packed;
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
index fdda8e7..20b08bd 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c
@@ -55,24 +55,36 @@
 {
 	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-	void *uof_addr;
-	uint32_t uof_size;
+	void *uof_addr, *mmp_addr;
+	u32 uof_size, mmp_size;
 
+	if (!hw_device->fw_name)
+		return 0;
+
+	if (request_firmware(&loader_data->mmp_fw, hw_device->fw_mmp_name,
+			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to load MMP firmware %s\n",
+			hw_device->fw_mmp_name);
+		return -EFAULT;
+	}
 	if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
 			     &accel_dev->accel_pci_dev.pci_dev->dev)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to load firmware %s\n",
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF firmware %s\n",
 			hw_device->fw_name);
-		return -EFAULT;
+		goto out_err;
 	}
 
 	uof_size = loader_data->uof_fw->size;
 	uof_addr = (void *)loader_data->uof_fw->data;
+	mmp_size = loader_data->mmp_fw->size;
+	mmp_addr = (void *)loader_data->mmp_fw->data;
+	qat_uclo_wr_mimage(loader_data->fw_loader, mmp_addr, mmp_size);
 	if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
 		dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
 		goto out_err;
 	}
 	if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to map UOF\n");
+		dev_err(&GET_DEV(accel_dev), "Failed to load UOF\n");
 		goto out_err;
 	}
 	return 0;
@@ -85,11 +97,17 @@
 void adf_ae_fw_release(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return;
 
 	qat_uclo_del_uof_obj(loader_data->fw_loader);
 	qat_hal_deinit(loader_data->fw_loader);
 	release_firmware(loader_data->uof_fw);
+	release_firmware(loader_data->mmp_fw);
 	loader_data->uof_fw = NULL;
+	loader_data->mmp_fw = NULL;
 	loader_data->fw_loader = NULL;
 }
 
@@ -99,6 +117,9 @@
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
 
+	if (!hw_data->fw_name)
+		return 0;
+
 	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
 		if (hw_data->ae_mask & (1 << ae)) {
 			qat_hal_start(loader_data->fw_loader, ae, 0xFF);
@@ -117,6 +138,9 @@
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
 
+	if (!hw_data->fw_name)
+		return 0;
+
 	for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
 		if (hw_data->ae_mask & (1 << ae)) {
 			qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
@@ -143,6 +167,10 @@
 int adf_ae_init(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
 
 	loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
 	if (!loader_data)
@@ -166,6 +194,10 @@
 int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
 {
 	struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+	if (!hw_device->fw_name)
+		return 0;
 
 	qat_hal_deinit(loader_data->fw_loader);
 	kfree(accel_dev->fw_loader);
diff --git a/drivers/crypto/qat/qat_common/adf_admin.c b/drivers/crypto/qat/qat_common/adf_admin.c
new file mode 100644
index 0000000..147d755
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_admin.c
@@ -0,0 +1,290 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_init_admin.h"
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_ADMINMSG_LEN 32
+
+static const u8 const_tab[1024] = {
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x01,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x13, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13,
+0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0xfe, 0xdc, 0xba, 0x98, 0x76,
+0x54, 0x32, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x23, 0x01, 0xef, 0xcd, 0xab,
+0x89, 0x98, 0xba, 0xdc, 0xfe, 0x10, 0x32, 0x54, 0x76, 0xc3, 0xd2, 0xe1, 0xf0,
+0x00, 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x05, 0x9e,
+0xd8, 0x36, 0x7c, 0xd5, 0x07, 0x30, 0x70, 0xdd, 0x17, 0xf7, 0x0e, 0x59, 0x39,
+0xff, 0xc0, 0x0b, 0x31, 0x68, 0x58, 0x15, 0x11, 0x64, 0xf9, 0x8f, 0xa7, 0xbe,
+0xfa, 0x4f, 0xa4, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xbb, 0x67, 0xae,
+0x85, 0x3c, 0x6e, 0xf3, 0x72, 0xa5, 0x4f, 0xf5, 0x3a, 0x51, 0x0e, 0x52, 0x7f,
+0x9b, 0x05, 0x68, 0x8c, 0x1f, 0x83, 0xd9, 0xab, 0x5b, 0xe0, 0xcd, 0x19, 0x05,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8, 0x62, 0x9a, 0x29,
+0x2a, 0x36, 0x7c, 0xd5, 0x07, 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
+0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39, 0x67, 0x33, 0x26, 0x67, 0xff,
+0xc0, 0x0b, 0x31, 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11, 0xdb, 0x0c,
+0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7, 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f,
+0xa4, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08, 0xbb,
+0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b, 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94,
+0xf8, 0x2b, 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1, 0x51, 0x0e, 0x52,
+0x7f, 0xad, 0xe6, 0x82, 0xd1, 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
+0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b, 0x5b, 0xe0, 0xcd, 0x19, 0x13,
+0x7e, 0x21, 0x79, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+struct adf_admin_comms {
+	dma_addr_t phy_addr;
+	dma_addr_t const_tbl_addr;
+	void *virt_addr;
+	void __iomem *mailbox_addr;
+	struct mutex lock;	/* protects adf_admin_comms struct */
+};
+
+static int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev, u32 ae,
+				  void *in, void *out)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+	int offset = ae * ADF_ADMINMSG_LEN * 2;
+	void __iomem *mailbox = admin->mailbox_addr;
+	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+	int times, received;
+
+	mutex_lock(&admin->lock);
+
+	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+		mutex_unlock(&admin->lock);
+		return -EAGAIN;
+	}
+
+	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+	ADF_CSR_WR(mailbox, mb_offset, 1);
+	received = 0;
+	for (times = 0; times < 50; times++) {
+		msleep(20);
+		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+			received = 1;
+			break;
+		}
+	}
+	if (received)
+		memcpy(out, admin->virt_addr + offset +
+		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+	else
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send admin msg to accelerator\n");
+
+	mutex_unlock(&admin->lock);
+	return received ? 0 : -EFAULT;
+}
+
+static int adf_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+	struct icp_qat_fw_init_admin_req req;
+	struct icp_qat_fw_init_admin_resp resp;
+	int i;
+
+	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+	req.init_admin_cmd_id = cmd;
+
+	if (cmd == ICP_QAT_FW_CONSTANTS_CFG) {
+		req.init_cfg_sz = 1024;
+		req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
+	}
+	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+		    resp.init_resp_hdr.status)
+			return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * adf_send_admin_init() - Function sends init message to FW
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function sends admin init message to the FW
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_admin_init(struct adf_accel_dev *accel_dev)
+{
+	int ret = adf_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+
+	if (ret)
+		return ret;
+	return adf_send_admin_cmd(accel_dev, ICP_QAT_FW_CONSTANTS_CFG);
+}
+EXPORT_SYMBOL_GPL(adf_send_admin_init);
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *csr = pmisc->virt_addr;
+	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+	u64 reg_val;
+
+	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
+			     dev_to_node(&GET_DEV(accel_dev)));
+	if (!admin)
+		return -ENOMEM;
+	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+					       &admin->phy_addr, GFP_KERNEL);
+	if (!admin->virt_addr) {
+		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+		kfree(admin);
+		return -ENOMEM;
+	}
+
+	admin->const_tbl_addr = dma_map_single(&GET_DEV(accel_dev),
+					       (void *) const_tab, 1024,
+					       DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(&GET_DEV(accel_dev),
+				       admin->const_tbl_addr))) {
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+		kfree(admin);
+		return -ENOMEM;
+	}
+	reg_val = (u64)admin->phy_addr;
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+	mutex_init(&admin->lock);
+	admin->mailbox_addr = mailbox;
+	accel_dev->admin = admin;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_admin_comms);
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+	struct adf_admin_comms *admin = accel_dev->admin;
+
+	if (!admin)
+		return;
+
+	if (admin->virt_addr)
+		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+				  admin->virt_addr, admin->phy_addr);
+
+	dma_unmap_single(&GET_DEV(accel_dev), admin->const_tbl_addr, 1024,
+			 DMA_TO_DEVICE);
+	mutex_destroy(&admin->lock);
+	kfree(admin);
+	accel_dev->admin = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_exit_admin_comms);
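For context, the admin channel added above is brought up and torn down by the device-specific driver through the helpers exported here. The fragment below is only an illustrative sketch of that ordering (the example_ function name is hypothetical and not part of the patch); it omits the surrounding probe/remove plumbing.

/* Illustrative sketch only (not part of the patch): expected ordering of
 * the admin-channel helpers exported above. Assumes normal kernel driver
 * context with adf_accel_devices.h and adf_common_drv.h available. */
static int example_admin_bringup(struct adf_accel_dev *accel_dev)
{
	int ret;

	/* Map the mailbox CSRs and allocate the DMA-able message buffer. */
	ret = adf_init_admin_comms(accel_dev);
	if (ret)
		return ret;

	/* With the acceleration engines running, send the ME init and
	 * constants-table commands to every AE. */
	ret = adf_send_admin_init(accel_dev);
	if (ret)
		adf_exit_admin_comms(accel_dev);

	return ret;
}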
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 2dbc733..a57b419 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -91,6 +91,9 @@
 	dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
 		 accel_dev->accel_id);
 
+	if (!parent)
+		parent = pdev;
+
 	if (!pci_wait_for_pending_transaction(pdev))
 		dev_info(&GET_DEV(accel_dev),
 			 "Transaction still in progress. Proceeding\n");
@@ -206,7 +209,7 @@
  * QAT acceleration device accel_dev.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
 {
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index ab65bc2..d087979 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -123,7 +123,7 @@
  * The table stores device specific config values.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
 {
@@ -178,6 +178,9 @@
 {
 	struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
 
+	if (!dev_cfg_data)
+		return;
+
 	down_write(&dev_cfg_data->lock);
 	adf_cfg_section_del_all(&dev_cfg_data->sec_list);
 	up_write(&dev_cfg_data->lock);
@@ -276,7 +279,7 @@
  * in the given acceleration device
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
 				const char *section_name,
@@ -327,7 +330,7 @@
  * will be stored.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
 {
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
index 88b8218..c697fb1 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/qat/qat_common/adf_cfg_common.h
@@ -60,7 +60,7 @@
 #define ADF_CFG_NO_DEVICE 0xFF
 #define ADF_CFG_AFFINITY_WHATEVER 0xFF
 #define MAX_DEVICE_NAME_SIZE 32
-#define ADF_MAX_DEVICES 32
+#define ADF_MAX_DEVICES (32 * 32)
 
 enum adf_cfg_val_type {
 	ADF_DEC,
@@ -71,6 +71,7 @@
 enum adf_device_type {
 	DEV_UNKNOWN = 0,
 	DEV_DH895XCC,
+	DEV_DH895XCCVF,
 };
 
 struct adf_dev_status_info {
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 27e16c0..7836dff 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -54,8 +54,8 @@
 #include "icp_qat_hal.h"
 
 #define ADF_MAJOR_VERSION	0
-#define ADF_MINOR_VERSION	1
-#define ADF_BUILD_VERSION	3
+#define ADF_MINOR_VERSION	2
+#define ADF_BUILD_VERSION	0
 #define ADF_DRV_VERSION		__stringify(ADF_MAJOR_VERSION) "." \
 				__stringify(ADF_MINOR_VERSION) "." \
 				__stringify(ADF_BUILD_VERSION)
@@ -91,9 +91,13 @@
 	unsigned long start_status;
 	char *name;
 	struct list_head list;
-	int admin;
 };
 
+static inline int get_current_node(void)
+{
+	return topology_physical_package_id(smp_processor_id());
+}
+
 int adf_service_register(struct service_hndl *service);
 int adf_service_unregister(struct service_hndl *service);
 
@@ -102,13 +106,24 @@
 int adf_dev_stop(struct adf_accel_dev *accel_dev);
 void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
 
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
+void adf_clean_vf_map(bool);
+
 int adf_ctl_dev_register(void);
 void adf_ctl_dev_unregister(void);
 int adf_processes_dev_register(void);
 void adf_processes_dev_unregister(void);
 
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf);
 struct list_head *adf_devmgr_get_head(void);
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
 struct adf_accel_dev *adf_devmgr_get_first(void);
@@ -130,6 +145,12 @@
 void adf_disable_aer(struct adf_accel_dev *accel_dev);
 int adf_init_aer(void);
 void adf_exit_aer(void);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb(struct adf_etr_ring_data *ring);
 
 int adf_dev_get(struct adf_accel_dev *accel_dev);
 void adf_dev_put(struct adf_accel_dev *accel_dev);
@@ -141,10 +162,13 @@
 struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
 void qat_crypto_put_instance(struct qat_crypto_instance *inst);
 void qat_alg_callback(void *resp);
+void qat_alg_asym_callback(void *resp);
 int qat_algs_init(void);
 void qat_algs_exit(void);
 int qat_algs_register(void);
 int qat_algs_unregister(void);
+int qat_asym_algs_register(void);
+void qat_asym_algs_unregister(void);
 
 int qat_hal_init(struct adf_accel_dev *accel_dev);
 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
@@ -196,4 +220,23 @@
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
 			 void *addr_ptr, int mem_size);
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+			void *addr_ptr, int mem_size);
+#if defined(CONFIG_PCI_IOV)
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
+void adf_disable_sriov(struct adf_accel_dev *accel_dev);
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				  uint32_t vf_mask);
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 uint32_t vf_mask);
+#else
+static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	return 0;
+}
+
+static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
 #endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index e056b9e..cd8a12a 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -398,10 +398,9 @@
 	}
 
 	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
-	if (!accel_dev) {
-		pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+	if (!accel_dev)
 		return -ENODEV;
-	}
+
 	hw_data = accel_dev->hw_device;
 	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
 	dev_info.num_ae = hw_data->get_num_aes(hw_data);
@@ -495,6 +494,7 @@
 	adf_exit_aer();
 	qat_crypto_unregister();
 	qat_algs_exit();
+	adf_clean_vf_map(false);
 	mutex_destroy(&adf_ctl_lock);
 }
 
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index 3f0ff9e..8dfdb8f 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -50,21 +50,125 @@
 #include "adf_common_drv.h"
 
 static LIST_HEAD(accel_table);
+static LIST_HEAD(vfs_table);
 static DEFINE_MUTEX(table_lock);
 static uint32_t num_devices;
 
+struct vf_id_map {
+	u32 bdf;
+	u32 id;
+	u32 fake_id;
+	bool attached;
+	struct list_head list;
+};
+
+static int adf_get_vf_id(struct adf_accel_dev *vf)
+{
+	return ((7 * (PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1)) +
+		PCI_FUNC(accel_to_pci_dev(vf)->devfn) +
+		(PCI_SLOT(accel_to_pci_dev(vf)->devfn) - 1));
+}
+
+static int adf_get_vf_num(struct adf_accel_dev *vf)
+{
+	return (accel_to_pci_dev(vf)->bus->number << 8) | adf_get_vf_id(vf);
+}
+
+static struct vf_id_map *adf_find_vf(u32 bdf)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+
+		if (ptr->bdf == bdf)
+			return ptr;
+	}
+	return NULL;
+}
+
+static int adf_get_vf_real_id(u32 fake)
+{
+	struct list_head *itr;
+
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->fake_id == fake)
+			return ptr->id;
+	}
+	return -1;
+}
+
+/**
+ * adf_clean_vf_map() - Cleans VF id mappings
+ *
+ * Function cleans internal ids for virtual functions.
+ * @vf: flag indicating whether mappings are cleaned
+ *	for VFs only or for both VFs and PFs
+ */
+void adf_clean_vf_map(bool vf)
+{
+	struct vf_id_map *map;
+	struct list_head *ptr, *tmp;
+
+	mutex_lock(&table_lock);
+	list_for_each_safe(ptr, tmp, &vfs_table) {
+		map = list_entry(ptr, struct vf_id_map, list);
+		if (map->bdf != -1)
+			num_devices--;
+
+		if (vf && map->bdf == -1)
+			continue;
+
+		list_del(ptr);
+		kfree(map);
+	}
+	mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_clean_vf_map);
+
+/**
+ * adf_devmgr_update_class_index() - Update internal index
+ * @hw_data:  Pointer to internal device data.
+ *
+ * Function updates internal dev index for VFs
+ */
+void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data)
+{
+	struct adf_hw_device_class *class = hw_data->dev_class;
+	struct list_head *itr;
+	int i = 0;
+
+	list_for_each(itr, &accel_table) {
+		struct adf_accel_dev *ptr =
+				list_entry(itr, struct adf_accel_dev, list);
+
+		if (ptr->hw_device->dev_class == class)
+			ptr->hw_device->instance_id = i++;
+
+		if (i == class->instances)
+				break;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_update_class_index);
+
 /**
  * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
  * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
  *
  * Function adds acceleration device to the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
-int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
 {
 	struct list_head *itr;
+	int ret = 0;
 
 	if (num_devices == ADF_MAX_DEVICES) {
 		dev_err(&GET_DEV(accel_dev), "Only support up to %d devices\n",
@@ -73,20 +177,77 @@
 	}
 
 	mutex_lock(&table_lock);
-	list_for_each(itr, &accel_table) {
-		struct adf_accel_dev *ptr =
+	atomic_set(&accel_dev->ref_count, 0);
+
+	/* PF on host or VF on guest */
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		struct vf_id_map *map;
+
+		list_for_each(itr, &accel_table) {
+			struct adf_accel_dev *ptr =
 				list_entry(itr, struct adf_accel_dev, list);
 
-		if (ptr == accel_dev) {
-			mutex_unlock(&table_lock);
-			return -EEXIST;
+			if (ptr == accel_dev) {
+				ret = -EEXIST;
+				goto unlock;
+			}
 		}
+
+		list_add_tail(&accel_dev->list, &accel_table);
+		accel_dev->accel_id = num_devices++;
+
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		map->bdf = ~0;
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
+	} else if (accel_dev->is_vf && pf) {
+		/* VF on host */
+		struct adf_accel_vf_info *vf_info;
+		struct vf_id_map *map;
+
+		vf_info = pf->pf.vf_info + adf_get_vf_id(accel_dev);
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (map) {
+			struct vf_id_map *next;
+
+			accel_dev->accel_id = map->id;
+			list_add_tail(&accel_dev->list, &accel_table);
+			map->fake_id++;
+			map->attached = true;
+			next = list_next_entry(map, list);
+			while (next && &next->list != &vfs_table) {
+				next->fake_id++;
+				next = list_next_entry(next, list);
+			}
+
+			ret = 0;
+			goto unlock;
+		}
+
+		map = kzalloc(sizeof(*map), GFP_KERNEL);
+		if (!map) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+
+		accel_dev->accel_id = num_devices++;
+		list_add_tail(&accel_dev->list, &accel_table);
+		map->bdf = adf_get_vf_num(accel_dev);
+		map->id = accel_dev->accel_id;
+		map->fake_id = map->id;
+		map->attached = true;
+		list_add_tail(&map->list, &vfs_table);
 	}
-	atomic_set(&accel_dev->ref_count, 0);
-	list_add_tail(&accel_dev->list, &accel_table);
-	accel_dev->accel_id = num_devices++;
+unlock:
 	mutex_unlock(&table_lock);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
 
@@ -98,17 +259,37 @@
 /**
  * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
  * @accel_dev:  Pointer to acceleration device.
+ * @pf:		Corresponding PF if the accel_dev is a VF
  *
  * Function removes acceleration device from the acceleration framework.
  * To be used by QAT device specific drivers.
  *
  * Return: void
  */
-void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
+		       struct adf_accel_dev *pf)
 {
 	mutex_lock(&table_lock);
+	if (!accel_dev->is_vf || (accel_dev->is_vf && !pf)) {
+		num_devices--;
+	} else if (accel_dev->is_vf && pf) {
+		struct vf_id_map *map, *next;
+
+		map = adf_find_vf(adf_get_vf_num(accel_dev));
+		if (!map) {
+			dev_err(&GET_DEV(accel_dev), "Failed to find VF map\n");
+			goto unlock;
+		}
+		map->fake_id--;
+		map->attached = false;
+		next = list_next_entry(map, list);
+		while (next && &next->list != &vfs_table) {
+			next->fake_id--;
+			next = list_next_entry(next, list);
+		}
+	}
+unlock:
 	list_del(&accel_dev->list);
-	num_devices--;
 	mutex_unlock(&table_lock);
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
@@ -154,17 +335,24 @@
 struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
 {
 	struct list_head *itr;
+	int real_id;
 
 	mutex_lock(&table_lock);
+	real_id = adf_get_vf_real_id(id);
+	if (real_id < 0)
+		goto unlock;
+
+	id = real_id;
+
 	list_for_each(itr, &accel_table) {
 		struct adf_accel_dev *ptr =
 				list_entry(itr, struct adf_accel_dev, list);
-
 		if (ptr->accel_id == id) {
 			mutex_unlock(&table_lock);
 			return ptr;
 		}
 	}
+unlock:
 	mutex_unlock(&table_lock);
 	return NULL;
 }
@@ -180,21 +368,52 @@
 	return -ENODEV;
 }
 
-void adf_devmgr_get_num_dev(uint32_t *num)
+static int adf_get_num_dettached_vfs(void)
 {
 	struct list_head *itr;
+	int vfs = 0;
 
-	*num = 0;
-	list_for_each(itr, &accel_table) {
-		(*num)++;
+	mutex_lock(&table_lock);
+	list_for_each(itr, &vfs_table) {
+		struct vf_id_map *ptr =
+			list_entry(itr, struct vf_id_map, list);
+		if (ptr->bdf != ~0 && !ptr->attached)
+			vfs++;
 	}
+	mutex_unlock(&table_lock);
+	return vfs;
 }
 
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+	*num = num_devices - adf_get_num_dettached_vfs();
+}
+
+/**
+ * adf_dev_in_use() - Check whether accel_dev is currently in use
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when device is in use, 0 otherwise.
+ */
 int adf_dev_in_use(struct adf_accel_dev *accel_dev)
 {
 	return atomic_read(&accel_dev->ref_count) != 0;
 }
+EXPORT_SYMBOL_GPL(adf_dev_in_use);
 
+/**
+ * adf_dev_get() - Increment accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Increment the accel_dev refcount; if this takes the device from
+ * unused to in use, increment the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 when successful, -EFAULT when it fails to bump the module refcount
+ */
 int adf_dev_get(struct adf_accel_dev *accel_dev)
 {
 	if (atomic_add_return(1, &accel_dev->ref_count) == 1)
@@ -202,19 +421,50 @@
 			return -EFAULT;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(adf_dev_get);
 
+/**
+ * adf_dev_put() - Decrement accel_dev reference count
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Decrement the accel_dev refcount; if this drops the device back to
+ * unused, decrement the module refcount too.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
 void adf_dev_put(struct adf_accel_dev *accel_dev)
 {
 	if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
 		module_put(accel_dev->owner);
 }
+EXPORT_SYMBOL_GPL(adf_dev_put);
 
+/**
+ * adf_devmgr_in_reset() - Check whether device is in reset
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device is being reset, 0 otherwise.
+ */
 int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
 {
 	return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
 }
+EXPORT_SYMBOL_GPL(adf_devmgr_in_reset);
 
+/**
+ * adf_dev_started() - Check whether device has started
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 1 when the device has started, 0 otherwise
+ */
 int adf_dev_started(struct adf_accel_dev *accel_dev)
 {
 	return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
 }
+EXPORT_SYMBOL_GPL(adf_dev_started);
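The reference-count helpers exported above are intended to bracket any use of an accel_dev obtained from the device manager. The fragment below is only an illustrative sketch of that pairing (the example_ function name is hypothetical and not part of the patch); it assumes normal kernel driver context with adf_accel_devices.h and adf_common_drv.h available.

/* Illustrative sketch only (not part of the patch): pairing adf_dev_get()
 * with adf_dev_put() around a lookup by device id. */
static int example_check_device(uint32_t id)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_get_dev_by_id(id);
	int ret = -EBUSY;

	if (!accel_dev)
		return -ENODEV;

	if (adf_dev_get(accel_dev))
		return -EFAULT;	/* could not take a module reference */

	if (adf_dev_started(accel_dev) && !adf_devmgr_in_reset(accel_dev))
		ret = 0;	/* device is usable at this point */

	adf_dev_put(accel_dev);
	return ret;
}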
diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
new file mode 100644
index 0000000..6849422
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c
@@ -0,0 +1,168 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+	(ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+	(ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+	ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+	u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
+	u32 arb, i;
+	const u32 *thd_2_arb_cfg;
+
+	/* Service arb configured for 32-byte responses and
+	 * ring flow control check enabled. */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+	/* Setup service weighting */
+	for (arb = 0; arb < ADF_ARB_NUM; arb++)
+		for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+			WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+	/* Setup ring response ordering */
+	for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+		WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+	/* Setup worker queue registers */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+	/* Map worker threads to service arbiters */
+	hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
+
+	if (!thd_2_arb_cfg)
+		return -EFAULT;
+
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(adf_init_arb);
+
+/**
+ * adf_update_ring_arb() - update ring arbitration register
+ * @ring:  Pointer to ring data.
+ *
+ * Function enables or disables rings for/from arbitration.
+ */
+void adf_update_ring_arb(struct adf_etr_ring_data *ring)
+{
+	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+				   ring->bank->bank_number,
+				   ring->bank->ring_mask & 0xFF);
+}
+EXPORT_SYMBOL_GPL(adf_update_ring_arb);
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *csr;
+	unsigned int i;
+
+	if (!accel_dev->transport)
+		return;
+
+	csr = accel_dev->transport->banks[0].csr_addr;
+
+	/* Reset arbiter configuration */
+	for (i = 0; i < ADF_ARB_NUM; i++)
+		WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+	/* Shutdown work queue */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+	/* Unmap worker threads to service arbiters */
+	for (i = 0; i < hw_data->num_engines; i++)
+		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+	/* Disable arbitration on all rings */
+	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
+EXPORT_SYMBOL_GPL(adf_exit_arb);
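
A worked example of the arbiter CSR addressing above (illustration only, not
part of the patch): the weight write for service arbiter 2, request ring 3
expands to

	WRITE_CSR_ARB_WEIGHT(csr, 2, 3, val)
	  => ADF_CSR_WR(csr, ADF_ARB_OFFSET + ADF_ARB_WTR_OFFSET +
			     ADF_ARB_WTR_SIZE * 2 + ADF_ARB_REG_SIZE * 3, val)
	  => ADF_CSR_WR(csr, 0x30000 + 0x010 + 0x40 + 0xC, val)	/* 0x3005C */

relative to banks[0].csr_addr, i.e. each of the ADF_ARB_NUM arbiters owns a
0x20-byte block of per-ring weight registers starting at offset 0x30010.
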
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 245f432..ac37a89 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -69,7 +69,7 @@
  * Function adds the acceleration service to the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_service_register(struct service_hndl *service)
 {
@@ -94,7 +94,7 @@
  * Function remove the acceleration service from the acceleration framework.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_service_unregister(struct service_hndl *service)
 {
@@ -114,7 +114,7 @@
  * Initialize the ring data structures and the admin comms and arbitration
  * services.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_init(struct adf_accel_dev *accel_dev)
 {
@@ -177,20 +177,6 @@
 	 */
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to initialise service %s\n",
-				service->name);
-			return -EFAULT;
-		}
-		set_bit(accel_dev->accel_id, &service->init_status);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to initialise service %s\n",
@@ -201,6 +187,7 @@
 	}
 
 	hw_data->enable_error_correction(accel_dev);
+	hw_data->enable_vf2pf_comms(accel_dev);
 
 	return 0;
 }
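
The hunks in this file replace the per-service admin handling with three
adf_hw_device_data callbacks: enable_vf2pf_comms (above), plus send_admin_init
and disable_iov in the hunks below. A minimal sketch of the wiring a
device-specific driver is assumed to provide; the names on the right-hand side
are placeholders, not functions from this patch:

	static void my_init_hw_data(struct adf_hw_device_data *hw_data)
	{
		/* called at the end of adf_dev_init() */
		hw_data->enable_vf2pf_comms = my_enable_vf2pf_comms;
		/* called from adf_dev_start() before the START events */
		hw_data->send_admin_init = my_send_admin_init;
		/* called from adf_dev_shutdown() after exit_admin_comms() */
		hw_data->disable_iov = my_disable_iov;
	}
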
@@ -214,10 +201,11 @@
  * is ready to be used.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_start(struct adf_accel_dev *accel_dev)
 {
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct service_hndl *service;
 	struct list_head *list_itr;
 
@@ -229,22 +217,13 @@
 	}
 	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
 
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to start service %s\n",
-				service->name);
-			return -EFAULT;
-		}
-		set_bit(accel_dev->accel_id, &service->start_status);
+	if (hw_data->send_admin_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
+		return -EFAULT;
 	}
+
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to start service %s\n",
@@ -257,7 +236,8 @@
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	set_bit(ADF_STATUS_STARTED, &accel_dev->status);
 
-	if (qat_algs_register()) {
+	if (!list_empty(&accel_dev->crypto_list) &&
+	    (qat_algs_register() || qat_asym_algs_register())) {
 		dev_err(&GET_DEV(accel_dev),
 			"Failed to register crypto algs\n");
 		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -276,7 +256,7 @@
  * is shuting down.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
@@ -292,14 +272,15 @@
 	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
 	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
 
-	if (qat_algs_unregister())
+	if (!list_empty(&accel_dev->crypto_list) && qat_algs_unregister())
 		dev_err(&GET_DEV(accel_dev),
 			"Failed to unregister crypto algs\n");
 
+	if (!list_empty(&accel_dev->crypto_list))
+		qat_asym_algs_unregister();
+
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
 		if (!test_bit(accel_dev->accel_id, &service->start_status))
 			continue;
 		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -310,19 +291,6 @@
 			clear_bit(accel_dev->accel_id, &service->start_status);
 		}
 	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
-		if (!test_bit(accel_dev->accel_id, &service->start_status))
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to shutdown service %s\n",
-				service->name);
-		else
-			clear_bit(accel_dev->accel_id, &service->start_status);
-	}
 
 	if (wait)
 		msleep(100);
@@ -373,21 +341,6 @@
 
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
-		if (!test_bit(accel_dev->accel_id, &service->init_status))
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to shutdown service %s\n",
-				service->name);
-		else
-			clear_bit(accel_dev->accel_id, &service->init_status);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
 		if (!test_bit(accel_dev->accel_id, &service->init_status))
 			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -413,6 +366,7 @@
 	if (hw_data->exit_admin_comms)
 		hw_data->exit_admin_comms(accel_dev);
 
+	hw_data->disable_iov(accel_dev);
 	adf_cleanup_etr_data(accel_dev);
 }
 EXPORT_SYMBOL_GPL(adf_dev_shutdown);
@@ -424,17 +378,6 @@
 
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to restart service %s.\n",
-				service->name);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to restart service %s.\n",
@@ -450,17 +393,6 @@
 
 	list_for_each(list_itr, &service_table) {
 		service = list_entry(list_itr, struct service_hndl, list);
-		if (service->admin)
-			continue;
-		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
-			dev_err(&GET_DEV(accel_dev),
-				"Failed to restart service %s.\n",
-				service->name);
-	}
-	list_for_each(list_itr, &service_table) {
-		service = list_entry(list_itr, struct service_hndl, list);
-		if (!service->admin)
-			continue;
 		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
 			dev_err(&GET_DEV(accel_dev),
 				"Failed to restart service %s.\n",
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
new file mode 100644
index 0000000..5fdbad8
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -0,0 +1,438 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/pci.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_pf2vf_msg.h"
+
+#define ADF_DH895XCC_EP_OFFSET	0x3A000
+#define ADF_DH895XCC_ERRMSK3	(ADF_DH895XCC_EP_OFFSET + 0x1C)
+#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
+#define ADF_DH895XCC_ERRMSK5	(ADF_DH895XCC_EP_OFFSET + 0xDC)
+#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
+
+/**
+ * adf_enable_pf2vf_interrupts() - Enable PF to VF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function enables PF to VF interrupts
+ */
+void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x0);
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_interrupts);
+
+/**
+ * adf_disable_pf2vf_interrupts() - Disable PF to VF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PF to VF interrupts
+ */
+void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+
+	ADF_CSR_WR(pmisc_bar_addr, hw_data->get_vintmsk_offset(0), 0x2);
+}
+EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
+
+void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
+				 u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
+		reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
+		reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+
+/**
+ * adf_disable_vf2pf_interrupts() - Disable VF to PF interrupts
+ * @accel_dev:  Pointer to acceleration device.
+ * @vf_mask:  Mask of VF numbers for which to disable the interrupts.
+ *
+ * Function disables VF to PF interrupts
+ */
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 reg;
+
+	/* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
+	if (vf_mask & 0xFFFF) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
+			ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
+	}
+
+	/* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
+	if (vf_mask >> 16) {
+		reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
+			ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
+		ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
+	}
+}
+EXPORT_SYMBOL_GPL(adf_disable_vf2pf_interrupts);
+
+static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	void __iomem *pmisc_bar_addr =
+		pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)].virt_addr;
+	u32 val, pf2vf_offset, count = 0;
+	u32 local_in_use_mask, local_in_use_pattern;
+	u32 remote_in_use_mask, remote_in_use_pattern;
+	struct mutex *lock;	/* lock preventing concurrent access of CSR */
+	u32 int_bit;
+	int ret = 0;
+
+	if (accel_dev->is_vf) {
+		pf2vf_offset = hw_data->get_pf2vf_offset(0);
+		lock = &accel_dev->vf.vf2pf_lock;
+		local_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		local_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		remote_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		remote_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		int_bit = ADF_VF2PF_INT;
+	} else {
+		pf2vf_offset = hw_data->get_pf2vf_offset(vf_nr);
+		lock = &accel_dev->pf.vf_info[vf_nr].pf2vf_lock;
+		local_in_use_mask = ADF_PF2VF_IN_USE_BY_PF_MASK;
+		local_in_use_pattern = ADF_PF2VF_IN_USE_BY_PF;
+		remote_in_use_mask = ADF_VF2PF_IN_USE_BY_VF_MASK;
+		remote_in_use_pattern = ADF_VF2PF_IN_USE_BY_VF;
+		int_bit = ADF_PF2VF_INT;
+	}
+
+	mutex_lock(lock);
+
+	/* Check if PF2VF CSR is in use by remote function */
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & remote_in_use_mask) == remote_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote function\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/* Attempt to get ownership of PF2VF CSR */
+	msg &= ~local_in_use_mask;
+	msg |= local_in_use_pattern;
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
+
+	/* Wait in case remote func also attempting to get ownership */
+	msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
+
+	val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	if ((val & local_in_use_mask) != local_in_use_pattern) {
+		dev_dbg(&GET_DEV(accel_dev),
+			"PF2VF CSR in use by remote - collision detected\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * This function now owns the PF2VF CSR.  The IN_USE_BY pattern must
+	 * remain in the PF2VF CSR for all writes including ACK from remote
+	 * until this local function relinquishes the CSR.  Send the message
+	 * by interrupting the remote.
+	 */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
+
+	/* Wait for confirmation from remote func it received the message */
+	do {
+		msleep(ADF_IOV_MSG_ACK_DELAY);
+		val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
+	} while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+
+	if (val & int_bit) {
+		dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
+		val &= ~int_bit;
+		ret = -EIO;
+	}
+
+	/* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+	ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
+out:
+	mutex_unlock(lock);
+	return ret;
+}
+
+/**
+ * adf_iov_putmsg() - send PF2VF message
+ * @accel_dev:  Pointer to acceleration device.
+ * @msg:	Message to send
+ * @vf_nr:	VF number to which the message will be sent
+ *
+ * Function sends a message from the PF to a VF
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+{
+	u32 count = 0;
+	int ret;
+
+	do {
+		ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
+		if (ret)
+			msleep(ADF_IOV_MSG_RETRY_DELAY);
+	} while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(adf_iov_putmsg);
+
+void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
+{
+	struct adf_accel_dev *accel_dev = vf_info->accel_dev;
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	int bar_id = hw_data->get_misc_bar_id(hw_data);
+	struct adf_bar *pmisc = &GET_BARS(accel_dev)[bar_id];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	u32 msg, resp = 0, vf_nr = vf_info->vf_nr;
+
+	/* Read message from the VF */
+	msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+
+	/* To ACK, clear the VF2PFINT bit */
+	msg &= ~ADF_VF2PF_INT;
+	ADF_CSR_WR(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr), msg);
+
+	if (!(msg & ADF_VF2PF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) VF2PF messages */
+		goto err;
+
+	switch ((msg & ADF_VF2PF_MSGTYPE_MASK) >> ADF_VF2PF_MSGTYPE_SHIFT) {
+	case ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ:
+		{
+		u8 vf_compat_ver = msg >> ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+
+		dev_dbg(&GET_DEV(accel_dev),
+			"Compatibility Version Request from VF%d vers=%u\n",
+			vf_nr + 1, vf_compat_ver);
+
+		if (vf_compat_ver < hw_data->min_iov_compat_ver) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) incompatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_INCOMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else if (vf_compat_ver > ADF_PFVF_COMPATIBILITY_VERSION) {
+			dev_err(&GET_DEV(accel_dev),
+				"VF (vers %d) compat with PF (vers %d) unkn.\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPAT_UNKNOWN <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		} else {
+			dev_dbg(&GET_DEV(accel_dev),
+				"VF (vers %d) compatible with PF (vers %d)\n",
+				vf_compat_ver, ADF_PFVF_COMPATIBILITY_VERSION);
+			resp |= ADF_PF2VF_VF_COMPATIBLE <<
+				ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		}
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_VERSION_REQ:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Legacy VersionRequest received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		resp = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+			 (ADF_PF2VF_MSGTYPE_VERSION_RESP <<
+			  ADF_PF2VF_MSGTYPE_SHIFT) |
+			 (ADF_PFVF_COMPATIBILITY_VERSION <<
+			  ADF_PF2VF_VERSION_RESP_VERS_SHIFT));
+		resp |= ADF_PF2VF_VF_COMPATIBLE <<
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		/* Set legacy major and minor version num */
+		resp |= 1 << ADF_PF2VF_MAJORVERSION_SHIFT |
+			1 << ADF_PF2VF_MINORVERSION_SHIFT;
+		break;
+	case ADF_VF2PF_MSGTYPE_INIT:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Init message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = true;
+		}
+		break;
+	case ADF_VF2PF_MSGTYPE_SHUTDOWN:
+		{
+		dev_dbg(&GET_DEV(accel_dev),
+			"Shutdown message received from VF%d 0x%x\n",
+			vf_nr + 1, msg);
+		vf_info->init = false;
+		}
+		break;
+	default:
+		goto err;
+	}
+
+	if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+		dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+
+	/* re-enable interrupt on PF from this VF */
+	adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
+	return;
+err:
+	dev_dbg(&GET_DEV(accel_dev), "Unknown message from VF%d (0x%x)\n",
+		vf_nr + 1, msg);
+}
+
+void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_vf_info *vf;
+	u32 msg = (ADF_PF2VF_MSGORIGIN_SYSTEM |
+		(ADF_PF2VF_MSGTYPE_RESTARTING << ADF_PF2VF_MSGTYPE_SHIFT));
+	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
+		if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to send restarting msg to VF%d\n", i);
+	}
+}
+
+static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
+{
+	unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	u32 msg = 0;
+	int ret;
+
+	msg = ADF_VF2PF_MSGORIGIN_SYSTEM;
+	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ << ADF_VF2PF_MSGTYPE_SHIFT;
+	msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
+	BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
+
+	/* Send request from VF to PF */
+	ret = adf_iov_putmsg(accel_dev, msg, 0);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Compatibility Version Request.\n");
+		return ret;
+	}
+
+	/* Wait for response */
+	if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+					 timeout)) {
+		dev_err(&GET_DEV(accel_dev),
+			"IOV request/response message timeout expired\n");
+		return -EIO;
+	}
+
+	/* Response from PF received, check compatibility */
+	switch (accel_dev->vf.compatible) {
+	case ADF_PF2VF_VF_COMPATIBLE:
+		break;
+	case ADF_PF2VF_VF_COMPAT_UNKNOWN:
+		/* VF is newer than PF and decides whether it is compatible */
+		if (accel_dev->vf.pf_version >= hw_data->min_iov_compat_ver)
+			break;
+		/* fall through */
+	case ADF_PF2VF_VF_INCOMPATIBLE:
+		dev_err(&GET_DEV(accel_dev),
+			"PF (vers %d) and VF (vers %d) are not compatible\n",
+			accel_dev->vf.pf_version,
+			ADF_PFVF_COMPATIBILITY_VERSION);
+		return -EINVAL;
+	default:
+		dev_err(&GET_DEV(accel_dev),
+			"Invalid response from PF; assume not compatible\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/**
+ * adf_enable_vf2pf_comms() - Function enables communication from VF to PF
+ *
+ * @accel_dev: Pointer to acceleration device virtual function.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return adf_vf2pf_request_version(accel_dev);
+}
+EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
new file mode 100644
index 0000000..5acd531
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -0,0 +1,146 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_PF2VF_MSG_H
+#define ADF_PF2VF_MSG_H
+
+/*
+ * PF<->VF Messaging
+ * The PF has an array of 32-bit PF2VF registers, one for each VF.  The
+ * PF can access all these registers; each VF can access only the one
+ * register associated with that particular VF.
+ *
+ * The register is functionally split into two parts:
+ * The bottom half is for PF->VF messages. In particular, when the first
+ * bit of this register (bit 0) gets set, an interrupt is triggered in
+ * the respective VF.
+ * The top half is for VF->PF messages. In particular, when the first bit
+ * of this half of the register (bit 16) gets set, an interrupt is
+ * triggered in the PF.
+ *
+ * The remaining bits within this register are available to encode messages
+ * and to implement a collision control mechanism that prevents concurrent
+ * use of the PF2VF register by both the PF and the VF.
+ *
+ *  31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   VF2PF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ *  15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
+ *  _______________________________________________
+ * |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
+ * +-----------------------------------------------+
+ *  \___________________________/ \_________/ ^   ^
+ *                ^                    ^      |   |
+ *                |                    |      |   PF2VF Int
+ *                |                    |      Message Origin
+ *                |                    Message Type
+ *                Message-specific Data/Reserved
+ *
+ * Message Origin (Should always be 1)
+ * A legacy out-of-tree QAT driver allowed for a set of messages not supported
+ * by this driver; these had a Msg Origin of 0 and are simply ignored.
+ *
+ * When a PF or VF attempts to send a message in the lower or upper 16 bits,
+ * respectively, the other 16 bits are written to first with a defined
+ * IN_USE_BY pattern as part of a collision control scheme (see adf_iov_putmsg).
+ */
+
+#define ADF_PFVF_COMPATIBILITY_VERSION		0x1	/* PF<->VF compat */
+
+/* PF->VF messages */
+#define ADF_PF2VF_INT				BIT(0)
+#define ADF_PF2VF_MSGORIGIN_SYSTEM		BIT(1)
+#define ADF_PF2VF_MSGTYPE_MASK			0x0000003C
+#define ADF_PF2VF_MSGTYPE_SHIFT			2
+#define ADF_PF2VF_MSGTYPE_RESTARTING		0x01
+#define ADF_PF2VF_MSGTYPE_VERSION_RESP		0x02
+#define ADF_PF2VF_IN_USE_BY_PF			0x6AC20000
+#define ADF_PF2VF_IN_USE_BY_PF_MASK		0xFFFE0000
+
+/* PF->VF Version Response */
+#define ADF_PF2VF_VERSION_RESP_VERS_MASK	0x00003FC0
+#define ADF_PF2VF_VERSION_RESP_VERS_SHIFT	6
+#define ADF_PF2VF_VERSION_RESP_RESULT_MASK	0x0000C000
+#define ADF_PF2VF_VERSION_RESP_RESULT_SHIFT	14
+#define ADF_PF2VF_MINORVERSION_SHIFT		6
+#define ADF_PF2VF_MAJORVERSION_SHIFT		10
+#define ADF_PF2VF_VF_COMPATIBLE			1
+#define ADF_PF2VF_VF_INCOMPATIBLE		2
+#define ADF_PF2VF_VF_COMPAT_UNKNOWN		3
+
+/* VF->PF messages */
+#define ADF_VF2PF_IN_USE_BY_VF			0x00006AC2
+#define ADF_VF2PF_IN_USE_BY_VF_MASK		0x0000FFFE
+#define ADF_VF2PF_INT				BIT(16)
+#define ADF_VF2PF_MSGORIGIN_SYSTEM		BIT(17)
+#define ADF_VF2PF_MSGTYPE_MASK			0x003C0000
+#define ADF_VF2PF_MSGTYPE_SHIFT			18
+#define ADF_VF2PF_MSGTYPE_INIT			0x3
+#define ADF_VF2PF_MSGTYPE_SHUTDOWN		0x4
+#define ADF_VF2PF_MSGTYPE_VERSION_REQ		0x5
+#define ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ	0x6
+
+/* VF->PF Compatible Version Request */
+#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT		22
+
+/* Collision detection */
+#define ADF_IOV_MSG_COLLISION_DETECT_DELAY	10
+#define ADF_IOV_MSG_ACK_DELAY			2
+#define ADF_IOV_MSG_ACK_MAX_RETRY		100
+#define ADF_IOV_MSG_RETRY_DELAY			5
+#define ADF_IOV_MSG_MAX_RETRIES			3
+#define ADF_IOV_MSG_RESP_TIMEOUT	(ADF_IOV_MSG_ACK_DELAY * \
+					 ADF_IOV_MSG_ACK_MAX_RETRY + \
+					 ADF_IOV_MSG_COLLISION_DETECT_DELAY)
+#endif /* ADF_PF2VF_MSG_H */
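
To make the bit layout above concrete, a minimal sketch (mirroring
adf_vf2pf_request_version() in adf_pf2vf_msg.c; illustration only) of how a VF
composes the Compatibility Version Request word before handing it to
adf_iov_putmsg():

	u32 msg = ADF_VF2PF_MSGORIGIN_SYSTEM;	/* bit 17: system origin */

	msg |= ADF_VF2PF_MSGTYPE_COMPAT_VER_REQ <<
	       ADF_VF2PF_MSGTYPE_SHIFT;		/* bits 21:18 = 0x6 */
	msg |= ADF_PFVF_COMPATIBILITY_VERSION <<
	       ADF_VF2PF_COMPAT_VER_REQ_SHIFT;	/* bits 29:22 = 0x1 */

This yields 0x005A0000; __adf_iov_putmsg() then ORs in ADF_VF2PF_IN_USE_BY_VF
and, when actually signalling the PF, ADF_VF2PF_INT (bit 16).
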
diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c
new file mode 100644
index 0000000..2f77a4a
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/adf_sriov.c
@@ -0,0 +1,309 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_pf2vf_msg.h"
+
+static struct workqueue_struct *pf2vf_resp_wq;
+
+#define ME2FUNCTION_MAP_A_OFFSET	(0x3A400 + 0x190)
+#define ME2FUNCTION_MAP_A_NUM_REGS	96
+
+#define ME2FUNCTION_MAP_B_OFFSET	(0x3A400 + 0x310)
+#define ME2FUNCTION_MAP_B_NUM_REGS	12
+
+#define ME2FUNCTION_MAP_REG_SIZE	4
+#define ME2FUNCTION_MAP_VALID		BIT(7)
+
+#define READ_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index)		\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_A_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+#define READ_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index)		\
+	ADF_CSR_RD(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index)
+
+#define WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_bar_addr, index, value)	\
+	ADF_CSR_WR(pmisc_bar_addr, ME2FUNCTION_MAP_B_OFFSET +		\
+		   ME2FUNCTION_MAP_REG_SIZE * index, value)
+
+struct adf_pf2vf_resp {
+	struct work_struct pf2vf_resp_work;
+	struct adf_accel_vf_info *vf_info;
+};
+
+static void adf_iov_send_resp(struct work_struct *work)
+{
+	struct adf_pf2vf_resp *pf2vf_resp =
+		container_of(work, struct adf_pf2vf_resp, pf2vf_resp_work);
+
+	adf_vf2pf_req_hndl(pf2vf_resp->vf_info);
+	kfree(pf2vf_resp);
+}
+
+static void adf_vf2pf_bh_handler(void *data)
+{
+	struct adf_accel_vf_info *vf_info = (struct adf_accel_vf_info *)data;
+	struct adf_pf2vf_resp *pf2vf_resp;
+
+	pf2vf_resp = kzalloc(sizeof(*pf2vf_resp), GFP_ATOMIC);
+	if (!pf2vf_resp)
+		return;
+
+	pf2vf_resp->vf_info = vf_info;
+	INIT_WORK(&pf2vf_resp->pf2vf_resp_work, adf_iov_send_resp);
+	queue_work(pf2vf_resp_wq, &pf2vf_resp->pf2vf_resp_work);
+}
+
+static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	struct adf_accel_vf_info *vf_info;
+	int i;
+	u32 reg;
+
+	/* Workqueue for PF2VF responses */
+	pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+	if (!pf2vf_resp_wq)
+		return -ENOMEM;
+
+	for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
+	     i++, vf_info++) {
+		/* This ptr will be populated when VFs are created */
+		vf_info->accel_dev = accel_dev;
+		vf_info->vf_nr = i;
+
+		tasklet_init(&vf_info->vf2pf_bh_tasklet,
+			     (void *)adf_vf2pf_bh_handler,
+			     (unsigned long)vf_info);
+		mutex_init(&vf_info->pf2vf_lock);
+		ratelimit_state_init(&vf_info->vf2pf_ratelimit,
+				     DEFAULT_RATELIMIT_INTERVAL,
+				     DEFAULT_RATELIMIT_BURST);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Set Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg |= ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	/* Enable VF to PF interrupts for all VFs */
+	adf_enable_vf2pf_interrupts(accel_dev, GENMASK_ULL(totalvfs - 1, 0));
+
+	/*
+	 * Due to the hardware design, when SR-IOV and the ring arbiter
+	 * are enabled all the VFs supported in hardware must be enabled in
+	 * order for all the hardware resources (i.e. bundles) to be usable.
+	 * When SR-IOV is enabled, each of the VFs will own one bundle.
+	 */
+	return pci_enable_sriov(pdev, totalvfs);
+}
+
+/**
+ * adf_disable_sriov() - Disable SRIOV for the device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables SRIOV for the acceleration device.
+ *
+ * Return: void
+ */
+void adf_disable_sriov(struct adf_accel_dev *accel_dev)
+{
+	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+	struct adf_bar *pmisc =
+			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+	void __iomem *pmisc_addr = pmisc->virt_addr;
+	int totalvfs = pci_sriov_get_totalvfs(accel_to_pci_dev(accel_dev));
+	struct adf_accel_vf_info *vf;
+	u32 reg;
+	int i;
+
+	if (!accel_dev->pf.vf_info)
+		return;
+
+	adf_pf2vf_notify_restarting(accel_dev);
+
+	pci_disable_sriov(accel_to_pci_dev(accel_dev));
+
+	/* Disable VF to PF interrupts */
+	adf_disable_vf2pf_interrupts(accel_dev, 0xFFFFFFFF);
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group A */
+	for (i = 0; i < ME2FUNCTION_MAP_A_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_A(pmisc_addr, i, reg);
+	}
+
+	/* Clear Valid bits in ME Thread to PCIe Function Mapping Group B */
+	for (i = 0; i < ME2FUNCTION_MAP_B_NUM_REGS; i++) {
+		reg = READ_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i);
+		reg &= ~ME2FUNCTION_MAP_VALID;
+		WRITE_CSR_ME2FUNCTION_MAP_B(pmisc_addr, i, reg);
+	}
+
+	for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) {
+		tasklet_disable(&vf->vf2pf_bh_tasklet);
+		tasklet_kill(&vf->vf2pf_bh_tasklet);
+		mutex_destroy(&vf->pf2vf_lock);
+	}
+
+	kfree(accel_dev->pf.vf_info);
+	accel_dev->pf.vf_info = NULL;
+
+	if (pf2vf_resp_wq) {
+		destroy_workqueue(pf2vf_resp_wq);
+		pf2vf_resp_wq = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(adf_disable_sriov);
+
+/**
+ * adf_sriov_configure() - Enable SRIOV for the device
+ * @pdev:  Pointer to pci device.
+ * @numvfs:  Number of virtual functions requested.
+ *
+ * Function enables SRIOV for the pci device.
+ *
+ * Return: number of VFs enabled on success, error code otherwise.
+ */
+int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+	int totalvfs = pci_sriov_get_totalvfs(pdev);
+	unsigned long val;
+	int ret;
+
+	if (!accel_dev) {
+		dev_err(&pdev->dev, "Failed to find accel_dev\n");
+		return -EFAULT;
+	}
+
+	if (!iommu_present(&pci_bus_type)) {
+		dev_err(&pdev->dev,
+			"IOMMU must be enabled for SR-IOV to work\n");
+		return -EINVAL;
+	}
+
+	if (accel_dev->pf.vf_info) {
+		dev_info(&pdev->dev, "Already enabled for this device\n");
+		return -EINVAL;
+	}
+
+	if (adf_dev_started(accel_dev)) {
+		if (adf_devmgr_in_reset(accel_dev) ||
+		    adf_dev_in_use(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev), "Device busy\n");
+			return -EBUSY;
+		}
+
+		if (adf_dev_stop(accel_dev)) {
+			dev_err(&GET_DEV(accel_dev),
+				"Failed to stop qat_dev%d\n",
+				accel_dev->accel_id);
+			return -EFAULT;
+		}
+
+		adf_dev_shutdown(accel_dev);
+	}
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		return -EFAULT;
+	val = 0;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		return -EFAULT;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+	/* Allocate memory for VF info structs */
+	accel_dev->pf.vf_info = kcalloc(totalvfs,
+					sizeof(struct adf_accel_vf_info),
+					GFP_KERNEL);
+	if (!accel_dev->pf.vf_info)
+		return -ENOMEM;
+
+	if (adf_dev_init(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to init qat_dev%d\n",
+			accel_dev->accel_id);
+		return -EFAULT;
+	}
+
+	if (adf_dev_start(accel_dev)) {
+		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
+			accel_dev->accel_id);
+		return -EFAULT;
+	}
+
+	ret = adf_enable_sriov(accel_dev);
+	if (ret)
+		return ret;
+
+	return numvfs;
+}
+EXPORT_SYMBOL_GPL(adf_sriov_configure);
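
adf_sriov_configure() is meant to be plugged straight into the PCI core's
SR-IOV hook, so a device-specific QAT driver would presumably wire it up
roughly as below (the driver name and the remaining fields are placeholders,
not part of this patch):

	static struct pci_driver my_qat_pci_driver = {
		.id_table	 = my_pci_tbl,
		.name		 = "my_qat_driver",
		.probe		 = my_probe,
		.remove		 = my_remove,
		.sriov_configure = adf_sriov_configure,
	};

With that in place, writing a VF count to the device's sriov_numvfs sysfs
attribute lands in adf_sriov_configure(), which stops a running device,
re-creates the kernel config section with ADF_NUM_CY set to 0 (no crypto
instances on the PF), restarts the device and then enables every VF the
hardware supports (see the comment in adf_enable_sriov() above).
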
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index db2926b..3865ae8 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -264,6 +264,10 @@
 		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
 		return -EFAULT;
 	}
+	if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
+		return -EFAULT;
+	}
 
 	bank = &transport_data->banks[bank_num];
 	if (adf_reserve_ring(bank, ring_num)) {
@@ -285,7 +289,7 @@
 		goto err;
 
 	/* Enable HW arbitration for the given ring */
-	accel_dev->hw_device->hw_arb_ring_enable(ring);
+	adf_update_ring_arb(ring);
 
 	if (adf_ring_debugfs_add(ring, ring_name)) {
 		dev_err(&GET_DEV(accel_dev),
@@ -302,14 +306,13 @@
 err:
 	adf_cleanup_ring(ring);
 	adf_unreserve_ring(bank, ring_num);
-	accel_dev->hw_device->hw_arb_ring_disable(ring);
+	adf_update_ring_arb(ring);
 	return ret;
 }
 
 void adf_remove_ring(struct adf_etr_ring_data *ring)
 {
 	struct adf_etr_bank_data *bank = ring->bank;
-	struct adf_accel_dev *accel_dev = bank->accel_dev;
 
 	/* Disable interrupts for the given ring */
 	adf_disable_ring_irq(bank, ring->ring_number);
@@ -322,7 +325,7 @@
 	adf_ring_debugfs_rm(ring);
 	adf_unreserve_ring(bank, ring->ring_number);
 	/* Disable HW arbitration for the given ring */
-	accel_dev->hw_device->hw_arb_ring_disable(ring);
+	adf_update_ring_arb(ring);
 	adf_cleanup_ring(ring);
 }
 
@@ -463,7 +466,7 @@
  * acceleration device accel_dev.
  * To be used by QAT device specific drivers.
  *
- * Return: 0 on success, error code othewise.
+ * Return: 0 on success, error code otherwise.
  */
 int adf_init_etr_data(struct adf_accel_dev *accel_dev)
 {
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 160c9a3..6ad7e4e 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -97,8 +97,9 @@
 #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
 
 /* Minimum ring bufer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
-				ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+	((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+		ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
 #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
 #define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
 				SIZE) & ~0x4)
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
index f1e30e2..46747f0 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw.h
@@ -249,6 +249,8 @@
 
 #define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
 #define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6
+#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1
 #define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
 #define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
 #define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
new file mode 100644
index 0000000..0d7a9b5
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/icp_qat_fw_pke.h
@@ -0,0 +1,112 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_PKE_
+#define _ICP_QAT_FW_PKE_
+
+#include "icp_qat_fw.h"
+
+struct icp_qat_fw_req_hdr_pke_cd_pars {
+	u64 content_desc_addr;
+	u32 content_desc_resrvd;
+	u32 func_id;
+};
+
+struct icp_qat_fw_req_pke_mid {
+	u64 opaque;
+	u64 src_data_addr;
+	u64 dest_data_addr;
+};
+
+struct icp_qat_fw_req_pke_hdr {
+	u8 resrvd1;
+	u8 resrvd2;
+	u8 service_type;
+	u8 hdr_flags;
+	u16 comn_req_flags;
+	u16 resrvd4;
+	struct icp_qat_fw_req_hdr_pke_cd_pars cd_pars;
+};
+
+struct icp_qat_fw_pke_request {
+	struct icp_qat_fw_req_pke_hdr pke_hdr;
+	struct icp_qat_fw_req_pke_mid pke_mid;
+	u8 output_param_count;
+	u8 input_param_count;
+	u16 resrvd1;
+	u32 resrvd2;
+	u64 next_req_adr;
+};
+
+struct icp_qat_fw_resp_pke_hdr {
+	u8 resrvd1;
+	u8 resrvd2;
+	u8 response_type;
+	u8 hdr_flags;
+	u16 comn_resp_flags;
+	u16 resrvd4;
+};
+
+struct icp_qat_fw_pke_resp {
+	struct icp_qat_fw_resp_pke_hdr pke_resp_hdr;
+	u64 opaque;
+	u64 src_data_addr;
+	u64 dest_data_addr;
+};
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS              7
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK                0x1
+#define ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(status_word) \
+	QAT_FIELD_GET(((status_word >> ICP_QAT_FW_COMN_ONE_BYTE_SHIFT) & \
+		ICP_QAT_FW_COMN_SINGLE_BYTE_MASK), \
+		QAT_COMN_RESP_PKE_STATUS_BITPOS, \
+		QAT_COMN_RESP_PKE_STATUS_MASK)
+
+#define ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(hdr_t, val) \
+	QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+		ICP_QAT_FW_PKE_HDR_VALID_FLAG_BITPOS, \
+		ICP_QAT_FW_PKE_HDR_VALID_FLAG_MASK)
+#endif
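
For context, the consumer of these structures would typically pull the PKE
status out of the response header with the accessor defined above. A minimal
sketch, under the assumption that a set status bit indicates a firmware
reported error; the handler name is hypothetical:

	static void my_pke_resp_handler(struct icp_qat_fw_pke_resp *resp)
	{
		int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

		if (err)
			pr_err("QAT: PKE request failed\n");
	}
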
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index df427c0..2bd913a 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -53,7 +53,6 @@
 #include <crypto/hash.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
-#include <crypto/rng.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
 #include "adf_transport.h"
@@ -113,9 +112,6 @@
 	struct crypto_shash *hash_tfm;
 	enum icp_qat_hw_auth_algo qat_hash_alg;
 	struct qat_crypto_instance *inst;
-	struct crypto_tfm *tfm;
-	uint8_t salt[AES_BLOCK_SIZE];
-	spinlock_t lock;	/* protects qat_alg_aead_ctx struct */
 };
 
 struct qat_alg_ablkcipher_ctx {
@@ -130,11 +126,6 @@
 	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
 };
 
-static int get_current_node(void)
-{
-	return cpu_data(current_thread_info()->cpu).phys_proc_id;
-}
-
 static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 {
 	switch (qat_hash_alg) {
@@ -278,12 +269,12 @@
 				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }
 
-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
 					 int alg,
 					 struct crypto_authenc_keys *keys)
 {
-	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
-	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
 	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
 	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
 	struct icp_qat_hw_auth_algo_blk *hash =
@@ -358,12 +349,12 @@
 	return 0;
 }
 
-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
 					 int alg,
 					 struct crypto_authenc_keys *keys)
 {
-	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
-	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
+	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
 	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
 	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
 	struct icp_qat_hw_cipher_algo_blk *cipher =
@@ -515,30 +506,27 @@
 	return 0;
 }
 
-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
 				      const uint8_t *key, unsigned int keylen)
 {
 	struct crypto_authenc_keys keys;
 	int alg;
 
-	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
-		return -EFAULT;
-
 	if (crypto_authenc_extractkeys(&keys, key, keylen))
 		goto bad_key;
 
 	if (qat_alg_validate_key(keys.enckeylen, &alg))
 		goto bad_key;
 
-	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+	if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
 		goto error;
 
-	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+	if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
 		goto error;
 
 	return 0;
 bad_key:
-	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 error:
 	return -EFAULT;
@@ -567,7 +555,6 @@
 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev;
 
-	spin_lock(&ctx->lock);
 	if (ctx->enc_cd) {
 		/* rekeying */
 		dev = &GET_DEV(ctx->inst->accel_dev);
@@ -581,7 +568,6 @@
 		struct qat_crypto_instance *inst =
 				qat_crypto_get_instance_node(node);
 		if (!inst) {
-			spin_unlock(&ctx->lock);
 			return -EINVAL;
 		}
 
@@ -591,19 +577,16 @@
 						  &ctx->enc_cd_paddr,
 						  GFP_ATOMIC);
 		if (!ctx->enc_cd) {
-			spin_unlock(&ctx->lock);
 			return -ENOMEM;
 		}
 		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
 						  &ctx->dec_cd_paddr,
 						  GFP_ATOMIC);
 		if (!ctx->dec_cd) {
-			spin_unlock(&ctx->lock);
 			goto out_free_enc;
 		}
 	}
-	spin_unlock(&ctx->lock);
-	if (qat_alg_aead_init_sessions(ctx, key, keylen))
+	if (qat_alg_aead_init_sessions(tfm, key, keylen))
 		goto out_free_all;
 
 	return 0;
@@ -654,22 +637,20 @@
 }
 
 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
-			       struct scatterlist *assoc, int assoclen,
 			       struct scatterlist *sgl,
-			       struct scatterlist *sglout, uint8_t *iv,
-			       uint8_t ivlen,
+			       struct scatterlist *sglout,
 			       struct qat_crypto_request *qat_req)
 {
 	struct device *dev = &GET_DEV(inst->accel_dev);
-	int i, bufs = 0, sg_nctr = 0;
-	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+	int i, sg_nctr = 0;
+	int n = sg_nents(sgl);
 	struct qat_alg_buf_list *bufl;
 	struct qat_alg_buf_list *buflout = NULL;
 	dma_addr_t blp;
 	dma_addr_t bloutp = 0;
 	struct scatterlist *sg;
 	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
-			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+			((1 + n) * sizeof(struct qat_alg_buf));
 
 	if (unlikely(!n))
 		return -EINVAL;
@@ -683,35 +664,8 @@
 	if (unlikely(dma_mapping_error(dev, blp)))
 		goto err;
 
-	for_each_sg(assoc, sg, assoc_n, i) {
-		if (!sg->length)
-			continue;
-
-		if (!(assoclen > 0))
-			break;
-
-		bufl->bufers[bufs].addr =
-			dma_map_single(dev, sg_virt(sg),
-				       min_t(int, assoclen, sg->length),
-				       DMA_BIDIRECTIONAL);
-		bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-			goto err;
-		bufs++;
-		assoclen -= sg->length;
-	}
-
-	if (ivlen) {
-		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
-							 DMA_BIDIRECTIONAL);
-		bufl->bufers[bufs].len = ivlen;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-			goto err;
-		bufs++;
-	}
-
 	for_each_sg(sgl, sg, n, i) {
-		int y = sg_nctr + bufs;
+		int y = sg_nctr;
 
 		if (!sg->length)
 			continue;
@@ -724,7 +678,7 @@
 			goto err;
 		sg_nctr++;
 	}
-	bufl->num_bufs = sg_nctr + bufs;
+	bufl->num_bufs = sg_nctr;
 	qat_req->buf.bl = bufl;
 	qat_req->buf.blp = blp;
 	qat_req->buf.sz = sz;
@@ -734,7 +688,7 @@
 
 		n = sg_nents(sglout);
 		sz_out = sizeof(struct qat_alg_buf_list) +
-			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+			((1 + n) * sizeof(struct qat_alg_buf));
 		sg_nctr = 0;
 		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
 				       dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -744,14 +698,8 @@
 		if (unlikely(dma_mapping_error(dev, bloutp)))
 			goto err;
 		bufers = buflout->bufers;
-		/* For out of place operation dma map only data and
-		 * reuse assoc mapping and iv */
-		for (i = 0; i < bufs; i++) {
-			bufers[i].len = bufl->bufers[i].len;
-			bufers[i].addr = bufl->bufers[i].addr;
-		}
 		for_each_sg(sglout, sg, n, i) {
-			int y = sg_nctr + bufs;
+			int y = sg_nctr;
 
 			if (!sg->length)
 				continue;
@@ -764,7 +712,7 @@
 			bufers[y].len = sg->length;
 			sg_nctr++;
 		}
-		buflout->num_bufs = sg_nctr + bufs;
+		buflout->num_bufs = sg_nctr;
 		buflout->num_mapped_bufs = sg_nctr;
 		qat_req->buf.blout = buflout;
 		qat_req->buf.bloutp = bloutp;
@@ -778,7 +726,7 @@
 err:
 	dev_err(dev, "Failed to map buf for dma\n");
 	sg_nctr = 0;
-	for (i = 0; i < n + bufs; i++)
+	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
 			dma_unmap_single(dev, bufl->bufers[i].addr,
 					 bufl->bufers[i].len,
@@ -789,7 +737,7 @@
 	kfree(bufl);
 	if (sgl != sglout && buflout) {
 		n = sg_nents(sglout);
-		for (i = bufs; i < n + bufs; i++)
+		for (i = 0; i < n; i++)
 			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 				dma_unmap_single(dev, buflout->bufers[i].addr,
 						 buflout->bufers[i].len,
@@ -849,12 +797,10 @@
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
-	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
+	int digst_size = crypto_aead_authsize(aead_tfm);
 	int ret, ctr = 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
-				  areq->src, areq->dst, areq->iv,
-				  AES_BLOCK_SIZE, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
 	if (unlikely(ret))
 		return ret;
 
@@ -868,12 +814,11 @@
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = areq->cryptlen - digst_size;
-	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+	cipher_param->cipher_offset = areq->assoclen;
 	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
 	auth_param->auth_off = 0;
-	auth_param->auth_len = areq->assoclen +
-				cipher_param->cipher_length + AES_BLOCK_SIZE;
+	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
@@ -885,8 +830,7 @@
 	return -EINPROGRESS;
 }
 
-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
-				     int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
 {
 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -895,11 +839,10 @@
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
+	uint8_t *iv = areq->iv;
 	int ret, ctr = 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
-				  areq->src, areq->dst, iv, AES_BLOCK_SIZE,
-				  qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
 	if (unlikely(ret))
 		return ret;
 
@@ -914,16 +857,12 @@
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
 
-	if (enc_iv) {
-		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
-		cipher_param->cipher_offset = areq->assoclen;
-	} else {
-		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-		cipher_param->cipher_length = areq->cryptlen;
-		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
-	}
+	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+	cipher_param->cipher_length = areq->cryptlen;
+	cipher_param->cipher_offset = areq->assoclen;
+
 	auth_param->auth_off = 0;
-	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+	auth_param->auth_len = areq->assoclen + areq->cryptlen;
 
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -936,25 +875,6 @@
 	return -EINPROGRESS;
 }
 
-static int qat_alg_aead_enc(struct aead_request *areq)
-{
-	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	__be64 seq;
-
-	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
-	       &seq, sizeof(uint64_t));
-	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
 				     const uint8_t *key,
 				     unsigned int keylen)
@@ -1026,8 +946,7 @@
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret, ctr = 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
-				  NULL, 0, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
 	if (unlikely(ret))
 		return ret;
 
@@ -1064,8 +983,7 @@
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret, ctr = 0;
 
-	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
-				  NULL, 0, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
 	if (unlikely(ret))
 		return ret;
 
@@ -1092,47 +1010,43 @@
 	return -EINPROGRESS;
 }
 
-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
 			     enum icp_qat_hw_auth_algo hash,
 			     const char *hash_name)
 {
-	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 
 	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
 	if (IS_ERR(ctx->hash_tfm))
-		return -EFAULT;
-	spin_lock_init(&ctx->lock);
+		return PTR_ERR(ctx->hash_tfm);
 	ctx->qat_hash_alg = hash;
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-		sizeof(struct aead_request) +
-		sizeof(struct qat_crypto_request));
-	ctx->tfm = tfm;
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+				     sizeof(struct qat_crypto_request));
 	return 0;
 }
 
-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
 {
 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
 }
 
-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
 {
 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
 }
 
-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
 {
 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
 }
 
-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
 {
-	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct device *dev;
 
-	if (!IS_ERR(ctx->hash_tfm))
-		crypto_free_shash(ctx->hash_tfm);
+	crypto_free_shash(ctx->hash_tfm);
 
 	if (!inst)
 		return;
@@ -1189,73 +1103,61 @@
 	qat_crypto_put_instance(inst);
 }
 
+
+static struct aead_alg qat_aeads[] = { {
+	.base = {
+		.cra_name = "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha1_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha256),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha256_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha512),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha512_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
 static struct crypto_alg qat_algs[] = { {
-	.cra_name = "authenc(hmac(sha1),cbc(aes))",
-	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
-	.cra_priority = 4001,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_aead_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = qat_alg_aead_sha1_init,
-	.cra_exit = qat_alg_aead_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = qat_alg_aead_setkey,
-			.decrypt = qat_alg_aead_dec,
-			.encrypt = qat_alg_aead_enc,
-			.givencrypt = qat_alg_aead_genivenc,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
-	},
-}, {
-	.cra_name = "authenc(hmac(sha256),cbc(aes))",
-	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
-	.cra_priority = 4001,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_aead_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = qat_alg_aead_sha256_init,
-	.cra_exit = qat_alg_aead_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = qat_alg_aead_setkey,
-			.decrypt = qat_alg_aead_dec,
-			.encrypt = qat_alg_aead_enc,
-			.givencrypt = qat_alg_aead_genivenc,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
-	},
-}, {
-	.cra_name = "authenc(hmac(sha512),cbc(aes))",
-	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
-	.cra_priority = 4001,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_aead_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = qat_alg_aead_sha512_init,
-	.cra_exit = qat_alg_aead_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = qat_alg_aead_setkey,
-			.decrypt = qat_alg_aead_dec,
-			.encrypt = qat_alg_aead_enc,
-			.givencrypt = qat_alg_aead_genivenc,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-		},
-	},
-}, {
 	.cra_name = "cbc(aes)",
 	.cra_driver_name = "qat_aes_cbc",
 	.cra_priority = 4001,
@@ -1281,42 +1183,54 @@
 
 int qat_algs_register(void)
 {
-	int ret = 0;
+	int ret = 0, i;
 
 	mutex_lock(&algs_lock);
-	if (++active_devs == 1) {
-		int i;
+	if (++active_devs != 1)
+		goto unlock;
 
-		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
-			qat_algs[i].cra_flags =
-				(qat_algs[i].cra_type == &crypto_aead_type) ?
-				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
-				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
-	}
+	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	if (ret)
+		goto unlock;
+
+	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;
+
+	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	if (ret)
+		goto unreg_algs;
+
+unlock:
 	mutex_unlock(&algs_lock);
 	return ret;
+
+unreg_algs:
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	goto unlock;
 }
 
 int qat_algs_unregister(void)
 {
-	int ret = 0;
-
 	mutex_lock(&algs_lock);
-	if (--active_devs == 0)
-		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	if (--active_devs != 0)
+		goto unlock;
+
+	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
 	mutex_unlock(&algs_lock);
-	return ret;
+	return 0;
 }
 
 int qat_algs_init(void)
 {
-	crypto_get_default_rng();
 	return 0;
 }
 
 void qat_algs_exit(void)
 {
-	crypto_put_default_rng();
 }
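
The registration path above keeps one active_devs refcount under algs_lock: only the first device to come up registers the ablkcipher and AEAD tables, and the AEAD registration is rolled back if it fails after crypto_register_algs() already succeeded. A minimal sketch of that pattern, with hypothetical my_ciphers/my_aeads arrays standing in for the driver's tables (illustrative only, not part of the patch):

/* Sketch only: assumes <linux/mutex.h>, <linux/crypto.h>, <crypto/aead.h>
 * and two hypothetical alg tables my_ciphers[] / my_aeads[]. */
static DEFINE_MUTEX(example_lock);
static unsigned int example_active_devs;

int example_algs_register(void)
{
	int ret = 0;

	mutex_lock(&example_lock);
	if (++example_active_devs != 1)		/* only the first device registers */
		goto unlock;

	ret = crypto_register_algs(my_ciphers, ARRAY_SIZE(my_ciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(my_aeads, ARRAY_SIZE(my_aeads));
	if (ret)				/* roll back the first registration */
		crypto_unregister_algs(my_ciphers, ARRAY_SIZE(my_ciphers));
unlock:
	mutex_unlock(&example_lock);
	return ret;
}
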
diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
new file mode 100644
index 0000000..e87f510
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -0,0 +1,652 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+	* Redistributions of source code must retain the above copyright
+	  notice, this list of conditions and the following disclaimer.
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in
+	  the documentation and/or other materials provided with the
+	  distribution.
+	* Neither the name of Intel Corporation nor the names of its
+	  contributors may be used to endorse or promote products derived
+	  from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <linux/module.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/akcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/fips.h>
+#include "qat_rsakey-asn1.h"
+#include "icp_qat_fw_pke.h"
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
+
+struct qat_rsa_input_params {
+	union {
+		struct {
+			dma_addr_t m;
+			dma_addr_t e;
+			dma_addr_t n;
+		} enc;
+		struct {
+			dma_addr_t c;
+			dma_addr_t d;
+			dma_addr_t n;
+		} dec;
+		u64 in_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_output_params {
+	union {
+		struct {
+			dma_addr_t c;
+		} enc;
+		struct {
+			dma_addr_t m;
+		} dec;
+		u64 out_tab[8];
+	};
+} __packed __aligned(64);
+
+struct qat_rsa_ctx {
+	char *n;
+	char *e;
+	char *d;
+	dma_addr_t dma_n;
+	dma_addr_t dma_e;
+	dma_addr_t dma_d;
+	unsigned int key_sz;
+	struct qat_crypto_instance *inst;
+} __packed __aligned(64);
+
+struct qat_rsa_request {
+	struct qat_rsa_input_params in;
+	struct qat_rsa_output_params out;
+	dma_addr_t phy_in;
+	dma_addr_t phy_out;
+	char *src_align;
+	struct icp_qat_fw_pke_request req;
+	struct qat_rsa_ctx *ctx;
+	int err;
+} __aligned(64);
+
+static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
+{
+	struct akcipher_request *areq = (void *)(__force long)resp->opaque;
+	struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64);
+	struct device *dev = &GET_DEV(req->ctx->inst->accel_dev);
+	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
+				resp->pke_resp_hdr.comn_resp_flags);
+	char *ptr = areq->dst;
+
+	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
+
+	if (req->src_align)
+		dma_free_coherent(dev, req->ctx->key_sz, req->src_align,
+				  req->in.enc.m);
+	else
+		dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz,
+				 DMA_TO_DEVICE);
+
+	dma_unmap_single(dev, req->out.enc.c, req->ctx->key_sz,
+			 DMA_FROM_DEVICE);
+	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
+			 DMA_TO_DEVICE);
+	dma_unmap_single(dev, req->phy_out,
+			 sizeof(struct qat_rsa_output_params),
+			 DMA_TO_DEVICE);
+
+	areq->dst_len = req->ctx->key_sz;
+	/* Need to set the correct length of the output */
+	while (!(*ptr) && areq->dst_len) {
+		areq->dst_len--;
+		ptr++;
+	}
+
+	if (areq->dst_len != req->ctx->key_sz)
+		memmove(areq->dst, ptr, areq->dst_len);
+
+	akcipher_request_complete(areq, err);
+}
+
+void qat_alg_asym_callback(void *_resp)
+{
+	struct icp_qat_fw_pke_resp *resp = _resp;
+
+	qat_rsa_cb(resp);
+}
+
+#define PKE_RSA_EP_512 0x1c161b21
+#define PKE_RSA_EP_1024 0x35111bf7
+#define PKE_RSA_EP_1536 0x4d111cdc
+#define PKE_RSA_EP_2048 0x6e111dba
+#define PKE_RSA_EP_3072 0x7d111ea3
+#define PKE_RSA_EP_4096 0xa5101f7e
+
+static unsigned long qat_rsa_enc_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_EP_512;
+	case 1024:
+		return PKE_RSA_EP_1024;
+	case 1536:
+		return PKE_RSA_EP_1536;
+	case 2048:
+		return PKE_RSA_EP_2048;
+	case 3072:
+		return PKE_RSA_EP_3072;
+	case 4096:
+		return PKE_RSA_EP_4096;
+	default:
+		return 0;
+	}
+}
+
+#define PKE_RSA_DP1_512 0x1c161b3c
+#define PKE_RSA_DP1_1024 0x35111c12
+#define PKE_RSA_DP1_1536 0x4d111cf7
+#define PKE_RSA_DP1_2048 0x6e111dda
+#define PKE_RSA_DP1_3072 0x7d111ebe
+#define PKE_RSA_DP1_4096 0xa5101f98
+
+static unsigned long qat_rsa_dec_fn_id(unsigned int len)
+{
+	unsigned int bitslen = len << 3;
+
+	switch (bitslen) {
+	case 512:
+		return PKE_RSA_DP1_512;
+	case 1024:
+		return PKE_RSA_DP1_1024;
+	case 1536:
+		return PKE_RSA_DP1_1536;
+	case 2048:
+		return PKE_RSA_DP1_2048;
+	case 3072:
+		return PKE_RSA_DP1_3072;
+	case 4096:
+		return PKE_RSA_DP1_4096;
+	default:
+		return 0;
+	}
+}
+
+static int qat_rsa_enc(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_rsa_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->e))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->ctx = ctx;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	qat_req->in.enc.e = ctx->dma_e;
+	qat_req->in.enc.n = ctx->dma_n;
+	ret = -ENOMEM;
+
+	/*
+	 * src can be of any size in the valid range, but the HW expects it to
+	 * be the same size as the modulus n, so if it is shorter we need to
+	 * allocate a new buffer and copy the src data into it.
+	 * Otherwise we just map the user-provided buffer.
+	 */
+	if (req->src_len < ctx->key_sz) {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.enc.m,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		memcpy(qat_req->src_align + shift, req->src, req->src_len);
+	} else {
+		qat_req->src_align = NULL;
+		qat_req->in.enc.m = dma_map_single(dev, req->src, req->src_len,
+					   DMA_TO_DEVICE);
+	}
+	qat_req->in.in_tab[3] = 0;
+	qat_req->out.enc.c = dma_map_single(dev, req->dst, req->dst_len,
+					    DMA_FROM_DEVICE);
+	qat_req->out.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.enc.m,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.enc.c,
+					  sizeof(struct qat_rsa_output_params),
+					    DMA_TO_DEVICE);
+
+	if (unlikely((!qat_req->src_align &&
+		      dma_mapping_error(dev, qat_req->in.enc.m)) ||
+		     dma_mapping_error(dev, qat_req->out.enc.c) ||
+		     dma_mapping_error(dev, qat_req->phy_in) ||
+		     dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)req;
+	msg->input_param_count = 3;
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+unmap:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.enc.m);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.enc.m))
+			dma_unmap_single(dev, qat_req->in.enc.m, ctx->key_sz,
+					 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.enc.c))
+		dma_unmap_single(dev, qat_req->out.enc.c, ctx->key_sz,
+				 DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+	return ret;
+}
+
+static int qat_rsa_dec(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	struct qat_rsa_request *qat_req =
+			PTR_ALIGN(akcipher_request_ctx(req), 64);
+	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	int ret, ctr = 0;
+
+	if (unlikely(!ctx->n || !ctx->d))
+		return -EINVAL;
+
+	if (req->dst_len < ctx->key_sz) {
+		req->dst_len = ctx->key_sz;
+		return -EOVERFLOW;
+	}
+	memset(msg, '\0', sizeof(*msg));
+	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
+					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	msg->pke_hdr.cd_pars.func_id = qat_rsa_dec_fn_id(ctx->key_sz);
+	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
+		return -EINVAL;
+
+	qat_req->ctx = ctx;
+	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
+	msg->pke_hdr.comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
+					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
+
+	qat_req->in.dec.d = ctx->dma_d;
+	qat_req->in.dec.n = ctx->dma_n;
+	ret = -ENOMEM;
+
+	/*
+	 * src can be of any size in the valid range, but the HW expects it to
+	 * be the same size as the modulus n, so if it is shorter we need to
+	 * allocate a new buffer and copy the src data into it.
+	 * Otherwise we just map the user-provided buffer.
+	 */
+	if (req->src_len < ctx->key_sz) {
+		int shift = ctx->key_sz - req->src_len;
+
+		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
+							 &qat_req->in.dec.c,
+							 GFP_KERNEL);
+		if (unlikely(!qat_req->src_align))
+			return ret;
+
+		memcpy(qat_req->src_align + shift, req->src, req->src_len);
+	} else {
+		qat_req->src_align = NULL;
+		qat_req->in.dec.c = dma_map_single(dev, req->src, req->src_len,
+						   DMA_TO_DEVICE);
+	}
+	qat_req->in.in_tab[3] = 0;
+	qat_req->out.dec.m = dma_map_single(dev, req->dst, req->dst_len,
+					    DMA_FROM_DEVICE);
+	qat_req->out.out_tab[1] = 0;
+	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dec.c,
+					 sizeof(struct qat_rsa_input_params),
+					 DMA_TO_DEVICE);
+	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dec.m,
+					  sizeof(struct qat_rsa_output_params),
+					    DMA_TO_DEVICE);
+
+	if (unlikely((!qat_req->src_align &&
+		      dma_mapping_error(dev, qat_req->in.dec.c)) ||
+		     dma_mapping_error(dev, qat_req->out.dec.m) ||
+		     dma_mapping_error(dev, qat_req->phy_in) ||
+		     dma_mapping_error(dev, qat_req->phy_out)))
+		goto unmap;
+
+	msg->pke_mid.src_data_addr = qat_req->phy_in;
+	msg->pke_mid.dest_data_addr = qat_req->phy_out;
+	msg->pke_mid.opaque = (uint64_t)(__force long)req;
+	msg->input_param_count = 3;
+	msg->output_param_count = 1;
+	do {
+		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
+	} while (ret == -EBUSY && ctr++ < 100);
+
+	if (!ret)
+		return -EINPROGRESS;
+unmap:
+	if (qat_req->src_align)
+		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
+				  qat_req->in.dec.c);
+	else
+		if (!dma_mapping_error(dev, qat_req->in.dec.c))
+			dma_unmap_single(dev, qat_req->in.dec.c, ctx->key_sz,
+					 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.dec.m))
+		dma_unmap_single(dev, qat_req->out.dec.m, ctx->key_sz,
+				 DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_in))
+		dma_unmap_single(dev, qat_req->phy_in,
+				 sizeof(struct qat_rsa_input_params),
+				 DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->phy_out))
+		dma_unmap_single(dev, qat_req->phy_out,
+				 sizeof(struct qat_rsa_output_params),
+				 DMA_TO_DEVICE);
+	return ret;
+}
+
+int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
+		  const void *value, size_t vlen)
+{
+	struct qat_rsa_ctx *ctx = context;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ctx->key_sz = vlen;
+	ret = -EINVAL;
+	/* In FIPS mode only allow key size 2K & 3K */
+	if (fips_enabled && (ctx->key_sz != 256 && ctx->key_sz != 384)) {
+		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+		goto err;
+	}
+	/* invalid key size provided */
+	if (!qat_rsa_enc_fn_id(ctx->key_sz))
+		goto err;
+
+	ret = -ENOMEM;
+	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
+	if (!ctx->n)
+		goto err;
+
+	memcpy(ctx->n, ptr, ctx->key_sz);
+	return 0;
+err:
+	ctx->key_sz = 0;
+	ctx->n = NULL;
+	return ret;
+}
+
+int qat_rsa_get_e(void *context, size_t hdrlen, unsigned char tag,
+		  const void *value, size_t vlen)
+{
+	struct qat_rsa_ctx *ctx = context;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
+		ctx->e = NULL;
+		return -EINVAL;
+	}
+
+	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
+	if (!ctx->e) {
+		ctx->e = NULL;
+		return -ENOMEM;
+	}
+	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+}
+
+int qat_rsa_get_d(void *context, size_t hdrlen, unsigned char tag,
+		  const void *value, size_t vlen)
+{
+	struct qat_rsa_ctx *ctx = context;
+	struct qat_crypto_instance *inst = ctx->inst;
+	struct device *dev = &GET_DEV(inst->accel_dev);
+	const char *ptr = value;
+	int ret;
+
+	while (!*ptr && vlen) {
+		ptr++;
+		vlen--;
+	}
+
+	ret = -EINVAL;
+	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
+		goto err;
+
+	/* In FIPS mode only allow key size 2K & 3K */
+	if (fips_enabled && (vlen != 256 && vlen != 384)) {
+		pr_err("QAT: RSA: key size not allowed in FIPS mode\n");
+		goto err;
+	}
+
+	ret = -ENOMEM;
+	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
+	if (!ctx->d)
+		goto err;
+
+	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
+	return 0;
+err:
+	ctx->d = NULL;
+	return ret;
+}
+
+static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+			  unsigned int keylen)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+	int ret;
+
+	/* Free the old key if any */
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+	ret = asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
+	if (ret < 0)
+		goto free;
+
+	if (!ctx->n || !ctx->e) {
+		/* invalid key provided */
+		ret = -EINVAL;
+		goto free;
+	}
+
+	return 0;
+free:
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+		ctx->d = NULL;
+	}
+	if (ctx->e) {
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+		ctx->e = NULL;
+	}
+	if (ctx->n) {
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+		ctx->n = NULL;
+		ctx->key_sz = 0;
+	}
+	return ret;
+}
+
+static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct qat_crypto_instance *inst =
+			qat_crypto_get_instance_node(get_current_node());
+
+	if (!inst)
+		return -EINVAL;
+
+	ctx->key_sz = 0;
+	ctx->inst = inst;
+	return 0;
+}
+
+static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
+	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
+
+	if (ctx->n)
+		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
+	if (ctx->e)
+		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
+	if (ctx->d) {
+		memset(ctx->d, '\0', ctx->key_sz);
+		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
+	}
+	qat_crypto_put_instance(ctx->inst);
+	ctx->n = NULL;
+	ctx->e = NULL;
+	ctx->d = NULL;
+}
+
+static struct akcipher_alg rsa = {
+	.encrypt = qat_rsa_enc,
+	.decrypt = qat_rsa_dec,
+	.sign = qat_rsa_dec,
+	.verify = qat_rsa_enc,
+	.setkey = qat_rsa_setkey,
+	.init = qat_rsa_init_tfm,
+	.exit = qat_rsa_exit_tfm,
+	.reqsize = sizeof(struct qat_rsa_request) + 64,
+	.base = {
+		.cra_name = "rsa",
+		.cra_driver_name = "qat-rsa",
+		.cra_priority = 1000,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
+	},
+};
+
+int qat_asym_algs_register(void)
+{
+	int ret = 0;
+
+	mutex_lock(&algs_lock);
+	if (++active_devs == 1) {
+		rsa.base.cra_flags = 0;
+		ret = crypto_register_akcipher(&rsa);
+	}
+	mutex_unlock(&algs_lock);
+	return ret;
+}
+
+void qat_asym_algs_unregister(void)
+{
+	mutex_lock(&algs_lock);
+	if (--active_devs == 0)
+		crypto_unregister_akcipher(&rsa);
+	mutex_unlock(&algs_lock);
+}
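
The PKE firmware operates on fixed-width big-endian integers the size of the modulus, so the setkey callbacks above right-align shorter key components into key_sz-wide DMA buffers and qat_rsa_cb() strips leading zero bytes from the result before completing the request. A small stand-alone sketch of that normalization (plain C, illustrative only; assumes src_len <= key_sz):

#include <stddef.h>
#include <string.h>

/* Right-align a big-endian integer of src_len bytes into a key_sz-wide
 * buffer, zero-padding on the left (what the setkey callbacks do). */
static void be_right_align(unsigned char *dst, size_t key_sz,
			   const unsigned char *src, size_t src_len)
{
	memset(dst, 0, key_sz);
	memcpy(dst + (key_sz - src_len), src, src_len);
}

/* Drop leading zero bytes from a big-endian result and return the new
 * length (what qat_rsa_cb() does before handing dst back). */
static size_t be_strip_leading_zeros(unsigned char *buf, size_t len)
{
	unsigned char *p = buf;

	while (len && !*p) {
		p++;
		len--;
	}
	if (p != buf)
		memmove(buf, p, len);
	return len;
}
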
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 3bd705c..07c2f9f 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -88,12 +88,6 @@
 		if (inst->pke_rx)
 			adf_remove_ring(inst->pke_rx);
 
-		if (inst->rnd_tx)
-			adf_remove_ring(inst->rnd_tx);
-
-		if (inst->rnd_rx)
-			adf_remove_ring(inst->rnd_rx);
-
 		list_del(list_ptr);
 		kfree(inst);
 	}
@@ -109,9 +103,11 @@
 
 	list_for_each(itr, adf_devmgr_get_head()) {
 		accel_dev = list_entry(itr, struct adf_accel_dev, list);
+
 		if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
 		     dev_to_node(&GET_DEV(accel_dev)) < 0) &&
-		    adf_dev_started(accel_dev))
+		    adf_dev_started(accel_dev) &&
+		    !list_empty(&accel_dev->crypto_list))
 			break;
 		accel_dev = NULL;
 	}
@@ -158,7 +154,6 @@
 
 	INIT_LIST_HEAD(&accel_dev->crypto_list);
 	strlcpy(key, ADF_NUM_CY, sizeof(key));
-
 	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
 		return -EFAULT;
 
@@ -187,7 +182,9 @@
 
 		if (kstrtoul(val, 10, &num_msg_sym))
 			goto err;
+
 		num_msg_sym = num_msg_sym >> 1;
+
 		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
 		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
 			goto err;
@@ -202,11 +199,6 @@
 				    msg_size, key, NULL, 0, &inst->sym_tx))
 			goto err;
 
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
-		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
-				    msg_size, key, NULL, 0, &inst->rnd_tx))
-			goto err;
-
 		msg_size = msg_size >> 1;
 		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
 		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
@@ -220,15 +212,9 @@
 				    &inst->sym_rx))
 			goto err;
 
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
-		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
-				    msg_size, key, qat_alg_callback, 0,
-				    &inst->rnd_rx))
-			goto err;
-
 		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
 		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
-				    msg_size, key, qat_alg_callback, 0,
+				    msg_size, key, qat_alg_asym_callback, 0,
 				    &inst->pke_rx))
 			goto err;
 	}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index d503007..dc0273f 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -57,8 +57,6 @@
 	struct adf_etr_ring_data *sym_rx;
 	struct adf_etr_ring_data *pke_tx;
 	struct adf_etr_ring_data *pke_rx;
-	struct adf_etr_ring_data *rnd_tx;
-	struct adf_etr_ring_data *rnd_rx;
 	struct adf_accel_dev *accel_dev;
 	struct list_head list;
 	unsigned long state;
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 274ff7e..8e711d1 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -671,7 +671,6 @@
 #define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
 #define LOCAL_TO_XFER_REG_OFFSET    0x800
 #define ICP_DH895XCC_EP_OFFSET      0x3a000
-#define ICP_DH895XCC_PMISC_BAR 1
 int qat_hal_init(struct adf_accel_dev *accel_dev)
 {
 	unsigned char ae;
@@ -679,21 +678,24 @@
 	struct icp_qat_fw_loader_handle *handle;
 	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	struct adf_bar *bar =
+	struct adf_bar *misc_bar =
 			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
+	struct adf_bar *sram_bar =
+			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
 
 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 	if (!handle)
 		return -ENOMEM;
 
-	handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+	handle->hal_cap_g_ctl_csr_addr_v = misc_bar->virt_addr +
 						ICP_DH895XCC_CAP_OFFSET;
-	handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+	handle->hal_cap_ae_xfer_csr_addr_v = misc_bar->virt_addr +
 						ICP_DH895XCC_AE_OFFSET;
-	handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+	handle->hal_ep_csr_addr_v = misc_bar->virt_addr +
+				    ICP_DH895XCC_EP_OFFSET;
 	handle->hal_cap_ae_local_csr_addr_v =
 		handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
-
+	handle->hal_sram_addr_v = sram_bar->virt_addr;
 	handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
 	if (!handle->hal_handle)
 		goto out_hal_handle;
diff --git a/drivers/crypto/qat/qat_common/qat_rsakey.asn1 b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
new file mode 100644
index 0000000..97b0e02
--- /dev/null
+++ b/drivers/crypto/qat/qat_common/qat_rsakey.asn1
@@ -0,0 +1,5 @@
+RsaKey ::= SEQUENCE {
+	n INTEGER ({ qat_rsa_get_n }),
+	e INTEGER ({ qat_rsa_get_e }),
+	d INTEGER ({ qat_rsa_get_d })
+}
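
The grammar above is run through the kernel's ASN.1 compiler at build time, producing qat_rsakey-asn1.c/.h and the qat_rsakey_decoder table; each ({ ... }) action names a callback that receives the raw big-endian body of that INTEGER. A hedged sketch of how the decoder is driven (this mirrors qat_rsa_setkey() in the file above; the header name follows the kernel's generated <name>-asn1.h convention):

#include <linux/asn1_decoder.h>
#include "qat_rsakey-asn1.h"	/* generated: declares qat_rsakey_decoder */

/* Every action callback uses this signature; value/vlen describe the
 * integer body with the BER header already stripped. */
int qat_rsa_get_n(void *context, size_t hdrlen, unsigned char tag,
		  const void *value, size_t vlen);

static int parse_rsa_key(struct qat_rsa_ctx *ctx, const void *key,
			 unsigned int keylen)
{
	/* The callbacks are invoked with 'ctx' as the context argument */
	return asn1_ber_decoder(&qat_rsakey_decoder, ctx, key, keylen);
}
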
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 1e27f9f..c48f181 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -359,28 +359,7 @@
 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
 				   struct icp_qat_uof_initmem *init_mem)
 {
-	unsigned int i;
-	struct icp_qat_uof_memvar_attr *mem_val_attr;
-
-	mem_val_attr =
-		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
-		sizeof(struct icp_qat_uof_initmem));
-
 	switch (init_mem->region) {
-	case ICP_QAT_UOF_SRAM_REGION:
-		if ((init_mem->addr + init_mem->num_in_bytes) >
-		    ICP_DH895XCC_PESRAM_BAR_SIZE) {
-			pr_err("QAT: initmem on SRAM is out of range");
-			return -EINVAL;
-		}
-		for (i = 0; i < init_mem->val_attr_num; i++) {
-			qat_uclo_wr_sram_by_words(handle,
-						  init_mem->addr +
-						  mem_val_attr->offset_in_byte,
-						  &mem_val_attr->value, 4);
-			mem_val_attr++;
-		}
-		break;
 	case ICP_QAT_UOF_LMEM_REGION:
 		if (qat_uclo_init_lmem_seg(handle, init_mem))
 			return -EINVAL;
@@ -990,6 +969,12 @@
 	return -EFAULT;
 }
 
+void qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
+			void *addr_ptr, int mem_size)
+{
+	qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, ALIGN(mem_size, 4));
+}
+
 int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
 			 void *addr_ptr, int mem_size)
 {
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
index 25171c5..8c79c54 100644
--- a/drivers/crypto/qat/qat_dh895xcc/Makefile
+++ b/drivers/crypto/qat/qat_dh895xcc/Makefile
@@ -2,7 +2,4 @@
 obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
 qat_dh895xcc-objs := adf_drv.o \
 		adf_isr.o \
-		adf_dh895xcc_hw_data.o \
-		adf_hw_arbiter.o \
-		qat_admin.o \
-		adf_admin.o
+		adf_dh895xcc_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
deleted file mode 100644
index e466606..0000000
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <linux/types.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <adf_accel_devices.h>
-#include "adf_drv.h"
-#include "adf_dh895xcc_hw_data.h"
-
-#define ADF_ADMINMSG_LEN 32
-
-struct adf_admin_comms {
-	dma_addr_t phy_addr;
-	void *virt_addr;
-	void __iomem *mailbox_addr;
-	struct mutex lock;	/* protects adf_admin_comms struct */
-};
-
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
-			   uint32_t ae, void *in, void *out)
-{
-	struct adf_admin_comms *admin = accel_dev->admin;
-	int offset = ae * ADF_ADMINMSG_LEN * 2;
-	void __iomem *mailbox = admin->mailbox_addr;
-	int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
-	int times, received;
-
-	mutex_lock(&admin->lock);
-
-	if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
-		mutex_unlock(&admin->lock);
-		return -EAGAIN;
-	}
-
-	memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
-	ADF_CSR_WR(mailbox, mb_offset, 1);
-	received = 0;
-	for (times = 0; times < 50; times++) {
-		msleep(20);
-		if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
-			received = 1;
-			break;
-		}
-	}
-	if (received)
-		memcpy(out, admin->virt_addr + offset +
-		       ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
-	else
-		dev_err(&GET_DEV(accel_dev),
-			"Failed to send admin msg to accelerator\n");
-
-	mutex_unlock(&admin->lock);
-	return received ? 0 : -EFAULT;
-}
-
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
-{
-	struct adf_admin_comms *admin;
-	struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
-	void __iomem *csr = pmisc->virt_addr;
-	void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
-	uint64_t reg_val;
-
-	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-			     dev_to_node(&GET_DEV(accel_dev)));
-	if (!admin)
-		return -ENOMEM;
-	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-					       &admin->phy_addr, GFP_KERNEL);
-	if (!admin->virt_addr) {
-		dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
-		kfree(admin);
-		return -ENOMEM;
-	}
-	reg_val = (uint64_t)admin->phy_addr;
-	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
-	ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
-	mutex_init(&admin->lock);
-	admin->mailbox_addr = mailbox;
-	accel_dev->admin = admin;
-	return 0;
-}
-
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
-{
-	struct adf_admin_comms *admin = accel_dev->admin;
-
-	if (!admin)
-		return;
-
-	if (admin->virt_addr)
-		dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
-				  admin->virt_addr, admin->phy_addr);
-
-	mutex_destroy(&admin->lock);
-	kfree(admin);
-	accel_dev->admin = NULL;
-}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index b138692..ff54257 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -45,8 +45,9 @@
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 #include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
 #include "adf_dh895xcc_hw_data.h"
-#include "adf_common_drv.h"
 #include "adf_drv.h"
 
 /* Worker thread to service arbiter mappings based on dev SKUs */
@@ -117,6 +118,11 @@
 	return ADF_DH895XCC_ETR_BAR;
 }
 
+static uint32_t get_sram_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCC_SRAM_BAR;
+}
+
 static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
 {
 	int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
@@ -156,6 +162,16 @@
 	}
 }
 
+static uint32_t get_pf2vf_offset(uint32_t i)
+{
+	return ADF_DH895XCC_PF2VF_OFFSET(i);
+}
+
+static uint32_t get_vintmsk_offset(uint32_t i)
+{
+	return ADF_DH895XCC_VINTMSK_OFFSET(i);
+}
+
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
@@ -192,18 +208,23 @@
 
 	/* Enable bundle and misc interrupts */
 	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
-		   ADF_DH895XCC_SMIA0_MASK);
+		   accel_dev->pf.vf_info ? 0 :
+			GENMASK_ULL(GET_MAX_BANKS(accel_dev) - 1, 0));
 	ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
 		   ADF_DH895XCC_SMIA1_MASK);
 }
 
+static int adf_pf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
 void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
 {
 	hw_data->dev_class = &dh895xcc_class;
 	hw_data->instance_id = dh895xcc_class.instances++;
 	hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
 	hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
-	hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
 	hw_data->num_logical_accel = 1;
 	hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
 	hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
@@ -211,21 +232,28 @@
 	hw_data->alloc_irq = adf_isr_resource_alloc;
 	hw_data->free_irq = adf_isr_resource_free;
 	hw_data->enable_error_correction = adf_enable_error_correction;
-	hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
-	hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
 	hw_data->get_accel_mask = get_accel_mask;
 	hw_data->get_ae_mask = get_ae_mask;
 	hw_data->get_num_accels = get_num_accels;
 	hw_data->get_num_aes = get_num_aes;
 	hw_data->get_etr_bar_id = get_etr_bar_id;
 	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sram_bar_id = get_sram_bar_id;
 	hw_data->get_sku = get_sku;
 	hw_data->fw_name = ADF_DH895XCC_FW;
+	hw_data->fw_mmp_name = ADF_DH895XCC_MMP;
 	hw_data->init_admin_comms = adf_init_admin_comms;
 	hw_data->exit_admin_comms = adf_exit_admin_comms;
+	hw_data->disable_iov = adf_disable_sriov;
+	hw_data->send_admin_init = adf_send_admin_init;
 	hw_data->init_arb = adf_init_arb;
 	hw_data->exit_arb = adf_exit_arb;
+	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
 	hw_data->enable_ints = adf_enable_ints;
+	hw_data->enable_vf2pf_comms = adf_pf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
 }
 
 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 25269a9..88dffb2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,7 @@
 #define ADF_DH895x_HW_DATA_H_
 
 /* PCIe configuration space */
+#define ADF_DH895XCC_SRAM_BAR 0
 #define ADF_DH895XCC_PMISC_BAR 1
 #define ADF_DH895XCC_ETR_BAR 2
 #define ADF_DH895XCC_RX_RINGS_OFFSET 8
@@ -79,10 +80,11 @@
 #define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
 #define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
 
-/* Admin Messages Registers */
-#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
-#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
-#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
-#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_ERRSOU3	(0x3A000 + 0x00C)
+#define ADF_DH895XCC_ERRSOU5	(0x3A000 + 0x0D8)
+#define ADF_DH895XCC_PF2VF_OFFSET(i)	(0x3A000 + 0x280 + ((i) * 0x04))
+#define ADF_DH895XCC_VINTMSK_OFFSET(i)	(0x3A000 + 0x200 + ((i) * 0x04))
+/* FW names */
 #define ADF_DH895XCC_FW "qat_895xcc.bin"
+#define ADF_DH895XCC_MMP "qat_mmp.bin"
 #endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 1bde45b..f8dd14f 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -82,16 +82,21 @@
 	.id_table = adf_pci_tbl,
 	.name = adf_driver_name,
 	.probe = adf_probe,
-	.remove = adf_remove
+	.remove = adf_remove,
+	.sriov_configure = adf_sriov_configure,
 };
 
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
 	int i;
 
-	adf_dev_shutdown(accel_dev);
-
 	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
 		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
 
@@ -100,7 +105,7 @@
 	}
 
 	if (accel_dev->hw_device) {
-		switch (accel_dev->hw_device->pci_dev_id) {
+		switch (accel_pci_dev->pci_dev->device) {
 		case ADF_DH895XCC_PCI_DEVICE_ID:
 			adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
 			break;
@@ -108,13 +113,11 @@
 			break;
 		}
 		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
 	}
 	adf_cfg_dev_remove(accel_dev);
 	debugfs_remove(accel_dev->debugfs_dir);
-	adf_devmgr_rm_dev(accel_dev);
-	pci_release_regions(accel_pci_dev->pci_dev);
-	pci_disable_device(accel_pci_dev->pci_dev);
-	kfree(accel_dev);
+	adf_devmgr_rm_dev(accel_dev, NULL);
 }
 
 static int adf_dev_configure(struct adf_accel_dev *accel_dev)
@@ -167,12 +170,6 @@
 						key, (void *)&val, ADF_DEC))
 			goto err;
 
-		val = 4;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
 		val = 8;
 		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
 		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
@@ -185,12 +182,6 @@
 						key, (void *)&val, ADF_DEC))
 			goto err;
 
-		val = 12;
-		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
-		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
-						key, (void *)&val, ADF_DEC))
-			goto err;
-
 		val = ADF_COALESCING_DEF_TIME;
 		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
 		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
@@ -217,7 +208,7 @@
 	struct adf_hw_device_data *hw_data;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	int ret;
+	int ret, bar_mask;
 
 	switch (ent->device) {
 	case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -241,10 +232,12 @@
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&accel_dev->crypto_list);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
 
 	/* Add accel device to accel table.
 	 * This should be called before adf_cleanup_accel is called */
-	if (adf_devmgr_add_dev(accel_dev)) {
+	if (adf_devmgr_add_dev(accel_dev, NULL)) {
 		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
 		kfree(accel_dev);
 		return -EFAULT;
@@ -267,7 +260,6 @@
 	default:
 		return -ENODEV;
 	}
-	accel_pci_dev = &accel_dev->accel_pci_dev;
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
 	pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
 			      &hw_data->fuses);
@@ -276,7 +268,6 @@
 	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
 	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
 	accel_pci_dev->sku = hw_data->get_sku(hw_data);
-	accel_pci_dev->pci_dev = pdev;
 	/* If the device has no acceleration engines then ignore it. */
 	if (!hw_data->accel_mask || !hw_data->ae_mask ||
 	    ((~hw_data->ae_mask) & 0x01)) {
@@ -286,11 +277,14 @@
 	}
 
 	/* Create dev top level debugfs entry */
-	snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
-		 hw_data->dev_class->name, hw_data->instance_id);
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
 	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
 	if (!accel_dev->debugfs_dir) {
-		dev_err(&pdev->dev, "Could not create debugfs dir\n");
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
 		ret = -EINVAL;
 		goto out_err;
 	}
@@ -313,7 +307,7 @@
 		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
 			dev_err(&pdev->dev, "No usable DMA configuration\n");
 			ret = -EFAULT;
-			goto out_err;
+			goto out_err_disable;
 		} else {
 			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 		}
@@ -324,7 +318,7 @@
 
 	if (pci_request_regions(pdev, adf_driver_name)) {
 		ret = -EFAULT;
-		goto out_err;
+		goto out_err_disable;
 	}
 
 	/* Read accelerator capabilities mask */
@@ -332,19 +326,21 @@
 			      &hw_data->accel_capabilities_mask);
 
 	/* Find and map all the device's BARS */
-	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
-		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+			 ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
-		bar_nr = i * 2;
 		bar->base_addr = pci_resource_start(pdev, bar_nr);
 		if (!bar->base_addr)
 			break;
 		bar->size = pci_resource_len(pdev, bar_nr);
 		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
 		if (!bar->virt_addr) {
-			dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
 			ret = -EFAULT;
-			goto out_err;
+			goto out_err_free_reg;
 		}
 	}
 	pci_set_master(pdev);
@@ -352,32 +348,40 @@
 	if (adf_enable_aer(accel_dev, &adf_driver)) {
 		dev_err(&pdev->dev, "Failed to enable aer\n");
 		ret = -EFAULT;
-		goto out_err;
+		goto out_err_free_reg;
 	}
 
 	if (pci_save_state(pdev)) {
 		dev_err(&pdev->dev, "Failed to save pci state\n");
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_err_free_reg;
 	}
 
 	ret = adf_dev_configure(accel_dev);
 	if (ret)
-		goto out_err;
+		goto out_err_free_reg;
 
 	ret = adf_dev_init(accel_dev);
 	if (ret)
-		goto out_err;
+		goto out_err_dev_shutdown;
 
 	ret = adf_dev_start(accel_dev);
-	if (ret) {
-		adf_dev_stop(accel_dev);
-		goto out_err;
-	}
+	if (ret)
+		goto out_err_dev_stop;
 
-	return 0;
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
 out_err:
 	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
 	return ret;
 }
 
@@ -391,15 +395,17 @@
 	}
 	if (adf_dev_stop(accel_dev))
 		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+	adf_dev_shutdown(accel_dev);
 	adf_disable_aer(accel_dev);
 	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
 }
 
 static int __init adfdrv_init(void)
 {
 	request_module("intel_qat");
-	if (qat_admin_register())
-		return -EFAULT;
 
 	if (pci_register_driver(&adf_driver)) {
 		pr_err("QAT: Driver initialization failed\n");
@@ -411,7 +417,6 @@
 static void __exit adfdrv_release(void)
 {
 	pci_unregister_driver(&adf_driver);
-	qat_admin_unregister();
 }
 
 module_init(adfdrv_init);
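
The probe path in the hunk above is reworked into the usual layered goto unwind: each failing step jumps to a label that releases only what was already acquired and then falls through to the earlier labels. A minimal sketch of that shape (hypothetical acquire/release helpers, illustrative only):

static int example_probe(void)
{
	int ret;

	ret = acquire_regions();	/* e.g. pci_request_regions() */
	if (ret)
		return ret;

	ret = init_device();		/* e.g. adf_dev_init() */
	if (ret)
		goto out_free_regions;

	ret = start_device();		/* e.g. adf_dev_start() */
	if (ret)
		goto out_shutdown;

	return 0;

out_shutdown:
	shutdown_device();		/* undo init_device() */
out_free_regions:
	release_regions();		/* undo acquire_regions() */
	return ret;
}
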
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
index a2fbb6c..85ff245 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
@@ -53,15 +53,6 @@
 void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
 int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
 void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
 void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
 			     uint32_t const **arb_map_config);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
-			   uint32_t ae, void *in, void *out);
-int qat_admin_register(void);
-int qat_admin_unregister(void);
-int adf_init_arb(struct adf_accel_dev *accel_dev);
-void adf_exit_arb(struct adf_accel_dev *accel_dev);
 #endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
deleted file mode 100644
index 1864bdb..0000000
--- a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <adf_accel_devices.h>
-#include <adf_transport_internal.h>
-#include "adf_drv.h"
-
-#define ADF_ARB_NUM 4
-#define ADF_ARB_REQ_RING_NUM 8
-#define ADF_ARB_REG_SIZE 0x4
-#define ADF_ARB_WTR_SIZE 0x20
-#define ADF_ARB_OFFSET 0x30000
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_WTR_OFFSET 0x010
-#define ADF_ARB_RO_EN_OFFSET 0x090
-#define ADF_ARB_WQCFG_OFFSET 0x100
-#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
-#define ADF_ARB_WRK_2_SER_MAP 10
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
-	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
-	(ADF_ARB_REG_SLOT * index), value)
-
-#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
-	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-	ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
-	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-	ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
-	(ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
-	ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
-	(ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
-	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-	ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
-	(ADF_ARB_REG_SIZE * index), value)
-
-#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
-	ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
-	ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
-
-int adf_init_arb(struct adf_accel_dev *accel_dev)
-{
-	void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
-	uint32_t arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
-	uint32_t arb, i;
-	const uint32_t *thd_2_arb_cfg;
-
-	/* Service arb configured for 32 bytes responses and
-	 * ring flow control check enabled. */
-	for (arb = 0; arb < ADF_ARB_NUM; arb++)
-		WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
-
-	/* Setup service weighting */
-	for (arb = 0; arb < ADF_ARB_NUM; arb++)
-		for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
-			WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
-
-	/* Setup ring response ordering */
-	for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
-		WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
-
-	/* Setup worker queue registers */
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-		WRITE_CSR_ARB_WQCFG(csr, i, i);
-
-	/* Map worker threads to service arbiters */
-	adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
-
-	if (!thd_2_arb_cfg)
-		return -EFAULT;
-
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
-
-	return 0;
-}
-
-void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
-{
-	WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
-				   ring->bank->bank_number,
-				   ring->bank->ring_mask & 0xFF);
-}
-
-void adf_exit_arb(struct adf_accel_dev *accel_dev)
-{
-	void __iomem *csr;
-	unsigned int i;
-
-	if (!accel_dev->transport)
-		return;
-
-	csr = accel_dev->transport->banks[0].csr_addr;
-
-	/* Reset arbiter configuration */
-	for (i = 0; i < ADF_ARB_NUM; i++)
-		WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
-
-	/* Shutdown work queue */
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-		WRITE_CSR_ARB_WQCFG(csr, i, 0);
-
-	/* Unmap worker threads to service arbiters */
-	for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
-		WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
-
-	/* Disable arbitration on all rings */
-	for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
-		WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
-}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 0d03c10..5570f78 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -59,21 +59,30 @@
 #include <adf_transport_access_macros.h>
 #include <adf_transport_internal.h>
 #include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
 
 static int adf_enable_msix(struct adf_accel_dev *accel_dev)
 {
 	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	uint32_t msix_num_entries = hw_data->num_banks + 1;
-	int i;
+	u32 msix_num_entries = 1;
 
-	for (i = 0; i < msix_num_entries; i++)
-		pci_dev_info->msix_entries.entries[i].entry = i;
+	/* If SR-IOV is disabled, add entries for each bank */
+	if (!accel_dev->pf.vf_info) {
+		int i;
+
+		msix_num_entries += hw_data->num_banks;
+		for (i = 0; i < msix_num_entries; i++)
+			pci_dev_info->msix_entries.entries[i].entry = i;
+	} else {
+		pci_dev_info->msix_entries.entries[0].entry =
+			hw_data->num_banks;
+	}
 
 	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
 				  pci_dev_info->msix_entries.entries,
 				  msix_num_entries)) {
-		dev_err(&GET_DEV(accel_dev), "Failed to enable MSIX IRQ\n");
+		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
 		return -EFAULT;
 	}
 	return 0;
@@ -97,9 +106,58 @@
 {
 	struct adf_accel_dev *accel_dev = dev_ptr;
 
-	dev_info(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
-		 accel_dev->accel_id);
-	return IRQ_HANDLED;
+#ifdef CONFIG_PCI_IOV
+	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
+	if (accel_dev->pf.vf_info) {
+		void __iomem *pmisc_bar_addr =
+		    (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+		u32 vf_mask;
+
+		/* Get the interrupt sources triggered by VFs */
+		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU5) &
+			    0x0000FFFF) << 16) |
+			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCC_ERRSOU3) &
+			    0x01FFFE00) >> 9);
+
+		if (vf_mask) {
+			struct adf_accel_vf_info *vf_info;
+			bool irq_handled = false;
+			int i;
+
+			/* Disable VF2PF interrupts for VFs with pending ints */
+			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+
+			/*
+			 * Schedule tasklets to handle VF2PF interrupt BHs
+			 * unless the VF is malicious and is attempting to
+			 * flood the host OS with VF2PF interrupts.
+			 */
+			for_each_set_bit(i, (const unsigned long *)&vf_mask,
+					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
+				vf_info = accel_dev->pf.vf_info + i;
+
+				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
+					dev_info(&GET_DEV(accel_dev),
+						 "Too many ints from VF%d\n",
+						  vf_info->vf_nr + 1);
+					continue;
+				}
+
+				/* Tasklet will re-enable ints from this VF */
+				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
+				irq_handled = true;
+			}
+
+			if (irq_handled)
+				return IRQ_HANDLED;
+		}
+	}
+#endif /* CONFIG_PCI_IOV */
+
+	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
+		accel_dev->accel_id);
+
+	return IRQ_NONE;
 }
 
 static int adf_request_irqs(struct adf_accel_dev *accel_dev)
@@ -108,28 +166,32 @@
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
 	struct adf_etr_data *etr_data = accel_dev->transport;
-	int ret, i;
+	int ret, i = 0;
 	char *name;
 
-	/* Request msix irq for all banks */
-	for (i = 0; i < hw_data->num_banks; i++) {
-		struct adf_etr_bank_data *bank = &etr_data->banks[i];
-		unsigned int cpu, cpus = num_online_cpus();
+	/* Request MSI-X irqs for all banks unless SR-IOV is enabled */
+	if (!accel_dev->pf.vf_info) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			struct adf_etr_bank_data *bank = &etr_data->banks[i];
+			unsigned int cpu, cpus = num_online_cpus();
 
-		name = *(pci_dev_info->msix_entries.names + i);
-		snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
-			 "qat%d-bundle%d", accel_dev->accel_id, i);
-		ret = request_irq(msixe[i].vector,
-				  adf_msix_isr_bundle, 0, name, bank);
-		if (ret) {
-			dev_err(&GET_DEV(accel_dev),
-				"failed to enable irq %d for %s\n",
-				msixe[i].vector, name);
-			return ret;
+			name = *(pci_dev_info->msix_entries.names + i);
+			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+				 "qat%d-bundle%d", accel_dev->accel_id, i);
+			ret = request_irq(msixe[i].vector,
+					  adf_msix_isr_bundle, 0, name, bank);
+			if (ret) {
+				dev_err(&GET_DEV(accel_dev),
+					"failed to enable irq %d for %s\n",
+					msixe[i].vector, name);
+				return ret;
+			}
+
+			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
+			       i) % cpus;
+			irq_set_affinity_hint(msixe[i].vector,
+					      get_cpu_mask(cpu));
 		}
-
-		cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
-		irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
 	}
 
 	/* Request msix irq for AE */
@@ -152,11 +214,13 @@
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
 	struct adf_etr_data *etr_data = accel_dev->transport;
-	int i;
+	int i = 0;
 
-	for (i = 0; i < hw_data->num_banks; i++) {
-		irq_set_affinity_hint(msixe[i].vector, NULL);
-		free_irq(msixe[i].vector, &etr_data->banks[i]);
+	if (pci_dev_info->msix_entries.num_entries > 1) {
+		for (i = 0; i < hw_data->num_banks; i++) {
+			irq_set_affinity_hint(msixe[i].vector, NULL);
+			free_irq(msixe[i].vector, &etr_data->banks[i]);
+		}
 	}
 	irq_set_affinity_hint(msixe[i].vector, NULL);
 	free_irq(msixe[i].vector, accel_dev);
@@ -168,7 +232,11 @@
 	char **names;
 	struct msix_entry *entries;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	uint32_t msix_num_entries = hw_data->num_banks + 1;
+	u32 msix_num_entries = 1;
+
+	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
+	if (!accel_dev->pf.vf_info)
+		msix_num_entries += hw_data->num_banks;
 
 	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
 			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
@@ -185,6 +253,7 @@
 		if (!(*(names + i)))
 			goto err;
 	}
+	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
 	accel_dev->accel_pci_dev.msix_entries.entries = entries;
 	accel_dev->accel_pci_dev.msix_entries.names = names;
 	return 0;
@@ -198,13 +267,11 @@
 
 static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
 {
-	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	uint32_t msix_num_entries = hw_data->num_banks + 1;
 	char **names = accel_dev->accel_pci_dev.msix_entries.names;
 	int i;
 
 	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
-	for (i = 0; i < msix_num_entries; i++)
+	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
 		kfree(*(names + i));
 	kfree(names);
 }
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
deleted file mode 100644
index 55b7a8e..0000000
--- a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
-  This file is provided under a dual BSD/GPLv2 license.  When using or
-  redistributing this file, you may do so under either license.
-
-  GPL LICENSE SUMMARY
-  Copyright(c) 2014 Intel Corporation.
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of version 2 of the GNU General Public License as
-  published by the Free Software Foundation.
-
-  This program is distributed in the hope that it will be useful, but
-  WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-  General Public License for more details.
-
-  Contact Information:
-  qat-linux@intel.com
-
-  BSD LICENSE
-  Copyright(c) 2014 Intel Corporation.
-  Redistribution and use in source and binary forms, with or without
-  modification, are permitted provided that the following conditions
-  are met:
-
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above copyright
-      notice, this list of conditions and the following disclaimer in
-      the documentation and/or other materials provided with the
-      distribution.
-    * Neither the name of Intel Corporation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-#include <icp_qat_fw_init_admin.h>
-#include <adf_accel_devices.h>
-#include <adf_common_drv.h>
-#include "adf_drv.h"
-
-static struct service_hndl qat_admin;
-
-static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
-{
-	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
-	struct icp_qat_fw_init_admin_req req;
-	struct icp_qat_fw_init_admin_resp resp;
-	int i;
-
-	memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
-	req.init_admin_cmd_id = cmd;
-	for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
-		memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
-		if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
-		    resp.init_resp_hdr.status)
-			return -EFAULT;
-	}
-	return 0;
-}
-
-static int qat_admin_start(struct adf_accel_dev *accel_dev)
-{
-	return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
-}
-
-static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
-				   enum adf_event event)
-{
-	int ret;
-
-	switch (event) {
-	case ADF_EVENT_START:
-		ret = qat_admin_start(accel_dev);
-		break;
-	case ADF_EVENT_STOP:
-	case ADF_EVENT_INIT:
-	case ADF_EVENT_SHUTDOWN:
-	default:
-		ret = 0;
-	}
-	return ret;
-}
-
-int qat_admin_register(void)
-{
-	memset(&qat_admin, 0, sizeof(struct service_hndl));
-	qat_admin.event_hld = qat_admin_event_handler;
-	qat_admin.name = "qat_admin";
-	qat_admin.admin = 1;
-	return adf_service_register(&qat_admin);
-}
-
-int qat_admin_unregister(void)
-{
-	return adf_service_unregister(&qat_admin);
-}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/Makefile b/drivers/crypto/qat/qat_dh895xccvf/Makefile
new file mode 100644
index 0000000..85399fc
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o
+qat_dh895xccvf-objs := adf_drv.o \
+		adf_isr.o \
+		adf_dh895xccvf_hw_data.o
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
new file mode 100644
index 0000000..a9a27ef
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
@@ -0,0 +1,172 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_pf2vf_msg.h>
+#include <adf_common_drv.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static struct adf_hw_device_class dh895xcciov_class = {
+	.name = ADF_DH895XCCVF_DEVICE_NAME,
+	.type = DEV_DH895XCCVF,
+	.instances = 0
+};
+
+static u32 get_accel_mask(u32 fuse)
+{
+	return ADF_DH895XCCIOV_ACCELERATORS_MASK;
+}
+
+static u32 get_ae_mask(u32 fuse)
+{
+	return ADF_DH895XCCIOV_ACCELENGINES_MASK;
+}
+
+static u32 get_num_accels(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_MAX_ACCELERATORS;
+}
+
+static u32 get_num_aes(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_MAX_ACCELENGINES;
+}
+
+static u32 get_misc_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_PMISC_BAR;
+}
+
+static u32 get_etr_bar_id(struct adf_hw_device_data *self)
+{
+	return ADF_DH895XCCIOV_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+	return DEV_SKU_VF;
+}
+
+static u32 get_pf2vf_offset(u32 i)
+{
+	return ADF_DH895XCCIOV_PF2VF_OFFSET;
+}
+
+static u32 get_vintmsk_offset(u32 i)
+{
+	return ADF_DH895XCCIOV_VINTMSK_OFFSET;
+}
+
+static int adf_vf_int_noop(struct adf_accel_dev *accel_dev)
+{
+	return 0;
+}
+
+static void adf_vf_void_noop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+		(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0)) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Init event to PF\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
+{
+	u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
+	    (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
+
+	if (adf_iov_putmsg(accel_dev, msg, 0))
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to send Shutdown event to PF\n");
+}
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class = &dh895xcciov_class;
+	hw_data->instance_id = dh895xcciov_class.instances++;
+	hw_data->num_banks = ADF_DH895XCCIOV_ETR_MAX_BANKS;
+	hw_data->num_accel = ADF_DH895XCCIOV_MAX_ACCELERATORS;
+	hw_data->num_logical_accel = 1;
+	hw_data->num_engines = ADF_DH895XCCIOV_MAX_ACCELENGINES;
+	hw_data->tx_rx_gap = ADF_DH895XCCIOV_RX_RINGS_OFFSET;
+	hw_data->tx_rings_mask = ADF_DH895XCCIOV_TX_RINGS_MASK;
+	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
+	hw_data->free_irq = adf_vf_isr_resource_free;
+	hw_data->enable_error_correction = adf_vf_void_noop;
+	hw_data->init_admin_comms = adf_vf_int_noop;
+	hw_data->exit_admin_comms = adf_vf_void_noop;
+	hw_data->send_admin_init = adf_vf2pf_init;
+	hw_data->init_arb = adf_vf_int_noop;
+	hw_data->exit_arb = adf_vf_void_noop;
+	hw_data->disable_iov = adf_vf2pf_shutdown;
+	hw_data->get_accel_mask = get_accel_mask;
+	hw_data->get_ae_mask = get_ae_mask;
+	hw_data->get_num_accels = get_num_accels;
+	hw_data->get_num_aes = get_num_aes;
+	hw_data->get_etr_bar_id = get_etr_bar_id;
+	hw_data->get_misc_bar_id = get_misc_bar_id;
+	hw_data->get_pf2vf_offset = get_pf2vf_offset;
+	hw_data->get_vintmsk_offset = get_vintmsk_offset;
+	hw_data->get_sku = get_sku;
+	hw_data->enable_ints = adf_vf_void_noop;
+	hw_data->enable_vf2pf_comms = adf_enable_vf2pf_comms;
+	hw_data->min_iov_compat_ver = ADF_PFVF_COMPATIBILITY_VERSION;
+}
+
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
+{
+	hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
new file mode 100644
index 0000000..8f6babf
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.h
@@ -0,0 +1,68 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2015 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2015 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895XVF_HW_DATA_H_
+#define ADF_DH895XVF_HW_DATA_H_
+
+#define ADF_DH895XCCIOV_PMISC_BAR 1
+#define ADF_DH895XCCIOV_ACCELERATORS_MASK 0x1
+#define ADF_DH895XCCIOV_ACCELENGINES_MASK 0x1
+#define ADF_DH895XCCIOV_MAX_ACCELERATORS 1
+#define ADF_DH895XCCIOV_MAX_ACCELENGINES 1
+#define ADF_DH895XCCIOV_RX_RINGS_OFFSET 8
+#define ADF_DH895XCCIOV_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCCIOV_ETR_BAR 0
+#define ADF_DH895XCCIOV_ETR_MAX_BANKS 1
+
+#define ADF_DH895XCCIOV_PF2VF_OFFSET	0x200
+#define ADF_DH895XCC_PF2VF_PF2VFINT	BIT(0)
+
+#define ADF_DH895XCCIOV_VINTSOU_OFFSET	0x204
+#define ADF_DH895XCC_VINTSOU_BUN	BIT(0)
+#define ADF_DH895XCC_VINTSOU_PF2VF	BIT(1)
+
+#define ADF_DH895XCCIOV_VINTMSK_OFFSET	0x208
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
new file mode 100644
index 0000000..789426f
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -0,0 +1,393 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xccvf_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCCVF_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+	ADF_SYSTEM_DEVICE(ADF_DH895XCCIOV_PCI_DEVICE_ID),
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+	.id_table = adf_pci_tbl,
+	.name = adf_driver_name,
+	.probe = adf_probe,
+	.remove = adf_remove,
+};
+
+static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
+{
+	pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
+	pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
+}
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+	struct adf_accel_dev *pf;
+	int i;
+
+	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+		if (bar->virt_addr)
+			pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+	}
+
+	if (accel_dev->hw_device) {
+		switch (accel_pci_dev->pci_dev->device) {
+		case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+			adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);
+			break;
+		default:
+			break;
+		}
+		kfree(accel_dev->hw_device);
+		accel_dev->hw_device = NULL;
+	}
+	adf_cfg_dev_remove(accel_dev);
+	debugfs_remove(accel_dev->debugfs_dir);
+	pf = adf_devmgr_pci_to_accel_dev(accel_pci_dev->pci_dev->physfn);
+	adf_devmgr_rm_dev(accel_dev, pf);
+}
+
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
+{
+	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+	unsigned long val, bank = 0;
+
+	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+		goto err;
+	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+		goto err;
+
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+					(void *)&bank, ADF_DEC))
+		goto err;
+
+	val = bank;
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+					(void *)&val, ADF_DEC))
+		goto err;
+
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, 0);
+
+	val = 128;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, key,
+					(void *)&val, ADF_DEC))
+		goto err;
+
+	val = 512;
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					key, (void *)&val, ADF_DEC))
+		goto err;
+
+	val = 0;
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					key, (void *)&val, ADF_DEC))
+		goto err;
+
+	val = 2;
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					key, (void *)&val, ADF_DEC))
+		goto err;
+
+	val = 8;
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					key, (void *)&val, ADF_DEC))
+		goto err;
+
+	val = 10;
+	snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, 0);
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					key, (void *)&val, ADF_DEC))
+			goto err;
+
+	val = ADF_COALESCING_DEF_TIME;
+	snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+		 (int)bank);
+	if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+					key, (void *)&val, ADF_DEC))
+		goto err;
+
+	val = 1;
+	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+					ADF_NUM_CY, (void *)&val, ADF_DEC))
+		goto err;
+
+	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+	return 0;
+err:
+	dev_err(&GET_DEV(accel_dev), "Failed to configure QAT accel dev\n");
+	return -EINVAL;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct adf_accel_dev *accel_dev;
+	struct adf_accel_dev *pf;
+	struct adf_accel_pci *accel_pci_dev;
+	struct adf_hw_device_data *hw_data;
+	char name[ADF_DEVICE_NAME_LENGTH];
+	unsigned int i, bar_nr;
+	int ret, bar_mask;
+
+	switch (ent->device) {
+	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+		break;
+	default:
+		dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+		return -ENODEV;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
+	if (!accel_dev)
+		return -ENOMEM;
+
+	accel_dev->is_vf = true;
+	pf = adf_devmgr_pci_to_accel_dev(pdev->physfn);
+	accel_pci_dev = &accel_dev->accel_pci_dev;
+	accel_pci_dev->pci_dev = pdev;
+
+	/* Add accel device to accel table */
+	if (adf_devmgr_add_dev(accel_dev, pf)) {
+		dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+		kfree(accel_dev);
+		return -EFAULT;
+	}
+	INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+	accel_dev->owner = THIS_MODULE;
+	/* Allocate and configure device configuration structure */
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
+	if (!hw_data) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	accel_dev->hw_device = hw_data;
+	switch (ent->device) {
+	case ADF_DH895XCCIOV_PCI_DEVICE_ID:
+		adf_init_hw_data_dh895xcciov(accel_dev->hw_device);
+		break;
+	default:
+		ret = -ENODEV;
+		goto out_err;
+	}
+
+	/* Get Accelerators and Accelerators Engines masks */
+	hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+	hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+	accel_pci_dev->sku = hw_data->get_sku(hw_data);
+
+	/* Create dev top level debugfs entry */
+	snprintf(name, sizeof(name), "%s%s_%02x:%02d.%02d",
+		 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
+		 pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+
+	accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+	if (!accel_dev->debugfs_dir) {
+		dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+		ret = -EINVAL;
+		goto out_err;
+	}
+
+	/* Create device configuration table */
+	ret = adf_cfg_dev_add(accel_dev);
+	if (ret)
+		goto out_err;
+
+	/* enable PCI device */
+	if (pci_enable_device(pdev)) {
+		ret = -EFAULT;
+		goto out_err;
+	}
+
+	/* set dma identifier */
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+			dev_err(&pdev->dev, "No usable DMA configuration\n");
+			ret = -EFAULT;
+			goto out_err_disable;
+		} else {
+			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else {
+		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	}
+
+	if (pci_request_regions(pdev, adf_driver_name)) {
+		ret = -EFAULT;
+		goto out_err_disable;
+	}
+
+	/* Find and map all the device's BARS */
+	i = 0;
+	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
+	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
+			 ADF_PCI_MAX_BARS * 2) {
+		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
+
+		bar->base_addr = pci_resource_start(pdev, bar_nr);
+		if (!bar->base_addr)
+			break;
+		bar->size = pci_resource_len(pdev, bar_nr);
+		bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+		if (!bar->virt_addr) {
+			dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
+			ret = -EFAULT;
+			goto out_err_free_reg;
+		}
+	}
+	pci_set_master(pdev);
+	/* Completion for VF2PF request/response message exchange */
+	init_completion(&accel_dev->vf.iov_msg_completion);
+
+	ret = adf_dev_configure(accel_dev);
+	if (ret)
+		goto out_err_free_reg;
+
+	ret = adf_dev_init(accel_dev);
+	if (ret)
+		goto out_err_dev_shutdown;
+
+	ret = adf_dev_start(accel_dev);
+	if (ret)
+		goto out_err_dev_stop;
+
+	return ret;
+
+out_err_dev_stop:
+	adf_dev_stop(accel_dev);
+out_err_dev_shutdown:
+	adf_dev_shutdown(accel_dev);
+out_err_free_reg:
+	pci_release_regions(accel_pci_dev->pci_dev);
+out_err_disable:
+	pci_disable_device(accel_pci_dev->pci_dev);
+out_err:
+	adf_cleanup_accel(accel_dev);
+	kfree(accel_dev);
+	return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+	if (!accel_dev) {
+		pr_err("QAT: Driver removal failed\n");
+		return;
+	}
+	if (adf_dev_stop(accel_dev))
+		dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+
+	adf_dev_shutdown(accel_dev);
+	adf_cleanup_accel(accel_dev);
+	adf_cleanup_pci_dev(accel_dev);
+	kfree(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+	request_module("intel_qat");
+
+	if (pci_register_driver(&adf_driver)) {
+		pr_err("QAT: Driver initialization failed\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+	pci_unregister_driver(&adf_driver);
+	adf_clean_vf_map(true);
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
new file mode 100644
index 0000000..e270e4a
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.h
@@ -0,0 +1,57 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895xVF_DRV_H_
+#define ADF_DH895xVF_DRV_H_
+#include <adf_accel_devices.h>
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data);
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
new file mode 100644
index 0000000..87c5d8a
--- /dev/null
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_isr.c
@@ -0,0 +1,258 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include <adf_pf2vf_msg.h>
+#include "adf_drv.h"
+#include "adf_dh895xccvf_hw_data.h"
+
+static int adf_enable_msi(struct adf_accel_dev *accel_dev)
+{
+	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+	int stat = pci_enable_msi(pci_dev_info->pci_dev);
+
+	if (stat) {
+		dev_err(&GET_DEV(accel_dev),
+			"Failed to enable MSI interrupts\n");
+		return stat;
+	}
+
+	accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+	if (!accel_dev->vf.irq_name)
+		return -ENOMEM;
+
+	return stat;
+}
+
+static void adf_disable_msi(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	kfree(accel_dev->vf.irq_name);
+	pci_disable_msi(pdev);
+}
+
+static void adf_pf2vf_bh_handler(void *data)
+{
+	struct adf_accel_dev *accel_dev = data;
+	void __iomem *pmisc_bar_addr =
+		(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+	u32 msg;
+
+	/* Read the message from PF */
+	msg = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET);
+
+	if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
+		/* Ignore legacy non-system (non-kernel) PF2VF messages */
+		goto err;
+
+	switch ((msg & ADF_PF2VF_MSGTYPE_MASK) >> ADF_PF2VF_MSGTYPE_SHIFT) {
+	case ADF_PF2VF_MSGTYPE_RESTARTING:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Restarting msg received from PF 0x%x\n", msg);
+		adf_dev_stop(accel_dev);
+		break;
+	case ADF_PF2VF_MSGTYPE_VERSION_RESP:
+		dev_dbg(&GET_DEV(accel_dev),
+			"Version resp received from PF 0x%x\n", msg);
+		accel_dev->vf.pf_version =
+			(msg & ADF_PF2VF_VERSION_RESP_VERS_MASK) >>
+			ADF_PF2VF_VERSION_RESP_VERS_SHIFT;
+		accel_dev->vf.compatible =
+			(msg & ADF_PF2VF_VERSION_RESP_RESULT_MASK) >>
+			ADF_PF2VF_VERSION_RESP_RESULT_SHIFT;
+		complete(&accel_dev->vf.iov_msg_completion);
+		break;
+	default:
+		goto err;
+	}
+
+	/* To ack, clear the PF2VFINT bit */
+	msg &= ~ADF_DH895XCC_PF2VF_PF2VFINT;
+	ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCCIOV_PF2VF_OFFSET, msg);
+
+	/* Re-enable PF2VF interrupts */
+	adf_enable_pf2vf_interrupts(accel_dev);
+	return;
+err:
+	dev_err(&GET_DEV(accel_dev),
+		"Unknown message from PF (0x%x); leaving PF2VF ints disabled\n",
+		msg);
+}
+
+static int adf_setup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+	tasklet_init(&accel_dev->vf.pf2vf_bh_tasklet,
+		     (void *)adf_pf2vf_bh_handler, (unsigned long)accel_dev);
+
+	mutex_init(&accel_dev->vf.vf2pf_lock);
+	return 0;
+}
+
+static void adf_cleanup_pf2vf_bh(struct adf_accel_dev *accel_dev)
+{
+	tasklet_disable(&accel_dev->vf.pf2vf_bh_tasklet);
+	tasklet_kill(&accel_dev->vf.pf2vf_bh_tasklet);
+	mutex_destroy(&accel_dev->vf.vf2pf_lock);
+}
+
+static irqreturn_t adf_isr(int irq, void *privdata)
+{
+	struct adf_accel_dev *accel_dev = privdata;
+	void __iomem *pmisc_bar_addr =
+		(&GET_BARS(accel_dev)[ADF_DH895XCCIOV_PMISC_BAR])->virt_addr;
+	u32 v_int;
+
+	/* Read VF INT source CSR to determine the source of VF interrupt */
+	v_int = ADF_CSR_RD(pmisc_bar_addr, ADF_DH895XCCIOV_VINTSOU_OFFSET);
+
+	/* Check for PF2VF interrupt */
+	if (v_int & ADF_DH895XCC_VINTSOU_PF2VF) {
+		/* Disable PF to VF interrupt */
+		adf_disable_pf2vf_interrupts(accel_dev);
+
+		/* Schedule tasklet to handle interrupt BH */
+		tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
+		return IRQ_HANDLED;
+	}
+
+	/* Check bundle interrupt */
+	if (v_int & ADF_DH895XCC_VINTSOU_BUN) {
+		struct adf_etr_data *etr_data = accel_dev->transport;
+		struct adf_etr_bank_data *bank = &etr_data->banks[0];
+
+		/* Disable Flag and Coalesce Ring Interrupts */
+		WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+					   0);
+		tasklet_hi_schedule(&bank->resp_handler);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+	unsigned int cpu;
+	int ret;
+
+	snprintf(accel_dev->vf.irq_name, ADF_MAX_MSIX_VECTOR_NAME,
+		 "qat_%02x:%02d.%02d", pdev->bus->number, PCI_SLOT(pdev->devfn),
+		 PCI_FUNC(pdev->devfn));
+	ret = request_irq(pdev->irq, adf_isr, 0, accel_dev->vf.irq_name,
+			  (void *)accel_dev);
+	if (ret) {
+		dev_err(&GET_DEV(accel_dev), "failed to enable irq for %s\n",
+			accel_dev->vf.irq_name);
+		return ret;
+	}
+	cpu = accel_dev->accel_id % num_online_cpus();
+	irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+
+	return ret;
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+
+	tasklet_init(&priv_data->banks[0].resp_handler, adf_response_handler,
+		     (unsigned long)priv_data->banks);
+	return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+	struct adf_etr_data *priv_data = accel_dev->transport;
+
+	tasklet_disable(&priv_data->banks[0].resp_handler);
+	tasklet_kill(&priv_data->banks[0].resp_handler);
+}
+
+void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+	irq_set_affinity_hint(pdev->irq, NULL);
+	free_irq(pdev->irq, (void *)accel_dev);
+	adf_cleanup_bh(accel_dev);
+	adf_cleanup_pf2vf_bh(accel_dev);
+	adf_disable_msi(accel_dev);
+}
+
+int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+	if (adf_enable_msi(accel_dev))
+		goto err_out;
+
+	if (adf_setup_pf2vf_bh(accel_dev))
+		goto err_out;
+
+	if (adf_setup_bh(accel_dev))
+		goto err_out;
+
+	if (adf_request_msi_irq(accel_dev))
+		goto err_out;
+
+	return 0;
+err_out:
+	adf_vf_isr_resource_free(accel_dev);
+	return -EFAULT;
+}
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index 5c5df1d..be2f504 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -296,7 +296,7 @@
 	if (rctx->buflen) {
 		sg_init_table(rctx->sg, 2);
 		sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
-		scatterwalk_sg_chain(rctx->sg, 2, req->src);
+		sg_chain(rctx->sg, 2, req->src);
 		req->src = rctx->sg;
 	}
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 397a500..820dc3a 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -999,7 +999,7 @@
 		sg_init_table(rctx->in_sg_chain, 2);
 		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 
-		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+		sg_chain(rctx->in_sg_chain, 2, req->src);
 
 		rctx->total = req->nbytes + rctx->buf_cnt;
 		rctx->in_sg = rctx->in_sg_chain;
@@ -1516,7 +1516,7 @@
 	}
 
 	/* Allocate HW descriptors */
-	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
+	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
 			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
 			&dev->hw_phys_desc[0], GFP_KERNEL);
 	if (!dev->hw_desc[0]) {
@@ -1528,34 +1528,31 @@
 				sizeof(struct sahara_hw_desc);
 
 	/* Allocate space for iv and key */
-	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
+	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
 				&dev->key_phys_base, GFP_KERNEL);
 	if (!dev->key_base) {
 		dev_err(&pdev->dev, "Could not allocate memory for key\n");
-		err = -ENOMEM;
-		goto err_key;
+		return -ENOMEM;
 	}
 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
 
 	/* Allocate space for context: largest digest + message length field */
-	dev->context_base = dma_alloc_coherent(&pdev->dev,
+	dev->context_base = dmam_alloc_coherent(&pdev->dev,
 					SHA256_DIGEST_SIZE + 4,
 					&dev->context_phys_base, GFP_KERNEL);
 	if (!dev->context_base) {
 		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
-		err = -ENOMEM;
-		goto err_key;
+		return -ENOMEM;
 	}
 
 	/* Allocate space for HW links */
-	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
+	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
 			&dev->hw_phys_link[0], GFP_KERNEL);
 	if (!dev->hw_link[0]) {
 		dev_err(&pdev->dev, "Could not allocate hw links\n");
-		err = -ENOMEM;
-		goto err_link;
+		return -ENOMEM;
 	}
 	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
 		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
@@ -1572,15 +1569,14 @@
 
 	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
 	if (IS_ERR(dev->kthread)) {
-		err = PTR_ERR(dev->kthread);
-		goto err_link;
+		return PTR_ERR(dev->kthread);
 	}
 
 	init_completion(&dev->dma_completion);
 
 	err = clk_prepare_enable(dev->clk_ipg);
 	if (err)
-		goto err_link;
+		return err;
 	err = clk_prepare_enable(dev->clk_ahb);
 	if (err)
 		goto clk_ipg_disable;
@@ -1620,25 +1616,11 @@
 	return 0;
 
 err_algs:
-	dma_free_coherent(&pdev->dev,
-			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
-			  dev->hw_link[0], dev->hw_phys_link[0]);
 	kthread_stop(dev->kthread);
 	dev_ptr = NULL;
 	clk_disable_unprepare(dev->clk_ahb);
 clk_ipg_disable:
 	clk_disable_unprepare(dev->clk_ipg);
-err_link:
-	dma_free_coherent(&pdev->dev,
-			  2 * AES_KEYSIZE_128,
-			  dev->key_base, dev->key_phys_base);
-	dma_free_coherent(&pdev->dev,
-			  SHA256_DIGEST_SIZE,
-			  dev->context_base, dev->context_phys_base);
-err_key:
-	dma_free_coherent(&pdev->dev,
-			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
-			  dev->hw_desc[0], dev->hw_phys_desc[0]);
 
 	return err;
 }
@@ -1647,16 +1629,6 @@
 {
 	struct sahara_dev *dev = platform_get_drvdata(pdev);
 
-	dma_free_coherent(&pdev->dev,
-			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
-			  dev->hw_link[0], dev->hw_phys_link[0]);
-	dma_free_coherent(&pdev->dev,
-			  2 * AES_KEYSIZE_128,
-			  dev->key_base, dev->key_phys_base);
-	dma_free_coherent(&pdev->dev,
-			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
-			  dev->hw_desc[0], dev->hw_phys_desc[0]);
-
 	kthread_stop(dev->kthread);
 
 	sahara_unregister_algs(dev);
diff --git a/drivers/crypto/sunxi-ss/Makefile b/drivers/crypto/sunxi-ss/Makefile
new file mode 100644
index 0000000..8f4c7a2
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sun4i-ss.o
+sun4i-ss-y += sun4i-ss-core.o sun4i-ss-hash.o sun4i-ss-cipher.o
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
new file mode 100644
index 0000000..e070c31
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -0,0 +1,542 @@
+/*
+ * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for the AES cipher with 128-, 192- and 256-bit
+ * keys in CBC and ECB mode.
+ * It also adds support for DES and 3DES in CBC and ECB mode.
+ *
+ * The datasheet can be found in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+
+static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+	u32 mode = ctx->mode;
+	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+	u32 rx_cnt = SS_RX_DEFAULT;
+	u32 tx_cnt = 0;
+	u32 spaces;
+	u32 v;
+	int i, err = 0;
+	unsigned int ileft = areq->nbytes;
+	unsigned int oleft = areq->nbytes;
+	unsigned int todo;
+	struct sg_mapping_iter mi, mo;
+	unsigned int oi, oo; /* offset for in and out */
+
+	if (areq->nbytes == 0)
+		return 0;
+
+	if (!areq->info) {
+		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+		return -EINVAL;
+	}
+
+	if (!areq->src || !areq->dst) {
+		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&ss->slock);
+
+	for (i = 0; i < op->keylen; i += 4)
+		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+	if (areq->info) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = *(u32 *)(areq->info + i * 4);
+			writel(v, ss->base + SS_IV0 + i * 4);
+		}
+	}
+	writel(mode, ss->base + SS_CTL);
+
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	sg_miter_next(&mo);
+	if (!mi.addr || !mo.addr) {
+		dev_err_ratelimited(ss->dev, "ERROR: sg_miter returned NULL\n");
+		err = -EINVAL;
+		goto release_ss;
+	}
+
+	ileft = areq->nbytes / 4;
+	oleft = areq->nbytes / 4;
+	oi = 0;
+	oo = 0;
+	do {
+		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
+		if (todo > 0) {
+			ileft -= todo;
+			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+			oi += todo * 4;
+		}
+		if (oi == mi.length) {
+			sg_miter_next(&mi);
+			oi = 0;
+		}
+
+		spaces = readl(ss->base + SS_FCSR);
+		rx_cnt = SS_RXFIFO_SPACES(spaces);
+		tx_cnt = SS_TXFIFO_SPACES(spaces);
+
+		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
+		if (todo > 0) {
+			oleft -= todo;
+			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+			oo += todo * 4;
+		}
+		if (oo == mo.length) {
+			sg_miter_next(&mo);
+			oo = 0;
+		}
+	} while (mo.length > 0);
+
+	if (areq->info) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = readl(ss->base + SS_IV0 + i * 4);
+			*(u32 *)(areq->info + i * 4) = v;
+		}
+	}
+
+release_ss:
+	sg_miter_stop(&mi);
+	sg_miter_stop(&mo);
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+	return err;
+}
+
+/* Generic function that supports SGs whose sizes are not a multiple of 4 */
+static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+	int no_chunk = 1;
+	struct scatterlist *in_sg = areq->src;
+	struct scatterlist *out_sg = areq->dst;
+	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
+	u32 mode = ctx->mode;
+	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
+	u32 rx_cnt = SS_RX_DEFAULT;
+	u32 tx_cnt = 0;
+	u32 v;
+	u32 spaces;
+	int i, err = 0;
+	unsigned int ileft = areq->nbytes;
+	unsigned int oleft = areq->nbytes;
+	unsigned int todo;
+	struct sg_mapping_iter mi, mo;
+	unsigned int oi, oo;	/* offset for in and out */
+	char buf[4 * SS_RX_MAX];/* buffer for linearizing the SG src */
+	char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the SG dst */
+	unsigned int ob = 0;	/* offset in buf */
+	unsigned int obo = 0;	/* offset in bufo */
+	unsigned int obl = 0;	/* length of data in bufo */
+
+	if (areq->nbytes == 0)
+		return 0;
+
+	if (!areq->info) {
+		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
+		return -EINVAL;
+	}
+
+	if (!areq->src || !areq->dst) {
+		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * if we only have SGs whose sizes are a multiple of 4,
+	 * we can use the optimized SS function
+	 */
+	while (in_sg && no_chunk == 1) {
+		if ((in_sg->length % 4) != 0)
+			no_chunk = 0;
+		in_sg = sg_next(in_sg);
+	}
+	while (out_sg && no_chunk == 1) {
+		if ((out_sg->length % 4) != 0)
+			no_chunk = 0;
+		out_sg = sg_next(out_sg);
+	}
+
+	if (no_chunk == 1)
+		return sun4i_ss_opti_poll(areq);
+
+	spin_lock_bh(&ss->slock);
+
+	for (i = 0; i < op->keylen; i += 4)
+		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
+
+	if (areq->info) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = *(u32 *)(areq->info + i * 4);
+			writel(v, ss->base + SS_IV0 + i * 4);
+		}
+	}
+	writel(mode, ss->base + SS_CTL);
+
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	sg_miter_next(&mo);
+	if (!mi.addr || !mo.addr) {
+		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+		err = -EINVAL;
+		goto release_ss;
+	}
+	ileft = areq->nbytes;
+	oleft = areq->nbytes;
+	oi = 0;
+	oo = 0;
+
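+	/*
+	 * Main loop: alternately feed the RX FIFO and drain the TX FIFO,
+	 * using buf/bufo to handle SG chunks that are not 4-byte aligned.
+	 */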
+	while (oleft > 0) {
+		if (ileft > 0) {
+			/*
+			 * todo is the number of consecutive 4-byte words that
+			 * we can read from the current SG
+			 */
+			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
+			if (todo > 0 && ob == 0) {
+				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
+					todo);
+				ileft -= todo * 4;
+				oi += todo * 4;
+			} else {
+				/*
+				 * not enough consecutive bytes, so we need to
+				 * linearize them in buf. todo is in bytes.
+				 * After that copy, if buf holds a multiple of
+				 * 4 bytes, we must be able to write it all in
+				 * one pass, which is why we min() with rx_cnt
+				 */
+				todo = min3(rx_cnt * 4 - ob, ileft,
+					    mi.length - oi);
+				memcpy(buf + ob, mi.addr + oi, todo);
+				ileft -= todo;
+				oi += todo;
+				ob += todo;
+				if (ob % 4 == 0) {
+					writesl(ss->base + SS_RXFIFO, buf,
+						ob / 4);
+					ob = 0;
+				}
+			}
+			if (oi == mi.length) {
+				sg_miter_next(&mi);
+				oi = 0;
+			}
+		}
+
+		spaces = readl(ss->base + SS_FCSR);
+		rx_cnt = SS_RXFIFO_SPACES(spaces);
+		tx_cnt = SS_TXFIFO_SPACES(spaces);
+		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+			mode,
+			oi, mi.length, ileft, areq->nbytes, rx_cnt,
+			oo, mo.length, oleft, areq->nbytes, tx_cnt,
+			todo, ob);
+
+		if (tx_cnt == 0)
+			continue;
+		/* todo in 4-byte words */
+		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
+		if (todo > 0) {
+			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+			oleft -= todo * 4;
+			oo += todo * 4;
+			if (oo == mo.length) {
+				sg_miter_next(&mo);
+				oo = 0;
+			}
+		} else {
+			/*
+			 * read obl bytes into bufo; we read as much as
+			 * possible in order to empty the device FIFO
+			 */
+			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
+			obl = tx_cnt * 4;
+			obo = 0;
+			do {
+				/*
+				 * how many bytes can we copy?
+				 * no more than the remaining SG size,
+				 * no more than the remaining buffer,
+				 * and no need to test against oleft
+				 */
+				todo = min(mo.length - oo, obl - obo);
+				memcpy(mo.addr + oo, bufo + obo, todo);
+				oleft -= todo;
+				obo += todo;
+				oo += todo;
+				if (oo == mo.length) {
+					sg_miter_next(&mo);
+					oo = 0;
+				}
+			} while (obo < obl);
+			/* bufo must be fully used here */
+		}
+	}
+	if (areq->info) {
+		for (i = 0; i < 4 && i < ivsize / 4; i++) {
+			v = readl(ss->base + SS_IV0 + i * 4);
+			*(u32 *)(areq->info + i * 4) = v;
+		}
+	}
+
+release_ss:
+	sg_miter_stop(&mi);
+	sg_miter_stop(&mo);
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+
+	return err;
+}
+
+/* CBC AES */
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB AES */
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC DES */
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB DES */
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* CBC 3DES */
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+/* ECB 3DES */
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
+{
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);
+
+	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
+		op->keymode;
+	return sun4i_ss_cipher_poll(areq);
+}
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
+{
+	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct sun4i_ss_alg_template *algt;
+
+	memset(op, 0, sizeof(struct sun4i_tfm_ctx));
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
+	op->ss = algt->ss;
+
+	tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);
+
+	return 0;
+}
+
+/* check and set the AES key, prepare the mode to be used */
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+
+	switch (keylen) {
+	case 128 / 8:
+		op->keymode = SS_AES_128BITS;
+		break;
+	case 192 / 8:
+		op->keymode = SS_AES_192BITS;
+		break;
+	case 256 / 8:
+		op->keymode = SS_AES_256BITS;
+		break;
+	default:
+		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	op->keylen = keylen;
+	memcpy(op->key, key, keylen);
+	return 0;
+}
+
+/* check and set the DES key, prepare the mode to be used */
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+	u32 flags;
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (unlikely(keylen != DES_KEY_SIZE)) {
+		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	flags = crypto_ablkcipher_get_flags(tfm);
+
+	ret = des_ekey(tmp, key);
+	if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+		dev_dbg(ss->dev, "Weak key %u\n", keylen);
+		return -EINVAL;
+	}
+
+	op->keylen = keylen;
+	memcpy(op->key, key, keylen);
+	return 0;
+}
+
+/* check and set the 3DES key, prepare the mode to be used */
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			 unsigned int keylen)
+{
+	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
+	struct sun4i_ss_ctx *ss = op->ss;
+
+	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
+		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
+		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	op->keylen = keylen;
+	memcpy(op->key, key, keylen);
+	return 0;
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-core.c b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
new file mode 100644
index 0000000..eab6fe2
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
@@ -0,0 +1,425 @@
+/*
+ * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Core file which registers crypto algorithms supported by the SS.
+ *
+ * You can find a link to the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+
+#include "sun4i-ss.h"
+
+static struct sun4i_ss_alg_template ss_algs[] = {
+{       .type = CRYPTO_ALG_TYPE_AHASH,
+	.mode = SS_OP_MD5,
+	.alg.hash = {
+		.init = sun4i_hash_init,
+		.update = sun4i_hash_update,
+		.final = sun4i_hash_final,
+		.finup = sun4i_hash_finup,
+		.digest = sun4i_hash_digest,
+		.export = sun4i_hash_export_md5,
+		.import = sun4i_hash_import_md5,
+		.halg = {
+			.digestsize = MD5_DIGEST_SIZE,
+			.base = {
+				.cra_name = "md5",
+				.cra_driver_name = "md5-sun4i-ss",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_type = &crypto_ahash_type,
+				.cra_init = sun4i_hash_crainit
+			}
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_AHASH,
+	.mode = SS_OP_SHA1,
+	.alg.hash = {
+		.init = sun4i_hash_init,
+		.update = sun4i_hash_update,
+		.final = sun4i_hash_final,
+		.finup = sun4i_hash_finup,
+		.digest = sun4i_hash_digest,
+		.export = sun4i_hash_export_sha1,
+		.import = sun4i_hash_import_sha1,
+		.halg = {
+			.digestsize = SHA1_DIGEST_SIZE,
+			.base = {
+				.cra_name = "sha1",
+				.cra_driver_name = "sha1-sun4i-ss",
+				.cra_priority = 300,
+				.cra_alignmask = 3,
+				.cra_flags = CRYPTO_ALG_TYPE_AHASH,
+				.cra_blocksize = SHA1_BLOCK_SIZE,
+				.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+				.cra_module = THIS_MODULE,
+				.cra_type = &crypto_ahash_type,
+				.cra_init = sun4i_hash_crainit
+			}
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.alg.crypto = {
+		.cra_name = "cbc(aes)",
+		.cra_driver_name = "cbc-aes-sun4i-ss",
+		.cra_priority = 300,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = sun4i_ss_cipher_init,
+		.cra_ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey         = sun4i_ss_aes_setkey,
+			.encrypt        = sun4i_ss_cbc_aes_encrypt,
+			.decrypt        = sun4i_ss_cbc_aes_decrypt,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.alg.crypto = {
+		.cra_name = "ecb(aes)",
+		.cra_driver_name = "ecb-aes-sun4i-ss",
+		.cra_priority = 300,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_ctxsize = sizeof(struct sun4i_tfm_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = sun4i_ss_cipher_init,
+		.cra_ablkcipher = {
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.ivsize		= AES_BLOCK_SIZE,
+			.setkey         = sun4i_ss_aes_setkey,
+			.encrypt        = sun4i_ss_ecb_aes_encrypt,
+			.decrypt        = sun4i_ss_ecb_aes_decrypt,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.alg.crypto = {
+		.cra_name = "cbc(des)",
+		.cra_driver_name = "cbc-des-sun4i-ss",
+		.cra_priority = 300,
+		.cra_blocksize = DES_BLOCK_SIZE,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = sun4i_ss_cipher_init,
+		.cra_u.ablkcipher = {
+			.min_keysize    = DES_KEY_SIZE,
+			.max_keysize    = DES_KEY_SIZE,
+			.ivsize         = DES_BLOCK_SIZE,
+			.setkey         = sun4i_ss_des_setkey,
+			.encrypt        = sun4i_ss_cbc_des_encrypt,
+			.decrypt        = sun4i_ss_cbc_des_decrypt,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.alg.crypto = {
+		.cra_name = "ecb(des)",
+		.cra_driver_name = "ecb-des-sun4i-ss",
+		.cra_priority = 300,
+		.cra_blocksize = DES_BLOCK_SIZE,
+		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+		.cra_module = THIS_MODULE,
+		.cra_alignmask = 3,
+		.cra_type = &crypto_ablkcipher_type,
+		.cra_init = sun4i_ss_cipher_init,
+		.cra_u.ablkcipher = {
+			.min_keysize    = DES_KEY_SIZE,
+			.max_keysize    = DES_KEY_SIZE,
+			.setkey         = sun4i_ss_des_setkey,
+			.encrypt        = sun4i_ss_ecb_des_encrypt,
+			.decrypt        = sun4i_ss_ecb_des_decrypt,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.alg.crypto = {
+			.cra_name = "cbc(des3_ede)",
+			.cra_driver_name = "cbc-des3-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = sun4i_ss_cipher_init,
+			.cra_u.ablkcipher = {
+				.min_keysize    = DES3_EDE_KEY_SIZE,
+				.max_keysize    = DES3_EDE_KEY_SIZE,
+				.ivsize         = DES3_EDE_BLOCK_SIZE,
+				.setkey         = sun4i_ss_des3_setkey,
+				.encrypt        = sun4i_ss_cbc_des3_encrypt,
+				.decrypt        = sun4i_ss_cbc_des3_decrypt,
+		}
+	}
+},
+{       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+	.alg.crypto = {
+			.cra_name = "ecb(des3_ede)",
+			.cra_driver_name = "ecb-des3-sun4i-ss",
+			.cra_priority = 300,
+			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
+			.cra_ctxsize = sizeof(struct sun4i_req_ctx),
+			.cra_module = THIS_MODULE,
+			.cra_alignmask = 3,
+			.cra_type = &crypto_ablkcipher_type,
+			.cra_init = sun4i_ss_cipher_init,
+			.cra_u.ablkcipher = {
+				.min_keysize    = DES3_EDE_KEY_SIZE,
+				.max_keysize    = DES3_EDE_KEY_SIZE,
+				.ivsize         = DES3_EDE_BLOCK_SIZE,
+				.setkey         = sun4i_ss_des3_setkey,
+				.encrypt        = sun4i_ss_ecb_des3_encrypt,
+				.decrypt        = sun4i_ss_ecb_des3_decrypt,
+		}
+	}
+},
+};
+
+static int sun4i_ss_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	u32 v;
+	int err, i;
+	unsigned long cr;
+	const unsigned long cr_ahb = 24 * 1000 * 1000;
+	const unsigned long cr_mod = 150 * 1000 * 1000;
+	struct sun4i_ss_ctx *ss;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ss->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ss->base)) {
+		dev_err(&pdev->dev, "Cannot request MMIO\n");
+		return PTR_ERR(ss->base);
+	}
+
+	ss->ssclk = devm_clk_get(&pdev->dev, "mod");
+	if (IS_ERR(ss->ssclk)) {
+		err = PTR_ERR(ss->ssclk);
+		dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err);
+		return err;
+	}
+	dev_dbg(&pdev->dev, "clock ss acquired\n");
+
+	ss->busclk = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(ss->busclk)) {
+		err = PTR_ERR(ss->busclk);
+		dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err);
+		return err;
+	}
+	dev_dbg(&pdev->dev, "clock ahb_ss acquired\n");
+
+	ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+	if (IS_ERR(ss->reset)) {
+		if (PTR_ERR(ss->reset) == -EPROBE_DEFER)
+			return PTR_ERR(ss->reset);
+		dev_info(&pdev->dev, "no reset control found\n");
+		ss->reset = NULL;
+	}
+
+	/* Enable both clocks */
+	err = clk_prepare_enable(ss->busclk);
+	if (err != 0) {
+		dev_err(&pdev->dev, "Cannot prepare_enable busclk\n");
+		return err;
+	}
+	err = clk_prepare_enable(ss->ssclk);
+	if (err != 0) {
+		dev_err(&pdev->dev, "Cannot prepare_enable ssclk\n");
+		goto error_ssclk;
+	}
+
+	/*
+	 * Check that the clocks have the rates given in the datasheet,
+	 * and try to set the SS clock to the maximum allowed rate
+	 */
+	err = clk_set_rate(ss->ssclk, cr_mod);
+	if (err != 0) {
+		dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n");
+		goto error_clk;
+	}
+
+	/* Deassert reset if we have a reset control */
+	if (ss->reset) {
+		err = reset_control_deassert(ss->reset);
+		if (err) {
+			dev_err(&pdev->dev, "Cannot deassert reset control\n");
+			goto error_clk;
+		}
+	}
+
+	/*
+	 * The only impact of clocks below the requirement is bad performance,
+	 * so do not print "errors";
+	 * only warn about overclocked clocks
+	 */
+	cr = clk_get_rate(ss->busclk);
+	if (cr >= cr_ahb)
+		dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+			cr, cr / 1000000, cr_ahb);
+	else
+		dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n",
+			 cr, cr / 1000000, cr_ahb);
+
+	cr = clk_get_rate(ss->ssclk);
+	if (cr <= cr_mod)
+		if (cr < cr_mod)
+			dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+				 cr, cr / 1000000, cr_mod);
+		else
+			dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n",
+				cr, cr / 1000000, cr_mod);
+	else
+		dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n",
+			 cr, cr / 1000000, cr_mod);
+
+	/*
+	 * The datasheet names it "Die Bonding ID".
+	 * I expect it to be a sort of Security System revision number.
+	 * Since the A80 seems to have another version of the SS,
+	 * this info could be useful
+	 */
+	writel(SS_ENABLED, ss->base + SS_CTL);
+	v = readl(ss->base + SS_CTL);
+	v >>= 16;
+	v &= 0x07;
+	dev_info(&pdev->dev, "Die ID %d\n", v);
+	writel(0, ss->base + SS_CTL);
+
+	ss->dev = &pdev->dev;
+
+	spin_lock_init(&ss->slock);
+
+	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+		ss_algs[i].ss = ss;
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			err = crypto_register_alg(&ss_algs[i].alg.crypto);
+			if (err != 0) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.crypto.cra_name);
+				goto error_alg;
+			}
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			err = crypto_register_ahash(&ss_algs[i].alg.hash);
+			if (err != 0) {
+				dev_err(ss->dev, "Fail to register %s\n",
+					ss_algs[i].alg.hash.halg.base.cra_name);
+				goto error_alg;
+			}
+			break;
+		}
+	}
+	platform_set_drvdata(pdev, ss);
+	return 0;
+error_alg:
+	i--;
+	for (; i >= 0; i--) {
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&ss_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&ss_algs[i].alg.hash);
+			break;
+		}
+	}
+	if (ss->reset)
+		reset_control_assert(ss->reset);
+error_clk:
+	clk_disable_unprepare(ss->ssclk);
+error_ssclk:
+	clk_disable_unprepare(ss->busclk);
+	return err;
+}
+
+static int sun4i_ss_remove(struct platform_device *pdev)
+{
+	int i;
+	struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
+
+	for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
+		switch (ss_algs[i].type) {
+		case CRYPTO_ALG_TYPE_ABLKCIPHER:
+			crypto_unregister_alg(&ss_algs[i].alg.crypto);
+			break;
+		case CRYPTO_ALG_TYPE_AHASH:
+			crypto_unregister_ahash(&ss_algs[i].alg.hash);
+			break;
+		}
+	}
+
+	writel(0, ss->base + SS_CTL);
+	if (ss->reset)
+		reset_control_assert(ss->reset);
+	clk_disable_unprepare(ss->busclk);
+	clk_disable_unprepare(ss->ssclk);
+	return 0;
+}
+
+static const struct of_device_id a20ss_crypto_of_match_table[] = {
+	{ .compatible = "allwinner,sun4i-a10-crypto" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
+
+static struct platform_driver sun4i_ss_driver = {
+	.probe          = sun4i_ss_probe,
+	.remove         = sun4i_ss_remove,
+	.driver         = {
+		.name           = "sun4i-ss",
+		.of_match_table	= a20ss_crypto_of_match_table,
+	},
+};
+
+module_platform_driver(sun4i_ss_driver);
+
+MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Corentin LABBE <clabbe.montjoie@gmail.com>");
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
new file mode 100644
index 0000000..ff80314
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
@@ -0,0 +1,492 @@
+/*
+ * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * This file adds support for MD5 and SHA1.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#include "sun4i-ss.h"
+#include <linux/scatterlist.h>
+
+/* This is a totally arbitrary value */
+#define SS_TIMEOUT 100
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct sun4i_req_ctx));
+	return 0;
+}
+
+/* sun4i_hash_init: initialize request context */
+int sun4i_hash_init(struct ahash_request *areq)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+	struct sun4i_ss_alg_template *algt;
+	struct sun4i_ss_ctx *ss;
+
+	memset(op, 0, sizeof(struct sun4i_req_ctx));
+
+	algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash);
+	ss = algt->ss;
+	op->ss = algt->ss;
+	op->mode = algt->mode;
+
+	return 0;
+}
+
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct md5_state *octx = out;
+	int i;
+
+	octx->byte_count = op->byte_count + op->len;
+
+	memcpy(octx->block, op->buf, op->len);
+
+	if (op->byte_count > 0) {
+		for (i = 0; i < 4; i++)
+			octx->hash[i] = op->hash[i];
+	} else {
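+		/*
+		 * nothing has been hashed yet: report the MD5 initial values
+		 * (SHA1_H0..SHA1_H3 have the very same values)
+		 */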
+		octx->hash[0] = SHA1_H0;
+		octx->hash[1] = SHA1_H1;
+		octx->hash[2] = SHA1_H2;
+		octx->hash[3] = SHA1_H3;
+	}
+
+	return 0;
+}
+
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	const struct md5_state *ictx = in;
+	int i;
+
+	sun4i_hash_init(areq);
+
+	op->byte_count = ictx->byte_count & ~0x3F;
+	op->len = ictx->byte_count & 0x3F;
+
+	memcpy(op->buf, ictx->block, op->len);
+
+	for (i = 0; i < 4; i++)
+		op->hash[i] = ictx->hash[i];
+
+	return 0;
+}
+
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sha1_state *octx = out;
+	int i;
+
+	octx->count = op->byte_count + op->len;
+
+	memcpy(octx->buffer, op->buf, op->len);
+
+	if (op->byte_count > 0) {
+		for (i = 0; i < 5; i++)
+			octx->state[i] = op->hash[i];
+	} else {
+		octx->state[0] = SHA1_H0;
+		octx->state[1] = SHA1_H1;
+		octx->state[2] = SHA1_H2;
+		octx->state[3] = SHA1_H3;
+		octx->state[4] = SHA1_H4;
+	}
+
+	return 0;
+}
+
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in)
+{
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	const struct sha1_state *ictx = in;
+	int i;
+
+	sun4i_hash_init(areq);
+
+	op->byte_count = ictx->count & ~0x3F;
+	op->len = ictx->count & 0x3F;
+
+	memcpy(op->buf, ictx->buffer, op->len);
+
+	for (i = 0; i < 5; i++)
+		op->hash[i] = ictx->state[i];
+
+	return 0;
+}
+
+/*
+ * sun4i_hash_update: update hash engine
+ *
+ * Can be used for both SHA1 and MD5.
+ * Data is written to the SS 32 bits at a time.
+ *
+ * Since we cannot leave partial data and hash state in the engine,
+ * we need to get the hash state at the end of this function.
+ * We can only get the hash state at a 64-byte boundary.
+ *
+ * So the first job is to get the number of bytes to write to the SS modulo 64.
+ * The extra bytes go to a temporary buffer, op->buf, which stores op->len bytes.
+ *
+ * So at the beginning of update():
+ * if op->len + areq->nbytes < 64
+ * => all data will be written to the wait buffer (op->buf) and end=0
+ * if not, write all data from op->buf to the device and set end so that
+ * the amount written completes to a multiple of 64 bytes
+ *
+ * example 1:
+ * update1 60 bytes => op->len=60
+ * update2 60 bytes => we need one more word to have 64 bytes
+ * end=4
+ * so write all data from op->buf and one word from the SGs,
+ * then store the remaining data in op->buf
+ * final state: op->len=56
+ */
+int sun4i_hash_update(struct ahash_request *areq)
+{
+	u32 v, ivmode = 0;
+	unsigned int i = 0;
+	/*
+	 * i is the total number of bytes read from the SGs, to be compared
+	 * to areq->nbytes
+	 * i is important because we cannot rely on the SG lengths, since the
+	 * sum of SG->length could be greater than areq->nbytes
+	 */
+
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sun4i_ss_ctx *ss = op->ss;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	unsigned int in_i = 0; /* advancement in the current SG */
+	unsigned int end;
+	/*
+	 * end is the position at which we must stop writing to the device,
+	 * to be compared to i
+	 */
+	int in_r, err = 0;
+	unsigned int todo;
+	u32 spaces, rx_cnt = SS_RX_DEFAULT;
+	size_t copied = 0;
+	struct sg_mapping_iter mi;
+
+	dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+		__func__, crypto_tfm_alg_name(areq->base.tfm),
+		op->byte_count, areq->nbytes, op->mode,
+		op->len, op->hash[0]);
+
+	if (areq->nbytes == 0)
+		return 0;
+
+	/* protect against overflow */
+	if (areq->nbytes > UINT_MAX - op->len) {
+		dev_err(ss->dev, "Cannot process too large request\n");
+		return -EINVAL;
+	}
+
+	if (op->len + areq->nbytes < 64) {
+		/* linearize data to op->buf */
+		copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+					    op->buf + op->len, areq->nbytes, 0);
+		op->len += copied;
+		return 0;
+	}
+
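+	/* end is chosen so that (op->len + end) is a multiple of 64 bytes */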
+	end = ((areq->nbytes + op->len) / 64) * 64 - op->len;
+
+	if (end > areq->nbytes || areq->nbytes - end > 63) {
+		dev_err(ss->dev, "ERROR: Bound error %u %u\n",
+			end, areq->nbytes);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&ss->slock);
+
+	/*
+	 * if some data have been processed before,
+	 * we need to restore the partial hash state
+	 */
+	if (op->byte_count > 0) {
+		ivmode = SS_IV_ARBITRARY;
+		for (i = 0; i < 5; i++)
+			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+	}
+	/* Enable the device */
+	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+	i = 0;
+	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	sg_miter_next(&mi);
+	in_i = 0;
+
+	do {
+		/*
+		 * we need to linearize in two cases:
+		 * - the buffer already holds some data
+		 * - the SG does not have enough bytes remaining (< 4)
+		 */
+		if (op->len > 0 || (mi.length - in_i) < 4) {
+			/*
+			 * once we are here, there are two reasons to stop:
+			 * - the buffer is full
+			 * - we have reached the end
+			 */
+			while (op->len < 64 && i < end) {
+				/* how many bytes we can read from current SG */
+				in_r = min3(mi.length - in_i, end - i,
+					    64 - op->len);
+				memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+				op->len += in_r;
+				i += in_r;
+				in_i += in_r;
+				if (in_i == mi.length) {
+					sg_miter_next(&mi);
+					in_i = 0;
+				}
+			}
+			if (op->len > 3 && (op->len % 4) == 0) {
+				/* write buf to the device */
+				writesl(ss->base + SS_RXFIFO, op->buf,
+					op->len / 4);
+				op->byte_count += op->len;
+				op->len = 0;
+			}
+		}
+		if (mi.length - in_i > 3 && i < end) {
+			/* how many bytes we can read from current SG */
+			in_r = min3(mi.length - in_i, areq->nbytes - i,
+				    ((mi.length - in_i) / 4) * 4);
+			/* how many bytes we can write in the device*/
+			todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4);
+			writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo);
+			op->byte_count += todo * 4;
+			i += todo * 4;
+			in_i += todo * 4;
+			rx_cnt -= todo;
+			if (rx_cnt == 0) {
+				spaces = readl(ss->base + SS_FCSR);
+				rx_cnt = SS_RXFIFO_SPACES(spaces);
+			}
+			if (in_i == mi.length) {
+				sg_miter_next(&mi);
+				in_i = 0;
+			}
+		}
+	} while (i < end);
+	/* final linear */
+	if ((areq->nbytes - i) < 64) {
+		while (i < areq->nbytes && in_i < mi.length && op->len < 64) {
+			/* how many bytes we can read from current SG */
+			in_r = min3(mi.length - in_i, areq->nbytes - i,
+				    64 - op->len);
+			memcpy(op->buf + op->len, mi.addr + in_i, in_r);
+			op->len += in_r;
+			i += in_r;
+			in_i += in_r;
+			if (in_i == mi.length) {
+				sg_miter_next(&mi);
+				in_i = 0;
+			}
+		}
+	}
+
+	sg_miter_stop(&mi);
+
+	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+	i = 0;
+	do {
+		v = readl(ss->base + SS_CTL);
+		i++;
+	} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+	if (i >= SS_TIMEOUT) {
+		dev_err_ratelimited(ss->dev,
+				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+				    i, SS_TIMEOUT, v, areq->nbytes);
+		err = -EIO;
+		goto release_ss;
+	}
+
+	/* get the partial hash only if something was written */
+	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+		op->hash[i] = readl(ss->base + SS_MD0 + i * 4);
+
+release_ss:
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+	return err;
+}
+
+/*
+ * sun4i_hash_final: finalize hashing operation
+ *
+ * If we have some remaining bytes, we write them.
+ * Then ask the SS to finalize the hashing operation.
+ *
+ * I do not check the RX FIFO size in this function since its size is 32
+ * after each enable and this function never writes more than 32 words.
+ */
+int sun4i_hash_final(struct ahash_request *areq)
+{
+	u32 v, ivmode = 0;
+	unsigned int i;
+	unsigned int j = 0;
+	int zeros, err = 0;
+	unsigned int index, padlen;
+	__be64 bits;
+	struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+	struct sun4i_ss_ctx *ss = op->ss;
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+	u32 bf[32];
+	u32 wb = 0;
+	unsigned int nwait, nbw = 0;
+
+	dev_dbg(ss->dev, "%s: byte=%llu len=%u mode=%x wl=%u h=%x",
+		__func__, op->byte_count, areq->nbytes, op->mode,
+		op->len, op->hash[0]);
+
+	spin_lock_bh(&ss->slock);
+
+	/*
+	 * if we have already written something,
+	 * restore the partial hash state
+	 */
+	if (op->byte_count > 0) {
+		ivmode = SS_IV_ARBITRARY;
+		for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++)
+			writel(op->hash[i], ss->base + SS_IV0 + i * 4);
+	}
+	writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL);
+
+	/* write the remaining words of the wait buffer */
+	if (op->len > 0) {
+		nwait = op->len / 4;
+		if (nwait > 0) {
+			writesl(ss->base + SS_RXFIFO, op->buf, nwait);
+			op->byte_count += 4 * nwait;
+		}
+		nbw = op->len - 4 * nwait;
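+		/* keep only the nbw valid bytes of the last partial word */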
+		wb = *(u32 *)(op->buf + nwait * 4);
+		wb &= (0xFFFFFFFF >> (4 - nbw) * 8);
+	}
+
+	/* write the remaining bytes of the nbw buffer */
+	if (nbw > 0) {
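+		/* append the mandatory 0x80 padding byte right after the data */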
+		wb |= ((1 << 7) << (nbw * 8));
+		bf[j++] = wb;
+	} else {
+		bf[j++] = 1 << 7;
+	}
+
+	/*
+	 * number of words of zero padding needed to fill up to 64 bytes,
+	 * minus 8 (the length field) and minus 4 (the final-1 word);
+	 * the computation is taken from other MD5/SHA1 implementations
+	 */
+
+	/* we have already sent 4 more bytes, of which nbw are data */
+	if (op->mode == SS_OP_MD5) {
+		index = (op->byte_count + 4) & 0x3f;
+		op->byte_count += nbw;
+		if (index > 56)
+			zeros = (120 - index) / 4;
+		else
+			zeros = (56 - index) / 4;
+	} else {
+		op->byte_count += nbw;
+		index = op->byte_count & 0x3f;
+		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+		zeros = (padlen - 1) / 4;
+	}
+
+	memset(bf + j, 0, 4 * zeros);
+	j += zeros;
+
+	/*
+	 * write the length of the data, in bits
+	 * (big-endian for SHA1, little-endian for MD5)
+	 */
+	if (op->mode == SS_OP_SHA1) {
+		bits = cpu_to_be64(op->byte_count << 3);
+		bf[j++] = bits & 0xffffffff;
+		bf[j++] = (bits >> 32) & 0xffffffff;
+	} else {
+		bf[j++] = (op->byte_count << 3) & 0xffffffff;
+		bf[j++] = (op->byte_count >> 29) & 0xffffffff;
+	}
+	writesl(ss->base + SS_RXFIFO, bf, j);
+
+	/* Tell the SS to stop the hashing */
+	writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL);
+
+	/*
+	 * Wait for SS to finish the hash.
+	 * The timeout should only happen in case of bad overclocking
+	 * or a driver bug.
+	 */
+	i = 0;
+	do {
+		v = readl(ss->base + SS_CTL);
+		i++;
+	} while (i < SS_TIMEOUT && (v & SS_DATA_END) > 0);
+	if (i >= SS_TIMEOUT) {
+		dev_err_ratelimited(ss->dev,
+				    "ERROR: hash end timeout %d>%d ctl=%x len=%u\n",
+				    i, SS_TIMEOUT, v, areq->nbytes);
+		err = -EIO;
+		goto release_ss;
+	}
+
+	/* Get the hash from the device */
+	if (op->mode == SS_OP_SHA1) {
+		for (i = 0; i < 5; i++) {
+			v = cpu_to_be32(readl(ss->base + SS_MD0 + i * 4));
+			memcpy(areq->result + i * 4, &v, 4);
+		}
+	} else {
+		for (i = 0; i < 4; i++) {
+			v = readl(ss->base + SS_MD0 + i * 4);
+			memcpy(areq->result + i * 4, &v, 4);
+		}
+	}
+
+release_ss:
+	writel(0, ss->base + SS_CTL);
+	spin_unlock_bh(&ss->slock);
+	return err;
+}
+
+/* sun4i_hash_finup: finalize hashing operation after an update */
+int sun4i_hash_finup(struct ahash_request *areq)
+{
+	int err;
+
+	err = sun4i_hash_update(areq);
+	if (err != 0)
+		return err;
+
+	return sun4i_hash_final(areq);
+}
+
+/* combo of init/update/final functions */
+int sun4i_hash_digest(struct ahash_request *areq)
+{
+	int err;
+
+	err = sun4i_hash_init(areq);
+	if (err != 0)
+		return err;
+
+	err = sun4i_hash_update(areq);
+	if (err != 0)
+		return err;
+
+	return sun4i_hash_final(areq);
+}
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
new file mode 100644
index 0000000..8e9c05f
--- /dev/null
+++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
@@ -0,0 +1,201 @@
+/*
+ * sun4i-ss.h - hardware cryptographic accelerator for Allwinner A20 SoC
+ *
+ * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
+ *
+ * Supports the AES cipher with 128, 192 and 256-bit keys.
+ * Supports the MD5 and SHA1 hash algorithms.
+ * Supports DES and 3DES.
+ *
+ * You can find the datasheet in Documentation/arm/sunxi/README
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <crypto/scatterwalk.h>
+#include <linux/scatterlist.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <crypto/md5.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/internal/rng.h>
+
+#define SS_CTL            0x00
+#define SS_KEY0           0x04
+#define SS_KEY1           0x08
+#define SS_KEY2           0x0C
+#define SS_KEY3           0x10
+#define SS_KEY4           0x14
+#define SS_KEY5           0x18
+#define SS_KEY6           0x1C
+#define SS_KEY7           0x20
+
+#define SS_IV0            0x24
+#define SS_IV1            0x28
+#define SS_IV2            0x2C
+#define SS_IV3            0x30
+
+#define SS_FCSR           0x44
+
+#define SS_MD0            0x4C
+#define SS_MD1            0x50
+#define SS_MD2            0x54
+#define SS_MD3            0x58
+#define SS_MD4            0x5C
+
+#define SS_RXFIFO         0x200
+#define SS_TXFIFO         0x204
+
+/* SS_CTL configuration values */
+
+/* PRNG generator mode - bit 15 */
+#define SS_PRNG_ONESHOT		(0 << 15)
+#define SS_PRNG_CONTINUE	(1 << 15)
+
+/* IV mode for hash */
+#define SS_IV_ARBITRARY		(1 << 14)
+
+/* SS operation mode - bits 12-13 */
+#define SS_ECB			(0 << 12)
+#define SS_CBC			(1 << 12)
+#define SS_CTS			(3 << 12)
+
+/* Counter width for CNT mode - bits 10-11 */
+#define SS_CNT_16BITS		(0 << 10)
+#define SS_CNT_32BITS		(1 << 10)
+#define SS_CNT_64BITS		(2 << 10)
+
+/* Key size for AES - bits 8-9 */
+#define SS_AES_128BITS		(0 << 8)
+#define SS_AES_192BITS		(1 << 8)
+#define SS_AES_256BITS		(2 << 8)
+
+/* Operation direction - bit 7 */
+#define SS_ENCRYPTION		(0 << 7)
+#define SS_DECRYPTION		(1 << 7)
+
+/* SS Method - bits 4-6 */
+#define SS_OP_AES		(0 << 4)
+#define SS_OP_DES		(1 << 4)
+#define SS_OP_3DES		(2 << 4)
+#define SS_OP_SHA1		(3 << 4)
+#define SS_OP_MD5		(4 << 4)
+#define SS_OP_PRNG		(5 << 4)
+
+/* Data end bit - bit 2 */
+#define SS_DATA_END		(1 << 2)
+
+/* PRNG start bit - bit 1 */
+#define SS_PRNG_START		(1 << 1)
+
+/* SS Enable bit - bit 0 */
+#define SS_DISABLED		(0 << 0)
+#define SS_ENABLED		(1 << 0)
+
+/* SS_FCSR configuration values */
+/* RX FIFO status - bit 30 */
+#define SS_RXFIFO_FREE		(1 << 30)
+
+/* RX FIFO empty spaces - bits 24-29 */
+#define SS_RXFIFO_SPACES(val)	(((val) >> 24) & 0x3f)
+
+/* TX FIFO status - bit 22 */
+#define SS_TXFIFO_AVAILABLE	(1 << 22)
+
+/* TX FIFO available spaces - bits 16-21 */
+#define SS_TXFIFO_SPACES(val)	(((val) >> 16) & 0x3f)
+
+#define SS_RX_MAX	32
+#define SS_RX_DEFAULT	SS_RX_MAX
+#define SS_TX_MAX	33
+
+#define SS_RXFIFO_EMP_INT_PENDING	(1 << 10)
+#define SS_TXFIFO_AVA_INT_PENDING	(1 << 8)
+#define SS_RXFIFO_EMP_INT_ENABLE	(1 << 2)
+#define SS_TXFIFO_AVA_INT_ENABLE	(1 << 0)
+
+struct sun4i_ss_ctx {
+	void __iomem *base;
+	int irq;
+	struct clk *busclk;
+	struct clk *ssclk;
+	struct reset_control *reset;
+	struct device *dev;
+	struct resource *res;
+	spinlock_t slock; /* control the use of the device */
+};
+
+struct sun4i_ss_alg_template {
+	u32 type;
+	u32 mode;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+	} alg;
+	struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_tfm_ctx {
+	u32 key[AES_MAX_KEY_SIZE / 4];/* divided by sizeof(u32) */
+	u32 keylen;
+	u32 keymode;
+	struct sun4i_ss_ctx *ss;
+};
+
+struct sun4i_cipher_req_ctx {
+	u32 mode;
+};
+
+struct sun4i_req_ctx {
+	u32 mode;
+	u64 byte_count; /* number of bytes "uploaded" to the device */
+	u32 hash[5]; /* for storing SS_IVx register */
+	char buf[64];
+	unsigned int len;
+	struct sun4i_ss_ctx *ss;
+};
+
+int sun4i_hash_crainit(struct crypto_tfm *tfm);
+int sun4i_hash_init(struct ahash_request *areq);
+int sun4i_hash_update(struct ahash_request *areq);
+int sun4i_hash_final(struct ahash_request *areq);
+int sun4i_hash_finup(struct ahash_request *areq);
+int sun4i_hash_digest(struct ahash_request *areq);
+int sun4i_hash_export_md5(struct ahash_request *areq, void *out);
+int sun4i_hash_import_md5(struct ahash_request *areq, const void *in);
+int sun4i_hash_export_sha1(struct ahash_request *areq, void *out);
+int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in);
+
+int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq);
+int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq);
+
+int sun4i_ss_cipher_init(struct crypto_tfm *tfm);
+int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int keylen);
+int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			unsigned int keylen);
+int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+			 unsigned int keylen);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 83aca95..3b20a1b 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -766,6 +766,7 @@
 static int talitos_register_rng(struct device *dev)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
+	int err;
 
 	priv->rng.name		= dev_driver_string(dev),
 	priv->rng.init		= talitos_rng_init,
@@ -773,14 +774,22 @@
 	priv->rng.data_read	= talitos_rng_data_read,
 	priv->rng.priv		= (unsigned long)dev;
 
-	return hwrng_register(&priv->rng);
+	err = hwrng_register(&priv->rng);
+	if (!err)
+		priv->rng_registered = true;
+
+	return err;
 }
 
 static void talitos_unregister_rng(struct device *dev)
 {
 	struct talitos_private *priv = dev_get_drvdata(dev);
 
+	if (!priv->rng_registered)
+		return;
+
 	hwrng_unregister(&priv->rng);
+	priv->rng_registered = false;
 }
 
 /*
@@ -799,7 +808,6 @@
 	unsigned int keylen;
 	unsigned int enckeylen;
 	unsigned int authkeylen;
-	unsigned int authsize;
 };
 
 #define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
@@ -819,16 +827,6 @@
 	struct scatterlist *psrc;
 };
 
-static int aead_setauthsize(struct crypto_aead *authenc,
-			    unsigned int authsize)
-{
-	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-
-	ctx->authsize = authsize;
-
-	return 0;
-}
-
 static int aead_setkey(struct crypto_aead *authenc,
 		       const u8 *key, unsigned int keylen)
 {
@@ -857,12 +855,11 @@
 
 /*
  * talitos_edesc - s/w-extended descriptor
- * @assoc_nents: number of segments in associated data scatterlist
  * @src_nents: number of segments in input scatterlist
  * @dst_nents: number of segments in output scatterlist
- * @assoc_chained: whether assoc is chained or not
  * @src_chained: whether src is chained or not
  * @dst_chained: whether dst is chained or not
+ * @icv_ool: whether ICV is out-of-line
  * @iv_dma: dma address of iv for checking continuity and link table
  * @dma_len: length of dma mapped link_tbl space
  * @dma_link_tbl: bus physical address of link_tbl/buf
@@ -875,12 +872,11 @@
  * of link_tbl data
  */
 struct talitos_edesc {
-	int assoc_nents;
 	int src_nents;
 	int dst_nents;
-	bool assoc_chained;
 	bool src_chained;
 	bool dst_chained;
+	bool icv_ool;
 	dma_addr_t iv_dma;
 	int dma_len;
 	dma_addr_t dma_link_tbl;
@@ -952,14 +948,6 @@
 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
 
-	if (edesc->assoc_chained)
-		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
-	else if (areq->assoclen)
-		/* assoc_nents counts also for IV in non-contiguous cases */
-		dma_unmap_sg(dev, areq->assoc,
-			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
-			     DMA_TO_DEVICE);
-
 	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 
 	if (edesc->dma_len)
@@ -976,7 +964,7 @@
 {
 	struct aead_request *areq = context;
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	unsigned int authsize = crypto_aead_authsize(authenc);
 	struct talitos_edesc *edesc;
 	struct scatterlist *sg;
 	void *icvdata;
@@ -986,13 +974,12 @@
 	ipsec_esp_unmap(dev, edesc, areq);
 
 	/* copy the generated ICV to dst */
-	if (edesc->dst_nents) {
+	if (edesc->icv_ool) {
 		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 2 +
-					   edesc->assoc_nents];
+					   edesc->dst_nents + 2];
 		sg = sg_last(areq->dst, edesc->dst_nents);
-		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
-		       icvdata, ctx->authsize);
+		memcpy((char *)sg_virt(sg) + sg->length - authsize,
+		       icvdata, authsize);
 	}
 
 	kfree(edesc);
@@ -1006,10 +993,10 @@
 {
 	struct aead_request *req = context;
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
-	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
+	unsigned int authsize = crypto_aead_authsize(authenc);
 	struct talitos_edesc *edesc;
 	struct scatterlist *sg;
-	void *icvdata;
+	char *oicv, *icv;
 
 	edesc = container_of(desc, struct talitos_edesc, desc);
 
@@ -1017,16 +1004,18 @@
 
 	if (!err) {
 		/* auth check */
-		if (edesc->dma_len)
-			icvdata = &edesc->link_tbl[edesc->src_nents +
-						   edesc->dst_nents + 2 +
-						   edesc->assoc_nents];
-		else
-			icvdata = &edesc->link_tbl[0];
-
 		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
-		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
-			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
+		icv = (char *)sg_virt(sg) + sg->length - authsize;
+
+		if (edesc->dma_len) {
+			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
+							edesc->dst_nents + 2];
+			if (edesc->icv_ool)
+				icv = oicv + authsize;
+		} else
+			oicv = (char *)&edesc->link_tbl[0];
+
+		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
 	}
 
 	kfree(edesc);
@@ -1059,53 +1048,69 @@
  * convert scatterlist to SEC h/w link table format
  * stop at cryptlen bytes
  */
-static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
-			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
+static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+				 unsigned int offset, int cryptlen,
+				 struct talitos_ptr *link_tbl_ptr)
 {
 	int n_sg = sg_count;
+	int count = 0;
 
-	while (sg && n_sg--) {
-		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
-		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
-		link_tbl_ptr->j_extent = 0;
-		link_tbl_ptr++;
-		cryptlen -= sg_dma_len(sg);
+	while (cryptlen && sg && n_sg--) {
+		unsigned int len = sg_dma_len(sg);
+
+		if (offset >= len) {
+			offset -= len;
+			goto next;
+		}
+
+		len -= offset;
+
+		if (len > cryptlen)
+			len = cryptlen;
+
+		to_talitos_ptr(link_tbl_ptr + count,
+			       sg_dma_address(sg) + offset, 0);
+		link_tbl_ptr[count].len = cpu_to_be16(len);
+		link_tbl_ptr[count].j_extent = 0;
+		count++;
+		cryptlen -= len;
+		offset = 0;
+
+next:
 		sg = sg_next(sg);
 	}
 
-	/* adjust (decrease) last one (or two) entry's len to cryptlen */
-	link_tbl_ptr--;
-	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
-		/* Empty this entry, and move to previous one */
-		cryptlen += be16_to_cpu(link_tbl_ptr->len);
-		link_tbl_ptr->len = 0;
-		sg_count--;
-		link_tbl_ptr--;
-	}
-	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
-					+ cryptlen);
-
 	/* tag end of link table */
-	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+	if (count > 0)
+		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;
 
-	return sg_count;
+	return count;
+}
+
+static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+				 int cryptlen,
+				 struct talitos_ptr *link_tbl_ptr)
+{
+	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
+				     link_tbl_ptr);
 }
 
 /*
  * fill in and submit ipsec_esp descriptor
  */
 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
-		     u64 seq, void (*callback) (struct device *dev,
-						struct talitos_desc *desc,
-						void *context, int error))
+		     void (*callback)(struct device *dev,
+				      struct talitos_desc *desc,
+				      void *context, int error))
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	unsigned int authsize = crypto_aead_authsize(aead);
 	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *dev = ctx->dev;
 	struct talitos_desc *desc = &edesc->desc;
 	unsigned int cryptlen = areq->cryptlen;
-	unsigned int authsize = ctx->authsize;
 	unsigned int ivsize = crypto_aead_ivsize(aead);
+	int tbl_off = 0;
 	int sg_count, ret;
 	int sg_link_tbl_len;
 
@@ -1113,36 +1118,27 @@
 	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
 			       DMA_TO_DEVICE);
 
+	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
+				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
+							   : DMA_TO_DEVICE,
+				  edesc->src_chained);
+
 	/* hmac data */
-	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
-	if (edesc->assoc_nents) {
-		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
-		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
+	if (sg_count > 1 &&
+	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
+					 areq->assoclen,
+					 &edesc->link_tbl[tbl_off])) > 1) {
+		tbl_off += ret;
 
 		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
 			       sizeof(struct talitos_ptr), 0);
 		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
 
-		/* assoc_nents - 1 entries for assoc, 1 for IV */
-		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
-					  areq->assoclen, tbl_ptr);
-
-		/* add IV to link table */
-		tbl_ptr += sg_count - 1;
-		tbl_ptr->j_extent = 0;
-		tbl_ptr++;
-		to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
-		tbl_ptr->len = cpu_to_be16(ivsize);
-		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-
 		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
 	} else {
-		if (areq->assoclen)
-			to_talitos_ptr(&desc->ptr[1],
-				       sg_dma_address(areq->assoc), 0);
-		else
-			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
+		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
 		desc->ptr[1].j_extent = 0;
 	}
 
@@ -1150,8 +1146,6 @@
 	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
 	desc->ptr[2].len = cpu_to_be16(ivsize);
 	desc->ptr[2].j_extent = 0;
-	/* Sync needed for the aead_givencrypt case */
-	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
 
 	/* cipher key */
 	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
@@ -1167,33 +1161,24 @@
 	desc->ptr[4].len = cpu_to_be16(cryptlen);
 	desc->ptr[4].j_extent = authsize;
 
-	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
-				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
-							   : DMA_TO_DEVICE,
-				  edesc->src_chained);
+	sg_link_tbl_len = cryptlen;
+	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
+		sg_link_tbl_len += authsize;
 
-	if (sg_count == 1) {
+	if (sg_count > 1 &&
+	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
+					 sg_link_tbl_len,
+					 &edesc->link_tbl[tbl_off])) > 1) {
+		tbl_off += ret;
+		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
+		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
+					      tbl_off *
+					      sizeof(struct talitos_ptr), 0);
+		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
+					   edesc->dma_len,
+					   DMA_BIDIRECTIONAL);
+	} else
 		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
-	} else {
-		sg_link_tbl_len = cryptlen;
-
-		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
-			sg_link_tbl_len = cryptlen + authsize;
-
-		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
-					  &edesc->link_tbl[0]);
-		if (sg_count > 1) {
-			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
-			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
-			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
-						   edesc->dma_len,
-						   DMA_BIDIRECTIONAL);
-		} else {
-			/* Only one segment now, so no link tbl needed */
-			to_talitos_ptr(&desc->ptr[4],
-				       sg_dma_address(areq->src), 0);
-		}
-	}
 
 	/* cipher out */
 	desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1204,16 +1189,17 @@
 					  edesc->dst_nents ? : 1,
 					  DMA_FROM_DEVICE, edesc->dst_chained);
 
-	if (sg_count == 1) {
-		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
-	} else {
-		int tbl_off = edesc->src_nents + 1;
+	edesc->icv_ool = false;
+
+	if (sg_count > 1 &&
+	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+					      areq->assoclen, cryptlen,
+					      &edesc->link_tbl[tbl_off])) >
+	    1) {
 		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
 
 		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
 			       tbl_off * sizeof(struct talitos_ptr), 0);
-		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
-					  tbl_ptr);
 
 		/* Add an entry to the link table for ICV data */
 		tbl_ptr += sg_count - 1;
@@ -1224,13 +1210,16 @@
 
 		/* icv data follows link tables */
 		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
-			       (tbl_off + edesc->dst_nents + 1 +
-				edesc->assoc_nents) *
-			       sizeof(struct talitos_ptr), 0);
+					(edesc->src_nents + edesc->dst_nents +
+					 2) * sizeof(struct talitos_ptr) +
+					authsize, 0);
 		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
 		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
 					   edesc->dma_len, DMA_BIDIRECTIONAL);
-	}
+
+		edesc->icv_ool = true;
+	} else
+		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
 
 	/* iv out */
 	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1268,7 +1257,6 @@
  * allocate and map the extended descriptor
  */
 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
-						 struct scatterlist *assoc,
 						 struct scatterlist *src,
 						 struct scatterlist *dst,
 						 u8 *iv,
@@ -1281,8 +1269,8 @@
 						 bool encrypt)
 {
 	struct talitos_edesc *edesc;
-	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
-	bool assoc_chained = false, src_chained = false, dst_chained = false;
+	int src_nents, dst_nents, alloc_len, dma_len;
+	bool src_chained = false, dst_chained = false;
 	dma_addr_t iv_dma = 0;
 	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		      GFP_ATOMIC;
@@ -1298,48 +1286,35 @@
 	if (ivsize)
 		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 
-	if (assoclen) {
-		/*
-		 * Currently it is assumed that iv is provided whenever assoc
-		 * is.
-		 */
-		BUG_ON(!iv);
-
-		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
-		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
-			       assoc_chained);
-		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
-
-		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
-			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
-	}
-
 	if (!dst || dst == src) {
-		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
+		src_nents = sg_count(src, assoclen + cryptlen + authsize,
+				     &src_chained);
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
 	} else { /* dst && dst != src*/
-		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
+		src_nents = sg_count(src, assoclen + cryptlen +
+					  (encrypt ? 0 : authsize),
 				     &src_chained);
 		src_nents = (src_nents == 1) ? 0 : src_nents;
-		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
+		dst_nents = sg_count(dst, assoclen + cryptlen +
+					  (encrypt ? authsize : 0),
 				     &dst_chained);
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
 
 	/*
 	 * allocate space for base edesc plus the link tables,
-	 * allowing for two separate entries for ICV and generated ICV (+ 2),
-	 * and the ICV data itself
+	 * allowing for two separate entries for AD and generated ICV (+ 2),
+	 * and space for two sets of ICVs (stashed and generated)
 	 */
 	alloc_len = sizeof(struct talitos_edesc);
-	if (assoc_nents || src_nents || dst_nents) {
+	if (src_nents || dst_nents) {
 		if (is_sec1)
 			dma_len = (src_nents ? cryptlen : 0) +
 				  (dst_nents ? cryptlen : 0);
 		else
-			dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
-				  sizeof(struct talitos_ptr) + authsize;
+			dma_len = (src_nents + dst_nents + 2) *
+				  sizeof(struct talitos_ptr) + authsize * 2;
 		alloc_len += dma_len;
 	} else {
 		dma_len = 0;
@@ -1348,13 +1323,6 @@
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
 	if (!edesc) {
-		if (assoc_chained)
-			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
-		else if (assoclen)
-			dma_unmap_sg(dev, assoc,
-				     assoc_nents ? assoc_nents - 1 : 1,
-				     DMA_TO_DEVICE);
-
 		if (iv_dma)
 			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
 
@@ -1362,10 +1330,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	edesc->assoc_nents = assoc_nents;
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
-	edesc->assoc_chained = assoc_chained;
 	edesc->src_chained = src_chained;
 	edesc->dst_chained = dst_chained;
 	edesc->iv_dma = iv_dma;
@@ -1382,12 +1348,13 @@
 					      int icv_stashing, bool encrypt)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+	unsigned int authsize = crypto_aead_authsize(authenc);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 	unsigned int ivsize = crypto_aead_ivsize(authenc);
 
-	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
 				   iv, areq->assoclen, areq->cryptlen,
-				   ctx->authsize, ivsize, icv_stashing,
+				   authsize, ivsize, icv_stashing,
 				   areq->base.flags, encrypt);
 }
 
@@ -1405,14 +1372,14 @@
 	/* set encrypt */
 	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
 
-	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
+	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
 }
 
 static int aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+	unsigned int authsize = crypto_aead_authsize(authenc);
 	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-	unsigned int authsize = ctx->authsize;
 	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
 	struct talitos_edesc *edesc;
 	struct scatterlist *sg;
@@ -1437,7 +1404,7 @@
 		/* reset integrity check result bits */
 		edesc->desc.hdr_lo = 0;
 
-		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
+		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
 	}
 
 	/* Have to check the ICV with software */
@@ -1445,40 +1412,16 @@
 
 	/* stash incoming ICV for later cmp with ICV generated by the h/w */
 	if (edesc->dma_len)
-		icvdata = &edesc->link_tbl[edesc->src_nents +
-					   edesc->dst_nents + 2 +
-					   edesc->assoc_nents];
+		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
+						   edesc->dst_nents + 2];
 	else
 		icvdata = &edesc->link_tbl[0];
 
 	sg = sg_last(req->src, edesc->src_nents ? : 1);
 
-	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
-	       ctx->authsize);
+	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
 
-	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
-}
-
-static int aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	struct aead_request *areq = &req->areq;
-	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
-	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-	struct talitos_edesc *edesc;
-
-	/* allocate extended descriptor */
-	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
-
-	/* set encrypt */
-	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
-
-	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
-	/* avoid consecutive packets going out with same IV */
-	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
-
-	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
+	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
 }
 
 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
@@ -1710,7 +1653,7 @@
 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
 	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
 
-	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
+	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
 				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
 				   areq->base.flags, encrypt);
 }
@@ -1895,7 +1838,7 @@
 	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
 
-	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
+	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
 				   nbytes, 0, 0, 0, areq->base.flags, false);
 }
 
@@ -1986,7 +1929,7 @@
 		sg_init_table(req_ctx->bufsl, nsg);
 		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
 		if (nsg > 1)
-			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
+			sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
 	} else
 		req_ctx->psrc = areq->src;
@@ -2161,6 +2104,7 @@
 	union {
 		struct crypto_alg crypto;
 		struct ahash_alg hash;
+		struct aead_alg aead;
 	} alg;
 	__be32 desc_hdr_template;
 };
@@ -2168,15 +2112,16 @@
 static struct talitos_alg_template driver_algs[] = {
 	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha1),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA1_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_AESU |
@@ -2187,15 +2132,17 @@
 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA1_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha1),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha1-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2207,15 +2154,16 @@
 		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
 	},
 	{       .type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha224),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA224_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 				     DESC_HDR_SEL0_AESU |
@@ -2226,15 +2174,17 @@
 				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA224_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha224),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha224-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2246,15 +2196,16 @@
 		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha256),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA256_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_AESU |
@@ -2265,15 +2216,17 @@
 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA256_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha256),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha256-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2285,15 +2238,16 @@
 		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha384),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA384_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_AESU |
@@ -2304,15 +2258,17 @@
 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA384_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha384),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha384-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2324,15 +2280,16 @@
 		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha512),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = SHA512_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_AESU |
@@ -2343,15 +2300,17 @@
 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = SHA512_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(sha512),"
+					    "cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-sha512-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2363,15 +2322,16 @@
 		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(md5),cbc(aes))",
-			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
-			.cra_blocksize = AES_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = AES_BLOCK_SIZE,
-				.maxauthsize = MD5_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(aes))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-aes-talitos",
+				.cra_blocksize = AES_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = AES_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_AESU |
@@ -2382,15 +2342,16 @@
 		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
 	},
 	{	.type = CRYPTO_ALG_TYPE_AEAD,
-		.alg.crypto = {
-			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
-			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
-			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
-			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-			.cra_aead = {
-				.ivsize = DES3_EDE_BLOCK_SIZE,
-				.maxauthsize = MD5_DIGEST_SIZE,
-			}
+		.alg.aead = {
+			.base = {
+				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+				.cra_driver_name = "authenc-hmac-md5-"
+						   "cbc-3des-talitos",
+				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+				.cra_flags = CRYPTO_ALG_ASYNC,
+			},
+			.ivsize = DES3_EDE_BLOCK_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
 		},
 		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
 			             DESC_HDR_SEL0_DEU |
@@ -2658,15 +2619,9 @@
 	return 0;
 }
 
-static int talitos_cra_init_aead(struct crypto_tfm *tfm)
+static int talitos_cra_init_aead(struct crypto_aead *tfm)
 {
-	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
-
-	talitos_cra_init(tfm);
-
-	/* random first IV */
-	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
-
+	talitos_cra_init(crypto_aead_tfm(tfm));
 	return 0;
 }
 
@@ -2713,9 +2668,9 @@
 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
 		switch (t_alg->algt.type) {
 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
-		case CRYPTO_ALG_TYPE_AEAD:
-			crypto_unregister_alg(&t_alg->algt.alg.crypto);
 			break;
+		case CRYPTO_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&t_alg->algt.alg.aead);
 		case CRYPTO_ALG_TYPE_AHASH:
 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
 			break;
@@ -2727,7 +2682,7 @@
 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
 		talitos_unregister_rng(dev);
 
-	for (i = 0; i < priv->num_channels; i++)
+	for (i = 0; priv->chan && i < priv->num_channels; i++)
 		kfree(priv->chan[i].fifo);
 
 	kfree(priv->chan);
@@ -2774,15 +2729,11 @@
 		alg->cra_ablkcipher.geniv = "eseqiv";
 		break;
 	case CRYPTO_ALG_TYPE_AEAD:
-		alg = &t_alg->algt.alg.crypto;
-		alg->cra_init = talitos_cra_init_aead;
-		alg->cra_type = &crypto_aead_type;
-		alg->cra_aead.setkey = aead_setkey;
-		alg->cra_aead.setauthsize = aead_setauthsize;
-		alg->cra_aead.encrypt = aead_encrypt;
-		alg->cra_aead.decrypt = aead_decrypt;
-		alg->cra_aead.givencrypt = aead_givencrypt;
-		alg->cra_aead.geniv = "<built-in>";
+		alg = &t_alg->algt.alg.aead.base;
+		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
+		t_alg->algt.alg.aead.setkey = aead_setkey;
+		t_alg->algt.alg.aead.encrypt = aead_encrypt;
+		t_alg->algt.alg.aead.decrypt = aead_decrypt;
 		break;
 	case CRYPTO_ALG_TYPE_AHASH:
 		alg = &t_alg->algt.alg.hash.halg.base;
@@ -3041,7 +2992,7 @@
 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
 			struct talitos_crypto_alg *t_alg;
-			char *name = NULL;
+			struct crypto_alg *alg = NULL;
 
 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
 			if (IS_ERR(t_alg)) {
@@ -3053,21 +3004,26 @@
 
 			switch (t_alg->algt.type) {
 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
-			case CRYPTO_ALG_TYPE_AEAD:
 				err = crypto_register_alg(
 						&t_alg->algt.alg.crypto);
-				name = t_alg->algt.alg.crypto.cra_driver_name;
+				alg = &t_alg->algt.alg.crypto;
 				break;
+
+			case CRYPTO_ALG_TYPE_AEAD:
+				err = crypto_register_aead(
+					&t_alg->algt.alg.aead);
+				alg = &t_alg->algt.alg.aead.base;
+				break;
+
 			case CRYPTO_ALG_TYPE_AHASH:
 				err = crypto_register_ahash(
 						&t_alg->algt.alg.hash);
-				name =
-				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
+				alg = &t_alg->algt.alg.hash.halg.base;
 				break;
 			}
 			if (err) {
 				dev_err(dev, "%s alg registration failed\n",
-					name);
+					alg->cra_driver_name);
 				kfree(t_alg);
 			} else
 				list_add_tail(&t_alg->entry, &priv->alg_list);
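
The talitos changes above are part of the conversion from the legacy cra_aead/givencrypt interface to the new struct aead_alg API: the algorithm templates now fill in .alg.aead, registration goes through crypto_register_aead()/crypto_unregister_aead(), and IV generation (the removed aead_givencrypt()) is left to the generic IV generator templates rather than the driver. As a rough sketch of the new-style registration only (the my_* names are hypothetical and not part of this patch), a driver entry looks roughly like:

	static struct aead_alg my_aead_alg = {
		.base = {
			.cra_name		= "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name	= "authenc-hmac-sha1-cbc-aes-mydrv",
			.cra_blocksize		= AES_BLOCK_SIZE,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_ctxsize		= sizeof(struct my_ctx),
			.cra_module		= THIS_MODULE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.init		= my_aead_init,		/* int (*)(struct crypto_aead *) */
		.setkey		= my_aead_setkey,
		.encrypt	= my_aead_encrypt,
		.decrypt	= my_aead_decrypt,
	};

	/* probe():  err = crypto_register_aead(&my_aead_alg); */
	/* remove(): crypto_unregister_aead(&my_aead_alg);     */
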
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 314daf5..0090f32 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -52,12 +52,7 @@
 	__be32 ptr;     /* address */
 };
 
-static const struct talitos_ptr zero_entry = {
-	.len = 0,
-	.j_extent = 0,
-	.eptr = 0,
-	.ptr = 0
-};
+static const struct talitos_ptr zero_entry;
 
 /* descriptor */
 struct talitos_desc {
@@ -154,6 +149,7 @@
 
 	/* hwrng device */
 	struct hwrng rng;
+	bool rng_registered;
 };
 
 extern int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index e79e567..263af70 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -84,6 +84,7 @@
 	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
+	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	pagefault_enable();
@@ -103,6 +104,7 @@
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
+		enable_kernel_vsx();
 		aes_p8_encrypt(src, dst, &ctx->enc_key);
 		pagefault_enable();
 		preempt_enable();
@@ -119,6 +121,7 @@
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
+		enable_kernel_vsx();
 		aes_p8_decrypt(src, dst, &ctx->dec_key);
 		pagefault_enable();
 		preempt_enable();
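
Here and in the other vmx glue files below, enable_kernel_vsx() is added alongside enable_kernel_altivec() because the P8 assembly uses VSX registers, and enabling AltiVec alone does not make the full VSX state available to kernel code. The bracketing pattern around each call into the assembly, shown in isolation from the hunks above, is:

	preempt_disable();
	pagefault_disable();
	enable_kernel_altivec();
	enable_kernel_vsx();		/* VSX state must be enabled as well */
	aes_p8_encrypt(src, dst, &ctx->enc_key);
	pagefault_enable();
	preempt_enable();
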
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 7299995..0b8fe2e 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -85,6 +85,7 @@
 	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
+	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 	pagefault_enable();
@@ -115,6 +116,7 @@
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
+		enable_kernel_vsx();
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
@@ -155,6 +157,7 @@
 		preempt_disable();
 		pagefault_disable();
 		enable_kernel_altivec();
+		enable_kernel_vsx();
 
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt(desc, &walk);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index 7adae42..ee1306c 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -82,6 +82,7 @@
 
 	pagefault_disable();
 	enable_kernel_altivec();
+	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	pagefault_enable();
 
@@ -100,6 +101,7 @@
 
 	pagefault_disable();
 	enable_kernel_altivec();
+	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
 	pagefault_enable();
 
@@ -113,6 +115,7 @@
 			    struct scatterlist *src, unsigned int nbytes)
 {
 	int ret;
+	u64 inc;
 	struct blkcipher_walk walk;
 	struct p8_aes_ctr_ctx *ctx =
 		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
@@ -131,6 +134,7 @@
 		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 			pagefault_disable();
 			enable_kernel_altivec();
+			enable_kernel_vsx();
 			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
 						    walk.dst.virt.addr,
 						    (nbytes &
@@ -140,7 +144,12 @@
 						    walk.iv);
 			pagefault_enable();
 
-			crypto_inc(walk.iv, AES_BLOCK_SIZE);
+			/* We need to update IV mostly for last bytes/round */
+			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
+			if (inc > 0)
+				while (inc--)
+					crypto_inc(walk.iv, AES_BLOCK_SIZE);
+
 			nbytes &= AES_BLOCK_SIZE - 1;
 			ret = blkcipher_walk_done(desc, &walk, nbytes);
 		}
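
The change above fixes the IV bookkeeping for the CTR walk: aes_p8_ctr32_encrypt_blocks() consumes (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE blocks per pass, so the software counter has to advance by that many blocks, not just once. Shown in isolation, the update amounts to:

	/* advance the counter once per block actually processed this pass */
	u64 inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;

	while (inc--)
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
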
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 6c5c20c..2280539 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -1437,28 +1437,28 @@
 	?vperm		v31,v31,$out0,$keyperm
 	lvx		v25,$x10,$key_		# pre-load round[2]
 
-	vadduwm		$two,$one,$one
+	vadduqm		$two,$one,$one
 	subi		$inp,$inp,15		# undo "caller"
 	$SHL		$len,$len,4
 
-	vadduwm		$out1,$ivec,$one	# counter values ...
-	vadduwm		$out2,$ivec,$two
+	vadduqm		$out1,$ivec,$one	# counter values ...
+	vadduqm		$out2,$ivec,$two
 	vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
 	 le?li		$idx,8
-	vadduwm		$out3,$out1,$two
+	vadduqm		$out3,$out1,$two
 	vxor		$out1,$out1,$rndkey0
 	 le?lvsl	$inpperm,0,$idx
-	vadduwm		$out4,$out2,$two
+	vadduqm		$out4,$out2,$two
 	vxor		$out2,$out2,$rndkey0
 	 le?vspltisb	$tmp,0x0f
-	vadduwm		$out5,$out3,$two
+	vadduqm		$out5,$out3,$two
 	vxor		$out3,$out3,$rndkey0
 	 le?vxor	$inpperm,$inpperm,$tmp	# transform for lvx_u/stvx_u
-	vadduwm		$out6,$out4,$two
+	vadduqm		$out6,$out4,$two
 	vxor		$out4,$out4,$rndkey0
-	vadduwm		$out7,$out5,$two
+	vadduqm		$out7,$out5,$two
 	vxor		$out5,$out5,$rndkey0
-	vadduwm		$ivec,$out6,$two	# next counter value
+	vadduqm		$ivec,$out6,$two	# next counter value
 	vxor		$out6,$out6,$rndkey0
 	vxor		$out7,$out7,$rndkey0
 
@@ -1594,27 +1594,27 @@
 
 	vcipherlast	$in0,$out0,$in0
 	vcipherlast	$in1,$out1,$in1
-	 vadduwm	$out1,$ivec,$one	# counter values ...
+	 vadduqm	$out1,$ivec,$one	# counter values ...
 	vcipherlast	$in2,$out2,$in2
-	 vadduwm	$out2,$ivec,$two
+	 vadduqm	$out2,$ivec,$two
 	 vxor		$out0,$ivec,$rndkey0	# ... xored with rndkey[0]
 	vcipherlast	$in3,$out3,$in3
-	 vadduwm	$out3,$out1,$two
+	 vadduqm	$out3,$out1,$two
 	 vxor		$out1,$out1,$rndkey0
 	vcipherlast	$in4,$out4,$in4
-	 vadduwm	$out4,$out2,$two
+	 vadduqm	$out4,$out2,$two
 	 vxor		$out2,$out2,$rndkey0
 	vcipherlast	$in5,$out5,$in5
-	 vadduwm	$out5,$out3,$two
+	 vadduqm	$out5,$out3,$two
 	 vxor		$out3,$out3,$rndkey0
 	vcipherlast	$in6,$out6,$in6
-	 vadduwm	$out6,$out4,$two
+	 vadduqm	$out6,$out4,$two
 	 vxor		$out4,$out4,$rndkey0
 	vcipherlast	$in7,$out7,$in7
-	 vadduwm	$out7,$out5,$two
+	 vadduqm	$out7,$out5,$two
 	 vxor		$out5,$out5,$rndkey0
 	le?vperm	$in0,$in0,$in0,$inpperm
-	 vadduwm	$ivec,$out6,$two	# next counter value
+	 vadduqm	$ivec,$out6,$two	# next counter value
 	 vxor		$out6,$out6,$rndkey0
 	le?vperm	$in1,$in1,$in1,$inpperm
 	 vxor		$out7,$out7,$rndkey0
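
The vadduwm to vadduqm substitutions change the vector counter arithmetic from four independent 32-bit word adds to a single 128-bit quadword add, so carries now propagate across word boundaries and the in-register counters stay consistent with the byte-wise crypto_inc() used on the C side. A hedged scalar illustration of the difference (not taken from the patch):

	/* vadduwm: each 32-bit lane wraps on its own, no carry between lanes.
	 * vadduqm: one 128-bit add; a carry out of the low half moves into
	 * the high half, as sketched here.
	 */
	static void ctr128_add(u64 *lo, u64 *hi, u64 inc)
	{
		u64 old = *lo;

		*lo += inc;
		if (*lo < old)		/* carry out of the low 64 bits */
			(*hi)++;
	}
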
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index b5e2900..2183a2e 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -119,6 +119,7 @@
 	preempt_disable();
 	pagefault_disable();
 	enable_kernel_altivec();
+	enable_kernel_vsx();
 	enable_kernel_fp();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
 	pagefault_enable();
@@ -149,6 +150,7 @@
 			preempt_disable();
 			pagefault_disable();
 			enable_kernel_altivec();
+			enable_kernel_vsx();
 			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
@@ -163,6 +165,7 @@
 			preempt_disable();
 			pagefault_disable();
 			enable_kernel_altivec();
+			enable_kernel_vsx();
 			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 			pagefault_enable();
@@ -193,6 +196,7 @@
 			preempt_disable();
 			pagefault_disable();
 			enable_kernel_altivec();
+			enable_kernel_vsx();
 			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
index 0a6f899..d8429cb 100644
--- a/drivers/crypto/vmx/ghashp8-ppc.pl
+++ b/drivers/crypto/vmx/ghashp8-ppc.pl
@@ -61,6 +61,12 @@
 	mtspr		256,r0
 	li		r10,0x30
 	lvx_u		$H,0,r4			# load H
+	le?xor		r7,r7,r7
+	le?addi		r7,r7,0x8		# need a vperm start with 08
+	le?lvsr		5,0,r7
+	le?vspltisb	6,0x0f
+	le?vxor		5,5,6			# set a b-endian mask
+	le?vperm	$H,$H,$H,5
 
 	vspltisb	$xC2,-16		# 0xf0
 	vspltisb	$t0,1			# one
diff --git a/drivers/crypto/vmx/ppc-xlate.pl b/drivers/crypto/vmx/ppc-xlate.pl
index a591884..b999733 100644
--- a/drivers/crypto/vmx/ppc-xlate.pl
+++ b/drivers/crypto/vmx/ppc-xlate.pl
@@ -169,6 +169,7 @@
 my $vpmsubh	= sub { vcrypto_op(@_, 1096); };
 my $vpmsumw	= sub { vcrypto_op(@_, 1160); };
 my $vaddudm	= sub { vcrypto_op(@_, 192);  };
+my $vadduqm	= sub { vcrypto_op(@_, 256);  };
 
 my $mtsle	= sub {
     my ($f, $arg) = @_;
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 7d99d13..f9901f5 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -1,7 +1,7 @@
 /*
  * exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
  *
- * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2014-2015 Samsung Electronics Co., Ltd.
  * Author : Chanwoo Choi <cw00.choi@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -82,6 +82,15 @@
 	PPMU_EVENT(mscl),
 	PPMU_EVENT(fimd0x),
 	PPMU_EVENT(fimd1x),
+
+	/* Only for Exynos5433 SoCs */
+	PPMU_EVENT(d0-cpu),
+	PPMU_EVENT(d0-general),
+	PPMU_EVENT(d0-rt),
+	PPMU_EVENT(d1-cpu),
+	PPMU_EVENT(d1-general),
+	PPMU_EVENT(d1-rt),
+
 	{ /* sentinel */ },
 };
 
@@ -96,6 +105,9 @@
 	return -EINVAL;
 }
 
+/*
+ * The devfreq-event ops structure for PPMU v1.1
+ */
 static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
 {
 	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
@@ -200,10 +212,158 @@
 	.get_event = exynos_ppmu_get_event,
 };
 
+/*
+ * The devfreq-event ops structure for PPMU v2.0
+ */
+static int exynos_ppmu_v2_disable(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	u32 pmnc, clear;
+
+	/* Disable all counters */
+	clear = (PPMU_CCNT_MASK | PPMU_PMCNT0_MASK | PPMU_PMCNT1_MASK
+		| PPMU_PMCNT2_MASK | PPMU_PMCNT3_MASK);
+
+	__raw_writel(clear, info->ppmu.base + PPMU_V2_FLAG);
+	__raw_writel(clear, info->ppmu.base + PPMU_V2_INTENC);
+	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNTENC);
+	__raw_writel(clear, info->ppmu.base + PPMU_V2_CNT_RESET);
+
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG0);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG1);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_CFG2);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CIG_RESULT);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CNT_AUTO);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV0_TYPE);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV1_TYPE);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV2_TYPE);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_CH_EV3_TYPE);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_V);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_ID_A);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_V);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_SM_OTHERS_A);
+	__raw_writel(0x0, info->ppmu.base + PPMU_V2_INTERRUPT_RESET);
+
+	/* Disable PPMU */
+	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+	return 0;
+}
+
+static int exynos_ppmu_v2_set_event(struct devfreq_event_dev *edev)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	u32 pmnc, cntens;
+
+	/* Enable all counters */
+	cntens = __raw_readl(info->ppmu.base + PPMU_V2_CNTENS);
+	cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	__raw_writel(cntens, info->ppmu.base + PPMU_V2_CNTENS);
+
+	/* Set the event of Read/Write data count  */
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		__raw_writel(PPMU_V2_RO_DATA_CNT | PPMU_V2_WO_DATA_CNT,
+				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+		break;
+	case PPMU_PMNCNT3:
+		__raw_writel(PPMU_V2_EVT3_RW_DATA_CNT,
+				info->ppmu.base + PPMU_V2_CH_EVx_TYPE(id));
+		break;
+	}
+
+	/* Reset cycle counter/performance counter and enable PPMU */
+	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+	pmnc &= ~(PPMU_PMNC_ENABLE_MASK
+			| PPMU_PMNC_COUNTER_RESET_MASK
+			| PPMU_PMNC_CC_RESET_MASK
+			| PPMU_PMNC_CC_DIVIDER_MASK
+			| PPMU_V2_PMNC_START_MODE_MASK);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
+	pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
+	pmnc |= (PPMU_V2_MODE_MANUAL << PPMU_V2_PMNC_START_MODE_SHIFT);
+	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+	return 0;
+}
+
+static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
+				    struct devfreq_event_data *edata)
+{
+	struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
+	int id = exynos_ppmu_find_ppmu_id(edev);
+	u32 pmnc, cntenc;
+	u32 pmcnt_high, pmcnt_low;
+	u64 load_count = 0;
+
+	/* Disable PPMU */
+	pmnc = __raw_readl(info->ppmu.base + PPMU_V2_PMNC);
+	pmnc &= ~PPMU_PMNC_ENABLE_MASK;
+	__raw_writel(pmnc, info->ppmu.base + PPMU_V2_PMNC);
+
+	/* Read cycle count and performance count */
+	edata->total_count = __raw_readl(info->ppmu.base + PPMU_V2_CCNT);
+
+	switch (id) {
+	case PPMU_PMNCNT0:
+	case PPMU_PMNCNT1:
+	case PPMU_PMNCNT2:
+		load_count = __raw_readl(info->ppmu.base + PPMU_V2_PMNCT(id));
+		break;
+	case PPMU_PMNCNT3:
+		pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
+		pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
+		load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
+		break;
+	}
+	edata->load_count = load_count;
+
+	/* Disable all counters */
+	cntenc = __raw_readl(info->ppmu.base + PPMU_V2_CNTENC);
+	cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
+	__raw_writel(cntenc, info->ppmu.base + PPMU_V2_CNTENC);
+
+	dev_dbg(&edev->dev, "%25s (load: %ld / %ld)\n", edev->desc->name,
+					edata->load_count, edata->total_count);
+	return 0;
+}
+
+static const struct devfreq_event_ops exynos_ppmu_v2_ops = {
+	.disable = exynos_ppmu_v2_disable,
+	.set_event = exynos_ppmu_v2_set_event,
+	.get_event = exynos_ppmu_v2_get_event,
+};
+
+static const struct of_device_id exynos_ppmu_id_match[] = {
+	{
+		.compatible = "samsung,exynos-ppmu",
+		.data = (void *)&exynos_ppmu_ops,
+	}, {
+		.compatible = "samsung,exynos-ppmu-v2",
+		.data = (void *)&exynos_ppmu_v2_ops,
+	},
+	{ /* sentinel */ },
+};
+
+static struct devfreq_event_ops *exynos_bus_get_ops(struct device_node *np)
+{
+	const struct of_device_id *match;
+
+	match = of_match_node(exynos_ppmu_id_match, np);
+	return (struct devfreq_event_ops *)match->data;
+}
+
 static int of_get_devfreq_events(struct device_node *np,
 				 struct exynos_ppmu *info)
 {
 	struct devfreq_event_desc *desc;
+	struct devfreq_event_ops *event_ops;
 	struct device *dev = info->dev;
 	struct device_node *events_np, *node;
 	int i, j, count;
@@ -214,6 +374,7 @@
 			"failed to get child node of devfreq-event devices\n");
 		return -EINVAL;
 	}
+	event_ops = exynos_bus_get_ops(np);
 
 	count = of_get_child_count(events_np);
 	desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
@@ -238,7 +399,7 @@
 			continue;
 		}
 
-		desc[j].ops = &exynos_ppmu_ops;
+		desc[j].ops = event_ops;
 		desc[j].driver_data = info;
 
 		of_property_read_string(node, "event-name", &desc[j].name);
@@ -354,11 +515,6 @@
 	return 0;
 }
 
-static struct of_device_id exynos_ppmu_id_match[] = {
-	{ .compatible = "samsung,exynos-ppmu", },
-	{ /* sentinel */ },
-};
-
 static struct platform_driver exynos_ppmu_driver = {
 	.probe	= exynos_ppmu_probe,
 	.remove	= exynos_ppmu_remove,
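
With two PPMU generations now sharing one driver, the ops to use are carried in the .data field of exynos_ppmu_id_match[] and looked up per device node. A minimal sketch of that dispatch pattern (the NULL check is added here for illustration; of_match_node() returns NULL for nodes not in the table):

	static const struct devfreq_event_ops *get_ops(struct device_node *np)
	{
		const struct of_device_id *match;

		match = of_match_node(exynos_ppmu_id_match, np);
		if (!match)
			return NULL;	/* unknown compatible string */

		return match->data;
	}
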
diff --git a/drivers/devfreq/event/exynos-ppmu.h b/drivers/devfreq/event/exynos-ppmu.h
index 4e831d4..05774c4 100644
--- a/drivers/devfreq/event/exynos-ppmu.h
+++ b/drivers/devfreq/event/exynos-ppmu.h
@@ -26,6 +26,9 @@
 	PPMU_PMNCNT_MAX,
 };
 
+/***
+ * PPMUv1.1 Definitions
+ */
 enum ppmu_event_type {
 	PPMU_RO_BUSY_CYCLE_CNT	= 0x0,
 	PPMU_WO_BUSY_CYCLE_CNT	= 0x1,
@@ -90,4 +93,71 @@
 #define PPMU_PMNCT(x)			(PPMU_PMCNT0 + (0x10 * x))
 #define PPMU_BEVTxSEL(x)		(PPMU_BEVT0SEL + (0x100 * x))
 
+/***
+ * PPMU_V2.0 definitions
+ */
+enum ppmu_v2_mode {
+	PPMU_V2_MODE_MANUAL = 0,
+	PPMU_V2_MODE_AUTO = 1,
+	PPMU_V2_MODE_CIG = 2,	/* CIG (Conditional Interrupt Generation) */
+};
+
+enum ppmu_v2_event_type {
+	PPMU_V2_RO_DATA_CNT	= 0x4,
+	PPMU_V2_WO_DATA_CNT	= 0x5,
+
+	PPMU_V2_EVT3_RW_DATA_CNT = 0x22,	/* Only for Event3 */
+};
+
+enum ppmu_V2_reg {
+	/* PPC control register */
+	PPMU_V2_PMNC		= 0x04,
+	PPMU_V2_CNTENS		= 0x08,
+	PPMU_V2_CNTENC		= 0x0c,
+	PPMU_V2_INTENS		= 0x10,
+	PPMU_V2_INTENC		= 0x14,
+	PPMU_V2_FLAG		= 0x18,
+
+	/* Cycle Counter and Performance Event Counter Register */
+	PPMU_V2_CCNT		= 0x48,
+	PPMU_V2_PMCNT0		= 0x34,
+	PPMU_V2_PMCNT1		= 0x38,
+	PPMU_V2_PMCNT2		= 0x3c,
+	PPMU_V2_PMCNT3_LOW	= 0x40,
+	PPMU_V2_PMCNT3_HIGH	= 0x44,
+
+	/* Bus Event Generator */
+	PPMU_V2_CIG_CFG0		= 0x1c,
+	PPMU_V2_CIG_CFG1		= 0x20,
+	PPMU_V2_CIG_CFG2		= 0x24,
+	PPMU_V2_CIG_RESULT	= 0x28,
+	PPMU_V2_CNT_RESET	= 0x2c,
+	PPMU_V2_CNT_AUTO		= 0x30,
+	PPMU_V2_CH_EV0_TYPE	= 0x200,
+	PPMU_V2_CH_EV1_TYPE	= 0x204,
+	PPMU_V2_CH_EV2_TYPE	= 0x208,
+	PPMU_V2_CH_EV3_TYPE	= 0x20c,
+	PPMU_V2_SM_ID_V		= 0x220,
+	PPMU_V2_SM_ID_A		= 0x224,
+	PPMU_V2_SM_OTHERS_V	= 0x228,
+	PPMU_V2_SM_OTHERS_A	= 0x22c,
+	PPMU_V2_INTERRUPT_RESET	= 0x260,
+};
+
+/* PMNC register */
+#define PPMU_V2_PMNC_START_MODE_SHIFT	20
+#define PPMU_V2_PMNC_START_MODE_MASK	(0x3 << PPMU_V2_PMNC_START_MODE_SHIFT)
+
+#define PPMU_PMNC_CC_RESET_SHIFT	2
+#define PPMU_PMNC_COUNTER_RESET_SHIFT	1
+#define PPMU_PMNC_ENABLE_SHIFT		0
+#define PPMU_PMNC_START_MODE_MASK	BIT(16)
+#define PPMU_PMNC_CC_DIVIDER_MASK	BIT(3)
+#define PPMU_PMNC_CC_RESET_MASK		BIT(2)
+#define PPMU_PMNC_COUNTER_RESET_MASK	BIT(1)
+#define PPMU_PMNC_ENABLE_MASK		BIT(0)
+
+#define PPMU_V2_PMNCT(x)		(PPMU_V2_PMCNT0 + (0x4 * x))
+#define PPMU_V2_CH_EVx_TYPE(x)		(PPMU_V2_CH_EV0_TYPE + (0x4 * x))
+
 #endif /* __EXYNOS_PPMU_H__ */
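
PMCNT3 is the one wide counter in this register map: its value is split across PPMU_V2_PMCNT3_LOW and the low 8 bits of PPMU_V2_PMCNT3_HIGH. When combining the two halves into a u64, the high word should be widened before the shift, since shifting a 32-bit value left by 32 is undefined in C. A hedged sketch of the read-out (base is the mapped PPMU register base):

	u32 hi = __raw_readl(base + PPMU_V2_PMCNT3_HIGH) & 0xff;
	u32 lo = __raw_readl(base + PPMU_V2_PMCNT3_LOW);
	u64 count = ((u64)hi << 32) | lo;	/* widen, then shift */
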
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 88d474b..b458475 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -33,27 +33,29 @@
 
 comment "DMA Devices"
 
-config INTEL_MIC_X100_DMA
-	tristate "Intel MIC X100 DMA Driver"
-	depends on 64BIT && X86 && INTEL_MIC_BUS
-	select DMA_ENGINE
-	help
-	  This enables DMA support for the Intel Many Integrated Core
-	  (MIC) family of PCIe form factor coprocessor X100 devices that
-	  run a 64 bit Linux OS. This driver will be used by both MIC
-	  host and card drivers.
-
-	  If you are building host kernel with a MIC device or a card
-	  kernel for a MIC device, then say M (recommended) or Y, else
-	  say N. If unsure say N.
-
-	  More information about the Intel MIC family as well as the Linux
-	  OS and tools for MIC to use with this driver are available from
-	  <http://software.intel.com/en-us/mic-developer>.
-
+#core
 config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	bool
+
+config DMA_ENGINE
+	bool
+
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
+config DMA_ACPI
+	def_bool y
+	depends on ACPI
+
+config DMA_OF
+	def_bool y
+	depends on OF
+	select DMA_ENGINE
+
+#devices
 config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA
@@ -63,9 +65,181 @@
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support
 
+config AMCC_PPC440SPE_ADMA
+	tristate "AMCC PPC440SPe ADMA support"
+	depends on 440SPe || 440SP
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Enable support for the AMCC PPC440SPe RAID engines.
+
+config AT_HDMAC
+	tristate "Atmel AHB DMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel AHB DMA controller.
+
+config AT_XDMAC
+	tristate "Atmel XDMA support"
+	depends on ARCH_AT91
+	select DMA_ENGINE
+	help
+	  Support the Atmel XDMA controller.
+
+config AXI_DMAC
+	tristate "Analog Devices AXI-DMAC DMA support"
+	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
+	  controller is often used in Analog Device's reference designs for FPGA
+	  controller is often used in Analog Devices' reference designs for FPGA
+
+config COH901318
+	bool "ST-Ericsson COH901318 DMA support"
+	select DMA_ENGINE
+	depends on ARCH_U300
+	help
+	  Enable support for ST-Ericsson COH 901 318 DMA.
+
+config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on ARCH_BCM2835
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4740
+	tristate "JZ4740 DMA support"
+	depends on MACH_JZ4740
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
+config DMA_JZ4780
+	tristate "JZ4780 DMA support"
+	depends on MACH_JZ4780
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+	  If you have a board based on such a SoC and wish to use DMA for
+	  devices which can use the DMA controller, say Y or M here.
+
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	select TI_DMA_CROSSBAR if SOC_DRA7XX
+
+config DMA_SA11X0
+	tristate "SA-11x0 DMA support"
+	depends on ARCH_SA1100
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the DMA engine found on Intel StrongARM SA-1100 and
+	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
+	  devices.
+
+config DMA_SUN4I
+	tristate "Allwinner A10 DMA SoCs support"
+	depends on MACH_SUN4I || MACH_SUN5I || MACH_SUN7I
+	default (MACH_SUN4I || MACH_SUN5I || MACH_SUN7I)
+	select DMA_ENGINE
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the DMA controller present in the sun4i,
+	  sun5i and sun7i Allwinner ARM SoCs.
+
+config DMA_SUN6I
+	tristate "Allwinner A31 SoCs DMA support"
+	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
+	depends on RESET_CONTROLLER
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the DMA engine first found in Allwinner A31 SoCs.
+
+config EP93XX_DMA
+	bool "Cirrus Logic EP93xx DMA support"
+	depends on ARCH_EP93XX
+	select DMA_ENGINE
+	help
+	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
+config FSL_DMA
+	tristate "Freescale Elo series DMA support"
+	depends on FSL_SOC
+	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	---help---
+	  Enable support for the Freescale Elo series DMA controllers.
+	  The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
+	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
+	  some Txxx and Bxxx parts.
+
+config FSL_EDMA
+	tristate "Freescale eDMA engine support"
+	depends on OF
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the Freescale eDMA engine with programmable channel
+	  multiplexing capability for DMA request sources(slot).
+	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+
+config FSL_RAID
+        tristate "Freescale RAID engine Support"
+        depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+        select DMA_ENGINE
+        select DMA_ENGINE_RAID
+        ---help---
+          Enable support for Freescale RAID Engine. RAID Engine is
+          available on some QorIQ SoCs (like P5020/P5040). It has
+          the capability to offload memcpy, xor and pq computation
+	  for raid5/6.
+
+config IMG_MDC_DMA
+	tristate "IMG MDC support"
+	depends on MIPS || COMPILE_TEST
+	depends on MFD_SYSCON
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the IMG multi-threaded DMA controller (MDC).
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MXC
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MXC
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51/53/6 chips.
+
+config IDMA64
+	tristate "Intel integrated DMA 64-bit support"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable DMA support for Intel Low Power Subsystem such as found on
+	  Intel Skylake PCH.
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
-	depends on PCI && X86
+	depends on PCI && X86_64
 	select DMA_ENGINE
 	select DMA_ENGINE_RAID
 	select DCA
@@ -85,45 +259,69 @@
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
-source "drivers/dma/dw/Kconfig"
-
-config AT_HDMAC
-	tristate "Atmel AHB DMA support"
-	depends on ARCH_AT91
+config INTEL_MIC_X100_DMA
+	tristate "Intel MIC X100 DMA Driver"
+	depends on 64BIT && X86 && INTEL_MIC_BUS
 	select DMA_ENGINE
 	help
-	  Support the Atmel AHB DMA controller.
+	  This enables DMA support for the Intel Many Integrated Core
+	  (MIC) family of PCIe form factor coprocessor X100 devices that
+	  run a 64 bit Linux OS. This driver will be used by both MIC
+	  host and card drivers.
 
-config AT_XDMAC
-	tristate "Atmel XDMA support"
-	depends on ARCH_AT91
+	  If you are building host kernel with a MIC device or a card
+	  kernel for a MIC device, then say M (recommended) or Y, else
+	  say N. If unsure say N.
+
+	  More information about the Intel MIC family as well as the Linux
+	  OS and tools for MIC to use with this driver are available from
+	  <http://software.intel.com/en-us/mic-developer>.
+
+config K3_DMA
+	tristate "Hisilicon K3 DMA support"
+	depends on ARCH_HI3xxx
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support the DMA engine for Hisilicon K3 platform
+	  devices.
+
+config LPC18XX_DMAMUX
+	bool "NXP LPC18xx/43xx DMA MUX for PL080"
+	depends on ARCH_LPC18XX || COMPILE_TEST
+	depends on OF && AMBA_PL08X
+	select MFD_SYSCON
+	help
+	  Enable support for DMA on NXP LPC18xx/43xx platforms
+	  with PL080 and multiplexed DMA request lines.
+
+config MMP_PDMA
+	bool "MMP PDMA support"
+	depends on (ARCH_MMP || ARCH_PXA)
 	select DMA_ENGINE
 	help
-	  Support the Atmel XDMA controller.
+	  Support the MMP PDMA engine for PXA and MMP platform.
 
-config FSL_DMA
-	tristate "Freescale Elo series DMA support"
-	depends on FSL_SOC
+config MMP_TDMA
+	bool "MMP Two-Channel DMA support"
+	depends on ARCH_MMP
 	select DMA_ENGINE
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	---help---
-	  Enable support for the Freescale Elo series DMA controllers.
-	  The Elo is the DMA controller on some mpc82xx and mpc83xx parts, the
-	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
-	  some Txxx and Bxxx parts.
+	select MMP_SRAM
+	help
+	  Support the MMP Two-Channel DMA engine.
+	  This engine is used for MMP Audio DMA and pxa910 SQU.
+	  It needs the sram driver under mach-mmp.
 
-config FSL_RAID
-        tristate "Freescale RAID engine Support"
-        depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
-        select DMA_ENGINE
-        select DMA_ENGINE_RAID
-        ---help---
-          Enable support for Freescale RAID Engine. RAID Engine is
-          available on some QorIQ SoCs (like P5020/P5040). It has
-          the capability to offload memcpy, xor and pq computation
-	  for raid5/6.
-
-source "drivers/dma/hsu/Kconfig"
+config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_OF
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.
+ 
+	  Say Y here if you have a MOXA ART based SoC and want to use this
+	  DMA controller, otherwise say N.
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
@@ -132,8 +330,6 @@
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
 
-source "drivers/dma/bestcomm/Kconfig"
-
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
@@ -143,6 +339,15 @@
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MXS_DMA
+	bool "MXS DMA support"
+	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+	select STMP_DEVICE
+	select DMA_ENGINE
+	help
+	  Support the MXS DMA engine. This engine including APBH-DMA
+	  and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
 	depends on ARCH_MXC
@@ -162,6 +367,36 @@
 	  To avoid bloating the irq_desc[] array we allocate a sufficient
 	  number of IRQ slots and map them dynamically to specific sources.
 
+config NBPFAXI_DMA
+	tristate "Renesas Type-AXI NBPF DMA support"
+	select DMA_ENGINE
+	depends on ARM || COMPILE_TEST
+	help
+	  Support for "Type-AXI" NBPF DMA IPs from Renesas
+
+config PCH_DMA
+	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
+	depends on PCI && (X86_32 || COMPILE_TEST)
+	select DMA_ENGINE
+	help
+	  Enable support for Intel EG20T PCH DMA engine.
+
+	  This driver also can be used for LAPIS Semiconductor IOH(Input/
+	  Output Hub), ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+
+config PL330_DMA
+	tristate "DMA API Driver for PL330"
+	select DMA_ENGINE
+	depends on ARM_AMBA
+	help
+	  Select if your platform has one or more PL330 DMACs.
+	  You need to provide platform specific settings via
+	  platform_data for a dma-pl330 device.
+
 config PXA_DMA
 	bool "PXA DMA support"
 	depends on (ARCH_MMP || ARCH_PXA)
@@ -173,6 +408,41 @@
 	  16 to 32 channels for peripheral to memory or memory to memory
 	  transfers.
 
+config QCOM_BAM_DMA
+	tristate "QCOM BAM DMA support"
+	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	---help---
+	  Enable support for the QCOM BAM DMA controller.  This controller
+	  provides DMA capabilities for a variety of on-chip devices.
+
+config SIRF_DMA
+	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
+	depends on ARCH_SIRF
+	select DMA_ENGINE
+	help
+	  Enable support for the CSR SiRFprimaII DMA engine.
+
+config STE_DMA40
+	bool "ST-Ericsson DMA40 support"
+	depends on ARCH_U8500
+	select DMA_ENGINE
+	help
+	  Support for ST-Ericsson DMA40 controller
+
+config S3C24XX_DMAC
+	tristate "Samsung S3C24XX DMA support"
+	depends on ARCH_S3C24XX
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Support for the Samsung S3C24XX DMA controller driver. The
+	  DMA controller has multiple DMA channels which can be
+	  configured for different peripherals like audio, UART, SPI.
+	  The DMA controller can transfer data from memory to peripheral,
+	  peripheral to memory, peripheral to peripheral and memory to memory.
+
 config TXX9_DMAC
 	tristate "Toshiba TXx9 SoC DMA support"
 	depends on MACH_TX49XX || MACH_TX39XX
@@ -193,44 +463,6 @@
 	  This DMA controller transfers data from memory to peripheral fifo
 	  or vice versa. It does not support memory to memory data transfer.
 
-config S3C24XX_DMAC
-	tristate "Samsung S3C24XX DMA support"
-	depends on ARCH_S3C24XX
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support for the Samsung S3C24XX DMA controller driver. The
-	  DMA controller is having multiple DMA channels which can be
-	  configured for different peripherals like audio, UART, SPI.
-	  The DMA controller can transfer data from memory to peripheral,
-	  periphal to memory, periphal to periphal and memory to memory.
-
-source "drivers/dma/sh/Kconfig"
-
-config COH901318
-	bool "ST-Ericsson COH901318 DMA support"
-	select DMA_ENGINE
-	depends on ARCH_U300
-	help
-	  Enable support for ST-Ericsson COH 901 318 DMA.
-
-config STE_DMA40
-	bool "ST-Ericsson DMA40 support"
-	depends on ARCH_U8500
-	select DMA_ENGINE
-	help
-	  Support for ST-Ericsson DMA40 controller
-
-config AMCC_PPC440SPE_ADMA
-	tristate "AMCC PPC440SPe ADMA support"
-	depends on 440SPe || 440SP
-	select DMA_ENGINE
-	select DMA_ENGINE_RAID
-	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	help
-	  Enable support for the AMCC PPC440SPe RAID engines.
-
 config TIMB_DMA
 	tristate "Timberdale FPGA DMA support"
 	depends on MFD_TIMBERDALE
@@ -238,12 +470,16 @@
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
-config SIRF_DMA
-	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
-	depends on ARCH_SIRF
+config TI_CPPI41
+	tristate "AM33xx CPPI41 DMA support"
+	depends on ARCH_OMAP
 	select DMA_ENGINE
 	help
-	  Enable support for the CSR SiRFprimaII DMA engine.
+	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+	  is currently used by the USB driver on AM335x platforms.
+
+config TI_DMA_CROSSBAR
+	bool
 
 config TI_EDMA
 	bool "TI EDMA support"
@@ -256,160 +492,14 @@
 	  Enable support for the TI EDMA controller. This DMA
 	  engine is found on TI DaVinci and AM33xx parts.
 
-config TI_DMA_CROSSBAR
-	bool
-
-config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
-	bool
-
-config PL330_DMA
-	tristate "DMA API Driver for PL330"
+config XGENE_DMA
+	tristate "APM X-Gene DMA support"
+	depends on ARCH_XGENE || COMPILE_TEST
 	select DMA_ENGINE
-	depends on ARM_AMBA
+	select DMA_ENGINE_RAID
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
-	  Select if your platform has one or more PL330 DMACs.
-	  You need to provide platform specific settings via
-	  platform_data for a dma-pl330 device.
-
-config PCH_DMA
-	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
-	depends on PCI && (X86_32 || COMPILE_TEST)
-	select DMA_ENGINE
-	help
-	  Enable support for Intel EG20T PCH DMA engine.
-
-	  This driver also can be used for LAPIS Semiconductor IOH(Input/
-	  Output Hub), ML7213, ML7223 and ML7831.
-	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
-	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
-	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
-	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-config IMX_SDMA
-	tristate "i.MX SDMA support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	help
-	  Support the i.MX SDMA engine. This engine is integrated into
-	  Freescale i.MX25/31/35/51/53/6 chips.
-
-config IMX_DMA
-	tristate "i.MX DMA support"
-	depends on ARCH_MXC
-	select DMA_ENGINE
-	help
-	  Support the i.MX DMA engine. This engine is integrated into
-	  Freescale i.MX1/21/27 chips.
-
-config MXS_DMA
-	bool "MXS DMA support"
-	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
-	select STMP_DEVICE
-	select DMA_ENGINE
-	help
-	  Support the MXS DMA engine. This engine including APBH-DMA
-	  and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
-
-config EP93XX_DMA
-	bool "Cirrus Logic EP93xx DMA support"
-	depends on ARCH_EP93XX
-	select DMA_ENGINE
-	help
-	  Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
-
-config DMA_SA11X0
-	tristate "SA-11x0 DMA support"
-	depends on ARCH_SA1100
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine found on Intel StrongARM SA-1100 and
-	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
-	  devices.
-
-config MMP_TDMA
-	bool "MMP Two-Channel DMA support"
-	depends on ARCH_MMP
-	select DMA_ENGINE
-	select MMP_SRAM
-	help
-	  Support the MMP Two-Channel DMA engine.
-	  This engine used for MMP Audio DMA and pxa910 SQU.
-	  It needs sram driver under mach-mmp.
-
-	  Say Y here if you enabled MMP ADMA, otherwise say N.
-
-config DMA_OMAP
-	tristate "OMAP DMA support"
-	depends on ARCH_OMAP
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	select TI_DMA_CROSSBAR if SOC_DRA7XX
-
-config DMA_BCM2835
-	tristate "BCM2835 DMA engine support"
-	depends on ARCH_BCM2835
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-
-config TI_CPPI41
-	tristate "AM33xx CPPI41 DMA support"
-	depends on ARCH_OMAP
-	select DMA_ENGINE
-	help
-	  The Communications Port Programming Interface (CPPI) 4.1 DMA engine
-	  is currently used by the USB driver on AM335x platforms.
-
-config MMP_PDMA
-	bool "MMP PDMA support"
-	depends on (ARCH_MMP || ARCH_PXA)
-	select DMA_ENGINE
-	help
-	  Support the MMP PDMA engine for PXA and MMP platform.
-
-config DMA_JZ4740
-	tristate "JZ4740 DMA support"
-	depends on MACH_JZ4740
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-
-config DMA_JZ4780
-	tristate "JZ4780 DMA support"
-	depends on MACH_JZ4780
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
-	  If you have a board based on such a SoC and wish to use DMA for
-	  devices which can use the DMA controller, say Y or M here.
-
-config K3_DMA
-	tristate "Hisilicon K3 DMA support"
-	depends on ARCH_HI3xxx
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the DMA engine for Hisilicon K3 platform
-	  devices.
-
-config MOXART_DMA
-	tristate "MOXART DMA support"
-	depends on ARCH_MOXART
-	select DMA_ENGINE
-	select DMA_OF
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the MOXA ART SoC DMA controller.
- 
-config FSL_EDMA
-	tristate "Freescale eDMA engine support"
-	depends on OF
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Support the Freescale eDMA engine with programmable channel
-	  multiplexing capability for DMA request sources(slot).
-	  This module can be found on Freescale Vybrid and LS-1 SoCs.
+	  Enable support for the APM X-Gene SoC DMA engine.
 
 config XILINX_VDMA
 	tristate "Xilinx AXI VDMA Engine"
@@ -425,55 +515,25 @@
 	  channels, Memory Mapped to Stream (MM2S) and Stream to
 	  Memory Mapped (S2MM) for the data transfers.
 
-config DMA_SUN6I
-	tristate "Allwinner A31 SoCs DMA support"
-	depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
-	depends on RESET_CONTROLLER
+config ZX_DMA
+	tristate "ZTE ZX296702 DMA support"
+	depends on ARCH_ZX
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-	  Support for the DMA engine first found in Allwinner A31 SoCs.
+	  Support the DMA engine for ZTE ZX296702 platform devices.
 
-config NBPFAXI_DMA
-	tristate "Renesas Type-AXI NBPF DMA support"
-	select DMA_ENGINE
-	depends on ARM || COMPILE_TEST
-	help
-	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
-config IMG_MDC_DMA
-	tristate "IMG MDC support"
-	depends on MIPS || COMPILE_TEST
-	depends on MFD_SYSCON
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	help
-	  Enable support for the IMG multi-threaded DMA controller (MDC).
+# driver files
+source "drivers/dma/bestcomm/Kconfig"
 
-config XGENE_DMA
-	tristate "APM X-Gene DMA support"
-	depends on ARCH_XGENE || COMPILE_TEST
-	select DMA_ENGINE
-	select DMA_ENGINE_RAID
-	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
-	help
-	  Enable support for the APM X-Gene SoC DMA engine.
+source "drivers/dma/dw/Kconfig"
 
-config DMA_ENGINE
-	bool
+source "drivers/dma/hsu/Kconfig"
 
-config DMA_VIRTUAL_CHANNELS
-	tristate
+source "drivers/dma/sh/Kconfig"
 
-config DMA_ACPI
-	def_bool y
-	depends on ACPI
-
-config DMA_OF
-	def_bool y
-	depends on OF
-	select DMA_ENGINE
-
+# clients
 comment "DMA Clients"
 	depends on DMA_ENGINE
 
@@ -498,13 +558,4 @@
 config DMA_ENGINE_RAID
 	bool
 
-config QCOM_BAM_DMA
-	tristate "QCOM BAM DMA support"
-	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	---help---
-	  Enable support for the QCOM BAM DMA controller.  This controller
-	  provides DMA capabilities for a variety of on-chip devices.
-
 endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 6a4d6f2..7711a71 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,58 +1,69 @@
+#dmaengine debug flags
 subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG)  := -DDEBUG
 subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 
+#core
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
+#dmatest
 obj-$(CONFIG_DMATEST) += dmatest.o
-obj-$(CONFIG_INTEL_IOATDMA) += ioat/
-obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
-obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_HSU_DMA) += hsu/
-obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
-obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
-obj-$(CONFIG_MV_XOR) += mv_xor.o
-obj-$(CONFIG_DW_DMAC_CORE) += dw/
+
+#devices
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
-obj-$(CONFIG_MX3_IPU) += ipu/
-obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
-obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
-obj-$(CONFIG_IMX_DMA) += imx-dma.o
-obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_TIMB_DMA) += timb_dma.o
-obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
-obj-$(CONFIG_TI_EDMA) += edma.o
-obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
-obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
-obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
-obj-$(CONFIG_PL330_DMA) += pl330.o
-obj-$(CONFIG_PCH_DMA) += pch_dma.o
-obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
-obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
-obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
-obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
-obj-$(CONFIG_DMA_OMAP) += omap-dma.o
-obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
-obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
-obj-$(CONFIG_TI_CPPI41) += cppi41.o
-obj-$(CONFIG_K3_DMA) += k3dma.o
-obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
-obj-$(CONFIG_FSL_RAID) += fsl_raid.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
-obj-y += xilinx/
-obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
-obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
+obj-$(CONFIG_DW_DMAC_CORE) += dw/
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
+obj-$(CONFIG_HSU_DMA) += hsu/
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IDMA64) += idma64.o
+obj-$(CONFIG_INTEL_IOATDMA) += ioat/
+obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
+obj-$(CONFIG_MV_XOR) += mv_xor.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
+obj-$(CONFIG_MX3_IPU) += ipu/
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_PL330_DMA) += pl330.o
+obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
+obj-$(CONFIG_PXA_DMA) += pxa_dma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
+obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
+obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
+obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
+obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma-crossbar.o
+obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
+obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
+
+obj-y += xilinx/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 5de3cf4..9b42c05 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -83,6 +83,8 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/pm_runtime.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -2030,10 +2032,188 @@
 }
 #endif
 
+#ifdef CONFIG_OF
+static struct dma_chan *pl08x_find_chan_id(struct pl08x_driver_data *pl08x,
+					 u32 id)
+{
+	struct pl08x_dma_chan *chan;
+
+	list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) {
+		if (chan->signal == id)
+			return &chan->vc.chan;
+	}
+
+	return NULL;
+}
+
+static struct dma_chan *pl08x_of_xlate(struct of_phandle_args *dma_spec,
+				       struct of_dma *ofdma)
+{
+	struct pl08x_driver_data *pl08x = ofdma->of_dma_data;
+	struct pl08x_channel_data *data;
+	struct pl08x_dma_chan *chan;
+	struct dma_chan *dma_chan;
+
+	if (!pl08x)
+		return NULL;
+
+	if (dma_spec->args_count != 2)
+		return NULL;
+
+	dma_chan = pl08x_find_chan_id(pl08x, dma_spec->args[0]);
+	if (dma_chan)
+		return dma_get_slave_channel(dma_chan);
+
+	chan = devm_kzalloc(pl08x->slave.dev, sizeof(*chan) + sizeof(*data),
+			    GFP_KERNEL);
+	if (!chan)
+		return NULL;
+
+	data = (void *)&chan[1];
+	data->bus_id = "(none)";
+	data->periph_buses = dma_spec->args[1];
+
+	chan->cd = data;
+	chan->host = pl08x;
+	chan->slave = true;
+	chan->name = data->bus_id;
+	chan->state = PL08X_CHAN_IDLE;
+	chan->signal = dma_spec->args[0];
+	chan->vc.desc_free = pl08x_desc_free;
+
+	vchan_init(&chan->vc, &pl08x->slave);
+
+	return dma_get_slave_channel(&chan->vc.chan);
+}
+
+static int pl08x_of_probe(struct amba_device *adev,
+			  struct pl08x_driver_data *pl08x,
+			  struct device_node *np)
+{
+	struct pl08x_platform_data *pd;
+	u32 cctl_memcpy = 0;
+	u32 val;
+	int ret;
+
+	pd = devm_kzalloc(&adev->dev, sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return -ENOMEM;
+
+	/* Eligible bus masters for fetching LLIs */
+	if (of_property_read_bool(np, "lli-bus-interface-ahb1"))
+		pd->lli_buses |= PL08X_AHB1;
+	if (of_property_read_bool(np, "lli-bus-interface-ahb2"))
+		pd->lli_buses |= PL08X_AHB2;
+	if (!pd->lli_buses) {
+		dev_info(&adev->dev, "no bus masters for LLIs stated, assume all\n");
+		pd->lli_buses |= PL08X_AHB1 | PL08X_AHB2;
+	}
+
+	/* Eligible bus masters for memory access */
+	if (of_property_read_bool(np, "mem-bus-interface-ahb1"))
+		pd->mem_buses |= PL08X_AHB1;
+	if (of_property_read_bool(np, "mem-bus-interface-ahb2"))
+		pd->mem_buses |= PL08X_AHB2;
+	if (!pd->mem_buses) {
+		dev_info(&adev->dev, "no bus masters for memory stated, assume all\n");
+		pd->mem_buses |= PL08X_AHB1 | PL08X_AHB2;
+	}
+
+	/* Parse the memcpy channel properties */
+	ret = of_property_read_u32(np, "memcpy-burst-size", &val);
+	if (ret) {
+		dev_info(&adev->dev, "no memcpy burst size specified, using 1 byte\n");
+		val = 1;
+	}
+	switch (val) {
+	default:
+		dev_err(&adev->dev, "illegal burst size for memcpy, set to 1\n");
+		/* Fall through */
+	case 1:
+		cctl_memcpy |= PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 4:
+		cctl_memcpy |= PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 8:
+		cctl_memcpy |= PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 16:
+		cctl_memcpy |= PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 32:
+		cctl_memcpy |= PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 64:
+		cctl_memcpy |= PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 128:
+		cctl_memcpy |= PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	case 256:
+		cctl_memcpy |= PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT |
+			       PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT;
+		break;
+	}
+
+	ret = of_property_read_u32(np, "memcpy-bus-width", &val);
+	if (ret) {
+		dev_info(&adev->dev, "no memcpy bus width specified, using 8 bits\n");
+		val = 8;
+	}
+	switch (val) {
+	default:
+		dev_err(&adev->dev, "illegal bus width for memcpy, set to 8 bits\n");
+		/* Fall through */
+	case 8:
+		cctl_memcpy |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	case 16:
+		cctl_memcpy |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	case 32:
+		cctl_memcpy |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT |
+			       PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
+		break;
+	}
+
+	/* This is currently the only thing making sense */
+	cctl_memcpy |= PL080_CONTROL_PROT_SYS;
+
+	/* Set up memcpy channel */
+	pd->memcpy_channel.bus_id = "memcpy";
+	pd->memcpy_channel.cctl_memcpy = cctl_memcpy;
+	/* Use the buses that can access memory, obviously */
+	pd->memcpy_channel.periph_buses = pd->mem_buses;
+
+	pl08x->pd = pd;
+
+	return of_dma_controller_register(adev->dev.of_node, pl08x_of_xlate,
+					  pl08x);
+}
+#else
+static inline int pl08x_of_probe(struct amba_device *adev,
+				 struct pl08x_driver_data *pl08x,
+				 struct device_node *np)
+{
+	return -EINVAL;
+}
+#endif
+
 static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 {
 	struct pl08x_driver_data *pl08x;
 	const struct vendor_data *vd = id->data;
+	struct device_node *np = adev->dev.of_node;
 	u32 tsfr_size;
 	int ret = 0;
 	int i;
@@ -2093,9 +2273,15 @@
 	/* Get the platform data */
 	pl08x->pd = dev_get_platdata(&adev->dev);
 	if (!pl08x->pd) {
-		dev_err(&adev->dev, "no platform data supplied\n");
-		ret = -EINVAL;
-		goto out_no_platdata;
+		if (np) {
+			ret = pl08x_of_probe(adev, pl08x, np);
+			if (ret)
+				goto out_no_platdata;
+		} else {
+			dev_err(&adev->dev, "no platform data supplied\n");
+			ret = -EINVAL;
+			goto out_no_platdata;
+		}
 	}
 
 	/* Assign useful pointers to the driver state */
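
The OF support added above registers a two-cell translation for pl08x (first cell: request signal, second cell: the AHB master(s) the peripheral sits on), so channels can be looked up from the devicetree instead of board platform data. A hypothetical client sketch, not part of this patch (helper name invented, "rx" assumed to match the client's dma-names entry), using the generic dmaengine helper that ends up in pl08x_of_xlate():

    #include <linux/dmaengine.h>

    /* hypothetical helper; returns NULL if no matching channel is found */
    static struct dma_chan *example_get_pl08x_chan(struct device *dev)
    {
            return dma_request_slave_channel(dev, "rx");
    }
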
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index d3629b7..58d4062 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -448,6 +448,7 @@
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor	*txd = &desc->txd;
+	struct at_dma			*atdma = to_at_dma(atchan->chan_common.device);
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
@@ -456,6 +457,13 @@
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
 
+	/* If the transfer was a memset, free our temporary buffer */
+	if (desc->memset) {
+		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+			      desc->memset_paddr);
+		desc->memset = false;
+	}
+
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
 	/* move myself to free_list */
@@ -717,14 +725,14 @@
 	size_t			len = 0;
 	int			i;
 
+	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+		return NULL;
+
 	dev_info(chan2dev(chan),
 		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
 		__func__, xt->src_start, xt->dst_start, xt->numf,
 		xt->frame_size, flags);
 
-	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
-		return NULL;
-
 	/*
 	 * The controller can only "skip" X bytes every Y bytes, so we
 	 * need to make sure we are given a template that fit that
@@ -873,6 +881,93 @@
 	return NULL;
 }
 
+/**
+ * atc_prep_dma_memset - prepare a memset operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation virtual destination address
+ * @value: value to set memory buffer to
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+		    size_t len, unsigned long flags)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma		*atdma = to_at_dma(chan->device);
+	struct at_desc		*desc = NULL;
+	size_t			xfer_count;
+	u32			ctrla;
+	u32			ctrlb;
+
+	dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
+		dest, value, len, flags);
+
+	if (unlikely(!len)) {
+		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+		return NULL;
+	}
+
+	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
+			__func__);
+		return NULL;
+	}
+
+	xfer_count = len >> 2;
+	if (xfer_count > ATC_BTSIZE_MAX) {
+		dev_err(chan2dev(chan), "%s: buffer is too big\n",
+			__func__);
+		return NULL;
+	}
+
+	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
+		| ATC_SRC_ADDR_MODE_FIXED
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_FC_MEM2MEM;
+
+	ctrla = ATC_SRC_WIDTH(2) |
+		ATC_DST_WIDTH(2);
+
+	desc = atc_desc_get(atchan);
+	if (!desc) {
+		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+			__func__);
+		return NULL;
+	}
+
+	desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
+					    &desc->memset_paddr);
+	if (!desc->memset_vaddr) {
+		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
+			__func__);
+		goto err_put_desc;
+	}
+
+	*desc->memset_vaddr = value;
+	desc->memset = true;
+
+	desc->lli.saddr = desc->memset_paddr;
+	desc->lli.daddr = dest;
+	desc->lli.ctrla = ctrla | xfer_count;
+	desc->lli.ctrlb = ctrlb;
+
+	desc->txd.cookie = -EBUSY;
+	desc->len = len;
+	desc->total_len = len;
+
+	/* set end-of-link on the descriptor */
+	set_desc_eol(desc);
+
+	desc->txd.flags = flags;
+
+	return &desc->txd;
+
+err_put_desc:
+	atc_desc_put(atchan, desc);
+	return NULL;
+}
+
 
 /**
  * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
@@ -1755,6 +1850,8 @@
 	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
@@ -1818,7 +1915,16 @@
 	if (!atdma->dma_desc_pool) {
 		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
 		err = -ENOMEM;
-		goto err_pool_create;
+		goto err_desc_pool_create;
+	}
+
+	/* create a pool of consistent memory blocks for memset blocks */
+	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
+					     &pdev->dev, sizeof(int), 4, 0);
+	if (!atdma->memset_pool) {
+		dev_err(&pdev->dev, "No memory for memset dma pool\n");
+		err = -ENOMEM;
+		goto err_memset_pool_create;
 	}
 
 	/* clear any pending interrupt */
@@ -1864,6 +1970,11 @@
 	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
 		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
 
+	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
+		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
+		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
+	}
+
 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
 		/* controller can do slave DMA: can trigger cyclic transfers */
@@ -1884,8 +1995,9 @@
 
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
+	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
 	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
 	  plat_dat->nr_channels);
@@ -1910,8 +2022,10 @@
 
 err_of_dma_controller_register:
 	dma_async_device_unregister(&atdma->dma_common);
+	dma_pool_destroy(atdma->memset_pool);
+err_memset_pool_create:
 	dma_pool_destroy(atdma->dma_desc_pool);
-err_pool_create:
+err_desc_pool_create:
 	free_irq(platform_get_irq(pdev, 0), atdma);
 err_irq:
 	clk_disable_unprepare(atdma->clk);
@@ -1936,6 +2050,7 @@
 	at_dma_off(atdma);
 	dma_async_device_unregister(&atdma->dma_common);
 
+	dma_pool_destroy(atdma->memset_pool);
 	dma_pool_destroy(atdma->dma_desc_pool);
 	free_irq(platform_get_irq(pdev, 0), atdma);
 
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 7f5a082..c3bebbe 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -200,6 +200,11 @@
 	size_t				boundary;
 	size_t				dst_hole;
 	size_t				src_hole;
+
+	/* Memset temporary buffer */
+	bool				memset;
+	dma_addr_t			memset_paddr;
+	int				*memset_vaddr;
 };
 
 static inline struct at_desc *
@@ -330,6 +335,7 @@
 	u8			all_chan_mask;
 
 	struct dma_pool		*dma_desc_pool;
+	struct dma_pool		*memset_pool;
 	/* AT THE END channels table */
 	struct at_dma_chan	chan[0];
 };
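
With the DMA_MEMSET capability added above, at_hdmac stages the fill value in the new memset_pool and transfers it with a fixed source address and incrementing destination, freeing the temporary buffer when the descriptor completes. A minimal client sketch, not part of this patch (function name hypothetical, completion handling trimmed), assuming a 4-byte aligned destination as required by fill_align:

    #include <linux/dmaengine.h>

    static int example_dma_memset(dma_addr_t dst, int value, size_t len)
    {
            dma_cap_mask_t mask;
            struct dma_chan *chan;
            struct dma_async_tx_descriptor *tx;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMSET, mask);
            chan = dma_request_channel(mask, NULL, NULL);
            if (!chan)
                    return -ENODEV;

            tx = chan->device->device_prep_dma_memset(chan, dst, value, len, 0);
            if (!tx) {
                    dma_release_channel(chan);
                    return -ENOMEM;
            }

            dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            /* wait for completion here, then dma_release_channel(chan) */
            return 0;
    }
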
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 40afa2a..a165b4b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -625,12 +625,12 @@
 		       unsigned int sg_len, enum dma_transfer_direction direction,
 		       unsigned long flags, void *context)
 {
-	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
-	struct at_xdmac_desc	*first = NULL, *prev = NULL;
-	struct scatterlist	*sg;
-	int			i;
-	unsigned int		xfer_size = 0;
-	unsigned long		irqflags;
+	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc		*first = NULL, *prev = NULL;
+	struct scatterlist		*sg;
+	int				i;
+	unsigned int			xfer_size = 0;
+	unsigned long			irqflags;
 	struct dma_async_tx_descriptor	*ret = NULL;
 
 	if (!sgl)
@@ -797,10 +797,7 @@
 		list_add_tail(&desc->desc_node, &first->descs_list);
 	}
 
-	prev->lld.mbr_nda = first->tx_dma_desc.phys;
-	dev_dbg(chan2dev(chan),
-		"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
-		__func__, prev, &prev->lld.mbr_nda);
+	at_xdmac_queue_desc(chan, prev, first);
 	first->tx_dma_desc.flags = flags;
 	first->xfer_size = buf_len;
 	first->direction = direction;
@@ -1135,7 +1132,7 @@
 	 * SAMA5D4x), so we can use the same interface for source and dest,
 	 * that solves the fact we don't know the direction.
 	 */
-	u32			chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
+	u32			chan_cc = AT_XDMAC_CC_DAM_UBS_AM
 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
 					| AT_XDMAC_CC_DIF(0)
 					| AT_XDMAC_CC_SIF(0)
@@ -1203,6 +1200,168 @@
 	return &desc->tx_dma_desc;
 }
 
+static struct dma_async_tx_descriptor *
+at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			    unsigned int sg_len, int value,
+			    unsigned long flags)
+{
+	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	struct at_xdmac_desc	*desc, *pdesc = NULL,
+				*ppdesc = NULL, *first = NULL;
+	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
+	size_t			stride = 0, pstride = 0, len = 0;
+	int			i;
+
+	if (!sgl)
+		return NULL;
+
+	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
+		__func__, sg_len, value, flags);
+
+	/* Prepare descriptors. */
+	for_each_sg(sgl, sg, sg_len, i) {
+		dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
+			__func__, sg_dma_address(sg), sg_dma_len(sg),
+			value, flags);
+		desc = at_xdmac_memset_create_desc(chan, atchan,
+						   sg_dma_address(sg),
+						   sg_dma_len(sg),
+						   value);
+		if (!desc && first)
+			list_splice_init(&first->descs_list,
+					 &atchan->free_descs_list);
+
+		if (!first)
+			first = desc;
+
+		/* Update our strides */
+		pstride = stride;
+		if (psg)
+			stride = sg_dma_address(sg) -
+				(sg_dma_address(psg) + sg_dma_len(psg));
+
+		/*
+		 * The scatterlist API gives us only the address and
+		 * length of each element.
+		 *
+		 * Unfortunately, we don't have the stride, which we
+		 * will need to compute.
+		 *
+		 * That makes us end up in a situation like this one:
+		 *    len    stride    len    stride    len
+		 * +-------+        +-------+        +-------+
+		 * |  N-2  |        |  N-1  |        |   N   |
+		 * +-------+        +-------+        +-------+
+		 *
+		 * We need all these three elements (N-2, N-1 and N)
+		 * to actually take the decision on whether we need to
+		 * queue N-1 or reuse N-2.
+		 *
+		 * We will only consider N if it is the last element.
+		 */
+		if (ppdesc && pdesc) {
+			if ((stride == pstride) &&
+			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
+				dev_dbg(chan2dev(chan),
+					"%s: desc 0x%p can be merged with desc 0x%p\n",
+					__func__, pdesc, ppdesc);
+
+				/*
+				 * Increment the block count of the
+				 * N-2 descriptor
+				 */
+				at_xdmac_increment_block_count(chan, ppdesc);
+				ppdesc->lld.mbr_dus = stride;
+
+				/*
+				 * Put back the N-1 descriptor in the
+				 * free descriptor list
+				 */
+				list_add_tail(&pdesc->desc_node,
+					      &atchan->free_descs_list);
+
+				/*
+				 * Make our N-1 descriptor pointer
+				 * point to the N-2 since they were
+				 * actually merged.
+				 */
+				pdesc = ppdesc;
+
+			/*
+			 * Rule out the case where we don't have
+			 * pstride computed yet (our second sg
+			 * element)
+			 *
+			 * We also want to catch the case where there
+			 * would be a negative stride,
+			 */
+			} else if (pstride ||
+				   sg_dma_address(sg) < sg_dma_address(psg)) {
+				/*
+				 * Queue the N-1 descriptor after the
+				 * N-2
+				 */
+				at_xdmac_queue_desc(chan, ppdesc, pdesc);
+
+				/*
+				 * Add the N-1 descriptor to the list
+				 * of the descriptors used for this
+				 * transfer
+				 */
+				list_add_tail(&desc->desc_node,
+					      &first->descs_list);
+				dev_dbg(chan2dev(chan),
+					"%s: add desc 0x%p to descs_list 0x%p\n",
+					__func__, desc, first);
+			}
+		}
+
+		/*
+		 * If we are the last element, just see if we have the
+		 * same size as the previous element.
+		 *
+		 * If so, we can merge it with the previous descriptor
+		 * since we don't care about the stride anymore.
+		 */
+		if ((i == (sg_len - 1)) &&
+		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+			dev_dbg(chan2dev(chan),
+				"%s: desc 0x%p can be merged with desc 0x%p\n",
+				__func__, desc, pdesc);
+
+			/*
+			 * Increment the block count of the N-1
+			 * descriptor
+			 */
+			at_xdmac_increment_block_count(chan, pdesc);
+			pdesc->lld.mbr_dus = stride;
+
+			/*
+			 * Put back the N descriptor in the free
+			 * descriptor list
+			 */
+			list_add_tail(&desc->desc_node,
+				      &atchan->free_descs_list);
+		}
+
+		/* Update our descriptors */
+		ppdesc = pdesc;
+		pdesc = desc;
+
+		/* Update our scatter pointers */
+		ppsg = psg;
+		psg = sg;
+
+		len += sg_dma_len(sg);
+	}
+
+	first->tx_dma_desc.cookie = -EBUSY;
+	first->tx_dma_desc.flags = flags;
+	first->xfer_size = len;
+
+	return &first->tx_dma_desc;
+}
+
 static enum dma_status
 at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		struct dma_tx_state *txstate)
@@ -1736,6 +1895,7 @@
 	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
+	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
 	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
 	/*
 	 * Without DMA_PRIVATE the driver is not able to allocate more than
@@ -1751,6 +1911,7 @@
 	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
 	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
 	atxdmac->dma.device_prep_dma_memset		= at_xdmac_prep_dma_memset;
+	atxdmac->dma.device_prep_dma_memset_sg		= at_xdmac_prep_dma_memset_sg;
 	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
 	atxdmac->dma.device_config			= at_xdmac_device_config;
 	atxdmac->dma.device_pause			= at_xdmac_device_pause;
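
The memset_sg path registered above walks the scatterlist and, as the long comment in at_xdmac_prep_dma_memset_sg() explains, folds consecutive segments with identical length and stride into one descriptor by bumping its block count. A hypothetical caller, not part of this patch (helper name invented, scatterlist assumed already DMA-mapped), handing such a list to the new operation:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static struct dma_async_tx_descriptor *
    example_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
                      unsigned int nents, int pattern)
    {
            /* one descriptor clears every mapped segment with the same pattern */
            return chan->device->device_prep_dma_memset_sg(chan, sgl, nents,
                                                            pattern,
                                                            DMA_PREP_INTERRUPT);
    }
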
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index fd22dd3..c340ca9 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -2730,7 +2730,7 @@
 	 * This controller can only access address at even 32bit boundaries,
 	 * i.e. 2^2
 	 */
-	base->dma_memcpy.copy_align = 2;
+	base->dma_memcpy.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	err = dma_async_device_register(&base->dma_memcpy);
 
 	if (err)
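
The copy_align conversions in this series (coh901318 here, edma and dma-jz4780 below) rest on the dmaengine_alignment enumerators keeping the old log2 encoding, so the raw "2" and DMAENGINE_ALIGN_4_BYTES both describe a 2^2 = 4 byte boundary. A sketch of that assumption, with the enumerator values as they appear in dmaengine.h:

    enum dmaengine_alignment {
            DMAENGINE_ALIGN_1_BYTE  = 0,
            DMAENGINE_ALIGN_2_BYTES = 1,
            DMAENGINE_ALIGN_4_BYTES = 2,    /* 1 << 2 == 4 bytes, same as the raw "2" */
            DMAENGINE_ALIGN_8_BYTES = 3,
            /* ... */
    };
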
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
new file mode 100644
index 0000000..5b2395e
--- /dev/null
+++ b/drivers/dma/dma-axi-dmac.c
@@ -0,0 +1,691 @@
+/*
+ * Driver for the Analog Devices AXI-DMAC core
+ *
+ * Copyright 2013-2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/dma/axi-dmac.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/*
+ * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
+ * various instantiation parameters which decide the exact feature set supported
+ * by the core.
+ *
+ * Each channel of the core has a source interface and a destination interface.
+ * The number of channels and the type of the channel interfaces is selected at
+ * configuration time. An interface can either be connected to a central memory
+ * interconnect, which allows access to system memory, or it can be connected to
+ * a dedicated bus which is directly connected to a data port on a peripheral.
+ * Given that those are configuration options of the core that are selected when
+ * it is instantiated, this means that they cannot be changed by software at
+ * runtime. By extension this means that each channel is uni-directional. It can
+ * either be device to memory or memory to device, but not both. Also since the
+ * device side is a dedicated data bus only connected to a single peripheral
+ * there is no address that can or needs to be configured for the device side.
+ */
+
+#define AXI_DMAC_REG_IRQ_MASK		0x80
+#define AXI_DMAC_REG_IRQ_PENDING	0x84
+#define AXI_DMAC_REG_IRQ_SOURCE		0x88
+
+#define AXI_DMAC_REG_CTRL		0x400
+#define AXI_DMAC_REG_TRANSFER_ID	0x404
+#define AXI_DMAC_REG_START_TRANSFER	0x408
+#define AXI_DMAC_REG_FLAGS		0x40c
+#define AXI_DMAC_REG_DEST_ADDRESS	0x410
+#define AXI_DMAC_REG_SRC_ADDRESS	0x414
+#define AXI_DMAC_REG_X_LENGTH		0x418
+#define AXI_DMAC_REG_Y_LENGTH		0x41c
+#define AXI_DMAC_REG_DEST_STRIDE	0x420
+#define AXI_DMAC_REG_SRC_STRIDE		0x424
+#define AXI_DMAC_REG_TRANSFER_DONE	0x428
+#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
+#define AXI_DMAC_REG_STATUS		0x430
+#define AXI_DMAC_REG_CURRENT_SRC_ADDR	0x434
+#define AXI_DMAC_REG_CURRENT_DEST_ADDR	0x438
+
+#define AXI_DMAC_CTRL_ENABLE		BIT(0)
+#define AXI_DMAC_CTRL_PAUSE		BIT(1)
+
+#define AXI_DMAC_IRQ_SOT		BIT(0)
+#define AXI_DMAC_IRQ_EOT		BIT(1)
+
+#define AXI_DMAC_FLAG_CYCLIC		BIT(0)
+
+struct axi_dmac_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dest_addr;
+	unsigned int x_len;
+	unsigned int y_len;
+	unsigned int dest_stride;
+	unsigned int src_stride;
+	unsigned int id;
+};
+
+struct axi_dmac_desc {
+	struct virt_dma_desc vdesc;
+	bool cyclic;
+
+	unsigned int num_submitted;
+	unsigned int num_completed;
+	unsigned int num_sgs;
+	struct axi_dmac_sg sg[];
+};
+
+struct axi_dmac_chan {
+	struct virt_dma_chan vchan;
+
+	struct axi_dmac_desc *next_desc;
+	struct list_head active_descs;
+	enum dma_transfer_direction direction;
+
+	unsigned int src_width;
+	unsigned int dest_width;
+	unsigned int src_type;
+	unsigned int dest_type;
+
+	unsigned int max_length;
+	unsigned int align_mask;
+
+	bool hw_cyclic;
+	bool hw_2d;
+};
+
+struct axi_dmac {
+	void __iomem *base;
+	int irq;
+
+	struct clk *clk;
+
+	struct dma_device dma_dev;
+	struct axi_dmac_chan chan;
+
+	struct device_dma_parameters dma_parms;
+};
+
+static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct axi_dmac,
+		dma_dev);
+}
+
+static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
+{
+	return container_of(c, struct axi_dmac_chan, vchan.chan);
+}
+
+static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct axi_dmac_desc, vdesc);
+}
+
+static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
+	unsigned int val)
+{
+	writel(val, axi_dmac->base + reg);
+}
+
+static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
+{
+	return readl(axi_dmac->base + reg);
+}
+
+static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
+{
+	return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
+{
+	return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
+}
+
+static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
+{
+	if (len == 0 || len > chan->max_length)
+		return false;
+	if ((len & chan->align_mask) != 0) /* Not aligned */
+		return false;
+	return true;
+}
+
+static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
+{
+	if ((addr & chan->align_mask) != 0) /* Not aligned */
+		return false;
+	return true;
+}
+
+static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
+{
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	struct virt_dma_desc *vdesc;
+	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *sg;
+	unsigned int flags = 0;
+	unsigned int val;
+
+	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+	if (val) /* Queue is full, wait for the next SOT IRQ */
+		return;
+
+	desc = chan->next_desc;
+
+	if (!desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return;
+		list_move_tail(&vdesc->node, &chan->active_descs);
+		desc = to_axi_dmac_desc(vdesc);
+	}
+	sg = &desc->sg[desc->num_submitted];
+
+	desc->num_submitted++;
+	if (desc->num_submitted == desc->num_sgs)
+		chan->next_desc = NULL;
+	else
+		chan->next_desc = desc;
+
+	sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
+		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
+	}
+
+	if (axi_dmac_src_is_mem(chan)) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
+		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+	}
+
+	/*
+	 * If the hardware supports cyclic transfers and there is no callback to
+	 * call, enable hw cyclic mode to avoid unnecessary interrupts.
+	 */
+	if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback)
+		flags |= AXI_DMAC_FLAG_CYCLIC;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
+	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
+	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
+}
+
+static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
+{
+	return list_first_entry_or_null(&chan->active_descs,
+		struct axi_dmac_desc, vdesc.node);
+}
+
+static void axi_dmac_transfer_done(struct axi_dmac_chan *chan,
+	unsigned int completed_transfers)
+{
+	struct axi_dmac_desc *active;
+	struct axi_dmac_sg *sg;
+
+	active = axi_dmac_active_desc(chan);
+	if (!active)
+		return;
+
+	if (active->cyclic) {
+		vchan_cyclic_callback(&active->vdesc);
+	} else {
+		do {
+			sg = &active->sg[active->num_completed];
+			if (!(BIT(sg->id) & completed_transfers))
+				break;
+			active->num_completed++;
+			if (active->num_completed == active->num_sgs) {
+				list_del(&active->vdesc.node);
+				vchan_cookie_complete(&active->vdesc);
+				active = axi_dmac_active_desc(chan);
+			}
+		} while (active);
+	}
+}
+
+static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
+{
+	struct axi_dmac *dmac = devid;
+	unsigned int pending;
+
+	pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
+	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);
+
+	spin_lock(&dmac->chan.vchan.lock);
+	/* One or more transfers have finished */
+	if (pending & AXI_DMAC_IRQ_EOT) {
+		unsigned int completed;
+
+		completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
+		axi_dmac_transfer_done(&dmac->chan, completed);
+	}
+	/* Space has become available in the descriptor queue */
+	if (pending & AXI_DMAC_IRQ_SOT)
+		axi_dmac_start_transfer(&dmac->chan);
+	spin_unlock(&dmac->chan.vchan.lock);
+
+	return IRQ_HANDLED;
+}
+
+static int axi_dmac_terminate_all(struct dma_chan *c)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
+	chan->next_desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	list_splice_tail_init(&chan->active_descs, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static void axi_dmac_issue_pending(struct dma_chan *c)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+	unsigned long flags;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan))
+		axi_dmac_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+{
+	struct axi_dmac_desc *desc;
+
+	desc = kzalloc(sizeof(struct axi_dmac_desc) +
+		sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->num_sgs = num_sgs;
+
+	return desc;
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	if (direction != chan->direction)
+		return NULL;
+
+	desc = axi_dmac_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
+		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
+			kfree(desc);
+			return NULL;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dest_addr = sg_dma_address(sg);
+		else
+			desc->sg[i].src_addr = sg_dma_address(sg);
+		desc->sg[i].x_len = sg_dma_len(sg);
+		desc->sg[i].y_len = 1;
+	}
+
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	unsigned int num_periods, i;
+
+	if (direction != chan->direction)
+		return NULL;
+
+	if (!axi_dmac_check_len(chan, buf_len) ||
+	    !axi_dmac_check_addr(chan, buf_addr))
+		return NULL;
+
+	if (period_len == 0 || buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = axi_dmac_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		if (direction == DMA_DEV_TO_MEM)
+			desc->sg[i].dest_addr = buf_addr;
+		else
+			desc->sg[i].src_addr = buf_addr;
+		desc->sg[i].x_len = period_len;
+		desc->sg[i].y_len = 1;
+		buf_addr += period_len;
+	}
+
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
+	struct dma_chan *c, struct dma_interleaved_template *xt,
+	unsigned long flags)
+{
+	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
+	struct axi_dmac_desc *desc;
+	size_t dst_icg, src_icg;
+
+	if (xt->frame_size != 1)
+		return NULL;
+
+	if (xt->dir != chan->direction)
+		return NULL;
+
+	if (axi_dmac_src_is_mem(chan)) {
+		if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
+			return NULL;
+	}
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
+			return NULL;
+	}
+
+	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
+	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
+
+	if (chan->hw_2d) {
+		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
+		    !axi_dmac_check_len(chan, xt->numf))
+			return NULL;
+		if (xt->sgl[0].size + dst_icg > chan->max_length ||
+		    xt->sgl[0].size + src_icg > chan->max_length)
+			return NULL;
+	} else {
+		if (dst_icg != 0 || src_icg != 0)
+			return NULL;
+		if (chan->max_length / xt->sgl[0].size < xt->numf)
+			return NULL;
+		if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
+			return NULL;
+	}
+
+	desc = axi_dmac_alloc_desc(1);
+	if (!desc)
+		return NULL;
+
+	if (axi_dmac_src_is_mem(chan)) {
+		desc->sg[0].src_addr = xt->src_start;
+		desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+	}
+
+	if (axi_dmac_dest_is_mem(chan)) {
+		desc->sg[0].dest_addr = xt->dst_start;
+		desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+	}
+
+	if (chan->hw_2d) {
+		desc->sg[0].x_len = xt->sgl[0].size;
+		desc->sg[0].y_len = xt->numf;
+	} else {
+		desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
+		desc->sg[0].y_len = 1;
+	}
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static void axi_dmac_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+}
+
+/*
+ * The configuration stored in the devicetree matches the configuration
+ * parameters of the peripheral instance and allows the driver to know which
+ * features are implemented and how it should behave.
+ */
+static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
+	struct axi_dmac_chan *chan)
+{
+	u32 val;
+	int ret;
+
+	ret = of_property_read_u32(of_chan, "reg", &val);
+	if (ret)
+		return ret;
+
+	/* We only support 1 channel for now */
+	if (val != 0)
+		return -EINVAL;
+
+	ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
+	if (ret)
+		return ret;
+	if (val > AXI_DMAC_BUS_TYPE_FIFO)
+		return -EINVAL;
+	chan->src_type = val;
+
+	ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
+	if (ret)
+		return ret;
+	if (val > AXI_DMAC_BUS_TYPE_FIFO)
+		return -EINVAL;
+	chan->dest_type = val;
+
+	ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
+	if (ret)
+		return ret;
+	chan->src_width = val / 8;
+
+	ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
+	if (ret)
+		return ret;
+	chan->dest_width = val / 8;
+
+	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
+	if (ret)
+		return ret;
+
+	if (val >= 32)
+		chan->max_length = UINT_MAX;
+	else
+		chan->max_length = (1ULL << val) - 1;
+
+	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
+
+	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_MEM;
+	else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_MEM_TO_DEV;
+	else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
+		chan->direction = DMA_DEV_TO_MEM;
+	else
+		chan->direction = DMA_DEV_TO_DEV;
+
+	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
+	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
+
+	return 0;
+}
+
+static int axi_dmac_probe(struct platform_device *pdev)
+{
+	struct device_node *of_channels, *of_chan;
+	struct dma_device *dma_dev;
+	struct axi_dmac *dmac;
+	struct resource *res;
+	int ret;
+
+	dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
+	if (!dmac)
+		return -ENOMEM;
+
+	dmac->irq = platform_get_irq(pdev, 0);
+	if (dmac->irq <= 0)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dmac->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dmac->base))
+		return PTR_ERR(dmac->base);
+
+	dmac->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(dmac->clk))
+		return PTR_ERR(dmac->clk);
+
+	INIT_LIST_HEAD(&dmac->chan.active_descs);
+
+	of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
+	if (of_channels == NULL)
+		return -ENODEV;
+
+	for_each_child_of_node(of_channels, of_chan) {
+		ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
+		if (ret) {
+			of_node_put(of_chan);
+			of_node_put(of_channels);
+			return -EINVAL;
+		}
+	}
+	of_node_put(of_channels);
+
+	pdev->dev.dma_parms = &dmac->dma_parms;
+	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+
+	dma_dev = &dmac->dma_dev;
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
+	dma_dev->device_tx_status = dma_cookie_status;
+	dma_dev->device_issue_pending = axi_dmac_issue_pending;
+	dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
+	dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
+	dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
+	dma_dev->device_terminate_all = axi_dmac_terminate_all;
+	dma_dev->dev = &pdev->dev;
+	dma_dev->chancnt = 1;
+	dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
+	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
+	dma_dev->directions = BIT(dmac->chan.direction);
+	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	INIT_LIST_HEAD(&dma_dev->channels);
+
+	dmac->chan.vchan.desc_free = axi_dmac_desc_free;
+	vchan_init(&dmac->chan.vchan, dma_dev);
+
+	ret = clk_prepare_enable(dmac->clk);
+	if (ret < 0)
+		return ret;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret)
+		goto err_clk_disable;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister_device;
+
+	ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
+		dev_name(&pdev->dev), dmac);
+	if (ret)
+		goto err_unregister_of;
+
+	platform_set_drvdata(pdev, dmac);
+
+	return 0;
+
+err_unregister_of:
+	of_dma_controller_free(pdev->dev.of_node);
+err_unregister_device:
+	dma_async_device_unregister(&dmac->dma_dev);
+err_clk_disable:
+	clk_disable_unprepare(dmac->clk);
+
+	return ret;
+}
+
+static int axi_dmac_remove(struct platform_device *pdev)
+{
+	struct axi_dmac *dmac = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	free_irq(dmac->irq, dmac);
+	tasklet_kill(&dmac->chan.vchan.task);
+	dma_async_device_unregister(&dmac->dma_dev);
+	clk_disable_unprepare(dmac->clk);
+
+	return 0;
+}
+
+static const struct of_device_id axi_dmac_of_match_table[] = {
+	{ .compatible = "adi,axi-dmac-1.00.a" },
+	{ },
+};
+
+static struct platform_driver axi_dmac_driver = {
+	.driver = {
+		.name = "dma-axi-dmac",
+		.of_match_table = axi_dmac_of_match_table,
+	},
+	.probe = axi_dmac_probe,
+	.remove = axi_dmac_remove,
+};
+module_platform_driver(axi_dmac_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
+MODULE_LICENSE("GPL v2");
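
As the header comment in dma-axi-dmac.c explains, each channel's direction is fixed when the core is synthesized, and cyclic transfers run without per-period interrupts when no completion callback is installed. A hypothetical peripheral-side sketch, not part of this patch (helper name invented, the channel assumed to come from dma_request_slave_channel()), starting a capture ring on such a channel:

    #include <linux/dmaengine.h>

    static int example_start_capture(struct dma_chan *chan, dma_addr_t buf,
                                     size_t buf_len, size_t period_len)
    {
            struct dma_async_tx_descriptor *tx;

            tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                           DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
            if (!tx)
                    return -EINVAL;

            dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            return 0;
    }
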
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 26d2f0e..dade7c4 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -145,7 +145,8 @@
 	struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
 };
 
-struct jz4780_dma_data {
+struct jz4780_dma_filter_data {
+	struct device_node *of_node;
 	uint32_t transfer_type;
 	int channel;
 };
@@ -214,11 +215,25 @@
 	kfree(desc);
 }
 
-static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
 {
-	*ord = ffs(val) - 1;
+	int ord = ffs(val) - 1;
 
-	switch (*ord) {
+	/*
+	 * 8 byte transfer sizes are unsupported, so fall back on 4. If it's larger
+	 * than the maximum, just limit it. It is perfectly safe to fall back
+	 * in this way since we won't exceed the maximum burst size supported
+	 * by the device, the only effect is reduced efficiency. This is better
+	 * than refusing to perform the request at all.
+	 */
+	if (ord == 3)
+		ord = 2;
+	else if (ord > 7)
+		ord = 7;
+
+	*shift = ord;
+
+	switch (ord) {
 	case 0:
 		return JZ_DMA_SIZE_1_BYTE;
 	case 1:
@@ -231,20 +246,17 @@
 		return JZ_DMA_SIZE_32_BYTE;
 	case 6:
 		return JZ_DMA_SIZE_64_BYTE;
-	case 7:
-		return JZ_DMA_SIZE_128_BYTE;
 	default:
-		return -EINVAL;
+		return JZ_DMA_SIZE_128_BYTE;
 	}
 }
 
-static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
+static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
 	enum dma_transfer_direction direction)
 {
 	struct dma_slave_config *config = &jzchan->config;
 	uint32_t width, maxburst, tsz;
-	int ord;
 
 	if (direction == DMA_MEM_TO_DEV) {
 		desc->dcm = JZ_DMA_DCM_SAI;
@@ -271,8 +283,8 @@
 	 * divisible by the transfer size, and we must not use more than the
 	 * maximum burst specified by the user.
 	 */
-	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
-	jzchan->transfer_shift = ord;
+	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+				       &jzchan->transfer_shift);
 
 	switch (width) {
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -289,12 +301,14 @@
 	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
 	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
 
-	desc->dtc = len >> ord;
+	desc->dtc = len >> jzchan->transfer_shift;
+	return 0;
 }
 
 static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-	enum dma_transfer_direction direction, unsigned long flags)
+	enum dma_transfer_direction direction, unsigned long flags,
+	void *context)
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
@@ -307,12 +321,11 @@
 
 	for (i = 0; i < sg_len; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
-					sg_dma_address(&sgl[i]),
-					sg_dma_len(&sgl[i]),
-					direction);
+					      sg_dma_address(&sgl[i]),
+					      sg_dma_len(&sgl[i]),
+					      direction);
 		if (err < 0)
-			return ERR_PTR(err);
-
+			return NULL;
 
 		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;
 
@@ -354,9 +367,9 @@
 
 	for (i = 0; i < periods; i++) {
 		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
-					period_len, direction);
+					      period_len, direction);
 		if (err < 0)
-			return ERR_PTR(err);
+			return NULL;
 
 		buf_addr += period_len;
 
@@ -390,15 +403,13 @@
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
 	uint32_t tsz;
-	int ord;
 
 	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
 	if (!desc)
 		return NULL;
 
-	tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
-	if (tsz < 0)
-		return ERR_PTR(tsz);
+	tsz = jz4780_dma_transfer_size(dest | src | len,
+				       &jzchan->transfer_shift);
 
 	desc->desc[0].dsa = src;
 	desc->desc[0].dta = dest;
@@ -407,7 +418,7 @@
 			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
 			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
-	desc->desc[0].dtc = len >> ord;
+	desc->desc[0].dtc = len >> jzchan->transfer_shift;
 
 	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
 }
@@ -484,8 +495,9 @@
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 }
 
-static int jz4780_dma_terminate_all(struct jz4780_dma_chan *jzchan)
+static int jz4780_dma_terminate_all(struct dma_chan *chan)
 {
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
 	unsigned long flags;
 	LIST_HEAD(head);
@@ -507,9 +519,11 @@
 	return 0;
 }
 
-static int jz4780_dma_slave_config(struct jz4780_dma_chan *jzchan,
-	const struct dma_slave_config *config)
+static int jz4780_dma_config(struct dma_chan *chan,
+	struct dma_slave_config *config)
 {
+	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
 	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
 	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
 		return -EINVAL;
@@ -567,8 +581,8 @@
 		txstate->residue = 0;
 
 	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
-		&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
-			status = DMA_ERROR;
+	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
+		status = DMA_ERROR;
 
 	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
 	return status;
@@ -671,7 +685,10 @@
 {
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
-	struct jz4780_dma_data *data = param;
+	struct jz4780_dma_filter_data *data = param;
+
+	if (jzdma->dma_device.dev->of_node != data->of_node)
+		return false;
 
 	if (data->channel > -1) {
 		if (data->channel != jzchan->id)
@@ -690,11 +707,12 @@
 {
 	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
 	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
-	struct jz4780_dma_data data;
+	struct jz4780_dma_filter_data data;
 
 	if (dma_spec->args_count != 2)
 		return NULL;
 
+	data.of_node = ofdma->of_node;
 	data.transfer_type = dma_spec->args[0];
 	data.channel = dma_spec->args[1];
 
@@ -713,9 +731,14 @@
 				data.channel);
 			return NULL;
 		}
-	}
 
-	return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+		jzdma->chan[data.channel].transfer_type = data.transfer_type;
+
+		return dma_get_slave_channel(
+			&jzdma->chan[data.channel].vchan.chan);
+	} else {
+		return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
+	}
 }
 
 static int jz4780_dma_probe(struct platform_device *pdev)
@@ -743,23 +766,26 @@
 	if (IS_ERR(jzdma->base))
 		return PTR_ERR(jzdma->base);
 
-	jzdma->irq = platform_get_irq(pdev, 0);
-	if (jzdma->irq < 0) {
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
 		dev_err(dev, "failed to get IRQ: %d\n", ret);
-		return jzdma->irq;
+		return ret;
 	}
 
-	ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0,
-			       dev_name(dev), jzdma);
+	jzdma->irq = ret;
+
+	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+			  jzdma);
 	if (ret) {
 		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
-		return -EINVAL;
+		return ret;
 	}
 
 	jzdma->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(jzdma->clk)) {
 		dev_err(dev, "failed to get clock\n");
-		return PTR_ERR(jzdma->clk);
+		ret = PTR_ERR(jzdma->clk);
+		goto err_free_irq;
 	}
 
 	clk_prepare_enable(jzdma->clk);
@@ -775,13 +801,13 @@
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
 
 	dd->dev = dev;
-	dd->copy_align = 2; /* 2^2 = 4 byte alignment */
+	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
 	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
 	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
 	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
 	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
-	dd->device_config = jz4780_dma_slave_config;
+	dd->device_config = jz4780_dma_config;
 	dd->device_terminate_all = jz4780_dma_terminate_all;
 	dd->device_tx_status = jz4780_dma_tx_status;
 	dd->device_issue_pending = jz4780_dma_issue_pending;
@@ -790,7 +816,6 @@
 	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
-
 	/*
 	 * Enable DMA controller, mark all channels as not programmable.
 	 * Also set the FMSC bit - it increases MSC performance, so it makes
@@ -832,15 +857,24 @@
 
 err_disable_clk:
 	clk_disable_unprepare(jzdma->clk);
+
+err_free_irq:
+	free_irq(jzdma->irq, jzdma);
 	return ret;
 }
 
 static int jz4780_dma_remove(struct platform_device *pdev)
 {
 	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
+	int i;
 
 	of_dma_controller_free(pdev->dev.of_node);
-	devm_free_irq(&pdev->dev, jzdma->irq, jzdma);
+
+	free_irq(jzdma->irq, jzdma);
+
+	for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+		tasklet_kill(&jzdma->chan[i].vchan.task);
+
 	dma_async_device_unregister(&jzdma->dma_device);
 	return 0;
 }
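
jz4780_dma_transfer_size() now derives the transfer unit from the lowest set bit of address | length | burst and clamps the unsupported 8-byte and over-large cases instead of failing the request. A worked sketch of that selection, not part of the patch (helper name hypothetical, inputs assumed non-zero):

    #include <linux/bitops.h>

    static unsigned int example_pick_unit(unsigned long addr, size_t len,
                                          unsigned int burst)
    {
            /* log2 of the largest power of two dividing all operands */
            int ord = ffs(addr | len | burst) - 1;

            if (ord == 3)           /* 8-byte transfers unsupported, drop to 4 */
                    ord = 2;
            else if (ord > 7)       /* cap at the 128-byte maximum */
                    ord = 7;

            return 1u << ord;       /* e.g. 0x1018: ffs()-1 = 3, clamped to 4 bytes */
    }
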
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index 36e02f0..e00c9b0 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -6,6 +6,9 @@
 	tristate
 	select DMA_ENGINE
 
+config DW_DMAC_BIG_ENDIAN_IO
+	bool
+
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA platform driver"
 	select DW_DMAC_CORE
@@ -23,6 +26,3 @@
 	  Support the Synopsys DesignWare AHB DMA controller on the
 	  platforms that enumerate it as a PCI device. For example,
 	  Intel Medfield has integrated this GPDMA controller.
-
-config DW_DMAC_BIG_ENDIAN_IO
-	bool
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 88853af..3e5d4f1 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1000,7 +1000,7 @@
 	 * code using dma memcpy must make sure alignment of
 	 * length is at dma->copy_align boundary.
 	 */
-	dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	dma->copy_align = DMAENGINE_ALIGN_4_BYTES;
 
 	INIT_LIST_HEAD(&dma->channels);
 }
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index f42f71e..7669c7d 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -99,21 +99,13 @@
 
 static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_chan_disable(hsuc);
 	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_dma_chan_start(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
@@ -139,9 +131,9 @@
 	unsigned long flags;
 	u32 sr;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
+	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
+	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
 	return sr;
 }
@@ -273,14 +265,11 @@
 	struct hsu_dma_desc *desc = hsuc->desc;
 	size_t bytes = hsu_dma_desc_size(desc);
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 
 	return bytes;
 }
@@ -327,24 +316,6 @@
 	return 0;
 }
 
-static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_disable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
-static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_enable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
 static int hsu_dma_pause(struct dma_chan *chan)
 {
 	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
@@ -352,7 +323,7 @@
 
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
-		hsu_dma_chan_deactivate(hsuc);
+		hsu_chan_disable(hsuc);
 		hsuc->desc->status = DMA_PAUSED;
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -368,7 +339,7 @@
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
 		hsuc->desc->status = DMA_IN_PROGRESS;
-		hsu_dma_chan_activate(hsuc);
+		hsu_chan_enable(hsuc);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
@@ -441,8 +412,6 @@
 
 		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
-
-		spin_lock_init(&hsuc->lock);
 	}
 
 	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
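
Dropping the private hsuc->lock is safe only because every remaining caller of hsu_chan_enable()/hsu_chan_disable() and of the channel register helpers already runs under the virt-dma channel lock, as hsu_dma_pause()/hsu_dma_resume() above show. A minimal sketch of that calling convention (the helper name is hypothetical):

	/* Illustrative only: hardware helpers rely on vchan.lock being held. */
	static void hsu_example_quiesce(struct hsu_dma_chan *hsuc)
	{
		unsigned long flags;

		spin_lock_irqsave(&hsuc->vchan.lock, flags);
		hsu_chan_disable(hsuc);		/* no private hsuc->lock any more */
		spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
	}
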
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index 0275233..eeb9fff 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -78,7 +78,6 @@
 	struct virt_dma_chan vchan;
 
 	void __iomem *reg;
-	spinlock_t lock;
 
 	/* hardware configuration */
 	enum dma_transfer_direction direction;
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
new file mode 100644
index 0000000..18c14e1
--- /dev/null
+++ b/drivers/dma/idma64.c
@@ -0,0 +1,710 @@
+/*
+ * Core driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "idma64.h"
+
+/* Platform driver name */
+#define DRV_NAME		"idma64"
+
+/* For now we support only two channels */
+#define IDMA64_NR_CHAN		2
+
+/* ---------------------------------------------------------------------- */
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_off(struct idma64 *idma64)
+{
+	unsigned short count = 100;
+
+	dma_writel(idma64, CFG, 0);
+
+	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(BLOCK), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(SRC_TRAN), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(DST_TRAN), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+	do {
+		cpu_relax();
+	} while (dma_readl(idma64, CFG) & IDMA64_CFG_DMA_EN && --count);
+}
+
+static void idma64_on(struct idma64 *idma64)
+{
+	dma_writel(idma64, CFG, IDMA64_CFG_DMA_EN);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+	u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0);
+	u32 cfglo = 0;
+
+	/* Enforce FIFO drain when channel is suspended */
+	cfglo |= IDMA64C_CFGL_CH_DRAIN;
+
+	/* Set default burst alignment */
+	cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN;
+
+	channel_writel(idma64c, CFG_LO, cfglo);
+	channel_writel(idma64c, CFG_HI, cfghi);
+
+	/* Enable interrupts */
+	channel_set_bit(idma64, MASK(XFER), idma64c->mask);
+	channel_set_bit(idma64, MASK(ERROR), idma64c->mask);
+
+	/*
+	 * Make sure the controller is turned on.
+	 *
+	 * The iDMA is turned off in ->probe() and loses context during a system
+	 * suspend / resume cycle. That's why we have to enable it each time we
+	 * use it.
+	 */
+	idma64_on(idma64);
+}
+
+static void idma64_chan_stop(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+	channel_clear_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_chan_start(struct idma64 *idma64, struct idma64_chan *idma64c)
+{
+	struct idma64_desc *desc = idma64c->desc;
+	struct idma64_hw_desc *hw = &desc->hw[0];
+
+	channel_writeq(idma64c, SAR, 0);
+	channel_writeq(idma64c, DAR, 0);
+
+	channel_writel(idma64c, CTL_HI, IDMA64C_CTLH_BLOCK_TS(~0UL));
+	channel_writel(idma64c, CTL_LO, IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
+
+	channel_writeq(idma64c, LLP, hw->llp);
+
+	channel_set_bit(idma64, CH_EN, idma64c->mask);
+}
+
+static void idma64_stop_transfer(struct idma64_chan *idma64c)
+{
+	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+
+	idma64_chan_stop(idma64, idma64c);
+}
+
+static void idma64_start_transfer(struct idma64_chan *idma64c)
+{
+	struct idma64 *idma64 = to_idma64(idma64c->vchan.chan.device);
+	struct virt_dma_desc *vdesc;
+
+	/* Get the next descriptor */
+	vdesc = vchan_next_desc(&idma64c->vchan);
+	if (!vdesc) {
+		idma64c->desc = NULL;
+		return;
+	}
+
+	list_del(&vdesc->node);
+	idma64c->desc = to_idma64_desc(vdesc);
+
+	/* Configure the channel */
+	idma64_chan_init(idma64, idma64c);
+
+	/* Start the channel with a new descriptor */
+	idma64_chan_start(idma64, idma64c);
+}
+
+/* ---------------------------------------------------------------------- */
+
+static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
+		u32 status_err, u32 status_xfer)
+{
+	struct idma64_chan *idma64c = &idma64->chan[c];
+	struct idma64_desc *desc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	desc = idma64c->desc;
+	if (desc) {
+		if (status_err & (1 << c)) {
+			dma_writel(idma64, CLEAR(ERROR), idma64c->mask);
+			desc->status = DMA_ERROR;
+		} else if (status_xfer & (1 << c)) {
+			dma_writel(idma64, CLEAR(XFER), idma64c->mask);
+			desc->status = DMA_COMPLETE;
+			vchan_cookie_complete(&desc->vdesc);
+			idma64_start_transfer(idma64c);
+		}
+
+		/* idma64_start_transfer() updates idma64c->desc */
+		if (idma64c->desc == NULL || desc->status == DMA_ERROR)
+			idma64_stop_transfer(idma64c);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static irqreturn_t idma64_irq(int irq, void *dev)
+{
+	struct idma64 *idma64 = dev;
+	u32 status = dma_readl(idma64, STATUS_INT);
+	u32 status_xfer;
+	u32 status_err;
+	unsigned short i;
+
+	dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
+
+	/* Check if we have any interrupt from the DMA controller */
+	if (!status)
+		return IRQ_NONE;
+
+	/* Disable interrupts */
+	channel_clear_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+	channel_clear_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+	status_xfer = dma_readl(idma64, RAW(XFER));
+	status_err = dma_readl(idma64, RAW(ERROR));
+
+	for (i = 0; i < idma64->dma.chancnt; i++)
+		idma64_chan_irq(idma64, i, status_err, status_xfer);
+
+	/* Re-enable interrupts */
+	channel_set_bit(idma64, MASK(XFER), idma64->all_chan_mask);
+	channel_set_bit(idma64, MASK(ERROR), idma64->all_chan_mask);
+
+	return IRQ_HANDLED;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static struct idma64_desc *idma64_alloc_desc(unsigned int ndesc)
+{
+	struct idma64_desc *desc;
+
+	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+	if (!desc)
+		return NULL;
+
+	desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT);
+	if (!desc->hw) {
+		kfree(desc);
+		return NULL;
+	}
+
+	return desc;
+}
+
+static void idma64_desc_free(struct idma64_chan *idma64c,
+		struct idma64_desc *desc)
+{
+	struct idma64_hw_desc *hw;
+
+	if (desc->ndesc) {
+		unsigned int i = desc->ndesc;
+
+		do {
+			hw = &desc->hw[--i];
+			dma_pool_free(idma64c->pool, hw->lli, hw->llp);
+		} while (i);
+	}
+
+	kfree(desc->hw);
+	kfree(desc);
+}
+
+static void idma64_vdesc_free(struct virt_dma_desc *vdesc)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(vdesc->tx.chan);
+
+	idma64_desc_free(idma64c, to_idma64_desc(vdesc));
+}
+
+static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw,
+		struct dma_slave_config *config,
+		enum dma_transfer_direction direction, u64 llp)
+{
+	struct idma64_lli *lli = hw->lli;
+	u64 sar, dar;
+	u32 ctlhi = IDMA64C_CTLH_BLOCK_TS(hw->len);
+	u32 ctllo = IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN;
+	u32 src_width, dst_width;
+
+	if (direction == DMA_MEM_TO_DEV) {
+		sar = hw->phys;
+		dar = config->dst_addr;
+		ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC |
+			 IDMA64C_CTLL_FC_M2P;
+		src_width = min_t(u32, 2, __fls(sar | hw->len));
+		dst_width = __fls(config->dst_addr_width);
+	} else {	/* DMA_DEV_TO_MEM */
+		sar = config->src_addr;
+		dar = hw->phys;
+		ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX |
+			 IDMA64C_CTLL_FC_P2M;
+		src_width = __fls(config->src_addr_width);
+		dst_width = min_t(u32, 2, __fls(dar | hw->len));
+	}
+
+	lli->sar = sar;
+	lli->dar = dar;
+
+	lli->ctlhi = ctlhi;
+	lli->ctllo = ctllo |
+		     IDMA64C_CTLL_SRC_MSIZE(config->src_maxburst) |
+		     IDMA64C_CTLL_DST_MSIZE(config->dst_maxburst) |
+		     IDMA64C_CTLL_DST_WIDTH(dst_width) |
+		     IDMA64C_CTLL_SRC_WIDTH(src_width);
+
+	lli->llp = llp;
+	return hw->llp;
+}
+
+static void idma64_desc_fill(struct idma64_chan *idma64c,
+		struct idma64_desc *desc)
+{
+	struct dma_slave_config *config = &idma64c->config;
+	struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1];
+	struct idma64_lli *lli = hw->lli;
+	u64 llp = 0;
+	unsigned int i = desc->ndesc;
+
+	/* Fill the hardware descriptors and link them to a list */
+	do {
+		hw = &desc->hw[--i];
+		llp = idma64_hw_desc_fill(hw, config, desc->direction, llp);
+		desc->length += hw->len;
+	} while (i);
+
+	/* Trigger interrupt after last block */
+	lli->ctllo |= IDMA64C_CTLL_INT_EN;
+}
+
+static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
+		struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	struct idma64_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = idma64_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		struct idma64_hw_desc *hw = &desc->hw[i];
+
+		/* Allocate DMA capable memory for hardware descriptor */
+		hw->lli = dma_pool_alloc(idma64c->pool, GFP_NOWAIT, &hw->llp);
+		if (!hw->lli) {
+			desc->ndesc = i;
+			idma64_desc_free(idma64c, desc);
+			return NULL;
+		}
+
+		hw->phys = sg_dma_address(sg);
+		hw->len = sg_dma_len(sg);
+	}
+
+	desc->ndesc = sg_len;
+	desc->direction = direction;
+	desc->status = DMA_IN_PROGRESS;
+
+	idma64_desc_fill(idma64c, desc);
+	return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
+}
+
+static void idma64_issue_pending(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc)
+		idma64_start_transfer(idma64c);
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+}
+
+static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
+{
+	struct idma64_desc *desc = idma64c->desc;
+	struct idma64_hw_desc *hw;
+	size_t bytes = desc->length;
+	u64 llp;
+	u32 ctlhi;
+	unsigned int i = 0;
+
+	llp = channel_readq(idma64c, LLP);
+	do {
+		hw = &desc->hw[i];
+	} while ((hw->llp != llp) && (++i < desc->ndesc));
+
+	if (!i)
+		return bytes;
+
+	do {
+		bytes -= desc->hw[--i].len;
+	} while (i);
+
+	ctlhi = channel_readl(idma64c, CTL_HI);
+	return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
+}
+
+static enum dma_status idma64_tx_status(struct dma_chan *chan,
+		dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	size_t bytes;
+	unsigned long flags;
+
+	status = dma_cookie_status(chan, cookie, state);
+	if (status == DMA_COMPLETE)
+		return status;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	vdesc = vchan_find_desc(&idma64c->vchan, cookie);
+	if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) {
+		bytes = idma64_active_desc_size(idma64c);
+		dma_set_residue(state, bytes);
+		status = idma64c->desc->status;
+	} else if (vdesc) {
+		bytes = to_idma64_desc(vdesc)->length;
+		dma_set_residue(state, bytes);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return status;
+}
+
+static void convert_burst(u32 *maxburst)
+{
+	if (*maxburst)
+		*maxburst = __fls(*maxburst);
+	else
+		*maxburst = 0;
+}
+
+static int idma64_slave_config(struct dma_chan *chan,
+		struct dma_slave_config *config)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	/* Check if chan will be configured for slave transfers */
+	if (!is_slave_direction(config->direction))
+		return -EINVAL;
+
+	memcpy(&idma64c->config, config, sizeof(idma64c->config));
+
+	convert_burst(&idma64c->config.src_maxburst);
+	convert_burst(&idma64c->config.dst_maxburst);
+
+	return 0;
+}
+
+static void idma64_chan_deactivate(struct idma64_chan *idma64c)
+{
+	unsigned short count = 100;
+	u32 cfglo;
+
+	cfglo = channel_readl(idma64c, CFG_LO);
+	channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP);
+	do {
+		udelay(1);
+		cfglo = channel_readl(idma64c, CFG_LO);
+	} while (!(cfglo & IDMA64C_CFGL_FIFO_EMPTY) && --count);
+}
+
+static void idma64_chan_activate(struct idma64_chan *idma64c)
+{
+	u32 cfglo;
+
+	cfglo = channel_readl(idma64c, CFG_LO);
+	channel_writel(idma64c, CFG_LO, cfglo & ~IDMA64C_CFGL_CH_SUSP);
+}
+
+static int idma64_pause(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) {
+		idma64_chan_deactivate(idma64c);
+		idma64c->desc->status = DMA_PAUSED;
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return 0;
+}
+
+static int idma64_resume(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) {
+		idma64c->desc->status = DMA_IN_PROGRESS;
+		idma64_chan_activate(idma64c);
+	}
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	return 0;
+}
+
+static int idma64_terminate_all(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&idma64c->vchan.lock, flags);
+	idma64_chan_deactivate(idma64c);
+	idma64_stop_transfer(idma64c);
+	if (idma64c->desc) {
+		idma64_vdesc_free(&idma64c->desc->vdesc);
+		idma64c->desc = NULL;
+	}
+	vchan_get_all_descriptors(&idma64c->vchan, &head);
+	spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&idma64c->vchan, &head);
+	return 0;
+}
+
+static int idma64_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	/* Create a pool of consistent memory blocks for hardware descriptors */
+	idma64c->pool = dma_pool_create(dev_name(chan2dev(chan)),
+					chan->device->dev,
+					sizeof(struct idma64_lli), 8, 0);
+	if (!idma64c->pool) {
+		dev_err(chan2dev(chan), "No memory for descriptors\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void idma64_free_chan_resources(struct dma_chan *chan)
+{
+	struct idma64_chan *idma64c = to_idma64_chan(chan);
+
+	vchan_free_chan_resources(to_virt_chan(chan));
+	dma_pool_destroy(idma64c->pool);
+	idma64c->pool = NULL;
+}
+
+/* ---------------------------------------------------------------------- */
+
+#define IDMA64_BUSWIDTHS				\
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		|	\
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		|	\
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+static int idma64_probe(struct idma64_chip *chip)
+{
+	struct idma64 *idma64;
+	unsigned short nr_chan = IDMA64_NR_CHAN;
+	unsigned short i;
+	int ret;
+
+	idma64 = devm_kzalloc(chip->dev, sizeof(*idma64), GFP_KERNEL);
+	if (!idma64)
+		return -ENOMEM;
+
+	idma64->regs = chip->regs;
+	chip->idma64 = idma64;
+
+	idma64->chan = devm_kcalloc(chip->dev, nr_chan, sizeof(*idma64->chan),
+				    GFP_KERNEL);
+	if (!idma64->chan)
+		return -ENOMEM;
+
+	idma64->all_chan_mask = (1 << nr_chan) - 1;
+
+	/* Turn off iDMA controller */
+	idma64_off(idma64);
+
+	ret = devm_request_irq(chip->dev, chip->irq, idma64_irq, IRQF_SHARED,
+			       dev_name(chip->dev), idma64);
+	if (ret)
+		return ret;
+
+	INIT_LIST_HEAD(&idma64->dma.channels);
+	for (i = 0; i < nr_chan; i++) {
+		struct idma64_chan *idma64c = &idma64->chan[i];
+
+		idma64c->vchan.desc_free = idma64_vdesc_free;
+		vchan_init(&idma64c->vchan, &idma64->dma);
+
+		idma64c->regs = idma64->regs + i * IDMA64_CH_LENGTH;
+		idma64c->mask = BIT(i);
+	}
+
+	dma_cap_set(DMA_SLAVE, idma64->dma.cap_mask);
+	dma_cap_set(DMA_PRIVATE, idma64->dma.cap_mask);
+
+	idma64->dma.device_alloc_chan_resources = idma64_alloc_chan_resources;
+	idma64->dma.device_free_chan_resources = idma64_free_chan_resources;
+
+	idma64->dma.device_prep_slave_sg = idma64_prep_slave_sg;
+
+	idma64->dma.device_issue_pending = idma64_issue_pending;
+	idma64->dma.device_tx_status = idma64_tx_status;
+
+	idma64->dma.device_config = idma64_slave_config;
+	idma64->dma.device_pause = idma64_pause;
+	idma64->dma.device_resume = idma64_resume;
+	idma64->dma.device_terminate_all = idma64_terminate_all;
+
+	idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS;
+	idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
+	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+	idma64->dma.dev = chip->dev;
+
+	ret = dma_async_device_register(&idma64->dma);
+	if (ret)
+		return ret;
+
+	dev_info(chip->dev, "Found Intel integrated DMA 64-bit\n");
+	return 0;
+}
+
+static int idma64_remove(struct idma64_chip *chip)
+{
+	struct idma64 *idma64 = chip->idma64;
+	unsigned short i;
+
+	dma_async_device_unregister(&idma64->dma);
+
+	/*
+	 * Explicitly call devm_free_irq() to avoid the side effects with
+	 * the scheduled tasklets.
+	 */
+	devm_free_irq(chip->dev, chip->irq, idma64);
+
+	for (i = 0; i < idma64->dma.chancnt; i++) {
+		struct idma64_chan *idma64c = &idma64->chan[i];
+
+		tasklet_kill(&idma64c->vchan.task);
+	}
+
+	return 0;
+}
+
+/* ---------------------------------------------------------------------- */
+
+static int idma64_platform_probe(struct platform_device *pdev)
+{
+	struct idma64_chip *chip;
+	struct device *dev = &pdev->dev;
+	struct resource *mem;
+	int ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->irq = platform_get_irq(pdev, 0);
+	if (chip->irq < 0)
+		return chip->irq;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->regs = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(chip->regs))
+		return PTR_ERR(chip->regs);
+
+	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (ret)
+		return ret;
+
+	chip->dev = dev;
+
+	ret = idma64_probe(chip);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, chip);
+	return 0;
+}
+
+static int idma64_platform_remove(struct platform_device *pdev)
+{
+	struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+	return idma64_remove(chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int idma64_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+	idma64_off(chip->idma64);
+	return 0;
+}
+
+static int idma64_pm_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct idma64_chip *chip = platform_get_drvdata(pdev);
+
+	idma64_on(chip->idma64);
+	return 0;
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops idma64_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
+};
+
+static struct platform_driver idma64_platform_driver = {
+	.probe		= idma64_platform_probe,
+	.remove		= idma64_platform_remove,
+	.driver = {
+		.name	= DRV_NAME,
+		.pm	= &idma64_dev_pm_ops,
+	},
+};
+
+module_platform_driver(idma64_platform_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("iDMA64 core driver");
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_ALIAS("platform:" DRV_NAME);
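
Nothing idma64-specific is exported to clients; consumers drive the controller through the standard dmaengine slave API. A hedged sketch of the consumer side, assuming a channel has already been obtained; the FIFO address and burst size are placeholders:

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	/* Illustrative only: generic dmaengine slave RX against a driver
	 * such as idma64; "chan", "sgl" and "fifo" are placeholders. */
	static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
				    unsigned int sg_len, dma_addr_t fifo)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_MEM,
			.src_addr	= fifo,
			.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst	= 8,
		};
		struct dma_async_tx_descriptor *desc;

		if (dmaengine_slave_config(chan, &cfg))
			return -EINVAL;

		desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -ENOMEM;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}
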
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
new file mode 100644
index 0000000..a4d9968
--- /dev/null
+++ b/drivers/dma/idma64.h
@@ -0,0 +1,233 @@
+/*
+ * Driver for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2015 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DMA_IDMA64_H__
+#define __DMA_IDMA64_H__
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "virt-dma.h"
+
+/* Channel registers */
+
+#define IDMA64_CH_SAR		0x00	/* Source Address Register */
+#define IDMA64_CH_DAR		0x08	/* Destination Address Register */
+#define IDMA64_CH_LLP		0x10	/* Linked List Pointer */
+#define IDMA64_CH_CTL_LO	0x18	/* Control Register Low */
+#define IDMA64_CH_CTL_HI	0x1c	/* Control Register High */
+#define IDMA64_CH_SSTAT		0x20
+#define IDMA64_CH_DSTAT		0x28
+#define IDMA64_CH_SSTATAR	0x30
+#define IDMA64_CH_DSTATAR	0x38
+#define IDMA64_CH_CFG_LO	0x40	/* Configuration Register Low */
+#define IDMA64_CH_CFG_HI	0x44	/* Configuration Register High */
+#define IDMA64_CH_SGR		0x48
+#define IDMA64_CH_DSR		0x50
+
+#define IDMA64_CH_LENGTH	0x58
+
+/* Bitfields in CTL_LO */
+#define IDMA64C_CTLL_INT_EN		(1 << 0)	/* irqs enabled? */
+#define IDMA64C_CTLL_DST_WIDTH(x)	((x) << 1)	/* bytes per element */
+#define IDMA64C_CTLL_SRC_WIDTH(x)	((x) << 4)
+#define IDMA64C_CTLL_DST_INC		(0 << 8)	/* DAR update/not */
+#define IDMA64C_CTLL_DST_FIX		(1 << 8)
+#define IDMA64C_CTLL_SRC_INC		(0 << 10)	/* SAR update/not */
+#define IDMA64C_CTLL_SRC_FIX		(1 << 10)
+#define IDMA64C_CTLL_DST_MSIZE(x)	((x) << 11)	/* burst, #elements */
+#define IDMA64C_CTLL_SRC_MSIZE(x)	((x) << 14)
+#define IDMA64C_CTLL_FC_M2P		(1 << 20)	/* mem-to-periph */
+#define IDMA64C_CTLL_FC_P2M		(2 << 20)	/* periph-to-mem */
+#define IDMA64C_CTLL_LLP_D_EN		(1 << 27)	/* dest block chain */
+#define IDMA64C_CTLL_LLP_S_EN		(1 << 28)	/* src block chain */
+
+/* Bitfields in CTL_HI */
+#define IDMA64C_CTLH_BLOCK_TS(x)	((x) & ((1 << 17) - 1))
+#define IDMA64C_CTLH_DONE		(1 << 17)
+
+/* Bitfields in CFG_LO */
+#define IDMA64C_CFGL_DST_BURST_ALIGN	(1 << 0)	/* dst burst align */
+#define IDMA64C_CFGL_SRC_BURST_ALIGN	(1 << 1)	/* src burst align */
+#define IDMA64C_CFGL_CH_SUSP		(1 << 8)
+#define IDMA64C_CFGL_FIFO_EMPTY		(1 << 9)
+#define IDMA64C_CFGL_CH_DRAIN		(1 << 10)	/* drain FIFO */
+#define IDMA64C_CFGL_DST_OPT_BL		(1 << 20)	/* optimize dst burst length */
+#define IDMA64C_CFGL_SRC_OPT_BL		(1 << 21)	/* optimize src burst length */
+
+/* Bitfields in CFG_HI */
+#define IDMA64C_CFGH_SRC_PER(x)		((x) << 0)	/* src peripheral */
+#define IDMA64C_CFGH_DST_PER(x)		((x) << 4)	/* dst peripheral */
+#define IDMA64C_CFGH_RD_ISSUE_THD(x)	((x) << 8)
+#define IDMA64C_CFGH_RW_ISSUE_THD(x)	((x) << 18)
+
+/* Interrupt registers */
+
+#define IDMA64_INT_XFER		0x00
+#define IDMA64_INT_BLOCK	0x08
+#define IDMA64_INT_SRC_TRAN	0x10
+#define IDMA64_INT_DST_TRAN	0x18
+#define IDMA64_INT_ERROR	0x20
+
+#define IDMA64_RAW(x)		(0x2c0 + IDMA64_INT_##x)	/* r */
+#define IDMA64_STATUS(x)	(0x2e8 + IDMA64_INT_##x)	/* r (raw & mask) */
+#define IDMA64_MASK(x)		(0x310 + IDMA64_INT_##x)	/* rw (set = irq enabled) */
+#define IDMA64_CLEAR(x)		(0x338 + IDMA64_INT_##x)	/* w (ack, affects "raw") */
+
+/* Common registers */
+
+#define IDMA64_STATUS_INT	0x360	/* r */
+#define IDMA64_CFG		0x398
+#define IDMA64_CH_EN		0x3a0
+
+/* Bitfields in CFG */
+#define IDMA64_CFG_DMA_EN		(1 << 0)
+
+/* Hardware descriptor for Linked List transfers */
+struct idma64_lli {
+	u64		sar;
+	u64		dar;
+	u64		llp;
+	u32		ctllo;
+	u32		ctlhi;
+	u32		sstat;
+	u32		dstat;
+};
+
+struct idma64_hw_desc {
+	struct idma64_lli *lli;
+	dma_addr_t llp;
+	dma_addr_t phys;
+	unsigned int len;
+};
+
+struct idma64_desc {
+	struct virt_dma_desc vdesc;
+	enum dma_transfer_direction direction;
+	struct idma64_hw_desc *hw;
+	unsigned int ndesc;
+	size_t length;
+	enum dma_status status;
+};
+
+static inline struct idma64_desc *to_idma64_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct idma64_desc, vdesc);
+}
+
+struct idma64_chan {
+	struct virt_dma_chan vchan;
+
+	void __iomem *regs;
+
+	/* hardware configuration */
+	enum dma_transfer_direction direction;
+	unsigned int mask;
+	struct dma_slave_config config;
+
+	void *pool;
+	struct idma64_desc *desc;
+};
+
+static inline struct idma64_chan *to_idma64_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct idma64_chan, vchan.chan);
+}
+
+#define channel_set_bit(idma64, reg, mask)	\
+	dma_writel(idma64, reg, ((mask) << 8) | (mask))
+#define channel_clear_bit(idma64, reg, mask)	\
+	dma_writel(idma64, reg, ((mask) << 8) | 0)
+
+static inline u32 idma64c_readl(struct idma64_chan *idma64c, int offset)
+{
+	return readl(idma64c->regs + offset);
+}
+
+static inline void idma64c_writel(struct idma64_chan *idma64c, int offset,
+				  u32 value)
+{
+	writel(value, idma64c->regs + offset);
+}
+
+#define channel_readl(idma64c, reg)		\
+	idma64c_readl(idma64c, IDMA64_CH_##reg)
+#define channel_writel(idma64c, reg, value)	\
+	idma64c_writel(idma64c, IDMA64_CH_##reg, (value))
+
+static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset)
+{
+	u64 l, h;
+
+	l = idma64c_readl(idma64c, offset);
+	h = idma64c_readl(idma64c, offset + 4);
+
+	return l | (h << 32);
+}
+
+static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset,
+				  u64 value)
+{
+	idma64c_writel(idma64c, offset, value);
+	idma64c_writel(idma64c, offset + 4, value >> 32);
+}
+
+#define channel_readq(idma64c, reg)		\
+	idma64c_readq(idma64c, IDMA64_CH_##reg)
+#define channel_writeq(idma64c, reg, value)	\
+	idma64c_writeq(idma64c, IDMA64_CH_##reg, (value))
+
+struct idma64 {
+	struct dma_device dma;
+
+	void __iomem *regs;
+
+	/* channels */
+	unsigned short all_chan_mask;
+	struct idma64_chan *chan;
+};
+
+static inline struct idma64 *to_idma64(struct dma_device *ddev)
+{
+	return container_of(ddev, struct idma64, dma);
+}
+
+static inline u32 idma64_readl(struct idma64 *idma64, int offset)
+{
+	return readl(idma64->regs + offset);
+}
+
+static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
+{
+	writel(value, idma64->regs + offset);
+}
+
+#define dma_readl(idma64, reg)			\
+	idma64_readl(idma64, IDMA64_##reg)
+#define dma_writel(idma64, reg, value)		\
+	idma64_writel(idma64, IDMA64_##reg, (value))
+
+/**
+ * struct idma64_chip - representation of DesignWare DMA controller hardware
+ * @dev:		struct device of the DMA controller
+ * @irq:		irq line
+ * @regs:		memory mapped I/O space
+ * @idma64:		struct idma64 that is filled in by idma64_probe()
+ */
+struct idma64_chip {
+	struct device	*dev;
+	int		irq;
+	void __iomem	*regs;
+	struct idma64	*idma64;
+};
+
+#endif /* __DMA_IDMA64_H__ */
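
Two access conventions in this header are easy to miss: 64-bit channel registers are split into two 32-bit halves (channel_readq()/channel_writeq() above), and the shared CH_EN/MASK registers carry a write-enable byte in bits 15:8, so one channel's bit can be changed without a read-modify-write cycle. A worked example of the latter for channel 1 (mask = BIT(1) = 0x02):

	/* Illustrative only: what the masked-write macros expand to. */
	channel_set_bit(idma64, CH_EN, BIT(1));		/* writes 0x0202: enable ch1  */
	channel_clear_bit(idma64, CH_EN, BIT(1));	/* writes 0x0200: disable ch1 */
	/* Bits 15:8 select which channel bits the write may modify, so the
	 * other channel's enable bit is left untouched. */
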
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 865501fc..48d85f8 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1083,8 +1083,12 @@
 	if (IS_ERR(imxdma->dma_ahb))
 		return PTR_ERR(imxdma->dma_ahb);
 
-	clk_prepare_enable(imxdma->dma_ipg);
-	clk_prepare_enable(imxdma->dma_ahb);
+	ret = clk_prepare_enable(imxdma->dma_ipg);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(imxdma->dma_ahb);
+	if (ret)
+		goto disable_dma_ipg_clk;
 
 	/* reset DMA module */
 	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);
@@ -1094,20 +1098,20 @@
 				       dma_irq_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		irq_err = platform_get_irq(pdev, 1);
 		if (irq_err < 0) {
 			ret = irq_err;
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 
 		ret = devm_request_irq(&pdev->dev, irq_err,
 				       imxdma_err_handler, 0, "DMA", imxdma);
 		if (ret) {
 			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
-			goto err;
+			goto disable_dma_ahb_clk;
 		}
 	}
 
@@ -1144,7 +1148,7 @@
 				dev_warn(imxdma->dev, "Can't register IRQ %d "
 					 "for DMA channel %d\n",
 					 irq + i, i);
-				goto err;
+				goto disable_dma_ahb_clk;
 			}
 			init_timer(&imxdmac->watchdog);
 			imxdmac->watchdog.function = &imxdma_watchdog;
@@ -1183,14 +1187,14 @@
 
 	platform_set_drvdata(pdev, imxdma);
 
-	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
+	imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES;
 	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
 	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
 
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
-		goto err;
+		goto disable_dma_ahb_clk;
 	}
 
 	if (pdev->dev.of_node) {
@@ -1206,9 +1210,10 @@
 
 err_of_dma_controller:
 	dma_async_device_unregister(&imxdma->dma_device);
-err:
-	clk_disable_unprepare(imxdma->dma_ipg);
+disable_dma_ahb_clk:
 	clk_disable_unprepare(imxdma->dma_ahb);
+disable_dma_ipg_clk:
+	clk_disable_unprepare(imxdma->dma_ipg);
 	return ret;
 }
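
The relabelled error path follows the usual unwind-in-reverse-order convention: each goto label undoes exactly the resources acquired before the failing step, and the labels are named after what they release. A minimal sketch of that pattern for two clocks; the driver data and the extra setup step are hypothetical:

	/* Illustrative only: reverse-order unwinding with named labels. */
	static int example_probe_clocks(struct example_priv *priv)
	{
		int ret;

		ret = clk_prepare_enable(priv->clk_ipg);
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk_ahb);
		if (ret)
			goto disable_ipg_clk;

		ret = example_register_things(priv);	/* hypothetical step */
		if (ret)
			goto disable_ahb_clk;

		return 0;

	disable_ahb_clk:
		clk_disable_unprepare(priv->clk_ahb);
	disable_ipg_clk:
		clk_disable_unprepare(priv->clk_ipg);
		return ret;
	}
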
 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 77b6aab..9d375bc 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -35,12 +35,16 @@
 #include <linux/platform_device.h>
 #include <linux/dmaengine.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
 #include <linux/platform_data/dma-imx.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
 
@@ -124,6 +128,56 @@
 #define CHANGE_ENDIANNESS   0x80
 
 /*
+ *  p_2_p watermark_level description
+ *	Bits		Name			Description
+ *	0-7		Lower WML		Lower watermark level
+ *	8		PS			1: Pad Swallowing
+ *						0: No Pad Swallowing
+ *	9		PA			1: Pad Adding
+ *						0: No Pad Adding
+ *	10		SPDIF			If this bit is set both source
+ *						and destination are on SPBA
+ *	11		Source Bit(SP)		1: Source on SPBA
+ *						0: Source on AIPS
+ *	12		Destination Bit(DP)	1: Destination on SPBA
+ *						0: Destination on AIPS
+ *	13-15		---------		MUST BE 0
+ *	16-23		Higher WML		HWML
+ *	24-27		N			Total number of samples after
+ *						which Pad adding/Swallowing
+ *						must be done. It must be odd.
+ *	28		Lower WML Event(LWE)	SDMA events reg to check for
+ *						LWML event mask
+ *						0: LWE in EVENTS register
+ *						1: LWE in EVENTS2 register
+ *	29		Higher WML Event(HWE)	SDMA events reg to check for
+ *						HWML event mask
+ *						0: HWE in EVENTS register
+ *						1: HWE in EVENTS2 register
+ *	30		---------		MUST BE 0
+ *	31		CONT			1: Amount of samples to be
+ *						transferred is unknown and
+ *						script will keep on
+ *						transferring samples as long as
+ *						both events are detected and
+ *						script must be manually stopped
+ *						by the application
+ *						0: The amount of samples to be
+ *						transferred is equal to the
+ *						count field of mode word
+ */
+#define SDMA_WATERMARK_LEVEL_LWML	0xFF
+#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
+#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
+#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
+#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
+#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
+#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
+#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
+#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
+#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
+
+/*
  * Mode/Count of data node descriptors - IPCv2
  */
 struct sdma_mode_count {
@@ -259,8 +313,9 @@
 	struct sdma_buffer_descriptor	*bd;
 	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
+	unsigned int			device_to_device;
 	unsigned long			flags;
-	dma_addr_t			per_address;
+	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
@@ -328,6 +383,8 @@
 	u32				script_number;
 	struct sdma_script_start_addrs	*script_addrs;
 	const struct sdma_driver_data	*drvdata;
+	u32				spba_start_addr;
+	u32				spba_end_addr;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -705,6 +762,7 @@
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
+	sdmac->device_to_device = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
@@ -780,6 +838,7 @@
 
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
+	sdmac->device_to_device = per_2_per;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -792,11 +851,12 @@
 	int ret;
 	unsigned long flags;
 
-	if (sdmac->direction == DMA_DEV_TO_MEM) {
+	if (sdmac->direction == DMA_DEV_TO_MEM)
 		load_address = sdmac->pc_from_device;
-	} else {
+	else if (sdmac->direction == DMA_DEV_TO_DEV)
+		load_address = sdmac->device_to_device;
+	else
 		load_address = sdmac->pc_to_device;
-	}
 
 	if (load_address < 0)
 		return load_address;
@@ -851,6 +911,46 @@
 	return 0;
 }
 
+static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
+{
+	struct sdma_engine *sdma = sdmac->sdma;
+
+	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
+	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
+
+	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
+	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
+
+	if (sdmac->event_id0 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
+
+	if (sdmac->event_id1 > 31)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
+
+	/*
+	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
+	 * LWML and HWML in INFO (A.3.2.5.1), and also swap
+	 * r0(event_mask[1]) and r1(event_mask[0]).
+	 */
+	if (lwml > hwml) {
+		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
+						SDMA_WATERMARK_LEVEL_HWML);
+		sdmac->watermark_level |= hwml;
+		sdmac->watermark_level |= lwml << 16;
+		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
+	}
+
+	if (sdmac->per_address2 >= sdma->spba_start_addr &&
+			sdmac->per_address2 <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
+
+	if (sdmac->per_address >= sdma->spba_start_addr &&
+			sdmac->per_address <= sdma->spba_end_addr)
+		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
+
+	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
+}
+
 static int sdma_config_channel(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -869,6 +969,12 @@
 		sdma_event_enable(sdmac, sdmac->event_id0);
 	}
 
+	if (sdmac->event_id1) {
+		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
+			return -EINVAL;
+		sdma_event_enable(sdmac, sdmac->event_id1);
+	}
+
 	switch (sdmac->peripheral_type) {
 	case IMX_DMATYPE_DSP:
 		sdma_config_ownership(sdmac, false, true, true);
@@ -887,19 +993,17 @@
 			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
 		/* Handle multiple event channels differently */
 		if (sdmac->event_id1) {
-			sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
-			if (sdmac->event_id1 > 31)
-				__set_bit(31, &sdmac->watermark_level);
-			sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
-			if (sdmac->event_id0 > 31)
-				__set_bit(30, &sdmac->watermark_level);
-		} else {
+			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
+			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
+				sdma_set_watermarklevel_for_p2p(sdmac);
+		} else
 			__set_bit(sdmac->event_id0, sdmac->event_mask);
-		}
+
 		/* Watermark Level */
 		sdmac->watermark_level |= sdmac->watermark_level;
 		/* Address */
 		sdmac->shp_addr = sdmac->per_address;
+		sdmac->per_addr = sdmac->per_address2;
 	} else {
 		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
 	}
@@ -987,17 +1091,22 @@
 
 	sdmac->peripheral_type = data->peripheral_type;
 	sdmac->event_id0 = data->dma_request;
+	sdmac->event_id1 = data->dma_request2;
 
-	clk_enable(sdmac->sdma->clk_ipg);
-	clk_enable(sdmac->sdma->clk_ahb);
+	ret = clk_enable(sdmac->sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdmac->sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	ret = sdma_request_channel(sdmac);
 	if (ret)
-		return ret;
+		goto disable_clk_ahb;
 
 	ret = sdma_set_channel_priority(sdmac, prio);
 	if (ret)
-		return ret;
+		goto disable_clk_ahb;
 
 	dma_async_tx_descriptor_init(&sdmac->desc, chan);
 	sdmac->desc.tx_submit = sdma_tx_submit;
@@ -1005,6 +1114,12 @@
 	sdmac->desc.flags = DMA_CTRL_ACK;
 
 	return 0;
+
+disable_clk_ahb:
+	clk_disable(sdmac->sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdmac->sdma->clk_ipg);
+	return ret;
 }
 
 static void sdma_free_chan_resources(struct dma_chan *chan)
@@ -1221,6 +1336,14 @@
 		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
 			dmaengine_cfg->src_addr_width;
 		sdmac->word_size = dmaengine_cfg->src_addr_width;
+	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
+		sdmac->per_address2 = dmaengine_cfg->src_addr;
+		sdmac->per_address = dmaengine_cfg->dst_addr;
+		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
+			SDMA_WATERMARK_LEVEL_LWML;
+		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
+			SDMA_WATERMARK_LEVEL_HWML;
+		sdmac->word_size = dmaengine_cfg->dst_addr_width;
 	} else {
 		sdmac->per_address = dmaengine_cfg->dst_addr;
 		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -1337,6 +1460,72 @@
 	release_firmware(fw);
 }
 
+#define EVENT_REMAP_CELLS 3
+
+static int __init sdma_event_remap(struct sdma_engine *sdma)
+{
+	struct device_node *np = sdma->dev->of_node;
+	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
+	struct property *event_remap;
+	struct regmap *gpr;
+	char propname[] = "fsl,sdma-event-remap";
+	u32 reg, val, shift, num_map, i;
+	int ret = 0;
+
+	if (IS_ERR(np) || IS_ERR(gpr_np))
+		goto out;
+
+	event_remap = of_find_property(np, propname, NULL);
+	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
+	if (!num_map) {
+		dev_warn(sdma->dev, "no event needs to be remapped\n");
+		goto out;
+	} else if (num_map % EVENT_REMAP_CELLS) {
+		dev_err(sdma->dev, "the property %s length must be a multiple of %d\n",
+				propname, EVENT_REMAP_CELLS);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	gpr = syscon_node_to_regmap(gpr_np);
+	if (IS_ERR(gpr)) {
+		dev_err(sdma->dev, "failed to get gpr regmap\n");
+		ret = PTR_ERR(gpr);
+		goto out;
+	}
+
+	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
+		ret = of_property_read_u32_index(np, propname, i, &reg);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i + 1);
+			goto out;
+		}
+
+		ret = of_property_read_u32_index(np, propname, i + 2, &val);
+		if (ret) {
+			dev_err(sdma->dev, "failed to read property %s index %d\n",
+					propname, i + 2);
+			goto out;
+		}
+
+		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
+	}
+
+out:
+	if (!IS_ERR(gpr_np))
+		of_node_put(gpr_np);
+
+	return ret;
+}
+
 static int sdma_get_firmware(struct sdma_engine *sdma,
 		const char *fw_name)
 {
@@ -1354,8 +1543,12 @@
 	int i, ret;
 	dma_addr_t ccb_phys;
 
-	clk_enable(sdma->clk_ipg);
-	clk_enable(sdma->clk_ahb);
+	ret = clk_enable(sdma->clk_ipg);
+	if (ret)
+		return ret;
+	ret = clk_enable(sdma->clk_ahb);
+	if (ret)
+		goto disable_clk_ipg;
 
 	/* Be sure SDMA has not started yet */
 	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1411,8 +1604,9 @@
 	return 0;
 
 err_dma_alloc:
-	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+disable_clk_ipg:
+	clk_disable(sdma->clk_ipg);
 	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
 	return ret;
 }
@@ -1444,6 +1638,14 @@
 	data.dma_request = dma_spec->args[0];
 	data.peripheral_type = dma_spec->args[1];
 	data.priority = dma_spec->args[2];
+	/*
+	 * init dma_request2 to zero, since it is not used by the dts.
+	 * For P2P, dma_request2 is initialised through dma_request_channel():
+	 * chan->private points to the imx_dma_data, and in
+	 * device_alloc_chan_resources() imx_dma_data.dma_request2 is
+	 * copied into sdmac->event_id1.
+	 */
+	data.dma_request2 = 0;
 
 	return dma_request_channel(mask, sdma_filter_fn, &data);
 }
@@ -1453,10 +1655,12 @@
 	const struct of_device_id *of_id =
 			of_match_device(sdma_dt_ids, &pdev->dev);
 	struct device_node *np = pdev->dev.of_node;
+	struct device_node *spba_bus;
 	const char *fw_name;
 	int ret;
 	int irq;
 	struct resource *iores;
+	struct resource spba_res;
 	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	int i;
 	struct sdma_engine *sdma;
@@ -1551,6 +1755,10 @@
 	if (ret)
 		goto err_init;
 
+	ret = sdma_event_remap(sdma);
+	if (ret)
+		goto err_init;
+
 	if (sdma->drvdata->script_addrs)
 		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
 	if (pdata && pdata->script_addrs)
@@ -1608,6 +1816,14 @@
 			dev_err(&pdev->dev, "failed to register controller\n");
 			goto err_register;
 		}
+
+		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
+		ret = of_address_to_resource(spba_bus, 0, &spba_res);
+		if (!ret) {
+			sdma->spba_start_addr = spba_res.start;
+			sdma->spba_end_addr = spba_res.end;
+		}
+		of_node_put(spba_bus);
 	}
 
 	dev_info(sdma->dev, "initialized\n");
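
From a client's point of view, device-to-device transfers are still requested through dma_slave_config: sdma_config() above packs src_maxburst into the lower watermark (bits 7:0) and dst_maxburst, shifted left by 16, into the higher watermark. A hedged sketch of such a configuration; the channel, addresses and burst sizes are placeholders:

	#include <linux/dmaengine.h>

	/* Illustrative only: P2P (DMA_DEV_TO_DEV) setup as a client sees it. */
	static int example_config_p2p(struct dma_chan *chan)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_DEV_TO_DEV,
			.src_addr	= 0x02024000,	/* placeholder: source FIFO      */
			.dst_addr	= 0x0202c000,	/* placeholder: destination FIFO */
			.src_maxburst	= 6,	/* -> SDMA_WATERMARK_LEVEL_LWML         */
			.dst_maxburst	= 6,	/* -> SDMA_WATERMARK_LEVEL_HWML (<< 16) */
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		};

		return dmaengine_slave_config(chan, &cfg);
	}
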
diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile
index 0ff7270..cf5fedb 100644
--- a/drivers/dma/ioat/Makefile
+++ b/drivers/dma/ioat/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
-ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o
+ioatdma-y := init.o dma.o prep.o dca.o sysfs.o
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index ea1e107..2cb7c30 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -31,7 +31,6 @@
 
 #include "dma.h"
 #include "registers.h"
-#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -71,14 +70,6 @@
 #define APICID_BIT(x)		(DCA_TAG_MAP_VALID | (x))
 #define IOAT_TAG_MAP_LEN	8
 
-static u8 ioat_tag_map_BNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_SCNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(2), APICID_BIT(2), };
-static u8 ioat_tag_map_CNB[IOAT_TAG_MAP_LEN] = {
-	1, APICID_BIT(1), APICID_BIT(3), APICID_BIT(4), APICID_BIT(2), };
-static u8 ioat_tag_map_UNISYS[IOAT_TAG_MAP_LEN] = { 0 };
-
 /* pack PCI B/D/F into a u16 */
 static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
 {
@@ -126,96 +117,6 @@
 	struct ioat_dca_slot 	 req_slots[0];
 };
 
-/* 5000 series chipset DCA Port Requester ID Table Entry Format
- * [15:8]	PCI-Express Bus Number
- * [7:3]	PCI-Express Device Number
- * [2:0]	PCI-Express Function Number
- *
- * 5000 series chipset DCA control register format
- * [7:1]	Reserved (0)
- * [0]		Ignore Function Number
- */
-
-static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			writew(id, ioatdca->dca_base + (i * 4));
-			/* make sure the ignore function bit is off */
-			writeb(0, ioatdca->dca_base + (i * 4) + 2);
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat_dca_remove_requester(struct dca_provider *dca,
-				     struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			writew(0, ioatdca->dca_base + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat_dca_get_tag(struct dca_provider *dca,
-			   struct device *dev,
-			   int cpu)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	int i, apic_id, bit, value;
-	u8 entry, tag;
-
-	tag = 0;
-	apic_id = cpu_physical_id(cpu);
-
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++) {
-		entry = ioatdca->tag_map[i];
-		if (entry & DCA_TAG_MAP_VALID) {
-			bit = entry & ~DCA_TAG_MAP_VALID;
-			value = (apic_id & (1 << bit)) ? 1 : 0;
-		} else {
-			value = entry ? 1 : 0;
-		}
-		tag |= (value << i);
-	}
-	return tag;
-}
-
 static int ioat_dca_dev_managed(struct dca_provider *dca,
 				struct device *dev)
 {
@@ -231,260 +132,7 @@
 	return 0;
 }
 
-static struct dca_ops ioat_dca_ops = {
-	.add_requester		= ioat_dca_add_requester,
-	.remove_requester	= ioat_dca_remove_requester,
-	.get_tag		= ioat_dca_get_tag,
-	.dev_managed		= ioat_dca_dev_managed,
-};
-
-
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	u8 *tag_map = NULL;
-	int i;
-	int err;
-	u8 version;
-	u8 max_requesters;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	/* I/OAT v1 systems must have a known tag_map to support DCA */
-	switch (pdev->vendor) {
-	case PCI_VENDOR_ID_INTEL:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_INTEL_IOAT:
-			tag_map = ioat_tag_map_BNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_CNB:
-			tag_map = ioat_tag_map_CNB;
-			break;
-		case PCI_DEVICE_ID_INTEL_IOAT_SCNB:
-			tag_map = ioat_tag_map_SCNB;
-			break;
-		}
-		break;
-	case PCI_VENDOR_ID_UNISYS:
-		switch (pdev->device) {
-		case PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR:
-			tag_map = ioat_tag_map_UNISYS;
-			break;
-		}
-		break;
-	}
-	if (tag_map == NULL)
-		return NULL;
-
-	version = readb(iobase + IOAT_VER_OFFSET);
-	if (version == IOAT_VER_3_0)
-		max_requesters = IOAT3_DCA_MAX_REQ;
-	else
-		max_requesters = IOAT_DCA_MAX_REQ;
-
-	dca = alloc_dca_provider(&ioat_dca_ops,
-			sizeof(*ioatdca) +
-			(sizeof(struct ioat_dca_slot) * max_requesters));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->max_requesters = max_requesters;
-	ioatdca->dca_base = iobase + 0x54;
-
-	/* copy over the APIC ID to DCA tag mapping */
-	for (i = 0; i < IOAT_TAG_MAP_LEN; i++)
-		ioatdca->tag_map[i] = tag_map[i];
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-
-static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 id;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-	id = dcaid_from_pcidev(pdev);
-
-	if (ioatdca->requester_count == ioatdca->max_requesters)
-		return -ENODEV;
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == NULL) {
-			/* found an empty slot */
-			ioatdca->requester_count++;
-			ioatdca->req_slots[i].pdev = pdev;
-			ioatdca->req_slots[i].rid = id;
-			global_req_table =
-			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(id | IOAT_DCA_GREQID_VALID,
-			       ioatdca->iobase + global_req_table + (i * 4));
-			return i;
-		}
-	}
-	/* Error, ioatdma->requester_count is out of whack */
-	return -EFAULT;
-}
-
-static int ioat2_dca_remove_requester(struct dca_provider *dca,
-				      struct device *dev)
-{
-	struct ioat_dca_priv *ioatdca = dca_priv(dca);
-	struct pci_dev *pdev;
-	int i;
-	u16 global_req_table;
-
-	/* This implementation only supports PCI-Express */
-	if (!dev_is_pci(dev))
-		return -ENODEV;
-	pdev = to_pci_dev(dev);
-
-	for (i = 0; i < ioatdca->max_requesters; i++) {
-		if (ioatdca->req_slots[i].pdev == pdev) {
-			global_req_table =
-			      readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
-			writel(0, ioatdca->iobase + global_req_table + (i * 4));
-			ioatdca->req_slots[i].pdev = NULL;
-			ioatdca->req_slots[i].rid = 0;
-			ioatdca->requester_count--;
-			return i;
-		}
-	}
-	return -ENODEV;
-}
-
-static u8 ioat2_dca_get_tag(struct dca_provider *dca,
-			    struct device *dev,
-			    int cpu)
-{
-	u8 tag;
-
-	tag = ioat_dca_get_tag(dca, dev, cpu);
-	tag = (~tag) & 0x1F;
-	return tag;
-}
-
-static struct dca_ops ioat2_dca_ops = {
-	.add_requester		= ioat2_dca_add_requester,
-	.remove_requester	= ioat2_dca_remove_requester,
-	.get_tag		= ioat2_dca_get_tag,
-	.dev_managed		= ioat_dca_dev_managed,
-};
-
-static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset)
-{
-	int slots = 0;
-	u32 req;
-	u16 global_req_table;
-
-	global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
-	if (global_req_table == 0)
-		return 0;
-	do {
-		req = readl(iobase + global_req_table + (slots * sizeof(u32)));
-		slots++;
-	} while ((req & IOAT_DCA_GREQID_LASTID) == 0);
-
-	return slots;
-}
-
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct dca_provider *dca;
-	struct ioat_dca_priv *ioatdca;
-	int slots;
-	int i;
-	int err;
-	u32 tag_map;
-	u16 dca_offset;
-	u16 csi_fsb_control;
-	u16 pcie_control;
-	u8 bit;
-
-	if (!system_has_dca_enabled(pdev))
-		return NULL;
-
-	dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
-	if (dca_offset == 0)
-		return NULL;
-
-	slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
-	if (slots == 0)
-		return NULL;
-
-	dca = alloc_dca_provider(&ioat2_dca_ops,
-				 sizeof(*ioatdca)
-				      + (sizeof(struct ioat_dca_slot) * slots));
-	if (!dca)
-		return NULL;
-
-	ioatdca = dca_priv(dca);
-	ioatdca->iobase = iobase;
-	ioatdca->dca_base = iobase + dca_offset;
-	ioatdca->max_requesters = slots;
-
-	/* some bios might not know to turn these on */
-	csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
-		csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
-		writew(csi_fsb_control,
-		       ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
-	}
-	pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
-		pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
-		writew(pcie_control,
-		       ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
-	}
-
-
-	/* TODO version, compatibility and configuration checks */
-
-	/* copy out the APIC to DCA tag map */
-	tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
-	for (i = 0; i < 5; i++) {
-		bit = (tag_map >> (4 * i)) & 0x0f;
-		if (bit < 8)
-			ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
-		else
-			ioatdca->tag_map[i] = 0;
-	}
-
-	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
-		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
-				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
-				dev_driver_string(&pdev->dev),
-				dev_name(&pdev->dev));
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	err = register_dca_provider(dca, &pdev->dev);
-	if (err) {
-		free_dca_provider(dca);
-		return NULL;
-	}
-
-	return dca;
-}
-
-static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
+static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
 	struct pci_dev *pdev;
@@ -518,7 +166,7 @@
 	return -EFAULT;
 }
 
-static int ioat3_dca_remove_requester(struct dca_provider *dca,
+static int ioat_dca_remove_requester(struct dca_provider *dca,
 				      struct device *dev)
 {
 	struct ioat_dca_priv *ioatdca = dca_priv(dca);
@@ -545,7 +193,7 @@
 	return -ENODEV;
 }
 
-static u8 ioat3_dca_get_tag(struct dca_provider *dca,
+static u8 ioat_dca_get_tag(struct dca_provider *dca,
 			    struct device *dev,
 			    int cpu)
 {
@@ -576,14 +224,14 @@
 	return tag;
 }
 
-static struct dca_ops ioat3_dca_ops = {
-	.add_requester		= ioat3_dca_add_requester,
-	.remove_requester	= ioat3_dca_remove_requester,
-	.get_tag		= ioat3_dca_get_tag,
+static struct dca_ops ioat_dca_ops = {
+	.add_requester		= ioat_dca_add_requester,
+	.remove_requester	= ioat_dca_remove_requester,
+	.get_tag		= ioat_dca_get_tag,
 	.dev_managed		= ioat_dca_dev_managed,
 };
 
-static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
+static int ioat_dca_count_dca_slots(void *iobase, u16 dca_offset)
 {
 	int slots = 0;
 	u32 req;
@@ -618,7 +266,7 @@
 		(tag_map[4] == DCA_TAG_MAP_VALID));
 }
 
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
 	struct dca_provider *dca;
 	struct ioat_dca_priv *ioatdca;
@@ -645,11 +293,11 @@
 	if (dca_offset == 0)
 		return NULL;
 
-	slots = ioat3_dca_count_dca_slots(iobase, dca_offset);
+	slots = ioat_dca_count_dca_slots(iobase, dca_offset);
 	if (slots == 0)
 		return NULL;
 
-	dca = alloc_dca_provider(&ioat3_dca_ops,
+	dca = alloc_dca_provider(&ioat_dca_ops,
 				 sizeof(*ioatdca)
 				      + (sizeof(struct ioat_dca_slot) * slots));
 	if (!dca)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index ee0aa9f..f66b7e6 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -1,6 +1,6 @@
 /*
  * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
+ * Copyright(c) 2004 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -31,31 +31,23 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
 
 #include "../dmaengine.h"
 
-int ioat_pending_level = 4;
-module_param(ioat_pending_level, int, 0644);
-MODULE_PARM_DESC(ioat_pending_level,
-		 "high-water mark for pushing ioat descriptors (default: 4)");
-
-/* internal functions */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat);
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);
+static void ioat_eh(struct ioatdma_chan *ioat_chan);
 
 /**
  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
  * @irq: interrupt id
  * @data: interrupt data
  */
-static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
 {
 	struct ioatdma_device *instance = data;
-	struct ioat_chan_common *chan;
+	struct ioatdma_chan *ioat_chan;
 	unsigned long attnstatus;
 	int bit;
 	u8 intrctrl;
@@ -72,9 +64,9 @@
 
 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
-		chan = ioat_chan_by_index(instance, bit);
-		if (test_bit(IOAT_RUN, &chan->state))
-			tasklet_schedule(&chan->cleanup_task);
+		ioat_chan = ioat_chan_by_index(instance, bit);
+		if (test_bit(IOAT_RUN, &ioat_chan->state))
+			tasklet_schedule(&ioat_chan->cleanup_task);
 	}
 
 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -86,294 +78,32 @@
  * @irq: interrupt id
  * @data: interrupt data
  */
-static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 {
-	struct ioat_chan_common *chan = data;
+	struct ioatdma_chan *ioat_chan = data;
 
-	if (test_bit(IOAT_RUN, &chan->state))
-		tasklet_schedule(&chan->cleanup_task);
+	if (test_bit(IOAT_RUN, &ioat_chan->state))
+		tasklet_schedule(&ioat_chan->cleanup_task);
 
 	return IRQ_HANDLED;
 }
 
-/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
+void ioat_stop(struct ioatdma_chan *ioat_chan)
 {
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c = &chan->common;
-	unsigned long data = (unsigned long) c;
-
-	chan->device = device;
-	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
-	spin_lock_init(&chan->cleanup_lock);
-	chan->common.device = dma;
-	dma_cookie_init(&chan->common);
-	list_add_tail(&chan->common.device_node, &dma->channels);
-	device->idx[idx] = chan;
-	init_timer(&chan->timer);
-	chan->timer.function = device->timer_fn;
-	chan->timer.data = data;
-	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
-}
-
-/**
- * ioat1_dma_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-static int ioat1_enumerate_channels(struct ioatdma_device *device)
-{
-	u8 xfercap_scale;
-	u32 xfercap;
-	int i;
-	struct ioat_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_scale &= 0x1f; /* bits [4:0] valid */
-	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);
-
-#ifdef  CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap = xfercap;
-		spin_lock_init(&ioat->desc_lock);
-		INIT_LIST_HEAD(&ioat->free_desc);
-		INIT_LIST_HEAD(&ioat->used_desc);
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-/**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
- *                                 descriptors to hw
- * @chan: DMA channel handle
- */
-static inline void
-__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
-{
-	void __iomem *reg_base = ioat->base.reg_base;
-
-	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
-		__func__, ioat->pending);
-	ioat->pending = 0;
-	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
-}
-
-static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(chan);
-
-	if (ioat->pending > 0) {
-		spin_lock_bh(&ioat->desc_lock);
-		__ioat1_dma_memcpy_issue_pending(ioat);
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-}
-
-/**
- * ioat1_reset_channel - restart a channel
- * @ioat: IOAT DMA channel handle
- */
-static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	void __iomem *reg_base = chan->reg_base;
-	u32 chansts, chanerr;
-
-	dev_warn(to_dev(chan), "reset\n");
-	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
-	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
-	if (chanerr) {
-		dev_err(to_dev(chan),
-			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
-			chan_num(chan), chansts, chanerr);
-		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/*
-	 * whack it upside the head with a reset
-	 * and wait for things to settle out.
-	 * force the pending count to a really big negative
-	 * to make sure no one forces an issue_pending
-	 * while we're waiting.
-	 */
-
-	ioat->pending = INT_MIN;
-	writeb(IOAT_CHANCMD_RESET,
-	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	set_bit(IOAT_RESET_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + RESET_DELAY);
-}
-
-static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *first;
-	struct ioat_desc_sw *chain_tail;
-	dma_cookie_t cookie;
-
-	spin_lock_bh(&ioat->desc_lock);
-	/* cookie incr and addition to used_list must be atomic */
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	/* write address into NextDescriptor field of last desc in chain */
-	first = to_ioat_desc(desc->tx_list.next);
-	chain_tail = to_ioat_desc(ioat->used_desc.prev);
-	/* make descriptor updates globally visible before chaining */
-	wmb();
-	chain_tail->hw->next = first->txd.phys;
-	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
-	dump_desc_dbg(ioat, chain_tail);
-	dump_desc_dbg(ioat, first);
-
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	ioat->active += desc->hw->tx_cnt;
-	ioat->pending += desc->hw->tx_cnt;
-	if (ioat->pending >= ioat_pending_level)
-		__ioat1_dma_memcpy_issue_pending(ioat);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	return cookie;
-}
-
-/**
- * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
- * @ioat: the channel supplying the memory pool for the descriptors
- * @flags: allocation flags
- */
-static struct ioat_desc_sw *
-ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
-{
-	struct ioat_dma_descriptor *desc;
-	struct ioat_desc_sw *desc_sw;
-	struct ioatdma_device *ioatdma_device;
-	dma_addr_t phys;
-
-	ioatdma_device = ioat->base.device;
-	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
-	if (unlikely(!desc))
-		return NULL;
-
-	desc_sw = kzalloc(sizeof(*desc_sw), flags);
-	if (unlikely(!desc_sw)) {
-		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
-		return NULL;
-	}
-
-	memset(desc, 0, sizeof(*desc));
-
-	INIT_LIST_HEAD(&desc_sw->tx_list);
-	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
-	desc_sw->txd.tx_submit = ioat1_tx_submit;
-	desc_sw->hw = desc;
-	desc_sw->txd.phys = phys;
-	set_desc_id(desc_sw, -1);
-
-	return desc_sw;
-}
-
-static int ioat_initial_desc_count = 256;
-module_param(ioat_initial_desc_count, int, 0644);
-MODULE_PARM_DESC(ioat_initial_desc_count,
-		 "ioat1: initial descriptors per channel (default: 256)");
-/**
- * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
- * @chan: the channel to be filled out
- */
-static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
-	u32 chanerr;
-	int i;
-	LIST_HEAD(tmp_list);
-
-	/* have we already been set up? */
-	if (!list_empty(&ioat->free_desc))
-		return ioat->desccount;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	if (chanerr) {
-		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
-		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	}
-
-	/* Allocate descriptors */
-	for (i = 0; i < ioat_initial_desc_count; i++) {
-		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
-		if (!desc) {
-			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
-			break;
-		}
-		set_desc_id(desc, i);
-		list_add_tail(&desc->node, &tmp_list);
-	}
-	spin_lock_bh(&ioat->desc_lock);
-	ioat->desccount = i;
-	list_splice(&tmp_list, &ioat->free_desc);
-	spin_unlock_bh(&ioat->desc_lock);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	set_bit(IOAT_RUN, &chan->state);
-	ioat1_dma_start_null_desc(ioat);  /* give chain to dma device */
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, ioat->desccount);
-	return ioat->desccount;
-}
-
-void ioat_stop(struct ioat_chan_common *chan)
-{
-	struct ioatdma_device *device = chan->device;
-	struct pci_dev *pdev = device->pdev;
-	int chan_id = chan_num(chan);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int chan_id = chan_num(ioat_chan);
 	struct msix_entry *msix;
 
 	/* 1/ stop irq from firing tasklets
 	 * 2/ stop the tasklet from re-arming irqs
 	 */
-	clear_bit(IOAT_RUN, &chan->state);
+	clear_bit(IOAT_RUN, &ioat_chan->state);
 
 	/* flush inflight interrupts */
-	switch (device->irq_mode) {
+	switch (ioat_dma->irq_mode) {
 	case IOAT_MSIX:
-		msix = &device->msix_entries[chan_id];
+		msix = &ioat_dma->msix_entries[chan_id];
 		synchronize_irq(msix->vector);
 		break;
 	case IOAT_MSI:
@@ -385,398 +115,67 @@
 	}
 
 	/* flush inflight timers */
-	del_timer_sync(&chan->timer);
+	del_timer_sync(&ioat_chan->timer);
 
 	/* flush inflight tasklet runs */
-	tasklet_kill(&chan->cleanup_task);
+	tasklet_kill(&ioat_chan->cleanup_task);
 
 	/* final cleanup now that everything is quiesced and can't re-arm */
-	device->cleanup_fn((unsigned long) &chan->common);
+	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
 }
 
-/**
- * ioat1_dma_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-static void ioat1_dma_free_chan_resources(struct dma_chan *c)
+static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *ioatdma_device = chan->device;
-	struct ioat_desc_sw *desc, *_desc;
-	int in_use_descs = 0;
+	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
+	ioat_chan->issued = ioat_chan->head;
+	writew(ioat_chan->dmacount,
+	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+}
 
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (ioat->desccount == 0)
-		return;
+void ioat_issue_pending(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 
-	ioat_stop(chan);
-
-	/* Delay 100ms after reset to allow internal DMA logic to quiesce
-	 * before removing DMA descriptor resources.
-	 */
-	writeb(IOAT_CHANCMD_RESET,
-	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
-	mdelay(100);
-
-	spin_lock_bh(&ioat->desc_lock);
-	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
-		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
-			__func__, desc_id(desc));
-		dump_desc_dbg(ioat, desc);
-		in_use_descs++;
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
+	if (ioat_ring_pending(ioat_chan)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		__ioat_issue_pending(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
 	}
-	list_for_each_entry_safe(desc, _desc,
-				 &ioat->free_desc, node) {
-		list_del(&desc->node);
-		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
-			      desc->txd.phys);
-		kfree(desc);
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	pci_pool_free(ioatdma_device->completion_pool,
-		      chan->completion,
-		      chan->completion_dma);
-
-	/* one is ok since we left it on there on purpose */
-	if (in_use_descs > 1)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			in_use_descs - 1);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->pending = 0;
-	ioat->desccount = 0;
 }
 
 /**
- * ioat1_dma_get_next_descriptor - return the next available descriptor
- * @ioat: IOAT DMA channel handle
+ * ioat_update_pending - log pending descriptors
+ * @ioat: ioat+ channel
  *
- * Gets the next descriptor from the chain, and must be called with the
- * channel's desc_lock held.  Allocates more descriptors if the channel
- * has run out.
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark.  Called with prep_lock held
  */
-static struct ioat_desc_sw *
-ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
+static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_desc_sw *new;
-
-	if (!list_empty(&ioat->free_desc)) {
-		new = to_ioat_desc(ioat->free_desc.next);
-		list_del(&new->node);
-	} else {
-		/* try to get another desc */
-		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
-		if (!new) {
-			dev_err(to_dev(&ioat->base), "alloc failed\n");
-			return NULL;
-		}
-	}
-	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
-		__func__, desc_id(new));
-	prefetch(new->hw);
-	return new;
+	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
+		__ioat_issue_pending(ioat_chan);
 }
 
-static struct dma_async_tx_descriptor *
-ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
-		      dma_addr_t dma_src, size_t len, unsigned long flags)
+static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-	struct ioat_desc_sw *desc;
-	size_t copy;
-	LIST_HEAD(chain);
-	dma_addr_t src = dma_src;
-	dma_addr_t dest = dma_dest;
-	size_t total_len = len;
-	struct ioat_dma_descriptor *hw = NULL;
-	int tx_cnt = 0;
-
-	spin_lock_bh(&ioat->desc_lock);
-	desc = ioat1_dma_get_next_descriptor(ioat);
-	do {
-		if (!desc)
-			break;
-
-		tx_cnt++;
-		copy = min_t(size_t, len, ioat->xfercap);
-
-		hw = desc->hw;
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dest;
-
-		list_add_tail(&desc->node, &chain);
-
-		len -= copy;
-		dest += copy;
-		src += copy;
-		if (len) {
-			struct ioat_desc_sw *next;
-
-			async_tx_ack(&desc->txd);
-			next = ioat1_dma_get_next_descriptor(ioat);
-			hw->next = next ? next->txd.phys : 0;
-			dump_desc_dbg(ioat, desc);
-			desc = next;
-		} else
-			hw->next = 0;
-	} while (len);
-
-	if (!desc) {
-		struct ioat_chan_common *chan = &ioat->base;
-
-		dev_err(to_dev(chan),
-			"chan%d - get_next_desc failed\n", chan_num(chan));
-		list_splice(&chain, &ioat->free_desc);
-		spin_unlock_bh(&ioat->desc_lock);
-		return NULL;
-	}
-	spin_unlock_bh(&ioat->desc_lock);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	list_splice(&chain, &desc->tx_list);
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->tx_cnt = tx_cnt;
-	dump_desc_dbg(ioat, desc);
-
-	return &desc->txd;
-}
-
-static void ioat1_cleanup_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat1_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
-{
-	dma_addr_t phys_complete;
-	u64 completion;
-
-	completion = *chan->completion;
-	phys_complete = ioat_chansts_to_addr(completion);
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
-		(unsigned long long) phys_complete);
-
-	if (is_ioat_halted(completion)) {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
-			chanerr);
-
-		/* TODO do something to salvage the situation */
-	}
-
-	return phys_complete;
-}
-
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   dma_addr_t *phys_complete)
-{
-	*phys_complete = ioat_get_current_completion(chan);
-	if (*phys_complete == chan->last_completion)
-		return false;
-	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	return true;
-}
-
-static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct list_head *_desc, *n;
-	struct dma_async_tx_descriptor *tx;
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
-		 __func__, (unsigned long long) phys_complete);
-	list_for_each_safe(_desc, n, &ioat->used_desc) {
-		struct ioat_desc_sw *desc;
-
-		prefetch(n);
-		desc = list_entry(_desc, typeof(*desc), node);
-		tx = &desc->txd;
-		/*
-		 * Incoming DMA requests may use multiple descriptors,
-		 * due to exceeding xfercap, perhaps. If so, only the
-		 * last one will have a cookie, and require unmapping.
-		 */
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			ioat->active -= desc->hw->tx_cnt;
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys != phys_complete) {
-			/*
-			 * a completed entry, but not the last, so clean
-			 * up if the client is done with the descriptor
-			 */
-			if (async_tx_test_ack(tx))
-				list_move_tail(&desc->node, &ioat->free_desc);
-		} else {
-			/*
-			 * last used desc. Do not remove, so we can
-			 * append from it.
-			 */
-
-			/* if nothing else is pending, cancel the
-			 * completion timeout
-			 */
-			if (n == &ioat->used_desc) {
-				dev_dbg(to_dev(chan),
-					"%s cancel completion timeout\n",
-					__func__);
-				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-			}
-
-			/* TODO check status bits? */
-			break;
-		}
-	}
-
-	chan->last_completion = phys_complete;
-}
-
-/**
- * ioat1_cleanup - cleanup up finished descriptors
- * @chan: ioat channel to be cleaned up
- *
- * To prevent lock contention we defer cleanup when the locks are
- * contended with a terminal timeout that forces cleanup and catches
- * completion notification errors.
- */
-static void ioat1_cleanup(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	prefetch(chan->completion);
-
-	if (!spin_trylock_bh(&chan->cleanup_lock))
-		return;
-
-	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	if (!spin_trylock_bh(&ioat->desc_lock)) {
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	}
-
-	__cleanup(ioat, phys_complete);
-
-	spin_unlock_bh(&ioat->desc_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat1_timer_event(unsigned long data)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
-		struct ioat_desc_sw *desc;
-
-		spin_lock_bh(&ioat->desc_lock);
-
-		/* restart active descriptors */
-		desc = to_ioat_desc(ioat->used_desc.prev);
-		ioat_set_chainaddr(ioat, desc->txd.phys);
-		ioat_start(chan);
-
-		ioat->pending = 0;
-		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		spin_unlock_bh(&ioat->desc_lock);
-	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-
-		spin_lock_bh(&ioat->desc_lock);
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
-			ioat1_reset_channel(ioat);
-		else {
-			u64 status = ioat_chansts(chan);
-
-			/* manually update the last completion address */
-			if (ioat_chansts_to_addr(status) != 0)
-				*chan->completion = status;
-
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&ioat->desc_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-enum dma_status
-ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		   struct dma_tx_state *txstate)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-	struct ioatdma_device *device = chan->device;
-	enum dma_status ret;
-
-	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
-
-	device->cleanup_fn((unsigned long) c);
-
-	return dma_cookie_status(c, cookie, txstate);
-}
-
-static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_desc_sw *desc;
+	struct ioat_ring_ent *desc;
 	struct ioat_dma_descriptor *hw;
 
-	spin_lock_bh(&ioat->desc_lock);
-
-	desc = ioat1_dma_get_next_descriptor(ioat);
-
-	if (!desc) {
-		dev_err(to_dev(chan),
-			"Unable to start null desc - get next desc failed\n");
-		spin_unlock_bh(&ioat->desc_lock);
+	if (ioat_ring_space(ioat_chan) < 1) {
+		dev_err(to_dev(ioat_chan),
+			"Unable to start null desc - ring full\n");
 		return;
 	}
 
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+
 	hw = desc->hw;
 	hw->ctl = 0;
 	hw->ctl_f.null = 1;
@@ -787,460 +186,804 @@
 	hw->src_addr = 0;
 	hw->dst_addr = 0;
 	async_tx_ack(&desc->txd);
-	hw->next = 0;
-	list_add_tail(&desc->node, &ioat->used_desc);
-	dump_desc_dbg(ioat, desc);
-
-	ioat_set_chainaddr(ioat, desc->txd.phys);
-	ioat_start(chan);
-	spin_unlock_bh(&ioat->desc_lock);
+	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+	dump_desc_dbg(ioat_chan, desc);
+	/* make sure descriptors are written before we submit */
+	wmb();
+	ioat_chan->head += 1;
+	__ioat_issue_pending(ioat_chan);
 }
 
-/*
- * Perform a IOAT transaction to verify the HW works.
- */
-#define IOAT_TEST_SIZE 2000
-
-static void ioat_dma_test_callback(void *dma_async_param)
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
 {
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	__ioat_start_null_desc(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 }
 
-/**
- * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works.
- * @device: device to be tested
- */
-int ioat_dma_self_test(struct ioatdma_device *device)
+static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
 {
-	int i;
-	u8 *src;
-	u8 *dest;
-	struct dma_device *dma = &device->common;
-	struct device *dev = &device->pdev->dev;
-	struct dma_chan *dma_chan;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
+	/* set the tail to be re-issued */
+	ioat_chan->issued = ioat_chan->tail;
+	ioat_chan->dmacount = 0;
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	dev_dbg(to_dev(ioat_chan),
+		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail,
+		ioat_chan->issued, ioat_chan->dmacount);
+
+	if (ioat_ring_pending(ioat_chan)) {
+		struct ioat_ring_ent *desc;
+
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
+		__ioat_issue_pending(ioat_chan);
+	} else
+		__ioat_start_null_desc(ioat_chan);
+}
+
+static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
 	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	unsigned long flags;
+	u32 status;
 
-	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!src)
-		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
-	if (!dest) {
-		kfree(src);
-		return -ENOMEM;
+	status = ioat_chansts(ioat_chan);
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		ioat_suspend(ioat_chan);
+	while (is_ioat_active(status) || is_ioat_idle(status)) {
+		if (tmo && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		status = ioat_chansts(ioat_chan);
+		cpu_relax();
 	}
 
-	/* Fill in src buffer */
-	for (i = 0; i < IOAT_TEST_SIZE; i++)
-		src[i] = (u8)i;
-
-	/* Start copy, using first DMA channel */
-	dma_chan = container_of(dma->channels.next, struct dma_chan,
-				device_node);
-	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
-		dev_err(dev, "selftest cannot allocate chan resource\n");
-		err = -ENODEV;
-		goto out;
-	}
-
-	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma_src)) {
-		dev_err(dev, "mapping src buffer failed\n");
-		goto free_resources;
-	}
-	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dma_dest)) {
-		dev_err(dev, "mapping dest buffer failed\n");
-		goto unmap_src;
-	}
-	flags = DMA_PREP_INTERRUPT;
-	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
-						   IOAT_TEST_SIZE, flags);
-	if (!tx) {
-		dev_err(dev, "Self-test prep failed, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test setup failed, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL)
-					!= DMA_COMPLETE) {
-		dev_err(dev, "Self-test copy timed out, disabling\n");
-		err = -ENODEV;
-		goto unmap_dma;
-	}
-	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
-		dev_err(dev, "Self-test copy failed compare, disabling\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-unmap_dma:
-	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
-unmap_src:
-	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
-free_resources:
-	dma->device_free_chan_resources(dma_chan);
-out:
-	kfree(src);
-	kfree(dest);
 	return err;
 }
 
-static char ioat_interrupt_style[32] = "msix";
-module_param_string(ioat_interrupt_style, ioat_interrupt_style,
-		    sizeof(ioat_interrupt_style), 0644);
-MODULE_PARM_DESC(ioat_interrupt_style,
-		 "set ioat interrupt style: msix (default), msi, intx");
+static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
+{
+	unsigned long end = jiffies + tmo;
+	int err = 0;
+
+	ioat_reset(ioat_chan);
+	while (ioat_reset_pending(ioat_chan)) {
+		if (end && time_after(jiffies, end)) {
+			err = -ETIMEDOUT;
+			break;
+		}
+		cpu_relax();
+	}
+
+	return err;
+}
+
+static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
+	__releases(&ioat_chan->prep_lock)
+{
+	struct dma_chan *c = tx->chan;
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	dma_cookie_t cookie;
+
+	cookie = dma_cookie_assign(tx);
+	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
+
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	/* make descriptor updates visible before advancing ioat->head,
+	 * this is purposefully not smp_wmb() since we are also
+	 * publishing the descriptor updates to a dma device
+	 */
+	wmb();
+
+	ioat_chan->head += ioat_chan->produce;
+
+	ioat_update_pending(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+
+	return cookie;
+}
+
+static struct ioat_ring_ent *
+ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+{
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	struct ioatdma_device *ioat_dma;
+	dma_addr_t phys;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
+	if (!hw)
+		return NULL;
+	memset(hw, 0, sizeof(*hw));
+
+	desc = kmem_cache_zalloc(ioat_cache, flags);
+	if (!desc) {
+		pci_pool_free(ioat_dma->dma_pool, hw, phys);
+		return NULL;
+	}
+
+	dma_async_tx_descriptor_init(&desc->txd, chan);
+	desc->txd.tx_submit = ioat_tx_submit_unlock;
+	desc->hw = hw;
+	desc->txd.phys = phys;
+	return desc;
+}
+
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
+{
+	struct ioatdma_device *ioat_dma;
+
+	ioat_dma = to_ioatdma_device(chan->device);
+	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
+	kmem_cache_free(ioat_cache, desc);
+}
+
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+{
+	struct ioat_ring_ent **ring;
+	int descs = 1 << order;
+	int i;
+
+	if (order > ioat_get_max_alloc_order())
+		return NULL;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(descs, sizeof(*ring), flags);
+	if (!ring)
+		return NULL;
+	for (i = 0; i < descs; i++) {
+		ring[i] = ioat_alloc_ring_ent(c, flags);
+		if (!ring[i]) {
+			while (i--)
+				ioat_free_ring_ent(ring[i], c);
+			kfree(ring);
+			return NULL;
+		}
+		set_desc_id(ring[i], i);
+	}
+
+	/* link descs */
+	for (i = 0; i < descs-1; i++) {
+		struct ioat_ring_ent *next = ring[i+1];
+		struct ioat_dma_descriptor *hw = ring[i]->hw;
+
+		hw->next = next->txd.phys;
+	}
+	ring[i]->hw->next = ring[0]->txd.phys;
+
+	return ring;
+}
+
+static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
+{
+	/* reshape differs from normal ring allocation in that we want
+	 * to allocate a new software ring while only
+	 * extending/truncating the hardware ring
+	 */
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	const u32 curr_size = ioat_ring_size(ioat_chan);
+	const u16 active = ioat_ring_active(ioat_chan);
+	const u32 new_size = 1 << order;
+	struct ioat_ring_ent **ring;
+	u32 i;
+
+	if (order > ioat_get_max_alloc_order())
+		return false;
+
+	/* double check that we have at least 1 free descriptor */
+	if (active == curr_size)
+		return false;
+
+	/* when shrinking, verify that we can hold the current active
+	 * set in the new ring
+	 */
+	if (active >= new_size)
+		return false;
+
+	/* allocate the array to hold the software ring */
+	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
+	if (!ring)
+		return false;
+
+	/* allocate/trim descriptors as needed */
+	if (new_size > curr_size) {
+		/* copy current descriptors to the new ring */
+		for (i = 0; i < curr_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* add new descriptors to the ring */
+		for (i = curr_size; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
+			if (!ring[new_idx]) {
+				while (i--) {
+					u16 new_idx = (ioat_chan->tail+i) &
+						       (new_size-1);
+
+					ioat_free_ring_ent(ring[new_idx], c);
+				}
+				kfree(ring);
+				return false;
+			}
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* hw link new descriptors */
+		for (i = curr_size-1; i < new_size; i++) {
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+			struct ioat_ring_ent *next =
+				ring[(new_idx+1) & (new_size-1)];
+			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
+
+			hw->next = next->txd.phys;
+		}
+	} else {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_ring_ent *next;
+
+		/* copy current descriptors to the new ring, dropping the
+		 * removed descriptors
+		 */
+		for (i = 0; i < new_size; i++) {
+			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
+			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
+
+			ring[new_idx] = ioat_chan->ring[curr_idx];
+			set_desc_id(ring[new_idx], new_idx);
+		}
+
+		/* free deleted descriptors */
+		for (i = new_size; i < curr_size; i++) {
+			struct ioat_ring_ent *ent;
+
+			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
+			ioat_free_ring_ent(ent, c);
+		}
+
+		/* fix up hardware ring */
+		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
+		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
+		hw->next = next->txd.phys;
+	}
+
+	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
+		__func__, new_size);
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = ring;
+	ioat_chan->alloc_order = order;
+
+	return true;
+}
 
 /**
- * ioat_dma_setup_interrupts - setup interrupt handler
- * @device: ioat device
+ * ioat_check_space_lock - verify space and grab ring producer lock
+ * @ioat: ioat,3 channel (ring) to operate on
+ * @num_descs: allocation length
  */
-int ioat_dma_setup_interrupts(struct ioatdma_device *device)
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
+	__acquires(&ioat_chan->prep_lock)
 {
-	struct ioat_chan_common *chan;
-	struct pci_dev *pdev = device->pdev;
-	struct device *dev = &pdev->dev;
-	struct msix_entry *msix;
-	int i, j, msixcnt;
-	int err = -EINVAL;
-	u8 intrctrl = 0;
+	bool retry;
 
-	if (!strcmp(ioat_interrupt_style, "msix"))
-		goto msix;
-	if (!strcmp(ioat_interrupt_style, "msi"))
-		goto msi;
-	if (!strcmp(ioat_interrupt_style, "intx"))
-		goto intx;
-	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
-	goto err_no_irq;
+ retry:
+	spin_lock_bh(&ioat_chan->prep_lock);
+	/* never allow the last descriptor to be consumed, we need at
+	 * least one free at all times to allow for on-the-fly ring
+	 * resizing.
+	 */
+	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
+		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
+			__func__, num_descs, ioat_chan->head,
+			ioat_chan->tail, ioat_chan->issued);
+		ioat_chan->produce = num_descs;
+		return 0;  /* with ioat->prep_lock held */
+	}
+	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
 
-msix:
-	/* The number of MSI-X vectors should equal the number of channels */
-	msixcnt = device->common.chancnt;
-	for (i = 0; i < msixcnt; i++)
-		device->msix_entries[i].entry = i;
+	/* is another cpu already trying to expand the ring? */
+	if (retry)
+		goto retry;
 
-	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
-	if (err)
-		goto msi;
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
+	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
 
-	for (i = 0; i < msixcnt; i++) {
-		msix = &device->msix_entries[i];
-		chan = ioat_chan_by_index(device, i);
-		err = devm_request_irq(dev, msix->vector,
-				       ioat_dma_do_interrupt_msix, 0,
-				       "ioat-msix", chan);
-		if (err) {
-			for (j = 0; j < i; j++) {
-				msix = &device->msix_entries[j];
-				chan = ioat_chan_by_index(device, j);
-				devm_free_irq(dev, msix->vector, chan);
+	/* if we were able to expand the ring retry the allocation */
+	if (retry)
+		goto retry;
+
+	dev_dbg_ratelimited(to_dev(ioat_chan),
+			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
+			    __func__, num_descs, ioat_chan->head,
+			    ioat_chan->tail, ioat_chan->issued);
+
+	/* progress reclaim in the allocation failure case we may be
+	 * called under bh_disabled so we need to trigger the timer
+	 * event directly
+	 */
+	if (time_is_before_jiffies(ioat_chan->timer.expires)
+	    && timer_pending(&ioat_chan->timer)) {
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		ioat_timer_event((unsigned long)ioat_chan);
+	}
+
+	return -ENOMEM;
+}
+
+static bool desc_has_ext(struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	if (hw->ctl_f.op == IOAT_OP_XOR ||
+	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
+		struct ioat_xor_descriptor *xor = desc->xor;
+
+		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
+			return true;
+	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
+		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
+			return true;
+	}
+
+	return false;
+}
+
+static void
+ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
+{
+	if (!sed)
+		return;
+
+	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
+	kmem_cache_free(ioat_sed_cache, sed);
+}
+
+static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+	u64 completion;
+
+	completion = *ioat_chan->completion;
+	phys_complete = ioat_chansts_to_addr(completion);
+
+	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
+		(unsigned long long) phys_complete);
+
+	return phys_complete;
+}
+
+static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
+				   u64 *phys_complete)
+{
+	*phys_complete = ioat_get_current_completion(ioat_chan);
+	if (*phys_complete == ioat_chan->last_completion)
+		return false;
+
+	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+
+	return true;
+}
+
+static void
+desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
+{
+	struct ioat_dma_descriptor *hw = desc->hw;
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+	{
+		struct ioat_pq_descriptor *pq = desc->pq;
+
+		/* check if there's error written */
+		if (!pq->dwbes_f.wbes)
+			return;
+
+		/* need to set a chanerr var for checking to clear later */
+
+		if (pq->dwbes_f.p_val_err)
+			*desc->result |= SUM_CHECK_P_RESULT;
+
+		if (pq->dwbes_f.q_val_err)
+			*desc->result |= SUM_CHECK_Q_RESULT;
+
+		return;
+	}
+	default:
+		return;
+	}
+}
+
+/**
+ * __cleanup - reclaim used descriptors
+ * @ioat: channel (ring) to clean
+ */
+static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
+{
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	bool seen_current = false;
+	int idx = ioat_chan->tail, i;
+	u16 active;
+
+	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
+		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
+
+	/*
+	 * At restart of the channel, the completion address and the
+	 * channel status will be 0 due to starting a new chain. Since
+	 * it's new chain and the first descriptor "fails", there is
+	 * nothing to clean up. We do not want to reap the entire submitted
+	 * chain due to this 0 address value and then BUG.
+	 */
+	if (!phys_complete)
+		return;
+
+	active = ioat_ring_active(ioat_chan);
+	for (i = 0; i < active && !seen_current; i++) {
+		struct dma_async_tx_descriptor *tx;
+
+		smp_read_barrier_depends();
+		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		dump_desc_dbg(ioat_chan, desc);
+
+		/* set err stat if we are using dwbes */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			desc_get_errstat(ioat_chan, desc);
+
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
 			}
-			goto msi;
+		}
+
+		if (tx->phys == phys_complete)
+			seen_current = true;
+
+		/* skip extended descriptors */
+		if (desc_has_ext(desc)) {
+			BUG_ON(i + 1 >= active);
+			i++;
+		}
+
+		/* cleanup super extended descriptors */
+		if (desc->sed) {
+			ioat_free_sed(ioat_dma, desc->sed);
+			desc->sed = NULL;
 		}
 	}
-	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
-	device->irq_mode = IOAT_MSIX;
-	goto done;
 
-msi:
-	err = pci_enable_msi(pdev);
-	if (err)
-		goto intx;
+	/* finish all descriptor reads before incrementing tail */
+	smp_mb();
+	ioat_chan->tail = idx + i;
+	/* no active descs have written a completion? */
+	BUG_ON(active && !seen_current);
+	ioat_chan->last_completion = phys_complete;
 
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
-			       "ioat-msi", device);
-	if (err) {
+	if (active - i == 0) {
+		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
+			__func__);
+		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
+	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
+}
+
+static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	if (is_ioat_halted(*ioat_chan->completion)) {
+		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
+			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+			ioat_eh(ioat_chan);
+		}
+	}
+
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+void ioat_cleanup_event(unsigned long data)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+
+	ioat_cleanup(ioat_chan);
+	if (!test_bit(IOAT_RUN, &ioat_chan->state))
+		return;
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+}
+
+static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
+{
+	u64 phys_complete;
+
+	ioat_quiesce(ioat_chan, 0);
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	__ioat_restart_chan(ioat_chan);
+}
+
+static void ioat_eh(struct ioatdma_chan *ioat_chan)
+{
+	struct pci_dev *pdev = to_pdev(ioat_chan);
+	struct ioat_dma_descriptor *hw;
+	struct dma_async_tx_descriptor *tx;
+	u64 phys_complete;
+	struct ioat_ring_ent *desc;
+	u32 err_handled = 0;
+	u32 chanerr_int;
+	u32 chanerr;
+
+	/* cleanup so tail points to descriptor that caused the error */
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
+
+	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
+		__func__, chanerr, chanerr_int);
+
+	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
+	hw = desc->hw;
+	dump_desc_dbg(ioat_chan, desc);
+
+	switch (hw->ctl_f.op) {
+	case IOAT_OP_XOR_VAL:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		break;
+	case IOAT_OP_PQ_VAL:
+	case IOAT_OP_PQ_VAL_16S:
+		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
+			*desc->result |= SUM_CHECK_P_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
+		}
+		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
+			*desc->result |= SUM_CHECK_Q_RESULT;
+			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
+		}
+		break;
+	}
+
+	/* fault on unhandled error or spurious halt */
+	if (chanerr ^ err_handled || chanerr == 0) {
+		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
+			__func__, chanerr, err_handled);
+		BUG();
+	} else { /* cleanup the faulty descriptor */
+		tx = &desc->txd;
+		if (tx->cookie) {
+			dma_cookie_complete(tx);
+			dma_descriptor_unmap(tx);
+			if (tx->callback) {
+				tx->callback(tx->callback_param);
+				tx->callback = NULL;
+			}
+		}
+	}
+
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
+
+	/* mark faulting descriptor as complete */
+	*ioat_chan->completion = desc->txd.phys;
+
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_restart_channel(ioat_chan);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+}
+
+static void check_active(struct ioatdma_chan *ioat_chan)
+{
+	if (ioat_ring_active(ioat_chan)) {
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
+
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
+		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
+		/* if the ring is idle, empty, and oversized try to step
+		 * down the size
+		 */
+		reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
+
+		/* keep shrinking until we get back to our minimum
+		 * default size
+		 */
+		if (ioat_chan->alloc_order > ioat_get_alloc_order())
+			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
+	}
+
+}
+
+void ioat_timer_event(unsigned long data)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(ioat_chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &ioat_chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
+		__cleanup(ioat_chan, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		ioat_restart_channel(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+		spin_unlock_bh(&ioat_chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+
+	if (ioat_ring_active(ioat_chan))
+		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat_chan->prep_lock);
+		check_active(ioat_chan);
+		spin_unlock_bh(&ioat_chan->prep_lock);
+	}
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+}
+
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	enum dma_status ret;
+
+	ret = dma_cookie_status(c, cookie, txstate);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	ioat_cleanup(ioat_chan);
+
+	return dma_cookie_status(c, cookie, txstate);
+}
+
+static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
+{
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int irq = pdev->irq, i;
+
+	if (!is_bwd_ioat(pdev))
+		return 0;
+
+	switch (ioat_dma->irq_mode) {
+	case IOAT_MSIX:
+		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
+			struct msix_entry *msix = &ioat_dma->msix_entries[i];
+			struct ioatdma_chan *ioat_chan;
+
+			ioat_chan = ioat_chan_by_index(ioat_dma, i);
+			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
+		}
+
+		pci_disable_msix(pdev);
+		break;
+	case IOAT_MSI:
 		pci_disable_msi(pdev);
-		goto intx;
+		/* fall through */
+	case IOAT_INTX:
+		devm_free_irq(&pdev->dev, irq, ioat_dma);
+		break;
+	default:
+		return 0;
 	}
-	device->irq_mode = IOAT_MSI;
-	goto done;
+	ioat_dma->irq_mode = IOAT_NOIRQ;
 
-intx:
-	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
-			       IRQF_SHARED, "ioat-intx", device);
-	if (err)
-		goto err_no_irq;
-
-	device->irq_mode = IOAT_INTX;
-done:
-	if (device->intr_quirk)
-		device->intr_quirk(device);
-	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
-	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	return 0;
-
-err_no_irq:
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
-	device->irq_mode = IOAT_NOIRQ;
-	dev_err(dev, "no usable interrupts\n");
-	return err;
-}
-EXPORT_SYMBOL(ioat_dma_setup_interrupts);
-
-static void ioat_disable_interrupts(struct ioatdma_device *device)
-{
-	/* Disable all interrupt generation */
-	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
+	return ioat_dma_setup_interrupts(ioat_dma);
 }
 
-int ioat_probe(struct ioatdma_device *device)
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
 {
-	int err = -ENODEV;
-	struct dma_device *dma = &device->common;
-	struct pci_dev *pdev = device->pdev;
-	struct device *dev = &pdev->dev;
-
-	/* DMA coherent memory pool for DMA descriptor allocations */
-	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					   sizeof(struct ioat_dma_descriptor),
-					   64, 0);
-	if (!device->dma_pool) {
-		err = -ENOMEM;
-		goto err_dma_pool;
-	}
-
-	device->completion_pool = pci_pool_create("completion_pool", pdev,
-						  sizeof(u64), SMP_CACHE_BYTES,
-						  SMP_CACHE_BYTES);
-
-	if (!device->completion_pool) {
-		err = -ENOMEM;
-		goto err_completion_pool;
-	}
-
-	device->enumerate_channels(device);
-
-	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
-	dma->dev = &pdev->dev;
-
-	if (!dma->chancnt) {
-		dev_err(dev, "channel enumeration error\n");
-		goto err_setup_interrupts;
-	}
-
-	err = ioat_dma_setup_interrupts(device);
-	if (err)
-		goto err_setup_interrupts;
-
-	err = device->self_test(device);
-	if (err)
-		goto err_self_test;
-
-	return 0;
-
-err_self_test:
-	ioat_disable_interrupts(device);
-err_setup_interrupts:
-	pci_pool_destroy(device->completion_pool);
-err_completion_pool:
-	pci_pool_destroy(device->dma_pool);
-err_dma_pool:
-	return err;
-}
-
-int ioat_register(struct ioatdma_device *device)
-{
-	int err = dma_async_device_register(&device->common);
-
-	if (err) {
-		ioat_disable_interrupts(device);
-		pci_pool_destroy(device->completion_pool);
-		pci_pool_destroy(device->dma_pool);
-	}
-
-	return err;
-}
-
-/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
-static void ioat1_intr_quirk(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	u32 dmactrl;
-
-	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
-	if (pdev->msi_enabled)
-		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
-	else
-		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
-	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->desccount);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
-
-	return sprintf(page, "%d\n", ioat->active);
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static ssize_t cap_show(struct dma_chan *c, char *page)
-{
-	struct dma_device *dma = c->device;
-
-	return sprintf(page, "copy%s%s%s%s%s\n",
-		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
-		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
-		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
-		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
-		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
-
-}
-struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
-
-static ssize_t version_show(struct dma_chan *c, char *page)
-{
-	struct dma_device *dma = c->device;
-	struct ioatdma_device *device = to_ioatdma_device(dma);
-
-	return sprintf(page, "%d.%d\n",
-		       device->version >> 4, device->version & 0xf);
-}
-struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
-
-static struct attribute *ioat1_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-static ssize_t
-ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	struct ioat_sysfs_entry *entry;
-	struct ioat_chan_common *chan;
-
-	entry = container_of(attr, struct ioat_sysfs_entry, attr);
-	chan = container_of(kobj, struct ioat_chan_common, kobj);
-
-	if (!entry->show)
-		return -EIO;
-	return entry->show(&chan->common, page);
-}
-
-const struct sysfs_ops ioat_sysfs_ops = {
-	.show	= ioat_attr_show,
-};
-
-static struct kobj_type ioat1_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat1_attrs,
-};
-
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
-{
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		struct ioat_chan_common *chan = to_chan_common(c);
-		struct kobject *parent = &c->dev->device.kobj;
-		int err;
-
-		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
-		if (err) {
-			dev_warn(to_dev(chan),
-				 "sysfs init error (%d), continuing...\n", err);
-			kobject_put(&chan->kobj);
-			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
-		}
-	}
-}
-
-void ioat_kobject_del(struct ioatdma_device *device)
-{
-	struct dma_device *dma = &device->common;
-	struct dma_chan *c;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		struct ioat_chan_common *chan = to_chan_common(c);
-
-		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
-			kobject_del(&chan->kobj);
-			kobject_put(&chan->kobj);
-		}
-	}
-}
-
-int ioat1_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
+	/* throw away whatever the channel was doing and get it
+	 * initialized, with ioat3 specific workarounds
+	 */
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	u32 chanerr;
+	u16 dev_id;
 	int err;
 
-	device->intr_quirk = ioat1_intr_quirk;
-	device->enumerate_channels = ioat1_enumerate_channels;
-	device->self_test = ioat_dma_self_test;
-	device->timer_fn = ioat1_timer_event;
-	device->cleanup_fn = ioat1_cleanup_event;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
-	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
-	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
+	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
 
-	err = ioat_probe(device);
-	if (err)
-		return err;
-	err = ioat_register(device);
-	if (err)
-		return err;
-	ioat_kobject_add(device, &ioat1_ktype);
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
-	if (dca)
-		device->dca = ioat_dca_init(pdev, device->reg_base);
+	if (ioat_dma->version < IOAT_VER_3_3) {
+		/* clear any pending errors */
+		err = pci_read_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
+		if (err) {
+			dev_err(&pdev->dev,
+				"channel error register unreachable\n");
+			return err;
+		}
+		pci_write_config_dword(pdev,
+				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
+
+		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
+		 * (workaround for spurious config parity error after restart)
+		 */
+		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
+		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
+			pci_write_config_dword(pdev,
+					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
+					       0x10);
+		}
+	}
+
+	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
+	if (!err)
+		err = ioat_irq_reinit(ioat_dma);
+
+	if (err)
+		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
 
 	return err;
 }
-
-void ioat_dma_remove(struct ioatdma_device *device)
-{
-	struct dma_device *dma = &device->common;
-
-	ioat_disable_interrupts(device);
-
-	ioat_kobject_del(device);
-
-	dma_async_device_unregister(dma);
-
-	pci_pool_destroy(device->dma_pool);
-	pci_pool_destroy(device->completion_pool);
-
-	INIT_LIST_HEAD(&dma->channels);
-}
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 30f5c7e..1bc08498 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -18,26 +18,32 @@
 #define IOATDMA_H
 
 #include <linux/dmaengine.h>
-#include "hw.h"
-#include "registers.h"
 #include <linux/init.h>
 #include <linux/dmapool.h>
 #include <linux/cache.h>
 #include <linux/pci_ids.h>
-#include <net/tcp.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include "registers.h"
+#include "hw.h"
 
 #define IOAT_DMA_VERSION  "4.00"
 
-#define IOAT_LOW_COMPLETION_MASK	0xffffffc0
 #define IOAT_DMA_DCA_ANY_CPU		~0
 
-#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
-#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
-#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
-#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)
-#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev)
+#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
+#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
+#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
 
-#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
+#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
+
+/* ioat hardware assumes at least two sources for raid operations */
+#define src_cnt_to_sw(x) ((x) + 2)
+#define src_cnt_to_hw(x) ((x) - 2)
+#define ndest_to_sw(x) ((x) + 1)
+#define ndest_to_hw(x) ((x) - 1)
+#define src16_cnt_to_sw(x) ((x) + 9)
+#define src16_cnt_to_hw(x) ((x) - 9)
 
 /*
  * workaround for IOAT ver.3.0 null descriptor issue
@@ -57,19 +63,15 @@
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
  * @dma_pool: for allocating DMA descriptors
- * @common: embedded struct dma_device
+ * @completion_pool: DMA buffers for completion ops
+ * @sed_hw_pool: DMA super descriptor pools
+ * @dma_dev: embedded struct dma_device
  * @version: version of ioatdma device
  * @msix_entries: irq handlers
  * @idx: per channel data
  * @dca: direct cache access context
- * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
- * @enumerate_channels: hw version specific channel enumeration
- * @reset_hw: hw version specific channel (re)initialization
- * @cleanup_fn: select between the v2 and v3 cleanup routines
- * @timer_fn: select between the v2 and v3 timer watchdog routines
- * @self_test: hardware version specific self test for each supported op type
- *
- * Note: the v3 cleanup routine supports raid operations
+ * @irq_mode: interrupt mode (INTX, MSI, MSIX)
+ * @cap: read DMA capabilities register
  */
 struct ioatdma_device {
 	struct pci_dev *pdev;
@@ -78,28 +80,21 @@
 	struct pci_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
-	struct dma_device common;
+	struct dma_device dma_dev;
 	u8 version;
 	struct msix_entry msix_entries[4];
-	struct ioat_chan_common *idx[4];
+	struct ioatdma_chan *idx[4];
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
-	void (*intr_quirk)(struct ioatdma_device *device);
-	int (*enumerate_channels)(struct ioatdma_device *device);
-	int (*reset_hw)(struct ioat_chan_common *chan);
-	void (*cleanup_fn)(unsigned long data);
-	void (*timer_fn)(unsigned long data);
-	int (*self_test)(struct ioatdma_device *device);
 };
 
-struct ioat_chan_common {
-	struct dma_chan common;
+struct ioatdma_chan {
+	struct dma_chan dma_chan;
 	void __iomem *reg_base;
 	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
-	#define IOAT_COMPLETION_PENDING 0
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
@@ -110,11 +105,32 @@
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
 	#define RESET_DELAY msecs_to_jiffies(100)
-	struct ioatdma_device *device;
+	struct ioatdma_device *ioat_dma;
 	dma_addr_t completion_dma;
 	u64 *completion;
 	struct tasklet_struct cleanup_task;
 	struct kobject kobj;
+
+/* ioat v2 / v3 channel attributes
+ * @xfercap_log: log2 of channel max transfer length (for fast division)
+ * @head: allocated index
+ * @issued: hardware notification point
+ * @tail: cleanup index
+ * @dmacount: identical to 'head' except for occasionally resetting to zero
+ * @alloc_order: log2 of the number of allocated descriptors
+ * @produce: number of descriptors to produce at submit time
+ * @ring: software ring buffer implementation of hardware ring
+ * @prep_lock: serializes descriptor preparation (producers)
+ */
+	size_t xfercap_log;
+	u16 head;
+	u16 issued;
+	u16 tail;
+	u16 dmacount;
+	u16 alloc_order;
+	u16 produce;
+	struct ioat_ring_ent **ring;
+	spinlock_t prep_lock;
 };
 
 struct ioat_sysfs_entry {
@@ -123,28 +139,11 @@
 };
 
 /**
- * struct ioat_dma_chan - internal representation of a DMA channel
- */
-struct ioat_dma_chan {
-	struct ioat_chan_common base;
-
-	size_t xfercap;	/* XFERCAP register value expanded out */
-
-	spinlock_t desc_lock;
-	struct list_head free_desc;
-	struct list_head used_desc;
-
-	int pending;
-	u16 desccount;
-	u16 active;
-};
-
-/**
  * struct ioat_sed_ent - wrapper around super extended hardware descriptor
  * @hw: hardware SED
- * @sed_dma: dma address for the SED
- * @list: list member
+ * @dma: dma address for the SED
  * @parent: point to the dma descriptor that's the parent
+ * @hw_pool: descriptor pool index
  */
 struct ioat_sed_ent {
 	struct ioat_sed_raw_descriptor *hw;
@@ -153,39 +152,57 @@
 	unsigned int hw_pool;
 };
 
-static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
-{
-	return container_of(c, struct ioat_chan_common, common);
-}
-
-static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat_dma_chan, base);
-}
-
-/* wrapper around hardware descriptor format + additional software fields */
-
 /**
- * struct ioat_desc_sw - wrapper around hardware descriptor
+ * struct ioat_ring_ent - wrapper around hardware descriptor
  * @hw: hardware DMA descriptor (for memcpy)
- * @node: this descriptor will either be on the free list,
- *     or attached to a transaction list (tx_list)
+ * @xor: hardware xor descriptor
+ * @xor_ex: hardware xor extension descriptor
+ * @pq: hardware pq descriptor
+ * @pq_ex: hardware pq extension descriptor
+ * @pqu: hardware pq update descriptor
+ * @raw: hardware raw (un-typed) descriptor
  * @txd: the generic software descriptor for all engines
+ * @len: total transaction length for unmap
+ * @result: asynchronous result of validate operations
  * @id: identifier for debug
+ * @sed: pointer to super extended descriptor sw desc
  */
-struct ioat_desc_sw {
-	struct ioat_dma_descriptor *hw;
-	struct list_head node;
+
+struct ioat_ring_ent {
+	union {
+		struct ioat_dma_descriptor *hw;
+		struct ioat_xor_descriptor *xor;
+		struct ioat_xor_ext_descriptor *xor_ex;
+		struct ioat_pq_descriptor *pq;
+		struct ioat_pq_ext_descriptor *pq_ex;
+		struct ioat_pq_update_descriptor *pqu;
+		struct ioat_raw_descriptor *raw;
+	};
 	size_t len;
-	struct list_head tx_list;
 	struct dma_async_tx_descriptor txd;
+	enum sum_check_flags *result;
 	#ifdef DEBUG
 	int id;
 	#endif
+	struct ioat_sed_ent *sed;
 };
 
+extern const struct sysfs_ops ioat_sysfs_ops;
+extern struct ioat_sysfs_entry ioat_version_attr;
+extern struct ioat_sysfs_entry ioat_cap_attr;
+extern int ioat_pending_level;
+extern int ioat_ring_alloc_order;
+extern struct kobj_type ioat_ktype;
+extern struct kmem_cache *ioat_cache;
+extern int ioat_ring_max_alloc_order;
+extern struct kmem_cache *ioat_sed_cache;
+
+static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
+{
+	return container_of(c, struct ioatdma_chan, dma_chan);
+}
+
+/* wrapper around hardware descriptor format + additional software fields */
 #ifdef DEBUG
 #define set_desc_id(desc, i) ((desc)->id = (i))
 #define desc_id(desc) ((desc)->id)
@@ -195,10 +212,10 @@
 #endif
 
 static inline void
-__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
+__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
 		struct dma_async_tx_descriptor *tx, int id)
 {
-	struct device *dev = to_dev(chan);
+	struct device *dev = to_dev(ioat_chan);
 
 	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
 		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
@@ -208,25 +225,25 @@
 }
 
 #define dump_desc_dbg(c, d) \
-	({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
+	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
 
-static inline struct ioat_chan_common *
-ioat_chan_by_index(struct ioatdma_device *device, int index)
+static inline struct ioatdma_chan *
+ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
 {
-	return device->idx[index];
+	return ioat_dma->idx[index];
 }
 
-static inline u64 ioat_chansts_32(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 	u32 status_lo;
 
 	/* We need to read the low address first as this causes the
 	 * chipset to latch the upper bits for the subsequent read
 	 */
-	status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
-	status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
+	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
+	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
 	status <<= 32;
 	status |= status_lo;
 
@@ -235,16 +252,16 @@
 
 #if BITS_PER_LONG == 64
 
-static inline u64 ioat_chansts(struct ioat_chan_common *chan)
+static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u64 status;
 
 	 /* With IOAT v3.3 the status register is 64bit.  */
 	if (ver >= IOAT_VER_3_3)
-		status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
+		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
 	else
-		status = ioat_chansts_32(chan);
+		status = ioat_chansts_32(ioat_chan);
 
 	return status;
 }
@@ -253,56 +270,41 @@
 #define ioat_chansts ioat_chansts_32
 #endif
 
-static inline void ioat_start(struct ioat_chan_common *chan)
-{
-	u8 ver = chan->device->version;
-
-	writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
-}
-
 static inline u64 ioat_chansts_to_addr(u64 status)
 {
 	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
 }
 
-static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
+static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
 {
-	return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 }
 
-static inline void ioat_suspend(struct ioat_chan_common *chan)
+static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	writeb(IOAT_CHANCMD_SUSPEND,
+	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
-static inline void ioat_reset(struct ioat_chan_common *chan)
+static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 
-	writeb(IOAT_CHANCMD_RESET, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	writeb(IOAT_CHANCMD_RESET,
+	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 }
 
-static inline bool ioat_reset_pending(struct ioat_chan_common *chan)
+static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
 {
-	u8 ver = chan->device->version;
+	u8 ver = ioat_chan->ioat_dma->version;
 	u8 cmd;
 
-	cmd = readb(chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
+	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
 	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
 }
 
-static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
-}
-
 static inline bool is_ioat_active(unsigned long status)
 {
 	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
@@ -329,24 +331,111 @@
 	return !!err;
 }
 
-int ioat_probe(struct ioatdma_device *device);
-int ioat_register(struct ioatdma_device *device);
-int ioat1_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat_dma_self_test(struct ioatdma_device *device);
-void ioat_dma_remove(struct ioatdma_device *device);
+#define IOAT_MAX_ORDER 16
+#define ioat_get_alloc_order() \
+	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
+#define ioat_get_max_alloc_order() \
+	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+
+static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
+{
+	return 1 << ioat_chan->alloc_order;
+}
+
+/* count of descriptors in flight with the engine */
+static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
+			ioat_ring_size(ioat_chan));
+}
+
+/* count of descriptors pending submission to hardware */
+static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
+{
+	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
+			ioat_ring_size(ioat_chan));
+}
+
+static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
+{
+	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
+}
+
+static inline u16
+ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
+{
+	u16 num_descs = len >> ioat_chan->xfercap_log;
+
+	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
+	return num_descs;
+}
+
+static inline struct ioat_ring_ent *
+ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
+{
+	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
+}
+
+static inline void
+ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
+{
+	writel(addr & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+	writel(addr >> 32,
+	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+}
+
+/* IOAT Prep functions */
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+	       unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+		    unsigned int src_cnt, size_t len,
+		    enum sum_check_flags *result, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+	      unsigned int src_cnt, const unsigned char *scf, size_t len,
+	      unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		  unsigned int src_cnt, const unsigned char *scf, size_t len,
+		  enum sum_check_flags *pqres, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+		 unsigned int src_cnt, size_t len, unsigned long flags);
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+		     unsigned int src_cnt, size_t len,
+		     enum sum_check_flags *result, unsigned long flags);
+
+/* IOAT Operation functions */
+irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
+irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
+struct ioat_ring_ent **
+ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
+void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
+void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
+int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
+enum dma_status
+ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
+		struct dma_tx_state *txstate);
+void ioat_cleanup_event(unsigned long data);
+void ioat_timer_event(unsigned long data);
+int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
+void ioat_issue_pending(struct dma_chan *chan);
+void ioat_timer_event(unsigned long data);
+
+/* IOAT Init functions */
+bool is_bwd_ioat(struct pci_dev *pdev);
 struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx);
-enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-				   struct dma_tx_state *txstate);
-bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   dma_addr_t *phys_complete);
-void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
-void ioat_kobject_del(struct ioatdma_device *device);
-int ioat_dma_setup_interrupts(struct ioatdma_device *device);
-void ioat_stop(struct ioat_chan_common *chan);
-extern const struct sysfs_ops ioat_sysfs_ops;
-extern struct ioat_sysfs_entry ioat_version_attr;
-extern struct ioat_sysfs_entry ioat_cap_attr;
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
+void ioat_kobject_del(struct ioatdma_device *ioat_dma);
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
+void ioat_stop(struct ioatdma_chan *ioat_chan);
 #endif /* IOATDMA_H */
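
The rewritten dma.h above folds the former ioat2_* ring accounting (previously
split out in dma_v2.h) into struct ioatdma_chan and a set of inline helpers.
As a rough standalone sketch, plain userspace C and not part of the patch, the
arithmetic those helpers perform is ordinary power-of-two circular-buffer
bookkeeping over the head/issued/tail indices; the struct and function names
below are illustrative only, and CIRC_COUNT mirrors the kernel's CIRC_CNT
from <linux/circ_buf.h>.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* same arithmetic as the kernel's CIRC_CNT for power-of-two ring sizes */
#define CIRC_COUNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

struct ring_state {
	uint16_t head;        /* allocated index */
	uint16_t issued;      /* hardware notification point */
	uint16_t tail;        /* cleanup index */
	uint16_t alloc_order; /* log2 of the number of allocated descriptors */
	size_t xfercap_log;   /* log2 of max transfer length per descriptor */
};

static uint32_t ring_size(const struct ring_state *c)
{
	return 1u << c->alloc_order;
}

/* descriptors in flight with the engine */
static uint16_t ring_active(const struct ring_state *c)
{
	return CIRC_COUNT(c->head, c->tail, ring_size(c));
}

/* descriptors prepared but not yet submitted to hardware */
static uint16_t ring_pending(const struct ring_state *c)
{
	return CIRC_COUNT(c->head, c->issued, ring_size(c));
}

static uint32_t ring_space(const struct ring_state *c)
{
	return ring_size(c) - ring_active(c);
}

/* descriptors needed for one transfer, rounding up to xfercap-sized chunks */
static uint16_t xferlen_to_descs(const struct ring_state *c, size_t len)
{
	uint16_t n = len >> c->xfercap_log;

	n += !!(len & ((1u << c->xfercap_log) - 1));
	return n;
}

int main(void)
{
	struct ring_state c = { .head = 10, .issued = 7, .tail = 4,
				.alloc_order = 4, .xfercap_log = 20 };

	printf("size=%u active=%u pending=%u space=%u\n",
	       ring_size(&c), (unsigned)ring_active(&c),
	       (unsigned)ring_pending(&c), ring_space(&c));
	printf("descs for a 3 MiB copy: %u\n",
	       (unsigned)xferlen_to_descs(&c, (size_t)3 << 20));
	return 0;
}
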
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
deleted file mode 100644
index 69c7dfc..0000000
--- a/drivers/dma/ioat/dma_v2.c
+++ /dev/null
@@ -1,916 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2004 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
- * does asynchronous data movement and checksumming operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/prefetch.h>
-#include <linux/i7300_idle.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-#include "../dmaengine.h"
-
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
-		 "ioat2+: allocate 2^n descriptors per channel"
-		 " (default: 8 max: 16)");
-static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
-		 "ioat2+: upper limit for ring size (default: 16)");
-
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat->dmacount += ioat2_ring_pending(ioat);
-	ioat->issued = ioat->head;
-	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-}
-
-void ioat2_issue_pending(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	if (ioat2_ring_pending(ioat)) {
-		spin_lock_bh(&ioat->prep_lock);
-		__ioat2_issue_pending(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-}
-
-/**
- * ioat2_update_pending - log pending descriptors
- * @ioat: ioat2+ channel
- *
- * Check if the number of unsubmitted descriptors has exceeded the
- * watermark.  Called with prep_lock held
- */
-static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
-{
-	if (ioat2_ring_pending(ioat) > ioat_pending_level)
-		__ioat2_issue_pending(ioat);
-}
-
-static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_ring_space(ioat) < 1) {
-		dev_err(to_dev(&ioat->base),
-			"Unable to start null desc - ring full\n");
-		return;
-	}
-
-	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-	desc = ioat2_get_ring_ent(ioat, ioat->head);
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.compl_write = 1;
-	/* set size to non-zero value (channel returns error when size is 0) */
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-	async_tx_ack(&desc->txd);
-	ioat2_set_chainaddr(ioat, desc->txd.phys);
-	dump_desc_dbg(ioat, desc);
-	wmb();
-	ioat->head += 1;
-	__ioat2_issue_pending(ioat);
-}
-
-static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
-{
-	spin_lock_bh(&ioat->prep_lock);
-	__ioat2_start_null_desc(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-}
-
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_async_tx_descriptor *tx;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	u16 active;
-	int idx = ioat->tail, i;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	active = ioat2_ring_active(ioat);
-	for (i = 0; i < active && !seen_current; i++) {
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		tx = &desc->txd;
-		dump_desc_dbg(ioat, desc);
-		if (tx->cookie) {
-			dma_descriptor_unmap(tx);
-			dma_cookie_complete(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-
-	chan->last_completion = phys_complete;
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-}
-
-/**
- * ioat2_cleanup - clean finished descriptors (advance tail pointer)
- * @chan: ioat channel to be cleaned up
- */
-static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-void ioat2_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat2_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	/* set the tail to be re-issued */
-	ioat->issued = ioat->tail;
-	ioat->dmacount = 0;
-	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	dev_dbg(to_dev(chan),
-		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
-
-	if (ioat2_ring_pending(ioat)) {
-		struct ioat_ring_ent *desc;
-
-		desc = ioat2_get_ring_ent(ioat, ioat->tail);
-		ioat2_set_chainaddr(ioat, desc->txd.phys);
-		__ioat2_issue_pending(ioat);
-	} else
-		__ioat2_start_null_desc(ioat);
-}
-
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-	u32 status;
-
-	status = ioat_chansts(chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		if (tmo && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		status = ioat_chansts(chan);
-		cpu_relax();
-	}
-
-	return err;
-}
-
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
-{
-	unsigned long end = jiffies + tmo;
-	int err = 0;
-
-	ioat_reset(chan);
-	while (ioat_reset_pending(chan)) {
-		if (end && time_after(jiffies, end)) {
-			err = -ETIMEDOUT;
-			break;
-		}
-		cpu_relax();
-	}
-
-	return err;
-}
-
-static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-
-}
-
-void ioat2_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat2_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static int ioat2_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it initialized */
-	u32 chanerr;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
-}
-
-/**
- * ioat2_enumerate_channels - find and initialize the device's channels
- * @device: the device to be enumerated
- */
-int ioat2_enumerate_channels(struct ioatdma_device *device)
-{
-	struct ioat2_dma_chan *ioat;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-	u8 xfercap_log;
-	int i;
-
-	INIT_LIST_HEAD(&dma->channels);
-	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
-	dma->chancnt &= 0x1f; /* bits [4:0] valid */
-	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
-		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
-			 dma->chancnt, ARRAY_SIZE(device->idx));
-		dma->chancnt = ARRAY_SIZE(device->idx);
-	}
-	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
-	xfercap_log &= 0x1f; /* bits [4:0] valid */
-	if (xfercap_log == 0)
-		return 0;
-	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
-
-	/* FIXME which i/oat version is i7300? */
-#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
-	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
-		dma->chancnt--;
-#endif
-	for (i = 0; i < dma->chancnt; i++) {
-		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
-		if (!ioat)
-			break;
-
-		ioat_init_channel(device, &ioat->base, i);
-		ioat->xfercap_log = xfercap_log;
-		spin_lock_init(&ioat->prep_lock);
-		if (device->reset_hw(&ioat->base)) {
-			i = 0;
-			break;
-		}
-	}
-	dma->chancnt = i;
-	return i;
-}
-
-static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
-{
-	struct dma_chan *c = tx->chan;
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_cookie_t cookie;
-
-	cookie = dma_cookie_assign(tx);
-	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
-
-	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	/* make descriptor updates visible before advancing ioat->head,
-	 * this is purposefully not smp_wmb() since we are also
-	 * publishing the descriptor updates to a dma device
-	 */
-	wmb();
-
-	ioat->head += ioat->produce;
-
-	ioat2_update_pending(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-
-	return cookie;
-}
-
-static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
-{
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	struct ioatdma_device *dma;
-	dma_addr_t phys;
-
-	dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
-	if (!hw)
-		return NULL;
-	memset(hw, 0, sizeof(*hw));
-
-	desc = kmem_cache_zalloc(ioat2_cache, flags);
-	if (!desc) {
-		pci_pool_free(dma->dma_pool, hw, phys);
-		return NULL;
-	}
-
-	dma_async_tx_descriptor_init(&desc->txd, chan);
-	desc->txd.tx_submit = ioat2_tx_submit_unlock;
-	desc->hw = hw;
-	desc->txd.phys = phys;
-	return desc;
-}
-
-static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
-{
-	struct ioatdma_device *dma;
-
-	dma = to_ioatdma_device(chan->device);
-	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
-	kmem_cache_free(ioat2_cache, desc);
-}
-
-static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
-{
-	struct ioat_ring_ent **ring;
-	int descs = 1 << order;
-	int i;
-
-	if (order > ioat_get_max_alloc_order())
-		return NULL;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(descs, sizeof(*ring), flags);
-	if (!ring)
-		return NULL;
-	for (i = 0; i < descs; i++) {
-		ring[i] = ioat2_alloc_ring_ent(c, flags);
-		if (!ring[i]) {
-			while (i--)
-				ioat2_free_ring_ent(ring[i], c);
-			kfree(ring);
-			return NULL;
-		}
-		set_desc_id(ring[i], i);
-	}
-
-	/* link descs */
-	for (i = 0; i < descs-1; i++) {
-		struct ioat_ring_ent *next = ring[i+1];
-		struct ioat_dma_descriptor *hw = ring[i]->hw;
-
-		hw->next = next->txd.phys;
-	}
-	ring[i]->hw->next = ring[0]->txd.phys;
-
-	return ring;
-}
-
-void ioat2_free_chan_resources(struct dma_chan *c);
-
-/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
- * @chan: channel to be initialized
- */
-int ioat2_alloc_chan_resources(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioat_ring_ent **ring;
-	u64 status;
-	int order;
-	int i = 0;
-
-	/* have we already been set up? */
-	if (ioat->ring)
-		return 1 << ioat->alloc_order;
-
-	/* Setup register to interrupt and write completion status on error */
-	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);
-
-	/* allocate a completion writeback area */
-	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
-	chan->completion = pci_pool_alloc(chan->device->completion_pool,
-					  GFP_KERNEL, &chan->completion_dma);
-	if (!chan->completion)
-		return -ENOMEM;
-
-	memset(chan->completion, 0, sizeof(*chan->completion));
-	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
-	writel(((u64) chan->completion_dma) >> 32,
-	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
-
-	order = ioat_get_alloc_order();
-	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
-	if (!ring)
-		return -ENOMEM;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	ioat->ring = ring;
-	ioat->head = 0;
-	ioat->issued = 0;
-	ioat->tail = 0;
-	ioat->alloc_order = order;
-	set_bit(IOAT_RUN, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	ioat2_start_null_desc(ioat);
-
-	/* check that we got off the ground */
-	do {
-		udelay(1);
-		status = ioat_chansts(chan);
-	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
-
-	if (is_ioat_active(status) || is_ioat_idle(status)) {
-		return 1 << ioat->alloc_order;
-	} else {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		dev_WARN(to_dev(chan),
-			"failed to start channel chanerr: %#x\n", chanerr);
-		ioat2_free_chan_resources(c);
-		return -EFAULT;
-	}
-}
-
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
-{
-	/* reshape differs from normal ring allocation in that we want
-	 * to allocate a new software ring while only
-	 * extending/truncating the hardware ring
-	 */
-	struct ioat_chan_common *chan = &ioat->base;
-	struct dma_chan *c = &chan->common;
-	const u32 curr_size = ioat2_ring_size(ioat);
-	const u16 active = ioat2_ring_active(ioat);
-	const u32 new_size = 1 << order;
-	struct ioat_ring_ent **ring;
-	u16 i;
-
-	if (order > ioat_get_max_alloc_order())
-		return false;
-
-	/* double check that we have at least 1 free descriptor */
-	if (active == curr_size)
-		return false;
-
-	/* when shrinking, verify that we can hold the current active
-	 * set in the new ring
-	 */
-	if (active >= new_size)
-		return false;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
-	if (!ring)
-		return false;
-
-	/* allocate/trim descriptors as needed */
-	if (new_size > curr_size) {
-		/* copy current descriptors to the new ring */
-		for (i = 0; i < curr_size; i++) {
-			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* add new descriptors to the ring */
-		for (i = curr_size; i < new_size; i++) {
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
-			if (!ring[new_idx]) {
-				while (i--) {
-					u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-					ioat2_free_ring_ent(ring[new_idx], c);
-				}
-				kfree(ring);
-				return false;
-			}
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* hw link new descriptors */
-		for (i = curr_size-1; i < new_size; i++) {
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
-			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
-			hw->next = next->txd.phys;
-		}
-	} else {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_ring_ent *next;
-
-		/* copy current descriptors to the new ring, dropping the
-		 * removed descriptors
-		 */
-		for (i = 0; i < new_size; i++) {
-			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* free deleted descriptors */
-		for (i = new_size; i < curr_size; i++) {
-			struct ioat_ring_ent *ent;
-
-			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
-			ioat2_free_ring_ent(ent, c);
-		}
-
-		/* fix up hardware ring */
-		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
-		next = ring[(ioat->tail+new_size) & (new_size-1)];
-		hw->next = next->txd.phys;
-	}
-
-	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
-		__func__, new_size);
-
-	kfree(ioat->ring);
-	ioat->ring = ring;
-	ioat->alloc_order = order;
-
-	return true;
-}
-
-/**
- * ioat2_check_space_lock - verify space and grab ring producer lock
- * @ioat: ioat2,3 channel (ring) to operate on
- * @num_descs: allocation length
- */
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	bool retry;
-
- retry:
-	spin_lock_bh(&ioat->prep_lock);
-	/* never allow the last descriptor to be consumed, we need at
-	 * least one free at all times to allow for on-the-fly ring
-	 * resizing.
-	 */
-	if (likely(ioat2_ring_space(ioat) > num_descs)) {
-		dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-		ioat->produce = num_descs;
-		return 0;  /* with ioat->prep_lock held */
-	}
-	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-
-	/* is another cpu already trying to expand the ring? */
-	if (retry)
-		goto retry;
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	retry = reshape_ring(ioat, ioat->alloc_order + 1);
-	clear_bit(IOAT_RESHAPE_PENDING, &chan->state);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	/* if we were able to expand the ring retry the allocation */
-	if (retry)
-		goto retry;
-
-	if (printk_ratelimit())
-		dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n",
-			__func__, num_descs, ioat->head, ioat->tail, ioat->issued);
-
-	/* progress reclaim in the allocation failure case we may be
-	 * called under bh_disabled so we need to trigger the timer
-	 * event directly
-	 */
-	if (time_is_before_jiffies(chan->timer.expires)
-	    && timer_pending(&chan->timer)) {
-		struct ioatdma_device *device = chan->device;
-
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		device->timer_fn((unsigned long) &chan->common);
-	}
-
-	return -ENOMEM;
-}
-
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_dma_descriptor *hw;
-	struct ioat_ring_ent *desc;
-	dma_addr_t dst = dma_dest;
-	dma_addr_t src = dma_src;
-	size_t total_len = len;
-	int num_descs, idx, i;
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		hw = desc->hw;
-
-		hw->size = copy;
-		hw->ctl = 0;
-		hw->src_addr = src;
-		hw->dst_addr = dst;
-
-		len -= copy;
-		dst += copy;
-		src += copy;
-		dump_desc_dbg(ioat, desc);
-	} while (++i < num_descs);
-
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	dump_desc_dbg(ioat, desc);
-	/* we leave the channel locked to ensure in order submission */
-
-	return &desc->txd;
-}
-
-/**
- * ioat2_free_chan_resources - release all the descriptors
- * @chan: the channel to be cleaned
- */
-void ioat2_free_chan_resources(struct dma_chan *c)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	const u16 total_descs = 1 << ioat->alloc_order;
-	int descs;
-	int i;
-
-	/* Before freeing channel resources first check
-	 * if they have been previously allocated for this channel.
-	 */
-	if (!ioat->ring)
-		return;
-
-	ioat_stop(chan);
-	device->reset_hw(chan);
-
-	spin_lock_bh(&chan->cleanup_lock);
-	spin_lock_bh(&ioat->prep_lock);
-	descs = ioat2_ring_space(ioat);
-	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
-	for (i = 0; i < descs; i++) {
-		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	if (descs < total_descs)
-		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
-			total_descs - descs);
-
-	for (i = 0; i < total_descs - descs; i++) {
-		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
-		dump_desc_dbg(ioat, desc);
-		ioat2_free_ring_ent(desc, c);
-	}
-
-	kfree(ioat->ring);
-	ioat->ring = NULL;
-	ioat->alloc_order = 0;
-	pci_pool_free(device->completion_pool, chan->completion,
-		      chan->completion_dma);
-	spin_unlock_bh(&ioat->prep_lock);
-	spin_unlock_bh(&chan->cleanup_lock);
-
-	chan->last_completion = 0;
-	chan->completion_dma = 0;
-	ioat->dmacount = 0;
-}
-
-static ssize_t ring_size_show(struct dma_chan *c, char *page)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
-}
-static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
-
-static ssize_t ring_active_show(struct dma_chan *c, char *page)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-
-	/* ...taken outside the lock, no need to be precise */
-	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
-}
-static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
-
-static struct attribute *ioat2_attrs[] = {
-	&ring_size_attr.attr,
-	&ring_active_attr.attr,
-	&ioat_cap_attr.attr,
-	&ioat_version_attr.attr,
-	NULL,
-};
-
-struct kobj_type ioat2_ktype = {
-	.sysfs_ops = &ioat_sysfs_ops,
-	.default_attrs = ioat2_attrs,
-};
-
-int ioat2_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat2_reset_hw;
-	device->cleanup_fn = ioat2_cleanup_event;
-	device->timer_fn = ioat2_timer_event;
-	device->self_test = ioat_dma_self_test;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-	dma->device_tx_status = ioat_dma_tx_status;
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat2_dca_init(pdev, device->reg_base);
-
-	return err;
-}
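
The deleted ioat2_alloc_ring() above built the descriptor ring by allocating
2^order entries and chaining them through each hardware descriptor's next
field, with the last entry pointing back to the first so the hardware sees a
circular chain. A minimal userspace sketch of that linking step follows; the
types, names, and fake bus addresses here are illustrative stand-ins, not the
driver's pci_pool allocations.

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>

struct fake_hw_desc {
	uint64_t next;   /* bus address of the next descriptor in the chain */
};

struct fake_ring_ent {
	struct fake_hw_desc hw;
	uint64_t phys;   /* stand-in for desc->txd.phys */
};

static struct fake_ring_ent **alloc_ring(int order)
{
	int descs = 1 << order;
	struct fake_ring_ent **ring = calloc(descs, sizeof(*ring));
	int i;

	if (!ring)
		return NULL;

	for (i = 0; i < descs; i++) {
		ring[i] = calloc(1, sizeof(*ring[i]));
		if (!ring[i])
			goto err;
		ring[i]->phys = 0x1000 + 0x40ull * i; /* pretend bus address */
	}

	/* link descs: entry i points at entry i+1, tail wraps to the head */
	for (i = 0; i < descs - 1; i++)
		ring[i]->hw.next = ring[i + 1]->phys;
	ring[i]->hw.next = ring[0]->phys;

	return ring;
err:
	while (i--)
		free(ring[i]);
	free(ring);
	return NULL;
}

int main(void)
{
	struct fake_ring_ent **ring = alloc_ring(3); /* 8 descriptors */

	if (ring)
		printf("desc[7].next -> %#llx (desc[0].phys)\n",
		       (unsigned long long)ring[7]->hw.next);
	return 0;
}
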
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
deleted file mode 100644
index bf24ebe..0000000
--- a/drivers/dma/ioat/dma_v2.h
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-#ifndef IOATDMA_V2_H
-#define IOATDMA_V2_H
-
-#include <linux/dmaengine.h>
-#include <linux/circ_buf.h>
-#include "dma.h"
-#include "hw.h"
-
-
-extern int ioat_pending_level;
-extern int ioat_ring_alloc_order;
-
-/*
- * workaround for IOAT ver.3.0 null descriptor issue
- * (channel returns error when size is 0)
- */
-#define NULL_DESC_BUFFER_SIZE 1
-
-#define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
-	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
-	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
-
-/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes
- * @base: common ioat channel parameters
- * @xfercap_log; log2 of channel max transfer length (for fast division)
- * @head: allocated index
- * @issued: hardware notification point
- * @tail: cleanup index
- * @dmacount: identical to 'head' except for occasionally resetting to zero
- * @alloc_order: log2 of the number of allocated descriptors
- * @produce: number of descriptors to produce at submit time
- * @ring: software ring buffer implementation of hardware ring
- * @prep_lock: serializes descriptor preparation (producers)
- */
-struct ioat2_dma_chan {
-	struct ioat_chan_common base;
-	size_t xfercap_log;
-	u16 head;
-	u16 issued;
-	u16 tail;
-	u16 dmacount;
-	u16 alloc_order;
-	u16 produce;
-	struct ioat_ring_ent **ring;
-	spinlock_t prep_lock;
-};
-
-static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
-{
-	struct ioat_chan_common *chan = to_chan_common(c);
-
-	return container_of(chan, struct ioat2_dma_chan, base);
-}
-
-static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
-{
-	return 1 << ioat->alloc_order;
-}
-
-/* count of descriptors in flight with the engine */
-static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat)
-{
-	return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat));
-}
-
-/* count of descriptors pending submission to hardware */
-static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
-{
-	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
-}
-
-static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
-{
-	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
-}
-
-static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len)
-{
-	u16 num_descs = len >> ioat->xfercap_log;
-
-	num_descs += !!(len & ((1 << ioat->xfercap_log) - 1));
-	return num_descs;
-}
-
-/**
- * struct ioat_ring_ent - wrapper around hardware descriptor
- * @hw: hardware DMA descriptor (for memcpy)
- * @fill: hardware fill descriptor
- * @xor: hardware xor descriptor
- * @xor_ex: hardware xor extension descriptor
- * @pq: hardware pq descriptor
- * @pq_ex: hardware pq extension descriptor
- * @pqu: hardware pq update descriptor
- * @raw: hardware raw (un-typed) descriptor
- * @txd: the generic software descriptor for all engines
- * @len: total transaction length for unmap
- * @result: asynchronous result of validate operations
- * @id: identifier for debug
- */
-
-struct ioat_ring_ent {
-	union {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_xor_descriptor *xor;
-		struct ioat_xor_ext_descriptor *xor_ex;
-		struct ioat_pq_descriptor *pq;
-		struct ioat_pq_ext_descriptor *pq_ex;
-		struct ioat_pq_update_descriptor *pqu;
-		struct ioat_raw_descriptor *raw;
-	};
-	size_t len;
-	struct dma_async_tx_descriptor txd;
-	enum sum_check_flags *result;
-	#ifdef DEBUG
-	int id;
-	#endif
-	struct ioat_sed_ent *sed;
-};
-
-static inline struct ioat_ring_ent *
-ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx)
-{
-	return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)];
-}
-
-static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	writel(addr & 0x00000000FFFFFFFF,
-	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
-	writel(addr >> 32,
-	       chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
-}
-
-int ioat2_dma_probe(struct ioatdma_device *dev, int dca);
-int ioat3_dma_probe(struct ioatdma_device *dev, int dca);
-struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
-int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
-int ioat2_enumerate_channels(struct ioatdma_device *device);
-struct dma_async_tx_descriptor *
-ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
-			   dma_addr_t dma_src, size_t len, unsigned long flags);
-void ioat2_issue_pending(struct dma_chan *chan);
-int ioat2_alloc_chan_resources(struct dma_chan *c);
-void ioat2_free_chan_resources(struct dma_chan *c);
-void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
-bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
-void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_event(unsigned long data);
-void ioat2_timer_event(unsigned long data);
-int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
-int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
-extern struct kobj_type ioat2_ktype;
-extern struct kmem_cache *ioat2_cache;
-#endif /* IOATDMA_V2_H */
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
deleted file mode 100644
index 64790a4..0000000
--- a/drivers/dma/ioat/dma_v3.c
+++ /dev/null
@@ -1,1717 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license.  When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * BSD LICENSE
- *
- * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in
- *     the documentation and/or other materials provided with the
- *     distribution.
- *   * Neither the name of Intel Corporation nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Support routines for v3+ hardware
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/prefetch.h>
-#include "../dmaengine.h"
-#include "registers.h"
-#include "hw.h"
-#include "dma.h"
-#include "dma_v2.h"
-
-extern struct kmem_cache *ioat3_sed_cache;
-
-/* ioat hardware assumes at least two sources for raid operations */
-#define src_cnt_to_sw(x) ((x) + 2)
-#define src_cnt_to_hw(x) ((x) - 2)
-#define ndest_to_sw(x) ((x) + 1)
-#define ndest_to_hw(x) ((x) - 1)
-#define src16_cnt_to_sw(x) ((x) + 9)
-#define src16_cnt_to_hw(x) ((x) - 9)
-
-/* provide a lookup table for setting the source address in the base or
- * extended descriptor of an xor or pq descriptor
- */
-static const u8 xor_idx_to_desc = 0xe0;
-static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc = 0xf8;
-static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
-				       2, 2, 2, 2, 2, 2, 2 };
-static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
-static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
-					0, 1, 2, 3, 4, 5, 6 };
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat);
-
-static void xor_set_src(struct ioat_raw_descriptor *descs[2],
-			dma_addr_t addr, u32 offset, int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
-
-	raw->field[xor_idx_to_field[idx]] = addr + offset;
-}
-
-static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
-{
-	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
-	return raw->field[pq_idx_to_field[idx]];
-}
-
-static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
-{
-	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
-	return raw->field[pq16_idx_to_field[idx]];
-}
-
-static void pq_set_src(struct ioat_raw_descriptor *descs[2],
-		       dma_addr_t addr, u32 offset, u8 coef, int idx)
-{
-	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
-	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
-
-	raw->field[pq_idx_to_field[idx]] = addr + offset;
-	pq->coef[idx] = coef;
-}
-
-static bool is_jf_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
-	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_snb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_ivb_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
-	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
-		return true;
-	default:
-		return false;
-	}
-
-}
-
-static bool is_hsw_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
-	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
-		return true;
-	default:
-		return false;
-	}
-
-}
-
-static bool is_xeon_cb32(struct pci_dev *pdev)
-{
-	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-		is_hsw_ioat(pdev);
-}
-
-static bool is_bwd_ioat(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
-	/* even though not Atom, BDX-DE has same DMA silicon */
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
-		return true;
-	default:
-		return false;
-	}
-}
-
-static bool is_bwd_noraid(struct pci_dev *pdev)
-{
-	switch (pdev->device) {
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
-	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
-		return true;
-	default:
-		return false;
-	}
-
-}
-
-static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
-			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
-{
-	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
-	struct ioat_pq16a_descriptor *pq16 =
-		(struct ioat_pq16a_descriptor *)desc[1];
-	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
-
-	raw->field[pq16_idx_to_field[idx]] = addr + offset;
-
-	if (idx < 8)
-		pq->coef[idx] = coef;
-	else
-		pq16->coef[idx - 8] = coef;
-}
-
-static struct ioat_sed_ent *
-ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
-{
-	struct ioat_sed_ent *sed;
-	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
-
-	sed = kmem_cache_alloc(ioat3_sed_cache, flags);
-	if (!sed)
-		return NULL;
-
-	sed->hw_pool = hw_pool;
-	sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
-				 flags, &sed->dma);
-	if (!sed->hw) {
-		kmem_cache_free(ioat3_sed_cache, sed);
-		return NULL;
-	}
-
-	return sed;
-}
-
-static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
-{
-	if (!sed)
-		return;
-
-	dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
-	kmem_cache_free(ioat3_sed_cache, sed);
-}
-
-static bool desc_has_ext(struct ioat_ring_ent *desc)
-{
-	struct ioat_dma_descriptor *hw = desc->hw;
-
-	if (hw->ctl_f.op == IOAT_OP_XOR ||
-	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
-		struct ioat_xor_descriptor *xor = desc->xor;
-
-		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
-			return true;
-	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
-		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
-		struct ioat_pq_descriptor *pq = desc->pq;
-
-		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
-			return true;
-	}
-
-	return false;
-}
-
-static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
-{
-	u64 phys_complete;
-	u64 completion;
-
-	completion = *chan->completion;
-	phys_complete = ioat_chansts_to_addr(completion);
-
-	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
-		(unsigned long long) phys_complete);
-
-	return phys_complete;
-}
-
-static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
-				   u64 *phys_complete)
-{
-	*phys_complete = ioat3_get_current_completion(chan);
-	if (*phys_complete == chan->last_completion)
-		return false;
-
-	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
-	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-
-	return true;
-}
-
-static void
-desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
-{
-	struct ioat_dma_descriptor *hw = desc->hw;
-
-	switch (hw->ctl_f.op) {
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ_VAL_16S:
-	{
-		struct ioat_pq_descriptor *pq = desc->pq;
-
-		/* check if there's error written */
-		if (!pq->dwbes_f.wbes)
-			return;
-
-		/* need to set a chanerr var for checking to clear later */
-
-		if (pq->dwbes_f.p_val_err)
-			*desc->result |= SUM_CHECK_P_RESULT;
-
-		if (pq->dwbes_f.q_val_err)
-			*desc->result |= SUM_CHECK_Q_RESULT;
-
-		return;
-	}
-	default:
-		return;
-	}
-}
-
-/**
- * __cleanup - reclaim used descriptors
- * @ioat: channel (ring) to clean
- *
- * The difference from the dma_v2.c __cleanup() is that this routine
- * handles extended descriptors and dma-unmapping raid operations.
- */
-static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	bool seen_current = false;
-	int idx = ioat->tail, i;
-	u16 active;
-
-	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
-		__func__, ioat->head, ioat->tail, ioat->issued);
-
-	/*
-	 * At restart of the channel, the completion address and the
-	 * channel status will be 0 due to starting a new chain. Since
-	 * it's new chain and the first descriptor "fails", there is
-	 * nothing to clean up. We do not want to reap the entire submitted
-	 * chain due to this 0 address value and then BUG.
-	 */
-	if (!phys_complete)
-		return;
-
-	active = ioat2_ring_active(ioat);
-	for (i = 0; i < active && !seen_current; i++) {
-		struct dma_async_tx_descriptor *tx;
-
-		smp_read_barrier_depends();
-		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		dump_desc_dbg(ioat, desc);
-
-		/* set err stat if we are using dwbes */
-		if (device->cap & IOAT_CAP_DWBES)
-			desc_get_errstat(ioat, desc);
-
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-
-		if (tx->phys == phys_complete)
-			seen_current = true;
-
-		/* skip extended descriptors */
-		if (desc_has_ext(desc)) {
-			BUG_ON(i + 1 >= active);
-			i++;
-		}
-
-		/* cleanup super extended descriptors */
-		if (desc->sed) {
-			ioat3_free_sed(device, desc->sed);
-			desc->sed = NULL;
-		}
-	}
-	smp_mb(); /* finish all descriptor reads before incrementing tail */
-	ioat->tail = idx + i;
-	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
-	chan->last_completion = phys_complete;
-
-	if (active - i == 0) {
-		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
-			__func__);
-		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-	/* 5 microsecond delay per pending descriptor */
-	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
-	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
-}
-
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	u64 phys_complete;
-
-	spin_lock_bh(&chan->cleanup_lock);
-
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	if (is_ioat_halted(*chan->completion)) {
-		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-
-		if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-			ioat3_eh(ioat);
-		}
-	}
-
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static void ioat3_cleanup_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-
-	ioat3_cleanup(ioat);
-	if (!test_bit(IOAT_RUN, &chan->state))
-		return;
-	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
-}
-
-static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	u64 phys_complete;
-
-	ioat2_quiesce(chan, 0);
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	__ioat2_restart_chan(ioat);
-}
-
-static void ioat3_eh(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-	struct pci_dev *pdev = to_pdev(chan);
-	struct ioat_dma_descriptor *hw;
-	struct dma_async_tx_descriptor *tx;
-	u64 phys_complete;
-	struct ioat_ring_ent *desc;
-	u32 err_handled = 0;
-	u32 chanerr_int;
-	u32 chanerr;
-
-	/* cleanup so tail points to descriptor that caused the error */
-	if (ioat3_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
-
-	dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
-		__func__, chanerr, chanerr_int);
-
-	desc = ioat2_get_ring_ent(ioat, ioat->tail);
-	hw = desc->hw;
-	dump_desc_dbg(ioat, desc);
-
-	switch (hw->ctl_f.op) {
-	case IOAT_OP_XOR_VAL:
-		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
-			*desc->result |= SUM_CHECK_P_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
-		}
-		break;
-	case IOAT_OP_PQ_VAL:
-	case IOAT_OP_PQ_VAL_16S:
-		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
-			*desc->result |= SUM_CHECK_P_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
-		}
-		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
-			*desc->result |= SUM_CHECK_Q_RESULT;
-			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
-		}
-		break;
-	}
-
-	/* fault on unhandled error or spurious halt */
-	if (chanerr ^ err_handled || chanerr == 0) {
-		dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
-			__func__, chanerr, err_handled);
-		BUG();
-	} else { /* cleanup the faulty descriptor */
-		tx = &desc->txd;
-		if (tx->cookie) {
-			dma_cookie_complete(tx);
-			dma_descriptor_unmap(tx);
-			if (tx->callback) {
-				tx->callback(tx->callback_param);
-				tx->callback = NULL;
-			}
-		}
-	}
-
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
-
-	/* mark faulting descriptor as complete */
-	*chan->completion = desc->txd.phys;
-
-	spin_lock_bh(&ioat->prep_lock);
-	ioat3_restart_channel(ioat);
-	spin_unlock_bh(&ioat->prep_lock);
-}
-
-static void check_active(struct ioat2_dma_chan *ioat)
-{
-	struct ioat_chan_common *chan = &ioat->base;
-
-	if (ioat2_ring_active(ioat)) {
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		return;
-	}
-
-	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
-		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat, ioat->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat->alloc_order > ioat_get_alloc_order())
-			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-
-}
-
-static void ioat3_timer_event(unsigned long data)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
-	struct ioat_chan_common *chan = &ioat->base;
-	dma_addr_t phys_complete;
-	u64 status;
-
-	status = ioat_chansts(chan);
-
-	/* when halted due to errors check for channel
-	 * programming errors before advancing the completion state
-	 */
-	if (is_ioat_halted(status)) {
-		u32 chanerr;
-
-		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-			__func__, chanerr);
-		if (test_bit(IOAT_RUN, &chan->state))
-			BUG_ON(is_ioat_bug(chanerr));
-		else /* we never got off the ground */
-			return;
-	}
-
-	/* if we haven't made progress and we have already
-	 * acknowledged a pending completion once, then be more
-	 * forceful with a restart
-	 */
-	spin_lock_bh(&chan->cleanup_lock);
-	if (ioat_cleanup_preamble(chan, &phys_complete))
-		__cleanup(ioat, phys_complete);
-	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-		spin_lock_bh(&ioat->prep_lock);
-		ioat3_restart_channel(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
-		return;
-	} else {
-		set_bit(IOAT_COMPLETION_ACK, &chan->state);
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	}
-
-
-	if (ioat2_ring_active(ioat))
-		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-	else {
-		spin_lock_bh(&ioat->prep_lock);
-		check_active(ioat);
-		spin_unlock_bh(&ioat->prep_lock);
-	}
-	spin_unlock_bh(&chan->cleanup_lock);
-}
-
-static enum dma_status
-ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
-		struct dma_tx_state *txstate)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	enum dma_status ret;
-
-	ret = dma_cookie_status(c, cookie, txstate);
-	if (ret == DMA_COMPLETE)
-		return ret;
-
-	ioat3_cleanup(ioat);
-
-	return dma_cookie_status(c, cookie, txstate);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
-		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
-		      size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *compl_desc;
-	struct ioat_ring_ent *desc;
-	struct ioat_ring_ent *ext;
-	size_t total_len = len;
-	struct ioat_xor_descriptor *xor;
-	struct ioat_xor_ext_descriptor *xor_ex = NULL;
-	struct ioat_dma_descriptor *hw;
-	int num_descs, with_ext, idx, i;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
-
-	BUG_ON(src_cnt < 2);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	/* we need 2x the number of descriptors to cover greater than 5
-	 * sources
-	 */
-	if (src_cnt > 5) {
-		with_ext = 1;
-		num_descs *= 2;
-	} else
-		with_ext = 0;
-
-	/* completion writes from the raid engine may pass completion
-	 * writes from the legacy engine, so we need one extra null
-	 * (legacy) descriptor to ensure all completion writes arrive in
-	 * order.
-	 */
-	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-		int s;
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		xor = desc->xor;
-
-		/* save a branch by unconditionally retrieving the
-		 * extended descriptor xor_set_src() knows to not write
-		 * to it in the single descriptor case
-		 */
-		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
-		xor_ex = ext->xor_ex;
-
-		descs[0] = (struct ioat_raw_descriptor *) xor;
-		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
-		for (s = 0; s < src_cnt; s++)
-			xor_set_src(descs, src[s], offset, s);
-		xor->size = xfer_size;
-		xor->dst_addr = dest + offset;
-		xor->ctl = 0;
-		xor->ctl_f.op = op;
-		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
-
-		len -= xfer_size;
-		offset += xfer_size;
-		dump_desc_dbg(ioat, desc);
-	} while ((i += 1 + with_ext) < num_descs);
-
-	/* last xor descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
-	/* completion descriptor carries interrupt bit */
-	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-	hw = compl_desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	dump_desc_dbg(ioat, compl_desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
-	       unsigned int src_cnt, size_t len, unsigned long flags)
-{
-	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
-		    unsigned int src_cnt, size_t len,
-		    enum sum_check_flags *result, unsigned long flags)
-{
-	/* the cleanup routine only sets bits on validate failure, it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*result = 0;
-
-	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
-				     src_cnt - 1, len, flags);
-}
-
-static void
-dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
-{
-	struct device *dev = to_dev(&ioat->base);
-	struct ioat_pq_descriptor *pq = desc->pq;
-	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
-	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
-	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
-	int i;
-
-	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
-		" src_cnt: %d)\n",
-		desc_id(desc), (unsigned long long) desc->txd.phys,
-		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
-		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
-		pq->ctl_f.compl_write,
-		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
-		pq->ctl_f.src_cnt);
-	for (i = 0; i < src_cnt; i++)
-		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
-			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
-	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
-	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
-}
-
-static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
-			       struct ioat_ring_ent *desc)
-{
-	struct device *dev = to_dev(&ioat->base);
-	struct ioat_pq_descriptor *pq = desc->pq;
-	struct ioat_raw_descriptor *descs[] = { (void *)pq,
-						(void *)pq,
-						(void *)pq };
-	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
-	int i;
-
-	if (desc->sed) {
-		descs[1] = (void *)desc->sed->hw;
-		descs[2] = (void *)desc->sed->hw + 64;
-	}
-
-	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
-		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
-		" src_cnt: %d)\n",
-		desc_id(desc), (unsigned long long) desc->txd.phys,
-		(unsigned long long) pq->next,
-		desc->txd.flags, pq->size, pq->ctl,
-		pq->ctl_f.op, pq->ctl_f.int_en,
-		pq->ctl_f.compl_write,
-		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
-		pq->ctl_f.src_cnt);
-	for (i = 0; i < src_cnt; i++) {
-		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
-			(unsigned long long) pq16_get_src(descs, i),
-			pq->coef[i]);
-	}
-	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
-	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
-		     const dma_addr_t *dst, const dma_addr_t *src,
-		     unsigned int src_cnt, const unsigned char *scf,
-		     size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *compl_desc;
-	struct ioat_ring_ent *desc;
-	struct ioat_ring_ent *ext;
-	size_t total_len = len;
-	struct ioat_pq_descriptor *pq;
-	struct ioat_pq_ext_descriptor *pq_ex = NULL;
-	struct ioat_dma_descriptor *hw;
-	u32 offset = 0;
-	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
-	int i, s, idx, with_ext, num_descs;
-	int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
-	/* the engine requires at least two sources (we provide
-	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
-	 */
-	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-	/* we need 2x the number of descriptors to cover greater than 3
-	 * sources (we need 1 extra source in the q-only continuation
-	 * case and 3 extra sources in the p+q continuation case.
-	 */
-	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
-	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
-		with_ext = 1;
-		num_descs *= 2;
-	} else
-		with_ext = 0;
-
-	/* completion writes from the raid engine may pass completion
-	 * writes from the legacy engine, so we need one extra null
-	 * (legacy) descriptor to ensure all completion writes arrive in
-	 * order.
-	 */
-	if (likely(num_descs) &&
-	    ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-	i = 0;
-	do {
-		struct ioat_raw_descriptor *descs[2];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		pq = desc->pq;
-
-		/* save a branch by unconditionally retrieving the
-		 * extended descriptor pq_set_src() knows to not write
-		 * to it in the single descriptor case
-		 */
-		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
-		pq_ex = ext->pq_ex;
-
-		descs[0] = (struct ioat_raw_descriptor *) pq;
-		descs[1] = (struct ioat_raw_descriptor *) pq_ex;
-
-		for (s = 0; s < src_cnt; s++)
-			pq_set_src(descs, src[s], offset, scf[s], s);
-
-		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
-		if (dmaf_p_disabled_continue(flags))
-			pq_set_src(descs, dst[1], offset, 1, s++);
-		else if (dmaf_continue(flags)) {
-			pq_set_src(descs, dst[0], offset, 0, s++);
-			pq_set_src(descs, dst[1], offset, 1, s++);
-			pq_set_src(descs, dst[1], offset, 0, s++);
-		}
-		pq->size = xfer_size;
-		pq->p_addr = dst[0] + offset;
-		pq->q_addr = dst[1] + offset;
-		pq->ctl = 0;
-		pq->ctl_f.op = op;
-		/* we turn on descriptor write back error status */
-		if (device->cap & IOAT_CAP_DWBES)
-			pq->ctl_f.wb_en = result ? 1 : 0;
-		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
-		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
-		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
-		len -= xfer_size;
-		offset += xfer_size;
-	} while ((i += 1 + with_ext) < num_descs);
-
-	/* last pq descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	dump_pq_desc_dbg(ioat, desc, ext);
-
-	if (!cb32) {
-		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-		pq->ctl_f.compl_write = 1;
-		compl_desc = desc;
-	} else {
-		/* completion descriptor carries interrupt bit */
-		compl_desc = ioat2_get_ring_ent(ioat, idx + i);
-		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
-		hw = compl_desc->hw;
-		hw->ctl = 0;
-		hw->ctl_f.null = 1;
-		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-		hw->ctl_f.compl_write = 1;
-		hw->size = NULL_DESC_BUFFER_SIZE;
-		dump_desc_dbg(ioat, compl_desc);
-	}
-
-
-	/* we leave the channel locked to ensure in order submission */
-	return &compl_desc->txd;
-}
-
-static struct dma_async_tx_descriptor *
-__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
-		       const dma_addr_t *dst, const dma_addr_t *src,
-		       unsigned int src_cnt, const unsigned char *scf,
-		       size_t len, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_chan_common *chan = &ioat->base;
-	struct ioatdma_device *device = chan->device;
-	struct ioat_ring_ent *desc;
-	size_t total_len = len;
-	struct ioat_pq_descriptor *pq;
-	u32 offset = 0;
-	u8 op;
-	int i, s, idx, num_descs;
-
-	/* this function is only called with 9-16 sources */
-	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
-
-	dev_dbg(to_dev(chan), "%s\n", __func__);
-
-	num_descs = ioat2_xferlen_to_descs(ioat, len);
-
-	/*
-	 * 16 source pq is only available on cb3.3 and has no completion
-	 * write hw bug.
-	 */
-	if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
-		idx = ioat->head;
-	else
-		return NULL;
-
-	i = 0;
-
-	do {
-		struct ioat_raw_descriptor *descs[4];
-		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
-
-		desc = ioat2_get_ring_ent(ioat, idx + i);
-		pq = desc->pq;
-
-		descs[0] = (struct ioat_raw_descriptor *) pq;
-
-		desc->sed = ioat3_alloc_sed(device, (src_cnt-2) >> 3);
-		if (!desc->sed) {
-			dev_err(to_dev(chan),
-				"%s: no free sed entries\n", __func__);
-			return NULL;
-		}
-
-		pq->sed_addr = desc->sed->dma;
-		desc->sed->parent = desc;
-
-		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
-		descs[2] = (void *)descs[1] + 64;
-
-		for (s = 0; s < src_cnt; s++)
-			pq16_set_src(descs, src[s], offset, scf[s], s);
-
-		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
-		if (dmaf_p_disabled_continue(flags))
-			pq16_set_src(descs, dst[1], offset, 1, s++);
-		else if (dmaf_continue(flags)) {
-			pq16_set_src(descs, dst[0], offset, 0, s++);
-			pq16_set_src(descs, dst[1], offset, 1, s++);
-			pq16_set_src(descs, dst[1], offset, 0, s++);
-		}
-
-		pq->size = xfer_size;
-		pq->p_addr = dst[0] + offset;
-		pq->q_addr = dst[1] + offset;
-		pq->ctl = 0;
-		pq->ctl_f.op = op;
-		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
-		/* we turn on descriptor write back error status */
-		if (device->cap & IOAT_CAP_DWBES)
-			pq->ctl_f.wb_en = result ? 1 : 0;
-		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
-		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
-
-		len -= xfer_size;
-		offset += xfer_size;
-	} while (++i < num_descs);
-
-	/* last pq descriptor carries the unmap parameters and fence bit */
-	desc->txd.flags = flags;
-	desc->len = total_len;
-	if (result)
-		desc->result = result;
-	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-
-	/* with cb3.3 we should be able to do completion w/o a null desc */
-	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
-	pq->ctl_f.compl_write = 1;
-
-	dump_pq16_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
-}
-
-static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
-{
-	if (dmaf_p_disabled_continue(flags))
-		return src_cnt + 1;
-	else if (dmaf_continue(flags))
-		return src_cnt + 3;
-	else
-		return src_cnt;
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
-	      unsigned int src_cnt, const unsigned char *scf, size_t len,
-	      unsigned long flags)
-{
-	/* specify valid address for disabled result */
-	if (flags & DMA_PREP_PQ_DISABLE_P)
-		dst[0] = dst[1];
-	if (flags & DMA_PREP_PQ_DISABLE_Q)
-		dst[1] = dst[0];
-
-	/* handle the single source multiply case from the raid6
-	 * recovery path
-	 */
-	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
-		dma_addr_t single_source[2];
-		unsigned char single_source_coef[2];
-
-		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
-		single_source[0] = src[0];
-		single_source[1] = src[0];
-		single_source_coef[0] = scf[0];
-		single_source_coef[1] = 0;
-
-		return src_cnt_flags(src_cnt, flags) > 8 ?
-			__ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
-					       2, single_source_coef, len,
-					       flags) :
-			__ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
-					     single_source_coef, len, flags);
-
-	} else {
-		return src_cnt_flags(src_cnt, flags) > 8 ?
-			__ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
-					       scf, len, flags) :
-			__ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
-					     scf, len, flags);
-	}
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
-		  unsigned int src_cnt, const unsigned char *scf, size_t len,
-		  enum sum_check_flags *pqres, unsigned long flags)
-{
-	/* specify valid address for disabled result */
-	if (flags & DMA_PREP_PQ_DISABLE_P)
-		pq[0] = pq[1];
-	if (flags & DMA_PREP_PQ_DISABLE_Q)
-		pq[1] = pq[0];
-
-	/* the cleanup routine only sets bits on validate failure, it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*pqres = 0;
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				       flags) :
-		__ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
-				     flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
-		 unsigned int src_cnt, size_t len, unsigned long flags)
-{
-	unsigned char scf[src_cnt];
-	dma_addr_t pq[2];
-
-	memset(scf, 0, src_cnt);
-	pq[0] = dst;
-	flags |= DMA_PREP_PQ_DISABLE_Q;
-	pq[1] = dst; /* specify valid address for disabled result */
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				       flags) :
-		__ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
-				     flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
-		     unsigned int src_cnt, size_t len,
-		     enum sum_check_flags *result, unsigned long flags)
-{
-	unsigned char scf[src_cnt];
-	dma_addr_t pq[2];
-
-	/* the cleanup routine only sets bits on validate failure, it
-	 * does not clear bits on validate success... so clear it here
-	 */
-	*result = 0;
-
-	memset(scf, 0, src_cnt);
-	pq[0] = src[0];
-	flags |= DMA_PREP_PQ_DISABLE_Q;
-	pq[1] = pq[0]; /* specify valid address for disabled result */
-
-	return src_cnt_flags(src_cnt, flags) > 8 ?
-		__ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
-				       scf, len, flags) :
-		__ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
-				     scf, len, flags);
-}
-
-static struct dma_async_tx_descriptor *
-ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
-{
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
-	struct ioat_ring_ent *desc;
-	struct ioat_dma_descriptor *hw;
-
-	if (ioat2_check_space_lock(ioat, 1) == 0)
-		desc = ioat2_get_ring_ent(ioat, ioat->head);
-	else
-		return NULL;
-
-	hw = desc->hw;
-	hw->ctl = 0;
-	hw->ctl_f.null = 1;
-	hw->ctl_f.int_en = 1;
-	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
-	hw->ctl_f.compl_write = 1;
-	hw->size = NULL_DESC_BUFFER_SIZE;
-	hw->src_addr = 0;
-	hw->dst_addr = 0;
-
-	desc->txd.flags = flags;
-	desc->len = 1;
-
-	dump_desc_dbg(ioat, desc);
-
-	/* we leave the channel locked to ensure in order submission */
-	return &desc->txd;
-}
-
-static void ioat3_dma_test_callback(void *dma_async_param)
-{
-	struct completion *cmp = dma_async_param;
-
-	complete(cmp);
-}
-
-#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
-static int ioat_xor_val_self_test(struct ioatdma_device *device)
-{
-	int i, src_idx;
-	struct page *dest;
-	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
-	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
-	dma_addr_t dest_dma;
-	struct dma_async_tx_descriptor *tx;
-	struct dma_chan *dma_chan;
-	dma_cookie_t cookie;
-	u8 cmp_byte = 0;
-	u32 cmp_word;
-	u32 xor_val_result;
-	int err = 0;
-	struct completion cmp;
-	unsigned long tmo;
-	struct device *dev = &device->pdev->dev;
-	struct dma_device *dma = &device->common;
-	u8 op = 0;
-
-	dev_dbg(dev, "%s\n", __func__);
-
-	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
-		return 0;
-
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
-		if (!xor_srcs[src_idx]) {
-			while (src_idx--)
-				__free_page(xor_srcs[src_idx]);
-			return -ENOMEM;
-		}
-	}
-
-	dest = alloc_page(GFP_KERNEL);
-	if (!dest) {
-		while (src_idx--)
-			__free_page(xor_srcs[src_idx]);
-		return -ENOMEM;
-	}
-
-	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
-		u8 *ptr = page_address(xor_srcs[src_idx]);
-		for (i = 0; i < PAGE_SIZE; i++)
-			ptr[i] = (1 << src_idx);
-	}
-
-	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
-		cmp_byte ^= (u8) (1 << src_idx);
-
-	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
-			(cmp_byte << 8) | cmp_byte;
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	dma_chan = container_of(dma->channels.next, struct dma_chan,
-				device_node);
-	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
-		err = -ENODEV;
-		goto out;
-	}
-
-	/* test xor */
-	op = IOAT_OP_XOR;
-
-	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (dma_mapping_error(dev, dest_dma))
-		goto dma_unmap;
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
-				      DMA_PREP_INTERRUPT);
-
-	if (!tx) {
-		dev_err(dev, "Self-test xor prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test xor setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test xor timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
-		u32 *ptr = page_address(dest);
-		if (ptr[i] != cmp_word) {
-			dev_err(dev, "Self-test xor failed compare\n");
-			err = -ENODEV;
-			goto free_resources;
-		}
-	}
-	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
-
-	/* skip validate if the capability is not present */
-	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
-		goto free_resources;
-
-	op = IOAT_OP_XOR_VAL;
-
-	/* validate the sources with the destination page */
-	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-		xor_val_srcs[i] = xor_srcs[i];
-	xor_val_srcs[i] = dest;
-
-	xor_val_result = 1;
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
-					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(dev, "Self-test zero prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test zero setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test validate timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	if (xor_val_result != 0) {
-		dev_err(dev, "Self-test validate failed compare\n");
-		err = -ENODEV;
-		goto free_resources;
-	}
-
-	memset(page_address(dest), 0, PAGE_SIZE);
-
-	/* test for non-zero parity sum */
-	op = IOAT_OP_XOR_VAL;
-
-	xor_val_result = 0;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_srcs[i] = DMA_ERROR_CODE;
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
-		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
-					   DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma_srcs[i]))
-			goto dma_unmap;
-	}
-	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
-					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
-					  &xor_val_result, DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(dev, "Self-test 2nd zero prep failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	async_tx_ack(tx);
-	init_completion(&cmp);
-	tx->callback = ioat3_dma_test_callback;
-	tx->callback_param = &cmp;
-	cookie = tx->tx_submit(tx);
-	if (cookie < 0) {
-		dev_err(dev, "Self-test  2nd zero setup failed\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-	dma->device_issue_pending(dma_chan);
-
-	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
-
-	if (tmo == 0 ||
-	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
-		dev_err(dev, "Self-test 2nd validate timed out\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	if (xor_val_result != SUM_CHECK_P_RESULT) {
-		dev_err(dev, "Self-test validate failed compare\n");
-		err = -ENODEV;
-		goto dma_unmap;
-	}
-
-	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
-
-	goto free_resources;
-dma_unmap:
-	if (op == IOAT_OP_XOR) {
-		if (dest_dma != DMA_ERROR_CODE)
-			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
-				       DMA_FROM_DEVICE);
-		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-			if (dma_srcs[i] != DMA_ERROR_CODE)
-				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-					       DMA_TO_DEVICE);
-	} else if (op == IOAT_OP_XOR_VAL) {
-		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-			if (dma_srcs[i] != DMA_ERROR_CODE)
-				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-					       DMA_TO_DEVICE);
-	}
-free_resources:
-	dma->device_free_chan_resources(dma_chan);
-out:
-	src_idx = IOAT_NUM_SRC_TEST;
-	while (src_idx--)
-		__free_page(xor_srcs[src_idx]);
-	__free_page(dest);
-	return err;
-}
-
-static int ioat3_dma_self_test(struct ioatdma_device *device)
-{
-	int rc = ioat_dma_self_test(device);
-
-	if (rc)
-		return rc;
-
-	rc = ioat_xor_val_self_test(device);
-	if (rc)
-		return rc;
-
-	return 0;
-}
-
-static int ioat3_irq_reinit(struct ioatdma_device *device)
-{
-	struct pci_dev *pdev = device->pdev;
-	int irq = pdev->irq, i;
-
-	if (!is_bwd_ioat(pdev))
-		return 0;
-
-	switch (device->irq_mode) {
-	case IOAT_MSIX:
-		for (i = 0; i < device->common.chancnt; i++) {
-			struct msix_entry *msix = &device->msix_entries[i];
-			struct ioat_chan_common *chan;
-
-			chan = ioat_chan_by_index(device, i);
-			devm_free_irq(&pdev->dev, msix->vector, chan);
-		}
-
-		pci_disable_msix(pdev);
-		break;
-	case IOAT_MSI:
-		pci_disable_msi(pdev);
-		/* fall through */
-	case IOAT_INTX:
-		devm_free_irq(&pdev->dev, irq, device);
-		break;
-	default:
-		return 0;
-	}
-	device->irq_mode = IOAT_NOIRQ;
-
-	return ioat_dma_setup_interrupts(device);
-}
-
-static int ioat3_reset_hw(struct ioat_chan_common *chan)
-{
-	/* throw away whatever the channel was doing and get it
-	 * initialized, with ioat3 specific workarounds
-	 */
-	struct ioatdma_device *device = chan->device;
-	struct pci_dev *pdev = device->pdev;
-	u32 chanerr;
-	u16 dev_id;
-	int err;
-
-	ioat2_quiesce(chan, msecs_to_jiffies(100));
-
-	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
-
-	if (device->version < IOAT_VER_3_3) {
-		/* clear any pending errors */
-		err = pci_read_config_dword(pdev,
-				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
-		if (err) {
-			dev_err(&pdev->dev,
-				"channel error register unreachable\n");
-			return err;
-		}
-		pci_write_config_dword(pdev,
-				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
-
-		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
-		 * (workaround for spurious config parity error after restart)
-		 */
-		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
-		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
-			pci_write_config_dword(pdev,
-					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
-					       0x10);
-		}
-	}
-
-	err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
-	if (!err)
-		err = ioat3_irq_reinit(device);
-
-	if (err)
-		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
-
-	return err;
-}
-
-static void ioat3_intr_quirk(struct ioatdma_device *device)
-{
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	u32 errmask;
-
-	dma = &device->common;
-
-	/*
-	 * if we have descriptor write back error status, we mask the
-	 * error interrupts
-	 */
-	if (device->cap & IOAT_CAP_DWBES) {
-		list_for_each_entry(c, &dma->channels, device_node) {
-			chan = to_chan_common(c);
-			errmask = readl(chan->reg_base +
-					IOAT_CHANERR_MASK_OFFSET);
-			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
-				   IOAT_CHANERR_XOR_Q_ERR;
-			writel(errmask, chan->reg_base +
-					IOAT_CHANERR_MASK_OFFSET);
-		}
-	}
-}
-
-int ioat3_dma_probe(struct ioatdma_device *device, int dca)
-{
-	struct pci_dev *pdev = device->pdev;
-	int dca_en = system_has_dca_enabled(pdev);
-	struct dma_device *dma;
-	struct dma_chan *c;
-	struct ioat_chan_common *chan;
-	bool is_raid_device = false;
-	int err;
-
-	device->enumerate_channels = ioat2_enumerate_channels;
-	device->reset_hw = ioat3_reset_hw;
-	device->self_test = ioat3_dma_self_test;
-	device->intr_quirk = ioat3_intr_quirk;
-	dma = &device->common;
-	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
-	dma->device_issue_pending = ioat2_issue_pending;
-	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
-	dma->device_free_chan_resources = ioat2_free_chan_resources;
-
-	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
-	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
-
-	device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
-
-	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
-		device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
-
-	/* dca is incompatible with raid operations */
-	if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
-		device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
-
-	if (device->cap & IOAT_CAP_XOR) {
-		is_raid_device = true;
-		dma->max_xor = 8;
-
-		dma_cap_set(DMA_XOR, dma->cap_mask);
-		dma->device_prep_dma_xor = ioat3_prep_xor;
-
-		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
-	}
-
-	if (device->cap & IOAT_CAP_PQ) {
-		is_raid_device = true;
-
-		dma->device_prep_dma_pq = ioat3_prep_pq;
-		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
-		dma_cap_set(DMA_PQ, dma->cap_mask);
-		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
-
-		if (device->cap & IOAT_CAP_RAID16SS) {
-			dma_set_maxpq(dma, 16, 0);
-		} else {
-			dma_set_maxpq(dma, 8, 0);
-		}
-
-		if (!(device->cap & IOAT_CAP_XOR)) {
-			dma->device_prep_dma_xor = ioat3_prep_pqxor;
-			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
-			dma_cap_set(DMA_XOR, dma->cap_mask);
-			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
-
-			if (device->cap & IOAT_CAP_RAID16SS) {
-				dma->max_xor = 16;
-			} else {
-				dma->max_xor = 8;
-			}
-		}
-	}
-
-	dma->device_tx_status = ioat3_tx_status;
-	device->cleanup_fn = ioat3_cleanup_event;
-	device->timer_fn = ioat3_timer_event;
-
-	/* starting with CB3.3 super extended descriptors are supported */
-	if (device->cap & IOAT_CAP_RAID16SS) {
-		char pool_name[14];
-		int i;
-
-		for (i = 0; i < MAX_SED_POOLS; i++) {
-			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
-
-			/* allocate SED DMA pool */
-			device->sed_hw_pool[i] = dmam_pool_create(pool_name,
-					&pdev->dev,
-					SED_SIZE * (i + 1), 64, 0);
-			if (!device->sed_hw_pool[i])
-				return -ENOMEM;
-
-		}
-	}
-
-	err = ioat_probe(device);
-	if (err)
-		return err;
-
-	list_for_each_entry(c, &dma->channels, device_node) {
-		chan = to_chan_common(c);
-		writel(IOAT_DMA_DCA_ANY_CPU,
-		       chan->reg_base + IOAT_DCACTRL_OFFSET);
-	}
-
-	err = ioat_register(device);
-	if (err)
-		return err;
-
-	ioat_kobject_add(device, &ioat2_ktype);
-
-	if (dca)
-		device->dca = ioat3_dca_init(pdev, device->reg_base);
-
-	return 0;
-}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index a3e731e..690e3b4 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -21,11 +21,6 @@
 #define IOAT_MMIO_BAR		0
 
 /* CB device ID's */
-#define IOAT_PCI_DID_5000       0x1A38
-#define IOAT_PCI_DID_CNB        0x360B
-#define IOAT_PCI_DID_SCNB       0x65FF
-#define IOAT_PCI_DID_SNB        0x402F
-
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB0	0x0e20
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1	0x0e21
 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2	0x0e22
@@ -58,6 +53,17 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2	0x6f52
 #define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3	0x6f53
 
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX0	0x6f20
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX1	0x6f21
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX2	0x6f22
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX3	0x6f23
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX4	0x6f24
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX5	0x6f25
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX6	0x6f26
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX7	0x6f27
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX8	0x6f2e
+#define PCI_DEVICE_ID_INTEL_IOAT_BDX9	0x6f2f
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
new file mode 100644
index 0000000..1c3c9b0
--- /dev/null
+++ b/drivers/dma/ioat/init.c
@@ -0,0 +1,1314 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dca.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+MODULE_VERSION(IOAT_DMA_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static struct pci_device_id ioat_pci_tbl[] = {
+	/* I/OAT v3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
+
+	/* I/OAT v3.2 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX3) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX4) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX5) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX6) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX7) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+
+	/* I/OAT v3.3 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
+
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
+
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void ioat_remove(struct pci_dev *pdev);
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx);
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
+
+static int ioat_dca_enabled = 1;
+module_param(ioat_dca_enabled, int, 0644);
+MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
+int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+		 "high-water mark for pushing ioat descriptors (default: 4)");
+int ioat_ring_alloc_order = 8;
+module_param(ioat_ring_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_alloc_order,
+		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
+int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
+module_param(ioat_ring_max_alloc_order, int, 0644);
+MODULE_PARM_DESC(ioat_ring_max_alloc_order,
+		 "ioat+: upper limit for ring size (default: 16)");
+static char ioat_interrupt_style[32] = "msix";
+module_param_string(ioat_interrupt_style, ioat_interrupt_style,
+		    sizeof(ioat_interrupt_style), 0644);
+MODULE_PARM_DESC(ioat_interrupt_style,
+		 "set ioat interrupt style: msix (default), msi, intx");
+
+struct kmem_cache *ioat_cache;
+struct kmem_cache *ioat_sed_cache;
+
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_ivb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
+static bool is_hsw_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
+	case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
+static bool is_bdx_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX4:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX5:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX6:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX7:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX8:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDX9:
+		return true;
+	default:
+		return false;
+	}
+}
+
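+/*
+ * Xeon I/OAT v3.2 parts ("cb32": the JSF, SNB, IVB, HSW and BDX
+ * platforms listed above) share quirk handling via this one predicate.
+ */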
+static bool is_xeon_cb32(struct pci_dev *pdev)
+{
+	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
+		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+}
+
+bool is_bwd_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	/* even though not Atom, BDX-DE has the same DMA silicon */
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+		return true;
+	default:
+		return false;
+	}
+}
+
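+/*
+ * BWD/BDX-DE variants that the driver treats as having no raid
+ * (XOR/PQ) offload capability.
+ */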
+static bool is_bwd_noraid(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2:
+	case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3:
+		return true;
+	default:
+		return false;
+	}
+
+}
+
+/*
+ * Perform an IOAT transaction to verify the HW works.
+ */
+#define IOAT_TEST_SIZE 2000
+
+static void ioat_dma_test_callback(void *dma_async_param)
+{
+	struct completion *cmp = dma_async_param;
+
+	complete(cmp);
+}
+
+/**
+ * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
+ * @ioat_dma: dma device to be tested
+ */
+static int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+	int i;
+	u8 *src;
+	u8 *dest;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_chan *dma_chan;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dest, dma_src;
+	dma_cookie_t cookie;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	unsigned long flags;
+
+	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!src)
+		return -ENOMEM;
+	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
+	if (!dest) {
+		kfree(src);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffer */
+	for (i = 0; i < IOAT_TEST_SIZE; i++)
+		src[i] = (u8)i;
+
+	/* Start copy, using first DMA channel */
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		dev_err(dev, "selftest cannot allocate chan resource\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_src)) {
+		dev_err(dev, "mapping src buffer failed\n");
+		goto free_resources;
+	}
+	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dma_dest)) {
+		dev_err(dev, "mapping dest buffer failed\n");
+		goto unmap_src;
+	}
+	flags = DMA_PREP_INTERRUPT;
+	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
+						      dma_src, IOAT_TEST_SIZE,
+						      flags);
+	if (!tx) {
+		dev_err(dev, "Self-test prep failed, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test setup failed, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL)
+					!= DMA_COMPLETE) {
+		dev_err(dev, "Self-test copy timed out, disabling\n");
+		err = -ENODEV;
+		goto unmap_dma;
+	}
+	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
+		dev_err(dev, "Self-test copy failed compare, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+unmap_dma:
+	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	kfree(src);
+	kfree(dest);
+	return err;
+}
+
+/**
+ * ioat_dma_setup_interrupts - setup interrupt handler
+ * @ioat_dma: ioat dma device
+ */
+int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->dev;
+	struct msix_entry *msix;
+	int i, j, msixcnt;
+	int err = -EINVAL;
+	u8 intrctrl = 0;
+
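+	/*
+	 * Honour the requested interrupt style; each mode falls back to
+	 * the next one (msix -> msi -> intx) if it cannot be set up.
+	 */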
+	if (!strcmp(ioat_interrupt_style, "msix"))
+		goto msix;
+	if (!strcmp(ioat_interrupt_style, "msi"))
+		goto msi;
+	if (!strcmp(ioat_interrupt_style, "intx"))
+		goto intx;
+	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
+	goto err_no_irq;
+
+msix:
+	/* The number of MSI-X vectors should equal the number of channels */
+	msixcnt = ioat_dma->dma_dev.chancnt;
+	for (i = 0; i < msixcnt; i++)
+		ioat_dma->msix_entries[i].entry = i;
+
+	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
+	if (err)
+		goto msi;
+
+	for (i = 0; i < msixcnt; i++) {
+		msix = &ioat_dma->msix_entries[i];
+		ioat_chan = ioat_chan_by_index(ioat_dma, i);
+		err = devm_request_irq(dev, msix->vector,
+				       ioat_dma_do_interrupt_msix, 0,
+				       "ioat-msix", ioat_chan);
+		if (err) {
+			for (j = 0; j < i; j++) {
+				msix = &ioat_dma->msix_entries[j];
+				ioat_chan = ioat_chan_by_index(ioat_dma, j);
+				devm_free_irq(dev, msix->vector, ioat_chan);
+			}
+			goto msi;
+		}
+	}
+	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
+	ioat_dma->irq_mode = IOAT_MSIX;
+	goto done;
+
+msi:
+	err = pci_enable_msi(pdev);
+	if (err)
+		goto intx;
+
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
+			       "ioat-msi", ioat_dma);
+	if (err) {
+		pci_disable_msi(pdev);
+		goto intx;
+	}
+	ioat_dma->irq_mode = IOAT_MSI;
+	goto done;
+
+intx:
+	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
+			       IRQF_SHARED, "ioat-intx", ioat_dma);
+	if (err)
+		goto err_no_irq;
+
+	ioat_dma->irq_mode = IOAT_INTX;
+done:
+	if (is_bwd_ioat(pdev))
+		ioat_intr_quirk(ioat_dma);
+	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
+	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	return 0;
+
+err_no_irq:
+	/* Disable all interrupt generation */
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+	ioat_dma->irq_mode = IOAT_NOIRQ;
+	dev_err(dev, "no usable interrupts\n");
+	return err;
+}
+
+static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
+{
+	/* Disable all interrupt generation */
+	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
+}
+
+static int ioat_probe(struct ioatdma_device *ioat_dma)
+{
+	int err = -ENODEV;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct pci_dev *pdev = ioat_dma->pdev;
+	struct device *dev = &pdev->dev;
+
+	/* DMA coherent memory pool for DMA descriptor allocations */
+	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
+					     sizeof(struct ioat_dma_descriptor),
+					     64, 0);
+	if (!ioat_dma->dma_pool) {
+		err = -ENOMEM;
+		goto err_dma_pool;
+	}
+
+	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+						    sizeof(u64),
+						    SMP_CACHE_BYTES,
+						    SMP_CACHE_BYTES);
+
+	if (!ioat_dma->completion_pool) {
+		err = -ENOMEM;
+		goto err_completion_pool;
+	}
+
+	ioat_enumerate_channels(ioat_dma);
+
+	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma->dev = &pdev->dev;
+
+	if (!dma->chancnt) {
+		dev_err(dev, "channel enumeration error\n");
+		goto err_setup_interrupts;
+	}
+
+	err = ioat_dma_setup_interrupts(ioat_dma);
+	if (err)
+		goto err_setup_interrupts;
+
+	err = ioat3_dma_self_test(ioat_dma);
+	if (err)
+		goto err_self_test;
+
+	return 0;
+
+err_self_test:
+	ioat_disable_interrupts(ioat_dma);
+err_setup_interrupts:
+	pci_pool_destroy(ioat_dma->completion_pool);
+err_completion_pool:
+	pci_pool_destroy(ioat_dma->dma_pool);
+err_dma_pool:
+	return err;
+}
+
+static int ioat_register(struct ioatdma_device *ioat_dma)
+{
+	int err = dma_async_device_register(&ioat_dma->dma_dev);
+
+	if (err) {
+		ioat_disable_interrupts(ioat_dma);
+		pci_pool_destroy(ioat_dma->completion_pool);
+		pci_pool_destroy(ioat_dma->dma_pool);
+	}
+
+	return err;
+}
+
+static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+
+	ioat_disable_interrupts(ioat_dma);
+
+	ioat_kobject_del(ioat_dma);
+
+	dma_async_device_unregister(dma);
+
+	pci_pool_destroy(ioat_dma->dma_pool);
+	pci_pool_destroy(ioat_dma->completion_pool);
+
+	INIT_LIST_HEAD(&dma->channels);
+}
+
+/**
+ * ioat_enumerate_channels - find and initialize the device's channels
+ * @ioat_dma: the ioat dma device to be enumerated
+ */
+static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+{
+	struct ioatdma_chan *ioat_chan;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 xfercap_log;
+	int i;
+
+	INIT_LIST_HEAD(&dma->channels);
+	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+	dma->chancnt &= 0x1f; /* bits [4:0] valid */
+	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
+			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
+		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+	}
+	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
+	xfercap_log &= 0x1f; /* bits [4:0] valid */
+	if (xfercap_log == 0)
+		return 0;
+	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
+
+	for (i = 0; i < dma->chancnt; i++) {
+		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
+		if (!ioat_chan)
+			break;
+
+		ioat_init_channel(ioat_dma, ioat_chan, i);
+		ioat_chan->xfercap_log = xfercap_log;
+		spin_lock_init(&ioat_chan->prep_lock);
+		if (ioat_reset_hw(ioat_chan)) {
+			i = 0;
+			break;
+		}
+	}
+	dma->chancnt = i;
+	return i;
+}
+
+/**
+ * ioat_free_chan_resources - release all the descriptors
+ * @c: the channel to be cleaned
+ */
+static void ioat_free_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	const int total_descs = 1 << ioat_chan->alloc_order;
+	int descs;
+	int i;
+
+	/* Before freeing channel resources, first check
+	 * whether they have been previously allocated for this channel.
+	 */
+	if (!ioat_chan->ring)
+		return;
+
+	ioat_stop(ioat_chan);
+	ioat_reset_hw(ioat_chan);
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	descs = ioat_ring_space(ioat_chan);
+	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
+	for (i = 0; i < descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	if (descs < total_descs)
+		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
+			total_descs - descs);
+
+	for (i = 0; i < total_descs - descs; i++) {
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
+		dump_desc_dbg(ioat_chan, desc);
+		ioat_free_ring_ent(desc, c);
+	}
+
+	kfree(ioat_chan->ring);
+	ioat_chan->ring = NULL;
+	ioat_chan->alloc_order = 0;
+	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+		      ioat_chan->completion_dma);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_chan->last_completion = 0;
+	ioat_chan->completion_dma = 0;
+	ioat_chan->dmacount = 0;
+}
+
+/**
+ * ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
+ * @c: channel to be initialized
+ */
+static int ioat_alloc_chan_resources(struct dma_chan *c)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent **ring;
+	u64 status;
+	int order;
+	int i = 0;
+	u32 chanerr;
+
+	/* have we already been set up? */
+	if (ioat_chan->ring)
+		return 1 << ioat_chan->alloc_order;
+
+	/* Setup register to interrupt and write completion status on error */
+	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
+
+	/* allocate a completion writeback area */
+	/* doing two 32-bit writes to MMIO since one 64-bit write doesn't work */
+	ioat_chan->completion =
+		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+			       GFP_KERNEL, &ioat_chan->completion_dma);
+	if (!ioat_chan->completion)
+		return -ENOMEM;
+
+	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
+	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(((u64)ioat_chan->completion_dma) >> 32,
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
+	order = ioat_get_alloc_order();
+	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	if (!ring)
+		return -ENOMEM;
+
+	spin_lock_bh(&ioat_chan->cleanup_lock);
+	spin_lock_bh(&ioat_chan->prep_lock);
+	ioat_chan->ring = ring;
+	ioat_chan->head = 0;
+	ioat_chan->issued = 0;
+	ioat_chan->tail = 0;
+	ioat_chan->alloc_order = order;
+	set_bit(IOAT_RUN, &ioat_chan->state);
+	spin_unlock_bh(&ioat_chan->prep_lock);
+	spin_unlock_bh(&ioat_chan->cleanup_lock);
+
+	ioat_start_null_desc(ioat_chan);
+
+	/* check that we got off the ground */
+	do {
+		udelay(1);
+		status = ioat_chansts(ioat_chan);
+	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
+
+	if (is_ioat_active(status) || is_ioat_idle(status))
+		return 1 << ioat_chan->alloc_order;
+
+	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+
+	dev_WARN(to_dev(ioat_chan),
+		 "failed to start channel chanerr: %#x\n", chanerr);
+	ioat_free_chan_resources(c);
+	return -EFAULT;
+}
+
+/* common channel initialization */
+static void
+ioat_init_channel(struct ioatdma_device *ioat_dma,
+		  struct ioatdma_chan *ioat_chan, int idx)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c = &ioat_chan->dma_chan;
+	unsigned long data = (unsigned long) c;
+
+	ioat_chan->ioat_dma = ioat_dma;
+	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
+	spin_lock_init(&ioat_chan->cleanup_lock);
+	ioat_chan->dma_chan.device = dma;
+	dma_cookie_init(&ioat_chan->dma_chan);
+	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
+	ioat_dma->idx[idx] = ioat_chan;
+	init_timer(&ioat_chan->timer);
+	ioat_chan->timer.function = ioat_timer_event;
+	ioat_chan->timer.data = data;
+	tasklet_init(&ioat_chan->cleanup_task, ioat_cleanup_event, data);
+}
+
+#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
+static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma)
+{
+	int i, src_idx;
+	struct page *dest;
+	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
+	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
+	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
+	dma_addr_t dest_dma;
+	struct dma_async_tx_descriptor *tx;
+	struct dma_chan *dma_chan;
+	dma_cookie_t cookie;
+	u8 cmp_byte = 0;
+	u32 cmp_word;
+	u32 xor_val_result;
+	int err = 0;
+	struct completion cmp;
+	unsigned long tmo;
+	struct device *dev = &ioat_dma->pdev->dev;
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	u8 op = 0;
+
+	dev_dbg(dev, "%s\n", __func__);
+
+	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
+		return 0;
+
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
+		if (!xor_srcs[src_idx]) {
+			while (src_idx--)
+				__free_page(xor_srcs[src_idx]);
+			return -ENOMEM;
+		}
+	}
+
+	dest = alloc_page(GFP_KERNEL);
+	if (!dest) {
+		while (src_idx--)
+			__free_page(xor_srcs[src_idx]);
+		return -ENOMEM;
+	}
+
+	/* Fill in src buffers */
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
+		u8 *ptr = page_address(xor_srcs[src_idx]);
+
+		for (i = 0; i < PAGE_SIZE; i++)
+			ptr[i] = (1 << src_idx);
+	}
+
+	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
+		cmp_byte ^= (u8) (1 << src_idx);
+
+	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
+			(cmp_byte << 8) | cmp_byte;
+
+	memset(page_address(dest), 0, PAGE_SIZE);
+
+	dma_chan = container_of(dma->channels.next, struct dma_chan,
+				device_node);
+	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* test xor */
+	op = IOAT_OP_XOR;
+
+	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dest_dma))
+		goto dma_unmap;
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
+				      DMA_PREP_INTERRUPT);
+
+	if (!tx) {
+		dev_err(dev, "Self-test xor prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test xor setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test xor timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
+		u32 *ptr = page_address(dest);
+
+		if (ptr[i] != cmp_word) {
+			dev_err(dev, "Self-test xor failed compare\n");
+			err = -ENODEV;
+			goto free_resources;
+		}
+	}
+	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	/* skip validate if the capability is not present */
+	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
+		goto free_resources;
+
+	op = IOAT_OP_XOR_VAL;
+
+	/* validate the sources with the destination page */
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		xor_val_srcs[i] = xor_srcs[i];
+	xor_val_srcs[i] = dest;
+
+	xor_val_result = 1;
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+					  &xor_val_result, DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(dev, "Self-test zero prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test zero setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test validate timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	if (xor_val_result != 0) {
+		dev_err(dev, "Self-test validate failed compare\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+	memset(page_address(dest), 0, PAGE_SIZE);
+
+	/* test for non-zero parity sum */
+	op = IOAT_OP_XOR_VAL;
+
+	xor_val_result = 0;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
+		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
+	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
+					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
+					  &xor_val_result, DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(dev, "Self-test 2nd zero prep failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	async_tx_ack(tx);
+	init_completion(&cmp);
+	tx->callback = ioat_dma_test_callback;
+	tx->callback_param = &cmp;
+	cookie = tx->tx_submit(tx);
+	if (cookie < 0) {
+		dev_err(dev, "Self-test 2nd zero setup failed\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+	dma->device_issue_pending(dma_chan);
+
+	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
+
+	if (tmo == 0 ||
+	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
+		dev_err(dev, "Self-test 2nd validate timed out\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	if (xor_val_result != SUM_CHECK_P_RESULT) {
+		dev_err(dev, "Self-test validate failed compare\n");
+		err = -ENODEV;
+		goto dma_unmap;
+	}
+
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
+
+	goto free_resources;
+dma_unmap:
+	if (op == IOAT_OP_XOR) {
+		if (dest_dma != DMA_ERROR_CODE)
+			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
+	} else if (op == IOAT_OP_XOR_VAL) {
+		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
+	}
+free_resources:
+	dma->device_free_chan_resources(dma_chan);
+out:
+	src_idx = IOAT_NUM_SRC_TEST;
+	while (src_idx--)
+		__free_page(xor_srcs[src_idx]);
+	__free_page(dest);
+	return err;
+}
+
+static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma)
+{
+	int rc;
+
+	rc = ioat_dma_self_test(ioat_dma);
+	if (rc)
+		return rc;
+
+	rc = ioat_xor_val_self_test(ioat_dma);
+
+	return rc;
+}
+
+static void ioat_intr_quirk(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioatdma_chan *ioat_chan;
+	u32 errmask;
+
+	dma = &ioat_dma->dma_dev;
+
+	/*
+	 * if we have descriptor write back error status, we mask the
+	 * error interrupts
+	 */
+	if (ioat_dma->cap & IOAT_CAP_DWBES) {
+		list_for_each_entry(c, &dma->channels, device_node) {
+			ioat_chan = to_ioat_chan(c);
+			errmask = readl(ioat_chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+			errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
+				   IOAT_CHANERR_XOR_Q_ERR;
+			writel(errmask, ioat_chan->reg_base +
+					IOAT_CHANERR_MASK_OFFSET);
+		}
+	}
+}
+
+static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
+{
+	struct pci_dev *pdev = ioat_dma->pdev;
+	int dca_en = system_has_dca_enabled(pdev);
+	struct dma_device *dma;
+	struct dma_chan *c;
+	struct ioatdma_chan *ioat_chan;
+	bool is_raid_device = false;
+	int err;
+
+	dma = &ioat_dma->dma_dev;
+	dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock;
+	dma->device_issue_pending = ioat_issue_pending;
+	dma->device_alloc_chan_resources = ioat_alloc_chan_resources;
+	dma->device_free_chan_resources = ioat_free_chan_resources;
+
+	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
+	dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock;
+
+	ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET);
+
+	if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
+		ioat_dma->cap &=
+			~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
+
+	/* dca is incompatible with raid operations */
+	if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+		ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
+	if (ioat_dma->cap & IOAT_CAP_XOR) {
+		is_raid_device = true;
+		dma->max_xor = 8;
+
+		dma_cap_set(DMA_XOR, dma->cap_mask);
+		dma->device_prep_dma_xor = ioat_prep_xor;
+
+		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+		dma->device_prep_dma_xor_val = ioat_prep_xor_val;
+	}
+
+	if (ioat_dma->cap & IOAT_CAP_PQ) {
+		is_raid_device = true;
+
+		dma->device_prep_dma_pq = ioat_prep_pq;
+		dma->device_prep_dma_pq_val = ioat_prep_pq_val;
+		dma_cap_set(DMA_PQ, dma->cap_mask);
+		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
+
+		if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+			dma_set_maxpq(dma, 16, 0);
+		else
+			dma_set_maxpq(dma, 8, 0);
+
+		if (!(ioat_dma->cap & IOAT_CAP_XOR)) {
+			dma->device_prep_dma_xor = ioat_prep_pqxor;
+			dma->device_prep_dma_xor_val = ioat_prep_pqxor_val;
+			dma_cap_set(DMA_XOR, dma->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
+
+			if (ioat_dma->cap & IOAT_CAP_RAID16SS)
+				dma->max_xor = 16;
+			else
+				dma->max_xor = 8;
+		}
+	}
+
+	dma->device_tx_status = ioat_tx_status;
+
+	/* starting with CB3.3 super extended descriptors are supported */
+	if (ioat_dma->cap & IOAT_CAP_RAID16SS) {
+		char pool_name[14];
+		int i;
+
+		for (i = 0; i < MAX_SED_POOLS; i++) {
+			snprintf(pool_name, 14, "ioat_hw%d_sed", i);
+
+			/* allocate SED DMA pool */
+			ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name,
+					&pdev->dev,
+					SED_SIZE * (i + 1), 64, 0);
+			if (!ioat_dma->sed_hw_pool[i])
+				return -ENOMEM;
+		}
+	}
+
+	if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ)))
+		dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+	err = ioat_probe(ioat_dma);
+	if (err)
+		return err;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		ioat_chan = to_ioat_chan(c);
+		writel(IOAT_DMA_DCA_ANY_CPU,
+		       ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+	}
+
+	err = ioat_register(ioat_dma);
+	if (err)
+		return err;
+
+	ioat_kobject_add(ioat_dma, &ioat_ktype);
+
+	if (dca)
+		ioat_dma->dca = ioat_dca_init(pdev, ioat_dma->reg_base);
+
+	return 0;
+}
+
+#define DRV_NAME "ioatdma"
+
+static struct pci_driver ioat_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= ioat_pci_tbl,
+	.probe		= ioat_pci_probe,
+	.remove		= ioat_remove,
+};
+
+static struct ioatdma_device *
+alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
+{
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+
+	if (!d)
+		return NULL;
+	d->pdev = pdev;
+	d->reg_base = iobase;
+	return d;
+}
+
+static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	void __iomem * const *iomap;
+	struct device *dev = &pdev->dev;
+	struct ioatdma_device *device;
+	int err;
+
+	err = pcim_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
+	if (err)
+		return err;
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
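+	/* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails. */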
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err)
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err)
+		return err;
+
+	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
+	if (!device)
+		return -ENOMEM;
+	pci_set_master(pdev);
+	pci_set_drvdata(pdev, device);
+
+	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+	if (device->version >= IOAT_VER_3_0)
+		err = ioat3_dma_probe(device, ioat_dca_enabled);
+	else
+		return -ENODEV;
+
+	if (err) {
+		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void ioat_remove(struct pci_dev *pdev)
+{
+	struct ioatdma_device *device = pci_get_drvdata(pdev);
+
+	if (!device)
+		return;
+
+	dev_err(&pdev->dev, "Removing dma and dca services\n");
+	if (device->dca) {
+		unregister_dca_provider(device->dca, &pdev->dev);
+		free_dca_provider(device->dca);
+		device->dca = NULL;
+	}
+	ioat_dma_remove(device);
+}
+
+static int __init ioat_init_module(void)
+{
+	int err = -ENOMEM;
+
+	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
+		DRV_NAME, IOAT_DMA_VERSION);
+
+	ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent),
+					0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!ioat_cache)
+		return -ENOMEM;
+
+	ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
+	if (!ioat_sed_cache)
+		goto err_ioat_cache;
+
+	err = pci_register_driver(&ioat_pci_driver);
+	if (err)
+		goto err_ioat3_cache;
+
+	return 0;
+
+ err_ioat3_cache:
+	kmem_cache_destroy(ioat_sed_cache);
+
+ err_ioat_cache:
+	kmem_cache_destroy(ioat_cache);
+
+	return err;
+}
+module_init(ioat_init_module);
+
+static void __exit ioat_exit_module(void)
+{
+	pci_unregister_driver(&ioat_pci_driver);
+	kmem_cache_destroy(ioat_cache);
+}
+module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
deleted file mode 100644
index 76f0dc6..0000000
--- a/drivers/dma/ioat/pci.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Intel I/OAT DMA Linux driver
- * Copyright(c) 2007 - 2009 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- */
-
-/*
- * This driver supports an Intel I/OAT DMA engine, which does asynchronous
- * copy operations.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/dca.h>
-#include <linux/slab.h>
-#include "dma.h"
-#include "dma_v2.h"
-#include "registers.h"
-#include "hw.h"
-
-MODULE_VERSION(IOAT_DMA_VERSION);
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-
-static struct pci_device_id ioat_pci_tbl[] = {
-	/* I/OAT v1 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB)  },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
-	{ PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
-
-	/* I/OAT v2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
-
-	/* I/OAT v3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) },
-
-	/* I/OAT v3.2 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) },
-
-	/* I/OAT v3.3 platforms */
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) },
-
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) },
-	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) },
-
-	{ 0, }
-};
-MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
-static void ioat_remove(struct pci_dev *pdev);
-
-static int ioat_dca_enabled = 1;
-module_param(ioat_dca_enabled, int, 0644);
-MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)");
-
-struct kmem_cache *ioat2_cache;
-struct kmem_cache *ioat3_sed_cache;
-
-#define DRV_NAME "ioatdma"
-
-static struct pci_driver ioat_pci_driver = {
-	.name		= DRV_NAME,
-	.id_table	= ioat_pci_tbl,
-	.probe		= ioat_pci_probe,
-	.remove		= ioat_remove,
-};
-
-static struct ioatdma_device *
-alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
-{
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
-
-	if (!d)
-		return NULL;
-	d->pdev = pdev;
-	d->reg_base = iobase;
-	return d;
-}
-
-static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-	void __iomem * const *iomap;
-	struct device *dev = &pdev->dev;
-	struct ioatdma_device *device;
-	int err;
-
-	err = pcim_enable_device(pdev);
-	if (err)
-		return err;
-
-	err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME);
-	if (err)
-		return err;
-	iomap = pcim_iomap_table(pdev);
-	if (!iomap)
-		return -ENOMEM;
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err)
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-	if (err)
-		return err;
-
-	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
-	if (!device)
-		return -ENOMEM;
-	pci_set_master(pdev);
-	pci_set_drvdata(pdev, device);
-
-	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
-	if (device->version == IOAT_VER_1_2)
-		err = ioat1_dma_probe(device, ioat_dca_enabled);
-	else if (device->version == IOAT_VER_2_0)
-		err = ioat2_dma_probe(device, ioat_dca_enabled);
-	else if (device->version >= IOAT_VER_3_0)
-		err = ioat3_dma_probe(device, ioat_dca_enabled);
-	else
-		return -ENODEV;
-
-	if (err) {
-		dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-static void ioat_remove(struct pci_dev *pdev)
-{
-	struct ioatdma_device *device = pci_get_drvdata(pdev);
-
-	if (!device)
-		return;
-
-	dev_err(&pdev->dev, "Removing dma and dca services\n");
-	if (device->dca) {
-		unregister_dca_provider(device->dca, &pdev->dev);
-		free_dca_provider(device->dca);
-		device->dca = NULL;
-	}
-	ioat_dma_remove(device);
-}
-
-static int __init ioat_init_module(void)
-{
-	int err = -ENOMEM;
-
-	pr_info("%s: Intel(R) QuickData Technology Driver %s\n",
-		DRV_NAME, IOAT_DMA_VERSION);
-
-	ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent),
-					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!ioat2_cache)
-		return -ENOMEM;
-
-	ioat3_sed_cache = KMEM_CACHE(ioat_sed_ent, 0);
-	if (!ioat3_sed_cache)
-		goto err_ioat2_cache;
-
-	err = pci_register_driver(&ioat_pci_driver);
-	if (err)
-		goto err_ioat3_cache;
-
-	return 0;
-
- err_ioat3_cache:
-	kmem_cache_destroy(ioat3_sed_cache);
-
- err_ioat2_cache:
-	kmem_cache_destroy(ioat2_cache);
-
-	return err;
-}
-module_init(ioat_init_module);
-
-static void __exit ioat_exit_module(void)
-{
-	pci_unregister_driver(&ioat_pci_driver);
-	kmem_cache_destroy(ioat2_cache);
-}
-module_exit(ioat_exit_module);
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
new file mode 100644
index 0000000..ad4fb41
--- /dev/null
+++ b/drivers/dma/ioat/prep.c
@@ -0,0 +1,715 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/prefetch.h>
+#include "../dmaengine.h"
+#include "registers.h"
+#include "hw.h"
+#include "dma.h"
+
+#define MAX_SCF	1024
+
+/* provide a lookup table for setting the source address in the base or
+ * extended descriptor of an xor or pq descriptor
+ */
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
+				       2, 2, 2, 2, 2, 2, 2 };
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
+					0, 1, 2, 3, 4, 5, 6 };
+
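+/*
+ * The *_idx_to_desc values select, per source index, whether an address is
+ * written to the base descriptor or an extended descriptor (a set bit, or a
+ * non-zero array entry for the 16-source form, picks an extended one), and
+ * the *_idx_to_field tables give the raw field slot within that descriptor.
+ */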
+static void xor_set_src(struct ioat_raw_descriptor *descs[2],
+			dma_addr_t addr, u32 offset, int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
+
+	raw->field[xor_idx_to_field[idx]] = addr + offset;
+}
+
+static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
+{
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	return raw->field[pq_idx_to_field[idx]];
+}
+
+static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
+{
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	return raw->field[pq16_idx_to_field[idx]];
+}
+
+static void pq_set_src(struct ioat_raw_descriptor *descs[2],
+		       dma_addr_t addr, u32 offset, u8 coef, int idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
+	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
+
+	raw->field[pq_idx_to_field[idx]] = addr + offset;
+	pq->coef[idx] = coef;
+}
+
+static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
+			dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
+{
+	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
+	struct ioat_pq16a_descriptor *pq16 =
+		(struct ioat_pq16a_descriptor *)desc[1];
+	struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
+
+	raw->field[pq16_idx_to_field[idx]] = addr + offset;
+
+	if (idx < 8)
+		pq->coef[idx] = coef;
+	else
+		pq16->coef[idx - 8] = coef;
+}
+
+static struct ioat_sed_ent *
+ioat3_alloc_sed(struct ioatdma_device *ioat_dma, unsigned int hw_pool)
+{
+	struct ioat_sed_ent *sed;
+	gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
+
+	sed = kmem_cache_alloc(ioat_sed_cache, flags);
+	if (!sed)
+		return NULL;
+
+	sed->hw_pool = hw_pool;
+	sed->hw = dma_pool_alloc(ioat_dma->sed_hw_pool[hw_pool],
+				 flags, &sed->dma);
+	if (!sed->hw) {
+		kmem_cache_free(ioat_sed_cache, sed);
+		return NULL;
+	}
+
+	return sed;
+}
+
+struct dma_async_tx_descriptor *
+ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
+			   dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_dma_descriptor *hw;
+	struct ioat_ring_ent *desc;
+	dma_addr_t dst = dma_dest;
+	dma_addr_t src = dma_src;
+	size_t total_len = len;
+	int num_descs, idx, i;
+
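+	/* split into descriptors of at most 1 << xfercap_log bytes each */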
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		hw = desc->hw;
+
+		hw->size = copy;
+		hw->ctl = 0;
+		hw->src_addr = src;
+		hw->dst_addr = dst;
+
+		len -= copy;
+		dst += copy;
+		src += copy;
+		dump_desc_dbg(ioat_chan, desc);
+	} while (++i < num_descs);
+
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	dump_desc_dbg(ioat_chan, desc);
+	/* we leave the channel locked to ensure in order submission */
+
+	return &desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
+		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
+		      size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent *compl_desc;
+	struct ioat_ring_ent *desc;
+	struct ioat_ring_ent *ext;
+	size_t total_len = len;
+	struct ioat_xor_descriptor *xor;
+	struct ioat_xor_ext_descriptor *xor_ex = NULL;
+	struct ioat_dma_descriptor *hw;
+	int num_descs, with_ext, idx, i;
+	u32 offset = 0;
+	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
+
+	BUG_ON(src_cnt < 2);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	/* we need 2x the number of descriptors to cover greater than 5
+	 * sources
+	 */
+	if (src_cnt > 5) {
+		with_ext = 1;
+		num_descs *= 2;
+	} else
+		with_ext = 0;
+
+	/* completion writes from the raid engine may pass completion
+	 * writes from the legacy engine, so we need one extra null
+	 * (legacy) descriptor to ensure all completion writes arrive in
+	 * order.
+	 */
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs+1) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		struct ioat_raw_descriptor *descs[2];
+		size_t xfer_size = min_t(size_t,
+					 len, 1 << ioat_chan->xfercap_log);
+		int s;
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		xor = desc->xor;
+
+		/* save a branch by unconditionally retrieving the
+		 * extended descriptor; xor_set_src() knows not to write
+		 * to it in the single descriptor case
+		 */
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + 1);
+		xor_ex = ext->xor_ex;
+
+		descs[0] = (struct ioat_raw_descriptor *) xor;
+		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
+		for (s = 0; s < src_cnt; s++)
+			xor_set_src(descs, src[s], offset, s);
+		xor->size = xfer_size;
+		xor->dst_addr = dest + offset;
+		xor->ctl = 0;
+		xor->ctl_f.op = op;
+		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
+
+		len -= xfer_size;
+		offset += xfer_size;
+		dump_desc_dbg(ioat_chan, desc);
+	} while ((i += 1 + with_ext) < num_descs);
+
+	/* last xor descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* completion descriptor carries interrupt bit */
+	compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+	hw = compl_desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	dump_desc_dbg(ioat_chan, compl_desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &compl_desc->txd;
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+	       unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
+		    unsigned int src_cnt, size_t len,
+		    enum sum_check_flags *result, unsigned long flags)
+{
+	/* the cleanup routine only sets bits on validate failure, it
+	 * does not clear bits on validate success... so clear it here
+	 */
+	*result = 0;
+
+	return __ioat_prep_xor_lock(chan, result, src[0], &src[1],
+				     src_cnt - 1, len, flags);
+}
+
+static void
+dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc,
+		 struct ioat_ring_ent *ext)
+{
+	struct device *dev = to_dev(ioat_chan);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
+	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
+	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
+		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
+		pq->ctl_f.int_en, pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++)
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+	dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
+}
+
+static void dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan,
+			       struct ioat_ring_ent *desc)
+{
+	struct device *dev = to_dev(ioat_chan);
+	struct ioat_pq_descriptor *pq = desc->pq;
+	struct ioat_raw_descriptor *descs[] = { (void *)pq,
+						(void *)pq,
+						(void *)pq };
+	int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
+	int i;
+
+	if (desc->sed) {
+		descs[1] = (void *)desc->sed->hw;
+		descs[2] = (void *)desc->sed->hw + 64;
+	}
+
+	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
+		" sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
+		" src_cnt: %d)\n",
+		desc_id(desc), (unsigned long long) desc->txd.phys,
+		(unsigned long long) pq->next,
+		desc->txd.flags, pq->size, pq->ctl,
+		pq->ctl_f.op, pq->ctl_f.int_en,
+		pq->ctl_f.compl_write,
+		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
+		pq->ctl_f.src_cnt);
+	for (i = 0; i < src_cnt; i++) {
+		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
+			(unsigned long long) pq16_get_src(descs, i),
+			pq->coef[i]);
+	}
+	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
+	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
+		     const dma_addr_t *dst, const dma_addr_t *src,
+		     unsigned int src_cnt, const unsigned char *scf,
+		     size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *compl_desc;
+	struct ioat_ring_ent *desc;
+	struct ioat_ring_ent *ext;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	struct ioat_pq_ext_descriptor *pq_ex = NULL;
+	struct ioat_dma_descriptor *hw;
+	u32 offset = 0;
+	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
+	int i, s, idx, with_ext, num_descs;
+	int cb32 = (ioat_dma->version < IOAT_VER_3_3) ? 1 : 0;
+
+	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+	/* the engine requires at least two sources (we provide
+	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
+	 */
+	BUG_ON(src_cnt + dmaf_continue(flags) < 2);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+	/* we need 2x the number of descriptors to cover greater than 3
+	 * sources (we need 1 extra source in the q-only continuation
+	 * case and 3 extra sources in the p+q continuation case).
+	 */
+	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
+	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
+		with_ext = 1;
+		num_descs *= 2;
+	} else
+		with_ext = 0;
+
+	/* completion writes from the raid engine may pass completion
+	 * writes from the legacy engine, so we need one extra null
+	 * (legacy) descriptor to ensure all completion writes arrive in
+	 * order.
+	 */
+	if (likely(num_descs) &&
+	    ioat_check_space_lock(ioat_chan, num_descs + cb32) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+	i = 0;
+	do {
+		struct ioat_raw_descriptor *descs[2];
+		size_t xfer_size = min_t(size_t, len,
+					 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		pq = desc->pq;
+
+		/* save a branch by unconditionally retrieving the
+		 * extended descriptor; pq_set_src() knows not to write
+		 * to it in the single descriptor case
+		 */
+		ext = ioat_get_ring_ent(ioat_chan, idx + i + with_ext);
+		pq_ex = ext->pq_ex;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+		descs[1] = (struct ioat_raw_descriptor *) pq_ex;
+
+		for (s = 0; s < src_cnt; s++)
+			pq_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq_set_src(descs, dst[0], offset, 0, s++);
+			pq_set_src(descs, dst[1], offset, 1, s++);
+			pq_set_src(descs, dst[1], offset, 0, s++);
+		}
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		/* we turn on descriptor write back error status */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while ((i += 1 + with_ext) < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	dump_pq_desc_dbg(ioat_chan, desc, ext);
+
+	if (!cb32) {
+		pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		pq->ctl_f.compl_write = 1;
+		compl_desc = desc;
+	} else {
+		/* completion descriptor carries interrupt bit */
+		compl_desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
+		hw = compl_desc->hw;
+		hw->ctl = 0;
+		hw->ctl_f.null = 1;
+		hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+		hw->ctl_f.compl_write = 1;
+		hw->size = NULL_DESC_BUFFER_SIZE;
+		dump_desc_dbg(ioat_chan, compl_desc);
+	}
+
+	/* we leave the channel locked to ensure in order submission */
+	return &compl_desc->txd;
+}
+
+static struct dma_async_tx_descriptor *
+__ioat_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
+		       const dma_addr_t *dst, const dma_addr_t *src,
+		       unsigned int src_cnt, const unsigned char *scf,
+		       size_t len, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
+	struct ioat_ring_ent *desc;
+	size_t total_len = len;
+	struct ioat_pq_descriptor *pq;
+	u32 offset = 0;
+	u8 op;
+	int i, s, idx, num_descs;
+
+	/* this function is only called with 9-16 sources */
+	op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
+
+	dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
+
+	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
+
+	/*
+	 * 16 source pq is only available on cb3.3 and has no completion
+	 * write hw bug.
+	 */
+	if (num_descs && ioat_check_space_lock(ioat_chan, num_descs) == 0)
+		idx = ioat_chan->head;
+	else
+		return NULL;
+
+	i = 0;
+
+	do {
+		struct ioat_raw_descriptor *descs[4];
+		size_t xfer_size = min_t(size_t, len,
+					 1 << ioat_chan->xfercap_log);
+
+		desc = ioat_get_ring_ent(ioat_chan, idx + i);
+		pq = desc->pq;
+
+		descs[0] = (struct ioat_raw_descriptor *) pq;
+
+		desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3);
+		if (!desc->sed) {
+			dev_err(to_dev(ioat_chan),
+				"%s: no free sed entries\n", __func__);
+			return NULL;
+		}
+
+		pq->sed_addr = desc->sed->dma;
+		desc->sed->parent = desc;
+
+		descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
+		descs[2] = (void *)descs[1] + 64;
+
+		for (s = 0; s < src_cnt; s++)
+			pq16_set_src(descs, src[s], offset, scf[s], s);
+
+		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
+		if (dmaf_p_disabled_continue(flags))
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+		else if (dmaf_continue(flags)) {
+			pq16_set_src(descs, dst[0], offset, 0, s++);
+			pq16_set_src(descs, dst[1], offset, 1, s++);
+			pq16_set_src(descs, dst[1], offset, 0, s++);
+		}
+
+		pq->size = xfer_size;
+		pq->p_addr = dst[0] + offset;
+		pq->q_addr = dst[1] + offset;
+		pq->ctl = 0;
+		pq->ctl_f.op = op;
+		pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
+		/* we turn on descriptor write back error status */
+		if (ioat_dma->cap & IOAT_CAP_DWBES)
+			pq->ctl_f.wb_en = result ? 1 : 0;
+		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
+		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
+
+		len -= xfer_size;
+		offset += xfer_size;
+	} while (++i < num_descs);
+
+	/* last pq descriptor carries the unmap parameters and fence bit */
+	desc->txd.flags = flags;
+	desc->len = total_len;
+	if (result)
+		desc->result = result;
+	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+
+	/* with cb3.3 we should be able to do completion w/o a null desc */
+	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
+	pq->ctl_f.compl_write = 1;
+
+	dump_pq16_desc_dbg(ioat_chan, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
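+/*
+ * Account for the implied sources added by RAID continuation: a q-only
+ * continuation carries one extra source, a p+q continuation three.
+ */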
+static int src_cnt_flags(unsigned int src_cnt, unsigned long flags)
+{
+	if (dmaf_p_disabled_continue(flags))
+		return src_cnt + 1;
+	else if (dmaf_continue(flags))
+		return src_cnt + 3;
+	else
+		return src_cnt;
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+	      unsigned int src_cnt, const unsigned char *scf, size_t len,
+	      unsigned long flags)
+{
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		dst[0] = dst[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		dst[1] = dst[0];
+
+	/* handle the single source multiply case from the raid6
+	 * recovery path
+	 */
+	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
+		dma_addr_t single_source[2];
+		unsigned char single_source_coef[2];
+
+		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
+		single_source[0] = src[0];
+		single_source[1] = src[0];
+		single_source_coef[0] = scf[0];
+		single_source_coef[1] = 0;
+
+		return src_cnt_flags(src_cnt, flags) > 8 ?
+			__ioat_prep_pq16_lock(chan, NULL, dst, single_source,
+					       2, single_source_coef, len,
+					       flags) :
+			__ioat_prep_pq_lock(chan, NULL, dst, single_source, 2,
+					     single_source_coef, len, flags);
+
+	} else {
+		return src_cnt_flags(src_cnt, flags) > 8 ?
+			__ioat_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
+					       scf, len, flags) :
+			__ioat_prep_pq_lock(chan, NULL, dst, src, src_cnt,
+					     scf, len, flags);
+	}
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
+		  unsigned int src_cnt, const unsigned char *scf, size_t len,
+		  enum sum_check_flags *pqres, unsigned long flags)
+{
+	/* specify valid address for disabled result */
+	if (flags & DMA_PREP_PQ_DISABLE_P)
+		pq[0] = pq[1];
+	if (flags & DMA_PREP_PQ_DISABLE_Q)
+		pq[1] = pq[0];
+
+	/* the cleanup routine only sets bits on validate failure, it
+	 * does not clear bits on validate success... so clear it here
+	 */
+	*pqres = 0;
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
+				     flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
+		 unsigned int src_cnt, size_t len, unsigned long flags)
+{
+	unsigned char scf[MAX_SCF];
+	dma_addr_t pq[2];
+
+	if (src_cnt > MAX_SCF)
+		return NULL;
+
+	memset(scf, 0, src_cnt);
+	pq[0] = dst;
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = dst; /* specify valid address for disabled result */
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				       flags) :
+		__ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
+				     flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
+		     unsigned int src_cnt, size_t len,
+		     enum sum_check_flags *result, unsigned long flags)
+{
+	unsigned char scf[MAX_SCF];
+	dma_addr_t pq[2];
+
+	if (src_cnt > MAX_SCF)
+		return NULL;
+
+	/* the cleanup routine only sets bits on validate failure, it
+	 * does not clear bits on validate success... so clear it here
+	 */
+	*result = 0;
+
+	memset(scf, 0, src_cnt);
+	pq[0] = src[0];
+	flags |= DMA_PREP_PQ_DISABLE_Q;
+	pq[1] = pq[0]; /* specify valid address for disabled result */
+
+	return src_cnt_flags(src_cnt, flags) > 8 ?
+		__ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
+				       scf, len, flags) :
+		__ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
+				     scf, len, flags);
+}
+
+struct dma_async_tx_descriptor *
+ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+	struct ioat_ring_ent *desc;
+	struct ioat_dma_descriptor *hw;
+
+	if (ioat_check_space_lock(ioat_chan, 1) == 0)
+		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
+	else
+		return NULL;
+
+	hw = desc->hw;
+	hw->ctl = 0;
+	hw->ctl_f.null = 1;
+	hw->ctl_f.int_en = 1;
+	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
+	hw->ctl_f.compl_write = 1;
+	hw->size = NULL_DESC_BUFFER_SIZE;
+	hw->src_addr = 0;
+	hw->dst_addr = 0;
+
+	desc->txd.flags = flags;
+	desc->len = 1;
+
+	dump_desc_dbg(ioat_chan, desc);
+
+	/* we leave the channel locked to ensure in order submission */
+	return &desc->txd;
+}
+
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
new file mode 100644
index 0000000..cb4a857
--- /dev/null
+++ b/drivers/dma/ioat/sysfs.c
@@ -0,0 +1,135 @@
+/*
+ * Intel I/OAT DMA Linux driver
+ * Copyright(c) 2004 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/pci.h>
+#include "dma.h"
+#include "registers.h"
+#include "hw.h"
+
+#include "../dmaengine.h"
+
+static ssize_t cap_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+
+	return sprintf(page, "copy%s%s%s%s%s\n",
+		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
+		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
+		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
+		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
+		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
+
+}
+struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);
+
+static ssize_t version_show(struct dma_chan *c, char *page)
+{
+	struct dma_device *dma = c->device;
+	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);
+
+	return sprintf(page, "%d.%d\n",
+		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
+}
+struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);
+
+static ssize_t
+ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct ioat_sysfs_entry *entry;
+	struct ioatdma_chan *ioat_chan;
+
+	entry = container_of(attr, struct ioat_sysfs_entry, attr);
+	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);
+
+	if (!entry->show)
+		return -EIO;
+	return entry->show(&ioat_chan->dma_chan, page);
+}
+
+const struct sysfs_ops ioat_sysfs_ops = {
+	.show	= ioat_attr_show,
+};
+
+void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+		struct kobject *parent = &c->dev->device.kobj;
+		int err;
+
+		err = kobject_init_and_add(&ioat_chan->kobj, type,
+					   parent, "quickdata");
+		if (err) {
+			dev_warn(to_dev(ioat_chan),
+				 "sysfs init error (%d), continuing...\n", err);
+			kobject_put(&ioat_chan->kobj);
+			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
+		}
+	}
+}
+
+void ioat_kobject_del(struct ioatdma_device *ioat_dma)
+{
+	struct dma_device *dma = &ioat_dma->dma_dev;
+	struct dma_chan *c;
+
+	list_for_each_entry(c, &dma->channels, device_node) {
+		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
+			kobject_del(&ioat_chan->kobj);
+			kobject_put(&ioat_chan->kobj);
+		}
+	}
+}
+
+static ssize_t ring_size_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
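+	/* an alloc_order of 0 (no ring) reports a size of 0 via the & ~1 mask */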
+	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
+}
+static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);
+
+static ssize_t ring_active_show(struct dma_chan *c, char *page)
+{
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
+
+	/* ...taken outside the lock, no need to be precise */
+	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
+}
+static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
+
+static struct attribute *ioat_attrs[] = {
+	&ring_size_attr.attr,
+	&ring_active_attr.attr,
+	&ioat_cap_attr.attr,
+	&ioat_version_attr.attr,
+	NULL,
+};
+
+struct kobj_type ioat_ktype = {
+	.sysfs_ops = &ioat_sysfs_ops,
+	.default_attrs = ioat_attrs,
+};
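The attribute machinery above is generic: a new per-channel sysfs file only needs a show helper, an ioat_sysfs_entry, and a slot in the attribute array consumed by ioat_ktype. A minimal sketch, assuming a hypothetical intr_coalesce field on struct ioatdma_chan (not something this code defines):

static ssize_t intr_coalesce_show(struct dma_chan *c, char *page)
{
	/* hypothetical per-channel field, shown only to illustrate the pattern */
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	return sprintf(page, "%u\n", ioat_chan->intr_coalesce);
}
static struct ioat_sysfs_entry intr_coalesce_attr = __ATTR_RO(intr_coalesce);

The entry would then be listed in ioat_attrs[] ahead of the NULL terminator so that ioat_attr_show() can reach it through the shared sysfs_ops.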
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 9988268..e4f4312 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1300,10 +1300,11 @@
 	 * note: writecombine gives slightly better performance, but
 	 * requires that we explicitly flush the writes
 	 */
-	if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
-					plat_data->pool_size,
-					&adev->dma_desc_pool,
-					GFP_KERNEL)) == NULL) {
+	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+							  plat_data->pool_size,
+							  &adev->dma_desc_pool,
+							  GFP_KERNEL);
+	if (!adev->dma_desc_pool_virt) {
 		ret = -ENOMEM;
 		goto err_free_adev;
 	}
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 2e284a4..4768a82 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -265,10 +265,10 @@
 	return ret;
 }
 
-/* Chained IRQ handler for IPU error interrupt */
-static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
+/* Chained IRQ handler for IPU function and error interrupt */
+static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct ipu *ipu = irq_get_handler_data(irq);
+	struct ipu *ipu = irq_desc_get_handler_data(desc);
 	u32 status;
 	int i, line;
 
@@ -286,43 +286,7 @@
 		raw_spin_unlock(&bank_lock);
 		while ((line = ffs(status))) {
 			struct ipu_irq_map *map;
-
-			line--;
-			status &= ~(1UL << line);
-
-			raw_spin_lock(&bank_lock);
-			map = src2map(32 * i + line);
-			if (map)
-				irq = map->irq;
-			raw_spin_unlock(&bank_lock);
-
-			if (!map) {
-				pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
-				       line, i);
-				continue;
-			}
-			generic_handle_irq(irq);
-		}
-	}
-}
-
-/* Chained IRQ handler for IPU function interrupt */
-static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
-{
-	struct ipu *ipu = irq_desc_get_handler_data(desc);
-	u32 status;
-	int i, line;
-
-	for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
-		struct ipu_irq_bank *bank = irq_bank + i;
-
-		raw_spin_lock(&bank_lock);
-		status = ipu_read_reg(ipu, bank->status);
-		/* Not clearing all interrupts, see above */
-		status &= ipu_read_reg(ipu, bank->control);
-		raw_spin_unlock(&bank_lock);
-		while ((line = ffs(status))) {
-			struct ipu_irq_map *map;
+			unsigned int irq = NO_IRQ;
 
 			line--;
 			status &= ~(1UL << line);
@@ -377,16 +341,12 @@
 		irq_map[i].irq = irq;
 		irq_map[i].source = -EINVAL;
 		irq_set_handler(irq, handle_level_irq);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
-	irq_set_handler_data(ipu->irq_fn, ipu);
-	irq_set_chained_handler(ipu->irq_fn, ipu_irq_fn);
+	irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
 
-	irq_set_handler_data(ipu->irq_err, ipu);
-	irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
+	irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
 
 	ipu->irq_base = irq_base;
 
@@ -399,16 +359,12 @@
 
 	irq_base = ipu->irq_base;
 
-	irq_set_chained_handler(ipu->irq_fn, NULL);
-	irq_set_handler_data(ipu->irq_fn, NULL);
+	irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
 
-	irq_set_chained_handler(ipu->irq_err, NULL);
-	irq_set_handler_data(ipu->irq_err, NULL);
+	irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
 
 	for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, 0);
-#endif
+		irq_set_status_flags(irq, IRQ_NOREQUEST);
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 647e362..1ba2fd7 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -24,7 +24,6 @@
 #include "virt-dma.h"
 
 #define DRIVER_NAME		"k3-dma"
-#define DMA_ALIGN		3
 #define DMA_MAX_SIZE		0x1ffc
 
 #define INT_STAT		0x00
@@ -732,7 +731,7 @@
 	d->slave.device_pause = k3_dma_transfer_pause;
 	d->slave.device_resume = k3_dma_transfer_resume;
 	d->slave.device_terminate_all = k3_dma_terminate_all;
-	d->slave.copy_align = DMA_ALIGN;
+	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	/* init virtual channel */
 	d->chans = devm_kzalloc(&op->dev,
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
new file mode 100644
index 0000000..761f326
--- /dev/null
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -0,0 +1,183 @@
+/*
+ * DMA Router driver for LPC18xx/43xx DMA MUX
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on TI DMA Crossbar driver by:
+ *   Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *   Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+/* CREG register offset and macros for mux manipulation */
+#define LPC18XX_CREG_DMAMUX		0x11c
+#define LPC18XX_DMAMUX_VAL(v, n)	((v) << (n * 2))
+#define LPC18XX_DMAMUX_MASK(n)		(0x3 << (n * 2))
+#define LPC18XX_DMAMUX_MAX_VAL		0x3
+
+struct lpc18xx_dmamux {
+	u32 value;
+	bool busy;
+};
+
+struct lpc18xx_dmamux_data {
+	struct dma_router dmarouter;
+	struct lpc18xx_dmamux *muxes;
+	u32 dma_master_requests;
+	u32 dma_mux_requests;
+	struct regmap *reg;
+	spinlock_t lock;
+};
+
+static void lpc18xx_dmamux_free(struct device *dev, void *route_data)
+{
+	struct lpc18xx_dmamux_data *dmamux = dev_get_drvdata(dev);
+	struct lpc18xx_dmamux *mux = route_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dmamux->lock, flags);
+	mux->busy = false;
+	spin_unlock_irqrestore(&dmamux->lock, flags);
+}
+
+static void *lpc18xx_dmamux_reserve(struct of_phandle_args *dma_spec,
+				    struct of_dma *ofdma)
+{
+	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
+	struct lpc18xx_dmamux_data *dmamux = platform_get_drvdata(pdev);
+	unsigned long flags;
+	unsigned mux;
+
+	if (dma_spec->args_count != 3) {
+		dev_err(&pdev->dev, "invalid number of dma mux args\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux = dma_spec->args[0];
+	if (mux >= dmamux->dma_master_requests) {
+		dev_err(&pdev->dev, "invalid mux number: %d\n",
+			dma_spec->args[0]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (dma_spec->args[1] > LPC18XX_DMAMUX_MAX_VAL) {
+		dev_err(&pdev->dev, "invalid dma mux value: %d\n",
+			dma_spec->args[1]);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The of_node_put() will be done in the core for the node */
+	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
+	if (!dma_spec->np) {
+		dev_err(&pdev->dev, "can't get dma master\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	spin_lock_irqsave(&dmamux->lock, flags);
+	if (dmamux->muxes[mux].busy) {
+		spin_unlock_irqrestore(&dmamux->lock, flags);
+		dev_err(&pdev->dev, "dma request %u busy with %u.%u\n",
+			mux, mux, dmamux->muxes[mux].value);
+		of_node_put(dma_spec->np);
+		return ERR_PTR(-EBUSY);
+	}
+
+	dmamux->muxes[mux].busy = true;
+	dmamux->muxes[mux].value = dma_spec->args[1];
+
+	regmap_update_bits(dmamux->reg, LPC18XX_CREG_DMAMUX,
+			   LPC18XX_DMAMUX_MASK(mux),
+			   LPC18XX_DMAMUX_VAL(dmamux->muxes[mux].value, mux));
+	spin_unlock_irqrestore(&dmamux->lock, flags);
+
+	dma_spec->args[1] = dma_spec->args[2];
+	dma_spec->args_count = 2;
+
+	dev_dbg(&pdev->dev, "mapping dmamux %u.%u to dma request %u\n", mux,
+		dmamux->muxes[mux].value, mux);
+
+	return &dmamux->muxes[mux];
+}
+
+static int lpc18xx_dmamux_probe(struct platform_device *pdev)
+{
+	struct device_node *dma_np, *np = pdev->dev.of_node;
+	struct lpc18xx_dmamux_data *dmamux;
+	int ret;
+
+	dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL);
+	if (!dmamux)
+		return -ENOMEM;
+
+	dmamux->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(dmamux->reg)) {
+		dev_err(&pdev->dev, "syscon lookup failed\n");
+		return PTR_ERR(dmamux->reg);
+	}
+
+	ret = of_property_read_u32(np, "dma-requests",
+				   &dmamux->dma_mux_requests);
+	if (ret) {
+		dev_err(&pdev->dev, "missing dma-requests property\n");
+		return ret;
+	}
+
+	dma_np = of_parse_phandle(np, "dma-masters", 0);
+	if (!dma_np) {
+		dev_err(&pdev->dev, "can't get dma master\n");
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u32(dma_np, "dma-requests",
+				   &dmamux->dma_master_requests);
+	of_node_put(dma_np);
+	if (ret) {
+		dev_err(&pdev->dev, "missing master dma-requests property\n");
+		return ret;
+	}
+
+	dmamux->muxes = devm_kcalloc(&pdev->dev, dmamux->dma_master_requests,
+				     sizeof(struct lpc18xx_dmamux),
+				     GFP_KERNEL);
+	if (!dmamux->muxes)
+		return -ENOMEM;
+
+	spin_lock_init(&dmamux->lock);
+	platform_set_drvdata(pdev, dmamux);
+	dmamux->dmarouter.dev = &pdev->dev;
+	dmamux->dmarouter.route_free = lpc18xx_dmamux_free;
+
+	return of_dma_router_register(np, lpc18xx_dmamux_reserve,
+				      &dmamux->dmarouter);
+}
+
+static const struct of_device_id lpc18xx_dmamux_match[] = {
+	{ .compatible = "nxp,lpc1850-dmamux" },
+	{},
+};
+
+static struct platform_driver lpc18xx_dmamux_driver = {
+	.probe	= lpc18xx_dmamux_probe,
+	.driver = {
+		.name = "lpc18xx-dmamux",
+		.of_match_table = lpc18xx_dmamux_match,
+	},
+};
+
+static int __init lpc18xx_dmamux_init(void)
+{
+	return platform_driver_register(&lpc18xx_dmamux_driver);
+}
+arch_initcall(lpc18xx_dmamux_init);
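To make the spec rewriting in lpc18xx_dmamux_reserve() concrete: a consumer handing the router a three-cell spec such as <5 2 7> (values illustrative only) asks for master request line 5 with mux setting 2, the trailing cell being passed through to the master controller. The callback then performs the equivalent of:

/* request line 5 muxed to input 2; illustrative values only */
regmap_update_bits(dmamux->reg, LPC18XX_CREG_DMAMUX,
		   LPC18XX_DMAMUX_MASK(5),	/* 0x3 << 10 */
		   LPC18XX_DMAMUX_VAL(2, 5));	/* 0x2 << 10 */

before shrinking the spec to the two cells <5 7> that the "dma-masters" controller expects.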
diff --git a/drivers/dma/mic_x100_dma.h b/drivers/dma/mic_x100_dma.h
index f663b0b..d899820 100644
--- a/drivers/dma/mic_x100_dma.h
+++ b/drivers/dma/mic_x100_dma.h
@@ -39,7 +39,7 @@
  */
 #define MIC_DMA_MAX_NUM_CHAN	8
 #define MIC_DMA_NUM_CHAN	4
-#define MIC_DMA_ALIGN_SHIFT	6
+#define MIC_DMA_ALIGN_SHIFT	DMAENGINE_ALIGN_64_BYTES
 #define MIC_DMA_ALIGN_BYTES	(1 << MIC_DMA_ALIGN_SHIFT)
 #define MIC_DMA_DESC_RX_SIZE	(128 * 1024 - 4)
 
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index 462a022..e39457f 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -72,7 +72,6 @@
 #define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
 #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
 
-#define PDMA_ALIGNMENT		3
 #define PDMA_MAX_DESC_BYTES	DCMD_LENGTH
 
 struct mmp_pdma_desc_hw {
@@ -1071,7 +1070,7 @@
 	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
 	pdev->device.device_config = mmp_pdma_config;
 	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
-	pdev->device.copy_align = PDMA_ALIGNMENT;
+	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 	pdev->device.src_addr_widths = widths;
 	pdev->device.dst_addr_widths = widths;
 	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index e683761..3df0422 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -100,7 +100,6 @@
 	PXA910_SQU,
 };
 
-#define TDMA_ALIGNMENT		3
 #define TDMA_MAX_XFER_BYTES    SZ_64K
 
 struct mmp_tdma_chan {
@@ -695,7 +694,7 @@
 	tdev->device.device_pause = mmp_tdma_pause_chan;
 	tdev->device.device_resume = mmp_tdma_resume_chan;
 	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
-	tdev->device.copy_align = TDMA_ALIGNMENT;
+	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
 	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	platform_set_drvdata(pdev, tdev);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index f1325f6..1c2de9a 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -13,7 +13,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -26,6 +25,7 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/cpumask.h>
 #include <linux/platform_data/dma-mv_xor.h>
 
 #include "dmaengine.h"
@@ -1126,7 +1126,8 @@
 	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_MODE_IN_DESC },
 	{},
 };
-MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+
+static unsigned int mv_xor_engine_count;
 
 static int mv_xor_probe(struct platform_device *pdev)
 {
@@ -1134,6 +1135,7 @@
 	struct mv_xor_device *xordev;
 	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct resource *res;
+	unsigned int max_engines, max_channels;
 	int i, ret;
 	int op_in_desc;
 
@@ -1177,6 +1179,21 @@
 	if (!IS_ERR(xordev->clk))
 		clk_prepare_enable(xordev->clk);
 
+	/*
+	 * We don't want to have more than one channel per CPU in
+	 * order for async_tx to perform well. So we limit the number
+	 * of engines and channels so that we take into account this
+	 * constraint. Note that we also want to use channels from
+	 * separate engines when possible.
+	 */
+	max_engines = num_present_cpus();
+	max_channels = min_t(unsigned int,
+			     MV_XOR_MAX_CHANNELS,
+			     DIV_ROUND_UP(num_present_cpus(), 2));
+
+	if (mv_xor_engine_count >= max_engines)
+		return 0;
+
 	if (pdev->dev.of_node) {
 		struct device_node *np;
 		int i = 0;
@@ -1190,13 +1207,13 @@
 			int irq;
 			op_in_desc = (int)of_id->data;
 
+			if (i >= max_channels)
+				continue;
+
 			dma_cap_zero(cap_mask);
-			if (of_property_read_bool(np, "dmacap,memcpy"))
-				dma_cap_set(DMA_MEMCPY, cap_mask);
-			if (of_property_read_bool(np, "dmacap,xor"))
-				dma_cap_set(DMA_XOR, cap_mask);
-			if (of_property_read_bool(np, "dmacap,interrupt"))
-				dma_cap_set(DMA_INTERRUPT, cap_mask);
+			dma_cap_set(DMA_MEMCPY, cap_mask);
+			dma_cap_set(DMA_XOR, cap_mask);
+			dma_cap_set(DMA_INTERRUPT, cap_mask);
 
 			irq = irq_of_parse_and_map(np, 0);
 			if (!irq) {
@@ -1216,7 +1233,7 @@
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
-		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		for (i = 0; i < max_channels; i++) {
 			struct mv_xor_channel_data *cd;
 			struct mv_xor_chan *chan;
 			int irq;
@@ -1263,27 +1280,8 @@
 	return ret;
 }
 
-static int mv_xor_remove(struct platform_device *pdev)
-{
-	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
-		if (xordev->channels[i])
-			mv_xor_channel_remove(xordev->channels[i]);
-	}
-
-	if (!IS_ERR(xordev->clk)) {
-		clk_disable_unprepare(xordev->clk);
-		clk_put(xordev->clk);
-	}
-
-	return 0;
-}
-
 static struct platform_driver mv_xor_driver = {
 	.probe		= mv_xor_probe,
-	.remove		= mv_xor_remove,
 	.driver		= {
 		.name	        = MV_XOR_NAME,
 		.of_match_table = of_match_ptr(mv_xor_dt_ids),
@@ -1295,19 +1293,10 @@
 {
 	return platform_driver_register(&mv_xor_driver);
 }
-module_init(mv_xor_init);
+device_initcall(mv_xor_init);
 
-/* it's currently unsafe to unload this module */
-#if 0
-static void __exit mv_xor_exit(void)
-{
-	platform_driver_unregister(&mv_xor_driver);
-	return;
-}
-
-module_exit(mv_xor_exit);
-#endif
-
+/*
 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
 MODULE_LICENSE("GPL");
+*/
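The channel-limiting arithmetic added to mv_xor_probe() is easiest to check with concrete numbers; the figures below are illustrative only and assume four present CPUs:

/*
 * Illustrative only, assuming num_present_cpus() == 4:
 *
 *   max_engines  = 4
 *   max_channels = min_t(unsigned int, MV_XOR_MAX_CHANNELS,
 *                        DIV_ROUND_UP(4, 2))
 *                = min(MV_XOR_MAX_CHANNELS, 2)
 *
 * Once mv_xor_engine_count reaches max_engines, further probes bail
 * out early, so channels end up spread across engines rather than
 * stacked on a single one.
 */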
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index b859792d..113605f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -11,10 +11,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/dmaengine.h>
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index ecab4ea0..17ee758 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1198,6 +1198,9 @@
 	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
 	struct _arg_LPEND lpend;
 
+	if (*bursts == 1)
+		return _bursts(dry_run, buf, pxs, 1);
+
 	/* Max iterations possible in DMALP is 256 */
 	if (*bursts >= 256*256) {
 		lcnt1 = 256;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index ddcbbf5..5cb61ce 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -184,19 +184,18 @@
 
 static int dbg_show_requester_chan(struct seq_file *s, void *p)
 {
-	int pos = 0;
 	struct pxad_phy *phy = s->private;
 	int i;
 	u32 drcmr;
 
-	pos += seq_printf(s, "DMA channel %d requester :\n", phy->idx);
+	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
 	for (i = 0; i < 70; i++) {
 		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
 		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
-			pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
-					  !!(drcmr & DRCMR_MAPVLD));
+			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
+				   !!(drcmr & DRCMR_MAPVLD));
 	}
-	return pos;
+	return 0;
 }
 
 static inline int dbg_burst_from_dcmd(u32 dcmd)
@@ -906,21 +905,21 @@
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
 	*dcmd = 0;
-	if (chan->cfg.direction == DMA_DEV_TO_MEM) {
+	if (dir == DMA_DEV_TO_MEM) {
 		maxburst = chan->cfg.src_maxburst;
 		width = chan->cfg.src_addr_width;
 		dev_addr = chan->cfg.src_addr;
 		*dev_src = dev_addr;
 		*dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
 	}
-	if (chan->cfg.direction == DMA_MEM_TO_DEV) {
+	if (dir == DMA_MEM_TO_DEV) {
 		maxburst = chan->cfg.dst_maxburst;
 		width = chan->cfg.dst_addr_width;
 		dev_addr = chan->cfg.dst_addr;
 		*dev_dst = dev_addr;
 		*dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
 	}
-	if (chan->cfg.direction == DMA_MEM_TO_MEM)
+	if (dir == DMA_MEM_TO_MEM)
 		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
 			PXA_DCMD_INCSRCADDR;
 
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0f37152..9fda65a 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -39,18 +39,6 @@
 
 endif
 
-config SUDMAC
-	tristate "Renesas SUDMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas SUDMAC controllers.
-
-config RCAR_HPB_DMAE
-	tristate "Renesas R-Car HPB DMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas R-Car series DMA controllers.
-
 config RCAR_DMAC
 	tristate "Renesas R-Car Gen2 DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -59,6 +47,12 @@
 	  This driver supports the general purpose DMA controller found in the
 	  Renesas R-Car second generation SoCs.
 
+config RCAR_HPB_DMAE
+	tristate "Renesas R-Car HPB DMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas R-Car series DMA controllers.
+
 config RENESAS_USB_DMAC
 	tristate "Renesas USB-DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
@@ -67,3 +61,9 @@
 	help
 	  This driver supports the USB-DMA controller found in the Renesas
 	  SoCs.
+
+config SUDMAC
+	tristate "Renesas SUDMAC support"
+	depends on SH_DMAE_BASE
+	help
+	  Enable support for the Renesas SUDMAC controllers.
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index b8a59806..0133e46 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -13,7 +13,7 @@
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SH_DMAE) += shdma.o
 
-obj-$(CONFIG_SUDMAC) += sudmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
+obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
+obj-$(CONFIG_SUDMAC) += sudmac.o
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 8c5186c..7d5598d 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -455,6 +455,7 @@
 	switch (sdma->type) {
 	case SIRFSOC_DMA_VER_A7V1:
 		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
+		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
 		writel_relaxed((1 << cid) | 1 << (cid + 16),
 			       sdma->base +
 			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
@@ -462,6 +463,8 @@
 		break;
 	case SIRFSOC_DMA_VER_A7V2:
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
+		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
+			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
 		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
 		break;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 3c10f03..750d1b3 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2853,7 +2853,7 @@
 		 * This controller can only access address at even
 		 * 32bit boundaries, i.e. 2^2
 		 */
-		dev->copy_align = 2;
+		dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
 	}
 
 	if (dma_has_cap(DMA_SG, dev->cap_mask))
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
new file mode 100644
index 0000000..a1a500d
--- /dev/null
+++ b/drivers/dma/sun4i-dma.c
@@ -0,0 +1,1288 @@
+/*
+ * Copyright (C) 2014 Emilio López
+ * Emilio López <emilio@elopez.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "virt-dma.h"
+
+/** Common macros to normal and dedicated DMA registers **/
+
+#define SUN4I_DMA_CFG_LOADING			BIT(31)
+#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
+#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
+#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
+#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
+#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
+#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
+#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
+#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)
+
+/** Normal DMA register values **/
+
+/* Normal DMA source/destination data request type values */
+#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
+#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)
+
+/** Normal DMA register layout **/
+
+/* Normal DMA source/destination address mode values */
+#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
+#define SUN4I_NDMA_ADDR_MODE_IO			1
+
+/* Normal DMA configuration register layout */
+#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
+#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
+#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
+#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
+#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)
+
+/** Dedicated DMA register values **/
+
+/* Dedicated DMA source/destination address mode values */
+#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
+#define SUN4I_DDMA_ADDR_MODE_IO			1
+#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
+#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3
+
+/* Dedicated DMA source/destination data request type values */
+#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
+#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)
+
+/** Dedicated DMA register layout **/
+
+/* Dedicated DMA configuration register layout */
+#define SUN4I_DDMA_CFG_BUSY			BIT(30)
+#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
+#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
+#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
+#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)
+
+/* Dedicated DMA parameter register layout */
+#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
+#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
+#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
+#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)
+
+/** DMA register offsets **/
+
+/* General register offsets */
+#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
+#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4
+
+/* Normal DMA register offsets */
+#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
+#define SUN4I_NDMA_CFG_REG			0x0
+#define SUN4I_NDMA_SRC_ADDR_REG			0x4
+#define SUN4I_NDMA_DST_ADDR_REG		0x8
+#define SUN4I_NDMA_BYTE_COUNT_REG		0xC
+
+/* Dedicated DMA register offsets */
+#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
+#define SUN4I_DDMA_CFG_REG			0x0
+#define SUN4I_DDMA_SRC_ADDR_REG			0x4
+#define SUN4I_DDMA_DST_ADDR_REG		0x8
+#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
+#define SUN4I_DDMA_PARA_REG			0x18
+
+/** DMA Driver **/
+
+/*
+ * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
+ * that's 16 channels. As for endpoints, there are 29 and 21
+ * respectively. Given that the Normal DMA endpoints (other than
+ * SDRAM) can be used as tx/rx, we need 78 vchans in total
+ */
+#define SUN4I_NDMA_NR_MAX_CHANNELS	8
+#define SUN4I_DDMA_NR_MAX_CHANNELS	8
+#define SUN4I_DMA_NR_MAX_CHANNELS					\
+	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
+#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
+#define SUN4I_DDMA_NR_MAX_VCHANS	21
+#define SUN4I_DMA_NR_MAX_VCHANS						\
+	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
+
+/* This set of SUN4I_DDMA timing parameters was found experimentally while
+ * working with the SPI driver and seems to make it behave correctly */
+#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
+	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) |			\
+	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) |				\
+	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) |				\
+	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
+
+struct sun4i_dma_pchan {
+	/* Register base of channel */
+	void __iomem			*base;
+	/* vchan currently being serviced */
+	struct sun4i_dma_vchan		*vchan;
+	/* Is this a dedicated pchan? */
+	int				is_dedicated;
+};
+
+struct sun4i_dma_vchan {
+	struct virt_dma_chan		vc;
+	struct dma_slave_config		cfg;
+	struct sun4i_dma_pchan		*pchan;
+	struct sun4i_dma_promise	*processing;
+	struct sun4i_dma_contract	*contract;
+	u8				endpoint;
+	int				is_dedicated;
+};
+
+struct sun4i_dma_promise {
+	u32				cfg;
+	u32				para;
+	dma_addr_t			src;
+	dma_addr_t			dst;
+	size_t				len;
+	struct list_head		list;
+};
+
+/* A contract is a set of promises */
+struct sun4i_dma_contract {
+	struct virt_dma_desc		vd;
+	struct list_head		demands;
+	struct list_head		completed_demands;
+	int				is_cyclic;
+};
+
+struct sun4i_dma_dev {
+	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
+	struct dma_device		slave;
+	struct sun4i_dma_pchan		*pchans;
+	struct sun4i_dma_vchan		*vchans;
+	void __iomem			*base;
+	struct clk			*clk;
+	int				irq;
+	spinlock_t			lock;
+};
+
+static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
+{
+	return container_of(dev, struct sun4i_dma_dev, slave);
+}
+
+static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
+{
+	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
+}
+
+static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
+{
+	return container_of(vd, struct sun4i_dma_contract, vd);
+}
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+	return &chan->dev->device;
+}
+
+static int convert_burst(u32 maxburst)
+{
+	if (maxburst > 8)
+		return -EINVAL;
+
+	/* 1 -> 0, 4 -> 1, 8 -> 2 */
+	return (maxburst >> 2);
+}
+
+static int convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
+		return -EINVAL;
+
+	/* 8 (1 byte) -> 0, 16 (2 bytes) -> 1, 32 (4 bytes) -> 2 */
+	return (addr_width >> 1);
+}
+
+static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+	vchan_free_chan_resources(&vchan->vc);
+}
+
+static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
+						  struct sun4i_dma_vchan *vchan)
+{
+	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
+	unsigned long flags;
+	int i, max;
+
+	/*
+	 * pchans 0-SUN4I_NDMA_NR_MAX_CHANNELS are normal, and
+	 * SUN4I_NDMA_NR_MAX_CHANNELS+ are dedicated ones
+	 */
+	if (vchan->is_dedicated) {
+		i = SUN4I_NDMA_NR_MAX_CHANNELS;
+		max = SUN4I_DMA_NR_MAX_CHANNELS;
+	} else {
+		i = 0;
+		max = SUN4I_NDMA_NR_MAX_CHANNELS;
+	}
+
+	spin_lock_irqsave(&priv->lock, flags);
+	for_each_clear_bit_from(i, &priv->pchans_used, max) {
+		pchan = &pchans[i];
+		pchan->vchan = vchan;
+		set_bit(i, priv->pchans_used);
+		break;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return pchan;
+}
+
+static void release_pchan(struct sun4i_dma_dev *priv,
+			  struct sun4i_dma_pchan *pchan)
+{
+	unsigned long flags;
+	int nr = pchan - priv->pchans;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	pchan->vchan = NULL;
+	clear_bit(nr, priv->pchans_used);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void configure_pchan(struct sun4i_dma_pchan *pchan,
+			    struct sun4i_dma_promise *d)
+{
+	/*
+	 * Configure addresses and misc parameters depending on type
+	 * SUN4I_DDMA has an extra field with timing parameters
+	 */
+	if (pchan->is_dedicated) {
+		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
+		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
+		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
+		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
+	} else {
+		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
+		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
+		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
+	}
+}
+
+static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
+				struct sun4i_dma_pchan *pchan,
+				int half, int end)
+{
+	u32 reg;
+	int pchan_number = pchan - priv->pchans;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+	if (half)
+		reg |= BIT(pchan_number * 2);
+	else
+		reg &= ~BIT(pchan_number * 2);
+
+	if (end)
+		reg |= BIT(pchan_number * 2 + 1);
+	else
+		reg &= ~BIT(pchan_number * 2 + 1);
+
+	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/**
+ * Execute pending operations on a vchan
+ *
+ * When given a vchan, this function will try to acquire a suitable
+ * pchan and, if successful, will configure it to fulfill a promise
+ * from the next pending contract.
+ *
+ * This function must be called with &vchan->vc.lock held.
+ */
+static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
+				   struct sun4i_dma_vchan *vchan)
+{
+	struct sun4i_dma_promise *promise = NULL;
+	struct sun4i_dma_contract *contract = NULL;
+	struct sun4i_dma_pchan *pchan;
+	struct virt_dma_desc *vd;
+	int ret;
+
+	lockdep_assert_held(&vchan->vc.lock);
+
+	/* We need a pchan to do anything, so secure one if available */
+	pchan = find_and_use_pchan(priv, vchan);
+	if (!pchan)
+		return -EBUSY;
+
+	/*
+	 * Channel endpoints must not be repeated, so if this vchan
+	 * has already submitted some work, we can't do anything else
+	 */
+	if (vchan->processing) {
+		dev_dbg(chan2dev(&vchan->vc.chan),
+			"processing something to this endpoint already\n");
+		ret = -EBUSY;
+		goto release_pchan;
+	}
+
+	do {
+		/* Figure out which contract we're working with today */
+		vd = vchan_next_desc(&vchan->vc);
+		if (!vd) {
+			dev_dbg(chan2dev(&vchan->vc.chan),
+				"No pending contract found");
+			ret = 0;
+			goto release_pchan;
+		}
+
+		contract = to_sun4i_dma_contract(vd);
+		if (list_empty(&contract->demands)) {
+			/* The contract has been completed so mark it as such */
+			list_del(&contract->vd.node);
+			vchan_cookie_complete(&contract->vd);
+			dev_dbg(chan2dev(&vchan->vc.chan),
+				"Empty contract found and marked complete");
+		}
+	} while (list_empty(&contract->demands));
+
+	/* Now find out what we need to do */
+	promise = list_first_entry(&contract->demands,
+				   struct sun4i_dma_promise, list);
+	vchan->processing = promise;
+
+	/* ... and make it reality */
+	if (promise) {
+		vchan->contract = contract;
+		vchan->pchan = pchan;
+		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
+		configure_pchan(pchan, promise);
+	}
+
+	return 0;
+
+release_pchan:
+	release_pchan(priv, pchan);
+	return ret;
+}
+
+static int sanitize_config(struct dma_slave_config *sconfig,
+			   enum dma_transfer_direction direction)
+{
+	switch (direction) {
+	case DMA_MEM_TO_DEV:
+		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+		    !sconfig->dst_maxburst)
+			return -EINVAL;
+
+		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+			sconfig->src_addr_width = sconfig->dst_addr_width;
+
+		if (!sconfig->src_maxburst)
+			sconfig->src_maxburst = sconfig->dst_maxburst;
+
+		break;
+
+	case DMA_DEV_TO_MEM:
+		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
+		    !sconfig->src_maxburst)
+			return -EINVAL;
+
+		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+			sconfig->dst_addr_width = sconfig->src_addr_width;
+
+		if (!sconfig->dst_maxburst)
+			sconfig->dst_maxburst = sconfig->src_maxburst;
+
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Generate a promise, to be used in a normal DMA contract.
+ *
+ * A NDMA promise contains all the information required to program the
+ * normal part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+		      size_t len, struct dma_slave_config *sconfig,
+		      enum dma_transfer_direction direction)
+{
+	struct sun4i_dma_promise *promise;
+	int ret;
+
+	ret = sanitize_config(sconfig, direction);
+	if (ret)
+		return NULL;
+
+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+	if (!promise)
+		return NULL;
+
+	promise->src = src;
+	promise->dst = dest;
+	promise->len = len;
+	promise->cfg = SUN4I_DMA_CFG_LOADING |
+		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+	dev_dbg(chan2dev(chan),
+		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
+		sconfig->src_maxburst, sconfig->dst_maxburst,
+		sconfig->src_addr_width, sconfig->dst_addr_width);
+
+	/* Source burst */
+	ret = convert_burst(sconfig->src_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+	/* Destination burst */
+	ret = convert_burst(sconfig->dst_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+	/* Source bus width */
+	ret = convert_buswidth(sconfig->src_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+	/* Destination bus width */
+	ret = convert_buswidth(sconfig->dst_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+	return promise;
+
+fail:
+	kfree(promise);
+	return NULL;
+}
+
+/**
+ * Generate a promise, to be used in a dedicated DMA contract.
+ *
+ * A DDMA promise contains all the information required to program the
+ * Dedicated part of the DMA Engine and get data copied. A non-executed
+ * promise will live in the demands list on a contract. Once it has been
+ * completed, it will be moved to the completed demands list for later freeing.
+ * All linked promises will be freed when the corresponding contract is freed
+ */
+static struct sun4i_dma_promise *
+generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
+		      size_t len, struct dma_slave_config *sconfig)
+{
+	struct sun4i_dma_promise *promise;
+	int ret;
+
+	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
+	if (!promise)
+		return NULL;
+
+	promise->src = src;
+	promise->dst = dest;
+	promise->len = len;
+	promise->cfg = SUN4I_DMA_CFG_LOADING |
+		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;
+
+	/* Source burst */
+	ret = convert_burst(sconfig->src_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
+
+	/* Destination burst */
+	ret = convert_burst(sconfig->dst_maxburst);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
+
+	/* Source bus width */
+	ret = convert_buswidth(sconfig->src_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
+
+	/* Destination bus width */
+	ret = convert_buswidth(sconfig->dst_addr_width);
+	if (IS_ERR_VALUE(ret))
+		goto fail;
+	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
+
+	return promise;
+
+fail:
+	kfree(promise);
+	return NULL;
+}
+
+/**
+ * Generate a contract
+ *
+ * Contracts function as DMA descriptors. As our hardware does not support
+ * linked lists, we need to implement SG via software. We use a contract
+ * to hold all the pieces of the request and process them serially one
+ * after another. Each piece is represented as a promise.
+ */
+static struct sun4i_dma_contract *generate_dma_contract(void)
+{
+	struct sun4i_dma_contract *contract;
+
+	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
+	if (!contract)
+		return NULL;
+
+	INIT_LIST_HEAD(&contract->demands);
+	INIT_LIST_HEAD(&contract->completed_demands);
+
+	return contract;
+}
+
+/**
+ * Get next promise on a cyclic transfer
+ *
+ * Cyclic contracts contain a series of promises which are executed on a
+ * loop. This function returns the next promise from a cyclic contract,
+ * so it can be programmed into the hardware.
+ */
+static struct sun4i_dma_promise *
+get_next_cyclic_promise(struct sun4i_dma_contract *contract)
+{
+	struct sun4i_dma_promise *promise;
+
+	promise = list_first_entry_or_null(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	if (!promise) {
+		list_splice_init(&contract->completed_demands,
+				 &contract->demands);
+		promise = list_first_entry(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	}
+
+	return promise;
+}
+
+/**
+ * Free a contract and all its associated promises
+ */
+static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
+{
+	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
+	struct sun4i_dma_promise *promise;
+
+	/* Free all the demands and completed demands */
+	list_for_each_entry(promise, &contract->demands, list)
+		kfree(promise);
+
+	list_for_each_entry(promise, &contract->completed_demands, list)
+		kfree(promise);
+
+	kfree(contract);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+			  dma_addr_t src, size_t len, unsigned long flags)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	/*
+	 * We can only do the copy to bus aligned addresses, so
+	 * choose the best one so we get decent performance. We also
+	 * maximize the burst size for this same reason.
+	 */
+	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	sconfig->src_maxburst = 8;
+	sconfig->dst_maxburst = 8;
+
+	if (vchan->is_dedicated)
+		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
+	else
+		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
+						DMA_MEM_TO_MEM);
+
+	if (!promise) {
+		kfree(contract);
+		return NULL;
+	}
+
+	/* Configure memcpy mode */
+	if (vchan->is_dedicated) {
+		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
+				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
+	} else {
+		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+	}
+
+	/* Fill the contract with our only promise */
+	list_add_tail(&promise->list, &contract->demands);
+
+	/* And add it to the vchan */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
+			  size_t period_len, enum dma_transfer_direction dir,
+			  unsigned long flags)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+	dma_addr_t src, dest;
+	u32 endpoints;
+	int nr_periods, offset, plength, i;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+		return NULL;
+	}
+
+	if (vchan->is_dedicated) {
+		/*
+		 * Cyclic transfers are currently only used for audio, which
+		 * goes through Normal DMA. Nothing stops us from supporting
+		 * Dedicated DMA here as well, so if a client comes up and
+		 * requires it, it will be simple to implement.
+		 */
+		dev_err(chan2dev(chan),
+			"Cyclic transfers are only supported on Normal DMA\n");
+		return NULL;
+	}
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	contract->is_cyclic = 1;
+
+	/* Figure out the endpoints and the address we need */
+	if (dir == DMA_MEM_TO_DEV) {
+		src = buf;
+		dest = sconfig->dst_addr;
+		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
+			    SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO);
+	} else {
+		src = sconfig->src_addr;
+		dest = buf;
+		endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) |
+			    SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
+	}
+
+	/*
+	 * We will be using half-done interrupts to serve two periods
+	 * with a single promise, so we need to program the DMA engine
+	 * less often
+	 */
+
+	/*
+	 * The engine can interrupt on half-transfer, so we can use
+	 * this feature to program the engine half as often as if we
+	 * didn't use it (keep in mind the hardware doesn't support
+	 * linked lists).
+	 *
+	 * Say you have a set of periods (| marks the start/end, I for
+	 * interrupt, P for programming the engine to do a new
+	 * transfer), the easy but slow way would be to do
+	 *
+	 *  |---|---|---|---| (periods / promises)
+	 *  P  I,P I,P I,P  I
+	 *
+	 * Using half transfer interrupts you can do
+	 *
+	 *  |-------|-------| (promises as configured on hw)
+	 *  |---|---|---|---| (periods)
+	 *  P   I  I,P  I   I
+	 *
+	 * Which requires half the engine programming for the same
+	 * functionality.
+	 */
+	nr_periods = DIV_ROUND_UP(len / period_len, 2);
+	for (i = 0; i < nr_periods; i++) {
+		/* Calculate the offset in the buffer and the length needed */
+		offset = i * period_len * 2;
+		plength = min((len - offset), (period_len * 2));
+		if (dir == DMA_MEM_TO_DEV)
+			src = buf + offset;
+		else
+			dest = buf + offset;
+
+		/* Make the promise */
+		promise = generate_ndma_promise(chan, src, dest,
+						plength, sconfig, dir);
+		if (!promise) {
+			/* TODO: should we free everything? */
+			return NULL;
+		}
+		promise->cfg |= endpoints;
+
+		/* Then add it to the contract */
+		list_add_tail(&promise->list, &contract->demands);
+	}
+
+	/* And add it to the vchan */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *
+sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+			unsigned int sg_len, enum dma_transfer_direction dir,
+			unsigned long flags, void *context)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct dma_slave_config *sconfig = &vchan->cfg;
+	struct sun4i_dma_promise *promise;
+	struct sun4i_dma_contract *contract;
+	u8 ram_type, io_mode, linear_mode;
+	struct scatterlist *sg;
+	dma_addr_t srcaddr, dstaddr;
+	u32 endpoints, para;
+	int i;
+
+	if (!sgl)
+		return NULL;
+
+	if (!is_slave_direction(dir)) {
+		dev_err(chan2dev(chan), "Invalid DMA direction\n");
+		return NULL;
+	}
+
+	contract = generate_dma_contract();
+	if (!contract)
+		return NULL;
+
+	if (vchan->is_dedicated) {
+		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
+		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
+		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
+	} else {
+		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
+		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
+		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
+	}
+
+	if (dir == DMA_MEM_TO_DEV)
+		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
+	else
+		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
+			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
+			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
+			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		/* Figure out addresses */
+		if (dir == DMA_MEM_TO_DEV) {
+			srcaddr = sg_dma_address(sg);
+			dstaddr = sconfig->dst_addr;
+		} else {
+			srcaddr = sconfig->src_addr;
+			dstaddr = sg_dma_address(sg);
+		}
+
+		/*
+		 * These are the magic DMA engine timings that keep SPI going.
+		 * I haven't seen any interface on DMAEngine to configure
+		 * timings, and so far they seem to work for everything we
+		 * support, so I've kept them here. I don't know if other
+		 * devices need different timings because, as usual, we only
+		 * have the "para" bitfield meanings, but no comment on what
+		 * the values should be when doing a certain operation :|
+		 */
+		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
+
+		/* And make a suitable promise */
+		if (vchan->is_dedicated)
+			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
+							sg_dma_len(sg),
+							sconfig);
+		else
+			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
+							sg_dma_len(sg),
+							sconfig, dir);
+
+		if (!promise)
+			return NULL; /* TODO: should we free everything? */
+
+		promise->cfg |= endpoints;
+		promise->para = para;
+
+		/* Then add it to the contract */
+		list_add_tail(&promise->list, &contract->demands);
+	}
+
+	/*
+	 * Once we've got all the promises ready, add the contract
+	 * to the pending list on the vchan
+	 */
+	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
+}
+
+static int sun4i_dma_terminate_all(struct dma_chan *chan)
+{
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct sun4i_dma_pchan *pchan = vchan->pchan;
+	LIST_HEAD(head);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vchan_get_all_descriptors(&vchan->vc, &head);
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	/*
+	 * Clearing the configuration register will halt the pchan. Interrupts
+	 * may still trigger, so don't forget to disable them.
+	 */
+	if (pchan) {
+		if (pchan->is_dedicated)
+			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
+		else
+			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
+		set_pchan_interrupt(priv, pchan, 0, 0);
+		release_pchan(priv, pchan);
+	}
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vchan_dma_desc_free_list(&vchan->vc, &head);
+	/* Clear these so the vchan is usable again */
+	vchan->processing = NULL;
+	vchan->pchan = NULL;
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return 0;
+}
+
+static int sun4i_dma_config(struct dma_chan *chan,
+			    struct dma_slave_config *config)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+
+	memcpy(&vchan->cfg, config, sizeof(*config));
+
+	return 0;
+}
+
+static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
+					   struct of_dma *ofdma)
+{
+	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
+	struct sun4i_dma_vchan *vchan;
+	struct dma_chan *chan;
+	u8 is_dedicated = dma_spec->args[0];
+	u8 endpoint = dma_spec->args[1];
+
+	/* Check if type is Normal or Dedicated */
+	if (is_dedicated != 0 && is_dedicated != 1)
+		return NULL;
+
+	/* Make sure the endpoint looks sane */
+	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
+	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&priv->slave);
+	if (!chan)
+		return NULL;
+
+	/* Assign the endpoint to the vchan */
+	vchan = to_sun4i_dma_vchan(chan);
+	vchan->is_dedicated = is_dedicated;
+	vchan->endpoint = endpoint;
+
+	return chan;
+}
+
+static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *state)
+{
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	struct sun4i_dma_pchan *pchan = vchan->pchan;
+	struct sun4i_dma_contract *contract;
+	struct sun4i_dma_promise *promise;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (!state || (ret == DMA_COMPLETE))
+		return ret;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+	vd = vchan_find_desc(&vchan->vc, cookie);
+	if (!vd)
+		goto exit;
+	contract = to_sun4i_dma_contract(vd);
+
+	list_for_each_entry(promise, &contract->demands, list)
+		bytes += promise->len;
+
+	/*
+	 * The hardware is configured to return the remaining byte
+	 * quantity. If possible, replace the first listed element's
+	 * full size with the actual remaining amount
+	 */
+	promise = list_first_entry_or_null(&contract->demands,
+					   struct sun4i_dma_promise, list);
+	if (promise && pchan) {
+		bytes -= promise->len;
+		if (pchan->is_dedicated)
+			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
+		else
+			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
+	}
+
+exit:
+
+	dma_set_residue(state, bytes);
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+	return ret;
+}
+
+static void sun4i_dma_issue_pending(struct dma_chan *chan)
+{
+	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
+	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vchan->vc.lock, flags);
+
+	/*
+	 * If there are pending transactions for this vchan, push one of
+	 * them into the engine to get the ball rolling.
+	 */
+	if (vchan_issue_pending(&vchan->vc))
+		__execute_vchan_pending(priv, vchan);
+
+	spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
+{
+	struct sun4i_dma_dev *priv = dev_id;
+	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
+	struct sun4i_dma_vchan *vchan;
+	struct sun4i_dma_contract *contract;
+	struct sun4i_dma_promise *promise;
+	unsigned long pendirq, irqs, disableirqs;
+	int bit, i, free_room, allow_mitigation = 1;
+
+	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+handle_pending:
+
+	disableirqs = 0;
+	free_room = 0;
+
+	for_each_set_bit(bit, &pendirq, 32) {
+		pchan = &pchans[bit >> 1];
+		vchan = pchan->vchan;
+		if (!vchan) /* a terminated channel may still interrupt */
+			continue;
+		contract = vchan->contract;
+
+		/*
+		 * Disable the IRQ and free the pchan if it's an end
+		 * interrupt (odd bit)
+		 */
+		if (bit & 1) {
+			spin_lock(&vchan->vc.lock);
+
+			/*
+			 * Move the promise into the completed list now that
+			 * we're done with it
+			 */
+			list_del(&vchan->processing->list);
+			list_add_tail(&vchan->processing->list,
+				      &contract->completed_demands);
+
+			/*
+			 * Cyclic DMA transfers are special:
+			 * - There's always something we can dispatch
+			 * - We need to run the callback
+			 * - Latency is very important, as this is used by audio
+			 * We therefore just cycle through the list and dispatch
+			 * whatever we have here, reusing the pchan. There's
+			 * no need to run the thread after this.
+			 *
+			 * For non-cyclic transfers we need to look around,
+			 * so we can program some more work, or notify the
+			 * client that their transfers have been completed.
+			 */
+			if (contract->is_cyclic) {
+				promise = get_next_cyclic_promise(contract);
+				vchan->processing = promise;
+				configure_pchan(pchan, promise);
+				vchan_cyclic_callback(&contract->vd);
+			} else {
+				vchan->processing = NULL;
+				vchan->pchan = NULL;
+
+				free_room = 1;
+				disableirqs |= BIT(bit);
+				release_pchan(priv, pchan);
+			}
+
+			spin_unlock(&vchan->vc.lock);
+		} else {
+			/* Half done interrupt */
+			if (contract->is_cyclic)
+				vchan_cyclic_callback(&contract->vd);
+			else
+				disableirqs |= BIT(bit);
+		}
+	}
+
+	/* Disable the IRQs for events we handled */
+	spin_lock(&priv->lock);
+	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	writel_relaxed(irqs & ~disableirqs,
+		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	spin_unlock(&priv->lock);
+
+	/* Writing 1 to the pending field will clear the pending interrupt */
+	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+	/*
+	 * If a pchan was freed, we may be able to schedule something else,
+	 * so have a look around
+	 */
+	if (free_room) {
+		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+			vchan = &priv->vchans[i];
+			spin_lock(&vchan->vc.lock);
+			__execute_vchan_pending(priv, vchan);
+			spin_unlock(&vchan->vc.lock);
+		}
+	}
+
+	/*
+	 * Handle newer interrupts if some showed up, but only do it once
+	 * to avoid too long a loop
+	 */
+	if (allow_mitigation) {
+		pendirq = readl_relaxed(priv->base +
+					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+		if (pendirq) {
+			allow_mitigation = 0;
+			goto handle_pending;
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int sun4i_dma_probe(struct platform_device *pdev)
+{
+	struct sun4i_dma_dev *priv;
+	struct resource *res;
+	int i, j, ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "Cannot claim IRQ\n");
+		return priv->irq;
+	}
+
+	priv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk)) {
+		dev_err(&pdev->dev, "No clock specified\n");
+		return PTR_ERR(priv->clk);
+	}
+
+	platform_set_drvdata(pdev, priv);
+	spin_lock_init(&priv->lock);
+
+	dma_cap_zero(priv->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
+	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
+	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);
+
+	INIT_LIST_HEAD(&priv->slave.channels);
+	priv->slave.device_free_chan_resources	= sun4i_dma_free_chan_resources;
+	priv->slave.device_tx_status		= sun4i_dma_tx_status;
+	priv->slave.device_issue_pending	= sun4i_dma_issue_pending;
+	priv->slave.device_prep_slave_sg	= sun4i_dma_prep_slave_sg;
+	priv->slave.device_prep_dma_memcpy	= sun4i_dma_prep_dma_memcpy;
+	priv->slave.device_prep_dma_cyclic	= sun4i_dma_prep_dma_cyclic;
+	priv->slave.device_config		= sun4i_dma_config;
+	priv->slave.device_terminate_all	= sun4i_dma_terminate_all;
+	priv->slave.copy_align			= 2;
+	priv->slave.src_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	priv->slave.dst_addr_widths		= BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+						  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+						  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+	priv->slave.directions			= BIT(DMA_DEV_TO_MEM) |
+						  BIT(DMA_MEM_TO_DEV);
+	priv->slave.residue_granularity		= DMA_RESIDUE_GRANULARITY_BURST;
+
+	priv->slave.dev = &pdev->dev;
+
+	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
+				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
+	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
+				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
+	if (!priv->vchans || !priv->pchans)
+		return -ENOMEM;
+
+	/*
+	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
+	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
+	 * dedicated ones
+	 */
+	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
+		priv->pchans[i].base = priv->base +
+			SUN4I_NDMA_CHANNEL_REG_BASE(i);
+
+	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
+		priv->pchans[i].base = priv->base +
+			SUN4I_DDMA_CHANNEL_REG_BASE(j);
+		priv->pchans[i].is_dedicated = 1;
+	}
+
+	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
+		struct sun4i_dma_vchan *vchan = &priv->vchans[i];
+
+		spin_lock_init(&vchan->vc.lock);
+		vchan->vc.desc_free = sun4i_dma_free_contract;
+		vchan_init(&vchan->vc, &priv->slave);
+	}
+
+	ret = clk_prepare_enable(priv->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Couldn't enable the clock\n");
+		return ret;
+	}
+
+	/*
+	 * Make sure the IRQs are all disabled and accounted for. The bootloader
+	 * likes to leave these dirty
+	 */
+	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
+	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);
+
+	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
+			       0, dev_name(&pdev->dev), priv);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot request IRQ\n");
+		goto err_clk_disable;
+	}
+
+	ret = dma_async_device_register(&priv->slave);
+	if (ret) {
+		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
+		goto err_clk_disable;
+	}
+
+	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
+					 priv);
+	if (ret) {
+		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+		goto err_dma_unregister;
+	}
+
+	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");
+
+	return 0;
+
+err_dma_unregister:
+	dma_async_device_unregister(&priv->slave);
+err_clk_disable:
+	clk_disable_unprepare(priv->clk);
+	return ret;
+}
+
+static int sun4i_dma_remove(struct platform_device *pdev)
+{
+	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);
+
+	/* Disable IRQ so no more work is scheduled */
+	disable_irq(priv->irq);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&priv->slave);
+
+	clk_disable_unprepare(priv->clk);
+
+	return 0;
+}
+
+static const struct of_device_id sun4i_dma_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-dma" },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver sun4i_dma_driver = {
+	.probe	= sun4i_dma_probe,
+	.remove	= sun4i_dma_remove,
+	.driver	= {
+		.name		= "sun4i-dma",
+		.of_match_table	= sun4i_dma_match,
+	},
+};
+
+module_platform_driver(sun4i_dma_driver);
+
+MODULE_DESCRIPTION("Allwinner A10 Dedicated DMA Controller Driver");
+MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 842ff97..73e0be6 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -969,7 +969,7 @@
 	sdc->slave.device_issue_pending		= sun6i_dma_issue_pending;
 	sdc->slave.device_prep_slave_sg		= sun6i_dma_prep_slave_sg;
 	sdc->slave.device_prep_dma_memcpy	= sun6i_dma_prep_dma_memcpy;
-	sdc->slave.copy_align			= 4;
+	sdc->slave.copy_align			= DMAENGINE_ALIGN_4_BYTES;
 	sdc->slave.device_config		= sun6i_dma_config;
 	sdc->slave.device_pause			= sun6i_dma_pause;
 	sdc->slave.device_resume		= sun6i_dma_resume;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index eaf585e..c8f79dc 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -155,7 +155,6 @@
 	int				req_len;
 	bool				configured;
 	bool				last_sg;
-	bool				half_done;
 	struct list_head		node;
 	struct tegra_dma_desc		*dma_desc;
 };
@@ -188,7 +187,7 @@
 	bool			config_init;
 	int			id;
 	int			irq;
-	unsigned long		chan_base_offset;
+	void __iomem		*chan_addr;
 	spinlock_t		lock;
 	bool			busy;
 	struct tegra_dma	*tdma;
@@ -203,8 +202,6 @@
 	/* ISR handler and tasklet for bottom half of isr handling */
 	dma_isr_handler		isr_handler;
 	struct tasklet_struct	tasklet;
-	dma_async_tx_callback	callback;
-	void			*callback_param;
 
 	/* Channel-slave specific configuration */
 	unsigned int slave_id;
@@ -222,6 +219,13 @@
 	void __iomem			*base_addr;
 	const struct tegra_dma_chip_data *chip_data;
 
+	/*
+	 * Counter for managing global pausing of the DMA controller.
+	 * Only applicable for devices that don't support individual
+	 * channel pausing.
+	 */
+	u32				global_pause_count;
+
 	/* Some registers need to be cached before suspend */
 	u32				reg_gen;
 
@@ -242,12 +246,12 @@
 static inline void tdc_write(struct tegra_dma_channel *tdc,
 		u32 reg, u32 val)
 {
-	writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+	writel(val, tdc->chan_addr + reg);
 }
 
 static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
 {
-	return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+	return readl(tdc->chan_addr + reg);
 }
 
 static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
@@ -361,16 +365,32 @@
 	struct tegra_dma *tdma = tdc->tdma;
 
 	spin_lock(&tdma->global_lock);
-	tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
-	if (wait_for_burst_complete)
-		udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+
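+	/* Only the first pause actually stops the controller; nested pauses just bump the count */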
+	if (tdc->tdma->global_pause_count == 0) {
+		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
+		if (wait_for_burst_complete)
+			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+	}
+
+	tdc->tdma->global_pause_count++;
+
+	spin_unlock(&tdma->global_lock);
 }
 
 static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
 {
 	struct tegra_dma *tdma = tdc->tdma;
 
-	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+	spin_lock(&tdma->global_lock);
+
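+	/* Re-enable the controller only when the last outstanding pause is released */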
+	if (WARN_ON(tdc->tdma->global_pause_count == 0))
+		goto out;
+
+	if (--tdc->tdma->global_pause_count == 0)
+		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
+			   TEGRA_APBDMA_GENERAL_ENABLE);
+
+out:
 	spin_unlock(&tdma->global_lock);
 }
 
@@ -601,7 +621,6 @@
 		return;
 
 	tdc_start_head_req(tdc);
-	return;
 }
 
 static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
@@ -628,7 +647,6 @@
 		if (!st)
 			dma_desc->dma_status = DMA_ERROR;
 	}
-	return;
 }
 
 static void tegra_dma_tasklet(unsigned long data)
@@ -720,7 +738,6 @@
 	}
 end:
 	spin_unlock_irqrestore(&tdc->lock, flags);
-	return;
 }
 
 static int tegra_dma_terminate_all(struct dma_chan *dc)
@@ -932,7 +949,6 @@
 	struct tegra_dma_sg_req  *sg_req = NULL;
 	u32 burst_size;
 	enum dma_slave_buswidth slave_bw;
-	int ret;
 
 	if (!tdc->config_init) {
 		dev_err(tdc2dev(tdc), "dma channel is not configured\n");
@@ -943,9 +959,8 @@
 		return NULL;
 	}
 
-	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
-				&burst_size, &slave_bw);
-	if (ret < 0)
+	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+				&burst_size, &slave_bw) < 0)
 		return NULL;
 
 	INIT_LIST_HEAD(&req_list);
@@ -1048,7 +1063,6 @@
 	dma_addr_t mem = buf_addr;
 	u32 burst_size;
 	enum dma_slave_buswidth slave_bw;
-	int ret;
 
 	if (!buf_len || !period_len) {
 		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
@@ -1087,12 +1101,10 @@
 		return NULL;
 	}
 
-	ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
-				&burst_size, &slave_bw);
-	if (ret < 0)
+	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+				&burst_size, &slave_bw) < 0)
 		return NULL;
 
-
 	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
 	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
 					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
@@ -1136,7 +1148,6 @@
 		sg_req->ch_regs.apb_seq = apb_seq;
 		sg_req->ch_regs.ahb_seq = ahb_seq;
 		sg_req->configured = false;
-		sg_req->half_done = false;
 		sg_req->last_sg = false;
 		sg_req->dma_desc = dma_desc;
 		sg_req->req_len = len;
@@ -1377,8 +1388,9 @@
 	for (i = 0; i < cdata->nr_channels; i++) {
 		struct tegra_dma_channel *tdc = &tdma->channels[i];
 
-		tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-					i * cdata->channel_reg_size;
+		tdc->chan_addr = tdma->base_addr +
+				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
+				 (i * cdata->channel_reg_size);
 
 		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
 		if (!res) {
@@ -1418,6 +1430,7 @@
 	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
 
+	tdma->global_pause_count = 0;
 	tdma->dma_dev.dev = &pdev->dev;
 	tdma->dma_dev.device_alloc_chan_resources =
 					tegra_dma_alloc_chan_resources;
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 24f5ca2..5cce8c9 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -20,16 +20,19 @@
 #define TI_XBAR_OUTPUTS	127
 #define TI_XBAR_INPUTS	256
 
-static DEFINE_IDR(map_idr);
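+/* sDMA request lines are 1-based while eDMA's are 0-based, hence a per-master offset */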
+#define TI_XBAR_EDMA_OFFSET	0
+#define TI_XBAR_SDMA_OFFSET	1
 
 struct ti_dma_xbar_data {
 	void __iomem *iomem;
 
 	struct dma_router dmarouter;
+	struct idr map_idr;
 
 	u16 safe_val; /* Value to reset the crossbar lines */
 	u32 xbar_requests; /* number of DMA requests connected to XBAR */
 	u32 dma_requests; /* number of DMA requests forwarded to DMA */
+	u32 dma_offset;
 };
 
 struct ti_dma_xbar_map {
@@ -51,7 +54,7 @@
 		map->xbar_in, map->xbar_out);
 
 	ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
-	idr_remove(&map_idr, map->xbar_out);
+	idr_remove(&xbar->map_idr, map->xbar_out);
 	kfree(map);
 }
 
@@ -81,12 +84,11 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	map->xbar_out = idr_alloc(&map_idr, NULL, 0, xbar->dma_requests,
+	map->xbar_out = idr_alloc(&xbar->map_idr, NULL, 0, xbar->dma_requests,
 				  GFP_KERNEL);
 	map->xbar_in = (u16)dma_spec->args[0];
 
-	/* The DMA request is 1 based in sDMA */
-	dma_spec->args[0] = map->xbar_out + 1;
+	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
 
 	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
 		map->xbar_in, map->xbar_out);
@@ -96,9 +98,22 @@
 	return map;
 }
 
+static const struct of_device_id ti_dma_master_match[] = {
+	{
+		.compatible = "ti,omap4430-sdma",
+		.data = (void *)TI_XBAR_SDMA_OFFSET,
+	},
+	{
+		.compatible = "ti,edma3",
+		.data = (void *)TI_XBAR_EDMA_OFFSET,
+	},
+	{},
+};
+
 static int ti_dma_xbar_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
 	struct device_node *dma_node;
 	struct ti_dma_xbar_data *xbar;
 	struct resource *res;
@@ -113,12 +128,20 @@
 	if (!xbar)
 		return -ENOMEM;
 
+	idr_init(&xbar->map_idr);
+
 	dma_node = of_parse_phandle(node, "dma-masters", 0);
 	if (!dma_node) {
 		dev_err(&pdev->dev, "Can't get DMA master node\n");
 		return -ENODEV;
 	}
 
+	match = of_match_node(ti_dma_master_match, dma_node);
+	if (!match) {
+		dev_err(&pdev->dev, "DMA master is not supported\n");
+		return -EINVAL;
+	}
+
 	if (of_property_read_u32(dma_node, "dma-requests",
 				 &xbar->dma_requests)) {
 		dev_info(&pdev->dev,
@@ -139,17 +162,15 @@
 		xbar->safe_val = (u16)safe_val;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
 	iomem = devm_ioremap_resource(&pdev->dev, res);
-	if (!iomem)
-		return -ENOMEM;
+	if (IS_ERR(iomem))
+		return PTR_ERR(iomem);
 
 	xbar->iomem = iomem;
 
 	xbar->dmarouter.dev = &pdev->dev;
 	xbar->dmarouter.route_free = ti_dma_xbar_free;
+	xbar->dma_offset = (u32)match->data;
 
 	platform_set_drvdata(pdev, xbar);
 
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index c4c3d93..559cd40 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -10,10 +10,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 /* Supports:
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index dff22ab..b23e8d5 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -21,6 +21,7 @@
  * NOTE: PM support is currently not available.
  */
 
+#include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
@@ -151,7 +152,6 @@
 #define XGENE_DMA_PQ_CHANNEL		1
 #define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
 #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
-#define XGENE_DMA_XOR_ALIGNMENT		6	/* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC		5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
 #define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
@@ -764,12 +764,17 @@
 	struct xgene_dma_ring *ring = &chan->rx_ring;
 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
 	struct xgene_dma_desc_hw *desc_hw;
+	struct list_head ld_completed;
 	u8 status;
 
+	INIT_LIST_HEAD(&ld_completed);
+
+	spin_lock_bh(&chan->lock);
+
 	/* Clean already completed and acked descriptors */
 	xgene_dma_clean_completed_descriptor(chan);
 
-	/* Run the callback for each descriptor, in order */
+	/* Move all completed descriptors to ld completed queue, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
 		/* Get subsequent hw descriptor from DMA rx ring */
 		desc_hw = &ring->desc_hw[ring->head];
@@ -812,15 +817,17 @@
 		/* Mark this hw descriptor as processed */
 		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
-		xgene_dma_run_tx_complete_actions(chan, desc_sw);
-
-		xgene_dma_clean_running_descriptor(chan, desc_sw);
-
 		/*
 		 * Decrement the pending transaction count
 		 * as we have processed one
 		 */
 		chan->pending--;
+
+		/*
+		 * Delete this node from ld running queue and append it to
+		 * ld completed queue for further processing
+		 */
+		list_move_tail(&desc_sw->node, &ld_completed);
 	}
 
 	/*
@@ -829,6 +836,14 @@
 	 * ahead and free the descriptors below.
 	 */
 	xgene_chan_xfer_ld_pending(chan);
+
+	spin_unlock_bh(&chan->lock);
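+	/* The local ld_completed list can now be walked without chan->lock held */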
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
+		xgene_dma_run_tx_complete_actions(chan, desc_sw);
+		xgene_dma_clean_running_descriptor(chan, desc_sw);
+	}
 }
 
 static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
@@ -877,11 +892,11 @@
 	if (!chan->desc_pool)
 		return;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Process all running descriptor */
 	xgene_dma_cleanup_descriptors(chan);
 
+	spin_lock_bh(&chan->lock);
+
 	/* Clean all link descriptor queues */
 	xgene_dma_free_desc_list(chan, &chan->ld_pending);
 	xgene_dma_free_desc_list(chan, &chan->ld_running);
@@ -1201,15 +1216,11 @@
 {
 	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 
-	spin_lock_bh(&chan->lock);
-
 	/* Run all cleanup for descriptors which have been completed */
 	xgene_dma_cleanup_descriptors(chan);
 
 	/* Re-enable DMA channel IRQ */
 	enable_irq(chan->rx_irq);
-
-	spin_unlock_bh(&chan->lock);
 }
 
 static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
@@ -1741,13 +1752,13 @@
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
 		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 
 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
 		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
 		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
-		dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT;
+		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
 	}
 }
 
@@ -1944,16 +1955,18 @@
 		return ret;
 
 	pdma->clk = devm_clk_get(&pdev->dev, NULL);
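+	/* The clock is optional on ACPI systems; treat a missing clock as fatal only for DT */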
-	if (IS_ERR(pdma->clk)) {
+	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
 		dev_err(&pdev->dev, "Failed to get clk\n");
 		return PTR_ERR(pdma->clk);
 	}
 
 	/* Enable clk before accessing registers */
-	ret = clk_prepare_enable(pdma->clk);
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
-		return ret;
+	if (!IS_ERR(pdma->clk)) {
+		ret = clk_prepare_enable(pdma->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
+			return ret;
+		}
 	}
 
 	/* Remove DMA RAM out of shutdown */
@@ -1998,7 +2011,8 @@
 
 err_dma_mask:
 err_clk_enable:
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return ret;
 }
@@ -2022,11 +2036,20 @@
 		xgene_dma_delete_chan_rings(chan);
 	}
 
-	clk_disable_unprepare(pdma->clk);
+	if (!IS_ERR(pdma->clk))
+		clk_disable_unprepare(pdma->clk);
 
 	return 0;
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
+	{"APMC0D43", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
+#endif
+
 static const struct of_device_id xgene_dma_of_match_ptr[] = {
 	{.compatible = "apm,xgene-storm-dma",},
 	{},
@@ -2039,6 +2062,7 @@
 	.driver = {
 		.name = "X-Gene-DMA",
 		.of_match_table = xgene_dma_of_match_ptr,
+		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
 	},
 };
 
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
new file mode 100644
index 0000000..39915a6
--- /dev/null
+++ b/drivers/dma/zx296702_dma.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright 2015 Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define DRIVER_NAME		"zx-dma"
+#define DMA_ALIGN		4
+#define DMA_MAX_SIZE		(0x10000 - PAGE_SIZE)
+#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)
+
+#define REG_ZX_SRC_ADDR			0x00
+#define REG_ZX_DST_ADDR			0x04
+#define REG_ZX_TX_X_COUNT		0x08
+#define REG_ZX_TX_ZY_COUNT		0x0c
+#define REG_ZX_SRC_ZY_STEP		0x10
+#define REG_ZX_DST_ZY_STEP		0x14
+#define REG_ZX_LLI_ADDR			0x1c
+#define REG_ZX_CTRL			0x20
+#define REG_ZX_TC_IRQ			0x800
+#define REG_ZX_SRC_ERR_IRQ		0x804
+#define REG_ZX_DST_ERR_IRQ		0x808
+#define REG_ZX_CFG_ERR_IRQ		0x80c
+#define REG_ZX_TC_IRQ_RAW		0x810
+#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
+#define REG_ZX_DST_ERR_IRQ_RAW		0x818
+#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
+#define REG_ZX_STATUS			0x820
+#define REG_ZX_DMA_GRP_PRIO		0x824
+#define REG_ZX_DMA_ARB			0x828
+
+#define ZX_FORCE_CLOSE			BIT(31)
+#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
+#define ZX_MAX_BURST_LEN		16
+#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
+#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
+#define ZX_IRQ_ENABLE_ALL		(3 << 4)
+#define ZX_DST_FIFO_MODE		BIT(3)
+#define ZX_SRC_FIFO_MODE		BIT(2)
+#define ZX_SOFT_REQ			BIT(1)
+#define ZX_CH_ENABLE			BIT(0)
+
+#define ZX_DMA_BUSWIDTHS \
+	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+enum zx_dma_burst_width {
+	ZX_DMA_WIDTH_8BIT	= 0,
+	ZX_DMA_WIDTH_16BIT	= 1,
+	ZX_DMA_WIDTH_32BIT	= 2,
+	ZX_DMA_WIDTH_64BIT	= 3,
+};
+
+struct zx_desc_hw {
+	u32 saddr;
+	u32 daddr;
+	u32 src_x;
+	u32 src_zy;
+	u32 src_zy_step;
+	u32 dst_zy_step;
+	u32 reserved1;
+	u32 lli;
+	u32 ctr;
+	u32 reserved[7]; /* pad to the hardware register region size */
+} __aligned(32);
+
+struct zx_dma_desc_sw {
+	struct virt_dma_desc	vd;
+	dma_addr_t		desc_hw_lli;
+	size_t			desc_num;
+	size_t			size;
+	struct zx_desc_hw	*desc_hw;
+};
+
+struct zx_dma_phy;
+
+struct zx_dma_chan {
+	struct dma_slave_config slave_cfg;
+	int			id; /* Request phy chan id */
+	u32			ccfg;
+	u32			cyclic;
+	struct virt_dma_chan	vc;
+	struct zx_dma_phy	*phy;
+	struct list_head	node;
+	dma_addr_t		dev_addr;
+	enum dma_status		status;
+};
+
+struct zx_dma_phy {
+	u32			idx;
+	void __iomem		*base;
+	struct zx_dma_chan	*vchan;
+	struct zx_dma_desc_sw	*ds_run;
+	struct zx_dma_desc_sw	*ds_done;
+};
+
+struct zx_dma_dev {
+	struct dma_device	slave;
+	void __iomem		*base;
+	spinlock_t		lock; /* lock for ch and phy */
+	struct list_head	chan_pending;
+	struct zx_dma_phy	*phy;
+	struct zx_dma_chan	*chans;
+	struct clk		*clk;
+	struct dma_pool		*pool;
+	u32			dma_channels;
+	u32			dma_requests;
+	int			irq;
+};
+
+#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)
+
+static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
+{
+	return container_of(chan, struct zx_dma_chan, vc.chan);
+}
+
+static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
+{
+	u32 val = 0;
+
+	val = readl_relaxed(phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	val |= ZX_FORCE_CLOSE;
+	writel_relaxed(val, phy->base + REG_ZX_CTRL);
+
+	val = 0x1 << phy->idx;
+	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
+{
+	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
+	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
+	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
+	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
+	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
+	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
+	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
+	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
+}
+
+static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
+{
+	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
+}
+
+static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
+{
+	return readl_relaxed(d->base + REG_ZX_STATUS);
+}
+
+static void zx_dma_init_state(struct zx_dma_dev *d)
+{
+	/* set same priority */
+	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
+	/* clear all irq */
+	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+}
+
+static int zx_dma_start_txd(struct zx_dma_chan *c)
+{
+	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
+	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+
+	if (!c->phy)
+		return -EAGAIN;
+
+	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
+		return -EAGAIN;
+
+	if (vd) {
+		struct zx_dma_desc_sw *ds =
+			container_of(vd, struct zx_dma_desc_sw, vd);
+		/*
+		 * Fetch and remove the request from vc->desc_issued so that
+		 * vc->desc_issued only contains descriptors still pending
+		 */
+		list_del(&ds->vd.node);
+		c->phy->ds_run = ds;
+		c->phy->ds_done = NULL;
+		/* start dma */
+		zx_dma_set_desc(c->phy, ds->desc_hw);
+		return 0;
+	}
+	c->phy->ds_done = NULL;
+	c->phy->ds_run = NULL;
+	return -EAGAIN;
+}
+
+static void zx_dma_task(struct zx_dma_dev *d)
+{
+	struct zx_dma_phy *p;
+	struct zx_dma_chan *c, *cn;
+	unsigned pch, pch_alloc = 0;
+	unsigned long flags;
+
+	/* check new dma request of running channel in vc->desc_issued */
+	list_for_each_entry_safe(c, cn, &d->slave.channels,
+				 vc.chan.device_node) {
+		spin_lock_irqsave(&c->vc.lock, flags);
+		p = c->phy;
+		if (p && p->ds_done && zx_dma_start_txd(c)) {
+			/* No current txd associated with this channel */
+			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
+			/* Mark this channel free */
+			c->phy = NULL;
+			p->vchan = NULL;
+		}
+		spin_unlock_irqrestore(&c->vc.lock, flags);
+	}
+
+	/* check new channel request in d->chan_pending */
+	spin_lock_irqsave(&d->lock, flags);
+	while (!list_empty(&d->chan_pending)) {
+		c = list_first_entry(&d->chan_pending,
+				     struct zx_dma_chan, node);
+		p = &d->phy[c->id];
+		if (!p->vchan) {
+			/* remove from d->chan_pending */
+			list_del_init(&c->node);
+			pch_alloc |= 1 << c->id;
+			/* Mark this channel allocated */
+			p->vchan = c;
+			c->phy = p;
+		} else {
+			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
+		}
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	for (pch = 0; pch < d->dma_channels; pch++) {
+		if (pch_alloc & (1 << pch)) {
+			p = &d->phy[pch];
+			c = p->vchan;
+			if (c) {
+				spin_lock_irqsave(&c->vc.lock, flags);
+				zx_dma_start_txd(c);
+				spin_unlock_irqrestore(&c->vc.lock, flags);
+			}
+		}
+	}
+}
+
+static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
+{
+	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
+	struct zx_dma_phy *p;
+	struct zx_dma_chan *c;
+	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
+	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
+	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
+	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
+	u32 i, irq_chan = 0, task = 0;
+
+	while (tc) {
+		i = __ffs(tc);
+		tc &= ~BIT(i);
+		p = &d->phy[i];
+		c = p->vchan;
+		if (c) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&c->vc.lock, flags);
+			if (c->cyclic) {
+				vchan_cyclic_callback(&p->ds_run->vd);
+			} else {
+				vchan_cookie_complete(&p->ds_run->vd);
+				p->ds_done = p->ds_run;
+				task = 1;
+			}
+			spin_unlock_irqrestore(&c->vc.lock, flags);
+			irq_chan |= BIT(i);
+		}
+	}
+
+	if (serr || derr || cfg)
+		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
+			 serr, derr, cfg);
+
+	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
+	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
+	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
+	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
+
+	if (task)
+		zx_dma_task(d);
+	return IRQ_HANDLED;
+}
+
+static void zx_dma_free_chan_resources(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&d->lock, flags);
+	list_del_init(&c->node);
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	vchan_free_chan_resources(&c->vc);
+	c->ccfg = 0;
+}
+
+static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
+					dma_cookie_t cookie,
+					struct dma_tx_state *state)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_phy *p;
+	struct virt_dma_desc *vd;
+	unsigned long flags;
+	enum dma_status ret;
+	size_t bytes = 0;
+
+	ret = dma_cookie_status(&c->vc.chan, cookie, state);
+	if (ret == DMA_COMPLETE || !state)
+		return ret;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	p = c->phy;
+	ret = c->status;
+
+	/*
+	 * If the cookie is on our issue queue, then the residue is
+	 * its total size.
+	 */
+	vd = vchan_find_desc(&c->vc, cookie);
+	if (vd) {
+		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
+	} else if ((!p) || (!p->ds_run)) {
+		bytes = 0;
+	} else {
+		struct zx_dma_desc_sw *ds = p->ds_run;
+		u32 clli = 0, index = 0;
+
+		bytes = 0;
+		clli = zx_dma_get_curr_lli(p);
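+		/* Sum the bytes of the LLI entries the hardware has not completed yet */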
+		index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
+		for (; index < ds->desc_num; index++) {
+			bytes += ds->desc_hw[index].src_x;
+			/* end of lli */
+			if (!ds->desc_hw[index].lli)
+				break;
+		}
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	dma_set_residue(state, bytes);
+	return ret;
+}
+
+static void zx_dma_issue_pending(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	unsigned long flags;
+	int issue = 0;
+
+	spin_lock_irqsave(&c->vc.lock, flags);
+	/* add request to vc->desc_issued */
+	if (vchan_issue_pending(&c->vc)) {
+		spin_lock(&d->lock);
+		if (!c->phy && list_empty(&c->node)) {
+			/* if new channel, add chan_pending */
+			list_add_tail(&c->node, &d->chan_pending);
+			issue = 1;
+			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
+		}
+		spin_unlock(&d->lock);
+	} else {
+		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+
+	if (issue)
+		zx_dma_task(d);
+}
+
+static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
+			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
+{
+	if ((num + 1) < ds->desc_num)
+		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
+			sizeof(struct zx_desc_hw);
+	ds->desc_hw[num].saddr = src;
+	ds->desc_hw[num].daddr = dst;
+	ds->desc_hw[num].src_x = len;
+	ds->desc_hw[num].ctr = ccfg;
+}
+
+static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
+						     struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);
+
+	if (num > lli_limit) {
+		dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
+			&c->vc, num, lli_limit);
+		return NULL;
+	}
+
+	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
+	if (!ds)
+		return NULL;
+
+	ds->desc_hw = dma_pool_alloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
+	if (!ds->desc_hw) {
+		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
+		kfree(ds);
+		return NULL;
+	}
+	memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num);
+	ds->desc_num = num;
+	return ds;
+}
+
+static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
+{
+	switch (width) {
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+	case DMA_SLAVE_BUSWIDTH_8_BYTES:
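+		/* bus widths of 1/2/4/8 bytes map to hardware encodings 0/1/2/3 */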
+		return ffs(width) - 1;
+	default:
+		return ZX_DMA_WIDTH_32BIT;
+	}
+}
+
+static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
+{
+	struct dma_slave_config *cfg = &c->slave_cfg;
+	enum zx_dma_burst_width src_width;
+	enum zx_dma_burst_width dst_width;
+	u32 maxburst = 0;
+
+	switch (dir) {
+	case DMA_MEM_TO_MEM:
+		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
+			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
+			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
+			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
+		break;
+	case DMA_MEM_TO_DEV:
+		c->dev_addr = cfg->dst_addr;
+		/* dst len is calculated from src width, len and dst width.
+		 * We need to make sure dst len does not exceed MAX LEN.
+		 * A trailing single transaction that does not fill a full
+		 * burst also requires identical src/dst data widths.
+		 */
+		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
+		maxburst = cfg->dst_maxburst;
+		maxburst = maxburst < ZX_MAX_BURST_LEN ?
+				maxburst : ZX_MAX_BURST_LEN;
+		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
+			| ZX_SRC_BURST_LEN(maxburst - 1)
+			| ZX_SRC_BURST_WIDTH(dst_width)
+			| ZX_DST_BURST_WIDTH(dst_width);
+		break;
+	case DMA_DEV_TO_MEM:
+		c->dev_addr = cfg->src_addr;
+		src_width = zx_dma_burst_width(cfg->src_addr_width);
+		maxburst = cfg->src_maxburst;
+		maxburst = maxburst < ZX_MAX_BURST_LEN ?
+				maxburst : ZX_MAX_BURST_LEN;
+		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
+			| ZX_SRC_BURST_LEN(maxburst - 1)
+			| ZX_SRC_BURST_WIDTH(src_width)
+			| ZX_DST_BURST_WIDTH(src_width);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
+	struct dma_chan *chan,	dma_addr_t dst, dma_addr_t src,
+	size_t len, unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	size_t copy = 0;
+	int num = 0;
+
+	if (!len)
+		return NULL;
+
+	if (zx_pre_config(c, DMA_MEM_TO_MEM))
+		return NULL;
+
+	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
+
+	ds = zx_alloc_desc_resource(num, chan);
+	if (!ds)
+		return NULL;
+
+	ds->size = len;
+	num = 0;
+
+	do {
+		copy = min_t(size_t, len, DMA_MAX_SIZE);
+		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
+
+		src += copy;
+		dst += copy;
+		len -= copy;
+	} while (len);
+
+	c->cyclic = 0;
+	ds->desc_hw[num - 1].lli = 0;	/* end of link */
+	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
+	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
+	enum dma_transfer_direction dir, unsigned long flags, void *context)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	size_t len, avail, total = 0;
+	struct scatterlist *sg;
+	dma_addr_t addr, src = 0, dst = 0;
+	int num = sglen, i;
+
+	if (!sgl)
+		return NULL;
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	for_each_sg(sgl, sg, sglen, i) {
+		avail = sg_dma_len(sg);
+		if (avail > DMA_MAX_SIZE)
+			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
+	}
+
+	ds = zx_alloc_desc_resource(num, chan);
+	if (!ds)
+		return NULL;
+
+	c->cyclic = 0;
+	num = 0;
+	for_each_sg(sgl, sg, sglen, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+		total += avail;
+
+		do {
+			len = min_t(size_t, avail, DMA_MAX_SIZE);
+
+			if (dir == DMA_MEM_TO_DEV) {
+				src = addr;
+				dst = c->dev_addr;
+			} else if (dir == DMA_DEV_TO_MEM) {
+				src = c->dev_addr;
+				dst = addr;
+			}
+
+			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
+
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	ds->desc_hw[num - 1].lli = 0;	/* end of link */
+	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
+	ds->size = total;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
+		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+		size_t period_len, enum dma_transfer_direction dir,
+		unsigned long flags)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_desc_sw *ds;
+	dma_addr_t src = 0, dst = 0;
+	int num_periods = buf_len / period_len;
+	int buf = 0, num = 0;
+
+	if (period_len > DMA_MAX_SIZE) {
+		dev_err(chan->device->dev, "maximum period size exceeded\n");
+		return NULL;
+	}
+
+	if (zx_pre_config(c, dir))
+		return NULL;
+
+	ds = zx_alloc_desc_resource(num_periods, chan);
+	if (!ds)
+		return NULL;
+	c->cyclic = 1;
+
+	while (buf < buf_len) {
+		if (dir == DMA_MEM_TO_DEV) {
+			src = dma_addr;
+			dst = c->dev_addr;
+		} else if (dir == DMA_DEV_TO_MEM) {
+			src = c->dev_addr;
+			dst = dma_addr;
+		}
+		zx_dma_fill_desc(ds, dst, src, period_len, num++,
+				 c->ccfg | ZX_IRQ_ENABLE_ALL);
+		dma_addr += period_len;
+		buf += period_len;
+	}
+
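+	/* Link the last descriptor back to the first to close the cyclic chain */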
+	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
+	ds->size = buf_len;
+	return vchan_tx_prep(&c->vc, &ds->vd, flags);
+}
+
+static int zx_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+
+	if (!cfg)
+		return -EINVAL;
+
+	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));
+
+	return 0;
+}
+
+static int zx_dma_terminate_all(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	struct zx_dma_dev *d = to_zx_dma(chan->device);
+	struct zx_dma_phy *p = c->phy;
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
+
+	/* Prevent this channel being scheduled */
+	spin_lock(&d->lock);
+	list_del_init(&c->node);
+	spin_unlock(&d->lock);
+
+	/* Clear the tx descriptor lists */
+	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_get_all_descriptors(&c->vc, &head);
+	if (p) {
+		/* vchan is assigned to a pchan - stop the channel */
+		zx_dma_terminate_chan(p, d);
+		c->phy = NULL;
+		p->vchan = NULL;
+		p->ds_run = NULL;
+		p->ds_done = NULL;
+	}
+	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_dma_desc_free_list(&c->vc, &head);
+
+	return 0;
+}
+
+static int zx_dma_transfer_pause(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val &= ~ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static int zx_dma_transfer_resume(struct dma_chan *chan)
+{
+	struct zx_dma_chan *c = to_zx_chan(chan);
+	u32 val = 0;
+
+	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
+	val |= ZX_CH_ENABLE;
+	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);
+
+	return 0;
+}
+
+static void zx_dma_free_desc(struct virt_dma_desc *vd)
+{
+	struct zx_dma_desc_sw *ds =
+		container_of(vd, struct zx_dma_desc_sw, vd);
+	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);
+
+	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
+	kfree(ds);
+}
+
+static const struct of_device_id zx6702_dma_dt_ids[] = {
+	{ .compatible = "zte,zx296702-dma", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);
+
+static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+					       struct of_dma *ofdma)
+{
+	struct zx_dma_dev *d = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+	struct dma_chan *chan;
+	struct zx_dma_chan *c;
+
+	if (request > d->dma_requests)
+		return NULL;
+
+	chan = dma_get_any_slave_channel(&d->slave);
+	if (!chan) {
+		dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
+		return NULL;
+	}
+	c = to_zx_chan(chan);
+	c->id = request;
+	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
+		 c->id, &c->vc);
+	return chan;
+}
+
+static int zx_dma_probe(struct platform_device *op)
+{
+	struct zx_dma_dev *d;
+	struct resource *iores;
+	int i, ret = 0;
+
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->base = devm_ioremap_resource(&op->dev, iores);
+	if (IS_ERR(d->base))
+		return PTR_ERR(d->base);
+
+	of_property_read_u32((&op->dev)->of_node,
+			     "dma-channels", &d->dma_channels);
+	of_property_read_u32((&op->dev)->of_node,
+			     "dma-requests", &d->dma_requests);
+	if (!d->dma_requests || !d->dma_channels)
+		return -EINVAL;
+
+	d->clk = devm_clk_get(&op->dev, NULL);
+	if (IS_ERR(d->clk)) {
+		dev_err(&op->dev, "no dma clk\n");
+		return PTR_ERR(d->clk);
+	}
+
+	d->irq = platform_get_irq(op, 0);
+	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
+			       0, DRIVER_NAME, d);
+	if (ret)
+		return ret;
+
+	/* A DMA memory pool for LLIs, align on 32-byte boundary */
+	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
+			LLI_BLOCK_SIZE, 32, 0);
+	if (!d->pool)
+		return -ENOMEM;
+
+	/* init phy channel */
+	d->phy = devm_kzalloc(&op->dev,
+		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
+	if (!d->phy)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_channels; i++) {
+		struct zx_dma_phy *p = &d->phy[i];
+
+		p->idx = i;
+		p->base = d->base + i * 0x40;
+	}
+
+	INIT_LIST_HEAD(&d->slave.channels);
+	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
+	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
+	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
+	d->slave.dev = &op->dev;
+	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
+	d->slave.device_tx_status = zx_dma_tx_status;
+	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
+	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
+	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
+	d->slave.device_issue_pending = zx_dma_issue_pending;
+	d->slave.device_config = zx_dma_config;
+	d->slave.device_terminate_all = zx_dma_terminate_all;
+	d->slave.device_pause = zx_dma_transfer_pause;
+	d->slave.device_resume = zx_dma_transfer_resume;
+	d->slave.copy_align = DMA_ALIGN;
+	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
+	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
+	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
+			| BIT(DMA_DEV_TO_MEM);
+	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
+	/* init virtual channel */
+	d->chans = devm_kzalloc(&op->dev,
+		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
+	if (!d->chans)
+		return -ENOMEM;
+
+	for (i = 0; i < d->dma_requests; i++) {
+		struct zx_dma_chan *c = &d->chans[i];
+
+		c->status = DMA_IN_PROGRESS;
+		INIT_LIST_HEAD(&c->node);
+		c->vc.desc_free = zx_dma_free_desc;
+		vchan_init(&c->vc, &d->slave);
+	}
+
+	/* Enable clock before accessing registers */
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
+		goto zx_dma_out;
+	}
+
+	zx_dma_init_state(d);
+
+	spin_lock_init(&d->lock);
+	INIT_LIST_HEAD(&d->chan_pending);
+	platform_set_drvdata(op, d);
+
+	ret = dma_async_device_register(&d->slave);
+	if (ret)
+		goto clk_dis;
+
+	ret = of_dma_controller_register((&op->dev)->of_node,
+					 zx_of_dma_simple_xlate, d);
+	if (ret)
+		goto of_dma_register_fail;
+
+	dev_info(&op->dev, "initialized\n");
+	return 0;
+
+of_dma_register_fail:
+	dma_async_device_unregister(&d->slave);
+clk_dis:
+	clk_disable_unprepare(d->clk);
+zx_dma_out:
+	return ret;
+}
+
+static int zx_dma_remove(struct platform_device *op)
+{
+	struct zx_dma_chan *c, *cn;
+	struct zx_dma_dev *d = platform_get_drvdata(op);
+
+	/* explicitly free the irq */
+	devm_free_irq(&op->dev, d->irq, d);
+
+	dma_async_device_unregister(&d->slave);
+	of_dma_controller_free((&op->dev)->of_node);
+
+	list_for_each_entry_safe(c, cn, &d->slave.channels,
+				 vc.chan.device_node) {
+		list_del(&c->vc.chan.device_node);
+	}
+	clk_disable_unprepare(d->clk);
+	dmam_pool_destroy(d->pool);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int zx_dma_suspend_dev(struct device *dev)
+{
+	struct zx_dma_dev *d = dev_get_drvdata(dev);
+	u32 stat = 0;
+
+	stat = zx_dma_get_chan_stat(d);
+	if (stat) {
+		dev_warn(d->slave.dev,
+			 "channels 0x%x still running, fail to suspend\n", stat);
+		return -EBUSY;
+	}
+	clk_disable_unprepare(d->clk);
+	return 0;
+}
+
+static int zx_dma_resume_dev(struct device *dev)
+{
+	struct zx_dma_dev *d = dev_get_drvdata(dev);
+	int ret = 0;
+
+	ret = clk_prepare_enable(d->clk);
+	if (ret < 0) {
+		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
+		return ret;
+	}
+	zx_dma_init_state(d);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);
+
+static struct platform_driver zx_pdma_driver = {
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.pm	= &zx_dma_pmops,
+		.of_match_table = zx6702_dma_dt_ids,
+	},
+	.probe		= zx_dma_probe,
+	.remove		= zx_dma_remove,
+};
+
+module_platform_driver(zx_pdma_driver);
+
+MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
+MODULE_AUTHOR("Jun Nie jun.nie@linaro.org");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 8677ead..ef25000 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -61,16 +61,6 @@
 	  which occur really early upon boot, before the module infrastructure
 	  has been initialized.
 
-config EDAC_MCE_INJ
-	tristate "Simple MCE injection interface"
-	depends on EDAC_DECODE_MCE && DEBUG_FS
-	default n
-	help
-	  This is a simple debugfs interface to inject MCEs and test different
-	  aspects of the MCE handling code.
-
-	  WARNING: Do not even assume this interface is staying stable!
-
 config EDAC_MM_EDAC
 	tristate "Main Memory EDAC (Error Detection And Correction) reporting"
 	select RAS
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 28ef2a5..ae3c5f3 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -17,7 +17,6 @@
 endif
 
 obj-$(CONFIG_EDAC_GHES)			+= ghes_edac.o
-obj-$(CONFIG_EDAC_MCE_INJ)		+= mce_amd_inj.o
 
 edac_mce_amd-y				:= mce_amd.o
 obj-$(CONFIG_EDAC_DECODE_MCE)		+= edac_mce_amd.o
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 58586d5..e3a945c 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -763,7 +763,8 @@
 		c->x86, c->x86_model, c->x86_mask,
 		m->bank,
 		((m->status & MCI_STATUS_OVER)	? "Over"  : "-"),
-		((m->status & MCI_STATUS_UC)	? "UE"	  : "CE"),
+		((m->status & MCI_STATUS_UC)	? "UE"	  :
+		 (m->status & MCI_STATUS_DEFERRED) ? "-"  : "CE"),
 		((m->status & MCI_STATUS_MISCV)	? "MiscV" : "-"),
 		((m->status & MCI_STATUS_PCC)	? "PCC"	  : "-"),
 		((m->status & MCI_STATUS_ADDRV)	? "AddrV" : "-"));
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
deleted file mode 100644
index 4c73e4d0..0000000
--- a/drivers/edac/mce_amd_inj.c
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * A simple MCE injection facility for testing different aspects of the RAS
- * code. This driver should be built as module so that it can be loaded
- * on production kernels for testing purposes.
- *
- * This file may be distributed under the terms of the GNU General Public
- * License version 2.
- *
- * Copyright (c) 2010-14:  Borislav Petkov <bp@alien8.de>
- *			Advanced Micro Devices Inc.
- */
-
-#include <linux/kobject.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <asm/mce.h>
-
-#include "mce_amd.h"
-
-/*
- * Collect all the MCi_XXX settings
- */
-static struct mce i_mce;
-static struct dentry *dfs_inj;
-
-static u8 n_banks;
-
-#define MAX_FLAG_OPT_SIZE	3
-
-enum injection_type {
-	SW_INJ = 0,	/* SW injection, simply decode the error */
-	HW_INJ,		/* Trigger a #MC */
-	N_INJ_TYPES,
-};
-
-static const char * const flags_options[] = {
-	[SW_INJ] = "sw",
-	[HW_INJ] = "hw",
-	NULL
-};
-
-/* Set default injection to SW_INJ */
-static enum injection_type inj_type = SW_INJ;
-
-#define MCE_INJECT_SET(reg)						\
-static int inj_##reg##_set(void *data, u64 val)				\
-{									\
-	struct mce *m = (struct mce *)data;				\
-									\
-	m->reg = val;							\
-	return 0;							\
-}
-
-MCE_INJECT_SET(status);
-MCE_INJECT_SET(misc);
-MCE_INJECT_SET(addr);
-
-#define MCE_INJECT_GET(reg)						\
-static int inj_##reg##_get(void *data, u64 *val)			\
-{									\
-	struct mce *m = (struct mce *)data;				\
-									\
-	*val = m->reg;							\
-	return 0;							\
-}
-
-MCE_INJECT_GET(status);
-MCE_INJECT_GET(misc);
-MCE_INJECT_GET(addr);
-
-DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
-
-/*
- * Caller needs to be make sure this cpu doesn't disappear
- * from under us, i.e.: get_cpu/put_cpu.
- */
-static int toggle_hw_mce_inject(unsigned int cpu, bool enable)
-{
-	u32 l, h;
-	int err;
-
-	err = rdmsr_on_cpu(cpu, MSR_K7_HWCR, &l, &h);
-	if (err) {
-		pr_err("%s: error reading HWCR\n", __func__);
-		return err;
-	}
-
-	enable ? (l |= BIT(18)) : (l &= ~BIT(18));
-
-	err = wrmsr_on_cpu(cpu, MSR_K7_HWCR, l, h);
-	if (err)
-		pr_err("%s: error writing HWCR\n", __func__);
-
-	return err;
-}
-
-static int __set_inj(const char *buf)
-{
-	int i;
-
-	for (i = 0; i < N_INJ_TYPES; i++) {
-		if (!strncmp(flags_options[i], buf, strlen(flags_options[i]))) {
-			inj_type = i;
-			return 0;
-		}
-	}
-	return -EINVAL;
-}
-
-static ssize_t flags_read(struct file *filp, char __user *ubuf,
-			  size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_FLAG_OPT_SIZE];
-	int n;
-
-	n = sprintf(buf, "%s\n", flags_options[inj_type]);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
-}
-
-static ssize_t flags_write(struct file *filp, const char __user *ubuf,
-			   size_t cnt, loff_t *ppos)
-{
-	char buf[MAX_FLAG_OPT_SIZE], *__buf;
-	int err;
-	size_t ret;
-
-	if (cnt > MAX_FLAG_OPT_SIZE)
-		cnt = MAX_FLAG_OPT_SIZE;
-
-	ret = cnt;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt - 1] = 0;
-
-	/* strip whitespace */
-	__buf = strstrip(buf);
-
-	err = __set_inj(__buf);
-	if (err) {
-		pr_err("%s: Invalid flags value: %s\n", __func__, __buf);
-		return err;
-	}
-
-	*ppos += ret;
-
-	return ret;
-}
-
-static const struct file_operations flags_fops = {
-	.read           = flags_read,
-	.write          = flags_write,
-	.llseek         = generic_file_llseek,
-};
-
-/*
- * On which CPU to inject?
- */
-MCE_INJECT_GET(extcpu);
-
-static int inj_extcpu_set(void *data, u64 val)
-{
-	struct mce *m = (struct mce *)data;
-
-	if (val >= nr_cpu_ids || !cpu_online(val)) {
-		pr_err("%s: Invalid CPU: %llu\n", __func__, val);
-		return -EINVAL;
-	}
-	m->extcpu = val;
-	return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(extcpu_fops, inj_extcpu_get, inj_extcpu_set, "%llu\n");
-
-static void trigger_mce(void *info)
-{
-	asm volatile("int $18");
-}
-
-static void do_inject(void)
-{
-	u64 mcg_status = 0;
-	unsigned int cpu = i_mce.extcpu;
-	u8 b = i_mce.bank;
-
-	if (i_mce.misc)
-		i_mce.status |= MCI_STATUS_MISCV;
-
-	if (inj_type == SW_INJ) {
-		amd_decode_mce(NULL, 0, &i_mce);
-		return;
-	}
-
-	/* prep MCE global settings for the injection */
-	mcg_status = MCG_STATUS_MCIP | MCG_STATUS_EIPV;
-
-	if (!(i_mce.status & MCI_STATUS_PCC))
-		mcg_status |= MCG_STATUS_RIPV;
-
-	get_online_cpus();
-	if (!cpu_online(cpu))
-		goto err;
-
-	toggle_hw_mce_inject(cpu, true);
-
-	wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
-		     (u32)mcg_status, (u32)(mcg_status >> 32));
-
-	wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
-		     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
-	wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
-		     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-
-	wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
-		     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-
-	toggle_hw_mce_inject(cpu, false);
-
-	smp_call_function_single(cpu, trigger_mce, NULL, 0);
-
-err:
-	put_online_cpus();
-
-}
-
-/*
- * This denotes into which bank we're injecting and triggers
- * the injection, at the same time.
- */
-static int inj_bank_set(void *data, u64 val)
-{
-	struct mce *m = (struct mce *)data;
-
-	if (val >= n_banks) {
-		pr_err("Non-existent MCE bank: %llu\n", val);
-		return -EINVAL;
-	}
-
-	m->bank = val;
-	do_inject();
-
-	return 0;
-}
-
-MCE_INJECT_GET(bank);
-
-DEFINE_SIMPLE_ATTRIBUTE(bank_fops, inj_bank_get, inj_bank_set, "%llu\n");
-
-static const char readme_msg[] =
-"Description of the files and their usages:\n"
-"\n"
-"Note1: i refers to the bank number below.\n"
-"Note2: See respective BKDGs for the exact bit definitions of the files below\n"
-"as they mirror the hardware registers.\n"
-"\n"
-"status:\t Set MCi_STATUS: the bits in that MSR control the error type and\n"
-"\t attributes of the error which caused the MCE.\n"
-"\n"
-"misc:\t Set MCi_MISC: provide auxiliary info about the error. It is mostly\n"
-"\t used for error thresholding purposes and its validity is indicated by\n"
-"\t MCi_STATUS[MiscV].\n"
-"\n"
-"addr:\t Error address value to be written to MCi_ADDR. Log address information\n"
-"\t associated with the error.\n"
-"\n"
-"cpu:\t The CPU to inject the error on.\n"
-"\n"
-"bank:\t Specify the bank you want to inject the error into: the number of\n"
-"\t banks in a processor varies and is family/model-specific, therefore, the\n"
-"\t supplied value is sanity-checked. Setting the bank value also triggers the\n"
-"\t injection.\n"
-"\n"
-"flags:\t Injection type to be performed. Writing to this file will trigger a\n"
-"\t real machine check, an APIC interrupt or invoke the error decoder routines\n"
-"\t for AMD processors.\n"
-"\n"
-"\t Allowed error injection types:\n"
-"\t  - \"sw\": Software error injection. Decode error to a human-readable \n"
-"\t    format only. Safe to use.\n"
-"\t  - \"hw\": Hardware error injection. Causes the #MC exception handler to \n"
-"\t    handle the error. Be warned: might cause system panic if MCi_STATUS[PCC] \n"
-"\t    is set. Therefore, consider setting (debugfs_mountpoint)/mce/fake_panic \n"
-"\t    before injecting.\n"
-"\n";
-
-static ssize_t
-inj_readme_read(struct file *filp, char __user *ubuf,
-		       size_t cnt, loff_t *ppos)
-{
-	return simple_read_from_buffer(ubuf, cnt, ppos,
-					readme_msg, strlen(readme_msg));
-}
-
-static const struct file_operations readme_fops = {
-	.read		= inj_readme_read,
-};
-
-static struct dfs_node {
-	char *name;
-	struct dentry *d;
-	const struct file_operations *fops;
-	umode_t perm;
-} dfs_fls[] = {
-	{ .name = "status",	.fops = &status_fops, .perm = S_IRUSR | S_IWUSR },
-	{ .name = "misc",	.fops = &misc_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "addr",	.fops = &addr_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "bank",	.fops = &bank_fops,   .perm = S_IRUSR | S_IWUSR },
-	{ .name = "flags",	.fops = &flags_fops,  .perm = S_IRUSR | S_IWUSR },
-	{ .name = "cpu",	.fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
-	{ .name = "README",	.fops = &readme_fops, .perm = S_IRUSR | S_IRGRP | S_IROTH },
-};
-
-static int __init init_mce_inject(void)
-{
-	int i;
-	u64 cap;
-
-	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	n_banks = cap & MCG_BANKCNT_MASK;
-
-	dfs_inj = debugfs_create_dir("mce-inject", NULL);
-	if (!dfs_inj)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++) {
-		dfs_fls[i].d = debugfs_create_file(dfs_fls[i].name,
-						    dfs_fls[i].perm,
-						    dfs_inj,
-						    &i_mce,
-						    dfs_fls[i].fops);
-
-		if (!dfs_fls[i].d)
-			goto err_dfs_add;
-	}
-
-	return 0;
-
-err_dfs_add:
-	while (--i >= 0)
-		debugfs_remove(dfs_fls[i].d);
-
-	debugfs_remove(dfs_inj);
-	dfs_inj = NULL;
-
-	return -ENOMEM;
-}
-
-static void __exit exit_mce_inject(void)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(dfs_fls); i++)
-		debugfs_remove(dfs_fls[i].d);
-
-	memset(&dfs_fls, 0, sizeof(dfs_fls));
-
-	debugfs_remove(dfs_inj);
-	dfs_inj = NULL;
-}
-module_init(init_mce_inject);
-module_exit(exit_mce_inject);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
-MODULE_AUTHOR("AMD Inc.");
-MODULE_DESCRIPTION("MCE injection facility for RAS testing");
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 14636e4..ba06904 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1168,7 +1168,6 @@
 	.remove = xgene_edac_remove,
 	.driver = {
 		.name = "xgene-edac",
-		.owner = THIS_MODULE,
 		.of_match_table = xgene_edac_of_match,
 	},
 };
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index ad87f26..4b9f09c 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -20,10 +20,12 @@
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/err.h>
+#include <linux/gpio/consumer.h>
 #include <linux/gpio.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 #include <linux/extcon.h>
 
@@ -46,6 +48,9 @@
 #define HPDET_DEBOUNCE 500
 #define DEFAULT_MICD_TIMEOUT 2000
 
+#define MICD_DBTIME_TWO_READINGS 2
+#define MICD_DBTIME_FOUR_READINGS 4
+
 #define MICD_LVL_1_TO_7 (ARIZONA_MICD_LVL_1 | ARIZONA_MICD_LVL_2 | \
 			 ARIZONA_MICD_LVL_3 | ARIZONA_MICD_LVL_4 | \
 			 ARIZONA_MICD_LVL_5 | ARIZONA_MICD_LVL_6 | \
@@ -94,6 +99,8 @@
 	int hpdet_ip_version;
 
 	struct extcon_dev *edev;
+
+	struct gpio_desc *micd_pol_gpio;
 };
 
 static const struct arizona_micd_config micd_default_modes[] = {
@@ -204,6 +211,10 @@
 	if (arizona->pdata.micd_pol_gpio > 0)
 		gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
 					info->micd_modes[mode].gpio);
+	else
+		gpiod_set_value_cansleep(info->micd_pol_gpio,
+					 info->micd_modes[mode].gpio);
+
 	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 			   ARIZONA_MICD_BIAS_SRC_MASK,
 			   info->micd_modes[mode].bias <<
@@ -757,10 +768,11 @@
 	mutex_lock(&info->lock);
 
 	dev_dbg(info->arizona->dev, "MICD timed out, reporting HP\n");
-	arizona_identify_headphone(info);
 
 	info->detecting = false;
 
+	arizona_identify_headphone(info);
+
 	arizona_stop_mic(info);
 
 	mutex_unlock(&info->lock);
@@ -820,12 +832,18 @@
 	/* Due to jack detect this should never happen */
 	if (!(val & ARIZONA_MICD_STS)) {
 		dev_warn(arizona->dev, "Detected open circuit\n");
+		info->mic = false;
+		arizona_stop_mic(info);
 		info->detecting = false;
+		arizona_identify_headphone(info);
 		goto handled;
 	}
 
 	/* If we got a high impedance we should have a headset, report it. */
 	if (info->detecting && (val & ARIZONA_MICD_LVL_8)) {
+		info->mic = true;
+		info->detecting = false;
+
 		arizona_identify_headphone(info);
 
 		ret = extcon_set_cable_state_(info->edev,
@@ -841,8 +859,6 @@
 				ret);
 		}
 
-		info->mic = true;
-		info->detecting = false;
 		goto handled;
 	}
 
@@ -855,10 +871,11 @@
 	if (info->detecting && (val & MICD_LVL_1_TO_7)) {
 		if (info->jack_flips >= info->micd_num_modes * 10) {
 			dev_dbg(arizona->dev, "Detected HP/line\n");
-			arizona_identify_headphone(info);
 
 			info->detecting = false;
 
+			arizona_identify_headphone(info);
+
 			arizona_stop_mic(info);
 		} else {
 			info->micd_mode++;
@@ -1110,12 +1127,12 @@
 	regmap_update_bits(arizona->regmap, reg, mask, level);
 }
 
-static int arizona_extcon_of_get_pdata(struct arizona *arizona)
+static int arizona_extcon_device_get_pdata(struct arizona *arizona)
 {
 	struct arizona_pdata *pdata = &arizona->pdata;
 	unsigned int val = ARIZONA_ACCDET_MODE_HPL;
 
-	of_property_read_u32(arizona->dev->of_node, "wlf,hpdet-channel", &val);
+	device_property_read_u32(arizona->dev, "wlf,hpdet-channel", &val);
 	switch (val) {
 	case ARIZONA_ACCDET_MODE_HPL:
 	case ARIZONA_ACCDET_MODE_HPR:
@@ -1127,6 +1144,24 @@
 		pdata->hpdet_channel = ARIZONA_ACCDET_MODE_HPL;
 	}
 
+	device_property_read_u32(arizona->dev, "wlf,micd-detect-debounce",
+				 &pdata->micd_detect_debounce);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-bias-start-time",
+				 &pdata->micd_bias_start_time);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-rate",
+				 &pdata->micd_rate);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-dbtime",
+				 &pdata->micd_dbtime);
+
+	device_property_read_u32(arizona->dev, "wlf,micd-timeout",
+				 &pdata->micd_timeout);
+
+	pdata->micd_force_micbias = device_property_read_bool(arizona->dev,
+						"wlf,micd-force-micbias");
+
 	return 0;
 }
 
@@ -1147,10 +1182,8 @@
 	if (!info)
 		return -ENOMEM;
 
-	if (IS_ENABLED(CONFIG_OF)) {
-		if (!dev_get_platdata(arizona->dev))
-			arizona_extcon_of_get_pdata(arizona);
-	}
+	if (!dev_get_platdata(arizona->dev))
+		arizona_extcon_device_get_pdata(arizona);
 
 	info->micvdd = devm_regulator_get(&pdev->dev, "MICVDD");
 	if (IS_ERR(info->micvdd)) {
@@ -1241,6 +1274,27 @@
 				arizona->pdata.micd_pol_gpio, ret);
 			goto err_register;
 		}
+	} else {
+		if (info->micd_modes[0].gpio)
+			mode = GPIOD_OUT_HIGH;
+		else
+			mode = GPIOD_OUT_LOW;
+
+		/* We can't use devm here because we need to do the get
+		 * against the MFD device, as that is where the of_node
+		 * will reside, but if we devm against that the GPIO
+		 * will not be freed if the extcon driver is unloaded.
+		 */
+		info->micd_pol_gpio = gpiod_get_optional(arizona->dev,
+							 "wlf,micd-pol",
+							 GPIOD_OUT_LOW);
+		if (IS_ERR(info->micd_pol_gpio)) {
+			ret = PTR_ERR(info->micd_pol_gpio);
+			dev_err(arizona->dev,
+				"Failed to get microphone polarity GPIO: %d\n",
+				ret);
+			goto err_register;
+		}
 	}
 
 	if (arizona->pdata.hpdet_id_gpio > 0) {
@@ -1251,7 +1305,7 @@
 		if (ret != 0) {
 			dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
 				arizona->pdata.hpdet_id_gpio, ret);
-			goto err_register;
+			goto err_gpio;
 		}
 	}
 
@@ -1267,11 +1321,19 @@
 				   arizona->pdata.micd_rate
 				   << ARIZONA_MICD_RATE_SHIFT);
 
-	if (arizona->pdata.micd_dbtime)
+	switch (arizona->pdata.micd_dbtime) {
+	case MICD_DBTIME_FOUR_READINGS:
 		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 				   ARIZONA_MICD_DBTIME_MASK,
-				   arizona->pdata.micd_dbtime
-				   << ARIZONA_MICD_DBTIME_SHIFT);
+				   ARIZONA_MICD_DBTIME);
+		break;
+	case MICD_DBTIME_TWO_READINGS:
+		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				   ARIZONA_MICD_DBTIME_MASK, 0);
+		break;
+	default:
+		break;
+	}
 
 	BUILD_BUG_ON(ARRAY_SIZE(arizona_micd_levels) != 0x40);
 
@@ -1295,7 +1357,7 @@
 				dev_err(arizona->dev,
 					"MICD ranges must be sorted\n");
 				ret = -EINVAL;
-				goto err_input;
+				goto err_gpio;
 			}
 		}
 	}
@@ -1314,7 +1376,7 @@
 			dev_err(arizona->dev, "Unsupported MICD level %d\n",
 				info->micd_ranges[i].max);
 			ret = -EINVAL;
-			goto err_input;
+			goto err_gpio;
 		}
 
 		dev_dbg(arizona->dev, "%d ohms for MICD threshold %d\n",
@@ -1387,7 +1449,7 @@
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
 			ret);
-		goto err_input;
+		goto err_gpio;
 	}
 
 	ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
@@ -1458,7 +1520,8 @@
 	arizona_set_irq_wake(arizona, jack_irq_rise, 0);
 err_rise:
 	arizona_free_irq(arizona, jack_irq_rise, info);
-err_input:
+err_gpio:
+	gpiod_put(info->micd_pol_gpio);
 err_register:
 	pm_runtime_disable(&pdev->dev);
 	return ret;
@@ -1470,6 +1533,8 @@
 	struct arizona *arizona = info->arizona;
 	int jack_irq_rise, jack_irq_fall;
 
+	gpiod_put(info->micd_pol_gpio);
+
 	pm_runtime_disable(&pdev->dev);
 
 	regmap_update_bits(arizona->regmap,
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 355459a..57c24fa 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -65,22 +65,6 @@
 	return IRQ_HANDLED;
 }
 
-static ssize_t extcon_gpio_print_state(struct extcon_dev *edev, char *buf)
-{
-	struct device *dev = edev->dev.parent;
-	struct gpio_extcon_data *extcon_data = dev_get_drvdata(dev);
-	const char *state;
-
-	if (extcon_get_state(edev))
-		state = extcon_data->state_on;
-	else
-		state = extcon_data->state_off;
-
-	if (state)
-		return sprintf(buf, "%s\n", state);
-	return -EINVAL;
-}
-
 static int gpio_extcon_probe(struct platform_device *pdev)
 {
 	struct gpio_extcon_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -110,8 +94,6 @@
 	extcon_data->state_on = pdata->state_on;
 	extcon_data->state_off = pdata->state_off;
 	extcon_data->check_on_resume = pdata->check_on_resume;
-	if (pdata->state_on && pdata->state_off)
-		extcon_data->edev->print_state = extcon_gpio_print_state;
 
 	ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN,
 				    pdev->name);
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index f4f3b3d..35b9e11 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -24,6 +24,7 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77693-private.h>
 #include <linux/extcon.h>
 #include <linux/regmap.h>
@@ -42,7 +43,7 @@
 	{
 		/* STATUS2 - [3]ChgDetRun */
 		.addr = MAX77693_MUIC_REG_STATUS2,
-		.data = STATUS2_CHGDETRUN_MASK,
+		.data = MAX77693_STATUS2_CHGDETRUN_MASK,
 	}, {
 		/* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
 		.addr = MAX77693_MUIC_REG_INTMASK1,
@@ -235,7 +236,7 @@
 		 */
 		ret = regmap_write(info->max77693->regmap_muic,
 				  MAX77693_MUIC_REG_CTRL3,
-				  time << CONTROL3_ADCDBSET_SHIFT);
+				  time << MAX77693_CONTROL3_ADCDBSET_SHIFT);
 		if (ret) {
 			dev_err(info->dev, "failed to set ADC debounce time\n");
 			return ret;
@@ -268,7 +269,7 @@
 	if (attached)
 		ctrl1 = val;
 	else
-		ctrl1 = CONTROL1_SW_OPEN;
+		ctrl1 = MAX77693_CONTROL1_SW_OPEN;
 
 	ret = regmap_update_bits(info->max77693->regmap_muic,
 			MAX77693_MUIC_REG_CTRL1, COMP_SW_MASK, ctrl1);
@@ -278,13 +279,14 @@
 	}
 
 	if (attached)
-		ctrl2 |= CONTROL2_CPEN_MASK;	/* LowPwr=0, CPEn=1 */
+		ctrl2 |= MAX77693_CONTROL2_CPEN_MASK;	/* LowPwr=0, CPEn=1 */
 	else
-		ctrl2 |= CONTROL2_LOWPWR_MASK;	/* LowPwr=1, CPEn=0 */
+		ctrl2 |= MAX77693_CONTROL2_LOWPWR_MASK;	/* LowPwr=1, CPEn=0 */
 
 	ret = regmap_update_bits(info->max77693->regmap_muic,
 			MAX77693_MUIC_REG_CTRL2,
-			CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK, ctrl2);
+			MAX77693_CONTROL2_LOWPWR_MASK | MAX77693_CONTROL2_CPEN_MASK,
+			ctrl2);
 	if (ret < 0) {
 		dev_err(info->dev, "failed to update MUIC register\n");
 		return ret;
@@ -326,8 +328,8 @@
 		 * Read ADC value to check cable type and decide cable state
 		 * according to cable type
 		 */
-		adc = info->status[0] & STATUS1_ADC_MASK;
-		adc >>= STATUS1_ADC_SHIFT;
+		adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
+		adc >>= MAX77693_STATUS1_ADC_SHIFT;
 
 		/*
 		 * Check current cable state/cable type and store cable type
@@ -350,8 +352,8 @@
 		 * Read ADC value to check cable type and decide cable state
 		 * according to cable type
 		 */
-		adc = info->status[0] & STATUS1_ADC_MASK;
-		adc >>= STATUS1_ADC_SHIFT;
+		adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
+		adc >>= MAX77693_STATUS1_ADC_SHIFT;
 
 		/*
 		 * Check current cable state/cable type and store cable type
@@ -366,13 +368,13 @@
 		} else {
 			*attached = true;
 
-			adclow = info->status[0] & STATUS1_ADCLOW_MASK;
-			adclow >>= STATUS1_ADCLOW_SHIFT;
-			adc1k = info->status[0] & STATUS1_ADC1K_MASK;
-			adc1k >>= STATUS1_ADC1K_SHIFT;
+			adclow = info->status[0] & MAX77693_STATUS1_ADCLOW_MASK;
+			adclow >>= MAX77693_STATUS1_ADCLOW_SHIFT;
+			adc1k = info->status[0] & MAX77693_STATUS1_ADC1K_MASK;
+			adc1k >>= MAX77693_STATUS1_ADC1K_SHIFT;
 
-			vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
-			vbvolt >>= STATUS2_VBVOLT_SHIFT;
+			vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
+			vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
 
 			/**
 			 * [0x1|VBVolt|ADCLow|ADC1K]
@@ -397,8 +399,8 @@
 		 * Read charger type to check cable type and decide cable state
 		 * according to type of charger cable.
 		 */
-		chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
-		chg_type >>= STATUS2_CHGTYP_SHIFT;
+		chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
+		chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
 
 		if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
 			*attached = false;
@@ -422,10 +424,10 @@
 		 * Read ADC value to check cable type and decide cable state
 		 * according to cable type
 		 */
-		adc = info->status[0] & STATUS1_ADC_MASK;
-		adc >>= STATUS1_ADC_SHIFT;
-		chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
-		chg_type >>= STATUS2_CHGTYP_SHIFT;
+		adc = info->status[0] & MAX77693_STATUS1_ADC_MASK;
+		adc >>= MAX77693_STATUS1_ADC_SHIFT;
+		chg_type = info->status[1] & MAX77693_STATUS2_CHGTYP_MASK;
+		chg_type >>= MAX77693_STATUS2_CHGTYP_SHIFT;
 
 		if (adc == MAX77693_MUIC_ADC_OPEN
 				&& chg_type == MAX77693_CHARGER_TYPE_NONE)
@@ -437,8 +439,8 @@
 		 * Read vbvolt field, if vbvolt is 1,
 		 * this cable is used for charging.
 		 */
-		vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
-		vbvolt >>= STATUS2_VBVOLT_SHIFT;
+		vbvolt = info->status[1] & MAX77693_STATUS2_VBVOLT_MASK;
+		vbvolt >>= MAX77693_STATUS2_VBVOLT_SHIFT;
 
 		cable_type = vbvolt;
 		break;
@@ -520,7 +522,8 @@
 	}
 
 	/* Dock-Car/Desk/Audio, PATH:AUDIO */
-	ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+	ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
+					attached);
 	if (ret < 0)
 		return ret;
 	extcon_set_cable_state_(info->edev, dock_id, attached);
@@ -585,14 +588,16 @@
 	case MAX77693_MUIC_GND_USB_HOST:
 	case MAX77693_MUIC_GND_USB_HOST_VB:
 		/* USB_HOST, PATH: AP_USB */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
+		ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_USB,
+						attached);
 		if (ret < 0)
 			return ret;
 		extcon_set_cable_state_(info->edev, EXTCON_USB_HOST, attached);
 		break;
 	case MAX77693_MUIC_GND_AV_CABLE_LOAD:
 		/* Audio Video Cable with load, PATH:AUDIO */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+		ret = max77693_muic_set_path(info, MAX77693_CONTROL1_SW_AUDIO,
+						attached);
 		if (ret < 0)
 			return ret;
 		extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
@@ -615,7 +620,7 @@
 		int cable_type, bool attached)
 {
 	int ret = 0;
-	u8 path = CONTROL1_SW_OPEN;
+	u8 path = MAX77693_CONTROL1_SW_OPEN;
 
 	dev_info(info->dev,
 		"external connector is %s (adc:0x%02x)\n",
@@ -625,12 +630,12 @@
 	case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:	/* ADC_JIG_USB_OFF */
 	case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:	/* ADC_JIG_USB_ON */
 		/* PATH:AP_USB */
-		path = CONTROL1_SW_USB;
+		path = MAX77693_CONTROL1_SW_USB;
 		break;
 	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:	/* ADC_JIG_UART_OFF */
 	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:	/* ADC_JIG_UART_ON */
 		/* PATH:AP_UART */
-		path = CONTROL1_SW_UART;
+		path = MAX77693_CONTROL1_SW_UART;
 		break;
 	default:
 		dev_err(info->dev, "failed to detect %s jig cable\n",
@@ -1077,7 +1082,7 @@
 		dev_dbg(&pdev->dev, "allocate register map\n");
 	} else {
 		info->max77693->regmap_muic = devm_regmap_init_i2c(
-						info->max77693->muic,
+						info->max77693->i2c_muic,
 						&max77693_muic_regmap_config);
 		if (IS_ERR(info->max77693->regmap_muic)) {
 			ret = PTR_ERR(info->max77693->regmap_muic);
@@ -1164,28 +1169,9 @@
 	}
 
 	for (i = 0; i < num_init_data; i++) {
-		enum max77693_irq_source irq_src
-				= MAX77693_IRQ_GROUP_NR;
-
 		regmap_write(info->max77693->regmap_muic,
 				init_data[i].addr,
 				init_data[i].data);
-
-		switch (init_data[i].addr) {
-		case MAX77693_MUIC_REG_INTMASK1:
-			irq_src = MUIC_INT1;
-			break;
-		case MAX77693_MUIC_REG_INTMASK2:
-			irq_src = MUIC_INT2;
-			break;
-		case MAX77693_MUIC_REG_INTMASK3:
-			irq_src = MUIC_INT3;
-			break;
-		}
-
-		if (irq_src < MAX77693_IRQ_GROUP_NR)
-			info->max77693->irq_masks_cur[irq_src]
-				= init_data[i].data;
 	}
 
 	if (pdata && pdata->muic_data) {
@@ -1199,12 +1185,12 @@
 		if (muic_pdata->path_uart)
 			info->path_uart = muic_pdata->path_uart;
 		else
-			info->path_uart = CONTROL1_SW_UART;
+			info->path_uart = MAX77693_CONTROL1_SW_UART;
 
 		if (muic_pdata->path_usb)
 			info->path_usb = muic_pdata->path_usb;
 		else
-			info->path_usb = CONTROL1_SW_USB;
+			info->path_usb = MAX77693_CONTROL1_SW_USB;
 
 		/*
 		 * Default delay time for detecting cable state
@@ -1216,8 +1202,8 @@
 		else
 			delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
 	} else {
-		info->path_usb = CONTROL1_SW_USB;
-		info->path_uart = CONTROL1_SW_UART;
+		info->path_usb = MAX77693_CONTROL1_SW_USB;
+		info->path_uart = MAX77693_CONTROL1_SW_UART;
 		delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
 	}
 
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index fac2f14..fdd9285 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -15,6 +15,7 @@
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77843-private.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -32,7 +33,7 @@
 
 struct max77843_muic_info {
 	struct device *dev;
-	struct max77843 *max77843;
+	struct max77693_dev *max77843;
 	struct extcon_dev *edev;
 
 	struct mutex mutex;
@@ -198,18 +199,18 @@
 static int max77843_muic_set_path(struct max77843_muic_info *info,
 		u8 val, bool attached)
 {
-	struct max77843 *max77843 = info->max77843;
+	struct max77693_dev *max77843 = info->max77843;
 	int ret = 0;
 	unsigned int ctrl1, ctrl2;
 
 	if (attached)
 		ctrl1 = val;
 	else
-		ctrl1 = CONTROL1_SW_OPEN;
+		ctrl1 = MAX77843_MUIC_CONTROL1_SW_OPEN;
 
 	ret = regmap_update_bits(max77843->regmap_muic,
 			MAX77843_MUIC_REG_CONTROL1,
-			CONTROL1_COM_SW, ctrl1);
+			MAX77843_MUIC_CONTROL1_COM_SW, ctrl1);
 	if (ret < 0) {
 		dev_err(info->dev, "Cannot switch MUIC port\n");
 		return ret;
@@ -243,7 +244,7 @@
 
 	adc = info->status[MAX77843_MUIC_STATUS1] &
 			MAX77843_MUIC_STATUS1_ADC_MASK;
-	adc >>= STATUS1_ADC_SHIFT;
+	adc >>= MAX77843_MUIC_STATUS1_ADC_SHIFT;
 
 	switch (group) {
 	case MAX77843_CABLE_GROUP_ADC:
@@ -309,7 +310,7 @@
 			/* Get VBVolt register bit */
 			gnd_type |= (info->status[MAX77843_MUIC_STATUS2] &
 					MAX77843_MUIC_STATUS2_VBVOLT_MASK);
-			gnd_type >>= STATUS2_VBVOLT_SHIFT;
+			gnd_type >>= MAX77843_MUIC_STATUS2_VBVOLT_SHIFT;
 
 			/* Offset of GND cable */
 			gnd_type |= MAX77843_MUIC_GND_USB_HOST;
@@ -338,7 +339,9 @@
 	switch (gnd_cable_type) {
 	case MAX77843_MUIC_GND_USB_HOST:
 	case MAX77843_MUIC_GND_USB_HOST_VB:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_USB,
+					     attached);
 		if (ret < 0)
 			return ret;
 
@@ -346,7 +349,9 @@
 		break;
 	case MAX77843_MUIC_GND_MHL_VB:
 	case MAX77843_MUIC_GND_MHL:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_OPEN,
+					     attached);
 		if (ret < 0)
 			return ret;
 
@@ -365,7 +370,7 @@
 		int cable_type, bool attached)
 {
 	int ret;
-	u8 path = CONTROL1_SW_OPEN;
+	u8 path = MAX77843_MUIC_CONTROL1_SW_OPEN;
 
 	dev_dbg(info->dev, "external connector is %s (adc:0x%02x)\n",
 			attached ? "attached" : "detached", cable_type);
@@ -373,10 +378,10 @@
 	switch (cable_type) {
 	case MAX77843_MUIC_ADC_FACTORY_MODE_USB_OFF:
 	case MAX77843_MUIC_ADC_FACTORY_MODE_USB_ON:
-		path = CONTROL1_SW_USB;
+		path = MAX77843_MUIC_CONTROL1_SW_USB;
 		break;
 	case MAX77843_MUIC_ADC_FACTORY_MODE_UART_OFF:
-		path = CONTROL1_SW_UART;
+		path = MAX77843_MUIC_CONTROL1_SW_UART;
 		break;
 	default:
 		return -EINVAL;
@@ -474,14 +479,18 @@
 
 	switch (chg_type) {
 	case MAX77843_MUIC_CHG_USB:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_USB, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_USB,
+					     attached);
 		if (ret < 0)
 			return ret;
 
 		extcon_set_cable_state_(info->edev, EXTCON_USB, attached);
 		break;
 	case MAX77843_MUIC_CHG_DOWNSTREAM:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_OPEN,
+					     attached);
 		if (ret < 0)
 			return ret;
 
@@ -489,14 +498,18 @@
 					attached);
 		break;
 	case MAX77843_MUIC_CHG_DEDICATED:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_OPEN,
+					     attached);
 		if (ret < 0)
 			return ret;
 
 		extcon_set_cable_state_(info->edev, EXTCON_TA, attached);
 		break;
 	case MAX77843_MUIC_CHG_SPECIAL_500MA:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_OPEN,
+					     attached);
 		if (ret < 0)
 			return ret;
 
@@ -504,7 +517,9 @@
 					attached);
 		break;
 	case MAX77843_MUIC_CHG_SPECIAL_1A:
-		ret = max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+		ret = max77843_muic_set_path(info,
+					     MAX77843_MUIC_CONTROL1_SW_OPEN,
+					     attached);
 		if (ret < 0)
 			return ret;
 
@@ -528,7 +543,8 @@
 			"failed to detect %s accessory (chg_type:0x%x)\n",
 			attached ? "attached" : "detached", chg_type);
 
-		max77843_muic_set_path(info, CONTROL1_SW_OPEN, attached);
+		max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_OPEN,
+				       attached);
 		return -EINVAL;
 	}
 
@@ -539,7 +555,7 @@
 {
 	struct max77843_muic_info *info = container_of(work,
 			struct max77843_muic_info, irq_work);
-	struct max77843 *max77843 = info->max77843;
+	struct max77693_dev *max77843 = info->max77843;
 	int ret = 0;
 
 	mutex_lock(&info->mutex);
@@ -615,7 +631,7 @@
 {
 	struct max77843_muic_info *info = container_of(to_delayed_work(work),
 			struct max77843_muic_info, wq_detcable);
-	struct max77843 *max77843 = info->max77843;
+	struct max77693_dev *max77843 = info->max77843;
 	int chg_type, adc, ret;
 	bool attached;
 
@@ -656,7 +672,7 @@
 static int max77843_muic_set_debounce_time(struct max77843_muic_info *info,
 		enum max77843_muic_adc_debounce_time time)
 {
-	struct max77843 *max77843 = info->max77843;
+	struct max77693_dev *max77843 = info->max77843;
 	int ret;
 
 	switch (time) {
@@ -667,7 +683,7 @@
 		ret = regmap_update_bits(max77843->regmap_muic,
 				MAX77843_MUIC_REG_CONTROL4,
 				MAX77843_MUIC_CONTROL4_ADCDBSET_MASK,
-				time << CONTROL4_ADCDBSET_SHIFT);
+				time << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT);
 		if (ret < 0) {
 			dev_err(info->dev, "Cannot write MUIC regmap\n");
 			return ret;
@@ -681,7 +697,7 @@
 	return 0;
 }
 
-static int max77843_init_muic_regmap(struct max77843 *max77843)
+static int max77843_init_muic_regmap(struct max77693_dev *max77843)
 {
 	int ret;
 
@@ -720,7 +736,7 @@
 
 static int max77843_muic_probe(struct platform_device *pdev)
 {
-	struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+	struct max77693_dev *max77843 = dev_get_drvdata(pdev->dev.parent);
 	struct max77843_muic_info *info;
 	unsigned int id;
 	int i, ret;
@@ -768,7 +784,7 @@
 	max77843_muic_set_debounce_time(info, MAX77843_DEBOUNCE_TIME_25MS);
 
 	/* Set initial path for UART */
-	max77843_muic_set_path(info, CONTROL1_SW_UART, true);
+	max77843_muic_set_path(info, MAX77843_MUIC_CONTROL1_SW_UART, true);
 
 	/* Check revision number of MUIC device */
 	ret = regmap_read(max77843->regmap_muic, MAX77843_MUIC_REG_ID, &id);
@@ -781,6 +797,15 @@
 	/* Support virtual irq domain for max77843 MUIC device */
 	INIT_WORK(&info->irq_work, max77843_muic_irq_work);
 
+	/* Clear IRQ bits before requesting IRQs */
+	ret = regmap_bulk_read(max77843->regmap_muic,
+			MAX77843_MUIC_REG_INT1, info->status,
+			MAX77843_MUIC_IRQ_NUM);
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
+		goto err_muic_irq;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(max77843_muic_irqs); i++) {
 		struct max77843_muic_irq *muic_irq = &max77843_muic_irqs[i];
 		unsigned int virq = 0;
@@ -821,7 +846,7 @@
 static int max77843_muic_remove(struct platform_device *pdev)
 {
 	struct max77843_muic_info *info = platform_get_drvdata(pdev);
-	struct max77843 *max77843 = info->max77843;
+	struct max77693_dev *max77843 = info->max77843;
 
 	cancel_work_sync(&info->irq_work);
 	regmap_del_irq_chip(max77843->irq, max77843->irq_data_muic);
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index eebdf2a..93c30a8 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -28,6 +28,11 @@
 #include <linux/mfd/palmas.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/workqueue.h>
+
+#define USB_GPIO_DEBOUNCE_MS	20	/* ms */
 
 static const unsigned int palmas_extcon_cable[] = {
 	EXTCON_USB,
@@ -35,8 +40,6 @@
 	EXTCON_NONE,
 };
 
-static const int mutually_exclusive[] = {0x3, 0x0};
-
 static void palmas_usb_wakeup(struct palmas *palmas, int enable)
 {
 	if (enable)
@@ -120,19 +123,54 @@
 	return IRQ_HANDLED;
 }
 
+static void palmas_gpio_id_detect(struct work_struct *work)
+{
+	int id;
+	struct palmas_usb *palmas_usb = container_of(to_delayed_work(work),
+						     struct palmas_usb,
+						     wq_detectid);
+	struct extcon_dev *edev = palmas_usb->edev;
+
+	if (!palmas_usb->id_gpiod)
+		return;
+
+	id = gpiod_get_value_cansleep(palmas_usb->id_gpiod);
+
+	if (id) {
+		extcon_set_cable_state_(edev, EXTCON_USB_HOST, false);
+		dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+	} else {
+		extcon_set_cable_state_(edev, EXTCON_USB_HOST, true);
+		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+	}
+}
+
+static irqreturn_t palmas_gpio_id_irq_handler(int irq, void *_palmas_usb)
+{
+	struct palmas_usb *palmas_usb = _palmas_usb;
+
+	queue_delayed_work(system_power_efficient_wq, &palmas_usb->wq_detectid,
+			   palmas_usb->sw_debounce_jiffies);
+
+	return IRQ_HANDLED;
+}
+
 static void palmas_enable_irq(struct palmas_usb *palmas_usb)
 {
 	palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
 		PALMAS_USB_VBUS_CTRL_SET,
 		PALMAS_USB_VBUS_CTRL_SET_VBUS_ACT_COMP);
 
-	palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
-		PALMAS_USB_ID_CTRL_SET, PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
+	if (palmas_usb->enable_id_detection) {
+		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+			     PALMAS_USB_ID_CTRL_SET,
+			     PALMAS_USB_ID_CTRL_SET_ID_ACT_COMP);
 
-	palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
-		PALMAS_USB_ID_INT_EN_HI_SET,
-		PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
-		PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
+		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
+			     PALMAS_USB_ID_INT_EN_HI_SET,
+			     PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
+			     PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
+	}
 
 	if (palmas_usb->enable_vbus_detection)
 		palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
@@ -171,20 +209,37 @@
 			palmas_usb->wakeup = pdata->wakeup;
 	}
 
+	palmas_usb->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id",
+							GPIOD_IN);
+	if (IS_ERR(palmas_usb->id_gpiod)) {
+		dev_err(&pdev->dev, "failed to get id gpio\n");
+		return PTR_ERR(palmas_usb->id_gpiod);
+	}
+
+	if (palmas_usb->enable_id_detection && palmas_usb->id_gpiod) {
+		palmas_usb->enable_id_detection = false;
+		palmas_usb->enable_gpio_id_detection = true;
+	}
+
+	if (palmas_usb->enable_gpio_id_detection) {
+		u32 debounce;
+
+		if (of_property_read_u32(node, "debounce-delay-ms", &debounce))
+			debounce = USB_GPIO_DEBOUNCE_MS;
+
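+		/*
+		 * Try to debounce the ID line in hardware; if the GPIO
+		 * controller cannot do that, fall back to debouncing in
+		 * software via the delayed work scheduled from the IRQ
+		 * handler.
+		 */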
+		status = gpiod_set_debounce(palmas_usb->id_gpiod,
+					    debounce * 1000);
+		if (status < 0)
+			palmas_usb->sw_debounce_jiffies = msecs_to_jiffies(debounce);
+	}
+
+	INIT_DELAYED_WORK(&palmas_usb->wq_detectid, palmas_gpio_id_detect);
+
 	palmas->usb = palmas_usb;
 	palmas_usb->palmas = palmas;
 
 	palmas_usb->dev	 = &pdev->dev;
 
-	palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_ID_OTG_IRQ);
-	palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_ID_IRQ);
-	palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_VBUS_OTG_IRQ);
-	palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
-						PALMAS_VBUS_IRQ);
-
 	palmas_usb_wakeup(palmas, palmas_usb->wakeup);
 
 	platform_set_drvdata(pdev, palmas_usb);
@@ -195,7 +250,6 @@
 		dev_err(&pdev->dev, "failed to allocate extcon device\n");
 		return -ENOMEM;
 	}
-	palmas_usb->edev->mutually_exclusive = mutually_exclusive;
 
 	status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
 	if (status) {
@@ -204,6 +258,10 @@
 	}
 
 	if (palmas_usb->enable_id_detection) {
+		palmas_usb->id_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+							     PALMAS_ID_OTG_IRQ);
+		palmas_usb->id_irq = regmap_irq_get_virq(palmas->irq_data,
+							 PALMAS_ID_IRQ);
 		status = devm_request_threaded_irq(palmas_usb->dev,
 				palmas_usb->id_irq,
 				NULL, palmas_id_irq_handler,
@@ -215,9 +273,33 @@
 					palmas_usb->id_irq, status);
 			return status;
 		}
+	} else if (palmas_usb->enable_gpio_id_detection) {
+		palmas_usb->gpio_id_irq = gpiod_to_irq(palmas_usb->id_gpiod);
+		if (palmas_usb->gpio_id_irq < 0) {
+			dev_err(&pdev->dev, "failed to get id irq\n");
+			return palmas_usb->gpio_id_irq;
+		}
+		status = devm_request_threaded_irq(&pdev->dev,
+						   palmas_usb->gpio_id_irq,
+						   NULL,
+						   palmas_gpio_id_irq_handler,
+						   IRQF_TRIGGER_RISING |
+						   IRQF_TRIGGER_FALLING |
+						   IRQF_ONESHOT,
+						   "palmas_usb_id",
+						   palmas_usb);
+		if (status < 0) {
+			dev_err(&pdev->dev,
+				"failed to request handler for id irq\n");
+			return status;
+		}
 	}
 
 	if (palmas_usb->enable_vbus_detection) {
+		palmas_usb->vbus_otg_irq = regmap_irq_get_virq(palmas->irq_data,
+						       PALMAS_VBUS_OTG_IRQ);
+		palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
+							   PALMAS_VBUS_IRQ);
 		status = devm_request_threaded_irq(palmas_usb->dev,
 				palmas_usb->vbus_irq, NULL,
 				palmas_vbus_irq_handler,
@@ -232,10 +314,21 @@
 	}
 
 	palmas_enable_irq(palmas_usb);
+	/* perform initial detection */
+	palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
 	device_set_wakeup_capable(&pdev->dev, true);
 	return 0;
 }
 
+static int palmas_usb_remove(struct platform_device *pdev)
+{
+	struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
+
+	cancel_delayed_work_sync(&palmas_usb->wq_detectid);
+
+	return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -246,6 +339,8 @@
 			enable_irq_wake(palmas_usb->vbus_irq);
 		if (palmas_usb->enable_id_detection)
 			enable_irq_wake(palmas_usb->id_irq);
+		if (palmas_usb->enable_gpio_id_detection)
+			enable_irq_wake(palmas_usb->gpio_id_irq);
 	}
 	return 0;
 }
@@ -259,6 +354,8 @@
 			disable_irq_wake(palmas_usb->vbus_irq);
 		if (palmas_usb->enable_id_detection)
 			disable_irq_wake(palmas_usb->id_irq);
+		if (palmas_usb->enable_gpio_id_detection)
+			disable_irq_wake(palmas_usb->gpio_id_irq);
 	}
 	return 0;
 };
@@ -276,6 +373,7 @@
 
 static struct platform_driver palmas_usb_driver = {
 	.probe = palmas_usb_probe,
+	.remove = palmas_usb_remove,
 	.driver = {
 		.name = "palmas-usb",
 		.of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon-rt8973a.c b/drivers/extcon/extcon-rt8973a.c
index 92c9392..11592e9 100644
--- a/drivers/extcon/extcon-rt8973a.c
+++ b/drivers/extcon/extcon-rt8973a.c
@@ -693,7 +693,6 @@
 static struct i2c_driver rt8973a_muic_i2c_driver = {
 	.driver		= {
 		.name	= "rt8973a",
-		.owner	= THIS_MODULE,
 		.pm	= &rt8973a_muic_pm_ops,
 		.of_match_table = rt8973a_dt_match,
 	},
diff --git a/drivers/extcon/extcon-sm5502.c b/drivers/extcon/extcon-sm5502.c
index 817dece..0ffefef 100644
--- a/drivers/extcon/extcon-sm5502.c
+++ b/drivers/extcon/extcon-sm5502.c
@@ -685,7 +685,6 @@
 static struct i2c_driver sm5502_muic_i2c_driver = {
 	.driver		= {
 		.name	= "sm5502",
-		.owner	= THIS_MODULE,
 		.pm	= &sm5502_muic_pm_ops,
 		.of_match_table = sm5502_dt_match,
 	},
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a2a4453..2b2fecf 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/extcon.h>
+#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 43b57b0..a07addd 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -126,7 +126,7 @@
 
 static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 {
-	unsigned int id = -EINVAL;
+	int id = -EINVAL;
 	int i = 0;
 
 	/* Find the id of extcon cable */
@@ -143,7 +143,7 @@
 
 static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
 {
-	unsigned int id;
+	int id;
 
 	if (edev->max_supported == 0)
 		return -EINVAL;
@@ -172,14 +172,6 @@
 	int i, count = 0;
 	struct extcon_dev *edev = dev_get_drvdata(dev);
 
-	if (edev->print_state) {
-		int ret = edev->print_state(edev, buf);
-
-		if (ret >= 0)
-			return ret;
-		/* Use default if failed */
-	}
-
 	if (edev->max_supported == 0)
 		return sprintf(buf, "%u\n", edev->state);
 
@@ -272,6 +264,9 @@
 	unsigned long flags;
 	bool attached;
 
+	if (!edev)
+		return -EINVAL;
+
 	spin_lock_irqsave(&edev->lock, flags);
 
 	if (edev->state != ((edev->state & ~mask) | (state & mask))) {
@@ -345,6 +340,9 @@
  */
 int extcon_set_state(struct extcon_dev *edev, u32 state)
 {
+	if (!edev)
+		return -EINVAL;
+
 	return extcon_update_state(edev, 0xffffffff, state);
 }
 EXPORT_SYMBOL_GPL(extcon_set_state);
@@ -358,6 +356,9 @@
 {
 	int index;
 
+	if (!edev)
+		return -EINVAL;
+
 	index = find_cable_index_by_id(edev, id);
 	if (index < 0)
 		return index;
@@ -378,7 +379,7 @@
  */
 int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
 {
-	unsigned int id;
+	int id;
 
 	id = find_cable_id_by_name(edev, cable_name);
 	if (id < 0)
@@ -402,6 +403,9 @@
 	u32 state;
 	int index;
 
+	if (!edev)
+		return -EINVAL;
+
 	index = find_cable_index_by_id(edev, id);
 	if (index < 0)
 		return index;
@@ -426,7 +430,7 @@
 int extcon_set_cable_state(struct extcon_dev *edev,
 			const char *cable_name, bool cable_state)
 {
-	unsigned int id;
+	int id;
 
 	id = find_cable_id_by_name(edev, cable_name);
 	if (id < 0)
@@ -444,6 +448,9 @@
 {
 	struct extcon_dev *sd;
 
+	if (!extcon_name)
+		return ERR_PTR(-EINVAL);
+
 	mutex_lock(&extcon_dev_list_lock);
 	list_for_each_entry(sd, &extcon_dev_list, entry) {
 		if (!strcmp(sd->name, extcon_name))
@@ -572,6 +579,9 @@
 	unsigned long flags;
 	int ret, idx;
 
+	if (!edev || !nb)
+		return -EINVAL;
+
 	idx = find_cable_index_by_id(edev, id);
 
 	spin_lock_irqsave(&edev->lock, flags);
@@ -594,6 +604,9 @@
 	unsigned long flags;
 	int ret, idx;
 
+	if (!edev || !nb)
+		return -EINVAL;
+
 	idx = find_cable_index_by_id(edev, id);
 
 	spin_lock_irqsave(&edev->lock, flags);
@@ -654,6 +667,9 @@
 {
 	struct extcon_dev *edev;
 
+	if (!supported_cable)
+		return ERR_PTR(-EINVAL);
+
 	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
 	if (!edev)
 		return ERR_PTR(-ENOMEM);
@@ -754,7 +770,7 @@
 			return ret;
 	}
 
-	if (!edev->supported_cable)
+	if (!edev || !edev->supported_cable)
 		return -EINVAL;
 
 	for (; edev->supported_cable[index] != EXTCON_NONE; index++);
@@ -960,6 +976,9 @@
 {
 	int index;
 
+	if (!edev)
+		return;
+
 	mutex_lock(&extcon_dev_list_lock);
 	list_del(&edev->entry);
 	mutex_unlock(&extcon_dev_list_lock);
@@ -1066,6 +1085,9 @@
 	struct device_node *node;
 	struct extcon_dev *edev;
 
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
 	if (!dev->of_node) {
 		dev_err(dev, "device does not have a device node entry\n");
 		return ERR_PTR(-EINVAL);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 99c69a3..d8de6a8 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -5,6 +5,9 @@
 
 menu "Firmware Drivers"
 
+config ARM_PSCI_FW
+	bool
+
 config EDD
 	tristate "BIOS Enhanced Disk Drive calls determine boot disk"
 	depends on X86
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 4a4b897..000830f 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for the linux kernel.
 #
+obj-$(CONFIG_ARM_PSCI_FW)	+= psci.o
 obj-$(CONFIG_DMI)		+= dmi_scan.o
 obj-$(CONFIG_DMI_SYSFS)		+= dmi-sysfs.o
 obj-$(CONFIG_EDD)		+= edd.o
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
new file mode 100644
index 0000000..42700f0
--- /dev/null
+++ b/drivers/firmware/psci.c
@@ -0,0 +1,382 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#define pr_fmt(fmt) "psci: " fmt
+
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/psci.h>
+#include <linux/reboot.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+#include <asm/smp_plat.h>
+
+/*
+ * While a 64-bit OS can make calls with SMC32 calling conventions, for some
+ * calls it is necessary to use SMC64 to pass or return 64-bit values. For such
+ * calls PSCI_0_2_FN_NATIVE(x) will choose the appropriate (native-width)
+ * function ID.
+ */
+#ifdef CONFIG_64BIT
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN64_##name
+#else
+#define PSCI_0_2_FN_NATIVE(name)	PSCI_0_2_FN_##name
+#endif
+
+/*
+ * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
+ * calls to its resident CPU, so we must avoid issuing those. We never migrate
+ * a Trusted OS even if it claims to be capable of migration -- doing so will
+ * require cooperation with a Trusted OS driver.
+ */
+static int resident_cpu = -1;
+
+bool psci_tos_resident_on(int cpu)
+{
+	return cpu == resident_cpu;
+}
+
+struct psci_operations psci_ops;
+
+typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+				unsigned long, unsigned long);
+asmlinkage psci_fn __invoke_psci_fn_hvc;
+asmlinkage psci_fn __invoke_psci_fn_smc;
+static psci_fn *invoke_psci_fn;
+
+enum psci_function {
+	PSCI_FN_CPU_SUSPEND,
+	PSCI_FN_CPU_ON,
+	PSCI_FN_CPU_OFF,
+	PSCI_FN_MIGRATE,
+	PSCI_FN_MAX,
+};
+
+static u32 psci_function_id[PSCI_FN_MAX];
+
+static int psci_to_linux_errno(int errno)
+{
+	switch (errno) {
+	case PSCI_RET_SUCCESS:
+		return 0;
+	case PSCI_RET_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case PSCI_RET_INVALID_PARAMS:
+		return -EINVAL;
+	case PSCI_RET_DENIED:
+		return -EPERM;
+	}
+
+	return -EINVAL;
+}
+
+static u32 psci_get_version(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+}
+
+static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
+	err = invoke_psci_fn(fn, state, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_off(u32 state)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_OFF];
+	err = invoke_psci_fn(fn, state, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_CPU_ON];
+	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_migrate(unsigned long cpuid)
+{
+	int err;
+	u32 fn;
+
+	fn = psci_function_id[PSCI_FN_MIGRATE];
+	err = invoke_psci_fn(fn, cpuid, 0, 0);
+	return psci_to_linux_errno(err);
+}
+
+static int psci_affinity_info(unsigned long target_affinity,
+		unsigned long lowest_affinity_level)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(AFFINITY_INFO),
+			      target_affinity, lowest_affinity_level, 0);
+}
+
+static int psci_migrate_info_type(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
+}
+
+static unsigned long psci_migrate_info_up_cpu(void)
+{
+	return invoke_psci_fn(PSCI_0_2_FN_NATIVE(MIGRATE_INFO_UP_CPU),
+			      0, 0, 0);
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+	const char *method;
+
+	pr_info("probing for conduit method from DT.\n");
+
+	if (of_property_read_string(np, "method", &method)) {
+		pr_warn("missing \"method\" property\n");
+		return -ENXIO;
+	}
+
+	if (!strcmp("hvc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	} else if (!strcmp("smc", method)) {
+		invoke_psci_fn = __invoke_psci_fn_smc;
+	} else {
+		pr_warn("invalid \"method\" property: %s\n", method);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
+ * return DENIED (which would be fatal).
+ */
+static void __init psci_init_migrate(void)
+{
+	unsigned long cpuid;
+	int type, cpu = -1;
+
+	type = psci_ops.migrate_info_type();
+
+	if (type == PSCI_0_2_TOS_MP) {
+		pr_info("Trusted OS migration not required\n");
+		return;
+	}
+
+	if (type == PSCI_RET_NOT_SUPPORTED) {
+		pr_info("MIGRATE_INFO_TYPE not supported.\n");
+		return;
+	}
+
+	if (type != PSCI_0_2_TOS_UP_MIGRATE &&
+	    type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
+		pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
+		return;
+	}
+
+	cpuid = psci_migrate_info_up_cpu();
+	if (cpuid & ~MPIDR_HWID_BITMASK) {
+		pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
+			cpuid);
+		return;
+	}
+
+	cpu = get_logical_index(cpuid);
+	resident_cpu = cpu >= 0 ? cpu : -1;
+
+	pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+}
+
+static void __init psci_0_2_set_functions(void)
+{
+	pr_info("Using standard PSCI v0.2 function IDs\n");
+	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_NATIVE(CPU_SUSPEND);
+	psci_ops.cpu_suspend = psci_cpu_suspend;
+
+	psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+	psci_ops.cpu_off = psci_cpu_off;
+
+	psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_NATIVE(CPU_ON);
+	psci_ops.cpu_on = psci_cpu_on;
+
+	psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_NATIVE(MIGRATE);
+	psci_ops.migrate = psci_migrate;
+
+	psci_ops.affinity_info = psci_affinity_info;
+
+	psci_ops.migrate_info_type = psci_migrate_info_type;
+
+	arm_pm_restart = psci_sys_reset;
+
+	pm_power_off = psci_sys_poweroff;
+}
+
+/*
+ * Probe function for PSCI firmware versions >= 0.2
+ */
+static int __init psci_probe(void)
+{
+	u32 ver = psci_get_version();
+
+	pr_info("PSCIv%d.%d detected in firmware.\n",
+			PSCI_VERSION_MAJOR(ver),
+			PSCI_VERSION_MINOR(ver));
+
+	if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
+		pr_err("Conflicting PSCI version detected.\n");
+		return -EINVAL;
+	}
+
+	psci_0_2_set_functions();
+
+	psci_init_migrate();
+
+	return 0;
+}
+
+typedef int (*psci_initcall_t)(const struct device_node *);
+
+/*
+ * PSCI init function for PSCI versions >=0.2
+ *
+ * Probe based on the PSCI_VERSION function
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+	/*
+	 * Starting with v0.2, the PSCI specification introduced a call
+	 * (PSCI_VERSION) that allows probing the firmware version, so
+	 * that PSCI function IDs and version-specific initialization
+	 * can be carried out according to the specific version reported
+	 * by the firmware.
+	 */
+	err = psci_probe();
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+/*
+ * For PSCI < v0.2, get the PSCI function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
+{
+	u32 id;
+	int err;
+
+	err = get_set_conduit_method(np);
+
+	if (err)
+		goto out_put_node;
+
+	pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
+	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
+		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
+		psci_ops.cpu_suspend = psci_cpu_suspend;
+	}
+
+	if (!of_property_read_u32(np, "cpu_off", &id)) {
+		psci_function_id[PSCI_FN_CPU_OFF] = id;
+		psci_ops.cpu_off = psci_cpu_off;
+	}
+
+	if (!of_property_read_u32(np, "cpu_on", &id)) {
+		psci_function_id[PSCI_FN_CPU_ON] = id;
+		psci_ops.cpu_on = psci_cpu_on;
+	}
+
+	if (!of_property_read_u32(np, "migrate", &id)) {
+		psci_function_id[PSCI_FN_MIGRATE] = id;
+		psci_ops.migrate = psci_migrate;
+	}
+
+out_put_node:
+	of_node_put(np);
+	return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+	{ .compatible = "arm,psci",	.data = psci_0_1_init},
+	{ .compatible = "arm,psci-0.2",	.data = psci_0_2_init},
+	{},
+};
+
+int __init psci_dt_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *matched_np;
+	psci_initcall_t init_fn;
+
+	np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+	if (!np)
+		return -ENODEV;
+
+	init_fn = (psci_initcall_t)matched_np->data;
+	return init_fn(np);
+}
+
+#ifdef CONFIG_ACPI
+/*
+ * We use PSCI 0.2+ when ACPI is deployed on ARM64, as explicitly
+ * clarified in the SBBR.
+ */
+int __init psci_acpi_init(void)
+{
+	if (!acpi_psci_present()) {
+		pr_info("is not implemented in ACPI.\n");
+		return -EOPNOTSUPP;
+	}
+
+	pr_info("probing for conduit method from ACPI.\n");
+
+	if (acpi_psci_use_hvc())
+		invoke_psci_fn = __invoke_psci_fn_hvc;
+	else
+		invoke_psci_fn = __invoke_psci_fn_smc;
+
+	return psci_probe();
+}
+#endif
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 1bd6f9c..29e6850 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/qcom_scm.h>
 
-#include <asm/outercache.h>
 #include <asm/cacheflush.h>
 
 #include "qcom_scm.h"
@@ -219,8 +218,7 @@
 	 * Flush the command buffer so that the secure world sees
 	 * the correct data.
 	 */
-	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
-	outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+	secure_flush_area(cmd, cmd->len);
 
 	ret = smc(cmd_addr);
 	if (ret < 0)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8f1fe73..b4fc9e4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -113,7 +113,6 @@
 config GPIO_ALTERA
 	tristate "Altera GPIO"
 	depends on OF_GPIO
-	select GPIO_GENERIC
 	select GPIOLIB_IRQCHIP
 	help
 	  Say Y or M here to build support for the Altera PIO device.
@@ -131,6 +130,7 @@
 	default y if ARCH_BRCMSTB
 	depends on OF_GPIO && (ARCH_BRCMSTB || COMPILE_TEST)
 	select GPIO_GENERIC
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
 
@@ -172,6 +172,7 @@
 	depends on CRIS || COMPILE_TEST
 	depends on OF
 	select GPIO_GENERIC
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to support the GPIO controller on Axis ETRAX FS SoCs.
 
@@ -308,7 +309,6 @@
 	def_bool y
 	depends on PLAT_ORION
 	depends on OF
-	select GPIO_GENERIC
 	select GENERIC_IRQ_CHIP
 
 config GPIO_MXC
@@ -1005,6 +1005,12 @@
 	  SPI driver for Freescale MC33880 high-side/low-side switch.
 	  This provides GPIO interface supporting inputs and outputs.
 
+config GPIO_ZX
+	bool "ZTE ZX GPIO support"
+	select GPIOLIB_IRQCHIP
+	help
+	  Say yes here to support the GPIO device on ZTE ZX SoCs.
+
 endmenu
 
 menu "USB GPIO expanders"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f82cd67..f79a7c4 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_GPIO_ALTERA)  	+= gpio-altera.o
 obj-$(CONFIG_GPIO_AMD8111)	+= gpio-amd8111.o
 obj-$(CONFIG_GPIO_ARIZONA)	+= gpio-arizona.o
+obj-$(CONFIG_ATH79)		+= gpio-ath79.o
 obj-$(CONFIG_GPIO_BCM_KONA)	+= gpio-bcm-kona.o
 obj-$(CONFIG_GPIO_BRCMSTB)	+= gpio-brcmstb.o
 obj-$(CONFIG_GPIO_BT8XX)	+= gpio-bt8xx.o
@@ -116,3 +117,4 @@
 obj-$(CONFIG_GPIO_XTENSA)	+= gpio-xtensa.o
 obj-$(CONFIG_GPIO_ZEVIO)	+= gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)		+= gpio-zynq.o
+obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 07ba823..903fcf4 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -59,13 +59,13 @@
  * automatically disposed on driver detach. See gpiod_get() for detailed
  * information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 					      const char *con_id,
 					      enum gpiod_flags flags)
 {
 	return devm_gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(__devm_gpiod_get);
+EXPORT_SYMBOL(devm_gpiod_get);
 
 /**
  * devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
@@ -77,13 +77,13 @@
  * are automatically disposed on driver detach. See gpiod_get_optional() for
  * detailed information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 						       const char *con_id,
 						       enum gpiod_flags flags)
 {
 	return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL(__devm_gpiod_get_optional);
+EXPORT_SYMBOL(devm_gpiod_get_optional);
 
 /**
  * devm_gpiod_get_index - Resource-managed gpiod_get_index()
@@ -96,7 +96,7 @@
  * automatically disposed on driver detach. See gpiod_get_index() for detailed
  * information about behavior and return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 						    const char *con_id,
 						    unsigned int idx,
 						    enum gpiod_flags flags)
@@ -120,7 +120,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL(__devm_gpiod_get_index);
+EXPORT_SYMBOL(devm_gpiod_get_index);
 
 /**
  * devm_get_gpiod_from_child - get a GPIO descriptor from a device's child node
@@ -182,10 +182,10 @@
  * gpiod_get_index_optional() for detailed information about behavior and
  * return values.
  */
-struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
 							     const char *con_id,
 							     unsigned int index,
-							 enum gpiod_flags flags)
+							     enum gpiod_flags flags)
 {
 	struct gpio_desc *desc;
 
@@ -197,7 +197,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL(__devm_gpiod_get_index_optional);
+EXPORT_SYMBOL(devm_gpiod_get_index_optional);
 
 /**
  * devm_gpiod_get_array - Resource-managed gpiod_get_array()
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index 0763655..6ed7c0f 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -129,7 +129,7 @@
 	if (IS_ERR(dat))
 		return PTR_ERR(dat);
 
-	priv->flags = (unsigned)of_id->data;
+	priv->flags = (uintptr_t) of_id->data;
 
 	err = bgpio_init(&priv->bgc, &pdev->dev,
 			 DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index d3fe6a6..984186e 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -305,15 +305,7 @@
 		irq_set_chip_and_handler(irq, &adp5588_irq_chip,
 					 handle_level_irq);
 		irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-		/*
-		 * ARM needs us to explicitly flag the IRQ as VALID,
-		 * once we do so, it will also set the noprobe.
-		 */
-		set_irq_flags(irq, IRQF_VALID);
-#else
-		irq_set_noprobe(irq);
-#endif
+		irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(client->irq,
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 0f3d336..9b7e0b3 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -338,9 +338,9 @@
 {
 	struct altera_gpio_chip *altera_gc = platform_get_drvdata(pdev);
 
-	gpiochip_remove(&altera_gc->mmchip.gc);
+	of_mm_gpiochip_remove(&altera_gc->mmchip);
 
-	return -EIO;
+	return 0;
 }
 
 static const struct of_device_id altera_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
new file mode 100644
index 0000000..03b9953
--- /dev/null
+++ b/drivers/gpio/gpio-ath79.c
@@ -0,0 +1,204 @@
+/*
+ *  Atheros AR71XX/AR724X/AR913X GPIO API support
+ *
+ *  Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
+ *  Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ *  Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-ath79.h>
+#include <linux/of_device.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+static void __iomem *ath79_gpio_base;
+static u32 ath79_gpio_count;
+static DEFINE_SPINLOCK(ath79_gpio_lock);
+
+static void __ath79_gpio_set_value(unsigned gpio, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+
+	if (value)
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << gpio, base + AR71XX_GPIO_REG_CLEAR);
+}
+
+static int __ath79_gpio_get_value(unsigned gpio)
+{
+	return (__raw_readl(ath79_gpio_base + AR71XX_GPIO_REG_IN) >> gpio) & 1;
+}
+
+static int ath79_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+	return __ath79_gpio_get_value(offset);
+}
+
+static void ath79_gpio_set_value(struct gpio_chip *chip,
+				  unsigned offset, int value)
+{
+	__ath79_gpio_set_value(offset, value);
+}
+
+static int ath79_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ath79_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset, int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
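+/*
+ * On the AR9340-style controllers the OE register has inverted polarity
+ * compared to the older parts: setting the bit configures the line as an
+ * input, so these variants get their own direction callbacks.
+ */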
+static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+					int value)
+{
+	void __iomem *base = ath79_gpio_base;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ath79_gpio_lock, flags);
+
+	if (value)
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
+	else
+		__raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
+
+	__raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
+		     base + AR71XX_GPIO_REG_OE);
+
+	spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+
+	return 0;
+}
+
+static struct gpio_chip ath79_gpio_chip = {
+	.label			= "ath79",
+	.get			= ath79_gpio_get_value,
+	.set			= ath79_gpio_set_value,
+	.direction_input	= ath79_gpio_direction_input,
+	.direction_output	= ath79_gpio_direction_output,
+	.base			= 0,
+};
+
+static const struct of_device_id ath79_gpio_of_match[] = {
+	{ .compatible = "qca,ar7100-gpio" },
+	{ .compatible = "qca,ar9340-gpio" },
+	{},
+};
+
+static int ath79_gpio_probe(struct platform_device *pdev)
+{
+	struct ath79_gpio_platform_data *pdata = pdev->dev.platform_data;
+	struct device_node *np = pdev->dev.of_node;
+	struct resource *res;
+	bool oe_inverted;
+	int err;
+
+	if (np) {
+		err = of_property_read_u32(np, "ngpios", &ath79_gpio_count);
+		if (err) {
+			dev_err(&pdev->dev, "ngpios property is not valid\n");
+			return err;
+		}
+		if (ath79_gpio_count >= 32) {
+			dev_err(&pdev->dev, "ngpios must be less than 32\n");
+			return -EINVAL;
+		}
+		oe_inverted = of_device_is_compatible(np, "qca,ar9340-gpio");
+	} else if (pdata) {
+		ath79_gpio_count = pdata->ngpios;
+		oe_inverted = pdata->oe_inverted;
+	} else {
+		dev_err(&pdev->dev, "No DT node or platform data found\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ath79_gpio_base = devm_ioremap_nocache(
+		&pdev->dev, res->start, resource_size(res));
+	if (!ath79_gpio_base)
+		return -ENOMEM;
+
+	ath79_gpio_chip.dev = &pdev->dev;
+	ath79_gpio_chip.ngpio = ath79_gpio_count;
+	if (oe_inverted) {
+		ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
+		ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
+	}
+
+	err = gpiochip_add(&ath79_gpio_chip);
+	if (err) {
+		dev_err(&pdev->dev,
+			"cannot add AR71xx GPIO chip, error=%d", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static struct platform_driver ath79_gpio_driver = {
+	.driver = {
+		.name = "ath79-gpio",
+		.of_match_table	= ath79_gpio_of_match,
+	},
+	.probe = ath79_gpio_probe,
+};
+
+module_platform_driver(ath79_gpio_driver);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 40343fa..31b90ac 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -438,7 +438,7 @@
 	void __iomem *reg_base;
 	int bit, bank_id;
 	unsigned long sta;
-	struct bcm_kona_gpio_bank *bank = irq_get_handler_data(irq);
+	struct bcm_kona_gpio_bank *bank = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
@@ -525,11 +525,7 @@
 		return ret;
 	irq_set_lockdep_class(irq, &gpio_lock_class);
 	irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -644,17 +640,6 @@
 		dev_err(dev, "Couldn't add GPIO chip -- %d\n", ret);
 		goto err_irq_domain;
 	}
-	for (i = 0; i < chip->ngpio; i++) {
-		int irq = bcm_kona_gpio_to_irq(chip, i);
-		irq_set_lockdep_class(irq, &gpio_lock_class);
-		irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip,
-					 handle_simple_irq);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID);
-#else
-		irq_set_noprobe(irq);
-#endif
-	}
 	for (i = 0; i < kona_gpio->num_bank; i++) {
 		bank = &kona_gpio->banks[i];
 		irq_set_chained_handler_and_data(bank->irq,
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 4630a81..9ea86d2 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -17,6 +17,10 @@
 #include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/basic_mmio_gpio.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/interrupt.h>
+#include <linux/reboot.h>
 
 #define GIO_BANK_SIZE           0x20
 #define GIO_ODEN(bank)          (((bank) * GIO_BANK_SIZE) + 0x00)
@@ -34,14 +38,18 @@
 	struct bgpio_chip bgc;
 	struct brcmstb_gpio_priv *parent_priv;
 	u32 width;
+	struct irq_chip irq_chip;
 };
 
 struct brcmstb_gpio_priv {
 	struct list_head bank_list;
 	void __iomem *reg_base;
-	int num_banks;
 	struct platform_device *pdev;
+	int parent_irq;
 	int gpio_base;
+	bool can_wake;
+	int parent_wake_irq;
+	struct notifier_block reboot_notifier;
 };
 
 #define MAX_GPIO_PER_BANK           32
@@ -63,6 +71,203 @@
 	return bank->parent_priv;
 }
 
+static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
+		unsigned int offset, bool enable)
+{
+	struct bgpio_chip *bgc = &bank->bgc;
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	u32 mask = bgc->pin2mask(bgc, offset);
+	u32 imask;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bgc->lock, flags);
+	imask = bgc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+	if (enable)
+		imask |= mask;
+	else
+		imask &= ~mask;
+	bgc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
+	spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+/* -------------------- IRQ chip functions -------------------- */
+
+static void brcmstb_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+	brcmstb_gpio_set_imask(bank, d->hwirq, false);
+}
+
+static void brcmstb_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+
+	brcmstb_gpio_set_imask(bank, d->hwirq, true);
+}
+
+static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	u32 mask = BIT(d->hwirq);
+	u32 edge_insensitive, iedge_insensitive;
+	u32 edge_config, iedge_config;
+	u32 level, ilevel;
+	unsigned long flags;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_LOW:
+		level = 0;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		level = mask;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		level = 0;
+		edge_config = 0;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		level = 0;
+		edge_config = mask;
+		edge_insensitive = 0;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		level = 0;
+		edge_config = 0;  /* don't care, but want known value */
+		edge_insensitive = mask;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&bank->bgc.lock, flags);
+
+	iedge_config = bank->bgc.read_reg(priv->reg_base +
+			GIO_EC(bank->id)) & ~mask;
+	iedge_insensitive = bank->bgc.read_reg(priv->reg_base +
+			GIO_EI(bank->id)) & ~mask;
+	ilevel = bank->bgc.read_reg(priv->reg_base +
+			GIO_LEVEL(bank->id)) & ~mask;
+
+	bank->bgc.write_reg(priv->reg_base + GIO_EC(bank->id),
+			iedge_config | edge_config);
+	bank->bgc.write_reg(priv->reg_base + GIO_EI(bank->id),
+			iedge_insensitive | edge_insensitive);
+	bank->bgc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
+			ilevel | level);
+
+	spin_unlock_irqrestore(&bank->bgc.lock, flags);
+	return 0;
+}
+
+static int brcmstb_gpio_priv_set_wake(struct brcmstb_gpio_priv *priv,
+		unsigned int enable)
+{
+	int ret = 0;
+
+	/*
+	 * Only enable wake IRQ once for however many hwirqs can wake
+	 * since they all use the same wake IRQ.  Mask will be set
+	 * up appropriately thanks to IRQCHIP_MASK_ON_SUSPEND flag.
+	 */
+	if (enable)
+		ret = enable_irq_wake(priv->parent_wake_irq);
+	else
+		ret = disable_irq_wake(priv->parent_wake_irq);
+	if (ret)
+		dev_err(&priv->pdev->dev, "failed to %s wake-up interrupt\n",
+				enable ? "enable" : "disable");
+	return ret;
+}
+
+static int brcmstb_gpio_irq_set_wake(struct irq_data *d, unsigned int enable)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+
+	return brcmstb_gpio_priv_set_wake(priv, enable);
+}
+
+static irqreturn_t brcmstb_gpio_wake_irq_handler(int irq, void *data)
+{
+	struct brcmstb_gpio_priv *priv = data;
+
+	if (!priv || irq != priv->parent_wake_irq)
+		return IRQ_NONE;
+	pm_wakeup_event(&priv->pdev->dev, 0);
+	return IRQ_HANDLED;
+}
+
+static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
+{
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	struct irq_domain *irq_domain = bank->bgc.gc.irqdomain;
+	void __iomem *reg_base = priv->reg_base;
+	unsigned long status;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bank->bgc.lock, flags);
+	while ((status = bank->bgc.read_reg(reg_base + GIO_STAT(bank->id)) &
+			 bank->bgc.read_reg(reg_base + GIO_MASK(bank->id)))) {
+		int bit;
+
+		for_each_set_bit(bit, &status, 32) {
+			u32 stat = bank->bgc.read_reg(reg_base +
+						      GIO_STAT(bank->id));
+			if (bit >= bank->width)
+				dev_warn(&priv->pdev->dev,
+					 "IRQ for invalid GPIO (bank=%d, offset=%d)\n",
+					 bank->id, bit);
+			bank->bgc.write_reg(reg_base + GIO_STAT(bank->id),
+					    stat | BIT(bit));
+			generic_handle_irq(irq_find_mapping(irq_domain, bit));
+		}
+	}
+	spin_unlock_irqrestore(&bank->bgc.lock, flags);
+}
+
+/* Each UPG GIO block has one IRQ for all banks */
+static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct list_head *pos;
+
+	/* Interrupts weren't properly cleared during probe */
+	BUG_ON(!priv || !chip);
+
+	chained_irq_enter(chip, desc);
+	list_for_each(pos, &priv->bank_list) {
+		struct brcmstb_gpio_bank *bank =
+			list_entry(pos, struct brcmstb_gpio_bank, node);
+		brcmstb_gpio_irq_bank_handler(bank);
+	}
+	chained_irq_exit(chip, desc);
+}
+
+static int brcmstb_gpio_reboot(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct brcmstb_gpio_priv *priv =
+		container_of(nb, struct brcmstb_gpio_priv, reboot_notifier);
+
+	/* Enable GPIO for S5 cold boot */
+	if (action == SYS_POWER_OFF)
+		brcmstb_gpio_priv_set_wake(priv, 1);
+
+	return NOTIFY_DONE;
+}
+
 /* Make sure that the number of banks matches up between properties */
 static int brcmstb_gpio_sanity_check_banks(struct device *dev,
 		struct device_node *np, struct resource *res)
@@ -100,7 +305,13 @@
 		bank = list_entry(pos, struct brcmstb_gpio_bank, node);
 		ret = bgpio_remove(&bank->bgc);
 		if (ret)
-			dev_err(&pdev->dev, "gpiochip_remove fail in cleanup");
+			dev_err(&pdev->dev, "gpiochip_remove fail in cleanup\n");
+	}
+	if (priv->reboot_notifier.notifier_call) {
+		ret = unregister_reboot_notifier(&priv->reboot_notifier);
+		if (ret)
+			dev_err(&pdev->dev,
+				"failed to unregister reboot notifier\n");
 	}
 	return ret;
 }
@@ -121,7 +332,7 @@
 		return -EINVAL;
 
 	offset = gpiospec->args[0] - (gc->base - priv->gpio_base);
-	if (offset >= gc->ngpio)
+	if (offset >= gc->ngpio || offset < 0)
 		return -EINVAL;
 
 	if (unlikely(offset >= bank->width)) {
@@ -136,6 +347,65 @@
 	return offset;
 }
 
+/* Before calling, must have bank->parent_irq set and gpiochip registered */
+static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
+		struct brcmstb_gpio_bank *bank)
+{
+	struct brcmstb_gpio_priv *priv = bank->parent_priv;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+
+	bank->irq_chip.name = dev_name(dev);
+	bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+	bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+	bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+
+	/* Ensures that all non-wakeup IRQs are disabled at suspend */
+	bank->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+	if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
+			of_property_read_bool(np, "wakeup-source")) {
+		priv->parent_wake_irq = platform_get_irq(pdev, 1);
+		if (priv->parent_wake_irq < 0) {
+			dev_warn(dev,
+				"Couldn't get wake IRQ - GPIOs will not be able to wake from sleep");
+		} else {
+			int err;
+
+			/*
+			 * Set wakeup capability before requesting wakeup
+			 * interrupt, so we can process boot-time "wakeups"
+			 * (e.g., from S5 cold boot)
+			 */
+			device_set_wakeup_capable(dev, true);
+			device_wakeup_enable(dev);
+			err = devm_request_irq(dev, priv->parent_wake_irq,
+					brcmstb_gpio_wake_irq_handler, 0,
+					"brcmstb-gpio-wake", priv);
+
+			if (err < 0) {
+				dev_err(dev, "Couldn't request wake IRQ");
+				return err;
+			}
+
+			priv->reboot_notifier.notifier_call =
+				brcmstb_gpio_reboot;
+			register_reboot_notifier(&priv->reboot_notifier);
+			priv->can_wake = true;
+		}
+	}
+
+	if (priv->can_wake)
+		bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+
+	gpiochip_irqchip_add(&bank->bgc.gc, &bank->irq_chip, 0,
+			handle_simple_irq, IRQ_TYPE_NONE);
+	gpiochip_set_chained_irqchip(&bank->bgc.gc, &bank->irq_chip,
+			priv->parent_irq, brcmstb_gpio_irq_handler);
+
+	return 0;
+}
+
 static int brcmstb_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -146,6 +416,7 @@
 	struct property *prop;
 	const __be32 *p;
 	u32 bank_width;
+	int num_banks = 0;
 	int err;
 	static int gpio_base;
 
@@ -164,6 +435,16 @@
 	priv->reg_base = reg_base;
 	priv->pdev = pdev;
 
+	if (of_property_read_bool(np, "interrupt-controller")) {
+		priv->parent_irq = platform_get_irq(pdev, 0);
+		if (priv->parent_irq <= 0) {
+			dev_err(dev, "Couldn't get IRQ");
+			return -ENOENT;
+		}
+	} else {
+		priv->parent_irq = -ENOENT;
+	}
+
 	if (brcmstb_gpio_sanity_check_banks(dev, np, res))
 		return -EINVAL;
 
@@ -180,7 +461,7 @@
 		}
 
 		bank->parent_priv = priv;
-		bank->id = priv->num_banks;
+		bank->id = num_banks;
 		if (bank_width <= 0 || bank_width > MAX_GPIO_PER_BANK) {
 			dev_err(dev, "Invalid bank width %d\n", bank_width);
 			goto fail;
@@ -212,6 +493,12 @@
 		/* not all ngpio lines are valid, will use bank width later */
 		gc->ngpio = MAX_GPIO_PER_BANK;
 
+		/*
+		 * Mask all interrupts by default, since wakeup interrupts may
+		 * be retained from S5 cold boot
+		 */
+		bank->bgc.write_reg(reg_base + GIO_MASK(bank->id), 0);
+
 		err = gpiochip_add(gc);
 		if (err) {
 			dev_err(dev, "Could not add gpiochip for bank %d\n",
@@ -219,17 +506,24 @@
 			goto fail;
 		}
 		gpio_base += gc->ngpio;
+
+		if (priv->parent_irq > 0) {
+			err = brcmstb_gpio_irq_setup(pdev, bank);
+			if (err)
+				goto fail;
+		}
+
 		dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
 			gc->base, gc->ngpio, bank->width);
 
 		/* Everything looks good, so add bank to list */
 		list_add(&bank->node, &priv->bank_list);
 
-		priv->num_banks++;
+		num_banks++;
 	}
 
 	dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
-			priv->num_banks, priv->gpio_base, gpio_base - 1);
+			num_banks, priv->gpio_base, gpio_base - 1);
 
 	return 0;
 
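
The brcmstb conversion above is a fairly complete example of the gpiolib irqchip helpers: the driver fills in a struct irq_chip, gpiochip_irqchip_add() then creates the per-chip irqdomain and maps GPIO offsets to virtual irqs, and gpiochip_set_chained_irqchip() wires the bank's parent interrupt to the chained flow handler. Condensed to its essentials, and again using a hypothetical foo_gpio in place of the brcmstb structures, the sequence looks roughly like this:

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	static void foo_gpio_irq_mask(struct irq_data *d)
	{
		/* set the line's bit in the per-bank mask register here */
	}

	static void foo_gpio_irq_unmask(struct irq_data *d)
	{
		/* clear the line's bit in the per-bank mask register here */
	}

	static struct irq_chip foo_gpio_irq_chip = {
		.name		= "foo-gpio",
		.irq_mask	= foo_gpio_irq_mask,
		.irq_unmask	= foo_gpio_irq_unmask,
		/* non-wake interrupts are masked by the core across suspend */
		.flags		= IRQCHIP_MASK_ON_SUSPEND,
	};

	static int foo_gpio_setup_irq(struct gpio_chip *gc, int parent_irq,
				      irq_flow_handler_t handler)
	{
		int ret;

		/* creates the irqdomain and the gpio-offset -> virq mappings */
		ret = gpiochip_irqchip_add(gc, &foo_gpio_irq_chip, 0,
					   handle_simple_irq, IRQ_TYPE_NONE);
		if (ret)
			return ret;

		/* routes the bank's parent interrupt to the chained handler */
		gpiochip_set_chained_irqchip(gc, &foo_gpio_irq_chip,
					     parent_irq, handler);
		return 0;
	}
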
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index c246ac3..94b0ab7 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -65,11 +65,11 @@
 	return ptr;
 }
 
-static inline struct davinci_gpio_regs __iomem *irq2regs(int irq)
+static inline struct davinci_gpio_regs __iomem *irq2regs(struct irq_data *d)
 {
 	struct davinci_gpio_regs __iomem *g;
 
-	g = (__force struct davinci_gpio_regs __iomem *)irq_get_chip_data(irq);
+	g = (__force struct davinci_gpio_regs __iomem *)irq_data_get_irq_chip_data(d);
 
 	return g;
 }
@@ -287,7 +287,7 @@
 
 static void gpio_irq_disable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d);
 	u32 mask = (u32) irq_data_get_irq_handler_data(d);
 
 	writel_relaxed(mask, &g->clr_falling);
@@ -296,7 +296,7 @@
 
 static void gpio_irq_enable(struct irq_data *d)
 {
-	struct davinci_gpio_regs __iomem *g = irq2regs(d->irq);
+	struct davinci_gpio_regs __iomem *g = irq2regs(d);
 	u32 mask = (u32) irq_data_get_irq_handler_data(d);
 	unsigned status = irqd_get_trigger_type(d);
 
@@ -327,8 +327,9 @@
 };
 
 static void
-gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask = 0xffff;
 	struct davinci_gpio_controller *d;
@@ -396,7 +397,7 @@
 	struct davinci_gpio_regs __iomem *g;
 	u32 mask;
 
-	d = (struct davinci_gpio_controller *)data->handler_data;
+	d = (struct davinci_gpio_controller *)irq_data_get_irq_handler_data(data);
 	g = (struct davinci_gpio_regs __iomem *)d->regs;
 	mask = __gpio_mask(data->irq - d->gpio_irq);
 
@@ -422,7 +423,6 @@
 	irq_set_irq_type(irq, IRQ_TYPE_NONE);
 	irq_set_chip_data(irq, (__force void *)g);
 	irq_set_handler_data(irq, (void *)__gpio_mask(hw));
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -545,7 +545,7 @@
 		chips[0].chip.to_irq = gpio_to_irq_unbanked;
 		chips[0].gpio_irq = bank_irq;
 		chips[0].gpio_unbanked = pdata->gpio_unbanked;
-		binten = BIT(0);
+		binten = GENMASK(pdata->gpio_unbanked / 16, 0);
 
 		/* AINTC handles mask/unmask; GPIO handles triggering */
 		irq = bank_irq;
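
One behavioural change in the davinci hunk deserves a worked example: binten used to be hard-coded to BIT(0), enabling only the first bank-interrupt enable bit, and is now GENMASK(pdata->gpio_unbanked / 16, 0), which sets every bit from 0 up to and including gpio_unbanked / 16. With, say, 32 unbanked GPIOs (a value picked purely for illustration, not taken from any board file) that is GENMASK(2, 0) = 0x7, so bits 0 through 2 are enabled instead of bit 0 alone.
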
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 55fa985..c5be4b9 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -149,7 +149,7 @@
 
 static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
 {
-	struct dwapb_gpio *gpio = irq_get_handler_data(irq);
+	struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	dwapb_do_irq(gpio);
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index fbf2873..6bca1e1 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -31,7 +31,6 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/pinctrl/consumer.h>
-#include <linux/platform_data/gpio-em.h>
 
 struct em_gio_priv {
 	void __iomem *base0;
@@ -262,7 +261,6 @@
 
 	irq_set_chip_data(irq, h->host_data);
 	irq_set_chip_and_handler(irq, &p->irq_chip, handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID); /* kill me now */
 	return 0;
 }
 
@@ -273,13 +271,12 @@
 
 static int em_gio_probe(struct platform_device *pdev)
 {
-	struct gpio_em_config pdata_dt;
-	struct gpio_em_config *pdata = dev_get_platdata(&pdev->dev);
 	struct em_gio_priv *p;
 	struct resource *io[2], *irq[2];
 	struct gpio_chip *gpio_chip;
 	struct irq_chip *irq_chip;
 	const char *name = dev_name(&pdev->dev);
+	unsigned int ngpios;
 	int ret;
 
 	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
@@ -319,18 +316,10 @@
 		goto err0;
 	}
 
-	if (!pdata) {
-		memset(&pdata_dt, 0, sizeof(pdata_dt));
-		pdata = &pdata_dt;
-
-		if (of_property_read_u32(pdev->dev.of_node, "ngpios",
-					 &pdata->number_of_pins)) {
-			dev_err(&pdev->dev, "Missing ngpios OF property\n");
-			ret = -EINVAL;
-			goto err0;
-		}
-
-		pdata->gpio_base = -1;
+	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
+		dev_err(&pdev->dev, "Missing ngpios OF property\n");
+		ret = -EINVAL;
+		goto err0;
 	}
 
 	gpio_chip = &p->gpio_chip;
@@ -345,8 +334,8 @@
 	gpio_chip->label = name;
 	gpio_chip->dev = &pdev->dev;
 	gpio_chip->owner = THIS_MODULE;
-	gpio_chip->base = pdata->gpio_base;
-	gpio_chip->ngpio = pdata->number_of_pins;
+	gpio_chip->base = -1;
+	gpio_chip->ngpio = ngpios;
 
 	irq_chip = &p->irq_chip;
 	irq_chip->name = name;
@@ -357,9 +346,7 @@
 	irq_chip->irq_release_resources = em_gio_irq_relres;
 	irq_chip->flags	= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
 
-	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
-					      pdata->number_of_pins,
-					      pdata->irq_base,
+	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, ngpios, 0,
 					      &em_gio_irq_domain_ops, p);
 	if (!p->irq_domain) {
 		ret = -ENXIO;
@@ -387,12 +374,6 @@
 		goto err1;
 	}
 
-	if (pdata->pctl_name) {
-		ret = gpiochip_add_pin_range(gpio_chip, pdata->pctl_name, 0,
-					     gpio_chip->base, gpio_chip->ngpio);
-		if (ret < 0)
-			dev_warn(&pdev->dev, "failed to add pin range\n");
-	}
 	return 0;
 
 err1:
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 45684f3..9d90366 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -100,13 +100,15 @@
 	}
 }
 
-static void ep93xx_gpio_f_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ep93xx_gpio_f_irq_handler(unsigned int __irq,
+				      struct irq_desc *desc)
 {
 	/*
 	 * map discontiguous hw irq range to continuous sw irq range:
 	 *
 	 *  IRQ_EP93XX_GPIO{0..7}MUX -> gpio_to_irq(EP93XX_GPIO_LINE_F({0..7})
 	 */
+	unsigned int irq = irq_desc_get_irq(desc);
 	int port_f_idx = ((irq + 1) & 7) ^ 4; /* {19..22,47..50} -> {0..7} */
 	int gpio_irq = gpio_to_irq(EP93XX_GPIO_LINE_F(0)) + port_f_idx;
 
@@ -208,7 +210,7 @@
 		return -EINVAL;
 	}
 
-	__irq_set_handler_locked(d->irq, handler);
+	irq_set_handler_locked(d, handler);
 
 	gpio_int_enabled[port] |= port_mask;
 
@@ -234,7 +236,7 @@
 	     gpio_irq <= gpio_to_irq(EP93XX_GPIO_LINE_MAX_IRQ); ++gpio_irq) {
 		irq_set_chip_and_handler(gpio_irq, &ep93xx_gpio_irq_chip,
 					 handle_level_irq);
-		set_irq_flags(gpio_irq, IRQF_VALID);
+		irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_chained_handler(IRQ_EP93XX_GPIO_AB,
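
The ep93xx change also spells out what the ARM-only set_irq_flags(irq, IRQF_VALID) call effectively did: it cleared IRQ_NOREQUEST (among other status bits) so the descriptor became requestable. The generic equivalent used here is irq_clear_status_flags(irq, IRQ_NOREQUEST); the other drivers in this series that only needed the default descriptor state keep, or gain, a plain irq_set_noprobe() instead.
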
diff --git a/drivers/gpio/gpio-etraxfs.c b/drivers/gpio/gpio-etraxfs.c
index 28071f4..2ffcd9f 100644
--- a/drivers/gpio/gpio-etraxfs.c
+++ b/drivers/gpio/gpio-etraxfs.c
@@ -1,8 +1,10 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/gpio.h>
+#include <linux/gpio/driver.h>
 #include <linux/of_gpio.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/basic_mmio_gpio.h>
 
@@ -13,6 +15,7 @@
 #define ETRAX_FS_rw_intr_mask	16
 #define ETRAX_FS_rw_ack_intr	20
 #define ETRAX_FS_r_intr		24
+#define ETRAX_FS_r_masked_intr	28
 #define ETRAX_FS_rw_pb_dout	32
 #define ETRAX_FS_r_pb_din	36
 #define ETRAX_FS_rw_pb_oe	40
@@ -26,6 +29,48 @@
 #define ETRAX_FS_r_pe_din	84
 #define ETRAX_FS_rw_pe_oe	88
 
+#define ARTPEC3_r_pa_din	0
+#define ARTPEC3_rw_pa_dout	4
+#define ARTPEC3_rw_pa_oe	8
+#define ARTPEC3_r_pb_din	44
+#define ARTPEC3_rw_pb_dout	48
+#define ARTPEC3_rw_pb_oe	52
+#define ARTPEC3_r_pc_din	88
+#define ARTPEC3_rw_pc_dout	92
+#define ARTPEC3_rw_pc_oe	96
+#define ARTPEC3_r_pd_din	116
+#define ARTPEC3_rw_intr_cfg	120
+#define ARTPEC3_rw_intr_pins	124
+#define ARTPEC3_rw_intr_mask	128
+#define ARTPEC3_rw_ack_intr	132
+#define ARTPEC3_r_masked_intr	140
+
+#define GIO_CFG_OFF		0
+#define GIO_CFG_HI		1
+#define GIO_CFG_LO		2
+#define GIO_CFG_SET		3
+#define GIO_CFG_POSEDGE		5
+#define GIO_CFG_NEGEDGE		6
+#define GIO_CFG_ANYEDGE		7
+
+struct etraxfs_gpio_info;
+
+struct etraxfs_gpio_block {
+	spinlock_t lock;
+	u32 mask;
+	u32 cfg;
+	u32 pins;
+	unsigned int group[8];
+
+	void __iomem *regs;
+	const struct etraxfs_gpio_info *info;
+};
+
+struct etraxfs_gpio_chip {
+	struct bgpio_chip bgc;
+	struct etraxfs_gpio_block *block;
+};
+
 struct etraxfs_gpio_port {
 	const char *label;
 	unsigned int oe;
@@ -37,6 +82,12 @@
 struct etraxfs_gpio_info {
 	unsigned int num_ports;
 	const struct etraxfs_gpio_port *ports;
+
+	unsigned int rw_ack_intr;
+	unsigned int rw_intr_mask;
+	unsigned int rw_intr_cfg;
+	unsigned int rw_intr_pins;
+	unsigned int r_masked_intr;
 };
 
 static const struct etraxfs_gpio_port etraxfs_gpio_etraxfs_ports[] = {
@@ -80,8 +131,56 @@
 static const struct etraxfs_gpio_info etraxfs_gpio_etraxfs = {
 	.num_ports = ARRAY_SIZE(etraxfs_gpio_etraxfs_ports),
 	.ports = etraxfs_gpio_etraxfs_ports,
+	.rw_ack_intr	= ETRAX_FS_rw_ack_intr,
+	.rw_intr_mask	= ETRAX_FS_rw_intr_mask,
+	.rw_intr_cfg	= ETRAX_FS_rw_intr_cfg,
+	.r_masked_intr	= ETRAX_FS_r_masked_intr,
 };
 
+static const struct etraxfs_gpio_port etraxfs_gpio_artpec3_ports[] = {
+	{
+		.label	= "A",
+		.ngpio	= 32,
+		.oe	= ARTPEC3_rw_pa_oe,
+		.dout	= ARTPEC3_rw_pa_dout,
+		.din	= ARTPEC3_r_pa_din,
+	},
+	{
+		.label	= "B",
+		.ngpio	= 32,
+		.oe	= ARTPEC3_rw_pb_oe,
+		.dout	= ARTPEC3_rw_pb_dout,
+		.din	= ARTPEC3_r_pb_din,
+	},
+	{
+		.label	= "C",
+		.ngpio	= 16,
+		.oe	= ARTPEC3_rw_pc_oe,
+		.dout	= ARTPEC3_rw_pc_dout,
+		.din	= ARTPEC3_r_pc_din,
+	},
+	{
+		.label	= "D",
+		.ngpio	= 32,
+		.din	= ARTPEC3_r_pd_din,
+	},
+};
+
+static const struct etraxfs_gpio_info etraxfs_gpio_artpec3 = {
+	.num_ports = ARRAY_SIZE(etraxfs_gpio_artpec3_ports),
+	.ports = etraxfs_gpio_artpec3_ports,
+	.rw_ack_intr	= ARTPEC3_rw_ack_intr,
+	.rw_intr_mask	= ARTPEC3_rw_intr_mask,
+	.rw_intr_cfg	= ARTPEC3_rw_intr_cfg,
+	.r_masked_intr	= ARTPEC3_r_masked_intr,
+	.rw_intr_pins	= ARTPEC3_rw_intr_pins,
+};
+
+static unsigned int etraxfs_gpio_chip_to_port(struct gpio_chip *gc)
+{
+	return gc->label[0] - 'A';
+}
+
 static int etraxfs_gpio_of_xlate(struct gpio_chip *gc,
 			       const struct of_phandle_args *gpiospec,
 			       u32 *flags)
@@ -90,7 +189,7 @@
 	 * Port numbers are A to E, and the properties are integers, so we
 	 * specify them as 0xA - 0xE.
 	 */
-	if (gc->label[0] - 'A' + 0xA != gpiospec->args[2])
+	if (etraxfs_gpio_chip_to_port(gc) + 0xA != gpiospec->args[2])
 		return -EINVAL;
 
 	return of_gpio_simple_xlate(gc, gpiospec, flags);
@@ -101,24 +200,174 @@
 		.compatible = "axis,etraxfs-gio",
 		.data = &etraxfs_gpio_etraxfs,
 	},
+	{
+		.compatible = "axis,artpec3-gio",
+		.data = &etraxfs_gpio_artpec3,
+	},
 	{},
 };
 
+static unsigned int etraxfs_gpio_to_group_irq(unsigned int gpio)
+{
+	return gpio % 8;
+}
+
+static unsigned int etraxfs_gpio_to_group_pin(struct etraxfs_gpio_chip *chip,
+					      unsigned int gpio)
+{
+	return 4 * etraxfs_gpio_chip_to_port(&chip->bgc.gc) + gpio / 8;
+}
+
+static void etraxfs_gpio_irq_ack(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	writel(BIT(grpirq), block->regs + block->info->rw_ack_intr);
+}
+
+static void etraxfs_gpio_irq_mask(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->mask &= ~BIT(grpirq);
+	writel(block->mask, block->regs + block->info->rw_intr_mask);
+	spin_unlock(&block->lock);
+}
+
+static void etraxfs_gpio_irq_unmask(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->mask |= BIT(grpirq);
+	writel(block->mask, block->regs + block->info->rw_intr_mask);
+	spin_unlock(&block->lock);
+}
+
+static int etraxfs_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+	u32 cfg;
+
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		cfg = GIO_CFG_POSEDGE;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		cfg = GIO_CFG_NEGEDGE;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		cfg = GIO_CFG_ANYEDGE;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		cfg = GIO_CFG_LO;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		cfg = GIO_CFG_HI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock(&block->lock);
+	block->cfg &= ~(0x7 << (grpirq * 3));
+	block->cfg |= (cfg << (grpirq * 3));
+	writel(block->cfg, block->regs + block->info->rw_intr_cfg);
+	spin_unlock(&block->lock);
+
+	return 0;
+}
+
+static int etraxfs_gpio_irq_request_resources(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+	int ret = -EBUSY;
+
+	spin_lock(&block->lock);
+	if (block->group[grpirq])
+		goto out;
+
+	ret = gpiochip_lock_as_irq(&chip->bgc.gc, d->hwirq);
+	if (ret)
+		goto out;
+
+	block->group[grpirq] = d->irq;
+	if (block->info->rw_intr_pins) {
+		unsigned int pin = etraxfs_gpio_to_group_pin(chip, d->hwirq);
+
+		block->pins &= ~(0xf << (grpirq * 4));
+		block->pins |= (pin << (grpirq * 4));
+
+		writel(block->pins, block->regs + block->info->rw_intr_pins);
+	}
+
+out:
+	spin_unlock(&block->lock);
+	return ret;
+}
+
+static void etraxfs_gpio_irq_release_resources(struct irq_data *d)
+{
+	struct etraxfs_gpio_chip *chip = irq_data_get_irq_chip_data(d);
+	struct etraxfs_gpio_block *block = chip->block;
+	unsigned int grpirq = etraxfs_gpio_to_group_irq(d->hwirq);
+
+	spin_lock(&block->lock);
+	block->group[grpirq] = 0;
+	gpiochip_unlock_as_irq(&chip->bgc.gc, d->hwirq);
+	spin_unlock(&block->lock);
+}
+
+static struct irq_chip etraxfs_gpio_irq_chip = {
+	.name		= "gpio-etraxfs",
+	.irq_ack	= etraxfs_gpio_irq_ack,
+	.irq_mask	= etraxfs_gpio_irq_mask,
+	.irq_unmask	= etraxfs_gpio_irq_unmask,
+	.irq_set_type	= etraxfs_gpio_irq_set_type,
+	.irq_request_resources = etraxfs_gpio_irq_request_resources,
+	.irq_release_resources = etraxfs_gpio_irq_release_resources,
+};
+
+static irqreturn_t etraxfs_gpio_interrupt(int irq, void *dev_id)
+{
+	struct etraxfs_gpio_block *block = dev_id;
+	unsigned long intr = readl(block->regs + block->info->r_masked_intr);
+	int bit;
+
+	for_each_set_bit(bit, &intr, 8)
+		generic_handle_irq(block->group[bit]);
+
+	return IRQ_RETVAL(intr & 0xff);
+}
+
 static int etraxfs_gpio_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	const struct etraxfs_gpio_info *info;
 	const struct of_device_id *match;
-	struct bgpio_chip *chips;
-	struct resource *res;
+	struct etraxfs_gpio_block *block;
+	struct etraxfs_gpio_chip *chips;
+	struct resource *res, *irq;
+	bool allportsirq = false;
 	void __iomem *regs;
 	int ret;
 	int i;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	regs = devm_ioremap_resource(dev, res);
-	if (!regs)
-		return -ENOMEM;
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	match = of_match_node(etraxfs_gpio_of_table, dev->of_node);
 	if (!match)
@@ -130,19 +379,57 @@
 	if (!chips)
 		return -ENOMEM;
 
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq)
+		return -EINVAL;
+
+	block = devm_kzalloc(dev, sizeof(*block), GFP_KERNEL);
+	if (!block)
+		return -ENOMEM;
+
+	spin_lock_init(&block->lock);
+
+	block->regs = regs;
+	block->info = info;
+
+	writel(0, block->regs + info->rw_intr_mask);
+	writel(0, block->regs + info->rw_intr_cfg);
+	if (info->rw_intr_pins) {
+		allportsirq = true;
+		writel(0, block->regs + info->rw_intr_pins);
+	}
+
+	ret = devm_request_irq(dev, irq->start, etraxfs_gpio_interrupt,
+			       IRQF_SHARED, dev_name(dev), block);
+	if (ret) {
+		dev_err(dev, "Unable to request irq %d\n", ret);
+		return ret;
+	}
+
 	for (i = 0; i < info->num_ports; i++) {
-		struct bgpio_chip *bgc = &chips[i];
+		struct etraxfs_gpio_chip *chip = &chips[i];
+		struct bgpio_chip *bgc = &chip->bgc;
 		const struct etraxfs_gpio_port *port = &info->ports[i];
+		unsigned long flags = BGPIOF_READ_OUTPUT_REG_SET;
+		void __iomem *dat = regs + port->din;
+		void __iomem *set = regs + port->dout;
+		void __iomem *dirout = regs + port->oe;
+
+		chip->block = block;
+
+		if (dirout == set) {
+			dirout = set = NULL;
+			flags = BGPIOF_NO_OUTPUT;
+		}
 
 		ret = bgpio_init(bgc, dev, 4,
-				 regs + port->din,	/* dat */
-				 regs + port->dout,	/* set */
-				 NULL,			/* clr */
-				 regs + port->oe,	/* dirout */
-				 NULL,			/* dirin */
-				 BGPIOF_UNREADABLE_REG_SET);
-		if (ret)
-			return ret;
+				 dat, set, NULL, dirout, NULL,
+				 flags);
+		if (ret) {
+			dev_err(dev, "Unable to init port %s\n",
+				port->label);
+			continue;
+		}
 
 		bgc->gc.ngpio = port->ngpio;
 		bgc->gc.label = port->label;
@@ -152,9 +439,21 @@
 		bgc->gc.of_xlate = etraxfs_gpio_of_xlate;
 
 		ret = gpiochip_add(&bgc->gc);
-		if (ret)
+		if (ret) {
 			dev_err(dev, "Unable to register port %s\n",
 				bgc->gc.label);
+			continue;
+		}
+
+		if (i > 0 && !allportsirq)
+			continue;
+
+		ret = gpiochip_irqchip_add(&bgc->gc, &etraxfs_gpio_irq_chip, 0,
+					   handle_level_irq, IRQ_TYPE_NONE);
+		if (ret) {
+			dev_err(dev, "Unable to add irqchip to port %s\n",
+				bgc->gc.label);
+		}
 	}
 
 	return 0;
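
The interrupt plumbing added to gpio-etraxfs above hinges on two small mapping helpers, and a worked example may help: each GIO block exposes eight group interrupts, a GPIO maps to group gpio % 8, and on ARTPEC-3 the pin selector for that group is 4 * port + gpio / 8. Taking port B, line 13 purely as an illustration: the group irq is 13 % 8 = 5 and the pin selector is 4 * 1 + 13 / 8 = 5, so request_resources writes the value 5 into the 4-bit field at bits [23:20] of rw_intr_pins (grpirq * 4 = 20) and set_type writes the 3-bit trigger config into bits [17:15] of rw_intr_cfg (grpirq * 3 = 15). Because lines 5, 13, 21 and 29 all share group 5, the irq_request_resources callback refuses a second user of the same group with -EBUSY.
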
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 9bda372..a3f0753 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -153,6 +153,10 @@
 	return !!(bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio));
 }
 
+static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+}
+
 static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -279,6 +283,12 @@
 	return 0;
 }
 
+static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+				int val)
+{
+	return -EINVAL;
+}
+
 static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
 				int val)
 {
@@ -302,6 +312,14 @@
 	return 0;
 }
 
+static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+	return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+	       GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
 static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
 	struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -351,6 +369,14 @@
 	return 0;
 }
 
+static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+	struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+	return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+	       GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
 static int bgpio_setup_accessors(struct device *dev,
 				 struct bgpio_chip *bgc,
 				 bool bit_be,
@@ -444,6 +470,9 @@
 		bgc->reg_set = set;
 		bgc->gc.set = bgpio_set_set;
 		bgc->gc.set_multiple = bgpio_set_multiple_set;
+	} else if (flags & BGPIOF_NO_OUTPUT) {
+		bgc->gc.set = bgpio_set_none;
+		bgc->gc.set_multiple = NULL;
 	} else {
 		bgc->gc.set = bgpio_set;
 		bgc->gc.set_multiple = bgpio_set_multiple;
@@ -460,7 +489,8 @@
 
 static int bgpio_setup_direction(struct bgpio_chip *bgc,
 				 void __iomem *dirout,
-				 void __iomem *dirin)
+				 void __iomem *dirin,
+				 unsigned long flags)
 {
 	if (dirout && dirin) {
 		return -EINVAL;
@@ -468,12 +498,17 @@
 		bgc->reg_dir = dirout;
 		bgc->gc.direction_output = bgpio_dir_out;
 		bgc->gc.direction_input = bgpio_dir_in;
+		bgc->gc.get_direction = bgpio_get_dir;
 	} else if (dirin) {
 		bgc->reg_dir = dirin;
 		bgc->gc.direction_output = bgpio_dir_out_inv;
 		bgc->gc.direction_input = bgpio_dir_in_inv;
+		bgc->gc.get_direction = bgpio_get_dir_inv;
 	} else {
-		bgc->gc.direction_output = bgpio_simple_dir_out;
+		if (flags & BGPIOF_NO_OUTPUT)
+			bgc->gc.direction_output = bgpio_dir_out_err;
+		else
+			bgc->gc.direction_output = bgpio_simple_dir_out;
 		bgc->gc.direction_input = bgpio_simple_dir_in;
 	}
 
@@ -525,7 +560,7 @@
 	if (ret)
 		return ret;
 
-	ret = bgpio_setup_direction(bgc, dirout, dirin);
+	ret = bgpio_setup_direction(bgc, dirout, dirin, flags);
 	if (ret)
 		return ret;
 
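
The gpio-generic additions above give bgpio users a get_direction() callback and a BGPIOF_NO_OUTPUT flag for input-only ports; the ARTPEC-3 port D conversion earlier in this series is the first consumer. A minimal sketch of how a hypothetical input-only port might use the new flag (names and register layout are illustrative only):

	#include <linux/basic_mmio_gpio.h>
	#include <linux/gpio/driver.h>

	/*
	 * Only a data-in register exists for this port, so BGPIOF_NO_OUTPUT
	 * makes direction_output() return -EINVAL and turns set() into a
	 * documented no-op instead of leaving the callbacks unset.
	 */
	static int foo_register_input_only_port(struct device *dev,
						struct bgpio_chip *bgc,
						void __iomem *din)
	{
		int ret;

		ret = bgpio_init(bgc, dev, 4,
				 din,	/* dat */
				 NULL,	/* set */
				 NULL,	/* clr */
				 NULL,	/* dirout */
				 NULL,	/* dirin */
				 BGPIOF_NO_OUTPUT);
		if (ret)
			return ret;

		return gpiochip_add(&bgc->gc);
	}
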
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 0a8f761..801423f 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -104,17 +104,12 @@
 {
 	struct bgpio_chip *bgc = &priv->bgc;
 	unsigned long mask = bgc->pin2mask(bgc, offset);
-	unsigned long flags;
-
-	spin_lock_irqsave(&bgc->lock, flags);
 
 	if (val)
 		priv->imask |= mask;
 	else
 		priv->imask &= ~mask;
 	bgc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
-
-	spin_unlock_irqrestore(&bgc->lock, flags);
 }
 
 static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -180,16 +175,26 @@
 {
 	struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
 	int offset = d->hwirq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->bgc.lock, flags);
 
 	grgpio_set_imask(priv, offset, 0);
+
+	spin_unlock_irqrestore(&priv->bgc.lock, flags);
 }
 
 static void grgpio_irq_unmask(struct irq_data *d)
 {
 	struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
 	int offset = d->hwirq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->bgc.lock, flags);
 
 	grgpio_set_imask(priv, offset, 1);
+
+	spin_unlock_irqrestore(&priv->bgc.lock, flags);
 }
 
 static struct irq_chip grgpio_irq_chip = {
@@ -281,12 +286,7 @@
 	irq_set_chip_data(irq, priv);
 	irq_set_chip_and_handler(irq, &grgpio_irq_chip,
 				 handle_simple_irq);
-	irq_clear_status_flags(irq, IRQ_NOREQUEST);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return ret;
 }
@@ -301,9 +301,6 @@
 	int ngpio = priv->bgc.gc.ngpio;
 	int i;
 
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, 0);
-#endif
 	irq_set_chip_and_handler(irq, NULL, NULL);
 	irq_set_chip_data(irq, NULL);
 
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 7d3c90e..8c5252c 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -685,9 +685,14 @@
 
 	mutex_init(&chip->lock);
 
-	max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
-	if (nr_port > 8)
-		max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+	ret = max732x_readb(chip, is_group_a(chip, 0), &chip->reg_out[0]);
+	if (ret)
+		goto out_failed;
+	if (nr_port > 8) {
+		ret = max732x_readb(chip, is_group_a(chip, 8), &chip->reg_out[1]);
+		if (ret)
+			goto out_failed;
+	}
 
 	ret = gpiochip_add(&chip->gpio_chip);
 	if (ret)
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 2fc7ff8..73db7ec 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -507,11 +507,7 @@
 		irq_set_chip_data(irq, mcp);
 		irq_set_chip(irq, &mcp23s08_irq_chip);
 		irq_set_nested_thread(irq, true);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID);
-#else
 		irq_set_noprobe(irq);
-#endif
 	}
 	return 0;
 }
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 20aa66f..8ef7a12 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -32,7 +32,7 @@
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 
 	/*
 	 * shadowed data register to be able to clear/set output pins in
@@ -95,7 +95,7 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	if (val)
 		mpc8xxx_gc->data |= mpc8xxx_gpio2mask(gpio);
@@ -104,7 +104,7 @@
 
 	out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_gpio_set_multiple(struct gpio_chip *gc,
@@ -115,7 +115,7 @@
 	unsigned long flags;
 	int i;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	for (i = 0; i < gc->ngpio; i++) {
 		if (*mask == 0)
@@ -130,7 +130,7 @@
 
 	out_be32(mm->regs + GPIO_DAT, mpc8xxx_gc->data);
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static int mpc8xxx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -139,11 +139,11 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	clrbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 
 	return 0;
 }
@@ -156,11 +156,11 @@
 
 	mpc8xxx_gpio_set(gc, gpio, val);
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	setbits32(mm->regs + GPIO_DIR, mpc8xxx_gpio2mask(gpio));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 
 	return 0;
 }
@@ -174,6 +174,15 @@
 	return mpc8xxx_gpio_dir_out(gc, gpio, val);
 }
 
+static int mpc5125_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+	/* GPIO 0..3 are input only on MPC5125 */
+	if (gpio <= 3)
+		return -EINVAL;
+
+	return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
 	struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -206,11 +215,11 @@
 	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_irq_mask(struct irq_data *d)
@@ -219,11 +228,11 @@
 	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 
 	clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
 
-	spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 }
 
 static void mpc8xxx_irq_ack(struct irq_data *d)
@@ -242,17 +251,17 @@
 
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		setbits32(mm->regs + GPIO_ICR,
 			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrbits32(mm->regs + GPIO_ICR,
 			  mpc8xxx_gpio2mask(irqd_to_hwirq(d)));
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	default:
@@ -282,22 +291,22 @@
 	switch (flow_type) {
 	case IRQ_TYPE_EDGE_FALLING:
 	case IRQ_TYPE_LEVEL_LOW:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrsetbits_be32(reg, 3 << shift, 2 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_RISING:
 	case IRQ_TYPE_LEVEL_HIGH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrsetbits_be32(reg, 3 << shift, 1 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
-		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
 		clrbits32(reg, 3 << shift);
-		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
 		break;
 
 	default:
@@ -312,17 +321,13 @@
 	.irq_unmask	= mpc8xxx_irq_unmask,
 	.irq_mask	= mpc8xxx_irq_mask,
 	.irq_ack	= mpc8xxx_irq_ack,
+	/* this might get overwritten in mpc8xxx_probe() */
 	.irq_set_type	= mpc8xxx_irq_set_type,
 };
 
 static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int irq,
 				irq_hw_number_t hwirq)
 {
-	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
-
-	if (mpc8xxx_gc->of_dev_id_data)
-		mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data;
-
 	irq_set_chip_data(irq, h->host_data);
 	irq_set_chip_and_handler(irq, &mpc8xxx_irq_chip, handle_level_irq);
 
@@ -334,11 +339,38 @@
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] = {
+struct mpc8xxx_gpio_devtype {
+	int (*gpio_dir_out)(struct gpio_chip *, unsigned int, int);
+	int (*gpio_get)(struct gpio_chip *, unsigned int);
+	int (*irq_set_type)(struct irq_data *, unsigned int);
+};
+
+static const struct mpc8xxx_gpio_devtype mpc512x_gpio_devtype = {
+	.gpio_dir_out = mpc5121_gpio_dir_out,
+	.irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc5125_gpio_devtype = {
+	.gpio_dir_out = mpc5125_gpio_dir_out,
+	.irq_set_type = mpc512x_irq_set_type,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8572_gpio_devtype = {
+	.gpio_get = mpc8572_gpio_get,
+};
+
+static const struct mpc8xxx_gpio_devtype mpc8xxx_gpio_devtype_default = {
+	.gpio_dir_out = mpc8xxx_gpio_dir_out,
+	.gpio_get = mpc8xxx_gpio_get,
+	.irq_set_type = mpc8xxx_irq_set_type,
+};
+
+static const struct of_device_id mpc8xxx_gpio_ids[] = {
 	{ .compatible = "fsl,mpc8349-gpio", },
-	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", .data = &mpc8572_gpio_devtype, },
 	{ .compatible = "fsl,mpc8610-gpio", },
-	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{ .compatible = "fsl,mpc5121-gpio", .data = &mpc512x_gpio_devtype, },
+	{ .compatible = "fsl,mpc5125-gpio", .data = &mpc5125_gpio_devtype, },
 	{ .compatible = "fsl,pq3-gpio",     },
 	{ .compatible = "fsl,qoriq-gpio",   },
 	{}
@@ -351,6 +383,8 @@
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
 	const struct of_device_id *id;
+	const struct mpc8xxx_gpio_devtype *devtype =
+		of_device_get_match_data(&pdev->dev);
 	int ret;
 
 	mpc8xxx_gc = devm_kzalloc(&pdev->dev, sizeof(*mpc8xxx_gc), GFP_KERNEL);
@@ -359,7 +393,7 @@
 
 	platform_set_drvdata(pdev, mpc8xxx_gc);
 
-	spin_lock_init(&mpc8xxx_gc->lock);
+	raw_spin_lock_init(&mpc8xxx_gc->lock);
 
 	mm_gc = &mpc8xxx_gc->mm_gc;
 	gc = &mm_gc->gc;
@@ -367,10 +401,18 @@
 	mm_gc->save_regs = mpc8xxx_gpio_save_regs;
 	gc->ngpio = MPC8XXX_GPIO_PINS;
 	gc->direction_input = mpc8xxx_gpio_dir_in;
-	gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
-		mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
-	gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
-		mpc8572_gpio_get : mpc8xxx_gpio_get;
+
+	if (!devtype)
+		devtype = &mpc8xxx_gpio_devtype_default;
+
+	/*
+	 * It's assumed that only a single type of gpio controller is available
+	 * on the current machine, so overwriting global data is fine.
+	 */
+	mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
+
+	gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;
+	gc->get = devtype->gpio_get ?: mpc8xxx_gpio_get;
 	gc->set = mpc8xxx_gpio_set;
 	gc->set_multiple = mpc8xxx_gpio_set_multiple;
 	gc->to_irq = mpc8xxx_gpio_to_irq;
@@ -396,8 +438,8 @@
 	out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
 	out_be32(mm_gc->regs + GPIO_IMR, 0);
 
-	irq_set_handler_data(mpc8xxx_gc->irqn, mpc8xxx_gc);
-	irq_set_chained_handler(mpc8xxx_gc->irqn, mpc8xxx_gpio_irq_cascade);
+	irq_set_chained_handler_and_data(mpc8xxx_gc->irqn,
+					 mpc8xxx_gpio_irq_cascade, mpc8xxx_gc);
 
 	return 0;
 }
@@ -407,8 +449,7 @@
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc = platform_get_drvdata(pdev);
 
 	if (mpc8xxx_gc->irq) {
-		irq_set_handler_data(mpc8xxx_gc->irqn, NULL);
-		irq_set_chained_handler(mpc8xxx_gc->irqn, NULL);
+		irq_set_chained_handler_and_data(mpc8xxx_gc->irqn, NULL, NULL);
 		irq_domain_remove(mpc8xxx_gc->irq);
 	}
 
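
Two idioms in the mpc8xxx rework are worth spelling out. First, the bank lock becomes a raw_spinlock_t; the usual motivation is that these helpers are called from irq_chip callbacks (mask/unmask/ack/set_type), where a lock that turns into a sleeping lock under PREEMPT_RT is not acceptable, and raw_spinlock_t never does. Second, the probe path uses the GCC "a ?: b" shorthand, which evaluates to a when a is non-NULL and to b otherwise, so

	gc->direction_output = devtype->gpio_dir_out ?: mpc8xxx_gpio_dir_out;

picks the per-SoC callback when the matched struct mpc8xxx_gpio_devtype provides one and falls back to the generic implementation when it does not.
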
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 52ff182..d2012cf 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -187,14 +187,6 @@
 	return irq_create_mapping(domain, offset);
 }
 
-static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(irq);
-
-	return irq_data->hwirq;
-}
-
-
 /* For dual-edge interrupts in software, since the hardware has no
  * such support:
  *
@@ -238,7 +230,7 @@
 
 static void msm_gpio_irq_ack(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+	int gpio = d->hwirq;
 
 	writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio));
 	if (test_bit(gpio, msm_gpio.dual_edge_irqs))
@@ -247,8 +239,8 @@
 
 static void msm_gpio_irq_mask(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
@@ -259,8 +251,8 @@
 
 static void msm_gpio_irq_unmask(struct irq_data *d)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
 	__set_bit(gpio, msm_gpio.enabled_irqs);
@@ -271,8 +263,8 @@
 
 static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
 	unsigned long irq_flags;
+	int gpio = d->hwirq;
 	uint32_t bits;
 
 	spin_lock_irqsave(&tlmm_lock, irq_flags);
@@ -281,14 +273,14 @@
 
 	if (flow_type & IRQ_TYPE_EDGE_BOTH) {
 		bits |= BIT(INTR_DECT_CTL);
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
 			__set_bit(gpio, msm_gpio.dual_edge_irqs);
 		else
 			__clear_bit(gpio, msm_gpio.dual_edge_irqs);
 	} else {
 		bits &= ~BIT(INTR_DECT_CTL);
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		__clear_bit(gpio, msm_gpio.dual_edge_irqs);
 	}
 
@@ -331,7 +323,7 @@
 
 static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq);
+	int gpio = d->hwirq;
 
 	if (on) {
 		if (bitmap_empty(msm_gpio.wake_irqs, MAX_NR_GPIO))
@@ -363,7 +355,6 @@
 	irq_set_lockdep_class(irq, &msm_gpio_lock_class);
 	irq_set_chip_and_handler(irq, &msm_gpio_irq_chip,
 			handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 1a54205..b396bf3 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,9 +458,9 @@
 	return 0;
 }
 
-static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+	struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 cause, type;
 	int i;
@@ -787,8 +787,8 @@
 
 		if (irq < 0)
 			continue;
-		irq_set_handler_data(irq, mvchip);
-		irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
+		irq_set_chained_handler_and_data(irq, mvebu_gpio_irq_handler,
+						 mvchip);
 	}
 
 	mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index ec1eb1b..b752b56 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -275,8 +275,8 @@
 static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
 	u32 irq_stat;
-	struct mxc_gpio_port *port = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
 
@@ -292,7 +292,7 @@
 {
 	u32 irq_msk, irq_stat;
 	struct mxc_gpio_port *port;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(chip, desc);
 
@@ -339,7 +339,7 @@
 	return 0;
 }
 
-static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
@@ -354,6 +354,7 @@
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_set_irq_type;
 	ct->chip.irq_set_wake = gpio_set_wake_irq;
+	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND;
 	ct->regs.ack = GPIO_ISR;
 	ct->regs.mask = GPIO_IMR;
 
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 551d15d..b7f383e 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -157,7 +157,7 @@
 static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
 	u32 irq_stat;
-	struct mxs_gpio_port *port = irq_get_handler_data(irq);
+	struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 61a731f..2ae0d47 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -29,6 +29,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #define OFF_MODE	1
+#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
 
 static LIST_HEAD(omap_gpio_list);
 
@@ -57,7 +58,7 @@
 	u32 saved_datain;
 	u32 level_mask;
 	u32 toggle_mask;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct gpio_chip chip;
 	struct clk *dbck;
 	u32 mod_usage;
@@ -175,7 +176,7 @@
 static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
 {
 	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
-		clk_prepare_enable(bank->dbck);
+		clk_enable(bank->dbck);
 		bank->dbck_enabled = true;
 
 		writel_relaxed(bank->dbck_enable_mask,
@@ -193,7 +194,7 @@
 		 */
 		writel_relaxed(0, bank->base + bank->regs->debounce_en);
 
-		clk_disable_unprepare(bank->dbck);
+		clk_disable(bank->dbck);
 		bank->dbck_enabled = false;
 	}
 }
@@ -204,8 +205,9 @@
  * @offset: the gpio number on this @bank
  * @debounce: debounce time to use
  *
- * OMAP's debounce time is in 31us steps so we need
- * to convert and round up to the closest unit.
+ * OMAP's debounce time is in 31us steps
+ *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
+ * so we need to convert and round up to the closest unit.
  */
 static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
 				    unsigned debounce)
@@ -213,34 +215,33 @@
 	void __iomem		*reg;
 	u32			val;
 	u32			l;
+	bool			enable = !!debounce;
 
 	if (!bank->dbck_flag)
 		return;
 
-	if (debounce < 32)
-		debounce = 0x01;
-	else if (debounce > 7936)
-		debounce = 0xff;
-	else
-		debounce = (debounce / 0x1f) - 1;
+	if (enable) {
+		debounce = DIV_ROUND_UP(debounce, 31) - 1;
+		debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
+	}
 
 	l = BIT(offset);
 
-	clk_prepare_enable(bank->dbck);
+	clk_enable(bank->dbck);
 	reg = bank->base + bank->regs->debounce;
 	writel_relaxed(debounce, reg);
 
 	reg = bank->base + bank->regs->debounce_en;
 	val = readl_relaxed(reg);
 
-	if (debounce)
+	if (enable)
 		val |= l;
 	else
 		val &= ~l;
 	bank->dbck_enable_mask = val;
 
 	writel_relaxed(val, reg);
-	clk_disable_unprepare(bank->dbck);
+	clk_disable(bank->dbck);
 	/*
 	 * Enable debounce clock per module.
 	 * This call is mandatory because in omap_gpio_request() when
@@ -285,7 +286,7 @@
 		bank->context.debounce = 0;
 		writel_relaxed(bank->context.debounce, bank->base +
 			     bank->regs->debounce);
-		clk_disable_unprepare(bank->dbck);
+		clk_disable(bank->dbck);
 		bank->dbck_enabled = false;
 	}
 }
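
The debounce rework above is easiest to see with a number (100 us is chosen purely as an illustration; the gpiolib debounce argument is in microseconds). The hardware field encodes (DEBOUNCETIME + 1) * 31 us, so the old (debounce / 0x1f) - 1 gave 100 / 31 - 1 = 2, i.e. an actual debounce of (2 + 1) * 31 = 93 us, silently shorter than requested. The new DIV_ROUND_UP(debounce, 31) - 1 gives 4 - 1 = 3, i.e. (3 + 1) * 31 = 124 us, rounding the requested time up to the next 31 us step as the updated comment describes.
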
@@ -498,24 +499,24 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	retval = omap_set_gpio_triggering(bank, offset, type);
 	if (retval) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		goto error;
 	}
 	omap_gpio_init_irq(bank, offset);
 	if (!omap_gpio_is_input(bank, offset)) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		retval = -EINVAL;
 		goto error;
 	}
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 
@@ -636,14 +637,14 @@
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (enable)
 		bank->context.wake_en |= gpio_bit;
 	else
 		bank->context.wake_en &= ~gpio_bit;
 
 	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -669,10 +670,10 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_enable_gpio_module(bank, offset);
 	bank->mod_usage |= BIT(offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -682,14 +683,14 @@
 	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->mod_usage &= ~(BIT(offset));
 	if (!LINE_USED(bank->irq_usage, offset)) {
 		omap_set_gpio_direction(bank, offset, 1);
 		omap_clear_gpio_debounce(bank, offset);
 	}
 	omap_disable_gpio_module(bank, offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last gpio to be freed in the bank,
@@ -716,7 +717,8 @@
 	struct gpio_bank *bank;
 	int unmasked = 0;
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
-	struct gpio_chip *chip = irq_get_handler_data(irq);
+	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
+	unsigned long lock_flags;
 
 	chained_irq_enter(irqchip, desc);
 
@@ -731,6 +733,8 @@
 		u32 isr_saved, level_mask = 0;
 		u32 enabled;
 
+		raw_spin_lock_irqsave(&bank->lock, lock_flags);
+
 		enabled = omap_get_gpio_irqbank_mask(bank);
 		isr_saved = isr = readl_relaxed(isr_reg) & enabled;
 
@@ -744,6 +748,8 @@
 		omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
 		omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
 
+		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
 		/* if there is only edge sensitive GPIO pin interrupts
 		configured, we could unmask GPIO bank interrupt immediately */
 		if (!level_mask && !unmasked) {
@@ -758,6 +764,7 @@
 			bit = __ffs(isr);
 			isr &= ~(BIT(bit));
 
+			raw_spin_lock_irqsave(&bank->lock, lock_flags);
 			/*
 			 * Some chips can't respond to both rising and falling
 			 * at the same time.  If this irq was requested with
@@ -768,6 +775,8 @@
 			if (bank->toggle_mask & (BIT(bit)))
 				omap_toggle_gpio_edge_triggering(bank, bit);
 
+			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
+
 			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
 							    bit));
 		}
@@ -791,7 +800,7 @@
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	if (!LINE_USED(bank->mod_usage, offset))
 		omap_set_gpio_direction(bank, offset, 1);
@@ -800,12 +809,12 @@
 	omap_enable_gpio_module(bank, offset);
 	bank->irq_usage |= BIT(offset);
 
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	omap_gpio_unmask_irq(d);
 
 	return 0;
 err:
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	if (!BANK_USED(bank))
 		pm_runtime_put(bank->dev);
 	return -EINVAL;
@@ -817,7 +826,7 @@
 	unsigned long flags;
 	unsigned offset = d->hwirq;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->irq_usage &= ~(BIT(offset));
 	omap_set_gpio_irqenable(bank, offset, 0);
 	omap_clear_gpio_irqstatus(bank, offset);
@@ -825,7 +834,7 @@
 	if (!LINE_USED(bank->mod_usage, offset))
 		omap_clear_gpio_debounce(bank, offset);
 	omap_disable_gpio_module(bank, offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last IRQ to be freed in the bank,
@@ -849,10 +858,10 @@
 	unsigned offset = d->hwirq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_irqenable(bank, offset, 0);
 	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static void omap_gpio_unmask_irq(struct irq_data *d)
@@ -862,7 +871,7 @@
 	u32 trigger = irqd_get_trigger_type(d);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (trigger)
 		omap_set_gpio_triggering(bank, offset, trigger);
 
@@ -874,7 +883,7 @@
 	}
 
 	omap_set_gpio_irqenable(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@@ -887,9 +896,9 @@
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -902,9 +911,9 @@
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -950,9 +959,9 @@
 
 	bank = container_of(chip, struct gpio_bank, chip);
 	reg = bank->base + bank->regs->direction;
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	dir = !!(readl_relaxed(reg) & BIT(offset));
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return dir;
 }
 
@@ -962,9 +971,9 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_direction(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@@ -986,10 +995,10 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
 	omap_set_gpio_direction(bank, offset, 0);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@@ -1001,9 +1010,9 @@
 
 	bank = container_of(chip, struct gpio_bank, chip);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap2_set_gpio_debounce(bank, offset, debounce);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -1014,9 +1023,9 @@
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@@ -1061,10 +1070,6 @@
 	 /* Initialize interface clk ungated, module enabled */
 	if (bank->regs->ctrl)
 		writel_relaxed(0, base + bank->regs->ctrl);
-
-	bank->dbck = clk_get(bank->dev, "dbclk");
-	if (IS_ERR(bank->dbck))
-		dev_err(bank->dev, "Could not get gpio dbck\n");
 }
 
 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
@@ -1178,13 +1183,16 @@
 	irqc->irq_set_wake = omap_gpio_wake_enable,
 	irqc->name = dev_name(&pdev->dev);
 
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (unlikely(!res)) {
-		dev_err(dev, "Invalid IRQ resource\n");
-		return -ENODEV;
+	bank->irq = platform_get_irq(pdev, 0);
+	if (bank->irq <= 0) {
+		if (!bank->irq)
+			bank->irq = -ENXIO;
+		if (bank->irq != -EPROBE_DEFER)
+			dev_err(dev,
+				"can't get irq resource ret=%d\n", bank->irq);
+		return bank->irq;
 	}
 
-	bank->irq = res->start;
 	bank->dev = dev;
 	bank->chip.dev = dev;
 	bank->chip.owner = THIS_MODULE;
@@ -1213,16 +1221,26 @@
 	else
 		bank->set_dataout = omap_set_gpio_dataout_mask;
 
-	spin_lock_init(&bank->lock);
+	raw_spin_lock_init(&bank->lock);
 
 	/* Static mapping, never released */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	bank->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(bank->base)) {
-		irq_domain_remove(bank->chip.irqdomain);
 		return PTR_ERR(bank->base);
 	}
 
+	if (bank->dbck_flag) {
+		bank->dbck = devm_clk_get(bank->dev, "dbclk");
+		if (IS_ERR(bank->dbck)) {
+			dev_err(bank->dev,
+				"Could not get gpio dbck. Disable debounce\n");
+			bank->dbck_flag = false;
+		} else {
+			clk_prepare(bank->dbck);
+		}
+	}
+
 	platform_set_drvdata(pdev, bank);
 
 	pm_runtime_enable(bank->dev);
@@ -1254,6 +1272,8 @@
 	list_del(&bank->node);
 	gpiochip_remove(&bank->chip);
 	pm_runtime_disable(bank->dev);
+	if (bank->dbck_flag)
+		clk_unprepare(bank->dbck);
 
 	return 0;
 }
@@ -1271,7 +1291,7 @@
 	unsigned long flags;
 	u32 wake_low, wake_hi;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * Only edges can generate a wakeup event to the PRCM.
@@ -1324,7 +1344,7 @@
 				bank->get_context_loss_count(bank->dev);
 
 	omap_gpio_dbck_disable(bank);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@@ -1339,7 +1359,7 @@
 	unsigned long flags;
 	int c;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * On the first resume during the probe, the context has not
@@ -1375,14 +1395,14 @@
 			if (c != bank->context_loss_count) {
 				omap_gpio_restore_context(bank);
 			} else {
-				spin_unlock_irqrestore(&bank->lock, flags);
+				raw_spin_unlock_irqrestore(&bank->lock, flags);
 				return 0;
 			}
 		}
 	}
 
 	if (!bank->workaround_enabled) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		return 0;
 	}
 
@@ -1437,7 +1457,7 @@
 	}
 
 	bank->workaround_enabled = false;
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
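The gpio-omap hunks above convert every bank->lock section from spinlock_t to raw_spinlock_t so the register accesses stay non-sleeping even when PREEMPT_RT turns ordinary spinlocks into sleeping locks. A minimal sketch of the converted pattern follows; the struct and function names are illustrative, not taken from the driver.

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_bank {
	raw_spinlock_t lock;	/* was spinlock_t; raw stays a spinning lock on RT */
	u32 irq_usage;
};

static void demo_bank_init(struct demo_bank *bank)
{
	raw_spin_lock_init(&bank->lock);
	bank->irq_usage = 0;
}

static void demo_mask_irq(struct demo_bank *bank, unsigned int offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}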
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 404f3c6..1d4d9bc 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -88,7 +88,6 @@
 	struct gpio_chip	chip;
 	struct i2c_client	*client;
 	struct mutex		lock;		/* protect 'out' */
-	spinlock_t		slock;		/* protect irq demux */
 	unsigned		out;		/* software latch */
 	unsigned		status;		/* current status */
 	unsigned int		irq_parent;
@@ -185,23 +184,21 @@
 static irqreturn_t pcf857x_irq(int irq, void *data)
 {
 	struct pcf857x  *gpio = data;
-	unsigned long change, i, status, flags;
+	unsigned long change, i, status;
 
 	status = gpio->read(gpio->client);
 
-	spin_lock_irqsave(&gpio->slock, flags);
-
 	/*
 	 * call the interrupt handler iff gpio is used as
 	 * interrupt source, just to avoid bad irqs
 	 */
-
+	mutex_lock(&gpio->lock);
 	change = (gpio->status ^ status) & gpio->irq_enabled;
+	gpio->status = status;
+	mutex_unlock(&gpio->lock);
+
 	for_each_set_bit(i, &change, gpio->chip.ngpio)
 		handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));
-	gpio->status = status;
-
-	spin_unlock_irqrestore(&gpio->slock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -293,7 +290,6 @@
 		return -ENOMEM;
 
 	mutex_init(&gpio->lock);
-	spin_lock_init(&gpio->slock);
 
 	gpio->chip.base			= pdata ? pdata->gpio_base : -1;
 	gpio->chip.can_sleep		= true;
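The pcf857x change can drop the extra spinlock because the demux runs from a threaded handler, where sleeping locks are legal, so the existing mutex covers the shared state. A hedged sketch of that pattern, with hypothetical demo_* names standing in for the driver's own types:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>

struct demo_expander {
	struct gpio_chip chip;
	struct mutex lock;			/* protects status and irq_enabled */
	unsigned long status;
	unsigned long irq_enabled;
	unsigned long (*read)(struct demo_expander *gpio);
};

/* Registered with request_threaded_irq(), so this runs in process context
 * and may take the mutex; no separate spinlock is needed. */
static irqreturn_t demo_irq_thread(int irq, void *data)
{
	struct demo_expander *gpio = data;
	unsigned long change, i, status = gpio->read(gpio);

	mutex_lock(&gpio->lock);
	change = (gpio->status ^ status) & gpio->irq_enabled;
	gpio->status = status;
	mutex_unlock(&gpio->lock);

	for_each_set_bit(i, &change, gpio->chip.ngpio)
		handle_nested_irq(irq_find_mapping(gpio->chip.irqdomain, i));

	return IRQ_HANDLED;
}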
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 2d9a950..34ed176 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -281,9 +281,9 @@
 
 	/* And the handler */
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 unlock:
 	spin_unlock_irqrestore(&chip->spinlock, flags);
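The pch, tegra and zynq hunks move from the removed __irq_set_handler_locked(d->irq, ...) helpers to the irq_data based irq_set_handler_locked() and irq_set_chip_handler_name_locked(), which avoid an irq-number lookup inside a callback that already holds the descriptor lock. A minimal .irq_set_type sketch (hypothetical driver) using the new helper:

#include <linux/irq.h>

static int demo_irq_set_type(struct irq_data *d, unsigned int type)
{
	/* Called with the irq descriptor locked, hence the _locked variant. */
	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return 0;
}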
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index cdbbcf0..55a11de 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -524,7 +524,7 @@
 {
 	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 				 handle_edge_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_noprobe(irq);
 	return 0;
 }
 
@@ -643,20 +643,20 @@
 			irq = gpio_to_irq(0);
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		if (irq1 > 0) {
 			irq = gpio_to_irq(1);
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 
 		for (irq  = gpio_to_irq(gpio_offset);
 			irq <= gpio_to_irq(pxa_last_gpio); irq++) {
 			irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
 						 handle_edge_irq);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 	}
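The ARM-only set_irq_flags() helper is going away across the tree; the pxa, sa1100, sta2x11 and timberdale hunks manipulate the generic IRQ status bits instead. Depending on the path, the conversion either clears IRQ_NOREQUEST and IRQ_NOPROBE explicitly or, where irqdomain mapping already leaves the line requestable, only disables autoprobing. A small sketch of both forms (illustrative function names):

#include <linux/irq.h>

static void demo_mark_irq_usable(unsigned int irq)
{
	/* old form: set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); */
	irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
}

static void demo_map_irq(unsigned int irq)
{
	/* mapped descriptors are already requestable; just forbid autoprobe */
	irq_set_noprobe(irq);
}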
 
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 1e14a6c..2a81224 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -251,17 +251,32 @@
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-	return pinctrl_request_gpio(chip->base + offset);
+	struct gpio_rcar_priv *p = gpio_to_priv(chip);
+	int error;
+
+	error = pm_runtime_get_sync(&p->pdev->dev);
+	if (error < 0)
+		return error;
+
+	error = pinctrl_request_gpio(chip->base + offset);
+	if (error)
+		pm_runtime_put(&p->pdev->dev);
+
+	return error;
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
+	struct gpio_rcar_priv *p = gpio_to_priv(chip);
+
 	pinctrl_free_gpio(chip->base + offset);
 
 	/* Set the GPIO as an input to ensure that the next GPIO request won't
 	 * drive the GPIO pin as an output.
 	 */
 	gpio_rcar_config_general_input_output_mode(chip, offset, false);
+
+	pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -327,6 +342,10 @@
 		.compatible = "renesas,gpio-r8a7794",
 		.data = &gpio_rcar_info_gen2,
 	}, {
+		.compatible = "renesas,gpio-r8a7795",
+		/* Gen3 GPIO is identical to Gen2. */
+		.data = &gpio_rcar_info_gen2,
+	}, {
 		.compatible = "renesas,gpio-rcar",
 		.data = &gpio_rcar_info_gen1,
 	}, {
@@ -405,7 +424,6 @@
 	}
 
 	pm_runtime_enable(dev);
-	pm_runtime_get_sync(dev);
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -487,7 +505,6 @@
 err1:
 	gpiochip_remove(gpio_chip);
 err0:
-	pm_runtime_put(dev);
 	pm_runtime_disable(dev);
 	return ret;
 }
@@ -498,7 +515,6 @@
 
 	gpiochip_remove(&p->gpio_chip);
 
-	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return 0;
 }
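Instead of holding a runtime-PM reference for the whole lifetime of the device, gpio-rcar now takes it in .request and drops it in .free, so the block is only powered while at least one of its GPIOs is in use. A sketch of that pairing with illustrative demo_* names:

#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>

struct demo_priv {
	struct gpio_chip chip;
	struct device *dev;
};

static int demo_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct demo_priv *p = container_of(chip, struct demo_priv, chip);
	int error;

	error = pm_runtime_get_sync(p->dev);	/* power the block while in use */
	if (error < 0)
		return error;

	error = pinctrl_request_gpio(chip->base + offset);
	if (error)
		pm_runtime_put(p->dev);		/* balance the reference on failure */

	return error;
}

static void demo_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct demo_priv *p = container_of(chip, struct demo_priv, chip);

	pinctrl_free_gpio(chip->base + offset);
	pm_runtime_put(p->dev);
}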
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 3fa22da..67bd2f5 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -155,7 +155,7 @@
 {
 	irq_set_chip_and_handler(irq, &sa1100_gpio_irq_chip,
 				 handle_edge_irq);
-	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+	irq_set_noprobe(irq);
 
 	return 0;
 }
@@ -173,9 +173,9 @@
  * and call the handler.
  */
 static void
-sa1100_gpio_handler(unsigned int irq, struct irq_desc *desc)
+sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	unsigned int mask;
+	unsigned int irq, mask;
 
 	mask = GEDR;
 	do {
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 18579ac..55e47828 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -346,7 +346,7 @@
 			i = chip->irq_base + j;
 			irq_set_chip_and_handler(i, &ct->chip, ct->handler);
 			irq_set_chip_data(i, gc);
-			irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+			irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		gc->irq_cnt = i - gc->irq_base;
 	}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 31b244c..d1d585d 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -102,7 +102,7 @@
 static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
@@ -130,7 +130,7 @@
 static void tc3589x_gpio_irq_lock(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 
 	mutex_lock(&tc3589x_gpio->irq_lock);
 }
@@ -138,7 +138,7 @@
 static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	static const u8 regmap[] = {
 		[REG_IBE]	= TC3589x_GPIOIBE0,
@@ -167,7 +167,7 @@
 static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
@@ -178,7 +178,7 @@
 static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-	struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip);
+	struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(gc);
 	int offset = d->hwirq;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b25c90..9b14aaf 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -252,9 +252,9 @@
 	tegra_gpio_enable(gpio);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
@@ -268,16 +268,14 @@
 
 static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct tegra_gpio_bank *bank;
 	int port;
 	int pin;
 	int unmasked = 0;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct tegra_gpio_bank *bank = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(chip, desc);
 
-	bank = irq_get_handler_data(irq);
-
 	for (port = 0; port < 4; port++) {
 		int gpio = tegra_gpio_compose(bank->bank, port, 0);
 		unsigned long sta = tegra_gpio_readl(GPIO_INT_STA(gpio)) &
@@ -509,7 +507,6 @@
 		irq_set_chip_data(irq, bank);
 		irq_set_chip_and_handler(irq, &tegra_gpio_irq_chip,
 					 handle_simple_irq);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	for (i = 0; i < tegra_gpio_bank_count; i++) {
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index e8f97e0..5a49205 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -194,11 +194,12 @@
 
 static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct timbgpio *tgpio = irq_get_handler_data(irq);
+	struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	unsigned long ipr;
 	int offset;
 
-	desc->irq_data.chip->irq_ack(irq_get_irq_data(irq));
+	data->chip->irq_ack(data);
 	ipr = ioread32(tgpio->membase + TGPIO_IPR);
 	iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
@@ -294,13 +295,10 @@
 		irq_set_chip_and_handler(tgpio->irq_base + i,
 			&timbgpio_irqchip, handle_simple_irq);
 		irq_set_chip_data(tgpio->irq_base + i, tgpio);
-#ifdef CONFIG_ARM
-		set_irq_flags(tgpio->irq_base + i, IRQF_VALID | IRQF_PROBE);
-#endif
+		irq_clear_status_flags(tgpio->irq_base + i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
-	irq_set_handler_data(irq, tgpio);
-	irq_set_chained_handler(irq, timbgpio_irq);
+	irq_set_chained_handler_and_data(irq, timbgpio_irq, tgpio);
 
 	return 0;
 }
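timberdale, tz1090 and the gpiolib core all switch to irq_set_chained_handler_and_data(), which installs the flow handler and its handler data in one step so the demux can never run before its data is set. A sketch of the pattern (hypothetical names; the handler prototype matches the one still used in this tree):

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);
	/* read the hardware status via priv and generic_handle_irq() the
	 * mapped children here */
	(void)priv;
	chained_irq_exit(chip, desc);
}

static void demo_setup_parent(unsigned int parent_irq, void *priv)
{
	irq_set_chained_handler_and_data(parent_irq, demo_demux_handler, priv);
}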
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index 445660a..bbac92a 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -510,8 +510,8 @@
 	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;
 
 	/* Setup chained handler for this GPIO bank */
-	irq_set_handler_data(bank->irq, bank);
-	irq_set_chained_handler(bank->irq, tz1090_gpio_irq_handler);
+	irq_set_chained_handler_and_data(bank->irq, tz1090_gpio_irq_handler,
+					 bank);
 
 	return 0;
 }
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7bd9f20..3d5714d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -60,6 +60,8 @@
 #define PORT_INT_EITHER_EDGE	0xb
 #define PORT_INT_LOGIC_ONE	0xc
 
+static struct irq_chip vf610_gpio_irq_chip;
+
 static const struct of_device_id vf610_gpio_dt_ids[] = {
 	{ .compatible = "fsl,vf610-gpio" },
 	{ /* sentinel */ }
@@ -120,7 +122,7 @@
 
 static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
 {
-	struct vf610_gpio_port *port = irq_get_handler_data(irq);
+	struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int pin;
 	unsigned long irq_isfr;
@@ -173,6 +175,11 @@
 
 	port->irqc[d->hwirq] = irqc;
 
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_handler_locked(d->irq, handle_level_irq);
+	else
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
+
 	return 0;
 }
 
@@ -263,7 +270,7 @@
 	vf610_gpio_writel(~0, port->base + PORT_ISFR);
 
 	ret = gpiochip_irqchip_add(gc, &vf610_gpio_irq_chip, 0,
-				   handle_simple_irq, IRQ_TYPE_NONE);
+				   handle_edge_irq, IRQ_TYPE_NONE);
 	if (ret) {
 		dev_err(dev, "failed to add irqchip\n");
 		gpiochip_remove(gc);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 9bdab72..e02499a 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -387,7 +387,7 @@
 	irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
 	if (irq_base < 0) {
 		dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
-		return err;
+		return -ENODEV;
 	}
 
 	err = gpiochip_add(gc);
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
new file mode 100644
index 0000000..12ee196
--- /dev/null
+++ b/drivers/gpio/gpio-zx.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define ZX_GPIO_DIR	0x00
+#define ZX_GPIO_IVE	0x04
+#define ZX_GPIO_IV	0x08
+#define ZX_GPIO_IEP	0x0C
+#define ZX_GPIO_IEN	0x10
+#define ZX_GPIO_DI	0x14
+#define ZX_GPIO_DO1	0x18
+#define ZX_GPIO_DO0	0x1C
+#define ZX_GPIO_DO	0x20
+
+#define ZX_GPIO_IM	0x28
+#define ZX_GPIO_IE	0x2C
+
+#define ZX_GPIO_MIS	0x30
+#define ZX_GPIO_IC	0x34
+
+#define ZX_GPIO_NR	16
+
+struct zx_gpio {
+	spinlock_t		lock;
+
+	void __iomem		*base;
+	struct gpio_chip	gc;
+	bool			uses_pinctrl;
+};
+
+static inline struct zx_gpio *to_zx(struct gpio_chip *gc)
+{
+	return container_of(gc, struct zx_gpio, gc);
+}
+
+static int zx_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	int gpio = gc->base + offset;
+
+	if (chip->uses_pinctrl)
+		return pinctrl_request_gpio(gpio);
+	return 0;
+}
+
+static void zx_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	int gpio = gc->base + offset;
+
+	if (chip->uses_pinctrl)
+		pinctrl_free_gpio(gpio);
+}
+
+static int zx_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	unsigned long flags;
+	u16 gpiodir;
+
+	if (offset >= gc->ngpio)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+	gpiodir &= ~BIT(offset);
+	writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int zx_direction_output(struct gpio_chip *gc, unsigned offset,
+		int value)
+{
+	struct zx_gpio *chip = to_zx(gc);
+	unsigned long flags;
+	u16 gpiodir;
+
+	if (offset >= gc->ngpio)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	gpiodir = readw_relaxed(chip->base + ZX_GPIO_DIR);
+	gpiodir |= BIT(offset);
+	writew_relaxed(gpiodir, chip->base + ZX_GPIO_DIR);
+
+	if (value)
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+	else
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static int zx_get_value(struct gpio_chip *gc, unsigned offset)
+{
+	struct zx_gpio *chip = to_zx(gc);
+
+	return !!(readw_relaxed(chip->base + ZX_GPIO_DI) & BIT(offset));
+}
+
+static void zx_set_value(struct gpio_chip *gc, unsigned offset, int value)
+{
+	struct zx_gpio *chip = to_zx(gc);
+
+	if (value)
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO1);
+	else
+		writew_relaxed(BIT(offset), chip->base + ZX_GPIO_DO0);
+}
+
+static int zx_irq_type(struct irq_data *d, unsigned trigger)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	int offset = irqd_to_hwirq(d);
+	unsigned long flags;
+	u16 gpiois, gpioi_epos, gpioi_eneg, gpioiev;
+	u16 bit = BIT(offset);
+
+	if (offset < 0 || offset >= ZX_GPIO_NR)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chip->lock, flags);
+
+	gpioiev = readw_relaxed(chip->base + ZX_GPIO_IV);
+	gpiois = readw_relaxed(chip->base + ZX_GPIO_IVE);
+	gpioi_epos = readw_relaxed(chip->base + ZX_GPIO_IEP);
+	gpioi_eneg = readw_relaxed(chip->base + ZX_GPIO_IEN);
+
+	if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+		gpiois |= bit;
+		if (trigger & IRQ_TYPE_LEVEL_HIGH)
+			gpioiev |= bit;
+		else
+			gpioiev &= ~bit;
+	} else
+		gpiois &= ~bit;
+
+	if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+		gpioi_epos |= bit;
+		gpioi_eneg |= bit;
+	} else {
+		if (trigger & IRQ_TYPE_EDGE_RISING) {
+			gpioi_epos |= bit;
+			gpioi_eneg &= ~bit;
+		} else if (trigger & IRQ_TYPE_EDGE_FALLING) {
+			gpioi_eneg |= bit;
+			gpioi_epos &= ~bit;
+		}
+	}
+
+	writew_relaxed(gpiois, chip->base + ZX_GPIO_IVE);
+	writew_relaxed(gpioi_epos, chip->base + ZX_GPIO_IEP);
+	writew_relaxed(gpioi_eneg, chip->base + ZX_GPIO_IEN);
+	writew_relaxed(gpioiev, chip->base + ZX_GPIO_IV);
+	spin_unlock_irqrestore(&chip->lock, flags);
+
+	return 0;
+}
+
+static void zx_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+	unsigned long pending;
+	int offset;
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+	struct zx_gpio *chip = to_zx(gc);
+	struct irq_chip *irqchip = irq_desc_get_chip(desc);
+
+	chained_irq_enter(irqchip, desc);
+
+	pending = readw_relaxed(chip->base + ZX_GPIO_MIS);
+	writew_relaxed(pending, chip->base + ZX_GPIO_IC);
+	if (pending) {
+		for_each_set_bit(offset, &pending, ZX_GPIO_NR)
+			generic_handle_irq(irq_find_mapping(gc->irqdomain,
+							    offset));
+	}
+
+	chained_irq_exit(irqchip, desc);
+}
+
+static void zx_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+	u16 gpioie;
+
+	spin_lock(&chip->lock);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) | mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) & ~mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+	spin_unlock(&chip->lock);
+}
+
+static void zx_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zx_gpio *chip = to_zx(gc);
+	u16 mask = BIT(irqd_to_hwirq(d) % ZX_GPIO_NR);
+	u16 gpioie;
+
+	spin_lock(&chip->lock);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IM) & ~mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IM);
+	gpioie = readw_relaxed(chip->base + ZX_GPIO_IE) | mask;
+	writew_relaxed(gpioie, chip->base + ZX_GPIO_IE);
+	spin_unlock(&chip->lock);
+}
+
+static struct irq_chip zx_irqchip = {
+	.name		= "zx-gpio",
+	.irq_mask	= zx_irq_mask,
+	.irq_unmask	= zx_irq_unmask,
+	.irq_set_type	= zx_irq_type,
+};
+
+static int zx_gpio_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct zx_gpio *chip;
+	struct resource *res;
+	int irq, id, ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(chip->base))
+		return PTR_ERR(chip->base);
+
+	spin_lock_init(&chip->lock);
+	if (of_property_read_bool(dev->of_node, "gpio-ranges"))
+		chip->uses_pinctrl = true;
+
+	id = of_alias_get_id(dev->of_node, "gpio");
+	chip->gc.request = zx_gpio_request;
+	chip->gc.free = zx_gpio_free;
+	chip->gc.direction_input = zx_direction_input;
+	chip->gc.direction_output = zx_direction_output;
+	chip->gc.get = zx_get_value;
+	chip->gc.set = zx_set_value;
+	chip->gc.base = ZX_GPIO_NR * id;
+	chip->gc.ngpio = ZX_GPIO_NR;
+	chip->gc.label = dev_name(dev);
+	chip->gc.dev = dev;
+	chip->gc.owner = THIS_MODULE;
+
+	ret = gpiochip_add(&chip->gc);
+	if (ret)
+		return ret;
+
+	/*
+	 * irq_chip support
+	 */
+	writew_relaxed(0xffff, chip->base + ZX_GPIO_IM);
+	writew_relaxed(0, chip->base + ZX_GPIO_IE);
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "invalid IRQ\n");
+		gpiochip_remove(&chip->gc);
+		return -ENODEV;
+	}
+
+	ret = gpiochip_irqchip_add(&chip->gc, &zx_irqchip,
+				   0, handle_simple_irq,
+				   IRQ_TYPE_NONE);
+	if (ret) {
+		dev_err(dev, "could not add irqchip\n");
+		gpiochip_remove(&chip->gc);
+		return ret;
+	}
+	gpiochip_set_chained_irqchip(&chip->gc, &zx_irqchip,
+				     irq, zx_irq_handler);
+
+	platform_set_drvdata(pdev, chip);
+	dev_info(dev, "ZX GPIO chip registered\n");
+
+	return 0;
+}
+
+static const struct of_device_id zx_gpio_match[] = {
+	{
+		.compatible = "zte,zx296702-gpio",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, zx_gpio_match);
+
+static struct platform_driver zx_gpio_driver = {
+	.probe		= zx_gpio_probe,
+	.driver = {
+		.name	= "zx_gpio",
+		.of_match_table = of_match_ptr(zx_gpio_match),
+	},
+};
+
+module_platform_driver(zx_gpio_driver)
+
+MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
+MODULE_DESCRIPTION("ZTE ZX296702 GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index a788823..27348e7 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -441,10 +441,10 @@
 		       gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
 
 	if (type & IRQ_TYPE_LEVEL_MASK) {
-		__irq_set_chip_handler_name_locked(irq_data->irq,
+		irq_set_chip_handler_name_locked(irq_data,
 			&zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL);
 	} else {
-		__irq_set_chip_handler_name_locked(irq_data->irq,
+		irq_set_chip_handler_name_locked(irq_data,
 			&zynq_gpio_edge_irqchip, handle_level_irq, NULL);
 	}
 
@@ -518,7 +518,7 @@
 {
 	u32 int_sts, int_enb;
 	unsigned int bank_num;
-	struct zynq_gpio *gpio = irq_get_handler_data(irq);
+	struct zynq_gpio *gpio = irq_desc_get_handler_data(desc);
 	struct irq_chip *irqchip = irq_desc_get_chip(desc);
 
 	chained_irq_enter(irqchip, desc);
@@ -782,6 +782,12 @@
 }
 postcore_initcall(zynq_gpio_init);
 
+static void __exit zynq_gpio_exit(void)
+{
+	platform_driver_unregister(&zynq_gpio_driver);
+}
+module_exit(zynq_gpio_exit);
+
 MODULE_AUTHOR("Xilinx Inc.");
 MODULE_DESCRIPTION("Zynq GPIO driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 533fe5d..143a9bd 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -68,7 +68,7 @@
  * GPIO controller driver.
  *
  * Typically the returned offset is same as @pin, but if the GPIO
- * controller uses pin controller and the mapping is not contigous the
+ * controller uses pin controller and the mapping is not contiguous the
  * offset might be different.
  */
 static int acpi_gpiochip_pin_to_gpio_offset(struct gpio_chip *chip, int pin)
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 9a0ec48..fa6e3c8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -136,7 +136,6 @@
 {
 	struct device_node *chip_np;
 	enum of_gpio_flags xlate_flags;
-	struct gpio_desc *desc;
 	struct gg_data gg_data = {
 		.flags = &xlate_flags,
 	};
@@ -193,9 +192,7 @@
 	if (name && of_property_read_string(np, "line-name", name))
 		*name = np->name;
 
-	desc = gg_data.out_gpio;
-
-	return desc;
+	return gg_data.out_gpio;
 }
 
 /**
@@ -338,7 +335,7 @@
 EXPORT_SYMBOL(of_mm_gpiochip_remove);
 
 #ifdef CONFIG_PINCTRL
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
 {
 	struct device_node *np = chip->of_node;
 	struct of_phandle_args pinspec;
@@ -349,7 +346,7 @@
 	struct property *group_names;
 
 	if (!np)
-		return;
+		return 0;
 
 	group_names = of_find_property(np, group_names_propname, NULL);
 
@@ -361,11 +358,11 @@
 
 		pctldev = of_pinctrl_get(pinspec.np);
 		if (!pctldev)
-			break;
+			return -EPROBE_DEFER;
 
 		if (pinspec.args[2]) {
 			if (group_names) {
-				ret = of_property_read_string_index(np,
+				of_property_read_string_index(np,
 						group_names_propname,
 						index, &name);
 				if (strlen(name)) {
@@ -381,7 +378,7 @@
 					pinspec.args[1],
 					pinspec.args[2]);
 			if (ret)
-				break;
+				return ret;
 		} else {
 			/* npins == 0: special range */
 			if (pinspec.args[1]) {
@@ -411,32 +408,41 @@
 			ret = gpiochip_add_pingroup_range(chip, pctldev,
 						pinspec.args[0], name);
 			if (ret)
-				break;
+				return ret;
 		}
 	}
+
+	return 0;
 }
 
 #else
-static void of_gpiochip_add_pin_range(struct gpio_chip *chip) {}
+static int of_gpiochip_add_pin_range(struct gpio_chip *chip) { return 0; }
 #endif
 
-void of_gpiochip_add(struct gpio_chip *chip)
+int of_gpiochip_add(struct gpio_chip *chip)
 {
+	int status;
+
 	if ((!chip->of_node) && (chip->dev))
 		chip->of_node = chip->dev->of_node;
 
 	if (!chip->of_node)
-		return;
+		return 0;
 
 	if (!chip->of_xlate) {
 		chip->of_gpio_n_cells = 2;
 		chip->of_xlate = of_gpio_simple_xlate;
 	}
 
-	of_gpiochip_add_pin_range(chip);
+	status = of_gpiochip_add_pin_range(chip);
+	if (status)
+		return status;
+
 	of_node_get(chip->of_node);
 
 	of_gpiochip_scan_hogs(chip);
+
+	return 0;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
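The behavioural point of the gpiolib-of change: when the pin controller named by gpio-ranges has not probed yet, of_gpiochip_add_pin_range() now returns -EPROBE_DEFER instead of silently skipping the range, and of_gpiochip_add() propagates that so the whole chip registration is retried later. A generic sketch of the defer idiom in a hypothetical probe function:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct pinctrl_dev *pctldev;

	/* Dependency not bound yet: ask the driver core to retry us later. */
	pctldev = of_pinctrl_get(pdev->dev.of_node);
	if (!pctldev)
		return -EPROBE_DEFER;

	return 0;
}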
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f25dc88..980c1f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -190,7 +190,7 @@
  */
 static int gpiochip_add_to_list(struct gpio_chip *chip)
 {
-	struct list_head *pos = &gpio_chips;
+	struct list_head *pos;
 	struct gpio_chip *_chip;
 	int err = 0;
 
@@ -287,7 +287,13 @@
 	INIT_LIST_HEAD(&chip->pin_ranges);
 #endif
 
-	of_gpiochip_add(chip);
+	if (!chip->owner && chip->dev && chip->dev->driver)
+		chip->owner = chip->dev->driver->owner;
+
+	status = of_gpiochip_add(chip);
+	if (status)
+		goto err_remove_chip;
+
 	acpi_gpiochip_add(chip);
 
 	status = gpiochip_sysfs_register(chip);
@@ -443,8 +449,8 @@
 		 * The parent irqchip is already using the chip_data for this
 		 * irqchip, so our callbacks simply use the handler_data.
 		 */
-		irq_set_handler_data(parent_irq, gpiochip);
-		irq_set_chained_handler(parent_irq, parent_handler);
+		irq_set_chained_handler_and_data(parent_irq, parent_handler,
+						 gpiochip);
 
 		gpiochip->irq_parent = parent_irq;
 	}
@@ -456,12 +462,6 @@
 }
 EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip);
 
-/*
- * This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpiochip_irq_lock_class;
-
 /**
  * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip
  * @d: the irqdomain used by this irqchip
@@ -478,16 +478,17 @@
 	struct gpio_chip *chip = d->host_data;
 
 	irq_set_chip_data(irq, chip);
-	irq_set_lockdep_class(irq, &gpiochip_irq_lock_class);
+	/*
+	 * This lock class tells lockdep that GPIO irqs are in a different
+	 * category than their parents, so it won't report false recursion.
+	 */
+	irq_set_lockdep_class(irq, chip->lock_key);
 	irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler);
 	/* Chips that can sleep need nested thread handlers */
 	if (chip->can_sleep && !chip->irq_not_threaded)
 		irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	/*
 	 * No set-up of the hardware will happen if IRQ_TYPE_NONE
 	 * is passed as default type.
@@ -502,9 +503,6 @@
 {
 	struct gpio_chip *chip = d->host_data;
 
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, 0);
-#endif
 	if (chip->can_sleep)
 		irq_set_nested_thread(irq, 0);
 	irq_set_chip_and_handler(irq, NULL, NULL);
@@ -522,10 +520,14 @@
 {
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
+	if (!try_module_get(chip->owner))
+		return -ENODEV;
+
 	if (gpiochip_lock_as_irq(chip, d->hwirq)) {
 		chip_err(chip,
 			"unable to lock HW IRQ %lu for IRQ\n",
 			d->hwirq);
+		module_put(chip->owner);
 		return -EINVAL;
 	}
 	return 0;
@@ -536,6 +538,7 @@
 	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
 
 	gpiochip_unlock_as_irq(chip, d->hwirq);
+	module_put(chip->owner);
 }
 
 static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -584,6 +587,7 @@
  * @handler: the irq handler to use (often a predefined irq core function)
  * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
  * to have the core avoid setting up any default type in the hardware.
+ * @lock_key: lockdep class
  *
  * This function closely associates a certain irqchip with a certain
  * gpiochip, providing an irq domain to translate the local IRQs to
@@ -599,11 +603,12 @@
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * need to be open coded.
  */
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-			 struct irq_chip *irqchip,
-			 unsigned int first_irq,
-			 irq_flow_handler_t handler,
-			 unsigned int type)
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type,
+			  struct lock_class_key *lock_key)
 {
 	struct device_node *of_node;
 	unsigned int offset;
@@ -629,6 +634,7 @@
 	gpiochip->irq_handler = handler;
 	gpiochip->irq_default_type = type;
 	gpiochip->to_irq = gpiochip_to_irq;
+	gpiochip->lock_key = lock_key;
 	gpiochip->irqdomain = irq_domain_add_simple(of_node,
 					gpiochip->ngpio, first_irq,
 					&gpiochip_domain_ops, gpiochip);
@@ -636,8 +642,16 @@
 		gpiochip->irqchip = NULL;
 		return -EINVAL;
 	}
-	irqchip->irq_request_resources = gpiochip_irq_reqres;
-	irqchip->irq_release_resources = gpiochip_irq_relres;
+
+	/*
+	 * It is possible for a driver to override this, but only if the
+	 * alternative functions are both implemented.
+	 */
+	if (!irqchip->irq_request_resources &&
+	    !irqchip->irq_release_resources) {
+		irqchip->irq_request_resources = gpiochip_irq_reqres;
+		irqchip->irq_release_resources = gpiochip_irq_relres;
+	}
 
 	/*
 	 * Prepare the mapping since the irqchip shall be orthogonal to
@@ -658,7 +672,7 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
@@ -671,7 +685,7 @@
 /**
  * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping
  * @chip: the gpiochip to add the range for
- * @pinctrl: the dev_name() of the pin controller to map to
+ * @pctldev: the pin controller to map to
  * @gpio_offset: the start offset in the current gpio_chip number space
  * @pin_group: name of the pin group inside the pin controller
  */
@@ -1907,12 +1921,12 @@
  * dev, -ENOENT if no GPIO has been assigned to the requested function, or
  * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
  */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id,
+struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id,
 					 enum gpiod_flags flags)
 {
 	return gpiod_get_index(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL_GPL(__gpiod_get);
+EXPORT_SYMBOL_GPL(gpiod_get);
 
 /**
  * gpiod_get_optional - obtain an optional GPIO for a given GPIO function
@@ -1924,13 +1938,13 @@
  * the requested function it will return NULL. This is convenient for drivers
  * that need to handle optional GPIOs.
  */
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 						  const char *con_id,
 						  enum gpiod_flags flags)
 {
 	return gpiod_get_index_optional(dev, con_id, 0, flags);
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
 
 /**
@@ -1987,7 +2001,7 @@
  * requested function and/or index, or another IS_ERR() code if an error
  * occurred while trying to acquire the GPIO.
  */
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 					       const char *con_id,
 					       unsigned int idx,
 					       enum gpiod_flags flags)
@@ -2036,7 +2050,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_index);
+EXPORT_SYMBOL_GPL(gpiod_get_index);
 
 /**
  * fwnode_get_named_gpiod - obtain a GPIO from firmware node
@@ -2105,7 +2119,7 @@
  * specified index was assigned to the requested function it will return NULL.
  * This is convenient for drivers that need to handle optional GPIOs.
  */
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
 							const char *con_id,
 							unsigned int index,
 							enum gpiod_flags flags)
@@ -2120,7 +2134,7 @@
 
 	return desc;
 }
-EXPORT_SYMBOL_GPL(__gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
 
 /**
  * gpiod_hog - Hog the specified GPIO desc given the provided flags
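gpiochip_irqchip_add() becomes _gpiochip_irqchip_add() with an extra lock_class_key argument, which implies a wrapper in <linux/gpio/driver.h> (not part of these hunks) that allocates a static key per call site so lockdep can tell the different GPIO irqchips apart. A hedged sketch of how such a wrapper is commonly written; this is an assumption about the header, not code taken from the patch:

#include <linux/lockdep.h>

/* hypothetical reconstruction of the header-side wrapper */
#define demo_gpiochip_irqchip_add(...)				\
({								\
	static struct lock_class_key _key;			\
	_gpiochip_irqchip_add(__VA_ARGS__, &_key);		\
})

The same hunk also pins the provider module via try_module_get(chip->owner) in irq_request_resources, so a GPIO driver can no longer be unloaded while one of its lines is wired up as an interrupt.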
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d432348..aecb5d6 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -790,7 +790,7 @@
  * This function shuts down all the outputs that need to be shut down and
  * prepares them (if required) with the new mode.
  *
- * For compatability with legacy crtc helpers this should be called before
+ * For compatibility with legacy crtc helpers this should be called before
  * drm_atomic_helper_commit_planes(), which is what the default commit function
  * does. But drivers with different needs can group the modeset commits together
  * and do the plane commits at the end. This is useful for drivers doing runtime
@@ -815,7 +815,7 @@
  * This function enables all the outputs with the new configuration which had to
  * be turned off for the update.
  *
- * For compatability with legacy crtc helpers this should be called after
+ * For compatibility with legacy crtc helpers this should be called after
  * drm_atomic_helper_commit_planes(), which is what the default commit function
  * does. But drivers with different needs can group the modeset commits together
  * and do the plane commits at the end. This is useful for drivers doing runtime
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 2aaa3c8..00416f2 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -54,7 +54,7 @@
 }
 
 /* ADI recommended values for proper operation. */
-static const struct reg_default adv7511_fixed_registers[] = {
+static const struct reg_sequence adv7511_fixed_registers[] = {
 	{ 0x98, 0x03 },
 	{ 0x9a, 0xe0 },
 	{ 0x9c, 0x30 },
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8edcec8..ab64d68 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -730,7 +730,7 @@
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+			atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e304d4e..81adf89 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1883,6 +1883,7 @@
 	struct drm_property *force_audio_property;
 
 	/* hda/i915 audio component */
+	struct i915_audio_component *audio_component;
 	bool audio_component_registered;
 
 	uint32_t hw_context_size;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 84f91bc..4d631a9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4779,7 +4779,7 @@
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		ret = 0;
 	}
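The i915 hunks replace the old arch-specific atomic_set_mask() with the generic atomic_or() helper when flagging the GPU as wedged. A tiny sketch of the replacement (DEMO_WEDGED is an illustrative flag bit, not the driver's value):

#include <linux/atomic.h>

#define DEMO_WEDGED	(1 << 30)	/* illustrative flag bit */

static atomic_t demo_reset_counter = ATOMIC_INIT(0);

static void demo_mark_wedged(void)
{
	/* atomic_or() is the portable replacement for atomic_set_mask() */
	atomic_or(DEMO_WEDGED, &demo_reset_counter);
}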
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d94c92d..b5fb143 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2253,7 +2253,7 @@
 			kobject_uevent_env(&dev->primary->kdev->kobj,
 					   KOBJ_CHANGE, reset_done_event);
 		} else {
-			atomic_set_mask(I915_WEDGED, &error->reset_counter);
+			atomic_or(I915_WEDGED, &error->reset_counter);
 		}
 
 		/*
@@ -2381,7 +2381,7 @@
 	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
-		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
 				&dev_priv->gpu_error.reset_counter);
 
 		/*
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index dc32cf4..89c1a8ce 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -400,6 +400,9 @@
 	struct drm_connector *connector;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
 
 	connector = drm_select_eld(encoder, mode);
 	if (!connector)
@@ -420,6 +423,9 @@
 
 	if (dev_priv->display.audio_codec_enable)
 		dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
 }
 
 /**
@@ -429,13 +435,20 @@
  * The disable sequences must be performed before disabling the transcoder or
  * port.
  */
-void intel_audio_codec_disable(struct intel_encoder *encoder)
+void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
 {
-	struct drm_device *dev = encoder->base.dev;
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_audio_component *acomp = dev_priv->audio_component;
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	enum port port = intel_dig_port->port;
 
 	if (dev_priv->display.audio_codec_disable)
-		dev_priv->display.audio_codec_disable(encoder);
+		dev_priv->display.audio_codec_disable(intel_encoder);
+
+	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
+		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
 }
 
 /**
@@ -526,12 +539,16 @@
 				     struct device *hda_dev, void *data)
 {
 	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
 	if (WARN_ON(acomp->ops || acomp->dev))
 		return -EEXIST;
 
+	drm_modeset_lock_all(dev_priv->dev);
 	acomp->ops = &i915_audio_component_ops;
 	acomp->dev = i915_dev;
+	dev_priv->audio_component = acomp;
+	drm_modeset_unlock_all(dev_priv->dev);
 
 	return 0;
 }
@@ -540,9 +557,13 @@
 					struct device *hda_dev, void *data)
 {
 	struct i915_audio_component *acomp = data;
+	struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
 
+	drm_modeset_lock_all(dev_priv->dev);
 	acomp->ops = NULL;
 	acomp->dev = NULL;
+	dev_priv->audio_component = NULL;
+	drm_modeset_unlock_all(dev_priv->dev);
 }
 
 static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 1912cfc..598fdaf 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -455,7 +455,7 @@
 	cached_state->postdiv1 =
 			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
 	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
-	cached_state->vco_rate = __clk_get_rate(pll->clk_hw.clk);
+	cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
 }
 
 static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
index 9436ada..4542867 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c
@@ -791,7 +791,7 @@
 		if (nv10_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nvkm_error(subdev, "unknow offset nv10_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv10_ctx_regs %d\n", reg);
 	return -1;
 }
 
@@ -804,7 +804,7 @@
 		if (nv17_gr_ctx_regs[i] == reg)
 			return i;
 	}
-	nvkm_error(subdev, "unknow offset nv17_ctx_regs %d\n", reg);
+	nvkm_error(subdev, "unknown offset nv17_ctx_regs %d\n", reg);
 	return -1;
 }
 
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 7a03158..0af8bed 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -375,25 +375,17 @@
 		dev_info(&pdev->dev, "found backlight\n");
 	}
 
-	panel_mod->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
+	panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
+							 GPIOD_OUT_LOW);
 	if (IS_ERR(panel_mod->enable_gpio)) {
 		ret = PTR_ERR(panel_mod->enable_gpio);
-		if (ret != -ENOENT) {
-			dev_err(&pdev->dev, "failed to request enable GPIO\n");
-			goto fail_backlight;
-		}
-
-		/* Optional GPIO is not here, continue silently. */
-		panel_mod->enable_gpio = NULL;
-	} else {
-		ret = gpiod_direction_output(panel_mod->enable_gpio, 0);
-		if (ret < 0) {
-			dev_err(&pdev->dev, "failed to setup GPIO\n");
-			goto fail_backlight;
-		}
-		dev_info(&pdev->dev, "found enable GPIO\n");
+		dev_err(&pdev->dev, "failed to request enable GPIO\n");
+		goto fail_backlight;
 	}
 
+	if (panel_mod->enable_gpio)
+		dev_info(&pdev->dev, "found enable GPIO\n");
+
 	mod = &panel_mod->base;
 	pdev->dev.platform_data = mod;
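The tilcdc panel probe collapses the request-then-set-direction sequence into a single devm_gpiod_get_optional() call with GPIOD_OUT_LOW, relying on the gpiod API accepting a NULL descriptor when the optional GPIO is absent. A consumer-side sketch of the same pattern with hypothetical names:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int demo_panel_probe(struct platform_device *pdev)
{
	struct gpio_desc *enable;

	/* NULL if the firmware simply omits the "enable" GPIO, ERR_PTR on
	 * real failures such as probe deferral */
	enable = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(enable))
		return PTR_ERR(enable);

	/* gpiod calls on a NULL descriptor are no-ops, so no NULL check */
	gpiod_set_value_cansleep(enable, 1);

	return 0;
}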
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index ba33cf6..d0cbd5e 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -260,7 +260,7 @@
 /*
  * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
  * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
- * quite large for some blits, and pages don't need to be contingous.
+ * quite large for some blits, and pages don't need to be contiguous.
  */
 
 static int
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8f40692..6d02de6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -240,7 +240,7 @@
  * device-specific information.
  *
  * @sgt: Pointer to a struct sg_table with binding information
- * @num_regions: Number of regions with device-address contigous pages
+ * @num_regions: Number of regions with device-address contiguous pages
  */
 struct vmw_sg_table {
 	enum vmw_dma_map_mode mode;
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 00f2058..243f99a 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -915,8 +915,8 @@
 static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
-	struct irq_chip *chip = irq_get_chip(irq);
 
 	chained_irq_enter(chip, desc);
 
@@ -928,8 +928,8 @@
 static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	const int int_reg[] = { 4, 5, 8, 9};
-	struct irq_chip *chip = irq_get_chip(irq);
 
 	chained_irq_enter(chip, desc);
 
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index cc4c664..6ab51ae 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -251,6 +251,12 @@
 	---help---
 	Support for Ezkey BTC 8193 keyboard.
 
+config HID_GEMBIRD
+	tristate "Gembird Joypad"
+	depends on HID
+	---help---
+	Support for Gembird JPD-DualForce 2.
+
 config HID_HOLTEK
 	tristate "Holtek HID devices"
 	depends on USB_HID
@@ -480,6 +486,7 @@
 	  - Atmel panels
 	  - Cando dual touch panels
 	  - Chunghwa panels
+	  - CJTouch panels
 	  - CVTouch panels
 	  - Cypress TrueTouch panels
 	  - Elan Microelectronics touch panels
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 2f8a41d..e6441bc 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -36,6 +36,7 @@
 obj-$(CONFIG_HID_ELECOM)	+= hid-elecom.o
 obj-$(CONFIG_HID_ELO)		+= hid-elo.o
 obj-$(CONFIG_HID_EZKEY)		+= hid-ezkey.o
+obj-$(CONFIG_HID_GEMBIRD)	+= hid-gembird.o
 obj-$(CONFIG_HID_GT683R)	+= hid-gt683r.o
 obj-$(CONFIG_HID_GYRATION)	+= hid-gyration.o
 obj-$(CONFIG_HID_HOLTEK)	+= hid-holtek-kbd.o
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index b613d5a..bc3cec1 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -20,6 +20,7 @@
 #include <linux/input.h>
 #include <linux/hid.h>
 #include <linux/module.h>
+#include <linux/usb.h>
 
 #include "hid-ids.h"
 
@@ -57,10 +58,34 @@
 	return 1;
 }
 
+static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+		unsigned int *rsize)
+{
+	struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+	if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+		/* Change usage maximum and logical maximum from 0x7fff to
+		 * 0x2fff, so they don't exceed HID_MAX_USAGES */
+		switch (hdev->product) {
+		case USB_DEVICE_ID_CHICONY_ACER_SWITCH12:
+			if (*rsize >= 128 && rdesc[64] == 0xff && rdesc[65] == 0x7f
+					&& rdesc[69] == 0xff && rdesc[70] == 0x7f) {
+				hid_info(hdev, "Fixing up report descriptor\n");
+				rdesc[65] = rdesc[70] = 0x2f;
+			}
+			break;
+		}
+
+	}
+	return rdesc;
+}
+
+
 static const struct hid_device_id ch_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ch_devices);
@@ -68,6 +93,7 @@
 static struct hid_driver ch_driver = {
 	.name = "chicony",
 	.id_table = ch_devices,
+	.report_fixup = ch_switch12_report_fixup,
 	.input_mapping = ch_input_mapping,
 };
 module_hid_driver(ch_driver);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index e6fce23..70a11ac 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -427,6 +427,7 @@
 {
 	__u32 data;
 	unsigned n;
+	__u32 count;
 
 	data = item_udata(item);
 
@@ -490,6 +491,24 @@
 		if (item->size <= 2)
 			data = (parser->global.usage_page << 16) + data;
 
+		count = data - parser->local.usage_minimum;
+		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
+			/*
+			 * We do not warn if the name is not set, we are
+			 * actually pre-scanning the device.
+			 */
+			if (dev_name(&parser->device->dev))
+				hid_warn(parser->device,
+					 "ignoring exceeding usage max\n");
+			data = HID_MAX_USAGES - parser->local.usage_index +
+				parser->local.usage_minimum - 1;
+			if (data <= 0) {
+				hid_err(parser->device,
+					"no more usage index available\n");
+				return -1;
+			}
+		}
+
 		for (n = parser->local.usage_minimum; n <= data; n++)
 			if (hid_add_usage(parser, n)) {
 				dbg_hid("hid_add_usage failed\n");
@@ -705,8 +724,9 @@
 		hid->group = HID_GROUP_SENSOR_HUB;
 
 	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
-	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
-	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3_JP ||
+	    (hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3 ||
+	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP ||
+	     hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 ||
 	     hid->product == USB_DEVICE_ID_MS_POWER_COVER) &&
 	    hid->group == HID_GROUP_MULTITOUCH)
 		hid->group = HID_GROUP_GENERIC;
@@ -1807,6 +1827,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
@@ -1823,6 +1844,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
@@ -1905,8 +1927,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
@@ -2270,6 +2293,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
+	{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0401) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index a2dbbbe..7afc3fc 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -156,6 +156,7 @@
 	wait_queue_head_t wait;
 	u8 read_data[61];
 	u8 read_length;
+	u8 hwversion;
 	int xfer_status;
 	atomic_t read_avail;
 	atomic_t xfer_avail;
@@ -446,6 +447,24 @@
 	return data_length + 3;
 }
 
+static int cp2112_i2c_write_read_req(void *buf, u8 slave_address,
+				     u8 *addr, int addr_length,
+				     int read_length)
+{
+	struct cp2112_write_read_req_report *report = buf;
+
+	if (read_length < 1 || read_length > 512 ||
+	    addr_length > sizeof(report->target_address))
+		return -EINVAL;
+
+	report->report = CP2112_DATA_WRITE_READ_REQUEST;
+	report->slave_address = slave_address << 1;
+	report->length = cpu_to_be16(read_length);
+	report->target_address_length = addr_length;
+	memcpy(report->target_address, addr, addr_length);
+	return addr_length + 5;
+}
+
 static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
 			   int num)
 {
@@ -453,26 +472,46 @@
 	struct hid_device *hdev = dev->hdev;
 	u8 buf[64];
 	ssize_t count;
+	ssize_t read_length = 0;
+	u8 *read_buf = NULL;
 	unsigned int retries;
 	int ret;
 
 	hid_dbg(hdev, "I2C %d messages\n", num);
 
-	if (num != 1) {
+	if (num == 1) {
+		if (msgs->flags & I2C_M_RD) {
+			hid_dbg(hdev, "I2C read %#04x len %d\n",
+				msgs->addr, msgs->len);
+			read_length = msgs->len;
+			read_buf = msgs->buf;
+			count = cp2112_read_req(buf, msgs->addr, msgs->len);
+		} else {
+			hid_dbg(hdev, "I2C write %#04x len %d\n",
+				msgs->addr, msgs->len);
+			count = cp2112_i2c_write_req(buf, msgs->addr,
+						     msgs->buf, msgs->len);
+		}
+		if (count < 0)
+			return count;
+	} else if (dev->hwversion > 1 &&  /* no repeated start in rev 1 */
+		   num == 2 &&
+		   msgs[0].addr == msgs[1].addr &&
+		   !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) {
+		hid_dbg(hdev, "I2C write-read %#04x wlen %d rlen %d\n",
+			msgs[0].addr, msgs[0].len, msgs[1].len);
+		read_length = msgs[1].len;
+		read_buf = msgs[1].buf;
+		count = cp2112_i2c_write_read_req(buf, msgs[0].addr,
+				msgs[0].buf, msgs[0].len, msgs[1].len);
+		if (count < 0)
+			return count;
+	} else {
 		hid_err(hdev,
 			"Multi-message I2C transactions not supported\n");
 		return -EOPNOTSUPP;
 	}
 
-	if (msgs->flags & I2C_M_RD)
-		count = cp2112_read_req(buf, msgs->addr, msgs->len);
-	else
-		count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf,
-					     msgs->len);
-
-	if (count < 0)
-		return count;
-
 	ret = hid_hw_power(hdev, PM_HINT_FULLON);
 	if (ret < 0) {
 		hid_err(hdev, "power management error: %d\n", ret);
@@ -508,21 +547,34 @@
 		goto power_normal;
 	}
 
-	if (!(msgs->flags & I2C_M_RD))
-		goto finish;
-
-	ret = cp2112_read(dev, msgs->buf, msgs->len);
-	if (ret < 0)
-		goto power_normal;
-	if (ret != msgs->len) {
-		hid_warn(hdev, "short read: %d < %d\n", ret, msgs->len);
-		ret = -EIO;
-		goto power_normal;
+	for (count = 0; count < read_length;) {
+		ret = cp2112_read(dev, read_buf + count, read_length - count);
+		if (ret < 0)
+			goto power_normal;
+		if (ret == 0) {
+			hid_err(hdev, "read returned 0\n");
+			ret = -EIO;
+			goto power_normal;
+		}
+		count += ret;
+		if (count > read_length) {
+			/*
+			 * The hardware returned too much data.
+			 * This is mostly harmless because cp2112_read()
+			 * has a limit check so it didn't overrun our
+			 * buffer.  Nevertheless, we return an error
+			 * because something is seriously wrong and
+			 * it shouldn't go unnoticed.
+			 */
+			hid_err(hdev, "long read: %d > %zd\n",
+				ret, read_length - count + ret);
+			ret = -EIO;
+			goto power_normal;
+		}
 	}
 
-finish:
 	/* return the number of transferred messages */
-	ret = 1;
+	ret = num;
 
 power_normal:
 	hid_hw_power(hdev, PM_HINT_NORMAL);
@@ -537,7 +589,7 @@
 	struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
 	struct hid_device *hdev = dev->hdev;
 	u8 buf[64];
-	__be16 word;
+	__le16 word;
 	ssize_t count;
 	size_t read_length = 0;
 	unsigned int retries;
@@ -554,7 +606,7 @@
 		if (I2C_SMBUS_READ == read_write)
 			count = cp2112_read_req(buf, addr, read_length);
 		else
-			count = cp2112_write_req(buf, addr, data->byte, NULL,
+			count = cp2112_write_req(buf, addr, command, NULL,
 						 0);
 		break;
 	case I2C_SMBUS_BYTE_DATA:
@@ -569,7 +621,7 @@
 		break;
 	case I2C_SMBUS_WORD_DATA:
 		read_length = 2;
-		word = cpu_to_be16(data->word);
+		word = cpu_to_le16(data->word);
 
 		if (I2C_SMBUS_READ == read_write)
 			count = cp2112_write_read_req(buf, addr, read_length,
@@ -582,7 +634,7 @@
 		size = I2C_SMBUS_WORD_DATA;
 		read_write = I2C_SMBUS_READ;
 		read_length = 2;
-		word = cpu_to_be16(data->word);
+		word = cpu_to_le16(data->word);
 
 		count = cp2112_write_read_req(buf, addr, read_length, command,
 					      (u8 *)&word, 2);
@@ -675,7 +727,7 @@
 		data->byte = buf[0];
 		break;
 	case I2C_SMBUS_WORD_DATA:
-		data->word = be16_to_cpup((__be16 *)buf);
+		data->word = le16_to_cpup((__le16 *)buf);
 		break;
 	case I2C_SMBUS_BLOCK_DATA:
 		if (read_length > I2C_SMBUS_BLOCK_MAX) {
@@ -1030,6 +1082,7 @@
 	dev->adap.dev.parent	= &hdev->dev;
 	snprintf(dev->adap.name, sizeof(dev->adap.name),
 		 "CP2112 SMBus Bridge on hiddev%d", hdev->minor);
+	dev->hwversion = buf[2];
 	init_waitqueue_head(&dev->wait);
 
 	hid_device_io_start(hdev);
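
The rev-2+ path added above lets the CP2112 carry a standard write-then-read message pair as one transaction with a repeated start. A minimal sketch, assuming a hypothetical client driver (the register value and the example_read_reg name are illustrative, not part of this patch), of how such a transfer is issued through i2c_transfer():

#include <linux/i2c.h>

/* Illustrative only: read one register with a repeated-start transfer. */
static int example_read_reg(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{
			.addr  = client->addr,
			.flags = 0,		/* first message: write the register address */
			.len   = 1,
			.buf   = &reg,
		},
		{
			.addr  = client->addr,
			.flags = I2C_M_RD,	/* second message: read the value back */
			.len   = 1,
			.buf   = val,
		},
	};
	int ret = i2c_transfer(client->adapter, msgs, 2);

	if (ret < 0)
		return ret;
	return ret == 2 ? 0 : -EIO;
}

The be16-to-le16 switch further down in the same file follows from SMBus itself, which transfers the low byte of a word first.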
diff --git a/drivers/hid/hid-gembird.c b/drivers/hid/hid-gembird.c
new file mode 100644
index 0000000..e55e519
--- /dev/null
+++ b/drivers/hid/hid-gembird.c
@@ -0,0 +1,116 @@
+/*
+ *  HID driver for Gembird Joypad, "PC Game Controller"
+ *
+ *  Copyright (c) 2015 Red Hat, Inc
+ *  Copyright (c) 2015 Benjamin Tissoires
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define GEMBIRD_START_FAULTY_RDESC	8
+
+static const __u8 gembird_jpd_faulty_rdesc[] = {
+	0x75, 0x08,			/*   Report Size (8)		*/
+	0x95, 0x05,			/*   Report Count (5)		*/
+	0x15, 0x00,			/*   Logical Minimum (0)	*/
+	0x26, 0xff, 0x00,		/*   Logical Maximum (255)	*/
+	0x35, 0x00,			/*   Physical Minimum (0)	*/
+	0x46, 0xff, 0x00,		/*   Physical Maximum (255)	*/
+	0x09, 0x30,			/*   Usage (X)			*/
+	0x09, 0x31,			/*   Usage (Y)			*/
+	0x09, 0x32,			/*   Usage (Z)			*/
+	0x09, 0x32,			/*   Usage (Z)			*/
+	0x09, 0x35,			/*   Usage (Rz)			*/
+	0x81, 0x02,			/*   Input (Data,Var,Abs)	*/
+};
+
+/*
+ * we fix the report descriptor by:
+ * - marking the first Z axis as constant (so it is ignored by HID)
+ * - assign the original second Z to Rx
+ * - assign the original Rz to Ry
+ */
+static const __u8 gembird_jpd_fixed_rdesc[] = {
+	0x75, 0x08,			/*   Report Size (8)		*/
+	0x95, 0x02,			/*   Report Count (2)		*/
+	0x15, 0x00,			/*   Logical Minimum (0)	*/
+	0x26, 0xff, 0x00,		/*   Logical Maximum (255)	*/
+	0x35, 0x00,			/*   Physical Minimum (0)	*/
+	0x46, 0xff, 0x00,		/*   Physical Maximum (255)	*/
+	0x09, 0x30,			/*   Usage (X)			*/
+	0x09, 0x31,			/*   Usage (Y)			*/
+	0x81, 0x02,			/*   Input (Data,Var,Abs)	*/
+	0x95, 0x01,			/*   Report Count (1)		*/
+	0x09, 0x32,			/*   Usage (Z)			*/
+	0x81, 0x01,			/*   Input (Cnst,Arr,Abs)	*/
+	0x95, 0x02,			/*   Report Count (2)		*/
+	0x09, 0x33,			/*   Usage (Rx)			*/
+	0x09, 0x34,			/*   Usage (Ry)			*/
+	0x81, 0x02,			/*   Input (Data,Var,Abs)	*/
+};
+
+static __u8 *gembird_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+		unsigned int *rsize)
+{
+	__u8 *new_rdesc;
+	/* delta_size is > 0 */
+	size_t delta_size = sizeof(gembird_jpd_fixed_rdesc) -
+			    sizeof(gembird_jpd_faulty_rdesc);
+	size_t new_size = *rsize + delta_size;
+
+	if (*rsize >= 31 && !memcmp(&rdesc[GEMBIRD_START_FAULTY_RDESC],
+				    gembird_jpd_faulty_rdesc,
+				    sizeof(gembird_jpd_faulty_rdesc))) {
+		new_rdesc = devm_kzalloc(&hdev->dev, new_size, GFP_KERNEL);
+		if (new_rdesc == NULL)
+			return rdesc;
+
+		dev_info(&hdev->dev,
+			 "fixing Gembird JPD-DualForce 2 report descriptor.\n");
+
+		/* start by copying the end of the rdesc */
+		memcpy(new_rdesc + delta_size, rdesc, *rsize);
+
+		/* add the correct beginning */
+		memcpy(new_rdesc, rdesc, GEMBIRD_START_FAULTY_RDESC);
+
+		/* replace the faulty part with the fixed one */
+		memcpy(new_rdesc + GEMBIRD_START_FAULTY_RDESC,
+		       gembird_jpd_fixed_rdesc,
+		       sizeof(gembird_jpd_fixed_rdesc));
+
+		*rsize = new_size;
+		rdesc = new_rdesc;
+	}
+
+	return rdesc;
+}
+
+static const struct hid_device_id gembird_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD,
+			 USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, gembird_devices);
+
+static struct hid_driver gembird_driver = {
+	.name = "gembird",
+	.id_table = gembird_devices,
+	.report_fixup = gembird_report_fixup,
+};
+module_hid_driver(gembird_driver);
+
+MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
+MODULE_DESCRIPTION("HID Gembird joypad driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b3b225b..f769208 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -233,12 +233,17 @@
 #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE	0x1053
 #define USB_DEVICE_ID_CHICONY_WIRELESS2	0x1123
 #define USB_DEVICE_ID_CHICONY_AK1D	0x1125
+#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12	0x1421
 
 #define USB_VENDOR_ID_CHUNGHWAT		0x2247
 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH	0x0001
 
 #define USB_VENDOR_ID_CIDC		0x1677
 
+#define USB_VENDOR_ID_CJTOUCH		0x24b8
+#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020	0x0020
+#define USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040	0x0040
+
 #define USB_VENDOR_ID_CMEDIA		0x0d8c
 #define USB_DEVICE_ID_CM109		0x000e
 
@@ -358,6 +363,9 @@
 #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR	0x0001
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR	0x0002
 
+#define USB_VENDOR_ID_GEMBIRD			0x11ff
+#define USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2	0x3331
+
 #define USB_VENDOR_ID_GENERAL_TOUCH	0x0dfc
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
@@ -500,6 +508,9 @@
 #define USB_VENDOR_ID_IRTOUCHSYSTEMS	0x6615
 #define USB_DEVICE_ID_IRTOUCH_INFRARED_USB	0x0070
 
+#define USB_VENDOR_ID_ITE               0x048d
+#define USB_DEVICE_ID_ITE_LENOVO_YOGA   0x8386
+
 #define USB_VENDOR_ID_JABRA		0x0b0e
 #define USB_DEVICE_ID_JABRA_SPEAK_410	0x0412
 #define USB_DEVICE_ID_JABRA_SPEAK_510	0x0420
@@ -602,6 +613,7 @@
 #define USB_DEVICE_ID_LOGITECH_DUAL_ACTION	0xc216
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2	0xc218
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2	0xc219
+#define USB_DEVICE_ID_LOGITECH_G29_WHEEL	0xc24f
 #define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D	0xc283
 #define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO	0xc286
 #define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940	0xc287
@@ -666,8 +678,9 @@
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
 #define USB_DEVICE_ID_MS_TOUCH_COVER_2   0x07a7
 #define USB_DEVICE_ID_MS_TYPE_COVER_2    0x07a9
-#define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07dc
-#define USB_DEVICE_ID_MS_TYPE_COVER_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3    0x07dc
+#define USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP 0x07dd
+#define USB_DEVICE_ID_MS_TYPE_COVER_3    0x07de
 #define USB_DEVICE_ID_MS_POWER_COVER     0x07da
 
 #define USB_VENDOR_ID_MOJO		0x8282
@@ -925,7 +938,8 @@
 #define USB_DEVICE_ID_TOUCHPACK_RTS	0x1688
 
 #define USB_VENDOR_ID_TPV		0x25aa
-#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN	0x8883
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882	0x8882
+#define USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883	0x8883
 
 #define USB_VENDOR_ID_TURBOX		0x062a
 #define USB_DEVICE_ID_TURBOX_KEYBOARD	0x0201
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e3c6364..53aeaf6 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1166,8 +1166,11 @@
 
 	input_event(input, usage->type, usage->code, value);
 
-	if ((field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->type == EV_KEY))
+	if ((field->flags & HID_MAIN_ITEM_RELATIVE) &&
+	    usage->type == EV_KEY && value) {
+		input_sync(input);
 		input_event(input, usage->type, usage->code, 0);
+	}
 }
 
 void hidinput_report_event(struct hid_device *hid, struct hid_report *report)
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 4f59bff..e4bc6cb 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -37,6 +37,7 @@
 };
 
 struct lenovo_drvdata_cptkbd {
+	u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
 	bool fn_lock;
 	int sensitivity;
 };
@@ -146,10 +147,10 @@
 
 		switch (usage->hid & HID_USAGE) {
 		case 0x0000:
-			hid_map_usage(hi, usage, bit, max, EV_REL, 0x06);
+			hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
 			return 1;
 		case 0x0001:
-			hid_map_usage(hi, usage, bit, max, EV_REL, 0x08);
+			hid_map_usage(hi, usage, bit, max, EV_REL, REL_WHEEL);
 			return 1;
 		default:
 			return -1;
@@ -207,9 +208,12 @@
 	struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
 
 	ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
-	ret = lenovo_send_cmd_cptkbd(hdev, 0x02, cptkbd_data->sensitivity);
 	if (ret)
 		hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+
+	ret = lenovo_send_cmd_cptkbd(hdev, 0x02, cptkbd_data->sensitivity);
+	if (ret)
+		hid_err(hdev, "Sensitivity setting failed: %d\n", ret);
 }
 
 static ssize_t attr_fn_lock_show_cptkbd(struct device *dev,
@@ -313,6 +317,53 @@
 	return 0;
 }
 
+static int lenovo_event_cptkbd(struct hid_device *hdev,
+		struct hid_field *field, struct hid_usage *usage, __s32 value)
+{
+	struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+
+	/* "wheel" scroll events */
+	if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
+			usage->code == REL_HWHEEL)) {
+		/* Scroll events disable middle-click event */
+		cptkbd_data->middlebutton_state = 2;
+		return 0;
+	}
+
+	/* Middle click events */
+	if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
+		if (value == 1) {
+			cptkbd_data->middlebutton_state = 1;
+		} else if (value == 0) {
+			if (cptkbd_data->middlebutton_state == 1) {
+				/* No scrolling in between, send middle-click */
+				input_event(field->hidinput->input,
+					EV_KEY, BTN_MIDDLE, 1);
+				input_sync(field->hidinput->input);
+				input_event(field->hidinput->input,
+					EV_KEY, BTN_MIDDLE, 0);
+				input_sync(field->hidinput->input);
+			}
+			cptkbd_data->middlebutton_state = 0;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
+		struct hid_usage *usage, __s32 value)
+{
+	switch (hdev->product) {
+	case USB_DEVICE_ID_LENOVO_CUSBKBD:
+	case USB_DEVICE_ID_LENOVO_CBTKBD:
+		return lenovo_event_cptkbd(hdev, field, usage, value);
+	default:
+		return 0;
+	}
+}
+
 static int lenovo_features_set_tpkbd(struct hid_device *hdev)
 {
 	struct hid_report *report;
@@ -705,6 +756,7 @@
 		hid_warn(hdev, "Failed to switch middle button: %d\n", ret);
 
 	/* Set keyboard settings to known state */
+	cptkbd_data->middlebutton_state = 0;
 	cptkbd_data->fn_lock = true;
 	cptkbd_data->sensitivity = 0x05;
 	lenovo_features_set_cptkbd(hdev);
@@ -832,6 +884,7 @@
 	.probe = lenovo_probe,
 	.remove = lenovo_remove,
 	.raw_event = lenovo_raw_event,
+	.event = lenovo_event,
 	.report_fixup = lenovo_report_fixup,
 };
 module_hid_driver(lenovo_driver);
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 429340d..5332fb7 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -776,6 +776,8 @@
 		.driver_data = LG_FF },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2),
 		.driver_data = LG_FF },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL),
+		.driver_data = LG_FF4 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D),
 		.driver_data = LG_FF },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO),
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 32a596f..9aa3515 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -276,9 +276,11 @@
 		.driver_data = MS_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
 		.driver_data = MS_DUPLICATE_USAGES },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3),
 		.driver_data = MS_HIDINPUT },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP),
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP),
+		.driver_data = MS_HIDINPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
 		.driver_data = MS_HIDINPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER),
 		.driver_data = MS_HIDINPUT },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 7c81125..426b2f1 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1145,6 +1145,14 @@
 		MT_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
 			USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
 
+	/* CJTouch panels */
+	{ .driver_data = MT_CLS_NSMU,
+		MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
+			USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0020) },
+	{ .driver_data = MT_CLS_NSMU,
+		MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
+			USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040) },
+
 	/* CVTouch panels */
 	{ .driver_data = MT_CLS_NSMU,
 		MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
diff --git a/drivers/hid/hid-picolcd_backlight.c b/drivers/hid/hid-picolcd_backlight.c
index a32c5f8..808807a 100644
--- a/drivers/hid/hid-picolcd_backlight.c
+++ b/drivers/hid/hid-picolcd_backlight.c
@@ -94,8 +94,7 @@
 	struct backlight_device *bdev = data->backlight;
 
 	data->backlight = NULL;
-	if (bdev)
-		backlight_device_unregister(bdev);
+	backlight_device_unregister(bdev);
 }
 
 int picolcd_resume_backlight(struct picolcd_data *data)
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index 045f8eb..9628651 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -145,7 +145,6 @@
 	struct rc_dev *rdev = data->rc_dev;
 
 	data->rc_dev = NULL;
-	if (rdev)
-		rc_unregister_device(rdev);
+	rc_unregister_device(rdev);
 }
 
diff --git a/drivers/hid/hid-picolcd_lcd.c b/drivers/hid/hid-picolcd_lcd.c
index 89821c2..22dcbe1 100644
--- a/drivers/hid/hid-picolcd_lcd.c
+++ b/drivers/hid/hid-picolcd_lcd.c
@@ -92,8 +92,7 @@
 	struct lcd_device *ldev = data->lcd;
 
 	data->lcd = NULL;
-	if (ldev)
-		lcd_device_unregister(ldev);
+	lcd_device_unregister(ldev);
 }
 
 int picolcd_resume_lcd(struct picolcd_data *data)
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 4cf80bb..2c14812 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -33,10 +33,21 @@
 #define RMI_READ_DATA_PENDING		1
 #define RMI_STARTED			2
 
+#define RMI_SLEEP_NORMAL		0x0
+#define RMI_SLEEP_DEEP_SLEEP		0x1
+
 /* device flags */
 #define RMI_DEVICE			BIT(0)
 #define RMI_DEVICE_HAS_PHYS_BUTTONS	BIT(1)
 
+/*
+ * retrieve the ctrl registers
+ * the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
+ * and there is no way to know if the first 20 bytes are here or not.
+ * We use only the first 12 bytes, so get only them.
+ */
+#define RMI_F11_CTRL_REG_COUNT		12
+
 enum rmi_mode_type {
 	RMI_MODE_OFF			= 0,
 	RMI_MODE_ATTN_REPORTS		= 1,
@@ -113,6 +124,8 @@
 	unsigned int max_y;
 	unsigned int x_size_mm;
 	unsigned int y_size_mm;
+	bool read_f11_ctrl_regs;
+	u8 f11_ctrl_regs[RMI_F11_CTRL_REG_COUNT];
 
 	unsigned int gpio_led_count;
 	unsigned int button_count;
@@ -126,6 +139,10 @@
 
 	unsigned long device_flags;
 	unsigned long firmware_id;
+
+	u8 f01_ctrl0;
+	u8 interrupt_enable_mask;
+	bool restore_interrupt_mask;
 };
 
 #define RMI_PAGE(addr) (((addr) >> 8) & 0xff)
@@ -346,13 +363,34 @@
 	}
 }
 
+static int rmi_reset_attn_mode(struct hid_device *hdev)
+{
+	struct rmi_data *data = hid_get_drvdata(hdev);
+	int ret;
+
+	ret = rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
+	if (ret)
+		return ret;
+
+	if (data->restore_interrupt_mask) {
+		ret = rmi_write(hdev, data->f01.control_base_addr + 1,
+				&data->interrupt_enable_mask);
+		if (ret) {
+			hid_err(hdev, "can not write F01 control register\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
 static void rmi_reset_work(struct work_struct *work)
 {
 	struct rmi_data *hdata = container_of(work, struct rmi_data,
 						reset_work);
 
 	/* switch the device to RMI if we receive a generic mouse report */
-	rmi_set_mode(hdata->hdev, RMI_MODE_ATTN_REPORTS);
+	rmi_reset_attn_mode(hdata->hdev);
 }
 
 static inline int rmi_schedule_reset(struct hid_device *hdev)
@@ -532,14 +570,77 @@
 }
 
 #ifdef CONFIG_PM
+static int rmi_set_sleep_mode(struct hid_device *hdev, int sleep_mode)
+{
+	struct rmi_data *data = hid_get_drvdata(hdev);
+	int ret;
+	u8 f01_ctrl0;
+
+	f01_ctrl0 = (data->f01_ctrl0 & ~0x3) | sleep_mode;
+
+	ret = rmi_write(hdev, data->f01.control_base_addr,
+			&f01_ctrl0);
+	if (ret) {
+		hid_err(hdev, "can not write sleep mode\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
+{
+	struct rmi_data *data = hid_get_drvdata(hdev);
+	int ret;
+	u8 buf[RMI_F11_CTRL_REG_COUNT];
+
+	ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
+				RMI_F11_CTRL_REG_COUNT);
+	if (ret)
+		hid_warn(hdev, "can not read F11 control registers\n");
+	else
+		memcpy(data->f11_ctrl_regs, buf, RMI_F11_CTRL_REG_COUNT);
+
+
+	if (!device_may_wakeup(hdev->dev.parent))
+		return rmi_set_sleep_mode(hdev, RMI_SLEEP_DEEP_SLEEP);
+
+	return 0;
+}
+
 static int rmi_post_reset(struct hid_device *hdev)
 {
-	return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
+	struct rmi_data *data = hid_get_drvdata(hdev);
+	int ret;
+
+	ret = rmi_reset_attn_mode(hdev);
+	if (ret) {
+		hid_err(hdev, "can not set rmi mode\n");
+		return ret;
+	}
+
+	if (data->read_f11_ctrl_regs) {
+		ret = rmi_write_block(hdev, data->f11.control_base_addr,
+				data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
+		if (ret)
+			hid_warn(hdev,
+				"can not write F11 control registers after reset\n");
+	}
+
+	if (!device_may_wakeup(hdev->dev.parent)) {
+		ret = rmi_set_sleep_mode(hdev, RMI_SLEEP_NORMAL);
+		if (ret) {
+			hid_err(hdev, "can not write sleep mode\n");
+			return ret;
+		}
+	}
+
+	return ret;
 }
 
 static int rmi_post_resume(struct hid_device *hdev)
 {
-	return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
+	return rmi_reset_attn_mode(hdev);
 }
 #endif /* CONFIG_PM */
 
@@ -595,6 +696,7 @@
 		f->interrupt_count = pdt_entry->interrupt_source_count;
 		f->irq_mask = rmi_gen_mask(f->interrupt_base,
 						f->interrupt_count);
+		data->interrupt_enable_mask |= f->irq_mask;
 	}
 }
 
@@ -732,6 +834,35 @@
 		data->firmware_id += info[2] * 65536;
 	}
 
+	ret = rmi_read_block(hdev, data->f01.control_base_addr, info,
+				2);
+
+	if (ret) {
+		hid_err(hdev, "can not read f01 ctrl registers\n");
+		return ret;
+	}
+
+	data->f01_ctrl0 = info[0];
+
+	if (!info[1]) {
+		/*
+		 * Due to a firmware bug in some touchpads, the F01 interrupt
+		 * enable control register will be cleared on reset.
+		 * This will stop the touchpad from reporting data, so
+		 * if F01 CTRL1 is 0 then we need to explicitly enable
+		 * interrupts for the functions we want data for.
+		 */
+		data->restore_interrupt_mask = true;
+
+		ret = rmi_write(hdev, data->f01.control_base_addr + 1,
+				&data->interrupt_enable_mask);
+		if (ret) {
+			hid_err(hdev, "can not write to control reg 1: %d.\n",
+				ret);
+			return ret;
+		}
+	}
+
 	return 0;
 }
 
@@ -904,24 +1035,23 @@
 	if (has_data40)
 		data->f11.report_size += data->max_fingers * 2;
 
-	/*
-	 * retrieve the ctrl registers
-	 * the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
-	 * and there is no way to know if the first 20 bytes are here or not.
-	 * We use only the first 12 bytes, so get only them.
-	 */
-	ret = rmi_read_block(hdev, data->f11.control_base_addr, buf, 12);
+	ret = rmi_read_block(hdev, data->f11.control_base_addr,
+			data->f11_ctrl_regs, RMI_F11_CTRL_REG_COUNT);
 	if (ret) {
 		hid_err(hdev, "can not read ctrl block of size 11: %d.\n", ret);
 		return ret;
 	}
 
-	data->max_x = buf[6] | (buf[7] << 8);
-	data->max_y = buf[8] | (buf[9] << 8);
+	/* data->f11_ctrl_regs now contains valid register data */
+	data->read_f11_ctrl_regs = true;
+
+	data->max_x = data->f11_ctrl_regs[6] | (data->f11_ctrl_regs[7] << 8);
+	data->max_y = data->f11_ctrl_regs[8] | (data->f11_ctrl_regs[9] << 8);
 
 	if (has_dribble) {
-		buf[0] = buf[0] & ~BIT(6);
-		ret = rmi_write(hdev, data->f11.control_base_addr, buf);
+		data->f11_ctrl_regs[0] = data->f11_ctrl_regs[0] & ~BIT(6);
+		ret = rmi_write(hdev, data->f11.control_base_addr,
+				data->f11_ctrl_regs);
 		if (ret) {
 			hid_err(hdev, "can not write to control reg 0: %d.\n",
 				ret);
@@ -930,9 +1060,9 @@
 	}
 
 	if (has_palm_detect) {
-		buf[11] = buf[11] & ~BIT(0);
+		data->f11_ctrl_regs[11] = data->f11_ctrl_regs[11] & ~BIT(0);
 		ret = rmi_write(hdev, data->f11.control_base_addr + 11,
-				&buf[11]);
+				&data->f11_ctrl_regs[11]);
 		if (ret) {
 			hid_err(hdev, "can not write to control reg 11: %d.\n",
 				ret);
@@ -1273,6 +1403,7 @@
 	.input_mapping		= rmi_input_mapping,
 	.input_configured	= rmi_input_configured,
 #ifdef CONFIG_PM
+	.suspend		= rmi_suspend,
 	.resume			= rmi_post_resume,
 	.reset_resume		= rmi_post_reset,
 #endif
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 090a1ba..a76eb2a 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -774,6 +774,9 @@
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS,
 			USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA),
 			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_ITE,
+			USB_DEVICE_ID_ITE_LENOVO_YOGA),
+			.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
 	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
 		     HID_ANY_ID) },
 	{ }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ed2f008..661f94f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -296,7 +296,14 @@
 	0x09, 0x01,         /*          Usage (Pointer),            */
 	0x81, 0x02,         /*          Input (Variable),           */
 	0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
-	0x95, 0x20,         /*          Report Count (26),          */
+	0x95, 0x01,         /*          Report Count (1),           */
+	0x81, 0x02,         /*          Input (Variable),           */
+	0x05, 0x01,         /*          Usage Page (Desktop),       */
+	0x95, 0x01,         /*          Report Count (1),           */
+	0x09, 0x01,         /*          Usage (Pointer),            */
+	0x81, 0x02,         /*          Input (Variable),           */
+	0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+	0x95, 0x1E,         /*          Report Count (24),          */
 	0x81, 0x02,         /*          Input (Variable),           */
 	0x75, 0x08,         /*          Report Size (8),            */
 	0x95, 0x30,         /*          Report Count (48),          */
@@ -1270,6 +1277,17 @@
 	 * has to be BYTE_SWAPPED before passing up to joystick interface
 	 */
 	if ((sc->quirks & SIXAXIS_CONTROLLER) && rd[0] == 0x01 && size == 49) {
+		/*
+		 * When connected via Bluetooth the Sixaxis occasionally sends
+		 * a report with the second byte 0xff and the rest zeroed.
+		 *
+		 * This report does not reflect the actual state of the
+		 * controller and must be ignored to avoid generating false input
+		 * events.
+		 */
+		if (rd[1] == 0xff)
+			return -EINVAL;
+
 		swap(rd[41], rd[42]);
 		swap(rd[43], rd[44]);
 		swap(rd[45], rd[46]);
@@ -1836,7 +1854,7 @@
 	} else {
 		memset(buf, 0, DS4_REPORT_0x11_SIZE);
 		buf[0] = 0x11;
-		buf[1] = 0xB0;
+		buf[1] = 0x80;
 		buf[3] = 0x0F;
 		offset = 6;
 	}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f77469d..2871f3c 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -149,6 +149,8 @@
 	int			irq;
 
 	struct i2c_hid_platform_data pdata;
+
+	bool			irq_wake_enabled;
 };
 
 static int __i2c_hid_command(struct i2c_client *client,
@@ -1091,14 +1093,21 @@
 	struct i2c_hid *ihid = i2c_get_clientdata(client);
 	struct hid_device *hid = ihid->hid;
 	int ret = 0;
-
-	disable_irq(ihid->irq);
-	if (device_may_wakeup(&client->dev))
-		enable_irq_wake(ihid->irq);
+	int wake_status;
 
 	if (hid->driver && hid->driver->suspend)
 		ret = hid->driver->suspend(hid, PMSG_SUSPEND);
 
+	disable_irq(ihid->irq);
+	if (device_may_wakeup(&client->dev)) {
+		wake_status = enable_irq_wake(ihid->irq);
+		if (!wake_status)
+			ihid->irq_wake_enabled = true;
+		else
+			hid_warn(hid, "Failed to enable irq wake: %d\n",
+				wake_status);
+	}
+
 	/* Save some power */
 	i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
 
@@ -1111,14 +1120,21 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct i2c_hid *ihid = i2c_get_clientdata(client);
 	struct hid_device *hid = ihid->hid;
+	int wake_status;
 
 	enable_irq(ihid->irq);
 	ret = i2c_hid_hwreset(client);
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(&client->dev))
-		disable_irq_wake(ihid->irq);
+	if (device_may_wakeup(&client->dev) && ihid->irq_wake_enabled) {
+		wake_status = disable_irq_wake(ihid->irq);
+		if (!wake_status)
+			ihid->irq_wake_enabled = false;
+		else
+			hid_warn(hid, "Failed to disable irq wake: %d\n",
+				wake_status);
+	}
 
 	if (hid->driver && hid->driver->reset_resume) {
 		ret = hid->driver->reset_resume(hid);
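
enable_irq_wake() and disable_irq_wake() have to stay balanced, which is why the change above only disarms a wake source it successfully armed. A driver-agnostic sketch of the same pairing (the example_priv structure and example_* names are illustrative, not part of i2c-hid):

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct example_priv {
	int irq;
	bool irq_wake_enabled;
};

static int example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	disable_irq(priv->irq);
	/* Arm the IRQ as a wake source only if the core accepts it. */
	if (device_may_wakeup(dev) && !enable_irq_wake(priv->irq))
		priv->irq_wake_enabled = true;
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	/* Disarm only what suspend actually armed, keeping the calls balanced. */
	if (priv->irq_wake_enabled && !disable_irq_wake(priv->irq))
		priv->irq_wake_enabled = false;
	enable_irq(priv->irq);
	return 0;
}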
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index bfbe1be..36712e9 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -164,7 +164,7 @@
 	if (time_after(jiffies, usbhid->stop_retry)) {
 
 		/* Retries failed, so do a port reset unless we lack bandwidth*/
-		if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+		if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
 		     && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
 
 			schedule_work(&usbhid->reset_work);
@@ -710,7 +710,8 @@
 		 * Wait 50 msec for the queue to empty before allowing events
 		 * to go through hid.
 		 */
-		msleep(50);
+		if (res == 0 && !(hid->quirks & HID_QUIRK_ALWAYS_POLL))
+			msleep(50);
 		clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
 	}
 done:
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 20f9a65..1dff8f0 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -90,8 +90,9 @@
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3, HID_QUIRK_NO_INIT_REPORTS },
+	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_PRO_3_JP, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
-	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
 	{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
@@ -117,7 +118,8 @@
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
-	{ USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8883, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index a533787..4681a65 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -113,7 +113,7 @@
 	struct mutex lock;
 	struct work_struct work;
 	struct wacom_led {
-		u8 select[2]; /* status led selector (0..3) */
+		u8 select[5]; /* status led selector (0..3) */
 		u8 llv;       /* status led brightness no button (1..127) */
 		u8 hlv;       /* status led brightness button pressed (1..127) */
 		u8 img_lum;   /* OLED matrix display brightness */
@@ -123,6 +123,8 @@
 	struct power_supply *ac;
 	struct power_supply_desc battery_desc;
 	struct power_supply_desc ac_desc;
+	struct kobject *remote_dir;
+	struct attribute_group remote_group[5];
 };
 
 static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
@@ -147,4 +149,7 @@
 		struct hid_usage *usage, __s32 value);
 void wacom_wac_report(struct hid_device *hdev, struct hid_report *report);
 void wacom_battery_work(struct work_struct *work);
+int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial,
+				   int index);
+void wacom_remote_destroy_attr_group(struct wacom *wacom, __u32 serial);
 #endif
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 01b937e..9a4912c 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -23,9 +23,13 @@
 #define WAC_CMD_ICON_XFER	0x23
 #define WAC_CMD_ICON_BT_XFER	0x26
 #define WAC_CMD_RETRIES		10
+#define WAC_CMD_DELETE_PAIRING	0x20
+#define WAC_CMD_UNPAIR_ALL	0xFF
+#define WAC_REMOTE_SERIAL_MAX_STRLEN	9
 
 #define DEV_ATTR_RW_PERM (S_IRUGO | S_IWUSR | S_IWGRP)
 #define DEV_ATTR_WO_PERM (S_IWUSR | S_IWGRP)
+#define DEV_ATTR_RO_PERM (S_IRUSR | S_IRGRP)
 
 static int wacom_get_report(struct hid_device *hdev, u8 type, u8 *buf,
 			    size_t size, unsigned int retries)
@@ -335,7 +339,7 @@
 		if (error >= 0)
 			error = wacom_get_report(hdev, HID_FEATURE_REPORT,
 			                         rep_data, length, 1);
-	} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
+	} while (error >= 0 && rep_data[1] != mode && limit++ < WAC_MSG_RETRIES);
 
 	kfree(rep_data);
 
@@ -453,12 +457,11 @@
 	 * interface number.
 	 */
 	if (features->type == WIRELESS) {
-		if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
+		if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
+			features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
+		else
 			features->device_type = WACOM_DEVICETYPE_NONE;
-		} else if (intf->cur_altsetting->desc.bInterfaceNumber == 2) {
-			features->device_type |= WACOM_DEVICETYPE_TOUCH;
-			features->pktlen = WACOM_PKGLEN_BBTOUCH3;
-		}
+		return;
 	}
 
 	wacom_parse_hid(hdev, features);
@@ -1120,6 +1123,189 @@
 static DEVICE_ATTR(speed, DEV_ATTR_RW_PERM,
 		wacom_show_speed, wacom_store_speed);
 
+
+static ssize_t wacom_show_remote_mode(struct kobject *kobj,
+				      struct kobj_attribute *kattr,
+				      char *buf, int index)
+{
+	struct device *dev = container_of(kobj->parent, struct device, kobj);
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	u8 mode;
+
+	mode = wacom->led.select[index];
+	if (mode >= 0 && mode < 3)
+		return snprintf(buf, PAGE_SIZE, "%d\n", mode);
+	else
+		return snprintf(buf, PAGE_SIZE, "%d\n", -1);
+}
+
+#define DEVICE_EKR_ATTR_GROUP(SET_ID)					\
+static ssize_t wacom_show_remote##SET_ID##_mode(struct kobject *kobj,	\
+			       struct kobj_attribute *kattr, char *buf)	\
+{									\
+	return wacom_show_remote_mode(kobj, kattr, buf, SET_ID);	\
+}									\
+static struct kobj_attribute remote##SET_ID##_mode_attr = {		\
+	.attr = {.name = "remote_mode",					\
+		.mode = DEV_ATTR_RO_PERM},				\
+	.show = wacom_show_remote##SET_ID##_mode,			\
+};									\
+static struct attribute *remote##SET_ID##_serial_attrs[] = {		\
+	&remote##SET_ID##_mode_attr.attr,				\
+	NULL								\
+};									\
+static struct attribute_group remote##SET_ID##_serial_group = {		\
+	.name = NULL,							\
+	.attrs = remote##SET_ID##_serial_attrs,				\
+}
+
+DEVICE_EKR_ATTR_GROUP(0);
+DEVICE_EKR_ATTR_GROUP(1);
+DEVICE_EKR_ATTR_GROUP(2);
+DEVICE_EKR_ATTR_GROUP(3);
+DEVICE_EKR_ATTR_GROUP(4);
+
+int wacom_remote_create_attr_group(struct wacom *wacom, __u32 serial, int index)
+{
+	int error = 0;
+	char *buf;
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+	wacom_wac->serial[index] = serial;
+
+	buf = kzalloc(WAC_REMOTE_SERIAL_MAX_STRLEN, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	snprintf(buf, WAC_REMOTE_SERIAL_MAX_STRLEN, "%d", serial);
+	wacom->remote_group[index].name = buf;
+
+	error = sysfs_create_group(wacom->remote_dir,
+				   &wacom->remote_group[index]);
+	if (error) {
+		hid_err(wacom->hdev,
+			"cannot create sysfs group err: %d\n", error);
+		kobject_put(wacom->remote_dir);
+		return error;
+	}
+
+	return 0;
+}
+
+void wacom_remote_destroy_attr_group(struct wacom *wacom, __u32 serial)
+{
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	int i;
+
+	if (!serial)
+		return;
+
+	for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+		if (wacom_wac->serial[i] == serial) {
+			wacom_wac->serial[i] = 0;
+			wacom->led.select[i] = WACOM_STATUS_UNKNOWN;
+			if (wacom->remote_group[i].name) {
+				sysfs_remove_group(wacom->remote_dir,
+						   &wacom->remote_group[i]);
+				kfree(wacom->remote_group[i].name);
+				wacom->remote_group[i].name = NULL;
+			}
+		}
+	}
+}
+
+static int wacom_cmd_unpair_remote(struct wacom *wacom, unsigned char selector)
+{
+	const size_t buf_size = 2;
+	unsigned char *buf;
+	int retval;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	buf[0] = WAC_CMD_DELETE_PAIRING;
+	buf[1] = selector;
+
+	retval = wacom_set_report(wacom->hdev, HID_OUTPUT_REPORT, buf,
+				  buf_size, WAC_CMD_RETRIES);
+	kfree(buf);
+
+	return retval;
+}
+
+static ssize_t wacom_store_unpair_remote(struct kobject *kobj,
+					 struct kobj_attribute *attr,
+					 const char *buf, size_t count)
+{
+	unsigned char selector = 0;
+	struct device *dev = container_of(kobj->parent, struct device, kobj);
+	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	int err;
+
+	if (!strncmp(buf, "*\n", 2)) {
+		selector = WAC_CMD_UNPAIR_ALL;
+	} else {
+		hid_info(wacom->hdev, "remote: unrecognized unpair code: %s\n",
+			 buf);
+		return -1;
+	}
+
+	mutex_lock(&wacom->lock);
+
+	err = wacom_cmd_unpair_remote(wacom, selector);
+	mutex_unlock(&wacom->lock);
+
+	return err < 0 ? err : count;
+}
+
+static struct kobj_attribute unpair_remote_attr = {
+	.attr = {.name = "unpair_remote", .mode = 0200},
+	.store = wacom_store_unpair_remote,
+};
+
+static const struct attribute *remote_unpair_attrs[] = {
+	&unpair_remote_attr.attr,
+	NULL
+};
+
+static int wacom_initialize_remote(struct wacom *wacom)
+{
+	int error = 0;
+	struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+	int i;
+
+	if (wacom->wacom_wac.features.type != REMOTE)
+		return 0;
+
+	wacom->remote_group[0] = remote0_serial_group;
+	wacom->remote_group[1] = remote1_serial_group;
+	wacom->remote_group[2] = remote2_serial_group;
+	wacom->remote_group[3] = remote3_serial_group;
+	wacom->remote_group[4] = remote4_serial_group;
+
+	wacom->remote_dir = kobject_create_and_add("wacom_remote",
+						   &wacom->hdev->dev.kobj);
+	if (!wacom->remote_dir)
+		return -ENOMEM;
+
+	error = sysfs_create_files(wacom->remote_dir, remote_unpair_attrs);
+
+	if (error) {
+		hid_err(wacom->hdev,
+			"cannot create sysfs group err: %d\n", error);
+		return error;
+	}
+
+	for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+		wacom->led.select[i] = WACOM_STATUS_UNKNOWN;
+		wacom_wac->serial[i] = 0;
+	}
+
+	return 0;
+}
+
 static struct input_dev *wacom_allocate_input(struct wacom *wacom)
 {
 	struct input_dev *input_dev;
@@ -1130,7 +1316,7 @@
 	if (!input_dev)
 		return NULL;
 
-	input_dev->name = wacom_wac->pen_name;
+	input_dev->name = wacom_wac->features.name;
 	input_dev->phys = hdev->phys;
 	input_dev->dev.parent = &hdev->dev;
 	input_dev->open = wacom_open;
@@ -1145,43 +1331,6 @@
 	return input_dev;
 }
 
-static void wacom_free_inputs(struct wacom *wacom)
-{
-	struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
-
-	if (wacom_wac->pen_input)
-		input_free_device(wacom_wac->pen_input);
-	if (wacom_wac->touch_input)
-		input_free_device(wacom_wac->touch_input);
-	if (wacom_wac->pad_input)
-		input_free_device(wacom_wac->pad_input);
-	wacom_wac->pen_input = NULL;
-	wacom_wac->touch_input = NULL;
-	wacom_wac->pad_input = NULL;
-}
-
-static int wacom_allocate_inputs(struct wacom *wacom)
-{
-	struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
-	struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
-
-	pen_input_dev = wacom_allocate_input(wacom);
-	touch_input_dev = wacom_allocate_input(wacom);
-	pad_input_dev = wacom_allocate_input(wacom);
-	if (!pen_input_dev || !touch_input_dev || !pad_input_dev) {
-		wacom_free_inputs(wacom);
-		return -ENOMEM;
-	}
-
-	wacom_wac->pen_input = pen_input_dev;
-	wacom_wac->touch_input = touch_input_dev;
-	wacom_wac->touch_input->name = wacom_wac->touch_name;
-	wacom_wac->pad_input = pad_input_dev;
-	wacom_wac->pad_input->name = wacom_wac->pad_name;
-
-	return 0;
-}
-
 static void wacom_clean_inputs(struct wacom *wacom)
 {
 	if (wacom->wacom_wac.pen_input) {
@@ -1202,12 +1351,33 @@
 		else
 			input_free_device(wacom->wacom_wac.pad_input);
 	}
+	if (wacom->remote_dir)
+		kobject_put(wacom->remote_dir);
 	wacom->wacom_wac.pen_input = NULL;
 	wacom->wacom_wac.touch_input = NULL;
 	wacom->wacom_wac.pad_input = NULL;
 	wacom_destroy_leds(wacom);
 }
 
+static int wacom_allocate_inputs(struct wacom *wacom)
+{
+	struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+
+	wacom_wac->pen_input = wacom_allocate_input(wacom);
+	wacom_wac->touch_input = wacom_allocate_input(wacom);
+	wacom_wac->pad_input = wacom_allocate_input(wacom);
+	if (!wacom_wac->pen_input || !wacom_wac->touch_input || !wacom_wac->pad_input) {
+		wacom_clean_inputs(wacom);
+		return -ENOMEM;
+	}
+
+	wacom_wac->pen_input->name = wacom_wac->pen_name;
+	wacom_wac->touch_input->name = wacom_wac->touch_name;
+	wacom_wac->pad_input->name = wacom_wac->pad_name;
+
+	return 0;
+}
+
 static int wacom_register_inputs(struct wacom *wacom)
 {
 	struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev;
@@ -1262,10 +1432,16 @@
 		error = wacom_initialize_leds(wacom);
 		if (error)
 			goto fail_leds;
+
+		error = wacom_initialize_remote(wacom);
+		if (error)
+			goto fail_remote;
 	}
 
 	return 0;
 
+fail_remote:
+	wacom_destroy_leds(wacom);
 fail_leds:
 	input_unregister_device(pad_input_dev);
 	pad_input_dev = NULL;
@@ -1556,11 +1732,9 @@
 	mutex_init(&wacom->lock);
 	INIT_WORK(&wacom->work, wacom_wireless_work);
 
-	if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
-		error = wacom_allocate_inputs(wacom);
-		if (error)
-			goto fail_allocate_inputs;
-	}
+	error = wacom_allocate_inputs(wacom);
+	if (error)
+		goto fail_allocate_inputs;
 
 	/*
 	 * Bamboo Pad has a generic hid handling for the Pen, and we switch it
@@ -1606,18 +1780,16 @@
 	if (error)
 		goto fail_shared_data;
 
-	if (!(features->quirks & WACOM_QUIRK_MONITOR) &&
+	if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
 	     (features->quirks & WACOM_QUIRK_BATTERY)) {
 		error = wacom_initialize_battery(wacom);
 		if (error)
 			goto fail_battery;
 	}
 
-	if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
-		error = wacom_register_inputs(wacom);
-		if (error)
-			goto fail_register_inputs;
-	}
+	error = wacom_register_inputs(wacom);
+	if (error)
+		goto fail_register_inputs;
 
 	if (hdev->bus == BUS_BLUETOOTH) {
 		error = device_create_file(&hdev->dev, &dev_attr_speed);
@@ -1640,7 +1812,7 @@
 	/* Note that if query fails it is not a hard failure */
 	wacom_query_tablet_data(hdev, features);
 
-	if (features->quirks & WACOM_QUIRK_MONITOR)
+	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
 		error = hid_hw_open(hdev);
 
 	if (wacom_wac->features.type == INTUOSHT && 
@@ -1714,7 +1886,6 @@
 	.id_table =	wacom_ids,
 	.probe =	wacom_probe,
 	.remove =	wacom_remove,
-	.event =	wacom_wac_event,
 	.report =	wacom_wac_report,
 #ifdef CONFIG_PM
 	.resume =	wacom_resume,
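
The remote support added above exposes a wacom_remote kobject under the HID device: one directory per paired remote, named by its decimal serial and holding a read-only remote_mode file, plus a write-only unpair_remote file that accepts "*\n". A hedged userspace sketch; the sysfs device path and the serial 123456 are made-up examples, the real ones depend on the attached hardware:

#include <stdio.h>

int main(void)
{
	/* Example path only; substitute the real HID device directory. */
	const char *base =
		"/sys/bus/hid/devices/0003:056A:0331.0001/wacom_remote";
	char path[256], mode[16];
	FILE *f;

	/* Each paired remote gets a directory named by its decimal serial. */
	snprintf(path, sizeof(path), "%s/123456/remote_mode", base);
	f = fopen(path, "r");
	if (f) {
		if (fgets(mode, sizeof(mode), f))
			printf("remote mode: %s", mode); /* -1 means unknown */
		fclose(f);
	}

	/* Writing "*" asks the tablet to delete all of its pairings. */
	snprintf(path, sizeof(path), "%s/unpair_remote", base);
	f = fopen(path, "w");
	if (f) {
		fputs("*\n", f);
		fclose(f);
	}
	return 0;
}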
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0d24423..0215ab62 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -125,61 +125,47 @@
 
 	prox = data[1] & 0x40;
 
-	if (prox) {
-		wacom->id[0] = ERASER_DEVICE_ID;
-		pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-		if (features->pressure_max > 255)
-			pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-		pressure += (features->pressure_max + 1) / 2;
-
-		/*
-		 * if going from out of proximity into proximity select between the eraser
-		 * and the pen based on the state of the stylus2 button, choose eraser if
-		 * pressed else choose pen. if not a proximity change from out to in, send
-		 * an out of proximity for previous tool then a in for new tool.
-		 */
-		if (!wacom->tool[0]) {
-			/* Eraser bit set for DTF */
-			if (data[1] & 0x10)
-				wacom->tool[1] = BTN_TOOL_RUBBER;
-			else
-				/* Going into proximity select tool */
-				wacom->tool[1] = (data[4] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
-		} else {
-			/* was entered with stylus2 pressed */
-			if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
-				/* report out proximity for previous tool */
-				input_report_key(input, wacom->tool[1], 0);
-				input_sync(input);
-				wacom->tool[1] = BTN_TOOL_PEN;
-				return 0;
-			}
+	if (!wacom->id[0]) {
+		if ((data[0] & 0x10) || (data[4] & 0x20)) {
+			wacom->tool[0] = BTN_TOOL_RUBBER;
+			wacom->id[0] = ERASER_DEVICE_ID;
 		}
-		if (wacom->tool[1] != BTN_TOOL_RUBBER) {
-			/* Unknown tool selected default to pen tool */
-			wacom->tool[1] = BTN_TOOL_PEN;
+		else {
+			wacom->tool[0] = BTN_TOOL_PEN;
 			wacom->id[0] = STYLUS_DEVICE_ID;
 		}
-		input_report_key(input, wacom->tool[1], prox); /* report in proximity for tool */
-		input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
-		input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-		input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-		input_report_abs(input, ABS_PRESSURE, pressure);
-
-		input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-		input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-		/* Only allow the stylus2 button to be reported for the pen tool. */
-		input_report_key(input, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20));
-	} else {
-		/* report proximity-out of a (valid) tool */
-		if (wacom->tool[1] != BTN_TOOL_RUBBER) {
-			/* Unknown tool selected default to pen tool */
-			wacom->tool[1] = BTN_TOOL_PEN;
-		}
-		input_report_key(input, wacom->tool[1], prox);
 	}
 
-	wacom->tool[0] = prox; /* Save proximity state */
+	/* If the eraser is in prox, STYLUS2 is always set. If we
+	 * mis-detected the type and notice that STYLUS2 isn't set
+	 * then force the eraser out of prox and let the pen in.
+	 */
+	if (wacom->tool[0] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) {
+		input_report_key(input, BTN_TOOL_RUBBER, 0);
+		input_report_abs(input, ABS_MISC, 0);
+		input_sync(input);
+		wacom->tool[0] = BTN_TOOL_PEN;
+		wacom->id[0] = STYLUS_DEVICE_ID;
+	}
+
+	pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+	if (features->pressure_max > 255)
+		pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+	pressure += (features->pressure_max + 1) / 2;
+
+	input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+	input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+	input_report_abs(input, ABS_PRESSURE, pressure);
+
+	input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+	input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+	/* Only allow the stylus2 button to be reported for the pen tool. */
+	input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+
+	if (!prox)
+		wacom->id[0] = 0;
+	input_report_key(input, wacom->tool[0], prox);
+	input_report_abs(input, ABS_MISC, wacom->id[0]);
 	return 1;
 }
 
@@ -645,6 +631,130 @@
 	return 0;
 }
 
+static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
+{
+	unsigned char *data = wacom_wac->data;
+	struct input_dev *input = wacom_wac->pad_input;
+	struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+	struct wacom_features *features = &wacom_wac->features;
+	int bat_charging, bat_percent, touch_ring_mode;
+	__u32 serial;
+	int i;
+
+	if (data[0] != WACOM_REPORT_REMOTE) {
+		dev_dbg(input->dev.parent,
+			"%s: received unknown report #%d", __func__, data[0]);
+		return 0;
+	}
+
+	serial = data[3] + (data[4] << 8) + (data[5] << 16);
+	wacom_wac->id[0] = PAD_DEVICE_ID;
+
+	input_report_key(input, BTN_0, (data[9] & 0x01));
+	input_report_key(input, BTN_1, (data[9] & 0x02));
+	input_report_key(input, BTN_2, (data[9] & 0x04));
+	input_report_key(input, BTN_3, (data[9] & 0x08));
+	input_report_key(input, BTN_4, (data[9] & 0x10));
+	input_report_key(input, BTN_5, (data[9] & 0x20));
+	input_report_key(input, BTN_6, (data[9] & 0x40));
+	input_report_key(input, BTN_7, (data[9] & 0x80));
+
+	input_report_key(input, BTN_8, (data[10] & 0x01));
+	input_report_key(input, BTN_9, (data[10] & 0x02));
+	input_report_key(input, BTN_A, (data[10] & 0x04));
+	input_report_key(input, BTN_B, (data[10] & 0x08));
+	input_report_key(input, BTN_C, (data[10] & 0x10));
+	input_report_key(input, BTN_X, (data[10] & 0x20));
+	input_report_key(input, BTN_Y, (data[10] & 0x40));
+	input_report_key(input, BTN_Z, (data[10] & 0x80));
+
+	input_report_key(input, BTN_BASE, (data[11] & 0x01));
+	input_report_key(input, BTN_BASE2, (data[11] & 0x02));
+
+	if (data[12] & 0x80)
+		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+	else
+		input_report_abs(input, ABS_WHEEL, 0);
+
+	bat_percent = data[7] & 0x7f;
+	bat_charging = !!(data[7] & 0x80);
+
+	if (data[9] | data[10] | (data[11] & 0x03) | data[12])
+		input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+	else
+		input_report_abs(input, ABS_MISC, 0);
+
+	input_event(input, EV_MSC, MSC_SERIAL, serial);
+
+	/*Which mode select (LED light) is currently on?*/
+	touch_ring_mode = (data[11] & 0xC0) >> 6;
+
+	for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+		if (wacom_wac->serial[i] == serial)
+			wacom->led.select[i] = touch_ring_mode;
+	}
+
+	if (!wacom->battery &&
+	    !(features->quirks & WACOM_QUIRK_BATTERY)) {
+		features->quirks |= WACOM_QUIRK_BATTERY;
+		INIT_WORK(&wacom->work, wacom_battery_work);
+		wacom_schedule_work(wacom_wac);
+	}
+
+	wacom_notify_battery(wacom_wac, bat_percent, bat_charging, 1,
+			     bat_charging);
+
+	return 1;
+}
+
+static int wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len)
+{
+	struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+	unsigned char *data = wacom_wac->data;
+	int i;
+
+	if (data[0] != WACOM_REPORT_DEVICE_LIST)
+		return 0;
+
+	for (i = 0; i < WACOM_MAX_REMOTES; i++) {
+		int j = i * 6;
+		int serial = (data[j+6] << 16) + (data[j+5] << 8) + data[j+4];
+		bool connected = data[j+2];
+
+		if (connected) {
+			int k;
+
+			if (wacom_wac->serial[i] == serial)
+				continue;
+
+			if (wacom_wac->serial[i]) {
+				wacom_remote_destroy_attr_group(wacom,
+							wacom_wac->serial[i]);
+			}
+
+			/* A remote can pair more than once with an EKR,
+			 * check to make sure this serial isn't already paired.
+			 */
+			for (k = 0; k < WACOM_MAX_REMOTES; k++) {
+				if (wacom_wac->serial[k] == serial)
+					break;
+			}
+
+			if (k < WACOM_MAX_REMOTES) {
+				wacom_wac->serial[i] = serial;
+				continue;
+			}
+			wacom_remote_create_attr_group(wacom, serial, i);
+
+		} else if (wacom_wac->serial[i]) {
+			wacom_remote_destroy_attr_group(wacom,
+							wacom_wac->serial[i]);
+		}
+	}
+
+	return 0;
+}
+
 static void wacom_intuos_general(struct wacom_wac *wacom)
 {
 	struct wacom_features *features = &wacom->features;
@@ -1437,6 +1547,12 @@
 	return 0;
 }
 
+static void wacom_wac_pen_pre_report(struct hid_device *hdev,
+		struct hid_report *report)
+{
+	return;
+}
+
 static void wacom_wac_pen_report(struct hid_device *hdev,
 		struct hid_report *report)
 {
@@ -1491,6 +1607,13 @@
 			wacom_map_usage(input, usage, field, EV_ABS,
 					ABS_MT_POSITION_Y, 4);
 		break;
+	case HID_DG_WIDTH:
+	case HID_DG_HEIGHT:
+		features->last_slot_field = usage->hid;
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MAJOR, 0);
+		wacom_map_usage(input, usage, field, EV_ABS, ABS_MT_TOUCH_MINOR, 0);
+		input_set_abs_params(input, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+		break;
 	case HID_DG_CONTACTID:
 		features->last_slot_field = usage->hid;
 		break;
@@ -1504,6 +1627,10 @@
 		features->last_slot_field = usage->hid;
 		wacom_map_usage(input, usage, field, EV_KEY, BTN_TOUCH, 0);
 		break;
+	case HID_DG_CONTACTCOUNT:
+		wacom_wac->hid_data.cc_index = field->index;
+		wacom_wac->hid_data.cc_value_index = usage->usage_index;
+		break;
 	}
 }
 
@@ -1515,6 +1642,10 @@
 	bool prox = hid_data->tipswitch &&
 		    !wacom_wac->shared->stylus_in_proximity;
 
+	wacom_wac->hid_data.num_received++;
+	if (wacom_wac->hid_data.num_received > wacom_wac->hid_data.num_expected)
+		return;
+
 	if (mt) {
 		int slot;
 
@@ -1531,6 +1662,13 @@
 				 hid_data->x);
 		input_report_abs(input, mt ? ABS_MT_POSITION_Y : ABS_Y,
 				 hid_data->y);
+
+		if (test_bit(ABS_MT_TOUCH_MAJOR, input->absbit)) {
+			input_report_abs(input, ABS_MT_TOUCH_MAJOR, max(hid_data->width, hid_data->height));
+			input_report_abs(input, ABS_MT_TOUCH_MINOR, min(hid_data->width, hid_data->height));
+			if (hid_data->width != hid_data->height)
+				input_report_abs(input, ABS_MT_ORIENTATION, hid_data->width <= hid_data->height ? 0 : 1);
+		}
 	}
 }
 
@@ -1547,6 +1685,12 @@
 	case HID_GD_Y:
 		wacom_wac->hid_data.y = value;
 		break;
+	case HID_DG_WIDTH:
+		wacom_wac->hid_data.width = value;
+		break;
+	case HID_DG_HEIGHT:
+		wacom_wac->hid_data.height = value;
+		break;
 	case HID_DG_CONTACTID:
 		wacom_wac->hid_data.id = value;
 		break;
@@ -1564,6 +1708,24 @@
 	return 0;
 }
 
+static void wacom_wac_finger_pre_report(struct hid_device *hdev,
+		struct hid_report *report)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct hid_data* hid_data = &wacom_wac->hid_data;
+
+	if (hid_data->cc_index >= 0) {
+		struct hid_field *field = report->field[hid_data->cc_index];
+		int value = field->value[hid_data->cc_value_index];
+		if (value)
+			hid_data->num_expected = value;
+	}
+	else {
+		hid_data->num_expected = wacom_wac->features.touch_max;
+	}
+}
+
 static void wacom_wac_finger_report(struct hid_device *hdev,
 		struct hid_report *report)
 {
@@ -1572,10 +1734,18 @@
 	struct input_dev *input = wacom_wac->touch_input;
 	unsigned touch_max = wacom_wac->features.touch_max;
 
+	/* If more packets of data are expected, give us a chance to
+	 * process them rather than immediately syncing a partial
+	 * update.
+	 */
+	if (wacom_wac->hid_data.num_received < wacom_wac->hid_data.num_expected)
+		return;
+
 	if (touch_max > 1)
 		input_mt_sync_frame(input);
 
 	input_sync(input);
+	wacom_wac->hid_data.num_received = 0;
 
 	/* keep touch state for pen event */
 	wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -1615,6 +1785,25 @@
 	return 0;
 }
 
+static void wacom_report_events(struct hid_device *hdev, struct hid_report *report)
+{
+	int r;
+
+	for (r = 0; r < report->maxfield; r++) {
+		struct hid_field *field;
+		unsigned count, n;
+
+		field = report->field[r];
+		count = field->report_count;
+
+		if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+			continue;
+
+		for (n = 0; n < count; n++)
+			wacom_wac_event(hdev, field, &field->usage[n], field->value[n]);
+	}
+}
+
 void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
@@ -1625,6 +1814,14 @@
 		return;
 
 	if (WACOM_PEN_FIELD(field))
+		wacom_wac_pen_pre_report(hdev, report);
+
+	if (WACOM_FINGER_FIELD(field))
+		wacom_wac_finger_pre_report(hdev, report);
+
+	wacom_report_events(hdev, report);
+
+	if (WACOM_PEN_FIELD(field))
 		return wacom_wac_pen_report(hdev, report);
 
 	if (WACOM_FINGER_FIELD(field))
@@ -1699,7 +1896,7 @@
 		int y = (data[3] << 4) | (data[4] & 0x0f);
 		int width, height;
 
-		if (features->type >= INTUOSPS && features->type <= INTUOSPL) {
+		if (features->type >= INTUOSPS && features->type <= INTUOSHT) {
 			width  = data[5] * 100;
 			height = data[6] * 100;
 		} else {
@@ -2118,6 +2315,13 @@
 		sync = wacom_wireless_irq(wacom_wac, len);
 		break;
 
+	case REMOTE:
+		if (wacom_wac->data[0] == WACOM_REPORT_DEVICE_LIST)
+			sync = wacom_remote_status_irq(wacom_wac, len);
+		else
+			sync = wacom_remote_irq(wacom_wac, len);
+		break;
+
 	default:
 		sync = false;
 		break;
@@ -2223,10 +2427,13 @@
 	 * 0, whose HID descriptor has an application usage of 0xFF0D
 	 * (i.e., WACOM_VENDORDEFINED_PEN). We route pen packets back
 	 * out through the HID_GENERIC device created for interface 1,
-	 * so rewrite this one to be of type BTN_TOOL_FINGER.
+	 * so rewrite this one to be of type WACOM_DEVICETYPE_TOUCH.
 	 */
 	if (features->type == BAMBOO_PAD)
-		features->device_type |= WACOM_DEVICETYPE_TOUCH;
+		features->device_type = WACOM_DEVICETYPE_TOUCH;
+
+	if (features->type == REMOTE)
+		features->device_type = WACOM_DEVICETYPE_PAD;
 
 	if (wacom->hdev->bus == BUS_BLUETOOTH)
 		features->quirks |= WACOM_QUIRK_BATTERY;
@@ -2242,13 +2449,7 @@
 	}
 
 	if (features->type == WIRELESS) {
-
-		/* monitor never has input and pen/touch have delayed create */
-		features->quirks |= WACOM_QUIRK_NO_INPUT;
-
-		/* must be monitor interface if no device_type set */
-		if (features->device_type == WACOM_DEVICETYPE_NONE) {
-			features->quirks |= WACOM_QUIRK_MONITOR;
+		if (features->device_type == WACOM_DEVICETYPE_WL_MONITOR) {
 			features->quirks |= WACOM_QUIRK_BATTERY;
 		}
 	}
@@ -2513,11 +2714,23 @@
 	return 0;
 }
 
+static void wacom_setup_numbered_buttons(struct input_dev *input_dev,
+				int button_count)
+{
+	int i;
+
+	for (i = 0; i < button_count && i < 10; i++)
+		__set_bit(BTN_0 + i, input_dev->keybit);
+	for (i = 10; i < button_count && i < 16; i++)
+		__set_bit(BTN_A + (i-10), input_dev->keybit);
+	for (i = 16; i < button_count && i < 18; i++)
+		__set_bit(BTN_BASE + (i-16), input_dev->keybit);
+}
+
 int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 				   struct wacom_wac *wacom_wac)
 {
 	struct wacom_features *features = &wacom_wac->features;
-	int i;
 
 	if (!(features->device_type & WACOM_DEVICETYPE_PAD))
 		return -ENODEV;
@@ -2534,10 +2747,14 @@
 	/* kept for making udev and libwacom accepting the pad */
 	__set_bit(BTN_STYLUS, input_dev->keybit);
 
+	wacom_setup_numbered_buttons(input_dev, features->numbered_buttons);
+
 	switch (features->type) {
+
+	case CINTIQ_HYBRID:
+	case DTK:
+	case DTUS:
 	case GRAPHIRE_BT:
-		__set_bit(BTN_0, input_dev->keybit);
-		__set_bit(BTN_1, input_dev->keybit);
 		break;
 
 	case WACOM_MO:
@@ -2555,16 +2772,6 @@
 		break;
 
 	case WACOM_24HD:
-		__set_bit(BTN_A, input_dev->keybit);
-		__set_bit(BTN_B, input_dev->keybit);
-		__set_bit(BTN_C, input_dev->keybit);
-		__set_bit(BTN_X, input_dev->keybit);
-		__set_bit(BTN_Y, input_dev->keybit);
-		__set_bit(BTN_Z, input_dev->keybit);
-
-		for (i = 0; i < 10; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
 		__set_bit(KEY_PROG1, input_dev->keybit);
 		__set_bit(KEY_PROG2, input_dev->keybit);
 		__set_bit(KEY_PROG3, input_dev->keybit);
@@ -2586,12 +2793,6 @@
 		__set_bit(INPUT_PROP_ACCELEROMETER, input_dev->propbit);
 		break;
 
-	case DTK:
-		for (i = 0; i < 6; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
-		break;
-
 	case WACOM_22HD:
 		__set_bit(KEY_PROG1, input_dev->keybit);
 		__set_bit(KEY_PROG2, input_dev->keybit);
@@ -2599,52 +2800,22 @@
 		/* fall through */
 
 	case WACOM_21UX2:
-		__set_bit(BTN_A, input_dev->keybit);
-		__set_bit(BTN_B, input_dev->keybit);
-		__set_bit(BTN_C, input_dev->keybit);
-		__set_bit(BTN_X, input_dev->keybit);
-		__set_bit(BTN_Y, input_dev->keybit);
-		__set_bit(BTN_Z, input_dev->keybit);
-		__set_bit(BTN_BASE, input_dev->keybit);
-		__set_bit(BTN_BASE2, input_dev->keybit);
-		/* fall through */
-
 	case WACOM_BEE:
-		__set_bit(BTN_8, input_dev->keybit);
-		__set_bit(BTN_9, input_dev->keybit);
-		/* fall through */
-
 	case CINTIQ:
-		for (i = 0; i < 8; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
 		input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
 		input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
 		break;
 
 	case WACOM_13HD:
-		for (i = 0; i < 9; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
 		input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
 		break;
 
 	case INTUOS3:
 	case INTUOS3L:
-		__set_bit(BTN_4, input_dev->keybit);
-		__set_bit(BTN_5, input_dev->keybit);
-		__set_bit(BTN_6, input_dev->keybit);
-		__set_bit(BTN_7, input_dev->keybit);
-
 		input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
 		/* fall through */
 
 	case INTUOS3S:
-		__set_bit(BTN_0, input_dev->keybit);
-		__set_bit(BTN_1, input_dev->keybit);
-		__set_bit(BTN_2, input_dev->keybit);
-		__set_bit(BTN_3, input_dev->keybit);
-
 		input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
 		break;
 
@@ -2652,15 +2823,8 @@
 	case INTUOS5L:
 	case INTUOSPM:
 	case INTUOSPL:
-		__set_bit(BTN_7, input_dev->keybit);
-		__set_bit(BTN_8, input_dev->keybit);
-		/* fall through */
-
 	case INTUOS5S:
 	case INTUOSPS:
-		for (i = 0; i < 7; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
 		input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
 		break;
 
@@ -2675,28 +2839,10 @@
 
 	case INTUOS4:
 	case INTUOS4L:
-		__set_bit(BTN_7, input_dev->keybit);
-		__set_bit(BTN_8, input_dev->keybit);
-		/* fall through */
-
 	case INTUOS4S:
-		for (i = 0; i < 7; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
 		input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
 		break;
 
-	case CINTIQ_HYBRID:
-		for (i = 0; i < 9; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-
-		break;
-
-	case DTUS:
-		for (i = 0; i < 4; i++)
-			__set_bit(BTN_0 + i, input_dev->keybit);
-		break;
-
 	case INTUOSHT:
 	case BAMBOO_PT:
 		__clear_bit(ABS_MISC, input_dev->absbit);
@@ -2708,6 +2854,11 @@
 
 		break;
 
+	case REMOTE:
+		input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
+		input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+		break;
+
 	default:
 		/* no pad supported */
 		return -ENODEV;
@@ -2723,7 +2874,7 @@
 	  GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
 static const struct wacom_features wacom_features_0x81 =
 	{ "Wacom Graphire BT", 16704, 12064, 511, 32,
-	  GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+	  GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES, 2 };
 static const struct wacom_features wacom_features_0x11 =
 	{ "Wacom Graphire2 4x5", 10206, 7422, 511, 63,
 	  GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
@@ -2849,77 +3000,77 @@
 	  INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0xB0 =
 	{ "Wacom Intuos3 4x5", 25400, 20320, 1023, 63,
-	  INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 4 };
 static const struct wacom_features wacom_features_0xB1 =
 	{ "Wacom Intuos3 6x8", 40640, 30480, 1023, 63,
-	  INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
 static const struct wacom_features wacom_features_0xB2 =
 	{ "Wacom Intuos3 9x12", 60960, 45720, 1023, 63,
-	  INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
 static const struct wacom_features wacom_features_0xB3 =
 	{ "Wacom Intuos3 12x12", 60960, 60960, 1023, 63,
-	  INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
 static const struct wacom_features wacom_features_0xB4 =
 	{ "Wacom Intuos3 12x19", 97536, 60960, 1023, 63,
-	  INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
 static const struct wacom_features wacom_features_0xB5 =
 	{ "Wacom Intuos3 6x11", 54204, 31750, 1023, 63,
-	  INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
 static const struct wacom_features wacom_features_0xB7 =
 	{ "Wacom Intuos3 4x6", 31496, 19685, 1023, 63,
-	  INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 4 };
 static const struct wacom_features wacom_features_0xB8 =
 	{ "Wacom Intuos4 4x6", 31496, 19685, 2047, 63,
-	  INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7 };
 static const struct wacom_features wacom_features_0xB9 =
 	{ "Wacom Intuos4 6x9", 44704, 27940, 2047, 63,
-	  INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
 static const struct wacom_features wacom_features_0xBA =
 	{ "Wacom Intuos4 8x13", 65024, 40640, 2047, 63,
-	  INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
 static const struct wacom_features wacom_features_0xBB =
 	{ "Wacom Intuos4 12x19", 97536, 60960, 2047, 63,
-	  INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
 static const struct wacom_features wacom_features_0xBC =
 	{ "Wacom Intuos4 WL", 40640, 25400, 2047, 63,
-	  INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
 static const struct wacom_features wacom_features_0xBD =
 	{ "Wacom Intuos4 WL", 40640, 25400, 2047, 63,
-	  INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
 static const struct wacom_features wacom_features_0x26 =
 	{ "Wacom Intuos5 touch S", 31496, 19685, 2047, 63,
-	  INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
+	  INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 16 };
 static const struct wacom_features wacom_features_0x27 =
 	{ "Wacom Intuos5 touch M", 44704, 27940, 2047, 63,
-	  INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
+	  INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16 };
 static const struct wacom_features wacom_features_0x28 =
 	{ "Wacom Intuos5 touch L", 65024, 40640, 2047, 63,
-	  INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
+	  INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16 };
 static const struct wacom_features wacom_features_0x29 =
 	{ "Wacom Intuos5 S", 31496, 19685, 2047, 63,
-	  INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7 };
 static const struct wacom_features wacom_features_0x2A =
 	{ "Wacom Intuos5 M", 44704, 27940, 2047, 63,
-	  INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9 };
 static const struct wacom_features wacom_features_0x314 =
 	{ "Wacom Intuos Pro S", 31496, 19685, 2047, 63,
-	  INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+	  INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x315 =
 	{ "Wacom Intuos Pro M", 44704, 27940, 2047, 63,
-	  INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+	  INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x317 =
 	{ "Wacom Intuos Pro L", 65024, 40640, 2047, 63,
-	  INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+	  INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0xF4 =
 	{ "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
-	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xF8 =
 	{ "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
-	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 16,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
 static const struct wacom_features wacom_features_0xF6 =
@@ -2928,11 +3079,11 @@
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
 	{ "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
-	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
 	{ "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
-	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 0,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32C };
 static const struct wacom_features wacom_features_0x32C =
@@ -2940,20 +3091,20 @@
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x32B, .touch_max = 10 };
 static const struct wacom_features wacom_features_0x3F =
 	{ "Wacom Cintiq 21UX", 87200, 65600, 1023, 63,
-	  CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 8 };
 static const struct wacom_features wacom_features_0xC5 =
 	{ "Wacom Cintiq 20WSX", 86680, 54180, 1023, 63,
-	  WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
 static const struct wacom_features wacom_features_0xC6 =
 	{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
-	  WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+	  WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 10 };
 static const struct wacom_features wacom_features_0x304 =
 	{ "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
-	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x333 =
 	{ "Wacom Cintiq 13HD touch", 59152, 33448, 2047, 63,
-	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x335 };
 static const struct wacom_features wacom_features_0x335 =
@@ -2972,22 +3123,22 @@
 	  DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0xFB =
 	{ "Wacom DTU1031", 21896, 13760, 511, 0,
-	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x32F =
 	{ "Wacom DTU1031X", 22472, 12728, 511, 0,
-	  DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+	  DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 0,
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x336 =
 	{ "Wacom DTU1141", 23472, 13203, 1023, 0,
-	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
 static const struct wacom_features wacom_features_0x57 =
 	{ "Wacom DTK2241", 95640, 54060, 2047, 63,
-	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x59 = /* Pen */
 	{ "Wacom DTH2242", 95640, 54060, 2047, 63,
-	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
 static const struct wacom_features wacom_features_0x5D = /* Touch */
@@ -2996,15 +3147,15 @@
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0xCC =
 	{ "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
-	  WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xFA =
 	{ "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
-	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x5B =
 	{ "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
-	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 18,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
 static const struct wacom_features wacom_features_0x5E =
@@ -3151,7 +3302,7 @@
 	  TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x307 =
 	{ "Wacom ISDv5 307", 59152, 33448, 2047, 63,
-	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
 static const struct wacom_features wacom_features_0x309 =
@@ -3160,7 +3311,7 @@
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x30A =
 	{ "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
-	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9,
 	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
 static const struct wacom_features wacom_features_0x30C =
@@ -3177,6 +3328,10 @@
 	{ "Wacom Intuos P M", 21600, 13500, 1023, 31,
 	  INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x331 =
+	{ "Wacom Express Key Remote", 0, 0, 0, 0,
+	  REMOTE, 0, 0, 18, .check_for_hid_type = true,
+	  .hid_type = HID_TYPE_USBNONE };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
 	{ "Wacom HID", .type = HID_GENERIC };
@@ -3332,6 +3487,7 @@
 	{ USB_DEVICE_WACOM(0x32B) },
 	{ USB_DEVICE_WACOM(0x32C) },
 	{ USB_DEVICE_WACOM(0x32F) },
+	{ USB_DEVICE_WACOM(0x331) },
 	{ USB_DEVICE_WACOM(0x333) },
 	{ USB_DEVICE_WACOM(0x335) },
 	{ USB_DEVICE_WACOM(0x336) },
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 2978c30..1e270d4 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -16,6 +16,8 @@
 #define WACOM_PKGLEN_MAX	192
 
 #define WACOM_NAME_MAX		64
+#define WACOM_MAX_REMOTES	5
+#define WACOM_STATUS_UNKNOWN	255
 
 /* packet length for individual models */
 #define WACOM_PKGLEN_BBFUN	 9
@@ -65,11 +67,11 @@
 #define WACOM_REPORT_USB		192
 #define WACOM_REPORT_BPAD_PEN		3
 #define WACOM_REPORT_BPAD_TOUCH		16
+#define WACOM_REPORT_DEVICE_LIST	16
+#define WACOM_REPORT_REMOTE		17
 
 /* device quirks */
 #define WACOM_QUIRK_BBTOUCH_LOWRES	0x0001
-#define WACOM_QUIRK_NO_INPUT		0x0002
-#define WACOM_QUIRK_MONITOR		0x0004
 #define WACOM_QUIRK_BATTERY		0x0008
 
 /* device types */
@@ -77,6 +79,7 @@
 #define WACOM_DEVICETYPE_PEN            0x0001
 #define WACOM_DEVICETYPE_TOUCH          0x0002
 #define WACOM_DEVICETYPE_PAD            0x0004
+#define WACOM_DEVICETYPE_WL_MONITOR     0x0008
 
 #define WACOM_VENDORDEFINED_PEN		0xff0d0001
 
@@ -130,6 +133,7 @@
 	WACOM_24HDT,
 	WACOM_27QHDT,
 	BAMBOO_PAD,
+	REMOTE,
 	TABLETPC,   /* add new TPC below */
 	TABLETPCE,
 	TABLETPC2FG,
@@ -149,6 +153,7 @@
 	int type;
 	int x_resolution;
 	int y_resolution;
+	int numbered_buttons;
 	int x_min;
 	int y_min;
 	int device_type;
@@ -193,6 +198,10 @@
 	int width;
 	int height;
 	int id;
+	int cc_index;
+	int cc_value_index;
+	int num_expected;
+	int num_received;
 };
 
 struct wacom_wac {
@@ -204,7 +213,7 @@
 	unsigned char data[WACOM_PKGLEN_MAX];
 	int tool[2];
 	int id[2];
-	__u32 serial[2];
+	__u32 serial[5];
 	bool reporting_data;
 	struct wacom_features features;
 	struct wacom_shared *shared;
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 603ce97..c4dcab0 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -601,6 +601,7 @@
 	u64 aligned_data = 0;
 	int ret;
 	bool signal = false;
+	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
 	/* Setup the descriptor */
@@ -618,7 +619,8 @@
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
+				  &signal);
 
 	/*
 	 * Signalling the host is conditional on many factors:
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 4506a66..2f9aead 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -347,6 +347,7 @@
 	IDE = 0,
 	SCSI,
 	NIC,
+	ND_NIC,
 	MAX_PERF_CHN,
 };
 
@@ -391,6 +392,7 @@
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
 	struct cpumask available_mask;
+	struct cpumask *alloced_mask;
 
 	for (i = IDE; i < MAX_PERF_CHN; i++) {
 		if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -408,7 +410,6 @@
 		 * channel, bind it to cpu 0.
 		 */
 		channel->numa_node = 0;
-		cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
 		channel->target_cpu = 0;
 		channel->target_vp = hv_context.vp_index[0];
 		return;
@@ -433,21 +434,38 @@
 		channel->numa_node = next_node;
 		primary = channel;
 	}
+	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
 
-	if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+	if (cpumask_weight(alloced_mask) ==
 	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the alloced map.
 		 */
-		cpumask_clear(&primary->alloced_cpus_in_node);
+		cpumask_clear(alloced_mask);
 	}
 
-	cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+	cpumask_xor(&available_mask, alloced_mask,
 		    cpumask_of_node(primary->numa_node));
 
-	cur_cpu = cpumask_next(-1, &available_mask);
-	cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+	cur_cpu = -1;
+	while (true) {
+		cur_cpu = cpumask_next(cur_cpu, &available_mask);
+		if (cur_cpu >= nr_cpu_ids) {
+			cur_cpu = -1;
+			cpumask_copy(&available_mask,
+				     cpumask_of_node(primary->numa_node));
+			continue;
+		}
+
+		if (!cpumask_test_cpu(cur_cpu,
+				&primary->alloced_cpus_in_node)) {
+			cpumask_set_cpu(cur_cpu,
+					&primary->alloced_cpus_in_node);
+			cpumask_set_cpu(cur_cpu, alloced_mask);
+			break;
+		}
+	}
 
 	channel->target_cpu = cur_cpu;
 	channel->target_vp = hv_context.vp_index[cur_cpu];
@@ -469,6 +487,10 @@
 {
 	struct vmbus_channel_message_header hdr;
 
+	/* Pre-Win2012R2 hosts don't support reconnect */
+	if (vmbus_proto_version < VERSION_WIN8_1)
+		return;
+
 	init_completion(&vmbus_connection.unload_event);
 	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
 	hdr.msgtype = CHANNELMSG_UNLOAD;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index d3943bc..6341be8 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -93,11 +93,14 @@
  */
 static u64 do_hypercall(u64 control, void *input, void *output)
 {
-#ifdef CONFIG_X86_64
-	u64 hv_status = 0;
 	u64 input_address = (input) ? virt_to_phys(input) : 0;
 	u64 output_address = (output) ? virt_to_phys(output) : 0;
 	void *hypercall_page = hv_context.hypercall_page;
+#ifdef CONFIG_X86_64
+	u64 hv_status = 0;
+
+	if (!hypercall_page)
+		return (u64)ULLONG_MAX;
 
 	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
 	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
@@ -112,13 +115,13 @@
 	u32 control_lo = control & 0xFFFFFFFF;
 	u32 hv_status_hi = 1;
 	u32 hv_status_lo = 1;
-	u64 input_address = (input) ? virt_to_phys(input) : 0;
 	u32 input_address_hi = input_address >> 32;
 	u32 input_address_lo = input_address & 0xFFFFFFFF;
-	u64 output_address = (output) ? virt_to_phys(output) : 0;
 	u32 output_address_hi = output_address >> 32;
 	u32 output_address_lo = output_address & 0xFFFFFFFF;
-	void *hypercall_page = hv_context.hypercall_page;
+
+	if (!hypercall_page)
+		return (u64)ULLONG_MAX;
 
 	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
 			      "=a"(hv_status_lo) : "d" (control_hi),
@@ -130,6 +133,56 @@
 #endif /* !x86_64 */
 }
 
+#ifdef CONFIG_X86_64
+static cycle_t read_hv_clock_tsc(struct clocksource *arg)
+{
+	cycle_t current_tick;
+	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;
+
+	if (tsc_pg->tsc_sequence != -1) {
+		/*
+		 * Use the tsc page to compute the value.
+		 */
+
+		while (1) {
+			cycle_t tmp;
+			u32 sequence = tsc_pg->tsc_sequence;
+			u64 cur_tsc;
+			u64 scale = tsc_pg->tsc_scale;
+			s64 offset = tsc_pg->tsc_offset;
+
+			rdtscll(cur_tsc);
+			/* current_tick = ((cur_tsc *scale) >> 64) + offset */
+			asm("mulq %3"
+				: "=d" (current_tick), "=a" (tmp)
+				: "a" (cur_tsc), "r" (scale));
+
+			current_tick += offset;
+			if (tsc_pg->tsc_sequence == sequence)
+				return current_tick;
+
+			if (tsc_pg->tsc_sequence != -1)
+				continue;
+			/*
+			 * Fallback using MSR method.
+			 */
+			break;
+		}
+	}
+	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
+	return current_tick;
+}
+
+static struct clocksource hyperv_cs_tsc = {
+		.name           = "hyperv_clocksource_tsc_page",
+		.rating         = 425,
+		.read           = read_hv_clock_tsc,
+		.mask           = CLOCKSOURCE_MASK(64),
+		.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+#endif
+
+
 /*
  * hv_init - Main initialization routine.
  *
@@ -139,7 +192,9 @@
 {
 	int max_leaf;
 	union hv_x64_msr_hypercall_contents hypercall_msr;
+	union hv_x64_msr_hypercall_contents tsc_msr;
 	void *virtaddr = NULL;
+	void *va_tsc = NULL;
 
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_message_page, 0,
@@ -183,6 +238,22 @@
 
 	hv_context.hypercall_page = virtaddr;
 
+#ifdef CONFIG_X86_64
+	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
+		if (!va_tsc)
+			goto cleanup;
+		hv_context.tsc_page = va_tsc;
+
+		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+
+		tsc_msr.enable = 1;
+		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);
+
+		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
+		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
+	}
+#endif
 	return 0;
 
 cleanup:
@@ -216,6 +287,21 @@
 		vfree(hv_context.hypercall_page);
 		hv_context.hypercall_page = NULL;
 	}
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Cleanup the TSC page based CS.
+	 */
+	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
+		clocksource_change_rating(&hyperv_cs_tsc, 10);
+		clocksource_unregister(&hyperv_cs_tsc);
+
+		hypercall_msr.as_uint64 = 0;
+		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
+		vfree(hv_context.tsc_page);
+		hv_context.tsc_page = NULL;
+	}
+#endif
 }
 
 /*
@@ -271,7 +357,7 @@
 {
 	cycle_t current_tick;
 
-	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);
+	WARN_ON(!clockevent_state_oneshot(evt));
 
 	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
 	current_tick += delta;
@@ -279,31 +365,24 @@
 	return 0;
 }
 
-static void hv_ce_setmode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+static int hv_ce_shutdown(struct clock_event_device *evt)
+{
+	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
+	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
+
+	return 0;
+}
+
+static int hv_ce_set_oneshot(struct clock_event_device *evt)
 {
 	union hv_timer_config timer_cfg;
 
-	switch (mode) {
-	case CLOCK_EVT_MODE_PERIODIC:
-		/* unsupported */
-		break;
+	timer_cfg.enable = 1;
+	timer_cfg.auto_enable = 1;
+	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
+	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
 
-	case CLOCK_EVT_MODE_ONESHOT:
-		timer_cfg.enable = 1;
-		timer_cfg.auto_enable = 1;
-		timer_cfg.sintx = VMBUS_MESSAGE_SINT;
-		wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
-		wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
-		break;
-	case CLOCK_EVT_MODE_RESUME:
-		break;
-	}
+	return 0;
 }
 
 static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
@@ -318,7 +397,8 @@
 	 * references to the hv_vmbus module making it impossible to unload.
 	 */
 
-	dev->set_mode = hv_ce_setmode;
+	dev->set_state_shutdown = hv_ce_shutdown;
+	dev->set_state_oneshot = hv_ce_set_oneshot;
 	dev->set_next_event = hv_ce_set_next_event;
 }
 
@@ -329,6 +409,13 @@
 	size_t ced_size = sizeof(struct clock_event_device);
 	int cpu;
 
+	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+					 GFP_ATOMIC);
+	if (hv_context.hv_numa_map == NULL) {
+		pr_err("Unable to allocate NUMA map\n");
+		goto err;
+	}
+
 	for_each_online_cpu(cpu) {
 		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
 		if (hv_context.event_dpc[cpu] == NULL) {
@@ -342,6 +429,7 @@
 			pr_err("Unable to allocate clock event device\n");
 			goto err;
 		}
+
 		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 
 		hv_context.synic_message_page[cpu] =
@@ -390,6 +478,7 @@
 {
 	int cpu;
 
+	kfree(hv_context.hv_numa_map);
 	for_each_online_cpu(cpu)
 		hv_synic_free_cpu(cpu);
 }
@@ -503,8 +592,7 @@
 
 	/* Turn off clockevent device */
 	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
-		hv_ce_setmode(CLOCK_EVT_MODE_SHUTDOWN,
-			      hv_context.clk_evt[cpu]);
+		hv_ce_shutdown(hv_context.clk_evt[cpu]);
 
 	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
@@ -530,6 +618,4 @@
 	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 	sctrl.enable = 0;
 	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
-
-	hv_synic_free_cpu(cpu);
 }
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 8a725cd..b853b4b 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -62,11 +62,13 @@
 enum {
 	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
 	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
+	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
 
 	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
 	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
+	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
 
-	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
+	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
 };
 
 
@@ -1296,13 +1298,25 @@
 	if (dm->next_version == 0)
 		goto version_error;
 
-	dm->next_version = 0;
 	memset(&version_req, 0, sizeof(struct dm_version_request));
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
-	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
-	version_req.is_last_attempt = 1;
+	version_req.version.version = dm->next_version;
+
+	/*
+	 * Set the next version to try in case current version fails.
+	 * Win7 protocol ought to be the last one to try.
+	 */
+	switch (version_req.version.version) {
+	case DYNMEM_PROTOCOL_VERSION_WIN8:
+		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+		version_req.is_last_attempt = 0;
+		break;
+	default:
+		dm->next_version = 0;
+		version_req.is_last_attempt = 1;
+	}
 
 	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
 				sizeof(struct dm_version_request),
@@ -1442,7 +1456,7 @@
 
 	dm_device.dev = dev;
 	dm_device.state = DM_INITIALIZING;
-	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
+	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
 	init_completion(&dm_device.host_event);
 	init_completion(&dm_device.config_event);
 	INIT_LIST_HEAD(&dm_device.ha_region_list);
@@ -1474,7 +1488,7 @@
 	version_req.hdr.type = DM_VERSION_REQUEST;
 	version_req.hdr.size = sizeof(struct dm_version_request);
 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
-	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
+	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
 	version_req.is_last_attempt = 0;
 
 	ret = vmbus_sendpacket(dev->channel, &version_req,
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index b50dd33..db4b887 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -116,7 +116,7 @@
 
 static void fcopy_send_data(struct work_struct *dummy)
 {
-	struct hv_start_fcopy smsg_out;
+	struct hv_start_fcopy *smsg_out = NULL;
 	int operation = fcopy_transaction.fcopy_msg->operation;
 	struct hv_start_fcopy *smsg_in;
 	void *out_src;
@@ -136,21 +136,24 @@
 	switch (operation) {
 	case START_FILE_COPY:
 		out_len = sizeof(struct hv_start_fcopy);
-		memset(&smsg_out, 0, out_len);
-		smsg_out.hdr.operation = operation;
+		smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
+		if (!smsg_out)
+			return;
+
+		smsg_out->hdr.operation = operation;
 		smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;
 
 		utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
 				UTF16_LITTLE_ENDIAN,
-				(__u8 *)&smsg_out.file_name, W_MAX_PATH - 1);
+				(__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);
 
 		utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
 				UTF16_LITTLE_ENDIAN,
-				(__u8 *)&smsg_out.path_name, W_MAX_PATH - 1);
+				(__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);
 
-		smsg_out.copy_flags = smsg_in->copy_flags;
-		smsg_out.file_size = smsg_in->file_size;
-		out_src = &smsg_out;
+		smsg_out->copy_flags = smsg_in->copy_flags;
+		smsg_out->file_size = smsg_in->file_size;
+		out_src = smsg_out;
 		break;
 
 	default:
@@ -168,6 +171,8 @@
 			fcopy_transaction.state = HVUTIL_READY;
 		}
 	}
+	kfree(smsg_out);
+
 	return;
 }
 
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index d85798d..74c38a9 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,6 +353,9 @@
 		return;
 
 	message = kzalloc(sizeof(*message), GFP_KERNEL);
+	if (!message)
+		return;
+
 	message->kvp_hdr.operation = operation;
 	message->kvp_hdr.pool = pool;
 	in_msg = kvp_transaction.kvp_msg;
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index ea7ba5e..6a9d80a 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -186,7 +186,7 @@
 		return -EINVAL;
 	} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
 		cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
-		if (!msg)
+		if (!cn_msg)
 			return -ENOMEM;
 		cn_msg->id.idx = hvt->cn_id.idx;
 		cn_msg->id.val = hvt->cn_id.val;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index cddc0c9..3d70e36 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -141,7 +141,7 @@
 		struct {
 			u32 target_sint;
 			u32 target_vp;
-			u16 base_flag_bumber;
+			u16 base_flag_number;
 			u16 flag_count;
 			u32 rsvdz;
 		} event_port_info;
@@ -517,6 +517,7 @@
 	u64 guestid;
 
 	void *hypercall_page;
+	void *tsc_page;
 
 	bool synic_initialized;
 
@@ -551,10 +552,23 @@
 	 * Support PV clockevent device.
 	 */
 	struct clock_event_device *clk_evt[NR_CPUS];
+	/*
+	 * To manage allocations in a NUMA node.
+	 * Array indexed by numa node ID.
+	 */
+	struct cpumask *hv_numa_map;
 };
 
 extern struct hv_context hv_context;
 
+struct ms_hyperv_tsc_page {
+	volatile u32 tsc_sequence;
+	u32 reserved1;
+	volatile u64 tsc_scale;
+	volatile s64 tsc_offset;
+	u64 reserved2[509];
+};
+
 struct hv_ring_buffer_debug_info {
 	u32 current_interrupt_mask;
 	u32 current_read_index;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 6361d12..70a1a9a 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -103,10 +103,9 @@
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 old_rd,
-					 struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(u32 prev_write_sz,
+				      struct hv_ring_buffer_info *rbi)
 {
-	u32 prev_write_sz;
 	u32 cur_write_sz;
 	u32 r_size;
 	u32 write_loc = rbi->ring_buffer->write_index;
@@ -123,10 +122,6 @@
 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
 			read_loc - write_loc;
 
-	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
-			old_rd - write_loc;
-
-
 	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
 		return true;
 
@@ -517,7 +512,6 @@
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
 	unsigned long flags;
-	u32 old_read;
 
 	if (buflen <= 0)
 		return -EINVAL;
@@ -528,8 +522,6 @@
 				&bytes_avail_toread,
 				&bytes_avail_towrite);
 
-	old_read = bytes_avail_toread;
-
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < buflen) {
 		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -560,7 +552,7 @@
 
 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
-	*signal = hv_need_to_signal_on_read(old_read, inring_info);
+	*signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
 
 	return 0;
 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index cf20400..f19b6f7 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -39,6 +39,8 @@
 #include <asm/mshyperv.h>
 #include <linux/notifier.h>
 #include <linux/ptrace.h>
+#include <linux/screen_info.h>
+#include <linux/kdebug.h>
 #include "hyperv_vmbus.h"
 
 static struct acpi_device  *hv_acpi_dev;
@@ -48,12 +50,18 @@
 static int irq;
 
 
-static int hyperv_panic_event(struct notifier_block *nb,
-			unsigned long event, void *ptr)
+static void hyperv_report_panic(struct pt_regs *regs)
 {
-	struct pt_regs *regs;
+	static bool panic_reported;
 
-	regs = current_pt_regs();
+	/*
+	 * We prefer to report panic on 'die' chain as we have proper
+	 * registers to report, but if we miss it (e.g. on BUG()) we need
+	 * to report it on 'panic'.
+	 */
+	if (panic_reported)
+		return;
+	panic_reported = true;
 
 	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
 	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
@@ -65,18 +73,37 @@
 	 * Let Hyper-V know there is crash data available
 	 */
 	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
+}
+
+static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
+			      void *args)
+{
+	struct pt_regs *regs;
+
+	regs = current_pt_regs();
+
+	hyperv_report_panic(regs);
 	return NOTIFY_DONE;
 }
 
+static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
+			    void *args)
+{
+	struct die_args *die = (struct die_args *)args;
+	struct pt_regs *regs = die->regs;
+
+	hyperv_report_panic(regs);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block hyperv_die_block = {
+	.notifier_call = hyperv_die_event,
+};
 static struct notifier_block hyperv_panic_block = {
 	.notifier_call = hyperv_panic_event,
 };
 
-struct resource hyperv_mmio = {
-	.name  = "hyperv mmio",
-	.flags = IORESOURCE_MEM,
-};
-EXPORT_SYMBOL_GPL(hyperv_mmio);
+struct resource *hyperv_mmio;
 
 static int vmbus_exists(void)
 {
@@ -414,6 +441,43 @@
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
 
+static ssize_t channel_vp_mapping_show(struct device *dev,
+				       struct device_attribute *dev_attr,
+				       char *buf)
+{
+	struct hv_device *hv_dev = device_to_hv_device(dev);
+	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
+	unsigned long flags;
+	int buf_size = PAGE_SIZE, n_written, tot_written;
+	struct list_head *cur;
+
+	if (!channel)
+		return -ENODEV;
+
+	tot_written = snprintf(buf, buf_size, "%u:%u\n",
+		channel->offermsg.child_relid, channel->target_cpu);
+
+	spin_lock_irqsave(&channel->lock, flags);
+
+	list_for_each(cur, &channel->sc_list) {
+		if (tot_written >= buf_size - 1)
+			break;
+
+		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
+		n_written = scnprintf(buf + tot_written,
+				     buf_size - tot_written,
+				     "%u:%u\n",
+				     cur_sc->offermsg.child_relid,
+				     cur_sc->target_cpu);
+		tot_written += n_written;
+	}
+
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	return tot_written;
+}
+static DEVICE_ATTR_RO(channel_vp_mapping);
+
 /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
 static struct attribute *vmbus_attrs[] = {
 	&dev_attr_id.attr,
@@ -438,6 +502,7 @@
 	&dev_attr_in_write_index.attr,
 	&dev_attr_in_read_bytes_avail.attr,
 	&dev_attr_in_write_bytes_avail.attr,
+	&dev_attr_channel_vp_mapping.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(vmbus);
@@ -763,38 +828,6 @@
 	}
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int hyperv_cpu_disable(void)
-{
-	return -ENOSYS;
-}
-
-static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
-{
-	static void *previous_cpu_disable;
-
-	/*
-	 * Offlining a CPU when running on newer hypervisors (WS2012R2, Win8,
-	 * ...) is not supported at this moment as channel interrupts are
-	 * distributed across all of them.
-	 */
-
-	if ((vmbus_proto_version == VERSION_WS2008) ||
-	    (vmbus_proto_version == VERSION_WIN7))
-		return;
-
-	if (vmbus_loaded) {
-		previous_cpu_disable = smp_ops.cpu_disable;
-		smp_ops.cpu_disable = hyperv_cpu_disable;
-		pr_notice("CPU offlining is not supported by hypervisor\n");
-	} else if (previous_cpu_disable)
-		smp_ops.cpu_disable = previous_cpu_disable;
-}
-#else
-static void hv_cpu_hotplug_quirk(bool vmbus_loaded)
-{
-}
-#endif
 
 /*
  * vmbus_bus_init -Main vmbus driver initialization routine.
@@ -836,12 +869,14 @@
 	if (ret)
 		goto err_alloc;
 
-	hv_cpu_hotplug_quirk(true);
+	if (vmbus_proto_version > VERSION_WIN7)
+		cpu_hotplug_disable();
 
 	/*
 	 * Only register if the crash MSRs are available
 	 */
-	if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+		register_die_notifier(&hyperv_die_block);
 		atomic_notifier_chain_register(&panic_notifier_list,
 					       &hyperv_panic_block);
 	}
@@ -863,8 +898,8 @@
 }
 
 /**
- * __vmbus_child_driver_register - Register a vmbus's driver
- * @drv: Pointer to driver structure you want to register
+ * __vmbus_child_driver_register() - Register a vmbus's driver
+ * @hv_driver: Pointer to driver structure you want to register
  * @owner: owner module of the drv
  * @mod_name: module name string
  *
@@ -896,7 +931,8 @@
 
 /**
  * vmbus_driver_unregister() - Unregister a vmbus's driver
- * @drv: Pointer to driver structure you want to un-register
+ * @hv_driver: Pointer to driver structure you want to
+ *             un-register
  *
  * Un-register the given driver that was previous registered with a call to
  * vmbus_driver_register()
@@ -982,30 +1018,184 @@
 
 
 /*
- * VMBUS is an acpi enumerated device. Get the the information we
+ * VMBUS is an acpi enumerated device. Get the information we
  * need from DSDT.
  */
-
+#define VTPM_BASE_ADDRESS 0xfed40000
 static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
 {
+	resource_size_t start = 0;
+	resource_size_t end = 0;
+	struct resource *new_res;
+	struct resource **old_res = &hyperv_mmio;
+	struct resource **prev_res = NULL;
+
 	switch (res->type) {
 	case ACPI_RESOURCE_TYPE_IRQ:
 		irq = res->data.irq.interrupts[0];
+		return AE_OK;
+
+	/*
+	 * "Address" descriptors are for bus windows. Ignore
+	 * "memory" descriptors, which are for registers on
+	 * devices.
+	 */
+	case ACPI_RESOURCE_TYPE_ADDRESS32:
+		start = res->data.address32.address.minimum;
+		end = res->data.address32.address.maximum;
 		break;
 
 	case ACPI_RESOURCE_TYPE_ADDRESS64:
-		hyperv_mmio.start = res->data.address64.address.minimum;
-		hyperv_mmio.end = res->data.address64.address.maximum;
+		start = res->data.address64.address.minimum;
+		end = res->data.address64.address.maximum;
 		break;
+
+	default:
+		/* Unused resource type */
+		return AE_OK;
+
 	}
+	/*
+	 * Ignore ranges that are below 1MB, as they're not
+	 * necessary or useful here.
+	 */
+	if (end < 0x100000)
+		return AE_OK;
+
+	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
+	if (!new_res)
+		return AE_NO_MEMORY;
+
+	/* If this range overlaps the virtual TPM, truncate it. */
+	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
+		end = VTPM_BASE_ADDRESS;
+
+	new_res->name = "hyperv mmio";
+	new_res->flags = IORESOURCE_MEM;
+	new_res->start = start;
+	new_res->end = end;
+
+	do {
+		if (!*old_res) {
+			*old_res = new_res;
+			break;
+		}
+
+		if ((*old_res)->end < new_res->start) {
+			new_res->sibling = *old_res;
+			if (prev_res)
+				(*prev_res)->sibling = new_res;
+			*old_res = new_res;
+			break;
+		}
+
+		prev_res = old_res;
+		old_res = &(*old_res)->sibling;
+
+	} while (1);
 
 	return AE_OK;
 }
 
+static int vmbus_acpi_remove(struct acpi_device *device)
+{
+	struct resource *cur_res;
+	struct resource *next_res;
+
+	if (hyperv_mmio) {
+		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
+			next_res = cur_res->sibling;
+			kfree(cur_res);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
+ * @new:		If successful, supplied a pointer to the
+ *			allocated MMIO space.
+ * @device_obj:		Identifies the caller
+ * @min:		Minimum guest physical address of the
+ *			allocation
+ * @max:		Maximum guest physical address
+ * @size:		Size of the range to be allocated
+ * @align:		Alignment of the range to be allocated
+ * @fb_overlap_ok:	Whether this allocation can be allowed
+ *			to overlap the video frame buffer.
+ *
+ * This function walks the resources granted to VMBus by the
+ * _CRS object in the ACPI namespace underneath the parent
+ * "bridge" whether that's a root PCI bus in the Generation 1
+ * case or a Module Device in the Generation 2 case.  It then
+ * attempts to allocate from the global MMIO pool in a way that
+ * matches the constraints supplied in these parameters and by
+ * that _CRS.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+			resource_size_t min, resource_size_t max,
+			resource_size_t size, resource_size_t align,
+			bool fb_overlap_ok)
+{
+	struct resource *iter;
+	resource_size_t range_min, range_max, start, local_min, local_max;
+	const char *dev_n = dev_name(&device_obj->device);
+	u32 fb_end = screen_info.lfb_base + (screen_info.lfb_size << 1);
+	int i;
+
+	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
+		if ((iter->start >= max) || (iter->end <= min))
+			continue;
+
+		range_min = iter->start;
+		range_max = iter->end;
+
+		/* If this range overlaps the frame buffer, split it into
+		   two tries. */
+		for (i = 0; i < 2; i++) {
+			local_min = range_min;
+			local_max = range_max;
+			if (fb_overlap_ok || (range_min >= fb_end) ||
+			    (range_max <= screen_info.lfb_base)) {
+				i++;
+			} else {
+				if ((range_min <= screen_info.lfb_base) &&
+				    (range_max >= screen_info.lfb_base)) {
+					/*
+					 * The frame buffer is in this window,
+					 * so trim this into the part that
+					 * precedes the frame buffer.
+					 */
+					local_max = screen_info.lfb_base - 1;
+					range_min = fb_end;
+				} else {
+					range_min = fb_end;
+					continue;
+				}
+			}
+
+			start = (local_min + align - 1) & ~(align - 1);
+			for (; start + size - 1 <= local_max; start += align) {
+				*new = request_mem_region_exclusive(start, size,
+								    dev_n);
+				if (*new)
+					return 0;
+			}
+		}
+	}
+
+	return -ENXIO;
+}
+EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
+
 static int vmbus_acpi_add(struct acpi_device *device)
 {
 	acpi_status result;
 	int ret_val = -ENODEV;
+	struct acpi_device *ancestor;
 
 	hv_acpi_dev = device;
 
@@ -1015,35 +1205,27 @@
 	if (ACPI_FAILURE(result))
 		goto acpi_walk_err;
 	/*
-	 * The parent of the vmbus acpi device (Gen2 firmware) is the VMOD that
-	 * has the mmio ranges. Get that.
+	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
+	 * firmware) is the VMOD that has the mmio ranges. Get that.
 	 */
-	if (device->parent) {
-		result = acpi_walk_resources(device->parent->handle,
-					METHOD_NAME__CRS,
-					vmbus_walk_resources, NULL);
+	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
+		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
+					     vmbus_walk_resources, NULL);
 
 		if (ACPI_FAILURE(result))
-			goto acpi_walk_err;
-		if (hyperv_mmio.start && hyperv_mmio.end)
-			request_resource(&iomem_resource, &hyperv_mmio);
+			continue;
+		if (hyperv_mmio)
+			break;
 	}
 	ret_val = 0;
 
 acpi_walk_err:
 	complete(&probe_event);
+	if (ret_val)
+		vmbus_acpi_remove(device);
 	return ret_val;
 }
 
-static int vmbus_acpi_remove(struct acpi_device *device)
-{
-	int ret = 0;
-
-	if (hyperv_mmio.start && hyperv_mmio.end)
-		ret = release_resource(&hyperv_mmio);
-	return ret;
-}
-
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 	{"VMBUS", 0},
 	{"VMBus", 0},
@@ -1060,6 +1242,29 @@
 	},
 };
 
+static void hv_kexec_handler(void)
+{
+	int cpu;
+
+	hv_synic_clockevents_cleanup();
+	vmbus_initiate_unload();
+	for_each_online_cpu(cpu)
+		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
+	hv_cleanup();
+};
+
+static void hv_crash_handler(struct pt_regs *regs)
+{
+	vmbus_initiate_unload();
+	/*
+	 * In the crash handler we can't schedule synic cleanup for all CPUs;
+	 * we do the cleanup for the current CPU only. This should be
+	 * sufficient for kdump.
+	 */
+	hv_synic_cleanup(NULL);
+	hv_cleanup();
+};
+
 static int __init hv_acpi_init(void)
 {
 	int ret, t;
@@ -1092,6 +1297,9 @@
 	if (ret)
 		goto cleanup;
 
+	hv_setup_kexec_handler(hv_kexec_handler);
+	hv_setup_crash_handler(hv_crash_handler);
+
 	return 0;
 
 cleanup:
@@ -1104,13 +1312,16 @@
 {
 	int cpu;
 
+	hv_remove_kexec_handler();
+	hv_remove_crash_handler();
 	vmbus_connection.conn_state = DISCONNECTED;
 	hv_synic_clockevents_cleanup();
 	vmbus_disconnect();
 	hv_remove_vmbus_irq();
 	tasklet_kill(&msg_dpc);
 	vmbus_free_channels();
-	if (ms_hyperv.features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
+		unregister_die_notifier(&hyperv_die_block);
 		atomic_notifier_chain_unregister(&panic_notifier_list,
 						 &hyperv_panic_block);
 	}
@@ -1120,8 +1331,10 @@
 		tasklet_kill(hv_context.event_dpc[cpu]);
 		smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
 	}
+	hv_synic_free();
 	acpi_bus_unregister_driver(&vmbus_acpi_driver);
-	hv_cpu_hotplug_quirk(false);
+	if (vmbus_proto_version > VERSION_WIN7)
+		cpu_hotplug_enable();
 }
 
 
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7c65b73..500b262 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -609,8 +609,8 @@
 	depends on !PPC
 	select HWMON_VID
 	help
-	  If you say yes here you get support for ITE IT8705F, IT8712F,
-	  IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E,
+	  If you say yes here you get support for ITE IT8705F, IT8712F, IT8716F,
+	  IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8732F, IT8758E,
 	  IT8771E, IT8772E, IT8781F, IT8782F, IT8783E/F, IT8786E, IT8790E,
 	  IT8603E, IT8620E, and IT8623E sensor chips, and the SiS950 clone.
 
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 2e5c6f4..cb28e4b 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -51,6 +51,7 @@
 #define SIO_F71808A_ID		0x1001	/* Chipset ID */
 #define SIO_F71858_ID		0x0507  /* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
+#define SIO_F71868_ID		0x1106	/* Chipset ID */
 #define SIO_F71869_ID		0x0814	/* Chipset ID */
 #define SIO_F71869A_ID		0x1007	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
@@ -58,7 +59,9 @@
 #define SIO_F71889E_ID		0x0909	/* Chipset ID */
 #define SIO_F71889A_ID		0x1005	/* Chipset ID */
 #define SIO_F8000_ID		0x0581	/* Chipset ID */
+#define SIO_F81768D_ID		0x1210	/* Chipset ID */
 #define SIO_F81865_ID		0x0704	/* Chipset ID */
+#define SIO_F81866_ID		0x1010	/* Chipset ID */
 
 #define REGION_LENGTH		8
 #define ADDR_REG_OFFSET		5
@@ -69,6 +72,10 @@
 #define F71882FG_REG_IN(nr)		(0x20  + (nr))
 #define F71882FG_REG_IN1_HIGH		0x32 /* f7188x only */
 
+#define F81866_REG_IN_STATUS		0x16 /* F81866 only */
+#define F81866_REG_IN_BEEP			0x17 /* F81866 only */
+#define F81866_REG_IN1_HIGH		0x3a /* F81866 only */
+
 #define F71882FG_REG_FAN(nr)		(0xA0 + (16 * (nr)))
 #define F71882FG_REG_FAN_TARGET(nr)	(0xA2 + (16 * (nr)))
 #define F71882FG_REG_FAN_FULL_SPEED(nr)	(0xA4 + (16 * (nr)))
@@ -101,7 +108,7 @@
 
 #define	F71882FG_REG_START		0x01
 
-#define F71882FG_MAX_INS		9
+#define F71882FG_MAX_INS		11
 
 #define FAN_MIN_DETECT			366 /* Lowest detectable fanspeed */
 
@@ -109,14 +116,16 @@
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
-	     f71889fg, f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71868a, f71869, f71869a,
+	f71882fg, f71889fg, f71889ed, f71889a, f8000, f81768d, f81865f,
+	f81866a};
 
 static const char *const f71882fg_names[] = {
 	"f71808e",
 	"f71808a",
 	"f71858fg",
 	"f71862fg",
+	"f71868a",
 	"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
 	"f71869a",
 	"f71882fg",
@@ -124,22 +133,27 @@
 	"f71889ed",
 	"f71889a",
 	"f8000",
+	"f81768d",
 	"f81865f",
+	"f81866a",
 };
 
 static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
-	[f71808e]	= { 1, 1, 1, 1, 1, 1, 0, 1, 1 },
-	[f71808a]	= { 1, 1, 1, 1, 0, 0, 0, 1, 1 },
-	[f71858fg]	= { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
-	[f71862fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f71869]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f71869a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f71882fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f71889fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f71889ed]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f71889a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
-	[f8000]		= { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
-	[f81865f]	= { 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71808e]	= { 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0 },
+	[f71808a]	= { 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0 },
+	[f71858fg]	= { 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+	[f71862fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71868a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 },
+	[f71869]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71869a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71882fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71889fg]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71889ed]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f71889a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0 },
+	[f8000]		= { 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
+	[f81768d]	= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+	[f81865f]	= { 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
+	[f81866a]	= { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0 },
 };
 
 static const char f71882fg_has_in1_alarm[] = {
@@ -147,6 +161,7 @@
 	[f71808a]	= 0,
 	[f71858fg]	= 0,
 	[f71862fg]	= 0,
+	[f71868a]	= 0,
 	[f71869]	= 0,
 	[f71869a]	= 0,
 	[f71882fg]	= 1,
@@ -154,7 +169,9 @@
 	[f71889ed]	= 1,
 	[f71889a]	= 1,
 	[f8000]		= 0,
+	[f81768d]	= 1,
 	[f81865f]	= 1,
+	[f81866a]	= 1,
 };
 
 static const char f71882fg_fan_has_beep[] = {
@@ -162,6 +179,7 @@
 	[f71808a]	= 0,
 	[f71858fg]	= 0,
 	[f71862fg]	= 1,
+	[f71868a]	= 1,
 	[f71869]	= 1,
 	[f71869a]	= 1,
 	[f71882fg]	= 1,
@@ -169,7 +187,9 @@
 	[f71889ed]	= 1,
 	[f71889a]	= 1,
 	[f8000]		= 0,
+	[f81768d]	= 1,
 	[f81865f]	= 1,
+	[f81866a]	= 1,
 };
 
 static const char f71882fg_nr_fans[] = {
@@ -177,6 +197,7 @@
 	[f71808a]	= 2, /* +1 fan which is monitor + simple pwm only */
 	[f71858fg]	= 3,
 	[f71862fg]	= 3,
+	[f71868a]	= 3,
 	[f71869]	= 3,
 	[f71869a]	= 3,
 	[f71882fg]	= 4,
@@ -184,7 +205,9 @@
 	[f71889ed]	= 3,
 	[f71889a]	= 3,
 	[f8000]		= 3, /* +1 fan which is monitor only */
+	[f81768d]	= 3,
 	[f81865f]	= 2,
+	[f81866a]	= 3,
 };
 
 static const char f71882fg_temp_has_beep[] = {
@@ -192,6 +215,7 @@
 	[f71808a]	= 1,
 	[f71858fg]	= 0,
 	[f71862fg]	= 1,
+	[f71868a]	= 1,
 	[f71869]	= 1,
 	[f71869a]	= 1,
 	[f71882fg]	= 1,
@@ -199,7 +223,9 @@
 	[f71889ed]	= 1,
 	[f71889a]	= 1,
 	[f8000]		= 0,
+	[f81768d]	= 1,
 	[f81865f]	= 1,
+	[f81866a]	= 1,
 };
 
 static const char f71882fg_nr_temps[] = {
@@ -207,6 +233,7 @@
 	[f71808a]	= 2,
 	[f71858fg]	= 3,
 	[f71862fg]	= 3,
+	[f71868a]	= 3,
 	[f71869]	= 3,
 	[f71869a]	= 3,
 	[f71882fg]	= 3,
@@ -214,7 +241,9 @@
 	[f71889ed]	= 3,
 	[f71889a]	= 3,
 	[f8000]		= 3,
+	[f81768d]	= 3,
 	[f81865f]	= 2,
+	[f81866a]	= 3,
 };
 
 static struct platform_device *f71882fg_pdev;
@@ -490,6 +519,23 @@
 		store_temp_beep, 0, 7),
 } };
 
+static struct sensor_device_attribute_2 f81866_temp_beep_attr[3][2] = { {
+	SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 0),
+	SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 4),
+}, {
+	SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 1),
+	SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 5),
+}, {
+	SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 2),
+	SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+		store_temp_beep, 0, 6),
+} };
+
 /*
  * Temp attr for the f8000
  * Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max)
@@ -531,6 +577,8 @@
 	SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
 	SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
 	SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+	SENSOR_ATTR_2(in9_input, S_IRUGO, show_in, NULL, 0, 9),
+	SENSOR_ATTR_2(in10_input, S_IRUGO, show_in, NULL, 0, 10),
 };
 
 /* For models with in1 alarm capability */
@@ -1170,10 +1218,21 @@
 	if (time_after(jiffies, data->last_limits + 60 * HZ) ||
 			!data->valid) {
 		if (f71882fg_has_in1_alarm[data->type]) {
-			data->in1_max =
-				f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
-			data->in_beep =
-				f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+			if (data->type == f81866a) {
+				data->in1_max =
+					f71882fg_read8(data,
+						       F81866_REG_IN1_HIGH);
+				data->in_beep =
+					f71882fg_read8(data,
+						       F81866_REG_IN_BEEP);
+			} else {
+				data->in1_max =
+					f71882fg_read8(data,
+						       F71882FG_REG_IN1_HIGH);
+				data->in_beep =
+					f71882fg_read8(data,
+						       F71882FG_REG_IN_BEEP);
+			}
 		}
 
 		/* Get High & boundary temps*/
@@ -1297,9 +1356,16 @@
 			data->fan[3] = f71882fg_read16(data,
 						F71882FG_REG_FAN(3));
 
-		if (f71882fg_has_in1_alarm[data->type])
-			data->in_status = f71882fg_read8(data,
+		if (f71882fg_has_in1_alarm[data->type]) {
+			if (data->type == f81866a)
+				data->in_status = f71882fg_read8(data,
+						F81866_REG_IN_STATUS);
+
+			else
+				data->in_status = f71882fg_read8(data,
 						F71882FG_REG_IN_STATUS);
+		}
+
 		for (nr = 0; nr < F71882FG_MAX_INS; nr++)
 			if (f71882fg_has_in[data->type][nr])
 				data->in[nr] = f71882fg_read8(data,
@@ -1440,7 +1506,10 @@
 	val = clamp_val(val, 0, 255);
 
 	mutex_lock(&data->update_lock);
-	f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
+	if (data->type == f81866a)
+		f71882fg_write8(data, F81866_REG_IN1_HIGH, val);
+	else
+		f71882fg_write8(data, F71882FG_REG_IN1_HIGH, val);
 	data->in1_max = val;
 	mutex_unlock(&data->update_lock);
 
@@ -1471,13 +1540,20 @@
 		return err;
 
 	mutex_lock(&data->update_lock);
-	data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+	if (data->type == f81866a)
+		data->in_beep = f71882fg_read8(data, F81866_REG_IN_BEEP);
+	else
+		data->in_beep = f71882fg_read8(data, F71882FG_REG_IN_BEEP);
+
 	if (val)
 		data->in_beep |= 1 << nr;
 	else
 		data->in_beep &= ~(1 << nr);
 
-	f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
+	if (data->type == f81866a)
+		f71882fg_write8(data, F81866_REG_IN_BEEP, data->in_beep);
+	else
+		f71882fg_write8(data, F71882FG_REG_IN_BEEP, data->in_beep);
 	mutex_unlock(&data->update_lock);
 
 	return count;
@@ -2270,6 +2346,7 @@
 	int nr_fans = f71882fg_nr_fans[sio_data->type];
 	int nr_temps = f71882fg_nr_temps[sio_data->type];
 	int err, i;
+	int size;
 	u8 start_reg, reg;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(struct f71882fg_data),
@@ -2280,7 +2357,8 @@
 	data->addr = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
 	data->type = sio_data->type;
 	data->temp_start =
-	    (data->type == f71858fg || data->type == f8000) ? 0 : 1;
+	    (data->type == f71858fg || data->type == f8000 ||
+		data->type == f81866a) ? 0 : 1;
 	mutex_init(&data->update_lock);
 	platform_set_drvdata(pdev, data);
 
@@ -2322,6 +2400,11 @@
 					f8000_temp_attr,
 					ARRAY_SIZE(f8000_temp_attr));
 			break;
+		case f81866a:
+			err = f71882fg_create_sysfs_files(pdev,
+					f71858fg_temp_attr,
+					ARRAY_SIZE(f71858fg_temp_attr));
+			break;
 		default:
 			err = f71882fg_create_sysfs_files(pdev,
 				&fxxxx_temp_attr[0][0],
@@ -2331,10 +2414,18 @@
 			goto exit_unregister_sysfs;
 
 		if (f71882fg_temp_has_beep[data->type]) {
-			err = f71882fg_create_sysfs_files(pdev,
-					&fxxxx_temp_beep_attr[0][0],
-					ARRAY_SIZE(fxxxx_temp_beep_attr[0])
-						* nr_temps);
+			if (data->type == f81866a) {
+				size = ARRAY_SIZE(f81866_temp_beep_attr[0]);
+				err = f71882fg_create_sysfs_files(pdev,
+						&f81866_temp_beep_attr[0][0],
+						size * nr_temps);
+
+			} else {
+				size = ARRAY_SIZE(fxxxx_temp_beep_attr[0]);
+				err = f71882fg_create_sysfs_files(pdev,
+						&fxxxx_temp_beep_attr[0][0],
+						size * nr_temps);
+			}
 			if (err)
 				goto exit_unregister_sysfs;
 		}
@@ -2451,15 +2542,27 @@
 					f8000_temp_attr,
 					ARRAY_SIZE(f8000_temp_attr));
 			break;
+		case f81866a:
+			f71882fg_remove_sysfs_files(pdev,
+					f71858fg_temp_attr,
+					ARRAY_SIZE(f71858fg_temp_attr));
+			break;
 		default:
 			f71882fg_remove_sysfs_files(pdev,
 				&fxxxx_temp_attr[0][0],
 				ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
 		}
 		if (f71882fg_temp_has_beep[data->type]) {
-			f71882fg_remove_sysfs_files(pdev,
-			       &fxxxx_temp_beep_attr[0][0],
-			       ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps);
+			if (data->type == f81866a)
+				f71882fg_remove_sysfs_files(pdev,
+					&f81866_temp_beep_attr[0][0],
+					ARRAY_SIZE(f81866_temp_beep_attr[0])
+						* nr_temps);
+			else
+				f71882fg_remove_sysfs_files(pdev,
+					&fxxxx_temp_beep_attr[0][0],
+					ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+						* nr_temps);
 		}
 
 		for (i = 0; i < F71882FG_MAX_INS; i++) {
@@ -2551,6 +2654,9 @@
 	case SIO_F71862_ID:
 		sio_data->type = f71862fg;
 		break;
+	case SIO_F71868_ID:
+		sio_data->type = f71868a;
+		break;
 	case SIO_F71869_ID:
 		sio_data->type = f71869;
 		break;
@@ -2572,9 +2678,15 @@
 	case SIO_F8000_ID:
 		sio_data->type = f8000;
 		break;
+	case SIO_F81768D_ID:
+		sio_data->type = f81768d;
+		break;
 	case SIO_F81865_ID:
 		sio_data->type = f81865f;
 		break;
+	case SIO_F81866_ID:
+		sio_data->type = f81866a;
+		break;
 	default:
 		pr_info("Unsupported Fintek device: %04x\n",
 			(unsigned int)devid);
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 3057dfc..e80ee23 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -46,6 +46,7 @@
 	unsigned int tdp_to_watts;
 	unsigned int base_tdp;
 	unsigned int processor_pwr_watts;
+	unsigned int cpu_pwr_sample_ratio;
 };
 
 static ssize_t show_power(struct device *dev,
@@ -59,8 +60,19 @@
 
 	pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
 				  REG_TDP_RUNNING_AVERAGE, &val);
-	running_avg_capture = (val >> 4) & 0x3fffff;
-	running_avg_capture = sign_extend32(running_avg_capture, 21);
+
+	/*
+	 * On Carrizo and later platforms, TdpRunAvgAccCap bit field
+	 * is extended to 4:31 from 4:25.
+	 */
+	if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60) {
+		running_avg_capture = val >> 4;
+		running_avg_capture = sign_extend32(running_avg_capture, 27);
+	} else {
+		running_avg_capture = (val >> 4) & 0x3fffff;
+		running_avg_capture = sign_extend32(running_avg_capture, 21);
+	}
+
 	running_avg_range = (val & 0xf) + 1;
 
 	pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
@@ -117,7 +129,7 @@
 };
 __ATTRIBUTE_GROUPS(fam15h_power);
 
-static bool fam15h_power_is_internal_node0(struct pci_dev *f4)
+static bool should_load_on_this_node(struct pci_dev *f4)
 {
 	u32 val;
 
@@ -177,7 +189,7 @@
 static void fam15h_power_init_data(struct pci_dev *f4,
 					     struct fam15h_power_data *data)
 {
-	u32 val;
+	u32 val, eax, ebx, ecx, edx;
 	u64 tmp;
 
 	pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val);
@@ -198,6 +210,19 @@
 
 	/* convert to microWatt */
 	data->processor_pwr_watts = (tmp * 15625) >> 10;
+
+	cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+
+	/* CPUID Fn8000_0007:EDX[12] indicates support for accumulated power */
+	if (!(edx & BIT(12)))
+		return;
+
+	/*
+	 * determine the ratio of the compute unit power accumulator
+	 * sample period to the PTSC counter period by executing CPUID
+	 * Fn8000_0007:ECX
+	 */
+	data->cpu_pwr_sample_ratio = ecx;
 }
 
 static int fam15h_power_probe(struct pci_dev *pdev,
@@ -214,7 +239,7 @@
 	 */
 	tweak_runavg_range(pdev);
 
-	if (!fam15h_power_is_internal_node0(pdev))
+	if (!should_load_on_this_node(pdev))
 		return -ENODEV;
 
 	data = devm_kzalloc(dev, sizeof(struct fam15h_power_data), GFP_KERNEL);
@@ -233,6 +258,7 @@
 static const struct pci_device_id fam15h_power_id_table[] = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
 	{}
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 85d106f..b96a2a9 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -1113,7 +1113,6 @@
 static struct i2c_driver g762_driver = {
 	.driver = {
 		.name = DRVNAME,
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(g762_dt_match),
 	},
 	.probe	  = g762_probe,
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d0ee556..1896e26 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -21,6 +21,7 @@
  *            IT8721F  Super I/O chip w/LPC interface
  *            IT8726F  Super I/O chip w/LPC interface
  *            IT8728F  Super I/O chip w/LPC interface
+ *            IT8732F  Super I/O chip w/LPC interface
  *            IT8758E  Super I/O chip w/LPC interface
  *            IT8771E  Super I/O chip w/LPC interface
  *            IT8772E  Super I/O chip w/LPC interface
@@ -69,8 +70,9 @@
 
 #define DRVNAME "it87"
 
-enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8771,
-	     it8772, it8781, it8782, it8783, it8786, it8790, it8603, it8620 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
+	     it8771, it8772, it8781, it8782, it8783, it8786, it8790, it8603,
+	     it8620 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -148,6 +150,7 @@
 #define IT8721F_DEVID 0x8721
 #define IT8726F_DEVID 0x8726
 #define IT8728F_DEVID 0x8728
+#define IT8732F_DEVID 0x8732
 #define IT8771E_DEVID 0x8771
 #define IT8772E_DEVID 0x8772
 #define IT8781F_DEVID 0x8781
@@ -265,6 +268,7 @@
 #define FEAT_VID		(1 << 9)	/* Set if chip supports VID */
 #define FEAT_IN7_INTERNAL	(1 << 10)	/* Set if in7 is internal */
 #define FEAT_SIX_FANS		(1 << 11)	/* Supports six fans */
+#define FEAT_10_9MV_ADC		(1 << 12)	/* Set if ADC is 10.9 mV */
 
 static const struct it87_devices it87_devices[] = {
 	[it87] = {
@@ -315,6 +319,15 @@
 		  | FEAT_IN7_INTERNAL,
 		.peci_mask = 0x07,
 	},
+	[it8732] = {
+		.name = "it8732",
+		.suffix = "F",
+		.features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
+		  | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
+		  | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
+		.peci_mask = 0x07,
+		.old_peci_mask = 0x02,	/* Actually reports PCH */
+	},
 	[it8771] = {
 		.name = "it8771",
 		.suffix = "E",
@@ -391,6 +404,7 @@
 
 #define has_16bit_fans(data)	((data)->features & FEAT_16BIT_FANS)
 #define has_12mv_adc(data)	((data)->features & FEAT_12MV_ADC)
+#define has_10_9mv_adc(data)	((data)->features & FEAT_10_9MV_ADC)
 #define has_newer_autopwm(data)	((data)->features & FEAT_NEWER_AUTOPWM)
 #define has_old_autopwm(data)	((data)->features & FEAT_OLD_AUTOPWM)
 #define has_temp_offset(data)	((data)->features & FEAT_TEMP_OFFSET)
@@ -475,7 +489,14 @@
 
 static int adc_lsb(const struct it87_data *data, int nr)
 {
-	int lsb = has_12mv_adc(data) ? 12 : 16;
+	int lsb;
+
+	if (has_12mv_adc(data))
+		lsb = 120;
+	else if (has_10_9mv_adc(data))
+		lsb = 109;
+	else
+		lsb = 160;
 	if (data->in_scaled & (1 << nr))
 		lsb <<= 1;
 	return lsb;
@@ -483,13 +504,13 @@
 
 static u8 in_to_reg(const struct it87_data *data, int nr, long val)
 {
-	val = DIV_ROUND_CLOSEST(val, adc_lsb(data, nr));
+	val = DIV_ROUND_CLOSEST(val * 10, adc_lsb(data, nr));
 	return clamp_val(val, 0, 255);
 }
 
 static int in_from_reg(const struct it87_data *data, int nr, int val)
 {
-	return val * adc_lsb(data, nr);
+	return DIV_ROUND_CLOSEST(val * adc_lsb(data, nr), 10);
 }
 
 static inline u8 FAN_TO_REG(long rpm, int div)
@@ -1515,9 +1536,14 @@
 	};
 	struct it87_data *data = dev_get_drvdata(dev);
 	int nr = to_sensor_dev_attr(attr)->index;
+	const char *label;
 
-	return sprintf(buf, "%s\n", has_12mv_adc(data) ? labels_it8721[nr]
-						       : labels[nr]);
+	if (has_12mv_adc(data) || has_10_9mv_adc(data))
+		label = labels_it8721[nr];
+	else
+		label = labels[nr];
+
+	return sprintf(buf, "%s\n", label);
 }
 static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
@@ -1853,6 +1879,9 @@
 	case IT8728F_DEVID:
 		sio_data->type = it8728;
 		break;
+	case IT8732F_DEVID:
+		sio_data->type = it8732;
+		break;
 	case IT8771E_DEVID:
 		sio_data->type = it8771;
 		break;
diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c
index 97204dc..9296e9d 100644
--- a/drivers/hwmon/lm70.c
+++ b/drivers/hwmon/lm70.c
@@ -37,6 +37,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 
 
 #define DRVNAME		"lm70"
@@ -130,11 +131,41 @@
 
 /*----------------------------------------------------------------------*/
 
+#ifdef CONFIG_OF
+static const struct of_device_id lm70_of_ids[] = {
+	{
+		.compatible = "ti,lm70",
+		.data = (void *) LM70_CHIP_LM70,
+	},
+	{
+		.compatible = "ti,tmp121",
+		.data = (void *) LM70_CHIP_TMP121,
+	},
+	{
+		.compatible = "ti,lm71",
+		.data = (void *) LM70_CHIP_LM71,
+	},
+	{
+		.compatible = "ti,lm74",
+		.data = (void *) LM70_CHIP_LM74,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, lm70_of_ids);
+#endif
+
 static int lm70_probe(struct spi_device *spi)
 {
-	int chip = spi_get_device_id(spi)->driver_data;
+	const struct of_device_id *match;
 	struct device *hwmon_dev;
 	struct lm70 *p_lm70;
+	int chip;
+
+	match = of_match_device(lm70_of_ids, &spi->dev);
+	if (match)
+		chip = (int)(uintptr_t)match->data;
+	else
+		chip = spi_get_device_id(spi)->driver_data;
 
 	/* signaling is SPI_MODE_0 */
 	if (spi->mode & (SPI_CPOL | SPI_CPHA))
@@ -169,6 +200,7 @@
 	.driver = {
 		.name	= "lm70",
 		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(lm70_of_ids),
 	},
 	.id_table = lm70_ids,
 	.probe	= lm70_probe,
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index fbfc02b..3ce33d2 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -49,10 +49,13 @@
 #define REG_VOLTAGE_LOW		0x0f
 #define REG_FANCOUNT_LOW	0x13
 #define REG_START		0x21
-#define REG_MODE		0x22
+#define REG_MODE		0x22 /* 7.2.32 Mode Selection Register */
 #define REG_PECI_ENABLE		0x23
 #define REG_FAN_ENABLE		0x24
 #define REG_VMON_ENABLE		0x25
+#define REG_PWM(x)		(0x60 + (x))
+#define REG_SMARTFAN_EN(x)	(0x64 + (x) / 2)
+#define SMARTFAN_EN_SHIFT(x)	(((x) % 2) * 4)
 #define REG_VENDOR_ID		0xfd
 #define REG_CHIP_ID		0xfe
 #define REG_VERSION_ID		0xff
@@ -66,6 +69,129 @@
 	struct mutex access_lock; /* for multi-byte read and write operations */
 };
 
+static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+	unsigned int mode;
+	int ret;
+
+	ret = regmap_read(data->regmap, REG_MODE, &mode);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%u\n", (mode >> (2 * sattr->index) & 3) + 2);
+}
+
+static ssize_t store_temp_type(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+	unsigned int type;
+	int err;
+
+	err = kstrtouint(buf, 0, &type);
+	if (err < 0)
+		return err;
+	if (sattr->index == 2 && type != 4) /* RD3 */
+		return -EINVAL;
+	if (type < 3 || type > 4)
+		return -EINVAL;
+	err = regmap_update_bits(data->regmap, REG_MODE,
+			3 << 2 * sattr->index, (type - 2) << 2 * sattr->index);
+	return err ? : count;
+}
+
+static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	unsigned int regval;
+	int ret;
+
+	if (sattr->index > 1)
+		return sprintf(buf, "1\n");
+
+	ret = regmap_read(data->regmap, 0x5E, &regval);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%u\n", !(regval & (1 << sattr->index)));
+}
+
+static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
+			char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	unsigned int val;
+	int ret;
+
+	if (!attr->index)
+		return sprintf(buf, "255\n");
+
+	ret = regmap_read(data->regmap, attr->index, &val);
+	if (ret < 0)
+		return ret;
+
+	return sprintf(buf, "%d\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr,
+			 const char *buf, size_t count)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	int err;
+	u8 val;
+
+	err = kstrtou8(buf, 0, &val);
+	if (err < 0)
+		return err;
+
+	err = regmap_write(data->regmap, attr->index, val);
+	return err ? : count;
+}
+
+static ssize_t show_pwm_enable(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+	unsigned int reg, enabled;
+	int ret;
+
+	ret = regmap_read(data->regmap, REG_SMARTFAN_EN(sattr->index), &reg);
+	if (ret < 0)
+		return ret;
+	enabled = reg >> SMARTFAN_EN_SHIFT(sattr->index) & 1;
+	return sprintf(buf, "%u\n", enabled + 1);
+}
+
+static ssize_t store_pwm_enable(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct nct7802_data *data = dev_get_drvdata(dev);
+	struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+	u8 val;
+	int ret;
+
+	ret = kstrtou8(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	if (val < 1 || val > 2)
+		return -EINVAL;
+	ret = regmap_update_bits(data->regmap, REG_SMARTFAN_EN(sattr->index),
+				 1 << SMARTFAN_EN_SHIFT(sattr->index),
+				 (val - 1) << SMARTFAN_EN_SHIFT(sattr->index));
+	return ret ? : count;
+}
+
 static int nct7802_read_temp(struct nct7802_data *data,
 			     u8 reg_temp, u8 reg_temp_low, int *temp)
 {
@@ -377,6 +503,8 @@
 	return err ? : count;
 }
 
+static SENSOR_DEVICE_ATTR(temp1_type, S_IRUGO | S_IWUSR,
+			  show_temp_type, store_temp_type, 0);
 static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0x01,
 			    REG_TEMP_LSB);
 static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp,
@@ -386,6 +514,8 @@
 static SENSOR_DEVICE_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp,
 			    store_temp, 0x3a, 0);
 
+static SENSOR_DEVICE_ATTR(temp2_type, S_IRUGO | S_IWUSR,
+			  show_temp_type, store_temp_type, 1);
 static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0x02,
 			    REG_TEMP_LSB);
 static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp,
@@ -395,6 +525,8 @@
 static SENSOR_DEVICE_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp,
 			    store_temp, 0x3b, 0);
 
+static SENSOR_DEVICE_ATTR(temp3_type, S_IRUGO | S_IWUSR,
+			  show_temp_type, store_temp_type, 2);
 static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0x03,
 			    REG_TEMP_LSB);
 static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp,
@@ -475,6 +607,7 @@
 			    store_beep, 0x5c, 5);
 
 static struct attribute *nct7802_temp_attrs[] = {
+	&sensor_dev_attr_temp1_type.dev_attr.attr,
 	&sensor_dev_attr_temp1_input.dev_attr.attr,
 	&sensor_dev_attr_temp1_min.dev_attr.attr,
 	&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -485,7 +618,8 @@
 	&sensor_dev_attr_temp1_fault.dev_attr.attr,
 	&sensor_dev_attr_temp1_beep.dev_attr.attr,
 
-	&sensor_dev_attr_temp2_input.dev_attr.attr,		/* 9 */
+	&sensor_dev_attr_temp2_type.dev_attr.attr,		/* 10 */
+	&sensor_dev_attr_temp2_input.dev_attr.attr,
 	&sensor_dev_attr_temp2_min.dev_attr.attr,
 	&sensor_dev_attr_temp2_max.dev_attr.attr,
 	&sensor_dev_attr_temp2_crit.dev_attr.attr,
@@ -495,7 +629,8 @@
 	&sensor_dev_attr_temp2_fault.dev_attr.attr,
 	&sensor_dev_attr_temp2_beep.dev_attr.attr,
 
-	&sensor_dev_attr_temp3_input.dev_attr.attr,		/* 18 */
+	&sensor_dev_attr_temp3_type.dev_attr.attr,		/* 20 */
+	&sensor_dev_attr_temp3_input.dev_attr.attr,
 	&sensor_dev_attr_temp3_min.dev_attr.attr,
 	&sensor_dev_attr_temp3_max.dev_attr.attr,
 	&sensor_dev_attr_temp3_crit.dev_attr.attr,
@@ -505,7 +640,7 @@
 	&sensor_dev_attr_temp3_fault.dev_attr.attr,
 	&sensor_dev_attr_temp3_beep.dev_attr.attr,
 
-	&sensor_dev_attr_temp4_input.dev_attr.attr,		/* 27 */
+	&sensor_dev_attr_temp4_input.dev_attr.attr,		/* 30 */
 	&sensor_dev_attr_temp4_min.dev_attr.attr,
 	&sensor_dev_attr_temp4_max.dev_attr.attr,
 	&sensor_dev_attr_temp4_crit.dev_attr.attr,
@@ -514,7 +649,7 @@
 	&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp4_beep.dev_attr.attr,
 
-	&sensor_dev_attr_temp5_input.dev_attr.attr,		/* 35 */
+	&sensor_dev_attr_temp5_input.dev_attr.attr,		/* 38 */
 	&sensor_dev_attr_temp5_min.dev_attr.attr,
 	&sensor_dev_attr_temp5_max.dev_attr.attr,
 	&sensor_dev_attr_temp5_crit.dev_attr.attr,
@@ -523,7 +658,7 @@
 	&sensor_dev_attr_temp5_crit_alarm.dev_attr.attr,
 	&sensor_dev_attr_temp5_beep.dev_attr.attr,
 
-	&sensor_dev_attr_temp6_input.dev_attr.attr,		/* 43 */
+	&sensor_dev_attr_temp6_input.dev_attr.attr,		/* 46 */
 	&sensor_dev_attr_temp6_beep.dev_attr.attr,
 
 	NULL
@@ -541,25 +676,27 @@
 	if (err < 0)
 		return 0;
 
-	if (index < 9 &&
+	if (index < 10 &&
 	    (reg & 03) != 0x01 && (reg & 0x03) != 0x02)		/* RD1 */
 		return 0;
-	if (index >= 9 && index < 18 &&
+
+	if (index >= 10 && index < 20 &&
 	    (reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08)	/* RD2 */
 		return 0;
-	if (index >= 18 && index < 27 && (reg & 0x30) != 0x20)	/* RD3 */
+	if (index >= 20 && index < 30 && (reg & 0x30) != 0x20)	/* RD3 */
 		return 0;
-	if (index >= 27 && index < 35)				/* local */
+
+	if (index >= 30 && index < 38)				/* local */
 		return attr->mode;
 
 	err = regmap_read(data->regmap, REG_PECI_ENABLE, &reg);
 	if (err < 0)
 		return 0;
 
-	if (index >= 35 && index < 43 && !(reg & 0x01))		/* PECI 0 */
+	if (index >= 38 && index < 46 && !(reg & 0x01))		/* PECI 0 */
 		return 0;
 
-	if (index >= 0x43 && (!(reg & 0x02)))			/* PECI 1 */
+	if (index >= 46 && !(reg & 0x02))			/* PECI 1 */
 		return 0;
 
 	return attr->mode;
@@ -687,6 +824,27 @@
 static SENSOR_DEVICE_ATTR_2(fan3_beep, S_IRUGO | S_IWUSR, show_beep, store_beep,
 			    0x5b, 2);
 
+/* 7.2.89 Fan Control Output Type */
+static SENSOR_DEVICE_ATTR(pwm1_mode, S_IRUGO, show_pwm_mode, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm2_mode, S_IRUGO, show_pwm_mode, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm3_mode, S_IRUGO, show_pwm_mode, NULL, 2);
+
+/* 7.2.91... Fan Control Output Value */
+static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, store_pwm,
+			  REG_PWM(0));
+static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, store_pwm,
+			  REG_PWM(1));
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, store_pwm,
+			  REG_PWM(2));
+
+/* 7.2.95... Temperature to Fan mapping Relationships Register */
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+			  store_pwm_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+			  store_pwm_enable, 1);
+static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
+			  store_pwm_enable, 2);
+
 static struct attribute *nct7802_fan_attrs[] = {
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
 	&sensor_dev_attr_fan1_min.dev_attr.attr,
@@ -725,10 +883,142 @@
 	.is_visible = nct7802_fan_is_visible,
 };
 
+static struct attribute *nct7802_pwm_attrs[] = {
+	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm1_mode.dev_attr.attr,
+	&sensor_dev_attr_pwm1.dev_attr.attr,
+	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm2_mode.dev_attr.attr,
+	&sensor_dev_attr_pwm2.dev_attr.attr,
+	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
+	&sensor_dev_attr_pwm3_mode.dev_attr.attr,
+	&sensor_dev_attr_pwm3.dev_attr.attr,
+	NULL
+};
+
+static struct attribute_group nct7802_pwm_group = {
+	.attrs = nct7802_pwm_attrs,
+};
+
+/* 7.2.115... 0x80-0x83, 0x84 Temperature (X-axis) transition */
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x80, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x81, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x82, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point4_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x83, 0);
+static SENSOR_DEVICE_ATTR_2(pwm1_auto_point5_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x84, 0);
+
+/* 7.2.120... 0x85-0x88 PWM (Y-axis) transition */
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x85);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x86);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x87);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x88);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IRUGO, show_pwm, NULL, 0);
+
+/* 7.2.124 Table 2 X-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x90, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x91, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x92, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point4_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x93, 0);
+static SENSOR_DEVICE_ATTR_2(pwm2_auto_point5_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0x94, 0);
+
+/* 7.2.129 Table 2 Y-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x95);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x96);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point3_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x97);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point4_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0x98);
+static SENSOR_DEVICE_ATTR(pwm2_auto_point5_pwm, S_IRUGO, show_pwm, NULL, 0);
+
+/* 7.2.133 Table 3 X-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point1_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0xA0, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point2_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0xA1, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point3_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0xA2, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point4_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0xA3, 0);
+static SENSOR_DEVICE_ATTR_2(pwm3_auto_point5_temp, S_IRUGO | S_IWUSR,
+			    show_temp, store_temp, 0xA4, 0);
+
+/* 7.2.138 Table 3 Y-axis Transition Point 1 Register */
+static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0xA5);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0xA6);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point3_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0xA7);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point4_pwm, S_IRUGO | S_IWUSR,
+			  show_pwm, store_pwm, 0xA8);
+static SENSOR_DEVICE_ATTR(pwm3_auto_point5_pwm, S_IRUGO, show_pwm, NULL, 0);
+
+static struct attribute *nct7802_auto_point_attrs[] = {
+	&sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+
+	&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+
+	&sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point5_temp.dev_attr.attr,
+
+	&sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm2_auto_point5_pwm.dev_attr.attr,
+
+	&sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point5_temp.dev_attr.attr,
+
+	&sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
+	&sensor_dev_attr_pwm3_auto_point5_pwm.dev_attr.attr,
+
+	NULL
+};
+
+static struct attribute_group nct7802_auto_point_group = {
+	.attrs = nct7802_auto_point_attrs,
+};
+
 static const struct attribute_group *nct7802_groups[] = {
 	&nct7802_temp_group,
 	&nct7802_in_group,
 	&nct7802_fan_group,
+	&nct7802_pwm_group,
+	&nct7802_auto_point_group,
 	NULL
 };
 
@@ -776,7 +1066,8 @@
 
 static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
 {
-	return reg != REG_BANK && reg <= 0x20;
+	return (reg != REG_BANK && reg <= 0x20) ||
+		(reg >= REG_PWM(0) && reg <= REG_PWM(2));
 }
 
 static const struct regmap_config nct7802_regmap_config = {
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 9f7dbd1..df6ebb2 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -20,7 +20,8 @@
 	help
 	  If you say yes here you get hardware monitoring support for generic
 	  PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
-	  MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, UDT020, and TPS40400.
+	  MDT040, NCP4200, NCP4208, PDT003, PDT006, PDT012, TPS40400, TPS544B20,
+	  TPS544B25, TPS544C20, TPS544C25, and UDT020.
 
 	  This driver can also be built as a module. If so, the module will
 	  be called pmbus.
@@ -30,8 +31,8 @@
 	default n
 	help
 	  If you say yes here you get hardware monitoring support for Analog
-	  Devices ADM1075, ADM1275, and ADM1276 Hot-Swap Controller and Digital
-	  Power Monitors.
+	  Devices ADM1075, ADM1275, ADM1276, ADM1293, and ADM1294 Hot-Swap
+	  Controllers and Digital Power Monitors.
 
 	  This driver can also be built as a module. If so, the module will
 	  be called adm1275.
@@ -51,7 +52,8 @@
 	default n
 	help
 	  If you say yes here you get hardware monitoring support for Linear
-	  Technology LTC2974, LTC2977, LTC2978, LTC3880, LTC3883, and LTM4676.
+	  Technology LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, LTC3880,
+	  LTC3882, LTC3883, LTC3886, LTC3887, LTM2987, LTM4675, and LTM4676.
 
 	  This driver can also be built as a module. If so, the module will
 	  be called ltc2978.
@@ -73,6 +75,16 @@
 	  This driver can also be built as a module. If so, the module will
 	  be called max16064.
 
+config SENSORS_MAX20751
+	tristate "Maxim MAX20751"
+	default n
+	help
+	  If you say yes here you get hardware monitoring support for Maxim
+	  MAX20751.
+
+	  This driver can also be built as a module. If so, the module will
+	  be called max20751.
+
 config SENSORS_MAX34440
 	tristate "Maxim MAX34440 and compatibles"
 	default n
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 1454293..bce046d 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_SENSORS_LM25066)	+= lm25066.o
 obj-$(CONFIG_SENSORS_LTC2978)	+= ltc2978.o
 obj-$(CONFIG_SENSORS_MAX16064)	+= max16064.o
+obj-$(CONFIG_SENSORS_MAX20751)	+= max20751.o
 obj-$(CONFIG_SENSORS_MAX34440)	+= max34440.o
 obj-$(CONFIG_SENSORS_MAX8688)	+= max8688.o
 obj-$(CONFIG_SENSORS_TPS40422)	+= tps40422.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 60aad95..188af4c 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -21,46 +21,120 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
+#include <linux/bitops.h>
 #include "pmbus.h"
 
-enum chips { adm1075, adm1275, adm1276 };
+enum chips { adm1075, adm1275, adm1276, adm1293, adm1294 };
+
+#define ADM1275_MFR_STATUS_IOUT_WARN2	BIT(0)
+#define ADM1293_MFR_STATUS_VAUX_UV_WARN	BIT(5)
+#define ADM1293_MFR_STATUS_VAUX_OV_WARN	BIT(6)
 
 #define ADM1275_PEAK_IOUT		0xd0
 #define ADM1275_PEAK_VIN		0xd1
 #define ADM1275_PEAK_VOUT		0xd2
 #define ADM1275_PMON_CONFIG		0xd4
 
-#define ADM1275_VIN_VOUT_SELECT		(1 << 6)
-#define ADM1275_VRANGE			(1 << 5)
-#define ADM1075_IRANGE_50		(1 << 4)
-#define ADM1075_IRANGE_25		(1 << 3)
-#define ADM1075_IRANGE_MASK		((1 << 3) | (1 << 4))
+#define ADM1275_VIN_VOUT_SELECT		BIT(6)
+#define ADM1275_VRANGE			BIT(5)
+#define ADM1075_IRANGE_50		BIT(4)
+#define ADM1075_IRANGE_25		BIT(3)
+#define ADM1075_IRANGE_MASK		(BIT(3) | BIT(4))
+
+#define ADM1293_IRANGE_25		0
+#define ADM1293_IRANGE_50		BIT(6)
+#define ADM1293_IRANGE_100		BIT(7)
+#define ADM1293_IRANGE_200		(BIT(6) | BIT(7))
+#define ADM1293_IRANGE_MASK		(BIT(6) | BIT(7))
+
+#define ADM1293_VIN_SEL_012		BIT(2)
+#define ADM1293_VIN_SEL_074		BIT(3)
+#define ADM1293_VIN_SEL_210		(BIT(2) | BIT(3))
+#define ADM1293_VIN_SEL_MASK		(BIT(2) | BIT(3))
+
+#define ADM1293_VAUX_EN			BIT(1)
 
 #define ADM1275_IOUT_WARN2_LIMIT	0xd7
 #define ADM1275_DEVICE_CONFIG		0xd8
 
-#define ADM1275_IOUT_WARN2_SELECT	(1 << 4)
+#define ADM1275_IOUT_WARN2_SELECT	BIT(4)
 
 #define ADM1276_PEAK_PIN		0xda
-
-#define ADM1275_MFR_STATUS_IOUT_WARN2	(1 << 0)
-
 #define ADM1075_READ_VAUX		0xdd
 #define ADM1075_VAUX_OV_WARN_LIMIT	0xde
 #define ADM1075_VAUX_UV_WARN_LIMIT	0xdf
+#define ADM1293_IOUT_MIN		0xe3
+#define ADM1293_PIN_MIN			0xe4
 #define ADM1075_VAUX_STATUS		0xf6
 
-#define ADM1075_VAUX_OV_WARN		(1<<7)
-#define ADM1075_VAUX_UV_WARN		(1<<6)
+#define ADM1075_VAUX_OV_WARN		BIT(7)
+#define ADM1075_VAUX_UV_WARN		BIT(6)
 
 struct adm1275_data {
 	int id;
 	bool have_oc_fault;
+	bool have_uc_fault;
+	bool have_vout;
+	bool have_vaux_status;
+	bool have_mfr_vaux_status;
+	bool have_iout_min;
+	bool have_pin_min;
+	bool have_pin_max;
 	struct pmbus_driver_info info;
 };
 
 #define to_adm1275_data(x)  container_of(x, struct adm1275_data, info)
 
+struct coefficients {
+	s16 m;
+	s16 b;
+	s16 R;
+};
+
+static const struct coefficients adm1075_coefficients[] = {
+	[0] = { 27169, 0, -1 },		/* voltage */
+	[1] = { 806, 20475, -1 },	/* current, irange25 */
+	[2] = { 404, 20475, -1 },	/* current, irange50 */
+	[3] = { 8549, 0, -1 },		/* power, irange25 */
+	[4] = { 4279, 0, -1 },		/* power, irange50 */
+};
+
+static const struct coefficients adm1275_coefficients[] = {
+	[0] = { 19199, 0, -2 },		/* voltage, vrange set */
+	[1] = { 6720, 0, -1 },		/* voltage, vrange not set */
+	[2] = { 807, 20475, -1 },	/* current */
+};
+
+static const struct coefficients adm1276_coefficients[] = {
+	[0] = { 19199, 0, -2 },		/* voltage, vrange set */
+	[1] = { 6720, 0, -1 },		/* voltage, vrange not set */
+	[2] = { 807, 20475, -1 },	/* current */
+	[3] = { 6043, 0, -2 },		/* power, vrange set */
+	[4] = { 2115, 0, -1 },		/* power, vrange not set */
+};
+
+static const struct coefficients adm1293_coefficients[] = {
+	[0] = { 3333, -1, 0 },		/* voltage, vrange 1.2V */
+	[1] = { 5552, -5, -1 },		/* voltage, vrange 7.4V */
+	[2] = { 19604, -50, -2 },	/* voltage, vrange 21V */
+	[3] = { 8000, -100, -2 },	/* current, irange25 */
+	[4] = { 4000, -100, -2 },	/* current, irange50 */
+	[5] = { 20000, -1000, -3 },	/* current, irange100 */
+	[6] = { 10000, -1000, -3 },	/* current, irange200 */
+	[7] = { 10417, 0, -1 },		/* power, 1.2V, irange25 */
+	[8] = { 5208, 0, -1 },		/* power, 1.2V, irange50 */
+	[9] = { 26042, 0, -2 },		/* power, 1.2V, irange100 */
+	[10] = { 13021, 0, -2 },	/* power, 1.2V, irange200 */
+	[11] = { 17351, 0, -2 },	/* power, 7.4V, irange25 */
+	[12] = { 8676, 0, -2 },		/* power, 7.4V, irange50 */
+	[13] = { 4338, 0, -2 },		/* power, 7.4V, irange100 */
+	[14] = { 21689, 0, -3 },	/* power, 7.4V, irange200 */
+	[15] = { 6126, 0, -2 },		/* power, 21V, irange25 */
+	[16] = { 30631, 0, -3 },	/* power, 21V, irange50 */
+	[17] = { 15316, 0, -3 },	/* power, 21V, irange100 */
+	[18] = { 7658, 0, -3 },		/* power, 21V, irange200 */
+};
+
 static int adm1275_read_word_data(struct i2c_client *client, int page, int reg)
 {
 	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
@@ -72,42 +146,37 @@
 
 	switch (reg) {
 	case PMBUS_IOUT_UC_FAULT_LIMIT:
-		if (data->have_oc_fault) {
-			ret = -ENXIO;
-			break;
-		}
+		if (!data->have_uc_fault)
+			return -ENXIO;
 		ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
 		break;
 	case PMBUS_IOUT_OC_FAULT_LIMIT:
-		if (!data->have_oc_fault) {
-			ret = -ENXIO;
-			break;
-		}
+		if (!data->have_oc_fault)
+			return -ENXIO;
 		ret = pmbus_read_word_data(client, 0, ADM1275_IOUT_WARN2_LIMIT);
 		break;
 	case PMBUS_VOUT_OV_WARN_LIMIT:
-		if (data->id != adm1075) {
-			ret = -ENODATA;
-			break;
-		}
+		if (data->have_vout)
+			return -ENODATA;
 		ret = pmbus_read_word_data(client, 0,
 					   ADM1075_VAUX_OV_WARN_LIMIT);
 		break;
 	case PMBUS_VOUT_UV_WARN_LIMIT:
-		if (data->id != adm1075) {
-			ret = -ENODATA;
-			break;
-		}
+		if (data->have_vout)
+			return -ENODATA;
 		ret = pmbus_read_word_data(client, 0,
 					   ADM1075_VAUX_UV_WARN_LIMIT);
 		break;
 	case PMBUS_READ_VOUT:
-		if (data->id != adm1075) {
-			ret = -ENODATA;
-			break;
-		}
+		if (data->have_vout)
+			return -ENODATA;
 		ret = pmbus_read_word_data(client, 0, ADM1075_READ_VAUX);
 		break;
+	case PMBUS_VIRT_READ_IOUT_MIN:
+		if (!data->have_iout_min)
+			return -ENXIO;
+		ret = pmbus_read_word_data(client, 0, ADM1293_IOUT_MIN);
+		break;
 	case PMBUS_VIRT_READ_IOUT_MAX:
 		ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_IOUT);
 		break;
@@ -117,11 +186,14 @@
 	case PMBUS_VIRT_READ_VIN_MAX:
 		ret = pmbus_read_word_data(client, 0, ADM1275_PEAK_VIN);
 		break;
+	case PMBUS_VIRT_READ_PIN_MIN:
+		if (!data->have_pin_min)
+			return -ENXIO;
+		ret = pmbus_read_word_data(client, 0, ADM1293_PIN_MIN);
+		break;
 	case PMBUS_VIRT_READ_PIN_MAX:
-		if (data->id == adm1275) {
-			ret = -ENXIO;
-			break;
-		}
+		if (!data->have_pin_max)
+			return -ENXIO;
 		ret = pmbus_read_word_data(client, 0, ADM1276_PEAK_PIN);
 		break;
 	case PMBUS_VIRT_RESET_IOUT_HISTORY:
@@ -129,8 +201,8 @@
 	case PMBUS_VIRT_RESET_VIN_HISTORY:
 		break;
 	case PMBUS_VIRT_RESET_PIN_HISTORY:
-		if (data->id == adm1275)
-			ret = -ENXIO;
+		if (!data->have_pin_max)
+			return -ENXIO;
 		break;
 	default:
 		ret = -ENODATA;
@@ -142,6 +214,8 @@
 static int adm1275_write_word_data(struct i2c_client *client, int page, int reg,
 				   u16 word)
 {
+	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+	const struct adm1275_data *data = to_adm1275_data(info);
 	int ret;
 
 	if (page)
@@ -155,6 +229,9 @@
 		break;
 	case PMBUS_VIRT_RESET_IOUT_HISTORY:
 		ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_IOUT, 0);
+		if (!ret && data->have_iout_min)
+			ret = pmbus_write_word_data(client, 0,
+						    ADM1293_IOUT_MIN, 0);
 		break;
 	case PMBUS_VIRT_RESET_VOUT_HISTORY:
 		ret = pmbus_write_word_data(client, 0, ADM1275_PEAK_VOUT, 0);
@@ -164,6 +241,9 @@
 		break;
 	case PMBUS_VIRT_RESET_PIN_HISTORY:
 		ret = pmbus_write_word_data(client, 0, ADM1276_PEAK_PIN, 0);
+		if (!ret && data->have_pin_min)
+			ret = pmbus_write_word_data(client, 0,
+						    ADM1293_PIN_MIN, 0);
 		break;
 	default:
 		ret = -ENODATA;
@@ -186,29 +266,40 @@
 		ret = pmbus_read_byte_data(client, page, PMBUS_STATUS_IOUT);
 		if (ret < 0)
 			break;
+		if (!data->have_oc_fault && !data->have_uc_fault)
+			break;
 		mfr_status = pmbus_read_byte_data(client, page,
 						  PMBUS_STATUS_MFR_SPECIFIC);
-		if (mfr_status < 0) {
-			ret = mfr_status;
-			break;
-		}
+		if (mfr_status < 0)
+			return mfr_status;
 		if (mfr_status & ADM1275_MFR_STATUS_IOUT_WARN2) {
 			ret |= data->have_oc_fault ?
 			  PB_IOUT_OC_FAULT : PB_IOUT_UC_FAULT;
 		}
 		break;
 	case PMBUS_STATUS_VOUT:
-		if (data->id != adm1075) {
-			ret = -ENODATA;
-			break;
-		}
+		if (data->have_vout)
+			return -ENODATA;
 		ret = 0;
-		mfr_status = pmbus_read_byte_data(client, 0,
-						  ADM1075_VAUX_STATUS);
-		if (mfr_status & ADM1075_VAUX_OV_WARN)
-			ret |= PB_VOLTAGE_OV_WARNING;
-		if (mfr_status & ADM1075_VAUX_UV_WARN)
-			ret |= PB_VOLTAGE_UV_WARNING;
+		if (data->have_vaux_status) {
+			mfr_status = pmbus_read_byte_data(client, 0,
+							  ADM1075_VAUX_STATUS);
+			if (mfr_status < 0)
+				return mfr_status;
+			if (mfr_status & ADM1075_VAUX_OV_WARN)
+				ret |= PB_VOLTAGE_OV_WARNING;
+			if (mfr_status & ADM1075_VAUX_UV_WARN)
+				ret |= PB_VOLTAGE_UV_WARNING;
+		} else if (data->have_mfr_vaux_status) {
+			mfr_status = pmbus_read_byte_data(client, page,
+						PMBUS_STATUS_MFR_SPECIFIC);
+			if (mfr_status < 0)
+				return mfr_status;
+			if (mfr_status & ADM1293_MFR_STATUS_VAUX_OV_WARN)
+				ret |= PB_VOLTAGE_OV_WARNING;
+			if (mfr_status & ADM1293_MFR_STATUS_VAUX_UV_WARN)
+				ret |= PB_VOLTAGE_UV_WARNING;
+		}
 		break;
 	default:
 		ret = -ENODATA;
@@ -221,6 +312,8 @@
 	{ "adm1075", adm1075 },
 	{ "adm1275", adm1275 },
 	{ "adm1276", adm1276 },
+	{ "adm1293", adm1293 },
+	{ "adm1294", adm1294 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, adm1275_id);
@@ -234,6 +327,8 @@
 	struct pmbus_driver_info *info;
 	struct adm1275_data *data;
 	const struct i2c_device_id *mid;
+	const struct coefficients *coefficients;
+	int vindex = -1, voindex = -1, cindex = -1, pindex = -1;
 
 	if (!i2c_check_functionality(client->adapter,
 				     I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -290,61 +385,38 @@
 	info->format[PSC_VOLTAGE_IN] = direct;
 	info->format[PSC_VOLTAGE_OUT] = direct;
 	info->format[PSC_CURRENT_OUT] = direct;
-	info->m[PSC_CURRENT_OUT] = 807;
-	info->b[PSC_CURRENT_OUT] = 20475;
-	info->R[PSC_CURRENT_OUT] = -1;
+	info->format[PSC_POWER] = direct;
 	info->func[0] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 
 	info->read_word_data = adm1275_read_word_data;
 	info->read_byte_data = adm1275_read_byte_data;
 	info->write_word_data = adm1275_write_word_data;
 
-	if (data->id == adm1075) {
-		info->m[PSC_VOLTAGE_IN] = 27169;
-		info->b[PSC_VOLTAGE_IN] = 0;
-		info->R[PSC_VOLTAGE_IN] = -1;
-		info->m[PSC_VOLTAGE_OUT] = 27169;
-		info->b[PSC_VOLTAGE_OUT] = 0;
-		info->R[PSC_VOLTAGE_OUT] = -1;
-	} else if (config & ADM1275_VRANGE) {
-		info->m[PSC_VOLTAGE_IN] = 19199;
-		info->b[PSC_VOLTAGE_IN] = 0;
-		info->R[PSC_VOLTAGE_IN] = -2;
-		info->m[PSC_VOLTAGE_OUT] = 19199;
-		info->b[PSC_VOLTAGE_OUT] = 0;
-		info->R[PSC_VOLTAGE_OUT] = -2;
-	} else {
-		info->m[PSC_VOLTAGE_IN] = 6720;
-		info->b[PSC_VOLTAGE_IN] = 0;
-		info->R[PSC_VOLTAGE_IN] = -1;
-		info->m[PSC_VOLTAGE_OUT] = 6720;
-		info->b[PSC_VOLTAGE_OUT] = 0;
-		info->R[PSC_VOLTAGE_OUT] = -1;
-	}
-
-	if (device_config & ADM1275_IOUT_WARN2_SELECT)
-		data->have_oc_fault = true;
-
 	switch (data->id) {
 	case adm1075:
-		info->format[PSC_POWER] = direct;
-		info->b[PSC_POWER] = 0;
-		info->R[PSC_POWER] = -1;
+		if (device_config & ADM1275_IOUT_WARN2_SELECT)
+			data->have_oc_fault = true;
+		else
+			data->have_uc_fault = true;
+		data->have_pin_max = true;
+		data->have_vaux_status = true;
+
+		coefficients = adm1075_coefficients;
+		vindex = 0;
 		switch (config & ADM1075_IRANGE_MASK) {
 		case ADM1075_IRANGE_25:
-			info->m[PSC_POWER] = 8549;
-			info->m[PSC_CURRENT_OUT] = 806;
+			cindex = 1;
+			pindex = 3;
 			break;
 		case ADM1075_IRANGE_50:
-			info->m[PSC_POWER] = 4279;
-			info->m[PSC_CURRENT_OUT] = 404;
+			cindex = 2;
+			pindex = 4;
 			break;
 		default:
 			dev_err(&client->dev, "Invalid input current range");
-			info->m[PSC_POWER] = 0;
-			info->m[PSC_CURRENT_OUT] = 0;
 			break;
 		}
+
 		info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
 		  | PMBUS_HAVE_STATUS_INPUT;
 		if (config & ADM1275_VIN_VOUT_SELECT)
@@ -352,6 +424,16 @@
 			  PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
 		break;
 	case adm1275:
+		if (device_config & ADM1275_IOUT_WARN2_SELECT)
+			data->have_oc_fault = true;
+		else
+			data->have_uc_fault = true;
+		data->have_vout = true;
+
+		coefficients = adm1275_coefficients;
+		vindex = (config & ADM1275_VRANGE) ? 0 : 1;
+		cindex = 2;
+
 		if (config & ADM1275_VIN_VOUT_SELECT)
 			info->func[0] |=
 			  PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
@@ -360,22 +442,100 @@
 			  PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
 		break;
 	case adm1276:
-		info->format[PSC_POWER] = direct;
+		if (device_config & ADM1275_IOUT_WARN2_SELECT)
+			data->have_oc_fault = true;
+		else
+			data->have_uc_fault = true;
+		data->have_vout = true;
+		data->have_pin_max = true;
+
+		coefficients = adm1276_coefficients;
+		vindex = (config & ADM1275_VRANGE) ? 0 : 1;
+		cindex = 2;
+		pindex = (config & ADM1275_VRANGE) ? 3 : 4;
+
 		info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_PIN
 		  | PMBUS_HAVE_STATUS_INPUT;
 		if (config & ADM1275_VIN_VOUT_SELECT)
 			info->func[0] |=
 			  PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
-		if (config & ADM1275_VRANGE) {
-			info->m[PSC_POWER] = 6043;
-			info->b[PSC_POWER] = 0;
-			info->R[PSC_POWER] = -2;
-		} else {
-			info->m[PSC_POWER] = 2115;
-			info->b[PSC_POWER] = 0;
-			info->R[PSC_POWER] = -1;
-		}
 		break;
+	case adm1293:
+	case adm1294:
+		data->have_iout_min = true;
+		data->have_pin_min = true;
+		data->have_pin_max = true;
+		data->have_mfr_vaux_status = true;
+
+		coefficients = adm1293_coefficients;
+
+		voindex = 0;
+		switch (config & ADM1293_VIN_SEL_MASK) {
+		case ADM1293_VIN_SEL_012:	/* 1.2V */
+			vindex = 0;
+			break;
+		case ADM1293_VIN_SEL_074:	/* 7.4V */
+			vindex = 1;
+			break;
+		case ADM1293_VIN_SEL_210:	/* 21V */
+			vindex = 2;
+			break;
+		default:			/* disabled */
+			break;
+		}
+
+		switch (config & ADM1293_IRANGE_MASK) {
+		case ADM1293_IRANGE_25:
+			cindex = 3;
+			break;
+		case ADM1293_IRANGE_50:
+			cindex = 4;
+			break;
+		case ADM1293_IRANGE_100:
+			cindex = 5;
+			break;
+		case ADM1293_IRANGE_200:
+			cindex = 6;
+			break;
+		}
+
+		if (vindex >= 0)
+			pindex = 7 + vindex * 4 + (cindex - 3);
+
+		if (config & ADM1293_VAUX_EN)
+			info->func[0] |=
+				PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
+
+		info->func[0] |= PMBUS_HAVE_PIN |
+			PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
+
+		break;
+	default:
+		dev_err(&client->dev, "Unsupported device\n");
+		return -ENODEV;
+	}
+
+	if (voindex < 0)
+		voindex = vindex;
+	if (vindex >= 0) {
+		info->m[PSC_VOLTAGE_IN] = coefficients[vindex].m;
+		info->b[PSC_VOLTAGE_IN] = coefficients[vindex].b;
+		info->R[PSC_VOLTAGE_IN] = coefficients[vindex].R;
+	}
+	if (voindex >= 0) {
+		info->m[PSC_VOLTAGE_OUT] = coefficients[voindex].m;
+		info->b[PSC_VOLTAGE_OUT] = coefficients[voindex].b;
+		info->R[PSC_VOLTAGE_OUT] = coefficients[voindex].R;
+	}
+	if (cindex >= 0) {
+		info->m[PSC_CURRENT_OUT] = coefficients[cindex].m;
+		info->b[PSC_CURRENT_OUT] = coefficients[cindex].b;
+		info->R[PSC_CURRENT_OUT] = coefficients[cindex].R;
+	}
+	if (pindex >= 0) {
+		info->m[PSC_POWER] = coefficients[pindex].m;
+		info->b[PSC_POWER] = coefficients[pindex].b;
+		info->R[PSC_POWER] = coefficients[pindex].R;
 	}
 
 	return pmbus_do_probe(client, id, info);
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index a26b1d1..a3d912cd 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -19,6 +19,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -42,15 +43,15 @@
 #define LM25066_READ_AVG_IIN		0xde
 #define LM25066_READ_AVG_PIN		0xdf
 
-#define LM25066_DEV_SETUP_CL		(1 << 4)	/* Current limit */
+#define LM25066_DEV_SETUP_CL		BIT(4)	/* Current limit */
 
 /* LM25056 only */
 
 #define LM25056_VAUX_OV_WARN_LIMIT	0xe3
 #define LM25056_VAUX_UV_WARN_LIMIT	0xe4
 
-#define LM25056_MFR_STS_VAUX_OV_WARN	(1 << 1)
-#define LM25056_MFR_STS_VAUX_UV_WARN	(1 << 0)
+#define LM25056_MFR_STS_VAUX_OV_WARN	BIT(1)
+#define LM25056_MFR_STS_VAUX_UV_WARN	BIT(0)
 
 /* LM25063 only */
 
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 0835050..58b789c 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -1,9 +1,9 @@
 /*
- * Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
- * LTC3883, and LTM4676
+ * Hardware monitoring driver for LTC2978 and compatible chips.
  *
  * Copyright (c) 2011 Ericsson AB.
- * Copyright (c) 2013, 2014 Guenter Roeck
+ * Copyright (c) 2013, 2014, 2015 Guenter Roeck
+ * Copyright (c) 2015 Linear Technology
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,6 +16,8 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/delay.h>
+#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -25,49 +27,71 @@
 #include <linux/regulator/driver.h>
 #include "pmbus.h"
 
-enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883, ltm4676 };
+enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
+	ltc3883, ltc3886, ltc3887, ltm2987, ltm4675, ltm4676 };
 
 /* Common for all chips */
 #define LTC2978_MFR_VOUT_PEAK		0xdd
 #define LTC2978_MFR_VIN_PEAK		0xde
 #define LTC2978_MFR_TEMPERATURE_PEAK	0xdf
-#define LTC2978_MFR_SPECIAL_ID		0xe7
+#define LTC2978_MFR_SPECIAL_ID		0xe7	/* Undocumented on LTC3882 */
+#define LTC2978_MFR_COMMON		0xef
 
-/* LTC2974, LCT2977, and LTC2978 */
+/* LTC2974, LTC2975, LTC2977, LTC2978, LTC2980, and LTM2987 */
 #define LTC2978_MFR_VOUT_MIN		0xfb
 #define LTC2978_MFR_VIN_MIN		0xfc
 #define LTC2978_MFR_TEMPERATURE_MIN	0xfd
 
-/* LTC2974 only */
+/* LTC2974, LTC2975 */
 #define LTC2974_MFR_IOUT_PEAK		0xd7
 #define LTC2974_MFR_IOUT_MIN		0xd8
 
-/* LTC3880, LTC3883, and LTM4676 */
+/* LTC3880, LTC3882, LTC3883, LTC3887, LTM4675, and LTM4676 */
 #define LTC3880_MFR_IOUT_PEAK		0xd7
 #define LTC3880_MFR_CLEAR_PEAKS		0xe3
 #define LTC3880_MFR_TEMPERATURE2_PEAK	0xf4
 
-/* LTC3883 only */
+/* LTC3883 and LTC3886 only */
 #define LTC3883_MFR_IIN_PEAK		0xe1
 
-#define LTC2974_ID_REV1			0x0212
-#define LTC2974_ID_REV2			0x0213
+/* LTC2975 only */
+#define LTC2975_MFR_IIN_PEAK		0xc4
+#define LTC2975_MFR_IIN_MIN		0xc5
+#define LTC2975_MFR_PIN_PEAK		0xc6
+#define LTC2975_MFR_PIN_MIN		0xc7
+
+#define LTC2978_ID_MASK			0xfff0
+
+#define LTC2974_ID			0x0210
+#define LTC2975_ID			0x0220
 #define LTC2977_ID			0x0130
-#define LTC2978_ID_REV1			0x0121
-#define LTC2978_ID_REV2			0x0122
-#define LTC2978A_ID			0x0124
-#define LTC3880_ID			0x4000
-#define LTC3880_ID_MASK			0xff00
+#define LTC2978_ID_REV1			0x0110	/* Early revision */
+#define LTC2978_ID_REV2			0x0120
+#define LTC2980_ID_A			0x8030	/* A/B for two die IDs */
+#define LTC2980_ID_B			0x8040
+#define LTC3880_ID			0x4020
+#define LTC3882_ID			0x4200
+#define LTC3882_ID_D1			0x4240	/* Dash 1 */
 #define LTC3883_ID			0x4300
-#define LTC3883_ID_MASK			0xff00
-#define LTM4676_ID			0x4480	/* datasheet claims 0x440X */
-#define LTM4676_ID_MASK			0xfff0
+#define LTC3886_ID			0x4600
+#define LTC3887_ID			0x4700
+#define LTM2987_ID_A			0x8010	/* A/B for two die IDs */
+#define LTM2987_ID_B			0x8020
+#define LTM4675_ID			0x47a0
+#define LTM4676_ID_REV1			0x4400
+#define LTM4676_ID_REV2			0x4480
+#define LTM4676A_ID			0x47e0
 
 #define LTC2974_NUM_PAGES		4
 #define LTC2978_NUM_PAGES		8
 #define LTC3880_NUM_PAGES		2
 #define LTC3883_NUM_PAGES		1
 
+#define LTC_POLL_TIMEOUT		100	/* in milliseconds */
+
+#define LTC_NOT_BUSY			BIT(5)
+#define LTC_NOT_PENDING			BIT(4)
+
 /*
  * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
  * happens pretty much each time chip data is updated. Raw peak data therefore
@@ -82,13 +106,91 @@
 	u16 temp_min[LTC2974_NUM_PAGES], temp_max[LTC2974_NUM_PAGES];
 	u16 vout_min[LTC2978_NUM_PAGES], vout_max[LTC2978_NUM_PAGES];
 	u16 iout_min[LTC2974_NUM_PAGES], iout_max[LTC2974_NUM_PAGES];
-	u16 iin_max;
+	u16 iin_min, iin_max;
+	u16 pin_min, pin_max;
 	u16 temp2_max;
 	struct pmbus_driver_info info;
+	u32 features;
 };
-
 #define to_ltc2978_data(x)  container_of(x, struct ltc2978_data, info)
 
+#define FEAT_CLEAR_PEAKS	BIT(0)
+#define FEAT_NEEDS_POLLING	BIT(1)
+
+#define has_clear_peaks(d)	((d)->features & FEAT_CLEAR_PEAKS)
+#define needs_polling(d)	((d)->features & FEAT_NEEDS_POLLING)
+
+static int ltc_wait_ready(struct i2c_client *client)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(LTC_POLL_TIMEOUT);
+	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+	struct ltc2978_data *data = to_ltc2978_data(info);
+	int status;
+	u8 mask;
+
+	if (!needs_polling(data))
+		return 0;
+
+	/*
+	 * LTC3883 does not support LTC_NOT_PENDING, even though
+	 * the datasheet claims that it does.
+	 */
+	mask = LTC_NOT_BUSY;
+	if (data->id != ltc3883)
+		mask |= LTC_NOT_PENDING;
+
+	do {
+		status = pmbus_read_byte_data(client, 0, LTC2978_MFR_COMMON);
+		if (status == -EBADMSG || status == -ENXIO) {
+			/* PEC error or NACK: chip may be busy, try again */
+			usleep_range(50, 100);
+			continue;
+		}
+		if (status < 0)
+			return status;
+
+		if ((status & mask) == mask)
+			return 0;
+
+		usleep_range(50, 100);
+	} while (time_before(jiffies, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static int ltc_read_word_data(struct i2c_client *client, int page, int reg)
+{
+	int ret;
+
+	ret = ltc_wait_ready(client);
+	if (ret < 0)
+		return ret;
+
+	return pmbus_read_word_data(client, page, reg);
+}
+
+static int ltc_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+	int ret;
+
+	ret = ltc_wait_ready(client);
+	if (ret < 0)
+		return ret;
+
+	return pmbus_read_byte_data(client, page, reg);
+}
+
+static int ltc_write_byte(struct i2c_client *client, int page, u8 byte)
+{
+	int ret;
+
+	ret = ltc_wait_ready(client);
+	if (ret < 0)
+		return ret;
+
+	return pmbus_write_byte(client, page, byte);
+}
+
 static inline int lin11_to_val(int data)
 {
 	s16 e = ((s16)data) >> 11;
@@ -102,6 +204,34 @@
 	return (e < 0 ? m >> -e : m << e);
 }
 
+static int ltc_get_max(struct ltc2978_data *data, struct i2c_client *client,
+		       int page, int reg, u16 *pmax)
+{
+	int ret;
+
+	ret = ltc_read_word_data(client, page, reg);
+	if (ret >= 0) {
+		if (lin11_to_val(ret) > lin11_to_val(*pmax))
+			*pmax = ret;
+		ret = *pmax;
+	}
+	return ret;
+}
+
+static int ltc_get_min(struct ltc2978_data *data, struct i2c_client *client,
+		       int page, int reg, u16 *pmin)
+{
+	int ret;
+
+	ret = ltc_read_word_data(client, page, reg);
+	if (ret >= 0) {
+		if (lin11_to_val(ret) < lin11_to_val(*pmin))
+			*pmin = ret;
+		ret = *pmin;
+	}
+	return ret;
+}
+
 static int ltc2978_read_word_data_common(struct i2c_client *client, int page,
 					 int reg)
 {
@@ -111,15 +241,11 @@
 
 	switch (reg) {
 	case PMBUS_VIRT_READ_VIN_MAX:
-		ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_PEAK);
-		if (ret >= 0) {
-			if (lin11_to_val(ret) > lin11_to_val(data->vin_max))
-				data->vin_max = ret;
-			ret = data->vin_max;
-		}
+		ret = ltc_get_max(data, client, page, LTC2978_MFR_VIN_PEAK,
+				  &data->vin_max);
 		break;
 	case PMBUS_VIRT_READ_VOUT_MAX:
-		ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
+		ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);
 		if (ret >= 0) {
 			/*
 			 * VOUT is 16 bit unsigned with fixed exponent,
@@ -131,14 +257,9 @@
 		}
 		break;
 	case PMBUS_VIRT_READ_TEMP_MAX:
-		ret = pmbus_read_word_data(client, page,
-					   LTC2978_MFR_TEMPERATURE_PEAK);
-		if (ret >= 0) {
-			if (lin11_to_val(ret)
-			    > lin11_to_val(data->temp_max[page]))
-				data->temp_max[page] = ret;
-			ret = data->temp_max[page];
-		}
+		ret = ltc_get_max(data, client, page,
+				  LTC2978_MFR_TEMPERATURE_PEAK,
+				  &data->temp_max[page]);
 		break;
 	case PMBUS_VIRT_RESET_VOUT_HISTORY:
 	case PMBUS_VIRT_RESET_VIN_HISTORY:
@@ -146,6 +267,9 @@
 		ret = 0;
 		break;
 	default:
+		ret = ltc_wait_ready(client);
+		if (ret < 0)
+			return ret;
 		ret = -ENODATA;
 		break;
 	}
@@ -160,15 +284,11 @@
 
 	switch (reg) {
 	case PMBUS_VIRT_READ_VIN_MIN:
-		ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_MIN);
-		if (ret >= 0) {
-			if (lin11_to_val(ret) < lin11_to_val(data->vin_min))
-				data->vin_min = ret;
-			ret = data->vin_min;
-		}
+		ret = ltc_get_min(data, client, page, LTC2978_MFR_VIN_MIN,
+				  &data->vin_min);
 		break;
 	case PMBUS_VIRT_READ_VOUT_MIN:
-		ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
+		ret = ltc_read_word_data(client, page, LTC2978_MFR_VOUT_MIN);
 		if (ret >= 0) {
 			/*
 			 * VOUT_MIN is known to not be supported on some lots
@@ -184,14 +304,9 @@
 		}
 		break;
 	case PMBUS_VIRT_READ_TEMP_MIN:
-		ret = pmbus_read_word_data(client, page,
-					   LTC2978_MFR_TEMPERATURE_MIN);
-		if (ret >= 0) {
-			if (lin11_to_val(ret)
-			    < lin11_to_val(data->temp_min[page]))
-				data->temp_min[page] = ret;
-			ret = data->temp_min[page];
-		}
+		ret = ltc_get_min(data, client, page,
+				  LTC2978_MFR_TEMPERATURE_MIN,
+				  &data->temp_min[page]);
 		break;
 	case PMBUS_VIRT_READ_IOUT_MAX:
 	case PMBUS_VIRT_RESET_IOUT_HISTORY:
@@ -214,22 +329,12 @@
 
 	switch (reg) {
 	case PMBUS_VIRT_READ_IOUT_MAX:
-		ret = pmbus_read_word_data(client, page, LTC2974_MFR_IOUT_PEAK);
-		if (ret >= 0) {
-			if (lin11_to_val(ret)
-			    > lin11_to_val(data->iout_max[page]))
-				data->iout_max[page] = ret;
-			ret = data->iout_max[page];
-		}
+		ret = ltc_get_max(data, client, page, LTC2974_MFR_IOUT_PEAK,
+				  &data->iout_max[page]);
 		break;
 	case PMBUS_VIRT_READ_IOUT_MIN:
-		ret = pmbus_read_word_data(client, page, LTC2974_MFR_IOUT_MIN);
-		if (ret >= 0) {
-			if (lin11_to_val(ret)
-			    < lin11_to_val(data->iout_min[page]))
-				data->iout_min[page] = ret;
-			ret = data->iout_min[page];
-		}
+		ret = ltc_get_min(data, client, page, LTC2974_MFR_IOUT_MIN,
+				  &data->iout_min[page]);
 		break;
 	case PMBUS_VIRT_RESET_IOUT_HISTORY:
 		ret = 0;
@@ -241,6 +346,40 @@
 	return ret;
 }
 
+static int ltc2975_read_word_data(struct i2c_client *client, int page, int reg)
+{
+	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+	struct ltc2978_data *data = to_ltc2978_data(info);
+	int ret;
+
+	switch (reg) {
+	case PMBUS_VIRT_READ_IIN_MAX:
+		ret = ltc_get_max(data, client, page, LTC2975_MFR_IIN_PEAK,
+				  &data->iin_max);
+		break;
+	case PMBUS_VIRT_READ_IIN_MIN:
+		ret = ltc_get_min(data, client, page, LTC2975_MFR_IIN_MIN,
+				  &data->iin_min);
+		break;
+	case PMBUS_VIRT_READ_PIN_MAX:
+		ret = ltc_get_max(data, client, page, LTC2975_MFR_PIN_PEAK,
+				  &data->pin_max);
+		break;
+	case PMBUS_VIRT_READ_PIN_MIN:
+		ret = ltc_get_min(data, client, page, LTC2975_MFR_PIN_MIN,
+				  &data->pin_min);
+		break;
+	case PMBUS_VIRT_RESET_IIN_HISTORY:
+	case PMBUS_VIRT_RESET_PIN_HISTORY:
+		ret = 0;
+		break;
+	default:
+		ret = ltc2978_read_word_data(client, page, reg);
+		break;
+	}
+	return ret;
+}
+
 static int ltc3880_read_word_data(struct i2c_client *client, int page, int reg)
 {
 	const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
@@ -249,22 +388,13 @@
 
 	switch (reg) {
 	case PMBUS_VIRT_READ_IOUT_MAX:
-		ret = pmbus_read_word_data(client, page, LTC3880_MFR_IOUT_PEAK);
-		if (ret >= 0) {
-			if (lin11_to_val(ret)
-			    > lin11_to_val(data->iout_max[page]))
-				data->iout_max[page] = ret;
-			ret = data->iout_max[page];
-		}
+		ret = ltc_get_max(data, client, page, LTC3880_MFR_IOUT_PEAK,
+				  &data->iout_max[page]);
 		break;
 	case PMBUS_VIRT_READ_TEMP2_MAX:
-		ret = pmbus_read_word_data(client, page,
-					   LTC3880_MFR_TEMPERATURE2_PEAK);
-		if (ret >= 0) {
-			if (lin11_to_val(ret) > lin11_to_val(data->temp2_max))
-				data->temp2_max = ret;
-			ret = data->temp2_max;
-		}
+		ret = ltc_get_max(data, client, page,
+				  LTC3880_MFR_TEMPERATURE2_PEAK,
+				  &data->temp2_max);
 		break;
 	case PMBUS_VIRT_READ_VIN_MIN:
 	case PMBUS_VIRT_READ_VOUT_MIN:
@@ -290,13 +420,8 @@
 
 	switch (reg) {
 	case PMBUS_VIRT_READ_IIN_MAX:
-		ret = pmbus_read_word_data(client, page, LTC3883_MFR_IIN_PEAK);
-		if (ret >= 0) {
-			if (lin11_to_val(ret)
-			    > lin11_to_val(data->iin_max))
-				data->iin_max = ret;
-			ret = data->iin_max;
-		}
+		ret = ltc_get_max(data, client, page, LTC3883_MFR_IIN_PEAK,
+				  &data->iin_max);
 		break;
 	case PMBUS_VIRT_RESET_IIN_HISTORY:
 		ret = 0;
@@ -308,15 +433,15 @@
 	return ret;
 }
 
-static int ltc2978_clear_peaks(struct i2c_client *client, int page,
-			       enum chips id)
+static int ltc2978_clear_peaks(struct ltc2978_data *data,
+			       struct i2c_client *client, int page)
 {
 	int ret;
 
-	if (id == ltc3880 || id == ltc3883)
-		ret = pmbus_write_byte(client, 0, LTC3880_MFR_CLEAR_PEAKS);
+	if (has_clear_peaks(data))
+		ret = ltc_write_byte(client, 0, LTC3880_MFR_CLEAR_PEAKS);
 	else
-		ret = pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+		ret = ltc_write_byte(client, page, PMBUS_CLEAR_FAULTS);
 
 	return ret;
 }
@@ -331,33 +456,42 @@
 	switch (reg) {
 	case PMBUS_VIRT_RESET_IIN_HISTORY:
 		data->iin_max = 0x7c00;
-		ret = ltc2978_clear_peaks(client, page, data->id);
+		data->iin_min = 0x7bff;
+		ret = ltc2978_clear_peaks(data, client, 0);
+		break;
+	case PMBUS_VIRT_RESET_PIN_HISTORY:
+		data->pin_max = 0x7c00;
+		data->pin_min = 0x7bff;
+		ret = ltc2978_clear_peaks(data, client, 0);
 		break;
 	case PMBUS_VIRT_RESET_IOUT_HISTORY:
 		data->iout_max[page] = 0x7c00;
 		data->iout_min[page] = 0xfbff;
-		ret = ltc2978_clear_peaks(client, page, data->id);
+		ret = ltc2978_clear_peaks(data, client, page);
 		break;
 	case PMBUS_VIRT_RESET_TEMP2_HISTORY:
 		data->temp2_max = 0x7c00;
-		ret = ltc2978_clear_peaks(client, page, data->id);
+		ret = ltc2978_clear_peaks(data, client, page);
 		break;
 	case PMBUS_VIRT_RESET_VOUT_HISTORY:
 		data->vout_min[page] = 0xffff;
 		data->vout_max[page] = 0;
-		ret = ltc2978_clear_peaks(client, page, data->id);
+		ret = ltc2978_clear_peaks(data, client, page);
 		break;
 	case PMBUS_VIRT_RESET_VIN_HISTORY:
 		data->vin_min = 0x7bff;
 		data->vin_max = 0x7c00;
-		ret = ltc2978_clear_peaks(client, page, data->id);
+		ret = ltc2978_clear_peaks(data, client, page);
 		break;
 	case PMBUS_VIRT_RESET_TEMP_HISTORY:
 		data->temp_min[page] = 0x7bff;
 		data->temp_max[page] = 0x7c00;
-		ret = ltc2978_clear_peaks(client, page, data->id);
+		ret = ltc2978_clear_peaks(data, client, page);
 		break;
 	default:
+		ret = ltc_wait_ready(client);
+		if (ret < 0)
+			return ret;
 		ret = -ENODATA;
 		break;
 	}
@@ -366,10 +500,17 @@
 
 static const struct i2c_device_id ltc2978_id[] = {
 	{"ltc2974", ltc2974},
+	{"ltc2975", ltc2975},
 	{"ltc2977", ltc2977},
 	{"ltc2978", ltc2978},
+	{"ltc2980", ltc2980},
 	{"ltc3880", ltc3880},
+	{"ltc3882", ltc3882},
 	{"ltc3883", ltc3883},
+	{"ltc3886", ltc3886},
+	{"ltc3887", ltc3887},
+	{"ltm2987", ltm2987},
+	{"ltm4675", ltm4675},
 	{"ltm4676", ltm4676},
 	{}
 };
@@ -388,10 +529,74 @@
 };
 #endif /* CONFIG_SENSORS_LTC2978_REGULATOR */
 
+static int ltc2978_get_id(struct i2c_client *client)
+{
+	int chip_id;
+
+	chip_id = i2c_smbus_read_word_data(client, LTC2978_MFR_SPECIAL_ID);
+	if (chip_id < 0) {
+		const struct i2c_device_id *id;
+		u8 buf[I2C_SMBUS_BLOCK_MAX];
+		int ret;
+
+		if (!i2c_check_functionality(client->adapter,
+					     I2C_FUNC_SMBUS_READ_BLOCK_DATA))
+			return -ENODEV;
+
+		ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf);
+		if (ret < 0)
+			return ret;
+		if (ret < 3 || strncmp(buf, "LTC", 3))
+			return -ENODEV;
+
+		ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf);
+		if (ret < 0)
+			return ret;
+		for (id = &ltc2978_id[0]; strlen(id->name); id++) {
+			if (!strncasecmp(id->name, buf, strlen(id->name)))
+				return (int)id->driver_data;
+		}
+		return -ENODEV;
+	}
+
+	chip_id &= LTC2978_ID_MASK;
+
+	if (chip_id == LTC2974_ID)
+		return ltc2974;
+	else if (chip_id == LTC2975_ID)
+		return ltc2975;
+	else if (chip_id == LTC2977_ID)
+		return ltc2977;
+	else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2)
+		return ltc2978;
+	else if (chip_id == LTC2980_ID_A || chip_id == LTC2980_ID_B)
+		return ltc2980;
+	else if (chip_id == LTC3880_ID)
+		return ltc3880;
+	else if (chip_id == LTC3882_ID || chip_id == LTC3882_ID_D1)
+		return ltc3882;
+	else if (chip_id == LTC3883_ID)
+		return ltc3883;
+	else if (chip_id == LTC3886_ID)
+		return ltc3886;
+	else if (chip_id == LTC3887_ID)
+		return ltc3887;
+	else if (chip_id == LTM2987_ID_A || chip_id == LTM2987_ID_B)
+		return ltm2987;
+	else if (chip_id == LTM4675_ID)
+		return ltm4675;
+	else if (chip_id == LTM4676_ID_REV1 || chip_id == LTM4676_ID_REV2 ||
+		 chip_id == LTM4676A_ID)
+		return ltm4676;
+
+	dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
+	return -ENODEV;
+}
+
 static int ltc2978_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
-	int chip_id, i;
+	int i, chip_id;
 	struct ltc2978_data *data;
 	struct pmbus_driver_info *info;
 
@@ -404,27 +609,11 @@
 	if (!data)
 		return -ENOMEM;
 
-	chip_id = i2c_smbus_read_word_data(client, LTC2978_MFR_SPECIAL_ID);
+	chip_id = ltc2978_get_id(client);
 	if (chip_id < 0)
 		return chip_id;
 
-	if (chip_id == LTC2974_ID_REV1 || chip_id == LTC2974_ID_REV2) {
-		data->id = ltc2974;
-	} else if (chip_id == LTC2977_ID) {
-		data->id = ltc2977;
-	} else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2 ||
-		   chip_id == LTC2978A_ID) {
-		data->id = ltc2978;
-	} else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
-		data->id = ltc3880;
-	} else if ((chip_id & LTC3883_ID_MASK) == LTC3883_ID) {
-		data->id = ltc3883;
-	} else if ((chip_id & LTM4676_ID_MASK) == LTM4676_ID) {
-		data->id = ltm4676;
-	} else {
-		dev_err(&client->dev, "Unsupported chip ID 0x%x\n", chip_id);
-		return -ENODEV;
-	}
+	data->id = chip_id;
 	if (data->id != id->driver_data)
 		dev_warn(&client->dev,
 			 "Device mismatch: Configured %s, detected %s\n",
@@ -433,6 +622,9 @@
 
 	info = &data->info;
 	info->write_word_data = ltc2978_write_word_data;
+	info->write_byte = ltc_write_byte;
+	info->read_word_data = ltc_read_word_data;
+	info->read_byte_data = ltc_read_byte_data;
 
 	data->vin_min = 0x7bff;
 	data->vin_max = 0x7c00;
@@ -461,8 +653,23 @@
 			  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
 		}
 		break;
+	case ltc2975:
+		info->read_word_data = ltc2975_read_word_data;
+		info->pages = LTC2974_NUM_PAGES;
+		info->func[0] = PMBUS_HAVE_IIN | PMBUS_HAVE_PIN
+		  | PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+		  | PMBUS_HAVE_TEMP2;
+		for (i = 0; i < info->pages; i++) {
+			info->func[i] |= PMBUS_HAVE_VOUT
+			  | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_POUT
+			  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+			  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
+		}
+		break;
 	case ltc2977:
 	case ltc2978:
+	case ltc2980:
+	case ltm2987:
 		info->read_word_data = ltc2978_read_word_data;
 		info->pages = LTC2978_NUM_PAGES;
 		info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
@@ -474,7 +681,10 @@
 		}
 		break;
 	case ltc3880:
+	case ltc3887:
+	case ltm4675:
 	case ltm4676:
+		data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
 		info->read_word_data = ltc3880_read_word_data;
 		info->pages = LTC3880_NUM_PAGES;
 		info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
@@ -488,7 +698,23 @@
 		  | PMBUS_HAVE_POUT
 		  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
 		break;
+	case ltc3882:
+		data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
+		info->read_word_data = ltc3880_read_word_data;
+		info->pages = LTC3880_NUM_PAGES;
+		info->func[0] = PMBUS_HAVE_VIN
+		  | PMBUS_HAVE_STATUS_INPUT
+		  | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+		  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+		  | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+		  | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+		info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+		  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+		  | PMBUS_HAVE_POUT
+		  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+		break;
 	case ltc3883:
+		data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
 		info->read_word_data = ltc3883_read_word_data;
 		info->pages = LTC3883_NUM_PAGES;
 		info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
@@ -498,6 +724,21 @@
 		  | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
 		  | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
 		break;
+	case ltc3886:
+		data->features |= FEAT_CLEAR_PEAKS | FEAT_NEEDS_POLLING;
+		info->read_word_data = ltc3883_read_word_data;
+		info->pages = LTC3880_NUM_PAGES;
+		info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN
+		  | PMBUS_HAVE_STATUS_INPUT
+		  | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+		  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+		  | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP
+		  | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+		info->func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+		  | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+		  | PMBUS_HAVE_POUT
+		  | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+		break;
 	default:
 		return -ENODEV;
 	}
@@ -517,10 +758,17 @@
 #ifdef CONFIG_OF
 static const struct of_device_id ltc2978_of_match[] = {
 	{ .compatible = "lltc,ltc2974" },
+	{ .compatible = "lltc,ltc2975" },
 	{ .compatible = "lltc,ltc2977" },
 	{ .compatible = "lltc,ltc2978" },
+	{ .compatible = "lltc,ltc2980" },
 	{ .compatible = "lltc,ltc3880" },
+	{ .compatible = "lltc,ltc3882" },
 	{ .compatible = "lltc,ltc3883" },
+	{ .compatible = "lltc,ltc3886" },
+	{ .compatible = "lltc,ltc3887" },
+	{ .compatible = "lltc,ltm2987" },
+	{ .compatible = "lltc,ltm4675" },
 	{ .compatible = "lltc,ltm4676" },
 	{ }
 };
@@ -540,5 +788,5 @@
 module_i2c_driver(ltc2978_driver);
 
 MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LTC2974, LTC2978, LTC3880, LTC3883, and LTM4676");
+MODULE_DESCRIPTION("PMBus driver for LTC2978 and comppatible chips");
 MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c
new file mode 100644
index 0000000..ab74aea
--- /dev/null
+++ b/drivers/hwmon/pmbus/max20751.c
@@ -0,0 +1,64 @@
+/*
+ * Hardware monitoring driver for Maxim MAX20751
+ *
+ * Copyright (c) 2015 Guenter Roeck
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info max20751_info = {
+	.pages = 1,
+	.format[PSC_VOLTAGE_IN] = linear,
+	.format[PSC_VOLTAGE_OUT] = vid,
+	.vrm_version = vr12,
+	.format[PSC_TEMPERATURE] = linear,
+	.format[PSC_CURRENT_OUT] = linear,
+	.format[PSC_POWER] = linear,
+	.func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT |
+		PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT |
+		PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
+		PMBUS_HAVE_POUT,
+};
+
+static int max20751_probe(struct i2c_client *client,
+			  const struct i2c_device_id *id)
+{
+	return pmbus_do_probe(client, id, &max20751_info);
+}
+
+static const struct i2c_device_id max20751_id[] = {
+	{"max20751", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, max20751_id);
+
+static struct i2c_driver max20751_driver = {
+	.driver = {
+		   .name = "max20751",
+		   },
+	.probe = max20751_probe,
+	.remove = pmbus_do_remove,
+	.id_table = max20751_id,
+};
+
+module_i2c_driver(max20751_driver);
+
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX20751");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 7e930c3..74a1f6f 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -19,6 +19,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -38,10 +39,10 @@
 #define MAX34446_MFR_IOUT_AVG		0xe2
 #define MAX34446_MFR_TEMPERATURE_AVG	0xe3
 
-#define MAX34440_STATUS_OC_WARN		(1 << 0)
-#define MAX34440_STATUS_OC_FAULT	(1 << 1)
-#define MAX34440_STATUS_OT_FAULT	(1 << 5)
-#define MAX34440_STATUS_OT_WARN		(1 << 6)
+#define MAX34440_STATUS_OC_WARN		BIT(0)
+#define MAX34440_STATUS_OC_FAULT	BIT(1)
+#define MAX34440_STATUS_OT_FAULT	BIT(5)
+#define MAX34440_STATUS_OT_WARN		BIT(6)
 
 struct max34440_data {
 	int id;
diff --git a/drivers/hwmon/pmbus/max8688.c b/drivers/hwmon/pmbus/max8688.c
index f04454a..dd4883a 100644
--- a/drivers/hwmon/pmbus/max8688.c
+++ b/drivers/hwmon/pmbus/max8688.c
@@ -18,6 +18,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -30,15 +31,15 @@
 #define MAX8688_MFR_TEMPERATURE_PEAK	0xd6
 #define MAX8688_MFG_STATUS		0xd8
 
-#define MAX8688_STATUS_OC_FAULT		(1 << 4)
-#define MAX8688_STATUS_OV_FAULT		(1 << 5)
-#define MAX8688_STATUS_OV_WARNING	(1 << 8)
-#define MAX8688_STATUS_UV_FAULT		(1 << 9)
-#define MAX8688_STATUS_UV_WARNING	(1 << 10)
-#define MAX8688_STATUS_UC_FAULT		(1 << 11)
-#define MAX8688_STATUS_OC_WARNING	(1 << 12)
-#define MAX8688_STATUS_OT_FAULT		(1 << 13)
-#define MAX8688_STATUS_OT_WARNING	(1 << 14)
+#define MAX8688_STATUS_OC_FAULT		BIT(4)
+#define MAX8688_STATUS_OV_FAULT		BIT(5)
+#define MAX8688_STATUS_OV_WARNING	BIT(8)
+#define MAX8688_STATUS_UV_FAULT		BIT(9)
+#define MAX8688_STATUS_UV_WARNING	BIT(10)
+#define MAX8688_STATUS_UC_FAULT		BIT(11)
+#define MAX8688_STATUS_OC_WARNING	BIT(12)
+#define MAX8688_STATUS_OT_FAULT		BIT(13)
+#define MAX8688_STATUS_OT_WARNING	BIT(14)
 
 static int max8688_read_word_data(struct i2c_client *client, int page, int reg)
 {
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 554d024..0a74991 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -129,6 +129,7 @@
 				break;
 			case 1:
 				info->format[PSC_VOLTAGE_OUT] = vid;
+				info->vrm_version = vr11;
 				break;
 			case 2:
 				info->format[PSC_VOLTAGE_OUT] = direct;
@@ -193,6 +194,10 @@
 	{"pdt012", 1},
 	{"pmbus", 0},
 	{"tps40400", 1},
+	{"tps544b20", 1},
+	{"tps544b25", 1},
+	{"tps544c20", 1},
+	{"tps544c25", 1},
 	{"udt020", 1},
 	{}
 };
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 89a23ff..bfcb13b 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -19,114 +19,116 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/regulator/driver.h>
-
 #ifndef PMBUS_H
 #define PMBUS_H
 
+#include <linux/bitops.h>
+#include <linux/regulator/driver.h>
+
 /*
  * Registers
  */
-#define PMBUS_PAGE			0x00
-#define PMBUS_OPERATION			0x01
-#define PMBUS_ON_OFF_CONFIG		0x02
-#define PMBUS_CLEAR_FAULTS		0x03
-#define PMBUS_PHASE			0x04
+enum pmbus_regs {
+	PMBUS_PAGE			= 0x00,
+	PMBUS_OPERATION			= 0x01,
+	PMBUS_ON_OFF_CONFIG		= 0x02,
+	PMBUS_CLEAR_FAULTS		= 0x03,
+	PMBUS_PHASE			= 0x04,
 
-#define PMBUS_CAPABILITY		0x19
-#define PMBUS_QUERY			0x1A
+	PMBUS_CAPABILITY		= 0x19,
+	PMBUS_QUERY			= 0x1A,
 
-#define PMBUS_VOUT_MODE			0x20
-#define PMBUS_VOUT_COMMAND		0x21
-#define PMBUS_VOUT_TRIM			0x22
-#define PMBUS_VOUT_CAL_OFFSET		0x23
-#define PMBUS_VOUT_MAX			0x24
-#define PMBUS_VOUT_MARGIN_HIGH		0x25
-#define PMBUS_VOUT_MARGIN_LOW		0x26
-#define PMBUS_VOUT_TRANSITION_RATE	0x27
-#define PMBUS_VOUT_DROOP		0x28
-#define PMBUS_VOUT_SCALE_LOOP		0x29
-#define PMBUS_VOUT_SCALE_MONITOR	0x2A
+	PMBUS_VOUT_MODE			= 0x20,
+	PMBUS_VOUT_COMMAND		= 0x21,
+	PMBUS_VOUT_TRIM			= 0x22,
+	PMBUS_VOUT_CAL_OFFSET		= 0x23,
+	PMBUS_VOUT_MAX			= 0x24,
+	PMBUS_VOUT_MARGIN_HIGH		= 0x25,
+	PMBUS_VOUT_MARGIN_LOW		= 0x26,
+	PMBUS_VOUT_TRANSITION_RATE	= 0x27,
+	PMBUS_VOUT_DROOP		= 0x28,
+	PMBUS_VOUT_SCALE_LOOP		= 0x29,
+	PMBUS_VOUT_SCALE_MONITOR	= 0x2A,
 
-#define PMBUS_COEFFICIENTS		0x30
-#define PMBUS_POUT_MAX			0x31
+	PMBUS_COEFFICIENTS		= 0x30,
+	PMBUS_POUT_MAX			= 0x31,
 
-#define PMBUS_FAN_CONFIG_12		0x3A
-#define PMBUS_FAN_COMMAND_1		0x3B
-#define PMBUS_FAN_COMMAND_2		0x3C
-#define PMBUS_FAN_CONFIG_34		0x3D
-#define PMBUS_FAN_COMMAND_3		0x3E
-#define PMBUS_FAN_COMMAND_4		0x3F
+	PMBUS_FAN_CONFIG_12		= 0x3A,
+	PMBUS_FAN_COMMAND_1		= 0x3B,
+	PMBUS_FAN_COMMAND_2		= 0x3C,
+	PMBUS_FAN_CONFIG_34		= 0x3D,
+	PMBUS_FAN_COMMAND_3		= 0x3E,
+	PMBUS_FAN_COMMAND_4		= 0x3F,
 
-#define PMBUS_VOUT_OV_FAULT_LIMIT	0x40
-#define PMBUS_VOUT_OV_FAULT_RESPONSE	0x41
-#define PMBUS_VOUT_OV_WARN_LIMIT	0x42
-#define PMBUS_VOUT_UV_WARN_LIMIT	0x43
-#define PMBUS_VOUT_UV_FAULT_LIMIT	0x44
-#define PMBUS_VOUT_UV_FAULT_RESPONSE	0x45
-#define PMBUS_IOUT_OC_FAULT_LIMIT	0x46
-#define PMBUS_IOUT_OC_FAULT_RESPONSE	0x47
-#define PMBUS_IOUT_OC_LV_FAULT_LIMIT	0x48
-#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE	0x49
-#define PMBUS_IOUT_OC_WARN_LIMIT	0x4A
-#define PMBUS_IOUT_UC_FAULT_LIMIT	0x4B
-#define PMBUS_IOUT_UC_FAULT_RESPONSE	0x4C
+	PMBUS_VOUT_OV_FAULT_LIMIT	= 0x40,
+	PMBUS_VOUT_OV_FAULT_RESPONSE	= 0x41,
+	PMBUS_VOUT_OV_WARN_LIMIT	= 0x42,
+	PMBUS_VOUT_UV_WARN_LIMIT	= 0x43,
+	PMBUS_VOUT_UV_FAULT_LIMIT	= 0x44,
+	PMBUS_VOUT_UV_FAULT_RESPONSE	= 0x45,
+	PMBUS_IOUT_OC_FAULT_LIMIT	= 0x46,
+	PMBUS_IOUT_OC_FAULT_RESPONSE	= 0x47,
+	PMBUS_IOUT_OC_LV_FAULT_LIMIT	= 0x48,
+	PMBUS_IOUT_OC_LV_FAULT_RESPONSE	= 0x49,
+	PMBUS_IOUT_OC_WARN_LIMIT	= 0x4A,
+	PMBUS_IOUT_UC_FAULT_LIMIT	= 0x4B,
+	PMBUS_IOUT_UC_FAULT_RESPONSE	= 0x4C,
 
-#define PMBUS_OT_FAULT_LIMIT		0x4F
-#define PMBUS_OT_FAULT_RESPONSE		0x50
-#define PMBUS_OT_WARN_LIMIT		0x51
-#define PMBUS_UT_WARN_LIMIT		0x52
-#define PMBUS_UT_FAULT_LIMIT		0x53
-#define PMBUS_UT_FAULT_RESPONSE		0x54
-#define PMBUS_VIN_OV_FAULT_LIMIT	0x55
-#define PMBUS_VIN_OV_FAULT_RESPONSE	0x56
-#define PMBUS_VIN_OV_WARN_LIMIT		0x57
-#define PMBUS_VIN_UV_WARN_LIMIT		0x58
-#define PMBUS_VIN_UV_FAULT_LIMIT	0x59
+	PMBUS_OT_FAULT_LIMIT		= 0x4F,
+	PMBUS_OT_FAULT_RESPONSE		= 0x50,
+	PMBUS_OT_WARN_LIMIT		= 0x51,
+	PMBUS_UT_WARN_LIMIT		= 0x52,
+	PMBUS_UT_FAULT_LIMIT		= 0x53,
+	PMBUS_UT_FAULT_RESPONSE		= 0x54,
+	PMBUS_VIN_OV_FAULT_LIMIT	= 0x55,
+	PMBUS_VIN_OV_FAULT_RESPONSE	= 0x56,
+	PMBUS_VIN_OV_WARN_LIMIT		= 0x57,
+	PMBUS_VIN_UV_WARN_LIMIT		= 0x58,
+	PMBUS_VIN_UV_FAULT_LIMIT	= 0x59,
 
-#define PMBUS_IIN_OC_FAULT_LIMIT	0x5B
-#define PMBUS_IIN_OC_WARN_LIMIT		0x5D
+	PMBUS_IIN_OC_FAULT_LIMIT	= 0x5B,
+	PMBUS_IIN_OC_WARN_LIMIT		= 0x5D,
 
-#define PMBUS_POUT_OP_FAULT_LIMIT	0x68
-#define PMBUS_POUT_OP_WARN_LIMIT	0x6A
-#define PMBUS_PIN_OP_WARN_LIMIT		0x6B
+	PMBUS_POUT_OP_FAULT_LIMIT	= 0x68,
+	PMBUS_POUT_OP_WARN_LIMIT	= 0x6A,
+	PMBUS_PIN_OP_WARN_LIMIT		= 0x6B,
 
-#define PMBUS_STATUS_BYTE		0x78
-#define PMBUS_STATUS_WORD		0x79
-#define PMBUS_STATUS_VOUT		0x7A
-#define PMBUS_STATUS_IOUT		0x7B
-#define PMBUS_STATUS_INPUT		0x7C
-#define PMBUS_STATUS_TEMPERATURE	0x7D
-#define PMBUS_STATUS_CML		0x7E
-#define PMBUS_STATUS_OTHER		0x7F
-#define PMBUS_STATUS_MFR_SPECIFIC	0x80
-#define PMBUS_STATUS_FAN_12		0x81
-#define PMBUS_STATUS_FAN_34		0x82
+	PMBUS_STATUS_BYTE		= 0x78,
+	PMBUS_STATUS_WORD		= 0x79,
+	PMBUS_STATUS_VOUT		= 0x7A,
+	PMBUS_STATUS_IOUT		= 0x7B,
+	PMBUS_STATUS_INPUT		= 0x7C,
+	PMBUS_STATUS_TEMPERATURE	= 0x7D,
+	PMBUS_STATUS_CML		= 0x7E,
+	PMBUS_STATUS_OTHER		= 0x7F,
+	PMBUS_STATUS_MFR_SPECIFIC	= 0x80,
+	PMBUS_STATUS_FAN_12		= 0x81,
+	PMBUS_STATUS_FAN_34		= 0x82,
 
-#define PMBUS_READ_VIN			0x88
-#define PMBUS_READ_IIN			0x89
-#define PMBUS_READ_VCAP			0x8A
-#define PMBUS_READ_VOUT			0x8B
-#define PMBUS_READ_IOUT			0x8C
-#define PMBUS_READ_TEMPERATURE_1	0x8D
-#define PMBUS_READ_TEMPERATURE_2	0x8E
-#define PMBUS_READ_TEMPERATURE_3	0x8F
-#define PMBUS_READ_FAN_SPEED_1		0x90
-#define PMBUS_READ_FAN_SPEED_2		0x91
-#define PMBUS_READ_FAN_SPEED_3		0x92
-#define PMBUS_READ_FAN_SPEED_4		0x93
-#define PMBUS_READ_DUTY_CYCLE		0x94
-#define PMBUS_READ_FREQUENCY		0x95
-#define PMBUS_READ_POUT			0x96
-#define PMBUS_READ_PIN			0x97
+	PMBUS_READ_VIN			= 0x88,
+	PMBUS_READ_IIN			= 0x89,
+	PMBUS_READ_VCAP			= 0x8A,
+	PMBUS_READ_VOUT			= 0x8B,
+	PMBUS_READ_IOUT			= 0x8C,
+	PMBUS_READ_TEMPERATURE_1	= 0x8D,
+	PMBUS_READ_TEMPERATURE_2	= 0x8E,
+	PMBUS_READ_TEMPERATURE_3	= 0x8F,
+	PMBUS_READ_FAN_SPEED_1		= 0x90,
+	PMBUS_READ_FAN_SPEED_2		= 0x91,
+	PMBUS_READ_FAN_SPEED_3		= 0x92,
+	PMBUS_READ_FAN_SPEED_4		= 0x93,
+	PMBUS_READ_DUTY_CYCLE		= 0x94,
+	PMBUS_READ_FREQUENCY		= 0x95,
+	PMBUS_READ_POUT			= 0x96,
+	PMBUS_READ_PIN			= 0x97,
 
-#define PMBUS_REVISION			0x98
-#define PMBUS_MFR_ID			0x99
-#define PMBUS_MFR_MODEL			0x9A
-#define PMBUS_MFR_REVISION		0x9B
-#define PMBUS_MFR_LOCATION		0x9C
-#define PMBUS_MFR_DATE			0x9D
-#define PMBUS_MFR_SERIAL		0x9E
+	PMBUS_REVISION			= 0x98,
+	PMBUS_MFR_ID			= 0x99,
+	PMBUS_MFR_MODEL			= 0x9A,
+	PMBUS_MFR_REVISION		= 0x9B,
+	PMBUS_MFR_LOCATION		= 0x9C,
+	PMBUS_MFR_DATE			= 0x9D,
+	PMBUS_MFR_SERIAL		= 0x9E,
 
 /*
  * Virtual registers.
@@ -148,55 +150,58 @@
  * the calling PMBus core code will abort if the chip driver returns an error
  * code when reading or writing virtual registers.
  */
-#define PMBUS_VIRT_BASE			0x100
-#define PMBUS_VIRT_READ_TEMP_AVG	(PMBUS_VIRT_BASE + 0)
-#define PMBUS_VIRT_READ_TEMP_MIN	(PMBUS_VIRT_BASE + 1)
-#define PMBUS_VIRT_READ_TEMP_MAX	(PMBUS_VIRT_BASE + 2)
-#define PMBUS_VIRT_RESET_TEMP_HISTORY	(PMBUS_VIRT_BASE + 3)
-#define PMBUS_VIRT_READ_VIN_AVG		(PMBUS_VIRT_BASE + 4)
-#define PMBUS_VIRT_READ_VIN_MIN		(PMBUS_VIRT_BASE + 5)
-#define PMBUS_VIRT_READ_VIN_MAX		(PMBUS_VIRT_BASE + 6)
-#define PMBUS_VIRT_RESET_VIN_HISTORY	(PMBUS_VIRT_BASE + 7)
-#define PMBUS_VIRT_READ_IIN_AVG		(PMBUS_VIRT_BASE + 8)
-#define PMBUS_VIRT_READ_IIN_MIN		(PMBUS_VIRT_BASE + 9)
-#define PMBUS_VIRT_READ_IIN_MAX		(PMBUS_VIRT_BASE + 10)
-#define PMBUS_VIRT_RESET_IIN_HISTORY	(PMBUS_VIRT_BASE + 11)
-#define PMBUS_VIRT_READ_PIN_AVG		(PMBUS_VIRT_BASE + 12)
-#define PMBUS_VIRT_READ_PIN_MAX		(PMBUS_VIRT_BASE + 13)
-#define PMBUS_VIRT_RESET_PIN_HISTORY	(PMBUS_VIRT_BASE + 14)
-#define PMBUS_VIRT_READ_POUT_AVG	(PMBUS_VIRT_BASE + 15)
-#define PMBUS_VIRT_READ_POUT_MAX	(PMBUS_VIRT_BASE + 16)
-#define PMBUS_VIRT_RESET_POUT_HISTORY	(PMBUS_VIRT_BASE + 17)
-#define PMBUS_VIRT_READ_VOUT_AVG	(PMBUS_VIRT_BASE + 18)
-#define PMBUS_VIRT_READ_VOUT_MIN	(PMBUS_VIRT_BASE + 19)
-#define PMBUS_VIRT_READ_VOUT_MAX	(PMBUS_VIRT_BASE + 20)
-#define PMBUS_VIRT_RESET_VOUT_HISTORY	(PMBUS_VIRT_BASE + 21)
-#define PMBUS_VIRT_READ_IOUT_AVG	(PMBUS_VIRT_BASE + 22)
-#define PMBUS_VIRT_READ_IOUT_MIN	(PMBUS_VIRT_BASE + 23)
-#define PMBUS_VIRT_READ_IOUT_MAX	(PMBUS_VIRT_BASE + 24)
-#define PMBUS_VIRT_RESET_IOUT_HISTORY	(PMBUS_VIRT_BASE + 25)
-#define PMBUS_VIRT_READ_TEMP2_AVG	(PMBUS_VIRT_BASE + 26)
-#define PMBUS_VIRT_READ_TEMP2_MIN	(PMBUS_VIRT_BASE + 27)
-#define PMBUS_VIRT_READ_TEMP2_MAX	(PMBUS_VIRT_BASE + 28)
-#define PMBUS_VIRT_RESET_TEMP2_HISTORY	(PMBUS_VIRT_BASE + 29)
+	PMBUS_VIRT_BASE			= 0x100,
+	PMBUS_VIRT_READ_TEMP_AVG,
+	PMBUS_VIRT_READ_TEMP_MIN,
+	PMBUS_VIRT_READ_TEMP_MAX,
+	PMBUS_VIRT_RESET_TEMP_HISTORY,
+	PMBUS_VIRT_READ_VIN_AVG,
+	PMBUS_VIRT_READ_VIN_MIN,
+	PMBUS_VIRT_READ_VIN_MAX,
+	PMBUS_VIRT_RESET_VIN_HISTORY,
+	PMBUS_VIRT_READ_IIN_AVG,
+	PMBUS_VIRT_READ_IIN_MIN,
+	PMBUS_VIRT_READ_IIN_MAX,
+	PMBUS_VIRT_RESET_IIN_HISTORY,
+	PMBUS_VIRT_READ_PIN_AVG,
+	PMBUS_VIRT_READ_PIN_MIN,
+	PMBUS_VIRT_READ_PIN_MAX,
+	PMBUS_VIRT_RESET_PIN_HISTORY,
+	PMBUS_VIRT_READ_POUT_AVG,
+	PMBUS_VIRT_READ_POUT_MIN,
+	PMBUS_VIRT_READ_POUT_MAX,
+	PMBUS_VIRT_RESET_POUT_HISTORY,
+	PMBUS_VIRT_READ_VOUT_AVG,
+	PMBUS_VIRT_READ_VOUT_MIN,
+	PMBUS_VIRT_READ_VOUT_MAX,
+	PMBUS_VIRT_RESET_VOUT_HISTORY,
+	PMBUS_VIRT_READ_IOUT_AVG,
+	PMBUS_VIRT_READ_IOUT_MIN,
+	PMBUS_VIRT_READ_IOUT_MAX,
+	PMBUS_VIRT_RESET_IOUT_HISTORY,
+	PMBUS_VIRT_READ_TEMP2_AVG,
+	PMBUS_VIRT_READ_TEMP2_MIN,
+	PMBUS_VIRT_READ_TEMP2_MAX,
+	PMBUS_VIRT_RESET_TEMP2_HISTORY,
 
-#define PMBUS_VIRT_READ_VMON		(PMBUS_VIRT_BASE + 30)
-#define PMBUS_VIRT_VMON_UV_WARN_LIMIT	(PMBUS_VIRT_BASE + 31)
-#define PMBUS_VIRT_VMON_OV_WARN_LIMIT	(PMBUS_VIRT_BASE + 32)
-#define PMBUS_VIRT_VMON_UV_FAULT_LIMIT	(PMBUS_VIRT_BASE + 33)
-#define PMBUS_VIRT_VMON_OV_FAULT_LIMIT	(PMBUS_VIRT_BASE + 34)
-#define PMBUS_VIRT_STATUS_VMON		(PMBUS_VIRT_BASE + 35)
+	PMBUS_VIRT_READ_VMON,
+	PMBUS_VIRT_VMON_UV_WARN_LIMIT,
+	PMBUS_VIRT_VMON_OV_WARN_LIMIT,
+	PMBUS_VIRT_VMON_UV_FAULT_LIMIT,
+	PMBUS_VIRT_VMON_OV_FAULT_LIMIT,
+	PMBUS_VIRT_STATUS_VMON,
+};
 
 /*
  * OPERATION
  */
-#define PB_OPERATION_CONTROL_ON		(1<<7)
+#define PB_OPERATION_CONTROL_ON		BIT(7)
 
 /*
  * CAPABILITY
  */
-#define PB_CAPABILITY_SMBALERT		(1<<4)
-#define PB_CAPABILITY_ERROR_CHECK	(1<<7)
+#define PB_CAPABILITY_SMBALERT		BIT(4)
+#define PB_CAPABILITY_ERROR_CHECK	BIT(7)
 
 /*
  * VOUT_MODE
@@ -211,94 +216,94 @@
 /*
  * Fan configuration
  */
-#define PB_FAN_2_PULSE_MASK		((1 << 0) | (1 << 1))
-#define PB_FAN_2_RPM			(1 << 2)
-#define PB_FAN_2_INSTALLED		(1 << 3)
-#define PB_FAN_1_PULSE_MASK		((1 << 4) | (1 << 5))
-#define PB_FAN_1_RPM			(1 << 6)
-#define PB_FAN_1_INSTALLED		(1 << 7)
+#define PB_FAN_2_PULSE_MASK		(BIT(0) | BIT(1))
+#define PB_FAN_2_RPM			BIT(2)
+#define PB_FAN_2_INSTALLED		BIT(3)
+#define PB_FAN_1_PULSE_MASK		(BIT(4) | BIT(5))
+#define PB_FAN_1_RPM			BIT(6)
+#define PB_FAN_1_INSTALLED		BIT(7)
 
 /*
  * STATUS_BYTE, STATUS_WORD (lower)
  */
-#define PB_STATUS_NONE_ABOVE		(1<<0)
-#define PB_STATUS_CML			(1<<1)
-#define PB_STATUS_TEMPERATURE		(1<<2)
-#define PB_STATUS_VIN_UV		(1<<3)
-#define PB_STATUS_IOUT_OC		(1<<4)
-#define PB_STATUS_VOUT_OV		(1<<5)
-#define PB_STATUS_OFF			(1<<6)
-#define PB_STATUS_BUSY			(1<<7)
+#define PB_STATUS_NONE_ABOVE		BIT(0)
+#define PB_STATUS_CML			BIT(1)
+#define PB_STATUS_TEMPERATURE		BIT(2)
+#define PB_STATUS_VIN_UV		BIT(3)
+#define PB_STATUS_IOUT_OC		BIT(4)
+#define PB_STATUS_VOUT_OV		BIT(5)
+#define PB_STATUS_OFF			BIT(6)
+#define PB_STATUS_BUSY			BIT(7)
 
 /*
  * STATUS_WORD (upper)
  */
-#define PB_STATUS_UNKNOWN		(1<<8)
-#define PB_STATUS_OTHER			(1<<9)
-#define PB_STATUS_FANS			(1<<10)
-#define PB_STATUS_POWER_GOOD_N		(1<<11)
-#define PB_STATUS_WORD_MFR		(1<<12)
-#define PB_STATUS_INPUT			(1<<13)
-#define PB_STATUS_IOUT_POUT		(1<<14)
-#define PB_STATUS_VOUT			(1<<15)
+#define PB_STATUS_UNKNOWN		BIT(8)
+#define PB_STATUS_OTHER			BIT(9)
+#define PB_STATUS_FANS			BIT(10)
+#define PB_STATUS_POWER_GOOD_N		BIT(11)
+#define PB_STATUS_WORD_MFR		BIT(12)
+#define PB_STATUS_INPUT			BIT(13)
+#define PB_STATUS_IOUT_POUT		BIT(14)
+#define PB_STATUS_VOUT			BIT(15)
 
 /*
  * STATUS_IOUT
  */
-#define PB_POUT_OP_WARNING		(1<<0)
-#define PB_POUT_OP_FAULT		(1<<1)
-#define PB_POWER_LIMITING		(1<<2)
-#define PB_CURRENT_SHARE_FAULT		(1<<3)
-#define PB_IOUT_UC_FAULT		(1<<4)
-#define PB_IOUT_OC_WARNING		(1<<5)
-#define PB_IOUT_OC_LV_FAULT		(1<<6)
-#define PB_IOUT_OC_FAULT		(1<<7)
+#define PB_POUT_OP_WARNING		BIT(0)
+#define PB_POUT_OP_FAULT		BIT(1)
+#define PB_POWER_LIMITING		BIT(2)
+#define PB_CURRENT_SHARE_FAULT		BIT(3)
+#define PB_IOUT_UC_FAULT		BIT(4)
+#define PB_IOUT_OC_WARNING		BIT(5)
+#define PB_IOUT_OC_LV_FAULT		BIT(6)
+#define PB_IOUT_OC_FAULT		BIT(7)
 
 /*
  * STATUS_VOUT, STATUS_INPUT
  */
-#define PB_VOLTAGE_UV_FAULT		(1<<4)
-#define PB_VOLTAGE_UV_WARNING		(1<<5)
-#define PB_VOLTAGE_OV_WARNING		(1<<6)
-#define PB_VOLTAGE_OV_FAULT		(1<<7)
+#define PB_VOLTAGE_UV_FAULT		BIT(4)
+#define PB_VOLTAGE_UV_WARNING		BIT(5)
+#define PB_VOLTAGE_OV_WARNING		BIT(6)
+#define PB_VOLTAGE_OV_FAULT		BIT(7)
 
 /*
  * STATUS_INPUT
  */
-#define PB_PIN_OP_WARNING		(1<<0)
-#define PB_IIN_OC_WARNING		(1<<1)
-#define PB_IIN_OC_FAULT			(1<<2)
+#define PB_PIN_OP_WARNING		BIT(0)
+#define PB_IIN_OC_WARNING		BIT(1)
+#define PB_IIN_OC_FAULT			BIT(2)
 
 /*
  * STATUS_TEMPERATURE
  */
-#define PB_TEMP_UT_FAULT		(1<<4)
-#define PB_TEMP_UT_WARNING		(1<<5)
-#define PB_TEMP_OT_WARNING		(1<<6)
-#define PB_TEMP_OT_FAULT		(1<<7)
+#define PB_TEMP_UT_FAULT		BIT(4)
+#define PB_TEMP_UT_WARNING		BIT(5)
+#define PB_TEMP_OT_WARNING		BIT(6)
+#define PB_TEMP_OT_FAULT		BIT(7)
 
 /*
  * STATUS_FAN
  */
-#define PB_FAN_AIRFLOW_WARNING		(1<<0)
-#define PB_FAN_AIRFLOW_FAULT		(1<<1)
-#define PB_FAN_FAN2_SPEED_OVERRIDE	(1<<2)
-#define PB_FAN_FAN1_SPEED_OVERRIDE	(1<<3)
-#define PB_FAN_FAN2_WARNING		(1<<4)
-#define PB_FAN_FAN1_WARNING		(1<<5)
-#define PB_FAN_FAN2_FAULT		(1<<6)
-#define PB_FAN_FAN1_FAULT		(1<<7)
+#define PB_FAN_AIRFLOW_WARNING		BIT(0)
+#define PB_FAN_AIRFLOW_FAULT		BIT(1)
+#define PB_FAN_FAN2_SPEED_OVERRIDE	BIT(2)
+#define PB_FAN_FAN1_SPEED_OVERRIDE	BIT(3)
+#define PB_FAN_FAN2_WARNING		BIT(4)
+#define PB_FAN_FAN1_WARNING		BIT(5)
+#define PB_FAN_FAN2_FAULT		BIT(6)
+#define PB_FAN_FAN1_FAULT		BIT(7)
 
 /*
  * CML_FAULT_STATUS
  */
-#define PB_CML_FAULT_OTHER_MEM_LOGIC	(1<<0)
-#define PB_CML_FAULT_OTHER_COMM		(1<<1)
-#define PB_CML_FAULT_PROCESSOR		(1<<3)
-#define PB_CML_FAULT_MEMORY		(1<<4)
-#define PB_CML_FAULT_PACKET_ERROR	(1<<5)
-#define PB_CML_FAULT_INVALID_DATA	(1<<6)
-#define PB_CML_FAULT_INVALID_COMMAND	(1<<7)
+#define PB_CML_FAULT_OTHER_MEM_LOGIC	BIT(0)
+#define PB_CML_FAULT_OTHER_COMM		BIT(1)
+#define PB_CML_FAULT_PROCESSOR		BIT(3)
+#define PB_CML_FAULT_MEMORY		BIT(4)
+#define PB_CML_FAULT_PACKET_ERROR	BIT(5)
+#define PB_CML_FAULT_INVALID_DATA	BIT(6)
+#define PB_CML_FAULT_INVALID_COMMAND	BIT(7)
 
 enum pmbus_sensor_classes {
 	PSC_VOLTAGE_IN = 0,
@@ -314,32 +319,34 @@
 #define PMBUS_PAGES	32	/* Per PMBus specification */
 
 /* Functionality bit mask */
-#define PMBUS_HAVE_VIN		(1 << 0)
-#define PMBUS_HAVE_VCAP		(1 << 1)
-#define PMBUS_HAVE_VOUT		(1 << 2)
-#define PMBUS_HAVE_IIN		(1 << 3)
-#define PMBUS_HAVE_IOUT		(1 << 4)
-#define PMBUS_HAVE_PIN		(1 << 5)
-#define PMBUS_HAVE_POUT		(1 << 6)
-#define PMBUS_HAVE_FAN12	(1 << 7)
-#define PMBUS_HAVE_FAN34	(1 << 8)
-#define PMBUS_HAVE_TEMP		(1 << 9)
-#define PMBUS_HAVE_TEMP2	(1 << 10)
-#define PMBUS_HAVE_TEMP3	(1 << 11)
-#define PMBUS_HAVE_STATUS_VOUT	(1 << 12)
-#define PMBUS_HAVE_STATUS_IOUT	(1 << 13)
-#define PMBUS_HAVE_STATUS_INPUT	(1 << 14)
-#define PMBUS_HAVE_STATUS_TEMP	(1 << 15)
-#define PMBUS_HAVE_STATUS_FAN12	(1 << 16)
-#define PMBUS_HAVE_STATUS_FAN34	(1 << 17)
-#define PMBUS_HAVE_VMON		(1 << 18)
-#define PMBUS_HAVE_STATUS_VMON	(1 << 19)
+#define PMBUS_HAVE_VIN		BIT(0)
+#define PMBUS_HAVE_VCAP		BIT(1)
+#define PMBUS_HAVE_VOUT		BIT(2)
+#define PMBUS_HAVE_IIN		BIT(3)
+#define PMBUS_HAVE_IOUT		BIT(4)
+#define PMBUS_HAVE_PIN		BIT(5)
+#define PMBUS_HAVE_POUT		BIT(6)
+#define PMBUS_HAVE_FAN12	BIT(7)
+#define PMBUS_HAVE_FAN34	BIT(8)
+#define PMBUS_HAVE_TEMP		BIT(9)
+#define PMBUS_HAVE_TEMP2	BIT(10)
+#define PMBUS_HAVE_TEMP3	BIT(11)
+#define PMBUS_HAVE_STATUS_VOUT	BIT(12)
+#define PMBUS_HAVE_STATUS_IOUT	BIT(13)
+#define PMBUS_HAVE_STATUS_INPUT	BIT(14)
+#define PMBUS_HAVE_STATUS_TEMP	BIT(15)
+#define PMBUS_HAVE_STATUS_FAN12	BIT(16)
+#define PMBUS_HAVE_STATUS_FAN34	BIT(17)
+#define PMBUS_HAVE_VMON		BIT(18)
+#define PMBUS_HAVE_STATUS_VMON	BIT(19)
 
 enum pmbus_data_format { linear = 0, direct, vid };
+enum vrm_version { vr11 = 0, vr12 };
 
 struct pmbus_driver_info {
 	int pages;		/* Total number of pages */
 	enum pmbus_data_format format[PSC_NUM_CLASSES];
+	enum vrm_version vrm_version;
 	/*
 	 * Support one set of coefficients for each sensor type
 	 * Used for chips providing data in direct mode.
@@ -380,7 +387,7 @@
 
 /* Regulator ops */
 
-extern struct regulator_ops pmbus_regulator_ops;
+extern const struct regulator_ops pmbus_regulator_ops;
 
 /* Macro for filling in array of struct regulator_desc */
 #define PMBUS_REGULATOR(_name, _id)				\
@@ -390,6 +397,7 @@
 		.of_match = of_match_ptr(_name # _id),		\
 		.regulators_node = of_match_ptr("regulators"),	\
 		.ops = &pmbus_regulator_ops,			\
+		.type = REGULATOR_VOLTAGE,			\
 		.owner = THIS_MODULE,				\
 	}
 
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f2e47c7..ba59eae 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -515,16 +515,24 @@
 /*
  * Convert VID sensor values to milli- or micro-units
  * depending on sensor type.
- * We currently only support VR11.
  */
 static long pmbus_reg2data_vid(struct pmbus_data *data,
 			       struct pmbus_sensor *sensor)
 {
 	long val = sensor->data;
+	long rv = 0;
 
-	if (val < 0x02 || val > 0xb2)
-		return 0;
-	return DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
+	switch (data->info->vrm_version) {
+	case vr11:
+		if (val >= 0x02 && val <= 0xb2)
+			rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100);
+		break;
+	case vr12:
+		if (val >= 0x01)
+			rv = 250 + (val - 1) * 5;
+		break;
+	}
+	return rv;
 }
 
 static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
@@ -1329,6 +1337,10 @@
 		.update = true,
 		.attr = "average",
 	}, {
+		.reg = PMBUS_VIRT_READ_PIN_MIN,
+		.update = true,
+		.attr = "input_lowest",
+	}, {
 		.reg = PMBUS_VIRT_READ_PIN_MAX,
 		.update = true,
 		.attr = "input_highest",
@@ -1359,6 +1371,10 @@
 		.update = true,
 		.attr = "average",
 	}, {
+		.reg = PMBUS_VIRT_READ_POUT_MIN,
+		.update = true,
+		.attr = "input_lowest",
+	}, {
 		.reg = PMBUS_VIRT_READ_POUT_MAX,
 		.update = true,
 		.attr = "input_highest",
@@ -1735,6 +1751,11 @@
 		}
 	}
 
+	/* Enable PEC if the controller supports it */
+	ret = i2c_smbus_read_byte_data(client, PMBUS_CAPABILITY);
+	if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
+		client->flags |= I2C_CLIENT_PEC;
+
 	pmbus_clear_faults(client);
 
 	if (info->identify) {
@@ -1796,7 +1817,7 @@
 	return _pmbus_regulator_on_off(rdev, 0);
 }
 
-struct regulator_ops pmbus_regulator_ops = {
+const struct regulator_ops pmbus_regulator_ops = {
 	.enable = pmbus_regulator_enable,
 	.disable = pmbus_regulator_disable,
 	.is_enabled = pmbus_regulator_is_enabled,
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 8196441..771802d 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -19,6 +19,7 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/bitops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -44,16 +45,16 @@
 #define ZL6100_MFR_CONFIG		0xd0
 #define ZL6100_DEVICE_ID		0xe4
 
-#define ZL6100_MFR_XTEMP_ENABLE		(1 << 7)
+#define ZL6100_MFR_XTEMP_ENABLE		BIT(7)
 
 #define MFR_VMON_OV_FAULT_LIMIT		0xf5
 #define MFR_VMON_UV_FAULT_LIMIT		0xf6
 #define MFR_READ_VMON			0xf7
 
-#define VMON_UV_WARNING			(1 << 5)
-#define VMON_OV_WARNING			(1 << 4)
-#define VMON_UV_FAULT			(1 << 1)
-#define VMON_OV_FAULT			(1 << 0)
+#define VMON_UV_WARNING			BIT(5)
+#define VMON_OV_WARNING			BIT(4)
+#define VMON_UV_FAULT			BIT(1)
+#define VMON_OV_FAULT			BIT(0)
 
 #define ZL6100_WAIT_TIME		1000	/* uS	*/
 
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 497a7f8..a2fdbb7 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -33,6 +33,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/atomic.h>
+#include <linux/bitrev.h>
 
 /* Commands */
 #define SHT15_MEASURE_TEMP		0x03
@@ -173,19 +174,6 @@
 };
 
 /**
- * sht15_reverse() - reverse a byte
- * @byte:    byte to reverse.
- */
-static u8 sht15_reverse(u8 byte)
-{
-	u8 i, c;
-
-	for (c = 0, i = 0; i < 8; i++)
-		c |= (!!(byte & (1 << i))) << (7 - i);
-	return c;
-}
-
-/**
  * sht15_crc8() - compute crc8
  * @data:	sht15 specific data.
  * @value:	sht15 retrieved data.
@@ -196,7 +184,7 @@
 		const u8 *value,
 		int len)
 {
-	u8 crc = sht15_reverse(data->val_status & 0x0F);
+	u8 crc = bitrev8(data->val_status & 0x0F);
 
 	while (len--) {
 		crc = sht15_crc8_table[*value ^ crc];
@@ -477,7 +465,7 @@
 
 		if (data->checksumming) {
 			sht15_ack(data);
-			dev_checksum = sht15_reverse(sht15_read_byte(data));
+			dev_checksum = bitrev8(sht15_read_byte(data));
 			checksum_vals[0] = SHT15_READ_STATUS;
 			checksum_vals[1] = status;
 			data->checksum_ok = (sht15_crc8(data, checksum_vals, 2)
@@ -864,7 +852,7 @@
 		 */
 		if (sht15_ack(data))
 			goto wakeup;
-		dev_checksum = sht15_reverse(sht15_read_byte(data));
+		dev_checksum = bitrev8(sht15_read_byte(data));
 		checksum_vals[0] = (data->state == SHT15_READING_TEMP) ?
 			SHT15_MEASURE_TEMP : SHT15_MEASURE_RH;
 		checksum_vals[1] = (u8) (val >> 8);
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 098ffbe..b4481eb 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -183,7 +183,9 @@
  * @seq_13_event: event causing the transition from 1 to 3.
  * @seq_curr_state: current value of the sequencer register.
  * @ctxid_idx: index for the context ID registers.
- * @ctxid_val: value for the context ID to trigger on.
+ * @ctxid_pid: value for the context ID to trigger on.
+ * @ctxid_vpid:	Virtual PID seen by users if PID namespace is enabled, otherwise
+ *		the same value as ctxid_pid.
  * @ctxid_mask: mask applicable to all the context IDs.
  * @sync_freq:	Synchronisation frequency.
  * @timestamp_event: Defines an event that requests the insertion
@@ -235,7 +237,8 @@
 	u32				seq_13_event;
 	u32				seq_curr_state;
 	u8				ctxid_idx;
-	u32				ctxid_val[ETM_MAX_CTXID_CMP];
+	u32				ctxid_pid[ETM_MAX_CTXID_CMP];
+	u32				ctxid_vpid[ETM_MAX_CTXID_CMP];
 	u32				ctxid_mask;
 	u32				sync_freq;
 	u32				timestamp_event;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 018a00f..bf2476e 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -237,8 +237,11 @@
 
 	drvdata->seq_curr_state = 0x0;
 	drvdata->ctxid_idx = 0x0;
-	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-		drvdata->ctxid_val[i] = 0x0;
+	for (i = 0; i < drvdata->nr_ctxid_cmp; i++) {
+		drvdata->ctxid_pid[i] = 0x0;
+		drvdata->ctxid_vpid[i] = 0x0;
+	}
+
 	drvdata->ctxid_mask = 0x0;
 }
 
@@ -289,7 +292,7 @@
 	for (i = 0; i < drvdata->nr_ext_out; i++)
 		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
 	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
-		etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+		etm_writel(drvdata, drvdata->ctxid_pid[i], ETMCIDCVRn(i));
 	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
 	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
 	/* No external input selected */
@@ -1386,38 +1389,41 @@
 }
 static DEVICE_ATTR_RW(ctxid_idx);
 
-static ssize_t ctxid_val_show(struct device *dev,
+static ssize_t ctxid_pid_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
 	unsigned long val;
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 	spin_lock(&drvdata->spinlock);
-	val = drvdata->ctxid_val[drvdata->ctxid_idx];
+	val = drvdata->ctxid_vpid[drvdata->ctxid_idx];
 	spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
 
-static ssize_t ctxid_val_store(struct device *dev,
+static ssize_t ctxid_pid_store(struct device *dev,
 			       struct device_attribute *attr,
 			       const char *buf, size_t size)
 {
 	int ret;
-	unsigned long val;
+	unsigned long vpid, pid;
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
-	ret = kstrtoul(buf, 16, &val);
+	ret = kstrtoul(buf, 16, &vpid);
 	if (ret)
 		return ret;
 
+	pid = coresight_vpid_to_pid(vpid);
+
 	spin_lock(&drvdata->spinlock);
-	drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+	drvdata->ctxid_pid[drvdata->ctxid_idx] = pid;
+	drvdata->ctxid_vpid[drvdata->ctxid_idx] = vpid;
 	spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
-static DEVICE_ATTR_RW(ctxid_val);
+static DEVICE_ATTR_RW(ctxid_pid);
 
 static ssize_t ctxid_mask_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
@@ -1609,7 +1615,7 @@
 	&dev_attr_seq_13_event.attr,
 	&dev_attr_seq_curr_state.attr,
 	&dev_attr_ctxid_idx.attr,
-	&dev_attr_ctxid_val.attr,
+	&dev_attr_ctxid_pid.attr,
 	&dev_attr_ctxid_mask.attr,
 	&dev_attr_sync_freq.attr,
 	&dev_attr_timestamp_event.attr,
@@ -1912,6 +1918,11 @@
 		.mask	= 0x0003ffff,
 		.data	= "PTM 1.1",
 	},
+	{	/* PTM 1.1 Qualcomm */
+		.id	= 0x0003006f,
+		.mask	= 0x0003ffff,
+		.data	= "PTM 1.1",
+	},
 	{ 0, 0},
 };
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1312e99..254a81a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -155,7 +155,7 @@
 			       drvdata->base + TRCACATRn(i));
 	}
 	for (i = 0; i < drvdata->numcidc; i++)
-		writeq_relaxed(drvdata->ctxid_val[i],
+		writeq_relaxed(drvdata->ctxid_pid[i],
 			       drvdata->base + TRCCIDCVRn(i));
 	writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
 	writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
@@ -506,8 +506,11 @@
 	}
 
 	drvdata->ctxid_idx = 0x0;
-	for (i = 0; i < drvdata->numcidc; i++)
-		drvdata->ctxid_val[i] = 0x0;
+	for (i = 0; i < drvdata->numcidc; i++) {
+		drvdata->ctxid_pid[i] = 0x0;
+		drvdata->ctxid_vpid[i] = 0x0;
+	}
+
 	drvdata->ctxid_mask0 = 0x0;
 	drvdata->ctxid_mask1 = 0x0;
 
@@ -1815,7 +1818,7 @@
 }
 static DEVICE_ATTR_RW(ctxid_idx);
 
-static ssize_t ctxid_val_show(struct device *dev,
+static ssize_t ctxid_pid_show(struct device *dev,
 			      struct device_attribute *attr,
 			      char *buf)
 {
@@ -1825,17 +1828,17 @@
 
 	spin_lock(&drvdata->spinlock);
 	idx = drvdata->ctxid_idx;
-	val = (unsigned long)drvdata->ctxid_val[idx];
+	val = (unsigned long)drvdata->ctxid_vpid[idx];
 	spin_unlock(&drvdata->spinlock);
 	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
 }
 
-static ssize_t ctxid_val_store(struct device *dev,
+static ssize_t ctxid_pid_store(struct device *dev,
 			       struct device_attribute *attr,
 			       const char *buf, size_t size)
 {
 	u8 idx;
-	unsigned long val;
+	unsigned long vpid, pid;
 	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 	/*
@@ -1845,16 +1848,19 @@
 	 */
 	if (!drvdata->ctxid_size || !drvdata->numcidc)
 		return -EINVAL;
-	if (kstrtoul(buf, 16, &val))
+	if (kstrtoul(buf, 16, &vpid))
 		return -EINVAL;
 
+	pid = coresight_vpid_to_pid(vpid);
+
 	spin_lock(&drvdata->spinlock);
 	idx = drvdata->ctxid_idx;
-	drvdata->ctxid_val[idx] = (u64)val;
+	drvdata->ctxid_pid[idx] = (u64)pid;
+	drvdata->ctxid_vpid[idx] = (u64)vpid;
 	spin_unlock(&drvdata->spinlock);
 	return size;
 }
-static DEVICE_ATTR_RW(ctxid_val);
+static DEVICE_ATTR_RW(ctxid_pid);
 
 static ssize_t ctxid_masks_show(struct device *dev,
 				struct device_attribute *attr,
@@ -1949,7 +1955,7 @@
 		 */
 		for (j = 0; j < 8; j++) {
 			if (maskbyte & 1)
-				drvdata->ctxid_val[i] &= ~(0xFF << (j * 8));
+				drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
 			maskbyte >>= 1;
 		}
 		/* Select the next ctxid comparator mask value */
@@ -2193,7 +2199,7 @@
 	&dev_attr_res_idx.attr,
 	&dev_attr_res_ctrl.attr,
 	&dev_attr_ctxid_idx.attr,
-	&dev_attr_ctxid_val.attr,
+	&dev_attr_ctxid_pid.attr,
 	&dev_attr_ctxid_masks.attr,
 	&dev_attr_vmid_idx.attr,
 	&dev_attr_vmid_val.attr,
@@ -2513,8 +2519,11 @@
 		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
 	}
 
-	for (i = 0; i < drvdata->numcidc; i++)
-		drvdata->ctxid_val[i] = 0x0;
+	for (i = 0; i < drvdata->numcidc; i++) {
+		drvdata->ctxid_pid[i] = 0x0;
+		drvdata->ctxid_vpid[i] = 0x0;
+	}
+
 	drvdata->ctxid_mask0 = 0x0;
 	drvdata->ctxid_mask1 = 0x0;
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index e08e983..c341002 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -265,7 +265,9 @@
  * @addr_type:	Current status of the comparator register.
  * @ctxid_idx:	Context ID index selector.
  * @ctxid_size:	Size of the context ID field to consider.
- * @ctxid_val:	Value of the context ID comparator.
+ * @ctxid_pid:	Value of the context ID comparator.
+ * @ctxid_vpid:	Virtual PID seen by users if PID namespace is enabled, otherwise
+ *		the same value as ctxid_pid.
  * @ctxid_mask0:Context ID comparator mask for comparator 0-3.
  * @ctxid_mask1:Context ID comparator mask for comparator 4-7.
  * @vmid_idx:	VM ID index selector.
@@ -352,7 +354,8 @@
 	u8				addr_type[ETM_MAX_SINGLE_ADDR_CMP];
 	u8				ctxid_idx;
 	u8				ctxid_size;
-	u64				ctxid_val[ETMv4_MAX_CTXID_CMP];
+	u64				ctxid_pid[ETMv4_MAX_CTXID_CMP];
+	u64				ctxid_vpid[ETMv4_MAX_CTXID_CMP];
 	u32				ctxid_mask0;
 	u32				ctxid_mask1;
 	u8				vmid_idx;
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 7974b7c..963ac19 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -12,7 +12,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -184,17 +183,7 @@
 	},
 };
 
-static int __init replicator_init(void)
-{
-	return platform_driver_register(&replicator_driver);
-}
-module_init(replicator_init);
-
-static void __exit replicator_exit(void)
-{
-	platform_driver_unregister(&replicator_driver);
-}
-module_exit(replicator_exit);
+builtin_platform_driver(replicator_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 5ecbb3f..eaef9bc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -88,12 +88,13 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
 		defined CONFIG_DMI
 #include <linux/gpio.h>
 #include <linux/i2c-mux-gpio.h>
-#include <linux/platform_device.h>
 #endif
 
 /* I801 SMBus address offsets */
@@ -113,6 +114,16 @@
 #define SMBPCICTL	0x004
 #define SMBPCISTS	0x006
 #define SMBHSTCFG	0x040
+#define TCOBASE		0x050
+#define TCOCTL		0x054
+
+#define ACPIBASE		0x040
+#define ACPIBASE_SMI_OFF	0x030
+#define ACPICTRL		0x044
+#define ACPICTRL_EN		0x080
+
+#define SBREG_BAR		0x10
+#define SBREG_SMBCTRL		0xc6000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS		0x08
@@ -125,6 +136,9 @@
 #define SMBHSTCFG_SMB_SMI_EN	2
 #define SMBHSTCFG_I2C_EN	4
 
+/* TCO configuration bits for TCOCTL */
+#define TCOCTL_EN		0x0100
+
 /* Auxiliary control register bits, ICH4+ only */
 #define SMBAUXCTL_CRC		1
 #define SMBAUXCTL_E32B		2
@@ -221,6 +235,7 @@
 	const struct i801_mux_config *mux_drvdata;
 	struct platform_device *mux_pdev;
 #endif
+	struct platform_device *tco_pdev;
 };
 
 #define FEATURE_SMBUS_PEC	(1 << 0)
@@ -230,6 +245,7 @@
 #define FEATURE_IRQ		(1 << 4)
 /* Not really a feature, but it's convenient to handle it as such */
 #define FEATURE_IDF		(1 << 15)
+#define FEATURE_TCO		(1 << 16)
 
 static const char *i801_feature_names[] = {
 	"SMBus PEC",
@@ -1132,6 +1148,95 @@
 }
 #endif
 
+static const struct itco_wdt_platform_data tco_platform_data = {
+	.name = "Intel PCH",
+	.version = 4,
+};
+
+static DEFINE_SPINLOCK(p2sb_spinlock);
+
+static void i801_add_tco(struct i801_priv *priv)
+{
+	struct pci_dev *pci_dev = priv->pci_dev;
+	struct resource tco_res[3], *res;
+	struct platform_device *pdev;
+	unsigned int devfn;
+	u32 tco_base, tco_ctl;
+	u32 base_addr, ctrl_val;
+	u64 base64_addr;
+
+	if (!(priv->features & FEATURE_TCO))
+		return;
+
+	pci_read_config_dword(pci_dev, TCOBASE, &tco_base);
+	pci_read_config_dword(pci_dev, TCOCTL, &tco_ctl);
+	if (!(tco_ctl & TCOCTL_EN))
+		return;
+
+	memset(tco_res, 0, sizeof(tco_res));
+
+	res = &tco_res[ICH_RES_IO_TCO];
+	res->start = tco_base & ~1;
+	res->end = res->start + 32 - 1;
+	res->flags = IORESOURCE_IO;
+
+	/*
+	 * Power Management registers.
+	 */
+	devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
+	pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
+
+	res = &tco_res[ICH_RES_IO_SMI];
+	res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
+	res->end = res->start + 3;
+	res->flags = IORESOURCE_IO;
+
+	/*
+	 * Enable the ACPI I/O space.
+	 */
+	pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
+	ctrl_val |= ACPICTRL_EN;
+	pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
+
+	/*
+	 * We must access the NO_REBOOT bit over the Primary to Sideband
+	 * bridge (P2SB). The BIOS prevents the P2SB device from being
+	 * enumerated by the PCI subsystem, so we need to unhide/hide it
+	 * to look up the P2SB BAR.
+	 */
+	spin_lock(&p2sb_spinlock);
+
+	devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 1);
+
+	/* Unhide the P2SB device */
+	pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x0);
+
+	pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR, &base_addr);
+	base64_addr = base_addr & 0xfffffff0;
+
+	pci_bus_read_config_dword(pci_dev->bus, devfn, SBREG_BAR + 0x4, &base_addr);
+	base64_addr |= (u64)base_addr << 32;
+
+	/* Hide the P2SB device */
+	pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, 0x1);
+	spin_unlock(&p2sb_spinlock);
+
+	res = &tco_res[ICH_RES_MEM_OFF];
+	res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+	res->end = res->start + 3;
+	res->flags = IORESOURCE_MEM;
+
+	pdev = platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+						 tco_res, 3, &tco_platform_data,
+						 sizeof(tco_platform_data));
+	if (IS_ERR(pdev)) {
+		dev_warn(&pci_dev->dev, "failed to create iTCO device\n");
+		return;
+	}
+
+	priv->tco_pdev = pdev;
+}
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
 	unsigned char temp;
@@ -1149,6 +1254,15 @@
 
 	priv->pci_dev = dev;
 	switch (dev->device) {
+	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
+	case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+		priv->features |= FEATURE_I2C_BLOCK_READ;
+		priv->features |= FEATURE_IRQ;
+		priv->features |= FEATURE_SMBUS_PEC;
+		priv->features |= FEATURE_BLOCK_BUFFER;
+		priv->features |= FEATURE_TCO;
+		break;
+
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
 	case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
@@ -1265,6 +1379,8 @@
 	dev_info(&dev->dev, "SMBus using %s\n",
 		 priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
 
+	i801_add_tco(priv);
+
 	/* set up the sysfs linkage to our parent device */
 	priv->adapter.dev.parent = &dev->dev;
 
@@ -1296,6 +1412,8 @@
 	i2c_del_adapter(&priv->adapter);
 	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 
+	platform_device_unregister(priv->tco_pdev);
+
 	/*
 	 * do not call pci_disable_device(dev) since it can cause hard hangs on
 	 * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 2a36a95..3a3738f 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -591,6 +591,67 @@
 		.enter = NULL }
 };
 
+static struct cpuidle_state skl_cstates[] = {
+	{
+		.name = "C1-SKL",
+		.desc = "MWAIT 0x00",
+		.flags = MWAIT2flg(0x00),
+		.exit_latency = 2,
+		.target_residency = 2,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C1E-SKL",
+		.desc = "MWAIT 0x01",
+		.flags = MWAIT2flg(0x01),
+		.exit_latency = 10,
+		.target_residency = 20,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C3-SKL",
+		.desc = "MWAIT 0x10",
+		.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 70,
+		.target_residency = 100,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C6-SKL",
+		.desc = "MWAIT 0x20",
+		.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 75,
+		.target_residency = 200,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C7s-SKL",
+		.desc = "MWAIT 0x33",
+		.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 124,
+		.target_residency = 800,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C8-SKL",
+		.desc = "MWAIT 0x40",
+		.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 174,
+		.target_residency = 800,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.name = "C10-SKL",
+		.desc = "MWAIT 0x60",
+		.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
+		.exit_latency = 890,
+		.target_residency = 5000,
+		.enter = &intel_idle,
+		.enter_freeze = intel_idle_freeze, },
+	{
+		.enter = NULL }
+};
+
 static struct cpuidle_state atom_cstates[] = {
 	{
 		.name = "C1E-ATM",
@@ -810,6 +871,12 @@
 	.disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_skl = {
+	.state_table = skl_cstates,
+	.disable_promotion_to_c1e = true,
+};
+
+
 static const struct idle_cpu idle_cpu_avn = {
 	.state_table = avn_cstates,
 	.disable_promotion_to_c1e = true,
@@ -844,6 +911,8 @@
 	ICPU(0x47, idle_cpu_bdw),
 	ICPU(0x4f, idle_cpu_bdw),
 	ICPU(0x56, idle_cpu_bdw),
+	ICPU(0x4e, idle_cpu_skl),
+	ICPU(0x5e, idle_cpu_skl),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -965,7 +1034,8 @@
 	for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
 		int num_substates, mwait_hint, mwait_cstate;
 
-		if (cpuidle_state_table[cstate].enter == NULL)
+		if ((cpuidle_state_table[cstate].enter == NULL) &&
+		    (cpuidle_state_table[cstate].enter_freeze == NULL))
 			break;
 
 		if (cstate + 1 > max_cstate) {
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 00e7bcb..a59047d 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -86,18 +86,6 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called kxsd9.
 
-config MMA8452
-	tristate "Freescale MMA8452Q Accelerometer Driver"
-	depends on I2C
-	select IIO_BUFFER
-	select IIO_TRIGGERED_BUFFER
-	help
-	  Say yes here to build support for the Freescale MMA8452Q 3-axis
-	  accelerometer.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called mma8452.
-
 config KXCJK1013
 	tristate "Kionix 3-Axis Accelerometer Driver"
 	depends on I2C
@@ -111,6 +99,18 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called kxcjk-1013.
 
+config MMA8452
+	tristate "Freescale MMA8452Q Accelerometer Driver"
+	depends on I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
+	help
+	  Say yes here to build support for the Freescale MMA8452Q 3-axis
+	  accelerometer.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called mma8452.
+
 config MMA9551_CORE
 	tristate
 
@@ -140,6 +140,8 @@
 config STK8312
 	tristate "Sensortek STK8312 3-Axis Accelerometer Driver"
 	depends on I2C
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to get support for the Sensortek STK8312 3-axis
 	  accelerometer.
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 75c6d21..f04b884 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -846,7 +846,6 @@
 static struct i2c_driver bma180_driver = {
 	.driver = {
 		.name	= "bma180",
-		.owner	= THIS_MODULE,
 		.pm	= BMA180_PM_OPS,
 	},
 	.probe		= bma180_probe,
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index cc5a357..0104cde 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -151,6 +151,7 @@
 };
 
 struct bmc150_accel_chip_info {
+	const char *name;
 	u8 chip_id;
 	const struct iio_chan_spec *channels;
 	int num_channels;
@@ -241,7 +242,6 @@
 				       {500000, BMC150_ACCEL_SLEEP_500_MS},
 				       {1000000, BMC150_ACCEL_SLEEP_1_SEC} };
 
-
 static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
 				 enum bmc150_power_modes mode,
 				 int dur_us)
@@ -259,8 +259,9 @@
 				dur_val =
 				bmc150_accel_sleep_value_table[i].reg_value;
 		}
-	} else
+	} else {
 		dur_val = 0;
+	}
 
 	if (dur_val < 0)
 		return -EINVAL;
@@ -288,7 +289,7 @@
 
 	for (i = 0; i < ARRAY_SIZE(bmc150_accel_samp_freq_table); ++i) {
 		if (bmc150_accel_samp_freq_table[i].val == val &&
-				bmc150_accel_samp_freq_table[i].val2 == val2) {
+		    bmc150_accel_samp_freq_table[i].val2 == val2) {
 			ret = i2c_smbus_write_byte_data(
 				data->client,
 				BMC150_ACCEL_REG_PMU_BW,
@@ -345,65 +346,6 @@
 	return 0;
 }
 
-static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
-{
-	int ret;
-
-	ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_CHIP_ID);
-	if (ret < 0) {
-		dev_err(&data->client->dev,
-			"Error: Reading chip id\n");
-		return ret;
-	}
-
-	dev_dbg(&data->client->dev, "Chip Id %x\n", ret);
-	if (ret != data->chip_info->chip_id) {
-		dev_err(&data->client->dev, "Invalid chip %x\n", ret);
-		return -ENODEV;
-	}
-
-	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
-	if (ret < 0)
-		return ret;
-
-	/* Set Bandwidth */
-	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
-	if (ret < 0)
-		return ret;
-
-	/* Set Default Range */
-	ret = i2c_smbus_write_byte_data(data->client,
-					BMC150_ACCEL_REG_PMU_RANGE,
-					BMC150_ACCEL_DEF_RANGE_4G);
-	if (ret < 0) {
-		dev_err(&data->client->dev,
-					"Error writing reg_pmu_range\n");
-		return ret;
-	}
-
-	data->range = BMC150_ACCEL_DEF_RANGE_4G;
-
-	/* Set default slope duration and thresholds */
-	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
-	data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
-	ret = bmc150_accel_update_slope(data);
-	if (ret < 0)
-		return ret;
-
-	/* Set default as latched interrupts */
-	ret = i2c_smbus_write_byte_data(data->client,
-					BMC150_ACCEL_REG_INT_RST_LATCH,
-					BMC150_ACCEL_INT_MODE_LATCH_INT |
-					BMC150_ACCEL_INT_MODE_LATCH_RESET);
-	if (ret < 0) {
-		dev_err(&data->client->dev,
-			"Error writing reg_int_rst_latch\n");
-		return ret;
-	}
-
-	return 0;
-}
-
 static int bmc150_accel_get_bw(struct bmc150_accel_data *data, int *val,
 			       int *val2)
 {
@@ -437,12 +379,13 @@
 {
 	int ret;
 
-	if (on)
+	if (on) {
 		ret = pm_runtime_get_sync(&data->client->dev);
-	else {
+	} else {
 		pm_runtime_mark_last_busy(&data->client->dev);
 		ret = pm_runtime_put_autosuspend(&data->client->dev);
 	}
+
 	if (ret < 0) {
 		dev_err(&data->client->dev,
 			"Failed: bmc150_accel_set_power_state for %d\n", on);
@@ -514,13 +457,13 @@
 	}
 
 	/*
-	 * We will expect the enable and disable to do operation in
-	 * in reverse order. This will happen here anyway as our
-	 * resume operation uses sync mode runtime pm calls, the
-	 * suspend operation will be delayed by autosuspend delay
-	 * So the disable operation will still happen in reverse of
-	 * enable operation. When runtime pm is disabled the mode
-	 * is always on so sequence doesn't matter
+	 * We expect the enable and disable operations to run in reverse
+	 * order. This will happen here anyway, as our resume operation uses
+	 * sync mode runtime pm calls, while the suspend operation will be
+	 * delayed by the autosuspend delay.
+	 * So the disable will still happen in reverse order of the enable.
+	 * When runtime pm is disabled the mode is always on, so the sequence
+	 * doesn't matter.
 	 */
 	ret = bmc150_accel_set_power_state(data, state);
 	if (ret < 0)
@@ -574,7 +517,6 @@
 	return ret;
 }
 
-
 static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
 {
 	int ret, i;
@@ -674,8 +616,9 @@
 		if (chan->type == IIO_TEMP) {
 			*val = BMC150_ACCEL_TEMP_CENTER_VAL;
 			return IIO_VAL_INT;
-		} else
+		} else {
 			return -EINVAL;
+		}
 	case IIO_CHAN_INFO_SCALE:
 		*val = 0;
 		switch (chan->type) {
@@ -776,7 +719,7 @@
 
 	switch (info) {
 	case IIO_EV_INFO_VALUE:
-		data->slope_thres = val & 0xFF;
+		data->slope_thres = val & BMC150_ACCEL_SLOPE_THRES_MASK;
 		break;
 	case IIO_EV_INFO_PERIOD:
 		data->slope_dur = val & BMC150_ACCEL_SLOPE_DUR_MASK;
@@ -793,7 +736,6 @@
 					  enum iio_event_type type,
 					  enum iio_event_direction dir)
 {
-
 	struct bmc150_accel_data *data = iio_priv(indio_dev);
 
 	return data->ev_enable_state;
@@ -827,7 +769,7 @@
 }
 
 static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
-				   struct iio_trigger *trig)
+					 struct iio_trigger *trig)
 {
 	struct bmc150_accel_data *data = iio_priv(indio_dev);
 	int i;
@@ -963,6 +905,7 @@
 	u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
 	int64_t tstamp;
 	uint64_t sample_period;
+
 	ret = i2c_smbus_read_byte_data(data->client,
 				       BMC150_ACCEL_REG_FIFO_STATUS);
 	if (ret < 0) {
@@ -1120,6 +1063,7 @@
 
 static const struct bmc150_accel_chip_info bmc150_accel_chip_info_tbl[] = {
 	[bmc150] = {
+		.name = "BMC150A",
 		.chip_id = 0xFA,
 		.channels = bmc150_accel_channels,
 		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
@@ -1129,6 +1073,7 @@
 				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
 	},
 	[bmi055] = {
+		.name = "BMI055A",
 		.chip_id = 0xFA,
 		.channels = bmc150_accel_channels,
 		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
@@ -1138,6 +1083,7 @@
 				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
 	},
 	[bma255] = {
+		.name = "BMA0255",
 		.chip_id = 0xFA,
 		.channels = bmc150_accel_channels,
 		.num_channels = ARRAY_SIZE(bmc150_accel_channels),
@@ -1147,6 +1093,7 @@
 				 {76590, BMC150_ACCEL_DEF_RANGE_16G} },
 	},
 	[bma250e] = {
+		.name = "BMA250E",
 		.chip_id = 0xF9,
 		.channels = bma250e_accel_channels,
 		.num_channels = ARRAY_SIZE(bma250e_accel_channels),
@@ -1156,6 +1103,7 @@
 				 {306457, BMC150_ACCEL_DEF_RANGE_16G} },
 	},
 	[bma222e] = {
+		.name = "BMA222E",
 		.chip_id = 0xF8,
 		.channels = bma222e_accel_channels,
 		.num_channels = ARRAY_SIZE(bma222e_accel_channels),
@@ -1165,6 +1113,7 @@
 				 {1225831, BMC150_ACCEL_DEF_RANGE_16G} },
 	},
 	[bma280] = {
+		.name = "BMA0280",
 		.chip_id = 0xFB,
 		.channels = bma280_accel_channels,
 		.num_channels = ARRAY_SIZE(bma280_accel_channels),
@@ -1255,7 +1204,7 @@
 }
 
 static int bmc150_accel_trigger_set_state(struct iio_trigger *trig,
-						   bool state)
+					  bool state)
 {
 	struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
 	struct bmc150_accel_data *data = t->data;
@@ -1314,26 +1263,32 @@
 		dir = IIO_EV_DIR_RISING;
 
 	if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
-		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
-							0,
-							IIO_MOD_X,
-							IIO_EV_TYPE_ROC,
-							dir),
-							data->timestamp);
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_X,
+						  IIO_EV_TYPE_ROC,
+						  dir),
+			       data->timestamp);
+
 	if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
-		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
-							0,
-							IIO_MOD_Y,
-							IIO_EV_TYPE_ROC,
-							dir),
-							data->timestamp);
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_Y,
+						  IIO_EV_TYPE_ROC,
+						  dir),
+			       data->timestamp);
+
 	if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
-		iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
-							0,
-							IIO_MOD_Z,
-							IIO_EV_TYPE_ROC,
-							dir),
-							data->timestamp);
+		iio_push_event(indio_dev,
+			       IIO_MOD_EVENT_CODE(IIO_ACCEL,
+						  0,
+						  IIO_MOD_Z,
+						  IIO_EV_TYPE_ROC,
+						  dir),
+			       data->timestamp);
+
 	return ret;
 }
 
@@ -1365,7 +1320,9 @@
 					BMC150_ACCEL_INT_MODE_LATCH_INT |
 					BMC150_ACCEL_INT_MODE_LATCH_RESET);
 		if (ret)
-			dev_err(&data->client->dev, "Error writing reg_int_rst_latch\n");
+			dev_err(&data->client->dev,
+				"Error writing reg_int_rst_latch\n");
+
 		ret = IRQ_HANDLED;
 	} else {
 		ret = IRQ_NONE;
@@ -1403,22 +1360,8 @@
 	return IRQ_NONE;
 }
 
-static const char *bmc150_accel_match_acpi_device(struct device *dev, int *data)
-{
-	const struct acpi_device_id *id;
-
-	id = acpi_match_device(dev->driver->acpi_match_table, dev);
-
-	if (!id)
-		return NULL;
-
-	*data = (int) id->driver_data;
-
-	return dev_name(dev);
-}
-
 static int bmc150_accel_gpio_probe(struct i2c_client *client,
-					struct bmc150_accel_data *data)
+				   struct bmc150_accel_data *data)
 {
 	struct device *dev;
 	struct gpio_desc *gpio;
@@ -1611,6 +1554,70 @@
 	.postdisable = bmc150_accel_buffer_postdisable,
 };
 
+static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
+{
+	int ret, i;
+
+	ret = i2c_smbus_read_byte_data(data->client, BMC150_ACCEL_REG_CHIP_ID);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error: Reading chip id\n");
+		return ret;
+	}
+
+	dev_dbg(&data->client->dev, "Chip Id %x\n", ret);
+	for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
+		if (bmc150_accel_chip_info_tbl[i].chip_id == ret) {
+			data->chip_info = &bmc150_accel_chip_info_tbl[i];
+			break;
+		}
+	}
+
+	if (!data->chip_info) {
+		dev_err(&data->client->dev, "Unsupported chip %x\n", ret);
+		return -ENODEV;
+	}
+
+	ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Set Bandwidth */
+	ret = bmc150_accel_set_bw(data, BMC150_ACCEL_DEF_BW, 0);
+	if (ret < 0)
+		return ret;
+
+	/* Set Default Range */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_PMU_RANGE,
+					BMC150_ACCEL_DEF_RANGE_4G);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Error writing reg_pmu_range\n");
+		return ret;
+	}
+
+	data->range = BMC150_ACCEL_DEF_RANGE_4G;
+
+	/* Set default slope duration and thresholds */
+	data->slope_thres = BMC150_ACCEL_DEF_SLOPE_THRESHOLD;
+	data->slope_dur = BMC150_ACCEL_DEF_SLOPE_DURATION;
+	ret = bmc150_accel_update_slope(data);
+	if (ret < 0)
+		return ret;
+
+	/* Set default as latched interrupts */
+	ret = i2c_smbus_write_byte_data(data->client,
+					BMC150_ACCEL_REG_INT_RST_LATCH,
+					BMC150_ACCEL_INT_MODE_LATCH_INT |
+					BMC150_ACCEL_INT_MODE_LATCH_RESET);
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Error writing reg_int_rst_latch\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 static int bmc150_accel_probe(struct i2c_client *client,
 			      const struct i2c_device_id *id)
 {
@@ -1618,7 +1625,6 @@
 	struct iio_dev *indio_dev;
 	int ret;
 	const char *name = NULL;
-	int chip_id = 0;
 
 	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
 	if (!indio_dev)
@@ -1628,15 +1634,8 @@
 	i2c_set_clientdata(client, indio_dev);
 	data->client = client;
 
-	if (id) {
+	if (id)
 		name = id->name;
-		chip_id = id->driver_data;
-	}
-
-	if (ACPI_HANDLE(&client->dev))
-		name = bmc150_accel_match_acpi_device(&client->dev, &chip_id);
-
-	data->chip_info = &bmc150_accel_chip_info_tbl[chip_id];
 
 	ret = bmc150_accel_chip_init(data);
 	if (ret < 0)
@@ -1647,7 +1646,7 @@
 	indio_dev->dev.parent = &client->dev;
 	indio_dev->channels = data->chip_info->channels;
 	indio_dev->num_channels = data->chip_info->num_channels;
-	indio_dev->name = name;
+	indio_dev->name = name ? name : data->chip_info->name;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->info = &bmc150_accel_info;
 
@@ -1663,7 +1662,7 @@
 	if (client->irq < 0)
 		client->irq = bmc150_accel_gpio_probe(client, data);
 
-	if (client->irq >= 0) {
+	if (client->irq > 0) {
 		ret = devm_request_threaded_irq(
 						&client->dev, client->irq,
 						bmc150_accel_irq_handler,
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 0d9bd35..3292bc0 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -658,10 +658,8 @@
 	int ret, i;
 	enum kxcjk1013_mode store_mode;
 
-
 	for (i = 0; i < ARRAY_SIZE(KXCJK1013_scale_table); ++i) {
 		if (KXCJK1013_scale_table[i].scale == val) {
-
 			ret = kxcjk1013_get_mode(data, &store_mode);
 			if (ret < 0)
 				return ret;
@@ -820,7 +818,6 @@
 					  enum iio_event_type type,
 					  enum iio_event_direction dir)
 {
-
 	struct kxcjk1013_data *data = iio_priv(indio_dev);
 
 	return data->ev_enable_state;
@@ -1243,7 +1240,7 @@
 	if (client->irq < 0)
 		client->irq = kxcjk1013_gpio_probe(client, data);
 
-	if (client->irq >= 0) {
+	if (client->irq > 0) {
 		ret = devm_request_threaded_irq(&client->dev, client->irq,
 						kxcjk1013_data_rdy_trig_poll,
 						kxcjk1013_event_handler,
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 13ea1ea..b921d84 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -16,7 +16,6 @@
 #include <linux/i2c.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
-#include <linux/iio/trigger_consumer.h>
 #include <linux/iio/buffer.h>
 #include <linux/iio/trigger.h>
 #include <linux/iio/trigger_consumer.h>
@@ -24,54 +23,51 @@
 #include <linux/iio/events.h>
 #include <linux/delay.h>
 
-#define MMA8452_STATUS 0x00
-#define MMA8452_OUT_X 0x01 /* MSB first, 12-bit  */
-#define MMA8452_OUT_Y 0x03
-#define MMA8452_OUT_Z 0x05
-#define MMA8452_INT_SRC 0x0c
-#define MMA8452_WHO_AM_I 0x0d
-#define MMA8452_DATA_CFG 0x0e
-#define MMA8452_HP_FILTER_CUTOFF 0x0f
-#define MMA8452_HP_FILTER_CUTOFF_SEL_MASK	(BIT(0) | BIT(1))
-#define MMA8452_TRANSIENT_CFG 0x1d
-#define MMA8452_TRANSIENT_CFG_ELE		BIT(4)
-#define MMA8452_TRANSIENT_CFG_CHAN(chan)	BIT(chan + 1)
-#define MMA8452_TRANSIENT_CFG_HPF_BYP		BIT(0)
-#define MMA8452_TRANSIENT_SRC 0x1e
-#define MMA8452_TRANSIENT_SRC_XTRANSE		BIT(1)
-#define MMA8452_TRANSIENT_SRC_YTRANSE		BIT(3)
-#define MMA8452_TRANSIENT_SRC_ZTRANSE		BIT(5)
-#define MMA8452_TRANSIENT_THS 0x1f
-#define MMA8452_TRANSIENT_THS_MASK	0x7f
-#define MMA8452_TRANSIENT_COUNT 0x20
-#define MMA8452_OFF_X 0x2f
-#define MMA8452_OFF_Y 0x30
-#define MMA8452_OFF_Z 0x31
-#define MMA8452_CTRL_REG1 0x2a
-#define MMA8452_CTRL_REG2 0x2b
-#define MMA8452_CTRL_REG2_RST		BIT(6)
-#define MMA8452_CTRL_REG4 0x2d
-#define MMA8452_CTRL_REG5 0x2e
+#define MMA8452_STATUS				0x00
+#define  MMA8452_STATUS_DRDY			(BIT(2) | BIT(1) | BIT(0))
+#define MMA8452_OUT_X				0x01 /* MSB first, 12-bit  */
+#define MMA8452_OUT_Y				0x03
+#define MMA8452_OUT_Z				0x05
+#define MMA8452_INT_SRC				0x0c
+#define MMA8452_WHO_AM_I			0x0d
+#define MMA8452_DATA_CFG			0x0e
+#define  MMA8452_DATA_CFG_FS_MASK		GENMASK(1, 0)
+#define  MMA8452_DATA_CFG_FS_2G			0
+#define  MMA8452_DATA_CFG_FS_4G			1
+#define  MMA8452_DATA_CFG_FS_8G			2
+#define  MMA8452_DATA_CFG_HPF_MASK		BIT(4)
+#define MMA8452_HP_FILTER_CUTOFF		0x0f
+#define  MMA8452_HP_FILTER_CUTOFF_SEL_MASK	GENMASK(1, 0)
+#define MMA8452_TRANSIENT_CFG			0x1d
+#define  MMA8452_TRANSIENT_CFG_HPF_BYP		BIT(0)
+#define  MMA8452_TRANSIENT_CFG_CHAN(chan)	BIT(chan + 1)
+#define  MMA8452_TRANSIENT_CFG_ELE		BIT(4)
+#define MMA8452_TRANSIENT_SRC			0x1e
+#define  MMA8452_TRANSIENT_SRC_XTRANSE		BIT(1)
+#define  MMA8452_TRANSIENT_SRC_YTRANSE		BIT(3)
+#define  MMA8452_TRANSIENT_SRC_ZTRANSE		BIT(5)
+#define MMA8452_TRANSIENT_THS			0x1f
+#define  MMA8452_TRANSIENT_THS_MASK		GENMASK(6, 0)
+#define MMA8452_TRANSIENT_COUNT			0x20
+#define MMA8452_CTRL_REG1			0x2a
+#define  MMA8452_CTRL_ACTIVE			BIT(0)
+#define  MMA8452_CTRL_DR_MASK			GENMASK(5, 3)
+#define  MMA8452_CTRL_DR_SHIFT			3
+#define  MMA8452_CTRL_DR_DEFAULT		0x4 /* 50 Hz sample frequency */
+#define MMA8452_CTRL_REG2			0x2b
+#define  MMA8452_CTRL_REG2_RST			BIT(6)
+#define MMA8452_CTRL_REG4			0x2d
+#define MMA8452_CTRL_REG5			0x2e
+#define MMA8452_OFF_X				0x2f
+#define MMA8452_OFF_Y				0x30
+#define MMA8452_OFF_Z				0x31
 
-#define MMA8452_MAX_REG 0x31
+#define MMA8452_MAX_REG				0x31
 
-#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
+#define  MMA8452_INT_DRDY			BIT(0)
+#define  MMA8452_INT_TRANS			BIT(5)
 
-#define MMA8452_CTRL_DR_MASK (BIT(5) | BIT(4) | BIT(3))
-#define MMA8452_CTRL_DR_SHIFT 3
-#define MMA8452_CTRL_DR_DEFAULT 0x4 /* 50 Hz sample frequency */
-#define MMA8452_CTRL_ACTIVE BIT(0)
-
-#define MMA8452_DATA_CFG_FS_MASK (BIT(1) | BIT(0))
-#define MMA8452_DATA_CFG_FS_2G 0
-#define MMA8452_DATA_CFG_FS_4G 1
-#define MMA8452_DATA_CFG_FS_8G 2
-#define MMA8452_DATA_CFG_HPF_MASK BIT(4)
-
-#define MMA8452_INT_DRDY	BIT(0)
-#define MMA8452_INT_TRANS	BIT(5)
-
-#define MMA8452_DEVICE_ID 0x2a
+#define  MMA8452_DEVICE_ID			0x2a
 
 struct mma8452_data {
 	struct i2c_client *client;
@@ -91,30 +87,34 @@
 			return ret;
 		if ((ret & MMA8452_STATUS_DRDY) == MMA8452_STATUS_DRDY)
 			return 0;
+
 		msleep(20);
 	}
 
 	dev_err(&data->client->dev, "data not ready\n");
+
 	return -EIO;
 }
 
 static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
 {
 	int ret = mma8452_drdy(data);
+
 	if (ret < 0)
 		return ret;
-	return i2c_smbus_read_i2c_block_data(data->client,
-		MMA8452_OUT_X, 3 * sizeof(__be16), (u8 *) buf);
+
+	return i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
+					     3 * sizeof(__be16), (u8 *)buf);
 }
 
-static ssize_t mma8452_show_int_plus_micros(char *buf,
-	const int (*vals)[2], int n)
+static ssize_t mma8452_show_int_plus_micros(char *buf, const int (*vals)[2],
+					    int n)
 {
 	size_t len = 0;
 
 	while (n-- > 0)
-		len += scnprintf(buf + len, PAGE_SIZE - len,
-			"%d.%06d ", vals[n][0], vals[n][1]);
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%06d ",
+				 vals[n][0], vals[n][1]);
 
 	/* replace trailing space by newline */
 	buf[len - 1] = '\n';
@@ -123,7 +123,7 @@
 }
 
 static int mma8452_get_int_plus_micros_index(const int (*vals)[2], int n,
-					int val, int val2)
+					     int val, int val2)
 {
 	while (n-- > 0)
 		if (val == vals[n][0] && val2 == vals[n][1])
@@ -147,7 +147,7 @@
  * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
  * The userspace interface uses m/s^2 and we declare micro units
  * So scale factor is given by:
- * 	g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ *	g * N * 1000000 / 2048 for N = 2, 4, 8 and g = 9.80665
  */
 static const int mma8452_scales[3][2] = {
 	{0, 9577}, {0, 19154}, {0, 38307}
@@ -178,17 +178,19 @@
 };
 
 static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
-				struct device_attribute *attr, char *buf)
+					    struct device_attribute *attr,
+					    char *buf)
 {
 	return mma8452_show_int_plus_micros(buf, mma8452_samp_freq,
-		ARRAY_SIZE(mma8452_samp_freq));
+					    ARRAY_SIZE(mma8452_samp_freq));
 }
 
 static ssize_t mma8452_show_scale_avail(struct device *dev,
-				struct device_attribute *attr, char *buf)
+					struct device_attribute *attr,
+					char *buf)
 {
 	return mma8452_show_int_plus_micros(buf, mma8452_scales,
-		ARRAY_SIZE(mma8452_scales));
+					    ARRAY_SIZE(mma8452_scales));
 }
 
 static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev,
@@ -205,22 +207,23 @@
 
 static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
 static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
-	mma8452_show_scale_avail, NULL, 0);
+		       mma8452_show_scale_avail, NULL, 0);
 static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
-			S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
+		       S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
 
 static int mma8452_get_samp_freq_index(struct mma8452_data *data,
-	int val, int val2)
+				       int val, int val2)
 {
 	return mma8452_get_int_plus_micros_index(mma8452_samp_freq,
-		ARRAY_SIZE(mma8452_samp_freq), val, val2);
+						 ARRAY_SIZE(mma8452_samp_freq),
+						 val, val2);
 }
 
-static int mma8452_get_scale_index(struct mma8452_data *data,
-	int val, int val2)
+static int mma8452_get_scale_index(struct mma8452_data *data, int val, int val2)
 {
 	return mma8452_get_int_plus_micros_index(mma8452_scales,
-		ARRAY_SIZE(mma8452_scales), val, val2);
+						 ARRAY_SIZE(mma8452_scales),
+						 val, val2);
 }
 
 static int mma8452_get_hp_filter_index(struct mma8452_data *data,
@@ -229,7 +232,7 @@
 	int i = mma8452_get_odr_index(data);
 
 	return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[i],
-		ARRAY_SIZE(mma8452_scales[0]), val, val2);
+		ARRAY_SIZE(mma8452_hp_filter_cutoff[0]), val, val2);
 }
 
 static int mma8452_read_hp_filter(struct mma8452_data *data, int *hz, int *uHz)
@@ -266,25 +269,31 @@
 		mutex_unlock(&data->lock);
 		if (ret < 0)
 			return ret;
-		*val = sign_extend32(
-			be16_to_cpu(buffer[chan->scan_index]) >> 4, 11);
+
+		*val = sign_extend32(be16_to_cpu(buffer[chan->scan_index]) >> 4,
+				     11);
+
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		i = data->data_cfg & MMA8452_DATA_CFG_FS_MASK;
 		*val = mma8452_scales[i][0];
 		*val2 = mma8452_scales[i][1];
+
 		return IIO_VAL_INT_PLUS_MICRO;
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		i = mma8452_get_odr_index(data);
 		*val = mma8452_samp_freq[i][0];
 		*val2 = mma8452_samp_freq[i][1];
+
 		return IIO_VAL_INT_PLUS_MICRO;
 	case IIO_CHAN_INFO_CALIBBIAS:
-		ret = i2c_smbus_read_byte_data(data->client, MMA8452_OFF_X +
-			chan->scan_index);
+		ret = i2c_smbus_read_byte_data(data->client,
+					      MMA8452_OFF_X + chan->scan_index);
 		if (ret < 0)
 			return ret;
+
 		*val = sign_extend32(ret, 7);
+
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
 		if (data->data_cfg & MMA8452_DATA_CFG_HPF_MASK) {
@@ -295,21 +304,23 @@
 			*val = 0;
 			*val2 = 0;
 		}
+
 		return IIO_VAL_INT_PLUS_MICRO;
 	}
+
 	return -EINVAL;
 }
 
 static int mma8452_standby(struct mma8452_data *data)
 {
 	return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1,
-		data->ctrl_reg1 & ~MMA8452_CTRL_ACTIVE);
+					data->ctrl_reg1 & ~MMA8452_CTRL_ACTIVE);
 }
 
 static int mma8452_active(struct mma8452_data *data)
 {
 	return i2c_smbus_write_byte_data(data->client, MMA8452_CTRL_REG1,
-		data->ctrl_reg1);
+					 data->ctrl_reg1);
 }
 
 static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
@@ -334,6 +345,7 @@
 	ret = 0;
 fail:
 	mutex_unlock(&data->lock);
+
 	return ret;
 }
 
@@ -344,12 +356,13 @@
 
 	i = mma8452_get_hp_filter_index(data, val, val2);
 	if (i < 0)
-		return -EINVAL;
+		return i;
 
 	reg = i2c_smbus_read_byte_data(data->client,
 				       MMA8452_HP_FILTER_CUTOFF);
 	if (reg < 0)
 		return reg;
+
 	reg &= ~MMA8452_HP_FILTER_CUTOFF_SEL_MASK;
 	reg |= i;
 
@@ -370,25 +383,30 @@
 	case IIO_CHAN_INFO_SAMP_FREQ:
 		i = mma8452_get_samp_freq_index(data, val, val2);
 		if (i < 0)
-			return -EINVAL;
+			return i;
 
 		data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
 		data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
+
 		return mma8452_change_config(data, MMA8452_CTRL_REG1,
-			data->ctrl_reg1);
+					     data->ctrl_reg1);
 	case IIO_CHAN_INFO_SCALE:
 		i = mma8452_get_scale_index(data, val, val2);
 		if (i < 0)
-			return -EINVAL;
+			return i;
+
 		data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
 		data->data_cfg |= i;
+
 		return mma8452_change_config(data, MMA8452_DATA_CFG,
-			data->data_cfg);
+					     data->data_cfg);
 	case IIO_CHAN_INFO_CALIBBIAS:
 		if (val < -128 || val > 127)
 			return -EINVAL;
-		return mma8452_change_config(data, MMA8452_OFF_X +
-			chan->scan_index, val);
+
+		return mma8452_change_config(data,
+					     MMA8452_OFF_X + chan->scan_index,
+					     val);
 
 	case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
 		if (val == 0 && val2 == 0) {
@@ -399,8 +417,9 @@
 			if (ret < 0)
 				return ret;
 		}
+
 		return mma8452_change_config(data, MMA8452_DATA_CFG,
-						data->data_cfg);
+					     data->data_cfg);
 
 	default:
 		return -EINVAL;
@@ -425,6 +444,7 @@
 			return ret;
 
 		*val = ret & MMA8452_TRANSIENT_THS_MASK;
+
 		return IIO_VAL_INT;
 
 	case IIO_EV_INFO_PERIOD:
@@ -437,6 +457,7 @@
 				mma8452_get_odr_index(data)];
 		*val = us / USEC_PER_SEC;
 		*val2 = us % USEC_PER_SEC;
+
 		return IIO_VAL_INT_PLUS_MICRO;
 
 	case IIO_EV_INFO_HIGH_PASS_FILTER_3DB:
@@ -453,6 +474,7 @@
 			if (ret < 0)
 				return ret;
 		}
+
 		return IIO_VAL_INT_PLUS_MICRO;
 
 	default:
@@ -472,19 +494,22 @@
 
 	switch (info) {
 	case IIO_EV_INFO_VALUE:
-		return mma8452_change_config(data, MMA8452_TRANSIENT_THS,
-					     val & MMA8452_TRANSIENT_THS_MASK);
+		if (val < 0 || val > MMA8452_TRANSIENT_THS_MASK)
+			return -EINVAL;
+
+		return mma8452_change_config(data, MMA8452_TRANSIENT_THS, val);
 
 	case IIO_EV_INFO_PERIOD:
 		steps = (val * USEC_PER_SEC + val2) /
 				mma8452_transient_time_step_us[
 					mma8452_get_odr_index(data)];
 
-		if (steps > 0xff)
+		if (steps < 0 || steps > 0xff)
 			return -EINVAL;
 
 		return mma8452_change_config(data, MMA8452_TRANSIENT_COUNT,
 					     steps);
+
 	case IIO_EV_INFO_HIGH_PASS_FILTER_3DB:
 		reg = i2c_smbus_read_byte_data(data->client,
 					       MMA8452_TRANSIENT_CFG);
@@ -499,6 +524,7 @@
 			if (ret < 0)
 				return ret;
 		}
+
 		return mma8452_change_config(data, MMA8452_TRANSIENT_CFG, reg);
 
 	default:
@@ -608,15 +634,16 @@
 	u8 buffer[16]; /* 3 16-bit channels + padding + ts */
 	int ret;
 
-	ret = mma8452_read(data, (__be16 *) buffer);
+	ret = mma8452_read(data, (__be16 *)buffer);
 	if (ret < 0)
 		goto done;
 
 	iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-		iio_get_time_ns());
+					   iio_get_time_ns());
 
 done:
 	iio_trigger_notify_done(indio_dev->trig);
+
 	return IRQ_HANDLED;
 }
 
@@ -674,10 +701,10 @@
 	.modified = 1, \
 	.channel2 = IIO_MOD_##axis, \
 	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
-		BIT(IIO_CHAN_INFO_CALIBBIAS), \
+			      BIT(IIO_CHAN_INFO_CALIBBIAS), \
 	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
-		BIT(IIO_CHAN_INFO_SCALE) | \
-		BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+			BIT(IIO_CHAN_INFO_SCALE) | \
+			BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
 	.scan_index = idx, \
 	.scan_type = { \
 		.sign = 's', \
@@ -780,6 +807,7 @@
 		return ret;
 
 	indio_dev->trig = trig;
+
 	return 0;
 }
 
@@ -849,7 +877,7 @@
 
 	data->data_cfg = MMA8452_DATA_CFG_FS_2G;
 	ret = i2c_smbus_write_byte_data(client, MMA8452_DATA_CFG,
-		data->data_cfg);
+					data->data_cfg);
 	if (ret < 0)
 		return ret;
 
@@ -891,14 +919,14 @@
 	}
 
 	data->ctrl_reg1 = MMA8452_CTRL_ACTIVE |
-		(MMA8452_CTRL_DR_DEFAULT << MMA8452_CTRL_DR_SHIFT);
+			  (MMA8452_CTRL_DR_DEFAULT << MMA8452_CTRL_DR_SHIFT);
 	ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG1,
 					data->ctrl_reg1);
 	if (ret < 0)
 		goto trigger_cleanup;
 
 	ret = iio_triggered_buffer_setup(indio_dev, NULL,
-		mma8452_trigger_handler, NULL);
+					 mma8452_trigger_handler, NULL);
 	if (ret < 0)
 		goto trigger_cleanup;
 
@@ -968,6 +996,7 @@
 	{ .compatible = "fsl,mma8452" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
 
 static struct i2c_driver mma8452_driver = {
 	.driver = {
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 2fd2a99..c34c5ce 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -297,7 +297,7 @@
  * Returns: 0 on success, negative value on failure.
  */
 int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
-			    u16 reg, u16 *val)
+			     u16 reg, u16 *val)
 {
 	int ret;
 	__be16 v;
@@ -328,12 +328,12 @@
  * Returns: 0 on success, negative value on failure.
  */
 int mma9551_write_config_word(struct i2c_client *client, u8 app_id,
-			     u16 reg, u16 val)
+			      u16 reg, u16 val)
 {
 	__be16 v = cpu_to_be16(val);
 
 	return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG, reg,
-				(u8 *) &v, 2, NULL, 0);
+				(u8 *)&v, 2, NULL, 0);
 }
 EXPORT_SYMBOL(mma9551_write_config_word);
 
@@ -373,7 +373,7 @@
  * @client:	I2C client
  * @app_id:	Application ID
  * @reg:	Application register
- * @len:	Length of array to read in bytes
+ * @len:	Length of array to read (in words)
  * @buf:	Array of words to read
  *
  * Read multiple configuration registers (word-sized registers).
@@ -385,23 +385,22 @@
  * Returns: 0 on success, negative value on failure.
  */
 int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
-			     u16 reg, u8 len, u16 *buf)
+			      u16 reg, u8 len, u16 *buf)
 {
 	int ret, i;
-	int len_words = len / sizeof(u16);
 	__be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
 
-	if (len_words > ARRAY_SIZE(be_buf)) {
+	if (len > ARRAY_SIZE(be_buf)) {
 		dev_err(&client->dev, "Invalid buffer size %d\n", len);
 		return -EINVAL;
 	}
 
 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
-			       reg, NULL, 0, (u8 *) be_buf, len);
+			       reg, NULL, 0, (u8 *)be_buf, len * sizeof(u16));
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < len_words; i++)
+	for (i = 0; i < len; i++)
 		buf[i] = be16_to_cpu(be_buf[i]);
 
 	return 0;
@@ -413,7 +412,7 @@
  * @client:	I2C client
  * @app_id:	Application ID
  * @reg:	Application register
- * @len:	Length of array to read in bytes
+ * @len:	Length of array to read (in words)
  * @buf:	Array of words to read
  *
  * Read multiple status registers (word-sized registers).
@@ -428,20 +427,19 @@
 			      u16 reg, u8 len, u16 *buf)
 {
 	int ret, i;
-	int len_words = len / sizeof(u16);
 	__be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
 
-	if (len_words > ARRAY_SIZE(be_buf)) {
+	if (len > ARRAY_SIZE(be_buf)) {
 		dev_err(&client->dev, "Invalid buffer size %d\n", len);
 		return -EINVAL;
 	}
 
 	ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
-			       reg, NULL, 0, (u8 *) be_buf, len);
+			       reg, NULL, 0, (u8 *)be_buf, len * sizeof(u16));
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < len_words; i++)
+	for (i = 0; i < len; i++)
 		buf[i] = be16_to_cpu(be_buf[i]);
 
 	return 0;
@@ -453,7 +451,7 @@
  * @client:	I2C client
  * @app_id:	Application ID
  * @reg:	Application register
- * @len:	Length of array to write in bytes
+ * @len:	Length of array to write (in words)
  * @buf:	Array of words to write
  *
  * Write multiple configuration registers (word-sized registers).
@@ -468,19 +466,18 @@
 			       u16 reg, u8 len, u16 *buf)
 {
 	int i;
-	int len_words = len / sizeof(u16);
 	__be16 be_buf[(MMA9551_MAX_MAILBOX_DATA_REGS - 1) / 2];
 
-	if (len_words > ARRAY_SIZE(be_buf)) {
+	if (len > ARRAY_SIZE(be_buf)) {
 		dev_err(&client->dev, "Invalid buffer size %d\n", len);
 		return -EINVAL;
 	}
 
-	for (i = 0; i < len_words; i++)
+	for (i = 0; i < len; i++)
 		be_buf[i] = cpu_to_be16(buf[i]);
 
 	return mma9551_transfer(client, app_id, MMA9551_CMD_WRITE_CONFIG,
-				reg, (u8 *) be_buf, len, NULL, 0);
+				reg, (u8 *)be_buf, len * sizeof(u16), NULL, 0);
 }
 EXPORT_SYMBOL(mma9551_write_config_words);
 
diff --git a/drivers/iio/accel/mma9551_core.h b/drivers/iio/accel/mma9551_core.h
index 79939e4..5e88e64 100644
--- a/drivers/iio/accel/mma9551_core.h
+++ b/drivers/iio/accel/mma9551_core.h
@@ -53,13 +53,13 @@
 int mma9551_read_status_byte(struct i2c_client *client, u8 app_id,
 			     u16 reg, u8 *val);
 int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
-			    u16 reg, u16 *val);
+			     u16 reg, u16 *val);
 int mma9551_write_config_word(struct i2c_client *client, u8 app_id,
-			     u16 reg, u16 val);
+			      u16 reg, u16 val);
 int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
 			     u16 reg, u16 *val);
 int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
-			     u16 reg, u8 len, u16 *buf);
+			      u16 reg, u8 len, u16 *buf);
 int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
 			      u16 reg, u8 len, u16 *buf);
 int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index 8bfc618..771858c 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -182,6 +182,10 @@
 
 struct mma9553_data {
 	struct i2c_client *client;
+	/*
+	 * 1. Serialize access to HW (requested by mma9551_core API).
+	 * 2. Serialize sequences that power on/off the device and access HW.
+	 */
 	struct mutex mutex;
 	struct mma9553_conf_regs conf;
 	struct mma9553_event events[MMA9553_EVENTS_INFO_SIZE];
@@ -322,7 +326,8 @@
 	int ret;
 
 	ret = mma9551_read_status_words(data->client, MMA9551_APPID_PEDOMETER,
-					MMA9553_REG_STATUS, sizeof(u32), buf);
+					MMA9553_REG_STATUS, ARRAY_SIZE(buf),
+					buf);
 	if (ret < 0) {
 		dev_err(&data->client->dev,
 			"error reading status and stepcnt\n");
@@ -342,10 +347,10 @@
 	struct mma9553_event *ev_step_detect;
 	bool activity_enabled;
 
-	activity_enabled =
-	    mma9553_is_any_event_enabled(data, true, IIO_ACTIVITY);
-	ev_step_detect =
-	    mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD, IIO_EV_DIR_NONE);
+	activity_enabled = mma9553_is_any_event_enabled(data, true,
+							IIO_ACTIVITY);
+	ev_step_detect = mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD,
+					   IIO_EV_DIR_NONE);
 
 	/*
 	 * If both step detector and activity are enabled, use the MRGFL bit.
@@ -371,9 +376,8 @@
 			return ret;
 	}
 
-	ret = mma9551_gpio_config(data->client,
-				  MMA9553_DEFAULT_GPIO_PIN,
-				  appid, bitnum, MMA9553_DEFAULT_GPIO_POLARITY);
+	ret = mma9551_gpio_config(data->client, MMA9553_DEFAULT_GPIO_PIN, appid,
+				  bitnum, MMA9553_DEFAULT_GPIO_POLARITY);
 	if (ret < 0)
 		return ret;
 	data->gpio_bitnum = bitnum;
@@ -394,17 +398,16 @@
 	 * a device identification command to differentiate the MMA9553L
 	 * from the MMA9550L.
 	 */
-	ret =
-	    mma9551_read_config_words(data->client, MMA9551_APPID_PEDOMETER,
-				      MMA9553_REG_CONF_SLEEPMIN,
-				      sizeof(data->conf), (u16 *) &data->conf);
+	ret = mma9551_read_config_words(data->client, MMA9551_APPID_PEDOMETER,
+					MMA9553_REG_CONF_SLEEPMIN,
+					sizeof(data->conf) / sizeof(u16),
+					(u16 *)&data->conf);
 	if (ret < 0) {
 		dev_err(&data->client->dev,
 			"failed to read configuration registers\n");
 		return ret;
 	}
 
-
 	/* Reset GPIO */
 	data->gpio_bitnum = MMA9553_MAX_BITNUM;
 	ret = mma9553_conf_gpio(data);
@@ -419,18 +422,18 @@
 	data->conf.sleepmin = MMA9553_DEFAULT_SLEEPMIN;
 	data->conf.sleepmax = MMA9553_DEFAULT_SLEEPMAX;
 	data->conf.sleepthd = MMA9553_DEFAULT_SLEEPTHD;
-	data->conf.config =
-	    mma9553_set_bits(data->conf.config, 1, MMA9553_MASK_CONF_CONFIG);
+	data->conf.config = mma9553_set_bits(data->conf.config, 1,
+					     MMA9553_MASK_CONF_CONFIG);
 	/*
 	 * Clear the activity debounce counter when the activity level changes,
 	 * so that the confidence level applies for any activity level.
 	 */
 	data->conf.config = mma9553_set_bits(data->conf.config, 1,
 					     MMA9553_MASK_CONF_ACT_DBCNTM);
-	ret =
-	    mma9551_write_config_words(data->client, MMA9551_APPID_PEDOMETER,
-				       MMA9553_REG_CONF_SLEEPMIN,
-				       sizeof(data->conf), (u16 *) &data->conf);
+	ret = mma9551_write_config_words(data->client, MMA9551_APPID_PEDOMETER,
+					 MMA9553_REG_CONF_SLEEPMIN,
+					 sizeof(data->conf) / sizeof(u16),
+					 (u16 *)&data->conf);
 	if (ret < 0) {
 		dev_err(&data->client->dev,
 			"failed to write configuration registers\n");
@@ -567,7 +570,7 @@
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_CALIBHEIGHT:
 		tmp = mma9553_get_bits(data->conf.height_weight,
-					MMA9553_MASK_CONF_HEIGHT);
+				       MMA9553_MASK_CONF_HEIGHT);
 		*val = tmp / 100;	/* cm to m */
 		*val2 = (tmp % 100) * 10000;
 		return IIO_VAL_INT_PLUS_MICRO;
@@ -719,7 +722,6 @@
 				     enum iio_event_type type,
 				     enum iio_event_direction dir)
 {
-
 	struct mma9553_data *data = iio_priv(indio_dev);
 	struct mma9553_event *event;
 
@@ -1026,22 +1028,22 @@
 		return IRQ_HANDLED;
 	}
 
-	ev_prev_activity =
-	    mma9553_get_event(data, IIO_ACTIVITY,
-			      mma9553_activity_to_mod(data->activity),
-			      IIO_EV_DIR_FALLING);
-	ev_activity =
-	    mma9553_get_event(data, IIO_ACTIVITY,
-			      mma9553_activity_to_mod(activity),
-			      IIO_EV_DIR_RISING);
-	ev_step_detect =
-	    mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD, IIO_EV_DIR_NONE);
+	ev_prev_activity = mma9553_get_event(data, IIO_ACTIVITY,
+					     mma9553_activity_to_mod(
+					     data->activity),
+					     IIO_EV_DIR_FALLING);
+	ev_activity = mma9553_get_event(data, IIO_ACTIVITY,
+					mma9553_activity_to_mod(activity),
+					IIO_EV_DIR_RISING);
+	ev_step_detect = mma9553_get_event(data, IIO_STEPS, IIO_NO_MOD,
+					   IIO_EV_DIR_NONE);
 
 	if (ev_step_detect->enabled && (stepcnt != data->stepcnt)) {
 		data->stepcnt = stepcnt;
 		iio_push_event(indio_dev,
 			       IIO_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
-			       IIO_EV_DIR_NONE, IIO_EV_TYPE_CHANGE, 0, 0, 0),
+					      IIO_EV_DIR_NONE,
+					      IIO_EV_TYPE_CHANGE, 0, 0, 0),
 			       data->timestamp);
 	}
 
@@ -1051,17 +1053,19 @@
 		if (ev_prev_activity && ev_prev_activity->enabled)
 			iio_push_event(indio_dev,
 				       IIO_EVENT_CODE(IIO_ACTIVITY, 0,
-				       ev_prev_activity->info->mod,
-				       IIO_EV_DIR_FALLING,
-				       IIO_EV_TYPE_THRESH, 0, 0, 0),
+						    ev_prev_activity->info->mod,
+						    IIO_EV_DIR_FALLING,
+						    IIO_EV_TYPE_THRESH, 0, 0,
+						    0),
 				       data->timestamp);
 
 		if (ev_activity && ev_activity->enabled)
 			iio_push_event(indio_dev,
 				       IIO_EVENT_CODE(IIO_ACTIVITY, 0,
-				       ev_activity->info->mod,
-				       IIO_EV_DIR_RISING,
-				       IIO_EV_TYPE_THRESH, 0, 0, 0),
+						      ev_activity->info->mod,
+						      IIO_EV_DIR_RISING,
+						      IIO_EV_TYPE_THRESH, 0, 0,
+						      0),
 				       data->timestamp);
 	}
 	mutex_unlock(&data->mutex);
@@ -1145,7 +1149,7 @@
 	if (client->irq < 0)
 		client->irq = mma9553_gpio_probe(client);
 
-	if (client->irq >= 0) {
+	if (client->irq > 0) {
 		ret = devm_request_threaded_irq(&client->dev, client->irq,
 						mma9553_irq_handler,
 						mma9553_event_handler,
@@ -1156,7 +1160,6 @@
 				client->irq);
 			goto out_poweroff;
 		}
-
 	}
 
 	ret = iio_device_register(indio_dev);
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index aa10019..468f21f 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -26,6 +26,7 @@
 #define LSM303DLH_ACCEL_DEV_NAME	"lsm303dlh_accel"
 #define LSM303DLM_ACCEL_DEV_NAME	"lsm303dlm_accel"
 #define LSM330_ACCEL_DEV_NAME		"lsm330_accel"
+#define LSM303AGR_ACCEL_DEV_NAME	"lsm303agr_accel"
 
 /**
 * struct st_sensors_platform_data - default accel platform data
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 4002e64..ff30f88 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -226,12 +226,14 @@
 static const struct st_sensor_settings st_accel_sensors_settings[] = {
 	{
 		.wai = ST_ACCEL_1_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS3DH_ACCEL_DEV_NAME,
 			[1] = LSM303DLHC_ACCEL_DEV_NAME,
 			[2] = LSM330D_ACCEL_DEV_NAME,
 			[3] = LSM330DL_ACCEL_DEV_NAME,
 			[4] = LSM330DLC_ACCEL_DEV_NAME,
+			[5] = LSM303AGR_ACCEL_DEV_NAME,
 		},
 		.ch = (struct iio_chan_spec *)st_accel_12bit_channels,
 		.odr = {
@@ -297,6 +299,7 @@
 	},
 	{
 		.wai = ST_ACCEL_2_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS331DLH_ACCEL_DEV_NAME,
 			[1] = LSM303DL_ACCEL_DEV_NAME,
@@ -359,6 +362,7 @@
 	},
 	{
 		.wai = ST_ACCEL_3_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LSM330_ACCEL_DEV_NAME,
 		},
@@ -437,6 +441,7 @@
 	},
 	{
 		.wai = ST_ACCEL_4_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS3LV02DL_ACCEL_DEV_NAME,
 		},
@@ -494,6 +499,7 @@
 	},
 	{
 		.wai = ST_ACCEL_5_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS331DL_ACCEL_DEV_NAME,
 		},
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index d4ad72c..8b9cc84 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -68,6 +68,10 @@
 		.compatible = "st,lsm330-accel",
 		.data = LSM330_ACCEL_DEV_NAME,
 	},
+	{
+		.compatible = "st,lsm303agr-accel",
+		.data = LSM303AGR_ACCEL_DEV_NAME,
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -116,13 +120,13 @@
 	{ LSM303DL_ACCEL_DEV_NAME },
 	{ LSM303DLM_ACCEL_DEV_NAME },
 	{ LSM330_ACCEL_DEV_NAME },
+	{ LSM303AGR_ACCEL_DEV_NAME },
 	{},
 };
 MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
 
 static struct i2c_driver st_accel_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "st-accel-i2c",
 		.of_match_table = of_match_ptr(st_accel_of_match),
 	},
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 12ec293..54b61a3 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -57,6 +57,7 @@
 	{ LSM303DL_ACCEL_DEV_NAME },
 	{ LSM303DLM_ACCEL_DEV_NAME },
 	{ LSM330_ACCEL_DEV_NAME },
+	{ LSM303AGR_ACCEL_DEV_NAME },
 	{},
 };
 MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index d211d9f3..c764af2 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -11,17 +11,25 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
+#include <linux/iio/buffer.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
 
 #define STK8312_REG_XOUT		0x00
 #define STK8312_REG_YOUT		0x01
 #define STK8312_REG_ZOUT		0x02
+#define STK8312_REG_INTSU		0x06
 #define STK8312_REG_MODE		0x07
+#define STK8312_REG_SR			0x08
 #define STK8312_REG_STH			0x13
 #define STK8312_REG_RESET		0x20
 #define STK8312_REG_AFECTRL		0x24
@@ -29,14 +37,21 @@
 #define STK8312_REG_OTPDATA		0x3E
 #define STK8312_REG_OTPCTRL		0x3F
 
-#define STK8312_MODE_ACTIVE		1
-#define STK8312_MODE_STANDBY		0
-#define STK8312_MODE_MASK		0x01
-#define STK8312_RNG_MASK		0xC0
+#define STK8312_MODE_ACTIVE		BIT(0)
+#define STK8312_MODE_STANDBY		0x00
+#define STK8312_MODE_INT_AH_PP		0xC0	/* active-high, push-pull */
+#define STK8312_DREADY_BIT		BIT(4)
+#define STK8312_RNG_6G			1
 #define STK8312_RNG_SHIFT		6
-#define STK8312_READ_RETRIES		16
+#define STK8312_RNG_MASK		GENMASK(7, 6)
+#define STK8312_SR_MASK			GENMASK(2, 0)
+#define STK8312_SR_400HZ_IDX		0
+#define STK8312_ALL_CHANNEL_MASK	GENMASK(2, 0)
+#define STK8312_ALL_CHANNEL_SIZE	3
 
 #define STK8312_DRIVER_NAME		"stk8312"
+#define STK8312_GPIO			"stk8312_gpio"
+#define STK8312_IRQ_NAME		"stk8312_event"
 
 /*
  * The accelerometer has two measurement ranges:
@@ -53,32 +68,56 @@
 	{0, 461600}, {1, 231100}
 };
 
-#define STK8312_ACCEL_CHANNEL(reg, axis) {			\
-	.type = IIO_ACCEL,					\
-	.address = reg,						\
-	.modified = 1,						\
-	.channel2 = IIO_MOD_##axis,				\
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
-	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+static const struct {
+	int val;
+	int val2;
+} stk8312_samp_freq_table[] = {
+	{400, 0}, {200, 0}, {100, 0}, {50, 0}, {25, 0},
+	{12, 500000}, {6, 250000}, {3, 125000}
+};
+
+#define STK8312_ACCEL_CHANNEL(index, reg, axis) {			\
+	.type = IIO_ACCEL,						\
+	.address = reg,							\
+	.modified = 1,							\
+	.channel2 = IIO_MOD_##axis,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
+				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+	.scan_index = index,						\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = 8,						\
+		.storagebits = 8,					\
+		.endianness = IIO_CPU,					\
+	},								\
 }
 
 static const struct iio_chan_spec stk8312_channels[] = {
-	STK8312_ACCEL_CHANNEL(STK8312_REG_XOUT, X),
-	STK8312_ACCEL_CHANNEL(STK8312_REG_YOUT, Y),
-	STK8312_ACCEL_CHANNEL(STK8312_REG_ZOUT, Z),
+	STK8312_ACCEL_CHANNEL(0, STK8312_REG_XOUT, X),
+	STK8312_ACCEL_CHANNEL(1, STK8312_REG_YOUT, Y),
+	STK8312_ACCEL_CHANNEL(2, STK8312_REG_ZOUT, Z),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
 };
 
 struct stk8312_data {
 	struct i2c_client *client;
 	struct mutex lock;
-	int range;
+	u8 range;
+	u8 sample_rate_idx;
 	u8 mode;
+	struct iio_trigger *dready_trig;
+	bool dready_trigger_on;
+	s8 buffer[16]; /* 3 x 8-bit channels + 5 bytes padding + 64-bit timestamp */
 };
 
 static IIO_CONST_ATTR(in_accel_scale_available, STK8312_SCALE_AVAIL);
 
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("3.125 6.25 12.5 25 50 100 200 400");
+
 static struct attribute *stk8312_attributes[] = {
 	&iio_const_attr_in_accel_scale_available.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
 	NULL,
 };
 
@@ -105,22 +144,25 @@
 		if (ret < 0)
 			goto exit_err;
 		count--;
-	} while (!(ret & 0x80) && count > 0);
+	} while (!(ret & BIT(7)) && count > 0);
 
-	if (count == 0)
+	if (count == 0) {
+		ret = -ETIMEDOUT;
 		goto exit_err;
+	}
 
 	ret = i2c_smbus_read_byte_data(client, STK8312_REG_OTPDATA);
+	if (ret == 0)
+		ret = -EINVAL;
 	if (ret < 0)
 		goto exit_err;
 
-	ret = i2c_smbus_write_byte_data(data->client,
-			STK8312_REG_AFECTRL, ret);
+	ret = i2c_smbus_write_byte_data(data->client, STK8312_REG_AFECTRL, ret);
 	if (ret < 0)
 		goto exit_err;
 	msleep(150);
 
-	return ret;
+	return 0;
 
 exit_err:
 	dev_err(&client->dev, "failed to initialize sensor\n");
@@ -130,31 +172,19 @@
 static int stk8312_set_mode(struct stk8312_data *data, u8 mode)
 {
 	int ret;
-	u8 masked_reg;
 	struct i2c_client *client = data->client;
 
-	if (mode > 1)
-		return -EINVAL;
-	else if (mode == data->mode)
+	if (mode == data->mode)
 		return 0;
 
-	ret = i2c_smbus_read_byte_data(client, STK8312_REG_MODE);
-	if (ret < 0) {
-		dev_err(&client->dev, "failed to change sensor mode\n");
-		return ret;
-	}
-	masked_reg = ret & (~STK8312_MODE_MASK);
-	masked_reg |= mode;
-
-	ret = i2c_smbus_write_byte_data(client,
-			STK8312_REG_MODE, masked_reg);
+	ret = i2c_smbus_write_byte_data(client, STK8312_REG_MODE, mode);
 	if (ret < 0) {
 		dev_err(&client->dev, "failed to change sensor mode\n");
 		return ret;
 	}
 
 	data->mode = mode;
-	if (mode == STK8312_MODE_ACTIVE) {
+	if (mode & STK8312_MODE_ACTIVE) {
 		/* Need to run OTP sequence before entering active mode */
 		usleep_range(1000, 5000);
 		ret = stk8312_otp_init(data);
@@ -163,6 +193,92 @@
 	return ret;
 }
 
+static int stk8312_set_interrupts(struct stk8312_data *data, u8 int_mask)
+{
+	int ret;
+	u8 mode;
+	struct i2c_client *client = data->client;
+
+	mode = data->mode;
+	/* We need to go into standby mode to modify registers */
+	ret = stk8312_set_mode(data, STK8312_MODE_STANDBY);
+	if (ret < 0)
+		return ret;
+
+	ret = i2c_smbus_write_byte_data(client, STK8312_REG_INTSU, int_mask);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed to set interrupts\n");
+		stk8312_set_mode(data, mode);
+		return ret;
+	}
+
+	return stk8312_set_mode(data, mode);
+}
+
+static int stk8312_data_rdy_trigger_set_state(struct iio_trigger *trig,
+					      bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+	struct stk8312_data *data = iio_priv(indio_dev);
+	int ret;
+
+	if (state)
+		ret = stk8312_set_interrupts(data, STK8312_DREADY_BIT);
+	else
+		ret = stk8312_set_interrupts(data, 0x00);
+
+	if (ret < 0) {
+		dev_err(&data->client->dev, "failed to set trigger state\n");
+		return ret;
+	}
+
+	data->dready_trigger_on = state;
+
+	return 0;
+}
+
+static const struct iio_trigger_ops stk8312_trigger_ops = {
+	.set_trigger_state = stk8312_data_rdy_trigger_set_state,
+	.owner = THIS_MODULE,
+};
+
+static int stk8312_set_sample_rate(struct stk8312_data *data, u8 rate)
+{
+	int ret;
+	u8 masked_reg;
+	u8 mode;
+	struct i2c_client *client = data->client;
+
+	if (rate == data->sample_rate_idx)
+		return 0;
+
+	mode = data->mode;
+	/* We need to go into standby mode to modify registers */
+	ret = stk8312_set_mode(data, STK8312_MODE_STANDBY);
+	if (ret < 0)
+		return ret;
+
+	ret = i2c_smbus_read_byte_data(client, STK8312_REG_SR);
+	if (ret < 0)
+		goto err_activate;
+
+	masked_reg = (ret & (~STK8312_SR_MASK)) | rate;
+
+	ret = i2c_smbus_write_byte_data(client, STK8312_REG_SR, masked_reg);
+	if (ret < 0)
+		goto err_activate;
+
+	data->sample_rate_idx = rate;
+
+	return stk8312_set_mode(data, mode);
+
+err_activate:
+	dev_err(&client->dev, "failed to set sampling rate\n");
+	stk8312_set_mode(data, mode);
+
+	return ret;
+}
+
 static int stk8312_set_range(struct stk8312_data *data, u8 range)
 {
 	int ret;
@@ -182,21 +298,25 @@
 		return ret;
 
 	ret = i2c_smbus_read_byte_data(client, STK8312_REG_STH);
-	if (ret < 0) {
-		dev_err(&client->dev, "failed to change sensor range\n");
-		return ret;
-	}
+	if (ret < 0)
+		goto err_activate;
 
 	masked_reg = ret & (~STK8312_RNG_MASK);
 	masked_reg |= range << STK8312_RNG_SHIFT;
 
 	ret = i2c_smbus_write_byte_data(client, STK8312_REG_STH, masked_reg);
 	if (ret < 0)
-		dev_err(&client->dev, "failed to change sensor range\n");
-	else
-		data->range = range;
+		goto err_activate;
+
+	data->range = range;
 
 	return stk8312_set_mode(data, mode);
+
+err_activate:
+	dev_err(&client->dev, "failed to change sensor range\n");
+	stk8312_set_mode(data, mode);
+
+	return ret;
 }
 
 static int stk8312_read_accel(struct stk8312_data *data, u8 address)
@@ -208,12 +328,10 @@
 		return -EINVAL;
 
 	ret = i2c_smbus_read_byte_data(client, address);
-	if (ret < 0) {
+	if (ret < 0)
 		dev_err(&client->dev, "register read failed\n");
-		return ret;
-	}
 
-	return sign_extend32(ret, 7);
+	return ret;
 }
 
 static int stk8312_read_raw(struct iio_dev *indio_dev,
@@ -221,20 +339,40 @@
 			    int *val, int *val2, long mask)
 {
 	struct stk8312_data *data = iio_priv(indio_dev);
-
-	if (chan->type != IIO_ACCEL)
-		return -EINVAL;
+	int ret;
 
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
+		if (iio_buffer_enabled(indio_dev))
+			return -EBUSY;
 		mutex_lock(&data->lock);
-		*val = stk8312_read_accel(data, chan->address);
+		ret = stk8312_set_mode(data, data->mode | STK8312_MODE_ACTIVE);
+		if (ret < 0) {
+			mutex_unlock(&data->lock);
+			return ret;
+		}
+		ret = stk8312_read_accel(data, chan->address);
+		if (ret < 0) {
+			stk8312_set_mode(data,
+					 data->mode & (~STK8312_MODE_ACTIVE));
+			mutex_unlock(&data->lock);
+			return ret;
+		}
+		*val = sign_extend32(ret, 7);
+		ret = stk8312_set_mode(data,
+				       data->mode & (~STK8312_MODE_ACTIVE));
 		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SCALE:
 		*val = stk8312_scale_table[data->range - 1][0];
 		*val2 = stk8312_scale_table[data->range - 1][1];
 		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = stk8312_samp_freq_table[data->sample_rate_idx].val;
+		*val2 = stk8312_samp_freq_table[data->sample_rate_idx].val2;
+		return IIO_VAL_INT_PLUS_MICRO;
 	}
 
 	return -EINVAL;
@@ -265,6 +403,20 @@
 		mutex_unlock(&data->lock);
 
 		return ret;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		for (i = 0; i < ARRAY_SIZE(stk8312_samp_freq_table); i++)
+			if (val == stk8312_samp_freq_table[i].val &&
+			    val2 == stk8312_samp_freq_table[i].val2) {
+				index = i;
+				break;
+			}
+		if (index < 0)
+			return -EINVAL;
+		mutex_lock(&data->lock);
+		ret = stk8312_set_sample_rate(data, index);
+		mutex_unlock(&data->lock);
+
+		return ret;
 	}
 
 	return -EINVAL;
@@ -277,6 +429,105 @@
 	.attrs			= &stk8312_attribute_group,
 };
 
+static irqreturn_t stk8312_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct stk8312_data *data = iio_priv(indio_dev);
+	int bit, ret, i = 0;
+
+	mutex_lock(&data->lock);
+	/*
+	 * Do a bulk read if all channels are requested,
+	 * from 0x00 (XOUT) to 0x02 (ZOUT)
+	 */
+	if (*(indio_dev->active_scan_mask) == STK8312_ALL_CHANNEL_MASK) {
+		ret = i2c_smbus_read_i2c_block_data(data->client,
+						    STK8312_REG_XOUT,
+						    STK8312_ALL_CHANNEL_SIZE,
+						    data->buffer);
+		if (ret < STK8312_ALL_CHANNEL_SIZE) {
+			dev_err(&data->client->dev, "register read failed\n");
+			mutex_unlock(&data->lock);
+			goto err;
+		}
+	} else {
+		for_each_set_bit(bit, indio_dev->active_scan_mask,
+				 indio_dev->masklength) {
+			ret = stk8312_read_accel(data, bit);
+			if (ret < 0) {
+				mutex_unlock(&data->lock);
+				goto err;
+			}
+			data->buffer[i++] = ret;
+		}
+	}
+	mutex_unlock(&data->lock);
+
+	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+					   pf->timestamp);
+err:
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t stk8312_data_rdy_trig_poll(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct stk8312_data *data = iio_priv(indio_dev);
+
+	if (data->dready_trigger_on)
+		iio_trigger_poll(data->dready_trig);
+
+	return IRQ_HANDLED;
+}
+
+static int stk8312_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct stk8312_data *data = iio_priv(indio_dev);
+
+	return stk8312_set_mode(data, data->mode | STK8312_MODE_ACTIVE);
+}
+
+static int stk8312_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct stk8312_data *data = iio_priv(indio_dev);
+
+	return stk8312_set_mode(data, data->mode & (~STK8312_MODE_ACTIVE));
+}
+
+static const struct iio_buffer_setup_ops stk8312_buffer_setup_ops = {
+	.preenable   = stk8312_buffer_preenable,
+	.postenable  = iio_triggered_buffer_postenable,
+	.predisable  = iio_triggered_buffer_predisable,
+	.postdisable = stk8312_buffer_postdisable,
+};
+
+static int stk8312_gpio_probe(struct i2c_client *client)
+{
+	struct device *dev;
+	struct gpio_desc *gpio;
+	int ret;
+
+	if (!client)
+		return -EINVAL;
+
+	dev = &client->dev;
+
+	/* data ready gpio interrupt pin */
+	gpio = devm_gpiod_get_index(dev, STK8312_GPIO, 0, GPIOD_IN);
+	if (IS_ERR(gpio)) {
+		dev_err(dev, "acpi gpio get index failed\n");
+		return PTR_ERR(gpio);
+	}
+
+	ret = gpiod_to_irq(gpio);
+	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+	return ret;
+}
+
 static int stk8312_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
@@ -308,30 +559,91 @@
 		dev_err(&client->dev, "failed to reset sensor\n");
 		return ret;
 	}
-	ret = stk8312_set_range(data, 1);
+	data->sample_rate_idx = STK8312_SR_400HZ_IDX;
+	ret = stk8312_set_range(data, STK8312_RNG_6G);
 	if (ret < 0)
 		return ret;
 
-	ret = stk8312_set_mode(data, STK8312_MODE_ACTIVE);
+	ret = stk8312_set_mode(data,
+			       STK8312_MODE_INT_AH_PP | STK8312_MODE_ACTIVE);
 	if (ret < 0)
 		return ret;
 
+	if (client->irq < 0)
+		client->irq = stk8312_gpio_probe(client);
+
+	if (client->irq >= 0) {
+		ret = devm_request_threaded_irq(&client->dev, client->irq,
+						stk8312_data_rdy_trig_poll,
+						NULL,
+						IRQF_TRIGGER_RISING |
+						IRQF_ONESHOT,
+						STK8312_IRQ_NAME,
+						indio_dev);
+		if (ret < 0) {
+			dev_err(&client->dev, "request irq %d failed\n",
+				client->irq);
+			goto err_power_off;
+		}
+
+		data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+							   "%s-dev%d",
+							   indio_dev->name,
+							   indio_dev->id);
+		if (!data->dready_trig) {
+			ret = -ENOMEM;
+			goto err_power_off;
+		}
+
+		data->dready_trig->dev.parent = &client->dev;
+		data->dready_trig->ops = &stk8312_trigger_ops;
+		iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+		ret = iio_trigger_register(data->dready_trig);
+		if (ret) {
+			dev_err(&client->dev, "iio trigger register failed\n");
+			goto err_power_off;
+		}
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev,
+					 iio_pollfunc_store_time,
+					 stk8312_trigger_handler,
+					 &stk8312_buffer_setup_ops);
+	if (ret < 0) {
+		dev_err(&client->dev, "iio triggered buffer setup failed\n");
+		goto err_trigger_unregister;
+	}
+
 	ret = iio_device_register(indio_dev);
 	if (ret < 0) {
 		dev_err(&client->dev, "device_register failed\n");
-		stk8312_set_mode(data, STK8312_MODE_STANDBY);
+		goto err_buffer_cleanup;
 	}
 
+	return 0;
+
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+	if (data->dready_trig)
+		iio_trigger_unregister(data->dready_trig);
+err_power_off:
+	stk8312_set_mode(data, STK8312_MODE_STANDBY);
 	return ret;
 }
 
 static int stk8312_remove(struct i2c_client *client)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct stk8312_data *data = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
 
-	return stk8312_set_mode(iio_priv(indio_dev), STK8312_MODE_STANDBY);
+	if (data->dready_trig)
+		iio_trigger_unregister(data->dready_trig);
+
+	return stk8312_set_mode(data, STK8312_MODE_STANDBY);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -341,7 +653,7 @@
 
 	data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
 
-	return stk8312_set_mode(data, STK8312_MODE_STANDBY);
+	return stk8312_set_mode(data, data->mode & (~STK8312_MODE_ACTIVE));
 }
 
 static int stk8312_resume(struct device *dev)
@@ -350,7 +662,7 @@
 
 	data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
 
-	return stk8312_set_mode(data, STK8312_MODE_ACTIVE);
+	return stk8312_set_mode(data, data->mode | STK8312_MODE_ACTIVE);
 }
 
 static SIMPLE_DEV_PM_OPS(stk8312_pm_ops, stk8312_suspend, stk8312_resume);
@@ -364,6 +676,7 @@
 	{"STK8312", 0},
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, stk8312_i2c_id);
 
 static const struct acpi_device_id stk8312_acpi_id[] = {
 	{"STK8312", 0},
@@ -374,7 +687,7 @@
 
 static struct i2c_driver stk8312_driver = {
 	.driver = {
-		.name = "stk8312",
+		.name = STK8312_DRIVER_NAME,
 		.pm = STK8312_PM_OPS,
 		.acpi_match_table = ACPI_PTR(stk8312_acpi_id),
 	},
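For reference, a minimal sketch of the suspend/resume masking used above: only the ACTIVE bit is cleared on suspend and set again on resume, so the interrupt/output configuration programmed in probe (e.g. STK8312_MODE_INT_AH_PP) survives the cycle. The bit values below are placeholders, not datasheet values.

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder bit values; the real masks are defined in stk8312.c. */
	#define MODE_ACTIVE	0x01
	#define MODE_INT_AH_PP	0x40

	int main(void)
	{
		uint8_t mode = MODE_INT_AH_PP | MODE_ACTIVE;	/* as programmed in probe */
		uint8_t suspended = mode & ~MODE_ACTIVE;	/* suspend: clear ACTIVE only */
		uint8_t resumed = suspended | MODE_ACTIVE;	/* resume: set ACTIVE again */

		printf("probe=0x%02x suspend=0x%02x resume=0x%02x\n",
		       mode, suspended, resumed);
		return 0;
	}
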
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 30950c6..80f77d8 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -11,26 +11,42 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/iio/buffer.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
 
 #define STK8BA50_REG_XOUT			0x02
 #define STK8BA50_REG_YOUT			0x04
 #define STK8BA50_REG_ZOUT			0x06
 #define STK8BA50_REG_RANGE			0x0F
+#define STK8BA50_REG_BWSEL			0x10
 #define STK8BA50_REG_POWMODE			0x11
 #define STK8BA50_REG_SWRST			0x14
+#define STK8BA50_REG_INTEN2			0x17
+#define STK8BA50_REG_INTMAP2			0x1A
 
 #define STK8BA50_MODE_NORMAL			0
 #define STK8BA50_MODE_SUSPEND			1
 #define STK8BA50_MODE_POWERBIT			BIT(7)
 #define STK8BA50_DATA_SHIFT			6
 #define STK8BA50_RESET_CMD			0xB6
+#define STK8BA50_SR_1792HZ_IDX			7
+#define STK8BA50_DREADY_INT_MASK		0x10
+#define STK8BA50_DREADY_INT_MAP			0x81
+#define STK8BA50_ALL_CHANNEL_MASK		7
+#define STK8BA50_ALL_CHANNEL_SIZE		6
 
 #define STK8BA50_DRIVER_NAME			"stk8ba50"
+#define STK8BA50_GPIO				"stk8ba50_gpio"
+#define STK8BA50_IRQ_NAME			"stk8ba50_event"
 
 #define STK8BA50_SCALE_AVAIL			"0.0384 0.0767 0.1534 0.3069"
 
@@ -50,35 +66,76 @@
  *
  * Locally, the range is stored as a table index.
  */
-static const int stk8ba50_scale_table[][2] = {
+static const struct {
+	u8 reg_val;
+	u32 scale_val;
+} stk8ba50_scale_table[] = {
 	{3, 38400}, {5, 76700}, {8, 153400}, {12, 306900}
 };
 
+/* Sample rates are stored as { <register value>, <Hz value> } */
+static const struct {
+	u8 reg_val;
+	u16 samp_freq;
+} stk8ba50_samp_freq_table[] = {
+	{0x08, 14},  {0x09, 25},  {0x0A, 56},  {0x0B, 112},
+	{0x0C, 224}, {0x0D, 448}, {0x0E, 896}, {0x0F, 1792}
+};
+
+/* Used to map scan mask bits to their corresponding channel register. */
+static const int stk8ba50_channel_table[] = {
+	STK8BA50_REG_XOUT,
+	STK8BA50_REG_YOUT,
+	STK8BA50_REG_ZOUT
+};
+
 struct stk8ba50_data {
 	struct i2c_client *client;
 	struct mutex lock;
 	int range;
+	u8 sample_rate_idx;
+	struct iio_trigger *dready_trig;
+	bool dready_trigger_on;
+	/*
+	 * 3 x 16-bit channels (10-bit data, 6-bit padding) +
+	 * 1 x 16-bit padding +
+	 * 4 x 16-bit values for the 64-bit timestamp
+	 */
+	s16 buffer[8];
 };
 
-#define STK8BA50_ACCEL_CHANNEL(reg, axis) {			\
-	.type = IIO_ACCEL,					\
-	.address = reg,						\
-	.modified = 1,						\
-	.channel2 = IIO_MOD_##axis,				\
-	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
-	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),	\
+#define STK8BA50_ACCEL_CHANNEL(index, reg, axis) {			\
+	.type = IIO_ACCEL,						\
+	.address = reg,							\
+	.modified = 1,							\
+	.channel2 = IIO_MOD_##axis,					\
+	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
+	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
+				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
+	.scan_index = index,						\
+	.scan_type = {							\
+		.sign = 's',						\
+		.realbits = 10,						\
+		.storagebits = 16,					\
+		.shift = STK8BA50_DATA_SHIFT,				\
+		.endianness = IIO_CPU,					\
+	},								\
 }
 
 static const struct iio_chan_spec stk8ba50_channels[] = {
-	STK8BA50_ACCEL_CHANNEL(STK8BA50_REG_XOUT, X),
-	STK8BA50_ACCEL_CHANNEL(STK8BA50_REG_YOUT, Y),
-	STK8BA50_ACCEL_CHANNEL(STK8BA50_REG_ZOUT, Z),
+	STK8BA50_ACCEL_CHANNEL(0, STK8BA50_REG_XOUT, X),
+	STK8BA50_ACCEL_CHANNEL(1, STK8BA50_REG_YOUT, Y),
+	STK8BA50_ACCEL_CHANNEL(2, STK8BA50_REG_ZOUT, Z),
+	IIO_CHAN_SOFT_TIMESTAMP(3),
 };
 
 static IIO_CONST_ATTR(in_accel_scale_available, STK8BA50_SCALE_AVAIL);
 
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("14 25 56 112 224 448 896 1792");
+
 static struct attribute *stk8ba50_attributes[] = {
 	&iio_const_attr_in_accel_scale_available.dev_attr.attr,
+	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
 	NULL,
 };
 
@@ -97,72 +154,34 @@
 		return ret;
 	}
 
-	return sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9);
+	return ret;
 }
 
-static int stk8ba50_read_raw(struct iio_dev *indio_dev,
-			     struct iio_chan_spec const *chan,
-			     int *val, int *val2, long mask)
+static int stk8ba50_data_rdy_trigger_set_state(struct iio_trigger *trig,
+					       bool state)
 {
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
 	struct stk8ba50_data *data = iio_priv(indio_dev);
-
-	switch (mask) {
-	case IIO_CHAN_INFO_RAW:
-		mutex_lock(&data->lock);
-		*val = stk8ba50_read_accel(data, chan->address);
-		mutex_unlock(&data->lock);
-		return IIO_VAL_INT;
-	case IIO_CHAN_INFO_SCALE:
-		*val = 0;
-		*val2 = stk8ba50_scale_table[data->range][1];
-		return IIO_VAL_INT_PLUS_MICRO;
-	}
-
-	return -EINVAL;
-}
-
-static int stk8ba50_write_raw(struct iio_dev *indio_dev,
-			      struct iio_chan_spec const *chan,
-			      int val, int val2, long mask)
-{
 	int ret;
-	int i;
-	int index = -1;
-	struct stk8ba50_data *data = iio_priv(indio_dev);
 
-	switch (mask) {
-	case IIO_CHAN_INFO_SCALE:
-		if (val != 0)
-			return -EINVAL;
-
-		for (i = 0; i < ARRAY_SIZE(stk8ba50_scale_table); i++)
-			if (val2 == stk8ba50_scale_table[i][1]) {
-				index = i;
-				break;
-			}
-		if (index < 0)
-			return -EINVAL;
-
+	if (state)
 		ret = i2c_smbus_write_byte_data(data->client,
-				STK8BA50_REG_RANGE,
-				stk8ba50_scale_table[index][0]);
-		if (ret < 0)
-			dev_err(&data->client->dev,
-					"failed to set measurement range\n");
-		else
-			data->range = index;
+			STK8BA50_REG_INTEN2, STK8BA50_DREADY_INT_MASK);
+	else
+		ret = i2c_smbus_write_byte_data(data->client,
+			STK8BA50_REG_INTEN2, 0x00);
 
-		return ret;
-	}
+	if (ret < 0)
+		dev_err(&data->client->dev, "failed to set trigger state\n");
+	else
+		data->dready_trigger_on = state;
 
-	return -EINVAL;
+	return ret;
 }
 
-static const struct iio_info stk8ba50_info = {
-	.driver_module		= THIS_MODULE,
-	.read_raw		= stk8ba50_read_raw,
-	.write_raw		= stk8ba50_write_raw,
-	.attrs			= &stk8ba50_attribute_group,
+static const struct iio_trigger_ops stk8ba50_trigger_ops = {
+	.set_trigger_state = stk8ba50_data_rdy_trigger_set_state,
+	.owner = THIS_MODULE,
 };
 
 static int stk8ba50_set_power(struct stk8ba50_data *data, bool mode)
@@ -192,6 +211,207 @@
 	return ret;
 }
 
+static int stk8ba50_read_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan,
+			     int *val, int *val2, long mask)
+{
+	struct stk8ba50_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (iio_buffer_enabled(indio_dev))
+			return -EBUSY;
+		mutex_lock(&data->lock);
+		ret = stk8ba50_set_power(data, STK8BA50_MODE_NORMAL);
+		if (ret < 0) {
+			mutex_unlock(&data->lock);
+			return -EINVAL;
+		}
+		ret = stk8ba50_read_accel(data, chan->address);
+		if (ret < 0) {
+			stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+			mutex_unlock(&data->lock);
+			return -EINVAL;
+		}
+		*val = sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9);
+		stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+		mutex_unlock(&data->lock);
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		*val = 0;
+		*val2 = stk8ba50_scale_table[data->range].scale_val;
+		return IIO_VAL_INT_PLUS_MICRO;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		*val = stk8ba50_samp_freq_table
+				[data->sample_rate_idx].samp_freq;
+		*val2 = 0;
+		return IIO_VAL_INT;
+	}
+
+	return -EINVAL;
+}
+
+static int stk8ba50_write_raw(struct iio_dev *indio_dev,
+			      struct iio_chan_spec const *chan,
+			      int val, int val2, long mask)
+{
+	int ret;
+	int i;
+	int index = -1;
+	struct stk8ba50_data *data = iio_priv(indio_dev);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		if (val != 0)
+			return -EINVAL;
+
+		for (i = 0; i < ARRAY_SIZE(stk8ba50_scale_table); i++)
+			if (val2 == stk8ba50_scale_table[i].scale_val) {
+				index = i;
+				break;
+			}
+		if (index < 0)
+			return -EINVAL;
+
+		ret = i2c_smbus_write_byte_data(data->client,
+				STK8BA50_REG_RANGE,
+				stk8ba50_scale_table[index].reg_val);
+		if (ret < 0)
+			dev_err(&data->client->dev,
+					"failed to set measurement range\n");
+		else
+			data->range = index;
+
+		return ret;
+	case IIO_CHAN_INFO_SAMP_FREQ:
+		for (i = 0; i < ARRAY_SIZE(stk8ba50_samp_freq_table); i++)
+			if (val == stk8ba50_samp_freq_table[i].samp_freq) {
+				index = i;
+				break;
+			}
+		if (index < 0)
+			return -EINVAL;
+
+		ret = i2c_smbus_write_byte_data(data->client,
+				STK8BA50_REG_BWSEL,
+				stk8ba50_samp_freq_table[index].reg_val);
+		if (ret < 0)
+			dev_err(&data->client->dev,
+					"failed to set sampling rate\n");
+		else
+			data->sample_rate_idx = index;
+
+		return ret;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info stk8ba50_info = {
+	.driver_module		= THIS_MODULE,
+	.read_raw		= stk8ba50_read_raw,
+	.write_raw		= stk8ba50_write_raw,
+	.attrs			= &stk8ba50_attribute_group,
+};
+
+static irqreturn_t stk8ba50_trigger_handler(int irq, void *p)
+{
+	struct iio_poll_func *pf = p;
+	struct iio_dev *indio_dev = pf->indio_dev;
+	struct stk8ba50_data *data = iio_priv(indio_dev);
+	int bit, ret, i = 0;
+
+	mutex_lock(&data->lock);
+	/*
+	 * Do a bulk read if all channels are requested,
+	 * from 0x02 (XOUT1) to 0x07 (ZOUT2)
+	 */
+	if (*(indio_dev->active_scan_mask) == STK8BA50_ALL_CHANNEL_MASK) {
+		ret = i2c_smbus_read_i2c_block_data(data->client,
+						    STK8BA50_REG_XOUT,
+						    STK8BA50_ALL_CHANNEL_SIZE,
+						    (u8 *)data->buffer);
+		if (ret < STK8BA50_ALL_CHANNEL_SIZE) {
+			dev_err(&data->client->dev, "register read failed\n");
+			goto err;
+		}
+	} else {
+		for_each_set_bit(bit, indio_dev->active_scan_mask,
+				 indio_dev->masklength) {
+			ret = stk8ba50_read_accel(data,
+						  stk8ba50_channel_table[bit]);
+			if (ret < 0)
+				goto err;
+
+			data->buffer[i++] = ret;
+		}
+	}
+	iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+					   pf->timestamp);
+err:
+	mutex_unlock(&data->lock);
+	iio_trigger_notify_done(indio_dev->trig);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t stk8ba50_data_rdy_trig_poll(int irq, void *private)
+{
+	struct iio_dev *indio_dev = private;
+	struct stk8ba50_data *data = iio_priv(indio_dev);
+
+	if (data->dready_trigger_on)
+		iio_trigger_poll(data->dready_trig);
+
+	return IRQ_HANDLED;
+}
+
+static int stk8ba50_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct stk8ba50_data *data = iio_priv(indio_dev);
+
+	return stk8ba50_set_power(data, STK8BA50_MODE_NORMAL);
+}
+
+static int stk8ba50_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct stk8ba50_data *data = iio_priv(indio_dev);
+
+	return stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+}
+
+static const struct iio_buffer_setup_ops stk8ba50_buffer_setup_ops = {
+	.preenable   = stk8ba50_buffer_preenable,
+	.postenable  = iio_triggered_buffer_postenable,
+	.predisable  = iio_triggered_buffer_predisable,
+	.postdisable = stk8ba50_buffer_postdisable,
+};
+
+static int stk8ba50_gpio_probe(struct i2c_client *client)
+{
+	struct device *dev;
+	struct gpio_desc *gpio;
+	int ret;
+
+	if (!client)
+		return -EINVAL;
+
+	dev = &client->dev;
+
+	/* data ready gpio interrupt pin */
+	gpio = devm_gpiod_get_index(dev, STK8BA50_GPIO, 0, GPIOD_IN);
+	if (IS_ERR(gpio)) {
+		dev_err(dev, "acpi gpio get index failed\n");
+		return PTR_ERR(gpio);
+	}
+
+	ret = gpiod_to_irq(gpio);
+	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
+
+	return ret;
+}
+
 static int stk8ba50_probe(struct i2c_client *client,
 			  const struct i2c_device_id *id)
 {
@@ -222,28 +442,104 @@
 			STK8BA50_REG_SWRST, STK8BA50_RESET_CMD);
 	if (ret < 0) {
 		dev_err(&client->dev, "failed to reset sensor\n");
-		return ret;
+		goto err_power_off;
 	}
 
 	/* The default range is +/-2g */
 	data->range = 0;
 
+	/* The default sampling rate is 1792 Hz (maximum) */
+	data->sample_rate_idx = STK8BA50_SR_1792HZ_IDX;
+
+	/* Set up interrupts */
+	ret = i2c_smbus_write_byte_data(client,
+			STK8BA50_REG_INTEN2, STK8BA50_DREADY_INT_MASK);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed to set up interrupts\n");
+		goto err_power_off;
+	}
+	ret = i2c_smbus_write_byte_data(client,
+			STK8BA50_REG_INTMAP2, STK8BA50_DREADY_INT_MAP);
+	if (ret < 0) {
+		dev_err(&client->dev, "failed to set up interrupts\n");
+		goto err_power_off;
+	}
+
+	if (client->irq < 0)
+		client->irq = stk8ba50_gpio_probe(client);
+
+	if (client->irq >= 0) {
+		ret = devm_request_threaded_irq(&client->dev, client->irq,
+						stk8ba50_data_rdy_trig_poll,
+						NULL,
+						IRQF_TRIGGER_RISING |
+						IRQF_ONESHOT,
+						STK8BA50_IRQ_NAME,
+						indio_dev);
+		if (ret < 0) {
+			dev_err(&client->dev, "request irq %d failed\n",
+				client->irq);
+			goto err_power_off;
+		}
+
+		data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+							   "%s-dev%d",
+							   indio_dev->name,
+							   indio_dev->id);
+		if (!data->dready_trig) {
+			ret = -ENOMEM;
+			goto err_power_off;
+		}
+
+		data->dready_trig->dev.parent = &client->dev;
+		data->dready_trig->ops = &stk8ba50_trigger_ops;
+		iio_trigger_set_drvdata(data->dready_trig, indio_dev);
+		ret = iio_trigger_register(data->dready_trig);
+		if (ret) {
+			dev_err(&client->dev, "iio trigger register failed\n");
+			goto err_power_off;
+		}
+	}
+
+	ret = iio_triggered_buffer_setup(indio_dev,
+					 iio_pollfunc_store_time,
+					 stk8ba50_trigger_handler,
+					 &stk8ba50_buffer_setup_ops);
+	if (ret < 0) {
+		dev_err(&client->dev, "iio triggered buffer setup failed\n");
+		goto err_trigger_unregister;
+	}
+
 	ret = iio_device_register(indio_dev);
 	if (ret < 0) {
 		dev_err(&client->dev, "device_register failed\n");
-		stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+		goto err_buffer_cleanup;
 	}
 
 	return ret;
+
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+	if (data->dready_trig)
+		iio_trigger_unregister(data->dready_trig);
+err_power_off:
+	stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
+	return ret;
 }
 
 static int stk8ba50_remove(struct i2c_client *client)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct stk8ba50_data *data = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
 
-	return stk8ba50_set_power(iio_priv(indio_dev), STK8BA50_MODE_SUSPEND);
+	if (data->dready_trig)
+		iio_trigger_unregister(data->dready_trig);
+
+	return stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -276,6 +572,7 @@
 	{"stk8ba50", 0},
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, stk8ba50_i2c_id);
 
 static const struct acpi_device_id stk8ba50_acpi_id[] = {
 	{"STK8BA50", 0},
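For reference, a minimal sketch of how a userspace reader could interpret one scan record produced by the stk8ba50 trigger handler above when all three axes and the timestamp are enabled; the struct mirrors the s16 buffer[8] layout and the 10-bit/shift-6 scan_type, while the sample words and timestamp are invented.

	#include <stdint.h>
	#include <stdio.h>

	/* One scan record: 3 x s16 samples, 1 x s16 padding so the
	 * s64 timestamp that follows is 8-byte aligned. */
	struct stk8ba50_scan {
		int16_t accel[3];	/* 10-bit data in bits [15:6] */
		int16_t pad;
		int64_t timestamp;	/* ns, stored by iio_pollfunc_store_time() */
	};

	/* scan_type: sign = 's', realbits = 10, shift = 6 */
	static int sample_to_raw(int16_t word)
	{
		return word >> 6;	/* arithmetic shift preserves the sign bit */
	}

	int main(void)
	{
		struct stk8ba50_scan scan = {
			.accel = { 0x7fc0, -0x7fc0, 0 },	/* invented samples */
			.timestamp = 1234567890,
		};
		int i;

		for (i = 0; i < 3; i++)
			printf("axis %d raw = %d\n", i, sample_to_raw(scan.accel[i]));
		printf("timestamp = %lld ns\n", (long long)scan.timestamp);
		return 0;
	}
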
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index eb0cd89..50c103d 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -20,6 +20,9 @@
 	  Say yes here to build support for Analog Devices AD7265 and AD7266
 	  ADCs.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called ad7266.
+
 config AD7291
 	tristate "Analog Devices AD7291 ADC driver"
 	depends on I2C
@@ -52,8 +55,6 @@
 	  AD7277, AD7278, AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468,
 	  AD7495, AD7910, AD7920, AD7920 SPI analog to digital converters (ADC).
 
-	  If unsure, say N (but it's safe to say "Y").
-
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad7476.
 
@@ -63,8 +64,7 @@
 	select AD_SIGMA_DELTA
 	help
 	  Say yes here to build support for Analog Devices AD7787, AD7788, AD7789,
-	  AD7790 and AD7791 SPI analog to digital converters (ADC). If unsure, say
-	  N (but it is safe to say "Y").
+	  AD7790 and AD7791 SPI analog to digital converters (ADC).
 
 	  To compile this driver as a module, choose M here: the module will be
 	  called ad7791.
@@ -76,7 +76,6 @@
 	help
 	  Say yes here to build support for Analog Devices AD7785, AD7792, AD7793,
 	  AD7794 and AD7795 SPI analog to digital converters (ADC).
-	  If unsure, say N (but it's safe to say "Y").
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called AD7793.
@@ -89,7 +88,6 @@
 	help
 	  Say yes here to build support for Analog Devices
 	  AD7887 SPI analog to digital converter (ADC).
-	  If unsure, say N (but it's safe to say "Y").
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad7887.
@@ -117,6 +115,9 @@
 	  i2c analog to digital converters (ADC). Provides direct access
 	  via sysfs.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called ad799x.
+
 config AT91_ADC
 	tristate "Atmel AT91 ADC"
 	depends on ARCH_AT91
@@ -127,6 +128,9 @@
 	help
 	  Say yes here to build support for Atmel AT91 ADC.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called at91_adc.
+
 config AXP288_ADC
 	tristate "X-Powers AXP288 ADC driver"
 	depends on MFD_AXP20X
@@ -135,6 +139,9 @@
 	  device. Depending on platform configuration, this general purpose ADC can
 	  be used for sampling sensors such as thermal resistors.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called axp288_adc.
+
 config BERLIN2_ADC
 	tristate "Marvell Berlin2 ADC driver"
 	depends on ARCH_BERLIN
@@ -151,6 +158,9 @@
-	  This driver can also be built as a module. If chosen, the module name
-	  will be da9150-gpadc.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called berlin2-adc.
+
 config CC10001_ADC
 	tristate "Cosmic Circuits 10001 ADC driver"
 	depends on HAS_IOMEM && HAVE_CLK && REGULATOR
@@ -170,12 +180,18 @@
 	  of SoCs for drivers such as the touchscreen and hwmon to use to share
 	  this resource.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called exynos_adc.
+
 config LP8788_ADC
 	tristate "LP8788 ADC driver"
 	depends on MFD_LP8788
 	help
 	  Say yes here to build support for TI LP8788 ADC.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called lp8788_adc.
+
 config MAX1027
 	tristate "Maxim max1027 ADC driver"
 	depends on SPI
@@ -185,6 +201,9 @@
 	  Say yes here to build support for Maxim SPI ADC models
 	  max1027, max1029 and max1031.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called max1027.
+
 config MAX1363
 	tristate "Maxim max1363 ADC driver"
 	depends on I2C
@@ -201,13 +220,16 @@
 	  max11646, max11647) Provides direct access via sysfs and buffered
 	  data via the iio dev interface.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called max1363.
+
 config MCP320X
 	tristate "Microchip Technology MCP3x01/02/04/08"
 	depends on SPI
 	help
 	  Say yes here to build support for Microchip Technology's
-	  MCP3001, MCP3002, MCP3004, MCP3008, MCP3201, MCP3202, MCP3204 or
-	  MCP3208 analog to digital converter.
+	  MCP3001, MCP3002, MCP3004, MCP3008, MCP3201, MCP3202, MCP3204,
+	  MCP3208 or MCP3301 analog to digital converter.
 
 	  This driver can also be built as a module. If so, the module will be
 	  called mcp320x.
@@ -309,15 +331,18 @@
 	  Say yes here to build support for Texas Instruments ADC
 	  driver which is also a MFD client.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called ti_am335x_adc.
+
 config TWL4030_MADC
 	tristate "TWL4030 MADC (Monitoring A/D Converter)"
 	depends on TWL4030_CORE
 	help
-	This driver provides support for Triton TWL4030-MADC. The
-	driver supports both RT and SW conversion methods.
+	  This driver provides support for Triton TWL4030-MADC. The
+	  driver supports both RT and SW conversion methods.
 
-	This driver can also be built as a module. If so, the module will be
-	called twl4030-madc.
+	  This driver can also be built as a module. If so, the module will be
+	  called twl4030-madc.
 
 config TWL6030_GPADC
 	tristate "TWL6030 GPADC (General Purpose A/D Converter) Support"
@@ -350,6 +375,9 @@
 	  Say yes here to access the ADC part of the Nano River
 	  Technologies Viperboard.
 
+	  To compile this driver as a module, choose M here: the module will be
+	  called viperboard_adc.
+
 config XILINX_XADC
 	tristate "Xilinx XADC driver"
 	depends on ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index aecc9ad..4946d9b 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -26,7 +26,7 @@
 #define BERLIN2_SM_CTRL				0x14
 #define  BERLIN2_SM_CTRL_SM_SOC_INT		BIT(1)
 #define  BERLIN2_SM_CTRL_SOC_SM_INT		BIT(2)
-#define  BERLIN2_SM_CTRL_ADC_SEL(x)		(BIT(x) << 5)	/* 0-15 */
+#define  BERLIN2_SM_CTRL_ADC_SEL(x)		((x) << 5)	/* 0-15 */
 #define  BERLIN2_SM_CTRL_ADC_SEL_MASK		(0xf << 5)
 #define  BERLIN2_SM_CTRL_ADC_POWER		BIT(9)
 #define  BERLIN2_SM_CTRL_ADC_CLKSEL_DIV2	(0x0 << 10)
@@ -53,14 +53,14 @@
 #define  BERLIN2_SM_ADC_MASK			0x3ff
 #define BERLIN2_SM_ADC_STATUS			0x1c
 #define  BERLIN2_SM_ADC_STATUS_DATA_RDY(x)	BIT(x)		/* 0-15 */
-#define  BERLIN2_SM_ADC_STATUS_DATA_RDY_MASK	0xf
+#define  BERLIN2_SM_ADC_STATUS_DATA_RDY_MASK	GENMASK(15, 0)
 #define  BERLIN2_SM_ADC_STATUS_INT_EN(x)	(BIT(x) << 16)	/* 0-15 */
-#define  BERLIN2_SM_ADC_STATUS_INT_EN_MASK	(0xf << 16)
+#define  BERLIN2_SM_ADC_STATUS_INT_EN_MASK	GENMASK(31, 16)
 #define BERLIN2_SM_TSEN_STATUS			0x24
 #define  BERLIN2_SM_TSEN_STATUS_DATA_RDY	BIT(0)
 #define  BERLIN2_SM_TSEN_STATUS_INT_EN		BIT(1)
 #define BERLIN2_SM_TSEN_DATA			0x28
-#define  BERLIN2_SM_TSEN_MASK			0xfff
+#define  BERLIN2_SM_TSEN_MASK			GENMASK(9, 0)
 #define BERLIN2_SM_TSEN_CTRL			0x74
 #define  BERLIN2_SM_TSEN_CTRL_START		BIT(8)
 #define  BERLIN2_SM_TSEN_CTRL_SETTLING_4	(0x0 << 21)	/* 4 us */
@@ -86,7 +86,7 @@
 			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),	\
 		}
 
-static struct iio_chan_spec berlin2_adc_channels[] = {
+static const struct iio_chan_spec berlin2_adc_channels[] = {
 	BERLIN2_ADC_CHANNEL(0, IIO_VOLTAGE),	/* external input */
 	BERLIN2_ADC_CHANNEL(1, IIO_VOLTAGE),	/* external input */
 	BERLIN2_ADC_CHANNEL(2, IIO_VOLTAGE),	/* external input */
@@ -103,7 +103,6 @@
 	BERLIN2_ADC_CHANNEL(7, IIO_VOLTAGE),	/* reserved */
 	IIO_CHAN_SOFT_TIMESTAMP(8),		/* timestamp */
 };
-#define BERLIN2_N_CHANNELS	ARRAY_SIZE(berlin2_adc_channels)
 
 static int berlin2_adc_read(struct iio_dev *indio_dev, int channel)
 {
@@ -221,7 +220,7 @@
 			return temp;
 
 		if (temp > 2047)
-			temp = -(4096 - temp);
+			temp -= 4096;
 
 		/* Convert to milli Celsius */
 		*val = ((temp * 100000) / 264 - 270000);
@@ -286,8 +285,7 @@
 	int irq, tsen_irq;
 	int ret;
 
-	indio_dev = devm_iio_device_alloc(&pdev->dev,
-					  sizeof(struct berlin2_adc_priv));
+	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
 	if (!indio_dev)
 		return -ENOMEM;
 
@@ -301,11 +299,11 @@
 
 	irq = platform_get_irq_byname(pdev, "adc");
 	if (irq < 0)
-		return -ENODEV;
+		return irq;
 
 	tsen_irq = platform_get_irq_byname(pdev, "tsen");
 	if (tsen_irq < 0)
-		return -ENODEV;
+		return tsen_irq;
 
 	ret = devm_request_irq(&pdev->dev, irq, berlin2_adc_irq, 0,
 			pdev->dev.driver->name, indio_dev);
@@ -325,8 +323,8 @@
 	indio_dev->modes = INDIO_DIRECT_MODE;
 	indio_dev->info = &berlin2_adc_info;
 
-	indio_dev->num_channels = BERLIN2_N_CHANNELS;
 	indio_dev->channels = berlin2_adc_channels;
+	indio_dev->num_channels = ARRAY_SIZE(berlin2_adc_channels);
 
 	/* Power up the ADC */
 	regmap_update_bits(priv->regmap, BERLIN2_SM_CTRL,
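For reference, a minimal sketch of the channel-select change above: ADC_SEL is a 4-bit channel number held in bits [8:5], so shifting the value itself is what the hardware expects, whereas the old BIT(x) << 5 form wrote a one-hot mask into the field (and overflowed it for channels 4 and up). Everything below just re-derives the two macro variants.

	#include <stdio.h>

	#define BIT(n)		(1U << (n))
	#define ADC_SEL_OLD(x)	(BIT(x) << 5)	/* old form: one-hot value in the field */
	#define ADC_SEL_NEW(x)	((x) << 5)	/* fixed form: channel number in bits [8:5] */
	#define ADC_SEL_MASK	(0xfU << 5)

	int main(void)
	{
		unsigned int ch;

		for (ch = 0; ch < 8; ch++)
			printf("ch %u: old=0x%04x (fits mask: %d)  new=0x%04x (fits mask: %d)\n",
			       ch,
			       ADC_SEL_OLD(ch), !(ADC_SEL_OLD(ch) & ~ADC_SEL_MASK),
			       ADC_SEL_NEW(ch), !(ADC_SEL_NEW(ch) & ~ADC_SEL_MASK));
		return 0;
	}
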
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 115f6e9..8254f52 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -62,6 +62,7 @@
 	struct regulator *reg;
 	u16 *buf;
 
+	bool shared;
 	struct mutex lock;
 	unsigned int start_delay_ns;
 	unsigned int eoc_delay_ns;
@@ -153,7 +154,8 @@
 
 	mutex_lock(&adc_dev->lock);
 
-	cc10001_adc_power_up(adc_dev);
+	if (!adc_dev->shared)
+		cc10001_adc_power_up(adc_dev);
 
 	/* Calculate delay step for eoc and sampled data */
 	delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
@@ -177,7 +179,8 @@
 	}
 
 done:
-	cc10001_adc_power_down(adc_dev);
+	if (!adc_dev->shared)
+		cc10001_adc_power_down(adc_dev);
 
 	mutex_unlock(&adc_dev->lock);
 
@@ -196,7 +199,8 @@
 	unsigned int delay_ns;
 	u16 val;
 
-	cc10001_adc_power_up(adc_dev);
+	if (!adc_dev->shared)
+		cc10001_adc_power_up(adc_dev);
 
 	/* Calculate delay step for eoc and sampled data */
 	delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
@@ -205,7 +209,8 @@
 
 	val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
 
-	cc10001_adc_power_down(adc_dev);
+	if (!adc_dev->shared)
+		cc10001_adc_power_down(adc_dev);
 
 	return val;
 }
@@ -322,8 +327,10 @@
 	adc_dev = iio_priv(indio_dev);
 
 	channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
-	if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
+	if (!of_property_read_u32(node, "adc-reserved-channels", &ret)) {
+		adc_dev->shared = true;
 		channel_map &= ~ret;
+	}
 
 	adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
 	if (IS_ERR(adc_dev->reg))
@@ -368,6 +375,14 @@
 	adc_dev->eoc_delay_ns = NSEC_PER_SEC / adc_clk_rate;
 	adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
 
+	/*
+	 * There is only one register to power-up/power-down the AUX ADC.
+	 * If the ADC is shared among multiple CPUs, always power it up here.
+	 * If the ADC is used only by the MIPS, power-up/power-down at runtime.
+	 */
+	if (adc_dev->shared)
+		cc10001_adc_power_up(adc_dev);
+
 	/* Setup the ADC channels available on the device */
 	ret = cc10001_adc_channel_init(indio_dev, channel_map);
 	if (ret < 0)
@@ -402,6 +417,7 @@
 	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
 	struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
 
+	cc10001_adc_power_down(adc_dev);
 	iio_device_unregister(indio_dev);
 	iio_triggered_buffer_cleanup(indio_dev);
 	clk_disable_unprepare(adc_dev->adc_clk);
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index d819823..b19e4f9 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -25,6 +25,7 @@
  * http://ww1.microchip.com/downloads/en/DeviceDoc/21290D.pdf  mcp3201
  * http://ww1.microchip.com/downloads/en/DeviceDoc/21034D.pdf  mcp3202
  * http://ww1.microchip.com/downloads/en/DeviceDoc/21298c.pdf  mcp3204/08
+ * http://ww1.microchip.com/downloads/en/DeviceDoc/21700E.pdf  mcp3301
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -47,6 +48,7 @@
 	mcp3202,
 	mcp3204,
 	mcp3208,
+	mcp3301,
 };
 
 struct mcp320x_chip_info {
@@ -76,6 +78,7 @@
 	switch (device_index) {
 	case mcp3001:
 	case mcp3201:
+	case mcp3301:
 		return 0;
 	case mcp3002:
 	case mcp3202:
@@ -102,7 +105,7 @@
 	adc->tx_buf = mcp320x_channel_to_tx_data(device_index,
 						channel, differential);
 
-	if (device_index != mcp3001 && device_index != mcp3201) {
+	if (device_index != mcp3001 && device_index != mcp3201 && device_index != mcp3301) {
 		ret = spi_sync(adc->spi, &adc->msg);
 		if (ret < 0)
 			return ret;
@@ -125,6 +128,8 @@
 	case mcp3204:
 	case mcp3208:
 		return (adc->rx_buf[0] << 4 | adc->rx_buf[1] >> 4);
+	case mcp3301:
+		return sign_extend32((adc->rx_buf[0] & 0x1f) << 8 | adc->rx_buf[1], 12);
 	default:
 		return -EINVAL;
 	}
@@ -274,6 +279,11 @@
 		.num_channels = ARRAY_SIZE(mcp3208_channels),
 		.resolution = 12
 	},
+	[mcp3301] = {
+		.channels = mcp3201_channels,
+		.num_channels = ARRAY_SIZE(mcp3201_channels),
+		.resolution = 13
+	},
 };
 
 static int mcp320x_probe(struct spi_device *spi)
@@ -369,6 +379,9 @@
 		.compatible = "mcp3208",
 		.data = &mcp320x_chip_infos[mcp3208],
 	}, {
+		.compatible = "mcp3301",
+		.data = &mcp320x_chip_infos[mcp3301],
+	}, {
 	}
 };
 MODULE_DEVICE_TABLE(of, mcp320x_dt_ids);
@@ -383,6 +396,7 @@
 	{ "mcp3202", mcp3202 },
 	{ "mcp3204", mcp3204 },
 	{ "mcp3208", mcp3208 },
+	{ "mcp3301", mcp3301 },
 	{ }
 };
 MODULE_DEVICE_TABLE(spi, mcp320x_id);
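For reference, a worked sketch of the 13-bit conversion added above for the MCP3301: the helper below mimics what the kernel's sign_extend32(value, 12) does (bit 12 is treated as the sign bit), and the two SPI byte pairs are invented samples, not captured data.

	#include <stdint.h>
	#include <stdio.h>

	/* Same idea as the kernel helper: extend bit 'index' upwards. */
	static int32_t sign_extend32(uint32_t value, int index)
	{
		int shift = 31 - index;

		return (int32_t)(value << shift) >> shift;
	}

	int main(void)
	{
		/* Invented rx bytes: 13-bit two's complement sample,
		 * 5 bits in rx_buf[0] and 8 bits in rx_buf[1]. */
		uint8_t rx_buf[2] = { 0x1f, 0xff };		/* all ones -> -1 */
		uint32_t raw = (rx_buf[0] & 0x1f) << 8 | rx_buf[1];

		printf("raw=0x%04x -> %d\n", raw, sign_extend32(raw, 12));

		rx_buf[0] = 0x0f; rx_buf[1] = 0xff;		/* max positive -> 4095 */
		raw = (rx_buf[0] & 0x1f) << 8 | rx_buf[1];
		printf("raw=0x%04x -> %d\n", raw, sign_extend32(raw, 12));
		return 0;
	}
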
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index b96c636..3555122 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -404,7 +404,6 @@
 static struct i2c_driver mcp3422_driver = {
 	.driver = {
 		.name = "mcp3422",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(mcp3422_of_match),
 	},
 	.probe = mcp3422_probe,
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index b3a82b4..2c8374f 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -140,7 +140,6 @@
 static struct i2c_driver adc081c_driver = {
 	.driver = {
 		.name = "adc081c",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(adc081c_of_match),
 	},
 	.probe = adc081c_probe,
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 819632b..6bf4c20 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -68,6 +68,9 @@
 #define VF610_ADC_CLK_DIV8		0x60
 #define VF610_ADC_CLK_MASK		0x60
 #define VF610_ADC_ADLSMP_LONG		0x10
+#define VF610_ADC_ADSTS_SHORT   0x100
+#define VF610_ADC_ADSTS_NORMAL  0x200
+#define VF610_ADC_ADSTS_LONG    0x300
 #define VF610_ADC_ADSTS_MASK		0x300
 #define VF610_ADC_ADLPC_EN		0x80
 #define VF610_ADC_ADHSC_EN		0x400
@@ -98,6 +101,8 @@
 #define VF610_ADC_CALF			0x2
 #define VF610_ADC_TIMEOUT		msecs_to_jiffies(100)
 
+#define DEFAULT_SAMPLE_TIME		1000
+
 enum clk_sel {
 	VF610_ADCIOC_BUSCLK_SET,
 	VF610_ADCIOC_ALTCLK_SET,
@@ -124,6 +129,17 @@
 	VF610_ADC_CONV_LOW_POWER,
 };
 
+enum lst_adder_sel {
+	VF610_ADCK_CYCLES_3,
+	VF610_ADCK_CYCLES_5,
+	VF610_ADCK_CYCLES_7,
+	VF610_ADCK_CYCLES_9,
+	VF610_ADCK_CYCLES_13,
+	VF610_ADCK_CYCLES_17,
+	VF610_ADCK_CYCLES_21,
+	VF610_ADCK_CYCLES_25,
+};
+
 struct vf610_adc_feature {
 	enum clk_sel	clk_sel;
 	enum vol_ref	vol_ref;
@@ -132,6 +148,8 @@
 	int	clk_div;
 	int     sample_rate;
 	int	res_mode;
+	u32 lst_adder_index;
+	u32 default_sample_time;
 
 	bool	calibration;
 	bool	ovwren;
@@ -155,11 +173,13 @@
 };
 
 static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
+static const u32 vf610_lst_adder[] = { 3, 5, 7, 9, 13, 17, 21, 25 };
 
 static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
 {
 	struct vf610_adc_feature *adc_feature = &info->adc_feature;
 	unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
+	u32 adck_period, lst_addr_min;
 	int divisor, i;
 
 	adck_rate = info->max_adck_rate[adc_feature->conv_mode];
@@ -174,6 +194,19 @@
 	}
 
 	/*
+	 * Determine the long sample time adder value to be used based
+	 * on the default minimum sample time provided.
+	 */
+	adck_period = NSEC_PER_SEC / adck_rate;
+	lst_addr_min = adc_feature->default_sample_time / adck_period;
+	for (i = 0; i < ARRAY_SIZE(vf610_lst_adder); i++) {
+		if (vf610_lst_adder[i] > lst_addr_min) {
+			adc_feature->lst_adder_index = i;
+			break;
+		}
+	}
+
+	/*
 	 * Calculate ADC sample frequencies
 	 * Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
 	 * which is the same as bus clock.
@@ -182,12 +215,13 @@
 	 * SFCAdder: fixed to 6 ADCK cycles
 	 * AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
 	 * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
-	 * LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
+	 * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles
 	 */
 	adck_rate = ipg_rate / info->adc_feature.clk_div;
 	for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
 		info->sample_freq_avail[i] =
-			adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
+			adck_rate / (6 + vf610_hw_avgs[i] *
+			 (25 + vf610_lst_adder[adc_feature->lst_adder_index]));
 }
 
 static inline void vf610_adc_cfg_init(struct vf610_adc *info)
@@ -347,8 +381,40 @@
 		break;
 	}
 
-	/* Use the short sample mode */
-	cfg_data &= ~(VF610_ADC_ADLSMP_LONG | VF610_ADC_ADSTS_MASK);
+	/*
+	 * Set ADLSMP and ADSTS based on the Long Sample Time Adder value
+	 * determined.
+	 */
+	switch (adc_feature->lst_adder_index) {
+	case VF610_ADCK_CYCLES_3:
+		break;
+	case VF610_ADCK_CYCLES_5:
+		cfg_data |= VF610_ADC_ADSTS_SHORT;
+		break;
+	case VF610_ADCK_CYCLES_7:
+		cfg_data |= VF610_ADC_ADSTS_NORMAL;
+		break;
+	case VF610_ADCK_CYCLES_9:
+		cfg_data |= VF610_ADC_ADSTS_LONG;
+		break;
+	case VF610_ADCK_CYCLES_13:
+		cfg_data |= VF610_ADC_ADLSMP_LONG;
+		break;
+	case VF610_ADCK_CYCLES_17:
+		cfg_data |= VF610_ADC_ADLSMP_LONG;
+		cfg_data |= VF610_ADC_ADSTS_SHORT;
+		break;
+	case VF610_ADCK_CYCLES_21:
+		cfg_data |= VF610_ADC_ADLSMP_LONG;
+		cfg_data |= VF610_ADC_ADSTS_NORMAL;
+		break;
+	case VF610_ADCK_CYCLES_25:
+		cfg_data |= VF610_ADC_ADLSMP_LONG;
+		cfg_data |= VF610_ADC_ADSTS_LONG;
+		break;
+	default:
+		dev_err(info->dev, "error in sample time select\n");
+	}
 
 	/* update hardware average selection */
 	cfg_data &= ~VF610_ADC_AVGS_MASK;
@@ -713,6 +779,11 @@
 	of_property_read_u32_array(pdev->dev.of_node, "fsl,adck-max-frequency",
 			info->max_adck_rate, 3);
 
+	ret = of_property_read_u32(pdev->dev.of_node, "min-sample-time",
+			&info->adc_feature.default_sample_time);
+	if (ret)
+		info->adc_feature.default_sample_time = DEFAULT_SAMPLE_TIME;
+
 	platform_set_drvdata(pdev, indio_dev);
 
 	init_completion(&info->completion);
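For reference, a worked example of the sample-rate formula used in vf610_adc_calculate_rates() above; the 6-cycle SFCAdder, 25-cycle BCT, averaging table and LSTAdder table come from the code, while the 4 MHz ADCK clock is just an assumed number.

	#include <stdio.h>

	static const unsigned int hw_avgs[] = { 1, 4, 8, 16, 32 };
	static const unsigned int lst_adder[] = { 3, 5, 7, 9, 13, 17, 21, 25 };

	int main(void)
	{
		unsigned long adck_rate = 4000000;	/* assumed 4 MHz ADCK clock */
		unsigned int lst = lst_adder[0];	/* shortest long-sample adder */
		unsigned int i;

		/* sample_freq = ADCK / (SFCAdder + AverageNum * (BCT + LSTAdder)) */
		for (i = 0; i < sizeof(hw_avgs) / sizeof(hw_avgs[0]); i++)
			printf("avg %2u -> %lu Hz\n", hw_avgs[i],
			       adck_rate / (6 + hw_avgs[i] * (25 + lst)));
		return 0;
	}
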
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 9a40097..d338bb5 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -700,7 +700,6 @@
 	.remove = ssp_remove,
 	.driver = {
 		.pm = &ssp_pm_ops,
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(ssp_of_match),
 		.name = "sensorhub"
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8086cbc..2e7fdb5 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -126,6 +126,9 @@
 	int err, i = 0;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
 
+	if (sdata->sensor_settings->fs.addr == 0)
+		return 0;
+
 	err = st_sensors_match_fs(sdata->sensor_settings, fs, &i);
 	if (err < 0)
 		goto st_accel_set_fullscale_error;
@@ -479,46 +482,43 @@
 			int num_sensors_list,
 			const struct st_sensor_settings *sensor_settings)
 {
-	u8 wai;
 	int i, n, err;
+	u8 wai;
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
 
+	for (i = 0; i < num_sensors_list; i++) {
+		for (n = 0; n < ST_SENSORS_MAX_4WAI; n++) {
+			if (strcmp(indio_dev->name,
+				sensor_settings[i].sensors_supported[n]) == 0) {
+				break;
+			}
+		}
+		if (n < ST_SENSORS_MAX_4WAI)
+			break;
+	}
+	if (i == num_sensors_list) {
+		dev_err(&indio_dev->dev, "device name %s not recognized.\n",
+							indio_dev->name);
+		return -ENODEV;
+	}
+
 	err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-					ST_SENSORS_DEFAULT_WAI_ADDRESS, &wai);
+					sensor_settings[i].wai_addr, &wai);
 	if (err < 0) {
 		dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
-		goto read_wai_error;
+		return err;
 	}
 
-	for (i = 0; i < num_sensors_list; i++) {
-		if (sensor_settings[i].wai == wai)
-			break;
-	}
-	if (i == num_sensors_list)
-		goto device_not_supported;
-
-	for (n = 0; n < ARRAY_SIZE(sensor_settings[i].sensors_supported); n++) {
-		if (strcmp(indio_dev->name,
-				&sensor_settings[i].sensors_supported[n][0]) == 0)
-			break;
-	}
-	if (n == ARRAY_SIZE(sensor_settings[i].sensors_supported)) {
-		dev_err(&indio_dev->dev, "device name \"%s\" and WhoAmI (0x%02x) mismatch",
-			indio_dev->name, wai);
-		goto sensor_name_mismatch;
+	if (sensor_settings[i].wai != wai) {
+		dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n",
+						indio_dev->name, wai);
+		return -EINVAL;
 	}
 
 	sdata->sensor_settings =
 			(struct st_sensor_settings *)&sensor_settings[i];
 
 	return i;
-
-device_not_supported:
-	dev_err(&indio_dev->dev, "device not supported: WhoAmI (0x%x).\n", wai);
-sensor_name_mismatch:
-	err = -ENODEV;
-read_wai_error:
-	return err;
 }
 EXPORT_SYMBOL(st_sensors_check_device_support);
 
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index f03b92f..c067e68 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -630,7 +630,6 @@
 static struct i2c_driver ad5064_i2c_driver = {
 	.driver = {
 		   .name = "ad5064",
-		   .owner = THIS_MODULE,
 	},
 	.probe = ad5064_i2c_probe,
 	.remove = ad5064_i2c_remove,
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 9de4c4d..130de9b 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -593,7 +593,6 @@
 static struct i2c_driver ad5380_i2c_driver = {
 	.driver = {
 		   .name = "ad5380",
-		   .owner = THIS_MODULE,
 	},
 	.probe = ad5380_i2c_probe,
 	.remove = ad5380_i2c_remove,
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 46bb62a..07e17d7 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -569,7 +569,6 @@
 static struct i2c_driver ad5446_i2c_driver = {
 	.driver = {
 		   .name = "ad5446",
-		   .owner = THIS_MODULE,
 	},
 	.probe = ad5446_i2c_probe,
 	.remove = ad5446_i2c_remove,
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 6e91449..28b8748 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -392,7 +392,6 @@
 	.driver = {
 		.name	= "max5821",
 		.pm     = MAX5821_PM_OPS,
-		.owner	= THIS_MODULE,
 	},
 	.probe		= max5821_probe,
 	.remove		= max5821_remove,
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 10a0dfc..9890c81 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -72,7 +72,6 @@
 	for (i = ADF4350_REG5; i >= ADF4350_REG0; i--) {
 		if ((st->regs_hw[i] != st->regs[i]) ||
 			((i == ADF4350_REG0) && doublebuf)) {
-
 			switch (i) {
 			case ADF4350_REG1:
 			case ADF4350_REG4:
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index b3d0e94..8d24393 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -53,7 +53,8 @@
 config BMG160
 	tristate "BOSCH BMG160 Gyro Sensor"
 	depends on I2C
-	select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+	select IIO_BUFFER
+	select IIO_TRIGGERED_BUFFER
 	help
 	  Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor
 	  driver. This driver also supports BMI055 gyroscope.
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 591bd55..26de876 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -473,6 +473,7 @@
 	ID_ADIS16133,
 	ID_ADIS16135,
 	ID_ADIS16136,
+	ID_ADIS16137,
 };
 
 static const struct adis16136_chip_info adis16136_chip_info[] = {
@@ -488,6 +489,10 @@
 		.precision = IIO_DEGREE_TO_RAD(450),
 		.fullscale = 24623,
 	},
+	[ID_ADIS16137] = {
+		.precision = IIO_DEGREE_TO_RAD(1000),
+		.fullscale = 24609,
+	},
 };
 
 static int adis16136_probe(struct spi_device *spi)
@@ -557,6 +562,7 @@
 	{ "adis16133", ID_ADIS16133 },
 	{ "adis16135", ID_ADIS16135 },
 	{ "adis16136", ID_ADIS16136 },
+	{ "adis16137", ID_ADIS16137 },
 	{ }
 };
 MODULE_DEVICE_TABLE(spi, adis16136_ids);
diff --git a/drivers/iio/gyro/adis16260.c b/drivers/iio/gyro/adis16260.c
index 75fe0ed..00c6ad9 100644
--- a/drivers/iio/gyro/adis16260.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -101,19 +101,24 @@
 #define ADIS16260_SCAN_TEMP	3
 #define ADIS16260_SCAN_ANGL	4
 
-/* Power down the device */
-static int adis16260_stop_device(struct iio_dev *indio_dev)
-{
-	struct adis *adis = iio_priv(indio_dev);
-	int ret;
-	u16 val = ADIS16260_SLP_CNT_POWER_OFF;
+struct adis16260_chip_info {
+	unsigned int gyro_max_val;
+	unsigned int gyro_max_scale;
+	const struct iio_chan_spec *channels;
+	unsigned int num_channels;
+};
 
-	ret = adis_write_reg_16(adis, ADIS16260_SLP_CNT, val);
-	if (ret)
-		dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
+struct adis16260 {
+	const struct adis16260_chip_info *info;
 
-	return ret;
-}
+	struct adis adis;
+};
+
+enum adis16260_type {
+	ADIS16251,
+	ADIS16260,
+	ADIS16266,
+};
 
 static const struct iio_chan_spec adis16260_channels[] = {
 	ADIS_GYRO_CHAN(X, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO,
@@ -131,6 +136,55 @@
 	IIO_CHAN_SOFT_TIMESTAMP(5),
 };
 
+static const struct iio_chan_spec adis16266_channels[] = {
+	ADIS_GYRO_CHAN(X, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO,
+		BIT(IIO_CHAN_INFO_CALIBBIAS) |
+		BIT(IIO_CHAN_INFO_CALIBSCALE),
+		BIT(IIO_CHAN_INFO_SAMP_FREQ), 14),
+	ADIS_TEMP_CHAN(ADIS16260_TEMP_OUT, ADIS16260_SCAN_TEMP,
+		BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+	ADIS_SUPPLY_CHAN(ADIS16260_SUPPLY_OUT, ADIS16260_SCAN_SUPPLY,
+		BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+	ADIS_AUX_ADC_CHAN(ADIS16260_AUX_ADC, ADIS16260_SCAN_AUX_ADC,
+		BIT(IIO_CHAN_INFO_SAMP_FREQ), 12),
+	IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct adis16260_chip_info adis16260_chip_info_table[] = {
+	[ADIS16251] = {
+		.gyro_max_scale = 80,
+		.gyro_max_val = IIO_RAD_TO_DEGREE(4368),
+		.channels = adis16260_channels,
+		.num_channels = ARRAY_SIZE(adis16260_channels),
+	},
+	[ADIS16260] = {
+		.gyro_max_scale = 320,
+		.gyro_max_val = IIO_RAD_TO_DEGREE(4368),
+		.channels = adis16260_channels,
+		.num_channels = ARRAY_SIZE(adis16260_channels),
+	},
+	[ADIS16266] = {
+		.gyro_max_scale = 14000,
+		.gyro_max_val = IIO_RAD_TO_DEGREE(3357),
+		.channels = adis16266_channels,
+		.num_channels = ARRAY_SIZE(adis16266_channels),
+	},
+};
+
+/* Power down the device */
+static int adis16260_stop_device(struct iio_dev *indio_dev)
+{
+	struct adis16260 *adis16260 = iio_priv(indio_dev);
+	int ret;
+	u16 val = ADIS16260_SLP_CNT_POWER_OFF;
+
+	ret = adis_write_reg_16(&adis16260->adis, ADIS16260_SLP_CNT, val);
+	if (ret)
+		dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
+
+	return ret;
+}
+
 static const u8 adis16260_addresses[][2] = {
 	[ADIS16260_SCAN_GYRO] = { ADIS16260_GYRO_OFF, ADIS16260_GYRO_SCALE },
 };
@@ -140,7 +194,9 @@
 			      int *val, int *val2,
 			      long mask)
 {
-	struct adis *adis = iio_priv(indio_dev);
+	struct adis16260 *adis16260 = iio_priv(indio_dev);
+	const struct adis16260_chip_info *info = adis16260->info;
+	struct adis *adis = &adis16260->adis;
 	int ret;
 	u8 addr;
 	s16 val16;
@@ -152,15 +208,9 @@
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_ANGL_VEL:
-			*val = 0;
-			if (spi_get_device_id(adis->spi)->driver_data) {
-				/* 0.01832 degree / sec */
-				*val2 = IIO_DEGREE_TO_RAD(18320);
-			} else {
-				/* 0.07326 degree / sec */
-				*val2 = IIO_DEGREE_TO_RAD(73260);
-			}
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = info->gyro_max_scale;
+			*val2 = info->gyro_max_val;
+			return IIO_VAL_FRACTIONAL;
 		case IIO_INCLI:
 			*val = 0;
 			*val2 = IIO_DEGREE_TO_RAD(36630);
@@ -224,7 +274,8 @@
 			       int val2,
 			       long mask)
 {
-	struct adis *adis = iio_priv(indio_dev);
+	struct adis16260 *adis16260 = iio_priv(indio_dev);
+	struct adis *adis = &adis16260->adis;
 	int ret;
 	u8 addr;
 	u8 t;
@@ -305,35 +356,42 @@
 
 static int adis16260_probe(struct spi_device *spi)
 {
+	const struct spi_device_id *id;
+	struct adis16260 *adis16260;
 	struct iio_dev *indio_dev;
-	struct adis *adis;
 	int ret;
 
+	id = spi_get_device_id(spi);
+	if (!id)
+		return -ENODEV;
+
 	/* setup the industrialio driver allocated elements */
-	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis));
+	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16260));
 	if (!indio_dev)
 		return -ENOMEM;
-	adis = iio_priv(indio_dev);
+	adis16260 = iio_priv(indio_dev);
 	/* this is only used for removal purposes */
 	spi_set_drvdata(spi, indio_dev);
 
-	indio_dev->name = spi_get_device_id(spi)->name;
+	adis16260->info = &adis16260_chip_info_table[id->driver_data];
+
+	indio_dev->name = id->name;
 	indio_dev->dev.parent = &spi->dev;
 	indio_dev->info = &adis16260_info;
-	indio_dev->channels = adis16260_channels;
-	indio_dev->num_channels = ARRAY_SIZE(adis16260_channels);
+	indio_dev->channels = adis16260->info->channels;
+	indio_dev->num_channels = adis16260->info->num_channels;
 	indio_dev->modes = INDIO_DIRECT_MODE;
 
-	ret = adis_init(adis, indio_dev, spi, &adis16260_data);
+	ret = adis_init(&adis16260->adis, indio_dev, spi, &adis16260_data);
 	if (ret)
 		return ret;
 
-	ret = adis_setup_buffer_and_trigger(adis, indio_dev, NULL);
+	ret = adis_setup_buffer_and_trigger(&adis16260->adis, indio_dev, NULL);
 	if (ret)
 		return ret;
 
 	/* Get the device into a sane initial state */
-	ret = adis_initial_startup(adis);
+	ret = adis_initial_startup(&adis16260->adis);
 	if (ret)
 		goto error_cleanup_buffer_trigger;
 	ret = iio_device_register(indio_dev);
@@ -343,18 +401,18 @@
 	return 0;
 
 error_cleanup_buffer_trigger:
-	adis_cleanup_buffer_and_trigger(adis, indio_dev);
+	adis_cleanup_buffer_and_trigger(&adis16260->adis, indio_dev);
 	return ret;
 }
 
 static int adis16260_remove(struct spi_device *spi)
 {
 	struct iio_dev *indio_dev = spi_get_drvdata(spi);
-	struct adis *adis = iio_priv(indio_dev);
+	struct adis16260 *adis16260 = iio_priv(indio_dev);
 
 	iio_device_unregister(indio_dev);
 	adis16260_stop_device(indio_dev);
-	adis_cleanup_buffer_and_trigger(adis, indio_dev);
+	adis_cleanup_buffer_and_trigger(&adis16260->adis, indio_dev);
 
 	return 0;
 }
@@ -364,11 +422,12 @@
  * support for the on chip filtering.
  */
 static const struct spi_device_id adis16260_id[] = {
-	{"adis16260", 0},
-	{"adis16265", 0},
-	{"adis16250", 0},
-	{"adis16255", 0},
-	{"adis16251", 1},
+	{"adis16260", ADIS16260},
+	{"adis16265", ADIS16260},
+	{"adis16266", ADIS16266},
+	{"adis16250", ADIS16260},
+	{"adis16255", ADIS16260},
+	{"adis16251", ADIS16251},
 	{}
 };
 MODULE_DEVICE_TABLE(spi, adis16260_id);
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index f0fd940..c102a63 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -379,7 +379,6 @@
 
 static struct i2c_driver itg3200_driver = {
 	.driver = {
-		.owner  = THIS_MODULE,
 		.name	= "itg3200",
 		.pm	= &itg3200_pm_ops,
 	},
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index ffe9664..4b993a5 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -131,6 +131,7 @@
 static const struct st_sensor_settings st_gyro_sensors_settings[] = {
 	{
 		.wai = ST_GYRO_1_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = L3G4200D_GYRO_DEV_NAME,
 			[1] = LSM330DL_GYRO_DEV_NAME,
@@ -190,6 +191,7 @@
 	},
 	{
 		.wai = ST_GYRO_2_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = L3GD20_GYRO_DEV_NAME,
 			[1] = LSM330D_GYRO_DEV_NAME,
@@ -252,6 +254,7 @@
 	},
 	{
 		.wai = ST_GYRO_3_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = L3GD20_GYRO_DEV_NAME,
 		},
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 64480b1..6848451 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -99,7 +99,6 @@
 
 static struct i2c_driver st_gyro_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "st-gyro-i2c",
 		.of_match_table = of_match_ptr(st_gyro_of_match),
 	},
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 7d79a1a..1165b1c 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
+#include <linux/timekeeping.h>
 
 #include <linux/iio/iio.h>
 
@@ -46,7 +47,8 @@
  * Note that when reading the sensor actually 84 edges are detected, but
  * since the last edge is not significant, we only store 83:
  */
-#define DHT11_EDGES_PER_READ (2*DHT11_BITS_PER_READ + DHT11_EDGES_PREAMBLE + 1)
+#define DHT11_EDGES_PER_READ (2 * DHT11_BITS_PER_READ + \
+			      DHT11_EDGES_PREAMBLE + 1)
 
 /* Data transmission timing (nano seconds) */
 #define DHT11_START_TRANSMISSION	18  /* ms */
@@ -62,6 +64,7 @@
 	int				irq;
 
 	struct completion		completion;
+	/* The iio sysfs interface doesn't prevent concurrent reads: */
 	struct mutex			lock;
 
 	s64				timestamp;
@@ -87,32 +90,20 @@
 	return ret;
 }
 
-static int dht11_decode(struct dht11 *dht11, int offset)
+static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
 {
-	int i, t, timing[DHT11_BITS_PER_READ], threshold,
-		timeres = DHT11_SENSOR_RESPONSE;
+	int i, t, timing[DHT11_BITS_PER_READ], threshold;
 	unsigned char temp_int, temp_dec, hum_int, hum_dec, checksum;
 
-	/* Calculate timestamp resolution */
-	for (i = 1; i < dht11->num_edges; ++i) {
-		t = dht11->edges[i].ts - dht11->edges[i-1].ts;
-		if (t > 0 && t < timeres)
-			timeres = t;
-	}
-	if (2*timeres > DHT11_DATA_BIT_HIGH) {
-		pr_err("dht11: timeresolution %d too bad for decoding\n",
-			timeres);
-		return -EIO;
-	}
 	threshold = DHT11_DATA_BIT_HIGH / timeres;
-	if (DHT11_DATA_BIT_LOW/timeres + 1 >= threshold)
+	if (DHT11_DATA_BIT_LOW / timeres + 1 >= threshold)
 		pr_err("dht11: WARNING: decoding ambiguous\n");
 
 	/* scale down with timeres and check validity */
 	for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
-		t = dht11->edges[offset + 2*i + 2].ts -
-			dht11->edges[offset + 2*i + 1].ts;
-		if (!dht11->edges[offset + 2*i + 1].value)
+		t = dht11->edges[offset + 2 * i + 2].ts -
+			dht11->edges[offset + 2 * i + 1].ts;
+		if (!dht11->edges[offset + 2 * i + 1].value)
 			return -EIO;  /* lost synchronisation */
 		timing[i] = t / timeres;
 	}
@@ -126,7 +117,7 @@
 	if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
 		return -EIO;
 
-	dht11->timestamp = iio_get_time_ns();
+	dht11->timestamp = ktime_get_real_ns();
 	if (hum_int < 20) {  /* DHT22 */
 		dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
 					((temp_int & 0x80) ? -100 : 100);
@@ -154,7 +145,7 @@
 
 	/* TODO: Consider making the handler safe for IRQ sharing */
 	if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
-		dht11->edges[dht11->num_edges].ts = iio_get_time_ns();
+		dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
 		dht11->edges[dht11->num_edges++].value =
 						gpio_get_value(dht11->gpio);
 
@@ -166,14 +157,26 @@
 }
 
 static int dht11_read_raw(struct iio_dev *iio_dev,
-			const struct iio_chan_spec *chan,
+			  const struct iio_chan_spec *chan,
 			int *val, int *val2, long m)
 {
 	struct dht11 *dht11 = iio_priv(iio_dev);
-	int ret;
+	int ret, timeres;
 
 	mutex_lock(&dht11->lock);
-	if (dht11->timestamp + DHT11_DATA_VALID_TIME < iio_get_time_ns()) {
+	if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
+		timeres = ktime_get_resolution_ns();
+		if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
+			dev_err(dht11->dev, "timeresolution %dns too low\n",
+				timeres);
+			/* In theory a better clock could become available
+			 * at some point ... and there is no error code
+			 * that really fits better.
+			 */
+			ret = -EAGAIN;
+			goto err;
+		}
+
 		reinit_completion(&dht11->completion);
 
 		dht11->num_edges = 0;
@@ -192,13 +195,13 @@
 			goto err;
 
 		ret = wait_for_completion_killable_timeout(&dht11->completion,
-								 HZ);
+							   HZ);
 
 		free_irq(dht11->irq, iio_dev);
 
 		if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
 			dev_err(&iio_dev->dev,
-					"Only %d signal edges detected\n",
+				"Only %d signal edges detected\n",
 					dht11->num_edges);
 			ret = -ETIMEDOUT;
 		}
@@ -206,9 +209,10 @@
 			goto err;
 
 		ret = dht11_decode(dht11,
-				dht11->num_edges == DHT11_EDGES_PER_READ ?
+				   dht11->num_edges == DHT11_EDGES_PER_READ ?
 					DHT11_EDGES_PREAMBLE :
-					DHT11_EDGES_PREAMBLE - 2);
+					DHT11_EDGES_PREAMBLE - 2,
+				timeres);
 		if (ret)
 			goto err;
 	}
@@ -261,9 +265,10 @@
 	dht11 = iio_priv(iio);
 	dht11->dev = dev;
 
-	dht11->gpio = ret = of_get_gpio(node, 0);
+	ret = of_get_gpio(node, 0);
 	if (ret < 0)
 		return ret;
+	dht11->gpio = ret;
 	ret = devm_gpio_request_one(dev, dht11->gpio, GPIOF_IN, pdev->name);
 	if (ret)
 		return ret;
@@ -274,7 +279,7 @@
 		return -EINVAL;
 	}
 
-	dht11->timestamp = iio_get_time_ns() - DHT11_DATA_VALID_TIME - 1;
+	dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1;
 	dht11->num_edges = -1;
 
 	platform_set_drvdata(pdev, iio);
diff --git a/drivers/iio/humidity/si7005.c b/drivers/iio/humidity/si7005.c
index bdd586e..91972cc 100644
--- a/drivers/iio/humidity/si7005.c
+++ b/drivers/iio/humidity/si7005.c
@@ -177,7 +177,6 @@
 static struct i2c_driver si7005_driver = {
 	.driver = {
 		.name	= "si7005",
-		.owner	= THIS_MODULE,
 	},
 	.probe = si7005_probe,
 	.id_table = si7005_id,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 2fd68f2..abc4c50 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -139,7 +139,9 @@
 	ADIS16360,
 	ADIS16362,
 	ADIS16364,
+	ADIS16367,
 	ADIS16400,
+	ADIS16445,
 	ADIS16448,
 };
 
@@ -622,6 +624,17 @@
 	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
 };
 
+static const struct iio_chan_spec adis16445_channels[] = {
+	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
+	ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
+	ADIS16400_GYRO_CHAN(Z, ADIS16400_ZGYRO_OUT, 16),
+	ADIS16400_ACCEL_CHAN(X, ADIS16400_XACCL_OUT, 16),
+	ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 16),
+	ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 16),
+	ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
+	IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
+};
+
 static const struct iio_chan_spec adis16448_channels[] = {
 	ADIS16400_GYRO_CHAN(X, ADIS16400_XGYRO_OUT, 16),
 	ADIS16400_GYRO_CHAN(Y, ADIS16400_YGYRO_OUT, 16),
@@ -696,7 +709,8 @@
 	[ADIS16300] = {
 		.channels = adis16300_channels,
 		.num_channels = ARRAY_SIZE(adis16300_channels),
-		.flags = ADIS16400_HAS_SLOW_MODE,
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+				ADIS16400_HAS_SERIAL_NUMBER,
 		.gyro_scale_micro = IIO_DEGREE_TO_RAD(50000), /* 0.05 deg/s */
 		.accel_scale_micro = 5884,
 		.temp_scale_nano = 140000000, /* 0.14 C */
@@ -763,6 +777,18 @@
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
 	},
+	[ADIS16367] = {
+		.channels = adis16350_channels,
+		.num_channels = ARRAY_SIZE(adis16350_channels),
+		.flags = ADIS16400_HAS_PROD_ID | ADIS16400_HAS_SLOW_MODE |
+				ADIS16400_HAS_SERIAL_NUMBER,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(2000), /* 0.2 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(3333), /* 3.333 mg */
+		.temp_scale_nano = 136000000, /* 0.136 C */
+		.temp_offset = 25000000 / 136000, /* 25 C = 0x00 */
+		.set_freq = adis16400_set_freq,
+		.get_freq = adis16400_get_freq,
+	},
 	[ADIS16400] = {
 		.channels = adis16400_channels,
 		.num_channels = ARRAY_SIZE(adis16400_channels),
@@ -774,13 +800,26 @@
 		.set_freq = adis16400_set_freq,
 		.get_freq = adis16400_get_freq,
 	},
+	[ADIS16445] = {
+		.channels = adis16445_channels,
+		.num_channels = ARRAY_SIZE(adis16445_channels),
+		.flags = ADIS16400_HAS_PROD_ID |
+				ADIS16400_HAS_SERIAL_NUMBER |
+				ADIS16400_BURST_DIAG_STAT,
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+		.accel_scale_micro = IIO_G_TO_M_S_2(250), /* 1/4000 g */
+		.temp_scale_nano = 73860000, /* 0.07386 C */
+		.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
+		.set_freq = adis16334_set_freq,
+		.get_freq = adis16334_get_freq,
+	},
 	[ADIS16448] = {
 		.channels = adis16448_channels,
 		.num_channels = ARRAY_SIZE(adis16448_channels),
 		.flags = ADIS16400_HAS_PROD_ID |
 				ADIS16400_HAS_SERIAL_NUMBER |
 				ADIS16400_BURST_DIAG_STAT,
-		.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
+		.gyro_scale_micro = IIO_DEGREE_TO_RAD(40000), /* 0.04 deg/s */
 		.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
 		.temp_scale_nano = 73860000, /* 0.07386 C */
 		.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
@@ -926,6 +965,7 @@
 
 static const struct spi_device_id adis16400_id[] = {
 	{"adis16300", ADIS16300},
+	{"adis16305", ADIS16300},
 	{"adis16334", ADIS16334},
 	{"adis16350", ADIS16350},
 	{"adis16354", ADIS16350},
@@ -934,8 +974,10 @@
 	{"adis16362", ADIS16362},
 	{"adis16364", ADIS16364},
 	{"adis16365", ADIS16360},
+	{"adis16367", ADIS16367},
 	{"adis16400", ADIS16400},
 	{"adis16405", ADIS16400},
+	{"adis16445", ADIS16445},
 	{"adis16448", ADIS16448},
 	{}
 };
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 989605d..b94bfd3 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -110,6 +110,10 @@
 struct adis16480_chip_info {
 	unsigned int num_channels;
 	const struct iio_chan_spec *channels;
+	unsigned int gyro_max_val;
+	unsigned int gyro_max_scale;
+	unsigned int accel_max_val;
+	unsigned int accel_max_scale;
 };
 
 struct adis16480 {
@@ -497,19 +501,21 @@
 static int adis16480_read_raw(struct iio_dev *indio_dev,
 	const struct iio_chan_spec *chan, int *val, int *val2, long info)
 {
+	struct adis16480 *st = iio_priv(indio_dev);
+
 	switch (info) {
 	case IIO_CHAN_INFO_RAW:
 		return adis_single_conversion(indio_dev, chan, 0, val);
 	case IIO_CHAN_INFO_SCALE:
 		switch (chan->type) {
 		case IIO_ANGL_VEL:
-			*val = 0;
-			*val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = st->chip_info->gyro_max_scale;
+			*val2 = st->chip_info->gyro_max_val;
+			return IIO_VAL_FRACTIONAL;
 		case IIO_ACCEL:
-			*val = 0;
-			*val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
-			return IIO_VAL_INT_PLUS_MICRO;
+			*val = st->chip_info->accel_max_scale;
+			*val2 = st->chip_info->accel_max_val;
+			return IIO_VAL_FRACTIONAL;
 		case IIO_MAGN:
 			*val = 0;
 			*val2 = 100; /* 0.0001 gauss */
@@ -674,18 +680,39 @@
 	[ADIS16375] = {
 		.channels = adis16485_channels,
 		.num_channels = ARRAY_SIZE(adis16485_channels),
+		/*
+		 * storing the value in rad/degree and the scale in degree
+		 * gives us the result in rad and better precision than
+		 * storing the scale directly in rad.
+		 */
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22887),
+		.gyro_max_scale = 300,
+		.accel_max_val = IIO_M_S_2_TO_G(21973),
+		.accel_max_scale = 18,
 	},
 	[ADIS16480] = {
 		.channels = adis16480_channels,
 		.num_channels = ARRAY_SIZE(adis16480_channels),
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+		.gyro_max_scale = 450,
+		.accel_max_val = IIO_M_S_2_TO_G(12500),
+		.accel_max_scale = 5,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,
 		.num_channels = ARRAY_SIZE(adis16485_channels),
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+		.gyro_max_scale = 450,
+		.accel_max_val = IIO_M_S_2_TO_G(20000),
+		.accel_max_scale = 5,
 	},
 	[ADIS16488] = {
 		.channels = adis16480_channels,
 		.num_channels = ARRAY_SIZE(adis16480_channels),
+		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+		.gyro_max_scale = 450,
+		.accel_max_val = IIO_M_S_2_TO_G(22500),
+		.accel_max_scale = 18,
 	},
 };
 
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 65ce868..f0e0609 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -690,6 +690,10 @@
 
 /* constant IIO attribute */
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 20 50 100 200 500");
+static IIO_CONST_ATTR(in_anglvel_scale_available,
+					  "0.000133090 0.000266181 0.000532362 0.001064724");
+static IIO_CONST_ATTR(in_accel_scale_available,
+					  "0.000598 0.001196 0.002392 0.004785");
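+/*
+ * The scales above correspond to the gyro full-scale ranges of
+ * +/-250/500/1000/2000 dps and the accel ranges of +/-2/4/8/16 g.
+ */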
 static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
 	inv_mpu6050_fifo_rate_store);
 static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
@@ -702,6 +706,8 @@
 	&iio_dev_attr_in_accel_matrix.dev_attr.attr,
 	&iio_dev_attr_sampling_frequency.dev_attr.attr,
 	&iio_const_attr_sampling_frequency_available.dev_attr.attr,
+	&iio_const_attr_in_accel_scale_available.dev_attr.attr,
+	&iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
 	NULL,
 };
 
@@ -921,7 +927,6 @@
 	.remove		=	inv_mpu_remove,
 	.id_table	=	inv_mpu_id,
 	.driver = {
-		.owner	=	THIS_MODULE,
 		.name	=	"inv-mpu6050",
 		.pm     =       INV_MPU6050_PMOPS,
 		.acpi_match_table = ACPI_PTR(inv_acpi_match),
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 462a010..82cdf50 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1363,7 +1363,7 @@
 	if (client->irq < 0)
 		client->irq = kmx61_gpio_probe(client, data);
 
-	if (client->irq >= 0) {
+	if (client->irq > 0) {
 		ret = devm_request_threaded_irq(&client->dev, client->irq,
 						kmx61_data_rdy_trig_poll,
 						kmx61_event_handler,
@@ -1445,10 +1445,10 @@
 err_iio_unregister_acc:
 	iio_device_unregister(data->acc_indio_dev);
 err_buffer_cleanup_mag:
-	if (client->irq >= 0)
+	if (client->irq > 0)
 		iio_triggered_buffer_cleanup(data->mag_indio_dev);
 err_buffer_cleanup_acc:
-	if (client->irq >= 0)
+	if (client->irq > 0)
 		iio_triggered_buffer_cleanup(data->acc_indio_dev);
 err_trigger_unregister_motion:
 	iio_trigger_unregister(data->motion_trig);
@@ -1472,7 +1472,7 @@
 	iio_device_unregister(data->acc_indio_dev);
 	iio_device_unregister(data->mag_indio_dev);
 
-	if (client->irq >= 0) {
+	if (client->irq > 0) {
 		iio_triggered_buffer_cleanup(data->acc_indio_dev);
 		iio_triggered_buffer_cleanup(data->mag_indio_dev);
 		iio_trigger_unregister(data->acc_dready_trig);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 6eee1b0..d7e908a 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -71,8 +71,9 @@
 
 	if (avail >= to_wait) {
 		/* force a flush for non-blocking reads */
-		if (!to_wait && !avail && to_flush)
-			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
+		if (!to_wait && avail < to_flush)
+			iio_buffer_flush_hwfifo(indio_dev, buf,
+						to_flush - avail);
 		return true;
 	}
 
@@ -90,9 +91,16 @@
 
 /**
  * iio_buffer_read_first_n_outer() - chrdev read for buffer access
+ * @filp:	File structure pointer for the char device
+ * @buf:	Destination buffer for iio buffer read
+ * @n:		Maximum number of bytes to read
+ * @f_ps:	Long offset provided by the user as a seek position
  *
  * This function relies on all buffer implementations having an
  * iio_buffer as their first element.
+ *
+ * Return: the number of bytes read or a negative error code
+ *	   on failure
  **/
 ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
 				      size_t n, loff_t *f_ps)
@@ -100,8 +108,7 @@
 	struct iio_dev *indio_dev = filp->private_data;
 	struct iio_buffer *rb = indio_dev->buffer;
 	size_t datum_size;
-	size_t to_wait = 0;
-	size_t to_read;
+	size_t to_wait;
 	int ret;
 
 	if (!indio_dev->info)
@@ -119,14 +126,14 @@
 	if (!datum_size)
 		return 0;
 
-	to_read = min_t(size_t, n / datum_size, rb->watermark);
-
-	if (!(filp->f_flags & O_NONBLOCK))
-		to_wait = to_read;
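+	/*
+	 * Blocking reads wait until the watermark (capped to the requested
+	 * size) is reached; non-blocking reads do not wait at all.
+	 */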
+	if (filp->f_flags & O_NONBLOCK)
+		to_wait = 0;
+	else
+		to_wait = min_t(size_t, n / datum_size, rb->watermark);
 
 	do {
 		ret = wait_event_interruptible(rb->pollq,
-			iio_buffer_ready(indio_dev, rb, to_wait, to_read));
+		      iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size));
 		if (ret)
 			return ret;
 
@@ -143,6 +150,12 @@
 
 /**
  * iio_buffer_poll() - poll the buffer to find out if it has data
+ * @filp:	File structure pointer for device access
+ * @wait:	Poll table structure pointer for which the driver adds
+ *		a wait queue
+ *
+ * Return: (POLLIN | POLLRDNORM) if data is available for reading
+ *	   or 0 for other cases
  */
 unsigned int iio_buffer_poll(struct file *filp,
 			     struct poll_table_struct *wait)
@@ -151,7 +164,7 @@
 	struct iio_buffer *rb = indio_dev->buffer;
 
 	if (!indio_dev->info)
-		return -ENODEV;
+		return 0;
 
 	poll_wait(filp, &rb->pollq, wait);
 	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
@@ -1136,7 +1149,7 @@
 EXPORT_SYMBOL_GPL(iio_scan_mask_query);
 
 /**
- * struct iio_demux_table() - table describing demux memcpy ops
+ * struct iio_demux_table - table describing demux memcpy ops
  * @from:	index to copy from
  * @to:		index to copy to
  * @length:	how many bytes to copy
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 3524b0d..b3fcc2c 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -81,6 +81,14 @@
 	[IIO_MOD_X] = "x",
 	[IIO_MOD_Y] = "y",
 	[IIO_MOD_Z] = "z",
+	[IIO_MOD_X_AND_Y] = "x&y",
+	[IIO_MOD_X_AND_Z] = "x&z",
+	[IIO_MOD_Y_AND_Z] = "y&z",
+	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
+	[IIO_MOD_X_OR_Y] = "x|y",
+	[IIO_MOD_X_OR_Z] = "x|z",
+	[IIO_MOD_Y_OR_Z] = "y|z",
+	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
 	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
 	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
 	[IIO_MOD_LIGHT_BOTH] = "both",
@@ -398,10 +406,16 @@
 
 /**
  * iio_format_value() - Formats a IIO value into its string representation
- * @buf: The buffer to which the formated value gets written
- * @type: One of the IIO_VAL_... constants. This decides how the val and val2
- *        parameters are formatted.
- * @vals: pointer to the values, exact meaning depends on the type parameter.
+ * @buf:	The buffer to which the formatted value gets written
+ * @type:	One of the IIO_VAL_... constants. This decides how the val
+ *		and val2 parameters are formatted.
+ * @size:	Number of IIO value entries contained in vals
+ * @vals:	Pointer to the values, exact meaning depends on the
+ *		type parameter.
+ *
+ * Return: 0 by default, a negative number on failure, or the total
+ *	   number of characters written when the type is one of the
+ *	   IIO_VAL_... constants.
  */
 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
 {
@@ -1088,6 +1102,11 @@
 
 /**
  * iio_chrdev_open() - chrdev file open for buffer access and ioctls
+ * @inode:	Inode structure for identifying the device in the file system
+ * @filp:	File structure for iio device used to keep and later access
+ *		private data
+ *
+ * Return: 0 on success or -EBUSY if the device is already opened
  **/
 static int iio_chrdev_open(struct inode *inode, struct file *filp)
 {
@@ -1106,7 +1125,11 @@
 
 /**
  * iio_chrdev_release() - chrdev file close buffer access and ioctls
- **/
+ * @inode:	Inode structure pointer for the char device
+ * @filp:	File structure pointer for the char device
+ *
+ * Return: 0 for successful release
+ */
 static int iio_chrdev_release(struct inode *inode, struct file *filp)
 {
 	struct iio_dev *indio_dev = container_of(inode->i_cdev,
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 894d813..cae332b 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -32,6 +32,7 @@
  * @dev_attr_list:	list of event interface sysfs attribute
  * @flags:		file operations related flags including busy flag.
  * @group:		event interface sysfs attribute group
+ * @read_lock:		lock to protect kfifo read operations
  */
 struct iio_event_interface {
 	wait_queue_head_t	wait;
@@ -75,6 +76,11 @@
 
 /**
  * iio_event_poll() - poll the event queue to find out if it has data
+ * @filep:	File structure pointer to identify the device
+ * @wait:	Poll table pointer to add the wait queue on
+ *
+ * Return: (POLLIN | POLLRDNORM) if data is available for reading
+ *	   or 0 for other cases
  */
 static unsigned int iio_event_poll(struct file *filep,
 			     struct poll_table_struct *wait)
@@ -84,7 +90,7 @@
 	unsigned int events = 0;
 
 	if (!indio_dev->info)
-		return -ENODEV;
+		return events;
 
 	poll_wait(filep, &ev_int->wait, wait);
 
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index d31098e..570606c 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -40,7 +40,14 @@
 
 /**
  * iio_trigger_read_name() - retrieve useful identifying name
- **/
+ * @dev:	device associated with the iio_trigger
+ * @attr:	pointer to the device_attribute structure that is
+ *		being processed
+ * @buf:	buffer to print the name into
+ *
+ * Return: a negative number on failure or the number of written
+ *	   characters on success.
+ */
 static ssize_t iio_trigger_read_name(struct device *dev,
 				     struct device_attribute *attr,
 				     char *buf)
@@ -288,10 +295,17 @@
 
 /**
  * iio_trigger_read_current() - trigger consumer sysfs query current trigger
+ * @dev:	device associated with an industrial I/O device
+ * @attr:	pointer to the device_attribute structure that
+ *		is being processed
+ * @buf:	buffer where the current trigger name will be printed into
  *
  * For trigger consumers the current_trigger interface allows the trigger
  * used by the device to be queried.
- **/
+ *
+ * Return: a negative number on failure, the number of characters written
+ *	   on success or 0 if no trigger is available
+ */
 static ssize_t iio_trigger_read_current(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
@@ -305,11 +319,18 @@
 
 /**
  * iio_trigger_write_current() - trigger consumer sysfs set current trigger
+ * @dev:	device associated with an industrial I/O device
+ * @attr:	device attribute that is being processed
+ * @buf:	string buffer that holds the name of the trigger
+ * @len:	length of the trigger name held by buf
  *
  * For trigger consumers the current_trigger interface allows the trigger
  * used for this device to be specified at run time based on the trigger's
  * name.
- **/
+ *
+ * Return: negative error code on failure or length of the buffer
+ *	   on success
+ */
 static ssize_t iio_trigger_write_current(struct device *dev,
 					 struct device_attribute *attr,
 					 const char *buf,
diff --git a/drivers/iio/industrialio-triggered-buffer.c b/drivers/iio/industrialio-triggered-buffer.c
index 15a5341..4b2858b 100644
--- a/drivers/iio/industrialio-triggered-buffer.c
+++ b/drivers/iio/industrialio-triggered-buffer.c
@@ -24,8 +24,8 @@
 /**
  * iio_triggered_buffer_setup() - Setup triggered buffer and pollfunc
  * @indio_dev:		IIO device structure
- * @pollfunc_bh:	Function which will be used as pollfunc bottom half
- * @pollfunc_th:	Function which will be used as pollfunc top half
+ * @h:			Function which will be used as pollfunc top half
+ * @thread:		Function which will be used as pollfunc bottom half
  * @setup_ops:		Buffer setup functions to use for this device.
  *			If NULL the default setup functions for triggered
  *			buffers will be used.
@@ -42,8 +42,8 @@
  * iio_triggered_buffer_cleanup().
  */
 int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
-	irqreturn_t (*pollfunc_bh)(int irq, void *p),
-	irqreturn_t (*pollfunc_th)(int irq, void *p),
+	irqreturn_t (*h)(int irq, void *p),
+	irqreturn_t (*thread)(int irq, void *p),
 	const struct iio_buffer_setup_ops *setup_ops)
 {
 	struct iio_buffer *buffer;
@@ -57,8 +57,8 @@
 
 	iio_device_attach_buffer(indio_dev, buffer);
 
-	indio_dev->pollfunc = iio_alloc_pollfunc(pollfunc_bh,
-						 pollfunc_th,
+	indio_dev->pollfunc = iio_alloc_pollfunc(h,
+						 thread,
 						 IRQF_ONESHOT,
 						 indio_dev,
 						 "%s_consumer%d",
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index a5c5925..7ed859a 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -86,7 +86,7 @@
 	depends on I2C
 	tristate "Capella CM3323 color light sensor"
 	help
-	 Say Y here if you want to build a driver for Capela CM3323
+	 Say Y here if you want to build a driver for Capella CM3323
 	 color sensor.
 
 	 To compile this driver as a module, choose M here: the module will
@@ -168,6 +168,17 @@
 	 To compile this driver as a module, choose M here:
 	 the module will be called jsa1212.
 
+config RPR0521
+	tristate "ROHM RPR0521 ALS and proximity sensor driver"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	 Say Y here if you want to build support for ROHM's RPR0521
+	 ambient light and proximity sensor device.
+
+	 To compile this driver as a module, choose M here:
+	 the module will be called rpr0521.
+
 config SENSORS_LM3533
 	tristate "LM3533 ambient light sensor"
 	depends on MFD_LM3533
@@ -199,6 +210,27 @@
 	 This driver can also be built as a module.  If so, the module
          will be called ltr501.
 
+config OPT3001
+	tristate "Texas Instruments OPT3001 Light Sensor"
+	depends on I2C
+	help
+	  If you say Y or M here, you get support for Texas Instruments
+	  OPT3001 Ambient Light Sensor.
+
+	  If built as a dynamically linked module, it will be called
+	  opt3001.
+
+config PA12203001
+	tristate "TXC PA12203001 light and proximity sensor"
+	depends on I2C
+	select REGMAP_I2C
+	help
+	 If you say yes here you get support for the TXC PA12203001
+	 ambient light and proximity sensor.
+
+	 This driver can also be built as a module.  If so, the module
+	 will be called pa12203001.
+
 config STK3310
 	tristate "STK3310 ALS and proximity sensor"
 	depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index e2d50fd..91c74c0 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -19,6 +19,9 @@
 obj-$(CONFIG_JSA1212)		+= jsa1212.o
 obj-$(CONFIG_SENSORS_LM3533)	+= lm3533-als.o
 obj-$(CONFIG_LTR501)		+= ltr501.o
+obj-$(CONFIG_OPT3001)		+= opt3001.o
+obj-$(CONFIG_PA12203001)	+= pa12203001.o
+obj-$(CONFIG_RPR0521)		+= rpr0521.o
 obj-$(CONFIG_SENSORS_TSL2563)	+= tsl2563.o
 obj-$(CONFIG_STK3310)          += stk3310.o
 obj-$(CONFIG_TCS3414)		+= tcs3414.o
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 1dafa07..60537ec 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -65,20 +65,20 @@
  * to acpi_als_channels[], the evt_buffer below will grow
  * automatically.
  */
-#define EVT_NR_SOURCES		ARRAY_SIZE(acpi_als_channels)
-#define EVT_BUFFER_SIZE		\
-	(sizeof(s64) + (EVT_NR_SOURCES * sizeof(s32)))
+#define ACPI_ALS_EVT_NR_SOURCES		ARRAY_SIZE(acpi_als_channels)
+#define ACPI_ALS_EVT_BUFFER_SIZE		\
+	(sizeof(s64) + (ACPI_ALS_EVT_NR_SOURCES * sizeof(s32)))
 
 struct acpi_als {
 	struct acpi_device	*device;
 	struct mutex		lock;
 
-	s32			evt_buffer[EVT_BUFFER_SIZE];
+	s32			evt_buffer[ACPI_ALS_EVT_BUFFER_SIZE];
 };
 
 /*
  * All types of properties the ACPI0008 block can report. The ALI, ALC, ALT
- * and ALP can all be handled by als_read_value() below, while the ALR is
+ * and ALP can all be handled by acpi_als_read_value() below, while the ALR is
  * special.
  *
  * The _ALR property returns tables that can be used to fine-tune the values
@@ -93,7 +93,7 @@
 #define ACPI_ALS_POLLING	"_ALP"
 #define ACPI_ALS_TABLES		"_ALR"
 
-static int als_read_value(struct acpi_als *als, char *prop, s32 *val)
+static int acpi_als_read_value(struct acpi_als *als, char *prop, s32 *val)
 {
 	unsigned long long temp_val;
 	acpi_status status;
@@ -122,11 +122,11 @@
 
 	mutex_lock(&als->lock);
 
-	memset(buffer, 0, EVT_BUFFER_SIZE);
+	memset(buffer, 0, ACPI_ALS_EVT_BUFFER_SIZE);
 
 	switch (event) {
 	case ACPI_ALS_NOTIFY_ILLUMINANCE:
-		ret = als_read_value(als, ACPI_ALS_ILLUMINANCE, &val);
+		ret = acpi_als_read_value(als, ACPI_ALS_ILLUMINANCE, &val);
 		if (ret < 0)
 			goto out;
 		*buffer++ = val;
@@ -159,7 +159,7 @@
 	if (chan->type != IIO_LIGHT)
 		return -EINVAL;
 
-	ret = als_read_value(als, ACPI_ALS_ILLUMINANCE, &temp_val);
+	ret = acpi_als_read_value(als, ACPI_ALS_ILLUMINANCE, &temp_val);
 	if (ret < 0)
 		return ret;
 
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
index 9ddde0c..e1b9fa5 100644
--- a/drivers/iio/light/apds9300.c
+++ b/drivers/iio/light/apds9300.c
@@ -515,7 +515,6 @@
 static struct i2c_driver apds9300_driver = {
 	.driver = {
 		.name	= APDS9300_DRV_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= APDS9300_PM_OPS,
 	},
 	.probe		= apds9300_probe,
diff --git a/drivers/iio/light/bh1750.c b/drivers/iio/light/bh1750.c
index 564c2b3..8b41643 100644
--- a/drivers/iio/light/bh1750.c
+++ b/drivers/iio/light/bh1750.c
@@ -319,7 +319,6 @@
 static struct i2c_driver bh1750_driver = {
 	.driver = {
 		.name = "bh1750",
-		.owner = THIS_MODULE,
 		.pm = BH1750_PM_OPS,
 	},
 	.probe = bh1750_probe,
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 5d12ae54..d6fd0da 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -353,12 +353,12 @@
 	{ .compatible = "capella,cm32181" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, cm32181_of_match);
 
 static struct i2c_driver cm32181_driver = {
 	.driver = {
 		.name	= "cm32181",
 		.of_match_table = of_match_ptr(cm32181_of_match),
-		.owner	= THIS_MODULE,
 	},
 	.id_table       = cm32181_id,
 	.probe		= cm32181_probe,
diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
index 39c8d99..fe89b68 100644
--- a/drivers/iio/light/cm3232.c
+++ b/drivers/iio/light/cm3232.c
@@ -417,11 +417,11 @@
 	{.compatible = "capella,cm3232"},
 	{}
 };
+MODULE_DEVICE_TABLE(of, cm3232_of_match);
 
 static struct i2c_driver cm3232_driver = {
 	.driver = {
 		.name	= "cm3232",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(cm3232_of_match),
 #ifdef CONFIG_PM_SLEEP
 		.pm	= &cm3232_pm_ops,
diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
index a1d4905..d823c11 100644
--- a/drivers/iio/light/cm3323.c
+++ b/drivers/iio/light/cm3323.c
@@ -29,7 +29,7 @@
 
 #define CM3323_CONF_SD_BIT	BIT(0) /* sensor disable */
 #define CM3323_CONF_AF_BIT	BIT(1) /* auto/manual force mode */
-#define CM3323_CONF_IT_MASK	(BIT(4) | BIT(5) | BIT(6))
+#define CM3323_CONF_IT_MASK	GENMASK(6, 4)
 #define CM3323_CONF_IT_SHIFT	4
 
 #define CM3323_INT_TIME_AVAILABLE "0.04 0.08 0.16 0.32 0.64 1.28"
@@ -133,9 +133,11 @@
 				return ret;
 
 			data->reg_conf = reg_conf;
+
 			return 0;
 		}
 	}
+
 	return -EINVAL;
 }
 
@@ -148,6 +150,7 @@
 
 	if (bits >= ARRAY_SIZE(cm3323_int_time))
 		return -EINVAL;
+
 	return bits;
 }
 
@@ -155,7 +158,7 @@
 			   struct iio_chan_spec const *chan, int *val,
 			   int *val2, long mask)
 {
-	int i, ret;
+	int ret;
 	struct cm3323_data *data = iio_priv(indio_dev);
 
 	switch (mask) {
@@ -172,14 +175,14 @@
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_INT_TIME:
 		mutex_lock(&data->mutex);
-		i = cm3323_get_it_bits(data);
-		if (i < 0) {
+		ret = cm3323_get_it_bits(data);
+		if (ret < 0) {
 			mutex_unlock(&data->mutex);
-			return -EINVAL;
+			return ret;
 		}
 
-		*val = cm3323_int_time[i].val;
-		*val2 = cm3323_int_time[i].val2;
+		*val = cm3323_int_time[ret].val;
+		*val2 = cm3323_int_time[ret].val2;
 		mutex_unlock(&data->mutex);
 
 		return IIO_VAL_INT_PLUS_MICRO;
@@ -243,11 +246,13 @@
 		dev_err(&client->dev, "cm3323 chip init failed\n");
 		return ret;
 	}
+
 	ret = iio_device_register(indio_dev);
 	if (ret < 0) {
 		dev_err(&client->dev, "failed to register iio dev\n");
 		goto err_init;
 	}
+
 	return 0;
 err_init:
 	cm3323_disable(indio_dev);
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 39fc67e..c8d7b5e 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -731,12 +731,12 @@
 	{ .compatible = "capella,cm36651" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, cm36651_of_match);
 
 static struct i2c_driver cm36651_driver = {
 	.driver = {
 		.name	= "cm36651",
 		.of_match_table = cm36651_of_match,
-		.owner	= THIS_MODULE,
 	},
 	.probe		= cm36651_probe,
 	.remove		= cm36651_remove,
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index 32b6449..6d41086 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1634,13 +1634,13 @@
 	{ .compatible = "sharp,gp2ap020a00f" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, gp2ap020a00f_of_match);
 #endif
 
 static struct i2c_driver gp2ap020a00f_driver = {
 	.driver = {
 		.name	= GP2A_I2C_NAME,
 		.of_match_table = of_match_ptr(gp2ap020a00f_of_match),
-		.owner	= THIS_MODULE,
 	},
 	.probe		= gp2ap020a00f_probe,
 	.remove		= gp2ap020a00f_remove,
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 0d24847..45ca056 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -284,8 +284,7 @@
 		goto error_free_dev_mem;
 	}
 
-	indio_dev->num_channels =
-				ARRAY_SIZE(prox_channels);
+	indio_dev->num_channels = ARRAY_SIZE(prox_channels);
 	indio_dev->dev.parent = &pdev->dev;
 	indio_dev->info = &prox_info;
 	indio_dev->name = name;
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index c82f4a6..e2945a2 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -197,9 +197,21 @@
 	return IRQ_HANDLED;
 }
 
+static IIO_CONST_ATTR(scale_available, "0.005722 0.152590");
+
+static struct attribute *isl29125_attributes[] = {
+	&iio_const_attr_scale_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group isl29125_attribute_group = {
+	.attrs = isl29125_attributes,
+};
+
 static const struct iio_info isl29125_info = {
 	.read_raw = isl29125_read_raw,
 	.write_raw = isl29125_write_raw,
+	.attrs = &isl29125_attribute_group,
 	.driver_module = THIS_MODULE,
 };
 
@@ -334,7 +346,6 @@
 	.driver = {
 		.name	= ISL29125_DRV_NAME,
 		.pm	= &isl29125_pm_ops,
-		.owner	= THIS_MODULE,
 	},
 	.probe		= isl29125_probe,
 	.remove		= isl29125_remove,
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 3a3af89..c4e8c6b 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -457,7 +457,6 @@
 	.driver = {
 		.name	= JSA1212_DRIVER_NAME,
 		.pm	= JSA1212_PM_OPS,
-		.owner	= THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(jsa1212_acpi_match),
 	},
 	.probe		= jsa1212_probe,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b5a0e66..809a961 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1551,7 +1551,6 @@
 		.name   = LTR501_DRV_NAME,
 		.pm	= &ltr501_pm_ops,
 		.acpi_match_table = ACPI_PTR(ltr_acpi_match),
-		.owner  = THIS_MODULE,
 	},
 	.probe  = ltr501_probe,
 	.remove	= ltr501_remove,
diff --git a/drivers/iio/light/opt3001.c b/drivers/iio/light/opt3001.c
new file mode 100644
index 0000000..923aa6a
--- /dev/null
+++ b/drivers/iio/light/opt3001.c
@@ -0,0 +1,804 @@
+/**
+ * opt3001.c - Texas Instruments OPT3001 Light Sensor
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ * Based on previous work from: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 of the License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define OPT3001_RESULT		0x00
+#define OPT3001_CONFIGURATION	0x01
+#define OPT3001_LOW_LIMIT	0x02
+#define OPT3001_HIGH_LIMIT	0x03
+#define OPT3001_MANUFACTURER_ID	0x7e
+#define OPT3001_DEVICE_ID	0x7f
+
+#define OPT3001_CONFIGURATION_RN_MASK	(0xf << 12)
+#define OPT3001_CONFIGURATION_RN_AUTO	(0xc << 12)
+
+#define OPT3001_CONFIGURATION_CT	BIT(11)
+
+#define OPT3001_CONFIGURATION_M_MASK	(3 << 9)
+#define OPT3001_CONFIGURATION_M_SHUTDOWN (0 << 9)
+#define OPT3001_CONFIGURATION_M_SINGLE	(1 << 9)
+#define OPT3001_CONFIGURATION_M_CONTINUOUS (2 << 9) /* also 3 << 9 */
+
+#define OPT3001_CONFIGURATION_OVF	BIT(8)
+#define OPT3001_CONFIGURATION_CRF	BIT(7)
+#define OPT3001_CONFIGURATION_FH	BIT(6)
+#define OPT3001_CONFIGURATION_FL	BIT(5)
+#define OPT3001_CONFIGURATION_L		BIT(4)
+#define OPT3001_CONFIGURATION_POL	BIT(3)
+#define OPT3001_CONFIGURATION_ME	BIT(2)
+
+#define OPT3001_CONFIGURATION_FC_MASK	(3 << 0)
+
+/* The end-of-conversion enable is located in the low-limit register */
+#define OPT3001_LOW_LIMIT_EOC_ENABLE	0xc000
+
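+/*
+ * The result and limit registers pack a 4-bit exponent into bits 15:12 and
+ * a 12-bit mantissa into bits 11:0; a reading corresponds to
+ * 0.01 * mantissa * 2^exponent lux (see opt3001_to_iio_ret() below).
+ */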
+#define OPT3001_REG_EXPONENT(n)		((n) >> 12)
+#define OPT3001_REG_MANTISSA(n)		((n) & 0xfff)
+
+/*
+ * Time to wait for conversion result to be ready. The device datasheet
+ * worst-case max value is 880ms. Add some slack to be on the safe side.
+ */
+#define OPT3001_RESULT_READY_TIMEOUT	msecs_to_jiffies(1000)
+
+struct opt3001 {
+	struct i2c_client	*client;
+	struct device		*dev;
+
+	struct mutex		lock;
+	u16			ok_to_ignore_lock:1;
+	u16			result_ready:1;
+	wait_queue_head_t	result_ready_queue;
+	u16			result;
+
+	u32			int_time;
+	u32			mode;
+
+	u16			high_thresh_mantissa;
+	u16			low_thresh_mantissa;
+
+	u8			high_thresh_exp;
+	u8			low_thresh_exp;
+};
+
+struct opt3001_scale {
+	int	val;
+	int	val2;
+};
+
+static const struct opt3001_scale opt3001_scales[] = {
+	{
+		.val = 40,
+		.val2 = 950000,
+	},
+	{
+		.val = 81,
+		.val2 = 900000,
+	},
+	{
+		.val = 163,
+		.val2 = 800000,
+	},
+	{
+		.val = 327,
+		.val2 = 600000,
+	},
+	{
+		.val = 655,
+		.val2 = 200000,
+	},
+	{
+		.val = 1310,
+		.val2 = 400000,
+	},
+	{
+		.val = 2620,
+		.val2 = 800000,
+	},
+	{
+		.val = 5241,
+		.val2 = 600000,
+	},
+	{
+		.val = 10483,
+		.val2 = 200000,
+	},
+	{
+		.val = 20966,
+		.val2 = 400000,
+	},
+	{
+		.val = 83865,
+		.val2 = 600000,
+	},
+};
+
+static int opt3001_find_scale(const struct opt3001 *opt, int val,
+		int val2, u8 *exponent)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(opt3001_scales); i++) {
+		const struct opt3001_scale *scale = &opt3001_scales[i];
+
+		/*
+		 * Combine the integer and micro parts for comparison
+		 * purposes. Use milli lux precision to avoid 32-bit integer
+		 * overflows.
+		 */
+		if ((val * 1000 + val2 / 1000) <=
+				(scale->val * 1000 + scale->val2 / 1000)) {
+			*exponent = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void opt3001_to_iio_ret(struct opt3001 *opt, u8 exponent,
+		u16 mantissa, int *val, int *val2)
+{
+	int lux;
+
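+	/* Work in milli-lux, then split into lux and micro-lux parts */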
+	lux = 10 * (mantissa << exponent);
+	*val = lux / 1000;
+	*val2 = (lux - (*val * 1000)) * 1000;
+}
+
+static void opt3001_set_mode(struct opt3001 *opt, u16 *reg, u16 mode)
+{
+	*reg &= ~OPT3001_CONFIGURATION_M_MASK;
+	*reg |= mode;
+	opt->mode = mode;
+}
+
+static IIO_CONST_ATTR_INT_TIME_AVAIL("0.1 0.8");
+
+static struct attribute *opt3001_attributes[] = {
+	&iio_const_attr_integration_time_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group opt3001_attribute_group = {
+	.attrs = opt3001_attributes,
+};
+
+static const struct iio_event_spec opt3001_event_spec[] = {
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_RISING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	},
+	{
+		.type = IIO_EV_TYPE_THRESH,
+		.dir = IIO_EV_DIR_FALLING,
+		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
+			BIT(IIO_EV_INFO_ENABLE),
+	},
+};
+
+static const struct iio_chan_spec opt3001_channels[] = {
+	{
+		.type = IIO_LIGHT,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+				BIT(IIO_CHAN_INFO_INT_TIME),
+		.event_spec = opt3001_event_spec,
+		.num_event_specs = ARRAY_SIZE(opt3001_event_spec),
+	},
+	IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static int opt3001_get_lux(struct opt3001 *opt, int *val, int *val2)
+{
+	int ret;
+	u16 mantissa;
+	u16 reg;
+	u8 exponent;
+	u16 value;
+
+	/*
+	 * Enable the end-of-conversion interrupt mechanism. Note that doing
+	 * so will overwrite the low-level limit value; however, we will
+	 * restore this value later on.
+	 */
+	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
+			OPT3001_LOW_LIMIT_EOC_ENABLE);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n",
+				OPT3001_LOW_LIMIT);
+		return ret;
+	}
+
+	/* Reset data-ready indicator flag (will be set in the IRQ routine) */
+	opt->result_ready = false;
+
+	/* Allow IRQ to access the device despite lock being set */
+	opt->ok_to_ignore_lock = true;
+
+	/* Configure for single-conversion mode and start a new conversion */
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+		goto err;
+	}
+
+	reg = ret;
+	opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SINGLE);
+
+	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+			reg);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n",
+				OPT3001_CONFIGURATION);
+		goto err;
+	}
+
+	/* Wait for the IRQ to indicate the conversion is complete */
+	ret = wait_event_timeout(opt->result_ready_queue, opt->result_ready,
+			OPT3001_RESULT_READY_TIMEOUT);
+
+err:
+	/* Disallow IRQ to access the device while lock is active */
+	opt->ok_to_ignore_lock = false;
+
+	if (ret == 0)
+		return -ETIMEDOUT;
+	else if (ret < 0)
+		return ret;
+
+	/*
+	 * Disable the end-of-conversion interrupt mechanism by restoring the
+	 * low-level limit value (clearing OPT3001_LOW_LIMIT_EOC_ENABLE). Note
+	 * that selectively clearing those enable bits would affect the actual
+	 * limit value due to bit-overlap and therefore can't be done.
+	 */
+	value = (opt->low_thresh_exp << 12) | opt->low_thresh_mantissa;
+	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_LOW_LIMIT,
+			value);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n",
+				OPT3001_LOW_LIMIT);
+		return ret;
+	}
+
+	exponent = OPT3001_REG_EXPONENT(opt->result);
+	mantissa = OPT3001_REG_MANTISSA(opt->result);
+
+	opt3001_to_iio_ret(opt, exponent, mantissa, val, val2);
+
+	return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int opt3001_get_int_time(struct opt3001 *opt, int *val, int *val2)
+{
+	*val = 0;
+	*val2 = opt->int_time;
+
+	return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int opt3001_set_int_time(struct opt3001 *opt, int time)
+{
+	int ret;
+	u16 reg;
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+		return ret;
+	}
+
+	reg = ret;
+
+	switch (time) {
+	case 100000:
+		reg &= ~OPT3001_CONFIGURATION_CT;
+		opt->int_time = 100000;
+		break;
+	case 800000:
+		reg |= OPT3001_CONFIGURATION_CT;
+		opt->int_time = 800000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+			reg);
+}
+
+static int opt3001_read_raw(struct iio_dev *iio,
+		struct iio_chan_spec const *chan, int *val, int *val2,
+		long mask)
+{
+	struct opt3001 *opt = iio_priv(iio);
+	int ret;
+
+	if (opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS)
+		return -EBUSY;
+
+	if (chan->type != IIO_LIGHT)
+		return -EINVAL;
+
+	mutex_lock(&opt->lock);
+
+	switch (mask) {
+	case IIO_CHAN_INFO_PROCESSED:
+		ret = opt3001_get_lux(opt, val, val2);
+		break;
+	case IIO_CHAN_INFO_INT_TIME:
+		ret = opt3001_get_int_time(opt, val, val2);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&opt->lock);
+
+	return ret;
+}
+
+static int opt3001_write_raw(struct iio_dev *iio,
+		struct iio_chan_spec const *chan, int val, int val2,
+		long mask)
+{
+	struct opt3001 *opt = iio_priv(iio);
+	int ret;
+
+	if (opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS)
+		return -EBUSY;
+
+	if (chan->type != IIO_LIGHT)
+		return -EINVAL;
+
+	if (mask != IIO_CHAN_INFO_INT_TIME)
+		return -EINVAL;
+
+	if (val != 0)
+		return -EINVAL;
+
+	mutex_lock(&opt->lock);
+	ret = opt3001_set_int_time(opt, val2);
+	mutex_unlock(&opt->lock);
+
+	return ret;
+}
+
+static int opt3001_read_event_value(struct iio_dev *iio,
+		const struct iio_chan_spec *chan, enum iio_event_type type,
+		enum iio_event_direction dir, enum iio_event_info info,
+		int *val, int *val2)
+{
+	struct opt3001 *opt = iio_priv(iio);
+	int ret = IIO_VAL_INT_PLUS_MICRO;
+
+	mutex_lock(&opt->lock);
+
+	switch (dir) {
+	case IIO_EV_DIR_RISING:
+		opt3001_to_iio_ret(opt, opt->high_thresh_exp,
+				opt->high_thresh_mantissa, val, val2);
+		break;
+	case IIO_EV_DIR_FALLING:
+		opt3001_to_iio_ret(opt, opt->low_thresh_exp,
+				opt->low_thresh_mantissa, val, val2);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&opt->lock);
+
+	return ret;
+}
+
+static int opt3001_write_event_value(struct iio_dev *iio,
+		const struct iio_chan_spec *chan, enum iio_event_type type,
+		enum iio_event_direction dir, enum iio_event_info info,
+		int val, int val2)
+{
+	struct opt3001 *opt = iio_priv(iio);
+	int ret;
+
+	u16 mantissa;
+	u16 value;
+	u16 reg;
+
+	u8 exponent;
+
+	if (val < 0)
+		return -EINVAL;
+
+	mutex_lock(&opt->lock);
+
+	ret = opt3001_find_scale(opt, val, val2, &exponent);
+	if (ret < 0) {
+		dev_err(opt->dev, "can't find scale for %d.%06u\n", val, val2);
+		goto err;
+	}
+
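+	/*
+	 * Convert the requested threshold to milli-lux, then to mantissa
+	 * counts of (0.01 * 2^exponent) lux each.
+	 */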
+	mantissa = (((val * 1000) + (val2 / 1000)) / 10) >> exponent;
+	value = (exponent << 12) | mantissa;
+
+	switch (dir) {
+	case IIO_EV_DIR_RISING:
+		reg = OPT3001_HIGH_LIMIT;
+		opt->high_thresh_mantissa = mantissa;
+		opt->high_thresh_exp = exponent;
+		break;
+	case IIO_EV_DIR_FALLING:
+		reg = OPT3001_LOW_LIMIT;
+		opt->low_thresh_mantissa = mantissa;
+		opt->low_thresh_exp = exponent;
+		break;
+	default:
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = i2c_smbus_write_word_swapped(opt->client, reg, value);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n", reg);
+		goto err;
+	}
+
+err:
+	mutex_unlock(&opt->lock);
+
+	return ret;
+}
+
+static int opt3001_read_event_config(struct iio_dev *iio,
+		const struct iio_chan_spec *chan, enum iio_event_type type,
+		enum iio_event_direction dir)
+{
+	struct opt3001 *opt = iio_priv(iio);
+
+	return opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS;
+}
+
+static int opt3001_write_event_config(struct iio_dev *iio,
+		const struct iio_chan_spec *chan, enum iio_event_type type,
+		enum iio_event_direction dir, int state)
+{
+	struct opt3001 *opt = iio_priv(iio);
+	int ret;
+	u16 mode;
+	u16 reg;
+
+	if (state && opt->mode == OPT3001_CONFIGURATION_M_CONTINUOUS)
+		return 0;
+
+	if (!state && opt->mode == OPT3001_CONFIGURATION_M_SHUTDOWN)
+		return 0;
+
+	mutex_lock(&opt->lock);
+
+	mode = state ? OPT3001_CONFIGURATION_M_CONTINUOUS
+		: OPT3001_CONFIGURATION_M_SHUTDOWN;
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+		goto err;
+	}
+
+	reg = ret;
+	opt3001_set_mode(opt, &reg, mode);
+
+	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+			reg);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n",
+				OPT3001_CONFIGURATION);
+		goto err;
+	}
+
+err:
+	mutex_unlock(&opt->lock);
+
+	return ret;
+}
+
+static const struct iio_info opt3001_info = {
+	.driver_module = THIS_MODULE,
+	.attrs = &opt3001_attribute_group,
+	.read_raw = opt3001_read_raw,
+	.write_raw = opt3001_write_raw,
+	.read_event_value = opt3001_read_event_value,
+	.write_event_value = opt3001_write_event_value,
+	.read_event_config = opt3001_read_event_config,
+	.write_event_config = opt3001_write_event_config,
+};
+
+static int opt3001_read_id(struct opt3001 *opt)
+{
+	char manufacturer[2];
+	u16 device_id;
+	int ret;
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_MANUFACTURER_ID);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_MANUFACTURER_ID);
+		return ret;
+	}
+
+	manufacturer[0] = ret >> 8;
+	manufacturer[1] = ret & 0xff;
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_DEVICE_ID);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_DEVICE_ID);
+		return ret;
+	}
+
+	device_id = ret;
+
+	dev_info(opt->dev, "Found %c%c OPT%04x\n", manufacturer[0],
+			manufacturer[1], device_id);
+
+	return 0;
+}
+
+static int opt3001_configure(struct opt3001 *opt)
+{
+	int ret;
+	u16 reg;
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+		return ret;
+	}
+
+	reg = ret;
+
+	/* Enable automatic full-scale setting mode */
+	reg &= ~OPT3001_CONFIGURATION_RN_MASK;
+	reg |= OPT3001_CONFIGURATION_RN_AUTO;
+
+	/* Reflect status of the device's integration time setting */
+	if (reg & OPT3001_CONFIGURATION_CT)
+		opt->int_time = 800000;
+	else
+		opt->int_time = 100000;
+
+	/* Ensure device is in shutdown initially */
+	opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
+
+	/* Configure for latched window-style comparison operation */
+	reg |= OPT3001_CONFIGURATION_L;
+	reg &= ~OPT3001_CONFIGURATION_POL;
+	reg &= ~OPT3001_CONFIGURATION_ME;
+	reg &= ~OPT3001_CONFIGURATION_FC_MASK;
+
+	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+			reg);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n",
+				OPT3001_CONFIGURATION);
+		return ret;
+	}
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_LOW_LIMIT);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_LOW_LIMIT);
+		return ret;
+	}
+
+	opt->low_thresh_mantissa = OPT3001_REG_MANTISSA(ret);
+	opt->low_thresh_exp = OPT3001_REG_EXPONENT(ret);
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_HIGH_LIMIT);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_HIGH_LIMIT);
+		return ret;
+	}
+
+	opt->high_thresh_mantissa = OPT3001_REG_MANTISSA(ret);
+	opt->high_thresh_exp = OPT3001_REG_EXPONENT(ret);
+
+	return 0;
+}
+
+static irqreturn_t opt3001_irq(int irq, void *_iio)
+{
+	struct iio_dev *iio = _iio;
+	struct opt3001 *opt = iio_priv(iio);
+	int ret;
+
+	if (!opt->ok_to_ignore_lock)
+		mutex_lock(&opt->lock);
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+		goto out;
+	}
+
+	if ((ret & OPT3001_CONFIGURATION_M_MASK) ==
+			OPT3001_CONFIGURATION_M_CONTINUOUS) {
+		if (ret & OPT3001_CONFIGURATION_FH)
+			iio_push_event(iio,
+					IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+							IIO_EV_TYPE_THRESH,
+							IIO_EV_DIR_RISING),
+					iio_get_time_ns());
+		if (ret & OPT3001_CONFIGURATION_FL)
+			iio_push_event(iio,
+					IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+							IIO_EV_TYPE_THRESH,
+							IIO_EV_DIR_FALLING),
+					iio_get_time_ns());
+	} else if (ret & OPT3001_CONFIGURATION_CRF) {
+		ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
+		if (ret < 0) {
+			dev_err(opt->dev, "failed to read register %02x\n",
+					OPT3001_RESULT);
+			goto out;
+		}
+		opt->result = ret;
+		opt->result_ready = true;
+		wake_up(&opt->result_ready_queue);
+	}
+
+out:
+	if (!opt->ok_to_ignore_lock)
+		mutex_unlock(&opt->lock);
+
+	return IRQ_HANDLED;
+}
+
+static int opt3001_probe(struct i2c_client *client,
+		const struct i2c_device_id *id)
+{
+	struct device *dev = &client->dev;
+
+	struct iio_dev *iio;
+	struct opt3001 *opt;
+	int irq = client->irq;
+	int ret;
+
+	iio = devm_iio_device_alloc(dev, sizeof(*opt));
+	if (!iio)
+		return -ENOMEM;
+
+	opt = iio_priv(iio);
+	opt->client = client;
+	opt->dev = dev;
+
+	mutex_init(&opt->lock);
+	init_waitqueue_head(&opt->result_ready_queue);
+	i2c_set_clientdata(client, iio);
+
+	ret = opt3001_read_id(opt);
+	if (ret)
+		return ret;
+
+	ret = opt3001_configure(opt);
+	if (ret)
+		return ret;
+
+	iio->name = client->name;
+	iio->channels = opt3001_channels;
+	iio->num_channels = ARRAY_SIZE(opt3001_channels);
+	iio->dev.parent = dev;
+	iio->modes = INDIO_DIRECT_MODE;
+	iio->info = &opt3001_info;
+
+	ret = devm_iio_device_register(dev, iio);
+	if (ret) {
+		dev_err(dev, "failed to register IIO device\n");
+		return ret;
+	}
+
+	ret = request_threaded_irq(irq, NULL, opt3001_irq,
+			IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+			"opt3001", iio);
+	if (ret) {
+		dev_err(dev, "failed to request IRQ #%d\n", irq);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int opt3001_remove(struct i2c_client *client)
+{
+	struct iio_dev *iio = i2c_get_clientdata(client);
+	struct opt3001 *opt = iio_priv(iio);
+	int ret;
+	u16 reg;
+
+	free_irq(client->irq, iio);
+
+	ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_CONFIGURATION);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to read register %02x\n",
+				OPT3001_CONFIGURATION);
+		return ret;
+	}
+
+	reg = ret;
+	opt3001_set_mode(opt, &reg, OPT3001_CONFIGURATION_M_SHUTDOWN);
+
+	ret = i2c_smbus_write_word_swapped(opt->client, OPT3001_CONFIGURATION,
+			reg);
+	if (ret < 0) {
+		dev_err(opt->dev, "failed to write register %02x\n",
+				OPT3001_CONFIGURATION);
+		return ret;
+	}
+
+	return 0;
+}
+
+static const struct i2c_device_id opt3001_id[] = {
+	{ "opt3001", 0 },
+	{ } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(i2c, opt3001_id);
+
+static const struct of_device_id opt3001_of_match[] = {
+	{ .compatible = "ti,opt3001" },
+	{ }
+};
+
+static struct i2c_driver opt3001_driver = {
+	.probe = opt3001_probe,
+	.remove = opt3001_remove,
+	.id_table = opt3001_id,
+
+	.driver = {
+		.name = "opt3001",
+		.of_match_table = of_match_ptr(opt3001_of_match),
+		.owner = THIS_MODULE,
+	},
+};
+
+module_i2c_driver(opt3001_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments OPT3001 Light Sensor Driver");
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
new file mode 100644
index 0000000..45f7bde
--- /dev/null
+++ b/drivers/iio/light/pa12203001.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Driver for TXC PA12203001 Proximity and Ambient Light Sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ * To do: Interrupt support.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define PA12203001_DRIVER_NAME	"pa12203001"
+
+#define PA12203001_REG_CFG0		0x00
+#define PA12203001_REG_CFG1		0x01
+#define PA12203001_REG_CFG2		0x02
+#define PA12203001_REG_CFG3		0x03
+
+#define PA12203001_REG_ADL		0x0b
+#define PA12203001_REG_PDH		0x0e
+
+#define PA12203001_REG_POFS		0x10
+#define PA12203001_REG_PSET		0x11
+
+#define PA12203001_ALS_EN_MASK		BIT(0)
+#define PA12203001_PX_EN_MASK		BIT(1)
+#define PA12203001_PX_NORMAL_MODE_MASK		GENMASK(7, 6)
+#define PA12203001_AFSR_MASK		GENMASK(5, 4)
+#define PA12203001_AFSR_SHIFT		4
+
+#define PA12203001_PSCAN			0x03
+
+/* als range 31000, ps, als disabled */
+#define PA12203001_REG_CFG0_DEFAULT		0x30
+
+/* led current: 100 mA */
+#define PA12203001_REG_CFG1_DEFAULT		0x20
+
+/* ps mode: normal, interrupts not active */
+#define PA12203001_REG_CFG2_DEFAULT		0xcc
+
+#define PA12203001_REG_CFG3_DEFAULT		0x00
+
+#define PA12203001_SLEEP_DELAY_MS		3000
+
+#define PA12203001_CHIP_ENABLE		0xff
+#define PA12203001_CHIP_DISABLE		0x00
+
+/* available scales: corresponding to [500, 4000, 7000, 31000]  lux */
+static const int pa12203001_scales[] = { 7629, 61036, 106813, 473029};
+
+struct pa12203001_data {
+	struct i2c_client *client;
+
+	/* protect device states */
+	struct mutex lock;
+
+	bool als_enabled;
+	bool px_enabled;
+	bool als_needs_enable;
+	bool px_needs_enable;
+
+	struct regmap *map;
+};
+
+static const struct {
+	u8 reg;
+	u8 val;
+} regvals[] = {
+	{PA12203001_REG_CFG0, PA12203001_REG_CFG0_DEFAULT},
+	{PA12203001_REG_CFG1, PA12203001_REG_CFG1_DEFAULT},
+	{PA12203001_REG_CFG2, PA12203001_REG_CFG2_DEFAULT},
+	{PA12203001_REG_CFG3, PA12203001_REG_CFG3_DEFAULT},
+	{PA12203001_REG_PSET, PA12203001_PSCAN},
+};
+
+static IIO_CONST_ATTR(in_illuminance_scale_available,
+		      "0.007629 0.061036 0.106813 0.473029");
+
+static struct attribute *pa12203001_attrs[] = {
+	&iio_const_attr_in_illuminance_scale_available.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group pa12203001_attr_group = {
+	.attrs = pa12203001_attrs,
+};
+
+static const struct iio_chan_spec pa12203001_channels[] = {
+	{
+		.type = IIO_LIGHT,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_SCALE),
+	},
+	{
+		.type = IIO_PROXIMITY,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+	}
+};
+
+static const struct regmap_range pa12203001_volatile_regs_ranges[] = {
+	regmap_reg_range(PA12203001_REG_ADL, PA12203001_REG_ADL + 1),
+	regmap_reg_range(PA12203001_REG_PDH, PA12203001_REG_PDH),
+};
+
+static const struct regmap_access_table pa12203001_volatile_regs = {
+	.yes_ranges = pa12203001_volatile_regs_ranges,
+	.n_yes_ranges = ARRAY_SIZE(pa12203001_volatile_regs_ranges),
+};
+
+static const struct regmap_config pa12203001_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = PA12203001_REG_PSET,
+	.cache_type = REGCACHE_RBTREE,
+	.volatile_table = &pa12203001_volatile_regs,
+};
+
+static inline int pa12203001_als_enable(struct pa12203001_data *data, u8 enable)
+{
+	int ret;
+
+	ret = regmap_update_bits(data->map, PA12203001_REG_CFG0,
+				 PA12203001_ALS_EN_MASK, enable);
+	if (ret < 0)
+		return ret;
+
+	data->als_enabled = !!enable;
+
+	return 0;
+}
+
+static inline int pa12203001_px_enable(struct pa12203001_data *data, u8 enable)
+{
+	int ret;
+
+	ret = regmap_update_bits(data->map, PA12203001_REG_CFG0,
+				 PA12203001_PX_EN_MASK, enable);
+	if (ret < 0)
+		return ret;
+
+	data->px_enabled = !!enable;
+
+	return 0;
+}
+
+static int pa12203001_set_power_state(struct pa12203001_data *data, bool on,
+				      u8 mask)
+{
+#ifdef CONFIG_PM
+	int ret;
+
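+	/*
+	 * If the other function is already enabled, turn the requested one
+	 * on right away; otherwise just flag it and let
+	 * pa12203001_runtime_resume() enable it when the chip comes out of
+	 * runtime suspend below.
+	 */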
+	if (on && (mask & PA12203001_ALS_EN_MASK)) {
+		mutex_lock(&data->lock);
+		if (data->px_enabled) {
+			ret = pa12203001_als_enable(data,
+						    PA12203001_ALS_EN_MASK);
+			if (ret < 0)
+				goto err;
+		} else {
+			data->als_needs_enable = true;
+		}
+		mutex_unlock(&data->lock);
+	}
+
+	if (on && (mask & PA12203001_PX_EN_MASK)) {
+		mutex_lock(&data->lock);
+		if (data->als_enabled) {
+			ret = pa12203001_px_enable(data, PA12203001_PX_EN_MASK);
+			if (ret < 0)
+				goto err;
+		} else {
+			data->px_needs_enable = true;
+		}
+		mutex_unlock(&data->lock);
+	}
+
+	if (on) {
+		ret = pm_runtime_get_sync(&data->client->dev);
+		if (ret < 0)
+			pm_runtime_put_noidle(&data->client->dev);
+
+	} else {
+		pm_runtime_mark_last_busy(&data->client->dev);
+		ret = pm_runtime_put_autosuspend(&data->client->dev);
+	}
+
+	return ret;
+
+err:
+	mutex_unlock(&data->lock);
+	return ret;
+
+#endif
+	return 0;
+}
+
+static int pa12203001_read_raw(struct iio_dev *indio_dev,
+			       struct iio_chan_spec const *chan, int *val,
+			       int *val2, long mask)
+{
+	struct pa12203001_data *data = iio_priv(indio_dev);
+	int ret;
+	u8 dev_mask;
+	unsigned int reg_byte;
+	__le16 reg_word;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		switch (chan->type) {
+		case IIO_LIGHT:
+			dev_mask = PA12203001_ALS_EN_MASK;
+			ret = pa12203001_set_power_state(data, true, dev_mask);
+			if (ret < 0)
+				return ret;
+			/*
+			 * ALS ADC value is stored in registers
+			 * PA12203001_REG_ADL and PA12203001_REG_ADL + 1.
+			 */
+			ret = regmap_bulk_read(data->map, PA12203001_REG_ADL,
+					       &reg_word, 2);
+			if (ret < 0)
+				goto reg_err;
+
+			*val = le16_to_cpu(reg_word);
+			ret = pa12203001_set_power_state(data, false, dev_mask);
+			if (ret < 0)
+				return ret;
+			break;
+		case IIO_PROXIMITY:
+			dev_mask = PA12203001_PX_EN_MASK;
+			ret = pa12203001_set_power_state(data, true, dev_mask);
+			if (ret < 0)
+				return ret;
+			ret = regmap_read(data->map, PA12203001_REG_PDH,
+					  &reg_byte);
+			if (ret < 0)
+				goto reg_err;
+
+			*val = reg_byte;
+			ret = pa12203001_set_power_state(data, false, dev_mask);
+			if (ret < 0)
+				return ret;
+			break;
+		default:
+			return -EINVAL;
+		}
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		ret = regmap_read(data->map, PA12203001_REG_CFG0, &reg_byte);
+		if (ret < 0)
+			return ret;
+		*val = 0;
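+		/* The AFSR bits in CFG0 select one of the four ALS ranges */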
+		reg_byte = (reg_byte & PA12203001_AFSR_MASK);
+		*val2 = pa12203001_scales[reg_byte >> 4];
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+
+reg_err:
+	pa12203001_set_power_state(data, false, dev_mask);
+	return ret;
+}
+
+static int pa12203001_write_raw(struct iio_dev *indio_dev,
+				struct iio_chan_spec const *chan, int val,
+				int val2, long mask)
+{
+	struct pa12203001_data *data = iio_priv(indio_dev);
+	int i, ret, new_val;
+	unsigned int reg_byte;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		ret = regmap_read(data->map, PA12203001_REG_CFG0, &reg_byte);
+		if (val != 0 || ret < 0)
+			return -EINVAL;
+		for (i = 0; i < ARRAY_SIZE(pa12203001_scales); i++) {
+			if (val2 == pa12203001_scales[i]) {
+				new_val = i << PA12203001_AFSR_SHIFT;
+				return regmap_update_bits(data->map,
+							  PA12203001_REG_CFG0,
+							  PA12203001_AFSR_MASK,
+							  new_val);
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct iio_info pa12203001_info = {
+	.driver_module	= THIS_MODULE,
+	.read_raw = pa12203001_read_raw,
+	.write_raw = pa12203001_write_raw,
+	.attrs = &pa12203001_attr_group,
+};
+
+static int pa12203001_init(struct iio_dev *indio_dev)
+{
+	struct pa12203001_data *data = iio_priv(indio_dev);
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(regvals); i++) {
+		ret = regmap_write(data->map, regvals[i].reg, regvals[i].val);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int pa12203001_power_chip(struct iio_dev *indio_dev, u8 state)
+{
+	struct pa12203001_data *data = iio_priv(indio_dev);
+	int ret;
+
+	mutex_lock(&data->lock);
+	ret = pa12203001_als_enable(data, state);
+	if (ret < 0)
+		goto out;
+
+	ret = pa12203001_px_enable(data, state);
+
+out:
+	mutex_unlock(&data->lock);
+	return ret;
+}
+
+static int pa12203001_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	struct pa12203001_data *data;
+	struct iio_dev *indio_dev;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev,
+					  sizeof(struct pa12203001_data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+
+	data->map = devm_regmap_init_i2c(client, &pa12203001_regmap_config);
+	if (IS_ERR(data->map))
+		return PTR_ERR(data->map);
+
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &pa12203001_info;
+	indio_dev->name = PA12203001_DRIVER_NAME;
+	indio_dev->channels = pa12203001_channels;
+	indio_dev->num_channels = ARRAY_SIZE(pa12203001_channels);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = pa12203001_init(indio_dev);
+	if (ret < 0)
+		return ret;
+
+	ret = pa12203001_power_chip(indio_dev, PA12203001_CHIP_ENABLE);
+	if (ret < 0)
+		return ret;
+
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret < 0) {
+		pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+		return ret;
+	}
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev,
+					 PA12203001_SLEEP_DELAY_MS);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	return iio_device_register(indio_dev);
+}
+
+static int pa12203001_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	iio_device_unregister(indio_dev);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+
+	return pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+}
+
+#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM)
+static int pa12203001_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+	return pa12203001_power_chip(indio_dev, PA12203001_CHIP_DISABLE);
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int pa12203001_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+
+	return pa12203001_power_chip(indio_dev, PA12203001_CHIP_ENABLE);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int pa12203001_runtime_resume(struct device *dev)
+{
+	struct pa12203001_data *data;
+
+	data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+	mutex_lock(&data->lock);
+	if (data->als_needs_enable) {
+		pa12203001_als_enable(data, PA12203001_ALS_EN_MASK);
+		data->als_needs_enable = false;
+	}
+	if (data->px_needs_enable) {
+		pa12203001_px_enable(data, PA12203001_PX_EN_MASK);
+		data->px_needs_enable = false;
+	}
+	mutex_unlock(&data->lock);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops pa12203001_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(pa12203001_suspend, pa12203001_resume)
+	SET_RUNTIME_PM_OPS(pa12203001_suspend, pa12203001_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id pa12203001_acpi_match[] = {
+	{ "TXCPA122", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(acpi, pa12203001_acpi_match);
+
+static const struct i2c_device_id pa12203001_id[] = {
+	{"txcpa122", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, pa12203001_id);
+
+static struct i2c_driver pa12203001_driver = {
+	.driver = {
+		.name = PA12203001_DRIVER_NAME,
+		.pm = &pa12203001_pm_ops,
+		.acpi_match_table = ACPI_PTR(pa12203001_acpi_match),
+	},
+	.probe = pa12203001_probe,
+	.remove = pa12203001_remove,
+	.id_table = pa12203001_id,
+
+};
+module_i2c_driver(pa12203001_driver);
+
+MODULE_AUTHOR("Adriana Reus <adriana.reus@intel.com>");
+MODULE_DESCRIPTION("Driver for TXC PA12203001 Proximity and Light Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
new file mode 100644
index 0000000..4b75bb0
--- /dev/null
+++ b/drivers/iio/light/rpr0521.c
@@ -0,0 +1,615 @@
+/*
+ * RPR-0521 ROHM Ambient Light and Proximity Sensor
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License.  See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for RPR-0521RS (7-bit I2C slave address 0x38).
+ *
+ * TODO: illuminance channel, PM support, buffer
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/pm_runtime.h>
+
+#define RPR0521_REG_SYSTEM_CTRL		0x40
+#define RPR0521_REG_MODE_CTRL		0x41
+#define RPR0521_REG_ALS_CTRL		0x42
+#define RPR0521_REG_PXS_CTRL		0x43
+#define RPR0521_REG_PXS_DATA		0x44 /* 16-bit, little endian */
+#define RPR0521_REG_ALS_DATA0		0x46 /* 16-bit, little endian */
+#define RPR0521_REG_ALS_DATA1		0x48 /* 16-bit, little endian */
+#define RPR0521_REG_ID			0x92
+
+#define RPR0521_MODE_ALS_MASK		BIT(7)
+#define RPR0521_MODE_PXS_MASK		BIT(6)
+#define RPR0521_MODE_MEAS_TIME_MASK	GENMASK(3, 0)
+#define RPR0521_ALS_DATA0_GAIN_MASK	GENMASK(5, 4)
+#define RPR0521_ALS_DATA0_GAIN_SHIFT	4
+#define RPR0521_ALS_DATA1_GAIN_MASK	GENMASK(3, 2)
+#define RPR0521_ALS_DATA1_GAIN_SHIFT	2
+#define RPR0521_PXS_GAIN_MASK		GENMASK(5, 4)
+#define RPR0521_PXS_GAIN_SHIFT		4
+
+#define RPR0521_MODE_ALS_ENABLE		BIT(7)
+#define RPR0521_MODE_ALS_DISABLE	0x00
+#define RPR0521_MODE_PXS_ENABLE		BIT(6)
+#define RPR0521_MODE_PXS_DISABLE	0x00
+
+#define RPR0521_MANUFACT_ID		0xE0
+#define RPR0521_DEFAULT_MEAS_TIME	0x06 /* ALS - 100ms, PXS - 100ms */
+
+#define RPR0521_DRV_NAME		"RPR0521"
+#define RPR0521_REGMAP_NAME		"rpr0521_regmap"
+
+#define RPR0521_SLEEP_DELAY_MS	2000
+
+#define RPR0521_ALS_SCALE_AVAIL "0.007812 0.015625 0.5 1"
+#define RPR0521_PXS_SCALE_AVAIL "0.125 0.5 1"
+
+struct rpr0521_gain {
+	int scale;
+	int uscale;
+};
+
+static const struct rpr0521_gain rpr0521_als_gain[4] = {
+	{1, 0},		/* x1 */
+	{0, 500000},	/* x2 */
+	{0, 15625},	/* x64 */
+	{0, 7812},	/* x128 */
+};
+
+static const struct rpr0521_gain rpr0521_pxs_gain[3] = {
+	{1, 0},		/* x1 */
+	{0, 500000},	/* x2 */
+	{0, 125000},	/* x4 */
+};
+
+enum rpr0521_channel {
+	RPR0521_CHAN_ALS_DATA0,
+	RPR0521_CHAN_ALS_DATA1,
+	RPR0521_CHAN_PXS,
+};
+
+struct rpr0521_reg_desc {
+	u8 address;
+	u8 device_mask;
+};
+
+static const struct rpr0521_reg_desc rpr0521_data_reg[] = {
+	[RPR0521_CHAN_ALS_DATA0] = {
+		.address	= RPR0521_REG_ALS_DATA0,
+		.device_mask	= RPR0521_MODE_ALS_MASK,
+	},
+	[RPR0521_CHAN_ALS_DATA1] = {
+		.address	= RPR0521_REG_ALS_DATA1,
+		.device_mask	= RPR0521_MODE_ALS_MASK,
+	},
+	[RPR0521_CHAN_PXS]	= {
+		.address	= RPR0521_REG_PXS_DATA,
+		.device_mask	= RPR0521_MODE_PXS_MASK,
+	},
+};
+
+static const struct rpr0521_gain_info {
+	u8 reg;
+	u8 mask;
+	u8 shift;
+	const struct rpr0521_gain *gain;
+	int size;
+} rpr0521_gain[] = {
+	[RPR0521_CHAN_ALS_DATA0] = {
+		.reg	= RPR0521_REG_ALS_CTRL,
+		.mask	= RPR0521_ALS_DATA0_GAIN_MASK,
+		.shift	= RPR0521_ALS_DATA0_GAIN_SHIFT,
+		.gain	= rpr0521_als_gain,
+		.size	= ARRAY_SIZE(rpr0521_als_gain),
+	},
+	[RPR0521_CHAN_ALS_DATA1] = {
+		.reg	= RPR0521_REG_ALS_CTRL,
+		.mask	= RPR0521_ALS_DATA1_GAIN_MASK,
+		.shift	= RPR0521_ALS_DATA1_GAIN_SHIFT,
+		.gain	= rpr0521_als_gain,
+		.size	= ARRAY_SIZE(rpr0521_als_gain),
+	},
+	[RPR0521_CHAN_PXS] = {
+		.reg	= RPR0521_REG_PXS_CTRL,
+		.mask	= RPR0521_PXS_GAIN_MASK,
+		.shift	= RPR0521_PXS_GAIN_SHIFT,
+		.gain	= rpr0521_pxs_gain,
+		.size	= ARRAY_SIZE(rpr0521_pxs_gain),
+	},
+};
+
+struct rpr0521_data {
+	struct i2c_client *client;
+
+	/* protect device parameter updates (e.g. state, gain) */
+	struct mutex lock;
+
+	/* device active status */
+	bool als_dev_en;
+	bool pxs_dev_en;
+
+	/* optimize runtime pm ops - enable device only if needed */
+	bool als_ps_need_en;
+	bool pxs_ps_need_en;
+
+	struct regmap *regmap;
+};
+
+static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL);
+static IIO_CONST_ATTR(in_proximity_scale_available, RPR0521_PXS_SCALE_AVAIL);
+
+static struct attribute *rpr0521_attributes[] = {
+	&iio_const_attr_in_intensity_scale_available.dev_attr.attr,
+	&iio_const_attr_in_proximity_scale_available.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group rpr0521_attribute_group = {
+	.attrs = rpr0521_attributes,
+};
+
+static const struct iio_chan_spec rpr0521_channels[] = {
+	{
+		.type = IIO_INTENSITY,
+		.modified = 1,
+		.address = RPR0521_CHAN_ALS_DATA0,
+		.channel2 = IIO_MOD_LIGHT_BOTH,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE),
+	},
+	{
+		.type = IIO_INTENSITY,
+		.modified = 1,
+		.address = RPR0521_CHAN_ALS_DATA1,
+		.channel2 = IIO_MOD_LIGHT_IR,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE),
+	},
+	{
+		.type = IIO_PROXIMITY,
+		.address = RPR0521_CHAN_PXS,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+			BIT(IIO_CHAN_INFO_SCALE),
+	}
+};
+
+static int rpr0521_als_enable(struct rpr0521_data *data, u8 status)
+{
+	int ret;
+
+	ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+				 RPR0521_MODE_ALS_MASK,
+				 status);
+	if (ret < 0)
+		return ret;
+
+	data->als_dev_en = true;
+
+	return 0;
+}
+
+static int rpr0521_pxs_enable(struct rpr0521_data *data, u8 status)
+{
+	int ret;
+
+	ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+				 RPR0521_MODE_PXS_MASK,
+				 status);
+	if (ret < 0)
+		return ret;
+
+	data->pxs_dev_en = true;
+
+	return 0;
+}
+
+/**
+ * rpr0521_set_power_state - handle runtime PM state and sensor enable status
+ *
+ * @data: rpr0521 device private data
+ * @on: state to be set for devices in @device_mask
+ * @device_mask: bitmask specifying for which device we need to update @on state
+ *
+ * We rely on rpr0521_runtime_resume to enable our @device_mask devices, but
+ * if (for example) PXS was already enabled (pxs_dev_en = true) by a previous
+ * call to rpr0521_runtime_resume and we now want to enable ALS, we MUST set
+ * the ALS enable bit of RPR0521_REG_MODE_CTRL here, because
+ * rpr0521_runtime_resume will not be called a second time.
+ */
+static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
+				   u8 device_mask)
+{
+#ifdef CONFIG_PM
+	int ret;
+	u8 update_mask = 0;
+
+	if (device_mask & RPR0521_MODE_ALS_MASK) {
+		if (on && !data->als_ps_need_en && data->pxs_dev_en)
+			update_mask |= RPR0521_MODE_ALS_MASK;
+		else
+			data->als_ps_need_en = on;
+	}
+
+	if (device_mask & RPR0521_MODE_PXS_MASK) {
+		if (on && !data->pxs_ps_need_en && data->als_dev_en)
+			update_mask |= RPR0521_MODE_PXS_MASK;
+		else
+			data->pxs_ps_need_en = on;
+	}
+
+	if (update_mask) {
+		ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+					 update_mask, update_mask);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (on) {
+		ret = pm_runtime_get_sync(&data->client->dev);
+	} else {
+		pm_runtime_mark_last_busy(&data->client->dev);
+		ret = pm_runtime_put_autosuspend(&data->client->dev);
+	}
+	if (ret < 0) {
+		dev_err(&data->client->dev,
+			"Failed: rpr0521_set_power_state for %d, ret %d\n",
+			on, ret);
+		if (on)
+			pm_runtime_put_noidle(&data->client->dev);
+
+		return ret;
+	}
+#endif
+	return 0;
+}
+
+static int rpr0521_get_gain(struct rpr0521_data *data, int chan,
+			    int *val, int *val2)
+{
+	int ret, reg, idx;
+
+	ret = regmap_read(data->regmap, rpr0521_gain[chan].reg, &reg);
+	if (ret < 0)
+		return ret;
+
+	idx = (rpr0521_gain[chan].mask & reg) >> rpr0521_gain[chan].shift;
+	*val = rpr0521_gain[chan].gain[idx].scale;
+	*val2 = rpr0521_gain[chan].gain[idx].uscale;
+
+	return 0;
+}
+
+static int rpr0521_set_gain(struct rpr0521_data *data, int chan,
+			    int val, int val2)
+{
+	int i, idx = -EINVAL;
+
+	/* get gain index */
+	for (i = 0; i < rpr0521_gain[chan].size; i++)
+		if (val == rpr0521_gain[chan].gain[i].scale &&
+		    val2 == rpr0521_gain[chan].gain[i].uscale) {
+			idx = i;
+			break;
+		}
+
+	if (idx < 0)
+		return idx;
+
+	return regmap_update_bits(data->regmap, rpr0521_gain[chan].reg,
+				  rpr0521_gain[chan].mask,
+				  idx << rpr0521_gain[chan].shift);
+}
+
+static int rpr0521_read_raw(struct iio_dev *indio_dev,
+			    struct iio_chan_spec const *chan, int *val,
+			    int *val2, long mask)
+{
+	struct rpr0521_data *data = iio_priv(indio_dev);
+	int ret;
+	u8 device_mask;
+	__le16 raw_data;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_RAW:
+		if (chan->type != IIO_INTENSITY && chan->type != IIO_PROXIMITY)
+			return -EINVAL;
+
+		device_mask = rpr0521_data_reg[chan->address].device_mask;
+
+		mutex_lock(&data->lock);
+		ret = rpr0521_set_power_state(data, true, device_mask);
+		if (ret < 0) {
+			mutex_unlock(&data->lock);
+			return ret;
+		}
+
+		ret = regmap_bulk_read(data->regmap,
+				       rpr0521_data_reg[chan->address].address,
+				       &raw_data, 2);
+		if (ret < 0) {
+			rpr0521_set_power_state(data, false, device_mask);
+			mutex_unlock(&data->lock);
+			return ret;
+		}
+
+		ret = rpr0521_set_power_state(data, false, device_mask);
+		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
+
+		*val = le16_to_cpu(raw_data);
+
+		return IIO_VAL_INT;
+	case IIO_CHAN_INFO_SCALE:
+		mutex_lock(&data->lock);
+		ret = rpr0521_get_gain(data, chan->address, val, val2);
+		mutex_unlock(&data->lock);
+		if (ret < 0)
+			return ret;
+
+		return IIO_VAL_INT_PLUS_MICRO;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int rpr0521_write_raw(struct iio_dev *indio_dev,
+			     struct iio_chan_spec const *chan, int val,
+			     int val2, long mask)
+{
+	struct rpr0521_data *data = iio_priv(indio_dev);
+	int ret;
+
+	switch (mask) {
+	case IIO_CHAN_INFO_SCALE:
+		mutex_lock(&data->lock);
+		ret = rpr0521_set_gain(data, chan->address, val, val2);
+		mutex_unlock(&data->lock);
+
+		return ret;
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct iio_info rpr0521_info = {
+	.driver_module	= THIS_MODULE,
+	.read_raw	= rpr0521_read_raw,
+	.write_raw	= rpr0521_write_raw,
+	.attrs		= &rpr0521_attribute_group,
+};
+
+static int rpr0521_init(struct rpr0521_data *data)
+{
+	int ret;
+	int id;
+
+	ret = regmap_read(data->regmap, RPR0521_REG_ID, &id);
+	if (ret < 0) {
+		dev_err(&data->client->dev, "Failed to read REG_ID register\n");
+		return ret;
+	}
+
+	if (id != RPR0521_MANUFACT_ID) {
+		dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
+			id, RPR0521_MANUFACT_ID);
+		return -ENODEV;
+	}
+
+	/* set default measurement time - 100 ms for both ALS and PS */
+	ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+				 RPR0521_MODE_MEAS_TIME_MASK,
+				 RPR0521_DEFAULT_MEAS_TIME);
+	if (ret) {
+		pr_err("regmap_update_bits returned %d\n", ret);
+		return ret;
+	}
+
+	ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
+	if (ret < 0)
+		return ret;
+	ret = rpr0521_pxs_enable(data, RPR0521_MODE_PXS_ENABLE);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int rpr0521_poweroff(struct rpr0521_data *data)
+{
+	int ret;
+
+	ret = regmap_update_bits(data->regmap, RPR0521_REG_MODE_CTRL,
+				 RPR0521_MODE_ALS_MASK |
+				 RPR0521_MODE_PXS_MASK,
+				 RPR0521_MODE_ALS_DISABLE |
+				 RPR0521_MODE_PXS_DISABLE);
+	if (ret < 0)
+		return ret;
+
+	data->als_dev_en = false;
+	data->pxs_dev_en = false;
+
+	return 0;
+}
+
+static bool rpr0521_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case RPR0521_REG_MODE_CTRL:
+	case RPR0521_REG_ALS_CTRL:
+	case RPR0521_REG_PXS_CTRL:
+		return false;
+	default:
+		return true;
+	}
+}
+
+static const struct regmap_config rpr0521_regmap_config = {
+	.name		= RPR0521_REGMAP_NAME,
+
+	.reg_bits	= 8,
+	.val_bits	= 8,
+
+	.max_register	= RPR0521_REG_ID,
+	.cache_type	= REGCACHE_RBTREE,
+	.volatile_reg	= rpr0521_is_volatile_reg,
+};
+
+static int rpr0521_probe(struct i2c_client *client,
+			 const struct i2c_device_id *id)
+{
+	struct rpr0521_data *data;
+	struct iio_dev *indio_dev;
+	struct regmap *regmap;
+	int ret;
+
+	indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+	if (!indio_dev)
+		return -ENOMEM;
+
+	regmap = devm_regmap_init_i2c(client, &rpr0521_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(&client->dev, "regmap_init failed!\n");
+		return PTR_ERR(regmap);
+	}
+
+	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
+	data->client = client;
+	data->regmap = regmap;
+
+	mutex_init(&data->lock);
+
+	indio_dev->dev.parent = &client->dev;
+	indio_dev->info = &rpr0521_info;
+	indio_dev->name = RPR0521_DRV_NAME;
+	indio_dev->channels = rpr0521_channels;
+	indio_dev->num_channels = ARRAY_SIZE(rpr0521_channels);
+	indio_dev->modes = INDIO_DIRECT_MODE;
+
+	ret = rpr0521_init(data);
+	if (ret < 0) {
+		dev_err(&client->dev, "rpr0521 chip init failed\n");
+		return ret;
+	}
+	ret = iio_device_register(indio_dev);
+	if (ret < 0)
+		return ret;
+
+	ret = pm_runtime_set_active(&client->dev);
+	if (ret < 0)
+		goto err_iio_unregister;
+
+	pm_runtime_enable(&client->dev);
+	pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS);
+	pm_runtime_use_autosuspend(&client->dev);
+
+	return 0;
+
+err_iio_unregister:
+	iio_device_unregister(indio_dev);
+	return ret;
+}
+
+static int rpr0521_remove(struct i2c_client *client)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+	pm_runtime_disable(&client->dev);
+	pm_runtime_set_suspended(&client->dev);
+	pm_runtime_put_noidle(&client->dev);
+
+	iio_device_unregister(indio_dev);
+	rpr0521_poweroff(iio_priv(indio_dev));
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rpr0521_runtime_suspend(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct rpr0521_data *data = iio_priv(indio_dev);
+	int ret;
+
+	/* disable channels and set {als,pxs}_dev_en to false */
+	mutex_lock(&data->lock);
+	ret = rpr0521_poweroff(data);
+	mutex_unlock(&data->lock);
+
+	return ret;
+}
+
+static int rpr0521_runtime_resume(struct device *dev)
+{
+	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+	struct rpr0521_data *data = iio_priv(indio_dev);
+	int ret;
+
+	if (data->als_ps_need_en) {
+		ret = rpr0521_als_enable(data, RPR0521_MODE_ALS_ENABLE);
+		if (ret < 0)
+			return ret;
+		data->als_ps_need_en = false;
+	}
+
+	if (data->pxs_ps_need_en) {
+		ret = rpr0521_pxs_enable(data, RPR0521_MODE_PXS_ENABLE);
+		if (ret < 0)
+			return ret;
+		data->pxs_ps_need_en = false;
+	}
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops rpr0521_pm_ops = {
+	SET_RUNTIME_PM_OPS(rpr0521_runtime_suspend,
+			   rpr0521_runtime_resume, NULL)
+};
+
+static const struct acpi_device_id rpr0521_acpi_match[] = {
+	{"RPR0521", 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, rpr0521_acpi_match);
+
+static const struct i2c_device_id rpr0521_id[] = {
+	{"rpr0521", 0},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, rpr0521_id);
+
+static struct i2c_driver rpr0521_driver = {
+	.driver = {
+		.name	= RPR0521_DRV_NAME,
+		.pm	= &rpr0521_pm_ops,
+		.acpi_match_table = ACPI_PTR(rpr0521_acpi_match),
+	},
+	.probe		= rpr0521_probe,
+	.remove		= rpr0521_remove,
+	.id_table	= rpr0521_id,
+};
+
+module_i2c_driver(rpr0521_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("RPR0521 ROHM Ambient Light and Proximity Sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 11a027a..993eb20 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -469,16 +469,12 @@
 	dev = &client->dev;
 
 	/* gpio interrupt pin */
-	gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0);
+	gpio = devm_gpiod_get_index(dev, STK3310_GPIO, 0, GPIOD_IN);
 	if (IS_ERR(gpio)) {
 		dev_err(dev, "acpi gpio get index failed\n");
 		return PTR_ERR(gpio);
 	}
 
-	ret = gpiod_direction_input(gpio);
-	if (ret)
-		return ret;
-
 	ret = gpiod_to_irq(gpio);
 	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
 
@@ -676,6 +672,7 @@
 	{"STK3311", 0},
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, stk3310_i2c_id);
 
 static const struct acpi_device_id stk3310_acpi_id[] = {
 	{"STK3310", 0},
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index f8b1df0..f90f8c5 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -392,7 +392,6 @@
 	.driver = {
 		.name	= TCS3414_DRV_NAME,
 		.pm	= &tcs3414_pm_ops,
-		.owner	= THIS_MODULE,
 	},
 	.probe		= tcs3414_probe,
 	.remove		= tcs3414_remove,
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 7525699..1b530bf 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -366,7 +366,6 @@
 	.driver = {
 		.name	= TCS3472_DRV_NAME,
 		.pm	= &tcs3472_pm_ops,
-		.owner	= THIS_MODULE,
 	},
 	.probe		= tcs3472_probe,
 	.remove		= tcs3472_remove,
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index 63c26e2..2697918 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -247,7 +247,6 @@
 	.driver = {
 		.name   = TSL4531_DRV_NAME,
 		.pm	= TSL4531_PM_OPS,
-		.owner  = THIS_MODULE,
 	},
 	.probe  = tsl4531_probe,
 	.remove = tsl4531_remove,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index d948c47..c9d85bb 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -185,7 +185,6 @@
 static struct i2c_driver vcnl4000_driver = {
 	.driver = {
 		.name   = VCNL4000_DRV_NAME,
-		.owner  = THIS_MODULE,
 	},
 	.probe  = vcnl4000_probe,
 	.id_table = vcnl4000_id,
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 1347a1f..d8e614c 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -85,6 +85,7 @@
 #define BMC150_MAGN_REG_HIGH_THRESH		0x50
 #define BMC150_MAGN_REG_REP_XY			0x51
 #define BMC150_MAGN_REG_REP_Z			0x52
+#define BMC150_MAGN_REG_REP_DATAMASK		GENMASK(7, 0)
 
 #define BMC150_MAGN_REG_TRIM_START		0x5D
 #define BMC150_MAGN_REG_TRIM_END		0x71
@@ -559,7 +560,7 @@
 			}
 			ret = regmap_update_bits(data->regmap,
 						 BMC150_MAGN_REG_REP_XY,
-						 0xFF,
+						 BMC150_MAGN_REG_REP_DATAMASK,
 						 BMC150_MAGN_REPXY_TO_REGVAL
 						 (val));
 			mutex_unlock(&data->mutex);
@@ -575,7 +576,7 @@
 			}
 			ret = regmap_update_bits(data->regmap,
 						 BMC150_MAGN_REG_REP_Z,
-						 0xFF,
+						 BMC150_MAGN_REG_REP_DATAMASK,
 						 BMC150_MAGN_REPZ_TO_REGVAL
 						 (val));
 			mutex_unlock(&data->mutex);
@@ -588,17 +589,6 @@
 	}
 }
 
-static int bmc150_magn_validate_trigger(struct iio_dev *indio_dev,
-					struct iio_trigger *trig)
-{
-	struct bmc150_magn_data *data = iio_priv(indio_dev);
-
-	if (data->dready_trig != trig)
-		return -EINVAL;
-
-	return 0;
-}
-
 static ssize_t bmc150_magn_show_samp_freq_avail(struct device *dev,
 						struct device_attribute *attr,
 						char *buf)
@@ -659,11 +649,12 @@
 	.attrs = &bmc150_magn_attrs_group,
 	.read_raw = bmc150_magn_read_raw,
 	.write_raw = bmc150_magn_write_raw,
-	.validate_trigger = bmc150_magn_validate_trigger,
 	.driver_module = THIS_MODULE,
 };
 
-static const unsigned long bmc150_magn_scan_masks[] = {0x07, 0};
+static const unsigned long bmc150_magn_scan_masks[] = {
+					BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+					0};
 
 static irqreturn_t bmc150_magn_trigger_handler(int irq, void *p)
 {
@@ -674,7 +665,6 @@
 
 	mutex_lock(&data->mutex);
 	ret = bmc150_magn_read_xyz(data, data->buffer);
-	mutex_unlock(&data->mutex);
 	if (ret < 0)
 		goto err;
 
@@ -682,7 +672,8 @@
 					   pf->timestamp);
 
 err:
-	iio_trigger_notify_done(data->dready_trig);
+	mutex_unlock(&data->mutex);
+	iio_trigger_notify_done(indio_dev->trig);
 
 	return IRQ_HANDLED;
 }
@@ -793,29 +784,23 @@
 	if (state == data->dready_trigger_on)
 		goto err_unlock;
 
-	ret = bmc150_magn_set_power_state(data, state);
-	if (ret < 0)
-		goto err_unlock;
-
 	ret = regmap_update_bits(data->regmap, BMC150_MAGN_REG_INT_DRDY,
 				 BMC150_MAGN_MASK_DRDY_EN,
 				 state << BMC150_MAGN_SHIFT_DRDY_EN);
 	if (ret < 0)
-		goto err_poweroff;
+		goto err_unlock;
 
 	data->dready_trigger_on = state;
 
 	if (state) {
 		ret = bmc150_magn_reset_intr(data);
 		if (ret < 0)
-			goto err_poweroff;
+			goto err_unlock;
 	}
 	mutex_unlock(&data->mutex);
 
 	return 0;
 
-err_poweroff:
-	bmc150_magn_set_power_state(data, false);
 err_unlock:
 	mutex_unlock(&data->mutex);
 	return ret;
@@ -827,6 +812,27 @@
 	.owner = THIS_MODULE,
 };
 
+static int bmc150_magn_buffer_preenable(struct iio_dev *indio_dev)
+{
+	struct bmc150_magn_data *data = iio_priv(indio_dev);
+
+	return bmc150_magn_set_power_state(data, true);
+}
+
+static int bmc150_magn_buffer_postdisable(struct iio_dev *indio_dev)
+{
+	struct bmc150_magn_data *data = iio_priv(indio_dev);
+
+	return bmc150_magn_set_power_state(data, false);
+}
+
+static const struct iio_buffer_setup_ops bmc150_magn_buffer_setup_ops = {
+	.preenable = bmc150_magn_buffer_preenable,
+	.postenable = iio_triggered_buffer_postenable,
+	.predisable = iio_triggered_buffer_predisable,
+	.postdisable = bmc150_magn_buffer_postdisable,
+};
+
 static int bmc150_magn_gpio_probe(struct i2c_client *client)
 {
 	struct device *dev;
@@ -839,16 +845,12 @@
 	dev = &client->dev;
 
 	/* data ready GPIO interrupt pin */
-	gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0);
+	gpio = devm_gpiod_get_index(dev, BMC150_MAGN_GPIO_INT, 0, GPIOD_IN);
 	if (IS_ERR(gpio)) {
 		dev_err(dev, "ACPI GPIO get index failed\n");
 		return PTR_ERR(gpio);
 	}
 
-	ret = gpiod_direction_input(gpio);
-	if (ret)
-		return ret;
-
 	ret = gpiod_to_irq(gpio);
 
 	dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
@@ -932,16 +934,6 @@
 			goto err_poweroff;
 		}
 
-		ret = iio_triggered_buffer_setup(indio_dev,
-						 &iio_pollfunc_store_time,
-						 bmc150_magn_trigger_handler,
-						 NULL);
-		if (ret < 0) {
-			dev_err(&client->dev,
-				"iio triggered buffer setup failed\n");
-			goto err_trigger_unregister;
-		}
-
 		ret = request_threaded_irq(client->irq,
 					   iio_trigger_generic_data_rdy_poll,
 					   NULL,
@@ -951,14 +943,24 @@
 		if (ret < 0) {
 			dev_err(&client->dev, "request irq %d failed\n",
 				client->irq);
-			goto err_buffer_cleanup;
+			goto err_trigger_unregister;
 		}
 	}
 
+	ret = iio_triggered_buffer_setup(indio_dev,
+					 iio_pollfunc_store_time,
+					 bmc150_magn_trigger_handler,
+					 &bmc150_magn_buffer_setup_ops);
+	if (ret < 0) {
+		dev_err(&client->dev,
+			"iio triggered buffer setup failed\n");
+		goto err_free_irq;
+	}
+
 	ret = iio_device_register(indio_dev);
 	if (ret < 0) {
 		dev_err(&client->dev, "unable to register iio device\n");
-		goto err_free_irq;
+		goto err_buffer_cleanup;
 	}
 
 	ret = pm_runtime_set_active(&client->dev);
@@ -976,12 +978,11 @@
 
 err_iio_unregister:
 	iio_device_unregister(indio_dev);
+err_buffer_cleanup:
+	iio_triggered_buffer_cleanup(indio_dev);
 err_free_irq:
 	if (client->irq > 0)
 		free_irq(client->irq, data->dready_trig);
-err_buffer_cleanup:
-	if (data->dready_trig)
-		iio_triggered_buffer_cleanup(indio_dev);
 err_trigger_unregister:
 	if (data->dready_trig)
 		iio_trigger_unregister(data->dready_trig);
@@ -1000,14 +1001,13 @@
 	pm_runtime_put_noidle(&client->dev);
 
 	iio_device_unregister(indio_dev);
+	iio_triggered_buffer_cleanup(indio_dev);
 
 	if (client->irq > 0)
 		free_irq(data->client->irq, data->dready_trig);
 
-	if (data->dready_trig) {
-		iio_triggered_buffer_cleanup(indio_dev);
+	if (data->dready_trig)
 		iio_trigger_unregister(data->dready_trig);
-	}
 
 	mutex_lock(&data->mutex);
 	bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
@@ -1034,6 +1034,9 @@
 	return 0;
 }
 
+/*
+ * Should be called with data->mutex held.
+ */
 static int bmc150_magn_runtime_resume(struct device *dev)
 {
 	struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
@@ -1082,12 +1085,14 @@
 
 static const struct acpi_device_id bmc150_magn_acpi_match[] = {
 	{"BMC150B", 0},
+	{"BMC156B", 0},
 	{},
 };
 MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
 
 static const struct i2c_device_id bmc150_magn_id[] = {
 	{"bmc150_magn", 0},
+	{"bmc156_magn", 0},
 	{},
 };
 MODULE_DEVICE_TABLE(i2c, bmc150_magn_id);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index 706ebfd..176e14a 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -316,31 +316,31 @@
 static int mmc35240_raw_to_mgauss(struct mmc35240_data *data, int index,
 				  __le16 buf[], int *val)
 {
-	int raw_x, raw_y, raw_z;
-	int sens_x, sens_y, sens_z;
+	int raw[3];
+	int sens[3];
 	int nfo;
 
-	raw_x = le16_to_cpu(buf[AXIS_X]);
-	raw_y = le16_to_cpu(buf[AXIS_Y]);
-	raw_z = le16_to_cpu(buf[AXIS_Z]);
+	raw[AXIS_X] = le16_to_cpu(buf[AXIS_X]);
+	raw[AXIS_Y] = le16_to_cpu(buf[AXIS_Y]);
+	raw[AXIS_Z] = le16_to_cpu(buf[AXIS_Z]);
 
-	sens_x = mmc35240_props_table[data->res].sens[AXIS_X];
-	sens_y = mmc35240_props_table[data->res].sens[AXIS_Y];
-	sens_z = mmc35240_props_table[data->res].sens[AXIS_Z];
+	sens[AXIS_X] = mmc35240_props_table[data->res].sens[AXIS_X];
+	sens[AXIS_Y] = mmc35240_props_table[data->res].sens[AXIS_Y];
+	sens[AXIS_Z] = mmc35240_props_table[data->res].sens[AXIS_Z];
 
 	nfo = mmc35240_props_table[data->res].nfo;
 
 	switch (index) {
 	case AXIS_X:
-		*val = (raw_x - nfo) * 1000 / sens_x;
+		*val = (raw[AXIS_X] - nfo) * 1000 / sens[AXIS_X];
 		break;
 	case AXIS_Y:
-		*val = (raw_y - nfo) * 1000 / sens_y -
-			(raw_z - nfo)  * 1000 / sens_z;
+		*val = (raw[AXIS_Y] - nfo) * 1000 / sens[AXIS_Y] -
+			(raw[AXIS_Z] - nfo)  * 1000 / sens[AXIS_Z];
 		break;
 	case AXIS_Z:
-		*val = (raw_y - nfo) * 1000 / sens_y +
-			(raw_z - nfo) * 1000 / sens_z;
+		*val = (raw[AXIS_Y] - nfo) * 1000 / sens[AXIS_Y] +
+			(raw[AXIS_Z] - nfo) * 1000 / sens[AXIS_Z];
 		break;
 	default:
 		return -EINVAL;
@@ -559,6 +559,12 @@
 	SET_SYSTEM_SLEEP_PM_OPS(mmc35240_suspend, mmc35240_resume)
 };
 
+static const struct of_device_id mmc35240_of_match[] = {
+	{ .compatible = "memsic,mmc35240", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mmc35240_of_match);
+
 static const struct acpi_device_id mmc35240_acpi_match[] = {
 	{"MMC35240", 0},
 	{ },
@@ -574,6 +580,7 @@
 static struct i2c_driver mmc35240_driver = {
 	.driver = {
 		.name = MMC35240_DRV_NAME,
+		.of_match_table = mmc35240_of_match,
 		.pm = &mmc35240_pm_ops,
 		.acpi_match_table = ACPI_PTR(mmc35240_acpi_match),
 	},
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 287691c..06a4d9c 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -18,6 +18,7 @@
 #define LSM303DLHC_MAGN_DEV_NAME	"lsm303dlhc_magn"
 #define LSM303DLM_MAGN_DEV_NAME		"lsm303dlm_magn"
 #define LIS3MDL_MAGN_DEV_NAME		"lis3mdl"
+#define LSM303AGR_MAGN_DEV_NAME		"lsm303agr_magn"
 
 int st_magn_common_probe(struct iio_dev *indio_dev);
 void st_magn_common_remove(struct iio_dev *indio_dev);
@@ -25,6 +26,8 @@
 #ifdef CONFIG_IIO_BUFFER
 int st_magn_allocate_ring(struct iio_dev *indio_dev);
 void st_magn_deallocate_ring(struct iio_dev *indio_dev);
+int st_magn_trig_set_state(struct iio_trigger *trig, bool state);
+#define ST_MAGN_TRIGGER_SET_STATE (&st_magn_trig_set_state)
 #else /* CONFIG_IIO_BUFFER */
 static inline int st_magn_probe_trigger(struct iio_dev *indio_dev, int irq)
 {
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index bf427dc..ecd3bd0 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -23,6 +23,13 @@
 #include <linux/iio/common/st_sensors.h>
 #include "st_magn.h"
 
+int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
+{
+	struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+
+	return st_sensors_set_dataready_irq(indio_dev, state);
+}
+
 static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
 {
 	return st_sensors_set_enable(indio_dev, true);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index b4bcfb7..f8dc4b8 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -43,6 +43,7 @@
 #define ST_MAGN_FS_AVL_8000MG			8000
 #define ST_MAGN_FS_AVL_8100MG			8100
 #define ST_MAGN_FS_AVL_12000MG			12000
+#define ST_MAGN_FS_AVL_15000MG			15000
 #define ST_MAGN_FS_AVL_16000MG			16000
 
 /* CUSTOM VALUES FOR SENSOR 0 */
@@ -157,6 +158,29 @@
 #define ST_MAGN_2_OUT_Y_L_ADDR			0x2a
 #define ST_MAGN_2_OUT_Z_L_ADDR			0x2c
 
+/* CUSTOM VALUES FOR SENSOR 3 */
+#define ST_MAGN_3_WAI_ADDR			0x4f
+#define ST_MAGN_3_WAI_EXP			0x40
+#define ST_MAGN_3_ODR_ADDR			0x60
+#define ST_MAGN_3_ODR_MASK			0x0c
+#define ST_MAGN_3_ODR_AVL_10HZ_VAL		0x00
+#define ST_MAGN_3_ODR_AVL_20HZ_VAL		0x01
+#define ST_MAGN_3_ODR_AVL_50HZ_VAL		0x02
+#define ST_MAGN_3_ODR_AVL_100HZ_VAL		0x03
+#define ST_MAGN_3_PW_ADDR			0x60
+#define ST_MAGN_3_PW_MASK			0x03
+#define ST_MAGN_3_PW_ON				0x00
+#define ST_MAGN_3_PW_OFF			0x03
+#define ST_MAGN_3_BDU_ADDR			0x62
+#define ST_MAGN_3_BDU_MASK			0x10
+#define ST_MAGN_3_DRDY_IRQ_ADDR			0x62
+#define ST_MAGN_3_DRDY_INT_MASK			0x01
+#define ST_MAGN_3_FS_AVL_15000_GAIN		1500
+#define ST_MAGN_3_MULTIREAD_BIT			false
+#define ST_MAGN_3_OUT_X_L_ADDR			0x68
+#define ST_MAGN_3_OUT_Y_L_ADDR			0x6a
+#define ST_MAGN_3_OUT_Z_L_ADDR			0x6c
+
 static const struct iio_chan_spec st_magn_16bit_channels[] = {
 	ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
 			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -189,9 +213,26 @@
 	IIO_CHAN_SOFT_TIMESTAMP(3)
 };
 
+static const struct iio_chan_spec st_magn_3_16bit_channels[] = {
+	ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+			ST_SENSORS_SCAN_X, 1, IIO_MOD_X, 's', IIO_LE, 16, 16,
+			ST_MAGN_3_OUT_X_L_ADDR),
+	ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+			ST_SENSORS_SCAN_Y, 1, IIO_MOD_Y, 's', IIO_LE, 16, 16,
+			ST_MAGN_3_OUT_Y_L_ADDR),
+	ST_SENSORS_LSM_CHANNELS(IIO_MAGN,
+			BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+			ST_SENSORS_SCAN_Z, 1, IIO_MOD_Z, 's', IIO_LE, 16, 16,
+			ST_MAGN_3_OUT_Z_L_ADDR),
+	IIO_CHAN_SOFT_TIMESTAMP(3)
+};
+
 static const struct st_sensor_settings st_magn_sensors_settings[] = {
 	{
 		.wai = 0, /* This sensor has no valid WhoAmI report 0 */
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LSM303DLH_MAGN_DEV_NAME,
 		},
@@ -268,6 +309,7 @@
 	},
 	{
 		.wai = ST_MAGN_1_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LSM303DLHC_MAGN_DEV_NAME,
 			[1] = LSM303DLM_MAGN_DEV_NAME,
@@ -346,6 +388,7 @@
 	},
 	{
 		.wai = ST_MAGN_2_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LIS3MDL_MAGN_DEV_NAME,
 		},
@@ -399,6 +442,48 @@
 		.multi_read_bit = ST_MAGN_2_MULTIREAD_BIT,
 		.bootime = 2,
 	},
+	{
+		.wai = ST_MAGN_3_WAI_EXP,
+		.wai_addr = ST_MAGN_3_WAI_ADDR,
+		.sensors_supported = {
+			[0] = LSM303AGR_MAGN_DEV_NAME,
+		},
+		.ch = (struct iio_chan_spec *)st_magn_3_16bit_channels,
+		.odr = {
+			.addr = ST_MAGN_3_ODR_ADDR,
+			.mask = ST_MAGN_3_ODR_MASK,
+			.odr_avl = {
+				{ 10, ST_MAGN_3_ODR_AVL_10HZ_VAL, },
+				{ 20, ST_MAGN_3_ODR_AVL_20HZ_VAL, },
+				{ 50, ST_MAGN_3_ODR_AVL_50HZ_VAL, },
+				{ 100, ST_MAGN_3_ODR_AVL_100HZ_VAL, },
+			},
+		},
+		.pw = {
+			.addr = ST_MAGN_3_PW_ADDR,
+			.mask = ST_MAGN_3_PW_MASK,
+			.value_on = ST_MAGN_3_PW_ON,
+			.value_off = ST_MAGN_3_PW_OFF,
+		},
+		.fs = {
+			.fs_avl = {
+				[0] = {
+					.num = ST_MAGN_FS_AVL_15000MG,
+					.gain = ST_MAGN_3_FS_AVL_15000_GAIN,
+				},
+			},
+		},
+		.bdu = {
+			.addr = ST_MAGN_3_BDU_ADDR,
+			.mask = ST_MAGN_3_BDU_MASK,
+		},
+		.drdy_irq = {
+			.addr = ST_MAGN_3_DRDY_IRQ_ADDR,
+			.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
+		},
+		.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
+		.bootime = 2,
+	},
 };
 
 static int st_magn_read_raw(struct iio_dev *indio_dev,
@@ -477,6 +562,16 @@
 	.write_raw = &st_magn_write_raw,
 };
 
+#ifdef CONFIG_IIO_TRIGGER
+static const struct iio_trigger_ops st_magn_trigger_ops = {
+	.owner = THIS_MODULE,
+	.set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+};
+#define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
+#else
+#define ST_MAGN_TRIGGER_OPS NULL
+#endif
+
 int st_magn_common_probe(struct iio_dev *indio_dev)
 {
 	struct st_sensor_data *mdata = iio_priv(indio_dev);
@@ -513,7 +608,8 @@
 		return err;
 
 	if (irq > 0) {
-		err = st_sensors_allocate_trigger(indio_dev, NULL);
+		err = st_sensors_allocate_trigger(indio_dev,
+						ST_MAGN_TRIGGER_OPS);
 		if (err < 0)
 			goto st_magn_probe_trigger_error;
 	}
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index 5311d8a..8aa37af 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -36,6 +36,10 @@
 		.compatible = "st,lis3mdl-magn",
 		.data = LIS3MDL_MAGN_DEV_NAME,
 	},
+	{
+		.compatible = "st,lsm303agr-magn",
+		.data = LSM303AGR_MAGN_DEV_NAME,
+	},
 	{},
 };
 MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -79,13 +83,13 @@
 	{ LSM303DLHC_MAGN_DEV_NAME },
 	{ LSM303DLM_MAGN_DEV_NAME },
 	{ LIS3MDL_MAGN_DEV_NAME },
+	{ LSM303AGR_MAGN_DEV_NAME },
 	{},
 };
 MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
 
 static struct i2c_driver st_magn_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "st-magn-i2c",
 		.of_match_table = of_match_ptr(st_magn_of_match),
 	},
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 7adacf1..0abca2c 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -51,6 +51,7 @@
 	{ LSM303DLHC_MAGN_DEV_NAME },
 	{ LSM303DLM_MAGN_DEV_NAME },
 	{ LIS3MDL_MAGN_DEV_NAME },
+	{ LSM303AGR_MAGN_DEV_NAME },
 	{},
 };
 MODULE_DEVICE_TABLE(spi, st_magn_id_table);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index fa62950..4745179 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -53,10 +53,10 @@
           will be called mpl3115.
 
 config MS5611
-	tristate "Measurement Specialities MS5611 pressure sensor driver"
+	tristate "Measurement Specialties MS5611 pressure sensor driver"
 	help
-	  Say Y here to build support for the Measurement Specialities
-	  MS5611 pressure and temperature sensor.
+	  Say Y here to build support for the Measurement Specialties
+	  MS5611 and MS5607 pressure and temperature sensors.
 
 	  To compile this driver as a module, choose M here: the module will
 	  be called ms5611_core.
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 099c6cd..23b93c7 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -27,6 +27,18 @@
 
 #define MS5611_PROM_WORDS_NB		8
 
+enum {
+	MS5611,
+	MS5607,
+};
+
+struct ms5611_chip_info {
+	u16 prom[MS5611_PROM_WORDS_NB];
+
+	int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
+					    s32 *temp, s32 *pressure);
+};
+
 struct ms5611_state {
 	void *client;
 	struct mutex lock;
@@ -36,9 +48,9 @@
 	int (*read_adc_temp_and_pressure)(struct device *dev,
 					  s32 *temp, s32 *pressure);
 
-	u16 prom[MS5611_PROM_WORDS_NB];
+	struct ms5611_chip_info *chip_info;
 };
 
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev);
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type);
 
 #endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index e42c853..2f3d9b4 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -9,6 +9,7 @@
  *
  * Data sheet:
  *  http://www.meas-spec.com/downloads/MS5611-01BA03.pdf
+ *  http://www.meas-spec.com/downloads/MS5607-02BA03.pdf
  *
  */
 
@@ -50,7 +51,8 @@
 	struct ms5611_state *st = iio_priv(indio_dev);
 
 	for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
-		ret = st->read_prom_word(&indio_dev->dev, i, &st->prom[i]);
+		ret = st->read_prom_word(&indio_dev->dev,
+					 i, &st->chip_info->prom[i]);
 		if (ret < 0) {
 			dev_err(&indio_dev->dev,
 				"failed to read prom at %d\n", i);
@@ -58,7 +60,7 @@
 		}
 	}
 
-	if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) {
+	if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) {
 		dev_err(&indio_dev->dev, "PROM integrity check failed\n");
 		return -ENODEV;
 	}
@@ -70,22 +72,30 @@
 					 s32 *temp, s32 *pressure)
 {
 	int ret;
-	s32 t, p;
-	s64 off, sens, dt;
 	struct ms5611_state *st = iio_priv(indio_dev);
 
-	ret = st->read_adc_temp_and_pressure(&indio_dev->dev, &t, &p);
+	ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
 	if (ret < 0) {
 		dev_err(&indio_dev->dev,
 			"failed to read temperature and pressure\n");
 		return ret;
 	}
 
-	dt = t - (st->prom[5] << 8);
-	off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7);
-	sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8);
+	return st->chip_info->temp_and_pressure_compensate(st->chip_info,
+							   temp, pressure);
+}
 
-	t = 2000 + ((st->prom[6] * dt) >> 23);
+static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+					       s32 *temp, s32 *pressure)
+{
+	s32 t = *temp, p = *pressure;
+	s64 off, sens, dt;
+
+	dt = t - (chip_info->prom[5] << 8);
+	off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7);
+	sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8);
+
+	t = 2000 + ((chip_info->prom[6] * dt) >> 23);
 	if (t < 2000) {
 		s64 off2, sens2, t2;
 
@@ -111,6 +121,42 @@
 	return 0;
 }
 
+static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info,
+					       s32 *temp, s32 *pressure)
+{
+	s32 t = *temp, p = *pressure;
+	s64 off, sens, dt;
+
+	dt = t - (chip_info->prom[5] << 8);
+	off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6);
+	sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7);
+
+	t = 2000 + ((chip_info->prom[6] * dt) >> 23);
+	if (t < 2000) {
+		s64 off2, sens2, t2;
+
+		t2 = (dt * dt) >> 31;
+		off2 = (61 * (t - 2000) * (t - 2000)) >> 4;
+		sens2 = off2 << 1;
+
+		if (t < -1500) {
+			s64 tmp = (t + 1500) * (t + 1500);
+
+			off2 += 15 * tmp;
+			sens2 += (8 * tmp);
+		}
+
+		t -= t2;
+		off -= off2;
+		sens -= sens2;
+	}
+
+	*temp = t;
+	*pressure = (((p * sens) >> 21) - off) >> 15;
+
+	return 0;
+}
+
 static int ms5611_reset(struct iio_dev *indio_dev)
 {
 	int ret;
@@ -160,16 +206,23 @@
 	return -EINVAL;
 }
 
+static struct ms5611_chip_info chip_info_tbl[] = {
+	[MS5611] = {
+		.temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate,
+	},
+	[MS5607] = {
+		.temp_and_pressure_compensate = ms5607_temp_and_pressure_compensate,
+	}
+};
+
 static const struct iio_chan_spec ms5611_channels[] = {
 	{
 		.type = IIO_PRESSURE,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
-			BIT(IIO_CHAN_INFO_SCALE)
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
 	},
 	{
 		.type = IIO_TEMP,
-		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
-			BIT(IIO_CHAN_INFO_SCALE)
+		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
 	}
 };
 
@@ -189,12 +242,13 @@
 	return ms5611_read_prom(indio_dev);
 }
 
-int ms5611_probe(struct iio_dev *indio_dev, struct device *dev)
+int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, int type)
 {
 	int ret;
 	struct ms5611_state *st = iio_priv(indio_dev);
 
 	mutex_init(&st->lock);
+	st->chip_info = &chip_info_tbl[type];
 	indio_dev->dev.parent = dev;
 	indio_dev->name = dev->driver->name;
 	indio_dev->info = &ms5611_info;
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 748fd9a..245797d 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -104,11 +104,12 @@
 	st->read_adc_temp_and_pressure = ms5611_i2c_read_adc_temp_and_pressure;
 	st->client = client;
 
-	return ms5611_probe(indio_dev, &client->dev);
+	return ms5611_probe(indio_dev, &client->dev, id->driver_data);
 }
 
 static const struct i2c_device_id ms5611_id[] = {
-	{ "ms5611", 0 },
+	{ "ms5611", MS5611 },
+	{ "ms5607", MS5607 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, ms5611_id);
@@ -116,7 +117,6 @@
 static struct i2c_driver ms5611_driver = {
 	.driver = {
 		.name = "ms5611",
-		.owner = THIS_MODULE,
 	},
 	.id_table = ms5611_id,
 	.probe = ms5611_i2c_probe,
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 976726f..08ee6e8 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -103,11 +103,13 @@
 	st->read_adc_temp_and_pressure = ms5611_spi_read_adc_temp_and_pressure;
 	st->client = spi;
 
-	return ms5611_probe(indio_dev, &spi->dev);
+	return ms5611_probe(indio_dev, &spi->dev,
+			    spi_get_device_id(spi)->driver_data);
 }
 
 static const struct spi_device_id ms5611_id[] = {
-	{ "ms5611", 0 },
+	{ "ms5611", MS5611 },
+	{ "ms5607", MS5607 },
 	{ }
 };
 MODULE_DEVICE_TABLE(spi, ms5611_id);
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index e881fa6..eb41d2b 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -178,6 +178,7 @@
 static const struct st_sensor_settings st_press_sensors_settings[] = {
 	{
 		.wai = ST_PRESS_LPS331AP_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS331AP_PRESS_DEV_NAME,
 		},
@@ -225,6 +226,7 @@
 	},
 	{
 		.wai = ST_PRESS_LPS001WP_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS001WP_PRESS_DEV_NAME,
 		},
@@ -260,6 +262,7 @@
 	},
 	{
 		.wai = ST_PRESS_LPS25H_WAI_EXP,
+		.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
 		.sensors_supported = {
 			[0] = LPS25H_PRESS_DEV_NAME,
 		},
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 137788b..8fcf976 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -79,7 +79,6 @@
 
 static struct i2c_driver st_press_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "st-press-i2c",
 		.of_match_table = of_match_ptr(st_press_of_match),
 	},
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 7a2b639..5d033a5 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -65,6 +65,13 @@
 
 #define MLX90614_AUTOSLEEP_DELAY 5000 /* default autosleep delay */
 
+/* Magic constants */
+#define MLX90614_CONST_OFFSET_DEC -13657 /* decimal part of the Kelvin offset */
+#define MLX90614_CONST_OFFSET_REM 500000 /* remainder of offset (273.15*50) */
+#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
+#define MLX90614_CONST_RAW_EMISSIVITY_MAX 65535 /* max value for emissivity */
+#define MLX90614_CONST_EMISSIVITY_RESOLUTION 15259 /* 1/65535 ~ 0.000015259 */
+
 struct mlx90614_data {
 	struct i2c_client *client;
 	struct mutex lock; /* for EEPROM access only */
@@ -204,11 +211,11 @@
 		*val = ret;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_OFFSET:
-		*val = -13657;
-		*val2 = 500000;
+		*val = MLX90614_CONST_OFFSET_DEC;
+		*val2 = MLX90614_CONST_OFFSET_REM;
 		return IIO_VAL_INT_PLUS_MICRO;
 	case IIO_CHAN_INFO_SCALE:
-		*val = 20;
+		*val = MLX90614_CONST_SCALE;
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
 		mlx90614_power_get(data, false);
@@ -221,12 +228,12 @@
 		if (ret < 0)
 			return ret;
 
-		if (ret == 65535) {
+		if (ret == MLX90614_CONST_RAW_EMISSIVITY_MAX) {
 			*val = 1;
 			*val2 = 0;
 		} else {
 			*val = 0;
-			*val2 = ret * 15259; /* 1/65535 ~ 0.000015259 */
+			*val2 = ret * MLX90614_CONST_EMISSIVITY_RESOLUTION;
 		}
 		return IIO_VAL_INT_PLUS_NANO;
 	default:
@@ -245,7 +252,8 @@
 	case IIO_CHAN_INFO_CALIBEMISSIVITY: /* 1/65535 / LSB */
 		if (val < 0 || val2 < 0 || val > 1 || (val == 1 && val2 != 0))
 			return -EINVAL;
-		val = val * 65535 + val2 / 15259; /* 1/65535 ~ 0.000015259 */
+		val = val * MLX90614_CONST_RAW_EMISSIVITY_MAX +
+			val2 / MLX90614_CONST_EMISSIVITY_RESOLUTION;
 
 		mlx90614_power_get(data, false);
 		mutex_lock(&data->lock);
@@ -551,7 +559,6 @@
 static struct i2c_driver mlx90614_driver = {
 	.driver = {
 		.name	= "mlx90614",
-		.owner	= THIS_MODULE,
 		.pm	= &mlx90614_pm_ops,
 	},
 	.probe = mlx90614_probe,
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 8f21f32..e78c106 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -36,9 +36,9 @@
 #define TMP006_CONFIG_DRDY_EN BIT(8)
 #define TMP006_CONFIG_DRDY BIT(7)
 
-#define TMP006_CONFIG_MOD_MASK 0x7000
+#define TMP006_CONFIG_MOD_MASK GENMASK(14, 12)
 
-#define TMP006_CONFIG_CR_MASK 0x0e00
+#define TMP006_CONFIG_CR_MASK GENMASK(11, 9)
 #define TMP006_CONFIG_CR_SHIFT 9
 
 #define TMP006_MANUFACTURER_MAGIC 0x5449
@@ -280,7 +280,6 @@
 	.driver = {
 		.name	= "tmp006",
 		.pm	= &tmp006_pm_ops,
-		.owner	= THIS_MODULE,
 	},
 	.probe = tmp006_probe,
 	.remove = tmp006_remove,
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 36eb3d0..180a8f7 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -871,7 +871,7 @@
 		if (is_eth) {
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 			if (be32_to_cpu(cqe->vlan_my_qpn) &
-					MLX4_CQE_VLAN_PRESENT_MASK) {
+					MLX4_CQE_CVLAN_PRESENT_MASK) {
 				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
 					MLX4_CQE_VID_MASK;
 			} else {
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f4a30fc..c642082 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -343,9 +343,8 @@
 	__set_bit(EV_FF, dev->evbit);
 
 	/* Copy "true" bits into ff device bitmap */
-	for (i = 0; i <= FF_MAX; i++)
-		if (test_bit(i, dev->ffbit))
-			__set_bit(i, ff->ffbit);
+	for_each_set_bit(i, dev->ffbit, FF_CNT)
+		__set_bit(i, ff->ffbit);
 
 	/* we can emulate RUMBLE with periodic effects */
 	if (test_bit(FF_PERIODIC, ff->ffbit))
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index e853a21..4a2a9e3 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -149,9 +149,9 @@
 
 	for(i = 0; i < 50; i++) {
 		local_irq_save(flags);
-		rdtscl(t1);
+		t1 = rdtsc();
 		for (t = 0; t < 50; t++) gameport_read(gameport);
-		rdtscl(t2);
+		t2 = rdtsc();
 		local_irq_restore(flags);
 		udelay(i * 10);
 		if (t2 - t1 < tx) tx = t2 - t1;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 78d2499..5391abd 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -674,13 +674,19 @@
  */
 static void input_dev_release_keys(struct input_dev *dev)
 {
+	bool need_sync = false;
 	int code;
 
 	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
-		for_each_set_bit(code, dev->key, KEY_CNT)
+		for_each_set_bit(code, dev->key, KEY_CNT) {
 			input_pass_event(dev, EV_KEY, code, 0);
+			need_sync = true;
+		}
+
+		if (need_sync)
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+
 		memset(dev->key, 0, sizeof(dev->key));
-		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 1d247bc..6cb5a3e 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -859,12 +859,11 @@
 	joydev->handle.handler = handler;
 	joydev->handle.private = joydev;
 
-	for (i = 0; i < ABS_CNT; i++)
-		if (test_bit(i, dev->absbit)) {
-			joydev->absmap[i] = joydev->nabs;
-			joydev->abspam[joydev->nabs] = i;
-			joydev->nabs++;
-		}
+	for_each_set_bit(i, dev->absbit, ABS_CNT) {
+		joydev->absmap[i] = joydev->nabs;
+		joydev->abspam[joydev->nabs] = i;
+		joydev->nabs++;
+	}
 
 	for (i = BTN_JOYSTICK - BTN_MISC; i < KEY_MAX - BTN_MISC + 1; i++)
 		if (test_bit(i + BTN_MISC, dev->keybit)) {
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 4284080..6f8b084 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -143,7 +143,7 @@
 
 #include <linux/i8253.h>
 
-#define GET_TIME(x)	do { if (cpu_has_tsc) rdtscl(x); else x = get_time_pit(); } while (0)
+#define GET_TIME(x)	do { if (cpu_has_tsc) x = (unsigned int)rdtsc(); else x = get_time_pit(); } while (0)
 #define DELTA(x,y)	(cpu_has_tsc ? ((y) - (x)) : ((x) - (y) + ((x) < (y) ? PIT_TICK_RATE / HZ : 0)))
 #define TIME_NAME	(cpu_has_tsc?"TSC":"PIT")
 static unsigned int get_time_pit(void)
@@ -160,7 +160,7 @@
         return count;
 }
 #elif defined(__x86_64__)
-#define GET_TIME(x)	rdtscl(x)
+#define GET_TIME(x)	do { x = (unsigned int)rdtsc(); } while (0)
 #define DELTA(x,y)	((y)-(x))
 #define TIME_NAME	"TSC"
 #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
diff --git a/drivers/input/joystick/zhenhua.c b/drivers/input/joystick/zhenhua.c
index 30af2e8..4a8258b 100644
--- a/drivers/input/joystick/zhenhua.c
+++ b/drivers/input/joystick/zhenhua.c
@@ -47,6 +47,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/bitrev.h>
 #include <linux/input.h>
 #include <linux/serio.h>
 
@@ -72,16 +73,6 @@
 	char phys[32];
 };
 
-
-/* bits in all incoming bytes needs to be "reversed" */
-static int zhenhua_bitreverse(int x)
-{
-	x = ((x & 0xaa) >> 1) | ((x & 0x55) << 1);
-	x = ((x & 0xcc) >> 2) | ((x & 0x33) << 2);
-	x = ((x & 0xf0) >> 4) | ((x & 0x0f) << 4);
-	return x;
-}
-
 /*
  * zhenhua_process_packet() decodes packets the driver receives from the
  * RC transmitter. It updates the data accordingly.
@@ -120,7 +111,7 @@
 		return IRQ_HANDLED;	/* wrong MSB -- ignore this byte */
 
 	if (zhenhua->idx < ZHENHUA_MAX_LENGTH)
-		zhenhua->data[zhenhua->idx++] = zhenhua_bitreverse(data);
+		zhenhua->data[zhenhua->idx++] = bitrev8(data);
 
 	if (zhenhua->idx == ZHENHUA_MAX_LENGTH) {
 		zhenhua_process_packet(zhenhua);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4cd94fd..2e80107 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -187,7 +187,7 @@
 
 config KEYBOARD_GPIO
 	tristate "GPIO Buttons"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  This driver implements support for buttons connected
 	  to GPIO pins of various CPUs (and some other chips).
@@ -253,7 +253,7 @@
 
 config KEYBOARD_MATRIX
 	tristate "GPIO driven matrix keypad support"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_MATRIXKMAP
 	help
 	  Enable support for GPIO driven matrix keypad.
@@ -401,6 +401,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called mpr121_touchkey.
 
+config KEYBOARD_SNVS_PWRKEY
+	tristate "IMX SNVS Power Key Driver"
+	depends on SOC_IMX6SX
+	depends on OF
+	help
+	  This is the SNVS power key driver for the Freescale i.MX application
+	  processors, starting with the i.MX6 SX.
+
+	  To compile this driver as a module, choose M here; the
+	  module will be called snvs_pwrkey.
+
 config KEYBOARD_IMX
 	tristate "IMX keypad support"
 	depends on ARCH_MXC
@@ -678,7 +689,7 @@
 config KEYBOARD_CROS_EC
 	tristate "ChromeOS EC keyboard"
 	select INPUT_MATRIXKMAP
-	depends on CROS_EC_PROTO
+	depends on MFD_CROS_EC
 	help
 	  Say Y here to enable the matrix keyboard used by ChromeOS devices
 	  and implemented on the ChromeOS EC. You must enable one bus option
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index df28d55..1d416dd 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -51,6 +51,7 @@
 obj-$(CONFIG_KEYBOARD_QT2160)		+= qt2160.o
 obj-$(CONFIG_KEYBOARD_SAMSUNG)		+= samsung-keypad.o
 obj-$(CONFIG_KEYBOARD_SH_KEYSC)		+= sh_keysc.o
+obj-$(CONFIG_KEYBOARD_SNVS_PWRKEY)	+= snvs_pwrkey.o
 obj-$(CONFIG_KEYBOARD_SPEAR)		+= spear-keyboard.o
 obj-$(CONFIG_KEYBOARD_STMPE)		+= stmpe-keypad.o
 obj-$(CONFIG_KEYBOARD_STOWAWAY)		+= stowaway.o
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 6ed83cf..4d446d5 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -1097,7 +1097,6 @@
 static struct i2c_driver adp5589_driver = {
 	.driver = {
 		.name = KBUILD_MODNAME,
-		.owner = THIS_MODULE,
 		.pm = &adp5589_dev_pm_ops,
 	},
 	.probe = adp5589_probe,
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index f07461a..378db10 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/of_irq.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
@@ -47,6 +48,20 @@
 #define CAP11XX_REG_CONFIG2		0x44
 #define CAP11XX_REG_CONFIG2_ALT_POL	BIT(6)
 #define CAP11XX_REG_SENSOR_BASE_CNT(X)	(0x50 + (X))
+#define CAP11XX_REG_LED_POLARITY	0x73
+#define CAP11XX_REG_LED_OUTPUT_CONTROL	0x74
+
+#define CAP11XX_REG_LED_DUTY_CYCLE_1	0x90
+#define CAP11XX_REG_LED_DUTY_CYCLE_2	0x91
+#define CAP11XX_REG_LED_DUTY_CYCLE_3	0x92
+#define CAP11XX_REG_LED_DUTY_CYCLE_4	0x93
+
+#define CAP11XX_REG_LED_DUTY_MIN_MASK	(0x0f)
+#define CAP11XX_REG_LED_DUTY_MIN_MASK_SHIFT	(0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK	(0xf0)
+#define CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT	(4)
+#define CAP11XX_REG_LED_DUTY_MAX_VALUE	(15)
+
 #define CAP11XX_REG_SENSOR_CALIB	(0xb1 + (X))
 #define CAP11XX_REG_SENSOR_CALIB_LSB1	0xb9
 #define CAP11XX_REG_SENSOR_CALIB_LSB2	0xba
@@ -56,10 +71,23 @@
 
 #define CAP11XX_MANUFACTURER_ID	0x5d
 
+#ifdef CONFIG_LEDS_CLASS
+struct cap11xx_led {
+	struct cap11xx_priv *priv;
+	struct led_classdev cdev;
+	struct work_struct work;
+	u32 reg;
+	enum led_brightness new_brightness;
+};
+#endif
+
 struct cap11xx_priv {
 	struct regmap *regmap;
 	struct input_dev *idev;
 
+	struct cap11xx_led *leds;
+	int num_leds;
+
 	/* config */
 	u32 keycodes[];
 };
@@ -67,6 +95,7 @@
 struct cap11xx_hw_model {
 	u8 product_id;
 	unsigned int num_channels;
+	unsigned int num_leds;
 };
 
 enum {
@@ -76,9 +105,9 @@
 };
 
 static const struct cap11xx_hw_model cap11xx_devices[] = {
-	[CAP1106] = { .product_id = 0x55, .num_channels = 6 },
-	[CAP1126] = { .product_id = 0x53, .num_channels = 6 },
-	[CAP1188] = { .product_id = 0x50, .num_channels = 8 },
+	[CAP1106] = { .product_id = 0x55, .num_channels = 6, .num_leds = 0 },
+	[CAP1126] = { .product_id = 0x53, .num_channels = 6, .num_leds = 2 },
+	[CAP1188] = { .product_id = 0x50, .num_channels = 8, .num_leds = 8 },
 };
 
 static const struct reg_default cap11xx_reg_defaults[] = {
@@ -111,6 +140,7 @@
 	{ CAP11XX_REG_STANDBY_SENSITIVITY,	0x02 },
 	{ CAP11XX_REG_STANDBY_THRESH,		0x40 },
 	{ CAP11XX_REG_CONFIG2,			0x40 },
+	{ CAP11XX_REG_LED_POLARITY,		0x00 },
 	{ CAP11XX_REG_SENSOR_CALIB_LSB1,	0x00 },
 	{ CAP11XX_REG_SENSOR_CALIB_LSB2,	0x00 },
 };
@@ -177,6 +207,12 @@
 
 static int cap11xx_set_sleep(struct cap11xx_priv *priv, bool sleep)
 {
+	/*
+	 * DLSEEP mode would turn off all LEDs; skip it while LEDs are in use.
+	 */
+	if (IS_ENABLED(CONFIG_LEDS_CLASS) && priv->num_leds)
+		return 0;
+
 	return regmap_update_bits(priv->regmap, CAP11XX_REG_MAIN_CONTROL,
 				  CAP11XX_REG_MAIN_CONTROL_DLSEEP,
 				  sleep ? CAP11XX_REG_MAIN_CONTROL_DLSEEP : 0);
@@ -196,6 +232,104 @@
 	cap11xx_set_sleep(priv, true);
 }
 
+#ifdef CONFIG_LEDS_CLASS
+static void cap11xx_led_work(struct work_struct *work)
+{
+	struct cap11xx_led *led = container_of(work, struct cap11xx_led, work);
+	struct cap11xx_priv *priv = led->priv;
+	int value = led->new_brightness;
+
+	/*
+	 * All LEDs share the same duty cycle as this is a HW limitation.
+	 * Brightness levels per LED are either 0 (OFF) or 1 (ON).
+	 */
+	regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL,
+				BIT(led->reg), value ? BIT(led->reg) : 0);
+}
+
+static void cap11xx_led_set(struct led_classdev *cdev,
+			   enum led_brightness value)
+{
+	struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev);
+
+	if (led->new_brightness == value)
+		return;
+
+	led->new_brightness = value;
+	schedule_work(&led->work);
+}
+
+static int cap11xx_init_leds(struct device *dev,
+			     struct cap11xx_priv *priv, int num_leds)
+{
+	struct device_node *node = dev->of_node, *child;
+	struct cap11xx_led *led;
+	int cnt = of_get_child_count(node);
+	int error;
+
+	if (!num_leds || !cnt)
+		return 0;
+
+	if (cnt > num_leds)
+		return -EINVAL;
+
+	led = devm_kcalloc(dev, cnt, sizeof(struct cap11xx_led), GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	priv->leds = led;
+
+	error = regmap_update_bits(priv->regmap,
+				CAP11XX_REG_LED_OUTPUT_CONTROL, 0xff, 0);
+	if (error)
+		return error;
+
+	error = regmap_update_bits(priv->regmap, CAP11XX_REG_LED_DUTY_CYCLE_4,
+				CAP11XX_REG_LED_DUTY_MAX_MASK,
+				CAP11XX_REG_LED_DUTY_MAX_VALUE <<
+				CAP11XX_REG_LED_DUTY_MAX_MASK_SHIFT);
+	if (error)
+		return error;
+
+	for_each_child_of_node(node, child) {
+		u32 reg;
+		/* For detecting old Gen5 trackpads. */
+		led->cdev.name =
+			of_get_property(child, "label", NULL) ? : child->name;
+		led->cdev.default_trigger =
+			of_get_property(child, "linux,default-trigger", NULL);
+		led->cdev.flags = 0;
+		led->cdev.brightness_set = cap11xx_led_set;
+		led->cdev.max_brightness = 1;
+		led->cdev.brightness = LED_OFF;
+
+		error = of_property_read_u32(child, "reg", &reg);
+		if (error != 0 || reg >= num_leds)
+			return -EINVAL;
+
+		led->reg = reg;
+		led->priv = priv;
+
+		INIT_WORK(&led->work, cap11xx_led_work);
+
+		error = devm_led_classdev_register(dev, &led->cdev);
+		if (error)
+			return error;
+
+		priv->num_leds++;
+		led++;
+	}
+
+	return 0;
+}
+#else
+static int cap11xx_init_leds(struct device *dev,
+			     struct cap11xx_priv *priv, int num_leds)
+{
+	return 0;
+}
+#endif
+
 static int cap11xx_i2c_probe(struct i2c_client *i2c_client,
 			     const struct i2c_device_id *id)
 {
@@ -316,6 +450,10 @@
 	priv->idev->open = cap11xx_input_open;
 	priv->idev->close = cap11xx_input_close;
 
+	error = cap11xx_init_leds(dev, priv, cap->num_leds);
+	if (error)
+		return error;
+
 	input_set_drvdata(priv->idev, priv);
 
 	/*
@@ -361,7 +499,6 @@
 static struct i2c_driver cap11xx_i2c_driver = {
 	.driver = {
 		.name	= "cap11xx",
-		.owner	= THIS_MODULE,
 		.of_match_table = cap11xx_dt_ids,
 	},
 	.id_table	= cap11xx_i2c_ids,
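
The cap11xx hunks above hook each LED child node up to a led_classdev and defer the actual register write to a workqueue, because the LED core may call brightness_set from atomic context. A minimal sketch of that pattern, using hypothetical names (my_led, my_led_set, my_led_work) rather than the driver's own:

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/workqueue.h>

struct my_led {
	struct led_classdev cdev;
	struct work_struct work;
	enum led_brightness new_brightness;
};

static void my_led_work(struct work_struct *work)
{
	struct my_led *led = container_of(work, struct my_led, work);

	/* Process context: sleeping bus access (I2C/regmap) is allowed here. */
	pr_info("LED brightness -> %d\n", led->new_brightness);
}

static void my_led_set(struct led_classdev *cdev, enum led_brightness value)
{
	struct my_led *led = container_of(cdev, struct my_led, cdev);

	/* May run in atomic context, so only record the value and defer. */
	led->new_brightness = value;
	schedule_work(&led->work);
}

After INIT_WORK(&led->work, my_led_work), the classdev is registered with devm_led_classdev_register(dev, &led->cdev), which is also what lets the driver avoid an explicit unregister path.
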
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index ddf4045..9d517ca 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -239,6 +239,11 @@
 		}
 	}
 
+	if (i == ddata->pdata->nbuttons) {
+		error = -EINVAL;
+		goto out;
+	}
+
 	mutex_lock(&ddata->disable_lock);
 
 	for (i = 0; i < ddata->pdata->nbuttons; i++) {
@@ -655,7 +660,9 @@
 		if (of_property_read_u32(pp, "linux,input-type", &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+		button->wakeup = of_property_read_bool(pp, "wakeup-source") ||
+				 /* legacy name */
+				 of_property_read_bool(pp, "gpio-key,wakeup");
 
 		button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
 
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index c6dc644..870cfa6 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -152,7 +152,10 @@
 					     &button->type))
 			button->type = EV_KEY;
 
-		button->wakeup = fwnode_property_present(child, "gpio-key,wakeup");
+		button->wakeup =
+			fwnode_property_read_bool(child, "wakeup-source") ||
+			/* legacy name */
+			fwnode_property_read_bool(child, "gpio-key,wakeup");
 
 		if (fwnode_property_read_u32(child, "debounce-interval",
 					     &button->debounce_interval))
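
The gpio_keys and gpio_keys_polled hunks above, and several hunks further down (matrix_keypad, pmic8xxx-keypad, samsung-keypad, tc3589x-keypad), all switch to the same idiom: prefer the generic "wakeup-source" device-tree property and keep the driver-specific name only as a legacy fallback. A hedged sketch of the OF variant, where "vendor,legacy-wakeup" stands in for whichever legacy property a given driver used:

#include <linux/of.h>

static bool my_keys_wakeup(struct device_node *np)
{
	/* Generic binding first, then the deprecated per-driver name. */
	return of_property_read_bool(np, "wakeup-source") ||
	       of_property_read_bool(np, "vendor,legacy-wakeup");
}

gpio_keys_polled does the same through the fwnode API (fwnode_property_read_bool) so that it also works for ACPI-described devices.
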
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index 0ad422b..c717e8f 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -223,7 +223,6 @@
 static struct i2c_driver lm8333_driver = {
 	.driver = {
 		.name		= "lm8333",
-		.owner		= THIS_MODULE,
 	},
 	.probe		= lm8333_probe,
 	.remove		= lm8333_remove,
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index b370a59..7f12b65 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -425,8 +425,10 @@
 
 	if (of_get_property(np, "linux,no-autorepeat", NULL))
 		pdata->no_autorepeat = true;
-	if (of_get_property(np, "linux,wakeup", NULL))
-		pdata->wakeup = true;
+
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+			of_property_read_bool(np, "linux,wakeup"); /* legacy */
+
 	if (of_get_property(np, "gpio-activelow", NULL))
 		pdata->active_low = true;
 
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 375b05c..31090d7 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -265,7 +265,6 @@
 static struct i2c_driver mcs_touchkey_driver = {
 	.driver = {
 		.name	= "mcs_touchkey",
-		.owner	= THIS_MODULE,
 		.pm	= &mcs_touchkey_pm_ops,
 	},
 	.probe		= mcs_touchkey_probe,
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index 3aa2ec4..0fd612d 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -305,7 +305,6 @@
 static struct i2c_driver mpr_touchkey_driver = {
 	.driver = {
 		.name	= "mpr121",
-		.owner	= THIS_MODULE,
 		.pm	= &mpr121_touchkey_pm_ops,
 	},
 	.id_table	= mpr121_id,
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 32580af..5c68e3f 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -507,6 +507,7 @@
  */
 static int pmic8xxx_kp_probe(struct platform_device *pdev)
 {
+	struct device_node *np = pdev->dev.of_node;
 	unsigned int rows, cols;
 	bool repeat;
 	bool wakeup;
@@ -524,10 +525,11 @@
 		return -EINVAL;
 	}
 
-	repeat = !of_property_read_bool(pdev->dev.of_node,
-					"linux,input-no-autorepeat");
-	wakeup = of_property_read_bool(pdev->dev.of_node,
-					"linux,keypad-wakeup");
+	repeat = !of_property_read_bool(np, "linux,input-no-autorepeat");
+
+	wakeup = of_property_read_bool(np, "wakeup-source") ||
+		 /* legacy name */
+		 of_property_read_bool(np, "linux,keypad-wakeup");
 
 	kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
 	if (!kp)
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 52cd6e8..5a57787 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -277,7 +277,6 @@
 static struct i2c_driver qt1070_driver = {
 	.driver	= {
 		.name	= "qt1070",
-		.owner	= THIS_MODULE,
 		.pm	= &qt1070_pm_ops,
 	},
 	.id_table	= qt1070_id,
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 819b228..43b8648 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -497,7 +497,6 @@
 static struct i2c_driver qt2160_driver = {
 	.driver = {
 		.name	= "qt2160",
-		.owner  = THIS_MODULE,
 	},
 
 	.id_table	= qt2160_idtable,
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index 43e48dac..4e319eb 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -299,8 +299,10 @@
 	if (of_get_property(np, "linux,input-no-autorepeat", NULL))
 		pdata->no_autorepeat = true;
 
-	if (of_get_property(np, "linux,input-wakeup", NULL))
-		pdata->wakeup = true;
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source") ||
+			/* legacy name */
+			of_property_read_bool(np, "linux,input-wakeup");
+
 
 	return pdata;
 }
diff --git a/drivers/input/keyboard/snvs_pwrkey.c b/drivers/input/keyboard/snvs_pwrkey.c
new file mode 100644
index 0000000..78fd24c
--- /dev/null
+++ b/drivers/input/keyboard/snvs_pwrkey.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for the IMX SNVS ON/OFF Power Key
+ * Copyright (C) 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define SNVS_LPSR_REG	0x4C	/* LP Status Register */
+#define SNVS_LPCR_REG	0x38	/* LP Control Register */
+#define SNVS_HPSR_REG	0x14
+#define SNVS_HPSR_BTN	BIT(6)
+#define SNVS_LPSR_SPO	BIT(18)
+#define SNVS_LPCR_DEP_EN BIT(5)
+
+#define DEBOUNCE_TIME 30
+#define REPEAT_INTERVAL 60
+
+struct pwrkey_drv_data {
+	struct regmap *snvs;
+	int irq;
+	int keycode;
+	int keystate;  /* 1:pressed */
+	int wakeup;
+	struct timer_list check_timer;
+	struct input_dev *input;
+};
+
+static void imx_imx_snvs_check_for_events(unsigned long data)
+{
+	struct pwrkey_drv_data *pdata = (struct pwrkey_drv_data *) data;
+	struct input_dev *input = pdata->input;
+	u32 state;
+
+	regmap_read(pdata->snvs, SNVS_HPSR_REG, &state);
+	state = state & SNVS_HPSR_BTN ? 1 : 0;
+
+	/* only report new event if status changed */
+	if (state ^ pdata->keystate) {
+		pdata->keystate = state;
+		input_event(input, EV_KEY, pdata->keycode, state);
+		input_sync(input);
+		pm_relax(pdata->input->dev.parent);
+	}
+
+	/* keep checking while the key is held down */
+	if (state) {
+		mod_timer(&pdata->check_timer,
+			  jiffies + msecs_to_jiffies(REPEAT_INTERVAL));
+	}
+}
+
+static irqreturn_t imx_snvs_pwrkey_interrupt(int irq, void *dev_id)
+{
+	struct platform_device *pdev = dev_id;
+	struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+	u32 lp_status;
+
+	pm_wakeup_event(pdata->input->dev.parent, 0);
+
+	regmap_read(pdata->snvs, SNVS_LPSR_REG, &lp_status);
+	if (lp_status & SNVS_LPSR_SPO)
+		mod_timer(&pdata->check_timer, jiffies + msecs_to_jiffies(DEBOUNCE_TIME));
+
+	/* clear SPO status */
+	regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
+
+	return IRQ_HANDLED;
+}
+
+static void imx_snvs_pwrkey_act(void *pdata)
+{
+	struct pwrkey_drv_data *pd = pdata;
+
+	del_timer_sync(&pd->check_timer);
+}
+
+static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
+{
+	struct pwrkey_drv_data *pdata = NULL;
+	struct input_dev *input = NULL;
+	struct device_node *np;
+	int error;
+
+	/* Get SNVS register Page */
+	np = pdev->dev.of_node;
+	if (!np)
+		return -ENODEV;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdata->snvs = syscon_regmap_lookup_by_phandle(np, "regmap");
+
+	if (!pdata->snvs) {
+		dev_err(&pdev->dev, "Can't get snvs syscon\n");
+		return -ENODEV;
+	}
+
+	if (of_property_read_u32(np, "linux,keycode", &pdata->keycode)) {
+		pdata->keycode = KEY_POWER;
+		dev_warn(&pdev->dev, "no linux,keycode property in DT, defaulting to KEY_POWER\n");
+	}
+
+	pdata->wakeup = of_property_read_bool(np, "wakeup-source");
+
+	pdata->irq = platform_get_irq(pdev, 0);
+	if (pdata->irq < 0) {
+		dev_err(&pdev->dev, "no irq defined in platform data\n");
+		return -EINVAL;
+	}
+
+	regmap_update_bits(pdata->snvs, SNVS_LPCR_REG, SNVS_LPCR_DEP_EN, SNVS_LPCR_DEP_EN);
+
+	/* clear any pending interrupt before the driver is ready */
+	regmap_write(pdata->snvs, SNVS_LPSR_REG, SNVS_LPSR_SPO);
+
+	setup_timer(&pdata->check_timer,
+		    imx_imx_snvs_check_for_events, (unsigned long) pdata);
+
+	input = devm_input_allocate_device(&pdev->dev);
+	if (!input) {
+		dev_err(&pdev->dev, "failed to allocate the input device\n");
+		return -ENOMEM;
+	}
+
+	input->name = pdev->name;
+	input->phys = "snvs-pwrkey/input0";
+	input->id.bustype = BUS_HOST;
+
+	input_set_capability(input, EV_KEY, pdata->keycode);
+
+	/* add a custom devm action to cancel the check timer on removal */
+	error = devm_add_action(&pdev->dev, imx_snvs_pwrkey_act, pdata);
+	if (error) {
+		dev_err(&pdev->dev, "failed to register remove action\n");
+		return error;
+	}
+
+	error = devm_request_irq(&pdev->dev, pdata->irq,
+			       imx_snvs_pwrkey_interrupt,
+			       0, pdev->name, pdev);
+
+	if (error) {
+		dev_err(&pdev->dev, "interrupt not available.\n");
+		return error;
+	}
+
+	error = input_register_device(input);
+	if (error < 0) {
+		dev_err(&pdev->dev, "failed to register input device\n");
+		input_free_device(input);
+		return error;
+	}
+
+	pdata->input = input;
+	platform_set_drvdata(pdev, pdata);
+
+	device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+	return 0;
+}
+
+static int imx_snvs_pwrkey_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev))
+		enable_irq_wake(pdata->irq);
+
+	return 0;
+}
+
+static int imx_snvs_pwrkey_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pwrkey_drv_data *pdata = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev))
+		disable_irq_wake(pdata->irq);
+
+	return 0;
+}
+
+static const struct of_device_id imx_snvs_pwrkey_ids[] = {
+	{ .compatible = "fsl,sec-v4.0-pwrkey" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_snvs_pwrkey_ids);
+
+static SIMPLE_DEV_PM_OPS(imx_snvs_pwrkey_pm_ops, imx_snvs_pwrkey_suspend,
+				imx_snvs_pwrkey_resume);
+
+static struct platform_driver imx_snvs_pwrkey_driver = {
+	.driver = {
+		.name = "snvs_pwrkey",
+		.pm     = &imx_snvs_pwrkey_pm_ops,
+		.of_match_table = imx_snvs_pwrkey_ids,
+	},
+	.probe = imx_snvs_pwrkey_probe,
+};
+module_platform_driver(imx_snvs_pwrkey_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("i.MX snvs power key Driver");
+MODULE_LICENSE("GPL");
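
The new snvs_pwrkey driver debounces the power button with a kernel timer: the interrupt handler only arms the timer, and the timer callback samples the status register, reports the key, and re-arms itself while the button stays pressed. A stripped-down sketch of that shape, with placeholder names and a made-up 30 ms delay; it assumes setup_timer(&my_timer, my_check_button, 0) was called during probe:

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list my_timer;

static void my_check_button(unsigned long data)
{
	/* Sample the hardware here and report the key state. */
}

static irqreturn_t my_button_irq(int irq, void *dev_id)
{
	/* Defer the read until the line has had time to settle. */
	mod_timer(&my_timer, jiffies + msecs_to_jiffies(30));
	return IRQ_HANDLED;
}
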
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index 31c606a..e92dfd8 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -17,6 +17,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/mfd/tc3589x.h>
+#include <linux/device.h>
 
 /* Maximum supported keypad matrix row/columns size */
 #define TC3589x_MAX_KPROW               8
@@ -352,7 +353,10 @@
 	}
 
 	plat->no_autorepeat = of_property_read_bool(np, "linux,no-autorepeat");
-	plat->enable_wakeup = of_property_read_bool(np, "linux,wakeup");
+
+	plat->enable_wakeup = of_property_read_bool(np, "wakeup-source") ||
+			      /* legacy name */
+			      of_property_read_bool(np, "linux,wakeup");
 
 	/* The custom delay format is ms/16 */
 	of_property_read_u32(np, "debounce-delay-ms", &debounce_ms);
@@ -386,12 +390,15 @@
 	if (irq < 0)
 		return irq;
 
-	keypad = kzalloc(sizeof(struct tc_keypad), GFP_KERNEL);
-	input = input_allocate_device();
-	if (!keypad || !input) {
-		dev_err(&pdev->dev, "failed to allocate keypad memory\n");
-		error = -ENOMEM;
-		goto err_free_mem;
+	keypad = devm_kzalloc(&pdev->dev, sizeof(struct tc_keypad),
+			      GFP_KERNEL);
+	if (!keypad)
+		return -ENOMEM;
+
+	input = devm_input_allocate_device(&pdev->dev);
+	if (!input) {
+		dev_err(&pdev->dev, "failed to allocate input device\n");
+		return -ENOMEM;
 	}
 
 	keypad->board = plat;
@@ -410,7 +417,7 @@
 					   NULL, input);
 	if (error) {
 		dev_err(&pdev->dev, "Failed to build keymap\n");
-		goto err_free_mem;
+		return error;
 	}
 
 	keypad->keymap = input->keycode;
@@ -421,20 +428,23 @@
 
 	input_set_drvdata(input, keypad);
 
-	error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq,
-				     plat->irqtype | IRQF_ONESHOT,
-				     "tc3589x-keypad", keypad);
-	if (error < 0) {
+	tc3589x_keypad_disable(keypad);
+
+	error = devm_request_threaded_irq(&pdev->dev, irq,
+					  NULL, tc3589x_keypad_irq,
+					  plat->irqtype | IRQF_ONESHOT,
+					  "tc3589x-keypad", keypad);
+	if (error) {
 		dev_err(&pdev->dev,
 				"Could not allocate irq %d,error %d\n",
 				irq, error);
-		goto err_free_mem;
+		return error;
 	}
 
 	error = input_register_device(input);
 	if (error) {
 		dev_err(&pdev->dev, "Could not register input device\n");
-		goto err_free_irq;
+		return error;
 	}
 
 	/* let platform decide if keypad is a wakeup source or not */
@@ -444,30 +454,6 @@
 	platform_set_drvdata(pdev, keypad);
 
 	return 0;
-
-err_free_irq:
-	free_irq(irq, keypad);
-err_free_mem:
-	input_free_device(input);
-	kfree(keypad);
-	return error;
-}
-
-static int tc3589x_keypad_remove(struct platform_device *pdev)
-{
-	struct tc_keypad *keypad = platform_get_drvdata(pdev);
-	int irq = platform_get_irq(pdev, 0);
-
-	if (!keypad->keypad_stopped)
-		tc3589x_keypad_disable(keypad);
-
-	free_irq(irq, keypad);
-
-	input_unregister_device(keypad->input);
-
-	kfree(keypad);
-
-	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -518,7 +504,6 @@
 		.pm	= &tc3589x_keypad_dev_pm_ops,
 	},
 	.probe	= tc3589x_keypad_probe,
-	.remove	= tc3589x_keypad_remove,
 };
 module_platform_driver(tc3589x_keypad_driver);
 
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 4e491c1..9002298 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -404,7 +404,6 @@
 static struct i2c_driver tca8418_keypad_driver = {
 	.driver = {
 		.name	= TCA8418_NAME,
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tca8418_dt_ids),
 	},
 	.probe		= tca8418_keypad_probe,
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d4f0a81..906dd1b 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -167,28 +167,16 @@
 	depends on M68K
 
 config INPUT_MAX77693_HAPTIC
-	tristate "MAXIM MAX77693 haptic controller support"
-	depends on MFD_MAX77693 && PWM
+	tristate "MAXIM MAX77693/MAX77843 haptic controller support"
+	depends on (MFD_MAX77693 || MFD_MAX77843) && PWM
 	select INPUT_FF_MEMLESS
 	help
 	  This option enables support for the haptic controller on
-	  MAXIM MAX77693 chip.
+	  MAXIM MAX77693 and MAX77843 chips.
 
 	  To compile this driver as module, choose M here: the
 	  module will be called max77693-haptic.
 
-config INPUT_MAX77843_HAPTIC
-    tristate "MAXIM MAX77843 haptic controller support"
-    depends on MFD_MAX77843 && REGULATOR
-    select INPUT_FF_MEMLESS
-    help
-      This option enables support for the haptic controller on
-      MAXIM MAX77843 chip. The driver supports ff-memless interface
-      from input framework.
-
-      To compile this driver as module, choose M here: the
-      module will be called max77843-haptic.
-
 config INPUT_MAX8925_ONKEY
 	tristate "MAX8925 ONKEY support"
 	depends on MFD_MAX8925
@@ -259,7 +247,7 @@
 config INPUT_GP2A
 	tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
 	  hooked to an I2C bus.
@@ -269,7 +257,7 @@
 
 config INPUT_GPIO_BEEPER
 	tristate "Generic GPIO Beeper support"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a beeper connected to a GPIO pin.
 
@@ -278,7 +266,7 @@
 
 config INPUT_GPIO_TILT_POLLED
 	tristate "Polled GPIO tilt switch"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_POLLDEV
 	help
 	  This driver implements support for tilt switches connected
@@ -569,7 +557,7 @@
 
 config INPUT_GPIO_ROTARY_ENCODER
 	tristate "Rotary encoders connected to GPIO pins"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here to add support for rotary encoders connected to GPIO lines.
 	  Check file:Documentation/input/rotary-encoder.txt for more
@@ -776,7 +764,8 @@
 
 config INPUT_DRV260X_HAPTICS
 	tristate "TI DRV260X haptics support"
-	depends on INPUT && I2C && GPIOLIB
+	depends on INPUT && I2C
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_FF_MEMLESS
 	select REGMAP_I2C
 	help
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 53df07d..0357a08 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -41,7 +41,6 @@
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
 obj-$(CONFIG_INPUT_MAX77693_HAPTIC)	+= max77693-haptic.o
-obj-$(CONFIG_INPUT_MAX77843_HAPTIC)	+= max77843-haptic.o
 obj-$(CONFIG_INPUT_MAX8925_ONKEY)	+= max8925_onkey.o
 obj-$(CONFIG_INPUT_MAX8997_HAPTIC)	+= max8997_haptic.o
 obj-$(CONFIG_INPUT_MC13783_PWRBUTTON)	+= mc13783-pwrbutton.o
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index bdb5d03..a8b0a2e 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -158,7 +158,6 @@
 static struct i2c_driver adxl34x_driver = {
 	.driver = {
 		.name = "adxl34x",
-		.owner = THIS_MODULE,
 		.pm = &adxl34x_i2c_pm,
 		.of_match_table = of_match_ptr(adxl34x_of_id),
 	},
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4dbbed7..4bf6785 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -170,8 +170,8 @@
 
 	INIT_WORK(&haptics->work, arizona_haptics_work);
 
-	haptics->input_dev = input_allocate_device();
-	if (haptics->input_dev == NULL) {
+	haptics->input_dev = devm_input_allocate_device(&pdev->dev);
+	if (!haptics->input_dev) {
 		dev_err(arizona->dev, "Failed to allocate input device\n");
 		return -ENOMEM;
 	}
@@ -188,41 +188,23 @@
 	if (ret < 0) {
 		dev_err(arizona->dev, "input_ff_create_memless() failed: %d\n",
 			ret);
-		goto err_ialloc;
+		return ret;
 	}
 
 	ret = input_register_device(haptics->input_dev);
 	if (ret < 0) {
 		dev_err(arizona->dev, "couldn't register input device: %d\n",
 			ret);
-		goto err_iff;
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, haptics);
 
 	return 0;
-
-err_iff:
-	if (haptics->input_dev)
-		input_ff_destroy(haptics->input_dev);
-err_ialloc:
-	input_free_device(haptics->input_dev);
-
-	return ret;
-}
-
-static int arizona_haptics_remove(struct platform_device *pdev)
-{
-	struct arizona_haptics *haptics = platform_get_drvdata(pdev);
-
-	input_unregister_device(haptics->input_dev);
-
-	return 0;
 }
 
 static struct platform_driver arizona_haptics_driver = {
 	.probe		= arizona_haptics_probe,
-	.remove		= arizona_haptics_remove,
 	.driver		= {
 		.name	= "arizona-haptics",
 	},
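
The arizona-haptics conversion above (and the tc3589x-keypad one further down) leans on devm_*-managed resources so that the error-unwind labels and the remove() callback can go away: memory, the input device, and the IRQ are released automatically, in reverse order, when the device unbinds. A minimal probe sketch under those assumptions; names are illustrative, not taken from either driver:

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_keypad_irq(int irq, void *dev_id)
{
	/* Read the hardware and report keys on the input device in dev_id. */
	return IRQ_HANDLED;
}

static int my_keypad_probe(struct platform_device *pdev)
{
	struct input_dev *input;
	int irq, error;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	error = devm_request_threaded_irq(&pdev->dev, irq, NULL, my_keypad_irq,
					  IRQF_ONESHOT, "my-keypad", input);
	if (error)
		return error;

	/* devm-allocated input devices are unregistered/freed automatically. */
	return input_register_device(input);
}
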
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index b36831c..1d0e61d 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -333,10 +333,9 @@
 	y = ((0xc0 & data[2]) >> 6) | (data[3] << 2);
 	z = ((0xc0 & data[4]) >> 6) | (data[5] << 2);
 
-	/* sign extension */
-	x = (s16) (x << 6) >> 6;
-	y = (s16) (y << 6) >> 6;
-	z = (s16) (z << 6) >> 6;
+	x = sign_extend32(x, 9);
+	y = sign_extend32(y, 9);
+	z = sign_extend32(z, 9);
 
 	input_report_abs(bma150->input, ABS_X, x);
 	input_report_abs(bma150->input, ABS_Y, y);
@@ -654,7 +653,6 @@
 
 static struct i2c_driver bma150_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= BMA150_DRIVER,
 		.pm	= &bma150_pm,
 	},
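
The bma150 hunk above replaces the open-coded "shift left, arithmetic shift right" trick with sign_extend32(value, 9), where 9 is the position of the sign bit of the 10-bit sample. The arithmetic is identical; a small self-contained C sketch (outside the kernel) showing the equivalent operation:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the kernel's sign_extend32(value, 9) for 10-bit samples. */
static int32_t sign_extend_10bit(uint32_t value)
{
	/* Shift bit 9 up to bit 31, then arithmetic-shift it back down. */
	return (int32_t)(value << 22) >> 22;
}

int main(void)
{
	printf("%d\n", sign_extend_10bit(0x3ff));	/* -1  */
	printf("%d\n", sign_extend_10bit(0x1ff));	/* 511 */
	return 0;
}
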
diff --git a/drivers/input/misc/cma3000_d0x_i2c.c b/drivers/input/misc/cma3000_d0x_i2c.c
index 4fdef98..c702191 100644
--- a/drivers/input/misc/cma3000_d0x_i2c.c
+++ b/drivers/input/misc/cma3000_d0x_i2c.c
@@ -118,7 +118,6 @@
 	.id_table	= cma3000_i2c_id,
 	.driver = {
 		.name	= "cma3000_i2c_accl",
-		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &cma3000_i2c_pm_ops,
 #endif
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index e5d60ec..2adfd86c 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -204,7 +204,7 @@
 	int overdrive_voltage;
 };
 
-static struct reg_default drv260x_reg_defs[] = {
+static const struct reg_default drv260x_reg_defs[] = {
 	{ DRV260X_STATUS, 0xe0 },
 	{ DRV260X_MODE, 0x40 },
 	{ DRV260X_RT_PB_IN, 0x00 },
@@ -313,14 +313,14 @@
 	gpiod_set_value(haptics->enable_gpio, 0);
 }
 
-static const struct reg_default drv260x_lra_cal_regs[] = {
+static const struct reg_sequence drv260x_lra_cal_regs[] = {
 	{ DRV260X_MODE, DRV260X_AUTO_CAL },
 	{ DRV260X_CTRL3, DRV260X_NG_THRESH_2 },
 	{ DRV260X_FEEDBACK_CTRL, DRV260X_FB_REG_LRA_MODE |
 		DRV260X_BRAKE_FACTOR_4X | DRV260X_LOOP_GAIN_HIGH },
 };
 
-static const struct reg_default drv260x_lra_init_regs[] = {
+static const struct reg_sequence drv260x_lra_init_regs[] = {
 	{ DRV260X_MODE, DRV260X_RT_PLAYBACK },
 	{ DRV260X_A_TO_V_CTRL, DRV260X_AUDIO_HAPTICS_PEAK_20MS |
 		DRV260X_AUDIO_HAPTICS_FILTER_125HZ },
@@ -337,7 +337,7 @@
 	{ DRV260X_CTRL4, DRV260X_AUTOCAL_TIME_500MS },
 };
 
-static const struct reg_default drv260x_erm_cal_regs[] = {
+static const struct reg_sequence drv260x_erm_cal_regs[] = {
 	{ DRV260X_MODE, DRV260X_AUTO_CAL },
 	{ DRV260X_A_TO_V_MIN_INPUT, DRV260X_AUDIO_HAPTICS_MIN_IN_VOLT },
 	{ DRV260X_A_TO_V_MAX_INPUT, DRV260X_AUDIO_HAPTICS_MAX_IN_VOLT },
@@ -720,7 +720,6 @@
 	.probe		= drv260x_probe,
 	.driver		= {
 		.name	= "drv260x-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv260x_of_match),
 		.pm	= &drv260x_pm_ops,
 	},
diff --git a/drivers/input/misc/drv2665.c b/drivers/input/misc/drv2665.c
index 0afaa33..ef9bc12 100644
--- a/drivers/input/misc/drv2665.c
+++ b/drivers/input/misc/drv2665.c
@@ -74,7 +74,7 @@
 	0x9b, 0x9f, 0xa5, 0xad, 0xb8, 0xc4, 0xd2, 0xe0, 0xf0, 0x00,
 };
 
-static struct reg_default drv2665_reg_defs[] = {
+static const struct reg_default drv2665_reg_defs[] = {
 	{ DRV2665_STATUS, 0x02 },
 	{ DRV2665_CTRL_1, 0x28 },
 	{ DRV2665_CTRL_2, 0x40 },
@@ -132,7 +132,7 @@
 			"Failed to enter standby mode: %d\n", error);
 }
 
-static const struct reg_default drv2665_init_regs[] = {
+static const struct reg_sequence drv2665_init_regs[] = {
 	{ DRV2665_CTRL_2, 0 | DRV2665_10_MS_IDLE_TOUT },
 	{ DRV2665_CTRL_1, DRV2665_25_VPP_GAIN },
 };
@@ -309,7 +309,6 @@
 	.probe		= drv2665_probe,
 	.driver		= {
 		.name	= "drv2665-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv2665_of_match),
 		.pm	= &drv2665_pm_ops,
 	},
diff --git a/drivers/input/misc/drv2667.c b/drivers/input/misc/drv2667.c
index fc0fddf..d5ba748 100644
--- a/drivers/input/misc/drv2667.c
+++ b/drivers/input/misc/drv2667.c
@@ -116,7 +116,7 @@
 	u32 frequency;
 };
 
-static struct reg_default drv2667_reg_defs[] = {
+static const struct reg_default drv2667_reg_defs[] = {
 	{ DRV2667_STATUS, 0x02 },
 	{ DRV2667_CTRL_1, 0x28 },
 	{ DRV2667_CTRL_2, 0x40 },
@@ -262,14 +262,14 @@
 			"Failed to enter standby mode: %d\n", error);
 }
 
-static const struct reg_default drv2667_init_regs[] = {
+static const struct reg_sequence drv2667_init_regs[] = {
 	{ DRV2667_CTRL_2, 0 },
 	{ DRV2667_CTRL_1, DRV2667_25_VPP_GAIN },
 	{ DRV2667_WV_SEQ_0, 1 },
 	{ DRV2667_WV_SEQ_1, 0 }
 };
 
-static const struct reg_default drv2667_page1_init[] = {
+static const struct reg_sequence drv2667_page1_init[] = {
 	{ DRV2667_RAM_HDR_SZ, 0x05 },
 	{ DRV2667_RAM_START_HI, 0x80 },
 	{ DRV2667_RAM_START_LO, 0x06 },
@@ -484,7 +484,6 @@
 	.probe		= drv2667_probe,
 	.driver		= {
 		.name	= "drv2667-haptics",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(drv2667_of_match),
 		.pm	= &drv2667_pm_ops,
 	},
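
The drv260x/drv2665/drv2667 hunks change their write-once init and calibration tables from struct reg_default to struct reg_sequence. The two types look alike, but reg_default describes cached power-on defaults, while reg_sequence is what regmap expects for sequenced writes such as regmap_multi_reg_write() or regmap_register_patch(). A minimal sketch of the intended use, with made-up register addresses and values:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical init sequence written once during probe. */
static const struct reg_sequence my_init_seq[] = {
	{ 0x01, 0x28 },
	{ 0x02, 0x40 },
};

static int my_chip_init(struct regmap *regmap)
{
	return regmap_multi_reg_write(regmap, my_init_seq,
				      ARRAY_SIZE(my_init_seq));
}
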
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
index 0ac176d..3bfdfcc 100644
--- a/drivers/input/misc/gp2ap002a00f.c
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -267,11 +267,11 @@
 	{ GP2A_I2C_NAME, 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, gp2a_i2c_id);
 
 static struct i2c_driver gp2a_i2c_driver = {
 	.driver = {
 		.name	= GP2A_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &gp2a_pm,
 	},
 	.probe		= gp2a_probe,
diff --git a/drivers/input/misc/kxtj9.c b/drivers/input/misc/kxtj9.c
index 6e29349..e058d71 100644
--- a/drivers/input/misc/kxtj9.c
+++ b/drivers/input/misc/kxtj9.c
@@ -658,7 +658,6 @@
 static struct i2c_driver kxtj9_driver = {
 	.driver = {
 		.name	= NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &kxtj9_pm_ops,
 	},
 	.probe		= kxtj9_probe,
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 39e930c..6d96bff 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -1,8 +1,9 @@
 /*
- * MAXIM MAX77693 Haptic device driver
+ * MAXIM MAX77693/MAX77843 Haptic device driver
  *
- * Copyright (C) 2014 Samsung Electronics
+ * Copyright (C) 2014,2015 Samsung Electronics
  * Jaewon Kim <jaewon02.kim@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski@samsung.com>
  *
  * This program is not provided / owned by Maxim Integrated Products.
  *
@@ -24,7 +25,9 @@
 #include <linux/workqueue.h>
 #include <linux/regulator/consumer.h>
 #include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77693-private.h>
+#include <linux/mfd/max77843-private.h>
 
 #define MAX_MAGNITUDE_SHIFT	16
 
@@ -46,6 +49,8 @@
 };
 
 struct max77693_haptic {
+	enum max77693_types dev_type;
+
 	struct regmap *regmap_pmic;
 	struct regmap *regmap_haptic;
 	struct device *dev;
@@ -59,7 +64,6 @@
 	unsigned int pwm_duty;
 	enum max77693_haptic_motor_type type;
 	enum max77693_haptic_pulse_mode mode;
-	enum max77693_haptic_pwm_divisor pwm_divisor;
 
 	struct work_struct work;
 };
@@ -78,19 +82,52 @@
 	return 0;
 }
 
+static int max77843_haptic_bias(struct max77693_haptic *haptic, bool on)
+{
+	int error;
+
+	if (haptic->dev_type != TYPE_MAX77843)
+		return 0;
+
+	error = regmap_update_bits(haptic->regmap_haptic,
+				   MAX77843_SYS_REG_MAINCTRL1,
+				   MAX77843_MAINCTRL1_BIASEN_MASK,
+				   on << MAINCTRL1_BIASEN_SHIFT);
+	if (error) {
+		dev_err(haptic->dev, "failed to %s bias: %d\n",
+			on ? "enable" : "disable", error);
+		return error;
+	}
+
+	return 0;
+}
+
 static int max77693_haptic_configure(struct max77693_haptic *haptic,
 				     bool enable)
 {
-	unsigned int value;
+	unsigned int value, config_reg;
 	int error;
 
-	value = ((haptic->type << MAX77693_CONFIG2_MODE) |
-		(enable << MAX77693_CONFIG2_MEN) |
-		(haptic->mode << MAX77693_CONFIG2_HTYP) |
-		(haptic->pwm_divisor));
+	switch (haptic->dev_type) {
+	case TYPE_MAX77693:
+		value = ((haptic->type << MAX77693_CONFIG2_MODE) |
+			(enable << MAX77693_CONFIG2_MEN) |
+			(haptic->mode << MAX77693_CONFIG2_HTYP) |
+			MAX77693_HAPTIC_PWM_DIVISOR_128);
+		config_reg = MAX77693_HAPTIC_REG_CONFIG2;
+		break;
+	case TYPE_MAX77843:
+		value = (haptic->type << MCONFIG_MODE_SHIFT) |
+			(enable << MCONFIG_MEN_SHIFT) |
+			MAX77693_HAPTIC_PWM_DIVISOR_128;
+		config_reg = MAX77843_HAP_REG_MCONFIG;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	error = regmap_write(haptic->regmap_haptic,
-			     MAX77693_HAPTIC_REG_CONFIG2, value);
+			     config_reg, value);
 	if (error) {
 		dev_err(haptic->dev,
 			"failed to update haptic config: %d\n", error);
@@ -104,6 +141,9 @@
 {
 	int error;
 
+	if (haptic->dev_type != TYPE_MAX77693)
+		return 0;
+
 	error = regmap_update_bits(haptic->regmap_pmic,
 				   MAX77693_PMIC_REG_LSCNFG,
 				   MAX77693_PMIC_LOW_SYS_MASK,
@@ -219,6 +259,10 @@
 	struct max77693_haptic *haptic = input_get_drvdata(dev);
 	int error;
 
+	error = max77843_haptic_bias(haptic, true);
+	if (error)
+		return error;
+
 	error = regulator_enable(haptic->motor_reg);
 	if (error) {
 		dev_err(haptic->dev,
@@ -241,6 +285,8 @@
 	if (error)
 		dev_err(haptic->dev,
 			"failed to disable regulator: %d\n", error);
+
+	max77843_haptic_bias(haptic, false);
 }
 
 static int max77693_haptic_probe(struct platform_device *pdev)
@@ -254,13 +300,26 @@
 		return -ENOMEM;
 
 	haptic->regmap_pmic = max77693->regmap;
-	haptic->regmap_haptic = max77693->regmap_haptic;
 	haptic->dev = &pdev->dev;
 	haptic->type = MAX77693_HAPTIC_LRA;
 	haptic->mode = MAX77693_HAPTIC_EXTERNAL_MODE;
-	haptic->pwm_divisor = MAX77693_HAPTIC_PWM_DIVISOR_128;
 	haptic->suspend_state = false;
 
+	/* Variant-specific init */
+	haptic->dev_type = platform_get_device_id(pdev)->driver_data;
+	switch (haptic->dev_type) {
+	case TYPE_MAX77693:
+		haptic->regmap_haptic = max77693->regmap_haptic;
+		break;
+	case TYPE_MAX77843:
+		haptic->regmap_haptic = max77693->regmap;
+		break;
+	default:
+		dev_err(&pdev->dev, "unsupported device type: %u\n",
+			haptic->dev_type);
+		return -EINVAL;
+	}
+
 	INIT_WORK(&haptic->work, max77693_haptic_play_work);
 
 	/* Get pwm and regulator for haptic device */
@@ -338,16 +397,25 @@
 static SIMPLE_DEV_PM_OPS(max77693_haptic_pm_ops,
 			 max77693_haptic_suspend, max77693_haptic_resume);
 
+static const struct platform_device_id max77693_haptic_id[] = {
+	{ "max77693-haptic", TYPE_MAX77693 },
+	{ "max77843-haptic", TYPE_MAX77843 },
+	{},
+};
+MODULE_DEVICE_TABLE(platform, max77693_haptic_id);
+
 static struct platform_driver max77693_haptic_driver = {
 	.driver		= {
 		.name	= "max77693-haptic",
 		.pm	= &max77693_haptic_pm_ops,
 	},
 	.probe		= max77693_haptic_probe,
+	.id_table	= max77693_haptic_id,
 };
 module_platform_driver(max77693_haptic_driver);
 
 MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_DESCRIPTION("MAXIM MAX77693 Haptic driver");
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski@samsung.com>");
+MODULE_DESCRIPTION("MAXIM 77693/77843 Haptic driver");
 MODULE_ALIAS("platform:max77693-haptic");
 MODULE_LICENSE("GPL");
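
With max77843-haptic folded into max77693-haptic.c (its old standalone copy is deleted just below), one probe routine now serves both chips and selects the variant from the platform_device_id table. A sketch of that dispatch pattern with hypothetical IDs and chip names:

#include <linux/module.h>
#include <linux/platform_device.h>

enum my_variant { MY_CHIP_A, MY_CHIP_B };

static int my_probe(struct platform_device *pdev)
{
	enum my_variant variant = platform_get_device_id(pdev)->driver_data;

	switch (variant) {
	case MY_CHIP_A:
		/* chip-A specific setup */
		break;
	case MY_CHIP_B:
		/* chip-B specific setup */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct platform_device_id my_id_table[] = {
	{ "my-chip-a", MY_CHIP_A },
	{ "my-chip-b", MY_CHIP_B },
	{ }
};
MODULE_DEVICE_TABLE(platform, my_id_table);
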
diff --git a/drivers/input/misc/max77843-haptic.c b/drivers/input/misc/max77843-haptic.c
deleted file mode 100644
index dccbb46..0000000
--- a/drivers/input/misc/max77843-haptic.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * MAXIM MAX77693 Haptic device driver
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/err.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/input.h>
-#include <linux/mfd/max77843-private.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pwm.h>
-#include <linux/regmap.h>
-#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-
-#define MAX_MAGNITUDE_SHIFT        16
-
-enum max77843_haptic_motor_type {
-	MAX77843_HAPTIC_ERM = 0,
-	MAX77843_HAPTIC_LRA,
-};
-
-enum max77843_haptic_pwm_divisor {
-	MAX77843_HAPTIC_PWM_DIVISOR_32 = 0,
-	MAX77843_HAPTIC_PWM_DIVISOR_64,
-	MAX77843_HAPTIC_PWM_DIVISOR_128,
-	MAX77843_HAPTIC_PWM_DIVISOR_256,
-};
-
-struct max77843_haptic {
-	struct regmap *regmap_haptic;
-	struct device *dev;
-	struct input_dev *input_dev;
-	struct pwm_device *pwm_dev;
-	struct regulator *motor_reg;
-	struct work_struct work;
-	struct mutex mutex;
-
-	unsigned int magnitude;
-	unsigned int pwm_duty;
-
-	bool active;
-	bool suspended;
-
-	enum max77843_haptic_motor_type type;
-	enum max77843_haptic_pwm_divisor pwm_divisor;
-};
-
-static int max77843_haptic_set_duty_cycle(struct max77843_haptic *haptic)
-{
-	int delta = (haptic->pwm_dev->period + haptic->pwm_duty) / 2;
-	int error;
-
-	error = pwm_config(haptic->pwm_dev, delta, haptic->pwm_dev->period);
-	if (error) {
-		dev_err(haptic->dev, "failed to configure pwm: %d\n", error);
-		return error;
-	}
-
-	return 0;
-}
-
-static int max77843_haptic_bias(struct max77843_haptic *haptic, bool on)
-{
-	int error;
-
-	error = regmap_update_bits(haptic->regmap_haptic,
-				   MAX77843_SYS_REG_MAINCTRL1,
-				   MAX77843_MAINCTRL1_BIASEN_MASK,
-				   on << MAINCTRL1_BIASEN_SHIFT);
-	if (error) {
-		dev_err(haptic->dev, "failed to %s bias: %d\n",
-			on ? "enable" : "disable", error);
-		return error;
-	}
-
-	return 0;
-}
-
-static int max77843_haptic_config(struct max77843_haptic *haptic, bool enable)
-{
-	unsigned int value;
-	int error;
-
-	value = (haptic->type << MCONFIG_MODE_SHIFT) |
-		(enable << MCONFIG_MEN_SHIFT) |
-		(haptic->pwm_divisor << MCONFIG_PDIV_SHIFT);
-
-	error = regmap_write(haptic->regmap_haptic,
-			     MAX77843_HAP_REG_MCONFIG, value);
-	if (error) {
-		dev_err(haptic->dev,
-			"failed to update haptic config: %d\n", error);
-		return error;
-	}
-
-	return 0;
-}
-
-static int max77843_haptic_enable(struct max77843_haptic *haptic)
-{
-	int error;
-
-	if (haptic->active)
-		return 0;
-
-	error = pwm_enable(haptic->pwm_dev);
-	if (error) {
-		dev_err(haptic->dev,
-			"failed to enable pwm device: %d\n", error);
-		return error;
-	}
-
-	error = max77843_haptic_config(haptic, true);
-	if (error)
-		goto err_config;
-
-	haptic->active = true;
-
-	return 0;
-
-err_config:
-	pwm_disable(haptic->pwm_dev);
-
-	return error;
-}
-
-static int max77843_haptic_disable(struct max77843_haptic *haptic)
-{
-	int error;
-
-	if (!haptic->active)
-		return 0;
-
-	error = max77843_haptic_config(haptic, false);
-	if (error)
-		return error;
-
-	pwm_disable(haptic->pwm_dev);
-
-	haptic->active = false;
-
-	return 0;
-}
-
-static void max77843_haptic_play_work(struct work_struct *work)
-{
-	struct max77843_haptic *haptic =
-			container_of(work, struct max77843_haptic, work);
-	int error;
-
-	mutex_lock(&haptic->mutex);
-
-	if (haptic->suspended)
-		goto out_unlock;
-
-	if (haptic->magnitude) {
-		error = max77843_haptic_set_duty_cycle(haptic);
-		if (error) {
-			dev_err(haptic->dev,
-				"failed to set duty cycle: %d\n", error);
-			goto out_unlock;
-		}
-
-		error = max77843_haptic_enable(haptic);
-		if (error)
-			dev_err(haptic->dev,
-				"cannot enable haptic: %d\n", error);
-	} else {
-		error = max77843_haptic_disable(haptic);
-		if (error)
-			dev_err(haptic->dev,
-				"cannot disable haptic: %d\n", error);
-	}
-
-out_unlock:
-	mutex_unlock(&haptic->mutex);
-}
-
-static int max77843_haptic_play_effect(struct input_dev *dev, void *data,
-		struct ff_effect *effect)
-{
-	struct max77843_haptic *haptic = input_get_drvdata(dev);
-	u64 period_mag_multi;
-
-	haptic->magnitude = effect->u.rumble.strong_magnitude;
-	if (!haptic->magnitude)
-		haptic->magnitude = effect->u.rumble.weak_magnitude;
-
-	period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
-	haptic->pwm_duty = (unsigned int)(period_mag_multi >>
-						MAX_MAGNITUDE_SHIFT);
-
-	schedule_work(&haptic->work);
-
-	return 0;
-}
-
-static int max77843_haptic_open(struct input_dev *dev)
-{
-	struct max77843_haptic *haptic = input_get_drvdata(dev);
-	int error;
-
-	error = max77843_haptic_bias(haptic, true);
-	if (error)
-		return error;
-
-	error = regulator_enable(haptic->motor_reg);
-	if (error) {
-		dev_err(haptic->dev,
-			"failed to enable regulator: %d\n", error);
-		return error;
-	}
-
-	return 0;
-}
-
-static void max77843_haptic_close(struct input_dev *dev)
-{
-	struct max77843_haptic *haptic = input_get_drvdata(dev);
-	int error;
-
-	cancel_work_sync(&haptic->work);
-	max77843_haptic_disable(haptic);
-
-	error = regulator_disable(haptic->motor_reg);
-	if (error)
-		dev_err(haptic->dev,
-			"failed to disable regulator: %d\n", error);
-
-	max77843_haptic_bias(haptic, false);
-}
-
-static int max77843_haptic_probe(struct platform_device *pdev)
-{
-	struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
-	struct max77843_haptic *haptic;
-	int error;
-
-	haptic = devm_kzalloc(&pdev->dev, sizeof(*haptic), GFP_KERNEL);
-	if (!haptic)
-		return -ENOMEM;
-
-	haptic->regmap_haptic = max77843->regmap;
-	haptic->dev = &pdev->dev;
-	haptic->type = MAX77843_HAPTIC_LRA;
-	haptic->pwm_divisor = MAX77843_HAPTIC_PWM_DIVISOR_128;
-
-	INIT_WORK(&haptic->work, max77843_haptic_play_work);
-	mutex_init(&haptic->mutex);
-
-	haptic->pwm_dev = devm_pwm_get(&pdev->dev, NULL);
-	if (IS_ERR(haptic->pwm_dev)) {
-		dev_err(&pdev->dev, "failed to get pwm device\n");
-		return PTR_ERR(haptic->pwm_dev);
-	}
-
-	haptic->motor_reg = devm_regulator_get_exclusive(&pdev->dev, "haptic");
-	if (IS_ERR(haptic->motor_reg)) {
-		dev_err(&pdev->dev, "failed to get regulator\n");
-		return PTR_ERR(haptic->motor_reg);
-	}
-
-	haptic->input_dev = devm_input_allocate_device(&pdev->dev);
-	if (!haptic->input_dev) {
-		dev_err(&pdev->dev, "failed to allocate input device\n");
-		return -ENOMEM;
-	}
-
-	haptic->input_dev->name = "max77843-haptic";
-	haptic->input_dev->id.version = 1;
-	haptic->input_dev->dev.parent = &pdev->dev;
-	haptic->input_dev->open = max77843_haptic_open;
-	haptic->input_dev->close = max77843_haptic_close;
-	input_set_drvdata(haptic->input_dev, haptic);
-	input_set_capability(haptic->input_dev, EV_FF, FF_RUMBLE);
-
-	error = input_ff_create_memless(haptic->input_dev, NULL,
-			max77843_haptic_play_effect);
-	if (error) {
-		dev_err(&pdev->dev, "failed to create force-feedback\n");
-		return error;
-	}
-
-	error = input_register_device(haptic->input_dev);
-	if (error) {
-		dev_err(&pdev->dev, "failed to register input device\n");
-		return error;
-	}
-
-	platform_set_drvdata(pdev, haptic);
-
-	return 0;
-}
-
-static int __maybe_unused max77843_haptic_suspend(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct max77843_haptic *haptic = platform_get_drvdata(pdev);
-	int error;
-
-	error = mutex_lock_interruptible(&haptic->mutex);
-	if (error)
-		return error;
-
-	max77843_haptic_disable(haptic);
-
-	haptic->suspended = true;
-
-	mutex_unlock(&haptic->mutex);
-
-	return 0;
-}
-
-static int __maybe_unused max77843_haptic_resume(struct device *dev)
-{
-	struct platform_device *pdev = to_platform_device(dev);
-	struct max77843_haptic *haptic = platform_get_drvdata(pdev);
-	unsigned int magnitude;
-
-	mutex_lock(&haptic->mutex);
-
-	haptic->suspended = false;
-
-	magnitude = ACCESS_ONCE(haptic->magnitude);
-	if (magnitude)
-		max77843_haptic_enable(haptic);
-
-	mutex_unlock(&haptic->mutex);
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(max77843_haptic_pm_ops,
-		max77843_haptic_suspend, max77843_haptic_resume);
-
-static struct platform_driver max77843_haptic_driver = {
-	.driver		= {
-		.name	= "max77843-haptic",
-		.pm	= &max77843_haptic_pm_ops,
-	},
-	.probe		= max77843_haptic_probe,
-};
-module_platform_driver(max77843_haptic_driver);
-
-MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_DESCRIPTION("MAXIM MAX77843 Haptic driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
index d0f6872..a806ba3 100644
--- a/drivers/input/misc/max8997_haptic.c
+++ b/drivers/input/misc/max8997_haptic.c
@@ -394,7 +394,7 @@
 	{ "max8997-haptic", 0 },
 	{ },
 };
-MODULE_DEVICE_TABLE(i2c, max8997_haptic_id);
+MODULE_DEVICE_TABLE(platform, max8997_haptic_id);
 
 static struct platform_driver max8997_haptic_driver = {
 	.driver	= {
@@ -407,7 +407,6 @@
 };
 module_platform_driver(max8997_haptic_driver);
 
-MODULE_ALIAS("platform:max8997-haptic");
 MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
 MODULE_DESCRIPTION("max8997_haptic driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index 5e50513..f088db3 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -466,7 +466,6 @@
 static struct i2c_driver mpu3050_i2c_driver = {
 	.driver	= {
 		.name	= "mpu3050",
-		.owner	= THIS_MODULE,
 		.pm	= &mpu3050_pm,
 		.of_match_table = mpu3050_of_match,
 	},
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 97f711a..4abdf1e 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -208,7 +208,6 @@
 static struct i2c_driver pcf8574_kp_driver = {
 	.driver = {
 		.name  = DRV_NAME,
-		.owner = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm = &pcf8574_kp_pm_ops,
 #endif
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index c4ca20e..3f02e0e 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -20,17 +20,72 @@
 #include <linux/regmap.h>
 #include <linux/log2.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
 #define PON_CNTL_1 0x1C
 #define PON_CNTL_PULL_UP BIT(7)
 #define PON_CNTL_TRIG_DELAY_MASK (0x7)
+#define PON_CNTL_1_PULL_UP_EN			0xe0
+#define PON_CNTL_1_USB_PWR_EN			0x10
+#define PON_CNTL_1_WD_EN_RESET			0x08
+
+#define PM8058_SLEEP_CTRL			0x02b
+#define PM8921_SLEEP_CTRL			0x10a
+
+#define SLEEP_CTRL_SMPL_EN_RESET		0x04
+
+/* Regulator master enable addresses */
+#define REG_PM8058_VREG_EN_MSM			0x018
+#define REG_PM8058_VREG_EN_GRP_5_4		0x1c8
+
+/* Regulator control registers for shutdown/reset */
+#define PM8058_S0_CTRL				0x004
+#define PM8058_S1_CTRL				0x005
+#define PM8058_S3_CTRL				0x111
+#define PM8058_L21_CTRL				0x120
+#define PM8058_L22_CTRL				0x121
+
+#define PM8058_REGULATOR_ENABLE_MASK		0x80
+#define PM8058_REGULATOR_ENABLE			0x80
+#define PM8058_REGULATOR_DISABLE		0x00
+#define PM8058_REGULATOR_PULL_DOWN_MASK		0x40
+#define PM8058_REGULATOR_PULL_DOWN_EN		0x40
+
+/* Buck CTRL register */
+#define PM8058_SMPS_LEGACY_VREF_SEL		0x20
+#define PM8058_SMPS_LEGACY_VPROG_MASK		0x1f
+#define PM8058_SMPS_ADVANCED_BAND_MASK		0xC0
+#define PM8058_SMPS_ADVANCED_BAND_SHIFT		6
+#define PM8058_SMPS_ADVANCED_VPROG_MASK		0x3f
+
+/* Buck TEST2 registers for shutdown/reset */
+#define PM8058_S0_TEST2				0x084
+#define PM8058_S1_TEST2				0x085
+#define PM8058_S3_TEST2				0x11a
+
+#define PM8058_REGULATOR_BANK_WRITE		0x80
+#define PM8058_REGULATOR_BANK_MASK		0x70
+#define PM8058_REGULATOR_BANK_SHIFT		4
+#define PM8058_REGULATOR_BANK_SEL(n)	((n) << PM8058_REGULATOR_BANK_SHIFT)
+
+/* Buck TEST2 register bank 1 */
+#define PM8058_SMPS_LEGACY_VLOW_SEL		0x01
+
+/* Buck TEST2 register bank 7 */
+#define PM8058_SMPS_ADVANCED_MODE_MASK		0x02
+#define PM8058_SMPS_ADVANCED_MODE		0x02
+#define PM8058_SMPS_LEGACY_MODE			0x00
 
 /**
  * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
  * @key_press_irq: key press irq number
+ * @regmap: device regmap
+ * @shutdown_fn: shutdown configuration function
  */
 struct pmic8xxx_pwrkey {
 	int key_press_irq;
+	struct regmap *regmap;
+	int (*shutdown_fn)(struct pmic8xxx_pwrkey *, bool);
 };
 
 static irqreturn_t pwrkey_press_irq(int irq, void *_pwr)
@@ -76,6 +131,212 @@
 static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
 		pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
 
+static void pmic8xxx_pwrkey_shutdown(struct platform_device *pdev)
+{
+	struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
+	int error;
+	u8 mask, val;
+	bool reset = system_state == SYSTEM_RESTART;
+
+	if (pwrkey->shutdown_fn) {
+		error = pwrkey->shutdown_fn(pwrkey, reset);
+		if (error)
+			return;
+	}
+
+	/*
+	 * Select action to perform (reset or shutdown) when PS_HOLD goes low.
+	 * Also ensure that KPD, CBL0, and CBL1 pull ups are enabled and that
+	 * USB charging is enabled.
+	 */
+	mask = PON_CNTL_1_PULL_UP_EN | PON_CNTL_1_USB_PWR_EN;
+	mask |= PON_CNTL_1_WD_EN_RESET;
+	val = mask;
+	if (!reset)
+		val &= ~PON_CNTL_1_WD_EN_RESET;
+
+	regmap_update_bits(pwrkey->regmap, PON_CNTL_1, mask, val);
+}
+
+/*
+ * Set an SMPS regulator to be disabled in its CTRL register, but enabled
+ * in the master enable register.  Also set its pull-down enable bit.
+ * Take care to make sure that the output voltage doesn't change if switching
+ * from advanced mode to legacy mode.
+ */
+static int pm8058_disable_smps_locally_set_pull_down(struct regmap *regmap,
+	u16 ctrl_addr, u16 test2_addr, u16 master_enable_addr,
+	u8 master_enable_bit)
+{
+	int error;
+	u8 vref_sel, vlow_sel, band, vprog, bank;
+	unsigned int reg;
+
+	bank = PM8058_REGULATOR_BANK_SEL(7);
+	error = regmap_write(regmap, test2_addr, bank);
+	if (error)
+		return error;
+
+	error = regmap_read(regmap, test2_addr, &reg);
+	if (error)
+		return error;
+
+	reg &= PM8058_SMPS_ADVANCED_MODE_MASK;
+	/* Check if in advanced mode. */
+	if (reg == PM8058_SMPS_ADVANCED_MODE) {
+		/* Determine current output voltage. */
+		error = regmap_read(regmap, ctrl_addr, &reg);
+		if (error)
+			return error;
+
+		band = reg & PM8058_SMPS_ADVANCED_BAND_MASK;
+		band >>= PM8058_SMPS_ADVANCED_BAND_SHIFT;
+		switch (band) {
+		case 3:
+			vref_sel = 0;
+			vlow_sel = 0;
+			break;
+		case 2:
+			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+			vlow_sel = 0;
+			break;
+		case 1:
+			vref_sel = PM8058_SMPS_LEGACY_VREF_SEL;
+			vlow_sel = PM8058_SMPS_LEGACY_VLOW_SEL;
+			break;
+		default:
+			pr_err("%s: regulator already disabled\n", __func__);
+			return -EPERM;
+		}
+		vprog = reg & PM8058_SMPS_ADVANCED_VPROG_MASK;
+		/* Round up if fine step is in use. */
+		vprog = (vprog + 1) >> 1;
+		if (vprog > PM8058_SMPS_LEGACY_VPROG_MASK)
+			vprog = PM8058_SMPS_LEGACY_VPROG_MASK;
+
+		/* Set VLOW_SEL bit. */
+		bank = PM8058_REGULATOR_BANK_SEL(1);
+		error = regmap_write(regmap, test2_addr, bank);
+		if (error)
+			return error;
+
+		error = regmap_update_bits(regmap, test2_addr,
+			PM8058_REGULATOR_BANK_WRITE | PM8058_REGULATOR_BANK_MASK
+				| PM8058_SMPS_LEGACY_VLOW_SEL,
+			PM8058_REGULATOR_BANK_WRITE |
+			PM8058_REGULATOR_BANK_SEL(1) | vlow_sel);
+		if (error)
+			return error;
+
+		/* Switch to legacy mode */
+		bank = PM8058_REGULATOR_BANK_SEL(7);
+		error = regmap_write(regmap, test2_addr, bank);
+		if (error)
+			return error;
+
+		error = regmap_update_bits(regmap, test2_addr,
+				PM8058_REGULATOR_BANK_WRITE |
+				PM8058_REGULATOR_BANK_MASK |
+				PM8058_SMPS_ADVANCED_MODE_MASK,
+				PM8058_REGULATOR_BANK_WRITE |
+				PM8058_REGULATOR_BANK_SEL(7) |
+				PM8058_SMPS_LEGACY_MODE);
+		if (error)
+			return error;
+
+		/* Enable locally, enable pull down, keep voltage the same. */
+		error = regmap_update_bits(regmap, ctrl_addr,
+			PM8058_REGULATOR_ENABLE_MASK |
+			PM8058_REGULATOR_PULL_DOWN_MASK |
+			PM8058_SMPS_LEGACY_VREF_SEL |
+			PM8058_SMPS_LEGACY_VPROG_MASK,
+			PM8058_REGULATOR_ENABLE | PM8058_REGULATOR_PULL_DOWN_EN
+				| vref_sel | vprog);
+		if (error)
+			return error;
+	}
+
+	/* Enable in master control register. */
+	error = regmap_update_bits(regmap, master_enable_addr,
+			master_enable_bit, master_enable_bit);
+	if (error)
+		return error;
+
+	/* Disable locally and enable pull down. */
+	return regmap_update_bits(regmap, ctrl_addr,
+		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_disable_ldo_locally_set_pull_down(struct regmap *regmap,
+		u16 ctrl_addr, u16 master_enable_addr, u8 master_enable_bit)
+{
+	int error;
+
+	/* Enable LDO in master control register. */
+	error = regmap_update_bits(regmap, master_enable_addr,
+			master_enable_bit, master_enable_bit);
+	if (error)
+		return error;
+
+	/* Disable LDO in CTRL register and set pull down */
+	return regmap_update_bits(regmap, ctrl_addr,
+		PM8058_REGULATOR_ENABLE_MASK | PM8058_REGULATOR_PULL_DOWN_MASK,
+		PM8058_REGULATOR_DISABLE | PM8058_REGULATOR_PULL_DOWN_EN);
+}
+
+static int pm8058_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+	int error;
+	struct regmap *regmap = pwrkey->regmap;
+	u8 mask, val;
+
+	/* When shutting down, enable active pulldowns on important rails. */
+	if (!reset) {
+		/* Disable SMPS's 0,1,3 locally and set pulldown enable bits. */
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S0_CTRL, PM8058_S0_TEST2,
+			REG_PM8058_VREG_EN_MSM, BIT(7));
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S1_CTRL, PM8058_S1_TEST2,
+			REG_PM8058_VREG_EN_MSM, BIT(6));
+		pm8058_disable_smps_locally_set_pull_down(regmap,
+			PM8058_S3_CTRL, PM8058_S3_TEST2,
+			REG_PM8058_VREG_EN_GRP_5_4, BIT(7) | BIT(4));
+		/* Disable LDO 21 locally and set pulldown enable bit. */
+		pm8058_disable_ldo_locally_set_pull_down(regmap,
+			PM8058_L21_CTRL, REG_PM8058_VREG_EN_GRP_5_4,
+			BIT(1));
+	}
+
+	/*
+	 * Fix-up: Set regulator LDO22 to 1.225 V in high power mode. Leave its
+	 * pull-down state intact. This ensures a safe shutdown.
+	 */
+	error = regmap_update_bits(regmap, PM8058_L22_CTRL, 0xbf, 0x93);
+	if (error)
+		return error;
+
+	/* Enable SMPL if resetting is desired */
+	mask = SLEEP_CTRL_SMPL_EN_RESET;
+	val = 0;
+	if (reset)
+		val = mask;
+	return regmap_update_bits(regmap, PM8058_SLEEP_CTRL, mask, val);
+}
+
+static int pm8921_pwrkey_shutdown(struct pmic8xxx_pwrkey *pwrkey, bool reset)
+{
+	struct regmap *regmap = pwrkey->regmap;
+	u8 mask = SLEEP_CTRL_SMPL_EN_RESET;
+	u8 val = 0;
+
+	/* Enable SMPL if resetting is desired */
+	if (reset)
+		val = mask;
+	return regmap_update_bits(regmap, PM8921_SLEEP_CTRL, mask, val);
+}
+
 static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
 {
 	struct input_dev *pwr;
@@ -109,6 +370,8 @@
 	if (!pwrkey)
 		return -ENOMEM;
 
+	pwrkey->shutdown_fn = of_device_get_match_data(&pdev->dev);
+	pwrkey->regmap = regmap;
 	pwrkey->key_press_irq = key_press_irq;
 
 	pwr = devm_input_allocate_device(&pdev->dev);
@@ -182,8 +445,8 @@
 }
 
 static const struct of_device_id pm8xxx_pwr_key_id_table[] = {
-	{ .compatible = "qcom,pm8058-pwrkey" },
-	{ .compatible = "qcom,pm8921-pwrkey" },
+	{ .compatible = "qcom,pm8058-pwrkey", .data = &pm8058_pwrkey_shutdown },
+	{ .compatible = "qcom,pm8921-pwrkey", .data = &pm8921_pwrkey_shutdown },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, pm8xxx_pwr_key_id_table);
@@ -191,6 +454,7 @@
 static struct platform_driver pmic8xxx_pwrkey_driver = {
 	.probe		= pmic8xxx_pwrkey_probe,
 	.remove		= pmic8xxx_pwrkey_remove,
+	.shutdown	= pmic8xxx_pwrkey_shutdown,
 	.driver		= {
 		.name	= "pm8xxx-pwrkey",
 		.pm	= &pm8xxx_pwr_key_pm_ops,
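
The pmic8xxx-pwrkey change stores a per-variant shutdown helper in the .data field of the OF match table and fetches it in probe with of_device_get_match_data(), so the shared code never has to compare compatible strings. A hedged sketch of the same pattern with placeholder names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_variant_ops {
	int (*shutdown)(struct device *dev);
};

static const struct my_variant_ops my_chip_a_ops = {
	/* .shutdown = my_chip_a_shutdown, */
};

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = &my_chip_a_ops },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_variant_ops *ops = of_device_get_match_data(&pdev->dev);

	if (!ops)
		return -ENODEV;

	/* Stash ops in drvdata and call ops->shutdown from a .shutdown hook. */
	return 0;
}
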
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e956e81..62c5814 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -7,6 +7,7 @@
 #include <linux/input-polldev.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/gpio.h>
 
 #include <asm/mach-rc32434/gpio.h>
 #include <asm/mach-rc32434/rb.h>
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 421e29e..345df9b 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -320,10 +320,8 @@
 	 * Check if absmin/absmax/absfuzz/absflat are sane.
 	 */
 
-	for (cnt = 0; cnt < ABS_CNT; cnt++) {
+	for_each_set_bit(cnt, dev->absbit, ABS_CNT) {
 		int min, max;
-		if (!test_bit(cnt, dev->absbit))
-			continue;
 
 		min = input_abs_get_min(dev, cnt);
 		max = input_abs_get_max(dev, cnt);
@@ -416,7 +414,7 @@
 	dev->id.product	= user_dev->id.product;
 	dev->id.version	= user_dev->id.version;
 
-	for (i = 0; i < ABS_CNT; i++) {
+	for_each_set_bit(i, dev->absbit, ABS_CNT) {
 		input_abs_set_max(dev, i, user_dev->absmax[i]);
 		input_abs_set_min(dev, i, user_dev->absmin[i]);
 		input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
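
The uinput hunks above replace "loop over every axis and skip the ones whose bit is clear" with for_each_set_bit(), which walks only the set bits of the bitmap. A small sketch of the iterator; the bitmap and the message are placeholders, not uinput's own data:

#include <linux/bitops.h>
#include <linux/kernel.h>

static void my_dump_enabled_axes(const unsigned long *bitmap, unsigned int nbits)
{
	unsigned int bit;

	/* Visits only bits that are set, in increasing order. */
	for_each_set_bit(bit, bitmap, nbits)
		pr_info("axis %u is enabled\n", bit);
}
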
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index d7820d1..17f97e5 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -341,7 +341,7 @@
 
 config MOUSE_GPIO
 	tristate "GPIO mouse"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	select INPUT_POLLDEV
 	help
 	  This driver simulates a mouse on GPIO lines of various CPUs (and some
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index 793300b..ee6a6e9 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -24,7 +24,7 @@
 obj-$(CONFIG_MOUSE_SYNAPTICS_USB)	+= synaptics_usb.o
 obj-$(CONFIG_MOUSE_VSXXXAA)		+= vsxxxaa.o
 
-cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o
+cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
 psmouse-objs := psmouse-base.o synaptics.o focaltech.o
 
 psmouse-$(CONFIG_MOUSE_PS2_ALPS)	+= alps.o
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index efe1484..eb76b61 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -6,7 +6,7 @@
  *   Daniel Kurtz <djkurtz@chromium.org>
  *   Benson Leung <bleung@chromium.org>
  *
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
  * Copyright (C) 2011-2012 Google, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -21,10 +21,12 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
 #include <linux/acpi.h>
+#include <linux/of.h>
 #include "cyapa.h"
 
 
@@ -39,11 +41,33 @@
 
 static int cyapa_reinitialize(struct cyapa *cyapa);
 
-static inline bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa)
 {
+	if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_BL)
+		return true;
+
 	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_BL)
 		return true;
 
+	return false;
+}
+
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa)
+{
+	if (cyapa->gen == CYAPA_GEN6 && cyapa->state == CYAPA_STATE_GEN6_APP)
+		return true;
+
+	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+		return true;
+
+	return false;
+}
+
+static bool cyapa_is_bootloader_mode(struct cyapa *cyapa)
+{
+	if (cyapa_is_pip_bl_mode(cyapa))
+		return true;
+
 	if (cyapa->gen == CYAPA_GEN3 &&
 		cyapa->state >= CYAPA_STATE_BL_BUSY &&
 		cyapa->state <= CYAPA_STATE_BL_ACTIVE)
@@ -54,7 +78,7 @@
 
 static inline bool cyapa_is_operational_mode(struct cyapa *cyapa)
 {
-	if (cyapa->gen == CYAPA_GEN5 && cyapa->state == CYAPA_STATE_GEN5_APP)
+	if (cyapa_is_pip_app_mode(cyapa))
 		return true;
 
 	if (cyapa->gen == CYAPA_GEN3 && cyapa->state == CYAPA_STATE_OP)
@@ -188,6 +212,15 @@
 			if (!error)
 				goto out_detected;
 		}
+		if (cyapa->gen == CYAPA_GEN_UNKNOWN ||
+				cyapa->gen == CYAPA_GEN6 ||
+				cyapa->gen == CYAPA_GEN5) {
+			error = cyapa_pip_state_parse(cyapa,
+					status, BL_STATUS_SIZE);
+			if (!error)
+				goto out_detected;
+		}
+		/* For detecting old Gen5 trackpads. */
 		if ((cyapa->gen == CYAPA_GEN_UNKNOWN ||
 				cyapa->gen == CYAPA_GEN5) &&
 			!smbus && even_addr) {
@@ -284,6 +317,9 @@
 		return error;
 
 	switch (cyapa->gen) {
+	case CYAPA_GEN6:
+		cyapa->ops = &cyapa_gen6_ops;
+		break;
 	case CYAPA_GEN5:
 		cyapa->ops = &cyapa_gen5_ops;
 		break;
@@ -306,7 +342,7 @@
 
 /*
  * Returns 0 on device detected, negative errno on no device detected.
- * And when the device is detected and opertaional, it will be reset to
+ * And when the device is detected and operational, it will be reset to
  * full power active mode automatically.
  */
 static int cyapa_detect(struct cyapa *cyapa)
@@ -333,6 +369,7 @@
 {
 	struct cyapa *cyapa = input_get_drvdata(input);
 	struct i2c_client *client = cyapa->client;
+	struct device *dev = &client->dev;
 	int error;
 
 	error = mutex_lock_interruptible(&cyapa->state_sync_lock);
@@ -346,10 +383,9 @@
 		 * when in operational mode.
 		 */
 		error = cyapa->ops->set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error) {
-			dev_warn(&client->dev,
-				"set active power failed: %d\n", error);
+			dev_warn(dev, "set active power failed: %d\n", error);
 			goto out;
 		}
 	} else {
@@ -361,10 +397,14 @@
 	}
 
 	enable_irq(client->irq);
-	if (!pm_runtime_enabled(&client->dev)) {
-		pm_runtime_set_active(&client->dev);
-		pm_runtime_enable(&client->dev);
+	if (!pm_runtime_enabled(dev)) {
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
 	}
+
+	pm_runtime_get_sync(dev);
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_sync_autosuspend(dev);
 out:
 	mutex_unlock(&cyapa->state_sync_lock);
 	return error;
@@ -374,16 +414,17 @@
 {
 	struct cyapa *cyapa = input_get_drvdata(input);
 	struct i2c_client *client = cyapa->client;
+	struct device *dev = &cyapa->client->dev;
 
 	mutex_lock(&cyapa->state_sync_lock);
 
 	disable_irq(client->irq);
-	if (pm_runtime_enabled(&client->dev))
-		pm_runtime_disable(&client->dev);
-	pm_runtime_set_suspended(&client->dev);
+	if (pm_runtime_enabled(dev))
+		pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
 
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
 
 	mutex_unlock(&cyapa->state_sync_lock);
 }
@@ -443,6 +484,7 @@
 	if (cyapa->gen >= CYAPA_GEN5) {
 		input_set_abs_params(input, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
 		input_set_abs_params(input, ABS_MT_WIDTH_MINOR, 0, 255, 0, 0);
+		input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0);
 	}
 
 	input_abs_set_res(input, ABS_MT_POSITION_X,
@@ -492,7 +534,7 @@
 		 */
 		if (!input || cyapa->operational)
 			cyapa->ops->set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		/* Gen3 always using polling mode for command. */
 		if (cyapa->gen >= CYAPA_GEN5)
 			enable_irq(cyapa->client->irq);
@@ -507,7 +549,8 @@
 		if (cyapa->gen >= CYAPA_GEN5)
 			disable_irq(cyapa->client->irq);
 		if (!input || cyapa->operational)
-			cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+			cyapa->ops->set_power_mode(cyapa,
+						   PWR_MODE_OFF, 0, false);
 	}
 }
 
@@ -563,6 +606,8 @@
 	error = cyapa_gen3_ops.initialize(cyapa);
 	if (!error)
 		error = cyapa_gen5_ops.initialize(cyapa);
+	if (!error)
+		error = cyapa_gen6_ops.initialize(cyapa);
 	if (error)
 		return error;
 
@@ -572,7 +617,7 @@
 
 	/* Power down the device until we need it. */
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+		cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0, false);
 
 	return 0;
 }
@@ -588,7 +633,8 @@
 
 	/* Avoid command failures when TP was in OFF state. */
 	if (cyapa->operational)
-		cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+		cyapa->ops->set_power_mode(cyapa,
+					   PWR_MODE_FULL_ACTIVE, 0, false);
 
 	error = cyapa_detect(cyapa);
 	if (error)
@@ -607,7 +653,8 @@
 	if (!input || !input->users) {
 		/* Reset to power OFF state to save power when no user open. */
 		if (cyapa->operational)
-			cyapa->ops->set_power_mode(cyapa, PWR_MODE_OFF, 0);
+			cyapa->ops->set_power_mode(cyapa,
+						   PWR_MODE_OFF, 0, false);
 	} else if (!error && cyapa->operational) {
 		/*
 		 * Make sure only enable runtime PM when device is
@@ -615,6 +662,10 @@
 		 */
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		pm_runtime_get_sync(dev);
+		pm_runtime_mark_last_busy(dev);
+		pm_runtime_put_sync_autosuspend(dev);
 	}
 
 	return error;
@@ -624,27 +675,44 @@
 {
 	struct cyapa *cyapa = dev_id;
 	struct device *dev = &cyapa->client->dev;
+	int error;
 
-	pm_runtime_get_sync(dev);
 	if (device_may_wakeup(dev))
 		pm_wakeup_event(dev, 0);
 
-	/* Interrupt event maybe cuased by host command to trackpad device. */
+	/* Interrupt event can be caused by host command to trackpad device. */
 	if (cyapa->ops->irq_cmd_handler(cyapa)) {
 		/*
 		 * Interrupt event maybe from trackpad device input reporting.
 		 */
 		if (!cyapa->input) {
 			/*
-			 * Still in probling or in firware image
-			 * udpating or reading.
+			 * Still in probing or in firmware image
+			 * updating or reading.
 			 */
 			cyapa->ops->sort_empty_output_data(cyapa,
 					NULL, NULL, NULL);
 			goto out;
 		}
 
-		if (!cyapa->operational || cyapa->ops->irq_handler(cyapa)) {
+		if (cyapa->operational) {
+			error = cyapa->ops->irq_handler(cyapa);
+
+			/*
+			 * Apply runtime power management to touch report
+			 * events, but not to events caused by command
+			 * responses.
+			 * Note: this adds roughly 20~40 ms of extra delay
+			 * before the first valid touch report is received;
+			 * that time is spent running the device runtime
+			 * resume process.
+			 */
+			pm_runtime_get_sync(dev);
+			pm_runtime_mark_last_busy(dev);
+			pm_runtime_put_sync_autosuspend(dev);
+		}
+
+		if (!cyapa->operational || error) {
 			if (!mutex_trylock(&cyapa->state_sync_lock)) {
 				cyapa->ops->sort_empty_output_data(cyapa,
 					NULL, NULL, NULL);
@@ -656,8 +724,6 @@
 	}
 
 out:
-	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_sync_autosuspend(dev);
 	return IRQ_HANDLED;
 }
 
@@ -1051,12 +1117,12 @@
 		dev_dbg(dev, "firmware update successfully done.\n");
 
 	/*
-	 * Redetect trackpad device states because firmware update process
+	 * Re-detect trackpad device states because firmware update process
 	 * will reset trackpad device into bootloader mode.
 	 */
 	ret = cyapa_reinitialize(cyapa);
 	if (ret) {
-		dev_err(dev, "failed to redetect after updated: %d\n", ret);
+		dev_err(dev, "failed to re-detect after updated: %d\n", ret);
 		error = error ? error : ret;
 	}
 
@@ -1120,9 +1186,11 @@
 	case CYAPA_STATE_BL_ACTIVE:
 		return "bootloader active";
 	case CYAPA_STATE_GEN5_BL:
+	case CYAPA_STATE_GEN6_BL:
 		return "bootloader";
 	case CYAPA_STATE_OP:
 	case CYAPA_STATE_GEN5_APP:
+	case CYAPA_STATE_GEN6_APP:
 		return "operational";  /* Normal valid state. */
 	default:
 		return "invalid mode";
@@ -1175,6 +1243,13 @@
 	sysfs_remove_group(&cyapa->client->dev.kobj, &cyapa_sysfs_group);
 }
 
+static void cyapa_disable_regulator(void *data)
+{
+	struct cyapa *cyapa = data;
+
+	regulator_disable(cyapa->vcc);
+}
+
 static int cyapa_probe(struct i2c_client *client,
 		       const struct i2c_device_id *dev_id)
 {
@@ -1208,6 +1283,27 @@
 	sprintf(cyapa->phys, "i2c-%d-%04x/input0", client->adapter->nr,
 		client->addr);
 
+	cyapa->vcc = devm_regulator_get(dev, "vcc");
+	if (IS_ERR(cyapa->vcc)) {
+		error = PTR_ERR(cyapa->vcc);
+		dev_err(dev, "failed to get vcc regulator: %d\n", error);
+		return error;
+	}
+
+	error = regulator_enable(cyapa->vcc);
+	if (error) {
+		dev_err(dev, "failed to enable regulator: %d\n", error);
+		return error;
+	}
+
+	error = devm_add_action(dev, cyapa_disable_regulator, cyapa);
+	if (error) {
+		cyapa_disable_regulator(cyapa);
+		dev_err(dev, "failed to add disable regulator action: %d\n",
+			error);
+		return error;
+	}
+
 	error = cyapa_initialize(cyapa);
 	if (error) {
 		dev_err(dev, "failed to detect and initialize tp device.\n");
@@ -1296,12 +1392,19 @@
 		power_mode = device_may_wakeup(dev) ? cyapa->suspend_power_mode
 						    : PWR_MODE_OFF;
 		error = cyapa->ops->set_power_mode(cyapa, power_mode,
-				cyapa->suspend_sleep_time);
+				cyapa->suspend_sleep_time, true);
 		if (error)
 			dev_err(dev, "suspend set power mode failed: %d\n",
 					error);
 	}
 
+	/*
+	 * Disable the proximity interrupt while the system is idle; only a
+	 * real touch should wake the system.
+	 */
+	if (cyapa->dev_pwr_mode != PWR_MODE_OFF)
+		cyapa->ops->set_proximity(cyapa, false);
+
 	if (device_may_wakeup(dev))
 		cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
 
@@ -1322,7 +1425,10 @@
 		cyapa->irq_wake = false;
 	}
 
-	/* Update device states and runtime PM states. */
+	/*
+	 * Update device states and runtime PM states.
+	 * Re-enable the proximity interrupt after entering operational mode.
+	 */
 	error = cyapa_reinitialize(cyapa);
 	if (error)
 		dev_warn(dev, "failed to reinitialize TP device: %d\n", error);
@@ -1340,7 +1446,8 @@
 
 	error = cyapa->ops->set_power_mode(cyapa,
 			cyapa->runtime_suspend_power_mode,
-			cyapa->runtime_suspend_sleep_time);
+			cyapa->runtime_suspend_sleep_time,
+			false);
 	if (error)
 		dev_warn(dev, "runtime suspend failed: %d\n", error);
 
@@ -1352,7 +1459,8 @@
 	struct cyapa *cyapa = dev_get_drvdata(dev);
 	int error;
 
-	error = cyapa->ops->set_power_mode(cyapa, PWR_MODE_FULL_ACTIVE, 0);
+	error = cyapa->ops->set_power_mode(cyapa,
+					   PWR_MODE_FULL_ACTIVE, 0, false);
 	if (error)
 		dev_warn(dev, "runtime resume failed: %d\n", error);
 
@@ -1374,17 +1482,26 @@
 static const struct acpi_device_id cyapa_acpi_id[] = {
 	{ "CYAP0000", 0 },  /* Gen3 trackpad with 0x67 I2C address. */
 	{ "CYAP0001", 0 },  /* Gen5 trackpad with 0x24 I2C address. */
+	{ "CYAP0002", 0 },  /* Gen6 trackpad with 0x24 I2C address. */
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, cyapa_acpi_id);
 #endif
 
+#ifdef CONFIG_OF
+static const struct of_device_id cyapa_of_match[] = {
+	{ .compatible = "cypress,cyapa" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, cyapa_of_match);
+#endif
+
 static struct i2c_driver cyapa_driver = {
 	.driver = {
 		.name = "cyapa",
-		.owner = THIS_MODULE,
 		.pm = &cyapa_pm_ops,
 		.acpi_match_table = ACPI_PTR(cyapa_acpi_id),
+		.of_match_table = of_match_ptr(cyapa_of_match),
 	},
 
 	.probe = cyapa_probe,
diff --git a/drivers/input/mouse/cyapa.h b/drivers/input/mouse/cyapa.h
index adc9ed5..b812bba 100644
--- a/drivers/input/mouse/cyapa.h
+++ b/drivers/input/mouse/cyapa.h
@@ -3,7 +3,7 @@
  *
  * Author: Dudley Du <dudl@cypress.com>
  *
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive for
@@ -19,13 +19,14 @@
 #define CYAPA_GEN_UNKNOWN   0x00   /* unknown protocol. */
 #define CYAPA_GEN3   0x03   /* support MT-protocol B with tracking ID. */
 #define CYAPA_GEN5   0x05   /* support TrueTouch GEN5 trackpad device. */
+#define CYAPA_GEN6   0x06   /* support TrueTouch GEN6 trackpad device. */
 
 #define CYAPA_NAME   "Cypress APA Trackpad (cyapa)"
 
 /*
  * Macros for SMBus communication
  */
-#define SMBUS_READ   0x01
+#define SMBUS_READ  0x01
 #define SMBUS_WRITE 0x00
 #define SMBUS_ENCODE_IDX(cmd, idx) ((cmd) | (((idx) & 0x03) << 1))
 #define SMBUS_ENCODE_RW(cmd, rw) ((cmd) | ((rw) & 0x01))
@@ -159,12 +160,89 @@
 
 #define AUTOSUSPEND_DELAY   2000 /* unit : ms */
 
-#define UNINIT_SLEEP_TIME 0xFFFF
-#define UNINIT_PWR_MODE   0xFF
-
 #define BTN_ONLY_MODE_NAME   "buttononly"
 #define OFF_MODE_NAME        "off"
 
+/* Common macros for PIP interface. */
+#define PIP_HID_DESCRIPTOR_ADDR		0x0001
+#define PIP_REPORT_DESCRIPTOR_ADDR	0x0002
+#define PIP_INPUT_REPORT_ADDR		0x0003
+#define PIP_OUTPUT_REPORT_ADDR		0x0004
+#define PIP_CMD_DATA_ADDR		0x0006
+
+#define PIP_RETRIEVE_DATA_STRUCTURE	0x24
+#define PIP_CMD_CALIBRATE		0x28
+#define PIP_BL_CMD_VERIFY_APP_INTEGRITY	0x31
+#define PIP_BL_CMD_GET_BL_INFO		0x38
+#define PIP_BL_CMD_PROGRAM_VERIFY_ROW	0x39
+#define PIP_BL_CMD_LAUNCH_APP		0x3b
+#define PIP_BL_CMD_INITIATE_BL		0x48
+#define PIP_INVALID_CMD			0xff
+
+#define PIP_HID_DESCRIPTOR_SIZE		32
+#define PIP_HID_APP_REPORT_ID		0xf7
+#define PIP_HID_BL_REPORT_ID		0xff
+
+#define PIP_BL_CMD_REPORT_ID		0x40
+#define PIP_BL_RESP_REPORT_ID		0x30
+#define PIP_APP_CMD_REPORT_ID		0x2f
+#define PIP_APP_RESP_REPORT_ID		0x1f
+
+#define PIP_READ_SYS_INFO_CMD_LENGTH	7
+#define PIP_BL_READ_APP_INFO_CMD_LENGTH	13
+#define PIP_MIN_BL_CMD_LENGTH		13
+#define PIP_MIN_BL_RESP_LENGTH		11
+#define PIP_MIN_APP_CMD_LENGTH		7
+#define PIP_MIN_APP_RESP_LENGTH		5
+#define PIP_UNSUPPORTED_CMD_RESP_LENGTH	6
+#define PIP_READ_SYS_INFO_RESP_LENGTH	71
+#define PIP_BL_APP_INFO_RESP_LENGTH	30
+#define PIP_BL_GET_INFO_RESP_LENGTH	19
+
+#define PIP_BL_PLATFORM_VER_SHIFT	4
+#define PIP_BL_PLATFORM_VER_MASK	0x0f
+
+#define PIP_PRODUCT_FAMILY_MASK		0xf000
+#define PIP_PRODUCT_FAMILY_TRACKPAD	0x1000
+
+#define PIP_DEEP_SLEEP_STATE_ON		0x00
+#define PIP_DEEP_SLEEP_STATE_OFF	0x01
+#define PIP_DEEP_SLEEP_STATE_MASK	0x03
+#define PIP_APP_DEEP_SLEEP_REPORT_ID	0xf0
+#define PIP_DEEP_SLEEP_RESP_LENGTH	5
+#define PIP_DEEP_SLEEP_OPCODE		0x08
+#define PIP_DEEP_SLEEP_OPCODE_MASK	0x0f
+
+#define PIP_RESP_LENGTH_OFFSET		0
+#define	    PIP_RESP_LENGTH_SIZE	2
+#define PIP_RESP_REPORT_ID_OFFSET	2
+#define PIP_RESP_RSVD_OFFSET		3
+#define     PIP_RESP_RSVD_KEY		0x00
+#define PIP_RESP_BL_SOP_OFFSET		4
+#define     PIP_SOP_KEY			0x01  /* Start of Packet */
+#define     PIP_EOP_KEY			0x17  /* End of Packet */
+#define PIP_RESP_APP_CMD_OFFSET		4
+#define     GET_PIP_CMD_CODE(reg)	((reg) & 0x7f)
+#define PIP_RESP_STATUS_OFFSET		5
+
+#define VALID_CMD_RESP_HEADER(resp, cmd)				  \
+	(((resp)[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID) && \
+	((resp)[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) &&		  \
+	(GET_PIP_CMD_CODE((resp)[PIP_RESP_APP_CMD_OFFSET]) == (cmd)))
+
+#define PIP_CMD_COMPLETE_SUCCESS(resp_data) \
+	((resp_data)[PIP_RESP_STATUS_OFFSET] == 0x00)
+
+/* Macros to record the latest PIP trackpad power states. */
+#define UNINIT_SLEEP_TIME	0xffff
+#define UNINIT_PWR_MODE		0xff
+#define PIP_DEV_SET_PWR_STATE(cyapa, s)		((cyapa)->dev_pwr_mode = (s))
+#define PIP_DEV_GET_PWR_STATE(cyapa)		((cyapa)->dev_pwr_mode)
+#define PIP_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
+#define PIP_DEV_GET_SLEEP_TIME(cyapa)		((cyapa)->dev_sleep_time)
+#define PIP_DEV_UNINIT_SLEEP_TIME(cyapa)	\
+		(((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+
 /* The touch.id is used as the MT slot id, thus max MT slot is 15 */
 #define CYAPA_MAX_MT_SLOTS  15
 
@@ -195,10 +273,12 @@
 	int (*sort_empty_output_data)(struct cyapa *,
 			u8 *, int *, cb_sort);
 
-	int (*set_power_mode)(struct cyapa *, u8, u16);
+	int (*set_power_mode)(struct cyapa *, u8, u16, bool);
+
+	int (*set_proximity)(struct cyapa *, bool);
 };
 
-struct cyapa_gen5_cmd_states {
+struct cyapa_pip_cmd_states {
 	struct mutex cmd_lock;
 	struct completion cmd_ready;
 	atomic_t cmd_issued;
@@ -214,7 +294,7 @@
 };
 
 union cyapa_cmd_states {
-	struct cyapa_gen5_cmd_states gen5;
+	struct cyapa_pip_cmd_states pip;
 };
 
 enum cyapa_state {
@@ -225,6 +305,14 @@
 	CYAPA_STATE_OP,
 	CYAPA_STATE_GEN5_BL,
 	CYAPA_STATE_GEN5_APP,
+	CYAPA_STATE_GEN6_BL,
+	CYAPA_STATE_GEN6_APP,
+};
+
+struct gen6_interval_setting {
+	u16 active_interval;
+	u16 lp1_interval;
+	u16 lp2_interval;
 };
 
 /* The main device structure */
@@ -233,6 +321,7 @@
 	u8 status[BL_STATUS_SIZE];
 	bool operational; /* true: ready for data reporting; false: not. */
 
+	struct regulator *vcc;
 	struct i2c_client *client;
 	struct input_dev *input;
 	char phys[32];	/* Device physical location */
@@ -246,9 +335,11 @@
 	u16 runtime_suspend_sleep_time;
 	u8 dev_pwr_mode;
 	u16 dev_sleep_time;
+	struct gen6_interval_setting gen6_interval_setting;
 
 	/* Read from query data region. */
 	char product_id[16];
+	u8 platform_ver;  /* Platform version. */
 	u8 fw_maj_ver;  /* Firmware major version. */
 	u8 fw_min_ver;  /* Firmware minor version. */
 	u8 btn_capability;
@@ -259,7 +350,7 @@
 	int physical_size_y;
 
 	/* Used in ttsp and truetouch based trackpad devices. */
-	u8 x_origin;  /* X Axis Origin: 0 = left side; 1 = rigth side. */
+	u8 x_origin;  /* X Axis Origin: 0 = left side; 1 = right side. */
 	u8 y_origin;  /* Y Axis Origin: 0 = top; 1 = bottom. */
 	int electrodes_x;  /* Number of electrodes on the X Axis*/
 	int electrodes_y;  /* Number of electrodes on the Y Axis*/
@@ -282,9 +373,9 @@
 
 
 ssize_t cyapa_i2c_reg_read_block(struct cyapa *cyapa, u8 reg, size_t len,
-				u8 *values);
+				 u8 *values);
 ssize_t cyapa_smbus_read_block(struct cyapa *cyapa, u8 cmd, size_t len,
-				u8 *values);
+			       u8 *values);
 
 ssize_t cyapa_read_block(struct cyapa *cyapa, u8 cmd_idx, u8 *values);
 
@@ -293,9 +384,51 @@
 u8 cyapa_sleep_time_to_pwr_cmd(u16 sleep_time);
 u16 cyapa_pwr_cmd_to_sleep_time(u8 pwr_mode);
 
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size);
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size);
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+				u8 *buf, int *len, cb_sort func);
+int cyapa_i2c_pip_cmd_irq_sync(struct cyapa *cyapa,
+			       u8 *cmd, int cmd_len,
+			       u8 *resp_data, int *resp_len,
+			       unsigned long timeout,
+			       cb_sort func,
+			       bool irq_mode);
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len);
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa, u8 *buf, int len);
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state);
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa, u8 *data, int len);
+int cyapa_pip_bl_exit(struct cyapa *cyapa);
+int cyapa_pip_bl_enter(struct cyapa *cyapa);
 
+
+bool cyapa_is_pip_bl_mode(struct cyapa *cyapa);
+bool cyapa_is_pip_app_mode(struct cyapa *cyapa);
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa);
+
+int cyapa_pip_resume_scanning(struct cyapa *cyapa);
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa);
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_do_fw_update(struct cyapa *cyapa, const struct firmware *fw);
+int cyapa_pip_bl_activate(struct cyapa *cyapa);
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa);
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count);
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable);
+
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa);
+int cyapa_pip_irq_handler(struct cyapa *cyapa);
+
+
+extern u8 pip_read_sys_info[];
+extern u8 pip_bl_read_app_info[];
 extern const char product_id[];
 extern const struct cyapa_dev_ops cyapa_gen3_ops;
 extern const struct cyapa_dev_ops cyapa_gen5_ops;
+extern const struct cyapa_dev_ops cyapa_gen6_ops;
 
 #endif
diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c
index 3faf01c..1a9d12a 100644
--- a/drivers/input/mouse/cyapa_gen3.c
+++ b/drivers/input/mouse/cyapa_gen3.c
@@ -6,7 +6,7 @@
  *   Daniel Kurtz <djkurtz@chromium.org>
  *   Benson Leung <bleung@chromium.org>
  *
- * Copyright (C) 2011-2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2011-2015 Cypress Semiconductor, Inc.
  * Copyright (C) 2011-2012 Google, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -950,7 +950,7 @@
  * Device power mode can only be set when device is in operational mode.
  */
 static int cyapa_gen3_set_power_mode(struct cyapa *cyapa, u8 power_mode,
-				     u16 always_unused)
+		u16 always_unused, bool is_suspend_unused)
 {
 	int ret;
 	u8 power;
@@ -999,6 +999,11 @@
 	return ret;
 }
 
+static int cyapa_gen3_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	return -EOPNOTSUPP;
+}
+
 static int cyapa_gen3_get_query_data(struct cyapa *cyapa)
 {
 	u8 query_data[QUERY_DATA_SIZE];
@@ -1107,7 +1112,7 @@
 		 * may cause problems, so we set the power mode first here.
 		 */
 		error = cyapa_gen3_set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error)
 			dev_err(dev, "%s: set full power mode failed: %d\n",
 				__func__, error);
@@ -1156,7 +1161,7 @@
 	 * so, stop cyapa_gen3_irq_handler to continue process to
 	 * avoid unwanted to error detecting and processing.
 	 *
-	 * And also, avoid the periodicly accerted interrupts to be processed
+	 * And also, avoid the periodically asserted interrupts to be processed
 	 * as touch inputs when gen3 failed to launch into application mode,
 	 * which will cause gen3 stays in bootloader mode.
 	 */
@@ -1243,4 +1248,6 @@
 	.irq_cmd_handler = cyapa_gen3_irq_cmd_handler,
 	.sort_empty_output_data = cyapa_gen3_empty_output_data,
 	.set_power_mode = cyapa_gen3_set_power_mode,
+
+	.set_proximity = cyapa_gen3_set_proximity,
 };
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index afc39e7..118ba97 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -3,7 +3,7 @@
  *
  * Author: Dudley Du <dudl@cypress.com>
  *
- * Copyright (C) 2014 Cypress Semiconductor, Inc.
+ * Copyright (C) 2014-2015 Cypress Semiconductor, Inc.
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of this archive for
@@ -19,15 +19,11 @@
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 #include <linux/crc-itu-t.h>
+#include <linux/pm_runtime.h>
 #include "cyapa.h"
 
 
-/* Macro of Gen5 */
-#define RECORD_EVENT_NONE        0
-#define RECORD_EVENT_TOUCHDOWN	 1
-#define RECORD_EVENT_DISPLACE    2
-#define RECORD_EVENT_LIFTOFF     3
-
+/* Macro of TSG firmware image */
 #define CYAPA_TSG_FLASH_MAP_BLOCK_SIZE      0x80
 #define CYAPA_TSG_IMG_FW_HDR_SIZE           13
 #define CYAPA_TSG_FW_ROW_SIZE               (CYAPA_TSG_FLASH_MAP_BLOCK_SIZE)
@@ -44,32 +40,65 @@
 
 #define CYAPA_TSG_MAX_CMD_SIZE              256
 
-#define GEN5_BL_CMD_VERIFY_APP_INTEGRITY    0x31
-#define GEN5_BL_CMD_GET_BL_INFO		    0x38
-#define GEN5_BL_CMD_PROGRAM_VERIFY_ROW      0x39
-#define GEN5_BL_CMD_LAUNCH_APP		    0x3b
-#define GEN5_BL_CMD_INITIATE_BL		    0x48
+/* Macro of PIP interface */
+#define PIP_BL_INITIATE_RESP_LEN            11
+#define PIP_BL_FAIL_EXIT_RESP_LEN           11
+#define PIP_BL_FAIL_EXIT_STATUS_CODE        0x0c
+#define PIP_BL_VERIFY_INTEGRITY_RESP_LEN    12
+#define PIP_BL_INTEGRITY_CHEKC_PASS         0x00
+#define PIP_BL_BLOCK_WRITE_RESP_LEN         11
 
-#define GEN5_HID_DESCRIPTOR_ADDR	0x0001
-#define GEN5_REPORT_DESCRIPTOR_ADDR	0x0002
-#define GEN5_INPUT_REPORT_ADDR		0x0003
-#define GEN5_OUTPUT_REPORT_ADDR		0x0004
-#define GEN5_CMD_DATA_ADDR		0x0006
+#define PIP_TOUCH_REPORT_ID         0x01
+#define PIP_BTN_REPORT_ID           0x03
+#define PIP_WAKEUP_EVENT_REPORT_ID  0x04
+#define PIP_PUSH_BTN_REPORT_ID      0x06
+#define GEN5_OLD_PUSH_BTN_REPORT_ID 0x05  /* Special for old Gen5 TP. */
+#define PIP_PROXIMITY_REPORT_ID     0x07
 
-#define GEN5_TOUCH_REPORT_HEAD_SIZE     7
-#define GEN5_TOUCH_REPORT_MAX_SIZE      127
-#define GEN5_BTN_REPORT_HEAD_SIZE       6
-#define GEN5_BTN_REPORT_MAX_SIZE        14
-#define GEN5_WAKEUP_EVENT_SIZE          4
-#define GEN5_RAW_DATA_HEAD_SIZE         24
+#define PIP_PROXIMITY_REPORT_SIZE	6
+#define PIP_PROXIMITY_DISTANCE_OFFSET	0x05
+#define PIP_PROXIMITY_DISTANCE_MASK	0x01
 
-#define GEN5_BL_CMD_REPORT_ID           0x40
-#define GEN5_BL_RESP_REPORT_ID          0x30
-#define GEN5_APP_CMD_REPORT_ID          0x2f
-#define GEN5_APP_RESP_REPORT_ID         0x1f
+#define PIP_TOUCH_REPORT_HEAD_SIZE     7
+#define PIP_TOUCH_REPORT_MAX_SIZE      127
+#define PIP_BTN_REPORT_HEAD_SIZE       6
+#define PIP_BTN_REPORT_MAX_SIZE        14
+#define PIP_WAKEUP_EVENT_SIZE          4
 
-#define GEN5_APP_DEEP_SLEEP_REPORT_ID   0xf0
-#define GEN5_DEEP_SLEEP_RESP_LENGTH     5
+#define PIP_NUMBER_OF_TOUCH_OFFSET  5
+#define PIP_NUMBER_OF_TOUCH_MASK    0x1f
+#define PIP_BUTTONS_OFFSET          5
+#define PIP_BUTTONS_MASK            0x0f
+#define PIP_GET_EVENT_ID(reg)       (((reg) >> 5) & 0x03)
+#define PIP_GET_TOUCH_ID(reg)       ((reg) & 0x1f)
+#define PIP_TOUCH_TYPE_FINGER	    0x00
+#define PIP_TOUCH_TYPE_PROXIMITY    0x01
+#define PIP_TOUCH_TYPE_HOVER	    0x02
+#define PIP_GET_TOUCH_TYPE(reg)     ((reg) & 0x07)
+
+#define RECORD_EVENT_NONE        0
+#define RECORD_EVENT_TOUCHDOWN	 1
+#define RECORD_EVENT_DISPLACE    2
+#define RECORD_EVENT_LIFTOFF     3
+
+#define PIP_SENSING_MODE_MUTUAL_CAP_FINE   0x00
+#define PIP_SENSING_MODE_SELF_CAP          0x02
+
+#define PIP_SET_PROXIMITY	0x49
+
+/* Macro of Gen5 */
+#define GEN5_BL_MAX_OUTPUT_LENGTH     0x0100
+#define GEN5_APP_MAX_OUTPUT_LENGTH    0x00fe
+
+#define GEN5_POWER_STATE_ACTIVE              0x01
+#define GEN5_POWER_STATE_LOOK_FOR_TOUCH      0x02
+#define GEN5_POWER_STATE_READY               0x03
+#define GEN5_POWER_STATE_IDLE                0x04
+#define GEN5_POWER_STATE_BTN_ONLY            0x05
+#define GEN5_POWER_STATE_OFF                 0x06
+
+#define GEN5_POWER_READY_MAX_INTRVL_TIME  50   /* Unit: ms */
+#define GEN5_POWER_IDLE_MAX_INTRVL_TIME   250  /* Unit: ms */
 
 #define GEN5_CMD_GET_PARAMETER		     0x05
 #define GEN5_CMD_SET_PARAMETER		     0x06
@@ -82,80 +111,12 @@
 
 #define GEN5_PARAMETER_DISABLE_PIP_REPORT    0x08
 
-#define GEN5_POWER_STATE_ACTIVE              0x01
-#define GEN5_POWER_STATE_LOOK_FOR_TOUCH      0x02
-#define GEN5_POWER_STATE_READY               0x03
-#define GEN5_POWER_STATE_IDLE                0x04
-#define GEN5_POWER_STATE_BTN_ONLY            0x05
-#define GEN5_POWER_STATE_OFF                 0x06
-
-#define GEN5_DEEP_SLEEP_STATE_MASK  0x03
-#define GEN5_DEEP_SLEEP_STATE_ON    0x00
-#define GEN5_DEEP_SLEEP_STATE_OFF   0x01
-
-#define GEN5_DEEP_SLEEP_OPCODE      0x08
-#define GEN5_DEEP_SLEEP_OPCODE_MASK 0x0f
-
-#define GEN5_POWER_READY_MAX_INTRVL_TIME  50   /* Unit: ms */
-#define GEN5_POWER_IDLE_MAX_INTRVL_TIME   250  /* Unit: ms */
-
-#define GEN5_CMD_REPORT_ID_OFFSET       4
-
-#define GEN5_RESP_REPORT_ID_OFFSET      2
-#define GEN5_RESP_RSVD_OFFSET           3
-#define     GEN5_RESP_RSVD_KEY          0x00
-#define GEN5_RESP_BL_SOP_OFFSET         4
-#define     GEN5_SOP_KEY                0x01  /* Start of Packet */
-#define     GEN5_EOP_KEY                0x17  /* End of Packet */
-#define GEN5_RESP_APP_CMD_OFFSET        4
-#define     GET_GEN5_CMD_CODE(reg)      ((reg) & 0x7f)
-
-#define VALID_CMD_RESP_HEADER(resp, cmd)				    \
-	(((resp)[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID) && \
-	((resp)[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) &&	    \
-	(GET_GEN5_CMD_CODE((resp)[GEN5_RESP_APP_CMD_OFFSET]) == (cmd)))
-
-#define GEN5_MIN_BL_CMD_LENGTH           13
-#define GEN5_MIN_BL_RESP_LENGTH          11
-#define GEN5_MIN_APP_CMD_LENGTH          7
-#define GEN5_MIN_APP_RESP_LENGTH         5
-#define GEN5_UNSUPPORTED_CMD_RESP_LENGTH 6
-
-#define GEN5_RESP_LENGTH_OFFSET  0x00
-#define GEN5_RESP_LENGTH_SIZE    2
-
-#define GEN5_HID_DESCRIPTOR_SIZE      32
-#define GEN5_BL_HID_REPORT_ID         0xff
-#define GEN5_APP_HID_REPORT_ID        0xf7
-#define GEN5_BL_MAX_OUTPUT_LENGTH     0x0100
-#define GEN5_APP_MAX_OUTPUT_LENGTH    0x00fe
-
 #define GEN5_BL_REPORT_DESCRIPTOR_SIZE            0x1d
 #define GEN5_BL_REPORT_DESCRIPTOR_ID              0xfe
 #define GEN5_APP_REPORT_DESCRIPTOR_SIZE           0xee
 #define GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE  0xfa
 #define GEN5_APP_REPORT_DESCRIPTOR_ID             0xf6
 
-#define GEN5_TOUCH_REPORT_ID         0x01
-#define GEN5_BTN_REPORT_ID           0x03
-#define GEN5_WAKEUP_EVENT_REPORT_ID  0x04
-#define GEN5_OLD_PUSH_BTN_REPORT_ID  0x05
-#define GEN5_PUSH_BTN_REPORT_ID      0x06
-
-#define GEN5_CMD_COMPLETE_SUCCESS(status) ((status) == 0x00)
-
-#define GEN5_BL_INITIATE_RESP_LEN            11
-#define GEN5_BL_FAIL_EXIT_RESP_LEN           11
-#define GEN5_BL_FAIL_EXIT_STATUS_CODE        0x0c
-#define GEN5_BL_VERIFY_INTEGRITY_RESP_LEN    12
-#define GEN5_BL_INTEGRITY_CHEKC_PASS         0x00
-#define GEN5_BL_BLOCK_WRITE_RESP_LEN         11
-#define GEN5_BL_READ_APP_INFO_RESP_LEN       31
-#define GEN5_CMD_CALIBRATE                   0x28
-#define CYAPA_SENSING_MODE_MUTUAL_CAP_FINE   0x00
-#define CYAPA_SENSING_MODE_SELF_CAP          0x02
-
-#define GEN5_CMD_RETRIEVE_DATA_STRUCTURE     0x24
 #define GEN5_RETRIEVE_MUTUAL_PWC_DATA        0x00
 #define GEN5_RETRIEVE_SELF_CAP_PWC_DATA      0x01
 
@@ -170,28 +131,19 @@
 #define GEN5_PANEL_SCAN_SELF_BASELINE        0x04
 #define GEN5_PANEL_SCAN_SELF_DIFFCOUNT       0x05
 
-/* The offset only valid for reterive PWC and panel scan commands */
+/* The offset is only valid for the retrieve PWC and panel scan commands */
 #define GEN5_RESP_DATA_STRUCTURE_OFFSET      10
 #define GEN5_PWC_DATA_ELEMENT_SIZE_MASK      0x07
 
-#define	GEN5_NUMBER_OF_TOUCH_OFFSET  5
-#define GEN5_NUMBER_OF_TOUCH_MASK    0x1f
-#define GEN5_BUTTONS_OFFSET          5
-#define GEN5_BUTTONS_MASK            0x0f
-#define GEN5_GET_EVENT_ID(reg)       (((reg) >> 5) & 0x03)
-#define GEN5_GET_TOUCH_ID(reg)       ((reg) & 0x1f)
 
-#define GEN5_PRODUCT_FAMILY_MASK        0xf000
-#define GEN5_PRODUCT_FAMILY_TRACKPAD    0x1000
-
-#define TSG_INVALID_CMD   0xff
-
-struct cyapa_gen5_touch_record {
+struct cyapa_pip_touch_record {
 	/*
 	 * Bit 7 - 3: reserved
 	 * Bit 2 - 0: touch type;
 	 *            0 : standard finger;
-	 *            1 - 15 : reserved.
+	 *            1 : proximity (first supported on Gen5 TP).
+	 *            2 : finger hover (defined, but not used yet).
+	 *            3 - 15 : reserved.
 	 */
 	u8 touch_type;
 
@@ -221,7 +173,14 @@
 	/* Bit 15 - 8 of Y-axis coordinate of the touch in pixel. */
 	u8 y_hi;
 
-	/* Touch intensity in counts, pressure value. */
+	/*
+	 * The meaning of this value depends on touch_type.
+	 * For the standard finger type:
+	 *	Touch intensity in counts (pressure value).
+	 * For the proximity type (first supported on Gen5 TP):
+	 *	The distance, in surface units, between the contact and
+	 *	the surface.
+	 */
 	u8 z;
 
 	/*
@@ -260,9 +219,9 @@
 	u8 orientation;
 } __packed;
 
-struct cyapa_gen5_report_data {
-	u8 report_head[GEN5_TOUCH_REPORT_HEAD_SIZE];
-	struct cyapa_gen5_touch_record touch_records[10];
+struct cyapa_pip_report_data {
+	u8 report_head[PIP_TOUCH_REPORT_HEAD_SIZE];
+	struct cyapa_pip_touch_record touch_records[10];
 } __packed;
 
 struct cyapa_tsg_bin_image_head {
@@ -272,6 +231,12 @@
 	u8 fw_major_version;
 	u8 fw_minor_version;
 	u8 fw_revision_control_number[8];
+	u8 silicon_id_hi;
+	u8 silicon_id_lo;
+	u8 chip_revision;
+	u8 family_id;
+	u8 bl_ver_maj;
+	u8 bl_ver_min;
 } __packed;
 
 struct cyapa_tsg_bin_image_data_record {
@@ -288,36 +253,36 @@
 	struct cyapa_tsg_bin_image_data_record records[0];
 } __packed;
 
-struct gen5_bl_packet_start {
+struct pip_bl_packet_start {
 	u8 sop;  /* Start of packet, must be 01h */
 	u8 cmd_code;
 	__le16 data_length;  /* Size of data parameter start from data[0] */
 } __packed;
 
-struct gen5_bl_packet_end {
+struct pip_bl_packet_end {
 	__le16 crc;
 	u8 eop;  /* End of packet, must be 17h */
 } __packed;
 
-struct gen5_bl_cmd_head {
+struct pip_bl_cmd_head {
 	__le16 addr;   /* Output report register address, must be 0004h */
 	/* Size of packet not including output report register address */
 	__le16 length;
 	u8 report_id;  /* Bootloader output report id, must be 40h */
 	u8 rsvd;  /* Reserved, must be 0 */
-	struct gen5_bl_packet_start packet_start;
+	struct pip_bl_packet_start packet_start;
 	u8 data[0];  /* Command data variable based on commands */
 } __packed;
 
 /* Initiate bootload command data structure. */
-struct gen5_bl_initiate_cmd_data {
+struct pip_bl_initiate_cmd_data {
 	/* Key must be "A5h 01h 02h 03h FFh FEh FDh 5Ah" */
 	u8 key[CYAPA_TSG_BL_KEY_SIZE];
 	u8 metadata_raw_parameter[CYAPA_TSG_FLASH_MAP_METADATA_SIZE];
 	__le16 metadata_crc;
 } __packed;
 
-struct gen5_bl_metadata_row_params {
+struct tsg_bl_metadata_row_params {
 	__le16 size;
 	__le16 maximum_size;
 	__le32 app_start;
@@ -332,13 +297,13 @@
 } __packed;
 
 /* Bootload program and verify row command data structure */
-struct gen5_bl_flash_row_head {
+struct tsg_bl_flash_row_head {
 	u8 flash_array_id;
 	__le16 flash_row_id;
 	u8 flash_data[0];
 } __packed;
 
-struct gen5_app_cmd_head {
+struct pip_app_cmd_head {
 	__le16 addr;   /* Output report register address, must be 0004h */
 	/* Size of packet not including output report register address */
 	__le16 length;
@@ -369,30 +334,26 @@
 	u8 data_id;
 } __packed;
 
-/* Variables to record latest gen5 trackpad power states. */
-#define GEN5_DEV_SET_PWR_STATE(cyapa, s)	((cyapa)->dev_pwr_mode = (s))
-#define GEN5_DEV_GET_PWR_STATE(cyapa)		((cyapa)->dev_pwr_mode)
-#define GEN5_DEV_SET_SLEEP_TIME(cyapa, t)	((cyapa)->dev_sleep_time = (t))
-#define GEN5_DEV_GET_SLEEP_TIME(cyapa)		((cyapa)->dev_sleep_time)
-#define GEN5_DEV_UNINIT_SLEEP_TIME(cyapa)	\
-		(((cyapa)->dev_sleep_time) == UNINIT_SLEEP_TIME)
+u8 pip_read_sys_info[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02 };
+u8 pip_bl_read_app_info[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
+		0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
+	};
 
-
-static u8 cyapa_gen5_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
+static u8 cyapa_pip_bl_cmd_key[] = { 0xa5, 0x01, 0x02, 0x03,
 	0xff, 0xfe, 0xfd, 0x5a };
 
-static int cyapa_gen5_initialize(struct cyapa *cyapa)
+int cyapa_pip_cmd_state_initialize(struct cyapa *cyapa)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 
-	init_completion(&gen5_pip->cmd_ready);
-	atomic_set(&gen5_pip->cmd_issued, 0);
-	mutex_init(&gen5_pip->cmd_lock);
+	init_completion(&pip->cmd_ready);
+	atomic_set(&pip->cmd_issued, 0);
+	mutex_init(&pip->cmd_lock);
 
-	gen5_pip->resp_sort_func = NULL;
-	gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
-	gen5_pip->resp_data = NULL;
-	gen5_pip->resp_len = NULL;
+	pip->resp_sort_func = NULL;
+	pip->in_progress_cmd = PIP_INVALID_CMD;
+	pip->resp_data = NULL;
+	pip->resp_len = NULL;
 
 	cyapa->dev_pwr_mode = UNINIT_PWR_MODE;
 	cyapa->dev_sleep_time = UNINIT_SLEEP_TIME;
@@ -401,7 +362,7 @@
 }
 
 /* Return negative errno, or else the number of bytes read. */
-static ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_read(struct cyapa *cyapa, u8 *buf, size_t size)
 {
 	int ret;
 
@@ -415,14 +376,13 @@
 
 	if (ret != size)
 		return (ret < 0) ? ret : -EIO;
-
 	return size;
 }
 
 /**
  * Return a negative errno code else zero on success.
  */
-static ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
+ssize_t cyapa_i2c_pip_write(struct cyapa *cyapa, u8 *buf, size_t size)
 {
 	int ret;
 
@@ -441,10 +401,10 @@
  * This function is aimed to dump all not read data in Gen5 trackpad
  * before send any command, otherwise, the interrupt line will be blocked.
  */
-static int cyapa_empty_pip_output_data(struct cyapa *cyapa,
+int cyapa_empty_pip_output_data(struct cyapa *cyapa,
 		u8 *buf, int *len, cb_sort func)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 	int report_count;
 	int empty_count;
@@ -476,13 +436,13 @@
 		if (empty_count > 5)
 			return 0;
 
-		error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf,
-				GEN5_RESP_LENGTH_SIZE);
+		error = cyapa_i2c_pip_read(cyapa, pip->empty_buf,
+				PIP_RESP_LENGTH_SIZE);
 		if (error < 0)
 			return error;
 
-		length = get_unaligned_le16(gen5_pip->empty_buf);
-		if (length == GEN5_RESP_LENGTH_SIZE) {
+		length = get_unaligned_le16(pip->empty_buf);
+		if (length == PIP_RESP_LENGTH_SIZE) {
 			empty_count++;
 			continue;
 		} else if (length > CYAPA_REG_MAP_SIZE) {
@@ -490,11 +450,11 @@
 			return -EINVAL;
 		} else if (length == 0) {
 			/* Application or bootloader launch data polled out. */
-			length = GEN5_RESP_LENGTH_SIZE;
+			length = PIP_RESP_LENGTH_SIZE;
 			if (buf && buf_len && func &&
-				func(cyapa, gen5_pip->empty_buf, length)) {
+				func(cyapa, pip->empty_buf, length)) {
 				length = min(buf_len, length);
-				memcpy(buf, gen5_pip->empty_buf, length);
+				memcpy(buf, pip->empty_buf, length);
 				*len = length;
 				/* Response found, success. */
 				return 0;
@@ -502,19 +462,19 @@
 			continue;
 		}
 
-		error = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+		error = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
 		if (error < 0)
 			return error;
 
 		report_count--;
 		empty_count = 0;
-		length = get_unaligned_le16(gen5_pip->empty_buf);
-		if (length <= GEN5_RESP_LENGTH_SIZE) {
+		length = get_unaligned_le16(pip->empty_buf);
+		if (length <= PIP_RESP_LENGTH_SIZE) {
 			empty_count++;
 		} else if (buf && buf_len && func &&
-			func(cyapa, gen5_pip->empty_buf, length)) {
+			func(cyapa, pip->empty_buf, length)) {
 			length = min(buf_len, length);
-			memcpy(buf, gen5_pip->empty_buf, length);
+			memcpy(buf, pip->empty_buf, length);
 			*len = length;
 			/* Response found, success. */
 			return 0;
@@ -531,24 +491,24 @@
 		u8 *cmd, size_t cmd_len,
 		unsigned long timeout)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int error;
 
 	/* Wait for interrupt to set ready completion */
-	init_completion(&gen5_pip->cmd_ready);
+	init_completion(&pip->cmd_ready);
 
-	atomic_inc(&gen5_pip->cmd_issued);
+	atomic_inc(&pip->cmd_issued);
 	error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
 	if (error) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return (error < 0) ? error : -EIO;
 	}
 
 	/* Wait for interrupt to indicate command is completed. */
-	timeout = wait_for_completion_timeout(&gen5_pip->cmd_ready,
+	timeout = wait_for_completion_timeout(&pip->cmd_ready,
 				msecs_to_jiffies(timeout));
 	if (timeout == 0) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return -ETIMEDOUT;
 	}
 
@@ -562,15 +522,15 @@
 		unsigned long timeout,
 		cb_sort func)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int tries;
 	int length;
 	int error;
 
-	atomic_inc(&gen5_pip->cmd_issued);
+	atomic_inc(&pip->cmd_issued);
 	error = cyapa_i2c_pip_write(cyapa, cmd, cmd_len);
 	if (error) {
-		atomic_dec(&gen5_pip->cmd_issued);
+		atomic_dec(&pip->cmd_issued);
 		return error < 0 ? error : -EIO;
 	}
 
@@ -591,11 +551,11 @@
 			error = error ? error : -ETIMEDOUT;
 	}
 
-	atomic_dec(&gen5_pip->cmd_issued);
+	atomic_dec(&pip->cmd_issued);
 	return error;
 }
 
-static int cyapa_i2c_pip_cmd_irq_sync(
+int cyapa_i2c_pip_cmd_irq_sync(
 		struct cyapa *cyapa,
 		u8 *cmd, int cmd_len,
 		u8 *resp_data, int *resp_len,
@@ -603,34 +563,34 @@
 		cb_sort func,
 		bool irq_mode)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int error;
 
 	if (!cmd || !cmd_len)
 		return -EINVAL;
 
 	/* Commands must be serialized. */
-	error = mutex_lock_interruptible(&gen5_pip->cmd_lock);
+	error = mutex_lock_interruptible(&pip->cmd_lock);
 	if (error)
 		return error;
 
-	gen5_pip->resp_sort_func = func;
-	gen5_pip->resp_data = resp_data;
-	gen5_pip->resp_len = resp_len;
+	pip->resp_sort_func = func;
+	pip->resp_data = resp_data;
+	pip->resp_len = resp_len;
 
-	if (cmd_len >= GEN5_MIN_APP_CMD_LENGTH &&
-			cmd[4] == GEN5_APP_CMD_REPORT_ID) {
+	if (cmd_len >= PIP_MIN_APP_CMD_LENGTH &&
+			cmd[4] == PIP_APP_CMD_REPORT_ID) {
 		/* Application command */
-		gen5_pip->in_progress_cmd = cmd[6] & 0x7f;
-	} else if (cmd_len >= GEN5_MIN_BL_CMD_LENGTH &&
-			cmd[4] == GEN5_BL_CMD_REPORT_ID) {
+		pip->in_progress_cmd = cmd[6] & 0x7f;
+	} else if (cmd_len >= PIP_MIN_BL_CMD_LENGTH &&
+			cmd[4] == PIP_BL_CMD_REPORT_ID) {
 		/* Bootloader command */
-		gen5_pip->in_progress_cmd = cmd[7];
+		pip->in_progress_cmd = cmd[7];
 	}
 
 	/* Send command data, wait and read output response data's length. */
 	if (irq_mode) {
-		gen5_pip->is_irq_mode = true;
+		pip->is_irq_mode = true;
 		error = cyapa_do_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 							timeout);
 		if (error == -ETIMEDOUT && resp_data &&
@@ -646,54 +606,54 @@
 				error = error ? error : -ETIMEDOUT;
 		}
 	} else {
-		gen5_pip->is_irq_mode = false;
+		pip->is_irq_mode = false;
 		error = cyapa_do_i2c_pip_cmd_polling(cyapa, cmd, cmd_len,
 				resp_data, resp_len, timeout, func);
 	}
 
-	gen5_pip->resp_sort_func = NULL;
-	gen5_pip->resp_data = NULL;
-	gen5_pip->resp_len = NULL;
-	gen5_pip->in_progress_cmd = TSG_INVALID_CMD;
+	pip->resp_sort_func = NULL;
+	pip->resp_data = NULL;
+	pip->resp_len = NULL;
+	pip->in_progress_cmd = PIP_INVALID_CMD;
 
-	mutex_unlock(&gen5_pip->cmd_lock);
+	mutex_unlock(&pip->cmd_lock);
 	return error;
 }
 
-static bool cyapa_gen5_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_bl_resp_data(struct cyapa *cyapa,
 		u8 *data, int len)
 {
-	if (!data || len < GEN5_MIN_BL_RESP_LENGTH)
+	if (!data || len < PIP_MIN_BL_RESP_LENGTH)
 		return false;
 
 	/* Bootloader input report id 30h */
-	if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_RESP_REPORT_ID &&
-			data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
-			data[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY)
+	if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_BL_RESP_REPORT_ID &&
+			data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+			data[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY)
 		return true;
 
 	return false;
 }
 
-static bool cyapa_gen5_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
+bool cyapa_sort_tsg_pip_app_resp_data(struct cyapa *cyapa,
 		u8 *data, int len)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int resp_len;
 
-	if (!data || len < GEN5_MIN_APP_RESP_LENGTH)
+	if (!data || len < PIP_MIN_APP_RESP_LENGTH)
 		return false;
 
-	if (data[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_APP_RESP_REPORT_ID &&
-			data[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY) {
-		resp_len = get_unaligned_le16(&data[GEN5_RESP_LENGTH_OFFSET]);
-		if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) == 0x00 &&
-			resp_len == GEN5_UNSUPPORTED_CMD_RESP_LENGTH &&
-			data[5] == gen5_pip->in_progress_cmd) {
+	if (data[PIP_RESP_REPORT_ID_OFFSET] == PIP_APP_RESP_REPORT_ID &&
+			data[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY) {
+		resp_len = get_unaligned_le16(&data[PIP_RESP_LENGTH_OFFSET]);
+		if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) == 0x00 &&
+			resp_len == PIP_UNSUPPORTED_CMD_RESP_LENGTH &&
+			data[5] == pip->in_progress_cmd) {
 			/* Unsupported command code */
 			return false;
-		} else if (GET_GEN5_CMD_CODE(data[GEN5_RESP_APP_CMD_OFFSET]) ==
-				gen5_pip->in_progress_cmd) {
+		} else if (GET_PIP_CMD_CODE(data[PIP_RESP_APP_CMD_OFFSET]) ==
+				pip->in_progress_cmd) {
 			/* Correct command response received */
 			return true;
 		}
@@ -702,10 +662,10 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_application_launch_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_application_launch_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
-	if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+	if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
 		return false;
 
 	/*
@@ -718,25 +678,25 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_hid_descriptor_data(struct cyapa *cyapa,
+static bool cyapa_sort_gen5_hid_descriptor_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
 	int resp_len;
 	int max_output_len;
 
 	/* Check hid descriptor. */
-	if (len != GEN5_HID_DESCRIPTOR_SIZE)
+	if (len != PIP_HID_DESCRIPTOR_SIZE)
 		return false;
 
-	resp_len = get_unaligned_le16(&buf[GEN5_RESP_LENGTH_OFFSET]);
+	resp_len = get_unaligned_le16(&buf[PIP_RESP_LENGTH_OFFSET]);
 	max_output_len = get_unaligned_le16(&buf[16]);
-	if (resp_len == GEN5_HID_DESCRIPTOR_SIZE) {
-		if (buf[GEN5_RESP_REPORT_ID_OFFSET] == GEN5_BL_HID_REPORT_ID &&
+	if (resp_len == PIP_HID_DESCRIPTOR_SIZE) {
+		if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID &&
 				max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 			/* BL mode HID Descriptor */
 			return true;
-		} else if ((buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+		} else if ((buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 				max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 			/* APP mode HID Descriptor */
 			return true;
@@ -746,21 +706,21 @@
 	return false;
 }
 
-static bool cyapa_gen5_sort_deep_sleep_data(struct cyapa *cyapa,
+static bool cyapa_sort_pip_deep_sleep_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
-	if (len == GEN5_DEEP_SLEEP_RESP_LENGTH &&
-		buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-			GEN5_APP_DEEP_SLEEP_REPORT_ID &&
-		(buf[4] & GEN5_DEEP_SLEEP_OPCODE_MASK) ==
-			GEN5_DEEP_SLEEP_OPCODE)
+	if (len == PIP_DEEP_SLEEP_RESP_LENGTH &&
+		buf[PIP_RESP_REPORT_ID_OFFSET] ==
+			PIP_APP_DEEP_SLEEP_REPORT_ID &&
+		(buf[4] & PIP_DEEP_SLEEP_OPCODE_MASK) ==
+			PIP_DEEP_SLEEP_OPCODE)
 		return true;
 	return false;
 }
 
 static int gen5_idle_state_parse(struct cyapa *cyapa)
 {
-	u8 resp_data[GEN5_HID_DESCRIPTOR_SIZE];
+	u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
 	int max_output_len;
 	int length;
 	u8 cmd[2];
@@ -778,9 +738,9 @@
 	if (ret != 3)
 		return ret < 0 ? ret : -EIO;
 
-	length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
-	if (length == GEN5_RESP_LENGTH_SIZE) {
-		/* Normal state of Gen5 with no data to respose */
+	length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
+	if (length == PIP_RESP_LENGTH_SIZE) {
+		/* Normal state of Gen5 with no data to respond with */
 		cyapa->gen = CYAPA_GEN5;
 
 		cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -788,30 +748,30 @@
 		/* Read description from trackpad device */
 		cmd[0] = 0x01;
 		cmd[1] = 0x00;
-		length = GEN5_HID_DESCRIPTOR_SIZE;
+		length = PIP_HID_DESCRIPTOR_SIZE;
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-				cmd, GEN5_RESP_LENGTH_SIZE,
+				cmd, PIP_RESP_LENGTH_SIZE,
 				resp_data, &length,
 				300,
-				cyapa_gen5_sort_hid_descriptor_data,
+				cyapa_sort_gen5_hid_descriptor_data,
 				false);
 		if (error)
 			return error;
 
 		length = get_unaligned_le16(
-				&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+				&resp_data[PIP_RESP_LENGTH_OFFSET]);
 		max_output_len = get_unaligned_le16(&resp_data[16]);
-		if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
-				length == GEN5_RESP_LENGTH_SIZE) &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_HID_REPORT_ID) &&
+		if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+				length == PIP_RESP_LENGTH_SIZE) &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_BL_REPORT_ID) &&
 			max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 			/* BL mode HID Description read */
 			cyapa->state = CYAPA_STATE_GEN5_BL;
-		} else if ((length == GEN5_HID_DESCRIPTOR_SIZE ||
-				length == GEN5_RESP_LENGTH_SIZE) &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+		} else if ((length == PIP_HID_DESCRIPTOR_SIZE ||
+				length == PIP_RESP_LENGTH_SIZE) &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 			max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 			/* APP mode HID Description read */
 			cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -839,14 +799,14 @@
 	 * or report any touch or button data.
 	 */
 	ret = cyapa_i2c_pip_read(cyapa, resp_data,
-			GEN5_HID_DESCRIPTOR_SIZE);
-	if (ret != GEN5_HID_DESCRIPTOR_SIZE)
+			PIP_HID_DESCRIPTOR_SIZE);
+	if (ret != PIP_HID_DESCRIPTOR_SIZE)
 		return ret < 0 ? ret : -EIO;
-	length = get_unaligned_le16(&resp_data[GEN5_RESP_LENGTH_OFFSET]);
+	length = get_unaligned_le16(&resp_data[PIP_RESP_LENGTH_OFFSET]);
 	max_output_len = get_unaligned_le16(&resp_data[16]);
-	if (length == GEN5_RESP_LENGTH_SIZE) {
-		if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_HID_REPORT_ID) {
+	if (length == PIP_RESP_LENGTH_SIZE) {
+		if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_BL_REPORT_ID) {
 			/*
 			 * BL mode HID Description has been previously
 			 * read out.
@@ -861,15 +821,15 @@
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_APP;
 		}
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			resp_data[2] == GEN5_BL_HID_REPORT_ID &&
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			resp_data[2] == PIP_HID_BL_REPORT_ID &&
 			max_output_len == GEN5_BL_MAX_OUTPUT_LENGTH) {
 		/* BL mode HID Description read. */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			(resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_HID_REPORT_ID) &&
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			(resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_HID_APP_REPORT_ID) &&
 			max_output_len == GEN5_APP_MAX_OUTPUT_LENGTH) {
 		/* APP mode HID Description read. */
 		cyapa->gen = CYAPA_GEN5;
@@ -886,22 +846,22 @@
 {
 	int length;
 
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	switch (reg_data[GEN5_RESP_REPORT_ID_OFFSET]) {
-	case GEN5_TOUCH_REPORT_ID:
-		if (length < GEN5_TOUCH_REPORT_HEAD_SIZE ||
-			length > GEN5_TOUCH_REPORT_MAX_SIZE)
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	switch (reg_data[PIP_RESP_REPORT_ID_OFFSET]) {
+	case PIP_TOUCH_REPORT_ID:
+		if (length < PIP_TOUCH_REPORT_HEAD_SIZE ||
+			length > PIP_TOUCH_REPORT_MAX_SIZE)
 			return -EINVAL;
 		break;
-	case GEN5_BTN_REPORT_ID:
+	case PIP_BTN_REPORT_ID:
 	case GEN5_OLD_PUSH_BTN_REPORT_ID:
-	case GEN5_PUSH_BTN_REPORT_ID:
-		if (length < GEN5_BTN_REPORT_HEAD_SIZE ||
-			length > GEN5_BTN_REPORT_MAX_SIZE)
+	case PIP_PUSH_BTN_REPORT_ID:
+		if (length < PIP_BTN_REPORT_HEAD_SIZE ||
+			length > PIP_BTN_REPORT_MAX_SIZE)
 			return -EINVAL;
 		break;
-	case GEN5_WAKEUP_EVENT_REPORT_ID:
-		if (length != GEN5_WAKEUP_EVENT_SIZE)
+	case PIP_WAKEUP_EVENT_REPORT_ID:
+		if (length != PIP_WAKEUP_EVENT_SIZE)
 			return -EINVAL;
 		break;
 	default:
@@ -915,7 +875,7 @@
 
 static int gen5_cmd_resp_header_parse(struct cyapa *cyapa, u8 *reg_data)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 	int ret;
 
@@ -924,15 +884,15 @@
 	 * otherwise Gen5 trackpad cannot response next command
 	 * or report any touch or button data.
 	 */
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	ret = cyapa_i2c_pip_read(cyapa, gen5_pip->empty_buf, length);
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	ret = cyapa_i2c_pip_read(cyapa, pip->empty_buf, length);
 	if (ret != length)
 		return ret < 0 ? ret : -EIO;
 
-	if (length == GEN5_RESP_LENGTH_SIZE) {
+	if (length == PIP_RESP_LENGTH_SIZE) {
 		/* Previous command has read the data through out. */
-		if (reg_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID) {
+		if (reg_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID) {
 			/* Gen5 BL command response data detected */
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_BL;
@@ -941,21 +901,21 @@
 			cyapa->gen = CYAPA_GEN5;
 			cyapa->state = CYAPA_STATE_GEN5_APP;
 		}
-	} else if ((gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID) &&
-			(gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
-				GEN5_RESP_RSVD_KEY) &&
-			(gen5_pip->empty_buf[GEN5_RESP_BL_SOP_OFFSET] ==
-				GEN5_SOP_KEY) &&
-			(gen5_pip->empty_buf[length - 1] ==
-				GEN5_EOP_KEY)) {
+	} else if ((pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID) &&
+			(pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+				PIP_RESP_RSVD_KEY) &&
+			(pip->empty_buf[PIP_RESP_BL_SOP_OFFSET] ==
+				PIP_SOP_KEY) &&
+			(pip->empty_buf[length - 1] ==
+				PIP_EOP_KEY)) {
 		/* Gen5 BL command response data detected */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (gen5_pip->empty_buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_APP_RESP_REPORT_ID &&
-			gen5_pip->empty_buf[GEN5_RESP_RSVD_OFFSET] ==
-				GEN5_RESP_RSVD_KEY) {
+	} else if (pip->empty_buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_APP_RESP_REPORT_ID &&
+			pip->empty_buf[PIP_RESP_RSVD_OFFSET] ==
+				PIP_RESP_RSVD_KEY) {
 		/* Gen5 APP command response data detected */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_APP;
@@ -977,12 +937,12 @@
 	cyapa->state = CYAPA_STATE_NO_DEVICE;
 
 	/* Parse based on Gen5 characteristic registers and bits */
-	length = get_unaligned_le16(&reg_data[GEN5_RESP_LENGTH_OFFSET]);
-	if (length == 0 || length == GEN5_RESP_LENGTH_SIZE) {
+	length = get_unaligned_le16(&reg_data[PIP_RESP_LENGTH_OFFSET]);
+	if (length == 0 || length == PIP_RESP_LENGTH_SIZE) {
 		gen5_idle_state_parse(cyapa);
-	} else if (length == GEN5_HID_DESCRIPTOR_SIZE &&
-			(reg_data[2] == GEN5_BL_HID_REPORT_ID ||
-				reg_data[2] == GEN5_APP_HID_REPORT_ID)) {
+	} else if (length == PIP_HID_DESCRIPTOR_SIZE &&
+			(reg_data[2] == PIP_HID_BL_REPORT_ID ||
+				reg_data[2] == PIP_HID_APP_REPORT_ID)) {
 		gen5_hid_description_header_parse(cyapa, reg_data);
 	} else if ((length == GEN5_APP_REPORT_DESCRIPTOR_SIZE ||
 			length == GEN5_APP_CONTRACT_REPORT_DESCRIPTOR_SIZE) &&
@@ -992,17 +952,17 @@
 		cyapa->state = CYAPA_STATE_GEN5_APP;
 	} else if (length == GEN5_BL_REPORT_DESCRIPTOR_SIZE &&
 			reg_data[2] == GEN5_BL_REPORT_DESCRIPTOR_ID) {
-		/* 0x1D 0x00 0xFE is Gen5 BL report descriptior header. */
+		/* 0x1D 0x00 0xFE is Gen5 BL report descriptor header. */
 		cyapa->gen = CYAPA_GEN5;
 		cyapa->state = CYAPA_STATE_GEN5_BL;
-	} else if (reg_data[2] == GEN5_TOUCH_REPORT_ID ||
-			reg_data[2] == GEN5_BTN_REPORT_ID ||
+	} else if (reg_data[2] == PIP_TOUCH_REPORT_ID ||
+			reg_data[2] == PIP_BTN_REPORT_ID ||
 			reg_data[2] == GEN5_OLD_PUSH_BTN_REPORT_ID ||
-			reg_data[2] == GEN5_PUSH_BTN_REPORT_ID ||
-			reg_data[2] == GEN5_WAKEUP_EVENT_REPORT_ID) {
+			reg_data[2] == PIP_PUSH_BTN_REPORT_ID ||
+			reg_data[2] == PIP_WAKEUP_EVENT_REPORT_ID) {
 		gen5_report_data_header_parse(cyapa, reg_data);
-	} else if (reg_data[2] == GEN5_BL_RESP_REPORT_ID ||
-			reg_data[2] == GEN5_APP_RESP_REPORT_ID) {
+	} else if (reg_data[2] == PIP_BL_RESP_REPORT_ID ||
+			reg_data[2] == PIP_APP_RESP_REPORT_ID) {
 		gen5_cmd_resp_header_parse(cyapa, reg_data);
 	}
 
@@ -1023,14 +983,25 @@
 	return -EAGAIN;
 }
 
-static int cyapa_gen5_bl_initiate(struct cyapa *cyapa,
-		const struct firmware *fw)
+static struct cyapa_tsg_bin_image_data_record *
+cyapa_get_image_record_data_num(const struct firmware *fw,
+		int *record_num)
 {
-	struct cyapa_tsg_bin_image *image;
-	struct gen5_bl_cmd_head *bl_cmd_head;
-	struct gen5_bl_packet_start *bl_packet_start;
-	struct gen5_bl_initiate_cmd_data *cmd_data;
-	struct gen5_bl_packet_end *bl_packet_end;
+	int head_size;
+
+	head_size = fw->data[0] + 1;
+	*record_num = (fw->size - head_size) /
+			sizeof(struct cyapa_tsg_bin_image_data_record);
+	return (struct cyapa_tsg_bin_image_data_record *)&fw->data[head_size];
+}
+
+int cyapa_pip_bl_initiate(struct cyapa *cyapa, const struct firmware *fw)
+{
+	struct cyapa_tsg_bin_image_data_record *image_records;
+	struct pip_bl_cmd_head *bl_cmd_head;
+	struct pip_bl_packet_start *bl_packet_start;
+	struct pip_bl_initiate_cmd_data *cmd_data;
+	struct pip_bl_packet_end *bl_packet_end;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
 	u16 cmd_data_len;
@@ -1046,30 +1017,28 @@
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+	bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
 	cmd_data_len = CYAPA_TSG_BL_KEY_SIZE + CYAPA_TSG_FLASH_MAP_BLOCK_SIZE;
-	cmd_len = sizeof(struct gen5_bl_cmd_head) + cmd_data_len +
-		  sizeof(struct gen5_bl_packet_end);
+	cmd_len = sizeof(struct pip_bl_cmd_head) + cmd_data_len +
+		  sizeof(struct pip_bl_packet_end);
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
 	put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
-	bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
+	bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
 
 	bl_packet_start = &bl_cmd_head->packet_start;
-	bl_packet_start->sop = GEN5_SOP_KEY;
-	bl_packet_start->cmd_code = GEN5_BL_CMD_INITIATE_BL;
+	bl_packet_start->sop = PIP_SOP_KEY;
+	bl_packet_start->cmd_code = PIP_BL_CMD_INITIATE_BL;
 	/* 8 key bytes and 128 bytes block size */
 	put_unaligned_le16(cmd_data_len, &bl_packet_start->data_length);
 
-	cmd_data = (struct gen5_bl_initiate_cmd_data *)bl_cmd_head->data;
-	memcpy(cmd_data->key, cyapa_gen5_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
+	cmd_data = (struct pip_bl_initiate_cmd_data *)bl_cmd_head->data;
+	memcpy(cmd_data->key, cyapa_pip_bl_cmd_key, CYAPA_TSG_BL_KEY_SIZE);
 
-	/* Copy 60 bytes Meta Data Row Parameters */
-	image = (struct cyapa_tsg_bin_image *)fw->data;
-	records_num = (fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
-				sizeof(struct cyapa_tsg_bin_image_data_record);
+	image_records = cyapa_get_image_record_data_num(fw, &records_num);
+
 	/* APP_INTEGRITY row is always the last row block */
-	data = image->records[records_num - 1].record_data;
+	data = image_records[records_num - 1].record_data;
 	memcpy(cmd_data->metadata_raw_parameter, data,
 		CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
 
@@ -1077,47 +1046,47 @@
 				CYAPA_TSG_FLASH_MAP_METADATA_SIZE);
 	put_unaligned_le16(meta_data_crc, &cmd_data->metadata_crc);
 
-	bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+	bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
 				cmd_data_len);
 	cmd_crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
-		sizeof(struct gen5_bl_packet_start) + cmd_data_len);
+		sizeof(struct pip_bl_packet_start) + cmd_data_len);
 	put_unaligned_le16(cmd_crc, &bl_packet_end->crc);
-	bl_packet_end->eop = GEN5_EOP_KEY;
+	bl_packet_end->eop = PIP_EOP_KEY;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, cmd_len,
 			resp_data, &resp_len, 12000,
-			cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
-	if (error || resp_len != GEN5_BL_INITIATE_RESP_LEN ||
-			resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			cyapa_sort_tsg_pip_bl_resp_data, true);
+	if (error || resp_len != PIP_BL_INITIATE_RESP_LEN ||
+			resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EAGAIN;
 
 	return 0;
 }
 
-static bool cyapa_gen5_sort_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
+static bool cyapa_sort_pip_bl_exit_data(struct cyapa *cyapa, u8 *buf, int len)
 {
-	if (buf == NULL || len < GEN5_RESP_LENGTH_SIZE)
+	if (buf == NULL || len < PIP_RESP_LENGTH_SIZE)
 		return false;
 
 	if (buf[0] == 0 && buf[1] == 0)
 		return true;
 
 	/* Exit bootloader failed for some reason. */
-	if (len == GEN5_BL_FAIL_EXIT_RESP_LEN &&
-			buf[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID &&
-			buf[GEN5_RESP_RSVD_OFFSET] == GEN5_RESP_RSVD_KEY &&
-			buf[GEN5_RESP_BL_SOP_OFFSET] == GEN5_SOP_KEY &&
-			buf[10] == GEN5_EOP_KEY)
+	if (len == PIP_BL_FAIL_EXIT_RESP_LEN &&
+			buf[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID &&
+			buf[PIP_RESP_RSVD_OFFSET] == PIP_RESP_RSVD_KEY &&
+			buf[PIP_RESP_BL_SOP_OFFSET] == PIP_SOP_KEY &&
+			buf[10] == PIP_EOP_KEY)
 		return true;
 
 	return false;
 }
 
-static int cyapa_gen5_bl_exit(struct cyapa *cyapa)
+int cyapa_pip_bl_exit(struct cyapa *cyapa)
 {
 
 	u8 bl_gen5_bl_exit[] = { 0x04, 0x00,
@@ -1132,13 +1101,13 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			bl_gen5_bl_exit, sizeof(bl_gen5_bl_exit),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_bl_exit_data, false);
+			5000, cyapa_sort_pip_bl_exit_data, false);
 	if (error)
 		return error;
 
-	if (resp_len == GEN5_BL_FAIL_EXIT_RESP_LEN ||
-			resp_data[GEN5_RESP_REPORT_ID_OFFSET] ==
-				GEN5_BL_RESP_REPORT_ID)
+	if (resp_len == PIP_BL_FAIL_EXIT_RESP_LEN ||
+			resp_data[PIP_RESP_REPORT_ID_OFFSET] ==
+				PIP_BL_RESP_REPORT_ID)
 		return -EAGAIN;
 
 	if (resp_data[0] == 0x00 && resp_data[1] == 0x00)
@@ -1147,7 +1116,7 @@
 	return -ENODEV;
 }
 
-static int cyapa_gen5_bl_enter(struct cyapa *cyapa)
+int cyapa_pip_bl_enter(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2F, 0x00, 0x01 };
 	u8 resp_data[2];
@@ -1157,15 +1126,12 @@
 	error = cyapa_poll_state(cyapa, 500);
 	if (error < 0)
 		return error;
-	if (cyapa->gen != CYAPA_GEN5)
-		return -EINVAL;
 
-	/* Already in Gen5 BL. Skipping exit. */
-	if (cyapa->state == CYAPA_STATE_GEN5_BL)
+	/* Already in bootloader mode; skipping exit. */
+	if (cyapa_is_pip_bl_mode(cyapa))
 		return 0;
-
-	if (cyapa->state != CYAPA_STATE_GEN5_APP)
-		return -EAGAIN;
+	else if (!cyapa_is_pip_app_mode(cyapa))
+		return -EINVAL;
 
 	/* Try to dump all buffered report data before any send command. */
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
@@ -1179,39 +1145,79 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_application_launch_data,
+			5000, cyapa_sort_pip_application_launch_data,
 			true);
 	if (error || resp_data[0] != 0x00 || resp_data[1] != 0x00)
 		return error < 0 ? error : -EAGAIN;
 
 	cyapa->operational = false;
-	cyapa->state = CYAPA_STATE_GEN5_BL;
+	if (cyapa->gen == CYAPA_GEN5)
+		cyapa->state = CYAPA_STATE_GEN5_BL;
+	else if (cyapa->gen == CYAPA_GEN6)
+		cyapa->state = CYAPA_STATE_GEN6_BL;
 	return 0;
 }
 
-static int cyapa_gen5_check_fw(struct cyapa *cyapa, const struct firmware *fw)
+static int cyapa_pip_fw_head_check(struct cyapa *cyapa,
+		struct cyapa_tsg_bin_image_head *image_head)
+{
+	if (image_head->head_size != 0x0C && image_head->head_size != 0x12)
+		return -EINVAL;
+
+	switch (cyapa->gen) {
+	case CYAPA_GEN6:
+		if (image_head->family_id != 0x9B ||
+		    image_head->silicon_id_hi != 0x0B)
+			return -EINVAL;
+		break;
+	case CYAPA_GEN5:
+		/* Gen5 without proximity support. */
+		if (cyapa->platform_ver < 2) {
+			if (image_head->head_size == 0x0C)
+				break;
+			return -EINVAL;
+		}
+
+		if (image_head->family_id != 0x91 ||
+		    image_head->silicon_id_hi != 0x02)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cyapa_pip_check_fw(struct cyapa *cyapa, const struct firmware *fw)
 {
 	struct device *dev = &cyapa->client->dev;
-	const struct cyapa_tsg_bin_image *image = (const void *)fw->data;
+	struct cyapa_tsg_bin_image_data_record *image_records;
 	const struct cyapa_tsg_bin_image_data_record *app_integrity;
-	const struct gen5_bl_metadata_row_params *metadata;
-	size_t flash_records_count;
+	const struct tsg_bl_metadata_row_params *metadata;
+	int flash_records_count;
 	u32 fw_app_start, fw_upgrade_start;
 	u16 fw_app_len, fw_upgrade_len;
 	u16 app_crc;
 	u16 app_integrity_crc;
-	int record_index;
 	int i;
 
-	flash_records_count = (fw->size -
-			sizeof(struct cyapa_tsg_bin_image_head)) /
-			sizeof(struct cyapa_tsg_bin_image_data_record);
+	/* Verify the firmware image is not misused for Gen5 and Gen6. */
+	if (cyapa_pip_fw_head_check(cyapa,
+		(struct cyapa_tsg_bin_image_head *)fw->data)) {
+		dev_err(dev, "%s: firmware image does not match TP device.\n",
+			     __func__);
+		return -EINVAL;
+	}
+
+	image_records =
+		cyapa_get_image_record_data_num(fw, &flash_records_count);
 
 	/*
 	 * APP_INTEGRITY row is always the last row block,
 	 * and the row id must be 0x01ff.
 	 */
-	app_integrity = &image->records[flash_records_count - 1];
+	app_integrity = &image_records[flash_records_count - 1];
 
 	if (app_integrity->flash_array_id != 0x00 ||
 	    get_unaligned_be16(&app_integrity->row_number) != 0x01ff) {
@@ -1242,14 +1248,11 @@
 		return -EINVAL;
 	}
 
-	/*
-	 * Verify application image CRC
-	 */
-	record_index = fw_app_start / CYAPA_TSG_FW_ROW_SIZE -
-				CYAPA_TSG_IMG_START_ROW_NUM;
+	/* Verify application image CRC. */
 	app_crc = 0xffffU;
 	for (i = 0; i < fw_app_len / CYAPA_TSG_FW_ROW_SIZE; i++) {
-		const u8 *data = image->records[record_index + i].record_data;
+		const u8 *data = image_records[i].record_data;
+
 		app_crc = crc_itu_t(app_crc, data, CYAPA_TSG_FW_ROW_SIZE);
 	}
 
@@ -1261,13 +1264,13 @@
 	return 0;
 }
 
-static int cyapa_gen5_write_fw_block(struct cyapa *cyapa,
+static int cyapa_pip_write_fw_block(struct cyapa *cyapa,
 		struct cyapa_tsg_bin_image_data_record *flash_record)
 {
-	struct gen5_bl_cmd_head *bl_cmd_head;
-	struct gen5_bl_packet_start *bl_packet_start;
-	struct gen5_bl_flash_row_head *flash_row_head;
-	struct gen5_bl_packet_end *bl_packet_end;
+	struct pip_bl_cmd_head *bl_cmd_head;
+	struct pip_bl_packet_start *bl_packet_start;
+	struct tsg_bl_flash_row_head *flash_row_head;
+	struct pip_bl_packet_end *bl_packet_end;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	u16 cmd_len;
 	u8 flash_array_id;
@@ -1286,71 +1289,68 @@
 	record_data = flash_record->record_data;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	bl_cmd_head = (struct gen5_bl_cmd_head *)cmd;
+	bl_cmd_head = (struct pip_bl_cmd_head *)cmd;
 	bl_packet_start = &bl_cmd_head->packet_start;
-	cmd_len = sizeof(struct gen5_bl_cmd_head) +
-		  sizeof(struct gen5_bl_flash_row_head) +
+	cmd_len = sizeof(struct pip_bl_cmd_head) +
+		  sizeof(struct tsg_bl_flash_row_head) +
 		  CYAPA_TSG_FLASH_MAP_BLOCK_SIZE +
-		  sizeof(struct gen5_bl_packet_end);
+		  sizeof(struct pip_bl_packet_end);
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &bl_cmd_head->addr);
 	/* Don't include 2 bytes register address */
 	put_unaligned_le16(cmd_len - 2, &bl_cmd_head->length);
-	bl_cmd_head->report_id = GEN5_BL_CMD_REPORT_ID;
-	bl_packet_start->sop = GEN5_SOP_KEY;
-	bl_packet_start->cmd_code = GEN5_BL_CMD_PROGRAM_VERIFY_ROW;
+	bl_cmd_head->report_id = PIP_BL_CMD_REPORT_ID;
+	bl_packet_start->sop = PIP_SOP_KEY;
+	bl_packet_start->cmd_code = PIP_BL_CMD_PROGRAM_VERIFY_ROW;
 
 	/* 1 (Flash Array ID) + 2 (Flash Row ID) + 128 (flash data) */
-	data_len = sizeof(struct gen5_bl_flash_row_head) + record_len;
+	data_len = sizeof(struct tsg_bl_flash_row_head) + record_len;
 	put_unaligned_le16(data_len, &bl_packet_start->data_length);
 
-	flash_row_head = (struct gen5_bl_flash_row_head *)bl_cmd_head->data;
+	flash_row_head = (struct tsg_bl_flash_row_head *)bl_cmd_head->data;
 	flash_row_head->flash_array_id = flash_array_id;
 	put_unaligned_le16(flash_row_id, &flash_row_head->flash_row_id);
 	memcpy(flash_row_head->flash_data, record_data, record_len);
 
-	bl_packet_end = (struct gen5_bl_packet_end *)(bl_cmd_head->data +
+	bl_packet_end = (struct pip_bl_packet_end *)(bl_cmd_head->data +
 						      data_len);
 	crc = crc_itu_t(0xffff, (u8 *)bl_packet_start,
-		sizeof(struct gen5_bl_packet_start) + data_len);
+		sizeof(struct pip_bl_packet_start) + data_len);
 	put_unaligned_le16(crc, &bl_packet_end->crc);
-	bl_packet_end->eop = GEN5_EOP_KEY;
+	bl_packet_end->eop = PIP_EOP_KEY;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_bl_resp_data, true);
-	if (error || resp_len != GEN5_BL_BLOCK_WRITE_RESP_LEN ||
-			resp_data[2] != GEN5_BL_RESP_REPORT_ID ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			500, cyapa_sort_tsg_pip_bl_resp_data, true);
+	if (error || resp_len != PIP_BL_BLOCK_WRITE_RESP_LEN ||
+			resp_data[2] != PIP_BL_RESP_REPORT_ID ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EAGAIN;
 
 	return 0;
 }
 
-static int cyapa_gen5_do_fw_update(struct cyapa *cyapa,
+int cyapa_pip_do_fw_update(struct cyapa *cyapa,
 		const struct firmware *fw)
 {
 	struct device *dev = &cyapa->client->dev;
-	struct cyapa_tsg_bin_image_data_record *flash_record;
-	struct cyapa_tsg_bin_image *image =
-		(struct cyapa_tsg_bin_image *)fw->data;
+	struct cyapa_tsg_bin_image_data_record *image_records;
 	int flash_records_count;
 	int i;
 	int error;
 
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
-	flash_records_count =
-		(fw->size - sizeof(struct cyapa_tsg_bin_image_head)) /
-			sizeof(struct cyapa_tsg_bin_image_data_record);
+	image_records =
+		cyapa_get_image_record_data_num(fw, &flash_records_count);
+
 	/*
 	 * The last flash row 0x01ff has been written through bl_initiate
 	 * command, so DO NOT write flash 0x01ff to trackpad device.
 	 */
 	for (i = 0; i < (flash_records_count - 1); i++) {
-		flash_record = &image->records[i];
-		error = cyapa_gen5_write_fw_block(cyapa, flash_record);
+		error = cyapa_pip_write_fw_block(cyapa, &image_records[i]);
 		if (error) {
 			dev_err(dev, "%s: Gen5 FW update aborted: %d\n",
 				__func__, error);
@@ -1372,9 +1372,9 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x08) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EINVAL;
 
 	return 0;
@@ -1383,7 +1383,7 @@
 static int cyapa_gen5_set_interval_time(struct cyapa *cyapa,
 		u8 parameter_id, u16 interval_time)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_app_set_parameter_data *parameter_data;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
@@ -1393,10 +1393,10 @@
 	int error;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 	parameter_data = (struct gen5_app_set_parameter_data *)
 			 app_cmd_head->parameter_data;
-	cmd_len = sizeof(struct gen5_app_cmd_head) +
+	cmd_len = sizeof(struct pip_app_cmd_head) +
 		  sizeof(struct gen5_app_set_parameter_data);
 
 	switch (parameter_id) {
@@ -1413,14 +1413,14 @@
 		return -EINVAL;
 	}
 
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	/*
 	 * Don't include unused parameter value bytes and
 	 * 2 bytes register address.
 	 */
 	put_unaligned_le16(cmd_len - (4 - parameter_size) - 2,
 			   &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
 	parameter_data->parameter_id = parameter_id;
 	parameter_data->parameter_size = parameter_size;
@@ -1428,7 +1428,7 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != parameter_id ||
 		resp_data[6] != parameter_size ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER))
@@ -1440,7 +1440,7 @@
 static int cyapa_gen5_get_interval_time(struct cyapa *cyapa,
 		u8 parameter_id, u16 *interval_time)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_app_get_parameter_data *parameter_data;
 	u8 cmd[CYAPA_TSG_MAX_CMD_SIZE];
 	int cmd_len;
@@ -1451,10 +1451,10 @@
 	int error;
 
 	memset(cmd, 0, CYAPA_TSG_MAX_CMD_SIZE);
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 	parameter_data = (struct gen5_app_get_parameter_data *)
 			 app_cmd_head->parameter_data;
-	cmd_len = sizeof(struct gen5_app_cmd_head) +
+	cmd_len = sizeof(struct pip_app_cmd_head) +
 		  sizeof(struct gen5_app_get_parameter_data);
 
 	*interval_time = 0;
@@ -1472,17 +1472,17 @@
 		return -EINVAL;
 	}
 
-	put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	/* Don't include 2 bytes register address */
 	put_unaligned_le16(cmd_len - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_GET_PARAMETER;
 	parameter_data->parameter_id = parameter_id;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, cmd_len,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != parameter_id || resp_data[6] == 0 ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_GET_PARAMETER))
 		return error < 0 ? error : -EINVAL;
@@ -1497,18 +1497,18 @@
 
 static int cyapa_gen5_disable_pip_report(struct cyapa *cyapa)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[10];
 	u8 resp_data[7];
 	int resp_len;
 	int error;
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
 
-	put_unaligned_le16(GEN5_HID_DESCRIPTOR_ADDR, &app_cmd_head->addr);
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_SET_PARAMETER;
 	app_cmd_head->parameter_data[0] = GEN5_PARAMETER_DISABLE_PIP_REPORT;
 	app_cmd_head->parameter_data[1] = 0x01;
@@ -1516,7 +1516,7 @@
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, false);
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
 	if (error || resp_data[5] != GEN5_PARAMETER_DISABLE_PIP_REPORT ||
 		!VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_SET_PARAMETER) ||
 		resp_data[6] != 0x01)
@@ -1525,26 +1525,48 @@
 	return 0;
 }
 
-static int cyapa_gen5_deep_sleep(struct cyapa *cyapa, u8 state)
+int cyapa_pip_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, PIP_SET_PROXIMITY,
+		     (u8)!!enable
+	};
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_SET_PROXIMITY) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data)) {
+		error = (error == -ETIMEDOUT) ? -EOPNOTSUPP : error;
+		return error < 0 ? error : -EINVAL;
+	}
+
+	return 0;
+}
+
+int cyapa_pip_deep_sleep(struct cyapa *cyapa, u8 state)
 {
 	u8 cmd[] = { 0x05, 0x00, 0x00, 0x08};
 	u8 resp_data[5];
 	int resp_len;
 	int error;
 
-	cmd[2] = state & GEN5_DEEP_SLEEP_STATE_MASK;
+	cmd[2] = state & PIP_DEEP_SLEEP_STATE_MASK;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_deep_sleep_data, false);
-	if (error || ((resp_data[3] & GEN5_DEEP_SLEEP_STATE_MASK) != state))
+			500, cyapa_sort_pip_deep_sleep_data, false);
+	if (error || ((resp_data[3] & PIP_DEEP_SLEEP_STATE_MASK) != state))
 		return -EINVAL;
 
 	return 0;
 }
 
 static int cyapa_gen5_set_power_mode(struct cyapa *cyapa,
-		u8 power_mode, u16 sleep_time)
+		u8 power_mode, u16 sleep_time, bool is_suspend)
 {
 	struct device *dev = &cyapa->client->dev;
 	u8 power_state;
@@ -1553,43 +1575,40 @@
 	if (cyapa->state != CYAPA_STATE_GEN5_APP)
 		return 0;
 
-	/* Dump all the report data before do power mode commmands. */
-	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
 		/*
 		 * Assume TP in deep sleep mode when driver is loaded,
 		 * avoid driver unload and reload command IO issue caused by TP
 		 * has been set into deep sleep mode when unloading.
 		 */
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
 	}
 
-	if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) &&
-			GEN5_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+	if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+			PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
 		if (cyapa_gen5_get_interval_time(cyapa,
 				GEN5_PARAMETER_LP_INTRVL_ID,
 				&cyapa->dev_sleep_time) != 0)
-			GEN5_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+			PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
 
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
 		if (power_mode == PWR_MODE_OFF ||
 			power_mode == PWR_MODE_FULL_ACTIVE ||
 			power_mode == PWR_MODE_BTN_ONLY ||
-			GEN5_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+			PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
 			/* Already in correct power mode state, early return. */
 			return 0;
 		}
 	}
 
 	if (power_mode == PWR_MODE_OFF) {
-		error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_OFF);
+		error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
 		if (error) {
 			dev_err(dev, "enter deep sleep fail: %d\n", error);
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
 		return 0;
 	}
 
@@ -1598,8 +1617,8 @@
 	 * state directly, must be woken up from sleep first, then
 	 * continue to do the next power state change.
 	 */
-	if (GEN5_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
-		error = cyapa_gen5_deep_sleep(cyapa, GEN5_DEEP_SLEEP_STATE_ON);
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+		error = cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
 		if (error) {
 			dev_err(dev, "deep sleep wake fail: %d\n", error);
 			return error;
@@ -1614,7 +1633,7 @@
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
 	} else if (power_mode == PWR_MODE_BTN_ONLY) {
 		error = cyapa_gen5_change_power_state(cyapa,
 				GEN5_POWER_STATE_BTN_ONLY);
@@ -1623,19 +1642,19 @@
 			return error;
 		}
 
-		GEN5_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
 	} else {
 		/*
 		 * Continue to change power mode even failed to set
 		 * interval time, it won't affect the power mode change.
 		 * except the sleep interval time is not correct.
 		 */
-		if (GEN5_DEV_UNINIT_SLEEP_TIME(cyapa) ||
-				sleep_time != GEN5_DEV_GET_SLEEP_TIME(cyapa))
+		if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) ||
+				sleep_time != PIP_DEV_GET_SLEEP_TIME(cyapa))
 			if (cyapa_gen5_set_interval_time(cyapa,
 					GEN5_PARAMETER_LP_INTRVL_ID,
 					sleep_time) == 0)
-				GEN5_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+				PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
 
 		if (sleep_time <= GEN5_POWER_READY_MAX_INTRVL_TIME)
 			power_state = GEN5_POWER_STATE_READY;
@@ -1658,17 +1677,17 @@
 		 * is suspending which may cause interrupt line unable to be
 		 * asserted again.
 		 */
-		cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
-		cyapa_gen5_disable_pip_report(cyapa);
+		if (is_suspend)
+			cyapa_gen5_disable_pip_report(cyapa);
 
-		GEN5_DEV_SET_PWR_STATE(cyapa,
+		PIP_DEV_SET_PWR_STATE(cyapa,
 			cyapa_sleep_time_to_pwr_cmd(sleep_time));
 	}
 
 	return 0;
 }
 
-static int cyapa_gen5_resume_scanning(struct cyapa *cyapa)
+int cyapa_pip_resume_scanning(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x04 };
 	u8 resp_data[6];
@@ -1682,7 +1701,7 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x04))
 		return -EINVAL;
 
@@ -1692,7 +1711,7 @@
 	return 0;
 }
 
-static int cyapa_gen5_suspend_scanning(struct cyapa *cyapa)
+int cyapa_pip_suspend_scanning(struct cyapa *cyapa)
 {
 	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x03 };
 	u8 resp_data[6];
@@ -1706,7 +1725,7 @@
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x03))
 		return -EINVAL;
 
@@ -1716,10 +1735,10 @@
 	return 0;
 }
 
-static int cyapa_gen5_calibrate_pwcs(struct cyapa *cyapa,
+static int cyapa_pip_calibrate_pwcs(struct cyapa *cyapa,
 		u8 calibrate_sensing_mode_type)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[8];
 	u8 resp_data[6];
 	int resp_len;
@@ -1729,25 +1748,25 @@
 	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
-	app_cmd_head->cmd_code = GEN5_CMD_CALIBRATE;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
+	app_cmd_head->cmd_code = PIP_CMD_CALIBRATE;
 	app_cmd_head->parameter_data[0] = calibrate_sensing_mode_type;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			5000, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
-	if (error || !VALID_CMD_RESP_HEADER(resp_data, GEN5_CMD_CALIBRATE) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			5000, cyapa_sort_tsg_pip_app_resp_data, true);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, PIP_CMD_CALIBRATE) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error < 0 ? error : -EAGAIN;
 
 	return 0;
 }
 
-static ssize_t cyapa_gen5_do_calibrate(struct device *dev,
+ssize_t cyapa_pip_do_calibrate(struct device *dev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
@@ -1755,25 +1774,25 @@
 	int error, calibrate_error;
 
 	/* 1. Suspend Scanning*/
-	error = cyapa_gen5_suspend_scanning(cyapa);
+	error = cyapa_pip_suspend_scanning(cyapa);
 	if (error)
 		return error;
 
 	/* 2. Do mutual capacitance fine calibrate. */
-	calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
-				CYAPA_SENSING_MODE_MUTUAL_CAP_FINE);
+	calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+				PIP_SENSING_MODE_MUTUAL_CAP_FINE);
 	if (calibrate_error)
 		goto resume_scanning;
 
 	/* 3. Do self capacitance calibrate. */
-	calibrate_error = cyapa_gen5_calibrate_pwcs(cyapa,
-				CYAPA_SENSING_MODE_SELF_CAP);
+	calibrate_error = cyapa_pip_calibrate_pwcs(cyapa,
+				PIP_SENSING_MODE_SELF_CAP);
 	if (calibrate_error)
 		goto resume_scanning;
 
 resume_scanning:
 	/* 4. Resume Scanning*/
-	error = cyapa_gen5_resume_scanning(cyapa);
+	error = cyapa_pip_resume_scanning(cyapa);
 	if (error || calibrate_error)
 		return error ? error : calibrate_error;
 
@@ -1856,7 +1875,7 @@
  * If the input value of @data_size is not 0, that means to read the mutual or
  * self local PWC data. The @idac_max, @idac_min and @idac_ave are used to
  * return the max, min and average value of the mutual or self local PWC data.
- * Note, in order to raed mutual local PWC data, must read invoke this function
+ * Note, in order to read mutual local PWC data, one must invoke this function
  * to read the mutual global idac data firstly to set the correct Rx number
  * value, otherwise, the read mutual idac and PWC data may not correct.
  */
@@ -1864,7 +1883,7 @@
 		u8 cmd_code, u8 idac_data_type, int *data_size,
 		int *idac_max, int *idac_min, int *idac_ave)
 {
-	struct gen5_app_cmd_head *cmd_head;
+	struct pip_app_cmd_head *cmd_head;
 	u8 cmd[12];
 	u8 resp_data[256];
 	int resp_len;
@@ -1879,7 +1898,7 @@
 	int i;
 	int error;
 
-	if (cmd_code != GEN5_CMD_RETRIEVE_DATA_STRUCTURE ||
+	if (cmd_code != PIP_RETRIEVE_DATA_STRUCTURE ||
 		(idac_data_type != GEN5_RETRIEVE_MUTUAL_PWC_DATA &&
 		idac_data_type != GEN5_RETRIEVE_SELF_CAP_PWC_DATA) ||
 		!data_size || !idac_max || !idac_min || !idac_ave)
@@ -1935,10 +1954,10 @@
 	}
 
 	memset(cmd, 0, sizeof(cmd));
-	cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &cmd_head->addr);
+	cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &cmd_head->length);
-	cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	cmd_head->cmd_code = cmd_code;
 	do {
 		read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) /
@@ -1953,11 +1972,11 @@
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 				cmd, sizeof(cmd),
 				resp_data, &resp_len,
-				500, cyapa_gen5_sort_tsg_pip_app_resp_data,
+				500, cyapa_sort_tsg_pip_app_resp_data,
 				true);
 		if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
 				!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
-				!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+				!PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
 				resp_data[6] != idac_data_type)
 			return (error < 0) ? error : -EAGAIN;
 		read_len = get_unaligned_le16(&resp_data[7]);
@@ -1997,7 +2016,7 @@
 				tmp_count < cyapa->aligned_electrodes_rx &&
 				read_global_idac) {
 				/*
-				 * The value gap betwen global and local mutual
+				 * The value gap between global and local mutual
 				 * idac data must be bigger than 50%.
 				 * Normally, global value bigger than 50,
 				 * local values less than 10.
@@ -2061,7 +2080,7 @@
 
 	data_size = 0;
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_MUTUAL_PWC_DATA,
 		&data_size,
 		gidac_mutual_max, gidac_mutual_min, gidac_mutual_ave);
@@ -2069,7 +2088,7 @@
 		return error;
 
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_MUTUAL_PWC_DATA,
 		&data_size,
 		lidac_mutual_max, lidac_mutual_min, lidac_mutual_ave);
@@ -2088,7 +2107,7 @@
 
 	data_size = 0;
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
 		&data_size,
 		lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2098,7 +2117,7 @@
 	*gidac_self_tx = *lidac_self_min;
 
 	error = cyapa_gen5_read_idac_data(cyapa,
-		GEN5_CMD_RETRIEVE_DATA_STRUCTURE,
+		PIP_RETRIEVE_DATA_STRUCTURE,
 		GEN5_RETRIEVE_SELF_CAP_PWC_DATA,
 		&data_size,
 		lidac_self_max, lidac_self_min, lidac_self_ave);
@@ -2107,27 +2126,27 @@
 
 static ssize_t cyapa_gen5_execute_panel_scan(struct cyapa *cyapa)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	u8 cmd[7];
 	u8 resp_data[6];
 	int resp_len;
 	int error;
 
 	memset(cmd, 0, sizeof(cmd));
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = GEN5_CMD_EXECUTE_PANEL_SCAN;
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 	if (error || resp_len != sizeof(resp_data) ||
 			!VALID_CMD_RESP_HEADER(resp_data,
 				GEN5_CMD_EXECUTE_PANEL_SCAN) ||
-			!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EAGAIN;
 
 	return 0;
@@ -2138,7 +2157,7 @@
 		int *raw_data_max, int *raw_data_min, int *raw_data_ave,
 		u8 *buffer)
 {
-	struct gen5_app_cmd_head *app_cmd_head;
+	struct pip_app_cmd_head *app_cmd_head;
 	struct gen5_retrieve_panel_scan_data *panel_sacn_data;
 	u8 cmd[12];
 	u8 resp_data[256];  /* Max bytes can transfer one time. */
@@ -2166,10 +2185,10 @@
 	/* Assume max element size is 4 currently. */
 	read_elements = (256 - GEN5_RESP_DATA_STRUCTURE_OFFSET) / 4;
 	read_len = read_elements * 4;
-	app_cmd_head = (struct gen5_app_cmd_head *)cmd;
-	put_unaligned_le16(GEN5_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
+	app_cmd_head = (struct pip_app_cmd_head *)cmd;
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &app_cmd_head->addr);
 	put_unaligned_le16(sizeof(cmd) - 2, &app_cmd_head->length);
-	app_cmd_head->report_id = GEN5_APP_CMD_REPORT_ID;
+	app_cmd_head->report_id = PIP_APP_CMD_REPORT_ID;
 	app_cmd_head->cmd_code = cmd_code;
 	panel_sacn_data = (struct gen5_retrieve_panel_scan_data *)
 			app_cmd_head->parameter_data;
@@ -2183,10 +2202,10 @@
 		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
 			cmd, sizeof(cmd),
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_app_resp_data, true);
+			500, cyapa_sort_tsg_pip_app_resp_data, true);
 		if (error || resp_len < GEN5_RESP_DATA_STRUCTURE_OFFSET ||
 				!VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
-				!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]) ||
+				!PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
 				resp_data[6] != raw_data_type)
 			return error ? error : -EAGAIN;
 
@@ -2245,11 +2264,11 @@
 	int error, resume_error;
 	int size;
 
-	if (cyapa->state != CYAPA_STATE_GEN5_APP)
+	if (!cyapa_is_pip_app_mode(cyapa))
 		return -EBUSY;
 
 	/* 1. Suspend Scanning*/
-	error = cyapa_gen5_suspend_scanning(cyapa);
+	error = cyapa_pip_suspend_scanning(cyapa);
 	if (error)
 		return error;
 
@@ -2270,7 +2289,7 @@
 	if (error)
 		goto resume_scanning;
 
-	/* 4. Execuate panel scan. It must be executed before read data. */
+	/* 4. Execute panel scan. It must be executed before read data. */
 	error = cyapa_gen5_execute_panel_scan(cyapa);
 	if (error)
 		goto resume_scanning;
@@ -2343,7 +2362,7 @@
 
 resume_scanning:
 	/* 11. Resume Scanning*/
-	resume_error = cyapa_gen5_resume_scanning(cyapa);
+	resume_error = cyapa_pip_resume_scanning(cyapa);
 	if (resume_error || error)
 		return resume_error ? resume_error : error;
 
@@ -2364,7 +2383,7 @@
 	return size;
 }
 
-static bool cyapa_gen5_sort_system_info_data(struct cyapa *cyapa,
+bool cyapa_pip_sort_system_info_data(struct cyapa *cyapa,
 		u8 *buf, int len)
 {
 	/* Check the report id and command code */
@@ -2376,20 +2395,17 @@
 
 static int cyapa_gen5_bl_query_data(struct cyapa *cyapa)
 {
-	u8 bl_query_data_cmd[] = { 0x04, 0x00, 0x0b, 0x00, 0x40, 0x00,
-		0x01, 0x3c, 0x00, 0x00, 0xb0, 0x42, 0x17
-	};
-	u8 resp_data[GEN5_BL_READ_APP_INFO_RESP_LEN];
+	u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
 	int resp_len;
 	int error;
 
-	resp_len = GEN5_BL_READ_APP_INFO_RESP_LEN;
+	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-			bl_query_data_cmd, sizeof(bl_query_data_cmd),
+			pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
 			resp_data, &resp_len,
-			500, cyapa_gen5_sort_tsg_pip_bl_resp_data, false);
-	if (error || resp_len != GEN5_BL_READ_APP_INFO_RESP_LEN ||
-		!GEN5_CMD_COMPLETE_SUCCESS(resp_data[5]))
+			500, cyapa_sort_tsg_pip_bl_resp_data, false);
+	if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+		!PIP_CMD_COMPLETE_SUCCESS(resp_data))
 		return error ? error : -EIO;
 
 	memcpy(&cyapa->product_id[0], &resp_data[8], 5);
@@ -2402,34 +2418,42 @@
 	cyapa->fw_maj_ver = resp_data[22];
 	cyapa->fw_min_ver = resp_data[23];
 
+	cyapa->platform_ver = (resp_data[26] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+
 	return 0;
 }
 
 static int cyapa_gen5_get_query_data(struct cyapa *cyapa)
 {
-	u8 get_system_information[] = {
-		0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x02
-	};
-	u8 resp_data[71];
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
 	int resp_len;
 	u16 product_family;
 	int error;
 
 	resp_len = sizeof(resp_data);
 	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
-			get_system_information, sizeof(get_system_information),
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
 			resp_data, &resp_len,
-			2000, cyapa_gen5_sort_system_info_data, false);
+			2000, cyapa_pip_sort_system_info_data, false);
 	if (error || resp_len < sizeof(resp_data))
 		return error ? error : -EIO;
 
 	product_family = get_unaligned_le16(&resp_data[7]);
-	if ((product_family & GEN5_PRODUCT_FAMILY_MASK) !=
-		GEN5_PRODUCT_FAMILY_TRACKPAD)
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
 		return -EINVAL;
 
-	cyapa->fw_maj_ver = resp_data[15];
-	cyapa->fw_min_ver = resp_data[16];
+	cyapa->platform_ver = (resp_data[49] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+	if (cyapa->gen == CYAPA_GEN5 && cyapa->platform_ver < 2) {
+		/* Gen5 firmware that does not support proximity. */
+		cyapa->fw_maj_ver = resp_data[15];
+		cyapa->fw_min_ver = resp_data[16];
+	} else {
+		cyapa->fw_maj_ver = resp_data[9];
+		cyapa->fw_min_ver = resp_data[10];
+	}
 
 	cyapa->electrodes_x = resp_data[52];
 	cyapa->electrodes_y = resp_data[53];
@@ -2472,9 +2496,9 @@
 
 	switch (cyapa->state) {
 	case CYAPA_STATE_GEN5_BL:
-		error = cyapa_gen5_bl_exit(cyapa);
+		error = cyapa_pip_bl_exit(cyapa);
 		if (error) {
-			/* Rry to update trackpad product information. */
+			/* Try to update trackpad product information. */
 			cyapa_gen5_bl_query_data(cyapa);
 			goto out;
 		}
@@ -2486,14 +2510,23 @@
 		 * If trackpad device in deep sleep mode,
 		 * the app command will fail.
 		 * So always try to reset trackpad device to full active when
-		 * the device state is requeried.
+		 * the device state is required.
 		 */
 		error = cyapa_gen5_set_power_mode(cyapa,
-				PWR_MODE_FULL_ACTIVE, 0);
+				PWR_MODE_FULL_ACTIVE, 0, false);
 		if (error)
 			dev_warn(dev, "%s: failed to set power active mode.\n",
 				__func__);
 
+		/* By default, the trackpad proximity function is enabled. */
+		if (cyapa->platform_ver >= 2) {
+			error = cyapa_pip_set_proximity(cyapa, true);
+			if (error)
+				dev_warn(dev,
+					"%s: failed to enable proximity.\n",
+					__func__);
+		}
+
 		/* Get trackpad product information. */
 		error = cyapa_gen5_get_query_data(cyapa);
 		if (error)
@@ -2518,14 +2551,14 @@
  * Return false, do not continue process
  * Return true, continue process.
  */
-static bool cyapa_gen5_irq_cmd_handler(struct cyapa *cyapa)
+bool cyapa_pip_irq_cmd_handler(struct cyapa *cyapa)
 {
-	struct cyapa_gen5_cmd_states *gen5_pip = &cyapa->cmd_states.gen5;
+	struct cyapa_pip_cmd_states *pip = &cyapa->cmd_states.pip;
 	int length;
 
-	if (atomic_read(&gen5_pip->cmd_issued)) {
+	if (atomic_read(&pip->cmd_issued)) {
 		/* Polling command response data. */
-		if (gen5_pip->is_irq_mode == false)
+		if (pip->is_irq_mode == false)
 			return false;
 
 		/*
@@ -2533,59 +2566,64 @@
 		 * these output data may be caused by the user putting a finger
 		 * on the trackpad while the host is waiting for the command response.
 		 */
-		cyapa_i2c_pip_read(cyapa, gen5_pip->irq_cmd_buf,
-			GEN5_RESP_LENGTH_SIZE);
-		length = get_unaligned_le16(gen5_pip->irq_cmd_buf);
-		length = (length <= GEN5_RESP_LENGTH_SIZE) ?
-				GEN5_RESP_LENGTH_SIZE : length;
-		if (length > GEN5_RESP_LENGTH_SIZE)
+		cyapa_i2c_pip_read(cyapa, pip->irq_cmd_buf,
+			PIP_RESP_LENGTH_SIZE);
+		length = get_unaligned_le16(pip->irq_cmd_buf);
+		length = (length <= PIP_RESP_LENGTH_SIZE) ?
+				PIP_RESP_LENGTH_SIZE : length;
+		if (length > PIP_RESP_LENGTH_SIZE)
 			cyapa_i2c_pip_read(cyapa,
-				gen5_pip->irq_cmd_buf, length);
-
-		if (!(gen5_pip->resp_sort_func &&
-			gen5_pip->resp_sort_func(cyapa,
-				gen5_pip->irq_cmd_buf, length))) {
+				pip->irq_cmd_buf, length);
+		if (!(pip->resp_sort_func &&
+			pip->resp_sort_func(cyapa,
+				pip->irq_cmd_buf, length))) {
 			/*
-			 * Work around the Gen5 V1 firmware
-			 * that does not assert interrupt signalling
-			 * that command response is ready if user
-			 * keeps touching the trackpad while command
-			 * is sent to the device.
+			 * Work around a Gen5 V1 firmware issue:
+			 * no interrupt is asserted from the trackpad device
+			 * to the host for the command response ready event.
+			 * When a finger is touching the trackpad device, the
+			 * firmware output queue is never empty (it always
+			 * holds touch report data), so the interrupt signal
+			 * is not asserted again until the output queue has
+			 * been emptied.
+			 * This issue happens when the user keeps his/her
+			 * fingers on the trackpad device during system
+			 * booting/rebooting.
 			 */
 			length = 0;
-			if (gen5_pip->resp_len)
-				length = *gen5_pip->resp_len;
+			if (pip->resp_len)
+				length = *pip->resp_len;
 			cyapa_empty_pip_output_data(cyapa,
-					gen5_pip->resp_data,
+					pip->resp_data,
 					&length,
-					gen5_pip->resp_sort_func);
-			if (gen5_pip->resp_len && length != 0) {
-				*gen5_pip->resp_len = length;
-				atomic_dec(&gen5_pip->cmd_issued);
-				complete(&gen5_pip->cmd_ready);
+					pip->resp_sort_func);
+			if (pip->resp_len && length != 0) {
+				*pip->resp_len = length;
+				atomic_dec(&pip->cmd_issued);
+				complete(&pip->cmd_ready);
 			}
 			return false;
 		}
 
-		if (gen5_pip->resp_data && gen5_pip->resp_len) {
-			*gen5_pip->resp_len = (*gen5_pip->resp_len < length) ?
-				*gen5_pip->resp_len : length;
-			memcpy(gen5_pip->resp_data, gen5_pip->irq_cmd_buf,
-				*gen5_pip->resp_len);
+		if (pip->resp_data && pip->resp_len) {
+			*pip->resp_len = (*pip->resp_len < length) ?
+				*pip->resp_len : length;
+			memcpy(pip->resp_data, pip->irq_cmd_buf,
+				*pip->resp_len);
 		}
-		atomic_dec(&gen5_pip->cmd_issued);
-		complete(&gen5_pip->cmd_ready);
+		atomic_dec(&pip->cmd_issued);
+		complete(&pip->cmd_ready);
 		return false;
 	}
 
 	return true;
 }
 
-static void cyapa_gen5_report_buttons(struct cyapa *cyapa,
-		const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_buttons(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
-	u8 buttons = report_data->report_head[GEN5_BUTTONS_OFFSET];
+	u8 buttons = report_data->report_head[PIP_BUTTONS_OFFSET];
 
 	buttons = (buttons << CAPABILITY_BTN_SHIFT) & CAPABILITY_BTN_MASK;
 
@@ -2605,12 +2643,23 @@
 	input_sync(input);
 }
 
-static void cyapa_gen5_report_slot_data(struct cyapa *cyapa,
-		const struct cyapa_gen5_touch_record *touch)
+static void cyapa_pip_report_proximity(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
-	u8 event_id = GEN5_GET_EVENT_ID(touch->touch_tip_event_id);
-	int slot = GEN5_GET_TOUCH_ID(touch->touch_tip_event_id);
+	u8 distance = report_data->report_head[PIP_PROXIMITY_DISTANCE_OFFSET] &
+			PIP_PROXIMITY_DISTANCE_MASK;
+
+	input_report_abs(input, ABS_DISTANCE, distance);
+	input_sync(input);
+}
+
+static void cyapa_pip_report_slot_data(struct cyapa *cyapa,
+		const struct cyapa_pip_touch_record *touch)
+{
+	struct input_dev *input = cyapa->input;
+	u8 event_id = PIP_GET_EVENT_ID(touch->touch_tip_event_id);
+	int slot = PIP_GET_TOUCH_ID(touch->touch_tip_event_id);
 	int x, y;
 
 	if (event_id == RECORD_EVENT_LIFTOFF)
@@ -2621,11 +2670,12 @@
 	x = (touch->x_hi << 8) | touch->x_lo;
 	if (cyapa->x_origin)
 		x = cyapa->max_abs_x - x;
-	input_report_abs(input, ABS_MT_POSITION_X, x);
 	y = (touch->y_hi << 8) | touch->y_lo;
 	if (cyapa->y_origin)
 		y = cyapa->max_abs_y - y;
+	input_report_abs(input, ABS_MT_POSITION_X, x);
 	input_report_abs(input, ABS_MT_POSITION_Y, y);
+	input_report_abs(input, ABS_DISTANCE, 0);
 	input_report_abs(input, ABS_MT_PRESSURE,
 		touch->z);
 	input_report_abs(input, ABS_MT_TOUCH_MAJOR,
@@ -2642,50 +2692,49 @@
 		touch->orientation);
 }
 
-static void cyapa_gen5_report_touches(struct cyapa *cyapa,
-		const struct cyapa_gen5_report_data *report_data)
+static void cyapa_pip_report_touches(struct cyapa *cyapa,
+		const struct cyapa_pip_report_data *report_data)
 {
 	struct input_dev *input = cyapa->input;
 	unsigned int touch_num;
 	int i;
 
-	touch_num = report_data->report_head[GEN5_NUMBER_OF_TOUCH_OFFSET] &
-			GEN5_NUMBER_OF_TOUCH_MASK;
+	touch_num = report_data->report_head[PIP_NUMBER_OF_TOUCH_OFFSET] &
+			PIP_NUMBER_OF_TOUCH_MASK;
 
 	for (i = 0; i < touch_num; i++)
-		cyapa_gen5_report_slot_data(cyapa,
+		cyapa_pip_report_slot_data(cyapa,
 			&report_data->touch_records[i]);
 
 	input_mt_sync_frame(input);
 	input_sync(input);
 }
 
-static int cyapa_gen5_irq_handler(struct cyapa *cyapa)
+int cyapa_pip_irq_handler(struct cyapa *cyapa)
 {
 	struct device *dev = &cyapa->client->dev;
-	struct cyapa_gen5_report_data report_data;
-	int ret;
-	u8 report_id;
+	struct cyapa_pip_report_data report_data;
 	unsigned int report_len;
+	u8 report_id;
+	int ret;
 
-	if (cyapa->gen != CYAPA_GEN5 ||
-		cyapa->state != CYAPA_STATE_GEN5_APP) {
+	if (!cyapa_is_pip_app_mode(cyapa)) {
 		dev_err(dev, "invalid device state, gen=%d, state=0x%02x\n",
 			cyapa->gen, cyapa->state);
 		return -EINVAL;
 	}
 
 	ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data,
-			GEN5_RESP_LENGTH_SIZE);
-	if (ret != GEN5_RESP_LENGTH_SIZE) {
+			PIP_RESP_LENGTH_SIZE);
+	if (ret != PIP_RESP_LENGTH_SIZE) {
 		dev_err(dev, "failed to read length bytes, (%d)\n", ret);
 		return -EINVAL;
 	}
 
 	report_len = get_unaligned_le16(
-			&report_data.report_head[GEN5_RESP_LENGTH_OFFSET]);
-	if (report_len < GEN5_RESP_LENGTH_SIZE) {
-		/* Invliad length or internal reset happened. */
+			&report_data.report_head[PIP_RESP_LENGTH_OFFSET]);
+	if (report_len < PIP_RESP_LENGTH_SIZE) {
+		/* Invalid length or internal reset happened. */
 		dev_err(dev, "invalid report_len=%d. bytes: %02x %02x\n",
 			report_len, report_data.report_head[0],
 			report_data.report_head[1]);
@@ -2693,7 +2742,7 @@
 	}
 
 	/* Idle, no data for report. */
-	if (report_len == GEN5_RESP_LENGTH_SIZE)
+	if (report_len == PIP_RESP_LENGTH_SIZE)
 		return 0;
 
 	ret = cyapa_i2c_pip_read(cyapa, (u8 *)&report_data, report_len);
@@ -2703,70 +2752,92 @@
 		return -EINVAL;
 	}
 
-	report_id = report_data.report_head[GEN5_RESP_REPORT_ID_OFFSET];
-	if (report_id == GEN5_WAKEUP_EVENT_REPORT_ID &&
-			report_len == GEN5_WAKEUP_EVENT_SIZE) {
+	report_id = report_data.report_head[PIP_RESP_REPORT_ID_OFFSET];
+	if (report_id == PIP_WAKEUP_EVENT_REPORT_ID &&
+			report_len == PIP_WAKEUP_EVENT_SIZE) {
 		/*
 		 * Device wake event from deep sleep mode for touch.
 		 * This interrupt event is used to wake system up.
+		 *
+		 * Note:
+		 * It introduces about 20~40 ms of additional delay
+		 * before the first valid touch report data is received.
+		 * The time is spent executing the device runtime resume
+		 * process.
 		 */
+		pm_runtime_get_sync(dev);
+		pm_runtime_mark_last_busy(dev);
+		pm_runtime_put_sync_autosuspend(dev);
 		return 0;
-	} else if (report_id != GEN5_TOUCH_REPORT_ID &&
-			report_id != GEN5_BTN_REPORT_ID &&
+	} else if (report_id != PIP_TOUCH_REPORT_ID &&
+			report_id != PIP_BTN_REPORT_ID &&
 			report_id != GEN5_OLD_PUSH_BTN_REPORT_ID &&
-			report_id != GEN5_PUSH_BTN_REPORT_ID) {
+			report_id != PIP_PUSH_BTN_REPORT_ID &&
+			report_id != PIP_PROXIMITY_REPORT_ID) {
 		/* Running in BL mode or unknown response data read. */
 		dev_err(dev, "invalid report_id=0x%02x\n", report_id);
 		return -EINVAL;
 	}
 
-	if (report_id == GEN5_TOUCH_REPORT_ID &&
-		(report_len < GEN5_TOUCH_REPORT_HEAD_SIZE ||
-			report_len > GEN5_TOUCH_REPORT_MAX_SIZE)) {
+	if (report_id == PIP_TOUCH_REPORT_ID &&
+		(report_len < PIP_TOUCH_REPORT_HEAD_SIZE ||
+			report_len > PIP_TOUCH_REPORT_MAX_SIZE)) {
 		/* Invalid report data length for finger packet. */
 		dev_err(dev, "invalid touch packet length=%d\n", report_len);
 		return 0;
 	}
 
-	if ((report_id == GEN5_BTN_REPORT_ID ||
+	if ((report_id == PIP_BTN_REPORT_ID ||
 			report_id == GEN5_OLD_PUSH_BTN_REPORT_ID ||
-			report_id == GEN5_PUSH_BTN_REPORT_ID) &&
-		(report_len < GEN5_BTN_REPORT_HEAD_SIZE ||
-			report_len > GEN5_BTN_REPORT_MAX_SIZE)) {
+			report_id == PIP_PUSH_BTN_REPORT_ID) &&
+		(report_len < PIP_BTN_REPORT_HEAD_SIZE ||
+			report_len > PIP_BTN_REPORT_MAX_SIZE)) {
 		/* Invalid report data length of button packet. */
 		dev_err(dev, "invalid button packet length=%d\n", report_len);
 		return 0;
 	}
 
-	if (report_id == GEN5_TOUCH_REPORT_ID)
-		cyapa_gen5_report_touches(cyapa, &report_data);
+	if (report_id == PIP_PROXIMITY_REPORT_ID &&
+			report_len != PIP_PROXIMITY_REPORT_SIZE) {
+		/* Invalid report data length of proximity packet. */
+		dev_err(dev, "invalid proximity data, length=%d\n", report_len);
+		return 0;
+	}
+
+	if (report_id == PIP_TOUCH_REPORT_ID)
+		cyapa_pip_report_touches(cyapa, &report_data);
+	else if (report_id == PIP_PROXIMITY_REPORT_ID)
+		cyapa_pip_report_proximity(cyapa, &report_data);
 	else
-		cyapa_gen5_report_buttons(cyapa, &report_data);
+		cyapa_pip_report_buttons(cyapa, &report_data);
 
 	return 0;
 }
 
-static int cyapa_gen5_bl_activate(struct cyapa *cyapa) { return 0; }
-static int cyapa_gen5_bl_deactivate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_activate(struct cyapa *cyapa) { return 0; }
+int cyapa_pip_bl_deactivate(struct cyapa *cyapa) { return 0; }
+
 
 const struct cyapa_dev_ops cyapa_gen5_ops = {
-	.check_fw = cyapa_gen5_check_fw,
-	.bl_enter = cyapa_gen5_bl_enter,
-	.bl_initiate = cyapa_gen5_bl_initiate,
-	.update_fw = cyapa_gen5_do_fw_update,
-	.bl_activate = cyapa_gen5_bl_activate,
-	.bl_deactivate = cyapa_gen5_bl_deactivate,
+	.check_fw = cyapa_pip_check_fw,
+	.bl_enter = cyapa_pip_bl_enter,
+	.bl_initiate = cyapa_pip_bl_initiate,
+	.update_fw = cyapa_pip_do_fw_update,
+	.bl_activate = cyapa_pip_bl_activate,
+	.bl_deactivate = cyapa_pip_bl_deactivate,
 
 	.show_baseline = cyapa_gen5_show_baseline,
-	.calibrate_store = cyapa_gen5_do_calibrate,
+	.calibrate_store = cyapa_pip_do_calibrate,
 
-	.initialize = cyapa_gen5_initialize,
+	.initialize = cyapa_pip_cmd_state_initialize,
 
 	.state_parse = cyapa_gen5_state_parse,
 	.operational_check = cyapa_gen5_do_operational_check,
 
-	.irq_handler = cyapa_gen5_irq_handler,
-	.irq_cmd_handler = cyapa_gen5_irq_cmd_handler,
+	.irq_handler = cyapa_pip_irq_handler,
+	.irq_cmd_handler = cyapa_pip_irq_cmd_handler,
 	.sort_empty_output_data = cyapa_empty_pip_output_data,
 	.set_power_mode = cyapa_gen5_set_power_mode,
+
+	.set_proximity = cyapa_pip_set_proximity,
 };
diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
new file mode 100644
index 0000000..5f19107
--- /dev/null
+++ b/drivers/input/mouse/cyapa_gen6.c
@@ -0,0 +1,749 @@
+/*
+ * Cypress APA trackpad with I2C interface
+ *
+ * Author: Dudley Du <dudl@cypress.com>
+ *
+ * Copyright (C) 2015 Cypress Semiconductor, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <linux/crc-itu-t.h>
+#include "cyapa.h"
+
+
+#define GEN6_ENABLE_CMD_IRQ	0x41
+#define GEN6_DISABLE_CMD_IRQ	0x42
+#define GEN6_ENABLE_DEV_IRQ	0x43
+#define GEN6_DISABLE_DEV_IRQ	0x44
+
+#define GEN6_POWER_MODE_ACTIVE		0x01
+#define GEN6_POWER_MODE_LP_MODE1	0x02
+#define GEN6_POWER_MODE_LP_MODE2	0x03
+#define GEN6_POWER_MODE_BTN_ONLY	0x04
+
+#define GEN6_SET_POWER_MODE_INTERVAL	0x47
+#define GEN6_GET_POWER_MODE_INTERVAL	0x48
+
+#define GEN6_MAX_RX_NUM 14
+#define GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC	0x00
+#define GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM		0x12
+
+
+struct pip_app_cmd_head {
+	__le16 addr;
+	__le16 length;
+	u8 report_id;
+	u8 resv;  /* Reserved, must be 0 */
+	u8 cmd_code;  /* bit7: resv, set to 0; bit6~0: command code.*/
+} __packed;
+
+struct pip_app_resp_head {
+	__le16 length;
+	u8 report_id;
+	u8 resv;  /* Reserved, must be 0 */
+	u8 cmd_code;  /* bit7: TGL; bit6~0: command code.*/
+	/*
+	 * The value of data_status can be the first byte of data or
+	 * the command status or the unsupported command code depending on the
+	 * requested command code.
+	*/
+	u8 data_status;
+} __packed;
+
+struct pip_fixed_info {
+	u8 silicon_id_high;
+	u8 silicon_id_low;
+	u8 family_id;
+};
+
+static u8 pip_get_bl_info[] = {
+	0x04, 0x00, 0x0B, 0x00, 0x40, 0x00, 0x01, 0x38,
+	0x00, 0x00, 0x70, 0x9E, 0x17
+};
+
+static bool cyapa_sort_pip_hid_descriptor_data(struct cyapa *cyapa,
+		u8 *buf, int len)
+{
+	if (len != PIP_HID_DESCRIPTOR_SIZE)
+		return false;
+
+	if (buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID ||
+		buf[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+		return true;
+
+	return false;
+}
+
+static int cyapa_get_pip_fixed_info(struct cyapa *cyapa,
+		struct pip_fixed_info *pip_info, bool is_bootloader)
+{
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+	int resp_len;
+	u16 product_family;
+	int error;
+
+	if (is_bootloader) {
+		/* Read Bootloader Information to determine Gen5 or Gen6. */
+		resp_len = sizeof(resp_data);
+		error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+				pip_get_bl_info, sizeof(pip_get_bl_info),
+				resp_data, &resp_len,
+				2000, cyapa_sort_tsg_pip_bl_resp_data,
+				false);
+		if (error || resp_len < PIP_BL_GET_INFO_RESP_LENGTH)
+			return error ? error : -EIO;
+
+		pip_info->family_id = resp_data[8];
+		pip_info->silicon_id_low = resp_data[10];
+		pip_info->silicon_id_high = resp_data[11];
+
+		return 0;
+	}
+
+	/* Get App System Information to determine Gen5 or Gen6. */
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			2000, cyapa_pip_sort_system_info_data, false);
+	if (error || resp_len < PIP_READ_SYS_INFO_RESP_LENGTH)
+		return error ? error : -EIO;
+
+	product_family = get_unaligned_le16(&resp_data[7]);
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
+		return -EINVAL;
+
+	pip_info->family_id = resp_data[19];
+	pip_info->silicon_id_low = resp_data[21];
+	pip_info->silicon_id_high = resp_data[22];
+
+	return 0;
+
+}
+
+int cyapa_pip_state_parse(struct cyapa *cyapa, u8 *reg_data, int len)
+{
+	u8 cmd[] = { 0x01, 0x00};
+	struct pip_fixed_info pip_info;
+	u8 resp_data[PIP_HID_DESCRIPTOR_SIZE];
+	int resp_len;
+	bool is_bootloader;
+	int error;
+
+	cyapa->state = CYAPA_STATE_NO_DEVICE;
+
+	/* Try to wake the device if it is in deep sleep state. */
+	cyapa_pip_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+
+	/* Empty the buffer queue to get fresh data with later commands. */
+	cyapa_empty_pip_output_data(cyapa, NULL, NULL, NULL);
+
+	/*
+	 * Read description info from the trackpad device to determine whether
+	 * it is running in APP mode or Bootloader mode.
+	 */
+	resp_len = PIP_HID_DESCRIPTOR_SIZE;
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			300,
+			cyapa_sort_pip_hid_descriptor_data,
+			false);
+	if (error)
+		return error;
+
+	if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_BL_REPORT_ID)
+		is_bootloader = true;
+	else if (resp_data[PIP_RESP_REPORT_ID_OFFSET] == PIP_HID_APP_REPORT_ID)
+		is_bootloader = false;
+	else
+		return -EAGAIN;
+
+	/* Get PIP fixed information to determine Gen5 or Gen6. */
+	memset(&pip_info, 0, sizeof(struct pip_fixed_info));
+	error = cyapa_get_pip_fixed_info(cyapa, &pip_info, is_bootloader);
+	if (error)
+		return error;
+
+	if (pip_info.family_id == 0x9B && pip_info.silicon_id_high == 0x0B) {
+		cyapa->gen = CYAPA_GEN6;
+		cyapa->state = is_bootloader ? CYAPA_STATE_GEN6_BL
+					     : CYAPA_STATE_GEN6_APP;
+	} else if (pip_info.family_id == 0x91 &&
+		   pip_info.silicon_id_high == 0x02) {
+		cyapa->gen = CYAPA_GEN5;
+		cyapa->state = is_bootloader ? CYAPA_STATE_GEN5_BL
+					     : CYAPA_STATE_GEN5_APP;
+	}
+
+	return 0;
+}
+
+static int cyapa_gen6_read_sys_info(struct cyapa *cyapa)
+{
+	u8 resp_data[PIP_READ_SYS_INFO_RESP_LENGTH];
+	int resp_len;
+	u16 product_family;
+	u8 rotat_align;
+	int error;
+
+	/* Get App System Information to determine Gen5 or Gen6. */
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_read_sys_info, PIP_READ_SYS_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			2000, cyapa_pip_sort_system_info_data, false);
+	if (error || resp_len < sizeof(resp_data))
+		return error ? error : -EIO;
+
+	product_family = get_unaligned_le16(&resp_data[7]);
+	if ((product_family & PIP_PRODUCT_FAMILY_MASK) !=
+		PIP_PRODUCT_FAMILY_TRACKPAD)
+		return -EINVAL;
+
+	cyapa->platform_ver = (resp_data[67] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+	cyapa->fw_maj_ver = resp_data[9];
+	cyapa->fw_min_ver = resp_data[10];
+
+	cyapa->electrodes_x = resp_data[33];
+	cyapa->electrodes_y = resp_data[34];
+
+	cyapa->physical_size_x =  get_unaligned_le16(&resp_data[35]) / 100;
+	cyapa->physical_size_y = get_unaligned_le16(&resp_data[37]) / 100;
+
+	cyapa->max_abs_x = get_unaligned_le16(&resp_data[39]);
+	cyapa->max_abs_y = get_unaligned_le16(&resp_data[41]);
+
+	cyapa->max_z = get_unaligned_le16(&resp_data[43]);
+
+	cyapa->x_origin = resp_data[45] & 0x01;
+	cyapa->y_origin = resp_data[46] & 0x01;
+
+	cyapa->btn_capability = (resp_data[70] << 3) & CAPABILITY_BTN_MASK;
+
+	memcpy(&cyapa->product_id[0], &resp_data[51], 5);
+	cyapa->product_id[5] = '-';
+	memcpy(&cyapa->product_id[6], &resp_data[56], 6);
+	cyapa->product_id[12] = '-';
+	memcpy(&cyapa->product_id[13], &resp_data[62], 2);
+	cyapa->product_id[15] = '\0';
+
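+	/*
+	 * The rotation/alignment flag reported by the device selects which
+	 * physical axis supplies the RX electrode count.
+	 */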
+	rotat_align = resp_data[68];
+	if (rotat_align)
+		cyapa->electrodes_rx = cyapa->electrodes_y;
+	else
+		cyapa->electrodes_rx = cyapa->electrodes_x;
+	cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u;
+
+	if (!cyapa->electrodes_x || !cyapa->electrodes_y ||
+		!cyapa->physical_size_x || !cyapa->physical_size_y ||
+		!cyapa->max_abs_x || !cyapa->max_abs_y || !cyapa->max_z)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cyapa_gen6_bl_read_app_info(struct cyapa *cyapa)
+{
+	u8 resp_data[PIP_BL_APP_INFO_RESP_LENGTH];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			pip_bl_read_app_info, PIP_BL_READ_APP_INFO_CMD_LENGTH,
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_bl_resp_data, false);
+	if (error || resp_len < PIP_BL_APP_INFO_RESP_LENGTH ||
+		!PIP_CMD_COMPLETE_SUCCESS(resp_data))
+		return error ? error : -EIO;
+
+	cyapa->fw_maj_ver = resp_data[8];
+	cyapa->fw_min_ver = resp_data[9];
+
+	cyapa->platform_ver = (resp_data[12] >> PIP_BL_PLATFORM_VER_SHIFT) &
+			      PIP_BL_PLATFORM_VER_MASK;
+
+	memcpy(&cyapa->product_id[0], &resp_data[13], 5);
+	cyapa->product_id[5] = '-';
+	memcpy(&cyapa->product_id[6], &resp_data[18], 6);
+	cyapa->product_id[12] = '-';
+	memcpy(&cyapa->product_id[13], &resp_data[24], 2);
+	cyapa->product_id[15] = '\0';
+
+	return 0;
+}
+
+static int cyapa_gen6_config_dev_irq(struct cyapa *cyapa, u8 cmd_code)
+{
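+	/*
+	 * PIP output report: address 0x0004, length 0x0005, report ID 0x2f,
+	 * reserved byte, then the IRQ enable/disable command code.
+	 */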
+	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, cmd_code };
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, cmd_code) ||
+			!PIP_CMD_COMPLETE_SUCCESS(resp_data))
+		return error < 0 ? error : -EINVAL;
+
+	return 0;
+}
+
+static int cyapa_gen6_set_proximity(struct cyapa *cyapa, bool enable)
+{
+	int error;
+
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+	error = cyapa_pip_set_proximity(cyapa, enable);
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+
+	return error;
+}
+
+static int cyapa_gen6_change_power_state(struct cyapa *cyapa, u8 power_mode)
+{
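+	/*
+	 * PIP output report: address 0x0004, length 0x0006, report ID 0x2f,
+	 * reserved byte, command code 0x46 (set power mode) and the mode.
+	 */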
+	u8 cmd[] = { 0x04, 0x00, 0x06, 0x00, 0x2f, 0x00, 0x46, power_mode };
+	u8 resp_data[6];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error || !VALID_CMD_RESP_HEADER(resp_data, 0x46))
+		return error < 0 ? error : -EINVAL;
+
+	/* The device did not switch to the requested power state. */
+	if (resp_data[5] != power_mode)
+		return -EAGAIN;
+
+	return 0;
+}
+
+static int cyapa_gen6_set_interval_setting(struct cyapa *cyapa,
+		struct gen6_interval_setting *interval_setting)
+{
+	struct gen6_set_interval_cmd {
+		__le16 addr;
+		__le16 length;
+		u8 report_id;
+		u8 rsvd;  /* Reserved, must be 0 */
+		u8 cmd_code;
+		__le16 active_interval;
+		__le16 lp1_interval;
+		__le16 lp2_interval;
+	} __packed set_interval_cmd;
+	u8 resp_data[11];
+	int resp_len;
+	int error;
+
+	memset(&set_interval_cmd, 0, sizeof(set_interval_cmd));
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &set_interval_cmd.addr);
+	put_unaligned_le16(sizeof(set_interval_cmd) - 2,
+			   &set_interval_cmd.length);
+	set_interval_cmd.report_id = PIP_APP_CMD_REPORT_ID;
+	set_interval_cmd.cmd_code = GEN6_SET_POWER_MODE_INTERVAL;
+	put_unaligned_le16(interval_setting->active_interval,
+			   &set_interval_cmd.active_interval);
+	put_unaligned_le16(interval_setting->lp1_interval,
+			   &set_interval_cmd.lp1_interval);
+	put_unaligned_le16(interval_setting->lp2_interval,
+			   &set_interval_cmd.lp2_interval);
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+			(u8 *)&set_interval_cmd, sizeof(set_interval_cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error ||
+		!VALID_CMD_RESP_HEADER(resp_data, GEN6_SET_POWER_MODE_INTERVAL))
+		return error < 0 ? error : -EINVAL;
+
+	/* Get the real set intervals from response. */
+	interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+	interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+	interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+	return 0;
+}
+
+static int cyapa_gen6_get_interval_setting(struct cyapa *cyapa,
+		struct gen6_interval_setting *interval_setting)
+{
+	u8 cmd[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00,
+		     GEN6_GET_POWER_MODE_INTERVAL };
+	u8 resp_data[11];
+	int resp_len;
+	int error;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa, cmd, sizeof(cmd),
+			resp_data, &resp_len,
+			500, cyapa_sort_tsg_pip_app_resp_data, false);
+	if (error ||
+		!VALID_CMD_RESP_HEADER(resp_data, GEN6_GET_POWER_MODE_INTERVAL))
+		return error < 0 ? error : -EINVAL;
+
+	interval_setting->active_interval = get_unaligned_le16(&resp_data[5]);
+	interval_setting->lp1_interval = get_unaligned_le16(&resp_data[7]);
+	interval_setting->lp2_interval = get_unaligned_le16(&resp_data[9]);
+
+	return 0;
+}
+
+static int cyapa_gen6_deep_sleep(struct cyapa *cyapa, u8 state)
+{
+	u8 ping[] = { 0x04, 0x00, 0x05, 0x00, 0x2f, 0x00, 0x00 };
+
+	if (state == PIP_DEEP_SLEEP_STATE_ON)
+		/*
+		 * Send a ping command so the device prepares to wake up from
+		 * deep sleep mode. The device will not respond to the ping
+		 * itself other than with an I2C NAK.
+		 */
+		cyapa_i2c_pip_write(cyapa, ping, sizeof(ping));
+
+	return cyapa_pip_deep_sleep(cyapa, state);
+}
+
+static int cyapa_gen6_set_power_mode(struct cyapa *cyapa,
+		u8 power_mode, u16 sleep_time, bool is_suspend)
+{
+	struct device *dev = &cyapa->client->dev;
+	struct gen6_interval_setting *interval_setting =
+			&cyapa->gen6_interval_setting;
+	u8 lp_mode;
+	int error;
+
+	if (cyapa->state != CYAPA_STATE_GEN6_APP)
+		return 0;
+
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == UNINIT_PWR_MODE) {
+		/*
+		 * Assume the trackpad is in deep sleep mode when the driver is
+		 * loaded: it may have been left in deep sleep by a previous
+		 * driver unload, which would otherwise break command I/O on
+		 * reload.
+		 */
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+	}
+
+	if (PIP_DEV_UNINIT_SLEEP_TIME(cyapa) &&
+		PIP_DEV_GET_PWR_STATE(cyapa) != PWR_MODE_OFF)
+		PIP_DEV_SET_SLEEP_TIME(cyapa, UNINIT_SLEEP_TIME);
+
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == power_mode) {
+		if (power_mode == PWR_MODE_OFF ||
+			power_mode == PWR_MODE_FULL_ACTIVE ||
+			power_mode == PWR_MODE_BTN_ONLY ||
+			PIP_DEV_GET_SLEEP_TIME(cyapa) == sleep_time) {
+			/* Already in the requested power mode; nothing to do. */
+			return 0;
+		}
+	}
+
+	if (power_mode == PWR_MODE_OFF) {
+		cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+		error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_OFF);
+		if (error) {
+			dev_err(dev, "enter deep sleep fail: %d\n", error);
+			return error;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_OFF);
+		return 0;
+	}
+
+	/*
+	 * When the trackpad is powered off it cannot switch to another power
+	 * state directly; it must first be woken from deep sleep before the
+	 * next power state change can be made.
+	 */
+	if (PIP_DEV_GET_PWR_STATE(cyapa) == PWR_MODE_OFF) {
+		error = cyapa_gen6_deep_sleep(cyapa, PIP_DEEP_SLEEP_STATE_ON);
+		if (error) {
+			dev_err(dev, "deep sleep wake fail: %d\n", error);
+			return error;
+		}
+	}
+
+	/*
+	 * Disable the interrupts the device asserts for command responses, so
+	 * that they do not disturb the system suspend or hibernation process.
+	 */
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_DISABLE_CMD_IRQ);
+
+	if (power_mode == PWR_MODE_FULL_ACTIVE) {
+		error = cyapa_gen6_change_power_state(cyapa,
+				GEN6_POWER_MODE_ACTIVE);
+		if (error) {
+			dev_err(dev, "change to active fail: %d\n", error);
+			goto out;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_FULL_ACTIVE);
+
+		/* Sync the interval setting from device. */
+		cyapa_gen6_get_interval_setting(cyapa, interval_setting);
+
+	} else if (power_mode == PWR_MODE_BTN_ONLY) {
+		error = cyapa_gen6_change_power_state(cyapa,
+				GEN6_POWER_MODE_BTN_ONLY);
+		if (error) {
+			dev_err(dev, "failed to enter button only mode: %d\n",
+				error);
+			goto out;
+		}
+
+		PIP_DEV_SET_PWR_STATE(cyapa, PWR_MODE_BTN_ONLY);
+	} else {
+		/*
+		 * Gen6 devices support two low-power scan intervals
+		 * internally, which allows switching power modes quickly,
+		 * e.g. for runtime suspend and system suspend.
+		 */
+		if (interval_setting->lp1_interval == sleep_time) {
+			lp_mode = GEN6_POWER_MODE_LP_MODE1;
+		} else if (interval_setting->lp2_interval == sleep_time) {
+			lp_mode = GEN6_POWER_MODE_LP_MODE2;
+		} else {
+			if (interval_setting->lp1_interval == 0) {
+				interval_setting->lp1_interval = sleep_time;
+				lp_mode = GEN6_POWER_MODE_LP_MODE1;
+			} else {
+				interval_setting->lp2_interval = sleep_time;
+				lp_mode = GEN6_POWER_MODE_LP_MODE2;
+			}
+			cyapa_gen6_set_interval_setting(cyapa,
+							interval_setting);
+		}
+
+		error = cyapa_gen6_change_power_state(cyapa, lp_mode);
+		if (error) {
+			dev_err(dev, "set power state to 0x%02x failed: %d\n",
+				lp_mode, error);
+			goto out;
+		}
+
+		PIP_DEV_SET_SLEEP_TIME(cyapa, sleep_time);
+		PIP_DEV_SET_PWR_STATE(cyapa,
+			cyapa_sleep_time_to_pwr_cmd(sleep_time));
+	}
+
+out:
+	cyapa_gen6_config_dev_irq(cyapa, GEN6_ENABLE_CMD_IRQ);
+	return error;
+}
+
+static int cyapa_gen6_initialize(struct cyapa *cyapa)
+{
+	return 0;
+}
+
+static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
+		u16 read_offset, u16 read_len, u8 data_id,
+		u8 *data, int *data_buf_lens)
+{
+	struct retrieve_data_struct_cmd {
+		struct pip_app_cmd_head head;
+		__le16 read_offset;
+		__le16 read_length;
+		u8 data_id;
+	} __packed cmd;
+	u8 resp_data[GEN6_MAX_RX_NUM + 10];
+	int resp_len;
+	int error;
+
+	memset(&cmd, 0, sizeof(cmd));
+	put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
+	put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
+	cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
+	cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
+	put_unaligned_le16(read_offset, &cmd.read_offset);
+	put_unaligned_le16(read_len, &cmd.read_length);
+	cmd.data_id = data_id;
+
+	resp_len = sizeof(resp_data);
+	error = cyapa_i2c_pip_cmd_irq_sync(cyapa,
+				(u8 *)&cmd, sizeof(cmd),
+				resp_data, &resp_len,
+				500, cyapa_sort_tsg_pip_app_resp_data,
+				true);
+	if (error || !PIP_CMD_COMPLETE_SUCCESS(resp_data) ||
+		resp_data[6] != data_id ||
+		!VALID_CMD_RESP_HEADER(resp_data, PIP_RETRIEVE_DATA_STRUCTURE))
+		return (error < 0) ? error : -EAGAIN;
+
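+	/*
+	 * The returned data length is a 16-bit LE value at offset 7; the
+	 * payload itself starts at offset 10.
+	 */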
+	read_len = get_unaligned_le16(&resp_data[7]);
+	if (*data_buf_lens < read_len) {
+		*data_buf_lens = read_len;
+		return -ENOBUFS;
+	}
+
+	memcpy(data, &resp_data[10], read_len);
+	*data_buf_lens = read_len;
+	return 0;
+}
+
+static ssize_t cyapa_gen6_show_baseline(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct cyapa *cyapa = dev_get_drvdata(dev);
+	u8 data[GEN6_MAX_RX_NUM];
+	int data_len;
+	int size = 0;
+	int i;
+	int error;
+	int resume_error;
+
+	if (!cyapa_is_pip_app_mode(cyapa))
+		return -EBUSY;
+
+	/* 1. Suspend Scanning */
+	error = cyapa_pip_suspend_scanning(cyapa);
+	if (error)
+		return error;
+
+	/* 2. IDAC and RX Attenuator Calibration Data (Center Frequency). */
+	data_len = sizeof(data);
+	error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+			GEN6_RETRIEVE_DATA_ID_RX_ATTENURATOR_IDAC,
+			data, &data_len);
+	if (error)
+		goto resume_scanning;
+
+	size = scnprintf(buf, PAGE_SIZE, "%d %d %d %d %d %d ",
+			data[0],  /* RX Attenuator Mutual */
+			data[1],  /* IDAC Mutual */
+			data[2],  /* RX Attenuator Self RX */
+			data[3],  /* IDAC Self RX */
+			data[4],  /* RX Attenuator Self TX */
+			data[5]	  /* IDAC Self TX */
+			);
+
+	/* 3. Read Attenuator Trim. */
+	data_len = sizeof(data);
+	error = cyapa_pip_retrieve_data_structure(cyapa, 0, data_len,
+			GEN6_RETRIEVE_DATA_ID_ATTENURATOR_TRIM,
+			data, &data_len);
+	if (error)
+		goto resume_scanning;
+
+	/* Append the attenuator trim values to the output. */
+	for (i = 0; i < data_len; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%d ", data[i]);
+	size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+
+resume_scanning:
+	/* 4. Resume Scanning */
+	resume_error = cyapa_pip_resume_scanning(cyapa);
+	if (resume_error || error) {
+		memset(buf, 0, PAGE_SIZE);
+		return resume_error ? resume_error : error;
+	}
+
+	return size;
+}
+
+static int cyapa_gen6_operational_check(struct cyapa *cyapa)
+{
+	struct device *dev = &cyapa->client->dev;
+	int error;
+
+	if (cyapa->gen != CYAPA_GEN6)
+		return -ENODEV;
+
+	switch (cyapa->state) {
+	case CYAPA_STATE_GEN6_BL:
+		error = cyapa_pip_bl_exit(cyapa);
+		if (error) {
+			/* Try to update trackpad product information. */
+			cyapa_gen6_bl_read_app_info(cyapa);
+			goto out;
+		}
+
+		cyapa->state = CYAPA_STATE_GEN6_APP;
+
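+		/* Fall through to finish bringing up application mode. */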
+	case CYAPA_STATE_GEN6_APP:
+		/*
+		 * If the trackpad is in deep sleep mode, application commands
+		 * will fail, so always try to bring the device back to full
+		 * active mode before querying its state.
+		 */
+		error = cyapa_gen6_set_power_mode(cyapa,
+				PWR_MODE_FULL_ACTIVE, 0, false);
+		if (error)
+			dev_warn(dev, "%s: failed to set power active mode.\n",
+				__func__);
+
+		/* By default, the trackpad proximity function is enabled. */
+		error = cyapa_pip_set_proximity(cyapa, true);
+		if (error)
+			dev_warn(dev, "%s: failed to enable proximity.\n",
+				__func__);
+
+		/* Get trackpad product information. */
+		error = cyapa_gen6_read_sys_info(cyapa);
+		if (error)
+			goto out;
+		/* Only support product ID starting with CYTRA */
+		if (memcmp(cyapa->product_id, product_id,
+				strlen(product_id)) != 0) {
+			dev_err(dev, "%s: unknown product ID (%s)\n",
+				__func__, cyapa->product_id);
+			error = -EINVAL;
+		}
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+out:
+	return error;
+}
+
+const struct cyapa_dev_ops cyapa_gen6_ops = {
+	.check_fw = cyapa_pip_check_fw,
+	.bl_enter = cyapa_pip_bl_enter,
+	.bl_initiate = cyapa_pip_bl_initiate,
+	.update_fw = cyapa_pip_do_fw_update,
+	.bl_activate = cyapa_pip_bl_activate,
+	.bl_deactivate = cyapa_pip_bl_deactivate,
+
+	.show_baseline = cyapa_gen6_show_baseline,
+	.calibrate_store = cyapa_pip_do_calibrate,
+
+	.initialize = cyapa_gen6_initialize,
+
+	.state_parse = cyapa_pip_state_parse,
+	.operational_check = cyapa_gen6_operational_check,
+
+	.irq_handler = cyapa_pip_irq_handler,
+	.irq_cmd_handler = cyapa_pip_irq_cmd_handler,
+	.sort_empty_output_data = cyapa_empty_pip_output_data,
+	.set_power_mode = cyapa_gen6_set_power_mode,
+
+	.set_proximity = cyapa_gen6_set_proximity,
+};
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5b5f403..e2b7420 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -4,7 +4,7 @@
  * Copyright (c) 2013 ELAN Microelectronics Corp.
  *
  * Author: 林政維 (Duson Lin) <dusonlin@emc.com.tw>
- * Version: 1.5.9
+ * Version: 1.6.0
  *
  * Based on cyapa driver:
  * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
@@ -40,7 +40,7 @@
 #include "elan_i2c.h"
 
 #define DRIVER_NAME		"elan_i2c"
-#define ELAN_DRIVER_VERSION	"1.5.9"
+#define ELAN_DRIVER_VERSION	"1.6.0"
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15
@@ -84,7 +84,7 @@
 	int			pressure_adjustment;
 	u8			mode;
 	u8			ic_type;
-	u16			fw_vaildpage_count;
+	u16			fw_validpage_count;
 	u16			fw_signature_address;
 
 	bool			irq_wake;
@@ -94,25 +94,28 @@
 	bool			baseline_ready;
 };
 
-static int elan_get_fwinfo(u8 ic_type, u16 *vaildpage_count,
+static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
 			   u16 *signature_address)
 {
-	switch(ic_type) {
+	switch (iap_version) {
+	case 0x08:
+		*validpage_count = 512;
+		break;
 	case 0x09:
-		*vaildpage_count = 768;
+		*validpage_count = 768;
 		break;
 	case 0x0D:
-		*vaildpage_count = 896;
+		*validpage_count = 896;
 		break;
 	default:
 		/* unknown ic type clear value */
-		*vaildpage_count = 0;
+		*validpage_count = 0;
 		*signature_address = 0;
 		return -ENXIO;
 	}
 
 	*signature_address =
-		(*vaildpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
+		(*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;
 
 	return 0;
 }
@@ -261,11 +264,11 @@
 	if (error)
 		return error;
 
-	error = elan_get_fwinfo(data->ic_type, &data->fw_vaildpage_count,
+	error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
 				&data->fw_signature_address);
 	if (error) {
 		dev_err(&data->client->dev,
-			"unknown ic type %d\n", data->ic_type);
+			"unknown iap version %d\n", data->iap_version);
 		return error;
 	}
 
@@ -353,7 +356,7 @@
 	iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);
 
 	boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE;
-	for (i = boot_page_count; i < data->fw_vaildpage_count; i++) {
+	for (i = boot_page_count; i < data->fw_validpage_count; i++) {
 		u16 checksum = 0;
 		const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE];
 
@@ -1165,6 +1168,8 @@
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0000", 0 },
+	{ "ELAN0100", 0 },
+	{ "ELAN0600", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, elan_acpi_id);
@@ -1181,10 +1186,10 @@
 static struct i2c_driver elan_driver = {
 	.driver = {
 		.name	= DRIVER_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &elan_pm_ops,
 		.acpi_match_table = ACPI_PTR(elan_acpi_id),
 		.of_match_table = of_match_ptr(elan_of_match),
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 	.probe		= elan_probe,
 	.id_table	= elan_id,
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index ec347703..ad18dab 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1540,6 +1540,10 @@
 	if (error)
 		goto err_clear_drvdata;
 
+	/* give PT device some time to settle down before probing */
+	if (serio->id.type == SERIO_PS_PSTHRU)
+		usleep_range(10000, 15000);
+
 	if (psmouse_probe(psmouse) < 0) {
 		error = -ENODEV;
 		goto err_close_serio;
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index cc7e0d4..11c32ac 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -432,7 +432,7 @@
 static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
 				   const char *buf, size_t count)
 {
-	int reg, val;
+	unsigned int reg, val;
 	char *rest;
 	ssize_t retval;
 
@@ -440,7 +440,7 @@
 	if (rest == buf || *rest != ' ' || reg > 0xff)
 		return -EINVAL;
 
-	retval = kstrtoint(rest + 1, 16, &val);
+	retval = kstrtouint(rest + 1, 16, &val);
 	if (retval)
 		return retval;
 
@@ -476,9 +476,10 @@
 					const char *buf, size_t count)
 {
 	struct fsp_data *pad = psmouse->private;
-	int reg, val, err;
+	unsigned int reg, val;
+	int err;
 
-	err = kstrtoint(buf, 16, &reg);
+	err = kstrtouint(buf, 16, &reg);
 	if (err)
 		return err;
 
@@ -511,9 +512,10 @@
 static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
 					const char *buf, size_t count)
 {
-	int val, err;
+	unsigned int val;
+	int err;
 
-	err = kstrtoint(buf, 16, &val);
+	err = kstrtouint(buf, 16, &val);
 	if (err)
 		return err;
 
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 6025eb4..994ae78 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,14 +519,18 @@
 	struct synaptics_data *priv = psmouse->private;
 
 	priv->mode = 0;
-	if (priv->absolute_mode)
+
+	if (priv->absolute_mode) {
 		priv->mode |= SYN_BIT_ABSOLUTE_MODE;
-	if (priv->disable_gesture)
+		if (SYN_CAP_EXTENDED(priv->capabilities))
+			priv->mode |= SYN_BIT_W_MODE;
+	}
+
+	if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
 		priv->mode |= SYN_BIT_DISABLE_GESTURE;
+
 	if (psmouse->rate >= 80)
 		priv->mode |= SYN_BIT_HIGH_RATE;
-	if (SYN_CAP_EXTENDED(priv->capabilities))
-		priv->mode |= SYN_BIT_W_MODE;
 
 	if (synaptics_mode_cmd(psmouse, priv->mode))
 		return -1;
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index ffceedc..aa7c5da 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -655,7 +655,6 @@
 static struct i2c_driver synaptics_i2c_driver = {
 	.driver = {
 		.name	= DRIVER_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &synaptics_i2c_pm,
 	},
 
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 8b748d9..c6606ca 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -175,9 +175,9 @@
 	return 0;
 }
 
-static int amba_kmi_resume(struct amba_device *dev)
+static int __maybe_unused amba_kmi_resume(struct device *dev)
 {
-	struct amba_kmi_port *kmi = amba_get_drvdata(dev);
+	struct amba_kmi_port *kmi = dev_get_drvdata(dev);
 
 	/* kick the serio layer to rescan this port */
 	serio_reconnect(kmi->io);
@@ -185,6 +185,8 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(amba_kmi_dev_pm_ops, NULL, amba_kmi_resume);
+
 static struct amba_id amba_kmi_idtable[] = {
 	{
 		.id	= 0x00041050,
@@ -199,11 +201,11 @@
 	.drv		= {
 		.name	= "kmi-pl050",
 		.owner	= THIS_MODULE,
+		.pm	= &amba_kmi_dev_pm_ops,
 	},
 	.id_table	= amba_kmi_idtable,
 	.probe		= amba_kmi_probe,
 	.remove		= amba_kmi_remove,
-	.resume		= amba_kmi_resume,
 };
 
 module_amba_driver(ambakmi_driver);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index cb5ece7..c9c98f0a 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -88,6 +88,10 @@
 static bool i8042_debug;
 module_param_named(debug, i8042_debug, bool, 0600);
 MODULE_PARM_DESC(debug, "Turn i8042 debugging mode on and off");
+
+static bool i8042_unmask_kbd_data;
+module_param_named(unmask_kbd_data, i8042_unmask_kbd_data, bool, 0600);
+MODULE_PARM_DESC(unmask_kbd_data, "Enable logging of normally sanitize-filtered kbd data traffic in the debug log; may reveal sensitive data (requires i8042.debug=1)");
 #endif
 
 static bool i8042_bypass_aux_irq_test;
@@ -116,6 +120,7 @@
 	struct serio *serio;
 	int irq;
 	bool exists;
+	bool driver_bound;
 	signed char mux;
 };
 
@@ -133,6 +138,7 @@
 static bool i8042_aux_irq_registered;
 static unsigned char i8042_suppress_kbd_ack;
 static struct platform_device *i8042_platform_device;
+static struct notifier_block i8042_kbd_bind_notifier_block;
 
 static irqreturn_t i8042_interrupt(int irq, void *dev_id);
 static bool (*i8042_platform_filter)(unsigned char data, unsigned char str,
@@ -528,10 +534,10 @@
 	port = &i8042_ports[port_no];
 	serio = port->exists ? port->serio : NULL;
 
-	dbg("%02x <- i8042 (interrupt, %d, %d%s%s)\n",
-	    data, port_no, irq,
-	    dfl & SERIO_PARITY ? ", bad parity" : "",
-	    dfl & SERIO_TIMEOUT ? ", timeout" : "");
+	filter_dbg(port->driver_bound, data, "<- i8042 (interrupt, %d, %d%s%s)\n",
+		   port_no, irq,
+		   dfl & SERIO_PARITY ? ", bad parity" : "",
+		   dfl & SERIO_TIMEOUT ? ", timeout" : "");
 
 	filtered = i8042_filter(data, str, serio);
 
@@ -1438,6 +1444,29 @@
 	return error;
 }
 
+static int i8042_kbd_bind_notifier(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct serio *serio = to_serio_port(dev);
+	struct i8042_port *port = serio->port_data;
+
+	if (serio != i8042_ports[I8042_KBD_PORT_NO].serio)
+		return 0;
+
+	switch (action) {
+	case BUS_NOTIFY_BOUND_DRIVER:
+		port->driver_bound = true;
+		break;
+
+	case BUS_NOTIFY_UNBIND_DRIVER:
+		port->driver_bound = false;
+		break;
+	}
+
+	return 0;
+}
+
 static int __init i8042_probe(struct platform_device *dev)
 {
 	int error;
@@ -1507,6 +1536,10 @@
 	.shutdown	= i8042_shutdown,
 };
 
+static struct notifier_block i8042_kbd_bind_notifier_block = {
+	.notifier_call = i8042_kbd_bind_notifier,
+};
+
 static int __init i8042_init(void)
 {
 	struct platform_device *pdev;
@@ -1528,6 +1561,7 @@
 		goto err_platform_exit;
 	}
 
+	bus_register_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = i8042_panic_blink;
 
 	return 0;
@@ -1543,6 +1577,7 @@
 	platform_driver_unregister(&i8042_driver);
 	i8042_platform_exit();
 
+	bus_unregister_notifier(&serio_bus, &i8042_kbd_bind_notifier_block);
 	panic_blink = NULL;
 }
 
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index fc080be..1db0a40 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -73,6 +73,17 @@
 			printk(KERN_DEBUG KBUILD_MODNAME ": [%d] " format,	\
 			       (int) (jiffies - i8042_start_time), ##arg);	\
 	} while (0)
+
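+/*
+ * Like dbg(), but prints the data byte only when the port has no driver
+ * bound or when unmask_kbd_data is set; otherwise the byte is replaced
+ * with "**" so keystrokes are not leaked into the debug log.
+ */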
+#define filter_dbg(filter, data, format, args...)		\
+	do {							\
+		if (!i8042_debug)				\
+			break;					\
+								\
+		if (!filter || i8042_unmask_kbd_data)		\
+			dbg("%02x " format, data, ##args);	\
+		else						\
+			dbg("** " format, ##args);		\
+	} while (0)
 #else
 #define dbg_init() do { } while (0)
 #define dbg(format, arg...)							\
@@ -80,6 +91,8 @@
 		if (0)								\
 			printk(KERN_DEBUG pr_fmt(format), ##arg);		\
 	} while (0)
+
+#define filter_dbg(filter, data, format, args...) do { } while (0)
 #endif
 
 #endif /* _I8042_H */
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index a05a517..8f82897 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -49,8 +49,6 @@
 
 static LIST_HEAD(serio_list);
 
-static struct bus_type serio_bus;
-
 static void serio_add_port(struct serio *serio);
 static int serio_reconnect_port(struct serio *serio);
 static void serio_disconnect_port(struct serio *serio);
@@ -1017,7 +1015,7 @@
 }
 EXPORT_SYMBOL(serio_interrupt);
 
-static struct bus_type serio_bus = {
+struct bus_type serio_bus = {
 	.name		= "serio",
 	.drv_groups	= serio_driver_groups,
 	.match		= serio_bus_match,
@@ -1029,6 +1027,7 @@
 	.pm		= &serio_pm_ops,
 #endif
 };
+EXPORT_SYMBOL(serio_bus);
 
 static int __init serio_init(void)
 {
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index a854c6e..059edeb 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,9 +11,9 @@
 
 if INPUT_TOUCHSCREEN
 
-config OF_TOUCHSCREEN
+config TOUCHSCREEN_PROPERTIES
 	def_tristate INPUT
-	depends on INPUT && OF
+	depends on INPUT
 
 config TOUCHSCREEN_88PM860X
 	tristate "Marvell 88PM860x touchscreen"
@@ -118,7 +118,7 @@
 config TOUCHSCREEN_AUO_PIXCIR
 	tristate "AUO in-cell touchscreen using Pixcir ICs"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a AUO display with in-cell touchscreen
 	  using Pixcir ICs.
@@ -142,7 +142,7 @@
 
 config TOUCHSCREEN_CHIPONE_ICN8318
 	tristate "chipone icn8318 touchscreen controller"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	depends on I2C
 	depends on OF
 	help
@@ -156,7 +156,7 @@
 config TOUCHSCREEN_CY8CTMG110
 	tristate "cy8ctmg110 touchscreen"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a cy8ctmg110 capacitive touchscreen on
 	  an AAVA device.
@@ -915,10 +915,11 @@
 	  module will be called tsc40.
 
 config TOUCHSCREEN_TSC2005
-        tristate "TSC2005 based touchscreens"
-        depends on SPI_MASTER
-        help
-          Say Y here if you have a TSC2005 based touchscreen.
+	tristate "TSC2005 based touchscreens"
+	depends on SPI_MASTER
+	select REGMAP_SPI
+	help
+	  Say Y here if you have a TSC2005 based touchscreen.
 
 	  If unsure, say N.
 
@@ -1029,7 +1030,7 @@
 config TOUCHSCREEN_ZFORCE
 	tristate "Neonode zForce infrared touchscreens"
 	depends on I2C
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y here if you have a touchscreen using the zforce
 	  infrared technology from Neonode.
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index fa3d33b..c85aae2 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,7 +6,7 @@
 
 wm97xx-ts-y := wm97xx-core.o
 
-obj-$(CONFIG_OF_TOUCHSCREEN)		+= of_touchscreen.o
+obj-$(CONFIG_TOUCHSCREEN_PROPERTIES)	+= of_touchscreen.o
 obj-$(CONFIG_TOUCHSCREEN_88PM860X)	+= 88pm860x-ts.o
 obj-$(CONFIG_TOUCHSCREEN_AD7877)	+= ad7877.o
 obj-$(CONFIG_TOUCHSCREEN_AD7879)	+= ad7879.o
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index dcf3907..d66962c 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -94,7 +94,6 @@
 static struct i2c_driver ad7879_i2c_driver = {
 	.driver = {
 		.name	= "ad7879",
-		.owner	= THIS_MODULE,
 		.pm	= &ad7879_pm_ops,
 	},
 	.probe		= ad7879_i2c_probe,
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index e4eb8a6..0f5f968 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1234,7 +1234,8 @@
 	of_property_read_u32(node, "ti,pendown-gpio-debounce",
 			     &pdata->gpio_pendown_debounce);
 
-	pdata->wakeup = of_property_read_bool(node, "linux,wakeup");
+	pdata->wakeup = of_property_read_bool(node, "wakeup-source") ||
+			of_property_read_bool(node, "linux,wakeup");
 
 	pdata->gpio_pendown = of_get_named_gpio(dev->of_node, "pendown-gpio", 0);
 
diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
index f0b954d..71b5a63 100644
--- a/drivers/input/touchscreen/ar1021_i2c.c
+++ b/drivers/input/touchscreen/ar1021_i2c.c
@@ -166,7 +166,6 @@
 static struct i2c_driver ar1021_i2c_driver = {
 	.driver	= {
 		.name	= "ar1021_i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &ar1021_i2c_pm,
 		.of_match_table = ar1021_i2c_of_match,
 	},
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index dfc7309..c562205 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -22,34 +22,20 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
 #include <linux/input/mt.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
 
-/* Version */
-#define MXT_VER_20		20
-#define MXT_VER_21		21
-#define MXT_VER_22		22
-
 /* Firmware files */
 #define MXT_FW_NAME		"maxtouch.fw"
 #define MXT_CFG_NAME		"maxtouch.cfg"
 #define MXT_CFG_MAGIC		"OBP_RAW V1"
 
 /* Registers */
-#define MXT_INFO		0x00
-#define MXT_FAMILY_ID		0x00
-#define MXT_VARIANT_ID		0x01
-#define MXT_VERSION		0x02
-#define MXT_BUILD		0x03
-#define MXT_MATRIX_X_SIZE	0x04
-#define MXT_MATRIX_Y_SIZE	0x05
-#define MXT_OBJECT_NUM		0x06
 #define MXT_OBJECT_START	0x07
-
 #define MXT_OBJECT_SIZE		6
 #define MXT_INFO_CHECKSUM_SIZE	3
 #define MXT_MAX_BLOCK_WRITE	256
@@ -103,21 +89,16 @@
 #define MXT_T6_STATUS_COMSERR	(1 << 2)
 
 /* MXT_GEN_POWER_T7 field */
-#define MXT_POWER_IDLEACQINT	0
-#define MXT_POWER_ACTVACQINT	1
-#define MXT_POWER_ACTV2IDLETO	2
+struct t7_config {
+	u8 idle;
+	u8 active;
+} __packed;
 
-/* MXT_GEN_ACQUIRE_T8 field */
-#define MXT_ACQUIRE_CHRGTIME	0
-#define MXT_ACQUIRE_TCHDRIFT	2
-#define MXT_ACQUIRE_DRIFTST	3
-#define MXT_ACQUIRE_TCHAUTOCAL	4
-#define MXT_ACQUIRE_SYNC	5
-#define MXT_ACQUIRE_ATCHCALST	6
-#define MXT_ACQUIRE_ATCHCALSTHR	7
+#define MXT_POWER_CFG_RUN		0
+#define MXT_POWER_CFG_DEEPSLEEP		1
 
 /* MXT_TOUCH_MULTI_T9 field */
-#define MXT_TOUCH_CTRL		0
+#define MXT_T9_CTRL		0
 #define MXT_T9_ORIENT		9
 #define MXT_T9_RANGE		18
 
@@ -139,51 +120,10 @@
 /* MXT_TOUCH_MULTI_T9 orient */
 #define MXT_T9_ORIENT_SWITCH	(1 << 0)
 
-/* MXT_PROCI_GRIPFACE_T20 field */
-#define MXT_GRIPFACE_CTRL	0
-#define MXT_GRIPFACE_XLOGRIP	1
-#define MXT_GRIPFACE_XHIGRIP	2
-#define MXT_GRIPFACE_YLOGRIP	3
-#define MXT_GRIPFACE_YHIGRIP	4
-#define MXT_GRIPFACE_MAXTCHS	5
-#define MXT_GRIPFACE_SZTHR1	7
-#define MXT_GRIPFACE_SZTHR2	8
-#define MXT_GRIPFACE_SHPTHR1	9
-#define MXT_GRIPFACE_SHPTHR2	10
-#define MXT_GRIPFACE_SUPEXTTO	11
-
-/* MXT_PROCI_NOISE field */
-#define MXT_NOISE_CTRL		0
-#define MXT_NOISE_OUTFLEN	1
-#define MXT_NOISE_GCAFUL_LSB	3
-#define MXT_NOISE_GCAFUL_MSB	4
-#define MXT_NOISE_GCAFLL_LSB	5
-#define MXT_NOISE_GCAFLL_MSB	6
-#define MXT_NOISE_ACTVGCAFVALID	7
-#define MXT_NOISE_NOISETHR	8
-#define MXT_NOISE_FREQHOPSCALE	10
-#define MXT_NOISE_FREQ0		11
-#define MXT_NOISE_FREQ1		12
-#define MXT_NOISE_FREQ2		13
-#define MXT_NOISE_FREQ3		14
-#define MXT_NOISE_FREQ4		15
-#define MXT_NOISE_IDLEGCAFVALID	16
-
 /* MXT_SPT_COMMSCONFIG_T18 */
 #define MXT_COMMS_CTRL		0
 #define MXT_COMMS_CMD		1
 
-/* MXT_SPT_CTECONFIG_T28 field */
-#define MXT_CTE_CTRL		0
-#define MXT_CTE_CMD		1
-#define MXT_CTE_MODE		2
-#define MXT_CTE_IDLEGCAFDEPTH	3
-#define MXT_CTE_ACTVGCAFDEPTH	4
-#define MXT_CTE_VOLTAGE		5
-
-#define MXT_VOLTAGE_DEFAULT	2700000
-#define MXT_VOLTAGE_STEP	10000
-
 /* Define for MXT_GEN_COMMAND_T6 */
 #define MXT_BOOT_VALUE		0xa5
 #define MXT_RESET_VALUE		0x01
@@ -291,6 +231,7 @@
 	u8 last_message_count;
 	u8 num_touchids;
 	u8 multitouch;
+	struct t7_config t7_cfg;
 
 	/* Cached parameters from object table */
 	u16 T5_address;
@@ -997,16 +938,15 @@
 
 	count = data->msg_buf[0];
 
-	if (count == 0) {
-		/*
-		 * This condition is caused by the CHG line being configured
-		 * in Mode 0. It results in unnecessary I2C operations but it
-		 * is benign.
-		 */
-		dev_dbg(dev, "Interrupt triggered but zero messages\n");
+	/*
+	 * This condition may be caused by the CHG line being configured in
+	 * Mode 0. It results in unnecessary I2C operations but it is benign.
+	 */
+	if (count == 0)
 		return IRQ_NONE;
-	} else if (count > data->max_reportid) {
-		dev_err(dev, "T44 count %d exceeded max report id\n", count);
+
+	if (count > data->max_reportid) {
+		dev_warn(dev, "T44 count %d exceeded max report id\n", count);
 		count = data->max_reportid;
 	}
 
@@ -1157,7 +1097,9 @@
 	struct device *dev = &data->client->dev;
 	int ret = 0;
 
-	dev_info(dev, "Resetting chip\n");
+	dev_info(dev, "Resetting device\n");
+
+	disable_irq(data->irq);
 
 	reinit_completion(&data->reset_completion);
 
@@ -1165,6 +1107,11 @@
 	if (ret)
 		return ret;
 
+	/* Ignore CHG line for 100ms after reset */
+	msleep(100);
+
+	enable_irq(data->irq);
+
 	ret = mxt_wait_for_completion(data, &data->reset_completion,
 				      MXT_RESET_TIMEOUT);
 	if (ret)
@@ -1361,6 +1308,8 @@
 	return 0;
 }
 
+static int mxt_init_t7_power_cfg(struct mxt_data *data);
+
 /*
  * mxt_update_cfg - download configuration to chip
  *
@@ -1508,6 +1457,9 @@
 
 	dev_info(dev, "Config successfully updated\n");
 
+	/* T7 config may have changed */
+	mxt_init_t7_power_cfg(data);
+
 release_mem:
 	kfree(config_mem);
 	return ret;
@@ -1533,7 +1485,7 @@
 	int error;
 
 	/* Read 7-byte info block starting at address 0 */
-	error = __mxt_read_reg(client, MXT_INFO, sizeof(*info), info);
+	error = __mxt_read_reg(client, 0, sizeof(*info), info);
 	if (error)
 		return error;
 
@@ -1905,6 +1857,8 @@
 	if (pdata->t19_num_keys) {
 		mxt_set_up_as_touchpad(input_dev, data);
 		mt_flags |= INPUT_MT_POINTER;
+	} else {
+		mt_flags |= INPUT_MT_DIRECT;
 	}
 
 	/* For multi touch */
@@ -2051,6 +2005,60 @@
 	return error;
 }
 
+static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	struct t7_config *new_config;
+	struct t7_config deepsleep = { .active = 0, .idle = 0 };
+
+	if (sleep == MXT_POWER_CFG_DEEPSLEEP)
+		new_config = &deepsleep;
+	else
+		new_config = &data->t7_cfg;
+
+	error = __mxt_write_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), new_config);
+	if (error)
+		return error;
+
+	dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
+		new_config->active, new_config->idle);
+
+	return 0;
+}
+
+static int mxt_init_t7_power_cfg(struct mxt_data *data)
+{
+	struct device *dev = &data->client->dev;
+	int error;
+	bool retry = false;
+
+recheck:
+	error = __mxt_read_reg(data->client, data->T7_address,
+				sizeof(data->t7_cfg), &data->t7_cfg);
+	if (error)
+		return error;
+
+	if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
+		if (!retry) {
+			dev_dbg(dev, "T7 cfg zero, resetting\n");
+			mxt_soft_reset(data);
+			retry = true;
+			goto recheck;
+		} else {
+			dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
+			data->t7_cfg.active = 20;
+			data->t7_cfg.idle = 100;
+			return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+		}
+	}
+
+	dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
+		data->t7_cfg.active, data->t7_cfg.idle);
+	return 0;
+}
+
 static int mxt_configure_objects(struct mxt_data *data,
 				 const struct firmware *cfg)
 {
@@ -2058,6 +2066,12 @@
 	struct mxt_info *info = &data->info;
 	int error;
 
+	error = mxt_init_t7_power_cfg(data);
+	if (error) {
+		dev_err(dev, "Failed to initialize power cfg\n");
+		return error;
+	}
+
 	if (cfg) {
 		error = mxt_update_cfg(data, cfg);
 		if (error)
@@ -2346,14 +2360,41 @@
 
 static void mxt_start(struct mxt_data *data)
 {
-	/* Touch enable */
-	mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0x83);
+	switch (data->pdata->suspend_mode) {
+	case MXT_SUSPEND_T9_CTRL:
+		mxt_soft_reset(data);
+
+		/* Touch enable */
+		/* 0x83 = SCANEN | RPTEN | ENABLE */
+		mxt_write_object(data,
+				MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0x83);
+		break;
+
+	case MXT_SUSPEND_DEEP_SLEEP:
+	default:
+		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+
+		/* Recalibrate since chip has been in deep sleep */
+		mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+		break;
+	}
+
 }
 
 static void mxt_stop(struct mxt_data *data)
 {
-	/* Touch disable */
-	mxt_write_object(data, data->multitouch, MXT_TOUCH_CTRL, 0);
+	switch (data->pdata->suspend_mode) {
+	case MXT_SUSPEND_T9_CTRL:
+		/* Touch disable */
+		mxt_write_object(data,
+				MXT_TOUCH_MULTI_T9, MXT_T9_CTRL, 0);
+		break;
+
+	case MXT_SUSPEND_DEEP_SLEEP:
+	default:
+		mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+		break;
+	}
 }
 
 static int mxt_input_open(struct input_dev *dev)
@@ -2376,19 +2417,18 @@
 static const struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
 {
 	struct mxt_platform_data *pdata;
+	struct device_node *np = client->dev.of_node;
 	u32 *keymap;
-	u32 keycode;
-	int proplen, i, ret;
+	int proplen, ret;
 
-	if (!client->dev.of_node)
+	if (!np)
 		return ERR_PTR(-ENOENT);
 
 	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
-	if (of_find_property(client->dev.of_node, "linux,gpio-keymap",
-			     &proplen)) {
+	if (of_find_property(np, "linux,gpio-keymap", &proplen)) {
 		pdata->t19_num_keys = proplen / sizeof(u32);
 
 		keymap = devm_kzalloc(&client->dev,
@@ -2397,18 +2437,17 @@
 		if (!keymap)
 			return ERR_PTR(-ENOMEM);
 
-		for (i = 0; i < pdata->t19_num_keys; i++) {
-			ret = of_property_read_u32_index(client->dev.of_node,
-					"linux,gpio-keymap", i, &keycode);
-			if (ret)
-				keycode = KEY_RESERVED;
-
-			keymap[i] = keycode;
-		}
+		ret = of_property_read_u32_array(np, "linux,gpio-keymap",
+						 keymap, pdata->t19_num_keys);
+		if (ret)
+			dev_warn(&client->dev,
+				 "Couldn't read linux,gpio-keymap: %d\n", ret);
 
 		pdata->t19_keymap = keymap;
 	}
 
+	pdata->suspend_mode = MXT_SUSPEND_DEEP_SLEEP;
+
 	return pdata;
 }
 #else
@@ -2609,6 +2648,9 @@
 	struct mxt_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
+	if (!input_dev)
+		return 0;
+
 	mutex_lock(&input_dev->mutex);
 
 	if (input_dev->users)
@@ -2625,7 +2667,8 @@
 	struct mxt_data *data = i2c_get_clientdata(client);
 	struct input_dev *input_dev = data->input_dev;
 
-	mxt_soft_reset(data);
+	if (!input_dev)
+		return 0;
 
 	mutex_lock(&input_dev->mutex);
 
@@ -2666,7 +2709,6 @@
 static struct i2c_driver mxt_driver = {
 	.driver = {
 		.name	= "atmel_mxt_ts",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(mxt_of_match),
 		.acpi_match_table = ACPI_PTR(mxt_acpi_id),
 		.pm	= &mxt_pm_ops,
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
index 40e02dd..38c06f7 100644
--- a/drivers/input/touchscreen/auo-pixcir-ts.c
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -686,7 +686,6 @@
 
 static struct i2c_driver auo_pixcir_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "auo_pixcir_ts",
 		.pm	= &auo_pixcir_pm_ops,
 		.of_match_table	= of_match_ptr(auo_pixcir_ts_dt_idtable),
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index b9b5dda..931417e 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -716,7 +716,6 @@
 static struct i2c_driver bu21013_driver = {
 	.driver	= {
 		.name	=	DRIVER_TP,
-		.owner	=	THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	=	&bu21013_dev_pm_ops,
 #endif
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 32e9db0..22a6fea 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -300,7 +300,6 @@
 
 static struct i2c_driver icn8318_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "chipone_icn8318",
 		.pm	= &icn8318_pm_ops,
 		.of_match_table = icn8318_of_match,
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index f2119ee..cc1d135 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -347,7 +347,6 @@
 
 static struct i2c_driver cy8ctmg110_driver = {
 	.driver		= {
-		.owner	= THIS_MODULE,
 		.name	= CY8CTMG110_DRIVER_NAME,
 		.pm	= &cy8ctmg110_pm,
 	},
diff --git a/drivers/input/touchscreen/cyttsp4_i2c.c b/drivers/input/touchscreen/cyttsp4_i2c.c
index 8e2012c..9a323dd 100644
--- a/drivers/input/touchscreen/cyttsp4_i2c.c
+++ b/drivers/input/touchscreen/cyttsp4_i2c.c
@@ -74,7 +74,6 @@
 static struct i2c_driver cyttsp4_i2c_driver = {
 	.driver = {
 		.name	= CYTTSP4_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &cyttsp4_pm_ops,
 	},
 	.probe		= cyttsp4_i2c_probe,
diff --git a/drivers/input/touchscreen/cyttsp_i2c.c b/drivers/input/touchscreen/cyttsp_i2c.c
index 63104a8..519e2de 100644
--- a/drivers/input/touchscreen/cyttsp_i2c.c
+++ b/drivers/input/touchscreen/cyttsp_i2c.c
@@ -74,7 +74,6 @@
 static struct i2c_driver cyttsp_i2c_driver = {
 	.driver = {
 		.name	= CY_I2C_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= &cyttsp_pm_ops,
 	},
 	.probe		= cyttsp_i2c_probe,
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 394b1de..48de1e8 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1041,7 +1041,7 @@
 			     0, tsdata->num_y * 64 - 1, 0, 0);
 
 	if (!pdata)
-		touchscreen_parse_of_params(input, true);
+		touchscreen_parse_properties(input, true);
 
 	error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, INPUT_MT_DIRECT);
 	if (error) {
@@ -1134,7 +1134,6 @@
 
 static struct i2c_driver edt_ft5x06_ts_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "edt_ft5x06",
 		.of_match_table = of_match_ptr(edt_ft5x06_of_match),
 		.pm = &edt_ft5x06_ts_pm_ops,
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 4c56299..1afc08b 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -264,11 +264,11 @@
 	{ .compatible = "eeti,egalax_ts" },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, egalax_ts_dt_ids);
 
 static struct i2c_driver egalax_ts_driver = {
 	.driver = {
 		.name	= "egalax_ts",
-		.owner	= THIS_MODULE,
 		.pm	= &egalax_ts_pm_ops,
 		.of_match_table	= egalax_ts_dt_ids,
 	},
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 0efd766..ddac134 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -38,6 +38,8 @@
 #include <linux/input/mt.h>
 #include <linux/acpi.h>
 #include <linux/of.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
 #include <asm/unaligned.h>
 
 /* Device, Driver information */
@@ -102,6 +104,9 @@
 /* calibration timeout definition */
 #define ELAN_CALI_TIMEOUT_MSEC	10000
 
+#define ELAN_POWERON_DELAY_USEC	500
+#define ELAN_RESET_DELAY_MSEC	20
+
 enum elants_state {
 	ELAN_STATE_NORMAL,
 	ELAN_WAIT_QUEUE_HEADER,
@@ -118,6 +123,10 @@
 	struct i2c_client *client;
 	struct input_dev *input;
 
+	struct regulator *vcc33;
+	struct regulator *vccio;
+	struct gpio_desc *reset_gpio;
+
 	u16 fw_version;
 	u8 test_version;
 	u8 solution_version;
@@ -141,6 +150,7 @@
 	u8 buf[MAX_PACKET_SIZE];
 
 	bool wake_irq_enabled;
+	bool keep_power_in_suspend;
 };
 
 static int elants_i2c_send(struct i2c_client *client,
@@ -605,6 +615,7 @@
 	const u8 enter_iap[] = { 0x45, 0x49, 0x41, 0x50 };
 	const u8 enter_iap2[] = { 0x54, 0x00, 0x12, 0x34 };
 	const u8 iap_ack[] = { 0x55, 0xaa, 0x33, 0xcc };
+	const u8 close_idle[] = {0x54, 0x2c, 0x01, 0x01};
 	u8 buf[HEADER_SIZE];
 	u16 send_id;
 	int page, n_fw_pages;
@@ -617,8 +628,13 @@
 	} else {
 		/* Start IAP Procedure */
 		dev_dbg(&client->dev, "Normal IAP procedure\n");
+		/* Close idle mode */
+		error = elants_i2c_send(client, close_idle, sizeof(close_idle));
+		if (error)
+			dev_err(&client->dev, "Failed close idle: %d\n", error);
+		msleep(60);
 		elants_i2c_sw_reset(client);
-
+		msleep(20);
 		error = elants_i2c_send(client, enter_iap, sizeof(enter_iap));
 	}
 
@@ -1052,6 +1068,67 @@
 	sysfs_remove_group(&ts->client->dev.kobj, &elants_attribute_group);
 }
 
+static int elants_i2c_power_on(struct elants_data *ts)
+{
+	int error;
+
+	/*
+	 * If we do not have a reset GPIO, assume that the platform firmware
+	 * controls the regulators and powers them on for us.
+	 */
+	if (IS_ERR_OR_NULL(ts->reset_gpio))
+		return 0;
+
+	gpiod_set_value_cansleep(ts->reset_gpio, 1);
+
+	error = regulator_enable(ts->vcc33);
+	if (error) {
+		dev_err(&ts->client->dev,
+			"failed to enable vcc33 regulator: %d\n",
+			error);
+		goto release_reset_gpio;
+	}
+
+	error = regulator_enable(ts->vccio);
+	if (error) {
+		dev_err(&ts->client->dev,
+			"failed to enable vccio regulator: %d\n",
+			error);
+		regulator_disable(ts->vcc33);
+		goto release_reset_gpio;
+	}
+
+	/*
+	 * We need to wait a bit after powering on the controller before
+	 * we are allowed to release the reset GPIO.
+	 */
+	udelay(ELAN_POWERON_DELAY_USEC);
+
+release_reset_gpio:
+	gpiod_set_value_cansleep(ts->reset_gpio, 0);
+	if (error)
+		return error;
+
+	msleep(ELAN_RESET_DELAY_MSEC);
+
+	return 0;
+}
+
+static void elants_i2c_power_off(void *_data)
+{
+	struct elants_data *ts = _data;
+
+	if (!IS_ERR_OR_NULL(ts->reset_gpio)) {
+		/*
+		 * Activate reset gpio to prevent leakage through the
+		 * pin once we shut off power to the controller.
+		 */
+		gpiod_set_value_cansleep(ts->reset_gpio, 1);
+		regulator_disable(ts->vccio);
+		regulator_disable(ts->vcc33);
+	}
+}
+
 static int elants_i2c_probe(struct i2c_client *client,
 			    const struct i2c_device_id *id)
 {
@@ -1066,13 +1143,6 @@
 		return -ENXIO;
 	}
 
-	/* Make sure there is something at this address */
-	if (i2c_smbus_xfer(client->adapter, client->addr, 0,
-			I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
-		dev_err(&client->dev, "nothing at this address\n");
-		return -ENXIO;
-	}
-
 	ts = devm_kzalloc(&client->dev, sizeof(struct elants_data), GFP_KERNEL);
 	if (!ts)
 		return -ENOMEM;
@@ -1083,6 +1153,62 @@
 	ts->client = client;
 	i2c_set_clientdata(client, ts);
 
+	ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
+	if (IS_ERR(ts->vcc33)) {
+		error = PTR_ERR(ts->vcc33);
+		if (error != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"Failed to get 'vcc33' regulator: %d\n",
+				error);
+		return error;
+	}
+
+	ts->vccio = devm_regulator_get(&client->dev, "vccio");
+	if (IS_ERR(ts->vccio)) {
+		error = PTR_ERR(ts->vccio);
+		if (error != -EPROBE_DEFER)
+			dev_err(&client->dev,
+				"Failed to get 'vccio' regulator: %d\n",
+				error);
+		return error;
+	}
+
+	ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_LOW);
+	if (IS_ERR(ts->reset_gpio)) {
+		error = PTR_ERR(ts->reset_gpio);
+
+		if (error == -EPROBE_DEFER)
+			return error;
+
+		if (error != -ENOENT && error != -ENOSYS) {
+			dev_err(&client->dev,
+				"failed to get reset gpio: %d\n",
+				error);
+			return error;
+		}
+
+		ts->keep_power_in_suspend = true;
+	}
+
+	error = elants_i2c_power_on(ts);
+	if (error)
+		return error;
+
+	error = devm_add_action(&client->dev, elants_i2c_power_off, ts);
+	if (error) {
+		dev_err(&client->dev,
+			"failed to install power off action: %d\n", error);
+		elants_i2c_power_off(ts);
+		return error;
+	}
+
+	/* Make sure there is something at this address */
+	if (i2c_smbus_xfer(client->adapter, client->addr, 0,
+			   I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE, &dummy) < 0) {
+		dev_err(&client->dev, "nothing at this address\n");
+		return -ENXIO;
+	}
+
 	error = elants_i2c_initialize(ts);
 	if (error) {
 		dev_err(&client->dev, "failed to initialize: %d\n", error);
@@ -1190,18 +1316,24 @@
 
 	disable_irq(client->irq);
 
-	for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-		error = elants_i2c_send(client, set_sleep_cmd,
-					sizeof(set_sleep_cmd));
-		if (!error)
-			break;
+	if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+			error = elants_i2c_send(client, set_sleep_cmd,
+						sizeof(set_sleep_cmd));
+			if (!error)
+				break;
 
-		dev_err(&client->dev, "suspend command failed: %d\n", error);
+			dev_err(&client->dev,
+				"suspend command failed: %d\n", error);
+		}
+
+		if (device_may_wakeup(dev))
+			ts->wake_irq_enabled =
+					(enable_irq_wake(client->irq) == 0);
+	} else {
+		elants_i2c_power_off(ts);
 	}
 
-	if (device_may_wakeup(dev))
-		ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
-
 	return 0;
 }
 
@@ -1216,13 +1348,19 @@
 	if (device_may_wakeup(dev) && ts->wake_irq_enabled)
 		disable_irq_wake(client->irq);
 
-	for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
-		error = elants_i2c_send(client, set_active_cmd,
-					sizeof(set_active_cmd));
-		if (!error)
-			break;
+	if (ts->keep_power_in_suspend) {
+		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
+			error = elants_i2c_send(client, set_active_cmd,
+						sizeof(set_active_cmd));
+			if (!error)
+				break;
 
-		dev_err(&client->dev, "resume command failed: %d\n", error);
+			dev_err(&client->dev,
+				"resume command failed: %d\n", error);
+		}
+	} else {
+		elants_i2c_power_on(ts);
+		elants_i2c_initialize(ts);
 	}
 
 	ts->state = ELAN_STATE_NORMAL;
@@ -1261,10 +1399,10 @@
 	.id_table = elants_i2c_id,
 	.driver = {
 		.name = DEVICE_NAME,
-		.owner = THIS_MODULE,
 		.pm = &elants_i2c_pm_ops,
 		.acpi_match_table = ACPI_PTR(elants_acpi_id),
 		.of_match_table = of_match_ptr(elants_of_match),
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
 };
 module_i2c_driver(elants_i2c_driver);
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index e36162b..4d113c9 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -420,6 +420,7 @@
 	{ "GDIX1001:00", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id goodix_acpi_match[] = {
@@ -448,7 +449,6 @@
 	.id_table = goodix_ts_id,
 	.driver = {
 		.name = "Goodix-TS",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(goodix_acpi_match),
 		.of_match_table = of_match_ptr(goodix_of_match),
 	},
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index da6dc81..ddf694b 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -216,7 +216,7 @@
 	/* get panel info */
 	error = ili210x_read_reg(client, REG_PANEL_INFO, &panel, sizeof(panel));
 	if (error) {
-		dev_err(dev, "Failed to get panel informations, err: %d\n",
+		dev_err(dev, "Failed to get panel information, err: %d\n",
 			error);
 		return error;
 	}
@@ -276,7 +276,7 @@
 
 	error = input_register_device(priv->input);
 	if (error) {
-		dev_err(dev, "Cannot regiser input device, err: %d\n", error);
+		dev_err(dev, "Cannot register input device, err: %d\n", error);
 		goto err_remove_sysfs;
 	}
 
@@ -343,7 +343,6 @@
 static struct i2c_driver ili210x_ts_driver = {
 	.driver = {
 		.name = "ili210x_i2c",
-		.owner = THIS_MODULE,
 		.pm = &ili210x_i2c_pm,
 	},
 	.id_table = ili210x_i2c_id,
diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c
index a68ec14..82079cd 100644
--- a/drivers/input/touchscreen/max11801_ts.c
+++ b/drivers/input/touchscreen/max11801_ts.c
@@ -229,7 +229,6 @@
 static struct i2c_driver max11801_ts_driver = {
 	.driver = {
 		.name	= "max11801_ts",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= max11801_ts_id,
 	.probe		= max11801_ts_probe,
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 67c0d31..7cce876 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -572,12 +572,12 @@
 	{ .compatible = "melfas,mms114" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, mms114_dt_match);
 #endif
 
 static struct i2c_driver mms114_driver = {
 	.driver = {
 		.name	= "mms114",
-		.owner	= THIS_MODULE,
 		.pm	= &mms114_pm_ops,
 		.of_match_table = of_match_ptr(mms114_dt_match),
 	},
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index 806cd0a..bb6f2fe 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -9,12 +9,12 @@
  *
  */
 
-#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
 #include <linux/input/touchscreen.h>
 
-static bool touchscreen_get_prop_u32(struct device_node *np,
+static bool touchscreen_get_prop_u32(struct device *dev,
 				     const char *property,
 				     unsigned int default_value,
 				     unsigned int *value)
@@ -22,7 +22,7 @@
 	u32 val;
 	int error;
 
-	error = of_property_read_u32(np, property, &val);
+	error = device_property_read_u32(dev, property, &val);
 	if (error) {
 		*value = default_value;
 		return false;
@@ -39,13 +39,9 @@
 	struct input_absinfo *absinfo;
 
 	if (!test_bit(axis, dev->absbit)) {
-		/*
-		 * Emit a warning only if the axis is not a multitouch
-		 * axis, which might not be set by the driver.
-		 */
-		if (!input_is_mt_axis(axis))
-			dev_warn(&dev->dev,
-				 "DT specifies parameters but the axis is not set up\n");
+		dev_warn(&dev->dev,
+			 "DT specifies parameters but the axis %lu is not set up\n",
+			 axis);
 		return;
 	}
 
@@ -55,52 +51,58 @@
 }
 
 /**
- * touchscreen_parse_of_params - parse common touchscreen DT properties
- * @dev: device that should be parsed
+ * touchscreen_parse_properties - parse common touchscreen DT properties
+ * @input: input device that should be parsed
+ * @multitouch: specifies whether parsed properties should be applied to
+ *	single-touch or multi-touch axes
  *
  * This function parses common DT properties for touchscreens and setups the
- * input device accordingly. The function keeps previously setuped default
+ * input device accordingly. The function keeps previously set up default
  * values if no value is specified via DT.
  */
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch)
+void touchscreen_parse_properties(struct input_dev *input, bool multitouch)
 {
-	struct device_node *np = dev->dev.parent->of_node;
+	struct device *dev = input->dev.parent;
 	unsigned int axis;
 	unsigned int maximum, fuzz;
 	bool data_present;
 
-	input_alloc_absinfo(dev);
-	if (!dev->absinfo)
+	input_alloc_absinfo(input);
+	if (!input->absinfo)
 		return;
 
 	axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-x",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-x",
+						input_abs_get_max(input,
+								  axis) + 1,
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-x",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum - 1, fuzz);
 
 	axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-size-y",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev, "touchscreen-size-y",
+						input_abs_get_max(input,
+								  axis) + 1,
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-y",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum - 1, fuzz);
 
 	axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
-	data_present = touchscreen_get_prop_u32(np, "touchscreen-max-pressure",
-						input_abs_get_max(dev, axis),
+	data_present = touchscreen_get_prop_u32(dev,
+						"touchscreen-max-pressure",
+						input_abs_get_max(input, axis),
 						&maximum) |
-		       touchscreen_get_prop_u32(np, "touchscreen-fuzz-pressure",
-						input_abs_get_fuzz(dev, axis),
+		       touchscreen_get_prop_u32(dev,
+						"touchscreen-fuzz-pressure",
+						input_abs_get_fuzz(input, axis),
 						&fuzz);
 	if (data_present)
-		touchscreen_set_params(dev, axis, maximum, fuzz);
+		touchscreen_set_params(input, axis, maximum, fuzz);
 }
-EXPORT_SYMBOL(touchscreen_parse_of_params);
+EXPORT_SYMBOL(touchscreen_parse_properties);
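
The renamed helper is built on the unified device property API, so the same call now serves both DT and ACPI firmware. Roughly, a consumer declares its axes and then hands the input device to the parser; the fragment below is only an illustrative sketch (the driver name, function and error handling are invented, not part of this series):

#include <linux/input.h>
#include <linux/input/touchscreen.h>

/* Hypothetical probe-time helper using touchscreen_parse_properties(). */
static int example_ts_setup(struct input_dev *input)
{
	/* Declare the axes first; the parser only adjusts axes already set up. */
	input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
	input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);

	/* Pick up touchscreen-size-* and touchscreen-fuzz-* properties. */
	touchscreen_parse_properties(input, true);

	if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
	    !input_abs_get_max(input, ABS_MT_POSITION_Y))
		return -EINVAL;	/* no size supplied by firmware */

	return 0;
}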
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 8f3e243..9162172 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -24,20 +24,23 @@
 #include <linux/i2c.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
-#include <linux/input/pixcir_ts.h>
+#include <linux/input/touchscreen.h>
 #include <linux/gpio.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
+/*#include <linux/of.h>*/
 #include <linux/of_device.h>
+#include <linux/platform_data/pixcir_i2c_ts.h>
 
 #define PIXCIR_MAX_SLOTS       5 /* Max fingers supported by driver */
 
 struct pixcir_i2c_ts_data {
 	struct i2c_client *client;
 	struct input_dev *input;
-	const struct pixcir_ts_platform_data *pdata;
-	bool running;
+	struct gpio_desc *gpio_attb;
+	struct gpio_desc *gpio_reset;
+	const struct pixcir_i2c_chip_data *chip;
 	int max_fingers;	/* Max fingers supported in this instance */
+	bool running;
 };
 
 struct pixcir_touch {
@@ -60,7 +63,7 @@
 	u8 touch;
 	int ret, i;
 	int readsize;
-	const struct pixcir_i2c_chip_data *chip = &tsdata->pdata->chip;
+	const struct pixcir_i2c_chip_data *chip = tsdata->chip;
 
 	memset(report, 0, sizeof(struct pixcir_report_data));
 
@@ -113,13 +116,13 @@
 	struct pixcir_touch *touch;
 	int n, i, slot;
 	struct device *dev = &ts->client->dev;
-	const struct pixcir_i2c_chip_data *chip = &ts->pdata->chip;
+	const struct pixcir_i2c_chip_data *chip = ts->chip;
 
 	n = report->num_touches;
 	if (n > PIXCIR_MAX_SLOTS)
 		n = PIXCIR_MAX_SLOTS;
 
-	if (!chip->has_hw_ids) {
+	if (!ts->chip->has_hw_ids) {
 		for (i = 0; i < n; i++) {
 			touch = &report->touches[i];
 			pos[i].x = touch->x;
@@ -161,7 +164,6 @@
 static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
 {
 	struct pixcir_i2c_ts_data *tsdata = dev_id;
-	const struct pixcir_ts_platform_data *pdata = tsdata->pdata;
 	struct pixcir_report_data report;
 
 	while (tsdata->running) {
@@ -171,7 +173,7 @@
 		/* report it */
 		pixcir_ts_report(tsdata, &report);
 
-		if (gpio_get_value(pdata->gpio_attb)) {
+		if (gpiod_get_value_cansleep(tsdata->gpio_attb)) {
 			if (report.num_touches) {
 				/*
 				 * Last report with no finger up?
@@ -189,6 +191,17 @@
 	return IRQ_HANDLED;
 }
 
+static void pixcir_reset(struct pixcir_i2c_ts_data *tsdata)
+{
+	if (!IS_ERR_OR_NULL(tsdata->gpio_reset)) {
+		gpiod_set_value_cansleep(tsdata->gpio_reset, 1);
+		ndelay(100);	/* datasheet section 1.2.3 says 80ns min. */
+		gpiod_set_value_cansleep(tsdata->gpio_reset, 0);
+		/* wait for controller ready. 100ms guess. */
+		msleep(100);
+	}
+}
+
 static int pixcir_set_power_mode(struct pixcir_i2c_ts_data *ts,
 				 enum pixcir_power_mode mode)
 {
@@ -411,85 +424,59 @@
 #ifdef CONFIG_OF
 static const struct of_device_id pixcir_of_match[];
 
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+			   struct pixcir_i2c_ts_data *tsdata)
 {
-	struct pixcir_ts_platform_data *pdata;
-	struct device_node *np = dev->of_node;
 	const struct of_device_id *match;
 
 	match = of_match_device(of_match_ptr(pixcir_of_match), dev);
 	if (!match)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
-	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return ERR_PTR(-ENOMEM);
+	tsdata->chip = (const struct pixcir_i2c_chip_data *)match->data;
+	if (!tsdata->chip)
+		return -EINVAL;
 
-	pdata->chip = *(const struct pixcir_i2c_chip_data *)match->data;
-
-	pdata->gpio_attb = of_get_named_gpio(np, "attb-gpio", 0);
-	/* gpio_attb validity is checked in probe */
-
-	if (of_property_read_u32(np, "touchscreen-size-x", &pdata->x_max)) {
-		dev_err(dev, "Failed to get touchscreen-size-x property\n");
-		return ERR_PTR(-EINVAL);
-	}
-	pdata->x_max -= 1;
-
-	if (of_property_read_u32(np, "touchscreen-size-y", &pdata->y_max)) {
-		dev_err(dev, "Failed to get touchscreen-size-y property\n");
-		return ERR_PTR(-EINVAL);
-	}
-	pdata->y_max -= 1;
-
-	dev_dbg(dev, "%s: x %d, y %d, gpio %d\n", __func__,
-		pdata->x_max + 1, pdata->y_max + 1, pdata->gpio_attb);
-
-	return pdata;
+	return 0;
 }
 #else
-static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+static int pixcir_parse_dt(struct device *dev,
+			   struct pixcir_i2c_ts_data *tsdata)
 {
-	return ERR_PTR(-EINVAL);
+	return -EINVAL;
 }
 #endif
 
 static int pixcir_i2c_ts_probe(struct i2c_client *client,
-					 const struct i2c_device_id *id)
+			       const struct i2c_device_id *id)
 {
 	const struct pixcir_ts_platform_data *pdata =
 			dev_get_platdata(&client->dev);
 	struct device *dev = &client->dev;
-	struct device_node *np = dev->of_node;
 	struct pixcir_i2c_ts_data *tsdata;
 	struct input_dev *input;
 	int error;
 
-	if (np && !pdata) {
-		pdata = pixcir_parse_dt(dev);
-		if (IS_ERR(pdata))
-			return PTR_ERR(pdata);
-	}
+	tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
+	if (!tsdata)
+		return -ENOMEM;
 
-	if (!pdata) {
+	if (pdata) {
+		tsdata->chip = &pdata->chip;
+	} else if (dev->of_node) {
+		error = pixcir_parse_dt(dev, tsdata);
+		if (error)
+			return error;
+	} else {
 		dev_err(&client->dev, "platform data not defined\n");
 		return -EINVAL;
 	}
 
-	if (!gpio_is_valid(pdata->gpio_attb)) {
-		dev_err(dev, "Invalid gpio_attb in pdata\n");
+	if (!tsdata->chip->max_fingers) {
+		dev_err(dev, "Invalid max_fingers in chip data\n");
 		return -EINVAL;
 	}
 
-	if (!pdata->chip.max_fingers) {
-		dev_err(dev, "Invalid max_fingers in pdata\n");
-		return -EINVAL;
-	}
-
-	tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
-	if (!tsdata)
-		return -ENOMEM;
-
 	input = devm_input_allocate_device(dev);
 	if (!input) {
 		dev_err(dev, "Failed to allocate input device\n");
@@ -498,7 +485,6 @@
 
 	tsdata->client = client;
 	tsdata->input = input;
-	tsdata->pdata = pdata;
 
 	input->name = client->name;
 	input->id.bustype = BUS_I2C;
@@ -506,15 +492,21 @@
 	input->close = pixcir_input_close;
 	input->dev.parent = &client->dev;
 
-	__set_bit(EV_KEY, input->evbit);
-	__set_bit(EV_ABS, input->evbit);
-	__set_bit(BTN_TOUCH, input->keybit);
-	input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
-	input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
-	input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
-	input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+	if (pdata) {
+		input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+	} else {
+		input_set_capability(input, EV_ABS, ABS_MT_POSITION_X);
+		input_set_capability(input, EV_ABS, ABS_MT_POSITION_Y);
+		touchscreen_parse_properties(input, true);
+		if (!input_abs_get_max(input, ABS_MT_POSITION_X) ||
+		    !input_abs_get_max(input, ABS_MT_POSITION_Y)) {
+			dev_err(dev, "Touchscreen size is not specified\n");
+			return -EINVAL;
+		}
+	}
 
-	tsdata->max_fingers = tsdata->pdata->chip.max_fingers;
+	tsdata->max_fingers = tsdata->chip->max_fingers;
 	if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
 		tsdata->max_fingers = PIXCIR_MAX_SLOTS;
 		dev_info(dev, "Limiting maximum fingers to %d\n",
@@ -530,10 +522,18 @@
 
 	input_set_drvdata(input, tsdata);
 
-	error = devm_gpio_request_one(dev, pdata->gpio_attb,
-				      GPIOF_DIR_IN, "pixcir_i2c_attb");
-	if (error) {
-		dev_err(dev, "Failed to request ATTB gpio\n");
+	tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
+	if (IS_ERR(tsdata->gpio_attb)) {
+		error = PTR_ERR(tsdata->gpio_attb);
+		dev_err(dev, "Failed to request ATTB gpio: %d\n", error);
+		return error;
+	}
+
+	tsdata->gpio_reset = devm_gpiod_get_optional(dev, "reset",
+						     GPIOD_OUT_LOW);
+	if (IS_ERR(tsdata->gpio_reset)) {
+		error = PTR_ERR(tsdata->gpio_reset);
+		dev_err(dev, "Failed to request RESET gpio: %d\n", error);
 		return error;
 	}
 
@@ -545,6 +545,8 @@
 		return error;
 	}
 
+	pixcir_reset(tsdata);
+
 	/* Always be in IDLE mode to save power, device supports auto wake */
 	error = pixcir_set_power_mode(tsdata, PIXCIR_POWER_IDLE);
 	if (error) {
@@ -602,7 +604,6 @@
 
 static struct i2c_driver pixcir_i2c_ts_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "pixcir_ts",
 		.pm	= &pixcir_dev_pm_ops,
 		.of_match_table = of_match_ptr(pixcir_of_match),
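
The pixcir conversion is a textbook gpiod consumer: the mandatory ATTB line comes from devm_gpiod_get(), the reset line from devm_gpiod_get_optional(), which returns NULL rather than an error when the property is absent, and the gpiod accessors treat a NULL descriptor as a no-op. A stand-alone sketch of the same pattern (the helpers and timing values here are hypothetical):

#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/device.h>

/* Hypothetical: request an optional "reset" line for a device. */
static struct gpio_desc *example_get_reset(struct device *dev)
{
	/* NULL (not an error) when no reset-gpios property exists. */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}

static void example_pulse_reset(struct gpio_desc *reset)
{
	/* gpiod calls accept a NULL descriptor and simply do nothing. */
	gpiod_set_value_cansleep(reset, 1);
	udelay(10);		/* assertion time: illustrative value */
	gpiod_set_value_cansleep(reset, 0);
	msleep(100);		/* controller boot time: illustrative value */
}

The caller is still expected to check IS_ERR() on the returned descriptor before using it, as the probe code above does.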
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 697e26e..e943678 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -296,7 +296,6 @@
 	.id_table	= st1232_ts_id,
 	.driver = {
 		.name	= ST1232_TS_NAME,
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(st1232_ts_dt_ids),
 		.pm	= &st1232_ts_pm_ops,
 	},
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 8be7b9b..3f11763 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -581,6 +581,7 @@
 	sur40->alloc_ctx = vb2_dma_sg_init_ctx(sur40->dev);
 	if (IS_ERR(sur40->alloc_ctx)) {
 		dev_err(sur40->dev, "Can't allocate buffer context");
+		error = PTR_ERR(sur40->alloc_ctx);
 		goto err_unreg_v4l2;
 	}
 
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index d8c025b..0f65d02 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -30,10 +30,11 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/tsc2005.h>
 #include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
 
 /*
  * The touchscreen interface operates as follows:
@@ -61,16 +62,24 @@
 #define TSC2005_CMD_12BIT		0x04
 
 /* control byte 0 */
-#define TSC2005_REG_READ		0x0001
-#define TSC2005_REG_PND0		0x0002
-#define TSC2005_REG_X			0x0000
-#define TSC2005_REG_Y			0x0008
-#define TSC2005_REG_Z1			0x0010
-#define TSC2005_REG_Z2			0x0018
-#define TSC2005_REG_TEMP_HIGH		0x0050
-#define TSC2005_REG_CFR0		0x0060
-#define TSC2005_REG_CFR1		0x0068
-#define TSC2005_REG_CFR2		0x0070
+#define TSC2005_REG_READ		0x01 /* R/W access */
+#define TSC2005_REG_PND0		0x02 /* Power Not Down Control */
+#define TSC2005_REG_X			(0x0 << 3)
+#define TSC2005_REG_Y			(0x1 << 3)
+#define TSC2005_REG_Z1			(0x2 << 3)
+#define TSC2005_REG_Z2			(0x3 << 3)
+#define TSC2005_REG_AUX			(0x4 << 3)
+#define TSC2005_REG_TEMP1		(0x5 << 3)
+#define TSC2005_REG_TEMP2		(0x6 << 3)
+#define TSC2005_REG_STATUS		(0x7 << 3)
+#define TSC2005_REG_AUX_HIGH		(0x8 << 3)
+#define TSC2005_REG_AUX_LOW		(0x9 << 3)
+#define TSC2005_REG_TEMP_HIGH		(0xA << 3)
+#define TSC2005_REG_TEMP_LOW		(0xB << 3)
+#define TSC2005_REG_CFR0		(0xC << 3)
+#define TSC2005_REG_CFR1		(0xD << 3)
+#define TSC2005_REG_CFR2		(0xE << 3)
+#define TSC2005_REG_CONV_FUNC		(0xF << 3)
 
 /* configuration register 0 */
 #define TSC2005_CFR0_PRECHARGE_276US	0x0040
@@ -112,20 +121,37 @@
 #define TSC2005_SPI_MAX_SPEED_HZ	10000000
 #define TSC2005_PENUP_TIME_MS		40
 
-struct tsc2005_spi_rd {
-	struct spi_transfer	spi_xfer;
-	u32			spi_tx;
-	u32			spi_rx;
+static const struct regmap_range tsc2005_writable_ranges[] = {
+	regmap_reg_range(TSC2005_REG_AUX_HIGH, TSC2005_REG_CFR2),
 };
 
+static const struct regmap_access_table tsc2005_writable_table = {
+	.yes_ranges = tsc2005_writable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(tsc2005_writable_ranges),
+};
+
+static struct regmap_config tsc2005_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 16,
+	.reg_stride = 0x08,
+	.max_register = 0x78,
+	.read_flag_mask = TSC2005_REG_READ,
+	.write_flag_mask = TSC2005_REG_PND0,
+	.wr_table = &tsc2005_writable_table,
+	.use_single_rw = true,
+};
+
+struct tsc2005_data {
+	u16 x;
+	u16 y;
+	u16 z1;
+	u16 z2;
+} __packed;
+#define TSC2005_DATA_REGS 4
+
 struct tsc2005 {
 	struct spi_device	*spi;
-
-	struct spi_message      spi_read_msg;
-	struct tsc2005_spi_rd	spi_x;
-	struct tsc2005_spi_rd	spi_y;
-	struct tsc2005_spi_rd	spi_z1;
-	struct tsc2005_spi_rd	spi_z2;
+	struct regmap		*regmap;
 
 	struct input_dev	*idev;
 	char			phys[32];
@@ -154,7 +180,7 @@
 
 	struct regulator	*vio;
 
-	int			reset_gpio;
+	struct gpio_desc	*reset_gpio;
 	void			(*set_reset)(bool enable);
 };
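
The regmap description above replaces the driver's hand-built spi_message bookkeeping; a single regmap_bulk_read() call then fetches X, Y, Z1 and Z2. Reduced to a self-contained sketch (the flag masks and stride mirror the defines above, everything else, including the helper, is hypothetical):

#include <linux/regmap.h>
#include <linux/spi/spi.h>

static const struct regmap_config example_tsc_regmap = {
	.reg_bits = 8,			/* command byte */
	.val_bits = 16,			/* 12-bit samples in 16-bit words */
	.reg_stride = 0x08,		/* register addresses advance by 8 */
	.read_flag_mask = 0x01,		/* TSC2005_REG_READ */
	.write_flag_mask = 0x02,	/* TSC2005_REG_PND0 */
	.use_single_rw = true,		/* one register per SPI transfer */
};

/* Hypothetical: read the four sample registers starting at REG_X. */
static int example_read_samples(struct regmap *map, u16 *samples)
{
	return regmap_bulk_read(map, 0x00 /* TSC2005_REG_X */, samples, 4);
}

/* In probe:  map = devm_regmap_init_spi(spi, &example_tsc_regmap);  */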
 
@@ -182,62 +208,6 @@
 	return 0;
 }
 
-static int tsc2005_write(struct tsc2005 *ts, u8 reg, u16 value)
-{
-	u32 tx = ((reg | TSC2005_REG_PND0) << 16) | value;
-	struct spi_transfer xfer = {
-		.tx_buf		= &tx,
-		.len		= 4,
-		.bits_per_word	= 24,
-	};
-	struct spi_message msg;
-	int error;
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&xfer, &msg);
-
-	error = spi_sync(ts->spi, &msg);
-	if (error) {
-		dev_err(&ts->spi->dev,
-			"%s: failed, register: %x, value: %x, error: %d\n",
-			__func__, reg, value, error);
-		return error;
-	}
-
-	return 0;
-}
-
-static void tsc2005_setup_read(struct tsc2005_spi_rd *rd, u8 reg, bool last)
-{
-	memset(rd, 0, sizeof(*rd));
-
-	rd->spi_tx		   = (reg | TSC2005_REG_READ) << 16;
-	rd->spi_xfer.tx_buf	   = &rd->spi_tx;
-	rd->spi_xfer.rx_buf	   = &rd->spi_rx;
-	rd->spi_xfer.len	   = 4;
-	rd->spi_xfer.bits_per_word = 24;
-	rd->spi_xfer.cs_change	   = !last;
-}
-
-static int tsc2005_read(struct tsc2005 *ts, u8 reg, u16 *value)
-{
-	struct tsc2005_spi_rd spi_rd;
-	struct spi_message msg;
-	int error;
-
-	tsc2005_setup_read(&spi_rd, reg, true);
-
-	spi_message_init(&msg);
-	spi_message_add_tail(&spi_rd.spi_xfer, &msg);
-
-	error = spi_sync(ts->spi, &msg);
-	if (error)
-		return error;
-
-	*value = spi_rd.spi_rx;
-	return 0;
-}
-
 static void tsc2005_update_pen_state(struct tsc2005 *ts,
 				     int x, int y, int pressure)
 {
@@ -266,26 +236,23 @@
 	struct tsc2005 *ts = _ts;
 	unsigned long flags;
 	unsigned int pressure;
-	u32 x, y;
-	u32 z1, z2;
+	struct tsc2005_data tsdata;
 	int error;
 
 	/* read the coordinates */
-	error = spi_sync(ts->spi, &ts->spi_read_msg);
+	error = regmap_bulk_read(ts->regmap, TSC2005_REG_X, &tsdata,
+				 TSC2005_DATA_REGS);
 	if (unlikely(error))
 		goto out;
 
-	x = ts->spi_x.spi_rx;
-	y = ts->spi_y.spi_rx;
-	z1 = ts->spi_z1.spi_rx;
-	z2 = ts->spi_z2.spi_rx;
-
 	/* validate position */
-	if (unlikely(x > MAX_12BIT || y > MAX_12BIT))
+	if (unlikely(tsdata.x > MAX_12BIT || tsdata.y > MAX_12BIT))
 		goto out;
 
 	/* Skip reading if the pressure components are out of range */
-	if (unlikely(z1 == 0 || z2 > MAX_12BIT || z1 >= z2))
+	if (unlikely(tsdata.z1 == 0 || tsdata.z2 > MAX_12BIT))
+		goto out;
+	if (unlikely(tsdata.z1 >= tsdata.z2))
 		goto out;
 
        /*
@@ -293,8 +260,8 @@
 	* the value before pen-up - that implies SPI fed us stale data
 	*/
 	if (!ts->pen_down &&
-	    ts->in_x == x && ts->in_y == y &&
-	    ts->in_z1 == z1 && ts->in_z2 == z2) {
+	    ts->in_x == tsdata.x && ts->in_y == tsdata.y &&
+	    ts->in_z1 == tsdata.z1 && ts->in_z2 == tsdata.z2) {
 		goto out;
 	}
 
@@ -302,20 +269,20 @@
 	 * At this point we are happy we have a valid and useful reading.
 	 * Remember it for later comparisons. We may now begin downsampling.
 	 */
-	ts->in_x = x;
-	ts->in_y = y;
-	ts->in_z1 = z1;
-	ts->in_z2 = z2;
+	ts->in_x = tsdata.x;
+	ts->in_y = tsdata.y;
+	ts->in_z1 = tsdata.z1;
+	ts->in_z2 = tsdata.z2;
 
 	/* Compute touch pressure resistance using equation #1 */
-	pressure = x * (z2 - z1) / z1;
+	pressure = tsdata.x * (tsdata.z2 - tsdata.z1) / tsdata.z1;
 	pressure = pressure * ts->x_plate_ohm / 4096;
 	if (unlikely(pressure > MAX_12BIT))
 		goto out;
 
 	spin_lock_irqsave(&ts->lock, flags);
 
-	tsc2005_update_pen_state(ts, x, y, pressure);
+	tsc2005_update_pen_state(ts, tsdata.x, tsdata.y, pressure);
 	mod_timer(&ts->penup_timer,
 		  jiffies + msecs_to_jiffies(TSC2005_PENUP_TIME_MS));
 
@@ -338,9 +305,9 @@
 
 static void tsc2005_start_scan(struct tsc2005 *ts)
 {
-	tsc2005_write(ts, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
-	tsc2005_write(ts, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
-	tsc2005_write(ts, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR0, TSC2005_CFR0_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR1, TSC2005_CFR1_INITVALUE);
+	regmap_write(ts->regmap, TSC2005_REG_CFR2, TSC2005_CFR2_INITVALUE);
 	tsc2005_cmd(ts, TSC2005_CMD_NORMAL);
 }
 
@@ -351,8 +318,8 @@
 
 static void tsc2005_set_reset(struct tsc2005 *ts, bool enable)
 {
-	if (ts->reset_gpio >= 0)
-		gpio_set_value(ts->reset_gpio, enable);
+	if (ts->reset_gpio)
+		gpiod_set_value_cansleep(ts->reset_gpio, enable);
 	else if (ts->set_reset)
 		ts->set_reset(enable);
 }
@@ -388,11 +355,10 @@
 				     struct device_attribute *attr,
 				     char *buf)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
-	u16 temp_high;
-	u16 temp_high_orig;
-	u16 temp_high_test;
+	struct tsc2005 *ts = dev_get_drvdata(dev);
+	unsigned int temp_high;
+	unsigned int temp_high_orig;
+	unsigned int temp_high_test;
 	bool success = true;
 	int error;
 
@@ -403,7 +369,7 @@
 	 */
 	__tsc2005_disable(ts);
 
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high_orig);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d\n", error);
 		success = false;
@@ -412,14 +378,14 @@
 
 	temp_high_test = (temp_high_orig - 1) & MAX_12BIT;
 
-	error = tsc2005_write(ts, TSC2005_REG_TEMP_HIGH, temp_high_test);
+	error = regmap_write(ts->regmap, TSC2005_REG_TEMP_HIGH, temp_high_test);
 	if (error) {
 		dev_warn(dev, "selftest failed: write error %d\n", error);
 		success = false;
 		goto out;
 	}
 
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d after write\n",
 			 error);
@@ -442,7 +408,7 @@
 		goto out;
 
 	/* test that the reset really happened */
-	error = tsc2005_read(ts, TSC2005_REG_TEMP_HIGH, &temp_high);
+	error = regmap_read(ts->regmap, TSC2005_REG_TEMP_HIGH, &temp_high);
 	if (error) {
 		dev_warn(dev, "selftest failed: read error %d after reset\n",
 			 error);
@@ -474,8 +440,7 @@
 				      struct attribute *attr, int n)
 {
 	struct device *dev = container_of(kobj, struct device, kobj);
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 	umode_t mode = attr->mode;
 
 	if (attr == &dev_attr_selftest.attr) {
@@ -495,7 +460,7 @@
 {
 	struct tsc2005 *ts = container_of(work, struct tsc2005, esd_work.work);
 	int error;
-	u16 r;
+	unsigned int r;
 
 	if (!mutex_trylock(&ts->mutex)) {
 		/*
@@ -511,7 +476,7 @@
 		goto out;
 
 	/* We should be able to read register without disabling interrupts. */
-	error = tsc2005_read(ts, TSC2005_REG_CFR0, &r);
+	error = regmap_read(ts->regmap, TSC2005_REG_CFR0, &r);
 	if (!error &&
 	    !((r ^ TSC2005_CFR0_INITVALUE) & TSC2005_CFR0_RW_MASK)) {
 		goto out;
@@ -575,20 +540,6 @@
 	mutex_unlock(&ts->mutex);
 }
 
-static void tsc2005_setup_spi_xfer(struct tsc2005 *ts)
-{
-	tsc2005_setup_read(&ts->spi_x, TSC2005_REG_X, false);
-	tsc2005_setup_read(&ts->spi_y, TSC2005_REG_Y, false);
-	tsc2005_setup_read(&ts->spi_z1, TSC2005_REG_Z1, false);
-	tsc2005_setup_read(&ts->spi_z2, TSC2005_REG_Z2, true);
-
-	spi_message_init(&ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_x.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_y.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_z1.spi_xfer, &ts->spi_read_msg);
-	spi_message_add_tail(&ts->spi_z2.spi_xfer, &ts->spi_read_msg);
-}
-
 static int tsc2005_probe(struct spi_device *spi)
 {
 	const struct tsc2005_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -653,38 +604,31 @@
 	ts->spi = spi;
 	ts->idev = input_dev;
 
+	ts->regmap = devm_regmap_init_spi(spi, &tsc2005_regmap_config);
+	if (IS_ERR(ts->regmap))
+		return PTR_ERR(ts->regmap);
+
 	ts->x_plate_ohm = x_plate_ohm;
 	ts->esd_timeout = esd_timeout;
 
-	if (np) {
-		ts->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
-		if (ts->reset_gpio == -EPROBE_DEFER)
-			return ts->reset_gpio;
-		if (ts->reset_gpio < 0) {
-			dev_err(&spi->dev, "error acquiring reset gpio: %d\n",
-				ts->reset_gpio);
-			return ts->reset_gpio;
-		}
-
-		error = devm_gpio_request_one(&spi->dev, ts->reset_gpio, 0,
-					      "reset-gpios");
-		if (error) {
-			dev_err(&spi->dev, "error requesting reset gpio: %d\n",
-				error);
-			return error;
-		}
-
-		ts->vio = devm_regulator_get(&spi->dev, "vio");
-		if (IS_ERR(ts->vio)) {
-			error = PTR_ERR(ts->vio);
-			dev_err(&spi->dev, "vio regulator missing (%d)", error);
-			return error;
-		}
-	} else {
-		ts->reset_gpio = -1;
-		ts->set_reset = pdata->set_reset;
+	ts->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset",
+						 GPIOD_OUT_HIGH);
+	if (IS_ERR(ts->reset_gpio)) {
+		error = PTR_ERR(ts->reset_gpio);
+		dev_err(&spi->dev, "error acquiring reset gpio: %d\n", error);
+		return error;
 	}
 
+	ts->vio = devm_regulator_get_optional(&spi->dev, "vio");
+	if (IS_ERR(ts->vio)) {
+		error = PTR_ERR(ts->vio);
+		dev_err(&spi->dev, "vio regulator missing (%d)", error);
+		return error;
+	}
+
+	if (!ts->reset_gpio && pdata)
+		ts->set_reset = pdata->set_reset;
+
 	mutex_init(&ts->mutex);
 
 	spin_lock_init(&ts->lock);
@@ -692,8 +636,6 @@
 
 	INIT_DELAYED_WORK(&ts->esd_work, tsc2005_esd_work);
 
-	tsc2005_setup_spi_xfer(ts);
-
 	snprintf(ts->phys, sizeof(ts->phys),
 		 "%s/input-ts", dev_name(&spi->dev));
 
@@ -709,7 +651,7 @@
 	input_set_abs_params(input_dev, ABS_PRESSURE, 0, max_p, fudge_p, 0);
 
 	if (np)
-		touchscreen_parse_of_params(input_dev, false);
+		touchscreen_parse_properties(input_dev, false);
 
 	input_dev->open = tsc2005_open;
 	input_dev->close = tsc2005_close;
@@ -735,7 +677,7 @@
 			return error;
 	}
 
-	spi_set_drvdata(spi, ts);
+	dev_set_drvdata(&spi->dev, ts);
 	error = sysfs_create_group(&spi->dev.kobj, &tsc2005_attr_group);
 	if (error) {
 		dev_err(&spi->dev,
@@ -763,7 +705,7 @@
 
 static int tsc2005_remove(struct spi_device *spi)
 {
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(&spi->dev);
 
 	sysfs_remove_group(&spi->dev.kobj, &tsc2005_attr_group);
 
@@ -775,8 +717,7 @@
 
 static int __maybe_unused tsc2005_suspend(struct device *dev)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 
 	mutex_lock(&ts->mutex);
 
@@ -792,8 +733,7 @@
 
 static int __maybe_unused tsc2005_resume(struct device *dev)
 {
-	struct spi_device *spi = to_spi_device(dev);
-	struct tsc2005 *ts = spi_get_drvdata(spi);
+	struct tsc2005 *ts = dev_get_drvdata(dev);
 
 	mutex_lock(&ts->mutex);
 
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index ccc8aa6..5d0cd51 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -482,7 +482,6 @@
 
 static struct i2c_driver tsc2007_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "tsc2007",
 		.of_match_table = of_match_ptr(tsc2007_of_match),
 	},
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index 32f8ac0..8d7a285 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -271,7 +271,6 @@
 static struct i2c_driver wacom_i2c_driver = {
 	.driver	= {
 		.name	= "wacom_i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &wacom_i2c_pm,
 	},
 
diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c
index fb92ae1..515c20a 100644
--- a/drivers/input/touchscreen/wdt87xx_i2c.c
+++ b/drivers/input/touchscreen/wdt87xx_i2c.c
@@ -23,7 +23,7 @@
 #include <asm/unaligned.h>
 
 #define WDT87XX_NAME		"wdt87xx_i2c"
-#define WDT87XX_DRV_VER		"0.9.6"
+#define WDT87XX_DRV_VER		"0.9.7"
 #define WDT87XX_FW_NAME		"wdt87xx_fw.bin"
 #define WDT87XX_CFG_NAME	"wdt87xx_cfg.bin"
 
@@ -85,6 +85,11 @@
 #define CTL_PARAM_OFFSET_PHY_H		24
 #define CTL_PARAM_OFFSET_FACTOR		32
 
+/* The definition of the device descriptor */
+#define WDT_GD_DEVICE			1
+#define DEV_DESC_OFFSET_VID		8
+#define DEV_DESC_OFFSET_PID		10
+
 /* Communication commands */
 #define PACKET_SIZE			56
 #define VND_REQ_READ			0x06
@@ -152,6 +157,7 @@
 /* Controller requires minimum 300us between commands */
 #define WDT_COMMAND_DELAY_MS		2
 #define WDT_FLASH_WRITE_DELAY_MS	4
+#define WDT_FW_RESET_TIME		2500
 
 struct wdt87xx_sys_param {
 	u16	fw_id;
@@ -165,6 +171,8 @@
 	u16	scaling_factor;
 	u32	max_x;
 	u32	max_y;
+	u16	vendor_id;
+	u16	product_id;
 };
 
 struct wdt87xx_data {
@@ -208,6 +216,32 @@
 	return 0;
 }
 
+static int wdt87xx_get_desc(struct i2c_client *client, u8 desc_idx,
+			    u8 *buf, size_t len)
+{
+	u8 tx_buf[] = { 0x22, 0x00, 0x10, 0x0E, 0x23, 0x00 };
+	int error;
+
+	tx_buf[2] |= desc_idx & 0xF;
+
+	error = wdt87xx_i2c_xfer(client, tx_buf, sizeof(tx_buf),
+				 buf, len);
+	if (error) {
+		dev_err(&client->dev, "get desc failed: %d\n", error);
+		return error;
+	}
+
+	if (buf[0] != len) {
+		dev_err(&client->dev, "unexpected response to get desc: %d\n",
+			buf[0]);
+		return -EINVAL;
+	}
+
+	mdelay(WDT_COMMAND_DELAY_MS);
+
+	return 0;
+}
+
 static int wdt87xx_get_string(struct i2c_client *client, u8 str_idx,
 			      u8 *buf, size_t len)
 {
@@ -373,7 +407,7 @@
 	}
 
 	/* Wait the device to be ready */
-	msleep(200);
+	msleep(WDT_FW_RESET_TIME);
 
 	return 0;
 }
@@ -403,6 +437,15 @@
 	u8 buf[PKT_READ_SIZE];
 	int error;
 
+	error = wdt87xx_get_desc(client, WDT_GD_DEVICE, buf, 18);
+	if (error) {
+		dev_err(&client->dev, "failed to get device desc\n");
+		return error;
+	}
+
+	param->vendor_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_VID);
+	param->product_id = get_unaligned_le16(buf + DEV_DESC_OFFSET_PID);
+
 	error = wdt87xx_get_string(client, STRIDX_PARAMETERS, buf, 34);
 	if (error) {
 		dev_err(&client->dev, "failed to get parameters\n");
@@ -994,6 +1037,8 @@
 
 	input->name = "WDT87xx Touchscreen";
 	input->id.bustype = BUS_I2C;
+	input->id.vendor = wdt->param.vendor_id;
+	input->id.product = wdt->param.product_id;
 	input->phys = wdt->phys;
 
 	input_set_abs_params(input, ABS_MT_POSITION_X, 0,
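
The descriptor added above is a little-endian byte buffer, so the vendor and product IDs are pulled out with get_unaligned_le16(), which is safe regardless of buffer alignment. A minimal illustration (the offsets are taken from the defines above, the helper itself is hypothetical):

#include <asm/unaligned.h>
#include <linux/types.h>

/* Hypothetical parser for the 18-byte device descriptor read above. */
static void example_parse_device_desc(const u8 *buf, u16 *vid, u16 *pid)
{
	*vid = get_unaligned_le16(buf + 8);	/* DEV_DESC_OFFSET_VID */
	*pid = get_unaligned_le16(buf + 10);	/* DEV_DESC_OFFSET_PID */
}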
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index b1ae779..1534e9b 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -732,8 +732,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int wm97xx_suspend(struct device *dev, pm_message_t state)
+static int __maybe_unused wm97xx_suspend(struct device *dev)
 {
 	struct wm97xx *wm = dev_get_drvdata(dev);
 	u16 reg;
@@ -765,7 +764,7 @@
 	return 0;
 }
 
-static int wm97xx_resume(struct device *dev)
+static int __maybe_unused wm97xx_resume(struct device *dev)
 {
 	struct wm97xx *wm = dev_get_drvdata(dev);
 
@@ -799,10 +798,7 @@
 	return 0;
 }
 
-#else
-#define wm97xx_suspend		NULL
-#define wm97xx_resume		NULL
-#endif
+static SIMPLE_DEV_PM_OPS(wm97xx_pm_ops, wm97xx_suspend, wm97xx_resume);
 
 /*
  * Machine specific operations
@@ -836,8 +832,7 @@
 	.owner =	THIS_MODULE,
 	.probe =	wm97xx_probe,
 	.remove =	wm97xx_remove,
-	.suspend =	wm97xx_suspend,
-	.resume =	wm97xx_resume,
+	.pm =		&wm97xx_pm_ops,
 };
 
 static int __init wm97xx_init(void)
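
The wm97xx change drops the #ifdef CONFIG_PM scaffolding in favour of __maybe_unused callbacks wrapped in SIMPLE_DEV_PM_OPS(), so the handlers always compile and the macro wires them up only when sleep support is configured. The bare pattern, as a hedged sketch with invented names:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused example_suspend(struct device *dev)
{
	/* quiesce the hardware; state comes from dev_get_drvdata(dev) */
	return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
	/* bring the hardware back up */
	return 0;
}

/* Defines a dev_pm_ops with the system sleep hooks filled in. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

/* Referenced from the driver struct as  .driver.pm = &example_pm_ops  */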
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index f58a196..781d0f8 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -24,14 +24,13 @@
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
 #include <linux/input/mt.h>
 #include <linux/platform_data/zforce_ts.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 
 #define WAIT_TIMEOUT		msecs_to_jiffies(1000)
 
@@ -120,6 +119,9 @@
 
 	struct regulator	*reg_vdd;
 
+	struct gpio_desc	*gpio_int;
+	struct gpio_desc	*gpio_rst;
+
 	bool			suspending;
 	bool			suspended;
 	bool			boot_complete;
@@ -161,6 +163,16 @@
 	return 0;
 }
 
+static void zforce_reset_assert(struct zforce_ts *ts)
+{
+	gpiod_set_value_cansleep(ts->gpio_rst, 1);
+}
+
+static void zforce_reset_deassert(struct zforce_ts *ts)
+{
+	gpiod_set_value_cansleep(ts->gpio_rst, 0);
+}
+
 static int zforce_send_wait(struct zforce_ts *ts, const char *buf, int len)
 {
 	struct i2c_client *client = ts->client;
@@ -479,7 +491,6 @@
 {
 	struct zforce_ts *ts = dev_id;
 	struct i2c_client *client = ts->client;
-	const struct zforce_ts_platdata *pdata = ts->pdata;
 	int ret;
 	u8 payload_buffer[FRAME_MAXSIZE];
 	u8 *payload;
@@ -499,7 +510,16 @@
 	if (!ts->suspending && device_may_wakeup(&client->dev))
 		pm_stay_awake(&client->dev);
 
-	while (!gpio_get_value(pdata->gpio_int)) {
+	/*
+	 * Run at least once and exit the loop if
+	 * - the optional interrupt GPIO isn't specified
+	 *   (there is only one packet read per ISR invocation, then)
+	 * or
+	 * - the GPIO isn't active any more
+	 *   (packets are read until the GPIO level indicates that there
+	 *    is no IRQ pending any more)
+	 */
+	do {
 		ret = zforce_read_packet(ts, payload_buffer);
 		if (ret < 0) {
 			dev_err(&client->dev,
@@ -566,7 +586,7 @@
 				payload[RESPONSE_ID]);
 			break;
 		}
-	}
+	} while (gpiod_get_value_cansleep(ts->gpio_int));
 
 	if (!ts->suspending && device_may_wakeup(&client->dev))
 		pm_relax(&client->dev);
@@ -690,7 +710,7 @@
 {
 	struct zforce_ts *ts = data;
 
-	gpio_set_value(ts->pdata->gpio_rst, 0);
+	zforce_reset_assert(ts);
 
 	udelay(10);
 
@@ -712,18 +732,6 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	pdata->gpio_int = of_get_gpio(np, 0);
-	if (!gpio_is_valid(pdata->gpio_int)) {
-		dev_err(dev, "failed to get interrupt gpio\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	pdata->gpio_rst = of_get_gpio(np, 1);
-	if (!gpio_is_valid(pdata->gpio_rst)) {
-		dev_err(dev, "failed to get reset gpio\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (of_property_read_u32(np, "x-size", &pdata->x_max)) {
 		dev_err(dev, "failed to get x-size property\n");
 		return ERR_PTR(-EINVAL);
@@ -755,20 +763,49 @@
 	if (!ts)
 		return -ENOMEM;
 
-	ret = devm_gpio_request_one(&client->dev, pdata->gpio_int, GPIOF_IN,
-				    "zforce_ts_int");
-	if (ret) {
-		dev_err(&client->dev, "request of gpio %d failed, %d\n",
-			pdata->gpio_int, ret);
+	ts->gpio_rst = devm_gpiod_get_optional(&client->dev, "reset",
+					       GPIOD_OUT_HIGH);
+	if (IS_ERR(ts->gpio_rst)) {
+		ret = PTR_ERR(ts->gpio_rst);
+		dev_err(&client->dev,
+			"failed to request reset GPIO: %d\n", ret);
 		return ret;
 	}
 
-	ret = devm_gpio_request_one(&client->dev, pdata->gpio_rst,
-				    GPIOF_OUT_INIT_LOW, "zforce_ts_rst");
-	if (ret) {
-		dev_err(&client->dev, "request of gpio %d failed, %d\n",
-			pdata->gpio_rst, ret);
-		return ret;
+	if (ts->gpio_rst) {
+		ts->gpio_int = devm_gpiod_get_optional(&client->dev, "irq",
+						       GPIOD_IN);
+		if (IS_ERR(ts->gpio_int)) {
+			ret = PTR_ERR(ts->gpio_int);
+			dev_err(&client->dev,
+				"failed to request interrupt GPIO: %d\n", ret);
+			return ret;
+		}
+	} else {
+		/*
+		 * Deprecated GPIO handling for compatibility
+		 * with legacy binding.
+		 */
+
+		/* INT GPIO */
+		ts->gpio_int = devm_gpiod_get_index(&client->dev, NULL, 0,
+						    GPIOD_IN);
+		if (IS_ERR(ts->gpio_int)) {
+			ret = PTR_ERR(ts->gpio_int);
+			dev_err(&client->dev,
+				"failed to request interrupt GPIO: %d\n", ret);
+			return ret;
+		}
+
+		/* RST GPIO */
+		ts->gpio_rst = devm_gpiod_get_index(&client->dev, NULL, 1,
+					    GPIOD_OUT_HIGH);
+		if (IS_ERR(ts->gpio_rst)) {
+			ret = PTR_ERR(ts->gpio_rst);
+			dev_err(&client->dev,
+				"failed to request reset GPIO: %d\n", ret);
+			return ret;
+		}
 	}
 
 	ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
@@ -863,7 +900,7 @@
 	i2c_set_clientdata(client, ts);
 
 	/* let the controller boot */
-	gpio_set_value(pdata->gpio_rst, 1);
+	zforce_reset_deassert(ts);
 
 	ts->command_waiting = NOTIFICATION_BOOTCOMPLETE;
 	if (wait_for_completion_timeout(&ts->command_done, WAIT_TIMEOUT) == 0)
@@ -917,7 +954,6 @@
 
 static struct i2c_driver zforce_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "zforce-ts",
 		.pm	= &zforce_pm_ops,
 		.of_match_table	= of_match_ptr(zforce_dt_idtable),
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index f1fb1d3..f491aec 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -222,7 +222,7 @@
 	select IOMMU_API
 	help
 	  This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
-	  SoCs (Tegra30 up to Tegra132).
+	  SoCs (Tegra30 up to Tegra210).
 
 config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0649b94..c82ebee 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -408,6 +408,10 @@
 	struct list_head global; /* link to global list */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
+	struct {
+		u8 enabled:1;
+		u8 qdep;
+	} ats;			/* ATS state */
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
 	struct dmar_domain *domain; /* pointer to domain */
@@ -1391,19 +1395,26 @@
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
+	struct pci_dev *pdev;
+
 	if (!info || !dev_is_pci(info->dev))
 		return;
 
-	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
+	pdev = to_pci_dev(info->dev);
+	if (pci_enable_ats(pdev, VTD_PAGE_SHIFT))
+		return;
+
+	info->ats.enabled = 1;
+	info->ats.qdep = pci_ats_queue_depth(pdev);
 }
 
 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info->dev || !dev_is_pci(info->dev) ||
-	    !pci_ats_enabled(to_pci_dev(info->dev)))
+	if (!info->ats.enabled)
 		return;
 
 	pci_disable_ats(to_pci_dev(info->dev));
+	info->ats.enabled = 0;
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1415,16 +1426,11 @@
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		struct pci_dev *pdev;
-		if (!info->dev || !dev_is_pci(info->dev))
-			continue;
-
-		pdev = to_pci_dev(info->dev);
-		if (!pci_ats_enabled(pdev))
+		if (!info->ats.enabled)
 			continue;
 
 		sid = info->bus << 8 | info->devfn;
-		qdep = pci_ats_queue_depth(pdev);
+		qdep = info->ats.qdep;
 		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -2097,7 +2103,7 @@
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = page_to_phys(sg_page(sg)) | prot;
+			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -2275,6 +2281,8 @@
 
 	info->bus = bus;
 	info->devfn = devfn;
+	info->ats.enabled = 0;
+	info->ats.qdep = 0;
 	info->dev = dev;
 	info->domain = domain;
 	info->iommu = iommu;
@@ -3623,7 +3631,7 @@
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
+		sg->dma_address = sg_phys(sg);
 		sg->dma_length = sg->length;
 	}
 	return nelems;
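
The sg_phys() conversions here (and in the generic iommu map_sg below) use the helper as shorthand for the page-plus-offset arithmetic the old code spelled out; the pteval hunk additionally masks the intra-page offset back off. For reference, a simplified rendering of what the helper computes (not a copy of the kernel's header):

#include <linux/scatterlist.h>

/* Simplified illustration of the helper used above. */
static inline phys_addr_t example_sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}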
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f286090..049df49 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1408,7 +1408,7 @@
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		phys_addr_t phys = sg_phys(s);
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 120d815..27b52c8 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -61,6 +61,10 @@
 	select MULTI_IRQ_HANDLER
 	select SPARSE_IRQ
 
+config I8259
+	bool
+	select IRQ_DOMAIN
+
 config BCM7038_L1_IRQ
 	bool
 	select GENERIC_IRQ_CHIP
@@ -177,3 +181,9 @@
 config RENESAS_H8S_INTC
         bool
 	select IRQ_DOMAIN
+
+config IMX_GPCV2
+	bool
+	select IRQ_DOMAIN
+	help
+	  Enables the wakeup IRQs for IMX platforms with GPCv2 block
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b8d4e96..bb3048f 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_IRQCHIP)			+= irqchip.o
 
 obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2835.o
+obj-$(CONFIG_ARCH_BCM2835)		+= irq-bcm2836.o
 obj-$(CONFIG_ARCH_EXYNOS)		+= exynos-combiner.o
 obj-$(CONFIG_ARCH_HIP04)		+= irq-hip04.o
 obj-$(CONFIG_ARCH_MMP)			+= irq-mmp.o
@@ -22,11 +23,12 @@
 obj-$(CONFIG_ARM_GIC)			+= irq-gic.o irq-gic-common.o
 obj-$(CONFIG_ARM_GIC_V2M)		+= irq-gic-v2m.o
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o
+obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
 obj-$(CONFIG_ARM_VIC)			+= irq-vic.o
 obj-$(CONFIG_ATMEL_AIC_IRQ)		+= irq-atmel-aic-common.o irq-atmel-aic.o
 obj-$(CONFIG_ATMEL_AIC5_IRQ)	+= irq-atmel-aic-common.o irq-atmel-aic5.o
+obj-$(CONFIG_I8259)			+= irq-i8259.o
 obj-$(CONFIG_IMGPDC_IRQ)		+= irq-imgpdc.o
 obj-$(CONFIG_IRQ_MIPS_CPU)		+= irq-mips-cpu.o
 obj-$(CONFIG_SIRF_IRQ)			+= irq-sirfsoc.o
@@ -52,3 +54,4 @@
 obj-$(CONFIG_RENESAS_H8S_INTC)		+= irq-renesas-h8s.o
 obj-$(CONFIG_ARCH_SA1100)		+= irq-sa11x0.o
 obj-$(CONFIG_INGENIC_IRQ)		+= irq-ingenic.o
+obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index 5c82e3b..e9c6f2a 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -15,13 +15,12 @@
 #include <linux/slab.h>
 #include <linux/syscore_ops.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/interrupt.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
-#include "irqchip.h"
-
 #define COMBINER_ENABLE_SET	0x0
 #define COMBINER_ENABLE_CLEAR	0x4
 #define COMBINER_INT_STATUS	0xC
@@ -66,10 +65,12 @@
 	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
 }
 
-static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+static void combiner_handle_cascade_irq(unsigned int __irq,
+					struct irq_desc *desc)
 {
-	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	unsigned int cascade_irq, combiner_irq;
 	unsigned long status;
 
@@ -122,9 +123,8 @@
 static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
 					unsigned int irq)
 {
-	if (irq_set_handler_data(irq, combiner_data) != 0)
-		BUG();
-	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
+	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
+					 combiner_data);
 }
 
 static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
@@ -185,14 +185,14 @@
 
 	combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
 	if (!combiner_data) {
-		pr_warning("%s: could not allocate combiner data\n", __func__);
+		pr_warn("%s: could not allocate combiner data\n", __func__);
 		return;
 	}
 
 	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
 				&combiner_irq_domain_ops, combiner_data);
 	if (WARN_ON(!combiner_irq_domain)) {
-		pr_warning("%s: irq domain init failed\n", __func__);
+		pr_warn("%s: irq domain init failed\n", __func__);
 		return;
 	}
 
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 0d3b0fe..39b72da 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
@@ -33,8 +34,6 @@
 #include <asm/smp_plat.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 /* Interrupt Controller Registers Map */
 #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
 #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
@@ -451,7 +450,7 @@
 static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
 						  struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long irqmap, irqn, irqsrc, cpuid;
 	unsigned int cascade_irq;
 
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index dae3604..8a0c7f2 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -19,6 +19,7 @@
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -31,7 +32,6 @@
 #include <asm/mach/irq.h>
 
 #include "irq-atmel-aic-common.h"
-#include "irqchip.h"
 
 /* Number of irq lines managed by AIC */
 #define NR_AIC_IRQS	32
@@ -225,7 +225,7 @@
 	aic_common_rtt_irq_fixup(root);
 }
 
-static const struct of_device_id __initdata aic_irq_fixups[] = {
+static const struct of_device_id aic_irq_fixups[] __initconst = {
 	{ .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup },
 	{ .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup },
 	{ .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 459bf44..9da9942 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -19,6 +19,7 @@
 #include <linux/bitmap.h>
 #include <linux/types.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -31,7 +32,6 @@
 #include <asm/mach/irq.h>
 
 #include "irq-atmel-aic-common.h"
-#include "irqchip.h"
 
 /* Number of irq lines managed by AIC */
 #define NR_AIC5_IRQS	128
@@ -290,7 +290,7 @@
 	aic_common_rtc_irq_fixup(root);
 }
 
-static const struct of_device_id __initdata aic5_irq_fixups[] = {
+static const struct of_device_id aic5_irq_fixups[] __initconst = {
 	{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
 	{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
 	{ /* sentinel */ },
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index e68c3b6..ed4ca9d 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -48,13 +48,12 @@
 #include <linux/slab.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
 #include <asm/exception.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 /* Put the bank and irq (32 bits) into the hwirq */
 #define MAKE_HWIRQ(b, n)	((b << 5) | (n))
 #define HWIRQ_BANK(i)		(i >> 5)
@@ -76,10 +75,10 @@
 #define NR_BANKS		3
 #define IRQS_PER_BANK		32
 
-static int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
-static int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
-static int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
-static int bank_irqs[] __initconst = { 8, 32, 32 };
+static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 };
+static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 };
+static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 };
+static const int bank_irqs[] __initconst = { 8, 32, 32 };
 
 static const int shortcuts[] = {
 	7, 9, 10, 18, 19,		/* Bank 1 */
@@ -97,6 +96,7 @@
 static struct armctrl_ic intc __read_mostly;
 static void __exception_irq_entry bcm2835_handle_irq(
 	struct pt_regs *regs);
+static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc);
 
 static void armctrl_mask_irq(struct irq_data *d)
 {
@@ -140,7 +140,8 @@
 };
 
 static int __init armctrl_of_init(struct device_node *node,
-	struct device_node *parent)
+				  struct device_node *parent,
+				  bool is_2836)
 {
 	void __iomem *base;
 	int irq, b, i;
@@ -169,54 +170,90 @@
 		}
 	}
 
-	set_handle_irq(bcm2835_handle_irq);
+	if (is_2836) {
+		int parent_irq = irq_of_parse_and_map(node, 0);
+
+		if (!parent_irq) {
+			panic("%s: unable to get parent interrupt.\n",
+			      node->full_name);
+		}
+		irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
+	} else {
+		set_handle_irq(bcm2835_handle_irq);
+	}
+
 	return 0;
 }
 
+static int __init bcm2835_armctrl_of_init(struct device_node *node,
+					  struct device_node *parent)
+{
+	return armctrl_of_init(node, parent, false);
+}
+
+static int __init bcm2836_armctrl_of_init(struct device_node *node,
+					  struct device_node *parent)
+{
+	return armctrl_of_init(node, parent, true);
+}
+
+
 /*
  * Handle each interrupt across the entire interrupt controller.  This reads the
  * status register before handling each interrupt, which is necessary given that
  * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
  */
 
-static void armctrl_handle_bank(int bank, struct pt_regs *regs)
+static u32 armctrl_translate_bank(int bank)
 {
-	u32 stat, irq;
+	u32 stat = readl_relaxed(intc.pending[bank]);
 
-	while ((stat = readl_relaxed(intc.pending[bank]))) {
-		irq = MAKE_HWIRQ(bank, ffs(stat) - 1);
-		handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
-	}
+	return MAKE_HWIRQ(bank, ffs(stat) - 1);
 }
 
-static void armctrl_handle_shortcut(int bank, struct pt_regs *regs,
-	u32 stat)
+static u32 armctrl_translate_shortcut(int bank, u32 stat)
 {
-	u32 irq = MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
-	handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
+	return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]);
+}
+
+static u32 get_next_armctrl_hwirq(void)
+{
+	u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK;
+
+	if (stat == 0)
+		return ~0;
+	else if (stat & BANK0_HWIRQ_MASK)
+		return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
+	else if (stat & SHORTCUT1_MASK)
+		return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK);
+	else if (stat & SHORTCUT2_MASK)
+		return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK);
+	else if (stat & BANK1_HWIRQ)
+		return armctrl_translate_bank(1);
+	else if (stat & BANK2_HWIRQ)
+		return armctrl_translate_bank(2);
+	else
+		BUG();
 }
 
 static void __exception_irq_entry bcm2835_handle_irq(
 	struct pt_regs *regs)
 {
-	u32 stat, irq;
+	u32 hwirq;
 
-	while ((stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK)) {
-		if (stat & BANK0_HWIRQ_MASK) {
-			irq = MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1);
-			handle_IRQ(irq_linear_revmap(intc.domain, irq), regs);
-		} else if (stat & SHORTCUT1_MASK) {
-			armctrl_handle_shortcut(1, regs, stat & SHORTCUT1_MASK);
-		} else if (stat & SHORTCUT2_MASK) {
-			armctrl_handle_shortcut(2, regs, stat & SHORTCUT2_MASK);
-		} else if (stat & BANK1_HWIRQ) {
-			armctrl_handle_bank(1, regs);
-		} else if (stat & BANK2_HWIRQ) {
-			armctrl_handle_bank(2, regs);
-		} else {
-			BUG();
-		}
-	}
+	while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+		handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
 }
 
-IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init);
+static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+	u32 hwirq;
+
+	while ((hwirq = get_next_armctrl_hwirq()) != ~0)
+		generic_handle_irq(irq_linear_revmap(intc.domain, hwirq));
+}
+
+IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic",
+		bcm2835_armctrl_of_init);
+IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic",
+		bcm2836_armctrl_of_init);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
new file mode 100644
index 0000000..f687082
--- /dev/null
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -0,0 +1,275 @@
+/*
+ * Root interrupt controller for the BCM2836 (Raspberry Pi 2).
+ *
+ * Copyright 2015 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <asm/exception.h>
+
+/*
+ * The low 2 bits identify the CPU that the GPU IRQ goes to, and the
+ * next 2 bits identify the CPU that the GPU FIQ goes to.
+ */
+#define LOCAL_GPU_ROUTING		0x00c
+/* When setting bits 0-3, enables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_SET		0x010
+/* When setting bits 0-3, disables PMU interrupts on that CPU. */
+#define LOCAL_PM_ROUTING_CLR		0x014
+/*
+ * The low 4 bits of this are the CPU's timer IRQ enables, and the
+ * next 4 bits are the CPU's timer FIQ enables (which override the IRQ
+ * bits).
+ */
+#define LOCAL_TIMER_INT_CONTROL0	0x040
+/*
+ * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and
+ * the next 4 bits are the CPU's per-mailbox FIQ enables (which
+ * override the IRQ bits).
+ */
+#define LOCAL_MAILBOX_INT_CONTROL0	0x050
+/*
+ * The CPU's interrupt status register.  Bits are defined by the
+ * LOCAL_IRQ_* bits below.
+ */
+#define LOCAL_IRQ_PENDING0		0x060
+/* Same status bits as above, but for FIQ. */
+#define LOCAL_FIQ_PENDING0		0x070
+/*
+ * Mailbox0 write-to-set bits.  There are 16 mailboxes, 4 per CPU, and
+ * these bits are organized by mailbox number and then CPU number.  We
+ * use mailbox 0 for IPIs.  The mailbox's interrupt is raised while
+ * any bit is set.
+ */
+#define LOCAL_MAILBOX0_SET0		0x080
+/* Mailbox0 write-to-clear bits. */
+#define LOCAL_MAILBOX0_CLR0		0x0c0
+
+#define LOCAL_IRQ_CNTPSIRQ	0
+#define LOCAL_IRQ_CNTPNSIRQ	1
+#define LOCAL_IRQ_CNTHPIRQ	2
+#define LOCAL_IRQ_CNTVIRQ	3
+#define LOCAL_IRQ_MAILBOX0	4
+#define LOCAL_IRQ_MAILBOX1	5
+#define LOCAL_IRQ_MAILBOX2	6
+#define LOCAL_IRQ_MAILBOX3	7
+#define LOCAL_IRQ_GPU_FAST	8
+#define LOCAL_IRQ_PMU_FAST	9
+#define LAST_IRQ		LOCAL_IRQ_PMU_FAST
+
+struct bcm2836_arm_irqchip_intc {
+	struct irq_domain *domain;
+	void __iomem *base;
+};
+
+static struct bcm2836_arm_irqchip_intc intc  __read_mostly;
+
+static void bcm2836_arm_irqchip_mask_per_cpu_irq(unsigned int reg_offset,
+						 unsigned int bit,
+						 int cpu)
+{
+	void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+	writel(readl(reg) & ~BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_unmask_per_cpu_irq(unsigned int reg_offset,
+						   unsigned int bit,
+						 int cpu)
+{
+	void __iomem *reg = intc.base + reg_offset + 4 * cpu;
+
+	writel(readl(reg) | BIT(bit), reg);
+}
+
+static void bcm2836_arm_irqchip_mask_timer_irq(struct irq_data *d)
+{
+	bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+					     d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+					     smp_processor_id());
+}
+
+static void bcm2836_arm_irqchip_unmask_timer_irq(struct irq_data *d)
+{
+	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0,
+					       d->hwirq - LOCAL_IRQ_CNTPSIRQ,
+					       smp_processor_id());
+}
+
+static struct irq_chip bcm2836_arm_irqchip_timer = {
+	.name		= "bcm2836-timer",
+	.irq_mask	= bcm2836_arm_irqchip_mask_timer_irq,
+	.irq_unmask	= bcm2836_arm_irqchip_unmask_timer_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
+{
+	writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_CLR);
+}
+
+static void bcm2836_arm_irqchip_unmask_pmu_irq(struct irq_data *d)
+{
+	writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_SET);
+}
+
+static struct irq_chip bcm2836_arm_irqchip_pmu = {
+	.name		= "bcm2836-pmu",
+	.irq_mask	= bcm2836_arm_irqchip_mask_pmu_irq,
+	.irq_unmask	= bcm2836_arm_irqchip_unmask_pmu_irq,
+};
+
+static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
+{
+}
+
+static void bcm2836_arm_irqchip_unmask_gpu_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip bcm2836_arm_irqchip_gpu = {
+	.name		= "bcm2836-gpu",
+	.irq_mask	= bcm2836_arm_irqchip_mask_gpu_irq,
+	.irq_unmask	= bcm2836_arm_irqchip_unmask_gpu_irq,
+};
+
+static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip)
+{
+	int irq = irq_create_mapping(intc.domain, hwirq);
+
+	irq_set_percpu_devid(irq);
+	irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq);
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+}
+
+static void
+__exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	u32 stat;
+
+	stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu);
+	if (stat & 0x10) {
+#ifdef CONFIG_SMP
+		void __iomem *mailbox0 = (intc.base +
+					  LOCAL_MAILBOX0_CLR0 + 16 * cpu);
+		u32 mbox_val = readl(mailbox0);
+		u32 ipi = ffs(mbox_val) - 1;
+
+		writel(1 << ipi, mailbox0);
+		handle_IPI(ipi, regs);
+#endif
+	} else {
+		u32 hwirq = ffs(stat) - 1;
+
+		handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
+	}
+}
+
+#ifdef CONFIG_SMP
+static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
+					 unsigned int ipi)
+{
+	int cpu;
+	void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0;
+
+	/*
+	 * Ensure that stores to normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb();
+
+	for_each_cpu(cpu, mask)	{
+		writel(1 << ipi, mailbox0_base + 16 * cpu);
+	}
+}
+
+/* Unmasks the IPI on the CPU when it's online. */
+static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
+					  unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
+	unsigned int mailbox = 0;
+
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+		bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
+	else if (action == CPU_DYING)
+		bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
+	.notifier_call = bcm2836_arm_irqchip_cpu_notify,
+	.priority = 100,
+};
+#endif
+
+static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
+	.xlate = irq_domain_xlate_onecell
+};
+
+static void
+bcm2836_arm_irqchip_smp_init(void)
+{
+#ifdef CONFIG_SMP
+	/* Unmask IPIs to the boot CPU. */
+	bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
+				       CPU_STARTING,
+				       (void *)smp_processor_id());
+	register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+
+	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
+#endif
+}
+
+static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
+						      struct device_node *parent)
+{
+	intc.base = of_iomap(node, 0);
+	if (!intc.base) {
+		panic("%s: unable to map local interrupt registers\n",
+			node->full_name);
+	}
+
+	intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1,
+					    &bcm2836_arm_irqchip_intc_ops,
+					    NULL);
+	if (!intc.domain)
+		panic("%s: unable to create IRQ domain\n", node->full_name);
+
+	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
+					 &bcm2836_arm_irqchip_timer);
+	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ,
+					 &bcm2836_arm_irqchip_timer);
+	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ,
+					 &bcm2836_arm_irqchip_timer);
+	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ,
+					 &bcm2836_arm_irqchip_timer);
+	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST,
+					 &bcm2836_arm_irqchip_gpu);
+	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST,
+					 &bcm2836_arm_irqchip_pmu);
+
+	bcm2836_arm_irqchip_smp_init();
+
+	set_handle_irq(bcm2836_arm_irqchip_handle_irq);
+	return 0;
+}
+
+IRQCHIP_DECLARE(bcm2836_arm_irqchip_l1_intc, "brcm,bcm2836-l1-intc",
+		bcm2836_arm_irqchip_l1_intc_of_init);
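The handler above converts a pending-status word into a hardware IRQ number with ffs(stat) - 1, so the lowest pending source is taken first. A minimal user-space sketch of that decode, with a made-up status value (purely illustrative, not part of the driver):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int stat = (1u << 1) | (1u << 4) | (1u << 8);	/* hypothetical pending bits */

	while (stat) {
		int hwirq = ffs(stat) - 1;	/* lowest pending source first */

		printf("dispatch hwirq %d\n", hwirq);
		stat &= ~(1u << hwirq);		/* clear it and check for more */
	}
	return 0;
}

The dw-apb-ictl handler further down uses the same scan loop; only the register that supplies "stat" differs.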
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index d3b8c8b..409bdc6 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -29,10 +29,9 @@
 #include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/types.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 
-#include "irqchip.h"
-
 #define IRQS_PER_WORD		32
 #define REG_BYTES_PER_IRQ_WORD	(sizeof(u32) * 4)
 #define MAX_WORDS		8
@@ -257,8 +256,8 @@
 		pr_err("failed to map parent interrupt %d\n", parent_irq);
 		return -EINVAL;
 	}
-	irq_set_handler_data(parent_irq, intc);
-	irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle);
+	irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
+					 intc);
 
 	return 0;
 }
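This hunk is the conversion repeated throughout the series: the two-step irq_set_handler_data() + irq_set_chained_handler() setup becomes a single irq_set_chained_handler_and_data() call, so the chained handler can no longer fire before its data pointer is installed. A hedged sketch of the pattern in a hypothetical driver (the foo_* names are illustrative and not from this patch):

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>

struct foo_intc {
	void __iomem *base;		/* hypothetical controller state */
};

static void foo_chained_handler(unsigned int irq, struct irq_desc *desc)
{
	struct foo_intc *priv = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);
	(void)priv;	/* a real handler would demux priv->base and call generic_handle_irq() */
	chained_irq_exit(chip, desc);
}

void foo_setup_cascade(unsigned int parent_irq, struct foo_intc *priv)
{
	/*
	 * Old, racy two-step setup:
	 *	irq_set_handler_data(parent_irq, priv);
	 *	irq_set_chained_handler(parent_irq, foo_chained_handler);
	 *
	 * New combined helper used by these drivers:
	 */
	irq_set_chained_handler_and_data(parent_irq, foo_chained_handler, priv);
}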
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 3ba5cc7..d3f9769 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -26,10 +26,9 @@
 #include <linux/irqdomain.h>
 #include <linux/reboot.h>
 #include <linux/bitops.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 
-#include "irqchip.h"
-
 /* Register offset in the L2 interrupt controller */
 #define IRQEN		0x00
 #define IRQSTAT		0x04
@@ -38,6 +37,11 @@
 #define MAX_MAPPINGS	(MAX_WORDS * 2)
 #define IRQS_PER_WORD	32
 
+struct bcm7120_l1_intc_data {
+	struct bcm7120_l2_intc_data *b;
+	u32 irq_map_mask[MAX_WORDS];
+};
+
 struct bcm7120_l2_intc_data {
 	unsigned int n_words;
 	void __iomem *map_base[MAX_MAPPINGS];
@@ -47,14 +51,15 @@
 	struct irq_domain *domain;
 	bool can_wake;
 	u32 irq_fwd_mask[MAX_WORDS];
-	u32 irq_map_mask[MAX_WORDS];
+	struct bcm7120_l1_intc_data *l1_data;
 	int num_parent_irqs;
 	const __be32 *map_mask_prop;
 };
 
 static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
 {
-	struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc);
+	struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
+	struct bcm7120_l2_intc_data *b = data->b;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int idx;
 
@@ -69,7 +74,8 @@
 
 		irq_gc_lock(gc);
 		pending = irq_reg_readl(gc, b->stat_offset[idx]) &
-					    gc->mask_cache;
+					    gc->mask_cache &
+					    data->irq_map_mask[idx];
 		irq_gc_unlock(gc);
 
 		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
@@ -81,11 +87,10 @@
 	chained_irq_exit(chip, desc);
 }
 
-static void bcm7120_l2_intc_suspend(struct irq_data *d)
+static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc)
 {
-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct bcm7120_l2_intc_data *b = gc->private;
+	struct irq_chip_type *ct = gc->chip_types;
 
 	irq_gc_lock(gc);
 	if (b->can_wake)
@@ -94,10 +99,9 @@
 	irq_gc_unlock(gc);
 }
 
-static void bcm7120_l2_intc_resume(struct irq_data *d)
+static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc)
 {
-	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
-	struct irq_chip_type *ct = irq_data_get_chip_type(d);
+	struct irq_chip_type *ct = gc->chip_types;
 
 	/* Restore the saved mask */
 	irq_gc_lock(gc);
@@ -107,8 +111,9 @@
 
 static int bcm7120_l2_intc_init_one(struct device_node *dn,
 					struct bcm7120_l2_intc_data *data,
-					int irq)
+					int irq, u32 *valid_mask)
 {
+	struct bcm7120_l1_intc_data *l1_data = &data->l1_data[irq];
 	int parent_irq;
 	unsigned int idx;
 
@@ -120,20 +125,28 @@
 
 	/* For multiple parent IRQs with multiple words, this looks like:
 	 * <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...>
+	 *
+	 * We need to associate a given parent interrupt with its corresponding
+	 * map_mask in order to mask the status register with it because we
+	 * have the same handler being called for multiple parent interrupts.
+	 *
+	 * This is typically something needed on BCM7xxx (STB chips).
 	 */
 	for (idx = 0; idx < data->n_words; idx++) {
 		if (data->map_mask_prop) {
-			data->irq_map_mask[idx] |=
+			l1_data->irq_map_mask[idx] |=
 				be32_to_cpup(data->map_mask_prop +
 					     irq * data->n_words + idx);
 		} else {
-			data->irq_map_mask[idx] = 0xffffffff;
+			l1_data->irq_map_mask[idx] = 0xffffffff;
 		}
+		valid_mask[idx] |= l1_data->irq_map_mask[idx];
 	}
 
-	irq_set_handler_data(parent_irq, data);
-	irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle);
+	l1_data->b = data;
 
+	irq_set_chained_handler_and_data(parent_irq,
+					 bcm7120_l2_intc_irq_handle, l1_data);
 	return 0;
 }
 
@@ -214,6 +227,7 @@
 	struct irq_chip_type *ct;
 	int ret = 0;
 	unsigned int idx, irq, flags;
+	u32 valid_mask[MAX_WORDS] = { };
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -226,9 +240,16 @@
 		goto out_unmap;
 	}
 
+	data->l1_data = kcalloc(data->num_parent_irqs, sizeof(*data->l1_data),
+				GFP_KERNEL);
+	if (!data->l1_data) {
+		ret = -ENOMEM;
+		goto out_free_l1_data;
+	}
+
 	ret = iomap_regs_fn(dn, data);
 	if (ret < 0)
-		goto out_unmap;
+		goto out_free_l1_data;
 
 	for (idx = 0; idx < data->n_words; idx++) {
 		__raw_writel(data->irq_fwd_mask[idx],
@@ -237,16 +258,16 @@
 	}
 
 	for (irq = 0; irq < data->num_parent_irqs; irq++) {
-		ret = bcm7120_l2_intc_init_one(dn, data, irq);
+		ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
 		if (ret)
-			goto out_unmap;
+			goto out_free_l1_data;
 	}
 
 	data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words,
 					     &irq_generic_chip_ops, NULL);
 	if (!data->domain) {
 		ret = -ENOMEM;
-		goto out_unmap;
+		goto out_free_l1_data;
 	}
 
 	/* MIPS chips strapped for BE will automagically configure the
@@ -270,7 +291,7 @@
 		irq = idx * IRQS_PER_WORD;
 		gc = irq_get_domain_generic_chip(data->domain, irq);
 
-		gc->unused = 0xffffffff & ~data->irq_map_mask[idx];
+		gc->unused = 0xffffffff & ~valid_mask[idx];
 		gc->private = data;
 		ct = gc->chip_types;
 
@@ -280,8 +301,15 @@
 		ct->chip.irq_mask = irq_gc_mask_clr_bit;
 		ct->chip.irq_unmask = irq_gc_mask_set_bit;
 		ct->chip.irq_ack = irq_gc_noop;
-		ct->chip.irq_suspend = bcm7120_l2_intc_suspend;
-		ct->chip.irq_resume = bcm7120_l2_intc_resume;
+		gc->suspend = bcm7120_l2_intc_suspend;
+		gc->resume = bcm7120_l2_intc_resume;
+
+		/*
+		 * Initialize the mask cache, in case we need it for
+		 * saving/restoring the fwd mask even without any child
+		 * interrupts installed.
+		 */
+		gc->mask_cache = irq_reg_readl(gc, ct->regs.mask);
 
 		if (data->can_wake) {
 			/* This IRQ chip can wake the system, set all
@@ -300,6 +328,8 @@
 
 out_free_domain:
 	irq_domain_remove(data->domain);
+out_free_l1_data:
+	kfree(data->l1_data);
 out_unmap:
 	for (idx = 0; idx < MAX_MAPPINGS; idx++) {
 		if (data->map_base[idx])
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index d6bcc6b..aedda06 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -32,8 +32,6 @@
 #include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 
-#include "irqchip.h"
-
 /* Register offsets in the L2 interrupt controller */
 #define CPU_STATUS	0x00
 #define CPU_SET		0x04
@@ -51,11 +49,13 @@
 	u32 saved_mask; /* for suspend/resume */
 };
 
-static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
+				       struct irq_desc *desc)
 {
 	struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
 	struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	u32 status;
 
 	chained_irq_enter(chip, desc);
@@ -172,8 +172,8 @@
 	}
 
 	/* Set the IRQ chaining logic */
-	irq_set_handler_data(data->parent_irq, data);
-	irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle);
+	irq_set_chained_handler_and_data(data->parent_irq,
+					 brcmstb_l2_intc_irq_handle, data);
 
 	gc = irq_get_domain_generic_chip(data->domain, 0);
 	gc->reg_base = data->base;
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 33127f1..2dd929e 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -11,6 +11,7 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -19,8 +20,6 @@
 #include <asm/exception.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 #define CLPS711X_INTSR1	(0x0240)
 #define CLPS711X_INTMR1	(0x0280)
 #define CLPS711X_BLEOI	(0x0600)
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index c12bb93..a7f5626 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -11,13 +11,12 @@
  */
 #include <linux/err.h>
 #include <linux/io.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/slab.h>
 
-#include "irqchip.h"
-
 #define IRQ_FREE	-1
 #define IRQ_RESERVED	-2
 #define IRQ_SKIP	-3
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index 3cbc658..dad85e7 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -12,6 +12,7 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -20,8 +21,6 @@
 
 #include <asm/exception.h>
 
-#include "irqchip.h"
-
 #define UC_IRQ_CONTROL		0x04
 
 #define IC_FLAG_CLEAR_LO	0x00
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 53bb732..efd95d9 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -13,36 +13,36 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
-#include "irqchip.h"
-
 #define APB_INT_ENABLE_L	0x00
 #define APB_INT_ENABLE_H	0x04
 #define APB_INT_MASK_L		0x08
 #define APB_INT_MASK_H		0x0c
 #define APB_INT_FINALSTATUS_L	0x30
 #define APB_INT_FINALSTATUS_H	0x34
+#define APB_INT_BASE_OFFSET	0x04
 
 static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct irq_chip_generic *gc = irq_get_handler_data(irq);
-	struct irq_domain *d = gc->private;
-	u32 stat;
+	struct irq_domain *d = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int n;
 
 	chained_irq_enter(chip, desc);
 
-	for (n = 0; n < gc->num_ct; n++) {
-		stat = readl_relaxed(gc->reg_base +
-				     APB_INT_FINALSTATUS_L + 4 * n);
+	for (n = 0; n < d->revmap_size; n += 32) {
+		struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
+		u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);
+
 		while (stat) {
 			u32 hwirq = ffs(stat) - 1;
-			generic_handle_irq(irq_find_mapping(d,
-					    gc->irq_base + hwirq + 32 * n));
+			u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);
+
+			generic_handle_irq(virq);
 			stat &= ~(1 << hwirq);
 		}
 	}
@@ -73,7 +73,7 @@
 	struct irq_domain *domain;
 	struct irq_chip_generic *gc;
 	void __iomem *iobase;
-	int ret, nrirqs, irq;
+	int ret, nrirqs, irq, i;
 	u32 reg;
 
 	/* Map the parent interrupt for the chained handler */
@@ -128,35 +128,25 @@
 		goto err_unmap;
 	}
 
-	ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
-					     np->name, handle_level_irq, clr, 0,
-					     IRQ_GC_MASK_CACHE_PER_TYPE |
+	ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
+					     handle_level_irq, clr, 0,
 					     IRQ_GC_INIT_MASK_CACHE);
 	if (ret) {
 		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
 		goto err_unmap;
 	}
 
-	gc = irq_get_domain_generic_chip(domain, 0);
-	gc->private = domain;
-	gc->reg_base = iobase;
-
-	gc->chip_types[0].regs.mask = APB_INT_MASK_L;
-	gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
-	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
-	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
-	gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
-
-	if (nrirqs > 32) {
-		gc->chip_types[1].regs.mask = APB_INT_MASK_H;
-		gc->chip_types[1].regs.enable = APB_INT_ENABLE_H;
-		gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
-		gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
-		gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume;
+	for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
+		gc = irq_get_domain_generic_chip(domain, i * 32);
+		gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
+		gc->chip_types[0].regs.mask = APB_INT_MASK_L;
+		gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
+		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
 	}
 
-	irq_set_handler_data(irq, gc);
-	irq_set_chained_handler(irq, dw_apb_ictl_handler);
+	irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);
 
 	return 0;
 
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index fdf7065..db04fc1 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -45,13 +45,11 @@
 
 struct v2m_data {
 	spinlock_t msi_cnt_lock;
-	struct msi_controller mchip;
 	struct resource res;	/* GICv2m resource */
 	void __iomem *base;	/* GICv2m virt address */
 	u32 spi_start;		/* The SPI number that MSIs start */
 	u32 nr_spis;		/* The number of SPIs for MSIs */
 	unsigned long *bm;	/* MSI vector bitmap */
-	struct irq_domain *domain;
 };
 
 static void gicv2m_mask_msi_irq(struct irq_data *d)
@@ -213,11 +211,25 @@
 	return true;
 }
 
+static struct irq_chip gicv2m_pmsi_irq_chip = {
+	.name			= "pMSI",
+};
+
+static struct msi_domain_ops gicv2m_pmsi_ops = {
+};
+
+static struct msi_domain_info gicv2m_pmsi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &gicv2m_pmsi_ops,
+	.chip	= &gicv2m_pmsi_irq_chip,
+};
+
 static int __init gicv2m_init_one(struct device_node *node,
 				  struct irq_domain *parent)
 {
 	int ret;
 	struct v2m_data *v2m;
+	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
 
 	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
 	if (!v2m) {
@@ -261,32 +273,28 @@
 		goto err_iounmap;
 	}
 
-	v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m);
-	if (!v2m->domain) {
+	inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m);
+	if (!inner_domain) {
 		pr_err("Failed to create GICv2m domain\n");
 		ret = -ENOMEM;
 		goto err_free_bm;
 	}
 
-	v2m->domain->parent = parent;
-	v2m->mchip.of_node = node;
-	v2m->mchip.domain = pci_msi_create_irq_domain(node,
-						      &gicv2m_msi_domain_info,
-						      v2m->domain);
-	if (!v2m->mchip.domain) {
-		pr_err("Failed to create MSI domain\n");
+	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+	inner_domain->parent = parent;
+	pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info,
+					       inner_domain);
+	plat_domain = platform_msi_create_irq_domain(node,
+						     &gicv2m_pmsi_domain_info,
+						     inner_domain);
+	if (!pci_domain || !plat_domain) {
+		pr_err("Failed to create MSI domains\n");
 		ret = -ENOMEM;
 		goto err_free_domains;
 	}
 
 	spin_lock_init(&v2m->msi_cnt_lock);
 
-	ret = of_pci_msi_chip_add(&v2m->mchip);
-	if (ret) {
-		pr_err("Failed to add msi_chip.\n");
-		goto err_free_domains;
-	}
-
 	pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
 		(unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
 		v2m->spi_start, (v2m->spi_start + v2m->nr_spis));
@@ -294,10 +302,12 @@
 	return 0;
 
 err_free_domains:
-	if (v2m->mchip.domain)
-		irq_domain_remove(v2m->mchip.domain);
-	if (v2m->domain)
-		irq_domain_remove(v2m->domain);
+	if (plat_domain)
+		irq_domain_remove(plat_domain);
+	if (pci_domain)
+		irq_domain_remove(pci_domain);
+	if (inner_domain)
+		irq_domain_remove(inner_domain);
 err_free_bm:
 	kfree(v2m->bm);
 err_iounmap:
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
new file mode 100644
index 0000000..cf351c6
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
+static void its_mask_msi_irq(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void its_unmask_msi_irq(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+static struct irq_chip its_msi_irq_chip = {
+	.name			= "ITS-MSI",
+	.irq_unmask		= its_unmask_msi_irq,
+	.irq_mask		= its_mask_msi_irq,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+};
+
+struct its_pci_alias {
+	struct pci_dev	*pdev;
+	u32		dev_id;
+	u32		count;
+};
+
+static int its_pci_msi_vec_count(struct pci_dev *pdev)
+{
+	int msi, msix;
+
+	msi = max(pci_msi_vec_count(pdev), 0);
+	msix = max(pci_msix_vec_count(pdev), 0);
+
+	return max(msi, msix);
+}
+
+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct its_pci_alias *dev_alias = data;
+
+	dev_alias->dev_id = alias;
+	if (pdev != dev_alias->pdev)
+		dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+
+	return 0;
+}
+
+static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+			       int nvec, msi_alloc_info_t *info)
+{
+	struct pci_dev *pdev;
+	struct its_pci_alias dev_alias;
+	struct msi_domain_info *msi_info;
+
+	if (!dev_is_pci(dev))
+		return -EINVAL;
+
+	msi_info = msi_get_domain_info(domain->parent);
+
+	pdev = to_pci_dev(dev);
+	dev_alias.pdev = pdev;
+	dev_alias.count = nvec;
+
+	pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
+
+	/* ITS specific DeviceID, as the core ITS ignores dev. */
+	info->scratchpad[0].ul = dev_alias.dev_id;
+
+	return msi_info->ops->msi_prepare(domain->parent,
+					  dev, dev_alias.count, info);
+}
+
+static struct msi_domain_ops its_pci_msi_ops = {
+	.msi_prepare	= its_pci_msi_prepare,
+};
+
+static struct msi_domain_info its_pci_msi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+	.ops	= &its_pci_msi_ops,
+	.chip	= &its_msi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+static int __init its_pci_msi_init(void)
+{
+	struct device_node *np;
+	struct irq_domain *parent;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+		if (!parent || !msi_get_domain_info(parent)) {
+			pr_err("%s: unable to locate ITS domain\n",
+			       np->full_name);
+			continue;
+		}
+
+		if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info,
+					       parent)) {
+			pr_err("%s: unable to create PCI domain\n",
+			       np->full_name);
+			continue;
+		}
+
+		pr_info("PCI/MSI: %s domain created\n", np->full_name);
+	}
+
+	return 0;
+}
+early_initcall(its_pci_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
new file mode 100644
index 0000000..a865505
--- /dev/null
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+static struct irq_chip its_pmsi_irq_chip = {
+	.name			= "ITS-pMSI",
+};
+
+static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
+			    int nvec, msi_alloc_info_t *info)
+{
+	struct msi_domain_info *msi_info;
+	u32 dev_id;
+	int ret;
+
+	msi_info = msi_get_domain_info(domain->parent);
+
+	/* Suck the DeviceID out of the msi-parent property */
+	ret = of_property_read_u32_index(dev->of_node, "msi-parent",
+					 1, &dev_id);
+	if (ret)
+		return ret;
+
+	/* ITS specific DeviceID, as the core ITS ignores dev. */
+	info->scratchpad[0].ul = dev_id;
+
+	return msi_info->ops->msi_prepare(domain->parent,
+					  dev, nvec, info);
+}
+
+static struct msi_domain_ops its_pmsi_ops = {
+	.msi_prepare	= its_pmsi_prepare,
+};
+
+static struct msi_domain_info its_pmsi_domain_info = {
+	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
+	.ops	= &its_pmsi_ops,
+	.chip	= &its_pmsi_irq_chip,
+};
+
+static struct of_device_id its_device_id[] = {
+	{	.compatible	= "arm,gic-v3-its",	},
+	{},
+};
+
+static int __init its_pmsi_init(void)
+{
+	struct device_node *np;
+	struct irq_domain *parent;
+
+	for (np = of_find_matching_node(NULL, its_device_id); np;
+	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_property_read_bool(np, "msi-controller"))
+			continue;
+
+		parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
+		if (!parent || !msi_get_domain_info(parent)) {
+			pr_err("%s: unable to locate ITS domain\n",
+			       np->full_name);
+			continue;
+		}
+
+		if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info,
+						    parent)) {
+			pr_err("%s: unable to create platform domain\n",
+			       np->full_name);
+			continue;
+		}
+
+		pr_info("Platform MSI: %s domain created\n", np->full_name);
+	}
+
+	return 0;
+}
+early_initcall(its_pmsi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c00e2db..26b55c5 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -30,14 +30,13 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 
+#include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <asm/exception.h>
 
-#include "irqchip.h"
-
 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1 << 0)
 
 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
@@ -54,14 +53,12 @@
 
 /*
  * The ITS structure - contains most of the infrastructure, with the
- * msi_controller, the command queue, the collections, and the list of
- * devices writing to it.
+ * top-level MSI domain, the command queue, the collections, and the
+ * list of devices writing to it.
  */
 struct its_node {
 	raw_spinlock_t		lock;
 	struct list_head	entry;
-	struct msi_controller	msi_chip;
-	struct irq_domain	*domain;
 	void __iomem		*base;
 	unsigned long		phys_base;
 	struct its_cmd_block	*cmd_base;
@@ -643,26 +640,6 @@
 	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
 };
 
-static void its_mask_msi_irq(struct irq_data *d)
-{
-	pci_msi_mask_irq(d);
-	irq_chip_mask_parent(d);
-}
-
-static void its_unmask_msi_irq(struct irq_data *d)
-{
-	pci_msi_unmask_irq(d);
-	irq_chip_unmask_parent(d);
-}
-
-static struct irq_chip its_msi_irq_chip = {
-	.name			= "ITS-MSI",
-	.irq_unmask		= its_unmask_msi_irq,
-	.irq_mask		= its_mask_msi_irq,
-	.irq_eoi		= irq_chip_eoi_parent,
-	.irq_write_msi_msg	= pci_msi_domain_write_msg,
-};
-
 /*
  * How we allocate LPIs:
  *
@@ -831,7 +808,7 @@
 	}
 }
 
-static int its_alloc_tables(struct its_node *its)
+static int its_alloc_tables(const char *node_name, struct its_node *its)
 {
 	int err;
 	int i;
@@ -874,7 +851,7 @@
 			if (order >= MAX_ORDER) {
 				order = MAX_ORDER - 1;
 				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
-					its->msi_chip.of_node->full_name, order);
+					node_name, order);
 			}
 		}
 
@@ -944,7 +921,7 @@
 
 		if (val != tmp) {
 			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
-			       its->msi_chip.of_node->full_name, i,
+			       node_name, i,
 			       (unsigned long) val, (unsigned long) tmp);
 			err = -ENXIO;
 			goto out_free;
@@ -1209,85 +1186,50 @@
 	return 0;
 }
 
-struct its_pci_alias {
-	struct pci_dev	*pdev;
-	u32		dev_id;
-	u32		count;
-};
-
-static int its_pci_msi_vec_count(struct pci_dev *pdev)
-{
-	int msi, msix;
-
-	msi = max(pci_msi_vec_count(pdev), 0);
-	msix = max(pci_msix_vec_count(pdev), 0);
-
-	return max(msi, msix);
-}
-
-static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
-{
-	struct its_pci_alias *dev_alias = data;
-
-	dev_alias->dev_id = alias;
-	if (pdev != dev_alias->pdev)
-		dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
-
-	return 0;
-}
-
 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
 			   int nvec, msi_alloc_info_t *info)
 {
-	struct pci_dev *pdev;
 	struct its_node *its;
 	struct its_device *its_dev;
-	struct its_pci_alias dev_alias;
+	struct msi_domain_info *msi_info;
+	u32 dev_id;
 
-	if (!dev_is_pci(dev))
-		return -EINVAL;
+	/*
+	 * We ignore "dev" entierely, and rely on the dev_id that has
+	 * been passed via the scratchpad. This limits this domain's
+	 * usefulness to upper layers that definitely know that they
+	 * are built on top of the ITS.
+	 */
+	dev_id = info->scratchpad[0].ul;
 
-	pdev = to_pci_dev(dev);
-	dev_alias.pdev = pdev;
-	dev_alias.count = nvec;
+	msi_info = msi_get_domain_info(domain);
+	its = msi_info->data;
 
-	pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
-	its = domain->parent->host_data;
-
-	its_dev = its_find_device(its, dev_alias.dev_id);
+	its_dev = its_find_device(its, dev_id);
 	if (its_dev) {
 		/*
 		 * We already have seen this ID, probably through
 		 * another alias (PCI bridge of some sort). No need to
 		 * create the device.
 		 */
-		dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
+		pr_debug("Reusing ITT for devID %x\n", dev_id);
 		goto out;
 	}
 
-	its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
+	its_dev = its_create_device(its, dev_id, nvec);
 	if (!its_dev)
 		return -ENOMEM;
 
-	dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
-		dev_alias.count, ilog2(dev_alias.count));
+	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
 out:
 	info->scratchpad[0].ptr = its_dev;
-	info->scratchpad[1].ptr = dev;
 	return 0;
 }
 
-static struct msi_domain_ops its_pci_msi_ops = {
+static struct msi_domain_ops its_msi_domain_ops = {
 	.msi_prepare	= its_msi_prepare,
 };
 
-static struct msi_domain_info its_pci_msi_domain_info = {
-	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
-	.ops	= &its_pci_msi_ops,
-	.chip	= &its_msi_irq_chip,
-};
-
 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
 				    unsigned int virq,
 				    irq_hw_number_t hwirq)
@@ -1323,9 +1265,9 @@
 
 		irq_domain_set_hwirq_and_chip(domain, virq + i,
 					      hwirq, &its_irq_chip, its_dev);
-		dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
-			(int)(hwirq - its_dev->event_map.lpi_base),
-			(int)hwirq, virq + i);
+		pr_debug("ID:%d pID:%d vID:%d\n",
+			 (int)(hwirq - its_dev->event_map.lpi_base),
+			 (int) hwirq, virq + i);
 	}
 
 	return 0;
@@ -1426,6 +1368,7 @@
 	struct resource res;
 	struct its_node *its;
 	void __iomem *its_base;
+	struct irq_domain *inner_domain;
 	u32 val;
 	u64 baser, tmp;
 	int err;
@@ -1469,7 +1412,6 @@
 	INIT_LIST_HEAD(&its->its_device_list);
 	its->base = its_base;
 	its->phys_base = res.start;
-	its->msi_chip.of_node = node;
 	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
 
 	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1479,7 +1421,7 @@
 	}
 	its->cmd_write = its->cmd_base;
 
-	err = its_alloc_tables(its);
+	err = its_alloc_tables(node->full_name, its);
 	if (err)
 		goto out_free_cmd;
 
@@ -1515,26 +1457,27 @@
 	writeq_relaxed(0, its->base + GITS_CWRITER);
 	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
 
-	if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
-		its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
-		if (!its->domain) {
+	if (of_property_read_bool(node, "msi-controller")) {
+		struct msi_domain_info *info;
+
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
+		if (!info) {
 			err = -ENOMEM;
 			goto out_free_tables;
 		}
 
-		its->domain->parent = parent;
-
-		its->msi_chip.domain = pci_msi_create_irq_domain(node,
-								 &its_pci_msi_domain_info,
-								 its->domain);
-		if (!its->msi_chip.domain) {
+		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
+		if (!inner_domain) {
 			err = -ENOMEM;
-			goto out_free_domains;
+			kfree(info);
+			goto out_free_tables;
 		}
 
-		err = of_pci_msi_chip_add(&its->msi_chip);
-		if (err)
-			goto out_free_domains;
+		inner_domain->parent = parent;
+		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+		info->ops = &its_msi_domain_ops;
+		info->data = its;
+		inner_domain->host_data = info;
 	}
 
 	spin_lock(&its_lock);
@@ -1543,11 +1486,6 @@
 
 	return 0;
 
-out_free_domains:
-	if (its->msi_chip.domain)
-		irq_domain_remove(its->msi_chip.domain);
-	if (its->domain)
-		irq_domain_remove(its->domain);
 out_free_tables:
 	its_free_tables(its);
 out_free_cmd:
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index c52f7ba..e406bc5 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -25,6 +25,7 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 
+#include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/cputype.h>
@@ -32,7 +33,6 @@
 #include <asm/smp_plat.h>
 
 #include "irq-gic-common.h"
-#include "irqchip.h"
 
 struct redist_region {
 	void __iomem		*redist_base;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 4dd8826..aa3e7b8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -38,6 +38,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/irqchip/arm-gic-acpi.h>
@@ -48,7 +49,6 @@
 #include <asm/smp_plat.h>
 
 #include "irq-gic-common.h"
-#include "irqchip.h"
 
 union gic_base {
 	void __iomem *common_base;
@@ -288,8 +288,8 @@
 
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int cascade_irq, gic_irq;
 	unsigned long status;
 
@@ -324,16 +324,17 @@
 #endif
 	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
-	.flags			= IRQCHIP_SET_TYPE_MASKED,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 {
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
-	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
-		BUG();
-	irq_set_chained_handler(irq, gic_handle_cascade_irq);
+	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
+					 &gic_data[gic_nr]);
 }
 
 static u8 gic_get_cpumask(struct gic_chip_data *gic)
@@ -355,9 +356,9 @@
 	return mask;
 }
 
-static void gic_cpu_if_up(void)
+static void gic_cpu_if_up(struct gic_chip_data *gic)
 {
-	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+	void __iomem *cpu_base = gic_data_cpu_base(gic);
 	u32 bypass = 0;
 
 	/*
@@ -401,34 +402,47 @@
 	int i;
 
 	/*
-	 * Get what the GIC says our CPU mask is.
+	 * Setting up the CPU map is only relevant for the primary GIC
+	 * because any nested/secondary GICs do not directly interface
+	 * with the CPU(s).
 	 */
-	BUG_ON(cpu >= NR_GIC_CPU_IF);
-	cpu_mask = gic_get_cpumask(gic);
-	gic_cpu_map[cpu] = cpu_mask;
+	if (gic == &gic_data[0]) {
+		/*
+		 * Get what the GIC says our CPU mask is.
+		 */
+		BUG_ON(cpu >= NR_GIC_CPU_IF);
+		cpu_mask = gic_get_cpumask(gic);
+		gic_cpu_map[cpu] = cpu_mask;
 
-	/*
-	 * Clear our mask from the other map entries in case they're
-	 * still undefined.
-	 */
-	for (i = 0; i < NR_GIC_CPU_IF; i++)
-		if (i != cpu)
-			gic_cpu_map[i] &= ~cpu_mask;
+		/*
+		 * Clear our mask from the other map entries in case they're
+		 * still undefined.
+		 */
+		for (i = 0; i < NR_GIC_CPU_IF; i++)
+			if (i != cpu)
+				gic_cpu_map[i] &= ~cpu_mask;
+	}
 
 	gic_cpu_config(dist_base, NULL);
 
 	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
-	gic_cpu_if_up();
+	gic_cpu_if_up(gic);
 }
 
-void gic_cpu_if_down(void)
+int gic_cpu_if_down(unsigned int gic_nr)
 {
-	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+	void __iomem *cpu_base;
 	u32 val = 0;
 
+	if (gic_nr >= MAX_GIC_NR)
+		return -EINVAL;
+
+	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
 	val = readl(cpu_base + GIC_CPU_CTRL);
 	val &= ~GICC_ENABLE;
 	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
+
+	return 0;
 }
 
 #ifdef CONFIG_CPU_PM
@@ -564,7 +578,7 @@
 					dist_base + GIC_DIST_PRI + i * 4);
 
 	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
-	gic_cpu_if_up();
+	gic_cpu_if_up(&gic_data[gic_nr]);
 }
 
 static int gic_notifier(struct notifier_block *self, unsigned long cmd,	void *v)
@@ -880,11 +894,6 @@
 	.xlate = gic_irq_domain_xlate,
 };
 
-void gic_set_irqchip_flags(unsigned long flags)
-{
-	gic_chip.flags |= flags;
-}
-
 void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 			   void __iomem *dist_base, void __iomem *cpu_base,
 			   u32 percpu_offset, struct device_node *node)
@@ -930,13 +939,6 @@
 	}
 
 	/*
-	 * Initialize the CPU interface map to all CPUs.
-	 * It will be refined as each CPU probes its ID.
-	 */
-	for (i = 0; i < NR_GIC_CPU_IF; i++)
-		gic_cpu_map[i] = 0xff;
-
-	/*
 	 * Find out how many interrupts are supported.
 	 * The GIC only supports up to 1020 interrupt sources.
 	 */
@@ -981,6 +983,13 @@
 		return;
 
 	if (gic_nr == 0) {
+		/*
+		 * Initialize the CPU interface map to all CPUs.
+		 * It will be refined as each CPU probes its ID.
+		 * This is only necessary for the primary GIC.
+		 */
+		for (i = 0; i < NR_GIC_CPU_IF; i++)
+			gic_cpu_map[i] = 0xff;
 #ifdef CONFIG_SMP
 		set_smp_cross_call(gic_raise_softirq);
 		register_cpu_notifier(&gic_cpu_notifier);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 0cae45d..a0128c7 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -41,6 +41,7 @@
 #include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic.h>
 
 #include <asm/irq.h>
@@ -48,7 +49,6 @@
 #include <asm/smp_plat.h>
 
 #include "irq-gic-common.h"
-#include "irqchip.h"
 
 #define HIP04_MAX_IRQS		510
 
@@ -202,7 +202,9 @@
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= hip04_irq_set_affinity,
 #endif
-	.flags			= IRQCHIP_SET_TYPE_MASKED,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
 };
 
 static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
new file mode 100644
index 0000000..4836102
--- /dev/null
+++ b/drivers/irqchip/irq-i8259.c
@@ -0,0 +1,384 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+#include <linux/irq.h>
+
+#include <asm/i8259.h>
+#include <asm/io.h>
+
+/*
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ * plus some generic x86-specific things, if generic specifics make
+ * any sense at all.
+ * This file should become arch/i386/kernel/irq.c when the old irq.c
+ * moves to arch-independent land.
+ */
+
+static int i8259A_auto_eoi = -1;
+DEFINE_RAW_SPINLOCK(i8259A_lock);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
+static void init_8259A(int auto_eoi);
+
+static struct irq_chip i8259A_chip = {
+	.name			= "XT-PIC",
+	.irq_mask		= disable_8259A_irq,
+	.irq_disable		= disable_8259A_irq,
+	.irq_unmask		= enable_8259A_irq,
+	.irq_mask_ack		= mask_and_ack_8259A,
+};
+
+/*
+ * 8259A PIC functions to handle ISA devices:
+ */
+
+/*
+ * This contains the irq mask for both 8259A irq controllers,
+ */
+static unsigned int cached_irq_mask = 0xffff;
+
+#define cached_master_mask	(cached_irq_mask)
+#define cached_slave_mask	(cached_irq_mask >> 8)
+
+static void disable_8259A_irq(struct irq_data *d)
+{
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+	unsigned long flags;
+
+	mask = 1 << irq;
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	cached_irq_mask |= mask;
+	if (irq & 8)
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+	else
+		outb(cached_master_mask, PIC_MASTER_IMR);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+static void enable_8259A_irq(struct irq_data *d)
+{
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
+	unsigned long flags;
+
+	mask = ~(1 << irq);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	cached_irq_mask &= mask;
+	if (irq & 8)
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+	else
+		outb(cached_master_mask, PIC_MASTER_IMR);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+int i8259A_irq_pending(unsigned int irq)
+{
+	unsigned int mask;
+	unsigned long flags;
+	int ret;
+
+	irq -= I8259A_IRQ_BASE;
+	mask = 1 << irq;
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	if (irq < 8)
+		ret = inb(PIC_MASTER_CMD) & mask;
+	else
+		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+	return ret;
+}
+
+void make_8259A_irq(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+	enable_irq(irq);
+}
+
+/*
+ * This function is expected to be called rarely. Switching between
+ * 8259A registers is slow.
+ * The caller must hold the irq controller spinlock
+ * while calling this function.
+ */
+static inline int i8259A_irq_real(unsigned int irq)
+{
+	int value;
+	int irqmask = 1 << irq;
+
+	if (irq < 8) {
+		outb(0x0B, PIC_MASTER_CMD);	/* ISR register */
+		value = inb(PIC_MASTER_CMD) & irqmask;
+		outb(0x0A, PIC_MASTER_CMD);	/* back to the IRR register */
+		return value;
+	}
+	outb(0x0B, PIC_SLAVE_CMD);	/* ISR register */
+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+	outb(0x0A, PIC_SLAVE_CMD);	/* back to the IRR register */
+	return value;
+}
+
+/*
+ * Careful! The 8259A is a fragile beast, it pretty
+ * much _has_ to be done exactly like this (mask it
+ * first, _then_ send the EOI, and the order of EOI
+ * to the two 8259s is important!)
+ */
+static void mask_and_ack_8259A(struct irq_data *d)
+{
+	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
+	unsigned long flags;
+
+	irqmask = 1 << irq;
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+	/*
+	 * Lightweight spurious IRQ detection. We do not want
+	 * to overdo spurious IRQ handling - it's usually a sign
+	 * of hardware problems, so we only do the checks we can
+	 * do without slowing down good hardware unnecessarily.
+	 *
+	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
+	 * usually resulting from the 8259A-1|2 PICs) occur
+	 * even if the IRQ is masked in the 8259A. Thus we
+	 * can check spurious 8259A IRQs without doing the
+	 * quite slow i8259A_irq_real() call for every IRQ.
+	 * This does not cover 100% of spurious interrupts,
+	 * but should be enough to warn the user that there
+	 * is something bad going on ...
+	 */
+	if (cached_irq_mask & irqmask)
+		goto spurious_8259A_irq;
+	cached_irq_mask |= irqmask;
+
+handle_real_irq:
+	if (irq & 8) {
+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+		outb(0x60+(irq&7), PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR, PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
+	} else {
+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_master_mask, PIC_MASTER_IMR);
+		outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI' to master */
+	}
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+	return;
+
+spurious_8259A_irq:
+	/*
+	 * this is the slow path - should happen rarely.
+	 */
+	if (i8259A_irq_real(irq))
+		/*
+		 * oops, the IRQ _is_ in service according to the
+		 * 8259A - not spurious, go handle it.
+		 */
+		goto handle_real_irq;
+
+	{
+		static int spurious_irq_mask;
+		/*
+		 * At this point we can be sure the IRQ is spurious,
+		 * let's ACK and report it. [once per IRQ]
+		 */
+		if (!(spurious_irq_mask & irqmask)) {
+			printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
+			spurious_irq_mask |= irqmask;
+		}
+		atomic_inc(&irq_err_count);
+		/*
+		 * Theoretically we do not have to handle this IRQ,
+		 * but in Linux this does not cause problems and is
+		 * simpler for us.
+		 */
+		goto handle_real_irq;
+	}
+}
+
+static void i8259A_resume(void)
+{
+	if (i8259A_auto_eoi >= 0)
+		init_8259A(i8259A_auto_eoi);
+}
+
+static void i8259A_shutdown(void)
+{
+	/* Put the i8259A into a quiescent state that
+	 * the kernel initialization code can get it
+	 * out of.
+	 */
+	if (i8259A_auto_eoi >= 0) {
+		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
+	}
+}
+
+static struct syscore_ops i8259_syscore_ops = {
+	.resume = i8259A_resume,
+	.shutdown = i8259A_shutdown,
+};
+
+static int __init i8259A_init_sysfs(void)
+{
+	register_syscore_ops(&i8259_syscore_ops);
+	return 0;
+}
+
+device_initcall(i8259A_init_sysfs);
+
+static void init_8259A(int auto_eoi)
+{
+	unsigned long flags;
+
+	i8259A_auto_eoi = auto_eoi;
+
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
+
+	/*
+	 * outb_p - this has to work on a wide range of PC hardware.
+	 */
+	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+	if (auto_eoi)	/* master does Auto EOI */
+		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+	else		/* master expects normal EOI */
+		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
+
+	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
+	if (auto_eoi)
+		/*
+		 * In AEOI mode we just have to mask the interrupt
+		 * when acking.
+		 */
+		i8259A_chip.irq_mask_ack = disable_8259A_irq;
+	else
+		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
+
+	udelay(100);		/* wait for 8259A to initialize */
+
+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
+
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+}
+
+/*
+ * IRQ2 is the cascade interrupt to the second interrupt controller
+ */
+static struct irqaction irq2 = {
+	.handler = no_action,
+	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
+};
+
+static struct resource pic1_io_resource = {
+	.name = "pic1",
+	.start = PIC_MASTER_CMD,
+	.end = PIC_MASTER_IMR,
+	.flags = IORESOURCE_BUSY
+};
+
+static struct resource pic2_io_resource = {
+	.name = "pic2",
+	.start = PIC_SLAVE_CMD,
+	.end = PIC_SLAVE_IMR,
+	.flags = IORESOURCE_BUSY
+};
+
+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				 irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+	irq_set_probe(virq);
+	return 0;
+}
+
+static struct irq_domain_ops i8259A_ops = {
+	.map = i8259A_irq_domain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * On systems with i8259-style interrupt controllers we assume, for
+ * driver compatibility reasons, that interrupts 0 - 15 are the i8259
+ * interrupts even if the hardware uses a different interrupt numbering.
+ */
+struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
+{
+	struct irq_domain *domain;
+
+	insert_resource(&ioport_resource, &pic1_io_resource);
+	insert_resource(&ioport_resource, &pic2_io_resource);
+
+	init_8259A(0);
+
+	domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0,
+				       &i8259A_ops, NULL);
+	if (!domain)
+		panic("Failed to add i8259 IRQ domain");
+
+	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+	return domain;
+}
+
+void __init init_i8259_irqs(void)
+{
+	__init_i8259_irqs(NULL);
+}
+
+static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc)
+{
+	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	int hwirq = i8259_irq();
+	unsigned int irq;
+
+	if (hwirq < 0)
+		return;
+
+	irq = irq_linear_revmap(domain, hwirq);
+	generic_handle_irq(irq);
+}
+
+int __init i8259_of_init(struct device_node *node, struct device_node *parent)
+{
+	struct irq_domain *domain;
+	unsigned int parent_irq;
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		pr_err("Failed to map i8259 parent IRQ\n");
+		return -ENODEV;
+	}
+
+	domain = __init_i8259_irqs(node);
+	irq_set_handler_data(parent_irq, domain);
+	irq_set_chained_handler(parent_irq, i8259_irq_dispatch);
+	return 0;
+}
+IRQCHIP_DECLARE(i8259, "intel,i8259", i8259_of_init);
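The driver above keeps a single 16-bit cached_irq_mask for both PICs: the low byte shadows the master 8259A's IMR, the high byte the slave's, and "irq & 8" selects which chip to program. A small user-space sketch of that bookkeeping with hypothetical IRQ numbers (illustration only; the real code writes the I/O ports under i8259A_lock):

#include <stdio.h>

static unsigned int cached_irq_mask = 0xffff;	/* all 16 lines masked, as at init */

static void write_imr(unsigned int irq)
{
	if (irq & 8)	/* lines 8-15 live on the slave PIC */
		printf("slave IMR  <- %#04x\n", (cached_irq_mask >> 8) & 0xff);
	else		/* lines 0-7 live on the master PIC */
		printf("master IMR <- %#04x\n", cached_irq_mask & 0xff);
}

static void pic_enable(unsigned int irq)
{
	cached_irq_mask &= ~(1u << irq);
	write_imr(irq);
}

static void pic_disable(unsigned int irq)
{
	cached_irq_mask |= 1u << irq;
	write_imr(irq);
}

int main(void)
{
	pic_enable(3);		/* hypothetical master line: master IMR becomes 0xf7 */
	pic_enable(12);		/* hypothetical slave line:  slave IMR becomes 0xef */
	pic_disable(12);	/* slave IMR back to 0xff */
	return 0;
}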
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 8071c2e..841604b 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -218,8 +218,9 @@
 	return 0;
 }
 
-static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc)
+static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct pdc_intc_priv *priv;
 	unsigned int i, irq_no;
 
@@ -451,13 +452,13 @@
 	/* Setup chained handlers for the peripheral IRQs */
 	for (i = 0; i < priv->nr_perips; ++i) {
 		irq = priv->perip_irqs[i];
-		irq_set_handler_data(irq, priv);
-		irq_set_chained_handler(irq, pdc_intc_perip_isr);
+		irq_set_chained_handler_and_data(irq, pdc_intc_perip_isr,
+						 priv);
 	}
 
 	/* Setup chained handler for the syswake IRQ */
-	irq_set_handler_data(priv->syswake_irq, priv);
-	irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);
+	irq_set_chained_handler_and_data(priv->syswake_irq,
+					 pdc_intc_syswake_isr, priv);
 
 	dev_info(&pdev->dev,
 		 "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
new file mode 100644
index 0000000..e48d330
--- /dev/null
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irqchip.h>
+#include <linux/syscore_ops.h>
+
+#define IMR_NUM			4
+#define GPC_MAX_IRQS            (IMR_NUM * 32)
+
+#define GPC_IMR1_CORE0		0x30
+#define GPC_IMR1_CORE1		0x40
+
+struct gpcv2_irqchip_data {
+	struct raw_spinlock	rlock;
+	void __iomem		*gpc_base;
+	u32			wakeup_sources[IMR_NUM];
+	u32			saved_irq_mask[IMR_NUM];
+	u32			cpu2wakeup;
+};
+
+static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+
+/*
+ * Interface for the low level wakeup code.
+ */
+u32 imx_gpcv2_get_wakeup_source(u32 **sources)
+{
+	if (!imx_gpcv2_instance)
+		return 0;
+
+	if (sources)
+		*sources = imx_gpcv2_instance->wakeup_sources;
+
+	return IMR_NUM;
+}
+
+static int gpcv2_wakeup_source_save(void)
+{
+	struct gpcv2_irqchip_data *cd;
+	void __iomem *reg;
+	int i;
+
+	cd = imx_gpcv2_instance;
+	if (!cd)
+		return 0;
+
+	for (i = 0; i < IMR_NUM; i++) {
+		reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+		cd->saved_irq_mask[i] = readl_relaxed(reg);
+		writel_relaxed(cd->wakeup_sources[i], reg);
+	}
+
+	return 0;
+}
+
+static void gpcv2_wakeup_source_restore(void)
+{
+	struct gpcv2_irqchip_data *cd;
+	void __iomem *reg;
+	int i;
+
+	cd = imx_gpcv2_instance;
+	if (!cd)
+		return;
+
+	for (i = 0; i < IMR_NUM; i++) {
+		reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+		writel_relaxed(cd->saved_irq_mask[i], reg);
+	}
+}
+
+static struct syscore_ops imx_gpcv2_syscore_ops = {
+	.suspend	= gpcv2_wakeup_source_save,
+	.resume		= gpcv2_wakeup_source_restore,
+};
+
+static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct gpcv2_irqchip_data *cd = d->chip_data;
+	unsigned int idx = d->hwirq / 32;
+	unsigned long flags;
+	void __iomem *reg;
+	u32 mask, val;
+
+	raw_spin_lock_irqsave(&cd->rlock, flags);
+	reg = cd->gpc_base + cd->cpu2wakeup + idx * 4;
+	mask = 1 << d->hwirq % 32;
+	val = cd->wakeup_sources[idx];
+
+	cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
+	raw_spin_unlock_irqrestore(&cd->rlock, flags);
+
+	/*
+	 * Do *not* call into the parent, as the GIC doesn't have any
+	 * wake-up facility...
+	 */
+
+	return 0;
+}
+
+static void imx_gpcv2_irq_unmask(struct irq_data *d)
+{
+	struct gpcv2_irqchip_data *cd = d->chip_data;
+	void __iomem *reg;
+	u32 val;
+
+	raw_spin_lock(&cd->rlock);
+	reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+	val = readl_relaxed(reg);
+	val &= ~(1 << d->hwirq % 32);
+	writel_relaxed(val, reg);
+	raw_spin_unlock(&cd->rlock);
+
+	irq_chip_unmask_parent(d);
+}
+
+static void imx_gpcv2_irq_mask(struct irq_data *d)
+{
+	struct gpcv2_irqchip_data *cd = d->chip_data;
+	void __iomem *reg;
+	u32 val;
+
+	raw_spin_lock(&cd->rlock);
+	reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+	val = readl_relaxed(reg);
+	val |= 1 << (d->hwirq % 32);
+	writel_relaxed(val, reg);
+	raw_spin_unlock(&cd->rlock);
+
+	irq_chip_mask_parent(d);
+}
+
+static struct irq_chip gpcv2_irqchip_data_chip = {
+	.name			= "GPCv2",
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_mask		= imx_gpcv2_irq_mask,
+	.irq_unmask		= imx_gpcv2_irq_unmask,
+	.irq_set_wake		= imx_gpcv2_irq_set_wake,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+#endif
+};
+
+static int imx_gpcv2_domain_xlate(struct irq_domain *domain,
+				struct device_node *controller,
+				const u32 *intspec,
+				unsigned int intsize,
+				unsigned long *out_hwirq,
+				unsigned int *out_type)
+{
+	/* Shouldn't happen, really... */
+	if (domain->of_node != controller)
+		return -EINVAL;
+
+	/* Not GIC compliant */
+	if (intsize != 3)
+		return -EINVAL;
+
+	/* No PPI should point to this domain */
+	if (intspec[0] != 0)
+		return -EINVAL;
+
+	*out_hwirq = intspec[1];
+	*out_type = intspec[2];
+	return 0;
+}
+
+static int imx_gpcv2_domain_alloc(struct irq_domain *domain,
+				  unsigned int irq, unsigned int nr_irqs,
+				  void *data)
+{
+	struct of_phandle_args *args = data;
+	struct of_phandle_args parent_args;
+	irq_hw_number_t hwirq;
+	int i;
+
+	/* Not GIC compliant */
+	if (args->args_count != 3)
+		return -EINVAL;
+
+	/* No PPI should point to this domain */
+	if (args->args[0] != 0)
+		return -EINVAL;
+
+	/* Can't deal with this */
+	hwirq = args->args[1];
+	if (hwirq >= GPC_MAX_IRQS)
+		return -EINVAL;
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i,
+				&gpcv2_irqchip_data_chip, domain->host_data);
+	}
+
+	parent_args = *args;
+	parent_args.np = domain->parent->of_node;
+	return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args);
+}
+
+static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
+	.xlate	= imx_gpcv2_domain_xlate,
+	.alloc	= imx_gpcv2_domain_alloc,
+	.free	= irq_domain_free_irqs_common,
+};
+
+static int __init imx_gpcv2_irqchip_init(struct device_node *node,
+			       struct device_node *parent)
+{
+	struct irq_domain *parent_domain, *domain;
+	struct gpcv2_irqchip_data *cd;
+	int i;
+
+	if (!parent) {
+		pr_err("%s: no parent, giving up\n", node->full_name);
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain) {
+		pr_err("%s: unable to get parent domain\n", node->full_name);
+		return -ENXIO;
+	}
+
+	cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
+	if (!cd) {
+		pr_err("kzalloc failed!\n");
+		return -ENOMEM;
+	}
+
+	cd->gpc_base = of_iomap(node, 0);
+	if (!cd->gpc_base) {
+		pr_err("fsl-gpcv2: unable to map gpc registers\n");
+		kfree(cd);
+		return -ENOMEM;
+	}
+
+	domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS,
+				node, &gpcv2_irqchip_data_domain_ops, cd);
+	if (!domain) {
+		iounmap(cd->gpc_base);
+		kfree(cd);
+		return -ENOMEM;
+	}
+	irq_set_default_host(domain);
+
+	/* Initially mask all interrupts */
+	for (i = 0; i < IMR_NUM; i++) {
+		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
+		writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
+		cd->wakeup_sources[i] = ~0;
+	}
+
+	/* Use CORE0 as the default CPU to be woken up by the GPC */
+	cd->cpu2wakeup = GPC_IMR1_CORE0;
+
+	/*
+	 * Due to a hardware design flaw, the GPR interrupt (#32) must be
+	 * kept unmasked during RUN mode to avoid entering DSM
+	 * by mistake.
+	 */
+	writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
+
+	imx_gpcv2_instance = cd;
+	register_syscore_ops(&imx_gpcv2_syscore_ops);
+
+	return 0;
+}
+
+IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
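imx_gpcv2_irq_set_wake() above records wake-up sources in a shadow copy of the four GPC IMR words. Note the inverted sense: a set bit in the IMR means "masked", so enabling an interrupt as a wake source clears its bit. A user-space sketch of the hwirq -> (word, bit) arithmetic, using a hypothetical hwirq value (illustration only):

#include <stdio.h>

#define IMR_NUM 4

static void set_wake(unsigned int *imr, unsigned int hwirq, int on)
{
	unsigned int idx  = hwirq / 32;			/* which IMR word */
	unsigned int mask = 1u << (hwirq % 32);		/* bit inside that word */

	/* Set bit = masked, so a wake source gets its bit cleared. */
	imr[idx] = on ? (imr[idx] & ~mask) : (imr[idx] | mask);
}

int main(void)
{
	/* Start like the driver: everything masked, nothing wakes us. */
	unsigned int wakeup_sources[IMR_NUM] = { ~0u, ~0u, ~0u, ~0u };

	set_wake(wakeup_sources, 47, 1);	/* hypothetical hwirq 47 -> word 1, bit 15 */
	printf("IMR words: %#x %#x %#x %#x\n",
	       wakeup_sources[0], wakeup_sources[1],
	       wakeup_sources[2], wakeup_sources[3]);
	return 0;
}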
diff --git a/drivers/irqchip/irq-ingenic.c b/drivers/irqchip/irq-ingenic.c
index 005de3f..fc5953d 100644
--- a/drivers/irqchip/irq-ingenic.c
+++ b/drivers/irqchip/irq-ingenic.c
@@ -18,6 +18,7 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/ingenic.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -28,8 +29,6 @@
 #include <asm/io.h>
 #include <asm/mach-jz4740/irq.h>
 
-#include "irqchip.h"
-
 struct ingenic_intc_data {
 	void __iomem *base;
 	unsigned num_chips;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index 81e3cf5..c151726 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -20,13 +20,12 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
-#include "irqchip.h"
-
 
 /* The source ID bits start from 4 to 31 (total 28 bits)*/
 #define BIT_OFS			4
@@ -84,8 +83,9 @@
 	/* nothing to do here */
 }
 
-static void keystone_irq_handler(unsigned irq, struct irq_desc *desc)
+static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
 	unsigned long pending;
 	int src, virq;
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 2cb474a..5f4c529 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -404,7 +404,6 @@
 #ifdef CONFIG_METAG_SUSPEND_MEM
 	struct meta_intc_priv *priv = &meta_intc_priv;
 #endif
-	unsigned int irq = data->irq;
 	irq_hw_number_t hw = data->hwirq;
 	unsigned int bit = 1 << meta_intc_offset(hw);
 	void __iomem *level_addr = meta_intc_level_addr(hw);
@@ -413,11 +412,11 @@
 
 	/* update the chip/handler */
 	if (flow_type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip,
-						   handle_level_irq, NULL);
+		irq_set_chip_handler_name_locked(data, &meta_intc_level_chip,
+						 handle_level_irq, NULL);
 	else
-		__irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip,
-						   handle_edge_irq, NULL);
+		irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip,
+						 handle_edge_irq, NULL);
 
 	/* and clear/set the bit in HWLEVELEXT */
 	__global_lock2(flags);
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index c16c186..3d23ce3 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -286,8 +286,7 @@
 	int irq = tbisig_map(signum);
 
 	/* Register the multiplexed IRQ handler */
-	irq_set_handler_data(irq, priv);
-	irq_set_chained_handler(irq, metag_internal_irq_demux);
+	irq_set_chained_handler_and_data(irq, metag_internal_irq_demux, priv);
 	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
 }
 
diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c
index a43c419..8c504f5 100644
--- a/drivers/irqchip/irq-mips-cpu.c
+++ b/drivers/irqchip/irq-mips-cpu.c
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
 #include <asm/irq_cpu.h>
@@ -38,8 +39,6 @@
 #include <asm/mipsmtregs.h>
 #include <asm/setup.h>
 
-#include "irqchip.h"
-
 static inline void unmask_mips_irq(struct irq_data *d)
 {
 	set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index ff4be05..1764bcf 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/mips-gic.h>
 #include <linux/of_address.h>
 #include <linux/sched.h>
@@ -22,8 +23,6 @@
 
 #include <dt-bindings/interrupt-controller/mips-gic.h>
 
-#include "irqchip.h"
-
 unsigned int gic_present;
 
 struct gic_pcpu_mask {
@@ -42,20 +41,46 @@
 
 static void __gic_irq_dispatch(void);
 
-static inline unsigned int gic_read(unsigned int reg)
+static inline u32 gic_read32(unsigned int reg)
 {
 	return __raw_readl(gic_base + reg);
 }
 
-static inline void gic_write(unsigned int reg, unsigned int val)
+static inline u64 gic_read64(unsigned int reg)
 {
-	__raw_writel(val, gic_base + reg);
+	return __raw_readq(gic_base + reg);
 }
 
-static inline void gic_update_bits(unsigned int reg, unsigned int mask,
-				   unsigned int val)
+static inline unsigned long gic_read(unsigned int reg)
 {
-	unsigned int regval;
+	if (!mips_cm_is64)
+		return gic_read32(reg);
+	else
+		return gic_read64(reg);
+}
+
+static inline void gic_write32(unsigned int reg, u32 val)
+{
+	return __raw_writel(val, gic_base + reg);
+}
+
+static inline void gic_write64(unsigned int reg, u64 val)
+{
+	return __raw_writeq(val, gic_base + reg);
+}
+
+static inline void gic_write(unsigned int reg, unsigned long val)
+{
+	if (!mips_cm_is64)
+		return gic_write32(reg, (u32)val);
+	else
+		return gic_write64(reg, (u64)val);
+}
+
+static inline void gic_update_bits(unsigned int reg, unsigned long mask,
+				   unsigned long val)
+{
+	unsigned long regval;
 
 	regval = gic_read(reg);
 	regval &= ~mask;
@@ -66,40 +91,40 @@
 static inline void gic_reset_mask(unsigned int intr)
 {
 	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
-		  1 << GIC_INTR_BIT(intr));
+		  1ul << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_mask(unsigned int intr)
 {
 	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
-		  1 << GIC_INTR_BIT(intr));
+		  1ul << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
-			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
-			pol << GIC_INTR_BIT(intr));
+			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+			(unsigned long)pol << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
-			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
-			trig << GIC_INTR_BIT(intr));
+			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
+			(unsigned long)trig << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
 {
 	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
-			1 << GIC_INTR_BIT(intr),
-			dual << GIC_INTR_BIT(intr));
+			1ul << GIC_INTR_BIT(intr),
+			(unsigned long)dual << GIC_INTR_BIT(intr));
 }
 
 static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
 {
-	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
-		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
+	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
+		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
 }
 
 static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
@@ -114,10 +139,13 @@
 {
 	unsigned int hi, hi2, lo;
 
+	if (mips_cm_is64)
+		return (cycle_t)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));
+
 	do {
-		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
-		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
-		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
+		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
+		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
 	} while (hi2 != hi);
 
 	return (((cycle_t) hi) << 32) + lo;
@@ -136,10 +164,14 @@
 
 void gic_write_compare(cycle_t cnt)
 {
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
-				(int)(cnt >> 32));
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
-				(int)(cnt & 0xffffffff));
+	if (mips_cm_is64) {
+		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
+	} else {
+		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
+					(int)(cnt >> 32));
+		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
+					(int)(cnt & 0xffffffff));
+	}
 }
 
 void gic_write_cpu_compare(cycle_t cnt, int cpu)
@@ -149,10 +181,15 @@
 	local_irq_save(flags);
 
 	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
-	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
-				(int)(cnt >> 32));
-	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
-				(int)(cnt & 0xffffffff));
+
+	if (mips_cm_is64) {
+		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
+	} else {
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+					(int)(cnt >> 32));
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+					(int)(cnt & 0xffffffff));
+	}
 
 	local_irq_restore(flags);
 }
@@ -161,8 +198,11 @@
 {
 	unsigned int hi, lo;
 
-	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
-	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
+	if (mips_cm_is64)
+		return (cycle_t)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));
+
+	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
+	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
 
 	return (((cycle_t) hi) << 32) + lo;
 }
@@ -197,7 +237,7 @@
 	if (cpu_has_veic)
 		return true;
 
-	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
+	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
 	switch (intr) {
 	case GIC_LOCAL_INT_TIMER:
 		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
@@ -263,7 +303,7 @@
 
 static void gic_handle_shared_int(bool chained)
 {
-	unsigned int i, intr, virq;
+	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
 	unsigned long *pcpu_mask;
 	unsigned long pending_reg, intrmask_reg;
 	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
@@ -278,8 +318,8 @@
 	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
 		pending[i] = gic_read(pending_reg);
 		intrmask[i] = gic_read(intrmask_reg);
-		pending_reg += 0x4;
-		intrmask_reg += 0x4;
+		pending_reg += gic_reg_step;
+		intrmask_reg += gic_reg_step;
 	}
 
 	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -358,15 +398,12 @@
 		break;
 	}
 
-	if (is_edge) {
-		__irq_set_chip_handler_name_locked(d->irq,
-						   &gic_edge_irq_controller,
-						   handle_edge_irq, NULL);
-	} else {
-		__irq_set_chip_handler_name_locked(d->irq,
-						   &gic_level_irq_controller,
-						   handle_level_irq, NULL);
-	}
+	if (is_edge)
+		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
+						 handle_edge_irq, NULL);
+	else
+		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
+						 handle_level_irq, NULL);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
@@ -396,7 +433,7 @@
 		clear_bit(irq, pcpu_masks[i].pcpu_mask);
 	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
-	cpumask_copy(d->affinity, cpumask);
+	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
@@ -429,8 +466,8 @@
 	unsigned long pending, masked;
 	unsigned int intr, virq;
 
-	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
+	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
+	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
 
 	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
 
@@ -453,14 +490,14 @@
 {
 	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
+	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
 }
 
 static void gic_unmask_local_irq(struct irq_data *d)
 {
 	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
 
-	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
+	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
 }
 
 static struct irq_chip gic_local_irq_controller = {
@@ -478,7 +515,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
 }
@@ -492,7 +529,7 @@
 	spin_lock_irqsave(&gic_lock, flags);
 	for (i = 0; i < gic_vpes; i++) {
 		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
-		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
+		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
 	}
 	spin_unlock_irqrestore(&gic_lock, flags);
 }
@@ -612,7 +649,7 @@
 		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
 			if (!gic_local_irq_is_routable(j))
 				continue;
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
 		}
 	}
 }
@@ -657,27 +694,32 @@
 
 		switch (intr) {
 		case GIC_LOCAL_INT_WD:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
 			break;
 		case GIC_LOCAL_INT_COMPARE:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_TIMER:
 			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
 			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_PERFCTR:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_SWINT0:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_SWINT1:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
+				    val);
 			break;
 		case GIC_LOCAL_INT_FDC:
-			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
+			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
 			break;
 		default:
 			pr_err("Invalid local IRQ %d\n", intr);
@@ -782,7 +824,7 @@
 		 */
 		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
 		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
-			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
 							 GIC_VPE_TIMER_MAP)) &
 					GIC_MAP_MSK;
 			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index c0da57b..781ed6e 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
@@ -24,8 +25,6 @@
 #include <asm/exception.h>
 #include <asm/hardirq.h>
 
-#include "irqchip.h"
-
 #define MAX_ICU_NR		16
 
 #define PJ1_INT_SEL		0x10c
@@ -130,8 +129,9 @@
 	.irq_unmask	= icu_unmask_irq,
 };
 
-static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct irq_domain *domain;
 	struct icu_chip_data *data;
 	int i;
diff --git a/drivers/irqchip/irq-moxart.c b/drivers/irqchip/irq-moxart.c
index 00b3cc9..a24b06a 100644
--- a/drivers/irqchip/irq-moxart.c
+++ b/drivers/irqchip/irq-moxart.c
@@ -12,6 +12,7 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -19,8 +20,6 @@
 
 #include <asm/exception.h>
 
-#include "irqchip.h"
-
 #define IRQ_SOURCE_REG		0
 #define IRQ_MASK_REG		0x04
 #define IRQ_CLEAR_REG		0x08
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 15c1303..c8753da 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -13,6 +13,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
@@ -21,8 +22,6 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-#include "irqchip.h"
-
 struct mtk_sysirq_chip_data {
 	spinlock_t lock;
 	void __iomem *intpol_base;
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 04bf97b..1faf812 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/io.h>
 #include <linux/of.h>
@@ -27,8 +28,6 @@
 #include <linux/stmp_device.h>
 #include <asm/exception.h>
 
-#include "irqchip.h"
-
 #define HW_ICOLL_VECTOR				0x0000
 #define HW_ICOLL_LEVELACK			0x0010
 #define HW_ICOLL_CTRL				0x0020
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 5fac910..a878b8d 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -21,13 +21,12 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
 #include <asm/v7m.h>
 #include <asm/exception.h>
 
-#include "irqchip.h"
-
 #define NVIC_ISER		0x000
 #define NVIC_ICER		0x080
 #define NVIC_IPR		0x300
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index a569c6d..8587d0f 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -17,13 +17,12 @@
 #include <linux/io.h>
 
 #include <asm/exception.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
-#include "irqchip.h"
-
 /* Define these here for now until we drop all board-files */
 #define OMAP24XX_IC_BASE	0x480fe000
 #define OMAP34XX_IC_BASE	0x48200000
@@ -331,37 +330,12 @@
 static asmlinkage void __exception_irq_entry
 omap_intc_handle_irq(struct pt_regs *regs)
 {
-	u32 irqnr = 0;
-	int handled_irq = 0;
-	int i;
+	u32 irqnr;
 
-	do {
-		for (i = 0; i < omap_nr_pending; i++) {
-			irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i));
-			if (irqnr)
-				goto out;
-		}
-
-out:
-		if (!irqnr)
-			break;
-
-		irqnr = intc_readl(INTC_SIR);
-		irqnr &= ACTIVEIRQ_MASK;
-
-		if (irqnr) {
-			handle_domain_irq(domain, irqnr, regs);
-			handled_irq = 1;
-		}
-	} while (irqnr);
-
-	/*
-	 * If an irq is masked or deasserted while active, we will
-	 * keep ending up here with no irq handled. So remove it from
-	 * the INTC with an ack.
-	 */
-	if (!handled_irq)
-		omap_ack_irq(NULL);
+	irqnr = intc_readl(INTC_SIR);
+	irqnr &= ACTIVEIRQ_MASK;
+	WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
+	handle_domain_irq(domain, irqnr, regs);
 }
 
 void __init omap3_init_irq(void)
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index e93d079..6a9a3e7 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -9,12 +9,11 @@
  */
 
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
-#include "irqchip.h"
-
 /* OR1K PIC implementation */
 
 struct or1k_pic_dev {
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index ad0c0f6..5ea999a 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -10,14 +10,13 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <asm/exception.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 /*
  * Orion SoC main interrupt controller
  */
@@ -109,7 +108,7 @@
 
 static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_domain *d = irq_get_handler_data(irq);
+	struct irq_domain *d = irq_desc_get_handler_data(desc);
 
 	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
 	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
@@ -198,8 +197,8 @@
 	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
 	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
 
-	irq_set_handler_data(irq, domain);
-	irq_set_chained_handler(irq, orion_bridge_irq_handler);
+	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
+					 domain);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-renesas-h8300h.c b/drivers/irqchip/irq-renesas-h8300h.c
index 1870e6b..6fd30d5 100644
--- a/drivers/irqchip/irq-renesas-h8300h.c
+++ b/drivers/irqchip/irq-renesas-h8300h.c
@@ -11,8 +11,6 @@
 #include <linux/of_irq.h>
 #include <asm/io.h>
 
-#include "irqchip.h"
-
 static const char ipr_bit[] = {
 	 7,  6,  5,  5,
 	 4,  4,  4,  4,  3,  3,  3,  3,
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index 64425f4..8098ead 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -5,10 +5,10 @@
  */
 
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <asm/io.h>
-#include "irqchip.h"
 
 static void *intc_baseaddr;
 #define IPRA ((unsigned long)intc_baseaddr)
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 778bd07..2aa3add7 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -53,7 +53,6 @@
 struct irqc_irq {
 	int hw_irq;
 	int requested_irq;
-	int domain_irq;
 	struct irqc_priv *p;
 };
 
@@ -70,8 +69,8 @@
 
 static void irqc_dbg(struct irqc_irq *i, char *str)
 {
-	dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n",
-		str, i->requested_irq, i->hw_irq, i->domain_irq);
+	dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
+		str, i->requested_irq, i->hw_irq);
 }
 
 static void irqc_irq_enable(struct irq_data *d)
@@ -145,7 +144,7 @@
 	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
 		iowrite32(bit, p->iomem + DETECT_STATUS);
 		irqc_dbg(i, "demux2");
-		generic_handle_irq(i->domain_irq);
+		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
@@ -156,13 +155,9 @@
 {
 	struct irqc_priv *p = h->host_data;
 
-	p->irq[hw].domain_irq = virq;
-	p->irq[hw].hw_irq = hw;
-
 	irqc_dbg(&p->irq[hw], "map");
 	irq_set_chip_data(virq, h->host_data);
 	irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID); /* kill me now */
 	return 0;
 }
 
@@ -215,6 +210,7 @@
 			break;
 
 		p->irq[k].p = p;
+		p->irq[k].hw_irq = k;
 		p->irq[k].requested_irq = irq->start;
 	}
 
@@ -243,8 +239,8 @@
 	irq_chip->irq_set_wake = irqc_irq_set_wake;
 	irq_chip->flags	= IRQCHIP_MASK_ON_SUSPEND;
 
-	p->irq_domain = irq_domain_add_simple(pdev->dev.of_node,
-					      p->number_of_irqs, 0,
+	p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
+					      p->number_of_irqs,
 					      &irqc_irq_domain_ops, p);
 	if (!p->irq_domain) {
 		ret = -ENXIO;
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index e96717f..506d9f2 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -25,6 +25,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
@@ -40,8 +41,6 @@
 #include <plat/regs-irqtype.h>
 #include <plat/pm.h>
 
-#include "irqchip.h"
-
 #define S3C_IRQTYPE_NONE	0
 #define S3C_IRQTYPE_EINT	1
 #define S3C_IRQTYPE_EDGE	2
@@ -299,16 +298,14 @@
 	.irq_set_type	= s3c_irqext0_type,
 };
 
-static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
 	struct s3c_irq_intc *intc = irq_data->intc;
 	struct s3c_irq_intc *sub_intc = irq_data->sub_intc;
-	unsigned long src;
-	unsigned long msk;
-	unsigned int n;
-	unsigned int offset;
+	unsigned int n, offset, irq;
+	unsigned long src, msk;
 
 	/* we're using individual domains for the non-dt case
 	 * and one big domain for the dt case where the subintc
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index 46df287..61bb28d 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -70,7 +70,6 @@
 {
 	irq_set_chip_and_handler(irq, &sa1100_normal_chip,
 				 handle_level_irq);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index a469355..10cb21b 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -11,40 +11,44 @@
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/syscore_ops.h>
 #include <asm/mach/irq.h>
 #include <asm/exception.h>
-#include "irqchip.h"
 
-#define SIRFSOC_INT_RISC_MASK0          0x0018
-#define SIRFSOC_INT_RISC_MASK1          0x001C
-#define SIRFSOC_INT_RISC_LEVEL0         0x0020
-#define SIRFSOC_INT_RISC_LEVEL1         0x0024
+#define SIRFSOC_INT_RISC_MASK0		0x0018
+#define SIRFSOC_INT_RISC_MASK1		0x001C
+#define SIRFSOC_INT_RISC_LEVEL0		0x0020
+#define SIRFSOC_INT_RISC_LEVEL1		0x0024
 #define SIRFSOC_INIT_IRQ_ID		0x0038
+#define SIRFSOC_INT_BASE_OFFSET		0x0004
 
 #define SIRFSOC_NUM_IRQS		64
+#define SIRFSOC_NUM_BANKS		(SIRFSOC_NUM_IRQS / 32)
 
 static struct irq_domain *sirfsoc_irqdomain;
 
-static __init void
-sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
+static __init void sirfsoc_alloc_gc(void __iomem *base)
 {
-	struct irq_chip_generic *gc;
-	struct irq_chip_type *ct;
-	int ret;
 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	unsigned int set = IRQ_LEVEL;
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	int i;
 
-	ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
-		handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE);
+	irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc",
+				       handle_level_irq, clr, set,
+				       IRQ_GC_INIT_MASK_CACHE);
 
-	gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
-	gc->reg_base = base;
-	ct = gc->chip_types;
-	ct->chip.irq_mask = irq_gc_mask_clr_bit;
-	ct->chip.irq_unmask = irq_gc_mask_set_bit;
-	ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+	for (i = 0; i < SIRFSOC_NUM_BANKS; i++) {
+		gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32);
+		gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET;
+		ct = gc->chip_types;
+		ct->chip.irq_mask = irq_gc_mask_clr_bit;
+		ct->chip.irq_unmask = irq_gc_mask_set_bit;
+		ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
+	}
 }
 
 static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -64,10 +68,8 @@
 		panic("unable to map intc cpu registers\n");
 
 	sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
-		&irq_generic_chip_ops, base);
-
-	sirfsoc_alloc_gc(base, 0, 32);
-	sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
+						  &irq_generic_chip_ops, base);
+	sirfsoc_alloc_gc(base);
 
 	writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0);
 	writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 83d6aa6..4ad3e7c 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -16,6 +16,7 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -23,8 +24,6 @@
 #include <asm/exception.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 #define SUN4I_IRQ_VECTOR_REG		0x00
 #define SUN4I_IRQ_PROTECTION_REG	0x08
 #define SUN4I_IRQ_NMI_CTRL_REG		0x0c
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 6b2b582..772a82c 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -17,8 +17,8 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
-#include "irqchip.h"
 
 #define SUNXI_NMI_SRC_TYPE_MASK	0x00000003
 
@@ -61,7 +61,7 @@
 static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int virq = irq_find_mapping(domain, 0);
 
 	chained_irq_enter(chip, desc);
@@ -182,8 +182,7 @@
 	sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
 	sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
 
-	irq_set_handler_data(irq, domain);
-	irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq);
+	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
 
 	return 0;
 
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index accc200..3318296 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -22,13 +22,13 @@
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
-#include "irqchip.h"
 
 #define AB_IRQCTL_INT_ENABLE   0x00
 #define AB_IRQCTL_INT_STATUS   0x04
@@ -97,9 +97,10 @@
 	return IRQ_SET_MASK_OK;
 }
 
-static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc)
 {
 	struct irq_domain *domain = irq_desc_get_handler_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 
 	generic_handle_irq(irq_find_mapping(domain, irq));
 }
@@ -173,8 +174,8 @@
 	for (i = 0; i < nrirqs; i++) {
 		unsigned int irq = irq_of_parse_and_map(ictl, i);
 
-		irq_set_handler_data(irq, domain);
-		irq_set_chained_handler(irq, tb10x_irq_cascade);
+		irq_set_chained_handler_and_data(irq, tb10x_irq_cascade,
+						 domain);
 	}
 
 	ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index f67bbd8..2fd89eb 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -24,6 +24,7 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
@@ -31,8 +32,6 @@
 
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
-#include "irqchip.h"
-
 #define ICTLR_CPU_IEP_VFIQ	0x08
 #define ICTLR_CPU_IEP_FIR	0x14
 #define ICTLR_CPU_IEP_FIR_SET	0x18
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 888111b..16123f6 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -4,6 +4,7 @@
 #include <linux/bitops.h>
 #include <linux/irq.h>
 #include <linux/io.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/versatile-fpga.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
@@ -14,8 +15,6 @@
 #include <asm/exception.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 #define IRQ_STATUS		0x00
 #define IRQ_RAW_STATUS		0x04
 #define IRQ_ENABLE_SET		0x08
@@ -66,9 +65,10 @@
 	writel(mask, f->base + IRQ_ENABLE_SET);
 }
 
-static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc)
 {
 	struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	u32 status = readl(f->base + IRQ_STATUS);
 
 	if (status == 0) {
@@ -156,8 +156,8 @@
 	f->valid = valid;
 
 	if (parent_irq != -1) {
-		irq_set_handler_data(parent_irq, f);
-		irq_set_chained_handler(parent_irq, fpga_irq_handle);
+		irq_set_chained_handler_and_data(parent_irq, fpga_irq_handle,
+						 f);
 	}
 
 	/* This will also allocate irq descriptors */
diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c
index f5c01cb..2c22558 100644
--- a/drivers/irqchip/irq-vf610-mscm-ir.c
+++ b/drivers/irqchip/irq-vf610-mscm-ir.c
@@ -26,6 +26,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/mfd/syscon.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
@@ -34,8 +35,6 @@
 #include <linux/slab.h>
 #include <linux/regmap.h>
 
-#include "irqchip.h"
-
 #define MSCM_CPxNUM		0x4
 
 #define MSCM_IRSPRC(n)		(0x80 + 2 * (n))
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index d4ce331..03846df 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -24,6 +24,7 @@
 #include <linux/list.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/of.h>
@@ -37,8 +38,6 @@
 #include <asm/exception.h>
 #include <asm/irq.h>
 
-#include "irqchip.h"
-
 #define VIC_IRQ_STATUS			0x00
 #define VIC_FIQ_STATUS			0x04
 #define VIC_INT_SELECT			0x0c	/* 1 = FIQ, 0 = IRQ */
@@ -297,8 +296,8 @@
 	vic_id++;
 
 	if (parent_irq) {
-		irq_set_handler_data(parent_irq, v);
-		irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded);
+		irq_set_chained_handler_and_data(parent_irq,
+						 vic_handle_irq_cascaded, v);
 	}
 
 	v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 0b29700..8371d99 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
@@ -39,8 +40,6 @@
 #include <asm/exception.h>
 #include <asm/mach/irq.h>
 
-#include "irqchip.h"
-
 #define VT8500_ICPC_IRQ		0x20
 #define VT8500_ICPC_FIQ		0x24
 #define VT8500_ICDC		0x40		/* Destination Control 64*u32 */
@@ -127,15 +126,15 @@
 		return -EINVAL;
 	case IRQF_TRIGGER_HIGH:
 		dctr |= VT8500_TRIGGER_HIGH;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 	case IRQF_TRIGGER_FALLING:
 		dctr |= VT8500_TRIGGER_FALLING;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 	case IRQF_TRIGGER_RISING:
 		dctr |= VT8500_TRIGGER_RISING;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 	}
 	writeb(dctr, base + VT8500_ICDC + d->hwirq);
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index e1c2f96..bb3ac5f 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -11,12 +11,11 @@
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 
 #include <asm/mxregs.h>
 
-#include "irqchip.h"
-
 #define HW_IRQ_IPI_COUNT 2
 #define HW_IRQ_MX_BASE 2
 #define HW_IRQ_EXTERN_BASE 3
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 7d71126..472ae17 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -15,10 +15,9 @@
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 
-#include "irqchip.h"
-
 unsigned int cached_irq_mask;
 
 /*
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index e4ef74e..4c48fa8 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -11,6 +11,7 @@
 
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
@@ -18,8 +19,6 @@
 #include <asm/mach/irq.h>
 #include <asm/exception.h>
 
-#include "irqchip.h"
-
 #define IO_STATUS	0x000
 #define IO_RAW_STATUS	0x004
 #define IO_ENABLE	0x008
diff --git a/drivers/irqchip/irqchip.h b/drivers/irqchip/irqchip.h
deleted file mode 100644
index 0f67ae32..0000000
--- a/drivers/irqchip/irqchip.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright (C) 2012 Thomas Petazzoni
- *
- * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2.  This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/irqchip.h>
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index acb721b..4cbd9c5 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -18,14 +18,13 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/spinlock.h>
 
-#include "irqchip.h"
-
 /*
  * struct spear_shirq: shared irq structure
  *
@@ -183,9 +182,9 @@
 	&spear320_shirq_intrcomm_ras,
 };
 
-static void shirq_handler(unsigned irq, struct irq_desc *desc)
+static void shirq_handler(unsigned __irq, struct irq_desc *desc)
 {
-	struct spear_shirq *shirq = irq_get_handler_data(irq);
+	struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
 	u32 pend;
 
 	pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
diff --git a/drivers/isdn/mISDN/dsp_audio.c b/drivers/isdn/mISDN/dsp_audio.c
index 0602295..bbef98e 100644
--- a/drivers/isdn/mISDN/dsp_audio.c
+++ b/drivers/isdn/mISDN/dsp_audio.c
@@ -13,6 +13,7 @@
 #include <linux/mISDNif.h>
 #include <linux/mISDNdsp.h>
 #include <linux/export.h>
+#include <linux/bitrev.h>
 #include "core.h"
 #include "dsp.h"
 
@@ -137,27 +138,14 @@
 	return ulawbyte;
 }
 
-static int reverse_bits(int i)
-{
-	int z, j;
-	z = 0;
-
-	for (j = 0; j < 8; j++) {
-		if ((i & (1 << j)) != 0)
-			z |= 1 << (7 - j);
-	}
-	return z;
-}
-
-
 void dsp_audio_generate_law_tables(void)
 {
 	int i;
 	for (i = 0; i < 256; i++)
-		dsp_audio_alaw_to_s32[i] = alaw2linear(reverse_bits(i));
+		dsp_audio_alaw_to_s32[i] = alaw2linear(bitrev8((u8)i));
 
 	for (i = 0; i < 256; i++)
-		dsp_audio_ulaw_to_s32[i] = ulaw2linear(reverse_bits(i));
+		dsp_audio_ulaw_to_s32[i] = ulaw2linear(bitrev8((u8)i));
 
 	for (i = 0; i < 256; i++) {
 		dsp_audio_alaw_to_ulaw[i] =
@@ -176,13 +164,13 @@
 		/* generating ulaw-table */
 		for (i = -32768; i < 32768; i++) {
 			dsp_audio_s16_to_law[i & 0xffff] =
-				reverse_bits(linear2ulaw(i));
+				bitrev8(linear2ulaw(i));
 		}
 	} else {
 		/* generating alaw-table */
 		for (i = -32768; i < 32768; i++) {
 			dsp_audio_s16_to_law[i & 0xffff] =
-				reverse_bits(linear2alaw(i));
+				bitrev8(linear2alaw(i));
 		}
 	}
 }
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 52c4382..8e3aa00 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -506,7 +506,7 @@
 		       __func__, conf->id);
 
 	if (list_empty(&conf->mlist)) {
-		printk(KERN_ERR "%s: conference whithout members\n",
+		printk(KERN_ERR "%s: conference without members\n",
 		       __func__);
 		return;
 	}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 9ad35f7..70f4255 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -43,7 +43,7 @@
 	tristate "LED support for the AAT1290"
 	depends on LEDS_CLASS_FLASH
 	depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	depends on OF
 	depends on PINCTRL
 	help
@@ -419,7 +419,7 @@
 config LEDS_LT3593
 	tristate "LED driver for LT3593 controllers"
 	depends on LEDS_CLASS
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  This option enables support for LEDs driven by a Linear Technology
 	  LT3593 controller. This controller uses a special one-wire pulse
@@ -455,12 +455,16 @@
 config LEDS_NS2
 	tristate "LED support for Network Space v2 GPIO LEDs"
 	depends on LEDS_CLASS
-	depends on MACH_KIRKWOOD
+	depends on MACH_KIRKWOOD || MACH_ARMADA_370
 	default y
 	help
-	  This option enable support for the dual-GPIO LED found on the
-	  Network Space v2 board (and parents). This include Internet Space v2,
-	  Network Space (Max) v2 and d2 Network v2 boards.
+	  This option enables support for the dual-GPIO LEDs found on the
+	  following LaCie/Seagate boards:
+
+		Network Space v2 (and parents: Max, Mini)
+		Internet Space v2
+		d2 Network v2
+		n090401 (Seagate NAS 4-Bay)
 
 config LEDS_NETXBIG
 	tristate "LED support for Big Network series LEDs"
@@ -543,7 +547,8 @@
 
 config LEDS_KTD2692
 	tristate "LED support for KTD2692 flash LED controller"
-	depends on LEDS_CLASS_FLASH && GPIOLIB && OF
+	depends on LEDS_CLASS_FLASH && OF
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  This option enables support for KTD2692 LED flash connected
 	  through ExpressWire interface.
@@ -560,6 +565,17 @@
 	  This option enables support for the BlinkM RGB LED connected
 	  through I2C. Say Y to enable support for the BlinkM LED.
 
+config LEDS_POWERNV
+	tristate "LED support for PowerNV Platform"
+	depends on LEDS_CLASS
+	depends on PPC_POWERNV
+	depends on OF
+	help
+	  This option enables support for the system LEDs present on
+	  PowerNV platforms. Say 'y' to enable this support in the kernel.
+	  To compile this driver as a module, choose 'm' here: the module
+	  will be called leds-powernv.
+
 config LEDS_SYSCON
 	bool "LED support for LEDs on system controllers"
 	depends on LEDS_CLASS=y
@@ -578,14 +594,6 @@
 	  This option enables support for the LEDs on the ARM Versatile
 	  and RealView boards. Say Y to enable these.
 
-config LEDS_PM8941_WLED
-	tristate "LED support for the Qualcomm PM8941 WLED block"
-	depends on LEDS_CLASS
-	select REGMAP
-	help
-	  This option enables support for the 'White' LED block
-	  on Qualcomm PM8941 PMICs.
-
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 8d6a24a..b503f92 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -63,8 +63,8 @@
 obj-$(CONFIG_LEDS_SYSCON)		+= leds-syscon.o
 obj-$(CONFIG_LEDS_VERSATILE)		+= leds-versatile.o
 obj-$(CONFIG_LEDS_MENF21BMC)		+= leds-menf21bmc.o
-obj-$(CONFIG_LEDS_PM8941_WLED)		+= leds-pm8941-wled.o
 obj-$(CONFIG_LEDS_KTD2692)		+= leds-ktd2692.o
+obj-$(CONFIG_LEDS_POWERNV)		+= leds-powernv.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index beabfbc..ca51d58 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -228,12 +228,15 @@
 {
 	unsigned int i = 0;
 	int ret = 0;
+	struct device *dev;
 
 	strlcpy(name, init_name, len);
 
-	while (class_find_device(leds_class, NULL, name, match_name) &&
-	       (ret < len))
+	while ((ret < len) &&
+	       (dev = class_find_device(leds_class, NULL, name, match_name))) {
+		put_device(dev);
 		ret = snprintf(name, len, "%s_%u", init_name, ++i);
+	}
 
 	if (ret >= len)
 		return -ENOMEM;
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 2b4dc73..257a813 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -156,63 +156,35 @@
 	latch_value = 0xffff;
 	*latch_address = latch_value;
 
-	ret = led_classdev_register(&pdev->dev, &fsg_wlan_led);
+	ret = devm_led_classdev_register(&pdev->dev, &fsg_wlan_led);
 	if (ret < 0)
-		goto failwlan;
+		return ret;
 
-	ret = led_classdev_register(&pdev->dev, &fsg_wan_led);
+	ret = devm_led_classdev_register(&pdev->dev, &fsg_wan_led);
 	if (ret < 0)
-		goto failwan;
+		return ret;
 
-	ret = led_classdev_register(&pdev->dev, &fsg_sata_led);
+	ret = devm_led_classdev_register(&pdev->dev, &fsg_sata_led);
 	if (ret < 0)
-		goto failsata;
+		return ret;
 
-	ret = led_classdev_register(&pdev->dev, &fsg_usb_led);
+	ret = devm_led_classdev_register(&pdev->dev, &fsg_usb_led);
 	if (ret < 0)
-		goto failusb;
+		return ret;
 
-	ret = led_classdev_register(&pdev->dev, &fsg_sync_led);
+	ret = devm_led_classdev_register(&pdev->dev, &fsg_sync_led);
 	if (ret < 0)
-		goto failsync;
+		return ret;
 
-	ret = led_classdev_register(&pdev->dev, &fsg_ring_led);
+	ret = devm_led_classdev_register(&pdev->dev, &fsg_ring_led);
 	if (ret < 0)
-		goto failring;
-
-	return ret;
-
- failring:
-	led_classdev_unregister(&fsg_sync_led);
- failsync:
-	led_classdev_unregister(&fsg_usb_led);
- failusb:
-	led_classdev_unregister(&fsg_sata_led);
- failsata:
-	led_classdev_unregister(&fsg_wan_led);
- failwan:
-	led_classdev_unregister(&fsg_wlan_led);
- failwlan:
+		return ret;
 
 	return ret;
 }
 
-static int fsg_led_remove(struct platform_device *pdev)
-{
-	led_classdev_unregister(&fsg_wlan_led);
-	led_classdev_unregister(&fsg_wan_led);
-	led_classdev_unregister(&fsg_sata_led);
-	led_classdev_unregister(&fsg_usb_led);
-	led_classdev_unregister(&fsg_sync_led);
-	led_classdev_unregister(&fsg_ring_led);
-
-	return 0;
-}
-
-
 static struct platform_driver fsg_led_driver = {
 	.probe		= fsg_led_probe,
-	.remove		= fsg_led_remove,
 	.driver		= {
 		.name		= "fsg-led",
 	},
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 91325de..b38430c 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -492,7 +492,6 @@
 	.id_table = lm3530_id,
 	.driver = {
 		.name = LM3530_NAME,
-		.owner = THIS_MODULE,
 	},
 };
 
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index f5112cb..48872997 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -555,7 +555,6 @@
 static struct i2c_driver lm355x_i2c_driver = {
 	.driver = {
 		   .name = LM355x_NAME,
-		   .owner = THIS_MODULE,
 		   .pm = NULL,
 		   },
 	.probe = lm355x_probe,
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index d3dec01..02ebe34 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -446,7 +446,6 @@
 static struct i2c_driver lm3642_i2c_driver = {
 	.driver = {
 		   .name = LM3642_NAME,
-		   .owner = THIS_MODULE,
 		   .pm = NULL,
 		   },
 	.probe = lm3642_probe,
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 8ca197a..63a9254 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -514,20 +514,19 @@
 	int ret;
 	struct lp55xx_chip *chip;
 	struct lp55xx_led *led;
-	struct lp55xx_platform_data *pdata;
+	struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct device_node *np = client->dev.of_node;
 
-	if (!dev_get_platdata(&client->dev)) {
+	if (!pdata) {
 		if (np) {
-			ret = lp55xx_of_populate_pdata(&client->dev, np);
-			if (ret < 0)
-				return ret;
+			pdata = lp55xx_of_populate_pdata(&client->dev, np);
+			if (IS_ERR(pdata))
+				return PTR_ERR(pdata);
 		} else {
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
 	}
-	pdata = dev_get_platdata(&client->dev);
 
 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 584dbbc..1d0187f 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -880,20 +880,19 @@
 	int ret;
 	struct lp55xx_chip *chip;
 	struct lp55xx_led *led;
-	struct lp55xx_platform_data *pdata;
+	struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct device_node *np = client->dev.of_node;
 
-	if (!dev_get_platdata(&client->dev)) {
+	if (!pdata) {
 		if (np) {
-			ret = lp55xx_of_populate_pdata(&client->dev, np);
-			if (ret < 0)
-				return ret;
+			pdata = lp55xx_of_populate_pdata(&client->dev, np);
+			if (IS_ERR(pdata))
+				return PTR_ERR(pdata);
 		} else {
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
 	}
-	pdata = dev_get_platdata(&client->dev);
 
 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index ca85724..0360c59 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -515,20 +515,19 @@
 	int ret;
 	struct lp55xx_chip *chip;
 	struct lp55xx_led *led;
-	struct lp55xx_platform_data *pdata;
+	struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct device_node *np = client->dev.of_node;
 
-	if (!dev_get_platdata(&client->dev)) {
+	if (!pdata) {
 		if (np) {
-			ret = lp55xx_of_populate_pdata(&client->dev, np);
-			if (ret < 0)
-				return ret;
+			pdata = lp55xx_of_populate_pdata(&client->dev, np);
+			if (IS_ERR(pdata))
+				return PTR_ERR(pdata);
 		} else {
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
 	}
-	pdata = dev_get_platdata(&client->dev);
 
 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 96d51e9..59b7683 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -543,7 +543,8 @@
 }
 EXPORT_SYMBOL_GPL(lp55xx_unregister_sysfs);
 
-int lp55xx_of_populate_pdata(struct device *dev, struct device_node *np)
+struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
+						      struct device_node *np)
 {
 	struct device_node *child;
 	struct lp55xx_platform_data *pdata;
@@ -553,17 +554,17 @@
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	num_channels = of_get_child_count(np);
 	if (num_channels == 0) {
 		dev_err(dev, "no LED channels\n");
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 	}
 
 	cfg = devm_kzalloc(dev, sizeof(*cfg) * num_channels, GFP_KERNEL);
 	if (!cfg)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	pdata->led_config = &cfg[0];
 	pdata->num_channels = num_channels;
@@ -588,9 +589,7 @@
 	/* LP8501 specific */
 	of_property_read_u8(np, "pwr-sel", (u8 *)&pdata->pwr_sel);
 
-	dev->platform_data = pdata;
-
-	return 0;
+	return pdata;
 }
 EXPORT_SYMBOL_GPL(lp55xx_of_populate_pdata);
 
diff --git a/drivers/leds/leds-lp55xx-common.h b/drivers/leds/leds-lp55xx-common.h
index cceab48..c7f1e61 100644
--- a/drivers/leds/leds-lp55xx-common.h
+++ b/drivers/leds/leds-lp55xx-common.h
@@ -202,7 +202,7 @@
 extern void lp55xx_unregister_sysfs(struct lp55xx_chip *chip);
 
 /* common device tree population function */
-extern int lp55xx_of_populate_pdata(struct device *dev,
-				    struct device_node *np);
+extern struct lp55xx_platform_data
+*lp55xx_of_populate_pdata(struct device *dev, struct device_node *np);
 
 #endif /* _LEDS_LP55XX_COMMON_H */
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index d3098e3..3f54f6f 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -308,20 +308,19 @@
 	int ret;
 	struct lp55xx_chip *chip;
 	struct lp55xx_led *led;
-	struct lp55xx_platform_data *pdata;
+	struct lp55xx_platform_data *pdata = dev_get_platdata(&client->dev);
 	struct device_node *np = client->dev.of_node;
 
-	if (!dev_get_platdata(&client->dev)) {
+	if (!pdata) {
 		if (np) {
-			ret = lp55xx_of_populate_pdata(&client->dev, np);
-			if (ret < 0)
-				return ret;
+			pdata = lp55xx_of_populate_pdata(&client->dev, np);
+			if (IS_ERR(pdata))
+				return PTR_ERR(pdata);
 		} else {
 			dev_err(&client->dev, "no platform data\n");
 			return -EINVAL;
 		}
 	}
-	pdata = dev_get_platdata(&client->dev);
 
 	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 	if (!chip)
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index 8c2b7fb..79f0843 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -302,7 +302,7 @@
 	return ret;
 }
 
-static struct reg_default lp8860_reg_defs[] = {
+static const struct reg_default lp8860_reg_defs[] = {
 	{ LP8860_DISP_CL1_BRT_MSB, 0x00},
 	{ LP8860_DISP_CL1_BRT_LSB, 0x00},
 	{ LP8860_DISP_CL1_CURR_MSB, 0x00},
@@ -332,7 +332,7 @@
 	.cache_type = REGCACHE_NONE,
 };
 
-static struct reg_default lp8860_eeprom_defs[] = {
+static const struct reg_default lp8860_eeprom_defs[] = {
 	{ LP8860_EEPROM_REG_0, 0x00 },
 	{ LP8860_EEPROM_REG_1, 0x00 },
 	{ LP8860_EEPROM_REG_2, 0x00 },
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index b8b0eec..df348a0 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -13,6 +13,7 @@
 
 #include <linux/led-class-flash.h>
 #include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77693-private.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 1fd6adb..b33514d 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -31,50 +31,38 @@
 #include <linux/platform_data/leds-kirkwood-ns2.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
+#include "leds.h"
 
 /*
- * The Network Space v2 dual-GPIO LED is wired to a CPLD and can blink in
- * relation with the SATA activity. This capability is exposed through the
- * "sata" sysfs attribute.
- *
- * The following array detail the different LED registers and the combination
- * of their possible values:
- *
- *  cmd_led   |  slow_led  | /SATA active | LED state
- *            |            |              |
- *     1      |     0      |      x       |  off
- *     -      |     1      |      x       |  on
- *     0      |     0      |      1       |  on
- *     0      |     0      |      0       |  blink (rate 300ms)
+ * The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED
+ * modes are available: off, on and SATA activity blinking. The LED modes are
+ * controlled through two GPIOs (command and slow): each combination of values
+ * for the command/slow GPIOs corresponds to a LED mode.
  */
 
-enum ns2_led_modes {
-	NS_V2_LED_OFF,
-	NS_V2_LED_ON,
-	NS_V2_LED_SATA,
-};
-
-struct ns2_led_mode_value {
-	enum ns2_led_modes	mode;
-	int			cmd_level;
-	int			slow_level;
-};
-
-static struct ns2_led_mode_value ns2_led_modval[] = {
-	{ NS_V2_LED_OFF	, 1, 0 },
-	{ NS_V2_LED_ON	, 0, 1 },
-	{ NS_V2_LED_ON	, 1, 1 },
-	{ NS_V2_LED_SATA, 0, 0 },
-};
-
 struct ns2_led_data {
 	struct led_classdev	cdev;
 	unsigned		cmd;
 	unsigned		slow;
+	bool			can_sleep;
+	int			mode_index;
 	unsigned char		sata; /* True when SATA mode active. */
 	rwlock_t		rw_lock; /* Lock GPIOs. */
+	struct work_struct	work;
+	int			num_modes;
+	struct ns2_led_modval	*modval;
 };
 
+static void ns2_led_work(struct work_struct *work)
+{
+	struct ns2_led_data *led_dat =
+		container_of(work, struct ns2_led_data, work);
+	int i = led_dat->mode_index;
+
+	gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
+	gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
+}
+
 static int ns2_led_get_mode(struct ns2_led_data *led_dat,
 			    enum ns2_led_modes *mode)
 {
@@ -83,22 +71,18 @@
 	int cmd_level;
 	int slow_level;
 
-	read_lock_irq(&led_dat->rw_lock);
+	cmd_level = gpio_get_value_cansleep(led_dat->cmd);
+	slow_level = gpio_get_value_cansleep(led_dat->slow);
 
-	cmd_level = gpio_get_value(led_dat->cmd);
-	slow_level = gpio_get_value(led_dat->slow);
-
-	for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
-		if (cmd_level == ns2_led_modval[i].cmd_level &&
-		    slow_level == ns2_led_modval[i].slow_level) {
-			*mode = ns2_led_modval[i].mode;
+	for (i = 0; i < led_dat->num_modes; i++) {
+		if (cmd_level == led_dat->modval[i].cmd_level &&
+		    slow_level == led_dat->modval[i].slow_level) {
+			*mode = led_dat->modval[i].mode;
 			ret = 0;
 			break;
 		}
 	}
 
-	read_unlock_irq(&led_dat->rw_lock);
-
 	return ret;
 }
 
@@ -106,19 +90,32 @@
 			     enum ns2_led_modes mode)
 {
 	int i;
+	bool found = false;
 	unsigned long flags;
 
+	for (i = 0; i < led_dat->num_modes; i++)
+		if (mode == led_dat->modval[i].mode) {
+			found = true;
+			break;
+		}
+
+	if (!found)
+		return;
+
 	write_lock_irqsave(&led_dat->rw_lock, flags);
 
-	for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) {
-		if (mode == ns2_led_modval[i].mode) {
-			gpio_set_value(led_dat->cmd,
-				       ns2_led_modval[i].cmd_level);
-			gpio_set_value(led_dat->slow,
-				       ns2_led_modval[i].slow_level);
-		}
+	if (!led_dat->can_sleep) {
+		gpio_set_value(led_dat->cmd,
+			       led_dat->modval[i].cmd_level);
+		gpio_set_value(led_dat->slow,
+			       led_dat->modval[i].slow_level);
+		goto exit_unlock;
 	}
 
+	led_dat->mode_index = i;
+	schedule_work(&led_dat->work);
+
+exit_unlock:
 	write_unlock_irqrestore(&led_dat->rw_lock, flags);
 }
 
@@ -148,7 +145,6 @@
 		container_of(led_cdev, struct ns2_led_data, cdev);
 	int ret;
 	unsigned long enable;
-	enum ns2_led_modes mode;
 
 	ret = kstrtoul(buff, 10, &enable);
 	if (ret < 0)
@@ -157,19 +153,19 @@
 	enable = !!enable;
 
 	if (led_dat->sata == enable)
-		return count;
-
-	ret = ns2_led_get_mode(led_dat, &mode);
-	if (ret < 0)
-		return ret;
-
-	if (enable && mode == NS_V2_LED_ON)
-		ns2_led_set_mode(led_dat, NS_V2_LED_SATA);
-	if (!enable && mode == NS_V2_LED_SATA)
-		ns2_led_set_mode(led_dat, NS_V2_LED_ON);
+		goto exit;
 
 	led_dat->sata = enable;
 
+	if (!led_get_brightness(led_cdev))
+		goto exit;
+
+	if (enable)
+		ns2_led_set_mode(led_dat, NS_V2_LED_SATA);
+	else
+		ns2_led_set_mode(led_dat, NS_V2_LED_ON);
+
+exit:
 	return count;
 }
 
@@ -199,7 +195,7 @@
 	enum ns2_led_modes mode;
 
 	ret = devm_gpio_request_one(&pdev->dev, template->cmd,
-			gpio_get_value(template->cmd) ?
+			gpio_get_value_cansleep(template->cmd) ?
 			GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
 			template->name);
 	if (ret) {
@@ -209,7 +205,7 @@
 	}
 
 	ret = devm_gpio_request_one(&pdev->dev, template->slow,
-			gpio_get_value(template->slow) ?
+			gpio_get_value_cansleep(template->slow) ?
 			GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
 			template->name);
 	if (ret) {
@@ -228,6 +224,10 @@
 	led_dat->cdev.groups = ns2_led_groups;
 	led_dat->cmd = template->cmd;
 	led_dat->slow = template->slow;
+	led_dat->can_sleep = gpio_cansleep(led_dat->cmd) |
+				gpio_cansleep(led_dat->slow);
+	led_dat->modval = template->modval;
+	led_dat->num_modes = template->num_modes;
 
 	ret = ns2_led_get_mode(led_dat, &mode);
 	if (ret < 0)
@@ -238,6 +238,8 @@
 	led_dat->cdev.brightness =
 		(mode == NS_V2_LED_OFF) ? LED_OFF : LED_FULL;
 
+	INIT_WORK(&led_dat->work, ns2_led_work);
+
 	ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
 	if (ret < 0)
 		return ret;
@@ -248,6 +250,7 @@
 static void delete_ns2_led(struct ns2_led_data *led_dat)
 {
 	led_classdev_unregister(&led_dat->cdev);
+	cancel_work_sync(&led_dat->work);
 }
 
 #ifdef CONFIG_OF_GPIO
@@ -259,9 +262,8 @@
 {
 	struct device_node *np = dev->of_node;
 	struct device_node *child;
-	struct ns2_led *leds;
+	struct ns2_led *led, *leds;
 	int num_leds = 0;
-	int i = 0;
 
 	num_leds = of_get_child_count(np);
 	if (!num_leds)
@@ -272,26 +274,57 @@
 	if (!leds)
 		return -ENOMEM;
 
+	led = leds;
 	for_each_child_of_node(np, child) {
 		const char *string;
-		int ret;
+		int ret, i, num_modes;
+		struct ns2_led_modval *modval;
 
 		ret = of_get_named_gpio(child, "cmd-gpio", 0);
 		if (ret < 0)
 			return ret;
-		leds[i].cmd = ret;
+		led->cmd = ret;
 		ret = of_get_named_gpio(child, "slow-gpio", 0);
 		if (ret < 0)
 			return ret;
-		leds[i].slow = ret;
+		led->slow = ret;
 		ret = of_property_read_string(child, "label", &string);
-		leds[i].name = (ret == 0) ? string : child->name;
+		led->name = (ret == 0) ? string : child->name;
 		ret = of_property_read_string(child, "linux,default-trigger",
 					      &string);
 		if (ret == 0)
-			leds[i].default_trigger = string;
+			led->default_trigger = string;
 
-		i++;
+		ret = of_property_count_u32_elems(child, "modes-map");
+		if (ret < 0 || ret % 3) {
+			dev_err(dev,
+				"Missing or malformed modes-map property\n");
+			return -EINVAL;
+		}
+
+		num_modes = ret / 3;
+		modval = devm_kzalloc(dev,
+				      num_modes * sizeof(struct ns2_led_modval),
+				      GFP_KERNEL);
+		if (!modval)
+			return -ENOMEM;
+
+		for (i = 0; i < num_modes; i++) {
+			of_property_read_u32_index(child,
+						"modes-map", 3 * i,
+						(u32 *) &modval[i].mode);
+			of_property_read_u32_index(child,
+						"modes-map", 3 * i + 1,
+						(u32 *) &modval[i].cmd_level);
+			of_property_read_u32_index(child,
+						"modes-map", 3 * i + 2,
+						(u32 *) &modval[i].slow_level);
+		}
+
+		led->num_modes = num_modes;
+		led->modval = modval;
+
+		led++;
 	}
 
 	pdata->leds = leds;
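
For reference, the "modes-map" property parsed above is a flat array of
(mode, cmd_level, slow_level) triplets that replaces the hard-coded
ns2_led_modval table removed earlier in this patch. A minimal sketch of what
such a property decodes to, reusing the values from the removed table and
assuming struct ns2_led_modval now lives in the platform-data header (the DT
property contents here are illustrative, not taken from a real board file):

	/* Hypothetical decode of: modes-map = <0 1 0  1 0 1  1 1 1  2 0 0>; */
	static const struct ns2_led_modval example_modval[] = {
		{ NS_V2_LED_OFF,  1, 0 },	/* mode 0: cmd=1, slow=0 */
		{ NS_V2_LED_ON,   0, 1 },	/* mode 1: cmd=0, slow=1 */
		{ NS_V2_LED_ON,   1, 1 },	/* mode 1: cmd=1, slow=1 */
		{ NS_V2_LED_SATA, 0, 0 },	/* mode 2: cmd=0, slow=0 */
	};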
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index c3a08b6..b775e1e 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -379,7 +379,6 @@
 static struct i2c_driver pca955x_driver = {
 	.driver = {
 		.name	= "leds-pca955x",
-		.owner	= THIS_MODULE,
 	},
 	.probe	= pca955x_probe,
 	.remove	= pca955x_remove,
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index bee3e1a..41f269f 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -332,6 +332,7 @@
 	{ .compatible = "nxp,pca9635", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_pca963x_match);
 #else
 static struct pca963x_platform_data *
 pca963x_dt_init(struct i2c_client *client, struct pca963x_chipdef *chip)
@@ -458,7 +459,6 @@
 static struct i2c_driver pca963x_driver = {
 	.driver = {
 		.name	= "leds-pca963x",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(of_pca963x_match),
 	},
 	.probe	= pca963x_probe,
diff --git a/drivers/leds/leds-pm8941-wled.c b/drivers/leds/leds-pm8941-wled.c
deleted file mode 100644
index bf64a59..0000000
--- a/drivers/leds/leds-pm8941-wled.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/* Copyright (c) 2015, Sony Mobile Communications, AB.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/regmap.h>
-
-#define PM8941_WLED_REG_VAL_BASE		0x40
-#define  PM8941_WLED_REG_VAL_MAX		0xFFF
-
-#define PM8941_WLED_REG_MOD_EN			0x46
-#define  PM8941_WLED_REG_MOD_EN_BIT		BIT(7)
-#define  PM8941_WLED_REG_MOD_EN_MASK		BIT(7)
-
-#define PM8941_WLED_REG_SYNC			0x47
-#define  PM8941_WLED_REG_SYNC_MASK		0x07
-#define  PM8941_WLED_REG_SYNC_LED1		BIT(0)
-#define  PM8941_WLED_REG_SYNC_LED2		BIT(1)
-#define  PM8941_WLED_REG_SYNC_LED3		BIT(2)
-#define  PM8941_WLED_REG_SYNC_ALL		0x07
-#define  PM8941_WLED_REG_SYNC_CLEAR		0x00
-
-#define PM8941_WLED_REG_FREQ			0x4c
-#define  PM8941_WLED_REG_FREQ_MASK		0x0f
-
-#define PM8941_WLED_REG_OVP			0x4d
-#define  PM8941_WLED_REG_OVP_MASK		0x03
-
-#define PM8941_WLED_REG_BOOST			0x4e
-#define  PM8941_WLED_REG_BOOST_MASK		0x07
-
-#define PM8941_WLED_REG_SINK			0x4f
-#define  PM8941_WLED_REG_SINK_MASK		0xe0
-#define  PM8941_WLED_REG_SINK_SHFT		0x05
-
-/* Per-'string' registers below */
-#define PM8941_WLED_REG_STR_OFFSET		0x10
-
-#define PM8941_WLED_REG_STR_MOD_EN_BASE		0x60
-#define  PM8941_WLED_REG_STR_MOD_MASK		BIT(7)
-#define  PM8941_WLED_REG_STR_MOD_EN		BIT(7)
-
-#define PM8941_WLED_REG_STR_SCALE_BASE		0x62
-#define  PM8941_WLED_REG_STR_SCALE_MASK		0x1f
-
-#define PM8941_WLED_REG_STR_MOD_SRC_BASE	0x63
-#define  PM8941_WLED_REG_STR_MOD_SRC_MASK	0x01
-#define  PM8941_WLED_REG_STR_MOD_SRC_INT	0x00
-#define  PM8941_WLED_REG_STR_MOD_SRC_EXT	0x01
-
-#define PM8941_WLED_REG_STR_CABC_BASE		0x66
-#define  PM8941_WLED_REG_STR_CABC_MASK		BIT(7)
-#define  PM8941_WLED_REG_STR_CABC_EN		BIT(7)
-
-struct pm8941_wled_config {
-	u32 i_boost_limit;
-	u32 ovp;
-	u32 switch_freq;
-	u32 num_strings;
-	u32 i_limit;
-	bool cs_out_en;
-	bool ext_gen;
-	bool cabc_en;
-};
-
-struct pm8941_wled {
-	struct regmap *regmap;
-	u16 addr;
-
-	struct led_classdev cdev;
-
-	struct pm8941_wled_config cfg;
-};
-
-static int pm8941_wled_set(struct led_classdev *cdev,
-			   enum led_brightness value)
-{
-	struct pm8941_wled *wled;
-	u8 ctrl = 0;
-	u16 val;
-	int rc;
-	int i;
-
-	wled = container_of(cdev, struct pm8941_wled, cdev);
-
-	if (value != 0)
-		ctrl = PM8941_WLED_REG_MOD_EN_BIT;
-
-	val = value * PM8941_WLED_REG_VAL_MAX / LED_FULL;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_MOD_EN,
-			PM8941_WLED_REG_MOD_EN_MASK, ctrl);
-	if (rc)
-		return rc;
-
-	for (i = 0; i < wled->cfg.num_strings; ++i) {
-		u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
-
-		rc = regmap_bulk_write(wled->regmap,
-				wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
-				v, 2);
-		if (rc)
-			return rc;
-	}
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_SYNC,
-			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_SYNC,
-			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
-	return rc;
-}
-
-static void pm8941_wled_set_brightness(struct led_classdev *cdev,
-				       enum led_brightness value)
-{
-	if (pm8941_wled_set(cdev, value)) {
-		dev_err(cdev->dev, "Unable to set brightness\n");
-		return;
-	}
-	cdev->brightness = value;
-}
-
-static int pm8941_wled_setup(struct pm8941_wled *wled)
-{
-	int rc;
-	int i;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_OVP,
-			PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_BOOST,
-			PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
-	if (rc)
-		return rc;
-
-	rc = regmap_update_bits(wled->regmap,
-			wled->addr + PM8941_WLED_REG_FREQ,
-			PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
-	if (rc)
-		return rc;
-
-	if (wled->cfg.cs_out_en) {
-		u8 all = (BIT(wled->cfg.num_strings) - 1)
-				<< PM8941_WLED_REG_SINK_SHFT;
-
-		rc = regmap_update_bits(wled->regmap,
-				wled->addr + PM8941_WLED_REG_SINK,
-				PM8941_WLED_REG_SINK_MASK, all);
-		if (rc)
-			return rc;
-	}
-
-	for (i = 0; i < wled->cfg.num_strings; ++i) {
-		u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
-				PM8941_WLED_REG_STR_MOD_MASK,
-				PM8941_WLED_REG_STR_MOD_EN);
-		if (rc)
-			return rc;
-
-		if (wled->cfg.ext_gen) {
-			rc = regmap_update_bits(wled->regmap,
-					addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
-					PM8941_WLED_REG_STR_MOD_SRC_MASK,
-					PM8941_WLED_REG_STR_MOD_SRC_EXT);
-			if (rc)
-				return rc;
-		}
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_SCALE_BASE,
-				PM8941_WLED_REG_STR_SCALE_MASK,
-				wled->cfg.i_limit);
-		if (rc)
-			return rc;
-
-		rc = regmap_update_bits(wled->regmap,
-				addr + PM8941_WLED_REG_STR_CABC_BASE,
-				PM8941_WLED_REG_STR_CABC_MASK,
-				wled->cfg.cabc_en ?
-					PM8941_WLED_REG_STR_CABC_EN : 0);
-		if (rc)
-			return rc;
-	}
-
-	return 0;
-}
-
-static const struct pm8941_wled_config pm8941_wled_config_defaults = {
-	.i_boost_limit = 3,
-	.i_limit = 20,
-	.ovp = 2,
-	.switch_freq = 5,
-	.num_strings = 0,
-	.cs_out_en = false,
-	.ext_gen = false,
-	.cabc_en = false,
-};
-
-struct pm8941_wled_var_cfg {
-	const u32 *values;
-	u32 (*fn)(u32);
-	int size;
-};
-
-static const u32 pm8941_wled_i_boost_limit_values[] = {
-	105, 385, 525, 805, 980, 1260, 1400, 1680,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
-	.values = pm8941_wled_i_boost_limit_values,
-	.size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
-};
-
-static const u32 pm8941_wled_ovp_values[] = {
-	35, 32, 29, 27,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
-	.values = pm8941_wled_ovp_values,
-	.size = ARRAY_SIZE(pm8941_wled_ovp_values),
-};
-
-static u32 pm8941_wled_num_strings_values_fn(u32 idx)
-{
-	return idx + 1;
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
-	.fn = pm8941_wled_num_strings_values_fn,
-	.size = 3,
-};
-
-static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
-{
-	return 19200 / (2 * (1 + idx));
-}
-
-static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
-	.fn = pm8941_wled_switch_freq_values_fn,
-	.size = 16,
-};
-
-static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
-	.size = 26,
-};
-
-static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
-{
-	if (idx >= cfg->size)
-		return UINT_MAX;
-	if (cfg->fn)
-		return cfg->fn(idx);
-	if (cfg->values)
-		return cfg->values[idx];
-	return idx;
-}
-
-static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
-{
-	struct pm8941_wled_config *cfg = &wled->cfg;
-	u32 val;
-	int rc;
-	u32 c;
-	int i;
-	int j;
-
-	const struct {
-		const char *name;
-		u32 *val_ptr;
-		const struct pm8941_wled_var_cfg *cfg;
-	} u32_opts[] = {
-		{
-			"qcom,current-boost-limit",
-			&cfg->i_boost_limit,
-			.cfg = &pm8941_wled_i_boost_limit_cfg,
-		},
-		{
-			"qcom,current-limit",
-			&cfg->i_limit,
-			.cfg = &pm8941_wled_i_limit_cfg,
-		},
-		{
-			"qcom,ovp",
-			&cfg->ovp,
-			.cfg = &pm8941_wled_ovp_cfg,
-		},
-		{
-			"qcom,switching-freq",
-			&cfg->switch_freq,
-			.cfg = &pm8941_wled_switch_freq_cfg,
-		},
-		{
-			"qcom,num-strings",
-			&cfg->num_strings,
-			.cfg = &pm8941_wled_num_strings_cfg,
-		},
-	};
-	const struct {
-		const char *name;
-		bool *val_ptr;
-	} bool_opts[] = {
-		{ "qcom,cs-out", &cfg->cs_out_en, },
-		{ "qcom,ext-gen", &cfg->ext_gen, },
-		{ "qcom,cabc", &cfg->cabc_en, },
-	};
-
-	rc = of_property_read_u32(dev->of_node, "reg", &val);
-	if (rc || val > 0xffff) {
-		dev_err(dev, "invalid IO resources\n");
-		return rc ? rc : -EINVAL;
-	}
-	wled->addr = val;
-
-	rc = of_property_read_string(dev->of_node, "label", &wled->cdev.name);
-	if (rc)
-		wled->cdev.name = dev->of_node->name;
-
-	wled->cdev.default_trigger = of_get_property(dev->of_node,
-			"linux,default-trigger", NULL);
-
-	*cfg = pm8941_wled_config_defaults;
-	for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
-		rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
-		if (rc == -EINVAL) {
-			continue;
-		} else if (rc) {
-			dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
-			return rc;
-		}
-
-		c = UINT_MAX;
-		for (j = 0; c != val; j++) {
-			c = pm8941_wled_values(u32_opts[i].cfg, j);
-			if (c == UINT_MAX) {
-				dev_err(dev, "invalid value for '%s'\n",
-					u32_opts[i].name);
-				return -EINVAL;
-			}
-		}
-
-		dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
-		*u32_opts[i].val_ptr = j;
-	}
-
-	for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
-		if (of_property_read_bool(dev->of_node, bool_opts[i].name))
-			*bool_opts[i].val_ptr = true;
-	}
-
-	cfg->num_strings = cfg->num_strings + 1;
-
-	return 0;
-}
-
-static int pm8941_wled_probe(struct platform_device *pdev)
-{
-	struct pm8941_wled *wled;
-	struct regmap *regmap;
-	int rc;
-
-	regmap = dev_get_regmap(pdev->dev.parent, NULL);
-	if (!regmap) {
-		dev_err(&pdev->dev, "Unable to get regmap\n");
-		return -EINVAL;
-	}
-
-	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
-	if (!wled)
-		return -ENOMEM;
-
-	wled->regmap = regmap;
-
-	rc = pm8941_wled_configure(wled, &pdev->dev);
-	if (rc)
-		return rc;
-
-	rc = pm8941_wled_setup(wled);
-	if (rc)
-		return rc;
-
-	wled->cdev.brightness_set = pm8941_wled_set_brightness;
-
-	rc = devm_led_classdev_register(&pdev->dev, &wled->cdev);
-	if (rc)
-		return rc;
-
-	platform_set_drvdata(pdev, wled);
-
-	return 0;
-};
-
-static const struct of_device_id pm8941_wled_match_table[] = {
-	{ .compatible = "qcom,pm8941-wled" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
-
-static struct platform_driver pm8941_wled_driver = {
-	.probe = pm8941_wled_probe,
-	.driver	= {
-		.name = "pm8941-wled",
-		.of_match_table	= pm8941_wled_match_table,
-	},
-};
-
-module_platform_driver(pm8941_wled_driver);
-
-MODULE_DESCRIPTION("pm8941 wled driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:pm8941-wled");
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
new file mode 100644
index 0000000..2c5c5b1
--- /dev/null
+++ b/drivers/leds/leds-powernv.c
@@ -0,0 +1,345 @@
+/*
+ * PowerNV LED Driver
+ *
+ * Copyright IBM Corp. 2015
+ *
+ * Author: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
+ * Author: Anshuman Khandual <khandual@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/opal.h>
+
+/* Map LED type to description. */
+struct led_type_map {
+	const int	type;
+	const char	*desc;
+};
+static const struct led_type_map led_type_map[] = {
+	{OPAL_SLOT_LED_TYPE_ID,		"identify"},
+	{OPAL_SLOT_LED_TYPE_FAULT,	"fault"},
+	{OPAL_SLOT_LED_TYPE_ATTN,	"attention"},
+	{-1,				NULL},
+};
+
+struct powernv_led_common {
+	/*
+	 * By default the unload path resets all the LEDs. But on the
+	 * PowerNV platform we want to retain the LED state across reboots,
+	 * as these are controlled by firmware. Also, the service processor
+	 * can modify the LEDs independently of the OS. Hence avoid
+	 * resetting the LEDs in the unload path.
+	 */
+	bool		led_disabled;
+
+	/* Max supported LED type */
+	__be64		max_led_type;
+
+	/* global lock */
+	struct mutex	lock;
+};
+
+/* PowerNV LED data */
+struct powernv_led_data {
+	struct led_classdev	cdev;
+	char			*loc_code;	/* LED location code */
+	int			led_type;	/* OPAL_SLOT_LED_TYPE_* */
+
+	struct powernv_led_common *common;
+};
+
+
+/* Returns OPAL_SLOT_LED_TYPE_* for given led type string */
+static int powernv_get_led_type(const char *led_type_desc)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(led_type_map); i++)
+		if (!strcmp(led_type_map[i].desc, led_type_desc))
+			return led_type_map[i].type;
+
+	return -1;
+}
+
+/*
+ * This commits the state change of the requested LED through an OPAL call.
+ * This function is called from work queue task context whenever it gets
+ * scheduled. This function can sleep at opal_async_wait_response call.
+ */
+static void powernv_led_set(struct powernv_led_data *powernv_led,
+			    enum led_brightness value)
+{
+	int rc, token;
+	u64 led_mask, led_value = 0;
+	__be64 max_type;
+	struct opal_msg msg;
+	struct device *dev = powernv_led->cdev.dev;
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Prepare for the OPAL call */
+	max_type = powernv_led_common->max_led_type;
+	led_mask = OPAL_SLOT_LED_STATE_ON << powernv_led->led_type;
+	if (value)
+		led_value = led_mask;
+
+	/* OPAL async call */
+	token = opal_async_get_token_interruptible();
+	if (token < 0) {
+		if (token != -ERESTARTSYS)
+			dev_err(dev, "%s: Couldn't get OPAL async token\n",
+				__func__);
+		return;
+	}
+
+	rc = opal_leds_set_ind(token, powernv_led->loc_code,
+			       led_mask, led_value, &max_type);
+	if (rc != OPAL_ASYNC_COMPLETION) {
+		dev_err(dev, "%s: OPAL set LED call failed for %s [rc=%d]\n",
+			__func__, powernv_led->loc_code, rc);
+		goto out_token;
+	}
+
+	rc = opal_async_wait_response(token, &msg);
+	if (rc) {
+		dev_err(dev,
+			"%s: Failed to wait for the async response [rc=%d]\n",
+			__func__, rc);
+		goto out_token;
+	}
+
+	rc = be64_to_cpu(msg.params[1]);
+	if (rc != OPAL_SUCCESS)
+		dev_err(dev, "%s : OAPL async call returned failed [rc=%d]\n",
+			__func__, rc);
+
+out_token:
+	opal_async_release_token(token);
+}
+
+/*
+ * This function fetches the LED state for a given LED type of the
+ * corresponding LED classdev structure.
+ */
+static enum led_brightness powernv_led_get(struct powernv_led_data *powernv_led)
+{
+	int rc;
+	__be64 mask, value, max_type;
+	u64 led_mask, led_value;
+	struct device *dev = powernv_led->cdev.dev;
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Fetch all LED status */
+	mask = cpu_to_be64(0);
+	value = cpu_to_be64(0);
+	max_type = powernv_led_common->max_led_type;
+
+	rc = opal_leds_get_ind(powernv_led->loc_code,
+			       &mask, &value, &max_type);
+	if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
+		dev_err(dev, "%s: OPAL get led call failed [rc=%d]\n",
+			__func__, rc);
+		return LED_OFF;
+	}
+
+	led_mask = be64_to_cpu(mask);
+	led_value = be64_to_cpu(value);
+
+	/* Check whether LED status is available */
+	if (!((led_mask >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)) {
+		dev_err(dev, "%s: LED status not available for %s\n",
+			__func__, powernv_led->cdev.name);
+		return LED_OFF;
+	}
+
+	/* LED status value */
+	if ((led_value >> powernv_led->led_type) & OPAL_SLOT_LED_STATE_ON)
+		return LED_FULL;
+
+	return LED_OFF;
+}
+
+/*
+ * LED classdev 'brightness_set' function. This updates the LED state
+ * for the requested LED type through an OPAL call.
+ */
+static void powernv_brightness_set(struct led_classdev *led_cdev,
+				   enum led_brightness value)
+{
+	struct powernv_led_data *powernv_led =
+		container_of(led_cdev, struct powernv_led_data, cdev);
+	struct powernv_led_common *powernv_led_common = powernv_led->common;
+
+	/* Do not modify LED in unload path */
+	if (powernv_led_common->led_disabled)
+		return;
+
+	mutex_lock(&powernv_led_common->lock);
+	powernv_led_set(powernv_led, value);
+	mutex_unlock(&powernv_led_common->lock);
+}
+
+/* LED classdev 'brightness_get' function */
+static enum led_brightness powernv_brightness_get(struct led_classdev *led_cdev)
+{
+	struct powernv_led_data *powernv_led =
+		container_of(led_cdev, struct powernv_led_data, cdev);
+
+	return powernv_led_get(powernv_led);
+}
+
+/*
+ * This function registers classdev structure for any given type of LED on
+ * a given child LED device node.
+ */
+static int powernv_led_create(struct device *dev,
+			      struct powernv_led_data *powernv_led,
+			      const char *led_type_desc)
+{
+	int rc;
+
+	/* Make sure LED type is supported */
+	powernv_led->led_type = powernv_get_led_type(led_type_desc);
+	if (powernv_led->led_type == -1) {
+		dev_warn(dev, "%s: No support for led type : %s\n",
+			 __func__, led_type_desc);
+		return -EINVAL;
+	}
+
+	/* Create the name for classdev */
+	powernv_led->cdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+						powernv_led->loc_code,
+						led_type_desc);
+	if (!powernv_led->cdev.name) {
+		dev_err(dev,
+			"%s: Memory allocation failed for classdev name\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	powernv_led->cdev.brightness_set = powernv_brightness_set;
+	powernv_led->cdev.brightness_get = powernv_brightness_get;
+	powernv_led->cdev.brightness = LED_OFF;
+	powernv_led->cdev.max_brightness = LED_FULL;
+
+	/* Register the classdev */
+	rc = devm_led_classdev_register(dev, &powernv_led->cdev);
+	if (rc) {
+		dev_err(dev, "%s: Classdev registration failed for %s\n",
+			__func__, powernv_led->cdev.name);
+	}
+
+	return rc;
+}
+
+/* Go through LED device tree node and register LED classdev structure */
+static int powernv_led_classdev(struct platform_device *pdev,
+				struct device_node *led_node,
+				struct powernv_led_common *powernv_led_common)
+{
+	const char *cur = NULL;
+	int rc = -1;
+	struct property *p;
+	struct device_node *np;
+	struct powernv_led_data *powernv_led;
+	struct device *dev = &pdev->dev;
+
+	for_each_child_of_node(led_node, np) {
+		p = of_find_property(np, "led-types", NULL);
+		if (!p)
+			continue;
+
+		while ((cur = of_prop_next_string(p, cur)) != NULL) {
+			powernv_led = devm_kzalloc(dev, sizeof(*powernv_led),
+						   GFP_KERNEL);
+			if (!powernv_led)
+				return -ENOMEM;
+
+			powernv_led->common = powernv_led_common;
+			powernv_led->loc_code = (char *)np->name;
+
+			rc = powernv_led_create(dev, powernv_led, cur);
+			if (rc)
+				return rc;
+		} /* while end */
+	}
+
+	return rc;
+}
+
+/* Platform driver probe */
+static int powernv_led_probe(struct platform_device *pdev)
+{
+	struct device_node *led_node;
+	struct powernv_led_common *powernv_led_common;
+	struct device *dev = &pdev->dev;
+
+	led_node = of_find_node_by_path("/ibm,opal/leds");
+	if (!led_node) {
+		dev_err(dev, "%s: LED parent device node not found\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	powernv_led_common = devm_kzalloc(dev, sizeof(*powernv_led_common),
+					  GFP_KERNEL);
+	if (!powernv_led_common)
+		return -ENOMEM;
+
+	mutex_init(&powernv_led_common->lock);
+	powernv_led_common->max_led_type = cpu_to_be64(OPAL_SLOT_LED_TYPE_MAX);
+
+	platform_set_drvdata(pdev, powernv_led_common);
+
+	return powernv_led_classdev(pdev, led_node, powernv_led_common);
+}
+
+/* Platform driver remove */
+static int powernv_led_remove(struct platform_device *pdev)
+{
+	struct powernv_led_common *powernv_led_common;
+
+	/* Disable LED operation */
+	powernv_led_common = platform_get_drvdata(pdev);
+	powernv_led_common->led_disabled = true;
+
+	/* Destroy lock */
+	mutex_destroy(&powernv_led_common->lock);
+
+	dev_info(&pdev->dev, "PowerNV led module unregistered\n");
+	return 0;
+}
+
+/* Platform driver property match */
+static const struct of_device_id powernv_led_match[] = {
+	{
+		.compatible	= "ibm,opal-v3-led",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, powernv_led_match);
+
+static struct platform_driver powernv_led_driver = {
+	.probe	= powernv_led_probe,
+	.remove = powernv_led_remove,
+	.driver = {
+		.name = "powernv-led-driver",
+		.of_match_table = powernv_led_match,
+	},
+};
+
+module_platform_driver(powernv_led_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PowerNV LED driver");
+MODULE_AUTHOR("Vasant Hegde <hegdevasant@linux.vnet.ibm.com>");
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index d1660b0..b88900d 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -83,9 +83,9 @@
 		return -ENODEV;
 	}
 	map = syscon_node_to_regmap(parent->of_node);
-	if (!map) {
+	if (IS_ERR(map)) {
 		dev_err(dev, "no regmap for syscon LED parent\n");
-		return -ENODEV;
+		return PTR_ERR(map);
 	}
 
 	sled = devm_kzalloc(dev, sizeof(*sled), GFP_KERNEL);
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 20fa8e7..edbecc4 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -735,6 +735,7 @@
 	{ .compatible = "ti,tca6507", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_tca6507_leds_match);
 
 #else
 static struct tca6507_platform_data *
@@ -830,7 +831,6 @@
 static struct i2c_driver tca6507_driver = {
 	.driver   = {
 		.name    = "leds-tca6507",
-		.owner   = THIS_MODULE,
 		.of_match_table = of_match_ptr(of_tca6507_leds_match),
 	},
 	.probe    = tca6507_probe,
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index de16c29..b806eca 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -231,10 +231,6 @@
 	if (!count || count > tlc591xx->max_leds)
 		return -EINVAL;
 
-	if (!i2c_check_functionality(client->adapter,
-				     I2C_FUNC_SMBUS_BYTE_DATA))
-		return -EIO;
-
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return -ENOMEM;
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 49794b4..5bda6a9 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -72,7 +72,7 @@
 config LEDS_TRIGGER_GPIO
 	tristate "LED GPIO Trigger"
 	depends on LEDS_TRIGGERS
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  This allows LEDs to be controlled by gpio events. It's good
 	  when using gpios as switches and triggering the needed LEDs
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 109dcaa..68dcbcb 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -408,6 +408,7 @@
 	{ "therm_adm1030", adm1030 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
 
 static int
 do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
@@ -459,6 +460,7 @@
 	.compatible	= "adm1030"
     }, {}
 };
+MODULE_DEVICE_TABLE(of, therm_of_match);
 
 static struct platform_driver therm_of_driver = {
 	.driver = {
diff --git a/drivers/macintosh/windfarm.h b/drivers/macintosh/windfarm.h
index 028cdac..901c42f 100644
--- a/drivers/macintosh/windfarm.h
+++ b/drivers/macintosh/windfarm.h
@@ -53,11 +53,9 @@
  * the kref and wf_unregister_control will decrement it, thus the
  * object creating/disposing a given control shouldn't assume it
  * still exists after wf_unregister_control has been called.
- * wf_find_control will inc the refcount for you
  */
 extern int wf_register_control(struct wf_control *ct);
 extern void wf_unregister_control(struct wf_control *ct);
-extern struct wf_control * wf_find_control(const char *name);
 extern int wf_get_control(struct wf_control *ct);
 extern void wf_put_control(struct wf_control *ct);
 
@@ -117,7 +115,6 @@
 /* Same lifetime rules as controls */
 extern int wf_register_sensor(struct wf_sensor *sr);
 extern void wf_unregister_sensor(struct wf_sensor *sr);
-extern struct wf_sensor * wf_find_sensor(const char *name);
 extern int wf_get_sensor(struct wf_sensor *sr);
 extern void wf_put_sensor(struct wf_sensor *sr);
 
@@ -144,7 +141,6 @@
 /* Overtemp conditions. Those are refcounted */
 extern void wf_set_overtemp(void);
 extern void wf_clear_overtemp(void);
-extern int wf_is_overtemp(void);
 
 #define WF_EVENT_NEW_CONTROL	0 /* param is wf_control * */
 #define WF_EVENT_NEW_SENSOR	1 /* param is wf_sensor * */
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index 3ee198b..465d770 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -72,7 +72,7 @@
 	blocking_notifier_call_chain(&wf_client_list, event, param);
 }
 
-int wf_critical_overtemp(void)
+static int wf_critical_overtemp(void)
 {
 	static char * critical_overtemp_path = "/sbin/critical_overtemp";
 	char *argv[] = { critical_overtemp_path, NULL };
@@ -84,7 +84,6 @@
 	return call_usermodehelper(critical_overtemp_path,
 				   argv, envp, UMH_WAIT_EXEC);
 }
-EXPORT_SYMBOL_GPL(wf_critical_overtemp);
 
 static int wf_thread_func(void *data)
 {
@@ -255,24 +254,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_unregister_control);
 
-struct wf_control * wf_find_control(const char *name)
-{
-	struct wf_control *ct;
-
-	mutex_lock(&wf_lock);
-	list_for_each_entry(ct, &wf_controls, link) {
-		if (!strcmp(ct->name, name)) {
-			if (wf_get_control(ct))
-				ct = NULL;
-			mutex_unlock(&wf_lock);
-			return ct;
-		}
-	}
-	mutex_unlock(&wf_lock);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_control);
-
 int wf_get_control(struct wf_control *ct)
 {
 	if (!try_module_get(ct->ops->owner))
@@ -368,24 +349,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_unregister_sensor);
 
-struct wf_sensor * wf_find_sensor(const char *name)
-{
-	struct wf_sensor *sr;
-
-	mutex_lock(&wf_lock);
-	list_for_each_entry(sr, &wf_sensors, link) {
-		if (!strcmp(sr->name, name)) {
-			if (wf_get_sensor(sr))
-				sr = NULL;
-			mutex_unlock(&wf_lock);
-			return sr;
-		}
-	}
-	mutex_unlock(&wf_lock);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(wf_find_sensor);
-
 int wf_get_sensor(struct wf_sensor *sr)
 {
 	if (!try_module_get(sr->ops->owner))
@@ -435,7 +398,7 @@
 {
 	mutex_lock(&wf_lock);
 	blocking_notifier_chain_unregister(&wf_client_list, nb);
-	wf_client_count++;
+	wf_client_count--;
 	if (wf_client_count == 0)
 		wf_stop_thread();
 	mutex_unlock(&wf_lock);
@@ -474,12 +437,6 @@
 }
 EXPORT_SYMBOL_GPL(wf_clear_overtemp);
 
-int wf_is_overtemp(void)
-{
-	return (wf_overtemp != 0);
-}
-EXPORT_SYMBOL_GPL(wf_is_overtemp);
-
 static int __init windfarm_core_init(void)
 {
 	DBG("wf: core loaded\n");
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index e269f08..bbec500 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -46,6 +46,7 @@
 config PCC
 	bool "Platform Communication Channel Driver"
 	depends on ACPI
+	default n
 	help
 	  ACPI 5.0+ spec defines a generic mode of communication
 	  between the OS and a platform such as the BMC. This medium
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index d9e99f9..d0d0b03 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -96,7 +96,7 @@
 			  IRQF_SHARED, "mhu_link", chan);
 	if (ret) {
 		dev_err(chan->mbox->dev,
-			"Unable to aquire IRQ %d\n", mlink->irq);
+			"Unable to acquire IRQ %d\n", mlink->irq);
 		return ret;
 	}
 
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 26d121d..68885a8 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -352,4 +352,10 @@
 
 	return 0;
 }
-device_initcall(pcc_init);
+
+/*
+ * Make PCC init postcore so that users of this mailbox
+ * such as the ACPI Processor driver have it available
+ * at their init.
+ */
+postcore_initcall(pcc_init);
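
The initcall-level change above is the whole point of this hunk; a rough
sketch of the ordering involved (levels as defined in include/linux/init.h,
lower levels run earlier during boot):

	/*
	 *   core_initcall         level 1
	 *   postcore_initcall     level 2   <- pcc_init now runs here
	 *   subsys_initcall       level 4
	 *   device_initcall       level 6   <- pcc_init ran here before
	 *
	 * so a device_initcall-level consumer such as the ACPI processor
	 * driver finds the PCC mailbox channels already registered.
	 */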
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index bfec3bd..d5415ee 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -478,7 +478,7 @@
 	  This device-mapper target takes two devices, one device to use
 	  normally, one to log all write operations done to the first device.
 	  This is for use by file system developers wishing to verify that
-	  their fs is writing a consitent file system at all times by allowing
+	  their fs is writing a consistent file system at all times by allowing
 	  them to replay the log in a variety of ways and to check the
 	  contents.
 
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 04f7bc2..6b420a5 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -243,19 +243,6 @@
 	DECLARE_ARRAY_ALLOCATOR(struct keybuf_key, freelist, KEYBUF_NR);
 };
 
-struct bio_split_pool {
-	struct bio_set		*bio_split;
-	mempool_t		*bio_split_hook;
-};
-
-struct bio_split_hook {
-	struct closure		cl;
-	struct bio_split_pool	*p;
-	struct bio		*bio;
-	bio_end_io_t		*bi_end_io;
-	void			*bi_private;
-};
-
 struct bcache_device {
 	struct closure		cl;
 
@@ -288,8 +275,6 @@
 	int (*cache_miss)(struct btree *, struct search *,
 			  struct bio *, unsigned);
 	int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
-
-	struct bio_split_pool	bio_split_hook;
 };
 
 struct io {
@@ -454,8 +439,6 @@
 	atomic_long_t		meta_sectors_written;
 	atomic_long_t		btree_sectors_written;
 	atomic_long_t		sectors_written;
-
-	struct bio_split_pool	bio_split_hook;
 };
 
 struct gc_stat {
@@ -873,7 +856,6 @@
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
 
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 00cde40..83392f8 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -278,7 +278,7 @@
 	goto out;
 }
 
-static void btree_node_read_endio(struct bio *bio, int error)
+static void btree_node_read_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	closure_put(cl);
@@ -305,7 +305,7 @@
 	bch_submit_bbio(bio, b->c, &b->key, 0);
 	closure_sync(&cl);
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (bio->bi_error)
 		set_btree_node_io_error(b);
 
 	bch_bbio_free(bio, b->c);
@@ -371,15 +371,15 @@
 	__btree_node_write_done(cl);
 }
 
-static void btree_node_write_endio(struct bio *bio, int error)
+static void btree_node_write_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct btree *b = container_of(cl, struct btree, io);
 
-	if (error)
+	if (bio->bi_error)
 		set_btree_node_io_error(b);
 
-	bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
+	bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
 	closure_put(cl);
 }
 
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index 79a6d63..782cc2c 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -38,7 +38,7 @@
  * they are running owned by the thread that is running them. Otherwise, suppose
  * you submit some bios and wish to have a function run when they all complete:
  *
- * foo_endio(struct bio *bio, int error)
+ * foo_endio(struct bio *bio)
  * {
  *	closure_put(cl);
  * }
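
The updated example mirrors the tree-wide bio completion change seen in the
rest of this series: the status now travels in bio->bi_error and bio_endio()
takes a single argument. A minimal sketch of both sides of that convention
(foo_endio and foo_fail_bio are illustrative names, not part of this patch):

	static void foo_endio(struct bio *bio)
	{
		struct closure *cl = bio->bi_private;

		if (bio->bi_error)	/* was the second "int error" argument */
			pr_err("bio failed: %d\n", bio->bi_error);

		closure_put(cl);
	}

	static void foo_fail_bio(struct bio *bio, int error)
	{
		bio->bi_error = error;	/* submitters set the status first... */
		bio_endio(bio);		/* ...then complete with one argument */
	}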
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index bf6a9ca..86a0bb8 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,105 +11,6 @@
 
 #include <linux/blkdev.h>
 
-static unsigned bch_bio_max_sectors(struct bio *bio)
-{
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	struct bio_vec bv;
-	struct bvec_iter iter;
-	unsigned ret = 0, seg = 0;
-
-	if (bio->bi_rw & REQ_DISCARD)
-		return min(bio_sectors(bio), q->limits.max_discard_sectors);
-
-	bio_for_each_segment(bv, bio, iter) {
-		struct bvec_merge_data bvm = {
-			.bi_bdev	= bio->bi_bdev,
-			.bi_sector	= bio->bi_iter.bi_sector,
-			.bi_size	= ret << 9,
-			.bi_rw		= bio->bi_rw,
-		};
-
-		if (seg == min_t(unsigned, BIO_MAX_PAGES,
-				 queue_max_segments(q)))
-			break;
-
-		if (q->merge_bvec_fn &&
-		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-			break;
-
-		seg++;
-		ret += bv.bv_len >> 9;
-	}
-
-	ret = min(ret, queue_max_sectors(q));
-
-	WARN_ON(!ret);
-	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
-
-	return ret;
-}
-
-static void bch_bio_submit_split_done(struct closure *cl)
-{
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
-	s->bio->bi_end_io = s->bi_end_io;
-	s->bio->bi_private = s->bi_private;
-	bio_endio(s->bio, 0);
-
-	closure_debug_destroy(&s->cl);
-	mempool_free(s, s->p->bio_split_hook);
-}
-
-static void bch_bio_submit_split_endio(struct bio *bio, int error)
-{
-	struct closure *cl = bio->bi_private;
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-
-	if (error)
-		clear_bit(BIO_UPTODATE, &s->bio->bi_flags);
-
-	bio_put(bio);
-	closure_put(cl);
-}
-
-void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
-{
-	struct bio_split_hook *s;
-	struct bio *n;
-
-	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
-		goto submit;
-
-	if (bio_sectors(bio) <= bch_bio_max_sectors(bio))
-		goto submit;
-
-	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
-	closure_init(&s->cl, NULL);
-
-	s->bio		= bio;
-	s->p		= p;
-	s->bi_end_io	= bio->bi_end_io;
-	s->bi_private	= bio->bi_private;
-	bio_get(bio);
-
-	do {
-		n = bio_next_split(bio, bch_bio_max_sectors(bio),
-				   GFP_NOIO, s->p->bio_split);
-
-		n->bi_end_io	= bch_bio_submit_split_endio;
-		n->bi_private	= &s->cl;
-
-		closure_get(&s->cl);
-		generic_make_request(n);
-	} while (n != bio);
-
-	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
-	return;
-submit:
-	generic_make_request(bio);
-}
-
 /* Bios with headers */
 
 void bch_bbio_free(struct bio *bio, struct cache_set *c)
@@ -139,7 +40,7 @@
 	bio->bi_bdev		= PTR_CACHE(c, &b->key, 0)->bdev;
 
 	b->submit_time_us = local_clock_us();
-	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
+	closure_bio_submit(bio, bio->bi_private);
 }
 
 void bch_submit_bbio(struct bio *bio, struct cache_set *c,
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 418607a..29eba72 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -24,7 +24,7 @@
  * bit.
  */
 
-static void journal_read_endio(struct bio *bio, int error)
+static void journal_read_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	closure_put(cl);
@@ -61,7 +61,7 @@
 		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &cl, ca);
+		closure_bio_submit(bio, &cl);
 		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
@@ -401,7 +401,7 @@
 
 #define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)
 
-static void journal_discard_endio(struct bio *bio, int error)
+static void journal_discard_endio(struct bio *bio)
 {
 	struct journal_device *ja =
 		container_of(bio, struct journal_device, discard_bio);
@@ -547,11 +547,11 @@
 		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
 }
 
-static void journal_write_endio(struct bio *bio, int error)
+static void journal_write_endio(struct bio *bio)
 {
 	struct journal_write *w = bio->bi_private;
 
-	cache_set_err_on(error, w->c, "journal io error");
+	cache_set_err_on(bio->bi_error, w->c, "journal io error");
 	closure_put(&w->c->journal.io);
 }
 
@@ -648,7 +648,7 @@
 	spin_unlock(&c->journal.lock);
 
 	while ((bio = bio_list_pop(&list)))
-		closure_bio_submit(bio, cl, c->cache[0]);
+		closure_bio_submit(bio, cl);
 
 	continue_at(cl, journal_write_done, NULL);
 }
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index cd74903..b929fc9 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -60,20 +60,20 @@
 	closure_return_with_destructor(cl, moving_io_destructor);
 }
 
-static void read_moving_endio(struct bio *bio, int error)
+static void read_moving_endio(struct bio *bio)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct moving_io *io = container_of(bio->bi_private,
 					    struct moving_io, cl);
 
-	if (error)
-		io->op.error = error;
+	if (bio->bi_error)
+		io->op.error = bio->bi_error;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(io->op.c, &b->key, 0)) {
 		io->op.error = -EINTR;
 	}
 
-	bch_bbio_endio(io->op.c, bio, error, "reading data to move");
+	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index f292790..8e9877b 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -173,22 +173,22 @@
 	bch_data_insert_keys(cl);
 }
 
-static void bch_data_insert_endio(struct bio *bio, int error)
+static void bch_data_insert_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-	if (error) {
+	if (bio->bi_error) {
 		/* TODO: We could try to recover from this. */
 		if (op->writeback)
-			op->error = error;
+			op->error = bio->bi_error;
 		else if (!op->replace)
 			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
 
-	bch_bbio_endio(op->c, bio, error, "writing data to cache");
+	bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -477,7 +477,7 @@
 	struct data_insert_op	iop;
 };
 
-static void bch_cache_read_endio(struct bio *bio, int error)
+static void bch_cache_read_endio(struct bio *bio)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 	struct closure *cl = bio->bi_private;
@@ -490,15 +490,15 @@
 	 * from the backing device.
 	 */
 
-	if (error)
-		s->iop.error = error;
+	if (bio->bi_error)
+		s->iop.error = bio->bi_error;
 	else if (!KEY_DIRTY(&b->key) &&
 		 ptr_stale(s->iop.c, &b->key, 0)) {
 		atomic_long_inc(&s->iop.c->cache_read_races);
 		s->iop.error = -EINTR;
 	}
 
-	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
+	bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
 }
 
 /*
@@ -591,13 +591,13 @@
 
 /* Common code for the make_request functions */
 
-static void request_endio(struct bio *bio, int error)
+static void request_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 
-	if (error) {
+	if (bio->bi_error) {
 		struct search *s = container_of(cl, struct search, cl);
-		s->iop.error = error;
+		s->iop.error = bio->bi_error;
 		/* Only cache read errors are recoverable */
 		s->recoverable = false;
 	}
@@ -613,7 +613,8 @@
 				    &s->d->disk->part0, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
-		bio_endio(s->orig_bio, s->iop.error);
+		s->orig_bio->bi_error = s->iop.error;
+		bio_endio(s->orig_bio);
 		s->orig_bio = NULL;
 	}
 }
@@ -718,7 +719,7 @@
 
 		/* XXX: invalidate cache */
 
-		closure_bio_submit(bio, cl, s->d);
+		closure_bio_submit(bio, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -841,7 +842,7 @@
 	s->cache_miss	= miss;
 	s->iop.bio	= cache_bio;
 	bio_get(cache_bio);
-	closure_bio_submit(cache_bio, &s->cl, s->d);
+	closure_bio_submit(cache_bio, &s->cl);
 
 	return ret;
 out_put:
@@ -849,7 +850,7 @@
 out_submit:
 	miss->bi_end_io		= request_endio;
 	miss->bi_private	= &s->cl;
-	closure_bio_submit(miss, &s->cl, s->d);
+	closure_bio_submit(miss, &s->cl);
 	return ret;
 }
 
@@ -914,7 +915,7 @@
 
 		if (!(bio->bi_rw & REQ_DISCARD) ||
 		    blk_queue_discard(bdev_get_queue(dc->bdev)))
-			closure_bio_submit(bio, cl, s->d);
+			closure_bio_submit(bio, cl);
 	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
@@ -929,12 +930,12 @@
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
 
-			closure_bio_submit(flush, cl, s->d);
+			closure_bio_submit(flush, cl);
 		}
 	} else {
 		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
-		closure_bio_submit(bio, cl, s->d);
+		closure_bio_submit(bio, cl);
 	}
 
 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -950,7 +951,7 @@
 		bch_journal_meta(s->iop.c, cl);
 
 	/* If it's a flush, we send the flush to the backing device too */
-	closure_bio_submit(bio, cl, s->d);
+	closure_bio_submit(bio, cl);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
 }
@@ -992,9 +993,9 @@
 	} else {
 		if ((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
-			bch_generic_make_request(bio, &d->bio_split_hook);
+			generic_make_request(bio);
 	}
 }
 
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94980bf..679a093 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -59,29 +59,6 @@
 
 #define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
 
-static void bio_split_pool_free(struct bio_split_pool *p)
-{
-	if (p->bio_split_hook)
-		mempool_destroy(p->bio_split_hook);
-
-	if (p->bio_split)
-		bioset_free(p->bio_split);
-}
-
-static int bio_split_pool_init(struct bio_split_pool *p)
-{
-	p->bio_split = bioset_create(4, 0);
-	if (!p->bio_split)
-		return -ENOMEM;
-
-	p->bio_split_hook = mempool_create_kmalloc_pool(4,
-				sizeof(struct bio_split_hook));
-	if (!p->bio_split_hook)
-		return -ENOMEM;
-
-	return 0;
-}
-
 /* Superblock */
 
 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
@@ -221,7 +198,7 @@
 	return err;
 }
 
-static void write_bdev_super_endio(struct bio *bio, int error)
+static void write_bdev_super_endio(struct bio *bio)
 {
 	struct cached_dev *dc = bio->bi_private;
 	/* XXX: error checking */
@@ -290,11 +267,11 @@
 	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
-static void write_super_endio(struct bio *bio, int error)
+static void write_super_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	bch_count_io_errors(ca, error, "writing superblock");
+	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
 	closure_put(&ca->set->sb_write);
 }
 
@@ -339,12 +316,12 @@
 
 /* UUID io */
 
-static void uuid_endio(struct bio *bio, int error)
+static void uuid_endio(struct bio *bio)
 {
 	struct closure *cl = bio->bi_private;
 	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
-	cache_set_err_on(error, c, "accessing uuids");
+	cache_set_err_on(bio->bi_error, c, "accessing uuids");
 	bch_bbio_free(bio, c);
 	closure_put(cl);
 }
@@ -512,11 +489,11 @@
  * disk.
  */
 
-static void prio_endio(struct bio *bio, int error)
+static void prio_endio(struct bio *bio)
 {
 	struct cache *ca = bio->bi_private;
 
-	cache_set_err_on(error, ca->set, "accessing priorities");
+	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
 	bch_bbio_free(bio, ca->set);
 	closure_put(&ca->prio);
 }
@@ -537,7 +514,7 @@
 	bio->bi_private = ca;
 	bch_bio_map(bio, ca->disk_buckets);
 
-	closure_bio_submit(bio, &ca->prio, ca);
+	closure_bio_submit(bio, &ca->prio);
 	closure_sync(cl);
 }
 
@@ -757,7 +734,6 @@
 		put_disk(d->disk);
 	}
 
-	bio_split_pool_free(&d->bio_split_hook);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
 	kvfree(d->full_dirty_stripes);
@@ -804,7 +780,6 @@
 		return minor;
 
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    bio_split_pool_init(&d->bio_split_hook) ||
 	    !(d->disk = alloc_disk(1))) {
 		ida_simple_remove(&bcache_minor, minor);
 		return -ENOMEM;
@@ -830,7 +805,7 @@
 	q->limits.max_sectors		= UINT_MAX;
 	q->limits.max_segment_size	= UINT_MAX;
 	q->limits.max_segments		= BIO_MAX_PAGES;
-	q->limits.max_discard_sectors	= UINT_MAX;
+	blk_queue_max_discard_sectors(q, UINT_MAX);
 	q->limits.discard_granularity	= 512;
 	q->limits.io_min		= block_size;
 	q->limits.logical_block_size	= block_size;
@@ -1793,8 +1768,6 @@
 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
 	}
 
-	bio_split_pool_free(&ca->bio_split_hook);
-
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
 	kfree(ca->prio_buckets);
 	vfree(ca->buckets);
@@ -1839,8 +1812,7 @@
 					  ca->sb.nbuckets)) ||
 	    !(ca->prio_buckets	= kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
 					  2, GFP_KERNEL)) ||
-	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    bio_split_pool_init(&ca->bio_split_hook))
+	    !(ca->disk_buckets	= alloc_bucket_pages(GFP_KERNEL, ca)))
 		return -ENOMEM;
 
 	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 1d04c48..cf2cbc2 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,6 +4,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/errno.h>
+#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/llist.h>
 #include <linux/ratelimit.h>
@@ -570,10 +571,10 @@
 	return bdev->bd_inode->i_size >> 9;
 }
 
-#define closure_bio_submit(bio, cl, dev)				\
+#define closure_bio_submit(bio, cl)					\
 do {									\
 	closure_get(cl);						\
-	bch_generic_make_request(bio, &(dev)->bio_split_hook);		\
+	generic_make_request(bio);					\
 } while (0)
 
 uint64_t bch_crc64_update(uint64_t, const void *, size_t);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index f1986bc..b23f88d 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -166,12 +166,12 @@
 	closure_return_with_destructor(cl, dirty_io_destructor);
 }
 
-static void dirty_endio(struct bio *bio, int error)
+static void dirty_endio(struct bio *bio)
 {
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
-	if (error)
+	if (bio->bi_error)
 		SET_KEY_DIRTY(&w->key, false);
 
 	closure_put(&io->cl);
@@ -188,27 +188,27 @@
 	io->bio.bi_bdev		= io->dc->bdev;
 	io->bio.bi_end_io	= dirty_endio;
 
-	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+	closure_bio_submit(&io->bio, cl);
 
 	continue_at(cl, write_dirty_finish, system_wq);
 }
 
-static void read_dirty_endio(struct bio *bio, int error)
+static void read_dirty_endio(struct bio *bio)
 {
 	struct keybuf_key *w = bio->bi_private;
 	struct dirty_io *io = w->private;
 
 	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-			    error, "reading dirty data from cache");
+			    bio->bi_error, "reading dirty data from cache");
 
-	dirty_endio(bio, error);
+	dirty_endio(bio);
 }
 
 static void read_dirty_submit(struct closure *cl)
 {
 	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
 
-	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+	closure_bio_submit(&io->bio, cl);
 
 	continue_at(cl, write_dirty, system_wq);
 }
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index cd6d1d2..03af174 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -236,8 +236,10 @@
 	bio_list_init(&bios);
 	dm_cell_release(prison, cell, &bios);
 
-	while ((bio = bio_list_pop(&bios)))
-		bio_endio(bio, error);
+	while ((bio = bio_list_pop(&bios))) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	}
 }
 EXPORT_SYMBOL_GPL(dm_cell_error);
 
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 86dbbc7..83cc52e 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -545,7 +545,8 @@
 {
 	struct dm_buffer *b = context;
 
-	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
+	b->bio.bi_error = error ? -EIO : 0;
+	b->bio.bi_end_io(&b->bio);
 }
 
 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
@@ -575,13 +576,16 @@
 	b->bio.bi_end_io = end_io;
 
 	r = dm_io(&io_req, 1, &region, NULL);
-	if (r)
-		end_io(&b->bio, r);
+	if (r) {
+		b->bio.bi_error = r;
+		end_io(&b->bio);
+	}
 }
 
-static void inline_endio(struct bio *bio, int error)
+static void inline_endio(struct bio *bio)
 {
 	bio_end_io_t *end_fn = bio->bi_private;
+	int error = bio->bi_error;
 
 	/*
 	 * Reset the bio to free any attached resources
@@ -589,7 +593,8 @@
 	 */
 	bio_reset(bio);
 
-	end_fn(bio, error);
+	bio->bi_error = error;
+	end_fn(bio);
 }
 
 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
@@ -661,13 +666,14 @@
  * Set the error, clear B_WRITING bit and wake anyone who was waiting on
  * it.
  */
-static void write_endio(struct bio *bio, int error)
+static void write_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-	b->write_error = error;
-	if (unlikely(error)) {
+	b->write_error = bio->bi_error;
+	if (unlikely(bio->bi_error)) {
 		struct dm_bufio_client *c = b->c;
+		int error = bio->bi_error;
 		(void)cmpxchg(&c->async_write_error, 0, error);
 	}
 
@@ -1026,11 +1032,11 @@
  * The endio routine for reading: set the error, clear the bit and wake up
  * anyone waiting on the buffer.
  */
-static void read_endio(struct bio *bio, int error)
+static void read_endio(struct bio *bio)
 {
 	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-	b->read_error = error;
+	b->read_error = bio->bi_error;
 
 	BUG_ON(!test_bit(B_READING, &b->state));
 
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 200366c..1ffbeb1 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -772,7 +772,7 @@
 	struct dm_cache_policy policy;
 
 	/* protects everything */
-	struct mutex lock;
+	spinlock_t lock;
 	dm_cblock_t cache_size;
 	sector_t cache_block_size;
 
@@ -807,13 +807,7 @@
 	/*
 	 * Keeps track of time, incremented by the core.  We use this to
 	 * avoid attributing multiple hits within the same tick.
-	 *
-	 * Access to tick_protected should be done with the spin lock held.
-	 * It's copied to tick at the start of the map function (within the
-	 * mutex).
 	 */
-	spinlock_t tick_lock;
-	unsigned tick_protected;
 	unsigned tick;
 
 	/*
@@ -1296,46 +1290,20 @@
 	kfree(mq);
 }
 
-static void copy_tick(struct smq_policy *mq)
-{
-	unsigned long flags, tick;
-
-	spin_lock_irqsave(&mq->tick_lock, flags);
-	tick = mq->tick_protected;
-	if (tick != mq->tick) {
-		update_sentinels(mq);
-		end_hotspot_period(mq);
-		end_cache_period(mq);
-		mq->tick = tick;
-	}
-	spin_unlock_irqrestore(&mq->tick_lock, flags);
-}
-
-static bool maybe_lock(struct smq_policy *mq, bool can_block)
-{
-	if (can_block) {
-		mutex_lock(&mq->lock);
-		return true;
-	} else
-		return mutex_trylock(&mq->lock);
-}
-
 static int smq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
 		   bool can_block, bool can_migrate, bool fast_promote,
 		   struct bio *bio, struct policy_locker *locker,
 		   struct policy_result *result)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
 	result->op = POLICY_MISS;
 
-	if (!maybe_lock(mq, can_block))
-		return -EWOULDBLOCK;
-
-	copy_tick(mq);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = map(mq, bio, oblock, can_migrate, fast_promote, locker, result);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1343,20 +1311,18 @@
 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 	struct entry *e;
 
-	if (!mutex_trylock(&mq->lock))
-		return -EWOULDBLOCK;
-
+	spin_lock_irqsave(&mq->lock, flags);
 	e = h_lookup(&mq->table, oblock);
 	if (e) {
 		*cblock = infer_cblock(mq, e);
 		r = 0;
 	} else
 		r = -ENOENT;
-
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1375,20 +1341,22 @@
 
 static void smq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__smq_set_clear_dirty(mq, oblock, true);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static void smq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct smq_policy *mq = to_smq_policy(p);
+	unsigned long flags;
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__smq_set_clear_dirty(mq, oblock, false);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static int smq_load_mapping(struct dm_cache_policy *p,
@@ -1433,14 +1401,14 @@
 	struct smq_policy *mq = to_smq_policy(p);
 	int r = 0;
 
-	mutex_lock(&mq->lock);
-
+	/*
+	 * We don't need to lock here since this method is only called once
+	 * the IO has stopped.
+	 */
 	r = smq_save_hints(mq, &mq->clean, fn, context);
 	if (!r)
 		r = smq_save_hints(mq, &mq->dirty, fn, context);
 
-	mutex_unlock(&mq->lock);
-
 	return r;
 }
 
@@ -1458,10 +1426,11 @@
 static void smq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
 	struct smq_policy *mq = to_smq_policy(p);
+	unsigned long flags;
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__remove_mapping(mq, oblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static int __remove_cblock(struct smq_policy *mq, dm_cblock_t cblock)
@@ -1480,11 +1449,12 @@
 static int smq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = __remove_cblock(mq, cblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1537,11 +1507,12 @@
 			      dm_cblock_t *cblock, bool critical_only)
 {
 	int r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = __smq_writeback_work(mq, oblock, cblock, critical_only);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1562,21 +1533,23 @@
 static void smq_force_mapping(struct dm_cache_policy *p,
 			      dm_oblock_t current_oblock, dm_oblock_t new_oblock)
 {
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	__force_mapping(mq, current_oblock, new_oblock);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 static dm_cblock_t smq_residency(struct dm_cache_policy *p)
 {
 	dm_cblock_t r;
+	unsigned long flags;
 	struct smq_policy *mq = to_smq_policy(p);
 
-	mutex_lock(&mq->lock);
+	spin_lock_irqsave(&mq->lock, flags);
 	r = to_cblock(mq->cache_alloc.nr_allocated);
-	mutex_unlock(&mq->lock);
+	spin_unlock_irqrestore(&mq->lock, flags);
 
 	return r;
 }
@@ -1586,15 +1559,12 @@
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
 
-	spin_lock_irqsave(&mq->tick_lock, flags);
-	mq->tick_protected++;
-	spin_unlock_irqrestore(&mq->tick_lock, flags);
-
-	if (can_block) {
-		mutex_lock(&mq->lock);
-		copy_tick(mq);
-		mutex_unlock(&mq->lock);
-	}
+	spin_lock_irqsave(&mq->lock, flags);
+	mq->tick++;
+	update_sentinels(mq);
+	end_hotspot_period(mq);
+	end_cache_period(mq);
+	spin_unlock_irqrestore(&mq->lock, flags);
 }
 
 /* Init the policy plugin interface function pointers. */
@@ -1694,10 +1664,8 @@
 	} else
 		mq->cache_hit_bits = NULL;
 
-	mq->tick_protected = 0;
 	mq->tick = 0;
-	mutex_init(&mq->lock);
-	spin_lock_init(&mq->tick_lock);
+	spin_lock_init(&mq->lock);
 
 	q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
 	mq->hotspot.nr_top_levels = 8;
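
The smq conversion above collapses the policy's big mutex and the separate tick_lock into a single spinlock taken with interrupts saved, which is why smq_map no longer needs the maybe_lock()/-EWOULDBLOCK path. A minimal sketch of that locking shape (names here are illustrative, not the driver's):

    #include <linux/spinlock.h>

    struct example_policy {
            spinlock_t lock;        /* one lock protects everything */
            unsigned tick;
    };

    static void example_tick(struct example_policy *p)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            p->tick++;              /* sentinel/period bookkeeping goes under the same lock */
            spin_unlock_irqrestore(&p->lock, flags);
    }

Because the lock is irq-safe, the per-bio hooks can take it unconditionally instead of special-casing can_block.
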
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1fe93cf..dd90d12 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,7 +424,6 @@
 		wake_up(&cache->migration_wait);
 
 	mempool_free(mg, cache->migration_pool);
-	wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -919,14 +918,14 @@
 	wake_worker(cache);
 }
 
-static void writethrough_endio(struct bio *bio, int err)
+static void writethrough_endio(struct bio *bio)
 {
 	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (err) {
-		bio_endio(bio, err);
+	if (bio->bi_error) {
+		bio_endio(bio);
 		return;
 	}
 
@@ -1064,14 +1063,6 @@
 	atomic_dec(&cache->nr_io_migrations);
 }
 
-static void __cell_release(struct cache *cache, struct dm_bio_prison_cell *cell,
-			   bool holder, struct bio_list *bios)
-{
-	(holder ? dm_cell_release : dm_cell_release_no_holder)
-		(cache->prison, cell, bios);
-	free_prison_cell(cache, cell);
-}
-
 static bool discard_or_flush(struct bio *bio)
 {
 	return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
@@ -1079,14 +1070,13 @@
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
 {
-	if (discard_or_flush(cell->holder))
+	if (discard_or_flush(cell->holder)) {
 		/*
-		 * We have to handle these bios
-		 * individually.
+		 * We have to handle these bios individually.
 		 */
-		__cell_release(cache, cell, true, &cache->deferred_bios);
-
-	else
+		dm_cell_release(cache->prison, cell, &cache->deferred_bios);
+		free_prison_cell(cache, cell);
+	} else
 		list_add_tail(&cell->user_list, &cache->deferred_cells);
 }
 
@@ -1113,7 +1103,7 @@
 static void cell_error_with_code(struct cache *cache, struct dm_bio_prison_cell *cell, int err)
 {
 	dm_cell_error(cache->prison, cell, err);
-	dm_bio_prison_free_cell(cache->prison, cell);
+	free_prison_cell(cache, cell);
 }
 
 static void cell_requeue(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1123,8 +1113,11 @@
 
 static void free_io_migration(struct dm_cache_migration *mg)
 {
-	dec_io_migrations(mg->cache);
+	struct cache *cache = mg->cache;
+
+	dec_io_migrations(cache);
 	free_migration(mg);
+	wake_worker(cache);
 }
 
 static void migration_failure(struct dm_cache_migration *mg)
@@ -1231,7 +1224,7 @@
 			 * The block was promoted via an overwrite, so it's dirty.
 			 */
 			set_dirty(cache, mg->new_oblock, mg->cblock);
-			bio_endio(mg->new_ocell->holder, 0);
+			bio_endio(mg->new_ocell->holder);
 			cell_defer(cache, mg->new_ocell, false);
 		}
 		free_io_migration(mg);
@@ -1284,7 +1277,7 @@
 	}
 }
 
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
 {
 	struct dm_cache_migration *mg = bio->bi_private;
 	struct cache *cache = mg->cache;
@@ -1294,7 +1287,7 @@
 
 	dm_unhook_bio(&pb->hook_info, bio);
 
-	if (err)
+	if (bio->bi_error)
 		mg->err = true;
 
 	mg->requeue_holder = false;
@@ -1351,16 +1344,18 @@
 {
 	dm_dblock_t b, e;
 	struct bio *bio = mg->new_ocell->holder;
+	struct cache *cache = mg->cache;
 
-	calc_discard_block_range(mg->cache, bio, &b, &e);
+	calc_discard_block_range(cache, bio, &b, &e);
 	while (b != e) {
-		set_discard(mg->cache, b);
+		set_discard(cache, b);
 		b = to_dblock(from_dblock(b) + 1);
 	}
 
-	bio_endio(bio, 0);
-	cell_defer(mg->cache, mg->new_ocell, false);
+	bio_endio(bio);
+	cell_defer(cache, mg->new_ocell, false);
 	free_migration(mg);
+	wake_worker(cache);
 }
 
 static void issue_copy_or_discard(struct dm_cache_migration *mg)
@@ -1631,7 +1626,7 @@
 
 	calc_discard_block_range(cache, bio, &b, &e);
 	if (b == e) {
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1729,6 +1724,8 @@
 		remap_to_origin(cache, bio);
 		issue(cache, bio);
 	}
+
+	free_prison_cell(cache, cell);
 }
 
 static void remap_cell_to_cache_dirty(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -1763,6 +1760,8 @@
 		remap_to_cache(cache, bio, cblock);
 		issue(cache, bio);
 	}
+
+	free_prison_cell(cache, cell);
 }
 
 /*----------------------------------------------------------------*/
@@ -2217,8 +2216,10 @@
 	bio_list_merge(&bios, &cache->deferred_bios);
 	bio_list_init(&cache->deferred_bios);
 
-	while ((bio = bio_list_pop(&bios)))
-		bio_endio(bio, DM_ENDIO_REQUEUE);
+	while ((bio = bio_list_pop(&bios))) {
+		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio_endio(bio);
+	}
 }
 
 static int more_work(struct cache *cache)
@@ -3123,7 +3124,7 @@
 			 * This is a duplicate writethrough io that is no
 			 * longer needed because the block has been demoted.
 			 */
-			bio_endio(bio, 0);
+			bio_endio(bio);
 			// FIXME: remap everything as a miss
 			cell_defer(cache, cell, false);
 			r = DM_MAPIO_SUBMITTED;
@@ -3778,26 +3779,6 @@
 	return r;
 }
 
-/*
- * We assume I/O is going to the origin (which is the volume
- * more likely to have restrictions e.g. by being striped).
- * (Looking up the exact location of the data would be expensive
- * and could always be out of date by the time the bio is submitted.)
- */
-static int cache_bvec_merge(struct dm_target *ti,
-			    struct bvec_merge_data *bvm,
-			    struct bio_vec *biovec, int max_size)
-{
-	struct cache *cache = ti->private;
-	struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = cache->origin_dev->bdev;
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
 	/*
@@ -3841,7 +3822,6 @@
 	.status = cache_status,
 	.message = cache_message,
 	.iterate_devices = cache_iterate_devices,
-	.merge = cache_bvec_merge,
 	.io_hints = cache_io_hints,
 };
 
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0f48fed..d60c88d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1076,7 +1076,8 @@
 	if (io->ctx.req)
 		crypt_free_req(cc, io->ctx.req, base_bio);
 
-	bio_endio(base_bio, error);
+	base_bio->bi_error = error;
+	bio_endio(base_bio);
 }
 
 /*
@@ -1096,14 +1097,12 @@
  * The work is done per CPU global for all dm-crypt instances.
  * They should not depend on each other and do not block.
  */
-static void crypt_endio(struct bio *clone, int error)
+static void crypt_endio(struct bio *clone)
 {
 	struct dm_crypt_io *io = clone->bi_private;
 	struct crypt_config *cc = io->cc;
 	unsigned rw = bio_data_dir(clone);
-
-	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
-		error = -EIO;
+	int error;
 
 	/*
 	 * free the processed pages
@@ -1111,6 +1110,7 @@
 	if (rw == WRITE)
 		crypt_free_buffer_pages(cc, clone);
 
+	error = clone->bi_error;
 	bio_put(clone);
 
 	if (rw == READ && !error) {
@@ -1811,11 +1811,13 @@
 	}
 	cc->iv_offset = tmpll;
 
-	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
+	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
+	if (ret) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
 
+	ret = -EINVAL;
 	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid device sector";
 		goto bad;
@@ -2035,21 +2037,6 @@
 	return -EINVAL;
 }
 
-static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		       struct bio_vec *biovec, int max_size)
-{
-	struct crypt_config *cc = ti->private;
-	struct request_queue *q = bdev_get_queue(cc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = cc->dev->bdev;
-	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int crypt_iterate_devices(struct dm_target *ti,
 				 iterate_devices_callout_fn fn, void *data)
 {
@@ -2070,7 +2057,6 @@
 	.preresume = crypt_preresume,
 	.resume = crypt_resume,
 	.message = crypt_message,
-	.merge  = crypt_merge,
 	.iterate_devices = crypt_iterate_devices,
 };
 
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 57b6a19..b34f6e2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -129,6 +129,7 @@
 	struct delay_c *dc;
 	unsigned long long tmpll;
 	char dummy;
+	int ret;
 
 	if (argc != 3 && argc != 6) {
 		ti->error = "requires exactly 3 or 6 arguments";
@@ -143,6 +144,7 @@
 
 	dc->reads = dc->writes = 0;
 
+	ret = -EINVAL;
 	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid device sector";
 		goto bad;
@@ -154,12 +156,14 @@
 		goto bad;
 	}
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
-			  &dc->dev_read)) {
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &dc->dev_read);
+	if (ret) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
 
+	ret = -EINVAL;
 	dc->dev_write = NULL;
 	if (argc == 3)
 		goto out;
@@ -175,13 +179,15 @@
 		goto bad_dev_read;
 	}
 
-	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
-			  &dc->dev_write)) {
+	ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+			    &dc->dev_write);
+	if (ret) {
 		ti->error = "Write device lookup failed";
 		goto bad_dev_read;
 	}
 
 out:
+	ret = -EINVAL;
 	dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
 	if (!dc->kdelayd_wq) {
 		DMERR("Couldn't start kdelayd");
@@ -208,7 +214,7 @@
 	dm_put_device(ti, dc->dev_read);
 bad:
 	kfree(dc);
-	return -EINVAL;
+	return ret;
 }
 
 static void delay_dtr(struct dm_target *ti)
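
Several constructors in this series (dm-crypt, dm-delay, dm-flakey, dm-linear, dm-log-writes, dm-raid1, dm-stripe) stop collapsing every failure into -EINVAL or -ENXIO and instead return whatever dm_get_device() reported. A hedged sketch of the resulting ctr shape, using placeholder names rather than any real target:

    #include <linux/device-mapper.h>
    #include <linux/slab.h>

    struct example_c {
            struct dm_dev *dev;
    };

    static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
    {
            struct example_c *ec;
            int ret;

            ec = kzalloc(sizeof(*ec), GFP_KERNEL);
            if (!ec)
                    return -ENOMEM;

            ret = -EINVAL;                  /* parse errors stay -EINVAL */
            if (argc != 1) {
                    ti->error = "Invalid argument count";
                    goto bad;
            }

            ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                                &ec->dev);
            if (ret) {                      /* propagate the real error code */
                    ti->error = "Device lookup failed";
                    goto bad;
            }

            ti->private = ec;
            return 0;
    bad:
            kfree(ec);
            return ret;
    }
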
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index ad913cd..0119ebf 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1673,20 +1673,6 @@
 	return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
 }
 
-static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		     struct bio_vec *biovec, int max_size)
-{
-	struct era *era = ti->private;
-	struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = era->origin_dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct era *era = ti->private;
@@ -1717,7 +1703,6 @@
 	.status = era_status,
 	.message = era_message,
 	.iterate_devices = era_iterate_devices,
-	.merge = era_merge,
 	.io_hints = era_io_hints
 };
 
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index b257e46..645e8b4 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -183,6 +183,7 @@
 
 	devname = dm_shift_arg(&as);
 
+	r = -EINVAL;
 	if (sscanf(dm_shift_arg(&as), "%llu%c", &tmpll, &dummy) != 1) {
 		ti->error = "Invalid device sector";
 		goto bad;
@@ -211,7 +212,8 @@
 	if (r)
 		goto bad;
 
-	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev)) {
+	r = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &fc->dev);
+	if (r) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
@@ -224,7 +226,7 @@
 
 bad:
 	kfree(fc);
-	return -EINVAL;
+	return r;
 }
 
 static void flakey_dtr(struct dm_target *ti)
@@ -296,7 +298,7 @@
 		 * Drop writes?
 		 */
 		if (test_bit(DROP_WRITES, &fc->flags)) {
-			bio_endio(bio, 0);
+			bio_endio(bio);
 			return DM_MAPIO_SUBMITTED;
 		}
 
@@ -387,21 +389,6 @@
 	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct flakey_c *fc = ti->private;
-	struct request_queue *q = bdev_get_queue(fc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = fc->dev->bdev;
-	bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
 {
 	struct flakey_c *fc = ti->private;
@@ -419,7 +406,6 @@
 	.end_io = flakey_end_io,
 	.status = flakey_status,
 	.ioctl	= flakey_ioctl,
-	.merge	= flakey_merge,
 	.iterate_devices = flakey_iterate_devices,
 };
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 74adcd2..6f8e83b2 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -134,12 +134,13 @@
 		complete_io(io);
 }
 
-static void endio(struct bio *bio, int error)
+static void endio(struct bio *bio)
 {
 	struct io *io;
 	unsigned region;
+	int error;
 
-	if (error && bio_data_dir(bio) == READ)
+	if (bio->bi_error && bio_data_dir(bio) == READ)
 		zero_fill_bio(bio);
 
 	/*
@@ -147,6 +148,7 @@
 	 */
 	retrieve_io_and_region_from_bio(bio, &io, &region);
 
+	error = bio->bi_error;
 	bio_put(bio);
 
 	dec_count(io, region, error);
@@ -314,7 +316,7 @@
 		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
 			num_bvecs = 1;
 		else
-			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+			num_bvecs = min_t(int, BIO_MAX_PAGES,
 					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
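
Much of the churn across these drivers comes from the block-layer change that drops the error argument from bio_endio() and from bi_end_io callbacks: completion status now travels in bio->bi_error and the BIO_UPTODATE flag is gone. Roughly, an end_io hook moves from the old two-argument form to something like this sketch (illustrative names only):

    #include <linux/bio.h>

    struct example_io {
            int error;
    };

    /* old form: static void example_end_io(struct bio *bio, int error) */
    static void example_end_io(struct bio *bio)
    {
            struct example_io *io = bio->bi_private;

            io->error = bio->bi_error;      /* completion status lives in the bio now */
            bio_put(bio);
    }
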
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 720ceeb..80a4395 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1919,9 +1919,7 @@
 
 void dm_interface_exit(void)
 {
-	if (misc_deregister(&_dm_misc) < 0)
-		DMERR("misc_deregister failed for control device");
-
+	misc_deregister(&_dm_misc);
 	dm_hash_exit();
 }
 
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 53e848c..436f5c9 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -30,6 +30,7 @@
 	struct linear_c *lc;
 	unsigned long long tmp;
 	char dummy;
+	int ret;
 
 	if (argc != 2) {
 		ti->error = "Invalid argument count";
@@ -42,13 +43,15 @@
 		return -ENOMEM;
 	}
 
+	ret = -EINVAL;
 	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
 		ti->error = "dm-linear: Invalid device sector";
 		goto bad;
 	}
 	lc->start = tmp;
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
+	if (ret) {
 		ti->error = "dm-linear: Device lookup failed";
 		goto bad;
 	}
@@ -61,7 +64,7 @@
 
       bad:
 	kfree(lc);
-	return -EINVAL;
+	return ret;
 }
 
 static void linear_dtr(struct dm_target *ti)
@@ -130,21 +133,6 @@
 	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct linear_c *lc = ti->private;
-	struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = lc->dev->bdev;
-	bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int linear_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -162,7 +150,6 @@
 	.map    = linear_map,
 	.status = linear_status,
 	.ioctl  = linear_ioctl,
-	.merge  = linear_merge,
 	.iterate_devices = linear_iterate_devices,
 };
 
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index ad1b049..b2912db 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -146,16 +146,16 @@
 	}
 }
 
-static void log_end_io(struct bio *bio, int err)
+static void log_end_io(struct bio *bio)
 {
 	struct log_writes_c *lc = bio->bi_private;
 	struct bio_vec *bvec;
 	int i;
 
-	if (err) {
+	if (bio->bi_error) {
 		unsigned long flags;
 
-		DMERR("Error writing log block, error=%d", err);
+		DMERR("Error writing log block, error=%d", bio->bi_error);
 		spin_lock_irqsave(&lc->blocks_lock, flags);
 		lc->logging_enabled = false;
 		spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -205,7 +205,6 @@
 	bio->bi_bdev = lc->logdev->bdev;
 	bio->bi_end_io = log_end_io;
 	bio->bi_private = lc;
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
 
 	page = alloc_page(GFP_KERNEL);
 	if (!page) {
@@ -270,7 +269,6 @@
 	bio->bi_bdev = lc->logdev->bdev;
 	bio->bi_end_io = log_end_io;
 	bio->bi_private = lc;
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
 
 	for (i = 0; i < block->vec_cnt; i++) {
 		/*
@@ -292,7 +290,6 @@
 			bio->bi_bdev = lc->logdev->bdev;
 			bio->bi_end_io = log_end_io;
 			bio->bi_private = lc;
-			set_bit(BIO_UPTODATE, &bio->bi_flags);
 
 			ret = bio_add_page(bio, block->vecs[i].bv_page,
 					   block->vecs[i].bv_len, 0);
@@ -420,6 +417,7 @@
 	struct log_writes_c *lc;
 	struct dm_arg_set as;
 	const char *devname, *logdevname;
+	int ret;
 
 	as.argc = argc;
 	as.argv = argv;
@@ -443,18 +441,22 @@
 	atomic_set(&lc->pending_blocks, 0);
 
 	devname = dm_shift_arg(&as);
-	if (dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev)) {
+	ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
+	if (ret) {
 		ti->error = "Device lookup failed";
 		goto bad;
 	}
 
 	logdevname = dm_shift_arg(&as);
-	if (dm_get_device(ti, logdevname, dm_table_get_mode(ti->table), &lc->logdev)) {
+	ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
+			    &lc->logdev);
+	if (ret) {
 		ti->error = "Log device lookup failed";
 		dm_put_device(ti, lc->dev);
 		goto bad;
 	}
 
+	ret = -EINVAL;
 	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
 	if (!lc->log_kthread) {
 		ti->error = "Couldn't alloc kthread";
@@ -479,7 +481,7 @@
 
 bad:
 	kfree(lc);
-	return -EINVAL;
+	return ret;
 }
 
 static int log_mark(struct log_writes_c *lc, char *data)
@@ -606,7 +608,7 @@
 		WARN_ON(flush_bio || fua_bio);
 		if (lc->device_supports_discard)
 			goto map_bio;
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 
@@ -728,21 +730,6 @@
 	return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			    struct bio_vec *biovec, int max_size)
-{
-	struct log_writes_c *lc = ti->private;
-	struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = lc->dev->bdev;
-	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int log_writes_iterate_devices(struct dm_target *ti,
 				      iterate_devices_callout_fn fn,
 				      void *data)
@@ -796,7 +783,6 @@
 	.end_io = normal_end_io,
 	.status = log_writes_status,
 	.ioctl	= log_writes_ioctl,
-	.merge	= log_writes_merge,
 	.message = log_writes_message,
 	.iterate_devices = log_writes_iterate_devices,
 	.io_hints = log_writes_io_hints,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2daa677..97e1651 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1717,24 +1717,6 @@
 	mddev_resume(&rs->md);
 }
 
-static int raid_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct raid_set *rs = ti->private;
-	struct md_personality *pers = rs->md.pers;
-
-	if (pers && pers->mergeable_bvec)
-		return min(max_size, pers->mergeable_bvec(&rs->md, bvm, biovec));
-
-	/*
-	 * In case we can't request the personality because
-	 * the raid set is not running yet
-	 *
-	 * -> return safe minimum
-	 */
-	return rs->md.chunk_sectors;
-}
-
 static struct target_type raid_target = {
 	.name = "raid",
 	.version = {1, 7, 0},
@@ -1749,7 +1731,6 @@
 	.presuspend = raid_presuspend,
 	.postsuspend = raid_postsuspend,
 	.resume = raid_resume,
-	.merge = raid_merge,
 };
 
 static int __init dm_raid_init(void)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d83696b..f2a363a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -490,9 +490,11 @@
 		 * If device is suspended, complete the bio.
 		 */
 		if (dm_noflush_suspending(ms->ti))
-			bio_endio(bio, DM_ENDIO_REQUEUE);
+			bio->bi_error = DM_ENDIO_REQUEUE;
 		else
-			bio_endio(bio, -EIO);
+			bio->bi_error = -EIO;
+
+		bio_endio(bio);
 		return;
 	}
 
@@ -515,7 +517,7 @@
 	bio_set_m(bio, NULL);
 
 	if (likely(!error)) {
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -531,7 +533,7 @@
 
 	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
 		    m->dev->name);
-	bio_endio(bio, -EIO);
+	bio_io_error(bio);
 }
 
 /* Asynchronous read. */
@@ -580,7 +582,7 @@
 		if (likely(m))
 			read_async_bio(m, bio);
 		else
-			bio_endio(bio, -EIO);
+			bio_io_error(bio);
 	}
 }
 
@@ -598,7 +600,7 @@
 
 static void write_callback(unsigned long error, void *context)
 {
-	unsigned i, ret = 0;
+	unsigned i;
 	struct bio *bio = (struct bio *) context;
 	struct mirror_set *ms;
 	int should_wake = 0;
@@ -614,7 +616,7 @@
 	 * regions with the same code.
 	 */
 	if (likely(!error)) {
-		bio_endio(bio, ret);
+		bio_endio(bio);
 		return;
 	}
 
@@ -623,7 +625,8 @@
 	 * degrade the array.
 	 */
 	if (bio->bi_rw & REQ_DISCARD) {
-		bio_endio(bio, -EOPNOTSUPP);
+		bio->bi_error = -EOPNOTSUPP;
+		bio_endio(bio);
 		return;
 	}
 
@@ -828,13 +831,12 @@
 		 * be wrong if the failed leg returned after reboot and
 		 * got replicated back to the good legs.)
 		 */
-
 		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
-			bio_endio(bio, -EIO);
+			bio_io_error(bio);
 		else if (errors_handled(ms) && !keep_log(ms))
 			hold_bio(ms, bio);
 		else
-			bio_endio(bio, 0);
+			bio_endio(bio);
 	}
 }
 
@@ -943,16 +945,18 @@
 {
 	unsigned long long offset;
 	char dummy;
+	int ret;
 
 	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
 		ti->error = "Invalid offset";
 		return -EINVAL;
 	}
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
-			  &ms->mirror[mirror].dev)) {
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &ms->mirror[mirror].dev);
+	if (ret) {
 		ti->error = "Device lookup failure";
-		return -ENXIO;
+		return ret;
 	}
 
 	ms->mirror[mirror].ms = ms;
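
On the completion side the same rework means callers set bio->bi_error before calling bio_endio(), or use bio_io_error() when the answer is simply -EIO, as the raid1 conversion above does. A small sketch of the pattern, with a hypothetical helper that is not part of any of these drivers:

    #include <linux/bio.h>

    static void example_finish_bio(struct bio *bio, int error)
    {
            if (error == -EIO) {
                    bio_io_error(bio);      /* sets bi_error to -EIO and calls bio_endio() */
                    return;
            }

            bio->bi_error = error;          /* 0 on success */
            bio_endio(bio);
    }
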
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 808b841..bf71583 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -533,7 +533,7 @@
 		chunk = area_location(ps, ps->current_area);
 
 		area = dm_bufio_read(client, chunk, &bp);
-		if (unlikely(IS_ERR(area))) {
+		if (IS_ERR(area)) {
 			r = PTR_ERR(area);
 			goto ret_destroy_bufio;
 		}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 7c82d3c..c0bcd65 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -63,6 +63,13 @@
 	 */
 	int valid;
 
+	/*
+	 * The snapshot overflowed because of a write to the snapshot device.
+	 * We don't have to invalidate the snapshot in this case, but we need
+	 * to prevent further writes.
+	 */
+	int snapshot_overflowed;
+
 	/* Origin writes don't trigger exceptions until this is set */
 	int active;
 
@@ -1152,6 +1159,7 @@
 
 	s->ti = ti;
 	s->valid = 1;
+	s->snapshot_overflowed = 0;
 	s->active = 0;
 	atomic_set(&s->pending_exceptions_count, 0);
 	s->exception_start_sequence = 0;
@@ -1301,6 +1309,7 @@
 
 	snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
 	snap_dest->valid = snap_src->valid;
+	snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed;
 
 	/*
 	 * Set source invalid to ensure it receives no further I/O.
@@ -1490,7 +1499,7 @@
 		error_bios(snapshot_bios);
 	} else {
 		if (full_bio)
-			bio_endio(full_bio, 0);
+			bio_endio(full_bio);
 		flush_bios(snapshot_bios);
 	}
 
@@ -1580,11 +1589,11 @@
 	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
 }
 
-static void full_bio_end_io(struct bio *bio, int error)
+static void full_bio_end_io(struct bio *bio)
 {
 	void *callback_data = bio->bi_private;
 
-	dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
+	dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
 }
 
 static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1691,7 +1700,7 @@
 	 * to copy an exception */
 	down_write(&s->lock);
 
-	if (!s->valid) {
+	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
 		r = -EIO;
 		goto out_unlock;
 	}
@@ -1715,7 +1724,7 @@
 			pe = alloc_pending_exception(s);
 			down_write(&s->lock);
 
-			if (!s->valid) {
+			if (!s->valid || s->snapshot_overflowed) {
 				free_pending_exception(pe);
 				r = -EIO;
 				goto out_unlock;
@@ -1730,7 +1739,8 @@
 
 			pe = __find_pending_exception(s, pe, chunk);
 			if (!pe) {
-				__invalidate_snapshot(s, -ENOMEM);
+				s->snapshot_overflowed = 1;
+				DMERR("Snapshot overflowed: Unable to allocate exception.");
 				r = -EIO;
 				goto out_unlock;
 			}
@@ -1990,6 +2000,8 @@
 			DMEMIT("Invalid");
 		else if (snap->merge_failed)
 			DMEMIT("Merge failed");
+		else if (snap->snapshot_overflowed)
+			DMEMIT("Overflow");
 		else {
 			if (snap->store->type->usage) {
 				sector_t total_sectors, sectors_allocated,
@@ -2330,20 +2342,6 @@
 	}
 }
 
-static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct dm_origin *o = ti->private;
-	struct request_queue *q = bdev_get_queue(o->dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = o->dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int origin_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -2362,13 +2360,12 @@
 	.resume  = origin_resume,
 	.postsuspend = origin_postsuspend,
 	.status  = origin_status,
-	.merge	 = origin_merge,
 	.iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 13, 0},
+	.version = {1, 14, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
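
The dm-snap change above alters failure behaviour: when a pending exception cannot be allocated, the snapshot is marked overflowed instead of invalidated, further writes to it fail while existing data stays readable, and status reports "Overflow" (hence the 1.13 to 1.14 version bump). A rough sketch of reporting such a tri-state from a status callback, with made-up names and fields:

    #include <linux/device-mapper.h>

    struct example_snap {
            int valid;
            int snapshot_overflowed;
            unsigned long long used, total;
    };

    static void example_status(struct dm_target *ti, status_type_t type,
                               unsigned status_flags, char *result, unsigned maxlen)
    {
            struct example_snap *s = ti->private;
            unsigned sz = 0;

            if (!s->valid)
                    DMEMIT("Invalid");
            else if (s->snapshot_overflowed)
                    DMEMIT("Overflow");
            else
                    DMEMIT("%llu/%llu", s->used, s->total);
    }
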
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 8a8b48f..8289804 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -457,12 +457,24 @@
 	list_for_each_entry(s, &stats->list, list_entry) {
 		if (!program || !strcmp(program, s->program_id)) {
 			len = s->end - s->start;
-			DMEMIT("%d: %llu+%llu %llu %s %s\n", s->id,
+			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
 				(unsigned long long)s->start,
 				(unsigned long long)len,
 				(unsigned long long)s->step,
 				s->program_id,
 				s->aux_data);
+			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
+				DMEMIT(" precise_timestamps");
+			if (s->n_histogram_entries) {
+				unsigned i;
+				DMEMIT(" histogram:");
+				for (i = 0; i < s->n_histogram_entries; i++) {
+					if (i)
+						DMEMIT(",");
+					DMEMIT("%llu", s->histogram_boundaries[i]);
+				}
+			}
+			DMEMIT("\n");
 		}
 	}
 	mutex_unlock(&stats->mutex);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a672a15..797ddb9 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -75,13 +75,15 @@
 {
 	unsigned long long start;
 	char dummy;
+	int ret;
 
 	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
 		return -EINVAL;
 
-	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
-			  &sc->stripe[stripe].dev))
-		return -ENXIO;
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &sc->stripe[stripe].dev);
+	if (ret)
+		return ret;
 
 	sc->stripe[stripe].physical_start = start;
 
@@ -273,7 +275,7 @@
 		return DM_MAPIO_REMAPPED;
 	} else {
 		/* The range doesn't map to the target stripe */
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 }
@@ -412,26 +414,6 @@
 	blk_limits_io_opt(limits, chunk_size * sc->stripes);
 }
 
-static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct stripe_c *sc = ti->private;
-	sector_t bvm_sector = bvm->bi_sector;
-	uint32_t stripe;
-	struct request_queue *q;
-
-	stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);
-
-	q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
-	bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static struct target_type stripe_target = {
 	.name   = "striped",
 	.version = {1, 5, 1},
@@ -443,7 +425,6 @@
 	.status = stripe_status,
 	.iterate_devices = stripe_iterate_devices,
 	.io_hints = stripe_io_hints,
-	.merge  = stripe_merge,
 };
 
 int __init dm_stripe_init(void)
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 16ba55a..e76ed00 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -440,14 +440,6 @@
 		       q->limits.alignment_offset,
 		       (unsigned long long) start << SECTOR_SHIFT);
 
-	/*
-	 * Check if merge fn is supported.
-	 * If not we'll force DM to use PAGE_SIZE or
-	 * smaller I/O, just to be safe.
-	 */
-	if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
-		blk_limits_max_hw_sectors(limits,
-					  (unsigned int) (PAGE_SIZE >> 9));
 	return 0;
 }
 
@@ -1388,14 +1380,6 @@
 	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }
 
-static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
-				  sector_t start, sector_t len, void *data)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-
-	return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
-}
-
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1516,11 +1500,6 @@
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
-	if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
-		queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
-	else
-		queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
-
 	dm_table_set_integrity(t);
 
 	/*
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d2bbe8c..6578b7b 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -332,9 +332,6 @@
  *
  * Description:
  *    Asynchronously issue a discard request for the sectors in question.
- *    NOTE: this variant of blk-core's blkdev_issue_discard() is a stop-gap
- *    that is being kept local to DM thinp until the block changes to allow
- *    late bio splitting land upstream.
  */
 static int __blkdev_issue_discard_async(struct block_device *bdev, sector_t sector,
 					sector_t nr_sects, gfp_t gfp_mask, unsigned long flags,
@@ -342,91 +339,36 @@
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = REQ_WRITE | REQ_DISCARD;
-	unsigned int max_discard_sectors, granularity;
-	int alignment;
 	struct bio *bio;
-	int ret = 0;
-	struct blk_plug plug;
 
-	if (!q)
+	if (!q || !nr_sects)
 		return -ENXIO;
 
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same.  */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
-	/*
-	 * Ensure that max_discard_sectors is of the proper
-	 * granularity, so that requests stay aligned after a split.
-	 */
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	max_discard_sectors -= max_discard_sectors % granularity;
-	if (unlikely(!max_discard_sectors)) {
-		/* Avoid infinite loop below. Being cautious never hurts. */
-		return -EOPNOTSUPP;
-	}
-
 	if (flags & BLKDEV_DISCARD_SECURE) {
 		if (!blk_queue_secdiscard(q))
 			return -EOPNOTSUPP;
 		type |= REQ_SECURE;
 	}
 
-	blk_start_plug(&plug);
-	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
+	/*
+	 * Required bio_put occurs in bio_endio thanks to bio_chain below
+	 */
+	bio = bio_alloc(gfp_mask, 1);
+	if (!bio)
+		return -ENOMEM;
 
-		/*
-		 * Required bio_put occurs in bio_endio thanks to bio_chain below
-		 */
-		bio = bio_alloc(gfp_mask, 1);
-		if (!bio) {
-			ret = -ENOMEM;
-			break;
-		}
+	bio_chain(bio, parent_bio);
 
-		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_size = nr_sects << 9;
 
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
-		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
+	submit_bio(type, bio);
 
-		bio_chain(bio, parent_bio);
-
-		bio->bi_iter.bi_sector = sector;
-		bio->bi_bdev = bdev;
-
-		bio->bi_iter.bi_size = req_sects << 9;
-		nr_sects -= req_sects;
-		sector = end_sect;
-
-		submit_bio(type, bio);
-
-		/*
-		 * We can loop for a long time in here, if someone does
-		 * full device discards (like mkfs). Be nice and allow
-		 * us to schedule out to avoid softlocking if preempt
-		 * is disabled.
-		 */
-		cond_resched();
-	}
-	blk_finish_plug(&plug);
-
-	return ret;
+	return 0;
 }
 
 static bool block_size_is_power_of_two(struct pool *pool)
@@ -615,8 +557,10 @@
 {
 	struct bio *bio;
 
-	while ((bio = bio_list_pop(bios)))
-		bio_endio(bio, error);
+	while ((bio = bio_list_pop(bios))) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	}
 }
 
 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
@@ -870,14 +814,14 @@
 	complete_mapping_preparation(m);
 }
 
-static void overwrite_endio(struct bio *bio, int err)
+static void overwrite_endio(struct bio *bio)
 {
 	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 	struct dm_thin_new_mapping *m = h->overwrite_mapping;
 
 	bio->bi_end_io = m->saved_bi_end_io;
 
-	m->err = err;
+	m->err = bio->bi_error;
 	complete_mapping_preparation(m);
 }
 
@@ -1002,7 +946,7 @@
 	 */
 	if (bio) {
 		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 	} else {
 		inc_all_io_entry(tc->pool, m->cell->holder);
 		remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1032,7 +976,7 @@
 
 static void process_prepared_discard_success(struct dm_thin_new_mapping *m)
 {
-	bio_endio(m->bio, 0);
+	bio_endio(m->bio);
 	free_discard_mapping(m);
 }
 
@@ -1046,7 +990,7 @@
 		metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
 		bio_io_error(m->bio);
 	} else
-		bio_endio(m->bio, 0);
+		bio_endio(m->bio);
 
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, tc->pool->mapping_pool);
@@ -1117,7 +1061,8 @@
 	 * Even if r is set, there could be sub discards in flight that we
 	 * need to wait for.
 	 */
-	bio_endio(m->bio, r);
+	m->bio->bi_error = r;
+	bio_endio(m->bio);
 	cell_defer_no_holder(tc, m->cell);
 	mempool_free(m, pool->mapping_pool);
 }
@@ -1493,9 +1438,10 @@
 {
 	int error = should_error_unserviceable_bio(pool);
 
-	if (error)
-		bio_endio(bio, error);
-	else
+	if (error) {
+		bio->bi_error = error;
+		bio_endio(bio);
+	} else
 		retry_on_resume(bio);
 }
 
@@ -1539,9 +1485,8 @@
 }
 
 /*
- * FIXME: DM local hack to defer parent bios's end_io until we
- * _know_ all chained sub range discard bios have completed.
- * Will go away once late bio splitting lands upstream!
+ * __bio_inc_remaining() is used to defer parent bios's end_io until
+ * we _know_ all chained sub range discard bios have completed.
  */
 static inline void __bio_inc_remaining(struct bio *bio)
 {
@@ -1631,7 +1576,7 @@
 	 * will prevent completion until the sub range discards have
 	 * completed.
 	 */
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
@@ -1645,7 +1590,7 @@
 		/*
 		 * The discard covers less than a block.
 		 */
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1790,7 +1735,7 @@
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
 		cell_defer_no_holder(tc, cell);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		return;
 	}
 
@@ -1855,7 +1800,7 @@
 
 			} else {
 				zero_fill_bio(bio);
-				bio_endio(bio, 0);
+				bio_endio(bio);
 			}
 		} else
 			provision_block(tc, bio, block, cell);
@@ -1926,7 +1871,7 @@
 		}
 
 		zero_fill_bio(bio);
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		break;
 
 	default:
@@ -1951,7 +1896,7 @@
 
 static void process_bio_success(struct thin_c *tc, struct bio *bio)
 {
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
@@ -2600,7 +2545,8 @@
 	thin_hook_bio(tc, bio);
 
 	if (tc->requeue_mode) {
-		bio_endio(bio, DM_ENDIO_REQUEUE);
+		bio->bi_error = DM_ENDIO_REQUEUE;
+		bio_endio(bio);
 		return DM_MAPIO_SUBMITTED;
 	}
 
@@ -3875,20 +3821,6 @@
 	return fn(ti, pt->data_dev, 0, ti->len, data);
 }
 
-static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct pool_c *pt = ti->private;
-	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = pt->data_dev->bdev;
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct pool_c *pt = ti->private;
@@ -3965,7 +3897,6 @@
 	.resume = pool_resume,
 	.message = pool_message,
 	.status = pool_status,
-	.merge = pool_merge,
 	.iterate_devices = pool_iterate_devices,
 	.io_hints = pool_io_hints,
 };
@@ -4292,21 +4223,6 @@
 	DMEMIT("Error");
 }
 
-static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-		      struct bio_vec *biovec, int max_size)
-{
-	struct thin_c *tc = ti->private;
-	struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = tc->pool_dev->bdev;
-	bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int thin_iterate_devices(struct dm_target *ti,
 				iterate_devices_callout_fn fn, void *data)
 {
@@ -4350,7 +4266,6 @@
 	.presuspend = thin_presuspend,
 	.postsuspend = thin_postsuspend,
 	.status = thin_status,
-	.merge = thin_merge,
 	.iterate_devices = thin_iterate_devices,
 	.io_hints = thin_io_hints,
 };
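
The thin-pool discard helper shrinks because the block layer can now split bios late: rather than carving the range into granularity-aligned pieces itself, it issues a single discard bio chained to the parent so the parent's end_io waits for the child. An illustrative sketch of that chaining idiom (error paths and flags kept minimal, not the driver's actual helper):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int example_issue_discard(struct block_device *bdev, sector_t sector,
                                     sector_t nr_sects, struct bio *parent)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 1);

            if (!bio)
                    return -ENOMEM;

            bio_chain(bio, parent);         /* parent's end_io waits for this bio */

            bio->bi_iter.bi_sector = sector;
            bio->bi_bdev = bdev;
            bio->bi_iter.bi_size = nr_sects << 9;

            submit_bio(REQ_WRITE | REQ_DISCARD, bio);
            return 0;
    }
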
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index bb9c6a0..edc624b 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -26,8 +26,6 @@
 #define DM_VERITY_ENV_LENGTH		42
 #define DM_VERITY_ENV_VAR_NAME		"DM_VERITY_ERR_BLOCK_NR"
 
-#define DM_VERITY_IO_VEC_INLINE		16
-#define DM_VERITY_MEMPOOL_SIZE		4
 #define DM_VERITY_DEFAULT_PREFETCH_SIZE	262144
 
 #define DM_VERITY_MAX_LEVELS		63
@@ -76,8 +74,6 @@
 	enum verity_mode mode;	/* mode for handling verification errors */
 	unsigned corrupted_errs;/* Number of errors for corrupted blocks */
 
-	mempool_t *vec_mempool;	/* mempool of bio vector */
-
 	struct workqueue_struct *verify_wq;
 
 	/* starting blocks for each tree level. 0 is the lowest level. */
@@ -271,7 +267,7 @@
 	verity_hash_at_level(v, block, level, &hash_block, &offset);
 
 	data = dm_bufio_read(v->bufio, hash_block, &buf);
-	if (unlikely(IS_ERR(data)))
+	if (IS_ERR(data))
 		return PTR_ERR(data);
 
 	aux = dm_bufio_get_aux_data(buf);
@@ -458,8 +454,9 @@
 
 	bio->bi_end_io = io->orig_bi_end_io;
 	bio->bi_private = io->orig_bi_private;
+	bio->bi_error = error;
 
-	bio_endio(bio, error);
+	bio_endio(bio);
 }
 
 static void verity_work(struct work_struct *w)
@@ -469,12 +466,12 @@
 	verity_finish_io(io, verity_verify_io(io));
 }
 
-static void verity_end_io(struct bio *bio, int error)
+static void verity_end_io(struct bio *bio)
 {
 	struct dm_verity_io *io = bio->bi_private;
 
-	if (error) {
-		verity_finish_io(io, error);
+	if (bio->bi_error) {
+		verity_finish_io(io, bio->bi_error);
 		return;
 	}
 
@@ -648,21 +645,6 @@
 				     cmd, arg);
 }
 
-static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-			struct bio_vec *biovec, int max_size)
-{
-	struct dm_verity *v = ti->private;
-	struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
-
-	if (!q->merge_bvec_fn)
-		return max_size;
-
-	bvm->bi_bdev = v->data_dev->bdev;
-	bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
-
-	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int verity_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
@@ -691,9 +673,6 @@
 	if (v->verify_wq)
 		destroy_workqueue(v->verify_wq);
 
-	if (v->vec_mempool)
-		mempool_destroy(v->vec_mempool);
-
 	if (v->bufio)
 		dm_bufio_client_destroy(v->bufio);
 
@@ -962,14 +941,6 @@
 
 	ti->per_bio_data_size = roundup(sizeof(struct dm_verity_io) + v->shash_descsize + v->digest_size * 2, __alignof__(struct dm_verity_io));
 
-	v->vec_mempool = mempool_create_kmalloc_pool(DM_VERITY_MEMPOOL_SIZE,
-					BIO_MAX_PAGES * sizeof(struct bio_vec));
-	if (!v->vec_mempool) {
-		ti->error = "Cannot allocate vector mempool";
-		r = -ENOMEM;
-		goto bad;
-	}
-
 	/* WQ_UNBOUND greatly improves performance when running on ramdisk */
 	v->verify_wq = alloc_workqueue("kverityd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus());
 	if (!v->verify_wq) {
@@ -995,7 +966,6 @@
 	.map		= verity_map,
 	.status		= verity_status,
 	.ioctl		= verity_ioctl,
-	.merge		= verity_merge,
 	.iterate_devices = verity_iterate_devices,
 	.io_hints	= verity_io_hints,
 };
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b9a64bb..766bc93 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -47,7 +47,7 @@
 		break;
 	}
 
-	bio_endio(bio, 0);
+	bio_endio(bio);
 
 	/* accepted bio, don't make new request */
 	return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0d7ab20..6264781 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -124,9 +124,8 @@
 #define DMF_FREEING 3
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
-#define DMF_MERGE_IS_OPTIONAL 6
-#define DMF_DEFERRED_REMOVE 7
-#define DMF_SUSPENDED_INTERNALLY 8
+#define DMF_DEFERRED_REMOVE 6
+#define DMF_SUSPENDED_INTERNALLY 7
 
 /*
  * A dummy definition to make RCU happy.
@@ -944,7 +943,8 @@
 		} else {
 			/* done with normal IO or empty flush */
 			trace_block_bio_complete(md->queue, bio, io_error);
-			bio_endio(bio, io_error);
+			bio->bi_error = io_error;
+			bio_endio(bio);
 		}
 	}
 }
@@ -957,17 +957,15 @@
 	limits->max_write_same_sectors = 0;
 }
 
-static void clone_endio(struct bio *bio, int error)
+static void clone_endio(struct bio *bio)
 {
+	int error = bio->bi_error;
 	int r = error;
 	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
 	struct dm_io *io = tio->io;
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
-	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
-		error = -EIO;
-
 	if (endio) {
 		r = endio(tio->ti, bio, error);
 		if (r < 0 || r == DM_ENDIO_REQUEUE)
@@ -996,7 +994,7 @@
 /*
  * Partial completion handling for request-based dm
  */
-static void end_clone_bio(struct bio *clone, int error)
+static void end_clone_bio(struct bio *clone)
 {
 	struct dm_rq_clone_bio_info *info =
 		container_of(clone, struct dm_rq_clone_bio_info, clone);
@@ -1013,13 +1011,13 @@
 		 * the remainder.
 		 */
 		return;
-	else if (error) {
+	else if (bio->bi_error) {
 		/*
 		 * Don't notice the error to the upper layer yet.
 		 * The error handling decision is made by the target driver,
 		 * when the request is completed.
 		 */
-		tio->error = error;
+		tio->error = bio->bi_error;
 		return;
 	}
 
@@ -1466,7 +1464,7 @@
 		md = tio->io->md;
 		dec_pending(tio->io, r);
 		free_tio(md, tio);
-	} else if (r) {
+	} else if (r != DM_MAPIO_SUBMITTED) {
 		DMWARN("unimplemented target map return value: %d", r);
 		BUG();
 	}
@@ -1722,60 +1720,6 @@
  * CRUD END
  *---------------------------------------------------------------*/
 
-static int dm_merge_bvec(struct request_queue *q,
-			 struct bvec_merge_data *bvm,
-			 struct bio_vec *biovec)
-{
-	struct mapped_device *md = q->queuedata;
-	struct dm_table *map = dm_get_live_table_fast(md);
-	struct dm_target *ti;
-	sector_t max_sectors;
-	int max_size = 0;
-
-	if (unlikely(!map))
-		goto out;
-
-	ti = dm_table_find_target(map, bvm->bi_sector);
-	if (!dm_target_is_valid(ti))
-		goto out;
-
-	/*
-	 * Find maximum amount of I/O that won't need splitting
-	 */
-	max_sectors = min(max_io_len(bvm->bi_sector, ti),
-			  (sector_t) BIO_MAX_SECTORS);
-	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-	if (max_size < 0)
-		max_size = 0;
-
-	/*
-	 * merge_bvec_fn() returns number of bytes
-	 * it can accept at this offset
-	 * max is precomputed maximal io size
-	 */
-	if (max_size && ti->type->merge)
-		max_size = ti->type->merge(ti, bvm, biovec, max_size);
-	/*
-	 * If the target doesn't support merge method and some of the devices
-	 * provided their merge_bvec method (we know this by looking at
-	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
-	 * entries.  So always set max_size to 0, and the code below allows
-	 * just one page.
-	 */
-	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
-		max_size = 0;
-
-out:
-	dm_put_live_table_fast(md);
-	/*
-	 * Always allow an entire first page
-	 */
-	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
-		max_size = biovec->bv_len;
-
-	return max_size;
-}
-
 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
@@ -1789,6 +1733,8 @@
 
 	map = dm_get_live_table(md, &srcu_idx);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);
 
 	/* if we're suspended, we have to queue this io for later */
@@ -2496,59 +2442,6 @@
 }
 
 /*
- * Return 1 if the queue has a compulsory merge_bvec_fn function.
- *
- * If this function returns 0, then the device is either a non-dm
- * device without a merge_bvec_fn, or it is a dm device that is
- * able to split any bios it receives that are too big.
- */
-int dm_queue_merge_is_compulsory(struct request_queue *q)
-{
-	struct mapped_device *dev_md;
-
-	if (!q->merge_bvec_fn)
-		return 0;
-
-	if (q->make_request_fn == dm_make_request) {
-		dev_md = q->queuedata;
-		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
-			return 0;
-	}
-
-	return 1;
-}
-
-static int dm_device_merge_is_compulsory(struct dm_target *ti,
-					 struct dm_dev *dev, sector_t start,
-					 sector_t len, void *data)
-{
-	struct block_device *bdev = dev->bdev;
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	return dm_queue_merge_is_compulsory(q);
-}
-
-/*
- * Return 1 if it is acceptable to ignore merge_bvec_fn based
- * on the properties of the underlying devices.
- */
-static int dm_table_merge_is_optional(struct dm_table *table)
-{
-	unsigned i = 0;
-	struct dm_target *ti;
-
-	while (i < dm_table_get_num_targets(table)) {
-		ti = dm_table_get_target(table, i++);
-
-		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
-			return 0;
-	}
-
-	return 1;
-}
-
-/*
  * Returns old map, which caller must destroy.
  */
 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -2557,7 +2450,6 @@
 	struct dm_table *old_map;
 	struct request_queue *q = md->queue;
 	sector_t size;
-	int merge_is_optional;
 
 	size = dm_table_get_size(t);
 
@@ -2583,17 +2475,11 @@
 
 	__bind_mempools(md, t);
 
-	merge_is_optional = dm_table_merge_is_optional(t);
-
 	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
 	rcu_assign_pointer(md->map, t);
 	md->immutable_target_type = dm_table_get_immutable_target_type(t);
 
 	dm_table_set_restrictions(t, q, limits);
-	if (merge_is_optional)
-		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
-	else
-		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
 	if (old_map)
 		dm_sync_table(md);
 
@@ -2874,7 +2760,6 @@
 	case DM_TYPE_BIO_BASED:
 		dm_init_old_md_queue(md);
 		blk_queue_make_request(md->queue, dm_make_request);
-		blk_queue_merge_bvec(md->queue, dm_merge_bvec);
 		break;
 	}
 
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 4e98499..7edcf97 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -78,8 +78,6 @@
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
-int dm_queue_merge_is_compulsory(struct request_queue *q);
-
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
 void dm_set_md_type(struct mapped_device *md, unsigned type);
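
All of the .merge / merge_bvec_fn removals in this pull hang off the same core change: bio-based drivers now let the block layer split oversized bios on entry, so dm and md just call blk_queue_split() at the top of their make_request functions and the per-target merge hooks become dead code. A minimal, illustrative make_request shape under that model (not dm's or md's actual function):

    #include <linux/blkdev.h>

    static void example_make_request(struct request_queue *q, struct bio *bio)
    {
            /* Let the block core split bios that exceed this queue's limits. */
            blk_queue_split(q, &bio, q->bio_split);

            /* ... remap bio->bi_bdev / bi_iter.bi_sector here, then resubmit ... */
            generic_make_request(bio);
    }
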
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 1277eb2..4a8e150 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -70,7 +70,7 @@
 #include <linux/seq_file.h>
 
 
-static void faulty_fail(struct bio *bio, int error)
+static void faulty_fail(struct bio *bio)
 {
 	struct bio *b = bio->bi_private;
 
@@ -181,7 +181,7 @@
 			/* special case - don't decrement, don't generic_make_request,
 			 * just fail immediately
 			 */
-			bio_endio(bio, -EIO);
+			bio_io_error(bio);
 			return;
 		}
 
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index fa7d577..b7fe7e9 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -52,48 +52,6 @@
 	return conf->disks + lo;
 }
 
-/**
- *	linear_mergeable_bvec -- tell bio layer if two requests can be merged
- *	@q: request queue
- *	@bvm: properties of new bio
- *	@biovec: the request that could be merged to it.
- *
- *	Return amount of bytes we can take at this offset
- */
-static int linear_mergeable_bvec(struct mddev *mddev,
-				 struct bvec_merge_data *bvm,
-				 struct bio_vec *biovec)
-{
-	struct dev_info *dev0;
-	unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	int maxbytes = biovec->bv_len;
-	struct request_queue *subq;
-
-	dev0 = which_dev(mddev, sector);
-	maxsectors = dev0->end_sector - sector;
-	subq = bdev_get_queue(dev0->rdev->bdev);
-	if (subq->merge_bvec_fn) {
-		bvm->bi_bdev = dev0->rdev->bdev;
-		bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
-		maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
-							     biovec));
-	}
-
-	if (maxsectors < bio_sectors)
-		maxsectors = 0;
-	else
-		maxsectors -= bio_sectors;
-
-	if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
-		return maxbytes;
-
-	if (maxsectors > (maxbytes >> 9))
-		return maxbytes;
-	else
-		return maxsectors << 9;
-}
-
 static int linear_congested(struct mddev *mddev, int bits)
 {
 	struct linear_conf *conf;
@@ -297,7 +255,7 @@
 		if (unlikely((split->bi_rw & REQ_DISCARD) &&
 			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
 			/* Just ignore it */
-			bio_endio(split, 0);
+			bio_endio(split);
 		} else
 			generic_make_request(split);
 	} while (split != bio);
@@ -338,7 +296,6 @@
 	.size		= linear_size,
 	.quiesce	= linear_quiesce,
 	.congested	= linear_congested,
-	.mergeable_bvec	= linear_mergeable_bvec,
 };
 
 static int __init linear_init (void)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index e25f00f..4033262 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -257,13 +257,17 @@
 	unsigned int sectors;
 	int cpu;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
 		return;
 	}
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
-		bio_endio(bio, bio_sectors(bio) == 0 ? 0 : -EROFS);
+		if (bio_sectors(bio) != 0)
+			bio->bi_error = -EROFS;
+		bio_endio(bio);
 		return;
 	}
 	smp_rmb(); /* Ensure implications of  'active' are visible */
@@ -350,34 +354,11 @@
 	return mddev_congested(mddev, bits);
 }
 
-static int md_mergeable_bvec(struct request_queue *q,
-			     struct bvec_merge_data *bvm,
-			     struct bio_vec *biovec)
-{
-	struct mddev *mddev = q->queuedata;
-	int ret;
-	rcu_read_lock();
-	if (mddev->suspended) {
-		/* Must always allow one vec */
-		if (bvm->bi_size == 0)
-			ret = biovec->bv_len;
-		else
-			ret = 0;
-	} else {
-		struct md_personality *pers = mddev->pers;
-		if (pers && pers->mergeable_bvec)
-			ret = pers->mergeable_bvec(mddev, bvm, biovec);
-		else
-			ret = biovec->bv_len;
-	}
-	rcu_read_unlock();
-	return ret;
-}
 /*
  * Generic flush handling for md
  */
 
-static void md_end_flush(struct bio *bio, int err)
+static void md_end_flush(struct bio *bio)
 {
 	struct md_rdev *rdev = bio->bi_private;
 	struct mddev *mddev = rdev->mddev;
@@ -433,7 +414,7 @@
 
 	if (bio->bi_iter.bi_size == 0)
 		/* an empty barrier - all done */
-		bio_endio(bio, 0);
+		bio_endio(bio);
 	else {
 		bio->bi_rw &= ~REQ_FLUSH;
 		mddev->pers->make_request(mddev, bio);
@@ -728,15 +709,13 @@
 }
 EXPORT_SYMBOL_GPL(md_rdev_clear);
 
-static void super_written(struct bio *bio, int error)
+static void super_written(struct bio *bio)
 {
 	struct md_rdev *rdev = bio->bi_private;
 	struct mddev *mddev = rdev->mddev;
 
-	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
-		printk("md: super_written gets error=%d, uptodate=%d\n",
-		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
-		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
+	if (bio->bi_error) {
+		printk("md: super_written gets error=%d\n", bio->bi_error);
 		md_error(mddev, rdev);
 	}
 
@@ -791,7 +770,7 @@
 	bio_add_page(bio, page, size, 0);
 	submit_bio_wait(rw, bio);
 
-	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	ret = !bio->bi_error;
 	bio_put(bio);
 	return ret;
 }
@@ -5186,7 +5165,6 @@
 	if (mddev->queue) {
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		mddev->queue->backing_dev_info.congested_fn = md_congested;
-		blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
@@ -5315,7 +5293,6 @@
 	mddev->degraded = 0;
 	mddev->safemode = 0;
 	mddev->private = NULL;
-	mddev->merge_check_needed = 0;
 	mddev->bitmap_info.offset = 0;
 	mddev->bitmap_info.default_offset = 0;
 	mddev->bitmap_info.default_space = 0;
@@ -5514,7 +5491,6 @@
 
 		__md_stop_writes(mddev);
 		__md_stop(mddev);
-		mddev->queue->merge_bvec_fn = NULL;
 		mddev->queue->backing_dev_info.congested_fn = NULL;
 
 		/* tell userspace to handle 'inactive' */
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7da6e9c..ab33957 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -134,10 +134,6 @@
 	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
 				 * bitmap-based recovery to get fully in sync
 				 */
-	Unmerged,		/* device is being added to array and should
-				 * be considerred for bvec_merge_fn but not
-				 * yet for actual IO
-				 */
 	WriteMostly,		/* Avoid reading if at all possible */
 	AutoDetected,		/* added by auto-detect */
 	Blocked,		/* An error occurred but has not yet
@@ -374,10 +370,6 @@
 	int				degraded;	/* whether md should consider
 							 * adding a spare
 							 */
-	int				merge_check_needed; /* at least one
-							     * member device
-							     * has a
-							     * merge_bvec_fn */
 
 	atomic_t			recovery_active; /* blocks scheduled, but not written */
 	wait_queue_head_t		recovery_wait;
@@ -532,10 +524,6 @@
 	/* congested implements bdi.congested_fn().
 	 * Will not be called while array is 'suspended' */
 	int (*congested)(struct mddev *mddev, int bits);
-	/* mergeable_bvec is use to implement ->merge_bvec_fn */
-	int (*mergeable_bvec)(struct mddev *mddev,
-			      struct bvec_merge_data *bvm,
-			      struct bio_vec *biovec);
 };
 
 struct md_sysfs_entry {
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index ac3ede2..d222522 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -77,18 +77,18 @@
 	struct bio *bio = mp_bh->master_bio;
 	struct mpconf *conf = mp_bh->mddev->private;
 
-	bio_endio(bio, err);
+	bio->bi_error = err;
+	bio_endio(bio);
 	mempool_free(mp_bh, conf->pool);
 }
 
-static void multipath_end_request(struct bio *bio, int error)
+static void multipath_end_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct multipath_bh *mp_bh = bio->bi_private;
 	struct mpconf *conf = mp_bh->mddev->private;
 	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
 
-	if (uptodate)
+	if (!bio->bi_error)
 		multipath_end_bh_io(mp_bh, 0);
 	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
@@ -101,7 +101,7 @@
 		       (unsigned long long)bio->bi_iter.bi_sector);
 		multipath_reschedule_retry(mp_bh);
 	} else
-		multipath_end_bh_io(mp_bh, error);
+		multipath_end_bh_io(mp_bh, bio->bi_error);
 	rdev_dec_pending(rdev, conf->mddev);
 }
 
@@ -123,7 +123,7 @@
 
 	mp_bh->path = multipath_map(conf);
 	if (mp_bh->path < 0) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		mempool_free(mp_bh, conf->pool);
 		return;
 	}
@@ -257,18 +257,6 @@
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 
-		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_segments to one, lying
-		 * within a single page.
-		 * (Note: it is very unlikely that a device with
-		 * merge_bvec_fn will be involved in multipath.)
-		 */
-			if (q->merge_bvec_fn) {
-				blk_queue_max_segments(mddev->queue, 1);
-				blk_queue_segment_boundary(mddev->queue,
-							   PAGE_CACHE_SIZE - 1);
-			}
-
 			spin_lock_irq(&conf->device_lock);
 			mddev->degraded--;
 			rdev->raid_disk = path;
@@ -432,15 +420,6 @@
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 
-		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, not that we ever expect a device with
-		 * a merge_bvec_fn to be involved in multipath */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-			blk_queue_max_segments(mddev->queue, 1);
-			blk_queue_segment_boundary(mddev->queue,
-						   PAGE_CACHE_SIZE - 1);
-		}
-
 		if (!test_bit(Faulty, &rdev->flags))
 			working_disks++;
 	}
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 4d6c9b6..88dbe7b 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -454,7 +454,7 @@
 	int r;
 
 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 
 	aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -490,7 +490,7 @@
 		return -EPERM;
 
 	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 
 	aux = dm_bufio_get_aux_data(to_buffer(*result));
@@ -523,7 +523,7 @@
 	int r;
 
 	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 	if (unlikely(!p))
 		return -EWOULDBLOCK;
@@ -559,7 +559,7 @@
 		return -EPERM;
 
 	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
-	if (unlikely(IS_ERR(p)))
+	if (IS_ERR(p))
 		return PTR_ERR(p);
 
 	memset(p, 0, dm_bm_block_size(bm));
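Side note on the dm-block-manager.c hunks: dropping the unlikely() wrapper around IS_ERR() loses nothing, because IS_ERR() is (as far as I recall from include/linux/err.h) already built on IS_ERR_VALUE(), which carries its own branch hint. Paraphrased sketch, not a verbatim copy of the header:

    #define MAX_ERRNO	4095
    #define IS_ERR_VALUE(x)	unlikely((x) >= (unsigned long)-MAX_ERRNO)

    static inline bool IS_ERR(const void *ptr)
    {
    	return IS_ERR_VALUE((unsigned long)ptr);
    }

    /* So "if (unlikely(IS_ERR(p)))" and "if (IS_ERR(p))" compile to the same
     * hinted branch; the shorter spelling is simply easier to read. */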
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 4222f77..421a36c 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -409,29 +409,11 @@
 	return 0;
 }
 
-static int get_nr_entries(struct dm_transaction_manager *tm,
-			  dm_block_t b, uint32_t *result)
-{
-	int r;
-	struct dm_block *block;
-	struct btree_node *n;
-
-	r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
-	if (r)
-		return r;
-
-	n = dm_block_data(block);
-	*result = le32_to_cpu(n->header.nr_entries);
-
-	return dm_tm_unlock(tm, block);
-}
-
 static int rebalance_children(struct shadow_spine *s,
 			      struct dm_btree_info *info,
 			      struct dm_btree_value_type *vt, uint64_t key)
 {
 	int i, r, has_left_sibling, has_right_sibling;
-	uint32_t child_entries;
 	struct btree_node *n;
 
 	n = dm_block_data(shadow_current(s));
@@ -458,10 +440,6 @@
 	if (i < 0)
 		return -ENODATA;
 
-	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
-	if (r)
-		return r;
-
 	has_left_sibling = i > 0;
 	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
 
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index c7726ce..b6cec25 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -420,8 +420,8 @@
  *
  * Where A* is a shadow of A.
  */
-static int btree_split_sibling(struct shadow_spine *s, dm_block_t root,
-			       unsigned parent_index, uint64_t key)
+static int btree_split_sibling(struct shadow_spine *s, unsigned parent_index,
+			       uint64_t key)
 {
 	int r;
 	size_t size;
@@ -625,7 +625,7 @@
 			if (top)
 				r = btree_split_beneath(s, key);
 			else
-				r = btree_split_sibling(s, root, i, key);
+				r = btree_split_sibling(s, i, key);
 
 			if (r < 0)
 				return r;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index efb654e..59cda50 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -192,9 +192,6 @@
 			disk_stack_limits(mddev->gendisk, rdev1->bdev,
 					  rdev1->data_offset << 9);
 
-		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
-			conf->has_merge_bvec = 1;
-
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
@@ -351,58 +348,6 @@
 			     + sector_div(sector, zone->nb_dev)];
 }
 
-/**
- *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- *	@mddev: the md device
- *	@bvm: properties of new bio
- *	@biovec: the request that could be merged to it.
- *
- *	Return amount of bytes we can accept at this offset
- */
-static int raid0_mergeable_bvec(struct mddev *mddev,
-				struct bvec_merge_data *bvm,
-				struct bio_vec *biovec)
-{
-	struct r0conf *conf = mddev->private;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	sector_t sector_offset = sector;
-	int max;
-	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bvm->bi_size >> 9;
-	struct strip_zone *zone;
-	struct md_rdev *rdev;
-	struct request_queue *subq;
-
-	if (is_power_of_2(chunk_sectors))
-		max =  (chunk_sectors - ((sector & (chunk_sectors-1))
-						+ bio_sectors)) << 9;
-	else
-		max =  (chunk_sectors - (sector_div(sector, chunk_sectors)
-						+ bio_sectors)) << 9;
-	if (max < 0)
-		max = 0; /* bio_add cannot handle a negative return */
-	if (max <= biovec->bv_len && bio_sectors == 0)
-		return biovec->bv_len;
-	if (max < biovec->bv_len)
-		/* too small already, no need to check further */
-		return max;
-	if (!conf->has_merge_bvec)
-		return max;
-
-	/* May need to check subordinate device */
-	sector = sector_offset;
-	zone = find_zone(mddev->private, &sector_offset);
-	rdev = map_sector(mddev, zone, sector, &sector_offset);
-	subq = bdev_get_queue(rdev->bdev);
-	if (subq->merge_bvec_fn) {
-		bvm->bi_bdev = rdev->bdev;
-		bvm->bi_sector = sector_offset + zone->dev_start +
-			rdev->data_offset;
-		return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
-	} else
-		return max;
-}
-
 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	sector_t array_sectors = 0;
@@ -543,7 +488,7 @@
 		if (unlikely((split->bi_rw & REQ_DISCARD) &&
 			 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
 			/* Just ignore it */
-			bio_endio(split, 0);
+			bio_endio(split);
 		} else
 			generic_make_request(split);
 	} while (split != bio);
@@ -727,7 +672,6 @@
 	.takeover	= raid0_takeover,
 	.quiesce	= raid0_quiesce,
 	.congested	= raid0_congested,
-	.mergeable_bvec	= raid0_mergeable_bvec,
 };
 
 static int __init raid0_init (void)
diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
index 05539d9..7127a62 100644
--- a/drivers/md/raid0.h
+++ b/drivers/md/raid0.h
@@ -12,8 +12,6 @@
 	struct md_rdev		**devlist; /* lists of rdevs, pointed to
 					    * by strip_zone->dev */
 	int			nr_strip_zones;
-	int			has_merge_bvec;	/* at least one member has
-						 * a merge_bvec_fn */
 };
 
 #endif
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 967a4ed..f39d69f 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -255,9 +255,10 @@
 		done = 1;
 
 	if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		bio->bi_error = -EIO;
+
 	if (done) {
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		/*
 		 * Wake up any possible resync thread that waits for the device
 		 * to go idle.
@@ -312,9 +313,9 @@
 	return mirror;
 }
 
-static void raid1_end_read_request(struct bio *bio, int error)
+static void raid1_end_read_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = !bio->bi_error;
 	struct r1bio *r1_bio = bio->bi_private;
 	int mirror;
 	struct r1conf *conf = r1_bio->mddev->private;
@@ -397,9 +398,8 @@
 	}
 }
 
-static void raid1_end_write_request(struct bio *bio, int error)
+static void raid1_end_write_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct r1bio *r1_bio = bio->bi_private;
 	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 	struct r1conf *conf = r1_bio->mddev->private;
@@ -410,7 +410,7 @@
 	/*
 	 * 'one mirror IO has finished' event handler:
 	 */
-	if (!uptodate) {
+	if (bio->bi_error) {
 		set_bit(WriteErrorSeen,
 			&conf->mirrors[mirror].rdev->flags);
 		if (!test_and_set_bit(WantReplacement,
@@ -557,7 +557,6 @@
 		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (r1_bio->bios[disk] == IO_BLOCKED
 		    || rdev == NULL
-		    || test_bit(Unmerged, &rdev->flags)
 		    || test_bit(Faulty, &rdev->flags))
 			continue;
 		if (!test_bit(In_sync, &rdev->flags) &&
@@ -708,38 +707,6 @@
 	return best_disk;
 }
 
-static int raid1_mergeable_bvec(struct mddev *mddev,
-				struct bvec_merge_data *bvm,
-				struct bio_vec *biovec)
-{
-	struct r1conf *conf = mddev->private;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	int max = biovec->bv_len;
-
-	if (mddev->merge_check_needed) {
-		int disk;
-		rcu_read_lock();
-		for (disk = 0; disk < conf->raid_disks * 2; disk++) {
-			struct md_rdev *rdev = rcu_dereference(
-				conf->mirrors[disk].rdev);
-			if (rdev && !test_bit(Faulty, &rdev->flags)) {
-				struct request_queue *q =
-					bdev_get_queue(rdev->bdev);
-				if (q->merge_bvec_fn) {
-					bvm->bi_sector = sector +
-						rdev->data_offset;
-					bvm->bi_bdev = rdev->bdev;
-					max = min(max, q->merge_bvec_fn(
-							  q, bvm, biovec));
-				}
-			}
-		}
-		rcu_read_unlock();
-	}
-	return max;
-
-}
-
 static int raid1_congested(struct mddev *mddev, int bits)
 {
 	struct r1conf *conf = mddev->private;
@@ -793,7 +760,7 @@
 			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 				/* Just ignore it */
-				bio_endio(bio, 0);
+				bio_endio(bio);
 			else
 				generic_make_request(bio);
 			bio = next;
@@ -1068,7 +1035,7 @@
 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
 			generic_make_request(bio);
 		bio = next;
@@ -1158,7 +1125,7 @@
 	 * non-zero, then it is the number of not-completed requests.
 	 */
 	bio->bi_phys_segments = 0;
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	if (rw == READ) {
 		/*
@@ -1269,8 +1236,7 @@
 			break;
 		}
 		r1_bio->bios[i] = NULL;
-		if (!rdev || test_bit(Faulty, &rdev->flags)
-		    || test_bit(Unmerged, &rdev->flags)) {
+		if (!rdev || test_bit(Faulty, &rdev->flags)) {
 			if (i < conf->raid_disks)
 				set_bit(R1BIO_Degraded, &r1_bio->state);
 			continue;
@@ -1617,7 +1583,6 @@
 	struct raid1_info *p;
 	int first = 0;
 	int last = conf->raid_disks - 1;
-	struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 	if (mddev->recovery_disabled == conf->recovery_disabled)
 		return -EBUSY;
@@ -1625,11 +1590,6 @@
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
-	if (q->merge_bvec_fn) {
-		set_bit(Unmerged, &rdev->flags);
-		mddev->merge_check_needed = 1;
-	}
-
 	for (mirror = first; mirror <= last; mirror++) {
 		p = conf->mirrors+mirror;
 		if (!p->rdev) {
@@ -1661,19 +1621,6 @@
 			break;
 		}
 	}
-	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
-		/* Some requests might not have seen this new
-		 * merge_bvec_fn.  We must wait for them to complete
-		 * before merging the device fully.
-		 * First we make sure any code which has tested
-		 * our function has submitted the request, then
-		 * we wait for all outstanding requests to complete.
-		 */
-		synchronize_sched();
-		freeze_array(conf, 0);
-		unfreeze_array(conf);
-		clear_bit(Unmerged, &rdev->flags);
-	}
 	md_integrity_add_rdev(rdev, mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -1737,7 +1684,7 @@
 	return err;
 }
 
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
 {
 	struct r1bio *r1_bio = bio->bi_private;
 
@@ -1748,16 +1695,16 @@
 	 * or re-read if the read failed.
 	 * We don't do much here, just schedule handling by raid1d
 	 */
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (!bio->bi_error)
 		set_bit(R1BIO_Uptodate, &r1_bio->state);
 
 	if (atomic_dec_and_test(&r1_bio->remaining))
 		reschedule_retry(r1_bio);
 }
 
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = !bio->bi_error;
 	struct r1bio *r1_bio = bio->bi_private;
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
@@ -1944,7 +1891,7 @@
 		idx ++;
 	}
 	set_bit(R1BIO_Uptodate, &r1_bio->state);
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
+	bio->bi_error = 0;
 	return 1;
 }
 
@@ -1968,15 +1915,14 @@
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
-		int uptodate;
+		int error;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
-		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
+		/* fixup the bio for reuse, but preserve errno */
+		error = b->bi_error;
 		bio_reset(b);
-		if (!uptodate)
-			clear_bit(BIO_UPTODATE, &b->bi_flags);
+		b->bi_error = error;
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -1999,7 +1945,7 @@
 	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-		    test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
+		    !r1_bio->bios[primary]->bi_error) {
 			r1_bio->bios[primary]->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
 			break;
@@ -2009,14 +1955,14 @@
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
-		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
+		int error = sbio->bi_error;
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
-		/* Now we can 'fixup' the BIO_UPTODATE flag */
-		set_bit(BIO_UPTODATE, &sbio->bi_flags);
+		/* Now we can 'fixup' the error value */
+		sbio->bi_error = 0;
 
-		if (uptodate) {
+		if (!error) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2031,7 +1977,7 @@
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && uptodate)) {
+			      && !error)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2272,11 +2218,11 @@
 		struct bio *bio = r1_bio->bios[m];
 		if (bio->bi_end_io == NULL)
 			continue;
-		if (test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+		if (!bio->bi_error &&
 		    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
 			rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
 		}
-		if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
+		if (bio->bi_error &&
 		    test_bit(R1BIO_WriteError, &r1_bio->state)) {
 			if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
 				md_error(conf->mddev, rdev);
@@ -2715,7 +2661,7 @@
 						/* remove last page from this bio */
 						bio->bi_vcnt--;
 						bio->bi_iter.bi_size -= len;
-						__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+						bio_clear_flag(bio, BIO_SEG_VALID);
 					}
 					goto bio_full;
 				}
@@ -2810,8 +2756,6 @@
 			goto abort;
 		disk->rdev = rdev;
 		q = bdev_get_queue(rdev->bdev);
-		if (q->merge_bvec_fn)
-			mddev->merge_check_needed = 1;
 
 		disk->head_position = 0;
 		disk->seq_start = MaxSector;
@@ -3176,7 +3120,6 @@
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
 	.congested	= raid1_congested,
-	.mergeable_bvec	= raid1_mergeable_bvec,
 };
 
 static int __init raid_init(void)
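Side note on the raid1.c process_checks() hunk above: bio_reset() wipes the front of the bio, bi_error included, so the outcome of the just-finished read has to be saved and put back by hand, exactly as BIO_UPTODATE used to be preserved. Condensed sketch of that pattern (the helper name is illustrative):

    /* Keep the outcome of the previous I/O across a bio_reset(). */
    static void reset_bio_keeping_status(struct bio *b)
    {
    	int error = b->bi_error;	/* save the result of the last I/O */

    	bio_reset(b);			/* clears bi_error along with the rest */
    	b->bi_error = error;		/* restore it for the caller to inspect */
    }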
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 38c58e1..b0fce2e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -101,7 +101,7 @@
 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 				int *skipped);
 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
-static void end_reshape_write(struct bio *bio, int error);
+static void end_reshape_write(struct bio *bio);
 static void end_reshape(struct r10conf *conf);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
@@ -307,9 +307,9 @@
 	} else
 		done = 1;
 	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
+		bio->bi_error = -EIO;
 	if (done) {
-		bio_endio(bio, 0);
+		bio_endio(bio);
 		/*
 		 * Wake up any possible resync thread that waits for the device
 		 * to go idle.
@@ -358,9 +358,9 @@
 	return r10_bio->devs[slot].devnum;
 }
 
-static void raid10_end_read_request(struct bio *bio, int error)
+static void raid10_end_read_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = !bio->bi_error;
 	struct r10bio *r10_bio = bio->bi_private;
 	int slot, dev;
 	struct md_rdev *rdev;
@@ -438,9 +438,8 @@
 	}
 }
 
-static void raid10_end_write_request(struct bio *bio, int error)
+static void raid10_end_write_request(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct r10bio *r10_bio = bio->bi_private;
 	int dev;
 	int dec_rdev = 1;
@@ -460,7 +459,7 @@
 	/*
 	 * this branch is our 'one mirror IO has finished' event handler:
 	 */
-	if (!uptodate) {
+	if (bio->bi_error) {
 		if (repl)
 			/* Never record new bad blocks to replacement,
 			 * just fail it.
@@ -672,93 +671,6 @@
 	return (vchunk << geo->chunk_shift) + offset;
 }
 
-/**
- *	raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
- *	@mddev: the md device
- *	@bvm: properties of new bio
- *	@biovec: the request that could be merged to it.
- *
- *	Return amount of bytes we can accept at this offset
- *	This requires checking for end-of-chunk if near_copies != raid_disks,
- *	and for subordinate merge_bvec_fns if merge_check_needed.
- */
-static int raid10_mergeable_bvec(struct mddev *mddev,
-				 struct bvec_merge_data *bvm,
-				 struct bio_vec *biovec)
-{
-	struct r10conf *conf = mddev->private;
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	int max;
-	unsigned int chunk_sectors;
-	unsigned int bio_sectors = bvm->bi_size >> 9;
-	struct geom *geo = &conf->geo;
-
-	chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
-	if (conf->reshape_progress != MaxSector &&
-	    ((sector >= conf->reshape_progress) !=
-	     conf->mddev->reshape_backwards))
-		geo = &conf->prev;
-
-	if (geo->near_copies < geo->raid_disks) {
-		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
-					+ bio_sectors)) << 9;
-		if (max < 0)
-			/* bio_add cannot handle a negative return */
-			max = 0;
-		if (max <= biovec->bv_len && bio_sectors == 0)
-			return biovec->bv_len;
-	} else
-		max = biovec->bv_len;
-
-	if (mddev->merge_check_needed) {
-		struct {
-			struct r10bio r10_bio;
-			struct r10dev devs[conf->copies];
-		} on_stack;
-		struct r10bio *r10_bio = &on_stack.r10_bio;
-		int s;
-		if (conf->reshape_progress != MaxSector) {
-			/* Cannot give any guidance during reshape */
-			if (max <= biovec->bv_len && bio_sectors == 0)
-				return biovec->bv_len;
-			return 0;
-		}
-		r10_bio->sector = sector;
-		raid10_find_phys(conf, r10_bio);
-		rcu_read_lock();
-		for (s = 0; s < conf->copies; s++) {
-			int disk = r10_bio->devs[s].devnum;
-			struct md_rdev *rdev = rcu_dereference(
-				conf->mirrors[disk].rdev);
-			if (rdev && !test_bit(Faulty, &rdev->flags)) {
-				struct request_queue *q =
-					bdev_get_queue(rdev->bdev);
-				if (q->merge_bvec_fn) {
-					bvm->bi_sector = r10_bio->devs[s].addr
-						+ rdev->data_offset;
-					bvm->bi_bdev = rdev->bdev;
-					max = min(max, q->merge_bvec_fn(
-							  q, bvm, biovec));
-				}
-			}
-			rdev = rcu_dereference(conf->mirrors[disk].replacement);
-			if (rdev && !test_bit(Faulty, &rdev->flags)) {
-				struct request_queue *q =
-					bdev_get_queue(rdev->bdev);
-				if (q->merge_bvec_fn) {
-					bvm->bi_sector = r10_bio->devs[s].addr
-						+ rdev->data_offset;
-					bvm->bi_bdev = rdev->bdev;
-					max = min(max, q->merge_bvec_fn(
-							  q, bvm, biovec));
-				}
-			}
-		}
-		rcu_read_unlock();
-	}
-	return max;
-}
-
 /*
  * This routine returns the disk from which the requested read should
  * be done. There is a per-array 'next expected sequential IO' sector
@@ -821,12 +733,10 @@
 		disk = r10_bio->devs[slot].devnum;
 		rdev = rcu_dereference(conf->mirrors[disk].replacement);
 		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
-		    test_bit(Unmerged, &rdev->flags) ||
 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
 			rdev = rcu_dereference(conf->mirrors[disk].rdev);
 		if (rdev == NULL ||
-		    test_bit(Faulty, &rdev->flags) ||
-		    test_bit(Unmerged, &rdev->flags))
+		    test_bit(Faulty, &rdev->flags))
 			continue;
 		if (!test_bit(In_sync, &rdev->flags) &&
 		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
@@ -957,7 +867,7 @@
 			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 				/* Just ignore it */
-				bio_endio(bio, 0);
+				bio_endio(bio);
 			else
 				generic_make_request(bio);
 			bio = next;
@@ -1133,7 +1043,7 @@
 		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
 		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
-			bio_endio(bio, 0);
+			bio_endio(bio);
 		else
 			generic_make_request(bio);
 		bio = next;
@@ -1217,7 +1127,7 @@
 	 * non-zero, then it is the number of not-completed requests.
 	 */
 	bio->bi_phys_segments = 0;
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	if (rw == READ) {
 		/*
@@ -1326,11 +1236,9 @@
 			blocked_rdev = rrdev;
 			break;
 		}
-		if (rdev && (test_bit(Faulty, &rdev->flags)
-			     || test_bit(Unmerged, &rdev->flags)))
+		if (rdev && (test_bit(Faulty, &rdev->flags)))
 			rdev = NULL;
-		if (rrdev && (test_bit(Faulty, &rrdev->flags)
-			      || test_bit(Unmerged, &rrdev->flags)))
+		if (rrdev && (test_bit(Faulty, &rrdev->flags)))
 			rrdev = NULL;
 
 		r10_bio->devs[i].bio = NULL;
@@ -1777,7 +1685,6 @@
 	int mirror;
 	int first = 0;
 	int last = conf->geo.raid_disks - 1;
-	struct request_queue *q = bdev_get_queue(rdev->bdev);
 
 	if (mddev->recovery_cp < MaxSector)
 		/* only hot-add to in-sync arrays, as recovery is
@@ -1790,11 +1697,6 @@
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
 
-	if (q->merge_bvec_fn) {
-		set_bit(Unmerged, &rdev->flags);
-		mddev->merge_check_needed = 1;
-	}
-
 	if (rdev->saved_raid_disk >= first &&
 	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
 		mirror = rdev->saved_raid_disk;
@@ -1833,19 +1735,6 @@
 		rcu_assign_pointer(p->rdev, rdev);
 		break;
 	}
-	if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
-		/* Some requests might not have seen this new
-		 * merge_bvec_fn.  We must wait for them to complete
-		 * before merging the device fully.
-		 * First we make sure any code which has tested
-		 * our function has submitted the request, then
-		 * we wait for all outstanding requests to complete.
-		 */
-		synchronize_sched();
-		freeze_array(conf, 0);
-		unfreeze_array(conf);
-		clear_bit(Unmerged, &rdev->flags);
-	}
 	md_integrity_add_rdev(rdev, mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -1916,7 +1805,7 @@
 	return err;
 }
 
-static void end_sync_read(struct bio *bio, int error)
+static void end_sync_read(struct bio *bio)
 {
 	struct r10bio *r10_bio = bio->bi_private;
 	struct r10conf *conf = r10_bio->mddev->private;
@@ -1928,7 +1817,7 @@
 	} else
 		d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
 
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+	if (!bio->bi_error)
 		set_bit(R10BIO_Uptodate, &r10_bio->state);
 	else
 		/* The write handler will notice the lack of
@@ -1977,9 +1866,8 @@
 	}
 }
 
-static void end_sync_write(struct bio *bio, int error)
+static void end_sync_write(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct r10bio *r10_bio = bio->bi_private;
 	struct mddev *mddev = r10_bio->mddev;
 	struct r10conf *conf = mddev->private;
@@ -1996,7 +1884,7 @@
 	else
 		rdev = conf->mirrors[d].rdev;
 
-	if (!uptodate) {
+	if (bio->bi_error) {
 		if (repl)
 			md_error(mddev, rdev);
 		else {
@@ -2044,7 +1932,7 @@
 
 	/* find the first device with a block */
 	for (i=0; i<conf->copies; i++)
-		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
+		if (!r10_bio->devs[i].bio->bi_error)
 			break;
 
 	if (i == conf->copies)
@@ -2064,7 +1952,7 @@
 			continue;
 		if (i == first)
 			continue;
-		if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
+		if (!r10_bio->devs[i].bio->bi_error) {
 			/* We know that the bi_io_vec layout is the same for
 			 * both 'first' and 'i', so we just compare them.
 			 * All vec entries are PAGE_SIZE;
@@ -2394,7 +2282,6 @@
 			d = r10_bio->devs[sl].devnum;
 			rdev = rcu_dereference(conf->mirrors[d].rdev);
 			if (rdev &&
-			    !test_bit(Unmerged, &rdev->flags) &&
 			    test_bit(In_sync, &rdev->flags) &&
 			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
 					&first_bad, &bad_sectors) == 0) {
@@ -2448,7 +2335,6 @@
 			d = r10_bio->devs[sl].devnum;
 			rdev = rcu_dereference(conf->mirrors[d].rdev);
 			if (!rdev ||
-			    test_bit(Unmerged, &rdev->flags) ||
 			    !test_bit(In_sync, &rdev->flags))
 				continue;
 
@@ -2706,8 +2592,7 @@
 			rdev = conf->mirrors[dev].rdev;
 			if (r10_bio->devs[m].bio == NULL)
 				continue;
-			if (test_bit(BIO_UPTODATE,
-				     &r10_bio->devs[m].bio->bi_flags)) {
+			if (!r10_bio->devs[m].bio->bi_error) {
 				rdev_clear_badblocks(
 					rdev,
 					r10_bio->devs[m].addr,
@@ -2722,8 +2607,8 @@
 			rdev = conf->mirrors[dev].replacement;
 			if (r10_bio->devs[m].repl_bio == NULL)
 				continue;
-			if (test_bit(BIO_UPTODATE,
-				     &r10_bio->devs[m].repl_bio->bi_flags)) {
+
+			if (!r10_bio->devs[m].repl_bio->bi_error) {
 				rdev_clear_badblocks(
 					rdev,
 					r10_bio->devs[m].addr,
@@ -2748,8 +2633,7 @@
 					r10_bio->devs[m].addr,
 					r10_bio->sectors, 0);
 				rdev_dec_pending(rdev, conf->mddev);
-			} else if (bio != NULL &&
-				   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+			} else if (bio != NULL && bio->bi_error) {
 				if (!narrow_write_error(r10_bio, m)) {
 					md_error(conf->mddev, rdev);
 					set_bit(R10BIO_Degraded,
@@ -3263,7 +3147,7 @@
 
 			bio = r10_bio->devs[i].bio;
 			bio_reset(bio);
-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+			bio->bi_error = -EIO;
 			if (conf->mirrors[d].rdev == NULL ||
 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
 				continue;
@@ -3300,7 +3184,7 @@
 			/* Need to set up for writing to the replacement */
 			bio = r10_bio->devs[i].repl_bio;
 			bio_reset(bio);
-			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+			bio->bi_error = -EIO;
 
 			sector = r10_bio->devs[i].addr;
 			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
@@ -3357,7 +3241,7 @@
 				/* remove last page from this bio */
 				bio2->bi_vcnt--;
 				bio2->bi_iter.bi_size -= len;
-				__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+				bio_clear_flag(bio2, BIO_SEG_VALID);
 			}
 			goto bio_full;
 		}
@@ -3377,7 +3261,7 @@
 
 		if (bio->bi_end_io == end_sync_read) {
 			md_sync_acct(bio->bi_bdev, nr_sectors);
-			set_bit(BIO_UPTODATE, &bio->bi_flags);
+			bio->bi_error = 0;
 			generic_make_request(bio);
 		}
 	}
@@ -3643,8 +3527,6 @@
 			disk->rdev = rdev;
 		}
 		q = bdev_get_queue(rdev->bdev);
-		if (q->merge_bvec_fn)
-			mddev->merge_check_needed = 1;
 		diff = (rdev->new_data_offset - rdev->data_offset);
 		if (!mddev->reshape_backwards)
 			diff = -diff;
@@ -4382,7 +4264,7 @@
 	read_bio->bi_end_io = end_sync_read;
 	read_bio->bi_rw = READ;
 	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
-	__set_bit(BIO_UPTODATE, &read_bio->bi_flags);
+	read_bio->bi_error = 0;
 	read_bio->bi_vcnt = 0;
 	read_bio->bi_iter.bi_size = 0;
 	r10_bio->master_bio = read_bio;
@@ -4439,7 +4321,7 @@
 				/* Remove last page from this bio */
 				bio2->bi_vcnt--;
 				bio2->bi_iter.bi_size -= len;
-				__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+				bio_clear_flag(bio2, BIO_SEG_VALID);
 			}
 			goto bio_full;
 		}
@@ -4604,9 +4486,8 @@
 	return 0;
 }
 
-static void end_reshape_write(struct bio *bio, int error)
+static void end_reshape_write(struct bio *bio)
 {
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct r10bio *r10_bio = bio->bi_private;
 	struct mddev *mddev = r10_bio->mddev;
 	struct r10conf *conf = mddev->private;
@@ -4623,7 +4504,7 @@
 		rdev = conf->mirrors[d].rdev;
 	}
 
-	if (!uptodate) {
+	if (bio->bi_error) {
 		/* FIXME should record badblock */
 		md_error(mddev, rdev);
 	}
@@ -4700,7 +4581,6 @@
 	.start_reshape	= raid10_start_reshape,
 	.finish_reshape	= raid10_finish_reshape,
 	.congested	= raid10_congested,
-	.mergeable_bvec	= raid10_mergeable_bvec,
 };
 
 static int __init raid_init(void)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f757023..b29e89c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -233,7 +233,7 @@
 		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
-		bio_endio(bi, 0);
+		bio_endio(bi);
 		bi = return_bi;
 	}
 }
@@ -887,9 +887,9 @@
 }
 
 static void
-raid5_end_read_request(struct bio *bi, int error);
+raid5_end_read_request(struct bio *bi);
 static void
-raid5_end_write_request(struct bio *bi, int error);
+raid5_end_write_request(struct bio *bi);
 
 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 {
@@ -2282,12 +2282,11 @@
 	conf->slab_cache = NULL;
 }
 
-static void raid5_end_read_request(struct bio * bi, int error)
+static void raid5_end_read_request(struct bio * bi)
 {
 	struct stripe_head *sh = bi->bi_private;
 	struct r5conf *conf = sh->raid_conf;
 	int disks = sh->disks, i;
-	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 	char b[BDEVNAME_SIZE];
 	struct md_rdev *rdev = NULL;
 	sector_t s;
@@ -2296,9 +2295,9 @@
 		if (bi == &sh->dev[i].req)
 			break;
 
-	pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
+	pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
-		uptodate);
+		bi->bi_error);
 	if (i == disks) {
 		BUG();
 		return;
@@ -2317,7 +2316,7 @@
 		s = sh->sector + rdev->new_data_offset;
 	else
 		s = sh->sector + rdev->data_offset;
-	if (uptodate) {
+	if (!bi->bi_error) {
 		set_bit(R5_UPTODATE, &sh->dev[i].flags);
 		if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
 			/* Note that this cannot happen on a
@@ -2405,13 +2404,12 @@
 	release_stripe(sh);
 }
 
-static void raid5_end_write_request(struct bio *bi, int error)
+static void raid5_end_write_request(struct bio *bi)
 {
 	struct stripe_head *sh = bi->bi_private;
 	struct r5conf *conf = sh->raid_conf;
 	int disks = sh->disks, i;
 	struct md_rdev *uninitialized_var(rdev);
-	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 	sector_t first_bad;
 	int bad_sectors;
 	int replacement = 0;
@@ -2434,23 +2432,23 @@
 			break;
 		}
 	}
-	pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
+	pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
 		(unsigned long long)sh->sector, i, atomic_read(&sh->count),
-		uptodate);
+		bi->bi_error);
 	if (i == disks) {
 		BUG();
 		return;
 	}
 
 	if (replacement) {
-		if (!uptodate)
+		if (bi->bi_error)
 			md_error(conf->mddev, rdev);
 		else if (is_badblock(rdev, sh->sector,
 				     STRIPE_SECTORS,
 				     &first_bad, &bad_sectors))
 			set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
 	} else {
-		if (!uptodate) {
+		if (bi->bi_error) {
 			set_bit(STRIPE_DEGRADED, &sh->state);
 			set_bit(WriteErrorSeen, &rdev->flags);
 			set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2471,7 +2469,7 @@
 	}
 	rdev_dec_pending(rdev, conf->mddev);
 
-	if (sh->batch_head && !uptodate && !replacement)
+	if (sh->batch_head && bi->bi_error && !replacement)
 		set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
 	if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -3112,7 +3110,8 @@
 		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
-			clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+			bi->bi_error = -EIO;
 			if (!raid5_dec_bi_active_stripes(bi)) {
 				md_write_end(conf->mddev);
 				bi->bi_next = *return_bi;
@@ -3136,7 +3135,8 @@
 		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
-			clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+			bi->bi_error = -EIO;
 			if (!raid5_dec_bi_active_stripes(bi)) {
 				md_write_end(conf->mddev);
 				bi->bi_next = *return_bi;
@@ -3161,7 +3161,8 @@
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
-				clear_bit(BIO_UPTODATE, &bi->bi_flags);
+
+				bi->bi_error = -EIO;
 				if (!raid5_dec_bi_active_stripes(bi)) {
 					bi->bi_next = *return_bi;
 					*return_bi = bi;
@@ -4669,35 +4670,6 @@
 	return 0;
 }
 
-/* We want read requests to align with chunks where possible,
- * but write requests don't need to.
- */
-static int raid5_mergeable_bvec(struct mddev *mddev,
-				struct bvec_merge_data *bvm,
-				struct bio_vec *biovec)
-{
-	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-	int max;
-	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bvm->bi_size >> 9;
-
-	/*
-	 * always allow writes to be mergeable, read as well if array
-	 * is degraded as we'll go through stripe cache anyway.
-	 */
-	if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
-		return biovec->bv_len;
-
-	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
-		chunk_sectors = mddev->new_chunk_sectors;
-	max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
-	if (max < 0) max = 0;
-	if (max <= biovec->bv_len && bio_sectors == 0)
-		return biovec->bv_len;
-	else
-		return max;
-}
-
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
@@ -4756,13 +4728,13 @@
  *  first).
  *  If the read failed..
  */
-static void raid5_align_endio(struct bio *bi, int error)
+static void raid5_align_endio(struct bio *bi)
 {
 	struct bio* raid_bi  = bi->bi_private;
 	struct mddev *mddev;
 	struct r5conf *conf;
-	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
 	struct md_rdev *rdev;
+	int error = bi->bi_error;
 
 	bio_put(bi);
 
@@ -4773,10 +4745,10 @@
 
 	rdev_dec_pending(rdev, conf->mddev);
 
-	if (!error && uptodate) {
+	if (!error) {
 		trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
 					 raid_bi, 0);
-		bio_endio(raid_bi, 0);
+		bio_endio(raid_bi);
 		if (atomic_dec_and_test(&conf->active_aligned_reads))
 			wake_up(&conf->wait_for_quiescent);
 		return;
@@ -4787,26 +4759,7 @@
 	add_bio_to_retry(raid_bi, conf);
 }
 
-static int bio_fits_rdev(struct bio *bi)
-{
-	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
-
-	if (bio_sectors(bi) > queue_max_sectors(q))
-		return 0;
-	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > queue_max_segments(q))
-		return 0;
-
-	if (q->merge_bvec_fn)
-		/* it's too hard to apply the merge_bvec_fn at this stage,
-		 * just just give up
-		 */
-		return 0;
-
-	return 1;
-}
-
-static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
+static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
 	struct r5conf *conf = mddev->private;
 	int dd_idx;
@@ -4815,7 +4768,7 @@
 	sector_t end_sector;
 
 	if (!in_chunk_boundary(mddev, raid_bio)) {
-		pr_debug("chunk_aligned_read : non aligned\n");
+		pr_debug("%s: non aligned\n", __func__);
 		return 0;
 	}
 	/*
@@ -4857,13 +4810,11 @@
 		rcu_read_unlock();
 		raid_bio->bi_next = (void*)rdev;
 		align_bi->bi_bdev =  rdev->bdev;
-		__clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
+		bio_clear_flag(align_bi, BIO_SEG_VALID);
 
-		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+		if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
 				bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
-			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
 			rdev_dec_pending(rdev, mddev);
 			return 0;
@@ -4892,6 +4843,31 @@
 	}
 }
 
+static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
+{
+	struct bio *split;
+
+	do {
+		sector_t sector = raid_bio->bi_iter.bi_sector;
+		unsigned chunk_sects = mddev->chunk_sectors;
+		unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
+
+		if (sectors < bio_sectors(raid_bio)) {
+			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
+			bio_chain(split, raid_bio);
+		} else
+			split = raid_bio;
+
+		if (!raid5_read_one_chunk(mddev, split)) {
+			if (split != raid_bio)
+				generic_make_request(raid_bio);
+			return split;
+		}
+	} while (split != raid_bio);
+
+	return NULL;
+}
+
 /* __get_priority_stripe - get the next stripe to process
  *
  * Full stripe writes are allowed to pass preread active stripes up until
@@ -5140,7 +5116,7 @@
 	remaining = raid5_dec_bi_active_stripes(bi);
 	if (remaining == 0) {
 		md_write_end(mddev);
-		bio_endio(bi, 0);
+		bio_endio(bi);
 	}
 }
 
@@ -5169,9 +5145,11 @@
 	 * data on failed drives.
 	 */
 	if (rw == READ && mddev->degraded == 0 &&
-	     mddev->reshape_position == MaxSector &&
-	     chunk_aligned_read(mddev,bi))
-		return;
+	    mddev->reshape_position == MaxSector) {
+		bi = chunk_aligned_read(mddev, bi);
+		if (!bi)
+			return;
+	}
 
 	if (unlikely(bi->bi_rw & REQ_DISCARD)) {
 		make_discard_request(mddev, bi);
@@ -5304,7 +5282,7 @@
 			release_stripe_plug(mddev, sh);
 		} else {
 			/* cannot get stripe for read-ahead, just give-up */
-			clear_bit(BIO_UPTODATE, &bi->bi_flags);
+			bi->bi_error = -EIO;
 			break;
 		}
 	}
@@ -5318,7 +5296,7 @@
 
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
-		bio_endio(bi, 0);
+		bio_endio(bi);
 	}
 }
 
@@ -5714,7 +5692,7 @@
 	if (remaining == 0) {
 		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
 					 raid_bio, 0);
-		bio_endio(raid_bio, 0);
+		bio_endio(raid_bio);
 	}
 	if (atomic_dec_and_test(&conf->active_aligned_reads))
 		wake_up(&conf->wait_for_quiescent);
@@ -7779,7 +7757,6 @@
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
 	.congested	= raid5_congested,
-	.mergeable_bvec	= raid5_mergeable_bvec,
 };
 static struct md_personality raid5_personality =
 {
@@ -7803,7 +7780,6 @@
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
 	.congested	= raid5_congested,
-	.mergeable_bvec	= raid5_mergeable_bvec,
 };
 
 static struct md_personality raid4_personality =
@@ -7828,7 +7804,6 @@
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid4_takeover,
 	.congested	= raid5_congested,
-	.mergeable_bvec	= raid5_mergeable_bvec,
 };
 
 static int __init raid5_init(void)
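Side note on the raid5.c rework above: with merge_bvec_fn and bio_fits_rdev() gone, chunk_aligned_read() can no longer assume an incoming bio fits inside one chunk, so it now splits the bio itself and falls back to the stripe cache for any piece it cannot issue as an aligned read. The control flow, reduced to its core (names are illustrative; chunk_sectors is assumed to be a power of two, which the mask arithmetic relies on):

    static struct bio *split_per_chunk(struct mddev *mddev, struct bio *raid_bio)
    {
    	struct bio *split;

    	do {
    		sector_t sector = raid_bio->bi_iter.bi_sector;
    		unsigned chunk_sects = mddev->chunk_sectors;
    		/* sectors remaining in the chunk that 'sector' falls into */
    		unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

    		if (sectors < bio_sectors(raid_bio)) {
    			/* carve off the chunk-sized front and chain it */
    			split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set);
    			bio_chain(split, raid_bio);
    		} else {
    			split = raid_bio;	/* last (or only) piece */
    		}

    		if (!raid5_read_one_chunk(mddev, split)) {
    			/* no aligned read possible: resubmit the remainder and
    			 * hand this piece back to the stripe-cache path */
    			if (split != raid_bio)
    				generic_make_request(raid_bio);
    			return split;
    		}
    	} while (split != raid_bio);

    	return NULL;	/* everything went out as aligned reads */
    }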
diff --git a/drivers/media/dvb-frontends/au8522_dig.c b/drivers/media/dvb-frontends/au8522_dig.c
index b744a3f..f956f13 100644
--- a/drivers/media/dvb-frontends/au8522_dig.c
+++ b/drivers/media/dvb-frontends/au8522_dig.c
@@ -922,7 +922,7 @@
 MODULE_PARM_DESC(debug, "Enable verbose debug messages");
 
 module_param(zv_mode, int, 0644);
-MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatability mode (default:on).\n"
+MODULE_PARM_DESC(zv_mode, "Turn on/off ZeeVee modulator compatibility mode (default:on).\n"
 	"\t\ton - modified AU8522 QAM256 initialization.\n"
 	"\t\tProvides faster lock when using ZeeVee modulator based sources");
 
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index c70abab..5dd3977 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -465,7 +465,7 @@
 
 	of_node_put(child);
 
-	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable");
+	pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
 	if (!pd->enable_gpio) {
 		dev_err(&client->dev, "Error getting GPIO\n");
 		return -EINVAL;
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index 0ad1b6f..d2bfe7c 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -188,7 +188,7 @@
 	if (IS_ERR(f))
 		return PTR_ERR(f);
 	/*
-	 * Return number of non-contigous planes (plane buffers)
+	 * Return number of non-contiguous planes (plane buffers)
 	 * depending on the configured color format.
 	 */
 	if (!f->fmt)
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 12497f5..906c83c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -1734,7 +1734,7 @@
 
 	mfc_debug(1, "New context: %d\n", new_ctx);
 	ctx = dev->ctx[new_ctx];
-	mfc_debug(1, "Seting new context to %p\n", ctx);
+	mfc_debug(1, "Setting new context to %p\n", ctx);
 	/* Got context to run in ctx */
 	mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
 		ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 81c1ad8..0d49b79 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -125,7 +125,7 @@
 	}
 
 	/*
-	 * Memory is contigous, lock vma and return to the caller
+	 * Memory is contiguous, lock vma and return to the caller
 	 */
 	*res_vma = vb2_get_vma(vma);
 	if (*res_vma == NULL)
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 8406c668..c6a644b 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -7,6 +7,14 @@
 
 if MEMORY
 
+config ARM_PL172_MPMC
+	tristate "ARM PL172 MPMC driver"
+	depends on ARM_AMBA && OF
+	help
+	  This selects the ARM PrimeCell PL172 MultiPort Memory Controller.
+	  If you have an embedded system with an AMBA bus and a PL172
+	  controller, say Y or M here.
+
 config ATMEL_SDRAMC
 	bool "Atmel (Multi-port DDR-)SDRAM Controller"
 	default y
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b670441..1c46af5 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -5,6 +5,7 @@
 ifeq ($(CONFIG_DDR),y)
 obj-$(CONFIG_OF)		+= of_memory.o
 endif
+obj-$(CONFIG_ARM_PL172_MPMC)	+= pl172.o
 obj-$(CONFIG_ATMEL_SDRAMC)	+= atmel-sdramc.o
 obj-$(CONFIG_TI_AEMIF)		+= ti-aemif.o
 obj-$(CONFIG_TI_EMIF)		+= emif.o
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 410c397..e87459f 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -62,7 +62,7 @@
 		return -ENODEV;
 
 	for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) {
-		u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
+		u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr);
 		if (cspr & CSPR_V && (cspr & CSPR_BA) ==
 				convert_ifc_address(addr_base))
 			return i;
@@ -79,16 +79,16 @@
 	/*
 	 * Clear all the common status and event registers
 	 */
-	if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
-		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+	if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER)
+		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
 	/* enable all error and events */
-	out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN);
+	ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en);
 
 	/* enable all error and event interrupts */
-	out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN);
-	out_be32(&ifc->cm_erattr0, 0x0);
-	out_be32(&ifc->cm_erattr1, 0x0);
+	ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en);
+	ifc_out32(0x0, &ifc->cm_erattr0);
+	ifc_out32(0x0, &ifc->cm_erattr1);
 
 	return 0;
 }
@@ -127,9 +127,9 @@
 
 	spin_lock_irqsave(&nand_irq_lock, flags);
 
-	stat = in_be32(&ifc->ifc_nand.nand_evter_stat);
+	stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat);
 	if (stat) {
-		out_be32(&ifc->ifc_nand.nand_evter_stat, stat);
+		ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat);
 		ctrl->nand_stat = stat;
 		wake_up(&ctrl->nand_wait);
 	}
@@ -161,16 +161,16 @@
 	irqreturn_t ret = IRQ_NONE;
 
 	/* read for chip select error */
-	cs_err = in_be32(&ifc->cm_evter_stat);
+	cs_err = ifc_in32(&ifc->cm_evter_stat);
 	if (cs_err) {
 		dev_err(ctrl->dev, "transaction sent to IFC is not mapped to"
 				"any memory bank 0x%08X\n", cs_err);
 		/* clear the chip select error */
-		out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER);
+		ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat);
 
 		/* read error attribute registers print the error information */
-		status = in_be32(&ifc->cm_erattr0);
-		err_addr = in_be32(&ifc->cm_erattr1);
+		status = ifc_in32(&ifc->cm_erattr0);
+		err_addr = ifc_in32(&ifc->cm_erattr1);
 
 		if (status & IFC_CM_ERATTR0_ERTYP_READ)
 			dev_err(ctrl->dev, "Read transaction error"
@@ -231,6 +231,23 @@
 		goto err;
 	}
 
+	version = ifc_in32(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
+			FSL_IFC_VERSION_MASK;
+	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
+	dev_info(&dev->dev, "IFC version %d.%d, %d banks\n",
+		version >> 24, (version >> 16) & 0xf, banks);
+
+	fsl_ifc_ctrl_dev->version = version;
+	fsl_ifc_ctrl_dev->banks = banks;
+
+	if (of_property_read_bool(dev->dev.of_node, "little-endian")) {
+		fsl_ifc_ctrl_dev->little_endian = true;
+		dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n");
+	} else {
+		fsl_ifc_ctrl_dev->little_endian = false;
+		dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n");
+	}
+
 	version = ioread32be(&fsl_ifc_ctrl_dev->regs->ifc_rev) &
 			FSL_IFC_VERSION_MASK;
 	banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8;
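Side note on fsl_ifc.c: the switch from in_be32()/out_be32() to ifc_in32()/ifc_out32() is what makes the new "little-endian" device-tree property effective, since the accessor picks the byte order at run time. I have not re-checked the exact helpers, so treat this as a sketch of the idea rather than the real include/linux/fsl_ifc.h code (the my_ prefixes are placeholders):

    static inline u32 my_ifc_in32(void __iomem *addr)
    {
    	if (fsl_ifc_ctrl_dev->little_endian)
    		return ioread32(addr);
    	return ioread32be(addr);
    }

    static inline void my_ifc_out32(u32 val, void __iomem *addr)
    {
    	if (fsl_ifc_ctrl_dev->little_endian)
    		iowrite32(val, addr);
    	else
    		iowrite32be(val, addr);
    }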
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 9426276..32ac049 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -1176,8 +1176,8 @@
 		gpmc_client_irq[i].irq = gpmc_irq_start + i;
 		irq_set_chip_and_handler(gpmc_client_irq[i].irq,
 					&gpmc_irq_chip, handle_simple_irq);
-		set_irq_flags(gpmc_client_irq[i].irq,
-				IRQF_VALID | IRQF_NOAUTOEN);
+		irq_modify_status(gpmc_client_irq[i].irq, IRQ_NOREQUEST,
+				  IRQ_NOAUTOEN);
 	}
 
 	/* Disable interrupts */
@@ -1200,7 +1200,6 @@
 	for (i = 0; i < GPMC_NR_IRQ; i++) {
 		irq_set_handler(gpmc_client_irq[i].irq, NULL);
 		irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
-		irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
 	}
 
 	irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
diff --git a/drivers/memory/pl172.c b/drivers/memory/pl172.c
new file mode 100644
index 0000000..b2ef607
--- /dev/null
+++ b/drivers/memory/pl172.c
@@ -0,0 +1,301 @@
+/*
+ * Memory controller driver for ARM PrimeCell PL172
+ * PrimeCell MultiPort Memory Controller (PL172)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on:
+ * TI AEMIF driver, Copyright (C) 2010 - 2013 Texas Instruments Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/time.h>
+
+#define MPMC_STATIC_CFG(n)		(0x200 + 0x20 * n)
+#define  MPMC_STATIC_CFG_MW_8BIT	0x0
+#define  MPMC_STATIC_CFG_MW_16BIT	0x1
+#define  MPMC_STATIC_CFG_MW_32BIT	0x2
+#define  MPMC_STATIC_CFG_PM		BIT(3)
+#define  MPMC_STATIC_CFG_PC		BIT(6)
+#define  MPMC_STATIC_CFG_PB		BIT(7)
+#define  MPMC_STATIC_CFG_EW		BIT(8)
+#define  MPMC_STATIC_CFG_B		BIT(19)
+#define  MPMC_STATIC_CFG_P		BIT(20)
+#define MPMC_STATIC_WAIT_WEN(n)		(0x204 + 0x20 * n)
+#define  MPMC_STATIC_WAIT_WEN_MAX	0x0f
+#define MPMC_STATIC_WAIT_OEN(n)		(0x208 + 0x20 * n)
+#define  MPMC_STATIC_WAIT_OEN_MAX	0x0f
+#define MPMC_STATIC_WAIT_RD(n)		(0x20c + 0x20 * n)
+#define  MPMC_STATIC_WAIT_RD_MAX	0x1f
+#define MPMC_STATIC_WAIT_PAGE(n)	(0x210 + 0x20 * n)
+#define  MPMC_STATIC_WAIT_PAGE_MAX	0x1f
+#define MPMC_STATIC_WAIT_WR(n)		(0x214 + 0x20 * n)
+#define  MPMC_STATIC_WAIT_WR_MAX	0x1f
+#define MPMC_STATIC_WAIT_TURN(n)	(0x218 + 0x20 * n)
+#define  MPMC_STATIC_WAIT_TURN_MAX	0x0f
+
+/* Maximum number of static chip selects */
+#define PL172_MAX_CS		4
+
+struct pl172_data {
+	void __iomem *base;
+	unsigned long rate;
+	struct clk *clk;
+};
+
+static int pl172_timing_prop(struct amba_device *adev,
+			     const struct device_node *np, const char *name,
+			     u32 reg_offset, u32 max, int start)
+{
+	struct pl172_data *pl172 = amba_get_drvdata(adev);
+	int cycles;
+	u32 val;
+
+	if (!of_property_read_u32(np, name, &val)) {
+		cycles = DIV_ROUND_UP(val * pl172->rate, NSEC_PER_MSEC) - start;
+		if (cycles < 0) {
+			cycles = 0;
+		} else if (cycles > max) {
+			dev_err(&adev->dev, "%s timing too tight\n", name);
+			return -EINVAL;
+		}
+
+		writel(cycles, pl172->base + reg_offset);
+	}
+
+	dev_dbg(&adev->dev, "%s: %u cycle(s)\n", name, start +
+				readl(pl172->base + reg_offset));
+
+	return 0;
+}
+
+static int pl172_setup_static(struct amba_device *adev,
+			      struct device_node *np, u32 cs)
+{
+	struct pl172_data *pl172 = amba_get_drvdata(adev);
+	u32 cfg;
+	int ret;
+
+	/* MPMC static memory configuration */
+	if (!of_property_read_u32(np, "mpmc,memory-width", &cfg)) {
+		if (cfg == 8) {
+			cfg = MPMC_STATIC_CFG_MW_8BIT;
+		} else if (cfg == 16) {
+			cfg = MPMC_STATIC_CFG_MW_16BIT;
+		} else if (cfg == 32) {
+			cfg = MPMC_STATIC_CFG_MW_32BIT;
+		} else {
+			dev_err(&adev->dev, "invalid memory width cs%u\n", cs);
+			return -EINVAL;
+		}
+	} else {
+		dev_err(&adev->dev, "memory-width property required\n");
+		return -EINVAL;
+	}
+
+	if (of_property_read_bool(np, "mpmc,async-page-mode"))
+		cfg |= MPMC_STATIC_CFG_PM;
+
+	if (of_property_read_bool(np, "mpmc,cs-active-high"))
+		cfg |= MPMC_STATIC_CFG_PC;
+
+	if (of_property_read_bool(np, "mpmc,byte-lane-low"))
+		cfg |= MPMC_STATIC_CFG_PB;
+
+	if (of_property_read_bool(np, "mpmc,extended-wait"))
+		cfg |= MPMC_STATIC_CFG_EW;
+
+	if (of_property_read_bool(np, "mpmc,buffer-enable"))
+		cfg |= MPMC_STATIC_CFG_B;
+
+	if (of_property_read_bool(np, "mpmc,write-protect"))
+		cfg |= MPMC_STATIC_CFG_P;
+
+	writel(cfg, pl172->base + MPMC_STATIC_CFG(cs));
+	dev_dbg(&adev->dev, "mpmc static config cs%u: 0x%08x\n", cs, cfg);
+
+	/* MPMC static memory timing */
+	ret = pl172_timing_prop(adev, np, "mpmc,write-enable-delay",
+				MPMC_STATIC_WAIT_WEN(cs),
+				MPMC_STATIC_WAIT_WEN_MAX, 1);
+	if (ret)
+		goto fail;
+
+	ret = pl172_timing_prop(adev, np, "mpmc,output-enable-delay",
+				MPMC_STATIC_WAIT_OEN(cs),
+				MPMC_STATIC_WAIT_OEN_MAX, 0);
+	if (ret)
+		goto fail;
+
+	ret = pl172_timing_prop(adev, np, "mpmc,read-access-delay",
+				MPMC_STATIC_WAIT_RD(cs),
+				MPMC_STATIC_WAIT_RD_MAX, 1);
+	if (ret)
+		goto fail;
+
+	ret = pl172_timing_prop(adev, np, "mpmc,page-mode-read-delay",
+				MPMC_STATIC_WAIT_PAGE(cs),
+				MPMC_STATIC_WAIT_PAGE_MAX, 1);
+	if (ret)
+		goto fail;
+
+	ret = pl172_timing_prop(adev, np, "mpmc,write-access-delay",
+				MPMC_STATIC_WAIT_WR(cs),
+				MPMC_STATIC_WAIT_WR_MAX, 2);
+	if (ret)
+		goto fail;
+
+	ret = pl172_timing_prop(adev, np, "mpmc,turn-round-delay",
+				MPMC_STATIC_WAIT_TURN(cs),
+				MPMC_STATIC_WAIT_TURN_MAX, 1);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	dev_err(&adev->dev, "failed to configure cs%u\n", cs);
+	return ret;
+}
+
+static int pl172_parse_cs_config(struct amba_device *adev,
+				 struct device_node *np)
+{
+	u32 cs;
+
+	if (!of_property_read_u32(np, "mpmc,cs", &cs)) {
+		if (cs >= PL172_MAX_CS) {
+			dev_err(&adev->dev, "cs%u invalid\n", cs);
+			return -EINVAL;
+		}
+
+		return pl172_setup_static(adev, np, cs);
+	}
+
+	dev_err(&adev->dev, "cs property required\n");
+
+	return -EINVAL;
+}
+
+static const char * const pl172_revisions[] = {"r1", "r2", "r2p3", "r2p4"};
+
+static int pl172_probe(struct amba_device *adev, const struct amba_id *id)
+{
+	struct device_node *child_np, *np = adev->dev.of_node;
+	struct device *dev = &adev->dev;
+	static const char *rev = "?";
+	struct pl172_data *pl172;
+	int ret;
+
+	if (amba_part(adev) == 0x172) {
+		if (amba_rev(adev) < ARRAY_SIZE(pl172_revisions))
+			rev = pl172_revisions[amba_rev(adev)];
+	}
+
+	dev_info(dev, "ARM PL%x revision %s\n", amba_part(adev), rev);
+
+	pl172 = devm_kzalloc(dev, sizeof(*pl172), GFP_KERNEL);
+	if (!pl172)
+		return -ENOMEM;
+
+	pl172->clk = devm_clk_get(dev, "mpmcclk");
+	if (IS_ERR(pl172->clk)) {
+		dev_err(dev, "no mpmcclk clock provided\n");
+		return PTR_ERR(pl172->clk);
+	}
+
+	ret = clk_prepare_enable(pl172->clk);
+	if (ret) {
+		dev_err(dev, "unable to enable mpmcclk clock\n");
+		return ret;
+	}
+
+	pl172->rate = clk_get_rate(pl172->clk) / MSEC_PER_SEC;
+	if (!pl172->rate) {
+		dev_err(dev, "unable to get mpmcclk clock rate\n");
+		ret = -EINVAL;
+		goto err_clk_enable;
+	}
+
+	ret = amba_request_regions(adev, NULL);
+	if (ret) {
+		dev_err(dev, "unable to request AMBA regions\n");
+		goto err_clk_enable;
+	}
+
+	pl172->base = devm_ioremap(dev, adev->res.start,
+				   resource_size(&adev->res));
+	if (!pl172->base) {
+		dev_err(dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto err_no_ioremap;
+	}
+
+	amba_set_drvdata(adev, pl172);
+
+	/*
+	 * Loop through each child node, which represents a chip select, and
+	 * configure parameters and timing. If successful, populate devices
+	 * under that node.
+	 */
+	for_each_available_child_of_node(np, child_np) {
+		ret = pl172_parse_cs_config(adev, child_np);
+		if (ret)
+			continue;
+
+		of_platform_populate(child_np, NULL, NULL, dev);
+	}
+
+	return 0;
+
+err_no_ioremap:
+	amba_release_regions(adev);
+err_clk_enable:
+	clk_disable_unprepare(pl172->clk);
+	return ret;
+}
+
+static int pl172_remove(struct amba_device *adev)
+{
+	struct pl172_data *pl172 = amba_get_drvdata(adev);
+
+	clk_disable_unprepare(pl172->clk);
+	amba_release_regions(adev);
+
+	return 0;
+}
+
+static const struct amba_id pl172_ids[] = {
+	{
+		.id	= 0x07341172,
+		.mask	= 0xffffffff,
+	},
+	{ 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, pl172_ids);
+
+static struct amba_driver pl172_driver = {
+	.drv = {
+		.name	= "memory-pl172",
+	},
+	.probe		= pl172_probe,
+	.remove		= pl172_remove,
+	.id_table	= pl172_ids,
+};
+module_amba_driver(pl172_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("PL172 Memory Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/Makefile b/drivers/memory/tegra/Makefile
index 6a0b9ac..c2cb671f 100644
--- a/drivers/memory/tegra/Makefile
+++ b/drivers/memory/tegra/Makefile
@@ -4,6 +4,7 @@
 tegra-mc-$(CONFIG_ARCH_TEGRA_114_SOC) += tegra114.o
 tegra-mc-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124.o
 tegra-mc-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra124.o
+tegra-mc-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210.o
 
 obj-$(CONFIG_TEGRA_MC) += tegra-mc.o
 
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index c71ede6..a1ae0cc 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -42,7 +42,6 @@
 #define  MC_ERR_STATUS_ADR_HI_MASK 0x3
 #define  MC_ERR_STATUS_SECURITY (1 << 17)
 #define  MC_ERR_STATUS_RW (1 << 16)
-#define  MC_ERR_STATUS_CLIENT_MASK 0x7f
 
 #define MC_ERR_ADR 0x0c
 
@@ -67,6 +66,9 @@
 #ifdef CONFIG_ARCH_TEGRA_132_SOC
 	{ .compatible = "nvidia,tegra132-mc", .data = &tegra132_mc_soc },
 #endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+	{ .compatible = "nvidia,tegra210-mc", .data = &tegra210_mc_soc },
+#endif
 	{ }
 };
 MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
@@ -283,7 +285,7 @@
 		else
 			secure = "";
 
-		id = value & MC_ERR_STATUS_CLIENT_MASK;
+		id = value & mc->soc->client_id_mask;
 
 		for (i = 0; i < mc->soc->num_clients; i++) {
 			if (mc->soc->clients[i].id == id) {
@@ -410,6 +412,8 @@
 		return err;
 	}
 
+	WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
+
 	value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
 		MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
 		MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM;
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index b7361b0..ddb1667 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -41,4 +41,8 @@
 extern const struct tegra_mc_soc tegra132_mc_soc;
 #endif
 
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_mc_soc tegra210_mc_soc;
+#endif
+
 #endif /* MEMORY_TEGRA_MC_H */
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 9f57958..c8765db 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -944,5 +944,6 @@
 	.num_clients = ARRAY_SIZE(tegra114_mc_clients),
 	.num_address_bits = 32,
 	.atom_size = 32,
+	.client_id_mask = 0x7f,
 	.smmu = &tegra114_smmu_soc,
 };
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 8620355..3dac7be 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1027,7 +1027,40 @@
 DEFINE_SIMPLE_ATTRIBUTE(emc_debug_rate_fops, emc_debug_rate_get,
 			emc_debug_rate_set, "%lld\n");
 
-static void emc_debugfs_init(struct device *dev)
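+/* Show the space-separated list of supported EMC rates via debugfs. */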
+static int emc_debug_supported_rates_show(struct seq_file *s, void *data)
+{
+	struct tegra_emc *emc = s->private;
+	const char *prefix = "";
+	unsigned int i;
+
+	for (i = 0; i < emc->num_timings; i++) {
+		struct emc_timing *timing = &emc->timings[i];
+
+		seq_printf(s, "%s%lu", prefix, timing->rate);
+
+		prefix = " ";
+	}
+
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+static int emc_debug_supported_rates_open(struct inode *inode,
+					  struct file *file)
+{
+	return single_open(file, emc_debug_supported_rates_show,
+			   inode->i_private);
+}
+
+static const struct file_operations emc_debug_supported_rates_fops = {
+	.open = emc_debug_supported_rates_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
 {
 	struct dentry *root, *file;
 	struct clk *clk;
@@ -1048,6 +1081,11 @@
 				   &emc_debug_rate_fops);
 	if (!file)
 		dev_err(dev, "failed to create debugfs entry\n");
+
+	file = debugfs_create_file("supported_rates", S_IRUGO, root, emc,
+				   &emc_debug_supported_rates_fops);
+	if (!file)
+		dev_err(dev, "failed to create debugfs entry\n");
 }
 
 static int tegra_emc_probe(struct platform_device *pdev)
@@ -1119,7 +1157,7 @@
 	platform_set_drvdata(pdev, emc);
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS))
-		emc_debugfs_init(&pdev->dev);
+		emc_debugfs_init(&pdev->dev, emc);
 
 	return 0;
 };
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 966e155..060fb3d 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1032,6 +1032,7 @@
 	.num_clients = ARRAY_SIZE(tegra124_mc_clients),
 	.num_address_bits = 34,
 	.atom_size = 32,
+	.client_id_mask = 0x7f,
 	.smmu = &tegra124_smmu_soc,
 	.emem_regs = tegra124_mc_emem_regs,
 	.num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
@@ -1067,6 +1068,7 @@
 	.num_clients = ARRAY_SIZE(tegra124_mc_clients),
 	.num_address_bits = 34,
 	.atom_size = 32,
+	.client_id_mask = 0x7f,
 	.smmu = &tegra132_smmu_soc,
 };
 #endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
new file mode 100644
index 0000000..5e144ab
--- /dev/null
+++ b/drivers/memory/tegra/tegra210.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (C) 2015 NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+
+#include <dt-bindings/memory/tegra210-mc.h>
+
+#include "mc.h"
+
+static const struct tegra_mc_client tegra210_mc_clients[] = {
+	{
+		.id = 0x00,
+		.name = "ptcr",
+		.swgroup = TEGRA_SWGROUP_PTC,
+	}, {
+		.id = 0x01,
+		.name = "display0a",
+		.swgroup = TEGRA_SWGROUP_DC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 1,
+		},
+		.la = {
+			.reg = 0x2e8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0xc2,
+		},
+	}, {
+		.id = 0x02,
+		.name = "display0ab",
+		.swgroup = TEGRA_SWGROUP_DCB,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 2,
+		},
+		.la = {
+			.reg = 0x2f4,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0xc6,
+		},
+	}, {
+		.id = 0x03,
+		.name = "display0b",
+		.swgroup = TEGRA_SWGROUP_DC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 3,
+		},
+		.la = {
+			.reg = 0x2e8,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x04,
+		.name = "display0bb",
+		.swgroup = TEGRA_SWGROUP_DCB,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 4,
+		},
+		.la = {
+			.reg = 0x2f4,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x05,
+		.name = "display0c",
+		.swgroup = TEGRA_SWGROUP_DC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 5,
+		},
+		.la = {
+			.reg = 0x2ec,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x06,
+		.name = "display0cb",
+		.swgroup = TEGRA_SWGROUP_DCB,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 6,
+		},
+		.la = {
+			.reg = 0x2f8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x0e,
+		.name = "afir",
+		.swgroup = TEGRA_SWGROUP_AFI,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 14,
+		},
+		.la = {
+			.reg = 0x2e0,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x13,
+		},
+	}, {
+		.id = 0x0f,
+		.name = "avpcarm7r",
+		.swgroup = TEGRA_SWGROUP_AVPC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 15,
+		},
+		.la = {
+			.reg = 0x2e4,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x04,
+		},
+	}, {
+		.id = 0x10,
+		.name = "displayhc",
+		.swgroup = TEGRA_SWGROUP_DC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 16,
+		},
+		.la = {
+			.reg = 0x2f0,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x11,
+		.name = "displayhcb",
+		.swgroup = TEGRA_SWGROUP_DCB,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 17,
+		},
+		.la = {
+			.reg = 0x2fc,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x15,
+		.name = "hdar",
+		.swgroup = TEGRA_SWGROUP_HDA,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 21,
+		},
+		.la = {
+			.reg = 0x318,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x24,
+		},
+	}, {
+		.id = 0x16,
+		.name = "host1xdmar",
+		.swgroup = TEGRA_SWGROUP_HC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 22,
+		},
+		.la = {
+			.reg = 0x310,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x1e,
+		},
+	}, {
+		.id = 0x17,
+		.name = "host1xr",
+		.swgroup = TEGRA_SWGROUP_HC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 23,
+		},
+		.la = {
+			.reg = 0x310,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x1c,
+		.name = "nvencsrd",
+		.swgroup = TEGRA_SWGROUP_NVENC,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 28,
+		},
+		.la = {
+			.reg = 0x328,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x23,
+		},
+	}, {
+		.id = 0x1d,
+		.name = "ppcsahbdmar",
+		.swgroup = TEGRA_SWGROUP_PPCS,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 29,
+		},
+		.la = {
+			.reg = 0x344,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x49,
+		},
+	}, {
+		.id = 0x1e,
+		.name = "ppcsahbslvr",
+		.swgroup = TEGRA_SWGROUP_PPCS,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 30,
+		},
+		.la = {
+			.reg = 0x344,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x1a,
+		},
+	}, {
+		.id = 0x1f,
+		.name = "satar",
+		.swgroup = TEGRA_SWGROUP_SATA,
+		.smmu = {
+			.reg = 0x228,
+			.bit = 31,
+		},
+		.la = {
+			.reg = 0x350,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x65,
+		},
+	}, {
+		.id = 0x27,
+		.name = "mpcorer",
+		.swgroup = TEGRA_SWGROUP_MPCORE,
+		.la = {
+			.reg = 0x320,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x04,
+		},
+	}, {
+		.id = 0x2b,
+		.name = "nvencswr",
+		.swgroup = TEGRA_SWGROUP_NVENC,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 11,
+		},
+		.la = {
+			.reg = 0x328,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x31,
+		.name = "afiw",
+		.swgroup = TEGRA_SWGROUP_AFI,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 17,
+		},
+		.la = {
+			.reg = 0x2e0,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x32,
+		.name = "avpcarm7w",
+		.swgroup = TEGRA_SWGROUP_AVPC,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 18,
+		},
+		.la = {
+			.reg = 0x2e4,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x35,
+		.name = "hdaw",
+		.swgroup = TEGRA_SWGROUP_HDA,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 21,
+		},
+		.la = {
+			.reg = 0x318,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x36,
+		.name = "host1xw",
+		.swgroup = TEGRA_SWGROUP_HC,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 22,
+		},
+		.la = {
+			.reg = 0x314,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x39,
+		.name = "mpcorew",
+		.swgroup = TEGRA_SWGROUP_MPCORE,
+		.la = {
+			.reg = 0x320,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x3b,
+		.name = "ppcsahbdmaw",
+		.swgroup = TEGRA_SWGROUP_PPCS,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 27,
+		},
+		.la = {
+			.reg = 0x348,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x3c,
+		.name = "ppcsahbslvw",
+		.swgroup = TEGRA_SWGROUP_PPCS,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 28,
+		},
+		.la = {
+			.reg = 0x348,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x3d,
+		.name = "sataw",
+		.swgroup = TEGRA_SWGROUP_SATA,
+		.smmu = {
+			.reg = 0x22c,
+			.bit = 29,
+		},
+		.la = {
+			.reg = 0x350,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x65,
+		},
+	}, {
+		.id = 0x44,
+		.name = "ispra",
+		.swgroup = TEGRA_SWGROUP_ISP2,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 4,
+		},
+		.la = {
+			.reg = 0x370,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x18,
+		},
+	}, {
+		.id = 0x46,
+		.name = "ispwa",
+		.swgroup = TEGRA_SWGROUP_ISP2,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 6,
+		},
+		.la = {
+			.reg = 0x374,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x47,
+		.name = "ispwb",
+		.swgroup = TEGRA_SWGROUP_ISP2,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 7,
+		},
+		.la = {
+			.reg = 0x374,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x4a,
+		.name = "xusb_hostr",
+		.swgroup = TEGRA_SWGROUP_XUSB_HOST,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 10,
+		},
+		.la = {
+			.reg = 0x37c,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x39,
+		},
+	}, {
+		.id = 0x4b,
+		.name = "xusb_hostw",
+		.swgroup = TEGRA_SWGROUP_XUSB_HOST,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 11,
+		},
+		.la = {
+			.reg = 0x37c,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x4c,
+		.name = "xusb_devr",
+		.swgroup = TEGRA_SWGROUP_XUSB_DEV,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 12,
+		},
+		.la = {
+			.reg = 0x380,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x39,
+		},
+	}, {
+		.id = 0x4d,
+		.name = "xusb_devw",
+		.swgroup = TEGRA_SWGROUP_XUSB_DEV,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 13,
+		},
+		.la = {
+			.reg = 0x380,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x4e,
+		.name = "isprab",
+		.swgroup = TEGRA_SWGROUP_ISP2B,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 14,
+		},
+		.la = {
+			.reg = 0x384,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x18,
+		},
+	}, {
+		.id = 0x50,
+		.name = "ispwab",
+		.swgroup = TEGRA_SWGROUP_ISP2B,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 16,
+		},
+		.la = {
+			.reg = 0x388,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x51,
+		.name = "ispwbb",
+		.swgroup = TEGRA_SWGROUP_ISP2B,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 17,
+		},
+		.la = {
+			.reg = 0x388,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x54,
+		.name = "tsecsrd",
+		.swgroup = TEGRA_SWGROUP_TSEC,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 20,
+		},
+		.la = {
+			.reg = 0x390,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x9b,
+		},
+	}, {
+		.id = 0x55,
+		.name = "tsecswr",
+		.swgroup = TEGRA_SWGROUP_TSEC,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 21,
+		},
+		.la = {
+			.reg = 0x390,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x56,
+		.name = "a9avpscr",
+		.swgroup = TEGRA_SWGROUP_A9AVP,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 22,
+		},
+		.la = {
+			.reg = 0x3a4,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x04,
+		},
+	}, {
+		.id = 0x57,
+		.name = "a9avpscw",
+		.swgroup = TEGRA_SWGROUP_A9AVP,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 23,
+		},
+		.la = {
+			.reg = 0x3a4,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x58,
+		.name = "gpusrd",
+		.swgroup = TEGRA_SWGROUP_GPU,
+		.smmu = {
+			/* read-only */
+			.reg = 0x230,
+			.bit = 24,
+		},
+		.la = {
+			.reg = 0x3c8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x1a,
+		},
+	}, {
+		.id = 0x59,
+		.name = "gpuswr",
+		.swgroup = TEGRA_SWGROUP_GPU,
+		.smmu = {
+			/* read-only */
+			.reg = 0x230,
+			.bit = 25,
+		},
+		.la = {
+			.reg = 0x3c8,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x5a,
+		.name = "displayt",
+		.swgroup = TEGRA_SWGROUP_DC,
+		.smmu = {
+			.reg = 0x230,
+			.bit = 26,
+		},
+		.la = {
+			.reg = 0x2f0,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x60,
+		.name = "sdmmcra",
+		.swgroup = TEGRA_SWGROUP_SDMMC1A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 0,
+		},
+		.la = {
+			.reg = 0x3b8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x49,
+		},
+	}, {
+		.id = 0x61,
+		.name = "sdmmcraa",
+		.swgroup = TEGRA_SWGROUP_SDMMC2A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 1,
+		},
+		.la = {
+			.reg = 0x3bc,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x49,
+		},
+	}, {
+		.id = 0x62,
+		.name = "sdmmcr",
+		.swgroup = TEGRA_SWGROUP_SDMMC3A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 2,
+		},
+		.la = {
+			.reg = 0x3c0,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x49,
+		},
+	}, {
+		.id = 0x63,
+		.swgroup = TEGRA_SWGROUP_SDMMC4A,
+		.name = "sdmmcrab",
+		.smmu = {
+			.reg = 0x234,
+			.bit = 3,
+		},
+		.la = {
+			.reg = 0x3c4,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x49,
+		},
+	}, {
+		.id = 0x64,
+		.name = "sdmmcwa",
+		.swgroup = TEGRA_SWGROUP_SDMMC1A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 4,
+		},
+		.la = {
+			.reg = 0x3b8,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x65,
+		.name = "sdmmcwaa",
+		.swgroup = TEGRA_SWGROUP_SDMMC2A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 5,
+		},
+		.la = {
+			.reg = 0x3bc,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x66,
+		.name = "sdmmcw",
+		.swgroup = TEGRA_SWGROUP_SDMMC3A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 6,
+		},
+		.la = {
+			.reg = 0x3c0,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x67,
+		.name = "sdmmcwab",
+		.swgroup = TEGRA_SWGROUP_SDMMC4A,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 7,
+		},
+		.la = {
+			.reg = 0x3c4,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x6c,
+		.name = "vicsrd",
+		.swgroup = TEGRA_SWGROUP_VIC,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 12,
+		},
+		.la = {
+			.reg = 0x394,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x1a,
+		},
+	}, {
+		.id = 0x6d,
+		.name = "vicswr",
+		.swgroup = TEGRA_SWGROUP_VIC,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 13,
+		},
+		.la = {
+			.reg = 0x394,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x72,
+		.name = "viw",
+		.swgroup = TEGRA_SWGROUP_VI,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 18,
+		},
+		.la = {
+			.reg = 0x398,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x73,
+		.name = "displayd",
+		.swgroup = TEGRA_SWGROUP_DC,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 19,
+		},
+		.la = {
+			.reg = 0x3c8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x50,
+		},
+	}, {
+		.id = 0x78,
+		.name = "nvdecsrd",
+		.swgroup = TEGRA_SWGROUP_NVDEC,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 24,
+		},
+		.la = {
+			.reg = 0x3d8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x23,
+		},
+	}, {
+		.id = 0x79,
+		.name = "nvdecswr",
+		.swgroup = TEGRA_SWGROUP_NVDEC,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 25,
+		},
+		.la = {
+			.reg = 0x3d8,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x7a,
+		.name = "aper",
+		.swgroup = TEGRA_SWGROUP_APE,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 26,
+		},
+		.la = {
+			.reg = 0x3dc,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0xff,
+		},
+	}, {
+		.id = 0x7b,
+		.name = "apew",
+		.swgroup = TEGRA_SWGROUP_APE,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 27,
+		},
+		.la = {
+			.reg = 0x3dc,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x7e,
+		.name = "nvjpgsrd",
+		.swgroup = TEGRA_SWGROUP_NVJPG,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 30,
+		},
+		.la = {
+			.reg = 0x3e4,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x23,
+		},
+	}, {
+		.id = 0x7f,
+		.name = "nvjpgswr",
+		.swgroup = TEGRA_SWGROUP_NVJPG,
+		.smmu = {
+			.reg = 0x234,
+			.bit = 31,
+		},
+		.la = {
+			.reg = 0x3e4,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x80,
+		.name = "sesrd",
+		.swgroup = TEGRA_SWGROUP_SE,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 0,
+		},
+		.la = {
+			.reg = 0x3e0,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x2e,
+		},
+	}, {
+		.id = 0x81,
+		.name = "seswr",
+		.swgroup = TEGRA_SWGROUP_SE,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 1,
+		},
+		.la = {
+			.reg = 0xb98,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x82,
+		.name = "axiapr",
+		.swgroup = TEGRA_SWGROUP_AXIAP,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 2,
+		},
+		.la = {
+			.reg = 0x3a0,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0xff,
+		},
+	}, {
+		.id = 0x83,
+		.name = "axiapw",
+		.swgroup = TEGRA_SWGROUP_AXIAP,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 3,
+		},
+		.la = {
+			.reg = 0x3a0,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x84,
+		.name = "etrr",
+		.swgroup = TEGRA_SWGROUP_ETR,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 4,
+		},
+		.la = {
+			.reg = 0x3ec,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0xff,
+		},
+	}, {
+		.id = 0x85,
+		.name = "etrw",
+		.swgroup = TEGRA_SWGROUP_ETR,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 5,
+		},
+		.la = {
+			.reg = 0x3ec,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0xff,
+		},
+	}, {
+		.id = 0x86,
+		.name = "tsecsrdb",
+		.swgroup = TEGRA_SWGROUP_TSECB,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 6,
+		},
+		.la = {
+			.reg = 0x3f0,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x9b,
+		},
+	}, {
+		.id = 0x87,
+		.name = "tsecswrb",
+		.swgroup = TEGRA_SWGROUP_TSECB,
+		.smmu = {
+			.reg = 0xb98,
+			.bit = 7,
+		},
+		.la = {
+			.reg = 0x3f0,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	}, {
+		.id = 0x88,
+		.name = "gpusrd2",
+		.swgroup = TEGRA_SWGROUP_GPU,
+		.smmu = {
+			/* read-only */
+			.reg = 0xb98,
+			.bit = 8,
+		},
+		.la = {
+			.reg = 0x3e8,
+			.shift = 0,
+			.mask = 0xff,
+			.def = 0x1a,
+		},
+	}, {
+		.id = 0x89,
+		.name = "gpuswr2",
+		.swgroup = TEGRA_SWGROUP_GPU,
+		.smmu = {
+			/* read-only */
+			.reg = 0xb98,
+			.bit = 9,
+		},
+		.la = {
+			.reg = 0x3e8,
+			.shift = 16,
+			.mask = 0xff,
+			.def = 0x80,
+		},
+	},
+};
+
+static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
+	{ .name = "dc",        .swgroup = TEGRA_SWGROUP_DC,        .reg = 0x240 },
+	{ .name = "dcb",       .swgroup = TEGRA_SWGROUP_DCB,       .reg = 0x244 },
+	{ .name = "afi",       .swgroup = TEGRA_SWGROUP_AFI,       .reg = 0x238 },
+	{ .name = "avpc",      .swgroup = TEGRA_SWGROUP_AVPC,      .reg = 0x23c },
+	{ .name = "hda",       .swgroup = TEGRA_SWGROUP_HDA,       .reg = 0x254 },
+	{ .name = "hc",        .swgroup = TEGRA_SWGROUP_HC,        .reg = 0x250 },
+	{ .name = "nvenc",     .swgroup = TEGRA_SWGROUP_NVENC,     .reg = 0x264 },
+	{ .name = "ppcs",      .swgroup = TEGRA_SWGROUP_PPCS,      .reg = 0x270 },
+	{ .name = "sata",      .swgroup = TEGRA_SWGROUP_SATA,      .reg = 0x274 },
+	{ .name = "isp2",      .swgroup = TEGRA_SWGROUP_ISP2,      .reg = 0x258 },
+	{ .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
+	{ .name = "xusb_dev",  .swgroup = TEGRA_SWGROUP_XUSB_DEV,  .reg = 0x28c },
+	{ .name = "isp2b",     .swgroup = TEGRA_SWGROUP_ISP2B,     .reg = 0xaa4 },
+	{ .name = "tsec",      .swgroup = TEGRA_SWGROUP_TSEC,      .reg = 0x294 },
+	{ .name = "a9avp",     .swgroup = TEGRA_SWGROUP_A9AVP,     .reg = 0x290 },
+	{ .name = "gpu",       .swgroup = TEGRA_SWGROUP_GPU,       .reg = 0xaac },
+	{ .name = "sdmmc1a",   .swgroup = TEGRA_SWGROUP_SDMMC1A,   .reg = 0xa94 },
+	{ .name = "sdmmc2a",   .swgroup = TEGRA_SWGROUP_SDMMC2A,   .reg = 0xa98 },
+	{ .name = "sdmmc3a",   .swgroup = TEGRA_SWGROUP_SDMMC3A,   .reg = 0xa9c },
+	{ .name = "sdmmc4a",   .swgroup = TEGRA_SWGROUP_SDMMC4A,   .reg = 0xaa0 },
+	{ .name = "vic",       .swgroup = TEGRA_SWGROUP_VIC,       .reg = 0x284 },
+	{ .name = "vi",        .swgroup = TEGRA_SWGROUP_VI,        .reg = 0x280 },
+	{ .name = "nvdec",     .swgroup = TEGRA_SWGROUP_NVDEC,     .reg = 0xab4 },
+	{ .name = "ape",       .swgroup = TEGRA_SWGROUP_APE,       .reg = 0xab8 },
+	{ .name = "nvjpg",     .swgroup = TEGRA_SWGROUP_NVJPG,     .reg = 0xac0 },
+	{ .name = "se",        .swgroup = TEGRA_SWGROUP_SE,        .reg = 0xabc },
+	{ .name = "axiap",     .swgroup = TEGRA_SWGROUP_AXIAP,     .reg = 0xacc },
+	{ .name = "etr",       .swgroup = TEGRA_SWGROUP_ETR,       .reg = 0xad0 },
+	{ .name = "tsecb",     .swgroup = TEGRA_SWGROUP_TSECB,     .reg = 0xad4 },
+};
+
+static const struct tegra_smmu_soc tegra210_smmu_soc = {
+	.clients = tegra210_mc_clients,
+	.num_clients = ARRAY_SIZE(tegra210_mc_clients),
+	.swgroups = tegra210_swgroups,
+	.num_swgroups = ARRAY_SIZE(tegra210_swgroups),
+	.supports_round_robin_arbitration = true,
+	.supports_request_limit = true,
+	.num_tlb_lines = 32,
+	.num_asids = 128,
+};
+
+const struct tegra_mc_soc tegra210_mc_soc = {
+	.clients = tegra210_mc_clients,
+	.num_clients = ARRAY_SIZE(tegra210_mc_clients),
+	.num_address_bits = 34,
+	.atom_size = 64,
+	.client_id_mask = 0xff,
+	.smmu = &tegra210_smmu_soc,
+};
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 1abcd8f..52e16c7 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -966,5 +966,6 @@
 	.num_clients = ARRAY_SIZE(tegra30_mc_clients),
 	.num_address_bits = 32,
 	.atom_size = 16,
+	.client_id_mask = 0x7f,
 	.smmu = &tegra30_smmu_soc,
 };
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 70bb753..fc73937 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1859,6 +1859,15 @@
 	}
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
+	/* Basic sanity checks to prevent underflows or integer overflows */
+	if (karg.maxReplyBytes < 0 ||
+	    karg.dataInSize < 0 ||
+	    karg.dataOutSize < 0 ||
+	    karg.dataSgeOffset < 0 ||
+	    karg.maxSenseBytes < 0 ||
+	    karg.dataSgeOffset > ioc->req_sz / 4)
+		return -EINVAL;
+
 	/* Verify that the final request frame will not be too large.
 	 */
 	sz = karg.dataSgeOffset * 4;
diff --git a/drivers/mfd/88pm800.c b/drivers/mfd/88pm800.c
index 841717a..f2d9fb4 100644
--- a/drivers/mfd/88pm800.c
+++ b/drivers/mfd/88pm800.c
@@ -609,7 +609,6 @@
 static struct i2c_driver pm800_driver = {
 	.driver = {
 		.name = "88PM800",
-		.owner = THIS_MODULE,
 		.pm = &pm80x_pm_ops,
 		},
 	.probe = pm800_probe,
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index e9d5064..39f2302 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -267,7 +267,6 @@
 static struct i2c_driver pm805_driver = {
 	.driver = {
 		.name = "88PM805",
-		.owner = THIS_MODULE,
 		.pm = &pm80x_pm_ops,
 		},
 	.probe = pm805_probe,
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index e03b7f4..3269a99 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -558,11 +558,7 @@
 	irq_set_chip_data(virq, d->host_data);
 	irq_set_chip_and_handler(virq, &pm860x_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 	return 0;
 }
 
@@ -1258,7 +1254,6 @@
 static struct i2c_driver pm860x_driver = {
 	.driver	= {
 		.name	= "88PM860x",
-		.owner	= THIS_MODULE,
 		.pm     = &pm860x_pm_ops,
 		.of_match_table	= pm860x_dt_ids,
 	},
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0f0cad8..99d6367 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -97,6 +97,7 @@
 	select MFD_CORE
 	select CHROME_PLATFORMS
 	select CROS_EC_PROTO
+	depends on X86 || ARM || COMPILE_TEST
 	help
 	  If you say Y here you get support for the ChromeOS Embedded
 	  Controller (EC) providing keyboard, battery and power services.
@@ -105,7 +106,7 @@
 
 config MFD_CROS_EC_I2C
 	tristate "ChromeOS Embedded Controller (I2C)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && I2C
+	depends on MFD_CROS_EC && I2C
 
 	help
 	  If you say Y here, you get support for talking to the ChromeOS
@@ -115,7 +116,7 @@
 
 config MFD_CROS_EC_SPI
 	tristate "ChromeOS Embedded Controller (SPI)"
-	depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
+	depends on MFD_CROS_EC && SPI
 
 	---help---
 	  If you say Y here, you get support for talking to the ChromeOS EC
@@ -186,6 +187,18 @@
 	  This driver can be built as a module. If built as a module it will be
 	  called "da9055"
 
+config MFD_DA9062
+	tristate "Dialog Semiconductor DA9062 PMIC Support"
+	select MFD_CORE
+	select REGMAP_I2C
+	select REGMAP_IRQ
+	depends on I2C=y
+	help
+	  Say yes here to add support for the Dialog Semiconductor DA9062 PMIC.
+	  This includes the I2C driver and core APIs.
+	  Additional drivers must be enabled in order to use the functionality
+	  of the device.
+
 config MFD_DA9063
 	bool "Dialog Semiconductor DA9063 PMIC Support"
 	select MFD_CORE
@@ -329,6 +342,29 @@
 	  thermal, charger and related power management functions
 	  on these systems.
 
+config MFD_INTEL_LPSS
+	tristate
+	select COMMON_CLK
+	select MFD_CORE
+
+config MFD_INTEL_LPSS_ACPI
+	tristate "Intel Low Power Subsystem support in ACPI mode"
+	select MFD_INTEL_LPSS
+	depends on X86 && ACPI
+	help
+	  This driver supports Intel Low Power Subsystem (LPSS) devices such as
+	  I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
+	  PCH) in ACPI mode.
+
+config MFD_INTEL_LPSS_PCI
+	tristate "Intel Low Power Subsystem support in PCI mode"
+	select MFD_INTEL_LPSS
+	depends on X86 && PCI
+	help
+	  This driver supports Intel Low Power Subsystem (LPSS) devices such as
+	  I2C, SPI and HS-UART starting from Intel Sunrisepoint (Intel Skylake
+	  PCH) in PCI mode.
+
 config MFD_INTEL_MSIC
 	bool "Intel MSIC"
 	depends on INTEL_SCU_IPC
@@ -376,12 +412,14 @@
 	  device may provide functions like watchdog, GPIO, UART and I2C bus.
 
 	  The following modules are supported:
+		* COMe-bBL6
 		* COMe-bHL6
 		* COMe-bIP#
 		* COMe-bPC2 (ETXexpress-PC)
 		* COMe-bSC# (ETXexpress-SC T#)
 		* COMe-cBL6
 		* COMe-cBT6
+		* COMe-cBW6
 		* COMe-cCT6
 		* COMe-cDC2 (microETXexpress-DC)
 		* COMe-cHL6
@@ -1357,6 +1395,12 @@
 	help
 	  Support for Wolfson Microelectronics WM8997 low power audio SoC
 
+config MFD_WM8998
+	bool "Wolfson Microelectronics WM8998"
+	depends on MFD_ARIZONA
+	help
+	  Support for Wolfson Microelectronics WM8998 low power audio SoC
+
 config MFD_WM8400
 	bool "Wolfson Microelectronics WM8400"
 	select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ea40e07..a59e3fc 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -48,6 +48,9 @@
 ifeq ($(CONFIG_MFD_WM8997),y)
 obj-$(CONFIG_MFD_ARIZONA)	+= wm8997-tables.o
 endif
+ifeq ($(CONFIG_MFD_WM8998),y)
+obj-$(CONFIG_MFD_ARIZONA)	+= wm8998-tables.o
+endif
 obj-$(CONFIG_MFD_WM8400)	+= wm8400-core.o
 wm831x-objs			:= wm831x-core.o wm831x-irq.o wm831x-otp.o
 wm831x-objs			+= wm831x-auxadc.o
@@ -110,10 +113,11 @@
 
 da9055-objs			:= da9055-core.o da9055-i2c.o
 obj-$(CONFIG_MFD_DA9055)	+= da9055.o
-
+obj-$(CONFIG_MFD_DA9062)	+= da9062-core.o
 da9063-objs			:= da9063-core.o da9063-irq.o da9063-i2c.o
 obj-$(CONFIG_MFD_DA9063)	+= da9063.o
 obj-$(CONFIG_MFD_DA9150)	+= da9150-core.o
+
 obj-$(CONFIG_MFD_MAX14577)	+= max14577.o
 obj-$(CONFIG_MFD_MAX77686)	+= max77686.o
 obj-$(CONFIG_MFD_MAX77693)	+= max77693.o
@@ -161,6 +165,9 @@
 obj-$(CONFIG_MFD_TPS65090)	+= tps65090.o
 obj-$(CONFIG_MFD_AAT2870_CORE)	+= aat2870-core.o
 obj-$(CONFIG_MFD_ATMEL_HLCDC)	+= atmel-hlcdc.o
+obj-$(CONFIG_MFD_INTEL_LPSS)	+= intel-lpss.o
+obj-$(CONFIG_MFD_INTEL_LPSS_PCI)	+= intel-lpss-pci.o
+obj-$(CONFIG_MFD_INTEL_LPSS_ACPI)	+= intel-lpss-acpi.o
 obj-$(CONFIG_MFD_INTEL_MSIC)	+= intel_msic.o
 obj-$(CONFIG_MFD_PALMAS)	+= palmas.o
 obj-$(CONFIG_MFD_VIPERBOARD)    += viperboard.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 4e6e03d..29b6a2d 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -500,7 +500,6 @@
 static struct i2c_driver aat2870_i2c_driver = {
 	.driver = {
 		.name	= "aat2870",
-		.owner	= THIS_MODULE,
 		.pm	= &aat2870_pm_ops,
 	},
 	.probe		= aat2870_i2c_probe,
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 4659ac1..f0afb44 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -972,7 +972,6 @@
 static struct i2c_driver ab3100_driver = {
 	.driver = {
 		.name	= "ab3100",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= ab3100_id,
 	.probe		= ab3100_probe,
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 000da72..fefbe4c 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -565,11 +565,7 @@
 	irq_set_chip_and_handler(virq, &ab8500_irq_chip,
 				handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index f495b8b..ae88654 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -351,7 +351,6 @@
 static struct i2c_driver adp5520_driver = {
 	.driver = {
 		.name	= "adp5520",
-		.owner	= THIS_MODULE,
 		.pm	= &adp5520_pm,
 	},
 	.probe		= adp5520_probe,
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index a72ddb29..44cfdbb 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -30,7 +30,7 @@
 
 #include "arizona.h"
 
-static const char *wm5102_core_supplies[] = {
+static const char * const wm5102_core_supplies[] = {
 	"AVDD",
 	"DBVDD1",
 };
@@ -146,17 +146,31 @@
 static irqreturn_t arizona_overclocked(int irq, void *data)
 {
 	struct arizona *arizona = data;
-	unsigned int val[2];
+	unsigned int val[3];
 	int ret;
-	
+
 	ret = regmap_bulk_read(arizona->regmap, ARIZONA_INTERRUPT_RAW_STATUS_6,
-			       &val[0], 2);
+			       &val[0], 3);
 	if (ret != 0) {
 		dev_err(arizona->dev, "Failed to read overclock status: %d\n",
 			ret);
 		return IRQ_NONE;
 	}
 
+	switch (arizona->type) {
+	case WM8998:
+	case WM1814:
+		/* Some bits are shifted on WM8998; rearrange them to
+		 * match the standard bit layout.
+		 */
+		val[0] = ((val[0] & 0x60e0) >> 1) |
+			 ((val[0] & 0x1e00) >> 2) |
+			 (val[0] & 0x000f);
+		break;
+	default:
+		break;
+	}
+
 	if (val[0] & ARIZONA_PWM_OVERCLOCKED_STS)
 		dev_err(arizona->dev, "PWM overclocked\n");
 	if (val[0] & ARIZONA_FX_CORE_OVERCLOCKED_STS)
@@ -201,6 +215,9 @@
 	if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS)
 		dev_err(arizona->dev, "ISRC1 overclocked\n");
 
+	if (val[2] & ARIZONA_SPDIF_OVERCLOCKED_STS)
+		dev_err(arizona->dev, "SPDIF overclocked\n");
+
 	return IRQ_HANDLED;
 }
 
@@ -392,7 +409,7 @@
  * Register patch to some of the CODECs internal write sequences
  * to ensure a clean exit from the low power sleep state.
  */
-static const struct reg_default wm5110_sleep_patch[] = {
+static const struct reg_sequence wm5110_sleep_patch[] = {
 	{ 0x337A, 0xC100 },
 	{ 0x337B, 0x0041 },
 	{ 0x3300, 0xA210 },
@@ -550,9 +567,8 @@
 		break;
 	default:
 		ret = arizona_wait_for_boot(arizona);
-		if (ret != 0) {
+		if (ret != 0)
 			goto err;
-		}
 
 		if (arizona->external_dcvdd) {
 			ret = regmap_update_bits(arizona->regmap,
@@ -759,8 +775,8 @@
 
 	ret = of_property_read_u32_array(arizona->dev->of_node,
 					 "wlf,gpio-defaults",
-					 arizona->pdata.gpio_defaults,
-					 ARRAY_SIZE(arizona->pdata.gpio_defaults));
+					 pdata->gpio_defaults,
+					 ARRAY_SIZE(pdata->gpio_defaults));
 	if (ret >= 0) {
 		/*
 		 * All values are literal except out of range values
@@ -768,11 +784,11 @@
 		 * data which uses 0 as chip default and out of range
 		 * as zero.
 		 */
-		for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
-			if (arizona->pdata.gpio_defaults[i] > 0xffff)
-				arizona->pdata.gpio_defaults[i] = 0;
-			else if (arizona->pdata.gpio_defaults[i] == 0)
-				arizona->pdata.gpio_defaults[i] = 0x10000;
+		for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+			if (pdata->gpio_defaults[i] > 0xffff)
+				pdata->gpio_defaults[i] = 0;
+			else if (pdata->gpio_defaults[i] == 0)
+				pdata->gpio_defaults[i] = 0x10000;
 		}
 	} else {
 		dev_err(arizona->dev, "Failed to parse GPIO defaults: %d\n",
@@ -781,20 +797,20 @@
 
 	of_property_for_each_u32(arizona->dev->of_node, "wlf,inmode", prop,
 				 cur, val) {
-		if (count == ARRAY_SIZE(arizona->pdata.inmode))
+		if (count == ARRAY_SIZE(pdata->inmode))
 			break;
 
-		arizona->pdata.inmode[count] = val;
+		pdata->inmode[count] = val;
 		count++;
 	}
 
 	count = 0;
 	of_property_for_each_u32(arizona->dev->of_node, "wlf,dmic-ref", prop,
 				 cur, val) {
-		if (count == ARRAY_SIZE(arizona->pdata.dmic_ref))
+		if (count == ARRAY_SIZE(pdata->dmic_ref))
 			break;
 
-		arizona->pdata.dmic_ref[count] = val;
+		pdata->dmic_ref[count] = val;
 		count++;
 	}
 
@@ -806,6 +822,8 @@
 	{ .compatible = "wlf,wm5110", .data = (void *)WM5110 },
 	{ .compatible = "wlf,wm8280", .data = (void *)WM8280 },
 	{ .compatible = "wlf,wm8997", .data = (void *)WM8997 },
+	{ .compatible = "wlf,wm8998", .data = (void *)WM8998 },
+	{ .compatible = "wlf,wm1814", .data = (void *)WM1814 },
 	{},
 };
 EXPORT_SYMBOL_GPL(arizona_of_match);
@@ -820,7 +838,7 @@
 	{ .name = "arizona-ldo1" },
 };
 
-static const char *wm5102_supplies[] = {
+static const char * const wm5102_supplies[] = {
 	"MICVDD",
 	"DBVDD2",
 	"DBVDD3",
@@ -863,7 +881,7 @@
 	},
 };
 
-static const char *wm8997_supplies[] = {
+static const char * const wm8997_supplies[] = {
 	"MICVDD",
 	"DBVDD2",
 	"CPVDD",
@@ -887,11 +905,28 @@
 	},
 };
 
+static const struct mfd_cell wm8998_devs[] = {
+	{
+		.name = "arizona-extcon",
+		.parent_supplies = wm5102_supplies,
+		.num_parent_supplies = 1, /* We only need MICVDD */
+	},
+	{ .name = "arizona-gpio" },
+	{ .name = "arizona-haptics" },
+	{ .name = "arizona-pwm" },
+	{
+		.name = "wm8998-codec",
+		.parent_supplies = wm5102_supplies,
+		.num_parent_supplies = ARRAY_SIZE(wm5102_supplies),
+	},
+	{ .name = "arizona-micsupp" },
+};
+
 int arizona_dev_init(struct arizona *arizona)
 {
 	struct device *dev = arizona->dev;
 	const char *type_name;
-	unsigned int reg, val;
+	unsigned int reg, val, mask;
 	int (*apply_patch)(struct arizona *) = NULL;
 	int ret, i;
 
@@ -911,6 +946,8 @@
 	case WM5110:
 	case WM8280:
 	case WM8997:
+	case WM8998:
+	case WM1814:
 		for (i = 0; i < ARRAY_SIZE(wm5102_core_supplies); i++)
 			arizona->core_supplies[i].supply
 				= wm5102_core_supplies[i];
@@ -992,6 +1029,7 @@
 	switch (reg) {
 	case 0x5102:
 	case 0x5110:
+	case 0x6349:
 	case 0x8997:
 		break;
 	default:
@@ -1093,6 +1131,27 @@
 		apply_patch = wm8997_patch;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case 0x6349:
+		switch (arizona->type) {
+		case WM8998:
+			type_name = "WM8998";
+			break;
+
+		case WM1814:
+			type_name = "WM1814";
+			break;
+
+		default:
+			type_name = "WM8998";
+			dev_err(arizona->dev, "WM8998 registered as %d\n",
+				arizona->type);
+			arizona->type = WM8998;
+		}
+
+		apply_patch = wm8998_patch;
+		break;
+#endif
 	default:
 		dev_err(arizona->dev, "Unknown device ID %x\n", reg);
 		goto err_reset;
@@ -1204,14 +1263,38 @@
 			<< ARIZONA_IN1_DMIC_SUP_SHIFT;
 		if (arizona->pdata.inmode[i] & ARIZONA_INMODE_DMIC)
 			val |= 1 << ARIZONA_IN1_MODE_SHIFT;
-		if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
-			val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+		switch (arizona->type) {
+		case WM8998:
+		case WM1814:
+			regmap_update_bits(arizona->regmap,
+				ARIZONA_ADC_DIGITAL_VOLUME_1L + (i * 8),
+				ARIZONA_IN1L_SRC_SE_MASK,
+				(arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+					<< ARIZONA_IN1L_SRC_SE_SHIFT);
+
+			regmap_update_bits(arizona->regmap,
+				ARIZONA_ADC_DIGITAL_VOLUME_1R + (i * 8),
+				ARIZONA_IN1R_SRC_SE_MASK,
+				(arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+					<< ARIZONA_IN1R_SRC_SE_SHIFT);
+
+			mask = ARIZONA_IN1_DMIC_SUP_MASK |
+				ARIZONA_IN1_MODE_MASK;
+			break;
+		default:
+			if (arizona->pdata.inmode[i] & ARIZONA_INMODE_SE)
+				val |= 1 << ARIZONA_IN1_SINGLE_ENDED_SHIFT;
+
+			mask = ARIZONA_IN1_DMIC_SUP_MASK |
+				ARIZONA_IN1_MODE_MASK |
+				ARIZONA_IN1_SINGLE_ENDED_MASK;
+			break;
+		}
 
 		regmap_update_bits(arizona->regmap,
 				   ARIZONA_IN1L_CONTROL + (i * 8),
-				   ARIZONA_IN1_DMIC_SUP_MASK |
-				   ARIZONA_IN1_MODE_MASK |
-				   ARIZONA_IN1_SINGLE_ENDED_MASK, val);
+				   mask, val);
 	}
 
 	for (i = 0; i < ARIZONA_MAX_OUTPUT; i++) {
@@ -1273,6 +1356,11 @@
 		ret = mfd_add_devices(arizona->dev, -1, wm8997_devs,
 				      ARRAY_SIZE(wm8997_devs), NULL, 0, NULL);
 		break;
+	case WM8998:
+	case WM1814:
+		ret = mfd_add_devices(arizona->dev, -1, wm8998_devs,
+				      ARRAY_SIZE(wm8998_devs), NULL, 0, NULL);
+		break;
 	}
 
 	if (ret != 0) {
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index ff782a5..cea1b40 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -53,6 +53,12 @@
 		regmap_config = &wm8997_i2c_regmap;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case WM8998:
+	case WM1814:
+		regmap_config = &wm8998_i2c_regmap;
+		break;
+#endif
 	default:
 		dev_err(&i2c->dev, "Unknown device type %ld\n",
 			id->driver_data);
@@ -90,6 +96,8 @@
 	{ "wm5110", WM5110 },
 	{ "wm8280", WM8280 },
 	{ "wm8997", WM8997 },
+	{ "wm8998", WM8998 },
+	{ "wm1814", WM1814 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, arizona_i2c_id);
@@ -97,7 +105,6 @@
 static struct i2c_driver arizona_i2c_driver = {
 	.driver = {
 		.name	= "arizona",
-		.owner	= THIS_MODULE,
 		.pm	= &arizona_pm_ops,
 		.of_match_table	= of_match_ptr(arizona_of_match),
 	},
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 2b9965d5..2cac4f4 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -174,14 +174,7 @@
 	irq_set_chip_data(virq, data);
 	irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -234,6 +227,15 @@
 		arizona->ctrlif_error = false;
 		break;
 #endif
+#ifdef CONFIG_MFD_WM8998
+	case WM8998:
+	case WM1814:
+		aod = &wm8998_aod;
+		irq = &wm8998_irq;
+
+		arizona->ctrlif_error = false;
+		break;
+#endif
 	default:
 		BUG_ON("Unknown Arizona class device" == NULL);
 		return -EINVAL;
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
index fbe2843..3af12e9 100644
--- a/drivers/mfd/arizona.h
+++ b/drivers/mfd/arizona.h
@@ -27,6 +27,8 @@
 
 extern const struct regmap_config wm8997_i2c_regmap;
 
+extern const struct regmap_config wm8998_i2c_regmap;
+
 extern const struct dev_pm_ops arizona_pm_ops;
 
 extern const struct of_device_id arizona_of_match[];
@@ -41,6 +43,9 @@
 extern const struct regmap_irq_chip wm8997_aod;
 extern const struct regmap_irq_chip wm8997_irq;
 
+extern struct regmap_irq_chip wm8998_aod;
+extern struct regmap_irq_chip wm8998_irq;
+
 int arizona_dev_init(struct arizona *arizona);
 int arizona_dev_exit(struct arizona *arizona);
 int arizona_irq_init(struct arizona *arizona);
diff --git a/drivers/mfd/as3711.c b/drivers/mfd/as3711.c
index d9706ed..d001f7e2 100644
--- a/drivers/mfd/as3711.c
+++ b/drivers/mfd/as3711.c
@@ -211,7 +211,6 @@
 static struct i2c_driver as3711_i2c_driver = {
 	.driver = {
 		   .name = "as3711",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(as3711_of_match),
 	},
 	.probe = as3711_i2c_probe,
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 39fa554..924ea90 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -437,7 +437,6 @@
 static struct i2c_driver as3722_i2c_driver = {
 	.driver = {
 		.name = "as3722",
-		.owner = THIS_MODULE,
 		.of_match_table = as3722_of_match,
 	},
 	.probe = as3722_i2c_probe,
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 120df5c..4b54128 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -411,7 +411,7 @@
 
 		irq_set_chip_data(irq, asic);
 		irq_set_handler(irq, handle_level_irq);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
@@ -431,7 +431,7 @@
 	irq_base = asic->irq_base;
 
 	for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
-		set_irq_flags(irq, 0);
+		irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		irq_set_chip_and_handler(irq, NULL, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index cfd58f4..3fff6b5 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/iopoll.h>
 #include <linux/mfd/atmel-hlcdc.h>
 #include <linux/mfd/core.h>
 #include <linux/module.h>
@@ -26,6 +27,10 @@
 
 #define ATMEL_HLCDC_REG_MAX		(0x4000 - 0x4)
 
+struct atmel_hlcdc_regmap {
+	void __iomem *regs;
+};
+
 static const struct mfd_cell atmel_hlcdc_cells[] = {
 	{
 		.name = "atmel-hlcdc-pwm",
@@ -37,28 +42,62 @@
 	},
 };
 
+static int regmap_atmel_hlcdc_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	struct atmel_hlcdc_regmap *hregmap = context;
+
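+	/*
+	 * For registers up to ATMEL_HLCDC_DIS, wait for the Synchronization
+	 * In Progress (SIP) bit in the status register to clear before
+	 * issuing the write.
+	 */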
+	if (reg <= ATMEL_HLCDC_DIS) {
+		u32 status;
+
+		readl_poll_timeout(hregmap->regs + ATMEL_HLCDC_SR, status,
+				   !(status & ATMEL_HLCDC_SIP), 1, 100);
+	}
+
+	writel(val, hregmap->regs + reg);
+
+	return 0;
+}
+
+static int regmap_atmel_hlcdc_reg_read(void *context, unsigned int reg,
+				       unsigned int *val)
+{
+	struct atmel_hlcdc_regmap *hregmap = context;
+
+	*val = readl(hregmap->regs + reg);
+
+	return 0;
+}
+
 static const struct regmap_config atmel_hlcdc_regmap_config = {
 	.reg_bits = 32,
 	.val_bits = 32,
 	.reg_stride = 4,
 	.max_register = ATMEL_HLCDC_REG_MAX,
+	.reg_write = regmap_atmel_hlcdc_reg_write,
+	.reg_read = regmap_atmel_hlcdc_reg_read,
+	.fast_io = true,
 };
 
 static int atmel_hlcdc_probe(struct platform_device *pdev)
 {
+	struct atmel_hlcdc_regmap *hregmap;
 	struct device *dev = &pdev->dev;
 	struct atmel_hlcdc *hlcdc;
 	struct resource *res;
-	void __iomem *regs;
+
+	hregmap = devm_kzalloc(dev, sizeof(*hregmap), GFP_KERNEL);
+	if (!hregmap)
+		return -ENOMEM;
 
 	hlcdc = devm_kzalloc(dev, sizeof(*hlcdc), GFP_KERNEL);
 	if (!hlcdc)
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(regs))
-		return PTR_ERR(regs);
+	hregmap->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(hregmap->regs))
+		return PTR_ERR(hregmap->regs);
 
 	hlcdc->irq = platform_get_irq(pdev, 0);
 	if (hlcdc->irq < 0)
@@ -82,8 +121,8 @@
 		return PTR_ERR(hlcdc->slow_clk);
 	}
 
-	hlcdc->regmap = devm_regmap_init_mmio(dev, regs,
-					      &atmel_hlcdc_regmap_config);
+	hlcdc->regmap = devm_regmap_init(dev, NULL, hregmap,
+					 &atmel_hlcdc_regmap_config);
 	if (IS_ERR(hlcdc->regmap))
 		return PTR_ERR(hlcdc->regmap);
 
@@ -102,7 +141,11 @@
 }
 
 static const struct of_device_id atmel_hlcdc_match[] = {
+	{ .compatible = "atmel,at91sam9n12-hlcdc" },
+	{ .compatible = "atmel,at91sam9x5-hlcdc" },
+	{ .compatible = "atmel,sama5d2-hlcdc" },
 	{ .compatible = "atmel,sama5d3-hlcdc" },
+	{ .compatible = "atmel,sama5d4-hlcdc" },
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 6df9155..3f576b7 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -30,19 +30,47 @@
 #define AXP20X_OFF	0x80
 
 static const char * const axp20x_model_names[] = {
+	"AXP152",
 	"AXP202",
 	"AXP209",
 	"AXP221",
 	"AXP288",
 };
 
+static const struct regmap_range axp152_writeable_ranges[] = {
+	regmap_reg_range(AXP152_LDO3456_DC1234_CTRL, AXP152_IRQ3_STATE),
+	regmap_reg_range(AXP152_DCDC_MODE, AXP152_PWM1_DUTY_CYCLE),
+};
+
+static const struct regmap_range axp152_volatile_ranges[] = {
+	regmap_reg_range(AXP152_PWR_OP_MODE, AXP152_PWR_OP_MODE),
+	regmap_reg_range(AXP152_IRQ1_EN, AXP152_IRQ3_STATE),
+	regmap_reg_range(AXP152_GPIO_INPUT, AXP152_GPIO_INPUT),
+};
+
+static const struct regmap_access_table axp152_writeable_table = {
+	.yes_ranges	= axp152_writeable_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(axp152_writeable_ranges),
+};
+
+static const struct regmap_access_table axp152_volatile_table = {
+	.yes_ranges	= axp152_volatile_ranges,
+	.n_yes_ranges	= ARRAY_SIZE(axp152_volatile_ranges),
+};
+
 static const struct regmap_range axp20x_writeable_ranges[] = {
 	regmap_reg_range(AXP20X_DATACACHE(0), AXP20X_IRQ5_STATE),
 	regmap_reg_range(AXP20X_DCDC_MODE, AXP20X_FG_RES),
+	regmap_reg_range(AXP20X_RDC_H, AXP20X_OCV(AXP20X_OCV_MAX)),
 };
 
 static const struct regmap_range axp20x_volatile_ranges[] = {
+	regmap_reg_range(AXP20X_PWR_INPUT_STATUS, AXP20X_USB_OTG_STATUS),
+	regmap_reg_range(AXP20X_CHRG_CTRL1, AXP20X_CHRG_CTRL2),
 	regmap_reg_range(AXP20X_IRQ1_EN, AXP20X_IRQ5_STATE),
+	regmap_reg_range(AXP20X_ACIN_V_ADC_H, AXP20X_IPSOUT_V_HIGH_L),
+	regmap_reg_range(AXP20X_GPIO20_SS, AXP20X_GPIO3_CTRL),
+	regmap_reg_range(AXP20X_FG_RES, AXP20X_RDC_L),
 };
 
 static const struct regmap_access_table axp20x_writeable_table = {
@@ -93,6 +121,11 @@
 	.n_yes_ranges	= ARRAY_SIZE(axp288_volatile_ranges),
 };
 
+static struct resource axp152_pek_resources[] = {
+	DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_RIS_EDGE, "PEK_DBR"),
+	DEFINE_RES_IRQ_NAMED(AXP152_IRQ_PEK_FAL_EDGE, "PEK_DBF"),
+};
+
 static struct resource axp20x_pek_resources[] = {
 	{
 		.name	= "PEK_DBR",
@@ -107,6 +140,13 @@
 	},
 };
 
+static struct resource axp20x_usb_power_supply_resources[] = {
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_PLUGIN, "VBUS_PLUGIN"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_REMOVAL, "VBUS_REMOVAL"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_VALID, "VBUS_VALID"),
+	DEFINE_RES_IRQ_NAMED(AXP20X_IRQ_VBUS_NOT_VALID, "VBUS_NOT_VALID"),
+};
+
 static struct resource axp22x_pek_resources[] = {
 	{
 		.name   = "PEK_DBR",
@@ -154,12 +194,21 @@
 	},
 };
 
+static const struct regmap_config axp152_regmap_config = {
+	.reg_bits	= 8,
+	.val_bits	= 8,
+	.wr_table	= &axp152_writeable_table,
+	.volatile_table	= &axp152_volatile_table,
+	.max_register	= AXP152_PWM1_DUTY_CYCLE,
+	.cache_type	= REGCACHE_RBTREE,
+};
+
 static const struct regmap_config axp20x_regmap_config = {
 	.reg_bits	= 8,
 	.val_bits	= 8,
 	.wr_table	= &axp20x_writeable_table,
 	.volatile_table	= &axp20x_volatile_table,
-	.max_register	= AXP20X_FG_RES,
+	.max_register	= AXP20X_OCV(AXP20X_OCV_MAX),
 	.cache_type	= REGCACHE_RBTREE,
 };
 
@@ -184,6 +233,26 @@
 #define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask)			\
 	[_variant##_IRQ_##_irq] = { .reg_offset = (_off), .mask = BIT(_mask) }
 
+static const struct regmap_irq axp152_regmap_irqs[] = {
+	INIT_REGMAP_IRQ(AXP152, LDO0IN_CONNECT,		0, 6),
+	INIT_REGMAP_IRQ(AXP152, LDO0IN_REMOVAL,		0, 5),
+	INIT_REGMAP_IRQ(AXP152, ALDO0IN_CONNECT,	0, 3),
+	INIT_REGMAP_IRQ(AXP152, ALDO0IN_REMOVAL,	0, 2),
+	INIT_REGMAP_IRQ(AXP152, DCDC1_V_LOW,		1, 5),
+	INIT_REGMAP_IRQ(AXP152, DCDC2_V_LOW,		1, 4),
+	INIT_REGMAP_IRQ(AXP152, DCDC3_V_LOW,		1, 3),
+	INIT_REGMAP_IRQ(AXP152, DCDC4_V_LOW,		1, 2),
+	INIT_REGMAP_IRQ(AXP152, PEK_SHORT,		1, 1),
+	INIT_REGMAP_IRQ(AXP152, PEK_LONG,		1, 0),
+	INIT_REGMAP_IRQ(AXP152, TIMER,			2, 7),
+	INIT_REGMAP_IRQ(AXP152, PEK_RIS_EDGE,		2, 6),
+	INIT_REGMAP_IRQ(AXP152, PEK_FAL_EDGE,		2, 5),
+	INIT_REGMAP_IRQ(AXP152, GPIO3_INPUT,		2, 3),
+	INIT_REGMAP_IRQ(AXP152, GPIO2_INPUT,		2, 2),
+	INIT_REGMAP_IRQ(AXP152, GPIO1_INPUT,		2, 1),
+	INIT_REGMAP_IRQ(AXP152, GPIO0_INPUT,		2, 0),
+};
+
 static const struct regmap_irq axp20x_regmap_irqs[] = {
 	INIT_REGMAP_IRQ(AXP20X, ACIN_OVER_V,		0, 7),
 	INIT_REGMAP_IRQ(AXP20X, ACIN_PLUGIN,		0, 6),
@@ -293,6 +362,7 @@
 };
 
 static const struct of_device_id axp20x_of_match[] = {
+	{ .compatible = "x-powers,axp152", .data = (void *) AXP152_ID },
 	{ .compatible = "x-powers,axp202", .data = (void *) AXP202_ID },
 	{ .compatible = "x-powers,axp209", .data = (void *) AXP209_ID },
 	{ .compatible = "x-powers,axp221", .data = (void *) AXP221_ID },
@@ -317,6 +387,18 @@
 };
 MODULE_DEVICE_TABLE(acpi, axp20x_acpi_match);
 
+static const struct regmap_irq_chip axp152_regmap_irq_chip = {
+	.name			= "axp152_irq_chip",
+	.status_base		= AXP152_IRQ1_STATE,
+	.ack_base		= AXP152_IRQ1_STATE,
+	.mask_base		= AXP152_IRQ1_EN,
+	.mask_invert		= true,
+	.init_ack_masked	= true,
+	.irqs			= axp152_regmap_irqs,
+	.num_irqs		= ARRAY_SIZE(axp152_regmap_irqs),
+	.num_regs		= 3,
+};
+
 static const struct regmap_irq_chip axp20x_regmap_irq_chip = {
 	.name			= "axp20x_irq_chip",
 	.status_base		= AXP20X_IRQ1_STATE,
@@ -357,11 +439,16 @@
 
 static struct mfd_cell axp20x_cells[] = {
 	{
-		.name			= "axp20x-pek",
-		.num_resources		= ARRAY_SIZE(axp20x_pek_resources),
-		.resources		= axp20x_pek_resources,
+		.name		= "axp20x-pek",
+		.num_resources	= ARRAY_SIZE(axp20x_pek_resources),
+		.resources	= axp20x_pek_resources,
 	}, {
-		.name			= "axp20x-regulator",
+		.name		= "axp20x-regulator",
+	}, {
+		.name		= "axp20x-usb-power-supply",
+		.of_compatible	= "x-powers,axp202-usb-power-supply",
+		.num_resources	= ARRAY_SIZE(axp20x_usb_power_supply_resources),
+		.resources	= axp20x_usb_power_supply_resources,
 	},
 };
 
@@ -375,6 +462,14 @@
 	},
 };
 
+static struct mfd_cell axp152_cells[] = {
+	{
+		.name			= "axp20x-pek",
+		.num_resources		= ARRAY_SIZE(axp152_pek_resources),
+		.resources		= axp152_pek_resources,
+	},
+};
+
 static struct resource axp288_adc_resources[] = {
 	{
 		.name  = "GPADC",
@@ -513,6 +608,12 @@
 	}
 
 	switch (axp20x->variant) {
+	case AXP152_ID:
+		axp20x->nr_cells = ARRAY_SIZE(axp152_cells);
+		axp20x->cells = axp152_cells;
+		axp20x->regmap_cfg = &axp152_regmap_config;
+		axp20x->regmap_irq_chip = &axp152_regmap_irq_chip;
+		break;
 	case AXP202_ID:
 	case AXP209_ID:
 		axp20x->nr_cells = ARRAY_SIZE(axp20x_cells);
@@ -613,7 +714,6 @@
 static struct i2c_driver axp20x_i2c_driver = {
 	.driver = {
 		.name	= "axp20x",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(axp20x_of_match),
 		.acpi_match_table = ACPI_PTR(axp20x_acpi_match),
 	},
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index e334de0..da2af5b 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -117,7 +117,6 @@
 static struct i2c_driver bcm590xx_i2c_driver = {
 	.driver = {
 		   .name = "bcm590xx",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(bcm590xx_of_match),
 	},
 	.probe = bcm590xx_i2c_probe,
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
index b9a0963..d06e4b4 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -353,7 +353,6 @@
 static struct i2c_driver cros_ec_driver = {
 	.driver	= {
 		.name	= "cros-ec-i2c",
-		.owner	= THIS_MODULE,
 		.pm	= &cros_ec_i2c_pm_ops,
 	},
 	.probe		= cros_ec_i2c_probe,
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 16f228d..30a296b 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -701,6 +701,12 @@
 static SIMPLE_DEV_PM_OPS(cros_ec_spi_pm_ops, cros_ec_spi_suspend,
 			 cros_ec_spi_resume);
 
+static const struct of_device_id cros_ec_spi_of_match[] = {
+	{ .compatible = "google,cros-ec-spi", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cros_ec_spi_of_match);
+
 static const struct spi_device_id cros_ec_spi_id[] = {
 	{ "cros-ec-spi", 0 },
 	{ }
@@ -710,6 +716,7 @@
 static struct spi_driver cros_ec_driver_spi = {
 	.driver	= {
 		.name	= "cros-ec-spi",
+		.of_match_table = of_match_ptr(cros_ec_spi_of_match),
 		.owner	= THIS_MODULE,
 		.pm	= &cros_ec_spi_pm_ops,
 	},
diff --git a/drivers/mfd/da903x.c b/drivers/mfd/da903x.c
index e0a2e0e..ef7fe2a 100644
--- a/drivers/mfd/da903x.c
+++ b/drivers/mfd/da903x.c
@@ -550,7 +550,6 @@
 static struct i2c_driver da903x_driver = {
 	.driver	= {
 		.name	= "da903x",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= da903x_probe,
 	.remove		= da903x_remove,
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index ec39287..0288700 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -195,7 +195,6 @@
 	.id_table = da9052_i2c_id,
 	.driver = {
 		.name = "da9052",
-		.owner = THIS_MODULE,
 #ifdef CONFIG_OF
 		.of_match_table = dialog_dt_ids,
 #endif
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index d4d4c16..b53e100 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -79,7 +79,6 @@
 	.id_table = da9055_i2c_id,
 	.driver = {
 		.name = "da9055-pmic",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9055_of_match),
 	},
 };
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
new file mode 100644
index 0000000..f80d947
--- /dev/null
+++ b/drivers/mfd/da9062-core.c
@@ -0,0 +1,533 @@
+/*
+ * Core, IRQ and I2C device driver for DA9062 PMIC
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/mfd/da9062/core.h>
+#include <linux/mfd/da9062/registers.h>
+#include <linux/regulator/of_regulator.h>
+
+#define	DA9062_REG_EVENT_A_OFFSET	0
+#define	DA9062_REG_EVENT_B_OFFSET	1
+#define	DA9062_REG_EVENT_C_OFFSET	2
+
+static struct regmap_irq da9062_irqs[] = {
+	/* EVENT A */
+	[DA9062_IRQ_ONKEY] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_NONKEY_MASK,
+	},
+	[DA9062_IRQ_ALARM] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_ALARM_MASK,
+	},
+	[DA9062_IRQ_TICK] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_TICK_MASK,
+	},
+	[DA9062_IRQ_WDG_WARN] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_WDG_WARN_MASK,
+	},
+	[DA9062_IRQ_SEQ_RDY] = {
+		.reg_offset = DA9062_REG_EVENT_A_OFFSET,
+		.mask = DA9062AA_M_SEQ_RDY_MASK,
+	},
+	/* EVENT B */
+	[DA9062_IRQ_TEMP] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_TEMP_MASK,
+	},
+	[DA9062_IRQ_LDO_LIM] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_LDO_LIM_MASK,
+	},
+	[DA9062_IRQ_DVC_RDY] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_DVC_RDY_MASK,
+	},
+	[DA9062_IRQ_VDD_WARN] = {
+		.reg_offset = DA9062_REG_EVENT_B_OFFSET,
+		.mask = DA9062AA_M_VDD_WARN_MASK,
+	},
+	/* EVENT C */
+	[DA9062_IRQ_GPI0] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI0_MASK,
+	},
+	[DA9062_IRQ_GPI1] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI1_MASK,
+	},
+	[DA9062_IRQ_GPI2] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI2_MASK,
+	},
+	[DA9062_IRQ_GPI3] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI3_MASK,
+	},
+	[DA9062_IRQ_GPI4] = {
+		.reg_offset = DA9062_REG_EVENT_C_OFFSET,
+		.mask = DA9062AA_M_GPI4_MASK,
+	},
+};
+
+static struct regmap_irq_chip da9062_irq_chip = {
+	.name = "da9062-irq",
+	.irqs = da9062_irqs,
+	.num_irqs = DA9062_NUM_IRQ,
+	.num_regs = 3,
+	.status_base = DA9062AA_EVENT_A,
+	.mask_base = DA9062AA_IRQ_MASK_A,
+	.ack_base = DA9062AA_EVENT_A,
+};
+
+static struct resource da9062_core_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_VDD_WARN, 1, "VDD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_regulators_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_LDO_LIM, 1, "LDO_LIM", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_thermal_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_TEMP, 1, "THERMAL", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_wdt_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_WDG_WARN, 1, "WD_WARN", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_rtc_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_ALARM, 1, "ALARM", IORESOURCE_IRQ),
+	DEFINE_RES_NAMED(DA9062_IRQ_TICK, 1, "TICK", IORESOURCE_IRQ),
+};
+
+static struct resource da9062_onkey_resources[] = {
+	DEFINE_RES_NAMED(DA9062_IRQ_ONKEY, 1, "ONKEY", IORESOURCE_IRQ),
+};
+
+static const struct mfd_cell da9062_devs[] = {
+	{
+		.name		= "da9062-core",
+		.num_resources	= ARRAY_SIZE(da9062_core_resources),
+		.resources	= da9062_core_resources,
+	},
+	{
+		.name		= "da9062-regulators",
+		.num_resources	= ARRAY_SIZE(da9062_regulators_resources),
+		.resources	= da9062_regulators_resources,
+	},
+	{
+		.name		= "da9062-watchdog",
+		.num_resources	= ARRAY_SIZE(da9062_wdt_resources),
+		.resources	= da9062_wdt_resources,
+		.of_compatible  = "dlg,da9062-wdt",
+	},
+	{
+		.name		= "da9062-thermal",
+		.num_resources	= ARRAY_SIZE(da9062_thermal_resources),
+		.resources	= da9062_thermal_resources,
+		.of_compatible  = "dlg,da9062-thermal",
+	},
+	{
+		.name		= "da9062-rtc",
+		.num_resources	= ARRAY_SIZE(da9062_rtc_resources),
+		.resources	= da9062_rtc_resources,
+		.of_compatible  = "dlg,da9062-rtc",
+	},
+	{
+		.name		= "da9062-onkey",
+		.num_resources	= ARRAY_SIZE(da9062_onkey_resources),
+		.resources	= da9062_onkey_resources,
+		.of_compatible = "dlg,da9062-onkey",
+	},
+};
+
+static int da9062_clear_fault_log(struct da9062 *chip)
+{
+	int ret;
+	int fault_log;
+
+	ret = regmap_read(chip->regmap, DA9062AA_FAULT_LOG, &fault_log);
+	if (ret < 0)
+		return ret;
+
+	if (fault_log) {
+		if (fault_log & DA9062AA_TWD_ERROR_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: TWD_ERROR\n");
+		if (fault_log & DA9062AA_POR_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: POR\n");
+		if (fault_log & DA9062AA_VDD_FAULT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: VDD_FAULT\n");
+		if (fault_log & DA9062AA_VDD_START_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: VDD_START\n");
+		if (fault_log & DA9062AA_TEMP_CRIT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: TEMP_CRIT\n");
+		if (fault_log & DA9062AA_KEY_RESET_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: KEY_RESET\n");
+		if (fault_log & DA9062AA_NSHUTDOWN_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: NSHUTDOWN\n");
+		if (fault_log & DA9062AA_WAIT_SHUT_MASK)
+			dev_dbg(chip->dev, "Fault log entry detected: WAIT_SHUT\n");
+
+		ret = regmap_write(chip->regmap, DA9062AA_FAULT_LOG,
+				   fault_log);
+	}
+
+	return ret;
+}
+
+int get_device_type(struct da9062 *chip)
+{
+	int device_id, variant_id, variant_mrc;
+	int ret;
+
+	ret = regmap_read(chip->regmap, DA9062AA_DEVICE_ID, &device_id);
+	if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip ID.\n");
+		return -EIO;
+	}
+	if (device_id != DA9062_PMIC_DEVICE_ID) {
+		dev_err(chip->dev, "Invalid device ID: 0x%02x\n", device_id);
+		return -ENODEV;
+	}
+
+	ret = regmap_read(chip->regmap, DA9062AA_VARIANT_ID, &variant_id);
+	if (ret < 0) {
+		dev_err(chip->dev, "Cannot read chip variant id.\n");
+		return -EIO;
+	}
+
+	dev_info(chip->dev,
+		 "Device detected (device-ID: 0x%02X, var-ID: 0x%02X)\n",
+		 device_id, variant_id);
+
+	variant_mrc = (variant_id & DA9062AA_MRC_MASK) >> DA9062AA_MRC_SHIFT;
+
+	if (variant_mrc < DA9062_PMIC_VARIANT_MRC_AA) {
+		dev_err(chip->dev,
+			"Cannot support variant MRC: 0x%02X\n", variant_mrc);
+		return -ENODEV;
+	}
+
+	return ret;
+}
+
+static const struct regmap_range da9062_aa_readable_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_STATUS_B,
+	}, {
+		.range_min = DA9062AA_STATUS_D,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_IRQ_MASK_A,
+		.range_max = DA9062AA_IRQ_MASK_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_A,
+		.range_max = DA9062AA_GPIO_4,
+	}, {
+		.range_min = DA9062AA_GPIO_WKUP_MODE,
+		.range_max = DA9062AA_BUCK4_CONT,
+	}, {
+		.range_min = DA9062AA_BUCK3_CONT,
+		.range_max = DA9062AA_BUCK3_CONT,
+	}, {
+		.range_min = DA9062AA_LDO1_CONT,
+		.range_max = DA9062AA_LDO4_CONT,
+	}, {
+		.range_min = DA9062AA_DVC_1,
+		.range_max = DA9062AA_DVC_1,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_SECOND_D,
+	}, {
+		.range_min = DA9062AA_SEQ,
+		.range_max = DA9062AA_ID_4_3,
+	}, {
+		.range_min = DA9062AA_ID_12_11,
+		.range_max = DA9062AA_ID_16_15,
+	}, {
+		.range_min = DA9062AA_ID_22_21,
+		.range_max = DA9062AA_ID_32_31,
+	}, {
+		.range_min = DA9062AA_SEQ_A,
+		.range_max = DA9062AA_BUCK3_CFG,
+	}, {
+		.range_min = DA9062AA_VBUCK2_A,
+		.range_max = DA9062AA_VBUCK4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK3_A,
+		.range_max = DA9062AA_VBUCK3_A,
+	}, {
+		.range_min = DA9062AA_VLDO1_A,
+		.range_max = DA9062AA_VLDO4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK2_B,
+		.range_max = DA9062AA_VBUCK4_B,
+	}, {
+		.range_min = DA9062AA_VBUCK3_B,
+		.range_max = DA9062AA_VBUCK3_B,
+	}, {
+		.range_min = DA9062AA_VLDO1_B,
+		.range_max = DA9062AA_VLDO4_B,
+	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
+		.range_min = DA9062AA_INTERFACE,
+		.range_max = DA9062AA_CONFIG_E,
+	}, {
+		.range_min = DA9062AA_CONFIG_G,
+		.range_max = DA9062AA_CONFIG_K,
+	}, {
+		.range_min = DA9062AA_CONFIG_M,
+		.range_max = DA9062AA_CONFIG_M,
+	}, {
+		.range_min = DA9062AA_TRIM_CLDR,
+		.range_max = DA9062AA_GP_ID_19,
+	}, {
+		.range_min = DA9062AA_DEVICE_ID,
+		.range_max = DA9062AA_CONFIG_ID,
+	},
+};
+
+static const struct regmap_range da9062_aa_writeable_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_PAGE_CON,
+	}, {
+		.range_min = DA9062AA_FAULT_LOG,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_IRQ_MASK_A,
+		.range_max = DA9062AA_IRQ_MASK_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_A,
+		.range_max = DA9062AA_GPIO_4,
+	}, {
+		.range_min = DA9062AA_GPIO_WKUP_MODE,
+		.range_max = DA9062AA_BUCK4_CONT,
+	}, {
+		.range_min = DA9062AA_BUCK3_CONT,
+		.range_max = DA9062AA_BUCK3_CONT,
+	}, {
+		.range_min = DA9062AA_LDO1_CONT,
+		.range_max = DA9062AA_LDO4_CONT,
+	}, {
+		.range_min = DA9062AA_DVC_1,
+		.range_max = DA9062AA_DVC_1,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_ALARM_Y,
+	}, {
+		.range_min = DA9062AA_SEQ,
+		.range_max = DA9062AA_ID_4_3,
+	}, {
+		.range_min = DA9062AA_ID_12_11,
+		.range_max = DA9062AA_ID_16_15,
+	}, {
+		.range_min = DA9062AA_ID_22_21,
+		.range_max = DA9062AA_ID_32_31,
+	}, {
+		.range_min = DA9062AA_SEQ_A,
+		.range_max = DA9062AA_BUCK3_CFG,
+	}, {
+		.range_min = DA9062AA_VBUCK2_A,
+		.range_max = DA9062AA_VBUCK4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK3_A,
+		.range_max = DA9062AA_VBUCK3_A,
+	}, {
+		.range_min = DA9062AA_VLDO1_A,
+		.range_max = DA9062AA_VLDO4_A,
+	}, {
+		.range_min = DA9062AA_VBUCK2_B,
+		.range_max = DA9062AA_VBUCK4_B,
+	}, {
+		.range_min = DA9062AA_VBUCK3_B,
+		.range_max = DA9062AA_VBUCK3_B,
+	}, {
+		.range_min = DA9062AA_VLDO1_B,
+		.range_max = DA9062AA_VLDO4_B,
+	}, {
+		.range_min = DA9062AA_BBAT_CONT,
+		.range_max = DA9062AA_BBAT_CONT,
+	}, {
+		.range_min = DA9062AA_GP_ID_0,
+		.range_max = DA9062AA_GP_ID_19,
+	},
+};
+
+static const struct regmap_range da9062_aa_volatile_ranges[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_STATUS_B,
+	}, {
+		.range_min = DA9062AA_STATUS_D,
+		.range_max = DA9062AA_EVENT_C,
+	}, {
+		.range_min = DA9062AA_CONTROL_F,
+		.range_max = DA9062AA_CONTROL_F,
+	}, {
+		.range_min = DA9062AA_COUNT_S,
+		.range_max = DA9062AA_SECOND_D,
+	},
+};
+
+static const struct regmap_access_table da9062_aa_readable_table = {
+	.yes_ranges = da9062_aa_readable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_readable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_writeable_table = {
+	.yes_ranges = da9062_aa_writeable_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_writeable_ranges),
+};
+
+static const struct regmap_access_table da9062_aa_volatile_table = {
+	.yes_ranges = da9062_aa_volatile_ranges,
+	.n_yes_ranges = ARRAY_SIZE(da9062_aa_volatile_ranges),
+};
+
+static const struct regmap_range_cfg da9062_range_cfg[] = {
+	{
+		.range_min = DA9062AA_PAGE_CON,
+		.range_max = DA9062AA_CONFIG_ID,
+		.selector_reg = DA9062AA_PAGE_CON,
+		.selector_mask = 1 << DA9062_I2C_PAGE_SEL_SHIFT,
+		.selector_shift = DA9062_I2C_PAGE_SEL_SHIFT,
+		.window_start = 0,
+		.window_len = 256,
+	}
+};
+
+static struct regmap_config da9062_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.ranges = da9062_range_cfg,
+	.num_ranges = ARRAY_SIZE(da9062_range_cfg),
+	.max_register = DA9062AA_CONFIG_ID,
+	.cache_type = REGCACHE_RBTREE,
+	.rd_table = &da9062_aa_readable_table,
+	.wr_table = &da9062_aa_writeable_table,
+	.volatile_table = &da9062_aa_volatile_table,
+};
+
+static int da9062_i2c_probe(struct i2c_client *i2c,
+	const struct i2c_device_id *id)
+{
+	struct da9062 *chip;
+	unsigned int irq_base;
+	int ret;
+
+	chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, chip);
+	chip->dev = &i2c->dev;
+
+	if (!i2c->irq) {
+		dev_err(chip->dev, "No IRQ configured\n");
+		return -EINVAL;
+	}
+
+	chip->regmap = devm_regmap_init_i2c(i2c, &da9062_regmap_config);
+	if (IS_ERR(chip->regmap)) {
+		ret = PTR_ERR(chip->regmap);
+		dev_err(chip->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = da9062_clear_fault_log(chip);
+	if (ret < 0)
+		dev_warn(chip->dev, "Cannot clear fault log\n");
+
+	ret = get_device_type(chip);
+	if (ret)
+		return ret;
+
+	ret = regmap_add_irq_chip(chip->regmap, i2c->irq,
+			IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
+			-1, &da9062_irq_chip,
+			&chip->regmap_irq);
+	if (ret) {
+		dev_err(chip->dev, "Failed to request IRQ %d: %d\n",
+			i2c->irq, ret);
+		return ret;
+	}
+
+	irq_base = regmap_irq_chip_get_base(chip->regmap_irq);
+
+	ret = mfd_add_devices(chip->dev, PLATFORM_DEVID_NONE, da9062_devs,
+			      ARRAY_SIZE(da9062_devs), NULL, irq_base,
+			      NULL);
+	if (ret) {
+		dev_err(chip->dev, "Cannot register child devices\n");
+		regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int da9062_i2c_remove(struct i2c_client *i2c)
+{
+	struct da9062 *chip = i2c_get_clientdata(i2c);
+
+	mfd_remove_devices(chip->dev);
+	regmap_del_irq_chip(i2c->irq, chip->regmap_irq);
+
+	return 0;
+}
+
+static const struct i2c_device_id da9062_i2c_id[] = {
+	{ "da9062", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, da9062_i2c_id);
+
+static const struct of_device_id da9062_dt_ids[] = {
+	{ .compatible = "dlg,da9062", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, da9062_dt_ids);
+
+static struct i2c_driver da9062_i2c_driver = {
+	.driver = {
+		.name = "da9062",
+		.of_match_table = of_match_ptr(da9062_dt_ids),
+	},
+	.probe    = da9062_i2c_probe,
+	.remove   = da9062_i2c_remove,
+	.id_table = da9062_i2c_id,
+};
+
+module_i2c_driver(da9062_i2c_driver);
+
+MODULE_DESCRIPTION("Core device driver for Dialog DA9062");
+MODULE_AUTHOR("Steve Twiss <stwiss.opensource@diasemi.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 6f3a7c0..2d4e3e0 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -264,7 +264,6 @@
 static struct i2c_driver da9063_i2c_driver = {
 	.driver = {
 		.name = "da9063",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9063_dt_ids),
 	},
 	.probe    = da9063_i2c_probe,
diff --git a/drivers/mfd/da9063-irq.c b/drivers/mfd/da9063-irq.c
index eaf1ec9..2630263 100644
--- a/drivers/mfd/da9063-irq.c
+++ b/drivers/mfd/da9063-irq.c
@@ -77,6 +77,10 @@
 		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
 		.mask = DA9063_M_UVOV,
 	},
+	[DA9063_IRQ_DVC_RDY] = {
+		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
+		.mask = DA9063_M_DVC_RDY,
+	},
 	[DA9063_IRQ_VDD_MON] = {
 		.reg_offset = DA9063_REG_EVENT_B_OFFSET,
 		.mask = DA9063_M_VDD_MON,
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 8b14740..e6e4bac 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2654,7 +2654,6 @@
 {
 	irq_set_chip_and_handler(virq, &prcmu_irq_chip,
 				handle_simple_irq);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 5991fad..a76eb6e 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -207,7 +207,7 @@
 
 static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct pcap_chip *pcap = irq_get_handler_data(irq);
+	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
 
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	queue_work(pcap->workqueue, &pcap->isr_work);
@@ -463,11 +463,7 @@
 	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
 		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
 		irq_set_chip_data(i, pcap);
-#ifdef CONFIG_ARM
-		set_irq_flags(i, IRQF_VALID);
-#else
-		irq_set_noprobe(i);
-#endif
+		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	/* mask/ack all PCAP interrupts */
@@ -476,8 +472,7 @@
 	pcap->msr = PCAP_MASK_ALL_INTERRUPT;
 
 	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
-	irq_set_handler_data(spi->irq, pcap);
-	irq_set_chained_handler(spi->irq, pcap_irq_handler);
+	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
 	irq_set_irq_wake(spi->irq, 1);
 
 	/* ADC */
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 49f39fe..9131cdc 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -350,11 +350,11 @@
 			irq_set_chip_and_handler(irq, &egpio_muxed_chip,
 						 handle_simple_irq);
 			irq_set_chip_data(irq, ei);
-			set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+			irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		irq_set_irq_type(ei->chained_irq, IRQ_TYPE_EDGE_RISING);
-		irq_set_handler_data(ei->chained_irq, ei);
-		irq_set_chained_handler(ei->chained_irq, egpio_handler);
+		irq_set_chained_handler_and_data(ei->chained_irq,
+						 egpio_handler, ei);
 		ack_irqs(ei);
 
 		device_init_wakeup(&pdev->dev, 1);
@@ -376,7 +376,7 @@
 		irq_end = ei->irq_start + ei->nirqs;
 		for (irq = ei->irq_start; irq < irq_end; irq++) {
 			irq_set_chip_and_handler(irq, NULL, NULL);
-			set_irq_flags(irq, 0);
+			irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		}
 		irq_set_chained_handler(ei->chained_irq, NULL);
 		device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index b54baad..1bd5b04 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -330,11 +330,7 @@
 		irq_set_chip_and_handler(irq, &htcpld_muxed_chip,
 					 handle_simple_irq);
 		irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#else
-		irq_set_probe(irq);
-#endif
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	return ret;
diff --git a/drivers/mfd/intel-lpss-acpi.c b/drivers/mfd/intel-lpss-acpi.c
new file mode 100644
index 0000000..0d92d73
--- /dev/null
+++ b/drivers/mfd/intel-lpss-acpi.c
@@ -0,0 +1,84 @@
+/*
+ * Intel LPSS ACPI support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#include "intel-lpss.h"
+
+static const struct intel_lpss_platform_info spt_info = {
+	.clk_rate = 120000000,
+};
+
+static const struct acpi_device_id intel_lpss_acpi_ids[] = {
+	/* SPT */
+	{ "INT3446", (kernel_ulong_t)&spt_info },
+	{ "INT3447", (kernel_ulong_t)&spt_info },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, intel_lpss_acpi_ids);
+
+static int intel_lpss_acpi_probe(struct platform_device *pdev)
+{
+	struct intel_lpss_platform_info *info;
+	const struct acpi_device_id *id;
+
+	id = acpi_match_device(intel_lpss_acpi_ids, &pdev->dev);
+	if (!id)
+		return -ENODEV;
+
+	info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+			    GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	info->irq = platform_get_irq(pdev, 0);
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return intel_lpss_probe(&pdev->dev, info);
+}
+
+static int intel_lpss_acpi_remove(struct platform_device *pdev)
+{
+	intel_lpss_remove(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static INTEL_LPSS_PM_OPS(intel_lpss_acpi_pm_ops);
+
+static struct platform_driver intel_lpss_acpi_driver = {
+	.probe = intel_lpss_acpi_probe,
+	.remove = intel_lpss_acpi_remove,
+	.driver = {
+		.name = "intel-lpss",
+		.acpi_match_table = intel_lpss_acpi_ids,
+		.pm = &intel_lpss_acpi_pm_ops,
+	},
+};
+
+module_platform_driver(intel_lpss_acpi_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS ACPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
new file mode 100644
index 0000000..9236dff
--- /dev/null
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -0,0 +1,113 @@
+/*
+ * Intel LPSS PCI support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "intel-lpss.h"
+
+static int intel_lpss_pci_probe(struct pci_dev *pdev,
+				const struct pci_device_id *id)
+{
+	struct intel_lpss_platform_info *info;
+	int ret;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ret;
+
+	info = devm_kmemdup(&pdev->dev, (void *)id->driver_data, sizeof(*info),
+			    GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->mem = &pdev->resource[0];
+	info->irq = pdev->irq;
+
+	/* Probably it is enough to set this for iDMA capable devices only */
+	pci_set_master(pdev);
+
+	ret = intel_lpss_probe(&pdev->dev, info);
+	if (ret)
+		return ret;
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_allow(&pdev->dev);
+
+	return 0;
+}
+
+static void intel_lpss_pci_remove(struct pci_dev *pdev)
+{
+	pm_runtime_forbid(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
+	intel_lpss_remove(&pdev->dev);
+}
+
+static INTEL_LPSS_PM_OPS(intel_lpss_pci_pm_ops);
+
+static const struct intel_lpss_platform_info spt_info = {
+	.clk_rate = 120000000,
+};
+
+static const struct intel_lpss_platform_info spt_uart_info = {
+	.clk_rate = 120000000,
+	.clk_con_id = "baudclk",
+};
+
+static const struct pci_device_id intel_lpss_pci_ids[] = {
+	/* SPT-LP */
+	{ PCI_VDEVICE(INTEL, 0x9d27), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0x9d28), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0x9d29), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d2a), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d60), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d61), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d62), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d63), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d64), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d65), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0x9d66), (kernel_ulong_t)&spt_uart_info },
+	/* SPT-H */
+	{ PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
+	{ PCI_VDEVICE(INTEL, 0xa129), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0xa12a), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0xa160), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_info },
+	{ PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info },
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+
+static struct pci_driver intel_lpss_pci_driver = {
+	.name = "intel-lpss",
+	.id_table = intel_lpss_pci_ids,
+	.probe = intel_lpss_pci_probe,
+	.remove = intel_lpss_pci_remove,
+	.driver = {
+		.pm = &intel_lpss_pci_pm_ops,
+	},
+};
+
+module_pci_driver(intel_lpss_pci_driver);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
new file mode 100644
index 0000000..fdf4d5c
--- /dev/null
+++ b/drivers/mfd/intel-lpss.c
@@ -0,0 +1,524 @@
+/*
+ * Intel Sunrisepoint LPSS core support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *          Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ *          Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/seq_file.h>
+
+#include "intel-lpss.h"
+
+#define LPSS_DEV_OFFSET		0x000
+#define LPSS_DEV_SIZE		0x200
+#define LPSS_PRIV_OFFSET	0x200
+#define LPSS_PRIV_SIZE		0x100
+#define LPSS_IDMA64_OFFSET	0x800
+#define LPSS_IDMA64_SIZE	0x800
+
+/* Offsets from lpss->priv */
+#define LPSS_PRIV_RESETS		0x04
+#define LPSS_PRIV_RESETS_FUNC		BIT(2)
+#define LPSS_PRIV_RESETS_IDMA		0x3
+
+#define LPSS_PRIV_ACTIVELTR		0x10
+#define LPSS_PRIV_IDLELTR		0x14
+
+#define LPSS_PRIV_LTR_REQ		BIT(15)
+#define LPSS_PRIV_LTR_SCALE_MASK	0xc00
+#define LPSS_PRIV_LTR_SCALE_1US		0x800
+#define LPSS_PRIV_LTR_SCALE_32US	0xc00
+#define LPSS_PRIV_LTR_VALUE_MASK	0x3ff
+
+#define LPSS_PRIV_SSP_REG		0x20
+#define LPSS_PRIV_SSP_REG_DIS_DMA_FIN	BIT(0)
+
+#define LPSS_PRIV_REMAP_ADDR_LO		0x40
+#define LPSS_PRIV_REMAP_ADDR_HI		0x44
+
+#define LPSS_PRIV_CAPS			0xfc
+#define LPSS_PRIV_CAPS_NO_IDMA		BIT(8)
+#define LPSS_PRIV_CAPS_TYPE_SHIFT	4
+#define LPSS_PRIV_CAPS_TYPE_MASK	(0xf << LPSS_PRIV_CAPS_TYPE_SHIFT)
+
+/* This matches the type field in CAPS register */
+enum intel_lpss_dev_type {
+	LPSS_DEV_I2C = 0,
+	LPSS_DEV_UART,
+	LPSS_DEV_SPI,
+};
+
+struct intel_lpss {
+	const struct intel_lpss_platform_info *info;
+	enum intel_lpss_dev_type type;
+	struct clk *clk;
+	struct clk_lookup *clock;
+	const struct mfd_cell *cell;
+	struct device *dev;
+	void __iomem *priv;
+	int devid;
+	u32 caps;
+	u32 active_ltr;
+	u32 idle_ltr;
+	struct dentry *debugfs;
+};
+
+static const struct resource intel_lpss_dev_resources[] = {
+	DEFINE_RES_MEM_NAMED(LPSS_DEV_OFFSET, LPSS_DEV_SIZE, "lpss_dev"),
+	DEFINE_RES_MEM_NAMED(LPSS_PRIV_OFFSET, LPSS_PRIV_SIZE, "lpss_priv"),
+	DEFINE_RES_IRQ(0),
+};
+
+static const struct resource intel_lpss_idma64_resources[] = {
+	DEFINE_RES_MEM(LPSS_IDMA64_OFFSET, LPSS_IDMA64_SIZE),
+	DEFINE_RES_IRQ(0),
+};
+
+#define LPSS_IDMA64_DRIVER_NAME		"idma64"
+
+/*
+ * Cells needs to be ordered so that the iDMA is created first. This is
+ * because we need to be sure the DMA is available when the host controller
+ * driver is probed.
+ */
+static const struct mfd_cell intel_lpss_idma64_cell = {
+	.name = LPSS_IDMA64_DRIVER_NAME,
+	.num_resources = ARRAY_SIZE(intel_lpss_idma64_resources),
+	.resources = intel_lpss_idma64_resources,
+};
+
+static const struct mfd_cell intel_lpss_i2c_cell = {
+	.name = "i2c_designware",
+	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+	.resources = intel_lpss_dev_resources,
+};
+
+static const struct mfd_cell intel_lpss_uart_cell = {
+	.name = "dw-apb-uart",
+	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+	.resources = intel_lpss_dev_resources,
+};
+
+static const struct mfd_cell intel_lpss_spi_cell = {
+	.name = "pxa2xx-spi",
+	.num_resources = ARRAY_SIZE(intel_lpss_dev_resources),
+	.resources = intel_lpss_dev_resources,
+};
+
+static DEFINE_IDA(intel_lpss_devid_ida);
+static struct dentry *intel_lpss_debugfs;
+
+static int intel_lpss_request_dma_module(const char *name)
+{
+	static bool intel_lpss_dma_requested;
+
+	if (intel_lpss_dma_requested)
+		return 0;
+
+	intel_lpss_dma_requested = true;
+	return request_module("%s", name);
+}
+
+static void intel_lpss_cache_ltr(struct intel_lpss *lpss)
+{
+	lpss->active_ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
+	lpss->idle_ltr = readl(lpss->priv + LPSS_PRIV_IDLELTR);
+}
+
+static int intel_lpss_debugfs_add(struct intel_lpss *lpss)
+{
+	struct dentry *dir;
+
+	dir = debugfs_create_dir(dev_name(lpss->dev), intel_lpss_debugfs);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	/* Cache the values into lpss structure */
+	intel_lpss_cache_ltr(lpss);
+
+	debugfs_create_x32("capabilities", S_IRUGO, dir, &lpss->caps);
+	debugfs_create_x32("active_ltr", S_IRUGO, dir, &lpss->active_ltr);
+	debugfs_create_x32("idle_ltr", S_IRUGO, dir, &lpss->idle_ltr);
+
+	lpss->debugfs = dir;
+	return 0;
+}
+
+static void intel_lpss_debugfs_remove(struct intel_lpss *lpss)
+{
+	debugfs_remove_recursive(lpss->debugfs);
+}
+
+static void intel_lpss_ltr_set(struct device *dev, s32 val)
+{
+	struct intel_lpss *lpss = dev_get_drvdata(dev);
+	u32 ltr;
+
+	/*
+	 * Program latency tolerance (LTR) accordingly what has been asked
+	 * by the PM QoS layer or disable it in case we were passed
+	 * negative value or PM_QOS_LATENCY_ANY.
+	 */
+	ltr = readl(lpss->priv + LPSS_PRIV_ACTIVELTR);
+
+	if (val == PM_QOS_LATENCY_ANY || val < 0) {
+		ltr &= ~LPSS_PRIV_LTR_REQ;
+	} else {
+		ltr |= LPSS_PRIV_LTR_REQ;
+		ltr &= ~LPSS_PRIV_LTR_SCALE_MASK;
+		ltr &= ~LPSS_PRIV_LTR_VALUE_MASK;
+
+		if (val > LPSS_PRIV_LTR_VALUE_MASK)
+			ltr |= LPSS_PRIV_LTR_SCALE_32US | val >> 5;
+		else
+			ltr |= LPSS_PRIV_LTR_SCALE_1US | val;
+	}
+
+	if (ltr == lpss->active_ltr)
+		return;
+
+	writel(ltr, lpss->priv + LPSS_PRIV_ACTIVELTR);
+	writel(ltr, lpss->priv + LPSS_PRIV_IDLELTR);
+
+	/* Cache the values into lpss structure */
+	intel_lpss_cache_ltr(lpss);
+}
+
+static void intel_lpss_ltr_expose(struct intel_lpss *lpss)
+{
+	lpss->dev->power.set_latency_tolerance = intel_lpss_ltr_set;
+	dev_pm_qos_expose_latency_tolerance(lpss->dev);
+}
+
+static void intel_lpss_ltr_hide(struct intel_lpss *lpss)
+{
+	dev_pm_qos_hide_latency_tolerance(lpss->dev);
+	lpss->dev->power.set_latency_tolerance = NULL;
+}
+
+static int intel_lpss_assign_devs(struct intel_lpss *lpss)
+{
+	unsigned int type;
+
+	type = lpss->caps & LPSS_PRIV_CAPS_TYPE_MASK;
+	type >>= LPSS_PRIV_CAPS_TYPE_SHIFT;
+
+	switch (type) {
+	case LPSS_DEV_I2C:
+		lpss->cell = &intel_lpss_i2c_cell;
+		break;
+	case LPSS_DEV_UART:
+		lpss->cell = &intel_lpss_uart_cell;
+		break;
+	case LPSS_DEV_SPI:
+		lpss->cell = &intel_lpss_spi_cell;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	lpss->type = type;
+
+	return 0;
+}
+
+static bool intel_lpss_has_idma(const struct intel_lpss *lpss)
+{
+	return (lpss->caps & LPSS_PRIV_CAPS_NO_IDMA) == 0;
+}
+
+static void intel_lpss_set_remap_addr(const struct intel_lpss *lpss)
+{
+	resource_size_t addr = lpss->info->mem->start;
+
+	writel(addr, lpss->priv + LPSS_PRIV_REMAP_ADDR_LO);
+#if BITS_PER_LONG > 32
+	writel(addr >> 32, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
+#else
+	writel(0, lpss->priv + LPSS_PRIV_REMAP_ADDR_HI);
+#endif
+}
+
+static void intel_lpss_deassert_reset(const struct intel_lpss *lpss)
+{
+	u32 value = LPSS_PRIV_RESETS_FUNC | LPSS_PRIV_RESETS_IDMA;
+
+	/* Bring out the device from reset */
+	writel(value, lpss->priv + LPSS_PRIV_RESETS);
+}
+
+static void intel_lpss_init_dev(const struct intel_lpss *lpss)
+{
+	u32 value = LPSS_PRIV_SSP_REG_DIS_DMA_FIN;
+
+	intel_lpss_deassert_reset(lpss);
+
+	if (!intel_lpss_has_idma(lpss))
+		return;
+
+	intel_lpss_set_remap_addr(lpss);
+
+	/* Make sure that SPI multiblock DMA transfers are re-enabled */
+	if (lpss->type == LPSS_DEV_SPI)
+		writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
+}
+
+static void intel_lpss_unregister_clock_tree(struct clk *clk)
+{
+	struct clk *parent;
+
+	while (clk) {
+		parent = clk_get_parent(clk);
+		clk_unregister(clk);
+		clk = parent;
+	}
+}
+
+static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
+					     const char *devname,
+					     struct clk **clk)
+{
+	char name[32];
+	struct clk *tmp = *clk;
+
+	snprintf(name, sizeof(name), "%s-enable", devname);
+	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
+				lpss->priv, 0, 0, NULL);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+
+	snprintf(name, sizeof(name), "%s-div", devname);
+	tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp),
+					      0, lpss->priv, 1, 15, 16, 15, 0,
+					      NULL);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+	*clk = tmp;
+
+	snprintf(name, sizeof(name), "%s-update", devname);
+	tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
+				CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+	*clk = tmp;
+
+	return 0;
+}
+
+static int intel_lpss_register_clock(struct intel_lpss *lpss)
+{
+	const struct mfd_cell *cell = lpss->cell;
+	struct clk *clk;
+	char devname[24];
+	int ret;
+
+	if (!lpss->info->clk_rate)
+		return 0;
+
+	/* Root clock */
+	clk = clk_register_fixed_rate(NULL, dev_name(lpss->dev), NULL,
+				      CLK_IS_ROOT, lpss->info->clk_rate);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	snprintf(devname, sizeof(devname), "%s.%d", cell->name, lpss->devid);
+
+	/*
+	 * Support for clock divider only if it has some preset value.
+	 * Otherwise we assume that the divider is not used.
+	 */
+	if (lpss->type != LPSS_DEV_I2C) {
+		ret = intel_lpss_register_clock_divider(lpss, devname, &clk);
+		if (ret)
+			goto err_clk_register;
+	}
+
+	ret = -ENOMEM;
+
+	/* Clock for the host controller */
+	lpss->clock = clkdev_create(clk, lpss->info->clk_con_id, "%s", devname);
+	if (!lpss->clock)
+		goto err_clk_register;
+
+	lpss->clk = clk;
+
+	return 0;
+
+err_clk_register:
+	intel_lpss_unregister_clock_tree(clk);
+
+	return ret;
+}
+
+static void intel_lpss_unregister_clock(struct intel_lpss *lpss)
+{
+	if (IS_ERR_OR_NULL(lpss->clk))
+		return;
+
+	clkdev_drop(lpss->clock);
+	intel_lpss_unregister_clock_tree(lpss->clk);
+}
+
+int intel_lpss_probe(struct device *dev,
+		     const struct intel_lpss_platform_info *info)
+{
+	struct intel_lpss *lpss;
+	int ret;
+
+	if (!info || !info->mem || info->irq <= 0)
+		return -EINVAL;
+
+	lpss = devm_kzalloc(dev, sizeof(*lpss), GFP_KERNEL);
+	if (!lpss)
+		return -ENOMEM;
+
+	lpss->priv = devm_ioremap(dev, info->mem->start + LPSS_PRIV_OFFSET,
+				  LPSS_PRIV_SIZE);
+	if (!lpss->priv)
+		return -ENOMEM;
+
+	lpss->info = info;
+	lpss->dev = dev;
+	lpss->caps = readl(lpss->priv + LPSS_PRIV_CAPS);
+
+	dev_set_drvdata(dev, lpss);
+
+	ret = intel_lpss_assign_devs(lpss);
+	if (ret)
+		return ret;
+
+	intel_lpss_init_dev(lpss);
+
+	lpss->devid = ida_simple_get(&intel_lpss_devid_ida, 0, 0, GFP_KERNEL);
+	if (lpss->devid < 0)
+		return lpss->devid;
+
+	ret = intel_lpss_register_clock(lpss);
+	if (ret)
+		goto err_clk_register;
+
+	intel_lpss_ltr_expose(lpss);
+
+	ret = intel_lpss_debugfs_add(lpss);
+	if (ret)
+		dev_warn(dev, "Failed to create debugfs entries\n");
+
+	if (intel_lpss_has_idma(lpss)) {
+		/*
+		 * Ensure the DMA driver is loaded before the host
+		 * controller device appears, so that the host controller
+		 * driver can request its DMA channels as early as
+		 * possible.
+		 *
+		 * If the DMA module is not there that's OK as well.
+		 */
+		intel_lpss_request_dma_module(LPSS_IDMA64_DRIVER_NAME);
+
+		ret = mfd_add_devices(dev, lpss->devid, &intel_lpss_idma64_cell,
+				      1, info->mem, info->irq, NULL);
+		if (ret)
+			dev_warn(dev, "Failed to add %s, fallback to PIO\n",
+				 LPSS_IDMA64_DRIVER_NAME);
+	}
+
+	ret = mfd_add_devices(dev, lpss->devid, lpss->cell,
+			      1, info->mem, info->irq, NULL);
+	if (ret)
+		goto err_remove_ltr;
+
+	return 0;
+
+err_remove_ltr:
+	intel_lpss_debugfs_remove(lpss);
+	intel_lpss_ltr_hide(lpss);
+
+err_clk_register:
+	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_probe);
+
+void intel_lpss_remove(struct device *dev)
+{
+	struct intel_lpss *lpss = dev_get_drvdata(dev);
+
+	mfd_remove_devices(dev);
+	intel_lpss_debugfs_remove(lpss);
+	intel_lpss_ltr_hide(lpss);
+	intel_lpss_unregister_clock(lpss);
+	ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
+}
+EXPORT_SYMBOL_GPL(intel_lpss_remove);
+
+static int resume_lpss_device(struct device *dev, void *data)
+{
+	pm_runtime_resume(dev);
+	return 0;
+}
+
+int intel_lpss_prepare(struct device *dev)
+{
+	/*
+	 * Resume both child devices before entering system sleep. This
+	 * ensures that they are in proper state before they get suspended.
+	 */
+	device_for_each_child_reverse(dev, NULL, resume_lpss_device);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_prepare);
+
+int intel_lpss_suspend(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_suspend);
+
+int intel_lpss_resume(struct device *dev)
+{
+	struct intel_lpss *lpss = dev_get_drvdata(dev);
+
+	intel_lpss_init_dev(lpss);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_lpss_resume);
+
+static int __init intel_lpss_init(void)
+{
+	intel_lpss_debugfs = debugfs_create_dir("intel_lpss", NULL);
+	return 0;
+}
+module_init(intel_lpss_init);
+
+static void __exit intel_lpss_exit(void)
+{
+	debugfs_remove(intel_lpss_debugfs);
+}
+module_exit(intel_lpss_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_DESCRIPTION("Intel LPSS core driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
new file mode 100644
index 0000000..f28cb28
--- /dev/null
+++ b/drivers/mfd/intel-lpss.h
@@ -0,0 +1,62 @@
+/*
+ * Intel LPSS core support.
+ *
+ * Copyright (C) 2015, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_INTEL_LPSS_H
+#define __MFD_INTEL_LPSS_H
+
+struct device;
+struct resource;
+
+struct intel_lpss_platform_info {
+	struct resource *mem;
+	int irq;
+	unsigned long clk_rate;
+	const char *clk_con_id;
+};
+
+int intel_lpss_probe(struct device *dev,
+		     const struct intel_lpss_platform_info *info);
+void intel_lpss_remove(struct device *dev);
+
+#ifdef CONFIG_PM
+int intel_lpss_prepare(struct device *dev);
+int intel_lpss_suspend(struct device *dev);
+int intel_lpss_resume(struct device *dev);
+
+#ifdef CONFIG_PM_SLEEP
+#define INTEL_LPSS_SLEEP_PM_OPS			\
+	.prepare = intel_lpss_prepare,		\
+	.suspend = intel_lpss_suspend,		\
+	.resume = intel_lpss_resume,		\
+	.freeze = intel_lpss_suspend,		\
+	.thaw = intel_lpss_resume,		\
+	.poweroff = intel_lpss_suspend,		\
+	.restore = intel_lpss_resume,
+#endif
+
+#define INTEL_LPSS_RUNTIME_PM_OPS		\
+	.runtime_suspend = intel_lpss_suspend,	\
+	.runtime_resume = intel_lpss_resume,
+
+#else /* !CONFIG_PM */
+#define INTEL_LPSS_SLEEP_PM_OPS
+#define INTEL_LPSS_RUNTIME_PM_OPS
+#endif /* CONFIG_PM */
+
+#define INTEL_LPSS_PM_OPS(name)			\
+const struct dev_pm_ops name = {		\
+	INTEL_LPSS_SLEEP_PM_OPS			\
+	INTEL_LPSS_RUNTIME_PM_OPS		\
+}
+
+#endif /* __MFD_INTEL_LPSS_H */
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index a00ddd9..d9e15cf 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -176,7 +176,7 @@
 MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
 
 #if defined(CONFIG_ACPI)
-static struct acpi_device_id intel_soc_pmic_acpi_match[] = {
+static const struct acpi_device_id intel_soc_pmic_acpi_match[] = {
 	{"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc},
 	{ },
 };
@@ -186,7 +186,6 @@
 static struct i2c_driver intel_soc_pmic_i2c_driver = {
 	.driver = {
 		.name = "intel_soc_pmic_i2c",
-		.owner = THIS_MODULE,
 		.pm = &intel_soc_pmic_pm_ops,
 		.acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
 	},
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 8df3266..a41859c 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -53,8 +53,8 @@
 	tx->buf[bp++] = checksum;
 	tx->len = bp;
 	tx->index = 0;
-	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
-		       tx->buf, tx->len, true);
+	print_hex_dump_debug("data: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     tx->buf, tx->len, true);
 
 	/* Enable interrupt */
 	val = readl(micro->base + UTCR3);
@@ -242,7 +242,7 @@
 	return data[1] << 8 | data[0];
 }
 
-static void ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
+static void __init ipaq_micro_eeprom_dump(struct ipaq_micro *micro)
 {
 	u8 dump[256];
 	char *str;
@@ -250,7 +250,7 @@
 	ipaq_micro_eeprom_read(micro, 0, 128, dump);
 	str = ipaq_micro_str(dump, 10);
 	if (str) {
-		dev_info(micro->dev, "HM version %s\n", str);
+		dev_info(micro->dev, "HW version %s\n", str);
 		kfree(str);
 	}
 	str = ipaq_micro_str(dump+10, 40);
@@ -281,8 +281,8 @@
 	dev_info(micro->dev, "RAM size: %u KiB\n", ipaq_micro_to_u16(dump+92));
 	dev_info(micro->dev, "screen: %u x %u\n",
 		 ipaq_micro_to_u16(dump+94), ipaq_micro_to_u16(dump+96));
-	print_hex_dump(KERN_DEBUG, "eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
-		       dump, 256, true);
+	print_hex_dump_debug("eeprom: ", DUMP_PREFIX_OFFSET, 16, 1,
+			     dump, 256, true);
 
 }
 
@@ -386,7 +386,7 @@
 	return 0;
 }
 
-static int micro_probe(struct platform_device *pdev)
+static int __init micro_probe(struct platform_device *pdev)
 {
 	struct ipaq_micro *micro;
 	struct resource *res;
@@ -448,21 +448,6 @@
 	return 0;
 }
 
-static int micro_remove(struct platform_device *pdev)
-{
-	struct ipaq_micro *micro = platform_get_drvdata(pdev);
-	u32 val;
-
-	mfd_remove_devices(&pdev->dev);
-
-	val = readl(micro->base + UTCR3);
-	val &= ~(UTCR3_RXE | UTCR3_RIE); /* disable receive interrupt */
-	val &= ~(UTCR3_TXE | UTCR3_TIE); /* disable transmit interrupt */
-	writel(val, micro->base + UTCR3);
-
-	return 0;
-}
-
 static const struct dev_pm_ops micro_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(NULL, micro_resume)
 };
@@ -471,12 +456,7 @@
 	.driver   = {
 		.name	= "ipaq-h3xxx-micro",
 		.pm	= &micro_dev_pm_ops,
+		.suppress_bind_attrs = true,
 	},
-	.probe    = micro_probe,
-	.remove   = micro_remove,
-	/* .shutdown = micro_suspend, // FIXME */
 };
-module_platform_driver(micro_device_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("driver for iPAQ Atmel micro core and backlight");
+builtin_platform_driver_probe(micro_device_driver, micro_probe);
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index b31c54e..5bb49f0 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -273,12 +273,12 @@
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_ack = irq_gc_ack_set_bit;
 
-	irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
+	irq_setup_generic_chip(gc, IRQ_MSK(5), IRQ_GC_INIT_MASK_CACHE, 0,
+				IRQ_NOPROBE | IRQ_LEVEL);
 
 	adc->gc = gc;
 
-	irq_set_handler_data(adc->irq, gc);
-	irq_set_chained_handler(adc->irq, jz4740_adc_irq_demux);
+	irq_set_chained_handler_and_data(adc->irq, jz4740_adc_irq_demux, gc);
 
 	writeb(0x00, adc->base + JZ_REG_ADC_ENABLE);
 	writeb(0xff, adc->base + JZ_REG_ADC_CTRL);
@@ -308,8 +308,7 @@
 
 	irq_remove_generic_chip(adc->gc, IRQ_MSK(5), IRQ_NOPROBE | IRQ_LEVEL, 0);
 	kfree(adc->gc);
-	irq_set_handler_data(adc->irq, NULL);
-	irq_set_chained_handler(adc->irq, NULL);
+	irq_set_chained_handler_and_data(adc->irq, NULL, NULL);
 
 	iounmap(adc->base);
 	release_mem_region(adc->mem->start, resource_size(adc->mem));
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index 8057849..463f4ea 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -501,6 +501,14 @@
 
 static struct dmi_system_id kempld_dmi_table[] __initdata = {
 	{
+		.ident = "BBL6",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-bBL6"),
+		},
+		.driver_data = (void *)&kempld_platform_data_generic,
+		.callback = kempld_create_platform_device,
+	}, {
 		.ident = "BHL6",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
@@ -517,6 +525,14 @@
 		.driver_data = (void *)&kempld_platform_data_generic,
 		.callback = kempld_create_platform_device,
 	}, {
+		.ident = "CBW6",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
+			DMI_MATCH(DMI_BOARD_NAME, "COMe-cBW6"),
+		},
+		.driver_data = (void *)&kempld_platform_data_generic,
+		.callback = kempld_create_platform_device,
+	}, {
 		.ident = "CCR2",
 		.matches = {
 			DMI_MATCH(DMI_BOARD_VENDOR, "Kontron"),
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index d42fbb6..643f375 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -640,7 +640,6 @@
 static struct i2c_driver lm3533_i2c_driver = {
 	.driver = {
 		   .name = "lm3533",
-		   .owner = THIS_MODULE,
 	},
 	.id_table	= lm3533_i2c_ids,
 	.probe		= lm3533_i2c_probe,
diff --git a/drivers/mfd/lp3943.c b/drivers/mfd/lp3943.c
index 335b930..eecbb13 100644
--- a/drivers/mfd/lp3943.c
+++ b/drivers/mfd/lp3943.c
@@ -154,7 +154,6 @@
 	.remove = lp3943_remove,
 	.driver = {
 		.name = "lp3943",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(lp3943_of_match),
 	},
 	.id_table = lp3943_ids,
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index a87f2b5..c7a9825 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -141,12 +141,7 @@
 	irq_set_chip_data(virq, irqd);
 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/lp8788.c b/drivers/mfd/lp8788.c
index a30bc15..acf6165 100644
--- a/drivers/mfd/lp8788.c
+++ b/drivers/mfd/lp8788.c
@@ -221,7 +221,6 @@
 static struct i2c_driver lp8788_driver = {
 	.driver = {
 		.name = "lp8788",
-		.owner = THIS_MODULE,
 	},
 	.probe = lp8788_probe,
 	.remove = lp8788_remove,
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
index 8de3439..c5a9a08 100644
--- a/drivers/mfd/lpc_ich.c
+++ b/drivers/mfd/lpc_ich.c
@@ -66,6 +66,7 @@
 #include <linux/pci.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #define ACPIBASE		0x40
 #define ACPIBASE_GPE_OFF	0x28
@@ -835,9 +836,31 @@
 	priv->actrl_pbase_save = reg_save;
 }
 
-static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
+static int lpc_ich_finalize_wdt_cell(struct pci_dev *dev)
+{
+	struct itco_wdt_platform_data *pdata;
+	struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+	struct lpc_ich_info *info;
+	struct mfd_cell *cell = &lpc_ich_cells[LPC_WDT];
+
+	pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	info = &lpc_chipset_info[priv->chipset];
+
+	pdata->version = info->iTCO_version;
+	strlcpy(pdata->name, info->name, sizeof(pdata->name));
+
+	cell->platform_data = pdata;
+	cell->pdata_size = sizeof(*pdata);
+	return 0;
+}
+
+static void lpc_ich_finalize_gpio_cell(struct pci_dev *dev)
 {
 	struct lpc_ich_priv *priv = pci_get_drvdata(dev);
+	struct mfd_cell *cell = &lpc_ich_cells[LPC_GPIO];
 
 	cell->platform_data = &lpc_chipset_info[priv->chipset];
 	cell->pdata_size = sizeof(struct lpc_ich_info);
@@ -933,7 +956,7 @@
 	lpc_chipset_info[priv->chipset].use_gpio = ret;
 	lpc_ich_enable_gpio_space(dev);
 
-	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
+	lpc_ich_finalize_gpio_cell(dev);
 	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
 			      &lpc_ich_cells[LPC_GPIO], 1, NULL, 0, NULL);
 
@@ -1007,7 +1030,10 @@
 		res->end = base_addr + ACPIBASE_PMC_END;
 	}
 
-	lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
+	ret = lpc_ich_finalize_wdt_cell(dev);
+	if (ret)
+		goto wdt_done;
+
 	ret = mfd_add_devices(&dev->dev, PLATFORM_DEVID_AUTO,
 			      &lpc_ich_cells[LPC_WDT], 1, NULL, 0, NULL);
 
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 3bf8def..56e216d 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -532,7 +532,6 @@
 static struct i2c_driver max14577_i2c_driver = {
 	.driver = {
 		.name = "max14577",
-		.owner = THIS_MODULE,
 		.pm = &max14577_pm,
 		.of_match_table = max14577_dt_match,
 	},
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 760d08d..d19be64 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -391,7 +391,6 @@
 static struct i2c_driver max77686_i2c_driver = {
 	.driver = {
 		   .name = "max77686",
-		   .owner = THIS_MODULE,
 		   .pm = &max77686_pm,
 		   .of_match_table = of_match_ptr(max77686_pmic_dt_match),
 	},
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cb14afa..007f729 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -33,6 +33,7 @@
 #include <linux/mutex.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77693-private.h>
 #include <linux/regulator/machine.h>
 #include <linux/regmap.h>
@@ -193,22 +194,22 @@
 	} else
 		dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
 
-	max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
-	if (!max77693->muic) {
+	max77693->i2c_muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
+	if (!max77693->i2c_muic) {
 		dev_err(max77693->dev, "Failed to allocate I2C device for MUIC\n");
 		return -ENODEV;
 	}
-	i2c_set_clientdata(max77693->muic, max77693);
+	i2c_set_clientdata(max77693->i2c_muic, max77693);
 
-	max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
-	if (!max77693->haptic) {
+	max77693->i2c_haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
+	if (!max77693->i2c_haptic) {
 		dev_err(max77693->dev, "Failed to allocate I2C device for Haptic\n");
 		ret = -ENODEV;
 		goto err_i2c_haptic;
 	}
-	i2c_set_clientdata(max77693->haptic, max77693);
+	i2c_set_clientdata(max77693->i2c_haptic, max77693);
 
-	max77693->regmap_haptic = devm_regmap_init_i2c(max77693->haptic,
+	max77693->regmap_haptic = devm_regmap_init_i2c(max77693->i2c_haptic,
 					&max77693_regmap_haptic_config);
 	if (IS_ERR(max77693->regmap_haptic)) {
 		ret = PTR_ERR(max77693->regmap_haptic);
@@ -222,7 +223,7 @@
 	 * instance of MUIC device when irq of max77693 is initialized
 	 * before call max77693-muic probe() function.
 	 */
-	max77693->regmap_muic = devm_regmap_init_i2c(max77693->muic,
+	max77693->regmap_muic = devm_regmap_init_i2c(max77693->i2c_muic,
 					 &max77693_regmap_muic_config);
 	if (IS_ERR(max77693->regmap_muic)) {
 		ret = PTR_ERR(max77693->regmap_muic);
@@ -255,7 +256,7 @@
 				IRQF_ONESHOT | IRQF_SHARED |
 				IRQF_TRIGGER_FALLING, 0,
 				&max77693_charger_irq_chip,
-				&max77693->irq_data_charger);
+				&max77693->irq_data_chg);
 	if (ret) {
 		dev_err(max77693->dev, "failed to add irq chip: %d\n", ret);
 		goto err_irq_charger;
@@ -296,15 +297,15 @@
 err_intsrc:
 	regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
 err_irq_muic:
-	regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
+	regmap_del_irq_chip(max77693->irq, max77693->irq_data_chg);
 err_irq_charger:
 	regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
 err_irq_topsys:
 	regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
 err_regmap:
-	i2c_unregister_device(max77693->haptic);
+	i2c_unregister_device(max77693->i2c_haptic);
 err_i2c_haptic:
-	i2c_unregister_device(max77693->muic);
+	i2c_unregister_device(max77693->i2c_muic);
 	return ret;
 }
 
@@ -315,12 +316,12 @@
 	mfd_remove_devices(max77693->dev);
 
 	regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
-	regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
+	regmap_del_irq_chip(max77693->irq, max77693->irq_data_chg);
 	regmap_del_irq_chip(max77693->irq, max77693->irq_data_topsys);
 	regmap_del_irq_chip(max77693->irq, max77693->irq_data_led);
 
-	i2c_unregister_device(max77693->muic);
-	i2c_unregister_device(max77693->haptic);
+	i2c_unregister_device(max77693->i2c_muic);
+	i2c_unregister_device(max77693->i2c_haptic);
 
 	return 0;
 }
@@ -372,7 +373,6 @@
 static struct i2c_driver max77693_i2c_driver = {
 	.driver = {
 		   .name = "max77693",
-		   .owner = THIS_MODULE,
 		   .pm = &max77693_pm,
 		   .of_match_table = of_match_ptr(max77693_dt_match),
 	},
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index a354ac6..c52162e 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/mfd/core.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77843-private.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -71,7 +72,7 @@
 };
 
 /* Charger and Charger regulator use same regmap. */
-static int max77843_chg_init(struct max77843 *max77843)
+static int max77843_chg_init(struct max77693_dev *max77843)
 {
 	int ret;
 
@@ -101,7 +102,7 @@
 static int max77843_probe(struct i2c_client *i2c,
 			  const struct i2c_device_id *id)
 {
-	struct max77843 *max77843;
+	struct max77693_dev *max77843;
 	unsigned int reg_data;
 	int ret;
 
@@ -113,6 +114,7 @@
 	max77843->dev = &i2c->dev;
 	max77843->i2c = i2c;
 	max77843->irq = i2c->irq;
+	max77843->type = id->driver_data;
 
 	max77843->regmap = devm_regmap_init_i2c(i2c,
 			&max77843_regmap_config);
@@ -123,7 +125,7 @@
 
 	ret = regmap_add_irq_chip(max77843->regmap, max77843->irq,
 			IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
-			0, &max77843_irq_chip, &max77843->irq_data);
+			0, &max77843_irq_chip, &max77843->irq_data_topsys);
 	if (ret) {
 		dev_err(&i2c->dev, "Failed to add TOPSYS IRQ chip\n");
 		return ret;
@@ -164,18 +166,18 @@
 	return 0;
 
 err_pmic_id:
-	regmap_del_irq_chip(max77843->irq, max77843->irq_data);
+	regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
 
 	return ret;
 }
 
 static int max77843_remove(struct i2c_client *i2c)
 {
-	struct max77843 *max77843 = i2c_get_clientdata(i2c);
+	struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
 
 	mfd_remove_devices(max77843->dev);
 
-	regmap_del_irq_chip(max77843->irq, max77843->irq_data);
+	regmap_del_irq_chip(max77843->irq, max77843->irq_data_topsys);
 
 	i2c_unregister_device(max77843->i2c_chg);
 
@@ -188,7 +190,7 @@
 };
 
 static const struct i2c_device_id max77843_id[] = {
-	{ "max77843", },
+	{ "max77843", TYPE_MAX77843, },
 	{ },
 };
 MODULE_DEVICE_TABLE(i2c, max77843_id);
@@ -196,7 +198,7 @@
 static int __maybe_unused max77843_suspend(struct device *dev)
 {
 	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
-	struct max77843 *max77843 = i2c_get_clientdata(i2c);
+	struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
 
 	disable_irq(max77843->irq);
 	if (device_may_wakeup(dev))
@@ -208,7 +210,7 @@
 static int __maybe_unused max77843_resume(struct device *dev)
 {
 	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
-	struct max77843 *max77843 = i2c_get_clientdata(i2c);
+	struct max77693_dev *max77843 = i2c_get_clientdata(i2c);
 
 	if (device_may_wakeup(dev))
 		disable_irq_wake(max77843->irq);
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 232749c..2974c8b1 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -321,7 +321,6 @@
 static struct i2c_driver max8907_i2c_driver = {
 	.driver = {
 		.name = "max8907",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max8907_of_match),
 	},
 	.probe = max8907_i2c_probe,
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 8520bd6..fd8b15c 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -650,11 +650,8 @@
 	irq_set_chip_data(virq, d->host_data);
 	irq_set_chip_and_handler(virq, &max8925_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index c880c89..b0fe810 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -245,7 +245,6 @@
 static struct i2c_driver max8925_driver = {
 	.driver	= {
 		.name	= "max8925",
-		.owner	= THIS_MODULE,
 		.pm     = &max8925_pm_ops,
 		.of_match_table = max8925_dt_ids,
 	},
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index d3025be..b95a46d 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -113,14 +113,14 @@
 
 static void max8997_irq_lock(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&max8997->irqlock);
 }
 
 static void max8997_irq_sync_unlock(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	int i;
 
 	for (i = 0; i < MAX8997_IRQ_GROUP_NR; i++) {
@@ -140,26 +140,25 @@
 }
 
 static const inline struct max8997_irq_data *
-irq_to_max8997_irq(struct max8997_dev *max8997, int irq)
+irq_to_max8997_irq(struct max8997_dev *max8997, struct irq_data *data)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
 	return &max8997_irqs[data->hwirq];
 }
 
 static void max8997_irq_mask(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
-								data->irq);
+								     data);
 
 	max8997->irq_masks_cur[irq_data->group] |= irq_data->mask;
 }
 
 static void max8997_irq_unmask(struct irq_data *data)
 {
-	struct max8997_dev *max8997 = irq_get_chip_data(data->irq);
+	struct max8997_dev *max8997 = irq_data_get_irq_chip_data(data);
 	const struct max8997_irq_data *irq_data = irq_to_max8997_irq(max8997,
-								data->irq);
+								     data);
 
 	max8997->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
 }
@@ -295,11 +294,8 @@
 	irq_set_chip_data(irq, max8997);
 	irq_set_chip_and_handler(irq, &max8997_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 595364e..d3cfa9cf 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -508,7 +508,6 @@
 static struct i2c_driver max8997_i2c_driver = {
 	.driver = {
 		   .name = "max8997",
-		   .owner = THIS_MODULE,
 		   .pm = &max8997_pm,
 		   .of_match_table = of_match_ptr(max8997_pmic_dt_match),
 	},
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 3702056..90bad9f 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -98,9 +98,8 @@
 };
 
 static inline struct max8998_irq_data *
-irq_to_max8998_irq(struct max8998_dev *max8998, int irq)
+irq_to_max8998_irq(struct max8998_dev *max8998, struct irq_data *data)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
 	return &max8998_irqs[data->hwirq];
 }
 
@@ -134,8 +133,7 @@
 static void max8998_irq_unmask(struct irq_data *data)
 {
 	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
-							       data->irq);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
 }
@@ -143,8 +141,7 @@
 static void max8998_irq_mask(struct irq_data *data)
 {
 	struct max8998_dev *max8998 = irq_data_get_irq_chip_data(data);
-	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998,
-							       data->irq);
+	struct max8998_irq_data *irq_data = irq_to_max8998_irq(max8998, data);
 
 	max8998->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
 }
@@ -206,11 +203,8 @@
 	irq_set_chip_data(irq, max8998);
 	irq_set_chip_and_handler(irq, &max8998_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index a37cb74..a7afe3b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -377,7 +377,6 @@
 static struct i2c_driver max8998_i2c_driver = {
 	.driver = {
 		   .name = "max8998",
-		   .owner = THIS_MODULE,
 		   .pm = &max8998_pm,
 		   .of_match_table = of_match_ptr(max8998_dt_match),
 	},
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index 68b8448..67e4c9a 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -96,7 +96,6 @@
 static struct i2c_driver mc13xxx_i2c_driver = {
 	.id_table = mc13xxx_i2c_device_id,
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "mc13xxx",
 		.of_match_table = mc13xxx_dt_ids,
 	},
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 14fd5cb..c17635d 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -302,7 +302,7 @@
 {
 	atomic_t *cnts = NULL;
 
-	device_for_each_child(parent, &cnts, mfd_remove_devices_fn);
+	device_for_each_child_reverse(parent, &cnts, mfd_remove_devices_fn);
 	kfree(cnts);
 }
 EXPORT_SYMBOL(mfd_remove_devices);
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 03929a6..1749c1c 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -60,14 +60,14 @@
 
 static void mt6397_irq_lock(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 
 	mutex_lock(&mt6397->irqlock);
 }
 
 static void mt6397_irq_sync_unlock(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 
 	regmap_write(mt6397->regmap, MT6397_INT_CON0, mt6397->irq_masks_cur[0]);
 	regmap_write(mt6397->regmap, MT6397_INT_CON1, mt6397->irq_masks_cur[1]);
@@ -77,7 +77,7 @@
 
 static void mt6397_irq_disable(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 	int shift = data->hwirq & 0xf;
 	int reg = data->hwirq >> 4;
 
@@ -86,19 +86,38 @@
 
 static void mt6397_irq_enable(struct irq_data *data)
 {
-	struct mt6397_chip *mt6397 = irq_get_chip_data(data->irq);
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(data);
 	int shift = data->hwirq & 0xf;
 	int reg = data->hwirq >> 4;
 
 	mt6397->irq_masks_cur[reg] |= BIT(shift);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_set_wake(struct irq_data *irq_data, unsigned int on)
+{
+	struct mt6397_chip *mt6397 = irq_data_get_irq_chip_data(irq_data);
+	int shift = irq_data->hwirq & 0xf;
+	int reg = irq_data->hwirq >> 4;
+
+	if (on)
+		mt6397->wake_mask[reg] |= BIT(shift);
+	else
+		mt6397->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+#else
+#define mt6397_irq_set_wake NULL
+#endif
+
 static struct irq_chip mt6397_irq_chip = {
 	.name = "mt6397-irq",
 	.irq_bus_lock = mt6397_irq_lock,
 	.irq_bus_sync_unlock = mt6397_irq_sync_unlock,
 	.irq_enable = mt6397_irq_enable,
 	.irq_disable = mt6397_irq_disable,
+	.irq_set_wake = mt6397_irq_set_wake,
 };
 
 static void mt6397_irq_handle_reg(struct mt6397_chip *mt6397, int reg,
@@ -142,11 +161,7 @@
 	irq_set_chip_data(irq, mt6397);
 	irq_set_chip_and_handler(irq, &mt6397_irq_chip, handle_level_irq);
 	irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -183,6 +198,35 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int mt6397_irq_suspend(struct device *dev)
+{
+	struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+	regmap_write(chip->regmap, MT6397_INT_CON0, chip->wake_mask[0]);
+	regmap_write(chip->regmap, MT6397_INT_CON1, chip->wake_mask[1]);
+
+	enable_irq_wake(chip->irq);
+
+	return 0;
+}
+
+static int mt6397_irq_resume(struct device *dev)
+{
+	struct mt6397_chip *chip = dev_get_drvdata(dev);
+
+	regmap_write(chip->regmap, MT6397_INT_CON0, chip->irq_masks_cur[0]);
+	regmap_write(chip->regmap, MT6397_INT_CON1, chip->irq_masks_cur[1]);
+
+	disable_irq_wake(chip->irq);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
+			mt6397_irq_resume);
+
 static int mt6397_probe(struct platform_device *pdev)
 {
 	int ret;
@@ -237,6 +281,7 @@
 	.driver = {
 		.name = "mt6397",
 		.of_match_table = of_match_ptr(mt6397_of_match),
+		.pm = &mt6397_pm_ops,
 	},
 };
 
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 28cb048..8f8bacb 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -719,7 +719,6 @@
 	.driver = {
 		   .name = "palmas",
 		   .of_match_table = of_palmas_match_tbl,
-		   .owner = THIS_MODULE,
 	},
 	.probe = palmas_i2c_probe,
 	.remove = palmas_i2c_remove,
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 5a92646..59502d0 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -236,11 +236,49 @@
 	return pm8xxx_config_irq(chip, block, config);
 }
 
+static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
+					enum irqchip_irq_state which,
+					bool *state)
+{
+	struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+	unsigned int pmirq = irqd_to_hwirq(d);
+	unsigned int bits;
+	int irq_bit;
+	u8 block;
+	int rc;
+
+	if (which != IRQCHIP_STATE_LINE_LEVEL)
+		return -EINVAL;
+
+	block = pmirq / 8;
+	irq_bit = pmirq % 8;
+
+	spin_lock(&chip->pm_irq_lock);
+	rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
+	if (rc) {
+		pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
+		goto bail;
+	}
+
+	rc = regmap_read(chip->regmap, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
+	if (rc) {
+		pr_err("Failed Reading Status rc=%d\n", rc);
+		goto bail;
+	}
+
+	*state = !!(bits & BIT(irq_bit));
+bail:
+	spin_unlock(&chip->pm_irq_lock);
+
+	return rc;
+}
+
 static struct irq_chip pm8xxx_irq_chip = {
 	.name		= "pm8xxx",
 	.irq_mask_ack	= pm8xxx_irq_mask_ack,
 	.irq_unmask	= pm8xxx_irq_unmask,
 	.irq_set_type	= pm8xxx_irq_set_type,
+	.irq_get_irqchip_state = pm8xxx_irq_get_irqchip_state,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -251,11 +289,8 @@
 
 	irq_set_chip_and_handler(irq, &pm8xxx_irq_chip, handle_level_irq);
 	irq_set_chip_data(irq, chip);
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
+
 	return 0;
 }
 
@@ -336,14 +371,12 @@
 	if (!chip->irqdomain)
 		return -ENODEV;
 
-	irq_set_handler_data(irq, chip);
-	irq_set_chained_handler(irq, pm8xxx_irq_handler);
+	irq_set_chained_handler_and_data(irq, pm8xxx_irq_handler, chip);
 	irq_set_irq_wake(irq, 1);
 
 	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
 	if (rc) {
-		irq_set_chained_handler(irq, NULL);
-		irq_set_handler_data(irq, NULL);
+		irq_set_chained_handler_and_data(irq, NULL, NULL);
 		irq_domain_remove(chip->irqdomain);
 	}
 
@@ -362,8 +395,7 @@
 	struct pm_irq_chip *chip = platform_get_drvdata(pdev);
 
 	device_for_each_child(&pdev->dev, NULL, pm8921_remove_child);
-	irq_set_chained_handler(irq, NULL);
-	irq_set_handler_data(irq, NULL);
+	irq_set_chained_handler_and_data(irq, NULL, NULL);
 	irq_domain_remove(chip->irqdomain);
 
 	return 0;
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 12e3243..6afc9fa 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -149,6 +149,7 @@
 	[QCOM_RPM_USB_OTG_SWITCH] =		{ 210, 125, 82, 1 },
 	[QCOM_RPM_HDMI_SWITCH] =		{ 211, 126, 83, 1 },
 	[QCOM_RPM_DDR_DMM] =			{ 212, 127, 84, 2 },
+	[QCOM_RPM_QDSS_CLK] =			{ 214, ~0, 7, 1 },
 	[QCOM_RPM_VDDMIN_GPIO] =		{ 215, 131, 89, 1 },
 };
 
diff --git a/drivers/mfd/rc5t583-irq.c b/drivers/mfd/rc5t583-irq.c
index bb85020..3f8812d 100644
--- a/drivers/mfd/rc5t583-irq.c
+++ b/drivers/mfd/rc5t583-irq.c
@@ -386,9 +386,7 @@
 		irq_set_chip_and_handler(__irq, &rc5t583_irq_chip,
 					 handle_simple_irq);
 		irq_set_nested_thread(__irq, 1);
-#ifdef CONFIG_ARM
-		set_irq_flags(__irq, IRQF_VALID);
-#endif
+		irq_clear_status_flags(__irq, IRQ_NOREQUEST);
 	}
 
 	ret = request_threaded_irq(irq, NULL, rc5t583_irq, IRQF_ONESHOT,
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index df276ad..e10f02f 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -322,7 +322,6 @@
 static struct i2c_driver rc5t583_i2c_driver = {
 	.driver = {
 		   .name = "rc5t583",
-		   .owner = THIS_MODULE,
 		   },
 	.probe = rc5t583_i2c_probe,
 	.remove = rc5t583_i2c_remove,
diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c
index 2d64430..d4c114a 100644
--- a/drivers/mfd/retu-mfd.c
+++ b/drivers/mfd/retu-mfd.c
@@ -311,7 +311,6 @@
 static struct i2c_driver retu_driver = {
 	.driver		= {
 		.name = "retu-mfd",
-		.owner = THIS_MODULE,
 	},
 	.probe		= retu_probe,
 	.remove		= retu_remove,
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index db395a6..d60f916 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -124,6 +124,7 @@
 	{ .compatible = "richtek,rt5033", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, rt5033_dt_match);
 
 static struct i2c_driver rt5033_driver = {
 	.driver = {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 4a69afb..d206a3e 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -486,7 +486,6 @@
 static struct i2c_driver sec_pmic_driver = {
 	.driver = {
 		   .name = "sec_pmic",
-		   .owner = THIS_MODULE,
 		   .pm = &sec_pmic_pm_ops,
 		   .of_match_table = of_match_ptr(sec_dt_match),
 	},
diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c
index e3deb46..fb4ce6d0 100644
--- a/drivers/mfd/si476x-i2c.c
+++ b/drivers/mfd/si476x-i2c.c
@@ -873,7 +873,6 @@
 static struct i2c_driver si476x_core_driver = {
 	.driver		= {
 		.name	= "si476x-core",
-		.owner  = THIS_MODULE,
 	},
 	.probe		= si476x_core_probe,
 	.remove         = si476x_core_remove,
diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c
index 0324688..a4c0df7 100644
--- a/drivers/mfd/smsc-ece1099.c
+++ b/drivers/mfd/smsc-ece1099.c
@@ -98,7 +98,6 @@
 static struct i2c_driver smsc_i2c_driver = {
 	.driver = {
 		   .name = "smsc",
-		   .owner = THIS_MODULE,
 	},
 	.probe = smsc_i2c_probe,
 	.remove = smsc_i2c_remove,
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index e14c8c9..c3f4aab 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -112,7 +112,6 @@
 static struct i2c_driver stmpe_i2c_driver = {
 	.driver = {
 		.name = "stmpe-i2c",
-		.owner = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm = &stmpe_dev_pm_ops,
 #endif
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index 6fdb30e..618ba24 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/types.h>
 #include "stmpe.h"
 
@@ -108,6 +109,17 @@
 	return stmpe_remove(stmpe);
 }
 
+static const struct of_device_id stmpe_spi_of_match[] = {
+	{ .compatible = "st,stmpe610", },
+	{ .compatible = "st,stmpe801", },
+	{ .compatible = "st,stmpe811", },
+	{ .compatible = "st,stmpe1601", },
+	{ .compatible = "st,stmpe2401", },
+	{ .compatible = "st,stmpe2403", },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stmpe_spi_of_match);
+
 static const struct spi_device_id stmpe_spi_id[] = {
 	{ "stmpe610", STMPE610 },
 	{ "stmpe801", STMPE801 },
@@ -122,6 +134,7 @@
 static struct spi_driver stmpe_spi_driver = {
 	.driver = {
 		.name	= "stmpe-spi",
+		.of_match_table = of_match_ptr(stmpe_spi_of_match),
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &stmpe_dev_pm_ops,
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 18c4d72..e971af8 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -971,20 +971,13 @@
 	irq_set_chip_data(virq, stmpe);
 	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void stmpe_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-		set_irq_flags(virq, 0);
-#endif
 		irq_set_chip_and_handler(virq, NULL, NULL);
 		irq_set_chip_data(virq, NULL);
 }
diff --git a/drivers/mfd/stw481x.c b/drivers/mfd/stw481x.c
index 7ceb3df..ca613df 100644
--- a/drivers/mfd/stw481x.c
+++ b/drivers/mfd/stw481x.c
@@ -231,6 +231,7 @@
 	{ "stw481x", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, stw481x_id);
 
 static const struct of_device_id stw481x_match[] = {
 	{ .compatible = "st,stw4810", },
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index c09fb5d..16fc1ad 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -187,7 +187,7 @@
 /* Handle the T7L66XB interrupt mux */
 static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct t7l66xb *t7l66xb = irq_get_handler_data(irq);
+	struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
 	unsigned int i, irq_base;
 
@@ -246,14 +246,10 @@
 	for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
 		irq_set_chip_data(irq, t7l66xb);
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-#endif
 	}
 
 	irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_handler_data(t7l66xb->irq, t7l66xb);
-	irq_set_chained_handler(t7l66xb->irq, t7l66xb_irq);
+	irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
 }
 
 static void t7l66xb_detach_irq(struct platform_device *dev)
@@ -263,13 +259,9 @@
 
 	irq_base = t7l66xb->irq_base;
 
-	irq_set_chained_handler(t7l66xb->irq, NULL);
-	irq_set_handler_data(t7l66xb->irq, NULL);
+	irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);
 
 	for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
-#ifdef CONFIG_ARM
-		set_irq_flags(irq, 0);
-#endif
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
@@ -318,7 +310,7 @@
 	struct resource *iomem, *rscr;
 	int ret;
 
-	if (pdata == NULL)
+	if (!pdata)
 		return -EINVAL;
 
 	iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -371,7 +363,7 @@
 
 	clk_prepare_enable(t7l66xb->clk48m);
 
-	if (pdata && pdata->enable)
+	if (pdata->enable)
 		pdata->enable(dev);
 
 	/* Mask all interrupts */
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 96d420d..274bf39 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -215,20 +215,13 @@
 	irq_set_chip_and_handler(virq, &dummy_irq_chip,
 				handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, 0);
-#endif
 	irq_set_chip_and_handler(virq, NULL, NULL);
 	irq_set_chip_data(virq, NULL);
 }
@@ -492,7 +485,6 @@
 static struct i2c_driver tc3589x_driver = {
 	.driver = {
 		.name	= "tc3589x",
-		.owner	= THIS_MODULE,
 		.pm	= &tc3589x_dev_pm_ops,
 		.of_match_table = of_match_ptr(tc3589x_match),
 	},
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 63458b3..775b9ac 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -525,7 +525,7 @@
 static void
 tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct tc6393xb *tc6393xb = irq_get_handler_data(irq);
+	struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
 	unsigned int isr;
 	unsigned int i, irq_base;
 
@@ -586,12 +586,12 @@
 	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
 		irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
 		irq_set_chip_data(irq, tc6393xb);
-		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
-	irq_set_handler_data(tc6393xb->irq, tc6393xb);
-	irq_set_chained_handler(tc6393xb->irq, tc6393xb_irq);
+	irq_set_chained_handler_and_data(tc6393xb->irq, tc6393xb_irq,
+					 tc6393xb);
 }
 
 static void tc6393xb_detach_irq(struct platform_device *dev)
@@ -599,13 +599,12 @@
 	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
 	unsigned int irq, irq_base;
 
-	irq_set_chained_handler(tc6393xb->irq, NULL);
-	irq_set_handler_data(tc6393xb->irq, NULL);
+	irq_set_chained_handler_and_data(tc6393xb->irq, NULL, NULL);
 
 	irq_base = tc6393xb->irq_base;
 
 	for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
-		set_irq_flags(irq, 0);
+		irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 		irq_set_chip(irq, NULL);
 		irq_set_chip_data(irq, NULL);
 	}
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index a2e1990..1ab3dd6 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -129,7 +129,6 @@
 static struct i2c_driver tps6507x_i2c_driver = {
 	.driver = {
 		   .name = "tps6507x",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(tps6507x_of_match),
 	},
 	.probe = tps6507x_i2c_probe,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 14b62e11a..f88085a 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -259,7 +259,6 @@
 static struct i2c_driver tps65090_driver = {
 	.driver	= {
 		.name	= "tps65090",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tps65090_of_match),
 	},
 	.probe		= tps65090_i2c_probe,
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 7d1cfc1..55add04 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -156,6 +156,7 @@
 	{ .compatible = "ti,tps65217", .data = (void *)TPS65217 },
 	{ /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, tps65217_of_match);
 
 static int tps65217_probe(struct i2c_client *client,
 				const struct i2c_device_id *ids)
@@ -248,7 +249,6 @@
 static struct i2c_driver tps65217_driver = {
 	.driver		= {
 		.name	= "tps65217",
-		.owner	= THIS_MODULE,
 		.of_match_table = tps65217_of_match,
 	},
 	.id_table	= tps65217_id_table,
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 7af11a8..80b9dc3 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -211,6 +211,7 @@
 	{ .compatible = "ti,tps65218", },
 	{}
 };
+MODULE_DEVICE_TABLE(of, of_tps65218_match_table);
 
 static int tps65218_probe(struct i2c_client *client,
 				const struct i2c_device_id *ids)
@@ -280,7 +281,6 @@
 static struct i2c_driver tps65218_driver = {
 	.driver		= {
 		.name	= "tps65218",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_tps65218_match_table,
 	},
 	.probe		= tps65218_probe,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index e0a2583..5628a6b 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -52,7 +52,7 @@
 #define TPS6586X_VERSIONCRC	0xcd
 
 /* Maximum register */
-#define TPS6586X_MAX_REGISTER	(TPS6586X_VERSIONCRC + 1)
+#define TPS6586X_MAX_REGISTER	TPS6586X_VERSIONCRC
 
 struct tps6586x_irq_data {
 	u8	mask_reg;
@@ -299,14 +299,7 @@
 	irq_set_chip_data(virq, tps6586x);
 	irq_set_chip_and_handler(virq, &tps6586x_irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -467,7 +460,7 @@
 static const struct regmap_config tps6586x_regmap_config = {
 	.reg_bits = 8,
 	.val_bits = 8,
-	.max_register = TPS6586X_MAX_REGISTER - 1,
+	.max_register = TPS6586X_MAX_REGISTER,
 	.volatile_reg = is_volatile_reg,
 	.cache_type = REGCACHE_RBTREE,
 };
@@ -610,7 +603,6 @@
 static struct i2c_driver tps6586x_driver = {
 	.driver	= {
 		.name	= "tps6586x",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tps6586x_of_match),
 	},
 	.probe		= tps6586x_i2c_probe,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index 7612d89..f7ab115 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -544,7 +544,6 @@
 static struct i2c_driver tps65910_i2c_driver = {
 	.driver = {
 		   .name = "tps65910",
-		   .owner = THIS_MODULE,
 		   .of_match_table = of_match_ptr(tps65910_of_match),
 	},
 	.probe = tps65910_i2c_probe,
diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c
index 6a6343e..7e55640 100644
--- a/drivers/mfd/tps65912-i2c.c
+++ b/drivers/mfd/tps65912-i2c.c
@@ -109,7 +109,6 @@
 static struct i2c_driver tps65912_i2c_driver = {
 	.driver = {
 		   .name = "tps65912",
-		   .owner = THIS_MODULE,
 	},
 	.probe = tps65912_i2c_probe,
 	.remove = tps65912_i2c_remove,
diff --git a/drivers/mfd/tps65912-irq.c b/drivers/mfd/tps65912-irq.c
index fbecec7..db2c29c 100644
--- a/drivers/mfd/tps65912-irq.c
+++ b/drivers/mfd/tps65912-irq.c
@@ -197,13 +197,7 @@
 		irq_set_chip_and_handler(cur_irq, &tps65912_irq_chip,
 					 handle_edge_irq);
 		irq_set_nested_thread(cur_irq, 1);
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+		irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(irq, NULL, tps65912_irq, flags,
diff --git a/drivers/mfd/tps80031.c b/drivers/mfd/tps80031.c
index ed6c5b0..0812df3 100644
--- a/drivers/mfd/tps80031.c
+++ b/drivers/mfd/tps80031.c
@@ -549,7 +549,6 @@
 static struct i2c_driver tps80031_driver = {
 	.driver	= {
 		.name	= "tps80031",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= tps80031_probe,
 	.remove		= tps80031_remove,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 489674a..831696e 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -788,9 +788,8 @@
 		static struct regulator_consumer_supply usb1v8 = {
 			.supply =	"usb1v8",
 		};
-		static struct regulator_consumer_supply usb3v1[] = {
-			{ .supply =	"usb3v1" },
-			{ .supply =	"bci3v1" },
+		static struct regulator_consumer_supply usb3v1 = {
+			.supply =	"usb3v1",
 		};
 
 	/* First add the regulators so that they can be used by transceiver */
@@ -818,7 +817,7 @@
 				return PTR_ERR(child);
 
 			child = add_regulator_linked(TWL4030_REG_VUSB3V1,
-						      &usb_fixed, usb3v1, 2,
+						      &usb_fixed, &usb3v1, 1,
 						      features);
 			if (IS_ERR(child))
 				return PTR_ERR(child);
@@ -838,7 +837,7 @@
 		if (IS_ENABLED(CONFIG_REGULATOR_TWL4030) && child) {
 			usb1v5.dev_name = dev_name(child);
 			usb1v8.dev_name = dev_name(child);
-			usb3v1[0].dev_name = dev_name(child);
+			usb3v1.dev_name = dev_name(child);
 		}
 	}
 
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index a3fa7f4..40e51b0 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -419,16 +419,7 @@
 
 static inline void activate_irq(int irq)
 {
-#ifdef CONFIG_ARM
-	/*
-	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(irq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
-	irq_set_noprobe(irq);
-#endif
+	irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 }
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 20fb581..5357450 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -231,7 +231,7 @@
 
 static int twl6030_irq_set_wake(struct irq_data *d, unsigned int on)
 {
-	struct twl6030_irq *pdata = irq_get_chip_data(d->irq);
+	struct twl6030_irq *pdata = irq_data_get_irq_chip_data(d);
 
 	if (on)
 		atomic_inc(&pdata->wakeirqs);
@@ -352,26 +352,13 @@
 	irq_set_chip_and_handler(virq,  &pdata->irq_chip, handle_simple_irq);
 	irq_set_nested_thread(virq, true);
 	irq_set_parent(virq, pdata->twl_irq);
-
-#ifdef CONFIG_ARM
-	/*
-	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
-	 * sets on behalf of every irq_chip.  Also sets IRQ_NOPROBE.
-	 */
-	set_irq_flags(virq, IRQF_VALID);
-#else
-	/* same effect on other architectures */
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
 
 static void twl6030_irq_unmap(struct irq_domain *d, unsigned int virq)
 {
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, 0);
-#endif
 	irq_set_chip_and_handler(virq, NULL, NULL);
 	irq_set_chip_data(virq, NULL);
 }
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index c5265c1..a151ee2 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -86,7 +86,7 @@
 	{ 0x2E, 0x00 }, /* REG_STATUS	(ro) */
 };
 
-static struct reg_default twl6040_patch[] = {
+static struct reg_sequence twl6040_patch[] = {
 	/*
 	 * Select I2C bus access to dual access registers
 	 * Interrupt register is cleared on read
@@ -801,7 +801,6 @@
 static struct i2c_driver twl6040_driver = {
 	.driver = {
 		.name = "twl6040",
-		.owner = THIS_MODULE,
 	},
 	.probe		= twl6040_probe,
 	.remove		= twl6040_remove,
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 3591550..9a23021 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@
  * SIBCLK to talk to the chip.  We leave the clock running until
  * we have finished processing all interrupts from the chip.
  */
-static void ucb1x00_irq(unsigned int irq, struct irq_desc *desc)
+static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
 	unsigned int isr, i;
@@ -292,7 +292,7 @@
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
 	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);
 
-	for (i = 0; i < 16 && isr; i++, isr >>= 1, irq++)
+	for (i = 0; i < 16 && isr; i++, isr >>= 1)
 		if (isr & 1)
 			generic_handle_irq(ucb->irq_base + i);
 	ucb1x00_disable(ucb);
@@ -562,7 +562,7 @@
 
 		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
 		irq_set_chip_data(irq, ucb);
-		set_irq_flags(irq, IRQF_VALID | IRQ_NOREQUEST);
+		irq_clear_status_flags(irq, IRQ_NOREQUEST);
 	}
 
 	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index aeae6ec..0386eaf 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -21,7 +21,7 @@
 #define WM5102_NUM_AOD_ISR 2
 #define WM5102_NUM_ISR 5
 
-static const struct reg_default wm5102_reva_patch[] = {
+static const struct reg_sequence wm5102_reva_patch[] = {
 	{ 0x80, 0x0003 },
 	{ 0x221, 0x0090 },
 	{ 0x211, 0x0014 },
@@ -57,7 +57,7 @@
 	{ 0x80, 0x0000 },
 };
 
-static const struct reg_default wm5102_revb_patch[] = {
+static const struct reg_sequence wm5102_revb_patch[] = {
 	{ 0x19, 0x0001 },
 	{ 0x80, 0x0003 },
 	{ 0x081, 0xE022 },
@@ -80,7 +80,7 @@
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5102_patch(struct arizona *arizona)
 {
-	const struct reg_default *wm5102_patch;
+	const struct reg_sequence *wm5102_patch;
 	int patch_size;
 
 	switch (arizona->rev) {
@@ -266,8 +266,6 @@
 	{ 0x00000069, 0x01FF },   /* R105   - Always On Triggers Sequence Select 4 */
 	{ 0x0000006A, 0x01FF },   /* R106   - Always On Triggers Sequence Select 5 */
 	{ 0x0000006B, 0x01FF },   /* R107   - Always On Triggers Sequence Select 6 */
-	{ 0x0000006E, 0x01FF },   /* R110   - Trigger Sequence Select 32 */
-	{ 0x0000006F, 0x01FF },   /* R111   - Trigger Sequence Select 33 */
 	{ 0x00000070, 0x0000 },   /* R112   - Comfort Noise Generator */ 
 	{ 0x00000090, 0x0000 },   /* R144   - Haptics Control 1 */ 
 	{ 0x00000091, 0x7FFF },   /* R145   - Haptics Control 2 */ 
@@ -300,7 +298,6 @@
 	{ 0x00000175, 0x0004 },   /* R373   - FLL1 Control 5 */ 
 	{ 0x00000176, 0x0000 },   /* R374   - FLL1 Control 6 */ 
 	{ 0x00000177, 0x0181 },   /* R375   - FLL1 Loop Filter Test 1 */ 
-	{ 0x00000178, 0x0000 },   /* R376   - FLL1 NCO Test 0 */
 	{ 0x00000179, 0x0000 },   /* R377   - FLL1 Control 7 */
 	{ 0x00000181, 0x0000 },   /* R385   - FLL1 Synchroniser 1 */ 
 	{ 0x00000182, 0x0000 },   /* R386   - FLL1 Synchroniser 2 */ 
@@ -318,7 +315,6 @@
 	{ 0x00000195, 0x0004 },   /* R405   - FLL2 Control 5 */ 
 	{ 0x00000196, 0x0000 },   /* R406   - FLL2 Control 6 */ 
 	{ 0x00000197, 0x0000 },   /* R407   - FLL2 Loop Filter Test 1 */ 
-	{ 0x00000198, 0x0000 },   /* R408   - FLL2 NCO Test 0 */
 	{ 0x00000199, 0x0000 },   /* R409   - FLL2 Control 7 */
 	{ 0x000001A1, 0x0000 },   /* R417   - FLL2 Synchroniser 1 */ 
 	{ 0x000001A2, 0x0000 },   /* R418   - FLL2 Synchroniser 2 */ 
@@ -338,12 +334,9 @@
 	{ 0x0000021A, 0x01A6 },   /* R538   - Mic Bias Ctrl 3 */ 
 	{ 0x00000293, 0x0000 },   /* R659   - Accessory Detect Mode 1 */ 
 	{ 0x0000029B, 0x0020 },   /* R667   - Headphone Detect 1 */ 
-	{ 0x0000029C, 0x0000 },   /* R668   - Headphone Detect 2 */
-	{ 0x0000029F, 0x0000 },   /* R671   - Headphone Detect Test */
 	{ 0x000002A2, 0x0000 },   /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },   /* R675   - Mic Detect 1 */ 
 	{ 0x000002A4, 0x009F },   /* R676   - Mic Detect 2 */ 
-	{ 0x000002A5, 0x0000 },   /* R677   - Mic Detect 3 */ 
 	{ 0x000002A6, 0x3737 },   /* R678   - Mic Detect Level 1 */
 	{ 0x000002A7, 0x372C },   /* R679   - Mic Detect Level 2 */
 	{ 0x000002A8, 0x1422 },   /* R680   - Mic Detect Level 3 */
@@ -887,11 +880,11 @@
 	{ 0x00000D1B, 0xFFFF },   /* R3355  - IRQ2 Status 4 Mask */ 
 	{ 0x00000D1C, 0xFFFF },   /* R3356  - IRQ2 Status 5 Mask */ 
 	{ 0x00000D1F, 0x0000 },   /* R3359  - IRQ2 Control */ 
+	{ 0x00000D41, 0x0000 },   /* R3393  - ADSP2 IRQ0 */
 	{ 0x00000D53, 0xFFFF },   /* R3411  - AOD IRQ Mask IRQ1 */ 
 	{ 0x00000D54, 0xFFFF },   /* R3412  - AOD IRQ Mask IRQ2 */ 
 	{ 0x00000D56, 0x0000 },   /* R3414  - Jack detect debounce */ 
 	{ 0x00000E00, 0x0000 },   /* R3584  - FX_Ctrl1 */ 
-	{ 0x00000E01, 0x0000 },   /* R3585  - FX_Ctrl2 */ 
 	{ 0x00000E10, 0x6318 },   /* R3600  - EQ1_1 */ 
 	{ 0x00000E11, 0x6300 },   /* R3601  - EQ1_2 */ 
 	{ 0x00000E12, 0x0FC8 },   /* R3602  - EQ1_3 */ 
@@ -991,6 +984,7 @@
 	{ 0x00000ECD, 0x0000 },   /* R3789  - HPLPF4_2 */ 
 	{ 0x00000EE0, 0x0000 },   /* R3808  - ASRC_ENABLE */ 
 	{ 0x00000EE2, 0x0000 },   /* R3810  - ASRC_RATE1 */ 
+	{ 0x00000EE3, 0x0400 },   /* R3811  - ASRC_RATE2 */
 	{ 0x00000EF0, 0x0000 },   /* R3824  - ISRC 1 CTRL 1 */ 
 	{ 0x00000EF1, 0x0000 },   /* R3825  - ISRC 1 CTRL 2 */ 
 	{ 0x00000EF2, 0x0000 },   /* R3826  - ISRC 1 CTRL 3 */ 
@@ -998,7 +992,6 @@
 	{ 0x00000EF4, 0x0000 },   /* R3828  - ISRC 2 CTRL 2 */ 
 	{ 0x00000EF5, 0x0000 },   /* R3829  - ISRC 2 CTRL 3 */ 
 	{ 0x00001100, 0x0010 },   /* R4352  - DSP1 Control 1 */ 
-	{ 0x00001101, 0x0000 },   /* R4353  - DSP1 Clocking 1 */ 
 };
 
 static bool wm5102_readable_register(struct device *dev, unsigned int reg)
@@ -1008,12 +1001,10 @@
 	case ARIZONA_DEVICE_REVISION:
 	case ARIZONA_CTRL_IF_SPI_CFG_1:
 	case ARIZONA_CTRL_IF_I2C1_CFG_1:
-	case ARIZONA_CTRL_IF_STATUS_1:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_3:
-	case ARIZONA_WRITE_SEQUENCER_PROM:
 	case ARIZONA_TONE_GENERATOR_1:
 	case ARIZONA_TONE_GENERATOR_2:
 	case ARIZONA_TONE_GENERATOR_3:
@@ -1034,8 +1025,6 @@
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
 	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
-	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7:
-	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8:
 	case ARIZONA_COMFORT_NOISE_GENERATOR:
 	case ARIZONA_HAPTICS_CONTROL_1:
 	case ARIZONA_HAPTICS_CONTROL_2:
@@ -1176,7 +1165,6 @@
 	case ARIZONA_DAC_DIGITAL_VOLUME_4L:
 	case ARIZONA_OUT_VOLUME_4L:
 	case ARIZONA_NOISE_GATE_SELECT_4L:
-	case ARIZONA_OUTPUT_PATH_CONFIG_4R:
 	case ARIZONA_DAC_DIGITAL_VOLUME_4R:
 	case ARIZONA_OUT_VOLUME_4R:
 	case ARIZONA_NOISE_GATE_SELECT_4R:
@@ -1184,7 +1172,6 @@
 	case ARIZONA_DAC_DIGITAL_VOLUME_5L:
 	case ARIZONA_DAC_VOLUME_LIMIT_5L:
 	case ARIZONA_NOISE_GATE_SELECT_5L:
-	case ARIZONA_OUTPUT_PATH_CONFIG_5R:
 	case ARIZONA_DAC_DIGITAL_VOLUME_5R:
 	case ARIZONA_DAC_VOLUME_LIMIT_5R:
 	case ARIZONA_NOISE_GATE_SELECT_5R:
@@ -1195,8 +1182,6 @@
 	case ARIZONA_NOISE_GATE_CONTROL:
 	case ARIZONA_PDM_SPK1_CTRL_1:
 	case ARIZONA_PDM_SPK1_CTRL_2:
-	case ARIZONA_SPK_CTRL_2:
-	case ARIZONA_SPK_CTRL_3:
 	case ARIZONA_DAC_COMP_1:
 	case ARIZONA_DAC_COMP_2:
 	case ARIZONA_DAC_COMP_3:
@@ -1228,7 +1213,6 @@
 	case ARIZONA_AIF1_FRAME_CTRL_18:
 	case ARIZONA_AIF1_TX_ENABLES:
 	case ARIZONA_AIF1_RX_ENABLES:
-	case ARIZONA_AIF1_FORCE_WRITE:
 	case ARIZONA_AIF2_BCLK_CTRL:
 	case ARIZONA_AIF2_TX_PIN_CTRL:
 	case ARIZONA_AIF2_RX_PIN_CTRL:
@@ -1244,7 +1228,6 @@
 	case ARIZONA_AIF2_FRAME_CTRL_12:
 	case ARIZONA_AIF2_TX_ENABLES:
 	case ARIZONA_AIF2_RX_ENABLES:
-	case ARIZONA_AIF2_FORCE_WRITE:
 	case ARIZONA_AIF3_BCLK_CTRL:
 	case ARIZONA_AIF3_TX_PIN_CTRL:
 	case ARIZONA_AIF3_RX_PIN_CTRL:
@@ -1260,7 +1243,6 @@
 	case ARIZONA_AIF3_FRAME_CTRL_12:
 	case ARIZONA_AIF3_TX_ENABLES:
 	case ARIZONA_AIF3_RX_ENABLES:
-	case ARIZONA_AIF3_FORCE_WRITE:
 	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
 	case ARIZONA_SLIMBUS_RATES_1:
 	case ARIZONA_SLIMBUS_RATES_2:
@@ -1586,22 +1568,6 @@
 	case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
 	case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
 	case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
-	case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
-	case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
-	case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
-	case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
 	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
 	case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
 	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
@@ -1810,11 +1776,6 @@
 	case ARIZONA_DRC1_CTRL3:
 	case ARIZONA_DRC1_CTRL4:
 	case ARIZONA_DRC1_CTRL5:
-	case ARIZONA_DRC2_CTRL1:
-	case ARIZONA_DRC2_CTRL2:
-	case ARIZONA_DRC2_CTRL3:
-	case ARIZONA_DRC2_CTRL4:
-	case ARIZONA_DRC2_CTRL5:
 	case ARIZONA_HPLPF1_1:
 	case ARIZONA_HPLPF1_2:
 	case ARIZONA_HPLPF2_1:
@@ -1832,9 +1793,6 @@
 	case ARIZONA_ISRC_2_CTRL_1:
 	case ARIZONA_ISRC_2_CTRL_2:
 	case ARIZONA_ISRC_2_CTRL_3:
-	case ARIZONA_ISRC_3_CTRL_1:
-	case ARIZONA_ISRC_3_CTRL_2:
-	case ARIZONA_ISRC_3_CTRL_3:
 	case ARIZONA_DSP1_CONTROL_1:
 	case ARIZONA_DSP1_CLOCKING_1:
 	case ARIZONA_DSP1_STATUS_1:
@@ -1883,7 +1841,6 @@
 	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
 	case ARIZONA_WRITE_SEQUENCER_CTRL_3:
 	case ARIZONA_OUTPUT_STATUS_1:
-	case ARIZONA_RAW_OUTPUT_STATUS_1:
 	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
 	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
 	case ARIZONA_SAMPLE_RATE_1_STATUS:
@@ -1969,6 +1926,8 @@
 	.reg_bits = 32,
 	.pad_bits = 16,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5102_MAX_REGISTER,
 	.readable_reg = wm5102_readable_register,
@@ -1983,6 +1942,8 @@
 const struct regmap_config wm5102_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5102_MAX_REGISTER,
 	.readable_reg = wm5102_readable_register,
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 12cad94..c4b9374 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -21,7 +21,7 @@
 #define WM5110_NUM_AOD_ISR 2
 #define WM5110_NUM_ISR 5
 
-static const struct reg_default wm5110_reva_patch[] = {
+static const struct reg_sequence wm5110_reva_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x44, 0x20 },
 	{ 0x45, 0x40 },
@@ -134,7 +134,7 @@
 	{ 0x209, 0x002A },
 };
 
-static const struct reg_default wm5110_revb_patch[] = {
+static const struct reg_sequence wm5110_revb_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x36e, 0x0210 },
 	{ 0x370, 0x0210 },
@@ -224,7 +224,7 @@
 	{ 0x80, 0x0 },
 };
 
-static const struct reg_default wm5110_revd_patch[] = {
+static const struct reg_sequence wm5110_revd_patch[] = {
 	{ 0x80, 0x3 },
 	{ 0x80, 0x3 },
 	{ 0x393, 0x27 },
@@ -249,6 +249,16 @@
 	{ 0x80, 0x0 },
 };
 
+/* Add extra headphone write sequence locations */
+static const struct reg_default wm5110_reve_patch[] = {
+	{ 0x80, 0x3 },
+	{ 0x80, 0x3 },
+	{ 0x4b, 0x138 },
+	{ 0x4c, 0x13d },
+	{ 0x80, 0x0 },
+	{ 0x80, 0x0 },
+};
+
 /* We use a function so we can use ARRAY_SIZE() */
 int wm5110_patch(struct arizona *arizona)
 {
@@ -266,7 +276,9 @@
 					     wm5110_revd_patch,
 					     ARRAY_SIZE(wm5110_revd_patch));
 	default:
-		return 0;
+		return regmap_register_patch(arizona->regmap,
+					     wm5110_reve_patch,
+					     ARRAY_SIZE(wm5110_reve_patch));
 	}
 }
 EXPORT_SYMBOL_GPL(wm5110_patch);
@@ -676,6 +688,7 @@
 	{ 0x00000032, 0x0100 },    /* R50    - PWM Drive 3 */
 	{ 0x00000040, 0x0000 },    /* R64    - Wake control */
 	{ 0x00000041, 0x0000 },    /* R65    - Sequence control */
+	{ 0x00000042, 0x0000 },    /* R66    - Spare Triggers */
 	{ 0x00000061, 0x01FF },    /* R97    - Sample Rate Sequence Select 1 */
 	{ 0x00000062, 0x01FF },    /* R98    - Sample Rate Sequence Select 2 */
 	{ 0x00000063, 0x01FF },    /* R99    - Sample Rate Sequence Select 3 */
@@ -754,11 +767,9 @@
 	{ 0x0000021A, 0x01A6 },    /* R538   - Mic Bias Ctrl 3 */
 	{ 0x00000293, 0x0000 },    /* R659   - Accessory Detect Mode 1 */
 	{ 0x0000029B, 0x0028 },    /* R667   - Headphone Detect 1 */
-	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
 	{ 0x000002A2, 0x0000 },    /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
-	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
 	{ 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
 	{ 0x000002A7, 0x372C },    /* R679   - Mic Detect Level 2 */
 	{ 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
@@ -848,8 +859,6 @@
 	{ 0x00000440, 0x8FFF },    /* R1088  - DRE Enable */
 	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
 	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
-	{ 0x00000480, 0x0040 },    /* R1152  - Class W ANC Threshold 1 */
-	{ 0x00000481, 0x0040 },    /* R1153  - Class W ANC Threshold 2 */
 	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
 	{ 0x00000491, 0x0000 },    /* R1169  - PDM SPK1 CTRL 2 */
 	{ 0x00000492, 0x0069 },    /* R1170  - PDM SPK2 CTRL 1 */
@@ -1508,7 +1517,6 @@
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
 	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
-	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
 	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
 	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
 	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
@@ -1625,14 +1633,9 @@
 	{ 0x00000F00, 0x0000 },    /* R3840  - Clock Control */
 	{ 0x00000F01, 0x0000 },    /* R3841  - ANC_SRC */
 	{ 0x00001100, 0x0010 },    /* R4352  - DSP1 Control 1 */
-	{ 0x00001101, 0x0000 },    /* R4353  - DSP1 Clocking 1 */
 	{ 0x00001200, 0x0010 },    /* R4608  - DSP2 Control 1 */
-	{ 0x00001201, 0x0000 },    /* R4609  - DSP2 Clocking 1 */
 	{ 0x00001300, 0x0010 },    /* R4864  - DSP3 Control 1 */
-	{ 0x00001301, 0x0000 },    /* R4865  - DSP3 Clocking 1 */
 	{ 0x00001400, 0x0010 },    /* R5120  - DSP4 Control 1 */
-	{ 0x00001401, 0x0000 },    /* R5121  - DSP4 Clocking 1 */
-	{ 0x00001404, 0x0000 },    /* R5124  - DSP4 Status 1 */
 };
 
 static bool wm5110_is_rev_b_adsp_memory(unsigned int reg)
@@ -1716,6 +1719,7 @@
 	case ARIZONA_PWM_DRIVE_3:
 	case ARIZONA_WAKE_CONTROL:
 	case ARIZONA_SEQUENCE_CONTROL:
+	case ARIZONA_SPARE_TRIGGERS:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
 	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
@@ -3007,6 +3011,8 @@
 	.reg_bits = 32,
 	.pad_bits = 16,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5110_MAX_REGISTER,
 	.readable_reg = wm5110_readable_register,
@@ -3021,6 +3027,8 @@
 const struct regmap_config wm5110_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM5110_MAX_REGISTER,
 	.readable_reg = wm5110_readable_register,
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index a4cbefe..824bcba 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -93,7 +93,6 @@
 static struct i2c_driver wm831x_i2c_driver = {
 	.driver = {
 		.name = "wm831x",
-		.owner = THIS_MODULE,
 		.pm = &wm831x_pm_ops,
 	},
 	.probe = wm831x_i2c_probe,
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index 3da8126..dfea8b9 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -552,14 +552,7 @@
 	irq_set_chip_data(virq, h->host_data);
 	irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 6a16a8a..9358f03 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -69,7 +69,6 @@
 static struct i2c_driver wm8350_i2c_driver = {
 	.driver = {
 		   .name = "wm8350",
-		   .owner = THIS_MODULE,
 	},
 	.probe = wm8350_i2c_probe,
 	.remove = wm8350_i2c_remove,
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 813ff50..27054f3 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -526,13 +526,7 @@
 					 handle_edge_irq);
 		irq_set_nested_thread(cur_irq, 1);
 
-		/* ARM needs us to explicitly flag the IRQ as valid
-		 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-		set_irq_flags(cur_irq, IRQF_VALID);
-#else
-		irq_set_noprobe(cur_irq);
-#endif
+		irq_clear_status_flags(cur_irq, IRQ_NOREQUEST | IRQ_NOPROBE);
 	}
 
 	ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index c6fb5d1..3bd44a4 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -194,7 +194,6 @@
 static struct i2c_driver wm8400_i2c_driver = {
 	.driver = {
 		.name = "WM8400",
-		.owner = THIS_MODULE,
 	},
 	.probe    = wm8400_i2c_probe,
 	.remove   = wm8400_i2c_remove,
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 53ae5af..7eec619 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -243,21 +243,21 @@
 }
 #endif
 
-static const struct reg_default wm8994_revc_patch[] = {
+static const struct reg_sequence wm8994_revc_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0x56, 0x3 },
 	{ 0x817, 0x0 },
 	{ 0x102, 0x0 },
 };
 
-static const struct reg_default wm8958_reva_patch[] = {
+static const struct reg_sequence wm8958_reva_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0xcb, 0x81 },
 	{ 0x817, 0x0 },
 	{ 0x102, 0x0 },
 };
 
-static const struct reg_default wm1811_reva_patch[] = {
+static const struct reg_sequence wm1811_reva_patch[] = {
 	{ 0x102, 0x3 },
 	{ 0x56, 0xc07 },
 	{ 0x5d, 0x7e },
@@ -326,7 +326,7 @@
 {
 	struct wm8994_pdata *pdata;
 	struct regmap_config *regmap_config;
-	const struct reg_default *regmap_patch = NULL;
+	const struct reg_sequence *regmap_patch = NULL;
 	const char *devname;
 	int ret, i, patch_regs = 0;
 	int pulls = 0;
@@ -677,7 +677,6 @@
 static struct i2c_driver wm8994_i2c_driver = {
 	.driver = {
 		.name = "wm8994",
-		.owner = THIS_MODULE,
 		.pm = &wm8994_pm_ops,
 		.of_match_table = of_match_ptr(wm8994_of_match),
 	},
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index 55c380a..18710f3 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -172,14 +172,7 @@
 	irq_set_chip_data(virq, wm8994);
 	irq_set_chip_and_handler(virq, &wm8994_edge_irq_chip, handle_edge_irq);
 	irq_set_nested_thread(virq, 1);
-
-	/* ARM needs us to explicitly flag the IRQ as valid
-	 * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
-	set_irq_flags(virq, IRQF_VALID);
-#else
 	irq_set_noprobe(virq);
-#endif
 
 	return 0;
 }
@@ -193,7 +186,7 @@
 {
 	int ret;
 	unsigned long irqflags;
-	struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
+	struct wm8994_pdata *pdata = &wm8994->pdata;
 
 	if (!wm8994->irq) {
 		dev_warn(wm8994->dev,
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index 300e9b6..c56b160 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -19,7 +19,7 @@
 
 #include "wm8994.h"
 
-static struct reg_default wm1811_defaults[] = {
+static const struct reg_default wm1811_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1    - Power Management (1) */
 	{ 0x0002, 0x6000 },    /* R2    - Power Management (2) */
 	{ 0x0003, 0x0000 },    /* R3    - Power Management (3) */
@@ -251,7 +251,7 @@
 	{ 0x0748, 0x003F },    /* R1864 - IRQ Debounce */
 };
 
-static struct reg_default wm8994_defaults[] = {
+static const struct reg_default wm8994_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1     - Power Management (1) */ 
 	{ 0x0002, 0x6000 },    /* R2     - Power Management (2) */ 
 	{ 0x0003, 0x0000 },    /* R3     - Power Management (3) */ 
@@ -470,7 +470,7 @@
 	{ 0x0748, 0x003F },    /* R1864  - IRQ Debounce */ 
 };
 
-static struct reg_default wm8958_defaults[] = {
+static const struct reg_default wm8958_defaults[] = {
 	{ 0x0001, 0x0000 },    /* R1     - Power Management (1) */
 	{ 0x0002, 0x6000 },    /* R2     - Power Management (2) */
 	{ 0x0003, 0x0000 },    /* R3     - Power Management (3) */
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index c0c25d75..ca41a56 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -17,7 +17,7 @@
 
 #include "arizona.h"
 
-static const struct reg_default wm8997_reva_patch[] = {
+static const struct reg_sequence wm8997_reva_patch[] = {
 	{ 0x80, 0x0003 },
 	{ 0x214, 0x0008 },
 	{ 0x458, 0x0000 },
@@ -243,7 +243,6 @@
 	{ 0x0000029B, 0x0020 },    /* R667   - Headphone Detect 1 */
 	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
 	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
-	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
 	{ 0x000002C3, 0x0000 },    /* R707   - Mic noise mix control 1 */
 	{ 0x000002CB, 0x0000 },    /* R715   - Isolation control */
 	{ 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
@@ -684,7 +683,6 @@
 	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
 	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
 	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
-	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
 	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
 	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
 	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
@@ -788,8 +786,6 @@
 	{ 0x00000EF3, 0x0000 },    /* R3827  - ISRC 2 CTRL 1 */
 	{ 0x00000EF4, 0x0000 },    /* R3828  - ISRC 2 CTRL 2 */
 	{ 0x00000EF5, 0x0000 },    /* R3829  - ISRC 2 CTRL 3 */
-	{ 0x00001100, 0x0010 },    /* R4352  - DSP1 Control 1 */
-	{ 0x00001101, 0x0000 },    /* R4353  - DSP1 Clocking 1 */
 };
 
 static bool wm8997_readable_register(struct device *dev, unsigned int reg)
@@ -1480,6 +1476,8 @@
 	case ARIZONA_SAMPLE_RATE_2_STATUS:
 	case ARIZONA_SAMPLE_RATE_3_STATUS:
 	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_FLL1_NCO_TEST_0:
+	case ARIZONA_FLL2_NCO_TEST_0:
 	case ARIZONA_MIC_DETECT_3:
 	case ARIZONA_HP_CTRL_1L:
 	case ARIZONA_HP_CTRL_1R:
@@ -1521,6 +1519,8 @@
 const struct regmap_config wm8997_i2c_regmap = {
 	.reg_bits = 32,
 	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
 
 	.max_register = WM8997_MAX_REGISTER,
 	.readable_reg = wm8997_readable_register,
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
new file mode 100644
index 0000000..e6de3cd
--- /dev/null
+++ b/drivers/mfd/wm8998-tables.c
@@ -0,0 +1,1594 @@
+/*
+ * wm8998-tables.c  --  data tables for wm8998-class codecs
+ *
+ * Copyright 2014 Wolfson Microelectronics plc
+ *
+ * Author: Richard Fitzgerald <rf@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+#include <linux/device.h>
+
+#include "arizona.h"
+
+#define WM8998_NUM_AOD_ISR 2
+#define WM8998_NUM_ISR 5
+
+static const struct reg_default wm8998_rev_a_patch[] = {
+	{ 0x0212, 0x0000 },
+	{ 0x0211, 0x0014 },
+	{ 0x04E4, 0x0E0D },
+	{ 0x04E5, 0x0E0D },
+	{ 0x04E6, 0x0E0D },
+	{ 0x04EB, 0x060E },
+	{ 0x0441, 0xC759 },
+	{ 0x0442, 0x2A08 },
+	{ 0x0443, 0x5CFA },
+	{ 0x026E, 0x0064 },
+	{ 0x026F, 0x00EA },
+	{ 0x0270, 0x1F16 },
+	{ 0x0410, 0x2080 },
+	{ 0x0418, 0x2080 },
+	{ 0x0420, 0x2080 },
+	{ 0x04B8, 0x1120 },
+	{ 0x047E, 0x080E },
+	{ 0x0448, 0x03EF },
+};
+
+/* We use a function so we can use ARRAY_SIZE() */
+int wm8998_patch(struct arizona *arizona)
+{
+	return regmap_register_patch(arizona->regmap,
+				     wm8998_rev_a_patch,
+				     ARRAY_SIZE(wm8998_rev_a_patch));
+}
+
+static const struct regmap_irq wm8998_aod_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+		.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+	},
+	[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+		.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+	},
+	[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
+	[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
+	[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
+	[ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
+};
+
+struct regmap_irq_chip wm8998_aod = {
+	.name = "wm8998 AOD",
+	.status_base = ARIZONA_AOD_IRQ1,
+	.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
+	.ack_base = ARIZONA_AOD_IRQ1,
+	.wake_base = ARIZONA_WAKE_CONTROL,
+	.wake_invert = 1,
+	.num_regs = 1,
+	.irqs = wm8998_aod_irqs,
+	.num_irqs = ARRAY_SIZE(wm8998_aod_irqs),
+};
+
+static const struct regmap_irq wm8998_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+	[ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+	[ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+	[ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+	[ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+		.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
+	},
+	[ARIZONA_IRQ_SPK_OVERHEAT] = {
+		.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
+	},
+	[ARIZONA_IRQ_HPDET] = {
+		.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+	},
+	[ARIZONA_IRQ_MICDET] = {
+		.reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+	},
+	[ARIZONA_IRQ_WSEQ_DONE] = {
+		.reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+	},
+	[ARIZONA_IRQ_DRC1_SIG_DET] = {
+		.reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+	},
+	[ARIZONA_IRQ_ASRC2_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_ASRC1_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_UNDERCLOCKED] = {
+		.reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+	},
+	[ARIZONA_IRQ_OVERCLOCKED] = {
+		.reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+	},
+	[ARIZONA_IRQ_FLL2_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_FLL1_LOCK] = {
+		.reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+	},
+	[ARIZONA_IRQ_CLKGEN_ERR] = {
+		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+	},
+	[ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+		.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+	},
+
+	[ARIZONA_IRQ_ASRC_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF3_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF2_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
+	},
+	[ARIZONA_IRQ_AIF1_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
+	},
+	[ARIZONA_IRQ_CTRLIF_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
+	},
+	[ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+		.reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
+	},
+	[ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+		.reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
+	},
+	[ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+		.reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
+	},
+	[ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
+	},
+	[ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+		.reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
+	},
+
+	[ARIZONA_IRQ_BOOT_DONE] = {
+		.reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+	},
+	[ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+		.reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+	},
+	[ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+		.reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+	},
+};
+
+struct regmap_irq_chip wm8998_irq = {
+	.name = "wm8998 IRQ",
+	.status_base = ARIZONA_INTERRUPT_STATUS_1,
+	.mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+	.ack_base = ARIZONA_INTERRUPT_STATUS_1,
+	.num_regs = 5,
+	.irqs = wm8998_irqs,
+	.num_irqs = ARRAY_SIZE(wm8998_irqs),
+};
+
+static const struct reg_default wm8998_reg_default[] = {
+	{ 0x00000009, 0x0001 },    /* R9     - Ctrl IF I2C1 CFG 1 */
+	{ 0x0000000B, 0x001A },    /* R11    - Ctrl IF I2C1 CFG 2 */
+	{ 0x00000020, 0x0000 },    /* R32    - Tone Generator 1 */
+	{ 0x00000021, 0x1000 },    /* R33    - Tone Generator 2 */
+	{ 0x00000022, 0x0000 },    /* R34    - Tone Generator 3 */
+	{ 0x00000023, 0x1000 },    /* R35    - Tone Generator 4 */
+	{ 0x00000024, 0x0000 },    /* R36    - Tone Generator 5 */
+	{ 0x00000030, 0x0000 },    /* R48    - PWM Drive 1 */
+	{ 0x00000031, 0x0100 },    /* R49    - PWM Drive 2 */
+	{ 0x00000032, 0x0100 },    /* R50    - PWM Drive 3 */
+	{ 0x00000040, 0x0000 },    /* R64    - Wake control */
+	{ 0x00000041, 0x0000 },    /* R65    - Sequence control */
+	{ 0x00000061, 0x01FF },    /* R97    - Sample Rate Sequence Select 1 */
+	{ 0x00000062, 0x01FF },    /* R98    - Sample Rate Sequence Select 2 */
+	{ 0x00000063, 0x01FF },    /* R99    - Sample Rate Sequence Select 3 */
+	{ 0x00000064, 0x01FF },    /* R100   - Sample Rate Sequence Select 4 */
+	{ 0x00000066, 0x01FF },    /* R102   - Always On Triggers Sequence Select 1 */
+	{ 0x00000067, 0x01FF },    /* R103   - Always On Triggers Sequence Select 2 */
+	{ 0x00000068, 0x01FF },    /* R104   - Always On Triggers Sequence Select 3 */
+	{ 0x00000069, 0x01FF },    /* R105   - Always On Triggers Sequence Select 4 */
+	{ 0x0000006A, 0x01FF },    /* R106   - Always On Triggers Sequence Select 5 */
+	{ 0x0000006B, 0x01FF },    /* R107   - Always On Triggers Sequence Select 6 */
+	{ 0x0000006E, 0x01FF },    /* R110   - Trigger Sequence Select 32 */
+	{ 0x0000006F, 0x01FF },    /* R111   - Trigger Sequence Select 33 */
+	{ 0x00000090, 0x0000 },    /* R144   - Haptics Control 1 */
+	{ 0x00000091, 0x7FFF },    /* R145   - Haptics Control 2 */
+	{ 0x00000092, 0x0000 },    /* R146   - Haptics phase 1 intensity */
+	{ 0x00000093, 0x0000 },    /* R147   - Haptics phase 1 duration */
+	{ 0x00000094, 0x0000 },    /* R148   - Haptics phase 2 intensity */
+	{ 0x00000095, 0x0000 },    /* R149   - Haptics phase 2 duration */
+	{ 0x00000096, 0x0000 },    /* R150   - Haptics phase 3 intensity */
+	{ 0x00000097, 0x0000 },    /* R151   - Haptics phase 3 duration */
+	{ 0x00000100, 0x0002 },    /* R256   - Clock 32k 1 */
+	{ 0x00000101, 0x0304 },    /* R257   - System Clock 1 */
+	{ 0x00000102, 0x0011 },    /* R258   - Sample rate 1 */
+	{ 0x00000103, 0x0011 },    /* R259   - Sample rate 2 */
+	{ 0x00000104, 0x0011 },    /* R260   - Sample rate 3 */
+	{ 0x00000112, 0x0305 },    /* R274   - Async clock 1 */
+	{ 0x00000113, 0x0011 },    /* R275   - Async sample rate 1 */
+	{ 0x00000114, 0x0011 },    /* R276   - Async sample rate 2 */
+	{ 0x00000149, 0x0000 },    /* R329   - Output system clock */
+	{ 0x0000014A, 0x0000 },    /* R330   - Output async clock */
+	{ 0x00000152, 0x0000 },    /* R338   - Rate Estimator 1 */
+	{ 0x00000153, 0x0000 },    /* R339   - Rate Estimator 2 */
+	{ 0x00000154, 0x0000 },    /* R340   - Rate Estimator 3 */
+	{ 0x00000155, 0x0000 },    /* R341   - Rate Estimator 4 */
+	{ 0x00000156, 0x0000 },    /* R342   - Rate Estimator 5 */
+	{ 0x00000161, 0x0000 },    /* R353   - Dynamic Frequency Scaling 1 */
+	{ 0x00000171, 0x0002 },    /* R369   - FLL1 Control 1 */
+	{ 0x00000172, 0x0008 },    /* R370   - FLL1 Control 2 */
+	{ 0x00000173, 0x0018 },    /* R371   - FLL1 Control 3 */
+	{ 0x00000174, 0x007D },    /* R372   - FLL1 Control 4 */
+	{ 0x00000175, 0x0004 },    /* R373   - FLL1 Control 5 */
+	{ 0x00000176, 0x0000 },    /* R374   - FLL1 Control 6 */
+	{ 0x00000177, 0x0181 },    /* R375   - FLL1 Loop Filter Test 1 */
+	{ 0x00000178, 0x0000 },    /* R376   - FLL1 NCO Test 0 */
+	{ 0x00000179, 0x0000 },    /* R377   - FLL1 Control 7 */
+	{ 0x00000181, 0x0000 },    /* R385   - FLL1 Synchroniser 1 */
+	{ 0x00000182, 0x0000 },    /* R386   - FLL1 Synchroniser 2 */
+	{ 0x00000183, 0x0000 },    /* R387   - FLL1 Synchroniser 3 */
+	{ 0x00000184, 0x0000 },    /* R388   - FLL1 Synchroniser 4 */
+	{ 0x00000185, 0x0000 },    /* R389   - FLL1 Synchroniser 5 */
+	{ 0x00000186, 0x0000 },    /* R390   - FLL1 Synchroniser 6 */
+	{ 0x00000187, 0x0001 },    /* R391   - FLL1 Synchroniser 7 */
+	{ 0x00000189, 0x0000 },    /* R393   - FLL1 Spread Spectrum */
+	{ 0x0000018A, 0x0004 },    /* R394   - FLL1 GPIO Clock */
+	{ 0x00000191, 0x0000 },    /* R401   - FLL2 Control 1 */
+	{ 0x00000192, 0x0008 },    /* R402   - FLL2 Control 2 */
+	{ 0x00000193, 0x0018 },    /* R403   - FLL2 Control 3 */
+	{ 0x00000194, 0x007D },    /* R404   - FLL2 Control 4 */
+	{ 0x00000195, 0x0004 },    /* R405   - FLL2 Control 5 */
+	{ 0x00000196, 0x0000 },    /* R406   - FLL2 Control 6 */
+	{ 0x00000197, 0x0000 },    /* R407   - FLL2 Loop Filter Test 1 */
+	{ 0x00000198, 0x0000 },    /* R408   - FLL2 NCO Test 0 */
+	{ 0x00000199, 0x0000 },    /* R409   - FLL2 Control 7 */
+	{ 0x000001A1, 0x0000 },    /* R417   - FLL2 Synchroniser 1 */
+	{ 0x000001A2, 0x0000 },    /* R418   - FLL2 Synchroniser 2 */
+	{ 0x000001A3, 0x0000 },    /* R419   - FLL2 Synchroniser 3 */
+	{ 0x000001A4, 0x0000 },    /* R420   - FLL2 Synchroniser 4 */
+	{ 0x000001A5, 0x0000 },    /* R421   - FLL2 Synchroniser 5 */
+	{ 0x000001A6, 0x0000 },    /* R422   - FLL2 Synchroniser 6 */
+	{ 0x000001A7, 0x0001 },    /* R423   - FLL2 Synchroniser 7 */
+	{ 0x000001A9, 0x0000 },    /* R425   - FLL2 Spread Spectrum */
+	{ 0x000001AA, 0x0004 },    /* R426   - FLL2 GPIO Clock */
+	{ 0x00000200, 0x0006 },    /* R512   - Mic Charge Pump 1 */
+	{ 0x00000210, 0x00D4 },    /* R528   - LDO1 Control 1 */
+	{ 0x00000212, 0x0000 },    /* R530   - LDO1 Control 2 */
+	{ 0x00000213, 0x0344 },    /* R531   - LDO2 Control 1 */
+	{ 0x00000218, 0x01A6 },    /* R536   - Mic Bias Ctrl 1 */
+	{ 0x00000219, 0x01A6 },    /* R537   - Mic Bias Ctrl 2 */
+	{ 0x0000021A, 0x01A6 },    /* R538   - Mic Bias Ctrl 3 */
+	{ 0x00000293, 0x0080 },    /* R659   - Accessory Detect Mode 1 */
+	{ 0x0000029B, 0x0000 },    /* R667   - Headphone Detect 1 */
+	{ 0x0000029C, 0x0000 },    /* R668   - Headphone Detect 2 */
+	{ 0x000002A2, 0x0000 },    /* R674   - Micd Clamp control */
+	{ 0x000002A3, 0x1102 },    /* R675   - Mic Detect 1 */
+	{ 0x000002A4, 0x009F },    /* R676   - Mic Detect 2 */
+	{ 0x000002A5, 0x0000 },    /* R677   - Mic Detect 3 */
+	{ 0x000002A6, 0x3737 },    /* R678   - Mic Detect Level 1 */
+	{ 0x000002A7, 0x2C37 },    /* R679   - Mic Detect Level 2 */
+	{ 0x000002A8, 0x1422 },    /* R680   - Mic Detect Level 3 */
+	{ 0x000002A9, 0x030A },    /* R681   - Mic Detect Level 4 */
+	{ 0x000002AB, 0x0000 },    /* R683   - Mic Detect 4 */
+	{ 0x000002CB, 0x0000 },    /* R715   - Isolation control */
+	{ 0x000002D3, 0x0000 },    /* R723   - Jack detect analogue */
+	{ 0x00000300, 0x0000 },    /* R768   - Input Enables */
+	{ 0x00000308, 0x0000 },    /* R776   - Input Rate */
+	{ 0x00000309, 0x0022 },    /* R777   - Input Volume Ramp */
+	{ 0x0000030C, 0x0002 },    /* R780   - HPF Control */
+	{ 0x00000310, 0x2080 },    /* R784   - IN1L Control */
+	{ 0x00000311, 0x0180 },    /* R785   - ADC Digital Volume 1L */
+	{ 0x00000312, 0x0000 },    /* R786   - DMIC1L Control */
+	{ 0x00000314, 0x0080 },    /* R788   - IN1R Control */
+	{ 0x00000315, 0x0180 },    /* R789   - ADC Digital Volume 1R */
+	{ 0x00000316, 0x0000 },    /* R790   - DMIC1R Control */
+	{ 0x00000318, 0x2080 },    /* R792   - IN2L Control */
+	{ 0x00000319, 0x0180 },    /* R793   - ADC Digital Volume 2L */
+	{ 0x0000031A, 0x0000 },    /* R794   - DMIC2L Control */
+	{ 0x00000400, 0x0000 },    /* R1024  - Output Enables 1 */
+	{ 0x00000408, 0x0000 },    /* R1032  - Output Rate 1 */
+	{ 0x00000409, 0x0022 },    /* R1033  - Output Volume Ramp */
+	{ 0x00000410, 0x2080 },    /* R1040  - Output Path Config 1L */
+	{ 0x00000411, 0x0180 },    /* R1041  - DAC Digital Volume 1L */
+	{ 0x00000413, 0x0001 },    /* R1043  - Noise Gate Select 1L */
+	{ 0x00000414, 0x0080 },    /* R1044  - Output Path Config 1R */
+	{ 0x00000415, 0x0180 },    /* R1045  - DAC Digital Volume 1R */
+	{ 0x00000417, 0x0002 },    /* R1047  - Noise Gate Select 1R */
+	{ 0x00000418, 0x2080 },    /* R1048  - Output Path Config 2L */
+	{ 0x00000419, 0x0180 },    /* R1049  - DAC Digital Volume 2L */
+	{ 0x0000041B, 0x0004 },    /* R1051  - Noise Gate Select 2L */
+	{ 0x0000041C, 0x0080 },    /* R1052  - Output Path Config 2R */
+	{ 0x0000041D, 0x0180 },    /* R1053  - DAC Digital Volume 2R */
+	{ 0x0000041F, 0x0008 },    /* R1055  - Noise Gate Select 2R */
+	{ 0x00000420, 0x2080 },    /* R1056  - Output Path Config 3L */
+	{ 0x00000421, 0x0180 },    /* R1057  - DAC Digital Volume 3L */
+	{ 0x00000423, 0x0010 },    /* R1059  - Noise Gate Select 3L */
+	{ 0x00000428, 0x0000 },    /* R1064  - Output Path Config 4L */
+	{ 0x00000429, 0x0180 },    /* R1065  - DAC Digital Volume 4L */
+	{ 0x0000042B, 0x0040 },    /* R1067  - Noise Gate Select 4L */
+	{ 0x0000042C, 0x0000 },    /* R1068  - Output Path Config 4R */
+	{ 0x0000042D, 0x0180 },    /* R1069  - DAC Digital Volume 4R */
+	{ 0x0000042F, 0x0080 },    /* R1071  - Noise Gate Select 4R */
+	{ 0x00000430, 0x0000 },    /* R1072  - Output Path Config 5L */
+	{ 0x00000431, 0x0180 },    /* R1073  - DAC Digital Volume 5L */
+	{ 0x00000433, 0x0100 },    /* R1075  - Noise Gate Select 5L */
+	{ 0x00000434, 0x0000 },    /* R1076  - Output Path Config 5R */
+	{ 0x00000435, 0x0180 },    /* R1077  - DAC Digital Volume 5R */
+	{ 0x00000437, 0x0200 },    /* R1079  - Noise Gate Select 5R */
+	{ 0x00000440, 0x8FFF },    /* R1088  - DRE Enable */
+	{ 0x00000441, 0xC759 },    /* R1089  - DRE Control 1 */
+	{ 0x00000442, 0x2A08 },    /* R1090  - DRE Control 2 */
+	{ 0x00000443, 0x5CFA },    /* R1091  - DRE Control 3 */
+	{ 0x00000448, 0x03EF },    /* R1096  - EDRE Enable */
+	{ 0x00000450, 0x0000 },    /* R1104  - DAC AEC Control 1 */
+	{ 0x00000451, 0x0000 },    /* R1105  - DAC AEC Control 2 */
+	{ 0x00000458, 0x0000 },    /* R1112  - Noise Gate Control */
+	{ 0x00000490, 0x0069 },    /* R1168  - PDM SPK1 CTRL 1 */
+	{ 0x00000491, 0x0000 },    /* R1169  - PDM SPK1 CTRL 2 */
+	{ 0x0000049A, 0x0000 },    /* R1178  - HP_TEST_CTRL_13 */
+	{ 0x00000500, 0x000C },    /* R1280  - AIF1 BCLK Ctrl */
+	{ 0x00000501, 0x0008 },    /* R1281  - AIF1 Tx Pin Ctrl */
+	{ 0x00000502, 0x0000 },    /* R1282  - AIF1 Rx Pin Ctrl */
+	{ 0x00000503, 0x0000 },    /* R1283  - AIF1 Rate Ctrl */
+	{ 0x00000504, 0x0000 },    /* R1284  - AIF1 Format */
+	{ 0x00000506, 0x0040 },    /* R1286  - AIF1 Rx BCLK Rate */
+	{ 0x00000507, 0x1818 },    /* R1287  - AIF1 Frame Ctrl 1 */
+	{ 0x00000508, 0x1818 },    /* R1288  - AIF1 Frame Ctrl 2 */
+	{ 0x00000509, 0x0000 },    /* R1289  - AIF1 Frame Ctrl 3 */
+	{ 0x0000050A, 0x0001 },    /* R1290  - AIF1 Frame Ctrl 4 */
+	{ 0x0000050B, 0x0002 },    /* R1291  - AIF1 Frame Ctrl 5 */
+	{ 0x0000050C, 0x0003 },    /* R1292  - AIF1 Frame Ctrl 6 */
+	{ 0x0000050D, 0x0004 },    /* R1293  - AIF1 Frame Ctrl 7 */
+	{ 0x0000050E, 0x0005 },    /* R1294  - AIF1 Frame Ctrl 8 */
+	{ 0x00000511, 0x0000 },    /* R1297  - AIF1 Frame Ctrl 11 */
+	{ 0x00000512, 0x0001 },    /* R1298  - AIF1 Frame Ctrl 12 */
+	{ 0x00000513, 0x0002 },    /* R1299  - AIF1 Frame Ctrl 13 */
+	{ 0x00000514, 0x0003 },    /* R1300  - AIF1 Frame Ctrl 14 */
+	{ 0x00000515, 0x0004 },    /* R1301  - AIF1 Frame Ctrl 15 */
+	{ 0x00000516, 0x0005 },    /* R1302  - AIF1 Frame Ctrl 16 */
+	{ 0x00000519, 0x0000 },    /* R1305  - AIF1 Tx Enables */
+	{ 0x0000051A, 0x0000 },    /* R1306  - AIF1 Rx Enables */
+	{ 0x00000540, 0x000C },    /* R1344  - AIF2 BCLK Ctrl */
+	{ 0x00000541, 0x0008 },    /* R1345  - AIF2 Tx Pin Ctrl */
+	{ 0x00000542, 0x0000 },    /* R1346  - AIF2 Rx Pin Ctrl */
+	{ 0x00000543, 0x0000 },    /* R1347  - AIF2 Rate Ctrl */
+	{ 0x00000544, 0x0000 },    /* R1348  - AIF2 Format */
+	{ 0x00000546, 0x0040 },    /* R1350  - AIF2 Rx BCLK Rate */
+	{ 0x00000547, 0x1818 },    /* R1351  - AIF2 Frame Ctrl 1 */
+	{ 0x00000548, 0x1818 },    /* R1352  - AIF2 Frame Ctrl 2 */
+	{ 0x00000549, 0x0000 },    /* R1353  - AIF2 Frame Ctrl 3 */
+	{ 0x0000054A, 0x0001 },    /* R1354  - AIF2 Frame Ctrl 4 */
+	{ 0x0000054B, 0x0002 },    /* R1355  - AIF2 Frame Ctrl 5 */
+	{ 0x0000054C, 0x0003 },    /* R1356  - AIF2 Frame Ctrl 6 */
+	{ 0x0000054D, 0x0004 },    /* R1357  - AIF2 Frame Ctrl 7 */
+	{ 0x0000054E, 0x0005 },    /* R1358  - AIF2 Frame Ctrl 8 */
+	{ 0x00000551, 0x0000 },    /* R1361  - AIF2 Frame Ctrl 11 */
+	{ 0x00000552, 0x0001 },    /* R1362  - AIF2 Frame Ctrl 12 */
+	{ 0x00000553, 0x0002 },    /* R1363  - AIF2 Frame Ctrl 13 */
+	{ 0x00000554, 0x0003 },    /* R1364  - AIF2 Frame Ctrl 14 */
+	{ 0x00000555, 0x0004 },    /* R1365  - AIF2 Frame Ctrl 15 */
+	{ 0x00000556, 0x0005 },    /* R1366  - AIF2 Frame Ctrl 16 */
+	{ 0x00000559, 0x0000 },    /* R1369  - AIF2 Tx Enables */
+	{ 0x0000055A, 0x0000 },    /* R1370  - AIF2 Rx Enables */
+	{ 0x00000580, 0x000C },    /* R1408  - AIF3 BCLK Ctrl */
+	{ 0x00000581, 0x0008 },    /* R1409  - AIF3 Tx Pin Ctrl */
+	{ 0x00000582, 0x0000 },    /* R1410  - AIF3 Rx Pin Ctrl */
+	{ 0x00000583, 0x0000 },    /* R1411  - AIF3 Rate Ctrl */
+	{ 0x00000584, 0x0000 },    /* R1412  - AIF3 Format */
+	{ 0x00000586, 0x0040 },    /* R1414  - AIF3 Rx BCLK Rate */
+	{ 0x00000587, 0x1818 },    /* R1415  - AIF3 Frame Ctrl 1 */
+	{ 0x00000588, 0x1818 },    /* R1416  - AIF3 Frame Ctrl 2 */
+	{ 0x00000589, 0x0000 },    /* R1417  - AIF3 Frame Ctrl 3 */
+	{ 0x0000058A, 0x0001 },    /* R1418  - AIF3 Frame Ctrl 4 */
+	{ 0x00000591, 0x0000 },    /* R1425  - AIF3 Frame Ctrl 11 */
+	{ 0x00000592, 0x0001 },    /* R1426  - AIF3 Frame Ctrl 12 */
+	{ 0x00000599, 0x0000 },    /* R1433  - AIF3 Tx Enables */
+	{ 0x0000059A, 0x0000 },    /* R1434  - AIF3 Rx Enables */
+	{ 0x000005C2, 0x0000 },    /* R1474  - SPD1 TX Control */
+	{ 0x000005C3, 0x0000 },    /* R1475  - SPD1 TX Channel Status 1 */
+	{ 0x000005C4, 0x0B01 },    /* R1476  - SPD1 TX Channel Status 2 */
+	{ 0x000005C5, 0x0000 },    /* R1477  - SPD1 TX Channel Status 3 */
+	{ 0x000005E3, 0x0004 },    /* R1507  - SLIMbus Framer Ref Gear */
+	{ 0x000005E5, 0x0000 },    /* R1509  - SLIMbus Rates 1 */
+	{ 0x000005E6, 0x0000 },    /* R1510  - SLIMbus Rates 2 */
+	{ 0x000005E9, 0x0000 },    /* R1513  - SLIMbus Rates 5 */
+	{ 0x000005EA, 0x0000 },    /* R1514  - SLIMbus Rates 6 */
+	{ 0x000005EB, 0x0000 },    /* R1515  - SLIMbus Rates 7 */
+	{ 0x000005F5, 0x0000 },    /* R1525  - SLIMbus RX Channel Enable */
+	{ 0x000005F6, 0x0000 },    /* R1526  - SLIMbus TX Channel Enable */
+	{ 0x00000640, 0x0000 },    /* R1600  - PWM1MIX Input 1 Source */
+	{ 0x00000641, 0x0080 },    /* R1601  - PWM1MIX Input 1 Volume */
+	{ 0x00000642, 0x0000 },    /* R1602  - PWM1MIX Input 2 Source */
+	{ 0x00000643, 0x0080 },    /* R1603  - PWM1MIX Input 2 Volume */
+	{ 0x00000644, 0x0000 },    /* R1604  - PWM1MIX Input 3 Source */
+	{ 0x00000645, 0x0080 },    /* R1605  - PWM1MIX Input 3 Volume */
+	{ 0x00000646, 0x0000 },    /* R1606  - PWM1MIX Input 4 Source */
+	{ 0x00000647, 0x0080 },    /* R1607  - PWM1MIX Input 4 Volume */
+	{ 0x00000648, 0x0000 },    /* R1608  - PWM2MIX Input 1 Source */
+	{ 0x00000649, 0x0080 },    /* R1609  - PWM2MIX Input 1 Volume */
+	{ 0x0000064A, 0x0000 },    /* R1610  - PWM2MIX Input 2 Source */
+	{ 0x0000064B, 0x0080 },    /* R1611  - PWM2MIX Input 2 Volume */
+	{ 0x0000064C, 0x0000 },    /* R1612  - PWM2MIX Input 3 Source */
+	{ 0x0000064D, 0x0080 },    /* R1613  - PWM2MIX Input 3 Volume */
+	{ 0x0000064E, 0x0000 },    /* R1614  - PWM2MIX Input 4 Source */
+	{ 0x0000064F, 0x0080 },    /* R1615  - PWM2MIX Input 4 Volume */
+	{ 0x00000680, 0x0000 },    /* R1664  - OUT1LMIX Input 1 Source */
+	{ 0x00000681, 0x0080 },    /* R1665  - OUT1LMIX Input 1 Volume */
+	{ 0x00000682, 0x0000 },    /* R1666  - OUT1LMIX Input 2 Source */
+	{ 0x00000683, 0x0080 },    /* R1667  - OUT1LMIX Input 2 Volume */
+	{ 0x00000684, 0x0000 },    /* R1668  - OUT1LMIX Input 3 Source */
+	{ 0x00000685, 0x0080 },    /* R1669  - OUT1LMIX Input 3 Volume */
+	{ 0x00000686, 0x0000 },    /* R1670  - OUT1LMIX Input 4 Source */
+	{ 0x00000687, 0x0080 },    /* R1671  - OUT1LMIX Input 4 Volume */
+	{ 0x00000688, 0x0000 },    /* R1672  - OUT1RMIX Input 1 Source */
+	{ 0x00000689, 0x0080 },    /* R1673  - OUT1RMIX Input 1 Volume */
+	{ 0x0000068A, 0x0000 },    /* R1674  - OUT1RMIX Input 2 Source */
+	{ 0x0000068B, 0x0080 },    /* R1675  - OUT1RMIX Input 2 Volume */
+	{ 0x0000068C, 0x0000 },    /* R1676  - OUT1RMIX Input 3 Source */
+	{ 0x0000068D, 0x0080 },    /* R1677  - OUT1RMIX Input 3 Volume */
+	{ 0x0000068E, 0x0000 },    /* R1678  - OUT1RMIX Input 4 Source */
+	{ 0x0000068F, 0x0080 },    /* R1679  - OUT1RMIX Input 4 Volume */
+	{ 0x00000690, 0x0000 },    /* R1680  - OUT2LMIX Input 1 Source */
+	{ 0x00000691, 0x0080 },    /* R1681  - OUT2LMIX Input 1 Volume */
+	{ 0x00000692, 0x0000 },    /* R1682  - OUT2LMIX Input 2 Source */
+	{ 0x00000693, 0x0080 },    /* R1683  - OUT2LMIX Input 2 Volume */
+	{ 0x00000694, 0x0000 },    /* R1684  - OUT2LMIX Input 3 Source */
+	{ 0x00000695, 0x0080 },    /* R1685  - OUT2LMIX Input 3 Volume */
+	{ 0x00000696, 0x0000 },    /* R1686  - OUT2LMIX Input 4 Source */
+	{ 0x00000697, 0x0080 },    /* R1687  - OUT2LMIX Input 4 Volume */
+	{ 0x00000698, 0x0000 },    /* R1688  - OUT2RMIX Input 1 Source */
+	{ 0x00000699, 0x0080 },    /* R1689  - OUT2RMIX Input 1 Volume */
+	{ 0x0000069A, 0x0000 },    /* R1690  - OUT2RMIX Input 2 Source */
+	{ 0x0000069B, 0x0080 },    /* R1691  - OUT2RMIX Input 2 Volume */
+	{ 0x0000069C, 0x0000 },    /* R1692  - OUT2RMIX Input 3 Source */
+	{ 0x0000069D, 0x0080 },    /* R1693  - OUT2RMIX Input 3 Volume */
+	{ 0x0000069E, 0x0000 },    /* R1694  - OUT2RMIX Input 4 Source */
+	{ 0x0000069F, 0x0080 },    /* R1695  - OUT2RMIX Input 4 Volume */
+	{ 0x000006A0, 0x0000 },    /* R1696  - OUT3LMIX Input 1 Source */
+	{ 0x000006A1, 0x0080 },    /* R1697  - OUT3LMIX Input 1 Volume */
+	{ 0x000006A2, 0x0000 },    /* R1698  - OUT3LMIX Input 2 Source */
+	{ 0x000006A3, 0x0080 },    /* R1699  - OUT3LMIX Input 2 Volume */
+	{ 0x000006A4, 0x0000 },    /* R1700  - OUT3LMIX Input 3 Source */
+	{ 0x000006A5, 0x0080 },    /* R1701  - OUT3LMIX Input 3 Volume */
+	{ 0x000006A6, 0x0000 },    /* R1702  - OUT3LMIX Input 4 Source */
+	{ 0x000006A7, 0x0080 },    /* R1703  - OUT3LMIX Input 4 Volume */
+	{ 0x000006B0, 0x0000 },    /* R1712  - OUT4LMIX Input 1 Source */
+	{ 0x000006B1, 0x0080 },    /* R1713  - OUT4LMIX Input 1 Volume */
+	{ 0x000006B2, 0x0000 },    /* R1714  - OUT4LMIX Input 2 Source */
+	{ 0x000006B3, 0x0080 },    /* R1715  - OUT4LMIX Input 2 Volume */
+	{ 0x000006B4, 0x0000 },    /* R1716  - OUT4LMIX Input 3 Source */
+	{ 0x000006B5, 0x0080 },    /* R1717  - OUT4LMIX Input 3 Volume */
+	{ 0x000006B6, 0x0000 },    /* R1718  - OUT4LMIX Input 4 Source */
+	{ 0x000006B7, 0x0080 },    /* R1719  - OUT4LMIX Input 4 Volume */
+	{ 0x000006B8, 0x0000 },    /* R1720  - OUT4RMIX Input 1 Source */
+	{ 0x000006B9, 0x0080 },    /* R1721  - OUT4RMIX Input 1 Volume */
+	{ 0x000006BA, 0x0000 },    /* R1722  - OUT4RMIX Input 2 Source */
+	{ 0x000006BB, 0x0080 },    /* R1723  - OUT4RMIX Input 2 Volume */
+	{ 0x000006BC, 0x0000 },    /* R1724  - OUT4RMIX Input 3 Source */
+	{ 0x000006BD, 0x0080 },    /* R1725  - OUT4RMIX Input 3 Volume */
+	{ 0x000006BE, 0x0000 },    /* R1726  - OUT4RMIX Input 4 Source */
+	{ 0x000006BF, 0x0080 },    /* R1727  - OUT4RMIX Input 4 Volume */
+	{ 0x000006C0, 0x0000 },    /* R1728  - OUT5LMIX Input 1 Source */
+	{ 0x000006C1, 0x0080 },    /* R1729  - OUT5LMIX Input 1 Volume */
+	{ 0x000006C2, 0x0000 },    /* R1730  - OUT5LMIX Input 2 Source */
+	{ 0x000006C3, 0x0080 },    /* R1731  - OUT5LMIX Input 2 Volume */
+	{ 0x000006C4, 0x0000 },    /* R1732  - OUT5LMIX Input 3 Source */
+	{ 0x000006C5, 0x0080 },    /* R1733  - OUT5LMIX Input 3 Volume */
+	{ 0x000006C6, 0x0000 },    /* R1734  - OUT5LMIX Input 4 Source */
+	{ 0x000006C7, 0x0080 },    /* R1735  - OUT5LMIX Input 4 Volume */
+	{ 0x000006C8, 0x0000 },    /* R1736  - OUT5RMIX Input 1 Source */
+	{ 0x000006C9, 0x0080 },    /* R1737  - OUT5RMIX Input 1 Volume */
+	{ 0x000006CA, 0x0000 },    /* R1738  - OUT5RMIX Input 2 Source */
+	{ 0x000006CB, 0x0080 },    /* R1739  - OUT5RMIX Input 2 Volume */
+	{ 0x000006CC, 0x0000 },    /* R1740  - OUT5RMIX Input 3 Source */
+	{ 0x000006CD, 0x0080 },    /* R1741  - OUT5RMIX Input 3 Volume */
+	{ 0x000006CE, 0x0000 },    /* R1742  - OUT5RMIX Input 4 Source */
+	{ 0x000006CF, 0x0080 },    /* R1743  - OUT5RMIX Input 4 Volume */
+	{ 0x00000700, 0x0000 },    /* R1792  - AIF1TX1MIX Input 1 Source */
+	{ 0x00000701, 0x0080 },    /* R1793  - AIF1TX1MIX Input 1 Volume */
+	{ 0x00000702, 0x0000 },    /* R1794  - AIF1TX1MIX Input 2 Source */
+	{ 0x00000703, 0x0080 },    /* R1795  - AIF1TX1MIX Input 2 Volume */
+	{ 0x00000704, 0x0000 },    /* R1796  - AIF1TX1MIX Input 3 Source */
+	{ 0x00000705, 0x0080 },    /* R1797  - AIF1TX1MIX Input 3 Volume */
+	{ 0x00000706, 0x0000 },    /* R1798  - AIF1TX1MIX Input 4 Source */
+	{ 0x00000707, 0x0080 },    /* R1799  - AIF1TX1MIX Input 4 Volume */
+	{ 0x00000708, 0x0000 },    /* R1800  - AIF1TX2MIX Input 1 Source */
+	{ 0x00000709, 0x0080 },    /* R1801  - AIF1TX2MIX Input 1 Volume */
+	{ 0x0000070A, 0x0000 },    /* R1802  - AIF1TX2MIX Input 2 Source */
+	{ 0x0000070B, 0x0080 },    /* R1803  - AIF1TX2MIX Input 2 Volume */
+	{ 0x0000070C, 0x0000 },    /* R1804  - AIF1TX2MIX Input 3 Source */
+	{ 0x0000070D, 0x0080 },    /* R1805  - AIF1TX2MIX Input 3 Volume */
+	{ 0x0000070E, 0x0000 },    /* R1806  - AIF1TX2MIX Input 4 Source */
+	{ 0x0000070F, 0x0080 },    /* R1807  - AIF1TX2MIX Input 4 Volume */
+	{ 0x00000710, 0x0000 },    /* R1808  - AIF1TX3MIX Input 1 Source */
+	{ 0x00000711, 0x0080 },    /* R1809  - AIF1TX3MIX Input 1 Volume */
+	{ 0x00000712, 0x0000 },    /* R1810  - AIF1TX3MIX Input 2 Source */
+	{ 0x00000713, 0x0080 },    /* R1811  - AIF1TX3MIX Input 2 Volume */
+	{ 0x00000714, 0x0000 },    /* R1812  - AIF1TX3MIX Input 3 Source */
+	{ 0x00000715, 0x0080 },    /* R1813  - AIF1TX3MIX Input 3 Volume */
+	{ 0x00000716, 0x0000 },    /* R1814  - AIF1TX3MIX Input 4 Source */
+	{ 0x00000717, 0x0080 },    /* R1815  - AIF1TX3MIX Input 4 Volume */
+	{ 0x00000718, 0x0000 },    /* R1816  - AIF1TX4MIX Input 1 Source */
+	{ 0x00000719, 0x0080 },    /* R1817  - AIF1TX4MIX Input 1 Volume */
+	{ 0x0000071A, 0x0000 },    /* R1818  - AIF1TX4MIX Input 2 Source */
+	{ 0x0000071B, 0x0080 },    /* R1819  - AIF1TX4MIX Input 2 Volume */
+	{ 0x0000071C, 0x0000 },    /* R1820  - AIF1TX4MIX Input 3 Source */
+	{ 0x0000071D, 0x0080 },    /* R1821  - AIF1TX4MIX Input 3 Volume */
+	{ 0x0000071E, 0x0000 },    /* R1822  - AIF1TX4MIX Input 4 Source */
+	{ 0x0000071F, 0x0080 },    /* R1823  - AIF1TX4MIX Input 4 Volume */
+	{ 0x00000720, 0x0000 },    /* R1824  - AIF1TX5MIX Input 1 Source */
+	{ 0x00000721, 0x0080 },    /* R1825  - AIF1TX5MIX Input 1 Volume */
+	{ 0x00000722, 0x0000 },    /* R1826  - AIF1TX5MIX Input 2 Source */
+	{ 0x00000723, 0x0080 },    /* R1827  - AIF1TX5MIX Input 2 Volume */
+	{ 0x00000724, 0x0000 },    /* R1828  - AIF1TX5MIX Input 3 Source */
+	{ 0x00000725, 0x0080 },    /* R1829  - AIF1TX5MIX Input 3 Volume */
+	{ 0x00000726, 0x0000 },    /* R1830  - AIF1TX5MIX Input 4 Source */
+	{ 0x00000727, 0x0080 },    /* R1831  - AIF1TX5MIX Input 4 Volume */
+	{ 0x00000728, 0x0000 },    /* R1832  - AIF1TX6MIX Input 1 Source */
+	{ 0x00000729, 0x0080 },    /* R1833  - AIF1TX6MIX Input 1 Volume */
+	{ 0x0000072A, 0x0000 },    /* R1834  - AIF1TX6MIX Input 2 Source */
+	{ 0x0000072B, 0x0080 },    /* R1835  - AIF1TX6MIX Input 2 Volume */
+	{ 0x0000072C, 0x0000 },    /* R1836  - AIF1TX6MIX Input 3 Source */
+	{ 0x0000072D, 0x0080 },    /* R1837  - AIF1TX6MIX Input 3 Volume */
+	{ 0x0000072E, 0x0000 },    /* R1838  - AIF1TX6MIX Input 4 Source */
+	{ 0x0000072F, 0x0080 },    /* R1839  - AIF1TX6MIX Input 4 Volume */
+	{ 0x00000740, 0x0000 },    /* R1856  - AIF2TX1MIX Input 1 Source */
+	{ 0x00000741, 0x0080 },    /* R1857  - AIF2TX1MIX Input 1 Volume */
+	{ 0x00000742, 0x0000 },    /* R1858  - AIF2TX1MIX Input 2 Source */
+	{ 0x00000743, 0x0080 },    /* R1859  - AIF2TX1MIX Input 2 Volume */
+	{ 0x00000744, 0x0000 },    /* R1860  - AIF2TX1MIX Input 3 Source */
+	{ 0x00000745, 0x0080 },    /* R1861  - AIF2TX1MIX Input 3 Volume */
+	{ 0x00000746, 0x0000 },    /* R1862  - AIF2TX1MIX Input 4 Source */
+	{ 0x00000747, 0x0080 },    /* R1863  - AIF2TX1MIX Input 4 Volume */
+	{ 0x00000748, 0x0000 },    /* R1864  - AIF2TX2MIX Input 1 Source */
+	{ 0x00000749, 0x0080 },    /* R1865  - AIF2TX2MIX Input 1 Volume */
+	{ 0x0000074A, 0x0000 },    /* R1866  - AIF2TX2MIX Input 2 Source */
+	{ 0x0000074B, 0x0080 },    /* R1867  - AIF2TX2MIX Input 2 Volume */
+	{ 0x0000074C, 0x0000 },    /* R1868  - AIF2TX2MIX Input 3 Source */
+	{ 0x0000074D, 0x0080 },    /* R1869  - AIF2TX2MIX Input 3 Volume */
+	{ 0x0000074E, 0x0000 },    /* R1870  - AIF2TX2MIX Input 4 Source */
+	{ 0x0000074F, 0x0080 },    /* R1871  - AIF2TX2MIX Input 4 Volume */
+	{ 0x00000750, 0x0000 },    /* R1872  - AIF2TX3MIX Input 1 Source */
+	{ 0x00000751, 0x0080 },    /* R1873  - AIF2TX3MIX Input 1 Volume */
+	{ 0x00000752, 0x0000 },    /* R1874  - AIF2TX3MIX Input 2 Source */
+	{ 0x00000753, 0x0080 },    /* R1875  - AIF2TX3MIX Input 2 Volume */
+	{ 0x00000754, 0x0000 },    /* R1876  - AIF2TX3MIX Input 3 Source */
+	{ 0x00000755, 0x0080 },    /* R1877  - AIF2TX3MIX Input 3 Volume */
+	{ 0x00000756, 0x0000 },    /* R1878  - AIF2TX3MIX Input 4 Source */
+	{ 0x00000757, 0x0080 },    /* R1879  - AIF2TX3MIX Input 4 Volume */
+	{ 0x00000758, 0x0000 },    /* R1880  - AIF2TX4MIX Input 1 Source */
+	{ 0x00000759, 0x0080 },    /* R1881  - AIF2TX4MIX Input 1 Volume */
+	{ 0x0000075A, 0x0000 },    /* R1882  - AIF2TX4MIX Input 2 Source */
+	{ 0x0000075B, 0x0080 },    /* R1883  - AIF2TX4MIX Input 2 Volume */
+	{ 0x0000075C, 0x0000 },    /* R1884  - AIF2TX4MIX Input 3 Source */
+	{ 0x0000075D, 0x0080 },    /* R1885  - AIF2TX4MIX Input 3 Volume */
+	{ 0x0000075E, 0x0000 },    /* R1886  - AIF2TX4MIX Input 4 Source */
+	{ 0x0000075F, 0x0080 },    /* R1887  - AIF2TX4MIX Input 4 Volume */
+	{ 0x00000760, 0x0000 },    /* R1888  - AIF2TX5MIX Input 1 Source */
+	{ 0x00000761, 0x0080 },    /* R1889  - AIF2TX5MIX Input 1 Volume */
+	{ 0x00000762, 0x0000 },    /* R1890  - AIF2TX5MIX Input 2 Source */
+	{ 0x00000763, 0x0080 },    /* R1891  - AIF2TX5MIX Input 2 Volume */
+	{ 0x00000764, 0x0000 },    /* R1892  - AIF2TX5MIX Input 3 Source */
+	{ 0x00000765, 0x0080 },    /* R1893  - AIF2TX5MIX Input 3 Volume */
+	{ 0x00000766, 0x0000 },    /* R1894  - AIF2TX5MIX Input 4 Source */
+	{ 0x00000767, 0x0080 },    /* R1895  - AIF2TX5MIX Input 4 Volume */
+	{ 0x00000768, 0x0000 },    /* R1896  - AIF2TX6MIX Input 1 Source */
+	{ 0x00000769, 0x0080 },    /* R1897  - AIF2TX6MIX Input 1 Volume */
+	{ 0x0000076A, 0x0000 },    /* R1898  - AIF2TX6MIX Input 2 Source */
+	{ 0x0000076B, 0x0080 },    /* R1899  - AIF2TX6MIX Input 2 Volume */
+	{ 0x0000076C, 0x0000 },    /* R1900  - AIF2TX6MIX Input 3 Source */
+	{ 0x0000076D, 0x0080 },    /* R1901  - AIF2TX6MIX Input 3 Volume */
+	{ 0x0000076E, 0x0000 },    /* R1902  - AIF2TX6MIX Input 4 Source */
+	{ 0x0000076F, 0x0080 },    /* R1903  - AIF2TX6MIX Input 4 Volume */
+	{ 0x00000780, 0x0000 },    /* R1920  - AIF3TX1MIX Input 1 Source */
+	{ 0x00000781, 0x0080 },    /* R1921  - AIF3TX1MIX Input 1 Volume */
+	{ 0x00000782, 0x0000 },    /* R1922  - AIF3TX1MIX Input 2 Source */
+	{ 0x00000783, 0x0080 },    /* R1923  - AIF3TX1MIX Input 2 Volume */
+	{ 0x00000784, 0x0000 },    /* R1924  - AIF3TX1MIX Input 3 Source */
+	{ 0x00000785, 0x0080 },    /* R1925  - AIF3TX1MIX Input 3 Volume */
+	{ 0x00000786, 0x0000 },    /* R1926  - AIF3TX1MIX Input 4 Source */
+	{ 0x00000787, 0x0080 },    /* R1927  - AIF3TX1MIX Input 4 Volume */
+	{ 0x00000788, 0x0000 },    /* R1928  - AIF3TX2MIX Input 1 Source */
+	{ 0x00000789, 0x0080 },    /* R1929  - AIF3TX2MIX Input 1 Volume */
+	{ 0x0000078A, 0x0000 },    /* R1930  - AIF3TX2MIX Input 2 Source */
+	{ 0x0000078B, 0x0080 },    /* R1931  - AIF3TX2MIX Input 2 Volume */
+	{ 0x0000078C, 0x0000 },    /* R1932  - AIF3TX2MIX Input 3 Source */
+	{ 0x0000078D, 0x0080 },    /* R1933  - AIF3TX2MIX Input 3 Volume */
+	{ 0x0000078E, 0x0000 },    /* R1934  - AIF3TX2MIX Input 4 Source */
+	{ 0x0000078F, 0x0080 },    /* R1935  - AIF3TX2MIX Input 4 Volume */
+	{ 0x000007C0, 0x0000 },    /* R1984  - SLIMTX1MIX Input 1 Source */
+	{ 0x000007C1, 0x0080 },    /* R1985  - SLIMTX1MIX Input 1 Volume */
+	{ 0x000007C8, 0x0000 },    /* R1992  - SLIMTX2MIX Input 1 Source */
+	{ 0x000007C9, 0x0080 },    /* R1993  - SLIMTX2MIX Input 1 Volume */
+	{ 0x000007D0, 0x0000 },    /* R2000  - SLIMTX3MIX Input 1 Source */
+	{ 0x000007D1, 0x0080 },    /* R2001  - SLIMTX3MIX Input 1 Volume */
+	{ 0x000007D8, 0x0000 },    /* R2008  - SLIMTX4MIX Input 1 Source */
+	{ 0x000007D9, 0x0080 },    /* R2009  - SLIMTX4MIX Input 1 Volume */
+	{ 0x000007E0, 0x0000 },    /* R2016  - SLIMTX5MIX Input 1 Source */
+	{ 0x000007E1, 0x0080 },    /* R2017  - SLIMTX5MIX Input 1 Volume */
+	{ 0x000007E8, 0x0000 },    /* R2024  - SLIMTX6MIX Input 1 Source */
+	{ 0x000007E9, 0x0080 },    /* R2025  - SLIMTX6MIX Input 1 Volume */
+	{ 0x00000800, 0x0000 },    /* R2048  - SPDIF1TX1MIX Input 1 Source */
+	{ 0x00000801, 0x0080 },    /* R2049  - SPDIF1TX1MIX Input 1 Volume */
+	{ 0x00000808, 0x0000 },    /* R2056  - SPDIF1TX2MIX Input 1 Source */
+	{ 0x00000809, 0x0080 },    /* R2057  - SPDIF1TX2MIX Input 1 Volume */
+	{ 0x00000880, 0x0000 },    /* R2176  - EQ1MIX Input 1 Source */
+	{ 0x00000881, 0x0080 },    /* R2177  - EQ1MIX Input 1 Volume */
+	{ 0x00000888, 0x0000 },    /* R2184  - EQ2MIX Input 1 Source */
+	{ 0x00000889, 0x0080 },    /* R2185  - EQ2MIX Input 1 Volume */
+	{ 0x00000890, 0x0000 },    /* R2192  - EQ3MIX Input 1 Source */
+	{ 0x00000891, 0x0080 },    /* R2193  - EQ3MIX Input 1 Volume */
+	{ 0x00000898, 0x0000 },    /* R2200  - EQ4MIX Input 1 Source */
+	{ 0x00000899, 0x0080 },    /* R2201  - EQ4MIX Input 1 Volume */
+	{ 0x000008C0, 0x0000 },    /* R2240  - DRC1LMIX Input 1 Source */
+	{ 0x000008C1, 0x0080 },    /* R2241  - DRC1LMIX Input 1 Volume */
+	{ 0x000008C8, 0x0000 },    /* R2248  - DRC1RMIX Input 1 Source */
+	{ 0x000008C9, 0x0080 },    /* R2249  - DRC1RMIX Input 1 Volume */
+	{ 0x00000900, 0x0000 },    /* R2304  - HPLP1MIX Input 1 Source */
+	{ 0x00000901, 0x0080 },    /* R2305  - HPLP1MIX Input 1 Volume */
+	{ 0x00000902, 0x0000 },    /* R2306  - HPLP1MIX Input 2 Source */
+	{ 0x00000903, 0x0080 },    /* R2307  - HPLP1MIX Input 2 Volume */
+	{ 0x00000904, 0x0000 },    /* R2308  - HPLP1MIX Input 3 Source */
+	{ 0x00000905, 0x0080 },    /* R2309  - HPLP1MIX Input 3 Volume */
+	{ 0x00000906, 0x0000 },    /* R2310  - HPLP1MIX Input 4 Source */
+	{ 0x00000907, 0x0080 },    /* R2311  - HPLP1MIX Input 4 Volume */
+	{ 0x00000908, 0x0000 },    /* R2312  - HPLP2MIX Input 1 Source */
+	{ 0x00000909, 0x0080 },    /* R2313  - HPLP2MIX Input 1 Volume */
+	{ 0x0000090A, 0x0000 },    /* R2314  - HPLP2MIX Input 2 Source */
+	{ 0x0000090B, 0x0080 },    /* R2315  - HPLP2MIX Input 2 Volume */
+	{ 0x0000090C, 0x0000 },    /* R2316  - HPLP2MIX Input 3 Source */
+	{ 0x0000090D, 0x0080 },    /* R2317  - HPLP2MIX Input 3 Volume */
+	{ 0x0000090E, 0x0000 },    /* R2318  - HPLP2MIX Input 4 Source */
+	{ 0x0000090F, 0x0080 },    /* R2319  - HPLP2MIX Input 4 Volume */
+	{ 0x00000910, 0x0000 },    /* R2320  - HPLP3MIX Input 1 Source */
+	{ 0x00000911, 0x0080 },    /* R2321  - HPLP3MIX Input 1 Volume */
+	{ 0x00000912, 0x0000 },    /* R2322  - HPLP3MIX Input 2 Source */
+	{ 0x00000913, 0x0080 },    /* R2323  - HPLP3MIX Input 2 Volume */
+	{ 0x00000914, 0x0000 },    /* R2324  - HPLP3MIX Input 3 Source */
+	{ 0x00000915, 0x0080 },    /* R2325  - HPLP3MIX Input 3 Volume */
+	{ 0x00000916, 0x0000 },    /* R2326  - HPLP3MIX Input 4 Source */
+	{ 0x00000917, 0x0080 },    /* R2327  - HPLP3MIX Input 4 Volume */
+	{ 0x00000918, 0x0000 },    /* R2328  - HPLP4MIX Input 1 Source */
+	{ 0x00000919, 0x0080 },    /* R2329  - HPLP4MIX Input 1 Volume */
+	{ 0x0000091A, 0x0000 },    /* R2330  - HPLP4MIX Input 2 Source */
+	{ 0x0000091B, 0x0080 },    /* R2331  - HPLP4MIX Input 2 Volume */
+	{ 0x0000091C, 0x0000 },    /* R2332  - HPLP4MIX Input 3 Source */
+	{ 0x0000091D, 0x0080 },    /* R2333  - HPLP4MIX Input 3 Volume */
+	{ 0x0000091E, 0x0000 },    /* R2334  - HPLP4MIX Input 4 Source */
+	{ 0x0000091F, 0x0080 },    /* R2335  - HPLP4MIX Input 4 Volume */
+	{ 0x00000A80, 0x0000 },    /* R2688  - ASRC1LMIX Input 1 Source */
+	{ 0x00000A88, 0x0000 },    /* R2696  - ASRC1RMIX Input 1 Source */
+	{ 0x00000A90, 0x0000 },    /* R2704  - ASRC2LMIX Input 1 Source */
+	{ 0x00000A98, 0x0000 },    /* R2712  - ASRC2RMIX Input 1 Source */
+	{ 0x00000B00, 0x0000 },    /* R2816  - ISRC1DEC1MIX Input 1 Source */
+	{ 0x00000B08, 0x0000 },    /* R2824  - ISRC1DEC2MIX Input 1 Source */
+	{ 0x00000B10, 0x0000 },    /* R2832  - ISRC1DEC3MIX Input 1 Source */
+	{ 0x00000B18, 0x0000 },    /* R2840  - ISRC1DEC4MIX Input 1 Source */
+	{ 0x00000B20, 0x0000 },    /* R2848  - ISRC1INT1MIX Input 1 Source */
+	{ 0x00000B28, 0x0000 },    /* R2856  - ISRC1INT2MIX Input 1 Source */
+	{ 0x00000B30, 0x0000 },    /* R2864  - ISRC1INT3MIX Input 1 Source */
+	{ 0x00000B38, 0x0000 },    /* R2872  - ISRC1INT4MIX Input 1 Source */
+	{ 0x00000B40, 0x0000 },    /* R2880  - ISRC2DEC1MIX Input 1 Source */
+	{ 0x00000B48, 0x0000 },    /* R2888  - ISRC2DEC2MIX Input 1 Source */
+	{ 0x00000B60, 0x0000 },    /* R2912  - ISRC2INT1MIX Input 1 Source */
+	{ 0x00000B68, 0x0000 },    /* R2920  - ISRC2INT2MIX Input 1 Source */
+	{ 0x00000C00, 0xA101 },    /* R3072  - GPIO1 CTRL */
+	{ 0x00000C01, 0xA101 },    /* R3073  - GPIO2 CTRL */
+	{ 0x00000C02, 0xA101 },    /* R3074  - GPIO3 CTRL */
+	{ 0x00000C03, 0xA101 },    /* R3075  - GPIO4 CTRL */
+	{ 0x00000C04, 0xA101 },    /* R3076  - GPIO5 CTRL */
+	{ 0x00000C0F, 0x0400 },    /* R3087  - IRQ CTRL 1 */
+	{ 0x00000C10, 0x1000 },    /* R3088  - GPIO Debounce Config */
+	{ 0x00000C18, 0x0000 },    /* R3096  - GP Switch 1 */
+	{ 0x00000C20, 0x8002 },    /* R3104  - Misc Pad Ctrl 1 */
+	{ 0x00000C21, 0x8001 },    /* R3105  - Misc Pad Ctrl 2 */
+	{ 0x00000C22, 0x0000 },    /* R3106  - Misc Pad Ctrl 3 */
+	{ 0x00000C23, 0x0000 },    /* R3107  - Misc Pad Ctrl 4 */
+	{ 0x00000C24, 0x0000 },    /* R3108  - Misc Pad Ctrl 5 */
+	{ 0x00000C25, 0x0000 },    /* R3109  - Misc Pad Ctrl 6 */
+	{ 0x00000D08, 0xFFFF },    /* R3336  - Interrupt Status 1 Mask */
+	{ 0x00000D09, 0xFFFF },    /* R3337  - Interrupt Status 2 Mask */
+	{ 0x00000D0A, 0xFFFF },    /* R3338  - Interrupt Status 3 Mask */
+	{ 0x00000D0B, 0xFFFF },    /* R3339  - Interrupt Status 4 Mask */
+	{ 0x00000D0C, 0xFEFF },    /* R3340  - Interrupt Status 5 Mask */
+	{ 0x00000D0F, 0x0000 },    /* R3343  - Interrupt Control */
+	{ 0x00000D18, 0xFFFF },    /* R3352  - IRQ2 Status 1 Mask */
+	{ 0x00000D19, 0xFFFF },    /* R3353  - IRQ2 Status 2 Mask */
+	{ 0x00000D1A, 0xFFFF },    /* R3354  - IRQ2 Status 3 Mask */
+	{ 0x00000D1B, 0xFFFF },    /* R3355  - IRQ2 Status 4 Mask */
+	{ 0x00000D1C, 0xFEFF },    /* R3356  - IRQ2 Status 5 Mask */
+	{ 0x00000D1D, 0xFFFF },    /* R3357  - IRQ2 Status 6 Mask */
+	{ 0x00000D1F, 0x0000 },    /* R3359  - IRQ2 Control */
+	{ 0x00000D53, 0xFFFF },    /* R3411  - AOD IRQ Mask IRQ1 */
+	{ 0x00000D54, 0xFFFF },    /* R3412  - AOD IRQ Mask IRQ2 */
+	{ 0x00000D56, 0x0000 },    /* R3414  - Jack detect debounce */
+	{ 0x00000E00, 0x0000 },    /* R3584  - FX_Ctrl1 */
+	{ 0x00000E01, 0x0000 },    /* R3585  - FX_Ctrl2 */
+	{ 0x00000E10, 0x6318 },    /* R3600  - EQ1_1 */
+	{ 0x00000E11, 0x6300 },    /* R3601  - EQ1_2 */
+	{ 0x00000E12, 0x0FC8 },    /* R3602  - EQ1_3 */
+	{ 0x00000E13, 0x03FE },    /* R3603  - EQ1_4 */
+	{ 0x00000E14, 0x00E0 },    /* R3604  - EQ1_5 */
+	{ 0x00000E15, 0x1EC4 },    /* R3605  - EQ1_6 */
+	{ 0x00000E16, 0xF136 },    /* R3606  - EQ1_7 */
+	{ 0x00000E17, 0x0409 },    /* R3607  - EQ1_8 */
+	{ 0x00000E18, 0x04CC },    /* R3608  - EQ1_9 */
+	{ 0x00000E19, 0x1C9B },    /* R3609  - EQ1_10 */
+	{ 0x00000E1A, 0xF337 },    /* R3610  - EQ1_11 */
+	{ 0x00000E1B, 0x040B },    /* R3611  - EQ1_12 */
+	{ 0x00000E1C, 0x0CBB },    /* R3612  - EQ1_13 */
+	{ 0x00000E1D, 0x16F8 },    /* R3613  - EQ1_14 */
+	{ 0x00000E1E, 0xF7D9 },    /* R3614  - EQ1_15 */
+	{ 0x00000E1F, 0x040A },    /* R3615  - EQ1_16 */
+	{ 0x00000E20, 0x1F14 },    /* R3616  - EQ1_17 */
+	{ 0x00000E21, 0x058C },    /* R3617  - EQ1_18 */
+	{ 0x00000E22, 0x0563 },    /* R3618  - EQ1_19 */
+	{ 0x00000E23, 0x4000 },    /* R3619  - EQ1_20 */
+	{ 0x00000E24, 0x0B75 },    /* R3620  - EQ1_21 */
+	{ 0x00000E26, 0x6318 },    /* R3622  - EQ2_1 */
+	{ 0x00000E27, 0x6300 },    /* R3623  - EQ2_2 */
+	{ 0x00000E28, 0x0FC8 },    /* R3624  - EQ2_3 */
+	{ 0x00000E29, 0x03FE },    /* R3625  - EQ2_4 */
+	{ 0x00000E2A, 0x00E0 },    /* R3626  - EQ2_5 */
+	{ 0x00000E2B, 0x1EC4 },    /* R3627  - EQ2_6 */
+	{ 0x00000E2C, 0xF136 },    /* R3628  - EQ2_7 */
+	{ 0x00000E2D, 0x0409 },    /* R3629  - EQ2_8 */
+	{ 0x00000E2E, 0x04CC },    /* R3630  - EQ2_9 */
+	{ 0x00000E2F, 0x1C9B },    /* R3631  - EQ2_10 */
+	{ 0x00000E30, 0xF337 },    /* R3632  - EQ2_11 */
+	{ 0x00000E31, 0x040B },    /* R3633  - EQ2_12 */
+	{ 0x00000E32, 0x0CBB },    /* R3634  - EQ2_13 */
+	{ 0x00000E33, 0x16F8 },    /* R3635  - EQ2_14 */
+	{ 0x00000E34, 0xF7D9 },    /* R3636  - EQ2_15 */
+	{ 0x00000E35, 0x040A },    /* R3637  - EQ2_16 */
+	{ 0x00000E36, 0x1F14 },    /* R3638  - EQ2_17 */
+	{ 0x00000E37, 0x058C },    /* R3639  - EQ2_18 */
+	{ 0x00000E38, 0x0563 },    /* R3640  - EQ2_19 */
+	{ 0x00000E39, 0x4000 },    /* R3641  - EQ2_20 */
+	{ 0x00000E3A, 0x0B75 },    /* R3642  - EQ2_21 */
+	{ 0x00000E3C, 0x6318 },    /* R3644  - EQ3_1 */
+	{ 0x00000E3D, 0x6300 },    /* R3645  - EQ3_2 */
+	{ 0x00000E3E, 0x0FC8 },    /* R3646  - EQ3_3 */
+	{ 0x00000E3F, 0x03FE },    /* R3647  - EQ3_4 */
+	{ 0x00000E40, 0x00E0 },    /* R3648  - EQ3_5 */
+	{ 0x00000E41, 0x1EC4 },    /* R3649  - EQ3_6 */
+	{ 0x00000E42, 0xF136 },    /* R3650  - EQ3_7 */
+	{ 0x00000E43, 0x0409 },    /* R3651  - EQ3_8 */
+	{ 0x00000E44, 0x04CC },    /* R3652  - EQ3_9 */
+	{ 0x00000E45, 0x1C9B },    /* R3653  - EQ3_10 */
+	{ 0x00000E46, 0xF337 },    /* R3654  - EQ3_11 */
+	{ 0x00000E47, 0x040B },    /* R3655  - EQ3_12 */
+	{ 0x00000E48, 0x0CBB },    /* R3656  - EQ3_13 */
+	{ 0x00000E49, 0x16F8 },    /* R3657  - EQ3_14 */
+	{ 0x00000E4A, 0xF7D9 },    /* R3658  - EQ3_15 */
+	{ 0x00000E4B, 0x040A },    /* R3659  - EQ3_16 */
+	{ 0x00000E4C, 0x1F14 },    /* R3660  - EQ3_17 */
+	{ 0x00000E4D, 0x058C },    /* R3661  - EQ3_18 */
+	{ 0x00000E4E, 0x0563 },    /* R3662  - EQ3_19 */
+	{ 0x00000E4F, 0x4000 },    /* R3663  - EQ3_20 */
+	{ 0x00000E50, 0x0B75 },    /* R3664  - EQ3_21 */
+	{ 0x00000E52, 0x6318 },    /* R3666  - EQ4_1 */
+	{ 0x00000E53, 0x6300 },    /* R3667  - EQ4_2 */
+	{ 0x00000E54, 0x0FC8 },    /* R3668  - EQ4_3 */
+	{ 0x00000E55, 0x03FE },    /* R3669  - EQ4_4 */
+	{ 0x00000E56, 0x00E0 },    /* R3670  - EQ4_5 */
+	{ 0x00000E57, 0x1EC4 },    /* R3671  - EQ4_6 */
+	{ 0x00000E58, 0xF136 },    /* R3672  - EQ4_7 */
+	{ 0x00000E59, 0x0409 },    /* R3673  - EQ4_8 */
+	{ 0x00000E5A, 0x04CC },    /* R3674  - EQ4_9 */
+	{ 0x00000E5B, 0x1C9B },    /* R3675  - EQ4_10 */
+	{ 0x00000E5C, 0xF337 },    /* R3676  - EQ4_11 */
+	{ 0x00000E5D, 0x040B },    /* R3677  - EQ4_12 */
+	{ 0x00000E5E, 0x0CBB },    /* R3678  - EQ4_13 */
+	{ 0x00000E5F, 0x16F8 },    /* R3679  - EQ4_14 */
+	{ 0x00000E60, 0xF7D9 },    /* R3680  - EQ4_15 */
+	{ 0x00000E61, 0x040A },    /* R3681  - EQ4_16 */
+	{ 0x00000E62, 0x1F14 },    /* R3682  - EQ4_17 */
+	{ 0x00000E63, 0x058C },    /* R3683  - EQ4_18 */
+	{ 0x00000E64, 0x0563 },    /* R3684  - EQ4_19 */
+	{ 0x00000E65, 0x4000 },    /* R3685  - EQ4_20 */
+	{ 0x00000E66, 0x0B75 },    /* R3686  - EQ4_21 */
+	{ 0x00000E80, 0x0018 },    /* R3712  - DRC1 ctrl1 */
+	{ 0x00000E81, 0x0933 },    /* R3713  - DRC1 ctrl2 */
+	{ 0x00000E82, 0x0018 },    /* R3714  - DRC1 ctrl3 */
+	{ 0x00000E83, 0x0000 },    /* R3715  - DRC1 ctrl4 */
+	{ 0x00000E84, 0x0000 },    /* R3716  - DRC1 ctrl5 */
+	{ 0x00000EC0, 0x0000 },    /* R3776  - HPLPF1_1 */
+	{ 0x00000EC1, 0x0000 },    /* R3777  - HPLPF1_2 */
+	{ 0x00000EC4, 0x0000 },    /* R3780  - HPLPF2_1 */
+	{ 0x00000EC5, 0x0000 },    /* R3781  - HPLPF2_2 */
+	{ 0x00000EC8, 0x0000 },    /* R3784  - HPLPF3_1 */
+	{ 0x00000EC9, 0x0000 },    /* R3785  - HPLPF3_2 */
+	{ 0x00000ECC, 0x0000 },    /* R3788  - HPLPF4_1 */
+	{ 0x00000ECD, 0x0000 },    /* R3789  - HPLPF4_2 */
+	{ 0x00000EE0, 0x0000 },    /* R3808  - ASRC_ENABLE */
+	{ 0x00000EE2, 0x0000 },    /* R3810  - ASRC_RATE1 */
+	{ 0x00000EE3, 0x4000 },    /* R3811  - ASRC_RATE2 */
+	{ 0x00000EF0, 0x0000 },    /* R3824  - ISRC 1 CTRL 1 */
+	{ 0x00000EF1, 0x0001 },    /* R3825  - ISRC 1 CTRL 2 */
+	{ 0x00000EF2, 0x0000 },    /* R3826  - ISRC 1 CTRL 3 */
+	{ 0x00000EF3, 0x0000 },    /* R3827  - ISRC 2 CTRL 1 */
+	{ 0x00000EF4, 0x0001 },    /* R3828  - ISRC 2 CTRL 2 */
+	{ 0x00000EF5, 0x0000 },    /* R3829  - ISRC 2 CTRL 3 */
+	{ 0x00001700, 0x0000 },    /* R5888  - FRF_COEFF_1 */
+	{ 0x00001701, 0x0000 },    /* R5889  - FRF_COEFF_2 */
+	{ 0x00001702, 0x0000 },    /* R5890  - FRF_COEFF_3 */
+	{ 0x00001703, 0x0000 },    /* R5891  - FRF_COEFF_4 */
+	{ 0x00001704, 0x0000 },    /* R5892  - DAC_COMP_1 */
+	{ 0x00001705, 0x0000 },    /* R5893  - DAC_COMP_2 */
+};
+
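+/*
+ * Note: wm8998_reg_default[] above and the register-access helpers below
+ * are the pieces a struct regmap_config would typically reference through
+ * its .reg_defaults/.num_reg_defaults fields and .readable_reg callback;
+ * the actual wiring is expected to live in the wm8998 regmap configuration
+ * rather than in this tables file.
+ */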
+static bool wm8998_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ARIZONA_SOFTWARE_RESET:
+	case ARIZONA_DEVICE_REVISION:
+	case ARIZONA_CTRL_IF_SPI_CFG_1:
+	case ARIZONA_CTRL_IF_I2C1_CFG_1:
+	case ARIZONA_CTRL_IF_I2C1_CFG_2:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+	case ARIZONA_TONE_GENERATOR_1:
+	case ARIZONA_TONE_GENERATOR_2:
+	case ARIZONA_TONE_GENERATOR_3:
+	case ARIZONA_TONE_GENERATOR_4:
+	case ARIZONA_TONE_GENERATOR_5:
+	case ARIZONA_PWM_DRIVE_1:
+	case ARIZONA_PWM_DRIVE_2:
+	case ARIZONA_PWM_DRIVE_3:
+	case ARIZONA_WAKE_CONTROL:
+	case ARIZONA_SEQUENCE_CONTROL:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
+	case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
+	case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
+	case ARIZONA_HAPTICS_CONTROL_1:
+	case ARIZONA_HAPTICS_CONTROL_2:
+	case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_1_DURATION:
+	case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_2_DURATION:
+	case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
+	case ARIZONA_HAPTICS_PHASE_3_DURATION:
+	case ARIZONA_HAPTICS_STATUS:
+	case ARIZONA_CLOCK_32K_1:
+	case ARIZONA_SYSTEM_CLOCK_1:
+	case ARIZONA_SAMPLE_RATE_1:
+	case ARIZONA_SAMPLE_RATE_2:
+	case ARIZONA_SAMPLE_RATE_3:
+	case ARIZONA_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_SAMPLE_RATE_3_STATUS:
+	case ARIZONA_ASYNC_CLOCK_1:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_OUTPUT_SYSTEM_CLOCK:
+	case ARIZONA_OUTPUT_ASYNC_CLOCK:
+	case ARIZONA_RATE_ESTIMATOR_1:
+	case ARIZONA_RATE_ESTIMATOR_2:
+	case ARIZONA_RATE_ESTIMATOR_3:
+	case ARIZONA_RATE_ESTIMATOR_4:
+	case ARIZONA_RATE_ESTIMATOR_5:
+	case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
+	case ARIZONA_FLL1_CONTROL_1:
+	case ARIZONA_FLL1_CONTROL_2:
+	case ARIZONA_FLL1_CONTROL_3:
+	case ARIZONA_FLL1_CONTROL_4:
+	case ARIZONA_FLL1_CONTROL_5:
+	case ARIZONA_FLL1_CONTROL_6:
+	case ARIZONA_FLL1_CONTROL_7:
+	case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
+	case ARIZONA_FLL1_NCO_TEST_0:
+	case ARIZONA_FLL1_SYNCHRONISER_1:
+	case ARIZONA_FLL1_SYNCHRONISER_2:
+	case ARIZONA_FLL1_SYNCHRONISER_3:
+	case ARIZONA_FLL1_SYNCHRONISER_4:
+	case ARIZONA_FLL1_SYNCHRONISER_5:
+	case ARIZONA_FLL1_SYNCHRONISER_6:
+	case ARIZONA_FLL1_SYNCHRONISER_7:
+	case ARIZONA_FLL1_SPREAD_SPECTRUM:
+	case ARIZONA_FLL1_GPIO_CLOCK:
+	case ARIZONA_FLL2_CONTROL_1:
+	case ARIZONA_FLL2_CONTROL_2:
+	case ARIZONA_FLL2_CONTROL_3:
+	case ARIZONA_FLL2_CONTROL_4:
+	case ARIZONA_FLL2_CONTROL_5:
+	case ARIZONA_FLL2_CONTROL_6:
+	case ARIZONA_FLL2_CONTROL_7:
+	case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
+	case ARIZONA_FLL2_NCO_TEST_0:
+	case ARIZONA_FLL2_SYNCHRONISER_1:
+	case ARIZONA_FLL2_SYNCHRONISER_2:
+	case ARIZONA_FLL2_SYNCHRONISER_3:
+	case ARIZONA_FLL2_SYNCHRONISER_4:
+	case ARIZONA_FLL2_SYNCHRONISER_5:
+	case ARIZONA_FLL2_SYNCHRONISER_6:
+	case ARIZONA_FLL2_SYNCHRONISER_7:
+	case ARIZONA_FLL2_SPREAD_SPECTRUM:
+	case ARIZONA_FLL2_GPIO_CLOCK:
+	case ARIZONA_MIC_CHARGE_PUMP_1:
+	case ARIZONA_LDO1_CONTROL_1:
+	case ARIZONA_LDO1_CONTROL_2:
+	case ARIZONA_LDO2_CONTROL_1:
+	case ARIZONA_MIC_BIAS_CTRL_1:
+	case ARIZONA_MIC_BIAS_CTRL_2:
+	case ARIZONA_MIC_BIAS_CTRL_3:
+	case ARIZONA_ACCESSORY_DETECT_MODE_1:
+	case ARIZONA_HEADPHONE_DETECT_1:
+	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_MICD_CLAMP_CONTROL:
+	case ARIZONA_MIC_DETECT_1:
+	case ARIZONA_MIC_DETECT_2:
+	case ARIZONA_MIC_DETECT_3:
+	case ARIZONA_MIC_DETECT_4:
+	case ARIZONA_MIC_DETECT_LEVEL_1:
+	case ARIZONA_MIC_DETECT_LEVEL_2:
+	case ARIZONA_MIC_DETECT_LEVEL_3:
+	case ARIZONA_MIC_DETECT_LEVEL_4:
+	case ARIZONA_ISOLATION_CONTROL:
+	case ARIZONA_JACK_DETECT_ANALOGUE:
+	case ARIZONA_INPUT_ENABLES:
+	case ARIZONA_INPUT_ENABLES_STATUS:
+	case ARIZONA_INPUT_RATE:
+	case ARIZONA_INPUT_VOLUME_RAMP:
+	case ARIZONA_HPF_CONTROL:
+	case ARIZONA_IN1L_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_1L:
+	case ARIZONA_DMIC1L_CONTROL:
+	case ARIZONA_IN1R_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_1R:
+	case ARIZONA_DMIC1R_CONTROL:
+	case ARIZONA_IN2L_CONTROL:
+	case ARIZONA_ADC_DIGITAL_VOLUME_2L:
+	case ARIZONA_DMIC2L_CONTROL:
+	case ARIZONA_OUTPUT_ENABLES_1:
+	case ARIZONA_OUTPUT_STATUS_1:
+	case ARIZONA_RAW_OUTPUT_STATUS_1:
+	case ARIZONA_OUTPUT_RATE_1:
+	case ARIZONA_OUTPUT_VOLUME_RAMP:
+	case ARIZONA_OUTPUT_PATH_CONFIG_1L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_1L:
+	case ARIZONA_NOISE_GATE_SELECT_1L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_1R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_1R:
+	case ARIZONA_NOISE_GATE_SELECT_1R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_2L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_2L:
+	case ARIZONA_NOISE_GATE_SELECT_2L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_2R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_2R:
+	case ARIZONA_NOISE_GATE_SELECT_2R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_3L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_3L:
+	case ARIZONA_NOISE_GATE_SELECT_3L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_4L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_4L:
+	case ARIZONA_NOISE_GATE_SELECT_4L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_4R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_4R:
+	case ARIZONA_NOISE_GATE_SELECT_4R:
+	case ARIZONA_OUTPUT_PATH_CONFIG_5L:
+	case ARIZONA_DAC_DIGITAL_VOLUME_5L:
+	case ARIZONA_NOISE_GATE_SELECT_5L:
+	case ARIZONA_OUTPUT_PATH_CONFIG_5R:
+	case ARIZONA_DAC_DIGITAL_VOLUME_5R:
+	case ARIZONA_NOISE_GATE_SELECT_5R:
+	case ARIZONA_DRE_ENABLE:
+	case ARIZONA_DRE_CONTROL_1:
+	case ARIZONA_DRE_CONTROL_2:
+	case ARIZONA_DRE_CONTROL_3:
+	case ARIZONA_EDRE_ENABLE:
+	case ARIZONA_DAC_AEC_CONTROL_1:
+	case ARIZONA_DAC_AEC_CONTROL_2:
+	case ARIZONA_NOISE_GATE_CONTROL:
+	case ARIZONA_PDM_SPK1_CTRL_1:
+	case ARIZONA_PDM_SPK1_CTRL_2:
+	case ARIZONA_HP_TEST_CTRL_13:
+	case ARIZONA_AIF1_BCLK_CTRL:
+	case ARIZONA_AIF1_TX_PIN_CTRL:
+	case ARIZONA_AIF1_RX_PIN_CTRL:
+	case ARIZONA_AIF1_RATE_CTRL:
+	case ARIZONA_AIF1_FORMAT:
+	case ARIZONA_AIF1_RX_BCLK_RATE:
+	case ARIZONA_AIF1_FRAME_CTRL_1:
+	case ARIZONA_AIF1_FRAME_CTRL_2:
+	case ARIZONA_AIF1_FRAME_CTRL_3:
+	case ARIZONA_AIF1_FRAME_CTRL_4:
+	case ARIZONA_AIF1_FRAME_CTRL_5:
+	case ARIZONA_AIF1_FRAME_CTRL_6:
+	case ARIZONA_AIF1_FRAME_CTRL_7:
+	case ARIZONA_AIF1_FRAME_CTRL_8:
+	case ARIZONA_AIF1_FRAME_CTRL_11:
+	case ARIZONA_AIF1_FRAME_CTRL_12:
+	case ARIZONA_AIF1_FRAME_CTRL_13:
+	case ARIZONA_AIF1_FRAME_CTRL_14:
+	case ARIZONA_AIF1_FRAME_CTRL_15:
+	case ARIZONA_AIF1_FRAME_CTRL_16:
+	case ARIZONA_AIF1_TX_ENABLES:
+	case ARIZONA_AIF1_RX_ENABLES:
+	case ARIZONA_AIF2_BCLK_CTRL:
+	case ARIZONA_AIF2_TX_PIN_CTRL:
+	case ARIZONA_AIF2_RX_PIN_CTRL:
+	case ARIZONA_AIF2_RATE_CTRL:
+	case ARIZONA_AIF2_FORMAT:
+	case ARIZONA_AIF2_RX_BCLK_RATE:
+	case ARIZONA_AIF2_FRAME_CTRL_1:
+	case ARIZONA_AIF2_FRAME_CTRL_2:
+	case ARIZONA_AIF2_FRAME_CTRL_3:
+	case ARIZONA_AIF2_FRAME_CTRL_4:
+	case ARIZONA_AIF2_FRAME_CTRL_5:
+	case ARIZONA_AIF2_FRAME_CTRL_6:
+	case ARIZONA_AIF2_FRAME_CTRL_7:
+	case ARIZONA_AIF2_FRAME_CTRL_8:
+	case ARIZONA_AIF2_FRAME_CTRL_11:
+	case ARIZONA_AIF2_FRAME_CTRL_12:
+	case ARIZONA_AIF2_FRAME_CTRL_13:
+	case ARIZONA_AIF2_FRAME_CTRL_14:
+	case ARIZONA_AIF2_FRAME_CTRL_15:
+	case ARIZONA_AIF2_FRAME_CTRL_16:
+	case ARIZONA_AIF2_TX_ENABLES:
+	case ARIZONA_AIF2_RX_ENABLES:
+	case ARIZONA_AIF3_BCLK_CTRL:
+	case ARIZONA_AIF3_TX_PIN_CTRL:
+	case ARIZONA_AIF3_RX_PIN_CTRL:
+	case ARIZONA_AIF3_RATE_CTRL:
+	case ARIZONA_AIF3_FORMAT:
+	case ARIZONA_AIF3_RX_BCLK_RATE:
+	case ARIZONA_AIF3_FRAME_CTRL_1:
+	case ARIZONA_AIF3_FRAME_CTRL_2:
+	case ARIZONA_AIF3_FRAME_CTRL_3:
+	case ARIZONA_AIF3_FRAME_CTRL_4:
+	case ARIZONA_AIF3_FRAME_CTRL_11:
+	case ARIZONA_AIF3_FRAME_CTRL_12:
+	case ARIZONA_AIF3_TX_ENABLES:
+	case ARIZONA_AIF3_RX_ENABLES:
+	case ARIZONA_SPD1_TX_CONTROL:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_1:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_2:
+	case ARIZONA_SPD1_TX_CHANNEL_STATUS_3:
+	case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
+	case ARIZONA_SLIMBUS_RATES_1:
+	case ARIZONA_SLIMBUS_RATES_2:
+	case ARIZONA_SLIMBUS_RATES_5:
+	case ARIZONA_SLIMBUS_RATES_6:
+	case ARIZONA_SLIMBUS_RATES_7:
+	case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
+	case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
+	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+	case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
+	case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
+	case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
+	case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
+	case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
+	case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
+	case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX3MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX3MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX4MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX4MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX5MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX5MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF2TX6MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF2TX6MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
+	case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
+	case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
+	case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
+	case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
+	case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
+	case ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE:
+	case ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME:
+	case ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE:
+	case ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
+	case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
+	case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
+	case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
+	case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
+	case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
+	case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
+	case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
+	case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC3MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1DEC4MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT3MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC1INT4MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
+	case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
+	case ARIZONA_GPIO1_CTRL:
+	case ARIZONA_GPIO2_CTRL:
+	case ARIZONA_GPIO3_CTRL:
+	case ARIZONA_GPIO4_CTRL:
+	case ARIZONA_GPIO5_CTRL:
+	case ARIZONA_IRQ_CTRL_1:
+	case ARIZONA_GPIO_DEBOUNCE_CONFIG:
+	case ARIZONA_GP_SWITCH_1:
+	case ARIZONA_MISC_PAD_CTRL_1:
+	case ARIZONA_MISC_PAD_CTRL_2:
+	case ARIZONA_MISC_PAD_CTRL_3:
+	case ARIZONA_MISC_PAD_CTRL_4:
+	case ARIZONA_MISC_PAD_CTRL_5:
+	case ARIZONA_MISC_PAD_CTRL_6:
+	case ARIZONA_INTERRUPT_STATUS_1:
+	case ARIZONA_INTERRUPT_STATUS_2:
+	case ARIZONA_INTERRUPT_STATUS_3:
+	case ARIZONA_INTERRUPT_STATUS_4:
+	case ARIZONA_INTERRUPT_STATUS_5:
+	case ARIZONA_INTERRUPT_STATUS_1_MASK:
+	case ARIZONA_INTERRUPT_STATUS_2_MASK:
+	case ARIZONA_INTERRUPT_STATUS_3_MASK:
+	case ARIZONA_INTERRUPT_STATUS_4_MASK:
+	case ARIZONA_INTERRUPT_STATUS_5_MASK:
+	case ARIZONA_INTERRUPT_CONTROL:
+	case ARIZONA_IRQ2_STATUS_1:
+	case ARIZONA_IRQ2_STATUS_2:
+	case ARIZONA_IRQ2_STATUS_3:
+	case ARIZONA_IRQ2_STATUS_4:
+	case ARIZONA_IRQ2_STATUS_5:
+	case ARIZONA_IRQ2_STATUS_1_MASK:
+	case ARIZONA_IRQ2_STATUS_2_MASK:
+	case ARIZONA_IRQ2_STATUS_3_MASK:
+	case ARIZONA_IRQ2_STATUS_4_MASK:
+	case ARIZONA_IRQ2_STATUS_5_MASK:
+	case ARIZONA_IRQ2_CONTROL:
+	case ARIZONA_INTERRUPT_RAW_STATUS_2:
+	case ARIZONA_INTERRUPT_RAW_STATUS_3:
+	case ARIZONA_INTERRUPT_RAW_STATUS_4:
+	case ARIZONA_INTERRUPT_RAW_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_6:
+	case ARIZONA_INTERRUPT_RAW_STATUS_7:
+	case ARIZONA_INTERRUPT_RAW_STATUS_8:
+	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
+	case ARIZONA_AOD_IRQ1:
+	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_MASK_IRQ1:
+	case ARIZONA_AOD_IRQ_MASK_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
+	case ARIZONA_JACK_DETECT_DEBOUNCE:
+	case ARIZONA_FX_CTRL1:
+	case ARIZONA_FX_CTRL2:
+	case ARIZONA_EQ1_1:
+	case ARIZONA_EQ1_2:
+	case ARIZONA_EQ1_3:
+	case ARIZONA_EQ1_4:
+	case ARIZONA_EQ1_5:
+	case ARIZONA_EQ1_6:
+	case ARIZONA_EQ1_7:
+	case ARIZONA_EQ1_8:
+	case ARIZONA_EQ1_9:
+	case ARIZONA_EQ1_10:
+	case ARIZONA_EQ1_11:
+	case ARIZONA_EQ1_12:
+	case ARIZONA_EQ1_13:
+	case ARIZONA_EQ1_14:
+	case ARIZONA_EQ1_15:
+	case ARIZONA_EQ1_16:
+	case ARIZONA_EQ1_17:
+	case ARIZONA_EQ1_18:
+	case ARIZONA_EQ1_19:
+	case ARIZONA_EQ1_20:
+	case ARIZONA_EQ1_21:
+	case ARIZONA_EQ2_1:
+	case ARIZONA_EQ2_2:
+	case ARIZONA_EQ2_3:
+	case ARIZONA_EQ2_4:
+	case ARIZONA_EQ2_5:
+	case ARIZONA_EQ2_6:
+	case ARIZONA_EQ2_7:
+	case ARIZONA_EQ2_8:
+	case ARIZONA_EQ2_9:
+	case ARIZONA_EQ2_10:
+	case ARIZONA_EQ2_11:
+	case ARIZONA_EQ2_12:
+	case ARIZONA_EQ2_13:
+	case ARIZONA_EQ2_14:
+	case ARIZONA_EQ2_15:
+	case ARIZONA_EQ2_16:
+	case ARIZONA_EQ2_17:
+	case ARIZONA_EQ2_18:
+	case ARIZONA_EQ2_19:
+	case ARIZONA_EQ2_20:
+	case ARIZONA_EQ2_21:
+	case ARIZONA_EQ3_1:
+	case ARIZONA_EQ3_2:
+	case ARIZONA_EQ3_3:
+	case ARIZONA_EQ3_4:
+	case ARIZONA_EQ3_5:
+	case ARIZONA_EQ3_6:
+	case ARIZONA_EQ3_7:
+	case ARIZONA_EQ3_8:
+	case ARIZONA_EQ3_9:
+	case ARIZONA_EQ3_10:
+	case ARIZONA_EQ3_11:
+	case ARIZONA_EQ3_12:
+	case ARIZONA_EQ3_13:
+	case ARIZONA_EQ3_14:
+	case ARIZONA_EQ3_15:
+	case ARIZONA_EQ3_16:
+	case ARIZONA_EQ3_17:
+	case ARIZONA_EQ3_18:
+	case ARIZONA_EQ3_19:
+	case ARIZONA_EQ3_20:
+	case ARIZONA_EQ3_21:
+	case ARIZONA_EQ4_1:
+	case ARIZONA_EQ4_2:
+	case ARIZONA_EQ4_3:
+	case ARIZONA_EQ4_4:
+	case ARIZONA_EQ4_5:
+	case ARIZONA_EQ4_6:
+	case ARIZONA_EQ4_7:
+	case ARIZONA_EQ4_8:
+	case ARIZONA_EQ4_9:
+	case ARIZONA_EQ4_10:
+	case ARIZONA_EQ4_11:
+	case ARIZONA_EQ4_12:
+	case ARIZONA_EQ4_13:
+	case ARIZONA_EQ4_14:
+	case ARIZONA_EQ4_15:
+	case ARIZONA_EQ4_16:
+	case ARIZONA_EQ4_17:
+	case ARIZONA_EQ4_18:
+	case ARIZONA_EQ4_19:
+	case ARIZONA_EQ4_20:
+	case ARIZONA_EQ4_21:
+	case ARIZONA_DRC1_CTRL1:
+	case ARIZONA_DRC1_CTRL2:
+	case ARIZONA_DRC1_CTRL3:
+	case ARIZONA_DRC1_CTRL4:
+	case ARIZONA_DRC1_CTRL5:
+	case ARIZONA_HPLPF1_1:
+	case ARIZONA_HPLPF1_2:
+	case ARIZONA_HPLPF2_1:
+	case ARIZONA_HPLPF2_2:
+	case ARIZONA_HPLPF3_1:
+	case ARIZONA_HPLPF3_2:
+	case ARIZONA_HPLPF4_1:
+	case ARIZONA_HPLPF4_2:
+	case ARIZONA_ASRC_ENABLE:
+	case ARIZONA_ASRC_STATUS:
+	case ARIZONA_ASRC_RATE1:
+	case ARIZONA_ASRC_RATE2:
+	case ARIZONA_ISRC_1_CTRL_1:
+	case ARIZONA_ISRC_1_CTRL_2:
+	case ARIZONA_ISRC_1_CTRL_3:
+	case ARIZONA_ISRC_2_CTRL_1:
+	case ARIZONA_ISRC_2_CTRL_2:
+	case ARIZONA_ISRC_2_CTRL_3:
+	case ARIZONA_FRF_COEFF_1:
+	case ARIZONA_FRF_COEFF_2:
+	case ARIZONA_FRF_COEFF_3:
+	case ARIZONA_FRF_COEFF_4:
+	case ARIZONA_V2_DAC_COMP_1:
+	case ARIZONA_V2_DAC_COMP_2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool wm8998_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case ARIZONA_SOFTWARE_RESET:
+	case ARIZONA_DEVICE_REVISION:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_0:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_1:
+	case ARIZONA_WRITE_SEQUENCER_CTRL_2:
+	case ARIZONA_HAPTICS_STATUS:
+	case ARIZONA_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_SAMPLE_RATE_3_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
+	case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
+	case ARIZONA_MIC_DETECT_3:
+	case ARIZONA_MIC_DETECT_4:
+	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_INPUT_ENABLES_STATUS:
+	case ARIZONA_OUTPUT_STATUS_1:
+	case ARIZONA_RAW_OUTPUT_STATUS_1:
+	case ARIZONA_SLIMBUS_RX_PORT_STATUS:
+	case ARIZONA_SLIMBUS_TX_PORT_STATUS:
+	case ARIZONA_INTERRUPT_STATUS_1:
+	case ARIZONA_INTERRUPT_STATUS_2:
+	case ARIZONA_INTERRUPT_STATUS_3:
+	case ARIZONA_INTERRUPT_STATUS_4:
+	case ARIZONA_INTERRUPT_STATUS_5:
+	case ARIZONA_IRQ2_STATUS_1:
+	case ARIZONA_IRQ2_STATUS_2:
+	case ARIZONA_IRQ2_STATUS_3:
+	case ARIZONA_IRQ2_STATUS_4:
+	case ARIZONA_IRQ2_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_2:
+	case ARIZONA_INTERRUPT_RAW_STATUS_3:
+	case ARIZONA_INTERRUPT_RAW_STATUS_4:
+	case ARIZONA_INTERRUPT_RAW_STATUS_5:
+	case ARIZONA_INTERRUPT_RAW_STATUS_6:
+	case ARIZONA_INTERRUPT_RAW_STATUS_7:
+	case ARIZONA_INTERRUPT_RAW_STATUS_8:
+	case ARIZONA_IRQ_PIN_STATUS:
+	case ARIZONA_AOD_WKUP_AND_TRIG:
+	case ARIZONA_AOD_IRQ1:
+	case ARIZONA_AOD_IRQ2:
+	case ARIZONA_AOD_IRQ_RAW_STATUS:
+	case ARIZONA_FX_CTRL2:
+	case ARIZONA_ASRC_STATUS:
+		return true;
+	default:
+		return false;
+	}
+}
+
+#define WM8998_MAX_REGISTER 0x31ff
+
+const struct regmap_config wm8998_i2c_regmap = {
+	.reg_bits = 32,
+	.val_bits = 16,
+	.reg_format_endian = REGMAP_ENDIAN_BIG,
+	.val_format_endian = REGMAP_ENDIAN_BIG,
+
+	.max_register = WM8998_MAX_REGISTER,
+	.readable_reg = wm8998_readable_register,
+	.volatile_reg = wm8998_volatile_register,
+
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = wm8998_reg_default,
+	.num_reg_defaults = ARRAY_SIZE(wm8998_reg_default),
+};
+EXPORT_SYMBOL_GPL(wm8998_i2c_regmap);
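For orientation, the regmap_config above wires the readable/volatile tables into regmap: 32-bit big-endian register addresses, 16-bit values, and an rbtree cache seeded from wm8998_reg_default. Below is a minimal sketch, not part of the patch, of how an I2C helper could hand this config to the regmap core; in-tree the Arizona MFD core performs this step, and the helper name is made up.

/*
 * Illustrative sketch only -- not part of this patch.  In-tree, the
 * Arizona MFD core attaches wm8998_i2c_regmap (declared in the
 * driver-local header); "wm8998_example_attach" is a hypothetical
 * helper shown purely to demonstrate how the config is consumed.
 */
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static int wm8998_example_attach(struct i2c_client *i2c)
{
	struct regmap *regmap;

	/* readable_reg/volatile_reg above decide which registers may be
	 * cached and which must always be read from the device. */
	regmap = devm_regmap_init_i2c(i2c, &wm8998_i2c_regmap);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}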
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 42c3852..ccccc29 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -271,6 +271,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called hpilo.
 
+config QCOM_COINCELL
+	tristate "Qualcomm coincell charger support"
+	depends on MFD_SPMI_PMIC || COMPILE_TEST
+	help
+	  This driver supports the coincell block found inside of
+	  Qualcomm PMICs.  The coincell charger provides a means to
+	  charge a coincell battery or backup capacitor which is used
+	  to maintain PMIC register and RTC state in the absence of
+	  external power.
+
 config SGI_GRU
 	tristate "SGI GRU driver"
 	depends on X86_UV && SMP
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d056fb7..537d7f3 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -18,6 +18,7 @@
 obj-$(CONFIG_TIFM_CORE)       	+= tifm_core.o
 obj-$(CONFIG_TIFM_7XX1)       	+= tifm_7xx1.o
 obj-$(CONFIG_PHANTOM)		+= phantom.o
+obj-$(CONFIG_QCOM_COINCELL)	+= qcom-coincell.o
 obj-$(CONFIG_SENSORS_BH1780)	+= bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 705b881..d11187d 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -106,7 +106,6 @@
 static struct i2c_driver ad_dpot_i2c_driver = {
 	.driver = {
 		.name	= "ad_dpot",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= ad_dpot_i2c_probe,
 	.remove		= ad_dpot_i2c_remove,
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 3739ffa..a3e789b 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1275,7 +1275,6 @@
 static struct i2c_driver apds990x_driver = {
 	.driver	 = {
 		.name	= "apds990x",
-		.owner	= THIS_MODULE,
 		.pm	= &apds990x_pm_ops,
 	},
 	.probe	  = apds990x_probe,
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index b756381..753d7ec 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -1396,7 +1396,6 @@
 static struct i2c_driver bh1770_driver = {
 	.driver	 = {
 		.name	= "bh1770glc",
-		.owner	= THIS_MODULE,
 		.pm	= &bh1770_pm_ops,
 	},
 	.probe	  = bh1770_probe,
diff --git a/drivers/misc/bmp085-i2c.c b/drivers/misc/bmp085-i2c.c
index a7c1629..f35c218 100644
--- a/drivers/misc/bmp085-i2c.c
+++ b/drivers/misc/bmp085-i2c.c
@@ -66,7 +66,6 @@
 
 static struct i2c_driver bmp085_i2c_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= BMP085_NAME,
 	},
 	.id_table	= bmp085_id,
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index b6db9eb..8756d06 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -11,11 +11,16 @@
 	bool
 	default n
 
+config CXL_EEH
+	bool
+	default n
+
 config CXL
 	tristate "Support for IBM Coherent Accelerators (CXL)"
-	depends on PPC_POWERNV && PCI_MSI
+	depends on PPC_POWERNV && PCI_MSI && EEH
 	select CXL_BASE
 	select CXL_KERNEL_API
+	select CXL_EEH
 	default m
 	help
 	  Select this option to enable driver support for IBM Coherent
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 14e3f82..6f484df 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,3 +1,5 @@
+ccflags-y := -Werror
+
 cxl-y				+= main.o file.o irq.o fault.o native.o
 cxl-y				+= context.o sysfs.o debugfs.o pci.o trace.o
 cxl-y				+= vphb.o api.o
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 729e085..8af12c8 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -12,11 +12,13 @@
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <misc/cxl.h>
+#include <linux/fs.h>
 
 #include "cxl.h"
 
 struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 {
+	struct address_space *mapping;
 	struct cxl_afu *afu;
 	struct cxl_context  *ctx;
 	int rc;
@@ -25,19 +27,42 @@
 
 	get_device(&afu->dev);
 	ctx = cxl_context_alloc();
-	if (IS_ERR(ctx))
-		return ctx;
+	if (IS_ERR(ctx)) {
+		rc = PTR_ERR(ctx);
+		goto err_dev;
+	}
+
+	ctx->kernelapi = true;
+
+	/*
+	 * Make our own address space since we won't have one from the
+	 * filesystem like the user api has, and even if we do associate a file
+	 * with this context we don't want to use the global anonymous inode's
+	 * address space as that can invalidate unrelated users:
+	 */
+	mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL);
+	if (!mapping) {
+		rc = -ENOMEM;
+		goto err_ctx;
+	}
+	address_space_init_once(mapping);
 
 	/* Make it a slave context.  We can promote it later? */
-	rc = cxl_context_init(ctx, afu, false, NULL);
-	if (rc) {
-		kfree(ctx);
-		put_device(&afu->dev);
-		return ERR_PTR(-ENOMEM);
-	}
+	rc = cxl_context_init(ctx, afu, false, mapping);
+	if (rc)
+		goto err_mapping;
+
 	cxl_assign_psn_space(ctx);
 
 	return ctx;
+
+err_mapping:
+	kfree(mapping);
+err_ctx:
+	kfree(ctx);
+err_dev:
+	put_device(&afu->dev);
+	return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
 
@@ -59,7 +84,7 @@
 
 int cxl_release_context(struct cxl_context *ctx)
 {
-	if (ctx->status != CLOSED)
+	if (ctx->status >= STARTED)
 		return -EBUSY;
 
 	put_device(&ctx->afu->dev);
@@ -255,9 +280,16 @@
 
 	file = anon_inode_getfile("cxl", fops, ctx, flags);
 	if (IS_ERR(file))
-		put_unused_fd(fdtmp);
+		goto err_fd;
+
+	file->f_mapping = ctx->mapping;
+
 	*fd = fdtmp;
 	return file;
+
+err_fd:
+	put_unused_fd(fdtmp);
+	return NULL;
 }
 EXPORT_SYMBOL_GPL(cxl_get_fd);
 
@@ -327,3 +359,10 @@
 	return cxl_afu_check_and_enable(afu);
 }
 EXPORT_SYMBOL_GPL(cxl_afu_reset);
+
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+				  bool perst_reloads_same_image)
+{
+	afu->adapter->perst_same_image = perst_reloads_same_image;
+}
+EXPORT_SYMBOL_GPL(cxl_perst_reloads_same_image);
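The api.c changes above restructure the error paths of cxl_dev_context_init() (the mapping, the context and the device reference are now unwound individually and the real error code is propagated), tighten cxl_release_context() to refuse once a context has been started, and export cxl_perst_reloads_same_image(). A minimal sketch follows, using only calls visible in this hunk, of how an AFU driver might drive that lifecycle; the function name is hypothetical.

/*
 * Illustrative sketch only: a hypothetical AFU driver using the kernel
 * API touched above.  "example_afu_setup" is a made-up name.
 */
#include <linux/err.h>
#include <linux/pci.h>
#include <misc/cxl.h>

static int example_afu_setup(struct pci_dev *pdev)
{
	struct cxl_context *ctx;

	ctx = cxl_dev_context_init(pdev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* real error now propagated */

	/* ... configure and (optionally) start work on ctx ... */

	/* Only succeeds while the context has not been started
	 * (status >= STARTED now returns -EBUSY). */
	return cxl_release_context(ctx);
}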
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 1287148..e762f85 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -126,6 +126,18 @@
 	if (ctx->status != STARTED) {
 		mutex_unlock(&ctx->status_mutex);
 		pr_devel("%s: Context not started, failing problem state access\n", __func__);
+		if (ctx->mmio_err_ff) {
+			if (!ctx->ff_page) {
+				ctx->ff_page = alloc_page(GFP_USER);
+				if (!ctx->ff_page)
+					return VM_FAULT_OOM;
+				memset(page_address(ctx->ff_page), 0xff, PAGE_SIZE);
+			}
+			get_page(ctx->ff_page);
+			vmf->page = ctx->ff_page;
+			vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+			return 0;
+		}
 		return VM_FAULT_SIGBUS;
 	}
 
@@ -193,7 +205,11 @@
 	if (status != STARTED)
 		return -EBUSY;
 
-	WARN_ON(cxl_detach_process(ctx));
+	/* Only warn if we detached while the link was OK.
+	 * If detach fails when hw is down, we don't care.
+	 */
+	WARN_ON(cxl_detach_process(ctx) &&
+		cxl_adapter_link_ok(ctx->afu->adapter));
 	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
 	put_pid(ctx->pid);
 	cxl_ctx_put();
@@ -253,7 +269,11 @@
 	struct cxl_context *ctx = container_of(rcu, struct cxl_context, rcu);
 
 	free_page((u64)ctx->sstp);
+	if (ctx->ff_page)
+		__free_page(ctx->ff_page);
 	ctx->sstp = NULL;
+	if (ctx->kernelapi)
+		kfree(ctx->mapping);
 
 	kfree(ctx);
 }
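The fault-handler change above gives each context an on-demand page of 0xff bytes: once the context is no longer STARTED, problem-state MMIO reads can return all-ones data instead of delivering SIGBUS, provided the user asked for that behaviour. The opt-in flag is parsed in the file.c hunk further down (CXL_START_WORK_ERR_FF). A hedged userspace sketch of that opt-in follows; the helper name is an example and error handling is abbreviated.

/*
 * Userspace sketch, illustrative only: opt in to "reads return 0xff
 * after an error" when starting work on a cxl context.  The helper
 * name is an example; the caller supplies the AFU device path.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <misc/cxl.h>		/* exported uapi header */

static int start_work_err_ff(const char *afu_dev, __u64 wed)
{
	struct cxl_ioctl_start_work work = {
		.flags = CXL_START_WORK_ERR_FF,
		.work_element_descriptor = wed,
	};
	int fd = open(afu_dev, O_RDWR);

	if (fd < 0)
		return -1;
	return ioctl(fd, CXL_IOCTL_START_WORK, &work);
}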
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 4fd66ca..1c30ef7 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -34,7 +34,7 @@
  * Bump version each time a user API change is made, whether it is
  * backwards compatible or not.
  */
-#define CXL_API_VERSION 1
+#define CXL_API_VERSION 2
 #define CXL_API_VERSION_COMPATIBLE 1
 
 /*
@@ -83,8 +83,10 @@
 /* 0x00C0:7EFF Implementation dependent area */
 static const cxl_p1_reg_t CXL_PSL_FIR1      = {0x0100};
 static const cxl_p1_reg_t CXL_PSL_FIR2      = {0x0108};
+static const cxl_p1_reg_t CXL_PSL_Timebase  = {0x0110};
 static const cxl_p1_reg_t CXL_PSL_VERSION   = {0x0118};
 static const cxl_p1_reg_t CXL_PSL_RESLCKTO  = {0x0128};
+static const cxl_p1_reg_t CXL_PSL_TB_CTLSTAT = {0x0140};
 static const cxl_p1_reg_t CXL_PSL_FIR_CNTL  = {0x0148};
 static const cxl_p1_reg_t CXL_PSL_DSNDCTL   = {0x0150};
 static const cxl_p1_reg_t CXL_PSL_SNWRALLOC = {0x0158};
@@ -152,6 +154,9 @@
 #define CXL_PSL_SPAP_Size_Shift 4
 #define CXL_PSL_SPAP_V    0x0000000000000001ULL
 
+/****** CXL_PSL_Control ****************************************************/
+#define CXL_PSL_Control_tb 0x0000000000000001ULL
+
 /****** CXL_PSL_DLCNTL *****************************************************/
 #define CXL_PSL_DLCNTL_D (0x1ull << (63-28))
 #define CXL_PSL_DLCNTL_C (0x1ull << (63-29))
@@ -418,6 +423,9 @@
 	/* Used to unmap any mmaps when force detaching */
 	struct address_space *mapping;
 	struct mutex mapping_lock;
+	struct page *ff_page;
+	bool mmio_err_ff;
+	bool kernelapi;
 
 	spinlock_t sste_lock; /* Protects segment table entries */
 	struct cxl_sste *sstp;
@@ -493,6 +501,7 @@
 	bool user_image_loaded;
 	bool perst_loads_image;
 	bool perst_select_user;
+	bool perst_same_image;
 };
 
 int cxl_alloc_one_irq(struct cxl *adapter);
@@ -531,16 +540,33 @@
 	__be32 software_state;
 } __packed;
 
+static inline bool cxl_adapter_link_ok(struct cxl *cxl)
+{
+	struct pci_dev *pdev;
+
+	pdev = to_pci_dev(cxl->dev.parent);
+	return !pci_channel_offline(pdev);
+}
+
 static inline void __iomem *_cxl_p1_addr(struct cxl *cxl, cxl_p1_reg_t reg)
 {
 	WARN_ON(!cpu_has_feature(CPU_FTR_HVMODE));
 	return cxl->p1_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p1_write(cxl, reg, val) \
-	out_be64(_cxl_p1_addr(cxl, reg), val)
-#define cxl_p1_read(cxl, reg) \
-	in_be64(_cxl_p1_addr(cxl, reg))
+static inline void cxl_p1_write(struct cxl *cxl, cxl_p1_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(cxl)))
+		out_be64(_cxl_p1_addr(cxl, reg), val);
+}
+
+static inline u64 cxl_p1_read(struct cxl *cxl, cxl_p1_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(cxl)))
+		return in_be64(_cxl_p1_addr(cxl, reg));
+	else
+		return ~0ULL;
+}
 
 static inline void __iomem *_cxl_p1n_addr(struct cxl_afu *afu, cxl_p1n_reg_t reg)
 {
@@ -548,26 +574,56 @@
 	return afu->p1n_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p1n_write(afu, reg, val) \
-	out_be64(_cxl_p1n_addr(afu, reg), val)
-#define cxl_p1n_read(afu, reg) \
-	in_be64(_cxl_p1n_addr(afu, reg))
+static inline void cxl_p1n_write(struct cxl_afu *afu, cxl_p1n_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		out_be64(_cxl_p1n_addr(afu, reg), val);
+}
+
+static inline u64 cxl_p1n_read(struct cxl_afu *afu, cxl_p1n_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_be64(_cxl_p1n_addr(afu, reg));
+	else
+		return ~0ULL;
+}
 
 static inline void __iomem *_cxl_p2n_addr(struct cxl_afu *afu, cxl_p2n_reg_t reg)
 {
 	return afu->p2n_mmio + cxl_reg_off(reg);
 }
 
-#define cxl_p2n_write(afu, reg, val) \
-	out_be64(_cxl_p2n_addr(afu, reg), val)
-#define cxl_p2n_read(afu, reg) \
-	in_be64(_cxl_p2n_addr(afu, reg))
+static inline void cxl_p2n_write(struct cxl_afu *afu, cxl_p2n_reg_t reg, u64 val)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		out_be64(_cxl_p2n_addr(afu, reg), val);
+}
 
+static inline u64 cxl_p2n_read(struct cxl_afu *afu, cxl_p2n_reg_t reg)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_be64(_cxl_p2n_addr(afu, reg));
+	else
+		return ~0ULL;
+}
 
-#define cxl_afu_cr_read64(afu, cr, off) \
-	in_le64((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
-#define cxl_afu_cr_read32(afu, cr, off) \
-	in_le32((afu)->afu_desc_mmio + (afu)->crs_offset + ((cr) * (afu)->crs_len) + (off))
+static inline u64 cxl_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_le64((afu)->afu_desc_mmio + (afu)->crs_offset +
+			       ((cr) * (afu)->crs_len) + (off));
+	else
+		return ~0ULL;
+}
+
+static inline u32 cxl_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off)
+{
+	if (likely(cxl_adapter_link_ok(afu->adapter)))
+		return in_le32((afu)->afu_desc_mmio + (afu)->crs_offset +
+			       ((cr) * (afu)->crs_len) + (off));
+	else
+		return 0xffffffff;
+}
 u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off);
 u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off);
 
@@ -585,6 +641,9 @@
 int cxl_alloc_adapter_nr(struct cxl *adapter);
 void cxl_remove_adapter_nr(struct cxl *adapter);
 
+int cxl_alloc_spa(struct cxl_afu *afu);
+void cxl_release_spa(struct cxl_afu *afu);
+
 int cxl_file_init(void);
 void cxl_file_exit(void);
 int cxl_register_adapter(struct cxl *adapter);
@@ -675,6 +734,7 @@
 
 void cxl_stop_trace(struct cxl *cxl);
 int cxl_pci_vphb_add(struct cxl_afu *afu);
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu);
 void cxl_pci_vphb_remove(struct cxl_afu *afu);
 
 extern struct pci_driver cxl_pci_driver;
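The cxl.h hunk above replaces the raw MMIO macros with inline accessors that consult cxl_adapter_link_ok() and return all-ones (or 0xffffffff for config-record reads) when the PCI channel is offline, so callers never touch a fenced PHB. The caller-side convention this enables is sketched below with a hypothetical helper; native.c applies the same ~0ULL check to sw_command_status.

/*
 * Illustrative sketch only: the caller-side pattern the EEH-safe
 * accessors above enable.  "psl_link_alive" is a made-up helper.
 */
static bool psl_link_alive(struct cxl *adapter)
{
	/* Returns ~0ULL rather than faulting if the link is down. */
	u64 ver = cxl_p1_read(adapter, CXL_PSL_VERSION);

	return ver != ~0ULL;
}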
diff --git a/drivers/misc/cxl/debugfs.c b/drivers/misc/cxl/debugfs.c
index 825c412..18df6f4 100644
--- a/drivers/misc/cxl/debugfs.c
+++ b/drivers/misc/cxl/debugfs.c
@@ -48,7 +48,7 @@
 static struct dentry *debugfs_create_io_x64(const char *name, umode_t mode,
 					    struct dentry *parent, u64 __iomem *value)
 {
-	return debugfs_create_file(name, mode, parent, (void *)value, &fops_io_x64);
+	return debugfs_create_file(name, mode, parent, (void __force *)value, &fops_io_x64);
 }
 
 int cxl_debugfs_adapter_add(struct cxl *adapter)
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e3f4b69..a30bf28 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -73,6 +73,11 @@
 	if (!afu->current_mode)
 		goto err_put_afu;
 
+	if (!cxl_adapter_link_ok(adapter)) {
+		rc = -EIO;
+		goto err_put_afu;
+	}
+
 	if (!(ctx = cxl_context_alloc())) {
 		rc = -ENOMEM;
 		goto err_put_afu;
@@ -179,6 +184,8 @@
 	if (work.flags & CXL_START_WORK_AMR)
 		amr = work.amr & mfspr(SPRN_UAMOR);
 
+	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
+
 	/*
 	 * We grab the PID here and not in the file open to allow for the case
 	 * where a process (master, some daemon, etc) has opened the chardev on
@@ -238,6 +245,9 @@
 	if (ctx->status == CLOSED)
 		return -EIO;
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	pr_devel("afu_ioctl\n");
 	switch (cmd) {
 	case CXL_IOCTL_START_WORK:
@@ -251,7 +261,7 @@
 	return -EINVAL;
 }
 
-long afu_compat_ioctl(struct file *file, unsigned int cmd,
+static long afu_compat_ioctl(struct file *file, unsigned int cmd,
 			     unsigned long arg)
 {
 	return afu_ioctl(file, cmd, arg);
@@ -265,6 +275,9 @@
 	if (ctx->status != STARTED)
 		return -EIO;
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	return cxl_context_iomap(ctx, vm);
 }
 
@@ -309,6 +322,9 @@
 	int rc;
 	DEFINE_WAIT(wait);
 
+	if (!cxl_adapter_link_ok(ctx->afu->adapter))
+		return -EIO;
+
 	if (count < CXL_READ_MIN_SIZE)
 		return -EINVAL;
 
@@ -319,6 +335,11 @@
 		if (ctx_event_pending(ctx))
 			break;
 
+		if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+			rc = -EIO;
+			goto out;
+		}
+
 		if (file->f_flags & O_NONBLOCK) {
 			rc = -EAGAIN;
 			goto out;
@@ -396,7 +417,7 @@
 	.mmap           = afu_mmap,
 };
 
-const struct file_operations afu_master_fops = {
+static const struct file_operations afu_master_fops = {
 	.owner		= THIS_MODULE,
 	.open           = afu_master_open,
 	.poll		= afu_poll,
@@ -519,7 +540,7 @@
 	 * If these change we really need to update API.  Either change some
 	 * flags or update API version number CXL_API_VERSION.
 	 */
-	BUILD_BUG_ON(CXL_API_VERSION != 1);
+	BUILD_BUG_ON(CXL_API_VERSION != 2);
 	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
 	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
 	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 680cd26..583b42a 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -30,12 +30,12 @@
 	serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
 	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
 
-	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);
-	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1);
-	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2);
-	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
-	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
-	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
+	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
+	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
+	dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 
 	dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
 	cxl_stop_trace(ctx->afu->adapter);
@@ -54,10 +54,10 @@
 	fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
 	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
-	dev_crit(&afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr);
-	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice);
-	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%.16llx\n", errstat);
-	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug);
+	dev_crit(&afu->dev, "PSL_SERR_An: 0x%016llx\n", serr);
+	dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
+	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
+	dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
 
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
 
@@ -72,7 +72,7 @@
 	WARN(1, "CXL ERROR interrupt %i\n", irq);
 
 	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
-	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%.16llx\n", err_ivte);
+	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
 
 	dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
 	cxl_stop_trace(adapter);
@@ -80,7 +80,7 @@
 	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
 	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
 
-	dev_crit(&adapter->dev, "PSL_FIR1: 0x%.16llx\nPSL_FIR2: 0x%.16llx\n", fir1, fir2);
+	dev_crit(&adapter->dev, "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n", fir1, fir2);
 
 	return IRQ_HANDLED;
 }
@@ -147,7 +147,7 @@
 	if (dsisr & CXL_PSL_DSISR_An_PE)
 		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 	if (dsisr & CXL_PSL_DSISR_An_AE) {
-		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
+		pr_devel("CXL interrupt: AFU Error 0x%016llx\n", irq_info->afu_err);
 
 		if (ctx->pending_afu_err) {
 			/*
@@ -158,7 +158,7 @@
 			 * probably best that we log them somewhere:
 			 */
 			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
-					    "undelivered to pe %i: %.llx\n",
+					    "undelivered to pe %i: 0x%016llx\n",
 					    ctx->pe, irq_info->afu_err);
 		} else {
 			spin_lock(&ctx->lock);
@@ -211,8 +211,8 @@
 	}
 	rcu_read_unlock();
 
-	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
-		" %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
+		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
 		" with outstanding transactions?)\n", ph, irq_info.dsisr,
 		irq_info.dar);
 	return fail_psl_irq(afu, &irq_info);
@@ -341,6 +341,9 @@
 
 void cxl_release_psl_err_irq(struct cxl *adapter)
 {
+	if (adapter->err_virq != irq_find_mapping(NULL, adapter->err_hwirq))
+		return;
+
 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
 	cxl_unmap_irq(adapter->err_virq, adapter);
 	cxl_release_one_irq(adapter, adapter->err_hwirq);
@@ -374,6 +377,9 @@
 
 void cxl_release_serr_irq(struct cxl_afu *afu)
 {
+	if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+		return;
+
 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
 	cxl_unmap_irq(afu->serr_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
@@ -400,12 +406,15 @@
 
 void cxl_release_psl_irq(struct cxl_afu *afu)
 {
+	if (afu->psl_virq != irq_find_mapping(NULL, afu->psl_hwirq))
+		return;
+
 	cxl_unmap_irq(afu->psl_virq, afu);
 	cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
 	kfree(afu->psl_irq_name);
 }
 
-void afu_irq_name_free(struct cxl_context *ctx)
+static void afu_irq_name_free(struct cxl_context *ctx)
 {
 	struct cxl_irq_name *irq_name, *tmp;
 
@@ -421,6 +430,9 @@
 	int rc, r, i, j = 1;
 	struct cxl_irq_name *irq_name;
 
+	/* Initialize the list head to hold irq names */
+	INIT_LIST_HEAD(&ctx->irq_names);
+
 	if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
 		return rc;
 
@@ -432,13 +444,12 @@
 	ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count),
 				  sizeof(*ctx->irq_bitmap), GFP_KERNEL);
 	if (!ctx->irq_bitmap)
-		return -ENOMEM;
+		goto out;
 
 	/*
 	 * Allocate names first.  If any fail, bail out before allocating
 	 * actual hardware IRQs.
 	 */
-	INIT_LIST_HEAD(&ctx->irq_names);
 	for (r = 1; r < CXL_IRQ_RANGES; r++) {
 		for (i = 0; i < ctx->irqs.range[r]; i++) {
 			irq_name = kmalloc(sizeof(struct cxl_irq_name),
@@ -460,11 +471,12 @@
 	return 0;
 
 out:
+	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
 	afu_irq_name_free(ctx);
 	return -ENOMEM;
 }
 
-void afu_register_hwirqs(struct cxl_context *ctx)
+static void afu_register_hwirqs(struct cxl_context *ctx)
 {
 	irq_hw_number_t hwirq;
 	struct cxl_irq_name *irq_name;
@@ -511,4 +523,8 @@
 
 	afu_irq_name_free(ctx);
 	cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
+
+	kfree(ctx->irq_bitmap);
+	ctx->irq_bitmap = NULL;
+	ctx->irq_count = 0;
 }
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index 4a164ab..9fde75e 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -222,6 +222,7 @@
 	cxl_debugfs_exit();
 	cxl_file_exit();
 	unregister_cxl_calls(&cxl_calls);
+	idr_destroy(&cxl_adapter_idr);
 }
 
 module_init(init_cxl);
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 10567f2..b37f2e8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -41,7 +41,14 @@
 			rc = -EBUSY;
 			goto out;
 		}
-		pr_devel_ratelimited("AFU control... (0x%.16llx)\n",
+
+		if (!cxl_adapter_link_ok(afu->adapter)) {
+			afu->enabled = enabled;
+			rc = -EIO;
+			goto out;
+		}
+
+		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
 				     AFU_Cntl | command);
 		cpu_relax();
 		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
@@ -85,6 +92,10 @@
 
 int cxl_afu_check_and_enable(struct cxl_afu *afu)
 {
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		WARN(1, "Refusing to enable afu while link down!\n");
+		return -EIO;
+	}
 	if (afu->enabled)
 		return 0;
 	return afu_enable(afu);
@@ -103,6 +114,12 @@
 
 	pr_devel("PSL purge request\n");
 
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
+		rc = -EIO;
+		goto out;
+	}
+
 	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
 		WARN(1, "psl_purge request while AFU not disabled!\n");
 		cxl_afu_disable(afu);
@@ -119,14 +136,19 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		if (!cxl_adapter_link_ok(afu->adapter)) {
+			rc = -EIO;
+			goto out;
+		}
+
 		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
-		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%.16llx  PSL_DSISR: 0x%.16llx\n", PSL_CNTL, dsisr);
+		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n", PSL_CNTL, dsisr);
 		if (dsisr & CXL_PSL_DSISR_TRANS) {
 			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
-			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%.16llx, DAR: 0x%.16llx\n", dsisr, dar);
+			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n", dsisr, dar);
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
 		} else if (dsisr) {
-			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%.16llx\n", dsisr);
+			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n", dsisr);
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
 		} else {
 			cpu_relax();
@@ -161,10 +183,8 @@
 	return ((spa_size / 8) - 96) / 17;
 }
 
-static int alloc_spa(struct cxl_afu *afu)
+int cxl_alloc_spa(struct cxl_afu *afu)
 {
-	u64 spap;
-
 	/* Work out how many pages to allocate */
 	afu->spa_order = 0;
 	do {
@@ -183,6 +203,13 @@
 	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
 		 1<<afu->spa_order, afu->spa_max_procs, afu->num_procs);
 
+	return 0;
+}
+
+static void attach_spa(struct cxl_afu *afu)
+{
+	u64 spap;
+
 	afu->sw_command_status = (__be64 *)((char *)afu->spa +
 					    ((afu->spa_max_procs + 3) * 128));
 
@@ -191,14 +218,19 @@
 	spap |= CXL_PSL_SPAP_V;
 	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n", afu->spa, afu->spa_max_procs, afu->sw_command_status, spap);
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
-
-	return 0;
 }
 
-static void release_spa(struct cxl_afu *afu)
+static inline void detach_spa(struct cxl_afu *afu)
 {
 	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
-	free_pages((unsigned long) afu->spa, afu->spa_order);
+}
+
+void cxl_release_spa(struct cxl_afu *afu)
+{
+	if (afu->spa) {
+		free_pages((unsigned long) afu->spa, afu->spa_order);
+		afu->spa = NULL;
+	}
 }
 
 int cxl_tlb_slb_invalidate(struct cxl *adapter)
@@ -215,6 +247,8 @@
 			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
 			return -EBUSY;
 		}
+		if (!cxl_adapter_link_ok(adapter))
+			return -EIO;
 		cpu_relax();
 	}
 
@@ -224,6 +258,8 @@
 			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
 			return -EBUSY;
 		}
+		if (!cxl_adapter_link_ok(adapter))
+			return -EIO;
 		cpu_relax();
 	}
 	return 0;
@@ -240,6 +276,11 @@
 			dev_warn(&afu->dev, "WARNING: CXL AFU SLBIA timed out!\n");
 			return -EBUSY;
 		}
+		/* If the adapter has gone down, we can assume that we
+		 * will PERST it and that will invalidate everything.
+		 */
+		if (!cxl_adapter_link_ok(afu->adapter))
+			return -EIO;
 		cpu_relax();
 	}
 	return 0;
@@ -279,6 +320,8 @@
 	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);
 
 	while (1) {
+		if (!cxl_adapter_link_ok(adapter))
+			break;
 		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
 		if (!(slbia & CXL_TLB_SLB_P))
 			break;
@@ -308,6 +351,11 @@
 			rc = -EBUSY;
 			goto out;
 		}
+		if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
+			rc = -EIO;
+			goto out;
+		}
 		state = be64_to_cpup(ctx->afu->sw_command_status);
 		if (state == ~0ULL) {
 			pr_err("cxl: Error adding process element to AFU\n");
@@ -355,8 +403,13 @@
 
 	mutex_lock(&ctx->afu->spa_mutex);
 	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
-	rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
-				    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
+	/* We could be asked to terminate when the hw is down. That
+	 * should always succeed: it's not running if the hw has gone
+	 * away and is being reset.
+	 */
+	if (cxl_adapter_link_ok(ctx->afu->adapter))
+		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
+					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
 	ctx->elem->software_state = 0;	/* Remove Valid bit */
 	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
 	mutex_unlock(&ctx->afu->spa_mutex);
@@ -369,7 +422,14 @@
 
 	mutex_lock(&ctx->afu->spa_mutex);
 	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);
-	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0)))
+
+	/* We could be asked to remove when the hw is down. Again, if
+	 * the hw is down, the PE is gone, so we succeed.
+	 */
+	if (cxl_adapter_link_ok(ctx->afu->adapter))
+		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);
+
+	if (!rc)
 		ctx->pe_inserted = false;
 	slb_invalid(ctx);
 	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
@@ -397,8 +457,11 @@
 
 	dev_info(&afu->dev, "Activating AFU directed mode\n");
 
-	if (alloc_spa(afu))
-		return -ENOMEM;
+	if (afu->spa == NULL) {
+		if (cxl_alloc_spa(afu))
+			return -ENOMEM;
+	}
+	attach_spa(afu);
 
 	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
 	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
@@ -492,9 +555,7 @@
 	if ((result = cxl_afu_check_and_enable(ctx->afu)))
 		return result;
 
-	add_process_element(ctx);
-
-	return 0;
+	return add_process_element(ctx);
 }
 
 static int deactivate_afu_directed(struct cxl_afu *afu)
@@ -511,8 +572,6 @@
 	cxl_afu_disable(afu);
 	cxl_psl_purge(afu);
 
-	release_spa(afu);
-
 	return 0;
 }
 
@@ -614,6 +673,11 @@
 	if (!(mode & afu->modes_supported))
 		return -EINVAL;
 
+	if (!cxl_adapter_link_ok(afu->adapter)) {
+		WARN(1, "Device link is down, refusing to activate!\n");
+		return -EIO;
+	}
+
 	if (mode == CXL_MODE_DIRECTED)
 		return activate_afu_directed(afu);
 	if (mode == CXL_MODE_DEDICATED)
@@ -624,6 +688,11 @@
 
 int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
 {
+	if (!cxl_adapter_link_ok(ctx->afu->adapter)) {
+		WARN(1, "Device link is down, refusing to attach process!\n");
+		return -EIO;
+	}
+
 	ctx->kernel = kernel;
 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
 		return attach_afu_directed(ctx, wed, amr);
@@ -668,6 +737,12 @@
 {
 	u64 pidtid;
 
+	/* If the adapter has gone away, we can't get any meaningful
+	 * information.
+	 */
+	if (!cxl_adapter_link_ok(afu->adapter))
+		return -EIO;
+
 	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
 	info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
@@ -684,7 +759,7 @@
 {
 	u64 dsisr;
 
-	pr_devel("RECOVERING FROM PSL ERROR... (0x%.16llx)\n", errstat);
+	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
 
 	/* Clear PSL_DSISR[PE] */
 	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 32ad097..02c8516 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -24,6 +24,7 @@
 #include <asm/io.h>
 
 #include "cxl.h"
+#include <misc/cxl.h>
 
 
 #define CXL_PCI_VSEC_ID	0x1280
@@ -133,7 +134,7 @@
 	return (val >> ((off & 0x3) * 8)) & 0xff;
 }
 
-static DEFINE_PCI_DEVICE_TABLE(cxl_pci_tbl) = {
+static const struct pci_device_id cxl_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
@@ -369,6 +370,55 @@
 	return 0;
 }
 
+#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
+#define _2048_250MHZ_CYCLES 1
+
+static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
+{
+	u64 psl_tb;
+	int delta;
+	unsigned int retry = 0;
+	struct device_node *np;
+
+	if (!(np = pnv_pci_get_phb_node(dev)))
+		return -ENODEV;
+
+	/* Do not fail when CAPP timebase sync is not supported by OPAL */
+	of_node_get(np);
+	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
+		of_node_put(np);
+		pr_err("PSL: Timebase sync: OPAL support missing\n");
+		return 0;
+	}
+	of_node_put(np);
+
+	/*
+	 * Setup PSL Timebase Control and Status register
+	 * with the recommended Timebase Sync Count value
+	 */
+	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
+		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));
+
+	/* Enable PSL Timebase */
+	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
+	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
+
+	/* Wait until CORE TB and PSL TB difference <= 16usecs */
+	do {
+		msleep(1);
+		if (retry++ > 5) {
+			pr_err("PSL: Timebase sync: giving up!\n");
+			return -EIO;
+		}
+		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
+		delta = mftb() - psl_tb;
+		if (delta < 0)
+			delta = -delta;
+	} while (cputime_to_usecs(delta) > 16);
+
+	return 0;
+}
+
 static int init_implementation_afu_regs(struct cxl_afu *afu)
 {
 	/* read/write masks for this slice */
@@ -539,10 +589,18 @@
 
 static void cxl_unmap_slice_regs(struct cxl_afu *afu)
 {
-	if (afu->p2n_mmio)
+	if (afu->p2n_mmio) {
 		iounmap(afu->p2n_mmio);
-	if (afu->p1n_mmio)
+		afu->p2n_mmio = NULL;
+	}
+	if (afu->p1n_mmio) {
 		iounmap(afu->p1n_mmio);
+		afu->p1n_mmio = NULL;
+	}
+	if (afu->afu_desc_mmio) {
+		iounmap(afu->afu_desc_mmio);
+		afu->afu_desc_mmio = NULL;
+	}
 }
 
 static void cxl_release_afu(struct device *dev)
@@ -551,6 +609,9 @@
 
 	pr_devel("cxl_release_afu\n");
 
+	idr_destroy(&afu->contexts_idr);
+	cxl_release_spa(afu);
+
 	kfree(afu);
 }
 
@@ -656,7 +717,7 @@
 	 */
 	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
 	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
-		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
 		if (__cxl_afu_reset(afu))
 			return -EIO;
 		if (cxl_afu_disable(afu))
@@ -677,7 +738,7 @@
 	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
 	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
 	if (reg) {
-		dev_warn(&afu->dev, "AFU had pending DSISR: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
 		if (reg & CXL_PSL_DSISR_TRANS)
 			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
 		else
@@ -686,12 +747,12 @@
 	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
 	if (reg) {
 		if (reg & ~0xffff)
-			dev_warn(&afu->dev, "AFU had pending SERR: %#.16llx\n", reg);
+			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
 		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
 	}
 	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
 	if (reg) {
-		dev_warn(&afu->dev, "AFU had pending error status: %#.16llx\n", reg);
+		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
 		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
 	}
 
@@ -742,45 +803,70 @@
 	return count;
 }
 
-static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
 {
-	struct cxl_afu *afu;
-	bool free = true;
 	int rc;
 
-	if (!(afu = cxl_alloc_afu(adapter, slice)))
-		return -ENOMEM;
-
-	if ((rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice)))
-		goto err1;
-
 	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
-		goto err1;
+		return rc;
 
 	if ((rc = sanitise_afu_regs(afu)))
-		goto err2;
+		goto err1;
 
 	/* We need to reset the AFU before we can read the AFU descriptor */
 	if ((rc = __cxl_afu_reset(afu)))
-		goto err2;
+		goto err1;
 
 	if (cxl_verbose)
 		dump_afu_descriptor(afu);
 
 	if ((rc = cxl_read_afu_descriptor(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = init_implementation_afu_regs(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_register_serr_irq(afu)))
-		goto err2;
+		goto err1;
 
 	if ((rc = cxl_register_psl_irq(afu)))
-		goto err3;
+		goto err2;
+
+	return 0;
+
+err2:
+	cxl_release_serr_irq(afu);
+err1:
+	cxl_unmap_slice_regs(afu);
+	return rc;
+}
+
+static void cxl_deconfigure_afu(struct cxl_afu *afu)
+{
+	cxl_release_psl_irq(afu);
+	cxl_release_serr_irq(afu);
+	cxl_unmap_slice_regs(afu);
+}
+
+static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
+{
+	struct cxl_afu *afu;
+	int rc;
+
+	afu = cxl_alloc_afu(adapter, slice);
+	if (!afu)
+		return -ENOMEM;
+
+	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
+	if (rc)
+		goto err_free;
+
+	rc = cxl_configure_afu(afu, adapter, dev);
+	if (rc)
+		goto err_free;
 
 	/* Don't care if this fails */
 	cxl_debugfs_afu_add(afu);
@@ -795,10 +881,6 @@
 	if ((rc = cxl_sysfs_afu_add(afu)))
 		goto err_put1;
 
-
-	if ((rc = cxl_afu_select_best_mode(afu)))
-		goto err_put2;
-
 	adapter->afu[afu->slice] = afu;
 
 	if ((rc = cxl_pci_vphb_add(afu)))
@@ -806,21 +888,16 @@
 
 	return 0;
 
-err_put2:
-	cxl_sysfs_afu_remove(afu);
 err_put1:
-	device_unregister(&afu->dev);
-	free = false;
+	cxl_deconfigure_afu(afu);
 	cxl_debugfs_afu_remove(afu);
-	cxl_release_psl_irq(afu);
-err3:
-	cxl_release_serr_irq(afu);
-err2:
-	cxl_unmap_slice_regs(afu);
-err1:
-	if (free)
-		kfree(afu);
+	device_unregister(&afu->dev);
 	return rc;
+
+err_free:
+	kfree(afu);
+	return rc;
+
 }
 
 static void cxl_remove_afu(struct cxl_afu *afu)
@@ -840,10 +917,7 @@
 	cxl_context_detach_all(afu);
 	cxl_afu_deactivate_mode(afu);
 
-	cxl_release_psl_irq(afu);
-	cxl_release_serr_irq(afu);
-	cxl_unmap_slice_regs(afu);
-
+	cxl_deconfigure_afu(afu);
 	device_unregister(&afu->dev);
 }
 
@@ -851,16 +925,15 @@
 {
 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
 	int rc;
-	int i;
-	u32 val;
+
+	if (adapter->perst_same_image) {
+		dev_warn(&dev->dev,
+			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
+		return -EINVAL;
+	}
 
 	dev_info(&dev->dev, "CXL reset\n");
 
-	for (i = 0; i < adapter->slices; i++) {
-		cxl_pci_vphb_remove(adapter->afu[i]);
-		cxl_remove_afu(adapter->afu[i]);
-	}
-
 	/* pcie_warm_reset requests a fundamental pci reset which includes a
 	 * PERST assert/deassert.  PERST triggers a loading of the image
 	 * if "user" or "factory" is selected in sysfs */
@@ -869,20 +942,6 @@
 		return rc;
 	}
 
-	/* the PERST done above fences the PHB.  So, reset depends on EEH
-	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
-	 * the driver.  Do an mmio read explictly to ensure EEH notices the
-	 * fenced PHB.  Retry for a few seconds before giving up. */
-	i = 0;
-	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
-		(i < 5)) {
-		msleep(500);
-		i++;
-	}
-
-	if (val != 0xffffffff)
-		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
-
 	return rc;
 }
 
@@ -893,7 +952,7 @@
 	if (pci_request_region(dev, 0, "priv 1 regs"))
 		goto err2;
 
-	pr_devel("cxl_map_adapter_regs: p1: %#.16llx %#llx, p2: %#.16llx %#llx",
+	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
 			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
 
 	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
@@ -917,10 +976,16 @@
 
 static void cxl_unmap_adapter_regs(struct cxl *adapter)
 {
-	if (adapter->p1_mmio)
+	if (adapter->p1_mmio) {
 		iounmap(adapter->p1_mmio);
-	if (adapter->p2_mmio)
+		adapter->p1_mmio = NULL;
+		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
+	}
+	if (adapter->p2_mmio) {
 		iounmap(adapter->p2_mmio);
+		adapter->p2_mmio = NULL;
+		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
+	}
 }
 
 static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
@@ -949,7 +1014,6 @@
 	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
 	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
 	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
-	adapter->perst_loads_image = true;
 	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
 
 	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
@@ -1009,82 +1073,139 @@
 
 	pr_devel("cxl_release_adapter\n");
 
+	cxl_remove_adapter_nr(adapter);
+
 	kfree(adapter);
 }
 
-static struct cxl *cxl_alloc_adapter(struct pci_dev *dev)
+static struct cxl *cxl_alloc_adapter(void)
 {
 	struct cxl *adapter;
 
 	if (!(adapter = kzalloc(sizeof(struct cxl), GFP_KERNEL)))
 		return NULL;
 
-	adapter->dev.parent = &dev->dev;
-	adapter->dev.release = cxl_release_adapter;
-	pci_set_drvdata(dev, adapter);
 	spin_lock_init(&adapter->afu_list_lock);
 
+	if (cxl_alloc_adapter_nr(adapter))
+		goto err1;
+
+	if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
+		goto err2;
+
 	return adapter;
+
+err2:
+	cxl_remove_adapter_nr(adapter);
+err1:
+	kfree(adapter);
+	return NULL;
 }
 
+#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
+
 static int sanitise_adapter_regs(struct cxl *adapter)
 {
-	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
+	/* Clear PSL tberror bit by writing 1 to it */
+	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
 	return cxl_tlb_slb_invalidate(adapter);
 }
 
+/* This should contain *only* operations that can safely be done in
+ * both creation and recovery.
+ */
+static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
+{
+	int rc;
+
+	adapter->dev.parent = &dev->dev;
+	adapter->dev.release = cxl_release_adapter;
+	pci_set_drvdata(dev, adapter);
+
+	rc = pci_enable_device(dev);
+	if (rc) {
+		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
+		return rc;
+	}
+
+	if ((rc = cxl_read_vsec(adapter, dev)))
+		return rc;
+
+	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
+		return rc;
+
+	if ((rc = setup_cxl_bars(dev)))
+		return rc;
+
+	if ((rc = switch_card_to_cxl(dev)))
+		return rc;
+
+	if ((rc = cxl_update_image_control(adapter)))
+		return rc;
+
+	if ((rc = cxl_map_adapter_regs(adapter, dev)))
+		return rc;
+
+	if ((rc = sanitise_adapter_regs(adapter)))
+		goto err;
+
+	if ((rc = init_implementation_adapter_regs(adapter, dev)))
+		goto err;
+
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
+		goto err;
+
+	/* If recovery happened, the last step is to turn on snooping.
+	 * In the non-recovery case this has no effect */
+	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
+		goto err;
+
+	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
+		goto err;
+
+	if ((rc = cxl_register_psl_err_irq(adapter)))
+		goto err;
+
+	return 0;
+
+err:
+	cxl_unmap_adapter_regs(adapter);
+	return rc;
+
+}
+
+static void cxl_deconfigure_adapter(struct cxl *adapter)
+{
+	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
+
+	cxl_release_psl_err_irq(adapter);
+	cxl_unmap_adapter_regs(adapter);
+
+	pci_disable_device(pdev);
+}
+
 static struct cxl *cxl_init_adapter(struct pci_dev *dev)
 {
 	struct cxl *adapter;
-	bool free = true;
 	int rc;
 
-
-	if (!(adapter = cxl_alloc_adapter(dev)))
+	adapter = cxl_alloc_adapter();
+	if (!adapter)
 		return ERR_PTR(-ENOMEM);
 
-	if ((rc = cxl_read_vsec(adapter, dev)))
-		goto err1;
+	/* Set defaults for parameters which need to persist over
+	 * configure/reconfigure
+	 */
+	adapter->perst_loads_image = true;
+	adapter->perst_same_image = false;
 
-	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
-		goto err1;
-
-	if ((rc = setup_cxl_bars(dev)))
-		goto err1;
-
-	if ((rc = switch_card_to_cxl(dev)))
-		goto err1;
-
-	if ((rc = cxl_alloc_adapter_nr(adapter)))
-		goto err1;
-
-	if ((rc = dev_set_name(&adapter->dev, "card%i", adapter->adapter_num)))
-		goto err2;
-
-	if ((rc = cxl_update_image_control(adapter)))
-		goto err2;
-
-	if ((rc = cxl_map_adapter_regs(adapter, dev)))
-		goto err2;
-
-	if ((rc = sanitise_adapter_regs(adapter)))
-		goto err2;
-
-	if ((rc = init_implementation_adapter_regs(adapter, dev)))
-		goto err3;
-
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
-		goto err3;
-
-	/* If recovery happened, the last step is to turn on snooping.
-	 * In the non-recovery case this has no effect */
-	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON))) {
-		goto err3;
+	rc = cxl_configure_adapter(adapter, dev);
+	if (rc) {
+		pci_disable_device(dev);
+		cxl_release_adapter(&adapter->dev);
+		return ERR_PTR(rc);
 	}
 
-	if ((rc = cxl_register_psl_err_irq(adapter)))
-		goto err3;
-
 	/* Don't care if this one fails: */
 	cxl_debugfs_adapter_add(adapter);
 
@@ -1101,37 +1222,25 @@
 	return adapter;
 
 err_put1:
-	device_unregister(&adapter->dev);
-	free = false;
+	/* This should mirror cxl_remove_adapter, except without the
+	 * sysfs parts
+	 */
 	cxl_debugfs_adapter_remove(adapter);
-	cxl_release_psl_err_irq(adapter);
-err3:
-	cxl_unmap_adapter_regs(adapter);
-err2:
-	cxl_remove_adapter_nr(adapter);
-err1:
-	if (free)
-		kfree(adapter);
+	cxl_deconfigure_adapter(adapter);
+	device_unregister(&adapter->dev);
 	return ERR_PTR(rc);
 }
 
 static void cxl_remove_adapter(struct cxl *adapter)
 {
-	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
-
-	pr_devel("cxl_release_adapter\n");
+	pr_devel("cxl_remove_adapter\n");
 
 	cxl_sysfs_adapter_remove(adapter);
 	cxl_debugfs_adapter_remove(adapter);
-	cxl_release_psl_err_irq(adapter);
-	cxl_unmap_adapter_regs(adapter);
-	cxl_remove_adapter_nr(adapter);
+
+	cxl_deconfigure_adapter(adapter);
 
 	device_unregister(&adapter->dev);
-
-	pci_release_region(pdev, 0);
-	pci_release_region(pdev, 2);
-	pci_disable_device(pdev);
 }
 
 static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -1145,21 +1254,21 @@
 	if (cxl_verbose)
 		dump_cxl_config_space(dev);
 
-	if ((rc = pci_enable_device(dev))) {
-		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
-		return rc;
-	}
-
 	adapter = cxl_init_adapter(dev);
 	if (IS_ERR(adapter)) {
 		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
-		pci_disable_device(dev);
 		return PTR_ERR(adapter);
 	}
 
 	for (slice = 0; slice < adapter->slices; slice++) {
-		if ((rc = cxl_init_afu(adapter, slice, dev)))
+		if ((rc = cxl_init_afu(adapter, slice, dev))) {
 			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
+			continue;
+		}
+
+		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
+		if (rc)
+			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
 	}
 
 	return 0;
@@ -1183,10 +1292,262 @@
 	cxl_remove_adapter(adapter);
 }
 
+static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
+						pci_channel_state_t state)
+{
+	struct pci_dev *afu_dev;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
+
+	/* There should only be one entry, but go through the list
+	 * anyway
+	 */
+	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+		if (!afu_dev->driver)
+			continue;
+
+		afu_dev->error_state = state;
+
+		if (afu_dev->driver->err_handler)
+			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
+										  state);
+		/* Disconnect trumps all, NONE trumps NEED_RESET */
+		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+			result = PCI_ERS_RESULT_DISCONNECT;
+		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
+			 (result == PCI_ERS_RESULT_NEED_RESET))
+			result = PCI_ERS_RESULT_NONE;
+	}
+	return result;
+}
+
+static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
+	int i;
+
+	/* At this point, we could still have an interrupt pending.
+	 * Let's try to get them out of the way before they do
+	 * anything we don't like.
+	 */
+	schedule();
+
+	/* If we're permanently dead, give up. */
+	if (state == pci_channel_io_perm_failure) {
+		/* Tell the AFU drivers; but we don't care what they
+		 * say, we're going away.
+		 */
+		for (i = 0; i < adapter->slices; i++) {
+			afu = adapter->afu[i];
+			cxl_vphb_error_detected(afu, state);
+		}
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	/* Are we reflashing?
+	 *
+	 * If we reflash, we could come back as something entirely
+	 * different, including a non-CAPI card. As such, by default
+	 * we don't participate in the process. We'll be unbound and
+	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
+	 * us!)
+	 *
+	 * However, this isn't the entire story: for reliability
+	 * reasons, we usually want to reflash the FPGA on PERST in
+	 * order to get back to a more reliable known-good state.
+	 *
+	 * This causes us a bit of a problem: if we reflash we can't
+	 * trust that we'll come back the same - we could have a new
+	 * image and have been PERSTed in order to load that
+	 * image. However, most of the time we actually *will* come
+	 * back the same - for example a regular EEH event.
+	 *
+	 * Therefore, we allow the user to assert that the image is
+	 * indeed the same and that we should continue on into EEH
+	 * anyway.
+	 */
+	if (adapter->perst_loads_image && !adapter->perst_same_image) {
+		/* TODO take the PHB out of CXL mode */
+		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
+		return PCI_ERS_RESULT_NONE;
+	}
+
+	/*
+	 * At this point, we want to try to recover.  We'll always
+	 * need a complete slot reset: we don't trust any other reset.
+	 *
+	 * Now, we go through each AFU:
+	 *  - We send the driver, if bound, an error_detected callback.
+	 *    We expect it to clean up, but it can also tell us to give
+	 *    up and permanently detach the card. To simplify things, if
+	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
+	 *
+	 *  - We detach all contexts associated with the AFU. This
+	 *    does not free them, but puts them into a CLOSED state
+	 *    which causes the associated files to return useful
+	 *    errors to userland. It also unmaps, but does not free,
+	 *    any IRQs.
+	 *
+	 *  - We clean up our side: releasing and unmapping resources we hold
+	 *    so we can wire them up again when the hardware comes back up.
+	 *
+	 * Driver authors should note:
+	 *
+	 *  - Any contexts you create in your kernel driver (except
+	 *    those associated with anonymous file descriptors) are
+	 *    your responsibility to free and recreate. Likewise with
+	 *    any attached resources.
+	 *
+	 *  - We will take responsibility for re-initialising the
+	 *    device context (the one set up for you in
+	 *    cxl_pci_enable_device_hook and accessed through
+	 *    cxl_get_context). If you've attached IRQs or other
+	 *    resources to it, they remain yours to free.
+	 *
+	 * You can call the same functions to release resources as you
+	 * normally would: we make sure that these functions continue
+	 * to work when the hardware is down.
+	 *
+	 * Two examples:
+	 *
+	 * 1) If you normally free all your resources at the end of
+	 *    each request, or if you use anonymous FDs, your
+	 *    error_detected callback can simply set a flag to tell
+	 *    your driver not to start any new calls. You can then
+	 *    clear the flag in the resume callback.
+	 *
+	 * 2) If you normally allocate your resources on startup:
+	 *     * Set a flag in error_detected as above.
+	 *     * Let CXL detach your contexts.
+	 *     * In slot_reset, free the old resources and allocate new ones.
+	 *     * In resume, clear the flag to allow things to start.
+	 */
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		result = cxl_vphb_error_detected(afu, state);
+
+		/* Only continue if everyone agrees on NEED_RESET */
+		if (result != PCI_ERS_RESULT_NEED_RESET)
+			return result;
+
+		cxl_context_detach_all(afu);
+		cxl_afu_deactivate_mode(afu);
+		cxl_deconfigure_afu(afu);
+	}
+	cxl_deconfigure_adapter(adapter);
+
+	return result;
+}
+
+static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	struct cxl_context *ctx;
+	struct pci_dev *afu_dev;
+	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
+	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
+	int i;
+
+	if (cxl_configure_adapter(adapter, pdev))
+		goto err;
+
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		if (cxl_configure_afu(afu, adapter, pdev))
+			goto err;
+
+		if (cxl_afu_select_best_mode(afu))
+			goto err;
+
+		cxl_pci_vphb_reconfigure(afu);
+
+		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+			/* Reset the device context.
+			 * TODO: make this less disruptive
+			 */
+			ctx = cxl_get_context(afu_dev);
+
+			if (ctx && cxl_release_context(ctx))
+				goto err;
+
+			ctx = cxl_dev_context_init(afu_dev);
+			if (!ctx)
+				goto err;
+
+			afu_dev->dev.archdata.cxl_ctx = ctx;
+
+			if (cxl_afu_check_and_enable(afu))
+				goto err;
+
+			afu_dev->error_state = pci_channel_io_normal;
+
+			/* If there's a driver attached, allow it to
+			 * chime in on recovery. Drivers should check
+			 * if everything has come back OK, but
+			 * shouldn't start new work until we call
+			 * their resume function.
+			 */
+			if (!afu_dev->driver)
+				continue;
+
+			if (afu_dev->driver->err_handler &&
+			    afu_dev->driver->err_handler->slot_reset)
+				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
+
+			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
+				result = PCI_ERS_RESULT_DISCONNECT;
+		}
+	}
+	return result;
+
+err:
+	/* All the bits that happen in both error_detected and cxl_remove
+	 * should be idempotent, so we don't need to worry about leaving a mix
+	 * of unconfigured and reconfigured resources.
+	 */
+	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void cxl_pci_resume(struct pci_dev *pdev)
+{
+	struct cxl *adapter = pci_get_drvdata(pdev);
+	struct cxl_afu *afu;
+	struct pci_dev *afu_dev;
+	int i;
+
+	/* Everything is back now. Drivers should restart work now.
+	 * This is not the place to be checking if everything came back up
+	 * properly, because there's no return value: do that in slot_reset.
+	 */
+	for (i = 0; i < adapter->slices; i++) {
+		afu = adapter->afu[i];
+
+		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
+			if (afu_dev->driver && afu_dev->driver->err_handler &&
+			    afu_dev->driver->err_handler->resume)
+				afu_dev->driver->err_handler->resume(afu_dev);
+		}
+	}
+}
+
+static const struct pci_error_handlers cxl_err_handler = {
+	.error_detected = cxl_pci_error_detected,
+	.slot_reset = cxl_pci_slot_reset,
+	.resume = cxl_pci_resume,
+};
+
 struct pci_driver cxl_pci_driver = {
 	.name = "cxl-pci",
 	.id_table = cxl_pci_tbl,
 	.probe = cxl_probe,
 	.remove = cxl_remove,
 	.shutdown = cxl_remove,
+	.err_handler = &cxl_err_handler,
 };
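
The long comment in cxl_pci_error_detected() above spells out the EEH contract for AFU drivers bound to the virtual PHB. As an illustration only (not part of this patch), a minimal driver following pattern 1 — stop new work in error_detected, restart in resume — might look roughly like the sketch below; the driver name, the in_error flag and the empty slot_reset are hypothetical and only show the pci_error_handlers shape that the recovery code above invokes:

/* Illustrative sketch only, not part of this patch. */
static bool afu_drv_in_error;

static pci_ers_result_t afu_drv_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	/* Stop issuing new work; cxl will detach our contexts for us. */
	afu_drv_in_error = true;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t afu_drv_slot_reset(struct pci_dev *pdev)
{
	/* Re-allocate anything normally set up at startup, if needed. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void afu_drv_resume(struct pci_dev *pdev)
{
	/* Everything is back; allow new work again. */
	afu_drv_in_error = false;
}

static const struct pci_error_handlers afu_drv_err_handler = {
	.error_detected = afu_drv_error_detected,
	.slot_reset	= afu_drv_slot_reset,
	.resume		= afu_drv_resume,
};
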
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 31f38bc..25868c2 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -112,12 +112,38 @@
 	return count;
 }
 
+static ssize_t perst_reloads_same_image_show(struct device *device,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+
+	return scnprintf(buf, PAGE_SIZE, "%i\n", adapter->perst_same_image);
+}
+
+static ssize_t perst_reloads_same_image_store(struct device *device,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct cxl *adapter = to_cxl_adapter(device);
+	int rc;
+	int val;
+
+	rc = sscanf(buf, "%i", &val);
+	if ((rc != 1) || !(val == 1 || val == 0))
+		return -EINVAL;
+
+	adapter->perst_same_image = (val == 1 ? true : false);
+	return count;
+}
+
 static struct device_attribute adapter_attrs[] = {
 	__ATTR_RO(caia_version),
 	__ATTR_RO(psl_revision),
 	__ATTR_RO(base_image),
 	__ATTR_RO(image_loaded),
 	__ATTR_RW(load_image_on_perst),
+	__ATTR_RW(perst_reloads_same_image),
 	__ATTR(reset, S_IWUSR, NULL, reset_adapter_store),
 };
 
@@ -443,12 +469,7 @@
 	struct afu_config_record *cr = to_cr(kobj);
 	struct cxl_afu *afu = to_cxl_afu(container_of(kobj->parent, struct device, kobj));
 
-	u64 i, j, val, size = afu->crs_len;
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
+	u64 i, j, val;
 
 	for (i = 0; i < count;) {
 		val = cxl_afu_cr_read64(afu, cr->cr, off & ~0x7);
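
The new perst_reloads_same_image attribute added above accepts 0 or 1 and feeds the perst_same_image flag that cxl_pci_error_detected() checks before opting into EEH. A rough userspace sketch of asserting it follows; the card0 path is an assumption about where the adapter's attributes appear under /sys/class/cxl:

/* Illustrative only: tell the cxl driver that PERST will reload the
 * same FPGA image, so full EEH recovery stays enabled.
 */
#include <fcntl.h>
#include <unistd.h>

static int assert_same_image(void)
{
	int fd = open("/sys/class/cxl/card0/perst_reloads_same_image", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}
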
diff --git a/drivers/misc/cxl/trace.h b/drivers/misc/cxl/trace.h
index ae434d8..6e1e2ad 100644
--- a/drivers/misc/cxl/trace.h
+++ b/drivers/misc/cxl/trace.h
@@ -105,7 +105,7 @@
 		__entry->num_interrupts = num_interrupts;
 	),
 
-	TP_printk("afu%i.%i pid=%i pe=%i wed=0x%.16llx irqs=%i amr=0x%llx",
+	TP_printk("afu%i.%i pid=%i pe=%i wed=0x%016llx irqs=%i amr=0x%llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pid,
@@ -177,7 +177,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i irq=%i dsisr=%s dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -233,7 +233,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -264,7 +264,7 @@
 		__entry->v = v;
 	),
 
-	TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%.16llx V=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i SSTE[%i] E=0x%016llx V=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
@@ -295,7 +295,7 @@
 		__entry->dar = dar;
 	),
 
-	TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%.16llx",
+	TP_printk("afu%i.%i pe=%i dsisr=%s dar=0x%016llx",
 		__entry->card,
 		__entry->afu,
 		__entry->pe,
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 2eba002..6dd16a6 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -138,6 +138,26 @@
 	return 0;
 }
 
+
+static inline bool cxl_config_link_ok(struct pci_bus *bus)
+{
+	struct pci_controller *phb;
+	struct cxl_afu *afu;
+
+	/* Config space IO is based on phb->cfg_addr, which is based on
+	 * afu_desc_mmio. This isn't safe to read/write when the link
+	 * goes down, as EEH tears down MMIO space.
+	 *
+	 * Check if the link is OK before proceeding.
+	 */
+
+	phb = pci_bus_to_host(bus);
+	if (phb == NULL)
+		return false;
+	afu = (struct cxl_afu *)phb->private_data;
+	return cxl_adapter_link_ok(afu->adapter);
+}
+
 static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
 				int offset, int len, u32 *val)
 {
@@ -150,6 +170,9 @@
 	if (rc)
 		return rc;
 
+	if (!cxl_config_link_ok(bus))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
 	/* Can only read 32 bits */
 	*val = (in_le32(ioaddr) >> shift) & mask;
 	return PCIBIOS_SUCCESSFUL;
@@ -167,6 +190,9 @@
 	if (rc)
 		return rc;
 
+	if (!cxl_config_link_ok(bus))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
 	/* Can only write 32 bits so do read-modify-write */
 	mask <<= shift;
 	val <<= shift;
@@ -240,6 +266,14 @@
 	return 0;
 }
 
+void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
+{
+	/* When we are reconfigured, the AFU's MMIO space is unmapped
+	 * and remapped. We need to reflect this in the PHB's view of
+	 * the world.
+	 */
+	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
+}
 
 void cxl_pci_vphb_remove(struct cxl_afu *afu)
 {
diff --git a/drivers/misc/ds1682.c b/drivers/misc/ds1682.c
index b909fb3..c711227 100644
--- a/drivers/misc/ds1682.c
+++ b/drivers/misc/ds1682.c
@@ -148,12 +148,6 @@
 	dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
 		buf, off, count);
 
-	if (off >= DS1682_EEPROM_SIZE)
-		return 0;
-
-	if (off + count > DS1682_EEPROM_SIZE)
-		count = DS1682_EEPROM_SIZE - off;
-
 	rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
 					   count, buf);
 	if (rc < 0)
@@ -171,12 +165,6 @@
 	dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
 		buf, off, count);
 
-	if (off >= DS1682_EEPROM_SIZE)
-		return -ENOSPC;
-
-	if (off + count > DS1682_EEPROM_SIZE)
-		count = DS1682_EEPROM_SIZE - off;
-
 	/* Write out to the device */
 	if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
 					   count, buf) < 0)
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 9536852f..04f2e1f 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -96,17 +96,4 @@
 
 	  If unsure, say N.
 
-config EEPROM_SUNXI_SID
-	tristate "Allwinner sunxi security ID support"
-	depends on ARCH_SUNXI && SYSFS
-	help
-	  This is a driver for the 'security ID' available on various Allwinner
-	  devices.
-
-	  Due to the potential risks involved with changing e-fuses,
-	  this driver is read-only.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called sunxi_sid.
-
 endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 9507aec..fc1e81d 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -4,5 +4,4 @@
 obj-$(CONFIG_EEPROM_MAX6875)	+= max6875.o
 obj-$(CONFIG_EEPROM_93CX6)	+= eeprom_93cx6.o
 obj-$(CONFIG_EEPROM_93XX46)	+= eeprom_93xx46.o
-obj-$(CONFIG_EEPROM_SUNXI_SID)	+= sunxi_sid.o
 obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 6ded3dc..2b254f3 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -686,7 +686,6 @@
 static struct i2c_driver at24_driver = {
 	.driver = {
 		.name = "at24",
-		.owner = THIS_MODULE,
 	},
 	.probe = at24_probe,
 	.remove = at24_remove,
diff --git a/drivers/misc/eeprom/eeprom.c b/drivers/misc/eeprom/eeprom.c
index b432873..7342fd6 100644
--- a/drivers/misc/eeprom/eeprom.c
+++ b/drivers/misc/eeprom/eeprom.c
@@ -88,11 +88,6 @@
 	struct eeprom_data *data = i2c_get_clientdata(client);
 	u8 slice;
 
-	if (off > EEPROM_SIZE)
-		return 0;
-	if (off + count > EEPROM_SIZE)
-		count = EEPROM_SIZE - off;
-
 	/* Only refresh slices which contain requested bytes */
 	for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
 		eeprom_update_client(client, slice);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 9ebeacd..a6bd9e3 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -48,13 +48,6 @@
 	dev = container_of(kobj, struct device, kobj);
 	edev = dev_get_drvdata(dev);
 
-	if (unlikely(off >= edev->bin.size))
-		return 0;
-	if ((off + count) > edev->bin.size)
-		count = edev->bin.size - off;
-	if (unlikely(!count))
-		return count;
-
 	cmd_addr = OP_READ << edev->addrlen;
 
 	if (edev->addrlen == 7) {
@@ -200,13 +193,6 @@
 	dev = container_of(kobj, struct device, kobj);
 	edev = dev_get_drvdata(dev);
 
-	if (unlikely(off >= edev->bin.size))
-		return -EFBIG;
-	if ((off + count) > edev->bin.size)
-		count = edev->bin.size - off;
-	if (unlikely(!count))
-		return count;
-
 	/* only write even number of bytes on 16-bit devices */
 	if (edev->addrlen == 6) {
 		step = 2;
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 580ff9d..9aa4332 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -114,12 +114,6 @@
 	struct max6875_data *data = i2c_get_clientdata(client);
 	int slice, max_slice;
 
-	if (off > USER_EEPROM_SIZE)
-		return 0;
-
-	if (off + count > USER_EEPROM_SIZE)
-		count = USER_EEPROM_SIZE - off;
-
 	/* refresh slices which contain requested bytes */
 	max_slice = (off + count - 1) >> SLICE_BITS;
 	for (slice = (off >> SLICE_BITS); slice <= max_slice; slice++)
diff --git a/drivers/misc/eeprom/sunxi_sid.c b/drivers/misc/eeprom/sunxi_sid.c
deleted file mode 100644
index 8385177..0000000
--- a/drivers/misc/eeprom/sunxi_sid.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
- * http://www.linux-sunxi.org
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * This driver exposes the Allwinner security ID, efuses exported in byte-
- * sized chunks.
- */
-
-#include <linux/compiler.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
-#define DRV_NAME "sunxi-sid"
-
-struct sunxi_sid_data {
-	void __iomem *reg_base;
-	unsigned int keysize;
-};
-
-/* We read the entire key, due to a 32 bit read alignment requirement. Since we
- * want to return the requested byte, this results in somewhat slower code and
- * uses 4 times more reads as needed but keeps code simpler. Since the SID is
- * only very rarely probed, this is not really an issue.
- */
-static u8 sunxi_sid_read_byte(const struct sunxi_sid_data *sid_data,
-			      const unsigned int offset)
-{
-	u32 sid_key;
-
-	if (offset >= sid_data->keysize)
-		return 0;
-
-	sid_key = ioread32be(sid_data->reg_base + round_down(offset, 4));
-	sid_key >>= (offset % 4) * 8;
-
-	return sid_key; /* Only return the last byte */
-}
-
-static ssize_t sid_read(struct file *fd, struct kobject *kobj,
-			struct bin_attribute *attr, char *buf,
-			loff_t pos, size_t size)
-{
-	struct platform_device *pdev;
-	struct sunxi_sid_data *sid_data;
-	int i;
-
-	pdev = to_platform_device(kobj_to_dev(kobj));
-	sid_data = platform_get_drvdata(pdev);
-
-	if (pos < 0 || pos >= sid_data->keysize)
-		return 0;
-	if (size > sid_data->keysize - pos)
-		size = sid_data->keysize - pos;
-
-	for (i = 0; i < size; i++)
-		buf[i] = sunxi_sid_read_byte(sid_data, pos + i);
-
-	return i;
-}
-
-static struct bin_attribute sid_bin_attr = {
-	.attr = { .name = "eeprom", .mode = S_IRUGO, },
-	.read = sid_read,
-};
-
-static int sunxi_sid_remove(struct platform_device *pdev)
-{
-	device_remove_bin_file(&pdev->dev, &sid_bin_attr);
-	dev_dbg(&pdev->dev, "driver unloaded\n");
-
-	return 0;
-}
-
-static const struct of_device_id sunxi_sid_of_match[] = {
-	{ .compatible = "allwinner,sun4i-a10-sid", .data = (void *)16},
-	{ .compatible = "allwinner,sun7i-a20-sid", .data = (void *)512},
-	{/* sentinel */},
-};
-MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
-
-static int sunxi_sid_probe(struct platform_device *pdev)
-{
-	struct sunxi_sid_data *sid_data;
-	struct resource *res;
-	const struct of_device_id *of_dev_id;
-	u8 *entropy;
-	unsigned int i;
-
-	sid_data = devm_kzalloc(&pdev->dev, sizeof(struct sunxi_sid_data),
-				GFP_KERNEL);
-	if (!sid_data)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	sid_data->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(sid_data->reg_base))
-		return PTR_ERR(sid_data->reg_base);
-
-	of_dev_id = of_match_device(sunxi_sid_of_match, &pdev->dev);
-	if (!of_dev_id)
-		return -ENODEV;
-	sid_data->keysize = (int)of_dev_id->data;
-
-	platform_set_drvdata(pdev, sid_data);
-
-	sid_bin_attr.size = sid_data->keysize;
-	if (device_create_bin_file(&pdev->dev, &sid_bin_attr))
-		return -ENODEV;
-
-	entropy = kzalloc(sizeof(u8) * sid_data->keysize, GFP_KERNEL);
-	for (i = 0; i < sid_data->keysize; i++)
-		entropy[i] = sunxi_sid_read_byte(sid_data, i);
-	add_device_randomness(entropy, sid_data->keysize);
-	kfree(entropy);
-
-	dev_dbg(&pdev->dev, "loaded\n");
-
-	return 0;
-}
-
-static struct platform_driver sunxi_sid_driver = {
-	.probe = sunxi_sid_probe,
-	.remove = sunxi_sid_remove,
-	.driver = {
-		.name = DRV_NAME,
-		.of_match_table = sunxi_sid_of_match,
-	},
-};
-module_platform_driver(sunxi_sid_driver);
-
-MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
-MODULE_DESCRIPTION("Allwinner sunxi security id driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index 12c30b4..976df00 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -465,7 +465,6 @@
 static struct i2c_driver isl29003_driver = {
 	.driver = {
 		.name	= ISL29003_DRV_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= ISL29003_PM_OPS,
 	},
 	.probe	= isl29003_probe,
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index e3e7f1d..0c3bb7e 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -274,7 +274,6 @@
 static struct i2c_driver lis3lv02d_i2c_driver = {
 	.driver	 = {
 		.name   = DRV_NAME,
-		.owner  = THIS_MODULE,
 		.pm     = &lis3_pm_ops,
 		.of_match_table = of_match_ptr(lis3lv02d_i2c_dt_ids),
 	},
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 518914a..01447ca 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -11,7 +11,7 @@
 mei-objs += amthif.o
 mei-objs += wd.o
 mei-objs += bus.o
-mei-objs += nfc.o
+mei-objs += bus-fixup.o
 mei-$(CONFIG_DEBUG_FS) += debugfs.o
 
 obj-$(CONFIG_INTEL_MEI_ME) += mei-me.o
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
new file mode 100644
index 0000000..3e536ca
--- /dev/null
+++ b/drivers/misc/mei/bus-fixup.c
@@ -0,0 +1,306 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+
+#include <linux/mei_cl_bus.h>
+
+#include "mei_dev.h"
+#include "client.h"
+
+#define MEI_UUID_NFC_INFO UUID_LE(0xd2de1625, 0x382d, 0x417d, \
+			0x48, 0xa4, 0xef, 0xab, 0xba, 0x8a, 0x12, 0x06)
+
+static const uuid_le mei_nfc_info_guid = MEI_UUID_NFC_INFO;
+
+#define MEI_UUID_NFC_HCI UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, \
+			0x94, 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
+#define MEI_UUID_ANY NULL_UUID_LE
+
+/**
+ * number_of_connections - determine whether a client may be on the bus
+ *    according to its number of connections
+ *    We support only clients:
+ *       1. with a single connection
+ *       2. and fixed clients (max_number_of_connections == 0)
+ *
+ * @cldev: me client device
+ */
+static void number_of_connections(struct mei_cl_device *cldev)
+{
+	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
+			__func__, mei_me_cl_uuid(cldev->me_cl));
+
+	if (cldev->me_cl->props.max_number_of_connections > 1)
+		cldev->do_match = 0;
+}
+
+/**
+ * blacklist - blacklist a client from the bus
+ *
+ * @cldev: me client device
+ */
+static void blacklist(struct mei_cl_device *cldev)
+{
+	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
+			__func__, mei_me_cl_uuid(cldev->me_cl));
+	cldev->do_match = 0;
+}
+
+struct mei_nfc_cmd {
+	u8 command;
+	u8 status;
+	u16 req_id;
+	u32 reserved;
+	u16 data_size;
+	u8 sub_command;
+	u8 data[];
+} __packed;
+
+struct mei_nfc_reply {
+	u8 command;
+	u8 status;
+	u16 req_id;
+	u32 reserved;
+	u16 data_size;
+	u8 sub_command;
+	u8 reply_status;
+	u8 data[];
+} __packed;
+
+struct mei_nfc_if_version {
+	u8 radio_version_sw[3];
+	u8 reserved[3];
+	u8 radio_version_hw[3];
+	u8 i2c_addr;
+	u8 fw_ivn;
+	u8 vendor_id;
+	u8 radio_type;
+} __packed;
+
+
+#define MEI_NFC_CMD_MAINTENANCE 0x00
+#define MEI_NFC_SUBCMD_IF_VERSION 0x01
+
+/* Vendors */
+#define MEI_NFC_VENDOR_INSIDE 0x00
+#define MEI_NFC_VENDOR_NXP    0x01
+
+/* Radio types */
+#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
+#define MEI_NFC_VENDOR_NXP_PN544    0x01
+
+/**
+ * mei_nfc_if_version - get NFC interface version
+ *
+ * @cl: host client (nfc info)
+ * @ver: NFC interface version to be filled in
+ *
+ * Return: 0 on success; < 0 otherwise
+ */
+static int mei_nfc_if_version(struct mei_cl *cl,
+			      struct mei_nfc_if_version *ver)
+{
+	struct mei_device *bus;
+	struct mei_nfc_cmd cmd = {
+		.command = MEI_NFC_CMD_MAINTENANCE,
+		.data_size = 1,
+		.sub_command = MEI_NFC_SUBCMD_IF_VERSION,
+	};
+	struct mei_nfc_reply *reply = NULL;
+	size_t if_version_length;
+	int bytes_recv, ret;
+
+	bus = cl->dev;
+
+	WARN_ON(mutex_is_locked(&bus->device_lock));
+
+	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
+	if (ret < 0) {
+		dev_err(bus->dev, "Could not send IF version cmd\n");
+		return ret;
+	}
+
+	/* allocate the reply on the heap rather than on the stack */
+	if_version_length = sizeof(struct mei_nfc_reply) +
+		sizeof(struct mei_nfc_if_version);
+
+	reply = kzalloc(if_version_length, GFP_KERNEL);
+	if (!reply)
+		return -ENOMEM;
+
+	ret = 0;
+	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
+	if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
+		dev_err(bus->dev, "Could not read IF version\n");
+		ret = -EIO;
+		goto err;
+	}
+
+	memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version));
+
+	dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
+		ver->fw_ivn, ver->vendor_id, ver->radio_type);
+
+err:
+	kfree(reply);
+	return ret;
+}
+
+/**
+ * mei_nfc_radio_name - derive nfc radio name from the interface version
+ *
+ * @ver: NFC radio version
+ *
+ * Return: radio name string
+ */
+static const char *mei_nfc_radio_name(struct mei_nfc_if_version *ver)
+{
+
+	if (ver->vendor_id == MEI_NFC_VENDOR_INSIDE) {
+		if (ver->radio_type == MEI_NFC_VENDOR_INSIDE_UREAD)
+			return "microread";
+	}
+
+	if (ver->vendor_id == MEI_NFC_VENDOR_NXP) {
+		if (ver->radio_type == MEI_NFC_VENDOR_NXP_PN544)
+			return "pn544";
+	}
+
+	return NULL;
+}
+
+/**
+ * mei_nfc - The nfc fixup function. The function retrieves the nfc radio
+ *    name and sets it as a device attribute so we can load
+ *    the proper device driver for it
+ *
+ * @cldev: me client device (nfc)
+ */
+static void mei_nfc(struct mei_cl_device *cldev)
+{
+	struct mei_device *bus;
+	struct mei_cl *cl;
+	struct mei_me_client *me_cl = NULL;
+	struct mei_nfc_if_version ver;
+	const char *radio_name = NULL;
+	int ret;
+
+	bus = cldev->bus;
+
+	dev_dbg(bus->dev, "running hook %s: %pUl match=%d\n",
+		__func__, mei_me_cl_uuid(cldev->me_cl), cldev->do_match);
+
+	mutex_lock(&bus->device_lock);
+	/* we need to connect to INFO GUID */
+	cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+	if (IS_ERR(cl)) {
+		ret = PTR_ERR(cl);
+		cl = NULL;
+		dev_err(bus->dev, "nfc hook alloc failed %d\n", ret);
+		goto out;
+	}
+
+	me_cl = mei_me_cl_by_uuid(bus, &mei_nfc_info_guid);
+	if (!me_cl) {
+		ret = -ENOTTY;
+		dev_err(bus->dev, "Cannot find nfc info %d\n", ret);
+		goto out;
+	}
+
+	ret = mei_cl_connect(cl, me_cl, NULL);
+	if (ret < 0) {
+		dev_err(&cldev->dev, "Can't connect to the NFC INFO ME ret = %d\n",
+			ret);
+		goto out;
+	}
+
+	mutex_unlock(&bus->device_lock);
+
+	ret = mei_nfc_if_version(cl, &ver);
+	if (ret)
+		goto disconnect;
+
+	radio_name = mei_nfc_radio_name(&ver);
+
+	if (!radio_name) {
+		ret = -ENOENT;
+		dev_err(&cldev->dev, "Can't get the NFC interface version ret = %d\n",
+			ret);
+		goto disconnect;
+	}
+
+	dev_dbg(bus->dev, "nfc radio %s\n", radio_name);
+	strlcpy(cldev->name, radio_name, sizeof(cldev->name));
+
+disconnect:
+	mutex_lock(&bus->device_lock);
+	if (mei_cl_disconnect(cl) < 0)
+		dev_err(bus->dev, "Can't disconnect the NFC INFO ME\n");
+
+	mei_cl_flush_queues(cl, NULL);
+
+out:
+	mei_cl_unlink(cl);
+	mutex_unlock(&bus->device_lock);
+	mei_me_cl_put(me_cl);
+	kfree(cl);
+
+	if (ret)
+		cldev->do_match = 0;
+
+	dev_dbg(bus->dev, "end of fixup match = %d\n", cldev->do_match);
+}
+
+#define MEI_FIXUP(_uuid, _hook) { _uuid, _hook }
+
+static struct mei_fixup {
+
+	const uuid_le uuid;
+	void (*hook)(struct mei_cl_device *cldev);
+} mei_fixups[] = {
+	MEI_FIXUP(MEI_UUID_ANY, number_of_connections),
+	MEI_FIXUP(MEI_UUID_NFC_INFO, blacklist),
+	MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
+};
+
+/**
+ * mei_cl_dev_fixup - run fixup handlers
+ *
+ * @cldev: me client device
+ */
+void mei_cl_dev_fixup(struct mei_cl_device *cldev)
+{
+	struct mei_fixup *f;
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mei_fixups); i++) {
+
+		f = &mei_fixups[i];
+		if (uuid_le_cmp(f->uuid, MEI_UUID_ANY) == 0 ||
+		    uuid_le_cmp(f->uuid, *uuid) == 0)
+			f->hook(cldev);
+	}
+}
+
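
mei_cl_dev_fixup() above walks mei_fixups[] and runs every hook whose UUID is MEI_UUID_ANY or matches the client, each hook deciding do_match and, where needed, the device name. As a sketch only, a hypothetical additional fixup would be wired in like this (MEI_UUID_FOO and the hook body are made up for illustration):

/* Hypothetical example, not part of this patch. */
#define MEI_UUID_FOO UUID_LE(0x01234567, 0x89ab, 0xcdef, \
			0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef)

static void whitelist_foo(struct mei_cl_device *cldev)
{
	dev_dbg(&cldev->dev, "running hook %s on %pUl\n",
			__func__, mei_me_cl_uuid(cldev->me_cl));
	cldev->do_match = 1;	/* keep this client visible on the bus */
}

/* ...and one extra entry in mei_fixups[]:
 *	MEI_FIXUP(MEI_UUID_FOO, whitelist_foo),
 */
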
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 458aa5a..eef1c6b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -30,276 +30,29 @@
 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
 #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
 
-static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	struct mei_cl_driver *driver = to_mei_cl_driver(drv);
-	const struct mei_cl_device_id *id;
-	const uuid_le *uuid;
-	const char *name;
-
-	if (!device)
-		return 0;
-
-	uuid = mei_me_cl_uuid(device->me_cl);
-	name = device->name;
-
-	if (!driver || !driver->id_table)
-		return 0;
-
-	id = driver->id_table;
-
-	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
-
-		if (!uuid_le_cmp(*uuid, id->uuid)) {
-			if (id->name[0]) {
-				if (!strncmp(name, id->name, sizeof(id->name)))
-					return 1;
-			} else {
-				return 1;
-			}
-		}
-
-		id++;
-	}
-
-	return 0;
-}
-
-static int mei_cl_device_probe(struct device *dev)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	struct mei_cl_driver *driver;
-	struct mei_cl_device_id id;
-
-	if (!device)
-		return 0;
-
-	driver = to_mei_cl_driver(dev->driver);
-	if (!driver || !driver->probe)
-		return -ENODEV;
-
-	dev_dbg(dev, "Device probe\n");
-
-	strlcpy(id.name, device->name, sizeof(id.name));
-
-	return driver->probe(device, &id);
-}
-
-static int mei_cl_device_remove(struct device *dev)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	struct mei_cl_driver *driver;
-
-	if (!device || !dev->driver)
-		return 0;
-
-	if (device->event_cb) {
-		device->event_cb = NULL;
-		cancel_work_sync(&device->event_work);
-	}
-
-	driver = to_mei_cl_driver(dev->driver);
-	if (!driver->remove) {
-		dev->driver = NULL;
-
-		return 0;
-	}
-
-	return driver->remove(device);
-}
-
-static ssize_t name_show(struct device *dev, struct device_attribute *a,
-			     char *buf)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	size_t len;
-
-	len = snprintf(buf, PAGE_SIZE, "%s", device->name);
-
-	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(name);
-
-static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
-			     char *buf)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-	size_t len;
-
-	len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
-
-	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(uuid);
-
-static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
-			     char *buf)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-	size_t len;
-
-	len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
-		device->name, MEI_CL_UUID_ARGS(uuid->b));
-
-	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
-}
-static DEVICE_ATTR_RO(modalias);
-
-static struct attribute *mei_cl_dev_attrs[] = {
-	&dev_attr_name.attr,
-	&dev_attr_uuid.attr,
-	&dev_attr_modalias.attr,
-	NULL,
-};
-ATTRIBUTE_GROUPS(mei_cl_dev);
-
-static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-	const uuid_le *uuid = mei_me_cl_uuid(device->me_cl);
-
-	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
-		return -ENOMEM;
-
-	if (add_uevent_var(env, "MEI_CL_NAME=%s", device->name))
-		return -ENOMEM;
-
-	if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
-		device->name, MEI_CL_UUID_ARGS(uuid->b)))
-		return -ENOMEM;
-
-	return 0;
-}
-
-static struct bus_type mei_cl_bus_type = {
-	.name		= "mei",
-	.dev_groups	= mei_cl_dev_groups,
-	.match		= mei_cl_device_match,
-	.probe		= mei_cl_device_probe,
-	.remove		= mei_cl_device_remove,
-	.uevent		= mei_cl_uevent,
-};
-
-static void mei_cl_dev_release(struct device *dev)
-{
-	struct mei_cl_device *device = to_mei_cl_device(dev);
-
-	if (!device)
-		return;
-
-	mei_me_cl_put(device->me_cl);
-	kfree(device);
-}
-
-static struct device_type mei_cl_device_type = {
-	.release	= mei_cl_dev_release,
-};
-
-struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
-					 uuid_le uuid)
-{
-	struct mei_cl *cl;
-
-	list_for_each_entry(cl, &dev->device_list, device_link) {
-		if (cl->device && cl->device->me_cl &&
-		    !uuid_le_cmp(uuid, *mei_me_cl_uuid(cl->device->me_cl)))
-			return cl;
-	}
-
-	return NULL;
-}
-
-struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
-					struct mei_me_client *me_cl,
-					struct mei_cl *cl,
-					char *name)
-{
-	struct mei_cl_device *device;
-	int status;
-
-	device = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
-	if (!device)
-		return NULL;
-
-	device->me_cl = mei_me_cl_get(me_cl);
-	if (!device->me_cl) {
-		kfree(device);
-		return NULL;
-	}
-
-	device->cl = cl;
-	device->dev.parent = dev->dev;
-	device->dev.bus = &mei_cl_bus_type;
-	device->dev.type = &mei_cl_device_type;
-
-	strlcpy(device->name, name, sizeof(device->name));
-
-	dev_set_name(&device->dev, "mei:%s:%pUl", name, mei_me_cl_uuid(me_cl));
-
-	status = device_register(&device->dev);
-	if (status) {
-		dev_err(dev->dev, "Failed to register MEI device\n");
-		mei_me_cl_put(device->me_cl);
-		kfree(device);
-		return NULL;
-	}
-
-	cl->device = device;
-
-	dev_dbg(&device->dev, "client %s registered\n", name);
-
-	return device;
-}
-EXPORT_SYMBOL_GPL(mei_cl_add_device);
-
-void mei_cl_remove_device(struct mei_cl_device *device)
-{
-	device_unregister(&device->dev);
-}
-EXPORT_SYMBOL_GPL(mei_cl_remove_device);
-
-int __mei_cl_driver_register(struct mei_cl_driver *driver, struct module *owner)
-{
-	int err;
-
-	driver->driver.name = driver->name;
-	driver->driver.owner = owner;
-	driver->driver.bus = &mei_cl_bus_type;
-
-	err = driver_register(&driver->driver);
-	if (err)
-		return err;
-
-	pr_debug("mei: driver [%s] registered\n", driver->driver.name);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__mei_cl_driver_register);
-
-void mei_cl_driver_unregister(struct mei_cl_driver *driver)
-{
-	driver_unregister(&driver->driver);
-
-	pr_debug("mei: driver [%s] unregistered\n", driver->driver.name);
-}
-EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
-
+/**
+ * __mei_cl_send - internal client send (write)
+ *
+ * @cl: host client
+ * @buf: buffer to send
+ * @length: buffer length
+ * @blocking: wait for write completion
+ *
+ * Return: written size in bytes or < 0 on error
+ */
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 			bool blocking)
 {
-	struct mei_device *dev;
+	struct mei_device *bus;
 	struct mei_cl_cb *cb = NULL;
 	ssize_t rets;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
 
-	dev = cl->dev;
+	bus = cl->dev;
 
-	mutex_lock(&dev->device_lock);
+	mutex_lock(&bus->device_lock);
 	if (!mei_cl_is_connected(cl)) {
 		rets = -ENODEV;
 		goto out;
@@ -327,16 +80,25 @@
 	rets = mei_cl_write(cl, cb, blocking);
 
 out:
-	mutex_unlock(&dev->device_lock);
+	mutex_unlock(&bus->device_lock);
 	if (rets < 0)
 		mei_io_cb_free(cb);
 
 	return rets;
 }
 
+/**
+ * __mei_cl_recv - internal client receive (read)
+ *
+ * @cl: host client
+ * @buf: buffer to receive into
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ */
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
 {
-	struct mei_device *dev;
+	struct mei_device *bus;
 	struct mei_cl_cb *cb;
 	size_t r_length;
 	ssize_t rets;
@@ -344,9 +106,9 @@
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
 
-	dev = cl->dev;
+	bus = cl->dev;
 
-	mutex_lock(&dev->device_lock);
+	mutex_lock(&bus->device_lock);
 
 	cb = mei_cl_read_cb(cl, NULL);
 	if (cb)
@@ -356,9 +118,10 @@
 	if (rets && rets != -EBUSY)
 		goto out;
 
+	/* wait on event only if there is no other waiter */
 	if (list_empty(&cl->rd_completed) && !waitqueue_active(&cl->rx_wait)) {
 
-		mutex_unlock(&dev->device_lock);
+		mutex_unlock(&bus->device_lock);
 
 		if (wait_event_interruptible(cl->rx_wait,
 				(!list_empty(&cl->rd_completed)) ||
@@ -369,7 +132,7 @@
 			return -ERESTARTSYS;
 		}
 
-		mutex_lock(&dev->device_lock);
+		mutex_lock(&bus->device_lock);
 
 		if (!mei_cl_is_connected(cl)) {
 			rets = -EBUSY;
@@ -396,14 +159,23 @@
 free:
 	mei_io_cb_free(cb);
 out:
-	mutex_unlock(&dev->device_lock);
+	mutex_unlock(&bus->device_lock);
 
 	return rets;
 }
 
-ssize_t mei_cl_send(struct mei_cl_device *device, u8 *buf, size_t length)
+/**
+ * mei_cl_send - me device send  (write)
+ *
+ * @cldev: me client device
+ * @buf: buffer to send
+ * @length: buffer length
+ *
+ * Return: written size in bytes or < 0 on error
+ */
+ssize_t mei_cl_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
-	struct mei_cl *cl = device->cl;
+	struct mei_cl *cl = cldev->cl;
 
 	if (cl == NULL)
 		return -ENODEV;
@@ -412,9 +184,18 @@
 }
 EXPORT_SYMBOL_GPL(mei_cl_send);
 
-ssize_t mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length)
+/**
+ * mei_cl_recv - client receive (read)
+ *
+ * @cldev: me client device
+ * @buf: buffer to receive into
+ * @length: buffer length
+ *
+ * Return: read size in bytes or < 0 on error
+ */
+ssize_t mei_cl_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
 {
-	struct mei_cl *cl = device->cl;
+	struct mei_cl *cl = cldev->cl;
 
 	if (cl == NULL)
 		return -ENODEV;
@@ -423,135 +204,698 @@
 }
 EXPORT_SYMBOL_GPL(mei_cl_recv);
 
+/**
+ * mei_bus_event_work  - dispatch rx event for a bus device
+ *    and schedule new work
+ *
+ * @work: work
+ */
 static void mei_bus_event_work(struct work_struct *work)
 {
-	struct mei_cl_device *device;
+	struct mei_cl_device *cldev;
 
-	device = container_of(work, struct mei_cl_device, event_work);
+	cldev = container_of(work, struct mei_cl_device, event_work);
 
-	if (device->event_cb)
-		device->event_cb(device, device->events, device->event_context);
+	if (cldev->event_cb)
+		cldev->event_cb(cldev, cldev->events, cldev->event_context);
 
-	device->events = 0;
+	cldev->events = 0;
 
 	/* Prepare for the next read */
-	mei_cl_read_start(device->cl, 0, NULL);
+	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX))
+		mei_cl_read_start(cldev->cl, 0, NULL);
 }
 
-int mei_cl_register_event_cb(struct mei_cl_device *device,
+/**
+ * mei_cl_bus_notify_event - schedule notify cb on bus client
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_notify_event(struct mei_cl *cl)
+{
+	struct mei_cl_device *cldev = cl->cldev;
+
+	if (!cldev || !cldev->event_cb)
+		return;
+
+	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)))
+		return;
+
+	if (!cl->notify_ev)
+		return;
+
+	set_bit(MEI_CL_EVENT_NOTIF, &cldev->events);
+
+	schedule_work(&cldev->event_work);
+
+	cl->notify_ev = false;
+}
+
+/**
+ * mei_cl_bus_rx_event - schedule rx event
+ *
+ * @cl: host client
+ */
+void mei_cl_bus_rx_event(struct mei_cl *cl)
+{
+	struct mei_cl_device *cldev = cl->cldev;
+
+	if (!cldev || !cldev->event_cb)
+		return;
+
+	if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX)))
+		return;
+
+	set_bit(MEI_CL_EVENT_RX, &cldev->events);
+
+	schedule_work(&cldev->event_work);
+}
+
+/**
+ * mei_cl_register_event_cb - register event callback
+ *
+ * @cldev: me client devices
+ * @event_cb: callback function
+ * @events_mask: requested events bitmask
+ * @context: driver context data
+ *
+ * Return: 0 on success
+ *         -EALREADY if an callback is already registered
+ *         <0 on other errors
+ */
+int mei_cl_register_event_cb(struct mei_cl_device *cldev,
+			  unsigned long events_mask,
 			  mei_cl_event_cb_t event_cb, void *context)
 {
-	if (device->event_cb)
+	int ret;
+
+	if (cldev->event_cb)
 		return -EALREADY;
 
-	device->events = 0;
-	device->event_cb = event_cb;
-	device->event_context = context;
-	INIT_WORK(&device->event_work, mei_bus_event_work);
+	cldev->events = 0;
+	cldev->events_mask = events_mask;
+	cldev->event_cb = event_cb;
+	cldev->event_context = context;
+	INIT_WORK(&cldev->event_work, mei_bus_event_work);
 
-	mei_cl_read_start(device->cl, 0, NULL);
+	if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) {
+		ret = mei_cl_read_start(cldev->cl, 0, NULL);
+		if (ret && ret != -EBUSY)
+			return ret;
+	}
+
+	if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) {
+		mutex_lock(&cldev->cl->dev->device_lock);
+		ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0);
+		mutex_unlock(&cldev->cl->dev->device_lock);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mei_cl_register_event_cb);
 
-void *mei_cl_get_drvdata(const struct mei_cl_device *device)
+/**
+ * mei_cl_get_drvdata - driver data getter
+ *
+ * @cldev: mei client device
+ *
+ * Return: driver private data
+ */
+void *mei_cl_get_drvdata(const struct mei_cl_device *cldev)
 {
-	return dev_get_drvdata(&device->dev);
+	return dev_get_drvdata(&cldev->dev);
 }
 EXPORT_SYMBOL_GPL(mei_cl_get_drvdata);
 
-void mei_cl_set_drvdata(struct mei_cl_device *device, void *data)
+/**
+ * mei_cl_set_drvdata - driver data setter
+ *
+ * @cldev: mei client device
+ * @data: data to store
+ */
+void mei_cl_set_drvdata(struct mei_cl_device *cldev, void *data)
 {
-	dev_set_drvdata(&device->dev, data);
+	dev_set_drvdata(&cldev->dev, data);
 }
 EXPORT_SYMBOL_GPL(mei_cl_set_drvdata);
 
-int mei_cl_enable_device(struct mei_cl_device *device)
+/**
+ * mei_cl_enable_device - enable me client device
+ *     create connection with me client
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success and < 0 on error
+ */
+int mei_cl_enable_device(struct mei_cl_device *cldev)
 {
-	int err;
-	struct mei_device *dev;
-	struct mei_cl *cl = device->cl;
+	struct mei_device *bus = cldev->bus;
+	struct mei_cl *cl;
+	int ret;
 
-	if (cl == NULL)
-		return -ENODEV;
+	cl = cldev->cl;
 
-	dev = cl->dev;
+	if (!cl) {
+		mutex_lock(&bus->device_lock);
+		cl = mei_cl_alloc_linked(bus, MEI_HOST_CLIENT_ID_ANY);
+		mutex_unlock(&bus->device_lock);
+		if (IS_ERR(cl))
+			return PTR_ERR(cl);
+		/* update pointers */
+		cldev->cl = cl;
+		cl->cldev = cldev;
+	}
 
-	mutex_lock(&dev->device_lock);
-
+	mutex_lock(&bus->device_lock);
 	if (mei_cl_is_connected(cl)) {
-		mutex_unlock(&dev->device_lock);
-		dev_warn(dev->dev, "Already connected");
-		return -EBUSY;
+		ret = 0;
+		goto out;
 	}
 
-	err = mei_cl_connect(cl, device->me_cl, NULL);
-	if (err < 0) {
-		mutex_unlock(&dev->device_lock);
-		dev_err(dev->dev, "Could not connect to the ME client");
-
-		return err;
+	if (!mei_me_cl_is_active(cldev->me_cl)) {
+		dev_err(&cldev->dev, "me client is not active\n");
+		ret = -ENOTTY;
+		goto out;
 	}
 
-	mutex_unlock(&dev->device_lock);
+	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
+	if (ret < 0)
+		dev_err(&cldev->dev, "cannot connect\n");
 
-	if (device->event_cb)
-		mei_cl_read_start(device->cl, 0, NULL);
+out:
+	mutex_unlock(&bus->device_lock);
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(mei_cl_enable_device);
 
-int mei_cl_disable_device(struct mei_cl_device *device)
+/**
+ * mei_cl_disable_device - disable me client device
+ *     disconnect from the me client
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success and < 0 on error
+ */
+int mei_cl_disable_device(struct mei_cl_device *cldev)
 {
+	struct mei_device *bus;
+	struct mei_cl *cl;
 	int err;
-	struct mei_device *dev;
-	struct mei_cl *cl = device->cl;
 
-	if (cl == NULL)
+	if (!cldev || !cldev->cl)
 		return -ENODEV;
 
-	dev = cl->dev;
+	cl = cldev->cl;
 
-	device->event_cb = NULL;
+	bus = cldev->bus;
 
-	mutex_lock(&dev->device_lock);
+	cldev->event_cb = NULL;
+
+	mutex_lock(&bus->device_lock);
 
 	if (!mei_cl_is_connected(cl)) {
-		dev_err(dev->dev, "Already disconnected");
+		dev_err(bus->dev, "Already disconnected");
 		err = 0;
 		goto out;
 	}
 
 	err = mei_cl_disconnect(cl);
-	if (err < 0) {
-		dev_err(dev->dev, "Could not disconnect from the ME client");
-		goto out;
-	}
-
-	/* Flush queues and remove any pending read */
-	mei_cl_flush_queues(cl, NULL);
+	if (err < 0)
+		dev_err(bus->dev, "Could not disconnect from the ME client");
 
 out:
-	mutex_unlock(&dev->device_lock);
-	return err;
+	/* Flush queues and remove any pending read */
+	mei_cl_flush_queues(cl, NULL);
+	mei_cl_unlink(cl);
 
+	kfree(cl);
+	cldev->cl = NULL;
+
+	mutex_unlock(&bus->device_lock);
+	return err;
 }
 EXPORT_SYMBOL_GPL(mei_cl_disable_device);
 
-void mei_cl_bus_rx_event(struct mei_cl *cl)
+/**
+ * mei_cl_device_find - find matching entry in the driver id table
+ *
+ * @cldev: me client device
+ * @cldrv: me client driver
+ *
+ * Return: id on success; NULL if no id matches
+ */
+static const
+struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev,
+					    struct mei_cl_driver *cldrv)
 {
-	struct mei_cl_device *device = cl->device;
+	const struct mei_cl_device_id *id;
+	const uuid_le *uuid;
 
-	if (!device || !device->event_cb)
+	uuid = mei_me_cl_uuid(cldev->me_cl);
+
+	id = cldrv->id_table;
+	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
+		if (!uuid_le_cmp(*uuid, id->uuid)) {
+
+			if (!cldev->name[0])
+				return id;
+
+			if (!strncmp(cldev->name, id->name, sizeof(id->name)))
+				return id;
+		}
+
+		id++;
+	}
+
+	return NULL;
+}
+
+/**
+ * mei_cl_device_match  - device match function
+ *
+ * @dev: device
+ * @drv: driver
+ *
+ * Return: 1 if a matching device was found, 0 otherwise
+ */
+static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
+	const struct mei_cl_device_id *found_id;
+
+	if (!cldev)
+		return 0;
+
+	if (!cldev->do_match)
+		return 0;
+
+	if (!cldrv || !cldrv->id_table)
+		return 0;
+
+	found_id = mei_cl_device_find(cldev, cldrv);
+	if (found_id)
+		return 1;
+
+	return 0;
+}
+
+/**
+ * mei_cl_device_probe - bus probe function
+ *
+ * @dev: device
+ *
+ * Return:  0 on success; < 0 otherwise
+ */
+static int mei_cl_device_probe(struct device *dev)
+{
+	struct mei_cl_device *cldev;
+	struct mei_cl_driver *cldrv;
+	const struct mei_cl_device_id *id;
+
+	cldev = to_mei_cl_device(dev);
+	cldrv = to_mei_cl_driver(dev->driver);
+
+	if (!cldev)
+		return 0;
+
+	if (!cldrv || !cldrv->probe)
+		return -ENODEV;
+
+	id = mei_cl_device_find(cldev, cldrv);
+	if (!id)
+		return -ENODEV;
+
+	__module_get(THIS_MODULE);
+
+	return cldrv->probe(cldev, id);
+}
+
+/**
+ * mei_cl_device_remove - remove device from the bus
+ *
+ * @dev: device
+ *
+ * Return:  0 on success; < 0 otherwise
+ */
+static int mei_cl_device_remove(struct device *dev)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	struct mei_cl_driver *cldrv;
+	int ret = 0;
+
+	if (!cldev || !dev->driver)
+		return 0;
+
+	if (cldev->event_cb) {
+		cldev->event_cb = NULL;
+		cancel_work_sync(&cldev->event_work);
+	}
+
+	cldrv = to_mei_cl_driver(dev->driver);
+	if (cldrv->remove)
+		ret = cldrv->remove(cldev);
+
+	module_put(THIS_MODULE);
+	dev->driver = NULL;
+	return ret;
+
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	size_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "%s", cldev->name);
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	size_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "%pUl", uuid);
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(uuid);
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+			     char *buf)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+	size_t len;
+
+	len = snprintf(buf, PAGE_SIZE, "mei:%s:" MEI_CL_UUID_FMT ":",
+		cldev->name, MEI_CL_UUID_ARGS(uuid->b));
+
+	return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *mei_cl_dev_attrs[] = {
+	&dev_attr_name.attr,
+	&dev_attr_uuid.attr,
+	&dev_attr_modalias.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(mei_cl_dev);
+
+/**
+ * mei_cl_device_uevent - me client bus uevent handler
+ *
+ * @dev: device
+ * @env: uevent kobject
+ *
+ * Return: 0 on success, -ENOMEM when add_uevent_var fails
+ */
+static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+
+	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "MODALIAS=mei:%s:" MEI_CL_UUID_FMT ":",
+		cldev->name, MEI_CL_UUID_ARGS(uuid->b)))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static struct bus_type mei_cl_bus_type = {
+	.name		= "mei",
+	.dev_groups	= mei_cl_dev_groups,
+	.match		= mei_cl_device_match,
+	.probe		= mei_cl_device_probe,
+	.remove		= mei_cl_device_remove,
+	.uevent		= mei_cl_device_uevent,
+};
+
+static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
+{
+	if (bus)
+		get_device(bus->dev);
+
+	return bus;
+}
+
+static void mei_dev_bus_put(struct mei_device *bus)
+{
+	if (bus)
+		put_device(bus->dev);
+}
+
+static void mei_cl_dev_release(struct device *dev)
+{
+	struct mei_cl_device *cldev = to_mei_cl_device(dev);
+
+	if (!cldev)
 		return;
 
-	set_bit(MEI_CL_EVENT_RX, &device->events);
-
-	schedule_work(&device->event_work);
+	mei_me_cl_put(cldev->me_cl);
+	mei_dev_bus_put(cldev->bus);
+	kfree(cldev);
 }
 
+static struct device_type mei_cl_device_type = {
+	.release	= mei_cl_dev_release,
+};
+
+/**
+ * mei_cl_dev_alloc - initialize and allocate mei client device
+ *
+ * @bus: mei device
+ * @me_cl: me client
+ *
+ * Return: allocated device structure or NULL on allocation failure
+ */
+static struct mei_cl_device *mei_cl_dev_alloc(struct mei_device *bus,
+					      struct mei_me_client *me_cl)
+{
+	struct mei_cl_device *cldev;
+
+	cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
+	if (!cldev)
+		return NULL;
+
+	device_initialize(&cldev->dev);
+	cldev->dev.parent = bus->dev;
+	cldev->dev.bus    = &mei_cl_bus_type;
+	cldev->dev.type   = &mei_cl_device_type;
+	cldev->bus        = mei_dev_bus_get(bus);
+	cldev->me_cl      = mei_me_cl_get(me_cl);
+	cldev->is_added   = 0;
+	INIT_LIST_HEAD(&cldev->bus_list);
+
+	return cldev;
+}
+
+/**
+ * mei_cl_dev_setup - setup me client device
+ *    run fix up routines and set the device name
+ *
+ * @bus: mei device
+ * @cldev: me client device
+ *
+ * Return: true if the device is eligible for enumeration
+ */
+static bool mei_cl_dev_setup(struct mei_device *bus,
+			     struct mei_cl_device *cldev)
+{
+	cldev->do_match = 1;
+	mei_cl_dev_fixup(cldev);
+
+	if (cldev->do_match)
+		dev_set_name(&cldev->dev, "mei:%s:%pUl",
+			     cldev->name, mei_me_cl_uuid(cldev->me_cl));
+
+	return cldev->do_match == 1;
+}
+
+/**
+ * mei_cl_bus_dev_add - add a me client device
+ *
+ * @cldev: me client device
+ *
+ * Return: 0 on success; < 0 on failure
+ */
+static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
+{
+	int ret;
+
+	dev_dbg(cldev->bus->dev, "adding %pUL\n", mei_me_cl_uuid(cldev->me_cl));
+	ret = device_add(&cldev->dev);
+	if (!ret)
+		cldev->is_added = 1;
+
+	return ret;
+}
+
+/**
+ * mei_cl_bus_dev_stop - stop the driver
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
+{
+	if (cldev->is_added)
+		device_release_driver(&cldev->dev);
+}
+
+/**
+ * mei_cl_bus_dev_destroy - destroy the me client device object
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
+{
+	if (!cldev->is_added)
+		return;
+
+	device_del(&cldev->dev);
+
+	mutex_lock(&cldev->bus->cl_bus_lock);
+	list_del_init(&cldev->bus_list);
+	mutex_unlock(&cldev->bus->cl_bus_lock);
+
+	cldev->is_added = 0;
+	put_device(&cldev->dev);
+}
+
+/**
+ * mei_cl_bus_remove_device - remove a device from the bus
+ *
+ * @cldev: me client device
+ */
+static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
+{
+	mei_cl_bus_dev_stop(cldev);
+	mei_cl_bus_dev_destroy(cldev);
+}
+
+/**
+ * mei_cl_bus_remove_devices - remove all devices from the bus
+ *
+ * @bus: mei device
+ */
+void mei_cl_bus_remove_devices(struct mei_device *bus)
+{
+	struct mei_cl_device *cldev, *next;
+
+	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
+		mei_cl_bus_remove_device(cldev);
+}
+
+
+/**
+ * mei_cl_dev_init - allocate and initialize a mei client device
+ *     based on an me client
+ *
+ * @bus: mei device
+ * @me_cl: me client
+ */
+static void mei_cl_dev_init(struct mei_device *bus, struct mei_me_client *me_cl)
+{
+	struct mei_cl_device *cldev;
+
+	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
+
+	if (me_cl->bus_added)
+		return;
+
+	cldev = mei_cl_dev_alloc(bus, me_cl);
+	if (!cldev)
+		return;
+
+	mutex_lock(&cldev->bus->cl_bus_lock);
+	me_cl->bus_added = true;
+	list_add_tail(&cldev->bus_list, &bus->device_list);
+	mutex_unlock(&cldev->bus->cl_bus_lock);
+
+}
+
+/**
+ * mei_cl_bus_rescan - scan the me clients list and create
+ *    devices for eligible clients
+ *
+ * @bus: mei device
+ */
+void mei_cl_bus_rescan(struct mei_device *bus)
+{
+	struct mei_cl_device *cldev, *n;
+	struct mei_me_client *me_cl;
+
+	down_read(&bus->me_clients_rwsem);
+	list_for_each_entry(me_cl, &bus->me_clients, list)
+		mei_cl_dev_init(bus, me_cl);
+	up_read(&bus->me_clients_rwsem);
+
+	mutex_lock(&bus->cl_bus_lock);
+	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
+
+		if (!mei_me_cl_is_active(cldev->me_cl)) {
+			mei_cl_bus_remove_device(cldev);
+			continue;
+		}
+
+		if (cldev->is_added)
+			continue;
+
+		if (mei_cl_dev_setup(bus, cldev))
+			mei_cl_bus_dev_add(cldev);
+		else {
+			list_del_init(&cldev->bus_list);
+			put_device(&cldev->dev);
+		}
+	}
+	mutex_unlock(&bus->cl_bus_lock);
+
+	dev_dbg(bus->dev, "rescan end");
+}
+
+int __mei_cl_driver_register(struct mei_cl_driver *cldrv, struct module *owner)
+{
+	int err;
+
+	cldrv->driver.name = cldrv->name;
+	cldrv->driver.owner = owner;
+	cldrv->driver.bus = &mei_cl_bus_type;
+
+	err = driver_register(&cldrv->driver);
+	if (err)
+		return err;
+
+	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__mei_cl_driver_register);
+
+void mei_cl_driver_unregister(struct mei_cl_driver *cldrv)
+{
+	driver_unregister(&cldrv->driver);
+
+	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
+}
+EXPORT_SYMBOL_GPL(mei_cl_driver_unregister);
+
+
 int __init mei_cl_bus_init(void)
 {
 	return bus_register(&mei_cl_bus_type);
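
With the bus rework above, a driver on the mei bus connects through mei_cl_enable_device() and asks for events with mei_cl_register_event_cb(), which now takes an events mask. A rough sketch of a probe path using this API follows; the driver name, buffer handling and callback body are hypothetical, and the exact callback prototype is the mei_cl_event_cb_t declared in <linux/mei_cl_bus.h>:

/* Illustrative sketch only, not part of this patch. */
static void my_mei_event_cb(struct mei_cl_device *cldev, u32 events,
			    void *context)
{
	u8 buf[128];

	if (events & BIT(MEI_CL_EVENT_RX))
		mei_cl_recv(cldev, buf, sizeof(buf));
}

static int my_mei_probe(struct mei_cl_device *cldev,
			const struct mei_cl_device_id *id)
{
	int ret;

	ret = mei_cl_enable_device(cldev);
	if (ret)
		return ret;

	/* Ask for RX events only; MEI_CL_EVENT_NOTIF could be OR-ed in. */
	return mei_cl_register_event_cb(cldev, BIT(MEI_CL_EVENT_RX),
					my_mei_event_cb, NULL);
}
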
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 6decbe1..a6c87c7 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -555,10 +555,10 @@
 	init_waitqueue_head(&cl->wait);
 	init_waitqueue_head(&cl->rx_wait);
 	init_waitqueue_head(&cl->tx_wait);
+	init_waitqueue_head(&cl->ev_wait);
 	INIT_LIST_HEAD(&cl->rd_completed);
 	INIT_LIST_HEAD(&cl->rd_pending);
 	INIT_LIST_HEAD(&cl->link);
-	INIT_LIST_HEAD(&cl->device_link);
 	cl->writing_state = MEI_IDLE;
 	cl->state = MEI_FILE_INITIALIZING;
 	cl->dev = dev;
@@ -690,16 +690,12 @@
 		mei_wd_host_init(dev, me_cl);
 	mei_me_cl_put(me_cl);
 
-	me_cl = mei_me_cl_by_uuid(dev, &mei_nfc_guid);
-	if (me_cl)
-		mei_nfc_host_init(dev, me_cl);
-	mei_me_cl_put(me_cl);
-
-
 	dev->dev_state = MEI_DEV_ENABLED;
 	dev->reset_count = 0;
 	mutex_unlock(&dev->device_lock);
 
+	mei_cl_bus_rescan(dev);
+
 	pm_runtime_mark_last_busy(dev->dev);
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
 	pm_runtime_autosuspend(dev->dev);
@@ -841,45 +837,22 @@
 	return ret;
 }
 
-
-
 /**
- * mei_cl_disconnect - disconnect host client from the me one
+ * __mei_cl_disconnect - disconnect host client from the me one
+ *     internal function; runtime pm must already be acquired
  *
  * @cl: host client
  *
- * Locking: called under "dev->device_lock" lock
- *
  * Return: 0 on success, <0 on failure.
  */
-int mei_cl_disconnect(struct mei_cl *cl)
+static int __mei_cl_disconnect(struct mei_cl *cl)
 {
 	struct mei_device *dev;
 	struct mei_cl_cb *cb;
 	int rets;
 
-	if (WARN_ON(!cl || !cl->dev))
-		return -ENODEV;
-
 	dev = cl->dev;
 
-	cl_dbg(dev, cl, "disconnecting");
-
-	if (!mei_cl_is_connected(cl))
-		return 0;
-
-	if (mei_cl_is_fixed_address(cl)) {
-		mei_cl_set_disconnected(cl);
-		return 0;
-	}
-
-	rets = pm_runtime_get(dev->dev);
-	if (rets < 0 && rets != -EINPROGRESS) {
-		pm_runtime_put_noidle(dev->dev);
-		cl_err(dev, cl, "rpm: get failed %d\n", rets);
-		return rets;
-	}
-
 	cl->state = MEI_FILE_DISCONNECTING;
 
 	cb = mei_io_cb_init(cl, MEI_FOP_DISCONNECT, NULL);
@@ -915,11 +888,52 @@
 	if (!rets)
 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
 
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+/**
+ * mei_cl_disconnect - disconnect host client from the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	cl_dbg(dev, cl, "disconnecting");
+
+	if (!mei_cl_is_connected(cl))
+		return 0;
+
+	if (mei_cl_is_fixed_address(cl)) {
+		mei_cl_set_disconnected(cl);
+		return 0;
+	}
+
+	rets = pm_runtime_get(dev->dev);
+	if (rets < 0 && rets != -EINPROGRESS) {
+		pm_runtime_put_noidle(dev->dev);
+		cl_err(dev, cl, "rpm: get failed %d\n", rets);
+		return rets;
+	}
+
+	rets = __mei_cl_disconnect(cl);
+
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 
-	mei_io_cb_free(cb);
 	return rets;
 }
 
@@ -1064,11 +1078,23 @@
 	mutex_unlock(&dev->device_lock);
 	wait_event_timeout(cl->wait,
 			(cl->state == MEI_FILE_CONNECTED ||
+			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
 			 cl->state == MEI_FILE_DISCONNECT_REPLY),
 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 	mutex_lock(&dev->device_lock);
 
 	if (!mei_cl_is_connected(cl)) {
+		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
+			mei_io_list_flush(&dev->ctrl_rd_list, cl);
+			mei_io_list_flush(&dev->ctrl_wr_list, cl);
+			 /* ignore disconnect return value;
+			  * in case of failure reset will be invoked
+			  */
+			__mei_cl_disconnect(cl);
+			rets = -EFAULT;
+			goto out;
+		}
+
 		/* timeout or something went really wrong */
 		if (!cl->status)
 			cl->status = -EFAULT;
@@ -1181,6 +1207,221 @@
 }
 
 /**
+ *  mei_cl_notify_fop2req - convert fop to proper request
+ *
+ * @fop: client notification start/stop file operation type
+ *
+ * Return:  MEI_HBM_NOTIFICATION_START/STOP
+ */
+u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
+{
+	if (fop == MEI_FOP_NOTIFY_START)
+		return MEI_HBM_NOTIFICATION_START;
+	else
+		return MEI_HBM_NOTIFICATION_STOP;
+}
+
+/**
+ *  mei_cl_notify_req2fop - convert notification request to file operation type
+ *
+ * @req: hbm notification request type
+ *
+ * Return:  MEI_FOP_NOTIFY_START/STOP
+ */
+enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
+{
+	if (req == MEI_HBM_NOTIFICATION_START)
+		return MEI_FOP_NOTIFY_START;
+	else
+		return MEI_FOP_NOTIFY_STOP;
+}
+
+/**
+ * mei_cl_irq_notify - send notification request in irq_thread context
+ *
+ * @cl: client
+ * @cb: callback block.
+ * @cmpl_list: complete list.
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
+		      struct mei_cl_cb *cmpl_list)
+{
+	struct mei_device *dev = cl->dev;
+	u32 msg_slots;
+	int slots;
+	int ret;
+	bool request;
+
+	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
+	slots = mei_hbuf_empty_slots(dev);
+
+	if (slots < msg_slots)
+		return -EMSGSIZE;
+
+	request = mei_cl_notify_fop2req(cb->fop_type);
+	ret = mei_hbm_cl_notify_req(dev, cl, request);
+	if (ret) {
+		cl->status = ret;
+		list_move_tail(&cb->list, &cmpl_list->list);
+		return ret;
+	}
+
+	list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
+	return 0;
+}
+
+/**
+ * mei_cl_notify_request - send notification stop/start request
+ *
+ * @cl: host client
+ * @file: associate request with file
+ * @request: 1 for start or 0 for stop
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
+{
+	struct mei_device *dev;
+	struct mei_cl_cb *cb;
+	enum mei_cb_file_ops fop_type;
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (!dev->hbm_f_ev_supported) {
+		cl_dbg(dev, cl, "notifications not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	rets = pm_runtime_get(dev->dev);
+	if (rets < 0 && rets != -EINPROGRESS) {
+		pm_runtime_put_noidle(dev->dev);
+		cl_err(dev, cl, "rpm: get failed %d\n", rets);
+		return rets;
+	}
+
+	fop_type = mei_cl_notify_req2fop(request);
+	cb = mei_io_cb_init(cl, fop_type, file);
+	if (!cb) {
+		rets = -ENOMEM;
+		goto out;
+	}
+
+	if (mei_hbuf_acquire(dev)) {
+		if (mei_hbm_cl_notify_req(dev, cl, request)) {
+			rets = -ENODEV;
+			goto out;
+		}
+		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+	} else {
+		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+	}
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(cl->wait, cl->notify_en == request,
+			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+	mutex_lock(&dev->device_lock);
+
+	if (cl->notify_en != request) {
+		mei_io_list_flush(&dev->ctrl_rd_list, cl);
+		mei_io_list_flush(&dev->ctrl_wr_list, cl);
+		if (!cl->status)
+			cl->status = -EFAULT;
+	}
+
+	rets = cl->status;
+
+out:
+	cl_dbg(dev, cl, "rpm: autosuspend\n");
+	pm_runtime_mark_last_busy(dev->dev);
+	pm_runtime_put_autosuspend(dev->dev);
+
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+/**
+ * mei_cl_notify - raise notification
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ */
+void mei_cl_notify(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+
+	if (!cl || !cl->dev)
+		return;
+
+	dev = cl->dev;
+
+	if (!cl->notify_en)
+		return;
+
+	cl_dbg(dev, cl, "notify event");
+	cl->notify_ev = true;
+	wake_up_interruptible_all(&cl->ev_wait);
+
+	if (cl->ev_async)
+		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
+
+	mei_cl_bus_notify_event(cl);
+}
+
+/**
+ * mei_cl_notify_get - get or wait for notification event
+ *
+ * @cl: host client
+ * @block: this request is blocking
+ * @notify_ev: true if notification event was received
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * Return: 0 on success and error otherwise.
+ */
+int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
+{
+	struct mei_device *dev;
+	int rets;
+
+	*notify_ev = false;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (!mei_cl_is_connected(cl))
+		return -ENODEV;
+
+	if (cl->notify_ev)
+		goto out;
+
+	if (!block)
+		return -EAGAIN;
+
+	mutex_unlock(&dev->device_lock);
+	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
+	mutex_lock(&dev->device_lock);
+
+	if (rets < 0)
+		return rets;
+
+out:
+	*notify_ev = cl->notify_ev;
+	cl->notify_ev = false;
+	return 0;
+}
+
+/**
  * mei_cl_read_start - the start read client message function.
  *
  * @cl: host client
@@ -1356,6 +1597,7 @@
 	struct mei_device *dev;
 	struct mei_msg_data *buf;
 	struct mei_msg_hdr mei_hdr;
+	int size;
 	int rets;
 
 
@@ -1367,10 +1609,10 @@
 
 	dev = cl->dev;
 
-
 	buf = &cb->buf;
+	size = buf->size;
 
-	cl_dbg(dev, cl, "size=%d\n", buf->size);
+	cl_dbg(dev, cl, "size=%d\n", size);
 
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
@@ -1394,21 +1636,21 @@
 
 	if (rets == 0) {
 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
-		rets = buf->size;
+		rets = size;
 		goto out;
 	}
 	if (!mei_hbuf_acquire(dev)) {
 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
-		rets = buf->size;
+		rets = size;
 		goto out;
 	}
 
 	/* Check for a maximum length */
-	if (buf->size > mei_hbuf_max_len(dev)) {
+	if (size > mei_hbuf_max_len(dev)) {
 		mei_hdr.length = mei_hbuf_max_len(dev);
 		mei_hdr.msg_complete = 0;
 	} else {
-		mei_hdr.length = buf->size;
+		mei_hdr.length = size;
 		mei_hdr.msg_complete = 1;
 	}
 
@@ -1430,6 +1672,7 @@
 	else
 		list_add_tail(&cb->list, &dev->write_list.list);
 
+	cb = NULL;
 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
 
 		mutex_unlock(&dev->device_lock);
@@ -1444,7 +1687,7 @@
 		}
 	}
 
-	rets = buf->size;
+	rets = size;
 err:
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
@@ -1486,6 +1729,8 @@
 
 	case MEI_FOP_CONNECT:
 	case MEI_FOP_DISCONNECT:
+	case MEI_FOP_NOTIFY_STOP:
+	case MEI_FOP_NOTIFY_START:
 		if (waitqueue_active(&cl->wait))
 			wake_up(&cl->wait);
 
@@ -1528,6 +1773,12 @@
 			cl_dbg(dev, cl, "Waking up writing client!\n");
 			wake_up_interruptible(&cl->tx_wait);
 		}
+
+		/* synchronized under device mutex */
+		if (waitqueue_active(&cl->ev_wait)) {
+			cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
+			wake_up_interruptible(&cl->ev_wait);
+		}
 	}
 }
 
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 8d7f057..1c7cad0 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -219,6 +219,14 @@
 
 void mei_host_client_init(struct work_struct *work);
 
+u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop);
+enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
+int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request);
+int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
+		      struct mei_cl_cb *cmpl_list);
+int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
+void mei_cl_notify(struct mei_cl *cl);
+
 void mei_cl_all_disconnect(struct mei_device *dev);
 void mei_cl_all_wakeup(struct mei_device *dev);
 void mei_cl_all_write_clear(struct mei_device *dev);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index eb86834..4b469cf 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -154,6 +154,12 @@
 		pos += scnprintf(buf + pos, bufsz - pos, "hbm features:\n");
 		pos += scnprintf(buf + pos, bufsz - pos, "\tPG: %01d\n",
 				 dev->hbm_f_pg_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tDC: %01d\n",
+				 dev->hbm_f_dc_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tDOT: %01d\n",
+				 dev->hbm_f_dot_supported);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tEV: %01d\n",
+				 dev->hbm_f_ev_supported);
 	}
 
 	pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index a4f2831..8eec887 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -52,6 +52,7 @@
 	MEI_CL_CS(ALREADY_STARTED);
 	MEI_CL_CS(OUT_OF_RESOURCES);
 	MEI_CL_CS(MESSAGE_SMALL);
+	MEI_CL_CS(NOT_ALLOWED);
 	default: return "unknown";
 	}
 #undef MEI_CL_CCS
@@ -89,6 +90,7 @@
 	case MEI_CL_CONN_ALREADY_STARTED:  return -EBUSY;
 	case MEI_CL_CONN_OUT_OF_RESOURCES: return -EBUSY;
 	case MEI_CL_CONN_MESSAGE_SMALL:    return -EINVAL;
+	case MEI_CL_CONN_NOT_ALLOWED:      return -EBUSY;
 	default:                           return -EINVAL;
 	}
 }
@@ -299,6 +301,7 @@
 	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
 	memset(enum_req, 0, len);
 	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+	enum_req->allow_add = dev->hbm_f_dc_supported;
 
 	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
 	if (ret) {
@@ -344,6 +347,180 @@
 }
 
 /**
+ * mei_hbm_add_cl_resp - send response to fw on client add request
+ *
+ * @dev: the device structure
+ * @addr: me address
+ * @status: response status
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_add_client_response *resp;
+	const size_t len = sizeof(struct hbm_add_client_response);
+	int ret;
+
+	dev_dbg(dev->dev, "adding client response\n");
+
+	resp = (struct hbm_add_client_response *)dev->wr_msg.data;
+
+	mei_hbm_hdr(mei_hdr, len);
+	memset(resp, 0, sizeof(struct hbm_add_client_response));
+
+	resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
+	resp->me_addr = addr;
+	resp->status  = status;
+
+	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+	if (ret)
+		dev_err(dev->dev, "add client response write failed: ret = %d\n",
+			ret);
+	return ret;
+}
+
+/**
+ * mei_hbm_fw_add_cl_req - request from the fw to add a client
+ *
+ * @dev: the device structure
+ * @req: add client request
+ *
+ * Return: 0 on success and < 0 on failure
+ */
+static int mei_hbm_fw_add_cl_req(struct mei_device *dev,
+			      struct hbm_add_client_request *req)
+{
+	int ret;
+	u8 status = MEI_HBMS_SUCCESS;
+
+	BUILD_BUG_ON(sizeof(struct hbm_add_client_request) !=
+			sizeof(struct hbm_props_response));
+
+	ret = mei_hbm_me_cl_add(dev, (struct hbm_props_response *)req);
+	if (ret)
+		status = !MEI_HBMS_SUCCESS;
+
+	return mei_hbm_add_cl_resp(dev, req->me_addr, status);
+}
+
+/**
+ * mei_hbm_cl_notify_req - send notification request
+ *
+ * @dev: the device structure
+ * @cl: a client for which to send the notification request
+ * @start: true for start, false for stop
+ *
+ * Return: 0 on success and -EIO on write failure
+ */
+int mei_hbm_cl_notify_req(struct mei_device *dev,
+			  struct mei_cl *cl, u8 start)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_notification_request *req;
+	const size_t len = sizeof(struct hbm_notification_request);
+	int ret;
+
+	mei_hbm_hdr(mei_hdr, len);
+	mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
+
+	req = (struct hbm_notification_request *)dev->wr_msg.data;
+	req->start = start;
+
+	ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+	if (ret)
+		dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
+
+	return ret;
+}
+
+/**
+ *  notify_res_to_fop - convert notification response to the proper
+ *      notification FOP
+ *
+ * @cmd: client notification start response command
+ *
+ * Return:  MEI_FOP_NOTIFY_START or MEI_FOP_NOTIFY_STOP;
+ */
+static inline enum mei_cb_file_ops notify_res_to_fop(struct mei_hbm_cl_cmd *cmd)
+{
+	struct hbm_notification_response *rs =
+		(struct hbm_notification_response *)cmd;
+
+	return mei_cl_notify_req2fop(rs->start);
+}
+
+/**
+ * mei_hbm_cl_notify_start_res - update the client state according to
+ *       the notify start response
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ * @cmd: client notification start response command
+ */
+static void mei_hbm_cl_notify_start_res(struct mei_device *dev,
+					struct mei_cl *cl,
+					struct mei_hbm_cl_cmd *cmd)
+{
+	struct hbm_notification_response *rs =
+		(struct hbm_notification_response *)cmd;
+
+	cl_dbg(dev, cl, "hbm: notify start response status=%d\n", rs->status);
+
+	if (rs->status == MEI_HBMS_SUCCESS ||
+	    rs->status == MEI_HBMS_ALREADY_STARTED) {
+		cl->notify_en = true;
+		cl->status = 0;
+	} else {
+		cl->status = -EINVAL;
+	}
+}
+
+/**
+ * mei_hbm_cl_notify_stop_res - update the client state according to
+ *       the notify stop response
+ *
+ * @dev: the device structure
+ * @cl: mei host client
+ * @cmd: client notification stop response command
+ */
+static void mei_hbm_cl_notify_stop_res(struct mei_device *dev,
+				       struct mei_cl *cl,
+				       struct mei_hbm_cl_cmd *cmd)
+{
+	struct hbm_notification_response *rs =
+		(struct hbm_notification_response *)cmd;
+
+	cl_dbg(dev, cl, "hbm: notify stop response status=%d\n", rs->status);
+
+	if (rs->status == MEI_HBMS_SUCCESS ||
+	    rs->status == MEI_HBMS_NOT_STARTED) {
+		cl->notify_en = false;
+		cl->status = 0;
+	} else {
+		/* TODO: spec is not clear yet about other possible issues */
+		cl->status = -EINVAL;
+	}
+}
+
+/**
+ * mei_hbm_cl_notify - signal notification event
+ *
+ * @dev: the device structure
+ * @cmd: notification client message
+ */
+static void mei_hbm_cl_notify(struct mei_device *dev,
+			      struct mei_hbm_cl_cmd *cmd)
+{
+	struct mei_cl *cl;
+
+	cl = mei_hbm_cl_find_by_cmd(dev, cmd);
+	if (cl)
+		mei_cl_notify(cl);
+}
+
+/**
  * mei_hbm_prop_req - request property for a single client
  *
  * @dev: the device structure
@@ -610,8 +787,11 @@
 
 	if (rs->status == MEI_CL_CONN_SUCCESS)
 		cl->state = MEI_FILE_CONNECTED;
-	else
+	else {
 		cl->state = MEI_FILE_DISCONNECT_REPLY;
+		if (rs->status == MEI_CL_CONN_NOT_FOUND)
+			mei_me_cl_del(dev, cl->me_cl);
+	}
 	cl->status = mei_cl_conn_status_to_errno(rs->status);
 }
 
@@ -654,6 +834,12 @@
 	case MEI_FOP_DISCONNECT:
 		mei_hbm_cl_disconnect_res(dev, cl, rs);
 		break;
+	case MEI_FOP_NOTIFY_START:
+		mei_hbm_cl_notify_start_res(dev, cl, rs);
+		break;
+	case MEI_FOP_NOTIFY_STOP:
+		mei_hbm_cl_notify_stop_res(dev, cl, rs);
+		break;
 	default:
 		return;
 	}
@@ -694,6 +880,79 @@
 }
 
 /**
+ * mei_hbm_pg_enter_res - PG enter response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_enter_res(struct mei_device *dev)
+{
+	if (mei_pg_state(dev) != MEI_PG_OFF ||
+	    dev->pg_event != MEI_PG_EVENT_WAIT) {
+		dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
+			mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+		return -EPROTO;
+	}
+
+	dev->pg_event = MEI_PG_EVENT_RECEIVED;
+	wake_up(&dev->wait_pg);
+
+	return 0;
+}
+
+/**
+ * mei_hbm_pg_resume - process PG resume
+ *
+ * @dev: the device structure.
+ */
+void mei_hbm_pg_resume(struct mei_device *dev)
+{
+	pm_request_resume(dev->dev);
+}
+EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
+
+/**
+ * mei_hbm_pg_exit_res - PG exit response received
+ *
+ * @dev: the device structure.
+ *
+ * Return: 0 on success, -EPROTO on state mismatch
+ */
+static int mei_hbm_pg_exit_res(struct mei_device *dev)
+{
+	if (mei_pg_state(dev) != MEI_PG_ON ||
+	    (dev->pg_event != MEI_PG_EVENT_WAIT &&
+	     dev->pg_event != MEI_PG_EVENT_IDLE)) {
+		dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
+			mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
+		return -EPROTO;
+	}
+
+	switch (dev->pg_event) {
+	case MEI_PG_EVENT_WAIT:
+		dev->pg_event = MEI_PG_EVENT_RECEIVED;
+		wake_up(&dev->wait_pg);
+		break;
+	case MEI_PG_EVENT_IDLE:
+		/*
+		* If the driver is not waiting on this then
+		* this is HW initiated exit from PG.
+		* Start runtime pm resume sequence to exit from PG.
+		*/
+		dev->pg_event = MEI_PG_EVENT_RECEIVED;
+		mei_hbm_pg_resume(dev);
+		break;
+	default:
+		WARN(1, "hbm: pg exit response: unexpected pg event = %d\n",
+		     dev->pg_event);
+		return -EPROTO;
+	}
+
+	return 0;
+}
+
+/**
  * mei_hbm_config_features - check what hbm features and commands
  *        are supported by the fw
  *
@@ -709,6 +968,17 @@
 	if (dev->version.major_version == HBM_MAJOR_VERSION_PGI &&
 	    dev->version.minor_version >= HBM_MINOR_VERSION_PGI)
 		dev->hbm_f_pg_supported = 1;
+
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_DC)
+		dev->hbm_f_dc_supported = 1;
+
+	/* disconnect on connect timeout instead of link reset */
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_DOT)
+		dev->hbm_f_dot_supported = 1;
+
+	/* Notification Event Support */
+	if (dev->version.major_version >= HBM_MAJOR_VERSION_EV)
+		dev->hbm_f_ev_supported = 1;
 }
 
 /**
@@ -740,6 +1010,8 @@
 	struct hbm_host_version_response *version_res;
 	struct hbm_props_response *props_res;
 	struct hbm_host_enum_response *enum_res;
+	struct hbm_add_client_request *add_cl_req;
+	int ret;
 
 	struct mei_hbm_cl_cmd *cl_cmd;
 	struct hbm_client_connect_request *disconnect_req;
@@ -828,24 +1100,17 @@
 		break;
 
 	case MEI_PG_ISOLATION_ENTRY_RES_CMD:
-		dev_dbg(dev->dev, "power gate isolation entry response received\n");
-		dev->pg_event = MEI_PG_EVENT_RECEIVED;
-		if (waitqueue_active(&dev->wait_pg))
-			wake_up(&dev->wait_pg);
+		dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
+		ret = mei_hbm_pg_enter_res(dev);
+		if (ret)
+			return ret;
 		break;
 
 	case MEI_PG_ISOLATION_EXIT_REQ_CMD:
-		dev_dbg(dev->dev, "power gate isolation exit request received\n");
-		dev->pg_event = MEI_PG_EVENT_RECEIVED;
-		if (waitqueue_active(&dev->wait_pg))
-			wake_up(&dev->wait_pg);
-		else
-			/*
-			* If the driver is not waiting on this then
-			* this is HW initiated exit from PG.
-			* Start runtime pm resume sequence to exit from PG.
-			*/
-			pm_request_resume(dev->dev);
+		dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
+		ret = mei_hbm_pg_exit_res(dev);
+		if (ret)
+			return ret;
 		break;
 
 	case HOST_CLIENT_PROPERTIES_RES_CMD:
@@ -937,6 +1202,39 @@
 			return -EIO;
 		}
 		break;
+
+	case MEI_HBM_ADD_CLIENT_REQ_CMD:
+		dev_dbg(dev->dev, "hbm: add client request received\n");
+		/*
+		 * after the host receives the enum_resp
+		 * message clients may be added or removed
+		 */
+		if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
+		    dev->hbm_state >= MEI_HBM_STOPPED) {
+			dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
+				dev->dev_state, dev->hbm_state);
+			return -EPROTO;
+		}
+		add_cl_req = (struct hbm_add_client_request *)mei_msg;
+		ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
+		if (ret) {
+			dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
+				ret);
+			return -EIO;
+		}
+		dev_dbg(dev->dev, "hbm: add client request processed\n");
+		break;
+
+	case MEI_HBM_NOTIFY_RES_CMD:
+		dev_dbg(dev->dev, "hbm: notify response received\n");
+		mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
+		break;
+
+	case MEI_HBM_NOTIFICATION_CMD:
+		dev_dbg(dev->dev, "hbm: notification\n");
+		mei_hbm_cl_notify(dev, cl_cmd);
+		break;
+
 	default:
 		BUG();
 		break;
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index 2544db7..a2025a5 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -54,6 +54,9 @@
 int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
 bool mei_hbm_version_is_supported(struct mei_device *dev);
 int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd);
+void mei_hbm_pg_resume(struct mei_device *dev);
+int mei_hbm_cl_notify_req(struct mei_device *dev,
+			  struct mei_cl *cl, u8 request);
 
 #endif /* _MEI_HBM_H_ */
 
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9eb7ed7..a8a68ac 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,12 +117,17 @@
 #define MEI_DEV_ID_WPT_LP     0x9CBA  /* Wildcat Point LP */
 #define MEI_DEV_ID_WPT_LP_2   0x9CBB  /* Wildcat Point LP 2 */
 
+#define MEI_DEV_ID_SPT        0x9D3A  /* Sunrise Point */
+#define MEI_DEV_ID_SPT_2      0x9D3B  /* Sunrise Point 2 */
+#define MEI_DEV_ID_SPT_H      0xA13A  /* Sunrise Point H */
+#define MEI_DEV_ID_SPT_H_2    0xA13B  /* Sunrise Point H 2 */
 /*
  * MEI HW Section
  */
 
 /* Host Firmware Status Registers in PCI Config Space */
 #define PCI_CFG_HFS_1         0x40
+#  define PCI_CFG_HFS_1_D0I3_MSK     0x80000000
 #define PCI_CFG_HFS_2         0x48
 #define PCI_CFG_HFS_3         0x60
 #define PCI_CFG_HFS_4         0x64
@@ -140,7 +145,8 @@
 #define ME_CSR_HA  0xC
 /* H_HGC_CSR - PGI register */
 #define H_HPG_CSR  0x10
-
+/* H_D0I3C - D0I3 Control  */
+#define H_D0I3C    0x800
 
 /* register bits of H_CSR (Host Control Status register) */
 /* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
@@ -159,7 +165,14 @@
 #define H_IS              0x00000002
 /* Host Interrupt Enable */
 #define H_IE              0x00000001
+/* Host D0I3 Interrupt Enable */
+#define H_D0I3C_IE        0x00000020
+/* Host D0I3 Interrupt Status */
+#define H_D0I3C_IS        0x00000040
 
+/* H_CSR masks */
+#define H_CSR_IE_MASK     (H_IE | H_D0I3C_IE)
+#define H_CSR_IS_MASK     (H_IS | H_D0I3C_IS)
 
 /* register bits of ME_CSR_HA (ME Control Status Host Access register) */
 /* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
@@ -183,8 +196,14 @@
 #define ME_IE_HRA         0x00000001
 
 
-/* register bits - H_HPG_CSR */
-#define H_HPG_CSR_PGIHEXR       0x00000001
-#define H_HPG_CSR_PGI           0x00000002
+/* H_HPG_CSR register bits */
+#define H_HPG_CSR_PGIHEXR 0x00000001
+#define H_HPG_CSR_PGI     0x00000002
+
+/* H_D0I3C register bits */
+#define H_D0I3C_CIP      0x00000001
+#define H_D0I3C_IR       0x00000002
+#define H_D0I3C_I3       0x00000004
+#define H_D0I3C_RR       0x00000008
 
 #endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 43d7101..65511d3 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -134,11 +134,40 @@
  */
 static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
 {
-	reg &= ~H_IS;
+	reg &= ~H_CSR_IS_MASK;
 	mei_hcsr_write(dev, reg);
 }
 
 /**
+ * mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
+ *
+ * @dev: the device structure
+ *
+ * Return: H_D0I3C register value (u32)
+ */
+static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
+{
+	u32 reg;
+
+	reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
+	trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
+
+	return reg;
+}
+
+/**
+ * mei_me_d0i3c_write - writes H_D0I3C register to device
+ *
+ * @dev: the device structure
+ * @reg: new register value
+ */
+static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
+{
+	trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
+	mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
+}
+
+/**
  * mei_me_fw_status - read fw status register from pci config space
  *
  * @dev: mei device
@@ -176,12 +205,25 @@
  */
 static void mei_me_hw_config(struct mei_device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	struct mei_me_hw *hw = to_me_hw(dev);
-	u32 hcsr = mei_hcsr_read(dev);
+	u32 hcsr, reg;
+
 	/* Doesn't change in runtime */
+	hcsr = mei_hcsr_read(dev);
 	dev->hbuf_depth = (hcsr & H_CBD) >> 24;
 
+	reg = 0;
+	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
+	hw->d0i3_supported =
+		((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
+
 	hw->pg_state = MEI_PG_OFF;
+	if (hw->d0i3_supported) {
+		reg = mei_me_d0i3c_read(dev);
+		if (reg & H_D0I3C_I3)
+			hw->pg_state = MEI_PG_ON;
+	}
 }
 
 /**
@@ -208,7 +250,7 @@
 {
 	u32 hcsr = mei_hcsr_read(dev);
 
-	if ((hcsr & H_IS) == H_IS)
+	if (hcsr & H_CSR_IS_MASK)
 		mei_hcsr_write(dev, hcsr);
 }
 /**
@@ -220,7 +262,7 @@
 {
 	u32 hcsr = mei_hcsr_read(dev);
 
-	hcsr |= H_IE;
+	hcsr |= H_CSR_IE_MASK;
 	mei_hcsr_set(dev, hcsr);
 }
 
@@ -233,7 +275,7 @@
 {
 	u32 hcsr = mei_hcsr_read(dev);
 
-	hcsr  &= ~H_IE;
+	hcsr  &= ~H_CSR_IE_MASK;
 	mei_hcsr_set(dev, hcsr);
 }
 
@@ -253,57 +295,6 @@
 	/* complete this write before we set host ready on another CPU */
 	mmiowb();
 }
-/**
- * mei_me_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @intr_enable: if interrupt should be enabled after reset.
- *
- * Return: always 0
- */
-static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
-{
-	u32 hcsr = mei_hcsr_read(dev);
-
-	/* H_RST may be found lit before reset is started,
-	 * for example if preceding reset flow hasn't completed.
-	 * In that case asserting H_RST will be ignored, therefore
-	 * we need to clean H_RST bit to start a successful reset sequence.
-	 */
-	if ((hcsr & H_RST) == H_RST) {
-		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
-		hcsr &= ~H_RST;
-		mei_hcsr_set(dev, hcsr);
-		hcsr = mei_hcsr_read(dev);
-	}
-
-	hcsr |= H_RST | H_IG | H_IS;
-
-	if (intr_enable)
-		hcsr |= H_IE;
-	else
-		hcsr &= ~H_IE;
-
-	dev->recvd_hw_ready = false;
-	mei_hcsr_write(dev, hcsr);
-
-	/*
-	 * Host reads the H_CSR once to ensure that the
-	 * posted write to H_CSR completes.
-	 */
-	hcsr = mei_hcsr_read(dev);
-
-	if ((hcsr & H_RST) == 0)
-		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
-
-	if ((hcsr & H_RDY) == H_RDY)
-		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
-
-	if (intr_enable == false)
-		mei_me_hw_reset_release(dev);
-
-	return 0;
-}
 
 /**
  * mei_me_host_set_ready - enable device
@@ -314,7 +305,7 @@
 {
 	u32 hcsr = mei_hcsr_read(dev);
 
-	hcsr |= H_IE | H_IG | H_RDY;
+	hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
 	mei_hcsr_set(dev, hcsr);
 }
 
@@ -601,13 +592,13 @@
 }
 
 /**
- * mei_me_pg_enter_sync - perform pg entry procedure
+ * mei_me_pg_legacy_enter_sync - perform legacy pg entry procedure
  *
  * @dev: the device structure
  *
  * Return: 0 on success an error code otherwise
  */
-int mei_me_pg_enter_sync(struct mei_device *dev)
+static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -638,13 +629,13 @@
 }
 
 /**
- * mei_me_pg_exit_sync - perform pg exit procedure
+ * mei_me_pg_legacy_exit_sync - perform legacy pg exit procedure
  *
  * @dev: the device structure
  *
  * Return: 0 on success an error code otherwise
  */
-int mei_me_pg_exit_sync(struct mei_device *dev)
+static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
@@ -712,8 +703,12 @@
  */
 static bool mei_me_pg_is_enabled(struct mei_device *dev)
 {
+	struct mei_me_hw *hw = to_me_hw(dev);
 	u32 reg = mei_me_mecsr_read(dev);
 
+	if (hw->d0i3_supported)
+		return true;
+
 	if ((reg & ME_PGIC_HRA) == 0)
 		goto notsupported;
 
@@ -723,7 +718,8 @@
 	return true;
 
 notsupported:
-	dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
+	dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
+		hw->d0i3_supported,
 		!!(reg & ME_PGIC_HRA),
 		dev->version.major_version,
 		dev->version.minor_version,
@@ -734,11 +730,211 @@
 }
 
 /**
- * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ * mei_me_d0i3_set - write d0i3 register bit on mei device.
+ *
+ * @dev: the device structure
+ * @intr: ask for interrupt
+ *
+ * Return: D0I3C register value
+ */
+static u32 mei_me_d0i3_set(struct mei_device *dev, bool intr)
+{
+	u32 reg = mei_me_d0i3c_read(dev);
+
+	reg |= H_D0I3C_I3;
+	if (intr)
+		reg |= H_D0I3C_IR;
+	else
+		reg &= ~H_D0I3C_IR;
+	mei_me_d0i3c_write(dev, reg);
+	/* read it to ensure HW consistency */
+	reg = mei_me_d0i3c_read(dev);
+	return reg;
+}
+
+/**
+ * mei_me_d0i3_unset - clean d0i3 register bit on mei device.
+ *
+ * @dev: the device structure
+ *
+ * Return: D0I3C register value
+ */
+static u32 mei_me_d0i3_unset(struct mei_device *dev)
+{
+	u32 reg = mei_me_d0i3c_read(dev);
+
+	reg &= ~H_D0I3C_I3;
+	reg |= H_D0I3C_IR;
+	mei_me_d0i3c_write(dev, reg);
+	/* read it to ensure HW consistency */
+	reg = mei_me_d0i3c_read(dev);
+	return reg;
+}
+
+/**
+ * mei_me_d0i3_enter_sync - perform d0i3 entry procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_d0i3_enter_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+	unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+	int ret;
+	u32 reg;
+
+	reg = mei_me_d0i3c_read(dev);
+	if (reg & H_D0I3C_I3) {
+		/* we are in d0i3, nothing to do */
+		dev_dbg(dev->dev, "d0i3 set not needed\n");
+		ret = 0;
+		goto on;
+	}
+
+	/* PGI entry procedure */
+	dev->pg_event = MEI_PG_EVENT_WAIT;
+
+	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
+	if (ret)
+		/* FIXME: should we reset here? */
+		goto out;
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+		ret = -ETIME;
+		goto out;
+	}
+	/* end PGI entry procedure */
+
+	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+
+	reg = mei_me_d0i3_set(dev, true);
+	if (!(reg & H_D0I3C_CIP)) {
+		dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
+		ret = 0;
+		goto on;
+	}
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
+		reg = mei_me_d0i3c_read(dev);
+		if (!(reg & H_D0I3C_I3)) {
+			ret = -ETIME;
+			goto out;
+		}
+	}
+
+	ret = 0;
+on:
+	hw->pg_state = MEI_PG_ON;
+out:
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
+	return ret;
+}
+
+/**
+ * mei_me_d0i3_enter - perform d0i3 entry procedure
+ *   no hbm PG handshake
+ *   no waiting for confirmation; runs with interrupts
+ *   disabled
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_d0i3_enter(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 reg;
+
+	reg = mei_me_d0i3c_read(dev);
+	if (reg & H_D0I3C_I3) {
+		/* we are in d0i3, nothing to do */
+		dev_dbg(dev->dev, "already d0i3 : set not needed\n");
+		goto on;
+	}
+
+	mei_me_d0i3_set(dev, false);
+on:
+	hw->pg_state = MEI_PG_ON;
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+	dev_dbg(dev->dev, "d0i3 enter\n");
+	return 0;
+}
+
+/**
+ * mei_me_d0i3_exit_sync - perform d0i3 exit procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_d0i3_exit_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+	int ret;
+	u32 reg;
+
+	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
+
+	reg = mei_me_d0i3c_read(dev);
+	if (!(reg & H_D0I3C_I3)) {
+		/* we are not in d0i3, nothing to do */
+		dev_dbg(dev->dev, "d0i3 exit not needed\n");
+		ret = 0;
+		goto off;
+	}
+
+	reg = mei_me_d0i3_unset(dev);
+	if (!(reg & H_D0I3C_CIP)) {
+		dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
+		ret = 0;
+		goto off;
+	}
+
+	mutex_unlock(&dev->device_lock);
+	wait_event_timeout(dev->wait_pg,
+		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+	mutex_lock(&dev->device_lock);
+
+	if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
+		reg = mei_me_d0i3c_read(dev);
+		if (reg & H_D0I3C_I3) {
+			ret = -ETIME;
+			goto out;
+		}
+	}
+
+	ret = 0;
+off:
+	hw->pg_state = MEI_PG_OFF;
+out:
+	dev->pg_event = MEI_PG_EVENT_IDLE;
+
+	dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
+	return ret;
+}
+
+/**
+ * mei_me_pg_legacy_intr - perform legacy pg processing
+ *			   in interrupt thread handler
  *
  * @dev: the device structure
  */
-static void mei_me_pg_intr(struct mei_device *dev)
+static void mei_me_pg_legacy_intr(struct mei_device *dev)
 {
 	struct mei_me_hw *hw = to_me_hw(dev);
 
@@ -752,6 +948,162 @@
 }
 
 /**
+ * mei_me_d0i3_intr - perform d0i3 processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_d0i3_intr(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (dev->pg_event == MEI_PG_EVENT_INTR_WAIT &&
+	    (hw->intr_source & H_D0I3C_IS)) {
+		dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
+		if (hw->pg_state == MEI_PG_ON) {
+			hw->pg_state = MEI_PG_OFF;
+			if (dev->hbm_state != MEI_HBM_IDLE) {
+				/*
+				 * force H_RDY because it could be
+				 * wiped off during PG
+				 */
+				dev_dbg(dev->dev, "d0i3 set host ready\n");
+				mei_me_host_set_ready(dev);
+			}
+		} else {
+			hw->pg_state = MEI_PG_ON;
+		}
+
+		wake_up(&dev->wait_pg);
+	}
+
+	if (hw->pg_state == MEI_PG_ON && (hw->intr_source & H_IS)) {
+		/*
+		 * HW sent some data and we are in D0i3, so
+		 * we got here because of HW initiated exit from D0i3.
+		 * Start runtime pm resume sequence to exit low power state.
+		 */
+		dev_dbg(dev->dev, "d0i3 want resume\n");
+		mei_hbm_pg_resume(dev);
+	}
+}
+
+/**
+ * mei_me_pg_intr - perform pg processing in interrupt thread handler
+ *
+ * @dev: the device structure
+ */
+static void mei_me_pg_intr(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (hw->d0i3_supported)
+		mei_me_d0i3_intr(dev);
+	else
+		mei_me_pg_legacy_intr(dev);
+}
+
+/**
+ * mei_me_pg_enter_sync - perform runtime pm entry procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int mei_me_pg_enter_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (hw->d0i3_supported)
+		return mei_me_d0i3_enter_sync(dev);
+	else
+		return mei_me_pg_legacy_enter_sync(dev);
+}
+
+/**
+ * mei_me_pg_exit_sync - perform runtime pm exit procedure
+ *
+ * @dev: the device structure
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+int mei_me_pg_exit_sync(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+
+	if (hw->d0i3_supported)
+		return mei_me_d0i3_exit_sync(dev);
+	else
+		return mei_me_pg_legacy_exit_sync(dev);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupt should be enabled after reset.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	int ret;
+	u32 hcsr;
+
+	if (intr_enable) {
+		mei_me_intr_enable(dev);
+		if (hw->d0i3_supported) {
+			ret = mei_me_d0i3_exit_sync(dev);
+			if (ret)
+				return ret;
+		}
+	}
+
+	hcsr = mei_hcsr_read(dev);
+	/* H_RST may be found lit before reset is started,
+	 * for example if preceding reset flow hasn't completed.
+	 * In that case asserting H_RST will be ignored, therefore
+	 * we need to clean H_RST bit to start a successful reset sequence.
+	 */
+	if ((hcsr & H_RST) == H_RST) {
+		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+		hcsr &= ~H_RST;
+		mei_hcsr_set(dev, hcsr);
+		hcsr = mei_hcsr_read(dev);
+	}
+
+	hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
+
+	if (!intr_enable)
+		hcsr &= ~H_CSR_IE_MASK;
+
+	dev->recvd_hw_ready = false;
+	mei_hcsr_write(dev, hcsr);
+
+	/*
+	 * Host reads the H_CSR once to ensure that the
+	 * posted write to H_CSR completes.
+	 */
+	hcsr = mei_hcsr_read(dev);
+
+	if ((hcsr & H_RST) == 0)
+		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
+
+	if ((hcsr & H_RDY) == H_RDY)
+		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+
+	if (!intr_enable) {
+		mei_me_hw_reset_release(dev);
+		if (hw->d0i3_supported) {
+			ret = mei_me_d0i3_enter(dev);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
+/**
  * mei_me_irq_quick_handler - The ISR of the MEI device
  *
  * @irq: The irq number
@@ -759,16 +1111,20 @@
  *
  * Return: irqreturn_t
  */
-
 irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
 {
-	struct mei_device *dev = (struct mei_device *) dev_id;
-	u32 hcsr = mei_hcsr_read(dev);
+	struct mei_device *dev = (struct mei_device *)dev_id;
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 hcsr;
 
-	if ((hcsr & H_IS) != H_IS)
+	hcsr = mei_hcsr_read(dev);
+	if (!(hcsr & H_CSR_IS_MASK))
 		return IRQ_NONE;
 
-	/* clear H_IS bit in H_CSR */
+	hw->intr_source = hcsr & H_CSR_IS_MASK;
+	dev_dbg(dev->dev, "interrupt source 0x%08X.\n", hw->intr_source);
+
+	/* clear H_IS and H_D0I3C_IS bits in H_CSR to clear the interrupts */
 	mei_hcsr_write(dev, hcsr);
 
 	return IRQ_WAKE_THREAD;
@@ -796,11 +1152,6 @@
 	mutex_lock(&dev->device_lock);
 	mei_io_list_init(&complete_list);
 
-	/* Ack the interrupt here
-	 * In case of MSI we don't go through the quick handler */
-	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
-		mei_clear_interrupts(dev);
-
 	/* check if ME wants a reset */
 	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
 		dev_warn(dev->dev, "FW not ready: resetting.\n");
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index 6022d52..2ee14dc 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -50,13 +50,17 @@
  * struct mei_me_hw - me hw specific data
  *
  * @cfg: per device generation config and ops
- * @mem_addr:  io memory address
- * @pg_state:      power gating state
+ * @mem_addr: io memory address
+ * @intr_source: interrupt source
+ * @pg_state: power gating state
+ * @d0i3_supported: d0i3 support
  */
 struct mei_me_hw {
 	const struct mei_cfg *cfg;
 	void __iomem *mem_addr;
+	u32 intr_source;
 	enum mei_pg_state pg_state;
+	bool d0i3_supported;
 };
 
 #define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 16fef6d..4cebde8 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,14 +31,15 @@
 #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */
 #define MEI_IAMTHIF_READ_TIMER     10  /* HPS */
 
-#define MEI_PGI_TIMEOUT            1  /* PG Isolation time response 1 sec */
-#define MEI_HBM_TIMEOUT            1   /* 1 second */
+#define MEI_PGI_TIMEOUT             1  /* PG Isolation time response 1 sec */
+#define MEI_D0I3_TIMEOUT            5  /* D0i3 set/unset max response time */
+#define MEI_HBM_TIMEOUT             1  /* 1 second */
 
 /*
  * MEI Version
  */
-#define HBM_MINOR_VERSION                   1
-#define HBM_MAJOR_VERSION                   1
+#define HBM_MINOR_VERSION                   0
+#define HBM_MAJOR_VERSION                   2
 
 /*
  * MEI version with PGI support
@@ -46,6 +47,24 @@
 #define HBM_MINOR_VERSION_PGI               1
 #define HBM_MAJOR_VERSION_PGI               1
 
+/*
+ * MEI version with Dynamic clients support
+ */
+#define HBM_MINOR_VERSION_DC               0
+#define HBM_MAJOR_VERSION_DC               2
+
+/*
+ * MEI version with disconnect on connection timeout support
+ */
+#define HBM_MINOR_VERSION_DOT              0
+#define HBM_MAJOR_VERSION_DOT              2
+
+/*
+ * MEI version with notification support
+ */
+#define HBM_MINOR_VERSION_EV               0
+#define HBM_MAJOR_VERSION_EV               2
+
 /* Host bus message command opcode */
 #define MEI_HBM_CMD_OP_MSK                  0x7f
 /* Host bus message command RESPONSE */
@@ -81,6 +100,13 @@
 #define MEI_PG_ISOLATION_EXIT_REQ_CMD       0x0b
 #define MEI_PG_ISOLATION_EXIT_RES_CMD       0x8b
 
+#define MEI_HBM_ADD_CLIENT_REQ_CMD          0x0f
+#define MEI_HBM_ADD_CLIENT_RES_CMD          0x8f
+
+#define MEI_HBM_NOTIFY_REQ_CMD              0x10
+#define MEI_HBM_NOTIFY_RES_CMD              0x90
+#define MEI_HBM_NOTIFICATION_CMD            0x11
+
 /*
  * MEI Stop Reason
  * used by hbm_host_stop_request.reason
@@ -136,6 +162,7 @@
 	MEI_CL_CONN_ALREADY_STARTED  = MEI_HBMS_ALREADY_EXISTS,
 	MEI_CL_CONN_OUT_OF_RESOURCES = MEI_HBMS_REJECTED,
 	MEI_CL_CONN_MESSAGE_SMALL    = MEI_HBMS_INVALID_PARAMETER,
+	MEI_CL_CONN_NOT_ALLOWED      = MEI_HBMS_NOT_ALLOWED,
 };
 
 /*
@@ -213,9 +240,17 @@
 	u8 reserved[2];
 } __packed;
 
+/**
+ * struct hbm_host_enum_request -  enumeration request from host to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @allow_add: allow dynamic client addition, HBM version >= 2.0
+ * @reserved: reserved
+ */
 struct hbm_host_enum_request {
 	u8 hbm_cmd;
-	u8 reserved[3];
+	u8 allow_add;
+	u8 reserved[2];
 } __packed;
 
 struct hbm_host_enum_response {
@@ -248,6 +283,38 @@
 } __packed;
 
 /**
+ * struct hbm_add_client_request - request to add a client
+ *     might be sent by fw after enumeration has already completed
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @reserved: reserved
+ * @client_properties: client properties
+ */
+struct hbm_add_client_request {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 reserved[2];
+	struct mei_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_add_client_response - response to add a client
+ *     sent by the host to report client addition status to fw
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @status: if HBMS_SUCCESS then the client can now accept connections.
+ * @reserved: reserved
+ */
+struct hbm_add_client_response {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 status;
+	u8 reserved[1];
+} __packed;
+
+/**
  * struct hbm_power_gate - power gate request/response
  *
  * @hbm_cmd: bus message command header
@@ -298,5 +365,62 @@
 	u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
 } __packed;
 
+#define MEI_HBM_NOTIFICATION_START 1
+#define MEI_HBM_NOTIFICATION_STOP  0
+/**
+ * struct hbm_notification_request - start/stop notification request
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @start:  start = 1 or stop = 0 asynchronous notifications
+ */
+struct hbm_notification_request {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 start;
+} __packed;
+
+/**
+ * struct hbm_notification_response - start/stop notification response
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr: address of the client in ME
+ * @host_addr: address of the client in the driver
+ * @status: (mei_hbm_status) response status for the request
+ *  - MEI_HBMS_SUCCESS: successful stop/start
+ *  - MEI_HBMS_CLIENT_NOT_FOUND: if the connection could not be found.
+ *  - MEI_HBMS_ALREADY_STARTED: for start requests for a previously
+ *                         started notification.
+ *  - MEI_HBMS_NOT_STARTED: for stop request for a connected client for whom
+ *                         asynchronous notifications are currently disabled.
+ *
+ * @start:  start = 1 or stop = 0 asynchronous notifications
+ * @reserved: reserved
+ */
+struct hbm_notification_response {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 status;
+	u8 start;
+	u8 reserved[3];
+} __packed;
+
+/**
+ * struct hbm_notification - notification event
+ *
+ * @hbm_cmd: bus message command header
+ * @me_addr:  address of the client in ME
+ * @host_addr:  address of the client in the driver
+ * @reserved: reserved for alignment
+ */
+struct hbm_notification {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 reserved[1];
+} __packed;
 
 #endif
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 00c3865..e374661 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -331,7 +331,7 @@
 
 	mei_cancel_work(dev);
 
-	mei_nfc_host_exit(dev);
+	mei_cl_bus_remove_devices(dev);
 
 	mutex_lock(&dev->device_lock);
 
@@ -390,6 +390,7 @@
 	INIT_LIST_HEAD(&dev->me_clients);
 	mutex_init(&dev->device_lock);
 	init_rwsem(&dev->me_clients_rwsem);
+	mutex_init(&dev->cl_bus_lock);
 	init_waitqueue_head(&dev->wait_hw_ready);
 	init_waitqueue_head(&dev->wait_pg);
 	init_waitqueue_head(&dev->wait_hbm_start);
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 3f34052..c418d78 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -403,6 +403,13 @@
 			if (ret)
 				return ret;
 			break;
+
+		case MEI_FOP_NOTIFY_START:
+		case MEI_FOP_NOTIFY_STOP:
+			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
+			if (ret)
+				return ret;
+			break;
 		default:
 			BUG();
 		}
@@ -424,6 +431,24 @@
 EXPORT_SYMBOL_GPL(mei_irq_write_handler);
 
 
+/**
+ * mei_connect_timeout - handle connect/disconnect timeouts
+ *
+ * @cl: host client
+ */
+static void mei_connect_timeout(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+
+	if (cl->state == MEI_FILE_CONNECTING) {
+		if (dev->hbm_f_dot_supported) {
+			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
+			wake_up(&cl->wait);
+			return;
+		}
+	}
+	mei_reset(dev);
+}
 
 /**
  * mei_timer - timer function.
@@ -464,7 +489,7 @@
 		if (cl->timer_count) {
 			if (--cl->timer_count == 0) {
 				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
-				mei_reset(dev);
+				mei_connect_timeout(cl);
 				goto out;
 			}
 		}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index e9513d6..b2f2486 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -446,6 +446,45 @@
 }
 
 /**
+ * mei_ioctl_client_notify_request -
+ *     propagate event notification request to client
+ *
+ * @file: pointer to file structure
+ * @request: 0 - disable, 1 - enable
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int mei_ioctl_client_notify_request(struct file *file, u32 request)
+{
+	struct mei_cl *cl = file->private_data;
+
+	return mei_cl_notify_request(cl, file, request);
+}
+
+/**
+ * mei_ioctl_client_notify_get - get or wait for a notification event
+ *
+ * @file: pointer to file structure
+ * @notify_get: set to 1 if a notification event was pending, 0 otherwise
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int mei_ioctl_client_notify_get(struct file *file, u32 *notify_get)
+{
+	struct mei_cl *cl = file->private_data;
+	bool notify_ev;
+	bool block = (file->f_flags & O_NONBLOCK) == 0;
+	int rets;
+
+	rets = mei_cl_notify_get(cl, block, &notify_ev);
+	if (rets)
+		return rets;
+
+	*notify_get = notify_ev ? 1 : 0;
+	return 0;
+}
+
+/**
  * mei_ioctl - the IOCTL function
  *
  * @file: pointer to file structure
@@ -459,6 +498,7 @@
 	struct mei_device *dev;
 	struct mei_cl *cl = file->private_data;
 	struct mei_connect_client_data connect_data;
+	u32 notify_get, notify_req;
 	int rets;
 
 
@@ -499,6 +539,33 @@
 
 		break;
 
+	case IOCTL_MEI_NOTIFY_SET:
+		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
+		if (copy_from_user(&notify_req,
+				   (char __user *)data, sizeof(notify_req))) {
+			dev_dbg(dev->dev, "failed to copy data from userland\n");
+			rets = -EFAULT;
+			goto out;
+		}
+		rets = mei_ioctl_client_notify_request(file, notify_req);
+		break;
+
+	case IOCTL_MEI_NOTIFY_GET:
+		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
+		rets = mei_ioctl_client_notify_get(file, &notify_get);
+		if (rets)
+			goto out;
+
+		dev_dbg(dev->dev, "copy notify data to user\n");
+		if (copy_to_user((char __user *)data,
+				&notify_get, sizeof(notify_get))) {
+			dev_dbg(dev->dev, "failed to copy data to userland\n");
+			rets = -EFAULT;
+			goto out;
+		}
+		break;
+
 	default:
 		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
 		rets = -ENOIOCTLCMD;
@@ -541,6 +608,7 @@
 	struct mei_cl *cl = file->private_data;
 	struct mei_device *dev;
 	unsigned int mask = 0;
+	bool notify_en;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return POLLERR;
@@ -549,6 +617,7 @@
 
 	mutex_lock(&dev->device_lock);
 
+	notify_en = cl->notify_en && (req_events & POLLPRI);
 
 	if (dev->dev_state != MEI_DEV_ENABLED ||
 	    !mei_cl_is_connected(cl)) {
@@ -561,6 +630,12 @@
 		goto out;
 	}
 
+	if (notify_en) {
+		poll_wait(file, &cl->ev_wait, wait);
+		if (cl->notify_ev)
+			mask |= POLLPRI;
+	}
+
 	if (req_events & (POLLIN | POLLRDNORM)) {
 		poll_wait(file, &cl->rx_wait, wait);
 
@@ -576,6 +651,26 @@
 }
 
 /**
+ * mei_fasync - asynchronous io support
+ *
+ * @fd: file descriptor
+ * @file: pointer to file structure
+ * @band: band bitmap
+ *
+ * Return: result of fasync_helper(), or -ENODEV if the client is not connected
+ */
+static int mei_fasync(int fd, struct file *file, int band)
+{
+	struct mei_cl *cl = file->private_data;
+
+	if (!mei_cl_is_connected(cl))
+		return -ENODEV;
+
+	return fasync_helper(fd, file, band, &cl->ev_async);
+}
+
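Together with the poll() and fasync hooks above, the new ioctls give user space an arm/wait/consume loop for event notifications. A minimal user-space sketch, illustrative and not part of the patch; it assumes a file descriptor already connected with IOCTL_MEI_CONNECT_CLIENT and that the matching IOCTL_MEI_NOTIFY_* definitions land in the uapi <linux/mei.h> alongside this series:

#include <poll.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

/* Wait for one event notification on an already-connected MEI client fd. */
static int wait_for_mei_event(int fd)
{
	uint32_t enable = 1, pending = 0;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	/* arm notifications; fails with EOPNOTSUPP when the FW lacks HBM >= 2.0 */
	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
		return -1;

	/* the driver reports a pending event as POLLPRI (and SIGIO via fasync) */
	if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLPRI))
		return -1;

	/* consume the event; pending is set to 1 if one was queued */
	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) < 0)
		return -1;

	return pending ? 0 : -1;
}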
+/**
  * fw_status_show - mei device attribute show method
  *
  * @device: device pointer
@@ -627,6 +722,7 @@
 	.release = mei_release,
 	.write = mei_write,
 	.poll = mei_poll,
+	.fasync = mei_fasync,
 	.llseek = no_llseek
 };
 
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 453f6a3..e25ee16 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -89,6 +89,7 @@
 	MEI_FILE_CONNECTED,
 	MEI_FILE_DISCONNECTING,
 	MEI_FILE_DISCONNECT_REPLY,
+	MEI_FILE_DISCONNECT_REQUIRED,
 	MEI_FILE_DISCONNECTED,
 };
 
@@ -135,6 +136,8 @@
  * @MEI_FOP_CONNECT:    connect
  * @MEI_FOP_DISCONNECT: disconnect
  * @MEI_FOP_DISCONNECT_RSP: disconnect response
+ * @MEI_FOP_NOTIFY_START:   start notification
+ * @MEI_FOP_NOTIFY_STOP:    stop notification
  */
 enum mei_cb_file_ops {
 	MEI_FOP_READ = 0,
@@ -142,6 +145,8 @@
 	MEI_FOP_CONNECT,
 	MEI_FOP_DISCONNECT,
 	MEI_FOP_DISCONNECT_RSP,
+	MEI_FOP_NOTIFY_START,
+	MEI_FOP_NOTIFY_STOP,
 };
 
 /*
@@ -178,7 +183,7 @@
  * @client_id: me client id
  * @mei_flow_ctrl_creds: flow control credits
  * @connect_count: number connections to this client
- * @reserved: reserved
+ * @bus_added: added to bus
  */
 struct mei_me_client {
 	struct list_head list;
@@ -187,7 +192,7 @@
 	u8 client_id;
 	u8 mei_flow_ctrl_creds;
 	u8 connect_count;
-	u8 reserved;
+	u8 bus_added;
 };
 
 
@@ -230,18 +235,21 @@
  * @tx_wait: wait queue for tx completion
  * @rx_wait: wait queue for rx completion
  * @wait:  wait queue for management operation
+ * @ev_wait: notification wait queue
+ * @ev_async: event async notification
  * @status: connection status
  * @me_cl: fw client connected
  * @host_client_id: host id
  * @mei_flow_ctrl_creds: transmit flow credentials
  * @timer_count:  watchdog timer for operation completion
  * @reserved: reserved for alignment
+ * @notify_en: notification - enabled/disabled
+ * @notify_ev: pending notification event
  * @writing_state: state of the tx
  * @rd_pending: pending read credits
  * @rd_completed: completed read
  *
- * @device: device on the mei client bus
- * @device_link:  link to bus clients
+ * @cldev: device on the mei client bus
  */
 struct mei_cl {
 	struct list_head link;
@@ -250,19 +258,21 @@
 	wait_queue_head_t tx_wait;
 	wait_queue_head_t rx_wait;
 	wait_queue_head_t wait;
+	wait_queue_head_t ev_wait;
+	struct fasync_struct *ev_async;
 	int status;
 	struct mei_me_client *me_cl;
 	u8 host_client_id;
 	u8 mei_flow_ctrl_creds;
 	u8 timer_count;
 	u8 reserved;
+	u8 notify_en;
+	u8 notify_ev;
 	enum mei_file_transaction_states writing_state;
 	struct list_head rd_pending;
 	struct list_head rd_completed;
 
-	/* MEI CL bus data */
-	struct mei_cl_device *device;
-	struct list_head device_link;
+	struct mei_cl_device *cldev;
 };
 
 /** struct mei_hw_ops
@@ -329,21 +339,16 @@
 };
 
 /* MEI bus API*/
-
-struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
-					struct mei_me_client *me_cl,
-					struct mei_cl *cl,
-					char *name);
-void mei_cl_remove_device(struct mei_cl_device *device);
-
+void mei_cl_bus_rescan(struct mei_device *bus);
+void mei_cl_dev_fixup(struct mei_cl_device *dev);
 ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
 			bool blocking);
 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length);
 void mei_cl_bus_rx_event(struct mei_cl *cl);
-void mei_cl_bus_remove_devices(struct mei_device *dev);
+void mei_cl_bus_notify_event(struct mei_cl *cl);
+void mei_cl_bus_remove_devices(struct mei_device *bus);
 int mei_cl_bus_init(void);
 void mei_cl_bus_exit(void);
-struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
 
 /**
  * enum mei_pg_event - power gating transition events
@@ -416,7 +421,10 @@
  * @wr_msg      : the buffer for hbm control messages
  *
  * @version     : HBM protocol version in use
- * @hbm_f_pg_supported : hbm feature pgi protocol
+ * @hbm_f_pg_supported  : hbm feature pgi protocol
+ * @hbm_f_dc_supported  : hbm feature dynamic clients
+ * @hbm_f_dot_supported : hbm feature disconnect on timeout
+ * @hbm_f_ev_supported  : hbm feature event notification
  *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
@@ -447,6 +455,7 @@
  * @reset_work  : work item for the device reset
  *
  * @device_list : mei client bus list
+ * @cl_bus_lock : client bus list lock
  *
  * @dbgfs_dir   : debugfs mei root directory
  *
@@ -509,6 +518,9 @@
 
 	struct hbm_version version;
 	unsigned int hbm_f_pg_supported:1;
+	unsigned int hbm_f_dc_supported:1;
+	unsigned int hbm_f_dot_supported:1;
+	unsigned int hbm_f_ev_supported:1;
 
 	struct rw_semaphore me_clients_rwsem;
 	struct list_head me_clients;
@@ -543,6 +555,7 @@
 
 	/* List of bus devices */
 	struct list_head device_list;
+	struct mutex cl_bus_lock;
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
 	struct dentry *dbgfs_dir;
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
deleted file mode 100644
index 290ef30..0000000
--- a/drivers/misc/mei/nfc.c
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include <linux/mei_cl_bus.h>
-
-#include "mei_dev.h"
-#include "client.h"
-
-struct mei_nfc_cmd {
-	u8 command;
-	u8 status;
-	u16 req_id;
-	u32 reserved;
-	u16 data_size;
-	u8 sub_command;
-	u8 data[];
-} __packed;
-
-struct mei_nfc_reply {
-	u8 command;
-	u8 status;
-	u16 req_id;
-	u32 reserved;
-	u16 data_size;
-	u8 sub_command;
-	u8 reply_status;
-	u8 data[];
-} __packed;
-
-struct mei_nfc_if_version {
-	u8 radio_version_sw[3];
-	u8 reserved[3];
-	u8 radio_version_hw[3];
-	u8 i2c_addr;
-	u8 fw_ivn;
-	u8 vendor_id;
-	u8 radio_type;
-} __packed;
-
-struct mei_nfc_connect {
-	u8 fw_ivn;
-	u8 vendor_id;
-} __packed;
-
-struct mei_nfc_connect_resp {
-	u8 fw_ivn;
-	u8 vendor_id;
-	u16 me_major;
-	u16 me_minor;
-	u16 me_hotfix;
-	u16 me_build;
-} __packed;
-
-struct mei_nfc_hci_hdr {
-	u8 cmd;
-	u8 status;
-	u16 req_id;
-	u32 reserved;
-	u16 data_size;
-} __packed;
-
-#define MEI_NFC_CMD_MAINTENANCE 0x00
-#define MEI_NFC_CMD_HCI_SEND 0x01
-#define MEI_NFC_CMD_HCI_RECV 0x02
-
-#define MEI_NFC_SUBCMD_CONNECT    0x00
-#define MEI_NFC_SUBCMD_IF_VERSION 0x01
-
-#define MEI_NFC_HEADER_SIZE 10
-
-/**
- * struct mei_nfc_dev - NFC mei device
- *
- * @me_cl: NFC me client
- * @cl: NFC host client
- * @cl_info: NFC info host client
- * @init_work: perform connection to the info client
- * @fw_ivn: NFC Interface Version Number
- * @vendor_id: NFC manufacturer ID
- * @radio_type: NFC radio type
- * @bus_name: bus name
- *
- */
-struct mei_nfc_dev {
-	struct mei_me_client *me_cl;
-	struct mei_cl *cl;
-	struct mei_cl *cl_info;
-	struct work_struct init_work;
-	u8 fw_ivn;
-	u8 vendor_id;
-	u8 radio_type;
-	char *bus_name;
-};
-
-/* UUIDs for NFC F/W clients */
-const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50,
-				     0x94, 0xd4, 0x50, 0x26,
-				     0x67, 0x23, 0x77, 0x5c);
-
-static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d,
-					0x48, 0xa4, 0xef, 0xab,
-					0xba, 0x8a, 0x12, 0x06);
-
-/* Vendors */
-#define MEI_NFC_VENDOR_INSIDE 0x00
-#define MEI_NFC_VENDOR_NXP    0x01
-
-/* Radio types */
-#define MEI_NFC_VENDOR_INSIDE_UREAD 0x00
-#define MEI_NFC_VENDOR_NXP_PN544    0x01
-
-static void mei_nfc_free(struct mei_nfc_dev *ndev)
-{
-	if (!ndev)
-		return;
-
-	if (ndev->cl) {
-		list_del(&ndev->cl->device_link);
-		mei_cl_unlink(ndev->cl);
-		kfree(ndev->cl);
-	}
-
-	if (ndev->cl_info) {
-		list_del(&ndev->cl_info->device_link);
-		mei_cl_unlink(ndev->cl_info);
-		kfree(ndev->cl_info);
-	}
-
-	mei_me_cl_put(ndev->me_cl);
-	kfree(ndev);
-}
-
-static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
-{
-	struct mei_device *dev;
-
-	if (!ndev->cl)
-		return -ENODEV;
-
-	dev = ndev->cl->dev;
-
-	switch (ndev->vendor_id) {
-	case MEI_NFC_VENDOR_INSIDE:
-		switch (ndev->radio_type) {
-		case MEI_NFC_VENDOR_INSIDE_UREAD:
-			ndev->bus_name = "microread";
-			return 0;
-
-		default:
-			dev_err(dev->dev, "Unknown radio type 0x%x\n",
-				ndev->radio_type);
-
-			return -EINVAL;
-		}
-
-	case MEI_NFC_VENDOR_NXP:
-		switch (ndev->radio_type) {
-		case MEI_NFC_VENDOR_NXP_PN544:
-			ndev->bus_name = "pn544";
-			return 0;
-		default:
-			dev_err(dev->dev, "Unknown radio type 0x%x\n",
-				ndev->radio_type);
-
-			return -EINVAL;
-		}
-
-	default:
-		dev_err(dev->dev, "Unknown vendor ID 0x%x\n",
-			ndev->vendor_id);
-
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int mei_nfc_if_version(struct mei_nfc_dev *ndev)
-{
-	struct mei_device *dev;
-	struct mei_cl *cl;
-
-	struct mei_nfc_cmd cmd;
-	struct mei_nfc_reply *reply = NULL;
-	struct mei_nfc_if_version *version;
-	size_t if_version_length;
-	int bytes_recv, ret;
-
-	cl = ndev->cl_info;
-	dev = cl->dev;
-
-	memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
-	cmd.command = MEI_NFC_CMD_MAINTENANCE;
-	cmd.data_size = 1;
-	cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION;
-
-	ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(struct mei_nfc_cmd), 1);
-	if (ret < 0) {
-		dev_err(dev->dev, "Could not send IF version cmd\n");
-		return ret;
-	}
-
-	/* to be sure on the stack we alloc memory */
-	if_version_length = sizeof(struct mei_nfc_reply) +
-		sizeof(struct mei_nfc_if_version);
-
-	reply = kzalloc(if_version_length, GFP_KERNEL);
-	if (!reply)
-		return -ENOMEM;
-
-	bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
-	if (bytes_recv < 0 || bytes_recv < sizeof(struct mei_nfc_reply)) {
-		dev_err(dev->dev, "Could not read IF version\n");
-		ret = -EIO;
-		goto err;
-	}
-
-	version = (struct mei_nfc_if_version *)reply->data;
-
-	ndev->fw_ivn = version->fw_ivn;
-	ndev->vendor_id = version->vendor_id;
-	ndev->radio_type = version->radio_type;
-
-err:
-	kfree(reply);
-	return ret;
-}
-
-static void mei_nfc_init(struct work_struct *work)
-{
-	struct mei_device *dev;
-	struct mei_cl_device *cldev;
-	struct mei_nfc_dev *ndev;
-	struct mei_cl *cl_info;
-	struct mei_me_client *me_cl_info;
-
-	ndev = container_of(work, struct mei_nfc_dev, init_work);
-
-	cl_info = ndev->cl_info;
-	dev = cl_info->dev;
-
-	mutex_lock(&dev->device_lock);
-
-	/* check for valid client id */
-	me_cl_info = mei_me_cl_by_uuid(dev, &mei_nfc_info_guid);
-	if (!me_cl_info) {
-		mutex_unlock(&dev->device_lock);
-		dev_info(dev->dev, "nfc: failed to find the info client\n");
-		goto err;
-	}
-
-	if (mei_cl_connect(cl_info, me_cl_info, NULL) < 0) {
-		mei_me_cl_put(me_cl_info);
-		mutex_unlock(&dev->device_lock);
-		dev_err(dev->dev, "Could not connect to the NFC INFO ME client");
-
-		goto err;
-	}
-	mei_me_cl_put(me_cl_info);
-	mutex_unlock(&dev->device_lock);
-
-	if (mei_nfc_if_version(ndev) < 0) {
-		dev_err(dev->dev, "Could not get the NFC interface version");
-
-		goto err;
-	}
-
-	dev_info(dev->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
-		ndev->fw_ivn, ndev->vendor_id, ndev->radio_type);
-
-	mutex_lock(&dev->device_lock);
-
-	if (mei_cl_disconnect(cl_info) < 0) {
-		mutex_unlock(&dev->device_lock);
-		dev_err(dev->dev, "Could not disconnect the NFC INFO ME client");
-
-		goto err;
-	}
-
-	mutex_unlock(&dev->device_lock);
-
-	if (mei_nfc_build_bus_name(ndev) < 0) {
-		dev_err(dev->dev, "Could not build the bus ID name\n");
-		return;
-	}
-
-	cldev = mei_cl_add_device(dev, ndev->me_cl, ndev->cl,
-				  ndev->bus_name);
-	if (!cldev) {
-		dev_err(dev->dev, "Could not add the NFC device to the MEI bus\n");
-
-		goto err;
-	}
-
-	cldev->priv_data = ndev;
-
-
-	return;
-
-err:
-	mutex_lock(&dev->device_lock);
-	mei_nfc_free(ndev);
-	mutex_unlock(&dev->device_lock);
-
-}
-
-
-int mei_nfc_host_init(struct mei_device *dev, struct mei_me_client *me_cl)
-{
-	struct mei_nfc_dev *ndev;
-	struct mei_cl *cl_info, *cl;
-	int ret;
-
-
-	/* in case of internal reset bail out
-	 * as the device is already setup
-	 */
-	cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
-	if (cl)
-		return 0;
-
-	ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL);
-	if (!ndev) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ndev->me_cl = mei_me_cl_get(me_cl);
-	if (!ndev->me_cl) {
-		ret = -ENODEV;
-		goto err;
-	}
-
-	cl_info = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
-	if (IS_ERR(cl_info)) {
-		ret = PTR_ERR(cl_info);
-		goto err;
-	}
-
-	list_add_tail(&cl_info->device_link, &dev->device_list);
-
-	ndev->cl_info = cl_info;
-
-	cl = mei_cl_alloc_linked(dev, MEI_HOST_CLIENT_ID_ANY);
-	if (IS_ERR(cl)) {
-		ret = PTR_ERR(cl);
-		goto err;
-	}
-
-	list_add_tail(&cl->device_link, &dev->device_list);
-
-	ndev->cl = cl;
-
-	INIT_WORK(&ndev->init_work, mei_nfc_init);
-	schedule_work(&ndev->init_work);
-
-	return 0;
-
-err:
-	mei_nfc_free(ndev);
-
-	return ret;
-}
-
-void mei_nfc_host_exit(struct mei_device *dev)
-{
-	struct mei_nfc_dev *ndev;
-	struct mei_cl *cl;
-	struct mei_cl_device *cldev;
-
-	cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
-	if (!cl)
-		return;
-
-	cldev = cl->device;
-	if (!cldev)
-		return;
-
-	ndev = (struct mei_nfc_dev *)cldev->priv_data;
-	if (ndev)
-		cancel_work_sync(&ndev->init_work);
-
-	cldev->priv_data = NULL;
-
-	/* Need to remove the device here
-	 * since mei_nfc_free will unlink the clients
-	 */
-	mei_cl_remove_device(cldev);
-
-	mutex_lock(&dev->device_lock);
-	mei_nfc_free(ndev);
-	mutex_unlock(&dev->device_lock);
-}
-
-
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 23f71f5..27678d8 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -82,6 +82,11 @@
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_cfg)},
+
 	/* required last entry */
 	{0, }
 };
@@ -128,6 +133,7 @@
 	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
 	struct mei_device *dev;
 	struct mei_me_hw *hw;
+	unsigned int irqflags;
 	int err;
 
 
@@ -180,17 +186,12 @@
 	pci_enable_msi(pdev);
 
 	 /* request and enable interrupt */
-	if (pci_dev_msi_enabled(pdev))
-		err = request_threaded_irq(pdev->irq,
-			NULL,
-			mei_me_irq_thread_handler,
-			IRQF_ONESHOT, KBUILD_MODNAME, dev);
-	else
-		err = request_threaded_irq(pdev->irq,
+	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+
+	err = request_threaded_irq(pdev->irq,
 			mei_me_irq_quick_handler,
 			mei_me_irq_thread_handler,
-			IRQF_SHARED, KBUILD_MODNAME, dev);
-
+			irqflags, KBUILD_MODNAME, dev);
 	if (err) {
 		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
 		       pdev->irq);
@@ -319,6 +320,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct mei_device *dev;
+	unsigned int irqflags;
 	int err;
 
 	dev = pci_get_drvdata(pdev);
@@ -327,17 +329,13 @@
 
 	pci_enable_msi(pdev);
 
+	irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
+
 	/* request and enable interrupt */
-	if (pci_dev_msi_enabled(pdev))
-		err = request_threaded_irq(pdev->irq,
-			NULL,
-			mei_me_irq_thread_handler,
-			IRQF_ONESHOT, KBUILD_MODNAME, dev);
-	else
-		err = request_threaded_irq(pdev->irq,
+	err = request_threaded_irq(pdev->irq,
 			mei_me_irq_quick_handler,
 			mei_me_irq_thread_handler,
-			IRQF_SHARED, KBUILD_MODNAME, dev);
+			irqflags, KBUILD_MODNAME, dev);
 
 	if (err) {
 		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c
new file mode 100644
index 0000000..7b4a2da
--- /dev/null
+++ b/drivers/misc/qcom-coincell.c
@@ -0,0 +1,152 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct qcom_coincell {
+	struct device	*dev;
+	struct regmap	*regmap;
+	u32		base_addr;
+};
+
+#define QCOM_COINCELL_REG_RSET		0x44
+#define QCOM_COINCELL_REG_VSET		0x45
+#define QCOM_COINCELL_REG_ENABLE	0x46
+
+#define QCOM_COINCELL_ENABLE		BIT(7)
+
+static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 };
+static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 };
+/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */
+
+/* if enable==0, rset and vset are ignored */
+static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset,
+				     int vset, bool enable)
+{
+	int i, j, rc;
+
+	/* if disabling, just do that and skip other operations */
+	if (!enable)
+		return regmap_write(chgr->regmap,
+			  chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0);
+
+	/* find index for current-limiting resistor */
+	for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++)
+		if (rset == qcom_rset_map[i])
+			break;
+
+	if (i >= ARRAY_SIZE(qcom_rset_map)) {
+		dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset);
+		return -EINVAL;
+	}
+
+	/* find index for charge voltage */
+	for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++)
+		if (vset == qcom_vset_map[j])
+			break;
+
+	if (j >= ARRAY_SIZE(qcom_vset_map)) {
+		dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset);
+		return -EINVAL;
+	}
+
+	rc = regmap_write(chgr->regmap,
+			  chgr->base_addr + QCOM_COINCELL_REG_RSET, i);
+	if (rc) {
+		/*
+		 * This is mainly to flag a bad base_addr (reg) from dts.
+		 * Other failures writing to the registers should be
+		 * extremely rare, or indicative of problems that
+		 * should be reported elsewhere (e.g. SPMI failure).
+		 */
+		dev_err(chgr->dev, "could not write to RSET register\n");
+		return rc;
+	}
+
+	rc = regmap_write(chgr->regmap,
+		chgr->base_addr + QCOM_COINCELL_REG_VSET, j);
+	if (rc)
+		return rc;
+
+	/* set 'enable' register */
+	return regmap_write(chgr->regmap,
+			    chgr->base_addr + QCOM_COINCELL_REG_ENABLE,
+			    QCOM_COINCELL_ENABLE);
+}
+
+static int qcom_coincell_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct qcom_coincell chgr;
+	u32 rset, vset;
+	bool enable;
+	int rc;
+
+	chgr.dev = &pdev->dev;
+
+	chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!chgr.regmap) {
+		dev_err(chgr.dev, "Unable to get regmap\n");
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(node, "reg", &chgr.base_addr);
+	if (rc)
+		return rc;
+
+	enable = !of_property_read_bool(node, "qcom,charger-disable");
+
+	if (enable) {
+		rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
+		if (rc) {
+			dev_err(chgr.dev,
+				"can't find 'qcom,rset-ohms' in DT block");
+			return rc;
+		}
+
+		rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
+		if (rc) {
+			dev_err(chgr.dev,
+			    "can't find 'qcom,vset-millivolts' in DT block");
+			return rc;
+		}
+	}
+
+	return qcom_coincell_chgr_config(&chgr, rset, vset, enable);
+}
+
+static const struct of_device_id qcom_coincell_match_table[] = {
+	{ .compatible = "qcom,pm8941-coincell", },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, qcom_coincell_match_table);
+
+static struct platform_driver qcom_coincell_driver = {
+	.driver	= {
+		.name		= "qcom-spmi-coincell",
+		.of_match_table	= qcom_coincell_match_table,
+	},
+	.probe		= qcom_coincell_probe,
+};
+
+module_platform_driver(qcom_coincell_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 5027b8f..71b6455 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -36,8 +36,6 @@
 #include <linux/skbuff.h>
 #include <linux/ti_wilink_st.h>
 #include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 
 #define MAX_ST_DEVICES	3	/* Imagine 1 on each UART for now */
 static struct platform_device *st_kim_devices[MAX_ST_DEVICES];
@@ -45,9 +43,6 @@
 /**********************************************************************/
 /* internal functions */
 
-struct ti_st_plat_data	*dt_pdata;
-static struct ti_st_plat_data *get_platform_data(struct device *dev);
-
 /**
  * st_get_plat_device -
  *	function which returns the reference to the platform device
@@ -469,12 +464,7 @@
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
 
 	pr_info(" %s", __func__);
-	if (kim_gdata->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = kim_gdata->kim_pdev->dev.platform_data;
-	}
+	pdata = kim_gdata->kim_pdev->dev.platform_data;
 
 	do {
 		/* platform specific enabling code here */
@@ -482,9 +472,9 @@
 			pdata->chip_enable(kim_gdata);
 
 		/* Configure BT nShutdown to HIGH state */
-		gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+		gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
 		mdelay(5);	/* FIXME: a proper toggle */
-		gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+		gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
 		mdelay(100);
 		/* re-initialize the completion */
 		reinit_completion(&kim_gdata->ldisc_installed);
@@ -534,18 +524,12 @@
 {
 	long err = 0;
 	struct kim_data_s	*kim_gdata = (struct kim_data_s *)kim_data;
-	struct ti_st_plat_data	*pdata;
+	struct ti_st_plat_data	*pdata =
+		kim_gdata->kim_pdev->dev.platform_data;
 	struct tty_struct	*tty = kim_gdata->core_data->tty;
 
 	reinit_completion(&kim_gdata->ldisc_installed);
 
-	if (kim_gdata->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else
-		pdata = kim_gdata->kim_pdev->dev.platform_data;
-
-
 	if (tty) {	/* can be called before ldisc is installed */
 		/* Flush any pending characters in the driver and discipline. */
 		tty_ldisc_flush(tty);
@@ -566,11 +550,11 @@
 	}
 
 	/* By default configure BT nShutdown to LOW state */
-	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+	gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
 	mdelay(1);
-	gpio_set_value(kim_gdata->nshutdown, GPIO_HIGH);
+	gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_HIGH);
 	mdelay(1);
-	gpio_set_value(kim_gdata->nshutdown, GPIO_LOW);
+	gpio_set_value_cansleep(kim_gdata->nshutdown, GPIO_LOW);
 
 	/* platform specific disable */
 	if (pdata->chip_disable)
@@ -737,52 +721,13 @@
  * board-*.c file
  */
 
-static const struct of_device_id kim_of_match[] = {
-{
-	.compatible = "kim",
-	},
-	{}
-};
-MODULE_DEVICE_TABLE(of, kim_of_match);
-
-static struct ti_st_plat_data *get_platform_data(struct device *dev)
-{
-	struct device_node *np = dev->of_node;
-	const u32 *dt_property;
-	int len;
-
-	dt_pdata = kzalloc(sizeof(*dt_pdata), GFP_KERNEL);
-	if (!dt_pdata)
-		return NULL;
-
-	dt_property = of_get_property(np, "dev_name", &len);
-	if (dt_property)
-		memcpy(&dt_pdata->dev_name, dt_property, len);
-	of_property_read_u32(np, "nshutdown_gpio",
-			     &dt_pdata->nshutdown_gpio);
-	of_property_read_u32(np, "flow_cntrl", &dt_pdata->flow_cntrl);
-	of_property_read_u32(np, "baud_rate", &dt_pdata->baud_rate);
-
-	return dt_pdata;
-}
-
 static struct dentry *kim_debugfs_dir;
 static int kim_probe(struct platform_device *pdev)
 {
 	struct kim_data_s	*kim_gdata;
-	struct ti_st_plat_data	*pdata;
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 	int err;
 
-	if (pdev->dev.of_node)
-		pdata = get_platform_data(&pdev->dev);
-	else
-		pdata = pdev->dev.platform_data;
-
-	if (pdata == NULL) {
-		dev_err(&pdev->dev, "Platform Data is missing\n");
-		return -ENXIO;
-	}
-
 	if ((pdev->id != -1) && (pdev->id < MAX_ST_DEVICES)) {
 		/* multiple devices could exist */
 		st_kim_devices[pdev->id] = pdev;
@@ -863,16 +808,9 @@
 static int kim_remove(struct platform_device *pdev)
 {
 	/* free the GPIOs requested */
-	struct ti_st_plat_data	*pdata;
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 	struct kim_data_s	*kim_gdata;
 
-	if (pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = pdev->dev.platform_data;
-	}
-
 	kim_gdata = platform_get_drvdata(pdev);
 
 	/* Free the Bluetooth/FM/GPIO
@@ -890,22 +828,12 @@
 
 	kfree(kim_gdata);
 	kim_gdata = NULL;
-	kfree(dt_pdata);
-	dt_pdata = NULL;
-
 	return 0;
 }
 
 static int kim_suspend(struct platform_device *pdev, pm_message_t state)
 {
-	struct ti_st_plat_data	*pdata;
-
-	if (pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = pdev->dev.platform_data;
-	}
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 
 	if (pdata->suspend)
 		return pdata->suspend(pdev, state);
@@ -915,14 +843,7 @@
 
 static int kim_resume(struct platform_device *pdev)
 {
-	struct ti_st_plat_data	*pdata;
-
-	if (pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = pdev->dev.platform_data;
-	}
+	struct ti_st_plat_data	*pdata = pdev->dev.platform_data;
 
 	if (pdata->resume)
 		return pdata->resume(pdev);
@@ -939,8 +860,6 @@
 	.resume = kim_resume,
 	.driver = {
 		.name = "kim",
-		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(kim_of_match),
 	},
 };
 
diff --git a/drivers/misc/ti-st/st_ll.c b/drivers/misc/ti-st/st_ll.c
index 518e1b7..93b4d67 100644
--- a/drivers/misc/ti-st/st_ll.c
+++ b/drivers/misc/ti-st/st_ll.c
@@ -26,7 +26,6 @@
 #include <linux/ti_wilink_st.h>
 
 /**********************************************************************/
-
 /* internal functions */
 static void send_ll_cmd(struct st_data_s *st_data,
 	unsigned char cmd)
@@ -54,13 +53,7 @@
 
 	/* communicate to platform about chip asleep */
 	kim_data = st_data->kim_data;
-	if (kim_data->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = kim_data->kim_pdev->dev.platform_data;
-	}
-
+	pdata = kim_data->kim_pdev->dev.platform_data;
 	if (pdata->chip_asleep)
 		pdata->chip_asleep(NULL);
 }
@@ -93,13 +86,7 @@
 
 	/* communicate to platform about chip wakeup */
 	kim_data = st_data->kim_data;
-	if (kim_data->kim_pdev->dev.of_node) {
-		pr_debug("use device tree data");
-		pdata = dt_pdata;
-	} else {
-		pdata = kim_data->kim_pdev->dev.platform_data;
-	}
-
+	pdata = kim_data->kim_pdev->dev.platform_data;
 	if (pdata->chip_awake)
 		pdata->chip_awake(NULL);
 }
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index b003356..87a1337 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -446,7 +446,6 @@
 static struct i2c_driver tsl2550_driver = {
 	.driver = {
 		.name	= TSL2550_DRV_NAME,
-		.owner	= THIS_MODULE,
 		.pm	= TSL2550_PM_OPS,
 	},
 	.probe	= tsl2550_probe,
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 1916174..ffb5634 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -46,7 +46,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.3-k");
+MODULE_VERSION("1.3.0.0-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -110,9 +110,18 @@
  */
 #define VMW_BALLOON_HV_PORT		0x5670
 #define VMW_BALLOON_HV_MAGIC		0x456c6d6f
-#define VMW_BALLOON_PROTOCOL_VERSION	2
 #define VMW_BALLOON_GUEST_ID		1	/* Linux */
 
+enum vmwballoon_capabilities {
+	/*
+	 * Bit 0 is reserved and not associated to any capability.
+	 */
+	VMW_BALLOON_BASIC_CMDS		= (1 << 1),
+	VMW_BALLOON_BATCHED_CMDS	= (1 << 2)
+};
+
+#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS)
+
 #define VMW_BALLOON_CMD_START		0
 #define VMW_BALLOON_CMD_GET_TARGET	1
 #define VMW_BALLOON_CMD_LOCK		2
@@ -120,32 +129,36 @@
 #define VMW_BALLOON_CMD_GUEST_ID	4
 
 /* error codes */
-#define VMW_BALLOON_SUCCESS		0
-#define VMW_BALLOON_FAILURE		-1
-#define VMW_BALLOON_ERROR_CMD_INVALID	1
-#define VMW_BALLOON_ERROR_PPN_INVALID	2
-#define VMW_BALLOON_ERROR_PPN_LOCKED	3
-#define VMW_BALLOON_ERROR_PPN_UNLOCKED	4
-#define VMW_BALLOON_ERROR_PPN_PINNED	5
-#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	6
-#define VMW_BALLOON_ERROR_RESET		7
-#define VMW_BALLOON_ERROR_BUSY		8
+#define VMW_BALLOON_SUCCESS		        0
+#define VMW_BALLOON_FAILURE		        -1
+#define VMW_BALLOON_ERROR_CMD_INVALID	        1
+#define VMW_BALLOON_ERROR_PPN_INVALID	        2
+#define VMW_BALLOON_ERROR_PPN_LOCKED	        3
+#define VMW_BALLOON_ERROR_PPN_UNLOCKED	        4
+#define VMW_BALLOON_ERROR_PPN_PINNED	        5
+#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	        6
+#define VMW_BALLOON_ERROR_RESET		        7
+#define VMW_BALLOON_ERROR_BUSY		        8
 
-#define VMWARE_BALLOON_CMD(cmd, data, result)		\
-({							\
-	unsigned long __stat, __dummy1, __dummy2;	\
-	__asm__ __volatile__ ("inl %%dx" :		\
-		"=a"(__stat),				\
-		"=c"(__dummy1),				\
-		"=d"(__dummy2),				\
-		"=b"(result) :				\
-		"0"(VMW_BALLOON_HV_MAGIC),		\
-		"1"(VMW_BALLOON_CMD_##cmd),		\
-		"2"(VMW_BALLOON_HV_PORT),		\
-		"3"(data) :				\
-		"memory");				\
-	result &= -1UL;					\
-	__stat & -1UL;					\
+#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
+
+#define VMWARE_BALLOON_CMD(cmd, data, result)			\
+({								\
+	unsigned long __status, __dummy1, __dummy2;		\
+	__asm__ __volatile__ ("inl %%dx" :			\
+		"=a"(__status),					\
+		"=c"(__dummy1),					\
+		"=d"(__dummy2),					\
+		"=b"(result) :					\
+		"0"(VMW_BALLOON_HV_MAGIC),			\
+		"1"(VMW_BALLOON_CMD_##cmd),			\
+		"2"(VMW_BALLOON_HV_PORT),			\
+		"3"(data) :					\
+		"memory");					\
+	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
+		result = __dummy1;				\
+	result &= -1UL;						\
+	__status & -1UL;					\
 })
 
 #ifdef CONFIG_DEBUG_FS
@@ -223,11 +236,12 @@
  */
 static bool vmballoon_send_start(struct vmballoon *b)
 {
-	unsigned long status, dummy;
+	unsigned long status, capabilities;
 
 	STATS_INC(b->stats.start);
 
-	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_PROTOCOL_VERSION, dummy);
+	status = VMWARE_BALLOON_CMD(START, VMW_BALLOON_CAPABILITIES,
+				capabilities);
 	if (status == VMW_BALLOON_SUCCESS)
 		return true;
 
@@ -402,55 +416,37 @@
 }
 
 /*
- * Allocate (or reserve) a page for the balloon and notify the host.  If host
- * refuses the page put it on "refuse" list and allocate another one until host
- * is satisfied. "Refused" pages are released at the end of inflation cycle
- * (when we allocate b->rate_alloc pages).
+ * Notify the host of a ballooned page. If the host rejects the page, put it on
+ * the refuse list; the refused pages are then released at the end of the
+ * inflation cycle.
  */
-static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
+static int vmballoon_lock_page(struct vmballoon *b, struct page *page)
 {
-	struct page *page;
-	gfp_t flags;
-	unsigned int hv_status;
-	int locked;
-	flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
+	int locked, hv_status;
 
-	do {
-		if (!can_sleep)
-			STATS_INC(b->stats.alloc);
-		else
-			STATS_INC(b->stats.sleep_alloc);
+	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
+	if (locked > 0) {
+		STATS_INC(b->stats.refused_alloc);
 
-		page = alloc_page(flags);
-		if (!page) {
-			if (!can_sleep)
-				STATS_INC(b->stats.alloc_fail);
-			else
-				STATS_INC(b->stats.sleep_alloc_fail);
-			return -ENOMEM;
+		if (hv_status == VMW_BALLOON_ERROR_RESET ||
+				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+			__free_page(page);
+			return -EIO;
 		}
 
-		/* inform monitor */
-		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
-		if (locked > 0) {
-			STATS_INC(b->stats.refused_alloc);
-
-			if (hv_status == VMW_BALLOON_ERROR_RESET ||
-			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
-				__free_page(page);
-				return -EIO;
-			}
-
-			/*
-			 * Place page on the list of non-balloonable pages
-			 * and retry allocation, unless we already accumulated
-			 * too many of them, in which case take a breather.
-			 */
+		/*
+		 * Place page on the list of non-balloonable pages
+		 * and retry allocation, unless we already accumulated
+		 * too many of them, in which case take a breather.
+		 */
+		if (b->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
+			b->n_refused_pages++;
 			list_add(&page->lru, &b->refused_pages);
-			if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
-				return -EIO;
+		} else {
+			__free_page(page);
 		}
-	} while (locked != 0);
+		return -EIO;
+	}
 
 	/* track allocated page */
 	list_add(&page->lru, &b->pages);
@@ -512,7 +508,7 @@
 	unsigned int i;
 	unsigned int allocations = 0;
 	int error = 0;
-	bool alloc_can_sleep = false;
+	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
 
 	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
 
@@ -543,19 +539,16 @@
 		 __func__, goal, rate, b->rate_alloc);
 
 	for (i = 0; i < goal; i++) {
+		struct page *page;
 
-		error = vmballoon_reserve_page(b, alloc_can_sleep);
-		if (error) {
-			if (error != -ENOMEM) {
-				/*
-				 * Not a page allocation failure, stop this
-				 * cycle. Maybe we'll get new target from
-				 * the host soon.
-				 */
-				break;
-			}
+		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
+			STATS_INC(b->stats.alloc);
+		else
+			STATS_INC(b->stats.sleep_alloc);
 
-			if (alloc_can_sleep) {
+		page = alloc_page(flags);
+		if (!page) {
+			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
 				/*
 				 * CANSLEEP page allocation failed, so guest
 				 * is under severe memory pressure. Quickly
@@ -563,8 +556,10 @@
 				 */
 				b->rate_alloc = max(b->rate_alloc / 2,
 						    VMW_BALLOON_RATE_ALLOC_MIN);
+				STATS_INC(b->stats.sleep_alloc_fail);
 				break;
 			}
+			STATS_INC(b->stats.alloc_fail);
 
 			/*
 			 * NOSLEEP page allocation failed, so the guest is
@@ -579,11 +574,16 @@
 			if (i >= b->rate_alloc)
 				break;
 
-			alloc_can_sleep = true;
+			flags = VMW_PAGE_ALLOC_CANSLEEP;
 			/* Lower rate for sleeping allocations. */
 			rate = b->rate_alloc;
+			continue;
 		}
 
+		error = vmballoon_lock_page(b, page);
+		if (error)
+			break;
+
 		if (++allocations > VMW_BALLOON_YIELD_THRESHOLD) {
 			cond_resched();
 			allocations = 0;
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index a721b5d..9ec262a 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -1031,14 +1031,9 @@
 
 void __exit vmci_host_exit(void)
 {
-	int error;
-
 	vmci_host_device_initialized = false;
 
-	error = misc_deregister(&vmci_host_miscdev);
-	if (error)
-		pr_warn("Error unregistering character device: %d\n", error);
-
+	misc_deregister(&vmci_host_miscdev);
 	vmci_ctx_destroy(host_context);
 	vmci_qp_broker_exit();
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index b5a2b14..6f4323c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -165,7 +165,7 @@
 		return;
 
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
-	q->limits.max_discard_sectors = max_discard;
+	blk_queue_max_discard_sectors(q, max_discard);
 	if (card->erased_byte == 0 && !mmc_can_discard(card))
 		q->limits.discard_zeroes_data = 1;
 	q->limits.discard_granularity = card->pref_erase << 9;
@@ -467,7 +467,7 @@
 			sg_set_buf(__sg, buf + offset, len);
 			offset += len;
 			remain -= len;
-			(__sg++)->page_link &= ~0x02;
+			sg_unmark_end(__sg++);
 			sg_len++;
 		} while (remain);
 	}
@@ -475,7 +475,7 @@
 	list_for_each_entry(req, &packed->list, queuelist) {
 		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
 		__sg = sg + (sg_len - 1);
-		(__sg++)->page_link &= ~0x02;
+		sg_unmark_end(__sg++);
 	}
 	sg_mark_end(sg + (sg_len - 1));
 	return sg_len;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index d313f948b..9cd3631 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -223,8 +223,6 @@
 	 */
 	if (data && data->type)
 		flash_name = data->type;
-	else if (!strcmp(spi->modalias, "spi-nor"))
-		flash_name = NULL; /* auto-detect */
 	else
 		flash_name = spi->modalias;
 
@@ -289,19 +287,25 @@
 	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
 	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},
 
-	/*
-	 * Generic support for SPI NOR that can be identified by the JEDEC READ
-	 * ID opcode (0x9F). Use this, if possible.
-	 */
-	{"spi-nor"},
 	{ },
 };
 MODULE_DEVICE_TABLE(spi, m25p_ids);
 
+static const struct of_device_id m25p_of_table[] = {
+	/*
+	 * Generic compatibility for SPI NOR that can be identified by the
+	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
+	 */
+	{ .compatible = "jedec,spi-nor" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, m25p_of_table);
+
 static struct spi_driver m25p80_driver = {
 	.driver = {
 		.name	= "m25p80",
 		.owner	= THIS_MODULE,
+		.of_match_table = m25p_of_table,
 	},
 	.id_table	= m25p_ids,
 	.probe	= m25p_probe,
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 0099aba..df6f611 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -102,6 +102,7 @@
 	{ .compatible = "atmel,dataflash", },
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, dataflash_dt_ids);
 #endif
 
 /* ......................................................................... */
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index eadcfff..a577ef8 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -385,20 +385,28 @@
 	}
 	rc = mtd_device_register(intel_mtd, nettel_intel_partitions,
 				 num_intel_partitions);
+	if (rc)
+		goto out_map_destroy;
 #endif
 
 	if (amd_mtd) {
 		rc = mtd_device_register(amd_mtd, nettel_amd_partitions,
 					 num_amd_partitions);
+		if (rc)
+			goto out_mtd_unreg;
 	}
 
 #ifdef CONFIG_MTD_CFI_INTELEXT
 	register_reboot_notifier(&nettel_notifier_block);
 #endif
 
-	return(rc);
+	return rc;
 
+out_mtd_unreg:
 #ifdef CONFIG_MTD_CFI_INTELEXT
+	mtd_device_unregister(intel_mtd);
+out_map_destroy:
+	map_destroy(intel_mtd);
 out_unmap1:
 	iounmap(nettel_intel_map.virt);
 #endif
@@ -407,8 +415,7 @@
 	iounmap(nettel_mmcrp);
 	iounmap(nettel_amd_map.virt);
 
-	return(rc);
-
+	return rc;
 }
 
 /****************************************************************************/
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 774b32f..3e614e9 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -130,6 +130,8 @@
 			count++;
 
 	res = kzalloc((count + 1)*sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return NULL;
 	count = 0;
 	while (cplen > 0) {
 		res[count] = cp;
@@ -311,6 +313,10 @@
 
 	ppdata.of_node = dp;
 	part_probe_types = of_get_probes(dp);
+	if (!part_probe_types) {
+		err = -ENOMEM;
+		goto err_out;
+	}
 	mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
 			NULL, 0);
 	of_free_probes(part_probe_types);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 41acc50..44dc965 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -97,14 +97,13 @@
 	if (req->cmd_flags & REQ_DISCARD)
 		return tr->discard(dev, block, nsect);
 
-	switch(rq_data_dir(req)) {
-	case READ:
+	if (rq_data_dir(req) == READ) {
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->readsect(dev, block, buf))
 				return -EIO;
 		rq_flush_dcache_pages(req);
 		return 0;
-	case WRITE:
+	} else {
 		if (!tr->writesect)
 			return -EIO;
 
@@ -113,9 +112,6 @@
 			if (tr->writesect(dev, block, buf))
 				return -EIO;
 		return 0;
-	default:
-		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
-		return -EIO;
 	}
 }
 
@@ -423,7 +419,7 @@
 
 	if (tr->discard) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
-		new->rq->limits.max_discard_sectors = UINT_MAX;
+		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
 	}
 
 	gd->queue = new->rq;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 5b2806a..3324281 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -42,23 +42,20 @@
 	default n
 
 config MTD_NAND_DENALI
-        tristate "Support Denali NAND controller"
-        depends on HAS_DMA
-        help
-	  Enable support for the Denali NAND controller.  This should be
-	  combined with either the PCI or platform drivers to provide device
-	  registration.
+	tristate
 
 config MTD_NAND_DENALI_PCI
         tristate "Support Denali NAND controller on Intel Moorestown"
-	depends on PCI && MTD_NAND_DENALI
+	select MTD_NAND_DENALI
+	depends on HAS_DMA && PCI
         help
           Enable the driver for NAND flash on Intel Moorestown, using the
           Denali NAND controller core.
 
 config MTD_NAND_DENALI_DT
 	tristate "Support Denali NAND controller as a DT device"
-	depends on HAVE_CLK && MTD_NAND_DENALI
+	select MTD_NAND_DENALI
+	depends on HAS_DMA && HAVE_CLK
 	help
 	  Enable the driver for NAND flash on platforms using a Denali NAND
 	  controller as a DT device.
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h b/drivers/mtd/nand/brcmnand/brcmnand.h
index a20c736..169f99e 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.h
+++ b/drivers/mtd/nand/brcmnand/brcmnand.h
@@ -50,7 +50,7 @@
 	 * Other architectures (e.g., ARM) either do not support big endian, or
 	 * else leave I/O in little endian mode.
 	 */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		return __raw_readl(addr);
 	else
 		return readl_relaxed(addr);
@@ -59,7 +59,7 @@
 static inline void brcmnand_writel(u32 val, void __iomem *addr)
 {
 	/* See brcmnand_readl() comments */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
 		__raw_writel(val, addr);
 	else
 		writel_relaxed(val, addr);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index feb6d18..b9080130 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -520,6 +520,32 @@
 	},
 };
 
+/*
+ * An ECC layout for using 4-bit ECC with large-page (4096 bytes) flash,
+ * storing ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_4096 = {
+	.eccbytes = 80,
+	.eccpos = {
+		/* at the end of spare sector */
+		48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+		58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+		68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
+		78, 79, 80, 81, 82, 83, 84, 85, 86, 87,
+		88, 89, 90, 91, 92, 93, 94, 95, 96, 97,
+		98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+		108, 109, 110, 111, 112, 113, 114, 115, 116, 117,
+		118, 119, 120, 121, 122, 123, 124, 125, 126, 127,
+	},
+	.oobfree = {
+		/* 2 bytes at offset 0 hold manufacturer badblock markers */
+		{.offset = 2, .length = 46, },
+		/* 5 bytes at offset 8 hold BBT markers */
+		/* 8 bytes at offset 16 hold JFFS2 clean markers */
+	},
+};
+
 #if defined(CONFIG_OF)
 static const struct of_device_id davinci_nand_of_match[] = {
 	{.compatible = "ti,davinci-nand", },
@@ -796,18 +822,12 @@
 			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
 			goto syndrome_done;
 		}
+		if (chunks == 8) {
+			info->ecclayout = hwecc4_4096;
+			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+			goto syndrome_done;
+		}
 
-		/* 4KiB page chips are not yet supported. The eccpos from
-		 * nand_ecclayout cannot hold 80 bytes and change to eccpos[]
-		 * breaks userspace ioctl interface with mtd-utils. Once we
-		 * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
-		 * for the 4KiB page chips.
-		 *
-		 * TODO: Note that nand_ecclayout has now been expanded and can
-		 *  hold plenty of OOB entries.
-		 */
-		dev_warn(&pdev->dev, "no 4-bit ECC support yet "
-				"for 4KiB-page NAND\n");
 		ret = -EIO;
 		goto err;
 
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
index 6e2f387..de31514 100644
--- a/drivers/mtd/nand/denali_pci.c
+++ b/drivers/mtd/nand/denali_pci.c
@@ -30,19 +30,19 @@
 
 static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
-	int ret = -ENODEV;
+	int ret;
 	resource_size_t csr_base, mem_base;
 	unsigned long csr_len, mem_len;
 	struct denali_nand_info *denali;
 
-	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
+	denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
 	if (!denali)
 		return -ENOMEM;
 
-	ret = pci_enable_device(dev);
+	ret = pcim_enable_device(dev);
 	if (ret) {
-		pr_err("Spectra: pci_enable_device failed.\n");
-		goto failed_alloc_memery;
+		dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
+		return ret;
 	}
 
 	if (id->driver_data == INTEL_CE4100) {
@@ -69,20 +69,19 @@
 
 	ret = pci_request_regions(dev, DENALI_NAND_NAME);
 	if (ret) {
-		pr_err("Spectra: Unable to request memory regions\n");
-		goto failed_enable_dev;
+		dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
+		return ret;
 	}
 
 	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
 	if (!denali->flash_reg) {
-		pr_err("Spectra: Unable to remap memory region\n");
-		ret = -ENOMEM;
-		goto failed_req_regions;
+		dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
+		return -ENOMEM;
 	}
 
 	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
 	if (!denali->flash_mem) {
-		pr_err("Spectra: ioremap_nocache failed!");
+		dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
 		ret = -ENOMEM;
 		goto failed_remap_reg;
 	}
@@ -99,13 +98,6 @@
 	iounmap(denali->flash_mem);
 failed_remap_reg:
 	iounmap(denali->flash_reg);
-failed_req_regions:
-	pci_release_regions(dev);
-failed_enable_dev:
-	pci_disable_device(dev);
-failed_alloc_memery:
-	kfree(denali);
-
 	return ret;
 }
 
@@ -117,9 +109,6 @@
 	denali_remove(denali);
 	iounmap(denali->flash_reg);
 	iounmap(denali->flash_mem);
-	pci_release_regions(dev);
-	pci_disable_device(dev);
-	kfree(denali);
 }
 
 static struct pci_driver denali_pci_driver = {
@@ -129,14 +118,4 @@
 	.remove = denali_pci_remove,
 };
 
-static int denali_init_pci(void)
-{
-	return pci_register_driver(&denali_pci_driver);
-}
-module_init(denali_init_pci);
-
-static void denali_exit_pci(void)
-{
-	pci_unregister_driver(&denali_pci_driver);
-}
-module_exit(denali_exit_pci);
+module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 51394e5..a4e27e8 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -238,8 +238,8 @@
 
 	ifc_nand_ctrl->page = page_addr;
 	/* Program ROW0/COL0 */
-	iowrite32be(page_addr, &ifc->ifc_nand.row0);
-	iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+	ifc_out32(page_addr, &ifc->ifc_nand.row0);
+	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
 
 	buf_num = page_addr & priv->bufnum_mask;
 
@@ -301,19 +301,19 @@
 	int i;
 
 	/* set the chip select for NAND Transaction */
-	iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
-		    &ifc->ifc_nand.nand_csel);
+	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+		  &ifc->ifc_nand.nand_csel);
 
 	dev_vdbg(priv->dev,
 			"%s: fir0=%08x fcr0=%08x\n",
 			__func__,
-			ioread32be(&ifc->ifc_nand.nand_fir0),
-			ioread32be(&ifc->ifc_nand.nand_fcr0));
+			ifc_in32(&ifc->ifc_nand.nand_fir0),
+			ifc_in32(&ifc->ifc_nand.nand_fcr0));
 
 	ctrl->nand_stat = 0;
 
 	/* start read/write seq */
-	iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -336,7 +336,7 @@
 		int sector_end = sector + chip->ecc.steps - 1;
 
 		for (i = sector / 4; i <= sector_end / 4; i++)
-			eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
+			eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
 
 		for (i = sector; i <= sector_end; i++) {
 			errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -376,33 +376,33 @@
 
 	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
 	if (mtd->writesize > 512) {
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-			    (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
-			    (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
 
-		iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			    (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
 	} else {
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
-			    (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
 
 		if (oob)
-			iowrite32be(NAND_CMD_READOOB <<
-				    IFC_NAND_FCR0_CMD0_SHIFT,
-				    &ifc->ifc_nand.nand_fcr0);
+			ifc_out32(NAND_CMD_READOOB <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
 		else
-			iowrite32be(NAND_CMD_READ0 <<
-				    IFC_NAND_FCR0_CMD0_SHIFT,
-				    &ifc->ifc_nand.nand_fcr0);
+			ifc_out32(NAND_CMD_READ0 <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
 	}
 }
 
@@ -422,7 +422,7 @@
 	switch (command) {
 	/* READ0 read the entire buffer to use hardware ECC. */
 	case NAND_CMD_READ0:
-		iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, 0, page_addr, 0);
 
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -437,7 +437,7 @@
 
 	/* READOOB reads only the OOB because no ECC is performed. */
 	case NAND_CMD_READOOB:
-		iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, column, page_addr, 1);
 
 		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -453,19 +453,19 @@
 		if (command == NAND_CMD_PARAM)
 			timing = IFC_FIR_OP_RBCD;
 
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (timing << IFC_NAND_FIR0_OP2_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
-		iowrite32be(column, &ifc->ifc_nand.row3);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(column, &ifc->ifc_nand.row3);
 
 		/*
 		 * although currently it's 8 bytes for READID, we always read
 		 * the maximum 256 bytes(for PARAM)
 		 */
-		iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
 		ifc_nand_ctrl->read_bytes = 256;
 
 		set_addr(mtd, 0, 0, 0);
@@ -480,16 +480,16 @@
 
 	/* ERASE2 uses the block and page address from ERASE1 */
 	case NAND_CMD_ERASE2:
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			    (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
 
-		iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			    (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
 
-		iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		ifc_nand_ctrl->read_bytes = 0;
 		fsl_ifc_run_command(mtd);
 		return;
@@ -506,19 +506,18 @@
 				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
 				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
 
-			iowrite32be(
-				 (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-				 (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-				 (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-				 (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) |
-				 (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
-				 &ifc->ifc_nand.nand_fir0);
-			iowrite32be(
-				 (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				 (IFC_FIR_OP_RDSTAT <<
-					IFC_NAND_FIR1_OP6_SHIFT) |
-				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
-				 &ifc->ifc_nand.nand_fir1);
+			ifc_out32(
+				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+				&ifc->ifc_nand.nand_fir0);
+			ifc_out32(
+				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
 		} else {
 			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
 					IFC_NAND_FCR0_CMD1_SHIFT) |
@@ -527,20 +526,19 @@
 				    (NAND_CMD_STATUS <<
 					IFC_NAND_FCR0_CMD3_SHIFT));
 
-			iowrite32be(
+			ifc_out32(
 				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
 				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
 				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
 				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
 				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
 				&ifc->ifc_nand.nand_fir0);
-			iowrite32be(
-				 (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				 (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
-				 (IFC_FIR_OP_RDSTAT <<
-					IFC_NAND_FIR1_OP7_SHIFT) |
-				 (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
-				  &ifc->ifc_nand.nand_fir1);
+			ifc_out32(
+				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
 
 			if (column >= mtd->writesize)
 				nand_fcr0 |=
@@ -555,7 +553,7 @@
 			column -= mtd->writesize;
 			ifc_nand_ctrl->oob = 1;
 		}
-		iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
 		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
 		return;
 	}
@@ -563,24 +561,26 @@
 	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
 	case NAND_CMD_PAGEPROG: {
 		if (ifc_nand_ctrl->oob) {
-			iowrite32be(ifc_nand_ctrl->index -
-				    ifc_nand_ctrl->column,
-				    &ifc->ifc_nand.nand_fbcr);
+			ifc_out32(ifc_nand_ctrl->index -
+				  ifc_nand_ctrl->column,
+				  &ifc->ifc_nand.nand_fbcr);
 		} else {
-			iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
+			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
 		}
 
 		fsl_ifc_run_command(mtd);
 		return;
 	}
 
-	case NAND_CMD_STATUS:
-		iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
-		iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+	case NAND_CMD_STATUS: {
+		void __iomem *addr;
+
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
 		set_addr(mtd, 0, 0, 0);
 		ifc_nand_ctrl->read_bytes = 1;
 
@@ -590,17 +590,19 @@
 		 * The chip always seems to report that it is
 		 * write-protected, even when it is not.
 		 */
+		addr = ifc_nand_ctrl->addr;
 		if (chip->options & NAND_BUSWIDTH_16)
-			setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
 		else
-			setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP);
+			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
 		return;
+	}
 
 	case NAND_CMD_RESET:
-		iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
-			    &ifc->ifc_nand.nand_fir0);
-		iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
-			    &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
 		fsl_ifc_run_command(mtd);
 		return;
 
@@ -658,7 +660,7 @@
 	 */
 	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
 		offset = ifc_nand_ctrl->index++;
-		return in_8(ifc_nand_ctrl->addr + offset);
+		return ifc_in8(ifc_nand_ctrl->addr + offset);
 	}
 
 	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
@@ -680,7 +682,7 @@
 	 * next byte.
 	 */
 	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
-		data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
 		ifc_nand_ctrl->index += 2;
 		return (uint8_t) data;
 	}
@@ -726,18 +728,18 @@
 	u32 nand_fsr;
 
 	/* Use READ_STATUS command, but wait for the device to be ready */
-	iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
-		    &ifc->ifc_nand.nand_fir0);
-	iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc->ifc_nand.nand_fcr0);
-	iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
 	set_addr(mtd, 0, 0, 0);
 	ifc_nand_ctrl->read_bytes = 1;
 
 	fsl_ifc_run_command(mtd);
 
-	nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
+	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
 
 	/*
 	 * The chip always seems to report that it is
@@ -829,34 +831,34 @@
 	uint32_t cs = priv->bank;
 
 	/* Save CSOR and CSOR_ext */
-	csor = ioread32be(&ifc->csor_cs[cs].csor);
-	csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
+	csor = ifc_in32(&ifc->csor_cs[cs].csor);
+	csor_ext = ifc_in32(&ifc->csor_cs[cs].csor_ext);
 
 	/* change PageSize 8K and SpareSize 1K */
 	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
-	iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
-	iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor_8k, &ifc->csor_cs[cs].csor);
+	ifc_out32(0x0000400, &ifc->csor_cs[cs].csor_ext);
 
 	/* READID */
-	iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-		    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
-		    &ifc->ifc_nand.nand_fir0);
-	iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc->ifc_nand.nand_fcr0);
-	iowrite32be(0x0, &ifc->ifc_nand.row3);
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(0x0, &ifc->ifc_nand.row3);
 
-	iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
+	ifc_out32(0x0, &ifc->ifc_nand.nand_fbcr);
 
 	/* Program ROW0/COL0 */
-	iowrite32be(0x0, &ifc->ifc_nand.row0);
-	iowrite32be(0x0, &ifc->ifc_nand.col0);
+	ifc_out32(0x0, &ifc->ifc_nand.row0);
+	ifc_out32(0x0, &ifc->ifc_nand.col0);
 
 	/* set the chip select for NAND Transaction */
-	iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
+	ifc_out32(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
 
 	/* start read seq */
-	iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
 
 	/* wait for command complete flag or timeout */
 	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -866,8 +868,8 @@
 		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
 
 	/* Restore CSOR and CSOR_ext */
-	iowrite32be(csor, &ifc->csor_cs[cs].csor);
-	iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
+	ifc_out32(csor, &ifc->csor_cs[cs].csor);
+	ifc_out32(csor_ext, &ifc->csor_cs[cs].csor_ext);
 }
 
 static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -884,7 +886,7 @@
 
 	/* fill in nand_chip structure */
 	/* set up function call table */
-	if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+	if ((ifc_in32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
 		chip->read_byte = fsl_ifc_read_byte16;
 	else
 		chip->read_byte = fsl_ifc_read_byte;
@@ -898,13 +900,13 @@
 	chip->bbt_td = &bbt_main_descr;
 	chip->bbt_md = &bbt_mirror_descr;
 
-	iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
+	ifc_out32(0x0, &ifc->ifc_nand.ncfgr);
 
 	/* set up nand options */
 	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->options = NAND_NO_SUBPAGE_WRITE;
 
-	if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+	if (ifc_in32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
 		chip->read_byte = fsl_ifc_read_byte16;
 		chip->options |= NAND_BUSWIDTH_16;
 	} else {
@@ -917,7 +919,7 @@
 	chip->ecc.read_page = fsl_ifc_read_page;
 	chip->ecc.write_page = fsl_ifc_write_page;
 
-	csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
+	csor = ifc_in32(&ifc->csor_cs[priv->bank].csor);
 
 	/* Hardware generates ECC per 512 Bytes */
 	chip->ecc.size = 512;
@@ -1006,7 +1008,7 @@
 static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
 		      phys_addr_t addr)
 {
-	u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
+	u32 cspr = ifc_in32(&ifc->cspr_cs[bank].cspr);
 
 	if (!(cspr & CSPR_V))
 		return 0;
@@ -1092,16 +1094,16 @@
 
 	dev_set_drvdata(priv->dev, priv);
 
-	iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
-		    IFC_NAND_EVTER_EN_FTOER_EN |
-		    IFC_NAND_EVTER_EN_WPER_EN,
-		    &ifc->ifc_nand.nand_evter_en);
+	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+		  IFC_NAND_EVTER_EN_FTOER_EN |
+		  IFC_NAND_EVTER_EN_WPER_EN,
+		  &ifc->ifc_nand.nand_evter_en);
 
 	/* enable NAND Machine Interrupts */
-	iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
-		    IFC_NAND_EVTER_INTR_FTOERIR_EN |
-		    IFC_NAND_EVTER_INTR_WPERIR_EN,
-		    &ifc->ifc_nand.nand_evter_intr_en);
+	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
+		  IFC_NAND_EVTER_INTR_WPERIR_EN,
+		  &ifc->ifc_nand.nand_evter_intr_en);
 	priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
 	if (!priv->mtd.name) {
 		ret = -ENOMEM;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 7124400..a8804a3 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -29,6 +29,10 @@
 	 * listed by full ID. We list them first so that we can easily identify
 	 * the most specific match.
 	 */
+	{"TC58NVG0S3E 1G 3.3V 8-bit",
+		{ .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+		  SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
+		  2 },
 	{"TC58NVG2S0F 4G 3.3V 8-bit",
 		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
 		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 52c0c1a..95d0cc4 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -649,7 +649,8 @@
 				kmem_cache_free(ns->nand_pages_slab,
 						ns->pages[i].byte);
 		}
-		kmem_cache_destroy(ns->nand_pages_slab);
+		if (ns->nand_pages_slab)
+			kmem_cache_destroy(ns->nand_pages_slab);
 		vfree(ns->pages);
 	}
 }
@@ -729,8 +730,7 @@
 	/* Fill the partition_info structure */
 	if (parts_num > ARRAY_SIZE(ns->partitions)) {
 		NS_ERR("too many partitions.\n");
-		ret = -EINVAL;
-		goto error;
+		return -EINVAL;
 	}
 	remains = ns->geom.totsz;
 	next_offset = 0;
@@ -739,14 +739,12 @@
 
 		if (!part_sz || part_sz > remains) {
 			NS_ERR("bad partition size.\n");
-			ret = -EINVAL;
-			goto error;
+			return -EINVAL;
 		}
 		ns->partitions[i].name   = get_partition_name(i);
 		if (!ns->partitions[i].name) {
 			NS_ERR("unable to allocate memory.\n");
-			ret = -ENOMEM;
-			goto error;
+			return -ENOMEM;
 		}
 		ns->partitions[i].offset = next_offset;
 		ns->partitions[i].size   = part_sz;
@@ -757,14 +755,12 @@
 	if (remains) {
 		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
 			NS_ERR("too many partitions.\n");
-			ret = -EINVAL;
-			goto error;
+			return -EINVAL;
 		}
 		ns->partitions[i].name   = get_partition_name(i);
 		if (!ns->partitions[i].name) {
 			NS_ERR("unable to allocate memory.\n");
-			ret = -ENOMEM;
-			goto error;
+			return -ENOMEM;
 		}
 		ns->partitions[i].offset = next_offset;
 		ns->partitions[i].size   = remains;
@@ -792,24 +788,18 @@
 	printk("options: %#x\n",                ns->options);
 
 	if ((ret = alloc_device(ns)) != 0)
-		goto error;
+		return ret;
 
 	/* Allocate / initialize the internal buffer */
 	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
 	if (!ns->buf.byte) {
 		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
 			ns->geom.pgszoob);
-		ret = -ENOMEM;
-		goto error;
+		return -ENOMEM;
 	}
 	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
 
 	return 0;
-
-error:
-	free_device(ns);
-
-	return ret;
 }
 
 /*
diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
index 376bfe1..235ec79 100644
--- a/drivers/mtd/nand/omap_elm.c
+++ b/drivers/mtd/nand/omap_elm.c
@@ -574,5 +574,5 @@
 
 MODULE_DESCRIPTION("ELM driver for BCH error correction");
 MODULE_AUTHOR("Texas Instruments");
-MODULE_ALIAS("platform: elm");
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1259cc5..740983a 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -45,10 +45,13 @@
 
 /*
  * Define a buffer size for the initial command that detects the flash device:
- * STATUS, READID and PARAM. The largest of these is the PARAM command,
- * needing 256 bytes.
+ * STATUS, READID and PARAM.
+ * ONFI param page is 256 bytes, and there are three redundant copies
+ * to be read. JEDEC param page is 512 bytes, and there are also three
+ * redundant copies to be read.
+ * Hence this buffer should be at least 512 x 3. Let's pick 2048.
  */
-#define INIT_BUFFER_SIZE	256
+#define INIT_BUFFER_SIZE	2048
 
 /* registers and bit definitions */
 #define NDCR		(0x00) /* Control register */
@@ -126,6 +129,13 @@
 #define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
 #define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
 
+/*
+ * This should be large enough to read 'ONFI' and 'JEDEC'.
+ * Let's use 7 bytes, which is the maximum ID count supported
+ * by the controller (see NDCR_RD_ID_CNT_MASK).
+ */
+#define READ_ID_BYTES		7
+
 /* macros for registers read/write */
 #define nand_writel(info, off, val)	\
 	writel_relaxed((val), (info)->mmio_base + (off))
@@ -173,8 +183,6 @@
 	/* calculated from pxa3xx_nand_flash data */
 	unsigned int		col_addr_cycles;
 	unsigned int		row_addr_cycles;
-	size_t			read_id_bytes;
-
 };
 
 struct pxa3xx_nand_info {
@@ -439,8 +447,8 @@
 	ndcr |= NDCR_ND_RUN;
 
 	/* clear status bits and run */
-	nand_writel(info, NDCR, 0);
 	nand_writel(info, NDSR, NDSR_MASK);
+	nand_writel(info, NDCR, 0);
 	nand_writel(info, NDCR, ndcr);
 }
 
@@ -675,8 +683,14 @@
 		is_ready = 1;
 	}
 
+	/*
+	 * Clear all status bits before issuing the next command, which
+	 * can and will alter the status bits and deserves a new
+	 * interrupt of its own. This also lets the controller exit the IRQ.
+	 */
+	nand_writel(info, NDSR, status);
+
 	if (status & NDSR_WRCMDREQ) {
-		nand_writel(info, NDSR, NDSR_WRCMDREQ);
 		status &= ~NDSR_WRCMDREQ;
 		info->state = STATE_CMD_HANDLE;
 
@@ -697,8 +711,6 @@
 			nand_writel(info, NDCB0, info->ndcb3);
 	}
 
-	/* clear NDSR to let the controller exit the IRQ */
-	nand_writel(info, NDSR, status);
 	if (is_completed)
 		complete(&info->cmd_complete);
 	if (is_ready)
@@ -899,18 +911,18 @@
 		break;
 
 	case NAND_CMD_PARAM:
-		info->buf_count = 256;
+		info->buf_count = INIT_BUFFER_SIZE;
 		info->ndcb0 |= NDCB0_CMD_TYPE(0)
 				| NDCB0_ADDR_CYC(1)
 				| NDCB0_LEN_OVRD
 				| command;
 		info->ndcb1 = (column & 0xFF);
-		info->ndcb3 = 256;
-		info->data_size = 256;
+		info->ndcb3 = INIT_BUFFER_SIZE;
+		info->data_size = INIT_BUFFER_SIZE;
 		break;
 
 	case NAND_CMD_READID:
-		info->buf_count = host->read_id_bytes;
+		info->buf_count = READ_ID_BYTES;
 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
 				| NDCB0_ADDR_CYC(1)
 				| command;
@@ -1247,9 +1259,6 @@
 		return -EINVAL;
 	}
 
-	/* calculate flash information */
-	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
-
 	/* calculate addressing information */
 	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
 
@@ -1265,7 +1274,7 @@
 	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
 	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
 
-	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
+	ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
 	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
 
 	info->reg_ndcr = ndcr;
@@ -1276,23 +1285,10 @@
 
 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
 {
-	/*
-	 * We set 0 by hard coding here, for we don't support keep_config
-	 * when there is more than one chip attached to the controller
-	 */
-	struct pxa3xx_nand_host *host = info->host[0];
 	uint32_t ndcr = nand_readl(info, NDCR);
 
-	if (ndcr & NDCR_PAGE_SZ) {
-		/* Controller's FIFO size */
-		info->chunk_size = 2048;
-		host->read_id_bytes = 4;
-	} else {
-		info->chunk_size = 512;
-		host->read_id_bytes = 2;
-	}
-
 	/* Set an initial chunk size */
+	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
 	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
 	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
 	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
@@ -1473,6 +1469,9 @@
 	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
 		goto KEEP_CONFIG;
 
+	/* Set a default chunk size */
+	info->chunk_size = 512;
+
 	ret = pxa3xx_nand_sensing(info);
 	if (ret) {
 		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index 77e96d2..cc6bac5 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -466,7 +466,7 @@
 static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
 				uint8_t *read_ecc, uint8_t *calc_ecc)
 {
-	uint16_t ecc_reg;
+	uint32_t ecc_reg;
 	uint8_t ecc_status, err_byte;
 	int i, error = 0;
 
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
index 6f93b29..f97a58d 100644
--- a/drivers/mtd/nand/sunxi_nand.c
+++ b/drivers/mtd/nand/sunxi_nand.c
@@ -99,6 +99,15 @@
 				 NFC_CMD_INT_ENABLE | \
 				 NFC_DMA_INT_ENABLE)
 
+/* define bit use in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO	BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD)		\
+	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) |		\
+	(((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) |		\
+	(((tCAD) & 0x7) << 8))
+
 /* define bit use in NFC_CMD */
 #define NFC_CMD_LOW_BYTE	GENMASK(7, 0)
 #define NFC_CMD_HIGH_BYTE	GENMASK(15, 8)
@@ -208,6 +217,7 @@
  * @nand:		base NAND chip structure
  * @mtd:		base MTD structure
  * @clk_rate:		clk_rate required for this NAND chip
+ * @timing_cfg:		TIMING_CFG register value for this NAND chip
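+ * @timing_ctl:		TIMING_CTL register value for this NAND chip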
  * @selected:		current active CS
  * @nsels:		number of CS lines required by the NAND chip
  * @sels:		array of CS lines descriptions
@@ -217,6 +227,8 @@
 	struct nand_chip nand;
 	struct mtd_info mtd;
 	unsigned long clk_rate;
+	u32 timing_cfg;
+	u32 timing_ctl;
 	int selected;
 	int nsels;
 	struct sunxi_nand_chip_sel sels[0];
@@ -403,6 +415,8 @@
 		}
 	}
 
+	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
 	writel(ctl, nfc->regs + NFC_REG_CTL);
 
 	sunxi_nand->selected = chip;
@@ -807,10 +821,33 @@
 	return 0;
 }
 
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
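+/*
+ * Convert a timing duration (in ps) into controller clock cycles and
+ * return the index of the first lookup-table entry that covers it, or
+ * -EINVAL if none does.
+ */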
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+		u32 clk_period)
+{
+	u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+	int i;
+
+	for (i = 0; i < lut_size; i++) {
+		if (clk_cycles <= lut[i])
+			return i;
+	}
+
+	/* Doesn't fit */
+	return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+			_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
+
 static int sunxi_nand_chip_set_timings(struct sunxi_nand_chip *chip,
 				       const struct nand_sdr_timings *timings)
 {
+	struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
 	u32 min_clk_period = 0;
+	s32 tWB, tADL, tWHR, tRHW, tCAD;
 
 	/* T1 <=> tCLS */
 	if (timings->tCLS_min > min_clk_period)
@@ -872,6 +909,48 @@
 	if (timings->tWC_min > (min_clk_period * 2))
 		min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
 
+	/* T16 - T19 + tCAD */
+	tWB  = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+					min_clk_period);
+	if (tWB < 0) {
+		dev_err(nfc->dev, "unsupported tWB\n");
+		return tWB;
+	}
+
+	tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+	if (tADL > 3) {
+		dev_err(nfc->dev, "unsupported tADL\n");
+		return -EINVAL;
+	}
+
+	tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+	if (tWHR > 3) {
+		dev_err(nfc->dev, "unsupported tWHR\n");
+		return -EINVAL;
+	}
+
+	tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+					min_clk_period);
+	if (tRHW < 0) {
+		dev_err(nfc->dev, "unsupported tRHW\n");
+		return tRHW;
+	}
+
+	/*
+	 * TODO: according to ONFI specs this value only applies for DDR NAND,
+	 * but Allwinner seems to set this to 0x7. Mimic them for now.
+	 */
+	tCAD = 0x7;
+
+	/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+	chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+	/*
+	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+	 * output cycle timings shall be used if the host drives tRC less than
+	 * 30 ns.
+	 */
+	chip->timing_ctl = (timings->tRC_min < 30000) ? NFC_TIMING_CTL_EDO : 0;
 
 	/* Convert min_clk_period from picoseconds to nanoseconds */
 	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
@@ -884,8 +963,6 @@
 	 */
 	chip->clk_rate = (2 * NSEC_PER_SEC) / min_clk_period;
 
-	/* TODO: configure T16-T19 */
-
 	return 0;
 }
 
@@ -1376,13 +1453,6 @@
 
 	platform_set_drvdata(pdev, nfc);
 
-	/*
-	 * TODO: replace these magic values with proper flags as soon as we
-	 * know what they are encoding.
-	 */
-	writel(0x100, nfc->regs + NFC_REG_TIMING_CTL);
-	writel(0x7ff, nfc->regs + NFC_REG_TIMING_CFG);
-
 	ret = sunxi_nand_chips_init(dev, nfc);
 	if (ret) {
 		dev_err(dev, "failed to init nand chips\n");
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 64a4f0e..89bf4c1 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -26,6 +26,18 @@
 	depends on ARCH_MXC
 	help
 	  This enables support for the Quad SPI controller in master mode.
-	  We only connect the NOR to this controller now.
+	  This controller does not support generic SPI. It only supports
+	  SPI NOR.
+
+config SPI_NXP_SPIFI
+	tristate "NXP SPI Flash Interface (SPIFI)"
+	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+	depends on HAS_IOMEM
+	help
+	  Enable support for the NXP LPC SPI Flash Interface controller.
+
+	  SPIFI is a specialized controller for connecting serial SPI
+	  Flash. Enable this option if you have a device with a SPIFI
+	  controller and want to access the Flash as an MTD device.
 
 endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 6a7ce14..e53333e 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,2 +1,3 @@
 obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor.o
 obj-$(CONFIG_SPI_FSL_QUADSPI)	+= fsl-quadspi.o
+obj-$(CONFIG_SPI_NXP_SPIFI)	+= nxp-spifi.o
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 52a872f..d32b7e0 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -26,6 +26,20 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/spi-nor.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
+
+/* Controller needs driver to swap endian */
+#define QUADSPI_QUIRK_SWAP_ENDIAN	(1 << 0)
+/* Controller needs 4x internal clock */
+#define QUADSPI_QUIRK_4X_INT_CLK	(1 << 1)
+/*
+ * TKT253890: the controller needs the driver to fill the txfifo up to
+ * 16 bytes to trigger a data transfer, even though the extra data will
+ * not be transferred.
+ */
+#define QUADSPI_QUIRK_TKT253890		(1 << 2)
+/* Controller cannot wake up from wait mode, TKT245618 */
+#define QUADSPI_QUIRK_TKT245618         (1 << 3)
 
 /* The registers */
 #define QUADSPI_MCR			0x00
@@ -191,9 +205,13 @@
 #define SEQID_EN4B		10
 #define SEQID_BRWR		11
 
+#define QUADSPI_MIN_IOMAP SZ_4M
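+/* smallest AHB read window that is ioremapped at a time */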
+
 enum fsl_qspi_devtype {
 	FSL_QUADSPI_VYBRID,
 	FSL_QUADSPI_IMX6SX,
+	FSL_QUADSPI_IMX7D,
+	FSL_QUADSPI_IMX6UL,
 };
 
 struct fsl_qspi_devtype_data {
@@ -201,20 +219,42 @@
 	int rxfifo;
 	int txfifo;
 	int ahb_buf_size;
+	int driver_data;
 };
 
 static struct fsl_qspi_devtype_data vybrid_data = {
 	.devtype = FSL_QUADSPI_VYBRID,
 	.rxfifo = 128,
 	.txfifo = 64,
-	.ahb_buf_size = 1024
+	.ahb_buf_size = 1024,
+	.driver_data = QUADSPI_QUIRK_SWAP_ENDIAN,
 };
 
 static struct fsl_qspi_devtype_data imx6sx_data = {
 	.devtype = FSL_QUADSPI_IMX6SX,
 	.rxfifo = 128,
 	.txfifo = 512,
-	.ahb_buf_size = 1024
+	.ahb_buf_size = 1024,
+	.driver_data = QUADSPI_QUIRK_4X_INT_CLK
+		       | QUADSPI_QUIRK_TKT245618,
+};
+
+static struct fsl_qspi_devtype_data imx7d_data = {
+	.devtype = FSL_QUADSPI_IMX7D,
+	.rxfifo = 512,
+	.txfifo = 512,
+	.ahb_buf_size = 1024,
+	.driver_data = QUADSPI_QUIRK_TKT253890
+		       | QUADSPI_QUIRK_4X_INT_CLK,
+};
+
+static struct fsl_qspi_devtype_data imx6ul_data = {
+	.devtype = FSL_QUADSPI_IMX6UL,
+	.rxfifo = 128,
+	.txfifo = 512,
+	.ahb_buf_size = 1024,
+	.driver_data = QUADSPI_QUIRK_TKT253890
+		       | QUADSPI_QUIRK_4X_INT_CLK,
 };
 
 #define FSL_QSPI_MAX_CHIP	4
@@ -222,8 +262,10 @@
 	struct mtd_info mtd[FSL_QSPI_MAX_CHIP];
 	struct spi_nor nor[FSL_QSPI_MAX_CHIP];
 	void __iomem *iobase;
-	void __iomem *ahb_base; /* Used when read from AHB bus */
+	void __iomem *ahb_addr;
 	u32 memmap_phy;
+	u32 memmap_offs;
+	u32 memmap_len;
 	struct clk *clk, *clk_en;
 	struct device *dev;
 	struct completion c;
@@ -233,16 +275,28 @@
 	u32 clk_rate;
 	unsigned int chip_base_addr; /* We may support two chips. */
 	bool has_second_chip;
+	struct mutex lock;
+	struct pm_qos_request pm_qos_req;
 };
 
-static inline int is_vybrid_qspi(struct fsl_qspi *q)
+static inline int needs_swap_endian(struct fsl_qspi *q)
 {
-	return q->devtype_data->devtype == FSL_QUADSPI_VYBRID;
+	return q->devtype_data->driver_data & QUADSPI_QUIRK_SWAP_ENDIAN;
 }
 
-static inline int is_imx6sx_qspi(struct fsl_qspi *q)
+static inline int needs_4x_clock(struct fsl_qspi *q)
 {
-	return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX;
+	return q->devtype_data->driver_data & QUADSPI_QUIRK_4X_INT_CLK;
+}
+
+static inline int needs_fill_txfifo(struct fsl_qspi *q)
+{
+	return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT253890;
+}
+
+static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
+{
+	return q->devtype_data->driver_data & QUADSPI_QUIRK_TKT245618;
 }
 
 /*
@@ -251,7 +305,7 @@
  */
 static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
 {
-	return is_vybrid_qspi(q) ? __swab32(a) : a;
+	return needs_swap_endian(q) ? __swab32(a) : a;
 }
 
 static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q)
@@ -343,14 +397,8 @@
 	/* Erase a sector */
 	lut_base = SEQID_SE * 4;
 
-	if (q->nor_size <= SZ_16M) {
-		cmd = SPINOR_OP_SE;
-		addrlen = ADDR24BIT;
-	} else {
-		/* use the 4-byte address */
-		cmd = SPINOR_OP_SE;
-		addrlen = ADDR32BIT;
-	}
+	cmd = q->nor[0].erase_opcode;
+	addrlen = q->nor_size <= SZ_16M ? ADDR24BIT : ADDR32BIT;
 
 	writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
 			base + QUADSPI_LUT(lut_base));
@@ -419,6 +467,8 @@
 	case SPINOR_OP_BRWR:
 		return SEQID_BRWR;
 	default:
+		if (cmd == q->nor[0].erase_opcode)
+			return SEQID_SE;
 		dev_err(q->dev, "Unsupported cmd 0x%.2x\n", cmd);
 		break;
 	}
@@ -537,7 +587,7 @@
 
 	/* clear the TX FIFO. */
 	tmp = readl(q->iobase + QUADSPI_MCR);
-	writel(tmp | QUADSPI_MCR_CLR_RXF_MASK, q->iobase + QUADSPI_MCR);
+	writel(tmp | QUADSPI_MCR_CLR_TXF_MASK, q->iobase + QUADSPI_MCR);
 
 	/* fill the TX data to the FIFO */
 	for (j = 0, i = ((count + 3) / 4); j < i; j++) {
@@ -546,6 +596,11 @@
 		txbuf++;
 	}
 
+	/* fill the TXFIFO up to 16 bytes for i.MX7d */
+	if (needs_fill_txfifo(q))
+		for (; i < 4; i++)
+			writel(tmp, q->iobase + QUADSPI_TBDR);
+
 	/* Trigger it */
 	ret = fsl_qspi_runcmd(q, opcode, to, count);
 
@@ -606,6 +661,38 @@
 		q->iobase + QUADSPI_BFGENCR);
 }
 
+/* Prepare and enable the QSPI clocks */
+static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
+{
+	int ret;
+
+	ret = clk_prepare_enable(q->clk_en);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(q->clk);
+	if (ret) {
+		clk_disable_unprepare(q->clk_en);
+		return ret;
+	}
+
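+	/*
+	 * TKT245618 workaround: request zero CPU DMA latency so deep idle
+	 * states are avoided while the controller is clocked.
+	 */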
+	if (needs_wakeup_wait_mode(q))
+		pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+
+	return 0;
+}
+
+/* Disable and unprepare the QSPI clocks */
+static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
+{
+	if (needs_wakeup_wait_mode(q))
+		pm_qos_remove_request(&q->pm_qos_req);
+
+	clk_disable_unprepare(q->clk);
+	clk_disable_unprepare(q->clk_en);
+}
+
 /* We use this function to do some basic init for spi_nor_scan(). */
 static int fsl_qspi_nor_setup(struct fsl_qspi *q)
 {
@@ -613,11 +700,23 @@
 	u32 reg;
 	int ret;
 
-	/* the default frequency, we will change it in the future.*/
+	/* disable and unprepare the clocks so no glitch reaches the controller */
+	fsl_qspi_clk_disable_unprep(q);
+
+	/* the default frequency, we will change it in the future. */
 	ret = clk_set_rate(q->clk, 66000000);
 	if (ret)
 		return ret;
 
+	ret = fsl_qspi_clk_prep_enable(q);
+	if (ret)
+		return ret;
+
+	/* Reset the module */
+	writel(QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
+		base + QUADSPI_MCR);
+	udelay(1);
+
 	/* Init the LUT table. */
 	fsl_qspi_init_lut(q);
 
@@ -635,6 +734,9 @@
 	writel(QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
 			base + QUADSPI_MCR);
 
+	/* clear all interrupt status */
+	writel(0xffffffff, q->iobase + QUADSPI_FR);
+
 	/* enable the interrupt */
 	writel(QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
 
@@ -646,13 +748,20 @@
 	unsigned long rate = q->clk_rate;
 	int ret;
 
-	if (is_imx6sx_qspi(q))
+	if (needs_4x_clock(q))
 		rate *= 4;
 
+	/* disable and unprepare the clocks so no glitch reaches the controller */
+	fsl_qspi_clk_disable_unprep(q);
+
 	ret = clk_set_rate(q->clk, rate);
 	if (ret)
 		return ret;
 
+	ret = fsl_qspi_clk_prep_enable(q);
+	if (ret)
+		return ret;
+
 	/* Init the LUT table again. */
 	fsl_qspi_init_lut(q);
 
@@ -665,6 +774,8 @@
 static const struct of_device_id fsl_qspi_dt_ids[] = {
 	{ .compatible = "fsl,vf610-qspi", .data = (void *)&vybrid_data, },
 	{ .compatible = "fsl,imx6sx-qspi", .data = (void *)&imx6sx_data, },
+	{ .compatible = "fsl,imx7d-qspi", .data = (void *)&imx7d_data, },
+	{ .compatible = "fsl,imx6ul-qspi", .data = (void *)&imx6ul_data, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
@@ -730,11 +841,42 @@
 	struct fsl_qspi *q = nor->priv;
 	u8 cmd = nor->read_opcode;
 
-	dev_dbg(q->dev, "cmd [%x],read from (0x%p, 0x%.8x, 0x%.8x),len:%d\n",
-		cmd, q->ahb_base, q->chip_base_addr, (unsigned int)from, len);
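+	/*
+	 * AHB reads go through a memory-mapped window of at least
+	 * QUADSPI_MIN_IOMAP bytes that is remapped whenever the requested
+	 * range falls outside the current mapping.
+	 */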
+	/* if necessary, ioremap the window before the first AHB read */
+	if (!q->ahb_addr) {
+		q->memmap_offs = q->chip_base_addr + from;
+		q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+
+		q->ahb_addr = ioremap_nocache(
+				q->memmap_phy + q->memmap_offs,
+				q->memmap_len);
+		if (!q->ahb_addr) {
+			dev_err(q->dev, "ioremap failed\n");
+			return -ENOMEM;
+		}
+	/* remap if the requested data falls outside the current window */
+	} else if (q->chip_base_addr + from < q->memmap_offs
+			|| q->chip_base_addr + from + len >
+			q->memmap_offs + q->memmap_len) {
+		iounmap(q->ahb_addr);
+
+		q->memmap_offs = q->chip_base_addr + from;
+		q->memmap_len = len > QUADSPI_MIN_IOMAP ? len : QUADSPI_MIN_IOMAP;
+		q->ahb_addr = ioremap_nocache(
+				q->memmap_phy + q->memmap_offs,
+				q->memmap_len);
+		if (!q->ahb_addr) {
+			dev_err(q->dev, "ioremap failed\n");
+			return -ENOMEM;
+		}
+	}
+
+	dev_dbg(q->dev, "cmd [%x],read from 0x%p, len:%d\n",
+		cmd, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+		len);
 
 	/* Read out the data directly from the AHB buffer.*/
-	memcpy(buf, q->ahb_base + q->chip_base_addr + from, len);
+	memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs,
+		len);
 
 	*retlen += len;
 	return 0;
@@ -761,26 +903,26 @@
 	struct fsl_qspi *q = nor->priv;
 	int ret;
 
-	ret = clk_enable(q->clk_en);
-	if (ret)
-		return ret;
+	mutex_lock(&q->lock);
 
-	ret = clk_enable(q->clk);
-	if (ret) {
-		clk_disable(q->clk_en);
-		return ret;
-	}
+	ret = fsl_qspi_clk_prep_enable(q);
+	if (ret)
+		goto err_mutex;
 
 	fsl_qspi_set_base_addr(q, nor);
 	return 0;
+
+err_mutex:
+	mutex_unlock(&q->lock);
+	return ret;
 }
 
 static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
 {
 	struct fsl_qspi *q = nor->priv;
 
-	clk_disable(q->clk);
-	clk_disable(q->clk_en);
+	fsl_qspi_clk_disable_unprep(q);
+	mutex_unlock(&q->lock);
 }
 
 static int fsl_qspi_probe(struct platform_device *pdev)
@@ -804,6 +946,10 @@
 	if (!q->nor_num || q->nor_num > FSL_QSPI_MAX_CHIP)
 		return -ENODEV;
 
+	q->dev = dev;
+	q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
+	platform_set_drvdata(pdev, q);
+
 	/* find the resources */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
 	q->iobase = devm_ioremap_resource(dev, res);
@@ -812,9 +958,11 @@
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 					"QuadSPI-memory");
-	q->ahb_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(q->ahb_base))
-		return PTR_ERR(q->ahb_base);
+	if (!devm_request_mem_region(dev, res->start, resource_size(res),
+				     res->name)) {
+		dev_err(dev, "can't request region for resource %pR\n", res);
+		return -EBUSY;
+	}
 
 	q->memmap_phy = res->start;
 
@@ -827,15 +975,9 @@
 	if (IS_ERR(q->clk))
 		return PTR_ERR(q->clk);
 
-	ret = clk_prepare_enable(q->clk_en);
+	ret = fsl_qspi_clk_prep_enable(q);
 	if (ret) {
-		dev_err(dev, "cannot enable the qspi_en clock: %d\n", ret);
-		return ret;
-	}
-
-	ret = clk_prepare_enable(q->clk);
-	if (ret) {
-		dev_err(dev, "cannot enable the qspi clock: %d\n", ret);
+		dev_err(dev, "can not enable the clock\n");
 		goto clk_failed;
 	}
 
@@ -853,10 +995,6 @@
 		goto irq_failed;
 	}
 
-	q->dev = dev;
-	q->devtype_data = (struct fsl_qspi_devtype_data *)of_id->data;
-	platform_set_drvdata(pdev, q);
-
 	ret = fsl_qspi_nor_setup(q);
 	if (ret)
 		goto irq_failed;
@@ -864,6 +1002,8 @@
 	if (of_get_property(np, "fsl,qspi-has-second-chip", NULL))
 		q->has_second_chip = true;
 
+	mutex_init(&q->lock);
+
 	/* iterate the subnodes. */
 	for_each_available_child_of_node(dev->of_node, np) {
 		char modalias[40];
@@ -892,24 +1032,24 @@
 
 		ret = of_modalias_node(np, modalias, sizeof(modalias));
 		if (ret < 0)
-			goto irq_failed;
+			goto mutex_failed;
 
 		ret = of_property_read_u32(np, "spi-max-frequency",
 				&q->clk_rate);
 		if (ret < 0)
-			goto irq_failed;
+			goto mutex_failed;
 
 		/* set the chip address for READID */
 		fsl_qspi_set_base_addr(q, nor);
 
 		ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
 		if (ret)
-			goto irq_failed;
+			goto mutex_failed;
 
 		ppdata.of_node = np;
 		ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
 		if (ret)
-			goto irq_failed;
+			goto mutex_failed;
 
 		/* Set the correct NOR size now. */
 		if (q->nor_size == 0) {
@@ -939,8 +1079,7 @@
 	if (ret)
 		goto last_init_failed;
 
-	clk_disable(q->clk);
-	clk_disable(q->clk_en);
+	fsl_qspi_clk_disable_unprep(q);
 	return 0;
 
 last_init_failed:
@@ -950,10 +1089,12 @@
 			i *= 2;
 		mtd_device_unregister(&q->mtd[i]);
 	}
+mutex_failed:
+	mutex_destroy(&q->lock);
 irq_failed:
-	clk_disable_unprepare(q->clk);
+	fsl_qspi_clk_disable_unprep(q);
 clk_failed:
-	clk_disable_unprepare(q->clk_en);
+	dev_err(dev, "Freescale QuadSPI probe failed\n");
 	return ret;
 }
 
@@ -973,8 +1114,11 @@
 	writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
 	writel(0x0, q->iobase + QUADSPI_RSER);
 
-	clk_unprepare(q->clk);
-	clk_unprepare(q->clk_en);
+	mutex_destroy(&q->lock);
+
+	if (q->ahb_addr)
+		iounmap(q->ahb_addr);
+
 	return 0;
 }
 
@@ -985,12 +1129,19 @@
 
 static int fsl_qspi_resume(struct platform_device *pdev)
 {
+	int ret;
 	struct fsl_qspi *q = platform_get_drvdata(pdev);
 
+	ret = fsl_qspi_clk_prep_enable(q);
+	if (ret)
+		return ret;
+
 	fsl_qspi_nor_setup(q);
 	fsl_qspi_set_map_addr(q);
 	fsl_qspi_nor_setup_last(q);
 
+	fsl_qspi_clk_disable_unprep(q);
+
 	return 0;
 }
 
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c
new file mode 100644
index 0000000..9ad1dd0
--- /dev/null
+++ b/drivers/mtd/spi-nor/nxp-spifi.c
@@ -0,0 +1,482 @@
+/*
+ * SPI-NOR driver for NXP SPI Flash Interface (SPIFI)
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on Freescale QuadSPI driver:
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/spi-nor.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* NXP SPIFI registers, bits and macros */
+#define SPIFI_CTRL				0x000
+#define  SPIFI_CTRL_TIMEOUT(timeout)		(timeout)
+#define  SPIFI_CTRL_CSHIGH(cshigh)		((cshigh) << 16)
+#define  SPIFI_CTRL_MODE3			BIT(23)
+#define  SPIFI_CTRL_DUAL			BIT(28)
+#define  SPIFI_CTRL_FBCLK			BIT(30)
+#define SPIFI_CMD				0x004
+#define  SPIFI_CMD_DATALEN(dlen)		((dlen) & 0x3fff)
+#define  SPIFI_CMD_DOUT				BIT(15)
+#define  SPIFI_CMD_INTLEN(ilen)			((ilen) << 16)
+#define  SPIFI_CMD_FIELDFORM(field)		((field) << 19)
+#define  SPIFI_CMD_FIELDFORM_ALL_SERIAL		SPIFI_CMD_FIELDFORM(0x0)
+#define  SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA	SPIFI_CMD_FIELDFORM(0x1)
+#define  SPIFI_CMD_FRAMEFORM(frame)		((frame) << 21)
+#define  SPIFI_CMD_FRAMEFORM_OPCODE_ONLY	SPIFI_CMD_FRAMEFORM(0x1)
+#define  SPIFI_CMD_OPCODE(op)			((op) << 24)
+#define SPIFI_ADDR				0x008
+#define SPIFI_IDATA				0x00c
+#define SPIFI_CLIMIT				0x010
+#define SPIFI_DATA				0x014
+#define SPIFI_MCMD				0x018
+#define SPIFI_STAT				0x01c
+#define  SPIFI_STAT_MCINIT			BIT(0)
+#define  SPIFI_STAT_CMD				BIT(1)
+#define  SPIFI_STAT_RESET			BIT(4)
+
+#define SPI_NOR_MAX_ID_LEN	6
+
+struct nxp_spifi {
+	struct device *dev;
+	struct clk *clk_spifi;
+	struct clk *clk_reg;
+	void __iomem *io_base;
+	void __iomem *flash_base;
+	struct mtd_info mtd;
+	struct spi_nor nor;
+	bool memory_mode;
+	u32 mcmd;
+};
+
+static int nxp_spifi_wait_for_cmd(struct nxp_spifi *spifi)
+{
+	u8 stat;
+	int ret;
+
+	ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+				 !(stat & SPIFI_STAT_CMD), 10, 30);
+	if (ret)
+		dev_warn(spifi->dev, "command timed out\n");
+
+	return ret;
+}
+
+static int nxp_spifi_reset(struct nxp_spifi *spifi)
+{
+	u8 stat;
+	int ret;
+
+	writel(SPIFI_STAT_RESET, spifi->io_base + SPIFI_STAT);
+	ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+				 !(stat & SPIFI_STAT_RESET), 10, 30);
+	if (ret)
+		dev_warn(spifi->dev, "state reset timed out\n");
+
+	return ret;
+}
+
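+/* Leave memory-mapped mode so that explicit SPIFI commands can be issued */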
+static int nxp_spifi_set_memory_mode_off(struct nxp_spifi *spifi)
+{
+	int ret;
+
+	if (!spifi->memory_mode)
+		return 0;
+
+	ret = nxp_spifi_reset(spifi);
+	if (ret)
+		dev_err(spifi->dev, "unable to enter command mode\n");
+	else
+		spifi->memory_mode = false;
+
+	return ret;
+}
+
+static int nxp_spifi_set_memory_mode_on(struct nxp_spifi *spifi)
+{
+	u8 stat;
+	int ret;
+
+	if (spifi->memory_mode)
+		return 0;
+
+	writel(spifi->mcmd, spifi->io_base + SPIFI_MCMD);
+	ret = readb_poll_timeout(spifi->io_base + SPIFI_STAT, stat,
+				 stat & SPIFI_STAT_MCINIT, 10, 30);
+	if (ret)
+		dev_err(spifi->dev, "unable to enter memory mode\n");
+	else
+		spifi->memory_mode = true;
+
+	return ret;
+}
+
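+/* Read @len bytes from a flash register using an opcode-only serial command */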
+static int nxp_spifi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	cmd = SPIFI_CMD_DATALEN(len) |
+	      SPIFI_CMD_OPCODE(opcode) |
+	      SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	while (len--)
+		*buf++ = readb(spifi->io_base + SPIFI_DATA);
+
+	return nxp_spifi_wait_for_cmd(spifi);
+}
+
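+/* Write @len bytes to a flash register using an opcode-only serial command */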
+static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
+			       int len, int write_enable)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	cmd = SPIFI_CMD_DOUT |
+	      SPIFI_CMD_DATALEN(len) |
+	      SPIFI_CMD_OPCODE(opcode) |
+	      SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_FRAMEFORM_OPCODE_ONLY;
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	while (len--)
+		writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+	return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len,
+			  size_t *retlen, u_char *buf)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_on(spifi);
+	if (ret)
+		return ret;
+
+	memcpy_fromio(buf, spifi->flash_base + from, len);
+	*retlen += len;
+
+	return 0;
+}
+
+static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len,
+			    size_t *retlen, const u_char *buf)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return;
+
+	writel(to, spifi->io_base + SPIFI_ADDR);
+	*retlen += len;
+
+	cmd = SPIFI_CMD_DOUT |
+	      SPIFI_CMD_DATALEN(len) |
+	      SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_OPCODE(nor->program_opcode) |
+	      SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	while (len--)
+		writeb(*buf++, spifi->io_base + SPIFI_DATA);
+
+	nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs)
+{
+	struct nxp_spifi *spifi = nor->priv;
+	u32 cmd;
+	int ret;
+
+	ret = nxp_spifi_set_memory_mode_off(spifi);
+	if (ret)
+		return ret;
+
+	writel(offs, spifi->io_base + SPIFI_ADDR);
+
+	cmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL |
+	      SPIFI_CMD_OPCODE(nor->erase_opcode) |
+	      SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+	writel(cmd, spifi->io_base + SPIFI_CMD);
+
+	return nxp_spifi_wait_for_cmd(spifi);
+}
+
+static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi)
+{
+	switch (spifi->nor.flash_read) {
+	case SPI_NOR_NORMAL:
+	case SPI_NOR_FAST:
+		spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL;
+		break;
+	case SPI_NOR_DUAL:
+	case SPI_NOR_QUAD:
+		spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA;
+		break;
+	default:
+		dev_err(spifi->dev, "unsupported SPI read mode\n");
+		return -EINVAL;
+	}
+
+	/* Memory mode supports address length between 1 and 4 */
+	if (spifi->nor.addr_width < 1 || spifi->nor.addr_width > 4)
+		return -EINVAL;
+
+	spifi->mcmd |= SPIFI_CMD_OPCODE(spifi->nor.read_opcode) |
+		       SPIFI_CMD_INTLEN(spifi->nor.read_dummy / 8) |
+		       SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1);
+
+	return 0;
+}
+
+static void nxp_spifi_dummy_id_read(struct spi_nor *nor)
+{
+	u8 id[SPI_NOR_MAX_ID_LEN];
+
+	nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+}
+
+static int nxp_spifi_setup_flash(struct nxp_spifi *spifi,
+				 struct device_node *np)
+{
+	struct mtd_part_parser_data ppdata;
+	enum read_mode flash_read;
+	u32 ctrl, property;
+	u16 mode = 0;
+	int ret;
+
+	if (!of_property_read_u32(np, "spi-rx-bus-width", &property)) {
+		switch (property) {
+		case 1:
+			break;
+		case 2:
+			mode |= SPI_RX_DUAL;
+			break;
+		case 4:
+			mode |= SPI_RX_QUAD;
+			break;
+		default:
+			dev_err(spifi->dev, "unsupported rx-bus-width\n");
+			return -EINVAL;
+		}
+	}
+
+	if (of_find_property(np, "spi-cpha", NULL))
+		mode |= SPI_CPHA;
+
+	if (of_find_property(np, "spi-cpol", NULL))
+		mode |= SPI_CPOL;
+
+	/* Setup control register defaults */
+	ctrl = SPIFI_CTRL_TIMEOUT(1000) |
+	       SPIFI_CTRL_CSHIGH(15) |
+	       SPIFI_CTRL_FBCLK;
+
+	if (mode & SPI_RX_DUAL) {
+		ctrl |= SPIFI_CTRL_DUAL;
+		flash_read = SPI_NOR_DUAL;
+	} else if (mode & SPI_RX_QUAD) {
+		ctrl &= ~SPIFI_CTRL_DUAL;
+		flash_read = SPI_NOR_QUAD;
+	} else {
+		ctrl |= SPIFI_CTRL_DUAL;
+		flash_read = SPI_NOR_NORMAL;
+	}
+
+	switch (mode & (SPI_CPHA | SPI_CPOL)) {
+	case SPI_MODE_0:
+		ctrl &= ~SPIFI_CTRL_MODE3;
+		break;
+	case SPI_MODE_3:
+		ctrl |= SPIFI_CTRL_MODE3;
+		break;
+	default:
+		dev_err(spifi->dev, "only mode 0 and 3 supported\n");
+		return -EINVAL;
+	}
+
+	writel(ctrl, spifi->io_base + SPIFI_CTRL);
+
+	spifi->mtd.priv  = &spifi->nor;
+	spifi->nor.mtd   = &spifi->mtd;
+	spifi->nor.dev   = spifi->dev;
+	spifi->nor.priv  = spifi;
+	spifi->nor.read  = nxp_spifi_read;
+	spifi->nor.write = nxp_spifi_write;
+	spifi->nor.erase = nxp_spifi_erase;
+	spifi->nor.read_reg  = nxp_spifi_read_reg;
+	spifi->nor.write_reg = nxp_spifi_write_reg;
+
+	/*
+	 * The first read on a hard reset isn't reliable so do a
+	 * dummy read of the id before calling spi_nor_scan().
+	 * The reason for this problem is unknown.
+	 *
+	 * The official NXP spifilib uses more or less the same
+	 * workaround that is applied here by reading the device
+	 * id multiple times.
+	 */
+	nxp_spifi_dummy_id_read(&spifi->nor);
+
+	ret = spi_nor_scan(&spifi->nor, NULL, flash_read);
+	if (ret) {
+		dev_err(spifi->dev, "device scan failed\n");
+		return ret;
+	}
+
+	ret = nxp_spifi_setup_memory_cmd(spifi);
+	if (ret) {
+		dev_err(spifi->dev, "memory command setup failed\n");
+		return ret;
+	}
+
+	ppdata.of_node = np;
+	ret = mtd_device_parse_register(&spifi->mtd, NULL, &ppdata, NULL, 0);
+	if (ret) {
+		dev_err(spifi->dev, "mtd device parse failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int nxp_spifi_probe(struct platform_device *pdev)
+{
+	struct device_node *flash_np;
+	struct nxp_spifi *spifi;
+	struct resource *res;
+	int ret;
+
+	spifi = devm_kzalloc(&pdev->dev, sizeof(*spifi), GFP_KERNEL);
+	if (!spifi)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spifi");
+	spifi->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->io_base))
+		return PTR_ERR(spifi->io_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash");
+	spifi->flash_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(spifi->flash_base))
+		return PTR_ERR(spifi->flash_base);
+
+	spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+	if (IS_ERR(spifi->clk_spifi)) {
+		dev_err(&pdev->dev, "spifi clock not found\n");
+		return PTR_ERR(spifi->clk_spifi);
+	}
+
+	spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	if (IS_ERR(spifi->clk_reg)) {
+		dev_err(&pdev->dev, "reg clock not found\n");
+		return PTR_ERR(spifi->clk_reg);
+	}
+
+	ret = clk_prepare_enable(spifi->clk_reg);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable reg clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(spifi->clk_spifi);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable spifi clock\n");
+		goto dis_clk_reg;
+	}
+
+	spifi->dev = &pdev->dev;
+	platform_set_drvdata(pdev, spifi);
+
+	/* Initialize and reset device */
+	nxp_spifi_reset(spifi);
+	writel(0, spifi->io_base + SPIFI_IDATA);
+	writel(0, spifi->io_base + SPIFI_MCMD);
+	nxp_spifi_reset(spifi);
+
+	flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
+	if (!flash_np) {
+		dev_err(&pdev->dev, "no SPI flash device to configure\n");
+		ret = -ENODEV;
+		goto dis_clks;
+	}
+
+	ret = nxp_spifi_setup_flash(spifi, flash_np);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to setup flash chip\n");
+		goto dis_clks;
+	}
+
+	return 0;
+
+dis_clks:
+	clk_disable_unprepare(spifi->clk_spifi);
+dis_clk_reg:
+	clk_disable_unprepare(spifi->clk_reg);
+	return ret;
+}
+
+static int nxp_spifi_remove(struct platform_device *pdev)
+{
+	struct nxp_spifi *spifi = platform_get_drvdata(pdev);
+
+	mtd_device_unregister(&spifi->mtd);
+	clk_disable_unprepare(spifi->clk_spifi);
+	clk_disable_unprepare(spifi->clk_reg);
+
+	return 0;
+}
+
+static const struct of_device_id nxp_spifi_match[] = {
+	{.compatible = "nxp,lpc1773-spifi"},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nxp_spifi_match);
+
+static struct platform_driver nxp_spifi_driver = {
+	.probe	= nxp_spifi_probe,
+	.remove	= nxp_spifi_remove,
+	.driver	= {
+		.name = "nxp-spifi",
+		.of_match_table = nxp_spifi_match,
+	},
+};
+module_platform_driver(nxp_spifi_driver);
+
+MODULE_DESCRIPTION("NXP SPI Flash Interface driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index d78831b..c27d427 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -29,6 +29,8 @@
 #define SPI_NOR_MAX_ID_LEN	6
 
 struct flash_info {
+	char		*name;
+
 	/*
 	 * This array stores the ID bytes.
 	 * The first three bytes are the JEDEC ID.
@@ -59,7 +61,7 @@
 
 #define JEDEC_MFR(info)	((info)->id[0])
 
-static const struct spi_device_id *spi_nor_match_id(const char *name);
+static const struct flash_info *spi_nor_match_id(const char *name);
 
 /*
  * Read the status register, returning its value in the location
@@ -169,7 +171,7 @@
 }
 
 /* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, struct flash_info *info,
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
 			    int enable)
 {
 	int status;
@@ -469,7 +471,6 @@
 
 /* Used when the "_ext_id" is two bytes at most */
 #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
-	((kernel_ulong_t)&(struct flash_info) {				\
 		.id = {							\
 			((_jedec_id) >> 16) & 0xff,			\
 			((_jedec_id) >> 8) & 0xff,			\
@@ -481,11 +482,9 @@
 		.sector_size = (_sector_size),				\
 		.n_sectors = (_n_sectors),				\
 		.page_size = 256,					\
-		.flags = (_flags),					\
-	})
+		.flags = (_flags),
 
 #define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
-	((kernel_ulong_t)&(struct flash_info) {				\
 		.id = {							\
 			((_jedec_id) >> 16) & 0xff,			\
 			((_jedec_id) >> 8) & 0xff,			\
@@ -498,17 +497,14 @@
 		.sector_size = (_sector_size),				\
 		.n_sectors = (_n_sectors),				\
 		.page_size = 256,					\
-		.flags = (_flags),					\
-	})
+		.flags = (_flags),
 
 #define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
-	((kernel_ulong_t)&(struct flash_info) {				\
 		.sector_size = (_sector_size),				\
 		.n_sectors = (_n_sectors),				\
 		.page_size = (_page_size),				\
 		.addr_width = (_addr_width),				\
-		.flags = (_flags),					\
-	})
+		.flags = (_flags),
 
 /* NOTE: double check command sets and memory organization when you add
  * more nor chips.  This current list focusses on newer chips, which
@@ -521,7 +517,7 @@
  * For historical (and compatibility) reasons (before we got above config) some
  * old entries may be missing 4K flag.
  */
-static const struct spi_device_id spi_nor_ids[] = {
+static const struct flash_info spi_nor_ids[] = {
 	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
 	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
 	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
@@ -589,7 +585,7 @@
 
 	/* Micron */
 	{ "n25q032",	 INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
-	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SPI_NOR_QUAD_READ) },
+	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
 	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SPI_NOR_QUAD_READ) },
 	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SPI_NOR_QUAD_READ) },
 	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -626,6 +622,7 @@
 	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
 	{ "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
 	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
+	{ "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K) },
 
 	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
 	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
@@ -702,11 +699,11 @@
 	{ },
 };
 
-static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
 {
 	int			tmp;
 	u8			id[SPI_NOR_MAX_ID_LEN];
-	struct flash_info	*info;
+	const struct flash_info	*info;
 
 	tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
 	if (tmp < 0) {
@@ -715,7 +712,7 @@
 	}
 
 	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
-		info = (void *)spi_nor_ids[tmp].driver_data;
+		info = &spi_nor_ids[tmp];
 		if (info->id_len) {
 			if (!memcmp(info->id, id, info->id_len))
 				return &spi_nor_ids[tmp];
@@ -961,7 +958,7 @@
 	return 0;
 }
 
-static int set_quad_mode(struct spi_nor *nor, struct flash_info *info)
+static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
 {
 	int status;
 
@@ -1003,8 +1000,7 @@
 
 int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
 {
-	const struct spi_device_id	*id = NULL;
-	struct flash_info		*info;
+	const struct flash_info *info = NULL;
 	struct device *dev = nor->dev;
 	struct mtd_info *mtd = nor->mtd;
 	struct device_node *np = dev->of_node;
@@ -1015,27 +1011,25 @@
 	if (ret)
 		return ret;
 
-	/* Try to auto-detect if chip name wasn't specified */
-	if (!name)
-		id = spi_nor_read_id(nor);
-	else
-		id = spi_nor_match_id(name);
-	if (IS_ERR_OR_NULL(id))
+	if (name)
+		info = spi_nor_match_id(name);
+	/* Try to auto-detect if chip name wasn't specified or not found */
+	if (!info)
+		info = spi_nor_read_id(nor);
+	if (IS_ERR_OR_NULL(info))
 		return -ENOENT;
 
-	info = (void *)id->driver_data;
-
 	/*
 	 * If caller has specified name of flash model that can normally be
 	 * detected using JEDEC, let's verify it.
 	 */
 	if (name && info->id_len) {
-		const struct spi_device_id *jid;
+		const struct flash_info *jinfo;
 
-		jid = spi_nor_read_id(nor);
-		if (IS_ERR(jid)) {
-			return PTR_ERR(jid);
-		} else if (jid != id) {
+		jinfo = spi_nor_read_id(nor);
+		if (IS_ERR(jinfo)) {
+			return PTR_ERR(jinfo);
+		} else if (jinfo != info) {
 			/*
 			 * JEDEC knows better, so overwrite platform ID. We
 			 * can't trust partitions any longer, but we'll let
@@ -1044,9 +1038,8 @@
 			 * information, even if it's not 100% accurate.
 			 */
 			dev_warn(dev, "found %s, expected %s\n",
-				 jid->name, id->name);
-			id = jid;
-			info = (void *)jid->driver_data;
+				 jinfo->name, info->name);
+			info = jinfo;
 		}
 	}
 
@@ -1196,7 +1189,7 @@
 
 	nor->read_dummy = spi_nor_read_dummy_cycles(nor);
 
-	dev_info(dev, "%s (%lld Kbytes)\n", id->name,
+	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
 			(long long)mtd->size >> 10);
 
 	dev_dbg(dev,
@@ -1219,9 +1212,9 @@
 }
 EXPORT_SYMBOL_GPL(spi_nor_scan);
 
-static const struct spi_device_id *spi_nor_match_id(const char *name)
+static const struct flash_info *spi_nor_match_id(const char *name)
 {
-	const struct spi_device_id *id = spi_nor_ids;
+	const struct flash_info *id = spi_nor_ids;
 
 	while (id->name[0]) {
 		if (!strcmp(name, id->name))
diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c
index 8e8525f..3176212 100644
--- a/drivers/mtd/tests/oobtest.c
+++ b/drivers/mtd/tests/oobtest.c
@@ -125,7 +125,8 @@
  * Display the address, offset and data bytes at comparison failure.
  * Return number of bitflips encountered.
  */
-static size_t memcmpshow(loff_t addr, const void *cs, const void *ct, size_t count)
+static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
+			       const void *ct, size_t count)
 {
 	const unsigned char *su1, *su2;
 	int res;
@@ -135,8 +136,9 @@
 	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
 		res = *su1 ^ *su2;
 		if (res) {
-			pr_info("error @addr[0x%lx:0x%zx] 0x%x -> 0x%x diff 0x%x\n",
-				(unsigned long)addr, i, *su1, *su2, res);
+			pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
+				(unsigned long)addr, (unsigned long)offset + i,
+				*su1, *su2, res);
 			bitflips += hweight8(res);
 		}
 	}
@@ -144,6 +146,9 @@
 	return bitflips;
 }
 
+#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
+							 (count))
+
 /*
  * Compare with 0xff and show the address, offset and data bytes at
  * comparison failure. Return number of bitflips encountered.
@@ -228,9 +233,10 @@
 				errcnt += 1;
 				return err ? err : -1;
 			}
-			bitflips = memcmpshow(addr, readbuf + use_offset,
-					      writebuf + (use_len_max * i) + use_offset,
-					      use_len);
+			bitflips = memcmpshowoffset(addr, use_offset,
+						    readbuf + use_offset,
+						    writebuf + (use_len_max * i) + use_offset,
+						    use_len);
 
 			/* verify pre-offset area for 0xff */
 			bitflips += memffshow(addr, 0, readbuf, use_offset);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c18f9e6..d18eb60 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -180,8 +180,8 @@
 	  will be called vxlan.
 
 config GENEVE
-       tristate "Generic Network Virtualization Encapsulation netdev"
-       depends on INET && GENEVE_CORE
+       tristate "Generic Network Virtualization Encapsulation"
+       depends on INET && NET_UDP_TUNNEL
        select NET_IP_TUNNEL
        ---help---
 	  This allows one to create geneve virtual interfaces that provide
@@ -282,7 +282,6 @@
 config VIRTIO_NET
 	tristate "Virtio network driver"
 	depends on VIRTIO
-	select AVERAGE
 	---help---
 	  This is the virtual network driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
@@ -297,6 +296,13 @@
 	  diagnostics, etc. This is mostly intended for developers or support
 	  to debug netlink issues. If unsure, say N.
 
+config NET_VRF
+	tristate "Virtual Routing and Forwarding (Lite)"
+	depends on IP_MULTIPLE_TABLES && IPV6_MULTIPLE_TABLES
+	---help---
+	  This option enables support for mapping interfaces into VRFs
+	  by creating VRF devices.
+
 endif # NET_CORE
 
 config SUNGEM_PHY
@@ -407,6 +413,13 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmxnet3.
 
+config FUJITSU_ES
+	tristate "FUJITSU Extended Socket Network Device driver"
+	depends on ACPI
+	help
+	  This driver provides support for the Extended Socket network device
+	  on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
+
 source "drivers/net/hyperv/Kconfig"
 
 endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c12cb22..900b0c5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -25,6 +25,7 @@
 obj-$(CONFIG_VXLAN) += vxlan.o
 obj-$(CONFIG_GENEVE) += geneve.o
 obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_NET_VRF) += vrf.o
 
 #
 # Networking Drivers
@@ -67,3 +68,5 @@
 
 obj-$(CONFIG_HYPERV_NET) += hyperv/
 obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
+
+obj-$(CONFIG_FUJITSU_ES) += fjes/
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 7fde4d5..3c45358 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1870,8 +1870,6 @@
 static void ad_marker_response_received(struct bond_marker *marker,
 					struct port *port)
 {
-	marker = NULL;
-	port = NULL;
 	/* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
 }
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a98dd4f..771a449 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -979,7 +979,6 @@
 		if (bond_3ad_get_active_agg_info(bond, &ad_info))
 			return;
 
-	rcu_read_lock_bh();
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		ops = slave->dev->netdev_ops;
 		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
@@ -1000,7 +999,6 @@
 		ops->ndo_poll_controller(slave->dev);
 		up(&ni->dev_lock);
 	}
-	rcu_read_unlock_bh();
 }
 
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
@@ -3097,7 +3095,7 @@
 	int noff, proto = -1;
 
 	if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
-		return skb_flow_dissect_flow_keys(skb, fk);
+		return skb_flow_dissect_flow_keys(skb, fk, 0);
 
 	fk->ports.ports = 0;
 	noff = skb_network_offset(skb);
@@ -3780,7 +3778,6 @@
 	struct slave *slave;
 	struct list_head *iter;
 	struct bond_up_slave *new_arr, *old_arr;
-	int slaves_in_agg;
 	int agg_id = 0;
 	int ret = 0;
 
@@ -3811,7 +3808,6 @@
 			}
 			goto out;
 		}
-		slaves_in_agg = ad_info.ports;
 		agg_id = ad_info.aggregator_id;
 	}
 	bond_for_each_slave(bond, slave, iter) {
@@ -4122,9 +4118,8 @@
 	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
 
 	/* Initialize the device options */
-	bond_dev->tx_queue_len = 0;
 	bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
-	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
+	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
 	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
 	/* don't acquire bond device's netif_tx_lock when transmitting */
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 1bda292..db760e8 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -111,6 +111,7 @@
 	[IFLA_BOND_AD_USER_PORT_KEY]	= { .type = NLA_U16 },
 	[IFLA_BOND_AD_ACTOR_SYSTEM]	= { .type = NLA_BINARY,
 					    .len  = ETH_ALEN },
+	[IFLA_BOND_TLB_DYNAMIC_LB]	= { .type = NLA_U8 },
 };
 
 static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -405,7 +406,6 @@
 		if (err)
 			return err;
 	}
-
 	if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
 		int port_key =
 			nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
@@ -415,7 +415,6 @@
 		if (err)
 			return err;
 	}
-
 	if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
 		if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
 			return -EINVAL;
@@ -426,6 +425,15 @@
 		if (err)
 			return err;
 	}
+	if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
+		int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
+
+		bond_opt_initval(&newval, dynamic_lb);
+		err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
 
@@ -476,6 +484,7 @@
 		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
 		nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
 		nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
+		nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
 		0;
 }
 
@@ -598,6 +607,10 @@
 		       bond->params.ad_select))
 		goto nla_put_failure;
 
+	if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
+		       bond->params.tlb_dynamic_lb))
+		goto nla_put_failure;
+
 	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
 		struct ad_info info;
 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index e9c624d..6dda57e 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -420,6 +420,13 @@
 		.flags = BOND_OPTFLAG_IFDOWN,
 		.values = bond_ad_user_port_key_tbl,
 		.set = bond_option_ad_user_port_key_set,
+	},
+	[BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+		.id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+		.name = "num_grat_arp",
+		.desc = "Number of peer notifications to send on failover event",
+		.values = bond_num_peer_notif_tbl,
+		.set = bond_option_num_peer_notif_set
 	}
 };
 
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 31835a4..f4ae720 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -380,7 +380,7 @@
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
 		   bonding_show_ad_select, bonding_sysfs_store_option);
 
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
 					   struct device_attribute *attr,
 					   char *buf)
@@ -388,24 +388,10 @@
 	struct bonding *bond = to_bond(d);
 	return sprintf(buf, "%d\n", bond->params.num_peer_notif);
 }
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
-					    struct device_attribute *attr,
-					    const char *buf, size_t count)
-{
-	struct bonding *bond = to_bond(d);
-	int ret;
-
-	ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
-	if (!ret)
-		ret = count;
-
-	return ret;
-}
 static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
-		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
-		   bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+		   bonding_show_num_peer_notif, bonding_sysfs_store_option);
 
 /* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index b3b922a..615c65d 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1120,7 +1120,7 @@
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->destructor = free_netdev;
 	dev->netdev_ops = &cfhsi_netdevops;
 	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 9da0653..c2dea49 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -427,7 +427,7 @@
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
 	dev->mtu = CAIF_MAX_MTU;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->destructor = free_netdev;
 	skb_queue_head_init(&serdev->head);
 	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 72ea9ff..de39620 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -710,7 +710,7 @@
 	dev->netdev_ops = &cfspi_ops;
 	dev->type = ARPHRD_CAIF;
 	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->mtu = SPI_MAX_PAYLOAD_SIZE;
 	dev->destructor = free_netdev;
 	skb_queue_head_init(&cfspi->qhead);
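
The three CAIF hunks above (and the dummy.c hunk at the end of this section) all make the same substitution: instead of zeroing tx_queue_len to say "this virtual device needs no qdisc", the driver now sets the IFF_NO_QUEUE private flag. A minimal sketch of the pattern, assuming a hypothetical virtual device; example_setup is a made-up name, while IFF_NOARP, IFF_NO_QUEUE and struct net_device are the real kernel symbols used in the hunks above.

#include <linux/netdevice.h>

/* Hypothetical setup callback showing the IFF_NO_QUEUE idiom */
static void example_setup(struct net_device *dev)
{
	dev->flags |= IFF_NOARP;
	/* ask the core for "no queue" instead of forcing dev->tx_queue_len = 0 */
	dev->priv_flags |= IFF_NO_QUEUE;
}
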
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index b1e8d72..c83f0f03 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -805,7 +805,7 @@
 	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
 		reg |= FLEXCAN_CTRL_SMP;
 
-	netdev_info(dev, "writing ctrl=0x%08x\n", reg);
+	netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
 	flexcan_write(reg, &regs->ctrl);
 
 	/* print chip status */
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 8b4d3e6..5eee62b 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -162,7 +162,7 @@
 	struct can_bittiming_const bt_const;
 	unsigned int channel;	/* channel number */
 
-	/* This lock prevents a race condition between xmit and recieve. */
+	/* This lock prevents a race condition between xmit and receive. */
 	spinlock_t tx_ctx_lock;
 	struct gs_tx_context tx_context[GS_MAX_TX_URBS];
 
@@ -274,7 +274,7 @@
 	}
 }
 
-static void gs_usb_recieve_bulk_callback(struct urb *urb)
+static void gs_usb_receive_bulk_callback(struct urb *urb)
 {
 	struct gs_usb *usbcan = urb->context;
 	struct gs_can *dev;
@@ -376,7 +376,7 @@
 			  usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
 			  hf,
 			  sizeof(struct gs_host_frame),
-			  gs_usb_recieve_bulk_callback,
+			  gs_usb_receive_bulk_callback,
 			  usbcan
 			  );
 
@@ -605,7 +605,7 @@
 							  GSUSB_ENDPOINT_IN),
 					  buf,
 					  sizeof(struct gs_host_frame),
-					  gs_usb_recieve_bulk_callback,
+					  gs_usb_receive_bulk_callback,
 					  parent);
 			urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 6b94007..838545c 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -854,6 +854,18 @@
 /*
  * describe the PCAN-USB adapter
  */
+static const struct can_bittiming_const pcan_usb_const = {
+	.name = "pcan_usb",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb = {
 	.name = "PCAN-USB",
 	.device_id = PCAN_USB_PRODUCT_ID,
@@ -862,17 +874,7 @@
 	.clock = {
 		.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 64,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 7921cff..5a2e341 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -792,9 +792,9 @@
 	dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
 
 	dev->can.clock = peak_usb_adapter->clock;
-	dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
+	dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
 	dev->can.do_set_bittiming = peak_usb_set_bittiming;
-	dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
+	dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
 	dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
 	dev->can.do_set_mode = peak_usb_set_mode;
 	dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index 9e624f0..506fe50 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -48,8 +48,8 @@
 	u32 device_id;
 	u32 ctrlmode_supported;
 	struct can_clock clock;
-	const struct can_bittiming_const bittiming_const;
-	const struct can_bittiming_const data_bittiming_const;
+	const struct can_bittiming_const * const bittiming_const;
+	const struct can_bittiming_const * const data_bittiming_const;
 	unsigned int ctrl_count;
 
 	int (*intf_probe)(struct usb_interface *intf);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 09d14e7..ce44a03 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -990,6 +990,30 @@
 }
 
 /* describes the PCAN-USB FD adapter */
+static const struct can_bittiming_const pcan_usb_fd_const = {
+	.name = "pcan_usb_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 64,
+	.tseg2_min = 1,
+	.tseg2_max = 16,
+	.sjw_max = 16,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_fd_data_const = {
+	.name = "pcan_usb_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_fd = {
 	.name = "PCAN-USB FD",
 	.device_id = PCAN_USBFD_PRODUCT_ID,
@@ -999,28 +1023,8 @@
 	.clock = {
 		.freq = PCAN_UFD_CRYSTAL_HZ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 64,
-		.tseg2_min = 1,
-		.tseg2_max = 16,
-		.sjw_max = 16,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
-	.data_bittiming_const = {
-		.name = "pcan_usb_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_fd_const,
+	.data_bittiming_const = &pcan_usb_fd_data_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
@@ -1058,6 +1062,30 @@
 };
 
 /* describes the PCAN-USB Pro FD adapter */
+static const struct can_bittiming_const pcan_usb_pro_fd_const = {
+	.name = "pcan_usb_pro_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 64,
+	.tseg2_min = 1,
+	.tseg2_max = 16,
+	.sjw_max = 16,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
+static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
+	.name = "pcan_usb_pro_fd",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro_fd = {
 	.name = "PCAN-USB Pro FD",
 	.device_id = PCAN_USBPROFD_PRODUCT_ID,
@@ -1067,28 +1095,8 @@
 	.clock = {
 		.freq = PCAN_UFD_CRYSTAL_HZ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb_pro_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 64,
-		.tseg2_min = 1,
-		.tseg2_max = 16,
-		.sjw_max = 16,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
-	.data_bittiming_const = {
-		.name = "pcan_usb_pro_fd",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_pro_fd_const,
+	.data_bittiming_const = &pcan_usb_pro_fd_data_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 7d61b32..bbdd605 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -1004,6 +1004,18 @@
 /*
  * describe the PCAN-USB Pro adapter
  */
+static const struct can_bittiming_const pcan_usb_pro_const = {
+	.name = "pcan_usb_pro",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,
+	.brp_inc = 1,
+};
+
 const struct peak_usb_adapter pcan_usb_pro = {
 	.name = "PCAN-USB Pro",
 	.device_id = PCAN_USBPRO_PRODUCT_ID,
@@ -1012,17 +1024,7 @@
 	.clock = {
 		.freq = PCAN_USBPRO_CRYSTAL_HZ,
 	},
-	.bittiming_const = {
-		.name = "pcan_usb_pro",
-		.tseg1_min = 1,
-		.tseg1_max = 16,
-		.tseg2_min = 1,
-		.tseg2_max = 8,
-		.sjw_max = 4,
-		.brp_min = 1,
-		.brp_max = 1024,
-		.brp_inc = 1,
-	},
+	.bittiming_const = &pcan_usb_pro_const,
 
 	/* size of device private data */
 	.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7ad0a4d..4c483d9 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -46,13 +46,13 @@
 	  ethernet switches chips.
 
 config NET_DSA_MV88E6352
-	tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
+	tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
 	depends on NET_DSA
 	select NET_DSA_MV88E6XXX
 	select NET_DSA_TAG_EDSA
 	---help---
-	  This enables support for the Marvell 88E6172, 88E6176 and 88E6352
-	  ethernet switch chips.
+	  This enables support for the Marvell 88E6172, 88E6176, 88E6320,
+	  88E6321 and 88E6352 ethernet switch chips.
 
 config NET_DSA_BCM_SF2
 	tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 079897b..289e2044 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -901,15 +901,11 @@
 					 struct fixed_phy_status *status)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
-	u32 duplex, pause, speed;
+	u32 duplex, pause;
 	u32 reg;
 
 	duplex = core_readl(priv, CORE_DUPSTS);
 	pause = core_readl(priv, CORE_PAUSESTS);
-	speed = core_readl(priv, CORE_SPDSTS);
-
-	speed >>= (port * SPDSTS_SHIFT);
-	speed &= SPDSTS_MASK;
 
 	status->link = 0;
 
@@ -944,18 +940,6 @@
 		reg &= ~LINK_STS;
 	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
 
-	switch (speed) {
-	case SPDSTS_10:
-		status->speed = SPEED_10;
-		break;
-	case SPDSTS_100:
-		status->speed = SPEED_100;
-		break;
-	case SPDSTS_1000:
-		status->speed = SPEED_1000;
-		break;
-	}
-
 	if ((pause & (1 << port)) &&
 	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
 		status->asym_pause = 1;
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
index 71a29a7..3de2a6d 100644
--- a/drivers/net/dsa/mv88e6123_61_65.c
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -129,6 +129,7 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 #ifdef CONFIG_NET_DSA_HWMON
 	.get_temp		= mv88e6xxx_get_temp,
 #endif
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
index 32f4a08..3e83865 100644
--- a/drivers/net/dsa/mv88e6131.c
+++ b/drivers/net/dsa/mv88e6131.c
@@ -182,6 +182,7 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 };
 
 MODULE_ALIAS("platform:mv88e6085");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1c78084..d54b740 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -108,6 +108,7 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 #ifdef CONFIG_NET_DSA_HWMON
 	.get_temp               = mv88e6xxx_get_temp,
 #endif
@@ -116,9 +117,9 @@
 	.port_join_bridge       = mv88e6xxx_join_bridge,
 	.port_leave_bridge      = mv88e6xxx_leave_bridge,
 	.port_stp_update        = mv88e6xxx_port_stp_update,
-	.fdb_add		= mv88e6xxx_port_fdb_add,
-	.fdb_del		= mv88e6xxx_port_fdb_del,
-	.fdb_getnext		= mv88e6xxx_port_fdb_getnext,
+	.port_fdb_add		= mv88e6xxx_port_fdb_add,
+	.port_fdb_del		= mv88e6xxx_port_fdb_del,
+	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
 };
 
 MODULE_ALIAS("platform:mv88e6171");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index 632815c..1f5129c 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -36,6 +36,18 @@
 			return "Marvell 88E6172";
 		if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
 			return "Marvell 88E6176";
+		if (ret == PORT_SWITCH_ID_6320_A1)
+			return "Marvell 88E6320 (A1)";
+		if (ret == PORT_SWITCH_ID_6320_A2)
+			return "Marvell 88E6320 (A2)";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6320)
+			return "Marvell 88E6320";
+		if (ret == PORT_SWITCH_ID_6321_A1)
+			return "Marvell 88E6321 (A1)";
+		if (ret == PORT_SWITCH_ID_6321_A2)
+			return "Marvell 88E6321 (A2)";
+		if ((ret & 0xfff0) == PORT_SWITCH_ID_6321)
+			return "Marvell 88E6321";
 		if (ret == PORT_SWITCH_ID_6352_A0)
 			return "Marvell 88E6352 (A0)";
 		if (ret == PORT_SWITCH_ID_6352_A1)
@@ -80,66 +92,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
-	int ret;
-
-	*temp = 0;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
-	if (ret < 0)
-		return ret;
-
-	*temp = (ret & 0xff) - 25;
-
-	return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
-	int ret;
-
-	*temp = 0;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-	if (ret < 0)
-		return ret;
-
-	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
-	return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
-	int ret;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-	if (ret < 0)
-		return ret;
-	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-	return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
-					(ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
-	int ret;
-
-	*alarm = false;
-
-	ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-	if (ret < 0)
-		return ret;
-
-	*alarm = !!(ret & 0x40);
-
-	return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 static int mv88e6352_setup(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -171,8 +123,9 @@
 
 	mutex_lock(&ps->eeprom_mutex);
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
-				  0xc000 | (addr & 0xff));
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+				  GLOBAL2_EEPROM_OP_READ |
+				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
 	if (ret < 0)
 		goto error;
 
@@ -180,7 +133,7 @@
 	if (ret < 0)
 		goto error;
 
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x15);
+	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
 error:
 	mutex_unlock(&ps->eeprom_mutex);
 	return ret;
@@ -253,11 +206,11 @@
 {
 	int ret;
 
-	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, 0x14);
+	ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
 	if (ret < 0)
 		return ret;
 
-	if (!(ret & 0x0400))
+	if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
 		return -EROFS;
 
 	return 0;
@@ -271,12 +224,13 @@
 
 	mutex_lock(&ps->eeprom_mutex);
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x15, data);
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
 	if (ret < 0)
 		goto error;
 
-	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, 0x14,
-				  0xb000 | (addr & 0xff));
+	ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+				  GLOBAL2_EEPROM_OP_WRITE |
+				  (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
 	if (ret < 0)
 		goto error;
 
@@ -374,13 +328,14 @@
 	.get_strings		= mv88e6xxx_get_strings,
 	.get_ethtool_stats	= mv88e6xxx_get_ethtool_stats,
 	.get_sset_count		= mv88e6xxx_get_sset_count,
+	.adjust_link		= mv88e6xxx_adjust_link,
 	.set_eee		= mv88e6xxx_set_eee,
 	.get_eee		= mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
-	.get_temp		= mv88e6352_get_temp,
-	.get_temp_limit		= mv88e6352_get_temp_limit,
-	.set_temp_limit		= mv88e6352_set_temp_limit,
-	.get_temp_alarm		= mv88e6352_get_temp_alarm,
+	.get_temp		= mv88e6xxx_get_temp,
+	.get_temp_limit		= mv88e6xxx_get_temp_limit,
+	.set_temp_limit		= mv88e6xxx_set_temp_limit,
+	.get_temp_alarm		= mv88e6xxx_get_temp_alarm,
 #endif
 	.get_eeprom		= mv88e6352_get_eeprom,
 	.set_eeprom		= mv88e6352_set_eeprom,
@@ -389,10 +344,18 @@
 	.port_join_bridge	= mv88e6xxx_join_bridge,
 	.port_leave_bridge	= mv88e6xxx_leave_bridge,
 	.port_stp_update	= mv88e6xxx_port_stp_update,
-	.fdb_add		= mv88e6xxx_port_fdb_add,
-	.fdb_del		= mv88e6xxx_port_fdb_del,
-	.fdb_getnext		= mv88e6xxx_port_fdb_getnext,
+	.port_pvid_get		= mv88e6xxx_port_pvid_get,
+	.port_pvid_set		= mv88e6xxx_port_pvid_set,
+	.port_vlan_add		= mv88e6xxx_port_vlan_add,
+	.port_vlan_del		= mv88e6xxx_port_vlan_del,
+	.vlan_getnext		= mv88e6xxx_vlan_getnext,
+	.port_fdb_add		= mv88e6xxx_port_fdb_add,
+	.port_fdb_del		= mv88e6xxx_port_fdb_del,
+	.port_fdb_getnext	= mv88e6xxx_port_fdb_getnext,
 };
 
-MODULE_ALIAS("platform:mv88e6352");
 MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6176");
+MODULE_ALIAS("platform:mv88e6320");
+MODULE_ALIAS("platform:mv88e6321");
+MODULE_ALIAS("platform:mv88e6352");
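
As a quick sanity check, the GLOBAL2_EEPROM_OP_* mnemonics introduced in the EEPROM hunks above encode exactly the magic numbers they replace. The definitions below are copied from the mv88e6xxx.h hunk later in this section (with BIT(15) expanded so the snippet compiles standalone); the assertions are added here purely for illustration.

#define GLOBAL2_EEPROM_OP_BUSY		(1 << 15)
#define GLOBAL2_EEPROM_OP_WRITE		((3 << 12) | GLOBAL2_EEPROM_OP_BUSY)
#define GLOBAL2_EEPROM_OP_READ		((4 << 12) | GLOBAL2_EEPROM_OP_BUSY)

/* 0x8000 | 0x4000: the old 0xc000 read command */
_Static_assert(GLOBAL2_EEPROM_OP_READ == 0xc000, "read op unchanged");
/* 0x8000 | 0x3000: the old 0xb000 write command */
_Static_assert(GLOBAL2_EEPROM_OP_WRITE == 0xb000, "write op unchanged");
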
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 5613424..6f13f72 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2,6 +2,9 @@
  * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
  * Copyright (c) 2008 Marvell Semiconductor
  *
+ * Copyright (c) 2015 CMC Electronics, Inc.
+ *	Added support for VLAN Table Unit operations
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -11,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/if_bridge.h>
 #include <linux/jiffies.h>
 #include <linux/list.h>
@@ -391,6 +395,7 @@
 	for (i = 0; i < DSA_MAX_PORTS; i++) {
 		struct net_device *dev;
 		int uninitialized_var(port_status);
+		int pcs_ctrl;
 		int link;
 		int speed;
 		int duplex;
@@ -400,6 +405,10 @@
 		if (dev == NULL)
 			continue;
 
+		pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
+		if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
+			continue;
+
 		link = 0;
 		if (dev->flags & IFF_UP) {
 			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
@@ -517,6 +526,18 @@
 	return false;
 }
 
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+	switch (ps->id) {
+	case PORT_SWITCH_ID_6320:
+	case PORT_SWITCH_ID_6321:
+		return true;
+	}
+	return false;
+}
+
 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -545,6 +566,73 @@
 	return false;
 }
 
+/* We expect the switch to perform auto-negotiation if there is a real
+ * PHY. However, in the case of a fixed-link PHY, we force the port
+ * settings from the fixed-link settings.
+ */
+void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+			   struct phy_device *phydev)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u32 ret, reg;
+
+	if (!phy_is_pseudo_fixed_link(phydev))
+		return;
+
+	mutex_lock(&ps->smi_mutex);
+
+	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+	if (ret < 0)
+		goto out;
+
+	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
+		      PORT_PCS_CTRL_FORCE_LINK |
+		      PORT_PCS_CTRL_DUPLEX_FULL |
+		      PORT_PCS_CTRL_FORCE_DUPLEX |
+		      PORT_PCS_CTRL_UNFORCED);
+
+	reg |= PORT_PCS_CTRL_FORCE_LINK;
+	if (phydev->link)
+		reg |= PORT_PCS_CTRL_LINK_UP;
+
+	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+		goto out;
+
+	switch (phydev->speed) {
+	case SPEED_1000:
+		reg |= PORT_PCS_CTRL_1000;
+		break;
+	case SPEED_100:
+		reg |= PORT_PCS_CTRL_100;
+		break;
+	case SPEED_10:
+		reg |= PORT_PCS_CTRL_10;
+		break;
+	default:
+		pr_info("Unknown speed\n");
+		goto out;
+	}
+
+	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
+	if (phydev->duplex == DUPLEX_FULL)
+		reg |= PORT_PCS_CTRL_DUPLEX_FULL;
+
+	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
+	    (port >= ps->num_ports - 2)) {
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
+		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
+				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
+	}
+	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+
+out:
+	mutex_unlock(&ps->smi_mutex);
+}
+
 /* Must be called with SMI mutex held */
 static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
 {
@@ -565,7 +653,7 @@
 {
 	int ret;
 
-	if (mv88e6xxx_6352_family(ds))
+	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
 		port = (port + 1) << 5;
 
 	/* Snapshot the hardware statistics counters for this port. */
@@ -796,54 +884,6 @@
 	}
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	int ret;
-	int val;
-
-	*temp = 0;
-
-	mutex_lock(&ps->smi_mutex);
-
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
-	if (ret < 0)
-		goto error;
-
-	/* Enable temperature sensor */
-	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-	if (ret < 0)
-		goto error;
-
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
-	if (ret < 0)
-		goto error;
-
-	/* Wait for temperature to stabilize */
-	usleep_range(10000, 12000);
-
-	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-	if (val < 0) {
-		ret = val;
-		goto error;
-	}
-
-	/* Disable temperature sensor */
-	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
-	if (ret < 0)
-		goto error;
-
-	*temp = ((val & 0x1f) - 5) * 5;
-
-error:
-	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-	mutex_unlock(&ps->smi_mutex);
-	return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
 			   u16 mask)
@@ -1000,7 +1040,7 @@
 {
 	int ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x01, fid);
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
 	if (ret < 0)
 		return ret;
 
@@ -1127,7 +1167,7 @@
 	ps->bridge_mask[fid] = br_port_mask;
 
 	if (fid != ps->fid[port]) {
-		ps->fid_mask |= 1 << ps->fid[port];
+		clear_bit(ps->fid[port], ps->fid_bitmap);
 		ps->fid[port] = fid;
 		ret = _mv88e6xxx_update_bridge_config(ds, fid);
 	}
@@ -1161,9 +1201,16 @@
 
 	mutex_lock(&ps->smi_mutex);
 
-	newfid = __ffs(ps->fid_mask);
+	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
+	if (unlikely(newfid > ps->num_ports)) {
+		netdev_err(ds->ports[port], "all of the first %d FIDs are in use\n",
+			   ps->num_ports);
+		ret = -ENOSPC;
+		goto unlock;
+	}
+
 	ps->fid[port] = newfid;
-	ps->fid_mask &= ~(1 << newfid);
+	set_bit(newfid, ps->fid_bitmap);
 	ps->bridge_mask[fid] &= ~(1 << port);
 	ps->bridge_mask[newfid] = 1 << port;
 
@@ -1171,6 +1218,7 @@
 	if (!ret)
 		ret = _mv88e6xxx_update_bridge_config(ds, newfid);
 
+unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
@@ -1210,8 +1258,476 @@
 	return 0;
 }
 
-static int __mv88e6xxx_write_addr(struct dsa_switch *ds,
-				  const unsigned char *addr)
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+{
+	int ret;
+
+	ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+	if (ret < 0)
+		return ret;
+
+	*pvid = ret & PORT_DEFAULT_VLAN_MASK;
+
+	return 0;
+}
+
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+{
+	return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+				   pvid & PORT_DEFAULT_VLAN_MASK);
+}
+
+static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+{
+	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+			       GLOBAL_VTU_OP_BUSY);
+}
+
+static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+{
+	int ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_wait(ds);
+}
+
+static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+{
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+}
+
+static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+					struct mv88e6xxx_vtu_stu_entry *entry,
+					unsigned int nibble_offset)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u16 regs[3];
+	int i;
+	int ret;
+
+	for (i = 0; i < 3; ++i) {
+		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+					  GLOBAL_VTU_DATA_0_3 + i);
+		if (ret < 0)
+			return ret;
+
+		regs[i] = ret;
+	}
+
+	for (i = 0; i < ps->num_ports; ++i) {
+		unsigned int shift = (i % 4) * 4 + nibble_offset;
+		u16 reg = regs[i / 4];
+
+		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
+	}
+
+	return 0;
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+					 struct mv88e6xxx_vtu_stu_entry *entry,
+					 unsigned int nibble_offset)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	u16 regs[3] = { 0 };
+	int i;
+	int ret;
+
+	for (i = 0; i < ps->num_ports; ++i) {
+		unsigned int shift = (i % 4) * 4 + nibble_offset;
+		u8 data = entry->data[i];
+
+		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
+	}
+
+	for (i = 0; i < 3; ++i) {
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
+				  struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	struct mv88e6xxx_vtu_stu_entry next = { 0 };
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+				   vid & GLOBAL_VTU_VID_MASK);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+	if (ret < 0)
+		return ret;
+
+	next.vid = ret & GLOBAL_VTU_VID_MASK;
+	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+	if (next.valid) {
+		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+		if (ret < 0)
+			return ret;
+
+		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+						  GLOBAL_VTU_FID);
+			if (ret < 0)
+				return ret;
+
+			next.fid = ret & GLOBAL_VTU_FID_MASK;
+
+			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+						  GLOBAL_VTU_SID);
+			if (ret < 0)
+				return ret;
+
+			next.sid = ret & GLOBAL_VTU_SID_MASK;
+		}
+	}
+
+	*entry = next;
+	return 0;
+}
+
+static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+				    struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	u16 reg = 0;
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (!entry->valid)
+		goto loadpurge;
+
+	/* Write port member tags */
+	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+	if (ret < 0)
+		return ret;
+
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+		reg = entry->sid & GLOBAL_VTU_SID_MASK;
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+		if (ret < 0)
+			return ret;
+
+		reg = entry->fid & GLOBAL_VTU_FID_MASK;
+		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+		if (ret < 0)
+			return ret;
+	}
+
+	reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+				  struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	struct mv88e6xxx_vtu_stu_entry next = { 0 };
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+				   sid & GLOBAL_VTU_SID_MASK);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+	if (ret < 0)
+		return ret;
+
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+	if (ret < 0)
+		return ret;
+
+	next.sid = ret & GLOBAL_VTU_SID_MASK;
+
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+	if (ret < 0)
+		return ret;
+
+	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
+
+	if (next.valid) {
+		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+		if (ret < 0)
+			return ret;
+	}
+
+	*entry = next;
+	return 0;
+}
+
+static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+				    struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	u16 reg = 0;
+	int ret;
+
+	ret = _mv88e6xxx_vtu_wait(ds);
+	if (ret < 0)
+		return ret;
+
+	if (!entry->valid)
+		goto loadpurge;
+
+	/* Write port states */
+	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+	if (ret < 0)
+		return ret;
+
+	reg = GLOBAL_VTU_VID_VALID;
+loadpurge:
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+	if (ret < 0)
+		return ret;
+
+	reg = entry->sid & GLOBAL_VTU_SID_MASK;
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+	if (ret < 0)
+		return ret;
+
+	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+}
+
+static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
+				struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan = {
+		.valid = true,
+		.vid = vid,
+	};
+	int i;
+
+	/* exclude all ports except the CPU */
+	for (i = 0; i < ps->num_ports; ++i)
+		vlan.data[i] = dsa_is_cpu_port(ds, i) ?
+			GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
+			GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
+	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+		struct mv88e6xxx_vtu_stu_entry vstp;
+		int err;
+
+		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
+		 * implemented, only one STU entry is needed to cover all VTU
+		 * entries. Thus, validate SID 0.
+		 */
+		vlan.sid = 0;
+		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+		if (err)
+			return err;
+
+		if (vstp.sid != vlan.sid || !vstp.valid) {
+			memset(&vstp, 0, sizeof(vstp));
+			vstp.valid = true;
+			vstp.sid = vlan.sid;
+
+			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+			if (err)
+				return err;
+		}
+
+		/* Non-bridged ports and bridge groups use FIDs from 1 to
+		 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
+		 */
+		vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
+					      ps->num_ports + 1);
+		if (unlikely(vlan.fid == VLAN_N_VID)) {
+			pr_err("no FID available for VLAN %d\n", vid);
+			return -ENOSPC;
+		}
+
+		err = _mv88e6xxx_flush_fid(ds, vlan.fid);
+		if (err)
+			return err;
+
+		set_bit(vlan.fid, ps->fid_bitmap);
+	}
+
+	*entry = vlan;
+	return 0;
+}
+
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+			    bool untagged)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan;
+	int err;
+
+	mutex_lock(&ps->smi_mutex);
+	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+	if (err)
+		goto unlock;
+
+	if (vlan.vid != vid || !vlan.valid) {
+		err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
+		if (err)
+			goto unlock;
+	}
+
+	vlan.data[port] = untagged ?
+		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
+		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
+
+	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return err;
+}
+
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan;
+	bool keep = false;
+	int i, err;
+
+	mutex_lock(&ps->smi_mutex);
+
+	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
+	if (err)
+		goto unlock;
+
+	if (vlan.vid != vid || !vlan.valid ||
+	    vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+		err = -ENOENT;
+		goto unlock;
+	}
+
+	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
+
+	/* keep the VLAN unless all ports are excluded */
+	for (i = 0; i < ps->num_ports; ++i) {
+		if (dsa_is_cpu_port(ds, i))
+			continue;
+
+		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
+			keep = true;
+			break;
+		}
+	}
+
+	vlan.valid = keep;
+	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+	if (err)
+		goto unlock;
+
+	if (!keep)
+		clear_bit(vlan.fid, ps->fid_bitmap);
+
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return err;
+}
+
+static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
+				       struct mv88e6xxx_vtu_stu_entry *entry)
+{
+	int err;
+
+	do {
+		if (vid == 4095)
+			return -ENOENT;
+
+		err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
+		if (err)
+			return err;
+
+		if (!entry->valid)
+			return -ENOENT;
+
+		vid = entry->vid;
+	} while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
+		 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
+
+	return 0;
+}
+
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+			   unsigned long *ports, unsigned long *untagged)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry next;
+	int port;
+	int err;
+
+	if (*vid == 4095)
+		return -ENOENT;
+
+	mutex_lock(&ps->smi_mutex);
+	err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
+	mutex_unlock(&ps->smi_mutex);
+
+	if (err)
+		return err;
+
+	if (!next.valid)
+		return -ENOENT;
+
+	*vid = next.vid;
+
+	for (port = 0; port < ps->num_ports; ++port) {
+		clear_bit(port, ports);
+		clear_bit(port, untagged);
+
+		if (dsa_is_cpu_port(ds, port))
+			continue;
+
+		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
+		    next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+			set_bit(port, ports);
+
+		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
+			set_bit(port, untagged);
+	}
+
+	return 0;
+}
+
+static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+				    const unsigned char *addr)
 {
 	int i, ret;
 
@@ -1226,7 +1742,7 @@
 	return 0;
 }
 
-static int __mv88e6xxx_read_addr(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
 {
 	int i, ret;
 
@@ -1242,29 +1758,83 @@
 	return 0;
 }
 
-static int __mv88e6xxx_port_fdb_cmd(struct dsa_switch *ds, int port,
-				    const unsigned char *addr, int state)
+static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+			       struct mv88e6xxx_atu_entry *entry)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u8 fid = ps->fid[port];
+	u16 reg = 0;
 	int ret;
 
 	ret = _mv88e6xxx_atu_wait(ds);
 	if (ret < 0)
 		return ret;
 
-	ret = __mv88e6xxx_write_addr(ds, addr);
+	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
 	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA,
-				   (0x10 << port) | state);
-	if (ret)
+	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+		unsigned int mask, shift;
+
+		if (entry->trunk) {
+			reg |= GLOBAL_ATU_DATA_TRUNK;
+			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+		} else {
+			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+		}
+
+		reg |= (entry->portv_trunkid << shift) & mask;
+	}
+
+	reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
+
+	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
+	if (ret < 0)
 		return ret;
 
-	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_LOAD_DB);
+	return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
+}
 
-	return ret;
+static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_vtu_stu_entry vlan;
+	int err;
+
+	if (vid == 0)
+		return ps->fid[port];
+
+	err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
+	if (err)
+		return err;
+
+	if (vlan.vid == vid)
+		return vlan.fid;
+
+	return -ENOENT;
+}
+
+static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+				    const unsigned char *addr, u16 vid,
+				    u8 state)
+{
+	struct mv88e6xxx_atu_entry entry = { 0 };
+	int ret;
+
+	ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
+	if (ret < 0)
+		return ret;
+
+	entry.fid = ret;
+	entry.state = state;
+	ether_addr_copy(entry.mac, addr);
+	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+		entry.trunk = false;
+		entry.portv_trunkid = BIT(port);
+	}
+
+	return _mv88e6xxx_atu_load(ds, &entry);
 }
 
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
@@ -1277,7 +1847,7 @@
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr, state);
+	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
@@ -1290,61 +1860,105 @@
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = __mv88e6xxx_port_fdb_cmd(ds, port, addr,
+	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
 				       GLOBAL_ATU_DATA_STATE_UNUSED);
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
 }
 
-static int __mv88e6xxx_port_getnext(struct dsa_switch *ds, int port,
-				    unsigned char *addr, bool *is_static)
+static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+				  const unsigned char *addr,
+				  struct mv88e6xxx_atu_entry *entry)
 {
-	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-	u8 fid = ps->fid[port];
-	int ret, state;
+	struct mv88e6xxx_atu_entry next = { 0 };
+	int ret;
+
+	next.fid = fid;
 
 	ret = _mv88e6xxx_atu_wait(ds);
 	if (ret < 0)
 		return ret;
 
-	ret = __mv88e6xxx_write_addr(ds, addr);
+	ret = _mv88e6xxx_atu_mac_write(ds, addr);
 	if (ret < 0)
 		return ret;
 
-	do {
-		ret = _mv88e6xxx_atu_cmd(ds, fid,  GLOBAL_ATU_OP_GET_NEXT_DB);
-		if (ret < 0)
-			return ret;
-
-		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
-		if (ret < 0)
-			return ret;
-		state = ret & GLOBAL_ATU_DATA_STATE_MASK;
-		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
-			return -ENOENT;
-	} while (!(((ret >> 4) & 0xff) & (1 << port)));
-
-	ret = __mv88e6xxx_read_addr(ds, addr);
+	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
 	if (ret < 0)
 		return ret;
 
-	*is_static = state == (is_multicast_ether_addr(addr) ?
-			       GLOBAL_ATU_DATA_STATE_MC_STATIC :
-			       GLOBAL_ATU_DATA_STATE_UC_STATIC);
+	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
+	if (ret < 0)
+		return ret;
 
+	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+	if (ret < 0)
+		return ret;
+
+	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
+	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
+		unsigned int mask, shift;
+
+		if (ret & GLOBAL_ATU_DATA_TRUNK) {
+			next.trunk = true;
+			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
+			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
+		} else {
+			next.trunk = false;
+			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
+			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
+		}
+
+		next.portv_trunkid = (ret & mask) >> shift;
+	}
+
+	*entry = next;
 	return 0;
 }
 
 /* get next entry for port */
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-			       unsigned char *addr, bool *is_static)
+			       unsigned char *addr, u16 *vid, bool *is_static)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	struct mv88e6xxx_atu_entry next;
+	u16 fid;
 	int ret;
 
 	mutex_lock(&ps->smi_mutex);
-	ret = __mv88e6xxx_port_getnext(ds, port, addr, is_static);
+
+	ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
+	if (ret < 0)
+		goto unlock;
+	fid = ret;
+
+	do {
+		if (is_broadcast_ether_addr(addr)) {
+			struct mv88e6xxx_vtu_stu_entry vtu;
+
+			ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
+			if (ret < 0)
+				goto unlock;
+
+			*vid = vtu.vid;
+			fid = vtu.fid;
+		}
+
+		ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
+		if (ret < 0)
+			goto unlock;
+
+		ether_addr_copy(addr, next.mac);
+
+		if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
+			continue;
+	} while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
+
+	*is_static = next.state == (is_multicast_ether_addr(addr) ?
+				    GLOBAL_ATU_DATA_STATE_MC_STATIC :
+				    GLOBAL_ATU_DATA_STATE_UC_STATIC);
+unlock:
 	mutex_unlock(&ps->smi_mutex);
 
 	return ret;
@@ -1377,7 +1991,7 @@
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
 	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-	    mv88e6xxx_6065_family(ds)) {
+	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
 		/* MAC Forcing register: don't force link, speed,
 		 * duplex or flow control state to any particular
 		 * values on physical ports, but force the CPU port
@@ -1385,8 +1999,7 @@
 		 * full duplex.
 		 */
 		reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
-		if (dsa_is_cpu_port(ds, port) ||
-		    ds->dsa_port_mask & (1 << port)) {
+		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
 			reg |= PORT_PCS_CTRL_FORCE_LINK |
 				PORT_PCS_CTRL_LINK_UP |
 				PORT_PCS_CTRL_DUPLEX_FULL |
@@ -1423,7 +2036,7 @@
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
 	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-	    mv88e6xxx_6185_family(ds))
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
 		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
 		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
 		PORT_CONTROL_STATE_FORWARDING;
@@ -1431,7 +2044,8 @@
 		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
 			reg |= PORT_CONTROL_DSA_TAG;
 		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+		    mv88e6xxx_6320_family(ds)) {
 			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
 				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
 			else
@@ -1441,16 +2055,20 @@
 		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
 		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-		    mv88e6xxx_6185_family(ds)) {
+		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
 			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
 				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
 		}
 	}
-	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
-		if (ds->dsa_port_mask & (1 << port))
+	if (dsa_is_dsa_port(ds, port)) {
+		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+			reg |= PORT_CONTROL_DSA_TAG;
+		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
+		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+		    mv88e6xxx_6320_family(ds)) {
 			reg |= PORT_CONTROL_FRAME_MODE_DSA;
+		}
+
 		if (port == dsa_upstream_port(ds))
 			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
 				PORT_CONTROL_FORWARD_UNKNOWN_MC;
@@ -1462,22 +2080,20 @@
 			goto abort;
 	}
 
-	/* Port Control 2: don't force a good FCS, set the maximum
-	 * frame size to 10240 bytes, don't let the switch add or
-	 * strip 802.1q tags, don't discard tagged or untagged frames
-	 * on this port, do a destination address lookup on all
-	 * received packets as usual, disable ARP mirroring and don't
-	 * send a copy of all transmitted/received frames on this port
-	 * to the CPU.
+	/* Port Control 2: don't force a good FCS, set the maximum frame size to
+	 * 10240 bytes, enable 802.1q fallback mode, don't discard tagged or
+	 * untagged frames on this port, do a destination address lookup on all
+	 * received packets as usual, disable ARP mirroring and don't send a
+	 * copy of all transmitted/received frames on this port to the CPU.
 	 */
 	reg = 0;
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6095_family(ds))
+	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
 		reg = PORT_CONTROL_2_MAP_DA;
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds))
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
 		reg |= PORT_CONTROL_2_JUMBO_10240;
 
 	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
@@ -1490,6 +2106,8 @@
 			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
 	}
 
+	reg |= PORT_CONTROL_2_8021Q_FALLBACK;
+
 	if (reg) {
 		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
 					   PORT_CONTROL_2, reg);
@@ -1514,7 +2132,8 @@
 		goto abort;
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Do not limit the period of time that this port can
 		 * be paused for by the remote end or the period of
 		 * time that this port can pause the remote end.
@@ -1564,7 +2183,8 @@
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Rate Control: disable ingress rate limiting. */
 		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
 					   PORT_RATE_CONTROL, 0x0001);
@@ -1584,9 +2204,9 @@
 	 * ports, and allow each of the 'real' ports to only talk to
 	 * the upstream port.
 	 */
-	fid = __ffs(ps->fid_mask);
+	fid = port + 1;
 	ps->fid[port] = fid;
-	ps->fid_mask &= ~(1 << fid);
+	set_bit(fid, ps->fid_bitmap);
 
 	if (!dsa_is_cpu_port(ds, port))
 		ps->bridge_mask[fid] = 1 << port;
@@ -1683,7 +2303,7 @@
 	unsigned char addr[6];
 	int ret, data, state;
 
-	ret = __mv88e6xxx_write_addr(ds, bcast);
+	ret = _mv88e6xxx_atu_mac_write(ds, bcast);
 	if (ret < 0)
 		return ret;
 
@@ -1698,7 +2318,7 @@
 		state = data & GLOBAL_ATU_DATA_STATE_MASK;
 		if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
 			break;
-		ret = __mv88e6xxx_read_addr(ds, addr);
+		ret = _mv88e6xxx_atu_mac_read(ds, addr);
 		if (ret < 0)
 			return ret;
 		mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
@@ -1885,8 +2505,6 @@
 
 	ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
 
-	ps->fid_mask = (1 << DSA_MAX_PORTS) - 1;
-
 	INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
 
 	name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
@@ -1913,6 +2531,7 @@
 int mv88e6xxx_setup_global(struct dsa_switch *ds)
 {
 	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
 	int i;
 
 	/* Set the default address aging time to 5 minutes, and
@@ -1976,7 +2595,8 @@
 			  (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Send all frames with destination addresses matching
 		 * 01:80:c2:00:00:2x to the CPU port.
 		 */
@@ -1995,7 +2615,8 @@
 
 	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
 	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+	    mv88e6xxx_6320_family(ds)) {
 		/* Disable ingress rate limiting by resetting all
 		 * ingress rate limit registers to their initial
 		 * state.
@@ -2009,9 +2630,17 @@
 	REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
 
 	/* Wait for the flush to complete. */
-	_mv88e6xxx_stats_wait(ds);
+	mutex_lock(&ps->smi_mutex);
+	ret = _mv88e6xxx_stats_wait(ds);
+	if (ret < 0)
+		goto unlock;
 
-	return 0;
+	/* Clear all the VTU and STU entries */
+	ret = _mv88e6xxx_vtu_stu_flush(ds);
+unlock:
+	mutex_unlock(&ps->smi_mutex);
+
+	return ret;
 }
 
 int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
@@ -2162,6 +2791,132 @@
 	return ret;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+	int ret;
+	int val;
+
+	*temp = 0;
+
+	mutex_lock(&ps->smi_mutex);
+
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+	if (ret < 0)
+		goto error;
+
+	/* Enable temperature sensor */
+	ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (ret < 0)
+		goto error;
+
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+	if (ret < 0)
+		goto error;
+
+	/* Wait for temperature to stabilize */
+	usleep_range(10000, 12000);
+
+	val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+	if (val < 0) {
+		ret = val;
+		goto error;
+	}
+
+	/* Disable temperature sensor */
+	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+	if (ret < 0)
+		goto error;
+
+	*temp = ((val & 0x1f) - 5) * 5;
+
+error:
+	_mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+	mutex_unlock(&ps->smi_mutex);
+	return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	*temp = 0;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+	if (ret < 0)
+		return ret;
+
+	*temp = (ret & 0xff) - 25;
+
+	return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+		return mv88e63xx_get_temp(ds, temp);
+
+	return mv88e61xx_get_temp(ds, temp);
+}
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	*temp = 0;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+	return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+	temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+	return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+					(ret & 0xe0ff) | (temp << 8));
+}
+
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+	int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+	int ret;
+
+	if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+		return -EOPNOTSUPP;
+
+	*alarm = false;
+
+	ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+	if (ret < 0)
+		return ret;
+
+	*alarm = !!(ret & 0x40);
+
+	return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
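
A note on the _mv88e6xxx_vtu_stu_data_read()/_write() helpers added to mv88e6xxx.c above: each 16-bit GLOBAL_VTU_DATA register packs four ports, one nibble per port, with the 2-bit VTU member-tag value at nibble offset 0 and the 2-bit STU port state at nibble offset 2, which is why the VTU and STU load/getnext paths call the helpers with nibble_offset 0 and 2 respectively. Below is a standalone userspace sketch of the same shift arithmetic, purely illustrative (the printf stands in for the register access).

#include <stdio.h>

int main(void)
{
	unsigned int port, nibble_offset = 0;	/* 0: VTU member tag, 2: STU state */

	for (port = 0; port < 7; port++) {
		unsigned int reg = port / 4;			/* GLOBAL_VTU_DATA_0_3 + reg */
		unsigned int shift = (port % 4) * 4 + nibble_offset;

		printf("port %u -> data register %u, bits [%u:%u]\n",
		       port, reg, shift + 1, shift);
	}
	return 0;
}
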
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index a650b26..9b6f3d9 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -11,6 +11,8 @@
 #ifndef __MV88E6XXX_H
 #define __MV88E6XXX_H
 
+#include <linux/if_vlan.h>
+
 #ifndef UINT64_MAX
 #define UINT64_MAX		(u64)(~((u64)0))
 #endif
@@ -44,6 +46,8 @@
 #define PORT_STATUS_TX_PAUSED	BIT(5)
 #define PORT_STATUS_FLOW_CTRL	BIT(4)
 #define PORT_PCS_CTRL		0x01
+#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK	BIT(15)
+#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK	BIT(14)
 #define PORT_PCS_CTRL_FC		BIT(7)
 #define PORT_PCS_CTRL_FORCE_FC		BIT(6)
 #define PORT_PCS_CTRL_LINK_UP		BIT(5)
@@ -89,7 +93,12 @@
 #define PORT_SWITCH_ID_6182	0x1a60
 #define PORT_SWITCH_ID_6185	0x1a70
 #define PORT_SWITCH_ID_6240	0x2400
-#define PORT_SWITCH_ID_6320	0x1250
+#define PORT_SWITCH_ID_6320	0x1150
+#define PORT_SWITCH_ID_6320_A1	0x1151
+#define PORT_SWITCH_ID_6320_A2	0x1152
+#define PORT_SWITCH_ID_6321	0x3100
+#define PORT_SWITCH_ID_6321_A1	0x3101
+#define PORT_SWITCH_ID_6321_A2	0x3102
 #define PORT_SWITCH_ID_6350	0x3710
 #define PORT_SWITCH_ID_6351	0x3750
 #define PORT_SWITCH_ID_6352	0x3520
@@ -124,6 +133,7 @@
 #define PORT_CONTROL_1		0x05
 #define PORT_BASE_VLAN		0x06
 #define PORT_DEFAULT_VLAN	0x07
+#define PORT_DEFAULT_VLAN_MASK	0xfff
 #define PORT_CONTROL_2		0x08
 #define PORT_CONTROL_2_IGNORE_FCS	BIT(15)
 #define PORT_CONTROL_2_VTU_PRI_OVERRIDE	BIT(14)
@@ -132,6 +142,11 @@
 #define PORT_CONTROL_2_JUMBO_1522	(0x00 << 12)
 #define PORT_CONTROL_2_JUMBO_2048	(0x01 << 12)
 #define PORT_CONTROL_2_JUMBO_10240	(0x02 << 12)
+#define PORT_CONTROL_2_8021Q_MASK	(0x03 << 10)
+#define PORT_CONTROL_2_8021Q_DISABLED	(0x00 << 10)
+#define PORT_CONTROL_2_8021Q_FALLBACK	(0x01 << 10)
+#define PORT_CONTROL_2_8021Q_CHECK	(0x02 << 10)
+#define PORT_CONTROL_2_8021Q_SECURE	(0x03 << 10)
 #define PORT_CONTROL_2_DISCARD_TAGGED	BIT(9)
 #define PORT_CONTROL_2_DISCARD_UNTAGGED	BIT(8)
 #define PORT_CONTROL_2_MAP_DA		BIT(7)
@@ -164,6 +179,11 @@
 #define GLOBAL_MAC_01		0x01
 #define GLOBAL_MAC_23		0x02
 #define GLOBAL_MAC_45		0x03
+#define GLOBAL_ATU_FID		0x01	/* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID		0x02	/* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_FID_MASK	0xfff
+#define GLOBAL_VTU_SID		0x03	/* 6097 6165 6351 6352 */
+#define GLOBAL_VTU_SID_MASK	0x3f
 #define GLOBAL_CONTROL		0x04
 #define GLOBAL_CONTROL_SW_RESET		BIT(15)
 #define GLOBAL_CONTROL_PPU_ENABLE	BIT(14)
@@ -180,10 +200,27 @@
 #define GLOBAL_CONTROL_TCAM_EN		BIT(1)
 #define GLOBAL_CONTROL_EEPROM_DONE_EN	BIT(0)
 #define GLOBAL_VTU_OP		0x05
+#define GLOBAL_VTU_OP_BUSY	BIT(15)
+#define GLOBAL_VTU_OP_FLUSH_ALL		((0x01 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_LOAD_PURGE	((0x03 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_VTU_GET_NEXT	((0x04 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_LOAD_PURGE	((0x05 << 12) | GLOBAL_VTU_OP_BUSY)
+#define GLOBAL_VTU_OP_STU_GET_NEXT	((0x06 << 12) | GLOBAL_VTU_OP_BUSY)
 #define GLOBAL_VTU_VID		0x06
+#define GLOBAL_VTU_VID_MASK	0xfff
+#define GLOBAL_VTU_VID_VALID	BIT(12)
 #define GLOBAL_VTU_DATA_0_3	0x07
 #define GLOBAL_VTU_DATA_4_7	0x08
 #define GLOBAL_VTU_DATA_8_11	0x09
+#define GLOBAL_VTU_STU_DATA_MASK		0x03
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED	0x00
+#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED	0x01
+#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED	0x02
+#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER	0x03
+#define GLOBAL_STU_DATA_PORT_STATE_DISABLED	0x00
+#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING	0x01
+#define GLOBAL_STU_DATA_PORT_STATE_LEARNING	0x02
+#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING	0x03
 #define GLOBAL_ATU_CONTROL	0x0a
 #define GLOBAL_ATU_CONTROL_LEARN2ALL	BIT(3)
 #define GLOBAL_ATU_OP		0x0b
@@ -198,6 +235,8 @@
 #define GLOBAL_ATU_OP_GET_CLR_VIOLATION	  ((7 << 12) | GLOBAL_ATU_OP_BUSY)
 #define GLOBAL_ATU_DATA		0x0c
 #define GLOBAL_ATU_DATA_TRUNK			BIT(15)
+#define GLOBAL_ATU_DATA_TRUNK_ID_MASK		0x00f0
+#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT		4
 #define GLOBAL_ATU_DATA_PORT_VECTOR_MASK	0x3ff0
 #define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT	4
 #define GLOBAL_ATU_DATA_STATE_MASK		0x0f
@@ -280,8 +319,12 @@
 #define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP		BIT(3)
 #define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT		0
 #define GLOBAL2_EEPROM_OP	0x14
-#define GLOBAL2_EEPROM_OP_BUSY	BIT(15)
-#define GLOBAL2_EEPROM_OP_LOAD	BIT(11)
+#define GLOBAL2_EEPROM_OP_BUSY		BIT(15)
+#define GLOBAL2_EEPROM_OP_WRITE		((3 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_READ		((4 << 12) | GLOBAL2_EEPROM_OP_BUSY)
+#define GLOBAL2_EEPROM_OP_LOAD		BIT(11)
+#define GLOBAL2_EEPROM_OP_WRITE_EN	BIT(10)
+#define GLOBAL2_EEPROM_OP_ADDR_MASK	0xff
 #define GLOBAL2_EEPROM_DATA	0x15
 #define GLOBAL2_PTP_AVB_OP	0x16
 #define GLOBAL2_PTP_AVB_DATA	0x17
@@ -304,6 +347,25 @@
 #define GLOBAL2_QOS_WEIGHT	0x1c
 #define GLOBAL2_MISC		0x1d
 
+struct mv88e6xxx_atu_entry {
+	u16	fid;
+	u8	state;
+	bool	trunk;
+	u16	portv_trunkid;
+	u8	mac[ETH_ALEN];
+};
+
+struct mv88e6xxx_vtu_stu_entry {
+	/* VTU only */
+	u16	vid;
+	u16	fid;
+
+	/* VTU and STU */
+	u8	sid;
+	bool	valid;
+	u8	data[DSA_MAX_PORTS];
+};
+
 struct mv88e6xxx_priv_state {
 	/* When using multi-chip addressing, this mutex protects
 	 * access to the indirect access registers.  (In single-chip
@@ -342,9 +404,9 @@
 
 	/* hw bridging */
 
-	u32 fid_mask;
-	u8 fid[DSA_MAX_PORTS];
-	u16 bridge_mask[DSA_MAX_PORTS];
+	DECLARE_BITMAP(fid_bitmap, VLAN_N_VID);	/* FIDs 1 to 4095 available */
+	u16 fid[DSA_MAX_PORTS];			/* per (non-bridged) port FID */
+	u16 bridge_mask[DSA_MAX_PORTS];		/* br groups (indexed by FID) */
 
 	unsigned long port_state_update_mask;
 	u8 port_state[DSA_MAX_PORTS];
@@ -386,10 +448,15 @@
 				 uint64_t *data);
 int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
 int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
+void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+			   struct phy_device *phydev);
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
 			struct ethtool_regs *regs, void *_p);
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -401,15 +468,23 @@
 int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask);
 int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
+int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid);
+int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+			    bool untagged);
+int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid);
+int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
+			   unsigned long *ports, unsigned long *untagged);
 int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
 			   const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
 			   const unsigned char *addr, u16 vid);
 int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
-			       unsigned char *addr, bool *is_static);
+			       unsigned char *addr, u16 *vid, bool *is_static);
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
 			     int reg, int val);
+
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 49adbf1..815eb94 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -144,10 +144,9 @@
 	dev->destructor = free_netdev;
 
 	/* Fill in device structure with ethernet-generic values. */
-	dev->tx_queue_len = 0;
 	dev->flags |= IFF_NOARP;
 	dev->flags &= ~IFF_MULTICAST;
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 	dev->features	|= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
 	dev->features	|= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
 	eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 753887d..2839af0 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1726,6 +1726,7 @@
 	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
 		iowrite32(0x8000, vp->cb_fn_base + 4);
 	netif_start_queue (dev);
+	netdev_reset_queue(dev);
 err_out:
 	return err;
 }
@@ -1935,16 +1936,18 @@
 		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
 			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
 				 ioaddr + DownListPtr);
-		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
 			netif_wake_queue (dev);
+			netdev_reset_queue (dev);
+		}
 		if (vp->drv_flags & IS_BOOMERANG)
 			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
 		iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	} else {
 		dev->stats.tx_dropped++;
 		netif_wake_queue(dev);
+		netdev_reset_queue(dev);
 	}
-
 	/* Issue Tx Enable */
 	iowrite16(TxEnable, ioaddr + EL3_CMD);
 	dev->trans_start = jiffies; /* prevent tx timeout */
@@ -2063,6 +2066,7 @@
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
+	int skblen = skb->len;
 
 	/* Put out the doubleword header... */
 	iowrite32(skb->len, ioaddr + TX_FIFO);
@@ -2094,6 +2098,7 @@
 		}
 	}
 
+	netdev_sent_queue(dev, skblen);
 
 	/* Clear the Tx status stack. */
 	{
@@ -2125,6 +2130,7 @@
 	void __iomem *ioaddr = vp->ioaddr;
 	/* Calculate the next Tx descriptor entry. */
 	int entry = vp->cur_tx % TX_RING_SIZE;
+	int skblen = skb->len;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
 	unsigned long flags;
 	dma_addr_t dma_addr;
@@ -2230,6 +2236,8 @@
 	}
 
 	vp->cur_tx++;
+	netdev_sent_queue(dev, skblen);
+
 	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
 		netif_stop_queue (dev);
 	} else {					/* Clear previous interrupt enable. */
@@ -2267,6 +2275,7 @@
 	int status;
 	int work_done = max_interrupt_work;
 	int handled = 0;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	ioaddr = vp->ioaddr;
 	spin_lock(&vp->lock);
@@ -2314,6 +2323,8 @@
 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
 				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				pkts_compl++;
+				bytes_compl += vp->tx_skb->len;
 				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
 				if (ioread16(ioaddr + TxFree) > 1536) {
 					/*
@@ -2358,6 +2369,7 @@
 		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
 
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 	spin_unlock(&vp->window_lock);
 
 	if (vortex_debug > 4)
@@ -2382,6 +2394,7 @@
 	int status;
 	int work_done = max_interrupt_work;
 	int handled = 0;
+	unsigned int bytes_compl = 0, pkts_compl = 0;
 
 	ioaddr = vp->ioaddr;
 
@@ -2455,6 +2468,8 @@
 					pci_unmap_single(VORTEX_PCI(vp),
 						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
 #endif
+					pkts_compl++;
+					bytes_compl += skb->len;
 					dev_kfree_skb_irq(skb);
 					vp->tx_skbuff[entry] = NULL;
 				} else {
@@ -2495,6 +2510,7 @@
 			iowrite32(0x8000, vp->cb_fn_base + 4);
 
 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
+	netdev_completed_queue(dev, pkts_compl, bytes_compl);
 
 	if (vortex_debug > 4)
 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
@@ -2696,7 +2712,8 @@
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
 
-	netif_stop_queue (dev);
+	netdev_reset_queue(dev);
+	netif_stop_queue(dev);
 
 	del_timer_sync(&vp->rx_oom_timer);
 	del_timer_sync(&vp->timer);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f3bb178..05aa759 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -167,6 +167,7 @@
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
 source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c51014b..ddfc808 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -65,7 +65,7 @@
 obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/
 obj-$(CONFIG_NET_VENDOR_QUALCOMM) += qualcomm/
 obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
-obj-$(CONFIG_SH_ETH) += renesas/
+obj-$(CONFIG_NET_VENDOR_RENESAS) += renesas/
 obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
 obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
 obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
@@ -77,6 +77,7 @@
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
 obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index bab01c84..48ce83e 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -28,6 +28,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/phy.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
 
 #include "sun4i-emac.h"
 
@@ -857,11 +858,17 @@
 
 	clk_prepare_enable(db->clk);
 
+	ret = sunxi_sram_claim(&pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: couldn't map SRAM to device\n");
+		goto out;
+	}
+
 	db->phy_node = of_parse_phandle(np, "phy", 0);
 	if (!db->phy_node) {
 		dev_err(&pdev->dev, "no associated PHY\n");
 		ret = -ENODEV;
-		goto out;
+		goto out_release_sram;
 	}
 
 	/* Read MAC-address from DT */
@@ -893,7 +900,7 @@
 	if (ret) {
 		dev_err(&pdev->dev, "Registering netdev failed!\n");
 		ret = -ENODEV;
-		goto out;
+		goto out_release_sram;
 	}
 
 	dev_info(&pdev->dev, "%s: at %p, IRQ %d MAC: %pM\n",
@@ -901,6 +908,8 @@
 
 	return 0;
 
+out_release_sram:
+	sunxi_sram_release(&pdev->dev);
 out:
 	dev_err(db->dev, "not found (%d).\n", ret);
 
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 580553d..88ef67a 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -71,8 +71,6 @@
 		      SGDMA_CTRLREG_INTEN |
 		      SGDMA_CTRLREG_ILASTD;
 
-	priv->sgdmadesclen = sizeof(struct sgdma_descrip);
-
 	INIT_LIST_HEAD(&priv->txlisthd);
 	INIT_LIST_HEAD(&priv->rxlisthd);
 
@@ -254,7 +252,7 @@
 		unsigned int pktstatus = 0;
 		dma_sync_single_for_cpu(priv->device,
 					priv->rxdescphys,
-					priv->sgdmadesclen,
+					SGDMA_DESC_LEN,
 					DMA_FROM_DEVICE);
 
 		pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
@@ -374,7 +372,7 @@
 
 		dma_sync_single_for_device(priv->device,
 					   priv->rxdescphys,
-					   priv->sgdmadesclen,
+					   SGDMA_DESC_LEN,
 					   DMA_TO_DEVICE);
 
 		csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
@@ -402,7 +400,7 @@
 	csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));
 
 	dma_sync_single_for_device(priv->device, priv->txdescphys,
-				   priv->sgdmadesclen, DMA_TO_DEVICE);
+				   SGDMA_DESC_LEN, DMA_TO_DEVICE);
 
 	csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
 		priv->tx_dma_csr,
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
index 85bc33b..bbd52f0 100644
--- a/drivers/net/ethernet/altera/altera_sgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -50,6 +50,7 @@
 	u8	control;
 } __packed;
 
+#define SGDMA_DESC_LEN	sizeof(struct sgdma_descrip)
 
 #define SGDMA_STATUS_ERR		BIT(0)
 #define SGDMA_STATUS_LENGTH_ERR		BIT(1)
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 2adb24d..103c30d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -458,7 +458,6 @@
 	u32 rxctrlreg;
 	dma_addr_t rxdescphys;
 	dma_addr_t txdescphys;
-	size_t sgdmadesclen;
 
 	struct list_head txlisthd;
 	struct list_head rxlisthd;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 717ce21..8c9d01e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -140,7 +140,7 @@
 
 #define XGBE_TX_MAX_BUF_SIZE	(0x3fff & ~(64 - 1))
 
-/* Descriptors required for maximum contigous TSO/GSO packet */
+/* Descriptors required for maximum contiguous TSO/GSO packet */
 #define XGBE_TX_MAX_SPLIT	((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
 
 /* Maximum possible descriptors needed for an SKB:
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index a626c43..cfa3704 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -801,6 +801,9 @@
 
 void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
 {
+	if (pdata->phy_dev)
+		phy_disconnect(pdata->phy_dev);
+
 	mdiobus_unregister(pdata->mdio_bus);
 	mdiobus_free(pdata->mdio_bus);
 	pdata->mdio_bus = NULL;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 541bed0..ff05bbc 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -193,12 +193,16 @@
 #define USERINFO_LEN			32
 #define FPQNUM_POS			32
 #define FPQNUM_LEN			12
+#define NV_POS				50
+#define NV_LEN				1
+#define LL_POS				51
+#define LL_LEN				1
 #define LERR_POS			60
 #define LERR_LEN			3
 #define STASH_POS			52
 #define STASH_LEN			2
 #define BUFDATALEN_POS			48
-#define BUFDATALEN_LEN			12
+#define BUFDATALEN_LEN			15
 #define DATAADDR_POS			0
 #define DATAADDR_LEN			42
 #define COHERENT_POS			63
@@ -215,9 +219,19 @@
 #define IPHDR_LEN			6
 #define EC_POS				22	/* Enable checksum */
 #define EC_LEN				1
+#define ET_POS				23	/* Enable TSO */
 #define IS_POS				24	/* IP protocol select */
 #define IS_LEN				1
 #define TYPE_ETH_WORK_MESSAGE_POS	44
+#define LL_BYTES_MSB_POS		56
+#define LL_BYTES_MSB_LEN		8
+#define LL_BYTES_LSB_POS		48
+#define LL_BYTES_LSB_LEN		12
+#define LL_LEN_POS			48
+#define LL_LEN_LEN			8
+#define DATALEN_MASK			GENMASK(11, 0)
+
+#define LAST_BUFFER			(0x7800ULL << BUFDATALEN_POS)
 
 struct xgene_enet_raw_desc {
 	__le64 m0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 299eb43..e47298f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -147,18 +147,27 @@
 {
 	struct sk_buff *skb;
 	struct device *dev;
+	skb_frag_t *frag;
+	dma_addr_t *frag_dma_addr;
 	u16 skb_index;
 	u8 status;
-	int ret = 0;
+	int i, ret = 0;
 
 	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
 	skb = cp_ring->cp_skb[skb_index];
+	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];
 
 	dev = ndev_to_dev(cp_ring->ndev);
 	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
-			 GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1)),
+			 skb_headlen(skb),
 			 DMA_TO_DEVICE);
 
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		frag = &skb_shinfo(skb)->frags[i];
+		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
+			       DMA_TO_DEVICE);
+	}
+
 	/* Checking for error */
 	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
 	if (unlikely(status > 2)) {
@@ -179,12 +188,16 @@
 
 static u64 xgene_enet_work_msg(struct sk_buff *skb)
 {
+	struct net_device *ndev = skb->dev;
+	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
 	struct iphdr *iph;
-	u8 l3hlen, l4hlen = 0;
-	u8 csum_enable = 0;
-	u8 proto = 0;
-	u8 ethhdr;
-	u64 hopinfo;
+	u8 l3hlen = 0, l4hlen = 0;
+	u8 ethhdr, proto = 0, csum_enable = 0;
+	u64 hopinfo = 0;
+	u32 hdr_len, mss = 0;
+	u32 i, len, nr_frags;
+
+	ethhdr = xgene_enet_hdr_len(skb->data);
 
 	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
 	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
@@ -201,14 +214,40 @@
 		l4hlen = tcp_hdrlen(skb) >> 2;
 		csum_enable = 1;
 		proto = TSO_IPPROTO_TCP;
+		if (ndev->features & NETIF_F_TSO) {
+			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
+			mss = skb_shinfo(skb)->gso_size;
+
+			if (skb_is_nonlinear(skb)) {
+				len = skb_headlen(skb);
+				nr_frags = skb_shinfo(skb)->nr_frags;
+
+				for (i = 0; i < 2 && i < nr_frags; i++)
+					len += skb_shinfo(skb)->frags[i].size;
+
+				/* HW requires the header to reside in the first 3 buffers */
+				if (unlikely(hdr_len > len)) {
+					if (skb_linearize(skb))
+						return 0;
+				}
+			}
+
+			if (!mss || ((skb->len - hdr_len) <= mss))
+				goto out;
+
+			if (mss != pdata->mss) {
+				pdata->mss = mss;
+				pdata->mac_ops->set_mss(pdata);
+			}
+			hopinfo |= SET_BIT(ET);
+		}
 	} else if (iph->protocol == IPPROTO_UDP) {
 		l4hlen = UDP_HDR_SIZE;
 		csum_enable = 1;
 	}
 out:
 	l3hlen = ip_hdrlen(skb) >> 2;
-	ethhdr = xgene_enet_hdr_len(skb->data);
-	hopinfo = SET_VAL(TCPHDR, l4hlen) |
+	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
 		  SET_VAL(IPHDR, l3hlen) |
 		  SET_VAL(ETHHDR, ethhdr) |
 		  SET_VAL(EC, csum_enable) |
@@ -219,35 +258,170 @@
 	return hopinfo;
 }
 
+static u16 xgene_enet_encode_len(u16 len)
+{
+	return (len == BUFLEN_16K) ? 0 : len;
+}
+
+static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
+{
+	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
+				    SET_VAL(BUFDATALEN, len));
+}
+
+static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
+{
+	__le64 *exp_bufs;
+
+	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
+	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
+	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);
+
+	return exp_bufs;
+}
+
+static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
+{
+	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
+}
+
 static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
 				    struct sk_buff *skb)
 {
 	struct device *dev = ndev_to_dev(tx_ring->ndev);
 	struct xgene_enet_raw_desc *raw_desc;
-	dma_addr_t dma_addr;
+	__le64 *exp_desc = NULL, *exp_bufs = NULL;
+	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
+	skb_frag_t *frag;
 	u16 tail = tx_ring->tail;
 	u64 hopinfo;
+	u32 len, hw_len;
+	u8 ll = 0, nv = 0, idx = 0;
+	bool split = false;
+	u32 size, offset, ell_bytes = 0;
+	u32 i, fidx, nr_frags, count = 1;
 
 	raw_desc = &tx_ring->raw_desc[tail];
+	tail = (tail + 1) & (tx_ring->slots - 1);
 	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));
 
-	dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+	hopinfo = xgene_enet_work_msg(skb);
+	if (!hopinfo)
+		return -EINVAL;
+	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
+				   hopinfo);
+
+	len = skb_headlen(skb);
+	hw_len = xgene_enet_encode_len(len);
+
+	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, dma_addr)) {
 		netdev_err(tx_ring->ndev, "DMA mapping error\n");
 		return -EINVAL;
 	}
 
 	/* Hardware expects descriptor in little endian format */
-	raw_desc->m0 = cpu_to_le64(tail);
 	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
-				   SET_VAL(BUFDATALEN, skb->len) |
+				   SET_VAL(BUFDATALEN, hw_len) |
 				   SET_BIT(COHERENT));
-	hopinfo = xgene_enet_work_msg(skb);
-	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
-				   hopinfo);
-	tx_ring->cp_ring->cp_skb[tail] = skb;
 
-	return 0;
+	if (!skb_is_nonlinear(skb))
+		goto out;
+
+	/* scatter gather */
+	nv = 1;
+	exp_desc = (void *)&tx_ring->raw_desc[tail];
+	tail = (tail + 1) & (tx_ring->slots - 1);
+	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (i = nr_frags; i < 4 ; i++)
+		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);
+
+	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);
+
+	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
+		if (!split) {
+			frag = &skb_shinfo(skb)->frags[fidx];
+			size = skb_frag_size(frag);
+			offset = 0;
+
+			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
+						     DMA_TO_DEVICE);
+			if (dma_mapping_error(dev, pbuf_addr))
+				return -EINVAL;
+
+			frag_dma_addr[fidx] = pbuf_addr;
+			fidx++;
+
+			if (size > BUFLEN_16K)
+				split = true;
+		}
+
+		if (size > BUFLEN_16K) {
+			len = BUFLEN_16K;
+			size -= BUFLEN_16K;
+		} else {
+			len = size;
+			split = false;
+		}
+
+		dma_addr = pbuf_addr + offset;
+		hw_len = xgene_enet_encode_len(len);
+
+		switch (i) {
+		case 0:
+		case 1:
+		case 2:
+			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
+			break;
+		case 3:
+			if (split || (fidx != nr_frags)) {
+				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
+				xgene_set_addr_len(exp_bufs, idx, dma_addr,
+						   hw_len);
+				idx++;
+				ell_bytes += len;
+			} else {
+				xgene_set_addr_len(exp_desc, i, dma_addr,
+						   hw_len);
+			}
+			break;
+		default:
+			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
+			idx++;
+			ell_bytes += len;
+			break;
+		}
+
+		if (split)
+			offset += BUFLEN_16K;
+	}
+	count++;
+
+	if (idx) {
+		ll = 1;
+		dma_addr = dma_map_single(dev, exp_bufs,
+					  sizeof(u64) * MAX_EXP_BUFFS,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_addr)) {
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+		i = ell_bytes >> LL_BYTES_LSB_LEN;
+		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
+					  SET_VAL(LL_BYTES_MSB, i) |
+					  SET_VAL(LL_LEN, idx));
+		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
+	}
+
+out:
+	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
+				   SET_VAL(USERINFO, tx_ring->tail));
+	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+	tx_ring->tail = tail;
+
+	return count;
 }
 
 static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
@@ -257,6 +431,7 @@
 	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
 	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
 	u32 tx_level, cq_level;
+	int count;
 
 	tx_level = pdata->ring_ops->len(tx_ring);
 	cq_level = pdata->ring_ops->len(cp_ring);
@@ -266,14 +441,17 @@
 		return NETDEV_TX_BUSY;
 	}
 
-	if (xgene_enet_setup_tx_desc(tx_ring, skb)) {
+	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
+		return NETDEV_TX_OK;
+
+	count = xgene_enet_setup_tx_desc(tx_ring, skb);
+	if (count <= 0) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	pdata->ring_ops->wr_cmd(tx_ring, 1);
+	pdata->ring_ops->wr_cmd(tx_ring, count);
 	skb_tx_timestamp(skb);
-	tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1);
 
 	pdata->stats.tx_packets++;
 	pdata->stats.tx_bytes += skb->len;
@@ -326,7 +504,7 @@
 
 	/* strip off CRC as HW isn't doing this */
 	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
-	datalen -= 4;
+	datalen = (datalen & DATALEN_MASK) - 4;
 	prefetch(skb->data - NET_IP_ALIGN);
 	skb_put(skb, datalen);
 
@@ -358,26 +536,41 @@
 				   int budget)
 {
 	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
-	struct xgene_enet_raw_desc *raw_desc;
+	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
 	u16 head = ring->head;
 	u16 slots = ring->slots - 1;
-	int ret, count = 0;
+	int ret, count = 0, processed = 0;
 
 	do {
 		raw_desc = &ring->raw_desc[head];
+		exp_desc = NULL;
 		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
 			break;
 
 		/* read fpqnum field after dataaddr field */
 		dma_rmb();
+		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
+			head = (head + 1) & slots;
+			exp_desc = &ring->raw_desc[head];
+
+			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
+				head = (head - 1) & slots;
+				break;
+			}
+			dma_rmb();
+			count++;
+		}
 		if (is_rx_desc(raw_desc))
 			ret = xgene_enet_rx_frame(ring, raw_desc);
 		else
 			ret = xgene_enet_tx_completion(ring, raw_desc);
 		xgene_enet_mark_desc_slot_empty(raw_desc);
+		if (exp_desc)
+			xgene_enet_mark_desc_slot_empty(exp_desc);
 
 		head = (head + 1) & slots;
 		count++;
+		processed++;
 
 		if (ret)
 			break;
@@ -393,7 +586,7 @@
 		}
 	}
 
-	return count;
+	return processed;
 }
 
 static int xgene_enet_napi(struct napi_struct *napi, const int budget)
@@ -738,12 +931,13 @@
 	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool = NULL;
 	enum xgene_ring_owner owner;
+	dma_addr_t dma_exp_bufs;
 	u8 cpu_bufnum = pdata->cpu_bufnum;
 	u8 eth_bufnum = pdata->eth_bufnum;
 	u8 bp_bufnum = pdata->bp_bufnum;
 	u16 ring_num = pdata->ring_num;
 	u16 ring_id;
-	int ret;
+	int ret, size;
 
 	/* allocate rx descriptor ring */
 	owner = xgene_derive_ring_owner(pdata);
@@ -794,6 +988,15 @@
 		ret = -ENOMEM;
 		goto err;
 	}
+
+	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
+	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
+						GFP_KERNEL);
+	if (!tx_ring->exp_bufs) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	pdata->tx_ring = tx_ring;
 
 	if (!pdata->cq_cnt) {
@@ -818,6 +1021,16 @@
 		ret = -ENOMEM;
 		goto err;
 	}
+
+	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
+	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
+					      size, GFP_KERNEL);
+	if (!cp_ring->frag_dma_addr) {
+		devm_kfree(dev, cp_ring->cp_skb);
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	pdata->tx_ring->cp_ring = cp_ring;
 	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
 
@@ -905,40 +1118,6 @@
 	return ret;
 }
 
-static int xgene_get_mac_address(struct device *dev,
-				 unsigned char *addr)
-{
-	int ret;
-
-	ret = device_property_read_u8_array(dev, "local-mac-address", addr, 6);
-	if (ret)
-		ret = device_property_read_u8_array(dev, "mac-address",
-						    addr, 6);
-	if (ret)
-		return -ENODEV;
-
-	return ETH_ALEN;
-}
-
-static int xgene_get_phy_mode(struct device *dev)
-{
-	int i, ret;
-	char *modestr;
-
-	ret = device_property_read_string(dev, "phy-connection-type",
-					  (const char **)&modestr);
-	if (ret)
-		ret = device_property_read_string(dev, "phy-mode",
-						  (const char **)&modestr);
-	if (ret)
-		return -ENODEV;
-
-	for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
-		if (!strcasecmp(modestr, phy_modes(i)))
-			return i;
-	}
-	return -ENODEV;
-}
 
 static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
 {
@@ -998,12 +1177,12 @@
 	if (ret)
 		return ret;
 
-	if (xgene_get_mac_address(dev, ndev->dev_addr) != ETH_ALEN)
+	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
 		eth_hw_addr_random(ndev);
 
 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
 
-	pdata->phy_mode = xgene_get_phy_mode(dev);
+	pdata->phy_mode = device_get_phy_mode(dev);
 	if (pdata->phy_mode < 0) {
 		dev_err(dev, "Unable to get phy-connection-type\n");
 		return pdata->phy_mode;
@@ -1207,7 +1386,8 @@
 	xgene_enet_set_ethtool_ops(ndev);
 	ndev->features |= NETIF_F_IP_CSUM |
 			  NETIF_F_GSO |
-			  NETIF_F_GRO;
+			  NETIF_F_GRO |
+			  NETIF_F_SG;
 
 	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
 	if (of_id) {
@@ -1233,6 +1413,12 @@
 
 	xgene_enet_setup_ops(pdata);
 
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+		ndev->features |= NETIF_F_TSO;
+		pdata->mss = XGENE_ENET_MSS;
+	}
+	ndev->hw_features = ndev->features;
+
 	ret = register_netdev(ndev);
 	if (ret) {
 		netdev_err(ndev, "Failed to register netdev\n");
@@ -1277,9 +1463,10 @@
 	mac_ops->tx_disable(pdata);
 
 	xgene_enet_napi_del(pdata);
-	xgene_enet_mdio_remove(pdata);
-	xgene_enet_delete_desc_rings(pdata);
+	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+		xgene_enet_mdio_remove(pdata);
 	unregister_netdev(ndev);
+	xgene_enet_delete_desc_rings(pdata);
 	pdata->port_ops->shutdown(pdata);
 	free_netdev(ndev);
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 1c85fc8..50f92c3 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -40,8 +40,12 @@
 #define XGENE_DRV_VERSION	"v1.0"
 #define XGENE_ENET_MAX_MTU	1536
 #define SKB_BUFFER_SIZE		(XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
+#define BUFLEN_16K	(16 * 1024)
 #define NUM_PKT_BUF	64
 #define NUM_BUFPOOL	32
+#define MAX_EXP_BUFFS	256
+#define XGENE_ENET_MSS	1448
+#define XGENE_MIN_ENET_FRAME_SIZE	60
 
 #define START_CPU_BUFNUM_0	0
 #define START_ETH_BUFNUM_0	2
@@ -79,6 +83,7 @@
 	u16 num;
 	u16 head;
 	u16 tail;
+	u16 exp_buf_tail;
 	u16 slots;
 	u16 irq;
 	char irq_name[IRQ_ID_SIZE];
@@ -93,6 +98,7 @@
 	u8 nbufpool;
 	struct sk_buff *(*rx_skb);
 	struct sk_buff *(*cp_skb);
+	dma_addr_t *frag_dma_addr;
 	enum xgene_enet_ring_cfgsize cfgsize;
 	struct xgene_enet_desc_ring *cp_ring;
 	struct xgene_enet_desc_ring *buf_pool;
@@ -102,6 +108,7 @@
 		struct xgene_enet_raw_desc *raw_desc;
 		struct xgene_enet_raw_desc16 *raw_desc16;
 	};
+	__le64 *exp_bufs;
 };
 
 struct xgene_mac_ops {
@@ -112,6 +119,7 @@
 	void (*tx_disable)(struct xgene_enet_pdata *pdata);
 	void (*rx_disable)(struct xgene_enet_pdata *pdata);
 	void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
+	void (*set_mss)(struct xgene_enet_pdata *pdata);
 	void (*link_state)(struct work_struct *work);
 };
 
@@ -170,6 +178,7 @@
 	u8 eth_bufnum;
 	u8 bp_bufnum;
 	u16 ring_num;
+	u32 mss;
 };
 
 struct xgene_indirect_ctl {
@@ -204,6 +213,9 @@
 #define GET_VAL(field, src) \
 		xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
 
+#define GET_BIT(field, src) \
+		xgene_enet_get_field_value(field ## _POS, 1, src)
+
 static inline struct device *ndev_to_dev(struct net_device *ndev)
 {
 	return ndev->dev.parent;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 05edb84..7a28a48 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -184,6 +184,11 @@
 	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
 }
 
+static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
+{
+	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
+}
+
 static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
 {
 	u32 data;
@@ -204,8 +209,8 @@
 	data &= ~HSTLENCHK;
 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
 
-	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR, 0x06000600);
 	xgene_xgmac_set_mac_addr(pdata);
+	xgene_xgmac_set_mss(pdata);
 
 	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
 	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
@@ -329,6 +334,7 @@
 	.rx_disable = xgene_xgmac_rx_disable,
 	.tx_disable = xgene_xgmac_tx_disable,
 	.set_mac_addr = xgene_xgmac_set_mac_addr,
+	.set_mss = xgene_xgmac_set_mss,
 	.link_state = xgene_enet_link_state
 };
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index bf0a994..f8f908d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -62,7 +62,9 @@
 #define XCLE_BYPASS_REG0_ADDR           0x0160
 #define XCLE_BYPASS_REG1_ADDR           0x0164
 #define XG_CFG_BYPASS_ADDR		0x0204
+#define XG_CFG_LINK_AGGR_RESUME_0_ADDR	0x0214
 #define XG_LINK_STATUS_ADDR		0x0228
+#define XG_TSIF_MSS_REG0_ADDR		0x02a4
 #define XG_ENET_SPARE_CFG_REG_ADDR	0x040c
 #define XG_ENET_SPARE_CFG_REG_1_ADDR	0x0410
 #define XGENET_RX_DV_GATE_REG_0_ADDR	0x0804
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 932bd18..2795d6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -874,6 +874,8 @@
 		atl1c_clean_buffer(pdev, buffer_info);
 	}
 
+	netdev_reset_queue(adapter->netdev);
+
 	/* Zero out Tx-buffers */
 	memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
 		ring_count);
@@ -1551,6 +1553,7 @@
 	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
 	u16 hw_next_to_clean;
 	u16 reg;
+	unsigned int total_bytes = 0, total_packets = 0;
 
 	reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
 
@@ -1558,12 +1561,18 @@
 
 	while (next_to_clean != hw_next_to_clean) {
 		buffer_info = &tpd_ring->buffer_info[next_to_clean];
+		if (buffer_info->skb) {
+			total_bytes += buffer_info->skb->len;
+			total_packets++;
+		}
 		atl1c_clean_buffer(pdev, buffer_info);
 		if (++next_to_clean == tpd_ring->count)
 			next_to_clean = 0;
 		atomic_set(&tpd_ring->next_to_clean, next_to_clean);
 	}
 
+	netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+
 	if (netif_queue_stopped(adapter->netdev) &&
 			netif_carrier_ok(adapter->netdev)) {
 		netif_wake_queue(adapter->netdev);
@@ -2256,6 +2265,7 @@
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		dev_kfree_skb_any(skb);
 	} else {
+		netdev_sent_queue(adapter->netdev, skb->len);
 		atl1c_tx_queue(adapter, skb, tpd, type);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 	}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 8be9eab..e930aa9 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,6 +139,16 @@
 	  Virtualization support in the 578xx and 57712 products. This
 	  allows for virtual function acceleration in virtual environments.
 
+config BNX2X_VXLAN
+	bool "Virtual eXtensible Local Area Network support"
+	default n
+	depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
+	---help---
+	  This enables hardware offload support for the VXLAN protocol over the
+	  NetXtremeII series adapters.
+	  Say Y here if you want to enable hardware offload support for
+	  Virtual eXtensible Local Area Network (VXLAN) in the driver.
+
 config BGMAC
 	tristate "BCMA bus GBit core support"
 	depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 4566cdf..b9a5a97 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -933,6 +933,21 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+	disable_irq(priv->irq0);
+	bcm_sysport_rx_isr(priv->irq0, priv);
+	enable_irq(priv->irq0);
+
+	disable_irq(priv->irq1);
+	bcm_sysport_tx_isr(priv->irq1, priv);
+	enable_irq(priv->irq1);
+}
+#endif
+
 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
 					      struct net_device *dev)
 {
@@ -1723,6 +1738,9 @@
 	.ndo_set_features	= bcm_sysport_set_features,
 	.ndo_set_rx_mode	= bcm_sysport_set_rx_mode,
 	.ndo_set_mac_address	= bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bcm_sysport_poll_controller,
+#endif
 };
 
 #define REV_FMT	"v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 21e3c38..28f7610 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1447,7 +1447,7 @@
 	struct phy_device *phy_dev;
 	int err;
 
-	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
 	if (!phy_dev || IS_ERR(phy_dev)) {
 		bgmac_err(bgmac, "Failed to register fixed PHY device\n");
 		return -ENODEV;
@@ -1549,11 +1549,20 @@
 	struct net_device *net_dev;
 	struct bgmac *bgmac;
 	struct ssb_sprom *sprom = &core->bus->sprom;
-	u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
+	u8 *mac;
 	int err;
 
-	/* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
-	if (core->core_unit > 1) {
+	switch (core->core_unit) {
+	case 0:
+		mac = sprom->et0mac;
+		break;
+	case 1:
+		mac = sprom->et1mac;
+		break;
+	case 2:
+		mac = sprom->et2mac;
+		break;
+	default:
 		pr_err("Unsupported core_unit %d\n", core->core_unit);
 		return -ENOTSUPP;
 	}
@@ -1588,8 +1597,17 @@
 	}
 	bgmac->cmn = core->bus->drv_gmac_cmn.core;
 
-	bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
-			 sprom->et0phyaddr;
+	switch (core->core_unit) {
+	case 0:
+		bgmac->phyaddr = sprom->et0phyaddr;
+		break;
+	case 1:
+		bgmac->phyaddr = sprom->et1phyaddr;
+		break;
+	case 2:
+		bgmac->phyaddr = sprom->et2phyaddr;
+		break;
+	}
 	bgmac->phyaddr &= BGMAC_PHY_MASK;
 	if (bgmac->phyaddr == BGMAC_PHY_MASK) {
 		bgmac_err(bgmac, "No PHY found\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index cd4ae76..ba93663 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.710.51-0"
+#define DRV_MODULE_VERSION      "1.712.30-0"
 #define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
@@ -1227,6 +1229,10 @@
 	} mac_rdata;
 
 	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_rdata;
+
+	union {
 		struct tstorm_eth_mac_filter_config	e1x;
 		struct eth_filter_rules_ramrod_data	e2;
 	} rx_mode_rdata;
@@ -1386,6 +1392,8 @@
 	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 	BNX2X_SP_RTNL_TX_STOP,
 	BNX2X_SP_RTNL_GET_DRV_VERSION,
+	BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+	BNX2X_SP_RTNL_DEL_VXLAN_PORT,
 };
 
 enum bnx2x_iov_flag {
@@ -1408,6 +1416,9 @@
 
 	/* Queue State object */
 	struct bnx2x_queue_sp_obj q_obj;
+
+	/* VLANs object */
+	struct bnx2x_vlan_mac_obj vlan_obj;
 };
 
 struct bnx2x_fp_stats {
@@ -1422,6 +1433,13 @@
 	SUB_MF_MODE_UNKNOWN = 0,
 	SUB_MF_MODE_UFP,
 	SUB_MF_MODE_NPAR1_DOT_5,
+	SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+	struct list_head link;
+	u16 vid;
+	bool hw;
 };
 
 struct bnx2x {
@@ -1636,6 +1654,8 @@
 	u8			mf_sub_mode;
 #define IS_MF_UFP(bp)		(IS_MF_SD(bp) && \
 				 bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp)		(IS_MF_SD(bp) && \
+				 bp->mf_sub_mode == SUB_MF_MODE_BD)
 
 	u8			wol;
 
@@ -1860,8 +1880,6 @@
 	int					dcb_version;
 
 	/* CAM credit pools */
-
-	/* used only in sriov */
 	struct bnx2x_credit_pool_obj		vlans_pool;
 
 	struct bnx2x_credit_pool_obj		macs_pool;
@@ -1924,6 +1942,11 @@
 	u16 rx_filter;
 
 	struct bnx2x_link_report_data		vf_link_vars;
+	struct list_head vlan_reg;
+	u16 vlan_cnt;
+	u16 vlan_credit;
+	u16 vxlan_dst_port;
+	bool accept_any_vlan;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1974,14 @@
 #define RSS_IPV6_TCP_CAP_MASK						\
 	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
-/* func init flags */
-#define FUNC_FLG_RSS		0x0001
-#define FUNC_FLG_STATS		0x0002
-/* removed  FUNC_FLG_UNMATCHED	0x0004 */
-#define FUNC_FLG_TPA		0x0008
-#define FUNC_FLG_SPQ		0x0010
-#define FUNC_FLG_LEADING	0x0020	/* PF only */
-#define FUNC_FLG_LEADING_STATS	0x0040
 struct bnx2x_func_init_params {
 	/* dma */
-	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
-	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
+	bool		spq_active;
+	dma_addr_t	spq_map;
+	u16		spq_prod;
 
-	u16		func_flgs;
 	u16		func_id;	/* abs fid */
 	u16		pf_id;
-	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
 };
 
 #define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2091,11 @@
 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
 		      struct bnx2x_vlan_mac_obj *obj, bool set,
 		      int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags);
+
 /**
  * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
  *
@@ -2481,6 +2500,7 @@
 #define VF_ACQUIRE_THRESH		3
 #define VF_ACQUIRE_MAC_FILTERS		1
 #define VF_ACQUIRE_MC_FILTERS		10
+#define VF_ACQUIRE_VLAN_FILTERS		2 /* VLAN0 + 'real' VLAN */
 
 #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
 			    (!((me_reg) & ME_REG_VF_ERR)))
@@ -2553,6 +2573,10 @@
 			(IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp) ||	\
 			 IS_MF_SI_STORAGE_PERSONALITY_ONLY(bp))
 
+/* Determines whether BW configuration arrives in 100Mb units or in
+ * percentages from actual physical link speed.
+ */
+#define IS_MF_PERCENT_BW(bp) (IS_MF_SI(bp) || IS_MF_UFP(bp) || IS_MF_BD(bp))
 
 #define SET_FLAG(value, mask, flag) \
 	do {\
@@ -2577,6 +2601,8 @@
 
 void bnx2x_update_mng_version(struct bnx2x *bp);
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
 	(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
@@ -2589,4 +2615,9 @@
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
 
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
 #endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f7fbdc9..44173be 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1188,7 +1190,7 @@
 		/* Calculate the current MAX line speed limit for the MF
 		 * devices
 		 */
-		if (IS_MF_SI(bp))
+		if (IS_MF_PERCENT_BW(bp))
 			line_speed = (line_speed * maxCfg) / 100;
 		else { /* SD mode */
 			u16 vn_max_rate = maxCfg * 100;
@@ -2103,9 +2105,14 @@
 		if (rss_obj->udp_rss_v6)
 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
 
-		if (!CHIP_IS_E1x(bp))
+		if (!CHIP_IS_E1x(bp)) {
+			/* valid only for TUNN_MODE_VXLAN tunnel mode */
+			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
 			/* valid only for TUNN_MODE_GRE tunnel mode */
-			__set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+		}
 	} else {
 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
 	}
@@ -2510,6 +2517,20 @@
 		fp->mode = TPA_MODE_DISABLED;
 }
 
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+	u32 cur;
+
+	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+		return;
+
+	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+	   cur, state);
+
+	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
 int bnx2x_load_cnic(struct bnx2x *bp)
 {
 	int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@
 
 	/* Start fast path */
 
+	/* Re-configure vlan filters */
+	rc = bnx2x_vlan_reconfigure_vid(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error3);
+
 	/* Initialize Rx filter. */
 	bnx2x_set_rx_mode_inner(bp);
 
@@ -2873,6 +2899,8 @@
 		/* mark driver is loaded in shmem2 */
 		u32 val;
 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+		val &= ~DRV_FLAGS_MTU_MASK;
+		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@
 		return -EBUSY;
 	}
 
+	/* Update driver data for On-Chip MFW dump. */
+	if (IS_PF(bp))
+		bnx2x_update_mfw_dump(bp);
+
 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
 		bnx2x_dcbx_init(bp, false);
 
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
 
 	return 0;
@@ -2956,6 +2991,9 @@
 
 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
 
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
 	/* mark driver is unloaded in shmem2 */
 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
 		u32 val;
@@ -3677,7 +3715,7 @@
 		pbd2->fw_ip_hdr_to_payload_w =
 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
 		pbd_e2->data.tunnel_data.flags |=
-			ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+			ETH_TUNNEL_DATA_IPV6_OUTER;
 	}
 
 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@
 	return NETDEV_TX_OK;
 }
 
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+	int mfw_vn = BP_FW_MB_IDX(bp);
+	u32 tmp;
+
+	/* If the shmem shouldn't affect configuration, use an identity mapping */
+	if (!IS_MF_BD(bp)) {
+		int i;
+
+		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+			c2s_map[i] = i;
+		*c2s_default = 0;
+
+		return;
+	}
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	c2s_map[0] = tmp & 0xff;
+	c2s_map[1] = (tmp >> 8) & 0xff;
+	c2s_map[2] = (tmp >> 16) & 0xff;
+	c2s_map[3] = (tmp >> 24) & 0xff;
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	c2s_map[4] = tmp & 0xff;
+	c2s_map[5] = (tmp >> 8) & 0xff;
+	c2s_map[6] = (tmp >> 16) & 0xff;
+	c2s_map[7] = (tmp >> 24) & 0xff;
+
+	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
 /**
  * bnx2x_setup_tc - routine to configure net_device for multi tc
  *
@@ -4194,8 +4267,9 @@
  */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 {
-	int cos, prio, count, offset;
 	struct bnx2x *bp = netdev_priv(dev);
+	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+	int cos, prio, count, offset;
 
 	/* setup tc must be called under rtnl lock */
 	ASSERT_RTNL();
@@ -4219,12 +4293,16 @@
 		return -EINVAL;
 	}
 
+	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
 	/* configure priority to traffic class mapping */
 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
-		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+		int outer_prio = c2s_map[prio];
+
+		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
 		   "mapping priority %d to tc %d\n",
-		   prio, bp->prio_to_cos[prio]);
+		   outer_prio, bp->prio_to_cos[outer_prio]);
 	}
 
 	/* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@
 	if (netif_running(dev))
 		rc = bnx2x_set_eth_mac(bp, true);
 
+	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
 	return rc;
 }
 
@@ -4831,6 +4912,9 @@
 	 */
 	dev->mtu = new_mtu;
 
+	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
 	return bnx2x_reload_if_running(dev);
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 03b7404..b7d32e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@
  */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ * @bp:			driver handle
+ * @c2s_map:		should have BNX2X_MAX_PRIORITY entries for mapping
+ * @c2s_default:	entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
 /*********************** Inlines **********************************/
 /*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,35 @@
 	start_params->mf_mode = bp->mf_mode;
 	start_params->sd_vlan_tag = bp->mf_ov;
 
+	/* Configure Ethertype for BD mode */
+	if (IS_MF_BD(bp)) {
+		DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+		start_params->sd_vlan_eth_type = ETH_P_8021AD;
+		REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+		REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+		REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+		bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+				      &start_params->c2s_pri_default);
+		start_params->c2s_pri_valid = 1;
+
+		DP(NETIF_MSG_IFUP,
+		   "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+		   start_params->c2s_pri[0], start_params->c2s_pri[1],
+		   start_params->c2s_pri[2], start_params->c2s_pri[3],
+		   start_params->c2s_pri[4], start_params->c2s_pri[5],
+		   start_params->c2s_pri[6], start_params->c2s_pri[7],
+		   start_params->c2s_pri_default);
+	}
+
 	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
 		start_params->network_cos_mode = STATIC_COS;
 	else /* CHIP_IS_E1X */
 		start_params->network_cos_mode = FW_WRR;
 
-	start_params->tunnel_mode	= TUNN_MODE_GRE;
-	start_params->gre_tunnel_type	= IPGRE_TUNNEL;
-	start_params->inner_gre_rss_en	= 1;
+	start_params->vxlan_dst_port = bp->vxlan_dst_port;
+
+	start_params->inner_rss = 1;
 
 	if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
 		start_params->class_fail_ethtype = ETH_P_FIP;
@@ -1037,6 +1068,15 @@
 			   BNX2X_FILTER_MAC_PENDING,
 			   &bp->sp_state, obj_type,
 			   &bp->macs_pool);
+
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+				    fp->cl_id, fp->cid, BP_FUNC(bp),
+				    bnx2x_sp(bp, vlan_rdata),
+				    bnx2x_sp_mapping(bp, vlan_rdata),
+				    BNX2X_FILTER_VLAN_PENDING,
+				    &bp->sp_state, obj_type,
+				    &bp->vlans_pool);
 }
 
 /**
@@ -1096,7 +1136,7 @@
 	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
 				   bnx2x_get_path_func_num(bp));
 
-	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
 				    bnx2x_get_path_func_num(bp));
 
 	/* RSS configuration object */
@@ -1106,6 +1146,8 @@
 				  bnx2x_sp_mapping(bp, rss_rdata),
 				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
 				  BNX2X_OBJ_TYPE_RX);
+
+	bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
 }
 
 static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1381,23 @@
 void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
 			    u32 verbose);
 
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp:		driver handle
+ * @state:	OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
+
+/**
+ * bnx2x_nvram_read - reads data from nvram [might sleep]
+ *
+ * @bp:		driver handle
+ * @offset:	byte offset in nvram
+ * @ret_buf:	pointer to buffer where data is to be stored
+ * @buf_size:   Length of 'ret_buf' in bytes
+ */
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+		     int buf_size);
+
 #endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6e4294e..7ccf668 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@
 			if (bp->dcbx_port_params.ets.cos_params[cos].
 						pri_bitmask & pri_bit)
 					tt2cos[pri].cos = cos;
+
+		pfc_fw_cfg->dcb_outer_pri[pri]  = ttp[pri];
 	}
 
 	/* we never want the FW to add a 0 vlan tag */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index c6939ec..9a9517c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index 741aa13..eccfa13 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  */
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5907c82..aeb7ce6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1129,6 +1131,9 @@
 	} else
 		bp->wol = 0;
 
+	if (SHMEM2_HAS(bp, curr_cfg))
+		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
 	return 0;
 }
 
@@ -1343,8 +1348,8 @@
 	return rc;
 }
 
-static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
-			    int buf_size)
+int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+		     int buf_size)
 {
 	int rc;
 	u32 cmd_flags;
@@ -3578,17 +3583,8 @@
 
 		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
 				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-				   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+				   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 7636e3c..226ab29 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -372,7 +374,7 @@
 #define MAX_COS_NUMBER 4
 #define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
 	/* used by array traffic_type_to_priority[] to mark traffic type \
 	that is not mapped to priority*/
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index 8aafd9b..9e3b5a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,8 @@
 /* bnx2x_fw_file_hdr.h: FW binary file header structure.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 058bc73..cafd5de 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858      0x00001200
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
 		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
 
@@ -786,6 +789,7 @@
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858       0x00001200
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
 		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
@@ -864,6 +868,7 @@
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE        0x00000500
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE       0x00000600
 		#define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE  0x00000700
 
@@ -2064,6 +2069,45 @@
 	#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
 };
 
+enum curr_cfg_method_e {
+	CURR_CFG_MET_NONE = 0,  /* default config */
+	CURR_CFG_MET_OS = 1,
+	CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct bdn_npiv_settings {
+	u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+	u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct bdn_fc_npiv_cfg {
+	/* hdr used internally by the MFW */
+	u32 hdr;
+	u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct bdn_fc_npiv_tbl {
+	struct bdn_fc_npiv_cfg fc_npiv_cfg;
+	struct bdn_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct mdump_driver_info {
+	u32 epoc;
+	u32 drv_ver;
+	u32 fw_ver;
+
+	u32 valid_dump;
+	#define FIRST_DUMP_VALID        (1 << 0)
+	#define SECOND_DUMP_VALID       (1 << 1)
+
+	u32 flags;
+	#define ENABLE_ALL_TRIGGERS     (0x7fffffff)
+	#define TRIGGER_MDUMP_ONCE      (1 << 31)
+};
+
 struct ncsi_oem_data {
 	u32 driver_version[4];
 	struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2231,8 @@
 #define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+#define DRV_FLAGS_MTU_MASK			0xffff0000
+#define DRV_FLAGS_MTU_SHIFT			16
 
 	u32 extended_dev_info_shared_cfg_size;
 
@@ -2251,6 +2297,7 @@
 	u32 reserved4;				/* Offset 0x150 */
 	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
 	#define LINK_ATTR_SYNC_KR2_ENABLE	0x00000001
+	#define LINK_ATTR_84858			0x00000002
 	#define LINK_SFP_EEPROM_COMP_CODE_MASK	0x0000ff00
 	#define LINK_SFP_EEPROM_COMP_CODE_SHIFT		 8
 	#define LINK_SFP_EEPROM_COMP_CODE_SR	0x00001000
@@ -2268,6 +2315,74 @@
 
 	/* We use indication for each PF (0..3) */
 #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+	union { /* For various OEMs */			/* Offset 0x1a0 */
+		u8 storage_boot_prog[E2_FUNC_MAX];
+	#define STORAGE_BOOT_PROG_MASK				0x000000FF
+	#define STORAGE_BOOT_PROG_NONE				0x00000000
+	#define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED		0x00000002
+	#define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS	0x00000002
+	#define STORAGE_BOOT_PROG_TARGET_FOUND			0x00000004
+	#define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS		0x00000008
+	#define STORAGE_BOOT_PROG_FCOE_LUN_FOUND		0x00000008
+	#define STORAGE_BOOT_PROG_LOGGED_INTO_TGT		0x00000010
+	#define STORAGE_BOOT_PROG_IMG_DOWNLOADED		0x00000020
+	#define STORAGE_BOOT_PROG_OS_HANDOFF			0x00000040
+	#define STORAGE_BOOT_PROG_COMPLETED			0x00000080
+
+		u32 oem_i2c_data_addr;
+	};
+
+	/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+	/* For PCP values 0-3 use the map lower */
+	/* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+	 * 0x0000FF00 - PCP 2, 0x000000FF - PCP 3
+	 */
+	u32 c2s_pcp_map_lower[E2_FUNC_MAX];			/* 0x1a4 */
+
+	/* For PCP values 4-7 use the map upper */
+	/* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+	 * 0x0000FF00 - PCP 6, 0x000000FF - PCP 7
+	 */
+	u32 c2s_pcp_map_upper[E2_FUNC_MAX];			/* 0x1b4 */
+
+	/* For PCP default value get the MSB byte of the map default */
+	u32 c2s_pcp_map_default[E2_FUNC_MAX];			/* 0x1c4 */
+
+	/* FC_NPIV table offset in NVRAM */
+	u32 fc_npiv_nvram_tbl_addr[PORT_MAX];			/* 0x1d4 */
+
+	/* Shows last method that changed configuration of this device */
+	enum curr_cfg_method_e curr_cfg;			/* 0x1dc */
+
+	/* Storm FW version, should be kept in the format 0xMMmmbbdd:
+	 * MM - Major, mm - Minor, bb - Build, dd - Drop
+	 */
+	u32 netproc_fw_ver;					/* 0x1e0 */
+
+	/* Option ROM SMASH CLP version */
+	u32 clp_ver;						/* 0x1e4 */
+
+	u32 pcie_bus_num;					/* 0x1e8 */
+
+	u32 sriov_switch_mode;					/* 0x1ec */
+	#define SRIOV_SWITCH_MODE_NONE		0x0
+	#define SRIOV_SWITCH_MODE_VEB		0x1
+	#define SRIOV_SWITCH_MODE_VEPA		0x2
+
+	u8  rsrv2[E2_FUNC_MAX];					/* 0x1f0 */
+
+	u32 img_inv_table_addr;	/* Address to INV_TABLE_P */	/* 0x1f4 */
+
+	u32 mtu_size[E2_FUNC_MAX];				/* 0x1f8 */
+
+	u32 os_driver_state[E2_FUNC_MAX];			/* 0x208 */
+	#define OS_DRIVER_STATE_NOT_LOADED	0 /* not installed */
+	#define OS_DRIVER_STATE_LOADING		1 /* transition state */
+	#define OS_DRIVER_STATE_DISABLED	2 /* installed but disabled */
+	#define OS_DRIVER_STATE_ACTIVE		3 /* installed and active */
+
+	/* mini dump driver info */
+	struct mdump_driver_info drv_info;			/* 0x218 */
 };
 
 
@@ -2898,8 +3013,8 @@
 };
 
 #define BCM_5710_FW_MAJOR_VERSION			7
-#define BCM_5710_FW_MINOR_VERSION			10
-#define BCM_5710_FW_REVISION_VERSION		51
+#define BCM_5710_FW_MINOR_VERSION			12
+#define BCM_5710_FW_REVISION_VERSION		30
 #define BCM_5710_FW_ENGINEERING_VERSION		0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
@@ -3901,7 +4016,11 @@
 	__le16 len_on_bd;
 	struct parsing_flags pars_flags;
 	union eth_sgl_or_raw_data sgl_or_raw_data;
-	__le32 reserved1[7];
+	u8 tunn_type;
+	u8 tunn_inner_hdrs_offset;
+	__le16 reserved1;
+	__le32 tunn_tenant_id;
+	__le32 padding[5];
 	u32 marker;
 };
 
@@ -4012,8 +4131,8 @@
 	__le16 pseudo_csum;
 	u8 ip_hdr_start_inner_w;
 	u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
 #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
 };
@@ -4120,16 +4239,12 @@
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
 	u8 rss_result_mask;
 	u8 reserved3;
 	__le16 reserved4;
@@ -4314,6 +4429,18 @@
 	MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
 };
 
+enum eth_tunn_type {
+	TUNN_TYPE_NONE,
+	TUNN_TYPE_VXLAN,
+	TUNN_TYPE_L2_GRE,
+	TUNN_TYPE_IPV4_GRE,
+	TUNN_TYPE_IPV6_GRE,
+	TUNN_TYPE_L2_GENEVE,
+	TUNN_TYPE_IPV4_GENEVE,
+	TUNN_TYPE_IPV6_GENEVE,
+	MAX_ETH_TUNN_TYPE
+};
+
 /*
  * Tx regular BD structure
  */
@@ -4758,6 +4885,9 @@
 	__le16 reserved1;
 };
 
+struct c2s_pri_trans_table_entry {
+	u8 val[MAX_VLAN_PRIORITIES];
+};
 
 /*
  * cfc delete event data
@@ -5246,6 +5376,7 @@
 	u8 dont_add_pri_0_en;
 	u8 reserved1;
 	__le32 reserved2;
+	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 
@@ -5260,18 +5391,25 @@
 	u8 path_id;
 	u8 network_cos_mode;
 	u8 dmae_cmd_id;
-	u8 tunnel_mode;
-	u8 gre_tunnel_type;
-	u8 tunn_clss_en;
-	u8 inner_gre_rss_en;
-	u8 sd_accept_mf_clss_fail;
+	u8 no_added_tags;
+	__le16 reserved0;
+	__le32 reserved1;
+	u8 inner_clss_vxlan;
+	u8 inner_clss_l2gre;
+	u8 inner_clss_l2geneve;
+	u8 inner_rss;
 	__le16 vxlan_dst_port;
+	__le16 geneve_dst_port;
+	u8 sd_accept_mf_clss_fail;
+	u8 sd_accept_mf_clss_fail_match_ethtype;
 	__le16 sd_accept_mf_clss_fail_ethtype;
 	__le16 sd_vlan_eth_type;
 	u8 sd_vlan_force_pri_flg;
 	u8 sd_vlan_force_pri_val;
-	u8 sd_accept_mf_clss_fail_match_ethtype;
-	u8 no_added_tags;
+	u8 c2s_pri_tt_valid;
+	u8 c2s_pri_default;
+	u8 reserved2[6];
+	struct c2s_pri_trans_table_entry c2s_pri_trans_table;
 };
 
 struct function_update_data {
@@ -5289,11 +5427,12 @@
 	u8 tx_switch_suspend;
 	u8 echo;
 	u8 update_tunn_cfg_flg;
-	u8 tunnel_mode;
-	u8 gre_tunnel_type;
-	u8 tunn_clss_en;
-	u8 inner_gre_rss_en;
+	u8 inner_clss_vxlan;
+	u8 inner_clss_l2gre;
+	u8 inner_clss_l2geneve;
+	u8 inner_rss;
 	__le16 vxlan_dst_port;
+	__le16 geneve_dst_port;
 	u8 sd_vlan_force_pri_change_flg;
 	u8 sd_vlan_force_pri_flg;
 	u8 sd_vlan_force_pri_val;
@@ -5302,6 +5441,8 @@
 	u8 reserved1;
 	__le16 sd_vlan_tag;
 	__le16 sd_vlan_eth_type;
+	__le16 reserved0;
+	__le32 reserved2;
 };
 
 /*
@@ -5330,15 +5471,6 @@
 #define __FW_VERSION_RESERVED_SHIFT 4
 };
 
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
-	NVGRE_TUNNEL,
-	L2GRE_TUNNEL,
-	IPGRE_TUNNEL,
-	MAX_GRE_TUNNEL_TYPE
-};
-
 /*
  * Dynamic Host-Coalescing - Driver(host) counters
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index d6e1975..46ee2c0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
  *               Structures and macroes needed during the initialization.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 5669ed2..1835d2e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
  *               Static functions needed during the initialization.
  *               This file is "included" in bnx2x_main.c.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a0b03c2..d946bba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@
 /******************************************************************/
 /*		BCM8481/BCM84823/BCM84833 PHY SECTION	          */
 /******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+	return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+		(phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+		(phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 					    struct bnx2x *bp,
 					    u8 port)
@@ -9666,8 +9675,7 @@
 	};
 	u16 fw_ver1;
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+	if (bnx2x_is_8483x_8485x(phy)) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
 		bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
 				phy->ver_addr);
@@ -9749,8 +9757,7 @@
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+	if (bnx2x_is_8483x_8485x(phy))
 		offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
 	else
 		offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@
 	struct bnx2x *bp = params->bp;
 	switch (action) {
 	case PHY_INIT:
-		if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-		    (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+		if (!bnx2x_is_8483x_8485x(phy)) {
 			/* Save spirom version */
 			bnx2x_save_848xx_spirom_version(phy, bp, params->port);
 		}
@@ -9901,8 +9907,7 @@
 	/* Always write this if this is not 84833/4.
 	 * For 84833/4, write it only when it's a forced speed.
 	 */
-	if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-	     (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+	if (!bnx2x_is_8483x_8485x(phy) ||
 	    ((autoneg_val & (1<<12)) == 0))
 		bnx2x_cl45_write(bp, phy,
 			 MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params,
+				u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	int idx;
+	u16 val;
+	struct bnx2x *bp = params->bp;
+
+	/* Step 1: Poll the STATUS register to see whether the previous command
+	 * is in progress or the system is busy (CMD_IN_PROGRESS or
+	 * SYSTEM_BUSY). If previous command is in progress or system is busy,
+	 * check again until the previous command finishes execution and the
+	 * system is available for taking command
+	 */
+
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+		    (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if (idx >= PHY848xx_CMDHDLR_WAIT) {
+		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+		return -EINVAL;
+	}
+
+	/* Step 2: If any parameters are required for the function, write them
+	 * to the required DATA registers
+	 */
+
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				 MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				 cmd_args[idx]);
+	}
+
+	/* Step 3: When the firmware is ready for commands, write the 'Command
+	 * code' to the CMD register
+	 */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+	/* Step 4: Once the command has been written, poll the STATUS register
+	 * to check whether the command has completed (CMD_COMPLETED_PASS/
+	 * CMD_FOR_CMDS or CMD_COMPLETED_ERROR).
+	 */
+
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
+		if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+		    (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+			break;
+		usleep_range(1000, 2000);
+	}
+	if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+	    (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+		return -EINVAL;
+	}
+	/* Step 5: Once the command has completed, read the specified DATA
+	 * registers for any saved results for the command, if applicable
+	 */
+
+	/* Gather returning data */
+	for (idx = 0; idx < argc; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
+				&cmd_args[idx]);
+	}
+
+	return 0;
+}
+
 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
 				struct link_params *params, u16 fw_cmd,
 				u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@
 	struct bnx2x *bp = params->bp;
 	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_CMD_HDLR_STATUS,
+			MDIO_848xx_CMD_HDLR_STATUS,
 			PHY84833_STATUS_CMD_OPEN_OVERRIDE);
-	for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_STATUS, &val);
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
 		if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
 			break;
 		usleep_range(1000, 2000);
 	}
-	if (idx >= PHY84833_CMDHDLR_WAIT) {
+	if (idx >= PHY848xx_CMDHDLR_WAIT) {
 		DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
 		return -EINVAL;
 	}
@@ -9977,42 +10060,62 @@
 	/* Prepare argument(s) and issue command */
 	for (idx = 0; idx < argc; idx++) {
 		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_DATA1 + idx,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
 				cmd_args[idx]);
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
-	for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+			MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+	for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_STATUS, &val);
+				MDIO_848xx_CMD_HDLR_STATUS, &val);
 		if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
-			(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+		    (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
 			break;
 		usleep_range(1000, 2000);
 	}
-	if ((idx >= PHY84833_CMDHDLR_WAIT) ||
-		(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+	if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+	    (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
 		DP(NETIF_MSG_LINK, "FW cmd failed.\n");
 		return -EINVAL;
 	}
 	/* Gather returning data */
 	for (idx = 0; idx < argc; idx++) {
 		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-				MDIO_84833_CMD_HDLR_DATA1 + idx,
+				MDIO_848xx_CMD_HDLR_DATA1 + idx,
 				&cmd_args[idx]);
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_84833_CMD_HDLR_STATUS,
+			MDIO_848xx_CMD_HDLR_STATUS,
 			PHY84833_STATUS_CMD_CLEAR_COMPLETE);
 	return 0;
 }
 
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
-				   struct link_params *params,
-				   struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+				struct link_params *params,
+				u16 fw_cmd,
+				u16 cmd_args[], int argc)
+{
+	struct bnx2x *bp = params->bp;
+
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+	    (REG_RD(bp, params->shmem2_base +
+		    offsetof(struct shmem2_region,
+			     link_attr_sync[params->port])) &
+	     LINK_ATTR_84858)) {
+		return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+					    argc);
+	} else {
+		return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+					    argc);
+	}
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     struct link_vars *vars)
 {
 	u32 pair_swap;
-	u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+	u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
 	int status;
 	struct bnx2x *bp = params->bp;
 
@@ -10028,8 +10131,9 @@
 	/* Only the second argument is used for this command */
 	data[1] = (u16)pair_swap;
 
-	status = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+	status = bnx2x_848xx_cmd_hdlr(phy, params,
+				      PHY848xx_CMD_SET_PAIR_SWAP, data,
+				      PHY848xx_CMDHDLR_MAX_ARGS);
 	if (status == 0)
 		DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
 
@@ -10118,8 +10222,8 @@
 	DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
 
 	/* Prevent Phy from working in EEE and advertising it */
-	rc = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+	rc = bnx2x_848xx_cmd_hdlr(phy, params,
+				  PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
 	if (rc) {
 		DP(NETIF_MSG_LINK, "EEE disable failed.\n");
 		return rc;
@@ -10136,8 +10240,8 @@
 	struct bnx2x *bp = params->bp;
 	u16 cmd_args = 1;
 
-	rc = bnx2x_84833_cmd_hdlr(phy, params,
-		PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+	rc = bnx2x_848xx_cmd_hdlr(phy, params,
+				  PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
 	if (rc) {
 		DP(NETIF_MSG_LINK, "EEE enable failed.\n");
 		return rc;
@@ -10155,7 +10259,7 @@
 	u8 port, initialize = 1;
 	u16 val;
 	u32 actual_phy_selection;
-	u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+	u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
 	int rc = 0;
 
 	usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@
 
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-	    (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+	if (!bnx2x_is_8483x_8485x(phy)) {
 		/* BCM84823 requires that XGXS links up first @ 10G for normal
 		 * behavior.
 		 */
@@ -10192,7 +10295,19 @@
 		bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
 		vars->line_speed = temp;
 	}
+	/* Check if this is actually BCM84858 */
+	if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+		u16 hw_rev;
 
+		bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+				MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+		if (hw_rev == BCM84858_PHY_ID) {
+			params->link_attr_sync |= LINK_ATTR_84858;
+			bnx2x_update_link_attr(params, params->link_attr_sync);
+		}
+	}
+
+	/* Set dual-media configuration according to configuration */
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
 			MDIO_CTL_REG_84823_MEDIA, &val);
 	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 		   params->multi_phy_config, val);
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
-		bnx2x_84833_pair_swap_cfg(phy, params, vars);
+	if (bnx2x_is_8483x_8485x(phy)) {
+		bnx2x_848xx_pair_swap_cfg(phy, params, vars);
 
 		/* Keep AutogrEEEn disabled. */
 		cmd_args[0] = 0x0;
 		cmd_args[1] = 0x0;
 		cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
 		cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-		rc = bnx2x_84833_cmd_hdlr(phy, params,
-			PHY84833_CMD_SET_EEE_MODE, cmd_args,
-			PHY84833_CMDHDLR_MAX_ARGS);
+		rc = bnx2x_848xx_cmd_hdlr(phy, params,
+					  PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+					  PHY848xx_CMDHDLR_MAX_ARGS);
 		if (rc)
 			DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
 	}
@@ -10302,8 +10416,7 @@
 		vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
 	}
 
-	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+	if (bnx2x_is_8483x_8485x(phy)) {
 		/* Bring PHY out of super isolate mode as the final step. */
 		bnx2x_cl45_read_and_write(bp, phy,
 					  MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@
 				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
 		/* Determine if EEE was negotiated */
-		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-		    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+		if (bnx2x_is_8483x_8485x(phy))
 			bnx2x_eee_an_resolve(phy, params, vars);
 	}
 
@@ -11842,6 +11954,40 @@
 	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
+static const struct bnx2x_phy phy_84858 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
 static const struct bnx2x_phy phy_54618se = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
 	.addr		= 0xff,
@@ -12128,6 +12274,9 @@
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
 		*phy = phy_84834;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+		*phy = phy_84858;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
 		*phy = phy_54618se;
@@ -12184,9 +12333,7 @@
 	}
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-	if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-	     (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
-	    (phy->ver_addr)) {
+	if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
 		/* Remove 100Mb link supported for BCM84833/4 when phy fw
 		 * version lower than or equal to 1.39
 		 */
@@ -13281,6 +13428,7 @@
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
 		/* GPIO3's are linked, and so both need to be toggled
 		 * to obtain required 2us pulse.
 		 */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index d9cce4c..b7d2511 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c27af12..e3da2bd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -81,11 +83,11 @@
 #define TX_TIMEOUT		(5*HZ)
 
 static char version[] =
-	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+	"QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
 	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
 		   "BCM57710/57711/57711E/"
 		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
 		   "57840/57840_MF Driver");
@@ -163,27 +165,27 @@
 static struct {
 	char *name;
 } board_info[] = {
-	[BCM57710]	= { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
-	[BCM57711]	= { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
-	[BCM57711E]	= { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
-	[BCM57712]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
-	[BCM57712_MF]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
-	[BCM57712_VF]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
-	[BCM57800]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
-	[BCM57800_MF]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
-	[BCM57800_VF]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
-	[BCM57810]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
-	[BCM57810_MF]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
-	[BCM57810_VF]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
-	[BCM57840_4_10]	= { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
-	[BCM57840_2_20]	= { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
-	[BCM57840_MF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-	[BCM57840_VF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
-	[BCM57811]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
-	[BCM57811_MF]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
-	[BCM57840_O]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-	[BCM57840_MFO]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-	[BCM57811_VF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
+	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
+	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
+	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
+	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
+	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
+	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
+	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
+	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+	[BCM57811_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -264,11 +266,14 @@
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
+	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
 	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
@@ -2492,7 +2497,7 @@
 	else {
 		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
 
-		if (IS_MF_SI(bp)) {
+		if (IS_MF_PERCENT_BW(bp)) {
 			/* maxCfg in percents of linkspeed */
 			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
 		} else /* SD modes */
@@ -2916,7 +2921,7 @@
 	func_params.f_obj = &bp->func_obj;
 	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
-	if (IS_MF_UFP(bp)) {
+	if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
 		int func = BP_ABS_FUNC(bp);
 		u32 val;
 
@@ -2943,16 +2948,16 @@
 			BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
 				  bp->mf_ov);
 			goto fail;
+		} else {
+			DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+			   bp->mf_ov);
 		}
-
-		DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
-		bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
-		return;
+	} else {
+		goto fail;
 	}
 
-	/* not supported by SW yet */
+	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+	return;
 fail:
 	bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
 }
@@ -3065,7 +3070,7 @@
 	storm_memset_func_en(bp, p->func_id, 1);
 
 	/* spq */
-	if (p->func_flgs & FUNC_FLG_SPQ) {
+	if (p->spq_active) {
 		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
 		REG_WR(bp, XSEM_REG_FAST_MEMORY +
 		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3286,6 @@
 {
 	struct bnx2x_func_init_params func_init = {0};
 	struct event_ring_data eq_data = { {0} };
-	u16 flags;
 
 	if (!CHIP_IS_E1x(bp)) {
 		/* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3302,7 @@
 				BP_FUNC(bp) : BP_VN(bp))*4, 0);
 	}
 
-	/* function setup flags */
-	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
-	/* This flag is relevant for E1x only.
-	 * E2 doesn't have a TPA configuration in a function level.
-	 */
-	flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
-	func_init.func_flgs = flags;
+	func_init.spq_active = true;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = BP_FUNC(bp);
 	func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3703,34 @@
 	   ethver, iscsiver, fcoever);
 }
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+	struct timeval epoc;
+	u32 drv_ver;
+	u32 valid_dump;
+
+	if (!SHMEM2_HAS(bp, drv_info))
+		return;
+
+	/* Update Driver load time */
+	do_gettimeofday(&epoc);
+	SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+
+	drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+	SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+	SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+	/* Check & notify On-Chip dump. */
+	valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+	if (valid_dump & FIRST_DUMP_VALID)
+		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+	if (valid_dump & SECOND_DUMP_VALID)
+		DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
 {
 	u32 cmd_ok, cmd_fail;
@@ -5274,6 +5298,10 @@
 			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
 		break;
+	case BNX2X_FILTER_VLAN_PENDING:
+		DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+		vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
+		break;
 	case BNX2X_FILTER_MCAST_PENDING:
 		DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
 		/* This is only relevant for 57710 where multicast MACs are
@@ -5568,6 +5596,8 @@
 		      BNX2X_STATE_OPEN):
 		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
 		      BNX2X_STATE_OPENING_WAIT4_PORT):
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 			cid = elem->message.data.eth_event.echo &
 				BNX2X_SWCID_MASK;
 			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5615,7 @@
 		      BNX2X_STATE_DIAG):
 		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
 		      BNX2X_STATE_CLOSING_WAIT4_HALT):
-			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+			DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
 			bnx2x_handle_classification_eqe(bp, elem);
 			break;
 
@@ -6173,6 +6203,11 @@
 		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
 		break;
 	case BNX2X_RX_MODE_ALLMULTI:
 		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6219,11 @@
 		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
 		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+		if (bp->accept_any_vlan) {
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+		}
+
 		break;
 	case BNX2X_RX_MODE_PROMISC:
 		/* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6244,15 @@
 		else
 			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
 		break;
 	default:
 		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
 		return -EINVAL;
 	}
 
-	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
-	if (rx_mode != BNX2X_RX_MODE_NONE) {
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
-	}
-
 	return 0;
 }
 
@@ -7429,6 +7466,9 @@
 	} else
 		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 
+	if (SHMEM2_HAS(bp, netproc_fw_ver))
+		SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
 	return 0;
 }
 
@@ -8406,6 +8446,42 @@
 	return rc;
 }
 
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+		       struct bnx2x_vlan_mac_obj *obj, bool set,
+		       unsigned long *ramrod_flags)
+{
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
+
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		ramrod_param.user_req.u.vlan.vlan = vlan;
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+	}
+
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+	if (rc == -EEXIST) {
+		/* Do not treat adding same vlan as error. */
+		DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+		rc = 0;
+	} else if (rc < 0) {
+		BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+	}
+
+	return rc;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
 		       struct bnx2x_vlan_mac_obj *mac_obj,
 		       int mac_type, bool wait_for_comp)
@@ -10002,6 +10078,81 @@
 	}
 }
 
+#ifdef CONFIG_BNX2X_VXLAN
+static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
+{
+	struct bnx2x_func_switch_update_params *switch_update_params;
+	struct bnx2x_func_state_params func_params = {NULL};
+	int rc;
+
+	switch_update_params = &func_params.params.switch_update;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	__set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+
+	/* Function parameters */
+	__set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
+		  &switch_update_params->changes);
+	switch_update_params->vxlan_dst_port = port;
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc)
+		BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
+			  port, rc);
+	return rc;
+}
+
+static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
+{
+	if (!netif_running(bp->dev))
+		return;
+
+	if (bp->vxlan_dst_port || !IS_PF(bp)) {
+		DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
+		return;
+	}
+
+	bp->vxlan_dst_port = port;
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
+}
+
+static void bnx2x_add_vxlan_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 t_port = ntohs(port);
+
+	__bnx2x_add_vxlan_port(bp, t_port);
+}
+
+static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
+{
+	if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+		DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
+		return;
+	}
+
+	if (netif_running(bp->dev)) {
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
+	} else {
+		bp->vxlan_dst_port = 0;
+		netdev_info(bp->dev, "Deleted vxlan dest port %d", port);
+	}
+}
+
+static void bnx2x_del_vxlan_port(struct net_device *netdev,
+				 sa_family_t sa_family, __be16 port)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 t_port = ntohs(port);
+
+	__bnx2x_del_vxlan_port(bp, t_port);
+}
+#endif
+
 static int bnx2x_close(struct net_device *dev);
 
 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10010,6 +10161,9 @@
 static void bnx2x_sp_rtnl_task(struct work_struct *work)
 {
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+#ifdef CONFIG_BNX2X_VXLAN
+	u16 port;
+#endif
 
 	rtnl_lock();
 
@@ -10108,6 +10262,27 @@
 			       &bp->sp_rtnl_state))
 		bnx2x_update_mng_version(bp);
 
+#ifdef CONFIG_BNX2X_VXLAN
+	port = bp->vxlan_dst_port;
+	if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
+			       &bp->sp_rtnl_state)) {
+		if (!bnx2x_vxlan_port_update(bp, port))
+			netdev_info(bp->dev, "Added vxlan dest port %d", port);
+		else
+			bp->vxlan_dst_port = 0;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
+			       &bp->sp_rtnl_state)) {
+		if (!bnx2x_vxlan_port_update(bp, 0)) {
+			netdev_info(bp->dev,
+				    "Deleted vxlan dest port %d", port);
+			bp->vxlan_dst_port = 0;
+			vxlan_get_rx_port(bp->dev);
+		}
+	}
+#endif
+
 	/* work which needs rtnl lock not-taken (as it takes the lock itself and
 	 * can be called from other contexts as well)
 	 */
@@ -11678,7 +11853,7 @@
 static int bnx2x_get_hwinfo(struct bnx2x *bp)
 {
 	int /*abs*/func = BP_ABS_FUNC(bp);
-	int vn;
+	int vn, mfw_vn;
 	u32 val = 0, val2 = 0;
 	int rc = 0;
 
@@ -11768,6 +11943,7 @@
 	bp->mf_mode = 0;
 	bp->mf_sub_mode = 0;
 	vn = BP_VN(bp);
+	mfw_vn = BP_FW_MB_IDX(bp);
 
 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
 		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +12000,31 @@
 				} else
 					BNX2X_DEV_INFO("illegal OV for SD\n");
 				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+				bp->mf_mode = MULTI_FUNCTION_SD;
+				bp->mf_sub_mode = SUB_MF_MODE_BD;
+				bp->mf_config[vn] =
+					MF_CFG_RD(bp,
+						  func_mf_config[func].config);
+
+				if (SHMEM2_HAS(bp, mtu_size)) {
+					int mtu_idx = BP_FW_MB_IDX(bp);
+					u16 mtu_size;
+					u32 mtu;
+
+					mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+					mtu_size = (u16)mtu;
+					DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+					   mtu_size, mtu);
+
+					/* if valid: update device mtu */
+					if (((mtu_size + ETH_HLEN) >=
+					     ETH_MIN_PACKET_SIZE) &&
+					    (mtu_size <=
+					     ETH_MAX_JUMBO_PACKET_SIZE))
+						bp->dev->mtu = mtu_size;
+				}
+				break;
 			case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
 				bp->mf_mode = MULTI_FUNCTION_SD;
 				bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +12072,10 @@
 
 				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
 					       func, bp->mf_ov, bp->mf_ov);
-			} else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+			} else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+				   (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
 				dev_err(&bp->pdev->dev,
-					"Unexpected - no valid MF OV for func %d in UFP mode\n",
+					"Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
 					func);
 				bp->path_has_ovlan = true;
 			} else {
@@ -12078,6 +12280,7 @@
 	mutex_init(&bp->drv_info_mutex);
 	sema_init(&bp->stats_lock, 1);
 	bp->drv_info_mng_owner = false;
+	INIT_LIST_HEAD(&bp->vlan_reg);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12278,6 +12481,12 @@
 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
 	if (rc)
 		return rc;
+
+#ifdef CONFIG_BNX2X_VXLAN
+	if (IS_PF(bp))
+		vxlan_get_rx_port(dev);
+#endif
+
 	return 0;
 }
 
@@ -12596,6 +12805,169 @@
 	return vxlan_features_check(skb, features);
 }
 
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+	int rc;
+
+	if (IS_PF(bp)) {
+		unsigned long ramrod_flags = 0;
+
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+		rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+					add, &ramrod_flags);
+	} else {
+		rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+	}
+
+	return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!bp->vlan_cnt) {
+		DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+		return 0;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link) {
+		/* Prepare for cleanup in case of errors */
+		if (rc) {
+			vlan->hw = false;
+			continue;
+		}
+
+		if (!vlan->hw)
+			continue;
+
+		DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+		rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+		if (rc) {
+			BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+			vlan->hw = false;
+			rc = -EINVAL;
+			continue;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	bool hw = false;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration while the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+	vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+	if (!vlan)
+		return -ENOMEM;
+
+	bp->vlan_cnt++;
+	if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+		DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+		bp->accept_any_vlan = true;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	} else if (bp->vlan_cnt <= bp->vlan_credit) {
+		rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+		hw = true;
+	}
+
+	vlan->vid = vid;
+	vlan->hw = hw;
+
+	if (!rc) {
+		list_add(&vlan->link, &bp->vlan_reg);
+	} else {
+		bp->vlan_cnt--;
+		kfree(vlan);
+	}
+
+	DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+	return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bnx2x_vlan_entry *vlan;
+	int rc = 0;
+
+	if (!netif_running(bp->dev)) {
+		DP(NETIF_MSG_IFUP,
+		   "Ignoring VLAN configuration while the interface is down\n");
+		return -EFAULT;
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+	if (!bp->vlan_cnt) {
+		BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(vlan, &bp->vlan_reg, link)
+		if (vlan->vid == vid)
+			break;
+
+	if (vlan->vid != vid) {
+		BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+		return -EINVAL;
+	}
+
+	if (vlan->hw)
+		rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+	list_del(&vlan->link);
+	kfree(vlan);
+
+	bp->vlan_cnt--;
+
+	if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+		/* Configure all non-configured entries */
+		list_for_each_entry(vlan, &bp->vlan_reg, link) {
+			if (vlan->hw)
+				continue;
+
+			rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+			if (rc) {
+				BNX2X_ERR("Unable to config VLAN %d\n",
+					  vlan->vid);
+				continue;
+			}
+			DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+			   vlan->vid);
+			vlan->hw = true;
+		}
+		DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+		bp->accept_any_vlan = false;
+		if (IS_PF(bp))
+			bnx2x_set_rx_mode_inner(bp);
+		else
+			bnx2x_vfpf_storm_rx_mode(bp);
+	}
+
+	DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+	return rc;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_open		= bnx2x_open,
 	.ndo_stop		= bnx2x_close,
@@ -12609,6 +12981,8 @@
 	.ndo_fix_features	= bnx2x_fix_features,
 	.ndo_set_features	= bnx2x_set_features,
 	.ndo_tx_timeout		= bnx2x_tx_timeout,
+	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= poll_bnx2x,
 #endif
@@ -12628,6 +13002,10 @@
 	.ndo_get_phys_port_id	= bnx2x_get_phys_port_id,
 	.ndo_set_vf_link_state	= bnx2x_set_vf_link_state,
 	.ndo_features_check	= bnx2x_features_check,
+#ifdef CONFIG_BNX2X_VXLAN
+	.ndo_add_vxlan_port	= bnx2x_add_vxlan_port,
+	.ndo_del_vxlan_port	= bnx2x_del_vxlan_port,
+#endif
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12819,6 +13197,18 @@
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
+	/* VFs with an old hypervisor or an old PF do not support filtering */
+	if (IS_PF(bp)) {
+		if (CHIP_IS_E1x(bp))
+			bp->accept_any_vlan = true;
+		else
+			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+	} else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+		dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+	}
+
 	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->features |= NETIF_F_HIGHDMA;
 
@@ -13561,6 +13951,9 @@
 
 	bnx2x_register_phc(bp);
 
+	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
 	return 0;
 
 init_one_exit:
@@ -13623,6 +14016,7 @@
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
 	if (IS_PF(bp)) {
 		bnx2x_set_power_state(bp, PCI_D0);
+		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
 
 		/* Set endianity registers to reset values in case next driver
 		 * boots in different endianty environment.
@@ -14371,6 +14765,90 @@
 		rc = -EINVAL;
 	}
 
+	/* For storage-only interfaces, change driver state */
+	if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
+		switch (ctl->drv_state) {
+		case DRV_NOP:
+			break;
+		case DRV_ACTIVE:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_ACTIVE);
+			break;
+		case DRV_INACTIVE:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_DISABLED);
+			break;
+		case DRV_UNLOADED:
+			bnx2x_set_os_driver_state(bp,
+						  OS_DRIVER_STATE_NOT_LOADED);
+			break;
+		default:
+			BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_get_fc_npiv(struct net_device *dev,
+			     struct cnic_fc_npiv_tbl *cnic_tbl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct bdn_fc_npiv_tbl *tbl = NULL;
+	u32 offset, entries;
+	int rc = -EINVAL;
+	int i;
+
+	if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
+		goto out;
+
+	DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
+
+	tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
+	if (!tbl) {
+		BNX2X_ERR("Failed to allocate fc_npiv table\n");
+		goto out;
+	}
+
+	offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
+	DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
+
+	/* Read the table contents from nvram */
+	if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
+		BNX2X_ERR("Failed to read FC-NPIV table\n");
+		goto out;
+	}
+
+	/* Since bnx2x_nvram_read() returns data in be32, we need to convert
+	 * the number of entries back to cpu endianness.
+	 */
+	entries = tbl->fc_npiv_cfg.num_of_npiv;
+	entries = (__force u32)be32_to_cpu((__force __be32)entries);
+	tbl->fc_npiv_cfg.num_of_npiv = entries;
+
+	if (!tbl->fc_npiv_cfg.num_of_npiv) {
+		DP(BNX2X_MSG_MCP,
+		   "No FC-NPIV table [valid, simply not present]\n");
+		goto out;
+	} else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
+		BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
+			  tbl->fc_npiv_cfg.num_of_npiv);
+		goto out;
+	} else {
+		DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
+		   tbl->fc_npiv_cfg.num_of_npiv);
+	}
+
+	/* Copy the data into cnic-provided struct */
+	cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
+	for (i = 0; i < cnic_tbl->count; i++) {
+		memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
+		memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
+	}
+
+	rc = 0;
+out:
+	kfree(tbl);
 	return rc;
 }
 
@@ -14516,6 +14994,7 @@
 	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
 	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
 	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
 	cp->drv_register_cnic = bnx2x_register_cnic;
 	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
 	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
index caf1aef..a91ccbf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 49d5110..4dead49 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -2137,6 +2139,10 @@
 /* [RW 1] When this bit is set; the LLH will expect all packets to be with
    e1hov */
 #define NIG_REG_LLH_E1HOV_MODE					 0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1				 0x16028
 /* [RW 1] When this bit is set; the LLH will classify the packet before
    sending it to the BRB or calculating WoL on it. */
 #define NIG_REG_LLH_MF_MODE					 0x16024
@@ -2953,7 +2959,12 @@
 #define PBF_REG_TQ_OCCUPANCY_Q0					 0x1403ac
 /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
 #define PBF_REG_TQ_OCCUPANCY_Q1					 0x1403b0
-#define PB_REG_CONTROL						 0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0					 0x15c06c
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
 /* [R 2] Interrupt register #0 read */
@@ -3372,6 +3383,12 @@
 #define PRS_REG_TCM_CURRENT_CREDIT				 0x40160
 /* [R 8] debug only: TSDM current credit. Transaction based. */
 #define PRS_REG_TSDM_CURRENT_CREDIT				 0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0					 0x401a8
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT			 (0x1<<19)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF			 (0x1<<20)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN			 (0x1<<22)
@@ -7240,6 +7257,9 @@
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL	0xffe0
 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G	0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS	0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB		0xffe2
+#define BCM84858_PHY_ID					0x600d
+#define MDIO_AN_REG_848xx_ID_LSB		0xffe3
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV		0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION	0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL		0xffe9
@@ -7283,31 +7303,31 @@
 #define MDIO_84833_TOP_CFG_FW_NO_EEE		0x1f81
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1			0x401a
 #define MDIO_84833_SUPER_ISOLATE		0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0			0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1			0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2			0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3			0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4			0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26		0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27		0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28		0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29		0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30		0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31		0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND	MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS	MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1	MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2	MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3	MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4	MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5	MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* These are mailbox register set used by 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0			0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1			0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2			0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3			0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4			0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26		0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27		0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28		0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29		0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30		0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31		0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND	(MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS	(MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1	(MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2	(MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3	(MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4	(MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5	(MDIO_848xx_TOP_CFG_SCRATCH_REG31)
 
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP			0x8001
-#define PHY84833_CMD_GET_EEE_MODE			0x8008
-#define PHY84833_CMD_SET_EEE_MODE			0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP			0x8001
+#define PHY848xx_CMD_GET_EEE_MODE			0x8008
+#define PHY848xx_CMD_SET_EEE_MODE			0x8009
+/* Mailbox status set used by 84833 only */
 #define PHY84833_STATUS_CMD_RECEIVED			0x0001
 #define PHY84833_STATUS_CMD_IN_PROGRESS			0x0002
 #define PHY84833_STATUS_CMD_COMPLETE_PASS		0x0004
@@ -7318,6 +7338,13 @@
 #define PHY84833_STATUS_CMD_CLEAR_COMPLETE		0x0080
 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE		0xa5a5
 
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED			0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS			0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS		0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR		0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY			0xbbbb
+
 
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD					0x3
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 4ad415a..c9bd7f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@
 
 	return vp->get(vp, 1);
 }
+
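+/* Take one credit from both the MAC and the VLAN pools; if the VLAN credit
+ * cannot be taken the MAC credit is returned, so the operation is
+ * all-or-nothing.
+ */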
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 {
 	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@
 	return vp->put(vp, 1);
 }
 
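+/* Return one credit to both the MAC and the VLAN pools; if returning the
+ * VLAN credit fails the MAC credit is re-taken to keep both pools consistent.
+ */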
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
 /**
  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
  *
@@ -636,6 +671,26 @@
 	return 0;
 }
 
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+				    struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+				  ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return -EEXIST;
+
+	return 0;
+}
+
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
 	bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@
 	return NULL;
 }
 
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+	   data->vlan_mac.mac, data->vlan_mac.vlan);
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)) &&
+		    (data->vlan_mac.is_inner_mac ==
+		     pos->u.vlan_mac.is_inner_mac))
+			return pos;
+
+	return NULL;
+}
+
 /* check_move() callback */
 static bool bnx2x_check_move(struct bnx2x *bp,
 			     struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@
 					rule_cnt);
 }
 
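+/* Build an E2 classification-rules ramrod entry for a MAC+VLAN pair; a MOVE
+ * command adds a second rule that registers the pair on the target queue.
+ */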
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+	u16 inner_mac;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+	inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+	rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	/* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		struct bnx2x_vlan_mac_obj *target_obj;
+
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		target_obj = elem->cmd_data.vlan_mac.target_obj;
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set a VLAN itself */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
+		rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+	}
+
+	/* Set the ramrod data header */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - set one MAC+VLAN pair classification rule (E1H)
+ *
+ * @bp:		device handle
+ * @o:		vlan_mac object the rule belongs to
+ * @elem:	execution queue element carrying the command data
+ * @rule_idx:	index of the rule within the ramrod data
+ * @cam_offset:	CAM offset to configure
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/* 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
@@ -1135,6 +1301,25 @@
 	return NULL;
 }
 
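+/* Find a pending execution-queue element whose VLAN-MAC data and command
+ * match the given element, or NULL if none is queued.
+ */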
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			    sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd ==
+		     elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
 /**
  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
  *
@@ -2042,6 +2227,68 @@
 	}
 }
 
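+/* Initialize a VLAN-MAC classification object: credits are taken from both
+ * the MAC and VLAN pools, and the chip-specific callbacks are selected
+ * (legacy SET_MAC ramrod on E1H, classification rules on E2 and newer).
+ */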
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/* CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips other than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_remove_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+}
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
 			struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@
  * If credit is negative pool operations will always succeed (unlimited pool).
  *
  */
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
-					  int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit)
 {
 	/* Zero the object first */
 	memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@
 		/* CAM credit is equally divided between all active functions
 		 * on the PATH.
 		 */
-		if ((func_num > 0)) {
+		if (func_num > 0) {
 			if (!CHIP_REV_IS_SLOW(bp))
-				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+				cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
 			else
 				cam_sz = BNX2X_CAM_SIZE_EMUL;
 
@@ -3966,8 +4213,9 @@
 		 * on the PATH.
 		 */
 		if (func_num > 0) {
-			int credit = MAX_VLAN_CREDIT_E2 / func_num;
-			bnx2x_init_credit_pool(p, func_id * credit, credit);
+			int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+			bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
 		} else
 			/* this should never happen! Block VLAN operations. */
 			bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,8 +4308,14 @@
 	if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
 		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
 
-	if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
-		caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+	if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+		caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
 
 	/* RSS keys */
 	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
@@ -5669,10 +5923,14 @@
 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
 	rdata->path_id		= BP_PATH(bp);
 	rdata->network_cos_mode	= start_params->network_cos_mode;
-	rdata->tunnel_mode	= start_params->tunnel_mode;
-	rdata->gre_tunnel_type	= start_params->gre_tunnel_type;
-	rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
-	rdata->vxlan_dst_port	= cpu_to_le16(4789);
+
+	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
+	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);
+	rdata->inner_clss_l2gre	= start_params->inner_clss_l2gre;
+	rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+	rdata->inner_clss_vxlan	= start_params->inner_clss_vxlan;
+	rdata->inner_rss	= start_params->inner_rss;
+
 	rdata->sd_accept_mf_clss_fail = start_params->class_fail;
 	if (start_params->class_fail_ethtype) {
 		rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5948,14 @@
 			cpu_to_le16(0x8100);
 
 	rdata->no_added_tags = start_params->no_added_tags;
+
+	rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+	if (rdata->c2s_pri_tt_valid) {
+		memcpy(rdata->c2s_pri_trans_table.val,
+		       start_params->c2s_pri,
+		       MAX_VLAN_PRIORITIES);
+		rdata->c2s_pri_default = start_params->c2s_pri_default;
+	}
 	/* No need for an explicit memory barrier here as long we would
 	 * need to ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
@@ -5750,15 +6016,22 @@
 	if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
 		     &switch_update_params->changes)) {
 		rdata->update_tunn_cfg_flg = 1;
-		if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
 			     &switch_update_params->changes))
-			rdata->tunn_clss_en = 1;
-		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+			rdata->inner_clss_l2gre = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
 			     &switch_update_params->changes))
-			rdata->inner_gre_rss_en = 1;
-		rdata->tunnel_mode = switch_update_params->tunnel_mode;
-		rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
-		rdata->vxlan_dst_port = cpu_to_le16(4789);
+			rdata->inner_clss_vxlan = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+			     &switch_update_params->changes))
+			rdata->inner_clss_l2geneve = 1;
+		if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
+			     &switch_update_params->changes))
+			rdata->inner_rss = 1;
+		rdata->vxlan_dst_port =
+			cpu_to_le16(switch_update_params->vxlan_dst_port);
+		rdata->geneve_dst_port =
+			cpu_to_le16(switch_update_params->geneve_dst_port);
 	}
 
 	rdata->echo = SWITCH_UPDATE;
@@ -5885,6 +6158,8 @@
 		rdata->traffic_type_to_priority_cos[i] =
 			tx_start_params->traffic_type_to_priority_cos[i];
 
+	for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+		rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
 	/* No need for an explicit memory barrier here as long as we
 	 * ensure the ordering of writing to the SPQ element
 	 * and updating of the SPQ producer which involves a memory
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 86baecb..4048fc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@
 	BNX2X_RSS_IPV6,
 	BNX2X_RSS_IPV6_TCP,
 	BNX2X_RSS_IPV6_UDP,
-	BNX2X_RSS_GRE_INNER_HDRS,
+
+	BNX2X_RSS_IPV4_VXLAN,
+	BNX2X_RSS_IPV6_VXLAN,
+	BNX2X_RSS_TUNN_INNER_HDRS,
 };
 
 struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@
 	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
 	BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
 	BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
-	BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
-	BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+	BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+	BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
 };
 
 /* Allowed Function states */
@@ -1171,19 +1178,23 @@
 	/* Function cos mode */
 	u8 network_cos_mode;
 
-	/* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
-	u8 tunnel_mode;
+	/* UDP dest port for VXLAN */
+	u16 vxlan_dst_port;
 
-	/* tunneling classification enablement */
-	u8 tunn_clss_en;
+	/* UDP dest port for Geneve */
+	u16 geneve_dst_port;
 
-	/* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-	u8 gre_tunnel_type;
+	/* Enable inner Rx classifications for L2GRE packets */
+	u8 inner_clss_l2gre;
 
-	/* Enables Inner GRE RSS on the function, depends on the client RSS
-	 * capailities
-	 */
-	u8 inner_gre_rss_en;
+	/* Enable inner Rx classifications for L2-Geneve packets */
+	u8 inner_clss_l2geneve;
+
+	/* Enable inner Rx classification for vxlan packets */
+	u8 inner_clss_vxlan;
+
+	/* Enable RSS according to inner header */
+	u8 inner_rss;
 
 	/* Allows accepting of packets failing MF classification, possibly
 	 * only matching a given ethertype
@@ -1200,6 +1211,11 @@
 
 	/* Prevent inner vlans from being added by FW */
 	u8 no_added_tags;
+
+	/* Inner-to-Outer vlan priority mapping */
+	u8 c2s_pri[MAX_VLAN_PRIORITIES];
+	u8 c2s_pri_default;
+	u8 c2s_pri_valid;
 };
 
 struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@
 	u16 vlan;
 	u16 vlan_eth_type;
 	u8 vlan_force_prio;
-	u8 tunnel_mode;
-	u8 gre_tunnel_type;
+	u16 vxlan_dst_port;
+	u16 geneve_dst_port;
 };
 
 struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@
 	u8 dcb_enabled;
 	u8 dcb_version;
 	u8 dont_add_pri_0_en;
+	u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@
 			 unsigned long *pstate, bnx2x_obj_type type,
 			 struct bnx2x_credit_pool_obj *vlans_pool);
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
 					struct bnx2x_vlan_mac_obj *o);
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@
 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
 				 struct bnx2x_credit_pool_obj *p, u8 func_id,
 				 u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+			    int base, int credit);
 
 /****************** RSS CONFIGURATION ****************/
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
 			     u8 *ind_table);
 
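+/* Per-PF CAM credit: reserve credits for the maximum number of VFs on the
+ * path, divide the remainder between the functions, then add back this PF's
+ * own VF allocation.
+ */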
+#define PF_MAC_CREDIT_E2(bp, func_num)					\
+	((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num)					 \
+	((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+	 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
 #endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index f67348d..9d02734 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@
 	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
 	setup_p->gen_params.fp_hsi = vf->fp_hsi;
 
-	/* Setup-op pause params:
-	 * Nothing to do, the pause thresholds are set by default to 0 which
-	 * effectively turns off the feature for this queue. We don't want
-	 * one queue (VF) to interfering with another queue (another VF)
-	 */
-	if (vf->cfg_flags & VF_CFG_FW_FC)
-		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
-			  vf->abs_vfid);
 	/* Setup-op flags:
 	 * collect statistics, zero statistics, local-switching, security,
 	 * OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@
 }
 
 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
-				   int qid, bool drv_only, bool mac)
+				   int qid, bool drv_only, int type)
 {
 	struct bnx2x_vlan_mac_ramrod_params ramrod;
 	int rc;
 
 	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
-	   mac ? "MACs" : "VLANs");
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 
 	/* Prepare ramrod params */
 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-	if (mac) {
+	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+	} else if (type == BNX2X_VF_FILTER_MAC) {
 		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
 	} else {
-		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-			&ramrod.user_req.vlan_mac_flags);
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 	}
 	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@
 					     &ramrod.ramrod_flags);
 	if (rc) {
 		BNX2X_ERR("Failed to delete all %s\n",
-			  mac ? "MACs" : "VLANs");
+			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 		return rc;
 	}
 
-	/* Clear the vlan counters */
-	if (!mac)
-		atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
 	return 0;
 }
 
@@ -412,13 +405,17 @@
 
 	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
 	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
-	   filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 
 	/* Prepare ramrod params */
 	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-	if (filter->type == BNX2X_VF_FILTER_VLAN) {
-		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-			&ramrod.user_req.vlan_mac_flags);
+	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+		ramrod.user_req.u.vlan.vlan = filter->vid;
+		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
 		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
 		ramrod.user_req.u.vlan.vlan = filter->vid;
 	} else {
@@ -429,16 +426,6 @@
 	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
 					    BNX2X_VLAN_MAC_DEL;
 
-	/* Verify there are available vlan credits */
-	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
-	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
-	     vf_vlan_rules_cnt(vf))) {
-		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
-			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
-			  vf_vlan_rules_cnt(vf));
-		return -ENOMEM;
-	}
-
 	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
 	if (drv_only)
 		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@
 	if (rc && rc != -EEXIST) {
 		BNX2X_ERR("Failed to %s %s\n",
 			  filter->add ? "add" : "delete",
-			  filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
-								"VLAN");
+			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+				"VLAN-MAC" :
+			  (filter->type == BNX2X_VF_FILTER_MAC) ?
+				"MAC" : "VLAN");
 		return rc;
 	}
 
-	/* Update the vlan counters */
-	if (filter->type == BNX2X_VF_FILTER_VLAN)
-		bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
-				     &bnx2x_vfq(vf, qid, vlan_count));
-
 	return 0;
 }
 
@@ -511,21 +495,7 @@
 	if (rc)
 		goto op_err;
 
-	/* Configure vlan0 for leading queue */
-	if (!qid) {
-		struct bnx2x_vf_mac_vlan_filter filter;
-
-		memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
-		filter.type = BNX2X_VF_FILTER_VLAN;
-		filter.add = true;
-		filter.vid = 0;
-		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
-		if (rc)
-			goto op_err;
-	}
-
 	/* Schedule the configuration of any pending vlan filters */
-	vf->cfg_flags |= VF_CFG_VLAN;
 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 			       BNX2X_MSG_IOV);
 	return 0;
@@ -544,10 +514,16 @@
 	/* If needed, clean the filtering data base */
 	if ((qid == LEADING_IDX) &&
 	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
-		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN_MAC);
 		if (rc)
 			goto op_err;
-		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_VLAN);
+		if (rc)
+			goto op_err;
+		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+					     BNX2X_VF_FILTER_MAC);
 		if (rc)
 			goto op_err;
 	}
@@ -680,11 +656,18 @@
 		/* Remove filtering if feasible */
 		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-						     false, false);
+						     false,
+						     BNX2X_VF_FILTER_VLAN_MAC);
 			if (rc)
 				goto op_err;
 			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-						     false, true);
+						     false,
+						     BNX2X_VF_FILTER_VLAN);
+			if (rc)
+				goto op_err;
+			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+						     false,
+						     BNX2X_VF_FILTER_MAC);
 			if (rc)
 				goto op_err;
 			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@
 
 	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
 	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
-	if (vf->cfg_flags & VF_CFG_INT_SIMD)
-		val |= IGU_VF_CONF_SINGLE_ISR_EN;
 	val &= ~IGU_VF_CONF_PARENT_MASK;
 	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
 	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@
 	return 0;
 }
 
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
-					  struct bnx2x_virtf *vf,
-					  int new)
-{
-	int num = vf_vlan_rules_cnt(vf);
-	int diff = new - num;
-	bool rc = true;
-
-	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
-	   vf->abs_vfid, new, num);
-
-	if (diff > 0)
-		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
-	else if (diff < 0)
-		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
-	if (rc)
-		vf_vlan_rules_cnt(vf) = new;
-	else
-		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
-		   vf->abs_vfid);
-}
-
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -875,21 +833,13 @@
 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
 	struct vf_pf_resc_request *resc = &vf->alloc_resc;
-	u16 vlan_count = 0;
 
 	/* will be set only during VF-ACQUIRE */
 	resc->num_rxqs = 0;
 	resc->num_txqs = 0;
 
-	/* no credit calculations for macs (just yet) */
-	resc->num_mac_filters = 1;
-
-	/* divvy up vlan rules */
-	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
-	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
-	vlan_count = 1 << ilog2(vlan_count);
-	bnx2x_iov_re_set_vlan_filters(bp, vf,
-				      vlan_count / BNX2X_NR_VIRTFN(bp));
+	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 
 	/* no real limitation */
 	resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@
 
 	mutex_init(&bp->vfdb->bulletin_mutex);
 
+	if (SHMEM2_HAS(bp, sriov_switch_mode))
+		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
 	return 0;
 failed:
 	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@
 		vf->filter_state = 0;
 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
+		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+				       vf_vlan_rules_cnt(vf));
+		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+				       vf_mac_rules_cnt(vf));
+
 		/*  init mcast object - This object will be re-initialized
 		 *  during VF-ACQUIRE with the proper cl_id and cid.
 		 *  It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@
 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
-	/* Save a vlan filter for the Hypervisor */
 	return ((req_resc->num_rxqs <= rxq_cnt) &&
 		(req_resc->num_txqs <= txq_cnt) &&
 		(req_resc->num_sbs <= vf_sb_count(vf))   &&
 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -2091,16 +2048,12 @@
 	vf_sb_count(vf) = resc->num_sbs;
 	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
-	if (resc->num_mac_filters)
-		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-	/* Add an additional vlan filter credit for the hypervisor */
-	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
 	DP(BNX2X_MSG_IOV,
 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
 	   vf_sb_count(vf), vf_rxq_count(vf),
 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
-	   vf_vlan_rules_visible_cnt(vf));
+	   vf_vlan_rules_cnt(vf));
 
 	/* Initialize the queues */
 	if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 {
 	struct bnx2x_func_init_params func_init = {0};
-	u16 flags = 0;
 	int i;
 
 	/* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@
 	/* reset IGU VF statistics: MSIX */
 	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
 
-	/* vf init */
-	if (vf->cfg_flags & VF_CFG_STATS)
-		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
-	if (vf->cfg_flags & VF_CFG_TPA)
-		flags |= FUNC_FLG_TPA;
-
-	if (is_vf_multi(vf))
-		flags |= FUNC_FLG_RSS;
-
 	/* function setup */
-	func_init.func_flgs = flags;
 	func_init.pf_id = BP_FUNC(bp);
 	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
-	func_init.fw_stat_map = vf->fw_stat_map;
-	func_init.spq_map = vf->spq_map;
-	func_init.spq_prod = 0;
 	bnx2x_func_init(bp, &func_init);
 
 	/* Enable the vf */
@@ -2589,8 +2527,8 @@
 
 	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
 	for_each_vf(bp, vfidx) {
-	bulletin = BP_VF_BULLETIN(bp, vfidx);
-		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+		bulletin = BP_VF_BULLETIN(bp, vfidx);
+		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
 			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
 	}
 }
@@ -2808,20 +2746,58 @@
 	return rc;
 }
 
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+					 struct bnx2x_virtf *vf, bool accept)
+{
+	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+	unsigned long accept_flags;
+
+	/* need to remove/add the VF's accept_any_vlan bit */
+	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+	if (accept)
+		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+	else
+		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+			      accept_flags);
+	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+	bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				    u16 vlan, bool add)
+{
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+
+	/* configure the new vlan to device */
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.user_req.u.vlan.vlan = vlan;
+	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+					: BNX2X_VLAN_MAC_DEL;
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc) {
+		BNX2X_ERR("failed to configure vlan\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
-	struct bnx2x_queue_state_params q_params = {NULL};
-	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-	struct bnx2x_queue_update_params *update_params;
 	struct pf_vf_bulletin_content *bulletin = NULL;
-	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
 	struct bnx2x *bp = netdev_priv(dev);
 	struct bnx2x_vlan_mac_obj *vlan_obj;
 	unsigned long vlan_mac_flags = 0;
 	unsigned long ramrod_flags = 0;
 	struct bnx2x_virtf *vf = NULL;
-	unsigned long accept_flags;
-	int rc;
+	int i, rc;
 
 	if (vlan > 4095) {
 		BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@
 		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
 	bulletin->vlan = vlan;
 
+	/* Post update on VF's bulletin board */
+	rc = bnx2x_post_vf_bulletin(bp, vfidx);
+	if (rc)
+		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
 	mutex_unlock(&bp->vfdb->bulletin_mutex);
 
 	/* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@
 		goto out;
 	}
 
-	/* need to remove/add the VF's accept_any_vlan bit */
-	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
-	if (vlan)
-		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-	else
-		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
-	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
-			      accept_flags);
-	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
-	bnx2x_config_rx_mode(bp, &rx_ramrod);
-
-	/* configure the new vlan to device */
-	memset(&ramrod_param, 0, sizeof(ramrod_param));
-	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-	ramrod_param.vlan_mac_obj = vlan_obj;
-	ramrod_param.ramrod_flags = ramrod_flags;
-	set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-		&ramrod_param.user_req.vlan_mac_flags);
-	ramrod_param.user_req.u.vlan.vlan = vlan;
-	ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-	if (rc) {
-		BNX2X_ERR("failed to configure vlan\n");
-		rc =  -EINVAL;
-		goto out;
-	}
-
-	/* send queue update ramrod to configure default vlan and silent
-	 * vlan removal
+	/* Clear accept_any_vlan when the HV forces a vlan; otherwise set it
+	 * according to the VF's vlan-filtering capability
 	 */
-	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-	q_params.cmd = BNX2X_Q_CMD_UPDATE;
-	q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-	update_params = &q_params.params.update;
-	__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-		  &update_params->update_flags);
-	__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-		  &update_params->update_flags);
-	if (vlan == 0) {
-		/* if vlan is 0 then we want to leave the VF traffic
-		 * untagged, and leave the incoming traffic untouched
-		 * (i.e. do not remove any vlan tags).
-		 */
-		__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			    &update_params->update_flags);
-		__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			    &update_params->update_flags);
-	} else {
-		/* configure default vlan to vf queue and set silent
-		 * vlan removal (the vf remains unaware of this vlan).
-		 */
-		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-			  &update_params->update_flags);
-		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-			  &update_params->update_flags);
-		update_params->def_vlan = vlan;
-		update_params->silent_removal_value =
-			vlan & VLAN_VID_MASK;
-		update_params->silent_removal_mask = VLAN_VID_MASK;
-	}
+	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
 
-	/* Update the Queue state */
-	rc = bnx2x_queue_state_change(bp, &q_params);
-	if (rc) {
-		BNX2X_ERR("Failed to configure default VLAN\n");
+	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+	if (rc)
 		goto out;
-	}
 
-
-	/* clear the flag indicating that this VF needs its vlan
-	 * (will only be set if the HV configured the Vlan before vf was
-	 * up and we were called because the VF came up later
+	/* send queue update ramrods to configure default vlan and
+	 * silent vlan removal
 	 */
+	for_each_vfq(vf, i) {
+		struct bnx2x_queue_state_params q_params = {NULL};
+		struct bnx2x_queue_update_params *update_params;
+
+		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+		/* validate the Q is UP */
+		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
+			continue;
+
+		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+		q_params.cmd = BNX2X_Q_CMD_UPDATE;
+		update_params = &q_params.params.update;
+		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			  &update_params->update_flags);
+		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			  &update_params->update_flags);
+		if (vlan == 0) {
+			/* if vlan is 0 then we want to leave the VF traffic
+			 * untagged, and leave the incoming traffic untouched
+			 * (i.e. do not remove any vlan tags).
+			 */
+			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				    &update_params->update_flags);
+			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				    &update_params->update_flags);
+		} else {
+			/* configure default vlan to vf queue and set silent
+			 * vlan removal (the vf remains unaware of this vlan).
+			 */
+			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+				  &update_params->update_flags);
+			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+				  &update_params->update_flags);
+			update_params->def_vlan = vlan;
+			update_params->silent_removal_value =
+				vlan & VLAN_VID_MASK;
+			update_params->silent_removal_mask = VLAN_VID_MASK;
+		}
+
+		/* Update the Queue state */
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc) {
+			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+				  i);
+			goto out;
+		}
+	}
 out:
-	vf->cfg_flags &= ~VF_CFG_VLAN;
 	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
+	if (!rc)
+		DP(BNX2X_MSG_IOV,
+		   "updated VF[%d] vlan configuration (vlan = %d)\n",
+		   vfidx, vlan);
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 66ee62a..670a581 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@
 
 	/* VLANs object */
 	struct bnx2x_vlan_mac_obj	vlan_obj;
-	atomic_t vlan_count;		/* 0 means vlan-0 is set  ~ untagged */
+
+	/* VLAN-MACs object */
+	struct bnx2x_vlan_mac_obj	vlan_mac_obj;
+
 	unsigned long accept_flags;	/* last accept flags configured */
 
 	/* Queue Slow-path State object */
@@ -103,8 +108,10 @@
 
 struct bnx2x_vf_mac_vlan_filter {
 	int type;
-#define BNX2X_VF_FILTER_MAC	1
-#define BNX2X_VF_FILTER_VLAN	2
+#define BNX2X_VF_FILTER_MAC	BIT(0)
+#define BNX2X_VF_FILTER_VLAN	BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+	(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
 	bool add;
 	u8 *mac;
@@ -119,14 +126,9 @@
 /* vf context */
 struct bnx2x_virtf {
 	u16 cfg_flags;
-#define VF_CFG_STATS		0x0001
-#define VF_CFG_FW_FC		0x0002
-#define VF_CFG_TPA		0x0004
-#define VF_CFG_INT_SIMD		0x0008
-#define VF_CACHE_LINE		0x0010
-#define VF_CFG_VLAN		0x0020
-#define VF_CFG_STATS_COALESCE	0x0040
-#define VF_CFG_EXT_BULLETIN	0x0080
+#define VF_CFG_STATS_COALESCE	0x1
+#define VF_CFG_EXT_BULLETIN	0x2
+#define VF_CFG_VLAN_FILTER	0x4
 	u8 link_cfg;		/* IFLA_VF_LINK_STATE_AUTO
 				 * IFLA_VF_LINK_STATE_ENABLE
 				 * IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@
 	bool flr_clnup_stage;	/* true during flr cleanup */
 
 	/* dma */
-	dma_addr_t fw_stat_map;		/* valid iff VF_CFG_STATS */
+	dma_addr_t fw_stat_map;
 	u16 stats_stride;
-	dma_addr_t spq_map;
 	dma_addr_t bulletin_map;
 
 	/* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@
 #define vf_mac_rules_cnt(vf)		((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)		((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)		((vf)->alloc_resc.num_mc_filters)
-	/* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf)	(vf_vlan_rules_cnt(vf) - 1)
 
 	u8 sb_count;	/* actual number of SBs */
 	u8 igu_base_id;	/* base igu status block id */
@@ -207,6 +206,9 @@
 	enum channel_tlvs		op_current;
 
 	u8 fp_hsi;
+
+	struct bnx2x_credit_pool_obj	vf_vlans_pool;
+	struct bnx2x_credit_pool_obj	vf_macs_pool;
 };
 
 #define BNX2X_NR_VIRTFN(bp)	((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@
 #define FW_VF_HANDLE(abs_vfid)	\
 	(abs_vfid + FW_PF_MAX_HANDLE)
 
+#define GET_NUM_VFS_PER_PATH(bp)	64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp)		((bp)->vfdb ? (bp)->vfdb->sriov.total \
+						    : 0)
+#define VF_MAC_CREDIT_CNT		1
+#define VF_VLAN_CREDIT_CNT		2 /* VLAN0 + 'real' VLAN */
+
 /* locking and unlocking the channel mutex */
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			      enum channel_tlvs tlv);
@@ -274,6 +282,10 @@
 	} vlan_rdata;
 
 	union {
+		struct eth_classify_rules_ramrod_data	e2;
+	} vlan_mac_rdata;
+
+	union {
 		struct eth_filter_rules_ramrod_data	e2;
 	} rx_mode_rdata;
 
@@ -536,8 +548,14 @@
 
 int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
 
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
 #else /* CONFIG_BNX2X_SRIOV */
 
+#define GET_NUM_VFS_PER_PATH(bp)	0
+#define GET_NUM_VFS_PER_PF(bp)		0
+#define VF_MAC_CREDIT_CNT		0
+#define VF_VLAN_CREDIT_CNT		0
+
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
 				struct bnx2x_queue_sp_obj **q_obj) {}
 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@
 static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
 					      bool support_long) {}
 
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; }
+
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 69d699f0..7e0919a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 965539a..b2644ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 06b8c0d..1374e53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@
 	req->resc_request.num_sbs = bp->igu_sb_cnt;
 	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
 	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+	req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
 
 	/* pf 2 vf bulletin board address */
 	req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@
 
 	/* Bulletin support for bulletin board with length > legacy length */
 	req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+	/* vlan filtering is supported */
+	req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
 
 	/* add list termination tlv */
 	bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@
 		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
 	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
 	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+	bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
 	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
 		sizeof(bp->fw_ver));
 
@@ -546,7 +553,7 @@
 			   BNX2X_FILTER_MAC_PENDING,
 			   &vf->filter_state,
 			   BNX2X_OBJ_TYPE_RX_TX,
-			   &bp->macs_pool);
+			   &vf->vf_macs_pool);
 	/* vlan */
 	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
 			    cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@
 			    BNX2X_FILTER_VLAN_PENDING,
 			    &vf->filter_state,
 			    BNX2X_OBJ_TYPE_RX_TX,
-			    &bp->vlans_pool);
-
+			    &vf->vf_vlans_pool);
+	/* vlan-mac */
+	bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+				cl_id, q->cid, func_id,
+				bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+				bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+				BNX2X_FILTER_VLAN_MAC_PENDING,
+				&vf->filter_state,
+				BNX2X_OBJ_TYPE_RX_TX,
+				&vf->vf_macs_pool,
+				&vf->vf_vlans_pool);
 	/* mcast */
 	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
 			     q->cid, func_id, func_id,
@@ -723,7 +739,7 @@
 
 	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
 	if (set)
-		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
 
 	/* sample bulletin board for new mac */
 	bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@
 	return 0;
 }
 
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+	int rc = 0;
+
+	if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+		DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+		return 0;
+	}
+
+	/* clear mailbox and prep first tlv */
+	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+			sizeof(*req));
+
+	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+	req->vf_qid = vf_qid;
+	req->n_mac_vlan_filters = 1;
+
+	req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+	if (add)
+		req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+	/* sample bulletin board for hypervisor vlan */
+	bnx2x_sample_bulletin(bp);
+
+	if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+		BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	req->filters[0].vlan_tag = vid;
+
+	/* add list termination tlv */
+	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	/* output tlvs list */
+	bnx2x_dp_tlv_list(bp, req);
+
+	/* send message to pf */
+	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+	if (rc) {
+		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+		goto out;
+	}
+
+	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+			  vid);
+		rc = -EINVAL;
+	}
+out:
+	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+	return rc;
+}
+
 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 {
 	int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@
 		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
 		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+		if (mode == BNX2X_RX_MODE_PROMISC)
+			req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
 	}
 
+	if (bp->accept_any_vlan)
+		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
 	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
 	req->vf_qid = 0;
 
@@ -1188,7 +1270,8 @@
 	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
 	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
 				   PFVF_CAP_TPA |
-				   PFVF_CAP_TPA_UPDATE);
+				   PFVF_CAP_TPA_UPDATE |
+				   PFVF_CAP_VLAN_FILTER);
 	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
 			  sizeof(resp->pfdev_info.fw_ver));
 
@@ -1203,7 +1286,7 @@
 			bnx2x_vf_max_queue_cnt(bp, vf);
 		resc->num_sbs = vf_sb_count(vf);
 		resc->num_mac_filters = vf_mac_rules_cnt(vf);
-		resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
 		resc->num_mc_filters = 0;
 
 		if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@
 		vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
 	}
 
+	if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+		DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+		   vf->abs_vfid);
+		vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+	} else {
+		vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+	}
+
 out:
 	/* response */
 	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@
 	int rc;
 
 	/* record ghost addresses from vf message */
-	vf->spq_map = init->spq_addr;
 	vf->fw_stat_map = init->stats_addr;
 	vf->stats_stride = init->stats_stride;
 	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@
 
 		if ((msg_filter->flags & type_flag) != type_flag)
 			continue;
-		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+		memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+		if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
 			fl->filters[j].mac = msg_filter->mac;
-			fl->filters[j].type = BNX2X_VF_FILTER_MAC;
-		} else {
-			fl->filters[j].vid = msg_filter->vlan_tag;
-			fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+			fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
 		}
-		fl->filters[j].add =
-			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
-			true : false;
+		if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
+			fl->filters[j].vid = msg_filter->vlan_tag;
+			fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
+		}
+		fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
 		fl->count++;
+		j++;
 	}
 	if (!fl->count)
 		kfree(fl);
@@ -1598,6 +1689,18 @@
 	return 0;
 }
 
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+				    u32 flags)
+{
+	int i, cnt = 0;
+
+	for (i = 0; i < filters->n_mac_vlan_filters; i++)
+		if  ((filters->filters[i].flags & flags) == flags)
+			cnt++;
+
+	return cnt;
+}
+
 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
 				       struct vfpf_q_mac_vlan_filter *filter)
 {
@@ -1629,6 +1732,7 @@
 
 #define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER	(VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
 
 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -1639,16 +1743,33 @@
 
 	/* check for any mac/vlan changes */
 	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-		/* build mac list */
 		struct bnx2x_vf_mac_vlan_filters *fl = NULL;
 
+		/* build vlan-mac list */
+		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+					       VFPF_VLAN_MAC_FILTER);
+		if (rc)
+			goto op_err;
+
+		if (fl) {
+
+			/* set vlan-mac list */
+			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+							   msg->vf_qid,
+							   false);
+			if (rc)
+				goto op_err;
+		}
+
+		/* build mac list */
+		fl = NULL;
+
 		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
 					       VFPF_MAC_FILTER);
 		if (rc)
 			goto op_err;
 
 		if (fl) {
-
 			/* set mac list */
 			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
 							   msg->vf_qid,
@@ -1657,22 +1778,6 @@
 				goto op_err;
 		}
 
-		/* build vlan list */
-		fl = NULL;
-
-		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-					       VFPF_VLAN_FILTER);
-		if (rc)
-			goto op_err;
-
-		if (fl) {
-			/* set vlan list */
-			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
-							   msg->vf_qid,
-							   false);
-			if (rc)
-				goto op_err;
-		}
 	}
 
 	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@
 			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 		}
 
-		/* A packet arriving the vf's mac should be accepted
-		 * with any vlan, unless a vlan has already been
-		 * configured.
+		/* any_vlan is not configured if HV is forcing VLAN
+		 * any_vlan is configured if
+		 *   1. VF does not support vlan filtering
+		 *   OR
+		 *   2. VF supports vlan filtering and explicitly requested it
 		 */
-		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+		    (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+		     msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
 			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
 		/* set rx-mode */
@@ -1727,17 +1836,31 @@
 	 * since queue was not set up.
 	 */
 	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
-		/* once a mac was set by ndo can only accept a single mac... */
-		if (filters->n_mac_vlan_filters > 1) {
-			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
-				  vf->abs_vfid);
-			rc = -EPERM;
-			goto response;
+		struct vfpf_q_mac_vlan_filter *filter = NULL;
+		int i;
+
+		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+			if (!(filters->filters[i].flags &
+			      VFPF_Q_FILTER_DEST_MAC_VALID))
+				continue;
+
+			/* once a mac was set by ndo can only accept
+			 * a single mac...
+			 */
+			if (filter) {
+				BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+					  vf->abs_vfid,
+					  filters->n_mac_vlan_filters);
+				rc = -EPERM;
+				goto response;
+			}
+
+			filter = &filters->filters[i];
 		}
 
 		/* ...and only the mac set by the ndo */
-		if (filters->n_mac_vlan_filters == 1 &&
-		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+		if (filter &&
+		    !ether_addr_equal(filter->mac, bulletin->mac)) {
 			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
 				  vf->abs_vfid);
 
@@ -1759,17 +1882,14 @@
 
 	/* if vlan was set by hypervisor we don't allow guest to config vlan */
 	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
-		int i;
-
 		/* search for vlan filters */
-		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
-			if (filters->filters[i].flags &
-			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
-				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
-					  vf->abs_vfid);
-				rc = -EPERM;
-				goto response;
-			}
+
+		if (bnx2x_vf_filters_contain(filters,
+					     VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+			BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+				  vf->abs_vfid);
+			rc = -EPERM;
+			goto response;
 		}
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index b86479f..64f2b52 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
  *
  * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the "GPL"),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
  *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module.  An independent module is a module which is
+ * not derived from this software.  The special exception does not apply to any
+ * modifications of the software.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@
 #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST		0x00000004
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST	0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST		0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN		0x00000020
+
 #define BULLETIN_CONTENT_SIZE		(sizeof(struct pf_vf_bulletin_content))
 #define BULLETIN_CONTENT_LEGACY_SIZE	(32)
 #define BULLETIN_ATTEMPTS	5 /* crc failures before throwing towel */
@@ -127,6 +135,7 @@
 		u8 fp_hsi_ver;
 		u8 caps;
 #define VF_CAP_SUPPORT_EXT_BULLETIN	(1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER	(1 << 1)
 	} vfdev_info;
 
 	struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@
 	struct pf_vf_pfdev_info {
 		u32 chip_num;
 		u32 pf_cap;
-#define PFVF_CAP_RSS		0x00000001
-#define PFVF_CAP_DHC		0x00000002
-#define PFVF_CAP_TPA		0x00000004
-#define PFVF_CAP_TPA_UPDATE	0x00000008
+#define PFVF_CAP_RSS          0x00000001
+#define PFVF_CAP_DHC          0x00000002
+#define PFVF_CAP_TPA          0x00000004
+#define PFVF_CAP_TPA_UPDATE   0x00000008
+#define PFVF_CAP_VLAN_FILTER  0x00000010
+
 		char fw_ver[32];
 		u16 db_size;
 		u8  indices_per_sb;
@@ -288,7 +299,7 @@
 	u32 flags;
 #define VFPF_Q_FILTER_DEST_MAC_VALID	0x01
 #define VFPF_Q_FILTER_VLAN_TAG_VALID	0x02
-#define VFPF_Q_FILTER_SET_MAC		0x100	/* set/clear */
+#define VFPF_Q_FILTER_SET		0x100	/* set/clear */
 	u8  mac[ETH_ALEN];
 	u16 vlan_tag;
 };
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 17c145f..b69dc58 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -192,6 +192,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_CTX_WR_CMD;
 	io->cid_addr = cid_addr;
 	io->offset = off;
@@ -206,6 +207,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
 	io->offset = off;
 	io->dma_addr = addr;
@@ -219,6 +221,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_l2_ring *ring = &info.data.ring;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	if (start)
 		info.cmd = DRV_CTL_START_L2_CMD;
 	else
@@ -236,6 +239,7 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_IO_WR_CMD;
 	io->offset = off;
 	io->data = val;
@@ -249,13 +253,14 @@
 	struct drv_ctl_info info;
 	struct drv_ctl_io *io = &info.data.io;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = DRV_CTL_IO_RD_CMD;
 	io->offset = off;
 	ethdev->drv_ctl(dev->netdev, &info);
 	return io->data;
 }
 
-static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg, int state)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_eth_dev *ethdev = cp->ethdev;
@@ -263,6 +268,7 @@
 	struct fcoe_capabilities *fcoe_cap =
 		&info.data.register_data.fcoe_features;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	if (reg) {
 		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
 		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
@@ -272,6 +278,7 @@
 	}
 
 	info.data.ulp_type = ulp_type;
+	info.drv_state = state;
 	ethdev->drv_ctl(dev->netdev, &info);
 }
 
@@ -286,6 +293,7 @@
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	struct drv_ctl_info info;
 
+	memset(&info, 0, sizeof(struct drv_ctl_info));
 	info.cmd = cmd;
 	info.data.credit.credit_count = count;
 	ethdev->drv_ctl(dev->netdev, &info);
@@ -591,7 +599,7 @@
 
 	mutex_unlock(&cnic_lock);
 
-	cnic_ulp_ctl(dev, ulp_type, true);
+	cnic_ulp_ctl(dev, ulp_type, true, DRV_ACTIVE);
 
 	return 0;
 
@@ -636,7 +644,10 @@
 	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
 		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
 
-	cnic_ulp_ctl(dev, ulp_type, false);
+	if (test_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+		cnic_ulp_ctl(dev, ulp_type, false, DRV_UNLOADED);
+	else
+		cnic_ulp_ctl(dev, ulp_type, false, DRV_INACTIVE);
 
 	return 0;
 }
@@ -4267,6 +4278,7 @@
 
 		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
 
+		memset(&info, 0, sizeof(struct drv_ctl_info));
 		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
 		cp->ethdev->drv_ctl(dev->netdev, &info);
 	}
@@ -5433,6 +5445,23 @@
 	kfree(dev);
 }
 
+static int cnic_get_fc_npiv_tbl(struct cnic_dev *dev,
+				struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct bnx2x *bp = netdev_priv(dev->netdev);
+	int ret;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;     /* bnx2x is down */
+
+	if (!BNX2X_CHIP_IS_E2_PLUS(bp))
+		return -EINVAL;
+
+	ret = cp->ethdev->drv_get_fc_npiv_tbl(dev->netdev, npiv_tbl);
+	return ret;
+}
+
 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
 				       struct pci_dev *pdev)
 {
@@ -5451,6 +5480,7 @@
 	cdev->register_device = cnic_register_device;
 	cdev->unregister_device = cnic_unregister_device;
 	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+	cdev->get_fc_npiv_tbl = cnic_get_fc_npiv_tbl;
 
 	cp = cdev->cnic_priv;
 	cp->dev = cdev;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ef6125b..789e5c7 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -15,8 +15,8 @@
 
 #include "bnx2x/bnx2x_mfw_req.h"
 
-#define CNIC_MODULE_VERSION	"2.5.21"
-#define CNIC_MODULE_RELDATE	"January 29, 2015"
+#define CNIC_MODULE_VERSION	"2.5.22"
+#define CNIC_MODULE_RELDATE	"July 20, 2015"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -151,6 +151,11 @@
 
 struct drv_ctl_info {
 	int	cmd;
+	int     drv_state;
+#define DRV_NOP		0
+#define DRV_ACTIVE	1
+#define DRV_INACTIVE	2
+#define DRV_UNLOADED	3
 	union {
 		struct drv_ctl_spq_credit credit;
 		struct drv_ctl_io io;
@@ -161,6 +166,15 @@
 	} data;
 };
 
+#define MAX_NPIV_ENTRIES 64
+#define FC_NPIV_WWN_SIZE 8
+
+struct cnic_fc_npiv_tbl {
+	u8 wwpn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+	u8 wwnn[MAX_NPIV_ENTRIES][FC_NPIV_WWN_SIZE];
+	u32 count;
+};
+
 struct cnic_ops {
 	struct module	*cnic_owner;
 	/* Calls to these functions are protected by RCU.  When
@@ -226,6 +240,8 @@
 	int		(*drv_submit_kwqes_16)(struct net_device *,
 					       struct kwqe_16 *[], u32);
 	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	int		(*drv_get_fc_npiv_tbl)(struct net_device *,
+					       struct cnic_fc_npiv_tbl *);
 	unsigned long	reserved1[2];
 	union drv_info_to_mcp	*addr_drv_info_to_mcp;
 };
@@ -314,6 +330,7 @@
 	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
 	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
 				 char *data, u16 data_size);
+	int (*get_fc_npiv_tbl)(struct cnic_dev *, struct cnic_fc_npiv_tbl *);
 	unsigned long	flags;
 #define CNIC_F_CNIC_UP		1
 #define CNIC_F_BNX2_CLASS	3
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 64c1e9d..fadbd00 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -907,9 +907,8 @@
 	}
 
 	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
 	if (mode == GENET_POWER_PASSIVE)
-		bcmgenet_mii_reset(priv->dev);
+		bcmgenet_phy_power_set(priv->dev, true);
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
@@ -1725,7 +1724,7 @@
 	int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
 	/* Monitor cable plug/unplugged event for internal PHY */
-	if (phy_is_internal(priv->phydev)) {
+	if (priv->internal_phy) {
 		int0_enable |= UMAC_IRQ_LINK_EVENT;
 	} else if (priv->ext_phy) {
 		int0_enable |= UMAC_IRQ_LINK_EVENT;
@@ -2126,6 +2125,8 @@
 	int ret = 0;
 	int timeout = 0;
 	u32 reg;
+	u32 dma_ctrl;
+	int i;
 
 	/* Disable TDMA to stop add more frames in TX DMA */
 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
@@ -2169,6 +2170,20 @@
 		ret = -ETIMEDOUT;
 	}
 
+	dma_ctrl = 0;
+	for (i = 0; i < priv->hw_params->rx_queues; i++)
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+	dma_ctrl = 0;
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
+	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+	reg &= ~dma_ctrl;
+	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
 	return ret;
 }
 
@@ -2389,6 +2404,23 @@
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+	struct bcmgenet_priv *priv = netdev_priv(dev);
+
+	/* Invoke the main RX/TX interrupt handler */
+	disable_irq(priv->irq0);
+	bcmgenet_isr0(priv->irq0, priv);
+	enable_irq(priv->irq0);
+
+	/* And the interrupt handler for RX/TX priority queues */
+	disable_irq(priv->irq1);
+	bcmgenet_isr1(priv->irq1, priv);
+	enable_irq(priv->irq1);
+}
+#endif
+
 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 {
 	u32 reg;
@@ -2626,13 +2658,12 @@
 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
 
 	/* Turn on the clock */
-	if (!IS_ERR(priv->clk))
-		clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	/* If this is an internal GPHY, power it back on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
 	 */
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
 	/* take MAC out of reset */
@@ -2651,7 +2682,7 @@
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-	if (phy_is_internal(priv->phydev)) {
+	if (priv->internal_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 		reg |= EXT_ENERGY_DET_MASK;
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2687,23 +2718,24 @@
 		goto err_irq0;
 	}
 
-	/* Re-configure the port multiplexer towards the PHY device */
-	bcmgenet_mii_config(priv->dev, false);
-
-	phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
-			   priv->phy_interface);
+	ret = bcmgenet_mii_probe(dev);
+	if (ret) {
+		netdev_err(dev, "failed to connect to PHY\n");
+		goto err_irq1;
+	}
 
 	bcmgenet_netif_start(dev);
 
 	return 0;
 
+err_irq1:
+	free_irq(priv->irq1, priv);
 err_irq0:
-	free_irq(priv->irq0, dev);
+	free_irq(priv->irq0, priv);
 err_fini_dma:
 	bcmgenet_fini_dma(priv);
 err_clk_disable:
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 	return ret;
 }
 
@@ -2757,11 +2789,10 @@
 	free_irq(priv->irq0, priv);
 	free_irq(priv->irq1, priv);
 
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -2820,8 +2851,6 @@
 
 	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
 
-	bcmgenet_disable_tx_napi(priv);
-
 	for (q = 0; q < priv->hw_params->tx_queues; q++)
 		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
 	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
@@ -2837,8 +2866,6 @@
 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
 	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
 
-	bcmgenet_enable_tx_napi(priv);
-
 	dev->trans_start = jiffies;
 
 	dev->stats.tx_errors++;
@@ -2941,6 +2968,9 @@
 	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
 	.ndo_do_ioctl		= bcmgenet_ioctl,
 	.ndo_set_features	= bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= bcmgenet_poll_controller,
+#endif
 };
 
 /* Array of GENET hardware parameters/characteristics */
@@ -3214,11 +3244,12 @@
 		priv->version = pd->genet_version;
 
 	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
-	if (IS_ERR(priv->clk))
+	if (IS_ERR(priv->clk)) {
 		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+		priv->clk = NULL;
+	}
 
-	if (!IS_ERR(priv->clk))
-		clk_prepare_enable(priv->clk);
+	clk_prepare_enable(priv->clk);
 
 	bcmgenet_set_hw_params(priv);
 
@@ -3229,8 +3260,10 @@
 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
 
 	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
-	if (IS_ERR(priv->clk_wol))
+	if (IS_ERR(priv->clk_wol)) {
 		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+		priv->clk_wol = NULL;
+	}
 
 	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
 	if (IS_ERR(priv->clk_eee)) {
@@ -3256,8 +3289,7 @@
 	netif_carrier_off(dev);
 
 	/* Turn off the main clock, WOL clock is handled separately */
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	err = register_netdev(dev);
 	if (err)
@@ -3266,8 +3298,7 @@
 	return err;
 
 err_clk_disable:
-	if (!IS_ERR(priv->clk))
-		clk_disable_unprepare(priv->clk);
+	clk_disable_unprepare(priv->clk);
 err:
 	free_netdev(dev);
 	return err;
@@ -3319,7 +3350,7 @@
 	if (device_may_wakeup(d) && priv->wolopts) {
 		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
 		clk_prepare_enable(priv->clk_wol);
-	} else if (phy_is_internal(priv->phydev)) {
+	} else if (priv->internal_phy) {
 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 	}
 
@@ -3348,7 +3379,7 @@
 	/* If this is an internal GPHY, power it back on now, before UniMAC is
 	 * brought out of reset as absolutely no UniMAC activity is allowed
 	 */
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
 	bcmgenet_umac_reset(priv);
@@ -3363,14 +3394,14 @@
 
 	phy_init_hw(priv->phydev);
 	/* Speed settings must be restored */
-	bcmgenet_mii_config(priv->dev, false);
+	bcmgenet_mii_config(priv->dev);
 
 	/* disable ethernet MAC while updating its registers */
 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
 
 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-	if (phy_is_internal(priv->phydev)) {
+	if (priv->internal_phy) {
 		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 		reg |= EXT_ENERGY_DET_MASK;
 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 6159dea..7299d10 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -593,6 +593,7 @@
 	/* MDIO bus variables */
 	wait_queue_head_t wq;
 	struct phy_device *phydev;
+	bool internal_phy;
 	struct device_node *phy_dn;
 	struct device_node *mdio_dn;
 	struct mii_bus *mii_bus;
@@ -670,9 +671,9 @@
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index adf23d2..c8affad 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -163,14 +163,13 @@
 	phy_print_status(phydev);
 }
 
-void bcmgenet_mii_reset(struct net_device *dev)
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+					  struct fixed_phy_status *status)
 {
-	struct bcmgenet_priv *priv = netdev_priv(dev);
+	if (dev && dev->phydev && status)
+		status->link = dev->phydev->link;
 
-	if (priv->phydev) {
-		phy_init_hw(priv->phydev);
-		phy_start_aneg(priv->phydev);
-	}
+	return 0;
 }
 
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
@@ -215,7 +214,6 @@
 	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
 	reg |= EXT_PWR_DN_EN_LD;
 	bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-	bcmgenet_mii_reset(dev);
 }
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -226,9 +224,13 @@
 	reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
 	reg |= LED_ACT_SOURCE_MAC;
 	bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+	if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+		fixed_phy_set_link_update(priv->phydev,
+					  bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = priv->phydev;
@@ -238,10 +240,10 @@
 	u32 port_ctrl;
 	u32 reg;
 
-	priv->ext_phy = !phy_is_internal(priv->phydev) &&
+	priv->ext_phy = !priv->internal_phy &&
 			(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		priv->phy_interface = PHY_INTERFACE_MODE_NA;
 
 	switch (priv->phy_interface) {
@@ -259,7 +261,7 @@
 
 		bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
 
-		if (phy_is_internal(priv->phydev)) {
+		if (priv->internal_phy) {
 			phy_name = "internal PHY";
 			bcmgenet_internal_phy_setup(dev);
 		} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +323,12 @@
 		bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 	}
 
-	if (init)
-		dev_info(kdev, "configuring instance for %s\n", phy_name);
+	dev_info_once(kdev, "configuring instance for %s\n", phy_name);
 
 	return 0;
 }
 
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +346,6 @@
 	priv->old_pause = -1;
 
 	if (dn) {
-		if (priv->phydev) {
-			pr_info("PHY already attached\n");
-			return 0;
-		}
-
-		/* In the case of a fixed PHY, the DT node associated
-		 * to the PHY is the Ethernet MAC DT node.
-		 */
-		if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
-			ret = of_phy_register_fixed_link(dn);
-			if (ret)
-				return ret;
-
-			priv->phy_dn = of_node_get(dn);
-		}
-
 		phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
 					phy_flags, priv->phy_interface);
 		if (!phydev) {
@@ -386,7 +371,7 @@
 	 * PHY speed which is needed for bcmgenet_mii_config() to configure
 	 * things appropriately.
 	 */
-	ret = bcmgenet_mii_config(dev, true);
+	ret = bcmgenet_mii_config(dev);
 	if (ret) {
 		phy_disconnect(priv->phydev);
 		return ret;
@@ -397,14 +382,11 @@
 	/* The internal PHY has its link interrupts routed to the
 	 * Ethernet MAC ISRs
 	 */
-	if (phy_is_internal(priv->phydev))
+	if (priv->internal_phy)
 		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
 	else
 		priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
-	pr_info("attached PHY at address %d [%s]\n",
-		phydev->addr, phydev->drv->name);
-
 	return 0;
 }
 
@@ -490,7 +472,10 @@
 {
 	struct device_node *dn = priv->pdev->dev.of_node;
 	struct device *kdev = &priv->pdev->dev;
+	const char *phy_mode_str = NULL;
+	struct phy_device *phydev = NULL;
 	char *compat;
+	int phy_mode;
 	int ret;
 
 	compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +498,43 @@
 	/* Fetch the PHY phandle */
 	priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
 
+	/* In the case of a fixed PHY, the DT node associated
+	 * to the PHY is the Ethernet MAC DT node.
+	 */
+	if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+		ret = of_phy_register_fixed_link(dn);
+		if (ret)
+			return ret;
+
+		priv->phy_dn = of_node_get(dn);
+	}
+
 	/* Get the link mode */
-	priv->phy_interface = of_get_phy_mode(dn);
+	phy_mode = of_get_phy_mode(dn);
+	priv->phy_interface = phy_mode;
 
-	return 0;
-}
+	/* We need to specifically look up whether this PHY interface is internal
+	 * or not *before* we even try to probe the PHY driver over MDIO as we
+	 * may have shut down the internal PHY for power saving purposes.
+	 */
+	if (phy_mode < 0) {
+		ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+		if (ret < 0) {
+			dev_err(kdev, "invalid PHY mode property\n");
+			return ret;
+		}
 
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
-					  struct fixed_phy_status *status)
-{
-	if (dev && dev->phydev && status)
-		status->link = dev->phydev->link;
+		priv->phy_interface = PHY_INTERFACE_MODE_NA;
+		if (!strcasecmp(phy_mode_str, "internal"))
+			priv->internal_phy = true;
+	}
+
+	/* Make sure we initialize MoCA PHYs with a link down */
+	if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+		phydev = of_phy_find_device(dn);
+		if (phydev)
+			phydev->link = 0;
+	}
 
 	return 0;
 }
@@ -574,18 +585,15 @@
 			.asym_pause = 0,
 		};
 
-		phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
 		if (!phydev || IS_ERR(phydev)) {
 			dev_err(kdev, "failed to register fixed PHY device\n");
 			return -ENODEV;
 		}
 
-		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
-			ret = fixed_phy_set_link_update(
-				phydev, bcmgenet_fixed_phy_link_update);
-			if (!ret)
-				phydev->link = 0;
-		}
+		/* Make sure we initialize MoCA PHYs with a link down */
+		phydev->link = 0;
+
 	}
 
 	priv->phydev = phydev;
@@ -615,10 +623,6 @@
 
 	ret = bcmgenet_mii_bus_init(priv);
 	if (ret)
-		goto out_free;
-
-	ret = bcmgenet_mii_probe(dev);
-	if (ret)
 		goto out;
 
 	return 0;
@@ -626,7 +630,6 @@
 out:
 	of_node_put(priv->phy_dn);
 	mdiobus_unregister(priv->mii_bus);
-out_free:
 	kfree(priv->mii_bus->irq);
 	mdiobus_free(priv->mii_bus);
 	return ret;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 73c934c..79789d8 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10757,7 +10757,7 @@
 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
 				sizeof(temperature));
 	spin_unlock_bh(&tp->lock);
-	return sprintf(buf, "%u\n", temperature);
+	return sprintf(buf, "%u\n", temperature * 1000);
 }
 
 
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index bf9eb2e..88c1e1a 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2774,8 +2774,7 @@
 
 
 static const struct macb_config zynqmp_config = {
-	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-		MACB_CAPS_JUMBO,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
@@ -2783,8 +2782,7 @@
 };
 
 static const struct macb_config zynq_config = {
-	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-		MACB_CAPS_NO_GIGABIT_HALF,
+	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
 	.dma_burst_length = 16,
 	.clk_init = macb_clk_init,
 	.init = macb_init,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 1895b6b..6e1faea 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -399,7 +399,7 @@
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE	0x20000000
 #define MACB_CAPS_SG_DISABLED			0x40000000
 #define MACB_CAPS_MACB_IS_GEM			0x80000000
-#define MACB_CAPS_JUMBO				0x00000008
+#define MACB_CAPS_JUMBO				0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)					\
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 02e23e6..9b35d14 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -34,6 +34,8 @@
 config	THUNDER_NIC_BGX
 	tristate "Thunder MAC interface driver (BGX)"
 	depends on 64BIT
+	select PHYLIB
+	select MDIO_OCTEON
 	---help---
 	  This driver supports programming and controlling of MAC
 	  interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 8aee250..d3950b2 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -135,6 +135,7 @@
 #define	NICVF_TX_TIMEOUT		(50 * HZ)
 
 struct nicvf_cq_poll {
+	struct  nicvf *nicvf;
 	u8	cq_idx;		/* Completion queue index */
 	struct	napi_struct napi;
 };
@@ -190,10 +191,10 @@
 };
 
 struct nicvf_hw_stats {
-	u64 rx_bytes_ok;
-	u64 rx_ucast_frames_ok;
-	u64 rx_bcast_frames_ok;
-	u64 rx_mcast_frames_ok;
+	u64 rx_bytes;
+	u64 rx_ucast_frames;
+	u64 rx_bcast_frames;
+	u64 rx_mcast_frames;
 	u64 rx_fcs_errors;
 	u64 rx_l2_errors;
 	u64 rx_drop_red;
@@ -204,6 +205,31 @@
 	u64 rx_drop_mcast;
 	u64 rx_drop_l3_bcast;
 	u64 rx_drop_l3_mcast;
+	u64 rx_bgx_truncated_pkts;
+	u64 rx_jabber_errs;
+	u64 rx_fcs_errs;
+	u64 rx_bgx_errs;
+	u64 rx_prel2_errs;
+	u64 rx_l2_hdr_malformed;
+	u64 rx_oversize;
+	u64 rx_undersize;
+	u64 rx_l2_len_mismatch;
+	u64 rx_l2_pclp;
+	u64 rx_ip_ver_errs;
+	u64 rx_ip_csum_errs;
+	u64 rx_ip_hdr_malformed;
+	u64 rx_ip_payload_malformed;
+	u64 rx_ip_ttl_errs;
+	u64 rx_l3_pclp;
+	u64 rx_l4_malformed;
+	u64 rx_l4_csum_errs;
+	u64 rx_udp_len_errs;
+	u64 rx_l4_port_errs;
+	u64 rx_tcp_flag_errs;
+	u64 rx_tcp_offset_errs;
+	u64 rx_l4_pclp;
+	u64 rx_truncated_pkts;
+
 	u64 tx_bytes_ok;
 	u64 tx_ucast_frames_ok;
 	u64 tx_bcast_frames_ok;
@@ -222,6 +248,7 @@
 	u64 rx_frames_1518;
 	u64 rx_frames_jumbo;
 	u64 rx_drops;
+
 	/* Tx */
 	u64 tx_frames_ok;
 	u64 tx_drops;
@@ -231,13 +258,24 @@
 };
 
 struct nicvf {
+	struct nicvf		*pnicvf;
 	struct net_device	*netdev;
 	struct pci_dev		*pdev;
 	u8			vf_id;
 	u8			node;
-	u8			tns_mode;
+	u8			tns_mode:1;
+	u8			sqs_mode:1;
+	u8			loopback_supported:1;
 	u16			mtu;
 	struct queue_set	*qs;
+#define	MAX_SQS_PER_VF_SINGLE_NODE		5
+#define	MAX_SQS_PER_VF				11
+	u8			sqs_id;
+	u8			sqs_count; /* Secondary Qset count */
+	struct nicvf		*snicvf[MAX_SQS_PER_VF];
+	u8			rx_queues;
+	u8			tx_queues;
+	u8			max_queues;
 	void __iomem		*reg_base;
 	bool			link_up;
 	u8			duplex;
@@ -257,7 +295,7 @@
 	u32			cq_coalesce_usecs;
 
 	u32			msg_enable;
-	struct nicvf_hw_stats   stats;
+	struct nicvf_hw_stats   hw_stats;
 	struct nicvf_drv_stats  drv_stats;
 	struct bgx_stats	bgx_stats;
 	struct work_struct	reset_task;
@@ -269,10 +307,9 @@
 	char			irq_name[NIC_VF_MSIX_VECTORS][20];
 	bool			irq_allocated[NIC_VF_MSIX_VECTORS];
 
-	bool			pf_ready_to_rcv_msg;
+	/* VF <-> PF mailbox communication */
 	bool			pf_acked;
 	bool			pf_nacked;
-	bool			bgx_stats_acked;
 	bool			set_mac_pending;
 } ____cacheline_aligned_in_smp;
 
@@ -304,14 +341,21 @@
 #define	NIC_MBOX_MSG_RQ_SW_SYNC		0x0F	/* Flush inflight pkts to RQ */
 #define	NIC_MBOX_MSG_BGX_STATS		0x10	/* Get stats from BGX */
 #define	NIC_MBOX_MSG_BGX_LINK_CHANGE	0x11	/* BGX:LMAC link status */
-#define NIC_MBOX_MSG_CFG_DONE		0x12	/* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN		0x13	/* VF is being shutdown */
+#define	NIC_MBOX_MSG_ALLOC_SQS		0x12	/* Allocate secondary Qset */
+#define	NIC_MBOX_MSG_NICVF_PTR		0x13	/* Send nicvf ptr to PF */
+#define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
+#define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqs nicvf ptr to PVF */
+#define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+#define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
+#define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
 
 struct nic_cfg_msg {
 	u8    msg;
 	u8    vf_id;
-	u8    tns_mode;
 	u8    node_id;
+	u8    tns_mode:1;
+	u8    sqs_mode:1;
+	u8    loopback_supported:1;
 	u8    mac_addr[ETH_ALEN];
 };
 
@@ -319,6 +363,7 @@
 struct qs_cfg_msg {
 	u8    msg;
 	u8    num;
+	u8    sqs_count;
 	u64   cfg;
 };
 
@@ -335,6 +380,7 @@
 	u8    msg;
 	u8    qs_num;
 	u8    sq_num;
+	bool  sqs_mode;
 	u64   cfg;
 };
 
@@ -394,6 +440,28 @@
 	u32   speed;
 };
 
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+	u8    msg;
+	u8    vf_id;
+	u8    qs_count;
+};
+
+struct nicvf_ptr {
+	u8    msg;
+	u8    vf_id;
+	bool  sqs_mode;
+	u8    sqs_id;
+	u64   nicvf;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+	u8    msg;
+	u8    vf_id;
+	bool  enable;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -408,6 +476,9 @@
 	struct rss_cfg_msg	rss_cfg;
 	struct bgx_stats_msg    bgx_stats;
 	struct bgx_link_status  link_status;
+	struct sqs_alloc        sqs_alloc;
+	struct nicvf_ptr	nicvf;
+	struct set_loopback	lbk;
 };
 
 #define NIC_NODE_ID_MASK	0x03
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 6e0c031..b3a5947 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -28,6 +28,11 @@
 	u8			num_vf_en;      /* No of VF enabled */
 	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
 	void __iomem		*reg_base;       /* Register start address */
+	u8			num_sqs_en;	/* Secondary qsets enabled */
+	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
+	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
+	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
 	struct pkind_cfg	pkind;
 #define	NIC_SET_VF_LMAC_MAP(bgx, lmac)	(((bgx & 0xF) << 4) | (lmac & 0xF))
 #define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
@@ -139,14 +144,19 @@
 
 	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
 
-	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+	if (vf < MAX_LMAC) {
+		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
 
-	mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
-	if (mac)
-		ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
-
+		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+		if (mac)
+			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+	}
+	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
 	mbx.nic_cfg.node_id = nic->node;
+
+	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;
+
 	nic_send_msg_to_vf(nic, vf, &mbx);
 }
 
@@ -329,6 +339,10 @@
 
 	/* Timer config */
 	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+
+	/* Enable VLAN ethertype matching and stripping */
+	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
+		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
 }
 
 /* Channel parse index configuration */
@@ -429,6 +443,12 @@
 	qset = cfg->vf_id;
 
 	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+		u8 svf = cfg->ind_tbl[idx] >> 3;
+
+		if (svf)
+			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
+		else
+			qset = cfg->vf_id;
 		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
 			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
 		idx++;
@@ -452,19 +472,31 @@
  * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
  * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
  */
-static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+			       struct sq_cfg_msg *sq)
 {
 	u32 bgx, lmac, chan;
 	u32 tl2, tl3, tl4;
 	u32 rr_quantum;
+	u8 sq_idx = sq->sq_num;
+	u8 pqs_vnic;
 
-	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
-	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+	if (sq->sqs_mode)
+		pqs_vnic = nic->pqs_vf[vnic];
+	else
+		pqs_vnic = vnic;
+
+	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+
 	/* 24 bytes for FCS, IPG and preamble */
 	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
 
 	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
 	tl4 += sq_idx;
+	if (sq->sqs_mode)
+		tl4 += vnic * 8;
+
 	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
 	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
 		      ((u64)vnic << NIC_QS_ID_SHIFT) |
@@ -485,6 +517,86 @@
 	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
 }
 
+/* Send primary nicvf pointer to secondary QS's VF */
+static void nic_send_pnicvf(struct nicpf *nic, int sqs)
+{
+	union nic_mbx mbx = {};
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
+	nic_send_msg_to_vf(nic, sqs, &mbx);
+}
+
+/* Send SQS's nicvf pointer to primary QS's VF */
+static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
+{
+	union nic_mbx mbx = {};
+	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+	mbx.nicvf.sqs_id = nicvf->sqs_id;
+	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
+	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
+}
+
+/* Find next available Qset that can be assigned as a
+ * secondary Qset to a VF.
+ */
+static int nic_nxt_avail_sqs(struct nicpf *nic)
+{
+	int sqs;
+
+	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
+		if (!nic->sqs_used[sqs])
+			nic->sqs_used[sqs] = true;
+		else
+			continue;
+		return sqs + nic->num_vf_en;
+	}
+	return -1;
+}
+
+/* Allocate additional Qsets for requested VF */
+static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
+{
+	union nic_mbx mbx = {};
+	int idx, alloc_qs = 0;
+	int sqs_id;
+
+	if (!nic->num_sqs_en)
+		goto send_mbox;
+
+	for (idx = 0; idx < sqs->qs_count; idx++) {
+		sqs_id = nic_nxt_avail_sqs(nic);
+		if (sqs_id < 0)
+			break;
+		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
+		nic->pqs_vf[sqs_id] = sqs->vf_id;
+		alloc_qs++;
+	}
+
+send_mbox:
+	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+	mbx.sqs_alloc.vf_id = sqs->vf_id;
+	mbx.sqs_alloc.qs_count = alloc_qs;
+	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+	int bgx_idx, lmac_idx;
+
+	if (lbk->vf_id > MAX_LMAC)
+		return -1;
+
+	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+	return 0;
+}
+
 /* Interrupt handler to handle mailbox messages from VFs */
 static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
 {
@@ -492,6 +604,7 @@
 	u64 *mbx_data;
 	u64 mbx_addr;
 	u64 reg_addr;
+	u64 cfg;
 	int bgx, lmac;
 	int i;
 	int ret = 0;
@@ -512,15 +625,24 @@
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
 		nic_mbx_send_ready(nic, vf);
-		nic->link[vf] = 0;
-		nic->duplex[vf] = 0;
-		nic->speed[vf] = 0;
+		if (vf < MAX_LMAC) {
+			nic->link[vf] = 0;
+			nic->duplex[vf] = 0;
+			nic->speed[vf] = 0;
+		}
 		ret = 1;
 		break;
 	case NIC_MBOX_MSG_QS_CFG:
 		reg_addr = NIC_PF_QSET_0_127_CFG |
 			   (mbx.qs.num << NIC_QS_ID_SHIFT);
-		nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+		cfg = mbx.qs.cfg;
+		/* Check if it's a secondary Qset */
+		if (vf >= nic->num_vf_en) {
+			cfg = cfg & (~0x7FULL);
+			/* Assign this Qset to primary Qset's VF */
+			cfg |= nic->pqs_vf[vf];
+		}
+		nic_reg_write(nic, reg_addr, cfg);
 		break;
 	case NIC_MBOX_MSG_RQ_CFG:
 		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
@@ -548,9 +670,11 @@
 			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
 			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
 		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
-		nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
 		break;
 	case NIC_MBOX_MSG_SET_MAC:
+		if (vf >= nic->num_vf_en)
+			break;
 		lmac = mbx.mac.vf_id;
 		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
 		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
@@ -577,10 +701,28 @@
 	case NIC_MBOX_MSG_SHUTDOWN:
 		/* First msg in VF teardown sequence */
 		nic->vf_enabled[vf] = false;
+		if (vf >= nic->num_vf_en)
+			nic->sqs_used[vf - nic->num_vf_en] = false;
+		nic->pqs_vf[vf] = 0;
 		break;
+	case NIC_MBOX_MSG_ALLOC_SQS:
+		nic_alloc_sqs(nic, &mbx.sqs_alloc);
+		goto unlock;
+	case NIC_MBOX_MSG_NICVF_PTR:
+		nic->nicvf[vf] = mbx.nicvf.nicvf;
+		break;
+	case NIC_MBOX_MSG_PNICVF_PTR:
+		nic_send_pnicvf(nic, vf);
+		goto unlock;
+	case NIC_MBOX_MSG_SNICVF_PTR:
+		nic_send_snicvf(nic, &mbx.nicvf);
+		goto unlock;
 	case NIC_MBOX_MSG_BGX_STATS:
 		nic_get_bgx_stats(nic, &mbx.bgx_stats);
 		goto unlock;
+	case NIC_MBOX_MSG_LOOPBACK:
+		ret = nic_config_loopback(nic, &mbx.lbk);
+		break;
 	default:
 		dev_err(&nic->pdev->dev,
 			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
@@ -606,8 +748,7 @@
 		if (intr & (1ULL << vf)) {
 			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
 				vf + (mbx * vf_per_mbx_reg));
-			if ((vf + (mbx * vf_per_mbx_reg)) > nic->num_vf_en)
-				break;
+
 			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
 			nic_clear_mbx_intr(nic, vf, mbx);
 		}
@@ -713,9 +854,24 @@
 	nic_disable_msix(nic);
 }
 
+static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
+{
+	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
+	u16 total_vf;
+
+	/* Check if it's a multi-node environment */
+	if (nr_node_ids > 1)
+		sqs_per_vf = MAX_SQS_PER_VF;
+
+	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
+	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
+	return min(total_vf - vf_en, vf_en * sqs_per_vf);
+}
+
 static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
 {
 	int pos = 0;
+	int vf_en;
 	int err;
 	u16 total_vf_cnt;
 
@@ -732,16 +888,20 @@
 	if (!total_vf_cnt)
 		return 0;
 
-	err = pci_enable_sriov(pdev, nic->num_vf_en);
+	vf_en = nic->num_vf_en;
+	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
+	vf_en += nic->num_sqs_en;
+
+	err = pci_enable_sriov(pdev, vf_en);
 	if (err) {
 		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
-			nic->num_vf_en);
+			vf_en);
 		nic->num_vf_en = 0;
 		return err;
 	}
 
 	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
-		 nic->num_vf_en);
+		 vf_en);
 
 	nic->flags |= NIC_SRIOV_ENABLED;
 	return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index a4228e6..af54c10 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -35,10 +35,10 @@
 }
 
 static const struct nicvf_stat nicvf_hw_stats[] = {
-	NICVF_HW_STAT(rx_bytes_ok),
-	NICVF_HW_STAT(rx_ucast_frames_ok),
-	NICVF_HW_STAT(rx_bcast_frames_ok),
-	NICVF_HW_STAT(rx_mcast_frames_ok),
+	NICVF_HW_STAT(rx_bytes),
+	NICVF_HW_STAT(rx_ucast_frames),
+	NICVF_HW_STAT(rx_bcast_frames),
+	NICVF_HW_STAT(rx_mcast_frames),
 	NICVF_HW_STAT(rx_fcs_errors),
 	NICVF_HW_STAT(rx_l2_errors),
 	NICVF_HW_STAT(rx_drop_red),
@@ -49,6 +49,30 @@
 	NICVF_HW_STAT(rx_drop_mcast),
 	NICVF_HW_STAT(rx_drop_l3_bcast),
 	NICVF_HW_STAT(rx_drop_l3_mcast),
+	NICVF_HW_STAT(rx_bgx_truncated_pkts),
+	NICVF_HW_STAT(rx_jabber_errs),
+	NICVF_HW_STAT(rx_fcs_errs),
+	NICVF_HW_STAT(rx_bgx_errs),
+	NICVF_HW_STAT(rx_prel2_errs),
+	NICVF_HW_STAT(rx_l2_hdr_malformed),
+	NICVF_HW_STAT(rx_oversize),
+	NICVF_HW_STAT(rx_undersize),
+	NICVF_HW_STAT(rx_l2_len_mismatch),
+	NICVF_HW_STAT(rx_l2_pclp),
+	NICVF_HW_STAT(rx_ip_ver_errs),
+	NICVF_HW_STAT(rx_ip_csum_errs),
+	NICVF_HW_STAT(rx_ip_hdr_malformed),
+	NICVF_HW_STAT(rx_ip_payload_malformed),
+	NICVF_HW_STAT(rx_ip_ttl_errs),
+	NICVF_HW_STAT(rx_l3_pclp),
+	NICVF_HW_STAT(rx_l4_malformed),
+	NICVF_HW_STAT(rx_l4_csum_errs),
+	NICVF_HW_STAT(rx_udp_len_errs),
+	NICVF_HW_STAT(rx_l4_port_errs),
+	NICVF_HW_STAT(rx_tcp_flag_errs),
+	NICVF_HW_STAT(rx_tcp_offset_errs),
+	NICVF_HW_STAT(rx_l4_pclp),
+	NICVF_HW_STAT(rx_truncated_pkts),
 	NICVF_HW_STAT(tx_bytes_ok),
 	NICVF_HW_STAT(tx_ucast_frames_ok),
 	NICVF_HW_STAT(tx_bcast_frames_ok),
@@ -125,10 +149,33 @@
 	nic->msg_enable = lvl;
 }
 
+static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
+{
+	int stats, qidx;
+	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
+
+	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+				nicvf_queue_stats[stats].name);
+			*data += ETH_GSTRING_LEN;
+		}
+	}
+
+	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+			sprintf(*data, "txq%d: %s", qidx + start_qidx,
+				nicvf_queue_stats[stats].name);
+			*data += ETH_GSTRING_LEN;
+		}
+	}
+}
+
 static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stats, qidx;
+	int stats;
+	int sqs;
 
 	if (sset != ETH_SS_STATS)
 		return;
@@ -143,20 +190,12 @@
 		data += ETH_GSTRING_LEN;
 	}
 
-	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
-		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
-			sprintf(data, "rxq%d: %s", qidx,
-				nicvf_queue_stats[stats].name);
-			data += ETH_GSTRING_LEN;
-		}
-	}
+	nicvf_get_qset_strings(nic, &data, 0);
 
-	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
-		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
-			sprintf(data, "txq%d: %s", qidx,
-				nicvf_queue_stats[stats].name);
-			data += ETH_GSTRING_LEN;
-		}
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		if (!nic->snicvf[sqs])
+			continue;
+		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
 	}
 
 	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
@@ -173,21 +212,58 @@
 static int nicvf_get_sset_count(struct net_device *netdev, int sset)
 {
 	struct nicvf *nic = netdev_priv(netdev);
+	int qstats_count;
+	int sqs;
 
 	if (sset != ETH_SS_STATS)
 		return -EINVAL;
 
+	qstats_count = nicvf_n_queue_stats *
+		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		struct nicvf *snic;
+
+		snic = nic->snicvf[sqs];
+		if (!snic)
+			continue;
+		qstats_count += nicvf_n_queue_stats *
+				(snic->qs->rq_cnt + snic->qs->sq_cnt);
+	}
+
 	return nicvf_n_hw_stats + nicvf_n_drv_stats +
-		(nicvf_n_queue_stats *
-		 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
+		qstats_count +
 		BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
 }
 
+static void nicvf_get_qset_stats(struct nicvf *nic,
+				 struct ethtool_stats *stats, u64 **data)
+{
+	int stat, qidx;
+
+	if (!nic)
+		return;
+
+	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+		nicvf_update_rq_stats(nic, qidx);
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+
+	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+		nicvf_update_sq_stats(nic, qidx);
+		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
+					[nicvf_queue_stats[stat].index];
+	}
+}
+
 static void nicvf_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	int stat, qidx;
+	int stat;
+	int sqs;
 
 	nicvf_update_stats(nic);
 
@@ -195,22 +271,18 @@
 	nicvf_update_lmac_stats(nic);
 
 	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
-		*(data++) = ((u64 *)&nic->stats)
+		*(data++) = ((u64 *)&nic->hw_stats)
 				[nicvf_hw_stats[stat].index];
 	for (stat = 0; stat < nicvf_n_drv_stats; stat++)
 		*(data++) = ((u64 *)&nic->drv_stats)
 				[nicvf_drv_stats[stat].index];
 
-	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
-		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
-			*(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
-					[nicvf_queue_stats[stat].index];
-	}
+	nicvf_get_qset_stats(nic, stats, &data);
 
-	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
-		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
-			*(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
-					[nicvf_queue_stats[stat].index];
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		if (!nic->snicvf[sqs])
+			continue;
+		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
 	}
 
 	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
@@ -369,7 +441,7 @@
 
 	switch (info->cmd) {
 	case ETHTOOL_GRXRINGS:
-		info->data = nic->qs->rq_cnt;
+		info->data = nic->rx_queues;
 		ret = 0;
 		break;
 	case ETHTOOL_GRXFH:
@@ -501,17 +573,15 @@
 	struct nicvf_rss_info *rss = &nic->rss_info;
 	int idx;
 
-	if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
-		rss->enable = false;
-		rss->hash_bits = 0;
-		return -EIO;
-	}
-
-	/* We do not allow change in unsupported parameters */
 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 		return -EOPNOTSUPP;
 
-	rss->enable = true;
+	if (!rss->enable) {
+		netdev_err(nic->netdev,
+			   "RSS is disabled, cannot change settings\n");
+		return -EIO;
+	}
+
 	if (indir) {
 		for (idx = 0; idx < rss->rss_size; idx++)
 			rss->ind_tbl[idx] = indir[idx];
@@ -534,11 +604,11 @@
 
 	memset(channel, 0, sizeof(*channel));
 
-	channel->max_rx = MAX_RCV_QUEUES_PER_QS;
-	channel->max_tx = MAX_SND_QUEUES_PER_QS;
+	channel->max_rx = nic->max_queues;
+	channel->max_tx = nic->max_queues;
 
-	channel->rx_count = nic->qs->rq_cnt;
-	channel->tx_count = nic->qs->sq_cnt;
+	channel->rx_count = nic->rx_queues;
+	channel->tx_count = nic->tx_queues;
 }
 
 /* Set no of Tx, Rx queues to be used */
@@ -548,22 +618,34 @@
 	struct nicvf *nic = netdev_priv(dev);
 	int err = 0;
 	bool if_up = netif_running(dev);
+	int cqcount;
 
 	if (!channel->rx_count || !channel->tx_count)
 		return -EINVAL;
-	if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+	if (channel->rx_count > nic->max_queues)
 		return -EINVAL;
-	if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+	if (channel->tx_count > nic->max_queues)
 		return -EINVAL;
 
 	if (if_up)
 		nicvf_stop(dev);
 
-	nic->qs->rq_cnt = channel->rx_count;
-	nic->qs->sq_cnt = channel->tx_count;
+	cqcount = max(channel->rx_count, channel->tx_count);
+
+	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
+		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
+		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+	} else {
+		nic->sqs_count = 0;
+	}
+
+	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
+	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
 	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
 
-	err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+	nic->rx_queues = channel->rx_count;
+	nic->tx_queues = channel->tx_count;
+	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
 	if (err)
 		return err;
 
@@ -571,7 +653,7 @@
 		nicvf_open(dev);
 
 	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
-		    nic->qs->sq_cnt, nic->qs->rq_cnt);
+		    nic->tx_queues, nic->rx_queues);
 
 	return err;
 }
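
The Qset arithmetic above decides how many secondary Qsets the VF will later request from the PF: the larger of the requested Rx/Tx counts is rounded up to whole Qsets, and everything beyond the primary Qset becomes a secondary one. Below is a minimal user-space sketch of that math; QUEUES_PER_QS stands in for MAX_CMP_QUEUES_PER_QS and is assumed to be 8 here, and the requested counts are example values only.

    /* Hedged sketch of the secondary-Qset math in nicvf_set_channels() */
    #include <stdio.h>

    #define QUEUES_PER_QS 8   /* assumed stand-in for MAX_CMP_QUEUES_PER_QS */

    static int roundup_to(int val, int mult)
    {
            return ((val + mult - 1) / mult) * mult;
    }

    int main(void)
    {
            int rx = 12, tx = 20;                   /* example ethtool -L request */
            int cqcount = tx > rx ? tx : rx;
            int sqs_count = 0;

            if (cqcount > QUEUES_PER_QS)
                    sqs_count = roundup_to(cqcount, QUEUES_PER_QS) / QUEUES_PER_QS - 1;

            printf("%d rx / %d tx queues -> primary qset + %d secondary qset(s)\n",
                   rx, tx, sqs_count);
            return 0;
    }
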
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 3b90afb..b63e579 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/log2.h>
@@ -50,6 +51,14 @@
 MODULE_PARM_DESC(cpi_alg,
 		 "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
+static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
+{
+	if (nic->sqs_mode)
+		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+	else
+		return qidx;
+}
+
 static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
 					  struct sk_buff *skb)
 {
@@ -105,7 +114,6 @@
 }
 
 /* VF -> PF mailbox communication */
-
 static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
 {
 	u64 *msg = (u64 *)mbx;
@@ -147,26 +155,15 @@
 */
 static int nicvf_check_pf_ready(struct nicvf *nic)
 {
-	int timeout = 5000, sleep = 20;
 	union nic_mbx mbx = {};
 
 	mbx.msg.msg = NIC_MBOX_MSG_READY;
-
-	nic->pf_ready_to_rcv_msg = false;
-
-	nicvf_write_to_mbx(nic, &mbx);
-
-	while (!nic->pf_ready_to_rcv_msg) {
-		msleep(sleep);
-		if (nic->pf_ready_to_rcv_msg)
-			break;
-		timeout -= sleep;
-		if (!timeout) {
-			netdev_err(nic->netdev,
-				   "PF didn't respond to READY msg\n");
-			return 0;
-		}
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		netdev_err(nic->netdev,
+			   "PF didn't respond to READY msg\n");
+		return 0;
 	}
+
 	return 1;
 }
 
@@ -197,13 +194,15 @@
 	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
 	switch (mbx.msg.msg) {
 	case NIC_MBOX_MSG_READY:
-		nic->pf_ready_to_rcv_msg = true;
+		nic->pf_acked = true;
 		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
 		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
 		nic->node = mbx.nic_cfg.node_id;
 		if (!nic->set_mac_pending)
 			ether_addr_copy(nic->netdev->dev_addr,
 					mbx.nic_cfg.mac_addr);
+		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
 		nic->link_up = false;
 		nic->duplex = 0;
 		nic->speed = 0;
@@ -221,7 +220,6 @@
 	case NIC_MBOX_MSG_BGX_STATS:
 		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
 		nic->pf_acked = true;
-		nic->bgx_stats_acked = true;
 		break;
 	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
 		nic->pf_acked = true;
@@ -242,6 +240,26 @@
 			netif_tx_stop_all_queues(nic->netdev);
 		}
 		break;
+	case NIC_MBOX_MSG_ALLOC_SQS:
+		nic->sqs_count = mbx.sqs_alloc.qs_count;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_SNICVF_PTR:
+		/* Primary VF: make note of secondary VF's pointer
+		 * to be used during packet transmission.
+		 */
+		nic->snicvf[mbx.nicvf.sqs_id] =
+			(struct nicvf *)mbx.nicvf.nicvf;
+		nic->pf_acked = true;
+		break;
+	case NIC_MBOX_MSG_PNICVF_PTR:
+		/* Secondary VF/Qset: make note of primary VF's pointer
+		 * to be used during packet reception, to hand over packets
+		 * to the primary VF's netdev.
+		 */
+		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
+		nic->pf_acked = true;
+		break;
 	default:
 		netdev_err(nic->netdev,
 			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
@@ -326,7 +344,7 @@
 
 	nicvf_get_rss_size(nic);
 
-	if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+	if (cpi_alg != CPI_ALG_NONE) {
 		rss->enable = false;
 		rss->hash_bits = 0;
 		return 0;
@@ -350,11 +368,100 @@
 
 	for (idx = 0; idx < rss->rss_size; idx++)
 		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
-							       nic->qs->rq_cnt);
+							       nic->rx_queues);
 	nicvf_config_rss(nic);
 	return 1;
 }
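
With the multiqset rework the default RSS indirection table is now seeded against the total number of receive queues rather than the primary Qset's rq_cnt. ethtool_rxfh_indir_default() effectively spreads the table entries round-robin over the receive queues (index modulo queue count); a minimal user-space sketch with example sizes:

    /* Hedged sketch of the default indirection-table fill in nicvf_rss_init() */
    #include <stdio.h>

    int main(void)
    {
            unsigned int rss_size = 16;     /* example table size reported by the PF */
            unsigned int rx_queues = 6;     /* example number of configured Rx queues */
            unsigned int idx;

            for (idx = 0; idx < rss_size; idx++)
                    printf("ind_tbl[%2u] = rq %u\n", idx, idx % rx_queues);
            return 0;
    }
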
 
+/* Request PF to allocate additional Qsets */
+static void nicvf_request_sqs(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+	int sqs;
+	int sqs_count = nic->sqs_count;
+	int rx_queues = 0, tx_queues = 0;
+
+	/* Only primary VF should request */
+	if (nic->sqs_mode || !nic->sqs_count)
+		return;
+
+	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+	mbx.sqs_alloc.vf_id = nic->vf_id;
+	mbx.sqs_alloc.qs_count = nic->sqs_count;
+	if (nicvf_send_msg_to_pf(nic, &mbx)) {
+		/* No response from PF */
+		nic->sqs_count = 0;
+		return;
+	}
+
+	/* Return if no Secondary Qsets available */
+	if (!nic->sqs_count)
+		return;
+
+	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
+		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
+	if (nic->tx_queues > MAX_SND_QUEUES_PER_QS)
+		tx_queues = nic->tx_queues - MAX_SND_QUEUES_PER_QS;
+
+	/* Set no of Rx/Tx queues in each of the SQsets */
+	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+		mbx.nicvf.vf_id = nic->vf_id;
+		mbx.nicvf.sqs_id = sqs;
+		nicvf_send_msg_to_pf(nic, &mbx);
+
+		nic->snicvf[sqs]->sqs_id = sqs;
+		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
+			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
+			rx_queues -= MAX_RCV_QUEUES_PER_QS;
+		} else {
+			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
+			rx_queues = 0;
+		}
+
+		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
+			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
+			tx_queues -= MAX_SND_QUEUES_PER_QS;
+		} else {
+			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
+			tx_queues = 0;
+		}
+
+		nic->snicvf[sqs]->qs->cq_cnt =
+		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
+
+		/* Initialize secondary Qset's queues and its interrupts */
+		nicvf_open(nic->snicvf[sqs]->netdev);
+	}
+
+	/* Update stack with actual Rx/Tx queue count allocated */
+	if (sqs_count != nic->sqs_count)
+		nicvf_set_real_num_queues(nic->netdev,
+					  nic->tx_queues, nic->rx_queues);
+}
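
The loop above hands out whatever Rx/Tx queues do not fit in the primary Qset to the secondary Qsets, at most one Qset's worth per secondary VF. A stand-alone sketch of that distribution, assuming 8 queues per Qset and example totals:

    /* Hedged sketch of the per-secondary-Qset distribution in nicvf_request_sqs() */
    #include <stdio.h>

    #define QUEUES_PER_QS 8   /* assumed stand-in for MAX_RCV/SND_QUEUES_PER_QS */

    int main(void)
    {
            int rx_queues = 20, sqs_count = 2;      /* example totals */
            int left = rx_queues > QUEUES_PER_QS ? rx_queues - QUEUES_PER_QS : 0;
            int sqs;

            printf("primary qset : %d rx queues\n",
                   rx_queues > QUEUES_PER_QS ? QUEUES_PER_QS : rx_queues);
            for (sqs = 0; sqs < sqs_count; sqs++) {
                    int chunk = left > QUEUES_PER_QS ? QUEUES_PER_QS : left;

                    printf("secondary #%d : %d rx queues\n", sqs, chunk);
                    left -= chunk;
            }
            return 0;
    }
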
+
+/* Send this Qset's nicvf pointer to PF.
+ * PF in turn sends the primary VF's nicvf struct to the secondary Qsets/VFs
+ * so that packets received by these Qsets can use the primary VF's netdev
+ */
+static void nicvf_send_vf_struct(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
+	mbx.nicvf.sqs_mode = nic->sqs_mode;
+	mbx.nicvf.nicvf = (u64)nic;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_primary_vf_struct(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 int nicvf_set_real_num_queues(struct net_device *netdev,
 			      int tx_queues, int rx_queues)
 {
@@ -429,6 +536,34 @@
 	}
 }
 
+static inline void nicvf_set_rxhash(struct net_device *netdev,
+				    struct cqe_rx_t *cqe_rx,
+				    struct sk_buff *skb)
+{
+	u8 hash_type;
+	u32 hash;
+
+	if (!(netdev->features & NETIF_F_RXHASH))
+		return;
+
+	switch (cqe_rx->rss_alg) {
+	case RSS_ALG_TCP_IP:
+	case RSS_ALG_UDP_IP:
+		hash_type = PKT_HASH_TYPE_L4;
+		hash = cqe_rx->rss_tag;
+		break;
+	case RSS_ALG_IP:
+		hash_type = PKT_HASH_TYPE_L3;
+		hash = cqe_rx->rss_tag;
+		break;
+	default:
+		hash_type = PKT_HASH_TYPE_NONE;
+		hash = 0;
+	}
+
+	skb_set_hash(skb, hash, hash_type);
+}
+
 static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 				  struct napi_struct *napi,
 				  struct cmp_queue *cq,
@@ -437,6 +572,15 @@
 	struct sk_buff *skb;
 	struct nicvf *nic = netdev_priv(netdev);
 	int err = 0;
+	int rq_idx;
+
+	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+	if (nic->sqs_mode) {
+		/* Use primary VF's 'nicvf' struct */
+		nic = nic->pnicvf;
+		netdev = nic->netdev;
+	}
 
 	/* Check for errors */
 	err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
@@ -456,9 +600,17 @@
 			       skb->data, skb->len, true);
 	}
 
+	/* If error packet, drop it here */
+	if (err) {
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
 	nicvf_set_rx_frame_cnt(nic, skb);
 
-	skb_record_rx_queue(skb, cqe_rx->rq_idx);
+	nicvf_set_rxhash(netdev, cqe_rx, skb);
+
+	skb_record_rx_queue(skb, rq_idx);
 	if (netdev->hw_features & NETIF_F_RXCSUM) {
 		/* HW by default verifies TCP/UDP/SCTP checksums */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -468,6 +620,11 @@
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
+	/* Check for stripped VLAN */
+	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       ntohs((__force __be16)cqe_rx->vlan_tci));
+
 	if (napi && (netdev->features & NETIF_F_GRO))
 		napi_gro_receive(napi, skb);
 	else
@@ -549,8 +706,11 @@
 done:
 	/* Wakeup TXQ if its stopped earlier due to SQ full */
 	if (tx_done) {
-		txq = netdev_get_tx_queue(netdev, cq_idx);
-		if (netif_tx_queue_stopped(txq)) {
+		netdev = nic->pnicvf->netdev;
+		txq = netdev_get_tx_queue(netdev,
+					  nicvf_netdev_qidx(nic, cq_idx));
+		nic = nic->pnicvf;
+		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
 			netif_tx_start_queue(txq);
 			nic->drv_stats.txq_wake++;
 			if (netif_msg_tx_err(nic))
@@ -624,11 +784,20 @@
 	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
 }
 
+static void nicvf_dump_intr_status(struct nicvf *nic)
+{
+	if (netif_msg_intr(nic))
+		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+			    nic->netdev->name, nicvf_reg_read(nic, NIC_VF_INT));
+}
+
 static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
 {
 	struct nicvf *nic = (struct nicvf *)nicvf_irq;
 	u64 intr;
 
+	nicvf_dump_intr_status(nic);
+
 	intr = nicvf_reg_read(nic, NIC_VF_INT);
 	/* Check for spurious interrupt */
 	if (!(intr & NICVF_INTR_MBOX_MASK))
@@ -639,59 +808,58 @@
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
 {
-	u64 qidx, intr, clear_intr = 0;
-	u64 cq_intr, rbdr_intr, qs_err_intr;
+	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
+	struct nicvf *nic = cq_poll->nicvf;
+	int qidx = cq_poll->cq_idx;
+
+	nicvf_dump_intr_status(nic);
+
+	/* Disable interrupts */
+	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+
+	/* Schedule NAPI */
+	napi_schedule(&cq_poll->napi);
+
+	/* Clear interrupt */
+	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
+{
 	struct nicvf *nic = (struct nicvf *)nicvf_irq;
-	struct queue_set *qs = nic->qs;
-	struct nicvf_cq_poll *cq_poll = NULL;
+	u8 qidx;
 
-	intr = nicvf_reg_read(nic, NIC_VF_INT);
-	if (netif_msg_intr(nic))
-		netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
-			    nic->netdev->name, intr);
 
-	qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
-	if (qs_err_intr) {
-		/* Disable Qset err interrupt and schedule softirq */
-		nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
-		tasklet_hi_schedule(&nic->qs_err_task);
-		clear_intr |= qs_err_intr;
-	}
+	nicvf_dump_intr_status(nic);
 
-	/* Disable interrupts and start polling */
-	cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
-	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
-		if (!(cq_intr & (1 << qidx)))
+	/* Disable RBDR interrupt and schedule softirq */
+	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
+		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
 			continue;
-		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
-			continue;
-
-		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
-		clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
-
-		cq_poll = nic->napi[qidx];
-		/* Schedule NAPI */
-		if (cq_poll)
-			napi_schedule(&cq_poll->napi);
+		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+		tasklet_hi_schedule(&nic->rbdr_task);
+		/* Clear interrupt */
+		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
 	}
 
-	/* Handle RBDR interrupts */
-	rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
-	if (rbdr_intr) {
-		/* Disable RBDR interrupt and schedule softirq */
-		for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
-			if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
-				continue;
-			nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
-			tasklet_hi_schedule(&nic->rbdr_task);
-			clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
-		}
-	}
+	return IRQ_HANDLED;
+}
 
-	/* Clear interrupts */
-	nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
+{
+	struct nicvf *nic = (struct nicvf *)nicvf_irq;
+
+	nicvf_dump_intr_status(nic);
+
+	/* Disable Qset err interrupt and schedule softirq */
+	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+	tasklet_hi_schedule(&nic->qs_err_task);
+	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
 	return IRQ_HANDLED;
 }
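
The split handlers above only mask, clear and schedule work; the CQ interrupt is re-armed from the NAPI poll path once the budget is not exhausted (matching the "CQ intr is enabled while napi_complete" comment in nicvf_stop() further down). A hedged, kernel-style sketch of that standard completion pattern follows; nicvf_poll_sketch() is a hypothetical name, while nicvf_enable_intr() and the nicvf_cq_poll fields are the helpers used elsewhere in this patch.

    /* Hedged sketch only: NAPI completion path that pairs with the per-CQ
     * handler above (function name hypothetical).
     */
    static int nicvf_poll_sketch(struct napi_struct *napi, int budget)
    {
            struct nicvf_cq_poll *cq_poll =
                    container_of(napi, struct nicvf_cq_poll, napi);
            struct nicvf *nic = cq_poll->nicvf;
            int work_done = 0;

            /* ... process up to 'budget' completions from cq_poll->cq_idx ... */

            if (work_done < budget) {
                    /* No more work: stop polling and re-arm this CQ's interrupt */
                    napi_complete(napi);
                    nicvf_enable_intr(nic, NICVF_INTR_CQ, cq_poll->cq_idx);
            }
            return work_done;
    }
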
 
@@ -725,7 +893,7 @@
 
 static int nicvf_register_interrupts(struct nicvf *nic)
 {
-	int irq, free, ret = 0;
+	int irq, ret = 0;
 	int vector;
 
 	for_each_cq_irq(irq)
@@ -740,44 +908,42 @@
 		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
 			nic->vf_id, irq - NICVF_INTR_ID_RBDR);
 
-	/* Register all interrupts except mailbox */
-	for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+	/* Register CQ interrupts */
+	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
 		vector = nic->msix_entries[irq].vector;
 		ret = request_irq(vector, nicvf_intr_handler,
-				  0, nic->irq_name[irq], nic);
+				  0, nic->irq_name[irq], nic->napi[irq]);
 		if (ret)
-			break;
+			goto err;
 		nic->irq_allocated[irq] = true;
 	}
 
-	for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+	/* Register RBDR interrupt */
+	for (irq = NICVF_INTR_ID_RBDR;
+	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
 		vector = nic->msix_entries[irq].vector;
-		ret = request_irq(vector, nicvf_intr_handler,
+		ret = request_irq(vector, nicvf_rbdr_intr_handler,
 				  0, nic->irq_name[irq], nic);
 		if (ret)
-			break;
+			goto err;
 		nic->irq_allocated[irq] = true;
 	}
 
+	/* Register QS error interrupt */
 	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
 		"NICVF%d Qset error", nic->vf_id);
-	if (!ret) {
-		vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
-		irq = NICVF_INTR_ID_QS_ERR;
-		ret = request_irq(vector, nicvf_intr_handler,
-				  0, nic->irq_name[irq], nic);
-		if (!ret)
-			nic->irq_allocated[irq] = true;
-	}
+	irq = NICVF_INTR_ID_QS_ERR;
+	ret = request_irq(nic->msix_entries[irq].vector,
+			  nicvf_qs_err_intr_handler,
+			  0, nic->irq_name[irq], nic);
+	if (!ret)
+		nic->irq_allocated[irq] = true;
 
-	if (ret) {
-		netdev_err(nic->netdev, "Request irq failed\n");
-		for (free = 0; free < irq; free++)
-			free_irq(nic->msix_entries[free].vector, nic);
-		return ret;
-	}
+err:
+	if (ret)
+		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
 
-	return 0;
+	return ret;
 }
 
 static void nicvf_unregister_interrupts(struct nicvf *nic)
@@ -786,8 +952,14 @@
 
 	/* Free registered interrupts */
 	for (irq = 0; irq < nic->num_vec; irq++) {
-		if (nic->irq_allocated[irq])
+		if (!nic->irq_allocated[irq])
+			continue;
+
+		if (irq < NICVF_INTR_ID_SQ)
+			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
+		else
 			free_irq(nic->msix_entries[irq].vector, nic);
+
 		nic->irq_allocated[irq] = false;
 	}
 
@@ -852,13 +1024,26 @@
 			netdev_warn(netdev,
 				    "%s: Transmit ring full, stopping SQ%d\n",
 				    netdev->name, qid);
-
 		return NETDEV_TX_BUSY;
 	}
 
 	return NETDEV_TX_OK;
 }
 
+static inline void nicvf_free_cq_poll(struct nicvf *nic)
+{
+	struct nicvf_cq_poll *cq_poll;
+	int qidx;
+
+	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+		cq_poll = nic->napi[qidx];
+		if (!cq_poll)
+			continue;
+		nic->napi[qidx] = NULL;
+		kfree(cq_poll);
+	}
+}
+
 int nicvf_stop(struct net_device *netdev)
 {
 	int irq, qidx;
@@ -871,6 +1056,17 @@
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	netif_carrier_off(netdev);
+	netif_tx_stop_all_queues(nic->netdev);
+
+	/* Teardown secondary qsets first */
+	if (!nic->sqs_mode) {
+		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
+			if (!nic->snicvf[qidx])
+				continue;
+			nicvf_stop(nic->snicvf[qidx]->netdev);
+			nic->snicvf[qidx] = NULL;
+		}
+	}
 
 	/* Disable RBDR & QS error interrupts */
 	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -893,7 +1089,6 @@
 		cq_poll = nic->napi[qidx];
 		if (!cq_poll)
 			continue;
-		nic->napi[qidx] = NULL;
 		napi_synchronize(&cq_poll->napi);
 		/* CQ intr is enabled while napi_complete,
 		 * so disable it now
@@ -902,7 +1097,6 @@
 		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
 		napi_disable(&cq_poll->napi);
 		netif_napi_del(&cq_poll->napi);
-		kfree(cq_poll);
 	}
 
 	netif_tx_disable(netdev);
@@ -918,6 +1112,12 @@
 
 	nicvf_unregister_interrupts(nic);
 
+	nicvf_free_cq_poll(nic);
+
+	/* Clear multiqset info */
+	nic->pnicvf = nic;
+	nic->sqs_count = 0;
+
 	return 0;
 }
 
@@ -944,6 +1144,7 @@
 			goto napi_del;
 		}
 		cq_poll->cq_idx = qidx;
+		cq_poll->nicvf = nic;
 		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
 			       NAPI_POLL_WEIGHT);
 		napi_enable(&cq_poll->napi);
@@ -972,10 +1173,16 @@
 
 	/* Configure CPI algorithm */
 	nic->cpi_alg = cpi_alg;
-	nicvf_config_cpi(nic);
+	if (!nic->sqs_mode)
+		nicvf_config_cpi(nic);
+
+	nicvf_request_sqs(nic);
+	if (nic->sqs_mode)
+		nicvf_get_primary_vf_struct(nic);
 
 	/* Configure receive side scaling */
-	nicvf_rss_init(nic);
+	if (!nic->sqs_mode)
+		nicvf_rss_init(nic);
 
 	err = nicvf_register_interrupts(nic);
 	if (err)
@@ -1011,6 +1218,8 @@
 cleanup:
 	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
 	nicvf_unregister_interrupts(nic);
+	tasklet_kill(&nic->qs_err_task);
+	tasklet_kill(&nic->rbdr_task);
 napi_del:
 	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
 		cq_poll = nic->napi[qidx];
@@ -1018,9 +1227,8 @@
 			continue;
 		napi_disable(&cq_poll->napi);
 		netif_napi_del(&cq_poll->napi);
-		kfree(cq_poll);
-		nic->napi[qidx] = NULL;
 	}
+	nicvf_free_cq_poll(nic);
 	return err;
 }
 
@@ -1077,7 +1285,6 @@
 {
 	int stat = 0;
 	union nic_mbx mbx = {};
-	int timeout;
 
 	if (!netif_running(nic->netdev))
 		return;
@@ -1087,14 +1294,9 @@
 	/* Rx stats */
 	mbx.bgx_stats.rx = 1;
 	while (stat < BGX_RX_STATS_COUNT) {
-		nic->bgx_stats_acked = 0;
 		mbx.bgx_stats.idx = stat;
-		nicvf_send_msg_to_pf(nic, &mbx);
-		timeout = 0;
-		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
-			msleep(2);
-			timeout++;
-		}
+		if (nicvf_send_msg_to_pf(nic, &mbx))
+			return;
 		stat++;
 	}
 
@@ -1103,14 +1305,9 @@
 	/* Tx stats */
 	mbx.bgx_stats.rx = 0;
 	while (stat < BGX_TX_STATS_COUNT) {
-		nic->bgx_stats_acked = 0;
 		mbx.bgx_stats.idx = stat;
-		nicvf_send_msg_to_pf(nic, &mbx);
-		timeout = 0;
-		while ((!nic->bgx_stats_acked) && (timeout < 10)) {
-			msleep(2);
-			timeout++;
-		}
+		if (nicvf_send_msg_to_pf(nic, &mbx))
+			return;
 		stat++;
 	}
 }
@@ -1118,7 +1315,7 @@
 void nicvf_update_stats(struct nicvf *nic)
 {
 	int qidx;
-	struct nicvf_hw_stats *stats = &nic->stats;
+	struct nicvf_hw_stats *stats = &nic->hw_stats;
 	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 	struct queue_set *qs = nic->qs;
 
@@ -1127,14 +1324,16 @@
 #define GET_TX_STATS(reg) \
 	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
 
-	stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
-	stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
-	stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
-	stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
+	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
+	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
+	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
 	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
 	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
 	stats->rx_drop_red = GET_RX_STATS(RX_RED);
+	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
 	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
 	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
 	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
 	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
@@ -1146,9 +1345,6 @@
 	stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
 	stats->tx_drops = GET_TX_STATS(TX_DROP);
 
-	drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
-				  stats->rx_bcast_frames_ok +
-				  stats->rx_mcast_frames_ok;
 	drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
 				  stats->tx_bcast_frames_ok +
 				  stats->tx_mcast_frames_ok;
@@ -1167,14 +1363,15 @@
 					    struct rtnl_link_stats64 *stats)
 {
 	struct nicvf *nic = netdev_priv(netdev);
-	struct nicvf_hw_stats *hw_stats = &nic->stats;
+	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
 	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	nicvf_update_stats(nic);
 
-	stats->rx_bytes = hw_stats->rx_bytes_ok;
+	stats->rx_bytes = hw_stats->rx_bytes;
 	stats->rx_packets = drv_stats->rx_frames_ok;
 	stats->rx_dropped = drv_stats->rx_drops;
+	stats->multicast = hw_stats->rx_mcast_frames;
 
 	stats->tx_bytes = hw_stats->tx_bytes_ok;
 	stats->tx_packets = drv_stats->tx_frames_ok;
@@ -1208,6 +1405,45 @@
 	nic->netdev->trans_start = jiffies;
 }
 
+static int nicvf_config_loopback(struct nicvf *nic,
+				 netdev_features_t features)
+{
+	union nic_mbx mbx = {};
+
+	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+	mbx.lbk.vf_id = nic->vf_id;
+	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
+
+	return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static netdev_features_t nicvf_fix_features(struct net_device *netdev,
+					    netdev_features_t features)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+
+	if ((features & NETIF_F_LOOPBACK) &&
+	    netif_running(netdev) && !nic->loopback_supported)
+		features &= ~NETIF_F_LOOPBACK;
+
+	return features;
+}
+
+static int nicvf_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	struct nicvf *nic = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+		nicvf_config_vlan_stripping(nic, features);
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+		return nicvf_config_loopback(nic, features);
+
+	return 0;
+}
+
 static const struct net_device_ops nicvf_netdev_ops = {
 	.ndo_open		= nicvf_open,
 	.ndo_stop		= nicvf_stop,
@@ -1216,6 +1452,8 @@
 	.ndo_set_mac_address	= nicvf_set_mac_address,
 	.ndo_get_stats64	= nicvf_get_stats64,
 	.ndo_tx_timeout         = nicvf_tx_timeout,
+	.ndo_fix_features       = nicvf_fix_features,
+	.ndo_set_features       = nicvf_set_features,
 };
 
 static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1223,8 +1461,7 @@
 	struct device *dev = &pdev->dev;
 	struct net_device *netdev;
 	struct nicvf *nic;
-	struct queue_set *qs;
-	int    err;
+	int    err, qcount;
 
 	err = pci_enable_device(pdev);
 	if (err) {
@@ -1250,9 +1487,17 @@
 		goto err_release_regions;
 	}
 
-	netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
-				    MAX_RCV_QUEUES_PER_QS,
-				    MAX_SND_QUEUES_PER_QS);
+	qcount = MAX_CMP_QUEUES_PER_QS;
+
+	/* Restrict multiqset support to host-bound VFs only */
+	if (pdev->is_virtfn) {
+		/* Set max number of queues per VF */
+		qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
+		qcount = min(qcount,
+			     (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+	}
+
+	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
 	if (!netdev) {
 		err = -ENOMEM;
 		goto err_release_regions;
@@ -1265,6 +1510,8 @@
 	nic = netdev_priv(netdev);
 	nic->netdev = netdev;
 	nic->pdev = pdev;
+	nic->pnicvf = nic;
+	nic->max_queues = qcount;
 
 	/* MAP VF's configuration registers */
 	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
@@ -1278,20 +1525,31 @@
 	if (err)
 		goto err_free_netdev;
 
-	qs = nic->qs;
-
-	err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
-	if (err)
-		goto err_free_netdev;
-
 	/* Check if PF is alive and get MAC address for this VF */
 	err = nicvf_register_misc_interrupt(nic);
 	if (err)
 		goto err_free_netdev;
 
-	netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
-			     NETIF_F_TSO | NETIF_F_GRO);
-	netdev->hw_features = netdev->features;
+	nicvf_send_vf_struct(nic);
+
+	/* Check if this VF is in QS only mode */
+	if (nic->sqs_mode)
+		return 0;
+
+	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
+	if (err)
+		goto err_unregister_interrupts;
+
+	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+			       NETIF_F_TSO | NETIF_F_GRO |
+			       NETIF_F_HW_VLAN_CTAG_RX);
+
+	netdev->hw_features |= NETIF_F_RXHASH;
+
+	netdev->features |= netdev->hw_features;
+	netdev->hw_features |= NETIF_F_LOOPBACK;
+
+	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 
 	netdev->netdev_ops = &nicvf_netdev_ops;
 	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
@@ -1326,8 +1584,13 @@
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nicvf *nic = netdev_priv(netdev);
+	struct net_device *pnetdev = nic->pnicvf->netdev;
 
-	unregister_netdev(netdev);
+	/* Check if this Qset is assigned to a different VF.
+	 * If so, clean up the primary and all secondary Qsets.
+	 */
+	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
+		unregister_netdev(pnetdev);
 	nicvf_unregister_interrupts(nic);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ca4240a..e404ea8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -475,6 +475,27 @@
 		return;
 }
 
+void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
+{
+	u64 rq_cfg;
+	int sqs;
+
+	rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
+
+	/* Enable or disable stripping of the first (outermost) VLAN tag */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		rq_cfg |= (1ULL << 25);
+	else
+		rq_cfg &= ~(1ULL << 25);
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+
+	/* Configure Secondary Qsets, if any */
+	for (sqs = 0; sqs < nic->sqs_count; sqs++)
+		if (nic->snicvf[sqs])
+			nicvf_queue_reg_write(nic->snicvf[sqs],
+					      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+}
+
 /* Configures receive queue */
 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 				   int qidx, bool enable)
@@ -524,7 +545,9 @@
 	mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
 	nicvf_send_msg_to_pf(nic, &mbx);
 
-	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+	nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
+	if (!nic->sqs_mode)
+		nicvf_config_vlan_stripping(nic, nic->netdev->features);
 
 	/* Enable Receive queue */
 	rq_cfg.ena = 1;
@@ -598,6 +621,7 @@
 	mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
 	mbx.sq.qs_num = qs->vnic_id;
 	mbx.sq.sq_num = qidx;
+	mbx.sq.sqs_mode = nic->sqs_mode;
 	mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
 	nicvf_send_msg_to_pf(nic, &mbx);
 
@@ -679,6 +703,7 @@
 	/* Send a mailbox msg to PF to config Qset */
 	mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
 	mbx.qs.num = qs->vnic_id;
+	mbx.qs.sqs_count = nic->sqs_count;
 
 	mbx.qs.cfg = 0;
 	qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
@@ -759,6 +784,10 @@
 	qs->rbdr_len = RCV_BUF_COUNT;
 	qs->sq_len = SND_QUEUE_LEN;
 	qs->cq_len = CMP_QUEUE_LEN;
+
+	nic->rx_queues = qs->rq_cnt;
+	nic->tx_queues = qs->sq_cnt;
+
 	return 0;
 }
 
@@ -961,9 +990,6 @@
 
 	/* Offload checksum calculation to HW */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		if (skb->protocol != htons(ETH_P_IP))
-			return;
-
 		hdr->csum_l3 = 1; /* Enable IP csum calculation */
 		hdr->l3_offset = skb_network_offset(skb);
 		hdr->l4_offset = skb_transport_offset(skb);
@@ -1005,7 +1031,7 @@
  * them to SQ for transfer
  */
 static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
-			       int qentry, struct sk_buff *skb)
+			       int sq_num, int qentry, struct sk_buff *skb)
 {
 	struct tso_t tso;
 	int seg_subdescs = 0, desc_cnt = 0;
@@ -1065,7 +1091,7 @@
 
 	/* Inform HW to xmit all TSO segments */
 	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
-			      skb_get_queue_mapping(skb), desc_cnt);
+			      sq_num, desc_cnt);
 	nic->drv_stats.tx_tso++;
 	return 1;
 }
@@ -1076,10 +1102,24 @@
 	int i, size;
 	int subdesc_cnt;
 	int sq_num, qentry;
-	struct queue_set *qs = nic->qs;
+	struct queue_set *qs;
 	struct snd_queue *sq;
 
 	sq_num = skb_get_queue_mapping(skb);
+	if (sq_num >= MAX_SND_QUEUES_PER_QS) {
+		/* Get secondary Qset's SQ structure */
+		i = sq_num / MAX_SND_QUEUES_PER_QS;
+		if (!nic->snicvf[i - 1]) {
+			netdev_warn(nic->netdev,
+				    "Secondary Qset#%d's ptr not initialized\n",
+				    i - 1);
+			return 1;
+		}
+		nic = (struct nicvf *)nic->snicvf[i - 1];
+		sq_num = sq_num % MAX_SND_QUEUES_PER_QS;
+	}
+
+	qs = nic->qs;
 	sq = &qs->sq[sq_num];
 
 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
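
The transmit lookup above is the reverse of nicvf_netdev_qidx(): a stack queue index beyond the primary Qset's range selects a secondary Qset and a local send queue within it. A small sketch of that split, again assuming 8 queues per Qset:

    /* Hedged sketch of the stack-queue -> (Qset, local SQ) lookup above */
    #include <stdio.h>

    #define QUEUES_PER_QS 8   /* assumed stand-in for MAX_SND_QUEUES_PER_QS */

    int main(void)
    {
            int stack_q;

            for (stack_q = 0; stack_q < 24; stack_q += 7) {
                    if (stack_q < QUEUES_PER_QS)
                            printf("stack q%-2d -> primary qset, sq%d\n",
                                   stack_q, stack_q);
                    else
                            printf("stack q%-2d -> secondary #%d, sq%d\n",
                                   stack_q, stack_q / QUEUES_PER_QS - 1,
                                   stack_q % QUEUES_PER_QS);
            }
            return 0;
    }
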
@@ -1090,7 +1130,7 @@
 
 	/* Check if its a TSO packet */
 	if (skb_shinfo(skb)->gso_size)
-		return nicvf_sq_append_tso(nic, sq, qentry, skb);
+		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
 
 	/* Add SQ header subdesc */
 	nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
@@ -1126,6 +1166,8 @@
 	return 1;
 
 append_fail:
+	/* Use original PCI dev for debug log */
+	nic = nic->pnicvf;
 	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
 	return 0;
 }
@@ -1371,10 +1413,11 @@
 int nicvf_check_cqe_rx_errs(struct nicvf *nic,
 			    struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
 {
-	struct cmp_queue_stats *stats = &cq->stats;
+	struct nicvf_hw_stats *stats = &nic->hw_stats;
+	struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
 
 	if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
-		stats->rx.errop.good++;
+		drv_stats->rx_frames_ok++;
 		return 0;
 	}
 
@@ -1384,111 +1427,78 @@
 			   nic->netdev->name,
 			   cqe_rx->err_level, cqe_rx->err_opcode);
 
-	switch (cqe_rx->err_level) {
-	case CQ_ERRLVL_MAC:
-		stats->rx.errlvl.mac_errs++;
-		break;
-	case CQ_ERRLVL_L2:
-		stats->rx.errlvl.l2_errs++;
-		break;
-	case CQ_ERRLVL_L3:
-		stats->rx.errlvl.l3_errs++;
-		break;
-	case CQ_ERRLVL_L4:
-		stats->rx.errlvl.l4_errs++;
-		break;
-	}
-
 	switch (cqe_rx->err_opcode) {
 	case CQ_RX_ERROP_RE_PARTIAL:
-		stats->rx.errop.partial_pkts++;
+		stats->rx_bgx_truncated_pkts++;
 		break;
 	case CQ_RX_ERROP_RE_JABBER:
-		stats->rx.errop.jabber_errs++;
+		stats->rx_jabber_errs++;
 		break;
 	case CQ_RX_ERROP_RE_FCS:
-		stats->rx.errop.fcs_errs++;
-		break;
-	case CQ_RX_ERROP_RE_TERMINATE:
-		stats->rx.errop.terminate_errs++;
+		stats->rx_fcs_errs++;
 		break;
 	case CQ_RX_ERROP_RE_RX_CTL:
-		stats->rx.errop.bgx_rx_errs++;
+		stats->rx_bgx_errs++;
 		break;
 	case CQ_RX_ERROP_PREL2_ERR:
-		stats->rx.errop.prel2_errs++;
-		break;
-	case CQ_RX_ERROP_L2_FRAGMENT:
-		stats->rx.errop.l2_frags++;
-		break;
-	case CQ_RX_ERROP_L2_OVERRUN:
-		stats->rx.errop.l2_overruns++;
-		break;
-	case CQ_RX_ERROP_L2_PFCS:
-		stats->rx.errop.l2_pfcs++;
-		break;
-	case CQ_RX_ERROP_L2_PUNY:
-		stats->rx.errop.l2_puny++;
+		stats->rx_prel2_errs++;
 		break;
 	case CQ_RX_ERROP_L2_MAL:
-		stats->rx.errop.l2_hdr_malformed++;
+		stats->rx_l2_hdr_malformed++;
 		break;
 	case CQ_RX_ERROP_L2_OVERSIZE:
-		stats->rx.errop.l2_oversize++;
+		stats->rx_oversize++;
 		break;
 	case CQ_RX_ERROP_L2_UNDERSIZE:
-		stats->rx.errop.l2_undersize++;
+		stats->rx_undersize++;
 		break;
 	case CQ_RX_ERROP_L2_LENMISM:
-		stats->rx.errop.l2_len_mismatch++;
+		stats->rx_l2_len_mismatch++;
 		break;
 	case CQ_RX_ERROP_L2_PCLP:
-		stats->rx.errop.l2_pclp++;
+		stats->rx_l2_pclp++;
 		break;
 	case CQ_RX_ERROP_IP_NOT:
-		stats->rx.errop.non_ip++;
+		stats->rx_ip_ver_errs++;
 		break;
 	case CQ_RX_ERROP_IP_CSUM_ERR:
-		stats->rx.errop.ip_csum_err++;
+		stats->rx_ip_csum_errs++;
 		break;
 	case CQ_RX_ERROP_IP_MAL:
-		stats->rx.errop.ip_hdr_malformed++;
+		stats->rx_ip_hdr_malformed++;
 		break;
 	case CQ_RX_ERROP_IP_MALD:
-		stats->rx.errop.ip_payload_malformed++;
+		stats->rx_ip_payload_malformed++;
 		break;
 	case CQ_RX_ERROP_IP_HOP:
-		stats->rx.errop.ip_hop_errs++;
-		break;
-	case CQ_RX_ERROP_L3_ICRC:
-		stats->rx.errop.l3_icrc_errs++;
+		stats->rx_ip_ttl_errs++;
 		break;
 	case CQ_RX_ERROP_L3_PCLP:
-		stats->rx.errop.l3_pclp++;
+		stats->rx_l3_pclp++;
 		break;
 	case CQ_RX_ERROP_L4_MAL:
-		stats->rx.errop.l4_malformed++;
+		stats->rx_l4_malformed++;
 		break;
 	case CQ_RX_ERROP_L4_CHK:
-		stats->rx.errop.l4_csum_errs++;
+		stats->rx_l4_csum_errs++;
 		break;
 	case CQ_RX_ERROP_UDP_LEN:
-		stats->rx.errop.udp_len_err++;
+		stats->rx_udp_len_errs++;
 		break;
 	case CQ_RX_ERROP_L4_PORT:
-		stats->rx.errop.bad_l4_port++;
+		stats->rx_l4_port_errs++;
 		break;
 	case CQ_RX_ERROP_TCP_FLAG:
-		stats->rx.errop.bad_tcp_flag++;
+		stats->rx_tcp_flag_errs++;
 		break;
 	case CQ_RX_ERROP_TCP_OFFSET:
-		stats->rx.errop.tcp_offset_errs++;
+		stats->rx_tcp_offset_errs++;
 		break;
 	case CQ_RX_ERROP_L4_PCLP:
-		stats->rx.errop.l4_pclp++;
+		stats->rx_l4_pclp++;
 		break;
 	case CQ_RX_ERROP_RBDR_TRUNC:
-		stats->rx.errop.pkt_truncated++;
+		stats->rx_truncated_pkts++;
 		break;
 	}
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index f0937b7..fb4957d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -181,47 +181,6 @@
 };
 
 struct cmp_queue_stats {
-	struct rx_stats {
-		struct {
-			u64 mac_errs;
-			u64 l2_errs;
-			u64 l3_errs;
-			u64 l4_errs;
-		} errlvl;
-		struct {
-			u64 good;
-			u64 partial_pkts;
-			u64 jabber_errs;
-			u64 fcs_errs;
-			u64 terminate_errs;
-			u64 bgx_rx_errs;
-			u64 prel2_errs;
-			u64 l2_frags;
-			u64 l2_overruns;
-			u64 l2_pfcs;
-			u64 l2_puny;
-			u64 l2_hdr_malformed;
-			u64 l2_oversize;
-			u64 l2_undersize;
-			u64 l2_len_mismatch;
-			u64 l2_pclp;
-			u64 non_ip;
-			u64 ip_csum_err;
-			u64 ip_hdr_malformed;
-			u64 ip_payload_malformed;
-			u64 ip_hop_errs;
-			u64 l3_icrc_errs;
-			u64 l3_pclp;
-			u64 l4_malformed;
-			u64 l4_csum_errs;
-			u64 udp_len_err;
-			u64 bad_l4_port;
-			u64 bad_tcp_flag;
-			u64 tcp_offset_errs;
-			u64 l4_pclp;
-			u64 pkt_truncated;
-		} errop;
-	} rx;
 	struct tx_stats {
 		u64 good;
 		u64 desc_fault;
@@ -292,6 +251,7 @@
 	void		*desc;
 	struct q_desc_mem   dmem;
 	struct cmp_queue_stats	stats;
+	int		irq;
 } ____cacheline_aligned_in_smp;
 
 struct snd_queue {
@@ -347,6 +307,8 @@
 
 #define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
 
+void nicvf_config_vlan_stripping(struct nicvf *nic,
+				 netdev_features_t features);
 int nicvf_set_qset_resources(struct nicvf *nic);
 int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
 void nicvf_qset_config(struct nicvf *nic, bool enable);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index b961a89..574c492 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -6,6 +6,7 @@
  * as published by the Free Software Foundation.
  */
 
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -26,7 +27,7 @@
 struct lmac {
 	struct bgx		*bgx;
 	int			dmac;
-	unsigned char		mac[ETH_ALEN];
+	u8			mac[ETH_ALEN];
 	bool			link_up;
 	int			lmacid; /* ID within BGX */
 	int			lmacid_bd; /* ID on board */
@@ -328,6 +329,37 @@
 	}
 }
 
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable)
+{
+	struct bgx *bgx;
+	struct lmac *lmac;
+	u64    cfg;
+
+	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	if (!bgx)
+		return;
+
+	lmac = &bgx->lmac[lmac_idx];
+	if (lmac->is_sgmii) {
+		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+		if (enable)
+			cfg |= PCS_MRX_CTL_LOOPBACK1;
+		else
+			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+	} else {
+		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+		if (enable)
+			cfg |= SPU_CTL_LOOPBACK;
+		else
+			cfg &= ~SPU_CTL_LOOPBACK;
+		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+	}
+}
+EXPORT_SYMBOL(bgx_lmac_internal_loopback);
+
 static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
 {
 	u64 cfg;
@@ -835,18 +867,108 @@
 	}
 }
 
-static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
 {
+	u8 mac[ETH_ALEN];
+	int ret;
+
+	ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
+					    "mac-address", mac, ETH_ALEN);
+	if (ret)
+		goto out;
+
+	if (!is_valid_ether_addr(mac)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(dst, mac, ETH_ALEN);
+out:
+	return ret;
+}
+
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+					 u32 lvl, void *context, void **rv)
+{
+	struct bgx *bgx = context;
+	struct acpi_device *adev;
+
+	if (acpi_bus_get_device(handle, &adev))
+		goto out;
+
+	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+
+	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+
+	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+out:
+	bgx->lmac_count++;
+	return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+				     void *context, void **ret_val)
+{
+	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct bgx *bgx = context;
+	char bgx_sel[5];
+
+	snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+		pr_warn("Invalid link device\n");
+		return AE_OK;
+	}
+
+	if (strncmp(string.pointer, bgx_sel, 4))
+		return AE_OK;
+
+	acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+			    bgx_acpi_register_phy, NULL, bgx, NULL);
+
+	kfree(string.pointer);
+	return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+	acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+	return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+	struct device_node *np;
 	struct device_node *np_child;
 	u8 lmac = 0;
+	char bgx_sel[5];
+	const char *mac;
+
+	/* Get BGX node from DT */
+	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+	np = of_find_node_by_name(NULL, bgx_sel);
+	if (!np)
+		return -ENODEV;
 
 	for_each_child_of_node(np, np_child) {
-		struct device_node *phy_np;
-		const char *mac;
-
-		phy_np = of_parse_phandle(np_child, "phy-handle", 0);
-		if (phy_np)
-			bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+		struct device_node *phy_np = of_parse_phandle(np_child,
+							      "phy-handle", 0);
+		if (!phy_np)
+			continue;
+		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
 
 		mac = of_get_mac_address(np_child);
 		if (mac)
@@ -858,6 +980,24 @@
 		if (lmac == MAX_LMAC_PER_BGX)
 			break;
 	}
+	return 0;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+	if (!acpi_disabled)
+		return bgx_init_acpi_phy(bgx);
+
+	return bgx_init_of_phy(bgx);
 }
 
 static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -865,8 +1005,6 @@
 	int err;
 	struct device *dev = &pdev->dev;
 	struct bgx *bgx = NULL;
-	struct device_node *np;
-	char bgx_sel[5];
 	u8 lmac;
 
 	bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
@@ -902,10 +1040,9 @@
 	bgx_vnic[bgx->bgx_id] = bgx;
 	bgx_get_qlm_mode(bgx);
 
-	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
-	np = of_find_node_by_name(NULL, bgx_sel);
-	if (np)
-		bgx_init_of(bgx, np);
+	err = bgx_init_phy(bgx);
+	if (err)
+		goto err_enable;
 
 	bgx_init_hw(bgx);
 
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index ba4f53b..07b7ec66 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -72,6 +72,7 @@
 
 #define BGX_SPUX_CONTROL1		0x10000
 #define  SPU_CTL_LOW_POWER			BIT_ULL(11)
+#define  SPU_CTL_LOOPBACK			BIT_ULL(14)
 #define  SPU_CTL_RESET				BIT_ULL(15)
 #define BGX_SPUX_STATUS1		0x10008
 #define  SPU_STATUS1_RCV_LNK			BIT_ULL(2)
@@ -126,6 +127,7 @@
 #define	 PCS_MRX_CTL_RST_AN			BIT_ULL(9)
 #define	 PCS_MRX_CTL_PWR_DN			BIT_ULL(11)
 #define	 PCS_MRX_CTL_AN_EN			BIT_ULL(12)
+#define	 PCS_MRX_CTL_LOOPBACK1			BIT_ULL(14)
 #define	 PCS_MRX_CTL_RESET			BIT_ULL(15)
 #define BGX_GMP_PCS_MRX_STATUS		0x30008
 #define	 PCS_MRX_STATUS_AN_CPT			BIT_ULL(5)
@@ -186,6 +188,8 @@
 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+				int lmac_idx, bool enable);
 u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
 u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
 #define BGX_RX_STATS_COUNT 11
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 629f75d..fa0c7b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -767,6 +767,11 @@
 	bool tid_release_task_busy;
 
 	struct dentry *debugfs_root;
+	u32 use_bd;     /* Use SGE Back Door interface for reading SGE contexts */
+	u32 trace_rss;	/* 1: a different RSS flit is used for each trace filter;
+			 * 0: the default RSS flit is used for all 4 filters.
+			 */
 
 	spinlock_t stats_lock;
 	spinlock_t win0_lock ____cacheline_aligned_in_smp;
@@ -1284,6 +1289,7 @@
 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
 		  const u8 *fw_data, unsigned int size, int force);
 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_check_fw_version(struct adapter *adap);
 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
@@ -1440,6 +1446,10 @@
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+			int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+			 int filter_index, int *enabled);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 			 u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6074680..052c660 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -31,6 +31,15 @@
 	"Auto Negotiated"
 };
 
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+	return state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	       state == CXGB4_DCB_STATE_HOST;
+}
+
 /* Initialize a port's Data Center Bridging state.  Typically used after a
  * Link Down event.
  */
@@ -603,7 +612,7 @@
 	struct port_info *pi = netdev2pinfo(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
 
-	if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	if (!cxgb4_dcb_state_synced(dcb->state) ||
 	    priority >= CXGB4_MAX_PRIORITY)
 		*pfccfg = 0;
 	else
@@ -620,7 +629,7 @@
 	struct adapter *adap = pi->adapter;
 	int err;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+	if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
 	    priority >= CXGB4_MAX_PRIORITY)
 		return;
 
@@ -732,7 +741,7 @@
 {
 	struct port_info *pi = netdev2pinfo(dev);
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return false;
 
 	return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@
 	struct adapter *adap = pi->adapter;
 	int i;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 0;
 
 	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@
  */
 static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
 {
-	return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+	/* Convert app_idtype to firmware format before querying */
+	return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+			      app_idtype : 3, app_id, 0);
 }
 
 /* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@
 	int i, err;
 
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return -EINVAL;
 
 	/* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@
 	struct port_info *pi = netdev2pinfo(dev);
 	struct port_dcb_info *dcb = &pi->dcb;
 
-	if (dcb_subtype && !(dcb->msgs & dcb_subtype))
-		return 0;
+	if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+		if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+			return 0;
 
-	return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+	return (cxgb4_dcb_state_synced(dcb->state) &&
 		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
 
@@ -1057,7 +1069,7 @@
 
 	/* Can't enable DCB if we haven't successfully negotiated it.
 	 */
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 1;
 
 	/* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@
 	struct adapter *adap = pi->adapter;
 	int i, err = 0;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 1;
 
 	info->willing = 0;
@@ -1114,7 +1126,7 @@
 	struct adapter *adap = pi->adapter;
 	int i, err = 0;
 
-	if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+	if (!cxgb4_dcb_state_synced(pi->dcb.state))
 		return 1;
 
 	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@
 		if (!pcmd.u.dcb.app_priority.protocolid)
 			break;
 
-		table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+		table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
 		table[i].protocol =
 			be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
 		table[i].priority =
@@ -1181,6 +1193,8 @@
 	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
 		pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
 
+	pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
 	return 0;
 }
 
@@ -1198,6 +1212,8 @@
 	 */
 	pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
 
+	pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index c3c7db4..0a87a32 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -151,6 +151,45 @@
 	return 0;
 }
 
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Inst    Data      PC     LS0Stat  "
+			 "LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+			   (p[9] >> 16) & 0xff,       /* Status */
+			   p[9] & 0xffff, p[8] >> 16, /* Inst */
+			   p[8] & 0xffff, p[7] >> 16, /* Data */
+			   p[7] & 0xffff, p[6] >> 16, /* PC */
+			   p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
+			   p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
+	}
+	return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+	if (v == SEQ_START_TOKEN) {
+		seq_puts(seq, "Status   Inst    Data      PC\n");
+	} else {
+		const u32 *p = v;
+
+		seq_printf(seq, "  %02x   %08x %08x %08x\n",
+			   p[3] & 0xff, p[2], p[1], p[0]);
+		seq_printf(seq, "  %02x   %02x%06x %02x%06x %02x%06x\n",
+			   (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+			   p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+		seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x\n",
+			   (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+			   p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+			   p[6] >> 16);
+	}
+	return 0;
+}
+
 static int cim_la_open(struct inode *inode, struct file *file)
 {
 	int ret;
@@ -162,9 +201,18 @@
 	if (ret)
 		return ret;
 
-	p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
-			 cfg & UPDBGLACAPTPCONLY_F ?
-			 cim_la_show_3in1 : cim_la_show);
+	if (is_t6(adap->params.chip)) {
+		/* +1 to account for integer division of CIMLA_SIZE/10 */
+		p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+				 10 * sizeof(u32), 1,
+				 cfg & UPDBGLACAPTPCONLY_F ?
+					cim_la_show_pc_t6 : cim_la_show_t6);
+	} else {
+		p = seq_open_tab(file, adap->params.cim_la_size / 8,
+				 8 * sizeof(u32), 1,
+				 cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+							     cim_la_show);
+	}
 	if (!p)
 		return -ENOMEM;
 
@@ -298,11 +346,11 @@
 		if (is_t4(adap->params.chip)) {
 			i = t4_cim_read(adap, UP_OBQ_0_REALADDR_A,
 					ARRAY_SIZE(obq_wr_t4), obq_wr_t4);
-				wr = obq_wr_t4;
+			wr = obq_wr_t4;
 		} else {
 			i = t4_cim_read(adap, UP_OBQ_0_SHADOW_REALADDR_A,
 					ARRAY_SIZE(obq_wr_t5), obq_wr_t5);
-				wr = obq_wr_t5;
+			wr = obq_wr_t5;
 		}
 	}
 	if (i)
@@ -1153,6 +1201,299 @@
 	.write   = mbox_write
 };
 
+static int mps_trc_show(struct seq_file *seq, void *v)
+{
+	int enabled, i;
+	struct trace_params tp;
+	unsigned int trcidx = (uintptr_t)seq->private & 3;
+	struct adapter *adap = seq->private - trcidx;
+
+	t4_get_trace_filter(adap, &tp, trcidx, &enabled);
+	if (!enabled) {
+		seq_puts(seq, "tracer is disabled\n");
+		return 0;
+	}
+
+	if (tp.skip_ofst * 8 >= TRACE_LEN) {
+		dev_err(adap->pdev_dev, "illegal trace pattern skip offset\n");
+		return -EINVAL;
+	}
+	if (tp.port < 8) {
+		i = adap->chan_map[tp.port & 3];
+		if (i >= MAX_NPORTS) {
+			dev_err(adap->pdev_dev,
+				"tracer %u is assigned to a non-existent port\n",
+				trcidx);
+			return -EINVAL;
+		}
+		seq_printf(seq, "tracer is capturing %s %s, ",
+			   adap->port[i]->name, tp.port < 4 ? "Rx" : "Tx");
+	} else
+		seq_printf(seq, "tracer is capturing loopback %d, ",
+			   tp.port - 8);
+	seq_printf(seq, "snap length: %u, min length: %u\n", tp.snap_len,
+		   tp.min_len);
+	seq_printf(seq, "packets captured %smatch filter\n",
+		   tp.invert ? "do not " : "");
+
+	if (tp.skip_ofst) {
+		seq_puts(seq, "filter pattern: ");
+		for (i = 0; i < tp.skip_ofst * 2; i += 2)
+			seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+		seq_putc(seq, '/');
+		for (i = 0; i < tp.skip_ofst * 2; i += 2)
+			seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+		seq_puts(seq, "@0\n");
+	}
+
+	seq_puts(seq, "filter pattern: ");
+	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+		seq_printf(seq, "%08x%08x", tp.data[i], tp.data[i + 1]);
+	seq_putc(seq, '/');
+	for (i = tp.skip_ofst * 2; i < TRACE_LEN / 4; i += 2)
+		seq_printf(seq, "%08x%08x", tp.mask[i], tp.mask[i + 1]);
+	seq_printf(seq, "@%u\n", (tp.skip_ofst + tp.skip_len) * 8);
+	return 0;
+}
+
+static int mps_trc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mps_trc_show, inode->i_private);
+}
+
+static unsigned int xdigit2int(unsigned char c)
+{
+	return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
+}
+
+#define TRC_PORT_NONE 0xff
+#define TRC_RSS_ENABLE 0x33
+#define TRC_RSS_DISABLE 0x13
+
+/* Set an MPS trace filter.  Syntax is:
+ *
+ * disable
+ *
+ * to disable tracing, or
+ *
+ * interface qid=<qid no> [snaplen=<val>] [minlen=<val>] [not] [<pattern>]...
+ *
+ * where interface is one of rxN, txN, or loopbackN (N = 0..3), qid is one of
+ * the NIC's response queue IDs as listed in sge_qinfo, and pattern has the form
+ *
+ * <pattern data>[/<pattern mask>][@<anchor>]
+ *
+ * Up to 2 filter patterns can be specified.  If 2 are supplied, the first one
+ * must be anchored at 0.  An omitted mask is taken as a mask of 1s, an omitted
+ * anchor is taken as 0.
+ */
+static ssize_t mps_trc_write(struct file *file, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	int i, enable, ret;
+	u32 *data, *mask;
+	struct trace_params tp;
+	const struct inode *ino;
+	unsigned int trcidx;
+	char *s, *p, *word, *end;
+	struct adapter *adap;
+	u32 j;
+
+	ino = file_inode(file);
+	trcidx = (uintptr_t)ino->i_private & 3;
+	adap = ino->i_private - trcidx;
+
+	/* Don't accept input more than 1K, can't be anything valid except lots
+	 * of whitespace.  Well, use less.
+	 */
+	if (count > 1024)
+		return -EFBIG;
+	p = s = kzalloc(count + 1, GFP_USER);
+	if (!s)
+		return -ENOMEM;
+	if (copy_from_user(s, buf, count)) {
+		count = -EFAULT;
+		goto out;
+	}
+
+	if (s[count - 1] == '\n')
+		s[count - 1] = '\0';
+
+	enable = strcmp("disable", s) != 0;
+	if (!enable)
+		goto apply;
+
+	/* enable or disable trace multi rss filter */
+	if (adap->trace_rss)
+		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_ENABLE);
+	else
+		t4_write_reg(adap, MPS_TRC_CFG_A, TRC_RSS_DISABLE);
+
+	memset(&tp, 0, sizeof(tp));
+	tp.port = TRC_PORT_NONE;
+	i = 0;	/* counts pattern nibbles */
+
+	while (p) {
+		while (isspace(*p))
+			p++;
+		word = strsep(&p, " ");
+		if (!*word)
+			break;
+
+		if (!strncmp(word, "qid=", 4)) {
+			end = (char *)word + 4;
+			ret = kstrtouint(end, 10, &j);
+			if (ret)
+				goto out;
+			if (!adap->trace_rss) {
+				t4_write_reg(adap, MPS_T5_TRC_RSS_CONTROL_A, j);
+				continue;
+			}
+
+			switch (trcidx) {
+			case 0:
+				t4_write_reg(adap, MPS_TRC_RSS_CONTROL_A, j);
+				break;
+			case 1:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER1_RSS_CONTROL_A, j);
+				break;
+			case 2:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER2_RSS_CONTROL_A, j);
+				break;
+			case 3:
+				t4_write_reg(adap,
+					     MPS_TRC_FILTER3_RSS_CONTROL_A, j);
+				break;
+			}
+			continue;
+		}
+		if (!strncmp(word, "snaplen=", 8)) {
+			end = (char *)word + 8;
+			ret = kstrtouint(end, 10, &j);
+			if (ret || j > 9600) {
+inval:				count = -EINVAL;
+				goto out;
+			}
+			tp.snap_len = j;
+			continue;
+		}
+		if (!strncmp(word, "minlen=", 7)) {
+			end = (char *)word + 7;
+			ret = kstrtouint(end, 10, &j);
+			if (ret || j > TFMINPKTSIZE_M)
+				goto inval;
+			tp.min_len = j;
+			continue;
+		}
+		if (!strcmp(word, "not")) {
+			tp.invert = !tp.invert;
+			continue;
+		}
+		if (!strncmp(word, "loopback", 8) && tp.port == TRC_PORT_NONE) {
+			if (word[8] < '0' || word[8] > '3' || word[9])
+				goto inval;
+			tp.port = word[8] - '0' + 8;
+			continue;
+		}
+		if (!strncmp(word, "tx", 2) && tp.port == TRC_PORT_NONE) {
+			if (word[2] < '0' || word[2] > '3' || word[3])
+				goto inval;
+			tp.port = word[2] - '0' + 4;
+			if (adap->chan_map[tp.port & 3] >= MAX_NPORTS)
+				goto inval;
+			continue;
+		}
+		if (!strncmp(word, "rx", 2) && tp.port == TRC_PORT_NONE) {
+			if (word[2] < '0' || word[2] > '3' || word[3])
+				goto inval;
+			tp.port = word[2] - '0';
+			if (adap->chan_map[tp.port] >= MAX_NPORTS)
+				goto inval;
+			continue;
+		}
+		if (!isxdigit(*word))
+			goto inval;
+
+		/* we have found a trace pattern */
+		if (i) {                            /* split pattern */
+			if (tp.skip_len)            /* too many splits */
+				goto inval;
+			tp.skip_ofst = i / 16;
+		}
+
+		data = &tp.data[i / 8];
+		mask = &tp.mask[i / 8];
+		j = i;
+
+		while (isxdigit(*word)) {
+			if (i >= TRACE_LEN * 2) {
+				count = -EFBIG;
+				goto out;
+			}
+			*data = (*data << 4) + xdigit2int(*word++);
+			if (++i % 8 == 0)
+				data++;
+		}
+		if (*word == '/') {
+			word++;
+			while (isxdigit(*word)) {
+				if (j >= i)         /* mask longer than data */
+					goto inval;
+				*mask = (*mask << 4) + xdigit2int(*word++);
+				if (++j % 8 == 0)
+					mask++;
+			}
+			if (i != j)                 /* mask shorter than data */
+				goto inval;
+		} else {                            /* no mask, use all 1s */
+			for ( ; i - j >= 8; j += 8)
+				*mask++ = 0xffffffff;
+			if (i % 8)
+				*mask = (1 << (i % 8) * 4) - 1;
+		}
+		if (*word == '@') {
+			end = (char *)word + 1;
+			ret = kstrtouint(end, 10, &j);
+			if (*end && *end != '\n')
+				goto inval;
+			if (j & 7)          /* doesn't start at multiple of 8 */
+				goto inval;
+			j /= 8;
+			if (j < tp.skip_ofst)     /* overlaps earlier pattern */
+				goto inval;
+			if (j - tp.skip_ofst > 31)            /* skip too big */
+				goto inval;
+			tp.skip_len = j - tp.skip_ofst;
+		}
+		if (i % 8) {
+			*data <<= (8 - i % 8) * 4;
+			*mask <<= (8 - i % 8) * 4;
+			i = (i + 15) & ~15;         /* 8-byte align */
+		}
+	}
+
+	if (tp.port == TRC_PORT_NONE)
+		goto inval;
+
+apply:
+	i = t4_set_trace_filter(adap, &tp, trcidx, enable);
+	if (i)
+		count = i;
+out:
+	kfree(s);
+	return count;
+}
+
+static const struct file_operations mps_trc_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mps_trc_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+	.write   = mps_trc_write
+};
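
The write handler registered above parses hex trace patterns nibble by nibble into 32-bit data/mask words, defaults an omitted mask to all ones, and finally left-aligns any partial word. A minimal user-space sketch of that packing for a pattern shorter than one word (the "0800" EtherType value is an illustrative example only):

    /* Hedged sketch of the nibble packing done by mps_trc_write(), simplified
     * to patterns shorter than one 32-bit word.
     */
    #include <ctype.h>
    #include <stdio.h>

    static unsigned int xdigit2int(unsigned char c)
    {
            return isdigit(c) ? c - '0' : tolower(c) - 'a' + 10;
    }

    int main(void)
    {
            const char *word = "0800";              /* example pattern, no mask given */
            unsigned int data = 0, mask = 0, i = 0;

            while (isxdigit((unsigned char)*word)) {
                    data = (data << 4) + xdigit2int(*word++);
                    i++;
            }
            mask = (1u << (i % 8) * 4) - 1;         /* omitted mask -> all 1s */
            if (i % 8) {                            /* left-align the partial word */
                    data <<= (8 - i % 8) * 4;
                    mask <<= (8 - i % 8) * 4;
            }
            printf("data 0x%08x mask 0x%08x\n", data, mask); /* 0x08000000 / 0xffff0000 */
            return 0;
    }
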
+
 static ssize_t flash_read(struct file *file, char __user *buf, size_t count,
 			  loff_t *ppos)
 {
@@ -1895,13 +2236,13 @@
 {
 	struct adapter *adap = seq->private;
 	int eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4);
-	int toe_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
+	int iscsi_entries = DIV_ROUND_UP(adap->sge.ofldqsets, 4);
 	int rdma_entries = DIV_ROUND_UP(adap->sge.rdmaqs, 4);
 	int ciq_entries = DIV_ROUND_UP(adap->sge.rdmaciqs, 4);
 	int ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4);
 	int i, r = (uintptr_t)v - 1;
-	int toe_idx = r - eth_entries;
-	int rdma_idx = toe_idx - toe_entries;
+	int iscsi_idx = r - eth_entries;
+	int rdma_idx = iscsi_idx - iscsi_entries;
 	int ciq_idx = rdma_idx - rdma_entries;
 	int ctrl_idx =  ciq_idx - ciq_entries;
 	int fq_idx =  ctrl_idx - ctrl_entries;
@@ -1917,8 +2258,12 @@
 		seq_putc(seq, '\n'); \
 } while (0)
 #define S(s, v) S3("s", s, v)
+#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
 #define T(s, v) S3("u", s, tx[i].v)
+#define TL(s, v) T3("lu", s, v)
+#define R3(fmt_spec, s, v) S3(fmt_spec, s, rx[i].v)
 #define R(s, v) S3("u", s, rx[i].v)
+#define RL(s, v) R3("lu", s, v)
 
 	if (r < eth_entries) {
 		int base_qset = r * 4;
@@ -1957,12 +2302,30 @@
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
-	} else if (toe_idx < toe_entries) {
-		const struct sge_ofld_rxq *rx = &adap->sge.ofldrxq[toe_idx * 4];
-		const struct sge_ofld_txq *tx = &adap->sge.ofldtxq[toe_idx * 4];
-		int n = min(4, adap->sge.ofldqsets - 4 * toe_idx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxCSO:", stats.rx_cso);
+		RL("VLANxtract:", stats.vlan_ex);
+		RL("LROmerged:", stats.lro_merged);
+		RL("LROpackets:", stats.lro_pkts);
+		RL("RxDrops:", stats.rx_drops);
+		TL("TSO:", tso);
+		TL("TxCSO:", tx_cso);
+		TL("VLANins:", vlan_ins);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
+		TL("TxMapErr:", mapping_err);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
 
-		S("QType:", "TOE");
+	} else if (iscsi_idx < iscsi_entries) {
+		const struct sge_ofld_rxq *rx =
+			&adap->sge.ofldrxq[iscsi_idx * 4];
+		const struct sge_ofld_txq *tx =
+			&adap->sge.ofldtxq[iscsi_idx * 4];
+		int n = min(4, adap->sge.ofldqsets - 4 * iscsi_idx);
+
+		S("QType:", "iSCSI");
 		T("TxQ ID:", q.cntxt_id);
 		T("TxQ size:", q.size);
 		T("TxQ inuse:", q.in_use);
@@ -1982,6 +2345,13 @@
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
 	} else if (rdma_idx < rdma_entries) {
 		const struct sge_ofld_rxq *rx =
 				&adap->sge.rdmarxq[rdma_idx * 4];
@@ -2004,6 +2374,13 @@
 		R("FL avail:", fl.avail);
 		R("FL PIDX:", fl.pidx);
 		R("FL CIDX:", fl.cidx);
+		RL("RxPackets:", stats.pkts);
+		RL("RxImmPkts:", stats.imm);
+		RL("RxNoMem:", stats.nomem);
+		RL("FLAllocErr:", fl.alloc_failed);
+		RL("FLLrgAlcErr:", fl.large_alloc_failed);
+		RL("FLStarving:", fl.starving);
+
 	} else if (ciq_idx < ciq_entries) {
 		const struct sge_ofld_rxq *rx = &adap->sge.rdmaciq[ciq_idx * 4];
 		int n = min(4, adap->sge.rdmaciqs - 4 * ciq_idx);
@@ -2019,6 +2396,9 @@
 		S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
 		S3("u", "Intr pktcnt:",
 		   adap->sge.counter_val[rx[i].rspq.pktcnt_idx]);
+		RL("RxAN:", stats.an);
+		RL("RxNoMem:", stats.nomem);
+
 	} else if (ctrl_idx < ctrl_entries) {
 		const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
 		int n = min(4, adap->params.nports - 4 * ctrl_idx);
@@ -2029,6 +2409,8 @@
 		T("TxQ inuse:", q.in_use);
 		T("TxQ CIDX:", q.cidx);
 		T("TxQ PIDX:", q.pidx);
+		TL("TxQFull:", q.stops);
+		TL("TxQRestarts:", q.restarts);
 	} else if (fq_idx == 0) {
 		const struct sge_rspq *evtq = &adap->sge.fw_evtq;
 
@@ -2044,10 +2426,14 @@
 			   adap->sge.counter_val[evtq->pktcnt_idx]);
 	}
 #undef R
+#undef RL
 #undef T
+#undef TL
 #undef S
+#undef R3
+#undef T3
 #undef S3
-return 0;
+	return 0;
 }
 
 static int sge_queue_entries(const struct adapter *adap)
@@ -2164,6 +2550,73 @@
 	.llseek  = default_llseek,
 };
 
+static int tid_info_show(struct seq_file *seq, void *v)
+{
+	struct adapter *adap = seq->private;
+	const struct tid_info *t = &adap->tids;
+	enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		unsigned int sb;
+
+		if (chip <= CHELSIO_T5)
+			sb = t4_read_reg(adap, LE_DB_SERVER_INDEX_A) / 4;
+		else
+			sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
+
+		if (sb) {
+			seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else if (adap->flags & FW_OFLD_CONN) {
+			seq_printf(seq, "TID range: %u..%u/%u..%u",
+				   t->aftid_base,
+				   t->aftid_end,
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u/%u\n",
+				   atomic_read(&t->tids_in_use),
+				   atomic_read(&t->hash_tids_in_use));
+		} else {
+			seq_printf(seq, "TID range: %u..%u",
+				   adap->tids.hash_base,
+				   t->ntids - 1);
+			seq_printf(seq, ", in use: %u\n",
+				   atomic_read(&t->hash_tids_in_use));
+		}
+	} else if (t->ntids) {
+		seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+		seq_printf(seq, ", in use: %u\n",
+			   atomic_read(&t->tids_in_use));
+	}
+
+	if (t->nstids)
+		seq_printf(seq, "STID range: %u..%u, in use: %u\n",
+			   (!t->stid_base &&
+			   (chip <= CHELSIO_T5)) ?
+			   t->stid_base + 1 : t->stid_base,
+			   t->stid_base + t->nstids - 1, t->stids_in_use);
+	if (t->natids)
+		seq_printf(seq, "ATID range: 0..%u, in use: %u\n",
+			   t->natids - 1, t->atids_in_use);
+	seq_printf(seq, "FTID range: %u..%u\n", t->ftid_base,
+		   t->ftid_base + t->nftids - 1);
+	if (t->nsftids)
+		seq_printf(seq, "SFTID range: %u..%u in use: %u\n",
+			   t->sftid_base, t->sftid_base + t->nsftids - 2,
+			   t->sftids_in_use);
+	if (t->ntids)
+		seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n",
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+			   t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+	return 0;
+}
+
+DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+
 static void add_debugfs_mem(struct adapter *adap, const char *name,
 			    unsigned int idx, unsigned int size_mb)
 {
@@ -2227,6 +2680,290 @@
 	.llseek  = generic_file_llseek,
 };
 
+struct mem_desc {
+	unsigned int base;
+	unsigned int limit;
+	unsigned int idx;
+};
+
+static int mem_desc_cmp(const void *a, const void *b)
+{
+	return ((const struct mem_desc *)a)->base -
+	       ((const struct mem_desc *)b)->base;
+}
+
+static void mem_region_show(struct seq_file *seq, const char *name,
+			    unsigned int from, unsigned int to)
+{
+	char buf[40];
+
+	string_get_size((u64)to - from + 1, 1, STRING_UNITS_2, buf,
+			sizeof(buf));
+	seq_printf(seq, "%-15s %#x-%#x [%s]\n", name, from, to, buf);
+}
+
+static int meminfo_show(struct seq_file *seq, void *v)
+{
+	static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
+					"MC0:", "MC1:"};
+	static const char * const region[] = {
+		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+		"RQUDP region:", "PBL region:", "TXPBL region:",
+		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+		"On-chip queues:"
+	};
+
+	int i, n;
+	u32 lo, hi, used, alloc;
+	struct mem_desc avail[4];
+	struct mem_desc mem[ARRAY_SIZE(region) + 3];      /* up to 3 holes */
+	struct mem_desc *md = mem;
+	struct adapter *adap = seq->private;
+
+	for (i = 0; i < ARRAY_SIZE(mem); i++) {
+		mem[i].limit = 0;
+		mem[i].idx = i;
+	}
+
+	/* Find and sort the populated memory ranges */
+	i = 0;
+	lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+	if (lo & EDRAM0_ENABLE_F) {
+		hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+		avail[i].base = EDRAM0_BASE_G(hi) << 20;
+		avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
+		avail[i].idx = 0;
+		i++;
+	}
+	if (lo & EDRAM1_ENABLE_F) {
+		hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+		avail[i].base = EDRAM1_BASE_G(hi) << 20;
+		avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
+		avail[i].idx = 1;
+		i++;
+	}
+
+	if (is_t5(adap->params.chip)) {
+		if (lo & EXT_MEM0_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+			avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
+			avail[i].idx = 3;
+			i++;
+		}
+		if (lo & EXT_MEM1_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+			avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
+			avail[i].idx = 4;
+			i++;
+		}
+	} else {
+		if (lo & EXT_MEM_ENABLE_F) {
+			hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			avail[i].base = EXT_MEM_BASE_G(hi) << 20;
+			avail[i].limit =
+				avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
+			avail[i].idx = 2;
+			i++;
+		}
+	}
+	if (!i)                                    /* no memory available */
+		return 0;
+	sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+	(md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
+	(md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
+	(md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
+
+	/* the next few have explicit upper bounds */
+	md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
+		    PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
+	md++;
+
+	md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
+	md->limit = md->base - 1 +
+		    t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
+		    PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
+	md++;
+
+	if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+			hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
+			md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+		} else {
+			hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
+			md->base = t4_read_reg(adap,
+					       LE_DB_HASH_TBL_BASE_ADDR_A);
+		}
+		md->limit = 0;
+	} else {
+		md->base = 0;
+		md->idx = ARRAY_SIZE(region);  /* hide it */
+	}
+	md++;
+
+#define ulp_region(reg) do { \
+	md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
+	(md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
+} while (0)
+
+	ulp_region(RX_ISCSI);
+	ulp_region(RX_TDDP);
+	ulp_region(TX_TPT);
+	ulp_region(RX_STAG);
+	ulp_region(RX_RQ);
+	ulp_region(RX_RQUDP);
+	ulp_region(RX_PBL);
+	ulp_region(TX_PBL);
+#undef ulp_region
+	md->base = 0;
+	md->idx = ARRAY_SIZE(region);
+	if (!is_t4(adap->params.chip)) {
+		u32 size = 0;
+		u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
+		u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
+
+		if (is_t5(adap->params.chip)) {
+			if (sge_ctrl & VFIFO_ENABLE_F)
+				size = DBVFIFO_SIZE_G(fifo_size);
+		} else {
+			size = T6_DBVFIFO_SIZE_G(fifo_size);
+		}
+
+		if (size) {
+			md->base = BASEADDR_G(t4_read_reg(adap,
+					SGE_DBVFIFO_BADDR_A));
+			md->limit = md->base + (size << 2) - 1;
+		}
+	}
+
+	md++;
+
+	md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
+	md->limit = 0;
+	md++;
+	md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
+	md->limit = 0;
+	md++;
+
+	md->base = adap->vres.ocq.start;
+	if (adap->vres.ocq.size)
+		md->limit = md->base + adap->vres.ocq.size - 1;
+	else
+		md->idx = ARRAY_SIZE(region);  /* hide it */
+	md++;
+
+	/* add any address-space holes, there can be up to 3 */
+	for (n = 0; n < i - 1; n++)
+		if (avail[n].limit < avail[n + 1].base)
+			(md++)->base = avail[n].limit;
+	if (avail[n].limit)
+		(md++)->base = avail[n].limit;
+
+	n = md - mem;
+	sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+
+	for (lo = 0; lo < i; lo++)
+		mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
+				avail[lo].limit - 1);
+
+	seq_putc(seq, '\n');
+	for (i = 0; i < n; i++) {
+		if (mem[i].idx >= ARRAY_SIZE(region))
+			continue;                        /* skip holes */
+		if (!mem[i].limit)
+			mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
+		mem_region_show(seq, region[mem[i].idx], mem[i].base,
+				mem[i].limit);
+	}
+
+	seq_putc(seq, '\n');
+	lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
+	hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+	mem_region_show(seq, "uP RAM:", lo, hi);
+
+	lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
+	hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+	mem_region_show(seq, "uP Extmem2:", lo, hi);
+
+	lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
+	seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
+		   PMRXMAXPAGE_G(lo),
+		   t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
+		   (lo & PMRXNUMCHN_F) ? 2 : 1);
+
+	lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
+	hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
+	seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
+		   PMTXMAXPAGE_G(lo),
+		   hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
+		   hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
+	seq_printf(seq, "%u p-structs\n\n",
+		   t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
+
+	for (i = 0; i < 4; i++) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
+		if (is_t5(adap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		/* For T6 these are MAC buffer groups */
+		seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
+			   i, used, alloc);
+	}
+	for (i = 0; i < adap->params.arch.nchan; i++) {
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
+			lo = t4_read_reg(adap,
+					 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+		else
+			lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
+		if (is_t5(adap->params.chip)) {
+			used = T5_USED_G(lo);
+			alloc = T5_ALLOC_G(lo);
+		} else {
+			used = USED_G(lo);
+			alloc = ALLOC_G(lo);
+		}
+		/* For T6 these are MAC buffer groups */
+		seq_printf(seq,
+			   "Loopback %d using %u pages out of %u allocated\n",
+			   i, used, alloc);
+	}
+	return 0;
+}
+
+static int meminfo_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, meminfo_show, inode->i_private);
+}
+
+static const struct file_operations meminfo_fops = {
+	.owner   = THIS_MODULE,
+	.open    = meminfo_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = single_release,
+};
 /* Add an array of Debug FS files.
  */
 void add_debugfs_files(struct adapter *adap,
@@ -2264,6 +3001,10 @@
 		{ "mbox5", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 5 },
 		{ "mbox6", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 6 },
 		{ "mbox7", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 7 },
+		{ "trace0", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "trace1", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
+		{ "trace2", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
+		{ "trace3", &mps_trc_debugfs_fops, S_IRUSR | S_IWUSR, 3 },
 		{ "l2t", &t4_l2t_fops, S_IRUSR, 0},
 		{ "mps_tcam", &mps_tcam_debugfs_fops, S_IRUSR, 0 },
 		{ "rss", &rss_debugfs_fops, S_IRUSR, 0 },
@@ -2293,7 +3034,9 @@
 #if IS_ENABLED(CONFIG_IPV6)
 		{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
 #endif
+		{ "tids", &tid_info_debugfs_fops, S_IRUSR, 0},
 		{ "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
+		{ "meminfo", &meminfo_fops, S_IRUSR, 0 },
 	};
 
 	/* Debug FS nodes common to all T5 and later adapters.
@@ -2341,6 +3084,10 @@
 
 	de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
 				      &flash_debugfs_fops, adap->params.sf_size);
+	debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->use_bd);
+	debugfs_create_bool("trace_rss", S_IWUSR | S_IRUSR,
+			    adap->debugfs_root, &adap->trace_rss);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 687acf7..5eedb98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -925,6 +925,20 @@
 	const struct firmware *fw;
 	struct adapter *adap = netdev2adap(netdev);
 	unsigned int mbox = PCIE_FW_MASTER_M + 1;
+	u32 pcie_fw;
+	unsigned int master;
+	u8 master_vld = 0;
+
+	pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+	master = PCIE_FW_MASTER_G(pcie_fw);
+	if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+		master_vld = 1;
+	/* If csiostor is the master, firmware flashing is not supported */
+	if (master_vld && (master != adap->pf)) {
+		dev_warn(adap->pdev_dev,
+			 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+		return -EOPNOTSUPP;
+	}
 
 	ef->data[sizeof(ef->data) - 1] = '\0';
 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 351f3b1..eb22d58 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1548,7 +1548,7 @@
 		t->stid_tab[stid].data = data;
 		stid -= t->nstids;
 		stid += t->sftid_base;
-		t->stids_in_use++;
+		t->sftids_in_use++;
 	}
 	spin_unlock_bh(&t->stid_lock);
 	return stid;
@@ -1573,10 +1573,14 @@
 	else
 		bitmap_release_region(t->stid_bmap, stid, 2);
 	t->stid_tab[stid].data = NULL;
-	if (family == PF_INET)
-		t->stids_in_use--;
-	else
-		t->stids_in_use -= 4;
+	if (stid < t->nstids) {
+		if (family == PF_INET)
+			t->stids_in_use--;
+		else
+			t->stids_in_use -= 4;
+	} else {
+		t->sftids_in_use--;
+	}
 	spin_unlock_bh(&t->stid_lock);
 }
 EXPORT_SYMBOL(cxgb4_free_stid);
@@ -1654,20 +1658,25 @@
  */
 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
 {
-	void *old;
 	struct sk_buff *skb;
 	struct adapter *adap = container_of(t, struct adapter, tids);
 
-	old = t->tid_tab[tid];
+	WARN_ON(tid >= t->ntids);
+
+	if (t->tid_tab[tid]) {
+		t->tid_tab[tid] = NULL;
+		if (t->hash_base && (tid >= t->hash_base))
+			atomic_dec(&t->hash_tids_in_use);
+		else
+			atomic_dec(&t->tids_in_use);
+	}
+
 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
 	if (likely(skb)) {
-		t->tid_tab[tid] = NULL;
 		mk_tid_release(skb, chan, tid);
 		t4_ofld_send(adap, skb);
 	} else
 		cxgb4_queue_tid_release(t, chan, tid);
-	if (old)
-		atomic_dec(&t->tids_in_use);
 }
 EXPORT_SYMBOL(cxgb4_remove_tid);
 
@@ -1702,9 +1711,11 @@
 	spin_lock_init(&t->atid_lock);
 
 	t->stids_in_use = 0;
+	t->sftids_in_use = 0;
 	t->afree = NULL;
 	t->atids_in_use = 0;
 	atomic_set(&t->tids_in_use, 0);
+	atomic_set(&t->hash_tids_in_use, 0);
 
 	/* Setup the free list for atid_tab and clear the stid bitmap. */
 	if (natids) {
@@ -3657,6 +3668,10 @@
 	 */
 	t4_get_fw_version(adap, &adap->params.fw_vers);
 	t4_get_tp_version(adap, &adap->params.tp_vers);
+	ret = t4_check_fw_version(adap);
+	/* If firmware is too old (not supported by driver) force an update. */
+	if (ret == -EFAULT)
+		state = DEV_STATE_UNINIT;
 	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
 		struct fw_info *fw_info;
 		struct fw_hdr *card_fw;
@@ -4551,6 +4566,32 @@
 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
 #define SEGMENT_SIZE 128
 
+static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
+{
+	int ver, chip = 0;
+	u16 device_id;
+
+	/* Retrieve adapter's device ID */
+	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+	ver = device_id >> 12;
+	switch (ver) {
+	case CHELSIO_T4:
+		chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+		break;
+	case CHELSIO_T5:
+		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+		break;
+	case CHELSIO_T6:
+		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+		break;
+	default:
+		dev_err(&pdev->dev, "Device %d is not supported\n",
+			device_id);
+		return -EINVAL;
+	}
+	return chip;
+}
+
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	int func, i, err, s_qpp, qpp, num_seg;
@@ -4558,6 +4599,8 @@
 	bool highdma = false;
 	struct adapter *adapter = NULL;
 	void __iomem *regs;
+	u32 whoami, pl_rev;
+	enum chip_type chip;
 
 	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -4586,7 +4629,11 @@
 		goto out_unmap_bar0;
 
 	/* We control everything through one PF */
-	func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
+	whoami = readl(regs + PL_WHOAMI_A);
+	pl_rev = REV_G(readl(regs + PL_REV_A));
+	chip = get_chip_type(pdev, pl_rev);
+	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 	if (func != ent->driver_data) {
 		iounmap(regs);
 		pci_disable_device(pdev);
@@ -4757,7 +4804,7 @@
 	 */
 	cfg_queues(adapter);
 
-	adapter->l2t = t4_init_l2t();
+	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
 	if (!adapter->l2t) {
 		/* We tolerate a lack of L2T, giving up some functionality */
 		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
@@ -4782,6 +4829,22 @@
 		adapter->params.offload = 0;
 	}
 
+	if (is_offload(adapter)) {
+		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+			u32 hash_base, hash_reg;
+
+			if (chip <= CHELSIO_T5) {
+				hash_reg = LE_DB_TID_HASHBASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base / 4;
+			} else {
+				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
+				hash_base = t4_read_reg(adapter, hash_reg);
+				adapter->tids.hash_base = hash_base;
+			}
+		}
+	}
+
 	/* See what interrupts we'll be using */
 	if (msi > 1 && enable_msix(adapter) == 0)
 		adapter->flags |= USING_MSIX;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index b27897d..c3a8be5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -96,6 +96,7 @@
 	unsigned long *stid_bmap;
 	unsigned int nstids;
 	unsigned int stid_base;
+	unsigned int hash_base;
 
 	union aopen_entry *atid_tab;
 	unsigned int natids;
@@ -116,8 +117,12 @@
 
 	spinlock_t stid_lock;
 	unsigned int stids_in_use;
+	unsigned int sftids_in_use;
 
+	/* TIDs in the TCAM */
 	atomic_t tids_in_use;
+	/* TIDs in the HASH */
+	atomic_t hash_tids_in_use;
 };
 
 static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -147,7 +152,10 @@
 				    unsigned int tid)
 {
 	t->tid_tab[tid] = data;
-	atomic_inc(&t->tids_in_use);
+	if (t->hash_base && (tid >= t->hash_base))
+		atomic_inc(&t->hash_tids_in_use);
+	else
+		atomic_inc(&t->tids_in_use);
 }
 
 int cxgb4_alloc_atid(struct tid_info *t, void *data);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 252efc2..ac27898 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -51,24 +51,17 @@
 #define VLAN_NONE 0xfff
 
 /* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR    (1 << 12)
-
-enum {
-	L2T_STATE_VALID,      /* entry is up to date */
-	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
-	L2T_STATE_RESOLVING,  /* entry needs address resolution */
-	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
-	/* when state is one of the below the entry is not hashed */
-	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
-	L2T_STATE_UNUSED      /* entry not in use */
-};
+#define SYNC_WR_S    12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F    SYNC_WR_V(1)
 
 struct l2t_data {
+	unsigned int l2t_start;     /* start index of our piece of the L2T */
+	unsigned int l2t_size;      /* number of entries in l2tab */
 	rwlock_t lock;
 	atomic_t nfree;             /* number of free entries */
 	struct l2t_entry *rover;    /* starting point for next allocation */
-	struct l2t_entry l2tab[L2T_SIZE];
+	struct l2t_entry l2tab[0];  /* MUST BE LAST */
 };
 
 static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@
 /*
  * To avoid having to check address families we do not allow v4 and v6
  * neighbors to be on the same hash chain.  We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second.  We need at least two
+ * entries in our L2T for this scheme to work.
  */
 enum {
-	L2T_SZ_HALF = L2T_SIZE / 2,
-	L2T_HASH_MASK = L2T_SZ_HALF - 1
+	L2T_MIN_HASH_BUCKETS = 2,
 };
 
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+				    int ifindex)
 {
-	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+	unsigned int l2t_size_half = d->l2t_size / 2;
+
+	return jhash_2words(*key, ifindex, 0) % l2t_size_half;
 }
 
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+				     int ifindex)
 {
+	unsigned int l2t_size_half = d->l2t_size / 2;
 	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
 
-	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+	return (l2t_size_half +
+		(jhash_2words(xor, ifindex, 0) % l2t_size_half));
 }
 
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+			      int addr_len, int ifindex)
 {
-	return addr_len == 4 ? arp_hash(addr, ifindex) :
-			       ipv6_hash(addr, ifindex);
+	return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+			       ipv6_hash(d, addr, ifindex);
 }
 
 /*
@@ -139,6 +139,8 @@
  */
 static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 {
+	struct l2t_data *d = adap->l2t;
+	unsigned int l2t_idx = e->idx + d->l2t_start;
 	struct sk_buff *skb;
 	struct cpl_l2t_write_req *req;
 
@@ -150,10 +152,10 @@
 	INIT_TP_WR(req, 0);
 
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
-					e->idx | (sync ? F_SYNC_WR : 0) |
+					l2t_idx | (sync ? SYNC_WR_F : 0) |
 					TID_QID_V(adap->sge.fw_evtq.abs_id)));
 	req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
-	req->l2t_idx = htons(e->idx);
+	req->l2t_idx = htons(l2t_idx);
 	req->vlan = htons(e->vlan);
 	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@
  */
 void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
 {
+	struct l2t_data *d = adap->l2t;
 	unsigned int tid = GET_TID(rpl);
-	unsigned int idx = tid & (L2T_SIZE - 1);
+	unsigned int l2t_idx = tid % L2T_SIZE;
 
 	if (unlikely(rpl->status != CPL_ERR_NONE)) {
 		dev_err(adap->pdev_dev,
 			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
-			rpl->status, idx);
+			rpl->status, l2t_idx);
 		return;
 	}
 
-	if (tid & F_SYNC_WR) {
-		struct l2t_entry *e = &adap->l2t->l2tab[idx];
+	if (tid & SYNC_WR_F) {
+		struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
 
 		spin_lock(&e->lock);
 		if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@
 		return NULL;
 
 	/* there's definitely a free entry */
-	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+	for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
 		if (atomic_read(&e->refcnt) == 0)
 			goto found;
 
@@ -368,7 +371,7 @@
 	int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *)neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(addr, addr_len, ifidx);
+	int hash = addr_hash(d, addr, addr_len, ifidx);
 
 	if (neigh->dev->flags & IFF_LOOPBACK)
 		lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@
 	int addr_len = neigh->tbl->key_len;
 	u32 *addr = (u32 *) neigh->primary_key;
 	int ifidx = neigh->dev->ifindex;
-	int hash = addr_hash(addr, addr_len, ifidx);
+	int hash = addr_hash(d, addr, addr_len, ifidx);
 
 	read_lock_bh(&d->lock);
 	for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@
 	return write_l2e(adap, e, 0);
 }
 
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
 {
+	unsigned int l2t_size;
 	int i;
 	struct l2t_data *d;
 
-	d = t4_alloc_mem(sizeof(*d));
+	if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+		return NULL;
+	l2t_size = l2t_end - l2t_start + 1;
+	if (l2t_size < L2T_MIN_HASH_BUCKETS)
+		return NULL;
+
+	d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
 	if (!d)
 		return NULL;
 
+	d->l2t_start = l2t_start;
+	d->l2t_size = l2t_size;
+
 	d->rover = d->l2tab;
-	atomic_set(&d->nfree, L2T_SIZE);
+	atomic_set(&d->nfree, l2t_size);
 	rwlock_init(&d->lock);
 
-	for (i = 0; i < L2T_SIZE; ++i) {
+	for (i = 0; i < d->l2t_size; ++i) {
 		d->l2tab[i].idx = i;
 		d->l2tab[i].state = L2T_STATE_UNUSED;
 		spin_lock_init(&d->l2tab[i].lock);
@@ -578,9 +591,9 @@
 
 static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
 {
-	struct l2t_entry *l2tab = seq->private;
+	struct l2t_data *d = seq->private;
 
-	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+	return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
 }
 
 static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@
 			 "Ethernet address  VLAN/P LP State Users Port\n");
 	else {
 		char ip[60];
+		struct l2t_data *d = seq->private;
 		struct l2t_entry *e = v;
 
 		spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@
 		else
 			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
 		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
-			   e->idx, ip, e->dmac,
+			   e->idx + d->l2t_start, ip, e->dmac,
 			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
 			   l2e_state(e), atomic_read(&e->refcnt),
 			   e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@
 		struct adapter *adap = inode->i_private;
 		struct seq_file *seq = file->private_data;
 
-		seq->private = adap->l2t->l2tab;
+		seq->private = adap->l2t;
 	}
 	return rc;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index a30126c..b38dc52 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -39,6 +39,20 @@
 #include <linux/if_ether.h>
 #include <linux/atomic.h>
 
+enum { L2T_SIZE = 4096 };     /* # of L2T entries */
+
+enum {
+	L2T_STATE_VALID,      /* entry is up to date */
+	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,  /* entry needs address resolution */
+	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+	L2T_STATE_NOARP,      /* Netdev down or removed */
+
+	/* when state is one of the below the entry is not hashed */
+	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
+	L2T_STATE_UNUSED      /* entry not in use */
+};
+
 struct adapter;
 struct l2t_data;
 struct neighbour;
@@ -56,7 +70,7 @@
  */
 struct l2t_entry {
 	u16 state;                  /* entry state */
-	u16 idx;                    /* entry index */
+	u16 idx;                    /* entry index within in-memory table */
 	u32 addr[4];                /* next hop IP or IPv6 address */
 	int ifindex;                /* neighbor's net_device's ifindex */
 	struct neighbour *neigh;    /* associated neighbour */
@@ -104,7 +118,7 @@
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
 			 u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
 
 extern const struct file_operations t4_l2t_fops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 942db07..78f446c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1137,7 +1137,7 @@
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	u32 wr_mid;
+	u32 wr_mid, ctrl0;
 	u64 cntrl, *end;
 	int qidx, credits;
 	unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 	}
 
-	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
-			   TXPKT_INTF_V(pi->tx_chan) |
-			   TXPKT_PF_V(adap->pf));
+	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+		TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+	if (is_t4(adap->params.chip))
+		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+	else
+		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+	cpl->ctrl0 = htonl(ctrl0);
 	cpl->pack = htons(0);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1418,18 +1424,17 @@
 		struct fw_wr_hdr *wr;
 		unsigned int ndesc = skb->priority;     /* previously saved */
 
-		/*
-		 * Write descriptors and free skbs outside the lock to limit
+		written += ndesc;
+		/* Write descriptors and free skbs outside the lock to limit
 		 * wait times.  q->full is still set so new skbs will be queued.
 		 */
+		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+		txq_advance(&q->q, ndesc);
 		spin_unlock(&q->sendq.lock);
 
-		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
 		inline_tx_skb(skb, &q->q, wr);
 		kfree_skb(skb);
 
-		written += ndesc;
-		txq_advance(&q->q, ndesc);
 		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
 			unsigned long old = q->q.stops;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2b52aae..4480625 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -37,6 +37,7 @@
 #include "t4_regs.h"
 #include "t4_values.h"
 #include "t4fw_api.h"
+#include "t4fw_version.h"
 
 /**
  *	t4_wait_op_done_val - wait until an operation is completed
@@ -345,6 +346,43 @@
 				       FW_CMD_MAX_TIMEOUT);
 }
 
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+	u32 edc_ecc_err_addr_reg;
+	u32 rdata_reg;
+
+	if (is_t4(adap->params.chip)) {
+		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+		return 0;
+	}
+	if (idx != 0 && idx != 1) {
+		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+		return 0;
+	}
+
+	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
+	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
+
+	CH_WARN(adap,
+		"edc%d err addr 0x%x: 0x%x.\n",
+		idx, edc_ecc_err_addr_reg,
+		t4_read_reg(adap, edc_ecc_err_addr_reg));
+	CH_WARN(adap,
+		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+		rdata_reg,
+		(unsigned long long)t4_read_reg64(adap, rdata_reg),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
+		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
+
+	return 0;
+}
+
 /**
  *	t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  *	@adap: the adapter
@@ -1322,9 +1360,10 @@
 	};
 
 	static const unsigned int t6_reg_ranges[] = {
-		0x1008, 0x114c,
+		0x1008, 0x1124,
+		0x1138, 0x114c,
 		0x1180, 0x11b4,
-		0x11fc, 0x1250,
+		0x11fc, 0x1254,
 		0x1280, 0x133c,
 		0x1800, 0x18fc,
 		0x3000, 0x302c,
@@ -1345,18 +1384,18 @@
 		0x5a80, 0x5a9c,
 		0x5b94, 0x5bfc,
 		0x5c10, 0x5ec0,
-		0x5ec8, 0x5ec8,
+		0x5ec8, 0x5ecc,
 		0x6000, 0x6040,
-		0x6058, 0x6154,
+		0x6058, 0x619c,
 		0x7700, 0x7798,
 		0x77c0, 0x7880,
 		0x78cc, 0x78fc,
 		0x7b00, 0x7c54,
 		0x7d00, 0x7efc,
-		0x8dc0, 0x8de0,
+		0x8dc0, 0x8de4,
 		0x8df8, 0x8e84,
 		0x8ea0, 0x8f88,
-		0x8fb8, 0x911c,
+		0x8fb8, 0x9124,
 		0x9400, 0x9470,
 		0x9600, 0x971c,
 		0x9800, 0x9808,
@@ -1371,20 +1410,21 @@
 		0x9f00, 0x9f6c,
 		0x9f80, 0xa020,
 		0xd004, 0xd03c,
+		0xd100, 0xd118,
+		0xd200, 0xd31c,
 		0xdfc0, 0xdfe0,
 		0xe000, 0xf008,
 		0x11000, 0x11014,
-		0x11048, 0x11110,
-		0x11118, 0x1117c,
-		0x11190, 0x11260,
+		0x11048, 0x1117c,
+		0x11190, 0x11270,
 		0x11300, 0x1130c,
-		0x12000, 0x1205c,
+		0x12000, 0x1206c,
 		0x19040, 0x1906c,
 		0x19078, 0x19080,
 		0x1908c, 0x19124,
 		0x19150, 0x191b0,
 		0x191d0, 0x191e8,
-		0x19238, 0x192b8,
+		0x19238, 0x192bc,
 		0x193f8, 0x19474,
 		0x19490, 0x194cc,
 		0x194f0, 0x194f8,
@@ -1461,12 +1501,11 @@
 		0x1ff00, 0x1ff84,
 		0x1ffc0, 0x1ffc8,
 		0x30000, 0x30070,
-		0x30100, 0x3015c,
-		0x30190, 0x301d0,
-		0x30200, 0x30318,
+		0x30100, 0x301d0,
+		0x30200, 0x30320,
 		0x30400, 0x3052c,
 		0x30540, 0x3061c,
-		0x30800, 0x3088c,
+		0x30800, 0x30890,
 		0x308c0, 0x30908,
 		0x30910, 0x309b8,
 		0x30a00, 0x30a04,
@@ -1539,12 +1578,11 @@
 		0x33c24, 0x33c50,
 		0x33cf0, 0x33cfc,
 		0x34000, 0x34070,
-		0x34100, 0x3415c,
-		0x34190, 0x341d0,
-		0x34200, 0x34318,
+		0x34100, 0x341d0,
+		0x34200, 0x34320,
 		0x34400, 0x3452c,
 		0x34540, 0x3461c,
-		0x34800, 0x3488c,
+		0x34800, 0x34890,
 		0x348c0, 0x34908,
 		0x34910, 0x349b8,
 		0x34a00, 0x34a04,
@@ -2129,6 +2167,61 @@
 	return 0;
 }
 
+/**
+ *	t4_check_fw_version - check if the FW is supported with this driver
+ *	@adap: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if the FW is at least the minimum supported version, and a negative
+ *	error if the version could not be read or the FW is too old
+ */
+int t4_check_fw_version(struct adapter *adap)
+{
+	int ret, major, minor, micro;
+	int exp_major, exp_minor, exp_micro;
+	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
+	if (ret)
+		return ret;
+
+	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
+	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
+	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+		exp_major = T4FW_MIN_VERSION_MAJOR;
+		exp_minor = T4FW_MIN_VERSION_MINOR;
+		exp_micro = T4FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T5:
+		exp_major = T5FW_MIN_VERSION_MAJOR;
+		exp_minor = T5FW_MIN_VERSION_MINOR;
+		exp_micro = T5FW_MIN_VERSION_MICRO;
+		break;
+	case CHELSIO_T6:
+		exp_major = T6FW_MIN_VERSION_MAJOR;
+		exp_minor = T6FW_MIN_VERSION_MINOR;
+		exp_micro = T6FW_MIN_VERSION_MICRO;
+		break;
+	default:
+		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
+			adap->params.chip);
+		return -EINVAL;
+	}
+
+	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
+	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
+		dev_err(adap->pdev_dev,
+			"Card has firmware version %u.%u.%u, minimum "
+			"supported firmware is %u.%u.%u.\n", major, minor,
+			micro, exp_major, exp_minor, exp_micro);
+		return -EFAULT;
+	}
+	return 0;
+}
+
 /* Is the given firmware API compatible with the one the driver was compiled
  * with?
  */
@@ -3281,6 +3374,8 @@
 	if (v & ECC_CE_INT_CAUSE_F) {
 		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
 
+		t4_edc_err_read(adapter, idx);
+
 		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
 		if (printk_ratelimit())
 			dev_warn(adapter->pdev_dev,
@@ -3488,7 +3583,9 @@
 void t4_intr_enable(struct adapter *adapter)
 {
 	u32 val = 0;
-	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
 		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
@@ -3513,7 +3610,9 @@
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-	u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
 	t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
@@ -3687,6 +3786,11 @@
 	return 0;
 }
 
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+	return (adap->flags & FW_OK) || !adap->use_bd;
+}
+
 /**
  *	t4_fw_tp_pio_rw - Access TP PIO through LDST
  *	@adap: the adapter
@@ -3730,7 +3834,7 @@
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-	if (adap->flags & FW_OK)
+	if (t4_use_ldst(adap))
 		t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
 	else
 		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3864,7 @@
 	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
 		rss_key_addr_cnt = 32;
 
-	if (adap->flags & FW_OK)
+	if (t4_use_ldst(adap))
 		t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
 	else
 		t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3893,7 @@
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
 			   u32 *valp)
 {
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, valp, 1,
 				TP_RSS_PF0_CONFIG_A + index, 1);
 	else
@@ -3829,7 +3933,7 @@
 
 	/* Grab the VFL/VFH values ...
 	 */
-	if (adapter->flags & FW_OK) {
+	if (t4_use_ldst(adapter)) {
 		t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
 		t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
 	} else {
@@ -3850,7 +3954,7 @@
 {
 	u32 pfmap;
 
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
 	else
 		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3972,7 @@
 {
 	u32 pfmask;
 
-	if (adapter->flags & FW_OK)
+	if (t4_use_ldst(adapter))
 		t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
 	else
 		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +4028,25 @@
  */
 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 {
-	/* T6 and later has 2 channels */
-	if (adap->params.arch.nchan == NCHAN) {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_cong_drops, 8,
-				 TP_MIB_TNL_CNG_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_tx_drops, 4,
-				 TP_MIB_TNL_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_vlan_drops, 4,
-				 TP_MIB_OFD_VLN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp6_in_errs, 4,
-				 TP_MIB_TCP_V6IN_ERR_0_A);
-	} else {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_cong_drops, 2,
-				 TP_MIB_TNL_CNG_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_chan_drops, 2,
-				 TP_MIB_OFD_CHN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->ofld_vlan_drops, 2,
-				 TP_MIB_OFD_VLN_DROP_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-				 st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
-	}
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+			 st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
 	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
 			 &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
 }
@@ -3974,16 +4060,13 @@
  */
 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
 {
-	/* T6 and later has 2 channels */
-	if (adap->params.arch.nchan == NCHAN) {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-				 8, TP_MIB_CPL_IN_REQ_0_A);
-	} else {
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-				 2, TP_MIB_CPL_IN_REQ_0_A);
-		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
-				 2, TP_MIB_CPL_OUT_RSP_0_A);
-	}
+	int nchan = adap->params.arch.nchan;
+
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+			 nchan, TP_MIB_CPL_IN_REQ_0_A);
+	t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+			 nchan, TP_MIB_CPL_OUT_RSP_0_A);
+
 }
 
 /**
@@ -4238,6 +4321,119 @@
 }
 
 /**
+ *	t4_set_trace_filter - configure one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the desired trace filter parameters
+ *	@idx: which filter to configure
+ *	@enable: whether to enable or disable the filter
+ *
+ *	Configures one of the tracing filters available in HW.  If @enable is
+ *	%0, @tp is not examined and may be %NULL.  The caller is responsible
+ *	for setting the single/multiple trace mode via the MPS_TRC_CFG_A register
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+			int idx, int enable)
+{
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg, cfg;
+	u32 multitrc = TRCMULTIFILTER_F;
+
+	if (!enable) {
+		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+		return 0;
+	}
+
+	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
+	if (cfg & TRCMULTIFILTER_F) {
+		/* If multiple tracers are enabled, then maximum
+		 * capture size is 2.5KB (FIFO size of a single channel)
+		 * minus 2 flits for CPL_TRACE_PKT header.
+		 */
+		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
+			return -EINVAL;
+	} else {
+		/* If multiple tracers are disabled, a maximum packet capture
+		 * size of 9600 bytes is recommended to avoid deadlocks.
+		 * Also in this mode, only trace0 can be enabled and running.
+		 */
+		multitrc = 0;
+		if (tp->snap_len > 9600 || idx)
+			return -EINVAL;
+	}
+
+	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
+	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
+	    tp->min_len > TFMINPKTSIZE_M)
+		return -EINVAL;
+
+	/* stop the tracer we'll be changing */
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
+
+	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
+	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		t4_write_reg(adap, data_reg, tp->data[i]);
+		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+	}
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
+		     TFCAPTUREMAX_V(tp->snap_len) |
+		     TFMINPKTSIZE_V(tp->min_len));
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
+		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
+		     (is_t4(adap->params.chip) ?
+		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
+		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
+		     T5_TFINVERTMATCH_V(tp->invert)));
+
+	return 0;
+}
+
+/**
+ *	t4_get_trace_filter - query one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the current trace filter parameters
+ *	@idx: which trace filter to query
+ *	@enabled: non-zero if the filter is enabled
+ *
+ *	Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+			 int *enabled)
+{
+	u32 ctla, ctlb;
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg;
+
+	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
+	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
+
+	if (is_t4(adap->params.chip)) {
+		*enabled = !!(ctla & TFEN_F);
+		tp->port =  TFPORT_G(ctla);
+		tp->invert = !!(ctla & TFINVERTMATCH_F);
+	} else {
+		*enabled = !!(ctla & T5_TFEN_F);
+		tp->port = T5_TFPORT_G(ctla);
+		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
+	}
+	tp->snap_len = TFCAPTUREMAX_G(ctlb);
+	tp->min_len = TFMINPKTSIZE_G(ctlb);
+	tp->skip_ofst = TFOFFSET_G(ctla);
+	tp->skip_len = TFLENGTH_G(ctla);
+
+	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
+	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+	}
+}
+
+/**
  *	t4_pmtx_get_stats - returns the HW stats from PMTX
  *	@adap: the adapter
  *	@cnt: where to store the count statistics
@@ -6294,7 +6490,7 @@
 	/* Cache the adapter's Compressed Filter Mode and global Ingress
 	 * Configuration.
 	 */
-	if (adap->flags & FW_OK) {
+	if (t4_use_ldst(adap)) {
 		t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
 				TP_VLAN_PRI_MAP_A, 1);
 		t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index c8488f4..640369d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@
 	TCB_SIZE       = 128,   /* TCB size */
 	NMTUS          = 16,    /* size of MTU table */
 	NCCTRL_WIN     = 32,    /* # of congestion control windows */
-	L2T_SIZE       = 4096,  /* # of L2T entries */
 	PM_NSTATS      = 5,     /* # of PM stats */
 	MBOX_LEN       = 64,    /* mailbox size in bytes */
 	TRACE_LEN      = 112,   /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 132cb8f..b99144a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -660,6 +660,9 @@
 #define TXPKT_OVLAN_IDX_S    12
 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
 
+#define TXPKT_T5_OVLAN_IDX_S	12
+#define TXPKT_T5_OVLAN_IDX_V(x)	((x) << TXPKT_T5_OVLAN_IDX_S)
+
 #define TXPKT_INTF_S    16
 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index d7ca106..8353a6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -142,6 +142,8 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5013),	/* T580-chr */
 	CH_PCI_ID_TABLE_FENTRY(0x5014),	/* T580-so */
 	CH_PCI_ID_TABLE_FENTRY(0x5015),	/* T502-bt */
+	CH_PCI_ID_TABLE_FENTRY(0x5016),	/* T580-OCP-SO */
+	CH_PCI_ID_TABLE_FENTRY(0x5017),	/* T520-OCP-SO */
 	CH_PCI_ID_TABLE_FENTRY(0x5080),	/* Custom T540-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5081),	/* Custom T540-LL-cr */
 	CH_PCI_ID_TABLE_FENTRY(0x5082),	/* Custom T504-cr */
@@ -155,6 +157,22 @@
 	CH_PCI_ID_TABLE_FENTRY(0x5090),	/* Custom T540-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5091),	/* Custom T522-CR */
 	CH_PCI_ID_TABLE_FENTRY(0x5092),	/* Custom T520-CR */
+
+	/* T6 adapters:
+	 */
+	CH_PCI_ID_TABLE_FENTRY(0x6001),
+	CH_PCI_ID_TABLE_FENTRY(0x6002),
+	CH_PCI_ID_TABLE_FENTRY(0x6003),
+	CH_PCI_ID_TABLE_FENTRY(0x6004),
+	CH_PCI_ID_TABLE_FENTRY(0x6005),
+	CH_PCI_ID_TABLE_FENTRY(0x6006),
+	CH_PCI_ID_TABLE_FENTRY(0x6007),
+	CH_PCI_ID_TABLE_FENTRY(0x6009),
+	CH_PCI_ID_TABLE_FENTRY(0x600d),
+	CH_PCI_ID_TABLE_FENTRY(0x6010),
+	CH_PCI_ID_TABLE_FENTRY(0x6011),
+	CH_PCI_ID_TABLE_FENTRY(0x6014),
+	CH_PCI_ID_TABLE_FENTRY(0x6015),
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 375a825..fc3044c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -136,6 +136,20 @@
 #define  INGPACKBOUNDARY_G(x)	(((x) >> INGPACKBOUNDARY_S) \
 				 & INGPACKBOUNDARY_M)
 
+#define VFIFO_ENABLE_S    10
+#define VFIFO_ENABLE_V(x) ((x) << VFIFO_ENABLE_S)
+#define VFIFO_ENABLE_F    VFIFO_ENABLE_V(1U)
+
+#define SGE_DBVFIFO_BADDR_A 0x1138
+
+#define DBVFIFO_SIZE_S    6
+#define DBVFIFO_SIZE_M    0xfffU
+#define DBVFIFO_SIZE_G(x) (((x) >> DBVFIFO_SIZE_S) & DBVFIFO_SIZE_M)
+
+#define T6_DBVFIFO_SIZE_S    0
+#define T6_DBVFIFO_SIZE_M    0x1fffU
+#define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M)
+
 #define GLOBALENABLE_S    0
 #define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S)
 #define GLOBALENABLE_F    GLOBALENABLE_V(1U)
@@ -303,6 +317,8 @@
 #define SGE_FL_BUFFER_SIZE7_A 0x1060
 #define SGE_FL_BUFFER_SIZE8_A 0x1064
 
+#define SGE_IMSG_CTXT_BADDR_A 0x1088
+#define SGE_FLM_CACHE_BADDR_A 0x108c
 #define SGE_INGRESS_RX_THRESHOLD_A 0x10a0
 
 #define THRESHOLD_0_S    24
@@ -338,6 +354,11 @@
 #define EGRTHRESHOLDPACKING_G(x) \
 	(((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
 
+#define T6_EGRTHRESHOLDPACKING_S    16
+#define T6_EGRTHRESHOLDPACKING_M    0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+	(((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
 #define SGE_TIMESTAMP_LO_A 0x1098
 #define SGE_TIMESTAMP_HI_A 0x109c
 
@@ -352,6 +373,7 @@
 #define TSVAL_G(x) (((x) >> TSVAL_S) & TSVAL_M)
 
 #define SGE_DBFIFO_STATUS_A 0x10a4
+#define SGE_DBVFIFO_SIZE_A 0x113c
 
 #define HP_INT_THRESH_S    28
 #define HP_INT_THRESH_M    0xfU
@@ -864,6 +886,10 @@
 /* registers for module MA */
 #define MA_EDRAM0_BAR_A 0x77c0
 
+#define EDRAM0_BASE_S    16
+#define EDRAM0_BASE_M    0xfffU
+#define EDRAM0_BASE_G(x) (((x) >> EDRAM0_BASE_S) & EDRAM0_BASE_M)
+
 #define EDRAM0_SIZE_S    0
 #define EDRAM0_SIZE_M    0xfffU
 #define EDRAM0_SIZE_V(x) ((x) << EDRAM0_SIZE_S)
@@ -871,6 +897,10 @@
 
 #define MA_EDRAM1_BAR_A 0x77c4
 
+#define EDRAM1_BASE_S    16
+#define EDRAM1_BASE_M    0xfffU
+#define EDRAM1_BASE_G(x) (((x) >> EDRAM1_BASE_S) & EDRAM1_BASE_M)
+
 #define EDRAM1_SIZE_S    0
 #define EDRAM1_SIZE_M    0xfffU
 #define EDRAM1_SIZE_V(x) ((x) << EDRAM1_SIZE_S)
@@ -878,6 +908,11 @@
 
 #define MA_EXT_MEMORY_BAR_A 0x77c8
 
+#define EXT_MEM_BASE_S    16
+#define EXT_MEM_BASE_M    0xfffU
+#define EXT_MEM_BASE_V(x) ((x) << EXT_MEM_BASE_S)
+#define EXT_MEM_BASE_G(x) (((x) >> EXT_MEM_BASE_S) & EXT_MEM_BASE_M)
+
 #define EXT_MEM_SIZE_S    0
 #define EXT_MEM_SIZE_M    0xfffU
 #define EXT_MEM_SIZE_V(x) ((x) << EXT_MEM_SIZE_S)
@@ -885,6 +920,10 @@
 
 #define MA_EXT_MEMORY1_BAR_A 0x7808
 
+#define EXT_MEM1_BASE_S    16
+#define EXT_MEM1_BASE_M    0xfffU
+#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
+
 #define EXT_MEM1_SIZE_S    0
 #define EXT_MEM1_SIZE_M    0xfffU
 #define EXT_MEM1_SIZE_V(x) ((x) << EXT_MEM1_SIZE_S)
@@ -892,6 +931,10 @@
 
 #define MA_EXT_MEMORY0_BAR_A 0x77c8
 
+#define EXT_MEM0_BASE_S    16
+#define EXT_MEM0_BASE_M    0xfffU
+#define EXT_MEM0_BASE_G(x) (((x) >> EXT_MEM0_BASE_S) & EXT_MEM0_BASE_M)
+
 #define EXT_MEM0_SIZE_S    0
 #define EXT_MEM0_SIZE_M    0xfffU
 #define EXT_MEM0_SIZE_V(x) ((x) << EXT_MEM0_SIZE_S)
@@ -973,6 +1016,10 @@
 
 /* registers for module CIM */
 #define CIM_BOOT_CFG_A 0x7b00
+#define CIM_SDRAM_BASE_ADDR_A 0x7b14
+#define CIM_SDRAM_ADDR_SIZE_A 0x7b18
+#define CIM_EXTMEM2_BASE_ADDR_A 0x7b1c
+#define CIM_EXTMEM2_ADDR_SIZE_A 0x7b20
 #define CIM_PF_MAILBOX_CTRL_SHADOW_COPY_A 0x290
 
 #define  BOOTADDR_M	0xffffff00U
@@ -1231,6 +1278,33 @@
 #define TP_OUT_CONFIG_A		0x7d04
 #define TP_GLOBAL_CONFIG_A	0x7d08
 
+#define TP_CMM_TCB_BASE_A 0x7d10
+#define TP_CMM_MM_BASE_A 0x7d14
+#define TP_CMM_TIMER_BASE_A 0x7d18
+#define TP_PMM_TX_BASE_A 0x7d20
+#define TP_PMM_RX_BASE_A 0x7d28
+#define TP_PMM_RX_PAGE_SIZE_A 0x7d2c
+#define TP_PMM_RX_MAX_PAGE_A 0x7d30
+#define TP_PMM_TX_PAGE_SIZE_A 0x7d34
+#define TP_PMM_TX_MAX_PAGE_A 0x7d38
+#define TP_CMM_MM_MAX_PSTRUCT_A 0x7e6c
+
+#define PMRXNUMCHN_S    31
+#define PMRXNUMCHN_V(x) ((x) << PMRXNUMCHN_S)
+#define PMRXNUMCHN_F    PMRXNUMCHN_V(1U)
+
+#define PMTXNUMCHN_S    30
+#define PMTXNUMCHN_M    0x3U
+#define PMTXNUMCHN_G(x) (((x) >> PMTXNUMCHN_S) & PMTXNUMCHN_M)
+
+#define PMTXMAXPAGE_S    0
+#define PMTXMAXPAGE_M    0x1fffffU
+#define PMTXMAXPAGE_G(x) (((x) >> PMTXMAXPAGE_S) & PMTXMAXPAGE_M)
+
+#define PMRXMAXPAGE_S    0
+#define PMRXMAXPAGE_M    0x1fffffU
+#define PMRXMAXPAGE_G(x) (((x) >> PMRXMAXPAGE_S) & PMRXMAXPAGE_M)
+
 #define DBGLAMODE_S	14
 #define DBGLAMODE_M	0x3U
 #define DBGLAMODE_G(x)	(((x) >> DBGLAMODE_S) & DBGLAMODE_M)
@@ -1338,6 +1412,9 @@
 #define MTUVALUE_G(x) (((x) >> MTUVALUE_S) & MTUVALUE_M)
 
 #define TP_RSS_LKP_TABLE_A	0x7dec
+#define TP_CMM_MM_RX_FLST_BASE_A 0x7e60
+#define TP_CMM_MM_TX_FLST_BASE_A 0x7e64
+#define TP_CMM_MM_PS_FLST_BASE_A 0x7e68
 
 #define LKPTBLROWVLD_S    31
 #define LKPTBLROWVLD_V(x) ((x) << LKPTBLROWVLD_S)
@@ -1483,6 +1560,11 @@
 #define TP_MIB_RQE_DFR_PKT_A	0x64
 
 #define ULP_TX_INT_CAUSE_A	0x8dcc
+#define ULP_TX_TPT_LLIMIT_A	0x8dd4
+#define ULP_TX_TPT_ULIMIT_A	0x8dd8
+#define ULP_TX_PBL_LLIMIT_A	0x8ddc
+#define ULP_TX_PBL_ULIMIT_A	0x8de0
+#define ULP_TX_ERR_TABLE_BASE_A 0x8e04
 
 #define PBL_BOUND_ERR_CH3_S    31
 #define PBL_BOUND_ERR_CH3_V(x) ((x) << PBL_BOUND_ERR_CH3_S)
@@ -1804,6 +1886,9 @@
 #define TRCMULTIFILTER_F    TRCMULTIFILTER_V(1U)
 
 #define MPS_TRC_RSS_CONTROL_A		0x9808
+#define MPS_TRC_FILTER1_RSS_CONTROL_A	0x9ff4
+#define MPS_TRC_FILTER2_RSS_CONTROL_A	0x9ffc
+#define MPS_TRC_FILTER3_RSS_CONTROL_A	0xa004
 #define MPS_T5_TRC_RSS_CONTROL_A	0xa00c
 
 #define RSSCONTROL_S    16
@@ -1812,6 +1897,59 @@
 #define QUEUENUMBER_S    0
 #define QUEUENUMBER_V(x) ((x) << QUEUENUMBER_S)
 
+#define TFINVERTMATCH_S    24
+#define TFINVERTMATCH_V(x) ((x) << TFINVERTMATCH_S)
+#define TFINVERTMATCH_F    TFINVERTMATCH_V(1U)
+
+#define TFEN_S    22
+#define TFEN_V(x) ((x) << TFEN_S)
+#define TFEN_F    TFEN_V(1U)
+
+#define TFPORT_S    18
+#define TFPORT_M    0xfU
+#define TFPORT_V(x) ((x) << TFPORT_S)
+#define TFPORT_G(x) (((x) >> TFPORT_S) & TFPORT_M)
+
+#define TFLENGTH_S    8
+#define TFLENGTH_M    0x1fU
+#define TFLENGTH_V(x) ((x) << TFLENGTH_S)
+#define TFLENGTH_G(x) (((x) >> TFLENGTH_S) & TFLENGTH_M)
+
+#define TFOFFSET_S    0
+#define TFOFFSET_M    0x1fU
+#define TFOFFSET_V(x) ((x) << TFOFFSET_S)
+#define TFOFFSET_G(x) (((x) >> TFOFFSET_S) & TFOFFSET_M)
+
+#define T5_TFINVERTMATCH_S    25
+#define T5_TFINVERTMATCH_V(x) ((x) << T5_TFINVERTMATCH_S)
+#define T5_TFINVERTMATCH_F    T5_TFINVERTMATCH_V(1U)
+
+#define T5_TFEN_S    23
+#define T5_TFEN_V(x) ((x) << T5_TFEN_S)
+#define T5_TFEN_F    T5_TFEN_V(1U)
+
+#define T5_TFPORT_S    18
+#define T5_TFPORT_M    0x1fU
+#define T5_TFPORT_V(x) ((x) << T5_TFPORT_S)
+#define T5_TFPORT_G(x) (((x) >> T5_TFPORT_S) & T5_TFPORT_M)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A_A 0x9810
+#define MPS_TRC_FILTER_MATCH_CTL_B_A 0x9820
+
+#define TFMINPKTSIZE_S    16
+#define TFMINPKTSIZE_M    0x1ffU
+#define TFMINPKTSIZE_V(x) ((x) << TFMINPKTSIZE_S)
+#define TFMINPKTSIZE_G(x) (((x) >> TFMINPKTSIZE_S) & TFMINPKTSIZE_M)
+
+#define TFCAPTUREMAX_S    0
+#define TFCAPTUREMAX_M    0x3fffU
+#define TFCAPTUREMAX_V(x) ((x) << TFCAPTUREMAX_S)
+#define TFCAPTUREMAX_G(x) (((x) >> TFCAPTUREMAX_S) & TFCAPTUREMAX_M)
+
+#define MPS_TRC_FILTER0_MATCH_A 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE_A 0x9c80
+#define MPS_TRC_FILTER1_MATCH_A 0x9d00
+
 #define TP_RSS_CONFIG_A 0x7df0
 
 #define TNL4TUPENIPV6_S    31
@@ -2247,12 +2385,32 @@
 #define MATCHSRAM_V(x) ((x) << MATCHSRAM_S)
 #define MATCHSRAM_F    MATCHSRAM_V(1U)
 
+#define MPS_RX_PG_RSV0_A 0x11010
+#define MPS_RX_PG_RSV4_A 0x11020
 #define MPS_RX_PERR_INT_CAUSE_A 0x11074
+#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
+#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
 
 #define MPS_CLS_TCAM_Y_L_A 0xf000
 #define MPS_CLS_TCAM_DATA0_A 0xf000
 #define MPS_CLS_TCAM_DATA1_A 0xf004
 
+#define USED_S    16
+#define USED_M    0x7ffU
+#define USED_G(x) (((x) >> USED_S) & USED_M)
+
+#define ALLOC_S    0
+#define ALLOC_M    0x7ffU
+#define ALLOC_G(x) (((x) >> ALLOC_S) & ALLOC_M)
+
+#define T5_USED_S    16
+#define T5_USED_M    0xfffU
+#define T5_USED_G(x) (((x) >> T5_USED_S) & T5_USED_M)
+
+#define T5_ALLOC_S    0
+#define T5_ALLOC_M    0xfffU
+#define T5_ALLOC_G(x) (((x) >> T5_ALLOC_S) & T5_ALLOC_M)
+
 #define DMACH_S    0
 #define DMACH_M    0xffffU
 #define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
@@ -2410,8 +2568,21 @@
 #define SLVFIFOPARINT_F    SLVFIFOPARINT_V(1U)
 
 #define ULP_RX_INT_CAUSE_A 0x19158
+#define ULP_RX_ISCSI_LLIMIT_A 0x1915c
+#define ULP_RX_ISCSI_ULIMIT_A 0x19160
 #define ULP_RX_ISCSI_TAGMASK_A 0x19164
 #define ULP_RX_ISCSI_PSZ_A 0x19168
+#define ULP_RX_TDDP_LLIMIT_A 0x1916c
+#define ULP_RX_TDDP_ULIMIT_A 0x19170
+#define ULP_RX_STAG_LLIMIT_A 0x1917c
+#define ULP_RX_STAG_ULIMIT_A 0x19180
+#define ULP_RX_RQ_LLIMIT_A 0x19184
+#define ULP_RX_RQ_ULIMIT_A 0x19188
+#define ULP_RX_PBL_LLIMIT_A 0x1918c
+#define ULP_RX_PBL_ULIMIT_A 0x19190
+#define ULP_RX_CTX_BASE_A 0x19194
+#define ULP_RX_RQUDP_LLIMIT_A 0x191a4
+#define ULP_RX_RQUDP_ULIMIT_A 0x191a8
 #define ULP_RX_LA_CTL_A 0x1923c
 #define ULP_RX_LA_RDPTR_A 0x19240
 #define ULP_RX_LA_RDDATA_A 0x19244
@@ -2473,6 +2644,10 @@
 #define SOURCEPF_M    0x7U
 #define SOURCEPF_G(x) (((x) >> SOURCEPF_S) & SOURCEPF_M)
 
+#define T6_SOURCEPF_S    9
+#define T6_SOURCEPF_M    0x7U
+#define T6_SOURCEPF_G(x) (((x) >> T6_SOURCEPF_S) & T6_SOURCEPF_M)
+
 #define PL_INT_CAUSE_A 0x1940c
 
 #define ULP_TX_S    27
@@ -2612,7 +2787,20 @@
 #define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
 #define T6_LIPMISS_F    T6_LIPMISS_V(1U)
 
+#define LE_DB_CONFIG_A 0x19c04
+#define LE_DB_SERVER_INDEX_A 0x19c18
+#define LE_DB_SRVR_START_INDEX_A 0x19c18
+#define LE_DB_ACT_CNT_IPV4_A 0x19c20
+#define LE_DB_ACT_CNT_IPV6_A 0x19c24
+#define LE_DB_HASH_TID_BASE_A 0x19c30
+#define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
+#define LE_DB_TID_HASHBASE_A 0x19df8
+#define T6_LE_DB_HASH_TID_BASE_A 0x19df8
+
+#define HASHEN_S    20
+#define HASHEN_V(x) ((x) << HASHEN_S)
+#define HASHEN_F    HASHEN_V(1U)
 
 #define REQQPARERR_S    16
 #define REQQPARERR_V(x) ((x) << REQQPARERR_S)
@@ -2634,6 +2822,10 @@
 #define LIP0_V(x) ((x) << LIP0_S)
 #define LIP0_F    LIP0_V(1U)
 
+#define BASEADDR_S    3
+#define BASEADDR_M    0x1fffffffU
+#define BASEADDR_G(x) (((x) >> BASEADDR_S) & BASEADDR_M)
+
 #define TCAMINTPERR_S    13
 #define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
 #define TCAMINTPERR_F    TCAMINTPERR_V(1U)
@@ -2740,10 +2932,11 @@
 #define EDC_H_BIST_DATA_PATTERN_A	0x50010
 #define EDC_H_BIST_STATUS_RDATA_A	0x50028
 
+#define EDC_H_ECC_ERR_ADDR_A		0x50084
 #define EDC_T51_BASE_ADDR		0x50800
 
-#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
-#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
 
 #define PL_VF_REV_A 0x4
 #define PL_VF_WHOAMI_A 0x0
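
The t4_regs.h hunks above all follow the header's existing convention: each hardware field gets a shift (_S), mask (_M), setter (_V) and getter (_G) macro. A minimal sketch of how the new getters compose when decoding one of the memory BAR registers (adap is assumed to be the driver's usual struct adapter pointer; the read itself is only for illustration):

	u32 bar  = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
	u32 base = EXT_MEM_BASE_G(bar);   /* (bar >> 16) & 0xfff */
	u32 size = EXT_MEM_SIZE_G(bar);   /* (bar >>  0) & 0xfff */
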
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 32b2135..92bafa7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -40,14 +40,25 @@
 #define T4FW_VERSION_MICRO 0x20
 #define T4FW_VERSION_BUILD 0x00
 
+#define T4FW_MIN_VERSION_MAJOR 0x01
+#define T4FW_MIN_VERSION_MINOR 0x04
+#define T4FW_MIN_VERSION_MICRO 0x00
+
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x0D
 #define T5FW_VERSION_MICRO 0x20
 #define T5FW_VERSION_BUILD 0x00
 
+#define T5FW_MIN_VERSION_MAJOR 0x00
+#define T5FW_MIN_VERSION_MINOR 0x00
+#define T5FW_MIN_VERSION_MICRO 0x00
+
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x0D
 #define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
+#define T6FW_MIN_VERSION_MAJOR 0x00
+#define T6FW_MIN_VERSION_MINOR 0x00
+#define T6FW_MIN_VERSION_MICRO 0x00
 #endif
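
The new T4FW_MIN_VERSION_* / T5FW_MIN_VERSION_* / T6FW_MIN_VERSION_* triplets describe the oldest firmware each chip generation is expected to interoperate with. A sketch of how such a floor can be checked in a single comparison (the packing macro and helper below are invented for illustration and are not part of this patch):

	#define FW_VER_CODE(ma, mi, mc)  (((ma) << 16) | ((mi) << 8) | (mc))

	static inline bool t4fw_meets_min(u8 major, u8 minor, u8 micro)
	{
		return FW_VER_CODE(major, minor, micro) >=
		       FW_VER_CODE(T4FW_MIN_VERSION_MAJOR,
				   T4FW_MIN_VERSION_MINOR,
				   T4FW_MIN_VERSION_MICRO);
	}
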
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index ad53e5a..fa3786a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1898,7 +1898,10 @@
 		rspq->unhandled_irqs++;
 
 	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
-	if (is_t4(rspq->adapter->params.chip)) {
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!rspq->bar2_addr)) {
 		t4_write_reg(rspq->adapter,
 			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
 			     val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@
 	}
 
 	val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
-	if (is_t4(adapter->params.chip))
+	/* If we don't have access to the new User GTS (T5+), use the old
+	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
+	 */
+	if (unlikely(!intrq->bar2_addr)) {
 		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
 			     val | INGRESSQID_V(intrq->cntxt_id));
-	else {
+	} else {
 		writel(val | INGRESSQID_V(intrq->bar2_qid),
 		       intrq->bar2_addr + SGE_UDB_GTS);
 		wmb();
@@ -2662,8 +2668,22 @@
 	 * give it more Free List entries.  (Note that the SGE's Egress
 	 * Congestion Threshold is in units of 2 Free List pointers.)
 	 */
-	s->fl_starve_thres
-		= EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+	case CHELSIO_T4:
+		s->fl_starve_thres =
+		   EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T5:
+		s->fl_starve_thres =
+		   EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	case CHELSIO_T6:
+	default:
+		s->fl_starve_thres =
+		   T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+		break;
+	}
+	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
 
 	/*
 	 * Set up tasklet timers.
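
The reworked fl_starve_thres computation above only changes which field is decoded per chip generation (T4, T5, or the new T6 layout); the final scaling is unchanged. As a worked example with an illustrative value: if the decoded egress congestion threshold is 5, the starvation threshold becomes 5 * 2 + 1 = 11 free-list entries, because the hardware expresses the threshold in units of 2 free-list pointers.
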
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 0db6dc9..63dd5fd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -619,7 +619,8 @@
 		 */
 		whoami = t4_read_reg(adapter,
 				     T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
-		pf = SOURCEPF_G(whoami);
+		pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
 		s_hps = (HOSTPAGESIZEPF0_S +
 			 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);

diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 84b6a2b..8b53f7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"2.1.1.83"
+#define DRV_VERSION		"2.3.0.12"
 #define DRV_COPYRIGHT		"Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
@@ -191,6 +191,25 @@
 	struct vnic_gen_stats gen_stats;
 };
 
+static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
+{
+	struct enic *enic = vdev->priv;
+
+	return enic->netdev;
+}
+
+/* Wrapper macros for kernel logging.
+ * Make sure a variable vdev of struct vnic_dev is available in the block
+ * where these macros are used.
+ */
+#define vdev_info(args...)	dev_info(&vdev->pdev->dev, args)
+#define vdev_warn(args...)	dev_warn(&vdev->pdev->dev, args)
+#define vdev_err(args...)	dev_err(&vdev->pdev->dev, args)
+
+#define vdev_netinfo(args...)	netdev_info(vnic_get_netdev(vdev), args)
+#define vdev_netwarn(args...)	netdev_warn(vnic_get_netdev(vdev), args)
+#define vdev_neterr(args...)	netdev_err(vnic_get_netdev(vdev), args)
+
 static inline struct device *enic_get_dev(struct enic *enic)
 {
 	return &(enic->pdev->dev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index d106186..3c677ed 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -177,7 +177,7 @@
 	int res, i;
 
 	enic = netdev_priv(dev);
-	res = skb_flow_dissect_flow_keys(skb, &keys);
+	res = skb_flow_dissect_flow_keys(skb, &keys, 0);
 	if (!res || keys.basic.n_proto != htons(ETH_P_IP) ||
 	    (keys.basic.ip_proto != IPPROTO_TCP &&
 	     keys.basic.ip_proto != IPPROTO_UDP))
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f3f1601..f44a39c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -224,7 +224,8 @@
 	struct enic *enic = netdev_priv(netdev);
 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
-	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
 	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
 	if (rxcoal->use_adaptive_rx_coalesce)
 		ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@
 	return 0;
 }
 
+static int enic_coalesce_valid(struct enic *enic,
+			       struct ethtool_coalesce *ec)
+{
+	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+					   ec->rx_coalesce_usecs_high);
+	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+					  ec->rx_coalesce_usecs_low);
+
+	if (ec->rx_max_coalesced_frames		||
+	    ec->rx_coalesce_usecs_irq		||
+	    ec->rx_max_coalesced_frames_irq	||
+	    ec->tx_max_coalesced_frames		||
+	    ec->tx_coalesce_usecs_irq		||
+	    ec->tx_max_coalesced_frames_irq	||
+	    ec->stats_block_coalesce_usecs	||
+	    ec->use_adaptive_tx_coalesce	||
+	    ec->pkt_rate_low			||
+	    ec->rx_max_coalesced_frames_low	||
+	    ec->tx_coalesce_usecs_low		||
+	    ec->tx_max_coalesced_frames_low	||
+	    ec->pkt_rate_high			||
+	    ec->rx_max_coalesced_frames_high	||
+	    ec->tx_coalesce_usecs_high		||
+	    ec->tx_max_coalesced_frames_high	||
+	    ec->rate_sample_interval)
+		return -EINVAL;
+
+	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+	    ec->tx_coalesce_usecs)
+		return -EINVAL;
+
+	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
+	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
+	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
+	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
+			    coalesce_usecs_max);
+
+	if (ec->rx_coalesce_usecs_high &&
+	    (rx_coalesce_usecs_high <
+	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+		return -EINVAL;
+
+	return 0;
+}
+
 static int enic_set_coalesce(struct net_device *netdev,
 	struct ethtool_coalesce *ecmd)
 {
@@ -244,8 +292,12 @@
 	u32 rx_coalesce_usecs_high;
 	u32 coalesce_usecs_max;
 	unsigned int i, intr;
+	int ret;
 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+	ret = enic_coalesce_valid(enic, ecmd);
+	if (ret)
+		return ret;
 	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
 	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
 				  coalesce_usecs_max);
@@ -257,59 +309,24 @@
 	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
 				       coalesce_usecs_max);
 
-	switch (vnic_dev_get_intr_mode(enic->vdev)) {
-	case VNIC_DEV_INTR_MODE_INTX:
-		if (tx_coalesce_usecs != rx_coalesce_usecs)
-			return -EINVAL;
-		if (ecmd->use_adaptive_rx_coalesce	||
-		    ecmd->rx_coalesce_usecs_low		||
-		    ecmd->rx_coalesce_usecs_high)
-			return -EINVAL;
-
-		intr = enic_legacy_io_intr();
-		vnic_intr_coalescing_timer_set(&enic->intr[intr],
-			tx_coalesce_usecs);
-		break;
-	case VNIC_DEV_INTR_MODE_MSI:
-		if (tx_coalesce_usecs != rx_coalesce_usecs)
-			return -EINVAL;
-		if (ecmd->use_adaptive_rx_coalesce	||
-		    ecmd->rx_coalesce_usecs_low		||
-		    ecmd->rx_coalesce_usecs_high)
-			return -EINVAL;
-
-		vnic_intr_coalescing_timer_set(&enic->intr[0],
-			tx_coalesce_usecs);
-		break;
-	case VNIC_DEV_INTR_MODE_MSIX:
-		if (ecmd->rx_coalesce_usecs_high &&
-		    (rx_coalesce_usecs_high <
-		     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
-				return -EINVAL;
-
+	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
 		for (i = 0; i < enic->wq_count; i++) {
 			intr = enic_msix_wq_intr(enic, i);
 			vnic_intr_coalescing_timer_set(&enic->intr[intr],
-				tx_coalesce_usecs);
+						       tx_coalesce_usecs);
 		}
-
-		rxcoal->use_adaptive_rx_coalesce =
-					!!ecmd->use_adaptive_rx_coalesce;
-		if (!rxcoal->use_adaptive_rx_coalesce)
-			enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
-		if (ecmd->rx_coalesce_usecs_high) {
-			rxcoal->range_end = rx_coalesce_usecs_high;
-			rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
-			rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
-							ENIC_AIC_LARGE_PKT_DIFF;
-		}
-		break;
-	default:
-		break;
+		enic->tx_coalesce_usecs = tx_coalesce_usecs;
+	}
+	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+	if (!rxcoal->use_adaptive_rx_coalesce)
+		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+	if (ecmd->rx_coalesce_usecs_high) {
+		rxcoal->range_end = rx_coalesce_usecs_high;
+		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+						ENIC_AIC_LARGE_PKT_DIFF;
 	}
 
-	enic->tx_coalesce_usecs = tx_coalesce_usecs;
 	enic->rx_coalesce_usecs = rx_coalesce_usecs;
 
 	return 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 918a8e4..3352d02 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1149,70 +1149,6 @@
 	return 0;
 }
 
-static int enic_poll(struct napi_struct *napi, int budget)
-{
-	struct net_device *netdev = napi->dev;
-	struct enic *enic = netdev_priv(netdev);
-	unsigned int cq_rq = enic_cq_rq(enic, 0);
-	unsigned int cq_wq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_legacy_io_intr();
-	unsigned int rq_work_to_do = budget;
-	unsigned int wq_work_to_do = -1; /* no limit */
-	unsigned int  work_done, rq_work_done = 0, wq_work_done;
-	int err;
-
-	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
-				       enic_wq_service, NULL);
-
-	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
-		if (wq_work_done > 0)
-			vnic_intr_return_credits(&enic->intr[intr],
-						 wq_work_done,
-						 0 /* dont unmask intr */,
-						 0 /* dont reset intr timer */);
-		return budget;
-	}
-
-	if (budget > 0)
-		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
-			rq_work_to_do, enic_rq_service, NULL);
-
-	/* Accumulate intr event credits for this polling
-	 * cycle.  An intr event is the completion of a
-	 * a WQ or RQ packet.
-	 */
-
-	work_done = rq_work_done + wq_work_done;
-
-	if (work_done > 0)
-		vnic_intr_return_credits(&enic->intr[intr],
-			work_done,
-			0 /* don't unmask intr */,
-			0 /* don't reset intr timer */);
-
-	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
-	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
-
-	/* Buffer allocation failed. Stay in polling
-	 * mode so we can try to fill the ring again.
-	 */
-
-	if (err)
-		rq_work_done = rq_work_to_do;
-
-	if (rq_work_done < rq_work_to_do) {
-
-		/* Some work done, but not enough to stay in polling,
-		 * exit polling
-		 */
-
-		napi_complete(napi);
-		vnic_intr_unmask(&enic->intr[intr]);
-	}
-
-	return rq_work_done;
-}
-
 static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
 {
 	unsigned int intr = enic_msix_rq_intr(enic, rq->index);
@@ -1271,6 +1207,77 @@
 	pkt_size_counter->small_pkt_bytes_cnt = 0;
 }
 
+static int enic_poll(struct napi_struct *napi, int budget)
+{
+	struct net_device *netdev = napi->dev;
+	struct enic *enic = netdev_priv(netdev);
+	unsigned int cq_rq = enic_cq_rq(enic, 0);
+	unsigned int cq_wq = enic_cq_wq(enic, 0);
+	unsigned int intr = enic_legacy_io_intr();
+	unsigned int rq_work_to_do = budget;
+	unsigned int wq_work_to_do = -1; /* no limit */
+	unsigned int  work_done, rq_work_done = 0, wq_work_done;
+	int err;
+
+	wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
+				       enic_wq_service, NULL);
+
+	if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
+		if (wq_work_done > 0)
+			vnic_intr_return_credits(&enic->intr[intr],
+						 wq_work_done,
+						 0 /* don't unmask intr */,
+						 0 /* don't reset intr timer */);
+		return budget;
+	}
+
+	if (budget > 0)
+		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+			rq_work_to_do, enic_rq_service, NULL);
+
+	/* Accumulate intr event credits for this polling
+	 * cycle.  An intr event is the completion of a
+	 * cycle.  An intr event is the completion of a
+	 * WQ or RQ packet.
+
+	work_done = rq_work_done + wq_work_done;
+
+	if (work_done > 0)
+		vnic_intr_return_credits(&enic->intr[intr],
+			work_done,
+			0 /* don't unmask intr */,
+			0 /* don't reset intr timer */);
+
+	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
+	enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
+
+	/* Buffer allocation failed. Stay in polling
+	 * mode so we can try to fill the ring again.
+	 */
+
+	if (err)
+		rq_work_done = rq_work_to_do;
+	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+		/* Call the function which refreshes the intr coalescing timer
+		 * value based on the traffic.
+		 */
+		enic_calc_int_moderation(enic, &enic->rq[0]);
+
+	if (rq_work_done < rq_work_to_do) {
+
+		/* Some work done, but not enough to stay in polling,
+		 * exit polling
+		 */
+
+		napi_complete(napi);
+		if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+			enic_set_int_moderation(enic, &enic->rq[0]);
+		vnic_intr_unmask(&enic->intr[intr]);
+	}
+
+	return rq_work_done;
+}
+
 #ifdef CONFIG_RFS_ACCEL
 static void enic_free_rx_cpu_rmap(struct enic *enic)
 {
@@ -1407,10 +1414,8 @@
 	if (err)
 		work_done = work_to_do;
 	if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-		/* Call the function which refreshes
-		 * the intr coalescing timer value based on
-		 * the traffic.  This is supported only in
-		 * the case of MSI-x mode
+		/* Call the function which refreshes the intr coalescing timer
+		 * value based on the traffic.
 		 */
 		enic_calc_int_moderation(enic, &enic->rq[rq]);
 
@@ -1569,12 +1574,6 @@
 	int index = -1;
 	struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
 
-	/* If intr mode is not MSIX, do not do adaptive coalescing */
-	if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
-		netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
-		return;
-	}
-
 	/* 1. Read the link speed from fw
 	 * 2. Pick the default range for the speed
 	 * 3. Update it in enic->rx_coalesce_setting
@@ -2485,6 +2484,11 @@
 		goto err_out_iounmap;
 	}
 
+	err = vnic_devcmd_init(enic->vdev);
+
+	if (err)
+		goto err_out_vnic_unregister;
+
 #ifdef CONFIG_PCI_IOV
 	/* Get number of subvnics */
 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
@@ -2659,8 +2663,8 @@
 		pci_disable_sriov(pdev);
 		enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
 	}
-err_out_vnic_unregister:
 #endif
+err_out_vnic_unregister:
 	vnic_dev_unregister(enic->vdev);
 err_out_iounmap:
 	enic_iounmap(enic);
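
Two related probe-path changes meet here: enic_probe() now calls vnic_devcmd_init() early, and that call can fail before any SR-IOV setup has run. The err_out_vnic_unregister label therefore has to sit outside the #ifdef CONFIG_PCI_IOV block; otherwise the new goto would not compile on kernels built without SR-IOV support. Roughly, the unwind now looks like:

	err = vnic_devcmd_init(enic->vdev);
	if (err)
		goto err_out_vnic_unregister;	/* reachable with or without PCI_IOV */
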
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 0daa1c7..abeda2a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -24,6 +24,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_cq.h"
+#include "enic.h"
 
 void vnic_cq_free(struct vnic_cq *cq)
 {
@@ -42,7 +43,7 @@
 
 	cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
 	if (!cq->ctrl) {
-		pr_err("Failed to hook CQ[%d] resource\n", index);
+		vdev_err("Failed to hook CQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 62f7b7b..a3badef 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -27,46 +27,9 @@
 #include "vnic_resource.h"
 #include "vnic_devcmd.h"
 #include "vnic_dev.h"
+#include "vnic_wq.h"
 #include "vnic_stats.h"
-
-enum vnic_proxy_type {
-	PROXY_NONE,
-	PROXY_BY_BDF,
-	PROXY_BY_INDEX,
-};
-
-struct vnic_res {
-	void __iomem *vaddr;
-	dma_addr_t bus_addr;
-	unsigned int count;
-};
-
-struct vnic_intr_coal_timer_info {
-	u32 mul;
-	u32 div;
-	u32 max_usec;
-};
-
-struct vnic_dev {
-	void *priv;
-	struct pci_dev *pdev;
-	struct vnic_res res[RES_TYPE_MAX];
-	enum vnic_dev_intr_mode intr_mode;
-	struct vnic_devcmd __iomem *devcmd;
-	struct vnic_devcmd_notify *notify;
-	struct vnic_devcmd_notify notify_copy;
-	dma_addr_t notify_pa;
-	u32 notify_sz;
-	dma_addr_t linkstatus_pa;
-	struct vnic_stats *stats;
-	dma_addr_t stats_pa;
-	struct vnic_devcmd_fw_info *fw_info;
-	dma_addr_t fw_info_pa;
-	enum vnic_proxy_type proxy;
-	u32 proxy_index;
-	u64 args[VNIC_DEVCMD_NARGS];
-	struct vnic_intr_coal_timer_info intr_coal_timer_info;
-};
+#include "enic.h"
 
 #define VNIC_MAX_RES_HDR_SIZE \
 	(sizeof(struct vnic_resource_header) + \
@@ -90,14 +53,14 @@
 		return -EINVAL;
 
 	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
-		pr_err("vNIC BAR0 res hdr length error\n");
+		vdev_err("vNIC BAR0 res hdr length error\n");
 		return -EINVAL;
 	}
 
 	rh  = bar->vaddr;
 	mrh = bar->vaddr;
 	if (!rh) {
-		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
+		vdev_err("vNIC BAR0 res hdr not mem-mapped\n");
 		return -EINVAL;
 	}
 
@@ -106,11 +69,10 @@
 		(ioread32(&rh->version) != VNIC_RES_VERSION)) {
 		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
 			(ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
-			pr_err("vNIC BAR0 res magic/version error "
-			"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
-			VNIC_RES_MAGIC, VNIC_RES_VERSION,
-			MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
-			ioread32(&rh->magic), ioread32(&rh->version));
+			vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
+				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
+				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
+				 ioread32(&rh->magic), ioread32(&rh->version));
 			return -EINVAL;
 		}
 	}
@@ -144,17 +106,15 @@
 			/* each count is stride bytes long */
 			len = count * VNIC_RES_STRIDE;
 			if (len + bar_offset > bar[bar_num].len) {
-				pr_err("vNIC BAR0 resource %d "
-					"out-of-bounds, offset 0x%x + "
-					"size 0x%x > bar len 0x%lx\n",
-					type, bar_offset,
-					len,
-					bar[bar_num].len);
+				vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
+					 type, bar_offset, len,
+					 bar[bar_num].len);
 				return -EINVAL;
 			}
 			break;
 		case RES_TYPE_INTR_PBA_LEGACY:
 		case RES_TYPE_DEVCMD:
+		case RES_TYPE_DEVCMD2:
 			len = count;
 			break;
 		default:
@@ -238,8 +198,8 @@
 		&ring->base_addr_unaligned);
 
 	if (!ring->descs_unaligned) {
-		pr_err("Failed to allocate ring (size=%d), aborting\n",
-			(int)ring->size);
+		vdev_err("Failed to allocate ring (size=%d), aborting\n",
+			 (int)ring->size);
 		return -ENOMEM;
 	}
 
@@ -281,7 +241,7 @@
 		return -ENODEV;
 	}
 	if (status & STAT_BUSY) {
-		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
+		vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd));
 		return -EBUSY;
 	}
 
@@ -315,8 +275,8 @@
 					return -err;
 				if (err != ERR_ECMDUNKNOWN ||
 				    cmd != CMD_CAPABILITY)
-					pr_err("Error %d devcmd %d\n",
-						err, _CMD_N(cmd));
+					vdev_neterr("Error %d devcmd %d\n",
+						    err, _CMD_N(cmd));
 				return -err;
 			}
 
@@ -330,10 +290,162 @@
 		}
 	}
 
-	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
+	vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd));
 	return -ETIMEDOUT;
 }
 
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+			  int wait)
+{
+	struct devcmd2_controller *dc2c = vdev->devcmd2;
+	struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+	unsigned int i;
+	int delay, err;
+	u32 fetch_index, new_posted;
+	u32 posted = dc2c->posted;
+
+	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
+
+	if (fetch_index == 0xFFFFFFFF)
+		return -ENODEV;
+
+	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+	if (new_posted == fetch_index) {
+		vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+			    _CMD_N(cmd), fetch_index, posted);
+		return -EBUSY;
+	}
+	dc2c->cmd_ring[posted].cmd = cmd;
+	dc2c->cmd_ring[posted].flags = 0;
+
+	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+			dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+	/* Adding write memory barrier prevents compiler and/or CPU reordering,
+	 * thus avoiding descriptor posting before descriptor is initialized.
+	 * Otherwise, hardware can read stale descriptor fields.
+	 */
+	wmb();
+	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
+	dc2c->posted = new_posted;
+
+	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+		return 0;
+
+	for (delay = 0; delay < wait; delay++) {
+		if (result->color == dc2c->color) {
+			dc2c->next_result++;
+			if (dc2c->next_result == dc2c->result_size) {
+				dc2c->next_result = 0;
+				dc2c->color = dc2c->color ? 0 : 1;
+			}
+			if (result->error) {
+				err = result->error;
+				if (err != ERR_ECMDUNKNOWN ||
+				    cmd != CMD_CAPABILITY)
+					vdev_neterr("Error %d devcmd %d\n",
+						    err, _CMD_N(cmd));
+				return -err;
+			}
+			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+					vdev->args[i] = result->results[i];
+
+			return 0;
+		}
+		udelay(100);
+	}
+
+	vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd));
+
+	return -ETIMEDOUT;
+}
+
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+	if (!vdev->devcmd)
+		return -ENODEV;
+	vdev->devcmd_rtn = _vnic_dev_cmd;
+
+	return 0;
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+	int err;
+	unsigned int fetch_index;
+
+	if (vdev->devcmd2)
+		return 0;
+
+	vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
+	if (!vdev->devcmd2)
+		return -ENOMEM;
+
+	vdev->devcmd2->color = 1;
+	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+				    DEVCMD2_DESC_SIZE);
+	if (err)
+		goto err_free_devcmd2;
+
+	fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
+	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
+		vdev_err("Fatal error in devcmd2 init - hardware surprise removal");
+
+		return -ENODEV;
+	}
+
+	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+			   0);
+	vdev->devcmd2->posted = fetch_index;
+	vnic_wq_enable(&vdev->devcmd2->wq);
+
+	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+				       DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+	if (err)
+		goto err_free_wq;
+
+	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+			VNIC_PADDR_TARGET;
+	vdev->args[1] = DEVCMD2_RING_SIZE;
+
+	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+	if (err)
+		goto err_free_desc_ring;
+
+	vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+	return 0;
+
+err_free_desc_ring:
+	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+err_free_wq:
+	vnic_wq_disable(&vdev->devcmd2->wq);
+	vnic_wq_free(&vdev->devcmd2->wq);
+err_free_devcmd2:
+	kfree(vdev->devcmd2);
+	vdev->devcmd2 = NULL;
+
+	return err;
+}
+
+static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+	vnic_wq_disable(&vdev->devcmd2->wq);
+	vnic_wq_free(&vdev->devcmd2->wq);
+	kfree(vdev->devcmd2);
+}
+
 static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
 	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
 	u64 *a0, u64 *a1, int wait)
@@ -348,7 +460,7 @@
 	vdev->args[2] = *a0;
 	vdev->args[3] = *a1;
 
-	err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
 	if (err)
 		return err;
 
@@ -357,7 +469,8 @@
 		err = (int)vdev->args[1];
 		if (err != ERR_ECMDUNKNOWN ||
 		    cmd != CMD_CAPABILITY)
-			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
+			vdev_neterr("Error %d proxy devcmd %d\n", err,
+				    _CMD_N(cmd));
 		return err;
 	}
 
@@ -375,7 +488,7 @@
 	vdev->args[0] = *a0;
 	vdev->args[1] = *a1;
 
-	err = _vnic_dev_cmd(vdev, cmd, wait);
+	err = vdev->devcmd_rtn(vdev, cmd, wait);
 
 	*a0 = vdev->args[0];
 	*a1 = vdev->args[1];
@@ -650,7 +763,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
 	if (err)
-		pr_err("Can't set packet filter\n");
+		vdev_neterr("Can't set packet filter\n");
 
 	return err;
 }
@@ -667,7 +780,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
 	if (err)
-		pr_err("Can't add addr [%pM], %d\n", addr, err);
+		vdev_neterr("Can't add addr [%pM], %d\n", addr, err);
 
 	return err;
 }
@@ -684,7 +797,7 @@
 
 	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
 	if (err)
-		pr_err("Can't del addr [%pM], %d\n", addr, err);
+		vdev_neterr("Can't del addr [%pM], %d\n", addr, err);
 
 	return err;
 }
@@ -728,7 +841,7 @@
 	dma_addr_t notify_pa;
 
 	if (vdev->notify || vdev->notify_pa) {
-		pr_err("notify block %p still allocated", vdev->notify);
+		vdev_neterr("notify block %p still allocated", vdev->notify);
 		return -EINVAL;
 	}
 
@@ -838,7 +951,7 @@
 	memset(vdev->args, 0, sizeof(vdev->args));
 
 	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
-		err = _vnic_dev_cmd(vdev, CMD_INTR_COAL_CONVERT, wait);
+		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
 	else
 		err = ERR_ECMDUNKNOWN;
 
@@ -847,7 +960,7 @@
 	 */
 	if ((err == ERR_ECMDUNKNOWN) ||
 		(!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
-		pr_warn("Using default conversion factor for interrupt coalesce timer\n");
+		vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n");
 		vnic_dev_intr_coal_timer_info_default(vdev);
 		return 0;
 	}
@@ -938,6 +1051,9 @@
 			pci_free_consistent(vdev->pdev,
 				sizeof(struct vnic_devcmd_fw_info),
 				vdev->fw_info, vdev->fw_info_pa);
+		if (vdev->devcmd2)
+			vnic_dev_deinit_devcmd2(vdev);
+
 		kfree(vdev);
 	}
 }
@@ -959,10 +1075,6 @@
 	if (vnic_dev_discover_res(vdev, bar, num_bars))
 		goto err_out;
 
-	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
-	if (!vdev->devcmd)
-		goto err_out;
-
 	return vdev;
 
 err_out:
@@ -977,6 +1089,29 @@
 }
 EXPORT_SYMBOL(vnic_dev_get_pdev);
 
+int vnic_devcmd_init(struct vnic_dev *vdev)
+{
+	void __iomem *res;
+	int err;
+
+	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+	if (res) {
+		err = vnic_dev_init_devcmd2(vdev);
+		if (err)
+			vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1",
+				  err);
+		else
+			return 0;
+	} else {
+		vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
+	}
+	err = vnic_dev_init_devcmd1(vdev);
+	if (err)
+		vdev_err("DEVCMD1 initialization failed: %d", err);
+
+	return err;
+}
+
 int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
 {
 	u64 a0, a1 = len;
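
The devcmd2 posting logic in _vnic_dev_cmd2() treats the work queue as a classic producer ring: the driver owns the posted index, the device owns the fetch index, and the ring is full when advancing the posted index would land on the fetch index. A minimal sketch of just that check (illustrative, using the DEVCMD2_RING_SIZE constant added later in vnic_devcmd.h):

	static bool devcmd2_ring_full(u32 posted, u32 fetch_index)
	{
		return ((posted + 1) % DEVCMD2_RING_SIZE) == fetch_index;
	}
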
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 1fb214e..b013b6a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -70,7 +70,48 @@
 	unsigned int desc_avail;
 };
 
-struct vnic_dev;
+enum vnic_proxy_type {
+	PROXY_NONE,
+	PROXY_BY_BDF,
+	PROXY_BY_INDEX,
+};
+
+struct vnic_res {
+	void __iomem *vaddr;
+	dma_addr_t bus_addr;
+	unsigned int count;
+};
+
+struct vnic_intr_coal_timer_info {
+	u32 mul;
+	u32 div;
+	u32 max_usec;
+};
+
+struct vnic_dev {
+	void *priv;
+	struct pci_dev *pdev;
+	struct vnic_res res[RES_TYPE_MAX];
+	enum vnic_dev_intr_mode intr_mode;
+	struct vnic_devcmd __iomem *devcmd;
+	struct vnic_devcmd_notify *notify;
+	struct vnic_devcmd_notify notify_copy;
+	dma_addr_t notify_pa;
+	u32 notify_sz;
+	dma_addr_t linkstatus_pa;
+	struct vnic_stats *stats;
+	dma_addr_t stats_pa;
+	struct vnic_devcmd_fw_info *fw_info;
+	dma_addr_t fw_info_pa;
+	enum vnic_proxy_type proxy;
+	u32 proxy_index;
+	u64 args[VNIC_DEVCMD_NARGS];
+	struct vnic_intr_coal_timer_info intr_coal_timer_info;
+	struct devcmd2_controller *devcmd2;
+	int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+			  int wait);
+};
+
 struct vnic_stats;
 
 void *vnic_dev_priv(struct vnic_dev *vdev);
@@ -135,5 +176,6 @@
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
 			struct filter *data);
+int vnic_devcmd_init(struct vnic_dev *vdev);
 
 #endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 435d0cd..2a81288 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -365,6 +365,12 @@
 	 */
 	CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
 
+	/* Initialization for the devcmd2 interface.
+	 * in: (u64) a0 = host result buffer physical address
+	 * in: (u16) a1 = number of entries in result buffer
+	 */
+	CMD_INITIALIZE_DEVCMD2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 57),
+
 	/* Add a filter.
 	 * in: (u64) a0= filter address
 	 *     (u32) a1= size of filter
@@ -629,4 +635,26 @@
 	u64 args[VNIC_DEVCMD_NARGS];	/* RW cmd args (little-endian) */
 };
 
+#define DEVCMD2_FNORESULT	0x1	/* Don't copy result to host */
+
+#define VNIC_DEVCMD2_NARGS	VNIC_DEVCMD_NARGS
+struct vnic_devcmd2 {
+	u16 pad;
+	u16 flags;
+	u32 cmd;
+	u64 args[VNIC_DEVCMD2_NARGS];
+};
+
+#define VNIC_DEVCMD2_NRESULTS	VNIC_DEVCMD_NARGS
+struct devcmd2_result {
+	u64 results[VNIC_DEVCMD2_NRESULTS];
+	u32 pad;
+	u16 completed_index;
+	u8  error;
+	u8  color;
+};
+
+#define DEVCMD2_RING_SIZE	32
+#define DEVCMD2_DESC_SIZE	128
+
 #endif /* _VNIC_DEVCMD_H_ */
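
The devcmd2_result layout above pairs with the color-bit handshake in _vnic_dev_cmd2(): the device stamps each result entry with the color of the current pass around the ring, and the host flips the color it expects whenever next_result wraps back to slot 0, so a stale entry from the previous pass is never mistaken for a fresh completion. A sketch of the consumer-side test (illustrative only; struct devcmd2_controller is added in vnic_wq.h below):

	static bool devcmd2_result_ready(struct devcmd2_controller *dc2c)
	{
		return dc2c->result[dc2c->next_result].color == dc2c->color;
	}
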
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 0ca107f..942759d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -25,6 +25,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_intr.h"
+#include "enic.h"
 
 void vnic_intr_free(struct vnic_intr *intr)
 {
@@ -39,7 +40,7 @@
 
 	intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
 	if (!intr->ctrl) {
-		pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
+		vdev_err("Failed to hook INTR[%d].ctrl resource\n", index);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index e0a73f1..4e45f88 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -48,6 +48,13 @@
 	RES_TYPE_RSVD7,
 	RES_TYPE_DEVCMD,		/* Device command region */
 	RES_TYPE_PASS_THRU_PAGE,	/* Pass-thru page */
+	RES_TYPE_SUBVNIC,		/* subvnic resource type */
+	RES_TYPE_MQ_WQ,			/* MQ Work queues */
+	RES_TYPE_MQ_RQ,			/* MQ Receive queues */
+	RES_TYPE_MQ_CQ,			/* MQ Completion queues */
+	RES_TYPE_DEPRECATED1,		/* Old version of devcmd 2 */
+	RES_TYPE_DEPRECATED2,		/* Old version of devcmd 2 */
+	RES_TYPE_DEVCMD2,		/* Device control region */
 
 	RES_TYPE_MAX,			/* Count of resource types */
 };
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index c4b2183..cce2777 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_rq.h"
+#include "enic.h"
 
 static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
 {
@@ -91,7 +92,7 @@
 
 	rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
 	if (!rq->ctrl) {
-		pr_err("Failed to hook RQ[%d] resource\n", index);
+		vdev_err("Failed to hook RQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
@@ -167,6 +168,7 @@
 int vnic_rq_disable(struct vnic_rq *rq)
 {
 	unsigned int wait;
+	struct vnic_dev *vdev = rq->vdev;
 
 	iowrite32(0, &rq->ctrl->enable);
 
@@ -177,7 +179,7 @@
 		udelay(10);
 	}
 
-	pr_err("Failed to disable RQ[%d]\n", rq->index);
+	vdev_neterr("Failed to disable RQ[%d]\n", rq->index);
 
 	return -ETIMEDOUT;
 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index b5a1c93..05ad16a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -26,6 +26,7 @@
 
 #include "vnic_dev.h"
 #include "vnic_wq.h"
+#include "enic.h"
 
 static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
 {
@@ -94,7 +95,7 @@
 
 	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
 	if (!wq->ctrl) {
-		pr_err("Failed to hook WQ[%d] resource\n", index);
+		vdev_err("Failed to hook WQ[%d] resource\n", index);
 		return -EINVAL;
 	}
 
@@ -113,10 +114,27 @@
 	return 0;
 }
 
-static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
-	unsigned int fetch_index, unsigned int posted_index,
-	unsigned int error_interrupt_enable,
-	unsigned int error_interrupt_offset)
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+			  unsigned int desc_count, unsigned int desc_size)
+{
+	int err;
+
+	wq->index = 0;
+	wq->vdev = vdev;
+
+	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+	if (!wq->ctrl)
+		return -EINVAL;
+	vnic_wq_disable(wq);
+	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+	return err;
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+			unsigned int fetch_index, unsigned int posted_index,
+			unsigned int error_interrupt_enable,
+			unsigned int error_interrupt_offset)
 {
 	u64 paddr;
 	unsigned int count = wq->ring.desc_count;
@@ -140,7 +158,7 @@
 	unsigned int error_interrupt_enable,
 	unsigned int error_interrupt_offset)
 {
-	vnic_wq_init_start(wq, cq_index, 0, 0,
+	enic_wq_init_start(wq, cq_index, 0, 0,
 		error_interrupt_enable,
 		error_interrupt_offset);
 }
@@ -158,6 +176,7 @@
 int vnic_wq_disable(struct vnic_wq *wq)
 {
 	unsigned int wait;
+	struct vnic_dev *vdev = wq->vdev;
 
 	iowrite32(0, &wq->ctrl->enable);
 
@@ -168,7 +187,7 @@
 		udelay(10);
 	}
 
-	pr_err("Failed to disable WQ[%d]\n", wq->index);
+	vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
 
 	return -ETIMEDOUT;
 }
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 2961543..0120961 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -88,6 +88,18 @@
 	unsigned int pkts_outstanding;
 };
 
+struct devcmd2_controller {
+	struct vnic_wq_ctrl __iomem *wq_ctrl;
+	struct vnic_devcmd2 *cmd_ring;
+	struct devcmd2_result *result;
+	u16 next_result;
+	u16 result_size;
+	int color;
+	struct vnic_dev_ring results_ring;
+	struct vnic_wq wq;
+	u32 posted;
+};
+
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
 {
 	/* how many does SW own? */
@@ -174,5 +186,11 @@
 int vnic_wq_disable(struct vnic_wq *wq);
 void vnic_wq_clean(struct vnic_wq *wq,
 	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+			  unsigned int desc_count, unsigned int desc_size);
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+			unsigned int fetch_index, unsigned int posted_index,
+			unsigned int error_interrupt_enable,
+			unsigned int error_interrupt_offset);
 
 #endif /* _VNIC_WQ_H_ */
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index d101750..f7b4248 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -604,19 +604,7 @@
 	.probe		= ec_bhf_probe,
 	.remove		= ec_bhf_remove,
 };
-
-static int __init ec_bhf_init(void)
-{
-	return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
-	pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
 
 module_param(polling_frequency, long, S_IRUGO);
 MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
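
The ec_bhf change above is purely mechanical: module_pci_driver() generates the registration boilerplate that was being open-coded. Roughly, the macro expands to the equivalent of:

	static int __init pci_driver_init(void)
	{
		return pci_register_driver(&pci_driver);
	}
	module_init(pci_driver_init);

	static void __exit pci_driver_exit(void)
	{
		pci_unregister_driver(&pci_driver);
	}
	module_exit(pci_driver_exit);
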
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d12b41..0a27805 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -37,7 +37,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER			"10.6.0.2"
+#define DRV_VER			"10.6.0.3"
 #define DRV_NAME		"be2net"
 #define BE_NAME			"Emulex BladeEngine2"
 #define BE3_NAME		"Emulex BladeEngine3"
@@ -105,6 +105,8 @@
 
 #define MAX_VFS			30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN		32
+#define	CNTL_SERIAL_NUM_WORDS	8  /* Controller serial number words */
+#define	CNTL_SERIAL_NUM_WORD_SZ	(sizeof(u16)) /* Byte-sz of serial num word */
 
 #define	RSS_INDIR_TABLE_LEN	128
 #define RSS_HASH_KEY_LEN	40
@@ -228,6 +230,7 @@
 struct be_tx_stats {
 	u64 tx_bytes;
 	u64 tx_pkts;
+	u64 tx_vxlan_offload_pkts;
 	u64 tx_reqs;
 	u64 tx_compl;
 	ulong tx_jiffies;
@@ -275,6 +278,7 @@
 struct be_rx_stats {
 	u64 rx_bytes;
 	u64 rx_pkts;
+	u64 rx_vxlan_offload_pkts;
 	u32 rx_drops_no_skbs;	/* skb allocation errors */
 	u32 rx_drops_no_frags;	/* HW has no fetched frags */
 	u32 rx_post_fail;	/* page post alloc failures */
@@ -590,6 +594,7 @@
 	struct rss_info rss_info;
 	/* Filters for packets that need to be sent to BMC */
 	u32 bmc_filt_mask;
+	u16 serial_num[CNTL_SERIAL_NUM_WORDS];
 };
 
 #define be_physfn(adapter)		(!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 9eac322..3be1fbd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -88,19 +88,21 @@
 	return wrb->payload.embedded_payload;
 }
 
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
 {
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	u32 val = 0;
 
 	if (be_check_error(adapter, BE_ERROR_ANY))
-		return;
+		return -EIO;
 
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
 	wmb();
 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+	return 0;
 }
 
 /* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@
 		return;
 	}
 
+	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+		complete(&adapter->et_cmd_compl);
+		return;
+	}
+
 	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
 	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
 	    subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@
 
 	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto out;
 
 	status = be_mcc_wait_compl(adapter);
 	if (status == -EIO)
@@ -1547,7 +1557,10 @@
 	else
 		hdr->version = 2;
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err;
+
 	adapter->stats_cmd_sent = true;
 
 err:
@@ -1583,7 +1596,10 @@
 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
 	req->cmd_params.params.reset_stats = 0;
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err;
+
 	adapter->stats_cmd_sent = true;
 
 err:
@@ -1687,8 +1703,7 @@
 			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
 			       sizeof(*req), wrb, NULL);
 
-	be_mcc_notify(adapter);
-
+	status = be_mcc_notify(adapter);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -1860,7 +1875,7 @@
 				cpu_to_le32(set_eqd[i].delay_multiplier);
 	}
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -1953,7 +1968,7 @@
 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
 	}
 
-	status = be_mcc_notify_wait(adapter);
+	status = be_mcc_notify(adapter);
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
@@ -2320,7 +2335,10 @@
 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
 				sizeof(struct lancer_cmd_req_write_object)));
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err_unlock;
+
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2509,10 @@
 	req->params.op_code = cpu_to_le32(flash_opcode);
 	req->params.data_buf_size = cpu_to_le32(buf_size);
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err_unlock;
+
 	spin_unlock_bh(&adapter->mcc_lock);
 
 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2606,7 @@
 	wrb = wrb_from_mccq(adapter);
 	if (!wrb) {
 		status = -EBUSY;
-		goto err;
+		goto err_unlock;
 	}
 
 	req = embedded_payload(wrb);
@@ -2599,8 +2620,19 @@
 	req->loopback_type = loopback_type;
 	req->loopback_state = enable;
 
-	status = be_mcc_notify_wait(adapter);
-err:
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err_unlock;
+
+	spin_unlock_bh(&adapter->mcc_lock);
+
+	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+		status = -ETIMEDOUT;
+
+	return status;
+
+err_unlock:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -2636,7 +2668,9 @@
 	req->num_pkts = cpu_to_le32(num_pkts);
 	req->loopback_type = cpu_to_le32(loopback_type);
 
-	be_mcc_notify(adapter);
+	status = be_mcc_notify(adapter);
+	if (status)
+		goto err;
 
 	spin_unlock_bh(&adapter->mcc_lock);
 
@@ -2818,10 +2852,11 @@
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_cntl_attribs *req;
 	struct be_cmd_resp_cntl_attribs *resp;
-	int status;
+	int status, i;
 	int payload_len = max(sizeof(*req), sizeof(*resp));
 	struct mgmt_controller_attrib *attribs;
 	struct be_dma_mem attribs_cmd;
+	u32 *serial_num;
 
 	if (mutex_lock_interruptible(&adapter->mbox_lock))
 		return -1;
@@ -2852,6 +2887,10 @@
 	if (!status) {
 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+		serial_num = attribs->hba_attribs.controller_serial_number;
+		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+				(BIT_MASK(16) - 1);
 	}
 
 err:
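
be_mcc_notify() now returns a status so callers notice when the adapter is already in an error state instead of ringing the MCC doorbell blindly, and the loopback-mode command is converted from a synchronous notify-and-wait to a completion on et_cmd_compl with a SET_LB_MODE_TIMEOUT bound. The recurring pattern the hunks above introduce looks like this (sketch only; some callers wait with be_mcc_wait_compl(), others on a completion):

	status = be_mcc_notify(adapter);
	if (status)
		goto err;		/* adapter already in error state, don't wait */

	status = be_mcc_wait_compl(adapter);
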
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 00e3a6b..7d178bd 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1500,6 +1500,8 @@
 #define BE_PME_D3COLD_CAP		0x80
 
 /********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT		12000
+
 struct be_cmd_req_loopback_test {
 	struct be_cmd_req_hdr hdr;
 	u32 loopback_type;
@@ -1640,10 +1642,12 @@
 struct mgmt_hba_attribs {
 	u32 rsvd0[24];
 	u8 controller_model_number[32];
-	u32 rsvd1[79];
-	u8 rsvd2[3];
+	u32 rsvd1[16];
+	u32 controller_serial_number[8];
+	u32 rsvd2[55];
+	u8 rsvd3[3];
 	u8 phy_port;
-	u32 rsvd3[13];
+	u32 rsvd4[13];
 } __packed;
 
 struct mgmt_controller_attrib {
@@ -1763,6 +1767,7 @@
 /*********************** HSW Config ***********************/
 #define PORT_FWD_TYPE_VEPA		0x3
 #define PORT_FWD_TYPE_VEB		0x2
+#define PORT_FWD_TYPE_PASSTHRU		0x1
 
 #define ENABLE_MAC_SPOOFCHK		0x2
 #define DISABLE_MAC_SPOOFCHK		0x3
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index b2476db..2c9ed17 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -138,6 +138,7 @@
 static const struct be_ethtool_stat et_rx_stats[] = {
 	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
 	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+	{DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
 	{DRVSTAT_RX_INFO(rx_compl)},
 	{DRVSTAT_RX_INFO(rx_compl_err)},
 	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@
 	{DRVSTAT_TX_INFO(tx_internal_parity_err)},
 	{DRVSTAT_TX_INFO(tx_bytes)},
 	{DRVSTAT_TX_INFO(tx_pkts)},
+	{DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
 	/* Number of skbs queued for trasmission by the driver */
 	{DRVSTAT_TX_INFO(tx_reqs)},
 	/* Number of times the TX queue was stopped due to lack
@@ -847,10 +849,21 @@
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
 			    u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+	int ret;
+
+	ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+				  loopback_type, 1);
+	if (ret)
+		return ret;
+
 	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 				       loopback_type, 1500, 2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+	ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+				  BE_NO_LOOPBACK, 1);
+	if (ret)
+		return ret;
+
 	return *status;
 }
 
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6ca693b..12687bf 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -681,11 +681,14 @@
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
 	struct be_tx_stats *stats = tx_stats(txo);
+	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
 
 	u64_stats_update_begin(&stats->sync);
 	stats->tx_reqs++;
 	stats->tx_bytes += skb->len;
-	stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+	stats->tx_pkts += tx_pkts;
+	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+		stats->tx_vxlan_offload_pkts += tx_pkts;
 	u64_stats_update_end(&stats->sync);
 }
 
@@ -1258,7 +1261,7 @@
 	if (is_udp_pkt((*skb))) {
 		struct udphdr *udp = udp_hdr((*skb));
 
-		switch (udp->dest) {
+		switch (ntohs(udp->dest)) {
 		case DHCP_CLIENT_PORT:
 			os2bmc = is_dhcp_client_filt_enabled(adapter);
 			goto done;
@@ -1961,6 +1964,8 @@
 	stats->rx_compl++;
 	stats->rx_bytes += rxcp->pkt_size;
 	stats->rx_pkts++;
+	if (rxcp->tunneled)
+		stats->rx_vxlan_offload_pkts++;
 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
 		stats->rx_mcast_pkts++;
 	if (rxcp->err)
@@ -3610,15 +3615,15 @@
 
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
 {
+	struct device *dev = &adapter->pdev->dev;
 	struct be_dma_mem cmd;
-	int status = 0;
 	u8 mac[ETH_ALEN];
+	int status;
 
 	eth_zero_addr(mac);
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-				     GFP_KERNEL);
+	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
 	if (!cmd.va)
 		return -ENOMEM;
 
@@ -3627,24 +3632,18 @@
 						PCICFG_PM_CONTROL_OFFSET,
 						PCICFG_PM_CONTROL_MASK);
 		if (status) {
-			dev_err(&adapter->pdev->dev,
-				"Could not enable Wake-on-lan\n");
-			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
-					  cmd.dma);
-			return status;
+			dev_err(dev, "Could not enable Wake-on-lan\n");
+			goto err;
 		}
-		status = be_cmd_enable_magic_wol(adapter,
-						 adapter->netdev->dev_addr,
-						 &cmd);
-		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
-		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
 	} else {
-		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
-		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
-		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+		ether_addr_copy(mac, adapter->netdev->dev_addr);
 	}
 
-	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
@@ -4977,7 +4976,7 @@
 {
 	if (!fhdr) {
 		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
-		return -1;
+		return false;
 	}
 
 	/* First letter of the build version is used to identify
@@ -5132,9 +5131,6 @@
 	int status = 0;
 	u8 hsw_mode;
 
-	if (!sriov_enabled(adapter))
-		return 0;
-
 	/* BE and Lancer chips support VEB mode only */
 	if (BEx_chip(adapter) || lancer_chip(adapter)) {
 		hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5144,6 +5140,9 @@
 					       NULL);
 		if (status)
 			return 0;
+
+		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+			return 0;
 	}
 
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5278,6 +5277,27 @@
 }
 #endif
 
+static int be_get_phys_port_id(struct net_device *dev,
+			       struct netdev_phys_item_id *ppid)
+{
+	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+	struct be_adapter *adapter = netdev_priv(dev);
+	u8 *id;
+
+	if (MAX_PHYS_ITEM_ID_LEN < id_len)
+		return -ENOSPC;
+
+	ppid->id[0] = adapter->hba_port_num + 1;
+	id = &ppid->id[1];
+	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+	ppid->id_len = id_len;
+
+	return 0;
+}
+
 static const struct net_device_ops be_netdev_ops = {
 	.ndo_open		= be_open,
 	.ndo_stop		= be_close,
@@ -5308,6 +5328,7 @@
 	.ndo_del_vxlan_port	= be_del_vxlan_port,
 	.ndo_features_check	= be_features_check,
 #endif
+	.ndo_get_phys_port_id   = be_get_phys_port_id,
 };
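
The new be_get_phys_port_id() hook exports an opaque, stable port identifier (HBA port number followed by the controller serial-number words) through the generic ndo_get_phys_port_id interface, which the core exposes to user space. A hedged sketch of reading it back via sysfs (the interface name "eth0" is only an example):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/net/eth0/phys_port_id", "r");
		char id[128];

		if (f && fgets(id, sizeof(id), f))
			printf("phys_port_id: %s", id);	/* printed as a hex string */
		if (f)
			fclose(f);
		return 0;
	}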
 
 static void be_netdev_init(struct net_device *netdev)
@@ -5866,7 +5887,6 @@
 	if (status)
 		return status;
 
-	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
 	status = be_resume(adapter);
@@ -5946,7 +5966,6 @@
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	pci_set_master(pdev);
-	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
 	/* Check if card is ok and fw is ready */
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 24a85b2..63c2bcf 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -150,6 +150,9 @@
 	if (!priv->tx_packet_sent || tx_ctrl.ct)
 		return;
 
+	/* Ack Tx ctrl register */
+	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);
+
 	/* Check Tx transmit error */
 	if (unlikely(tx_ctrl.et)) {
 		ndev->stats.tx_errors++;
@@ -158,11 +161,7 @@
 		ndev->stats.tx_bytes += tx_ctrl.nt;
 	}
 
-	if (priv->tx_skb) {
-		dev_kfree_skb(priv->tx_skb);
-		priv->tx_skb = NULL;
-	}
-
+	dev_kfree_skb(priv->tx_skb);
 	priv->tx_packet_sent = false;
 
 	if (netif_queue_stopped(ndev))
@@ -180,15 +179,16 @@
 {
 	struct net_device *ndev = napi->dev;
 	struct nps_enet_priv *priv = netdev_priv(ndev);
-	struct nps_enet_buf_int_enable buf_int_enable;
 	u32 work_done;
 
-	buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
-	buf_int_enable.tx_done = NPS_ENET_ENABLE;
 	nps_enet_tx_handler(ndev);
 	work_done = nps_enet_rx_handler(ndev);
 	if (work_done < budget) {
+		struct nps_enet_buf_int_enable buf_int_enable;
+
 		napi_complete(napi);
+		buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
+		buf_int_enable.tx_done = NPS_ENET_ENABLE;
 		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
 				 buf_int_enable.value);
 	}
@@ -211,12 +211,13 @@
 {
 	struct net_device *ndev = dev_instance;
 	struct nps_enet_priv *priv = netdev_priv(ndev);
-	struct nps_enet_buf_int_cause buf_int_cause;
+	struct nps_enet_rx_ctl rx_ctrl;
+	struct nps_enet_tx_ctl tx_ctrl;
 
-	buf_int_cause.value =
-			nps_enet_reg_get(priv, NPS_ENET_REG_BUF_INT_CAUSE);
+	rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
+	tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
 
-	if (buf_int_cause.tx_done || buf_int_cause.rx_rdy)
+	if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr)
 		if (likely(napi_schedule_prep(&priv->napi))) {
 			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
 			__napi_schedule(&priv->napi);
@@ -307,11 +308,8 @@
 
 	/* Discard Packets bigger than max frame length */
 	max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
-	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
+	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH)
 		ge_mac_cfg_3->max_len = max_frame_length;
-		nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
-				 ge_mac_cfg_3->value);
-	}
 
 	/* Enable interrupts */
 	buf_int_enable.rx_rdy = NPS_ENET_ENABLE;
@@ -339,11 +337,14 @@
 	ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE;
 	ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE;
 	ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR;
+	ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE;
 
 	/* Enable Rx and Tx */
 	ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE;
 	ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE;
 
+	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
+			 ge_mac_cfg_3->value);
 	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
 			 ge_mac_cfg_0.value);
 }
@@ -527,10 +528,10 @@
 	/* This driver handles one frame at a time  */
 	netif_stop_queue(ndev);
 
-	nps_enet_send_frame(ndev, skb);
-
 	priv->tx_skb = skb;
 
+	nps_enet_send_frame(ndev, skb);
+
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h
index fc45c9d..6703674 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.h
+++ b/drivers/net/ethernet/ezchip/nps_enet.h
@@ -36,7 +36,6 @@
 #define NPS_ENET_REG_RX_CTL		0x810
 #define NPS_ENET_REG_RX_BUF		0x818
 #define NPS_ENET_REG_BUF_INT_ENABLE	0x8C0
-#define NPS_ENET_REG_BUF_INT_CAUSE	0x8C4
 #define NPS_ENET_REG_GE_MAC_CFG_0	0x1000
 #define NPS_ENET_REG_GE_MAC_CFG_1	0x1004
 #define NPS_ENET_REG_GE_MAC_CFG_2	0x1008
@@ -108,25 +107,6 @@
 	};
 };
 
-/* Interrupt cause for data buffer events register */
-struct nps_enet_buf_int_cause {
-	union {
-		/* tx_done: Interrupt in the case when current frame was
-		 *          read from TX buffer.
-		 * rx_rdy:  Interrupt in the case when new frame is ready
-		 *          in RX buffer.
-		 */
-		struct {
-			u32
-			__reserved:30,
-			tx_done:1,
-			rx_rdy:1;
-		};
-
-		u32 value;
-	};
-};
-
 /* Gbps Eth MAC Configuration 0 register */
 struct nps_enet_ge_mac_cfg_0 {
 	union {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 271bb58..91925e3 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -364,7 +364,7 @@
 	return 0;
 }
 
-static int
+static struct bufdesc *
 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			     struct sk_buff *skb,
 			     struct net_device *ndev)
@@ -439,10 +439,7 @@
 		bdp->cbd_sc = status;
 	}
 
-	txq->cur_tx = bdp;
-
-	return 0;
-
+	return bdp;
 dma_mapping_error:
 	bdp = txq->cur_tx;
 	for (i = 0; i < frag; i++) {
@@ -450,7 +447,7 @@
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				bdp->cbd_datlen, DMA_TO_DEVICE);
 	}
-	return NETDEV_TX_OK;
+	return ERR_PTR(-ENOMEM);
 }
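
Returning a struct bufdesc * from fec_enet_txq_submit_frag_skb() lets the failure travel in the pointer itself via the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, instead of the old int return that was confusingly mapped to NETDEV_TX_OK. A minimal kernel-style sketch of that convention (the descriptor type and helper are hypothetical):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct my_desc { int id; };		/* placeholder descriptor type */

	/* Return a descriptor, or an errno encoded in the pointer value */
	static struct my_desc *get_desc(gfp_t gfp)
	{
		struct my_desc *d = kzalloc(sizeof(*d), gfp);

		return d ? d : ERR_PTR(-ENOMEM);
	}

	static int use_desc(void)
	{
		struct my_desc *d = get_desc(GFP_KERNEL);

		if (IS_ERR(d))
			return PTR_ERR(d);	/* recover the negative errno */
		/* ... program the descriptor ... */
		kfree(d);
		return 0;
	}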
 
 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -467,7 +464,6 @@
 	unsigned int estatus = 0;
 	unsigned int index;
 	int entries_free;
-	int ret;
 
 	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@
 
 	/* Fill in a Tx ring entry */
 	bdp = txq->cur_tx;
+	last_bdp = bdp;
 	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
@@ -513,9 +510,9 @@
 	}
 
 	if (nr_frags) {
-		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
-		if (ret)
-			return ret;
+		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+		if (IS_ERR(last_bdp))
+			return NETDEV_TX_OK;
 	} else {
 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 		if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@
 		ebdp->cbd_esc = estatus;
 	}
 
-	last_bdp = txq->cur_tx;
 	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
 	/* Save skb pointer */
 	txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@
 
 	skb_tx_timestamp(skb);
 
+	/* Make sure the updates to bdp and tx_skbuff are performed before
+	 * cur_tx.
+	 */
+	wmb();
 	txq->cur_tx = bdp;
 
 	/* Trigger transmission start */
@@ -1218,10 +1218,11 @@
 	/* get next bdp of dirty_tx */
 	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
-	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
-		/* current queue is empty */
-		if (bdp == txq->cur_tx)
+	while (bdp != READ_ONCE(txq->cur_tx)) {
+		/* Order the load of cur_tx and cbd_sc */
+		rmb();
+		status = READ_ONCE(bdp->cbd_sc);
+		if (status & BD_ENET_TX_READY)
 			break;
 
 		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
 
+		/* Make sure the updates to bdp and tx_skbuff are performed
+		 * before dirty_tx.
+		 */
+		wmb();
 		txq->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */
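
The wmb() added before the cur_tx/dirty_tx updates and the READ_ONCE()/rmb() pair in the clean path form a producer/consumer protocol on the Tx ring: the descriptor and tx_skbuff entry are fully written before the index is published, and the cleaner only trusts a descriptor after it has observed the published index. A condensed kernel-style sketch of that pairing (ring bookkeeping trimmed down, names illustrative):

	#include <linux/types.h>
	#include <linux/compiler.h>	/* READ_ONCE() */
	#include <asm/barrier.h>	/* wmb(), rmb() */

	struct desc { u32 status; };

	struct ring {
		struct desc *descs;
		unsigned int cur;	/* producer index, read by the consumer */
	};

	/* Producer (xmit path) */
	static void publish(struct ring *r, unsigned int i, const struct desc *d)
	{
		r->descs[i] = *d;	/* 1. fill in the descriptor             */
		wmb();			/* 2. make that store visible first      */
		r->cur = i + 1;		/* 3. only then advance the shared index */
	}

	/* Consumer (clean path) */
	static bool consume(struct ring *r, unsigned int i, struct desc *out)
	{
		if (i == READ_ONCE(r->cur))	/* nothing new published */
			return false;
		rmb();				/* index load before descriptor load */
		*out = r->descs[i];		/* descriptor is guaranteed complete */
		return true;
	}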
@@ -1402,6 +1407,7 @@
 		if ((status & BD_ENET_RX_LAST) == 0)
 			netdev_err(ndev, "rcv is not +last\n");
 
+		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
 
 		/* Check for errors. */
 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1774,11 +1780,11 @@
 	int ret = 0;
 
 	ret = pm_runtime_get_sync(dev);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	fep->mii_timeout = 0;
-	init_completion(&fep->mdio_done);
+	reinit_completion(&fep->mdio_done);
 
 	/* start a read op */
 	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
@@ -1813,11 +1819,11 @@
 	int ret = 0;
 
 	ret = pm_runtime_get_sync(dev);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	fep->mii_timeout = 0;
-	init_completion(&fep->mdio_done);
+	reinit_completion(&fep->mdio_done);
 
 	/* start a write op */
 	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
@@ -2865,7 +2871,7 @@
 	int ret;
 
 	ret = pm_runtime_get_sync(&fep->pdev->dev);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	pinctrl_pm_select_default_state(&fep->pdev->dev);
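
Two separate corrections run through these hunks: pm_runtime_get_sync() reports failure as a plain negative errno, so the IS_ERR_VALUE() test (intended for pointer-encoded errors) becomes a simple ret < 0 check, and the MDIO completion is reset with reinit_completion() rather than re-running init_completion(), which would also reinitialize the wait-queue head on every transfer. A short kernel-style sketch of the completion reuse pattern (the timeout value is illustrative):

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	static struct completion mdio_done;

	static void mdio_init_once(void)
	{
		init_completion(&mdio_done);	/* full init, done exactly once */
	}

	/* Called for every transfer; the IRQ handler calls complete(&mdio_done) */
	static int mdio_xfer(void)
	{
		reinit_completion(&mdio_done);	/* only clears the done counter */
		/* ... kick off the MDIO transaction here ... */
		if (!wait_for_completion_timeout(&mdio_done, usecs_to_jiffies(30)))
			return -ETIMEDOUT;
		return 0;
	}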
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index f457a23..1543cf0 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -506,12 +506,6 @@
 		break;
 
 	default:
-		/*
-		 * register RXMTRL must be set in order to do V1 packets,
-		 * therefore it is not possible to time stamp both V1 Sync and
-		 * Delay_Req messages and hardware does not support
-		 * timestamping all packets => return error
-		 */
 		fep->hwts_rx_en = 1;
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 10b3bbbb..4b69d061 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -109,15 +109,15 @@
 
 #define TX_TIMEOUT      (1*HZ)
 
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
 
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
-				    dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
-	struct rxbd8 *rxbdp;
 	u32 __iomem *rfbptr;
 	int i, j;
-	dma_addr_t bufaddr;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@
 	rfbptr = &regs->rfbptr0;
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->cur_rx = rx_queue->rx_bd_base;
-		rx_queue->skb_currx = 0;
-		rxbdp = rx_queue->rx_bd_base;
 
-		for (j = 0; j < rx_queue->rx_ring_size; j++) {
-			struct sk_buff *skb = rx_queue->rx_skbuff[j];
+		rx_queue->next_to_clean = 0;
+		rx_queue->next_to_use = 0;
+		rx_queue->next_to_alloc = 0;
 
-			if (skb) {
-				bufaddr = be32_to_cpu(rxbdp->bufPtr);
-			} else {
-				skb = gfar_new_skb(ndev, &bufaddr);
-				if (!skb) {
-					netdev_err(ndev, "Can't allocate RX buffers\n");
-					return -ENOMEM;
-				}
-				rx_queue->rx_skbuff[j] = skb;
-			}
-
-			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-			rxbdp++;
-		}
+		/* make sure next_to_clean != next_to_use after this
+		 * by leaving at least 1 unused descriptor
+		 */
+		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
 
 		rx_queue->rfbptr = rfbptr;
 		rfbptr += 2;
 	}
-
-	return 0;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
 {
 	void *vaddr;
 	dma_addr_t addr;
-	int i, j, k;
+	int i, j;
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct device *dev = priv->dev;
 	struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@
 		rx_queue = priv->rx_queue[i];
 		rx_queue->rx_bd_base = vaddr;
 		rx_queue->rx_bd_dma_base = addr;
-		rx_queue->dev = ndev;
+		rx_queue->ndev = ndev;
+		rx_queue->dev = dev;
 		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
 	}
@@ -294,25 +278,20 @@
 		if (!tx_queue->tx_skbuff)
 			goto cleanup;
 
-		for (k = 0; k < tx_queue->tx_ring_size; k++)
-			tx_queue->tx_skbuff[k] = NULL;
+		for (j = 0; j < tx_queue->tx_ring_size; j++)
+			tx_queue->tx_skbuff[j] = NULL;
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->rx_skbuff =
-			kmalloc_array(rx_queue->rx_ring_size,
-				      sizeof(*rx_queue->rx_skbuff),
-				      GFP_KERNEL);
-		if (!rx_queue->rx_skbuff)
+		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+					    sizeof(*rx_queue->rx_buff),
+					    GFP_KERNEL);
+		if (!rx_queue->rx_buff)
 			goto cleanup;
-
-		for (j = 0; j < rx_queue->rx_ring_size; j++)
-			rx_queue->rx_skbuff[j] = NULL;
 	}
 
-	if (gfar_init_bds(ndev))
-		goto cleanup;
+	gfar_init_bds(ndev);
 
 	return 0;
 
@@ -354,10 +333,8 @@
 	}
 }
 
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
 {
-	int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
 	/* set this when rx hw offload (TOE) functions are being used */
 	priv->uses_rxfcb = 0;
 
@@ -366,16 +343,6 @@
 
 	if (priv->hwts_rx_en)
 		priv->uses_rxfcb = 1;
-
-	if (priv->uses_rxfcb)
-		frame_size += GMAC_FCB_LEN;
-
-	frame_size += priv->padding;
-
-	frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-		     INCREMENTAL_BUFFER_SIZE;
-
-	priv->rx_buffer_size = frame_size;
 }
 
 static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -593,9 +560,8 @@
 		if (!priv->rx_queue[i])
 			return -ENOMEM;
 
-		priv->rx_queue[i]->rx_skbuff = NULL;
 		priv->rx_queue[i]->qindex = i;
-		priv->rx_queue[i]->dev = priv->ndev;
+		priv->rx_queue[i]->ndev = priv->ndev;
 	}
 	return 0;
 }
@@ -1187,12 +1153,11 @@
 
 	udelay(3);
 
-	/* Compute rx_buff_size based on config flags */
-	gfar_rx_buff_size_config(priv);
+	gfar_rx_offload_en(priv);
 
 	/* Initialize the max receive frame/buffer lengths */
-	gfar_write(&regs->maxfrm, priv->rx_buffer_size);
-	gfar_write(&regs->mrblr, priv->rx_buffer_size);
+	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
 
 	/* Initialize the Minimum Frame Length Register */
 	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1200,12 +1165,11 @@
 	/* Initialize MACCFG2. */
 	tempval = MACCFG2_INIT_SETTINGS;
 
-	/* If the mtu is larger than the max size for standard
-	 * ethernet frames (ie, a jumbo frame), then set maccfg2
-	 * to allow huge frames, and to check the length
+	/* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+	 * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
+	 * and by checking RxBD[LG] and discarding larger than MAXFRM.
 	 */
-	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-	    gfar_has_errata(priv, GFAR_ERRATA_74))
+	if (gfar_has_errata(priv, GFAR_ERRATA_74))
 		tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
 
 	gfar_write(&regs->maccfg2, tempval);
@@ -1415,8 +1379,6 @@
 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
 		dev->needed_headroom = GMAC_FCB_LEN;
 
-	priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
 	/* Initializing some of the rx/tx queue level parameters */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1599,10 +1561,7 @@
 		return 0;
 	}
 
-	if (gfar_init_bds(ndev)) {
-		free_skb_resources(priv);
-		return -ENOMEM;
-	}
+	gfar_init_bds(ndev);
 
 	gfar_mac_reset(priv);
 
@@ -1893,26 +1852,32 @@
 
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 {
-	struct rxbd8 *rxbdp;
-	struct gfar_private *priv = netdev_priv(rx_queue->dev);
 	int i;
 
-	rxbdp = rx_queue->rx_bd_base;
+	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+	if (rx_queue->skb)
+		dev_kfree_skb(rx_queue->skb);
 
 	for (i = 0; i < rx_queue->rx_ring_size; i++) {
-		if (rx_queue->rx_skbuff[i]) {
-			dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
-					 priv->rx_buffer_size,
-					 DMA_FROM_DEVICE);
-			dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
-			rx_queue->rx_skbuff[i] = NULL;
-		}
+		struct	gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
 		rxbdp->lstatus = 0;
 		rxbdp->bufPtr = 0;
 		rxbdp++;
+
+		if (!rxb->page)
+			continue;
+
+		dma_unmap_single(rx_queue->dev, rxb->dma,
+				 PAGE_SIZE, DMA_FROM_DEVICE);
+		__free_page(rxb->page);
+
+		rxb->page = NULL;
 	}
-	kfree(rx_queue->rx_skbuff);
-	rx_queue->rx_skbuff = NULL;
+
+	kfree(rx_queue->rx_buff);
+	rx_queue->rx_buff = NULL;
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
@@ -1937,7 +1902,7 @@
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		if (rx_queue->rx_skbuff)
+		if (rx_queue->rx_buff)
 			free_skb_rx_queue(rx_queue);
 	}
 
@@ -2500,7 +2465,7 @@
 	struct gfar_private *priv = netdev_priv(dev);
 	int frame_size = new_mtu + ETH_HLEN;
 
-	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+	if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
 		netif_err(priv, drv, dev, "Invalid MTU setting\n");
 		return -EINVAL;
 	}
@@ -2554,15 +2519,6 @@
 	schedule_work(&priv->reset_task);
 }
 
-static void gfar_align_skb(struct sk_buff *skb)
-{
-	/* We need the data buffer to be aligned properly.  We will reserve
-	 * as many bytes as needed to align the data properly
-	 */
-	skb_reserve(skb, RXBUF_ALIGNMENT -
-		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
 /* Interrupt Handler for Transmit complete */
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2620,7 +2576,8 @@
 
 		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
 			struct skb_shared_hwtstamps shhwtstamps;
-			u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+			u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+					  ~0x7UL);
 
 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
@@ -2669,49 +2626,85 @@
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-
-	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
-	if (!skb)
-		return NULL;
-
-	gfar_align_skb(skb);
-
-	return skb;
-}
-
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
+	struct page *page;
 	dma_addr_t addr;
 
-	skb = gfar_alloc_skb(dev);
-	if (!skb)
-		return NULL;
+	page = dev_alloc_page();
+	if (unlikely(!page))
+		return false;
 
-	addr = dma_map_single(priv->dev, skb->data,
-			      priv->rx_buffer_size, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(priv->dev, addr))) {
-		dev_kfree_skb_any(skb);
-		return NULL;
+	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+		__free_page(page);
+
+		return false;
 	}
 
-	*bufaddr = addr;
-	return skb;
+	rxb->dma = addr;
+	rxb->page = page;
+	rxb->page_offset = 0;
+
+	return true;
 }
 
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
+	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+	atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt)
+{
+	struct rxbd8 *bdp;
+	struct gfar_rx_buff *rxb;
+	int i;
+
+	i = rx_queue->next_to_use;
+	bdp = &rx_queue->rx_bd_base[i];
+	rxb = &rx_queue->rx_buff[i];
+
+	while (alloc_cnt--) {
+		/* try reuse page */
+		if (unlikely(!rxb->page)) {
+			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+				gfar_rx_alloc_err(rx_queue);
+				break;
+			}
+		}
+
+		/* Setup the new RxBD */
+		gfar_init_rxbdp(rx_queue, bdp,
+				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+		/* Update to the next pointer */
+		bdp++;
+		rxb++;
+
+		if (unlikely(++i == rx_queue->rx_ring_size)) {
+			i = 0;
+			bdp = rx_queue->rx_bd_base;
+			rxb = rx_queue->rx_buff;
+		}
+	}
+
+	rx_queue->next_to_use = i;
+	rx_queue->next_to_alloc = i;
+}
+
+static void count_errors(u32 lstatus, struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &ndev->stats;
 	struct gfar_extra_stats *estats = &priv->extra_stats;
 
 	/* If the packet was truncated, none of the other errors matter */
-	if (status & RXBD_TRUNCATED) {
+	if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
 		stats->rx_length_errors++;
 
 		atomic64_inc(&estats->rx_trunc);
@@ -2719,25 +2712,25 @@
 		return;
 	}
 	/* Count the errors, if there were any */
-	if (status & (RXBD_LARGE | RXBD_SHORT)) {
+	if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
 		stats->rx_length_errors++;
 
-		if (status & RXBD_LARGE)
+		if (lstatus & BD_LFLAG(RXBD_LARGE))
 			atomic64_inc(&estats->rx_large);
 		else
 			atomic64_inc(&estats->rx_short);
 	}
-	if (status & RXBD_NONOCTET) {
+	if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
 		stats->rx_frame_errors++;
 		atomic64_inc(&estats->rx_nonoctet);
 	}
-	if (status & RXBD_CRCERR) {
+	if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
 		atomic64_inc(&estats->rx_crcerr);
 		stats->rx_crc_errors++;
 	}
-	if (status & RXBD_OVERRUN) {
+	if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
 		atomic64_inc(&estats->rx_overrun);
-		stats->rx_crc_errors++;
+		stats->rx_over_errors++;
 	}
 }
 
@@ -2788,6 +2781,93 @@
 	return IRQ_HANDLED;
 }
 
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+			     struct sk_buff *skb, bool first)
+{
+	unsigned int size = lstatus & BD_LENGTH_MASK;
+	struct page *page = rxb->page;
+
+	/* Remove the FCS from the packet length */
+	if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+		size -= ETH_FCS_LEN;
+
+	if (likely(first))
+		skb_put(skb, size);
+	else
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+				rxb->page_offset + RXBUF_ALIGNMENT,
+				size, GFAR_RXB_TRUESIZE);
+
+	/* try reuse page */
+	if (unlikely(page_count(page) != 1))
+		return false;
+
+	/* change offset to the other half */
+	rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+	atomic_inc(&page->_count);
+
+	return true;
+}
+
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+			       struct gfar_rx_buff *old_rxb)
+{
+	struct gfar_rx_buff *new_rxb;
+	u16 nta = rxq->next_to_alloc;
+
+	new_rxb = &rxq->rx_buff[nta];
+
+	/* find next buf that can reuse a page */
+	nta++;
+	rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+	/* copy page reference */
+	*new_rxb = *old_rxb;
+
+	/* sync for use by the device */
+	dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+					 old_rxb->page_offset,
+					 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+					    u32 lstatus, struct sk_buff *skb)
+{
+	struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+	struct page *page = rxb->page;
+	bool first = false;
+
+	if (likely(!skb)) {
+		void *buff_addr = page_address(page) + rxb->page_offset;
+
+		skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+		if (unlikely(!skb)) {
+			gfar_rx_alloc_err(rx_queue);
+			return NULL;
+		}
+		skb_reserve(skb, RXBUF_ALIGNMENT);
+		first = true;
+	}
+
+	dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+				      GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+	if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+		/* reuse the free half of the page */
+		gfar_reuse_rx_page(rx_queue, rxb);
+	} else {
+		/* page cannot be reused, unmap it */
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
+	}
+
+	/* clear rxb content */
+	rxb->page = NULL;
+
+	return skb;
+}
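+
+/* gfar_add_rx_frag() and gfar_reuse_rx_page() split each mapped page into two
+ * 2 KB halves: a half just handed to the stack can be traded for the other
+ * half, but only when page_count() shows the stack has already dropped its
+ * reference. A tiny user-space model of that bookkeeping follows (the
+ * refcount is mocked purely for illustration):
+ *
+ *	#include <stdio.h>
+ *	#include <stdbool.h>
+ *
+ *	#define TRUESIZE 2048		// half of a 4 KiB page, as in the driver
+ *
+ *	struct buff {
+ *		int refcount;		// stands in for the struct page refcount
+ *		unsigned int offset;	// which half the hardware writes next
+ *	};
+ *
+ *	// Keep the page if possible: flip to the other half, take a reference
+ *	static bool try_reuse(struct buff *b)
+ *	{
+ *		if (b->refcount != 1)	// the stack still owns the other half
+ *			return false;
+ *		b->offset ^= TRUESIZE;	// 0 <-> 2048
+ *		b->refcount++;		// reference for the half we keep
+ *		return true;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		struct buff b = { .refcount = 1, .offset = 0 };
+ *
+ *		printf("reused=%d offset=%u\n", try_reuse(&b), b.offset);
+ *		return 0;
+ *	}
+ */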
+
 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
 	/* If valid headers were found, and valid sums
@@ -2802,10 +2882,9 @@
 }
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_private *priv = netdev_priv(ndev);
 	struct rxfcb *fcb = NULL;
 
 	/* fcb is at the beginning if exists */
@@ -2814,10 +2893,8 @@
 	/* Remove the FCB from the skb
 	 * Remove the padded bytes, if there are any
 	 */
-	if (amount_pull) {
-		skb_record_rx_queue(skb, fcb->rq);
-		skb_pull(skb, amount_pull);
-	}
+	if (priv->uses_rxfcb)
+		skb_pull(skb, GMAC_FCB_LEN);
 
 	/* Get receive timestamp from the skb */
 	if (priv->hwts_rx_en) {
@@ -2831,24 +2908,20 @@
 	if (priv->padding)
 		skb_pull(skb, priv->padding);
 
-	if (dev->features & NETIF_F_RXCSUM)
+	if (ndev->features & NETIF_F_RXCSUM)
 		gfar_rx_checksum(skb, fcb);
 
 	/* Tell the skb what kind of packet this is */
-	skb->protocol = eth_type_trans(skb, dev);
+	skb->protocol = eth_type_trans(skb, ndev);
 
 	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
 	 * Even if vlan rx accel is disabled, on some chips
 	 * RXFCB_VLN is pseudo randomly set.
 	 */
-	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
 	    be16_to_cpu(fcb->flags) & RXFCB_VLN)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       be16_to_cpu(fcb->vlctl));
-
-	/* Send the packet up the stack */
-	napi_gro_receive(napi, skb);
-
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2857,91 +2930,89 @@
  */
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-	struct net_device *dev = rx_queue->dev;
-	struct rxbd8 *bdp, *base;
-	struct sk_buff *skb;
-	int pkt_len;
-	int amount_pull;
-	int howmany = 0;
-	struct gfar_private *priv = netdev_priv(dev);
+	struct net_device *ndev = rx_queue->ndev;
+	struct gfar_private *priv = netdev_priv(ndev);
+	struct rxbd8 *bdp;
+	int i, howmany = 0;
+	struct sk_buff *skb = rx_queue->skb;
+	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+	unsigned int total_bytes = 0, total_pkts = 0;
 
 	/* Get the first full descriptor */
-	bdp = rx_queue->cur_rx;
-	base = rx_queue->rx_bd_base;
+	i = rx_queue->next_to_clean;
 
-	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+	while (rx_work_limit--) {
+		u32 lstatus;
 
-	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-		struct sk_buff *newskb;
-		dma_addr_t bufaddr;
-
-		rmb();
-
-		/* Add another skb for the future */
-		newskb = gfar_new_skb(dev, &bufaddr);
-
-		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
-
-		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-				 priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-			     be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-			bdp->status = cpu_to_be16(RXBD_LARGE);
-
-		/* We drop the frame if we failed to allocate a new buffer */
-		if (unlikely(!newskb ||
-			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
-			count_errors(be16_to_cpu(bdp->status), dev);
-
-			if (unlikely(!newskb)) {
-				newskb = skb;
-				bufaddr = be32_to_cpu(bdp->bufPtr);
-			} else if (skb)
-				dev_kfree_skb(skb);
-		} else {
-			/* Increment the number of packets */
-			rx_queue->stats.rx_packets++;
-			howmany++;
-
-			if (likely(skb)) {
-				pkt_len = be16_to_cpu(bdp->length) -
-					  ETH_FCS_LEN;
-				/* Remove the FCS from the packet length */
-				skb_put(skb, pkt_len);
-				rx_queue->stats.rx_bytes += pkt_len;
-				skb_record_rx_queue(skb, rx_queue->qindex);
-				gfar_process_frame(dev, skb, amount_pull,
-						   &rx_queue->grp->napi_rx);
-
-			} else {
-				netif_warn(priv, rx_err, dev, "Missing skb!\n");
-				rx_queue->stats.rx_dropped++;
-				atomic64_inc(&priv->extra_stats.rx_skbmissing);
-			}
-
+		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+			cleaned_cnt = 0;
 		}
 
-		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+		bdp = &rx_queue->rx_bd_base[i];
+		lstatus = be32_to_cpu(bdp->lstatus);
+		if (lstatus & BD_LFLAG(RXBD_EMPTY))
+			break;
 
-		/* Setup the new bdp */
-		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+		/* order rx buffer descriptor reads */
+		rmb();
 
-		/* Update Last Free RxBD pointer for LFC */
-		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-			gfar_write(rx_queue->rfbptr, (u32)bdp);
+		/* fetch next to clean buffer from the ring */
+		skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+		if (unlikely(!skb))
+			break;
 
-		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+		cleaned_cnt++;
+		howmany++;
 
-		/* update to point at the next skb */
-		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+		if (unlikely(++i == rx_queue->rx_ring_size))
+			i = 0;
+
+		rx_queue->next_to_clean = i;
+
+		/* fetch next buffer if not the last in frame */
+		if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+			continue;
+
+		if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+			count_errors(lstatus, ndev);
+
+			/* discard faulty buffer */
+			dev_kfree_skb(skb);
+			skb = NULL;
+			rx_queue->stats.rx_dropped++;
+			continue;
+		}
+
+		/* Increment the number of packets */
+		total_pkts++;
+		total_bytes += skb->len;
+
+		skb_record_rx_queue(skb, rx_queue->qindex);
+
+		gfar_process_frame(ndev, skb);
+
+		/* Send the packet up the stack */
+		napi_gro_receive(&rx_queue->grp->napi_rx, skb);
+
+		skb = NULL;
 	}
 
-	/* Update the current rxbd pointer to be the next one */
-	rx_queue->cur_rx = bdp;
+	/* Store incomplete frames for completion */
+	rx_queue->skb = skb;
+
+	rx_queue->stats.rx_packets += total_pkts;
+	rx_queue->stats.rx_bytes += total_bytes;
+
+	if (cleaned_cnt)
+		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+	/* Update Last Free RxBD pointer for LFC */
+	if (unlikely(priv->tx_actual_en)) {
+		u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+		gfar_write(rx_queue->rfbptr, bdp_dma);
+	}
 
 	return howmany;
 }
@@ -3459,7 +3530,6 @@
 	struct phy_device *phydev = priv->phydev;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	int i;
-	struct rxbd8 *bdp;
 
 	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
 		return;
@@ -3516,15 +3586,11 @@
 		/* Turn last free buffer recording on */
 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
 			for (i = 0; i < priv->num_rx_queues; i++) {
-				rx_queue = priv->rx_queue[i];
-				bdp = rx_queue->cur_rx;
-				/* skip to previous bd */
-				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-					      rx_queue->rx_bd_base,
-					      rx_queue->rx_ring_size);
+				u32 bdp_dma;
 
-				if (rx_queue->rfbptr)
-					gfar_write(rx_queue->rfbptr, (u32)bdp);
+				rx_queue = priv->rx_queue[i];
+				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, bdp_dma);
 			}
 
 			priv->tx_actual_en = 1;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5545e41..8c19948 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -71,11 +71,6 @@
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers.  ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
 #define PHY_INIT_TIMEOUT 100000
 
 #define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@
 #define DEFAULT_TX_RING_SIZE	256
 #define DEFAULT_RX_RING_SIZE	256
 
+#define GFAR_RX_BUFF_ALLOC	16
+
 #define GFAR_RX_MAX_RING_SIZE   256
 #define GFAR_TX_MAX_RING_SIZE   256
 
@@ -103,11 +100,14 @@
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define DEFAULT_RX_BUFFER_SIZE  1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+			  + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
 #define TX_RING_MOD_MASK(size) (size-1)
 #define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
 
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
@@ -640,6 +640,7 @@
 };
 
 struct gfar_extra_stats {
+	atomic64_t rx_alloc_err;
 	atomic64_t rx_large;
 	atomic64_t rx_short;
 	atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@
 	atomic64_t eberr;
 	atomic64_t tx_babt;
 	atomic64_t tx_underrun;
-	atomic64_t rx_skbmissing;
 	atomic64_t tx_timeout;
 };
 
@@ -1012,34 +1012,42 @@
 	unsigned long rx_dropped;
 };
 
+struct gfar_rx_buff {
+	dma_addr_t dma;
+	struct page *page;
+	unsigned int page_offset;
+};
+
 /**
  *	struct gfar_priv_rx_q - per rx queue structure
- *	@rx_skbuff: skb pointers
- *	@skb_currx: currently use skb pointer
+ *	@rx_buff: Array of buffer info metadata structs
  *	@rx_bd_base: First rx buffer descriptor
- *	@cur_rx: Next free rx ring entry
+ *	@next_to_use: index of the next buffer to be alloc'd
+ *	@next_to_clean: index of the next buffer to be cleaned
  *	@qindex: index of this queue
- *	@dev: back pointer to the dev structure
+ *	@ndev: back pointer to net_device
  *	@rx_ring_size: Rx ring size
  *	@rxcoalescing: enable/disable rx-coalescing
  *	@rxic: receive interrupt coalescing value
  */
 
 struct gfar_priv_rx_q {
-	struct	sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
-	dma_addr_t rx_bd_dma_base;
+	struct	gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
 	struct	rxbd8 *rx_bd_base;
-	struct	rxbd8 *cur_rx;
-	struct	net_device *dev;
-	struct gfar_priv_grp *grp;
+	struct	net_device *ndev;
+	struct	device *dev;
+	u16 rx_ring_size;
+	u16 qindex;
+	struct	gfar_priv_grp *grp;
+	u16 next_to_clean;
+	u16 next_to_use;
+	u16 next_to_alloc;
+	struct	sk_buff *skb;
 	struct rx_q_stats stats;
-	u16	skb_currx;
-	u16	qindex;
-	unsigned int	rx_ring_size;
-	/* RX Coalescing values */
+	u32 __iomem *rfbptr;
 	unsigned char rxcoalescing;
 	unsigned long rxic;
-	u32 __iomem *rfbptr;
+	dma_addr_t rx_bd_dma_base;
 };
 
 enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@
 	struct device *dev;
 	struct net_device *ndev;
 	enum gfar_errata errata;
-	unsigned int rx_buffer_size;
 
 	u16 uses_rxfcb;
 	u16 padding;
@@ -1292,6 +1299,28 @@
 	bdp->lstatus = cpu_to_be32(lstatus);
 }
 
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+	if (rxq->next_to_clean > rxq->next_to_use)
+		return rxq->next_to_clean - rxq->next_to_use - 1;
+
+	return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+	struct rxbd8 *bdp;
+	u32 bdp_dma;
+	int i;
+
+	i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+	bdp = &rxq->rx_bd_base[i];
+	bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+	bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+	return bdp_dma;
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
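
The gfar_rxbd_unused() helper added above counts free slots in the circular ring while always keeping one descriptor unused, so that next_to_clean == next_to_use can only mean "empty". A quick standalone check of the formula (ring size 8 picked arbitrarily):

	#include <stdio.h>

	static int rxbd_unused(int size, int next_to_clean, int next_to_use)
	{
		if (next_to_clean > next_to_use)
			return next_to_clean - next_to_use - 1;
		return size + next_to_clean - next_to_use - 1;
	}

	int main(void)
	{
		printf("%d\n", rxbd_unused(8, 0, 0));	/* empty ring -> 7 refillable */
		printf("%d\n", rxbd_unused(8, 0, 1));	/* one descriptor in flight -> 6 */
		printf("%d\n", rxbd_unused(8, 5, 2));	/* wrapped indices -> 2 */
		return 0;
	}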
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 5b90fcf..6bdc891 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -61,6 +61,8 @@
 			  struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+	/* extra stats */
+	"rx-allocation-errors",
 	"rx-large-frame-errors",
 	"rx-short-frame-errors",
 	"rx-non-octet-errors",
@@ -72,8 +74,8 @@
 	"ethernet-bus-error",
 	"tx-babbling-errors",
 	"tx-underrun-errors",
-	"rx-skb-missing-errors",
 	"tx-timeout-errors",
+	/* rmon stats */
 	"tx-rx-64-frames",
 	"tx-rx-65-127-frames",
 	"tx-rx-128-255-frames",
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d49bee3..cc2d8b4 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -965,7 +965,6 @@
 	.remove	= hip04_remove,
 	.driver	= {
 		.name		= DRV_NAME,
-		.owner		= THIS_MODULE,
 		.of_match_table	= hip04_mac_match,
 	},
 };
diff --git a/drivers/net/ethernet/hisilicon/hip04_mdio.c b/drivers/net/ethernet/hisilicon/hip04_mdio.c
index b3bac25d..fca0a5b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hip04_mdio.c
@@ -174,7 +174,6 @@
 	.remove = hip04_mdio_remove,
 	.driver = {
 		.name = "hip04-mdio",
-		.owner = THIS_MODULE,
 		.of_match_table = hip04_mdio_match,
 	},
 };
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 29bbb62..7af870a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -79,6 +79,11 @@
 module_param(rx_flush, uint, 0644);
 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+	"Use old large send method on firmware that supports the new method");
+
 struct ibmveth_stat {
 	char name[ETH_GSTRING_LEN];
 	int offset;
@@ -101,7 +106,8 @@
 	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
 	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
 	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
-	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@
 	return rc1 ? rc1 : rc2;
 }
 
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+	struct ibmveth_adapter *adapter = netdev_priv(dev);
+	unsigned long set_attr, clr_attr, ret_attr;
+	long ret1, ret2;
+	int rc1 = 0, rc2 = 0;
+	int restart = 0;
+
+	if (netif_running(dev)) {
+		restart = 1;
+		adapter->pool_config = 1;
+		ibmveth_close(dev);
+		adapter->pool_config = 0;
+	}
+
+	set_attr = 0;
+	clr_attr = 0;
+
+	if (data)
+		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+	else
+		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+	    !old_large_send) {
+		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+					  set_attr, &ret_attr);
+
+		if (ret2 != H_SUCCESS) {
+			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+				   data, ret2);
+
+			h_illan_attributes(adapter->vdev->unit_address,
+					   set_attr, clr_attr, &ret_attr);
+
+			if (data == 1)
+				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+			rc1 = -EIO;
+
+		} else {
+			adapter->fw_large_send_support = data;
+			adapter->large_send = data;
+		}
+	} else {
+		/* Older firmware version of large send offload does not
+		 * support tcp6/ipv6
+		 */
+		if (data == 1) {
+			dev->features &= ~NETIF_F_TSO6;
+			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+		}
+		adapter->large_send = data;
+	}
+
+	if (restart)
+		rc2 = ibmveth_open(dev);
+
+	return rc1 ? rc1 : rc2;
+}
+
 static int ibmveth_set_features(struct net_device *dev,
 	netdev_features_t features)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
 	int rx_csum = !!(features & NETIF_F_RXCSUM);
-	int rc;
-	netdev_features_t changed = features ^ dev->features;
+	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+	int rc1 = 0, rc2 = 0;
 
-	if (features & NETIF_F_TSO & changed)
-		netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+	if (rx_csum != adapter->rx_csum) {
+		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+		if (rc1 && !adapter->rx_csum)
+			dev->features =
+				features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+	}
 
-	if (rx_csum == adapter->rx_csum)
-		return 0;
+	if (large_send != adapter->large_send) {
+		rc2 = ibmveth_set_tso(dev, large_send);
+		if (rc2 && !adapter->large_send)
+			dev->features =
+				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+	}
 
-	rc = ibmveth_set_csum_offload(dev, rx_csum);
-	if (rc && !adapter->rx_csum)
-		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
-
-	return rc;
+	return rc1 ? rc1 : rc2;
 }
 
 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@
 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 
 static int ibmveth_send(struct ibmveth_adapter *adapter,
-			union ibmveth_buf_desc *descs)
+			union ibmveth_buf_desc *descs, unsigned long mss)
 {
 	unsigned long correlator;
 	unsigned int retry_count;
@@ -934,7 +1006,8 @@
 					     descs[0].desc, descs[1].desc,
 					     descs[2].desc, descs[3].desc,
 					     descs[4].desc, descs[5].desc,
-					     correlator, &correlator);
+					     correlator, &correlator, mss,
+					     adapter->fw_large_send_support);
 	} while ((ret == H_BUSY) && (retry_count--));
 
 	if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@
 	int last, i;
 	int force_bounce = 0;
 	dma_addr_t dma_addr;
+	unsigned long mss = 0;
 
 	/*
 	 * veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@
 
 	desc_flags = IBMVETH_BUF_VALID;
 
+	if (skb_is_gso(skb) && adapter->fw_large_send_support)
+		desc_flags |= IBMVETH_BUF_LRG_SND;
+
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		unsigned char *buf = skb_transport_header(skb) +
 						skb->csum_offset;
@@ -1007,7 +1084,7 @@
 		descs[0].fields.flags_len = desc_flags | skb->len;
 		descs[0].fields.address = adapter->bounce_buffer_dma;
 
-		if (ibmveth_send(adapter, descs)) {
+		if (ibmveth_send(adapter, descs, 0)) {
 			adapter->tx_send_failed++;
 			netdev->stats.tx_dropped++;
 		} else {
@@ -1041,16 +1118,23 @@
 		descs[i+1].fields.address = dma_addr;
 	}
 
-	if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
-		/* Put -1 in the IP checksum to tell phyp it
-		 *  is a largesend packet and put the mss in the TCP checksum.
-		 */
-		ip_hdr(skb)->check = 0xffff;
-		tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
-		adapter->tx_large_packets++;
+	if (skb_is_gso(skb)) {
+		if (adapter->fw_large_send_support) {
+			mss = (unsigned long)skb_shinfo(skb)->gso_size;
+			adapter->tx_large_packets++;
+		} else if (!skb_is_gso_v6(skb)) {
+			/* Put -1 in the IP checksum to tell phyp it
+			 * is a largesend packet. Put the mss in
+			 * the TCP checksum.
+			 */
+			ip_hdr(skb)->check = 0xffff;
+			tcp_hdr(skb)->check =
+				cpu_to_be16(skb_shinfo(skb)->gso_size);
+			adapter->tx_large_packets++;
+		}
 	}
 
-	if (ibmveth_send(adapter, descs)) {
+	if (ibmveth_send(adapter, descs, mss)) {
 		adapter->tx_send_failed++;
 		netdev->stats.tx_dropped++;
 	} else {
@@ -1401,6 +1485,8 @@
 	struct ibmveth_adapter *adapter;
 	unsigned char *mac_addr_p;
 	unsigned int *mcastFilterSize_p;
+	long ret;
+	unsigned long ret_attr;
 
 	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
 		dev->unit_address);
@@ -1449,10 +1535,19 @@
 	SET_NETDEV_DEV(netdev, &dev->dev);
 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
 	netdev->features |= netdev->hw_features;
 
-	/* TSO is disabled by default */
-	netdev->hw_features |= NETIF_F_TSO;
+	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+	/* If running older firmware, TSO should not be enabled by default */
+	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+	    !old_large_send) {
+		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+		netdev->features |= netdev->hw_features;
+	} else {
+		netdev->hw_features |= NETIF_F_TSO;
+	}
 
 	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 41dedb1..4eade67 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -40,6 +40,8 @@
 #define IbmVethMcastRemoveFilter     0x2UL
 #define IbmVethMcastClearFilterTable 0x3UL
 
+#define IBMVETH_ILLAN_LRG_SR_ENABLED	0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT	0x0000000000008000UL
 #define IBMVETH_ILLAN_PADDED_PKT_CSUM	0x0000000000002000UL
 #define IBMVETH_ILLAN_TRUNK_PRI_MASK	0x0000000000000F00UL
 #define IBMVETH_ILLAN_IPV6_TCP_CSUM		0x0000000000000004UL
@@ -59,13 +61,20 @@
 static inline long h_send_logical_lan(unsigned long unit_address,
 		unsigned long desc1, unsigned long desc2, unsigned long desc3,
 		unsigned long desc4, unsigned long desc5, unsigned long desc6,
-		unsigned long corellator_in, unsigned long *corellator_out)
+		unsigned long corellator_in, unsigned long *corellator_out,
+		unsigned long mss, unsigned long large_send_support)
 {
 	long rc;
 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
 
-	rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
-			desc2, desc3, desc4, desc5, desc6, corellator_in);
+	if (large_send_support)
+		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+				  desc1, desc2, desc3, desc4, desc5, desc6,
+				  corellator_in, mss);
+	else
+		rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+				  desc1, desc2, desc3, desc4, desc5, desc6,
+				  corellator_in);
 
 	*corellator_out = retbuf[0];
 
@@ -147,11 +156,13 @@
     struct ibmveth_rx_q rx_queue;
     int pool_config;
     int rx_csum;
+    int large_send;
     void *bounce_buffer;
     dma_addr_t bounce_buffer_dma;
 
     u64 fw_ipv6_csum_support;
     u64 fw_ipv4_csum_support;
+    u64 fw_large_send_support;
     /* adapter specific stats */
     u64 replenish_task_cycles;
     u64 replenish_no_mem;
@@ -182,6 +193,7 @@
 #endif
 #define IBMVETH_BUF_VALID	0x80000000
 #define IBMVETH_BUF_TOGGLE	0x40000000
+#define IBMVETH_BUF_LRG_SND     0x04000000
 #define IBMVETH_BUF_NO_CSUM	0x02000000
 #define IBMVETH_BUF_CSUM_GOOD	0x01000000
 #define IBMVETH_BUF_LEN_MASK	0x00FFFFFF
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index d2657a4..068789e 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1770,8 +1770,11 @@
 	dma_addr = pci_map_single(nic->pdev,
 				  skb->data, skb->len, PCI_DMA_TODEVICE);
 	/* If we can't map the skb, have the upper layer try later */
-	if (pci_dma_mapping_error(nic->pdev, dma_addr))
+	if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
 		return -ENOMEM;
+	}
 
 	/*
 	 * Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2967,6 +2970,11 @@
 			   nic->params.cbs.max * sizeof(struct cb),
 			   sizeof(u32),
 			   0);
+	if (!nic->cbs_pool) {
+		netif_err(nic, probe, nic->netdev, "Cannot create DMA pool, aborting\n");
+		err = -ENOMEM;
+		goto err_out_pool;
+	}
 	netif_info(nic, probe, nic->netdev,
 		   "addr 0x%llx, irq %d, MAC addr %pM\n",
 		   (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
@@ -2974,6 +2982,8 @@
 
 	return 0;
 
+err_out_pool:
+	unregister_netdev(netdev);
 err_out_free:
 	e100_free(nic);
 err_out_iounmap:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2645985..34c551e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -106,14 +106,14 @@
 #define E1000_FEXTNVM11_DISABLE_MULR_FIX	0x00002000
 
 /* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
-#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+#define E1000_RXDCTL_THRESH_UNIT_DESC	0x01000000
 
 #define K1_ENTRY_LATENCY	0
 #define K1_MIN_TIME		1
 #define NVM_SIZE_MULTIPLIER 4096	/*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000	/*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3	/*NVM valid sector */
-
+#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29)
 #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES	7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 89d788d..faf4b3f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -48,7 +48,7 @@
 
 #define DRV_EXTRAVERSION "-k"
 
-#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -1737,12 +1737,6 @@
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
-
-	writel(0, rx_ring->head);
-	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		e1000e_update_rdt_wa(rx_ring, 0);
-	else
-		writel(0, rx_ring->tail);
 }
 
 static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2447,12 +2441,6 @@
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-
-	writel(0, tx_ring->head);
-	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		e1000e_update_tdt_wa(tx_ring, 0);
-	else
-		writel(0, tx_ring->tail);
 }
 
 /**
@@ -2954,6 +2942,12 @@
 	tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
 	tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
 
+	writel(0, tx_ring->head);
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_tdt_wa(tx_ring, 0);
+	else
+		writel(0, tx_ring->tail);
+
 	/* Set the Tx Interrupt Delay register */
 	ew32(TIDV, adapter->tx_int_delay);
 	/* Tx irq moderation */
@@ -3275,6 +3269,12 @@
 	rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
 	rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
 
+	writel(0, rx_ring->head);
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_rdt_wa(rx_ring, 0);
+	else
+		writel(0, rx_ring->tail);
+
 	/* Enable Receive Checksum Offload for TCP and UDP */
 	rxcsum = er32(RXCSUM);
 	if (adapter->netdev->features & NETIF_F_RXCSUM)
@@ -4280,18 +4280,29 @@
 	struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
 						     cc);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 systimel_1, systimel_2, systimeh;
 	cycle_t systim, systim_next;
-	/* SYSTIMH latching upon SYSTIML read does not work well. To fix that
-	 * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
-	 * to occur between reads, so if we read a vale close to overflow, we
-	 * wait for overflow to occur and read both registers when its safe.
+	/* SYSTIMH latching upon SYSTIML read does not work well.
+	 * This means that if SYSTIML overflows after we read it but before
+	 * we read SYSTIMH, the value of SYSTIMH has been incremented and we
+	 * will experience a huge non-linear increment in the systime value.
+	 * To fix that, we test for overflow and, if true, re-read systime.
 	 */
-	u32 systim_overflow_latch_fix = 0x3FFFFFFF;
-
-	do {
-		systim = (cycle_t)er32(SYSTIML);
-	} while (systim > systim_overflow_latch_fix);
-	systim |= (cycle_t)er32(SYSTIMH) << 32;
+	systimel_1 = er32(SYSTIML);
+	systimeh = er32(SYSTIMH);
+	systimel_2 = er32(SYSTIML);
+	/* Check for overflow. If there was no overflow, use the values */
+	if (systimel_1 < systimel_2) {
+		systim = (cycle_t)systimel_1;
+		systim |= (cycle_t)systimeh << 32;
+	} else {
+		/* There was an overflow; read SYSTIMH again and use
+		 * systimel_2.
+		 */
+		systimeh = er32(SYSTIMH);
+		systim = (cycle_t)systimel_2;
+		systim |= (cycle_t)systimeh << 32;
+	}
 
 	if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
 		u64 incvalue, time_delta, rem, temp;
@@ -4588,6 +4599,7 @@
 	return 0;
 
 err_req_irq:
+	pm_qos_remove_request(&adapter->pm_qos_req);
 	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter->rx_ring);
@@ -6316,6 +6328,33 @@
 			return retval;
 	}
 
+	/* Ensure that the appropriate bits are set in LPI_CTRL
+	 * for EEE in Sx
+	 */
+	if ((hw->phy.type >= e1000_phy_i217) &&
+	    adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) {
+		u16 lpi_ctrl = 0;
+
+		retval = hw->phy.ops.acquire(hw);
+		if (!retval) {
+			retval = e1e_rphy_locked(hw, I82579_LPI_CTRL,
+						 &lpi_ctrl);
+			if (!retval) {
+				if (adapter->eee_advert &
+				    hw->dev_spec.ich8lan.eee_lp_ability &
+				    I82579_EEE_100_SUPPORTED)
+					lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
+				if (adapter->eee_advert &
+				    hw->dev_spec.ich8lan.eee_lp_ability &
+				    I82579_EEE_1000_SUPPORTED)
+					lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
+
+				retval = e1e_wphy_locked(hw, I82579_LPI_CTRL,
+							 lpi_ctrl);
+			}
+		}
+		hw->phy.ops.release(hw);
+	}
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
@@ -6465,7 +6504,7 @@
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
 	if (aspm_disable_flag)
-		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
 
 	pci_set_master(pdev);
 
@@ -6743,7 +6782,7 @@
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
 		aspm_disable_flag |= PCIE_LINK_STATE_L1;
 	if (aspm_disable_flag)
-		e1000e_disable_aspm(pdev, aspm_disable_flag);
+		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
 
 	err = pci_enable_device_mem(pdev);
 	if (err) {
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index b24e5fe..1d5e0b7 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -38,8 +38,8 @@
 #define E1000_FEXTNVM4	0x00024	/* Future Extended NVM 4 - RW */
 #define E1000_FEXTNVM6	0x00010	/* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7	0x000E4	/* Future Extended NVM 7 - RW */
-#define E1000_FEXTNVM9	0x5BB4  /* Future Extended NVM 9 - RW */
-#define E1000_FEXTNVM11	0x5BBC  /* Future Extended NVM 11 - RW */
+#define E1000_FEXTNVM9	0x5BB4	/* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11	0x5BBC	/* Future Extended NVM 11 - RW */
 #define E1000_PCIEANACFG	0x00F18	/* PCIE Analog Config */
 #define E1000_FCT	0x00030	/* Flow Control Type - RW */
 #define E1000_VET	0x00038	/* VLAN Ether Type - RW */
@@ -125,7 +125,6 @@
 				 (0x054E4 + ((_i - 16) * 8)))
 #define E1000_SHRAL(_i)		(0x05438 + ((_i) * 8))
 #define E1000_SHRAH(_i)		(0x0543C + ((_i) * 8))
-#define E1000_TARC0_CB_MULTIQ_3_REQ	(1 << 28 | 1 << 29)
 #define E1000_TDFH		0x03410	/* Tx Data FIFO Head - RW */
 #define E1000_TDFT		0x03418	/* Tx Data FIFO Tail - RW */
 #define E1000_TDFHS		0x03420	/* Tx Data FIFO Head Saved - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index ec76c3f..e746279 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -79,10 +79,13 @@
 #define I40E_MIN_MSIX                 2
 #define I40E_DEFAULT_NUM_VMDQ_VSI     8 /* max 256 VSIs */
 #define I40E_MIN_VSI_ALLOC            51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
-#define I40E_DEFAULT_QUEUES_PER_VMDQ  2 /* max 16 qps */
+/* max 16 qps */
+#define i40e_default_queues_per_vmdq(pf) \
+		(((pf)->flags & I40E_FLAG_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF    4
 #define I40E_DEFAULT_QUEUES_PER_TC    1 /* should be a power of 2 */
-#define I40E_MAX_QUEUES_PER_TC        64 /* should be a power of 2 */
+#define i40e_pf_get_max_q_per_tc(pf) \
+		(((pf)->flags & I40E_FLAG_128_QP_RSS_CAPABLE) ? 128 : 64)
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
 #ifdef I40E_FCOE
@@ -98,7 +101,7 @@
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG	(1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG	BIT(0)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +292,42 @@
 	struct work_struct service_task;
 
 	u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED		BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED			BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED			BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED		BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED			BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED			BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED			BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT		BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE		BIT_ULL(9)
+#define I40E_FLAG_IWARP_ENABLED			BIT_ULL(10)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED			BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
-#define I40E_FLAG_PTP                          (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL			BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED	BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ			BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC			BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT		BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT		BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED			BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED			BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED			BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED		BIT_ULL(22)
+#define I40E_FLAG_PTP				BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED			BIT_ULL(26)
 #ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC		BIT_ULL(27)
 #endif
-#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID			BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE			BIT_ULL(29)
+#define I40E_FLAG_RSS_AQ_CAPABLE		BIT_ULL(31)
+#define I40E_FLAG_HW_ATR_EVICT_CAPABLE		BIT_ULL(32)
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT_ULL(33)
+#define I40E_FLAG_128_QP_RSS_CAPABLE		BIT_ULL(34)
+#define I40E_FLAG_WB_ON_ITR_CAPABLE		BIT_ULL(35)
+#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE	BIT_ULL(38)
 #define I40E_FLAG_VEB_MODE_ENABLED		BIT_ULL(40)
 
 	/* tracks features that get auto disabled by errors */
@@ -362,6 +372,7 @@
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *i40e_dbg_pf;
 #endif /* CONFIG_DEBUG_FS */
+	bool cur_promisc;
 
 	u16 instance; /* A unique number per i40e_pf instance in the system */
 
@@ -432,6 +443,8 @@
 	bool stat_offsets_loaded;
 	struct i40e_eth_stats stats;
 	struct i40e_eth_stats stats_offsets;
+	struct i40e_veb_tc_stats tc_stats;
+	struct i40e_veb_tc_stats tc_stats_offsets;
 };
 
 /* struct that defines a VSI, associated with a dev */
@@ -443,8 +456,8 @@
 
 	u32 current_netdev_flags;
 	unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED	BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER		BIT(1)
 	unsigned long flags;
 
 	struct list_head mac_filter_list;
@@ -550,6 +563,7 @@
 	cpumask_t affinity_mask;
 	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
+	bool arm_wb_state;
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
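
The i40e.h changes above swap the open-coded (u64)(1 << n) flag definitions for the kernel's BIT()/BIT_ULL() helpers and turn two fixed limits into per-PF macros keyed off the new capability flags. As a rough, standalone sketch (not part of this patch) of what the helpers expand to and how a 64-bit pf->flags bit is tested:

	/* BIT()/BIT_ULL() come from include/linux/bitops.h; reproduced here
	 * only to illustrate the shifts they replace in the table above:
	 *   #define BIT(nr)      (1UL << (nr))
	 *   #define BIT_ULL(nr)  (1ULL << (nr))
	 */
	static inline bool example_pf_has_rss_aq(u64 pf_flags)
	{
		/* I40E_FLAG_RSS_AQ_CAPABLE is BIT_ULL(31) in the list above */
		return (pf_flags & BIT_ULL(31)) != 0;
	}
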
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 929e3d7..95d23bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0002
+#define I40E_FW_API_VERSION_MINOR	0x0004
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -132,12 +132,7 @@
 	i40e_aqc_opc_list_func_capabilities	= 0x000A,
 	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
-	i40e_aqc_opc_set_cppm_configuration	= 0x0103,
-	i40e_aqc_opc_set_arp_proxy_entry	= 0x0104,
-	i40e_aqc_opc_set_ns_proxy_entry		= 0x0105,
-
 	/* LAA */
-	i40e_aqc_opc_mng_laa		= 0x0106,   /* AQ obsolete */
 	i40e_aqc_opc_mac_address_read	= 0x0107,
 	i40e_aqc_opc_mac_address_write	= 0x0108,
 
@@ -262,7 +257,10 @@
 	/* Tunnel commands */
 	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
 	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
-	i40e_aqc_opc_tunnel_key_structure	= 0x0B10,
+	i40e_aqc_opc_set_rss_key	= 0x0B02,
+	i40e_aqc_opc_set_rss_lut	= 0x0B03,
+	i40e_aqc_opc_get_rss_key	= 0x0B04,
+	i40e_aqc_opc_get_rss_lut	= 0x0B05,
 
 	/* Async Events */
 	i40e_aqc_opc_event_lan_overflow		= 0x1001,
@@ -274,8 +272,6 @@
 	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
 
 	/* debug commands */
-	i40e_aqc_opc_debug_get_deviceid		= 0xFF00,
-	i40e_aqc_opc_debug_set_mode		= 0xFF01,
 	i40e_aqc_opc_debug_read_reg		= 0xFF03,
 	i40e_aqc_opc_debug_write_reg		= 0xFF04,
 	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
@@ -509,7 +505,8 @@
 #define I40E_AQC_SAN_ADDR_VALID		0x20
 #define I40E_AQC_PORT_ADDR_VALID	0x40
 #define I40E_AQC_WOL_ADDR_VALID		0x80
-#define I40E_AQC_ADDR_VALID_MASK	0xf0
+#define I40E_AQC_MC_MAG_EN_VALID	0x100
+#define I40E_AQC_ADDR_VALID_MASK	0x1F0
 	u8	reserved[6];
 	__le32	addr_high;
 	__le32	addr_low;
@@ -532,7 +529,9 @@
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
 #define I40E_AQC_WRITE_TYPE_PORT	0x8000
-#define I40E_AQC_WRITE_TYPE_MASK	0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
+#define I40E_AQC_WRITE_TYPE_MASK	0xC000
+
 	__le16	mac_sah;
 	__le32	mac_sal;
 	u8	reserved[8];
@@ -826,8 +825,12 @@
 					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 	/* queueing option section */
 	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
 	u8	queueing_opt_reserved[3];
 	/* scheduler section */
 	u8	up_enable_bits;
@@ -1068,6 +1071,7 @@
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK		0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
 	u8	reserved[8];
 };
@@ -2064,6 +2068,12 @@
 #define I40E_AQC_CEE_PFC_STATUS_MASK	(0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
 #define I40E_AQC_CEE_APP_STATUS_SHIFT	0x8
 #define I40E_AQC_CEE_APP_STATUS_MASK	(0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT	0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK	(0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK	(0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT	0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK	(0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
 	u8	reserved1;
 	u8	oper_num_tc;
@@ -2177,6 +2187,46 @@
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+	__le16	vsi_id;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+	__le16	vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
+	__le16	flags;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 0bae22d..114dc64 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -54,6 +54,15 @@
 		case I40E_DEV_ID_20G_KR2:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
+		case I40E_DEV_ID_SFP_X722:
+		case I40E_DEV_ID_1G_BASE_T_X722:
+		case I40E_DEV_ID_10G_BASE_T_X722:
+			hw->mac.type = I40E_MAC_X722;
+			break;
+		case I40E_DEV_ID_X722_VF:
+		case I40E_DEV_ID_X722_VF_HV:
+			hw->mac.type = I40E_MAC_X722_VF;
+			break;
 		case I40E_DEV_ID_VF:
 		case I40E_DEV_ID_VF_HV:
 			hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@
 }
 
 /**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
  * @mask: debug mask
@@ -177,6 +392,169 @@
 	return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+					   u16 vsi_id, bool pf_lut,
+					   u8 *lut, u16 lut_size,
+					   bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+	if (set)
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_set_rss_lut);
+	else
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_get_rss_lut);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+	if (pf_lut)
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+	else
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       false);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * get the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_set_rss_key);
+	else
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * get the RSS key per VSI
+ **/
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+				u16 vsi_id,
+				struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+				u16 vsi_id,
+				struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
 /* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
  * packet type.
@@ -563,6 +941,7 @@
 
 	switch (hw->mac.type) {
 	case I40E_MAC_XL710:
+	case I40E_MAC_X722:
 		break;
 	default:
 		return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1187,9 +1566,9 @@
 			blink = false;
 
 		if (blink)
-			gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 		else
-			gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
 		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
 		break;
@@ -2391,7 +2770,7 @@
 #define I40E_DEV_FUNC_CAP_MSIX_VF	0x44
 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR	0x45
 #define I40E_DEV_FUNC_CAP_IEEE_1588	0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1	0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10	0xF1
 #define I40E_DEV_FUNC_CAP_CEM		0xF2
 #define I40E_DEV_FUNC_CAP_IWARP		0x51
 #define I40E_DEV_FUNC_CAP_LED		0x61
@@ -2416,6 +2795,7 @@
 	u32 valid_functions, num_functions;
 	u32 number, logical_id, phys_id;
 	struct i40e_hw_capabilities *p;
+	u8 major_rev;
 	u32 i = 0;
 	u16 id;
 
@@ -2433,6 +2813,7 @@
 		number = le32_to_cpu(cap->number);
 		logical_id = le32_to_cpu(cap->logical_id);
 		phys_id = le32_to_cpu(cap->phys_id);
+		major_rev = cap->major_rev;
 
 		switch (id) {
 		case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2888,21 @@
 		case I40E_DEV_FUNC_CAP_MSIX_VF:
 			p->num_msix_vectors_vf = number;
 			break;
-		case I40E_DEV_FUNC_CAP_MFP_MODE_1:
-			if (number == 1)
-				p->mfp_mode_1 = true;
+		case I40E_DEV_FUNC_CAP_FLEX10:
+			if (major_rev == 1) {
+				if (number == 1) {
+					p->flex10_enable = true;
+					p->flex10_capable = true;
+				}
+			} else {
+				/* Capability revision >= 2 */
+				if (number & 1)
+					p->flex10_enable = true;
+				if (number & 2)
+					p->flex10_capable = true;
+			}
+			p->flex10_mode = logical_id;
+			p->flex10_status = phys_id;
 			break;
 		case I40E_DEV_FUNC_CAP_CEM:
 			if (number == 1)
@@ -2557,7 +2950,7 @@
 	/* Software override ensuring FCoE is disabled if npar or mfp
 	 * mode because it is not supported in these modes.
 	 */
-	if (p->npar_enable || p->mfp_mode_1)
+	if (p->npar_enable || p->flex10_enable)
 		p->fcoe = false;
 
 	/* count the enabled ports (aka the "not disabled" ports) */
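
The common.c additions above introduce string decoders for status and admin-queue error codes, plus paired get/set wrappers around a shared internal helper for the new RSS key and LUT admin-queue commands. A minimal, hypothetical caller (the function name and buffers are illustrative, not taken from this patch) that programs the PF-wide RSS configuration might look like:

	/* Hypothetical sketch only: program the PF RSS key and lookup table
	 * using the wrappers added above.  'seed' and 'lut' are caller-owned
	 * DMA-able buffers; the key layout follows the new
	 * struct i40e_aqc_get_set_rss_key_data (0x28 + 0xc bytes).
	 */
	static i40e_status example_config_rss(struct i40e_hw *hw, u16 vsi_id,
					      struct i40e_aqc_get_set_rss_key_data *seed,
					      u8 *lut, u16 lut_size)
	{
		i40e_status status;

		status = i40e_aq_set_rss_key(hw, vsi_id, seed);
		if (status)
			return status;

		/* pf_lut = true selects the PF-wide table, false the per-VSI table */
		return i40e_aq_set_rss_lut(hw, vsi_id, true, lut, lut_size);
	}
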
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 2547aa2..90de46a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -588,6 +588,8 @@
 		if (!ret) {
 			/* CEE mode */
 			hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+			hw->local_dcbx_config.tlv_status =
+					le16_to_cpu(cee_v1_cfg.tlv_status);
 			i40e_cee_to_dcb_v1_config(&cee_v1_cfg,
 						  &hw->local_dcbx_config);
 		}
@@ -597,6 +599,8 @@
 		if (!ret) {
 			/* CEE mode */
 			hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+			hw->local_dcbx_config.tlv_status =
+					le32_to_cpu(cee_cfg.tlv_status);
 			i40e_cee_to_dcb_config(&cee_cfg,
 					       &hw->local_dcbx_config);
 		}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index e137e3f..50fc894 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -58,9 +58,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT	0
 #define I40E_IEEE_ETS_MAXTC_MASK	(0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT		6
-#define I40E_IEEE_ETS_CBS_MASK		(0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK		BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT	7
-#define I40E_IEEE_ETS_WILLING_MASK	(0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK	BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT	0
 #define I40E_IEEE_ETS_PRIO_0_MASK	(0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT	4
@@ -79,9 +79,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT		0
 #define I40E_IEEE_PFC_CAP_MASK		(0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT		6
-#define I40E_IEEE_PFC_MBC_MASK		(0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK		BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT	7
-#define I40E_IEEE_PFC_WILLING_MASK	(0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK	BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT		0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index bd5079d..1c51f73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -187,7 +187,7 @@
 	/* Set up all the App TLVs if DCBx is negotiated */
 	for (i = 0; i < dcbxcfg->numapps; i++) {
 		prio = dcbxcfg->app[i].priority;
-		tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+		tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
 		/* Add APP only if the TC is enabled for this VSI */
 		if (tc_map & vsi->tc_config.enabled_tc) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da0faf4..d7c15d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -964,7 +964,7 @@
 		pf->auto_disable_flags |= flag;
 	}
 	dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-	i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+	i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 }
 
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@
 		}
 	} else if (strncmp(cmd_buf, "pfr", 3) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "corer", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "globr", 5) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "empr", 4) == 0) {
 		dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-		i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+		i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
 
 	} else if (strncmp(cmd_buf, "read", 4) == 0) {
 		u32 address;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index 56438bd..f141e78 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -144,11 +144,8 @@
 	ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
 	if (!ret_code &&
 	    ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-	     (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-		ret_code = i40e_validate_nvm_checksum(hw, NULL);
-	} else {
-		ret_code = I40E_ERR_DIAG_TEST_FAILED;
-	}
-
-	return ret_code;
+	     BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+		return i40e_validate_nvm_checksum(hw, NULL);
+	else
+		return I40E_ERR_DIAG_TEST_FAILED;
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a68c65..e972b5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -114,7 +114,7 @@
 	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
 	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
 	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
-	I40E_PF_STAT("crc_errors", stats.crc_errors),
+	I40E_PF_STAT("rx_crc_errors", stats.crc_errors),
 	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
 	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
 	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
@@ -148,7 +148,9 @@
 	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
 	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
 	I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+	I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
 	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+	I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
 
 	/* LPI stats */
 	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -195,7 +197,14 @@
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
 		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
 		 / sizeof(u64))
+#define I40E_VEB_TC_STATS_LEN ( \
+		(FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_packets) + \
+		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_rx_bytes) + \
+		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_packets) + \
+		 FIELD_SIZEOF(struct i40e_veb, tc_stats.tc_tx_bytes)) \
+		 / sizeof(u64))
 #define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
+#define I40E_VEB_STATS_TOTAL	(I40E_VEB_STATS_LEN + I40E_VEB_TC_STATS_LEN)
 #define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
 				 I40E_PFC_STATS_LEN + \
 				 I40E_VSI_STATS_LEN((n)))
@@ -679,15 +688,17 @@
 		/* make the aq call */
 		status = i40e_aq_set_phy_config(hw, &config, NULL);
 		if (status) {
-			netdev_info(netdev, "Set phy config failed with error %d.\n",
-				    status);
+			netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
 			return -EAGAIN;
 		}
 
 		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
 		if (status)
-			netdev_info(netdev, "Updating link info failed with error %d\n",
-				    status);
+			netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+				    i40e_stat_str(hw, status),
+				    i40e_aq_str(hw, hw->aq.asq_last_status));
 
 	} else {
 		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +718,9 @@
 
 	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
 	if (ret) {
-		netdev_info(netdev, "link restart failed, aq_err=%d\n",
-			    pf->hw.aq.asq_last_status);
+		netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+			    i40e_stat_str(hw, ret),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		return -EIO;
 	}
 
@@ -820,18 +832,21 @@
 	status = i40e_set_fc(hw, &aq_failures, link_up);
 
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
-			    status, hw->aq.asq_last_status);
+		netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+			    i40e_stat_str(hw, status),
+			    i40e_aq_str(hw, hw->aq.asq_last_status));
 		err = -EAGAIN;
 	}
 
@@ -1009,7 +1024,7 @@
 		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
 		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
 	/* register returns value in power of 2, 64Kbyte chunks. */
-	val = (64 * 1024) * (1 << val);
+	val = (64 * 1024) * BIT(val);
 	return val;
 }
 
@@ -1249,7 +1264,7 @@
 			int len = I40E_PF_STATS_LEN(netdev);
 
 			if (pf->lan_veb != I40E_NO_VEB)
-				len += I40E_VEB_STATS_LEN;
+				len += I40E_VEB_STATS_TOTAL;
 			return len;
 		} else {
 			return I40E_VSI_STATS_LEN(netdev);
@@ -1400,6 +1415,20 @@
 					i40e_gstrings_veb_stats[i].stat_string);
 				p += ETH_GSTRING_LEN;
 			}
+			for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_tx_packets", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_tx_bytes", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_rx_packets", i);
+				p += ETH_GSTRING_LEN;
+				snprintf(p, ETH_GSTRING_LEN,
+					 "veb.tc_%u_rx_bytes", i);
+				p += ETH_GSTRING_LEN;
+			}
 		}
 		for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
 			snprintf(p, ETH_GSTRING_LEN, "port.%s",
@@ -1462,20 +1491,11 @@
 	else
 		info->phc_index = -1;
 
-	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
 
-	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 	return 0;
 }
@@ -1560,6 +1580,21 @@
 	return false;
 }
 
+static inline bool i40e_active_vmdqs(struct i40e_pf *pf)
+{
+	struct i40e_vsi **vsi = pf->vsi;
+	int i;
+
+	for (i = 0; i < pf->num_alloc_vsi; i++) {
+		if (!vsi[i])
+			continue;
+		if (vsi[i]->type == I40E_VSI_VMDQ2)
+			return true;
+	}
+
+	return false;
+}
+
 static void i40e_diag_test(struct net_device *netdev,
 			   struct ethtool_test *eth_test, u64 *data)
 {
@@ -1573,9 +1608,9 @@
 
 		set_bit(__I40E_TESTING, &pf->state);
 
-		if (i40e_active_vfs(pf)) {
+		if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
 			dev_warn(&pf->pdev->dev,
-				 "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+				 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
 			data[I40E_ETH_TEST_REG]		= 1;
 			data[I40E_ETH_TEST_EEPROM]	= 1;
 			data[I40E_ETH_TEST_INTR]	= 1;
@@ -1591,11 +1626,13 @@
 			/* indicate we're in test mode */
 			dev_close(netdev);
 		else
-			i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+			/* This reset does not affect link - if it is
+			 * changed to a type of reset that does affect
+			 * link then the following link test would have
+			 * to be moved to before the reset
+			 */
+			i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
-		/* Link test performed before hardware reset
-		 * so autoneg doesn't interfere with test result
-		 */
 		if (i40e_link_test(netdev, &data[I40E_ETH_TEST_LINK]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1613,7 +1650,7 @@
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__I40E_TESTING, &pf->state);
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
 		if (if_running)
 			dev_open(netdev);
@@ -1646,7 +1683,7 @@
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+	if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
 		wol->supported = 0;
 		wol->wolopts = 0;
 	} else {
@@ -1679,7 +1716,7 @@
 
 	/* NVM bit on means WoL disabled for the port */
 	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-	if (((1 << hw->port) & wol_nvm_bits))
+	if (BIT(hw->port) & wol_nvm_bits)
 		return -EOPNOTSUPP;
 
 	/* only magic packet is supported */
@@ -2025,10 +2062,10 @@
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -2037,10 +2074,10 @@
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -2049,12 +2086,12 @@
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -2063,12 +2100,12 @@
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -2081,7 +2118,7 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
 		break;
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -2090,15 +2127,15 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
 		break;
 	case IPV4_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
 		break;
 	case IPV6_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
 		break;
 	default:
 		return -EINVAL;
@@ -2509,7 +2546,7 @@
  * @indir: indirection table
  * @key: hash key
  *
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
  * returns 0 after programming the table.
  **/
 static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index c8b621e..5ea75dd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -298,8 +298,8 @@
 
 	/* enable FCoE hash filter */
 	val = rd32(hw, I40E_PFQF_HENA(1));
-	val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
-	val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+	val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+	val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
 	val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
 	wr32(hw, I40E_PFQF_HENA(1), val);
 
@@ -308,10 +308,10 @@
 	pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
 
 	/* Reserve 4K DDP contexts and 20K filter size for FCoE */
-	pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
-				 I40E_DMA_CNTX_BASE_SIZE;
+	pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+				I40E_DMA_CNTX_BASE_SIZE;
 	pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
-				(1 << I40E_HASH_FILTER_SIZE_16K) *
+				BIT(I40E_HASH_FILTER_SIZE_16K) *
 				I40E_HASH_FILTER_BASE_SIZE;
 
 	/* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@
 		if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
 		    app.protocolid == ETH_P_FCOE) {
 			tc = dcbcfg->etscfg.prioritytable[app.priority];
-			enabled_tc |= (1 << tc);
+			enabled_tc |= BIT(tc);
 			break;
 		}
 	}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
index 0d49e2d..a93174d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
@@ -59,9 +59,9 @@
 	(((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
 
 #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT	\
-	(1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+	BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
 #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT	\
-	(1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+	BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
 
 #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)	\
 	I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 9b987cc..5ebe12d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -116,6 +116,7 @@
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
  *
  * This function:
  *	1. Initializes the pd entry
@@ -129,12 +130,14 @@
  **/
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index)
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg)
 {
 	i40e_status ret_code = 0;
 	struct i40e_hmc_pd_table *pd_table;
 	struct i40e_hmc_pd_entry *pd_entry;
 	struct i40e_dma_mem mem;
+	struct i40e_dma_mem *page = &mem;
 	u32 sd_idx, rel_pd_idx;
 	u64 *pd_addr;
 	u64 page_desc;
@@ -155,18 +158,24 @@
 	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
 	pd_entry = &pd_table->pd_entry[rel_pd_idx];
 	if (!pd_entry->valid) {
-		/* allocate a 4K backing page */
-		ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-						 I40E_HMC_PAGED_BP_SIZE,
-						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
-		if (ret_code)
-			goto exit;
+		if (rsrc_pg) {
+			pd_entry->rsrc_pg = true;
+			page = rsrc_pg;
+		} else {
+			/* allocate a 4K backing page */
+			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+						I40E_HMC_PAGED_BP_SIZE,
+						I40E_HMC_PD_BP_BUF_ALIGNMENT);
+			if (ret_code)
+				goto exit;
+			pd_entry->rsrc_pg = false;
+		}
 
-		pd_entry->bp.addr = mem;
+		pd_entry->bp.addr = *page;
 		pd_entry->bp.sd_pd_index = pd_index;
 		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
 		/* Set page address and valid bit */
-		page_desc = mem.pa | 0x1;
+		page_desc = page->pa | 0x1;
 
 		pd_addr = (u64 *)pd_table->pd_page_addr.va;
 		pd_addr += rel_pd_idx;
@@ -240,7 +249,8 @@
 	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
 	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+	if (!pd_entry->rsrc_pg)
+		ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
 	if (ret_code)
 		goto exit;
 	if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@
 					    u32 idx, bool is_pf)
 {
 	struct i40e_hmc_sd_entry *sd_entry;
-	i40e_status ret_code = 0;
+
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
 
 	/* get the entry and decrease its ref counter */
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
 }
 
 /**
@@ -341,20 +345,13 @@
 					      struct i40e_hmc_info *hmc_info,
 					      u32 idx, bool is_pf)
 {
-	i40e_status ret_code = 0;
 	struct i40e_hmc_sd_entry *sd_entry;
 
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
+
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
-	if (is_pf) {
-		I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-	} else {
-		ret_code = I40E_NOT_SUPPORTED;
-		goto exit;
-	}
-	/* free memory here */
-	ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-	if (ret_code)
-		goto exit;
-exit:
-	return ret_code;
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+	return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
 }
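
i40e_add_pd_table_entry() above gains an optional preallocated backing page: passing NULL keeps the old behaviour of allocating a fresh 4K page per PD entry, while a non-NULL i40e_dma_mem is mapped directly and, via the new rsrc_pg flag, is not freed later by i40e_remove_pd_bp(). A hedged sketch of the two call styles (the wrapper name is illustrative, not from this patch):

	/* Hypothetical wrapper: add a page descriptor entry, reusing a
	 * caller-owned page when one is available.
	 */
	static i40e_status example_add_pd_entry(struct i40e_hw *hw,
						struct i40e_hmc_info *hmc_info,
						u32 pd_index,
						struct i40e_dma_mem *prealloc)
	{
		/* NULL -> HMC allocates (and later frees) a 4K backing page;
		 * non-NULL -> 'prealloc' is used and ownership stays with the caller.
		 */
		return i40e_add_pd_table_entry(hw, hmc_info, pd_index, prealloc);
	}
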
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 732a026..d906692 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -62,6 +62,7 @@
 struct i40e_hmc_pd_entry {
 	struct i40e_hmc_bp bp;
 	u32 sd_index;
+	bool rsrc_pg;
 	bool valid;
 };
 
@@ -126,8 +127,8 @@
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -146,7 +147,7 @@
 		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -218,7 +219,8 @@
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index);
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 					struct i40e_hmc_info *hmc_info,
 					u32 idx);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 0079ad7..fa371a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -129,7 +129,7 @@
 	obj->cnt = txq_num;
 	obj->base = 0;
 	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@
 		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
 	obj->base = i40e_align_l2obj_base(obj->base);
 	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
-	obj->size = (u64)1 << size_exp;
+	obj->size = BIT_ULL(size_exp);
 
 	/* validate values requested by driver don't exceed HMC capacity */
 	if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@
 				/* update the pd table entry */
 				ret_code = i40e_add_pd_table_entry(hw,
 								info->hmc_info,
-								i);
+								i, NULL);
 				if (ret_code) {
 					pd_error = true;
 					break;
@@ -763,7 +763,7 @@
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = ((u8)1 << ce_info->width) - 1;
+	mask = BIT(ce_info->width) - 1;
 
 	src_byte = *from;
 	src_byte &= mask;
@@ -804,7 +804,7 @@
 
 	/* prepare the bits and mask */
 	shift_width = ce_info->lsb % 8;
-	mask = ((u16)1 << ce_info->width) - 1;
+	mask = BIT(ce_info->width) - 1;
 
 	/* don't swizzle the bits until after the mask because the mask bits
 	 * will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@
 	 * to 5 bits so the shift will do nothing
 	 */
 	if (ce_info->width < 32)
-		mask = ((u32)1 << ce_info->width) - 1;
+		mask = BIT(ce_info->width) - 1;
 	else
 		mask = ~(u32)0;
 
@@ -906,7 +906,7 @@
 	 * to 6 bits so the shift will do nothing
 	 */
 	if (ce_info->width < 64)
-		mask = ((u64)1 << ce_info->width) - 1;
+		mask = BIT_ULL(ce_info->width) - 1;
 	else
 		mask = ~(u64)0;
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 48a52b3..851c1a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 9
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -76,6 +76,9 @@
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
 	/* required last entry */
 	{0, }
 };
@@ -520,7 +523,7 @@
 	if (likely(new_data >= *offset))
 		*stat = new_data - *offset;
 	else
-		*stat = (new_data + ((u64)1 << 48)) - *offset;
+		*stat = (new_data + BIT_ULL(48)) - *offset;
 	*stat &= 0xFFFFFFFFFFFFULL;
 }
 
@@ -543,7 +546,7 @@
 	if (likely(new_data >= *offset))
 		*stat = (u32)(new_data - *offset);
 	else
-		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
 /**
@@ -621,11 +624,15 @@
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
-	int idx = 0;
+	struct i40e_veb_tc_stats *veb_oes;
+	struct i40e_veb_tc_stats *veb_es;
+	int i, idx = 0;
 
 	idx = veb->stats_idx;
 	es = &veb->stats;
 	oes = &veb->stats_offsets;
+	veb_es = &veb->tc_stats;
+	veb_oes = &veb->tc_stats_offsets;
 
 	/* Gather up the stats that the hw collects */
 	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
@@ -661,6 +668,28 @@
 	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
 			   veb->stat_offsets_loaded,
 			   &oes->tx_broadcast, &es->tx_broadcast);
+	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
+				   I40E_GLVEBTC_RPCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_rx_packets[i],
+				   &veb_es->tc_rx_packets[i]);
+		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
+				   I40E_GLVEBTC_RBCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_rx_bytes[i],
+				   &veb_es->tc_rx_bytes[i]);
+		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
+				   I40E_GLVEBTC_TPCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_tx_packets[i],
+				   &veb_es->tc_tx_packets[i]);
+		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
+				   I40E_GLVEBTC_TBCL(i, idx),
+				   veb->stat_offsets_loaded,
+				   &veb_oes->tc_tx_bytes[i],
+				   &veb_es->tc_tx_bytes[i]);
+	}
 	veb->stat_offsets_loaded = true;
 }
 
@@ -1123,6 +1152,18 @@
 			   pf->stat_offsets_loaded,
 			   &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
+	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+		nsd->fd_sb_status = true;
+	else
+		nsd->fd_sb_status = false;
+
+	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+		nsd->fd_atr_status = true;
+	else
+		nsd->fd_atr_status = false;
+
 	pf->stat_offsets_loaded = true;
 }
 
@@ -1240,6 +1281,8 @@
 	struct i40e_mac_filter *f;
 
 	list_for_each_entry(f, &vsi->mac_filter_list, list) {
+		if (vsi->info.pvid)
+			f->vlan = le16_to_cpu(vsi->info.pvid);
 		if (!i40e_find_filter(vsi, macaddr, f->vlan,
 				      is_vf, is_netdev)) {
 			if (!i40e_add_filter(vsi, macaddr, f->vlan,
@@ -1264,7 +1307,7 @@
 {
 	struct i40e_aqc_remove_macvlan_element_data element;
 	struct i40e_pf *pf = vsi->back;
-	i40e_status aq_ret;
+	i40e_status ret;
 
 	/* Only appropriate for the PF main VSI */
 	if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1318,8 @@
 	element.vlan_tag = 0;
 	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
 			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-	aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
-	if (aq_ret)
+	ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+	if (ret)
 		return -ENOENT;
 
 	return 0;
@@ -1514,7 +1557,7 @@
 	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
 		/* Find numtc from enabled TC bitmap */
 		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-			if (enabled_tc & (1 << i)) /* TC is enabled */
+			if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
 				numtc++;
 		}
 		if (!numtc) {
@@ -1533,14 +1576,18 @@
 	 * vectors available and so we need to lower the used
 	 * q count.
 	 */
-	qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+		qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+	else
+		qcount = vsi->alloc_queue_pairs;
 	num_tc_qps = qcount / numtc;
-	num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
+	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
 
 	/* Setup queue offset/count for all TCs for given VSI */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		/* See if the given TC is enabled for the given VSI */
-		if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+		if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+			/* TC is enabled */
 			int pow, num_qps;
 
 			switch (vsi->type) {
@@ -1566,7 +1613,7 @@
 			/* find the next higher power-of-2 of num queue pairs */
 			num_qps = qcount;
 			pow = 0;
-			while (num_qps && ((1 << pow) < qcount)) {
+			while (num_qps && (BIT_ULL(pow) < qcount)) {
 				pow++;
 				num_qps >>= 1;
 			}
@@ -1596,7 +1643,7 @@
 	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
 		if (vsi->req_queue_pairs > 0)
 			vsi->num_queue_pairs = vsi->req_queue_pairs;
-		else
+		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 			vsi->num_queue_pairs = pf->num_lan_msix;
 	}
 
@@ -1716,10 +1763,11 @@
 	bool add_happened = false;
 	int filter_list_len = 0;
 	u32 changed_flags = 0;
-	i40e_status aq_ret = 0;
+	i40e_status ret = 0;
 	struct i40e_pf *pf;
 	int num_add = 0;
 	int num_del = 0;
+	int aq_err = 0;
 	u16 cmd_flags;
 
 	/* empty array typed pointers, kcalloc later */
@@ -1771,31 +1819,31 @@
 
 			/* flush a full buffer */
 			if (num_del == filter_list_len) {
-				aq_ret = i40e_aq_remove_macvlan(&pf->hw,
-					    vsi->seid, del_list, num_del,
-					    NULL);
+				ret = i40e_aq_remove_macvlan(&pf->hw,
+						  vsi->seid, del_list, num_del,
+						  NULL);
+				aq_err = pf->hw.aq.asq_last_status;
 				num_del = 0;
 				memset(del_list, 0, sizeof(*del_list));
 
-				if (aq_ret &&
-				    pf->hw.aq.asq_last_status !=
-							      I40E_AQ_RC_ENOENT)
+				if (ret && aq_err != I40E_AQ_RC_ENOENT)
 					dev_info(&pf->pdev->dev,
-						 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-						 aq_ret,
-						 pf->hw.aq.asq_last_status);
+						 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+						 i40e_stat_str(&pf->hw, ret),
+						 i40e_aq_str(&pf->hw, aq_err));
 			}
 		}
 		if (num_del) {
-			aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+			ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
 						     del_list, num_del, NULL);
+			aq_err = pf->hw.aq.asq_last_status;
 			num_del = 0;
 
-			if (aq_ret &&
-			    pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+			if (ret && aq_err != I40E_AQ_RC_ENOENT)
 				dev_info(&pf->pdev->dev,
-					 "ignoring delete macvlan error, err %d, aq_err %d\n",
-					 aq_ret, pf->hw.aq.asq_last_status);
+					 "ignoring delete macvlan error, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw, aq_err));
 		}
 
 		kfree(del_list);
@@ -1833,29 +1881,31 @@
 
 			/* flush a full buffer */
 			if (num_add == filter_list_len) {
-				aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-							     add_list, num_add,
-							     NULL);
+				ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+							  add_list, num_add,
+							  NULL);
+				aq_err = pf->hw.aq.asq_last_status;
 				num_add = 0;
 
-				if (aq_ret)
+				if (ret)
 					break;
 				memset(add_list, 0, sizeof(*add_list));
 			}
 		}
 		if (num_add) {
-			aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-						     add_list, num_add, NULL);
+			ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+						  add_list, num_add, NULL);
+			aq_err = pf->hw.aq.asq_last_status;
 			num_add = 0;
 		}
 		kfree(add_list);
 		add_list = NULL;
 
-		if (add_happened && aq_ret &&
-		    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+		if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
 			dev_info(&pf->pdev->dev,
-				 "add filter failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "add filter failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw, aq_err));
 			if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
 			    !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 				      &vsi->state)) {
@@ -1871,34 +1921,60 @@
 	if (changed_flags & IFF_ALLMULTI) {
 		bool cur_multipromisc;
 		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-							       vsi->seid,
-							       cur_multipromisc,
-							       NULL);
-		if (aq_ret)
+		ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+							    vsi->seid,
+							    cur_multipromisc,
+							    NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set multi promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "set multi promisc failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
 		bool cur_promisc;
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 					&vsi->state));
-		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-							     vsi->seid,
-							     cur_promisc, NULL);
-		if (aq_ret)
+		if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+			/* set defport ON for Main VSI instead of true promisc
+			 * this way we will get all unicast/multicast and VLAN
+			 * promisc behavior but will not get VF or VMDq traffic
+			 * replicated on the Main VSI.
+			 */
+			if (pf->cur_promisc != cur_promisc) {
+				pf->cur_promisc = cur_promisc;
+				i40e_do_reset_safe(pf,
+						BIT(__I40E_PF_RESET_REQUESTED));
+			}
+		} else {
+			ret = i40e_aq_set_vsi_unicast_promiscuous(
+							  &vsi->back->hw,
+							  vsi->seid,
+							  cur_promisc, NULL);
+			if (ret)
+				dev_info(&pf->pdev->dev,
+					 "set unicast promisc failed, err %d, aq_err %d\n",
+					 ret, pf->hw.aq.asq_last_status);
+			ret = i40e_aq_set_vsi_multicast_promiscuous(
+							  &vsi->back->hw,
+							  vsi->seid,
+							  cur_promisc, NULL);
+			if (ret)
+				dev_info(&pf->pdev->dev,
+					 "set multicast promisc failed, err %d, aq_err %d\n",
+					 ret, pf->hw.aq.asq_last_status);
+		}
+		ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+						vsi->seid,
+						cur_promisc, NULL);
+		if (ret)
 			dev_info(&pf->pdev->dev,
-				 "set uni promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
-		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-						   vsi->seid,
-						   cur_promisc, NULL);
-		if (aq_ret)
-			dev_info(&pf->pdev->dev,
-				 "set brdcast promisc failed, err %d, aq_err %d\n",
-				 aq_ret, pf->hw.aq.asq_last_status);
+				 "set brdcast promisc failed, err %s, aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 
 	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1994,8 +2070,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vlan stripping failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2023,8 +2101,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vlan stripping failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 	}
 }
 
@@ -2294,7 +2374,7 @@
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
 	struct i40e_vsi_context ctxt;
-	i40e_status aq_ret;
+	i40e_status ret;
 
 	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
 	vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2384,13 @@
 
 	ctxt.seid = vsi->seid;
 	ctxt.info = vsi->info;
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "%s: update vsi failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "add pvid failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		return -ENOENT;
 	}
 
@@ -2696,9 +2778,9 @@
 #endif /* I40E_FCOE */
 	/* round up for the chip's needs */
 	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-				(1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
 	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-				(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +2810,7 @@
 	}
 
 	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
-		if (!(vsi->tc_config.enabled_tc & (1 << n)))
+		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
 			continue;
 
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -2877,6 +2959,9 @@
 	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
 	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 
+	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
+		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+
 	if (pf->flags & I40E_FLAG_PTP)
 		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
 
@@ -3167,6 +3252,13 @@
 	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
 		pf->sw_int_count++;
 
+	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
+		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
+		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
+	}
+
 	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
 	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
 
@@ -3373,7 +3465,7 @@
  * @v_idx: vector index
  * @qp_idx: queue pair index
  **/
-static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
+static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
 	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
@@ -3427,7 +3519,7 @@
 		q_vector->tx.ring = NULL;
 
 		while (num_ringpairs--) {
-			map_vector_to_qp(vsi, v_start, qp_idx);
+			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
 			qp_idx++;
 			qp_remaining--;
 		}
@@ -3929,6 +4021,7 @@
 	i40e_vsi_free_irq(vsi);
 	i40e_vsi_free_tx_resources(vsi);
 	i40e_vsi_free_rx_resources(vsi);
+	vsi->current_netdev_flags = 0;
 }
 
 /**
@@ -4073,7 +4166,7 @@
 		if (app.selector == I40E_APP_SEL_TCPIP &&
 		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
 			tc = dcbcfg->etscfg.prioritytable[app.priority];
-			enabled_tc |= (1 << tc);
+			enabled_tc |= BIT_ULL(tc);
 			break;
 		}
 	}
@@ -4122,7 +4215,7 @@
 	u8 i;
 
 	for (i = 0; i < num_tc; i++)
-		enabled_tc |= 1 << i;
+		enabled_tc |= BIT(i);
 
 	return enabled_tc;
 }
@@ -4157,7 +4250,7 @@
 	/* At least have TC0 */
 	enabled_tc = (enabled_tc ? enabled_tc : 0x1);
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			num_tc++;
 	}
 	return num_tc;
@@ -4179,11 +4272,11 @@
 
 	/* Find the first enabled TC */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			break;
 	}
 
-	return 1 << i;
+	return BIT(i);
 }
 
 /**
@@ -4221,26 +4314,28 @@
 	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	i40e_status aq_ret;
+	i40e_status ret;
 	u32 tc_bw_max;
 	int i;
 
 	/* Get the VSI level BW configuration */
-	aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi bw config, err %d, aq_err %d\n",
-			 aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EINVAL;
 	}
 
 	/* Get the VSI level BW configuration per TC */
-	aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
-						  NULL);
-	if (aq_ret) {
+	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+					       NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
-			 aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EINVAL;
 	}
 
@@ -4279,16 +4374,16 @@
 				       u8 *bw_share)
 {
 	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-	i40e_status aq_ret;
+	i40e_status ret;
 	int i;
 
 	bw_data.tc_valid_bits = enabled_tc;
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
 		bw_data.tc_bw_credits[i] = bw_share[i];
 
-	aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-					  NULL);
-	if (aq_ret) {
+	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+				       NULL);
+	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
 			 "AQ command Config VSI BW allocation per TC failed = %d\n",
 			 vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4432,7 @@
 		 * will set the numtc for netdev as 2 that will be
 		 * referenced by the netdev layer as TC 0 and 1.
 		 */
-		if (vsi->tc_config.enabled_tc & (1 << i))
+		if (vsi->tc_config.enabled_tc & BIT_ULL(i))
 			netdev_set_tc_queue(netdev,
 					vsi->tc_config.tc_info[i].netdev_tc,
 					vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4494,7 @@
 
 	/* Enable ETS TCs with equal BW Share for now across all VSIs */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			bw_share[i] = 1;
 	}
 
@@ -4423,8 +4518,10 @@
 	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "update vsi failed, aq_err=%d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 "Update vsi tc config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		goto out;
 	}
 	/* update the local VSI info with updated queue map */
@@ -4435,8 +4532,10 @@
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
 		dev_info(&vsi->back->pdev->dev,
-			 "Failed updating vsi bw info, aq_err=%d\n",
-			 vsi->back->hw.aq.asq_last_status);
+			 "Failed updating vsi bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&vsi->back->hw, ret),
+			 i40e_aq_str(&vsi->back->hw,
+				     vsi->back->hw.aq.asq_last_status));
 		goto out;
 	}
 
@@ -4469,7 +4568,7 @@
 
 	/* Enable ETS TCs with equal BW Share for now */
 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-		if (enabled_tc & (1 << i))
+		if (enabled_tc & BIT_ULL(i))
 			bw_data.tc_bw_share_credits[i] = 1;
 	}
 
@@ -4477,8 +4576,9 @@
 						   &bw_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "veb bw config failed, aq_err=%d\n",
-			 pf->hw.aq.asq_last_status);
+			 "VEB bw config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto out;
 	}
 
@@ -4486,8 +4586,9 @@
 	ret = i40e_veb_get_bw_info(veb);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Failed getting veb bw config, aq_err=%d\n",
-			 pf->hw.aq.asq_last_status);
+			 "Failed getting veb bw config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 
 out:
@@ -4574,8 +4675,9 @@
 	ret = i40e_aq_resume_port_tx(hw, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "AQ command Resume Port Tx failed = %d\n",
-			  pf->hw.aq.asq_last_status);
+			 "Resume Port Tx failed, err %s aq_err %s\n",
+			  i40e_stat_str(&pf->hw, ret),
+			  i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		/* Schedule PF reset to recover */
 		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 		i40e_service_event_schedule(pf);
@@ -4627,8 +4729,9 @@
 		}
 	} else {
 		dev_info(&pf->pdev->dev,
-			 "AQ Querying DCB configuration failed: aq_err %d\n",
-			 pf->hw.aq.asq_last_status);
+			 "Query for DCB configuration failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 
 out:
@@ -4859,7 +4962,7 @@
 
 	/* Generate TC map for number of tc requested */
 	for (i = 0; i < tc; i++)
-		enabled_tc |= (1 << i);
+		enabled_tc |= BIT_ULL(i);
 
 	/* Requesting same TC configuration as already enabled */
 	if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5101,7 @@
 err_setup_tx:
 	i40e_vsi_free_tx_resources(vsi);
 	if (vsi == pf->vsi[pf->lan_vsi])
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
 	return err;
 }
@@ -5066,7 +5169,7 @@
 		i40e_vc_notify_reset(pf);
 
 	/* do the biggest reset indicated */
-	if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
 		/* Request a Global Reset
 		 *
@@ -5081,7 +5184,7 @@
 		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-	} else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
 		/* Request a Core Reset
 		 *
@@ -5093,7 +5196,7 @@
 		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 		i40e_flush(&pf->hw);
 
-	} else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
 
 		/* Request a PF Reset
 		 *
@@ -5106,7 +5209,7 @@
 		dev_dbg(&pf->pdev->dev, "PFR requested\n");
 		i40e_handle_reset_warning(pf);
 
-	} else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
 		int v;
 
 		/* Find the VSI(s) that requested a re-init */
@@ -5123,7 +5226,7 @@
 
 		/* no further action needed, so return now */
 		return;
-	} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
 		int v;
 
 		/* Find the VSI(s) that needs to be brought down */
@@ -5253,7 +5356,10 @@
 	/* Get updated DCBX data from firmware */
 	ret = i40e_get_dcb_config(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+		dev_info(&pf->pdev->dev,
+			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto exit;
 	}
 
@@ -5761,23 +5867,23 @@
 
 	rtnl_lock();
 	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
 		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
 		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
 		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
 		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
 	}
 	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-		reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+		reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
 		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
 	}
 
@@ -5983,27 +6089,29 @@
 {
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
-	int aq_ret;
+	int ret;
 
 	ctxt.seid = pf->main_vsi_seid;
 	ctxt.pf_num = pf->hw.pf_id;
 	ctxt.vf_num = 0;
-	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return;
 	}
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s: update vsi switch failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 }
 
@@ -6017,27 +6125,29 @@
 {
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 	struct i40e_vsi_context ctxt;
-	int aq_ret;
+	int ret;
 
 	ctxt.seid = pf->main_vsi_seid;
 	ctxt.pf_num = pf->hw.pf_id;
 	ctxt.vf_num = 0;
-	aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-			 __func__, aq_ret, pf->hw.aq.asq_last_status);
+			 "couldn't get PF vsi config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return;
 	}
 	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-	aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-	if (aq_ret) {
+	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "%s: update vsi switch failed, aq_err=%d\n",
-			 __func__, vsi->back->hw.aq.asq_last_status);
+			 "update vsi switch failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	}
 }
 
@@ -6097,7 +6207,8 @@
 	ret = i40e_add_vsi(ctl_vsi);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "rebuild of owner VSI failed: %d\n", ret);
+			 "rebuild of veb_idx %d owner VSI failed: %d\n",
+			 veb->idx, ret);
 		goto end_reconstitute;
 	}
 	i40e_vsi_reset_stats(ctl_vsi);
@@ -6176,8 +6287,10 @@
 			buf_len = data_size;
 		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
 			dev_info(&pf->pdev->dev,
-				 "capability discovery failed: aq=%d\n",
-				 pf->hw.aq.asq_last_status);
+				 "capability discovery failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			return -ENODEV;
 		}
 	} while (err);
@@ -6363,7 +6476,9 @@
 	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
 	ret = i40e_init_adminq(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		goto clear_recovery;
 	}
 
@@ -6373,11 +6488,8 @@
 
 	i40e_clear_pxe_mode(hw);
 	ret = i40e_get_capabilities(pf);
-	if (ret) {
-		dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
-			 ret);
+	if (ret)
 		goto end_core_reset;
-	}
 
 	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
 				hw->func_caps.num_rx_qp,
@@ -6418,12 +6530,16 @@
 				       I40E_AQ_EVENT_LINK_UPDOWN |
 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* make sure our flow control settings are restored */
 	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
 	if (ret)
-		dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+		dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	/* Rebuild the VSIs and VEBs that existed before reset.
 	 * They are still in our local switch element arrays, so only
@@ -6484,8 +6600,10 @@
 		msleep(75);
 		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
 		if (ret)
-			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-				 pf->hw.aq.asq_last_status);
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	/* reinit the misc interrupt */
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6647,8 +6765,8 @@
 	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
 
 	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-		if (pf->pending_vxlan_bitmap & (1 << i)) {
-			pf->pending_vxlan_bitmap &= ~(1 << i);
+		if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+			pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
 			port = pf->vxlan_ports[i];
 			if (port)
 				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +6777,12 @@
 
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+					 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
 					 port ? "add" : "delete",
-					 ntohs(port), i, ret,
-					 pf->hw.aq.asq_last_status);
+					 ntohs(port), i,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				pf->vxlan_ports[i] = 0;
 			}
 		}
@@ -7013,6 +7133,10 @@
 		tx_ring->count = vsi->num_desc;
 		tx_ring->size = 0;
 		tx_ring->dcb_tc = 0;
+		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
+			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
+		if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
+			tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
 		vsi->tx_rings[i] = tx_ring;
 
 		rx_ring = &tx_ring[1];
@@ -7411,62 +7535,139 @@
 }
 
 /**
+ * i40e_config_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+	struct i40e_aqc_get_set_rss_key_data rss_key;
+	struct i40e_pf *pf = vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	bool pf_lut = false;
+	u8 *rss_lut;
+	int ret, i;
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	memcpy(&rss_key, seed, sizeof(rss_key));
+
+	rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
+	if (!rss_lut)
+		return -ENOMEM;
+
+	/* Populate the LUT with max no. of queues in round robin fashion */
+	for (i = 0; i < vsi->rss_table_size; i++)
+		rss_lut[i] = i % vsi->rss_size;
+
+	ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
+	if (ret) {
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS key, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		return ret;
+	}
+
+	if (vsi->type == I40E_VSI_MAIN)
+		pf_lut = true;
+
+	ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
+				  vsi->rss_table_size);
+	if (ret)
+		dev_info(&pf->pdev->dev,
+			 "Cannot set RSS lut, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+	return ret;
+}
+
+/**
+ * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
+ * @vsi: VSI structure
+ **/
+static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
+{
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
+	struct i40e_pf *pf = vsi->back;
+
+	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(vsi, seed);
+
+	return 0;
+}
+
+/**
+ * i40e_config_rss_reg - Prepare for RSS if used
+ * @pf: board private structure
+ * @seed: RSS hash seed
+ **/
+static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
+{
+	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	struct i40e_hw *hw = &pf->hw;
+	u32 *seed_dw = (u32 *)seed;
+	u32 current_queue = 0;
+	u32 lut = 0;
+	int i, j;
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+
+	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+		lut = 0;
+		for (j = 0; j < 4; j++) {
+			if (current_queue == vsi->rss_size)
+				current_queue = 0;
+			lut |= ((current_queue) << (8 * j));
+			current_queue++;
+		}
+		wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
+	}
+	i40e_flush(hw);
+
+	return 0;
+}
+
+/**
  * i40e_config_rss - Prepare for RSS if used
  * @pf: board private structure
  **/
 static int i40e_config_rss(struct i40e_pf *pf)
 {
-	u32 rss_key[I40E_PFQF_HKEY_MAX_INDEX + 1];
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+	u8 seed[I40E_HKEY_ARRAY_SIZE];
 	struct i40e_hw *hw = &pf->hw;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
 	u32 reg_val;
+	u64 hena;
 
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_PFQF_HKEY(i), rss_key[i]);
+	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
 
 	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
 	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
 		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
-	hena |= I40E_DEFAULT_RSS_HENA;
+	hena |= i40e_pf_get_default_rss_hena(pf);
+
 	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
 	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
 	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
 
-	/* Check capability and Set table size and register per hw expectation*/
+	/* Determine the RSS table size based on the hardware capabilities */
 	reg_val = rd32(hw, I40E_PFQF_CTL_0);
-	if (pf->rss_table_size == 512)
-		reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
-	else
-		reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+	reg_val = (pf->rss_table_size == 512) ?
+			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
+			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
 	wr32(hw, I40E_PFQF_CTL_0, reg_val);
 
-	/* Populate the LUT with max no. of queues in round robin fashion */
-	for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
-
-		/* The assumption is that lan qp count will be the highest
-		 * qp count for any PF VSI that needs RSS.
-		 * If multiple VSIs need RSS support, all the qp counts
-		 * for those VSIs should be a power of 2 for RSS to work.
-		 * If LAN VSI is the only consumer for RSS then this requirement
-		 * is not necessary.
-		 */
-		if (j == vsi->rss_size)
-			j = 0;
-		/* lut = 4-byte sliding window of 4 lut entries */
-		lut = (lut << 8) | (j &
-			 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
-		/* On i = 3, we have 4 entries in lut; write to the register */
-		if ((i & 3) == 3)
-			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
-	}
-	i40e_flush(hw);
-
-	return 0;
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+		return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
+	else
+		return i40e_config_rss_reg(pf, seed);
 }
 
 /**
@@ -7533,7 +7734,7 @@
 	i40e_status status;
 
 	/* Set the valid bit for this PF */
-	bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
 	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
 	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
 
@@ -7567,8 +7768,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for read access, err %d: aq_err %d\n",
-			 ret, last_aq_status);
+			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 
@@ -7583,8 +7785,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	i40e_release_nvm(&pf->hw);
 	if (ret) {
-		dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
-			 ret, last_aq_status);
+		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 
@@ -7596,8 +7799,9 @@
 	last_aq_status = pf->hw.aq.asq_last_status;
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "Cannot acquire NVM for write access, err %d: aq_err %d\n",
-			 ret, last_aq_status);
+			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 		goto bw_commit_out;
 	}
 	/* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +7819,9 @@
 	i40e_release_nvm(&pf->hw);
 	if (ret)
 		dev_info(&pf->pdev->dev,
-			 "BW settings NOT SAVED, err %d aq_err %d\n",
-			 ret, last_aq_status);
+			 "BW settings NOT SAVED, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, last_aq_status));
 bw_commit_out:
 
 	return ret;
@@ -7662,7 +7867,7 @@
 	/* Depending on PF configurations, it is possible that the RSS
 	 * maximum might end up larger than the available queues
 	 */
-	pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
 	pf->rss_size = 1;
 	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
 	pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +7878,7 @@
 	}
 
 	/* MFP mode enabled */
-	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
 		pf->flags |= I40E_FLAG_MFP_ENABLED;
 		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
 		if (i40e_get_npar_bw_setting(pf))
@@ -7703,9 +7908,8 @@
 	}
 
 	if (pf->hw.func_caps.vmdq) {
-		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
-		pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
+		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
 	}
 
 #ifdef I40E_FCOE
@@ -7723,6 +7927,14 @@
 					I40E_MAX_VF_COUNT);
 	}
 #endif /* CONFIG_PCI_IOV */
+	if (pf->hw.mac.type == I40E_MAC_X722) {
+		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
+			     I40E_FLAG_128_QP_RSS_CAPABLE |
+			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
+			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
+			     I40E_FLAG_WB_ON_ITR_CAPABLE |
+			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
+	}
 	pf->eeprom_version = 0xDEAD;
 	pf->lan_veb = I40E_NO_VEB;
 	pf->lan_vsi = I40E_NO_VSI;
@@ -7812,7 +8024,7 @@
 	need_reset = i40e_set_ntuple(pf, features);
 
 	if (need_reset)
-		i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
 	return 0;
 }
@@ -7875,10 +8087,8 @@
 
 	/* New port: add it and mark its index in the bitmap */
 	pf->vxlan_ports[next_idx] = port;
-	pf->pending_vxlan_bitmap |= (1 << next_idx);
+	pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
 	pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
-
-	dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
 }
 
 /**
@@ -7906,7 +8116,7 @@
 		 * and make it pending
 		 */
 		pf->vxlan_ports[idx] = 0;
-		pf->pending_vxlan_bitmap |= (1 << idx);
+		pf->pending_vxlan_bitmap |= BIT_ULL(idx);
 		pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
 		dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -7981,7 +8191,6 @@
 	return err;
 }
 
-#ifdef HAVE_BRIDGE_ATTRIBS
 /**
  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
  * @dev: the netdev being configured
@@ -7995,7 +8204,8 @@
  * bridge mode enabled.
  **/
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
-				   struct nlmsghdr *nlh)
+				   struct nlmsghdr *nlh,
+				   u16 flags)
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -8066,14 +8276,9 @@
  * Return the mode in which the hardware bridge is operating in
  * i.e VEB or VEPA.
  **/
-#ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 				   struct net_device *dev,
 				   u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-				   struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
 {
 	struct i40e_netdev_priv *np = netdev_priv(dev);
 	struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8302,25 @@
 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
 				       nlflags, 0, 0, filter_mask, NULL);
 }
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: skb buff
+ * @dev: This physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+					     struct net_device *dev,
+					     netdev_features_t features)
+{
+	if (skb->encapsulation &&
+	    (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+	     I40E_MAX_TUNNEL_HDR_LEN))
+		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+	return features;
+}
 
 static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_open		= i40e_open,
@@ -8133,10 +8356,9 @@
 #endif
 	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
 	.ndo_fdb_add		= i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+	.ndo_features_check	= i40e_features_check,
 	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
 };
 
 /**
@@ -8304,8 +8526,10 @@
 		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "couldn't get PF vsi config, err %d, aq_err %d\n",
-				 ret, pf->hw.aq.asq_last_status);
+				 "couldn't get PF vsi config, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			return -ENOENT;
 		}
 		vsi->info = ctxt.info;
@@ -8327,8 +8551,10 @@
 			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "update vsi failed, aq_err=%d\n",
-					 pf->hw.aq.asq_last_status);
+					 "update vsi failed, err %s aq_err %s\n",
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				ret = -ENOENT;
 				goto err;
 			}
@@ -8345,9 +8571,11 @@
 			ret = i40e_vsi_config_tc(vsi, enabled_tc);
 			if (ret) {
 				dev_info(&pf->pdev->dev,
-					 "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
-					 enabled_tc, ret,
-					 pf->hw.aq.asq_last_status);
+					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+					 enabled_tc,
+					 i40e_stat_str(&pf->hw, ret),
+					 i40e_aq_str(&pf->hw,
+						    pf->hw.aq.asq_last_status));
 				ret = -ENOENT;
 			}
 		}
@@ -8438,8 +8666,10 @@
 		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
 		if (ret) {
 			dev_info(&vsi->back->pdev->dev,
-				 "add vsi failed, aq_err=%d\n",
-				 vsi->back->hw.aq.asq_last_status);
+				 "add vsi failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			ret = -ENOENT;
 			goto err;
 		}
@@ -8484,8 +8714,9 @@
 	ret = i40e_vsi_get_bw_info(vsi);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't get vsi bw info, err %d, aq_err %d\n",
-			 ret, pf->hw.aq.asq_last_status);
+			 "couldn't get vsi bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		/* VSI is already added so not tearing that up */
 		ret = 0;
 	}
@@ -8615,6 +8846,11 @@
 		goto vector_setup_out;
 	}
 
+	/* In Legacy mode, we do not have to get any other vector since we
+	 * piggyback on the misc/ICR0 for queue interrupts.
+	 */
+	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
+		return ret;
 	if (vsi->num_q_vectors)
 		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
 						 vsi->num_q_vectors, vsi->idx);
@@ -8658,7 +8894,7 @@
 	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
 	if (ret < 0) {
 		dev_info(&pf->pdev->dev,
-			 "failed to get tracking for %d queues for VSI %d err=%d\n",
+			 "failed to get tracking for %d queues for VSI %d err %d\n",
 			 vsi->alloc_queue_pairs, vsi->seid, ret);
 		goto err_vsi;
 	}
@@ -8857,6 +9093,10 @@
 		break;
 	}
 
+	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+	    (vsi->type == I40E_VSI_VMDQ2)) {
+		ret = i40e_vsi_config_rss(vsi);
+	}
 	return vsi;
 
 err_rings:
@@ -8896,8 +9136,9 @@
 						  &bw_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "query veb bw config failed, aq_err=%d\n",
-			 hw->aq.asq_last_status);
+			 "query veb bw config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -8905,8 +9146,9 @@
 						   &ets_data, NULL);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "query veb bw ets config failed, aq_err=%d\n",
-			 hw->aq.asq_last_status);
+			 "query veb bw ets config failed, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
 		goto out;
 	}
 
@@ -9090,36 +9332,40 @@
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
-	bool is_default = false;
+	struct i40e_pf *pf = veb->pf;
+	bool is_default = veb->pf->cur_promisc;
 	bool is_cloud = false;
 	int ret;
 
 	/* get a VEB from the hardware */
-	ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
 			      veb->enabled_tc, is_default,
 			      is_cloud, &veb->seid, NULL);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't add VEB, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "couldn't add VEB, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EPERM;
 	}
 
 	/* get statistics counter */
-	ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
 					 &veb->stats_idx, NULL, NULL, NULL);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't get VEB statistics idx, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
+		dev_info(&pf->pdev->dev,
+			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return -EPERM;
 	}
 	ret = i40e_veb_get_bw_info(veb);
 	if (ret) {
-		dev_info(&veb->pf->pdev->dev,
-			 "couldn't get VEB bw info, err %d, aq_err %d\n",
-			 ret, veb->pf->hw.aq.asq_last_status);
-		i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+		dev_info(&pf->pdev->dev,
+			 "couldn't get VEB bw info, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
 		return -ENOENT;
 	}
 
@@ -9325,8 +9571,10 @@
 						&next_seid, NULL);
 		if (ret) {
 			dev_info(&pf->pdev->dev,
-				 "get switch config failed %d aq_err=%x\n",
-				 ret, pf->hw.aq.asq_last_status);
+				 "get switch config failed err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, ret),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 			kfree(aq_buf);
 			return -ENOENT;
 		}
@@ -9367,8 +9615,9 @@
 	ret = i40e_fetch_switch_configuration(pf, false);
 	if (ret) {
 		dev_info(&pf->pdev->dev,
-			 "couldn't fetch switch config, err %d, aq_err %d\n",
-			 ret, pf->hw.aq.asq_last_status);
+			 "couldn't fetch switch config, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, ret),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 		return ret;
 	}
 	i40e_pf_reset_stats(pf);
@@ -9743,7 +9992,8 @@
 
 	err = i40e_init_shared_code(hw);
 	if (err) {
-		dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+			 err);
 		goto err_pf_reset;
 	}
 
@@ -9910,15 +10160,19 @@
 				       I40E_AQ_EVENT_LINK_UPDOWN |
 				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
 	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
 	    (pf->hw.aq.fw_maj_ver < 4)) {
 		msleep(75);
 		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
 		if (err)
-			dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-				 pf->hw.aq.asq_last_status);
+			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+				 i40e_stat_str(&pf->hw, err),
+				 i40e_aq_str(&pf->hw,
+					     pf->hw.aq.asq_last_status));
 	}
 	/* The main driver is (mostly) up and happy. We need to set this state
 	 * before setting up the misc vector or we get a race and the vector
@@ -10006,8 +10260,10 @@
 	/* get the requested speeds from the fw */
 	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
 	if (err)
-		dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
-			 err);
+		dev_info(&pf->pdev->dev,
+			 "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+			 i40e_stat_str(&pf->hw, err),
+			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
 	/* print a string summarizing features */
@@ -10247,6 +10503,19 @@
 	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
 	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
 
+	del_timer_sync(&pf->service_timer);
+	cancel_work_sync(&pf->service_task);
+	i40e_fdir_teardown(pf);
+
+	rtnl_lock();
+	i40e_prep_for_reset(pf);
+	rtnl_unlock();
+
+	wr32(hw, I40E_PFPM_APM,
+	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+	wr32(hw, I40E_PFPM_WUFC,
+	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
 	i40e_clear_interrupt_scheme(pf);
 
 	if (system_state == SYSTEM_POWER_OFF) {
@@ -10267,9 +10536,6 @@
 
 	set_bit(__I40E_SUSPENDED, &pf->state);
 	set_bit(__I40E_DOWN, &pf->state);
-	del_timer_sync(&pf->service_timer);
-	cancel_work_sync(&pf->service_task);
-	i40e_fdir_teardown(pf);
 
 	rtnl_lock();
 	i40e_prep_for_reset(pf);
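A recurring change in the i40e_main.c hunks above is replacing raw numeric error prints with i40e_stat_str()/i40e_aq_str(), whose prototypes appear in the i40e_prototype.h hunk below. As a rough, standalone sketch of that kind of code-to-string decoding (the table entries here are examples only, not the driver's actual mapping):

	#include <stdio.h>

	/* Example decoder: a few AdminQ return codes mapped to names.
	 * The driver's real, complete mapping is not reproduced here.
	 */
	static const char *example_aq_str(int aq_rc)
	{
		switch (aq_rc) {
		case 0:  return "I40E_AQ_RC_OK";
		case 2:  return "I40E_AQ_RC_ENOENT";
		case 14: return "I40E_AQ_RC_EINVAL";
		default: return "I40E_AQ_RC_UNKNOWN";
		}
	}

	int main(void)
	{
		/* Mirrors the log pattern used above: print a name, not a number. */
		printf("add filter failed, aq_err %s\n", example_aq_str(14));
		return 0;
	}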
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 554e49d..9b83abc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -50,7 +50,7 @@
 	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
 			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
 	/* Switching to words (sr_size contains power of 2KB) */
-	nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
 
 	/* Check if we are in the normal or blank NVM programming mode */
 	fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@
 	ret_code = i40e_poll_sr_srctl_done_bit(hw);
 	if (!ret_code) {
 		/* Write the address and start reading */
-		sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
-			 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
 		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
 		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
@@ -212,6 +212,74 @@
 }
 
 /**
+ * i40e_read_nvm_aq - Read Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+				    u32 offset, u16 words, void *data,
+				    bool last_command)
+{
+	i40e_status ret_code = I40E_ERR_NVM;
+	struct i40e_asq_cmd_details cmd_details;
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+
+	/* Here we are checking the SR limit only for the flat memory model.
+	 * We cannot do it for the module-based model, as we did not acquire
+	 * the NVM resource yet (we cannot get the module pointer value).
+	 * Firmware will check the module-based model.
+	 */
+	if ((offset + words) > hw->nvm.sr_size)
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+			   (offset + words), hw->nvm.sr_size);
+	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can read only up to 4KB (one sector) in one AQ read */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM write fail error: tried to write %d words, limit is %d.\n",
+			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single read cannot spread over two sectors */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+			   offset, words);
+	else
+		ret_code = i40e_aq_read_nvm(hw, module_pointer,
+					    2 * offset,  /*bytes*/
+					    2 * words,   /*bytes*/
+					    data, last_command, &cmd_details);
+
+	return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the AdminQ.
+ **/
+static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
+{
+	i40e_status ret_code = I40E_ERR_TIMEOUT;
+
+	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
+	*data = le16_to_cpu(*(__le16 *)data);
+
+	return ret_code;
+}
+
+/**
  * i40e_read_nvm_word - Reads Shadow RAM
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -222,6 +290,8 @@
 i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
 			       u16 *data)
 {
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_word_aq(hw, offset, data);
 	return i40e_read_nvm_word_srctl(hw, offset, data);
 }
 
@@ -257,6 +327,63 @@
 }
 
 /**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method. The buffer read is preceded by the NVM ownership take
+ * and followed by the release.
+ **/
+static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data)
+{
+	i40e_status ret_code;
+	u16 read_size = *words;
+	bool last_cmd = false;
+	u16 words_read = 0;
+	u16 i = 0;
+
+	do {
+		/* Calculate the number of words we should read in this step.
+		 * The FVL AQ does not allow reading more than one page at a
+		 * time or crossing page boundaries.
+		 */
+		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+			read_size = min(*words,
+					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+		else
+			read_size = min((*words - words_read),
+					I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+		/* Check if this is last command, if so set proper flag */
+		if ((words_read + read_size) >= *words)
+			last_cmd = true;
+
+		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+					    data + words_read, last_cmd);
+		if (ret_code)
+			goto read_nvm_buffer_aq_exit;
+
+		/* Increment counter for words already read and move offset to
+		 * new read location
+		 */
+		words_read += read_size;
+		offset += read_size;
+	} while (words_read < *words);
+
+	for (i = 0; i < *words; i++)
+		data[i] = le16_to_cpu(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+	*words = words_read;
+	return ret_code;
+}
+
+/**
  * i40e_read_nvm_buffer - Reads Shadow RAM buffer
  * @hw: pointer to the HW structure
  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -270,6 +397,8 @@
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
 				 u16 *words, u16 *data)
 {
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
 	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
 }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 7b34f1e..dcb72a8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -58,6 +58,19 @@
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+				bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+				bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
+				u16 seid,
+				struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
+				u16 seid,
+				struct i40e_aqc_get_set_rss_key_data *key);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index a92b772..8c40d6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -43,9 +43,8 @@
 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
 #define I40E_PTP_1GB_INCVAL  0x2000000000ULL
 
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
-					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
 					I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 
 /**
@@ -357,7 +356,7 @@
 
 	prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
 
-	if (!(prttsyn_stat & (1 << index)))
+	if (!(prttsyn_stat & BIT(index)))
 		return;
 
 	lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
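Many hunks in this series, like the prttsyn_stat test just above, convert open-coded (1 << i) shifts to BIT()/BIT_ULL(). A standalone sketch of why that matters once a mask is wider than 31 bits; the two macros below mirror the include/linux/bitops.h definitions for illustration only:

	#include <stdio.h>

	#define BIT(n)		(1UL << (n))
	#define BIT_ULL(n)	(1ULL << (n))

	int main(void)
	{
		unsigned long long hena = 0;

		/* (1 << 33) shifts a plain int past bit 31, which is undefined
		 * and cannot set the high half of a 64-bit mask; BIT_ULL(33)
		 * promotes to unsigned long long before shifting.
		 */
		hena |= BIT_ULL(33);
		printf("hena = %#llx\n", hena);
		return 0;
	}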
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 522d6df..dc0402f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -873,6 +873,13 @@
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
 #define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
 #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
@@ -3366,4 +3373,1933 @@
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
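(Context sketch, not part of the patch.) The register block above follows the driver's usual pattern of an address macro plus per-field SHIFT/MASK pairs. A minimal, hedged example of how such a field is typically read back, assuming I40E_MASK(mask, shift) expands to ((mask) << (shift)) as elsewhere in this header, rd32() is the driver's existing register-read helper, and the wrapper name below is invented purely for illustration:

	/* illustrative helper, not from the patch: read the high 16 bits of
	 * the per-VF IPv4 Rx packet counter for VF index vf_idx (0..31)
	 */
	static u32 i40e_read_vf_ip4_rx_pkts_hi(struct i40e_hw *hw,
					       unsigned int vf_idx)
	{
		u32 reg = rd32(hw, I40E_GLPES_VFIP4RXPKTSHI(vf_idx));

		/* isolate the IP4RXPKTSHI field (bits 15:0) */
		return (reg & I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK) >>
		       I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT;
	}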
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9a4f2bc..738aca6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -464,7 +464,7 @@
 	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
 		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-	if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
 		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
 		    (I40E_DEBUG_FD & pf->hw.debug_mask))
 			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@
 			dev_info(&pdev->dev,
 				"FD filter programming failed due to incorrect filter parameters\n");
 		}
-	} else if (error ==
-			  (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
 		if (I40E_DEBUG_FD & pf->hw.debug_mask)
 			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
 				 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -854,15 +853,40 @@
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-	u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-		  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
-		  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
-		  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-		  /* allow 00 to be written to the index */
+	u16 flags = q_vector->tx.ring[0].flags;
 
-	wr32(&vsi->back->hw,
-	     I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
-	     val);
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		u32 val;
+
+		if (q_vector->arm_wb_state)
+			return;
+
+		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK;
+
+		wr32(&vsi->back->hw,
+		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+					 vsi->base_vector - 1),
+		     val);
+		q_vector->arm_wb_state = true;
+	} else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
+			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
+			  /* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw,
+		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+					 vsi->base_vector - 1), val);
+	} else {
+		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
+			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
+			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
+			/* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+	}
 }
 
 /**
@@ -892,7 +916,7 @@
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +929,14 @@
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -923,42 +952,14 @@
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
 
 /**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
  * i40e_clean_programming_status - clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
  * @rx_desc: the rx descriptor written back by HW
@@ -1386,7 +1387,7 @@
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1402,25 @@
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1428,7 +1429,8 @@
 	 * so the total length of IPv4 header is IHL*4 bytes
 	 * The UDP_0 bit *may* be set if the *inner* header is UDP
 	 */
-	if (ipv4_tunnel) {
+	if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) &&
+	    (ipv4_tunnel)) {
 		skb->transport_header = skb->mac_header +
 					sizeof(struct ethhdr) +
 					(ip_hdr(skb)->ihl * 4);
@@ -1543,7 +1545,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1584,8 +1586,8 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1639,7 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1649,7 @@
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1669,7 +1671,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1730,7 +1732,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1753,7 +1755,7 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1773,13 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1802,7 +1804,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1827,6 +1829,68 @@
 }
 
 /**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_RX_ITR <<
+				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+			(q_vector->rx.itr <<
+				I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+			I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+			(I40E_ITR_NONE <<
+				I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_TX_ITR <<
+				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+				(q_vector->tx.itr <<
+				   I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+		} else {
+			val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+				I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+			      vsi->base_vector - 1), val);
+	} else {
+		i40e_irq_dynamic_enable(vsi,
+					q_vector->v_idx + vsi->base_vector);
+	}
+}
+
+/**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
@@ -1880,35 +1944,29 @@
 		return budget;
 	}
 
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
+	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+		i40e_update_enable_itr(vsi, q_vector);
+	} else { /* Legacy mode */
+		struct i40e_hw *hw = &vsi->back->hw;
+		/* We re-enable the queue 0 cause, but
+		 * don't worry about dynamic_enable
+		 * because we left it on for the other
+		 * possible interrupts during napi
+		 */
+		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;
 
-	if (!test_bit(__I40E_DOWN, &vsi->state)) {
-		if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-			i40e_irq_dynamic_enable(vsi,
-					q_vector->v_idx + vsi->base_vector);
-		} else {
-			struct i40e_hw *hw = &vsi->back->hw;
-			/* We re-enable the queue 0 cause, but
-			 * don't worry about dynamic_enable
-			 * because we left it on for the other
-			 * possible interrupts during napi
-			 */
-			u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-			qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-			qval = rd32(hw, I40E_QINT_TQCTL(0));
-			qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-			wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-			i40e_irq_dynamic_enable_icr0(vsi->back);
-		}
+		wr32(hw, I40E_QINT_RQCTL(0), qval);
+		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+		wr32(hw, I40E_QINT_TQCTL(0), qval);
+		i40e_irq_dynamic_enable_icr0(vsi->back);
 	}
-
 	return 0;
 }
 
@@ -1982,6 +2040,13 @@
 	/* Due to lack of space, no more new filters can be programmed */
 	if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
 		return;
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) {
+		/* HW ATR eviction will take care of removing filters on FIN
+		 * and RST packets.
+		 */
+		if (th->fin || th->rst)
+			return;
+	}
 
 	tx_ring->atr_count++;
 
@@ -2037,6 +2102,9 @@
 			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
 			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
+	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)
+		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
+
 	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
 	fdir_desc->rsvd = cpu_to_le32(0);
 	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
@@ -2244,11 +2312,15 @@
 	struct iphdr *this_ip_hdr;
 	u32 network_hdr_len;
 	u8 l4_hdr = 0;
+	struct udphdr *oudph;
+	struct iphdr *oiph;
 	u32 l4_tunnel = 0;
 
 	if (skb->encapsulation) {
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
+			oudph = udp_hdr(skb);
+			oiph = ip_hdr(skb);
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
@@ -2285,6 +2357,15 @@
 			*tx_flags &= ~I40E_TX_FLAGS_IPV4;
 			*tx_flags |= I40E_TX_FLAGS_IPV6;
 		}
+		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
+		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+					oiph->daddr,
+					(skb->len - skb_transport_offset(skb)),
+					IPPROTO_UDP, 0);
+			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
 		this_ip_hdr = ip_hdr(skb);
@@ -2616,6 +2697,8 @@
 	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
 						   tx_ring->queue_index)))
 		writel(i, tx_ring->tail);
+	else
+		prefetchw(tx_desc + 1);
 
 	return;
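(Sketch for context only, not part of the patch.) The tunnel-offload hunk above seeds the outer UDP checksum with the one's-complement IPv4 pseudo-header sum over the outer addresses and the remaining packet length, so that the hardware can presumably complete and fold the sum when the L4T_CS context bit is set:

	struct iphdr *oiph = ip_hdr(skb);	/* outer IPv4 header */
	struct udphdr *oudph = udp_hdr(skb);	/* outer UDP header */

	/* prime the field with the inverted pseudo-header checksum */
	oudph->check = ~csum_tcpudp_magic(oiph->saddr, oiph->daddr,
					  skb->len - skb_transport_offset(skb),
					  IPPROTO_UDP, 0);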
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 0dc48dc..f1385a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -66,17 +66,29 @@
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+	BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+	BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+	BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+	BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+	BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+	BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,17 +141,17 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		(u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN		(u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM		BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_TSYN		BIT(8)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -253,6 +265,10 @@
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
 
+	u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
+
 	/* stats structs */
 	struct i40e_queue_stats	stats;
 	struct u64_stats_sync syncp;
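(Context note, not part of the patch.) The open-coded shifts above are converted to the kernel's BIT()/BIT_ULL() helpers, which expand to (1UL << n) and (1ULL << n) respectively, so the change is behaviour-preserving; BIT_ULL matters for the RSS hash-enable set because packet-classifier types can exceed 31 and the field is 64 bits wide. A minimal usage sketch with the helper defined above; the function name is hypothetical:

	/* illustrative only: PCTYPE 33 (IPv4/TCP) is part of both the base
	 * and the expanded default hash-enable sets
	 */
	static bool i40e_default_hena_has_ipv4_tcp(struct i40e_pf *pf)
	{
		u64 hena = i40e_pf_get_default_rss_hena(pf);

		return hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
	}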
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 9a5a75b..4842239 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -47,6 +47,11 @@
 #define I40E_DEV_ID_20G_KR2		0x1587
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
 
 #define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
 					 (d) == I40E_DEV_ID_QSFP_B  || \
@@ -120,6 +125,8 @@
 	I40E_MAC_X710,
 	I40E_MAC_XL710,
 	I40E_MAC_VF,
+	I40E_MAC_X722,
+	I40E_MAC_X722_VF,
 	I40E_MAC_GENERIC,
 };
 
@@ -213,7 +220,17 @@
 	bool dcb;
 	bool fcoe;
 	bool iscsi; /* Indicates iSCSI enabled */
-	bool mfp_mode_1;
+	bool flex10_enable;
+	bool flex10_capable;
+	u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN	0x0
+#define I40E_FLEX10_MODE_DCC		0x1
+#define I40E_FLEX10_MODE_DCI		0x2
+
+	u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR	0x1
+#define I40E_FLEX10_STATUS_VC_MODE	0x2
+
 	bool mgmt_cem;
 	bool ieee_1588;
 	bool iwarp;
@@ -423,6 +440,7 @@
 #define I40E_DCBX_MODE_CEE	0x1
 #define I40E_DCBX_MODE_IEEE	0x2
 	u32 numapps;
+	u32 tlv_status; /* CEE mode TLV status */
 	struct i40e_dcb_ets_config etscfg;
 	struct i40e_dcb_ets_config etsrec;
 	struct i40e_dcb_pfc_config pfc;
@@ -487,11 +505,13 @@
 
 	/* debug mask */
 	u32 debug_mask;
+	char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
 {
-	return hw->mac.type == I40E_MAC_VF;
+	return (hw->mac.type == I40E_MAC_VF ||
+		hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
@@ -588,19 +608,23 @@
 	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
 	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
 	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
-	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
+	/* Note: Bit 8 is reserved in X710 and XL710 */
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
 	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
 	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
 	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18,
+	/* Note: For non-tunnel packets INT_UDP_0 is the right status for
+	 * UDP header
+	 */
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
 	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
 					 << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -608,8 +632,8 @@
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
-					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -743,8 +767,7 @@
 					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
-					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
 	/* Note: These are predefined bit offsets */
@@ -920,12 +943,12 @@
 #define I40E_TXD_CTX_QW0_NATT_SHIFT	9
 #define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
-					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -937,6 +960,8 @@
 #define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
 					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_filter_program_desc {
 	__le32 qindex_flex_ptype_vsi;
 	__le32 rsvd;
@@ -955,15 +980,24 @@
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Values 0-30 are reserved for future use */
+	/* Note: Values 0-28 are reserved for future use.
+	 * Value 29, 30, 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
 	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
-	/* Note: Value 32 is reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
 	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
 	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
 	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
 	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Values 37-40 are reserved for future use */
+	/* Note: Values 37-38 are reserved for future use.
+	 * Value 39, 40, 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
 	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
 	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
 	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
 	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
@@ -990,8 +1024,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+				       BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1009,14 +1043,17 @@
 #define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
-					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
 						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
 #define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
 					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
 
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT	(0xEULL + \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
 #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
 					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1069,6 +1106,14 @@
 	u64 tx_errors;			/* tepc */
 };
 
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+	u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
 #ifdef I40E_FCOE
 /* Statistics collected per function for FCoE */
 struct i40e_fcoe_stats {
@@ -1134,6 +1179,8 @@
 	u64 fd_atr_match;
 	u64 fd_sb_match;
 	u64 fd_atr_tunnel_match;
+	u32 fd_atr_status;
+	u32 fd_sb_status;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 2d20af2..0f8d415 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -110,7 +110,9 @@
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,13 @@
 	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 23f95cd..d99c116 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -160,13 +160,8 @@
  **/
 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 {
-	struct i40e_hw *hw = &pf->hw;
-	u32 reg;
-
-	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-	reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-	i40e_flush(hw);
+	i40e_vc_notify_vf_reset(vf);
+	i40e_reset_vf(vf, false);
 }
 
 /**
@@ -282,16 +277,14 @@
 	}
 	tempmap = vecmap->rxq_map;
 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-		linklistmap |= (1 <<
-				(I40E_VIRTCHNL_SUPPORTED_QTYPES *
-				 vsi_queue_id));
+		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				    vsi_queue_id));
 	}
 
 	tempmap = vecmap->txq_map;
 	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-		linklistmap |= (1 <<
-				(I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
-				 + 1));
+		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+				     vsi_queue_id + 1));
 	}
 
 	next_q = find_first_bit(&linklistmap,
@@ -337,11 +330,23 @@
 		reg = (vector_id) |
 		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
 		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-		    (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
 		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
 		wr32(hw, reg_idx, reg);
 	}
 
+	/* If the VF is running in polling mode and using interrupt zero,
+	 * we need to disable auto-masking when enabling interrupt zero for VFs.
+	 */
+	if ((vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
+	    (vector_id == 0)) {
+		reg = rd32(hw, I40E_GLINT_CTL);
+		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
+			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
+			wr32(hw, I40E_GLINT_CTL, reg);
+		}
+	}
+
 irq_list_done:
 	i40e_flush(hw);
 }
@@ -542,11 +547,13 @@
 		if (vf->port_vlan_id)
 			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 		f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-				    vf->port_vlan_id, true, false);
+				    vf->port_vlan_id ? vf->port_vlan_id : -1,
+				    true, false);
 		if (!f)
 			dev_info(&pf->pdev->dev,
 				 "Could not allocate VF MAC addr\n");
-		f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+		f = i40e_add_filter(vsi, brdcast,
+				    vf->port_vlan_id ? vf->port_vlan_id : -1,
 				    true, false);
 		if (!f)
 			dev_info(&pf->pdev->dev,
@@ -835,6 +842,7 @@
 	i40e_alloc_vf_res(vf);
 	i40e_enable_vf_mappings(vf);
 	set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+	clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
 
 	/* tell the VF the reset is done */
 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -899,7 +907,7 @@
 		for (vf_id = 0; vf_id < tmp; vf_id++) {
 			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
 			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 		}
 	}
 	clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -925,8 +933,6 @@
 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
 		if (ret) {
-			dev_err(&pf->pdev->dev,
-				"Failed to enable SR-IOV, error %d.\n", ret);
 			pf->num_alloc_vfs = 0;
 			goto err_iov;
 		}
@@ -1123,12 +1129,16 @@
  *
  * called from the VF to request the API version used by the PF
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_virtchnl_version_info info = {
 		I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
 	};
 
+	vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+	if (VF_IS_V10(vf))
+		info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
 	return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
 				      I40E_SUCCESS, (u8 *)&info,
 				      sizeof(struct
@@ -1143,7 +1153,7 @@
  *
  * called from the VF to request its resources
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
 	struct i40e_virtchnl_vf_resource *vfres = NULL;
 	struct i40e_pf *pf = vf->pf;
@@ -1167,12 +1177,24 @@
 		len = 0;
 		goto err;
 	}
+	if (VF_IS_V11(vf))
+		vf->driver_caps = *(u32 *)msg;
+	else
+		vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+				  I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+				  I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
 	vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
 	vsi = pf->vsi[vf->lan_vsi_idx];
 	if (!vsi->info.pvid)
 		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
-
+	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
+		if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+			vfres->vf_offload_flags |=
+				I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+	} else {
+		vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+	}
 	vfres->num_vsis = num_vsis;
 	vfres->num_queue_pairs = vf->num_queue_pairs;
 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1773,9 +1795,14 @@
 		valid_len = sizeof(struct i40e_virtchnl_version_info);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
-	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 		valid_len = 0;
 		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		if (VF_IS_V11(vf))
+			valid_len = sizeof(u32);
+		else
+			valid_len = 0;
+		break;
 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
 		valid_len = sizeof(struct i40e_virtchnl_txq_info);
 		break;
@@ -1888,10 +1915,10 @@
 
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_VERSION:
-		ret = i40e_vc_get_version_msg(vf);
+		ret = i40e_vc_get_version_msg(vf, msg);
 		break;
 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-		ret = i40e_vc_get_vf_resources_msg(vf);
+		ret = i40e_vc_get_vf_resources_msg(vf, msg);
 		break;
 	case I40E_VIRTCHNL_OP_RESET_VF:
 		i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +1996,9 @@
 		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
 		vf = &pf->vf[vf_id];
 		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-		if (reg & (1 << bit_idx)) {
+		if (reg & BIT(bit_idx)) {
 			/* clear the bit in GLGEN_VFLRSTAT */
-			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 
 			if (!test_bit(__I40E_DOWN, &pf->state))
 				i40e_reset_vf(vf, true);
@@ -2023,7 +2050,8 @@
 	}
 
 	/* delete the temporary mac address */
-	i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+	i40e_del_filter(vsi, vf->default_lan_addr.addr,
+			vf->port_vlan_id ? vf->port_vlan_id : -1,
 			true, false);
 
 	/* Delete all the filters for this VSI - we're going to kill it
@@ -2088,7 +2116,12 @@
 		goto error_pvid;
 	}
 
-	if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
+	if (le16_to_cpu(vsi->info.pvid) ==
+	    (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+		/* duplicate request, so just return success */
+		goto error_pvid;
+
+	if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
 		dev_err(&pf->pdev->dev,
 			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
 			vf_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 09043c1..736f6f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -42,6 +42,9 @@
 #define I40E_VLAN_MASK			0xFFF
 #define I40E_PRIORITY_MASK		0x7000
 
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
 	I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@
 	u16 vf_id;
 	/* all VF vsis connect to the same parent */
 	enum i40e_switch_element_types parent_type;
+	struct i40e_virtchnl_version_info vf_ver;
+	u32 driver_caps; /* reported by VF driver */
 
 	/* VF Port Extender (PE) stag if used */
 	u16 stag;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index c1d25f8..f08450b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -60,17 +60,6 @@
 		hw->aq.arq.len  = I40E_VF_ARQLEN1;
 		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
 		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
-	} else {
-		hw->aq.asq.tail = I40E_PF_ATQT;
-		hw->aq.asq.head = I40E_PF_ATQH;
-		hw->aq.asq.len  = I40E_PF_ATQLEN;
-		hw->aq.asq.bal  = I40E_PF_ATQBAL;
-		hw->aq.asq.bah  = I40E_PF_ATQBAH;
-		hw->aq.arq.tail = I40E_PF_ARQT;
-		hw->aq.arq.head = I40E_PF_ARQH;
-		hw->aq.arq.len  = I40E_PF_ARQLEN;
-		hw->aq.arq.bal  = I40E_PF_ARQBAL;
-		hw->aq.arq.bah  = I40E_PF_ARQBAH;
 	}
 }
 
@@ -308,7 +297,7 @@
 
 	/* set starting point */
 	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
-				  I40E_PF_ATQLEN_ATQENABLE_MASK));
+				  I40E_VF_ATQLEN1_ATQENABLE_MASK));
 	wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
 	wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
@@ -337,7 +326,7 @@
 
 	/* set starting point */
 	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
-				  I40E_PF_ARQLEN_ARQENABLE_MASK));
+				  I40E_VF_ARQLEN1_ARQENABLE_MASK));
 	wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
 	wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
@@ -899,7 +888,7 @@
 	mutex_lock(&hw->aq.arq_mutex);
 
 	/* set next_to_use to head */
-	ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
 		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index e715bcc..c802209 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -34,8 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR	0x0001
-#define I40E_FW_API_VERSION_MINOR	0x0002
-#define I40E_FW_API_VERSION_A0_MINOR  0x0000
+#define I40E_FW_API_VERSION_MINOR	0x0004
 
 struct i40e_aq_desc {
 	__le16 flags;
@@ -133,12 +132,7 @@
 	i40e_aqc_opc_list_func_capabilities	= 0x000A,
 	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
 
-	i40e_aqc_opc_set_cppm_configuration	= 0x0103,
-	i40e_aqc_opc_set_arp_proxy_entry	= 0x0104,
-	i40e_aqc_opc_set_ns_proxy_entry		= 0x0105,
-
 	/* LAA */
-	i40e_aqc_opc_mng_laa		= 0x0106,   /* AQ obsolete */
 	i40e_aqc_opc_mac_address_read	= 0x0107,
 	i40e_aqc_opc_mac_address_write	= 0x0108,
 
@@ -260,7 +254,10 @@
 	/* Tunnel commands */
 	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
 	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
-	i40e_aqc_opc_tunnel_key_structure	= 0x0B10,
+	i40e_aqc_opc_set_rss_key	= 0x0B02,
+	i40e_aqc_opc_set_rss_lut	= 0x0B03,
+	i40e_aqc_opc_get_rss_key	= 0x0B04,
+	i40e_aqc_opc_get_rss_lut	= 0x0B05,
 
 	/* Async Events */
 	i40e_aqc_opc_event_lan_overflow		= 0x1001,
@@ -272,8 +269,6 @@
 	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
 
 	/* debug commands */
-	i40e_aqc_opc_debug_get_deviceid		= 0xFF00,
-	i40e_aqc_opc_debug_set_mode		= 0xFF01,
 	i40e_aqc_opc_debug_read_reg		= 0xFF03,
 	i40e_aqc_opc_debug_write_reg		= 0xFF04,
 	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
@@ -507,7 +502,8 @@
 #define I40E_AQC_SAN_ADDR_VALID		0x20
 #define I40E_AQC_PORT_ADDR_VALID	0x40
 #define I40E_AQC_WOL_ADDR_VALID		0x80
-#define I40E_AQC_ADDR_VALID_MASK	0xf0
+#define I40E_AQC_MC_MAG_EN_VALID	0x100
+#define I40E_AQC_ADDR_VALID_MASK	0x1F0
 	u8	reserved[6];
 	__le32	addr_high;
 	__le32	addr_low;
@@ -530,7 +526,9 @@
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
 #define I40E_AQC_WRITE_TYPE_PORT	0x8000
-#define I40E_AQC_WRITE_TYPE_MASK	0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
+#define I40E_AQC_WRITE_TYPE_MASK	0xC000
+
 	__le16	mac_sah;
 	__le32	mac_sal;
 	u8	reserved[8];
@@ -824,8 +822,12 @@
 					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
 	/* queueing option section */
 	u8	queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
 #define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
 	u8	queueing_opt_reserved[3];
 	/* scheduler section */
 	u8	up_enable_bits;
@@ -1066,6 +1068,7 @@
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK		0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
 	u8	reserved[8];
 };
@@ -2093,6 +2096,46 @@
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
 
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+	__le16	vsi_id;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+	__le16	vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
+	__le16	flags;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
 /* tunnel key structure 0x0B10 */
 
 struct i40e_aqc_tunnel_key_structure_A0 {
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 39fcb1d..d45d0ae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -54,6 +54,15 @@
 		case I40E_DEV_ID_20G_KR2:
 			hw->mac.type = I40E_MAC_XL710;
 			break;
+		case I40E_DEV_ID_SFP_X722:
+		case I40E_DEV_ID_1G_BASE_T_X722:
+		case I40E_DEV_ID_10G_BASE_T_X722:
+			hw->mac.type = I40E_MAC_X722;
+			break;
+		case I40E_DEV_ID_X722_VF:
+		case I40E_DEV_ID_X722_VF_HV:
+			hw->mac.type = I40E_MAC_X722_VF;
+			break;
 		case I40E_DEV_ID_VF:
 		case I40E_DEV_ID_VF_HV:
 			hw->mac.type = I40E_MAC_VF;
@@ -72,6 +81,212 @@
 }
 
 /**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+	switch (stat_err) {
+	case 0:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
  * i40evf_debug_aq
  * @hw: debug mask related to admin queue
  * @mask: debug mask
@@ -146,7 +361,7 @@
 {
 	if (hw->aq.asq.len)
 		return !!(rd32(hw, hw->aq.asq.len) &
-			  I40E_PF_ATQLEN_ATQENABLE_MASK);
+			  I40E_VF_ATQLEN1_ATQENABLE_MASK);
 	else
 		return false;
 }
@@ -177,6 +392,169 @@
 	return status;
 }
 
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set true to set the table, false to get the table
+ *
+ * Internal function to get or set RSS look up table
+ **/
+static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+					   u16 vsi_id, bool pf_lut,
+					   u8 *lut, u16 lut_size,
+					   bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_lut);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_lut);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+	if (pf_lut)
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+	else
+		cmd_resp->flags |= cpu_to_le16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)lut));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)lut));
+
+	status = i40evf_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       false);
+}
+
+/**
+ * i40evf_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set true, for VSI table set false
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+				  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set true to set the key, false to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      bool set)
+{
+	i40e_status status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_set_rss_key);
+	else
+		i40evf_fill_default_direct_cmd_desc(&desc,
+						    i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			cpu_to_le16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->addr_high = cpu_to_le32(high_16_bits((u64)key));
+	cmd_resp->addr_low = cpu_to_le32(lower_32_bits((u64)key));
+
+	status = i40evf_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40evf_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * get the RSS key per VSI
+ **/
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
+}
+
+/**
+ * i40evf_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 vsi_id,
+				  struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
+}
+
 
 /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
  * hardware to a bit-field that can be used by SW to more easily determine the
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index 931c880..00ed24b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -62,6 +62,7 @@
 struct i40e_hmc_pd_entry {
 	struct i40e_hmc_bp bp;
 	u32 sd_index;
+	bool rsrc_pg;
 	bool valid;
 };
 
@@ -126,8 +127,8 @@
 		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
-		(1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -146,7 +147,7 @@
 		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
 		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
 		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
-	val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
 	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
 	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
 	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
@@ -218,7 +219,8 @@
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
 					      struct i40e_hmc_info *hmc_info,
-					      u32 pd_index);
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
 					struct i40e_hmc_info *hmc_info,
 					u32 idx);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 58e37a4..55ae4b0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -60,6 +60,19 @@
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
+
+i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+				  bool pf_lut, u8 *lut, u16 lut_size);
+i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw,
+				  u16 seid,
+				  struct i40e_aqc_get_set_rss_key_data *key);
+i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw,
+				  u16 seid,
+				  struct i40e_aqc_get_set_rss_key_data *key);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 3cc7376..10febcf 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -27,1580 +27,6 @@
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
-#define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
-#define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
-#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
-#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
-#define I40E_GL_ATQH_ATQH_SHIFT 0
-#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
-#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
-#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
-#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
-#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
-#define I40E_GL_ATQT_ATQT_SHIFT 0
-#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
-#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
-#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
-#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
-#define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
-#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
-#define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
-#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
-#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
-#define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
-#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
-#define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
-#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
-#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
-#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
-#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
-#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
-#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
-#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
-#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
-#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
-#define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
-#define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
-#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
-#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
-#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
-#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
-#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
-#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
-#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
-#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
-#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
-#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
-#define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
-#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
-#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511
-#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
-#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_RQCTL_MAX_INDEX 1535
-#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_TQCTL_MAX_INDEX 1535
-#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
-#define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
-#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
-#define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
-#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_MAX_INDEX 127
-#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_ITR0_MAX_INDEX 2
-#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN_MAX_INDEX 2
-#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPINT_AEQCTL_MAX_INDEX 127
-#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_VPINT_CEQCTL_MAX_INDEX 511
-#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLST0_MAX_INDEX 127
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_RATE0_MAX_INDEX 127
-#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
-#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_RATEN_MAX_INDEX 511
-#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
-#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
-#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
-#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
-#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
-#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
-#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QRX_ENA_MAX_INDEX 1535
-#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
-#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QRX_TAIL_MAX_INDEX 1535
-#define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_CTL_MAX_INDEX 1535
-#define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
-#define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
-#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_ENA_MAX_INDEX 1535
-#define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
-#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
-#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_HEAD_MAX_INDEX 1535
-#define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
-#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_TAIL_MAX_INDEX 1535
-#define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_MAPENA_MAX_INDEX 127
-#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QTABLE_MAX_INDEX 15
-#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QBASE_MAX_INDEX 383
-#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QTABLE_MAX_INDEX 7
-#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
-#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
-#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
-#define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
-#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
-#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
-#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
-#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
-#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
-#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_METF_MAX_INDEX 3
-#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
-#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
-#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
-#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
-#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
 #define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
 #define I40E_VFMSIX_PBA1_MAX_INDEX 19
 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
@@ -1623,1525 +49,6 @@
 #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
 #define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
-#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
-#define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
-#define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
-#define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
-#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
-#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
-#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
-#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
-#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
-#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
-#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
-#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
-#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
-#define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
-#define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
-#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
-#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
-#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
-#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 25
-#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
-#define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
-#define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
-#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
-#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_RX_MAX_INDEX 127
-#define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_TX_MAX_INDEX 127
-#define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
-#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
-#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
-#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
-#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
-#define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
-#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
-#define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
-#define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
-#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
-#define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
-#define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
-#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
-#define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
-#define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
-#define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
-#define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
-#define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
-#define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
-#define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
-#define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
-#define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
-#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
-#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
-#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAH_MAX_INDEX 3
-#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
-#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
-#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
-#define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAL_MAX_INDEX 3
-#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
 #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
 #define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
@@ -3366,4 +273,64 @@
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
 #define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#endif
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* _I40E_REGISTER_H_ */
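
The register header above only supplies SHIFT/MASK pairs built with I40E_MASK(); callers never hard-code bit positions. A minimal sketch of the usual pattern for one of the new fields (hedged: 'hw' and the rd32()/wr32() accessors are the ones this driver already uses elsewhere in the series):

	/* read a field: mask first, then shift down */
	u32 codes = rd32(hw, I40E_VFPE_CQPERRCODES1);
	u16 major = (codes & I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK) >>
		    I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT;

	/* write a single-bit field: the *_MASK defines are pre-shifted,
	 * so flags can be OR'd together and written directly
	 */
	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK);
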
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 395f32f..7e91d82 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -366,15 +366,32 @@
  **/
 static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
 {
-	u32 val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
-		  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
-		  I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
-		  I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
-		  /* allow 00 to be written to the index */
+	u16 flags = q_vector->tx.ring[0].flags;
 
-	wr32(&vsi->back->hw,
-	     I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
-	     val);
+	if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+		u32 val;
+
+		if (q_vector->arm_wb_state)
+			return;
+
+		val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK;
+
+		wr32(&vsi->back->hw,
+		     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+					  vsi->base_vector - 1),
+		     val);
+		q_vector->arm_wb_state = true;
+	} else {
+		u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
+			  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+			  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK;
+			  /* allow 00 to be written to the index */
+
+		wr32(&vsi->back->hw,
+		     I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
+					  vsi->base_vector - 1), val);
+	}
 }
 
 /**
@@ -404,7 +421,7 @@
 	 *  20-1249MB/s bulk   (8000 ints/s)
 	 */
 	bytes_per_int = rc->total_bytes / rc->itr;
-	switch (rc->itr) {
+	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
 		if (bytes_per_int > 10)
 			new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +434,14 @@
 		break;
 	case I40E_BULK_LATENCY:
 		if (bytes_per_int <= 20)
-			rc->latency_range = I40E_LOW_LATENCY;
+			new_latency_range = I40E_LOW_LATENCY;
+		break;
+	default:
+		if (bytes_per_int <= 20)
+			new_latency_range = I40E_LOW_LATENCY;
 		break;
 	}
+	rc->latency_range = new_latency_range;
 
 	switch (new_latency_range) {
 	case I40E_LOWEST_LATENCY:
@@ -435,42 +457,14 @@
 		break;
 	}
 
-	if (new_itr != rc->itr) {
-		/* do an exponential smoothing */
-		new_itr = (10 * new_itr * rc->itr) /
-			  ((9 * new_itr) + rc->itr);
-		rc->itr = new_itr & I40E_MAX_ITR;
-	}
+	if (new_itr != rc->itr)
+		rc->itr = new_itr;
 
 	rc->total_bytes = 0;
 	rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-	u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-	struct i40e_hw *hw = &q_vector->vsi->back->hw;
-	u32 reg_addr;
-	u16 old_itr;
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
-	old_itr = q_vector->rx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->rx);
-	if (old_itr != q_vector->rx.itr)
-		wr32(hw, reg_addr, q_vector->rx.itr);
-
-	reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
-	old_itr = q_vector->tx.itr;
-	i40e_set_new_dynamic_itr(&q_vector->tx);
-	if (old_itr != q_vector->tx.itr)
-		wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/*
  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
  * @tx_ring: the tx ring to set up
  *
@@ -873,7 +867,7 @@
 		return;
 
 	/* did the hardware decode the packet and checksum? */
-	if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
 		return;
 
 	/* both known and outer_ip must be set for the below code to work */
@@ -888,25 +882,25 @@
 		ipv6 = true;
 
 	if (ipv4 &&
-	    (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-			 (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
 		goto checksum_fail;
 
 	/* likely incorrect csum if alternate IP extension headers found */
 	if (ipv6 &&
-	    rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
 		/* don't increment checksum err here, non-fatal err */
 		return;
 
 	/* there was some L4 error, count error and punt packet to the stack */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
 		goto checksum_fail;
 
 	/* handle packets that were not able to be checksummed due
 	 * to arrival speed, in this case the stack can compute
 	 * the csum.
 	 */
-	if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
 		return;
 
 	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1021,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1063,8 +1057,8 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1110,7 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			struct i40e_rx_buffer *next_buffer;
 
 			next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1120,7 @@
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			continue;
 		}
@@ -1141,7 +1135,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 #ifdef I40E_FCOE
@@ -1202,7 +1196,7 @@
 		rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
 			I40E_RXD_QW1_STATUS_SHIFT;
 
-		if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -1220,7 +1214,7 @@
 
 		rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
 			   I40E_RXD_QW1_ERROR_SHIFT;
-		rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+		rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1232,13 @@
 		I40E_RX_INCREMENT(rx_ring, i);
 
 		if (unlikely(
-		    !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+		    !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
 			rx_ring->rx_stats.non_eop_descs++;
 			continue;
 		}
 
 		/* ERR_MASK will only have valid bits if EOP set */
-		if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+		if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
 			dev_kfree_skb_any(skb);
 			/* TODO: shouldn't we increment a counter indicating the
 			 * drop?
@@ -1262,7 +1256,7 @@
 
 		i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-		vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+		vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
 			 ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
 			 : 0;
 		i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1281,6 +1275,67 @@
 }
 
 /**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+					  struct i40e_q_vector *q_vector)
+{
+	struct i40e_hw *hw = &vsi->back->hw;
+	u16 old_itr;
+	int vector;
+	u32 val;
+
+	vector = (q_vector->v_idx + vsi->base_vector);
+	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+		old_itr = q_vector->rx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->rx);
+		if (old_itr != q_vector->rx.itr) {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+			(I40E_RX_ITR <<
+				I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+			(q_vector->rx.itr <<
+				I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+		} else {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+			I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+			(I40E_ITR_NONE <<
+				I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back,
+					 BIT(q_vector->v_idx));
+	}
+	if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+		old_itr = q_vector->tx.itr;
+		i40e_set_new_dynamic_itr(&q_vector->tx);
+		if (old_itr != q_vector->tx.itr) {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+				(I40E_TX_ITR <<
+				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
+				(q_vector->tx.itr <<
+				   I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+
+		} else {
+			val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+				I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+				(I40E_ITR_NONE <<
+				   I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT);
+		}
+		if (!test_bit(__I40E_DOWN, &vsi->state))
+			wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+	} else {
+		i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+	}
+}
+
+/**
  * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
@@ -1334,15 +1389,12 @@
 		return budget;
 	}
 
+	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+		q_vector->arm_wb_state = false;
+
 	/* Work is done so exit the polling mode and re-enable the interrupt */
 	napi_complete(napi);
-	if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-	    ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-		i40e_update_dynamic_itr(q_vector);
-
-	if (!test_bit(__I40E_DOWN, &vsi->state))
-		i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+	i40e_update_enable_itr(vsi, q_vector);
 	return 0;
 }
 
@@ -1476,11 +1528,15 @@
 	struct iphdr *this_ip_hdr;
 	u32 network_hdr_len;
 	u8 l4_hdr = 0;
+	struct udphdr *oudph;
+	struct iphdr *oiph;
 	u32 l4_tunnel = 0;
 
 	if (skb->encapsulation) {
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
+			oudph = udp_hdr(skb);
+			oiph = ip_hdr(skb);
 			l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
 			*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
 			break;
@@ -1519,6 +1575,15 @@
 		}
 
 
+		if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) &&
+		    (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING)        &&
+		    (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) {
+			oudph->check = ~csum_tcpudp_magic(oiph->saddr,
+					oiph->daddr,
+					(skb->len - skb_transport_offset(skb)),
+					IPPROTO_UDP, 0);
+			*cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
+		}
 	} else {
 		network_hdr_len = skb_network_header_len(skb);
 		this_ip_hdr = ip_hdr(skb);
@@ -1841,6 +1906,8 @@
 	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
 						   tx_ring->queue_index)))
 		writel(i, tx_ring->tail);
+	else
+		prefetchw(tx_desc + 1);
 
 	return;
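
Two of the i40e_txrx.c hunks above are behavioural rather than mechanical: i40e_set_new_dynamic_itr() now switches on the tentative new_latency_range instead of the raw ITR value (and drops the exponential smoothing), and napi_poll defers interrupt re-enabling to the new i40e_update_enable_itr(). A minimal standalone sketch of the corrected latency-range transitions; names are local to this example, and the I40E_LOW_LATENCY branch is not visible in the hunk above, so it is left empty here:

	enum latency_range { LOWEST, LOW, BULK };

	static enum latency_range next_range(enum latency_range range,
					     unsigned int bytes_per_int)
	{
		switch (range) {
		case LOWEST:
			if (bytes_per_int > 10)
				range = LOW;	/* traffic picking up */
			break;
		case LOW:
			/* thresholds for this state are outside the hunk */
			break;
		case BULK:
		default:
			if (bytes_per_int <= 20)
				range = LOW;	/* traffic tailing off */
			break;
		}
		return range;
	}
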
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index e7a34f8..9a30f5d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -66,17 +66,29 @@
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-	((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-	((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-	((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
+#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
+		BIT(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+		BIT(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+
+#define i40e_pf_get_default_rss_hena(pf) \
+	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
+		I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,16 +141,16 @@
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING	4
 
-#define I40E_TX_FLAGS_CSUM		(u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN		(u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN		(u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO		(u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4		(u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6		(u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC		(u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO		(u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB		(u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL	(u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM		BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
+#define I40E_TX_FLAGS_TSO		BIT(3)
+#define I40E_TX_FLAGS_IPV4		BIT(4)
+#define I40E_TX_FLAGS_IPV6		BIT(5)
+#define I40E_TX_FLAGS_FCCRC		BIT(6)
+#define I40E_TX_FLAGS_FSO		BIT(7)
+#define I40E_TX_FLAGS_FD_SB		BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
@@ -250,6 +262,10 @@
 	bool ring_active;		/* is ring online or not */
 	bool arm_wb;		/* do something to arm write back */
 
+	u16 flags;
+#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)
+#define I40E_TXR_FLAGS_OUTER_UDP_CSUM	BIT(1)
+
 	/* stats structs */
 	struct i40e_queue_stats	stats;
 	struct u64_stats_sync syncp;
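
The HENA defaults above are 64-bit bitmaps indexed by packet-classifier type, so the long-hand ((u64)1 << pctype) terms become BIT_ULL(); BIT() expands to (1UL << n) and cannot represent bit positions of 32 or more on 32-bit builds, while BIT_ULL() is (1ULL << n). A short sketch of how the new i40e_pf_get_default_rss_hena() helper is typically consumed (hedged: 'pf' and 'hw' stand in for the usual driver structures):

	u64 hena = i40e_pf_get_default_rss_hena(pf);

	/* the 64-bit bitmap is programmed through two 32-bit VF registers */
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

	/* membership test for one packet-classifier type */
	if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
		; /* IPv4/TCP flows are spread by RSS */
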
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index c463ec4..24a2693 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -47,6 +47,11 @@
 #define I40E_DEV_ID_20G_KR2		0x1587
 #define I40E_DEV_ID_VF			0x154C
 #define I40E_DEV_ID_VF_HV		0x1571
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
 
 #define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
 					 (d) == I40E_DEV_ID_QSFP_B  || \
@@ -120,6 +125,8 @@
 	I40E_MAC_X710,
 	I40E_MAC_XL710,
 	I40E_MAC_VF,
+	I40E_MAC_X722,
+	I40E_MAC_X722_VF,
 	I40E_MAC_GENERIC,
 };
 
@@ -213,7 +220,17 @@
 	bool dcb;
 	bool fcoe;
 	bool iscsi; /* Indicates iSCSI enabled */
-	bool mfp_mode_1;
+	bool flex10_enable;
+	bool flex10_capable;
+	u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN	0x0
+#define I40E_FLEX10_MODE_DCC		0x1
+#define I40E_FLEX10_MODE_DCI		0x2
+
+	u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR	0x1
+#define I40E_FLEX10_STATUS_VC_MODE	0x2
+
 	bool mgmt_cem;
 	bool ieee_1588;
 	bool iwarp;
@@ -417,6 +434,7 @@
 
 struct i40e_dcbx_config {
 	u32 numapps;
+	u32 tlv_status; /* CEE mode TLV status */
 	struct i40e_ieee_ets_config etscfg;
 	struct i40e_ieee_ets_recommend etsrec;
 	struct i40e_ieee_pfc_config pfc;
@@ -481,11 +499,13 @@
 
 	/* debug mask */
 	u32 debug_mask;
+	char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
 {
-	return hw->mac.type == I40E_MAC_VF;
+	return (hw->mac.type == I40E_MAC_VF ||
+		hw->mac.type == I40E_MAC_X722_VF);
 }
 
 struct i40e_driver_version {
@@ -582,19 +602,23 @@
 	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
 	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
 	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
-	I40E_RX_DESC_STATUS_PIF_SHIFT		= 8,
+	/* Note: Bit 8 is reserved in X710 and XL710 */
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
 	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
 	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
 	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
 	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
 	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
 	I40E_RX_DESC_STATUS_RESERVED_SHIFT	= 16, /* 2 BITS */
-	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18,
+	/* Note: For non-tunnel packets, INT_UDP_0 is the right status bit
+	 * for the UDP header
+	 */
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
 	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT	0
-#define I40E_RXD_QW1_STATUS_MASK	(((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
 					 << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +626,8 @@
 					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK	(0x1UL << \
-					 I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+				    BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
 	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
@@ -737,8 +761,7 @@
 					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK	(0x1ULL << \
-					 I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
 	/* Note: These are predefined bit offsets */
@@ -914,12 +937,12 @@
 #define I40E_TXD_CTX_QW0_NATT_SHIFT	9
 #define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING	(0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	(0x1ULL << \
-					 I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+				       BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -931,6 +954,8 @@
 #define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
 					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
 struct i40e_filter_program_desc {
 	__le32 qindex_flex_ptype_vsi;
 	__le32 rsvd;
@@ -949,15 +974,24 @@
 
 /* Packet Classifier Types for filters */
 enum i40e_filter_pctype {
-	/* Note: Values 0-30 are reserved for future use */
+	/* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30 and 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
 	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
-	/* Note: Value 32 is reserved for future use */
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
 	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
 	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
 	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
 	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
-	/* Note: Values 37-40 are reserved for future use */
+	/* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40 and 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
 	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
 	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
 	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
 	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
@@ -984,8 +1018,8 @@
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	(0x1FFUL << \
-					 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+				       BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
 #define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
@@ -1003,8 +1037,7 @@
 #define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	(0x1ULL << \
-					 I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
 						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1063,6 +1096,14 @@
 	u64 tx_errors;			/* tepc */
 };
 
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+	u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
 /* Statistics collected by the MAC */
 struct i40e_hw_port_stats {
 	/* eth stats collected by the port */
@@ -1109,6 +1150,8 @@
 	u64 fd_atr_match;
 	u64 fd_sb_match;
 	u64 fd_atr_tunnel_match;
+	u32 fd_atr_status;
+	u32 fd_sb_status;
 	/* EEE LPI */
 	u32 tx_lpi_status;
 	u32 rx_lpi_status;
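
The descriptor-status conversions in i40e_type.h above all serve one pattern: qword 1 of the Rx writeback descriptor is masked and shifted into rx_status, then individual bits are tested with BIT(). A compressed sketch of that flow, reconstructed from the masks above (the descriptor field name follows the driver's union i40e_rx_desc layout and is an assumption here):

	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;

	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
		return;		/* hardware has not written this one back yet */
	if (rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT))
		;		/* last fragment of the packet */
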
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 59f62f0..e6db20e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -110,7 +110,9 @@
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR		1
-#define I40E_VIRTCHNL_VERSION_MINOR		0
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
 struct i40e_virtchnl_version_info {
 	u32 major;
 	u32 minor;
@@ -129,7 +131,8 @@
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with a u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,13 @@
 	u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2	0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE	0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN	0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
 
 struct i40e_virtchnl_vf_resource {
 	u16 num_vsis;
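
With I40E_VIRTCHNL_VERSION_MINOR bumped to 1, a VF that negotiates 1.1 passes a u32 bitmap of the offload flags above in its GET_VF_RESOURCES request, while a 1.0 VF still sends no payload. A hedged sketch of the request side (send_to_pf() is a hypothetical stand-in for the driver's admin-queue send path, not a real i40evf symbol):

	static int request_vf_resources(bool pf_is_v11)
	{
		u32 caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
			   I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
			   I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;

		if (pf_is_v11)	/* API 1.1: advertise our capabilities */
			return send_to_pf(I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
					  (u8 *)&caps, sizeof(caps));
		/* API 1.0 PFs expect this request with no parameters */
		return send_to_pf(I40E_VIRTCHNL_OP_GET_VF_RESOURCES, NULL, 0);
	}
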
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index fea3b75..3817cbb 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -101,6 +101,8 @@
 #define MAX_RX_QUEUES 8
 #define MAX_TX_QUEUES MAX_RX_QUEUES
 
+#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+
 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
@@ -115,6 +117,7 @@
 	u8 num_ringpairs;	/* total number of ring pairs in vector */
 	int v_idx;	  /* vector index in list */
 	char name[IFNAMSIZ + 9];
+	bool arm_wb_state;
 	cpumask_var_t affinity_mask;
 };
 
@@ -207,33 +210,39 @@
 	struct msix_entry *msix_entries;
 
 	u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED              (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING                (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED                 (u32)(1 << 10)
-/* duplcates for common code */
+#define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED              BIT(8)
+#define I40EVF_FLAG_RESET_PENDING                BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE		BIT(11)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE	BIT(12)
+/* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED		 0
 #define I40E_FLAG_DCB_ENABLED			 0
 #define I40E_FLAG_IN_NETPOLL			 I40EVF_FLAG_IN_NETPOLL
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
+#define I40E_FLAG_WB_ON_ITR_CAPABLE		I40EVF_FLAG_WB_ON_ITR_CAPABLE
+#define I40E_FLAG_OUTER_UDP_CSUM_CAPABLE	I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE
 	/* flags for admin queue service task */
 	u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES		(u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES		(u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		(u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		(u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		(u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		(u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		(u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS		(u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET		(u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES		BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES		BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER		BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER		BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER		BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER		BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES		BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS		BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET		BIT(8)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS		BIT(9)
+#define I40EVF_FLAG_AQ_GET_CONFIG		BIT(10)
 
 	/* OS defined structs */
 	struct net_device *netdev;
@@ -249,8 +258,17 @@
 	bool netdev_registered;
 	bool link_up;
 	enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+			    I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+			  I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
 	struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
 	struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+	struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+		       ((_a)->pf_version.minor == 1))
 	u16 msg_enable;
 	struct i40e_eth_stats current_stats;
 	struct i40e_vsi vsi;
@@ -264,6 +282,7 @@
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
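
The aq_required bits above form a deferred-work bitmap: other paths set a flag, and the watchdog in i40evf_main.c drains one flag per pass, clearing it once the corresponding admin-queue operation has been issued. A small sketch of the producer side using only names defined above (illustrative; the consumer side is visible in the watchdog hunks below):

	/* ask the watchdog to (re)program RSS on its next pass */
	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;

	/* feature checks against the PF-reported offload flags */
	if (RSS_AQ(adapter))
		;	/* PF lets us program the RSS key/LUT via admin queue */
	if (VLAN_ALLOWED(adapter))
		;	/* PF allows this VF to add VLAN filters */
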
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 2b53c87..4790437 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -381,11 +381,11 @@
 
 	switch (cmd->flow_type) {
 	case TCP_V4_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V4_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 
@@ -397,11 +397,11 @@
 		break;
 
 	case TCP_V6_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 	case UDP_V6_FLOW:
-		if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+		if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
 		break;
 
@@ -479,10 +479,10 @@
 	case TCP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -491,10 +491,10 @@
 	case TCP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+			hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
 			break;
 		default:
 			return -EINVAL;
@@ -503,12 +503,12 @@
 	case UDP_V4_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 			break;
 		default:
 			return -EINVAL;
@@ -517,12 +517,12 @@
 	case UDP_V6_FLOW:
 		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
 		case 0:
-			hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				  ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				  BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-			hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-				 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+			hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+				 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 			break;
 		default:
 			return -EINVAL;
@@ -535,7 +535,7 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
 		break;
 	case AH_ESP_V6_FLOW:
 	case AH_V6_FLOW:
@@ -544,15 +544,15 @@
 		if ((nfc->data & RXH_L4_B_0_1) ||
 		    (nfc->data & RXH_L4_B_2_3))
 			return -EINVAL;
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+		hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
 		break;
 	case IPV4_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
 		break;
 	case IPV6_FLOW:
-		hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-			((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+		hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+			 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
 		break;
 	default:
 		return -EINVAL;
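
The ethtool hunks above all test or edit bits in a single 64-bit hash-enable (HENA) value, which the VF hardware exposes as a pair of 32-bit registers. A condensed sketch of the read-modify-write surrounding those hunks (reconstructed rather than copied from the patch; 'hw' is the adapter's i40e_hw):

	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);

	/* e.g. enable 4-tuple (ports included) hashing for IPv4/TCP */
	hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);

	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
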
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4ab4ebb..e85849b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -34,10 +34,10 @@
 static const char i40evf_driver_string[] =
 	"Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.5"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
-	"Copyright (c) 2013 - 2014 Intel Corporation.";
+	"Copyright (c) 2013 - 2015 Intel Corporation.";
 
 /* i40evf_pci_tbl - PCI Device ID Table
  *
@@ -49,6 +49,7 @@
  */
 static const struct pci_device_id i40evf_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
 	/* required last entry */
 	{0, }
 };
@@ -203,7 +204,7 @@
 
 	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
 				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
-	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA_ADMINQ_MASK);
+	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
 
 	/* read flush */
 	rd32(hw, I40E_VFGEN_RSTAT);
@@ -240,11 +241,11 @@
 	int i;
 
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & (1 << (i - 1))) {
+		if (mask & BIT(i - 1)) {
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
 			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
 			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			     I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+			     I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
 		}
 	}
 }
@@ -262,17 +263,17 @@
 
 	if (mask & 1) {
 		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
-		dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
 			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-			   I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
 		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
 	}
 	for (i = 1; i < adapter->num_msix_vectors; i++) {
-		if (mask & (1 << i)) {
+		if (mask & BIT(i)) {
 			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
-			dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
 				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
-				   I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
 			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
 		}
 	}
@@ -312,7 +313,7 @@
 
 
 	val = rd32(hw, I40E_VFINT_DYN_CTL01);
-	val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+	val = val | I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
 	wr32(hw, I40E_VFINT_DYN_CTL01, val);
 
 	/* schedule work on the private workqueue */
@@ -377,7 +378,7 @@
 	q_vector->tx.count++;
 	q_vector->tx.latency_range = I40E_LOW_LATENCY;
 	q_vector->num_ringpairs++;
-	q_vector->ring_mask |= (1 << t_idx);
+	q_vector->ring_mask |= BIT(t_idx);
 }
 
 /**
@@ -406,7 +407,7 @@
 	/* The ideal configuration...
 	 * We have enough vectors to map one per queue.
 	 */
-	if (q_vectors == (rxr_remaining * 2)) {
+	if (q_vectors >= (rxr_remaining * 2)) {
 		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
 			i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -892,8 +893,10 @@
 					break;
 				}
 			}
+			if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+				found = true;
 		}
-		if (found) {
+		if (!found) {
 			f->remove = true;
 			adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 		}
@@ -1170,6 +1173,113 @@
 }
 
 /**
+ * i40evf_configure_rss_aq - Prepare for RSS using AQ commands
+ * @vsi: vsi structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
+{
+	struct i40e_aqc_get_set_rss_key_data rss_key;
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_hw *hw = &adapter->hw;
+	int ret = 0, i;
+	u8 *rss_lut;
+
+	if (!vsi->id)
+		return;
+
+	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+		/* bail because we already have a command pending */
+		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
+			adapter->current_op);
+		return;
+	}
+
+	memset(&rss_key, 0, sizeof(rss_key));
+	memcpy(&rss_key, seed, sizeof(rss_key));
+
+	rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL);
+	if (!rss_lut)
+		return;
+
+	/* Populate the LUT with the max no. of PF queues in round-robin fashion */
+	for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++)
+		rss_lut[i] = i % adapter->num_active_queues;
+
+	ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key);
+	if (ret) {
+		dev_err(&adapter->pdev->dev,
+			"Cannot set RSS key, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+		return;
+	}
+
+	ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut,
+				    (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4);
+	if (ret)
+		dev_err(&adapter->pdev->dev,
+			"Cannot set RSS lut, err %s aq_err %s\n",
+			i40evf_stat_str(hw, ret),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
+}
+
+/**
+ * i40evf_configure_rss_reg - Prepare for RSS if used
+ * @adapter: board private structure
+ * @seed: RSS hash seed
+ **/
+static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
+				     const u8 *seed)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u32 *seed_dw = (u32 *)seed;
+	u32 cqueue = 0;
+	u32 lut = 0;
+	int i, j;
+
+	/* Fill out hash function seed */
+	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
+
+	/* Populate the LUT with max no. of PF queues in round robin fashion */
+	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+		lut = 0;
+		for (j = 0; j < 4; j++) {
+			if (cqueue == adapter->num_active_queues)
+				cqueue = 0;
+			lut |= ((cqueue) << (8 * j));
+			cqueue++;
+		}
+		wr32(hw, I40E_VFQF_HLUT(i), lut);
+	}
+	i40e_flush(hw);
+}
+
+/**
+ * i40evf_configure_rss - Prepare for RSS
+ * @adapter: board private structure
+ **/
+static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+{
+	struct i40e_hw *hw = &adapter->hw;
+	u8 seed[I40EVF_HKEY_ARRAY_SIZE];
+	u64 hena;
+
+	netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+
+	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+	hena = I40E_DEFAULT_RSS_HENA;
+	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+	if (RSS_AQ(adapter))
+		i40evf_configure_rss_aq(&adapter->vsi, seed);
+	else
+		i40evf_configure_rss_reg(adapter, seed);
+}
+
+/**
  * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
  * @adapter: board private structure to initialize
  *
@@ -1369,6 +1479,10 @@
 		}
 		goto watchdog_done;
 	}
+	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+		i40evf_send_vf_config_msg(adapter);
+		goto watchdog_done;
+	}
 
 	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
 		i40evf_disable_queues(adapter);
@@ -1410,6 +1524,16 @@
 		goto watchdog_done;
 	}
 
+	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
+		/* This message goes straight to the firmware, not the
+		 * PF, so we don't have to set current_op as we will
+		 * not get a response through the ARQ.
+		 */
+		i40evf_configure_rss(adapter);
+		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		goto watchdog_done;
+	}
+
 	if (adapter->state == __I40EVF_RUNNING)
 		i40evf_request_stats(adapter);
 watchdog_done:
@@ -1432,45 +1556,6 @@
 	schedule_work(&adapter->adminq_task);
 }
 
-/**
- * i40evf_configure_rss - Prepare for RSS
- * @adapter: board private structure
- **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
-{
-	u32 rss_key[I40E_VFQF_HKEY_MAX_INDEX + 1];
-	struct i40e_hw *hw = &adapter->hw;
-	u32 cqueue = 0;
-	u32 lut = 0;
-	int i, j;
-	u64 hena;
-
-	/* Hash type is configured by the PF - we just supply the key */
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-
-	/* Fill out hash function seed */
-	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_VFQF_HKEY(i), rss_key[i]);
-
-	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-	hena = I40E_DEFAULT_RSS_HENA;
-	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
-	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
-
-	/* Populate the LUT with max no. of queues in round robin fashion */
-	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-		lut = 0;
-		for (j = 0; j < 4; j++) {
-			if (cqueue == adapter->num_active_queues)
-				cqueue = 0;
-			lut |= ((cqueue) << (8 * j));
-			cqueue++;
-		}
-		wr32(hw, I40E_VFQF_HLUT(i), lut);
-	}
-	i40e_flush(hw);
-}
-
 #define I40EVF_RESET_WAIT_MS 10
 #define I40EVF_RESET_WAIT_COUNT 500
 /**
@@ -1604,7 +1689,8 @@
 		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
 			 err);
 
-	i40evf_map_queues(adapter);
+	adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
 	/* re-add all MAC filters */
 	list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1614,7 +1700,7 @@
 	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
 		f->add = true;
 	}
-	adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
 	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
 	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 	i40evf_misc_irq_enable(adapter);
@@ -1693,34 +1779,34 @@
 	/* check for error indications */
 	val = rd32(hw, hw->aq.arq.len);
 	oldval = val;
-	if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
-		val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
 	}
-	if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
-		val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
 	}
-	if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
 		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
-		val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
 	}
 	if (oldval != val)
 		wr32(hw, hw->aq.arq.len, val);
 
 	val = rd32(hw, hw->aq.asq.len);
 	oldval = val;
-	if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
 		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
-		val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
 	}
-	if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
 		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
-		val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
 	}
-	if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
 		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
-		val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
 	}
 	if (oldval != val)
 		wr32(hw, hw->aq.asq.len, val);
@@ -1856,6 +1942,7 @@
 	if (err)
 		goto err_req_irq;
 
+	i40evf_add_filter(adapter, adapter->hw.mac.addr);
 	i40evf_configure(adapter);
 
 	err = i40evf_up_complete(adapter);
@@ -1979,6 +2066,62 @@
 }
 
 /**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	/* got VF config message back from PF, now we can parse it */
+	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+	}
+	if (!adapter->vsi_res) {
+		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+		return -ENODEV;
+	}
+
+	if (adapter->vf_res->vf_offload_flags
+	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+		netdev->vlan_features = netdev->features;
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+				    NETIF_F_HW_VLAN_CTAG_RX |
+				    NETIF_F_HW_VLAN_CTAG_FILTER;
+	}
+	netdev->features |= NETIF_F_HIGHDMA |
+			    NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_SCTP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_GRO;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features;
+	netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+	adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+	adapter->vsi.back = adapter;
+	adapter->vsi.base_vector = 1;
+	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+	adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+	adapter->vsi.netdev = adapter->netdev;
+	return 0;
+}
+
+/**
  * i40evf_init_task - worker thread to perform delayed initialization
  * @work: pointer to work_struct containing our data
  *
@@ -1996,10 +2139,9 @@
 						      struct i40evf_adapter,
 						      init_task.work);
 	struct net_device *netdev = adapter->netdev;
-	struct i40evf_mac_filter *f;
 	struct i40e_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	int i, err, bufsz;
+	int err, bufsz;
 
 	switch (adapter->state) {
 	case __I40EVF_STARTUP:
@@ -2050,6 +2192,12 @@
 		if (err) {
 			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 				err = i40evf_send_api_ver(adapter);
+			else
+				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+					adapter->pf_version.major,
+					adapter->pf_version.minor,
+					I40E_VIRTCHNL_VERSION_MAJOR,
+					I40E_VIRTCHNL_VERSION_MINOR);
 			goto err;
 		}
 		err = i40evf_send_vf_config_msg(adapter);
@@ -2085,42 +2233,15 @@
 	default:
 		goto err_alloc;
 	}
-	/* got VF config message back from PF, now we can parse it */
-	for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-		if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
-			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
-	}
-	if (!adapter->vsi_res) {
-		dev_err(&pdev->dev, "No LAN VSI found\n");
+	if (i40evf_process_config(adapter))
 		goto err_alloc;
-	}
+	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
 	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
 	netdev->netdev_ops = &i40evf_netdev_ops;
 	i40evf_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-	netdev->features |= NETIF_F_HIGHDMA |
-			    NETIF_F_SG |
-			    NETIF_F_IP_CSUM |
-			    NETIF_F_SCTP_CSUM |
-			    NETIF_F_IPV6_CSUM |
-			    NETIF_F_TSO |
-			    NETIF_F_TSO6 |
-			    NETIF_F_RXCSUM |
-			    NETIF_F_GRO;
-
-	if (adapter->vf_res->vf_offload_flags
-	    & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-		netdev->vlan_features = netdev->features;
-		netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
-				    NETIF_F_HW_VLAN_CTAG_RX |
-				    NETIF_F_HW_VLAN_CTAG_FILTER;
-	}
-
-	/* copy netdev features into list of user selectable features */
-	netdev->hw_features |= netdev->features;
-	netdev->hw_features &= ~NETIF_F_RXCSUM;
 
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2130,16 +2251,6 @@
 	ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
 	ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
-	f = kzalloc(sizeof(*f), GFP_ATOMIC);
-	if (!f)
-		goto err_sw_init;
-
-	ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
-	f->add = true;
-	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
-	list_add(&f->list, &adapter->mac_filter_list);
-
 	init_timer(&adapter->watchdog_timer);
 	adapter->watchdog_timer.function = &i40evf_watchdog_timer;
 	adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -2154,24 +2265,14 @@
 	if (err)
 		goto err_sw_init;
 	i40evf_map_rings_to_vectors(adapter);
-	i40evf_configure_rss(adapter);
+	if (!RSS_AQ(adapter))
+		i40evf_configure_rss(adapter);
 	err = i40evf_request_misc_irq(adapter);
 	if (err)
 		goto err_sw_init;
 
 	netif_carrier_off(netdev);
 
-	adapter->vsi.id = adapter->vsi_res->vsi_id;
-	adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
-	adapter->vsi.back = adapter;
-	adapter->vsi.base_vector = 1;
-	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-	adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-	adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
-				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-	adapter->vsi.netdev = adapter->netdev;
-
 	if (!adapter->netdev_registered) {
 		err = register_netdev(netdev);
 		if (err)
@@ -2190,6 +2291,13 @@
 	adapter->state = __I40EVF_DOWN;
 	set_bit(__I40E_DOWN, &adapter->vsi.state);
 	i40evf_misc_irq_enable(adapter);
+
+	if (RSS_AQ(adapter)) {
+		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
+		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+	} else {
+		i40evf_configure_rss(adapter);
+	}
 	return;
 restart:
 	schedule_delayed_work(&adapter->init_task,
@@ -2299,7 +2407,7 @@
 	hw = &adapter->hw;
 	hw->back = adapter;
 
-	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
 	adapter->state = __I40EVF_STARTUP;
 
 	/* Call save state here because it relies on the adapter struct. */
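
The register-based RSS path added above (i40evf_configure_rss_reg()) packs four 8-bit queue indices into each 32-bit VFQF_HLUT register, assigning queues round-robin. A minimal stand-alone sketch of that packing, in plain C for illustration only — the register count and queue count here are hypothetical, not the hardware values:

#include <stdint.h>
#include <stdio.h>

#define HLUT_REGS 16			/* illustrative register count */

int main(void)
{
	uint32_t hlut[HLUT_REGS];
	unsigned int num_queues = 4;	/* hypothetical number of active queues */
	unsigned int cqueue = 0;
	unsigned int i, j;

	for (i = 0; i < HLUT_REGS; i++) {
		uint32_t lut = 0;

		for (j = 0; j < 4; j++) {
			if (cqueue == num_queues)
				cqueue = 0;
			lut |= cqueue << (8 * j);	/* one byte per LUT entry */
			cqueue++;
		}
		hlut[i] = lut;
	}

	printf("HLUT[0] = 0x%08x\n", hlut[0]);	/* prints 0x03020100 for 4 queues */
	return 0;
}
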
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 61e0905..d4eb1a5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -51,8 +51,9 @@
 
 	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
 	if (err)
-		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
-			op, err, hw->aq.asq_last_status);
+		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+			op, i40evf_stat_str(hw, err),
+			i40evf_aq_str(hw, hw->aq.asq_last_status));
 	return err;
 }
 
@@ -125,8 +126,11 @@
 	}
 
 	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
-	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
-	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+	adapter->pf_version = *pf_vvi;
+
+	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+	     (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
 		err = -EIO;
 
 out_alloc:
@@ -145,8 +149,24 @@
  **/
 int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
 {
-	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-				  NULL, 0);
+	u32 caps;
+
+	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+	       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+	       I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+	adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+	adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+	if (PF_IS_V11(adapter))
+		return i40evf_send_pf_msg(adapter,
+					  I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					  (u8 *)&caps, sizeof(caps));
+	else
+		return i40evf_send_pf_msg(adapter,
+					  I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+					  NULL, 0);
 }
 
 /**
@@ -274,7 +294,7 @@
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@
 	}
 	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
 	vqs.vsi_id = adapter->vsi_res->vsi_id;
-	vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+	vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
 	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@
 		return;
 	}
 	if (v_retval) {
-		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
-			__func__, v_retval, v_opcode);
+		dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+			__func__, v_retval,
+			i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
 	}
 	switch (v_opcode) {
 	case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@
 		adapter->current_stats = *stats;
 		}
 		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+		u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+			  I40E_MAX_VF_VSI *
+			  sizeof(struct i40e_virtchnl_vsi_resource);
+		memcpy(adapter->vf_res, msg, min(msglen, len));
+		i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+		i40evf_process_config(adapter);
+		}
+		break;
 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
 		/* enable transmits */
 		i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@
 		i40evf_free_all_rx_resources(adapter);
 		break;
 	case I40E_VIRTCHNL_OP_VERSION:
-	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
 		/* Don't display an error if we get these out of sequence.
 		 * If the firmware needed to get kicked, we'll get these and
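
The reworked version comparison above no longer requires an exact match: only a PF reporting an API version newer than the VF's own I40E_VIRTCHNL_VERSION_MAJOR/MINOR is rejected. A hedged sketch of just that comparison, with an illustrative helper name:

#include <errno.h>

/* Return 0 when the PF API version can be supported, -EIO otherwise.
 * Mirrors the check above: reject only a PF that is newer than the VF.
 */
static int vf_check_pf_version(unsigned int pf_major, unsigned int pf_minor,
			       unsigned int vf_major, unsigned int vf_minor)
{
	if (pf_major > vf_major)
		return -EIO;
	if (pf_major == vf_major && pf_minor > vf_minor)
		return -EIO;
	return 0;
}
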
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index b0182dd..7a73510 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -139,10 +139,6 @@
 	if (ret_val)
 		return ret_val;
 
-	/* reset page to 0 */
-	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
-	if (ret_val)
-		return ret_val;
 
 	if (data & E1000_M88E1112_STATUS_LINK)
 		port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@
 	if (port && (hw->dev_spec._82575.media_port != port)) {
 		hw->dev_spec._82575.media_port = port;
 		hw->dev_spec._82575.media_changed = true;
+	}
+
+	if (port == E1000_MEDIA_PORT_COPPER) {
+		/* reset page to 0 */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+		if (ret_val)
+			return ret_val;
+		igb_check_for_link_82575(hw);
 	} else {
-		ret_val = igb_check_for_link_82575(hw);
+		igb_check_for_link_82575(hw);
+		/* reset page to 0 */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+		if (ret_val)
+			return ret_val;
 	}
 
 	return 0;
@@ -223,6 +231,7 @@
 	/* Verify phy id and set remaining function pointers */
 	switch (phy->id) {
 	case M88E1543_E_PHY_ID:
+	case M88E1512_E_PHY_ID:
 	case I347AT4_E_PHY_ID:
 	case M88E1112_E_PHY_ID:
 	case M88E1111_I_PHY_ID:
@@ -235,7 +244,7 @@
 		else
 			phy->ops.get_cable_length = igb_get_cable_length_m88;
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
-		/* Check if this PHY is confgured for media swap. */
+		/* Check if this PHY is configured for media swap. */
 		if (phy->id == M88E1112_E_PHY_ID) {
 			u16 data;
 
@@ -258,6 +267,11 @@
 				hw->mac.ops.check_for_link =
 						igb_check_for_link_media_swap;
 		}
+		if (phy->id == M88E1512_E_PHY_ID) {
+			ret_val = igb_initialize_M88E1512_phy(hw);
+			if (ret_val)
+				goto out;
+		}
 		break;
 	case IGP03E1000_E_PHY_ID:
 		phy->type = e1000_phy_igp_3;
@@ -889,6 +903,7 @@
  **/
 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 {
+	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 
 	/* This isn't a true "hard" reset, but is the only reset
@@ -905,7 +920,11 @@
 		goto out;
 
 	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val)
+		goto out;
 
+	if (phy->id == M88E1512_E_PHY_ID)
+		ret_val = igb_initialize_M88E1512_phy(hw);
 out:
 	return ret_val;
 }
@@ -1579,6 +1598,7 @@
 		case I347AT4_E_PHY_ID:
 		case M88E1112_E_PHY_ID:
 		case M88E1543_E_PHY_ID:
+		case M88E1512_E_PHY_ID:
 		case I210_I_PHY_ID:
 			ret_val = igb_copper_link_setup_m88_gen2(hw);
 			break;
@@ -2621,7 +2641,8 @@
 	u16 phy_data;
 
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1543_E_PHY_ID))
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
 		goto out;
 
 	if (!hw->dev_spec._82575.eee_disable) {
@@ -2701,7 +2722,8 @@
 
 	/* Check if EEE is supported on this device. */
 	if ((hw->phy.media_type != e1000_media_type_copper) ||
-	    (phy->id != M88E1543_E_PHY_ID))
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
 		goto out;
 
 	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f8684aa..b191504 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -604,6 +604,10 @@
 #define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT	7
 #define E1000_M88E1112_PAGE_ADDR		0x16
 #define E1000_M88E1112_STATUS			0x01
+#define E1000_M88E1512_CFG_REG_1		0x0010
+#define E1000_M88E1512_CFG_REG_2		0x0011
+#define E1000_M88E1512_CFG_REG_3		0x0007
+#define E1000_M88E1512_MODE			0x0014
 
 /* PCI Express Control */
 #define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
@@ -861,6 +865,7 @@
 #define M88_VENDOR           0x0141
 #define I210_I_PHY_ID        0x01410C00
 #define M88E1543_E_PHY_ID    0x01410EA0
+#define M88E1512_E_PHY_ID    0x01410DD0
 
 /* M88E1000 Specific Registers */
 #define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c1bb64d..23ec28f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,5 +1,5 @@
 /* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] = {
 	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
-	(sizeof(e1000_m88_cable_length_table) / \
-	sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] = {
 	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@
 	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
 	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
 	104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
-	(sizeof(e1000_igp_2_cable_length_table) / \
-	 sizeof(e1000_igp_2_cable_length_table[0]))
 
 /**
  *  igb_check_reset_block - Check if PHY reset is blocked
@@ -1268,6 +1262,8 @@
 			switch (hw->phy.id) {
 			case I347AT4_E_PHY_ID:
 			case M88E1112_E_PHY_ID:
+			case M88E1543_E_PHY_ID:
+			case M88E1512_E_PHY_ID:
 			case I210_I_PHY_ID:
 				reset_dsp = false;
 				break;
@@ -1276,9 +1272,9 @@
 					reset_dsp = false;
 				break;
 			}
-			if (!reset_dsp)
+			if (!reset_dsp) {
 				hw_dbg("Link taking longer than expected.\n");
-			else {
+			} else {
 				/* We didn't get link.
 				 * Reset the DSP and cross our fingers.
 				 */
@@ -1303,6 +1299,8 @@
 	if (hw->phy.type != e1000_phy_m88 ||
 	    hw->phy.id == I347AT4_E_PHY_ID ||
 	    hw->phy.id == M88E1112_E_PHY_ID ||
+	    hw->phy.id == M88E1543_E_PHY_ID ||
+	    hw->phy.id == M88E1512_E_PHY_ID ||
 	    hw->phy.id == I210_I_PHY_ID)
 		goto out;
 
@@ -1700,7 +1698,7 @@
 
 	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+	if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
 		ret_val = -E1000_ERR_PHY;
 		goto out;
 	}
@@ -1743,6 +1741,7 @@
 		phy->cable_length = phy_data / (is_cm ? 100 : 1);
 		break;
 	case M88E1543_E_PHY_ID:
+	case M88E1512_E_PHY_ID:
 	case I347AT4_E_PHY_ID:
 		/* Remember the original page select and set it to 7 */
 		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
@@ -1796,7 +1795,7 @@
 
 		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
 			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
 			ret_val = -E1000_ERR_PHY;
 			goto out;
 		}
@@ -1840,7 +1839,7 @@
 	s32 ret_val = 0;
 	u16 phy_data, i, agc_value = 0;
 	u16 cur_agc_index, max_agc_index = 0;
-	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
 	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
 		IGP02E1000_PHY_AGC_A,
 		IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1862,7 @@
 				IGP02E1000_AGC_LENGTH_MASK;
 
 		/* Array index bound check. */
-		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
 		    (cur_agc_index == 0)) {
 			ret_val = -E1000_ERR_PHY;
 			goto out;
@@ -2195,6 +2194,90 @@
 }
 
 /**
+ *  igb_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+
+	/* Switch to PHY page 0xFF. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+	if (ret_val)
+		goto out;
+
+	/* Switch to PHY page 0xFB. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+	if (ret_val)
+		goto out;
+
+	/* Switch to PHY page 0x12. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+	if (ret_val)
+		goto out;
+
+	/* Change mode to SGMII-to-Copper */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+	if (ret_val)
+		goto out;
+
+	/* Return the PHY to page 0. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		return ret_val;
+	}
+
+	/* msec_delay(1000); */
+	usleep_range(1000, 2000);
+out:
+	return ret_val;
+}
+
+/**
  * igb_power_up_phy_copper - Restore copper link in case of PHY power down
  * @hw: pointer to the HW structure
  *
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 7af4ffa..24d55ed 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -61,6 +61,7 @@
 void igb_power_up_phy_copper(struct e1000_hw *hw);
 void igb_power_down_phy_copper(struct e1000_hw *hw);
 s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_initialize_M88E1512_phy(struct e1000_hw *hw);
 s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 6f0490d..4af2870 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -104,6 +104,8 @@
 #define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
 #define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
 #define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_FREQOUT0   0x0B654 /* Frequency Out 0 Control Register - RW */
+#define E1000_FREQOUT1   0x0B658 /* Frequency Out 1 Control Register - RW */
 #define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
 #define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
 #define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c2bd4f9..212d668 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -540,6 +540,7 @@
 			 struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
 #ifdef CONFIG_IGB_HWMON
 void igb_sysfs_exit(struct igb_adapter *adapter);
 int igb_sysfs_init(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index d5673eb..7426276 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2159,6 +2159,27 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	int i;
 
+	if (ec->rx_max_coalesced_frames ||
+	    ec->rx_coalesce_usecs_irq ||
+	    ec->rx_max_coalesced_frames_irq ||
+	    ec->tx_max_coalesced_frames ||
+	    ec->tx_coalesce_usecs_irq ||
+	    ec->stats_block_coalesce_usecs ||
+	    ec->use_adaptive_rx_coalesce ||
+	    ec->use_adaptive_tx_coalesce ||
+	    ec->pkt_rate_low ||
+	    ec->rx_coalesce_usecs_low ||
+	    ec->rx_max_coalesced_frames_low ||
+	    ec->tx_coalesce_usecs_low ||
+	    ec->tx_max_coalesced_frames_low ||
+	    ec->pkt_rate_high ||
+	    ec->rx_coalesce_usecs_high ||
+	    ec->rx_max_coalesced_frames_high ||
+	    ec->tx_coalesce_usecs_high ||
+	    ec->tx_max_coalesced_frames_high ||
+	    ec->rate_sample_interval)
+		return -ENOTSUPP;
+
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
 	    ((ec->rx_coalesce_usecs > 3) &&
 	     (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -2396,10 +2417,6 @@
 			info->rx_filters |=
 				(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 				(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-				(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 				(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
 		return 0;
@@ -2991,6 +3008,7 @@
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	unsigned int count = ch->combined_count;
+	unsigned int max_combined = 0;
 
 	/* Verify they are not requesting separate vectors */
 	if (!count || ch->rx_count || ch->tx_count)
@@ -3001,11 +3019,13 @@
 		return -EINVAL;
 
 	/* Verify the number of channels doesn't exceed hw limits */
-	if (count > igb_max_channels(adapter))
+	max_combined = igb_max_channels(adapter);
+	if (count > max_combined)
 		return -EINVAL;
 
 	if (count != adapter->rss_queues) {
 		adapter->rss_queues = count;
+		igb_set_flag_queue_pairs(adapter, max_combined);
 
 		/* Hardware has to reinitialize queues and interrupts to
 		 * match the new configuration.
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 830466c..e174fbb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -179,6 +179,8 @@
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
 static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
+static int igb_disable_sriov(struct pci_dev *dev);
+static int igb_pci_disable_sriov(struct pci_dev *dev);
 #endif
 
 #ifdef CONFIG_PM
@@ -1205,10 +1207,14 @@
 
 	/* allocate q_vector and rings */
 	q_vector = adapter->q_vector[v_idx];
-	if (!q_vector)
+	if (!q_vector) {
 		q_vector = kzalloc(size, GFP_KERNEL);
-	else
+	} else if (size > ksize(q_vector)) {
+		kfree_rcu(q_vector, rcu);
+		q_vector = kzalloc(size, GFP_KERNEL);
+	} else {
 		memset(q_vector, 0, size);
+	}
 	if (!q_vector)
 		return -ENOMEM;
 
@@ -2645,7 +2651,11 @@
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
 err_sw_init:
+	kfree(adapter->shadow_vfta);
 	igb_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_PCI_IOV
+	igb_disable_sriov(pdev);
+#endif
 	pci_iounmap(pdev, hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -2805,14 +2815,14 @@
 	 */
 	igb_release_hw_control(adapter);
 
-	unregister_netdev(netdev);
-
-	igb_clear_interrupt_scheme(adapter);
-
 #ifdef CONFIG_PCI_IOV
 	igb_disable_sriov(pdev);
 #endif
 
+	unregister_netdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
 	pci_iounmap(pdev, hw->hw_addr);
 	if (hw->flash_address)
 		iounmap(hw->flash_address);
@@ -2847,7 +2857,7 @@
 		return;
 
 	pci_sriov_set_totalvfs(pdev, 7);
-	igb_pci_enable_sriov(pdev, max_vfs);
+	igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2888,6 +2898,14 @@
 
 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
 
+	igb_set_flag_queue_pairs(adapter, max_rss_queues);
+}
+
+void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
+			      const u32 max_rss_queues)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
 	/* Determine if we need to pair queues. */
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2968,6 +2986,8 @@
 	}
 #endif /* CONFIG_PCI_IOV */
 
+	igb_probe_vfs(adapter);
+
 	igb_init_queue_configuration(adapter);
 
 	/* Setup and initialize a copy of the hw vlan table array */
@@ -2980,8 +3000,6 @@
 		return -ENOMEM;
 	}
 
-	igb_probe_vfs(adapter);
-
 	/* Explicitly disable IRQ since the NIC can be in any state. */
 	igb_irq_disable(adapter);
 
@@ -6621,22 +6639,25 @@
 			    struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IGB_RX_BUFSZ;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
-		if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-			igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-			va += IGB_TS_HDR_LEN;
-			size -= IGB_TS_HDR_LEN;
-		}
+	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+		va += IGB_TS_HDR_LEN;
+		size -= IGB_TS_HDR_LEN;
+	}
 
+	if (likely(size <= IGB_RX_HDR_LEN)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6669,21 @@
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
@@ -6791,62 +6825,6 @@
 }
 
 /**
- *  igb_pull_tail - igb specific version of skb_pull_tail
- *  @rx_ring: rx descriptor ring packet is being transacted on
- *  @rx_desc: pointer to the EOP Rx descriptor
- *  @skb: pointer to current skb being adjusted
- *
- *  This function is an igb specific version of __pskb_pull_tail.  The
- *  main difference between this version and the original function is that
- *  this function can make several assumptions about the state of things
- *  that allow for significant optimizations versus the standard function.
- *  As a result we can do things like drop a frag and maintain an accurate
- *  truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
-			  union e1000_adv_rx_desc *rx_desc,
-			  struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		/* retrieve timestamp from buffer */
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
-		/* update pointers to remove timestamp header */
-		skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
-		frag->page_offset += IGB_TS_HDR_LEN;
-		skb->data_len -= IGB_TS_HDR_LEN;
-		skb->len -= IGB_TS_HDR_LEN;
-
-		/* move va to start of packet data */
-		va += IGB_TS_HDR_LEN;
-	}
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
-/**
  *  igb_cleanup_headers - Correct corrupted or empty headers
  *  @rx_ring: rx descriptor ring packet is being transacted on
  *  @rx_desc: pointer to the EOP Rx descriptor
@@ -6873,10 +6851,6 @@
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		igb_pull_tail(rx_ring, rx_desc, skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
@@ -7445,6 +7419,7 @@
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		rtnl_unlock();
 		return -ENOMEM;
 	}
 
@@ -7538,6 +7513,7 @@
 	igb_init_queue_configuration(adapter);
 
 	if (igb_init_interrupt_scheme(adapter, true)) {
+		rtnl_unlock();
 		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index c3a9392c..5982f28 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -405,7 +405,7 @@
 	wr32(E1000_CTRL_EXT, ctrl_ext);
 }
 
-static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
 {
 	static const u32 aux0_sel_sdp[IGB_N_SDP] = {
 		AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
@@ -424,6 +424,14 @@
 		TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
 		TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
 	};
+	static const u32 ts_sdp_sel_fc0[IGB_N_SDP] = {
+		TS_SDP0_SEL_FC0, TS_SDP1_SEL_FC0,
+		TS_SDP2_SEL_FC0, TS_SDP3_SEL_FC0,
+	};
+	static const u32 ts_sdp_sel_fc1[IGB_N_SDP] = {
+		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+	};
 	static const u32 ts_sdp_sel_clr[IGB_N_SDP] = {
 		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
 		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
@@ -445,11 +453,17 @@
 		tssdp &= ~AUX1_TS_SDP_EN;
 
 	tssdp &= ~ts_sdp_sel_clr[pin];
-	if (chan == 1)
-		tssdp |= ts_sdp_sel_tt1[pin];
-	else
-		tssdp |= ts_sdp_sel_tt0[pin];
-
+	if (freq) {
+		if (chan == 1)
+			tssdp |= ts_sdp_sel_fc1[pin];
+		else
+			tssdp |= ts_sdp_sel_fc0[pin];
+	} else {
+		if (chan == 1)
+			tssdp |= ts_sdp_sel_tt1[pin];
+		else
+			tssdp |= ts_sdp_sel_tt0[pin];
+	}
 	tssdp |= ts_sdp_en[pin];
 
 	wr32(E1000_TSSDP, tssdp);
@@ -463,10 +477,10 @@
 	struct igb_adapter *igb =
 		container_of(ptp, struct igb_adapter, ptp_caps);
 	struct e1000_hw *hw = &igb->hw;
-	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, freqout;
 	unsigned long flags;
 	struct timespec ts;
-	int pin = -1;
+	int use_freq = 0, pin = -1;
 	s64 ns;
 
 	switch (rq->type) {
@@ -511,40 +525,58 @@
 		ts.tv_nsec = rq->perout.period.nsec;
 		ns = timespec_to_ns(&ts);
 		ns = ns >> 1;
-		if (on && ns < 500000LL) {
-			/* 2k interrupts per second is an awful lot. */
-			return -EINVAL;
+		if (on && ns <= 70000000LL) {
+			if (ns < 8LL)
+				return -EINVAL;
+			use_freq = 1;
 		}
 		ts = ns_to_timespec(ns);
 		if (rq->perout.index == 1) {
-			tsauxc_mask = TSAUXC_EN_TT1;
-			tsim_mask = TSINTR_TT1;
+			if (use_freq) {
+				tsauxc_mask = TSAUXC_EN_CLK1 | TSAUXC_ST1;
+				tsim_mask = 0;
+			} else {
+				tsauxc_mask = TSAUXC_EN_TT1;
+				tsim_mask = TSINTR_TT1;
+			}
 			trgttiml = E1000_TRGTTIML1;
 			trgttimh = E1000_TRGTTIMH1;
+			freqout = E1000_FREQOUT1;
 		} else {
-			tsauxc_mask = TSAUXC_EN_TT0;
-			tsim_mask = TSINTR_TT0;
+			if (use_freq) {
+				tsauxc_mask = TSAUXC_EN_CLK0 | TSAUXC_ST0;
+				tsim_mask = 0;
+			} else {
+				tsauxc_mask = TSAUXC_EN_TT0;
+				tsim_mask = TSINTR_TT0;
+			}
 			trgttiml = E1000_TRGTTIML0;
 			trgttimh = E1000_TRGTTIMH0;
+			freqout = E1000_FREQOUT0;
 		}
 		spin_lock_irqsave(&igb->tmreg_lock, flags);
 		tsauxc = rd32(E1000_TSAUXC);
 		tsim = rd32(E1000_TSIM);
+		if (rq->perout.index == 1) {
+			tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+			tsim &= ~TSINTR_TT1;
+		} else {
+			tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+			tsim &= ~TSINTR_TT0;
+		}
 		if (on) {
 			int i = rq->perout.index;
-
-			igb_pin_perout(igb, i, pin);
+			igb_pin_perout(igb, i, pin, use_freq);
 			igb->perout[i].start.tv_sec = rq->perout.start.sec;
 			igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
 			igb->perout[i].period.tv_sec = ts.tv_sec;
 			igb->perout[i].period.tv_nsec = ts.tv_nsec;
 			wr32(trgttimh, rq->perout.start.sec);
 			wr32(trgttiml, rq->perout.start.nsec);
+			if (use_freq)
+				wr32(freqout, ns);
 			tsauxc |= tsauxc_mask;
 			tsim |= tsim_mask;
-		} else {
-			tsauxc &= ~tsauxc_mask;
-			tsim &= ~tsim_mask;
 		}
 		wr32(E1000_TSAUXC, tsauxc);
 		wr32(E1000_TSIM, tsim);
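
With the periodic-output change above, short periods are driven by the hardware frequency-out function (FREQOUT0/1) instead of per-edge target-time interrupts. A rough sketch of the mode decision, with the thresholds taken from the hunk (8 ns minimum half-period, 70 ms cutover); the enum and helper are illustrative only, not driver API:

#include <stdint.h>

enum perout_mode { PEROUT_INVALID, PEROUT_TARGET_TIME, PEROUT_FREQ_OUT };

/* Half of the requested period is compared against the thresholds used above. */
static enum perout_mode pick_perout_mode(int64_t period_ns, int enable)
{
	int64_t half = period_ns >> 1;

	if (!enable || half > 70000000LL)
		return PEROUT_TARGET_TIME;	/* long periods: interrupt per edge */
	if (half < 8LL)
		return PEROUT_INVALID;		/* too fast for the hardware */
	return PEROUT_FREQ_OUT;			/* short periods: FREQOUTn clock */
}
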
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 95af14e..686fa71 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -319,6 +319,7 @@
 			dma_unmap_single(&pdev->dev, buffer_info->dma,
 					 adapter->rx_ps_hdr_size,
 					 DMA_FROM_DEVICE);
+			buffer_info->dma = 0;
 			skb_put(skb, hlen);
 		}
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index ac3ac2a..edf1fb9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -630,6 +630,7 @@
 #define IXGBE_FLAG_FCOE_ENABLED                 (u32)(1 << 21)
 #define IXGBE_FLAG_SRIOV_CAPABLE                (u32)(1 << 22)
 #define IXGBE_FLAG_SRIOV_ENABLED                (u32)(1 << 23)
+#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
 
 	u32 flags2;
 #define IXGBE_FLAG2_RSC_CAPABLE                 (u32)(1 << 0)
@@ -644,6 +645,9 @@
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		(u32)(1 << 9)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED		(u32)(1 << 10)
 #define IXGBE_FLAG2_PHY_INTERRUPT		(u32)(1 << 11)
+#ifdef CONFIG_IXGBE_VXLAN
+#define IXGBE_FLAG2_VXLAN_REREG_NEEDED		BIT(12)
+#endif
 
 	/* Tx fast path data */
 	int num_tx_queues;
@@ -757,7 +761,9 @@
 	u32 timer_event_accumulator;
 	u32 vferr_refcount;
 	struct ixgbe_mac_addr *mac_table;
+#ifdef CONFIG_IXGBE_VXLAN
 	u16 vxlan_port;
+#endif
 	struct kobject *info_kobj;
 #ifdef CONFIG_IXGBE_HWMON
 	struct hwmon_buff *ixgbe_hwmon_buff;
@@ -967,4 +973,5 @@
 				  struct ixgbe_adapter *adapter,
 				  struct ixgbe_ring *tx_ring);
 u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
+void ixgbe_store_reta(struct ixgbe_adapter *adapter);
 #endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 6b87d96..dd7062f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -504,16 +504,12 @@
  **/
 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
 {
-	u32 autoc2_reg, fwsm;
+	u32 autoc2_reg;
 	u16 ee_ctrl_2 = 0;
 
 	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
 
-	/* Check to see if MNG FW could be enabled */
-	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
-
-	if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
-	    !hw->wol_enabled &&
+	if (!ixgbe_mng_present(hw) && !hw->wol_enabled &&
 	    ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
 		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
 		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
@@ -1246,6 +1242,25 @@
 }
 
 /**
+ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
+{
+	int i;
+
+	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+			return 0;
+		udelay(10);
+	}
+
+	return IXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
  *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
  *  @hw: pointer to hardware structure
  **/
@@ -1253,6 +1268,8 @@
 {
 	int i;
 	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+	u32 fdircmd;
+	s32 err;
 
 	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
 
@@ -1260,15 +1277,10 @@
 	 * Before starting reinitialization process,
 	 * FDIRCMD.CMD must be zero.
 	 */
-	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
-		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
-		      IXGBE_FDIRCMD_CMD_MASK))
-			break;
-		udelay(10);
-	}
-	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
-		return IXGBE_ERR_FDIR_REINIT_FAILED;
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
+		return err;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
@@ -1394,14 +1406,12 @@
 	/*
 	 * Continue setup of fdirctrl register bits:
 	 *  Turn perfect match filtering on
-	 *  Report hash in RSS field of Rx wb descriptor
 	 *  Initialize the drop queue
 	 *  Move the flexible bytes to use the ethertype - shift 6 words
 	 *  Set the maximum length per hash bucket to 0xA filters
 	 *  Send interrupt when 64 (0x4 * 16) filters are left
 	 */
 	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
-		    IXGBE_FDIRCTRL_REPORT_STATUS |
 		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
 		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
 		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
@@ -1509,20 +1519,28 @@
  *  @input: unique input dword
  *  @common: compressed common input dword
  *  @queue: queue index to direct traffic to
+ *
+ * Note that the tunnel bit in input must not be set when the hardware
+ * tunneling support does not exist.
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
 					  union ixgbe_atr_hash_dword input,
 					  union ixgbe_atr_hash_dword common,
 					  u8 queue)
 {
-	u64  fdirhashcmd;
-	u32  fdircmd;
+	u64 fdirhashcmd;
+	u8 flow_type;
+	bool tunnel;
+	u32 fdircmd;
 
 	/*
 	 * Get the flow_type in order to program FDIRCMD properly
 	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
 	 */
-	switch (input.formatted.flow_type) {
+	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
+	flow_type = input.formatted.flow_type &
+		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
+	switch (flow_type) {
 	case IXGBE_ATR_FLOW_TYPE_TCPV4:
 	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
@@ -1538,8 +1556,10 @@
 	/* configure FDIRCMD register */
 	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
 		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
 	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	if (tunnel)
+		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;
 
 	/*
 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
@@ -1760,6 +1780,7 @@
 					  u16 soft_id, u8 queue)
 {
 	u32 fdirport, fdirvlan, fdirhash, fdircmd;
+	s32 err;
 
 	/* currently IPv6 is not supported, must be programmed with 0 */
 	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
@@ -1808,6 +1829,11 @@
 	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		hw_dbg(hw, "Flow Director command did not complete!\n");
+		return err;
+	}
 
 	return 0;
 }
@@ -1817,9 +1843,8 @@
 					  u16 soft_id)
 {
 	u32 fdirhash;
-	u32 fdircmd = 0;
-	u32 retry_count;
-	s32 err = 0;
+	u32 fdircmd;
+	s32 err;
 
 	/* configure FDIRHASH register */
 	fdirhash = input->formatted.bkt_hash;
@@ -1832,18 +1857,12 @@
 	/* Query if filter is present */
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
 
-	for (retry_count = 10; retry_count; retry_count--) {
-		/* allow 10us for query to process */
-		udelay(10);
-		/* verify query completed successfully */
-		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
-		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
-			break;
+	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
+	if (err) {
+		hw_dbg(hw, "Flow Director command did not complete!\n");
+		return err;
 	}
 
-	if (!retry_count)
-		err = IXGBE_ERR_FDIR_REINIT_FAILED;
-
 	/* if filter exists in hardware then remove it */
 	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
 		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
@@ -1852,7 +1871,7 @@
 				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
 	}
 
-	return err;
+	return 0;
 }
 
 /**
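
The three open-coded polling loops above are collapsed onto ixgbe_fdir_check_cmd_complete(), which spins until the command field of FDIRCMD clears or the poll count is exhausted. A generic poll-until-clear sketch of the same pattern — the register accessor, delay helper, and poll limit here are assumptions, not driver symbols:

#include <stdint.h>

#define CMD_POLL_LIMIT	10		/* illustrative, not the hardware value */
#define CMD_MASK	0x3		/* illustrative command-field mask */

/* read_reg()/delay_us() stand in for the driver's MMIO read and delay helpers */
extern uint32_t read_reg(void);
extern void delay_us(unsigned int us);

static int poll_cmd_complete(uint32_t *last_val)
{
	int i;

	for (i = 0; i < CMD_POLL_LIMIT; i++) {
		*last_val = read_reg();
		if (!(*last_val & CMD_MASK))
			return 0;	/* command field cleared: done */
		delay_us(10);
	}
	return -1;			/* timed out */
}
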
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 4c1c267..3f56a80 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3905,3 +3905,18 @@
 		}
 	}
 }
+
+/** ixgbe_mng_present - returns true when management capability is present
+ * @hw: pointer to hardware structure
+ **/
+bool ixgbe_mng_present(struct ixgbe_hw *hw)
+{
+	u32 fwsm;
+
+	if (hw->mac.type < ixgbe_mac_82599EB)
+		return false;
+
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
+	fwsm &= IXGBE_FWSM_MODE_MASK;
+	return fwsm == IXGBE_FWSM_FW_MODE_PT;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index ec015fe..2f779f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -113,6 +113,7 @@
 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
 				 u32 length, u32 timeout, bool return_data);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+bool ixgbe_mng_present(struct ixgbe_hw *hw);
 bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
 
 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index ec7b232..ab2edc8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -166,6 +166,8 @@
 	/* set the supported link speeds */
 	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
 		ecmd->supported |= SUPPORTED_10000baseT_Full;
+	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
+		ecmd->supported |= SUPPORTED_2500baseX_Full;
 	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
 		ecmd->supported |= SUPPORTED_1000baseT_Full;
 	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
@@ -177,6 +179,8 @@
 			ecmd->advertising |= ADVERTISED_100baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+			ecmd->advertising |= ADVERTISED_2500baseX_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
 	} else {
@@ -286,6 +290,9 @@
 		case IXGBE_LINK_SPEED_10GB_FULL:
 			ethtool_cmd_speed_set(ecmd, SPEED_10000);
 			break;
+		case IXGBE_LINK_SPEED_2_5GB_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_2500);
+			break;
 		case IXGBE_LINK_SPEED_1GB_FULL:
 			ethtool_cmd_speed_set(ecmd, SPEED_1000);
 			break;
@@ -2868,6 +2875,14 @@
 	return ret;
 }
 
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+	if (adapter->hw.mac.type < ixgbe_mac_X550)
+		return 16;
+	else
+		return 64;
+}
+
 static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2907,6 +2922,44 @@
 	return 0;
 }
 
+static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	int i;
+	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
+
+	if (hfunc)
+		return -EINVAL;
+
+	/* Fill out the redirection table */
+	if (indir) {
+		int max_queues = min_t(int, adapter->num_rx_queues,
+				       ixgbe_rss_indir_tbl_max(adapter));
+
+		/* Allow at least 2 queues w/ SR-IOV. */
+		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
+		    (max_queues < 2))
+			max_queues = 2;
+
+		/* Verify user input. */
+		for (i = 0; i < reta_entries; i++)
+			if (indir[i] >= max_queues)
+				return -EINVAL;
+
+		for (i = 0; i < reta_entries; i++)
+			adapter->rss_indir_tbl[i] = indir[i];
+	}
+
+	/* Fill out the rss hash key */
+	if (key)
+		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
+
+	ixgbe_store_reta(adapter);
+
+	return 0;
+}
+
 static int ixgbe_get_ts_info(struct net_device *dev,
 			     struct ethtool_ts_info *info)
 {
@@ -2938,14 +2991,6 @@
 			(1 << HWTSTAMP_FILTER_NONE) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
 			(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-			(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 		break;
 	default:
@@ -3167,6 +3212,7 @@
 	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
 	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
 	.get_rxfh		= ixgbe_get_rxfh,
+	.set_rxfh		= ixgbe_set_rxfh,
 	.get_channels		= ixgbe_get_channels,
 	.set_channels		= ixgbe_set_channels,
 	.get_ts_info		= ixgbe_get_ts_info,
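
The new ixgbe_set_rxfh() above validates every user-supplied redirection-table entry against the number of usable RX queues before committing the table. A small stand-alone sketch of that validation step; the parameter names are illustrative stand-ins for the adapter fields used in the hunk:

#include <errno.h>
#include <string.h>

/* Reject a table that references a queue beyond max_queues, else copy it. */
static int set_indirection_table(unsigned int *rss_indir_tbl,
				 const unsigned int *indir,
				 unsigned int reta_size,
				 unsigned int max_queues)
{
	unsigned int i;

	for (i = 0; i < reta_size; i++)
		if (indir[i] >= max_queues)
			return -EINVAL;

	memcpy(rss_indir_tbl, indir, reta_size * sizeof(*indir));
	return 0;
}
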
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ae21e0b..63b2cfe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -65,6 +65,9 @@
 #include "ixgbe_common.h"
 #include "ixgbe_dcb_82599.h"
 #include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_VXLAN
+#include <net/vxlan.h>
+#endif
 
 char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
@@ -79,7 +82,7 @@
 #define DRV_VERSION "4.0.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-				"Copyright (c) 1999-2014 Intel Corporation.";
+				"Copyright (c) 1999-2015 Intel Corporation.";
 
 static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
 
@@ -243,13 +246,20 @@
 static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
 				     int expected_gts)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	int max_gts = 0;
 	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
 	struct pci_dev *pdev;
 
-	/* determine whether to use the the parent device
+	/* Some devices are not connected over PCIe and thus do not negotiate
+	 * speed. These devices do not have valid bus info, and thus any report
+	 * we generate may not be correct.
 	 */
+	if (hw->bus.type == ixgbe_bus_type_internal)
+		return;
+
+	/* determine whether to use the parent device */
 	if (ixgbe_pcie_from_parent(&adapter->hw))
 		pdev = adapter->pdev->bus->parent->self;
 	else
@@ -1360,14 +1370,31 @@
 }
 
 #endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 				 union ixgbe_adv_rx_desc *rx_desc,
 				 struct sk_buff *skb)
 {
-	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb_set_hash(skb,
-			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-			     PKT_HASH_TYPE_L3);
+	u16 rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		   IXGBE_RXDADV_RSSTYPE_MASK;
+
+	if (!rss_type)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
 
 #ifdef IXGBE_FCOE
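In the rewritten ixgbe_rx_hash above, the descriptor's 4-bit RSS type decides how the hash is reported: if the type's bit is set in IXGBE_RSS_L4_TYPES_MASK the hash covered L4 ports, otherwise only the L3 addresses. A standalone illustration of that classification (type values copied from the RSSTYPE defines added to ixgbevf later in this diff):

    #include <stdbool.h>
    #include <stdio.h>

    #define RSSTYPE_IPV4_TCP 0x1   /* IXGBE_RXDADV_RSSTYPE_IPV4_TCP */
    #define RSSTYPE_IPV4     0x2   /* IXGBE_RXDADV_RSSTYPE_IPV4 */
    #define RSSTYPE_IPV4_UDP 0x7   /* IXGBE_RXDADV_RSSTYPE_IPV4_UDP */

    #define RSS_L4_TYPES_MASK \
        ((1ul << RSSTYPE_IPV4_TCP) | (1ul << RSSTYPE_IPV4_UDP))

    static bool hash_is_l4(unsigned int rss_type)
    {
        return (RSS_L4_TYPES_MASK & (1ul << rss_type)) != 0;
    }

    int main(void)
    {
        printf("tcp=%d ipv4-only=%d udp=%d\n",
               hash_is_l4(RSSTYPE_IPV4_TCP),   /* 1 -> L4 hash */
               hash_is_l4(RSSTYPE_IPV4),       /* 0 -> L3 hash */
               hash_is_l4(RSSTYPE_IPV4_UDP));  /* 1 -> L4 hash */
        return 0;
    }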
@@ -1414,7 +1441,6 @@
 	    (hdr_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_TUNNEL >> 16))) {
 		encap_pkt = true;
 		skb->encapsulation = 1;
-		skb->ip_summed = CHECKSUM_NONE;
 	}
 
 	/* if IP and error */
@@ -3287,7 +3313,7 @@
  *
  * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
  */
-static void ixgbe_store_reta(struct ixgbe_adapter *adapter)
+void ixgbe_store_reta(struct ixgbe_adapter *adapter)
 {
 	u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
 	struct ixgbe_hw *hw = &adapter->hw;
@@ -4245,6 +4271,21 @@
 	}
 }
 
+static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
+{
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_X550:
+	case ixgbe_mac_X550EM_x:
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
+#ifdef CONFIG_IXGBE_VXLAN
+		adapter->vxlan_port = 0;
+#endif
+		break;
+	default:
+		break;
+	}
+}
+
 #ifdef CONFIG_IXGBE_DCB
 /**
  * ixgbe_configure_dcb - Configure DCB hardware
@@ -5286,6 +5327,9 @@
 #ifdef CONFIG_IXGBE_DCA
 		adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 #endif
+#ifdef CONFIG_IXGBE_VXLAN
+		adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
+#endif
 		break;
 	default:
 		break;
@@ -5737,10 +5781,11 @@
 
 	ixgbe_up_complete(adapter);
 
-#if IS_ENABLED(CONFIG_IXGBE_VXLAN)
+	ixgbe_clear_vxlan_port(adapter);
+#ifdef CONFIG_IXGBE_VXLAN
 	vxlan_get_rx_port(netdev);
-
 #endif
+
 	return 0;
 
 err_set_queues:
@@ -5761,7 +5806,15 @@
 {
 	ixgbe_ptp_suspend(adapter);
 
-	ixgbe_down(adapter);
+	if (adapter->hw.phy.ops.enter_lplu) {
+		adapter->hw.phy.reset_disable = true;
+		ixgbe_down(adapter);
+		adapter->hw.phy.ops.enter_lplu(&adapter->hw);
+		adapter->hw.phy.reset_disable = false;
+	} else {
+		ixgbe_down(adapter);
+	}
+
 	ixgbe_free_irq(adapter);
 
 	ixgbe_free_all_tx_resources(adapter);
@@ -6327,6 +6380,7 @@
 	struct net_device *upper;
 	struct list_head *iter;
 	u32 link_speed = adapter->link_speed;
+	const char *speed_str;
 	bool flow_rx, flow_tx;
 
 	/* only continue if link was previously down */
@@ -6364,14 +6418,24 @@
 	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
 		ixgbe_ptp_start_cyclecounter(adapter);
 
-	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
-	       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
-	       "10 Gbps" :
-	       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-	       "1 Gbps" :
-	       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
-	       "100 Mbps" :
-	       "unknown speed"))),
+	switch (link_speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		speed_str = "10 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_2_5GB_FULL:
+		speed_str = "2.5 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		speed_str = "1 Gbps";
+		break;
+	case IXGBE_LINK_SPEED_100_FULL:
+		speed_str = "100 Mbps";
+		break;
+	default:
+		speed_str = "unknown speed";
+		break;
+	}
+	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
 	       ((flow_rx && flow_tx) ? "RX/TX" :
 	       (flow_rx ? "RX" :
 	       (flow_tx ? "TX" : "None"))));
@@ -6800,6 +6864,12 @@
 		ixgbe_service_event_complete(adapter);
 		return;
 	}
+#ifdef CONFIG_IXGBE_VXLAN
+	if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+		adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+		vxlan_get_rx_port(adapter->netdev);
+	}
+#endif /* CONFIG_IXGBE_VXLAN */
 	ixgbe_reset_subtask(adapter);
 	ixgbe_phy_interrupt_subtask(adapter);
 	ixgbe_sfp_detection_subtask(adapter);
@@ -6896,31 +6966,55 @@
 		if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
 		    !(first->tx_flags & IXGBE_TX_FLAGS_CC))
 			return;
+		vlan_macip_lens = skb_network_offset(skb) <<
+				  IXGBE_ADVTXD_MACLEN_SHIFT;
 	} else {
 		u8 l4_hdr = 0;
-		switch (first->protocol) {
-		case htons(ETH_P_IP):
-			vlan_macip_lens |= skb_network_header_len(skb);
+		union {
+			struct iphdr *ipv4;
+			struct ipv6hdr *ipv6;
+			u8 *raw;
+		} network_hdr;
+		union {
+			struct tcphdr *tcphdr;
+			u8 *raw;
+		} transport_hdr;
+
+		if (skb->encapsulation) {
+			network_hdr.raw = skb_inner_network_header(skb);
+			transport_hdr.raw = skb_inner_transport_header(skb);
+			vlan_macip_lens = skb_inner_network_offset(skb) <<
+					  IXGBE_ADVTXD_MACLEN_SHIFT;
+		} else {
+			network_hdr.raw = skb_network_header(skb);
+			transport_hdr.raw = skb_transport_header(skb);
+			vlan_macip_lens = skb_network_offset(skb) <<
+					  IXGBE_ADVTXD_MACLEN_SHIFT;
+		}
+
+		/* use first 4 bits to determine IP version */
+		switch (network_hdr.ipv4->version) {
+		case IPVERSION:
+			vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
 			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-			l4_hdr = ip_hdr(skb)->protocol;
+			l4_hdr = network_hdr.ipv4->protocol;
 			break;
-		case htons(ETH_P_IPV6):
-			vlan_macip_lens |= skb_network_header_len(skb);
-			l4_hdr = ipv6_hdr(skb)->nexthdr;
+		case 6:
+			vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
+			l4_hdr = network_hdr.ipv6->nexthdr;
 			break;
 		default:
 			if (unlikely(net_ratelimit())) {
 				dev_warn(tx_ring->dev,
-				 "partial checksum but proto=%x!\n",
-				 first->protocol);
+					 "partial checksum but version=%d\n",
+					 network_hdr.ipv4->version);
 			}
-			break;
 		}
 
 		switch (l4_hdr) {
 		case IPPROTO_TCP:
 			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-			mss_l4len_idx = tcp_hdrlen(skb) <<
+			mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
 					IXGBE_ADVTXD_L4LEN_SHIFT;
 			break;
 		case IPPROTO_SCTP:
@@ -6946,7 +7040,6 @@
 	}
 
 	/* vlan_macip_lens: MACLEN, VLAN tag */
-	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
 	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
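The checksum-offload context above no longer switches on first->protocol; it reads the version nibble of whichever network header applies (inner for encapsulated frames, outer otherwise), so one code path serves both cases. A tiny sketch of that version probe on a raw header pointer:

    #include <stdint.h>

    /* The version field is the high nibble of the first header byte for both
     * IPv4 and IPv6, so the same test works on outer and inner headers. */
    static unsigned int ip_header_version(const uint8_t *network_hdr)
    {
        return network_hdr[0] >> 4;   /* 4 for IPv4, 6 for IPv6 */
    }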
@@ -7201,6 +7294,10 @@
 		struct ipv6hdr *ipv6;
 	} hdr;
 	struct tcphdr *th;
+	struct sk_buff *skb;
+#ifdef CONFIG_IXGBE_VXLAN
+	u8 encap = false;
+#endif /* CONFIG_IXGBE_VXLAN */
 	__be16 vlan_id;
 
 	/* if ring doesn't have a interrupt vector, cannot perform ATR */
@@ -7214,16 +7311,36 @@
 	ring->atr_count++;
 
 	/* snag network header to get L4 type and address */
-	hdr.network = skb_network_header(first->skb);
+	skb = first->skb;
+	hdr.network = skb_network_header(skb);
+	if (skb->encapsulation) {
+#ifdef CONFIG_IXGBE_VXLAN
+		struct ixgbe_adapter *adapter = q_vector->adapter;
 
-	/* Currently only IPv4/IPv6 with TCP is supported */
-	if ((first->protocol != htons(ETH_P_IPV6) ||
-	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-	    (first->protocol != htons(ETH_P_IP) ||
-	     hdr.ipv4->protocol != IPPROTO_TCP))
+		if (!adapter->vxlan_port)
+			return;
+		if (first->protocol != htons(ETH_P_IP) ||
+		    hdr.ipv4->version != IPVERSION ||
+		    hdr.ipv4->protocol != IPPROTO_UDP) {
+			return;
+		}
+		if (ntohs(udp_hdr(skb)->dest) != adapter->vxlan_port)
+			return;
+		encap = true;
+		hdr.network = skb_inner_network_header(skb);
+		th = inner_tcp_hdr(skb);
+#else
 		return;
-
-	th = tcp_hdr(first->skb);
+#endif /* CONFIG_IXGBE_VXLAN */
+	} else {
+		/* Currently only IPv4/IPv6 with TCP is supported */
+		if ((first->protocol != htons(ETH_P_IPV6) ||
+		     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+		    (first->protocol != htons(ETH_P_IP) ||
+		     hdr.ipv4->protocol != IPPROTO_TCP))
+			return;
+		th = tcp_hdr(skb);
+	}
 
 	/* skip this packet since it is invalid or the socket is closing */
 	if (!th || th->fin)
@@ -7272,6 +7389,11 @@
 			     hdr.ipv6->daddr.s6_addr32[3];
 	}
 
+#ifdef CONFIG_IXGBE_VXLAN
+	if (encap)
+		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;
+#endif /* CONFIG_IXGBE_VXLAN */
+
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
 	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
 					      input, common, ring->queue_index);
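For ATR sampling, encapsulated frames are only considered when the outer packet is IPv4/UDP addressed to the single VXLAN port the driver has offloaded; anything else falls back to the plain TCP checks. A hedged sketch of that outer-header gate in plain C (not the driver's skb accessors):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* True only for IPv4/UDP frames whose destination port matches the one
     * offloaded VXLAN port (0 means no port is offloaded). */
    static bool outer_hdr_is_offloaded_vxlan(uint8_t ip_version, uint8_t ip_proto,
                                             uint16_t udp_dest_be,
                                             uint16_t vxlan_port)
    {
        if (!vxlan_port)
            return false;
        if (ip_version != 4 || ip_proto != 17 /* IPPROTO_UDP */)
            return false;
        return ntohs(udp_dest_be) == vxlan_port;
    }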
@@ -7737,9 +7859,10 @@
 	bool pools;
 
 	/* Hardware supports up to 8 traffic classes */
-	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
-	    (hw->mac.type == ixgbe_mac_82598EB &&
-	     tc < MAX_TRAFFIC_CLASS))
+	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
+		return -EINVAL;
+
+	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
 		return -EINVAL;
 
 	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
@@ -7898,12 +8021,23 @@
 		need_reset = true;
 
 	netdev->features = features;
+
+#ifdef CONFIG_IXGBE_VXLAN
+	if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
+		if (features & NETIF_F_RXCSUM)
+			adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
+		else
+			ixgbe_clear_vxlan_port(adapter);
+	}
+#endif /* CONFIG_IXGBE_VXLAN */
+
 	if (need_reset)
 		ixgbe_do_reset(netdev);
 
 	return 0;
 }
 
+#ifdef CONFIG_IXGBE_VXLAN
 /**
  * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @dev: The port's netdev
@@ -7917,17 +8051,18 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u16 new_port = ntohs(port);
 
+	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+		return;
+
 	if (sa_family == AF_INET6)
 		return;
 
-	if (adapter->vxlan_port == new_port) {
-		netdev_info(dev, "Port %d already offloaded\n", new_port);
+	if (adapter->vxlan_port == new_port)
 		return;
-	}
 
 	if (adapter->vxlan_port) {
 		netdev_info(dev,
-			    "Hit Max num of UDP ports, not adding port %d\n",
+			    "Hit Max num of VXLAN ports, not adding port %d\n",
 			    new_port);
 		return;
 	}
@@ -7946,9 +8081,11 @@
 				 __be16 port)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ixgbe_hw *hw = &adapter->hw;
 	u16 new_port = ntohs(port);
 
+	if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+		return;
+
 	if (sa_family == AF_INET6)
 		return;
 
@@ -7958,9 +8095,10 @@
 		return;
 	}
 
-	adapter->vxlan_port = 0;
-	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, 0);
+	ixgbe_clear_vxlan_port(adapter);
+	adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
 }
+#endif /* CONFIG_IXGBE_VXLAN */
 
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 			     struct net_device *dev,
@@ -8135,7 +8273,7 @@
 	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
 		return ERR_PTR(-EBUSY);
 
-	fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL);
+	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
 	if (!fwd_adapter)
 		return ERR_PTR(-ENOMEM);
 
@@ -8191,6 +8329,21 @@
 	kfree(fwd_adapter);
 }
 
+#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+static netdev_features_t
+ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
+		     netdev_features_t features)
+{
+	if (!skb->encapsulation)
+		return features;
+
+	if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
+		     IXGBE_MAX_TUNNEL_HDR_LEN))
+		return features & ~NETIF_F_ALL_CSUM;
+
+	return features;
+}
+
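ixgbe_features_check above caps how deep a tunnel header the hardware will checksum: if the span from the outer transport header to the inner MAC header exceeds 80 bytes, the checksum feature bits are masked off for that skb. A minimal expression of that length test, using raw pointers in place of the skb helpers:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_TUNNEL_HDR_LEN 80   /* limit used by the driver above */

    /* Encapsulation header length = inner MAC header start minus outer
     * transport header start; offload stays enabled only when it fits. */
    static bool tunnel_hdr_fits(const uint8_t *inner_mac,
                                const uint8_t *outer_transport)
    {
        return (inner_mac - outer_transport) <= MAX_TUNNEL_HDR_LEN;
    }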
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -8236,8 +8389,11 @@
 	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
 	.ndo_dfwd_add_station	= ixgbe_fwd_add,
 	.ndo_dfwd_del_station	= ixgbe_fwd_del,
+#ifdef CONFIG_IXGBE_VXLAN
 	.ndo_add_vxlan_port	= ixgbe_add_vxlan_port,
 	.ndo_del_vxlan_port	= ixgbe_del_vxlan_port,
+#endif /* CONFIG_IXGBE_VXLAN */
+	.ndo_features_check	= ixgbe_features_check,
 };
 
 /**
@@ -8597,17 +8753,24 @@
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
+	netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+				   NETIF_F_IPV6_CSUM;
+
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->priv_flags |= IFF_SUPP_NOFCS;
 
+#ifdef CONFIG_IXGBE_VXLAN
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X550:
 	case ixgbe_mac_X550EM_x:
-		netdev->hw_enc_features |= NETIF_F_RXCSUM;
+		netdev->hw_enc_features |= NETIF_F_RXCSUM |
+					   NETIF_F_IP_CSUM |
+					   NETIF_F_IPV6_CSUM;
 		break;
 	default:
 		break;
 	}
+#endif /* CONFIG_IXGBE_VXLAN */
 
 #ifdef CONFIG_IXGBE_DCB
 	netdev->dcbnl_ops = &dcbnl_ops;
@@ -8694,9 +8857,10 @@
 	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
 
 	/* pick up the PCI bus settings for reporting later */
-	hw->mac.ops.get_bus_info(hw);
 	if (ixgbe_pcie_from_parent(hw))
 		ixgbe_get_parent_bus_info(adapter);
+	else
+		hw->mac.ops.get_bus_info(hw);
 
 	/* calculate the expected PCIe bandwidth required for optimal
 	 * performance. Note that some older parts will never have enough
@@ -8859,12 +9023,7 @@
 		unregister_netdev(netdev);
 
 #ifdef CONFIG_PCI_IOV
-	/*
-	 * Only disable SR-IOV on unload if the user specified the now
-	 * deprecated max_vfs module parameter.
-	 */
-	if (max_vfs)
-		ixgbe_disable_sriov(adapter);
+	ixgbe_disable_sriov(adapter);
 #endif
 	ixgbe_clear_interrupt_scheme(adapter);
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 526a20b..597d0b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -243,9 +243,7 @@
 	u16 ext_ability = 0;
 
 	if (!hw->phy.phy_semaphore_mask) {
-		hw->phy.lan_id = IXGBE_READ_REG(hw, IXGBE_STATUS) &
-				 IXGBE_STATUS_LAN_ID_1;
-		if (hw->phy.lan_id)
+		if (hw->bus.lan_id)
 			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
 		else
 			hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
@@ -608,12 +606,7 @@
 				u32 device_type, u16 phy_data)
 {
 	s32 status;
-	u32 gssr;
-
-	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
-		gssr = IXGBE_GSSR_PHY1_SM;
-	else
-		gssr = IXGBE_GSSR_PHY0_SM;
+	u32 gssr = hw->phy.phy_semaphore_mask;
 
 	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
 		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
@@ -737,39 +730,61 @@
 }
 
 /**
+ * ixgbe_get_copper_speeds_supported - Get copper link speed from phy
+ * @hw: pointer to hardware structure
+ *
+ * Determines the supported link capabilities by reading the PHY auto
+ * negotiation register.
+ */
+static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
+{
+	u16 speed_ability;
+	s32 status;
+
+	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+				      &speed_ability);
+	if (status)
+		return status;
+
+	if (speed_ability & MDIO_SPEED_10G)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+	if (speed_ability & MDIO_PMA_SPEED_1000)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+	if (speed_ability & MDIO_PMA_SPEED_100)
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550:
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+		break;
+	case ixgbe_mac_X550EM_x:
+		hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
  * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
  * @hw: pointer to hardware structure
  * @speed: pointer to link speed
  * @autoneg: boolean auto-negotiation value
- *
- * Determines the link capabilities by reading the AUTOC register.
  */
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 					       ixgbe_link_speed *speed,
 					       bool *autoneg)
 {
-	s32 status;
-	u16 speed_ability;
+	s32 status = 0;
 
-	*speed = 0;
 	*autoneg = true;
+	if (!hw->phy.speeds_supported)
+		status = ixgbe_get_copper_speeds_supported(hw);
 
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-				      &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_100)
-			*speed |= IXGBE_LINK_SPEED_100_FULL;
-	}
-
-	/* Internal PHY does not support 100 Mbps */
-	if (hw->mac.type == ixgbe_mac_X550EM_x)
-		*speed &= ~IXGBE_LINK_SPEED_100_FULL;
-
+	*speed = hw->phy.speeds_supported;
 	return status;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index b6f424f..6368919 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2014 Intel Corporation.
+  Copyright(c) 1999 - 2015 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -848,6 +848,7 @@
 #define IXGBE_MDIO_AUTO_NEG_LINK_STATUS	0x4 /* Indicates if link is up */
 
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK	0x7 /* Speed/Duplex Mask */
+#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK	0x6 /* Speed Mask */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s H Duplex */
@@ -856,6 +857,24 @@
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB	0x4 /* 1Gb/s */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB	0x6 /* 10Gb/s */
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG	0x20	/* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400	/* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG		0x17	/* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG		0x10	/* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE		0x1000	/* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX	0x4000	/* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE		0x8000	/* full duplex, bit:15*/
+#define IXGBE_MII_2_5GBASE_T_ADVERTISE		0x0400
+#define IXGBE_MII_5GBASE_T_ADVERTISE		0x0800
+#define IXGBE_MII_100BASE_T_ADVERTISE		0x0100	/* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF	0x0080	/* half duplex, bit:7 */
+#define IXGBE_MII_RESTART			0x200
+#define IXGBE_MII_AUTONEG_COMPLETE		0x20
+#define IXGBE_MII_AUTONEG_LINK_UP		0x04
+#define IXGBE_MII_AUTONEG_REG			0x0
 
 /* Management */
 #define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -1305,6 +1324,7 @@
 #define IXGBE_MDIO_AUTO_NEG_CONTROL	0x0 /* AUTO_NEG Control Reg */
 #define IXGBE_MDIO_AUTO_NEG_STATUS	0x1 /* AUTO_NEG Status Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT	0xC800 /* AUTO_NEG Vendor Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM  0xCC00 /* AUTO_NEG Vendor TX Reg */
 #define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */
 #define IXGBE_MDIO_AUTO_NEG_VEN_LSC	0x1 /* AUTO_NEG Vendor Tx LSC */
 #define IXGBE_MDIO_AUTO_NEG_ADVT	0x10 /* AUTO_NEG Advt Reg */
@@ -1312,7 +1332,8 @@
 #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT	0x3C /* AUTO_NEG EEE Advt Reg */
 
 #define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE	 0x0800 /* Set low power mode */
-
+#define IXGBE_AUTO_NEG_LP_STATUS	0xE820 /* AUTO NEG Rx LP Status Reg */
+#define IXGBE_AUTO_NEG_LP_1000BASE_CAP	0x8000 /* AUTO NEG Rx LP 1000BaseT */
 #define IXGBE_MDIO_TX_VENDOR_ALARMS_3	0xCC02 /* Vendor Alarms 3 Reg */
 #define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */
 #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */
@@ -2041,6 +2062,11 @@
 #define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
 #define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
 
+#define NVM_INIT_CTRL_3			0x38
+#define NVM_INIT_CTRL_3_LPLU		0x8
+#define NVM_INIT_CTRL_3_D10GMP_PORT0	0x40
+#define NVM_INIT_CTRL_3_D10GMP_PORT1	0x100
+
 #define IXGBE_EEPROM_PAGE_SIZE_MAX       128
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2540,9 +2566,11 @@
 #define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
 #define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
+#define IXGBE_FDIRCMD_RX_TUNNEL_FILTER_SHIFT	23
 #define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
 #define IXGBE_FDIR_INIT_DONE_POLL               10
 #define IXGBE_FDIRCMD_CMD_POLL                  10
+#define IXGBE_FDIRCMD_TUNNEL_FILTER		0x00800000
 
 #define IXGBE_FDIR_DROP_QUEUE                   127
 
@@ -2833,12 +2861,13 @@
 #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
 
 /* Software ATR input stream values and masks */
-#define IXGBE_ATR_HASH_MASK     0x7fff
-#define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_UDP       0x1
-#define IXGBE_ATR_L4TYPE_TCP       0x2
-#define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define IXGBE_ATR_HASH_MASK		0x7fff
+#define IXGBE_ATR_L4TYPE_MASK		0x3
+#define IXGBE_ATR_L4TYPE_UDP		0x1
+#define IXGBE_ATR_L4TYPE_TCP		0x2
+#define IXGBE_ATR_L4TYPE_SCTP		0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK	0x4
+#define IXGBE_ATR_L4TYPE_TUNNEL_MASK	0x10
 enum ixgbe_atr_flow_type {
 	IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
 	IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
@@ -3035,9 +3064,8 @@
 /* PCI bus types */
 enum ixgbe_bus_type {
 	ixgbe_bus_type_unknown = 0,
-	ixgbe_bus_type_pci,
-	ixgbe_bus_type_pcix,
 	ixgbe_bus_type_pci_express,
+	ixgbe_bus_type_internal,
 	ixgbe_bus_type_reserved
 };
 
@@ -3298,6 +3326,7 @@
 	s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val);
 	s32 (*check_overtemp)(struct ixgbe_hw *);
 	s32 (*set_phy_power)(struct ixgbe_hw *, bool on);
+	s32 (*enter_lplu)(struct ixgbe_hw *);
 	s32 (*handle_lasi)(struct ixgbe_hw *hw);
 };
 
@@ -3308,6 +3337,7 @@
 	u16                             word_size;
 	u16                             address_bits;
 	u16                             word_page_size;
+	u16				ctrl_word_3;
 };
 
 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED	0x01
@@ -3351,10 +3381,10 @@
 	bool                            sfp_setup_needed;
 	u32                             revision;
 	enum ixgbe_media_type           media_type;
-	u8				lan_id;
 	u32				phy_semaphore_mask;
 	bool                            reset_disable;
 	ixgbe_autoneg_advertised        autoneg_advertised;
+	ixgbe_link_speed		speeds_supported;
 	enum ixgbe_smart_speed          smart_speed;
 	bool                            smart_speed_active;
 	bool                            multispeed_fiber;
@@ -3460,16 +3490,21 @@
 #define IXGBE_ERR_PBA_SECTION                   -31
 #define IXGBE_ERR_INVALID_ARGUMENT              -32
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
+#define IXGBE_ERR_FDIR_CMD_INCOMPLETE		-38
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P == 0) ? (0x4010) : (0x8010))
-#define IXGBE_KRM_LINK_CTRL_1(P)	((P == 0) ? (0x420C) : (0x820C))
-#define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P == 0) ? (0x4634) : (0x8634))
-#define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P == 0) ? (0x4638) : (0x8638))
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P == 0) ? (0x4B00) : (0x8B00))
-#define IXGBE_KRM_PMD_DFX_BURNIN(P)	((P == 0) ? (0x4E00) : (0x8E00))
-#define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P == 0) ? (0x5520) : (0x9520))
-#define IXGBE_KRM_RX_ANA_CTL(P)		((P == 0) ? (0x5A00) : (0x9A00))
+#define IXGBE_FUSES0_GROUP(_i)		(0x11158 + ((_i) * 4))
+#define IXGBE_FUSES0_300MHZ		BIT(5)
+#define IXGBE_FUSES0_REV1		BIT(6)
+
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P)	((P) ? 0x8010 : 0x4010)
+#define IXGBE_KRM_LINK_CTRL_1(P)	((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_DSP_TXFFE_STATE_4(P)	((P) ? 0x8634 : 0x4634)
+#define IXGBE_KRM_DSP_TXFFE_STATE_5(P)	((P) ? 0x8638 : 0x4638)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P)	((P) ? 0x8B00 : 0x4B00)
+#define IXGBE_KRM_PMD_DFX_BURNIN(P)	((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_TX_COEFF_CTRL_1(P)	((P) ? 0x9520 : 0x5520)
+#define IXGBE_KRM_RX_ANA_CTL(P)		((P) ? 0x9A00 : 0x5A00)
 
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B		(1 << 9)
 #define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS		(1 << 11)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 032a587..4e75843 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -54,6 +54,11 @@
 s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+
+	/* set_phy_power was set by default to NULL */
+	if (!ixgbe_mng_present(hw))
+		phy->ops.set_phy_power = ixgbe_set_copper_phy_power;
 
 	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
 	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 7581da1..9fe9445 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -26,6 +26,20 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
+static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+	struct ixgbe_phy_info *phy = &hw->phy;
+
+	/* Start with X540 invariants, since they are so similar */
+	ixgbe_get_invariants_X540(hw);
+
+	if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
+		phy->ops.set_phy_power = NULL;
+
+	return 0;
+}
+
 /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
  *  @hw: pointer to hardware structure
  **/
@@ -597,6 +611,24 @@
 	return status;
 }
 
+/**
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
+ * @hw: pointer to hardware structure
+ *
+ * Sets bus link width and speed to unknown because X550em is
+ * not a PCI device.
+ **/
+static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw)
+{
+	hw->bus.type  = ixgbe_bus_type_internal;
+	hw->bus.width = ixgbe_bus_width_unknown;
+	hw->bus.speed = ixgbe_bus_speed_unknown;
+
+	hw->mac.ops.set_lan_id(hw);
+
+	return 0;
+}
+
 /** ixgbe_disable_rx_x550 - Disable RX unit
  *
  *  Disables the Rx DMA unit for x550
@@ -1444,6 +1476,144 @@
 	return ixgbe_enable_lasi_ext_t_x550em(hw);
 }
 
+/** ixgbe_get_lcd_t_x550em - Determine lowest common denominator
+ *  @hw: pointer to hardware structure
+ *  @lcd_speed: pointer to lowest common link speed
+ *
+ *  Determine lowest common link speed with link partner.
+ **/
+static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
+				  ixgbe_link_speed *lcd_speed)
+{
+	u16 an_lp_status;
+	s32 status;
+	u16 word = hw->eeprom.ctrl_word_3;
+
+	*lcd_speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &an_lp_status);
+	if (status)
+		return status;
+
+	/* If link partner advertised 1G, return 1G */
+	if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) {
+		*lcd_speed = IXGBE_LINK_SPEED_1GB_FULL;
+		return status;
+	}
+
+	/* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */
+	if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) ||
+	    (word & NVM_INIT_CTRL_3_D10GMP_PORT0))
+		return status;
+
+	/* Link partner not capable of lower speeds, return 10G */
+	*lcd_speed = IXGBE_LINK_SPEED_10GB_FULL;
+	return status;
+}
+
+/** ixgbe_enter_lplu_t_x550em - Transition to low power states
+ *  @hw: pointer to hardware structure
+ *
+ *  Configures Low Power Link Up on transition to low power states
+ *  (from D0 to non-D0). Link is required to enter LPLU so avoid resetting
+ *  the X557 PHY immediately prior to entering LPLU.
+ **/
+static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
+{
+	u16 an_10g_cntl_reg, autoneg_reg, speed;
+	s32 status;
+	ixgbe_link_speed lcd_speed;
+	u32 save_autoneg;
+	bool link_up;
+
+	/* SW LPLU not required on later HW revisions. */
+	if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
+		return 0;
+
+	/* If blocked by MNG FW, then don't restart AN */
+	if (ixgbe_check_reset_blocked(hw))
+		return 0;
+
+	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+	if (status)
+		return status;
+
+	status = hw->eeprom.ops.read(hw, NVM_INIT_CTRL_3,
+				     &hw->eeprom.ctrl_word_3);
+	if (status)
+		return status;
+
+	/* If link is down, LPLU disabled in NVM, WoL disabled, or
+	 * manageability disabled, then force link down by entering
+	 * low power mode.
+	 */
+	if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) ||
+	    !(hw->wol_enabled || ixgbe_mng_present(hw)))
+		return ixgbe_set_copper_phy_power(hw, false);
+
+	/* Determine LCD */
+	status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed);
+	if (status)
+		return status;
+
+	/* If no valid LCD link speed, then force link down and exit. */
+	if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN)
+		return ixgbe_set_copper_phy_power(hw, false);
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &speed);
+	if (status)
+		return status;
+
+	/* If no link now, speed is invalid so take link down */
+	status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
+	if (status)
+		return ixgbe_set_copper_phy_power(hw, false);
+
+	/* clear everything but the speed bits */
+	speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK;
+
+	/* If current speed is already LCD, then exit. */
+	if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) &&
+	     (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) ||
+	    ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) &&
+	     (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL)))
+		return status;
+
+	/* Clear AN completed indication */
+	status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_reg);
+	if (status)
+		return status;
+
+	status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &an_10g_cntl_reg);
+	if (status)
+		return status;
+
+	status = hw->phy.ops.read_reg(hw,
+				      IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+				      &autoneg_reg);
+	if (status)
+		return status;
+
+	save_autoneg = hw->phy.autoneg_advertised;
+
+	/* Setup link at least common link speed */
+	status = hw->mac.ops.setup_link(hw, lcd_speed, false);
+
+	/* restore autoneg from before setting lplu speed */
+	hw->phy.autoneg_advertised = save_autoneg;
+
+	return status;
+}
+
 /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
  *  @hw: pointer to hardware structure
  *
@@ -1514,6 +1684,11 @@
 			ret_val = ixgbe_setup_kr_speed_x550em(hw, speed);
 		}
 
+		/* setup SW LPLU only for first revision */
+		if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
+							IXGBE_FUSES0_GROUP(0))))
+			phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
+
 		phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
 		phy->ops.reset = ixgbe_reset_phy_t_X550em;
 		break;
@@ -1760,7 +1935,6 @@
 	.get_mac_addr			= &ixgbe_get_mac_addr_generic, \
 	.get_device_caps		= &ixgbe_get_device_caps_generic, \
 	.stop_adapter			= &ixgbe_stop_adapter_generic, \
-	.get_bus_info			= &ixgbe_get_bus_info_generic, \
 	.set_lan_id			= &ixgbe_set_lan_id_multi_port_pcie, \
 	.read_analog_reg8		= NULL, \
 	.write_analog_reg8		= NULL, \
@@ -1809,6 +1983,7 @@
 	.get_wwn_prefix		= &ixgbe_get_wwn_prefix_generic,
 	.setup_link		= &ixgbe_setup_mac_link_X540,
 	.get_link_capabilities	= &ixgbe_get_copper_link_capabilities_generic,
+	.get_bus_info		= &ixgbe_get_bus_info_generic,
 	.setup_sfp		= NULL,
 };
 
@@ -1820,6 +1995,7 @@
 	.get_wwn_prefix		= NULL,
 	.setup_link		= NULL, /* defined later */
 	.get_link_capabilities	= &ixgbe_get_link_capabilities_X550em,
+	.get_bus_info		= &ixgbe_get_bus_info_X550em,
 	.setup_sfp		= ixgbe_setup_sfp_modules_X550em,
 
 };
@@ -1855,7 +2031,7 @@
 	.read_reg		= &ixgbe_read_phy_reg_generic, \
 	.write_reg		= &ixgbe_write_phy_reg_generic, \
 	.setup_link		= &ixgbe_setup_phy_link_generic, \
-	.set_phy_power		= &ixgbe_set_copper_phy_power, \
+	.set_phy_power		= NULL, \
 	.check_overtemp		= &ixgbe_tn_check_overtemp, \
 	.get_firmware_version	= &ixgbe_get_phy_firmware_version_generic,
 
@@ -1893,7 +2069,7 @@
 
 struct ixgbe_info ixgbe_X550EM_x_info = {
 	.mac			= ixgbe_mac_X550EM_x,
-	.get_invariants		= &ixgbe_get_invariants_X540,
+	.get_invariants		= &ixgbe_get_invariants_X550_x,
 	.mac_ops		= &mac_ops_X550EM_x,
 	.eeprom_ops		= &eeprom_ops_X550EM_x,
 	.phy_ops		= &phy_ops_X550EM_x,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 770e21a..5843458 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -161,6 +161,18 @@
 #define IXGBE_RXDADV_SPLITHEADER_EN	0x00001000
 #define IXGBE_RXDADV_SPH		0x8000
 
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE		0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP		0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4		0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP		0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX		0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6		0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX	0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP		0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP		0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX	0x00000009
+
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
 				      IXGBE_RXD_ERR_CE |  \
 				      IXGBE_RXD_ERR_LE |  \
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index b2f5b16..d3e5f5b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -813,22 +813,15 @@
 {
 	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-	/* We support this operation only for 82599 and x540 at the moment */
-	if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-		return IXGBEVF_82599_RETA_SIZE;
+	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+		return IXGBEVF_X550_VFRETA_SIZE;
 
-	return 0;
+	return IXGBEVF_82599_RETA_SIZE;
 }
 
 static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
 {
-	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-	/* We support this operation only for 82599 and x540 at the moment */
-	if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-		return IXGBEVF_RSS_HASH_KEY_SIZE;
-
-	return 0;
+	return IXGBEVF_RSS_HASH_KEY_SIZE;
 }
 
 static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@
 	if (hfunc)
 		*hfunc = ETH_RSS_HASH_TOP;
 
-	/* If neither indirection table nor hash key was requested - just
-	 * return a success avoiding taking any locks.
-	 */
-	if (!indir && !key)
-		return 0;
+	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+		if (key)
+			memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
 
-	spin_lock_bh(&adapter->mbx_lock);
-	if (indir)
-		err = ixgbevf_get_reta_locked(&adapter->hw, indir,
-					      adapter->num_rx_queues);
+		if (indir) {
+			int i;
 
-	if (!err && key)
-		err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+				indir[i] = adapter->rss_indir_tbl[i];
+		}
+	} else {
+		/* If neither indirection table nor hash key was requested
+		 *  - just return a success avoiding taking any locks.
+		 */
+		if (!indir && !key)
+			return 0;
 
-	spin_unlock_bh(&adapter->mbx_lock);
+		spin_lock_bh(&adapter->mbx_lock);
+		if (indir)
+			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+						      adapter->num_rx_queues);
+
+		if (!err && key)
+			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+		spin_unlock_bh(&adapter->mbx_lock);
+	}
 
 	return err;
 }
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 775d089..04c7ec84 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -144,9 +144,11 @@
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES	2
-#define IXGBEVF_82599_RETA_SIZE	128
+#define IXGBEVF_MAX_RSS_QUEUES		2
+#define IXGBEVF_82599_RETA_SIZE		128	/* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE	64	/* 64 entries */
 #define IXGBEVF_RSS_HASH_KEY_SIZE	40
+#define IXGBEVF_VFRSSRK_REGS		10	/* 10 registers for RSS key */
 
 #define IXGBEVF_DEFAULT_TXD	1024
 #define IXGBEVF_DEFAULT_RXD	512
@@ -447,6 +449,9 @@
 
 	spinlock_t mbx_lock;
 	unsigned long last_reset;
+
+	u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+	u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
 };
 
 enum ixbgevf_state_t {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1d7b00b..149a0b4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -457,6 +457,32 @@
 	napi_gro_receive(&q_vector->napi, skb);
 }
 
+#define IXGBE_RSS_L4_TYPES_MASK \
+	((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+	 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+				   union ixgbe_adv_rx_desc *rx_desc,
+				   struct sk_buff *skb)
+{
+	u16 rss_type;
+
+	if (!(ring->netdev->features & NETIF_F_RXHASH))
+		return;
+
+	rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+		   IXGBE_RXDADV_RSSTYPE_MASK;
+
+	if (!rss_type)
+		return;
+
+	skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+		     (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+		     PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containing ring specific data
@@ -506,6 +532,7 @@
 				       union ixgbe_adv_rx_desc *rx_desc,
 				       struct sk_buff *skb)
 {
+	ixgbevf_rx_hash(rx_ring, rx_desc, skb);
 	ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
 
 	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -649,46 +676,6 @@
 }
 
 /**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
-			      struct sk_buff *skb)
-{
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned char *va;
-	unsigned int pull_len;
-
-	/* it is valid to use page_address instead of kmap since we are
-	 * working with pages allocated out of the lomem pool per
-	 * alloc_page(GFP_ATOMIC)
-	 */
-	va = skb_frag_address(frag);
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	skb_frag_size_sub(frag, pull_len);
-	frag->page_offset += pull_len;
-	skb->data_len -= pull_len;
-	skb->tail += pull_len;
-}
-
-/**
  * ixgbevf_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
@@ -721,10 +708,6 @@
 		}
 	}
 
-	/* place header in linear portion of buffer */
-	if (skb_is_nonlinear(skb))
-		ixgbevf_pull_tail(rx_ring, skb);
-
 	/* if eth_skb_pad returns an error the skb was freed */
 	if (eth_skb_pad(skb))
 		return true;
@@ -789,16 +772,19 @@
 				struct sk_buff *skb)
 {
 	struct page *page = rx_buffer->page;
+	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 	unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
 	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
+	unsigned int pull_len;
 
-	if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-		unsigned char *va = page_address(page) + rx_buffer->page_offset;
+	if (unlikely(skb_is_nonlinear(skb)))
+		goto add_tail_frag;
 
+	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@
 		return false;
 	}
 
+	/* we need the header to contain the greater of either ETH_HLEN or
+	 * 60 bytes if the skb->len is less than 60 for skb_pad.
+	 */
+	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+	/* update all of the pointers */
+	va += pull_len;
+	size -= pull_len;
+
+add_tail_frag:
 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			rx_buffer->page_offset, size, truesize);
+			(unsigned long)va & ~PAGE_MASK, size, truesize);
 
 	/* avoid re-using remote pages */
 	if (unlikely(ixgbevf_page_is_reserved(page)))
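The refactor above folds the old pull-tail pass into ixgbevf_add_rx_frag: the packet header is copied into the skb's linear area with the copy length rounded up to a whole number of longs, and whatever remains is attached as a page fragment. A simplified sketch of the aligned header copy (the caller must guarantee the destination can hold the rounded-up length):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    /* Copy header_len bytes into the linear area, rounding the memcpy length
     * up to sizeof(long) so it runs on aligned words. */
    static void pull_header(uint8_t *linear, const uint8_t *frag_va,
                            size_t header_len)
    {
        memcpy(linear, frag_va, ALIGN_UP(header_len, sizeof(long)));
    }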
@@ -1697,22 +1696,25 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 vfmrqc = 0, vfreta = 0;
-	u32 rss_key[10];
 	u16 rss_i = adapter->num_rx_queues;
-	int i, j;
+	u8 i, j;
 
 	/* Fill out hash function seeds */
-	netdev_rss_key_fill(rss_key, sizeof(rss_key));
-	for (i = 0; i < 10; i++)
-		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+	netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+	for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
 
-	/* Fill out redirection table */
-	for (i = 0, j = 0; i < 64; i++, j++) {
+	for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
 		if (j == rss_i)
 			j = 0;
-		vfreta = (vfreta << 8) | (j * 0x1);
-		if ((i & 3) == 3)
+
+		adapter->rss_indir_tbl[i] = j;
+
+		vfreta |= j << (i & 0x3) * 8;
+		if ((i & 3) == 3) {
 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+			vfreta = 0;
+		}
 	}
 
 	/* Perform hash on these packet types */
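The rewritten VF RETA setup above records each redirection entry in adapter->rss_indir_tbl and packs four one-byte entries into every 32-bit VFRETA register, clearing the accumulator after each write. A standalone sketch of that packing, with an array standing in for the MMIO register writes:

    #include <stdint.h>

    #define VFRETA_SIZE 64   /* IXGBEVF_X550_VFRETA_SIZE */

    /* Entry i occupies byte (i & 3) of register (i >> 2). */
    static void pack_vfreta(const uint8_t *indir_tbl, uint32_t *regs)
    {
        uint32_t vfreta = 0;
        int i;

        for (i = 0; i < VFRETA_SIZE; i++) {
            vfreta |= (uint32_t)indir_tbl[i] << ((i & 0x3) * 8);
            if ((i & 3) == 3) {
                regs[i >> 2] = vfreta;
                vfreta = 0;
            }
        }
    }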
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 62e48bc..fe2299a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3027,8 +3027,8 @@
 	const char *dt_mac_addr;
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
+	const char *managed;
 	int phy_mode;
-	int fixed_phy = 0;
 	int err;
 
 	/* Our multiqueue support is not complete, so for now, only
@@ -3062,7 +3062,6 @@
 			dev_err(&pdev->dev, "cannot register fixed PHY\n");
 			goto err_free_irq;
 		}
-		fixed_phy = 1;
 
 		/* In the case of a fixed PHY, the DT node associated
 		 * to the PHY is the Ethernet MAC DT node.
@@ -3086,8 +3085,10 @@
 	pp = netdev_priv(dev);
 	pp->phy_node = phy_node;
 	pp->phy_interface = phy_mode;
-	pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
-				fixed_phy;
+
+	err = of_property_read_string(dn, "managed", &managed);
+	pp->use_inband_status = (err == 0 &&
+				 strcmp(managed, "in-band-status") == 0);
 
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index 52a6665..d547010 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -18,5 +18,6 @@
 
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 38fe32ef..2e2a5ec 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 63769df..eb8a498 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -100,7 +100,6 @@
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err = 0;
-	char name[25];
 	int timestamp_en = 0;
 	bool assigned_eq = false;
 
@@ -119,8 +118,8 @@
 			err = mlx4_assign_eq(mdev->dev, priv->port,
 					     &cq->vector);
 			if (err) {
-				mlx4_err(mdev, "Failed assigning an EQ to %s\n",
-					 name);
+				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
+					 cq->vector);
 				goto free_eq;
 			}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 99ba1c5..f79d812 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -102,6 +102,7 @@
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
 	"blueflame",
+	"phv-bit"
 };
 
 static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@
 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
+	struct mlx4_en_dev *mdev = priv->mdev;
 	bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
 	bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+	bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+	bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
 	int i;
+	int ret = 0;
 
-	if (bf_enabled_new == bf_enabled_old)
-		return 0; /* Nothing to do */
+	if (bf_enabled_new != bf_enabled_old) {
+		if (bf_enabled_new) {
+			bool bf_supported = true;
 
-	if (bf_enabled_new) {
-		bool bf_supported = true;
+			for (i = 0; i < priv->tx_ring_num; i++)
+				bf_supported &= priv->tx_ring[i]->bf_alloced;
 
-		for (i = 0; i < priv->tx_ring_num; i++)
-			bf_supported &= priv->tx_ring[i]->bf_alloced;
+			if (!bf_supported) {
+				en_err(priv, "BlueFlame is not supported\n");
+				return -EINVAL;
+			}
 
-		if (!bf_supported) {
-			en_err(priv, "BlueFlame is not supported\n");
-			return -EINVAL;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		} else {
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
 		}
 
-		priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-	} else {
-		priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+		for (i = 0; i < priv->tx_ring_num; i++)
+			priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+
+		en_info(priv, "BlueFlame %s\n",
+			bf_enabled_new ?  "Enabled" : "Disabled");
 	}
 
-	for (i = 0; i < priv->tx_ring_num; i++)
-		priv->tx_ring[i]->bf_enabled = bf_enabled_new;
-
-	en_info(priv, "BlueFlame %s\n",
-		bf_enabled_new ?  "Enabled" : "Disabled");
-
+	if (phv_enabled_new != phv_enabled_old) {
+		ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+		if (ret)
+			return ret;
+		else if (phv_enabled_new)
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		else
+			priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+		en_info(priv, "PHV bit %s\n",
+			phv_enabled_new ?  "Enabled" : "Disabled");
+	}
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e0de2fd..4726122 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2184,6 +2184,25 @@
 	}
 }
 
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+					      netdev_features_t features)
+{
+	struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+	struct mlx4_en_dev *mdev = en_priv->mdev;
+
+	/* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+	 * enable/disable make sure S-TAG flag is always in same state as
+	 * C-TAG.
+	 */
+	if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+	    !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+		features |= NETIF_F_HW_VLAN_STAG_RX;
+	else
+		features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+	return features;
+}
+
 static int mlx4_en_set_features(struct net_device *netdev,
 		netdev_features_t features)
 {
@@ -2218,6 +2237,10 @@
 		en_info(priv, "Turn %s TX vlan strip offload\n",
 			(features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
+	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+		en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+			(features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
 	if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
 		en_info(priv, "Turn %s loopback\n",
 			(features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@
 	.ndo_poll_controller	= mlx4_en_netpoll,
 #endif
 	.ndo_set_features	= mlx4_en_set_features,
+	.ndo_fix_features	= mlx4_en_fix_features,
 	.ndo_setup_tc		= mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@
 	dev->hw_features |= NETIF_F_LOOPBACK |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+			NETIF_F_HW_VLAN_STAG_FILTER;
+		dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+	}
+
+	if (mlx4_is_slave(mdev->dev)) {
+		int phv;
+
+		err = get_phv_bit(mdev->dev, port, &phv);
+		if (!err && phv) {
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+			priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+		}
+	} else {
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+		    !(mdev->dev->caps.flags2 &
+		      MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+			dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+	}
+
 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
 		dev->hw_features |= NETIF_F_RXFCS;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9c145dd..4402a1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -725,7 +725,7 @@
 
 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
 	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
 		hdr += sizeof(struct vlan_hdr);
@@ -906,17 +906,25 @@
 				gro_skb->csum_level = 1;
 
 			if ((cqe->vlan_my_qpn &
-			    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+			    cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
 			    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
 				u16 vid = be16_to_cpu(cqe->sl_vid);
 
 				__vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+			} else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+				  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+				 (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+				__vlan_hwaccel_put_tag(gro_skb,
+						       htons(ETH_P_8021AD),
+						       be16_to_cpu(cqe->sl_vid));
 			}
 
 			if (dev->features & NETIF_F_RXHASH)
 				skb_set_hash(gro_skb,
 					     be32_to_cpu(cqe->immed_rss_invalid),
-					     PKT_HASH_TYPE_L3);
+					     (ip_summed == CHECKSUM_UNNECESSARY) ?
+						PKT_HASH_TYPE_L4 :
+						PKT_HASH_TYPE_L3);
 
 			skb_record_rx_queue(gro_skb, cq->ring);
 			skb_mark_napi_id(gro_skb, &cq->napi);
@@ -962,12 +970,19 @@
 		if (dev->features & NETIF_F_RXHASH)
 			skb_set_hash(skb,
 				     be32_to_cpu(cqe->immed_rss_invalid),
-				     PKT_HASH_TYPE_L3);
+				     (ip_summed == CHECKSUM_UNNECESSARY) ?
+					PKT_HASH_TYPE_L4 :
+					PKT_HASH_TYPE_L3);
 
 		if ((be32_to_cpu(cqe->vlan_my_qpn) &
-		    MLX4_CQE_VLAN_PRESENT_MASK) &&
+		    MLX4_CQE_CVLAN_PRESENT_MASK) &&
 		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+		else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+			  MLX4_CQE_SVLAN_PRESENT_MASK) &&
+			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+					       be16_to_cpu(cqe->sl_vid));
 
 		if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
 			timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1065,7 +1080,10 @@
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+	/* VLAN_HLEN is added twice to support skbs VLAN-tagged with multiple
+	 * headers (for example: ETH_P_8021Q and ETH_P_8021AD).
+	 */
+	int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
 	int buf_size = 0;
 	int i = 0;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10d98f..494e776 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,6 +718,7 @@
 	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
+	u16 vlan_proto = 0;
 	int i_frag;
 	int lso_header_size;
 	void *fragptr = NULL;
@@ -750,9 +751,10 @@
 		goto tx_drop;
 	}
 
-	if (skb_vlan_tag_present(skb))
+	if (skb_vlan_tag_present(skb)) {
 		vlan_tag = skb_vlan_tag_get(skb);
-
+		vlan_proto = be16_to_cpu(skb->vlan_proto);
+	}
 
 	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 
@@ -958,8 +960,11 @@
 		ring->bf.offset ^= ring->bf.buf_size;
 	} else {
 		tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-		tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-			!!skb_vlan_tag_present(skb);
+		if (vlan_proto == ETH_P_8021AD)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+		else if (vlan_proto == ETH_P_8021Q)
+			tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
 		tx_desc->ctrl.fence_size = real_size;
 
 		/* Ensure new descriptor hits memory
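With 802.1ad offload, the transmit path above picks the VLAN insertion mode from skb->vlan_proto rather than from mere tag presence: an S-tag (ETH_P_8021AD) requests S-VLAN insertion, a C-tag (ETH_P_8021Q) requests C-VLAN insertion. A small sketch of that selection with the EtherTypes spelled out (the enum values are placeholders, not the MLX4_WQE_CTRL_INS_* constants):

    #include <stdint.h>

    #define ETHERTYPE_8021Q  0x8100   /* C-tag */
    #define ETHERTYPE_8021AD 0x88A8   /* S-tag */

    enum ins_vlan { INS_NONE, INS_CVLAN, INS_SVLAN };   /* placeholder values */

    static enum ins_vlan pick_ins_vlan(uint16_t vlan_proto_host)
    {
        if (vlan_proto_host == ETHERTYPE_8021AD)
            return INS_SVLAN;
        if (vlan_proto_host == ETHERTYPE_8021Q)
            return INS_CVLAN;
        return INS_NONE;
    }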
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e30bf57..e8ec1de 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -154,6 +154,7 @@
 		[26] = "Port ETS Scheduler support",
 		[27] = "Port beacon support",
 		[28] = "RX-ALL support",
+		[29] = "802.1ad offload support",
 	};
 	int i;
 
@@ -307,6 +308,7 @@
 
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT			0x40
 
 	if (vhcr->op_modifier == 1) {
 		struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@
 		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+		if (dev->caps.phv_bit[port]) {
+			field = QUERY_FUNC_CAP_PHV_BIT;
+			MLX4_PUT(outbox->buf, field,
+				 QUERY_FUNC_CAP_FLAGS0_OFFSET);
+		}
+
 	} else if (vhcr->op_modifier == 0) {
 		struct mlx4_active_ports actv_ports =
 			mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@
 		MLX4_GET(func_cap->phys_port_id, outbox,
 			 QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+	MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+	func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
 	/* All other resources are allocated by the master, but we still report
 	 * 'num' and 'reserved' capabilities as follows:
 	 * - num remains the maximum resource index
@@ -700,6 +711,7 @@
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
@@ -898,6 +910,12 @@
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
 	if (field & (1 << 2))
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+	if (field & 0x80)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+	if (field & 0x40)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
 	MLX4_GET(dev_cap->reserved_lkey, outbox,
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@
 	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+	/* phv_check enable */
+	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+	if (byte_field & 0x2)
+		param->phv_check_en = 1;
 out:
 	mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -2758,3 +2780,63 @@
 			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
 			    MLX4_CMD_NATIVE);
 }
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID	0x10
+#define SET_PORT_GEN_PHV_EN	0x80
+
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_general_context *context;
+	u32 in_mod;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+
+	context->v_ignore_fcs |=  SET_PORT_GEN_PHV_VALID;
+	if (phv_bit)
+		context->phv_en |=  SET_PORT_GEN_PHV_EN;
+
+	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+		       MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+	int err;
+	struct mlx4_func_cap func_cap;
+
+	memset(&func_cap, 0, sizeof(func_cap));
+	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+	if (!err)
+		*phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+	return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+	int ret;
+
+	if (mlx4_is_slave(dev))
+		return -EPERM;
+
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+		ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+		if (!ret)
+			dev->caps.phv_bit[port] = new_val;
+		return ret;
+	}
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
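
Reviewer note: a hypothetical PF-side caller of the two helpers exported above; only the signatures and return codes visible in this hunk are assumed (-EPERM on a VF, -EOPNOTSUPP when PHV_EN is absent or SKIP_OUTER_VLAN is advertised). The header below is assumed to carry the new prototypes.

#include <linux/errno.h>
#include <linux/mlx4/device.h>	/* struct mlx4_dev, get_phv_bit(), set_phv_bit() */

/* Enable the PHV bit on a port and read it back as a sanity check. */
static int example_enable_phv(struct mlx4_dev *dev, u8 port)
{
	int phv = 0;
	int err;

	err = set_phv_bit(dev, port, 1);
	if (err)
		return err;	/* -EPERM / -EOPNOTSUPP / firmware error */

	err = get_phv_bit(dev, port, &phv);
	if (err)
		return err;

	return phv ? 0 : -EIO;	/* bit should now read back as set */
}
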
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 07cb7c2..08de555 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -204,6 +204,7 @@
 	u16 cqe_size; /* For use only when CQE stride feature enabled */
 	u16 eqe_size; /* For use only when EQE stride feature enabled */
 	u8 rss_ip_frags;
+	u8 phv_check_en; /* for QUERY_HCA */
 };
 
 struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 29c2a01..006757f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -405,6 +405,21 @@
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 	dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+		struct mlx4_init_hca_param hca_param;
+
+		memset(&hca_param, 0, sizeof(hca_param));
+		err = mlx4_QUERY_HCA(dev, &hca_param);
+		/* Turn off the PHV_EN flag in case phv_check_en is set.
+		 * phv_check_en is a HW check that parses the packet and
+		 * verifies that the phv bit was reported correctly in the wqe.
+		 * To allow QinQ, the PHV_EN flag should be set and phv_check_en
+		 * must be cleared; otherwise QinQ packets will be dropped by the HW.
+		 */
+		if (err || hca_param.phv_check_en)
+			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+	}
+
 	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
 	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
 		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2654,9 +2669,14 @@
 
 	if (msi_x) {
 		int nreq = dev->caps.num_ports * num_online_cpus() + 1;
+		bool shared_ports = false;
 
 		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
 			     nreq);
+		if (nreq > MAX_MSIX) {
+			nreq = MAX_MSIX;
+			shared_ports = true;
+		}
 
 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 		if (!entries)
@@ -2679,6 +2699,9 @@
 		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
 			    dev->caps.num_ports);
 
+		if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
+			shared_ports = true;
+
 		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
 			if (i == MLX4_EQ_ASYNC)
 				continue;
@@ -2686,7 +2709,7 @@
 			priv->eq_table.eq[i].irq =
 				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
 
-			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+			if (shared_ports) {
 				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
 					    dev->caps.num_ports);
 				/* We don't set affinity hint when there
@@ -2912,6 +2935,8 @@
 {
 	u64 dev_flags = dev->flags;
 	int err = 0;
+	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+					MLX4_MAX_NUM_VF);
 
 	if (reset_flow) {
 		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2937,6 +2962,12 @@
 	}
 
 	if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
+		if (total_vfs > fw_enabled_sriov_vfs) {
+			mlx4_err(dev, "requested VFs (%d) > available VFs (%d). Continuing without SR-IOV\n",
+				 total_vfs, fw_enabled_sriov_vfs);
+			err = -ENOMEM;
+			goto disable_sriov;
+		}
 		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
 		err = pci_enable_sriov(pdev, total_vfs);
 	}
@@ -3418,20 +3449,20 @@
 			goto err_disable_pdev;
 		}
 	}
-	if (total_vfs >= MLX4_MAX_NUM_VF) {
+	if (total_vfs > MLX4_MAX_NUM_VF) {
 		dev_err(&pdev->dev,
-			"Requested more VF's (%d) than allowed (%d)\n",
-			total_vfs, MLX4_MAX_NUM_VF - 1);
+			"Requested more VF's (%d) than allowed by hw (%d)\n",
+			total_vfs, MLX4_MAX_NUM_VF);
 		err = -EINVAL;
 		goto err_disable_pdev;
 	}
 
 	for (i = 0; i < MLX4_MAX_PORTS; i++) {
-		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
 			dev_err(&pdev->dev,
-				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+				"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
 				nvfs[i] + nvfs[2], i + 1,
-				MLX4_MAX_NUM_VF_P_PORT - 1);
+				MLX4_MAX_NUM_VF_P_PORT);
 			err = -EINVAL;
 			goto err_disable_pdev;
 		}
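
Reviewer note: the PHV handling in this file reduces to one capability predicate: keep QinQ (PHV) offload advertised only when the device reports PHV_EN support and the firmware's phv_check_en self-check is clear. A compact restatement, with booleans standing in for the capability/QUERY_HCA results:

#include <stdbool.h>

/* QinQ frames would be dropped by the hardware if phv_check_en were left on,
 * so offload stays usable only in the PHV_EN && !phv_check_en case.
 */
static bool qinq_offload_usable(bool dev_cap_phv_en, bool hca_phv_check_en)
{
	return dev_cap_phv_en && !hca_phv_check_en;
}
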
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index a092c5c..232b2b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -787,6 +787,9 @@
 	u8 pprx;
 	u8 pfcrx;
 	u16 reserved4;
+	u32 reserved5;
+	u8 phv_en;
+	u8 reserved6[3];
 };
 
 struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 666d166..defcf8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -95,6 +95,7 @@
  */
 
 #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV	     2
 
 #define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 0715b49..6cb3830 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -45,15 +45,34 @@
  * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+					   size_t size, dma_addr_t *dma_handle,
+					   int node)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	int original_node;
+	void *cpu_handle;
+
+	mutex_lock(&priv->alloc_mutex);
+	original_node = dev_to_node(&dev->pdev->dev);
+	set_dev_node(&dev->pdev->dev, node);
+	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+					 dma_handle, GFP_KERNEL);
+	set_dev_node(&dev->pdev->dev, original_node);
+	mutex_unlock(&priv->alloc_mutex);
+	return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node)
 {
 	dma_addr_t t;
 
 	buf->size = size;
 	buf->npages       = 1;
 	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-	buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-						size, &t, GFP_KERNEL);
+	buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
+							  &t, node);
 	if (!buf->direct.buf)
 		return -ENOMEM;
 
@@ -66,6 +85,11 @@
 
 	return 0;
 }
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+						 int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 
@@ -84,8 +109,9 @@
 		return NULL;
 
 	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
-	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-					    &pgdir->db_dma, GFP_KERNEL);
+
+	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+						       &pgdir->db_dma, node);
 	if (!pgdir->db_page) {
 		kfree(pgdir);
 		return NULL;
@@ -118,7 +144,7 @@
 	return 0;
 }
 
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
 {
 	struct mlx5_db_pgdir *pgdir;
 	int ret = 0;
@@ -129,7 +155,7 @@
 		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
 			goto out;
 
-	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+	pgdir = mlx5_alloc_db_pgdir(dev, node);
 	if (!pgdir) {
 		ret = -ENOMEM;
 		goto out;
@@ -145,6 +171,12 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
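
Reviewer note: the new *_node variants let a caller place descriptor memory on the NUMA node of the CPU that will service the queue, while the plain wrappers fall back to dev->priv.numa_node. A hypothetical caller sketch (headers assumed to declare the node-aware helpers added above):

#include <linux/mlx5/driver.h>	/* struct mlx5_core_dev, mlx5_db, mlx5_buf */
#include <linux/topology.h>	/* cpu_to_node() */

/* Allocate a doorbell record and a work-queue buffer on the node of the CPU
 * that will poll the queue; fall back cleanly on error.
 */
static int example_alloc_on_cpu(struct mlx5_core_dev *mdev, int cpu,
				struct mlx5_db *db, struct mlx5_buf *buf)
{
	int node = cpu_to_node(cpu);
	int err;

	err = mlx5_db_alloc_node(mdev, db, node);
	if (err)
		return err;

	err = mlx5_buf_alloc_node(mdev, 4096, buf, node);
	if (err)
		mlx5_db_free(mdev, db);

	return err;
}
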
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 3d23bd6..0983a20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -42,24 +42,27 @@
 
 #define MLX5E_MAX_NUM_TC	8
 
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 #define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
 
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
 #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
 #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
-#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
 
+#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
+#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
 
 static const char vport_strings[][ETH_GSTRING_LEN] = {
 	/* vport statistics */
@@ -91,6 +94,7 @@
 	"lro_bytes",
 	"rx_csum_good",
 	"rx_csum_none",
+	"rx_csum_sw",
 	"tx_csum_offload",
 	"tx_queue_stopped",
 	"tx_queue_wake",
@@ -128,18 +132,94 @@
 	u64 lro_bytes;
 	u64 rx_csum_good;
 	u64 rx_csum_none;
+	u64 rx_csum_sw;
 	u64 tx_csum_offload;
 	u64 tx_queue_stopped;
 	u64 tx_queue_wake;
 	u64 tx_queue_dropped;
 	u64 rx_wqe_err;
 
-#define NUM_VPORT_COUNTERS     31
+#define NUM_VPORT_COUNTERS     32
+};
+
+static const char pport_strings[][ETH_GSTRING_LEN] = {
+	/* IEEE802.3 counters */
+	"frames_tx",
+	"frames_rx",
+	"check_seq_err",
+	"alignment_err",
+	"octets_tx",
+	"octets_received",
+	"multicast_xmitted",
+	"broadcast_xmitted",
+	"multicast_rx",
+	"broadcast_rx",
+	"in_range_len_errors",
+	"out_of_range_len",
+	"too_long_errors",
+	"symbol_err",
+	"mac_control_tx",
+	"mac_control_rx",
+	"unsupported_op_rx",
+	"pause_ctrl_rx",
+	"pause_ctrl_tx",
+
+	/* RFC2863 counters */
+	"in_octets",
+	"in_ucast_pkts",
+	"in_discards",
+	"in_errors",
+	"in_unknown_protos",
+	"out_octets",
+	"out_ucast_pkts",
+	"out_discards",
+	"out_errors",
+	"in_multicast_pkts",
+	"in_broadcast_pkts",
+	"out_multicast_pkts",
+	"out_broadcast_pkts",
+
+	/* RFC2819 counters */
+	"drop_events",
+	"octets",
+	"pkts",
+	"broadcast_pkts",
+	"multicast_pkts",
+	"crc_align_errors",
+	"undersize_pkts",
+	"oversize_pkts",
+	"fragments",
+	"jabbers",
+	"collisions",
+	"p64octets",
+	"p65to127octets",
+	"p128to255octets",
+	"p256to511octets",
+	"p512to1023octets",
+	"p1024to1518octets",
+	"p1519to2047octets",
+	"p2048to4095octets",
+	"p4096to8191octets",
+	"p8192to10239octets",
+};
+
+#define NUM_IEEE_802_3_COUNTERS		19
+#define NUM_RFC_2863_COUNTERS		13
+#define NUM_RFC_2819_COUNTERS		21
+#define NUM_PPORT_COUNTERS		(NUM_IEEE_802_3_COUNTERS + \
+					 NUM_RFC_2863_COUNTERS + \
+					 NUM_RFC_2819_COUNTERS)
+
+struct mlx5e_pport_stats {
+	__be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
+	__be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
+	__be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
 };
 
 static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
 	"packets",
 	"csum_none",
+	"csum_sw",
 	"lro_packets",
 	"lro_bytes",
 	"wqe_err"
@@ -148,10 +228,11 @@
 struct mlx5e_rq_stats {
 	u64 packets;
 	u64 csum_none;
+	u64 csum_sw;
 	u64 lro_packets;
 	u64 lro_bytes;
 	u64 wqe_err;
-#define NUM_RQ_STATS 5
+#define NUM_RQ_STATS 6
 };
 
 static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
@@ -179,6 +260,7 @@
 
 struct mlx5e_stats {
 	struct mlx5e_vport_stats   vport;
+	struct mlx5e_pport_stats   pport;
 };
 
 struct mlx5e_params {
@@ -192,9 +274,12 @@
 	u16 tx_cq_moderation_usec;
 	u16 tx_cq_moderation_pkts;
 	u16 min_rx_wqes;
-	u16 rx_hash_log_tbl_sz;
 	bool lro_en;
 	u32 lro_wqe_sz;
+	u16 tx_max_inline;
+	u8  rss_hfunc;
+	u8  toeplitz_hash_key[40];
+	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
 };
 
 enum {
@@ -214,6 +299,7 @@
 	struct napi_struct        *napi;
 	struct mlx5_core_cq        mcq;
 	struct mlx5e_channel      *channel;
+	struct mlx5e_priv         *priv;
 
 	/* control */
 	struct mlx5_wq_ctrl        wq_ctrl;
@@ -237,6 +323,7 @@
 	struct mlx5_wq_ctrl    wq_ctrl;
 	u32                    rqn;
 	struct mlx5e_channel  *channel;
+	struct mlx5e_priv     *priv;
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_tx_skb_cb {
@@ -266,7 +353,9 @@
 	/* dirtied @xmit */
 	u16                        pc ____cacheline_aligned_in_smp;
 	u32                        dma_fifo_pc;
-	u32                        bf_offset;
+	u16                        bf_offset;
+	u16                        prev_cc;
+	u8                         bf_budget;
 	struct mlx5e_sq_stats      stats;
 
 	struct mlx5e_cq            cq;
@@ -279,9 +368,10 @@
 	struct mlx5_wq_cyc         wq;
 	u32                        dma_fifo_mask;
 	void __iomem              *uar_map;
+	void __iomem              *uar_bf_map;
 	struct netdev_queue       *txq;
 	u32                        sqn;
-	u32                        bf_buf_size;
+	u16                        bf_buf_size;
 	u16                        max_inline;
 	u16                        edge;
 	struct device             *pdev;
@@ -315,7 +405,6 @@
 	__be32                     mkey_be;
 	u8                         num_tc;
 	unsigned long              flags;
-	int                        tc_to_txq_map[MLX5E_MAX_NUM_TC];
 
 	/* control */
 	struct mlx5e_priv         *priv;
@@ -324,20 +413,24 @@
 };
 
 enum mlx5e_traffic_types {
-	MLX5E_TT_IPV4_TCP = 0,
-	MLX5E_TT_IPV6_TCP = 1,
-	MLX5E_TT_IPV4_UDP = 2,
-	MLX5E_TT_IPV6_UDP = 3,
-	MLX5E_TT_IPV4     = 4,
-	MLX5E_TT_IPV6     = 5,
-	MLX5E_TT_ANY      = 6,
-	MLX5E_NUM_TT      = 7,
+	MLX5E_TT_IPV4_TCP,
+	MLX5E_TT_IPV6_TCP,
+	MLX5E_TT_IPV4_UDP,
+	MLX5E_TT_IPV6_UDP,
+	MLX5E_TT_IPV4_IPSEC_AH,
+	MLX5E_TT_IPV6_IPSEC_AH,
+	MLX5E_TT_IPV4_IPSEC_ESP,
+	MLX5E_TT_IPV6_IPSEC_ESP,
+	MLX5E_TT_IPV4,
+	MLX5E_TT_IPV6,
+	MLX5E_TT_ANY,
+	MLX5E_NUM_TT,
 };
 
-enum {
-	MLX5E_RQT_SPREADING  = 0,
-	MLX5E_RQT_DEFAULT_RQ = 1,
-	MLX5E_NUM_RQT        = 2,
+enum mlx5e_rqt_ix {
+	MLX5E_INDIRECTION_RQT,
+	MLX5E_SINGLE_RQ_RQT,
+	MLX5E_NUM_RQT,
 };
 
 struct mlx5e_eth_addr_info {
@@ -362,10 +455,10 @@
 enum {
 	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
 	MLX5E_STATE_OPENED,
+	MLX5E_STATE_DESTROYING,
 };
 
 struct mlx5e_vlan_db {
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	u32           active_vlans_ft_ix[VLAN_N_VID];
 	u32           untagged_rule_ft_ix;
 	u32           any_vlan_rule_ft_ix;
@@ -379,9 +472,9 @@
 
 struct mlx5e_priv {
 	/* priv data path fields - start */
-	int                        num_tc;
 	int                        default_vlan_prio;
 	struct mlx5e_sq            **txq_to_sq_map;
+	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 	/* priv data path fields - end */
 
 	unsigned long              state;
@@ -390,10 +483,11 @@
 	u32                        pdn;
 	u32                        tdn;
 	struct mlx5_core_mr        mr;
+	struct mlx5e_rq            drop_rq;
 
 	struct mlx5e_channel     **channel;
 	u32                        tisn[MLX5E_MAX_NUM_TC];
-	u32                        rqtn;
+	u32                        rqtn[MLX5E_NUM_RQT];
 	u32                        tirn[MLX5E_NUM_TT];
 
 	struct mlx5e_flow_table    ft;
@@ -470,10 +564,9 @@
 
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
-int mlx5e_open_flow_table(struct mlx5e_priv *priv);
-void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -482,17 +575,17 @@
 			   u16 vid);
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-			     struct mlx5e_params *new_params);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5e_tx_wqe *wqe)
+				      struct mlx5e_tx_wqe *wqe, int bf_sz)
 {
+	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
 	/* ensure wqe is visible to device before updating doorbell record */
 	dma_wmb();
 
@@ -503,9 +596,15 @@
 	 */
 	wmb();
 
-	mlx5_write64((__be32 *)&wqe->ctrl,
-		     sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
-		     NULL);
+	if (bf_sz) {
+		__iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+		/* flush the write-combining mapped buffer */
+		wmb();
+
+	} else {
+		mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+	}
 
 	sq->bf_offset ^= sq->bf_buf_size;
 }
@@ -519,3 +618,4 @@
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
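
Reviewer note: a quick compile-time check of the constants introduced in this header; the values follow directly from the #defines above (indirection RQT of 2^7 entries, channel count capped at half of that, PPORT block equal to the sum of the three register groups):

_Static_assert((1 << 7) == 128, "MLX5E_INDIR_RQT_SIZE is BIT(7)");
_Static_assert((128 >> 1) == 64, "MLX5E_MAX_NUM_CHANNELS is half the RQT size");
_Static_assert(19 + 13 + 21 == 53, "NUM_PPORT_COUNTERS sums the three groups");
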
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3889384..bce9126 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -171,9 +171,9 @@
 
 	switch (sset) {
 	case ETH_SS_STATS:
-		return NUM_VPORT_COUNTERS +
+		return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
 		       priv->params.num_channels * NUM_RQ_STATS +
-		       priv->params.num_channels * priv->num_tc *
+		       priv->params.num_channels * priv->params.num_tc *
 						   NUM_SQ_STATS;
 	/* fallthrough */
 	default:
@@ -200,6 +200,11 @@
 			strcpy(data + (idx++) * ETH_GSTRING_LEN,
 			       vport_strings[i]);
 
+		/* PPORT counters */
+		for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+			strcpy(data + (idx++) * ETH_GSTRING_LEN,
+			       pport_strings[i]);
+
 		/* per channel counters */
 		for (i = 0; i < priv->params.num_channels; i++)
 			for (j = 0; j < NUM_RQ_STATS; j++)
@@ -207,7 +212,7 @@
 					"rx%d_%s", i, rq_stats_strings[j]);
 
 		for (i = 0; i < priv->params.num_channels; i++)
-			for (tc = 0; tc < priv->num_tc; tc++)
+			for (tc = 0; tc < priv->params.num_tc; tc++)
 				for (j = 0; j < NUM_SQ_STATS; j++)
 					sprintf(data +
 						(idx++) * ETH_GSTRING_LEN,
@@ -234,6 +239,9 @@
 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
 		data[idx++] = ((u64 *)&priv->stats.vport)[i];
 
+	for (i = 0; i < NUM_PPORT_COUNTERS; i++)
+		data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+
 	/* per channel counters */
 	for (i = 0; i < priv->params.num_channels; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
@@ -242,7 +250,7 @@
 				       ((u64 *)&priv->channel[i]->rq.stats)[j];
 
 	for (i = 0; i < priv->params.num_channels; i++)
-		for (tc = 0; tc < priv->num_tc; tc++)
+		for (tc = 0; tc < priv->params.num_tc; tc++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] = !test_bit(MLX5E_STATE_OPENED,
 							&priv->state) ? 0 :
@@ -264,7 +272,7 @@
 			       struct ethtool_ringparam *param)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_params new_params;
+	bool was_opened;
 	u16 min_rx_wqes;
 	u8 log_rq_size;
 	u8 log_sq_size;
@@ -316,11 +324,18 @@
 		return 0;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
-	new_params.log_rq_size = log_rq_size;
-	new_params.log_sq_size = log_sq_size;
-	new_params.min_rx_wqes = min_rx_wqes;
-	err = mlx5e_update_priv_params(priv, &new_params);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.log_rq_size = log_rq_size;
+	priv->params.log_sq_size = log_sq_size;
+	priv->params.min_rx_wqes = min_rx_wqes;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -342,7 +357,7 @@
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
 	unsigned int count = ch->combined_count;
-	struct mlx5e_params new_params;
+	bool was_opened;
 	int err = 0;
 
 	if (!count) {
@@ -365,9 +380,16 @@
 		return 0;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
-	new_params.num_channels = count;
-	err = mlx5e_update_priv_params(priv, &new_params);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.num_channels = count;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -606,7 +628,7 @@
 	u32 link_modes;
 	u32 speed;
 	u32 eth_proto_cap, eth_proto_admin;
-	u8 port_status;
+	enum mlx5_port_status ps;
 	int err;
 
 	speed = ethtool_cmd_speed(cmd);
@@ -640,25 +662,197 @@
 	if (link_modes == eth_proto_admin)
 		goto out;
 
-	err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
-	if (err) {
-		netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
-			   __func__, err);
-		goto out;
+	mlx5_query_port_admin_status(mdev, &ps);
+	if (ps == MLX5_PORT_UP)
+		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+	mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+	if (ps == MLX5_PORT_UP)
+		mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+
+out:
+	return err;
+}
+
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return MLX5E_INDIR_RQT_SIZE;
+}
+
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (indir)
+		memcpy(indir, priv->params.indirection_rqt,
+		       sizeof(priv->params.indirection_rqt));
+
+	if (key)
+		memcpy(key, priv->params.toeplitz_hash_key,
+		       sizeof(priv->params.toeplitz_hash_key));
+
+	if (hfunc)
+		*hfunc = priv->params.rss_hfunc;
+
+	return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	bool close_open;
+	int err = 0;
+
+	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+	    (hfunc != ETH_RSS_HASH_XOR) &&
+	    (hfunc != ETH_RSS_HASH_TOP))
+		return -EINVAL;
+
+	mutex_lock(&priv->state_lock);
+
+	if (indir) {
+		memcpy(priv->params.indirection_rqt, indir,
+		       sizeof(priv->params.indirection_rqt));
+		mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
 	}
 
-	err = mlx5_query_port_status(mdev, &port_status);
-	if (err)
-		goto out;
+	close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
+		     test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (close_open)
+		mlx5e_close_locked(dev);
 
-	if (port_status == MLX5_PORT_DOWN)
-		return 0;
+	if (key)
+		memcpy(priv->params.toeplitz_hash_key, key,
+		       sizeof(priv->params.toeplitz_hash_key));
 
-	err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
-	if (err)
-		goto out;
-	err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
-out:
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+		priv->params.rss_hfunc = hfunc;
+
+	if (close_open)
+		err = mlx5e_open_locked(priv->netdev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+			   struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = priv->params.num_channels;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     void *data)
+{
+	const struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		*(u32 *)data = priv->params.tx_max_inline;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     const void *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
+	u32 val;
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val > mlx5e_get_max_inline_cap(mdev)) {
+			err = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&priv->state_lock);
+
+		was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+		if (was_opened)
+			mlx5e_close_locked(dev);
+
+		priv->params.tx_max_inline = val;
+
+		if (was_opened)
+			err = mlx5e_open_locked(dev);
+
+		mutex_unlock(&priv->state_lock);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pauseparam)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+				    &pauseparam->tx_pause);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+			   __func__, err);
+	}
+}
+
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pauseparam)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	if (pauseparam->autoneg)
+		return -EINVAL;
+
+	err = mlx5_set_port_pause(mdev,
+				  pauseparam->rx_pause ? 1 : 0,
+				  pauseparam->tx_pause ? 1 : 0);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+			   __func__, err);
+	}
+
 	return err;
 }
 
@@ -676,4 +870,13 @@
 	.set_coalesce      = mlx5e_set_coalesce,
 	.get_settings      = mlx5e_get_settings,
 	.set_settings      = mlx5e_set_settings,
+	.get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
+	.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+	.get_rxfh          = mlx5e_get_rxfh,
+	.set_rxfh          = mlx5e_set_rxfh,
+	.get_rxnfc         = mlx5e_get_rxnfc,
+	.get_tunable       = mlx5e_get_tunable,
+	.set_tunable       = mlx5e_set_tunable,
+	.get_pauseparam    = mlx5e_get_pauseparam,
+	.set_pauseparam    = mlx5e_set_pauseparam,
 };
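
Reviewer note: with the PPORT block added, the ETH_SS_STATS count in mlx5e_get_sset_count() is vport + pport + per-RQ + per-SQ entries. A sketch of that accounting; the NUM_* inputs are the driver constants (32 vport, 53 pport and 6 RQ counters appear in this series; the SQ counter count is left symbolic):

static int example_stats_count(int num_channels, int num_tc,
			       int num_vport, int num_pport,
			       int num_rq_stats, int num_sq_stats)
{
	/* e.g. 8 channels, 1 TC: 32 + 53 + 8*6 + 8*1*num_sq_stats strings */
	return num_vport + num_pport +
	       num_channels * num_rq_stats +
	       num_channels * num_tc * num_sq_stats;
}
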
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
index 120db80..e71563c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -105,25 +105,41 @@
 {
 	void *ft = priv->ft.main;
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+		mlx5_del_flow_table_entry(ft,
+					  ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+	if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
 
-	if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+	if (ai->tt_vec & BIT(MLX5E_TT_ANY))
 		mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
 }
 
@@ -156,33 +172,37 @@
 		switch (eth_addr_type) {
 		case MLX5E_UC:
 			ret =
-				(1 << MLX5E_TT_IPV4_TCP) |
-				(1 << MLX5E_TT_IPV6_TCP) |
-				(1 << MLX5E_TT_IPV4_UDP) |
-				(1 << MLX5E_TT_IPV6_UDP) |
-				(1 << MLX5E_TT_IPV4)     |
-				(1 << MLX5E_TT_IPV6)     |
-				(1 << MLX5E_TT_ANY)      |
+				BIT(MLX5E_TT_IPV4_TCP)       |
+				BIT(MLX5E_TT_IPV6_TCP)       |
+				BIT(MLX5E_TT_IPV4_UDP)       |
+				BIT(MLX5E_TT_IPV6_UDP)       |
+				BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+				BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+				BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+				BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+				BIT(MLX5E_TT_IPV4)           |
+				BIT(MLX5E_TT_IPV6)           |
+				BIT(MLX5E_TT_ANY)            |
 				0;
 			break;
 
 		case MLX5E_MC_IPV4:
 			ret =
-				(1 << MLX5E_TT_IPV4_UDP) |
-				(1 << MLX5E_TT_IPV4)     |
+				BIT(MLX5E_TT_IPV4_UDP)       |
+				BIT(MLX5E_TT_IPV4)           |
 				0;
 			break;
 
 		case MLX5E_MC_IPV6:
 			ret =
-				(1 << MLX5E_TT_IPV6_UDP) |
-				(1 << MLX5E_TT_IPV6)     |
+				BIT(MLX5E_TT_IPV6_UDP)       |
+				BIT(MLX5E_TT_IPV6)           |
 				0;
 			break;
 
 		case MLX5E_MC_OTHER:
 			ret =
-				(1 << MLX5E_TT_ANY)      |
+				BIT(MLX5E_TT_ANY)            |
 				0;
 			break;
 		}
@@ -191,23 +211,27 @@
 
 	case MLX5E_ALLMULTI:
 		ret =
-			(1 << MLX5E_TT_IPV4_UDP) |
-			(1 << MLX5E_TT_IPV6_UDP) |
-			(1 << MLX5E_TT_IPV4)     |
-			(1 << MLX5E_TT_IPV6)     |
-			(1 << MLX5E_TT_ANY)      |
+			BIT(MLX5E_TT_IPV4_UDP) |
+			BIT(MLX5E_TT_IPV6_UDP) |
+			BIT(MLX5E_TT_IPV4)     |
+			BIT(MLX5E_TT_IPV6)     |
+			BIT(MLX5E_TT_ANY)      |
 			0;
 		break;
 
 	default: /* MLX5E_PROMISC */
 		ret =
-			(1 << MLX5E_TT_IPV4_TCP) |
-			(1 << MLX5E_TT_IPV6_TCP) |
-			(1 << MLX5E_TT_IPV4_UDP) |
-			(1 << MLX5E_TT_IPV6_UDP) |
-			(1 << MLX5E_TT_IPV4)     |
-			(1 << MLX5E_TT_IPV6)     |
-			(1 << MLX5E_TT_ANY)      |
+			BIT(MLX5E_TT_IPV4_TCP)       |
+			BIT(MLX5E_TT_IPV6_TCP)       |
+			BIT(MLX5E_TT_IPV4_UDP)       |
+			BIT(MLX5E_TT_IPV6_UDP)       |
+			BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+			BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+			BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+			BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+			BIT(MLX5E_TT_IPV4)           |
+			BIT(MLX5E_TT_IPV6)           |
+			BIT(MLX5E_TT_ANY)            |
 			0;
 		break;
 	}
@@ -226,6 +250,7 @@
 	u8   *match_criteria_dmac;
 	void *ft   = priv->ft.main;
 	u32  *tirn = priv->tirn;
+	u32  *ft_ix;
 	u32  tt_vec;
 	int  err;
 
@@ -261,51 +286,51 @@
 
 	tt_vec = mlx5e_get_tt_vec(ai, type);
 
-	if (tt_vec & (1 << MLX5E_TT_ANY)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+	if (tt_vec & BIT(MLX5E_TT_ANY)) {
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_ANY]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_ANY]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_ANY);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_ANY);
 	}
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
 			 outer_headers.ethertype);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+	if (tt_vec & BIT(MLX5E_TT_IPV4)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+	if (tt_vec & BIT(MLX5E_TT_IPV6)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6);
 	}
 
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@
 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
 		 IPPROTO_UDP);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4_UDP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4_UDP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6_UDP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6_UDP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
 	}
 
 	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
 		 IPPROTO_TCP);
 
-	if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IP);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV4_TCP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV4_TCP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
 	}
 
-	if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 			 ETH_P_IPV6);
 		MLX5_SET(dest_format_struct, dest, destination_id,
 			 tirn[MLX5E_TT_IPV6_TCP]);
 		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
 						match_criteria, flow_context,
-						&ai->ft_ix[MLX5E_TT_IPV6_TCP]);
-		if (err) {
-			mlx5e_del_eth_addr_from_flow_table(priv, ai);
-			return err;
-		}
-		ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_AH);
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+	}
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+	}
+
+	MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+		 IPPROTO_ESP);
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+	if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IP);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+	}
+
+	ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+	if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+			 ETH_P_IPV6);
+		MLX5_SET(dest_format_struct, dest, destination_id,
+			 tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+		err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+						match_criteria, flow_context,
+						ft_ix);
+		if (err)
+			goto err_del_ai;
+
+		ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
 	}
 
 	return 0;
+
+err_del_ai:
+	mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+	return err;
 }
 
 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -498,44 +594,28 @@
 
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
+	if (!priv->vlan.filter_disabled)
+		return;
 
-	if (priv->vlan.filter_disabled) {
-		priv->vlan.filter_disabled = false;
-		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					    0);
-	}
+	priv->vlan.filter_disabled = false;
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 {
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
+	if (priv->vlan.filter_disabled)
+		return;
 
-	if (!priv->vlan.filter_disabled) {
-		priv->vlan.filter_disabled = true;
-		if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					    0);
-	}
+	priv->vlan.filter_disabled = true;
+	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
 }
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
 			  u16 vid)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
-	int err = 0;
 
-	mutex_lock(&priv->state_lock);
-
-	set_bit(vid, priv->vlan.active_vlans);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-					  vid);
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
+	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 }
 
 int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -543,56 +623,11 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	mutex_lock(&priv->state_lock);
-
-	clear_bit(vid, priv->vlan.active_vlans);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
-	mutex_unlock(&priv->state_lock);
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
 
 	return 0;
 }
 
-int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
-{
-	u16 vid;
-	int err;
-
-	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
-		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-					  vid);
-		if (err)
-			return err;
-	}
-
-	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-	if (err)
-		return err;
-
-	if (priv->vlan.filter_disabled) {
-		err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					  0);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
-{
-	u16 vid;
-
-	if (priv->vlan.filter_disabled)
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-
-	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
-	for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
 	for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
 		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
@@ -656,18 +691,21 @@
 	mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
 		hn->action = MLX5E_ACTION_DEL;
 
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
 		mlx5e_sync_netdev_addr(priv);
 
 	mlx5e_apply_netdev_addr(priv);
 }
 
-void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+void mlx5e_set_rx_mode_work(struct work_struct *work)
 {
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       set_rx_mode_work);
+
 	struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
 	struct net_device *ndev = priv->netdev;
 
-	bool rx_mode_enable   = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
 	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
 	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
 	bool broadcast_enabled = rx_mode_enable;
@@ -700,17 +738,6 @@
 	ea->broadcast_enabled = broadcast_enabled;
 }
 
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
-	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-					       set_rx_mode_work);
-
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_set_rx_mode_core(priv);
-	mutex_unlock(&priv->state_lock);
-}
-
 void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
 {
 	ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
@@ -725,7 +752,7 @@
 	if (!g)
 		return -ENOMEM;
 
-	g[0].log_sz = 2;
+	g[0].log_sz = 3;
 	g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
 			 outer_headers.ethertype);
@@ -833,7 +860,7 @@
 	mlx5_destroy_flow_table(priv->ft.vlan);
 }
 
-int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
 {
 	int err;
 
@@ -845,16 +872,24 @@
 	if (err)
 		goto err_destroy_main_flow_table;
 
+	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+	if (err)
+		goto err_destroy_vlan_flow_table;
+
 	return 0;
 
+err_destroy_vlan_flow_table:
+	mlx5e_destroy_vlan_flow_table(priv);
+
 err_destroy_main_flow_table:
 	mlx5e_destroy_main_flow_table(priv);
 
 	return err;
 }
 
-void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
 {
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
 	mlx5e_destroy_vlan_flow_table(priv);
 	mlx5e_destroy_main_flow_table(priv);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 40206da..59874d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -41,6 +41,7 @@
 struct mlx5e_sq_param {
 	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
 	struct mlx5_wq_param       wq;
+	u16                        max_inline;
 };
 
 struct mlx5e_cq_param {
@@ -81,6 +82,47 @@
 	mutex_unlock(&priv->state_lock);
 }
 
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_pport_stats *s = &priv->stats.pport;
+	u32 *in;
+	u32 *out;
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+	in  = mlx5_vzalloc(sz);
+	out = mlx5_vzalloc(sz);
+	if (!in || !out)
+		goto free_out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out,
+			     sz, MLX5_REG_PPCNT, 0, 0);
+	memcpy(s->IEEE_802_3_counters,
+	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+	       sizeof(s->IEEE_802_3_counters));
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out,
+			     sz, MLX5_REG_PPCNT, 0, 0);
+	memcpy(s->RFC_2863_counters,
+	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+	       sizeof(s->RFC_2863_counters));
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out,
+			     sz, MLX5_REG_PPCNT, 0, 0);
+	memcpy(s->RFC_2819_counters,
+	       MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
+	       sizeof(s->RFC_2819_counters));
+
+free_out:
+	kvfree(in);
+	kvfree(out);
+}
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -107,6 +149,7 @@
 	s->lro_packets		= 0;
 	s->lro_bytes		= 0;
 	s->rx_csum_none		= 0;
+	s->rx_csum_sw		= 0;
 	s->rx_wqe_err		= 0;
 	for (i = 0; i < priv->params.num_channels; i++) {
 		rq_stats = &priv->channel[i]->rq.stats;
@@ -114,9 +157,10 @@
 		s->lro_packets	+= rq_stats->lro_packets;
 		s->lro_bytes	+= rq_stats->lro_bytes;
 		s->rx_csum_none	+= rq_stats->csum_none;
+		s->rx_csum_sw	+= rq_stats->csum_sw;
 		s->rx_wqe_err   += rq_stats->wqe_err;
 
-		for (j = 0; j < priv->num_tc; j++) {
+		for (j = 0; j < priv->params.num_tc; j++) {
 			sq_stats = &priv->channel[i]->sq[j].stats;
 
 			s->tso_packets		+= sq_stats->tso_packets;
@@ -199,8 +243,10 @@
 
 	/* Update calculated offload counters */
 	s->tx_csum_offload = s->tx_packets - tx_offload_none;
-	s->rx_csum_good    = s->rx_packets - s->rx_csum_none;
+	s->rx_csum_good    = s->rx_packets - s->rx_csum_none -
+			       s->rx_csum_sw;
 
+	mlx5e_update_pport_counters(priv);
 free_out:
 	kvfree(out);
 }
@@ -272,6 +318,8 @@
 	int err;
 	int i;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
 				&rq->wq_ctrl);
 	if (err)
@@ -304,6 +352,7 @@
 	rq->netdev  = c->netdev;
 	rq->channel = c;
 	rq->ix      = c->ix;
+	rq->priv    = c->priv;
 
 	return 0;
 
@@ -321,8 +370,7 @@
 
 static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 {
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
+	struct mlx5e_priv *priv = rq->priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 
 	void *in;
@@ -342,11 +390,11 @@
 
 	memcpy(rqc, param->rqc, sizeof(param->rqc));
 
-	MLX5_SET(rqc,  rqc, cqn,		c->rq.cq.mcq.cqn);
+	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
 	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
 	MLX5_SET(rqc,  rqc, flush_in_error_en,	1);
 	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
-						PAGE_SHIFT);
+						MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
 
 	mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -389,11 +437,7 @@
 
 static void mlx5e_disable_rq(struct mlx5e_rq *rq)
 {
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	mlx5_core_destroy_rq(mdev, rq->rqn);
+	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
 }
 
 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
@@ -502,6 +546,8 @@
 	if (err)
 		return err;
 
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
 				 &sq->wq_ctrl);
 	if (err)
@@ -509,7 +555,9 @@
 
 	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
 	sq->uar_map     = sq->uar.map;
+	sq->uar_bf_map  = sq->uar.bf_map;
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+	sq->max_inline  = param->max_inline;
 
 	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -518,11 +566,12 @@
 	txq_ix = c->ix + tc * priv->params.num_channels;
 	sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
 
-	sq->pdev    = c->pdev;
-	sq->mkey_be = c->mkey_be;
-	sq->channel = c;
-	sq->tc      = tc;
-	sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->pdev      = c->pdev;
+	sq->mkey_be   = c->mkey_be;
+	sq->channel   = c;
+	sq->tc        = tc;
+	sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
 	priv->txq_to_sq_map[txq_ix] = sq;
 
 	return 0;
@@ -569,7 +618,6 @@
 
 	memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-	MLX5_SET(sqc,  sqc, user_index,		sq->tc);
 	MLX5_SET(sqc,  sqc, tis_num_0,		priv->tisn[sq->tc]);
 	MLX5_SET(sqc,  sqc, cqn,		c->sq[sq->tc].cq.mcq.cqn);
 	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
@@ -579,7 +627,7 @@
 	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
 	MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
 	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
-					  PAGE_SHIFT);
+					  MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
 
 	mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +750,8 @@
 	int err;
 	u32 i;
 
-	param->wq.numa = cpu_to_node(c->cpu);
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
 	param->eq_ix   = c->ix;
 
 	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -732,6 +781,7 @@
 	}
 
 	cq->channel = c;
+	cq->priv = priv;
 
 	return 0;
 }
@@ -743,8 +793,7 @@
 
 static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 {
-	struct mlx5e_channel *c = cq->channel;
-	struct mlx5e_priv *priv = c->priv;
+	struct mlx5e_priv *priv = cq->priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_core_cq *mcq = &cq->mcq;
 
@@ -773,7 +822,7 @@
 	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
 	MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
 	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
-					    PAGE_SHIFT);
+					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
 	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -790,8 +839,7 @@
 
 static void mlx5e_disable_cq(struct mlx5e_cq *cq)
 {
-	struct mlx5e_channel *c = cq->channel;
-	struct mlx5e_priv *priv = c->priv;
+	struct mlx5e_priv *priv = cq->priv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 
 	mlx5_core_destroy_cq(mdev, &cq->mcq);
@@ -901,13 +949,13 @@
 		mlx5e_close_sq(&c->sq[tc]);
 }
 
-static void mlx5e_build_tc_to_txq_map(struct mlx5e_channel *c,
-				      int num_channels)
+static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
 {
 	int i;
 
 	for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
-		c->tc_to_txq_map[i] = c->ix + i * num_channels;
+		priv->channeltc_to_txq_map[ix][i] =
+			ix + i * priv->params.num_channels;
 }
 
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
@@ -929,9 +977,9 @@
 	c->pdev     = &priv->mdev->pdev->dev;
 	c->netdev   = priv->netdev;
 	c->mkey_be  = cpu_to_be32(priv->mr.key);
-	c->num_tc   = priv->num_tc;
+	c->num_tc   = priv->params.num_tc;
 
-	mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
+	mlx5e_build_channeltc_to_txq_map(priv, ix);
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
@@ -1000,7 +1048,7 @@
 	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
 	MLX5_SET(wq, wq, pd,               priv->pdn);
 
-	param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 	param->wq.linear = 1;
 }
 
@@ -1014,7 +1062,8 @@
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 	MLX5_SET(wq, wq, pd,            priv->pdn);
 
-	param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+	param->max_inline = priv->params.tx_max_inline;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1108,28 @@
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
 	struct mlx5e_channel_param cparam;
+	int nch = priv->params.num_channels;
 	int err = -ENOMEM;
 	int i;
 	int j;
 
-	priv->channel = kcalloc(priv->params.num_channels,
-				sizeof(struct mlx5e_channel *), GFP_KERNEL);
+	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+				GFP_KERNEL);
 
-	priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
 				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
 	if (!priv->channel || !priv->txq_to_sq_map)
 		goto err_free_txq_to_sq_map;
 
 	mlx5e_build_channel_param(priv, &cparam);
-	for (i = 0; i < priv->params.num_channels; i++) {
+	for (i = 0; i < nch; i++) {
 		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
 		if (err)
 			goto err_close_channels;
 	}
 
-	for (j = 0; j < priv->params.num_channels; j++) {
+	for (j = 0; j < nch; j++) {
 		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
 		if (err)
 			goto err_close_channels;
@@ -1109,67 +1159,73 @@
 	kfree(priv->channel);
 }
 
-static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_rx_hash_fn(int hfunc)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
-	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(tisc, tisc, prio,  tc);
-	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
-
-	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+	return (hfunc == ETH_RSS_HASH_TOP) ?
+	       MLX5_RX_HASH_FN_TOEPLITZ :
+	       MLX5_RX_HASH_FN_INVERTED_XOR8;
 }
 
-static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+static int mlx5e_bits_invert(unsigned long a, int size)
 {
-	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+	int inv = 0;
+	int i;
+
+	for (i = 0; i < size; i++)
+		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+	return inv;
 }
 
-static int mlx5e_open_tises(struct mlx5e_priv *priv)
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
 {
-	int num_tc = priv->num_tc;
-	int err;
-	int tc;
+	int i;
 
-	for (tc = 0; tc < num_tc; tc++) {
-		err = mlx5e_open_tis(priv, tc);
-		if (err)
-			goto err_close_tises;
+	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+		int ix = i;
+
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+		ix = priv->params.indirection_rqt[ix];
+		ix = ix % priv->params.num_channels;
+		MLX5_SET(rqtc, rqtc, rq_num[i],
+			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			 priv->channel[ix]->rq.rqn :
+			 priv->drop_rq.rqn);
 	}
-
-	return 0;
-
-err_close_tises:
-	for (tc--; tc >= 0; tc--)
-		mlx5e_close_tis(priv, tc);
-
-	return err;
 }
 
-static void mlx5e_close_tises(struct mlx5e_priv *priv)
+static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
+				enum mlx5e_rqt_ix rqt_ix)
 {
-	int num_tc = priv->num_tc;
-	int tc;
 
-	for (tc = 0; tc < num_tc; tc++)
-		mlx5e_close_tis(priv, tc);
+	switch (rqt_ix) {
+	case MLX5E_INDIRECTION_RQT:
+		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+
+		break;
+
+	default: /* MLX5E_SINGLE_RQ_RQT */
+		MLX5_SET(rqtc, rqtc, rq_num[0],
+			 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			 priv->channel[0]->rq.rqn :
+			 priv->drop_rq.rqn);
+
+		break;
+	}
 }
 
-static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 *in;
-	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
 	void *rqtc;
 	int inlen;
-	int err;
 	int sz;
-	int i;
+	int err;
 
-	sz = 1 << priv->params.rx_hash_log_tbl_sz;
+	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
@@ -1181,198 +1237,101 @@
 	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-	for (i = 0; i < sz; i++) {
-		int ix = i % priv->params.num_channels;
+	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
 
-		MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
-	}
-
-	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
-	if (!err)
-		priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
 
 	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_close_rqt(struct mlx5e_priv *priv)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
-	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
-	MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
-
-	mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
-				   sizeof(out));
-}
-
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
-{
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
-	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
-
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
-#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-			  MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-			  MLX5_HASH_FIELD_SEL_DST_IP   |\
-			  MLX5_HASH_FIELD_SEL_L4_SPORT |\
-			  MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-	if (priv->params.lro_en) {
-		MLX5_SET(tirc, tirc, lro_enable_mask,
-			 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-			 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
-		MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-			 (priv->params.lro_wqe_sz -
-			  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
-			 MLX5_CAP_ETH(priv->mdev,
-				      lro_timer_supported_periods[3]));
-	}
-
-	switch (tt) {
-	case MLX5E_TT_ANY:
-		MLX5_SET(tirc, tirc, disp_type,
-			 MLX5_TIRC_DISP_TYPE_DIRECT);
-		MLX5_SET(tirc, tirc, inline_rqn,
-			 priv->channel[0]->rq.rqn);
-		break;
-	default:
-		MLX5_SET(tirc, tirc, disp_type,
-			 MLX5_TIRC_DISP_TYPE_INDIRECT);
-		MLX5_SET(tirc, tirc, indirect_table,
-			 priv->rqtn);
-		MLX5_SET(tirc, tirc, rx_hash_fn,
-			 MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
-		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-		netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
-						 rx_hash_toeplitz_key),
-				    MLX5_FLD_SZ_BYTES(tirc,
-						      rx_hash_toeplitz_key));
-		break;
-	}
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_ALL);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	}
-}
-
-static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u32 *in;
-	void *tirc;
+	void *rqtc;
 	int inlen;
+	int sz;
 	int err;
 
-	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
 	in = mlx5_vzalloc(inlen);
 	if (!in)
 		return -ENOMEM;
 
-	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
 
-	mlx5e_build_tir_ctx(priv, tirc, tt);
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
 
-	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+
+	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+
+	err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
 
 	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
 {
-	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
 }
 
-static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
 {
+	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+}
+
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+	if (!priv->params.lro_en)
+		return;
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+	MLX5_SET(tirc, tirc, lro_enable_mask,
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+		 (priv->params.lro_wqe_sz -
+		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+		 MLX5_CAP_ETH(priv->mdev,
+			      lro_timer_supported_periods[2]));
+}
+
+static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *tirc;
+	int inlen;
 	int err;
-	int i;
 
-	for (i = 0; i < MLX5E_NUM_TT; i++) {
-		err = mlx5e_open_tir(priv, i);
-		if (err)
-			goto err_close_tirs;
-	}
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
 
-	return 0;
+	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
 
-err_close_tirs:
-	for (i--; i >= 0; i--)
-		mlx5e_close_tir(priv, i);
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+
+	kvfree(in);
 
 	return err;
 }
 
-static void mlx5e_close_tirs(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = 0; i < MLX5E_NUM_TT; i++)
-		mlx5e_close_tir(priv, i);
-}
-
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1400,6 +1359,8 @@
 	int num_txqs;
 	int err;
 
+	set_bit(MLX5E_STATE_OPENED, &priv->state);
+
 	num_txqs = priv->params.num_channels * priv->params.num_tc;
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
@@ -1408,74 +1369,19 @@
 	if (err)
 		return err;
 
-	err = mlx5e_open_tises(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
-			   __func__, err);
-		return err;
-	}
-
 	err = mlx5e_open_channels(priv);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
 			   __func__, err);
-		goto err_close_tises;
+		return err;
 	}
 
-	err = mlx5e_open_rqt(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
-			   __func__, err);
-		goto err_close_channels;
-	}
-
-	err = mlx5e_open_tirs(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_tir failed, %d\n",
-			   __func__, err);
-		goto err_close_rqls;
-	}
-
-	err = mlx5e_open_flow_table(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
-			   __func__, err);
-		goto err_close_tirs;
-	}
-
-	err = mlx5e_add_all_vlan_rules(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
-			   __func__, err);
-		goto err_close_flow_table;
-	}
-
-	mlx5e_init_eth_addr(priv);
-
-	set_bit(MLX5E_STATE_OPENED, &priv->state);
-
 	mlx5e_update_carrier(priv);
-	mlx5e_set_rx_mode_core(priv);
+	mlx5e_redirect_rqts(priv);
 
 	schedule_delayed_work(&priv->update_stats_work, 0);
+
 	return 0;
-
-err_close_flow_table:
-	mlx5e_close_flow_table(priv);
-
-err_close_tirs:
-	mlx5e_close_tirs(priv);
-
-err_close_rqls:
-	mlx5e_close_rqt(priv);
-
-err_close_channels:
-	mlx5e_close_channels(priv);
-
-err_close_tises:
-	mlx5e_close_tises(priv);
-
-	return err;
 }
 
 static int mlx5e_open(struct net_device *netdev)
@@ -1496,14 +1402,9 @@
 
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 
-	mlx5e_set_rx_mode_core(priv);
-	mlx5e_del_all_vlan_rules(priv);
+	mlx5e_redirect_rqts(priv);
 	netif_carrier_off(priv->netdev);
-	mlx5e_close_flow_table(priv);
-	mlx5e_close_tirs(priv);
-	mlx5e_close_rqt(priv);
 	mlx5e_close_channels(priv);
-	mlx5e_close_tises(priv);
 
 	return 0;
 }
@@ -1520,26 +1421,341 @@
 	return err;
 }
 
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-			     struct mlx5e_params *new_params)
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+				struct mlx5e_rq *rq,
+				struct mlx5e_rq_param *param)
 {
-	int err = 0;
-	int was_opened;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqc = param->rqc;
+	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	int err;
 
-	WARN_ON(!mutex_is_locked(&priv->state_lock));
+	param->wq.db_numa_node = param->wq.buf_numa_node;
 
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(priv->netdev);
+	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+				&rq->wq_ctrl);
+	if (err)
+		return err;
 
-	priv->params = *new_params;
+	rq->priv = priv;
 
-	if (was_opened)
-		err = mlx5e_open_locked(priv->netdev);
+	return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+				struct mlx5e_cq *cq,
+				struct mlx5e_cq_param *param)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	int eqn_not_used;
+	int irqn;
+	int err;
+
+	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+			       &cq->wq_ctrl);
+	if (err)
+		return err;
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+	mcq->cqe_sz     = 64;
+	mcq->set_ci_db  = cq->wq_ctrl.db.db;
+	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+	*mcq->set_ci_db = 0;
+	*mcq->arm_db    = 0;
+	mcq->vector     = param->eq_ix;
+	mcq->comp       = mlx5e_completion_event;
+	mcq->event      = mlx5e_cq_error_event;
+	mcq->irqn       = irqn;
+	mcq->uar        = &priv->cq_uar;
+
+	cq->priv = priv;
+
+	return 0;
+}
+
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+	struct mlx5e_cq_param cq_param;
+	struct mlx5e_rq_param rq_param;
+	struct mlx5e_rq *rq = &priv->drop_rq;
+	struct mlx5e_cq *cq = &priv->drop_rq.cq;
+	int err;
+
+	memset(&cq_param, 0, sizeof(cq_param));
+	memset(&rq_param, 0, sizeof(rq_param));
+	mlx5e_build_rx_cq_param(priv, &cq_param);
+	mlx5e_build_rq_param(priv, &rq_param);
+
+	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_cq(cq, &cq_param);
+	if (err)
+		goto err_destroy_cq;
+
+	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+	if (err)
+		goto err_disable_cq;
+
+	err = mlx5e_enable_rq(rq, &rq_param);
+	if (err)
+		goto err_destroy_rq;
+
+	return 0;
+
+err_destroy_rq:
+	mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+	mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+	mlx5e_destroy_cq(&priv->drop_rq.cq);
 
 	return err;
 }
 
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+	mlx5e_disable_rq(&priv->drop_rq);
+	mlx5e_destroy_rq(&priv->drop_rq);
+	mlx5e_disable_cq(&priv->drop_rq.cq);
+	mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(tisc, tisc, prio,  tc);
+	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+	int err;
+	int tc;
+
+	for (tc = 0; tc < priv->params.num_tc; tc++) {
+		err = mlx5e_create_tis(priv, tc);
+		if (err)
+			goto err_close_tises;
+	}
+
+	return 0;
+
+err_close_tises:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_destroy_tis(priv, tc);
+
+	return err;
+}
+
+static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+{
+	int tc;
+
+	for (tc = 0; tc < priv->params.num_tc; tc++)
+		mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+
+	switch (tt) {
+	case MLX5E_TT_ANY:
+		MLX5_SET(tirc, tirc, indirect_table,
+			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
+		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+		break;
+	default:
+		MLX5_SET(tirc, tirc, indirect_table,
+			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
+		MLX5_SET(tirc, tirc, rx_hash_fn,
+			 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+			void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+						     rx_hash_toeplitz_key);
+			size_t len = MLX5_FLD_SZ_BYTES(tirc,
+						       rx_hash_toeplitz_key);
+
+			MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+			memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+		}
+		break;
+	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	}
+}
+
+static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 *in;
+	void *tirc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+	mlx5e_build_tir_ctx(priv, tirc, tt);
+
+	err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+{
+	mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_create_tir(priv, i);
+		if (err)
+			goto err_destroy_tirs;
+	}
+
+	return 0;
+
+err_destroy_tirs:
+	for (i--; i >= 0; i--)
+		mlx5e_destroy_tir(priv, i);
+
+	return err;
+}
+
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++)
+		mlx5e_destroy_tir(priv, i);
+}
+
 static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
@@ -1589,20 +1805,26 @@
 			      netdev_features_t features)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
 	netdev_features_t changes = features ^ netdev->features;
-	struct mlx5e_params new_params;
-	bool update_params = false;
 
 	mutex_lock(&priv->state_lock);
-	new_params = priv->params;
 
 	if (changes & NETIF_F_LRO) {
-		new_params.lro_en = !!(features & NETIF_F_LRO);
-		update_params = true;
+		bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+		if (was_opened)
+			mlx5e_close_locked(priv->netdev);
+
+		priv->params.lro_en = !!(features & NETIF_F_LRO);
+		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
+		mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+
+		if (was_opened)
+			err = mlx5e_open_locked(priv->netdev);
 	}
 
-	if (update_params)
-		mlx5e_update_priv_params(priv, &new_params);
+	mutex_unlock(&priv->state_lock);
 
 	if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1611,8 +1833,6 @@
 			mlx5e_disable_vlan_filter(priv);
 	}
 
-	mutex_unlock(&priv->state_lock);
-
 	return 0;
 }
 
@@ -1620,8 +1840,9 @@
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
 	int max_mtu;
-	int err;
+	int err = 0;
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -1633,8 +1854,16 @@
 	}
 
 	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(netdev);
+
 	netdev->mtu = new_mtu;
-	err = mlx5e_update_priv_params(priv, &priv->params);
+
+	if (was_opened)
+		err = mlx5e_open_locked(netdev);
+
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -1673,11 +1902,21 @@
 	return 0;
 }
 
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+	return bf_buf_size -
+	       sizeof(struct mlx5e_tx_wqe) +
+	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 				    struct net_device *netdev,
-				    int num_comp_vectors)
+				    int num_channels)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int i;
 
 	priv->params.log_sq_size           =
 		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -1691,24 +1930,25 @@
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
 	priv->params.tx_cq_moderation_pkts =
 		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
 	priv->params.min_rx_wqes           =
 		MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
-	priv->params.rx_hash_log_tbl_sz    =
-		(order_base_2(num_comp_vectors) >
-		 MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
-		order_base_2(num_comp_vectors)           :
-		MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
 	priv->params.num_tc                = 1;
 	priv->params.default_vlan_prio     = 0;
+	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
-	priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
+	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+			    sizeof(priv->params.toeplitz_hash_key));
+
+	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+		priv->params.indirection_rqt[i] = i % num_channels;
+
 	priv->params.lro_wqe_sz            =
 		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 
 	priv->mdev                         = mdev;
 	priv->netdev                       = netdev;
-	priv->params.num_channels          = num_comp_vectors;
-	priv->num_tc                       = priv->params.num_tc;
+	priv->params.num_channels          = num_channels;
 	priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
 	spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1973,8 @@
 
 	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
-	if (priv->num_tc > 1) {
+	if (priv->params.num_tc > 1)
 		mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
-	}
 
 	netdev->netdev_ops        = &mlx5e_netdev_ops;
 	netdev->watchdog_timeo    = 15 * HZ;
@@ -1798,19 +2037,20 @@
 {
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
-	int ncv = mdev->priv.eq_table.num_comp_vectors;
+	int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
+			MLX5E_MAX_NUM_CHANNELS);
 	int err;
 
 	if (mlx5e_check_required_hca_cap(mdev))
 		return NULL;
 
-	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), ncv, ncv);
+	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
 	if (!netdev) {
 		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
 		return NULL;
 	}
 
-	mlx5e_build_netdev_priv(mdev, netdev, ncv);
+	mlx5e_build_netdev_priv(mdev, netdev, nch);
 	mlx5e_build_netdev(netdev);
 
 	netif_carrier_off(netdev);
@@ -1819,43 +2059,95 @@
 
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
 		goto err_free_netdev;
 	}
 
 	err = mlx5_core_alloc_pd(mdev, &priv->pdn);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
 		goto err_unmap_free_uar;
 	}
 
 	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
 		goto err_dealloc_pd;
 	}
 
 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
 	if (err) {
-		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
-			   __func__, err);
+		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
 		goto err_dealloc_transport_domain;
 	}
 
-	err = register_netdev(netdev);
+	err = mlx5e_create_tises(priv);
 	if (err) {
-		netdev_err(netdev, "%s: register_netdev failed, %d\n",
-			   __func__, err);
+		mlx5_core_warn(mdev, "create tises failed, %d\n", err);
 		goto err_destroy_mkey;
 	}
 
+	err = mlx5e_open_drop_rq(priv);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_destroy_tises;
+	}
+
+	err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+	if (err) {
+		mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+		goto err_close_drop_rq;
+	}
+
+	err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	if (err) {
+		mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
+		goto err_destroy_rqt_indir;
+	}
+
+	err = mlx5e_create_tirs(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
+		goto err_destroy_rqt_single;
+	}
+
+	err = mlx5e_create_flow_tables(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+		goto err_destroy_tirs;
+	}
+
+	mlx5e_init_eth_addr(priv);
+
+	err = register_netdev(netdev);
+	if (err) {
+		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+		goto err_destroy_flow_tables;
+	}
+
 	mlx5e_enable_async_events(priv);
+	schedule_work(&priv->set_rx_mode_work);
 
 	return priv;
 
+err_destroy_flow_tables:
+	mlx5e_destroy_flow_tables(priv);
+
+err_destroy_tirs:
+	mlx5e_destroy_tirs(priv);
+
+err_destroy_rqt_single:
+	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+
+err_destroy_rqt_indir:
+	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+
+err_close_drop_rq:
+	mlx5e_close_drop_rq(priv);
+
+err_destroy_tises:
+	mlx5e_destroy_tises(priv);
+
 err_destroy_mkey:
 	mlx5_core_destroy_mkey(mdev, &priv->mr);
 
@@ -1879,13 +2171,22 @@
 	struct mlx5e_priv *priv = vpriv;
 	struct net_device *netdev = priv->netdev;
 
+	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+	schedule_work(&priv->set_rx_mode_work);
+	mlx5e_disable_async_events(priv);
+	flush_scheduled_work();
 	unregister_netdev(netdev);
+	mlx5e_destroy_flow_tables(priv);
+	mlx5e_destroy_tirs(priv);
+	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+	mlx5e_close_drop_rq(priv);
+	mlx5e_destroy_tises(priv);
 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
 	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
-	mlx5e_disable_async_events(priv);
-	flush_scheduled_work();
 	free_netdev(netdev);
 }
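
The new indirection-table fill above spreads receive queues across channels and, for the XOR8 hash, walks the table in bit-reversed order so neighbouring hash values land on different channels. Below is a minimal standalone sketch of that mapping, not driver code: the 128-entry table size is an assumption (MLX5E_INDIR_RQT_SIZE is defined in en.h, outside this excerpt), and the helpers only mirror mlx5e_bits_invert() and mlx5e_fill_indir_rqt_rqns() from the hunk above.

#include <stdio.h>

#define LOG_INDIR_RQT_SIZE 7                     /* assumed, see en.h */
#define INDIR_RQT_SIZE     (1 << LOG_INDIR_RQT_SIZE)

/* Reverse the low 'size' bits of 'a', as mlx5e_bits_invert() does. */
static int bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= ((a >> (size - i - 1)) & 1) << i;
	return inv;
}

int main(void)
{
	unsigned int indirection_rqt[INDIR_RQT_SIZE];
	int num_channels = 6;                     /* example value */
	int i;

	/* Default table fill, as in mlx5e_build_netdev_priv(). */
	for (i = 0; i < INDIR_RQT_SIZE; i++)
		indirection_rqt[i] = i % num_channels;

	/* XOR8 hash case: walk the table in bit-reversed order. */
	for (i = 0; i < 8; i++) {
		int ix = bits_invert(i, LOG_INDIR_RQT_SIZE);

		printf("slot %d -> channel %u\n", i,
		       indirection_rqt[ix] % num_channels);
	}
	return 0;
}
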
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9a93741..cf00985 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -111,10 +111,12 @@
 		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
 					sizeof(struct iphdr));
 		ipv6 = NULL;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 	} else {
 		tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
 					sizeof(struct ipv6hdr));
 		ipv4 = NULL;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 	}
 
 	if (get_cqe_lro_tcppsh(cqe))
@@ -149,6 +151,38 @@
 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+				     struct mlx5_cqe64 *cqe,
+				     struct mlx5e_rq *rq,
+				     struct sk_buff *skb)
+{
+	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+		goto csum_none;
+
+	if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (is_first_ethertype_ip(skb)) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		rq->stats.csum_sw++;
+	} else {
+		goto csum_none;
+	}
+
+	return;
+
+csum_none:
+	skb->ip_summed = CHECKSUM_NONE;
+	rq->stats.csum_none++;
+}
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 				      struct mlx5e_rq *rq,
 				      struct sk_buff *skb)
@@ -162,20 +196,12 @@
 	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	if (lro_num_seg > 1) {
 		mlx5e_lro_update_hdr(skb, cqe);
-		skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
 		rq->stats.lro_packets++;
 		rq->stats.lro_bytes += cqe_bcnt;
 	}
 
-	if (likely(netdev->features & NETIF_F_RXCSUM) &&
-	    (cqe->hds_ip_ext & CQE_L2_OK) &&
-	    (cqe->hds_ip_ext & CQE_L3_OK) &&
-	    (cqe->hds_ip_ext & CQE_L4_OK)) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else {
-		skb->ip_summed = CHECKSUM_NONE;
-		rq->stats.csum_none++;
-	}
+	mlx5e_handle_csum(netdev, cqe, rq, skb);
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
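
The gso_size change above replaces the fixed LRO default with the actual per-segment size of the aggregate: cqe_bcnt split over lro_num_seg segments, rounded up. A quick standalone check of that arithmetic, with illustrative values that are not taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cqe_bcnt = 45000;   /* bytes in the LRO aggregate (example) */
	unsigned int lro_num_seg = 31;   /* coalesced segments (example) */

	/* Matches the new gso_size computation in mlx5e_build_rx_skb(). */
	printf("gso_size = %u\n", DIV_ROUND_UP(cqe_bcnt, lro_num_seg));
	return 0;
}
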
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 03f28f4..b73672f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -57,7 +57,7 @@
 
 	if (notify_hw) {
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, 0);
 	}
 }
 
@@ -106,13 +106,21 @@
 		 priv->default_vlan_prio;
 	int tc = netdev_get_prio_tc_map(dev, up);
 
-	return priv->channel[channel_ix]->tc_to_txq_map[tc];
+	return priv->channeltc_to_txq_map[channel_ix][tc];
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb)
+					    struct sk_buff *skb, bool bf)
 {
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+	/* Some NIC TX decisions, e.g. loopback, are based on the packet
+	 * headers and occur before the data gather.
+	 * Therefore these headers must be copied into the WQE.
+	 */

+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+	if (bf && (skb_headlen(skb) <= sq->max_inline))
+		return skb_headlen(skb);
+
 	return MLX5E_MIN_INLINE;
 }
 
@@ -129,6 +137,7 @@
 
 	u8  opcode = MLX5_OPCODE_SEND;
 	dma_addr_t dma_addr = 0;
+	bool bf = false;
 	u16 headlen;
 	u16 ds_cnt;
 	u16 ihs;
@@ -141,6 +150,11 @@
 	else
 		sq->stats.csum_offload_none++;
 
+	if (sq->cc != sq->prev_cc) {
+		sq->prev_cc = sq->cc;
+		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+	}
+
 	if (skb_is_gso(skb)) {
 		u32 payload_len;
 
@@ -153,7 +167,10 @@
 		sq->stats.tso_packets++;
 		sq->stats.tso_bytes += payload_len;
 	} else {
-		ihs = mlx5e_get_inline_hdr_size(sq, skb);
+		bf = sq->bf_budget &&
+		     !skb->xmit_more &&
+		     !skb_shinfo(skb)->nr_frags;
+		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
 		MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
 							ETH_ZLEN);
 	}
@@ -225,14 +242,21 @@
 	}
 
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		int bf_sz = 0;
+
+		if (bf && sq->uar_bf_map)
+			bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
 		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, wqe);
+		mlx5e_tx_notify_hw(sq, wqe, bf_sz);
 	}
 
 	/* fill sq edge with nops to avoid wqe wrap around */
 	while ((sq->pc & wq->sz_m1) > sq->edge)
 		mlx5e_send_nop(sq, false);
 
+	sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
 	sq->stats.packets++;
 	return NETDEV_TX_OK;
 
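
The transmit changes above gate header inlining on a BlueFlame budget: when a BlueFlame send is possible and the linear part of the skb fits within max_inline, the whole headlen is copied into the WQE; otherwise only the minimal Ethernet-plus-VLAN header is. A standalone sketch of that decision follows; the max_inline value is illustrative, the real one comes from mlx5e_get_max_inline_cap().

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN   14
#define MIN_INLINE (ETH_HLEN + 2)  /* eth header + vlan tag, as in the patch */

/* Mirrors mlx5e_get_inline_hdr_size(): inline the full headlen only when
 * a BlueFlame send is possible and the headers fit in max_inline.
 */
static unsigned int inline_hdr_size(unsigned int headlen,
				    unsigned int max_inline, bool bf)
{
	if (bf && headlen <= max_inline)
		return headlen;
	return MIN_INLINE;
}

int main(void)
{
	unsigned int max_inline = 192;   /* example value only */

	printf("64B ping, bf:  inline %u bytes\n",
	       inline_hdr_size(64, max_inline, true));
	printf("1500B frame:   inline %u bytes\n",
	       inline_hdr_size(1500, max_inline, true));
	printf("no bf budget:  inline %u bytes\n",
	       inline_hdr_size(64, max_inline, false));
	return 0;
}
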
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 06e3e1e..03aabdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -457,7 +457,7 @@
 	struct mlx5_priv *priv  = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node           = dev_to_node(&mdev->pdev->dev);
+	int numa_node           = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -656,6 +656,22 @@
 }
 #endif
 
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+	return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+	if (dev->priv.bf_mapping)
+		io_mapping_free(dev->priv.bf_mapping);
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -670,6 +686,10 @@
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	spin_lock_init(&priv->mkey_lock);
 
+	mutex_init(&priv->alloc_mutex);
+
+	priv->numa_node = dev_to_node(&dev->pdev->dev);
+
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
 	if (!priv->dbg_root)
 		return -ENOMEM;
@@ -806,10 +826,13 @@
 		goto err_stop_eqs;
 	}
 
+	if (map_bf_area(dev))
+		dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
 	err = mlx5_irq_set_affinity_hints(dev);
 	if (err) {
 		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-		goto err_free_comp_eqs;
+		goto err_unmap_bf_area;
 	}
 
 	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -821,7 +844,9 @@
 
 	return 0;
 
-err_free_comp_eqs:
+err_unmap_bf_area:
+	unmap_bf_area(dev);
+
 	free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -879,6 +904,7 @@
 	mlx5_cleanup_qp_table(dev);
 	mlx5_cleanup_cq_table(dev);
 	mlx5_irq_clear_affinity_hints(dev);
+	unmap_bf_area(dev);
 	free_comp_eqs(dev);
 	mlx5_stop_eqs(dev);
 	mlx5_free_uuars(dev, &priv->uuari);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc88eca..566a704 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -73,7 +73,12 @@
 					     int in_size, u32 *out,
 					     int out_size)
 {
-	mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	int err;
+
+	err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+	if (err)
+		return err;
+
 	return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7014799..821caaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -216,22 +216,25 @@
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
 
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-			 enum mlx5_port_status status)
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status)
 {
 	u32 in[MLX5_ST_SZ_DW(paos_reg)];
 	u32 out[MLX5_ST_SZ_DW(paos_reg)];
 
 	memset(in, 0, sizeof(in));
 
+	MLX5_SET(paos_reg, in, local_port, 1);
 	MLX5_SET(paos_reg, in, admin_status, status);
 	MLX5_SET(paos_reg, in, ase, 1);
 
 	return mlx5_core_access_reg(dev, in, sizeof(in), out,
 				    sizeof(out), MLX5_REG_PAOS, 0, 1);
 }
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
 
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status)
 {
 	u32 in[MLX5_ST_SZ_DW(paos_reg)];
 	u32 out[MLX5_ST_SZ_DW(paos_reg)];
@@ -239,14 +242,17 @@
 
 	memset(in, 0, sizeof(in));
 
+	MLX5_SET(paos_reg, in, local_port, 1);
+
 	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 				   sizeof(out), MLX5_REG_PAOS, 0, 0);
 	if (err)
 		return err;
 
-	*status = MLX5_GET(paos_reg, out, oper_status);
+	*status = MLX5_GET(paos_reg, out, admin_status);
 	return err;
 }
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
 
 static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
 				int *max_mtu, int *oper_mtu, u8 port)
@@ -328,3 +334,45 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+	MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PFCC, 0, 1);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+			  u32 *rx_pause, u32 *tx_pause)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PFCC, 0, 0);
+	if (err)
+		return err;
+
+	if (rx_pause)
+		*rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+	if (tx_pause)
+		*tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
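
The two PFCC helpers added above are exported so the Ethernet driver can expose pause settings through ethtool. A hypothetical get_pauseparam hook built on them might look like the sketch below; it is not part of this patch and is not compilable outside the driver tree (it assumes the mlx5e "en.h" header for struct mlx5e_priv).

#include <linux/ethtool.h>
#include "en.h"	/* assumed: mlx5e driver header providing struct mlx5e_priv */

/* Hypothetical ethtool hook; mlx5e does not wire this up in this patch. */
static void mlx5e_get_pauseparam_sketch(struct net_device *netdev,
					struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	u32 rx_pause = 0;
	u32 tx_pause = 0;

	if (!mlx5_query_port_pause(priv->mdev, &rx_pause, &tx_pause)) {
		pauseparam->rx_pause = rx_pause;
		pauseparam->tx_pause = tx_pause;
	}
}
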
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 8d98b03..b4c87c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -163,6 +163,18 @@
 	return err;
 }
 
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_tir_out)];
+
+	MLX5_SET(modify_tir_in, in, tirn, tirn);
+	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
 {
 	u32 in[MLX5_ST_SZ_DW(destroy_tir_out)];
@@ -358,3 +370,44 @@
 	return  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
 					   sizeof(out));
 }
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+	int err;
+
+	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+	return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+	memset(out, 0, sizeof(out));
+	return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index f9ef244..74cae51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -45,6 +45,8 @@
 void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
 int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen);
 void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
 int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			 u32 *tisn);
@@ -61,4 +63,10 @@
 int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
 #endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 9ef8587..eb05c84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -246,6 +247,10 @@
 		goto err_free_uar;
 	}
 
+	if (mdev->priv.bf_mapping)
+		uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+						uar->index << PAGE_SHIFT);
+
 	return 0;
 
 err_free_uar:
@@ -257,6 +262,7 @@
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
+	io_mapping_unmap(uar->bf_map);
 	iounmap(uar->map);
 	mlx5_cmd_free_uar(mdev, uar->index);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 8388411..ce21ee5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -73,13 +73,14 @@
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -108,13 +109,14 @@
 	wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
 	wq->sz_m1 = (1 << wq->log_sz) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
 	}
 
-	err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+	err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
 		goto err_db_free;
@@ -144,7 +146,7 @@
 	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
 	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-	err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
 		mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index e0ddd69..6c2a8f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -37,7 +37,8 @@
 
 struct mlx5_wq_param {
 	int		linear;
-	int		numa;
+	int		buf_numa_node;
+	int		db_numa_node;
 };
 
 struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644
index 0000000..2941d9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+	tristate "Mellanox Technologies Switch ASICs support"
+	---help---
+	  This driver supports the Mellanox Technologies Switch ASICs family.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_core.
+
+config MLXSW_PCI
+	tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+	depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+	default m
+	---help---
+	  This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+	tristate "Mellanox Technologies SwitchX-2 support"
+	depends on MLXSW_CORE && NET_SWITCHDEV
+	default m
+	---help---
+	  This driver supports Mellanox Technologies SwitchX-2 Ethernet
+	  Switch ASICs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mlxsw_switchx2.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644
index 0000000..0a05f65
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE)	+= mlxsw_core.o
+mlxsw_core-objs			:= core.o
+obj-$(CONFIG_MLXSW_PCI)		+= mlxsw_pci.o
+mlxsw_pci-objs			:= pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2)	+= mlxsw_switchx2.o
+mlxsw_switchx2-objs		:= switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644
index 0000000..770db17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE	4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+	return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+	kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+	memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+		   u32 in_mod, bool out_mbox_direct,
+		   char *in_mbox, size_t in_mbox_size,
+		   char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+				    u8 opcode_mod, u32 in_mod, char *in_mbox,
+				    size_t in_mbox_size)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+			      in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+				     u8 opcode_mod, u32 in_mod,
+				     bool out_mbox_direct,
+				     char *out_mbox, size_t out_mbox_size)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+			      out_mbox_direct, NULL, 0,
+			      out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+				      u8 opcode_mod, u32 in_mod)
+{
+	return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+			      NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+	MLXSW_CMD_OPCODE_QUERY_FW		= 0x004,
+	MLXSW_CMD_OPCODE_QUERY_BOARDINFO	= 0x006,
+	MLXSW_CMD_OPCODE_QUERY_AQ_CAP		= 0x003,
+	MLXSW_CMD_OPCODE_MAP_FA			= 0xFFF,
+	MLXSW_CMD_OPCODE_UNMAP_FA		= 0xFFE,
+	MLXSW_CMD_OPCODE_CONFIG_PROFILE		= 0x100,
+	MLXSW_CMD_OPCODE_ACCESS_REG		= 0x040,
+	MLXSW_CMD_OPCODE_SW2HW_DQ		= 0x201,
+	MLXSW_CMD_OPCODE_HW2SW_DQ		= 0x202,
+	MLXSW_CMD_OPCODE_2ERR_DQ		= 0x01E,
+	MLXSW_CMD_OPCODE_QUERY_DQ		= 0x022,
+	MLXSW_CMD_OPCODE_SW2HW_CQ		= 0x016,
+	MLXSW_CMD_OPCODE_HW2SW_CQ		= 0x017,
+	MLXSW_CMD_OPCODE_QUERY_CQ		= 0x018,
+	MLXSW_CMD_OPCODE_SW2HW_EQ		= 0x013,
+	MLXSW_CMD_OPCODE_HW2SW_EQ		= 0x014,
+	MLXSW_CMD_OPCODE_QUERY_EQ		= 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+	switch (opcode) {
+	case MLXSW_CMD_OPCODE_QUERY_FW:
+		return "QUERY_FW";
+	case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+		return "QUERY_BOARDINFO";
+	case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+		return "QUERY_AQ_CAP";
+	case MLXSW_CMD_OPCODE_MAP_FA:
+		return "MAP_FA";
+	case MLXSW_CMD_OPCODE_UNMAP_FA:
+		return "UNMAP_FA";
+	case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+		return "CONFIG_PROFILE";
+	case MLXSW_CMD_OPCODE_ACCESS_REG:
+		return "ACCESS_REG";
+	case MLXSW_CMD_OPCODE_SW2HW_DQ:
+		return "SW2HW_DQ";
+	case MLXSW_CMD_OPCODE_HW2SW_DQ:
+		return "HW2SW_DQ";
+	case MLXSW_CMD_OPCODE_2ERR_DQ:
+		return "2ERR_DQ";
+	case MLXSW_CMD_OPCODE_QUERY_DQ:
+		return "QUERY_DQ";
+	case MLXSW_CMD_OPCODE_SW2HW_CQ:
+		return "SW2HW_CQ";
+	case MLXSW_CMD_OPCODE_HW2SW_CQ:
+		return "HW2SW_CQ";
+	case MLXSW_CMD_OPCODE_QUERY_CQ:
+		return "QUERY_CQ";
+	case MLXSW_CMD_OPCODE_SW2HW_EQ:
+		return "SW2HW_EQ";
+	case MLXSW_CMD_OPCODE_HW2SW_EQ:
+		return "HW2SW_EQ";
+	case MLXSW_CMD_OPCODE_QUERY_EQ:
+		return "QUERY_EQ";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+enum mlxsw_cmd_status {
+	/* Command execution succeeded. */
+	MLXSW_CMD_STATUS_OK		= 0x00,
+	/* Internal error (e.g. bus error) occurred while processing command. */
+	MLXSW_CMD_STATUS_INTERNAL_ERR	= 0x01,
+	/* Operation/command not supported or opcode modifier not supported. */
+	MLXSW_CMD_STATUS_BAD_OP		= 0x02,
+	/* Parameter not supported, parameter out of range. */
+	MLXSW_CMD_STATUS_BAD_PARAM	= 0x03,
+	/* System was not enabled or bad system state. */
+	MLXSW_CMD_STATUS_BAD_SYS_STATE	= 0x04,
+	/* Attempt to access reserved or unallocated resource, or resource in
+	 * inappropriate ownership.
+	 */
+	MLXSW_CMD_STATUS_BAD_RESOURCE	= 0x05,
+	/* Requested resource is currently executing a command. */
+	MLXSW_CMD_STATUS_RESOURCE_BUSY	= 0x06,
+	/* Required capability exceeds device limits. */
+	MLXSW_CMD_STATUS_EXCEED_LIM	= 0x08,
+	/* Resource is not in the appropriate state or ownership. */
+	MLXSW_CMD_STATUS_BAD_RES_STATE	= 0x09,
+	/* Index out of range (might be beyond table size or attempt to
+	 * access a reserved resource).
+	 */
+	MLXSW_CMD_STATUS_BAD_INDEX	= 0x0A,
+	/* NVMEM checksum/CRC failed. */
+	MLXSW_CMD_STATUS_BAD_NVMEM	= 0x0B,
+	/* Bad management packet (silently discarded). */
+	MLXSW_CMD_STATUS_BAD_PKT	= 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+	switch (status) {
+	case MLXSW_CMD_STATUS_OK:
+		return "OK";
+	case MLXSW_CMD_STATUS_INTERNAL_ERR:
+		return "INTERNAL_ERR";
+	case MLXSW_CMD_STATUS_BAD_OP:
+		return "BAD_OP";
+	case MLXSW_CMD_STATUS_BAD_PARAM:
+		return "BAD_PARAM";
+	case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+		return "BAD_SYS_STATE";
+	case MLXSW_CMD_STATUS_BAD_RESOURCE:
+		return "BAD_RESOURCE";
+	case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+		return "RESOURCE_BUSY";
+	case MLXSW_CMD_STATUS_EXCEED_LIM:
+		return "EXCEED_LIM";
+	case MLXSW_CMD_STATUS_BAD_RES_STATE:
+		return "BAD_RES_STATE";
+	case MLXSW_CMD_STATUS_BAD_INDEX:
+		return "BAD_INDEX";
+	case MLXSW_CMD_STATUS_BAD_NVMEM:
+		return "BAD_NVMEM";
+	case MLXSW_CMD_STATUS_BAD_PKT:
+		return "BAD_PKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is done for the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Offset of the Read Only buffer for internal error reports,
+ * relative to the error_buf_bar register in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
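+
+/* A minimal QUERY_FW usage sketch (illustrative only; it assumes the accessor
+ * names generated by the MLXSW_ITEM32() macros above and the mailbox helpers
+ * used elsewhere in this driver):
+ *
+ *	char *out_mbox = mlxsw_cmd_mbox_alloc();
+ *
+ *	if (!out_mbox)
+ *		return -ENOMEM;
+ *	err = mlxsw_cmd_query_fw(mlxsw_core, out_mbox);
+ *	if (!err)
+ *		fw_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(out_mbox);
+ *	mlxsw_cmd_mbox_free(out_mbox);
+ */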
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
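+
+/* Illustrative sketch of reading the PSID string from the QUERY_BOARDINFO
+ * output mailbox (assumes the _memcpy_from() accessor generated by the
+ * MLXSW_ITEM_BUF() macro above):
+ *
+ *	char psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+ *
+ *	err = mlxsw_cmd_boardinfo(mlxsw_core, out_mbox);
+ *	if (!err)
+ *		mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(out_mbox, psid);
+ */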
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+					 char *out_mbox)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+				  0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum S/G list elements in a DSQ. A DSQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum S/G list elements in a DRQ. A DRQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+				   char *in_mbox, u32 vpm_entries_count)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+				 0, vpm_entries_count,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
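+
+/* Illustrative MAP_FA usage sketch, assuming the indexed setters generated by
+ * the MLXSW_ITEM*_INDEXED() macros above. Each VPM entry describes one
+ * physically contiguous, pinned chunk, and INMmod carries the entry count.
+ * Each entry below maps a single 4KB page (log2size == 0):
+ *
+ *	for (i = 0; i < num_entries; i++) {
+ *		mlxsw_cmd_mbox_map_fa_pa_set(in_mbox, i, dma_addr[i]);
+ *		mlxsw_cmd_mbox_map_fa_log2size_set(in_mbox, i, 0);
+ *	}
+ *	err = mlxsw_cmd_map_fa(mlxsw_core, in_mbox, num_entries);
+ */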
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all the
+ * firmware area. After this command is completed the device will not access
+ * the pages that were mapped to the firmware area. After executing the
+ * UNMAP_FA command, a software reset must be done prior to executing the
+ * MAP_FA command again.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an indeterminate
+ * state; therefore a software reset of the device is required following an
+ * unsuccessful completion of the command. A software reset of the device is
+ * also required to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+					       char *in_mbox)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+				 1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+	     set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_flood_mode
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+		     0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+		     0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+		     0x60, 0, 8, 0x08, 0x00, false);
+
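+/* Illustrative sketch of setting a single profile field, assuming the setters
+ * generated by the MLXSW_ITEM32() macros above. A max_* value only takes
+ * effect when its matching set_* capability bit at offset 0x0C is also set:
+ *
+ *	mlxsw_cmd_mbox_config_profile_set_max_lag_set(in_mbox, 1);
+ *	mlxsw_cmd_mbox_config_profile_max_lag_set(in_mbox, 64);
+ *	err = mlxsw_cmd_config_profile_set(mlxsw_core, in_mbox);
+ */
+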
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, char *out_mbox)
+{
+	return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+			      0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+			      out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+				       char *in_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+				 opcode_mod, dq_number,
+				 in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+	MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+	MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+				      char *in_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
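+
+/* Illustrative SDQ hand-over sketch, assuming the setters generated by the
+ * MLXSW_ITEM*() macros above; the DQ number itself is passed through INMmod:
+ *
+ *	mlxsw_cmd_mbox_sw2hw_dq_cq_set(in_mbox, cq_number);
+ *	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(in_mbox, tclass);
+ *	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(in_mbox, 0);
+ *	mlxsw_cmd_mbox_sw2hw_dq_pa_set(in_mbox, 0, dq_dma_addr);
+ *	err = mlxsw_cmd_sw2hw_sdq(mlxsw_core, in_mbox, dq_number);
+ */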
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded,
+ * and software should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+				       u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number)
+{
+	return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from the state
+ * in which it has been. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+				      u32 dq_number, u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+				   opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+				     u32 dq_number)
+{
+	return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+				   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+				       char *out_mbox, u32 dq_number,
+				       u8 opcode_mod)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+				  opcode_mod, dq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+				      char *out_mbox, u32 dq_number)
+{
+	return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+				    MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+				 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled. In that case, updates of the
+ * CQ consumer counter (poll for completion) or request completion
+ * notification (Arm CQ) DoorBells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+				     u32 cq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+				   0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 cq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+				  0, cq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+				     char *in_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+				 0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED (binary 11)
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+				     u32 eq_number)
+{
+	return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+				   0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+				     char *out_mbox, u32 eq_number)
+{
+	return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+				  0, eq_number, false,
+				  out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644
index 0000000..dbcaf5d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -0,0 +1,1295 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
+	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
+	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+	struct u64_stats_sync	syncp;
+	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+	u32			trap_rx_invalid;
+	u32			port_rx_invalid;
+};
+
+struct mlxsw_core {
+	struct mlxsw_driver *driver;
+	const struct mlxsw_bus *bus;
+	void *bus_priv;
+	const struct mlxsw_bus_info *bus_info;
+	struct list_head rx_listener_list;
+	struct list_head event_listener_list;
+	struct {
+		struct sk_buff *resp_skb;
+		u64 tid;
+		wait_queue_head_t wait;
+		bool trans_active;
+		struct mutex lock; /* One EMAD transaction at a time. */
+		bool use_emad;
+	} emad;
+	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+	struct dentry *dbg_dir;
+	struct {
+		struct debugfs_blob_wrapper vsd_blob;
+		struct debugfs_blob_wrapper psid_blob;
+	} dbg;
+	unsigned long driver_priv[0];
+	/* driver_priv has to be always the last item */
+};
+
+struct mlxsw_rx_listener_item {
+	struct list_head list;
+	struct mlxsw_rx_listener rxl;
+	void *priv;
+};
+
+struct mlxsw_event_listener_item {
+	struct list_head list;
+	struct mlxsw_event_listener el;
+	void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
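+
+/* The items above describe an EMAD frame, which mlxsw_emad_construct() below
+ * builds back to front: Ethernet header, operation TLV, register TLV carrying
+ * the register payload, and finally the end TLV.
+ */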
+
+enum mlxsw_core_reg_access_type {
+	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+	switch (type) {
+	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+		return "query";
+	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+		return "write";
+	}
+	BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+				    const struct mlxsw_reg_info *reg,
+				    char *payload)
+{
+	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+				   const struct mlxsw_reg_info *reg,
+				   enum mlxsw_core_reg_access_type type,
+				   struct mlxsw_core *mlxsw_core)
+{
+	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+	if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type)
+		mlxsw_emad_op_tlv_method_set(op_tlv,
+					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+	else
+		mlxsw_emad_op_tlv_method_set(op_tlv,
+					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+	mlxsw_emad_op_tlv_class_set(op_tlv,
+				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+	mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+	skb_reset_mac_header(skb);
+
+	return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type,
+				 struct mlxsw_core *mlxsw_core)
+{
+	char *buf;
+
+	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+	mlxsw_emad_pack_end_tlv(buf);
+
+	buf = skb_push(skb, reg->len + sizeof(u32));
+	mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+	mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+	mlxsw_emad_construct_eth_hdr(skb);
+}
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+				      MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+	char *op_tlv;
+
+	op_tlv = mlxsw_emad_op_tlv(skb);
+	return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+	char *op_tlv;
+
+	op_tlv = mlxsw_emad_op_tlv(skb);
+	return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv));
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+				 struct sk_buff *skb,
+				 const struct mlxsw_tx_info *tx_info)
+{
+	int err;
+	int ret;
+
+	err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+	if (err) {
+		dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+			mlxsw_core->emad.tid);
+		dev_kfree_skb(skb);
+		return err;
+	}
+
+	mlxsw_core->emad.trans_active = true;
+	ret = wait_event_timeout(mlxsw_core->emad.wait,
+				 !(mlxsw_core->emad.trans_active),
+				 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+	if (!ret) {
+		dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+			 mlxsw_core->emad.tid);
+		mlxsw_core->emad.trans_active = false;
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+				     char *op_tlv)
+{
+	enum mlxsw_emad_op_tlv_status status;
+	u64 tid;
+
+	status = mlxsw_emad_op_tlv_status_get(op_tlv);
+	tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+	switch (status) {
+	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+		return 0;
+	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+			 tid, status, mlxsw_emad_op_tlv_status_str(status));
+		return -EAGAIN;
+	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+	default:
+		dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+			tid, status, mlxsw_emad_op_tlv_status_str(status));
+		return -EIO;
+	}
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+					 struct sk_buff *skb)
+{
+	return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+			       struct sk_buff *skb,
+			       const struct mlxsw_tx_info *tx_info)
+{
+	struct sk_buff *trans_skb;
+	int n_retry;
+	int err;
+
+	n_retry = 0;
+retry:
+	/* We copy the EMAD to a new skb, since we might need
+	 * to retransmit it in case of failure.
+	 */
+	trans_skb = skb_copy(skb, GFP_KERNEL);
+	if (!trans_skb) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+	if (!err) {
+		struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+		err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+		if (err)
+			dev_kfree_skb(resp_skb);
+		if (!err || err != -EAGAIN)
+			goto out;
+	}
+	if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+		goto retry;
+
+out:
+	dev_kfree_skb(skb);
+	mlxsw_core->emad.tid++;
+	return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+					void *priv)
+{
+	struct mlxsw_core *mlxsw_core = priv;
+
+	if (mlxsw_emad_is_resp(skb) &&
+	    mlxsw_core->emad.trans_active &&
+	    mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+		mlxsw_core->emad.resp_skb = skb;
+		mlxsw_core->emad.trans_active = false;
+		wake_up(&mlxsw_core->emad.wait);
+	} else {
+		dev_kfree_skb(skb);
+	}
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+	.func = mlxsw_emad_rx_listener_func,
+	.local_port = MLXSW_PORT_DONT_CARE,
+	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+			    MLXSW_TRAP_ID_ETHEMAD);
+	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+	int err;
+
+	/* Set the upper 32 bits of the transaction ID field to a random
+	 * number. This allows us to discard EMADs addressed to other
+	 * devices.
+	 */
+	get_random_bytes(&mlxsw_core->emad.tid, 4);
+	mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+	init_waitqueue_head(&mlxsw_core->emad.wait);
+	mlxsw_core->emad.trans_active = false;
+	mutex_init(&mlxsw_core->emad.lock);
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core,
+					      &mlxsw_emad_rx_listener,
+					      mlxsw_core);
+	if (err)
+		return err;
+
+	err = mlxsw_emad_traps_set(mlxsw_core);
+	if (err)
+		goto err_emad_trap_set;
+
+	mlxsw_core->emad.use_emad = true;
+
+	return 0;
+
+err_emad_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_core,
+					  &mlxsw_emad_rx_listener,
+					  mlxsw_core);
+	return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+			    MLXSW_TRAP_ID_ETHEMAD);
+	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+	mlxsw_core_rx_listener_unregister(mlxsw_core,
+					  &mlxsw_emad_rx_listener,
+					  mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+					u16 reg_len)
+{
+	struct sk_buff *skb;
+	u16 emad_len;
+
+	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
+	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+		return NULL;
+
+	skb = netdev_alloc_skb(NULL, emad_len);
+	if (!skb)
+		return NULL;
+	memset(skb->data, 0, emad_len);
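+	/* Reserve the whole frame so the headers and TLVs can be filled in
+	 * back to front with skb_push() (see mlxsw_emad_construct()).
+	 */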
+	skb_reserve(skb, emad_len);
+
+	return skb;
+}
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_core *mlxsw_core = file->private;
+	struct mlxsw_core_pcpu_stats *p;
+	u64 rx_packets, rx_bytes;
+	u64 tmp_rx_packets, tmp_rx_bytes;
+	u32 rx_dropped, rx_invalid;
+	unsigned int start;
+	int i;
+	int j;
+	static const char hdr[] =
+		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
+
+	seq_printf(file, hdr);
+	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+		rx_packets = 0;
+		rx_bytes = 0;
+		rx_dropped = 0;
+		for_each_possible_cpu(j) {
+			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+			do {
+				start = u64_stats_fetch_begin(&p->syncp);
+				tmp_rx_packets = p->trap_rx_packets[i];
+				tmp_rx_bytes = p->trap_rx_bytes[i];
+			} while (u64_stats_fetch_retry(&p->syncp, start));
+
+			rx_packets += tmp_rx_packets;
+			rx_bytes += tmp_rx_bytes;
+			rx_dropped += p->trap_rx_dropped[i];
+		}
+		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+			   i, rx_packets, rx_bytes, rx_dropped);
+	}
+	rx_invalid = 0;
+	for_each_possible_cpu(j) {
+		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+		rx_invalid += p->trap_rx_invalid;
+	}
+	seq_printf(file, "trap INV                           %10u\n",
+		   rx_invalid);
+
+	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+		rx_packets = 0;
+		rx_bytes = 0;
+		rx_dropped = 0;
+		for_each_possible_cpu(j) {
+			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+			do {
+				start = u64_stats_fetch_begin(&p->syncp);
+				tmp_rx_packets = p->port_rx_packets[i];
+				tmp_rx_bytes = p->port_rx_bytes[i];
+			} while (u64_stats_fetch_retry(&p->syncp, start));
+
+			rx_packets += tmp_rx_packets;
+			rx_bytes += tmp_rx_bytes;
+			rx_dropped += p->port_rx_dropped[i];
+		}
+		seq_printf(file, "port %3d %12llu %12llu %10u\n",
+			   i, rx_packets, rx_bytes, rx_dropped);
+	}
+	rx_invalid = 0;
+	for_each_possible_cpu(j) {
+		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+		rx_invalid += p->port_rx_invalid;
+	}
+	seq_printf(file, "port INV                           %10u\n",
+		   rx_invalid);
+	return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+	struct mlxsw_core *mlxsw_core = inode->i_private;
+
+	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+	.owner = THIS_MODULE,
+	.open = mlxsw_core_rx_stats_dbg_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+				    const char *buf, size_t size)
+{
+	__be32 *m = (__be32 *) buf;
+	int i;
+	int count = size / sizeof(__be32);
+
+	for (i = count - 1; i >= 0; i--)
+		if (m[i])
+			break;
+	i++;
+	count = i ? i : 1;
+	for (i = 0; i < count; i += 4)
+		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+	spin_lock(&mlxsw_core_driver_list_lock);
+	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+	spin_lock(&mlxsw_core_driver_list_lock);
+	list_del(&mlxsw_driver->list);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+		if (strcmp(mlxsw_driver->kind, kind) == 0)
+			return mlxsw_driver;
+	}
+	return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	spin_lock(&mlxsw_core_driver_list_lock);
+	mlxsw_driver = __driver_find(kind);
+	if (!mlxsw_driver) {
+		spin_unlock(&mlxsw_core_driver_list_lock);
+		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+		spin_lock(&mlxsw_core_driver_list_lock);
+		mlxsw_driver = __driver_find(kind);
+	}
+	if (mlxsw_driver) {
+		if (!try_module_get(mlxsw_driver->owner))
+			mlxsw_driver = NULL;
+	}
+
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+	struct mlxsw_driver *mlxsw_driver;
+
+	spin_lock(&mlxsw_core_driver_list_lock);
+	mlxsw_driver = __driver_find(kind);
+	spin_unlock(&mlxsw_core_driver_list_lock);
+	if (!mlxsw_driver)
+		return;
+	module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+						 mlxsw_core_dbg_root);
+	if (!mlxsw_core->dbg_dir)
+		return -ENOMEM;
+	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+			    &mlxsw_core->dbg.vsd_blob);
+	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+			    &mlxsw_core->dbg.psid_blob);
+	return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+	debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv)
+{
+	const char *device_kind = mlxsw_bus_info->device_kind;
+	struct mlxsw_core *mlxsw_core;
+	struct mlxsw_driver *mlxsw_driver;
+	size_t alloc_size;
+	int err;
+
+	mlxsw_driver = mlxsw_core_driver_get(device_kind);
+	if (!mlxsw_driver)
+		return -EINVAL;
+	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+	mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_core) {
+		err = -ENOMEM;
+		goto err_core_alloc;
+	}
+
+	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+	mlxsw_core->driver = mlxsw_driver;
+	mlxsw_core->bus = mlxsw_bus;
+	mlxsw_core->bus_priv = bus_priv;
+	mlxsw_core->bus_info = mlxsw_bus_info;
+
+	mlxsw_core->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+	if (!mlxsw_core->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+	if (err)
+		goto err_bus_init;
+
+	err = mlxsw_emad_init(mlxsw_core);
+	if (err)
+		goto err_emad_init;
+
+	err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+				 mlxsw_bus_info);
+	if (err)
+		goto err_driver_init;
+
+	err = mlxsw_core_debugfs_init(mlxsw_core);
+	if (err)
+		goto err_debugfs_init;
+
+	return 0;
+
+err_debugfs_init:
+	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+	mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+	mlxsw_bus->fini(bus_priv);
+err_bus_init:
+	free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+	kfree(mlxsw_core);
+err_core_alloc:
+	mlxsw_core_driver_put(device_kind);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+	const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+	mlxsw_core_debugfs_fini(mlxsw_core);
+	mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+	mlxsw_emad_fini(mlxsw_core);
+	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+	free_percpu(mlxsw_core->pcpu_stats);
+	kfree(mlxsw_core);
+	mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+	return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
+						  tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+					     tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+				   const struct mlxsw_rx_listener *rxl_b)
+{
+	return (rxl_a->func == rxl_b->func &&
+		rxl_a->local_port == rxl_b->local_port &&
+		rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+			const struct mlxsw_rx_listener *rxl,
+			void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+		    rxl_item->priv == priv)
+			return rxl_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (rxl_item)
+		return -EEXIST;
+	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+	if (!rxl_item)
+		return -ENOMEM;
+	rxl_item->rxl = *rxl;
+	rxl_item->priv = priv;
+
+	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+	return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+
+	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+	if (!rxl_item)
+		return;
+	list_del_rcu(&rxl_item->list);
+	synchronize_rcu();
+	kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+					   void *priv)
+{
+	struct mlxsw_event_listener_item *event_listener_item = priv;
+	struct mlxsw_reg_info reg;
+	char *payload;
+	char *op_tlv = mlxsw_emad_op_tlv(skb);
+	char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+	payload = mlxsw_emad_reg_payload(op_tlv);
+	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+	dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+				      const struct mlxsw_event_listener *el_b)
+{
+	return (el_a->func == el_b->func &&
+		el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+			   const struct mlxsw_event_listener *el,
+			   void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+
+	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+		if (__is_event_listener_equal(&el_item->el, el) &&
+		    el_item->priv == priv)
+			return el_item;
+	}
+	return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv)
+{
+	int err;
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (el_item)
+		return -EEXIST;
+	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+	if (!el_item)
+		return -ENOMEM;
+	el_item->el = *el;
+	el_item->priv = priv;
+
+	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+	if (err)
+		goto err_rx_listener_register;
+
+	/* No reason to save item if we did not manage to register an RX
+	 * listener for it.
+	 */
+	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+	return 0;
+
+err_rx_listener_register:
+	kfree(el_item);
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv)
+{
+	struct mlxsw_event_listener_item *el_item;
+	const struct mlxsw_rx_listener rxl = {
+		.func = mlxsw_core_event_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = el->trap_id,
+	};
+
+	el_item = __find_event_listener_item(mlxsw_core, el, priv);
+	if (!el_item)
+		return;
+	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+	list_del(&el_item->list);
+	kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+				      const struct mlxsw_reg_info *reg,
+				      char *payload,
+				      enum mlxsw_core_reg_access_type type)
+{
+	int err;
+	char *op_tlv;
+	struct sk_buff *skb;
+	struct mlxsw_tx_info tx_info = {
+		.local_port = MLXSW_PORT_CPU_PORT,
+		.is_emad = true,
+	};
+
+	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+	if (!skb)
+		return -ENOMEM;
+
+	mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+	mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+	dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+		mlxsw_core->emad.tid);
+	mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+	err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+	if (!err) {
+		op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+		memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+		       reg->len);
+
+		dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+			mlxsw_core->emad.tid - 1);
+		mlxsw_core_buf_dump_dbg(mlxsw_core,
+					mlxsw_core->emad.resp_skb->data,
+					mlxsw_core->emad.resp_skb->len);
+
+		dev_kfree_skb(mlxsw_core->emad.resp_skb);
+	}
+
+	return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+				     const struct mlxsw_reg_info *reg,
+				     char *payload,
+				     enum mlxsw_core_reg_access_type type)
+{
+	int err, n_retry;
+	char *in_mbox, *out_mbox, *tmp;
+
+	in_mbox = mlxsw_cmd_mbox_alloc();
+	if (!in_mbox)
+		return -ENOMEM;
+
+	out_mbox = mlxsw_cmd_mbox_alloc();
+	if (!out_mbox) {
+		err = -ENOMEM;
+		goto free_in_mbox;
+	}
+
+	mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+	n_retry = 0;
+retry:
+	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+	if (!err) {
+		err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+		if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+			goto retry;
+	}
+
+	if (!err)
+		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+		       reg->len);
+
+	mlxsw_core->emad.tid++;
+	mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+	mlxsw_cmd_mbox_free(in_mbox);
+	return err;
+}
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+				 const struct mlxsw_reg_info *reg,
+				 char *payload,
+				 enum mlxsw_core_reg_access_type type)
+{
+	u64 cur_tid;
+	int err;
+
+	if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+		dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+			reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+		return -EINTR;
+	}
+
+	cur_tid = mlxsw_core->emad.tid;
+	dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+		cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+		mlxsw_core_reg_access_type_str(type));
+
+	/* During initialization the EMAD interface is not available to us,
+	 * so we default to the command interface. We switch to the EMAD
+	 * interface after setting the appropriate traps.
+	 */
+	if (!mlxsw_core->emad.use_emad)
+		err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+						payload, type);
+	else
+		err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+						 payload, type);
+
+	if (err)
+		dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+			cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+			mlxsw_core_reg_access_type_str(type));
+
+	mutex_unlock(&mlxsw_core->emad.lock);
+	return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload)
+{
+	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload)
+{
+	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info)
+{
+	struct mlxsw_rx_listener_item *rxl_item;
+	const struct mlxsw_rx_listener *rxl;
+	struct mlxsw_core_pcpu_stats *pcpu_stats;
+	u8 local_port = rx_info->sys_port;
+	bool found = false;
+
+	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+			    __func__, rx_info->sys_port, rx_info->trap_id);
+
+	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+	    (local_port >= MLXSW_PORT_MAX_PORTS))
+		goto drop;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+		rxl = &rxl_item->rxl;
+		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+		     rxl->local_port == local_port) &&
+		    rxl->trap_id == rx_info->trap_id) {
+			found = true;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	if (!found)
+		goto drop;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->port_rx_packets[local_port]++;
+	pcpu_stats->port_rx_bytes[local_port] += skb->len;
+	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	rxl->func(skb, local_port, rxl_item->priv);
+	return;
+
+drop:
+	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+	else
+		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+	if (local_port >= MLXSW_PORT_MAX_PORTS)
+		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+	else
+		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+	dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+		   u32 in_mod, bool out_mbox_direct,
+		   char *in_mbox, size_t in_mbox_size,
+		   char *out_mbox, size_t out_mbox_size)
+{
+	u8 status;
+	int err;
+
+	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+	if (!mlxsw_core->bus->cmd_exec)
+		return -EOPNOTSUPP;
+
+	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+	if (in_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+	}
+
+	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+					opcode_mod, in_mod, out_mbox_direct,
+					in_mbox, in_mbox_size,
+					out_mbox, out_mbox_size, &status);
+
+	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod, status, mlxsw_cmd_status_str(status));
+	} else if (err == -ETIMEDOUT) {
+		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+			in_mod);
+	}
+
+	if (!err && out_mbox) {
+		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+	if (!mlxsw_core_dbg_root)
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+	debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644
index 0000000..1658084
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -0,0 +1,207 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind)	\
+	MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+				   const struct mlxsw_bus *mlxsw_bus,
+				   void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+	u8 local_port;
+	bool is_emad;
+};
+
+bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+				  const struct mlxsw_tx_info *tx_info);
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+	void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+	u8 local_port;
+	u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+	void (*func)(const struct mlxsw_reg_info *reg,
+		     char *payload, void *priv);
+	enum mlxsw_event_trap_id trap_id;
+};
+
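+/* A sketch of typical listener usage: a driver fills a struct
+ * mlxsw_rx_listener with a handler, a local port (or MLXSW_PORT_DONT_CARE)
+ * and a trap ID, registers it with mlxsw_core_rx_listener_register() and
+ * unregisters it on teardown with the same (rxl, priv) pair it registered
+ * with.
+ */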
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+				    const struct mlxsw_rx_listener *rxl,
+				    void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_rx_listener *rxl,
+				       void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+				       const struct mlxsw_event_listener *el,
+				       void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+					  const struct mlxsw_event_listener *el,
+					  void *priv);
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+	u16 sys_port;
+	int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+			    struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+	u8	used_type:1,
+		used_properties:1;
+	u8	type;
+	u8	properties;
+};
+
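+/* Each used_* flag below indicates that the driver supplies a value for the
+ * matching field and wants it applied to the device; fields whose flag is
+ * clear are presumably left at the device defaults.
+ */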
+struct mlxsw_config_profile {
+	u16	used_max_vepa_channels:1,
+		used_max_lag:1,
+		used_max_port_per_lag:1,
+		used_max_mid:1,
+		used_max_pgt:1,
+		used_max_system_port:1,
+		used_max_vlan_groups:1,
+		used_max_regions:1,
+		used_flood_tables:1,
+		used_flood_mode:1,
+		used_max_ib_mc:1,
+		used_max_pkey:1,
+		used_ar_sec:1,
+		used_adaptive_routing_group_cap:1;
+	u8	max_vepa_channels;
+	u16	max_lag;
+	u16	max_port_per_lag;
+	u16	max_mid;
+	u16	max_pgt;
+	u16	max_system_port;
+	u16	max_vlan_groups;
+	u16	max_regions;
+	u8	max_flood_tables;
+	u8	max_vid_flood_tables;
+	u8	flood_mode;
+	u16	max_ib_mc;
+	u16	max_pkey;
+	u8	ar_sec;
+	u16	adaptive_routing_group_cap;
+	u8	arn;
+	struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
+
+struct mlxsw_driver {
+	struct list_head list;
+	const char *kind;
+	struct module *owner;
+	size_t priv_size;
+	int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_bus_info *mlxsw_bus_info);
+	void (*fini)(void *driver_priv);
+	void (*txhdr_construct)(struct sk_buff *skb,
+				const struct mlxsw_tx_info *tx_info);
+	u8 txhdr_len;
+	const struct mlxsw_config_profile *profile;
+};
+
+struct mlxsw_bus {
+	const char *kind;
+	int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+		    const struct mlxsw_config_profile *profile);
+	void (*fini)(void *bus_priv);
+	bool (*skb_transmit_busy)(void *bus_priv,
+				  const struct mlxsw_tx_info *tx_info);
+	int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+			    const struct mlxsw_tx_info *tx_info);
+	int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+			u32 in_mod, bool out_mbox_direct,
+			char *in_mbox, size_t in_mbox_size,
+			char *out_mbox, size_t out_mbox_size,
+			u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+	const char *device_kind;
+	const char *device_name;
+	struct device *dev;
+	struct {
+		u16 major;
+		u16 minor;
+		u16 subminor;
+	} fw_rev;
+	u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+	u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644
index 0000000..97b6bb5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518	/* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10	/* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
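+
+/* An EMAD frame, as assembled by the core, is expected to consist of the
+ * Ethernet header described above followed by an operation TLV, a register
+ * TLV carrying the register payload and a terminating end TLV.
+ */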
+
+/* EMAD TLV Types */
+enum {
+	MLXSW_EMAD_TLV_TYPE_END,
+	MLXSW_EMAD_TLV_TYPE_OP,
+	MLXSW_EMAD_TLV_TYPE_DR,
+	MLXSW_EMAD_TLV_TYPE_REG,
+	MLXSW_EMAD_TLV_TYPE_USERDATA,
+	MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4		/* Length in u32 */
+
+enum {
+	MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+	MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+	MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+	MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+	MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+	MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+	MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+	MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+	MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+	MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+	switch (status) {
+	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+		return "operation performed";
+	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+		return "device is busy";
+	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+		return "version not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+		return "unknown TLV";
+	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+		return "register not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+		return "class not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+		return "method not supported";
+	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+		return "bad parameter";
+	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+		return "resource not available";
+	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+		return "acknowledged. retransmit";
+	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+		return "internal error";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+enum {
+	MLXSW_EMAD_OP_TLV_REQUEST,
+	MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+	MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+	MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+	MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+	MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1	/* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644
index 0000000..ffd55d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
+struct mlxsw_item {
+	unsigned short	offset;		/* bytes in container */
+	unsigned short	step;		/* step in bytes for indexed items */
+	unsigned short	in_step_offset; /* offset within one step */
+	unsigned char	shift;		/* shift in bits */
+	unsigned char	element_size;	/* size of element in bit array */
+	bool		no_real_shift;
+	union {
+		unsigned char	bits;
+		unsigned short	bytes;
+	} size;
+	const char	*name;
+};
+
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+		    size_t typesize)
+{
+	BUG_ON(index && !item->step);
+	if (item->offset % typesize != 0 ||
+	    item->step % typesize != 0 ||
+	    item->in_step_offset % typesize != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+		       item->name, item->offset, item->step,
+		       item->in_step_offset, typesize);
+		BUG();
+	}
+
+	return ((item->offset + item->step * index + item->in_step_offset) /
+		typesize);
+}
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 tmp;
+
+	tmp = be16_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u16 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u16));
+	__be16 *b = (__be16 *) buf;
+	u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u16 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be16_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 tmp;
+
+	tmp = be32_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u32 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index,
+						  sizeof(u32));
+	__be32 *b = (__be32 *) buf;
+	u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+	u32 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be32_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+				     unsigned short index)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+	__be64 *b = (__be64 *) buf;
+	u64 tmp;
+
+	tmp = be64_to_cpu(b[offset]);
+	tmp >>= item->shift;
+	tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+	if (item->no_real_shift)
+		tmp <<= item->shift;
+	return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+				      unsigned short index, u64 val)
+{
+	unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+	__be64 *b = (__be64 *) buf;
+	u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+	u64 tmp;
+
+	if (!item->no_real_shift)
+		val <<= item->shift;
+	val &= mask;
+	tmp = be64_to_cpu(b[offset]);
+	tmp &= ~mask;
+	tmp |= val;
+	b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+					    struct mlxsw_item *item)
+{
+	memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+					  struct mlxsw_item *item)
+{
+	memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+	u16 max_index, be_index;
+	u16 offset;		/* byte offset inside the array */
+
+	BUG_ON(index && !item->element_size);
+	if (item->offset % sizeof(u32) != 0 ||
+	    BITS_PER_BYTE % item->element_size != 0) {
+		pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+		       item->name, item->offset, item->element_size);
+		BUG();
+	}
+
+	max_index = (item->size.bytes << 3) / item->element_size - 1;
+	be_index = max_index - index;
+	offset = be_index * item->element_size >> 3;
+	*shift = index % (BITS_PER_BYTE / item->element_size) * item->element_size;
+
+	return item->offset + offset;
+}
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+					    u16 index)
+{
+	u8 shift, tmp;
+	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+	tmp = buf[offset];
+	tmp >>= shift;
+	tmp &= GENMASK(item->element_size - 1, 0);
+	return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+					      u16 index, u8 val)
+{
+	u8 shift, tmp;
+	u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+	u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+	val <<= shift;
+	val &= mask;
+	tmp = buf[offset];
+	tmp &= ~mask;
+	tmp |= val;
+	buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname)					\
+	mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
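+
+/* As an illustrative sketch (hypothetical names): a 4-bit field "foo" at
+ * byte offset 0x04, bit shift 8 of a register "bar" would be declared as
+ *
+ *	MLXSW_ITEM32(reg, bar, foo, 0x04, 8, 4);
+ *
+ * which generates mlxsw_reg_bar_foo_get(buf) and
+ * mlxsw_reg_bar_foo_set(buf, val) accessors over a big-endian payload buffer.
+ */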
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{										\
+	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
+			     _step, _instepoffset, _norealshift)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u16								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u16 val)				\
+{										\
+	__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{										\
+	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);	\
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,	\
+			     _step, _instepoffset, _norealshift)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u32								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u32 val)				\
+{										\
+	__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.shift = _shift,							\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)		\
+{										\
+	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0);	\
+}										\
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{										\
+	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0,	val);	\
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,		\
+			     _sizebits, _step, _instepoffset, _norealshift)	\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.step = _step,								\
+	.in_step_offset = _instepoffset,					\
+	.shift = _shift,							\
+	.no_real_shift = _norealshift,						\
+	.size = {.bits = _sizebits,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u64								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)	\
+{										\
+	return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),	\
+				  index);					\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,	\
+					  u64 val)				\
+{										\
+	__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),		\
+			   index, val);						\
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)		\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)		\
+{										\
+	__mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)		\
+{										\
+	__mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));	\
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,	\
+			     _element_size)					\
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {			\
+	.offset = _offset,							\
+	.element_size = _element_size,						\
+	.size = {.bytes = _sizebytes,},						\
+	.name = #_type "_" #_cname "_" #_iname,					\
+};										\
+static inline u8								\
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)			\
+{										\
+	return __mlxsw_item_bit_array_get(buf,					\
+					  &__ITEM_NAME(_type, _cname, _iname),	\
+					  index);				\
+}										\
+static inline void								\
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)		\
+{										\
+	return __mlxsw_item_bit_array_set(buf,					\
+					  &__ITEM_NAME(_type, _cname, _iname),	\
+					  index, val);				\
+}										\
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644
index 0000000..462cea3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -0,0 +1,1826 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+	{0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+	switch (id->device) {
+	case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+		return MLXSW_DEVICE_KIND_SWITCHX2;
+	default:
+		BUG();
+	}
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+	MLXSW_PCI_QUEUE_TYPE_SDQ,
+	MLXSW_PCI_QUEUE_TYPE_RDQ,
+	MLXSW_PCI_QUEUE_TYPE_CQ,
+	MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+	switch (q_type) {
+	case MLXSW_PCI_QUEUE_TYPE_SDQ:
+		return "sdq";
+	case MLXSW_PCI_QUEUE_TYPE_RDQ:
+		return "rdq";
+	case MLXSW_PCI_QUEUE_TYPE_CQ:
+		return "cq";
+	case MLXSW_PCI_QUEUE_TYPE_EQ:
+		return "eq";
+	}
+	BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+	0, /* unused */
+	0, /* unused */
+	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+	char *buf;
+	dma_addr_t mapaddr;
+	size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+	char *elem; /* pointer to actual dma mapped element mem chunk */
+	union {
+		struct {
+			struct sk_buff *skb;
+		} sdq;
+		struct {
+			struct sk_buff *skb;
+		} rdq;
+	} u;
+};
+
+struct mlxsw_pci_queue {
+	spinlock_t lock; /* for queue accesses */
+	struct mlxsw_pci_mem_item mem_item;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	u16 producer_counter;
+	u16 consumer_counter;
+	u16 count; /* number of elements in queue */
+	u8 num; /* queue number */
+	u8 elem_size; /* size of one element */
+	enum mlxsw_pci_queue_type type;
+	struct tasklet_struct tasklet; /* queue processing tasklet */
+	struct mlxsw_pci *pci;
+	union {
+		struct {
+			u32 comp_sdq_count;
+			u32 comp_rdq_count;
+		} cq;
+		struct {
+			u32 ev_cmd_count;
+			u32 ev_comp_count;
+			u32 ev_other_count;
+		} eq;
+	} u;
+};
+
+struct mlxsw_pci_queue_type_group {
+	struct mlxsw_pci_queue *q;
+	u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+	struct pci_dev *pdev;
+	u8 __iomem *hw_addr;
+	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+	u32 doorbell_offset;
+	struct msix_entry msix_entry;
+	struct mlxsw_core *core;
+	struct {
+		u16 num_pages;
+		struct mlxsw_pci_mem_item *items;
+	} fw_area;
+	struct {
+		struct mlxsw_pci_mem_item out_mbox;
+		struct mlxsw_pci_mem_item in_mbox;
+		struct mutex lock; /* Lock access to command registers */
+		bool nopoll;
+		wait_queue_head_t wait;
+		bool wait_done;
+		struct {
+			u8 status;
+			u64 out_param;
+		} comp;
+	} cmd;
+	struct mlxsw_bus_info bus_info;
+	struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+	tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+					size_t elem_size, int elem_index)
+{
+	return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+	return &q->elem_info[elem_index];
+}
+
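+/* Return the element info at the producer index, or NULL when the ring is
+ * full, i.e. the producer counter is a whole ring ahead of the consumer
+ * counter.
+ */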
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+	int index = q->producer_counter & (q->count - 1);
+
+	if ((q->producer_counter - q->consumer_counter) == q->count)
+		return NULL;
+	return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+	int index = q->consumer_counter & (q->count - 1);
+
+	return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
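+/* Ring elements carry an ownership bit that alternates between passes over
+ * the ring. Since q->count is a power of two, (q->consumer_counter & q->count)
+ * gives the parity of the current pass; an element is still hardware owned
+ * while its ownership bit does not match that parity.
+ */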
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+	return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+					 u32 (*get_elem_owner_func)(char *))
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = get_elem_owner_func(elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+			       enum mlxsw_pci_queue_type q_type)
+{
+	return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+				  enum mlxsw_pci_queue_type q_type)
+{
+	struct mlxsw_pci_queue_type_group *queue_group;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+	return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+		      enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+	return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+						 u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci,
+				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+						 u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci,
+				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+						u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+						u8 q_num)
+{
+	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+					   struct mlxsw_pci_queue *q,
+					   u16 val)
+{
+	mlxsw_pci_write32(mlxsw_pci,
+			  DOORBELL(mlxsw_pci->doorbell_offset,
+				   mlxsw_pci_doorbell_type_offset[q->type],
+				   q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+					       struct mlxsw_pci_queue *q,
+					       u16 val)
+{
+	mlxsw_pci_write32(mlxsw_pci,
+			  DOORBELL(mlxsw_pci->doorbell_offset,
+				   mlxsw_pci_doorbell_arm_type_offset[q->type],
+				   q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+						   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+						   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+				       q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+					   struct mlxsw_pci_queue *q)
+{
+	wmb(); /* ensure all writes are done before we ring a bell */
+	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+					     int page_index)
+{
+	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			      struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->producer_counter = 0;
+	q->consumer_counter = 0;
+
+	/* Set CQ of the same number as this SDQ. */
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+	}
+
+	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+			       struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+	seq_printf(file, hdr);
+	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %5d\n",
+			   i, q->producer_counter, q->consumer_counter,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+				  int index, char *frag_data, size_t frag_len,
+				  int direction)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	dma_addr_t mapaddr;
+
+	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+		if (net_ratelimit())
+			dev_err(&pdev->dev, "failed to dma map tx frag\n");
+		return -EIO;
+	}
+	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+	return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+				     int index, int direction)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+	if (!frag_len)
+		return;
+	pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+				   struct mlxsw_pci_queue_elem_info *elem_info)
+{
+	size_t buf_len = MLXSW_PORT_MAX_MTU;
+	char *wqe = elem_info->elem;
+	struct sk_buff *skb;
+	int err;
+
+	elem_info->u.rdq.skb = NULL;
+	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+	if (!skb)
+		return -ENOMEM;
+
+	/* Assume that wqe was previously zeroed. */
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     buf_len, DMA_FROM_DEVICE);
+	if (err)
+		goto err_frag_map;
+
+	elem_info->u.rdq.skb = skb;
+	return 0;
+
+err_frag_map:
+	dev_kfree_skb_any(skb);
+	return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+				   struct mlxsw_pci_queue_elem_info *elem_info)
+{
+	struct sk_buff *skb;
+	char *wqe;
+
+	skb = elem_info->u.rdq.skb;
+	wqe = elem_info->elem;
+
+	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+	dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			      struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	int i;
+	int err;
+
+	q->producer_counter = 0;
+	q->consumer_counter = 0;
+
+	/* Set CQ of the same number as this RDQ, with a base above
+	 * MLXSW_PCI_SDQS_COUNT, as the lower CQ numbers are assigned to SDQs.
+	 */
+	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+	}
+
+	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	for (i = 0; i < q->count; i++) {
+		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+		BUG_ON(!elem_info);
+		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+		if (err)
+			goto rollback;
+		/* Everything is set up, ring doorbell to pass elem to HW */
+		q->producer_counter++;
+		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	}
+
+	return 0;
+
+rollback:
+	for (i--; i >= 0; i--) {
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+	}
+	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+	return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+			       struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	int i;
+
+	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+	for (i = 0; i < q->count; i++) {
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+	}
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+	seq_printf(file, hdr);
+	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %5d\n",
+			   i, q->producer_counter, q->consumer_counter,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			     struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->consumer_counter = 0;
+
+	for (i = 0; i < q->count; i++) {
+		char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+		mlxsw_pci_cqe_owner_set(elem, 1);
+	}
+
+	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+	}
+	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+			      struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";
+
+	seq_printf(file, hdr);
+	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_cq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %10d %5d\n",
+			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
+			   q->u.cq.comp_rdq_count, q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+				     struct mlxsw_pci_queue *q,
+				     u16 consumer_counter_limit,
+				     char *cqe)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock(&q->lock);
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	skb = elem_info->u.sdq.skb;
+	wqe = elem_info->elem;
+	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+	dev_kfree_skb_any(skb);
+	elem_info->u.sdq.skb = NULL;
+
+	if (q->consumer_counter++ != consumer_counter_limit)
+		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+	spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+				     struct mlxsw_pci_queue *q,
+				     u16 consumer_counter_limit,
+				     char *cqe)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	struct sk_buff *skb;
+	struct mlxsw_rx_info rx_info;
+	u16 byte_count;
+	int err;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	skb = elem_info->u.rdq.skb;
+	if (!skb)
+		return;
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+	if (q->consumer_counter++ != consumer_counter_limit)
+		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+	/* We do not support LAG yet */
+	if (mlxsw_pci_cqe_lag_get(cqe))
+		goto drop;
+
+	rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+	if (mlxsw_pci_cqe_crc_get(cqe))
+		byte_count -= ETH_FCS_LEN;
+	skb_put(skb, byte_count);
+	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+	memset(wqe, 0, q->elem_size);
+	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+	if (err && net_ratelimit())
+		dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+	/* Everything is set up, ring doorbell to pass elem to HW */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+	return;
+
+drop:
+	dev_kfree_skb_any(skb);
+	goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
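+/* Completion queue tasklet: drains up to half of the ring per run, handing
+ * send completions to the SDQ handler and receive completions to the RDQ
+ * handler, then rings and re-arms the CQ doorbells.
+ */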
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	char *cqe;
+	int items = 0;
+	int credits = q->count >> 1;
+
+	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+		if (sendq) {
+			struct mlxsw_pci_queue *sdq;
+
+			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+						 wqe_counter, cqe);
+			q->u.cq.comp_sdq_count++;
+		} else {
+			struct mlxsw_pci_queue *rdq;
+
+			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+						 wqe_counter, cqe);
+			q->u.cq.comp_rdq_count++;
+		}
+		if (++items == credits)
+			break;
+	}
+	if (items) {
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	}
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+			     struct mlxsw_pci_queue *q)
+{
+	int i;
+	int err;
+
+	q->consumer_counter = 0;
+
+	for (i = 0; i < q->count; i++) {
+		char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+		mlxsw_pci_eqe_owner_set(elem, 1);
+	}
+
+	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+	}
+	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+	if (err)
+		return err;
+	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+			      struct mlxsw_pci_queue *q)
+{
+	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+	struct mlxsw_pci_queue *q;
+	int i;
+	static const char hdr[] =
+		"NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";
+
+	seq_printf(file, hdr);
+	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+		q = mlxsw_pci_eq_get(mlxsw_pci, i);
+		spin_lock_bh(&q->lock);
+		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
+			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+			   q->count);
+		spin_unlock_bh(&q->lock);
+	}
+	return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+	mlxsw_pci->cmd.comp.out_param =
+		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+	mlxsw_pci->cmd.wait_done = true;
+	wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+	struct mlxsw_pci *mlxsw_pci = q->pci;
+	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+	char *eqe;
+	u8 cqn;
+	bool cq_handle = false;
+	int items = 0;
+	int credits = q->count >> 1;
+
+	memset(&active_cqns, 0, sizeof(active_cqns));
+
+	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+		switch (event_type) {
+		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+			q->u.eq.ev_cmd_count++;
+			break;
+		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+			cqn = mlxsw_pci_eqe_cqn_get(eqe);
+			set_bit(cqn, active_cqns);
+			cq_handle = true;
+			q->u.eq.ev_comp_count++;
+			break;
+		default:
+			q->u.eq.ev_other_count++;
+		}
+		if (++items == credits)
+			break;
+	}
+	if (items) {
+		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+	}
+
+	if (!cq_handle)
+		return;
+	for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+		mlxsw_pci_queue_tasklet_schedule(q);
+	}
+}
+
+struct mlxsw_pci_queue_ops {
+	const char *name;
+	enum mlxsw_pci_queue_type type;
+	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+		    struct mlxsw_pci_queue *q);
+	void (*fini)(struct mlxsw_pci *mlxsw_pci,
+		     struct mlxsw_pci_queue *q);
+	void (*tasklet)(unsigned long data);
+	int (*dbg_read)(struct seq_file *s, void *data);
+	u16 elem_count;
+	u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
+	.init		= mlxsw_pci_sdq_init,
+	.fini		= mlxsw_pci_sdq_fini,
+	.dbg_read	= mlxsw_pci_sdq_dbg_read,
+	.elem_count	= MLXSW_PCI_WQE_COUNT,
+	.elem_size	= MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
+	.init		= mlxsw_pci_rdq_init,
+	.fini		= mlxsw_pci_rdq_fini,
+	.dbg_read	= mlxsw_pci_rdq_dbg_read,
+	.elem_count	= MLXSW_PCI_WQE_COUNT,
+	.elem_size	= MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
+	.init		= mlxsw_pci_cq_init,
+	.fini		= mlxsw_pci_cq_fini,
+	.tasklet	= mlxsw_pci_cq_tasklet,
+	.dbg_read	= mlxsw_pci_cq_dbg_read,
+	.elem_count	= MLXSW_PCI_CQE_COUNT,
+	.elem_size	= MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
+	.init		= mlxsw_pci_eq_init,
+	.fini		= mlxsw_pci_eq_fini,
+	.tasklet	= mlxsw_pci_eq_tasklet,
+	.dbg_read	= mlxsw_pci_eq_dbg_read,
+	.elem_count	= MLXSW_PCI_EQE_COUNT,
+	.elem_size	= MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				const struct mlxsw_pci_queue_ops *q_ops,
+				struct mlxsw_pci_queue *q, u8 q_num)
+{
+	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+	int i;
+	int err;
+
+	spin_lock_init(&q->lock);
+	q->num = q_num;
+	q->count = q_ops->elem_count;
+	q->elem_size = q_ops->elem_size;
+	q->type = q_ops->type;
+	q->pci = mlxsw_pci;
+
+	if (q_ops->tasklet)
+		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+	mem_item->size = MLXSW_PCI_AQ_SIZE;
+	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+					     mem_item->size,
+					     &mem_item->mapaddr);
+	if (!mem_item->buf)
+		return -ENOMEM;
+	memset(mem_item->buf, 0, mem_item->size);
+
+	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+	if (!q->elem_info) {
+		err = -ENOMEM;
+		goto err_elem_info_alloc;
+	}
+
+	/* Fill the elem_info array with pointers to the DMA-mapped queue
+	 * elements for easy access later.
+	 */
+	for (i = 0; i < q->count; i++) {
+		struct mlxsw_pci_queue_elem_info *elem_info;
+
+		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+		elem_info->elem =
+			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+	}
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = q_ops->init(mlxsw_pci, mbox, q);
+	if (err)
+		goto err_q_ops_init;
+	return 0;
+
+err_q_ops_init:
+	kfree(q->elem_info);
+err_elem_info_alloc:
+	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+			    mem_item->buf, mem_item->mapaddr);
+	return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+				 const struct mlxsw_pci_queue_ops *q_ops,
+				 struct mlxsw_pci_queue *q)
+{
+	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+	q_ops->fini(mlxsw_pci, q);
+	kfree(q->elem_info);
+	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+			    mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				      const struct mlxsw_pci_queue_ops *q_ops,
+				      u8 num_qs)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	struct mlxsw_pci_queue_type_group *queue_group;
+	char tmp[16];
+	int i;
+	int err;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+	if (!queue_group->q)
+		return -ENOMEM;
+
+	for (i = 0; i < num_qs; i++) {
+		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+					   &queue_group->q[i], i);
+		if (err)
+			goto err_queue_init;
+	}
+	queue_group->count = num_qs;
+
+	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+				    q_ops->dbg_read);
+
+	return 0;
+
+err_queue_init:
+	for (i--; i >= 0; i--)
+		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+	kfree(queue_group->q);
+	return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+				       const struct mlxsw_pci_queue_ops *q_ops)
+{
+	struct mlxsw_pci_queue_type_group *queue_group;
+	int i;
+
+	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+	for (i = 0; i < queue_group->count; i++)
+		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+	kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	u8 num_sdqs;
+	u8 sdq_log2sz;
+	u8 num_rdqs;
+	u8 rdq_log2sz;
+	u8 num_cqs;
+	u8 cq_log2sz;
+	u8 num_eqs;
+	u8 eq_log2sz;
+	int err;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+	if (err)
+		return err;
+
+	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+	if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+	    (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+	    (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+	    (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+		dev_err(&pdev->dev, "Unsupported number of queues\n");
+		return -EINVAL;
+	}
+
+	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+		return -EINVAL;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+					 num_eqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize event queues\n");
+		return err;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+					 num_cqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+		goto err_cqs_init;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+					 num_sdqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+		goto err_sdqs_init;
+	}
+
+	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+					 num_rdqs);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+		goto err_rdqs_init;
+	}
+
+	/* Up to this point the command interface had to be polled; now that
+	 * the queues (including the event queues) are initialized, command
+	 * completions can be delivered as events.
+	 */
+	mlxsw_pci->cmd.nopoll = true;
+	return 0;
+
+err_rdqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+	return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci->cmd.nopoll = false;
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+				     char *mbox, int index,
+				     const struct mlxsw_swid_config *swid)
+{
+	u8 mask = 0;
+
+	if (swid->used_type) {
+		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+			mbox, index, swid->type);
+		mask |= 1;
+	}
+	if (swid->used_properties) {
+		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+			mbox, index, swid->properties);
+		mask |= 2;
+	}
+	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				    const struct mlxsw_config_profile *profile)
+{
+	int i;
+
+	mlxsw_cmd_mbox_zero(mbox);
+
+	if (profile->used_max_vepa_channels) {
+		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+			mbox, profile->max_vepa_channels);
+	}
+	if (profile->used_max_lag) {
+		mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_lag_set(
+			mbox, profile->max_lag);
+	}
+	if (profile->used_max_port_per_lag) {
+		mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+			mbox, profile->max_port_per_lag);
+	}
+	if (profile->used_max_mid) {
+		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_mid_set(
+			mbox, profile->max_mid);
+	}
+	if (profile->used_max_pgt) {
+		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_pgt_set(
+			mbox, profile->max_pgt);
+	}
+	if (profile->used_max_system_port) {
+		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_system_port_set(
+			mbox, profile->max_system_port);
+	}
+	if (profile->used_max_vlan_groups) {
+		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+			mbox, profile->max_vlan_groups);
+	}
+	if (profile->used_max_regions) {
+		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_regions_set(
+			mbox, profile->max_regions);
+	}
+	if (profile->used_flood_tables) {
+		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+			mbox, profile->max_flood_tables);
+		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+			mbox, profile->max_vid_flood_tables);
+	}
+	if (profile->used_flood_mode) {
+		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_flood_mode_set(
+			mbox, profile->flood_mode);
+	}
+	if (profile->used_max_ib_mc) {
+		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+			mbox, profile->max_ib_mc);
+	}
+	if (profile->used_max_pkey) {
+		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_max_pkey_set(
+			mbox, profile->max_pkey);
+	}
+	if (profile->used_ar_sec) {
+		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_ar_sec_set(
+			mbox, profile->ar_sec);
+	}
+	if (profile->used_adaptive_routing_group_cap) {
+		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+			mbox, 1);
+		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+			mbox, profile->adaptive_routing_group_cap);
+	}
+
+	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+						     &profile->swid_config[i]);
+
+	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+	int err;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+	if (err)
+		return err;
+	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+	return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+				  u16 num_pages)
+{
+	struct mlxsw_pci_mem_item *mem_item;
+	int i;
+	int err;
+
+	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+					   GFP_KERNEL);
+	if (!mlxsw_pci->fw_area.items)
+		return -ENOMEM;
+	mlxsw_pci->fw_area.num_pages = num_pages;
+
+	mlxsw_cmd_mbox_zero(mbox);
+	for (i = 0; i < num_pages; i++) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		mem_item->size = MLXSW_PCI_PAGE_SIZE;
+		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+						     mem_item->size,
+						     &mem_item->mapaddr);
+		if (!mem_item->buf) {
+			err = -ENOMEM;
+			goto err_alloc;
+		}
+		mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+	}
+
+	err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+	if (err)
+		goto err_cmd_map_fa;
+
+	return 0;
+
+err_cmd_map_fa:
+err_alloc:
+	for (i--; i >= 0; i--) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+				    mem_item->buf, mem_item->mapaddr);
+	}
+	kfree(mlxsw_pci->fw_area.items);
+	return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+	struct mlxsw_pci_mem_item *mem_item;
+	int i;
+
+	mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+	for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+		mem_item = &mlxsw_pci->fw_area.items[i];
+
+		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+				    mem_item->buf, mem_item->mapaddr);
+	}
+	kfree(mlxsw_pci->fw_area.items);
+}
+
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+	struct mlxsw_pci *mlxsw_pci = dev_id;
+	struct mlxsw_pci_queue *q;
+	int i;
+
+	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+		q = mlxsw_pci_eq_get(mlxsw_pci, i);
+		mlxsw_pci_queue_tasklet_schedule(q);
+	}
+	return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
+				struct mlxsw_pci_mem_item *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	int err = 0;
+
+	mbox->size = MLXSW_CMD_MBOX_SIZE;
+	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
+					 &mbox->mapaddr);
+	if (!mbox->buf) {
+		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
+static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
+				struct mlxsw_pci_mem_item *mbox)
+{
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+
+	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+			    mbox->mapaddr);
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+			  const struct mlxsw_config_profile *profile)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct pci_dev *pdev = mlxsw_pci->pdev;
+	char *mbox;
+	u16 num_pages;
+	int err;
+
+	mutex_init(&mlxsw_pci->cmd.lock);
+	init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+	mlxsw_pci->core = mlxsw_core;
+
+	mbox = mlxsw_cmd_mbox_alloc();
+	if (!mbox)
+		return -ENOMEM;
+
+	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+	if (err)
+		goto mbox_put;
+
+	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+	if (err)
+		goto err_out_mbox_alloc;
+
+	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+	if (err)
+		goto err_query_fw;
+
+	mlxsw_pci->bus_info.fw_rev.major =
+		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+	mlxsw_pci->bus_info.fw_rev.minor =
+		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+	mlxsw_pci->bus_info.fw_rev.subminor =
+		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+		err = -EINVAL;
+		goto err_iface_rev;
+	}
+	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+		err = -EINVAL;
+		goto err_doorbell_page_bar;
+	}
+
+	mlxsw_pci->doorbell_offset =
+		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+	if (err)
+		goto err_fw_area_init;
+
+	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+	if (err)
+		goto err_boardinfo;
+
+	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+	if (err)
+		goto err_config_profile;
+
+	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+	if (err)
+		goto err_aqs_init;
+
+	err = request_irq(mlxsw_pci->msix_entry.vector,
+			  mlxsw_pci_eq_irq_handler, 0,
+			  mlxsw_pci_driver_name, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "IRQ request failed\n");
+		goto err_request_eq_irq;
+	}
+
+	goto mbox_put;
+
+err_request_eq_irq:
+	mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+	mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+err_out_mbox_alloc:
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+mbox_put:
+	mlxsw_cmd_mbox_free(mbox);
+	return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+	mlxsw_pci_aqs_fini(mlxsw_pci);
+	mlxsw_pci_fw_area_fini(mlxsw_pci);
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+}
+
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+		   const struct mlxsw_tx_info *tx_info)
+{
+	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
+
+static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
+					const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+
+	return !mlxsw_pci_queue_elem_info_producer_get(q);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+				  const struct mlxsw_tx_info *tx_info)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	struct mlxsw_pci_queue *q;
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *wqe;
+	int i;
+	int err;
+
+	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+		err = skb_linearize(skb);
+		if (err)
+			return err;
+	}
+
+	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+	spin_lock_bh(&q->lock);
+	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+	if (!elem_info) {
+		/* queue is full */
+		err = -EAGAIN;
+		goto unlock;
+	}
+	elem_info->u.sdq.skb = skb;
+
+	wqe = elem_info->elem;
+	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (err)
+		goto unlock;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+					     skb_frag_address(frag),
+					     skb_frag_size(frag),
+					     DMA_TO_DEVICE);
+		if (err)
+			goto unmap_frags;
+	}
+
+	/* Set the byte count of unused scatter/gather entries to zero. */
+	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+	/* Everything is set up, ring producer doorbell to get HW going */
+	q->producer_counter++;
+	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+	goto unlock;
+
+unmap_frags:
+	for (; i >= 0; i--)
+		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+	spin_unlock_bh(&q->lock);
+	return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+			      u32 in_mod, bool out_mbox_direct,
+			      char *in_mbox, size_t in_mbox_size,
+			      char *out_mbox, size_t out_mbox_size,
+			      u8 *p_status)
+{
+	struct mlxsw_pci *mlxsw_pci = bus_priv;
+	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
+	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
+	bool evreq = mlxsw_pci->cmd.nopoll;
+	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+	int err;
+
+	*p_status = MLXSW_CMD_STATUS_OK;
+
+	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+	if (err)
+		return err;
+
+	if (in_mbox)
+		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+	*p_wait_done = false;
+
+	wmb(); /* all needs to be written before we write control register */
+	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+			  MLXSW_PCI_CIR_CTRL_GO_BIT |
+			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+			  opcode);
+
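+	/* Until the event queues are up (cmd.nopoll is false) completion is
+	 * detected by polling the GO bit; afterwards the command completion
+	 * event delivered via the EQ is waited for instead.
+	 */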
+	if (!evreq) {
+		unsigned long end;
+
+		end = jiffies + timeout;
+		do {
+			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+				*p_wait_done = true;
+				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+				break;
+			}
+			cond_resched();
+		} while (time_before(jiffies, end));
+	} else {
+		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+		*p_status = mlxsw_pci->cmd.comp.status;
+	}
+
+	err = 0;
+	if (*p_wait_done) {
+		if (*p_status)
+			err = -EIO;
+	} else {
+		err = -ETIMEDOUT;
+	}
+
+	if (!err && out_mbox && out_mbox_direct) {
+		/* Some commands do not use the output parameter as a mailbox
+		 * address but store their output directly in registers. In
+		 * that case, copy the registers into the mailbox buffer.
+		 */
+		__be32 tmp;
+
+		if (!evreq) {
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_HI));
+			memcpy(out_mbox, &tmp, sizeof(tmp));
+			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+							   CIR_OUT_PARAM_LO));
+			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+		}
+	} else if (!err && out_mbox)
+		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
+
+	mutex_unlock(&mlxsw_pci->cmd.lock);
+
+	return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+	.kind			= "pci",
+	.init			= mlxsw_pci_init,
+	.fini			= mlxsw_pci_fini,
+	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
+	.skb_transmit		= mlxsw_pci_skb_transmit,
+	.cmd_exec		= mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+	/* Current firmware does not let us know when the reset is done.
+	 * So we just wait here for a constant time and hope for the best.
+	 */
+	msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+	return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct mlxsw_pci *mlxsw_pci;
+	int err;
+
+	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+	if (!mlxsw_pci)
+		return -ENOMEM;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_device failed\n");
+		goto err_pci_enable_device;
+	}
+
+	err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "pci_request_regions failed\n");
+		goto err_pci_request_regions;
+	}
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+			goto err_pci_set_dma_mask;
+		}
+	}
+
+	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+		dev_err(&pdev->dev, "invalid PCI region size\n");
+		err = -EINVAL;
+		goto err_pci_resource_len_check;
+	}
+
+	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+				     pci_resource_len(pdev, 0));
+	if (!mlxsw_pci->hw_addr) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto err_ioremap;
+	}
+	pci_set_master(pdev);
+
+	mlxsw_pci->pdev = pdev;
+	pci_set_drvdata(pdev, mlxsw_pci);
+
+	err = mlxsw_pci_sw_reset(mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "Software reset failed\n");
+		goto err_sw_reset;
+	}
+
+	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+	if (err) {
+		dev_err(&pdev->dev, "MSI-X init failed\n");
+		goto err_msix_init;
+	}
+
+	mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+	mlxsw_pci->bus_info.dev = &pdev->dev;
+
+	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+						mlxsw_pci_dbg_root);
+	if (!mlxsw_pci->dbg_dir) {
+		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+		err = -ENOMEM;
+		goto err_dbg_create_dir;
+	}
+
+	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+					     &mlxsw_pci_bus, mlxsw_pci);
+	if (err) {
+		dev_err(&pdev->dev, "cannot register bus device\n");
+		goto err_bus_device_register;
+	}
+
+	return 0;
+
+err_bus_device_register:
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+	pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+	iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+	pci_release_regions(pdev);
+err_pci_request_regions:
+	pci_disable_device(pdev);
+err_pci_enable_device:
+	kfree(mlxsw_pci);
+	return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+	pci_disable_msix(mlxsw_pci->pdev);
+	iounmap(mlxsw_pci->hw_addr);
+	pci_release_regions(mlxsw_pci->pdev);
+	pci_disable_device(mlxsw_pci->pdev);
+	kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+	.name		= mlxsw_pci_driver_name,
+	.id_table	= mlxsw_pci_id_table,
+	.probe		= mlxsw_pci_probe,
+	.remove		= mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+	int err;
+
+	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+	if (!mlxsw_pci_dbg_root)
+		return -ENOMEM;
+	err = pci_register_driver(&mlxsw_pci_driver);
+	if (err)
+		goto err_register_driver;
+	return 0;
+
+err_register_driver:
+	debugfs_remove_recursive(mlxsw_pci_dbg_root);
+	return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+	pci_unregister_driver(&mlxsw_pci_driver);
+	debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644
index 0000000..1ef9664
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
@@ -0,0 +1,227 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2	0xc738
+#define MLXSW_PCI_BAR0_SIZE		(1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE		4096
+
+#define MLXSW_PCI_CIR_BASE			0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI		MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER		(MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI		(MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO		(MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN			(MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL			(MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT		BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT		BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT	12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT		24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS		1000
+
+#define MLXSW_PCI_SW_RESET			0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET		0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET		0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET		0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET		0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET	0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET	0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num)	\
+	((offset) + (type_offset) + (num) * 4)
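+
+/* For example, with the offsets above, the doorbell for CQ number 3 works out
+ * to (offset) + MLXSW_PCI_DOORBELL_CQ_OFFSET + 3 * 4, i.e. offset + 0x40C;
+ * each queue type thus gets its own window of 4-byte doorbell slots.
+ */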
+
+#define MLXSW_PCI_RDQS_COUNT	24
+#define MLXSW_PCI_SDQS_COUNT	24
+#define MLXSW_PCI_CQS_COUNT	(MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT	2
+#define MLXSW_PCI_EQ_ASYNC_NUM	0
+#define MLXSW_PCI_EQ_COMP_NUM	1
+
+#define MLXSW_PCI_AQ_PAGES	8
+#define MLXSW_PCI_AQ_SIZE	(MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT	0x80
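+
+/* Note: MLXSW_PCI_AQ_PAGES pages of MLXSW_PCI_PAGE_SIZE bytes give each queue
+ * 32KB of descriptor memory, i.e. 1024 WQEs per SDQ/RDQ and 2048 CQEs/EQEs
+ * per CQ/EQ.
+ */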
+
+#define MLXSW_PCI_WQE_SG_ENTRIES	3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET	0xA
+
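+/* Each MLXSW_ITEM*() invocation below expands (via item.h) into inline
+ * mlxsw_pci_<item>_get()/_set() accessors for the named bit field, e.g.
+ * mlxsw_pci_wqe_c_set() used when building transmit WQEs.
+ */
+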
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Processing packets must not exceed 2K in size (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather entries must be 2-byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
+
+/* pci_cqe_lag
+ * Packet arrives from a port which is a LAG
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the two additional
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for send CQEs.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_crc
+ * Length includes CRC. Indicates that the length field includes
+ * the packet's CRC.
+ */
+MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP	0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD	0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644
index 0000000..726f543
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU		10000
+
+#define MLXSW_PORT_DEFAULT_VID		1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT	255
+#define MLXSW_PORT_SWID_ALL_SWIDS	254
+#define MLXSW_PORT_SWID_TYPE_ETH	2
+
+#define MLXSW_PORT_MID			0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS	0x40
+#define MLXSW_PORT_MAX_PORTS		MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET	10
+#define MLXSW_PORT_PHY_BITS_OFFSET	4
+#define MLXSW_PORT_PHY_BITS_MASK	(MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT		0x0
+
+#define MLXSW_PORT_DONT_CARE		(MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+	MLXSW_PORT_ADMIN_STATUS_UP = 1,
+	MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+	MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+	MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+	MLXSW_PORT_OPER_STATUS_UP = 1,
+	MLXSW_PORT_OPER_STATUS_DOWN = 2,
+	MLXSW_PORT_OPER_STATUS_FAILURE = 4,	/* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644
index 0000000..096e1c1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -0,0 +1,1349 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+	u16 id;
+	u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
+
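+/* Each register below follows the same pattern: a mlxsw_reg_info descriptor
+ * (register ID and payload length), MLXSW_ITEM*() field accessors and an
+ * inline mlxsw_reg_<reg>_pack() helper that zeroes a caller-provided payload
+ * buffer of MLXSW_REG_LEN(<reg>) bytes and fills in the fields. The packed
+ * payload is then issued through the driver core's register access path,
+ * which is defined outside this header.
+ */
+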
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+	.id = MLXSW_REG_SGCR_ID,
+	.len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
+ * packets and the IGMP snooping entries will be ignored.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+	MLXSW_REG_ZERO(sgcr, payload);
+	mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+	.id = MLXSW_REG_SPAD_ID,
+	.len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
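+
+/* For example (purely illustrative address): with base_mac 7c:fe:90:00:00:00,
+ * the partition with swid 1 would use 7c:fe:90:00:00:01 per the formula above.
+ */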
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In a multi-chip configuration, each device should maintain a mapping from
+ * Multicast ID (MID) to a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of
+ * the FDB. The MID record maps from a MID, which is a unique identifier of
+ * the multicast group within the stacking domain, to the list of local ports
+ * into which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+	.id = MLXSW_REG_SMID_ID,
+	.len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+	MLXSW_REG_ZERO(smid, payload);
+	mlxsw_reg_smid_swid_set(payload, 0);
+	mlxsw_reg_smid_mid_set(payload, mid);
+	mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
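+
+/* Typical usage is sketched below; the actual register write goes through the
+ * driver core, outside this header:
+ *
+ *	char smid_pl[MLXSW_REG_SMID_LEN];
+ *
+ *	mlxsw_reg_smid_pack(smid_pl, mid);
+ *	(hand smid_pl to the core register access code)
+ */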
+
+/* SSPR - Switch System Port Record Register
+ * -----------------------------------------
+ * Configures the system port to local port mapping.
+ */
+#define MLXSW_REG_SSPR_ID 0x2008
+#define MLXSW_REG_SSPR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_sspr = {
+	.id = MLXSW_REG_SSPR_ID,
+	.len = MLXSW_REG_SSPR_LEN,
+};
+
+/* reg_sspr_m
+ * Master - if set, then the record describes the master system port.
+ * This is needed in case a local port is mapped into several system ports
+ * (for multipathing). The master system port number will be reported as the
+ * source system port when packets are forwarded to the CPU. Only one master
+ * port is allowed per local port.
+ *
+ * Note: Must be set for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1);
+
+/* reg_sspr_local_port
+ * Local port number.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, local_port, 0x00, 16, 8);
+
+/* reg_sspr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ *
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8);
+
+/* reg_sspr_system_port
+ * Unique identifier within the stacking domain that represents all the ports
+ * that are available in the system (external ports).
+ *
+ * Currently, only single-ASIC configurations are supported, so we default to
+ * 1:1 mapping between system ports and local ports.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sspr, system_port, 0x04, 0, 16);
+
+static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(sspr, payload);
+	mlxsw_reg_sspr_m_set(payload, 1);
+	mlxsw_reg_sspr_local_port_set(payload, local_port);
+	mlxsw_reg_sspr_sub_port_set(payload, 0);
+	mlxsw_reg_sspr_system_port_set(payload, local_port);
+}
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+	.id = MLXSW_REG_SPMS_ID,
+	.len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+	MLXSW_REG_SPMS_STATE_NO_CHANGE,
+	MLXSW_REG_SPMS_STATE_DISCARDING,
+	MLXSW_REG_SPMS_STATE_LEARNING,
+	MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
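+/* Note: 0x400 bytes at 2 bits per entry covers all 4096 VIDs. */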
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+				       enum mlxsw_reg_spms_state state)
+{
+	MLXSW_REG_ZERO(spms, payload);
+	mlxsw_reg_spms_local_port_set(payload, local_port);
+	mlxsw_reg_spms_state_set(payload, vid, state);
+}
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+	.id = MLXSW_REG_SFGC_ID,
+	.len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+	MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+	MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+	MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+	MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+	MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+	MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+	MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+	MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+	MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+	MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+	MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+	MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+		    enum mlxsw_reg_sfgc_bridge_type bridge_type,
+		    enum mlxsw_flood_table_type table_type,
+		    unsigned int flood_table)
+{
+	MLXSW_REG_ZERO(sfgc, payload);
+	mlxsw_reg_sfgc_type_set(payload, type);
+	mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+	mlxsw_reg_sfgc_table_type_set(payload, table_type);
+	mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+	mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+	.id = MLXSW_REG_SFTR_ID,
+	.len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+				       unsigned int flood_table,
+				       unsigned int index,
+				       enum mlxsw_flood_table_type table_type,
+				       unsigned int range)
+{
+	MLXSW_REG_ZERO(sftr, payload);
+	mlxsw_reg_sftr_swid_set(payload, 0);
+	mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+	mlxsw_reg_sftr_index_set(payload, index);
+	mlxsw_reg_sftr_table_type_set(payload, table_type);
+	mlxsw_reg_sftr_range_set(payload, range);
+	mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+	mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+	.id = MLXSW_REG_SPMLR_ID,
+	.len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+	MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+	MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+	MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+					enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	MLXSW_REG_ZERO(spmlr, payload);
+	mlxsw_reg_spmlr_local_port_set(payload, local_port);
+	mlxsw_reg_spmlr_sub_port_set(payload, 0);
+	mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+	.id = MLXSW_REG_PMLP_ID,
+	.len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(pmlp, payload);
+	mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
+
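+/* Illustrative use (a sketch; 'core' is the driver's struct mlxsw_core):
+ * a driver typically queries PMLP and checks the width field to decide
+ * whether a local port has a module mapped to it.
+ *
+ *	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ *
+ *	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl);
+ *	if (!err)
+ *		usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
+ */
+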
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+	.id = MLXSW_REG_PMTU_ID,
+	.len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum of the max_mtu values of the
+ * different types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set port to. Must be smaller or equal to max_mtu.
+ * Note: If the port type is InfiniBand, the port must be disabled when
+ * its MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC oper_mtu == admin_mtu, however, in Infiniband
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+				       u16 new_mtu)
+{
+	MLXSW_REG_ZERO(pmtu, payload);
+	mlxsw_reg_pmtu_local_port_set(payload, local_port);
+	mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+	mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+	mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
+
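+/* Illustrative use (a sketch mirroring the SwitchX-2 MTU setup below):
+ * setting a new MTU is a query of max_mtu followed by a write of admin_mtu.
+ *
+ *	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ *
+ *	mlxsw_reg_pmtu_pack(pmtu_pl, local_port, 0);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(pmtu), pmtu_pl);
+ *	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+ *	if (new_mtu <= max_mtu) {
+ *		mlxsw_reg_pmtu_pack(pmtu_pl, local_port, new_mtu);
+ *		err = mlxsw_reg_write(core, MLXSW_REG(pmtu), pmtu_pl);
+ *	}
+ */
+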
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+	.id = MLXSW_REG_PTYS_ID,
+	.len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH	BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII			BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX		BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4		BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4		BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR		BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2		BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4		BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4		BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4		BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR		BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR		BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR		BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4		BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4	BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4		BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4		BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4		BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4		BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4	BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX		BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T		BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T		BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR		BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR		BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR		BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2		BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2		BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+				       u32 proto_admin)
+{
+	MLXSW_REG_ZERO(ptys, payload);
+	mlxsw_reg_ptys_local_port_set(payload, local_port);
+	mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+	mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+					 u32 *p_eth_proto_adm,
+					 u32 *p_eth_proto_oper)
+{
+	if (p_eth_proto_cap)
+		*p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+	if (p_eth_proto_adm)
+		*p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+	if (p_eth_proto_oper)
+		*p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
+
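+/* Illustrative use (a sketch; error handling omitted): reading the port
+ * capabilities is a query followed by an unpack. The SwitchX-2 driver packs
+ * proto_admin as 0 when it only intends to read the register.
+ *
+ *	char ptys_pl[MLXSW_REG_PTYS_LEN];
+ *	u32 cap, admin, oper;
+ *
+ *	mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(ptys), ptys_pl);
+ *	mlxsw_reg_ptys_unpack(ptys_pl, &cap, &admin, &oper);
+ */
+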
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+	.id = MLXSW_REG_PPAD_ID,
+	.len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local_port should be 0 and mac[7:0] is reserved.
+ *     HW will assign the addresses incrementally per port.
+ * 1 - single_mac: the MAC address of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number. If single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+				       u8 local_port)
+{
+	MLXSW_REG_ZERO(ppad, payload);
+	mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+	mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
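+/* Illustrative use (a sketch mirroring the SwitchX-2 driver): reading the
+ * base MAC address from which per port addresses are derived.
+ *
+ *	char ppad_pl[MLXSW_REG_PPAD_LEN];
+ *
+ *	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(ppad), ppad_pl);
+ *	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, mac);
+ */
+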
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+	.id = MLXSW_REG_PAOS_ID,
+	.len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+				       enum mlxsw_port_admin_status status)
+{
+	MLXSW_REG_ZERO(paos, payload);
+	mlxsw_reg_paos_swid_set(payload, 0);
+	mlxsw_reg_paos_local_port_set(payload, local_port);
+	mlxsw_reg_paos_admin_status_set(payload, status);
+	mlxsw_reg_paos_oper_status_set(payload, 0);
+	mlxsw_reg_paos_ase_set(payload, 1);
+	mlxsw_reg_paos_ee_set(payload, 1);
+	mlxsw_reg_paos_e_set(payload, 1);
+}
+
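+/* Illustrative use (a sketch mirroring the SwitchX-2 driver): bringing a
+ * port administratively up.
+ *
+ *	char paos_pl[MLXSW_REG_PAOS_LEN];
+ *
+ *	mlxsw_reg_paos_pack(paos_pl, local_port, MLXSW_PORT_ADMIN_STATUS_UP);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(paos), paos_pl);
+ */
+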
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+	.id = MLXSW_REG_PPCNT_ID,
+	.len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7 inclusively.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counting;
+ * valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counting;
+ * valid values: 0 to cap_max_tclass-1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+	     0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+	     0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+	     0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+	     0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+	     0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+	     0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+	     0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+	     0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+	     0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+	     0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+	     0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+	     0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+	     0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+	     0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+	     0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+	     0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+	     0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+	     0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+	     0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+	MLXSW_REG_ZERO(ppcnt, payload);
+	mlxsw_reg_ppcnt_swid_set(payload, 0);
+	mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+	mlxsw_reg_ppcnt_pnat_set(payload, 0);
+	mlxsw_reg_ppcnt_grp_set(payload, 0);
+	mlxsw_reg_ppcnt_clr_set(payload, 0);
+	mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
+
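+/* Illustrative use (a sketch mirroring the SwitchX-2 ethtool statistics):
+ * query the IEEE 802.3 counter group and read individual counters with the
+ * generated getters.
+ *
+ *	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *
+ *	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port);
+ *	err = mlxsw_reg_query(core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *	frames_tx = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ */
+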
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+	.id = MLXSW_REG_PSPA_ID,
+	.len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+	MLXSW_REG_ZERO(pspa, payload);
+	mlxsw_reg_pspa_swid_set(payload, swid);
+	mlxsw_reg_pspa_local_port_set(payload, local_port);
+	mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
+
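+/* Illustrative use (a sketch; the SwitchX-2 driver assigns its Ethernet
+ * ports to swid 0 during port initialization):
+ *
+ *	char pspa_pl[MLXSW_REG_PSPA_LEN];
+ *
+ *	mlxsw_reg_pspa_pack(pspa_pl, 0, local_port);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(pspa), pspa_pl);
+ */
+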
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+	.id = MLXSW_REG_HTGT_ID,
+	.len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0	/* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD	0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX	0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User-defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+	MLXSW_REG_HTGT_POLICER_DISABLE,
+	MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD	0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX	0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+	u8 swid, rdq;
+
+	MLXSW_REG_ZERO(htgt, payload);
+	if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+		swid = MLXSW_PORT_SWID_ALL_SWIDS;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+	} else {
+		swid = 0;
+		rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+	}
+	mlxsw_reg_htgt_swid_set(payload, swid);
+	mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+	mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+	mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+	mlxsw_reg_htgt_pid_set(payload, 0);
+	mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+	mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+	mlxsw_reg_htgt_priority_set(payload, 0);
+	mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+	mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+	.id = MLXSW_REG_HPKT_ID,
+	.len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+	MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+	MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+	MLXSW_REG_HPKT_ACTION_FORWARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+	MLXSW_REG_HPKT_ACTION_DISCARD,
+	MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+	MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+	MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+	MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+	MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+				       u8 trap_group, u16 trap_id)
+{
+	MLXSW_REG_ZERO(hpkt, payload);
+	mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+	mlxsw_reg_hpkt_action_set(payload, action);
+	mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+	mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+	mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
+
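+/* Illustrative use (a sketch; the exact action and trap group depend on the
+ * trap ID): trapping a packet type to the CPU via the RX trap group.
+ *
+ *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *			    MLXSW_REG_HTGT_TRAP_GROUP_RX, trap_id);
+ *	err = mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
+ */
+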
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+	switch (reg_id) {
+	case MLXSW_REG_SGCR_ID:
+		return "SGCR";
+	case MLXSW_REG_SPAD_ID:
+		return "SPAD";
+	case MLXSW_REG_SMID_ID:
+		return "SMID";
+	case MLXSW_REG_SSPR_ID:
+		return "SSPR";
+	case MLXSW_REG_SPMS_ID:
+		return "SPMS";
+	case MLXSW_REG_SFGC_ID:
+		return "SFGC";
+	case MLXSW_REG_SFTR_ID:
+		return "SFTR";
+	case MLXSW_REG_SPMLR_ID:
+		return "SPMLR";
+	case MLXSW_REG_PMLP_ID:
+		return "PMLP";
+	case MLXSW_REG_PMTU_ID:
+		return "PMTU";
+	case MLXSW_REG_PTYS_ID:
+		return "PTYS";
+	case MLXSW_REG_PPAD_ID:
+		return "PPAD";
+	case MLXSW_REG_PAOS_ID:
+		return "PAOS";
+	case MLXSW_REG_PPCNT_ID:
+		return "PPCNT";
+	case MLXSW_REG_PSPA_ID:
+		return "PSPA";
+	case MLXSW_REG_HTGT_ID:
+		return "HTGT";
+	case MLXSW_REG_HPKT_ID:
+		return "HPKT";
+	default:
+		return "*UNKNOWN*";
+	}
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644
index 0000000..3e52ee9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1568 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+	struct mlxsw_sx_port **ports;
+	struct mlxsw_core *core;
+	const struct mlxsw_bus_info *bus_info;
+	u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
+	u32			tx_dropped;
+};
+
+struct mlxsw_sx_port {
+	struct net_device *dev;
+	struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+	struct mlxsw_sx *mlxsw_sx;
+	u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+				     const struct mlxsw_tx_info *tx_info)
+{
+	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+	bool is_emad = tx_info->is_emad;
+
+	memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+	/* We currently set default values for the egress tclass (QoS). */
+	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+	mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+						  MLXSW_TXHDR_ETCLASS_5);
+	mlxsw_tx_hdr_swid_set(txhdr, 0);
+	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+	mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+	mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+					      MLXSW_TXHDR_RDQ_OTHER);
+	mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+	mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+	mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+	mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+					       MLXSW_TXHDR_NOT_EMAD);
+	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+					  bool is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+			    MLXSW_PORT_ADMIN_STATUS_DOWN);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+					 bool *p_is_up)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+	u8 oper_status;
+	int err;
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+	if (err)
+		return err;
+	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+	return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+	int max_mtu;
+	int err;
+
+	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+	if (err)
+		return err;
+	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+	if (mtu > max_mtu)
+		return -EINVAL;
+
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int
+mlxsw_sx_port_system_port_mapping_set(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char sspr_pl[MLXSW_REG_SSPR_LEN];
+
+	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sx_port->local_port);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sspr), sspr_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+				      bool *p_usable)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char pmlp_pl[MLXSW_REG_PMLP_LEN];
+	int err;
+
+	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+	if (err)
+		return err;
+	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
+	return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err)
+		return err;
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+	const struct mlxsw_tx_info tx_info = {
+		.local_port = mlxsw_sx_port->local_port,
+		.is_emad = false,
+	};
+	u64 len;
+	int err;
+
+	if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+		return NETDEV_TX_BUSY;
+
+	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+		struct sk_buff *skb_orig = skb;
+
+		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+		if (!skb) {
+			this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+			dev_kfree_skb_any(skb_orig);
+			return NETDEV_TX_OK;
+		}
+	}
+	mlxsw_sx_txhdr_construct(skb, &tx_info);
+	len = skb->len;
+	/* Due to a race we might fail here because of a full queue. In that
+	 * unlikely case we simply drop the packet.
+	 */
+	err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+
+	if (!err) {
+		pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->tx_packets++;
+		pcpu_stats->tx_bytes += len;
+		u64_stats_update_end(&pcpu_stats->syncp);
+	} else {
+		this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+		dev_kfree_skb_any(skb);
+	}
+	return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	int err;
+
+	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+	if (err)
+		return err;
+	dev->mtu = mtu;
+	return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+			  struct rtnl_link_stats64 *stats)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx_port_pcpu_stats *p;
+	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+	u32 tx_dropped = 0;
+	unsigned int start;
+	int i;
+
+	for_each_possible_cpu(i) {
+		p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&p->syncp);
+			rx_packets	= p->rx_packets;
+			rx_bytes	= p->rx_bytes;
+			tx_packets	= p->tx_packets;
+			tx_bytes	= p->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+		stats->rx_packets	+= rx_packets;
+		stats->rx_bytes		+= rx_bytes;
+		stats->tx_packets	+= tx_packets;
+		stats->tx_bytes		+= tx_bytes;
+		/* tx_dropped is u32, updated without syncp protection. */
+		tx_dropped	+= p->tx_dropped;
+	}
+	stats->tx_dropped	= tx_dropped;
+	return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+	.ndo_open		= mlxsw_sx_port_open,
+	.ndo_stop		= mlxsw_sx_port_stop,
+	.ndo_start_xmit		= mlxsw_sx_port_xmit,
+	.ndo_change_mtu		= mlxsw_sx_port_change_mtu,
+	.ndo_get_stats64	= mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d",
+		 mlxsw_sx->bus_info->fw_rev.major,
+		 mlxsw_sx->bus_info->fw_rev.minor,
+		 mlxsw_sx->bus_info->fw_rev.subminor);
+	strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+		sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+	char str[ETH_GSTRING_LEN];
+	u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+	{
+		.str = "a_frames_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+	},
+	{
+		.str = "a_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+	},
+	{
+		.str = "a_frame_check_sequence_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+	},
+	{
+		.str = "a_alignment_errors",
+		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+	},
+	{
+		.str = "a_octets_transmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+	},
+	{
+		.str = "a_octets_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_xmitted_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+	},
+	{
+		.str = "a_multicast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+	},
+	{
+		.str = "a_broadcast_frames_received_ok",
+		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+	},
+	{
+		.str = "a_in_range_length_errors",
+		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+	},
+	{
+		.str = "a_out_of_range_length_field",
+		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+	},
+	{
+		.str = "a_frame_too_long_errors",
+		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+	},
+	{
+		.str = "a_symbol_error_during_carrier",
+		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+	},
+	{
+		.str = "a_mac_control_frames_transmitted",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+	},
+	{
+		.str = "a_mac_control_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+	},
+	{
+		.str = "a_unsupported_opcodes_received",
+		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_received",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+	},
+	{
+		.str = "a_pause_mac_ctrl_frames_xmitted",
+		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+	},
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+				      u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+			memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+	int i;
+	int err;
+
+	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+	for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+		data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return MLXSW_SX_PORT_HW_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+struct mlxsw_sx_port_link_mode {
+	u32 mask;
+	u32 supported;
+	u32 advertised;
+	u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+		.supported	= SUPPORTED_100baseT_Full,
+		.advertised	= ADVERTISED_100baseT_Full,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+		.speed		= 100,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+		.supported	= SUPPORTED_1000baseKX_Full,
+		.advertised	= ADVERTISED_1000baseKX_Full,
+		.speed		= 1000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+		.supported	= SUPPORTED_10000baseT_Full,
+		.advertised	= ADVERTISED_10000baseT_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+		.supported	= SUPPORTED_10000baseKX4_Full,
+		.advertised	= ADVERTISED_10000baseKX4_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+		.supported	= SUPPORTED_10000baseKR_Full,
+		.advertised	= ADVERTISED_10000baseKR_Full,
+		.speed		= 10000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+		.supported	= SUPPORTED_20000baseKR2_Full,
+		.advertised	= ADVERTISED_20000baseKR2_Full,
+		.speed		= 20000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+		.supported	= SUPPORTED_40000baseCR4_Full,
+		.advertised	= ADVERTISED_40000baseCR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+		.supported	= SUPPORTED_40000baseKR4_Full,
+		.advertised	= ADVERTISED_40000baseKR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+		.supported	= SUPPORTED_40000baseSR4_Full,
+		.advertised	= ADVERTISED_40000baseSR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+		.supported	= SUPPORTED_40000baseLR4_Full,
+		.advertised	= ADVERTISED_40000baseLR4_Full,
+		.speed		= 40000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+		.speed		= 25000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+		.speed		= 50000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+		.supported	= SUPPORTED_56000baseKR4_Full,
+		.advertised	= ADVERTISED_56000baseKR4_Full,
+		.speed		= 56000,
+	},
+	{
+		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+		.speed		= 100000,
+	},
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return SUPPORTED_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+		return SUPPORTED_Backplane;
+	return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].supported;
+	}
+	return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+	u32 modes = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+			modes |= mlxsw_sx_port_link_mode[i].advertised;
+	}
+	return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+					    struct ethtool_cmd *cmd)
+{
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+	int i;
+
+	if (!carrier_ok)
+		goto out;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+			speed = mlxsw_sx_port_link_mode[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	ethtool_cmd_speed_set(cmd, speed);
+	cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+		return PORT_FIBRE;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+		return PORT_DA;
+
+	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+		return PORT_NONE;
+
+	return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_oper;
+	int err;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+			      &eth_proto_admin, &eth_proto_oper);
+
+	cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+			 mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+	mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+					eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+	cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+	cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+		if (speed == mlxsw_sx_port_link_mode[i].speed)
+			ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 speed;
+	u32 eth_proto_new;
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	bool is_up;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+		mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+		mlxsw_sx_to_ptys_speed(speed);
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+	eth_proto_new = eth_proto_new & eth_proto_cap;
+	if (!eth_proto_new) {
+		netdev_err(dev, "Not supported proto admin requested");
+		return -EINVAL;
+	}
+	if (eth_proto_new == eth_proto_admin)
+		return 0;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to set proto admin");
+		return err;
+	}
+
+	err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+	if (err) {
+		netdev_err(dev, "Failed to get oper status");
+		return err;
+	}
+	if (!is_up)
+		return 0;
+
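+	/* Per the PTYS register note, a new proto_admin only takes effect on
+	 * a down-to-up transition, so toggle the admin state to apply it.
+	 */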
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+	.get_drvinfo		= mlxsw_sx_port_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mlxsw_sx_port_get_strings,
+	.get_ethtool_stats	= mlxsw_sx_port_get_stats,
+	.get_sset_count		= mlxsw_sx_port_get_sset_count,
+	.get_settings		= mlxsw_sx_port_get_settings,
+	.set_settings		= mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+				  struct switchdev_attr *attr)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+		memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+	.switchdev_port_attr_get	= mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN];
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+	return 0;
+}
+
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	struct net_device *dev = mlxsw_sx_port->dev;
+	char ppad_pl[MLXSW_REG_PPAD_LEN];
+	int err;
+
+	mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+	err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+	/* The last byte of the base MAC address is guaranteed to be such
+	 * that it does not overflow when the local_port value is added
+	 * to it.
+	 */
+	dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+	return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				       u16 vid, enum mlxsw_reg_spms_state state)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char *spms_pl;
+	int err;
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				   u32 speed)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+				    enum mlxsw_reg_spmlr_learn_mode mode)
+{
+	struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+	char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+	mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	struct net_device *dev;
+	bool usable;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+	if (!dev)
+		return -ENOMEM;
+	mlxsw_sx_port = netdev_priv(dev);
+	mlxsw_sx_port->dev = dev;
+	mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+	mlxsw_sx_port->local_port = local_port;
+
+	mlxsw_sx_port->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+	if (!mlxsw_sx_port->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+	dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+	dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+	err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+			mlxsw_sx_port->local_port);
+		goto err_dev_addr_get;
+	}
+
+	netif_carrier_off(dev);
+
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+			 NETIF_F_VLAN_CHALLENGED;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all other
+	 * headers.
+	 */
+	dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+	err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_module_check;
+	}
+
+	if (!usable) {
+		dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+			mlxsw_sx_port->local_port);
+		goto port_not_usable;
+	}
+
+	err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_system_port_mapping_set;
+	}
+
+	err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_swid_set;
+	}
+
+	err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+				      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_speed_set;
+	}
+
+	err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_mtu_set;
+	}
+
+	err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+	if (err)
+		goto err_port_admin_status_set;
+
+	err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+					  MLXSW_PORT_DEFAULT_VID,
+					  MLXSW_REG_SPMS_STATE_FORWARDING);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_stp_state_set;
+	}
+
+	err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+						  MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+			mlxsw_sx_port->local_port);
+		goto err_port_mac_learning_mode_set;
+	}
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+			mlxsw_sx_port->local_port);
+		goto err_register_netdev;
+	}
+
+	mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+	return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+	free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+	free_netdev(dev);
+	return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+	if (!mlxsw_sx_port)
+		return;
+	unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+	mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+	free_percpu(mlxsw_sx_port->pcpu_stats);
+	free_netdev(mlxsw_sx_port->dev);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+	int i;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+		mlxsw_sx_port_remove(mlxsw_sx, i);
+	kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+	size_t alloc_size;
+	int i;
+	int err;
+
+	alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+	mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+	if (!mlxsw_sx->ports)
+		return -ENOMEM;
+
+	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+		err = mlxsw_sx_port_create(mlxsw_sx, i);
+		if (err)
+			goto err_port_create;
+	}
+	return 0;
+
+err_port_create:
+	for (i--; i >= 1; i--)
+		mlxsw_sx_port_remove(mlxsw_sx, i);
+	kfree(mlxsw_sx->ports);
+	return err;
+}
+
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+				     char *pude_pl, void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx_port *mlxsw_sx_port;
+	enum mlxsw_reg_pude_oper_status status;
+	u8 local_port;
+
+	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+	mlxsw_sx_port = mlxsw_sx->ports[local_port];
+	if (!mlxsw_sx_port) {
+		dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+			 local_port);
+		return;
+	}
+
+	status = mlxsw_reg_pude_oper_status_get(pude_pl);
+	if (status == MLXSW_PORT_OPER_STATUS_UP) {
+		netdev_info(mlxsw_sx_port->dev, "link up\n");
+		netif_carrier_on(mlxsw_sx_port->dev);
+	} else {
+		netdev_info(mlxsw_sx_port->dev, "link down\n");
+		netif_carrier_off(mlxsw_sx_port->dev);
+	}
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+	.func = mlxsw_sx_pude_event_func,
+	.trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+				   enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int err;
+
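+	/* Only the PUDE (port up/down) event trap is currently handled. */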
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	}
+	err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+	if (err)
+		return err;
+
+	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+			    MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+	if (err)
+		goto err_event_trap_set;
+
+	return 0;
+
+err_event_trap_set:
+	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+	return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+				      enum mlxsw_event_trap_id trap_id)
+{
+	struct mlxsw_event_listener *el;
+
+	switch (trap_id) {
+	case MLXSW_TRAP_ID_PUDE:
+		el = &mlxsw_sx_pude_event;
+		break;
+	}
+	mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+				      void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+	struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+	if (unlikely(!mlxsw_sx_port)) {
+		if (net_ratelimit())
+			dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+				 local_port);
+		return;
+	}
+
+	skb->dev = mlxsw_sx_port->dev;
+
+	pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+	u64_stats_update_begin(&pcpu_stats->syncp);
+	pcpu_stats->rx_packets++;
+	pcpu_stats->rx_bytes += skb->len;
+	u64_stats_update_end(&pcpu_stats->syncp);
+
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	netif_receive_skb(skb);
+}
+
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_FDB_MC,
+	},
+	/* Traps for specific L2 packet types, not trapped as FDB MC */
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_STP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LACP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_EAPOL,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_LLDP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MMRP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_MVRP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_RPVST,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_DHCP,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+	},
+	{
+		.func = mlxsw_sx_rx_listener_func,
+		.local_port = MLXSW_PORT_DONT_CARE,
+		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+	},
+};
+
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+	char htgt_pl[MLXSW_REG_HTGT_LEN];
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+	int err;
+
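+	/* Set up the RX trap group used by the listeners registered below. */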
+	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+	if (err)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+		err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+						      &mlxsw_sx_rx_listener[i],
+						      mlxsw_sx);
+		if (err)
+			goto err_rx_listener_register;
+
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+		if (err)
+			goto err_rx_trap_set;
+	}
+	return 0;
+
+err_rx_trap_set:
+	mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+					  &mlxsw_sx_rx_listener[i],
+					  mlxsw_sx);
+err_rx_listener_register:
+	for (i--; i >= 0; i--) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+						  &mlxsw_sx_rx_listener[i],
+						  mlxsw_sx);
+	}
+	return err;
+}
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+	char hpkt_pl[MLXSW_REG_HPKT_LEN];
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+				    MLXSW_REG_HTGT_TRAP_GROUP_RX,
+				    mlxsw_sx_rx_listener[i].trap_id);
+		mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+		mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+						  &mlxsw_sx_rx_listener[i],
+						  mlxsw_sx);
+	}
+}
+
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+	char sfgc_pl[MLXSW_REG_SFGC_LEN];
+	char sgcr_pl[MLXSW_REG_SGCR_LEN];
+	char *smid_pl;
+	char *sftr_pl;
+	int err;
+
+	/* Due to a FW bug, we must configure SMID. */
+	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+	if (!smid_pl)
+		return -ENOMEM;
+	mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+	kfree(smid_pl);
+	if (err)
+		return err;
+
+	/* Configure a flooding table, which includes only the CPU port. */
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+	mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+	kfree(sftr_pl);
+	if (err)
+		return err;
+
+	/* Flood different packet types using the flooding table. */
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_BROADCAST,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sfgc_pack(sfgc_pl,
+			    MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+			    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+			    MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+			    0);
+	err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+	if (err)
+		return err;
+
+	mlxsw_reg_sgcr_pack(sgcr_pl, true);
+	return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+			 const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+	int err;
+
+	mlxsw_sx->core = mlxsw_core;
+	mlxsw_sx->bus_info = mlxsw_bus_info;
+
+	err = mlxsw_sx_hw_id_get(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+		return err;
+	}
+
+	err = mlxsw_sx_ports_create(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+		return err;
+	}
+
+	err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+		goto err_event_register;
+	}
+
+	err = mlxsw_sx_traps_init(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+		goto err_rx_listener_register;
+	}
+
+	err = mlxsw_sx_flood_init(mlxsw_sx);
+	if (err) {
+		dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+		goto err_flood_init;
+	}
+
+	return 0;
+
+err_flood_init:
+	mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+	mlxsw_sx_ports_remove(mlxsw_sx);
+	return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+	struct mlxsw_sx *mlxsw_sx = priv;
+
+	mlxsw_sx_traps_fini(mlxsw_sx);
+	mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+	mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+	.used_max_vepa_channels		= 1,
+	.max_vepa_channels		= 0,
+	.used_max_lag			= 1,
+	.max_lag			= 64,
+	.used_max_port_per_lag		= 1,
+	.max_port_per_lag		= 16,
+	.used_max_mid			= 1,
+	.max_mid			= 7000,
+	.used_max_pgt			= 1,
+	.max_pgt			= 0,
+	.used_max_system_port		= 1,
+	.max_system_port		= 48000,
+	.used_max_vlan_groups		= 1,
+	.max_vlan_groups		= 127,
+	.used_max_regions		= 1,
+	.max_regions			= 400,
+	.used_flood_tables		= 1,
+	.max_flood_tables		= 2,
+	.max_vid_flood_tables		= 1,
+	.used_flood_mode		= 1,
+	.flood_mode			= 3,
+	.used_max_ib_mc			= 1,
+	.max_ib_mc			= 0,
+	.used_max_pkey			= 1,
+	.max_pkey			= 0,
+	.swid_config			= {
+		{
+			.used_type	= 1,
+			.type		= MLXSW_PORT_SWID_TYPE_ETH,
+		}
+	},
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+	.kind			= MLXSW_DEVICE_KIND_SWITCHX2,
+	.owner			= THIS_MODULE,
+	.priv_size		= sizeof(struct mlxsw_sx),
+	.init			= mlxsw_sx_init,
+	.fini			= mlxsw_sx_fini,
+	.txhdr_construct	= mlxsw_sx_txhdr_construct,
+	.txhdr_len		= MLXSW_TXHDR_LEN,
+	.profile		= &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+	return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+	mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644
index 0000000..53a9550
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+	/* Ethernet EMAD and FDB miss */
+	MLXSW_TRAP_ID_FDB_MC = 0x01,
+	MLXSW_TRAP_ID_ETHEMAD = 0x05,
+	/* L2 traps for specific packet types */
+	MLXSW_TRAP_ID_STP = 0x10,
+	MLXSW_TRAP_ID_LACP = 0x11,
+	MLXSW_TRAP_ID_EAPOL = 0x12,
+	MLXSW_TRAP_ID_LLDP = 0x13,
+	MLXSW_TRAP_ID_MMRP = 0x14,
+	MLXSW_TRAP_ID_MVRP = 0x15,
+	MLXSW_TRAP_ID_RPVST = 0x16,
+	MLXSW_TRAP_ID_DHCP = 0x19,
+	MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+	MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+	MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+	MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+	MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+	MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+	/* Port Up/Down event generated by hardware */
+	MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644
index 0000000..06fc46c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+	MLXSW_TXHDR_ETH_CTL,
+	MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+	MLXSW_TXHDR_ETCLASS_0,
+	MLXSW_TXHDR_ETCLASS_1,
+	MLXSW_TXHDR_ETCLASS_2,
+	MLXSW_TXHDR_ETCLASS_3,
+	MLXSW_TXHDR_ETCLASS_4,
+	MLXSW_TXHDR_ETCLASS_5,
+	MLXSW_TXHDR_ETCLASS_6,
+	MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+	MLXSW_TXHDR_RDQ_OTHER,
+	MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+	MLXSW_TXHDR_NOT_EMAD,
+	MLXSW_TXHDR_EMAD,
+};
+
+enum {
+	MLXSW_TXHDR_TYPE_DATA,
+	MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c281117..2d1b942 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -8226,31 +8226,7 @@
 	pci_disable_device(pdev);
 }
 
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
-	return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
-	pci_unregister_driver(&s2io_driver);
-	DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
 
 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
 				struct tcphdr **tcp, struct RxD_t *rxdp,
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index d89b6ed..6c5997d 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1085,8 +1085,6 @@
 static void tx_intr_handler(struct fifo_info *fifo_data);
 static void s2io_handle_errors(void * dev_id);
 
-static int s2io_starter(void);
-static void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
 static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 055f376..06bcc73 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -24,9 +24,7 @@
 #include <linux/mii.h>
 #include <linux/timer.h>
 #include <linux/irq.h>
-
 #include <linux/vmalloc.h>
-
 #include <linux/io.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
@@ -39,8 +37,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 62
-#define QLCNIC_LINUX_VERSIONID  "5.3.62"
+#define _QLCNIC_LINUX_SUBVERSION 63
+#define QLCNIC_LINUX_VERSIONID  "5.3.63"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -926,6 +924,7 @@
 #define QLCNIC_FW_CAPABILITY_SET_DRV_VER	BIT_5
 #define QLCNIC_FW_CAPABILITY_2_BEACON		BIT_7
 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP	BIT_13
 
 #define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD	BIT_0
 #define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD	BIT_1
@@ -2291,8 +2290,9 @@
 
 #define PCI_DEVICE_ID_QLOGIC_QLE824X		0x8020
 #define PCI_DEVICE_ID_QLOGIC_QLE834X		0x8030
-#define PCI_DEVICE_ID_QLOGIC_QLE8830		0x8830
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE834X	0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830		0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30		0x8C30
 #define PCI_DEVICE_ID_QLOGIC_QLE844X		0x8040
 #define PCI_DEVICE_ID_QLOGIC_VF_QLE844X	0x8440
 
@@ -2319,7 +2319,8 @@
 		  (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
 		  (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
 		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
-		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 
 	return status;
 }
@@ -2335,7 +2336,8 @@
 	bool status;
 
 	status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
-		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+		  (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 
 	return status;
 }
@@ -2351,7 +2353,8 @@
 {
 	unsigned short device = adapter->pdev->device;
 
-	return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+	return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+		(device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
 }
 
 static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 840bf36..5ab3adf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -5,14 +5,15 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
 #include <linux/if_vlan.h>
 #include <linux/ipv6.h>
 #include <linux/ethtool.h>
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
 static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
 static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
 static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
@@ -118,6 +119,7 @@
 	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
 	{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
 	{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+	{QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
 };
 
 const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -3513,6 +3515,31 @@
 	qlcnic_free_mbx_args(&cmd);
 }
 
+#define QLCNIC_83XX_ADD_PORT0		BIT_0
+#define QLCNIC_83XX_ADD_PORT1		BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE	13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+	struct qlcnic_cmd_args cmd;
+	int err;
+
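+	/* Ask the FW to extend the minidump to cover iSCSI on both ports. */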
+	err = qlcnic_alloc_mbx_args(&cmd, adapter,
+				    QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+	if (err)
+		return err;
+
+	cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+	cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+	cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+	err = qlcnic_issue_cmd(adapter, &cmd);
+	if (err)
+		dev_err(&adapter->pdev->dev,
+			"failed to issue extend iSCSI minidump capability\n");
+
+	return err;
+}
+
 int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
 {
 	u32 major, minor, sub;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 69f828e..331ae2c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/etherdevice.h>
+
 #include "qlcnic_hw.h"
 
 #define QLCNIC_83XX_BAR0_LENGTH 0x4000
@@ -626,6 +627,7 @@
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
 int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
 void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 753ea8b..bf89216 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1384,7 +1384,7 @@
 	size_t size;
 	u64 addr;
 
-	temp = kzalloc(fw->size, GFP_KERNEL);
+	temp = vzalloc(fw->size);
 	if (!temp) {
 		release_firmware(fw);
 		fw_info->fw = NULL;
@@ -1430,7 +1430,7 @@
 exit:
 	release_firmware(fw);
 	fw_info->fw = NULL;
-	kfree(temp);
+	vfree(temp);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 75ee9e4..509b596 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -5,13 +5,13 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
-#include "qlcnic.h"
-#include "qlcnic_hdr.h"
-
 #include <linux/slab.h>
 #include <net/ip.h>
 #include <linux/bitops.h>
 
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
 #define MASK(n) ((1ULL<<(n))-1)
 #define OCM_WIN_P3P(addr) (addr & 0xffc0000)
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index cbe2399..4bb33af 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -109,6 +109,7 @@
 #define QLCNIC_CMD_GET_LED_CONFIG		0x6A
 #define QLCNIC_CMD_83XX_SET_DRV_VER		0x6F
 #define QLCNIC_CMD_ADD_RCV_RINGS		0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP	0x37
 
 #define QLCNIC_INTRPT_INTX			1
 #define QLCNIC_INTRPT_MSIX			3
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2f6cc42..8b08b20 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -7,11 +7,6 @@
 
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_sriov.h"
-#include "qlcnic_hw.h"
-
 #include <linux/swab.h>
 #include <linux/dma-mapping.h>
 #include <linux/if_vlan.h>
@@ -25,6 +20,10 @@
 #include <net/vxlan.h>
 #endif
 
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
@@ -111,8 +110,9 @@
 static const struct pci_device_id qlcnic_pci_tbl[] = {
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
-	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
 	ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
 	{0,}
@@ -1149,6 +1149,7 @@
 	case PCI_DEVICE_ID_QLOGIC_QLE844X:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
 		*bar = QLCNIC_83XX_BAR0_LENGTH;
 		break;
 	default:
@@ -2403,7 +2404,6 @@
 			qlcnic_free_tx_rings(adapter);
 			return -ENOMEM;
 		}
-		memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 		tx_ring->cmd_buf_arr = cmd_buf_arr;
 		spin_lock_init(&tx_ring->tx_clean_lock);
 	}
@@ -2492,6 +2492,7 @@
 		qlcnic_83xx_register_map(ahw);
 		break;
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+	case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
 	case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
 		qlcnic_sriov_vf_register_map(ahw);
 		break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 332bb8a..cda9e60 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -5,13 +5,13 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <net/ip.h>
+
 #include "qlcnic.h"
 #include "qlcnic_hdr.h"
 #include "qlcnic_83xx_hw.h"
 #include "qlcnic_hw.h"
 
-#include <net/ip.h>
-
 #define QLC_83XX_MINIDUMP_FLASH		0x520000
 #define QLC_83XX_OCM_INDEX			3
 #define QLC_83XX_PCI_INDEX			0
@@ -1388,27 +1388,60 @@
 	fw_dump->clr = 1;
 	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
 	netdev_info(adapter->netdev,
-		    "Dump data %d bytes captured, template header size %d bytes\n",
-		    fw_dump->size, fw_dump->tmpl_hdr_size);
+		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+		    fw_dump->tmpl_hdr);
 	/* Send a udev event to notify availability of FW dump */
 	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
 
 	return 0;
 }
 
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+	/* For special adapters (with the 0x8830 device ID), where the iSCSI
+	 * firmware dump needs to be captured as part of the regular firmware
+	 * dump collection process, the firmware exports its capability
+	 * through capability registers.
+	 */
+	return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+		(adapter->ahw->extra_capability[0] &
+		 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
 {
 	u32 prev_version, current_version;
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
 	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
 	struct pci_dev *pdev = adapter->pdev;
+	bool extended = false;
 
 	prev_version = adapter->fw_version;
 	current_version = qlcnic_83xx_get_fw_version(adapter);
 
 	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
 		vfree(fw_dump->tmpl_hdr);
+
+		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+			extended = !qlcnic_83xx_extend_md_capab(adapter);
+
 		if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
 			dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+		/* Once we have a minidump template with extended iSCSI dump
+		 * capability, update the minidump capture mask to 0x1f as
+		 * per the FW requirement.
+		 */
+		if (extended) {
+			struct qlcnic_83xx_dump_template_hdr *hdr;
+
+			hdr = fw_dump->tmpl_hdr;
+			hdr->drv_cap_mask = 0x1f;
+			fw_dump->cap_mask = 0x1f;
+			dev_info(&pdev->dev,
+				 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+		}
 	}
 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 4677b2e..017d8c2c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -8,10 +8,11 @@
 #ifndef _QLCNIC_83XX_SRIOV_H_
 #define _QLCNIC_83XX_SRIOV_H_
 
-#include "qlcnic.h"
 #include <linux/types.h>
 #include <linux/pci.h>
 
+#include "qlcnic.h"
+
 extern const u32 qlcnic_83xx_reg_tbl[];
 extern const u32 qlcnic_83xx_ext_reg_tbl[];
 
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index e631246..546cd5f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -5,10 +5,11 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <linux/types.h>
+
 #include "qlcnic_sriov.h"
 #include "qlcnic.h"
 #include "qlcnic_83xx_hw.h"
-#include <linux/types.h>
 
 #define QLC_BC_COMMAND	0
 #define QLC_BC_RESPONSE	1
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index a29538b..afd687e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -5,9 +5,10 @@
  * See LICENSE.qlcnic for copyright and licensing details.
  */
 
+#include <linux/types.h>
+
 #include "qlcnic_sriov.h"
 #include "qlcnic.h"
-#include <linux/types.h>
 
 #define QLCNIC_SRIOV_VF_MAX_MAC 7
 #define QLC_VF_MIN_TX_RATE	100
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 05c28f2..ccbb045 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -7,10 +7,6 @@
 
 #include <linux/slab.h>
 #include <linux/interrupt.h>
-
-#include "qlcnic.h"
-#include "qlcnic_hw.h"
-
 #include <linux/swab.h>
 #include <linux/dma-mapping.h>
 #include <net/ip.h>
@@ -24,6 +20,9 @@
 #include <linux/hwmon-sysfs.h>
 #endif
 
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
 int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
 {
 	return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f790f61..24dcbe6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -637,6 +637,9 @@
 	/* _TBICSRBit */
 	TBILinkOK	= 0x02000000,
 
+	/* ResetCounterCommand */
+	CounterReset	= 0x1,
+
 	/* DumpCounterCommand */
 	CounterDump	= 0x8,
 
@@ -747,6 +750,13 @@
 	__le16	tx_underun;
 };
 
+struct rtl8169_tc_offsets {
+	bool	inited;
+	__le64	tx_errors;
+	__le32	tx_multi_collision;
+	__le16	tx_aborted;
+};
+
 enum rtl_flag {
 	RTL_FLAG_TASK_ENABLED,
 	RTL_FLAG_TASK_SLOW_PENDING,
@@ -824,6 +834,7 @@
 
 	struct mii_if_info mii;
 	struct rtl8169_counters counters;
+	struct rtl8169_tc_offsets tc_offset;
 	u32 saved_wolopts;
 	u32 opts1_mask;
 
@@ -2179,6 +2190,73 @@
 	}
 }
 
+static struct rtl8169_counters *rtl8169_map_counters(struct net_device *dev,
+						     dma_addr_t *paddr,
+						     u32 counter_cmd)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct device *d = &tp->pci_dev->dev;
+	struct rtl8169_counters *counters;
+	u32 cmd;
+
+	counters = dma_alloc_coherent(d, sizeof(*counters), paddr, GFP_KERNEL);
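+	/* Map a DMA buffer for the counters and kick off @counter_cmd. */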
+	if (counters) {
+		RTL_W32(CounterAddrHigh, (u64)*paddr >> 32);
+		cmd = (u64)*paddr & DMA_BIT_MASK(32);
+		RTL_W32(CounterAddrLow, cmd);
+		RTL_W32(CounterAddrLow, cmd | counter_cmd);
+	}
+	return counters;
+}
+
+static void rtl8169_unmap_counters(struct net_device *dev,
+				   dma_addr_t paddr,
+				   struct rtl8169_counters *counters)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct device *d = &tp->pci_dev->dev;
+
+	RTL_W32(CounterAddrLow, 0);
+	RTL_W32(CounterAddrHigh, 0);
+
+	dma_free_coherent(d, sizeof(*counters), counters, paddr);
+}
+
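+/* The counter reset is complete once the chip clears the CounterReset bit. */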
+DECLARE_RTL_COND(rtl_reset_counters_cond)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	return RTL_R32(CounterAddrLow) & CounterReset;
+}
+
+static bool rtl8169_reset_counters(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	struct rtl8169_counters *counters;
+	dma_addr_t paddr;
+	bool ret = true;
+
+	/*
+	 * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
+	 * tally counters.
+	 */
+	if (tp->mac_version < RTL_GIGA_MAC_VER_19)
+		return true;
+
+	counters = rtl8169_map_counters(dev, &paddr, CounterReset);
+	if (!counters)
+		return false;
+
+	if (!rtl_udelay_loop_wait_low(tp, &rtl_reset_counters_cond, 10, 1000))
+		ret = false;
+
+	rtl8169_unmap_counters(dev, paddr, counters);
+
+	return ret;
+}
+
 DECLARE_RTL_COND(rtl_counters_cond)
 {
 	void __iomem *ioaddr = tp->mmio_addr;
@@ -2186,38 +2264,71 @@
 	return RTL_R32(CounterAddrLow) & CounterDump;
 }
 
-static void rtl8169_update_counters(struct net_device *dev)
+static bool rtl8169_update_counters(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 	void __iomem *ioaddr = tp->mmio_addr;
-	struct device *d = &tp->pci_dev->dev;
 	struct rtl8169_counters *counters;
 	dma_addr_t paddr;
-	u32 cmd;
+	bool ret = true;
 
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
 	 * is disabled.
 	 */
 	if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
-		return;
+		return true;
 
-	counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
+	counters = rtl8169_map_counters(dev, &paddr, CounterDump);
 	if (!counters)
-		return;
-
-	RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
-	cmd = (u64)paddr & DMA_BIT_MASK(32);
-	RTL_W32(CounterAddrLow, cmd);
-	RTL_W32(CounterAddrLow, cmd | CounterDump);
+		return false;
 
 	if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
 		memcpy(&tp->counters, counters, sizeof(*counters));
+	else
+		ret = false;
 
-	RTL_W32(CounterAddrLow, 0);
-	RTL_W32(CounterAddrHigh, 0);
+	rtl8169_unmap_counters(dev, paddr, counters);
 
-	dma_free_coherent(d, sizeof(*counters), counters, paddr);
+	return ret;
+}
+
+static bool rtl8169_init_counter_offsets(struct net_device *dev)
+{
+	struct rtl8169_private *tp = netdev_priv(dev);
+	bool ret = false;
+
+	/*
+	 * rtl8169_init_counter_offsets is called from rtl_open.  On chip
+	 * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only
+	 * reset by a power cycle, while the counter values collected by the
+	 * driver are reset at every driver unload/load cycle.
+	 *
+	 * To make sure the HW values returned by @get_stats64 match the SW
+	 * values, we collect the initial values at first open(*) and use them
+	 * as offsets to normalize the values returned by @get_stats64.
+	 *
+	 * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one
+	 * for the reason stated in rtl8169_update_counters; CmdRxEnb is only
+	 * set at open time by rtl_hw_start.
+	 */
+
+	if (tp->tc_offset.inited)
+		return true;
+
+	/* If both reset and update fail, propagate the failure to the caller. */
+	if (rtl8169_reset_counters(dev))
+		ret = true;
+
+	if (rtl8169_update_counters(dev))
+		ret = true;
+
+	tp->tc_offset.tx_errors = tp->counters.tx_errors;
+	tp->tc_offset.tx_multi_collision = tp->counters.tx_multi_collision;
+	tp->tc_offset.tx_aborted = tp->counters.tx_aborted;
+	tp->tc_offset.inited = true;
+
+	return ret;
 }
 
 static void rtl8169_get_ethtool_stats(struct net_device *dev,
@@ -7367,6 +7478,9 @@
 			tp->rx_stats.packets++;
 			tp->rx_stats.bytes += pkt_size;
 			u64_stats_update_end(&tp->rx_stats.syncp);
+
+			if (skb->pkt_type == PACKET_MULTICAST)
+				dev->stats.multicast++;
 		}
 release_descriptor:
 		desc->opts2 = 0;
@@ -7631,6 +7745,9 @@
 
 	rtl_hw_start(dev);
 
+	if (!rtl8169_init_counter_offsets(dev))
+		netif_warn(tp, hw, dev, "counter reset/update failed\n");
+
 	netif_start_queue(dev);
 
 	rtl_unlock_work(tp);
@@ -7674,7 +7791,6 @@
 		stats->rx_bytes	= tp->rx_stats.bytes;
 	} while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
 
-
 	do {
 		start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
 		stats->tx_packets = tp->tx_stats.packets;
@@ -7688,6 +7804,24 @@
 	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
 	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
 	stats->rx_missed_errors = dev->stats.rx_missed_errors;
+	stats->multicast	= dev->stats.multicast;
+
+	/*
+	 * Fetch from the tally counters the additional values missing in the
+	 * stats collected by the driver.
+	 */
+	rtl8169_update_counters(dev);
+
+	/*
+	 * Subtract the values fetched during initialization.
+	 * See rtl8169_init_counter_offsets for why we do this.
+	 */
+	stats->tx_errors = le64_to_cpu(tp->counters.tx_errors) -
+		le64_to_cpu(tp->tc_offset.tx_errors);
+	stats->collisions = le32_to_cpu(tp->counters.tx_multi_collision) -
+		le32_to_cpu(tp->tc_offset.tx_multi_collision);
+	stats->tx_aborted_errors = le16_to_cpu(tp->counters.tx_aborted) -
+		le16_to_cpu(tp->tc_offset.tx_aborted);
 
 	return stats;
 }
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 8aa50ac..a157aaa 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -658,6 +658,8 @@
 	__le32 dptr;	/* Descriptor pointer */
 };
 
+#define DPTR_ALIGN	4	/* Required descriptor pointer alignment */
+
 enum DIE_DT {
 	/* Frame data */
 	DT_FMID		= 0x40,
@@ -739,6 +741,7 @@
 #define RX_QUEUE_OFFSET	4
 #define NUM_RX_QUEUE	2
 #define NUM_TX_QUEUE	2
+#define NUM_TX_DESC	2	/* TX descriptors per packet */
 
 struct ravb_tstamp_skb {
 	struct list_head list;
@@ -777,9 +780,9 @@
 	dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
 	struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
 	struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+	void *tx_align[NUM_TX_QUEUE];
 	struct sk_buff **rx_skb[NUM_RX_QUEUE];
 	struct sk_buff **tx_skb[NUM_TX_QUEUE];
-	void **tx_buffers[NUM_TX_QUEUE];
 	u32 rx_over_errors;
 	u32 rx_fifo_errors;
 	struct net_device_stats stats[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 78849dd..450899e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -195,12 +195,8 @@
 	priv->tx_skb[q] = NULL;
 
 	/* Free aligned TX buffers */
-	if (priv->tx_buffers[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			kfree(priv->tx_buffers[q][i]);
-	}
-	kfree(priv->tx_buffers[q]);
-	priv->tx_buffers[q] = NULL;
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@
 
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] + 1);
+			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -223,11 +219,12 @@
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	struct ravb_ex_rx_desc *rx_desc = NULL;
-	struct ravb_tx_desc *tx_desc = NULL;
-	struct ravb_desc *desc = NULL;
+	struct ravb_ex_rx_desc *rx_desc;
+	struct ravb_tx_desc *tx_desc;
+	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+			   NUM_TX_DESC;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -260,11 +257,12 @@
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		tx_desc = &priv->tx_ring[q][i];
+	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+	     i++, tx_desc++) {
+		tx_desc->die_dt = DT_EEMPTY;
+		tx_desc++;
 		tx_desc->die_dt = DT_EEMPTY;
 	}
-	tx_desc = &priv->tx_ring[q][i];
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
 
@@ -285,7 +283,6 @@
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct sk_buff *skb;
 	int ring_size;
-	void *buffer;
 	int i;
 
 	/* Allocate RX and TX skb rings */
@@ -305,19 +302,11 @@
 	}
 
 	/* Allocate rings for the aligned buffers */
-	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
-				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
-	if (!priv->tx_buffers[q])
+	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+				    DPTR_ALIGN - 1, GFP_KERNEL);
+	if (!priv->tx_align[q])
 		goto error;
 
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			goto error;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
-	}
-
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -329,7 +318,8 @@
 	priv->dirty_rx[q] = 0;
 
 	/* Allocate all TX descriptors. */
-	ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+	ring_size = sizeof(struct ravb_tx_desc) *
+		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -439,11 +429,12 @@
 	struct net_device_stats *stats = &priv->stats[q];
 	struct ravb_tx_desc *desc;
 	int free_num = 0;
-	int entry = 0;
+	int entry;
 	u32 size;
 
 	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
 		desc = &priv->tx_ring[q][entry];
 		if (desc->die_dt != DT_FEMPTY)
 			break;
@@ -451,14 +442,18 @@
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry]) {
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
 			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
-			dev_kfree_skb_any(priv->tx_skb[q][entry]);
-			priv->tx_skb[q][entry] = NULL;
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				stats->tx_packets++;
+			}
 			free_num++;
 		}
-		stats->tx_packets++;
 		stats->tx_bytes += size;
 		desc->die_dt = DT_EEMPTY;
 	}
@@ -512,8 +507,8 @@
 	struct sk_buff *skb;
 	dma_addr_t dma_addr;
 	struct timespec64 ts;
-	u16 pkt_len = 0;
 	u8  desc_status;
+	u16 pkt_len;
 	int limit;
 
 	boguscnt = min(boguscnt, *quota);
@@ -1277,44 +1272,60 @@
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
-	struct ravb_tstamp_skb *ts_skb = NULL;
 	u16 q = skb_get_queue_mapping(skb);
+	struct ravb_tstamp_skb *ts_skb;
 	struct ravb_tx_desc *desc;
 	unsigned long flags;
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
+	u32 len;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+	    NUM_TX_DESC) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % priv->num_tx_ring[q];
-	priv->tx_skb[q][entry] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto drop;
 
-	buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
-	memcpy(buffer, skb->data, skb->len);
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(skb->len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+		 entry / NUM_TX_DESC * DPTR_ALIGN;
+	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+	memcpy(buffer, skb->data, len);
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&ndev->dev, dma_addr))
 		goto drop;
+
+	desc = &priv->tx_ring[q][entry];
+	desc->ds_tagl = cpu_to_le16(len);
+	desc->dptr = cpu_to_le32(dma_addr);
+
+	buffer = skb->data + len;
+	len = skb->len - len;
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, dma_addr))
+		goto unmap;
+
+	desc++;
+	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
 
 	/* TX timestamp required */
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+			desc--;
+			dma_unmap_single(&ndev->dev, dma_addr, len,
 					 DMA_TO_DEVICE);
-			goto drop;
+			goto unmap;
 		}
 		ts_skb->skb = skb;
 		ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,13 +1341,15 @@
 
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FSINGLE;
+	desc->die_dt = DT_FEND;
+	desc--;
+	desc->die_dt = DT_FSTART;
 
 	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
-	priv->cur_tx[q]++;
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
-	    !ravb_tx_free(ndev, q))
+	priv->cur_tx[q] += NUM_TX_DESC;
+	if (priv->cur_tx[q] - priv->dirty_tx[q] >
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
 		netif_stop_subqueue(ndev, q);
 
 exit:
@@ -1344,9 +1357,12 @@
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
+unmap:
+	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry] = NULL;
+	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
 	goto exit;
 }
 
@@ -1643,7 +1659,7 @@
 	ndev->dma = -1;
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		error = -ENODEV;
+		error = irq;
 		goto out_release;
 	}
 	ndev->irq = irq;
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 7fb244f..257ea71 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3089,10 +3089,8 @@
 
 	ndev->dma = -1;
 	ret = platform_get_irq(pdev, 0);
-	if (ret < 0) {
-		ret = -ENODEV;
+	if (ret < 0)
 		goto out_release;
-	}
 	ndev->irq = ret;
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2e7f9a2..34ac41a 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -202,6 +202,7 @@
 	ROCKER_CTRL_IPV4_MCAST,
 	ROCKER_CTRL_IPV6_MCAST,
 	ROCKER_CTRL_DFLT_BRIDGING,
+	ROCKER_CTRL_DFLT_OVS,
 	ROCKER_CTRL_MAX,
 };
 
@@ -323,7 +324,14 @@
 
 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
 {
-	return !!rocker_port->bridge_dev;
+	return rocker_port->bridge_dev &&
+	       netif_is_bridge_master(rocker_port->bridge_dev);
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+	return rocker_port->bridge_dev &&
+	       netif_is_ovs_master(rocker_port->bridge_dev);
 }
 
 #define ROCKER_OP_FLAG_REMOVE		BIT(0)
@@ -1818,6 +1826,30 @@
 }
 
 static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+				      struct rocker_desc_info *desc_info,
+				      void *priv)
+{
+	int mtu = *(int *)priv;
+	struct rocker_tlv *cmd_info;
+
+	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+		return -EMSGSIZE;
+	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+	if (!cmd_info)
+		return -EMSGSIZE;
+	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+			       rocker_port->pport))
+		return -EMSGSIZE;
+	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+			       mtu))
+		return -EMSGSIZE;
+	rocker_tlv_nest_end(desc_info, cmd_info);
+	return 0;
+}
+
+static int
 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
 				  struct rocker_desc_info *desc_info,
 				  void *priv)
@@ -1874,6 +1906,14 @@
 			       macaddr, NULL, NULL);
 }
 
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+					    int mtu)
+{
+	return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+			       rocker_cmd_set_port_settings_mtu_prep,
+			       &mtu, NULL, NULL);
+}
+
 static int rocker_port_set_learning(struct rocker_port *rocker_port,
 				    enum switchdev_trans trans)
 {
@@ -3243,6 +3283,12 @@
 		.bridge = true,
 		.copy_to_cpu = true,
 	},
+	[ROCKER_CTRL_DFLT_OVS] = {
+		/* pass all pkts up to CPU */
+		.eth_dst = zero_mac,
+		.eth_dst_mask = zero_mac,
+		.acl = true,
+	},
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
@@ -3755,11 +3801,14 @@
 		break;
 	case BR_STATE_LEARNING:
 	case BR_STATE_FORWARDING:
-		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+		if (!rocker_port_is_ovsed(rocker_port))
+			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
 		want[ROCKER_CTRL_IPV4_MCAST] = true;
 		want[ROCKER_CTRL_IPV6_MCAST] = true;
 		if (rocker_port_is_bridged(rocker_port))
 			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+		else if (rocker_port_is_ovsed(rocker_port))
+			want[ROCKER_CTRL_DFLT_OVS] = true;
 		else
 			want[ROCKER_CTRL_LOCAL_ARP] = true;
 		break;
@@ -3983,7 +4032,8 @@
 
 	napi_enable(&rocker_port->napi_tx);
 	napi_enable(&rocker_port->napi_rx);
-	rocker_port_set_enable(rocker_port, true);
+	if (!dev->proto_down)
+		rocker_port_set_enable(rocker_port, true);
 	netif_start_queue(dev);
 	return 0;
 
@@ -4102,8 +4152,11 @@
 					  skb->data, skb_headlen(skb));
 	if (err)
 		goto nest_cancel;
-	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
-		goto nest_cancel;
+	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
+		err = skb_linearize(skb);
+		if (err)
+			goto unmap_frags;
+	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -4152,6 +4205,34 @@
 	return 0;
 }
 
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	int running = netif_running(dev);
+	int err;
+
+#define ROCKER_PORT_MIN_MTU	68
+#define ROCKER_PORT_MAX_MTU	9000
+
+	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+		return -EINVAL;
+
+	if (running)
+		rocker_port_stop(dev);
+
+	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+
+	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+	if (err)
+		return err;
+
+	if (running)
+		err = rocker_port_open(dev);
+
+	return err;
+}
+
 static int rocker_port_get_phys_port_name(struct net_device *dev,
 					  char *buf, size_t len)
 {
@@ -4167,11 +4248,33 @@
 	return err ? -EOPNOTSUPP : 0;
 }
 
+static int rocker_port_change_proto_down(struct net_device *dev,
+					 bool proto_down)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+
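+	/* Mirror proto_down into the port enable state while the port is up. */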
+	if (rocker_port->dev->flags & IFF_UP)
+		rocker_port_set_enable(rocker_port, !proto_down);
+	rocker_port->dev->proto_down = proto_down;
+	return 0;
+}
+
+static void rocker_port_neigh_destroy(struct neighbour *n)
+{
+	struct rocker_port *rocker_port = netdev_priv(n->dev);
+	int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
+	__be32 ip_addr = *(__be32 *)n->primary_key;
+
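+	/* Remove the offloaded IPv4 neighbour entry along with the kernel one. */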
+	rocker_port_ipv4_neigh(rocker_port, SWITCHDEV_TRANS_NONE,
+			       flags, ip_addr, n->ha);
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
 	.ndo_open			= rocker_port_open,
 	.ndo_stop			= rocker_port_stop,
 	.ndo_start_xmit			= rocker_port_xmit,
 	.ndo_set_mac_address		= rocker_port_set_mac_address,
+	.ndo_change_mtu			= rocker_port_change_mtu,
 	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
 	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
 	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
@@ -4179,6 +4282,8 @@
 	.ndo_fdb_del			= switchdev_port_fdb_del,
 	.ndo_fdb_dump			= switchdev_port_fdb_dump,
 	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
+	.ndo_change_proto_down		= rocker_port_change_proto_down,
+	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
 };
 
 /********************
@@ -4445,6 +4550,7 @@
 		if (found->key.pport != rocker_port->pport)
 			continue;
 		fdb->addr = found->key.addr;
+		fdb->ndm_state = NUD_REACHABLE;
 		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
 						   found->key.vlan_id);
 		err = obj->cb(rocker_port->dev, obj);
@@ -4726,6 +4832,7 @@
 	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
 	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
 	size_t rx_len;
+	u16 rx_flags = 0;
 
 	if (!skb)
 		return -ENOENT;
@@ -4733,6 +4840,8 @@
 	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
 	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
 		return -EINVAL;
+	if (attrs[ROCKER_TLV_RX_FLAGS])
+		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
 
 	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
 
@@ -4740,6 +4849,9 @@
 	skb_put(skb, rx_len);
 	skb->protocol = eth_type_trans(skb, rocker_port->dev);
 
+	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
 	rocker_port->dev->stats.rx_packets++;
 	rocker_port->dev->stats.rx_bytes += skb->len;
 
@@ -4869,7 +4981,7 @@
 		       NAPI_POLL_WEIGHT);
 	rocker_carrier_init(rocker_port);
 
-	dev->features |= NETIF_F_NETNS_LOCAL;
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;
 
 	err = register_netdev(dev);
 	if (err) {
@@ -4878,11 +4990,13 @@
 	}
 	rocker->ports[port_number] = rocker_port;
 
+	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
 	rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
 
 	err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
 	if (err) {
-		dev_err(&pdev->dev, "install ig port table failed\n");
+		netdev_err(rocker_port->dev, "install ig port table failed\n");
 		goto err_port_ig_tbl;
 	}
 
@@ -4902,6 +5016,7 @@
 	rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
 			   ROCKER_OP_FLAG_REMOVE);
 err_port_ig_tbl:
+	rocker->ports[port_number] = NULL;
 	unregister_netdev(dev);
 err_register_netdev:
 	free_netdev(dev);
@@ -5074,7 +5189,8 @@
 		goto err_probe_ports;
 	}
 
-	dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
+	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
+		 (int)sizeof(rocker->hw.id), &rocker->hw.id);
 
 	return 0;
 
@@ -5157,6 +5273,7 @@
 		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
 
 	rocker_port->bridge_dev = bridge;
+	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
 
 	return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
 				    untagged_vid, 0);
@@ -5177,6 +5294,8 @@
 		rocker_port_internal_vlan_id_get(rocker_port,
 						 rocker_port->dev->ifindex);
 
+	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+				    false);
 	rocker_port->bridge_dev = NULL;
 
 	err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
@@ -5191,46 +5310,77 @@
 	return err;
 }
 
-static int rocker_port_master_changed(struct net_device *dev)
+
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+				   struct net_device *master)
 {
-	struct rocker_port *rocker_port = netdev_priv(dev);
-	struct net_device *master = netdev_master_upper_dev_get(dev);
+	int err;
+
+	rocker_port->bridge_dev = master;
+
+	err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+	if (err)
+		return err;
+	err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+
+	return err;
+}
+
+static int rocker_port_master_linked(struct rocker_port *rocker_port,
+				     struct net_device *master)
+{
 	int err = 0;
 
-	/* There are currently three cases handled here:
-	 * 1. Joining a bridge
-	 * 2. Leaving a previously joined bridge
-	 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
-	 *    in which case nothing is done
-	 */
-	if (master && master->rtnl_link_ops &&
-	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
+	if (netif_is_bridge_master(master))
 		err = rocker_port_bridge_join(rocker_port, master);
-	else if (rocker_port_is_bridged(rocker_port))
-		err = rocker_port_bridge_leave(rocker_port);
+	else if (netif_is_ovs_master(master))
+		err = rocker_port_ovs_changed(rocker_port, master);
+	return err;
+}
 
+static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
+{
+	int err = 0;
+
+	if (rocker_port_is_bridged(rocker_port))
+		err = rocker_port_bridge_leave(rocker_port);
+	else if (rocker_port_is_ovsed(rocker_port))
+		err = rocker_port_ovs_changed(rocker_port, NULL);
 	return err;
 }
 
 static int rocker_netdevice_event(struct notifier_block *unused,
 				  unsigned long event, void *ptr)
 {
-	struct net_device *dev;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct netdev_notifier_changeupper_info *info;
+	struct rocker_port *rocker_port;
 	int err;
 
+	if (!rocker_port_dev_check(dev))
+		return NOTIFY_DONE;
+
 	switch (event) {
 	case NETDEV_CHANGEUPPER:
-		dev = netdev_notifier_info_to_dev(ptr);
-		if (!rocker_port_dev_check(dev))
-			return NOTIFY_DONE;
-		err = rocker_port_master_changed(dev);
-		if (err)
-			netdev_warn(dev,
-				    "failed to reflect master change (err %d)\n",
-				    err);
+		info = ptr;
+		if (!info->master)
+			goto out;
+		rocker_port = netdev_priv(dev);
+		if (info->linking) {
+			err = rocker_port_master_linked(rocker_port,
+							info->upper_dev);
+			if (err)
+				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
+					    err);
+		} else {
+			err = rocker_port_master_unlinked(rocker_port);
+			if (err)
+				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
+					    err);
+		}
 		break;
 	}
-
+out:
 	return NOTIFY_DONE;
 }
 
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index c61fbf9..12490b2 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -159,6 +159,7 @@
 	ROCKER_TLV_CMD_PORT_SETTINGS_MODE,		/* u8 */
 	ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,		/* u8 */
 	ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,		/* binary */
+	ROCKER_TLV_CMD_PORT_SETTINGS_MTU,		/* u16 */
 
 	__ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
 	ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@
 #define ROCKER_RX_FLAGS_TCP			BIT(5)
 #define ROCKER_RX_FLAGS_UDP			BIT(6)
 #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD	BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD		BIT(8)
 
 enum {
 	ROCKER_TLV_TX_UNSPEC,
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 605cc89..ff649eb 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -49,6 +49,12 @@
  */
 #define HUNT_FILTER_TBL_ROWS 8192
 
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+	u8 addr[ETH_ALEN];
+	u16 id;
+};
+
 struct efx_ef10_filter_table {
 /* The RX match field masks supported by this fw & hw, in order of priority */
 	enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@
 /* Shadow of net_device address lists, guarded by mac_lock */
 #define EFX_EF10_FILTER_DEV_UC_MAX	32
 #define EFX_EF10_FILTER_DEV_MC_MAX	256
-	struct {
-		u8 addr[ETH_ALEN];
-		u16 id;
-	} dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
-	  dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
-	int dev_uc_count;		/* negative for PROMISC */
-	int dev_mc_count;		/* negative for PROMISC/ALLMULTI */
+	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+	int dev_uc_count;
+	int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+	u16 ucdef_id;
+	u16 bcast_id;
+	u16 mcdef_id;
 };
 
 /* An arbitrary search limit for the software hash table */
@@ -288,11 +295,11 @@
 	/* We can have one VI for each 8K region.  However, until we
 	 * use TX option descriptors we need two TX queues per channel.
 	 */
-	efx->max_channels =
-		min_t(unsigned int,
-		      EFX_MAX_CHANNELS,
-		      efx_ef10_mem_map_size(efx) /
-		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+	efx->max_channels = min_t(unsigned int,
+				  EFX_MAX_CHANNELS,
+				  efx_ef10_mem_map_size(efx) /
+				  (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+	efx->max_tx_channels = efx->max_channels;
 	if (WARN_ON(efx->max_channels == 0))
 		return -EIO;
 
@@ -387,7 +394,7 @@
 	 * First try to enable it, then if we get EPERM, just
 	 * ask if it's already enabled
 	 */
-	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
 	if (rc == 0) {
 		nic_data->workaround_35388 = true;
 	} else if (rc == -EPERM) {
@@ -817,11 +824,13 @@
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	unsigned int uc_mem_map_size, wc_mem_map_size;
-	unsigned int min_vis, pio_write_vi_base, max_vis;
+	unsigned int min_vis = max(EFX_TXQ_TYPES,
+				   efx_separate_tx_channels ? 2 : 1);
+	unsigned int channel_vis, pio_write_vi_base, max_vis;
 	void __iomem *membase;
 	int rc;
 
-	min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+	channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
 
 #ifdef EFX_USE_PIO
 	/* Try to allocate PIO buffers if wanted and if the full
@@ -855,11 +864,11 @@
 	 * page size is >4K).  So we may allocate some extra VIs just
 	 * for writing PIO buffers through.
 	 *
-	 * The UC mapping contains (min_vis - 1) complete VIs and the
+	 * The UC mapping contains (channel_vis - 1) complete VIs and the
 	 * first half of the next VI.  Then the WC mapping begins with
 	 * the second half of this last VI.
 	 */
-	uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+	uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
 				     ER_DZ_TX_PIOBUF);
 	if (nic_data->n_piobufs) {
 		/* pio_write_vi_base rounds down to give the number of complete
@@ -874,7 +883,7 @@
 	} else {
 		pio_write_vi_base = 0;
 		wc_mem_map_size = 0;
-		max_vis = min_vis;
+		max_vis = channel_vis;
 	}
 
 	/* In case the last attached driver failed to free VIs, do it now */
@@ -886,6 +895,23 @@
 	if (rc != 0)
 		return rc;
 
+	if (nic_data->n_allocated_vis < channel_vis) {
+		netif_info(efx, drv, efx->net_dev,
+			   "Could not allocate enough VIs to satisfy RSS"
+			   " requirements. Performance may not be optimal.\n");
+		/* We didn't get the VIs to populate our channels.
+		 * We could keep what we got but then we'd have more
+		 * interrupts than we need.
+		 * Instead calculate new max_channels and restart
+		 */
+		efx->max_channels = nic_data->n_allocated_vis;
+		efx->max_tx_channels =
+			nic_data->n_allocated_vis / EFX_TXQ_TYPES;
+
+		efx_ef10_free_vis(efx);
+		return -EAGAIN;
+	}
+
 	/* If we didn't get enough VIs to map all the PIO buffers, free the
 	 * PIO buffers
 	 */
@@ -984,12 +1010,24 @@
 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+	unsigned int i;
+#endif
 
 	/* All our allocations have been reset */
 	nic_data->must_realloc_vis = true;
 	nic_data->must_restore_filters = true;
 	nic_data->must_restore_piobufs = true;
 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+	/* Driver-created vswitches and vports must be re-created */
+	nic_data->must_probe_vswitching = true;
+	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+	if (nic_data->vf)
+		for (i = 0; i < efx->vf_count; i++)
+			nic_data->vf[i].vport_id = 0;
+#endif
 }
 
 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -1034,6 +1072,12 @@
 {
 	int rc = efx_mcdi_reset(efx, reset_type);
 
+	/* Unprivileged functions return -EPERM, but need to return success
+	 * here so that the datapath is brought back up.
+	 */
+	if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+		rc = 0;
+
 	/* If it was a port reset, trigger reallocation of MC resources.
 	 * Note that on an MC reset nothing needs to be done now because we'll
 	 * detect the MC reset later and handle it then.
@@ -1282,7 +1326,12 @@
 		}
 	}
 
-	if (core_stats) {
+	if (!core_stats)
+		return stats_count;
+
+	if (nic_data->datapath_caps &
+			1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
+		/* Use vadaptor stats. */
 		core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
 					 stats[EF10_STAT_rx_multicast] +
 					 stats[EF10_STAT_rx_broadcast];
@@ -1302,6 +1351,26 @@
 		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
 		core_stats->rx_errors = core_stats->rx_crc_errors;
 		core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+	} else {
+		/* Use port stats. */
+		core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
+		core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
+		core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
+		core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
+		core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
+					 stats[GENERIC_STAT_rx_nodesc_trunc] +
+					 stats[GENERIC_STAT_rx_noskb_drops];
+		core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
+		core_stats->rx_length_errors =
+				stats[EF10_STAT_port_rx_gtjumbo] +
+				stats[EF10_STAT_port_rx_length_error];
+		core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
+		core_stats->rx_frame_errors =
+				stats[EF10_STAT_port_rx_align_error];
+		core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
+		core_stats->rx_errors = (core_stats->rx_length_errors +
+					 core_stats->rx_crc_errors +
+					 core_stats->rx_frame_errors);
 	}
 
 	return stats_count;
@@ -1558,10 +1627,6 @@
 	/* All our allocations have been reset */
 	efx_ef10_reset_mc_allocations(efx);
 
-	/* Driver-created vswitches and vports must be re-created */
-	nic_data->must_probe_vswitching = true;
-	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
 	/* The datapath firmware might have been changed */
 	nic_data->must_check_datapath_caps = true;
 
@@ -2197,6 +2262,29 @@
 				    GFP_KERNEL);
 }
 
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	struct efx_nic *efx = channel->efx;
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+
+	if (rc && rc != -EALREADY)
+		goto fail;
+
+	return;
+
+fail:
+	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+			       outbuf, outlen, rc);
+}
+
 static int efx_ef10_ev_init(struct efx_channel *channel)
 {
 	MCDI_DECLARE_BUF(inbuf,
@@ -2208,6 +2296,7 @@
 	struct efx_ef10_nic_data *nic_data;
 	bool supports_rx_merge;
 	size_t inlen, outlen;
+	unsigned int enabled, implemented;
 	dma_addr_t dma_addr;
 	int rc;
 	int i;
@@ -2248,30 +2337,52 @@
 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
 			  outbuf, sizeof(outbuf), &outlen);
 	/* IRQ return is ignored */
-	return rc;
-}
+	if (channel->channel || rc)
+		return rc;
 
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
-	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
-	MCDI_DECLARE_BUF_ERR(outbuf);
-	struct efx_nic *efx = channel->efx;
-	size_t outlen;
-	int rc;
-
-	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
-	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
-			  outbuf, sizeof(outbuf), &outlen);
-
-	if (rc && rc != -EALREADY)
+	/* Successfully created event queue on channel 0 */
+	rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+	if (rc == -ENOSYS) {
+		/* GET_WORKAROUNDS was implemented before the bug26807
+		 * workaround, thus the latter must be unavailable in this fw
+		 */
+		nic_data->workaround_26807 = false;
+		rc = 0;
+	} else if (rc) {
 		goto fail;
+	} else {
+		nic_data->workaround_26807 =
+			!!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
 
-	return;
+		if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+		    !nic_data->workaround_26807) {
+			unsigned int flags;
+
+			rc = efx_mcdi_set_workaround(efx,
+						     MC_CMD_WORKAROUND_BUG26807,
+						     true, &flags);
+
+			if (!rc) {
+				if (flags &
+				    1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+					netif_info(efx, drv, efx->net_dev,
+						   "other functions on NIC have been reset\n");
+					/* MC's boot count has incremented */
+					++nic_data->warm_boot_count;
+				}
+				nic_data->workaround_26807 = true;
+			} else if (rc == -EPERM) {
+				rc = 0;
+			}
+		}
+	}
+
+	if (!rc)
+		return 0;
 
 fail:
-	efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
-			       outbuf, outlen, rc);
+	efx_ef10_ev_fini(channel);
+	return rc;
 }
 
 static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -3225,6 +3336,19 @@
 					       filter_id, false);
 }
 
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+	return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+					 enum efx_filter_priority priority,
+					 u32 filter_id)
+{
+	return efx_ef10_filter_remove_internal(efx, 1U << priority,
+					       filter_id, true);
+}
+
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
 				    enum efx_filter_priority priority,
 				    u32 filter_id, struct efx_filter_spec *spec)
@@ -3598,6 +3722,10 @@
 		goto fail;
 	}
 
+	table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+	table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+	table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
 	efx->filter_state = table;
 	init_waitqueue_head(&table->waitq);
 	return 0;
@@ -3700,145 +3828,233 @@
 	kfree(table);
 }
 
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+		if (id != EFX_EF10_FILTER_ID_INVALID) { \
+			filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+			WARN_ON(!table->entry[filter_idx].spec); \
+			table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+		}
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
 {
 	struct efx_ef10_filter_table *table = efx->filter_state;
-	struct net_device *net_dev = efx->net_dev;
-	struct efx_filter_spec spec;
-	bool remove_failed = false;
-	struct netdev_hw_addr *uc;
-	struct netdev_hw_addr *mc;
-	unsigned int filter_idx;
-	int i, n, rc;
-
-	if (!efx_dev_registered(efx))
-		return;
+	unsigned int filter_idx, i;
 
 	if (!table)
 		return;
 
 	/* Mark old filters that may need to be removed */
 	spin_lock_bh(&efx->filter_lock);
-	n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
-	for (i = 0; i < n; i++) {
-		filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-	}
-	n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
-	for (i = 0; i < n; i++) {
-		filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-	}
+	for (i = 0; i < table->dev_uc_count; i++)
+		EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+	for (i = 0; i < table->dev_mc_count; i++)
+		EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+	EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+	EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+	EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
 	spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
 
-	/* Copy/convert the address lists; add the primary station
-	 * address and broadcast address
-	 */
-	netif_addr_lock_bh(net_dev);
-	if (net_dev->flags & IFF_PROMISC ||
-	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
-		table->dev_uc_count = -1;
-	} else {
-		table->dev_uc_count = 1 + netdev_uc_count(net_dev);
-		ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
-		i = 1;
-		netdev_for_each_uc_addr(uc, net_dev) {
-			ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
-			i++;
-		}
-	}
-	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
-		table->dev_mc_count = -1;
-	} else {
-		table->dev_mc_count = 1 + netdev_mc_count(net_dev);
-		eth_broadcast_addr(table->dev_mc_list[0].addr);
-		i = 1;
-		netdev_for_each_mc_addr(mc, net_dev) {
-			ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
-			i++;
-		}
-	}
-	netif_addr_unlock_bh(net_dev);
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct net_device *net_dev = efx->net_dev;
+	struct netdev_hw_addr *uc;
+	int addr_count;
+	unsigned int i;
 
-	/* Insert/renew unicast filters */
-	if (table->dev_uc_count >= 0) {
-		for (i = 0; i < table->dev_uc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-					   EFX_FILTER_FLAG_RX_RSS,
-					   0);
-			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->dev_uc_list[i].addr);
-			rc = efx_ef10_filter_insert(efx, &spec, true);
-			if (rc < 0) {
-				/* Fall back to unicast-promisc */
-				while (i--)
-					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_AUTO,
-						table->dev_uc_list[i].id);
-				table->dev_uc_count = -1;
-				break;
-			}
-			table->dev_uc_list[i].id = rc;
+	table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+	addr_count = netdev_uc_count(net_dev);
+	if (net_dev->flags & IFF_PROMISC)
+		*promisc = true;
+	table->dev_uc_count = 1 + addr_count;
+	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+	i = 1;
+	netdev_for_each_uc_addr(uc, net_dev) {
+		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+			*promisc = true;
+			break;
 		}
+		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+		table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+		i++;
 	}
-	if (table->dev_uc_count < 0) {
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct net_device *net_dev = efx->net_dev;
+	struct netdev_hw_addr *mc;
+	unsigned int i, addr_count;
+
+	table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+	table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+		*promisc = true;
+
+	addr_count = netdev_mc_count(net_dev);
+	i = 0;
+	netdev_for_each_mc_addr(mc, net_dev) {
+		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+			*promisc = true;
+			break;
+		}
+		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+		table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+		i++;
+	}
+
+	table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+					     bool multicast, bool rollback)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_dev_addr *addr_list;
+	struct efx_filter_spec spec;
+	u8 baddr[ETH_ALEN];
+	unsigned int i, j;
+	int addr_count;
+	int rc;
+
+	if (multicast) {
+		addr_list = table->dev_mc_list;
+		addr_count = table->dev_mc_count;
+	} else {
+		addr_list = table->dev_uc_list;
+		addr_count = table->dev_uc_count;
+	}
+
+	/* Insert/renew filters */
+	for (i = 0; i < addr_count; i++) {
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
-		efx_filter_set_uc_def(&spec);
+		efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+					 addr_list[i].addr);
 		rc = efx_ef10_filter_insert(efx, &spec, true);
 		if (rc < 0) {
-			WARN_ON(1);
-			table->dev_uc_count = 0;
-		} else {
-			table->dev_uc_list[0].id = rc;
+			if (rollback) {
+				netif_info(efx, drv, efx->net_dev,
+					   "efx_ef10_filter_insert failed rc=%d\n",
+					   rc);
+				/* Fall back to promiscuous */
+				for (j = 0; j < i; j++) {
+					if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+						continue;
+					efx_ef10_filter_remove_unsafe(
+						efx, EFX_FILTER_PRI_AUTO,
+						addr_list[j].id);
+					addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+				}
+				return rc;
+			} else {
+				/* mark as not inserted, and carry on */
+				rc = EFX_EF10_FILTER_ID_INVALID;
+			}
 		}
+		addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
 	}
 
-	/* Insert/renew multicast filters */
-	if (table->dev_mc_count >= 0) {
-		for (i = 0; i < table->dev_mc_count; i++) {
-			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-					   EFX_FILTER_FLAG_RX_RSS,
-					   0);
-			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-						 table->dev_mc_list[i].addr);
-			rc = efx_ef10_filter_insert(efx, &spec, true);
-			if (rc < 0) {
-				/* Fall back to multicast-promisc */
-				while (i--)
-					efx_ef10_filter_remove_safe(
-						efx, EFX_FILTER_PRI_AUTO,
-						table->dev_mc_list[i].id);
-				table->dev_mc_count = -1;
-				break;
-			}
-			table->dev_mc_list[i].id = rc;
-		}
-	}
-	if (table->dev_mc_count < 0) {
+	if (multicast && rollback) {
+		/* Also need an Ethernet broadcast filter */
 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
 				   EFX_FILTER_FLAG_RX_RSS,
 				   0);
+		eth_broadcast_addr(baddr);
+		efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
+		rc = efx_ef10_filter_insert(efx, &spec, true);
+		if (rc < 0) {
+			netif_warn(efx, drv, efx->net_dev,
+				   "Broadcast filter insert failed rc=%d\n", rc);
+			/* Fall back to promiscuous */
+			for (j = 0; j < i; j++) {
+				if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+					continue;
+				efx_ef10_filter_remove_unsafe(
+					efx, EFX_FILTER_PRI_AUTO,
+					addr_list[j].id);
+				addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+			}
+			return rc;
+		} else {
+			table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+		}
+	}
+
+	return 0;
+}
+
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+				      bool rollback)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct efx_filter_spec spec;
+	u8 baddr[ETH_ALEN];
+	int rc;
+
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+			   EFX_FILTER_FLAG_RX_RSS,
+			   0);
+
+	if (multicast)
 		efx_filter_set_mc_def(&spec);
-		rc = efx_ef10_filter_insert(efx, &spec, true);
-		if (rc < 0) {
-			WARN_ON(1);
-			table->dev_mc_count = 0;
-		} else {
-			table->dev_mc_list[0].id = rc;
-		}
-	}
+	else
+		efx_filter_set_uc_def(&spec);
 
-	/* Remove filters that weren't renewed.  Since nothing else
-	 * changes the AUTO_OLD flag or removes these filters, we
-	 * don't need to hold the filter_lock while scanning for
-	 * these filters.
-	 */
+	rc = efx_ef10_filter_insert(efx, &spec, true);
+	if (rc < 0) {
+		netif_warn(efx, drv, efx->net_dev,
+			   "%scast mismatch filter insert failed rc=%d\n",
+			   multicast ? "Multi" : "Uni", rc);
+	} else if (multicast) {
+		table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+		if (!nic_data->workaround_26807) {
+			/* Also need an Ethernet broadcast filter */
+			efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+					   EFX_FILTER_FLAG_RX_RSS,
+					   0);
+			eth_broadcast_addr(baddr);
+			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+						 baddr);
+			rc = efx_ef10_filter_insert(efx, &spec, true);
+			if (rc < 0) {
+				netif_warn(efx, drv, efx->net_dev,
+					   "Broadcast filter insert failed rc=%d\n",
+					   rc);
+				if (rollback) {
+					/* Roll back the mc_def filter */
+					efx_ef10_filter_remove_unsafe(
+							efx, EFX_FILTER_PRI_AUTO,
+							table->mcdef_id);
+					table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+					return rc;
+				}
+			} else {
+				table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+			}
+		}
+		rc = 0;
+	} else {
+		table->ucdef_id = rc;
+		rc = 0;
+	}
+	return rc;
+}
+
+/* Remove filters that weren't renewed.  Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	bool remove_failed = false;
+	int i;
+
 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
 		if (ACCESS_ONCE(table->entry[i].spec) &
 		    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3917,6 +4133,87 @@
 	return rc ? rc : rc2;
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+	struct efx_ef10_filter_table *table = efx->filter_state;
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
+	struct net_device *net_dev = efx->net_dev;
+	bool uc_promisc = false, mc_promisc = false;
+
+	if (!efx_dev_registered(efx))
+		return;
+
+	if (!table)
+		return;
+
+	efx_ef10_filter_mark_old(efx);
+
+	/* Copy/convert the address lists; add the primary station
+	 * address and broadcast address
+	 */
+	netif_addr_lock_bh(net_dev);
+	efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+	efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+	netif_addr_unlock_bh(net_dev);
+
+	/* Insert/renew unicast filters */
+	if (uc_promisc) {
+		efx_ef10_filter_insert_def(efx, false, false);
+		efx_ef10_filter_insert_addr_list(efx, false, false);
+	} else {
+		/* If any of the filters failed to insert, fall back to
+		 * promiscuous mode - add in the uc_def filter.  But keep
+		 * our individual unicast filters.
+		 */
+		if (efx_ef10_filter_insert_addr_list(efx, false, false))
+			efx_ef10_filter_insert_def(efx, false, false);
+	}
+
+	/* Insert/renew multicast filters */
+	/* If changing promiscuous state with cascaded multicast filters, remove
+	 * old filters first, so that packets are dropped rather than duplicated
+	 */
+	if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+		efx_ef10_filter_remove_old(efx);
+	if (mc_promisc) {
+		if (nic_data->workaround_26807) {
+			/* If we failed to insert promiscuous filters, rollback
+			 * and fall back to individual multicast filters
+			 */
+			if (efx_ef10_filter_insert_def(efx, true, true)) {
+				/* Changing promisc state, so remove old filters */
+				efx_ef10_filter_remove_old(efx);
+				efx_ef10_filter_insert_addr_list(efx, true, false);
+			}
+		} else {
+			/* If we failed to insert promiscuous filters, don't
+			 * rollback.  Regardless, also insert the mc_list
+			 */
+			efx_ef10_filter_insert_def(efx, true, false);
+			efx_ef10_filter_insert_addr_list(efx, true, false);
+		}
+	} else {
+		/* If any filters failed to insert, rollback and fall back to
+		 * promiscuous mode - mc_def filter and maybe broadcast.  If
+		 * that fails, roll back again and insert as many of our
+		 * individual multicast filters as we can.
+		 */
+		if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+			/* Changing promisc state, so remove old filters */
+			if (nic_data->workaround_26807)
+				efx_ef10_filter_remove_old(efx);
+			if (efx_ef10_filter_insert_def(efx, true, true))
+				efx_ef10_filter_insert_addr_list(efx, true, false);
+		}
+	}
+
+	efx_ef10_filter_remove_old(efx);
+	efx->mc_promisc = mc_promisc;
+}
+
 static int efx_ef10_set_mac_address(struct efx_nic *efx)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -4085,6 +4382,8 @@
 	rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
 
 out:
+	if (rc == -EPERM)
+		rc = 0;
 	rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
 	return rc ? rc : rc2;
 }
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 03bc03b..974637d 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -115,9 +115,9 @@
  *
  * This is only used in MSI-X interrupt mode
  */
-static bool separate_tx_channels;
-module_param(separate_tx_channels, bool, 0444);
-MODULE_PARM_DESC(separate_tx_channels,
+bool efx_separate_tx_channels;
+module_param(efx_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
 		 "Use separate channels for TX and RX");
 
 /* This is the weight assigned to each of the (per-channel) virtual
@@ -1391,7 +1391,7 @@
 		unsigned int n_channels;
 
 		n_channels = efx_wanted_parallelism(efx);
-		if (separate_tx_channels)
+		if (efx_separate_tx_channels)
 			n_channels *= 2;
 		n_channels += extra_channels;
 		n_channels = min(n_channels, efx->max_channels);
@@ -1418,13 +1418,16 @@
 			efx->n_channels = n_channels;
 			if (n_channels > extra_channels)
 				n_channels -= extra_channels;
-			if (separate_tx_channels) {
-				efx->n_tx_channels = max(n_channels / 2, 1U);
+			if (efx_separate_tx_channels) {
+				efx->n_tx_channels = min(max(n_channels / 2,
+							     1U),
+							 efx->max_tx_channels);
 				efx->n_rx_channels = max(n_channels -
 							 efx->n_tx_channels,
 							 1U);
 			} else {
-				efx->n_tx_channels = n_channels;
+				efx->n_tx_channels = min(n_channels,
+							 efx->max_tx_channels);
 				efx->n_rx_channels = n_channels;
 			}
 			for (i = 0; i < efx->n_channels; i++)
@@ -1450,7 +1453,7 @@
 
 	/* Assume legacy interrupts */
 	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
-		efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
+		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
 		efx->n_rx_channels = 1;
 		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
@@ -1624,7 +1627,8 @@
 	struct efx_tx_queue *tx_queue;
 
 	efx->tx_channel_offset =
-		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
+		efx_separate_tx_channels ?
+		efx->n_channels - efx->n_tx_channels : 0;
 
 	/* We need to mark which channels really have RX and TX
 	 * queues, and adjust the TX queue numbers if we have separate
@@ -1653,17 +1657,34 @@
 	if (rc)
 		return rc;
 
-	/* Determine the number of channels and queues by trying to hook
-	 * in MSI-X interrupts. */
-	rc = efx_probe_interrupts(efx);
-	if (rc)
-		goto fail1;
+	do {
+		if (!efx->max_channels || !efx->max_tx_channels) {
+			netif_err(efx, drv, efx->net_dev,
+				  "Insufficient resources to allocate"
+				  " any channels\n");
+			rc = -ENOSPC;
+			goto fail1;
+		}
 
-	efx_set_channels(efx);
+		/* Determine the number of channels and queues by trying
+		 * to hook in MSI-X interrupts.
+		 */
+		rc = efx_probe_interrupts(efx);
+		if (rc)
+			goto fail1;
 
-	rc = efx->type->dimension_resources(efx);
-	if (rc)
-		goto fail2;
+		efx_set_channels(efx);
+
+		/* dimension_resources can fail with EAGAIN */
+		rc = efx->type->dimension_resources(efx);
+		if (rc != 0 && rc != -EAGAIN)
+			goto fail2;
+
+		if (rc == -EAGAIN)
+			/* try again with new max_channels */
+			efx_remove_interrupts(efx);
+
+	} while (rc == -EAGAIN);
 
 	if (efx->n_channels > 1)
 		netdev_rss_key_fill(&efx->rx_hash_key,
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index acb1e07..1aaf76c 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -35,6 +35,7 @@
 int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 extern unsigned int efx_piobuf_size;
+extern bool efx_separate_tx_channels;
 
 /* RX */
 void efx_set_default_rx_indir_table(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 80e69af..d790cb8 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2371,6 +2371,7 @@
 
 	efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
 			     EFX_MAX_CHANNELS);
+	efx->max_tx_channels = efx->max_channels;
 	efx->timer_quantum_ns = 4968; /* 621 cycles */
 
 	/* Initialise I2C adapter */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 81640f8..98d172b 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1779,15 +1779,31 @@
 	return rc;
 }
 
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+			    unsigned int *flags)
 {
 	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+	size_t outlen;
+	int rc;
 
 	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
 	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
 	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
-	return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
-			    NULL, 0, NULL);
+	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (!flags)
+		return 0;
+
+	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+	else
+		*flags = 0;
+
+	return 0;
 }
 
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1832,11 @@
 	return 0;
 
 fail:
-	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+	 * terrifying.  The call site will have to deal with it though.
+	 */
+	netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+		     efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 1838afe..025d504 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -346,7 +346,8 @@
 bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+			    unsigned int *flags);
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
 			     unsigned int *enabled_out);
 
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 45fca9f..4cc7721 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -26,6 +26,10 @@
  * Unlike a warm boot, assume DMEM has been reloaded, so that
  * the MC persistent data must be reinitialised. */
 #define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode.  This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
 /* BIST state has been initialized */
 #define MC_FW_BIST_INIT_OK (128)
 
@@ -169,6 +173,8 @@
 #define MC_CMD_ERR_EINTR 4
 /* I/O failure */
 #define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
 /* Try again */
 #define MC_CMD_ERR_EAGAIN 11
 /* Out of memory */
@@ -181,6 +187,10 @@
 #define MC_CMD_ERR_ENODEV 19
 /* Invalid argument to target */
 #define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
 /* Out of range */
 #define MC_CMD_ERR_ERANGE 34
 /* Non-recursive resource is already acquired */
@@ -226,6 +236,43 @@
 #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
 /* The datapath is disabled. */
 #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN  0x100c
+/* The requested operation might require the
+   command to be passed between MCs, and the
+   transport doesn't support that.  Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such a case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
 
 #define MC_CMD_ERR_CODE_OFST 0
 
@@ -275,6 +322,11 @@
 	 MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST +		\
 	 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
 
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n)  (((n) & 0xff) << 16)
+
 
 /* Version 2 adds an optional argument to error returns: the errno value
  * may be followed by the (0-based) number of the first argument that
@@ -394,6 +446,8 @@
 #define          MCDI_EVENT_AOE_BYTEBLASTER 0x9
 /* enum: DDR ECC status update */
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define          MCDI_EVENT_AOE_PTP_STATUS 0xb
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
@@ -408,6 +462,16 @@
 #define        MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
 #define        MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
 #define        MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define        MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define          MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define          MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define          MCDI_EVENT_MUM_WATCHDOG 0x3
+#define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
@@ -416,6 +480,8 @@
 #define       MCDI_EVENT_EV_CODE_WIDTH 4
 #define       MCDI_EVENT_CODE_LBN 44
 #define       MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define          MCDI_EVENT_SW_EVENT 0x0
 /* enum: Bad assert. */
 #define          MCDI_EVENT_CODE_BADSSERT 0x1
 /* enum: PM Notice. */
@@ -470,6 +536,14 @@
 #define          MCDI_EVENT_CODE_MC_BIST 0x19
 /* enum: PTP tick event providing current NIC time */
 #define          MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define          MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define          MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
@@ -537,6 +611,33 @@
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
@@ -581,6 +682,10 @@
 #define          FCDI_EVENT_CODE_PTP_TICK 0x7
 /* enum: ECC error counters */
 #define          FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define          FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define          FCDI_EVENT_CODE_PORT_CONFIG 0xa
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -594,11 +699,24 @@
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define       FCDI_EVENT_PTP_STATE_OFST 0
+#define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define       FCDI_EVENT_PTP_STATE_LBN 0
+#define       FCDI_EVENT_PTP_STATE_WIDTH 32
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define       FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 
 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
  * to the MC. Note that this structure | is overlayed over a normal FCDI event
@@ -631,6 +749,90 @@
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
 
+/* MUM_EVENT structuredef */
+#define    MUM_EVENT_LEN 8
+#define       MUM_EVENT_CONT_LBN 32
+#define       MUM_EVENT_CONT_WIDTH 1
+#define       MUM_EVENT_LEVEL_LBN 33
+#define       MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          MUM_EVENT_LEVEL_INFO  0x0
+/* enum: Warning. */
+#define          MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          MUM_EVENT_LEVEL_FATAL 0x3
+#define       MUM_EVENT_DATA_OFST 0
+#define        MUM_EVENT_SENSOR_ID_LBN 0
+#define        MUM_EVENT_SENSOR_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define        MUM_EVENT_SENSOR_STATE_LBN 8
+#define        MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define        MUM_EVENT_PORT_PHY_READY_LBN 0
+#define        MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define        MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define        MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define        MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define        MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define       MUM_EVENT_DATA_LBN 0
+#define       MUM_EVENT_DATA_WIDTH 32
+#define       MUM_EVENT_SRC_LBN 36
+#define       MUM_EVENT_SRC_WIDTH 8
+#define       MUM_EVENT_EV_CODE_LBN 60
+#define       MUM_EVENT_EV_CODE_WIDTH 4
+#define       MUM_EVENT_CODE_LBN 44
+#define       MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define          MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define          MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define          MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LBN 0
+#define       MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define       MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define       MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
 
 /***********************************/
 /* MC_CMD_READ32
@@ -687,24 +889,34 @@
 
 /* MC_CMD_COPYCODE_IN msgrequest */
 #define    MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
  */
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
  */
 #define          MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
  */
 #define          MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
@@ -795,6 +1007,10 @@
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
@@ -802,7 +1018,8 @@
 
 /***********************************/
 /* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
  */
 #define MC_CMD_LOG_CTRL 0x7
 
@@ -816,6 +1033,7 @@
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -955,8 +1173,12 @@
  * input on the same NIC.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define          MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
 /* enum: Above this for future use. */
-#define          MC_CMD_PTP_OP_MAX 0x1b
+#define          MC_CMD_PTP_OP_MAX 0x1c
 
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -1191,8 +1413,12 @@
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
 
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
@@ -1214,6 +1440,23 @@
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
 
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 
@@ -1375,7 +1618,7 @@
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
 
 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
 /* Time format required/used by for this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
@@ -1396,6 +1639,13 @@
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
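
A sketch of how a host might probe the new CAPABILITIES word (get_le32 and fw_reports_sync_status are hypothetical helpers; older firmware returns only the original 8-byte response, which simply means the capability is absent):

#include <stddef.h>
#include <stdint.h>

static uint32_t get_le32(const uint8_t *p)
{
        return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static int fw_reports_sync_status(const uint8_t *resp, size_t resp_len)
{
        uint32_t caps;

        if (resp_len < MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
                return 0;       /* old 8-byte response: no CAPABILITIES word */
        caps = get_le32(resp + MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST);
        return (caps >> MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN) & 1;
}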
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
@@ -1415,6 +1665,9 @@
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define    MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
 
 /***********************************/
 /* MC_CMD_CSR_READ32
@@ -1915,6 +2168,14 @@
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define          MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define          MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define          MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
 /* enum: Only this option is allowed for non-admin functions */
 #define          MC_CMD_FW_DONT_CARE  0xffffffff
 
@@ -2481,6 +2742,12 @@
 #define          MC_CMD_LOOPBACK_SD_FES_WS  0x22
 /* enum: Near side of AOE Siena side port */
 #define          MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23
+/* enum: Medford Wireside datapath loopback */
+#define          MC_CMD_LOOPBACK_DATA_WS  0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define          MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25
 /* Supported loopbacks. */
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
@@ -2552,12 +2819,8 @@
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define          MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define          MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define          MC_CMD_FCNTL_BIDIR 0x2
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -2632,7 +2895,7 @@
 #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /* MC_CMD_SET_MAC_IN msgrequest */
-#define    MC_CMD_SET_MAC_IN_LEN 24
+#define    MC_CMD_SET_MAC_IN_LEN 28
 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
  * EtherII, VLAN, bug16011 padding).
  */
@@ -2649,13 +2912,20 @@
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
 /* enum: Flow control is off. */
-/*               MC_CMD_FCNTL_OFF 0x0 */
+#define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
-/*               MC_CMD_FCNTL_RESPOND 0x1 */
+#define          MC_CMD_FCNTL_RESPOND 0x1
 /* enum: Respond to and Issue flow control. */
-/*               MC_CMD_FCNTL_BIDIR 0x2 */
+#define          MC_CMD_FCNTL_BIDIR 0x2
 /* enum: Auto neg flow control. */
 #define          MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define          MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define          MC_CMD_FCNTL_GENERATE 0x5
+#define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
 /* MC_CMD_SET_MAC_OUT msgresponse */
 #define    MC_CMD_SET_MAC_OUT_LEN 0
@@ -2748,7 +3018,8 @@
  * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
  * performed, and the statistics may be read from the message response. If
 * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
  */
 #define MC_CMD_MAC_STATS 0x2e
 
@@ -2791,6 +3062,7 @@
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
 #define          MC_CMD_MAC_GENERATION_START  0x0 /* enum */
+#define          MC_CMD_MAC_DMABUF_START  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PKTS  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PAUSE_PKTS  0x2 /* enum */
 #define          MC_CMD_MAC_TX_CONTROL_PKTS  0x3 /* enum */
@@ -2890,8 +3162,8 @@
  * PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS  0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
@@ -3213,6 +3485,8 @@
 #define          MC_CMD_NVRAM_TYPE_LICENSE 0x12
 /* enum: FC Log. */
 #define          MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define          MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
 
 
 /***********************************/
@@ -3407,6 +3681,8 @@
  */
 #define MC_CMD_SCHEDINFO 0x3e
 
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SCHEDINFO_IN msgrequest */
 #define    MC_CMD_SCHEDINFO_IN_LEN 0
 
@@ -3593,6 +3869,68 @@
 #define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC  0x2c
 /* enum: Hotpoint temperature: degC */
 #define          MC_CMD_SENSOR_HOTPOINT_TEMP  0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT0  0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT1  0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define          MC_CMD_SENSOR_MUM_VCC  0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_A  0x31
+/* enum: 0.9v power phase A current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_A  0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_A_TEMP  0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_B  0x34
+/* enum: 0.9v power phase B current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_B  0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_B_TEMP  0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY  0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC  0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY  0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC  0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE1_NEXT  0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT  0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP  0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC  0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC  0x43
+/* enum: controller internal temperature sensor voltage on slave core (internal
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT  0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP  0x45
+/* enum: controller internal temperature sensor voltage on slave core (external
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC  0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC  0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define          MC_CMD_SENSOR_SODIMM_VOUT  0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_0_TEMP  0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_1_TEMP  0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY0_VCC  0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY1_VCC  0x4d
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
@@ -3701,6 +4039,8 @@
 #define          MC_CMD_SENSOR_STATE_BROKEN  0x3
 /* enum: Sensor is working but does not currently have a reading. */
 #define          MC_CMD_SENSOR_STATE_NO_READING  0x4
+/* enum: Sensor initialisation failed. */
+#define          MC_CMD_SENSOR_STATE_INIT_FAILED  0x5
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
@@ -3870,6 +4210,7 @@
 
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
@@ -3877,11 +4218,38 @@
 #define          MC_CMD_WORKAROUND_BUG35388 0x2
 /* enum: Bug35017 workaround (A64 tables must be identity map) */
 #define          MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining).
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define          MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
 
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
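
Sketch of interpreting the extended response returned when TYPE is MC_CMD_WORKAROUND_BUG26807 (byte order is handled manually here; the driver's own MCDI accessors would normally be used, and workaround26807_flr_done is a hypothetical helper):

#include <stddef.h>
#include <stdint.h>

static int workaround26807_flr_done(const uint8_t *resp, size_t resp_len)
{
        uint32_t flags;

        if (resp_len < MC_CMD_WORKAROUND_EXT_OUT_LEN)
                return 0;               /* plain MC_CMD_WORKAROUND_OUT */
        flags = resp[MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST] |
                (resp[MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST + 1] << 8) |
                ((uint32_t)resp[MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST + 2] << 16) |
                ((uint32_t)resp[MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST + 3] << 24);
        /* FLR_DONE set: other functions' filters were removed via FLR. */
        return (flags >> MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) & 1;
}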
+
 
 /***********************************/
 /* MC_CMD_GET_PHY_MEDIA_INFO
@@ -4093,7 +4461,7 @@
 
 /***********************************/
 /* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
 
@@ -4115,6 +4483,527 @@
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
 
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define    MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define       MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define          MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define          MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define          MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define    MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define    MC_CMD_CLP_IN_DEFAULT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_LEN 12
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define    MC_CMD_MUM_IN_LEN 4
+#define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define        MC_CMD_MUM_IN_OP_LBN 0
+#define        MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define          MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define          MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define          MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define          MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define          MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define          MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define          MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define          MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define          MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define          MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define    MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define       MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define    MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/*               MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define    MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define    MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define    MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_READ_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define       MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
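
Each 32-bit word of the READ_SENSORS response packs a reading, a state and a type; a minimal unpacking sketch using the bit positions above (mum_sensor_unpack is a hypothetical helper):

#include <stdint.h>

static void mum_sensor_unpack(uint32_t word, uint16_t *reading,
                              uint8_t *state, uint8_t *type)
{
        *reading = (word >> MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN) & 0xffff;
        *state   = (word >> MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN) & 0xff;
        *type    = (word >> MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN) & 0xff;
}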
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
 /* MC_CMD_RESOURCE_SPECIFIER enum */
 /* enum: Any */
 #define          MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -4203,6 +5092,30 @@
 #define          NVRAM_PARTITION_TYPE_PHY_MIN              0xa00
 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
 #define          NVRAM_PARTITION_TYPE_PHY_MAX              0xaff
+/* enum: Primary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA                 0xb00
+/* enum: Secondary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA_BACKUP          0xb01
+/* enum: FC firmware partition */
+#define          NVRAM_PARTITION_TYPE_FC_FIRMWARE          0xb02
+/* enum: FC License partition */
+#define          NVRAM_PARTITION_TYPE_FC_LICENSE           0xb03
+/* enum: Non-volatile log output partition for FC */
+#define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
+/* enum: MUM firmware partition */
+#define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
+/* enum: MUM Application table partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_APPTABLE         0xc02
+/* enum: MUM boot rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_BOOT_ROM         0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_PROD_ROM         0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_USER_ROM         0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
@@ -4218,66 +5131,69 @@
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
 /* enum: OpenOnload */
-#define          LICENSED_APP_ID_ONLOAD            0x1
+#define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
-#define          LICENSED_APP_ID_PTP               0x2
+#define          LICENSED_APP_ID_PTP                     0x2
 /* enum: SolarCapture Pro */
-#define          LICENSED_APP_ID_SOLARCAPTURE_PRO  0x4
+#define          LICENSED_APP_ID_SOLARCAPTURE_PRO        0x4
+/* enum: SolarSecure filter engine */
+#define          LICENSED_APP_ID_SOLARSECURE             0x8
+/* enum: Performance monitor */
+#define          LICENSED_APP_ID_PERF_MONITOR            0x10
+/* enum: SolarCapture Live */
+#define          LICENSED_APP_ID_SOLARCAPTURE_LIVE       0x20
+/* enum: Capture SolarSystem */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM     0x40
+/* enum: Network Access Control */
+#define          LICENSED_APP_ID_NETWORK_ACCESS_CONTROL  0x80
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
 
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
+/* TX_TIMESTAMP_EVENT structuredef */
+#define    TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: ordinary TX completion, or low or high part of TX timestamp
  */
-#define MC_CMD_GET_WORKAROUNDS 0x59
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is the low part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
+/* enum: This is the high part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI  0x52
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
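
The timestamp data arrives split across a TSTAMP_LO and a TSTAMP_HI event; a trivial sketch of recombining the two 16-bit halves (a real completion path would also have to pair the LO/HI events up per queue; tx_tstamp_combine is a hypothetical helper):

#include <stdint.h>

static uint32_t tx_tstamp_combine(uint16_t tstamp_data_lo, uint16_t tstamp_data_hi)
{
        /* LO event carries bits [15:0], HI event carries bits [31:16]. */
        return ((uint32_t)tstamp_data_hi << 16) | tstamp_data_lo;
}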
 
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
+/* RSS_MODE structuredef */
+#define    RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 - 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
  */
-#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_LEN 1
+#define        RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define        RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_DST_ADDR_LBN 1
+#define        RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_SRC_PORT_LBN 2
+#define        RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define        RSS_MODE_HASH_DST_PORT_LBN 3
+#define        RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define       RSS_MODE_HASH_SELECTOR_LBN 0
+#define       RSS_MODE_HASH_SELECTOR_WIDTH 8
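
A sketch of composing the 4-bit hash selector described above, here for a conventional 4-tuple hash over source/destination address and port (rss_mode_4tuple is a hypothetical helper):

#include <stdint.h>

static uint8_t rss_mode_4tuple(void)
{
        return (1u << RSS_MODE_HASH_SRC_ADDR_LBN) |
               (1u << RSS_MODE_HASH_DST_ADDR_LBN) |
               (1u << RSS_MODE_HASH_SRC_PORT_LBN) |
               (1u << RSS_MODE_HASH_DST_PORT_LBN);     /* == 0xf */
}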
 
 
 /***********************************/
@@ -4413,7 +5329,9 @@
 
 #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
@@ -4456,9 +5374,73 @@
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET  0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM  0x1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M  0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K  0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K  0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K  0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K  0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
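
Sketch of composing the extended FLAGS dword for SolarCapture-style packed stream delivery with 64K buffers (field positions from the _LBN/_WIDTH definitions above; whether the RX prefix is also wanted depends on the application, and init_rxq_ext_flags_packed_stream is a hypothetical helper):

#include <stdint.h>

static uint32_t init_rxq_ext_flags_packed_stream(int want_prefix)
{
        uint32_t flags = 0;

        flags |= (uint32_t)MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM
                 << MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN;
        flags |= (uint32_t)MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K
                 << MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN;
        if (want_prefix)
                flags |= 1u << MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN;
        return flags;
}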
+
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_INIT_TXQ
@@ -4467,7 +5449,9 @@
 
 #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
@@ -4499,6 +5483,10 @@
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -4511,6 +5499,60 @@
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
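
Sketch of the QBB_FLAGS dword at offset 540, enabling 802.1Qbb priority flow control on a given priority 0-7 (init_txq_ext_qbb_flags is a hypothetical helper):

#include <stdint.h>

static uint32_t init_txq_ext_qbb_flags(unsigned int priority)
{
        return (1u << MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN) |
               ((priority & 0x7u) << MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN);
}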
+
 /* MC_CMD_INIT_TXQ_OUT msgresponse */
 #define    MC_CMD_INIT_TXQ_OUT_LEN 0
 
@@ -4617,6 +5659,132 @@
 /* MC_CMD_PROXY_CMD_OUT msgresponse */
 #define    MC_CMD_PROXY_CMD_OUT_LEN 0
 
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define    MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define       MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define       MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define       MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define       MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define       MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define       MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define       MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define       MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define       MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define       MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define       MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
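
Sketch of decoding one 16-byte status-buffer entry on the host (the little-endian load helpers are stand-ins for the driver's MCDI accessors, and struct proxy_status is a hypothetical host-side representation):

#include <stdint.h>

struct proxy_status {
        uint32_t handle;
        uint16_t pf, vf, rid, status;
        uint32_t granted_privileges;
};

static uint16_t ld_le16(const uint8_t *p) { return p[0] | (p[1] << 8); }
static uint32_t ld_le32(const uint8_t *p)
{
        return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void proxy_status_decode(const uint8_t *buf, struct proxy_status *st)
{
        st->handle = ld_le32(buf + MC_PROXY_STATUS_BUFFER_HANDLE_OFST);
        st->pf     = ld_le16(buf + MC_PROXY_STATUS_BUFFER_PF_OFST);
        st->vf     = ld_le16(buf + MC_PROXY_STATUS_BUFFER_VF_OFST);
        st->rid    = ld_le16(buf + MC_PROXY_STATUS_BUFFER_RID_OFST);
        st->status = ld_le16(buf + MC_PROXY_STATUS_BUFFER_STATUS_OFST);
        st->granted_privileges =
                ld_le32(buf + MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST);
}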
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
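
A sketch of the sizing constraints described above: each block size must be a power of two (the reply block size may instead be zero when no reply buffer is provided), and each buffer must hold at least NUM_BLOCKS blocks (is_pow2 and proxy_buffers_valid are hypothetical helpers):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool is_pow2(uint32_t x)
{
        return x && !(x & (x - 1));
}

static bool proxy_buffers_valid(uint32_t status_block, uint32_t request_block,
                                uint32_t reply_block, uint32_t num_blocks,
                                size_t *request_bytes)
{
        if (!num_blocks || !is_pow2(status_block) || !is_pow2(request_block))
                return false;
        if (reply_block && !is_pow2(reply_block))
                return false;
        /* Minimum size of the request buffer allocation, for example. */
        *request_bytes = (size_t)request_block * num_blocks;
        return true;
}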
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD; the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define          MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_ALLOC_BUFTBL_CHUNK
@@ -4688,6 +5856,44 @@
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
+/* PORT_CONFIG_ENTRY structuredef */
+#define    PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define       PORT_CONFIG_ENTRY_CORE_OFST 1
+#define       PORT_CONFIG_ENTRY_CORE_LEN 1
+#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
+#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
+#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
+#define       PORT_CONFIG_ENTRY_CORE_LBN 8
+#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define       PORT_CONFIG_ENTRY_LANES_OFST 4
+#define       PORT_CONFIG_ENTRY_LANES_LBN 32
+#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
 
 /***********************************/
 /* MC_CMD_FILTER_OP
@@ -4759,9 +5965,9 @@
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_HOST  0x1
 /* enum: receive to MC */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_MC  0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX0  0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
@@ -4778,9 +5984,7 @@
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
  * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
 /* transmit domain (reserved; set to 0) */
@@ -4835,6 +6039,235 @@
 #define       MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
 #define       MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
 
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST  0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC  0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0  0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS  0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING  0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN  0x0
+/* enum: Match Geneve traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE  0x1
+/* enum: Reserved for experimental development use */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL  0xfe
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define          MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE  0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
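Another hedged sketch, not part of this patch: a Medford-only caller could match VXLAN traffic by VNI through the new VNI_OR_VSID field roughly as below, assuming the pre-existing MC_CMD_FILTER_OP command code and OP_INSERT opcode plus the driver's MCDI_POPULATE_DWORD_2 helper:

    /* Sketch: request insertion of a filter matching a 24-bit VXLAN VNI. */
    static int efx_insert_vxlan_vni_filter(struct efx_nic *efx, u32 vni)
    {
            MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
            MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
            size_t outlen;

            MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_OP,
                           MC_CMD_FILTER_OP_IN_OP_INSERT);
            MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_MATCH_FIELDS,
                           1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN);
            /* VNI_VALUE occupies bits 0..23 of the field, VNI_TYPE bits 24..31 */
            MCDI_POPULATE_DWORD_2(inbuf, FILTER_OP_EXT_IN_VNI_OR_VSID,
                                  FILTER_OP_EXT_IN_VNI_VALUE, vni,
                                  FILTER_OP_EXT_IN_VNI_TYPE,
                                  MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN);
            MCDI_SET_DWORD(inbuf, FILTER_OP_EXT_IN_RX_DEST,
                           MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST);

            return efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
                                outbuf, sizeof(outbuf), &outlen);
    }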
 /* MC_CMD_FILTER_OP_OUT msgresponse */
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
@@ -4849,6 +6282,27 @@
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID  0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID  0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_OUT/HANDLE */
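A further sketch (again not part of this patch): because 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle, a caller can keep it as an "unused slot" marker in its own state, assuming the MCDI_QWORD accessor:

            u64 handle = MCDI_QWORD(outbuf, FILTER_OP_EXT_OUT_HANDLE);

            /* The guaranteed-invalid sentinel means no filter is installed. */
            if (handle == ((u64)MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID << 32 |
                           MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID))
                    return -ENOENT;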
 
 
 /***********************************/
@@ -4865,6 +6319,10 @@
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS  0x2
 
 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -4884,6 +6342,17 @@
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
 
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
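Sketch only, not part of this patch: querying the new GET_RESTRICTIONS op could look roughly like this, assuming the usual efx_mcdi_rpc flow and the kernel BIT() macro:

    /* Sketch: true if this client may only insert multicast-destination IP filters. */
    static bool efx_filter_dst_ip_mcast_only(struct efx_nic *efx)
    {
            MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
            MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN);
            size_t outlen;
            int rc;

            MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
                           MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS);
            rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf, sizeof(inbuf),
                              outbuf, sizeof(outbuf), &outlen);
            if (rc || outlen < MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN)
                    return false;

            return MCDI_DWORD(outbuf,
                              GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS) &
                   BIT(MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN);
    }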
 
 /***********************************/
 /* MC_CMD_PARSER_DISP_RW
@@ -4901,8 +6370,10 @@
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
 /* enum: read a word of DICPU DMEM or a LUE entry */
@@ -4919,6 +6390,8 @@
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
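To make the DMEM read-modify-write rule a few lines above (new = (old & mask) ^ value) concrete, a small worked example (editorial illustration only):

    /* Assuming old DMEM word = 0x000000f0:
     *   RMW_AND_MASK  = 0x0000000f   (preserve only the low nibble)
     *   RMW_XOR_VALUE = 0x000000a5
     *   new = (0x000000f0 & 0x0000000f) ^ 0x000000a5 = 0x000000a5
     * Bits cleared in the mask take the XOR value directly; bits kept by the
     * mask are toggled wherever the XOR value has a 1.
     */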
@@ -5019,7 +6492,9 @@
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
 
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
@@ -5028,6 +6503,17 @@
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
 
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
 
 /***********************************/
 /* MC_CMD_FREE_VIS
@@ -5114,13 +6600,15 @@
 #define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
 
 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
 
 
 /***********************************/
@@ -5575,6 +7063,7 @@
 #define MC_CMD_GET_CAPABILITIES 0xbe
 
 #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CAPABILITIES_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
 
@@ -5582,6 +7071,20 @@
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
@@ -5600,8 +7103,14 @@
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
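Sketch only (not part of this patch): a driver would typically latch these new FLAGS1 capability bits from the GET_CAPABILITIES response, e.g.:

            u32 flags1 = MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
            bool vxlan_nvgre_offload =
                    flags1 & BIT(MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN);
            bool rx_include_fcs =
                    flags1 & BIT(MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN);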
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
@@ -5609,6 +7118,10 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP  0x0
 /* enum: Low latency RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
 /* enum: RXDP Test firmware image 2 */
@@ -5632,6 +7145,10 @@
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP  0x0
 /* enum: Low latency TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
 /* enum: TXDP Test firmware image 2 */
@@ -5642,22 +7159,69 @@
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
 /* Licensed capabilities */
@@ -5735,6 +7299,15 @@
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
 
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
 
@@ -5753,8 +7326,14 @@
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq is placed into when it is inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
 /* an already reserved bucket (typically set to bucket associated with outer
@@ -5768,6 +7347,35 @@
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
 
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is placed into when it is inserted. */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
 
@@ -5826,13 +7434,23 @@
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB  0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA  0x3
+/* enum: MUX */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX  0x4
+/* enum: Snapper specific; semantics TBD */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
@@ -5892,7 +7510,10 @@
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
@@ -6136,8 +7757,13 @@
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
 
 
 /***********************************/
@@ -6249,7 +7875,11 @@
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
+ * See the RSS_MODE structure for the meaning of the mode bits.
+ */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6259,6 +7889,20 @@
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
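One more hedged sketch, not part of this patch: programming one of the new per-traffic-class _MODE nibbles could look like this; the 4-bit encoding itself comes from the RSS_MODE structuredef, which is outside this hunk:

    /* Sketch: set the TCP/IPv4 RSS mode nibble for an existing context.
     * tcp_ipv4_mode is a 4-bit value per the RSS_MODE structuredef; any
     * non-zero _MODE nibble causes the legacy _EN bits to be ignored.
     */
    static int efx_rss_set_tcp4_mode(struct efx_nic *efx, u32 ctx_id,
                                     u32 tcp_ipv4_mode)
    {
            MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);

            MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, ctx_id);
            MCDI_POPULATE_DWORD_1(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
                                  RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE,
                                  tcp_ipv4_mode);

            return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf,
                                sizeof(inbuf), NULL, 0, NULL);
    }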
 
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
@@ -6279,7 +7923,12 @@
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
@@ -6289,6 +7938,20 @@
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
 
 
 /***********************************/
@@ -6311,8 +7974,13 @@
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
 
 /***********************************/
@@ -6421,375 +8089,6 @@
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
- */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
-
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define       MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
 /* MC_CMD_VPORT_ADD_MAC_ADDRESS
  * Add a MAC address to a v-port
  */
@@ -6877,7 +8176,7 @@
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
@@ -6921,354 +8220,6 @@
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
 /* MC_CMD_GET_CLOCK
  * Return the system and PDCPU clock frequencies.
  */
@@ -7296,22 +8247,66 @@
 #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
-#define    MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define    MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for MC clock domain */
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
 
 /* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define    MC_CMD_SET_CLOCK_OUT_LEN 12
+#define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
+/* Resulting PCS frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
+/* Resulting MC frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
+/* Resulting rmon frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
+/* Resulting vswitch frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
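
SET_CLOCK now carries seven clock domains instead of three, and each request word can be set to the corresponding *_DOMAIN_DONT_CHANGE value (0) to leave that domain alone. A minimal usage sketch, assuming the sfc driver's existing MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc); the helper below is illustrative only and not part of this patch:

	static int example_set_sys_clock(struct efx_nic *efx, u32 sys_mhz,
					 u32 *actual_mhz)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_CLOCK_OUT_LEN);
		size_t outlen;
		int rc;

		/* Request a new system-clock frequency ... */
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_SYS_FREQ, sys_mhz);
		/* ... and leave every other clock domain untouched */
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_ICORE_FREQ,
			       MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE);
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ,
			       MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE);
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_PCS_FREQ,
			       MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE);
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_MC_FREQ,
			       MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE);
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_RMON_FREQ,
			       MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE);
		MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_VSWITCH_FREQ,
			       MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE);

		rc = efx_mcdi_rpc(efx, MC_CMD_SET_CLOCK, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_SET_CLOCK_OUT_LEN)
			return -EIO;
		/* Firmware reports the frequency actually achieved */
		*actual_mhz = MCDI_DWORD(outbuf, SET_CLOCK_OUT_SYS_FREQ);
		return 0;
	}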
 
 
 /***********************************/
@@ -7325,12 +8320,22 @@
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x0
+/* enum: RxDPCPU0 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX0  0x1
 /* enum: TxDPCPU1 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX1  0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX1   0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX   0x81
 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
  * initialised to zero
  */
@@ -7418,6 +8423,25 @@
 
 
 /***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define    MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define    MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
 /* MC_CMD_CAP_BLK_READ
  * Read multiple 64bit words from capture block memory
  */
@@ -7730,6 +8754,8 @@
  * more data is returned.
  */
 #define          MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT  0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define          MC_CMD_KR_TUNE_IN_READ_FOM  0x7
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
@@ -7762,20 +8788,32 @@
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT  0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST  0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1  0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2  0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3  0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4  0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5  0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV  0x7
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
@@ -7865,6 +8903,8 @@
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY  0x7
 /* enum: TX Slew Rate Fine control */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET  0x8
+/* enum: TX Termination Impedance control */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0  0x0 /* enum */
@@ -7955,6 +8995,20 @@
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
 
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define    MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
 
 /***********************************/
 /* MC_CMD_PCIE_TUNE
@@ -8224,6 +9278,8 @@
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
+/* enum: mask application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_MASK  0x1
 /* arguments specific to this particular operation */
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
@@ -8258,10 +9314,22 @@
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
 
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
  * function. Only a privileged function may change the port sniffing
  * configuration. A copy of all traffic delivered to the host (non-promiscuous
  * mode) or all traffic arriving at the port (promiscuous mode) may be
@@ -8299,7 +9367,7 @@
 
 /***********************************/
 /* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
  * associated with the calling function. Only a privileged function may read
  * the configuration.
  */
@@ -8330,4 +9398,673 @@
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
 
 
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN  0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX  0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
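
The SET request is TYPE, then ENTITY, then one or more VALUE words whose meaning depends on TYPE. As a hedged sketch of the per-TXQ multicast UDP lookup enable described above, using the MCDI helpers the driver already has (the function name is illustrative, not part of this patch):

	static int example_txq_mcast_udp_lookup(struct efx_nic *efx,
						u32 txq_handle, bool enable)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));

		MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
			       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
		MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
		/* a single boolean VALUE word for this TYPE */
		MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE,
			       enable ? 1 : 0);

		return efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}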
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
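
Enabling TX sniffing therefore means pointing the mirror at an RX queue (or an RSS set) that is prepared to receive packets whose prefix carries transmit timestamps. A minimal sketch for the simple single-queue case, assuming the driver's usual MCDI helpers; the helper name is illustrative and not part of this patch:

	static int example_enable_tx_sniff(struct efx_nic *efx, u32 rxq_handle)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN);

		MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS,
			       1 << MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN);
		MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE,
			       rxq_handle);
		MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE,
			       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
		/* RX_CONTEXT is only used with RX_MODE_RSS; 0xFFFFFFFF is never
		 * a valid handle, so it is a safe filler here.
		 */
		MCDI_SET_DWORD(inbuf, SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT,
			       0xffffffff);

		return efx_mcdi_rpc(efx, MC_CMD_SET_TX_PORT_SNIFF_CONFIG,
				    inbuf, sizeof(inbuf), NULL, 0, NULL);
	}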
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define    MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define    MC_CMD_READ_ATB_IN_LEN 16
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define    MC_CMD_READ_ATB_OUT_LEN 4
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
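
Both output words are plain bitmasks, one bit per workaround, tested against the enums above; this is what backs the new workaround_26807 flag added to the EF10 NIC data in nic.h later in this patch. A hedged sketch (illustrative helper, standard MCDI plumbing assumed):

	static int example_bug26807_enabled(struct efx_nic *efx, bool *enabled)
	{
		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
		size_t outlen;
		int rc;

		/* No request payload is defined for this command */
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN)
			return -EIO;

		*enabled = !!(MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED) &
			      MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
		return 0;
	}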
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define          MC_CMD_PRIVILEGE_MASK_IN_VF_NULL  0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP               0x8 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS  0x10 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING      0x20 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST           0x40 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST         0x80 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST         0x100 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST     0x200 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS       0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE             0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
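
The FUNCTION encoding above packs the PF into bits 15:0 and the VF into bits 31:16, with VF_NULL (0xffff) meaning the PF itself; the worked values in the comment follow directly from that. A small illustrative helper, not part of this patch:

	/* Illustrative only: build the FUNCTION word for MC_CMD_PRIVILEGE_MASK.
	 * Pass MC_CMD_PRIVILEGE_MASK_IN_VF_NULL as vf to address the PF itself.
	 */
	static inline u32 example_privilege_mask_function(u16 pf, u16 vf)
	{
		return (u32)vf << 16 | pf;
	}

	/* example_privilege_mask_function(0, MC_CMD_PRIVILEGE_MASK_IN_VF_NULL)
	 *	== 0xFFFF0000	(PF 0, no VF)
	 * example_privilege_mask_function(1, 3)
	 *	== 0x00030001	(PF 1, VF 3)
	 *
	 * To change the mask rather than just read it back, OR
	 * MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE into the NEW_MASK word.
	 */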
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
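
LINK_STATE_MODE reuses the same PF/VF function encoding, and passing DO_NOT_CHANGE in NEW_MODE turns the call into a pure read of the current mode. A hedged sketch (illustrative helper, driver MCDI macros assumed):

	static int example_get_vf_link_state(struct efx_nic *efx, u16 pf, u16 vf,
					     u32 *mode)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
		size_t outlen;
		int rc;

		/* VF in bits 31:16, PF in bits 15:0, as for MC_CMD_PRIVILEGE_MASK */
		MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_FUNCTION,
			       (u32)vf << 16 | pf);
		MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
			       MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);

		rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
				  outbuf, sizeof(outbuf), &outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_LINK_STATE_MODE_OUT_LEN)
			return -EIO;
		*mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
		return 0;
	}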
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define    MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY   0x3 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF  0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define    MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
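
The response DATA array is capped at 252 bytes, so reads larger than that have to loop over the address range. A hedged sketch (illustrative helper; MCDI_PTR gives the byte pointer into the variable-length response):

	static int example_xpm_read(struct efx_nic *efx, u32 addr, u8 *buf,
				    size_t len)
	{
		MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
		MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
		size_t outlen, chunk;
		int rc;

		while (len) {
			chunk = min_t(size_t, len,
				      MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM);

			MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
			MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, chunk);
			rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES,
					  inbuf, sizeof(inbuf),
					  outbuf, sizeof(outbuf), &outlen);
			if (rc)
				return rc;
			if (outlen < chunk)
				return -EIO;

			memcpy(buf, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA),
			       chunk);
			buf += chunk;
			addr += chunk;
			len -= chunk;
		}
		return 0;
	}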
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
+/* Sector data */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be fewer than BAD_COUNT, if they do not all
+ * fit into the MCDI response)
+ */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define    MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define    MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
 #endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 47d1e3a..c530e1c 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -925,6 +925,7 @@
  * @stats_lock: Statistics update lock. Must be held when calling
  *	efx_nic_type::{update,start,stop}_stats.
  * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
  *
  * This is stored in the private area of the &struct net_device.
  */
@@ -971,6 +972,7 @@
 	unsigned next_buffer_table;
 
 	unsigned int max_channels;
+	unsigned int max_tx_channels;
 	unsigned n_channels;
 	unsigned n_rx_channels;
 	unsigned rss_spread;
@@ -1072,6 +1074,7 @@
 	int last_irq_cpu;
 	spinlock_t stats_lock;
 	atomic_t n_rx_noskb_drops;
+	bool mc_promisc;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 31ff908..0b536e2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -506,6 +506,7 @@
  * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
  * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
  *	after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@
 	bool rx_rss_context_exclusive;
 	u64 stats[EF10_STAT_COUNT];
 	bool workaround_35388;
+	bool workaround_26807;
 	bool must_check_datapath_caps;
 	u32 datapath_caps;
 	unsigned int rx_dpcpu_fw_id;
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index b605dfd5..9d78830 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -114,7 +114,10 @@
 
 	if (efx->type->test_nvram) {
 		rc = efx->type->test_nvram(efx);
-		tests->nvram = rc ? -1 : 1;
+		if (rc == -EPERM)
+			rc = 0;
+		else
+			tests->nvram = rc ? -1 : 1;
 	}
 
 	return rc;
@@ -253,6 +256,12 @@
 	mutex_lock(&efx->mac_lock);
 	rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
 	mutex_unlock(&efx->mac_lock);
+	if (rc == -EPERM)
+		rc = 0;
+	else
+		netif_info(efx, drv, efx->net_dev,
+			   "%s phy selftest\n", rc ? "Failed" : "Passed");
+
 	return rc;
 }
 
@@ -661,6 +670,9 @@
 	wmb();
 	kfree(state);
 
+	if (rc == -EPERM)
+		rc = 0;
+
 	return rc;
 }
 
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index b323b91..2219b54 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -262,6 +262,7 @@
 	}
 
 	efx->max_channels = EFX_MAX_CHANNELS;
+	efx->max_tx_channels = EFX_MAX_CHANNELS;
 
 	efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
 	efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
@@ -1042,9 +1043,5 @@
 	.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
 	.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
 			     1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
-			     1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
-			     1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
-			     1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+			     1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
 };
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 67d9fde..664f596 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1031,36 +1031,8 @@
 static void print_packet( byte * buf, int length )
 {
 #if 0
-	int i;
-	int remainder;
-	int lines;
-
-	pr_dbg("Packet of length %d\n", length);
-	lines = length / 16;
-	remainder = length % 16;
-
-	for ( i = 0; i < lines ; i ++ ) {
-		int cur;
-
-		printk(KERN_DEBUG);
-		for ( cur = 0; cur < 8; cur ++ ) {
-			byte a, b;
-
-			a = *(buf ++ );
-			b = *(buf ++ );
-			pr_cont("%02x%02x ", a, b);
-		}
-		pr_cont("\n");
-	}
-	printk(KERN_DEBUG);
-	for ( i = 0; i < remainder/2 ; i++ ) {
-		byte a, b;
-
-		a = *(buf ++ );
-		b = *(buf ++ );
-		pr_cont("%02x%02x ", a, b);
-	}
-	pr_cont("\n");
+	print_hex_dump_debug(DRV_NAME, DUMP_PREFIX_OFFSET, 16, 1,
+			     buf, length, true);
 #endif
 }
 #endif
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 959aeea..3b4cd8a 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -59,7 +59,9 @@
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_net.h>
+#include <linux/acpi.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 
 #include "smsc911x.h"
 
@@ -2362,59 +2364,50 @@
 	.tx_writefifo = smsc911x_tx_writefifo_shift,
 };
 
-#ifdef CONFIG_OF
-static int smsc911x_probe_config_dt(struct smsc911x_platform_config *config,
-				    struct device_node *np)
+static int smsc911x_probe_config(struct smsc911x_platform_config *config,
+				 struct device *dev)
 {
-	const char *mac;
+	int phy_interface;
 	u32 width = 0;
+	int err;
 
-	if (!np)
-		return -ENODEV;
+	phy_interface = device_get_phy_mode(dev);
+	if (phy_interface < 0)
+		phy_interface = PHY_INTERFACE_MODE_NA;
+	config->phy_interface = phy_interface;
 
-	config->phy_interface = of_get_phy_mode(np);
+	device_get_mac_address(dev, config->mac, ETH_ALEN);
 
-	mac = of_get_mac_address(np);
-	if (mac)
-		memcpy(config->mac, mac, ETH_ALEN);
-
-	of_property_read_u32(np, "reg-shift", &config->shift);
-
-	of_property_read_u32(np, "reg-io-width", &width);
-	if (width == 4)
+	err = device_property_read_u32(dev, "reg-io-width", &width);
+	if (err == -ENXIO)
+		return err;
+	if (!err && width == 4)
 		config->flags |= SMSC911X_USE_32BIT;
 	else
 		config->flags |= SMSC911X_USE_16BIT;
 
-	if (of_get_property(np, "smsc,irq-active-high", NULL))
+	device_property_read_u32(dev, "reg-shift", &config->shift);
+
+	if (device_property_present(dev, "smsc,irq-active-high"))
 		config->irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_HIGH;
 
-	if (of_get_property(np, "smsc,irq-push-pull", NULL))
+	if (device_property_present(dev, "smsc,irq-push-pull"))
 		config->irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL;
 
-	if (of_get_property(np, "smsc,force-internal-phy", NULL))
+	if (device_property_present(dev, "smsc,force-internal-phy"))
 		config->flags |= SMSC911X_FORCE_INTERNAL_PHY;
 
-	if (of_get_property(np, "smsc,force-external-phy", NULL))
+	if (device_property_present(dev, "smsc,force-external-phy"))
 		config->flags |= SMSC911X_FORCE_EXTERNAL_PHY;
 
-	if (of_get_property(np, "smsc,save-mac-address", NULL))
+	if (device_property_present(dev, "smsc,save-mac-address"))
 		config->flags |= SMSC911X_SAVE_MAC_ADDRESS;
 
 	return 0;
 }
-#else
-static inline int smsc911x_probe_config_dt(
-				struct smsc911x_platform_config *config,
-				struct device_node *np)
-{
-	return -ENODEV;
-}
-#endif /* CONFIG_OF */
 
 static int smsc911x_drv_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct net_device *dev;
 	struct smsc911x_data *pdata;
 	struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
@@ -2435,7 +2428,10 @@
 	res_size = resource_size(res);
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0) {
+	if (irq == -EPROBE_DEFER) {
+		retval = -EPROBE_DEFER;
+		goto out_0;
+	} else if (irq <= 0) {
 		pr_warn("Could not allocate irq resource\n");
 		retval = -ENODEV;
 		goto out_0;
@@ -2478,7 +2474,7 @@
 		goto out_disable_resources;
 	}
 
-	retval = smsc911x_probe_config_dt(&pdata->config, np);
+	retval = smsc911x_probe_config(&pdata->config, &pdev->dev);
 	if (retval && config) {
 		/* copy config parameters across to pdata */
 		memcpy(&pdata->config, config, sizeof(pdata->config));
@@ -2654,6 +2650,12 @@
 MODULE_DEVICE_TABLE(of, smsc911x_dt_ids);
 #endif
 
+static const struct acpi_device_id smsc911x_acpi_match[] = {
+	{ "ARMH9118", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
+
 static struct platform_driver smsc911x_driver = {
 	.probe = smsc911x_drv_probe,
 	.remove = smsc911x_drv_remove,
@@ -2661,6 +2663,7 @@
 		.name	= SMSC_CHIPNAME,
 		.pm	= SMSC911X_PM_OPS,
 		.of_match_table = of_match_ptr(smsc911x_dt_ids),
+		.acpi_match_table = ACPI_PTR(smsc911x_acpi_match),
 	},
 };
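
The smsc911x conversion above replaces the CONFIG_OF-only parser with the unified device property API, so the same probe path now serves both DT and the newly added ARMH9118 ACPI ID. A bare sketch of that pattern, with a hypothetical config struct standing in for smsc911x_platform_config:

	#include <linux/property.h>
	#include <linux/phy.h>

	struct my_config {		/* hypothetical stand-in */
		int phy_interface;
		u32 shift;
	};

	static int my_probe_config(struct my_config *cfg, struct device *dev)
	{
		int mode = device_get_phy_mode(dev);	/* works for DT and ACPI */

		cfg->phy_interface = (mode < 0) ? PHY_INTERFACE_MODE_NA : mode;

		/* Firmware-node agnostic replacement for of_property_read_u32(). */
		device_property_read_u32(dev, "reg-shift", &cfg->shift);

		return 0;
	}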
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index e817a1a..b1e5f24 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -16,6 +16,46 @@
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	if (pdev->dev.of_node) {
+		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+		if (IS_ERR(plat_dat)) {
+			dev_err(&pdev->dev, "dt configuration failed\n");
+			return PTR_ERR(plat_dat);
+		}
+	} else {
+		plat_dat = dev_get_platdata(&pdev->dev);
+		if (!plat_dat) {
+			dev_err(&pdev->dev, "no platform data provided\n");
+			return  -EINVAL;
+		}
+
+		/* Set default value for multicast hash bins */
+		plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+		/* Set default value for unicast filter entries */
+		plat_dat->unicast_filter_entries = 1;
+	}
+
+	/* Custom initialisation (if needed) */
+	if (plat_dat->init) {
+		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+		if (ret)
+			return ret;
+	}
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
 static const struct of_device_id dwmac_generic_match[] = {
 	{ .compatible = "st,spear600-gmac"},
 	{ .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@
 MODULE_DEVICE_TABLE(of, dwmac_generic_match);
 
 static struct platform_driver dwmac_generic_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = dwmac_generic_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = STMMAC_RESOURCE_NAME,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index f0e4bb4..9d89bdb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -248,23 +248,40 @@
 	return NULL;
 }
 
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
 {
+	struct ipq806x_gmac *gmac = priv;
+
+	ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
 	struct device *dev = &pdev->dev;
 	struct ipq806x_gmac *gmac;
 	int val;
 	void *err;
 
+	val = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (val)
+		return val;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
 	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
 	if (!gmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	gmac->pdev = pdev;
 
 	err = ipq806x_gmac_of_parse(gmac);
-	if (err) {
+	if (IS_ERR(err)) {
 		dev_err(dev, "device tree parsing error\n");
-		return err;
+		return PTR_ERR(err);
 	}
 
 	regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,7 +302,7 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return NULL;
+		return -EINVAL;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
 
@@ -304,7 +321,7 @@
 	default:
 		dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
 			phy_modes(gmac->phy_mode));
-		return NULL;
+		return -EINVAL;
 	}
 	regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
 
@@ -327,30 +344,21 @@
 			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
 	}
 
-	return gmac;
+	plat_dat->has_gmac = true;
+	plat_dat->bsp_priv = gmac;
+	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
-	struct ipq806x_gmac *gmac = priv;
-
-	ipq806x_gmac_set_speed(gmac, speed);
-}
-
-static const struct stmmac_of_data ipq806x_gmac_data = {
-	.has_gmac	= 1,
-	.setup		= ipq806x_gmac_setup,
-	.fix_mac_speed	= ipq806x_gmac_fix_mac_speed,
-};
-
 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
-	{ .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+	{ .compatible = "qcom,ipq806x-gmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
 
 static struct platform_driver ipq806x_gmac_dwmac_driver = {
-	.probe = stmmac_pltfr_probe,
+	.probe = ipq806x_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name		= "ipq806x-gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index cb888d3..78e9d18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -25,66 +25,53 @@
 # define LPC18XX_CREG_CREG6_ETHMODE_MII		0x0
 # define LPC18XX_CREG_CREG6_ETHMODE_RMII	0x4
 
-struct lpc18xx_dwmac_priv_data {
-	struct regmap *reg;
-	int interface;
-};
-
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
 {
-	struct lpc18xx_dwmac_priv_data *dwmac;
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct regmap *reg;
+	u8 ethmode;
+	int ret;
 
-	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
 
-	dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
-	if (dwmac->interface < 0)
-		return ERR_PTR(dwmac->interface);
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
 
-	dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
-	if (IS_ERR(dwmac->reg)) {
-		dev_err(&pdev->dev, "Syscon lookup failed\n");
-		return dwmac->reg;
+	plat_dat->has_gmac = true;
+
+	reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+	if (IS_ERR(reg)) {
+		dev_err(&pdev->dev, "syscon lookup failed\n");
+		return PTR_ERR(reg);
 	}
 
-	return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
-	struct lpc18xx_dwmac_priv_data *dwmac = priv;
-	u8 ethmode;
-
-	if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+	if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
 		ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
-	} else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+	} else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
 		ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
 	} else {
 		dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
 		return -EINVAL;
 	}
 
-	regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+	regmap_update_bits(reg, LPC18XX_CREG_CREG6,
 			   LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
 
-	return 0;
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
-	.has_gmac = 1,
-	.setup = lpc18xx_dwmac_setup,
-	.init = lpc18xx_dwmac_init,
-};
-
 static const struct of_device_id lpc18xx_dwmac_match[] = {
-	{ .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+	{ .compatible = "nxp,lpc1850-dwmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
 
 static struct platform_driver lpc18xx_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = lpc18xx_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "lpc18xx-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 61a324a..c1bac19 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -47,36 +47,45 @@
 	writel(val, dwmac->reg);
 }
 
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
 {
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
 	struct meson_dwmac *dwmac;
 	struct resource *res;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
 
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
 	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(dwmac->reg))
-		return ERR_CAST(dwmac->reg);
+		return PTR_ERR(dwmac->reg);
 
-	return dwmac;
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data meson6_dwmac_data = {
-	.setup		= meson6_dwmac_setup,
-	.fix_mac_speed	= meson6_dwmac_fix_mac_speed,
-};
-
 static const struct of_device_id meson6_dwmac_match[] = {
-	{ .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+	{ .compatible = "amlogic,meson6-dwmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
 
 static struct platform_driver meson6_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = meson6_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "meson6-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 00a1e1e..11baa4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,7 +46,7 @@
 	struct platform_device *pdev;
 	int phy_iface;
 	struct regulator *regulator;
-	struct rk_gmac_ops *ops;
+	const struct rk_gmac_ops *ops;
 
 	bool clk_enabled;
 	bool clock_input;
@@ -177,7 +177,7 @@
 	}
 }
 
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
 	.set_to_rgmii = rk3288_set_to_rgmii,
 	.set_to_rmii = rk3288_set_to_rmii,
 	.set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@
 	}
 }
 
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
 	.set_to_rgmii = rk3368_set_to_rgmii,
 	.set_to_rmii = rk3368_set_to_rmii,
 	.set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@
 }
 
 static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
-					  struct rk_gmac_ops *ops)
+					  const struct rk_gmac_ops *ops)
 {
 	struct rk_priv_data *bsp_priv;
 	struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@
 	return bsp_priv;
 }
 
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
-	return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
-	return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
 static int rk_gmac_init(struct platform_device *pdev, void *priv)
 {
 	struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@
 		dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-static const struct stmmac_of_data rk3288_gmac_data = {
-	.has_gmac = 1,
-	.fix_mac_speed = rk_fix_speed,
-	.setup = rk3288_gmac_setup,
-	.init = rk_gmac_init,
-	.exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	const struct rk_gmac_ops *data;
+	int ret;
 
-static const struct stmmac_of_data rk3368_gmac_data = {
-	.has_gmac = 1,
-	.fix_mac_speed = rk_fix_speed,
-	.setup = rk3368_gmac_setup,
-	.init = rk_gmac_init,
-	.exit = rk_gmac_exit,
-};
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "no of match data provided\n");
+		return -EINVAL;
+	}
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	plat_dat->has_gmac = true;
+	plat_dat->init = rk_gmac_init;
+	plat_dat->exit = rk_gmac_exit;
+	plat_dat->fix_mac_speed = rk_fix_speed;
+
+	plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+	if (IS_ERR(plat_dat->bsp_priv))
+		return PTR_ERR(plat_dat->bsp_priv);
+
+	ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
-	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+	{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
 
 static struct platform_driver rk_gmac_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = rk_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "rk_gmac-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 8141c5b..401383b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -175,31 +175,6 @@
 	return 0;
 }
 
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
-	struct device		*dev = &pdev->dev;
-	int			ret;
-	struct socfpga_dwmac	*dwmac;
-
-	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
-
-	ret = socfpga_dwmac_parse_data(dwmac, dev);
-	if (ret) {
-		dev_err(dev, "Unable to parse OF data\n");
-		return ERR_PTR(ret);
-	}
-
-	ret = socfpga_dwmac_setup(dwmac);
-	if (ret) {
-		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-		return ERR_PTR(ret);
-	}
-
-	return dwmac;
-}
-
 static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
 {
 	struct socfpga_dwmac	*dwmac = priv;
@@ -257,21 +232,58 @@
 	return ret;
 }
 
-static const struct stmmac_of_data socfpga_gmac_data = {
-	.setup = socfpga_dwmac_probe,
-	.init = socfpga_dwmac_init,
-	.exit = socfpga_dwmac_exit,
-	.fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct device		*dev = &pdev->dev;
+	int			ret;
+	struct socfpga_dwmac	*dwmac;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+	if (!dwmac)
+		return -ENOMEM;
+
+	ret = socfpga_dwmac_parse_data(dwmac, dev);
+	if (ret) {
+		dev_err(dev, "Unable to parse OF data\n");
+		return ret;
+	}
+
+	ret = socfpga_dwmac_setup(dwmac);
+	if (ret) {
+		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+		return ret;
+	}
+
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = socfpga_dwmac_init;
+	plat_dat->exit = socfpga_dwmac_exit;
+	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+	ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id socfpga_dwmac_match[] = {
-	{ .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+	{ .compatible = "altr,socfpga-stmmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
 
 static struct platform_driver socfpga_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = socfpga_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "socfpga-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index a2e8111..7f6f4a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -21,6 +21,7 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_net.h>
 
 #include "stmmac_platform.h"
@@ -128,6 +129,11 @@
 	struct device *dev;
 	struct regmap *regmap;
 	u32 speed;
+	void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+	void (*fix_retime_src)(void *priv, unsigned int speed);
 };
 
 static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@
 	regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
 }
 
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
 {
+	struct sti_dwmac *dwmac = priv;
 	struct regmap *regmap = dwmac->regmap;
 	int iface = dwmac->interface;
 	struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@
 
 	val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
 	regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
 
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
-	struct sti_dwmac *dwmac = priv;
-	u32 spd = dwmac->speed;
-
-	sti_dwmac_ctrl_init(dwmac);
-
-	stih4xx_fix_retime_src(priv, spd);
-
-	return 0;
-}
-
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
-	struct sti_dwmac *dwmac = priv;
-	u32 spd = dwmac->speed;
-
-	sti_dwmac_ctrl_init(dwmac);
-
-	stid127_fix_retime_src(priv, spd);
+	dwmac->fix_retime_src(priv, dwmac->speed);
 
 	return 0;
 }
@@ -334,36 +321,58 @@
 	return 0;
 }
 
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
 {
+	struct plat_stmmacenet_data *plat_dat;
+	const struct sti_dwmac_of_data *data;
+	struct stmmac_resources stmmac_res;
 	struct sti_dwmac *dwmac;
 	int ret;
 
+	data = of_device_get_match_data(&pdev->dev);
+	if (!data) {
+		dev_err(&pdev->dev, "No OF match data provided\n");
+		return -EINVAL;
+	}
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
 	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
 	if (!dwmac)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	ret = sti_dwmac_parse_data(dwmac, pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Unable to parse OF data\n");
-		return ERR_PTR(ret);
+		return ret;
 	}
 
-	return dwmac;
+	dwmac->fix_retime_src = data->fix_retime_src;
+
+	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = sti_dwmac_init;
+	plat_dat->exit = sti_dwmac_exit;
+	plat_dat->fix_mac_speed = data->fix_retime_src;
+
+	ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data stih4xx_dwmac_data = {
-	.fix_mac_speed = stih4xx_fix_retime_src,
-	.setup = sti_dwmac_setup,
-	.init = stix4xx_init,
-	.exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+	.fix_retime_src = stih4xx_fix_retime_src,
 };
 
-static const struct stmmac_of_data stid127_dwmac_data = {
-	.fix_mac_speed = stid127_fix_retime_src,
-	.setup = sti_dwmac_setup,
-	.init = stid127_init,
-	.exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+	.fix_retime_src = stid127_fix_retime_src,
 };
 
 static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@
 MODULE_DEVICE_TABLE(of, sti_dwmac_match);
 
 static struct platform_driver sti_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = sti_dwmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "sti-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 15048ca..52b8ed9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -33,35 +33,6 @@
 	struct regulator *regulator;
 };
 
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
-	struct sunxi_priv_data *gmac;
-	struct device *dev = &pdev->dev;
-
-	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-	if (!gmac)
-		return ERR_PTR(-ENOMEM);
-
-	gmac->interface = of_get_phy_mode(dev->of_node);
-
-	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
-	if (IS_ERR(gmac->tx_clk)) {
-		dev_err(dev, "could not get tx clock\n");
-		return gmac->tx_clk;
-	}
-
-	/* Optional regulator for PHY */
-	gmac->regulator = devm_regulator_get_optional(dev, "phy");
-	if (IS_ERR(gmac->regulator)) {
-		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
-			return ERR_PTR(-EPROBE_DEFER);
-		dev_info(dev, "no regulator found\n");
-		gmac->regulator = NULL;
-	}
-
-	return gmac;
-}
-
 #define SUN7I_GMAC_GMII_RGMII_RATE	125000000
 #define SUN7I_GMAC_MII_RATE		25000000
 
@@ -132,25 +103,67 @@
 	}
 }
 
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
-	.has_gmac = 1,
-	.tx_coe = 1,
-	.fix_mac_speed = sun7i_fix_speed,
-	.setup = sun7i_gmac_setup,
-	.init = sun7i_gmac_init,
-	.exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+	struct plat_stmmacenet_data *plat_dat;
+	struct stmmac_resources stmmac_res;
+	struct sunxi_priv_data *gmac;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+	if (ret)
+		return ret;
+
+	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+	if (IS_ERR(plat_dat))
+		return PTR_ERR(plat_dat);
+
+	gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+	if (!gmac)
+		return -ENOMEM;
+
+	gmac->interface = of_get_phy_mode(dev->of_node);
+
+	gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+	if (IS_ERR(gmac->tx_clk)) {
+		dev_err(dev, "could not get tx clock\n");
+		return PTR_ERR(gmac->tx_clk);
+	}
+
+	/* Optional regulator for PHY */
+	gmac->regulator = devm_regulator_get_optional(dev, "phy");
+	if (IS_ERR(gmac->regulator)) {
+		if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_info(dev, "no regulator found\n");
+		gmac->regulator = NULL;
+	}
+
+	/* platform data specifying hardware features and callbacks.
+	 * hardware features were copied from Allwinner drivers. */
+	plat_dat->tx_coe = 1;
+	plat_dat->has_gmac = true;
+	plat_dat->bsp_priv = gmac;
+	plat_dat->init = sun7i_gmac_init;
+	plat_dat->exit = sun7i_gmac_exit;
+	plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+	ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+	if (ret)
+		return ret;
+
+	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id sun7i_dwmac_match[] = {
-	{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+	{ .compatible = "allwinner,sun7i-a20-gmac" },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
 
 static struct platform_driver sun7i_dwmac_driver = {
-	.probe  = stmmac_pltfr_probe,
+	.probe  = sun7i_gmac_probe,
 	.remove = stmmac_pltfr_remove,
 	.driver = {
 		.name           = "sun7i-dwmac",
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index bcdc895..d02691b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -104,32 +104,16 @@
  * this function is to read the driver parameters from device-tree and
  * set some private fields that will be used by the main at runtime.
  */
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-				  struct plat_stmmacenet_data *plat,
-				  const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
 	struct device_node *np = pdev->dev.of_node;
+	struct plat_stmmacenet_data *plat;
 	struct stmmac_dma_cfg *dma_cfg;
-	const struct of_device_id *device;
-	struct device *dev = &pdev->dev;
 
-	device = of_match_device(dev->driver->of_match_table, dev);
-	if (device->data) {
-		const struct stmmac_of_data *data = device->data;
-		plat->has_gmac = data->has_gmac;
-		plat->enh_desc = data->enh_desc;
-		plat->tx_coe = data->tx_coe;
-		plat->rx_coe = data->rx_coe;
-		plat->bugged_jumbo = data->bugged_jumbo;
-		plat->pmt = data->pmt;
-		plat->riwt_off = data->riwt_off;
-		plat->fix_mac_speed = data->fix_mac_speed;
-		plat->bus_setup = data->bus_setup;
-		plat->setup = data->setup;
-		plat->free = data->free;
-		plat->init = data->init;
-		plat->exit = data->exit;
-	}
+	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+	if (!plat)
+		return ERR_PTR(-ENOMEM);
 
 	*mac = of_get_mac_address(np);
 	plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@
 	/* If phy-handle is not specified, check if we have a fixed-phy */
 	if (!plat->phy_node && of_phy_is_fixed_link(np)) {
 		if ((of_phy_register_fixed_link(np) < 0))
-			return -ENODEV;
+			return ERR_PTR(-ENODEV);
 
 		plat->phy_node = of_node_get(np);
 	}
@@ -182,6 +166,12 @@
 	 */
 	plat->maxmtu = JUMBO_LEN;
 
+	/* Set default value for multicast hash bins */
+	plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+	/* Set default value for unicast filter entries */
+	plat->unicast_filter_entries = 1;
+
 	/*
 	 * Currently only the properties needed on SPEAr600
 	 * are provided. All other properties should be added
@@ -222,7 +212,7 @@
 				       GFP_KERNEL);
 		if (!dma_cfg) {
 			of_node_put(np);
-			return -ENOMEM;
+			return ERR_PTR(-ENOMEM);
 		}
 		plat->dma_cfg = dma_cfg;
 		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@
 		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
 	}
 
-	return 0;
+	return plat;
 }
 #else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-				  struct plat_stmmacenet_data *plat,
-				  const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
-	return -ENOSYS;
+	return ERR_PTR(-ENOSYS);
 }
 #endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
 
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+				  struct stmmac_resources *stmmac_res)
 {
-	struct stmmac_resources stmmac_res;
-	int ret = 0;
 	struct resource *res;
-	struct device *dev = &pdev->dev;
-	struct plat_stmmacenet_data *plat_dat = NULL;
 
-	memset(&stmmac_res, 0, sizeof(stmmac_res));
+	memset(stmmac_res, 0, sizeof(*stmmac_res));
 
 	/* Get IRQ information early to have an ability to ask for deferred
 	 * probe if needed before we went too far with resource allocation.
 	 */
-	stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
-	if (stmmac_res.irq < 0) {
-		if (stmmac_res.irq != -EPROBE_DEFER) {
-			dev_err(dev,
+	stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+	if (stmmac_res->irq < 0) {
+		if (stmmac_res->irq != -EPROBE_DEFER) {
+			dev_err(&pdev->dev,
 				"MAC IRQ configuration information not found\n");
 		}
-		return stmmac_res.irq;
+		return stmmac_res->irq;
 	}
 
 	/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -287,64 +267,23 @@
 	 * In case the wake up interrupt is not passed from the platform
 	 * so the driver will continue to use the mac irq (ndev->irq)
 	 */
-	stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-	if (stmmac_res.wol_irq < 0) {
-		if (stmmac_res.wol_irq == -EPROBE_DEFER)
+	stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+	if (stmmac_res->wol_irq < 0) {
+		if (stmmac_res->wol_irq == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
-		stmmac_res.wol_irq = stmmac_res.irq;
+		stmmac_res->wol_irq = stmmac_res->irq;
 	}
 
-	stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-	if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+	stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+	if (stmmac_res->lpi_irq == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	stmmac_res.addr = devm_ioremap_resource(dev, res);
-	if (IS_ERR(stmmac_res.addr))
-		return PTR_ERR(stmmac_res.addr);
+	stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
 
-	plat_dat = dev_get_platdata(&pdev->dev);
-
-	if (!plat_dat)
-		plat_dat = devm_kzalloc(&pdev->dev,
-					sizeof(struct plat_stmmacenet_data),
-					GFP_KERNEL);
-	if (!plat_dat) {
-		pr_err("%s: ERROR: no memory", __func__);
-		return  -ENOMEM;
-	}
-
-	/* Set default value for multicast hash bins */
-	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat_dat->unicast_filter_entries = 1;
-
-	if (pdev->dev.of_node) {
-		ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
-		if (ret) {
-			pr_err("%s: main dt probe failed", __func__);
-			return ret;
-		}
-	}
-
-	/* Custom setup (if needed) */
-	if (plat_dat->setup) {
-		plat_dat->bsp_priv = plat_dat->setup(pdev);
-		if (IS_ERR(plat_dat->bsp_priv))
-			return PTR_ERR(plat_dat->bsp_priv);
-	}
-
-	/* Custom initialisation (if needed)*/
-	if (plat_dat->init) {
-		ret = plat_dat->init(pdev, plat_dat->bsp_priv);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	return PTR_ERR_OR_ZERO(stmmac_res->addr);
 }
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
 
 /**
  * stmmac_pltfr_remove
@@ -361,9 +300,6 @@
 	if (priv->plat->exit)
 		priv->plat->exit(pdev, priv->plat->bsp_priv);
 
-	if (priv->plat->free)
-		priv->plat->free(pdev, priv->plat->bsp_priv);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index 71da86d..ffeb8d9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -19,7 +19,14 @@
 #ifndef __STMMAC_PLATFORM_H__
 #define __STMMAC_PLATFORM_H__
 
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+				  struct stmmac_resources *stmmac_res);
+
 int stmmac_pltfr_remove(struct platform_device *pdev);
 extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
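
With stmmac_pltfr_probe() removed, every dwmac glue driver now composes its own probe from the two helpers declared here, exactly as the converted drivers above do. A minimal sketch of the resulting shape for a hypothetical glue driver (the foo_* names are illustrative):

	static int foo_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* SoC-specific flags and callbacks go straight into plat_dat. */
		plat_dat->has_gmac = true;
		plat_dat->fix_mac_speed = foo_fix_mac_speed;	/* hypothetical */

		return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	}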
 
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644
index 0000000..a8f3151
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Kconfig
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+	bool "Synopsys devices"
+	default y
+	---help---
+	  If you have a network (Ethernet) device belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about Synopsys devices. If you say Y, you will be asked
+	  for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+	tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+	select PHYLIB
+	select CRC32
+	select MII
+	depends on OF
+	---help---
+	  This driver supports the DWC Ethernet QoS from Synopsys
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644
index 0000000..7a37572
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644
index 0000000..85b3326
--- /dev/null
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -0,0 +1,3019 @@
+/*  Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
+ *
+ *  This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ *  This version introduced a lot of changes which break backwards
+ *  compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ *  drivers). Some fields differ between version 4.00a and 4.10a, mainly the
+ *  interrupt bit fields. The driver could be made compatible with 4.00a if
+ *  all relevant HW errata are handled.
+ *
+ *  The GMAC is highly configurable at synthesis time. This driver has been
+ *  developed for a subset of the total available feature set. Currently
+ *  it supports:
+ *  - TSO
+ *  - Checksum offload for RX and TX.
+ *  - Energy efficient ethernet.
+ *  - GMII phy interface.
+ *  - The statistics module.
+ *  - Single RX and TX queue.
+ *
+ *  Copyright (C) 2015 Axis Communications AB.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME			"dwceqos"
+#define DRIVER_DESCRIPTION		"Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION			"0.9"
+
+#define DWCEQOS_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+	NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN      8
+#define DWCEQOS_LPI_TIMER_MAX      ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET    0x0000
+#define MAX_MAC_REG_OFFSET      0x0bd0
+#define START_MTL_REG_OFFSET    0x0c00
+#define MAX_MTL_REG_OFFSET      0x0d7c
+#define START_DMA_REG_OFFSET    0x1000
+#define MAX_DMA_REG_OFFSET      0x117C
+
+#define REG_SPACE_SIZE          0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE             0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE      0x1004
+#define REG_DWCEQOS_DMA_IS               0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0        0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL         0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL      0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL      0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST  0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST  0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL  0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL  0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN   0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN   0x1130
+#define REG_DWCEQOS_DMA_CH0_IE           0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC   0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC   0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF    0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG    0x115c
+#define REG_DWCEQOS_DMA_CH0_STA          0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR            BIT(11)
+#define DWCEQOS_DMA_MODE_DA              BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI   BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB       BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL      BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+	(((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK       GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+	(((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK       GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+	(((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT   GENMASK(3, 1)
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8       BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x)      ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x)       ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START         BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x)   ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP            BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE            BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE           BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE           BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE           BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE           BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE          BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE          BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS             BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS             BIT(16)
+#define DWCEQOS_DMA_IS_MACIS             BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI            BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI            BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU           BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE           BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE           BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS           BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB           GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ   BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR  BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB           GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ   BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR  BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE    BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V   BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V   BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE    BIT(7)
+#define DWCEQOS_DMA_RDES3_ES      BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT    BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x)   ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT      0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP  BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP  BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC     BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT    BIT(30)
+#define DWCEQOS_DMA_TDES3_FD      BIT(29)
+#define DWCEQOS_DMA_TDES3_LD      BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH    BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP    BIT(17)
+#define DWCEQOS_DMA_TDES3_CA      0x00030000
+#define DWCEQOS_DMA_TDES3_TSE     BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x)   ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x)   ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV    BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED   0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER             0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST         0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST    0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST    0x0d38
+
+#define REG_DWCEQOS_MTL_IS               0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER        0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER        0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT     0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL         0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL         0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT        0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN            BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF              BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ              BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512           0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC             BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF       BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP              BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP              BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF              BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32            BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG              0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG          0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT         0x0008
+#define REG_DWCEQOS_MAC_WD_TO            0x000c
+#define REG_DWCEQOS_HASTABLE_LO          0x0010
+#define REG_DWCEQOS_HASTABLE_HI          0x0014
+#define REG_DWCEQOS_MAC_IS               0x00b0
+#define REG_DWCEQOS_MAC_IE               0x00b4
+#define REG_DWCEQOS_MAC_STAT             0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR        0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA        0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI     0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO     0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0       0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW	 0x0070
+
+#define DWCEQOS_MAC_CFG_ACS              BIT(20)
+#define DWCEQOS_MAC_CFG_JD               BIT(17)
+#define DWCEQOS_MAC_CFG_JE               BIT(16)
+#define DWCEQOS_MAC_CFG_PS               BIT(15)
+#define DWCEQOS_MAC_CFG_FES              BIT(14)
+#define DWCEQOS_MAC_CFG_DM               BIT(13)
+#define DWCEQOS_MAC_CFG_DO               BIT(10)
+#define DWCEQOS_MAC_CFG_TE               BIT(1)
+#define DWCEQOS_MAC_CFG_IPC              BIT(27)
+#define DWCEQOS_MAC_CFG_RE               BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + (reg * 8))
+#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + (reg * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN               BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x & 15)) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN  BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX  BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN  BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX  BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST  BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST  BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN   BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS     BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN   BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA  BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE   BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)  ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+					  DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+					  DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE   BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)	((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x)  (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+	(1 + (((feature1) & 0x1fc0000) >> 18))
+
+#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR            BIT(0)
+
+#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL             0x0700
+#define REG_DWCEQOS_MMC_RXIRQ            0x0704
+#define REG_DWCEQOS_MMC_TXIRQ            0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR           0x07F0
+#define DWC_MMC_TXLPIUSCNTR              0x07EC
+#define DWC_MMC_TXOVERSIZE_G             0x0778
+#define DWC_MMC_TXVLANPACKETS_G          0x0774
+#define DWC_MMC_TXPAUSEPACKETS           0x0770
+#define DWC_MMC_TXEXCESSDEF              0x076C
+#define DWC_MMC_TXPACKETCOUNT_G          0x0768
+#define DWC_MMC_TXOCTETCOUNT_G           0x0764
+#define DWC_MMC_TXCARRIERERROR           0x0760
+#define DWC_MMC_TXEXCESSCOL              0x075C
+#define DWC_MMC_TXLATECOL                0x0758
+#define DWC_MMC_TXDEFERRED               0x0754
+#define DWC_MMC_TXMULTICOL_G             0x0750
+#define DWC_MMC_TXSINGLECOL_G            0x074C
+#define DWC_MMC_TXUNDERFLOWERROR         0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
+#define DWC_MMC_TX64OCTETS_GB            0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB          0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR           0x07F8
+#define DWC_MMC_RXLPIUSCNTR              0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
+#define DWC_MMC_RXRCVERROR               0x07E0
+#define DWC_MMC_RXWATCHDOG               0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
+#define DWC_MMC_RXPAUSEPACKETS           0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
+#define DWC_MMC_RXLENGTHERROR            0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB     0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB     0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB      0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB      0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB       0x07B0
+#define DWC_MMC_RX64OCTETS_GB            0x07AC
+#define DWC_MMC_RXOVERSIZE_G             0x07A8
+#define DWC_MMC_RXUNDERSIZE_G            0x07A4
+#define DWC_MMC_RXJABBERERROR            0x07A0
+#define DWC_MMC_RXRUNTERROR              0x079C
+#define DWC_MMC_RXALIGNMENTERROR         0x0798
+#define DWC_MMC_RXCRCERROR               0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G     0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G     0x078C
+#define DWC_MMC_RXOCTETCOUNT_G           0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB          0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB         0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
+
+/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
+struct ring_desc {
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+	u32	des0;
+	u32	des1;
+	u32	des2;
+	u32	des3;
+} ____cacheline_aligned;
+
+struct dwceqos_mmc_counters {
+	__u64 txlpitranscntr;
+	__u64 txpiuscntr;
+	__u64 txoversize_g;
+	__u64 txvlanpackets_g;
+	__u64 txpausepackets;
+	__u64 txexcessdef;
+	__u64 txpacketcount_g;
+	__u64 txoctetcount_g;
+	__u64 txcarriererror;
+	__u64 txexcesscol;
+	__u64 txlatecol;
+	__u64 txdeferred;
+	__u64 txmulticol_g;
+	__u64 txsinglecol_g;
+	__u64 txunderflowerror;
+	__u64 txbroadcastpackets_gb;
+	__u64 txmulticastpackets_gb;
+	__u64 txunicastpackets_gb;
+	__u64 tx1024tomaxoctets_gb;
+	__u64 tx512to1023octets_gb;
+	__u64 tx256to511octets_gb;
+	__u64 tx128to255octets_gb;
+	__u64 tx65to127octets_gb;
+	__u64 tx64octets_gb;
+	__u64 txmulticastpackets_g;
+	__u64 txbroadcastpackets_g;
+	__u64 txpacketcount_gb;
+	__u64 txoctetcount_gb;
+
+	__u64 rxlpitranscntr;
+	__u64 rxlpiuscntr;
+	__u64 rxctrlpackets_g;
+	__u64 rxrcverror;
+	__u64 rxwatchdog;
+	__u64 rxvlanpackets_gb;
+	__u64 rxfifooverflow;
+	__u64 rxpausepackets;
+	__u64 rxoutofrangetype;
+	__u64 rxlengtherror;
+	__u64 rxunicastpackets_g;
+	__u64 rx1024tomaxoctets_gb;
+	__u64 rx512to1023octets_gb;
+	__u64 rx256to511octets_gb;
+	__u64 rx128to255octets_gb;
+	__u64 rx65to127octets_gb;
+	__u64 rx64octets_gb;
+	__u64 rxoversize_g;
+	__u64 rxundersize_g;
+	__u64 rxjabbererror;
+	__u64 rxrunterror;
+	__u64 rxalignmenterror;
+	__u64 rxcrcerror;
+	__u64 rxmulticastpackets_g;
+	__u64 rxbroadcastpackets_g;
+	__u64 rxoctetcount_g;
+	__u64 rxoctetcount_gb;
+	__u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+	const char stat_name[ETH_GSTRING_LEN];
+	int   offset;
+};
+
+#define STAT_ITEM(name, var) \
+	{\
+		name,\
+		offsetof(struct dwceqos_mmc_counters, var),\
+	}
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+	STAT_ITEM("tx_bytes", txoctetcount_gb),
+	STAT_ITEM("tx_packets", txpacketcount_gb),
+	STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+	STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+	STAT_ITEM("tx_multicast_packets",  txmulticastpackets_gb),
+	STAT_ITEM("tx_pause_packets", txpausepackets),
+	STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+	STAT_ITEM("tx_65_to_127_byte_packets",  tx65to127octets_gb),
+	STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+	STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+	STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+	STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+	STAT_ITEM("tx_underflow_errors", txunderflowerror),
+	STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+	STAT_ITEM("rx_bytes", rxoctetcount_gb),
+	STAT_ITEM("rx_packets", rxpacketcount_gb),
+	STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+	STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+	STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+	STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+	STAT_ITEM("rx_pause_packets", rxpausepackets),
+	STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+	STAT_ITEM("rx_65_to_127_byte_packets",  rx65to127octets_gb),
+	STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+	STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+	STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+	STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+	STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+	STAT_ITEM("rx_oversize_packets", rxoversize_g),
+	STAT_ITEM("rx_undersize_packets", rxundersize_g),
+	STAT_ITEM("rx_jabbers", rxjabbererror),
+	STAT_ITEM("rx_align_errors", rxalignmenterror),
+	STAT_ITEM("rx_crc_errors", rxcrcerror),
+	STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
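+
+/* Each entry above pairs an ethtool name string with the offsetof() of the
+ * matching counter in struct dwceqos_mmc_counters, so an ethtool stats
+ * callback can copy all counters generically, roughly:
+ *   data[i] = *(u64 *)((u8 *)&lp->mmc_counters +
+ *                      dwceqos_ethtool_stats[i].offset);
+ * (illustrative sketch only).
+ */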
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+	/* Enable AXI low-power interface. */
+	bool en_lpi;
+	/* Limit on number of outstanding AXI write requests. */
+	u32 write_requests;
+	/* Limit on number of outstanding AXI read requests. */
+	u32 read_requests;
+	/* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+	u32 burst_map;
+	/* DMA Programmable burst length*/
+	u32 tx_pbl;
+	u32 rx_pbl;
+};
+
+struct dwceqos_flowcontrol {
+	int autoneg;
+	int rx;
+	int rx_current;
+	int tx;
+	int tx_current;
+};
+
+struct net_local {
+	void __iomem *baseaddr;
+	struct clk *phy_ref_clk;
+	struct clk *apb_pclk;
+
+	struct device_node *phy_node;
+	struct net_device *ndev;
+	struct platform_device *pdev;
+
+	u32 msg_enable;
+
+	struct tasklet_struct tx_bdreclaim_tasklet;
+	struct workqueue_struct *txtimeout_handler_wq;
+	struct work_struct txtimeout_reinit;
+
+	phy_interface_t phy_interface;
+	struct phy_device *phy_dev;
+	struct mii_bus *mii_bus;
+
+	unsigned int link;
+	unsigned int speed;
+	unsigned int duplex;
+
+	struct napi_struct napi;
+
+	/* DMA Descriptor Areas */
+	struct ring_desc *rx_skb;
+	struct ring_desc *tx_skb;
+
+	struct dwceqos_dma_desc *tx_descs;
+	struct dwceqos_dma_desc *rx_descs;
+
+	/* DMA Mapped Descriptor areas*/
+	dma_addr_t tx_descs_addr;
+	dma_addr_t rx_descs_addr;
+	dma_addr_t tx_descs_tail_addr;
+	dma_addr_t rx_descs_tail_addr;
+
+	size_t tx_free;
+	size_t tx_next;
+	size_t rx_cur;
+	size_t tx_cur;
+
+	/* Spinlocks for accessing DMA Descriptors */
+	spinlock_t tx_lock;
+
+	/* Spinlock for register read-modify-writes. */
+	spinlock_t hw_lock;
+
+	u32 feature0;
+	u32 feature1;
+	u32 feature2;
+
+	struct dwceqos_bus_cfg bus_cfg;
+	bool en_tx_lpi_clockgating;
+
+	int eee_enabled;
+	int eee_active;
+	int csr_val;
+	u32 gso_size;
+
+	struct dwceqos_mmc_counters mmc_counters;
+	/* Protect the mmc_counter updates. */
+	spinlock_t stats_lock;
+	u32 mmc_rx_counters_mask;
+	u32 mmc_tx_counters_mask;
+
+	struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+				      u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+				  unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
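+/* MMIO register helpers. The relaxed accessors are used throughout;
+ * ordering against DMA descriptor updates is enforced explicitly with
+ * wmb()/dma_rmb(), and register read-modify-write sequences are
+ * serialized by hw_lock.
+ */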
+#define dwceqos_read(lp, reg)						\
+	readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val)					\
+	writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+	lp->link    = 0;
+	lp->speed   = 0;
+	lp->duplex  = DUPLEX_UNKNOWN;
+	lp->flowcontrol.rx_current = 0;
+	lp->flowcontrol.tx_current = 0;
+	lp->eee_active = 0;
+	lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+	struct dwceqos_dma_desc *dd;
+
+	if (tx)
+		dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+	else
+		dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+	pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+		index, dd);
+	pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+		dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+	size_t desci, i;
+
+	pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+		lp->tx_cur, lp->tx_next);
+
+	print_descriptor(lp, lp->rx_cur, 0);
+
+	for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+		 i < DWCEQOS_TX_DCNT;
+		 ++i) {
+		print_descriptor(lp, desci, 1);
+		desci = (desci + 1) % DWCEQOS_TX_DCNT;
+	}
+
+	pr_info("DMA_Debug_Status0:          0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+	pr_info("DMA_CH0_Status:             0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+	pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+		dwceqos_read(lp, 0x1144));
+	pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+		dwceqos_read(lp, 0x1154));
+	pr_info("MTL_Debug_Status:      0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+	pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+	pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+	pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+		dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+		dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
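+/* Derive the MDIO clock range (CR field) from the CSR/APB clock rate so
+ * that the resulting MDC frequency stays within the IEEE 802.3 limit.
+ */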
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+	int rate = clk_get_rate(lp->apb_pclk);
+
+	if (rate <= 20000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+	else if (rate <= 35000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+	else if (rate <= 60000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+	else if (rate <= 100000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+	else if (rate <= 150000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+	else if (rate <= 250000000)
+		lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+	struct net_local *lp = bus->priv;
+	u32 regval;
+	int i;
+	int data;
+
+	regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+		DWCEQOS_MDIO_PHYREG(phyreg) |
+		DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+		DWCEQOS_MAC_MDIO_ADDR_GB |
+		DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+	for (i = 0; i < 5; ++i) {
+		usleep_range(64, 128);
+		if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+		      DWCEQOS_MAC_MDIO_ADDR_GB))
+			break;
+	}
+
+	data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+	if (i == 5) {
+		netdev_warn(lp->ndev, "MDIO read timed out\n");
+		data = 0xffff;
+	}
+
+	return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+			      u16 value)
+{
+	struct net_local *lp = bus->priv;
+	u32 regval;
+	int i;
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+	regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+		DWCEQOS_MDIO_PHYREG(phyreg) |
+		DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+		DWCEQOS_MAC_MDIO_ADDR_GB |
+		DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+	for (i = 0; i < 5; ++i) {
+		usleep_range(64, 128);
+		if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+		      DWCEQOS_MAC_MDIO_ADDR_GB))
+			break;
+	}
+	if (i == 5)
+		netdev_warn(lp->ndev, "MDIO write timed out\n");
+	return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!netif_running(ndev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phy_mii_ioctl(phydev, rq, cmd);
+	default:
+		dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+		return -EOPNOTSUPP;
+	}
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Indicate link down to the LPI state machine */
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	/* Indicate link up to the LPI state machine */
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+	lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+	/* Check for changed EEE capability */
+	if (!lp->eee_active && lp->eee_enabled) {
+		lp->eee_enabled = 0;
+
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	}
+}
+
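+/* Program the MAC port speed and duplex from the PHY state: PS=1/FES=0
+ * selects 10 Mbit/s, PS=1/FES=1 selects 100 Mbit/s, PS=0 selects gigabit
+ * and DM selects full duplex.
+ */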
+static void dwceqos_set_speed(struct net_local *lp)
+{
+	struct phy_device *phydev = lp->phy_dev;
+	u32 regval;
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+		    DWCEQOS_MAC_CFG_DM);
+
+	if (phydev->duplex)
+		regval |= DWCEQOS_MAC_CFG_DM;
+	if (phydev->speed == SPEED_10) {
+		regval |= DWCEQOS_MAC_CFG_PS;
+	} else if (phydev->speed == SPEED_100) {
+		regval |= DWCEQOS_MAC_CFG_PS |
+			DWCEQOS_MAC_CFG_FES;
+	} else if (phydev->speed != SPEED_1000) {
+		netdev_err(lp->ndev,
+			   "unknown PHY speed %d\n",
+			   phydev->speed);
+		return;
+	}
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+	int status_change = 0;
+
+	if (phydev->link) {
+		if ((lp->speed != phydev->speed) ||
+		    (lp->duplex != phydev->duplex)) {
+			dwceqos_set_speed(lp);
+
+			lp->speed = phydev->speed;
+			lp->duplex = phydev->duplex;
+			status_change = 1;
+		}
+
+		if (lp->flowcontrol.autoneg) {
+			lp->flowcontrol.rx = phydev->pause ||
+					     phydev->asym_pause;
+			lp->flowcontrol.tx = phydev->pause ||
+					     phydev->asym_pause;
+		}
+
+		if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+			if (netif_msg_link(lp))
+				netdev_dbg(ndev, "set rx flow to %d\n",
+					   lp->flowcontrol.rx);
+			dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+			lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+		}
+		if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+			if (netif_msg_link(lp))
+				netdev_dbg(ndev, "set tx flow to %d\n",
+					   lp->flowcontrol.tx);
+			dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+			lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+		}
+	}
+
+	if (phydev->link != lp->link) {
+		lp->link = phydev->link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (phydev->link) {
+			lp->ndev->trans_start = jiffies;
+			dwceqos_link_up(lp);
+		} else {
+			dwceqos_link_down(lp);
+		}
+		phy_print_status(phydev);
+	}
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = NULL;
+
+	if (lp->phy_node) {
+		phydev = of_phy_connect(lp->ndev,
+					lp->phy_node,
+					&dwceqos_adjust_link,
+					0,
+					lp->phy_interface);
+
+		if (!phydev) {
+			netdev_err(ndev, "no PHY found\n");
+			return -ENODEV;
+		}
+	} else {
+		netdev_err(ndev, "no PHY configured\n");
+		return -ENODEV;
+	}
+
+	if (netif_msg_probe(lp))
+		netdev_dbg(lp->ndev,
+			   "phydev %p, phydev->phy_id 0xa%x, phydev->addr 0x%x\n",
+			   phydev, phydev->phy_id, phydev->addr);
+
+	phydev->supported &= PHY_GBIT_FEATURES;
+
+	lp->link    = 0;
+	lp->speed   = 0;
+	lp->duplex  = DUPLEX_UNKNOWN;
+	lp->phy_dev = phydev;
+
+	if (netif_msg_probe(lp)) {
+		netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+			   lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+		netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+			   lp->phy_dev->drv->name);
+	}
+
+	return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+	struct sk_buff *new_skb;
+	dma_addr_t new_skb_baddr = 0;
+
+	new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+	if (!new_skb) {
+		netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+		goto err_out;
+	}
+
+	new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+				       new_skb->data, DWCEQOS_RX_BUF_SIZE,
+				       DMA_FROM_DEVICE);
+	if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+		netdev_err(lp->ndev, "DMA map error\n");
+		dev_kfree_skb(new_skb);
+		new_skb = NULL;
+		goto err_out;
+	}
+
+	lp->rx_descs[index].des0 = new_skb_baddr;
+	lp->rx_descs[index].des1 = 0;
+	lp->rx_descs[index].des2 = 0;
+	lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+				   DWCEQOS_DMA_RDES3_BUF1V |
+				   DWCEQOS_DMA_RDES3_OWN;
+
+	lp->rx_skb[index].mapping = new_skb_baddr;
+	lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+	lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+	int i;
+
+	if (lp->rx_skb) {
+		for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+			if (lp->rx_skb[i].skb) {
+				dma_unmap_single(lp->ndev->dev.parent,
+						 lp->rx_skb[i].mapping,
+						 lp->rx_skb[i].len,
+						 DMA_FROM_DEVICE);
+
+				dev_kfree_skb(lp->rx_skb[i].skb);
+				lp->rx_skb[i].skb = NULL;
+				lp->rx_skb[i].mapping = 0;
+			}
+		}
+	}
+
+	if (lp->tx_skb) {
+		for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+			if (lp->tx_skb[i].skb) {
+				dev_kfree_skb(lp->tx_skb[i].skb);
+				lp->tx_skb[i].skb = NULL;
+			}
+			if (lp->tx_skb[i].mapping) {
+				dma_unmap_single(lp->ndev->dev.parent,
+						 lp->tx_skb[i].mapping,
+						 lp->tx_skb[i].len,
+						 DMA_TO_DEVICE);
+				lp->tx_skb[i].mapping = 0;
+			}
+		}
+	}
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+	int size;
+
+	dwceqos_clean_rings(lp);
+
+	kfree(lp->tx_skb);
+	lp->tx_skb = NULL;
+	kfree(lp->rx_skb);
+	lp->rx_skb = NULL;
+
+	size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+	if (lp->rx_descs) {
+		dma_free_coherent(lp->ndev->dev.parent, size,
+				  (void *)(lp->rx_descs), lp->rx_descs_addr);
+		lp->rx_descs = NULL;
+	}
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+	if (lp->tx_descs) {
+		dma_free_coherent(lp->ndev->dev.parent, size,
+				  (void *)(lp->tx_descs), lp->tx_descs_addr);
+		lp->tx_descs = NULL;
+	}
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+	int size;
+	u32 i;
+
+	lp->gso_size = 0;
+
+	lp->tx_skb = NULL;
+	lp->rx_skb = NULL;
+	lp->rx_descs = NULL;
+	lp->tx_descs = NULL;
+
+	/* Reset the DMA indexes */
+	lp->rx_cur = 0;
+	lp->tx_cur = 0;
+	lp->tx_next = 0;
+	lp->tx_free = DWCEQOS_TX_DCNT;
+
+	/* Allocate Ring descriptors */
+	size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+	lp->rx_skb = kzalloc(size, GFP_KERNEL);
+	if (!lp->rx_skb)
+		goto err_out;
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+	lp->tx_skb = kzalloc(size, GFP_KERNEL);
+	if (!lp->tx_skb)
+		goto err_out;
+
+	/* Allocate DMA descriptors */
+	size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+	lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+			&lp->rx_descs_addr, 0);
+	if (!lp->rx_descs)
+		goto err_out;
+	lp->rx_descs_tail_addr = lp->rx_descs_addr +
+		sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+	size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+	lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+			&lp->tx_descs_addr, 0);
+	if (!lp->tx_descs)
+		goto err_out;
+	lp->tx_descs_tail_addr = lp->tx_descs_addr +
+		sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+	/* Initialize RX Ring Descriptors and buffers */
+	for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+		dwceqos_alloc_rxring_desc(lp, i);
+		if (!lp->rx_skb[i].skb)
+			goto err_out;
+	}
+
+	/* Initialize TX Descriptors */
+	for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+		lp->tx_descs[i].des0 = 0;
+		lp->tx_descs[i].des1 = 0;
+		lp->tx_descs[i].des2 = 0;
+		lp->tx_descs[i].des3 = 0;
+	}
+
+	/* Make descriptor writes visible to the DMA. */
+	wmb();
+
+	return 0;
+
+err_out:
+	dwceqos_descriptor_free(lp);
+	return -ENOMEM;
+}
+
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+	return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+	lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+	lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+	lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval |= DWCEQOS_DMA_CH0_IE_TIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval |= DWCEQOS_DMA_CH0_IE_RIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+	regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+	dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+	dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+	int ret = -ENXIO, i;
+	struct resource res;
+	struct device_node *mdionode;
+
+	mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+	if (!mdionode)
+		return 0;
+
+	lp->mii_bus = mdiobus_alloc();
+	if (!lp->mii_bus) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	lp->mii_bus->name  = "DWCEQOS MII bus";
+	lp->mii_bus->read  = &dwceqos_mdio_read;
+	lp->mii_bus->write = &dwceqos_mdio_write;
+	lp->mii_bus->priv = lp;
+	lp->mii_bus->parent = &lp->ndev->dev;
+
+	lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!lp->mii_bus->irq) {
+		ret = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		lp->mii_bus->irq[i] = PHY_POLL;
+	of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+	snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+		 (unsigned long long)res.start);
+	if (of_mdiobus_register(lp->mii_bus, mdionode))
+		goto err_out_free_mdio_irq;
+
+	of_node_put(mdionode);
+	return 0;
+
+err_out_free_mdio_irq:
+	kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(lp->mii_bus);
+err_out:
+	of_node_put(mdionode);
+	return ret;
+}
+
+/* DMA reset. When issued also resets all MTL and MAC registers as well */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+	/* Wait (at most) 0.5 seconds for DMA reset. */
+	int i = 5000;
+	u32 reg;
+
+	/* Force gigabit to guarantee a TX clock for GMII. */
+	reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+	reg |= DWCEQOS_MAC_CFG_DM;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+	do {
+		udelay(100);
+		i--;
+		reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+	} while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+	/* We might experience a timeout if the chip clock mux is broken */
+	if (!i)
+		netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+	if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+		netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+			   dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+				"read" : "write",
+			   dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+				"descr" : "data",
+			   dma_status);
+
+		print_status(lp);
+	}
+	if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+		netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+			   dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+				"read" : "write",
+			   dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+				"descr" : "data",
+			   dma_status);
+
+		print_status(lp);
+	}
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+
+	/* A latched MMC interrupt cannot be masked, so we must read all
+	 * the counters that have an interrupt pending.
+	 */
+	dwceqos_read_mmc_counters(lp,
+				  dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+				  dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+	u32 cause;
+
+	cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+	if (cause & DWCEQOS_MAC_IS_MMC_INT)
+		dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct net_local *lp = netdev_priv(ndev);
+
+	u32 cause;
+	u32 dma_status;
+	irqreturn_t ret = IRQ_NONE;
+
+	cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+	/* DMA Channel 0 Interrupt */
+	if (cause & DWCEQOS_DMA_IS_DC0IS) {
+		dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+		/* Transmit Interrupt */
+		if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+			tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+			dwceqos_dma_disable_txirq(lp);
+		}
+
+		/* Receive Interrupt */
+		if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+			/* Disable RX IRQs */
+			dwceqos_dma_disable_rxirq(lp);
+			napi_schedule(&lp->napi);
+		}
+
+		/* Fatal Bus Error interrupt */
+		if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+			dwceqos_fatal_bus_error(lp, dma_status);
+
+			/* errata 9000831707 */
+			dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+				      DWCEQOS_DMA_CH0_IS_REB;
+		}
+
+		/* Ack all DMA Channel 0 IRQs */
+		dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+		ret = IRQ_HANDLED;
+	}
+
+	if (cause & DWCEQOS_DMA_IS_MTLIS) {
+		u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+		dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+		ret = IRQ_HANDLED;
+	}
+
+	if (cause & DWCEQOS_DMA_IS_MACIS) {
+		dwceqos_mac_interrupt(lp);
+		ret = IRQ_HANDLED;
+	}
+	return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+	if (enable)
+		regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+	else
+		regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	/* MTL flow control */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+	if (enable)
+		regval |= DWCEQOS_MTL_RXQ_EHFC;
+	else
+		regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+	dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+	/* MAC flow control */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+	if (enable)
+		regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+	else
+		regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+	dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+	u32 regval;
+	unsigned long flags;
+	int RQS, RFD, RFA;
+
+	spin_lock_irqsave(&lp->hw_lock, flags);
+
+	regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+	/* The queue size is in units of 256 bytes; the threshold fields
+	 * below are in units of 512 bytes.
+	 */
+	RQS = ((regval >> 20) & 0x3FF) + 1;
+	RQS /= 2;
+
+	/* The thresholds are relative to a full queue, with a bias
+	 * of 1 KiByte below full.
+	 */
+	RFD = RQS / 2 - 2;
+	RFA = RQS / 8 - 2;
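+	/* For example, an 8 KiB RX queue gives RQS = 16 (512-byte units),
+	 * so RFD = 6 and RFA = 0.
+	 */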
+
+	regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+	if (RFD >= 0 && RFA >= 0) {
+		dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+	} else {
+		netdev_warn(lp->ndev,
+			    "FIFO too small for flow control.");
+	}
+
+	regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+		 DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+	spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
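+/* Program the MAC 1 us tick counter from the CSR clock rate; it provides
+ * the microsecond time base used by, e.g., the LPI timers.
+ */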
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+	unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+	BUG_ON(!rate_mhz);
+
+	dwceqos_write(lp,
+		      REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+		      DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+	u32 sysbus_reg;
+
+	/* N.B. We do not support the Fixed Burst mode because it
+	 * opens a race window by making HW access to DMA descriptors
+	 * non-atomic.
+	 */
+
+	sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+	if (lp->bus_cfg.en_lpi)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+	if (lp->bus_cfg.burst_map)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+			lp->bus_cfg.burst_map);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+			DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+	if (lp->bus_cfg.read_requests)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+			lp->bus_cfg.read_requests - 1);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+			DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+	if (lp->bus_cfg.write_requests)
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+			lp->bus_cfg.write_requests - 1);
+	else
+		sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+			DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+	if (netif_msg_hw(lp))
+		netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+	u32 regval;
+	u32 buswidth;
+	u32 dma_skip;
+
+	/* Software reset */
+	dwceqos_reset_hw(lp);
+
+	dwceqos_configure_bus(lp);
+
+	/* Probe data bus width, 32/64/128 bits. The tail pointer register
+	 * forces its low address bits to zero according to the bus width,
+	 * so writing 0xF and reading it back reveals the width in bytes.
+	 */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+	buswidth = (regval ^ 0xF) + 1;
+
+	/* Cache-align dma descriptors. */
+	dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+		      DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+		      DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+	/* Initialize DMA Channel 0 */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+		      (u32)lp->tx_descs_addr);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+		      (u32)lp->rx_descs_addr);
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+		      lp->tx_descs_tail_addr);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+		      lp->rx_descs_tail_addr);
+
+	if (lp->bus_cfg.tx_pbl)
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+	else
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+	/* Enable TSO if the HW supports it */
+	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+		regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+	if (lp->bus_cfg.rx_pbl)
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+	else
+		regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+	regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+	regval |= DWCEQOS_DMA_CH_CTRL_START;
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+	/* Initialize MTL Queues */
+	regval = DWCEQOS_MTL_SCHALG_STRICT;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+	regval = DWCEQOS_MTL_TXQ_SIZE(
+			DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+		DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+		DWCEQOS_MTL_TXQ_TTC512;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+	regval = DWCEQOS_MTL_RXQ_SIZE(
+			DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+		DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+	dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+	dwceqos_configure_flow_control(lp);
+
+	/* Initialize MAC */
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+	lp->eee_enabled = 0;
+
+	dwceqos_configure_clock(lp);
+
+	/* MMC counters */
+
+	/* probe implemented counters */
+	dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+	dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+	lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+	lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+	dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+		DWCEQOS_MMC_CTRL_RSTONRD);
+	dwceqos_enable_mmc_interrupt(lp);
+
+	/* Enable Interrupts */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+		      DWCEQOS_DMA_CH0_IE_NIE |
+		      DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+		      DWCEQOS_DMA_CH0_IE_AIE |
+		      DWCEQOS_DMA_CH0_IE_FBEE);
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+		DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+	/* Start TX DMA */
+	regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+		      regval | DWCEQOS_DMA_CH_CTRL_START);
+
+	/* Enable MAC TX/RX */
+	regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+		      regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
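+/* TX completion tasklet: walk the ring from tx_cur, unmap and free buffers
+ * for descriptors the DMA has released, report completed work to BQL and
+ * re-enable the TX interrupt.
+ */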
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+	struct net_device *ndev = (struct net_device *)data;
+	struct net_local *lp = netdev_priv(ndev);
+	unsigned int tx_bytes = 0;
+	unsigned int tx_packets = 0;
+
+	spin_lock(&lp->tx_lock);
+
+	while (lp->tx_free < DWCEQOS_TX_DCNT) {
+		struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+		struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+		/* Descriptor still being held by DMA ? */
+		if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+			break;
+
+		if (rd->mapping)
+			dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+					 DMA_TO_DEVICE);
+
+		if (unlikely(rd->skb)) {
+			++tx_packets;
+			tx_bytes += rd->skb->len;
+			dev_consume_skb_any(rd->skb);
+		}
+
+		rd->skb = NULL;
+		rd->mapping = 0;
+		lp->tx_free++;
+		lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
+		if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+		    (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+			if (netif_msg_tx_err(lp))
+				netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+					   dd->des3);
+			if (netif_msg_hw(lp))
+				print_status(lp);
+		}
+	}
+	spin_unlock(&lp->tx_lock);
+
+	netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+	dwceqos_dma_enable_txirq(lp);
+	netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+	struct sk_buff *skb;
+	u32 tot_size = 0;
+	unsigned int n_packets = 0;
+	unsigned int n_descs = 0;
+	u32 len;
+
+	struct dwceqos_dma_desc *dd;
+	struct sk_buff *new_skb;
+	dma_addr_t new_skb_baddr = 0;
+
+	while (n_descs < budget) {
+		if (!dwceqos_packet_avail(lp))
+			break;
+
+		new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+		if (!new_skb) {
+			netdev_err(lp->ndev, "no memory for new sk_buff\n");
+			break;
+		}
+
+		/* Get dma handle of skb->data */
+		new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+					       new_skb->data,
+					       DWCEQOS_RX_BUF_SIZE,
+					       DMA_FROM_DEVICE);
+		if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+			netdev_err(lp->ndev, "DMA map error\n");
+			dev_kfree_skb(new_skb);
+			break;
+		}
+
+		/* Read descriptor data after reading owner bit. */
+		dma_rmb();
+
+		dd = &lp->rx_descs[lp->rx_cur];
+		len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+		skb = lp->rx_skb[lp->rx_cur].skb;
+
+		/* Unmap old buffer */
+		dma_unmap_single(lp->ndev->dev.parent,
+				 lp->rx_skb[lp->rx_cur].mapping,
+				 lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+		/* Discard packet on reception error or bad checksum */
+		if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+		    (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+			dev_kfree_skb(skb);
+			skb = NULL;
+		} else {
+			skb_put(skb, len);
+			skb->protocol = eth_type_trans(skb, lp->ndev);
+			switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+			case DWCEQOS_DMA_RDES1_PT_UDP:
+			case DWCEQOS_DMA_RDES1_PT_TCP:
+			case DWCEQOS_DMA_RDES1_PT_ICMP:
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			default:
+				skb->ip_summed = CHECKSUM_NONE;
+				break;
+			}
+		}
+
+		if (unlikely(!skb)) {
+			if (netif_msg_rx_err(lp))
+				netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+					   lp->rx_descs[lp->rx_cur].des3);
+		} else {
+			tot_size += skb->len;
+			n_packets++;
+
+			netif_receive_skb(skb);
+		}
+
+		lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+		lp->rx_descs[lp->rx_cur].des1 = 0;
+		lp->rx_descs[lp->rx_cur].des2 = 0;
+		/* The DMA must observe des0/1/2 written before des3. */
+		wmb();
+		lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+						DWCEQOS_DMA_RDES3_OWN  |
+						DWCEQOS_DMA_RDES3_BUF1V;
+
+		lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+		lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+		lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+		n_descs++;
+		lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+	}
+
+	/* Make sure any ownership update is written to the descriptors before
+	 * DMA wakeup.
+	 */
+	wmb();
+
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+	/* Wake up RX by writing tail pointer */
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+		      lp->rx_descs_tail_addr);
+
+	return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+	struct net_local *lp = container_of(napi, struct net_local, napi);
+	int work_done = 0;
+
+	work_done = dwceqos_rx(lp, budget - work_done);
+
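+	/* Stay in polling mode while packets remain or the budget was
+	 * consumed; reporting the full budget keeps NAPI polling. Otherwise
+	 * complete NAPI and re-arm the RX interrupt.
+	 */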
+	if (!dwceqos_packet_avail(lp) && work_done < budget) {
+		napi_complete(napi);
+		dwceqos_dma_enable_rxirq(lp);
+	} else {
+		work_done = budget;
+	}
+
+	return work_done;
+}
+
+/* Reinitialize function if a TX timed out */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+	struct net_local *lp = container_of(data, struct net_local,
+		txtimeout_reinit);
+
+	netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+		   DWCEQOS_TX_TIMEOUT);
+
+	if (netif_msg_hw(lp))
+		print_status(lp);
+
+	rtnl_lock();
+	dwceqos_stop(lp->ndev);
+	dwceqos_open(lp->ndev);
+	rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
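+/* The optional "snps,*" properties below tune the AXI/DMA behaviour. An
+ * illustrative fragment (example values only):
+ *
+ *	snps,en-lpi;
+ *	snps,write-requests = <2>;
+ *	snps,read-requests = <16>;
+ *	snps,burst-map = <0x7>;
+ *	snps,txpbl = <8>;
+ *	snps,rxpbl = <2>;
+ */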
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+	struct net_local *lp;
+	const void *mac_address;
+	struct dwceqos_bus_cfg *bus_cfg;
+	struct device_node *np = pdev->dev.of_node;
+
+	ndev = platform_get_drvdata(pdev);
+	lp = netdev_priv(ndev);
+	bus_cfg = &lp->bus_cfg;
+
+	/* Set the MAC address. */
+	mac_address = of_get_mac_address(pdev->dev.of_node);
+	if (mac_address)
+		ether_addr_copy(ndev->dev_addr, mac_address);
+
+	/* These are all optional parameters */
+	lp->en_tx_lpi_clockgating = of_property_read_bool(np,
+		"snps,en-tx-lpi-clockgating");
+	bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+	of_property_read_u32(np, "snps,write-requests",
+			     &bus_cfg->write_requests);
+	of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+	of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+	of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+	of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+	netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
+		   bus_cfg->en_lpi,
+		   bus_cfg->write_requests,
+		   bus_cfg->read_requests,
+		   bus_cfg->burst_map,
+		   bus_cfg->rx_pbl,
+		   bus_cfg->tx_pbl);
+
+	return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	int res;
+
+	dwceqos_reset_state(lp);
+	res = dwceqos_descriptor_init(lp);
+	if (res) {
+		netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+		return res;
+	}
+	netdev_reset_queue(ndev);
+
+	napi_enable(&lp->napi);
+	phy_start(lp->phy_dev);
+	dwceqos_init_hw(lp);
+
+	netif_start_queue(ndev);
+	tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+	return 0;
+}
+
+static bool dweqos_is_tx_dma_suspended(struct net_local *lp)
+{
+	u32 reg;
+
+	reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+	reg = DMA_GET_TX_STATE_CH0(reg);
+
+	return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+	/* Wait for all pending TX buffers to be sent. Upper limit based
+	 * on max frame size on a 10 Mbit link.
+	 */
+	size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
+
+	while (!dweqos_is_tx_dma_suspended(lp) && limit--)
+		usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	phy_stop(lp->phy_dev);
+
+	tasklet_disable(&lp->tx_bdreclaim_tasklet);
+	netif_stop_queue(ndev);
+	napi_disable(&lp->napi);
+
+	dwceqos_drain_dma(lp);
+
+	netif_tx_lock(lp->ndev);
+	dwceqos_reset_hw(lp);
+	dwceqos_descriptor_free(lp);
+	netif_tx_unlock(lp->ndev);
+
+	return 0;
+}
+
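+/* Insert a TSO context descriptor carrying the new MSS (gso_size) for the
+ * data descriptors that follow.
+ */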
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+				    unsigned short gso_size)
+{
+	struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+	dd->des0 = 0;
+	dd->des1 = 0;
+	dd->des2 = gso_size;
+	dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+		      lp->tx_descs_tail_addr);
+}
+
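+/* Per-packet TX bookkeeping: how many descriptors the skb needs, where it
+ * starts and ends in the ring, and the state to restore on rollback.
+ */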
+struct dwceqos_tx {
+	size_t nr_descriptors;
+	size_t initial_descriptor;
+	size_t last_descriptor;
+	size_t prev_gso_size;
+	size_t network_header_len;
+};
+
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+			       struct dwceqos_tx *tx)
+{
+	size_t n = 1;
+	size_t i;
+
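+	/* A changed GSO size needs an extra context descriptor, see
+	 * dwceqos_dmadesc_set_ctx().
+	 */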
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+		++n;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+		     BYTES_PER_DMA_DESC;
+	}
+
+	tx->nr_descriptors = n;
+	tx->initial_descriptor = lp->tx_next;
+	tx->last_descriptor = lp->tx_next;
+	tx->prev_gso_size = lp->gso_size;
+
+	tx->network_header_len = skb_transport_offset(skb);
+	if (skb_is_gso(skb))
+		tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+			     struct dwceqos_tx *tx)
+{
+	struct ring_desc *rd;
+	struct dwceqos_dma_desc *dd;
+	size_t payload_len;
+	dma_addr_t dma_handle;
+
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+		dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+		lp->gso_size = skb_shinfo(skb)->gso_size;
+	}
+
+	dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+				    skb_headlen(skb), DMA_TO_DEVICE);
+
+	if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+		netdev_err(lp->ndev, "TX DMA Mapping error\n");
+		return -ENOMEM;
+	}
+
+	rd = &lp->tx_skb[lp->tx_next];
+	dd = &lp->tx_descs[lp->tx_next];
+
+	rd->skb = NULL;
+	rd->len = skb_headlen(skb);
+	rd->mapping = dma_handle;
+
+	/* Set up DMA Descriptor */
+	dd->des0 = dma_handle;
+
+	if (skb_is_gso(skb)) {
+		payload_len = skb_headlen(skb) - tx->network_header_len;
+
+		if (payload_len)
+			dd->des1 = dma_handle + tx->network_header_len;
+		dd->des2 = tx->network_header_len |
+			DWCEQOS_DMA_DES2_B2L(payload_len);
+		dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+			DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+			(skb->len - tx->network_header_len);
+	} else {
+		dd->des1 = 0;
+		dd->des2 = skb_headlen(skb);
+		dd->des3 = skb->len;
+
+		switch (skb->ip_summed) {
+		case CHECKSUM_PARTIAL:
+			dd->des3 |= DWCEQOS_DMA_TDES3_CA;
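+			/* fall through */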
+		case CHECKSUM_NONE:
+		case CHECKSUM_UNNECESSARY:
+		case CHECKSUM_COMPLETE:
+		default:
+			break;
+		}
+	}
+
+	dd->des3 |= DWCEQOS_DMA_TDES3_FD;
+	if (lp->tx_next != tx->initial_descriptor)
+		dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+	tx->last_descriptor = lp->tx_next;
+	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+	return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+			    struct dwceqos_tx *tx)
+{
+	struct ring_desc *rd = NULL;
+	struct dwceqos_dma_desc *dd;
+	dma_addr_t dma_handle;
+	size_t i;
+
+	/* Setup more ring and DMA descriptor if the packet is fragmented */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		size_t frag_size;
+		size_t consumed_size;
+
+		/* Map DMA Area */
+		dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+					      skb_frag_size(frag),
+					      DMA_TO_DEVICE);
+		if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+			netdev_err(lp->ndev, "DMA Mapping error\n");
+			return -ENOMEM;
+		}
+
+		/* order-3 fragments span more than one descriptor. */
+		frag_size = skb_frag_size(frag);
+		consumed_size = 0;
+		while (consumed_size < frag_size) {
+			size_t dma_size = min_t(size_t, 16376,
+						frag_size - consumed_size);
+
+			rd = &lp->tx_skb[lp->tx_next];
+			memset(rd, 0, sizeof(*rd));
+
+			dd = &lp->tx_descs[lp->tx_next];
+
+			/* Set DMA Descriptor fields */
+			dd->des0 = dma_handle;
+			dd->des1 = 0;
+			dd->des2 = dma_size;
+
+			if (skb_is_gso(skb))
+				dd->des3 = (skb->len - tx->network_header_len);
+			else
+				dd->des3 = skb->len;
+
+			dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+			tx->last_descriptor = lp->tx_next;
+			lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+			consumed_size += dma_size;
+		}
+
+		rd->len = skb_frag_size(frag);
+		rd->mapping = dma_handle;
+	}
+
+	return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+				struct dwceqos_tx *tx)
+{
+	lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+	lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+	lp->tx_skb[tx->last_descriptor].skb = skb;
+
+	/* Make all descriptor updates visible to the DMA before setting the
+	 * owner bit.
+	 */
+	wmb();
+
+	lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+	/* Make the owner bit visible before TX wakeup. */
+	wmb();
+
+	dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+	size_t i = tx->initial_descriptor;
+
+	while (i != lp->tx_next) {
+		if (lp->tx_skb[i].mapping)
+			dma_unmap_single(lp->ndev->dev.parent,
+					 lp->tx_skb[i].mapping,
+					 lp->tx_skb[i].len,
+					 DMA_TO_DEVICE);
+
+		lp->tx_skb[i].mapping = 0;
+		lp->tx_skb[i].skb = NULL;
+
+		memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+		i = (i + 1) % DWCEQOS_TX_DCNT;
+	}
+
+	lp->tx_next = tx->initial_descriptor;
+	lp->gso_size = tx->prev_gso_size;
+}
+
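+/* Queue an skb for transmission: reserve ring space, map the linear part
+ * and any fragments, hand the chain to the DMA and account the packet to
+ * BQL. On a mapping error the descriptors are rolled back and the packet
+ * is dropped.
+ */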
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct dwceqos_tx trans;
+	int err;
+
+	dwceqos_tx_prepare(skb, lp, &trans);
+	if (lp->tx_free < trans.nr_descriptors) {
+		netif_stop_queue(ndev);
+		return NETDEV_TX_BUSY;
+	}
+
+	err = dwceqos_tx_linear(skb, lp, &trans);
+	if (err)
+		goto tx_error;
+
+	err = dwceqos_tx_frags(skb, lp, &trans);
+	if (err)
+		goto tx_error;
+
+	WARN_ON(lp->tx_next !=
+		((trans.initial_descriptor + trans.nr_descriptors) %
+		 DWCEQOS_TX_DCNT));
+
+	dwceqos_tx_finalize(skb, lp, &trans);
+
+	netdev_sent_queue(ndev, skb->len);
+
+	spin_lock_bh(&lp->tx_lock);
+	lp->tx_free -= trans.nr_descriptors;
+	spin_unlock_bh(&lp->tx_lock);
+
+	ndev->trans_start = jiffies;
+	return NETDEV_TX_OK;
+
+tx_error:
+	dwceqos_tx_rollback(lp, &trans);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+	if (netif_running(ndev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(hwaddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+	return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+				  unsigned int reg_n)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+		      data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+	/* Do not disable MAC address 0 */
+	if (reg_n != 0)
+		dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 regval = 0;
+	u32 mc_filter[2];
+	int reg = 1;
+	struct netdev_hw_addr *ha;
+	unsigned int max_mac_addr;
+
+	max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+	if (ndev->flags & IFF_PROMISC) {
+		regval = DWCEQOS_MAC_PKT_FILT_PR;
+	} else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+				(ndev->flags & IFF_ALLMULTI))) {
+		regval = DWCEQOS_MAC_PKT_FILT_PM;
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+	} else if (!netdev_mc_empty(ndev)) {
+		regval = DWCEQOS_MAC_PKT_FILT_HMC;
+		memset(mc_filter, 0, sizeof(mc_filter));
+		netdev_for_each_mc_addr(ha, ndev) {
+			/* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table
+			 */
+			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+			/* The most significant bit determines the register
+			 * to use (H/L) while the other 5 bits determine
+			 * the bit within the register.
+			 */
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+		}
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+	}
+	if (netdev_uc_count(ndev) > max_mac_addr) {
+		regval |= DWCEQOS_MAC_PKT_FILT_PR;
+	} else {
+		netdev_for_each_uc_addr(ha, ndev) {
+			dwceqos_set_umac_addr(lp, ha->addr, reg);
+			reg++;
+		}
+		for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
+			dwceqos_disable_umac_addr(lp, reg);
+	}
+	dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+	disable_irq(ndev->irq);
+	dwceqos_interrupt(ndev->irq, ndev);
+	enable_irq(ndev->irq);
+}
+#endif
+
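+/* Fold the hardware MMC counters selected by rx_mask/tx_mask into the
+ * software totals. The MMC block is configured for reset-on-read
+ * (DWCEQOS_MMC_CTRL_RSTONRD), so each read returns the delta since the
+ * previous read. Callers hold stats_lock.
+ */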
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+				      u32 tx_mask)
+{
+	if (tx_mask & BIT(27))
+		lp->mmc_counters.txlpitranscntr +=
+			dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+	if (tx_mask & BIT(26))
+		lp->mmc_counters.txpiuscntr +=
+			dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+	if (tx_mask & BIT(25))
+		lp->mmc_counters.txoversize_g +=
+			dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+	if (tx_mask & BIT(24))
+		lp->mmc_counters.txvlanpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+	if (tx_mask & BIT(23))
+		lp->mmc_counters.txpausepackets +=
+			dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+	if (tx_mask & BIT(22))
+		lp->mmc_counters.txexcessdef +=
+			dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+	if (tx_mask & BIT(21))
+		lp->mmc_counters.txpacketcount_g +=
+			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+	if (tx_mask & BIT(20))
+		lp->mmc_counters.txoctetcount_g +=
+			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+	if (tx_mask & BIT(19))
+		lp->mmc_counters.txcarriererror +=
+			dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+	if (tx_mask & BIT(18))
+		lp->mmc_counters.txexcesscol +=
+			dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+	if (tx_mask & BIT(17))
+		lp->mmc_counters.txlatecol +=
+			dwceqos_read(lp, DWC_MMC_TXLATECOL);
+	if (tx_mask & BIT(16))
+		lp->mmc_counters.txdeferred +=
+			dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+	if (tx_mask & BIT(15))
+		lp->mmc_counters.txmulticol_g +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+	if (tx_mask & BIT(14))
+		lp->mmc_counters.txsinglecol_g +=
+			dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+	if (tx_mask & BIT(13))
+		lp->mmc_counters.txunderflowerror +=
+			dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+	if (tx_mask & BIT(12))
+		lp->mmc_counters.txbroadcastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+	if (tx_mask & BIT(11))
+		lp->mmc_counters.txmulticastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+	if (tx_mask & BIT(10))
+		lp->mmc_counters.txunicastpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+	if (tx_mask & BIT(9))
+		lp->mmc_counters.tx1024tomaxoctets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+	if (tx_mask & BIT(8))
+		lp->mmc_counters.tx512to1023octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+	if (tx_mask & BIT(7))
+		lp->mmc_counters.tx256to511octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+	if (tx_mask & BIT(6))
+		lp->mmc_counters.tx128to255octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+	if (tx_mask & BIT(5))
+		lp->mmc_counters.tx65to127octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+	if (tx_mask & BIT(4))
+		lp->mmc_counters.tx64octets_gb +=
+			dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+	if (tx_mask & BIT(3))
+		lp->mmc_counters.txmulticastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+	if (tx_mask & BIT(2))
+		lp->mmc_counters.txbroadcastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+	if (tx_mask & BIT(1))
+		lp->mmc_counters.txpacketcount_gb +=
+			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+	if (tx_mask & BIT(0))
+		lp->mmc_counters.txoctetcount_gb +=
+			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+	if (rx_mask & BIT(27))
+		lp->mmc_counters.rxlpitranscntr +=
+			dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+	if (rx_mask & BIT(26))
+		lp->mmc_counters.rxlpiuscntr +=
+			dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+	if (rx_mask & BIT(25))
+		lp->mmc_counters.rxctrlpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+	if (rx_mask & BIT(24))
+		lp->mmc_counters.rxrcverror +=
+			dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+	if (rx_mask & BIT(23))
+		lp->mmc_counters.rxwatchdog +=
+			dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+	if (rx_mask & BIT(22))
+		lp->mmc_counters.rxvlanpackets_gb +=
+			dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+	if (rx_mask & BIT(21))
+		lp->mmc_counters.rxfifooverflow +=
+			dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+	if (rx_mask & BIT(20))
+		lp->mmc_counters.rxpausepackets +=
+			dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+	if (rx_mask & BIT(19))
+		lp->mmc_counters.rxoutofrangetype +=
+			dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+	if (rx_mask & BIT(18))
+		lp->mmc_counters.rxlengtherror +=
+			dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+	if (rx_mask & BIT(17))
+		lp->mmc_counters.rxunicastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+	if (rx_mask & BIT(16))
+		lp->mmc_counters.rx1024tomaxoctets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+	if (rx_mask & BIT(15))
+		lp->mmc_counters.rx512to1023octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+	if (rx_mask & BIT(14))
+		lp->mmc_counters.rx256to511octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+	if (rx_mask & BIT(13))
+		lp->mmc_counters.rx128to255octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+	if (rx_mask & BIT(12))
+		lp->mmc_counters.rx65to127octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+	if (rx_mask & BIT(11))
+		lp->mmc_counters.rx64octets_gb +=
+			dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+	if (rx_mask & BIT(10))
+		lp->mmc_counters.rxoversize_g +=
+			dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+	if (rx_mask & BIT(9))
+		lp->mmc_counters.rxundersize_g +=
+			dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+	if (rx_mask & BIT(8))
+		lp->mmc_counters.rxjabbererror +=
+			dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+	if (rx_mask & BIT(7))
+		lp->mmc_counters.rxrunterror +=
+			dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+	if (rx_mask & BIT(6))
+		lp->mmc_counters.rxalignmenterror +=
+			dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+	if (rx_mask & BIT(5))
+		lp->mmc_counters.rxcrcerror +=
+			dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+	if (rx_mask & BIT(4))
+		lp->mmc_counters.rxmulticastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+	if (rx_mask & BIT(3))
+		lp->mmc_counters.rxbroadcastpackets_g +=
+			dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+	if (rx_mask & BIT(2))
+		lp->mmc_counters.rxoctetcount_g +=
+			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+	if (rx_mask & BIT(1))
+		lp->mmc_counters.rxoctetcount_gb +=
+			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+	if (rx_mask & BIT(0))
+		lp->mmc_counters.rxpacketcount_gb +=
+			dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
+
+static struct rtnl_link_stats64 *
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+	unsigned long flags;
+	struct net_local *lp = netdev_priv(ndev);
+	struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+				  lp->mmc_tx_counters_mask);
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+	s->rx_packets = hwstats->rxpacketcount_gb;
+	s->rx_bytes = hwstats->rxoctetcount_gb;
+	s->rx_errors = hwstats->rxpacketcount_gb -
+		hwstats->rxbroadcastpackets_g -
+		hwstats->rxmulticastpackets_g -
+		hwstats->rxunicastpackets_g;
+	s->multicast = hwstats->rxmulticastpackets_g;
+	s->rx_length_errors = hwstats->rxlengtherror;
+	s->rx_crc_errors = hwstats->rxcrcerror;
+	s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+	s->tx_packets = hwstats->txpacketcount_gb;
+	s->tx_bytes = hwstats->txoctetcount_gb;
+
+	if (lp->mmc_tx_counters_mask & BIT(21))
+		s->tx_errors = hwstats->txpacketcount_gb -
+			hwstats->txpacketcount_g;
+	else
+		s->tx_errors = hwstats->txunderflowerror +
+			hwstats->txcarriererror;
+
+	return s;
+}
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	struct phy_device *phydev = lp->phy_dev;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	strcpy(ed->driver, lp->pdev->dev.driver->name);
+	strcpy(ed->version, DRIVER_VERSION);
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+				   struct ethtool_pauseparam *pp)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	pp->autoneg = lp->flowcontrol.autoneg;
+	pp->tx_pause = lp->flowcontrol.tx;
+	pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+				  struct ethtool_pauseparam *pp)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	int ret = 0;
+
+	lp->flowcontrol.autoneg = pp->autoneg;
+	if (pp->autoneg) {
+		lp->phy_dev->advertising |= ADVERTISED_Pause;
+		lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+	} else {
+		lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+		lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+		lp->flowcontrol.rx = pp->rx_pause;
+		lp->flowcontrol.tx = pp->tx_pause;
+	}
+
+	if (netif_running(ndev))
+		ret = phy_start_aneg(lp->phy_dev);
+
+	return ret;
+}
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+				u8 *data)
+{
+	size_t i;
+
+	if (stringset != ETH_SS_STATS)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+		memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+		       ETH_GSTRING_LEN);
+		data += ETH_GSTRING_LEN;
+	}
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+				      struct ethtool_stats *stats, u64 *data)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	unsigned long flags;
+	size_t i;
+	u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+	spin_lock_irqsave(&lp->stats_lock, flags);
+	dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+				  lp->mmc_tx_counters_mask);
+	spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+	for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+		memcpy(data,
+		       mmcstat + dwceqos_ethtool_stats[i].offset,
+		       sizeof(u64));
+		data++;
+	}
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+	return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			     void *space)
+{
+	const struct net_local *lp = netdev_priv(dev);
+	u32 *reg_space = (u32 *)space;
+	int reg_offset;
+	int reg_ix = 0;
+
+	/* MAC registers */
+	for (reg_offset = START_MAC_REG_OFFSET;
+		reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+	/* MTL registers */
+	for (reg_offset = START_MTL_REG_OFFSET;
+		reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+
+	/* DMA registers */
+	for (reg_offset = START_DMA_REG_OFFSET;
+		reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+		reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+		reg_ix++;
+	}
+
+	BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+	return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+	return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 lpi_status;
+	u32 lpi_enabled;
+
+	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+		return -EOPNOTSUPP;
+
+	edata->eee_active  = lp->eee_active;
+	edata->eee_enabled = lp->eee_enabled;
+	edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+	lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+	lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+	edata->tx_lpi_enabled = lpi_enabled;
+
+	if (netif_msg_hw(lp)) {
+		u32 regval;
+
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+		netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+			    dwceqos_get_rx_lpi_state(regval),
+			    dwceqos_get_tx_lpi_state(regval));
+	}
+
+	return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+	struct net_local *lp = netdev_priv(ndev);
+	u32 regval;
+	unsigned long flags;
+
+	if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+		return -EOPNOTSUPP;
+
+	if (edata->eee_enabled && !lp->eee_active)
+		return -EOPNOTSUPP;
+
+	if (edata->tx_lpi_enabled) {
+		if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+		    edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+			return -EINVAL;
+	}
+
+	lp->eee_enabled = edata->eee_enabled;
+
+	if (edata->eee_enabled && edata->tx_lpi_enabled) {
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+			      edata->tx_lpi_timer);
+
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		if (lp->en_tx_lpi_clockgating)
+			regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	} else {
+		spin_lock_irqsave(&lp->hw_lock, flags);
+		regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+		regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+		dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+		spin_unlock_irqrestore(&lp->hw_lock, flags);
+	}
+
+	return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+	const struct net_local *lp = netdev_priv(ndev);
+
+	return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+	struct net_local *lp = netdev_priv(ndev);
+
+	lp->msg_enable = msglevel;
+}
+
+static struct ethtool_ops dwceqos_ethtool_ops = {
+	.get_settings   = dwceqos_get_settings,
+	.set_settings   = dwceqos_set_settings,
+	.get_drvinfo    = dwceqos_get_drvinfo,
+	.get_link       = ethtool_op_get_link,
+	.get_pauseparam = dwceqos_get_pauseparam,
+	.set_pauseparam = dwceqos_set_pauseparam,
+	.get_strings    = dwceqos_get_strings,
+	.get_ethtool_stats = dwceqos_get_ethtool_stats,
+	.get_sset_count = dwceqos_get_sset_count,
+	.get_regs       = dwceqos_get_regs,
+	.get_regs_len   = dwceqos_get_regs_len,
+	.get_eee        = dwceqos_get_eee,
+	.set_eee        = dwceqos_set_eee,
+	.get_msglevel   = dwceqos_get_msglevel,
+	.set_msglevel   = dwceqos_set_msglevel,
+};
+
+static struct net_device_ops netdev_ops = {
+	.ndo_open		= dwceqos_open,
+	.ndo_stop		= dwceqos_stop,
+	.ndo_start_xmit		= dwceqos_start_xmit,
+	.ndo_set_rx_mode	= dwceqos_set_rx_mode,
+	.ndo_set_mac_address	= dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= dwceqos_poll_controller,
+#endif
+	.ndo_do_ioctl		= dwceqos_ioctl,
+	.ndo_tx_timeout		= dwceqos_tx_timeout,
+	.ndo_get_stats64	= dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+	{ .compatible = "snps,dwc-qos-ethernet-4.10", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+	struct resource *r_mem = NULL;
+	struct net_device *ndev;
+	struct net_local *lp;
+	int ret = -ENXIO;
+
+	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r_mem) {
+		dev_err(&pdev->dev, "no IO resource defined.\n");
+		return -ENXIO;
+	}
+
+	ndev = alloc_etherdev(sizeof(*lp));
+	if (!ndev) {
+		dev_err(&pdev->dev, "etherdev allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	lp = netdev_priv(ndev);
+	lp->ndev = ndev;
+	lp->pdev = pdev;
+	lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+	spin_lock_init(&lp->tx_lock);
+	spin_lock_init(&lp->hw_lock);
+	spin_lock_init(&lp->stats_lock);
+
+	lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+	if (IS_ERR(lp->apb_pclk)) {
+		dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+		ret = PTR_ERR(lp->apb_pclk);
+		goto err_out_free_netdev;
+	}
+
+	ret = clk_prepare_enable(lp->apb_pclk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+		goto err_out_free_netdev;
+	}
+
+	lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+	if (IS_ERR(lp->baseaddr)) {
+		dev_err(&pdev->dev, "failed to map baseaddress.\n");
+		ret = PTR_ERR(lp->baseaddr);
+		goto err_out_clk_dis_aper;
+	}
+
+	ndev->irq = platform_get_irq(pdev, 0);
+	ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+	ndev->netdev_ops = &netdev_ops;
+	ndev->ethtool_ops = &dwceqos_ethtool_ops;
+	ndev->base_addr = r_mem->start;
+
+	dwceqos_get_hwfeatures(lp);
+	dwceqos_mdio_set_csr(lp);
+
+	ndev->hw_features = NETIF_F_SG;
+
+	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+		ndev->hw_features |= NETIF_F_RXCSUM;
+
+	ndev->features = ndev->hw_features;
+
+	netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		goto err_out_clk_dis_aper;
+	}
+
+	lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+	if (IS_ERR(lp->phy_ref_clk)) {
+		dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+		ret = PTR_ERR(lp->phy_ref_clk);
+		goto err_out_unregister_netdev;
+	}
+
+	ret = clk_prepare_enable(lp->phy_ref_clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to enable device clock.\n");
+		goto err_out_unregister_netdev;
+	}
+
+	lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+						"phy-handle", 0);
+	if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+		ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "invalid fixed-link");
+			goto err_out_unregister_netdev;
+		}
+
+		lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+	}
+
+	ret = of_get_phy_mode(lp->pdev->dev.of_node);
+	if (ret < 0) {
+		dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+		goto err_out_unregister_clk_notifier;
+	}
+
+	lp->phy_interface = ret;
+
+	ret = dwceqos_mii_init(lp);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+		goto err_out_unregister_clk_notifier;
+	}
+
+	ret = dwceqos_mii_probe(ndev);
+	if (ret != 0) {
+		netdev_err(ndev, "mii_probe failed.\n");
+		ret = -ENXIO;
+		goto err_out_unregister_clk_notifier;
+	}
+
+	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+	tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+		     (unsigned long)ndev);
+	tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+	lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+	INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+	platform_set_drvdata(pdev, ndev);
+	ret = dwceqos_probe_config_dt(pdev);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+			ret);
+		goto err_out_unregister_clk_notifier;
+	}
+	dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+		 pdev->id, ndev->base_addr, ndev->irq);
+
+	ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+			       ndev->name, ndev);
+	if (ret) {
+		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+			ndev->irq, ret);
+		goto err_out_unregister_clk_notifier;
+	}
+
+	if (netif_msg_probe(lp))
+		netdev_dbg(ndev, "net_local@%p\n", lp);
+
+	return 0;
+
+err_out_unregister_clk_notifier:
+	clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+	unregister_netdev(ndev);
+err_out_clk_dis_aper:
+	clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+	if (lp->phy_node)
+		of_node_put(lp->phy_node);
+	free_netdev(ndev);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct net_local *lp;
+
+	if (ndev) {
+		lp = netdev_priv(ndev);
+
+		if (lp->phy_dev)
+			phy_disconnect(lp->phy_dev);
+		mdiobus_unregister(lp->mii_bus);
+		kfree(lp->mii_bus->irq);
+		mdiobus_free(lp->mii_bus);
+
+		unregister_netdev(ndev);
+
+		clk_disable_unprepare(lp->phy_ref_clk);
+		clk_disable_unprepare(lp->apb_pclk);
+
+		free_netdev(ndev);
+	}
+
+	return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+	.probe   = dwceqos_probe,
+	.remove  = dwceqos_remove,
+	.driver  = {
+		.name  = DRIVER_NAME,
+		.of_match_table = dwceq_of_match,
+	},
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index dd94300..cba3d9f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -41,6 +41,8 @@
 #include <linux/gpio.h>
 #include <linux/atomic.h>
 
+#include <asm/mach-ar7/ar7.h>
+
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index d155bf2..8fc90f1 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -365,7 +365,8 @@
 	spinlock_t			lock;
 	struct platform_device		*pdev;
 	struct net_device		*ndev;
-	struct napi_struct		napi;
+	struct napi_struct		napi_rx;
+	struct napi_struct		napi_tx;
 	struct device			*dev;
 	struct cpsw_platform_data	data;
 	struct cpsw_ss_regs __iomem	*regs;
@@ -386,10 +387,12 @@
 	struct cpsw_ale			*ale;
 	bool				rx_pause;
 	bool				tx_pause;
+	bool				quirk_irq;
+	bool				rx_irq_disabled;
+	bool				tx_irq_disabled;
 	/* snapshot of IRQ numbers */
 	u32 irqs_table[4];
 	u32 num_irqs;
-	bool irq_enabled;
 	struct cpts *cpts;
 	u32 emac_port;
 };
@@ -752,13 +755,15 @@
 {
 	struct cpsw_priv *priv = dev_id;
 
+	writel(0, &priv->wr_regs->tx_en);
 	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-	cpdma_chan_process(priv->txch, 128);
 
-	priv = cpsw_get_slave_priv(priv, 1);
-	if (priv)
-		cpdma_chan_process(priv->txch, 128);
+	if (priv->quirk_irq) {
+		disable_irq_nosync(priv->irqs_table[1]);
+		priv->tx_irq_disabled = true;
+	}
 
+	napi_schedule(&priv->napi_tx);
 	return IRQ_HANDLED;
 }
 
@@ -767,43 +772,49 @@
 	struct cpsw_priv *priv = dev_id;
 
 	cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
+	writel(0, &priv->wr_regs->rx_en);
 
-	cpsw_intr_disable(priv);
-	if (priv->irq_enabled == true) {
+	if (priv->quirk_irq) {
 		disable_irq_nosync(priv->irqs_table[0]);
-		priv->irq_enabled = false;
+		priv->rx_irq_disabled = true;
 	}
 
-	if (netif_running(priv->ndev)) {
-		napi_schedule(&priv->napi);
-		return IRQ_HANDLED;
-	}
-
-	priv = cpsw_get_slave_priv(priv, 1);
-	if (!priv)
-		return IRQ_NONE;
-
-	if (netif_running(priv->ndev)) {
-		napi_schedule(&priv->napi);
-		return IRQ_HANDLED;
-	}
-	return IRQ_NONE;
+	napi_schedule(&priv->napi_rx);
+	return IRQ_HANDLED;
 }
 
-static int cpsw_poll(struct napi_struct *napi, int budget)
+static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
 {
-	struct cpsw_priv	*priv = napi_to_priv(napi);
+	struct cpsw_priv	*priv = napi_to_priv(napi_tx);
+	int			num_tx;
+
+	num_tx = cpdma_chan_process(priv->txch, budget);
+	if (num_tx < budget) {
+		napi_complete(napi_tx);
+		writel(0xff, &priv->wr_regs->tx_en);
+		if (priv->quirk_irq && priv->tx_irq_disabled) {
+			priv->tx_irq_disabled = false;
+			enable_irq(priv->irqs_table[1]);
+		}
+	}
+
+	if (num_tx)
+		cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx);
+
+	return num_tx;
+}
+
+static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+	struct cpsw_priv	*priv = napi_to_priv(napi_rx);
 	int			num_rx;
 
 	num_rx = cpdma_chan_process(priv->rxch, budget);
 	if (num_rx < budget) {
-		struct cpsw_priv *prim_cpsw;
-
-		napi_complete(napi);
-		cpsw_intr_enable(priv);
-		prim_cpsw = cpsw_get_slave_priv(priv, 0);
-		if (prim_cpsw->irq_enabled == false) {
-			prim_cpsw->irq_enabled = true;
+		napi_complete(napi_rx);
+		writel(0xff, &priv->wr_regs->rx_en);
+		if (priv->quirk_irq && priv->rx_irq_disabled) {
+			priv->rx_irq_disabled = false;
 			enable_irq(priv->irqs_table[0]);
 		}
 	}
@@ -1230,7 +1241,6 @@
 static int cpsw_ndo_open(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
-	struct cpsw_priv *prim_cpsw;
 	int i, ret;
 	u32 reg;
 
@@ -1260,6 +1270,8 @@
 				  ALE_ALL_PORTS << priv->host_port, 0, 0);
 
 	if (!cpsw_common_res_usage_state(priv)) {
+		struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
 		/* setup tx dma to fixed prio and zero offset */
 		cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
 		cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1273,6 +1285,19 @@
 		/* Enable internal fifo flow control */
 		writel(0x7, &priv->regs->flow_control);
 
+		napi_enable(&priv_sl0->napi_rx);
+		napi_enable(&priv_sl0->napi_tx);
+
+		if (priv_sl0->tx_irq_disabled) {
+			priv_sl0->tx_irq_disabled = false;
+			enable_irq(priv->irqs_table[1]);
+		}
+
+		if (priv_sl0->rx_irq_disabled) {
+			priv_sl0->rx_irq_disabled = false;
+			enable_irq(priv->irqs_table[0]);
+		}
+
 		if (WARN_ON(!priv->data.rx_descs))
 			priv->data.rx_descs = 128;
 
@@ -1311,18 +1336,9 @@
 		cpsw_set_coalesce(ndev, &coal);
 	}
 
-	napi_enable(&priv->napi);
 	cpdma_ctlr_start(priv->dma);
 	cpsw_intr_enable(priv);
 
-	prim_cpsw = cpsw_get_slave_priv(priv, 0);
-	if (prim_cpsw->irq_enabled == false) {
-		if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) {
-			prim_cpsw->irq_enabled = true;
-			enable_irq(prim_cpsw->irqs_table[0]);
-		}
-	}
-
 	if (priv->data.dual_emac)
 		priv->slaves[priv->emac_port].open_stat = true;
 	return 0;
@@ -1341,10 +1357,13 @@
 
 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
 	netif_stop_queue(priv->ndev);
-	napi_disable(&priv->napi);
 	netif_carrier_off(priv->ndev);
 
 	if (cpsw_common_res_usage_state(priv) <= 1) {
+		struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+
+		napi_disable(&priv_sl0->napi_rx);
+		napi_disable(&priv_sl0->napi_tx);
 		cpts_unregister(priv->cpts);
 		cpsw_intr_disable(priv);
 		cpdma_ctlr_stop(priv->dma);
@@ -2127,7 +2146,6 @@
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
 	ndev->ethtool_ops = &cpsw_ethtool_ops;
-	netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2141,6 +2159,44 @@
 	return ret;
 }
 
+#define CPSW_QUIRK_IRQ		BIT(0)
+
+static struct platform_device_id cpsw_devtype[] = {
+	{
+		/* keep it for existing compatibles */
+		.name = "cpsw",
+		.driver_data = CPSW_QUIRK_IRQ,
+	}, {
+		.name = "am335x-cpsw",
+		.driver_data = CPSW_QUIRK_IRQ,
+	}, {
+		.name = "am4372-cpsw",
+		.driver_data = 0,
+	}, {
+		.name = "dra7-cpsw",
+		.driver_data = 0,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, cpsw_devtype);
+
+enum ti_cpsw_type {
+	CPSW = 0,
+	AM335X_CPSW,
+	AM4372_CPSW,
+	DRA7_CPSW,
+};
+
+static const struct of_device_id cpsw_of_mtable[] = {
+	{ .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], },
+	{ .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], },
+	{ .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], },
+	{ .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
 static int cpsw_probe(struct platform_device *pdev)
 {
 	struct cpsw_platform_data	*data;
@@ -2150,6 +2206,7 @@
 	struct cpsw_ale_params		ale_params;
 	void __iomem			*ss_regs;
 	struct resource			*res, *ss_res;
+	const struct of_device_id	*of_id;
 	u32 slave_offset, sliver_offset, slave_size;
 	int ret = 0, i;
 	int irq;
@@ -2169,7 +2226,6 @@
 	priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 	priv->rx_packet_max = max(rx_packet_max, 128);
 	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
-	priv->irq_enabled = true;
 	if (!priv->cpts) {
 		dev_err(&pdev->dev, "error allocating cpts\n");
 		ret = -ENOMEM;
@@ -2341,6 +2397,13 @@
 		goto clean_ale_ret;
 	}
 
+	of_id = of_match_device(cpsw_of_mtable, &pdev->dev);
+	if (of_id) {
+		pdev->id_entry = of_id->data;
+		if (pdev->id_entry->driver_data)
+			priv->quirk_irq = true;
+	}
+
 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
 	 * MISC IRQs which are always kept disabled with this driver so
 	 * we will not request them.
@@ -2380,7 +2443,8 @@
 
 	ndev->netdev_ops = &cpsw_netdev_ops;
 	ndev->ethtool_ops = &cpsw_ethtool_ops;
-	netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
+	netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+	netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
 
 	/* register the network device */
 	SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -2504,12 +2568,6 @@
 
 static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
 
-static const struct of_device_id cpsw_of_mtable[] = {
-	{ .compatible = "ti,cpsw", },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
-
 static struct platform_driver cpsw_driver = {
 	.driver = {
 		.name	 = "cpsw",
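The cpsw changes above replace the single shared NAPI context with separate RX and TX instances: the hard interrupt handler masks its event source (and, on quirk_irq hardware, the IRQ line itself), schedules NAPI, and the poll function re-enables events only once it processes less than its budget. The toy program below models just that budget contract; it is an illustration under assumed numbers, not driver code.

#include <stdbool.h>
#include <stdio.h>

static int ring_fill = 180;		/* packets waiting in the RX ring */
static bool irq_enabled = false;	/* masked while NAPI is scheduled */

/* Process at most 'budget' packets; re-arm the IRQ only when drained. */
static int poll(int budget)
{
	int done = ring_fill < budget ? ring_fill : budget;

	ring_fill -= done;
	if (done < budget)
		irq_enabled = true;	/* models napi_complete() + enable_irq() */
	return done;
}

int main(void)
{
	int rounds = 0;

	while (!irq_enabled) {
		int done = poll(64);

		rounds++;
		printf("round %d: polled %d packets\n", rounds, done);
	}

	printf("ring drained after %d rounds, IRQ re-enabled\n", rounds);
	return 0;
}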
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index aeebc0a..a21c77b 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -2004,8 +2004,10 @@
 	if (res_ctrl) {
 		priv->ctrl_base =
 			devm_ioremap_resource(&pdev->dev, res_ctrl);
-		if (IS_ERR(priv->ctrl_base))
+		if (IS_ERR(priv->ctrl_base)) {
+			rc = PTR_ERR(priv->ctrl_base);
 			goto no_pdata;
+		}
 	} else {
 		priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
 	}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 4755838..1a5aca5 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -52,6 +52,8 @@
 		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
 		    NETIF_MSG_RX_STATUS)
 
+#define NETCP_EFUSE_ADDR_SWAP	2
+
 #define knav_queue_get_id(q)	knav_queue_device_control(q, \
 				KNAV_QUEUE_GET_ID, (unsigned long)NULL)
 
@@ -173,13 +175,22 @@
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
 {
 	unsigned int addr0, addr1;
 
 	addr1 = readl(efuse_mac + 4);
 	addr0 = readl(efuse_mac);
 
+	switch (swap) {
+	case NETCP_EFUSE_ADDR_SWAP:
+		addr0 = addr1;
+		addr1 = readl(efuse_mac);
+		break;
+	default:
+		break;
+	}
+
 	x[0] = (addr1 & 0x0000ff00) >> 8;
 	x[1] = addr1 & 0x000000ff;
 	x[2] = (addr0 & 0xff000000) >> 24;
@@ -1901,7 +1912,7 @@
 			goto quit;
 		}
 
-		emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
 		if (is_valid_ether_addr(efuse_mac_addr))
 			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
 		else
@@ -2141,7 +2152,6 @@
 static struct platform_driver netcp_driver = {
 	.driver = {
 		.name		= "netcp-1.0",
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match,
 	},
 	.probe = netcp_probe,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 1974a8a..6f16d6a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -295,8 +295,6 @@
 	u32	rx_dma_overruns;
 };
 
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
 struct gbenu_ss_regs {
 	u32	id_ver;
 	u32	synce_count;		/* NU */
@@ -480,7 +478,6 @@
 	u32	tx_pri7_drop_bcnt;
 };
 
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
 
 struct gbe_ss_regs {
@@ -615,7 +612,6 @@
 	u32	rx_dma_overruns;
 };
 
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
 #define GBE_MAX_HW_STAT_MODS			9
 #define GBE_HW_STATS_REG_MAP_SZ			0x100
 
@@ -646,6 +642,7 @@
 	bool				enable_ale;
 	u8				max_num_slaves;
 	u8				max_num_ports; /* max_num_slaves + 1 */
+	u8				num_stats_mods;
 	struct netcp_tx_pipe		tx_pipe;
 
 	int				host_port;
@@ -675,6 +672,7 @@
 	struct net_device		*dummy_ndev;
 
 	u64				*hw_stats;
+	u32				*hw_stats_prev;
 	const struct netcp_ethtool_stat *et_stats;
 	int				num_et_stats;
 	/*  Lock for updating the hwstats */
@@ -874,7 +872,7 @@
 };
 
 /* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE	33
+#define GBENU_ET_STATS_HOST_SIZE	52
 
 #define GBENU_STATS_HOST(field)					\
 {								\
@@ -883,8 +881,8 @@
 	offsetof(struct gbenu_hw_stats, field)			\
 }
 
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE	46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE	65
 
 #define GBENU_STATS_P1(field)					\
 {								\
@@ -976,7 +974,26 @@
 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
 	GBENU_STATS_HOST(ale_unknown_bcast),
 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+	GBENU_STATS_HOST(ale_pol_match),
+	GBENU_STATS_HOST(ale_pol_match_red),
+	GBENU_STATS_HOST(ale_pol_match_yellow),
 	GBENU_STATS_HOST(tx_mem_protect_err),
+	GBENU_STATS_HOST(tx_pri0_drop),
+	GBENU_STATS_HOST(tx_pri1_drop),
+	GBENU_STATS_HOST(tx_pri2_drop),
+	GBENU_STATS_HOST(tx_pri3_drop),
+	GBENU_STATS_HOST(tx_pri4_drop),
+	GBENU_STATS_HOST(tx_pri5_drop),
+	GBENU_STATS_HOST(tx_pri6_drop),
+	GBENU_STATS_HOST(tx_pri7_drop),
+	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
 	/* GBENU Module 1 */
 	GBENU_STATS_P1(rx_good_frames),
 	GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@
 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
 	GBENU_STATS_P1(ale_unknown_bcast),
 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
+	GBENU_STATS_P1(ale_pol_match),
+	GBENU_STATS_P1(ale_pol_match_red),
+	GBENU_STATS_P1(ale_pol_match_yellow),
 	GBENU_STATS_P1(tx_mem_protect_err),
+	GBENU_STATS_P1(tx_pri0_drop),
+	GBENU_STATS_P1(tx_pri1_drop),
+	GBENU_STATS_P1(tx_pri2_drop),
+	GBENU_STATS_P1(tx_pri3_drop),
+	GBENU_STATS_P1(tx_pri4_drop),
+	GBENU_STATS_P1(tx_pri5_drop),
+	GBENU_STATS_P1(tx_pri6_drop),
+	GBENU_STATS_P1(tx_pri7_drop),
+	GBENU_STATS_P1(tx_pri0_drop_bcnt),
+	GBENU_STATS_P1(tx_pri1_drop_bcnt),
+	GBENU_STATS_P1(tx_pri2_drop_bcnt),
+	GBENU_STATS_P1(tx_pri3_drop_bcnt),
+	GBENU_STATS_P1(tx_pri4_drop_bcnt),
+	GBENU_STATS_P1(tx_pri5_drop_bcnt),
+	GBENU_STATS_P1(tx_pri6_drop_bcnt),
+	GBENU_STATS_P1(tx_pri7_drop_bcnt),
 	/* GBENU Module 2 */
 	GBENU_STATS_P2(rx_good_frames),
 	GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@
 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
 	GBENU_STATS_P2(ale_unknown_bcast),
 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
+	GBENU_STATS_P2(ale_pol_match),
+	GBENU_STATS_P2(ale_pol_match_red),
+	GBENU_STATS_P2(ale_pol_match_yellow),
 	GBENU_STATS_P2(tx_mem_protect_err),
+	GBENU_STATS_P2(tx_pri0_drop),
+	GBENU_STATS_P2(tx_pri1_drop),
+	GBENU_STATS_P2(tx_pri2_drop),
+	GBENU_STATS_P2(tx_pri3_drop),
+	GBENU_STATS_P2(tx_pri4_drop),
+	GBENU_STATS_P2(tx_pri5_drop),
+	GBENU_STATS_P2(tx_pri6_drop),
+	GBENU_STATS_P2(tx_pri7_drop),
+	GBENU_STATS_P2(tx_pri0_drop_bcnt),
+	GBENU_STATS_P2(tx_pri1_drop_bcnt),
+	GBENU_STATS_P2(tx_pri2_drop_bcnt),
+	GBENU_STATS_P2(tx_pri3_drop_bcnt),
+	GBENU_STATS_P2(tx_pri4_drop_bcnt),
+	GBENU_STATS_P2(tx_pri5_drop_bcnt),
+	GBENU_STATS_P2(tx_pri6_drop_bcnt),
+	GBENU_STATS_P2(tx_pri7_drop_bcnt),
 	/* GBENU Module 3 */
 	GBENU_STATS_P3(rx_good_frames),
 	GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@
 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
 	GBENU_STATS_P3(ale_unknown_bcast),
 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
+	GBENU_STATS_P3(ale_pol_match),
+	GBENU_STATS_P3(ale_pol_match_red),
+	GBENU_STATS_P3(ale_pol_match_yellow),
 	GBENU_STATS_P3(tx_mem_protect_err),
+	GBENU_STATS_P3(tx_pri0_drop),
+	GBENU_STATS_P3(tx_pri1_drop),
+	GBENU_STATS_P3(tx_pri2_drop),
+	GBENU_STATS_P3(tx_pri3_drop),
+	GBENU_STATS_P3(tx_pri4_drop),
+	GBENU_STATS_P3(tx_pri5_drop),
+	GBENU_STATS_P3(tx_pri6_drop),
+	GBENU_STATS_P3(tx_pri7_drop),
+	GBENU_STATS_P3(tx_pri0_drop_bcnt),
+	GBENU_STATS_P3(tx_pri1_drop_bcnt),
+	GBENU_STATS_P3(tx_pri2_drop_bcnt),
+	GBENU_STATS_P3(tx_pri3_drop_bcnt),
+	GBENU_STATS_P3(tx_pri4_drop_bcnt),
+	GBENU_STATS_P3(tx_pri5_drop_bcnt),
+	GBENU_STATS_P3(tx_pri6_drop_bcnt),
+	GBENU_STATS_P3(tx_pri7_drop_bcnt),
 	/* GBENU Module 4 */
 	GBENU_STATS_P4(rx_good_frames),
 	GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@
 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
 	GBENU_STATS_P4(ale_unknown_bcast),
 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
+	GBENU_STATS_P4(ale_pol_match),
+	GBENU_STATS_P4(ale_pol_match_red),
+	GBENU_STATS_P4(ale_pol_match_yellow),
 	GBENU_STATS_P4(tx_mem_protect_err),
+	GBENU_STATS_P4(tx_pri0_drop),
+	GBENU_STATS_P4(tx_pri1_drop),
+	GBENU_STATS_P4(tx_pri2_drop),
+	GBENU_STATS_P4(tx_pri3_drop),
+	GBENU_STATS_P4(tx_pri4_drop),
+	GBENU_STATS_P4(tx_pri5_drop),
+	GBENU_STATS_P4(tx_pri6_drop),
+	GBENU_STATS_P4(tx_pri7_drop),
+	GBENU_STATS_P4(tx_pri0_drop_bcnt),
+	GBENU_STATS_P4(tx_pri1_drop_bcnt),
+	GBENU_STATS_P4(tx_pri2_drop_bcnt),
+	GBENU_STATS_P4(tx_pri3_drop_bcnt),
+	GBENU_STATS_P4(tx_pri4_drop_bcnt),
+	GBENU_STATS_P4(tx_pri5_drop_bcnt),
+	GBENU_STATS_P4(tx_pri6_drop_bcnt),
+	GBENU_STATS_P4(tx_pri7_drop_bcnt),
 	/* GBENU Module 5 */
 	GBENU_STATS_P5(rx_good_frames),
 	GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@
 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
 	GBENU_STATS_P5(ale_unknown_bcast),
 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
+	GBENU_STATS_P5(ale_pol_match),
+	GBENU_STATS_P5(ale_pol_match_red),
+	GBENU_STATS_P5(ale_pol_match_yellow),
 	GBENU_STATS_P5(tx_mem_protect_err),
+	GBENU_STATS_P5(tx_pri0_drop),
+	GBENU_STATS_P5(tx_pri1_drop),
+	GBENU_STATS_P5(tx_pri2_drop),
+	GBENU_STATS_P5(tx_pri3_drop),
+	GBENU_STATS_P5(tx_pri4_drop),
+	GBENU_STATS_P5(tx_pri5_drop),
+	GBENU_STATS_P5(tx_pri6_drop),
+	GBENU_STATS_P5(tx_pri7_drop),
+	GBENU_STATS_P5(tx_pri0_drop_bcnt),
+	GBENU_STATS_P5(tx_pri1_drop_bcnt),
+	GBENU_STATS_P5(tx_pri2_drop_bcnt),
+	GBENU_STATS_P5(tx_pri3_drop_bcnt),
+	GBENU_STATS_P5(tx_pri4_drop_bcnt),
+	GBENU_STATS_P5(tx_pri5_drop_bcnt),
+	GBENU_STATS_P5(tx_pri6_drop_bcnt),
+	GBENU_STATS_P5(tx_pri7_drop_bcnt),
 	/* GBENU Module 6 */
 	GBENU_STATS_P6(rx_good_frames),
 	GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@
 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
 	GBENU_STATS_P6(ale_unknown_bcast),
 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
+	GBENU_STATS_P6(ale_pol_match),
+	GBENU_STATS_P6(ale_pol_match_red),
+	GBENU_STATS_P6(ale_pol_match_yellow),
 	GBENU_STATS_P6(tx_mem_protect_err),
+	GBENU_STATS_P6(tx_pri0_drop),
+	GBENU_STATS_P6(tx_pri1_drop),
+	GBENU_STATS_P6(tx_pri2_drop),
+	GBENU_STATS_P6(tx_pri3_drop),
+	GBENU_STATS_P6(tx_pri4_drop),
+	GBENU_STATS_P6(tx_pri5_drop),
+	GBENU_STATS_P6(tx_pri6_drop),
+	GBENU_STATS_P6(tx_pri7_drop),
+	GBENU_STATS_P6(tx_pri0_drop_bcnt),
+	GBENU_STATS_P6(tx_pri1_drop_bcnt),
+	GBENU_STATS_P6(tx_pri2_drop_bcnt),
+	GBENU_STATS_P6(tx_pri3_drop_bcnt),
+	GBENU_STATS_P6(tx_pri4_drop_bcnt),
+	GBENU_STATS_P6(tx_pri5_drop_bcnt),
+	GBENU_STATS_P6(tx_pri6_drop_bcnt),
+	GBENU_STATS_P6(tx_pri7_drop_bcnt),
 	/* GBENU Module 7 */
 	GBENU_STATS_P7(rx_good_frames),
 	GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@
 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
 	GBENU_STATS_P7(ale_unknown_bcast),
 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
+	GBENU_STATS_P7(ale_pol_match),
+	GBENU_STATS_P7(ale_pol_match_red),
+	GBENU_STATS_P7(ale_pol_match_yellow),
 	GBENU_STATS_P7(tx_mem_protect_err),
+	GBENU_STATS_P7(tx_pri0_drop),
+	GBENU_STATS_P7(tx_pri1_drop),
+	GBENU_STATS_P7(tx_pri2_drop),
+	GBENU_STATS_P7(tx_pri3_drop),
+	GBENU_STATS_P7(tx_pri4_drop),
+	GBENU_STATS_P7(tx_pri5_drop),
+	GBENU_STATS_P7(tx_pri6_drop),
+	GBENU_STATS_P7(tx_pri7_drop),
+	GBENU_STATS_P7(tx_pri0_drop_bcnt),
+	GBENU_STATS_P7(tx_pri1_drop_bcnt),
+	GBENU_STATS_P7(tx_pri2_drop_bcnt),
+	GBENU_STATS_P7(tx_pri3_drop_bcnt),
+	GBENU_STATS_P7(tx_pri4_drop_bcnt),
+	GBENU_STATS_P7(tx_pri5_drop_bcnt),
+	GBENU_STATS_P7(tx_pri6_drop_bcnt),
+	GBENU_STATS_P7(tx_pri7_drop_bcnt),
 	/* GBENU Module 8 */
 	GBENU_STATS_P8(rx_good_frames),
 	GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@
 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
 	GBENU_STATS_P8(ale_unknown_bcast),
 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
+	GBENU_STATS_P8(ale_pol_match),
+	GBENU_STATS_P8(ale_pol_match_red),
+	GBENU_STATS_P8(ale_pol_match_yellow),
 	GBENU_STATS_P8(tx_mem_protect_err),
+	GBENU_STATS_P8(tx_pri0_drop),
+	GBENU_STATS_P8(tx_pri1_drop),
+	GBENU_STATS_P8(tx_pri2_drop),
+	GBENU_STATS_P8(tx_pri3_drop),
+	GBENU_STATS_P8(tx_pri4_drop),
+	GBENU_STATS_P8(tx_pri5_drop),
+	GBENU_STATS_P8(tx_pri6_drop),
+	GBENU_STATS_P8(tx_pri7_drop),
+	GBENU_STATS_P8(tx_pri0_drop_bcnt),
+	GBENU_STATS_P8(tx_pri1_drop_bcnt),
+	GBENU_STATS_P8(tx_pri2_drop_bcnt),
+	GBENU_STATS_P8(tx_pri3_drop_bcnt),
+	GBENU_STATS_P8(tx_pri4_drop_bcnt),
+	GBENU_STATS_P8(tx_pri5_drop_bcnt),
+	GBENU_STATS_P8(tx_pri6_drop_bcnt),
+	GBENU_STATS_P8(tx_pri7_drop_bcnt),
 };
 
 #define XGBE_STATS0_INFO(field)				\
@@ -1554,70 +1723,97 @@
 	}
 }
 
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
 {
-	void __iomem *base = NULL;
-	u32  __iomem *p;
-	u32 tmp = 0;
+	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+	u32  __iomem *p_stats_entry;
 	int i;
 
 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
-		base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
-		p = base + gbe_dev->et_stats[i].offset;
-		tmp = readl(p);
-		gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+		if (gbe_dev->et_stats[i].type == stats_mod) {
+			p_stats_entry = base + gbe_dev->et_stats[i].offset;
+			gbe_dev->hw_stats[i] = 0;
+			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+		}
+	}
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+					     int et_stats_entry)
+{
+	void __iomem *base = NULL;
+	u32  __iomem *p_stats_entry;
+	u32 curr, delta;
+
+	/* The hw_stats_regs pointers are already
+	 * properly set to point to the right base:
+	 */
+	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+	curr = readl(p_stats_entry);
+	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+	gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+	int i;
+
+	for (i = 0; i < gbe_dev->num_et_stats; i++) {
+		gbe_update_hw_stats_entry(gbe_dev, i);
+
 		if (data)
 			data[i] = gbe_dev->hw_stats[i];
-		/* write-to-decrement:
-		 * new register value = old register value - write value
-		 */
-		writel(tmp, p);
 	}
 }
 
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+					       int stats_mod)
+{
+	u32 val;
+
+	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+	switch (stats_mod) {
+	case GBE_STATSA_MODULE:
+	case GBE_STATSB_MODULE:
+		val &= ~GBE_STATS_CD_SEL;
+		break;
+	case GBE_STATSC_MODULE:
+	case GBE_STATSD_MODULE:
+		val |= GBE_STATS_CD_SEL;
+		break;
+	default:
+		return;
+	}
+
+	/* make the stat module visible */
+	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
+
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+	gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
 {
-	void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
-	void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
-	u64 *hw_stats = &gbe_dev->hw_stats[0];
-	void __iomem *base = NULL;
-	u32  __iomem *p;
-	u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
-	int i, j, pair;
+	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+	int et_entry, j, pair;
 
 	for (pair = 0; pair < 2; pair++) {
-		val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+						      GBE_STATSC_MODULE :
+						      GBE_STATSA_MODULE));
 
-		if (pair == 0)
-			val &= ~GBE_STATS_CD_SEL;
-		else
-			val |= GBE_STATS_CD_SEL;
+		for (j = 0; j < half_num_et_stats; j++) {
+			et_entry = pair * half_num_et_stats + j;
+			gbe_update_hw_stats_entry(gbe_dev, et_entry);
 
-		/* make the stat modules visible */
-		writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
-
-		for (i = 0; i < pair_size; i++) {
-			j = pair * pair_size + i;
-			switch (gbe_dev->et_stats[j].type) {
-			case GBE_STATSA_MODULE:
-			case GBE_STATSC_MODULE:
-				base = gbe_statsa;
-			break;
-			case GBE_STATSB_MODULE:
-			case GBE_STATSD_MODULE:
-				base  = gbe_statsb;
-			break;
-			}
-
-			p = base + gbe_dev->et_stats[j].offset;
-			tmp = readl(p);
-			hw_stats[j] += tmp;
 			if (data)
-				data[j] = hw_stats[j];
-			/* write-to-decrement:
-			 * new register value = old register value - write value
-			 */
-			writel(tmp, p);
+				data[et_entry] = gbe_dev->hw_stats[et_entry];
 		}
 	}
 }
@@ -2207,14 +2403,15 @@
 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
 	}
 
-	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	/* The timer runs in BH context, so there is no need to block BHs here */
+	spin_lock(&gbe_dev->hw_stats_lock);
 
 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
 		gbe_update_stats_ver14(gbe_dev, NULL);
 	else
 		gbe_update_stats(gbe_dev, NULL);
 
-	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+	spin_unlock(&gbe_dev->hw_stats_lock);
 
 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
 	add_timer(&gbe_dev->timer);
@@ -2571,15 +2768,28 @@
 	}
 	gbe_dev->xgbe_serdes_regs = regs;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = xgbe10_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-				  XGBE10_NUM_STAT_ENTRIES *
-				  (gbe_dev->max_num_ports) * sizeof(u64),
-				  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
 					XGBE10_SGMII_MODULE_OFFSET;
@@ -2593,8 +2803,6 @@
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = xgbe10_et_stats;
-	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
 	/* Subsystem registers */
@@ -2679,30 +2887,45 @@
 	}
 	gbe_dev->switch_regs = regs;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+	gbe_dev->et_stats = gbe13_et_stats;
+	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-					  GBE13_NUM_HW_STAT_ENTRIES *
-					  gbe_dev->max_num_slaves * sizeof(u64),
-					  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
 
+	/* K2HK has only 2 hw stats modules visible at a time, so
+	 * modules 0 & 2 point to one base and
+	 * modules 1 & 3 point to the other base
+	 */
 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
 		gbe_dev->hw_stats_regs[i] =
 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
-			(GBE_HW_STATS_REG_MAP_SZ * i);
+			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
 	}
 
 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = gbe13_et_stats;
-	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
 
 	/* Subsystem registers */
@@ -2729,15 +2952,34 @@
 	void __iomem *regs;
 	int i, ret;
 
+	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+	gbe_dev->et_stats = gbenu_et_stats;
+
+	if (IS_SS_ID_NU(gbe_dev))
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+	else
+		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+					GBENU_ET_STATS_PORT_SIZE;
+
 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-				  GBENU_NUM_HW_STAT_ENTRIES *
-				  (gbe_dev->max_num_ports) * sizeof(u64),
-				  GFP_KERNEL);
+					 gbe_dev->num_et_stats * sizeof(u64),
+					 GFP_KERNEL);
 	if (!gbe_dev->hw_stats) {
 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
 		return -ENOMEM;
 	}
 
+	gbe_dev->hw_stats_prev =
+		devm_kzalloc(gbe_dev->dev,
+			     gbe_dev->num_et_stats * sizeof(u32),
+			     GFP_KERNEL);
+	if (!gbe_dev->hw_stats_prev) {
+		dev_err(gbe_dev->dev,
+			"hw_stats_prev memory allocation failed\n");
+		return -ENOMEM;
+	}
+
 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
 	if (ret) {
 		dev_err(gbe_dev->dev,
@@ -2765,16 +3007,8 @@
 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-	gbe_dev->et_stats = gbenu_et_stats;
 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
-	if (IS_SS_ID_NU(gbe_dev))
-		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
-	else
-		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-					GBENU_ET_STATS_PORT_SIZE;
-
 	/* Subsystem registers */
 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
 
@@ -2804,7 +3038,7 @@
 	struct cpsw_ale_params ale_params;
 	struct gbe_priv *gbe_dev;
 	u32 slave_num;
-	int ret = 0;
+	int i, ret = 0;
 
 	if (!node) {
 		dev_err(dev, "device tree info unavailable\n");
@@ -2951,6 +3185,15 @@
 	/* initialize host port */
 	gbe_init_host_port(gbe_dev);
 
+	spin_lock_bh(&gbe_dev->hw_stats_lock);
+	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+			gbe_reset_mod_stats_ver14(gbe_dev, i);
+		else
+			gbe_reset_mod_stats(gbe_dev, i);
+	}
+	spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
 	init_timer(&gbe_dev->timer);
 	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
 	gbe_dev->timer.function = netcp_ethss_timer;
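The netcp_ethss rework above stops relying on the write-to-decrement behaviour of the statistics registers and instead keeps a previous raw reading per counter (hw_stats_prev), accumulating the unsigned 32-bit delta into a 64-bit total in gbe_update_hw_stats_entry(). Because the subtraction is done in modulo-2^32 arithmetic, the delta stays correct across a counter wrap, as the small standalone example below demonstrates (the values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t prev  = 0xfffffff0u;	/* previous raw hardware reading */
	uint32_t curr  = 0x00000010u;	/* counter wrapped since then */
	uint64_t total = 1000;		/* accumulated 64-bit statistic */

	uint32_t delta = curr - prev;	/* modulo-2^32: 0x20 == 32 */

	total += delta;
	prev = curr;

	printf("delta=%u total=%llu prev=%u\n",
	       delta, (unsigned long long)total, prev);
	return 0;
}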
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index a3f7610..0a15acc 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -40,6 +40,7 @@
 #include <linux/tcp.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
+#include <linux/tick.h>
 
 #include <asm/checksum.h>
 #include <asm/homecache.h>
@@ -2273,7 +2274,8 @@
 		tile_net_dev_init(name, mac);
 
 	if (!network_cpus_init())
-		network_cpus_map = *cpu_online_mask;
+		cpumask_and(&network_cpus_map, housekeeping_cpumask(),
+			    cpu_online_mask);
 
 	return 0;
 }
diff --git a/drivers/net/fddi/skfp/h/hwmtm.h b/drivers/net/fddi/skfp/h/hwmtm.h
index 5924d42..4ca2341 100644
--- a/drivers/net/fddi/skfp/h/hwmtm.h
+++ b/drivers/net/fddi/skfp/h/hwmtm.h
@@ -74,15 +74,6 @@
 #define NULL 		0
 #endif
 
-#ifdef	LITTLE_ENDIAN
-#define HWM_REVERSE(x)	(x)
-#else
-#define	HWM_REVERSE(x)		((((x)<<24L)&0xff000000L)	+	\
-				 (((x)<< 8L)&0x00ff0000L)	+	\
-				 (((x)>> 8L)&0x0000ff00L)	+	\
-				 (((x)>>24L)&0x000000ffL))
-#endif
-
 #define C_INDIC		(1L<<25)
 #define A_INDIC		(1L<<26)
 #define	RD_FS_LOCAL	0x80
diff --git a/drivers/net/fjes/Makefile b/drivers/net/fjes/Makefile
new file mode 100644
index 0000000..523e3d7
--- /dev/null
+++ b/drivers/net/fjes/Makefile
@@ -0,0 +1,30 @@
+################################################################################
+#
+# FUJITSU Extended Socket Network Device driver
+# Copyright (c) 2015 FUJITSU LIMITED
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+################################################################################
+
+
+#
+# Makefile for the FUJITSU Extended Socket network device driver
+#
+
+obj-$(CONFIG_FUJITSU_ES) += fjes.o
+
+fjes-objs := fjes_main.o fjes_hw.o fjes_ethtool.o
diff --git a/drivers/net/fjes/fjes.h b/drivers/net/fjes/fjes.h
new file mode 100644
index 0000000..a592fe2
--- /dev/null
+++ b/drivers/net/fjes/fjes.h
@@ -0,0 +1,77 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_H_
+#define FJES_H_
+
+#include <linux/acpi.h>
+
+#include "fjes_hw.h"
+
+#define FJES_ACPI_SYMBOL	"Extended Socket"
+#define FJES_MAX_QUEUES		1
+#define FJES_TX_RETRY_INTERVAL	(20 * HZ)
+#define FJES_TX_RETRY_TIMEOUT	(100)
+#define FJES_TX_TX_STALL_TIMEOUT	(FJES_TX_RETRY_INTERVAL / 2)
+#define FJES_OPEN_ZONE_UPDATE_WAIT	(300) /* msec */
+#define FJES_IRQ_WATCH_DELAY	(HZ)
+
+/* board specific private data structure */
+struct fjes_adapter {
+	struct net_device *netdev;
+	struct platform_device *plat_dev;
+
+	struct napi_struct napi;
+	struct rtnl_link_stats64 stats64;
+
+	unsigned int tx_retry_count;
+	unsigned long tx_start_jiffies;
+	unsigned long rx_last_jiffies;
+	bool unset_rx_last;
+
+	struct work_struct force_close_task;
+	bool force_reset;
+	bool open_guard;
+
+	bool irq_registered;
+
+	struct workqueue_struct *txrx_wq;
+	struct workqueue_struct *control_wq;
+
+	struct work_struct tx_stall_task;
+	struct work_struct raise_intr_rxdata_task;
+
+	struct work_struct unshare_watch_task;
+	unsigned long unshare_watch_bitmask;
+
+	struct delayed_work interrupt_watch_task;
+	bool interrupt_watch_enable;
+
+	struct fjes_hw hw;
+};
+
+extern char fjes_driver_name[];
+extern char fjes_driver_version[];
+extern const u32 fjes_support_mtu[];
+
+void fjes_set_ethtool_ops(struct net_device *);
+
+#endif /* FJES_H_ */
diff --git a/drivers/net/fjes/fjes_ethtool.c b/drivers/net/fjes/fjes_ethtool.c
new file mode 100644
index 0000000..0119dd1
--- /dev/null
+++ b/drivers/net/fjes/fjes_ethtool.c
@@ -0,0 +1,137 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+/* ethtool support for fjes */
+
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+
+#include "fjes.h"
+
+struct fjes_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define FJES_STAT(name, stat) { \
+	.stat_string = name, \
+	.sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \
+	.stat_offset = offsetof(struct fjes_adapter, stat) \
+}
+
+static const struct fjes_stats fjes_gstrings_stats[] = {
+	FJES_STAT("rx_packets", stats64.rx_packets),
+	FJES_STAT("tx_packets", stats64.tx_packets),
+	FJES_STAT("rx_bytes", stats64.rx_bytes),
+	FJES_STAT("tx_bytes", stats64.tx_bytes),
+	FJES_STAT("rx_dropped", stats64.rx_dropped),
+	FJES_STAT("tx_dropped", stats64.tx_dropped),
+};
+
+static void fjes_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	char *p;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
+		p = (char *)adapter + fjes_gstrings_stats[i].stat_offset;
+		data[i] = (fjes_gstrings_stats[i].sizeof_stat == sizeof(u64))
+			? *(u64 *)p : *(u32 *)p;
+	}
+}
+
+static void fjes_get_strings(struct net_device *netdev,
+			     u32 stringset, u8 *data)
+{
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(fjes_gstrings_stats); i++) {
+			memcpy(p, fjes_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		break;
+	}
+}
+
+static int fjes_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(fjes_gstrings_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void fjes_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *drvinfo)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct platform_device *plat_dev;
+
+	plat_dev = adapter->plat_dev;
+
+	strlcpy(drvinfo->driver, fjes_driver_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, fjes_driver_version,
+		sizeof(drvinfo->version));
+
+	strlcpy(drvinfo->fw_version, "none", sizeof(drvinfo->fw_version));
+	snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+		 "platform:%s", plat_dev->name);
+	drvinfo->regdump_len = 0;
+	drvinfo->eedump_len = 0;
+}
+
+static int fjes_get_settings(struct net_device *netdev,
+			     struct ethtool_cmd *ecmd)
+{
+	ecmd->supported = 0;
+	ecmd->advertising = 0;
+	ecmd->duplex = DUPLEX_FULL;
+	ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_DUMMY1;
+	ecmd->port = PORT_NONE;
+	ethtool_cmd_speed_set(ecmd, 20000);	/* 20Gb/s */
+
+	return 0;
+}
+
+static const struct ethtool_ops fjes_ethtool_ops = {
+	.get_settings		= fjes_get_settings,
+	.get_drvinfo		= fjes_get_drvinfo,
+	.get_ethtool_stats	= fjes_get_ethtool_stats,
+	.get_strings		= fjes_get_strings,
+	.get_sset_count		= fjes_get_sset_count,
+};
+
+void fjes_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &fjes_ethtool_ops;
+}
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
new file mode 100644
index 0000000..b5f4a78
--- /dev/null
+++ b/drivers/net/fjes/fjes_hw.c
@@ -0,0 +1,1125 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include "fjes_hw.h"
+#include "fjes.h"
+
+static void fjes_hw_update_zone_task(struct work_struct *);
+static void fjes_hw_epstop_task(struct work_struct *);
+
+/* supported MTU list */
+const u32 fjes_support_mtu[] = {
+	FJES_MTU_DEFINE(8 * 1024),
+	FJES_MTU_DEFINE(16 * 1024),
+	FJES_MTU_DEFINE(32 * 1024),
+	FJES_MTU_DEFINE(64 * 1024),
+	0
+};
+
+u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
+{
+	u8 *base = hw->base;
+	u32 value = 0;
+
+	value = readl(&base[reg]);
+
+	return value;
+}
+
+static u8 *fjes_hw_iomap(struct fjes_hw *hw)
+{
+	u8 *base;
+
+	if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
+				fjes_driver_name)) {
+		pr_err("request_mem_region failed\n");
+		return NULL;
+	}
+
+	base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);
+
+	return base;
+}
+
+static void fjes_hw_iounmap(struct fjes_hw *hw)
+{
+	iounmap(hw->base);
+	release_mem_region(hw->hw_res.start, hw->hw_res.size);
+}
+
+int fjes_hw_reset(struct fjes_hw *hw)
+{
+	union REG_DCTL dctl;
+	int timeout;
+
+	dctl.reg = 0;
+	dctl.bits.reset = 1;
+	wr32(XSCT_DCTL, dctl.reg);
+
+	timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
+	dctl.reg = rd32(XSCT_DCTL);
+	while ((dctl.bits.reset == 1) && (timeout > 0)) {
+		msleep(1000);
+		dctl.reg = rd32(XSCT_DCTL);
+		timeout -= 1000;
+	}
+
+	return timeout > 0 ? 0 : -EIO;
+}
+
+static int fjes_hw_get_max_epid(struct fjes_hw *hw)
+{
+	union REG_MAX_EP info;
+
+	info.reg = rd32(XSCT_MAX_EP);
+
+	return info.bits.maxep;
+}
+
+static int fjes_hw_get_my_epid(struct fjes_hw *hw)
+{
+	union REG_OWNER_EPID info;
+
+	info.reg = rd32(XSCT_OWNER_EPID);
+
+	return info.bits.epid;
+}
+
+static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
+{
+	size_t size;
+
+	size = sizeof(struct fjes_device_shared_info) +
+	    (sizeof(u8) * hw->max_epid);
+	hw->hw_info.share = kzalloc(size, GFP_KERNEL);
+	if (!hw->hw_info.share)
+		return -ENOMEM;
+
+	hw->hw_info.share->epnum = hw->max_epid;
+
+	return 0;
+}
+
+static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
+{
+	kfree(hw->hw_info.share);
+	hw->hw_info.share = NULL;
+}
+
+static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
+{
+	void *mem;
+
+	mem = vzalloc(EP_BUFFER_SIZE);
+	if (!mem)
+		return -ENOMEM;
+
+	epbh->buffer = mem;
+	epbh->size = EP_BUFFER_SIZE;
+
+	epbh->info = (union ep_buffer_info *)mem;
+	epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));
+
+	return 0;
+}
+
+static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
+{
+	if (epbh->buffer)
+		vfree(epbh->buffer);
+
+	epbh->buffer = NULL;
+	epbh->size = 0;
+
+	epbh->info = NULL;
+	epbh->ring = NULL;
+}
+
+void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
+{
+	union ep_buffer_info *info = epbh->info;
+	u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
+	int i;
+
+	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
+		vlan_id[i] = info->v1i.vlan_id[i];
+
+	memset(info, 0, sizeof(union ep_buffer_info));
+
+	info->v1i.version = 0;  /* version 0 */
+
+	for (i = 0; i < ETH_ALEN; i++)
+		info->v1i.mac_addr[i] = mac_addr[i];
+
+	info->v1i.head = 0;
+	info->v1i.tail = 1;
+
+	info->v1i.info_size = sizeof(union ep_buffer_info);
+	info->v1i.buffer_size = epbh->size - info->v1i.info_size;
+
+	info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
+	info->v1i.count_max =
+	    EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);
+
+	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
+		info->v1i.vlan_id[i] = vlan_id[i];
+}
+
+void
+fjes_hw_init_command_registers(struct fjes_hw *hw,
+			       struct fjes_device_command_param *param)
+{
+	/* Request Buffer length */
+	wr32(XSCT_REQBL, (__le32)(param->req_len));
+	/* Response Buffer Length */
+	wr32(XSCT_RESPBL, (__le32)(param->res_len));
+
+	/* Request Buffer Address */
+	wr32(XSCT_REQBAL,
+	     (__le32)(param->req_start & GENMASK_ULL(31, 0)));
+	wr32(XSCT_REQBAH,
+	     (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));
+
+	/* Response Buffer Address */
+	wr32(XSCT_RESPBAL,
+	     (__le32)(param->res_start & GENMASK_ULL(31, 0)));
+	wr32(XSCT_RESPBAH,
+	     (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));
+
+	/* Share status address */
+	wr32(XSCT_SHSTSAL,
+	     (__le32)(param->share_start & GENMASK_ULL(31, 0)));
+	wr32(XSCT_SHSTSAH,
+	     (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
+}
+
+static int fjes_hw_setup(struct fjes_hw *hw)
+{
+	u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	struct fjes_device_command_param param;
+	struct ep_share_mem_info *buf_pair;
+	size_t mem_size;
+	int result;
+	int epidx;
+	void *buf;
+
+	hw->hw_info.max_epid = &hw->max_epid;
+	hw->hw_info.my_epid = &hw->my_epid;
+
+	buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
+		      GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	hw->ep_shm_info = (struct ep_share_mem_info *)buf;
+
+	mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
+	hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
+	if (!(hw->hw_info.req_buf))
+		return -ENOMEM;
+
+	hw->hw_info.req_buf_size = mem_size;
+
+	mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
+	hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
+	if (!(hw->hw_info.res_buf))
+		return -ENOMEM;
+
+	hw->hw_info.res_buf_size = mem_size;
+
+	result = fjes_hw_alloc_shared_status_region(hw);
+	if (result)
+		return result;
+
+	hw->hw_info.buffer_share_bit = 0;
+	hw->hw_info.buffer_unshare_reserve_bit = 0;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx != hw->my_epid) {
+			buf_pair = &hw->ep_shm_info[epidx];
+
+			result = fjes_hw_alloc_epbuf(&buf_pair->tx);
+			if (result)
+				return result;
+
+			result = fjes_hw_alloc_epbuf(&buf_pair->rx);
+			if (result)
+				return result;
+
+			fjes_hw_setup_epbuf(&buf_pair->tx, mac,
+					    fjes_support_mtu[0]);
+			fjes_hw_setup_epbuf(&buf_pair->rx, mac,
+					    fjes_support_mtu[0]);
+		}
+	}
+
+	memset(&param, 0, sizeof(param));
+
+	param.req_len = hw->hw_info.req_buf_size;
+	param.req_start = __pa(hw->hw_info.req_buf);
+	param.res_len = hw->hw_info.res_buf_size;
+	param.res_start = __pa(hw->hw_info.res_buf);
+
+	param.share_start = __pa(hw->hw_info.share->ep_status);
+
+	fjes_hw_init_command_registers(hw, &param);
+
+	return 0;
+}
+
+static void fjes_hw_cleanup(struct fjes_hw *hw)
+{
+	int epidx;
+
+	if (!hw->ep_shm_info)
+		return;
+
+	fjes_hw_free_shared_status_region(hw);
+
+	kfree(hw->hw_info.req_buf);
+	hw->hw_info.req_buf = NULL;
+
+	kfree(hw->hw_info.res_buf);
+	hw->hw_info.res_buf = NULL;
+
+	for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+		fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+	}
+
+	kfree(hw->ep_shm_info);
+	hw->ep_shm_info = NULL;
+}
+
+int fjes_hw_init(struct fjes_hw *hw)
+{
+	int ret;
+
+	hw->base = fjes_hw_iomap(hw);
+	if (!hw->base)
+		return -EIO;
+
+	ret = fjes_hw_reset(hw);
+	if (ret)
+		return ret;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+	INIT_WORK(&hw->update_zone_task, fjes_hw_update_zone_task);
+	INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
+
+	mutex_init(&hw->hw_info.lock);
+
+	hw->max_epid = fjes_hw_get_max_epid(hw);
+	hw->my_epid = fjes_hw_get_my_epid(hw);
+
+	if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
+		return -ENXIO;
+
+	ret = fjes_hw_setup(hw);
+
+	return ret;
+}
+
+void fjes_hw_exit(struct fjes_hw *hw)
+{
+	int ret;
+
+	if (hw->base) {
+		ret = fjes_hw_reset(hw);
+		if (ret)
+			pr_err("%s: reset error\n", __func__);
+
+		fjes_hw_iounmap(hw);
+		hw->base = NULL;
+	}
+
+	fjes_hw_cleanup(hw);
+
+	cancel_work_sync(&hw->update_zone_task);
+	cancel_work_sync(&hw->epstop_task);
+}
+
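+/* Kick a command via the CR register and poll the CS register for
+ * completion, in one second steps, up to FJES_COMMAND_REQ_TIMEOUT.
+ */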
+static enum fjes_dev_command_response_e
+fjes_hw_issue_request_command(struct fjes_hw *hw,
+			      enum fjes_dev_command_request_type type)
+{
+	enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
+	union REG_CR cr;
+	union REG_CS cs;
+	int timeout;
+
+	cr.reg = 0;
+	cr.bits.req_start = 1;
+	cr.bits.req_code = type;
+	wr32(XSCT_CR, cr.reg);
+	cr.reg = rd32(XSCT_CR);
+
+	if (cr.bits.error == 0) {
+		timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
+		cs.reg = rd32(XSCT_CS);
+
+		while ((cs.bits.complete != 1) && timeout > 0) {
+			msleep(1000);
+			cs.reg = rd32(XSCT_CS);
+			timeout -= 1000;
+		}
+
+		if (cs.bits.complete == 1)
+			ret = FJES_CMD_STATUS_NORMAL;
+		else if (timeout <= 0)
+			ret = FJES_CMD_STATUS_TIMEOUT;
+
+	} else {
+		switch (cr.bits.err_info) {
+		case FJES_CMD_REQ_ERR_INFO_PARAM:
+			ret = FJES_CMD_STATUS_ERROR_PARAM;
+			break;
+		case FJES_CMD_REQ_ERR_INFO_STATUS:
+			ret = FJES_CMD_STATUS_ERROR_STATUS;
+			break;
+		default:
+			ret = FJES_CMD_STATUS_UNKNOWN;
+			break;
+		}
+	}
+
+	return ret;
+}
+
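+/* Issue an INFO request and sanity-check the response length and result
+ * code, mapping the device status to an errno value.
+ */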
+int fjes_hw_request_info(struct fjes_hw *hw)
+{
+	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+	enum fjes_dev_command_response_e ret;
+	int result;
+
+	memset(req_buf, 0, hw->hw_info.req_buf_size);
+	memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+	req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;
+
+	res_buf->info.length = 0;
+	res_buf->info.code = 0;
+
+	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);
+
+	result = 0;
+
+	if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
+		res_buf->info.length) {
+		result = -ENOMSG;
+	} else if (ret == FJES_CMD_STATUS_NORMAL) {
+		switch (res_buf->info.code) {
+		case FJES_CMD_REQ_RES_CODE_NORMAL:
+			result = 0;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	} else {
+		switch (ret) {
+		case FJES_CMD_STATUS_UNKNOWN:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_TIMEOUT:
+			result = -EBUSY;
+			break;
+		case FJES_CMD_STATUS_ERROR_PARAM:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_ERROR_STATUS:
+			result = -EPERM;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	}
+
+	return result;
+}
+
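+/* Share this EP's TX/RX buffers with dest_epid: the request lists the
+ * physical address of every EP_BUFFER_INFO_SIZE page of both buffers.
+ * While the device answers BUSY, retry with a per-EPID staggered delay.
+ */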
+int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
+			       struct ep_share_mem_info *buf_pair)
+{
+	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+	enum fjes_dev_command_response_e ret;
+	int page_count;
+	int timeout;
+	int i, idx;
+	void *addr;
+	int result;
+
+	if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
+		return 0;
+
+	memset(req_buf, 0, hw->hw_info.req_buf_size);
+	memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+	req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
+						buf_pair->tx.size,
+						buf_pair->rx.size);
+	req_buf->share_buffer.epid = dest_epid;
+
+	idx = 0;
+	req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
+	page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
+	for (i = 0; i < page_count; i++) {
+		addr = ((u8 *)(buf_pair->tx.buffer)) +
+				(i * EP_BUFFER_INFO_SIZE);
+		req_buf->share_buffer.buffer[idx++] =
+				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
+						offset_in_page(addr));
+	}
+
+	req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
+	page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
+	for (i = 0; i < page_count; i++) {
+		addr = ((u8 *)(buf_pair->rx.buffer)) +
+				(i * EP_BUFFER_INFO_SIZE);
+		req_buf->share_buffer.buffer[idx++] =
+				(__le64)(page_to_phys(vmalloc_to_page(addr)) +
+						offset_in_page(addr));
+	}
+
+	res_buf->share_buffer.length = 0;
+	res_buf->share_buffer.code = 0;
+
+	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);
+
+	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
+	while ((ret == FJES_CMD_STATUS_NORMAL) &&
+	       (res_buf->share_buffer.length ==
+		FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
+	       (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
+	       (timeout > 0)) {
+		msleep(200 + hw->my_epid * 20);
+		timeout -= (200 + hw->my_epid * 20);
+
+		res_buf->share_buffer.length = 0;
+		res_buf->share_buffer.code = 0;
+
+		ret = fjes_hw_issue_request_command(
+				hw, FJES_CMD_REQ_SHARE_BUFFER);
+	}
+
+	result = 0;
+
+	if (res_buf->share_buffer.length !=
+			FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
+		result = -ENOMSG;
+	else if (ret == FJES_CMD_STATUS_NORMAL) {
+		switch (res_buf->share_buffer.code) {
+		case FJES_CMD_REQ_RES_CODE_NORMAL:
+			result = 0;
+			set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
+			break;
+		case FJES_CMD_REQ_RES_CODE_BUSY:
+			result = -EBUSY;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	} else {
+		switch (ret) {
+		case FJES_CMD_STATUS_UNKNOWN:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_TIMEOUT:
+			result = -EBUSY;
+			break;
+		case FJES_CMD_STATUS_ERROR_PARAM:
+		case FJES_CMD_STATUS_ERROR_STATUS:
+		default:
+			result = -EPERM;
+			break;
+		}
+	}
+
+	return result;
+}
+
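+/* Revoke a previously shared buffer pair from dest_epid, retrying while
+ * the device answers BUSY.
+ */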
+int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
+{
+	union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
+	union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
+	struct fjes_device_shared_info *share = hw->hw_info.share;
+	enum fjes_dev_command_response_e ret;
+	int timeout;
+	int result;
+
+	if (!hw->base)
+		return -EPERM;
+
+	if (!req_buf || !res_buf || !share)
+		return -EPERM;
+
+	if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
+		return 0;
+
+	memset(req_buf, 0, hw->hw_info.req_buf_size);
+	memset(res_buf, 0, hw->hw_info.res_buf_size);
+
+	req_buf->unshare_buffer.length =
+			FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
+	req_buf->unshare_buffer.epid = dest_epid;
+
+	res_buf->unshare_buffer.length = 0;
+	res_buf->unshare_buffer.code = 0;
+
+	ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
+
+	timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
+	while ((ret == FJES_CMD_STATUS_NORMAL) &&
+	       (res_buf->unshare_buffer.length ==
+		FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
+	       (res_buf->unshare_buffer.code ==
+		FJES_CMD_REQ_RES_CODE_BUSY) &&
+	       (timeout > 0)) {
+		msleep(200 + hw->my_epid * 20);
+		timeout -= (200 + hw->my_epid * 20);
+
+		res_buf->unshare_buffer.length = 0;
+		res_buf->unshare_buffer.code = 0;
+
+		ret =
+		fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
+	}
+
+	result = 0;
+
+	if (res_buf->unshare_buffer.length !=
+			FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
+		result = -ENOMSG;
+	} else if (ret == FJES_CMD_STATUS_NORMAL) {
+		switch (res_buf->unshare_buffer.code) {
+		case FJES_CMD_REQ_RES_CODE_NORMAL:
+			result = 0;
+			clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
+			break;
+		case FJES_CMD_REQ_RES_CODE_BUSY:
+			result = -EBUSY;
+			break;
+		default:
+			result = -EPERM;
+			break;
+		}
+	} else {
+		switch (ret) {
+		case FJES_CMD_STATUS_UNKNOWN:
+			result = -EPERM;
+			break;
+		case FJES_CMD_STATUS_TIMEOUT:
+			result = -EBUSY;
+			break;
+		case FJES_CMD_STATUS_ERROR_PARAM:
+		case FJES_CMD_STATUS_ERROR_STATUS:
+		default:
+			result = -EPERM;
+			break;
+		}
+	}
+
+	return result;
+}
+
+int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
+			    enum REG_ICTL_MASK  mask)
+{
+	u32 ig = mask | dest_epid;
+
+	wr32(XSCT_IG, cpu_to_le32(ig));
+
+	return 0;
+}
+
+u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
+{
+	u32 cur_is;
+
+	cur_is = rd32(XSCT_IS);
+
+	return cur_is;
+}
+
+void fjes_hw_set_irqmask(struct fjes_hw *hw,
+			 enum REG_ICTL_MASK intr_mask, bool mask)
+{
+	if (mask)
+		wr32(XSCT_IMS, intr_mask);
+	else
+		wr32(XSCT_IMC, intr_mask);
+}
+
+bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
+{
+	if (epid >= hw->max_epid)
+		return false;
+
+	if ((hw->ep_shm_info[epid].es_status !=
+			FJES_ZONING_STATUS_ENABLE) ||
+		(hw->ep_shm_info[hw->my_epid].zone ==
+			FJES_ZONING_ZONE_TYPE_NONE))
+		return false;
+	else
+		return (hw->ep_shm_info[epid].zone ==
+				hw->ep_shm_info[hw->my_epid].zone);
+}
+
+int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
+			   int dest_epid)
+{
+	int value = false;
+
+	if (dest_epid < share->epnum)
+		value = share->ep_status[dest_epid];
+
+	return value;
+}
+
+static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
+{
+	return test_bit(src_epid, &hw->txrx_stop_req_bit);
+}
+
+static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
+{
+	return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
+			FJES_RX_STOP_REQ_DONE);
+}
+
+enum ep_partner_status
+fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
+{
+	enum ep_partner_status status;
+
+	if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
+		if (fjes_hw_epid_is_stop_requested(hw, epid)) {
+			status = EP_PARTNER_WAITING;
+		} else {
+			if (fjes_hw_epid_is_stop_process_done(hw, epid))
+				status = EP_PARTNER_COMPLETE;
+			else
+				status = EP_PARTNER_SHARED;
+		}
+	} else {
+		status = EP_PARTNER_UNSHARE;
+	}
+
+	return status;
+}
+
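+/* Ask every partner to stop TX/RX: raise a stop request interrupt to the
+ * shared ones and mark all of them for the unshare/stop bookkeeping.
+ */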
+void fjes_hw_raise_epstop(struct fjes_hw *hw)
+{
+	enum ep_partner_status status;
+	int epidx;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		status = fjes_hw_get_partner_ep_status(hw, epidx);
+		switch (status) {
+		case EP_PARTNER_SHARED:
+			fjes_hw_raise_interrupt(hw, epidx,
+						REG_ICTL_MASK_TXRX_STOP_REQ);
+			break;
+		default:
+			break;
+		}
+
+		set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+		set_bit(epidx, &hw->txrx_stop_req_bit);
+
+		hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+				FJES_RX_STOP_REQ_REQUEST;
+	}
+}
+
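+/* Wait up to FJES_COMMAND_EPSTOP_WAIT_TIMEOUT for partners to acknowledge
+ * the stop request, clearing their unshare reserve bits as they complete.
+ */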
+int fjes_hw_wait_epstop(struct fjes_hw *hw)
+{
+	enum ep_partner_status status;
+	union ep_buffer_info *info;
+	int wait_time = 0;
+	int epidx;
+
+	while (hw->hw_info.buffer_unshare_reserve_bit &&
+	       (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+			status = fjes_hw_epid_is_shared(hw->hw_info.share,
+							epidx);
+			info = hw->ep_shm_info[epidx].rx.info;
+			if ((!status ||
+			     (info->v1i.rx_status &
+			      FJES_RX_STOP_REQ_DONE)) &&
+			    test_bit(epidx,
+				     &hw->hw_info.buffer_unshare_reserve_bit)) {
+				clear_bit(epidx,
+					  &hw->hw_info.buffer_unshare_reserve_bit);
+			}
+		}
+
+		msleep(100);
+		wait_time += 100;
+	}
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+		if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
+			clear_bit(epidx,
+				  &hw->hw_info.buffer_unshare_reserve_bit);
+	}
+
+	return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
+			? 0 : -EBUSY;
+}
+
+bool fjes_hw_check_epbuf_version(struct epbuf_handler *epbh, u32 version)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	return (info->common.version == version);
+}
+
+bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+}
+
+bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+	union ep_buffer_info *info = epbh->info;
+	bool ret = false;
+	int i;
+
+	if (vlan_id == 0) {
+		ret = true;
+	} else {
+		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+			if (vlan_id == info->v1i.vlan_id[i]) {
+				ret = true;
+				break;
+			}
+		}
+	}
+	return ret;
+}
+
+bool fjes_hw_set_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+	union ep_buffer_info *info = epbh->info;
+	int i;
+
+	for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+		if (info->v1i.vlan_id[i] == 0) {
+			info->v1i.vlan_id[i] = vlan_id;
+			return true;
+		}
+	}
+	return false;
+}
+
+void fjes_hw_del_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
+{
+	union ep_buffer_info *info = epbh->info;
+	int i;
+
+	if (vlan_id != 0) {
+		for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++) {
+			if (vlan_id == info->v1i.vlan_id[i])
+				info->v1i.vlan_id[i] = 0;
+		}
+	}
+}
+
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	if (info->v1i.count_max == 0)
+		return true;
+
+	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
+			     info->v1i.count_max);
+}
+
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
+				       size_t *psize)
+{
+	union ep_buffer_info *info = epbh->info;
+	struct esmem_frame *ring_frame;
+	void *frame;
+
+	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+					     (info->v1i.head,
+					      info->v1i.count_max) *
+					     info->v1i.frame_max]);
+
+	*psize = (size_t)ring_frame->frame_size;
+
+	frame = ring_frame->frame_data;
+
+	return frame;
+}
+
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	if (fjes_hw_epbuf_rx_is_empty(epbh))
+		return;
+
+	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
+}
+
+int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
+			      void *frame, size_t size)
+{
+	union ep_buffer_info *info = epbh->info;
+	struct esmem_frame *ring_frame;
+
+	if (EP_RING_FULL(info->v1i.head, info->v1i.tail, info->v1i.count_max))
+		return -ENOBUFS;
+
+	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+					     (info->v1i.tail - 1,
+					      info->v1i.count_max) *
+					     info->v1i.frame_max]);
+
+	ring_frame->frame_size = size;
+	memcpy((void *)(ring_frame->frame_data), (void *)frame, size);
+
+	EP_RING_INDEX_INC(epbh->info->v1i.tail, info->v1i.count_max);
+
+	return 0;
+}
+
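+/* Worker: re-read zoning information from the device and reconcile it with
+ * the current sharing state: register buffers with new same-zone partners,
+ * unregister departed ones, and request TX/RX stop where needed.
+ */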
+static void fjes_hw_update_zone_task(struct work_struct *work)
+{
+	struct fjes_hw *hw = container_of(work,
+			struct fjes_hw, update_zone_task);
+
+	struct my_s {u8 es_status; u8 zone; } *info;
+	union fjes_device_command_res *res_buf;
+	enum ep_partner_status pstatus;
+
+	struct fjes_adapter *adapter;
+	struct net_device *netdev;
+
+	ulong unshare_bit = 0;
+	ulong share_bit = 0;
+	ulong irq_bit = 0;
+
+	int epidx;
+	int ret;
+
+	adapter = (struct fjes_adapter *)hw->back;
+	netdev = adapter->netdev;
+	res_buf = hw->hw_info.res_buf;
+	info = (struct my_s *)&res_buf->info.info;
+
+	mutex_lock(&hw->hw_info.lock);
+
+	ret = fjes_hw_request_info(hw);
+	switch (ret) {
+	case -ENOMSG:
+	case -EBUSY:
+	default:
+		if (!work_pending(&adapter->force_close_task)) {
+			adapter->force_reset = true;
+			schedule_work(&adapter->force_close_task);
+		}
+		break;
+
+	case 0:
+
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid) {
+				hw->ep_shm_info[epidx].es_status =
+					info[epidx].es_status;
+				hw->ep_shm_info[epidx].zone =
+					info[epidx].zone;
+				continue;
+			}
+
+			pstatus = fjes_hw_get_partner_ep_status(hw, epidx);
+			switch (pstatus) {
+			case EP_PARTNER_UNSHARE:
+			default:
+				if ((info[epidx].zone !=
+					FJES_ZONING_ZONE_TYPE_NONE) &&
+				    (info[epidx].es_status ==
+					FJES_ZONING_STATUS_ENABLE) &&
+				    (info[epidx].zone ==
+					info[hw->my_epid].zone))
+					set_bit(epidx, &share_bit);
+				else
+					set_bit(epidx, &unshare_bit);
+				break;
+
+			case EP_PARTNER_COMPLETE:
+			case EP_PARTNER_WAITING:
+				if ((info[epidx].zone ==
+					FJES_ZONING_ZONE_TYPE_NONE) ||
+				    (info[epidx].es_status !=
+					FJES_ZONING_STATUS_ENABLE) ||
+				    (info[epidx].zone !=
+					info[hw->my_epid].zone)) {
+					set_bit(epidx,
+						&adapter->unshare_watch_bitmask);
+					set_bit(epidx,
+						&hw->hw_info.buffer_unshare_reserve_bit);
+				}
+				break;
+
+			case EP_PARTNER_SHARED:
+				if ((info[epidx].zone ==
+					FJES_ZONING_ZONE_TYPE_NONE) ||
+				    (info[epidx].es_status !=
+					FJES_ZONING_STATUS_ENABLE) ||
+				    (info[epidx].zone !=
+					info[hw->my_epid].zone))
+					set_bit(epidx, &irq_bit);
+				break;
+			}
+
+			hw->ep_shm_info[epidx].es_status =
+				info[epidx].es_status;
+			hw->ep_shm_info[epidx].zone = info[epidx].zone;
+		}
+
+		break;
+	}
+
+	mutex_unlock(&hw->hw_info.lock);
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		if (test_bit(epidx, &share_bit)) {
+			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+					    netdev->dev_addr, netdev->mtu);
+
+			mutex_lock(&hw->hw_info.lock);
+
+			ret = fjes_hw_register_buff_addr(
+				hw, epidx, &hw->ep_shm_info[epidx]);
+
+			switch (ret) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				if (!work_pending(&adapter->force_close_task)) {
+					adapter->force_reset = true;
+					schedule_work(
+					  &adapter->force_close_task);
+				}
+				break;
+			}
+			mutex_unlock(&hw->hw_info.lock);
+		}
+
+		if (test_bit(epidx, &unshare_bit)) {
+			mutex_lock(&hw->hw_info.lock);
+
+			ret = fjes_hw_unregister_buff_addr(hw, epidx);
+
+			switch (ret) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				if (!work_pending(&adapter->force_close_task)) {
+					adapter->force_reset = true;
+					schedule_work(
+					  &adapter->force_close_task);
+				}
+				break;
+			}
+
+			mutex_unlock(&hw->hw_info.lock);
+
+			if (ret == 0)
+				fjes_hw_setup_epbuf(
+					&hw->ep_shm_info[epidx].tx,
+					netdev->dev_addr, netdev->mtu);
+		}
+
+		if (test_bit(epidx, &irq_bit)) {
+			fjes_hw_raise_interrupt(hw, epidx,
+						REG_ICTL_MASK_TXRX_STOP_REQ);
+
+			set_bit(epidx, &hw->txrx_stop_req_bit);
+			hw->ep_shm_info[epidx].tx.
+				info->v1i.rx_status |=
+					FJES_RX_STOP_REQ_REQUEST;
+			set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+		}
+	}
+
+	if (irq_bit || adapter->unshare_watch_bitmask) {
+		if (!work_pending(&adapter->unshare_watch_task))
+			queue_work(adapter->control_wq,
+				   &adapter->unshare_watch_task);
+	}
+}
+
+static void fjes_hw_epstop_task(struct work_struct *work)
+{
+	struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
+	struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+
+	ulong remain_bit;
+	int epid_bit;
+
+	while ((remain_bit = hw->epstop_req_bit)) {
+		for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
+			if (remain_bit & 1) {
+				hw->ep_shm_info[epid_bit].
+					tx.info->v1i.rx_status |=
+						FJES_RX_STOP_REQ_DONE;
+
+				clear_bit(epid_bit, &hw->epstop_req_bit);
+				set_bit(epid_bit,
+					&adapter->unshare_watch_bitmask);
+
+				if (!work_pending(&adapter->unshare_watch_task))
+					queue_work(
+						adapter->control_wq,
+						&adapter->unshare_watch_task);
+			}
+		}
+	}
+}
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
new file mode 100644
index 0000000..6d57b89
--- /dev/null
+++ b/drivers/net/fjes/fjes_hw.h
@@ -0,0 +1,334 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_HW_H_
+#define FJES_HW_H_
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "fjes_regs.h"
+
+struct fjes_hw;
+
+#define EP_BUFFER_SUPPORT_VLAN_MAX 4
+#define EP_BUFFER_INFO_SIZE 4096
+
+#define FJES_DEVICE_RESET_TIMEOUT  ((17 + 1) * 3) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT  (5 + 1) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT	(8 * 3) /* sec */
+#define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT	(1) /* sec */
+
+#define FJES_CMD_REQ_ERR_INFO_PARAM  (0x0001)
+#define FJES_CMD_REQ_ERR_INFO_STATUS (0x0002)
+
+#define FJES_CMD_REQ_RES_CODE_NORMAL (0)
+#define FJES_CMD_REQ_RES_CODE_BUSY   (1)
+
+#define FJES_ZONING_STATUS_DISABLE	(0x00)
+#define FJES_ZONING_STATUS_ENABLE	(0x01)
+#define FJES_ZONING_STATUS_INVALID	(0xFF)
+
+#define FJES_ZONING_ZONE_TYPE_NONE (0xFF)
+
+#define FJES_TX_DELAY_SEND_NONE		(0)
+#define FJES_TX_DELAY_SEND_PENDING	(1)
+
+#define FJES_RX_STOP_REQ_NONE		(0x0)
+#define FJES_RX_STOP_REQ_DONE		(0x1)
+#define FJES_RX_STOP_REQ_REQUEST	(0x2)
+#define FJES_RX_POLL_WORK		(0x4)
+
+#define EP_BUFFER_SIZE \
+	(((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
+		/ EP_BUFFER_INFO_SIZE) * EP_BUFFER_INFO_SIZE)
+
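+/* TX/RX ring helpers: indices wrap modulo count_max; the ring is full when
+ * tail catches up with head and empty when tail is one slot ahead of head.
+ */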
+#define EP_RING_NUM(buffer_size, frame_size) \
+		(u32)((buffer_size) / (frame_size))
+#define EP_RING_INDEX(_num, _max) (((_num) + (_max)) % (_max))
+#define EP_RING_INDEX_INC(_num, _max) \
+	((_num) = EP_RING_INDEX((_num) + 1, (_max)))
+#define EP_RING_FULL(_head, _tail, _max)				\
+	(0 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
+#define EP_RING_EMPTY(_head, _tail, _max) \
+	(1 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
+
+#define FJES_MTU_TO_BUFFER_SIZE(mtu) \
+	(ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)
+#define FJES_MTU_TO_FRAME_SIZE(mtu) \
+	(sizeof(struct esmem_frame) + FJES_MTU_TO_BUFFER_SIZE(mtu))
+#define FJES_MTU_DEFINE(size) \
+	((size) - sizeof(struct esmem_frame) - \
+	(ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+#define FJES_DEV_COMMAND_INFO_REQ_LEN	(4)
+#define FJES_DEV_COMMAND_INFO_RES_LEN(epnum) (8 + 2 * (epnum))
+#define FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(txb, rxb) \
+	(24 + (8 * ((txb) / EP_BUFFER_INFO_SIZE + (rxb) / EP_BUFFER_INFO_SIZE)))
+#define FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN	(8)
+#define FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN	(8)
+#define FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN	(8)
+
+#define FJES_DEV_REQ_BUF_SIZE(maxep) \
+	FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(EP_BUFFER_SIZE, EP_BUFFER_SIZE)
+#define FJES_DEV_RES_BUF_SIZE(maxep) \
+	FJES_DEV_COMMAND_INFO_RES_LEN(maxep)
+
+/* Frame & MTU */
+struct esmem_frame {
+	__le32 frame_size;
+	u8 frame_data[];
+};
+
+/* EP partner status */
+enum ep_partner_status {
+	EP_PARTNER_UNSHARE,
+	EP_PARTNER_SHARED,
+	EP_PARTNER_WAITING,
+	EP_PARTNER_COMPLETE,
+	EP_PARTNER_STATUS_MAX,
+};
+
+/* shared status region */
+struct fjes_device_shared_info {
+	int epnum;
+	u8 ep_status[];
+};
+
+/* structures for command control request data */
+union fjes_device_command_req {
+	struct {
+		__le32 length;
+	} info;
+	struct {
+		__le32 length;
+		__le32 epid;
+		__le64 buffer[];
+	} share_buffer;
+	struct {
+		__le32 length;
+		__le32 epid;
+	} unshare_buffer;
+	struct {
+		__le32 length;
+		__le32 mode;
+		__le64 buffer_len;
+		__le64 buffer[];
+	} start_trace;
+	struct {
+		__le32 length;
+	} stop_trace;
+};
+
+/* structures for command control response data */
+union fjes_device_command_res {
+	struct {
+		__le32 length;
+		__le32 code;
+		struct {
+			u8 es_status;
+			u8 zone;
+		} info[];
+	} info;
+	struct {
+		__le32 length;
+		__le32 code;
+	} share_buffer;
+	struct {
+		__le32 length;
+		__le32 code;
+	} unshare_buffer;
+	struct {
+		__le32 length;
+		__le32 code;
+	} start_trace;
+	struct {
+		__le32 length;
+		__le32 code;
+	} stop_trace;
+};
+
+/* request command type */
+enum fjes_dev_command_request_type {
+	FJES_CMD_REQ_INFO		= 0x0001,
+	FJES_CMD_REQ_SHARE_BUFFER	= 0x0002,
+	FJES_CMD_REQ_UNSHARE_BUFFER	= 0x0004,
+};
+
+/* parameter for command control */
+struct fjes_device_command_param {
+	u32 req_len;
+	phys_addr_t req_start;
+	u32 res_len;
+	phys_addr_t res_start;
+	phys_addr_t share_start;
+};
+
+/* error code for command control */
+enum fjes_dev_command_response_e {
+	FJES_CMD_STATUS_UNKNOWN,
+	FJES_CMD_STATUS_NORMAL,
+	FJES_CMD_STATUS_TIMEOUT,
+	FJES_CMD_STATUS_ERROR_PARAM,
+	FJES_CMD_STATUS_ERROR_STATUS,
+};
+
+/* EP buffer information */
+union ep_buffer_info {
+	u8 raw[EP_BUFFER_INFO_SIZE];
+
+	struct _ep_buffer_info_common_t {
+		u32 version;
+	} common;
+
+	struct _ep_buffer_info_v1_t {
+		u32 version;
+		u32 info_size;
+
+		u32 buffer_size;
+		u16 count_max;
+
+		u16 _rsv_1;
+
+		u32 frame_max;
+		u8 mac_addr[ETH_ALEN];
+
+		u16 _rsv_2;
+		u32 _rsv_3;
+
+		u16 tx_status;
+		u16 rx_status;
+
+		u32 head;
+		u32 tail;
+
+		u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
+
+	} v1i;
+
+};
+
+/* buffer pair for Extended Partition */
+struct ep_share_mem_info {
+	struct epbuf_handler {
+		void *buffer;
+		size_t size;
+		union ep_buffer_info *info;
+		u8 *ring;
+	} tx, rx;
+
+	struct rtnl_link_stats64 net_stats;
+
+	u16 tx_status_work;
+
+	u8 es_status;
+	u8 zone;
+};
+
+struct es_device_trace {
+	u32 record_num;
+	u32 current_record;
+	u32 status_flag;
+	u32 _rsv;
+
+	struct {
+			u16 epid;
+			u16 dir_offset;
+			u32 data;
+			u64 tsc;
+	} record[];
+};
+
+struct fjes_hw_info {
+	struct fjes_device_shared_info *share;
+	union fjes_device_command_req *req_buf;
+	u64 req_buf_size;
+	union fjes_device_command_res *res_buf;
+	u64 res_buf_size;
+
+	int *my_epid;
+	int *max_epid;
+
+	struct es_device_trace *trace;
+	u64 trace_size;
+
+	struct mutex lock; /* buffer lock */
+
+	unsigned long buffer_share_bit;
+	unsigned long buffer_unshare_reserve_bit;
+};
+
+struct fjes_hw {
+	void *back;
+
+	unsigned long txrx_stop_req_bit;
+	unsigned long epstop_req_bit;
+	struct work_struct update_zone_task;
+	struct work_struct epstop_task;
+
+	int my_epid;
+	int max_epid;
+
+	struct ep_share_mem_info *ep_shm_info;
+
+	struct fjes_hw_resource {
+		u64 start;
+		u64 size;
+		int irq;
+	} hw_res;
+
+	u8 *base;
+
+	struct fjes_hw_info hw_info;
+};
+
+int fjes_hw_init(struct fjes_hw *);
+void fjes_hw_exit(struct fjes_hw *);
+int fjes_hw_reset(struct fjes_hw *);
+int fjes_hw_request_info(struct fjes_hw *);
+int fjes_hw_register_buff_addr(struct fjes_hw *, int,
+			       struct ep_share_mem_info *);
+int fjes_hw_unregister_buff_addr(struct fjes_hw *, int);
+void fjes_hw_init_command_registers(struct fjes_hw *,
+				    struct fjes_device_command_param *);
+void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32);
+int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK);
+void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool);
+u32 fjes_hw_capture_interrupt_status(struct fjes_hw *);
+void fjes_hw_raise_epstop(struct fjes_hw *);
+int fjes_hw_wait_epstop(struct fjes_hw *);
+enum ep_partner_status
+	fjes_hw_get_partner_ep_status(struct fjes_hw *, int);
+
+bool fjes_hw_epid_is_same_zone(struct fjes_hw *, int);
+int fjes_hw_epid_is_shared(struct fjes_device_shared_info *, int);
+bool fjes_hw_check_epbuf_version(struct epbuf_handler *, u32);
+bool fjes_hw_check_mtu(struct epbuf_handler *, u32);
+bool fjes_hw_check_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_set_vlan_id(struct epbuf_handler *, u16);
+void fjes_hw_del_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *);
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *, size_t *);
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *);
+int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *, void *, size_t);
+
+#endif /* FJES_HW_H_ */
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
new file mode 100644
index 0000000..0ddb54f
--- /dev/null
+++ b/drivers/net/fjes/fjes_main.c
@@ -0,0 +1,1383 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/nls.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+
+#include "fjes.h"
+
+#define MAJ 1
+#define MIN 0
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
+#define DRV_NAME	"fjes"
+char fjes_driver_name[] = DRV_NAME;
+char fjes_driver_version[] = DRV_VERSION;
+static const char fjes_driver_string[] =
+		"FUJITSU Extended Socket Network Device Driver";
+static const char fjes_copyright[] =
+		"Copyright (c) 2015 FUJITSU LIMITED";
+
+MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
+MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int fjes_request_irq(struct fjes_adapter *);
+static void fjes_free_irq(struct fjes_adapter *);
+
+static int fjes_open(struct net_device *);
+static int fjes_close(struct net_device *);
+static int fjes_setup_resources(struct fjes_adapter *);
+static void fjes_free_resources(struct fjes_adapter *);
+static netdev_tx_t fjes_xmit_frame(struct sk_buff *, struct net_device *);
+static void fjes_raise_intr_rxdata_task(struct work_struct *);
+static void fjes_tx_stall_task(struct work_struct *);
+static void fjes_force_close_task(struct work_struct *);
+static irqreturn_t fjes_intr(int, void*);
+static struct rtnl_link_stats64 *
+fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
+static int fjes_change_mtu(struct net_device *, int);
+static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
+static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
+static void fjes_tx_retry(struct net_device *);
+
+static int fjes_acpi_add(struct acpi_device *);
+static int fjes_acpi_remove(struct acpi_device *);
+static acpi_status fjes_get_acpi_resource(struct acpi_resource *, void*);
+
+static int fjes_probe(struct platform_device *);
+static int fjes_remove(struct platform_device *);
+
+static int fjes_sw_init(struct fjes_adapter *);
+static void fjes_netdev_setup(struct net_device *);
+static void fjes_irq_watch_task(struct work_struct *);
+static void fjes_watch_unshare_task(struct work_struct *);
+static void fjes_rx_irq(struct fjes_adapter *, int);
+static int fjes_poll(struct napi_struct *, int);
+
+static const struct acpi_device_id fjes_acpi_ids[] = {
+	{"PNP0C02", 0},
+	{"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids);
+
+static struct acpi_driver fjes_acpi_driver = {
+	.name = DRV_NAME,
+	.class = DRV_NAME,
+	.owner = THIS_MODULE,
+	.ids = fjes_acpi_ids,
+	.ops = {
+		.add = fjes_acpi_add,
+		.remove = fjes_acpi_remove,
+	},
+};
+
+static struct platform_driver fjes_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = fjes_probe,
+	.remove = fjes_remove,
+};
+
+static struct resource fjes_resource[] = {
+	{
+		.flags = IORESOURCE_MEM,
+		.start = 0,
+		.end = 0,
+	},
+	{
+		.flags = IORESOURCE_IRQ,
+		.start = 0,
+		.end = 0,
+	},
+};
+
+static int fjes_acpi_add(struct acpi_device *device)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+	char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1];
+	struct platform_device *plat_dev;
+	union acpi_object *str;
+	acpi_status status;
+	int result;
+
+	status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	str = buffer.pointer;
+	result = utf16s_to_utf8s((wchar_t *)str->string.pointer,
+				 str->string.length, UTF16_LITTLE_ENDIAN,
+				 str_buf, sizeof(str_buf) - 1);
+	str_buf[result] = 0;
+
+	if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) {
+		kfree(buffer.pointer);
+		return -ENODEV;
+	}
+	kfree(buffer.pointer);
+
+	status = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
+				     fjes_get_acpi_resource, fjes_resource);
+	if (ACPI_FAILURE(status))
+		return -ENODEV;
+
+	/* create platform_device */
+	plat_dev = platform_device_register_simple(DRV_NAME, 0, fjes_resource,
+						   ARRAY_SIZE(fjes_resource));
+	device->driver_data = plat_dev;
+
+	return 0;
+}
+
+static int fjes_acpi_remove(struct acpi_device *device)
+{
+	struct platform_device *plat_dev;
+
+	plat_dev = (struct platform_device *)acpi_driver_data(device);
+	platform_device_unregister(plat_dev);
+
+	return 0;
+}
+
+static acpi_status
+fjes_get_acpi_resource(struct acpi_resource *acpi_res, void *data)
+{
+	struct acpi_resource_address32 *addr;
+	struct acpi_resource_irq *irq;
+	struct resource *res = data;
+
+	switch (acpi_res->type) {
+	case ACPI_RESOURCE_TYPE_ADDRESS32:
+		addr = &acpi_res->data.address32;
+		res[0].start = addr->address.minimum;
+		res[0].end = addr->address.minimum +
+			addr->address.address_length - 1;
+		break;
+
+	case ACPI_RESOURCE_TYPE_IRQ:
+		irq = &acpi_res->data.irq;
+		if (irq->interrupt_count != 1)
+			return AE_ERROR;
+		res[1].start = irq->interrupts[0];
+		res[1].end = irq->interrupts[0];
+		break;
+
+	default:
+		break;
+	}
+
+	return AE_OK;
+}
+
+static int fjes_request_irq(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int result = -1;
+
+	adapter->interrupt_watch_enable = true;
+	if (!delayed_work_pending(&adapter->interrupt_watch_task)) {
+		queue_delayed_work(adapter->control_wq,
+				   &adapter->interrupt_watch_task,
+				   FJES_IRQ_WATCH_DELAY);
+	}
+
+	if (!adapter->irq_registered) {
+		result = request_irq(adapter->hw.hw_res.irq, fjes_intr,
+				     IRQF_SHARED, netdev->name, adapter);
+		if (result)
+			adapter->irq_registered = false;
+		else
+			adapter->irq_registered = true;
+	}
+
+	return result;
+}
+
+static void fjes_free_irq(struct fjes_adapter *adapter)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	adapter->interrupt_watch_enable = false;
+	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+
+	if (adapter->irq_registered) {
+		free_irq(adapter->hw.hw_res.irq, adapter);
+		adapter->irq_registered = false;
+	}
+}
+
+static const struct net_device_ops fjes_netdev_ops = {
+	.ndo_open		= fjes_open,
+	.ndo_stop		= fjes_close,
+	.ndo_start_xmit		= fjes_xmit_frame,
+	.ndo_get_stats64	= fjes_get_stats64,
+	.ndo_change_mtu		= fjes_change_mtu,
+	.ndo_tx_timeout		= fjes_tx_retry,
+	.ndo_vlan_rx_add_vid	= fjes_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid = fjes_vlan_rx_kill_vid,
+};
+
+/* fjes_open - Called when a network interface is made active */
+static int fjes_open(struct net_device *netdev)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+	int result;
+
+	if (adapter->open_guard)
+		return -ENXIO;
+
+	result = fjes_setup_resources(adapter);
+	if (result)
+		goto err_setup_res;
+
+	hw->txrx_stop_req_bit = 0;
+	hw->epstop_req_bit = 0;
+
+	napi_enable(&adapter->napi);
+
+	fjes_hw_capture_interrupt_status(hw);
+
+	result = fjes_request_irq(adapter);
+	if (result)
+		goto err_req_irq;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);
+
+	netif_tx_start_all_queues(netdev);
+	netif_carrier_on(netdev);
+
+	return 0;
+
+err_req_irq:
+	fjes_free_irq(adapter);
+	napi_disable(&adapter->napi);
+
+err_setup_res:
+	fjes_free_resources(adapter);
+	return result;
+}
+
+/* fjes_close - Disables a network interface */
+static int fjes_close(struct net_device *netdev)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+	int epidx;
+
+	netif_tx_stop_all_queues(netdev);
+	netif_carrier_off(netdev);
+
+	fjes_hw_raise_epstop(hw);
+
+	napi_disable(&adapter->napi);
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
+			~FJES_RX_POLL_WORK;
+	}
+
+	fjes_free_irq(adapter);
+
+	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+	cancel_work_sync(&adapter->unshare_watch_task);
+	adapter->unshare_watch_bitmask = 0;
+	cancel_work_sync(&adapter->raise_intr_rxdata_task);
+	cancel_work_sync(&adapter->tx_stall_task);
+
+	cancel_work_sync(&hw->update_zone_task);
+	cancel_work_sync(&hw->epstop_task);
+
+	fjes_hw_wait_epstop(hw);
+
+	fjes_free_resources(adapter);
+
+	return 0;
+}
+
+static int fjes_setup_resources(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct ep_share_mem_info *buf_pair;
+	struct fjes_hw *hw = &adapter->hw;
+	int result;
+	int epidx;
+
+	mutex_lock(&hw->hw_info.lock);
+	result = fjes_hw_request_info(hw);
+	switch (result) {
+	case 0:
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			hw->ep_shm_info[epidx].es_status =
+			    hw->hw_info.res_buf->info.info[epidx].es_status;
+			hw->ep_shm_info[epidx].zone =
+			    hw->hw_info.res_buf->info.info[epidx].zone;
+		}
+		break;
+	default:
+	case -ENOMSG:
+	case -EBUSY:
+		adapter->force_reset = true;
+
+		mutex_unlock(&hw->hw_info.lock);
+		return result;
+	}
+	mutex_unlock(&hw->hw_info.lock);
+
+	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
+		if ((epidx != hw->my_epid) &&
+		    (hw->ep_shm_info[epidx].es_status ==
+		     FJES_ZONING_STATUS_ENABLE)) {
+			fjes_hw_raise_interrupt(hw, epidx,
+						REG_ICTL_MASK_INFO_UPDATE);
+		}
+	}
+
+	msleep(FJES_OPEN_ZONE_UPDATE_WAIT * hw->max_epid);
+
+	for (epidx = 0; epidx < (hw->max_epid); epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		buf_pair = &hw->ep_shm_info[epidx];
+
+		fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
+				    netdev->mtu);
+
+		if (fjes_hw_epid_is_same_zone(hw, epidx)) {
+			mutex_lock(&hw->hw_info.lock);
+			result =
+			fjes_hw_register_buff_addr(hw, epidx, buf_pair);
+			mutex_unlock(&hw->hw_info.lock);
+
+			switch (result) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				adapter->force_reset = true;
+				return result;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void fjes_free_resources(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct fjes_device_command_param param;
+	struct ep_share_mem_info *buf_pair;
+	struct fjes_hw *hw = &adapter->hw;
+	bool reset_flag = false;
+	int result;
+	int epidx;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		mutex_lock(&hw->hw_info.lock);
+		result = fjes_hw_unregister_buff_addr(hw, epidx);
+		mutex_unlock(&hw->hw_info.lock);
+
+		if (result)
+			reset_flag = true;
+
+		buf_pair = &hw->ep_shm_info[epidx];
+
+		fjes_hw_setup_epbuf(&buf_pair->tx,
+				    netdev->dev_addr, netdev->mtu);
+
+		clear_bit(epidx, &hw->txrx_stop_req_bit);
+	}
+
+	if (reset_flag || adapter->force_reset) {
+		result = fjes_hw_reset(hw);
+
+		adapter->force_reset = false;
+
+		if (result)
+			adapter->open_guard = true;
+
+		hw->hw_info.buffer_share_bit = 0;
+
+		memset((void *)&param, 0, sizeof(param));
+
+		param.req_len = hw->hw_info.req_buf_size;
+		param.req_start = __pa(hw->hw_info.req_buf);
+		param.res_len = hw->hw_info.res_buf_size;
+		param.res_start = __pa(hw->hw_info.res_buf);
+		param.share_start = __pa(hw->hw_info.share->ep_status);
+
+		fjes_hw_init_command_registers(hw, &param);
+	}
+}
+
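+/* Worker: re-check the partner TX rings and wake the stopped queue once
+ * every ring has room again or the stall timeout has expired.
+ */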
+static void fjes_tx_stall_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(work,
+			struct fjes_adapter, tx_stall_task);
+	struct net_device *netdev = adapter->netdev;
+	struct fjes_hw *hw = &adapter->hw;
+	int all_queue_available, sendable;
+	enum ep_partner_status pstatus;
+	int max_epid, my_epid, epid;
+	union ep_buffer_info *info;
+	int i;
+
+	if (((long)jiffies -
+		(long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+		netif_wake_queue(netdev);
+		return;
+	}
+
+	my_epid = hw->my_epid;
+	max_epid = hw->max_epid;
+
+	for (i = 0; i < 5; i++) {
+		all_queue_available = 1;
+
+		for (epid = 0; epid < max_epid; epid++) {
+			if (my_epid == epid)
+				continue;
+
+			pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+			sendable = (pstatus == EP_PARTNER_SHARED);
+			if (!sendable)
+				continue;
+
+			info = adapter->hw.ep_shm_info[epid].tx.info;
+
+			if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
+					 info->v1i.count_max)) {
+				all_queue_available = 0;
+				break;
+			}
+		}
+
+		if (all_queue_available) {
+			netif_wake_queue(netdev);
+			return;
+		}
+	}
+
+	usleep_range(50, 100);
+
+	queue_work(adapter->txrx_wq, &adapter->tx_stall_task);
+}
+
+static void fjes_force_close_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(work,
+			struct fjes_adapter, force_close_task);
+	struct net_device *netdev = adapter->netdev;
+
+	rtnl_lock();
+	dev_close(netdev);
+	rtnl_unlock();
+}
+
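+/* Worker: for each shared partner with a delayed-send frame pending, clear
+ * the pending flag and raise an RX data interrupt if the peer's RX status
+ * flags are clear.
+ */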
+static void fjes_raise_intr_rxdata_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(work,
+			struct fjes_adapter, raise_intr_rxdata_task);
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status pstatus;
+	int max_epid, my_epid, epid;
+
+	my_epid = hw->my_epid;
+	max_epid = hw->max_epid;
+
+	for (epid = 0; epid < max_epid; epid++)
+		hw->ep_shm_info[epid].tx_status_work = 0;
+
+	for (epid = 0; epid < max_epid; epid++) {
+		if (epid == my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+		if (pstatus == EP_PARTNER_SHARED) {
+			hw->ep_shm_info[epid].tx_status_work =
+				hw->ep_shm_info[epid].tx.info->v1i.tx_status;
+
+			if (hw->ep_shm_info[epid].tx_status_work ==
+				FJES_TX_DELAY_SEND_PENDING) {
+				hw->ep_shm_info[epid].tx.info->v1i.tx_status =
+					FJES_TX_DELAY_SEND_NONE;
+			}
+		}
+	}
+
+	for (epid = 0; epid < max_epid; epid++) {
+		if (epid == my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, epid);
+		if ((hw->ep_shm_info[epid].tx_status_work ==
+		     FJES_TX_DELAY_SEND_PENDING) &&
+		    (pstatus == EP_PARTNER_SHARED) &&
+		    !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+			fjes_hw_raise_interrupt(hw, epid,
+						REG_ICTL_MASK_RX_DATA);
+		}
+	}
+
+	usleep_range(500, 1000);
+}
+
+static int fjes_tx_send(struct fjes_adapter *adapter, int dest,
+			void *data, size_t len)
+{
+	int retval;
+
+	retval = fjes_hw_epbuf_tx_pkt_send(&adapter->hw.ep_shm_info[dest].tx,
+					   data, len);
+	if (retval)
+		return retval;
+
+	adapter->hw.ep_shm_info[dest].tx.info->v1i.tx_status =
+		FJES_TX_DELAY_SEND_PENDING;
+	if (!work_pending(&adapter->raise_intr_rxdata_task))
+		queue_work(adapter->txrx_wq,
+			   &adapter->raise_intr_rxdata_task);
+
+	retval = 0;
+	return retval;
+}
+
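+/* Transmit path: map the destination MAC to an EPID (local addresses of the
+ * form 02:00:00:00:00:xx address a single EP, multicast goes to all EPs)
+ * and copy the frame into each target's shared TX ring.
+ */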
+static netdev_tx_t
+fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+
+	int max_epid, my_epid, dest_epid;
+	enum ep_partner_status pstatus;
+	struct netdev_queue *cur_queue;
+	char shortpkt[VLAN_ETH_HLEN];
+	bool is_multi, vlan;
+	struct ethhdr *eth;
+	u16 queue_no = 0;
+	u16 vlan_id = 0;
+	netdev_tx_t ret;
+	char *data;
+	int len;
+
+	ret = NETDEV_TX_OK;
+	is_multi = false;
+	cur_queue = netdev_get_tx_queue(netdev, queue_no);
+
+	eth = (struct ethhdr *)skb->data;
+	my_epid = hw->my_epid;
+
+	vlan = (vlan_get_tag(skb, &vlan_id) == 0);
+
+	data = skb->data;
+	len = skb->len;
+
+	if (is_multicast_ether_addr(eth->h_dest)) {
+		dest_epid = 0;
+		max_epid = hw->max_epid;
+		is_multi = true;
+	} else if (is_local_ether_addr(eth->h_dest)) {
+		dest_epid = eth->h_dest[ETH_ALEN - 1];
+		max_epid = dest_epid + 1;
+
+		if ((eth->h_dest[0] == 0x02) &&
+		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
+			      eth->h_dest[3] | eth->h_dest[4])) &&
+		    (dest_epid < hw->max_epid)) {
+			;
+		} else {
+			dest_epid = 0;
+			max_epid = 0;
+			ret = NETDEV_TX_OK;
+
+			adapter->stats64.tx_packets += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+			adapter->stats64.tx_bytes += len;
+			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+		}
+	} else {
+		dest_epid = 0;
+		max_epid = 0;
+		ret = NETDEV_TX_OK;
+
+		adapter->stats64.tx_packets += 1;
+		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+		adapter->stats64.tx_bytes += len;
+		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+	}
+
+	for (; dest_epid < max_epid; dest_epid++) {
+		if (my_epid == dest_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
+		if (pstatus != EP_PARTNER_SHARED) {
+			ret = NETDEV_TX_OK;
+		} else if (!fjes_hw_check_epbuf_version(
+				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
+			/* version is NOT 0 */
+			adapter->stats64.tx_carrier_errors += 1;
+			hw->ep_shm_info[my_epid].net_stats
+						.tx_carrier_errors += 1;
+
+			ret = NETDEV_TX_OK;
+		} else if (!fjes_hw_check_mtu(
+				&adapter->hw.ep_shm_info[dest_epid].rx,
+				netdev->mtu)) {
+			adapter->stats64.tx_dropped += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+			adapter->stats64.tx_errors += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+
+			ret = NETDEV_TX_OK;
+		} else if (vlan &&
+			   !fjes_hw_check_vlan_id(
+				&adapter->hw.ep_shm_info[dest_epid].rx,
+				vlan_id)) {
+			ret = NETDEV_TX_OK;
+		} else {
+			if (len < VLAN_ETH_HLEN) {
+				memset(shortpkt, 0, VLAN_ETH_HLEN);
+				memcpy(shortpkt, skb->data, skb->len);
+				len = VLAN_ETH_HLEN;
+				data = shortpkt;
+			}
+
+			if (adapter->tx_retry_count == 0) {
+				adapter->tx_start_jiffies = jiffies;
+				adapter->tx_retry_count = 1;
+			} else {
+				adapter->tx_retry_count++;
+			}
+
+			if (fjes_tx_send(adapter, dest_epid, data, len)) {
+				if (is_multi) {
+					ret = NETDEV_TX_OK;
+				} else if (
+					   ((long)jiffies -
+					    (long)adapter->tx_start_jiffies) >=
+					    FJES_TX_RETRY_TIMEOUT) {
+					adapter->stats64.tx_fifo_errors += 1;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_fifo_errors += 1;
+					adapter->stats64.tx_errors += 1;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_errors += 1;
+
+					ret = NETDEV_TX_OK;
+				} else {
+					netdev->trans_start = jiffies;
+					netif_tx_stop_queue(cur_queue);
+
+					if (!work_pending(&adapter->tx_stall_task))
+						queue_work(adapter->txrx_wq,
+							   &adapter->tx_stall_task);
+
+					ret = NETDEV_TX_BUSY;
+				}
+			} else {
+				if (!is_multi) {
+					adapter->stats64.tx_packets += 1;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_packets += 1;
+					adapter->stats64.tx_bytes += len;
+					hw->ep_shm_info[my_epid].net_stats
+								.tx_bytes += len;
+				}
+
+				adapter->tx_retry_count = 0;
+				ret = NETDEV_TX_OK;
+			}
+		}
+	}
+
+	if (ret == NETDEV_TX_OK) {
+		dev_kfree_skb(skb);
+		if (is_multi) {
+			adapter->stats64.tx_packets += 1;
+			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
+			adapter->stats64.tx_bytes += len;
+			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
+		}
+	}
+
+	return ret;
+}
+
+static void fjes_tx_retry(struct net_device *netdev)
+{
+	struct netdev_queue *queue = netdev_get_tx_queue(netdev, 0);
+
+	netif_tx_wake_queue(queue);
+}
+
+static struct rtnl_link_stats64 *
+fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+
+	memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
+
+	return stats;
+}
+
+static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	bool running = netif_running(netdev);
+	int ret = 0;
+	int idx;
+
+	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
+		if (new_mtu <= fjes_support_mtu[idx]) {
+			new_mtu = fjes_support_mtu[idx];
+			if (new_mtu == netdev->mtu)
+				return 0;
+
+			if (running)
+				fjes_close(netdev);
+
+			netdev->mtu = new_mtu;
+
+			if (running)
+				ret = fjes_open(netdev);
+
+			return ret;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static int fjes_vlan_rx_add_vid(struct net_device *netdev,
+				__be16 proto, u16 vid)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	bool ret = true;
+	int epid;
+
+	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
+		if (epid == adapter->hw.my_epid)
+			continue;
+
+		if (!fjes_hw_check_vlan_id(
+			&adapter->hw.ep_shm_info[epid].tx, vid))
+			ret = fjes_hw_set_vlan_id(
+				&adapter->hw.ep_shm_info[epid].tx, vid);
+	}
+
+	return ret ? 0 : -ENOSPC;
+}
+
+static int fjes_vlan_rx_kill_vid(struct net_device *netdev,
+				 __be16 proto, u16 vid)
+{
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	int epid;
+
+	for (epid = 0; epid < adapter->hw.max_epid; epid++) {
+		if (epid == adapter->hw.my_epid)
+			continue;
+
+		fjes_hw_del_vlan_id(&adapter->hw.ep_shm_info[epid].tx, vid);
+	}
+
+	return 0;
+}
+
+static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
+				   int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status status;
+
+	status = fjes_hw_get_partner_ep_status(hw, src_epid);
+	switch (status) {
+	case EP_PARTNER_UNSHARE:
+	case EP_PARTNER_COMPLETE:
+	default:
+		break;
+	case EP_PARTNER_WAITING:
+		if (src_epid < hw->my_epid) {
+			hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+				FJES_RX_STOP_REQ_DONE;
+
+			clear_bit(src_epid, &hw->txrx_stop_req_bit);
+			set_bit(src_epid, &adapter->unshare_watch_bitmask);
+
+			if (!work_pending(&adapter->unshare_watch_task))
+				queue_work(adapter->control_wq,
+					   &adapter->unshare_watch_task);
+		}
+		break;
+	case EP_PARTNER_SHARED:
+		if (hw->ep_shm_info[src_epid].rx.info->v1i.rx_status &
+		    FJES_RX_STOP_REQ_REQUEST) {
+			set_bit(src_epid, &hw->epstop_req_bit);
+			if (!work_pending(&hw->epstop_task))
+				queue_work(adapter->control_wq,
+					   &hw->epstop_task);
+		}
+		break;
+	}
+}
+
+static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status status;
+
+	set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
+
+	status = fjes_hw_get_partner_ep_status(hw, src_epid);
+	switch (status) {
+	case EP_PARTNER_WAITING:
+		hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
+				FJES_RX_STOP_REQ_DONE;
+		clear_bit(src_epid, &hw->txrx_stop_req_bit);
+		/* fall through */
+	case EP_PARTNER_UNSHARE:
+	case EP_PARTNER_COMPLETE:
+	default:
+		set_bit(src_epid, &adapter->unshare_watch_bitmask);
+		if (!work_pending(&adapter->unshare_watch_task))
+			queue_work(adapter->control_wq,
+				   &adapter->unshare_watch_task);
+		break;
+	case EP_PARTNER_SHARED:
+		set_bit(src_epid, &hw->epstop_req_bit);
+
+		if (!work_pending(&hw->epstop_task))
+			queue_work(adapter->control_wq, &hw->epstop_task);
+		break;
+	}
+}
+
+static void fjes_update_zone_irq(struct fjes_adapter *adapter,
+				 int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	if (!work_pending(&hw->update_zone_task))
+		queue_work(adapter->control_wq, &hw->update_zone_task);
+}
+
+static irqreturn_t fjes_intr(int irq, void *data)
+{
+	struct fjes_adapter *adapter = data;
+	struct fjes_hw *hw = &adapter->hw;
+	irqreturn_t ret;
+	u32 icr;
+
+	icr = fjes_hw_capture_interrupt_status(hw);
+
+	if (icr & REG_IS_MASK_IS_ASSERT) {
+		if (icr & REG_ICTL_MASK_RX_DATA)
+			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		if (icr & REG_ICTL_MASK_DEV_STOP_REQ)
+			fjes_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		if (icr & REG_ICTL_MASK_TXRX_STOP_REQ)
+			fjes_txrx_stop_req_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		if (icr & REG_ICTL_MASK_TXRX_STOP_DONE)
+			fjes_hw_set_irqmask(hw,
+					    REG_ICTL_MASK_TXRX_STOP_DONE, true);
+
+		if (icr & REG_ICTL_MASK_INFO_UPDATE)
+			fjes_update_zone_irq(adapter, icr & REG_IS_MASK_EPID);
+
+		ret = IRQ_HANDLED;
+	} else {
+		ret = IRQ_NONE;
+	}
+
+	return ret;
+}
+
+static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
+				     int start_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	enum ep_partner_status pstatus;
+	int max_epid, cur_epid;
+	int i;
+
+	max_epid = hw->max_epid;
+	start_epid = (start_epid + 1 + max_epid) % max_epid;
+
+	for (i = 0; i < max_epid; i++) {
+		cur_epid = (start_epid + i) % max_epid;
+		if (cur_epid == hw->my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
+		if (pstatus == EP_PARTNER_SHARED) {
+			if (!fjes_hw_epbuf_rx_is_empty(
+				&hw->ep_shm_info[cur_epid].rx))
+				return cur_epid;
+		}
+	}
+	return -1;
+}
+
+static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
+			      int *cur_epid)
+{
+	void *frame;
+
+	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
+	if (*cur_epid < 0)
+		return NULL;
+
+	frame =
+	fjes_hw_epbuf_rx_curpkt_get_addr(
+		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
+
+	return frame;
+}
+
+static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
+{
+	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
+}
+
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+	adapter->unset_rx_last = true;
+	napi_schedule(&adapter->napi);
+}
+
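+/* NAPI poll: drain received frames from all shared partners' RX rings in a
+ * round-robin fashion; keep rescheduling briefly after the last frame
+ * before unmasking the RX data interrupt again.
+ */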
+static int fjes_poll(struct napi_struct *napi, int budget)
+{
+	struct fjes_adapter *adapter =
+			container_of(napi, struct fjes_adapter, napi);
+	struct net_device *netdev = napi->dev;
+	struct fjes_hw *hw = &adapter->hw;
+	struct sk_buff *skb;
+	int work_done = 0;
+	int cur_epid = 0;
+	int epidx;
+	size_t frame_len;
+	void *frame;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
+			FJES_RX_POLL_WORK;
+	}
+
+	while (work_done < budget) {
+		prefetch(&adapter->hw);
+		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
+
+		if (frame) {
+			skb = napi_alloc_skb(napi, frame_len);
+			if (!skb) {
+				adapter->stats64.rx_dropped += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_dropped += 1;
+				adapter->stats64.rx_errors += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_errors += 1;
+			} else {
+				memcpy(skb_put(skb, frame_len),
+				       frame, frame_len);
+				skb->protocol = eth_type_trans(skb, netdev);
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+				netif_receive_skb(skb);
+
+				work_done++;
+
+				adapter->stats64.rx_packets += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_packets += 1;
+				adapter->stats64.rx_bytes += frame_len;
+				hw->ep_shm_info[cur_epid].net_stats
+							 .rx_bytes += frame_len;
+
+				if (is_multicast_ether_addr(
+					((struct ethhdr *)frame)->h_dest)) {
+					adapter->stats64.multicast += 1;
+					hw->ep_shm_info[cur_epid].net_stats
+								 .multicast += 1;
+				}
+			}
+
+			fjes_rxframe_release(adapter, cur_epid);
+			adapter->unset_rx_last = true;
+		} else {
+			break;
+		}
+	}
+
+	if (work_done < budget) {
+		napi_complete(napi);
+
+		if (adapter->unset_rx_last) {
+			adapter->rx_last_jiffies = jiffies;
+			adapter->unset_rx_last = false;
+		}
+
+		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
+			napi_reschedule(napi);
+		} else {
+			for (epidx = 0; epidx < hw->max_epid; epidx++) {
+				if (epidx == hw->my_epid)
+					continue;
+				adapter->hw.ep_shm_info[epidx]
+					   .tx.info->v1i.rx_status &=
+						~FJES_RX_POLL_WORK;
+			}
+
+			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
+		}
+	}
+
+	return work_done;
+}
+
+/* fjes_probe - Device Initialization Routine */
+static int fjes_probe(struct platform_device *plat_dev)
+{
+	struct fjes_adapter *adapter;
+	struct net_device *netdev;
+	struct resource *res;
+	struct fjes_hw *hw;
+	int err;
+
+	err = -ENOMEM;
+	netdev = alloc_netdev_mq(sizeof(struct fjes_adapter), "es%d",
+				 NET_NAME_UNKNOWN, fjes_netdev_setup,
+				 FJES_MAX_QUEUES);
+
+	if (!netdev)
+		goto err_out;
+
+	SET_NETDEV_DEV(netdev, &plat_dev->dev);
+
+	dev_set_drvdata(&plat_dev->dev, netdev);
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->plat_dev = plat_dev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	/* setup the private structure */
+	err = fjes_sw_init(adapter);
+	if (err)
+		goto err_free_netdev;
+
+	INIT_WORK(&adapter->force_close_task, fjes_force_close_task);
+	adapter->force_reset = false;
+	adapter->open_guard = false;
+
+	adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx");
+	adapter->control_wq = create_workqueue(DRV_NAME "/control");
+
+	INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
+	INIT_WORK(&adapter->raise_intr_rxdata_task,
+		  fjes_raise_intr_rxdata_task);
+	INIT_WORK(&adapter->unshare_watch_task, fjes_watch_unshare_task);
+	adapter->unshare_watch_bitmask = 0;
+
+	INIT_DELAYED_WORK(&adapter->interrupt_watch_task, fjes_irq_watch_task);
+	adapter->interrupt_watch_enable = false;
+
+	res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+	hw->hw_res.start = res->start;
+	hw->hw_res.size = res->end - res->start + 1;
+	hw->hw_res.irq = platform_get_irq(plat_dev, 0);
+	err = fjes_hw_init(&adapter->hw);
+	if (err)
+		goto err_free_netdev;
+
+	/* setup MAC address (02:00:00:00:00:[epid]) */
+	netdev->dev_addr[0] = 2;
+	netdev->dev_addr[1] = 0;
+	netdev->dev_addr[2] = 0;
+	netdev->dev_addr[3] = 0;
+	netdev->dev_addr[4] = 0;
+	netdev->dev_addr[5] = hw->my_epid; /* EPID */
+
+	err = register_netdev(netdev);
+	if (err)
+		goto err_hw_exit;
+
+	netif_carrier_off(netdev);
+
+	return 0;
+
+err_hw_exit:
+	fjes_hw_exit(&adapter->hw);
+err_free_netdev:
+	free_netdev(netdev);
+err_out:
+	return err;
+}
+
+/* fjes_remove - Device Removal Routine */
+static int fjes_remove(struct platform_device *plat_dev)
+{
+	struct net_device *netdev = dev_get_drvdata(&plat_dev->dev);
+	struct fjes_adapter *adapter = netdev_priv(netdev);
+	struct fjes_hw *hw = &adapter->hw;
+
+	cancel_delayed_work_sync(&adapter->interrupt_watch_task);
+	cancel_work_sync(&adapter->unshare_watch_task);
+	cancel_work_sync(&adapter->raise_intr_rxdata_task);
+	cancel_work_sync(&adapter->tx_stall_task);
+	if (adapter->control_wq)
+		destroy_workqueue(adapter->control_wq);
+	if (adapter->txrx_wq)
+		destroy_workqueue(adapter->txrx_wq);
+
+	unregister_netdev(netdev);
+
+	fjes_hw_exit(hw);
+
+	netif_napi_del(&adapter->napi);
+
+	free_netdev(netdev);
+
+	return 0;
+}
+
+static int fjes_sw_init(struct fjes_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
+
+	return 0;
+}
+
+/* fjes_netdev_setup - netdevice initialization routine */
+static void fjes_netdev_setup(struct net_device *netdev)
+{
+	ether_setup(netdev);
+
+	netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
+	netdev->netdev_ops = &fjes_netdev_ops;
+	fjes_set_ethtool_ops(netdev);
+	netdev->mtu = fjes_support_mtu[0];
+	netdev->flags |= IFF_BROADCAST;
+	netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
+}
+
+static void fjes_irq_watch_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter = container_of(to_delayed_work(work),
+			struct fjes_adapter, interrupt_watch_task);
+
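+	/* Call the interrupt handler directly with local irqs masked */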
+	local_irq_disable();
+	fjes_intr(adapter->hw.hw_res.irq, adapter);
+	local_irq_enable();
+
+	if (fjes_rxframe_search_exist(adapter, 0) >= 0)
+		napi_schedule(&adapter->napi);
+
+	if (adapter->interrupt_watch_enable) {
+		if (!delayed_work_pending(&adapter->interrupt_watch_task))
+			queue_delayed_work(adapter->control_wq,
+					   &adapter->interrupt_watch_task,
+					   FJES_IRQ_WATCH_DELAY);
+	}
+}
+
+static void fjes_watch_unshare_task(struct work_struct *work)
+{
+	struct fjes_adapter *adapter =
+	container_of(work, struct fjes_adapter, unshare_watch_task);
+
+	struct net_device *netdev = adapter->netdev;
+	struct fjes_hw *hw = &adapter->hw;
+
+	int unshare_watch, unshare_reserve;
+	int max_epid, my_epid, epidx;
+	int stop_req, stop_req_done;
+	ulong unshare_watch_bitmask;
+	int wait_time = 0;
+	int is_shared;
+	int ret;
+
+	my_epid = hw->my_epid;
+	max_epid = hw->max_epid;
+
+	unshare_watch_bitmask = adapter->unshare_watch_bitmask;
+	adapter->unshare_watch_bitmask = 0;
+
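+	/* Unregister peer buffers as their TX/RX stop completes (3s max) */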
+	while ((unshare_watch_bitmask || hw->txrx_stop_req_bit) &&
+	       (wait_time < 3000)) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+
+			is_shared = fjes_hw_epid_is_shared(hw->hw_info.share,
+							   epidx);
+
+			stop_req = test_bit(epidx, &hw->txrx_stop_req_bit);
+
+			stop_req_done = hw->ep_shm_info[epidx].rx.info->v1i.rx_status &
+					FJES_RX_STOP_REQ_DONE;
+
+			unshare_watch = test_bit(epidx, &unshare_watch_bitmask);
+
+			unshare_reserve = test_bit(epidx,
+						   &hw->hw_info.buffer_unshare_reserve_bit);
+
+			if ((!stop_req ||
+			     (is_shared && (!is_shared || !stop_req_done))) &&
+			    (is_shared || !unshare_watch || !unshare_reserve))
+				continue;
+
+			mutex_lock(&hw->hw_info.lock);
+			ret = fjes_hw_unregister_buff_addr(hw, epidx);
+			switch (ret) {
+			case 0:
+				break;
+			case -ENOMSG:
+			case -EBUSY:
+			default:
+				if (!work_pending(
+					&adapter->force_close_task)) {
+					adapter->force_reset = true;
+					schedule_work(
+						&adapter->force_close_task);
+				}
+				break;
+			}
+			mutex_unlock(&hw->hw_info.lock);
+
+			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+					    netdev->dev_addr, netdev->mtu);
+
+			clear_bit(epidx, &hw->txrx_stop_req_bit);
+			clear_bit(epidx, &unshare_watch_bitmask);
+			clear_bit(epidx,
+				  &hw->hw_info.buffer_unshare_reserve_bit);
+		}
+
+		msleep(100);
+		wait_time += 100;
+	}
+
+	if (hw->hw_info.buffer_unshare_reserve_bit) {
+		for (epidx = 0; epidx < hw->max_epid; epidx++) {
+			if (epidx == hw->my_epid)
+				continue;
+
+			if (test_bit(epidx,
+				     &hw->hw_info.buffer_unshare_reserve_bit)) {
+				mutex_lock(&hw->hw_info.lock);
+
+				ret = fjes_hw_unregister_buff_addr(hw, epidx);
+				switch (ret) {
+				case 0:
+					break;
+				case -ENOMSG:
+				case -EBUSY:
+				default:
+					if (!work_pending(
+						&adapter->force_close_task)) {
+						adapter->force_reset = true;
+						schedule_work(
+							&adapter->force_close_task);
+					}
+					break;
+				}
+				mutex_unlock(&hw->hw_info.lock);
+
+				fjes_hw_setup_epbuf(
+					&hw->ep_shm_info[epidx].tx,
+					netdev->dev_addr, netdev->mtu);
+
+				clear_bit(epidx, &hw->txrx_stop_req_bit);
+				clear_bit(epidx, &unshare_watch_bitmask);
+				clear_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
+			}
+
+			if (test_bit(epidx, &unshare_watch_bitmask)) {
+				hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+						~FJES_RX_STOP_REQ_DONE;
+			}
+		}
+	}
+}
+
+/* fjes_init_module - Driver Registration Routine */
+static int __init fjes_init_module(void)
+{
+	int result;
+
+	pr_info("%s - version %s - %s\n",
+		fjes_driver_string, fjes_driver_version, fjes_copyright);
+
+	result = platform_driver_register(&fjes_driver);
+	if (result < 0)
+		return result;
+
+	result = acpi_bus_register_driver(&fjes_acpi_driver);
+	if (result < 0)
+		goto fail_acpi_driver;
+
+	return 0;
+
+fail_acpi_driver:
+	platform_driver_unregister(&fjes_driver);
+	return result;
+}
+
+module_init(fjes_init_module);
+
+/* fjes_exit_module - Driver Exit Cleanup Routine */
+static void __exit fjes_exit_module(void)
+{
+	acpi_bus_unregister_driver(&fjes_acpi_driver);
+	platform_driver_unregister(&fjes_driver);
+}
+
+module_exit(fjes_exit_module);
diff --git a/drivers/net/fjes/fjes_regs.h b/drivers/net/fjes/fjes_regs.h
new file mode 100644
index 0000000..029c924
--- /dev/null
+++ b/drivers/net/fjes/fjes_regs.h
@@ -0,0 +1,142 @@
+/*
+ *  FUJITSU Extended Socket Network Device driver
+ *  Copyright (c) 2015 FUJITSU LIMITED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef FJES_REGS_H_
+#define FJES_REGS_H_
+
+#include <linux/bitops.h>
+
+#define XSCT_DEVICE_REGISTER_SIZE 0x1000
+
+/* register offset */
+/* Information registers */
+#define XSCT_OWNER_EPID     0x0000  /* Owner EPID */
+#define XSCT_MAX_EP         0x0004  /* Maximum EP */
+
+/* Device Control registers */
+#define XSCT_DCTL           0x0010  /* Device Control */
+
+/* Command Control registers */
+#define XSCT_CR             0x0020  /* Command request */
+#define XSCT_CS             0x0024  /* Command status */
+#define XSCT_SHSTSAL        0x0028  /* Share status address Low */
+#define XSCT_SHSTSAH        0x002C  /* Share status address High */
+
+#define XSCT_REQBL          0x0034  /* Request Buffer length */
+#define XSCT_REQBAL         0x0038  /* Request Buffer Address Low */
+#define XSCT_REQBAH         0x003C  /* Request Buffer Address High */
+
+#define XSCT_RESPBL         0x0044  /* Response Buffer Length */
+#define XSCT_RESPBAL        0x0048  /* Response Buffer Address Low */
+#define XSCT_RESPBAH        0x004C  /* Response Buffer Address High */
+
+/* Interrupt Control registers */
+#define XSCT_IS             0x0080  /* Interrupt status */
+#define XSCT_IMS            0x0084  /* Interrupt mask set */
+#define XSCT_IMC            0x0088  /* Interrupt mask clear */
+#define XSCT_IG             0x008C  /* Interrupt generator */
+#define XSCT_ICTL           0x0090  /* Interrupt control */
+
+/* register structure */
+/* Information registers */
+union REG_OWNER_EPID {
+	struct {
+		__le32 epid:16;
+		__le32:16;
+	} bits;
+	__le32 reg;
+};
+
+union REG_MAX_EP {
+	struct {
+		__le32 maxep:16;
+		__le32:16;
+	} bits;
+	__le32 reg;
+};
+
+/* Device Control registers */
+union REG_DCTL {
+	struct {
+		__le32 reset:1;
+		__le32 rsv0:15;
+		__le32 rsv1:16;
+	} bits;
+	__le32 reg;
+};
+
+/* Command Control registers */
+union REG_CR {
+	struct {
+		__le32 req_code:16;
+		__le32 err_info:14;
+		__le32 error:1;
+		__le32 req_start:1;
+	} bits;
+	__le32 reg;
+};
+
+union REG_CS {
+	struct {
+		__le32 req_code:16;
+		__le32 rsv0:14;
+		__le32 busy:1;
+		__le32 complete:1;
+	} bits;
+	__le32 reg;
+};
+
+/* Interrupt Control registers */
+union REG_ICTL {
+	struct {
+		__le32 automak:1;
+		__le32 rsv0:31;
+	} bits;
+	__le32 reg;
+};
+
+enum REG_ICTL_MASK {
+	REG_ICTL_MASK_INFO_UPDATE     = 1 << 20,
+	REG_ICTL_MASK_DEV_STOP_REQ    = 1 << 19,
+	REG_ICTL_MASK_TXRX_STOP_REQ   = 1 << 18,
+	REG_ICTL_MASK_TXRX_STOP_DONE  = 1 << 17,
+	REG_ICTL_MASK_RX_DATA         = 1 << 16,
+	REG_ICTL_MASK_ALL             = GENMASK(20, 16),
+};
+
+enum REG_IS_MASK {
+	REG_IS_MASK_IS_ASSERT	= 1 << 31,
+	REG_IS_MASK_EPID	= GENMASK(15, 0),
+};
+
+struct fjes_hw;
+
+u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg);
+
+#define wr32(reg, val) \
+do { \
+	u8 *base = hw->base; \
+	writel((val), &base[(reg)]); \
+} while (0)
+
+#define rd32(reg) (fjes_hw_rd32(hw, reg))
+
+#endif /* FJES_REGS_H_ */
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 78d49d1..da3259c 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -15,8 +15,11 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/hash.h>
+#include <net/dst_metadata.h>
+#include <net/gro_cells.h>
 #include <net/rtnetlink.h>
 #include <net/geneve.h>
+#include <net/protocol.h>
 
 #define GENEVE_NETDEV_VER	"0.6"
 
@@ -32,12 +35,17 @@
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
+#define GENEVE_VER 0
+#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
+
 /* per-network namespace private data for this module */
 struct geneve_net {
-	struct list_head  geneve_list;
-	struct hlist_head vni_list[VNI_HASH_SIZE];
+	struct list_head	geneve_list;
+	struct list_head	sock_list;
 };
 
+static int geneve_net_id;
+
 /* Pseudo network device */
 struct geneve_dev {
 	struct hlist_node  hlist;	/* vni hash table */
@@ -49,9 +57,20 @@
 	u8                 tos;		/* TOS override */
 	struct sockaddr_in remote;	/* IPv4 address for link partner */
 	struct list_head   next;	/* geneve's per namespace list */
+	__be16		   dst_port;
+	bool		   collect_md;
+	struct gro_cells   gro_cells;
 };
 
-static int geneve_net_id;
+struct geneve_sock {
+	bool			collect_md;
+	struct list_head	list;
+	struct socket		*sock;
+	struct rcu_head		rcu;
+	int			refcnt;
+	struct udp_offload	udp_offloads;
+	struct hlist_head	vni_list[VNI_HASH_SIZE];
+};
 
 static inline __u32 geneve_net_vni_hash(u8 vni[3])
 {
@@ -61,46 +80,98 @@
 	return hash_32(vnid, VNI_HASH_BITS);
 }
 
+static __be64 vni_to_tunnel_id(const __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
+#else
+	return (__force __be64)(((__force u64)vni[0] << 40) |
+				((__force u64)vni[1] << 48) |
+				((__force u64)vni[2] << 56));
+#endif
+}
+
+static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
+					__be32 addr, u8 vni[])
+{
+	struct hlist_head *vni_list_head;
+	struct geneve_dev *geneve;
+	__u32 hash;
+
+	/* Find the device for this VNI */
+	hash = geneve_net_vni_hash(vni);
+	vni_list_head = &gs->vni_list[hash];
+	hlist_for_each_entry_rcu(geneve, vni_list_head, hlist) {
+		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+		    addr == geneve->remote.sin_addr.s_addr)
+			return geneve;
+	}
+	return NULL;
+}
+
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+	return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
 /* geneve receive/decap routine */
 static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
 {
 	struct genevehdr *gnvh = geneve_hdr(skb);
-	struct geneve_dev *dummy, *geneve = NULL;
-	struct geneve_net *gn;
-	struct iphdr *iph = NULL;
+	struct metadata_dst *tun_dst = NULL;
+	struct geneve_dev *geneve = NULL;
 	struct pcpu_sw_netstats *stats;
-	struct hlist_head *vni_list_head;
-	int err = 0;
-	__u32 hash;
+	struct iphdr *iph;
+	u8 *vni;
+	__be32 addr;
+	int err;
 
-	iph = ip_hdr(skb); /* Still outer IP header... */
+	if (gs->collect_md) {
+		static u8 zero_vni[3];
 
-	gn = gs->rcv_data;
-
-	/* Find the device for this VNI */
-	hash = geneve_net_vni_hash(gnvh->vni);
-	vni_list_head = &gn->vni_list[hash];
-	hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
-		if (!memcmp(gnvh->vni, dummy->vni, sizeof(dummy->vni)) &&
-		    iph->saddr == dummy->remote.sin_addr.s_addr) {
-			geneve = dummy;
-			break;
-		}
+		vni = zero_vni;
+		addr = 0;
+	} else {
+		vni = gnvh->vni;
+		iph = ip_hdr(skb); /* Still outer IP header... */
+		addr = iph->saddr;
 	}
+
+	geneve = geneve_lookup(gs, addr, vni);
 	if (!geneve)
 		goto drop;
 
-	/* Drop packets w/ critical options,
-	 * since we don't support any...
-	 */
-	if (gnvh->critical)
-		goto drop;
+	if (ip_tunnel_collect_metadata() || gs->collect_md) {
+		__be16 flags;
+
+		flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
+			(gnvh->oam ? TUNNEL_OAM : 0) |
+			(gnvh->critical ? TUNNEL_CRIT_OPT : 0);
+
+		tun_dst = udp_tun_rx_dst(skb, AF_INET, flags,
+					 vni_to_tunnel_id(gnvh->vni),
+					 gnvh->opt_len * 4);
+		if (!tun_dst)
+			goto drop;
+		/* Update tunnel dst according to Geneve options. */
+		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+					gnvh->options, gnvh->opt_len * 4);
+	} else {
+		/* Drop packets w/ critical options,
+		 * since we don't support any...
+		 */
+		if (gnvh->critical)
+			goto drop;
+	}
 
 	skb_reset_mac_header(skb);
 	skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev)));
 	skb->protocol = eth_type_trans(skb, geneve->dev);
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
+	if (tun_dst)
+		skb_dst_set(skb, &tun_dst->dst);
+
 	/* Ignore packet loops (and multicast echo) */
 	if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr))
 		goto drop;
@@ -127,8 +198,7 @@
 	stats->rx_bytes += skb->len;
 	u64_stats_update_end(&stats->syncp);
 
-	netif_rx(skb);
-
+	gro_cells_receive(&geneve->gro_cells, skb);
 	return;
 drop:
 	/* Consume bad packet */
@@ -138,32 +208,305 @@
 /* Setup stats when device is created */
 static int geneve_init(struct net_device *dev)
 {
+	struct geneve_dev *geneve = netdev_priv(dev);
+	int err;
+
 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 	if (!dev->tstats)
 		return -ENOMEM;
 
+	err = gro_cells_init(&geneve->gro_cells, dev);
+	if (err) {
+		free_percpu(dev->tstats);
+		return err;
+	}
+
 	return 0;
 }
 
 static void geneve_uninit(struct net_device *dev)
 {
+	struct geneve_dev *geneve = netdev_priv(dev);
+
+	gro_cells_destroy(&geneve->gro_cells);
 	free_percpu(dev->tstats);
 }
 
+/* Callback from net/ipv4/udp.c to receive packets */
+static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+	struct genevehdr *geneveh;
+	struct geneve_sock *gs;
+	int opts_len;
+
+	/* Need Geneve and inner Ethernet header to be present */
+	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
+		goto error;
+
+	/* Hand packets with a bad version or protocol type back to UDP */
+	geneveh = geneve_hdr(skb);
+	if (unlikely(geneveh->ver != GENEVE_VER))
+		goto error;
+
+	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+		goto error;
+
+	opts_len = geneveh->opt_len * 4;
+	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+				 htons(ETH_P_TEB)))
+		goto drop;
+
+	gs = rcu_dereference_sk_user_data(sk);
+	if (!gs)
+		goto drop;
+
+	geneve_rx(gs, skb);
+	return 0;
+
+drop:
+	/* Consume bad packet */
+	kfree_skb(skb);
+	return 0;
+
+error:
+	/* Let the UDP layer deal with the skb */
+	return 1;
+}
+
+static struct socket *geneve_create_sock(struct net *net, bool ipv6,
+					 __be16 port)
+{
+	struct socket *sock;
+	struct udp_port_cfg udp_conf;
+	int err;
+
+	memset(&udp_conf, 0, sizeof(udp_conf));
+
+	if (ipv6) {
+		udp_conf.family = AF_INET6;
+	} else {
+		udp_conf.family = AF_INET;
+		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+	}
+
+	udp_conf.local_udp_port = port;
+
+	/* Open UDP socket */
+	err = udp_sock_create(net, &udp_conf, &sock);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	return sock;
+}
+
+static void geneve_notify_add_rx_port(struct geneve_sock *gs)
+{
+	struct sock *sk = gs->sock->sk;
+	sa_family_t sa_family = sk->sk_family;
+	int err;
+
+	if (sa_family == AF_INET) {
+		err = udp_add_offload(&gs->udp_offloads);
+		if (err)
+			pr_warn("geneve: udp_add_offload failed with status %d\n",
+				err);
+	}
+}
+
+static int geneve_hlen(struct genevehdr *gh)
+{
+	return sizeof(*gh) + gh->opt_len * 4;
+}
+
+static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
+					   struct sk_buff *skb,
+					   struct udp_offload *uoff)
+{
+	struct sk_buff *p, **pp = NULL;
+	struct genevehdr *gh, *gh2;
+	unsigned int hlen, gh_len, off_gnv;
+	const struct packet_offload *ptype;
+	__be16 type;
+	int flush = 1;
+
+	off_gnv = skb_gro_offset(skb);
+	hlen = off_gnv + sizeof(*gh);
+	gh = skb_gro_header_fast(skb, off_gnv);
+	if (skb_gro_header_hard(skb, hlen)) {
+		gh = skb_gro_header_slow(skb, hlen, off_gnv);
+		if (unlikely(!gh))
+			goto out;
+	}
+
+	if (gh->ver != GENEVE_VER || gh->oam)
+		goto out;
+	gh_len = geneve_hlen(gh);
+
+	hlen = off_gnv + gh_len;
+	if (skb_gro_header_hard(skb, hlen)) {
+		gh = skb_gro_header_slow(skb, hlen, off_gnv);
+		if (unlikely(!gh))
+			goto out;
+	}
+
+	flush = 0;
+
+	for (p = *head; p; p = p->next) {
+		if (!NAPI_GRO_CB(p)->same_flow)
+			continue;
+
+		gh2 = (struct genevehdr *)(p->data + off_gnv);
+		if (gh->opt_len != gh2->opt_len ||
+		    memcmp(gh, gh2, gh_len)) {
+			NAPI_GRO_CB(p)->same_flow = 0;
+			continue;
+		}
+	}
+
+	type = gh->proto_type;
+
+	rcu_read_lock();
+	ptype = gro_find_receive_by_type(type);
+	if (!ptype) {
+		flush = 1;
+		goto out_unlock;
+	}
+
+	skb_gro_pull(skb, gh_len);
+	skb_gro_postpull_rcsum(skb, gh, gh_len);
+	pp = ptype->callbacks.gro_receive(head, skb);
+
+out_unlock:
+	rcu_read_unlock();
+out:
+	NAPI_GRO_CB(skb)->flush |= flush;
+
+	return pp;
+}
+
+static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
+			       struct udp_offload *uoff)
+{
+	struct genevehdr *gh;
+	struct packet_offload *ptype;
+	__be16 type;
+	int gh_len;
+	int err = -ENOSYS;
+
+	udp_tunnel_gro_complete(skb, nhoff);
+
+	gh = (struct genevehdr *)(skb->data + nhoff);
+	gh_len = geneve_hlen(gh);
+	type = gh->proto_type;
+
+	rcu_read_lock();
+	ptype = gro_find_complete_by_type(type);
+	if (ptype)
+		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
+
+	rcu_read_unlock();
+	return err;
+}
+
+/* Create new listen socket if needed */
+static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
+						bool ipv6)
+{
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
+	struct geneve_sock *gs;
+	struct socket *sock;
+	struct udp_tunnel_sock_cfg tunnel_cfg;
+	int h;
+
+	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
+	if (!gs)
+		return ERR_PTR(-ENOMEM);
+
+	sock = geneve_create_sock(net, ipv6, port);
+	if (IS_ERR(sock)) {
+		kfree(gs);
+		return ERR_CAST(sock);
+	}
+
+	gs->sock = sock;
+	gs->refcnt = 1;
+	for (h = 0; h < VNI_HASH_SIZE; ++h)
+		INIT_HLIST_HEAD(&gs->vni_list[h]);
+
+	/* Initialize the geneve udp offloads structure */
+	gs->udp_offloads.port = port;
+	gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
+	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
+	geneve_notify_add_rx_port(gs);
+
+	/* Mark socket as an encapsulation socket */
+	tunnel_cfg.sk_user_data = gs;
+	tunnel_cfg.encap_type = 1;
+	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+	tunnel_cfg.encap_destroy = NULL;
+	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+	list_add(&gs->list, &gn->sock_list);
+	return gs;
+}
+
+static void geneve_notify_del_rx_port(struct geneve_sock *gs)
+{
+	struct sock *sk = gs->sock->sk;
+	sa_family_t sa_family = sk->sk_family;
+
+	if (sa_family == AF_INET)
+		udp_del_offload(&gs->udp_offloads);
+}
+
+static void geneve_sock_release(struct geneve_sock *gs)
+{
+	if (--gs->refcnt)
+		return;
+
+	list_del(&gs->list);
+	geneve_notify_del_rx_port(gs);
+	udp_tunnel_sock_release(gs->sock);
+	kfree_rcu(gs, rcu);
+}
+
+static struct geneve_sock *geneve_find_sock(struct geneve_net *gn,
+					    __be16 dst_port)
+{
+	struct geneve_sock *gs;
+
+	list_for_each_entry(gs, &gn->sock_list, list) {
+		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
+		    inet_sk(gs->sock->sk)->sk.sk_family == AF_INET) {
+			return gs;
+		}
+	}
+	return NULL;
+}
+
 static int geneve_open(struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct net *net = geneve->net;
-	struct geneve_net *gn = net_generic(geneve->net, geneve_net_id);
+	struct geneve_net *gn = net_generic(net, geneve_net_id);
 	struct geneve_sock *gs;
+	__u32 hash;
 
-	gs = geneve_sock_add(net, htons(GENEVE_UDP_PORT), geneve_rx, gn,
-	                     false, false);
+	gs = geneve_find_sock(gn, geneve->dst_port);
+	if (gs) {
+		gs->refcnt++;
+		goto out;
+	}
+
+	gs = geneve_socket_create(net, geneve->dst_port, false);
 	if (IS_ERR(gs))
 		return PTR_ERR(gs);
 
+out:
+	gs->collect_md = geneve->collect_md;
 	geneve->sock = gs;
 
+	hash = geneve_net_vni_hash(geneve->vni);
+	hlist_add_head_rcu(&geneve->hlist, &gs->vni_list[hash]);
 	return 0;
 }
 
@@ -172,74 +515,191 @@
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs = geneve->sock;
 
+	if (!hlist_unhashed(&geneve->hlist))
+		hlist_del_rcu(&geneve->hlist);
 	geneve_sock_release(gs);
-
 	return 0;
 }
 
+static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
+			    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+			    bool csum)
+{
+	struct genevehdr *gnvh;
+	int min_headroom;
+	int err;
+
+	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
+	err = skb_cow_head(skb, min_headroom);
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		goto free_rt;
+	}
+
+	skb = udp_tunnel_handle_offloads(skb, csum);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		goto free_rt;
+	}
+
+	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+	gnvh->ver = GENEVE_VER;
+	gnvh->opt_len = opt_len / 4;
+	gnvh->oam = !!(tun_flags & TUNNEL_OAM);
+	gnvh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+	gnvh->rsvd1 = 0;
+	memcpy(gnvh->vni, vni, 3);
+	gnvh->proto_type = htons(ETH_P_TEB);
+	gnvh->rsvd2 = 0;
+	memcpy(gnvh->options, opt, opt_len);
+
+	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+	return 0;
+
+free_rt:
+	ip_rt_put(rt);
+	return err;
+}
+
+static struct rtable *geneve_get_rt(struct sk_buff *skb,
+				    struct net_device *dev,
+				    struct flowi4 *fl4,
+				    struct ip_tunnel_info *info)
+{
+	struct geneve_dev *geneve = netdev_priv(dev);
+	struct rtable *rt = NULL;
+	__u8 tos;
+
+	memset(fl4, 0, sizeof(*fl4));
+	fl4->flowi4_mark = skb->mark;
+	fl4->flowi4_proto = IPPROTO_UDP;
+
+	if (info) {
+		fl4->daddr = info->key.u.ipv4.dst;
+		fl4->saddr = info->key.u.ipv4.src;
+		fl4->flowi4_tos = RT_TOS(info->key.tos);
+	} else {
+		tos = geneve->tos;
+		if (tos == 1) {
+			const struct iphdr *iip = ip_hdr(skb);
+
+			tos = ip_tunnel_get_dsfield(iip, skb);
+		}
+
+		fl4->flowi4_tos = RT_TOS(tos);
+		fl4->daddr = geneve->remote.sin_addr.s_addr;
+	}
+
+	rt = ip_route_output_key(geneve->net, fl4);
+	if (IS_ERR(rt)) {
+		netdev_dbg(dev, "no route to %pI4\n", &fl4->daddr);
+		dev->stats.tx_carrier_errors++;
+		return rt;
+	}
+	if (rt->dst.dev == dev) { /* is this necessary? */
+		netdev_dbg(dev, "circular route to %pI4\n", &fl4->daddr);
+		dev->stats.collisions++;
+		ip_rt_put(rt);
+		return ERR_PTR(-EINVAL);
+	}
+	return rt;
+}
+
+/* Convert 64 bit tunnel ID to 24 bit VNI. */
+static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+	vni[0] = (__force __u8)(tun_id >> 16);
+	vni[1] = (__force __u8)(tun_id >> 8);
+	vni[2] = (__force __u8)tun_id;
+#else
+	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
 static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs = geneve->sock;
+	struct ip_tunnel_info *info = NULL;
 	struct rtable *rt = NULL;
-	const struct iphdr *iip; /* interior IP header */
 	struct flowi4 fl4;
-	int err;
-	__be16 sport;
 	__u8 tos, ttl;
+	__be16 sport;
+	bool udp_csum;
+	__be16 df;
+	int err;
 
-	iip = ip_hdr(skb);
+	if (geneve->collect_md) {
+		info = skb_tunnel_info(skb);
+		if (unlikely(info && !(info->mode & IP_TUNNEL_INFO_TX))) {
+			netdev_dbg(dev, "no tunnel metadata\n");
+			goto tx_error;
+		}
+		if (info && ip_tunnel_info_af(info) != AF_INET)
+			goto tx_error;
+	}
 
-	skb_reset_mac_header(skb);
-
-	/* TODO: port min/max limits should be configurable */
-	sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
-
-	tos = geneve->tos;
-	if (tos == 1)
-		tos = ip_tunnel_get_dsfield(iip, skb);
-
-	memset(&fl4, 0, sizeof(fl4));
-	fl4.flowi4_tos = RT_TOS(tos);
-	fl4.daddr = geneve->remote.sin_addr.s_addr;
-	rt = ip_route_output_key(geneve->net, &fl4);
+	rt = geneve_get_rt(skb, dev, &fl4, info);
 	if (IS_ERR(rt)) {
 		netdev_dbg(dev, "no route to %pI4\n", &fl4.daddr);
 		dev->stats.tx_carrier_errors++;
 		goto tx_error;
 	}
-	if (rt->dst.dev == dev) { /* is this necessary? */
-		netdev_dbg(dev, "circular route to %pI4\n", &fl4.daddr);
-		dev->stats.collisions++;
-		goto rt_tx_error;
+
+	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
+	skb_reset_mac_header(skb);
+
+	if (info) {
+		const struct ip_tunnel_key *key = &info->key;
+		u8 *opts = NULL;
+		u8 vni[3];
+
+		tunnel_id_to_vni(key->tun_id, vni);
+		if (key->tun_flags & TUNNEL_GENEVE_OPT)
+			opts = ip_tunnel_info_opts(info);
+
+		udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
+				       info->options_len, opts, udp_csum);
+		if (unlikely(err))
+			goto err;
+
+		tos = key->tos;
+		ttl = key->ttl;
+		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+	} else {
+		const struct iphdr *iip; /* interior IP header */
+
+		udp_csum = false;
+		err = geneve_build_skb(rt, skb, 0, geneve->vni,
+				       0, NULL, udp_csum);
+		if (unlikely(err))
+			goto err;
+
+		iip = ip_hdr(skb);
+		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
+		ttl = geneve->ttl;
+		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
+			ttl = 1;
+		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+		df = 0;
 	}
-
-	tos = ip_tunnel_ecn_encap(tos, iip, skb);
-
-	ttl = geneve->ttl;
-	if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
-		ttl = 1;
-
-	ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-
-	/* no need to handle local destination and encap bypass...yet... */
-
-	err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr,
-	                      tos, ttl, 0, sport, htons(GENEVE_UDP_PORT), 0,
-	                      geneve->vni, 0, NULL, false,
-	                      !net_eq(geneve->net, dev_net(geneve->dev)));
-	if (err < 0)
-		ip_rt_put(rt);
+	err = udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, fl4.saddr, fl4.daddr,
+				  tos, ttl, df, sport, geneve->dst_port,
+				  !net_eq(geneve->net, dev_net(geneve->dev)),
+				  !udp_csum);
 
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
-
 	return NETDEV_TX_OK;
 
-rt_tx_error:
-	ip_rt_put(rt);
 tx_error:
-	dev->stats.tx_errors++;
 	dev_kfree_skb(skb);
+err:
+	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
 
@@ -283,7 +743,6 @@
 
 	SET_NETDEV_DEVTYPE(dev, &geneve_type);
 
-	dev->tx_queue_len = 0;
 	dev->features    |= NETIF_F_LLTX;
 	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	dev->features    |= NETIF_F_RXCSUM;
@@ -297,7 +756,8 @@
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 
 	netif_keep_dst(dev);
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+	eth_hw_addr_random(dev);
 }
 
 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
@@ -305,6 +765,8 @@
 	[IFLA_GENEVE_REMOTE]		= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
 	[IFLA_GENEVE_TTL]		= { .type = NLA_U8 },
 	[IFLA_GENEVE_TOS]		= { .type = NLA_U8 },
+	[IFLA_GENEVE_PORT]		= { .type = NLA_U16 },
+	[IFLA_GENEVE_COLLECT_METADATA]	= { .type = NLA_FLAG },
 };
 
 static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -330,68 +792,117 @@
 	return 0;
 }
 
-static int geneve_newlink(struct net *net, struct net_device *dev,
-			 struct nlattr *tb[], struct nlattr *data[])
+static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
+					  __be16 dst_port,
+					  __be32 rem_addr,
+					  u8 vni[],
+					  bool *tun_on_same_port,
+					  bool *tun_collect_md)
+{
+	struct geneve_dev *geneve, *t;
+
+	*tun_on_same_port = false;
+	*tun_collect_md = false;
+	t = NULL;
+	list_for_each_entry(geneve, &gn->geneve_list, next) {
+		if (geneve->dst_port == dst_port) {
+			*tun_collect_md = geneve->collect_md;
+			*tun_on_same_port = true;
+		}
+		if (!memcmp(vni, geneve->vni, sizeof(geneve->vni)) &&
+		    rem_addr == geneve->remote.sin_addr.s_addr &&
+		    dst_port == geneve->dst_port)
+			t = geneve;
+	}
+	return t;
+}
+
+static int geneve_configure(struct net *net, struct net_device *dev,
+			    __be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
+			    __u16 dst_port, bool metadata)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	struct geneve_dev *dummy, *geneve = netdev_priv(dev);
-	struct hlist_head *vni_list_head;
-	struct sockaddr_in remote;	/* IPv4 address for link partner */
-	__u32 vni, hash;
+	struct geneve_dev *t, *geneve = netdev_priv(dev);
+	bool tun_collect_md, tun_on_same_port;
 	int err;
 
-	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
-		return -EINVAL;
+	if (metadata) {
+		if (rem_addr || vni || tos || ttl)
+			return -EINVAL;
+	}
 
 	geneve->net = net;
 	geneve->dev = dev;
 
-	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
 	geneve->vni[0] = (vni & 0x00ff0000) >> 16;
 	geneve->vni[1] = (vni & 0x0000ff00) >> 8;
 	geneve->vni[2] =  vni & 0x000000ff;
 
-	geneve->remote.sin_addr.s_addr =
-		nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+	geneve->remote.sin_addr.s_addr = rem_addr;
 	if (IN_MULTICAST(ntohl(geneve->remote.sin_addr.s_addr)))
 		return -EINVAL;
 
-	remote = geneve->remote;
-	hash = geneve_net_vni_hash(geneve->vni);
-	vni_list_head = &gn->vni_list[hash];
-	hlist_for_each_entry_rcu(dummy, vni_list_head, hlist) {
-		if (!memcmp(geneve->vni, dummy->vni, sizeof(dummy->vni)) &&
-		    !memcmp(&remote, &dummy->remote, sizeof(dummy->remote)))
-			return -EBUSY;
-	}
+	geneve->ttl = ttl;
+	geneve->tos = tos;
+	geneve->dst_port = htons(dst_port);
+	geneve->collect_md = metadata;
 
-	if (tb[IFLA_ADDRESS] == NULL)
-		eth_hw_addr_random(dev);
+	t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni,
+			    &tun_on_same_port, &tun_collect_md);
+	if (t)
+		return -EBUSY;
+
+	if (metadata) {
+		if (tun_on_same_port)
+			return -EPERM;
+	} else {
+		if (tun_collect_md)
+			return -EPERM;
+	}
 
 	err = register_netdevice(dev);
 	if (err)
 		return err;
 
+	list_add(&geneve->next, &gn->geneve_list);
+	return 0;
+}
+
+static int geneve_newlink(struct net *net, struct net_device *dev,
+			  struct nlattr *tb[], struct nlattr *data[])
+{
+	__u16 dst_port = GENEVE_UDP_PORT;
+	__u8 ttl = 0, tos = 0;
+	bool metadata = false;
+	__be32 rem_addr;
+	__u32 vni;
+
+	if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE])
+		return -EINVAL;
+
+	vni = nla_get_u32(data[IFLA_GENEVE_ID]);
+	rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]);
+
 	if (data[IFLA_GENEVE_TTL])
-		geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+		ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
 
 	if (data[IFLA_GENEVE_TOS])
-		geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+		tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
 
-	list_add(&geneve->next, &gn->geneve_list);
+	if (data[IFLA_GENEVE_PORT])
+		dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]);
 
-	hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
+	if (data[IFLA_GENEVE_COLLECT_METADATA])
+		metadata = true;
 
-	return 0;
+	return geneve_configure(net, dev, rem_addr, vni,
+				ttl, tos, dst_port, metadata);
 }
 
 static void geneve_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct geneve_dev *geneve = netdev_priv(dev);
 
-	if (!hlist_unhashed(&geneve->hlist))
-		hlist_del_rcu(&geneve->hlist);
-
 	list_del(&geneve->next);
 	unregister_netdevice_queue(dev, head);
 }
@@ -402,6 +913,8 @@
 		nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TTL */
 		nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
+		nla_total_size(sizeof(__u16)) +  /* IFLA_GENEVE_PORT */
+		nla_total_size(0) +	 /* IFLA_GENEVE_COLLECT_METADATA */
 		0;
 }
 
@@ -422,6 +935,14 @@
 	    nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
 		goto nla_put_failure;
 
+	if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port)))
+		goto nla_put_failure;
+
+	if (geneve->collect_md) {
+		if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
+			goto nla_put_failure;
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -441,16 +962,34 @@
 	.fill_info	= geneve_fill_info,
 };
 
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+					u8 name_assign_type, u16 dst_port)
+{
+	struct nlattr *tb[IFLA_MAX + 1];
+	struct net_device *dev;
+	int err;
+
+	memset(tb, 0, sizeof(tb));
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &geneve_link_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true);
+	if (err) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
+	return dev;
+}
+EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
+
 static __net_init int geneve_init_net(struct net *net)
 {
 	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	unsigned int h;
 
 	INIT_LIST_HEAD(&gn->geneve_list);
-
-	for (h = 0; h < VNI_HASH_SIZE; ++h)
-		INIT_HLIST_HEAD(&gn->vni_list[h]);
-
+	INIT_LIST_HEAD(&gn->sock_list);
 	return 0;
 }
 
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 83c7cce..72c9f1f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -638,7 +638,7 @@
 #define GETTICK(x)                                                \
 ({                                                                \
 	if (cpu_has_tsc)                                          \
-		rdtscl(x);                                        \
+		x = (unsigned int)rdtsc();		  \
 })
 #else /* __i386__ */
 #define GETTICK(x)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index dd45440..5fa98f5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -162,6 +162,7 @@
 	bool link_state;	/* 0 - link up, 1 - link down */
 	int  ring_size;
 	u32  max_num_vrss_chns;
+	u32  num_chn;
 };
 
 enum rndis_device_state {
@@ -541,6 +542,29 @@
 	struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+struct nvsp_4_send_vf_association {
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 allocated;
+
+	/* Serial number of the VF to team with */
+	u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+	NVSP_DATAPATH_SYNTHETIC = 0,
+	NVSP_DATAPATH_VF,
+	NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+	u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+	struct nvsp_4_send_vf_association vf_assoc;
+	struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
 enum nvsp_subchannel_operation {
 	NVSP_SUBCHANNEL_NONE = 0,
 	NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +602,7 @@
 	union nvsp_message_init_uber init_msg;
 	union nvsp_1_message_uber v1_msg;
 	union nvsp_2_message_uber v2_msg;
+	union nvsp_4_message_uber v4_msg;
 	union nvsp_5_message_uber v5_msg;
 } __packed;
 
@@ -589,6 +614,7 @@
 
 
 #define NETVSC_MTU 65536
+#define NETVSC_MTU_MIN 68
 
 #define NETVSC_RECEIVE_BUFFER_SIZE		(1024*1024*16)	/* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY	(1024*1024*15)  /* 15MB */
@@ -670,6 +696,8 @@
 	u32 send_table[VRSS_SEND_TAB_SIZE];
 	u32 max_chn;
 	u32 num_chn;
+	spinlock_t sc_lock; /* Protects num_sc_offered variable */
+	u32 num_sc_offered;
 	atomic_t queue_sends[NR_CPUS];
 
 	/* Holds rndis device info */
@@ -688,6 +716,11 @@
 
 	/* The net device context */
 	struct net_device_context *nd_ctx;
+
+	/* 1: allocated, serial number is valid. 0: not allocated */
+	u32 vf_alloc;
+	/* Serial number of the VF to team with */
+	u32 vf_serial;
 };
 
 /* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 23126a7..51e4c0f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -453,13 +453,16 @@
 	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
 		return 0;
 
-	/* NVSPv2 only: Send NDIS config */
+	/* NVSPv2 or later: Send NDIS config */
 	memset(init_packet, 0, sizeof(struct nvsp_message));
 	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
 	init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
 						       ETH_HLEN;
 	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
+	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+		init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
 	ret = vmbus_sendpacket(device->channel, init_packet,
 				sizeof(struct nvsp_message),
 				(unsigned long)init_packet,
@@ -1064,11 +1067,10 @@
 
 
 static void netvsc_send_table(struct hv_device *hdev,
-			      struct vmpacket_descriptor *vmpkt)
+			      struct nvsp_message *nvmsg)
 {
 	struct netvsc_device *nvscdev;
 	struct net_device *ndev;
-	struct nvsp_message *nvmsg;
 	int i;
 	u32 count, *tab;
 
@@ -1077,12 +1079,6 @@
 		return;
 	ndev = nvscdev->ndev;
 
-	nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
-					(vmpkt->offset8 << 3));
-
-	if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
-		return;
-
 	count = nvmsg->msg.v5_msg.send_table.count;
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@
 		nvscdev->send_table[i] = tab[i];
 }
 
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+			   struct nvsp_message *nvmsg)
+{
+	nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+	nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+					 struct netvsc_device *nvdev,
+					 struct nvsp_message *nvmsg)
+{
+	switch (nvmsg->hdr.msg_type) {
+	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+		netvsc_send_table(hdev, nvmsg);
+		break;
+
+	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+		netvsc_send_vf(nvdev, nvmsg);
+		break;
+	}
+}
+
 void netvsc_channel_cb(void *context)
 {
 	int ret;
@@ -1108,6 +1126,7 @@
 	unsigned char *buffer;
 	int bufferlen = NETVSC_PACKET_SIZE;
 	struct net_device *ndev;
+	struct nvsp_message *nvmsg;
 
 	if (channel->primary_channel != NULL)
 		device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@
 		if (ret == 0) {
 			if (bytes_recvd > 0) {
 				desc = (struct vmpacket_descriptor *)buffer;
+				nvmsg = (struct nvsp_message *)((unsigned long)
+					 desc + (desc->offset8 << 3));
 				switch (desc->type) {
 				case VM_PKT_COMP:
 					netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@
 					break;
 
 				case VM_PKT_DATA_INBAND:
-					netvsc_send_table(device, desc);
+					netvsc_receive_inband(device,
+							      net_device,
+							      nvmsg);
 					break;
 
 				default:
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 358475e..409b48e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -106,7 +106,7 @@
 		return ret;
 	}
 
-	netif_tx_start_all_queues(net);
+	netif_tx_wake_all_queues(net);
 
 	nvdev = hv_get_drvdata(device_obj);
 	rdev = nvdev->extension;
@@ -120,15 +120,56 @@
 {
 	struct net_device_context *net_device_ctx = netdev_priv(net);
 	struct hv_device *device_obj = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
 	int ret;
+	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+	struct vmbus_channel *chn;
 
 	netif_tx_disable(net);
 
 	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
 	cancel_work_sync(&net_device_ctx->work);
 	ret = rndis_filter_close(device_obj);
-	if (ret != 0)
+	if (ret != 0) {
 		netdev_err(net, "unable to close device (ret %d).\n", ret);
+		return ret;
+	}
+
+	/* Ensure pending bytes in ring are read */
+	while (true) {
+		aread = 0;
+		for (i = 0; i < nvdev->num_chn; i++) {
+			chn = nvdev->chn_table[i];
+			if (!chn)
+				continue;
+
+			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
+						     &awrite);
+
+			if (aread)
+				break;
+
+			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
+						     &awrite);
+
+			if (aread)
+				break;
+		}
+
+		retry++;
+		if (retry > retry_max || aread == 0)
+			break;
+
+		msleep(msec);
+
+		if (msec < 1000)
+			msec *= 2;
+	}
+
+	if (aread) {
+		netdev_err(net, "Ring buffer not empty after closing rndis\n");
+		ret = -ETIMEDOUT;
+	}
 
 	return ret;
 }
@@ -198,7 +239,7 @@
 	struct flow_keys flow;
 	int data_len;
 
-	if (!skb_flow_dissect_flow_keys(skb, &flow) ||
+	if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
 	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
 	      flow.basic.n_proto == htons(ETH_P_IPV6)))
 		return false;
@@ -729,6 +770,104 @@
 	}
 }
 
+static int netvsc_set_channels(struct net_device *net,
+			       struct ethtool_channels *channels)
+{
+	struct net_device_context *net_device_ctx = netdev_priv(net);
+	struct hv_device *dev = net_device_ctx->device_ctx;
+	struct netvsc_device *nvdev = hv_get_drvdata(dev);
+	struct netvsc_device_info device_info;
+	u32 num_chn;
+	u32 max_chn;
+	int ret = 0;
+	bool recovering = false;
+
+	if (!nvdev || nvdev->destroy)
+		return -ENODEV;
+
+	num_chn = nvdev->num_chn;
+	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
+
+	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
+		pr_info("vRSS unsupported before NVSP Version 5\n");
+		return -EINVAL;
+	}
+
+	/* We do not support rx, tx, or other */
+	if (!channels ||
+	    channels->rx_count ||
+	    channels->tx_count ||
+	    channels->other_count ||
+	    (channels->combined_count < 1))
+		return -EINVAL;
+
+	if (channels->combined_count > max_chn) {
+		pr_info("combined channels too high, using %d\n", max_chn);
+		channels->combined_count = max_chn;
+	}
+
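+	/* Tear down the RNDIS device and re-add it with the new count */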
+	ret = netvsc_close(net);
+	if (ret)
+		goto out;
+
+ do_set:
+	nvdev->start_remove = true;
+	rndis_filter_device_remove(dev);
+
+	nvdev->num_chn = channels->combined_count;
+
+	net_device_ctx->device_ctx = dev;
+	hv_set_drvdata(dev, net);
+
+	memset(&device_info, 0, sizeof(device_info));
+	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
+	device_info.ring_size = ring_size;
+	device_info.max_num_vrss_chns = max_num_vrss_chns;
+
+	ret = rndis_filter_device_add(dev, &device_info);
+	if (ret) {
+		if (recovering) {
+			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
+			return ret;
+		}
+		goto recover;
+	}
+
+	nvdev = hv_get_drvdata(dev);
+
+	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
+	if (ret) {
+		if (recovering) {
+			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
+			return ret;
+		}
+		goto recover;
+	}
+
+	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
+	if (ret) {
+		if (recovering) {
+			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
+			return ret;
+		}
+		goto recover;
+	}
+
+ out:
+	netvsc_open(net);
+
+	return ret;
+
+ recover:
+	/* If the above failed, we attempt to recover through the same
+	 * process but with the original number of channels.
+	 */
+	netdev_err(net, "could not set channels, recovering\n");
+	recovering = true;
+	channels->combined_count = num_chn;
+	goto do_set;
+}
+
 static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 {
 	struct net_device_context *ndevctx = netdev_priv(ndev);
@@ -736,6 +875,7 @@
 	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
 	struct netvsc_device_info device_info;
 	int limit = ETH_DATA_LEN;
+	int ret = 0;
 
 	if (nvdev == NULL || nvdev->destroy)
 		return -ENODEV;
@@ -743,25 +883,31 @@
 	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
 		limit = NETVSC_MTU - ETH_HLEN;
 
-	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
-	if (mtu < ETH_DATA_LEN || mtu > limit)
+	if (mtu < NETVSC_MTU_MIN || mtu > limit)
 		return -EINVAL;
 
+	ret = netvsc_close(ndev);
+	if (ret)
+		goto out;
+
 	nvdev->start_remove = true;
-	cancel_work_sync(&ndevctx->work);
-	netif_tx_disable(ndev);
 	rndis_filter_device_remove(hdev);
 
 	ndev->mtu = mtu;
 
 	ndevctx->device_ctx = hdev;
 	hv_set_drvdata(hdev, ndev);
+
+	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
+	device_info.num_chn = nvdev->num_chn;
 	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	rndis_filter_device_add(hdev, &device_info);
-	netif_tx_wake_all_queues(ndev);
 
-	return 0;
+out:
+	netvsc_open(ndev);
+
+	return ret;
 }
 
 static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
@@ -844,6 +990,7 @@
 	.get_drvinfo	= netvsc_get_drvinfo,
 	.get_link	= ethtool_op_get_link,
 	.get_channels   = netvsc_get_channels,
+	.set_channels   = netvsc_set_channels,
 };
 
 static const struct net_device_ops device_ops = {
@@ -977,6 +1124,7 @@
 	net->needed_headroom = max_needed_headroom;
 
 	/* Notify the netvsc driver of the new device */
+	memset(&device_info, 0, sizeof(device_info));
 	device_info.ring_size = ring_size;
 	device_info.max_num_vrss_chns = max_num_vrss_chns;
 	ret = rndis_filter_device_add(dev, &device_info);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 236aeb7..5931a79 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -984,9 +984,16 @@
 	struct netvsc_device *nvscdev;
 	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
 	int ret;
+	unsigned long flags;
 
 	nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
 
+	spin_lock_irqsave(&nvscdev->sc_lock, flags);
+	nvscdev->num_sc_offered--;
+	spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+	if (nvscdev->num_sc_offered == 0)
+		complete(&nvscdev->channel_init_wait);
+
 	if (chn_index >= nvscdev->num_chn)
 		return;
 
@@ -1015,8 +1022,10 @@
 	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
 	u32 mtu, size;
 	u32 num_rss_qs;
+	u32 sc_delta;
 	const struct cpumask *node_cpu_mask;
 	u32 num_possible_rss_qs;
+	unsigned long flags;
 
 	rndis_device = get_rndis_device();
 	if (!rndis_device)
@@ -1039,6 +1048,8 @@
 	net_device->max_chn = 1;
 	net_device->num_chn = 1;
 
+	spin_lock_init(&net_device->sc_lock);
+
 	net_device->extension = rndis_device;
 	rndis_device->net_dev = net_device;
 
@@ -1054,7 +1065,7 @@
 	ret = rndis_filter_query_device(rndis_device,
 					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
 					&mtu, &size);
-	if (ret == 0 && size == sizeof(u32))
+	if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
 		net_device->ndev->mtu = mtu;
 
 	/* Get the mac address */
@@ -1114,7 +1125,15 @@
 	 */
 	node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
 	num_possible_rss_qs = cpumask_weight(node_cpu_mask);
-	net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+	/* We will use the given number of channels if available. */
+	if (device_info->num_chn && device_info->num_chn < net_device->max_chn)
+		net_device->num_chn = device_info->num_chn;
+	else
+		net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
+	num_rss_qs = net_device->num_chn - 1;
+	net_device->num_sc_offered = num_rss_qs;
 
 	if (net_device->num_chn == 1)
 		goto out;
@@ -1157,11 +1176,25 @@
 
 	ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
+	/*
+	 * Wait for the host to send us the sub-channel offers.
+	 */
+	spin_lock_irqsave(&net_device->sc_lock, flags);
+	sc_delta = num_rss_qs - (net_device->num_chn - 1);
+	net_device->num_sc_offered -= sc_delta;
+	spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+	while (net_device->num_sc_offered != 0) {
+		t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ);
+		if (t == 0)
+			WARN(1, "Netvsc: Waiting for sub-channel processing");
+	}
 out:
 	if (ret) {
 		net_device->max_chn = 1;
 		net_device->num_chn = 1;
 	}
+
 	return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index f7bd9f3..6422caa 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -97,9 +97,7 @@
 
 	struct at86rf230_state_change irq;
 
-	bool tx_aret;
 	unsigned long cal_timeout;
-	s8 max_frame_retries;
 	bool is_tx;
 	bool is_tx_from_off;
 	u8 tx_retry;
@@ -545,7 +543,9 @@
 	}
 
 	/* Default delay is 1us in the most cases */
-	tim = ktime_set(0, NSEC_PER_USEC);
+	udelay(1);
+	at86rf230_async_state_timer(&ctx->timer);
+	return;
 
 change:
 	hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
@@ -649,7 +649,7 @@
 
 	enable_irq(ctx->irq);
 
-	ieee802154_xmit_complete(lp->hw, lp->tx_skb, !lp->tx_aret);
+	ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
 }
 
 static void
@@ -758,17 +758,10 @@
 {
 	if (lp->is_tx) {
 		lp->is_tx = 0;
-
-		if (lp->tx_aret)
-			at86rf230_async_state_change(lp, &lp->irq,
-						     STATE_FORCE_TX_ON,
-						     at86rf230_tx_trac_status,
-						     true);
-		else
-			at86rf230_async_state_change(lp, &lp->irq,
-						     STATE_RX_AACK_ON,
-						     at86rf230_tx_complete,
-						     true);
+		at86rf230_async_state_change(lp, &lp->irq,
+					     STATE_FORCE_TX_ON,
+					     at86rf230_tx_trac_status,
+					     true);
 	} else {
 		at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
 					 at86rf230_rx_trac_check, true);
@@ -874,24 +867,16 @@
 	struct at86rf230_state_change *ctx = context;
 	struct at86rf230_local *lp = ctx->lp;
 
-	/* In ARET mode we need to go into STATE_TX_ARET_ON after we
-	 * are in STATE_TX_ON. The pfad differs here, so we change
-	 * the complete handler.
-	 */
-	if (lp->tx_aret) {
-		if (lp->is_tx_from_off) {
-			lp->is_tx_from_off = false;
-			at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
-						     at86rf230_write_frame,
-						     false);
-		} else {
-			at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-						     at86rf230_xmit_tx_on,
-						     false);
-		}
+	/* check if we change from off state */
+	if (lp->is_tx_from_off) {
+		lp->is_tx_from_off = false;
+		at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+					     at86rf230_write_frame,
+					     false);
 	} else {
 		at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-					     at86rf230_write_frame, false);
+					     at86rf230_xmit_tx_on,
+					     false);
 	}
 }
 
@@ -1265,15 +1250,8 @@
 at86rf230_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
 {
 	struct at86rf230_local *lp = hw->priv;
-	int rc = 0;
 
-	lp->tx_aret = retries >= 0;
-	lp->max_frame_retries = retries;
-
-	if (retries >= 0)
-		rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
-
-	return rc;
+	return at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
 }
 
 static int
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index b6fc295..c5b54a1 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -833,6 +833,7 @@
 		if (!spi_pdata)
 			return -ENOENT;
 		*pdata = *spi_pdata;
+		priv->fifo_pin = pdata->fifo;
 		return 0;
 	}
 
@@ -1151,7 +1152,6 @@
 static struct spi_driver cc2520_driver = {
 	.driver = {
 		.name = "cc2520",
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(cc2520_of_ids),
 	},
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 2549760..997724b 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -812,7 +812,6 @@
 static struct spi_driver mrf24j40_driver = {
 	.driver = {
 		.name = "mrf24j40",
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
 	},
 	.id_table = mrf24j40_ids,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 94570aa..cc56fac 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -38,69 +38,68 @@
 #include <net/net_namespace.h>
 
 #define TX_Q_LIMIT    32
-struct ifb_private {
+struct ifb_q_private {
+	struct net_device	*dev;
 	struct tasklet_struct   ifb_tasklet;
-	int     tasklet_pending;
-
-	struct u64_stats_sync	rsync;
+	int			tasklet_pending;
+	int			txqnum;
 	struct sk_buff_head     rq;
-	u64 rx_packets;
-	u64 rx_bytes;
+	u64			rx_packets;
+	u64			rx_bytes;
+	struct u64_stats_sync	rsync;
 
 	struct u64_stats_sync	tsync;
+	u64			tx_packets;
+	u64			tx_bytes;
 	struct sk_buff_head     tq;
-	u64 tx_packets;
-	u64 tx_bytes;
+} ____cacheline_aligned_in_smp;
+
+struct ifb_dev_private {
+	struct ifb_q_private *tx_private;
 };
 
-static int numifbs = 2;
-
-static void ri_tasklet(unsigned long dev);
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
 static int ifb_open(struct net_device *dev);
 static int ifb_close(struct net_device *dev);
 
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
 {
-	struct net_device *_dev = (struct net_device *)dev;
-	struct ifb_private *dp = netdev_priv(_dev);
+	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
 	struct netdev_queue *txq;
 	struct sk_buff *skb;
 
-	txq = netdev_get_tx_queue(_dev, 0);
-	if ((skb = skb_peek(&dp->tq)) == NULL) {
-		if (__netif_tx_trylock(txq)) {
-			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
-			__netif_tx_unlock(txq);
-		} else {
-			/* reschedule */
+	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+	skb = skb_peek(&txp->tq);
+	if (!skb) {
+		if (!__netif_tx_trylock(txq))
 			goto resched;
-		}
+		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+		__netif_tx_unlock(txq);
 	}
 
-	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
 		u32 from = G_TC_FROM(skb->tc_verd);
 
 		skb->tc_verd = 0;
 		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
 
-		u64_stats_update_begin(&dp->tsync);
-		dp->tx_packets++;
-		dp->tx_bytes += skb->len;
-		u64_stats_update_end(&dp->tsync);
+		u64_stats_update_begin(&txp->tsync);
+		txp->tx_packets++;
+		txp->tx_bytes += skb->len;
+		u64_stats_update_end(&txp->tsync);
 
 		rcu_read_lock();
-		skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
 		if (!skb->dev) {
 			rcu_read_unlock();
 			dev_kfree_skb(skb);
-			_dev->stats.tx_dropped++;
-			if (skb_queue_len(&dp->tq) != 0)
+			txp->dev->stats.tx_dropped++;
+			if (skb_queue_len(&txp->tq) != 0)
 				goto resched;
 			break;
 		}
 		rcu_read_unlock();
-		skb->skb_iif = _dev->ifindex;
+		skb->skb_iif = txp->dev->ifindex;
 
 		if (from & AT_EGRESS) {
 			dev_queue_xmit(skb);
@@ -112,10 +111,11 @@
 	}
 
 	if (__netif_tx_trylock(txq)) {
-		if ((skb = skb_peek(&dp->rq)) == NULL) {
-			dp->tasklet_pending = 0;
-			if (netif_queue_stopped(_dev))
-				netif_wake_queue(_dev);
+		skb = skb_peek(&txp->rq);
+		if (!skb) {
+			txp->tasklet_pending = 0;
+			if (netif_tx_queue_stopped(txq))
+				netif_tx_wake_queue(txq);
 		} else {
 			__netif_tx_unlock(txq);
 			goto resched;
@@ -123,8 +123,8 @@
 		__netif_tx_unlock(txq);
 	} else {
 resched:
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
+		txp->tasklet_pending = 1;
+		tasklet_schedule(&txp->ifb_tasklet);
 	}
 
 }
@@ -132,29 +132,58 @@
 static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
 					     struct rtnl_link_stats64 *stats)
 {
-	struct ifb_private *dp = netdev_priv(dev);
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp = dp->tx_private;
 	unsigned int start;
+	u64 packets, bytes;
+	int i;
 
-	do {
-		start = u64_stats_fetch_begin_irq(&dp->rsync);
-		stats->rx_packets = dp->rx_packets;
-		stats->rx_bytes = dp->rx_bytes;
-	} while (u64_stats_fetch_retry_irq(&dp->rsync, start));
+	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
+		do {
+			start = u64_stats_fetch_begin_irq(&txp->rsync);
+			packets = txp->rx_packets;
+			bytes = txp->rx_bytes;
+		} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+		stats->rx_packets += packets;
+		stats->rx_bytes += bytes;
 
-	do {
-		start = u64_stats_fetch_begin_irq(&dp->tsync);
-
-		stats->tx_packets = dp->tx_packets;
-		stats->tx_bytes = dp->tx_bytes;
-
-	} while (u64_stats_fetch_retry_irq(&dp->tsync, start));
-
+		do {
+			start = u64_stats_fetch_begin_irq(&txp->tsync);
+			packets = txp->tx_packets;
+			bytes = txp->tx_bytes;
+		} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+		stats->tx_packets += packets;
+		stats->tx_bytes += bytes;
+	}
 	stats->rx_dropped = dev->stats.rx_dropped;
 	stats->tx_dropped = dev->stats.tx_dropped;
 
 	return stats;
 }
 
+static int ifb_dev_init(struct net_device *dev)
+{
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp;
+	int i;
+
+	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+	if (!txp)
+		return -ENOMEM;
+	dp->tx_private = txp;
+	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
+		txp->txqnum = i;
+		txp->dev = dev;
+		__skb_queue_head_init(&txp->rq);
+		__skb_queue_head_init(&txp->tq);
+		u64_stats_init(&txp->rsync);
+		u64_stats_init(&txp->tsync);
+		tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+			     (unsigned long)txp);
+		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+	}
+	return 0;
+}
 
 static const struct net_device_ops ifb_netdev_ops = {
 	.ndo_open	= ifb_open,
@@ -162,6 +191,7 @@
 	.ndo_get_stats64 = ifb_stats64,
 	.ndo_start_xmit	= ifb_xmit,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_init	= ifb_dev_init,
 };
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST	| \
@@ -169,10 +199,24 @@
 		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX		| \
 		      NETIF_F_HW_VLAN_STAG_TX)
 
+static void ifb_dev_free(struct net_device *dev)
+{
+	struct ifb_dev_private *dp = netdev_priv(dev);
+	struct ifb_q_private *txp = dp->tx_private;
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
+		tasklet_kill(&txp->ifb_tasklet);
+		__skb_queue_purge(&txp->rq);
+		__skb_queue_purge(&txp->tq);
+	}
+	kfree(dp->tx_private);
+	free_netdev(dev);
+}
+
 static void ifb_setup(struct net_device *dev)
 {
 	/* Initialize the device structure. */
-	dev->destructor = free_netdev;
 	dev->netdev_ops = &ifb_netdev_ops;
 
 	/* Fill in device structure with ethernet-generic values. */
@@ -188,17 +232,19 @@
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	netif_keep_dst(dev);
 	eth_hw_addr_random(dev);
+	dev->destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
+	struct ifb_dev_private *dp = netdev_priv(dev);
 	u32 from = G_TC_FROM(skb->tc_verd);
+	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
 
-	u64_stats_update_begin(&dp->rsync);
-	dp->rx_packets++;
-	dp->rx_bytes += skb->len;
-	u64_stats_update_end(&dp->rsync);
+	u64_stats_update_begin(&txp->rsync);
+	txp->rx_packets++;
+	txp->rx_bytes += skb->len;
+	u64_stats_update_end(&txp->rsync);
 
 	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
 		dev_kfree_skb(skb);
@@ -206,14 +252,13 @@
 		return NETDEV_TX_OK;
 	}
 
-	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
-		netif_stop_queue(dev);
-	}
+	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
 
-	__skb_queue_tail(&dp->rq, skb);
-	if (!dp->tasklet_pending) {
-		dp->tasklet_pending = 1;
-		tasklet_schedule(&dp->ifb_tasklet);
+	__skb_queue_tail(&txp->rq, skb);
+	if (!txp->tasklet_pending) {
+		txp->tasklet_pending = 1;
+		tasklet_schedule(&txp->ifb_tasklet);
 	}
 
 	return NETDEV_TX_OK;
@@ -221,24 +266,13 @@
 
 static int ifb_close(struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
-
-	tasklet_kill(&dp->ifb_tasklet);
-	netif_stop_queue(dev);
-	__skb_queue_purge(&dp->rq);
-	__skb_queue_purge(&dp->tq);
+	netif_tx_stop_all_queues(dev);
 	return 0;
 }
 
 static int ifb_open(struct net_device *dev)
 {
-	struct ifb_private *dp = netdev_priv(dev);
-
-	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-	__skb_queue_head_init(&dp->rq);
-	__skb_queue_head_init(&dp->tq);
-	netif_start_queue(dev);
-
+	netif_tx_start_all_queues(dev);
 	return 0;
 }
 
@@ -255,31 +289,30 @@
 
 static struct rtnl_link_ops ifb_link_ops __read_mostly = {
 	.kind		= "ifb",
-	.priv_size	= sizeof(struct ifb_private),
+	.priv_size	= sizeof(struct ifb_dev_private),
 	.setup		= ifb_setup,
 	.validate	= ifb_validate,
 };
 
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
 module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 static int __init ifb_init_one(int index)
 {
 	struct net_device *dev_ifb;
-	struct ifb_private *dp;
 	int err;
 
-	dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+	dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
 			       NET_NAME_UNKNOWN, ifb_setup);
 
 	if (!dev_ifb)
 		return -ENOMEM;
 
-	dp = netdev_priv(dev_ifb);
-	u64_stats_init(&dp->rsync);
-	u64_stats_init(&dp->tsync);
-
 	dev_ifb->rtnl_link_ops = &ifb_link_ops;
 	err = register_netdevice(dev_ifb);
 	if (err < 0)
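The ifb rework above replaces the single ifb_private block with one ifb_q_private per transmit queue and folds the per-queue counters together in ifb_stats64(). A minimal sketch of that reader pattern, assuming a hypothetical q_stats counter block (the names are illustrative, not part of the patch):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-queue counter block, mirroring ifb_q_private. */
struct q_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* Snapshot one queue's counters without blocking the writer side
 * (the xmit path / tasklet), retrying if an update raced with us.
 */
static void q_stats_fold(const struct q_stats *q, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&q->syncp);
		*packets = q->packets;
		*bytes   = q->bytes;
	} while (u64_stats_fetch_retry_irq(&q->syncp, start));
}

The caller then sums the snapshots over dev->num_tx_queues, exactly as the new ifb_stats64() does.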
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 20b58bd..a9268db 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -520,12 +520,11 @@
 	ether_setup(dev);
 
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
-	dev->priv_flags |= IFF_UNICAST_FLT;
+	dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
 	dev->netdev_ops = &ipvlan_netdev_ops;
 	dev->destructor = free_netdev;
 	dev->header_ops = &ipvlan_header_ops;
 	dev->ethtool_ops = &ipvlan_ethtool_ops;
-	dev->tx_queue_len = 0;
 }
 
 static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c76283c..dc7d970 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -165,10 +165,9 @@
 	dev->mtu		= 64 * 1024;
 	dev->hard_header_len	= ETH_HLEN;	/* 14	*/
 	dev->addr_len		= ETH_ALEN;	/* 6	*/
-	dev->tx_queue_len	= 0;
 	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
 	dev->flags		= IFF_LOOPBACK;
-	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 	netif_keep_dst(dev);
 	dev->hw_features	= NETIF_F_ALL_TSO | NETIF_F_UFO;
 	dev->features 		= NETIF_F_SG | NETIF_F_FRAGLIST
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 9f59f17..47da435 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1047,6 +1047,7 @@
 	.ndo_netpoll_cleanup	= macvlan_dev_netpoll_cleanup,
 #endif
 	.ndo_get_iflink		= macvlan_dev_get_iflink,
+	.ndo_features_check	= passthru_features_check,
 };
 
 void macvlan_common_setup(struct net_device *dev)
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index 34924df..7b7c70e 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -130,7 +130,7 @@
 static void nlmon_setup(struct net_device *dev)
 {
 	dev->type = ARPHRD_NETLINK;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	dev->netdev_ops	= &nlmon_ops;
 	dev->ethtool_ops = &nlmon_ethtool_ops;
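The ipvlan, loopback and nlmon hunks above (and team further down) stop forcing tx_queue_len to zero and set IFF_NO_QUEUE instead, so the device keeps a sane default queue length while the core still attaches the no-op qdisc. A minimal sketch of the converted setup callback (the device itself is illustrative):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Previously: dev->tx_queue_len = 0;
	 * Now: advertise that no queueing is wanted and leave
	 * tx_queue_len at its default value.
	 */
	dev->priv_flags |= IFF_NO_QUEUE;
}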
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cb86d7a..c07030d 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -14,6 +14,11 @@
 
 comment "MII PHY device drivers"
 
+config AQUANTIA_PHY
+        tristate "Drivers for the Aquantia PHYs"
+        ---help---
+          Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
 config AT803X_PHY
 	tristate "Drivers for Atheros AT803X PHYs"
 	---help---
@@ -54,6 +59,11 @@
         ---help---
           Currently supports the vsc8244
 
+config TERANETICS_PHY
+        tristate "Drivers for the Teranetics PHYs"
+        ---help---
+          Currently supports the Teranetics TN2020
+
 config SMSC_PHY
 	tristate "Drivers for SMSC PHYs"
 	---help---
@@ -145,13 +155,13 @@
 	  will be called mdio-gpio.
 
 config MDIO_OCTEON
-	tristate "Support for MDIO buses on Octeon SOCs"
-	depends on CAVIUM_OCTEON_SOC
-	default y
+	tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
+	depends on 64BIT
 	help
 
-	  This module provides a driver for the Octeon MDIO busses.
-	  It is required by the Octeon Ethernet device drivers.
+	  This module provides a driver for the Octeon and ThunderX MDIO
+	  busses. It is required by the Octeon and ThunderX ethernet device
+	  drivers.
 
 	  If in doubt, say Y.
 
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index fcc25a0..9bb1033 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -3,12 +3,14 @@
 libphy-objs			:= phy.o phy_device.o mdio_bus.o
 
 obj-$(CONFIG_PHYLIB)		+= libphy.o
+obj-$(CONFIG_AQUANTIA_PHY)	+= aquantia.o
 obj-$(CONFIG_MARVELL_PHY)	+= marvell.o
 obj-$(CONFIG_DAVICOM_PHY)	+= davicom.o
 obj-$(CONFIG_CICADA_PHY)	+= cicada.o
 obj-$(CONFIG_LXT_PHY)		+= lxt.o
 obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
 obj-$(CONFIG_SMSC_PHY)		+= smsc.o
+obj-$(CONFIG_TERANETICS_PHY)	+= teranetics.o
 obj-$(CONFIG_VITESSE_PHY)	+= vitesse.o
 obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)	+= bcm63xx.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
new file mode 100644
index 0000000..d6111af
--- /dev/null
+++ b/drivers/net/phy/aquantia.c
@@ -0,0 +1,201 @@
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+
+#define PHY_ID_AQ1202	0x03a1b445
+#define PHY_ID_AQ2104	0x03a1b460
+#define PHY_ID_AQR105	0x03a1b4a2
+#define PHY_ID_AQR405	0x03a1b4b0
+
+#define PHY_AQUANTIA_FEATURES	(SUPPORTED_10000baseT_Full | \
+				 SUPPORTED_1000baseT_Full | \
+				 SUPPORTED_100baseT_Full | \
+				 PHY_DEFAULT_FEATURES)
+
+static int aquantia_config_aneg(struct phy_device *phydev)
+{
+	phydev->supported = PHY_AQUANTIA_FEATURES;
+	phydev->advertising = phydev->supported;
+
+	return 0;
+}
+
+static int aquantia_aneg_done(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+	return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+}
+
+static int aquantia_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001);
+	} else {
+		err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0);
+	}
+
+	return err;
+}
+
+static int aquantia_ack_interrupt(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01);
+	return (reg < 0) ? reg : 0;
+}
+
+static int aquantia_read_status(struct phy_device *phydev)
+{
+	int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+	if (reg & MDIO_STAT1_LSTATUS)
+		phydev->link = 1;
+	else
+		phydev->link = 0;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+	mdelay(10);
+	reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+
+	switch (reg) {
+	case 0x9:
+		phydev->speed = SPEED_2500;
+		break;
+	case 0x5:
+		phydev->speed = SPEED_1000;
+		break;
+	case 0x3:
+		phydev->speed = SPEED_100;
+		break;
+	case 0x7:
+	default:
+		phydev->speed = SPEED_10000;
+		break;
+	}
+	phydev->duplex = DUPLEX_FULL;
+
+	return 0;
+}
+
+static struct phy_driver aquantia_driver[] = {
+{
+	.phy_id		= PHY_ID_AQ1202,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQ1202",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+{
+	.phy_id		= PHY_ID_AQ2104,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQ2104",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+{
+	.phy_id		= PHY_ID_AQR105,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQR105",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+{
+	.phy_id		= PHY_ID_AQR405,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Aquantia AQR405",
+	.features	= PHY_AQUANTIA_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.aneg_done	= aquantia_aneg_done,
+	.config_aneg    = aquantia_config_aneg,
+	.config_intr	= aquantia_config_intr,
+	.ack_interrupt	= aquantia_ack_interrupt,
+	.read_status	= aquantia_read_status,
+	.driver		= { .owner = THIS_MODULE,},
+},
+};
+
+static int __init aquantia_init(void)
+{
+	return phy_drivers_register(aquantia_driver,
+				    ARRAY_SIZE(aquantia_driver));
+}
+
+static void __exit aquantia_exit(void)
+{
+	return phy_drivers_unregister(aquantia_driver,
+				      ARRAY_SIZE(aquantia_driver));
+}
+
+module_init(aquantia_init);
+module_exit(aquantia_exit);
+
+static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+	{ PHY_ID_AQ1202, 0xfffffff0 },
+	{ PHY_ID_AQ2104, 0xfffffff0 },
+	{ PHY_ID_AQR105, 0xfffffff0 },
+	{ PHY_ID_AQR405, 0xfffffff0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 00cb41e..185b03c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1449,17 +1449,9 @@
 	info->rx_filters =
 		(1 << HWTSTAMP_FILTER_NONE) |
 		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
 		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-		(1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 	return 0;
 }
 
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8a3bf54..32f1066 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -123,12 +123,8 @@
 	if (ret)
 		return ret;
 
-	ret = of_property_read_u32(of_node, "ti,fifo-depth",
+	return of_property_read_u32(of_node, "ti,fifo-depth",
 				   &dp83867->fifo_depth);
-	if (ret)
-		return ret;
-
-	return 0;
 }
 #else
 static int dp83867_of_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 1960b46..12c7eb2 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -22,6 +22,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/gpio.h>
 
 #define MII_REGS_NUM 29
 
@@ -38,6 +39,7 @@
 	struct fixed_phy_status status;
 	int (*link_update)(struct net_device *, struct fixed_phy_status *);
 	struct list_head node;
+	int link_gpio;
 };
 
 static struct platform_device *pdev;
@@ -52,58 +54,86 @@
 	u16 lpagb = 0;
 	u16 lpa = 0;
 
-	if (fp->status.duplex) {
-		bmcr |= BMCR_FULLDPLX;
+	if (gpio_is_valid(fp->link_gpio))
+		fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
 
+	if (fp->status.duplex) {
 		switch (fp->status.speed) {
 		case 1000:
 			bmsr |= BMSR_ESTATEN;
-			bmcr |= BMCR_SPEED1000;
-			lpagb |= LPA_1000FULL;
 			break;
 		case 100:
 			bmsr |= BMSR_100FULL;
-			bmcr |= BMCR_SPEED100;
-			lpa |= LPA_100FULL;
 			break;
 		case 10:
 			bmsr |= BMSR_10FULL;
-			lpa |= LPA_10FULL;
 			break;
 		default:
-			pr_warn("fixed phy: unknown speed\n");
-			return -EINVAL;
+			break;
 		}
 	} else {
 		switch (fp->status.speed) {
 		case 1000:
 			bmsr |= BMSR_ESTATEN;
-			bmcr |= BMCR_SPEED1000;
-			lpagb |= LPA_1000HALF;
 			break;
 		case 100:
 			bmsr |= BMSR_100HALF;
-			bmcr |= BMCR_SPEED100;
-			lpa |= LPA_100HALF;
 			break;
 		case 10:
 			bmsr |= BMSR_10HALF;
-			lpa |= LPA_10HALF;
 			break;
 		default:
-			pr_warn("fixed phy: unknown speed\n");
-			return -EINVAL;
+			break;
 		}
 	}
 
-	if (fp->status.link)
+	if (fp->status.link) {
 		bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
 
-	if (fp->status.pause)
-		lpa |= LPA_PAUSE_CAP;
+		if (fp->status.duplex) {
+			bmcr |= BMCR_FULLDPLX;
 
-	if (fp->status.asym_pause)
-		lpa |= LPA_PAUSE_ASYM;
+			switch (fp->status.speed) {
+			case 1000:
+				bmcr |= BMCR_SPEED1000;
+				lpagb |= LPA_1000FULL;
+				break;
+			case 100:
+				bmcr |= BMCR_SPEED100;
+				lpa |= LPA_100FULL;
+				break;
+			case 10:
+				lpa |= LPA_10FULL;
+				break;
+			default:
+				pr_warn("fixed phy: unknown speed\n");
+				return -EINVAL;
+			}
+		} else {
+			switch (fp->status.speed) {
+			case 1000:
+				bmcr |= BMCR_SPEED1000;
+				lpagb |= LPA_1000HALF;
+				break;
+			case 100:
+				bmcr |= BMCR_SPEED100;
+				lpa |= LPA_100HALF;
+				break;
+			case 10:
+				lpa |= LPA_10HALF;
+				break;
+			default:
+				pr_warn("fixed phy: unknown speed\n");
+			return -EINVAL;
+			}
+		}
+
+		if (fp->status.pause)
+			lpa |= LPA_PAUSE_CAP;
+
+		if (fp->status.asym_pause)
+			lpa |= LPA_PAUSE_ASYM;
+	}
 
 	fp->regs[MII_PHYSID1] = 0;
 	fp->regs[MII_PHYSID2] = 0;
@@ -213,7 +243,8 @@
 EXPORT_SYMBOL(fixed_phy_update_state);
 
 int fixed_phy_add(unsigned int irq, int phy_addr,
-		  struct fixed_phy_status *status)
+		  struct fixed_phy_status *status,
+		  int link_gpio)
 {
 	int ret;
 	struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -229,15 +260,26 @@
 
 	fp->addr = phy_addr;
 	fp->status = *status;
+	fp->link_gpio = link_gpio;
+
+	if (gpio_is_valid(fp->link_gpio)) {
+		ret = gpio_request_one(fp->link_gpio, GPIOF_DIR_IN,
+				       "fixed-link-gpio-link");
+		if (ret)
+			goto err_regs;
+	}
 
 	ret = fixed_phy_update_regs(fp);
 	if (ret)
-		goto err_regs;
+		goto err_gpio;
 
 	list_add_tail(&fp->node, &fmb->phys);
 
 	return 0;
 
+err_gpio:
+	if (gpio_is_valid(fp->link_gpio))
+		gpio_free(fp->link_gpio);
 err_regs:
 	kfree(fp);
 	return ret;
@@ -252,6 +294,8 @@
 	list_for_each_entry_safe(fp, tmp, &fmb->phys, node) {
 		if (fp->addr == phy_addr) {
 			list_del(&fp->node);
+			if (gpio_is_valid(fp->link_gpio))
+				gpio_free(fp->link_gpio);
 			kfree(fp);
 			return;
 		}
@@ -264,6 +308,7 @@
 
 struct phy_device *fixed_phy_register(unsigned int irq,
 				      struct fixed_phy_status *status,
+				      int link_gpio,
 				      struct device_node *np)
 {
 	struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -280,7 +325,7 @@
 	phy_addr = phy_fixed_addr++;
 	spin_unlock(&phy_fixed_addr_lock);
 
-	ret = fixed_phy_add(PHY_POLL, phy_addr, status);
+	ret = fixed_phy_add(PHY_POLL, phy_addr, status, link_gpio);
 	if (ret < 0)
 		return ERR_PTR(ret);
 
@@ -290,8 +335,30 @@
 		return ERR_PTR(-EINVAL);
 	}
 
+	/* propagate the fixed link values to struct phy_device */
+	phy->link = status->link;
+	if (status->link) {
+		phy->speed = status->speed;
+		phy->duplex = status->duplex;
+		phy->pause = status->pause;
+		phy->asym_pause = status->asym_pause;
+	}
+
 	of_node_get(np);
 	phy->dev.of_node = np;
+	phy->is_pseudo_fixed_link = true;
+
+	switch (status->speed) {
+	case SPEED_1000:
+		phy->supported = PHY_1000BT_FEATURES;
+		break;
+	case SPEED_100:
+		phy->supported = PHY_100BT_FEATURES;
+		break;
+	case SPEED_10:
+	default:
+		phy->supported = PHY_10BT_FEATURES;
+	}
 
 	ret = phy_device_register(phy);
 	if (ret) {
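fixed_phy_add() and fixed_phy_register() gain a link_gpio parameter above; when a valid GPIO is passed, its level is folded into the emulated link status each time the registers are refreshed. A minimal sketch of the updated call, with illustrative fixed-link status values:

#include <linux/phy.h>
#include <linux/phy_fixed.h>

static struct phy_device *example_register_fixed(struct device_node *np,
						 int link_gpio)
{
	struct fixed_phy_status status = {
		.link	= 1,
		.speed	= 1000,
		.duplex	= 1,
	};

	/* Pass -1 (rejected by gpio_is_valid()) when no GPIO mirrors the
	 * link; otherwise the GPIO is requested and sampled by
	 * fixed_phy_update_regs().
	 */
	return fixed_phy_register(PHY_POLL, &status, link_gpio, np);
}

Callers with no GPIO-backed link indication simply pass -1 to keep the old behaviour.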
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f721444..e6897b6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -48,8 +48,11 @@
 #define MII_M1011_IMASK_CLEAR		0x0000
 
 #define MII_M1011_PHY_SCR		0x10
+#define MII_M1011_PHY_SCR_MDI		0x0000
+#define MII_M1011_PHY_SCR_MDI_X		0x0020
 #define MII_M1011_PHY_SCR_AUTO_CROSS	0x0060
 
+#define MII_M1145_PHY_EXT_ADDR_PAGE	0x16
 #define MII_M1145_PHY_EXT_SR		0x1b
 #define MII_M1145_PHY_EXT_CR		0x14
 #define MII_M1145_RGMII_RX_DELAY	0x0080
@@ -159,6 +162,43 @@
 	return err;
 }
 
+static int marvell_set_polarity(struct phy_device *phydev, int polarity)
+{
+	int reg;
+	int err;
+	int val;
+
+	/* get the current settings */
+	reg = phy_read(phydev, MII_M1011_PHY_SCR);
+	if (reg < 0)
+		return reg;
+
+	val = reg;
+	val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
+	switch (polarity) {
+	case ETH_TP_MDI:
+		val |= MII_M1011_PHY_SCR_MDI;
+		break;
+	case ETH_TP_MDI_X:
+		val |= MII_M1011_PHY_SCR_MDI_X;
+		break;
+	case ETH_TP_MDI_AUTO:
+	case ETH_TP_MDI_INVALID:
+	default:
+		val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+		break;
+	}
+
+	if (val != reg) {
+		/* Set the new polarity value in the register */
+		err = phy_write(phydev, MII_M1011_PHY_SCR, val);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int marvell_config_aneg(struct phy_device *phydev)
 {
 	int err;
@@ -191,8 +231,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_M1011_PHY_SCR,
-			MII_M1011_PHY_SCR_AUTO_CROSS);
+	err = marvell_set_polarity(phydev, phydev->mdix);
 	if (err < 0)
 		return err;
 
@@ -514,6 +553,16 @@
 		err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
 		if (err < 0)
 			return err;
+
+		/* make sure copper is selected */
+		err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE);
+		if (err < 0)
+			return err;
+
+		err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE,
+				err & (~0xff));
+		if (err < 0)
+			return err;
 	}
 
 	if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
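marvell_set_polarity() above is driven by phydev->mdix, which the phy.c hunk further down copies from the ethtool eth_tp_mdix_ctrl field. A sketch of how an MDI-X request reaches the driver; the helper name is illustrative and the generic phy_ethtool_gset()/phy_ethtool_sset() pair is assumed to be the entry point:

#include <linux/ethtool.h>
#include <linux/phy.h>

static int example_set_mdix(struct phy_device *phydev, u8 mdix_ctrl)
{
	struct ethtool_cmd cmd = { 0 };
	int err;

	err = phy_ethtool_gset(phydev, &cmd);
	if (err)
		return err;

	cmd.eth_tp_mdix_ctrl = mdix_ctrl;	/* ETH_TP_MDI, _X or _AUTO */

	/* phy_ethtool_sset() stores the value in phydev->mdix and restarts
	 * autonegotiation; marvell_config_aneg() then applies it through
	 * marvell_set_polarity().
	 */
	return phy_ethtool_sset(phydev, &cmd);
}

From userspace the same path is exercised by ethtool's mdix option (roughly "ethtool -s eth0 mdix auto"), assuming an ethtool build that supports it.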
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index c838ad6..fcf4e4d 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -14,11 +15,12 @@
 #include <linux/phy.h>
 #include <linux/io.h>
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
 
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
 
 #define SMI_CMD		0x0
 #define SMI_WR_DAT	0x8
@@ -26,6 +28,79 @@
 #define SMI_CLK		0x18
 #define SMI_EN		0x20
 
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
+	field;					\
+	more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more)	\
+	more					\
+	field;
+
+#endif
+
+union cvmx_smix_clk {
+	u64 u64;
+	struct cvmx_smix_clk_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+	  OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+	  ;))))))))))
+	} s;
+};
+
+union cvmx_smix_cmd {
+	u64 u64;
+	struct cvmx_smix_cmd_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+	  OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+	  ;))))))
+	} s;
+};
+
+union cvmx_smix_en {
+	u64 u64;
+	struct cvmx_smix_en_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+	  OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+	  ;))
+	} s;
+};
+
+union cvmx_smix_rd_dat {
+	u64 u64;
+	struct cvmx_smix_rd_dat_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+	  ;))))
+	} s;
+};
+
+union cvmx_smix_wr_dat {
+	u64 u64;
+	struct cvmx_smix_wr_dat_s {
+	  OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+	  OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+	  OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+	  ;))))
+	} s;
+};
+
 enum octeon_mdiobus_mode {
 	UNINIT = 0,
 	C22,
@@ -41,6 +116,21 @@
 	int phy_irq[PHY_MAX_ADDR];
 };
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+	cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+	return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr)	writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr)		readq_relaxed((void *)addr)
+#endif
+
 static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
 				    enum octeon_mdiobus_mode m)
 {
@@ -49,10 +139,10 @@
 	if (m == p->mode)
 		return;
 
-	smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+	smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
 	smi_clk.s.mode = (m == C45) ? 1 : 0;
 	smi_clk.s.preamble = 1;
-	cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+	oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
 	p->mode = m;
 }
 
@@ -67,7 +157,7 @@
 
 	smi_wr.u64 = 0;
 	smi_wr.s.dat = regnum & 0xffff;
-	cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
 	regnum = (regnum >> 16) & 0x1f;
 
@@ -75,14 +165,14 @@
 	smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
 	} while (smi_wr.s.pending && --timeout);
 
 	if (timeout <= 0)
@@ -114,14 +204,14 @@
 	smi_cmd.s.phy_op = op;
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+		smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
 	} while (smi_rd.s.pending && --timeout);
 
 	if (smi_rd.s.val)
@@ -153,20 +243,20 @@
 
 	smi_wr.u64 = 0;
 	smi_wr.s.dat = val;
-	cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+	oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
 	smi_cmd.u64 = 0;
 	smi_cmd.s.phy_op = op;
 	smi_cmd.s.phy_adr = phy_id;
 	smi_cmd.s.reg_adr = regnum;
-	cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+	oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
 	do {
 		/* Wait 1000 clocks so we don't saturate the RSL bus
 		 * doing reads.
 		 */
 		__delay(1000);
-		smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+		smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
 	} while (smi_wr.s.pending && --timeout);
 
 	if (timeout <= 0)
@@ -187,30 +277,34 @@
 		return -ENOMEM;
 
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
 	if (res_mem == NULL) {
 		dev_err(&pdev->dev, "found no memory resource\n");
-		err = -ENXIO;
-		goto fail;
+		return -ENXIO;
 	}
+
 	bus->mdio_phys = res_mem->start;
 	bus->regsize = resource_size(res_mem);
+
 	if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
 				     res_mem->name)) {
 		dev_err(&pdev->dev, "request_mem_region failed\n");
-		goto fail;
+		return -ENXIO;
 	}
+
 	bus->register_base =
 		(u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+	if (!bus->register_base) {
+		dev_err(&pdev->dev, "devm_ioremap failed\n");
+		return -ENOMEM;
+	}
 
 	bus->mii_bus = mdiobus_alloc();
-
 	if (!bus->mii_bus)
 		goto fail;
 
 	smi_en.u64 = 0;
 	smi_en.s.en = 1;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 
 	bus->mii_bus->priv = bus;
 	bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@
 	mdiobus_free(bus->mii_bus);
 fail:
 	smi_en.u64 = 0;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 	return err;
 }
 
@@ -248,7 +342,7 @@
 	mdiobus_unregister(bus->mii_bus);
 	mdiobus_free(bus->mii_bus);
 	smi_en.u64 = 0;
-	cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 	return 0;
 }
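The OCT_MDIO_BITFIELD_FIELD() macro above nests its arguments so that the bitfield declaration order flips with endianness, letting one structure describe the SMI/MDIO registers on both big-endian Octeon and little-endian ThunderX hosts. As an illustration (not code from the patch), the two-field cvmx_smix_en layout expands to the equivalent of:

#include <linux/types.h>

/* On a big-endian kernel (__BIG_ENDIAN_BITFIELD defined): */
struct smix_en_be {
	u64 reserved_1_63:63;
	u64 en:1;
};

/* On a little-endian kernel the nesting reverses the order: */
struct smix_en_le {
	u64 en:1;
	u64 reserved_1_63:63;
};

Either way hardware bit 0 lands in 'en', matching the layout the Octeon-only headers used to provide.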
 
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 46a14cb..02a4615 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -303,12 +303,12 @@
 	BUG_ON(bus->state != MDIOBUS_REGISTERED);
 	bus->state = MDIOBUS_UNREGISTERED;
 
-	device_del(&bus->dev);
 	for (i = 0; i < PHY_MAX_ADDR; i++) {
 		if (bus->phy_map[i])
 			device_unregister(&bus->phy_map[i]->dev);
 		bus->phy_map[i] = NULL;
 	}
+	device_del(&bus->dev);
 }
 EXPORT_SYMBOL(mdiobus_unregister);
 
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1e1fbb0..adb48ab 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -353,6 +353,8 @@
 
 	phydev->duplex = cmd->duplex;
 
+	phydev->mdix = cmd->eth_tp_mdix_ctrl;
+
 	/* Restart the PHY */
 	phy_start_aneg(phydev);
 
@@ -377,6 +379,7 @@
 	cmd->transceiver = phy_is_internal(phydev) ?
 		XCVR_INTERNAL : XCVR_EXTERNAL;
 	cmd->autoneg = phydev->autoneg;
+	cmd->eth_tp_mdix_ctrl = phydev->mdix;
 
 	return 0;
 }
@@ -1037,11 +1040,15 @@
 	struct phy_driver *phydrv = phydev->drv;
 	int value = -1;
 
-	if (phydrv->read_mmd_indirect == NULL) {
-		mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+	if (!phydrv->read_mmd_indirect) {
+		struct mii_bus *bus = phydev->bus;
+
+		mutex_lock(&bus->mdio_lock);
+		mmd_phy_indirect(bus, prtad, devad, addr);
 
 		/* Read the content of the MMD's selected register */
-		value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
+		value = bus->read(bus, addr, MII_MMD_DATA);
+		mutex_unlock(&bus->mdio_lock);
 	} else {
 		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
 	}
@@ -1070,11 +1077,15 @@
 {
 	struct phy_driver *phydrv = phydev->drv;
 
-	if (phydrv->write_mmd_indirect == NULL) {
-		mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+	if (!phydrv->write_mmd_indirect) {
+		struct mii_bus *bus = phydev->bus;
+
+		mutex_lock(&bus->mdio_lock);
+		mmd_phy_indirect(bus, prtad, devad, addr);
 
 		/* Write the data into MMD's selected register */
-		phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
+		bus->write(bus, addr, MII_MMD_DATA, data);
+		mutex_unlock(&bus->mdio_lock);
 	} else {
 		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
 	}
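The phy.c hunk above takes bus->mdio_lock around the indirect MMD sequence because mmd_phy_indirect() issues several clause-22 transactions that must not interleave with other users of the same MDIO bus. Written out as a stand-alone sketch (this assumes it sits next to mmd_phy_indirect() inside phy.c, since that helper is static):

#include <linux/mii.h>
#include <linux/phy.h>

static int example_mmd_indirect_read(struct mii_bus *bus, int prtad,
				     int devad, int addr)
{
	int value;

	mutex_lock(&bus->mdio_lock);
	mmd_phy_indirect(bus, prtad, devad, addr);	/* select prtad/devad */
	value = bus->read(bus, addr, MII_MMD_DATA);	/* then fetch the data */
	mutex_unlock(&bus->mdio_lock);

	return value;
}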
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0302483..c0f2111 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -156,8 +156,8 @@
 
 	/* We allocate the device, and initialize the default values */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (NULL == dev)
-		return (struct phy_device *)PTR_ERR((void *)-ENOMEM);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
 
 	dev->dev.release = phy_device_release;
 
@@ -176,9 +176,9 @@
 	if (c45_ids)
 		dev->c45_ids = *c45_ids;
 	dev->bus = bus;
-	dev->dev.parent = bus->parent;
+	dev->dev.parent = &bus->dev;
 	dev->dev.bus = &mdio_bus_type;
-	dev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
+	dev->irq = bus->irq ? bus->irq[addr] : PHY_POLL;
 	dev_set_name(&dev->dev, PHY_ID_FMT, bus->id, addr);
 
 	dev->state = PHY_DOWN;
@@ -589,7 +589,7 @@
 	/* Assume that if there is no driver, that it doesn't
 	 * exist, and we should use the genphy driver.
 	 */
-	if (NULL == d->driver) {
+	if (!d->driver) {
 		if (phydev->is_c45)
 			d->driver = &genphy_driver[GENPHY_DRV_10G].driver;
 		else
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 4535361..43ab691 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -137,6 +137,19 @@
 		.config_intr	= &rtl8211b_config_intr,
 		.driver		= { .owner = THIS_MODULE,},
 	}, {
+		.phy_id		= 0x001cc914,
+		.name		= "RTL8211DN Gigabit Ethernet",
+		.phy_id_mask	= 0x001fffff,
+		.features	= PHY_GBIT_FEATURES,
+		.flags		= PHY_HAS_INTERRUPT,
+		.config_aneg	= genphy_config_aneg,
+		.read_status	= genphy_read_status,
+		.ack_interrupt	= rtl821x_ack_interrupt,
+		.config_intr	= rtl8211e_config_intr,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.driver		= { .owner = THIS_MODULE,},
+	}, {
 		.phy_id		= 0x001cc915,
 		.name		= "RTL8211E Gigabit Ethernet",
 		.phy_id_mask	= 0x001fffff,
@@ -170,6 +183,7 @@
 
 static struct mdio_device_id __maybe_unused realtek_tbl[] = {
 	{ 0x001cc912, 0x001fffff },
+	{ 0x001cc914, 0x001fffff },
 	{ 0x001cc915, 0x001fffff },
 	{ 0x001cc916, 0x001fffff },
 	{ }
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4653015..f091d69 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -209,8 +209,6 @@
 	return ks8995_start(ks);
 }
 
-/* ------------------------------------------------------------------------ */
-
 static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
 	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -220,19 +218,9 @@
 	dev = container_of(kobj, struct device, kobj);
 	ks8995 = dev_get_drvdata(dev);
 
-	if (unlikely(off > ks8995->regs_attr.size))
-		return 0;
-
-	if ((off + count) > ks8995->regs_attr.size)
-		count = ks8995->regs_attr.size - off;
-
-	if (unlikely(!count))
-		return count;
-
 	return ks8995_read(ks8995, buf, off, count);
 }
 
-
 static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
 	struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -242,19 +230,9 @@
 	dev = container_of(kobj, struct device, kobj);
 	ks8995 = dev_get_drvdata(dev);
 
-	if (unlikely(off >= ks8995->regs_attr.size))
-		return -EFBIG;
-
-	if ((off + count) > ks8995->regs_attr.size)
-		count = ks8995->regs_attr.size - off;
-
-	if (unlikely(!count))
-		return count;
-
 	return ks8995_write(ks8995, buf, off, count);
 }
 
-
 static const struct bin_attribute ks8995_registers_attr = {
 	.attr = {
 		.name   = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
new file mode 100644
index 0000000..91e1bec
--- /dev/null
+++ b/drivers/net/phy/teranetics.c
@@ -0,0 +1,135 @@
+/*
+ * Driver for Teranetics PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+MODULE_DESCRIPTION("Teranetics PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+
+#define PHY_ID_TN2020	0x00a19410
+#define MDIO_PHYXS_LNSTAT_SYNC0	0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1	0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2	0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3	0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+#define MDIO_PHYXS_LANE_READY	(MDIO_PHYXS_LNSTAT_SYNC0 | \
+				MDIO_PHYXS_LNSTAT_SYNC1 | \
+				MDIO_PHYXS_LNSTAT_SYNC2 | \
+				MDIO_PHYXS_LNSTAT_SYNC3 | \
+				MDIO_PHYXS_LNSTAT_ALIGN)
+
+static int teranetics_config_init(struct phy_device *phydev)
+{
+	phydev->supported = SUPPORTED_10000baseT_Full;
+	phydev->advertising = SUPPORTED_10000baseT_Full;
+
+	return 0;
+}
+
+static int teranetics_soft_reset(struct phy_device *phydev)
+{
+	return 0;
+}
+
+static int teranetics_aneg_done(struct phy_device *phydev)
+{
+	int reg;
+
+	/* auto negotiation state can only be checked when using copper
+	 * port, if using fiber port, just lie it's done.
+	 */
+	if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+		reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+		return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+	}
+
+	return 1;
+}
+
+static int teranetics_config_aneg(struct phy_device *phydev)
+{
+	return 0;
+}
+
+static int teranetics_read_status(struct phy_device *phydev)
+{
+	int reg;
+
+	phydev->link = 1;
+
+	phydev->speed = SPEED_10000;
+	phydev->duplex = DUPLEX_FULL;
+
+	if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+		reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
+		if (reg < 0 ||
+		    !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) {
+			phydev->link = 0;
+			return 0;
+		}
+
+		reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+		if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+			phydev->link = 0;
+	}
+
+	return 0;
+}
+
+static int teranetics_match_phy_device(struct phy_device *phydev)
+{
+	return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
+}
+
+static struct phy_driver teranetics_driver[] = {
+{
+	.phy_id		= PHY_ID_TN2020,
+	.phy_id_mask	= 0xffffffff,
+	.name		= "Teranetics TN2020",
+	.soft_reset	= teranetics_soft_reset,
+	.aneg_done	= teranetics_aneg_done,
+	.config_init    = teranetics_config_init,
+	.config_aneg    = teranetics_config_aneg,
+	.read_status	= teranetics_read_status,
+	.match_phy_device = teranetics_match_phy_device,
+	.driver		= { .owner = THIS_MODULE,},
+},
+};
+
+static int __init teranetics_init(void)
+{
+	return phy_drivers_register(teranetics_driver,
+				    ARRAY_SIZE(teranetics_driver));
+}
+
+static void __exit teranetics_exit(void)
+{
+	return phy_drivers_unregister(teranetics_driver,
+				      ARRAY_SIZE(teranetics_driver));
+}
+
+module_init(teranetics_init);
+module_exit(teranetics_exit);
+
+static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+	{ PHY_ID_TN2020, 0xffffffff },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(mdio, teranetics_tbl);
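teranetics_match_phy_device() above keys off c45_ids.device_ids[3], i.e. the identifier phylib read from MMD 3 (the PCS) while probing the clause-45 package, rather than the usual phy_id/phy_id_mask pair. The same idea for a hypothetical device (the ID below is made up):

#include <linux/phy.h>

#define PHY_ID_EXAMPLE	0x00a19999	/* illustrative, not a real device ID */

static int example_match_phy_device(struct phy_device *phydev)
{
	/* device_ids[] is indexed by MMD number; entry 3 is the PCS ID. */
	return phydev->c45_ids.device_ids[3] == PHY_ID_EXAMPLE;
}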
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa8f504..0481daf 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -283,6 +283,8 @@
 static void unit_put(struct idr *p, int n);
 static void *unit_find(struct idr *p, int n);
 
+static const struct net_device_ops ppp_netdev_ops;
+
 static struct class *ppp_class;
 
 /* per net-namespace data */
@@ -919,13 +921,22 @@
 static __net_exit void ppp_exit_net(struct net *net)
 {
 	struct ppp_net *pn = net_generic(net, ppp_net_id);
+	struct net_device *dev;
+	struct net_device *aux;
 	struct ppp *ppp;
 	LIST_HEAD(list);
 	int id;
 
 	rtnl_lock();
+	for_each_netdev_safe(net, dev, aux) {
+		if (dev->netdev_ops == &ppp_netdev_ops)
+			unregister_netdevice_queue(dev, &list);
+	}
+
 	idr_for_each_entry(&pn->units_idr, ppp, id)
-		unregister_netdevice_queue(ppp->dev, &list);
+		/* Skip devices already unregistered by previous loop */
+		if (!net_eq(dev_net(ppp->dev), net))
+			unregister_netdevice_queue(ppp->dev, &list);
 
 	unregister_netdevice_many(&list);
 	rtnl_unlock();
@@ -1017,6 +1028,7 @@
 	proto = npindex_to_proto[npi];
 	put_unaligned_be16(proto, pp);
 
+	skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
 	skb_queue_tail(&ppp->file.xq, skb);
 	ppp_xmit_process(ppp);
 	return NETDEV_TX_OK;
@@ -1137,7 +1149,6 @@
 	dev->tx_queue_len = 3;
 	dev->type = ARPHRD_PPP;
 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
-	dev->features |= NETIF_F_NETNS_LOCAL;
 	netif_keep_dst(dev);
 }
 
@@ -1900,6 +1911,8 @@
 			skb->dev = ppp->dev;
 			skb->protocol = htons(npindex_to_ethertype[npi]);
 			skb_reset_mac_header(skb);
+			skb_scrub_packet(skb, !net_eq(ppp->ppp_net,
+						      dev_net(ppp->dev)));
 			netif_rx(skb);
 		}
 	}
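With NETIF_F_NETNS_LOCAL dropped above, ppp devices may migrate between namespaces, so ppp_exit_net() first sweeps every ppp netdev still sitting in the dying namespace and only then walks units_idr to catch units whose device moved elsewhere (skipping those the first loop already queued); traffic that crosses namespaces is scrubbed with skb_scrub_packet(). The per-netns sweep generalises to any driver in the same situation; a minimal sketch, with the ops symbol purely illustrative:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/net_namespace.h>

static const struct net_device_ops example_netdev_ops;

static void __net_exit example_exit_net(struct net *net)
{
	struct net_device *dev, *aux;
	LIST_HEAD(list);

	rtnl_lock();
	/* Batch-unregister every device of ours left in the dying netns. */
	for_each_netdev_safe(net, dev, aux)
		if (dev->netdev_ops == &example_netdev_ops)
			unregister_netdevice_queue(dev, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}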
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index dac7a0d..01f08a7 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -396,7 +396,7 @@
 	return 0;
 }
 
-static int rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
+static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
 	struct rio_dev *rdev = to_rio_dev(dev);
 	unsigned char netid = rdev->net->hport->id;
@@ -416,8 +416,6 @@
 			}
 		}
 	}
-
-	return 0;
 }
 
 static void rionet_get_drvinfo(struct net_device *ndev,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index daa054b..651d35e 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2051,9 +2051,9 @@
 	dev->netdev_ops = &team_netdev_ops;
 	dev->ethtool_ops = &team_ethtool_ops;
 	dev->destructor	= team_destructor;
-	dev->tx_queue_len = 0;
 	dev->flags |= IFF_MULTICAST;
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	/*
 	 * Indicate we support unicast address filtering. That way core won't
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 06a0394..976aa97 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -961,6 +961,7 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tun_poll_controller,
 #endif
+	.ndo_features_check	= passthru_features_check,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
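The tun hunk above and the macvlan hunk earlier both gain .ndo_features_check = passthru_features_check, so the core keeps the full feature set for frames whose protocol it would otherwise prune offloads for; that suits devices that merely pass frames through. Wiring it up in another virtual driver would look like this (the ops symbol is illustrative):

#include <linux/netdevice.h>

static const struct net_device_ops example_netdev_ops = {
	/* ...the driver's other callbacks go here... */

	/* passthru_features_check() returns the features unchanged instead
	 * of applying the default per-protocol pruning done by
	 * netif_skb_features().
	 */
	.ndo_features_check	= passthru_features_check,
};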
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7ba8d08..1610b79 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -106,6 +106,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called r8152.
 
+config USB_LAN78XX
+	tristate "Microchip LAN78XX Based USB Ethernet Adapters"
+	select MII
+	help
+	  This option adds support for Microchip LAN78XX based USB 2
+	  & USB 3 10/100/1000 Ethernet adapters.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called lan78xx.
+
 config USB_USBNET
 	tristate "Multi-purpose USB Networking Framework"
 	select MII
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e2797f1..cf6a0e6 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_USB_RTL8150)	+= rtl8150.o
 obj-$(CONFIG_USB_RTL8152)	+= r8152.o
 obj-$(CONFIG_USB_HSO)		+= hso.o
+obj-$(CONFIG_USB_LAN78XX)	+= lan78xx.o
 obj-$(CONFIG_USB_NET_AX8817X)	+= asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
 obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
new file mode 100644
index 0000000..39364a4
--- /dev/null
+++ b/drivers/net/usb/lan78xx.c
@@ -0,0 +1,3495 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <net/ip6_checksum.h>
+#include "lan78xx.h"
+
+#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
+#define DRIVER_NAME	"lan78xx"
+#define DRIVER_VERSION	"1.0.0"
+
+#define TX_TIMEOUT_JIFFIES		(5 * HZ)
+#define THROTTLE_JIFFIES		(HZ / 8)
+#define UNLINK_TIMEOUT_MS		3
+
+#define RX_MAX_QUEUE_MEMORY		(60 * 1518)
+
+#define SS_USB_PKT_SIZE			(1024)
+#define HS_USB_PKT_SIZE			(512)
+#define FS_USB_PKT_SIZE			(64)
+
+#define MAX_RX_FIFO_SIZE		(12 * 1024)
+#define MAX_TX_FIFO_SIZE		(12 * 1024)
+#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
+#define DEFAULT_BULK_IN_DELAY		(0x0800)
+#define MAX_SINGLE_PACKET_SIZE		(9000)
+#define DEFAULT_TX_CSUM_ENABLE		(true)
+#define DEFAULT_RX_CSUM_ENABLE		(true)
+#define DEFAULT_TSO_CSUM_ENABLE		(true)
+#define DEFAULT_VLAN_FILTER_ENABLE	(true)
+#define INTERNAL_PHY_ID			(2)	/* 2: GMII */
+#define TX_OVERHEAD			(8)
+#define RXW_PADDING			2
+
+#define LAN78XX_USB_VENDOR_ID		(0x0424)
+#define LAN7800_USB_PRODUCT_ID		(0x7800)
+#define LAN7850_USB_PRODUCT_ID		(0x7850)
+#define LAN78XX_EEPROM_MAGIC		(0x78A5)
+#define LAN78XX_OTP_MAGIC		(0x78F3)
+
+#define	MII_READ			1
+#define	MII_WRITE			0
+
+#define EEPROM_INDICATOR		(0xA5)
+#define EEPROM_MAC_OFFSET		(0x01)
+#define MAX_EEPROM_SIZE			512
+#define OTP_INDICATOR_1			(0xF3)
+#define OTP_INDICATOR_2			(0xF7)
+
+#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
+					 WAKE_MCAST | WAKE_BCAST | \
+					 WAKE_ARP | WAKE_MAGIC)
+
+/* USB related defines */
+#define BULK_IN_PIPE			1
+#define BULK_OUT_PIPE			2
+
+/* default autosuspend delay (mSec)*/
+#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
+
+static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
+	"RX FCS Errors",
+	"RX Alignment Errors",
+	"Rx Fragment Errors",
+	"RX Jabber Errors",
+	"RX Undersize Frame Errors",
+	"RX Oversize Frame Errors",
+	"RX Dropped Frames",
+	"RX Unicast Byte Count",
+	"RX Broadcast Byte Count",
+	"RX Multicast Byte Count",
+	"RX Unicast Frames",
+	"RX Broadcast Frames",
+	"RX Multicast Frames",
+	"RX Pause Frames",
+	"RX 64 Byte Frames",
+	"RX 65 - 127 Byte Frames",
+	"RX 128 - 255 Byte Frames",
+	"RX 256 - 511 Bytes Frames",
+	"RX 512 - 1023 Byte Frames",
+	"RX 1024 - 1518 Byte Frames",
+	"RX Greater 1518 Byte Frames",
+	"EEE RX LPI Transitions",
+	"EEE RX LPI Time",
+	"TX FCS Errors",
+	"TX Excess Deferral Errors",
+	"TX Carrier Errors",
+	"TX Bad Byte Count",
+	"TX Single Collisions",
+	"TX Multiple Collisions",
+	"TX Excessive Collision",
+	"TX Late Collisions",
+	"TX Unicast Byte Count",
+	"TX Broadcast Byte Count",
+	"TX Multicast Byte Count",
+	"TX Unicast Frames",
+	"TX Broadcast Frames",
+	"TX Multicast Frames",
+	"TX Pause Frames",
+	"TX 64 Byte Frames",
+	"TX 65 - 127 Byte Frames",
+	"TX 128 - 255 Byte Frames",
+	"TX 256 - 511 Bytes Frames",
+	"TX 512 - 1023 Byte Frames",
+	"TX 1024 - 1518 Byte Frames",
+	"TX Greater 1518 Byte Frames",
+	"EEE TX LPI Transitions",
+	"EEE TX LPI Time",
+};
+
+struct lan78xx_statstage {
+	u32 rx_fcs_errors;
+	u32 rx_alignment_errors;
+	u32 rx_fragment_errors;
+	u32 rx_jabber_errors;
+	u32 rx_undersize_frame_errors;
+	u32 rx_oversize_frame_errors;
+	u32 rx_dropped_frames;
+	u32 rx_unicast_byte_count;
+	u32 rx_broadcast_byte_count;
+	u32 rx_multicast_byte_count;
+	u32 rx_unicast_frames;
+	u32 rx_broadcast_frames;
+	u32 rx_multicast_frames;
+	u32 rx_pause_frames;
+	u32 rx_64_byte_frames;
+	u32 rx_65_127_byte_frames;
+	u32 rx_128_255_byte_frames;
+	u32 rx_256_511_bytes_frames;
+	u32 rx_512_1023_byte_frames;
+	u32 rx_1024_1518_byte_frames;
+	u32 rx_greater_1518_byte_frames;
+	u32 eee_rx_lpi_transitions;
+	u32 eee_rx_lpi_time;
+	u32 tx_fcs_errors;
+	u32 tx_excess_deferral_errors;
+	u32 tx_carrier_errors;
+	u32 tx_bad_byte_count;
+	u32 tx_single_collisions;
+	u32 tx_multiple_collisions;
+	u32 tx_excessive_collision;
+	u32 tx_late_collisions;
+	u32 tx_unicast_byte_count;
+	u32 tx_broadcast_byte_count;
+	u32 tx_multicast_byte_count;
+	u32 tx_unicast_frames;
+	u32 tx_broadcast_frames;
+	u32 tx_multicast_frames;
+	u32 tx_pause_frames;
+	u32 tx_64_byte_frames;
+	u32 tx_65_127_byte_frames;
+	u32 tx_128_255_byte_frames;
+	u32 tx_256_511_bytes_frames;
+	u32 tx_512_1023_byte_frames;
+	u32 tx_1024_1518_byte_frames;
+	u32 tx_greater_1518_byte_frames;
+	u32 eee_tx_lpi_transitions;
+	u32 eee_tx_lpi_time;
+};
+
+struct lan78xx_net;
+
+struct lan78xx_priv {
+	struct lan78xx_net *dev;
+	u32 rfe_ctl;
+	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
+	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
+	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
+	struct mutex dataport_mutex; /* for dataport access */
+	spinlock_t rfe_ctl_lock; /* for rfe register access */
+	struct work_struct set_multicast;
+	struct work_struct set_vlan;
+	u32 wol;
+};
+
+enum skb_state {
+	illegal = 0,
+	tx_start,
+	tx_done,
+	rx_start,
+	rx_done,
+	rx_cleanup,
+	unlink_start
+};
+
+struct skb_data {		/* skb->cb is one of these */
+	struct urb *urb;
+	struct lan78xx_net *dev;
+	enum skb_state state;
+	size_t length;
+};
+
+struct usb_context {
+	struct usb_ctrlrequest req;
+	struct lan78xx_net *dev;
+};
+
+#define EVENT_TX_HALT			0
+#define EVENT_RX_HALT			1
+#define EVENT_RX_MEMORY			2
+#define EVENT_STS_SPLIT			3
+#define EVENT_LINK_RESET		4
+#define EVENT_RX_PAUSED			5
+#define EVENT_DEV_WAKING		6
+#define EVENT_DEV_ASLEEP		7
+#define EVENT_DEV_OPEN			8
+
+struct lan78xx_net {
+	struct net_device	*net;
+	struct usb_device	*udev;
+	struct usb_interface	*intf;
+	void			*driver_priv;
+
+	int			rx_qlen;
+	int			tx_qlen;
+	struct sk_buff_head	rxq;
+	struct sk_buff_head	txq;
+	struct sk_buff_head	done;
+	struct sk_buff_head	rxq_pause;
+	struct sk_buff_head	txq_pend;
+
+	struct tasklet_struct	bh;
+	struct delayed_work	wq;
+
+	struct usb_host_endpoint *ep_blkin;
+	struct usb_host_endpoint *ep_blkout;
+	struct usb_host_endpoint *ep_intr;
+
+	int			msg_enable;
+
+	struct urb		*urb_intr;
+	struct usb_anchor	deferred;
+
+	struct mutex		phy_mutex; /* for phy access */
+	unsigned		pipe_in, pipe_out, pipe_intr;
+
+	u32			hard_mtu;	/* count any extra framing */
+	size_t			rx_urb_size;	/* size for rx urbs */
+
+	unsigned long		flags;
+
+	wait_queue_head_t	*wait;
+	unsigned char		suspend_count;
+
+	unsigned		maxpacket;
+	struct timer_list	delay;
+
+	unsigned long		data[5];
+	struct mii_if_info	mii;
+
+	int			link_on;
+	u8			mdix_ctrl;
+};
+
+/* use ethtool to change the level for any given device */
+static int msg_level = -1;
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
+
+static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
+{
+	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+	int ret;
+
+	if (!buf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+			      USB_VENDOR_REQUEST_READ_REGISTER,
+			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+	if (likely(ret >= 0)) {
+		le32_to_cpus(buf);
+		*data = *buf;
+	} else {
+		netdev_warn(dev->net,
+			    "Failed to read register index 0x%08x. ret = %d",
+			    index, ret);
+	}
+
+	kfree(buf);
+
+	return ret;
+}
+
+static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
+{
+	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+	int ret;
+
+	if (!buf)
+		return -ENOMEM;
+
+	*buf = data;
+	cpu_to_le32s(buf);
+
+	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+			      USB_VENDOR_REQUEST_WRITE_REGISTER,
+			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+	if (unlikely(ret < 0)) {
+		netdev_warn(dev->net,
+			    "Failed to write register index 0x%08x. ret = %d",
+			    index, ret);
+	}
+
+	kfree(buf);
+
+	return ret;
+}
+
+static int lan78xx_read_stats(struct lan78xx_net *dev,
+			      struct lan78xx_statstage *data)
+{
+	int ret = 0;
+	int i;
+	struct lan78xx_statstage *stats;
+	u32 *src;
+	u32 *dst;
+
+	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev->udev,
+			      usb_rcvctrlpipe(dev->udev, 0),
+			      USB_VENDOR_REQUEST_GET_STATS,
+			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			      0,
+			      0,
+			      (void *)stats,
+			      sizeof(*stats),
+			      USB_CTRL_SET_TIMEOUT);
+	if (likely(ret >= 0)) {
+		src = (u32 *)stats;
+		dst = (u32 *)data;
+		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+			le32_to_cpus(&src[i]);
+			dst[i] = src[i];
+		}
+	} else {
+		netdev_warn(dev->net,
+			    "Failed to read stat ret = 0x%x", ret);
+	}
+
+	kfree(stats);
+
+	return ret;
+}
+
+/* Loop until the read is completed with timeout called with phy_mutex held */
+static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+{
+	unsigned long start_time = jiffies;
+	u32 val;
+	int ret;
+
+	do {
+		ret = lan78xx_read_reg(dev, MII_ACC, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (!(val & MII_ACC_MII_BUSY_))
+			return 0;
+	} while (!time_after(jiffies, start_time + HZ));
+
+	return -EIO;
+}
+
+static inline u32 mii_access(int id, int index, int read)
+{
+	u32 ret;
+
+	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
+	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
+	if (read)
+		ret |= MII_ACC_MII_READ_;
+	else
+		ret |= MII_ACC_MII_WRITE_;
+	ret |= MII_ACC_MII_BUSY_;
+
+	return ret;
+}
+
+static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* set the address, index & direction (read from PHY) */
+	phy_id &= dev->mii.phy_id_mask;
+	idx &= dev->mii.reg_num_mask;
+	addr = mii_access(phy_id, idx, MII_READ);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+	ret = (int)(val & 0xFFFF);
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+	return ret;
+}
+
+static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
+			       int idx, int regval)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	val = regval;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	/* set the address, index & direction (write to PHY) */
+	phy_id &= dev->mii.phy_id_mask;
+	idx &= dev->mii.reg_num_mask;
+	addr = mii_access(phy_id, idx, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
+			      int mmddev, int mmdidx, int regval)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	mmddev &= 0x1F;
+
+	/* set up device address for MMD */
+	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register of MMD */
+	val = mmdidx;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register data for MMD */
+	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* write to MMD */
+	val = regval;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
+			    int mmddev, int mmdidx)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	u32 val, addr;
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->phy_mutex);
+
+	/* confirm MII not busy */
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* set up device address for MMD */
+	ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register of MMD */
+	val = mmdidx;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* select register data for MMD */
+	val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+	ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* set the address, index & direction (read from PHY) */
+	phy_id &= dev->mii.phy_id_mask;
+	addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
+	ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+	ret = lan78xx_phy_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	/* read from MMD */
+	ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+	ret = (int)(val & 0xFFFF);
+
+done:
+	mutex_unlock(&dev->phy_mutex);
+	usb_autopm_put_interface(dev->intf);
+	return ret;
+}
+
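+/* Poll E2P_CMD until the EEPROM controller clears its BUSY flag or
+ * reports a timeout; gives up after roughly one second.
+ */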
+static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
+{
+	unsigned long start_time = jiffies;
+	u32 val;
+	int ret;
+
+	do {
+		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (!(val & E2P_CMD_EPC_BUSY_) ||
+		    (val & E2P_CMD_EPC_TIMEOUT_))
+			break;
+		usleep_range(40, 100);
+	} while (!time_after(jiffies, start_time + HZ));
+
+	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+		netdev_warn(dev->net, "EEPROM read operation timeout");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
+{
+	unsigned long start_time = jiffies;
+	u32 val;
+	int ret;
+
+	do {
+		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (!(val & E2P_CMD_EPC_BUSY_))
+			return 0;
+
+		usleep_range(40, 100);
+	} while (!time_after(jiffies, start_time + HZ));
+
+	netdev_warn(dev->net, "EEPROM is busy");
+	return -EIO;
+}
+
+static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+				   u32 length, u8 *data)
+{
+	u32 val;
+	int i, ret;
+
+	ret = lan78xx_eeprom_confirm_not_busy(dev);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < length; i++) {
+		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+		ret = lan78xx_write_reg(dev, E2P_CMD, val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		ret = lan78xx_wait_eeprom(dev);
+		if (ret < 0)
+			return ret;
+
+		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		data[i] = val & 0xFF;
+		offset++;
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
+			       u32 length, u8 *data)
+{
+	u8 sig;
+	int ret;
+
+	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+	if ((ret == 0) && (sig == EEPROM_INDICATOR))
+		ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+				    u32 length, u8 *data)
+{
+	u32 val;
+	int i, ret;
+
+	ret = lan78xx_eeprom_confirm_not_busy(dev);
+	if (ret)
+		return ret;
+
+	/* Issue write/erase enable command */
+	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+	ret = lan78xx_write_reg(dev, E2P_CMD, val);
+	if (unlikely(ret < 0))
+		return -EIO;
+
+	ret = lan78xx_wait_eeprom(dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < length; i++) {
+		/* Fill data register */
+		val = data[i];
+		ret = lan78xx_write_reg(dev, E2P_DATA, val);
+		if (ret < 0)
+			return ret;
+
+		/* Send "write" command */
+		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+		ret = lan78xx_write_reg(dev, E2P_CMD, val);
+		if (ret < 0)
+			return ret;
+
+		ret = lan78xx_wait_eeprom(dev);
+		if (ret < 0)
+			return ret;
+
+		offset++;
+	}
+
+	return 0;
+}
+
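+/* Read bytes from the on-chip OTP memory: bring the OTP block out of
+ * power-down if necessary, then issue an address/READ/GO sequence and
+ * poll OTP_STATUS for every byte.
+ */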
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+				u32 length, u8 *data)
+{
+	int i;
+	int ret;
+	u32 buf;
+	unsigned long timeout;
+
+	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+	if (buf & OTP_PWR_DN_PWRDN_N_) {
+		/* clear it and wait to be cleared */
+		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+		timeout = jiffies + HZ;
+		do {
+			usleep_range(1, 10);
+			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+			if (time_after(jiffies, timeout)) {
+				netdev_warn(dev->net,
+					    "timeout on OTP_PWR_DN");
+				return -EIO;
+			}
+		} while (buf & OTP_PWR_DN_PWRDN_N_);
+	}
+
+	for (i = 0; i < length; i++) {
+		ret = lan78xx_write_reg(dev, OTP_ADDR1,
+					((offset + i) >> 8) & OTP_ADDR1_15_11);
+		ret = lan78xx_write_reg(dev, OTP_ADDR2,
+					((offset + i) & OTP_ADDR2_10_3));
+
+		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+		timeout = jiffies + HZ;
+		do {
+			udelay(1);
+			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+			if (time_after(jiffies, timeout)) {
+				netdev_warn(dev->net,
+					    "timeout on OTP_STATUS");
+				return -EIO;
+			}
+		} while (buf & OTP_STATUS_BUSY_);
+
+		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+		data[i] = (u8)(buf & 0xFF);
+	}
+
+	return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+			    u32 length, u8 *data)
+{
+	u8 sig;
+	int ret;
+
+	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+	if (ret == 0) {
+		if (sig == OTP_INDICATOR_2)
+			offset += 0x100;
+		else if (sig != OTP_INDICATOR_1)
+			return -EINVAL;
+		ret = lan78xx_read_raw_otp(dev, offset, length, data);
+	}
+
+	return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < 100; i++) {
+		u32 dp_sel;
+
+		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+		if (unlikely(ret < 0))
+			return -EIO;
+
+		if (dp_sel & DP_SEL_DPRDY_)
+			return 0;
+
+		usleep_range(40, 100);
+	}
+
+	netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+	return -EIO;
+}
+
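+/* Write a buffer of 32-bit words into the selected internal dataport
+ * RAM (e.g. the VLAN/multicast hash filter) one word at a time via
+ * DP_ADDR/DP_DATA/DP_CMD; must be called from sleepable context.
+ */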
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+				  u32 addr, u32 length, u32 *buf)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 dp_sel;
+	int i, ret;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return 0;
+
+	mutex_lock(&pdata->dataport_mutex);
+
+	ret = lan78xx_dataport_wait_not_busy(dev);
+	if (ret < 0)
+		goto done;
+
+	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+	dp_sel &= ~DP_SEL_RSEL_MASK_;
+	dp_sel |= ram_select;
+	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+	for (i = 0; i < length; i++) {
+		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+		ret = lan78xx_dataport_wait_not_busy(dev);
+		if (ret < 0)
+			goto done;
+	}
+
+done:
+	mutex_unlock(&pdata->dataport_mutex);
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
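+/* Pack a MAC address into the cached perfect-filter (MAF) entry for the
+ * given slot; the real MAF_HI/MAF_LO registers are written later from
+ * the deferred multicast work.
+ */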
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+				    int index, u8 addr[ETH_ALEN])
+{
+	u32	temp;
+
+	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+		temp = addr[3];
+		temp = addr[2] | (temp << 8);
+		temp = addr[1] | (temp << 8);
+		temp = addr[0] | (temp << 8);
+		pdata->pfilter_table[index][1] = temp;
+		temp = addr[5];
+		temp = addr[4] | (temp << 8);
+		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
+		pdata->pfilter_table[index][0] = temp;
+	}
+}
+
+/* returns hash bit number for given MAC address */
+static inline u32 lan78xx_hash(char addr[ETH_ALEN])
+{
+	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
+
+static void lan78xx_deferred_multicast_write(struct work_struct *param)
+{
+	struct lan78xx_priv *pdata =
+			container_of(param, struct lan78xx_priv, set_multicast);
+	struct lan78xx_net *dev = pdata->dev;
+	int i;
+	int ret;
+
+	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+		  pdata->rfe_ctl);
+
+	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
+			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+
+	for (i = 1; i < NUM_OF_MAF; i++) {
+		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+		ret = lan78xx_write_reg(dev, MAF_LO(i),
+					pdata->pfilter_table[i][1]);
+		ret = lan78xx_write_reg(dev, MAF_HI(i),
+					pdata->pfilter_table[i][0]);
+	}
+
+	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+}
+
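+/* Rebuild the receive filter state under rfe_ctl_lock: the first 32
+ * multicast addresses go into perfect filters, the rest into the
+ * 512-bit hash table.  Register writes are deferred to a work item
+ * because this can be called in atomic context.
+ */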
+static void lan78xx_set_multicast(struct net_device *netdev)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
+			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+
+	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+		pdata->mchash_table[i] = 0;
+	/* pfilter_table[0] has own HW address */
+	for (i = 1; i < NUM_OF_MAF; i++) {
+		pdata->pfilter_table[i][0] =
+		pdata->pfilter_table[i][1] = 0;
+	}
+
+	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
+
+	if (dev->net->flags & IFF_PROMISC) {
+		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
+	} else {
+		if (dev->net->flags & IFF_ALLMULTI) {
+			netif_dbg(dev, drv, dev->net,
+				  "receive all multicast enabled");
+			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
+		}
+	}
+
+	if (netdev_mc_count(dev->net)) {
+		struct netdev_hw_addr *ha;
+		int i;
+
+		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
+
+		i = 1;
+		netdev_for_each_mc_addr(ha, netdev) {
+			/* set first 32 into Perfect Filter */
+			if (i < 33) {
+				lan78xx_set_addr_filter(pdata, i, ha->addr);
+			} else {
+				u32 bitnum = lan78xx_hash(ha->addr);
+
+				pdata->mchash_table[bitnum / 32] |=
+							(1 << (bitnum % 32));
+				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
+			}
+			i++;
+		}
+	}
+
+	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+	/* defer register writes to a sleepable context */
+	schedule_work(&pdata->set_multicast);
+}
+
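+/* Program pause-frame settings resolved from the local and link partner
+ * advertisements; the FCT_FLOW threshold depends on whether the device
+ * runs at SuperSpeed or high speed.
+ */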
+static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
+				      u16 lcladv, u16 rmtadv)
+{
+	u32 flow = 0, fct_flow = 0;
+	int ret;
+
+	u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+	if (cap & FLOW_CTRL_TX)
+		flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+
+	if (cap & FLOW_CTRL_RX)
+		flow |= FLOW_CR_RX_FCEN_;
+
+	if (dev->udev->speed == USB_SPEED_SUPER)
+		fct_flow = 0x817;
+	else if (dev->udev->speed == USB_SPEED_HIGH)
+		fct_flow = 0x211;
+
+	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+
+	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+
+	/* threshold value should be set before enabling flow */
+	ret = lan78xx_write_reg(dev, FLOW, flow);
+
+	return 0;
+}
+
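+/* Handle a PHY link-change event from keventd: reset the MAC when the
+ * link drops, otherwise pick up the autonegotiation result, tune USB
+ * U1/U2 link power management for the negotiated speed and update flow
+ * control before reporting carrier on.
+ */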
+static int lan78xx_link_reset(struct lan78xx_net *dev)
+{
+	struct mii_if_info *mii = &dev->mii;
+	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+	int ladv, radv;
+	int ret;
+	u32 buf;
+
+	/* clear PHY interrupt status */
+	/* VTSE PHY */
+	ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+	if (unlikely(ret < 0))
+		return -EIO;
+
+	/* clear LAN78xx interrupt status */
+	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+	if (unlikely(ret < 0))
+		return -EIO;
+
+	if (!mii_link_ok(mii) && dev->link_on) {
+		dev->link_on = false;
+		netif_carrier_off(dev->net);
+
+		/* reset MAC */
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		if (unlikely(ret < 0))
+			return -EIO;
+		buf |= MAC_CR_RST_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+		if (unlikely(ret < 0))
+			return -EIO;
+	} else if (mii_link_ok(mii) && !dev->link_on) {
+		dev->link_on = true;
+
+		mii_check_media(mii, 1, 1);
+		mii_ethtool_gset(&dev->mii, &ecmd);
+
+		mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+
+		if (dev->udev->speed == USB_SPEED_SUPER) {
+			if (ethtool_cmd_speed(&ecmd) == 1000) {
+				/* disable U2 */
+				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
+				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+				/* enable U1 */
+				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+				buf |= USB_CFG1_DEV_U1_INIT_EN_;
+				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+			} else {
+				/* enable U1 & U2 */
+				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+				buf |= USB_CFG1_DEV_U2_INIT_EN_;
+				buf |= USB_CFG1_DEV_U1_INIT_EN_;
+				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+			}
+		}
+
+		ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+		if (unlikely(ladv < 0))
+			return -EIO;
+
+		radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+		if (unlikely(radv < 0))
+			return -EIO;
+
+		netif_dbg(dev, link, dev->net,
+			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
+			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+
+		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
+		netif_carrier_on(dev->net);
+	}
+
+	return ret;
+}
+
+/* some work can't be done in tasklets, so we use keventd
+ *
+ * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
+ * but tasklet_schedule() doesn't.  Hope the failure is rare.
+ */
+void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+{
+	set_bit(work, &dev->flags);
+	if (!schedule_delayed_work(&dev->wq, 0))
+		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+}
+
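+/* Interrupt endpoint completion: the device reports a 4-byte little
+ * endian status word; a PHY interrupt defers the link handling to
+ * keventd.
+ */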
+static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
+{
+	u32 intdata;
+
+	if (urb->actual_length != 4) {
+		netdev_warn(dev->net,
+			    "unexpected urb length %d", urb->actual_length);
+		return;
+	}
+
+	memcpy(&intdata, urb->transfer_buffer, 4);
+	le32_to_cpus(&intdata);
+
+	if (intdata & INT_ENP_PHY_INT) {
+		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+	} else {
+		netdev_warn(dev->net,
+			    "unexpected interrupt: 0x%08x\n", intdata);
+	}
+}
+
+static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+	return MAX_EEPROM_SIZE;
+}
+
+static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
+				      struct ethtool_eeprom *ee, u8 *data)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+
+	ee->magic = LAN78XX_EEPROM_MAGIC;
+
+	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
+				      struct ethtool_eeprom *ee, u8 *data)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+
+	/* Allow entire eeprom update only */
+	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
+	    (ee->offset == 0) &&
+	    (ee->len == 512) &&
+	    (data[0] == EEPROM_INDICATOR))
+		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
+		 (ee->offset == 0) &&
+		 (ee->len == 512) &&
+		 (data[0] == OTP_INDICATOR_1))
+		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+
+	return -EINVAL;
+}
+
+static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
+				u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+}
+
+static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(lan78xx_gstrings);
+	else
+		return -EOPNOTSUPP;
+}
+
+static void lan78xx_get_stats(struct net_device *netdev,
+			      struct ethtool_stats *stats, u64 *data)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_statstage lan78xx_stat;
+	u32 *p;
+	int i;
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
+		p = (u32 *)&lan78xx_stat;
+		for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
+			data[i] = p[i];
+	}
+
+	usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_get_wol(struct net_device *netdev,
+			    struct ethtool_wolinfo *wol)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	int ret;
+	u32 buf;
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+	if (usb_autopm_get_interface(dev->intf) < 0)
+		return;
+
+	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+	if (unlikely(ret < 0)) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		if (buf & USB_CFG_RMT_WKP_) {
+			wol->supported = WAKE_ALL;
+			wol->wolopts = pdata->wol;
+		} else {
+			wol->supported = 0;
+			wol->wolopts = 0;
+		}
+	}
+
+	usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_set_wol(struct net_device *netdev,
+			   struct ethtool_wolinfo *wol)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	pdata->wol = 0;
+	if (wol->wolopts & WAKE_UCAST)
+		pdata->wol |= WAKE_UCAST;
+	if (wol->wolopts & WAKE_MCAST)
+		pdata->wol |= WAKE_MCAST;
+	if (wol->wolopts & WAKE_BCAST)
+		pdata->wol |= WAKE_BCAST;
+	if (wol->wolopts & WAKE_MAGIC)
+		pdata->wol |= WAKE_MAGIC;
+	if (wol->wolopts & WAKE_PHY)
+		pdata->wol |= WAKE_PHY;
+	if (wol->wolopts & WAKE_ARP)
+		pdata->wol |= WAKE_ARP;
+
+	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+	u32 buf;
+	u32 adv, lpadv;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+	if (buf & MAC_CR_EEE_EN_) {
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+		adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+		edata->eee_enabled = true;
+		edata->supported = true;
+		edata->eee_active = !!(adv & lpadv);
+		edata->advertised = adv;
+		edata->lp_advertised = lpadv;
+		edata->tx_lpi_enabled = true;
+		/* EEE_TX_LPI_REQ_DLY and tx_lpi_timer share the same usec unit */
+		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+		edata->tx_lpi_timer = buf;
+	} else {
+		buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+				       PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+		lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+		edata->eee_enabled = false;
+		edata->eee_active = false;
+		edata->supported = false;
+		edata->advertised = 0;
+		edata->lp_advertised = lpadv;
+		edata->tx_lpi_enabled = false;
+		edata->tx_lpi_timer = 0;
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+	u32 buf;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	if (edata->eee_enabled) {
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		buf |= MAC_CR_EEE_EN_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+		buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+		lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+				  PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+	} else {
+		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+		buf &= ~MAC_CR_EEE_EN_;
+		ret = lan78xx_write_reg(dev, MAC_CR, buf);
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	return mii_link_ok(&dev->mii);
+}
+
+int lan78xx_nway_reset(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+				struct ethtool_drvinfo *info)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	struct mii_if_info *mii = &dev->mii;
+	int ret;
+	int buf;
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	ret = mii_ethtool_gset(&dev->mii, cmd);
+
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+	buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+
+	buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
+	if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+		cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+	} else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+		cmd->eth_tp_mdix = ETH_TP_MDI;
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+	} else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+		cmd->eth_tp_mdix = ETH_TP_MDI_X;
+		cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	struct mii_if_info *mii = &dev->mii;
+	int ret = 0;
+	int temp;
+
+	if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+		return -EOPNOTSUPP;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		return ret;
+
+	if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
+		if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_1);
+			temp = mii->mdio_read(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL);
+			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL,
+					temp | PHY_EXT_MODE_CTRL_MDI_);
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_0);
+		} else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_1);
+			temp = mii->mdio_read(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL);
+			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL,
+					temp | PHY_EXT_MODE_CTRL_MDI_X_);
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_0);
+		} else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_1);
+			temp = mii->mdio_read(mii->dev, mii->phy_id,
+							PHY_EXT_MODE_CTRL);
+			temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_MODE_CTRL,
+					temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+			mii->mdio_write(mii->dev, mii->phy_id,
+					PHY_EXT_GPIO_PAGE,
+					PHY_EXT_GPIO_PAGE_SPACE_0);
+		}
+	}
+
+	/* change speed & duplex */
+	ret = mii_ethtool_sset(&dev->mii, cmd);
+
+	if (!cmd->autoneg) {
+		/* force link down */
+		temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
+				temp | BMCR_LOOPBACK);
+		mdelay(1);
+		mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+	}
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
+}
+
+static const struct ethtool_ops lan78xx_ethtool_ops = {
+	.get_link	= lan78xx_get_link,
+	.nway_reset	= lan78xx_nway_reset,
+	.get_drvinfo	= lan78xx_get_drvinfo,
+	.get_msglevel	= lan78xx_get_msglevel,
+	.set_msglevel	= lan78xx_set_msglevel,
+	.get_settings	= lan78xx_get_settings,
+	.set_settings	= lan78xx_set_settings,
+	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
+	.get_eeprom	= lan78xx_ethtool_get_eeprom,
+	.set_eeprom	= lan78xx_ethtool_set_eeprom,
+	.get_ethtool_stats = lan78xx_get_stats,
+	.get_sset_count = lan78xx_get_sset_count,
+	.get_strings	= lan78xx_get_strings,
+	.get_wol	= lan78xx_get_wol,
+	.set_wol	= lan78xx_set_wol,
+	.get_eee	= lan78xx_get_eee,
+	.set_eee	= lan78xx_set_eee,
+};
+
+static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
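+/* Use the MAC address already programmed in RX_ADDRL/RX_ADDRH if it is
+ * valid, otherwise try EEPROM then OTP, and fall back to a random
+ * address; the result is also written to perfect-filter slot 0.
+ */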
+static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+{
+	u32 addr_lo, addr_hi;
+	int ret;
+	u8 addr[6];
+
+	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+
+	addr[0] = addr_lo & 0xFF;
+	addr[1] = (addr_lo >> 8) & 0xFF;
+	addr[2] = (addr_lo >> 16) & 0xFF;
+	addr[3] = (addr_lo >> 24) & 0xFF;
+	addr[4] = addr_hi & 0xFF;
+	addr[5] = (addr_hi >> 8) & 0xFF;
+
+	if (!is_valid_ether_addr(addr)) {
+		/* read MAC address from EEPROM or OTP */
+		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+					 addr) == 0) ||
+		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+				      addr) == 0)) {
+			if (is_valid_ether_addr(addr)) {
+				/* eeprom values are valid so use them */
+				netif_dbg(dev, ifup, dev->net,
+					  "MAC address read from EEPROM");
+			} else {
+				/* generate random MAC */
+				random_ether_addr(addr);
+				netif_dbg(dev, ifup, dev->net,
+					  "MAC address set to random addr");
+			}
+
+			addr_lo = addr[0] | (addr[1] << 8) |
+				  (addr[2] << 16) | (addr[3] << 24);
+			addr_hi = addr[4] | (addr[5] << 8);
+
+			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+		} else {
+			/* generate random MAC */
+			random_ether_addr(addr);
+			netif_dbg(dev, ifup, dev->net,
+				  "MAC address set to random addr");
+		}
+	}
+
+	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
+	ether_addr_copy(dev->net->dev_addr, addr);
+}
+
+static void lan78xx_mii_init(struct lan78xx_net *dev)
+{
+	/* Initialize MII structure */
+	dev->mii.dev = dev->net;
+	dev->mii.mdio_read = lan78xx_mdio_read;
+	dev->mii.mdio_write = lan78xx_mdio_write;
+	dev->mii.phy_id_mask = 0x1f;
+	dev->mii.reg_num_mask = 0x1f;
+	dev->mii.phy_id = INTERNAL_PHY_ID;
+	dev->mii.supports_gmii = true;
+}
+
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+	int temp;
+	struct mii_if_info *mii = &dev->mii;
+
+	if ((!mii->mdio_write) || (!mii->mdio_read))
+		return -EOPNOTSUPP;
+
+	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+	temp |= ADVERTISE_ALL;
+	mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
+			temp | ADVERTISE_CSMA |
+			ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+	/* set to AUTOMDIX */
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+	temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+	temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+	mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
+			temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+	mii->mdio_write(mii->dev, mii->phy_id,
+			PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+	dev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+	/* MAC doesn't support 1000HD */
+	temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
+	mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
+			temp & ~ADVERTISE_1000HALF);
+
+	/* clear interrupt */
+	mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+	mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
+			PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
+			PHY_VTSE_INT_MASK_LINK_CHANGE_);
+
+	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+
+	return 0;
+}
+
+static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
+{
+	int ret = 0;
+	u32 buf;
+	bool rxenabled;
+
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+
+	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
+
+	if (rxenabled) {
+		buf &= ~MAC_RX_RXEN_;
+		ret = lan78xx_write_reg(dev, MAC_RX, buf);
+	}
+
+	/* add 4 to size for FCS */
+	buf &= ~MAC_RX_MAX_SIZE_MASK_;
+	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	if (rxenabled) {
+		buf |= MAC_RX_RXEN_;
+		ret = lan78xx_write_reg(dev, MAC_RX, buf);
+	}
+
+	return 0;
+}
+
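+/* Unlink every URB still queued on q.  A reference is taken around
+ * usb_unlink_urb() because the completion handler can run (and free
+ * the URB) concurrently; returns the number of URBs unlinked.
+ */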
+static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+	unsigned long flags;
+	int count = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	while (!skb_queue_empty(q)) {
+		struct skb_data	*entry;
+		struct urb *urb;
+		int ret;
+
+		skb_queue_walk(q, skb) {
+			entry = (struct skb_data *)skb->cb;
+			if (entry->state != unlink_start)
+				goto found;
+		}
+		break;
+found:
+		entry->state = unlink_start;
+		urb = entry->urb;
+
+		/* Get reference count of the URB to avoid it to be
+		 * freed during usb_unlink_urb, which may trigger
+		 * use-after-free problem inside usb_unlink_urb since
+		 * usb_unlink_urb is always racing with .complete
+		 * handler(include defer_bh).
+		 */
+		usb_get_urb(urb);
+		spin_unlock_irqrestore(&q->lock, flags);
+		/* during some PM-driven resume scenarios,
+		 * these (async) unlinks complete immediately
+		 */
+		ret = usb_unlink_urb(urb);
+		if (ret != -EINPROGRESS && ret != 0)
+			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
+		else
+			count++;
+		usb_put_urb(urb);
+		spin_lock_irqsave(&q->lock, flags);
+	}
+	spin_unlock_irqrestore(&q->lock, flags);
+	return count;
+}
+
+static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	int ll_mtu = new_mtu + netdev->hard_header_len;
+	int old_hard_mtu = dev->hard_mtu;
+	int old_rx_urb_size = dev->rx_urb_size;
+	int ret;
+
+	if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+		return -EINVAL;
+
+	if (new_mtu <= 0)
+		return -EINVAL;
+	/* no second zero-length packet read wanted after mtu-sized packets */
+	if ((ll_mtu % dev->maxpacket) == 0)
+		return -EDOM;
+
+	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+
+	netdev->mtu = new_mtu;
+
+	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+	if (dev->rx_urb_size == old_hard_mtu) {
+		dev->rx_urb_size = dev->hard_mtu;
+		if (dev->rx_urb_size > old_rx_urb_size) {
+			if (netif_running(dev->net)) {
+				unlink_urbs(dev, &dev->rxq);
+				tasklet_schedule(&dev->bh);
+			}
+		}
+	}
+
+	return 0;
+}
+
+int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+	u32 addr_lo, addr_hi;
+	int ret;
+
+	if (netif_running(netdev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+	addr_lo = netdev->dev_addr[0] |
+		  netdev->dev_addr[1] << 8 |
+		  netdev->dev_addr[2] << 16 |
+		  netdev->dev_addr[3] << 24;
+	addr_hi = netdev->dev_addr[4] |
+		  netdev->dev_addr[5] << 8;
+
+	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
+	return 0;
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int lan78xx_set_features(struct net_device *netdev,
+				netdev_features_t features)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+	if (features & NETIF_F_RXCSUM) {
+		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
+		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
+	} else {
+		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
+		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
+	}
+
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
+	else
+		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
+
+	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+	return 0;
+}
+
+static void lan78xx_deferred_vlan_write(struct work_struct *param)
+{
+	struct lan78xx_priv *pdata =
+			container_of(param, struct lan78xx_priv, set_vlan);
+	struct lan78xx_net *dev = pdata->dev;
+
+	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
+			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
+}
+
+static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
+				   __be16 proto, u16 vid)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u16 vid_bit_index;
+	u16 vid_dword_index;
+
+	vid_dword_index = (vid >> 5) & 0x7F;
+	vid_bit_index = vid & 0x1F;
+
+	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
+
+	/* defer register writes to a sleepable context */
+	schedule_work(&pdata->set_vlan);
+
+	return 0;
+}
+
+static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
+				    __be16 proto, u16 vid)
+{
+	struct lan78xx_net *dev = netdev_priv(netdev);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u16 vid_bit_index;
+	u16 vid_dword_index;
+
+	vid_dword_index = (vid >> 5) & 0x7F;
+	vid_bit_index = vid & 0x1F;
+
+	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
+
+	/* defer register writes to a sleepable context */
+	schedule_work(&pdata->set_vlan);
+
+	return 0;
+}
+
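+/* Program the USB Latency Tolerance Messaging (LTM) registers.  If LTM
+ * is enabled and a 24-byte parameter block is provisioned in EEPROM or
+ * OTP it is loaded, otherwise the registers are zeroed.
+ */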
+static void lan78xx_init_ltm(struct lan78xx_net *dev)
+{
+	int ret;
+	u32 buf;
+	u32 regs[6] = { 0 };
+
+	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+	if (buf & USB_CFG1_LTM_ENABLE_) {
+		u8 temp[2];
+		/* Get values from EEPROM first */
+		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
+			if (temp[0] == 24) {
+				ret = lan78xx_read_raw_eeprom(dev,
+							      temp[1] * 2,
+							      24,
+							      (u8 *)regs);
+				if (ret < 0)
+					return;
+			}
+		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
+			if (temp[0] == 24) {
+				ret = lan78xx_read_raw_otp(dev,
+							   temp[1] * 2,
+							   24,
+							   (u8 *)regs);
+				if (ret < 0)
+					return;
+			}
+		}
+	}
+
+	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+}
+
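+/* Full chip initialisation: lite reset, MAC address, LTM, per-speed
+ * burst/bulk-in tuning, FIFO sizes and RFE defaults, then PHY reset
+ * and init, and finally enable the MAC/FCT TX and RX paths.
+ */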
+static int lan78xx_reset(struct lan78xx_net *dev)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 buf;
+	int ret = 0;
+	unsigned long timeout;
+
+	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+	buf |= HW_CFG_LRST_;
+	ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+	timeout = jiffies + HZ;
+	do {
+		mdelay(1);
+		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+		if (time_after(jiffies, timeout)) {
+			netdev_warn(dev->net,
+				    "timeout on completion of LiteReset");
+			return -EIO;
+		}
+	} while (buf & HW_CFG_LRST_);
+
+	lan78xx_init_mac_address(dev);
+
+	/* Respond to the IN token with a NAK */
+	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+	buf |= USB_CFG_BIR_;
+	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+	/* Init LTM */
+	lan78xx_init_ltm(dev);
+
+	dev->net->hard_header_len += TX_OVERHEAD;
+	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+	if (dev->udev->speed == USB_SPEED_SUPER) {
+		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+		dev->rx_qlen = 4;
+		dev->tx_qlen = 4;
+	} else if (dev->udev->speed == USB_SPEED_HIGH) {
+		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
+		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
+	} else {
+		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+		dev->rx_qlen = 4;
+		dev->tx_qlen = 4;
+	}
+
+	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+
+	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+	buf |= HW_CFG_MEF_;
+	ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+	buf |= USB_CFG_BCE_;
+	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+	/* set FIFO sizes */
+	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+
+	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+
+	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+	ret = lan78xx_write_reg(dev, FLOW, 0);
+	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+
+	/* Don't need rfe_ctl_lock during initialisation */
+	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+	/* Enable or disable checksum offload engines */
+	lan78xx_set_features(dev->net, dev->net->features);
+
+	lan78xx_set_multicast(dev->net);
+
+	/* reset PHY */
+	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+	buf |= PMT_CTL_PHY_RST_;
+	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+	timeout = jiffies + HZ;
+	do {
+		mdelay(1);
+		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+		if (time_after(jiffies, timeout)) {
+			netdev_warn(dev->net, "timeout waiting for PHY Reset");
+			return -EIO;
+		}
+	} while (buf & PMT_CTL_PHY_RST_);
+
+	lan78xx_mii_init(dev);
+
+	ret = lan78xx_phy_init(dev);
+
+	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+
+	buf |= MAC_CR_GMII_EN_;
+	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+
+	ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+	/* if EEE is enabled in MAC_CR, also enable EEE advertisement
+	 * on the PHY (MMD device 7, register 0x3C)
+	 */
+	if (buf & MAC_CR_EEE_EN_)
+		lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
+
+	/* enable PHY interrupts */
+	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+	buf |= INT_ENP_PHY_INT;
+	ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
+
+	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+	buf |= MAC_TX_TXEN_;
+	ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
+	buf |= FCT_TX_CTL_EN_;
+	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+
+	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+	buf |= MAC_RX_RXEN_;
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
+	buf |= FCT_RX_CTL_EN_;
+	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
+
+	if (!mii_nway_restart(&dev->mii))
+		netif_dbg(dev, link, dev->net, "autoneg initiated");
+
+	return 0;
+}
+
+static int lan78xx_open(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret < 0)
+		goto out;
+
+	ret = lan78xx_reset(dev);
+	if (ret < 0)
+		goto done;
+
+	/* for Link Check */
+	if (dev->urb_intr) {
+		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
+		if (ret < 0) {
+			netif_err(dev, ifup, dev->net,
+				  "intr submit %d\n", ret);
+			goto done;
+		}
+	}
+
+	set_bit(EVENT_DEV_OPEN, &dev->flags);
+
+	netif_start_queue(net);
+
+	dev->link_on = false;
+
+	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+done:
+	usb_autopm_put_interface(dev->intf);
+
+out:
+	return ret;
+}
+
+static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+	DECLARE_WAITQUEUE(wait, current);
+	int temp;
+
+	/* ensure there are no more active urbs */
+	add_wait_queue(&unlink_wakeup, &wait);
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	dev->wait = &unlink_wakeup;
+	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
+
+	/* maybe wait for deletions to finish. */
+	while (!skb_queue_empty(&dev->rxq) &&
+	       !skb_queue_empty(&dev->txq) &&
+	       !skb_queue_empty(&dev->done)) {
+		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		netif_dbg(dev, ifdown, dev->net,
+			  "waited for %d urb completions\n", temp);
+	}
+	set_current_state(TASK_RUNNING);
+	dev->wait = NULL;
+	remove_wait_queue(&unlink_wakeup, &wait);
+}
+
+int lan78xx_stop(struct net_device *net)
+{
+	struct lan78xx_net		*dev = netdev_priv(net);
+
+	clear_bit(EVENT_DEV_OPEN, &dev->flags);
+	netif_stop_queue(net);
+
+	netif_info(dev, ifdown, dev->net,
+		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
+		   net->stats.rx_packets, net->stats.tx_packets,
+		   net->stats.rx_errors, net->stats.tx_errors);
+
+	lan78xx_terminate_urbs(dev);
+
+	usb_kill_urb(dev->urb_intr);
+
+	skb_queue_purge(&dev->rxq_pause);
+
+	/* deferred work (task, timer, softirq) must also stop.
+	 * can't flush_scheduled_work() until we drop rtnl (later),
+	 * else workers could deadlock; so make workers a NOP.
+	 */
+	dev->flags = 0;
+	cancel_delayed_work_sync(&dev->wq);
+	tasklet_kill(&dev->bh);
+
+	usb_autopm_put_interface(dev->intf);
+
+	return 0;
+}
+
+static int lan78xx_linearize(struct sk_buff *skb)
+{
+	return skb_linearize(skb);
+}
+
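+/* Prepend the two little-endian TX command words the device expects in
+ * front of every frame: length/FCS/checksum-offload/LSO flags in
+ * command A, MSS and VLAN tag in command B.
+ */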
+static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
+				       struct sk_buff *skb, gfp_t flags)
+{
+	u32 tx_cmd_a, tx_cmd_b;
+
+	if (skb_headroom(skb) < TX_OVERHEAD) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+		if (!skb)
+			return NULL;
+	}
+
+	if (lan78xx_linearize(skb) < 0)
+		return NULL;
+
+	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+	tx_cmd_b = 0;
+	if (skb_is_gso(skb)) {
+		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+		tx_cmd_a |= TX_CMD_A_LSO_;
+	}
+
+	if (skb_vlan_tag_present(skb)) {
+		tx_cmd_a |= TX_CMD_A_IVTG_;
+		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
+	}
+
+	skb_push(skb, 4);
+	cpu_to_le32s(&tx_cmd_b);
+	memcpy(skb->data, &tx_cmd_b, 4);
+
+	skb_push(skb, 4);
+	cpu_to_le32s(&tx_cmd_a);
+	memcpy(skb->data, &tx_cmd_a, 4);
+
+	return skb;
+}
+
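+/* Move a completed skb from its rx/tx queue onto the done list and kick
+ * the bottom-half tasklet; returns the previous skb state so the caller
+ * can tell whether an unlink was already in progress.
+ */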
+static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
+			       struct sk_buff_head *list, enum skb_state state)
+{
+	unsigned long flags;
+	enum skb_state old_state;
+	struct skb_data *entry = (struct skb_data *)skb->cb;
+
+	spin_lock_irqsave(&list->lock, flags);
+	old_state = entry->state;
+	entry->state = state;
+
+	__skb_unlink(skb, list);
+	spin_unlock(&list->lock);
+	spin_lock(&dev->done.lock);
+
+	__skb_queue_tail(&dev->done, skb);
+	if (skb_queue_len(&dev->done) == 1)
+		tasklet_schedule(&dev->bh);
+	spin_unlock_irqrestore(&dev->done.lock, flags);
+
+	return old_state;
+}
+
+static void tx_complete(struct urb *urb)
+{
+	struct sk_buff *skb = (struct sk_buff *)urb->context;
+	struct skb_data *entry = (struct skb_data *)skb->cb;
+	struct lan78xx_net *dev = entry->dev;
+
+	if (urb->status == 0) {
+		dev->net->stats.tx_packets++;
+		dev->net->stats.tx_bytes += entry->length;
+	} else {
+		dev->net->stats.tx_errors++;
+
+		switch (urb->status) {
+		case -EPIPE:
+			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+			break;
+
+		/* software-driven interface shutdown */
+		case -ECONNRESET:
+		case -ESHUTDOWN:
+			break;
+
+		case -EPROTO:
+		case -ETIME:
+		case -EILSEQ:
+			netif_stop_queue(dev->net);
+			break;
+		default:
+			netif_dbg(dev, tx_err, dev->net,
+				  "tx err %d\n", entry->urb->status);
+			break;
+		}
+	}
+
+	usb_autopm_put_interface_async(dev->intf);
+
+	defer_bh(dev, skb, &dev->txq, tx_done);
+}
+
+static void lan78xx_queue_skb(struct sk_buff_head *list,
+			      struct sk_buff *newsk, enum skb_state state)
+{
+	struct skb_data *entry = (struct skb_data *)newsk->cb;
+
+	__skb_queue_tail(list, newsk);
+	entry->state = state;
+}
+
+netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+	struct sk_buff *skb2 = NULL;
+
+	if (skb) {
+		skb_tx_timestamp(skb);
+		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+	}
+
+	if (skb2) {
+		skb_queue_tail(&dev->txq_pend, skb2);
+
+		if (skb_queue_len(&dev->txq_pend) > 10)
+			netif_stop_queue(net);
+	} else {
+		netif_dbg(dev, tx_err, dev->net,
+			  "lan78xx_tx_prep return NULL\n");
+		dev->net->stats.tx_errors++;
+		dev->net->stats.tx_dropped++;
+	}
+
+	tasklet_schedule(&dev->bh);
+
+	return NETDEV_TX_OK;
+}
+
+int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+	int tmp;
+	struct usb_host_interface *alt = NULL;
+	struct usb_host_endpoint *in = NULL, *out = NULL;
+	struct usb_host_endpoint *status = NULL;
+
+	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+		unsigned ep;
+
+		in = NULL;
+		out = NULL;
+		status = NULL;
+		alt = intf->altsetting + tmp;
+
+		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+			struct usb_host_endpoint *e;
+			int intr = 0;
+
+			e = alt->endpoint + ep;
+			switch (e->desc.bmAttributes) {
+			case USB_ENDPOINT_XFER_INT:
+				if (!usb_endpoint_dir_in(&e->desc))
+					continue;
+				intr = 1;
+				/* FALLTHROUGH */
+			case USB_ENDPOINT_XFER_BULK:
+				break;
+			default:
+				continue;
+			}
+			if (usb_endpoint_dir_in(&e->desc)) {
+				if (!intr && !in)
+					in = e;
+				else if (intr && !status)
+					status = e;
+			} else {
+				if (!out)
+					out = e;
+			}
+		}
+		if (in && out)
+			break;
+	}
+	if (!alt || !in || !out)
+		return -EINVAL;
+
+	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+				       in->desc.bEndpointAddress &
+				       USB_ENDPOINT_NUMBER_MASK);
+	dev->pipe_out = usb_sndbulkpipe(dev->udev,
+					out->desc.bEndpointAddress &
+					USB_ENDPOINT_NUMBER_MASK);
+	dev->ep_intr = status;
+
+	return 0;
+}
+
+static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+	struct lan78xx_priv *pdata = NULL;
+	int ret;
+	int i;
+
+	ret = lan78xx_get_endpoints(dev, intf);
+
+	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+	pdata = (struct lan78xx_priv *)(dev->data[0]);
+	if (!pdata) {
+		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
+		return -ENOMEM;
+	}
+
+	pdata->dev = dev;
+
+	spin_lock_init(&pdata->rfe_ctl_lock);
+	mutex_init(&pdata->dataport_mutex);
+
+	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
+
+	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
+		pdata->vlan_table[i] = 0;
+
+	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
+
+	dev->net->features = 0;
+
+	if (DEFAULT_TX_CSUM_ENABLE)
+		dev->net->features |= NETIF_F_HW_CSUM;
+
+	if (DEFAULT_RX_CSUM_ENABLE)
+		dev->net->features |= NETIF_F_RXCSUM;
+
+	if (DEFAULT_TSO_CSUM_ENABLE)
+		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+
+	dev->net->hw_features = dev->net->features;
+
+	/* Init all registers */
+	ret = lan78xx_reset(dev);
+
+	dev->net->flags |= IFF_MULTICAST;
+
+	pdata->wol = WAKE_MAGIC;
+
+	return 0;
+}
+
+static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+	if (pdata) {
+		netif_dbg(dev, ifdown, dev->net, "free pdata");
+		kfree(pdata);
+		pdata = NULL;
+		dev->data[0] = 0;
+	}
+}
+
+static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
+				    struct sk_buff *skb,
+				    u32 rx_cmd_a, u32 rx_cmd_b)
+{
+	if (!(dev->net->features & NETIF_F_RXCSUM) ||
+	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+		skb->ip_summed = CHECKSUM_NONE;
+	} else {
+		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+}
+
+void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+	int		status;
+
+	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
+		skb_queue_tail(&dev->rxq_pause, skb);
+		return;
+	}
+
+	skb->protocol = eth_type_trans(skb, dev->net);
+	dev->net->stats.rx_packets++;
+	dev->net->stats.rx_bytes += skb->len;
+
+	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+		  skb->len + sizeof(struct ethhdr), skb->protocol);
+	memset(skb->cb, 0, sizeof(struct skb_data));
+
+	if (skb_defer_rx_timestamp(skb))
+		return;
+
+	status = netif_rx(skb);
+	if (status != NET_RX_SUCCESS)
+		netif_dbg(dev, rx_err, dev->net,
+			  "netif_rx status %d\n", status);
+}
+
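+/* Parse one bulk-in buffer, which may carry several frames each
+ * prefixed by RX command words A/B/C: every frame except the last is
+ * cloned out, the last one reuses the original skb.
+ */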
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+	if (skb->len < dev->net->hard_header_len)
+		return 0;
+
+	while (skb->len > 0) {
+		u32 rx_cmd_a, rx_cmd_b, align_count, size;
+		u16 rx_cmd_c;
+		struct sk_buff *skb2;
+		unsigned char *packet;
+
+		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+		le32_to_cpus(&rx_cmd_a);
+		skb_pull(skb, sizeof(rx_cmd_a));
+
+		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+		le32_to_cpus(&rx_cmd_b);
+		skb_pull(skb, sizeof(rx_cmd_b));
+
+		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
+		le16_to_cpus(&rx_cmd_c);
+		skb_pull(skb, sizeof(rx_cmd_c));
+
+		packet = skb->data;
+
+		/* get the packet length */
+		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
+		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
+		} else {
+			/* last frame in this batch */
+			if (skb->len == size) {
+				lan78xx_rx_csum_offload(dev, skb,
+							rx_cmd_a, rx_cmd_b);
+
+				skb_trim(skb, skb->len - 4); /* remove fcs */
+				skb->truesize = size + sizeof(struct sk_buff);
+
+				return 1;
+			}
+
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (unlikely(!skb2)) {
+				netdev_warn(dev->net, "Error allocating skb");
+				return 0;
+			}
+
+			skb2->len = size;
+			skb2->data = packet;
+			skb_set_tail_pointer(skb2, size);
+
+			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+
+			skb_trim(skb2, skb2->len - 4); /* remove fcs */
+			skb2->truesize = size + sizeof(struct sk_buff);
+
+			lan78xx_skb_return(dev, skb2);
+		}
+
+		skb_pull(skb, size);
+
+		/* padding bytes before the next frame starts */
+		if (skb->len)
+			skb_pull(skb, align_count);
+	}
+
+	if (unlikely(skb->len < 0)) {
+		netdev_warn(dev->net, "invalid rx length<0 %d", skb->len);
+		return 0;
+	}
+
+	return 1;
+}
+
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+	if (!lan78xx_rx(dev, skb)) {
+		dev->net->stats.rx_errors++;
+		goto done;
+	}
+
+	if (skb->len) {
+		lan78xx_skb_return(dev, skb);
+		return;
+	}
+
+	netif_dbg(dev, rx_err, dev->net, "drop\n");
+	dev->net->stats.rx_errors++;
+done:
+	skb_queue_tail(&dev->done, skb);
+}
+
+static void rx_complete(struct urb *urb);
+
+static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
+{
+	struct sk_buff *skb;
+	struct skb_data *entry;
+	unsigned long lockflags;
+	size_t size = dev->rx_urb_size;
+	int ret = 0;
+
+	skb = netdev_alloc_skb_ip_align(dev->net, size);
+	if (!skb) {
+		usb_free_urb(urb);
+		return -ENOMEM;
+	}
+
+	entry = (struct skb_data *)skb->cb;
+	entry->urb = urb;
+	entry->dev = dev;
+	entry->length = 0;
+
+	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+			  skb->data, size, rx_complete, skb);
+
+	spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
+	if (netif_device_present(dev->net) &&
+	    netif_running(dev->net) &&
+	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
+	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		ret = usb_submit_urb(urb, GFP_ATOMIC);
+		switch (ret) {
+		case 0:
+			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+			break;
+		case -EPIPE:
+			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+			break;
+		case -ENODEV:
+			netif_dbg(dev, ifdown, dev->net, "device gone\n");
+			netif_device_detach(dev->net);
+			break;
+		case -EHOSTUNREACH:
+			ret = -ENOLINK;
+			break;
+		default:
+			netif_dbg(dev, rx_err, dev->net,
+				  "rx submit, %d\n", ret);
+			tasklet_schedule(&dev->bh);
+		}
+	} else {
+		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+		ret = -ENOLINK;
+	}
+	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+	if (ret) {
+		dev_kfree_skb_any(skb);
+		usb_free_urb(urb);
+	}
+	return ret;
+}
+
+static void rx_complete(struct urb *urb)
+{
+	struct sk_buff	*skb = (struct sk_buff *)urb->context;
+	struct skb_data	*entry = (struct skb_data *)skb->cb;
+	struct lan78xx_net *dev = entry->dev;
+	int urb_status = urb->status;
+	enum skb_state state;
+
+	skb_put(skb, urb->actual_length);
+	state = rx_done;
+	entry->urb = NULL;
+
+	switch (urb_status) {
+	case 0:
+		if (skb->len < dev->net->hard_header_len) {
+			state = rx_cleanup;
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_length_errors++;
+			netif_dbg(dev, rx_err, dev->net,
+				  "rx length %d\n", skb->len);
+		}
+		usb_mark_last_busy(dev->udev);
+		break;
+	case -EPIPE:
+		dev->net->stats.rx_errors++;
+		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+		/* FALLTHROUGH */
+	case -ECONNRESET:				/* async unlink */
+	case -ESHUTDOWN:				/* hardware gone */
+		netif_dbg(dev, ifdown, dev->net,
+			  "rx shutdown, code %d\n", urb_status);
+		state = rx_cleanup;
+		entry->urb = urb;
+		urb = NULL;
+		break;
+	case -EPROTO:
+	case -ETIME:
+	case -EILSEQ:
+		dev->net->stats.rx_errors++;
+		state = rx_cleanup;
+		entry->urb = urb;
+		urb = NULL;
+		break;
+
+	/* data overrun ... flush fifo? */
+	case -EOVERFLOW:
+		dev->net->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		state = rx_cleanup;
+		dev->net->stats.rx_errors++;
+		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+		break;
+	}
+
+	state = defer_bh(dev, skb, &dev->rxq, state);
+
+	if (urb) {
+		if (netif_running(dev->net) &&
+		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
+		    state != unlink_start) {
+			rx_submit(dev, urb, GFP_ATOMIC);
+			return;
+		}
+		usb_free_urb(urb);
+	}
+	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+}
+
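+/* Transmit bottom half: send a GSO skb on its own, otherwise coalesce
+ * the pending frames (each placed on a 32-bit boundary) into a single
+ * bulk-out URB and submit it.
+ */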
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
+{
+	int length;
+	struct urb *urb = NULL;
+	struct skb_data *entry;
+	unsigned long flags;
+	struct sk_buff_head *tqp = &dev->txq_pend;
+	struct sk_buff *skb, *skb2;
+	int ret;
+	int count, pos;
+	int skb_totallen, pkt_cnt;
+
+	skb_totallen = 0;
+	pkt_cnt = 0;
+	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+		if (skb_is_gso(skb)) {
+			if (pkt_cnt) {
+				/* handle previous packets first */
+				break;
+			}
+			length = skb->len;
+			skb2 = skb_dequeue(tqp);
+			goto gso_skb;
+		}
+
+		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+			break;
+		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+		pkt_cnt++;
+	}
+
+	/* copy to a single skb */
+	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+	if (!skb)
+		goto drop;
+
+	skb_put(skb, skb_totallen);
+
+	for (count = pos = 0; count < pkt_cnt; count++) {
+		skb2 = skb_dequeue(tqp);
+		if (skb2) {
+			memcpy(skb->data + pos, skb2->data, skb2->len);
+			pos += roundup(skb2->len, sizeof(u32));
+			dev_kfree_skb(skb2);
+		}
+	}
+
+	length = skb_totallen;
+
+gso_skb:
+	urb = usb_alloc_urb(0, GFP_ATOMIC);
+	if (!urb) {
+		netif_dbg(dev, tx_err, dev->net, "no urb\n");
+		goto drop;
+	}
+
+	entry = (struct skb_data *)skb->cb;
+	entry->urb = urb;
+	entry->dev = dev;
+	entry->length = length;
+
+	spin_lock_irqsave(&dev->txq.lock, flags);
+	ret = usb_autopm_get_interface_async(dev->intf);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&dev->txq.lock, flags);
+		goto drop;
+	}
+
+	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
+			  skb->data, skb->len, tx_complete, skb);
+
+	if (length % dev->maxpacket == 0) {
+		/* send USB_ZERO_PACKET */
+		urb->transfer_flags |= URB_ZERO_PACKET;
+	}
+
+#ifdef CONFIG_PM
+	/* if this triggers, the device is still asleep */
+	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		/* transmission will be done in resume */
+		usb_anchor_urb(urb, &dev->deferred);
+		/* no use to process more packets */
+		netif_stop_queue(dev->net);
+		usb_put_urb(urb);
+		spin_unlock_irqrestore(&dev->txq.lock, flags);
+		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+		return;
+	}
+#endif
+
+	ret = usb_submit_urb(urb, GFP_ATOMIC);
+	switch (ret) {
+	case 0:
+		dev->net->trans_start = jiffies;
+		lan78xx_queue_skb(&dev->txq, skb, tx_start);
+		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
+			netif_stop_queue(dev->net);
+		break;
+	case -EPIPE:
+		netif_stop_queue(dev->net);
+		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+		usb_autopm_put_interface_async(dev->intf);
+		break;
+	default:
+		usb_autopm_put_interface_async(dev->intf);
+		netif_dbg(dev, tx_err, dev->net,
+			  "tx: submit urb err %d\n", ret);
+		break;
+	}
+
+	spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+	if (ret) {
+		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
+drop:
+		dev->net->stats.tx_dropped++;
+		if (skb)
+			dev_kfree_skb_any(skb);
+		usb_free_urb(urb);
+	} else
+		netif_dbg(dev, tx_queued, dev->net,
+			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
+}
+
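+/* RX bottom half: top up the RX URB queue, allocating up to ten URBs per
+ * pass, and wake the netdev queue once the TX queue has drained below its
+ * limit.
+ */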
+static void lan78xx_rx_bh(struct lan78xx_net *dev)
+{
+	struct urb *urb;
+	int i;
+
+	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
+		for (i = 0; i < 10; i++) {
+			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
+				break;
+			urb = usb_alloc_urb(0, GFP_ATOMIC);
+			if (urb)
+				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
+					return;
+		}
+
+		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
+			tasklet_schedule(&dev->bh);
+	}
+	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+		netif_wake_queue(dev->net);
+}
+
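+/* Tasklet body: drain the "done" queue, processing received frames and
+ * releasing completed or cancelled URBs, then kick the TX and RX bottom
+ * halves while the interface is up.
+ */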
+static void lan78xx_bh(unsigned long param)
+{
+	struct lan78xx_net *dev = (struct lan78xx_net *)param;
+	struct sk_buff *skb;
+	struct skb_data *entry;
+
+	while ((skb = skb_dequeue(&dev->done))) {
+		entry = (struct skb_data *)(skb->cb);
+		switch (entry->state) {
+		case rx_done:
+			entry->state = rx_cleanup;
+			rx_process(dev, skb);
+			continue;
+		case tx_done:
+			usb_free_urb(entry->urb);
+			dev_kfree_skb(skb);
+			continue;
+		case rx_cleanup:
+			usb_free_urb(entry->urb);
+			dev_kfree_skb(skb);
+			continue;
+		default:
+			netdev_dbg(dev->net, "skb state %d\n", entry->state);
+			return;
+		}
+	}
+
+	if (netif_device_present(dev->net) && netif_running(dev->net)) {
+		if (!skb_queue_empty(&dev->txq_pend))
+			lan78xx_tx_bh(dev);
+
+		if (!timer_pending(&dev->delay) &&
+		    !test_bit(EVENT_RX_HALT, &dev->flags))
+			lan78xx_rx_bh(dev);
+	}
+}
+
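+/* Deferred work: clear bulk endpoint halts flagged by the completion
+ * handlers and perform any pending link reset, holding an autopm reference
+ * around each USB access.
+ */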
+static void lan78xx_delayedwork(struct work_struct *work)
+{
+	int status;
+	struct lan78xx_net *dev;
+
+	dev = container_of(work, struct lan78xx_net, wq.work);
+
+	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+		unlink_urbs(dev, &dev->txq);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto fail_pipe;
+		status = usb_clear_halt(dev->udev, dev->pipe_out);
+		usb_autopm_put_interface(dev->intf);
+		if (status < 0 &&
+		    status != -EPIPE &&
+		    status != -ESHUTDOWN) {
+			if (netif_msg_tx_err(dev))
+fail_pipe:
+				netdev_err(dev->net,
+					   "can't clear tx halt, status %d\n",
+					   status);
+		} else {
+			clear_bit(EVENT_TX_HALT, &dev->flags);
+			if (status != -ESHUTDOWN)
+				netif_wake_queue(dev->net);
+		}
+	}
+	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+		unlink_urbs(dev, &dev->rxq);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto fail_halt;
+		status = usb_clear_halt(dev->udev, dev->pipe_in);
+		usb_autopm_put_interface(dev->intf);
+		if (status < 0 &&
+		    status != -EPIPE &&
+		    status != -ESHUTDOWN) {
+			if (netif_msg_rx_err(dev))
+fail_halt:
+				netdev_err(dev->net,
+					   "can't clear rx halt, status %d\n",
+					   status);
+		} else {
+			clear_bit(EVENT_RX_HALT, &dev->flags);
+			tasklet_schedule(&dev->bh);
+		}
+	}
+
+	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+		int ret = 0;
+
+		clear_bit(EVENT_LINK_RESET, &dev->flags);
+		status = usb_autopm_get_interface(dev->intf);
+		if (status < 0)
+			goto skip_reset;
+		ret = lan78xx_link_reset(dev);
+		if (ret < 0) {
+			usb_autopm_put_interface(dev->intf);
+skip_reset:
+			netdev_info(dev->net, "link reset failed (%d)\n",
+				    ret);
+		} else {
+			usb_autopm_put_interface(dev->intf);
+		}
+	}
+}
+
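+/* Completion handler for the interrupt endpoint: pass status words to
+ * lan78xx_status() and resubmit the URB unless the interface has been shut
+ * down.
+ */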
+static void intr_complete(struct urb *urb)
+{
+	struct lan78xx_net *dev = urb->context;
+	int status = urb->status;
+
+	switch (status) {
+	/* success */
+	case 0:
+		lan78xx_status(dev, urb);
+		break;
+
+	/* software-driven interface shutdown */
+	case -ENOENT:			/* urb killed */
+	case -ESHUTDOWN:		/* hardware gone */
+		netif_dbg(dev, ifdown, dev->net,
+			  "intr shutdown, code %d\n", status);
+		return;
+
+	/* NOTE:  not throttling like RX/TX, since this endpoint
+	 * already polls infrequently
+	 */
+	default:
+		netdev_dbg(dev->net, "intr status %d\n", status);
+		break;
+	}
+
+	if (!netif_running(dev->net))
+		return;
+
+	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+	status = usb_submit_urb(urb, GFP_ATOMIC);
+	if (status != 0)
+		netif_err(dev, timer, dev->net,
+			  "intr resubmit --> %d\n", status);
+}
+
+static void lan78xx_disconnect(struct usb_interface *intf)
+{
+	struct lan78xx_net		*dev;
+	struct usb_device		*udev;
+	struct net_device		*net;
+
+	dev = usb_get_intfdata(intf);
+	usb_set_intfdata(intf, NULL);
+	if (!dev)
+		return;
+
+	udev = interface_to_usbdev(intf);
+
+	net = dev->net;
+	unregister_netdev(net);
+
+	cancel_delayed_work_sync(&dev->wq);
+
+	usb_scuttle_anchored_urbs(&dev->deferred);
+
+	lan78xx_unbind(dev, intf);
+
+	usb_kill_urb(dev->urb_intr);
+	usb_free_urb(dev->urb_intr);
+
+	free_netdev(net);
+	usb_put_dev(udev);
+}
+
+void lan78xx_tx_timeout(struct net_device *net)
+{
+	struct lan78xx_net *dev = netdev_priv(net);
+
+	unlink_urbs(dev, &dev->txq);
+	tasklet_schedule(&dev->bh);
+}
+
+static const struct net_device_ops lan78xx_netdev_ops = {
+	.ndo_open		= lan78xx_open,
+	.ndo_stop		= lan78xx_stop,
+	.ndo_start_xmit		= lan78xx_start_xmit,
+	.ndo_tx_timeout		= lan78xx_tx_timeout,
+	.ndo_change_mtu		= lan78xx_change_mtu,
+	.ndo_set_mac_address	= lan78xx_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= lan78xx_ioctl,
+	.ndo_set_rx_mode	= lan78xx_set_multicast,
+	.ndo_set_features	= lan78xx_set_features,
+	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
+};
+
+static int lan78xx_probe(struct usb_interface *intf,
+			 const struct usb_device_id *id)
+{
+	struct lan78xx_net *dev;
+	struct net_device *netdev;
+	struct usb_device *udev;
+	int ret;
+	unsigned maxp;
+	unsigned period;
+	u8 *buf = NULL;
+
+	udev = interface_to_usbdev(intf);
+	udev = usb_get_dev(udev);
+
+	ret = -ENOMEM;
+	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
+	if (!netdev) {
+		dev_err(&intf->dev, "Error: OOM\n");
+		goto out1;
+	}
+
+	/* netdev_printk() needs this */
+	SET_NETDEV_DEV(netdev, &intf->dev);
+
+	dev = netdev_priv(netdev);
+	dev->udev = udev;
+	dev->intf = intf;
+	dev->net = netdev;
+	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
+					| NETIF_MSG_PROBE | NETIF_MSG_LINK);
+
+	skb_queue_head_init(&dev->rxq);
+	skb_queue_head_init(&dev->txq);
+	skb_queue_head_init(&dev->done);
+	skb_queue_head_init(&dev->rxq_pause);
+	skb_queue_head_init(&dev->txq_pend);
+	mutex_init(&dev->phy_mutex);
+
+	tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
+	init_usb_anchor(&dev->deferred);
+
+	netdev->netdev_ops = &lan78xx_netdev_ops;
+	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+	netdev->ethtool_ops = &lan78xx_ethtool_ops;
+
+	ret = lan78xx_bind(dev, intf);
+	if (ret < 0)
+		goto out2;
+	strcpy(netdev->name, "eth%d");
+
+	if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
+		netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+
+	dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+	dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+	dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+
+	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+
+	dev->pipe_intr = usb_rcvintpipe(dev->udev,
+					dev->ep_intr->desc.bEndpointAddress &
+					USB_ENDPOINT_NUMBER_MASK);
+	period = dev->ep_intr->desc.bInterval;
+
+	maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+	buf = kmalloc(maxp, GFP_KERNEL);
+	if (buf) {
+		dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+		if (!dev->urb_intr) {
+			kfree(buf);
+			goto out3;
+		} else {
+			usb_fill_int_urb(dev->urb_intr, dev->udev,
+					 dev->pipe_intr, buf, maxp,
+					 intr_complete, dev, period);
+		}
+	}
+
+	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
+	/* driver requires remote-wakeup capability during autosuspend. */
+	intf->needs_remote_wakeup = 1;
+
+	ret = register_netdev(netdev);
+	if (ret != 0) {
+		netif_err(dev, probe, netdev, "couldn't register the device\n");
+		goto out3;
+	}
+
+	usb_set_intfdata(intf, dev);
+
+	ret = device_set_wakeup_enable(&udev->dev, true);
+
+	/* The default autosuspend delay of 2 sec has more overhead than
+	 * advantage; use 10 sec instead.
+	 */
+	pm_runtime_set_autosuspend_delay(&udev->dev,
+					 DEFAULT_AUTOSUSPEND_DELAY);
+
+	return 0;
+
+out3:
+	lan78xx_unbind(dev, intf);
+out2:
+	free_netdev(netdev);
+out1:
+	usb_put_dev(udev);
+
+	return ret;
+}
+
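+/* Bit-serial CRC-16 (polynomial 0x8005) over a wake frame byte pattern; the
+ * result is written into the WUF_CFGx CRC16 field so the MAC can match
+ * frames while suspended.
+ */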
+static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
+{
+	const u16 crc16poly = 0x8005;
+	int i;
+	u16 bit, crc, msb;
+	u8 data;
+
+	crc = 0xFFFF;
+	for (i = 0; i < len; i++) {
+		data = *buf++;
+		for (bit = 0; bit < 8; bit++) {
+			msb = crc >> 15;
+			crc <<= 1;
+
+			if (msb ^ (u16)(data & 1)) {
+				crc ^= crc16poly;
+				crc |= (u16)0x0001U;
+			}
+			data >>= 1;
+		}
+	}
+
+	return crc;
+}
+
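+/* Program the wake sources selected through the WoL flags: stop the MAC
+ * transmitter and receiver, clear stale wake status, set up WUCSR and the
+ * wake-up frame filters, choose a suspend mode in PMT_CTL, and re-enable
+ * the receiver so wake frames can be detected.
+ */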
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
+	u32 buf;
+	int ret;
+	int mask_index;
+	u16 crc;
+	u32 temp_wucsr;
+	u32 temp_pmt_ctl;
+	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
+	const u8 arp_type[2] = { 0x08, 0x06 };
+
+	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+	buf &= ~MAC_TX_TXEN_;
+	ret = lan78xx_write_reg(dev, MAC_TX, buf);
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+	buf &= ~MAC_RX_RXEN_;
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	ret = lan78xx_write_reg(dev, WUCSR, 0);
+	ret = lan78xx_write_reg(dev, WUCSR2, 0);
+	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+	temp_wucsr = 0;
+
+	temp_pmt_ctl = 0;
+	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
+	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
+
+	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+
+	mask_index = 0;
+	if (wol & WAKE_PHY) {
+		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_MAGIC) {
+		temp_wucsr |= WUCSR_MPEN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
+	}
+	if (wol & WAKE_BCAST) {
+		temp_wucsr |= WUCSR_BCST_EN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_MCAST) {
+		temp_wucsr |= WUCSR_WAKE_EN_;
+
+		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
+		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+					WUF_CFGX_EN_ |
+					WUF_CFGX_TYPE_MCAST_ |
+					(0 << WUF_CFGX_OFFSET_SHIFT_) |
+					(crc & WUF_CFGX_CRC16_MASK_));
+
+		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+		mask_index++;
+
+		/* for IPv6 Multicast */
+		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+					WUF_CFGX_EN_ |
+					WUF_CFGX_TYPE_MCAST_ |
+					(0 << WUF_CFGX_OFFSET_SHIFT_) |
+					(crc & WUF_CFGX_CRC16_MASK_));
+
+		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+		mask_index++;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_UCAST) {
+		temp_wucsr |= WUCSR_PFDA_EN_;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	if (wol & WAKE_ARP) {
+		temp_wucsr |= WUCSR_WAKE_EN_;
+
+		/* set WUF_CFG & WUF_MASK
+		 * for packet type (offset 12,13) = ARP (0x0806)
+		 */
+		crc = lan78xx_wakeframe_crc16(arp_type, 2);
+		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+					WUF_CFGX_EN_ |
+					WUF_CFGX_TYPE_ALL_ |
+					(0 << WUF_CFGX_OFFSET_SHIFT_) |
+					(crc & WUF_CFGX_CRC16_MASK_));
+
+		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+		mask_index++;
+
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+
+	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+
+	/* when multiple WOL bits are set */
+	if (hweight_long((unsigned long)wol) > 1) {
+		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+	}
+	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+
+	/* clear WUPS */
+	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+	buf |= PMT_CTL_WUPS_MASK_;
+	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+	buf |= MAC_RX_RXEN_;
+	ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+	return 0;
+}
+
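+/* USB suspend callback: refuse autosuspend while TX work is pending;
+ * otherwise stop the MAC, terminate in-flight URBs, and arm either
+ * good-frame wake (for autosuspend) or the configured WoL settings.
+ */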
+int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct lan78xx_net *dev = usb_get_intfdata(intf);
+	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+	u32 buf;
+	int ret;
+	int event;
+
+	ret = 0;
+	event = message.event;
+
+	if (!dev->suspend_count++) {
+		spin_lock_irq(&dev->txq.lock);
+		/* don't autosuspend while transmitting */
+		if ((skb_queue_len(&dev->txq) ||
+		     skb_queue_len(&dev->txq_pend)) &&
+			PMSG_IS_AUTO(message)) {
+			spin_unlock_irq(&dev->txq.lock);
+			ret = -EBUSY;
+			goto out;
+		} else {
+			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+			spin_unlock_irq(&dev->txq.lock);
+		}
+
+		/* stop TX & RX */
+		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+		buf &= ~MAC_TX_TXEN_;
+		ret = lan78xx_write_reg(dev, MAC_TX, buf);
+		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+		buf &= ~MAC_RX_RXEN_;
+		ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+		/* empty out the rx and tx queues */
+		netif_device_detach(dev->net);
+		lan78xx_terminate_urbs(dev);
+		usb_kill_urb(dev->urb_intr);
+
+		/* reattach */
+		netif_device_attach(dev->net);
+	}
+
+	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+		if (PMSG_IS_AUTO(message)) {
+			/* auto suspend (selective suspend) */
+			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+			buf &= ~MAC_TX_TXEN_;
+			ret = lan78xx_write_reg(dev, MAC_TX, buf);
+			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+			buf &= ~MAC_RX_RXEN_;
+			ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+			ret = lan78xx_write_reg(dev, WUCSR, 0);
+			ret = lan78xx_write_reg(dev, WUCSR2, 0);
+			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+			/* set goodframe wakeup */
+			ret = lan78xx_read_reg(dev, WUCSR, &buf);
+
+			buf |= WUCSR_RFE_WAKE_EN_;
+			buf |= WUCSR_STORE_WAKE_;
+
+			ret = lan78xx_write_reg(dev, WUCSR, buf);
+
+			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+			buf |= PMT_CTL_RES_CLR_WKP_STS_;
+
+			buf |= PMT_CTL_PHY_WAKE_EN_;
+			buf |= PMT_CTL_WOL_EN_;
+			buf &= ~PMT_CTL_SUS_MODE_MASK_;
+			buf |= PMT_CTL_SUS_MODE_3_;
+
+			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+			buf |= PMT_CTL_WUPS_MASK_;
+
+			ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+			buf |= MAC_RX_RXEN_;
+			ret = lan78xx_write_reg(dev, MAC_RX, buf);
+		} else {
+			lan78xx_set_suspend(dev, pdata->wol);
+		}
+	}
+
+out:
+	return ret;
+}
+
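+/* USB resume callback: resubmit the interrupt URB and any TX URBs deferred
+ * while asleep, clear the wake status registers, and re-enable the MAC
+ * transmitter.
+ */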
+int lan78xx_resume(struct usb_interface *intf)
+{
+	struct lan78xx_net *dev = usb_get_intfdata(intf);
+	struct sk_buff *skb;
+	struct urb *res;
+	int ret;
+	u32 buf;
+
+	if (!--dev->suspend_count) {
+		/* resume interrupt URBs */
+		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
+			usb_submit_urb(dev->urb_intr, GFP_NOIO);
+
+		spin_lock_irq(&dev->txq.lock);
+		while ((res = usb_get_from_anchor(&dev->deferred))) {
+			skb = (struct sk_buff *)res->context;
+			ret = usb_submit_urb(res, GFP_ATOMIC);
+			if (ret < 0) {
+				dev_kfree_skb_any(skb);
+				usb_free_urb(res);
+				usb_autopm_put_interface_async(dev->intf);
+			} else {
+				dev->net->trans_start = jiffies;
+				lan78xx_queue_skb(&dev->txq, skb, tx_start);
+			}
+		}
+
+		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+		spin_unlock_irq(&dev->txq.lock);
+
+		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
+				netif_start_queue(dev->net);
+			tasklet_schedule(&dev->bh);
+		}
+	}
+
+	ret = lan78xx_write_reg(dev, WUCSR2, 0);
+	ret = lan78xx_write_reg(dev, WUCSR, 0);
+	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
+					     WUCSR2_ARP_RCD_ |
+					     WUCSR2_IPV6_TCPSYN_RCD_ |
+					     WUCSR2_IPV4_TCPSYN_RCD_);
+
+	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
+					    WUCSR_EEE_RX_WAKE_ |
+					    WUCSR_PFDA_FR_ |
+					    WUCSR_RFE_WAKE_FR_ |
+					    WUCSR_WUFR_ |
+					    WUCSR_MPR_ |
+					    WUCSR_BCST_FR_);
+
+	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+	buf |= MAC_TX_TXEN_;
+	ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+	return 0;
+}
+
+int lan78xx_reset_resume(struct usb_interface *intf)
+{
+	struct lan78xx_net *dev = usb_get_intfdata(intf);
+
+	lan78xx_reset(dev);
+	return lan78xx_resume(intf);
+}
+
+static const struct usb_device_id products[] = {
+	{
+	/* LAN7800 USB Gigabit Ethernet Device */
+	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
+	},
+	{
+	/* LAN7850 USB Gigabit Ethernet Device */
+	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lan78xx_driver = {
+	.name			= DRIVER_NAME,
+	.id_table		= products,
+	.probe			= lan78xx_probe,
+	.disconnect		= lan78xx_disconnect,
+	.suspend		= lan78xx_suspend,
+	.resume			= lan78xx_resume,
+	.reset_resume		= lan78xx_reset_resume,
+	.supports_autosuspend	= 1,
+	.disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(lan78xx_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
new file mode 100644
index 0000000..ae7562e
--- /dev/null
+++ b/drivers/net/usb/lan78xx.h
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LAN78XX_H
+#define _LAN78XX_H
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER	0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER	0xA1
+#define USB_VENDOR_REQUEST_GET_STATS		0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_EEE_START_TX_LPI_INT		BIT(26)
+#define INT_ENP_EEE_STOP_TX_LPI_INT		BIT(25)
+#define INT_ENP_EEE_RX_LPI_INT			BIT(24)
+#define INT_ENP_RDFO_INT			BIT(22)
+#define INT_ENP_TXE_INT				BIT(21)
+#define INT_ENP_TX_DIS_INT			BIT(19)
+#define INT_ENP_RX_DIS_INT			BIT(18)
+#define INT_ENP_PHY_INT				BIT(17)
+#define INT_ENP_DP_INT				BIT(16)
+#define INT_ENP_MAC_ERR_INT			BIT(15)
+#define INT_ENP_TDFU_INT			BIT(14)
+#define INT_ENP_TDFO_INT			BIT(13)
+#define INT_ENP_UTX_FP_INT			BIT(12)
+
+#define TX_PKT_ALIGNMENT			4
+#define RX_PKT_ALIGNMENT			4
+
+/* Tx Command A */
+#define TX_CMD_A_IGE_			(0x20000000)
+#define TX_CMD_A_ICE_			(0x10000000)
+#define TX_CMD_A_LSO_			(0x08000000)
+#define TX_CMD_A_IPE_			(0x04000000)
+#define TX_CMD_A_TPE_			(0x02000000)
+#define TX_CMD_A_IVTG_			(0x01000000)
+#define TX_CMD_A_RVTG_			(0x00800000)
+#define TX_CMD_A_FCS_			(0x00400000)
+#define TX_CMD_A_LEN_MASK_		(0x000FFFFF)
+
+/* Tx Command B */
+#define TX_CMD_B_MSS_SHIFT_		(16)
+#define TX_CMD_B_MSS_MASK_		(0x3FFF0000)
+#define TX_CMD_B_MSS_MIN_		((unsigned short)8)
+#define TX_CMD_B_VTAG_MASK_		(0x0000FFFF)
+#define TX_CMD_B_VTAG_PRI_MASK_		(0x0000E000)
+#define TX_CMD_B_VTAG_CFI_MASK_		(0x00001000)
+#define TX_CMD_B_VTAG_VID_MASK_		(0x00000FFF)
+
+/* Rx Command A */
+#define RX_CMD_A_ICE_			(0x80000000)
+#define RX_CMD_A_TCE_			(0x40000000)
+#define RX_CMD_A_CSE_MASK_		(0xC0000000)
+#define RX_CMD_A_IPV_			(0x20000000)
+#define RX_CMD_A_PID_MASK_		(0x18000000)
+#define RX_CMD_A_PID_NONE_IP_		(0x00000000)
+#define RX_CMD_A_PID_TCP_IP_		(0x08000000)
+#define RX_CMD_A_PID_UDP_IP_		(0x10000000)
+#define RX_CMD_A_PID_IP_		(0x18000000)
+#define RX_CMD_A_PFF_			(0x04000000)
+#define RX_CMD_A_BAM_			(0x02000000)
+#define RX_CMD_A_MAM_			(0x01000000)
+#define RX_CMD_A_FVTG_			(0x00800000)
+#define RX_CMD_A_RED_			(0x00400000)
+#define RX_CMD_A_RX_ERRS_MASK_		(0xC03F0000)
+#define RX_CMD_A_RWT_			(0x00200000)
+#define RX_CMD_A_RUNT_			(0x00100000)
+#define RX_CMD_A_LONG_			(0x00080000)
+#define RX_CMD_A_RXE_			(0x00040000)
+#define RX_CMD_A_DRB_			(0x00020000)
+#define RX_CMD_A_FCS_			(0x00010000)
+#define RX_CMD_A_UAM_			(0x00008000)
+#define RX_CMD_A_ICSM_			(0x00004000)
+#define RX_CMD_A_LEN_MASK_		(0x00003FFF)
+
+/* Rx Command B */
+#define RX_CMD_B_CSUM_SHIFT_		(16)
+#define RX_CMD_B_CSUM_MASK_		(0xFFFF0000)
+#define RX_CMD_B_VTAG_MASK_		(0x0000FFFF)
+#define RX_CMD_B_VTAG_PRI_MASK_		(0x0000E000)
+#define RX_CMD_B_VTAG_CFI_MASK_		(0x00001000)
+#define RX_CMD_B_VTAG_VID_MASK_		(0x00000FFF)
+
+/* Rx Command C */
+#define RX_CMD_C_WAKE_SHIFT_		(15)
+#define RX_CMD_C_WAKE_			(0x8000)
+#define RX_CMD_C_REF_FAIL_SHIFT_	(14)
+#define RX_CMD_C_REF_FAIL_		(0x4000)
+
+/* SCSRs */
+#define NUMBER_OF_REGS			(193)
+
+#define ID_REV				(0x00)
+#define ID_REV_CHIP_ID_MASK_		(0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_		(0x0000FFFF)
+#define ID_REV_CHIP_ID_7800_		(0x7800)
+
+#define FPGA_REV			(0x04)
+#define FPGA_REV_MINOR_MASK_		(0x0000FF00)
+#define FPGA_REV_MAJOR_MASK_		(0x000000FF)
+
+#define INT_STS				(0x0C)
+#define INT_STS_CLEAR_ALL_		(0xFFFFFFFF)
+#define INT_STS_EEE_TX_LPI_STRT_	(0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_	(0x02000000)
+#define INT_STS_EEE_RX_LPI_		(0x01000000)
+#define INT_STS_RDFO_			(0x00400000)
+#define INT_STS_TXE_			(0x00200000)
+#define INT_STS_TX_DIS_			(0x00080000)
+#define INT_STS_RX_DIS_			(0x00040000)
+#define INT_STS_PHY_INT_		(0x00020000)
+#define INT_STS_DP_INT_			(0x00010000)
+#define INT_STS_MAC_ERR_		(0x00008000)
+#define INT_STS_TDFU_			(0x00004000)
+#define INT_STS_TDFO_			(0x00002000)
+#define INT_STS_UFX_FP_			(0x00001000)
+#define INT_STS_GPIO_MASK_		(0x00000FFF)
+#define INT_STS_GPIO11_			(0x00000800)
+#define INT_STS_GPIO10_			(0x00000400)
+#define INT_STS_GPIO9_			(0x00000200)
+#define INT_STS_GPIO8_			(0x00000100)
+#define INT_STS_GPIO7_			(0x00000080)
+#define INT_STS_GPIO6_			(0x00000040)
+#define INT_STS_GPIO5_			(0x00000020)
+#define INT_STS_GPIO4_			(0x00000010)
+#define INT_STS_GPIO3_			(0x00000008)
+#define INT_STS_GPIO2_			(0x00000004)
+#define INT_STS_GPIO1_			(0x00000002)
+#define INT_STS_GPIO0_			(0x00000001)
+
+#define HW_CFG				(0x010)
+#define HW_CFG_CLK125_EN_		(0x02000000)
+#define HW_CFG_REFCLK25_EN_		(0x01000000)
+#define HW_CFG_LED3_EN_			(0x00800000)
+#define HW_CFG_LED2_EN_			(0x00400000)
+#define HW_CFG_LED1_EN_			(0x00200000)
+#define HW_CFG_LED0_EN_			(0x00100000)
+#define HW_CFG_EEE_PHY_LUSU_		(0x00020000)
+#define HW_CFG_EEE_TSU_			(0x00010000)
+#define HW_CFG_NETDET_STS_		(0x00008000)
+#define HW_CFG_NETDET_EN_		(0x00004000)
+#define HW_CFG_EEM_			(0x00002000)
+#define HW_CFG_RST_PROTECT_		(0x00001000)
+#define HW_CFG_CONNECT_BUF_		(0x00000400)
+#define HW_CFG_CONNECT_EN_		(0x00000200)
+#define HW_CFG_CONNECT_POL_		(0x00000100)
+#define HW_CFG_SUSPEND_N_SEL_MASK_	(0x000000C0)
+#define HW_CFG_SUSPEND_N_SEL_2		(0x00000000)
+#define HW_CFG_SUSPEND_N_SEL_12N	(0x00000040)
+#define HW_CFG_SUSPEND_N_SEL_012N	(0x00000080)
+#define HW_CFG_SUSPEND_N_SEL_0123N	(0x000000C0)
+#define HW_CFG_SUSPEND_N_POL_		(0x00000020)
+#define HW_CFG_MEF_			(0x00000010)
+#define HW_CFG_ETC_			(0x00000008)
+#define HW_CFG_LRST_			(0x00000002)
+#define HW_CFG_SRST_			(0x00000001)
+
+#define PMT_CTL				(0x014)
+#define PMT_CTL_EEE_WAKEUP_EN_		(0x00002000)
+#define PMT_CTL_EEE_WUPS_		(0x00001000)
+#define PMT_CTL_MAC_SRST_		(0x00000800)
+#define PMT_CTL_PHY_PWRUP_		(0x00000400)
+#define PMT_CTL_RES_CLR_WKP_MASK_	(0x00000300)
+#define PMT_CTL_RES_CLR_WKP_STS_	(0x00000200)
+#define PMT_CTL_RES_CLR_WKP_EN_		(0x00000100)
+#define PMT_CTL_READY_			(0x00000080)
+#define PMT_CTL_SUS_MODE_MASK_		(0x00000060)
+#define PMT_CTL_SUS_MODE_0_		(0x00000000)
+#define PMT_CTL_SUS_MODE_1_		(0x00000020)
+#define PMT_CTL_SUS_MODE_2_		(0x00000040)
+#define PMT_CTL_SUS_MODE_3_		(0x00000060)
+#define PMT_CTL_PHY_RST_		(0x00000010)
+#define PMT_CTL_WOL_EN_			(0x00000008)
+#define PMT_CTL_PHY_WAKE_EN_		(0x00000004)
+#define PMT_CTL_WUPS_MASK_		(0x00000003)
+#define PMT_CTL_WUPS_MLT_		(0x00000003)
+#define PMT_CTL_WUPS_MAC_		(0x00000002)
+#define PMT_CTL_WUPS_PHY_		(0x00000001)
+
+#define GPIO_CFG0			(0x018)
+#define GPIO_CFG0_GPIOEN_MASK_		(0x0000F000)
+#define GPIO_CFG0_GPIOEN3_		(0x00008000)
+#define GPIO_CFG0_GPIOEN2_		(0x00004000)
+#define GPIO_CFG0_GPIOEN1_		(0x00002000)
+#define GPIO_CFG0_GPIOEN0_		(0x00001000)
+#define GPIO_CFG0_GPIOBUF_MASK_		(0x00000F00)
+#define GPIO_CFG0_GPIOBUF3_		(0x00000800)
+#define GPIO_CFG0_GPIOBUF2_		(0x00000400)
+#define GPIO_CFG0_GPIOBUF1_		(0x00000200)
+#define GPIO_CFG0_GPIOBUF0_		(0x00000100)
+#define GPIO_CFG0_GPIODIR_MASK_		(0x000000F0)
+#define GPIO_CFG0_GPIODIR3_		(0x00000080)
+#define GPIO_CFG0_GPIODIR2_		(0x00000040)
+#define GPIO_CFG0_GPIODIR1_		(0x00000020)
+#define GPIO_CFG0_GPIODIR0_		(0x00000010)
+#define GPIO_CFG0_GPIOD_MASK_		(0x0000000F)
+#define GPIO_CFG0_GPIOD3_		(0x00000008)
+#define GPIO_CFG0_GPIOD2_		(0x00000004)
+#define GPIO_CFG0_GPIOD1_		(0x00000002)
+#define GPIO_CFG0_GPIOD0_		(0x00000001)
+
+#define GPIO_CFG1			(0x01C)
+#define GPIO_CFG1_GPIOEN_MASK_		(0xFF000000)
+#define GPIO_CFG1_GPIOEN11_		(0x80000000)
+#define GPIO_CFG1_GPIOEN10_		(0x40000000)
+#define GPIO_CFG1_GPIOEN9_		(0x20000000)
+#define GPIO_CFG1_GPIOEN8_		(0x10000000)
+#define GPIO_CFG1_GPIOEN7_		(0x08000000)
+#define GPIO_CFG1_GPIOEN6_		(0x04000000)
+#define GPIO_CFG1_GPIOEN5_		(0x02000000)
+#define GPIO_CFG1_GPIOEN4_		(0x01000000)
+#define GPIO_CFG1_GPIOBUF_MASK_		(0x00FF0000)
+#define GPIO_CFG1_GPIOBUF11_		(0x00800000)
+#define GPIO_CFG1_GPIOBUF10_		(0x00400000)
+#define GPIO_CFG1_GPIOBUF9_		(0x00200000)
+#define GPIO_CFG1_GPIOBUF8_		(0x00100000)
+#define GPIO_CFG1_GPIOBUF7_		(0x00080000)
+#define GPIO_CFG1_GPIOBUF6_		(0x00040000)
+#define GPIO_CFG1_GPIOBUF5_		(0x00020000)
+#define GPIO_CFG1_GPIOBUF4_		(0x00010000)
+#define GPIO_CFG1_GPIODIR_MASK_		(0x0000FF00)
+#define GPIO_CFG1_GPIODIR11_		(0x00008000)
+#define GPIO_CFG1_GPIODIR10_		(0x00004000)
+#define GPIO_CFG1_GPIODIR9_		(0x00002000)
+#define GPIO_CFG1_GPIODIR8_		(0x00001000)
+#define GPIO_CFG1_GPIODIR7_		(0x00000800)
+#define GPIO_CFG1_GPIODIR6_		(0x00000400)
+#define GPIO_CFG1_GPIODIR5_		(0x00000200)
+#define GPIO_CFG1_GPIODIR4_		(0x00000100)
+#define GPIO_CFG1_GPIOD_MASK_		(0x000000FF)
+#define GPIO_CFG1_GPIOD11_		(0x00000080)
+#define GPIO_CFG1_GPIOD10_		(0x00000040)
+#define GPIO_CFG1_GPIOD9_		(0x00000020)
+#define GPIO_CFG1_GPIOD8_		(0x00000010)
+#define GPIO_CFG1_GPIOD7_		(0x00000008)
+#define GPIO_CFG1_GPIOD6_		(0x00000004)
+#define GPIO_CFG1_GPIOD5_		(0x00000002)
+#define GPIO_CFG1_GPIOD4_		(0x00000001)
+
+#define GPIO_WAKE			(0x020)
+#define GPIO_WAKE_GPIOPOL_MASK_		(0x0FFF0000)
+#define GPIO_WAKE_GPIOPOL11_		(0x08000000)
+#define GPIO_WAKE_GPIOPOL10_		(0x04000000)
+#define GPIO_WAKE_GPIOPOL9_		(0x02000000)
+#define GPIO_WAKE_GPIOPOL8_		(0x01000000)
+#define GPIO_WAKE_GPIOPOL7_		(0x00800000)
+#define GPIO_WAKE_GPIOPOL6_		(0x00400000)
+#define GPIO_WAKE_GPIOPOL5_		(0x00200000)
+#define GPIO_WAKE_GPIOPOL4_		(0x00100000)
+#define GPIO_WAKE_GPIOPOL3_		(0x00080000)
+#define GPIO_WAKE_GPIOPOL2_		(0x00040000)
+#define GPIO_WAKE_GPIOPOL1_		(0x00020000)
+#define GPIO_WAKE_GPIOPOL0_		(0x00010000)
+#define GPIO_WAKE_GPIOWK_MASK_		(0x00000FFF)
+#define GPIO_WAKE_GPIOWK11_		(0x00000800)
+#define GPIO_WAKE_GPIOWK10_		(0x00000400)
+#define GPIO_WAKE_GPIOWK9_		(0x00000200)
+#define GPIO_WAKE_GPIOWK8_		(0x00000100)
+#define GPIO_WAKE_GPIOWK7_		(0x00000080)
+#define GPIO_WAKE_GPIOWK6_		(0x00000040)
+#define GPIO_WAKE_GPIOWK5_		(0x00000020)
+#define GPIO_WAKE_GPIOWK4_		(0x00000010)
+#define GPIO_WAKE_GPIOWK3_		(0x00000008)
+#define GPIO_WAKE_GPIOWK2_		(0x00000004)
+#define GPIO_WAKE_GPIOWK1_		(0x00000002)
+#define GPIO_WAKE_GPIOWK0_		(0x00000001)
+
+#define DP_SEL				(0x024)
+#define DP_SEL_DPRDY_			(0x80000000)
+#define DP_SEL_RSEL_MASK_		(0x0000000F)
+#define DP_SEL_RSEL_USB_PHY_CSRS_	(0x0000000F)
+#define DP_SEL_RSEL_OTP_64BIT_		(0x00000009)
+#define DP_SEL_RSEL_OTP_8BIT_		(0x00000008)
+#define DP_SEL_RSEL_UTX_BUF_RAM_	(0x00000007)
+#define DP_SEL_RSEL_DESC_RAM_		(0x00000005)
+#define DP_SEL_RSEL_TXFIFO_		(0x00000004)
+#define DP_SEL_RSEL_RXFIFO_		(0x00000003)
+#define DP_SEL_RSEL_LSO_		(0x00000002)
+#define DP_SEL_RSEL_VLAN_DA_		(0x00000001)
+#define DP_SEL_RSEL_URXBUF_		(0x00000000)
+#define DP_SEL_VHF_HASH_LEN		(16)
+#define DP_SEL_VHF_VLAN_LEN		(128)
+
+#define DP_CMD				(0x028)
+#define DP_CMD_WRITE_			(0x00000001)
+#define DP_CMD_READ_			(0x00000000)
+
+#define DP_ADDR				(0x02C)
+#define DP_ADDR_MASK_			(0x00003FFF)
+
+#define DP_DATA				(0x030)
+
+#define E2P_CMD				(0x040)
+#define E2P_CMD_EPC_BUSY_		(0x80000000)
+#define E2P_CMD_EPC_CMD_MASK_		(0x70000000)
+#define E2P_CMD_EPC_CMD_RELOAD_		(0x70000000)
+#define E2P_CMD_EPC_CMD_ERAL_		(0x60000000)
+#define E2P_CMD_EPC_CMD_ERASE_		(0x50000000)
+#define E2P_CMD_EPC_CMD_WRAL_		(0x40000000)
+#define E2P_CMD_EPC_CMD_WRITE_		(0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_		(0x20000000)
+#define E2P_CMD_EPC_CMD_EWDS_		(0x10000000)
+#define E2P_CMD_EPC_CMD_READ_		(0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_		(0x00000400)
+#define E2P_CMD_EPC_DL_			(0x00000200)
+#define E2P_CMD_EPC_ADDR_MASK_		(0x000001FF)
+
+#define E2P_DATA			(0x044)
+#define E2P_DATA_EEPROM_DATA_MASK_	(0x000000FF)
+
+#define BOS_ATTR			(0x050)
+#define BOS_ATTR_BLOCK_SIZE_MASK_	(0x000000FF)
+
+#define SS_ATTR				(0x054)
+#define SS_ATTR_POLL_INT_MASK_		(0x00FF0000)
+#define SS_ATTR_DEV_DESC_SIZE_MASK_	(0x0000FF00)
+#define SS_ATTR_CFG_BLK_SIZE_MASK_	(0x000000FF)
+
+#define HS_ATTR				(0x058)
+#define HS_ATTR_POLL_INT_MASK_		(0x00FF0000)
+#define HS_ATTR_DEV_DESC_SIZE_MASK_	(0x0000FF00)
+#define HS_ATTR_CFG_BLK_SIZE_MASK_	(0x000000FF)
+
+#define FS_ATTR				(0x05C)
+#define FS_ATTR_POLL_INT_MASK_		(0x00FF0000)
+#define FS_ATTR_DEV_DESC_SIZE_MASK_	(0x0000FF00)
+#define FS_ATTR_CFG_BLK_SIZE_MASK_	(0x000000FF)
+
+#define STR_ATTR0			    (0x060)
+#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_    (0xFF000000)
+#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_    (0x00FF0000)
+#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_   (0x0000FF00)
+#define STR_ATTR0_MANUF_DESC_SIZE_MASK_     (0x000000FF)
+
+#define STR_ATTR1			    (0x064)
+#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_    (0x000000FF)
+
+#define STR_FLAG_ATTR			    (0x068)
+#define STR_FLAG_ATTR_PME_FLAGS_MASK_	    (0x000000FF)
+
+#define USB_CFG0			(0x080)
+#define USB_CFG_LPM_RESPONSE_		(0x80000000)
+#define USB_CFG_LPM_CAPABILITY_		(0x40000000)
+#define USB_CFG_LPM_ENBL_SLPM_		(0x20000000)
+#define USB_CFG_HIRD_THR_MASK_		(0x1F000000)
+#define USB_CFG_HIRD_THR_960_		(0x1C000000)
+#define USB_CFG_HIRD_THR_885_		(0x1B000000)
+#define USB_CFG_HIRD_THR_810_		(0x1A000000)
+#define USB_CFG_HIRD_THR_735_		(0x19000000)
+#define USB_CFG_HIRD_THR_660_		(0x18000000)
+#define USB_CFG_HIRD_THR_585_		(0x17000000)
+#define USB_CFG_HIRD_THR_510_		(0x16000000)
+#define USB_CFG_HIRD_THR_435_		(0x15000000)
+#define USB_CFG_HIRD_THR_360_		(0x14000000)
+#define USB_CFG_HIRD_THR_285_		(0x13000000)
+#define USB_CFG_HIRD_THR_210_		(0x12000000)
+#define USB_CFG_HIRD_THR_135_		(0x11000000)
+#define USB_CFG_HIRD_THR_60_		(0x10000000)
+#define USB_CFG_MAX_BURST_BI_MASK_	(0x00F00000)
+#define USB_CFG_MAX_BURST_BO_MASK_	(0x000F0000)
+#define USB_CFG_MAX_DEV_SPEED_MASK_	(0x0000E000)
+#define USB_CFG_MAX_DEV_SPEED_SS_	(0x00008000)
+#define USB_CFG_MAX_DEV_SPEED_HS_	(0x00000000)
+#define USB_CFG_MAX_DEV_SPEED_FS_	(0x00002000)
+#define USB_CFG_PHY_BOOST_MASK_		(0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_12_	(0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_8_	(0x00000100)
+#define USB_CFG_PHY_BOOST_PLUS_4_	(0x00000080)
+#define USB_CFG_PHY_BOOST_NORMAL_	(0x00000000)
+#define USB_CFG_BIR_			(0x00000040)
+#define USB_CFG_BCE_			(0x00000020)
+#define USB_CFG_PORT_SWAP_		(0x00000010)
+#define USB_CFG_LPM_EN_			(0x00000008)
+#define USB_CFG_RMT_WKP_		(0x00000004)
+#define USB_CFG_PWR_SEL_		(0x00000002)
+#define USB_CFG_STALL_BO_DIS_		(0x00000001)
+
+#define USB_CFG1			(0x084)
+#define USB_CFG1_U1_TIMEOUT_MASK_	(0xFF000000)
+#define USB_CFG1_U2_TIMEOUT_MASK_	(0x00FF0000)
+#define USB_CFG1_HS_TOUT_CAL_MASK_	(0x0000E000)
+#define USB_CFG1_DEV_U2_INIT_EN_	(0x00001000)
+#define USB_CFG1_DEV_U2_EN_		(0x00000800)
+#define USB_CFG1_DEV_U1_INIT_EN_	(0x00000400)
+#define USB_CFG1_DEV_U1_EN_		(0x00000200)
+#define USB_CFG1_LTM_ENABLE_		(0x00000100)
+#define USB_CFG1_FS_TOUT_CAL_MASK_	(0x00000070)
+#define USB_CFG1_SCALE_DOWN_MASK_	(0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE3_	(0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE2_	(0x00000002)
+#define USB_CFG1_SCALE_DOWN_MODE1_	(0x00000001)
+#define USB_CFG1_SCALE_DOWN_MODE0_	(0x00000000)
+
+#define USB_CFG2			    (0x088)
+#define USB_CFG2_SS_DETACH_TIME_MASK_	    (0xFFFF0000)
+#define USB_CFG2_HS_DETACH_TIME_MASK_	    (0x0000FFFF)
+
+#define BURST_CAP			(0x090)
+#define BURST_CAP_SIZE_MASK_		(0x000000FF)
+
+#define BULK_IN_DLY			(0x094)
+#define BULK_IN_DLY_MASK_		(0x0000FFFF)
+
+#define INT_EP_CTL			(0x098)
+#define INT_EP_INTEP_ON_		(0x80000000)
+#define INT_STS_EEE_TX_LPI_STRT_EN_	(0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_EN_	(0x02000000)
+#define INT_STS_EEE_RX_LPI_EN_		(0x01000000)
+#define INT_EP_RDFO_EN_			(0x00400000)
+#define INT_EP_TXE_EN_			(0x00200000)
+#define INT_EP_TX_DIS_EN_		(0x00080000)
+#define INT_EP_RX_DIS_EN_		(0x00040000)
+#define INT_EP_PHY_INT_EN_		(0x00020000)
+#define INT_EP_DP_INT_EN_		(0x00010000)
+#define INT_EP_MAC_ERR_EN_		(0x00008000)
+#define INT_EP_TDFU_EN_			(0x00004000)
+#define INT_EP_TDFO_EN_			(0x00002000)
+#define INT_EP_UTX_FP_EN_		(0x00001000)
+#define INT_EP_GPIO_EN_MASK_		(0x00000FFF)
+
+#define PIPE_CTL			(0x09C)
+#define PIPE_CTL_TXSWING_		(0x00000040)
+#define PIPE_CTL_TXMARGIN_MASK_		(0x00000038)
+#define PIPE_CTL_TXDEEMPHASIS_MASK_	(0x00000006)
+#define PIPE_CTL_ELASTICITYBUFFERMODE_	(0x00000001)
+
+#define U1_LATENCY			(0xA0)
+#define U2_LATENCY			(0xA4)
+
+#define USB_STATUS			(0x0A8)
+#define USB_STATUS_REMOTE_WK_		(0x00100000)
+#define USB_STATUS_FUNC_REMOTE_WK_	(0x00080000)
+#define USB_STATUS_LTM_ENABLE_		(0x00040000)
+#define USB_STATUS_U2_ENABLE_		(0x00020000)
+#define USB_STATUS_U1_ENABLE_		(0x00010000)
+#define USB_STATUS_SET_SEL_		(0x00000020)
+#define USB_STATUS_REMOTE_WK_STS_	(0x00000010)
+#define USB_STATUS_FUNC_REMOTE_WK_STS_	(0x00000008)
+#define USB_STATUS_LTM_ENABLE_STS_	(0x00000004)
+#define USB_STATUS_U2_ENABLE_STS_	(0x00000002)
+#define USB_STATUS_U1_ENABLE_STS_	(0x00000001)
+
+#define USB_CFG3			(0x0AC)
+#define USB_CFG3_EN_U2_LTM_		(0x40000000)
+#define USB_CFG3_BULK_OUT_NUMP_OVR_	(0x20000000)
+#define USB_CFG3_DIS_FAST_U1_EXIT_	(0x10000000)
+#define USB_CFG3_LPM_NYET_THR_		(0x0F000000)
+#define USB_CFG3_RX_DET_2_POL_LFPS_	(0x00800000)
+#define USB_CFG3_LFPS_FILT_		(0x00400000)
+#define USB_CFG3_SKIP_RX_DET_		(0x00200000)
+#define USB_CFG3_DELAY_P1P2P3_		(0x001C0000)
+#define USB_CFG3_DELAY_PHY_PWR_CHG_	(0x00020000)
+#define USB_CFG3_U1U2_EXIT_FR_		(0x00010000)
+#define USB_CFG3_REQ_P1P2P3		(0x00008000)
+#define USB_CFG3_HST_PRT_CMPL_		(0x00004000)
+#define USB_CFG3_DIS_SCRAMB_		(0x00002000)
+#define USB_CFG3_PWR_DN_SCALE_		(0x00001FFF)
+
+#define RFE_CTL				(0x0B0)
+#define RFE_CTL_IGMP_COE_		(0x00004000)
+#define RFE_CTL_ICMP_COE_		(0x00002000)
+#define RFE_CTL_TCPUDP_COE_		(0x00001000)
+#define RFE_CTL_IP_COE_			(0x00000800)
+#define RFE_CTL_BCAST_EN_		(0x00000400)
+#define RFE_CTL_MCAST_EN_		(0x00000200)
+#define RFE_CTL_UCAST_EN_		(0x00000100)
+#define RFE_CTL_VLAN_STRIP_		(0x00000080)
+#define RFE_CTL_DISCARD_UNTAGGED_	(0x00000040)
+#define RFE_CTL_VLAN_FILTER_		(0x00000020)
+#define RFE_CTL_SA_FILTER_		(0x00000010)
+#define RFE_CTL_MCAST_HASH_		(0x00000008)
+#define RFE_CTL_DA_HASH_		(0x00000004)
+#define RFE_CTL_DA_PERFECT_		(0x00000002)
+#define RFE_CTL_RST_			(0x00000001)
+
+#define VLAN_TYPE			(0x0B4)
+#define VLAN_TYPE_MASK_			(0x0000FFFF)
+
+#define FCT_RX_CTL			(0x0C0)
+#define FCT_RX_CTL_EN_			(0x80000000)
+#define FCT_RX_CTL_RST_			(0x40000000)
+#define FCT_RX_CTL_SBF_			(0x02000000)
+#define FCT_RX_CTL_OVFL_		(0x01000000)
+#define FCT_RX_CTL_DROP_		(0x00800000)
+#define FCT_RX_CTL_NOT_EMPTY_		(0x00400000)
+#define FCT_RX_CTL_EMPTY_		(0x00200000)
+#define FCT_RX_CTL_DIS_			(0x00100000)
+#define FCT_RX_CTL_USED_MASK_		(0x0000FFFF)
+
+#define FCT_TX_CTL			(0x0C4)
+#define FCT_TX_CTL_EN_			(0x80000000)
+#define FCT_TX_CTL_RST_			(0x40000000)
+#define FCT_TX_CTL_NOT_EMPTY_		(0x00400000)
+#define FCT_TX_CTL_EMPTY_		(0x00200000)
+#define FCT_TX_CTL_DIS_			(0x00100000)
+#define FCT_TX_CTL_USED_MASK_		(0x0000FFFF)
+
+#define FCT_RX_FIFO_END			(0x0C8)
+#define FCT_RX_FIFO_END_MASK_		(0x0000007F)
+
+#define FCT_TX_FIFO_END			(0x0CC)
+#define FCT_TX_FIFO_END_MASK_		(0x0000003F)
+
+#define FCT_FLOW			(0x0D0)
+#define FCT_FLOW_OFF_MASK_		(0x00007F00)
+#define FCT_FLOW_ON_MASK_		(0x0000007F)
+
+#define RX_DP_STOR			(0x0D4)
+#define RX_DP_STORE_TOT_RXUSED_MASK_	(0xFFFF0000)
+#define RX_DP_STORE_UTX_RXUSED_MASK_	(0x0000FFFF)
+
+#define TX_DP_STOR			(0x0D8)
+#define TX_DP_STORE_TOT_TXUSED_MASK_	(0xFFFF0000)
+#define TX_DP_STORE_URX_TXUSED_MASK_	(0x0000FFFF)
+
+#define LTM_BELT_IDLE0			(0x0E0)
+#define LTM_BELT_IDLE0_IDLE1000_	(0x0FFF0000)
+#define LTM_BELT_IDLE0_IDLE100_		(0x00000FFF)
+
+#define LTM_BELT_IDLE1			(0x0E4)
+#define LTM_BELT_IDLE1_IDLE10_		(0x00000FFF)
+
+#define LTM_BELT_ACT0			(0x0E8)
+#define LTM_BELT_ACT0_ACT1000_		(0x0FFF0000)
+#define LTM_BELT_ACT0_ACT100_		(0x00000FFF)
+
+#define LTM_BELT_ACT1			(0x0EC)
+#define LTM_BELT_ACT1_ACT10_		(0x00000FFF)
+
+#define LTM_INACTIVE0			(0x0F0)
+#define LTM_INACTIVE0_TIMER1000_	(0xFFFF0000)
+#define LTM_INACTIVE0_TIMER100_		(0x0000FFFF)
+
+#define LTM_INACTIVE1			(0x0F4)
+#define LTM_INACTIVE1_TIMER10_		(0x0000FFFF)
+
+#define MAC_CR				(0x100)
+#define MAC_CR_GMII_EN_			(0x00080000)
+#define MAC_CR_EEE_TX_CLK_STOP_EN_	(0x00040000)
+#define MAC_CR_EEE_EN_			(0x00020000)
+#define MAC_CR_EEE_TLAR_EN_		(0x00010000)
+#define MAC_CR_ADP_			(0x00002000)
+#define MAC_CR_AUTO_DUPLEX_		(0x00001000)
+#define MAC_CR_AUTO_SPEED_		(0x00000800)
+#define MAC_CR_LOOPBACK_		(0x00000400)
+#define MAC_CR_BOLMT_MASK_		(0x000000C0)
+#define MAC_CR_FULL_DUPLEX_		(0x00000008)
+#define MAC_CR_SPEED_MASK_		(0x00000006)
+#define MAC_CR_SPEED_1000_		(0x00000004)
+#define MAC_CR_SPEED_100_		(0x00000002)
+#define MAC_CR_SPEED_10_		(0x00000000)
+#define MAC_CR_RST_			(0x00000001)
+
+#define MAC_RX				(0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_		(16)
+#define MAC_RX_MAX_SIZE_MASK_		(0x3FFF0000)
+#define MAC_RX_FCS_STRIP_		(0x00000010)
+#define MAC_RX_VLAN_FSE_		(0x00000004)
+#define MAC_RX_RXD_			(0x00000002)
+#define MAC_RX_RXEN_			(0x00000001)
+
+#define MAC_TX				(0x108)
+#define MAC_TX_BAD_FCS_			(0x00000004)
+#define MAC_TX_TXD_			(0x00000002)
+#define MAC_TX_TXEN_			(0x00000001)
+
+#define FLOW				(0x10C)
+#define FLOW_CR_FORCE_FC_		(0x80000000)
+#define FLOW_CR_TX_FCEN_		(0x40000000)
+#define FLOW_CR_RX_FCEN_		(0x20000000)
+#define FLOW_CR_FPF_			(0x10000000)
+#define FLOW_CR_FCPT_MASK_		(0x0000FFFF)
+
+#define RAND_SEED			(0x110)
+#define RAND_SEED_MASK_			(0x0000FFFF)
+
+#define ERR_STS				(0x114)
+#define ERR_STS_FERR_			(0x00000100)
+#define ERR_STS_LERR_			(0x00000080)
+#define ERR_STS_RFERR_			(0x00000040)
+#define ERR_STS_ECERR_			(0x00000010)
+#define ERR_STS_ALERR_			(0x00000008)
+#define ERR_STS_URERR_			(0x00000004)
+
+#define RX_ADDRH			(0x118)
+#define RX_ADDRH_MASK_			(0x0000FFFF)
+
+#define RX_ADDRL			(0x11C)
+#define RX_ADDRL_MASK_			(0xFFFFFFFF)
+
+#define MII_ACC				(0x120)
+#define MII_ACC_PHY_ADDR_SHIFT_		(11)
+#define MII_ACC_PHY_ADDR_MASK_		(0x0000F800)
+#define MII_ACC_MIIRINDA_SHIFT_		(6)
+#define MII_ACC_MIIRINDA_MASK_		(0x000007C0)
+#define MII_ACC_MII_READ_		(0x00000000)
+#define MII_ACC_MII_WRITE_		(0x00000002)
+#define MII_ACC_MII_BUSY_		(0x00000001)
+
+#define MII_DATA			(0x124)
+#define MII_DATA_MASK_			(0x0000FFFF)
+
+#define MAC_RGMII_ID			(0x128)
+#define MAC_RGMII_ID_TXC_DELAY_EN_	(0x00000002)
+#define MAC_RGMII_ID_RXC_DELAY_EN_	(0x00000001)
+
+#define EEE_TX_LPI_REQ_DLY		(0x130)
+#define EEE_TX_LPI_REQ_DLY_CNT_MASK_	(0xFFFFFFFF)
+
+#define EEE_TW_TX_SYS			(0x134)
+#define EEE_TW_TX_SYS_CNT1G_MASK_	(0xFFFF0000)
+#define EEE_TW_TX_SYS_CNT100M_MASK_	(0x0000FFFF)
+
+#define EEE_TX_LPI_REM_DLY		(0x138)
+#define EEE_TX_LPI_REM_DLY_CNT_		(0x00FFFFFF)
+
+#define WUCSR				(0x140)
+#define WUCSR_TESTMODE_			(0x80000000)
+#define WUCSR_RFE_WAKE_EN_		(0x00004000)
+#define WUCSR_EEE_TX_WAKE_		(0x00002000)
+#define WUCSR_EEE_TX_WAKE_EN_		(0x00001000)
+#define WUCSR_EEE_RX_WAKE_		(0x00000800)
+#define WUCSR_EEE_RX_WAKE_EN_		(0x00000400)
+#define WUCSR_RFE_WAKE_FR_		(0x00000200)
+#define WUCSR_STORE_WAKE_		(0x00000100)
+#define WUCSR_PFDA_FR_			(0x00000080)
+#define WUCSR_WUFR_			(0x00000040)
+#define WUCSR_MPR_			(0x00000020)
+#define WUCSR_BCST_FR_			(0x00000010)
+#define WUCSR_PFDA_EN_			(0x00000008)
+#define WUCSR_WAKE_EN_			(0x00000004)
+#define WUCSR_MPEN_			(0x00000002)
+#define WUCSR_BCST_EN_			(0x00000001)
+
+#define WK_SRC				(0x144)
+#define WK_SRC_GPIOX_INT_WK_SHIFT_	(20)
+#define WK_SRC_GPIOX_INT_WK_MASK_	(0xFFF00000)
+#define WK_SRC_IPV6_TCPSYN_RCD_WK_	(0x00010000)
+#define WK_SRC_IPV4_TCPSYN_RCD_WK_	(0x00008000)
+#define WK_SRC_EEE_TX_WK_		(0x00004000)
+#define WK_SRC_EEE_RX_WK_		(0x00002000)
+#define WK_SRC_GOOD_FR_WK_		(0x00001000)
+#define WK_SRC_PFDA_FR_WK_		(0x00000800)
+#define WK_SRC_MP_FR_WK_		(0x00000400)
+#define WK_SRC_BCAST_FR_WK_		(0x00000200)
+#define WK_SRC_WU_FR_WK_		(0x00000100)
+#define WK_SRC_WUFF_MATCH_MASK_		(0x0000001F)
+
+#define WUF_CFG0			(0x150)
+#define NUM_OF_WUF_CFG			(32)
+#define WUF_CFG_BEGIN			(WUF_CFG0)
+#define WUF_CFG(index)			(WUF_CFG_BEGIN + (4 * (index)))
+#define WUF_CFGX_EN_			(0x80000000)
+#define WUF_CFGX_TYPE_MASK_		(0x03000000)
+#define WUF_CFGX_TYPE_MCAST_		(0x02000000)
+#define WUF_CFGX_TYPE_ALL_		(0x01000000)
+#define WUF_CFGX_TYPE_UCAST_		(0x00000000)
+#define WUF_CFGX_OFFSET_SHIFT_		(16)
+#define WUF_CFGX_OFFSET_MASK_		(0x00FF0000)
+#define WUF_CFGX_CRC16_MASK_		(0x0000FFFF)
+
+#define WUF_MASK0_0			(0x200)
+#define WUF_MASK0_1			(0x204)
+#define WUF_MASK0_2			(0x208)
+#define WUF_MASK0_3			(0x20C)
+#define NUM_OF_WUF_MASK			(32)
+#define WUF_MASK0_BEGIN			(WUF_MASK0_0)
+#define WUF_MASK1_BEGIN			(WUF_MASK0_1)
+#define WUF_MASK2_BEGIN			(WUF_MASK0_2)
+#define WUF_MASK3_BEGIN			(WUF_MASK0_3)
+#define WUF_MASK0(index)		(WUF_MASK0_BEGIN + (0x10 * (index)))
+#define WUF_MASK1(index)		(WUF_MASK1_BEGIN + (0x10 * (index)))
+#define WUF_MASK2(index)		(WUF_MASK2_BEGIN + (0x10 * (index)))
+#define WUF_MASK3(index)		(WUF_MASK3_BEGIN + (0x10 * (index)))
+
+#define MAF_BASE			(0x400)
+#define MAF_HIX				(0x00)
+#define MAF_LOX				(0x04)
+#define NUM_OF_MAF			(33)
+#define MAF_HI_BEGIN			(MAF_BASE + MAF_HIX)
+#define MAF_LO_BEGIN			(MAF_BASE + MAF_LOX)
+#define MAF_HI(index)			(MAF_BASE + (8 * (index)) + (MAF_HIX))
+#define MAF_LO(index)			(MAF_BASE + (8 * (index)) + (MAF_LOX))
+#define MAF_HI_VALID_			(0x80000000)
+#define MAF_HI_TYPE_MASK_		(0x40000000)
+#define MAF_HI_TYPE_SRC_		(0x40000000)
+#define MAF_HI_TYPE_DST_		(0x00000000)
+#define MAF_HI_ADDR_MASK		(0x0000FFFF)
+#define MAF_LO_ADDR_MASK		(0xFFFFFFFF)
+
+#define WUCSR2				(0x600)
+#define WUCSR2_CSUM_DISABLE_		(0x80000000)
+#define WUCSR2_NA_SA_SEL_		(0x00000100)
+#define WUCSR2_NS_RCD_			(0x00000080)
+#define WUCSR2_ARP_RCD_			(0x00000040)
+#define WUCSR2_IPV6_TCPSYN_RCD_		(0x00000020)
+#define WUCSR2_IPV4_TCPSYN_RCD_		(0x00000010)
+#define WUCSR2_NS_OFFLOAD_EN_		(0x00000008)
+#define WUCSR2_ARP_OFFLOAD_EN_		(0x00000004)
+#define WUCSR2_IPV6_TCPSYN_WAKE_EN_	(0x00000002)
+#define WUCSR2_IPV4_TCPSYN_WAKE_EN_	(0x00000001)
+
+#define NS1_IPV6_ADDR_DEST0		(0x610)
+#define NS1_IPV6_ADDR_DEST1		(0x614)
+#define NS1_IPV6_ADDR_DEST2		(0x618)
+#define NS1_IPV6_ADDR_DEST3		(0x61C)
+
+#define NS1_IPV6_ADDR_SRC0		(0x620)
+#define NS1_IPV6_ADDR_SRC1		(0x624)
+#define NS1_IPV6_ADDR_SRC2		(0x628)
+#define NS1_IPV6_ADDR_SRC3		(0x62C)
+
+#define NS1_ICMPV6_ADDR0_0		(0x630)
+#define NS1_ICMPV6_ADDR0_1		(0x634)
+#define NS1_ICMPV6_ADDR0_2		(0x638)
+#define NS1_ICMPV6_ADDR0_3		(0x63C)
+
+#define NS1_ICMPV6_ADDR1_0		(0x640)
+#define NS1_ICMPV6_ADDR1_1		(0x644)
+#define NS1_ICMPV6_ADDR1_2		(0x648)
+#define NS1_ICMPV6_ADDR1_3		(0x64C)
+
+#define NS2_IPV6_ADDR_DEST0		(0x650)
+#define NS2_IPV6_ADDR_DEST1		(0x654)
+#define NS2_IPV6_ADDR_DEST2		(0x658)
+#define NS2_IPV6_ADDR_DEST3		(0x65C)
+
+#define NS2_IPV6_ADDR_SRC0		(0x660)
+#define NS2_IPV6_ADDR_SRC1		(0x664)
+#define NS2_IPV6_ADDR_SRC2		(0x668)
+#define NS2_IPV6_ADDR_SRC3		(0x66C)
+
+#define NS2_ICMPV6_ADDR0_0		(0x670)
+#define NS2_ICMPV6_ADDR0_1		(0x674)
+#define NS2_ICMPV6_ADDR0_2		(0x678)
+#define NS2_ICMPV6_ADDR0_3		(0x67C)
+
+#define NS2_ICMPV6_ADDR1_0		(0x680)
+#define NS2_ICMPV6_ADDR1_1		(0x684)
+#define NS2_ICMPV6_ADDR1_2		(0x688)
+#define NS2_ICMPV6_ADDR1_3		(0x68C)
+
+#define SYN_IPV4_ADDR_SRC		(0x690)
+#define SYN_IPV4_ADDR_DEST		(0x694)
+#define SYN_IPV4_TCP_PORTS		(0x698)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_	    (0x0000FFFF)
+
+#define SYN_IPV6_ADDR_SRC0		(0x69C)
+#define SYN_IPV6_ADDR_SRC1		(0x6A0)
+#define SYN_IPV6_ADDR_SRC2		(0x6A4)
+#define SYN_IPV6_ADDR_SRC3		(0x6A8)
+
+#define SYN_IPV6_ADDR_DEST0		(0x6AC)
+#define SYN_IPV6_ADDR_DEST1		(0x6B0)
+#define SYN_IPV6_ADDR_DEST2		(0x6B4)
+#define SYN_IPV6_ADDR_DEST3		(0x6B8)
+
+#define SYN_IPV6_TCP_PORTS		(0x6BC)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_	    (0x0000FFFF)
+
+#define ARP_SPA				(0x6C0)
+#define ARP_TPA				(0x6C4)
+
+#define PHY_DEV_ID			(0x700)
+#define PHY_DEV_ID_REV_SHIFT_		(28)
+#define PHY_DEV_ID_REV_MASK_		(0xF0000000)
+#define PHY_DEV_ID_MODEL_SHIFT_		(22)
+#define PHY_DEV_ID_MODEL_MASK_		(0x0FC00000)
+#define PHY_DEV_ID_OUI_MASK_		(0x003FFFFF)
+
+#define OTP_BASE_ADDR			(0x00001000)
+#define OTP_ADDR_RANGE_			(0x1FF)
+
+#define OTP_PWR_DN			(OTP_BASE_ADDR + 4 * 0x00)
+#define OTP_PWR_DN_PWRDN_N_		(0x01)
+
+#define OTP_ADDR1			(OTP_BASE_ADDR + 4 * 0x01)
+#define OTP_ADDR1_15_11			(0x1F)
+
+#define OTP_ADDR2			(OTP_BASE_ADDR + 4 * 0x02)
+#define OTP_ADDR2_10_3			(0xFF)
+
+#define OTP_ADDR3			(OTP_BASE_ADDR + 4 * 0x03)
+#define OTP_ADDR3_2_0			(0x03)
+
+#define OTP_PRGM_DATA			(OTP_BASE_ADDR + 4 * 0x04)
+
+#define OTP_PRGM_MODE			(OTP_BASE_ADDR + 4 * 0x05)
+#define OTP_PRGM_MODE_BYTE_		(0x01)
+
+#define OTP_RD_DATA			(OTP_BASE_ADDR + 4 * 0x06)
+
+#define OTP_FUNC_CMD			(OTP_BASE_ADDR + 4 * 0x08)
+#define OTP_FUNC_CMD_RESET_		(0x04)
+#define OTP_FUNC_CMD_PROGRAM_		(0x02)
+#define OTP_FUNC_CMD_READ_		(0x01)
+
+#define OTP_TST_CMD			(OTP_BASE_ADDR + 4 * 0x09)
+#define OTP_TST_CMD_TEST_DEC_SEL_	(0x10)
+#define OTP_TST_CMD_PRGVRFY_		(0x08)
+#define OTP_TST_CMD_WRTEST_		(0x04)
+#define OTP_TST_CMD_TESTDEC_		(0x02)
+#define OTP_TST_CMD_BLANKCHECK_		(0x01)
+
+#define OTP_CMD_GO			(OTP_BASE_ADDR + 4 * 0x0A)
+#define OTP_CMD_GO_GO_			(0x01)
+
+#define OTP_PASS_FAIL			(OTP_BASE_ADDR + 4 * 0x0B)
+#define OTP_PASS_FAIL_PASS_		(0x02)
+#define OTP_PASS_FAIL_FAIL_		(0x01)
+
+#define OTP_STATUS			(OTP_BASE_ADDR + 4 * 0x0C)
+#define OTP_STATUS_OTP_LOCK_		(0x10)
+#define OTP_STATUS_WEB_			(0x08)
+#define OTP_STATUS_PGMEN		(0x04)
+#define OTP_STATUS_CPUMPEN_		(0x02)
+#define OTP_STATUS_BUSY_		(0x01)
+
+#define OTP_MAX_PRG			(OTP_BASE_ADDR + 4 * 0x0D)
+#define OTP_MAX_PRG_MAX_PROG		(0x1F)
+
+#define OTP_INTR_STATUS			(OTP_BASE_ADDR + 4 * 0x10)
+#define OTP_INTR_STATUS_READY_		(0x01)
+
+#define OTP_INTR_MASK			(OTP_BASE_ADDR + 4 * 0x11)
+#define OTP_INTR_MASK_READY_		(0x01)
+
+#define OTP_RSTB_PW1			(OTP_BASE_ADDR + 4 * 0x14)
+#define OTP_RSTB_PW2			(OTP_BASE_ADDR + 4 * 0x15)
+#define OTP_PGM_PW1			(OTP_BASE_ADDR + 4 * 0x18)
+#define OTP_PGM_PW2			(OTP_BASE_ADDR + 4 * 0x19)
+#define OTP_READ_PW1			(OTP_BASE_ADDR + 4 * 0x1C)
+#define OTP_READ_PW2			(OTP_BASE_ADDR + 4 * 0x1D)
+#define OTP_TCRST			(OTP_BASE_ADDR + 4 * 0x20)
+#define OTP_RSRD			(OTP_BASE_ADDR + 4 * 0x21)
+#define OTP_TREADEN_VAL			(OTP_BASE_ADDR + 4 * 0x22)
+#define OTP_TDLES_VAL			(OTP_BASE_ADDR + 4 * 0x23)
+#define OTP_TWWL_VAL			(OTP_BASE_ADDR + 4 * 0x24)
+#define OTP_TDLEH_VAL			(OTP_BASE_ADDR + 4 * 0x25)
+#define OTP_TWPED_VAL			(OTP_BASE_ADDR + 4 * 0x26)
+#define OTP_TPES_VAL			(OTP_BASE_ADDR + 4 * 0x27)
+#define OTP_TCPS_VAL			(OTP_BASE_ADDR + 4 * 0x28)
+#define OTP_TCPH_VAL			(OTP_BASE_ADDR + 4 * 0x29)
+#define OTP_TPGMVFY_VAL			(OTP_BASE_ADDR + 4 * 0x2A)
+#define OTP_TPEH_VAL			(OTP_BASE_ADDR + 4 * 0x2B)
+#define OTP_TPGRST_VAL			(OTP_BASE_ADDR + 4 * 0x2C)
+#define OTP_TCLES_VAL			(OTP_BASE_ADDR + 4 * 0x2D)
+#define OTP_TCLEH_VAL			(OTP_BASE_ADDR + 4 * 0x2E)
+#define OTP_TRDES_VAL			(OTP_BASE_ADDR + 4 * 0x2F)
+#define OTP_TBCACC_VAL			(OTP_BASE_ADDR + 4 * 0x30)
+#define OTP_TAAC_VAL			(OTP_BASE_ADDR + 4 * 0x31)
+#define OTP_TACCT_VAL			(OTP_BASE_ADDR + 4 * 0x32)
+#define OTP_TRDEP_VAL			(OTP_BASE_ADDR + 4 * 0x38)
+#define OTP_TPGSV_VAL			(OTP_BASE_ADDR + 4 * 0x39)
+#define OTP_TPVSR_VAL			(OTP_BASE_ADDR + 4 * 0x3A)
+#define OTP_TPVHR_VAL			(OTP_BASE_ADDR + 4 * 0x3B)
+#define OTP_TPVSA_VAL			(OTP_BASE_ADDR + 4 * 0x3C)
+
+#define PHY_ID1				(0x02)
+#define PHY_ID2				(0x03)
+
+#define PHY_DEV_ID_OUI_VTSE		(0x04001C)
+#define PHY_DEV_ID_MODEL_VTSE_8502	(0x23)
+
+#define PHY_AUTONEG_ADV			(0x04)
+#define NWAY_AR_NEXT_PAGE_		(0x8000)
+#define NWAY_AR_REMOTE_FAULT_		(0x2000)
+#define NWAY_AR_ASM_DIR_		(0x0800)
+#define NWAY_AR_PAUSE_			(0x0400)
+#define NWAY_AR_100T4_CAPS_		(0x0200)
+#define NWAY_AR_100TX_FD_CAPS_		(0x0100)
+#define NWAY_AR_SELECTOR_FIELD_		(0x001F)
+#define NWAY_AR_100TX_HD_CAPS_		(0x0080)
+#define NWAY_AR_10T_FD_CAPS_		(0x0040)
+#define NWAY_AR_10T_HD_CAPS_		(0x0020)
+#define NWAY_AR_ALL_CAPS_		(NWAY_AR_10T_HD_CAPS_ | \
+					 NWAY_AR_10T_FD_CAPS_ | \
+					 NWAY_AR_100TX_HD_CAPS_ | \
+					 NWAY_AR_100TX_FD_CAPS_)
+#define NWAY_AR_PAUSE_MASK		(NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
+
+#define PHY_LP_ABILITY			(0x05)
+#define NWAY_LPAR_NEXT_PAGE_		(0x8000)
+#define NWAY_LPAR_ACKNOWLEDGE_		(0x4000)
+#define NWAY_LPAR_REMOTE_FAULT_		(0x2000)
+#define NWAY_LPAR_ASM_DIR_		(0x0800)
+#define NWAY_LPAR_PAUSE_		(0x0400)
+#define NWAY_LPAR_100T4_CAPS_		(0x0200)
+#define NWAY_LPAR_100TX_FD_CAPS_	(0x0100)
+#define NWAY_LPAR_100TX_HD_CAPS_	(0x0080)
+#define NWAY_LPAR_10T_FD_CAPS_		(0x0040)
+#define NWAY_LPAR_10T_HD_CAPS_		(0x0020)
+#define NWAY_LPAR_SELECTOR_FIELD_	(0x001F)
+
+#define PHY_AUTONEG_EXP			(0x06)
+#define NWAY_ER_PAR_DETECT_FAULT_	(0x0010)
+#define NWAY_ER_LP_NEXT_PAGE_CAPS_	(0x0008)
+#define NWAY_ER_NEXT_PAGE_CAPS_		(0x0004)
+#define NWAY_ER_PAGE_RXD_		(0x0002)
+#define NWAY_ER_LP_NWAY_CAPS_		(0x0001)
+
+#define PHY_NEXT_PAGE_TX		(0x07)
+#define NPTX_NEXT_PAGE_			(0x8000)
+#define NPTX_MSG_PAGE_			(0x2000)
+#define NPTX_ACKNOWLDGE2_		(0x1000)
+#define NPTX_TOGGLE_			(0x0800)
+#define NPTX_MSG_CODE_FIELD_		(0x0001)
+
+#define PHY_LP_NEXT_PAGE		(0x08)
+#define LP_RNPR_NEXT_PAGE_		(0x8000)
+#define LP_RNPR_ACKNOWLDGE_		(0x4000)
+#define LP_RNPR_MSG_PAGE_		(0x2000)
+#define LP_RNPR_ACKNOWLDGE2_		(0x1000)
+#define LP_RNPR_TOGGLE_			(0x0800)
+#define LP_RNPR_MSG_CODE_FIELD_		(0x0001)
+
+#define PHY_1000T_CTRL			(0x09)
+#define CR_1000T_TEST_MODE_4_		(0x8000)
+#define CR_1000T_TEST_MODE_3_		(0x6000)
+#define CR_1000T_TEST_MODE_2_		(0x4000)
+#define CR_1000T_TEST_MODE_1_		(0x2000)
+#define CR_1000T_MS_ENABLE_		(0x1000)
+#define CR_1000T_MS_VALUE_		(0x0800)
+#define CR_1000T_REPEATER_DTE_		(0x0400)
+#define CR_1000T_FD_CAPS_		(0x0200)
+#define CR_1000T_HD_CAPS_		(0x0100)
+#define CR_1000T_ASYM_PAUSE_		(0x0080)
+#define CR_1000T_TEST_MODE_NORMAL_	(0x0000)
+
+#define PHY_1000T_STATUS		(0x0A)
+#define SR_1000T_MS_CONFIG_FAULT_	(0x8000)
+#define SR_1000T_MS_CONFIG_RES_		(0x4000)
+#define SR_1000T_LOCAL_RX_STATUS_	(0x2000)
+#define SR_1000T_REMOTE_RX_STATUS_	(0x1000)
+#define SR_1000T_LP_FD_CAPS_		(0x0800)
+#define SR_1000T_LP_HD_CAPS_		(0x0400)
+#define SR_1000T_ASYM_PAUSE_DIR_	(0x0100)
+#define SR_1000T_IDLE_ERROR_CNT_	(0x00FF)
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT		12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT		13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT	5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20		20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100		100
+
+#define PHY_EXT_STATUS			(0x0F)
+#define IEEE_ESR_1000X_FD_CAPS_		(0x8000)
+#define IEEE_ESR_1000X_HD_CAPS_		(0x4000)
+#define IEEE_ESR_1000T_FD_CAPS_		(0x2000)
+#define IEEE_ESR_1000T_HD_CAPS_		(0x1000)
+#define PHY_TX_POLARITY_MASK_		(0x0100)
+#define PHY_TX_NORMAL_POLARITY_		(0x0000)
+#define AUTO_POLARITY_DISABLE_		(0x0010)
+
+#define PHY_MMD_CTL			(0x0D)
+#define PHY_MMD_CTRL_OP_MASK_		(0xC000)
+#define PHY_MMD_CTRL_OP_REG_		(0x0000)
+#define PHY_MMD_CTRL_OP_DNI_		(0x4000)
+#define PHY_MMD_CTRL_OP_DPIRW_		(0x8000)
+#define PHY_MMD_CTRL_OP_DPIWO_		(0xC000)
+#define PHY_MMD_CTRL_DEV_ADDR_MASK_	(0x001F)
+
+#define PHY_MMD_REG_DATA		(0x0E)
+
+/* VTSE Vendor Specific registers */
+#define PHY_VTSE_BYPASS				(0x12)
+#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_	(0x0020)
+
+#define PHY_VTSE_INT_MASK			(0x19)
+#define PHY_VTSE_INT_MASK_MDINTPIN_EN_		(0x8000)
+#define PHY_VTSE_INT_MASK_SPEED_CHANGE_		(0x4000)
+#define PHY_VTSE_INT_MASK_LINK_CHANGE_		(0x2000)
+#define PHY_VTSE_INT_MASK_FDX_CHANGE_		(0x1000)
+#define PHY_VTSE_INT_MASK_AUTONEG_ERR_		(0x0800)
+#define PHY_VTSE_INT_MASK_AUTONEG_DONE_		(0x0400)
+#define PHY_VTSE_INT_MASK_POE_DETECT_		(0x0200)
+#define PHY_VTSE_INT_MASK_SYMBOL_ERR_		(0x0100)
+#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_	(0x0080)
+#define PHY_VTSE_INT_MASK_WOL_EVENT_		(0x0040)
+#define PHY_VTSE_INT_MASK_EXTENDED_INT_		(0x0020)
+#define PHY_VTSE_INT_MASK_RESERVED_		(0x0010)
+#define PHY_VTSE_INT_MASK_FALSE_CARRIER_	(0x0008)
+#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_	(0x0004)
+#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_	(0x0002)
+#define PHY_VTSE_INT_MASK_RX__ER_		(0x0001)
+
+#define PHY_VTSE_INT_STS			(0x1A)
+#define PHY_VTSE_INT_STS_INT_ACTIVE_		(0x8000)
+#define PHY_VTSE_INT_STS_SPEED_CHANGE_		(0x4000)
+#define PHY_VTSE_INT_STS_LINK_CHANGE_		(0x2000)
+#define PHY_VTSE_INT_STS_FDX_CHANGE_		(0x1000)
+#define PHY_VTSE_INT_STS_AUTONEG_ERR_		(0x0800)
+#define PHY_VTSE_INT_STS_AUTONEG_DONE_		(0x0400)
+#define PHY_VTSE_INT_STS_POE_DETECT_		(0x0200)
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_		(0x0100)
+#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_	(0x0080)
+#define PHY_VTSE_INT_STS_WOL_EVENT_		(0x0040)
+#define PHY_VTSE_INT_STS_EXTENDED_INT_		(0x0020)
+#define PHY_VTSE_INT_STS_RESERVED_		(0x0010)
+#define PHY_VTSE_INT_STS_FALSE_CARRIER_		(0x0008)
+#define PHY_VTSE_INT_STS_LINK_SPEED_DS_		(0x0004)
+#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_	(0x0002)
+#define PHY_VTSE_INT_STS_RX_ER_			(0x0001)
+
+/* VTSE PHY registers */
+#define PHY_EXT_GPIO_PAGE		(0x1F)
+#define PHY_EXT_GPIO_PAGE_SPACE_0	(0x0000)
+#define PHY_EXT_GPIO_PAGE_SPACE_1	(0x0001)
+#define PHY_EXT_GPIO_PAGE_SPACE_2	(0x0002)
+
+/* Extended Register Page 1 space */
+#define PHY_EXT_MODE_CTRL		(0x13)
+#define PHY_EXT_MODE_CTRL_MDIX_MASK_	(0x000C)
+#define PHY_EXT_MODE_CTRL_AUTO_MDIX_	(0x0000)
+#define PHY_EXT_MODE_CTRL_MDI_		(0x0008)
+#define PHY_EXT_MODE_CTRL_MDI_X_	(0x000C)
+
+#define PHY_ANA_10BASE_T_HD		0x01
+#define PHY_ANA_10BASE_T_FD		0x02
+#define PHY_ANA_100BASE_TX_HD		0x04
+#define PHY_ANA_100BASE_TX_FD		0x08
+#define PHY_ANA_1000BASE_T_FD		0x10
+#define PHY_ANA_ALL_SUPPORTED_MEDIA	(PHY_ANA_10BASE_T_HD |	 \
+					 PHY_ANA_10BASE_T_FD |	 \
+					 PHY_ANA_100BASE_TX_HD | \
+					 PHY_ANA_100BASE_TX_FD | \
+					 PHY_ANA_1000BASE_T_FD)
+/* PHY MMD registers */
+#define PHY_MMD_DEV_3				3
+
+#define PHY_EEE_PCS_STATUS			(0x1)
+#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_		((WORD)0x0800)
+#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_		((WORD)0x0400)
+#define PHY_EEE_PCS_STATUS_TX_LPI_IND_		((WORD)0x0200)
+#define PHY_EEE_PCS_STATUS_RX_LPI_IND_		((WORD)0x0100)
+#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_	((WORD)0x0004)
+
+#define PHY_EEE_CAPABILITIES			(0x14)
+#define PHY_EEE_CAPABILITIES_1000BT_EEE_	((WORD)0x0004)
+#define PHY_EEE_CAPABILITIES_100BT_EEE_		((WORD)0x0002)
+
+#define PHY_MMD_DEV_7				7
+
+#define PHY_EEE_ADVERTISEMENT			(0x3C)
+#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_	((WORD)0x0004)
+#define PHY_EEE_ADVERTISEMENT_100BT_EEE_	((WORD)0x0002)
+
+#define PHY_EEE_LP_ADVERTISEMENT		(0x3D)
+#define PHY_EEE_1000BT_EEE_CAPABLE_		((WORD)0x0004)
+#define PHY_EEE_100BT_EEE_CAPABLE_		((WORD)0x0002)
+#endif /* _LAN78XX_H */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 64a60af..355842b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -752,8 +752,8 @@
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC73xx */
-	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC73xx */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC7304/MC7354 */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC7304/MC7354 */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},    /* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},    /* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
@@ -785,6 +785,7 @@
 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81b1, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x581d, 4)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ad8cbc6..fe4ec32 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -339,6 +339,7 @@
 
 /* USB_USB_CTRL */
 #define RX_AGG_DISABLE		0x0010
+#define RX_ZERO_EN		0x0080
 
 /* USB_U2P3_CTRL */
 #define U2P3_ENABLE		0x0001
@@ -622,6 +623,7 @@
 	RTL_VER_03,
 	RTL_VER_04,
 	RTL_VER_05,
+	RTL_VER_06,
 	RTL_VER_MAX
 };
 
@@ -2610,7 +2612,10 @@
 	u32 ocp_data;
 	u16 data;
 
-	ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+	if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+	    tp->version == RTL_VER_05)
+		ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
 	data = r8152_mdio_read(tp, MII_BMCR);
 	if (data & BMCR_PDOWN) {
 		data &= ~BMCR_PDOWN;
@@ -2711,7 +2716,7 @@
 
 	/* rx aggregation */
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-	ocp_data &= ~RX_AGG_DISABLE;
+	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
 	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3241,7 +3246,7 @@
 
 	/* enable rx aggregation */
 	ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-	ocp_data &= ~RX_AGG_DISABLE;
+	ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
 	ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3293,6 +3298,13 @@
 		else
 			ocp_data |= DYNAMIC_BURST;
 		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+	} else if (tp->version == RTL_VER_06) {
+		ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+		if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+			ocp_data &= ~DYNAMIC_BURST;
+		else
+			ocp_data |= DYNAMIC_BURST;
+		ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
 	}
 
 	ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2);
@@ -3988,6 +4000,10 @@
 		tp->version = RTL_VER_05;
 		tp->mii.supports_gmii = 1;
 		break;
+	case 0x5c30:
+		tp->version = RTL_VER_06;
+		tp->mii.supports_gmii = 1;
+		break;
 	default:
 		netif_info(tp, probe, tp->netdev,
 			   "Unknown version 0x%04x\n", version);
@@ -4033,6 +4049,7 @@
 	case RTL_VER_03:
 	case RTL_VER_04:
 	case RTL_VER_05:
+	case RTL_VER_06:
 		ops->init		= r8153_init;
 		ops->enable		= rtl8153_enable;
 		ops->disable		= rtl8153_disable;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3c86b10..e049857 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -778,7 +778,7 @@
 {
 	struct usbnet		*dev = netdev_priv(net);
 	struct driver_info	*info = dev->driver_info;
-	int			retval, pm;
+	int			retval, pm, mpn;
 
 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
 	netif_stop_queue (net);
@@ -809,6 +809,8 @@
 
 	usbnet_purge_paused_rxq(dev);
 
+	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
+
 	/* deferred work (task, timer, softirq) must also stop.
 	 * can't flush_scheduled_work() until we drop rtnl (later),
 	 * else workers could deadlock; so make workers a NOP.
@@ -819,8 +821,7 @@
 	if (!pm)
 		usb_autopm_put_interface(dev->intf);
 
-	if (info->manage_power &&
-	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
+	if (info->manage_power && mpn)
 		info->manage_power(dev, 0);
 	else
 		usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index c8186ff..0ef4a5a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -290,6 +290,7 @@
 	.ndo_poll_controller	= veth_poll_controller,
 #endif
 	.ndo_get_iflink		= veth_get_iflink,
+	.ndo_features_check	= passthru_features_check,
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
@@ -305,6 +306,7 @@
 
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	dev->netdev_ops = &veth_netdev_ops;
 	dev->ethtool_ops = &veth_ethtool_ops;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 237f8e5..d8838ded 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,12 +40,12 @@
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN	128
 
-/* Weight used for the RX packet size EWMA. The average packet size is used to
- * determine the packet buffer size when refilling RX rings. As the entire RX
- * ring may be refilled at once, the weight is chosen so that the EWMA will be
- * insensitive to short-term, transient changes in packet size.
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
  */
-#define RECEIVE_AVG_WEIGHT 64
+DECLARE_EWMA(pkt_len, 1, 64)
 
 /* Minimum alignment for mergeable packet buffers. */
 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
@@ -85,7 +85,7 @@
 	struct page *pages;
 
 	/* Average packet length for mergeable receive buffers. */
-	struct ewma mrg_avg_pkt_len;
+	struct ewma_pkt_len mrg_avg_pkt_len;
 
 	/* Page frag for packet buffer allocation. */
 	struct page_frag alloc_frag;
@@ -407,7 +407,7 @@
 		}
 	}
 
-	ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
+	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
 	return head_skb;
 
 err_skb:
@@ -518,7 +518,7 @@
 
 	skb_mark_napi_id(skb, &rq->napi);
 
-	netif_receive_skb(skb);
+	napi_gro_receive(&rq->napi, skb);
 	return;
 
 frame_err:
@@ -540,7 +540,7 @@
 	skb_put(skb, GOOD_PACKET_LEN);
 
 	hdr = skb_vnet_hdr(skb);
-	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
+	sg_init_table(rq->sg, 2);
 	sg_set_buf(rq->sg, hdr, vi->hdr_len);
 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
 
@@ -600,12 +600,12 @@
 	return err;
 }
 
-static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
+static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
 {
 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	unsigned int len;
 
-	len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
+	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
 			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
 	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
 }
@@ -756,7 +756,7 @@
 	/* Out of packets? */
 	if (received < budget) {
 		r = virtqueue_enable_cb_prepare(rq->vq);
-		napi_complete(napi);
+		napi_complete_done(napi, received);
 		if (unlikely(virtqueue_poll(rq->vq, r)) &&
 		    napi_schedule_prep(napi)) {
 			virtqueue_disable_cb(rq->vq);
@@ -893,7 +893,7 @@
 	if (vi->mergeable_rx_bufs)
 		hdr->num_buffers = 0;
 
-	sg_init_table(sq->sg, MAX_SKB_FRAGS + 2);
+	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
 	if (can_push) {
 		__skb_push(skb, hdr_len);
 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
@@ -1615,7 +1615,7 @@
 		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
-		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
+		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
 	}
 
@@ -1658,7 +1658,7 @@
 {
 	struct virtnet_info *vi = netdev_priv(queue->dev);
 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
-	struct ewma *avg;
+	struct ewma_pkt_len *avg;
 
 	BUG_ON(queue_index >= vi->max_queue_pairs);
 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
new file mode 100644
index 0000000..e7094fb
--- /dev/null
+++ b/drivers/net/vrf.c
@@ -0,0 +1,710 @@
+/*
+ * vrf.c: device driver to encapsulate a VRF space
+ *
+ * Copyright (c) 2015 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
+ * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * Based on dummy, team and ipvlan drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/hashtable.h>
+
+#include <linux/inetdevice.h>
+#include <net/arp.h>
+#include <net/ip.h>
+#include <net/ip_fib.h>
+#include <net/ip6_route.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/vrf.h>
+
+#define DRV_NAME	"vrf"
+#define DRV_VERSION	"1.0"
+
+#define vrf_is_slave(dev)   ((dev)->flags & IFF_SLAVE)
+
+#define vrf_master_get_rcu(dev) \
+	((struct net_device *)rcu_dereference(dev->rx_handler_data))
+
+struct pcpu_dstats {
+	u64			tx_pkts;
+	u64			tx_bytes;
+	u64			tx_drps;
+	u64			rx_pkts;
+	u64			rx_bytes;
+	struct u64_stats_sync	syncp;
+};
+
+static struct dst_entry *vrf_ip_check(struct dst_entry *dst, u32 cookie)
+{
+	return dst;
+}
+
+static int vrf_ip_local_out(struct sk_buff *skb)
+{
+	return ip_local_out(skb);
+}
+
+static unsigned int vrf_v4_mtu(const struct dst_entry *dst)
+{
+	/* TO-DO: return max ethernet size? */
+	return dst->dev->mtu;
+}
+
+static void vrf_dst_destroy(struct dst_entry *dst)
+{
+	/* our dst lives forever - or until the device is closed */
+}
+
+static unsigned int vrf_default_advmss(const struct dst_entry *dst)
+{
+	return 65535 - 40;
+}
+
+static struct dst_ops vrf_dst_ops = {
+	.family		= AF_INET,
+	.local_out	= vrf_ip_local_out,
+	.check		= vrf_ip_check,
+	.mtu		= vrf_v4_mtu,
+	.destroy	= vrf_dst_destroy,
+	.default_advmss	= vrf_default_advmss,
+};
+
+static bool is_ip_rx_frame(struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		return true;
+	}
+	return false;
+}
+
+static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
+{
+	vrf_dev->stats.tx_errors++;
+	kfree_skb(skb);
+}
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+
+	if (is_ip_rx_frame(skb)) {
+		struct net_device *dev = vrf_master_get_rcu(skb->dev);
+		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+		u64_stats_update_begin(&dstats->syncp);
+		dstats->rx_pkts++;
+		dstats->rx_bytes += skb->len;
+		u64_stats_update_end(&dstats->syncp);
+
+		skb->dev = dev;
+
+		return RX_HANDLER_ANOTHER;
+	}
+	return RX_HANDLER_PASS;
+}
+
+static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		const struct pcpu_dstats *dstats;
+		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+		unsigned int start;
+
+		dstats = per_cpu_ptr(dev->dstats, i);
+		do {
+			start = u64_stats_fetch_begin_irq(&dstats->syncp);
+			tbytes = dstats->tx_bytes;
+			tpkts = dstats->tx_pkts;
+			tdrops = dstats->tx_drps;
+			rbytes = dstats->rx_bytes;
+			rpkts = dstats->rx_pkts;
+		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
+		stats->tx_bytes += tbytes;
+		stats->tx_packets += tpkts;
+		stats->tx_dropped += tdrops;
+		stats->rx_bytes += rbytes;
+		stats->rx_packets += rpkts;
+	}
+	return stats;
+}
+
+static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
+					   struct net_device *dev)
+{
+	vrf_tx_error(dev, skb);
+	return NET_XMIT_DROP;
+}
+
+static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4,
+			    struct net_device *vrf_dev)
+{
+	struct rtable *rt;
+	int err = 1;
+
+	rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL);
+	if (IS_ERR(rt))
+		goto out;
+
+	/* TO-DO: what about broadcast ? */
+	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+		ip_rt_put(rt);
+		goto out;
+	}
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->dst);
+	err = 0;
+out:
+	return err;
+}
+
+static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
+					   struct net_device *vrf_dev)
+{
+	struct iphdr *ip4h = ip_hdr(skb);
+	int ret = NET_XMIT_DROP;
+	struct flowi4 fl4 = {
+		/* needed to match OIF rule */
+		.flowi4_oif = vrf_dev->ifindex,
+		.flowi4_iif = LOOPBACK_IFINDEX,
+		.flowi4_tos = RT_TOS(ip4h->tos),
+		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
+		.daddr = ip4h->daddr,
+	};
+
+	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
+		goto err;
+
+	if (!ip4h->saddr) {
+		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
+					       RT_SCOPE_LINK);
+	}
+
+	ret = ip_local_out(skb);
+	if (unlikely(net_xmit_eval(ret)))
+		vrf_dev->stats.tx_errors++;
+	else
+		ret = NET_XMIT_SUCCESS;
+
+out:
+	return ret;
+err:
+	vrf_tx_error(vrf_dev, skb);
+	goto out;
+}
+
+static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
+{
+	/* strip the ethernet header added for pass through VRF device */
+	__skb_pull(skb, skb_network_offset(skb));
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return vrf_process_v4_outbound(skb, dev);
+	case htons(ETH_P_IPV6):
+		return vrf_process_v6_outbound(skb, dev);
+	default:
+		vrf_tx_error(dev, skb);
+		return NET_XMIT_DROP;
+	}
+}
+
+static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+
+	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
+		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+		u64_stats_update_begin(&dstats->syncp);
+		dstats->tx_pkts++;
+		dstats->tx_bytes += skb->len;
+		u64_stats_update_end(&dstats->syncp);
+	} else {
+		this_cpu_inc(dev->dstats->tx_drps);
+	}
+
+	return ret;
+}
+
+/* modelled after ip_finish_output2 */
+static int vrf_finish_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct rtable *rt = (struct rtable *)dst;
+	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	struct neighbour *neigh;
+	u32 nexthop;
+	int ret = -EINVAL;
+
+	/* Be paranoid, rather than too clever. */
+	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+		struct sk_buff *skb2;
+
+		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
+		if (!skb2) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		if (skb->sk)
+			skb_set_owner_w(skb2, skb->sk);
+
+		consume_skb(skb);
+		skb = skb2;
+	}
+
+	rcu_read_lock_bh();
+
+	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
+	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+	if (unlikely(!neigh))
+		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+	if (!IS_ERR(neigh))
+		ret = dst_neigh_output(dst, neigh, skb);
+
+	rcu_read_unlock_bh();
+err:
+	if (unlikely(ret < 0))
+		vrf_tx_error(skb->dev, skb);
+	return ret;
+}
+
+static int vrf_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct net_device *dev = skb_dst(skb)->dev;
+
+	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+
+	skb->dev = dev;
+	skb->protocol = htons(ETH_P_IP);
+
+	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, sk, skb,
+			    NULL, dev,
+			    vrf_finish_output,
+			    !(IPCB(skb)->flags & IPSKB_REROUTED));
+}
+
+static void vrf_rtable_destroy(struct net_vrf *vrf)
+{
+	struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+
+	dst_destroy(dst);
+	vrf->rth = NULL;
+}
+
+static struct rtable *vrf_rtable_create(struct net_device *dev)
+{
+	struct rtable *rth;
+
+	rth = dst_alloc(&vrf_dst_ops, dev, 2,
+			DST_OBSOLETE_NONE,
+			(DST_HOST | DST_NOPOLICY | DST_NOXFRM));
+	if (rth) {
+		rth->dst.output	= vrf_output;
+		rth->rt_genid	= rt_genid_ipv4(dev_net(dev));
+		rth->rt_flags	= 0;
+		rth->rt_type	= RTN_UNICAST;
+		rth->rt_is_input = 0;
+		rth->rt_iif	= 0;
+		rth->rt_pmtu	= 0;
+		rth->rt_gateway	= 0;
+		rth->rt_uses_gateway = 0;
+		INIT_LIST_HEAD(&rth->rt_uncached);
+		rth->rt_uncached_list = NULL;
+	}
+
+	return rth;
+}
+
+/**************************** device handling ********************/
+
+/* cycle interface to flush neighbor cache and move routes across tables */
+static void cycle_netdev(struct net_device *dev)
+{
+	unsigned int flags = dev->flags;
+	int ret;
+
+	if (!netif_running(dev))
+		return;
+
+	ret = dev_change_flags(dev, flags & ~IFF_UP);
+	if (ret >= 0)
+		ret = dev_change_flags(dev, flags);
+
+	if (ret < 0) {
+		netdev_err(dev,
+			   "Failed to cycle device %s; route tables might be wrong!\n",
+			   dev->name);
+	}
+}
+
+static struct slave *__vrf_find_slave_dev(struct slave_queue *queue,
+					  struct net_device *dev)
+{
+	struct list_head *head = &queue->all_slaves;
+	struct slave *slave;
+
+	list_for_each_entry(slave, head, list) {
+		if (slave->dev == dev)
+			return slave;
+	}
+
+	return NULL;
+}
+
+/* inverse of __vrf_insert_slave */
+static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave)
+{
+	list_del(&slave->list);
+}
+
+static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave)
+{
+	list_add(&slave->list, &queue->all_slaves);
+}
+
+static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	struct net_vrf_dev *vrf_ptr = kmalloc(sizeof(*vrf_ptr), GFP_KERNEL);
+	struct slave *slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct slave_queue *queue = &vrf->queue;
+	int ret = -ENOMEM;
+
+	if (!slave || !vrf_ptr)
+		goto out_fail;
+
+	slave->dev = port_dev;
+	vrf_ptr->ifindex = dev->ifindex;
+	vrf_ptr->tb_id = vrf->tb_id;
+
+	/* register the packet handler for slave ports */
+	ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
+	if (ret) {
+		netdev_err(port_dev,
+			   "Device %s failed to register rx_handler\n",
+			   port_dev->name);
+		goto out_fail;
+	}
+
+	ret = netdev_master_upper_dev_link(port_dev, dev);
+	if (ret < 0)
+		goto out_unregister;
+
+	port_dev->flags |= IFF_SLAVE;
+	__vrf_insert_slave(queue, slave);
+	rcu_assign_pointer(port_dev->vrf_ptr, vrf_ptr);
+	cycle_netdev(port_dev);
+
+	return 0;
+
+out_unregister:
+	netdev_rx_handler_unregister(port_dev);
+out_fail:
+	kfree(vrf_ptr);
+	kfree(slave);
+	return ret;
+}
+
+static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	if (netif_is_vrf(port_dev) || vrf_is_slave(port_dev))
+		return -EINVAL;
+
+	return do_vrf_add_slave(dev, port_dev);
+}
+
+/* inverse of do_vrf_add_slave */
+static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	struct net_vrf_dev *vrf_ptr = rtnl_dereference(port_dev->vrf_ptr);
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct slave_queue *queue = &vrf->queue;
+	struct slave *slave;
+
+	RCU_INIT_POINTER(port_dev->vrf_ptr, NULL);
+
+	netdev_upper_dev_unlink(port_dev, dev);
+	port_dev->flags &= ~IFF_SLAVE;
+
+	netdev_rx_handler_unregister(port_dev);
+
+	/* after netdev_rx_handler_unregister for synchronize_rcu */
+	kfree(vrf_ptr);
+
+	cycle_netdev(port_dev);
+
+	slave = __vrf_find_slave_dev(queue, port_dev);
+	if (slave)
+		__vrf_remove_slave(queue, slave);
+
+	kfree(slave);
+
+	return 0;
+}
+
+static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+	return do_vrf_del_slave(dev, port_dev);
+}
+
+static void vrf_dev_uninit(struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct slave_queue *queue = &vrf->queue;
+	struct list_head *head = &queue->all_slaves;
+	struct slave *slave, *next;
+
+	vrf_rtable_destroy(vrf);
+
+	list_for_each_entry_safe(slave, next, head, list)
+		vrf_del_slave(dev, slave->dev);
+
+	free_percpu(dev->dstats);
+	dev->dstats = NULL;
+}
+
+static int vrf_dev_init(struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	INIT_LIST_HEAD(&vrf->queue.all_slaves);
+
+	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
+	if (!dev->dstats)
+		goto out_nomem;
+
+	/* create the default dst which points back to us */
+	vrf->rth = vrf_rtable_create(dev);
+	if (!vrf->rth)
+		goto out_stats;
+
+	dev->flags = IFF_MASTER | IFF_NOARP;
+
+	return 0;
+
+out_stats:
+	free_percpu(dev->dstats);
+	dev->dstats = NULL;
+out_nomem:
+	return -ENOMEM;
+}
+
+static const struct net_device_ops vrf_netdev_ops = {
+	.ndo_init		= vrf_dev_init,
+	.ndo_uninit		= vrf_dev_uninit,
+	.ndo_start_xmit		= vrf_xmit,
+	.ndo_get_stats64	= vrf_get_stats64,
+	.ndo_add_slave		= vrf_add_slave,
+	.ndo_del_slave		= vrf_del_slave,
+};
+
+static void vrf_get_drvinfo(struct net_device *dev,
+			    struct ethtool_drvinfo *info)
+{
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+}
+
+static const struct ethtool_ops vrf_ethtool_ops = {
+	.get_drvinfo	= vrf_get_drvinfo,
+};
+
+static void vrf_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	/* Initialize the device structure. */
+	dev->netdev_ops = &vrf_netdev_ops;
+	dev->ethtool_ops = &vrf_ethtool_ops;
+	dev->destructor = free_netdev;
+
+	/* Fill in device structure with ethernet-generic values. */
+	eth_hw_addr_random(dev);
+
+	/* don't acquire vrf device's netif_tx_lock when transmitting */
+	dev->features |= NETIF_F_LLTX;
+
+	/* don't allow vrf devices to change network namespaces. */
+	dev->features |= NETIF_F_NETNS_LOCAL;
+}
+
+static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+	if (tb[IFLA_ADDRESS]) {
+		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+			return -EINVAL;
+		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+			return -EADDRNOTAVAIL;
+	}
+	return 0;
+}
+
+static void vrf_dellink(struct net_device *dev, struct list_head *head)
+{
+	struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+
+	RCU_INIT_POINTER(dev->vrf_ptr, NULL);
+	kfree_rcu(vrf_ptr, rcu);
+	unregister_netdevice_queue(dev, head);
+}
+
+static int vrf_newlink(struct net *src_net, struct net_device *dev,
+		       struct nlattr *tb[], struct nlattr *data[])
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+	struct net_vrf_dev *vrf_ptr;
+	int err;
+
+	if (!data || !data[IFLA_VRF_TABLE])
+		return -EINVAL;
+
+	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+
+	dev->priv_flags |= IFF_VRF_MASTER;
+
+	err = -ENOMEM;
+	vrf_ptr = kmalloc(sizeof(*dev->vrf_ptr), GFP_KERNEL);
+	if (!vrf_ptr)
+		goto out_fail;
+
+	vrf_ptr->ifindex = dev->ifindex;
+	vrf_ptr->tb_id = vrf->tb_id;
+
+	err = register_netdevice(dev);
+	if (err < 0)
+		goto out_fail;
+
+	rcu_assign_pointer(dev->vrf_ptr, vrf_ptr);
+
+	return 0;
+
+out_fail:
+	kfree(vrf_ptr);
+	free_netdev(dev);
+	return err;
+}
+
+static size_t vrf_nl_getsize(const struct net_device *dev)
+{
+	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
+}
+
+static int vrf_fillinfo(struct sk_buff *skb,
+			const struct net_device *dev)
+{
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
+}
+
+static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
+	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vrf_link_ops __read_mostly = {
+	.kind		= DRV_NAME,
+	.priv_size	= sizeof(struct net_vrf),
+
+	.get_size	= vrf_nl_getsize,
+	.policy		= vrf_nl_policy,
+	.validate	= vrf_validate,
+	.fill_info	= vrf_fillinfo,
+
+	.newlink	= vrf_newlink,
+	.dellink	= vrf_dellink,
+	.setup		= vrf_setup,
+	.maxtype	= IFLA_VRF_MAX,
+};
+
+static int vrf_device_event(struct notifier_block *unused,
+			    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	/* only care about unregister events to drop slave references */
+	if (event == NETDEV_UNREGISTER) {
+		struct net_vrf_dev *vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+		struct net_device *vrf_dev;
+
+		if (!vrf_ptr || netif_is_vrf(dev))
+			goto out;
+
+		vrf_dev = netdev_master_upper_dev_get(dev);
+		vrf_del_slave(vrf_dev, dev);
+	}
+out:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block vrf_notifier_block __read_mostly = {
+	.notifier_call = vrf_device_event,
+};
+
+static int __init vrf_init_module(void)
+{
+	int rc;
+
+	vrf_dst_ops.kmem_cachep =
+		kmem_cache_create("vrf_ip_dst_cache",
+				  sizeof(struct rtable), 0,
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+
+	if (!vrf_dst_ops.kmem_cachep)
+		return -ENOMEM;
+
+	register_netdevice_notifier(&vrf_notifier_block);
+
+	rc = rtnl_link_register(&vrf_link_ops);
+	if (rc < 0)
+		goto error;
+
+	return 0;
+
+error:
+	unregister_netdevice_notifier(&vrf_notifier_block);
+	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+	return rc;
+}
+
+static void __exit vrf_cleanup_module(void)
+{
+	rtnl_link_unregister(&vrf_link_ops);
+	unregister_netdevice_notifier(&vrf_notifier_block);
+	kmem_cache_destroy(vrf_dst_ops.kmem_cachep);
+}
+
+module_init(vrf_init_module);
+module_exit(vrf_cleanup_module);
+MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
+MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 34c519e..ce988fd 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -49,15 +49,12 @@
 #include <net/ip6_tunnel.h>
 #include <net/ip6_checksum.h>
 #endif
+#include <net/dst_metadata.h>
 
 #define VXLAN_VERSION	"0.1"
 
 #define PORT_HASH_BITS	8
 #define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
-#define VNI_HASH_BITS	10
-#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS	8
-#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
 #define FDB_AGE_DEFAULT 300 /* 5 min */
 #define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */
 
@@ -74,9 +71,13 @@
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
 static int vxlan_net_id;
+static struct rtnl_link_ops vxlan_link_ops;
 
 static const u8 all_zeros_mac[ETH_ALEN];
 
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+					 bool no_share, u32 flags);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head  vxlan_list;
@@ -84,21 +85,6 @@
 	spinlock_t	  sock_lock;
 };
 
-union vxlan_addr {
-	struct sockaddr_in sin;
-	struct sockaddr_in6 sin6;
-	struct sockaddr sa;
-};
-
-struct vxlan_rdst {
-	union vxlan_addr	 remote_ip;
-	__be16			 remote_port;
-	u32			 remote_vni;
-	u32			 remote_ifindex;
-	struct list_head	 list;
-	struct rcu_head		 rcu;
-};
-
 /* Forwarding table entry */
 struct vxlan_fdb {
 	struct hlist_node hlist;	/* linked list of entries */
@@ -106,40 +92,21 @@
 	unsigned long	  updated;	/* jiffies */
 	unsigned long	  used;
 	struct list_head  remotes;
+	u8		  eth_addr[ETH_ALEN];
 	u16		  state;	/* see ndm_state */
 	u8		  flags;	/* see ndm_flags */
-	u8		  eth_addr[ETH_ALEN];
-};
-
-/* Pseudo network device */
-struct vxlan_dev {
-	struct hlist_node hlist;	/* vni hash table */
-	struct list_head  next;		/* vxlan's per namespace list */
-	struct vxlan_sock *vn_sock;	/* listening socket */
-	struct net_device *dev;
-	struct net	  *net;		/* netns for packet i/o */
-	struct vxlan_rdst default_dst;	/* default destination */
-	union vxlan_addr  saddr;	/* source address */
-	__be16		  dst_port;
-	__u16		  port_min;	/* source port range */
-	__u16		  port_max;
-	__u8		  tos;		/* TOS override */
-	__u8		  ttl;
-	u32		  flags;	/* VXLAN_F_* in vxlan.h */
-
-	unsigned long	  age_interval;
-	struct timer_list age_timer;
-	spinlock_t	  hash_lock;
-	unsigned int	  addrcnt;
-	unsigned int	  addrmax;
-
-	struct hlist_head fdb_head[FDB_HASH_SIZE];
 };
 
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 static struct workqueue_struct *vxlan_wq;
 
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+	return vs->flags & VXLAN_F_COLLECT_METADATA ||
+	       ip_tunnel_collect_metadata();
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline
 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
@@ -269,7 +236,7 @@
 
 	hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
 		if (inet_sk(vs->sock->sk)->inet_sport == port &&
-		    inet_sk(vs->sock->sk)->sk.sk_family == family &&
+		    vxlan_get_sk_family(vs) == family &&
 		    vs->flags == flags)
 			return vs;
 	}
@@ -345,7 +312,7 @@
 	if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
 		goto nla_put_failure;
 
-	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
+	if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
 	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
 		goto nla_put_failure;
 	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
@@ -552,10 +519,10 @@
 					  u32 data, struct gro_remcsum *grc,
 					  bool nopartial)
 {
-	size_t start, offset, plen;
+	size_t start, offset;
 
 	if (skb->remcsum_offload)
-		return NULL;
+		return vh;
 
 	if (!NAPI_GRO_CB(skb)->csum_valid)
 		return NULL;
@@ -565,17 +532,8 @@
 			  offsetof(struct udphdr, check) :
 			  offsetof(struct tcphdr, check));
 
-	plen = hdrlen + offset + sizeof(u16);
-
-	/* Pull checksum that will be written */
-	if (skb_gro_header_hard(skb, off + plen)) {
-		vh = skb_gro_header_slow(skb, off + plen, off);
-		if (!vh)
-			return NULL;
-	}
-
-	skb_gro_remcsum_process(skb, (void *)vh + hdrlen,
-				start, offset, grc, nopartial);
+	vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen,
+				     start, offset, grc, nopartial);
 
 	skb->remcsum_offload = 1;
 
@@ -606,7 +564,6 @@
 			goto out;
 	}
 
-	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 	skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr));
 
 	flags = ntohl(vh->vx_flags);
@@ -621,6 +578,8 @@
 			goto out;
 	}
 
+	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
+
 	flush = 0;
 
 	for (p = *head; p; p = p->next) {
@@ -658,7 +617,7 @@
 	struct net_device *dev;
 	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
-	sa_family_t sa_family = sk->sk_family;
+	sa_family_t sa_family = vxlan_get_sk_family(vs);
 	__be16 port = inet_sk(sk)->inet_sport;
 	int err;
 
@@ -683,7 +642,7 @@
 	struct net_device *dev;
 	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
-	sa_family_t sa_family = sk->sk_family;
+	sa_family_t sa_family = vxlan_get_sk_family(vs);
 	__be16 port = inet_sk(sk)->inet_sport;
 
 	rcu_read_lock();
@@ -749,7 +708,8 @@
 		if (!(flags & NLM_F_CREATE))
 			return -ENOENT;
 
-		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+		if (vxlan->cfg.addrmax &&
+		    vxlan->addrcnt >= vxlan->cfg.addrmax)
 			return -ENOSPC;
 
 		/* Disallow replace to add a multicast entry */
@@ -835,7 +795,7 @@
 			return -EINVAL;
 		*port = nla_get_be16(tb[NDA_PORT]);
 	} else {
-		*port = vxlan->dst_port;
+		*port = vxlan->cfg.dst_port;
 	}
 
 	if (tb[NDA_VNI]) {
@@ -963,10 +923,10 @@
 		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
 			struct vxlan_rdst *rd;
 
-			if (idx < cb->args[0])
-				goto skip;
-
 			list_for_each_entry_rcu(rd, &f->remotes, list) {
+				if (idx < cb->args[0])
+					goto skip;
+
 				err = vxlan_fdb_info(skb, vxlan, f,
 						     NETLINK_CB(cb->skb).portid,
 						     cb->nlh->nlmsg_seq,
@@ -974,9 +934,9 @@
 						     NLM_F_MULTI, rd);
 				if (err < 0)
 					goto out;
-			}
 skip:
-			++idx;
+				++idx;
+			}
 		}
 	}
 out:
@@ -1021,7 +981,7 @@
 			vxlan_fdb_create(vxlan, src_mac, src_ip,
 					 NUD_REACHABLE,
 					 NLM_F_EXCL|NLM_F_CREATE,
-					 vxlan->dst_port,
+					 vxlan->cfg.dst_port,
 					 vxlan->default_dst.remote_vni,
 					 0, NTF_SELF);
 		spin_unlock(&vxlan->hash_lock);
@@ -1062,7 +1022,7 @@
 	return false;
 }
 
-void vxlan_sock_release(struct vxlan_sock *vs)
+static void vxlan_sock_release(struct vxlan_sock *vs)
 {
 	struct sock *sk = vs->sock->sk;
 	struct net *net = sock_net(sk);
@@ -1078,7 +1038,6 @@
 
 	queue_work(vxlan_wq, &vs->del_work);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
 /* Update multicast group membership when first VNI on
  * multicast address is brought up
@@ -1143,6 +1102,9 @@
 {
 	size_t start, offset, plen;
 
+	if (skb->remcsum_offload)
+		return vh;
+
 	start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT;
 	offset = start + ((data & VXLAN_RCO_UDP) ?
 			  offsetof(struct udphdr, check) :
@@ -1161,13 +1123,112 @@
 	return vh;
 }
 
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+		      struct vxlan_metadata *md, u32 vni,
+		      struct metadata_dst *tun_dst)
+{
+	struct iphdr *oip = NULL;
+	struct ipv6hdr *oip6 = NULL;
+	struct vxlan_dev *vxlan;
+	struct pcpu_sw_netstats *stats;
+	union vxlan_addr saddr;
+	int err = 0;
+	union vxlan_addr *remote_ip;
+
+	/* For flow based devices, map all packets to VNI 0 */
+	if (vs->flags & VXLAN_F_COLLECT_METADATA)
+		vni = 0;
+
+	/* Is this VNI defined? */
+	vxlan = vxlan_vs_find_vni(vs, vni);
+	if (!vxlan)
+		goto drop;
+
+	remote_ip = &vxlan->default_dst.remote_ip;
+	skb_reset_mac_header(skb);
+	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
+	skb->protocol = eth_type_trans(skb, vxlan->dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+	/* Ignore packet loops (and multicast echo) */
+	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+		goto drop;
+
+	/* Re-examine inner Ethernet packet */
+	if (remote_ip->sa.sa_family == AF_INET) {
+		oip = ip_hdr(skb);
+		saddr.sin.sin_addr.s_addr = oip->saddr;
+		saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+	} else {
+		oip6 = ipv6_hdr(skb);
+		saddr.sin6.sin6_addr = oip6->saddr;
+		saddr.sa.sa_family = AF_INET6;
+#endif
+	}
+
+	if (tun_dst) {
+		skb_dst_set(skb, (struct dst_entry *)tun_dst);
+		tun_dst = NULL;
+	}
+
+	if ((vxlan->flags & VXLAN_F_LEARN) &&
+	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+		goto drop;
+
+	skb_reset_network_header(skb);
+	/* In flow-based mode, GBP is carried in dst_metadata */
+	if (!(vs->flags & VXLAN_F_COLLECT_METADATA))
+		skb->mark = md->gbp;
+
+	if (oip6)
+		err = IP6_ECN_decapsulate(oip6, skb);
+	if (oip)
+		err = IP_ECN_decapsulate(oip, skb);
+
+	if (unlikely(err)) {
+		if (log_ecn_error) {
+			if (oip6)
+				net_info_ratelimited("non-ECT from %pI6\n",
+						     &oip6->saddr);
+			if (oip)
+				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+						     &oip->saddr, oip->tos);
+		}
+		if (err > 1) {
+			++vxlan->dev->stats.rx_frame_errors;
+			++vxlan->dev->stats.rx_errors;
+			goto drop;
+		}
+	}
+
+	stats = this_cpu_ptr(vxlan->dev->tstats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
+
+	gro_cells_receive(&vxlan->gro_cells, skb);
+
+	return;
+drop:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
+
+	/* Consume bad packet */
+	kfree_skb(skb);
+}
+
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
+	struct metadata_dst *tun_dst = NULL;
+	struct ip_tunnel_info *info;
 	struct vxlan_sock *vs;
 	struct vxlanhdr *vxh;
 	u32 flags, vni;
-	struct vxlan_metadata md = {0};
+	struct vxlan_metadata _md;
+	struct vxlan_metadata *md = &_md;
 
 	/* Need Vxlan and inner Ethernet header to be present */
 	if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1202,6 +1263,19 @@
 		vni &= VXLAN_VNI_MASK;
 	}
 
+	if (vxlan_collect_metadata(vs)) {
+		tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
+					 cpu_to_be64(vni >> 8), sizeof(*md));
+
+		if (!tun_dst)
+			goto drop;
+
+		info = &tun_dst->u.tun_info;
+		md = ip_tunnel_info_opts(info);
+	} else {
+		memset(md, 0, sizeof(*md));
+	}
+
 	/* For backwards compatibility, only allow reserved fields to be
 	 * used by VXLAN extensions if explicitly requested.
 	 */
@@ -1209,13 +1283,16 @@
 		struct vxlanhdr_gbp *gbp;
 
 		gbp = (struct vxlanhdr_gbp *)vxh;
-		md.gbp = ntohs(gbp->policy_id);
+		md->gbp = ntohs(gbp->policy_id);
+
+		if (tun_dst)
+			info->key.tun_flags |= TUNNEL_VXLAN_OPT;
 
 		if (gbp->dont_learn)
-			md.gbp |= VXLAN_GBP_DONT_LEARN;
+			md->gbp |= VXLAN_GBP_DONT_LEARN;
 
 		if (gbp->policy_applied)
-			md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+			md->gbp |= VXLAN_GBP_POLICY_APPLIED;
 
 		flags &= ~VXLAN_GBP_USED_BITS;
 	}
@@ -1233,8 +1310,7 @@
 		goto bad_flags;
 	}
 
-	md.vni = vxh->vx_vni;
-	vs->rcv(vs, skb, &md);
+	vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
 	return 0;
 
 drop:
@@ -1247,93 +1323,13 @@
 		   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
 
 error:
+	if (tun_dst)
+		dst_release((struct dst_entry *)tun_dst);
+
 	/* Return non vxlan pkt */
 	return 1;
 }
 
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-		      struct vxlan_metadata *md)
-{
-	struct iphdr *oip = NULL;
-	struct ipv6hdr *oip6 = NULL;
-	struct vxlan_dev *vxlan;
-	struct pcpu_sw_netstats *stats;
-	union vxlan_addr saddr;
-	__u32 vni;
-	int err = 0;
-	union vxlan_addr *remote_ip;
-
-	vni = ntohl(md->vni) >> 8;
-	/* Is this VNI defined? */
-	vxlan = vxlan_vs_find_vni(vs, vni);
-	if (!vxlan)
-		goto drop;
-
-	remote_ip = &vxlan->default_dst.remote_ip;
-	skb_reset_mac_header(skb);
-	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
-	skb->protocol = eth_type_trans(skb, vxlan->dev);
-	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-	/* Ignore packet loops (and multicast echo) */
-	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
-		goto drop;
-
-	/* Re-examine inner Ethernet packet */
-	if (remote_ip->sa.sa_family == AF_INET) {
-		oip = ip_hdr(skb);
-		saddr.sin.sin_addr.s_addr = oip->saddr;
-		saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
-	} else {
-		oip6 = ipv6_hdr(skb);
-		saddr.sin6.sin6_addr = oip6->saddr;
-		saddr.sa.sa_family = AF_INET6;
-#endif
-	}
-
-	if ((vxlan->flags & VXLAN_F_LEARN) &&
-	    vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
-		goto drop;
-
-	skb_reset_network_header(skb);
-	skb->mark = md->gbp;
-
-	if (oip6)
-		err = IP6_ECN_decapsulate(oip6, skb);
-	if (oip)
-		err = IP_ECN_decapsulate(oip, skb);
-
-	if (unlikely(err)) {
-		if (log_ecn_error) {
-			if (oip6)
-				net_info_ratelimited("non-ECT from %pI6\n",
-						     &oip6->saddr);
-			if (oip)
-				net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
-						     &oip->saddr, oip->tos);
-		}
-		if (err > 1) {
-			++vxlan->dev->stats.rx_frame_errors;
-			++vxlan->dev->stats.rx_errors;
-			goto drop;
-		}
-	}
-
-	stats = this_cpu_ptr(vxlan->dev->tstats);
-	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len;
-	u64_stats_update_end(&stats->syncp);
-
-	netif_rx(skb);
-
-	return;
-drop:
-	/* Consume bad packet */
-	kfree_skb(skb);
-}
-
 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1672,7 +1668,7 @@
 			   struct sk_buff *skb,
 			   struct net_device *dev, struct in6_addr *saddr,
 			   struct in6_addr *daddr, __u8 prio, __u8 ttl,
-			   __be16 src_port, __be16 dst_port,
+			   __be16 src_port, __be16 dst_port, __be32 vni,
 			   struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
 	struct vxlanhdr *vxh;
@@ -1722,7 +1718,7 @@
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = htonl(VXLAN_HF_VNI);
-	vxh->vx_vni = md->vni;
+	vxh->vx_vni = vni;
 
 	if (type & SKB_GSO_TUNNEL_REMCSUM) {
 		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1755,10 +1751,10 @@
 }
 #endif
 
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-		   __be16 src_port, __be16 dst_port,
-		   struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+			  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+			  __be16 src_port, __be16 dst_port, __be32 vni,
+			  struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
 	struct vxlanhdr *vxh;
 	int min_headroom;
@@ -1801,7 +1797,7 @@
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = htonl(VXLAN_HF_VNI);
-	vxh->vx_vni = md->vni;
+	vxh->vx_vni = vni;
 
 	if (type & SKB_GSO_TUNNEL_REMCSUM) {
 		u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1828,7 +1824,6 @@
 				   ttl, df, src_port, dst_port, xnet,
 				   !(vxflags & VXLAN_F_UDP_CSUM));
 }
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
 /* Bypass encapsulation if the destination is local */
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
@@ -1878,22 +1873,48 @@
 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 			   struct vxlan_rdst *rdst, bool did_rsc)
 {
+	struct ip_tunnel_info *info;
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct sock *sk = vxlan->vn_sock->sock->sk;
+	unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
 	struct rtable *rt = NULL;
 	const struct iphdr *old_iph;
 	struct flowi4 fl4;
 	union vxlan_addr *dst;
-	struct vxlan_metadata md;
+	union vxlan_addr remote_ip;
+	struct vxlan_metadata _md;
+	struct vxlan_metadata *md = &_md;
 	__be16 src_port = 0, dst_port;
 	u32 vni;
 	__be16 df = 0;
 	__u8 tos, ttl;
 	int err;
+	u32 flags = vxlan->flags;
 
-	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
-	vni = rdst->remote_vni;
-	dst = &rdst->remote_ip;
+	info = skb_tunnel_info(skb);
+
+	if (rdst) {
+		dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+		vni = rdst->remote_vni;
+		dst = &rdst->remote_ip;
+	} else {
+		if (!info) {
+			WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+				  dev->name);
+			goto drop;
+		}
+		if (family != ip_tunnel_info_af(info))
+			goto drop;
+
+		dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+		vni = be64_to_cpu(info->key.tun_id);
+		remote_ip.sa.sa_family = family;
+		if (family == AF_INET)
+			remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
+		else
+			remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
+		dst = &remote_ip;
+	}
 
 	if (vxlan_addr_any(dst)) {
 		if (did_rsc) {
@@ -1906,25 +1927,43 @@
 
 	old_iph = ip_hdr(skb);
 
-	ttl = vxlan->ttl;
+	ttl = vxlan->cfg.ttl;
 	if (!ttl && vxlan_addr_multicast(dst))
 		ttl = 1;
 
-	tos = vxlan->tos;
+	tos = vxlan->cfg.tos;
 	if (tos == 1)
 		tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
-				     vxlan->port_max, true);
+	src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+				     vxlan->cfg.port_max, true);
+
+	if (info) {
+		if (info->key.tun_flags & TUNNEL_CSUM)
+			flags |= VXLAN_F_UDP_CSUM;
+		else
+			flags &= ~VXLAN_F_UDP_CSUM;
+
+		ttl = info->key.ttl;
+		tos = info->key.tos;
+
+		if (info->options_len)
+			md = ip_tunnel_info_opts(info);
+	} else {
+		md->gbp = skb->mark;
+	}
 
 	if (dst->sa.sa_family == AF_INET) {
+		if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
+			df = htons(IP_DF);
+
 		memset(&fl4, 0, sizeof(fl4));
-		fl4.flowi4_oif = rdst->remote_ifindex;
+		fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
 		fl4.flowi4_tos = RT_TOS(tos);
 		fl4.flowi4_mark = skb->mark;
 		fl4.flowi4_proto = IPPROTO_UDP;
 		fl4.daddr = dst->sin.sin_addr.s_addr;
-		fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
+		fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
 
 		rt = ip_route_output_key(vxlan->net, &fl4);
 		if (IS_ERR(rt)) {
@@ -1958,14 +1997,11 @@
 
 		tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
 		ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-		md.vni = htonl(vni << 8);
-		md.gbp = skb->mark;
-
 		err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
 				     dst->sin.sin_addr.s_addr, tos, ttl, df,
-				     src_port, dst_port, &md,
+				     src_port, dst_port, htonl(vni << 8), md,
 				     !net_eq(vxlan->net, dev_net(vxlan->dev)),
-				     vxlan->flags);
+				     flags);
 		if (err < 0) {
 			/* skb is already freed. */
 			skb = NULL;
@@ -1977,16 +2013,16 @@
 	} else {
 		struct dst_entry *ndst;
 		struct flowi6 fl6;
-		u32 flags;
+		u32 rt6i_flags;
 
 		memset(&fl6, 0, sizeof(fl6));
-		fl6.flowi6_oif = rdst->remote_ifindex;
+		fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
 		fl6.daddr = dst->sin6.sin6_addr;
-		fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+		fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
 		fl6.flowi6_mark = skb->mark;
 		fl6.flowi6_proto = IPPROTO_UDP;
 
-		if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+		if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
 			netdev_dbg(dev, "no route to %pI6\n",
 				   &dst->sin6.sin6_addr);
 			dev->stats.tx_carrier_errors++;
@@ -2002,9 +2038,9 @@
 		}
 
 		/* Bypass encapsulation if the destination is local */
-		flags = ((struct rt6_info *)ndst)->rt6i_flags;
-		if (flags & RTF_LOCAL &&
-		    !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+		rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+		if (rt6i_flags & RTF_LOCAL &&
+		    !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
 			struct vxlan_dev *dst_vxlan;
 
 			dst_release(ndst);
@@ -2018,13 +2054,10 @@
 		}
 
 		ttl = ttl ? : ip6_dst_hoplimit(ndst);
-		md.vni = htonl(vni << 8);
-		md.gbp = skb->mark;
-
 		err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
-				      0, ttl, src_port, dst_port, &md,
+				      0, ttl, src_port, dst_port, htonl(vni << 8), md,
 				      !net_eq(vxlan->net, dev_net(vxlan->dev)),
-				      vxlan->flags);
+				      flags);
 #endif
 	}
 
@@ -2051,11 +2084,14 @@
 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
+	const struct ip_tunnel_info *info;
 	struct ethhdr *eth;
 	bool did_rsc = false;
 	struct vxlan_rdst *rdst, *fdst = NULL;
 	struct vxlan_fdb *f;
 
+	info = skb_tunnel_info(skb);
+
 	skb_reset_mac_header(skb);
 	eth = eth_hdr(skb);
 
@@ -2078,6 +2114,12 @@
 #endif
 	}
 
+	if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
+	    info && info->mode & IP_TUNNEL_INFO_TX) {
+		vxlan_xmit_one(skb, dev, NULL, false);
+		return NETDEV_TX_OK;
+	}
+
 	f = vxlan_find_mac(vxlan, eth->h_dest);
 	did_rsc = false;
 
@@ -2143,7 +2185,7 @@
 			if (f->state & NUD_PERMANENT)
 				continue;
 
-			timeout = f->used + vxlan->age_interval * HZ;
+			timeout = f->used + vxlan->cfg.age_interval * HZ;
 			if (time_before_eq(timeout, jiffies)) {
 				netdev_dbg(vxlan->dev,
 					   "garbage collect %pM\n",
@@ -2207,8 +2249,8 @@
 	struct vxlan_sock *vs;
 	int ret = 0;
 
-	vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
-			    false, vxlan->flags);
+	vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
+			    vxlan->cfg.no_share, vxlan->flags);
 	if (IS_ERR(vs))
 		return PTR_ERR(vs);
 
@@ -2216,13 +2258,15 @@
 
 	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
 		ret = vxlan_igmp_join(vxlan);
+		if (ret == -EADDRINUSE)
+			ret = 0;
 		if (ret) {
 			vxlan_sock_release(vs);
 			return ret;
 		}
 	}
 
-	if (vxlan->age_interval)
+	if (vxlan->cfg.age_interval)
 		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
 
 	return ret;
@@ -2333,7 +2377,7 @@
 	for (i = 0; i < PORT_HASH_SIZE; ++i) {
 		hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
 			port = inet_sk(vs->sock->sk)->inet_sport;
-			sa_family = vs->sock->sk->sk_family;
+			sa_family = vxlan_get_sk_family(vs);
 			dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
 							    port);
 		}
@@ -2359,7 +2403,6 @@
 	dev->destructor = free_netdev;
 	SET_NETDEV_DEVTYPE(dev, &vxlan_type);
 
-	dev->tx_queue_len = 0;
 	dev->features	|= NETIF_F_LLTX;
 	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
 	dev->features   |= NETIF_F_RXCSUM;
@@ -2371,7 +2414,7 @@
 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
 	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
 	netif_keep_dst(dev);
-	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
 
 	INIT_LIST_HEAD(&vxlan->next);
 	spin_lock_init(&vxlan->hash_lock);
@@ -2380,10 +2423,12 @@
 	vxlan->age_timer.function = vxlan_cleanup;
 	vxlan->age_timer.data = (unsigned long) vxlan;
 
-	vxlan->dst_port = htons(vxlan_port);
+	vxlan->cfg.dst_port = htons(vxlan_port);
 
 	vxlan->dev = dev;
 
+	gro_cells_init(&vxlan->gro_cells, dev);
+
 	for (h = 0; h < FDB_HASH_SIZE; ++h)
 		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
 }
@@ -2405,6 +2450,7 @@
 	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
+	[IFLA_VXLAN_COLLECT_METADATA]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
 	[IFLA_VXLAN_UDP_CSUM]	= { .type = NLA_U8 },
 	[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]	= { .type = NLA_U8 },
@@ -2484,6 +2530,7 @@
 		udp_conf.family = AF_INET6;
 		udp_conf.use_udp6_rx_checksums =
 		    !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+		udp_conf.ipv6_v6only = 1;
 	} else {
 		udp_conf.family = AF_INET;
 	}
@@ -2500,7 +2547,6 @@
 
 /* Create new listen socket if needed */
 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
-					      vxlan_rcv_t *rcv, void *data,
 					      u32 flags)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2529,8 +2575,6 @@
 
 	vs->sock = sock;
 	atomic_set(&vs->refcnt, 1);
-	vs->rcv = rcv;
-	vs->data = data;
 	vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
 	/* Initialize the vxlan udp offloads structure */
@@ -2554,9 +2598,8 @@
 	return vs;
 }
 
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-				  vxlan_rcv_t *rcv, void *data,
-				  bool no_share, u32 flags)
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+					 bool no_share, u32 flags)
 {
 	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 	struct vxlan_sock *vs;
@@ -2566,7 +2609,7 @@
 		spin_lock(&vn->sock_lock);
 		vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
 				     flags);
-		if (vs && vs->rcv == rcv) {
+		if (vs) {
 			if (!atomic_add_unless(&vs->refcnt, 1, 0))
 				vs = ERR_PTR(-EBUSY);
 			spin_unlock(&vn->sock_lock);
@@ -2575,58 +2618,38 @@
 		spin_unlock(&vn->sock_lock);
 	}
 
-	return vxlan_socket_create(net, port, rcv, data, flags);
+	return vxlan_socket_create(net, port, flags);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
 
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
-			 struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+			       struct vxlan_config *conf)
 {
 	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_rdst *dst = &vxlan->default_dst;
-	__u32 vni;
 	int err;
 	bool use_ipv6 = false;
-
-	if (!data[IFLA_VXLAN_ID])
-		return -EINVAL;
+	__be16 default_port = vxlan->cfg.dst_port;
 
 	vxlan->net = src_net;
 
-	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
-	dst->remote_vni = vni;
+	dst->remote_vni = conf->vni;
+
+	memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
 
 	/* Unless IPv6 is explicitly requested, assume IPv4 */
-	dst->remote_ip.sa.sa_family = AF_INET;
-	if (data[IFLA_VXLAN_GROUP]) {
-		dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
-	} else if (data[IFLA_VXLAN_GROUP6]) {
-		if (!IS_ENABLED(CONFIG_IPV6))
-			return -EPFNOSUPPORT;
+	if (!dst->remote_ip.sa.sa_family)
+		dst->remote_ip.sa.sa_family = AF_INET;
 
-		dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
-		dst->remote_ip.sa.sa_family = AF_INET6;
+	if (dst->remote_ip.sa.sa_family == AF_INET6 ||
+	    vxlan->cfg.saddr.sa.sa_family == AF_INET6)
 		use_ipv6 = true;
-	}
 
-	if (data[IFLA_VXLAN_LOCAL]) {
-		vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
-		vxlan->saddr.sa.sa_family = AF_INET;
-	} else if (data[IFLA_VXLAN_LOCAL6]) {
-		if (!IS_ENABLED(CONFIG_IPV6))
-			return -EPFNOSUPPORT;
-
-		/* TODO: respect scope id */
-		vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
-		vxlan->saddr.sa.sa_family = AF_INET6;
-		use_ipv6 = true;
-	}
-
-	if (data[IFLA_VXLAN_LINK] &&
-	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
+	if (conf->remote_ifindex) {
 		struct net_device *lowerdev
-			 = __dev_get_by_index(src_net, dst->remote_ifindex);
+			 = __dev_get_by_index(src_net, conf->remote_ifindex);
+
+		dst->remote_ifindex = conf->remote_ifindex;
 
 		if (!lowerdev) {
 			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2644,7 +2667,7 @@
 		}
 #endif
 
-		if (!tb[IFLA_MTU])
+		if (!conf->mtu)
 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
 		dev->needed_headroom = lowerdev->hard_header_len +
@@ -2652,75 +2675,17 @@
 	} else if (use_ipv6)
 		vxlan->flags |= VXLAN_F_IPV6;
 
-	if (data[IFLA_VXLAN_TOS])
-		vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+	memcpy(&vxlan->cfg, conf, sizeof(*conf));
+	if (!vxlan->cfg.dst_port)
+		vxlan->cfg.dst_port = default_port;
+	vxlan->flags |= conf->flags;
 
-	if (data[IFLA_VXLAN_TTL])
-		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+	if (!vxlan->cfg.age_interval)
+		vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
 
-	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
-		vxlan->flags |= VXLAN_F_LEARN;
-
-	if (data[IFLA_VXLAN_AGEING])
-		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
-	else
-		vxlan->age_interval = FDB_AGE_DEFAULT;
-
-	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
-		vxlan->flags |= VXLAN_F_PROXY;
-
-	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
-		vxlan->flags |= VXLAN_F_RSC;
-
-	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
-		vxlan->flags |= VXLAN_F_L2MISS;
-
-	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
-		vxlan->flags |= VXLAN_F_L3MISS;
-
-	if (data[IFLA_VXLAN_LIMIT])
-		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
-
-	if (data[IFLA_VXLAN_PORT_RANGE]) {
-		const struct ifla_vxlan_port_range *p
-			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
-		vxlan->port_min = ntohs(p->low);
-		vxlan->port_max = ntohs(p->high);
-	}
-
-	if (data[IFLA_VXLAN_PORT])
-		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
-
-	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
-		vxlan->flags |= VXLAN_F_UDP_CSUM;
-
-	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
-	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
-		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
-
-	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
-	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
-		vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
-
-	if (data[IFLA_VXLAN_REMCSUM_TX] &&
-	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
-		vxlan->flags |= VXLAN_F_REMCSUM_TX;
-
-	if (data[IFLA_VXLAN_REMCSUM_RX] &&
-	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
-		vxlan->flags |= VXLAN_F_REMCSUM_RX;
-
-	if (data[IFLA_VXLAN_GBP])
-		vxlan->flags |= VXLAN_F_GBP;
-
-	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
-		vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
-
-	if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
-			   vxlan->dst_port, vxlan->flags)) {
-		pr_info("duplicate VNI %u\n", vni);
+	if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
+			   vxlan->cfg.dst_port, vxlan->flags))
 		return -EEXIST;
-	}
 
 	dev->ethtool_ops = &vxlan_ethtool_ops;
 
@@ -2730,7 +2695,7 @@
 				       &vxlan->default_dst.remote_ip,
 				       NUD_REACHABLE|NUD_PERMANENT,
 				       NLM_F_EXCL|NLM_F_CREATE,
-				       vxlan->dst_port,
+				       vxlan->cfg.dst_port,
 				       vxlan->default_dst.remote_vni,
 				       vxlan->default_dst.remote_ifindex,
 				       NTF_SELF);
@@ -2749,6 +2714,151 @@
 	return 0;
 }
 
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+				    u8 name_assign_type, struct vxlan_config *conf)
+{
+	struct nlattr *tb[IFLA_MAX+1];
+	struct net_device *dev;
+	int err;
+
+	memset(&tb, 0, sizeof(tb));
+
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &vxlan_link_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	err = vxlan_dev_configure(net, dev, conf);
+	if (err < 0) {
+		free_netdev(dev);
+		return ERR_PTR(err);
+	}
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+			 struct nlattr *tb[], struct nlattr *data[])
+{
+	struct vxlan_config conf;
+	int err;
+
+	if (!data[IFLA_VXLAN_ID])
+		return -EINVAL;
+
+	memset(&conf, 0, sizeof(conf));
+	conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+	if (data[IFLA_VXLAN_GROUP]) {
+		conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+	} else if (data[IFLA_VXLAN_GROUP6]) {
+		if (!IS_ENABLED(CONFIG_IPV6))
+			return -EPFNOSUPPORT;
+
+		conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+		conf.remote_ip.sa.sa_family = AF_INET6;
+	}
+
+	if (data[IFLA_VXLAN_LOCAL]) {
+		conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+		conf.saddr.sa.sa_family = AF_INET;
+	} else if (data[IFLA_VXLAN_LOCAL6]) {
+		if (!IS_ENABLED(CONFIG_IPV6))
+			return -EPFNOSUPPORT;
+
+		/* TODO: respect scope id */
+		conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+		conf.saddr.sa.sa_family = AF_INET6;
+	}
+
+	if (data[IFLA_VXLAN_LINK])
+		conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
+	if (data[IFLA_VXLAN_TOS])
+		conf.tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+
+	if (data[IFLA_VXLAN_TTL])
+		conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+
+	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
+		conf.flags |= VXLAN_F_LEARN;
+
+	if (data[IFLA_VXLAN_AGEING])
+		conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
+
+	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
+		conf.flags |= VXLAN_F_PROXY;
+
+	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
+		conf.flags |= VXLAN_F_RSC;
+
+	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
+		conf.flags |= VXLAN_F_L2MISS;
+
+	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
+		conf.flags |= VXLAN_F_L3MISS;
+
+	if (data[IFLA_VXLAN_LIMIT])
+		conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+	if (data[IFLA_VXLAN_COLLECT_METADATA] &&
+	    nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+		conf.flags |= VXLAN_F_COLLECT_METADATA;
+
+	if (data[IFLA_VXLAN_PORT_RANGE]) {
+		const struct ifla_vxlan_port_range *p
+			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
+		conf.port_min = ntohs(p->low);
+		conf.port_max = ntohs(p->high);
+	}
+
+	if (data[IFLA_VXLAN_PORT])
+		conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+
+	if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
+		conf.flags |= VXLAN_F_UDP_CSUM;
+
+	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
+	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
+		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+
+	if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
+	    nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
+		conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+
+	if (data[IFLA_VXLAN_REMCSUM_TX] &&
+	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
+		conf.flags |= VXLAN_F_REMCSUM_TX;
+
+	if (data[IFLA_VXLAN_REMCSUM_RX] &&
+	    nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
+		conf.flags |= VXLAN_F_REMCSUM_RX;
+
+	if (data[IFLA_VXLAN_GBP])
+		conf.flags |= VXLAN_F_GBP;
+
+	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
+		conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+
+	err = vxlan_dev_configure(src_net, dev, &conf);
+	switch (err) {
+	case -ENODEV:
+		pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
+		break;
+
+	case -EPERM:
+		pr_info("IPv6 is disabled via sysctl\n");
+		break;
+
+	case -EEXIST:
+		pr_info("duplicate VNI %u\n", conf.vni);
+		break;
+	}
+
+	return err;
+}
+
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2759,6 +2869,7 @@
 		hlist_del_rcu(&vxlan->hlist);
 	spin_unlock(&vn->sock_lock);
 
+	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
 }
@@ -2777,6 +2888,7 @@
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
 		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
+		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_COLLECT_METADATA */
 		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
 		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
 		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2794,8 +2906,8 @@
 	const struct vxlan_dev *vxlan = netdev_priv(dev);
 	const struct vxlan_rdst *dst = &vxlan->default_dst;
 	struct ifla_vxlan_port_range ports = {
-		.low =  htons(vxlan->port_min),
-		.high = htons(vxlan->port_max),
+		.low =  htons(vxlan->cfg.port_min),
+		.high = htons(vxlan->cfg.port_max),
 	};
 
 	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
@@ -2818,22 +2930,22 @@
 	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
 		goto nla_put_failure;
 
-	if (!vxlan_addr_any(&vxlan->saddr)) {
-		if (vxlan->saddr.sa.sa_family == AF_INET) {
+	if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+		if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
 			if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
-					    vxlan->saddr.sin.sin_addr.s_addr))
+					    vxlan->cfg.saddr.sin.sin_addr.s_addr))
 				goto nla_put_failure;
 #if IS_ENABLED(CONFIG_IPV6)
 		} else {
 			if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
-					     &vxlan->saddr.sin6.sin6_addr))
+					     &vxlan->cfg.saddr.sin6.sin6_addr))
 				goto nla_put_failure;
 #endif
 		}
 	}
 
-	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
-	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
 	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
 			!!(vxlan->flags & VXLAN_F_LEARN)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -2843,9 +2955,11 @@
 			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
 			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
-	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
-	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
-	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+	    nla_put_u8(skb, IFLA_VXLAN_COLLECT_METADATA,
+		       !!(vxlan->flags & VXLAN_F_COLLECT_METADATA)) ||
+	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
 	    nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
 			!!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
 	    nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
@@ -2964,8 +3078,10 @@
 		/* If vxlan->dev is in the same netns, it has already been added
 		 * to the list by the previous loop.
 		 */
-		if (!net_eq(dev_net(vxlan->dev), net))
+		if (!net_eq(dev_net(vxlan->dev), net)) {
+			gro_cells_destroy(&vxlan->gro_cells);
 			unregister_netdevice_queue(vxlan->dev, &list);
+		}
 	}
 
 	unregister_netdevice_many(&list);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 3ebed1c..e92aaf6 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1096,7 +1096,7 @@
 	}
 	dev->netdev_ops = &pvc_ops;
 	dev->mtu = HDLC_MAX_MTU;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->ml_priv = pvc;
 
 	if (register_netdevice(dev) != 0) {
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 9729e69..c04fb00 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -11,7 +11,8 @@
 		 wmi-tlv.o \
 		 bmi.o \
 		 hw.o \
-		 p2p.o
+		 p2p.o \
+		 swap.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 31a9906..df7c761 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -178,7 +178,7 @@
 };
 
 /* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
 
 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e508c65..cf28fbe 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -452,6 +452,7 @@
 {
 	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
 	unsigned int nentries_mask = dest_ring->nentries_mask;
+	struct ath10k *ar = ce_state->ar;
 	unsigned int sw_index = dest_ring->sw_index;
 
 	struct ce_desc *base = dest_ring->base_addr_owner_space;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 0eddb20..5c903e15 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -21,7 +21,7 @@
 #include "hif.h"
 
 /* Maximum number of Copy Engine's supported */
-#define CE_COUNT_MAX 8
+#define CE_COUNT_MAX 12
 #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
@@ -38,8 +38,13 @@
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
-#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  2
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS	(1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS	(1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
 
 struct ce_desc {
 	__le32 addr;
@@ -423,8 +428,10 @@
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
 
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB		8
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK		0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+				ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+				ar->regs->ce_wrap_intr_sum_host_msi_mask
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
 	(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
 		CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 59496a9..b87b986 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -31,16 +31,19 @@
 #include "wmi-ops.h"
 
 unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
 static bool uart_print;
 static bool skip_otp;
 
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
 module_param(uart_print, bool, 0644);
 module_param(skip_otp, bool, 0644);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
@@ -49,6 +52,8 @@
 		.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
 		.uart_pin = 7,
 		.has_shifted_cc_wraparound = true,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			.dir = QCA988X_HW_2_0_FW_DIR,
 			.fw = QCA988X_HW_2_0_FW_FILE,
@@ -63,6 +68,8 @@
 		.name = "qca6174 hw2.1",
 		.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
 		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			.dir = QCA6174_HW_2_1_FW_DIR,
 			.fw = QCA6174_HW_2_1_FW_FILE,
@@ -77,6 +84,8 @@
 		.name = "qca6174 hw3.0",
 		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
 		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			.dir = QCA6174_HW_3_0_FW_DIR,
 			.fw = QCA6174_HW_3_0_FW_FILE,
@@ -91,6 +100,8 @@
 		.name = "qca6174 hw3.2",
 		.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
 		.uart_pin = 6,
+		.otp_exe_param = 0,
+		.channel_counters_freq_hz = 88000,
 		.fw = {
 			/* uses same binaries as hw3.0 */
 			.dir = QCA6174_HW_3_0_FW_DIR,
@@ -101,8 +112,69 @@
 			.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
 		},
 	},
+	{
+		.id = QCA99X0_HW_2_0_DEV_VERSION,
+		.name = "qca99x0 hw2.0",
+		.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+		.uart_pin = 7,
+		.otp_exe_param = 0x00000700,
+		.continuous_frag_desc = true,
+		.channel_counters_freq_hz = 150000,
+		.fw = {
+			.dir = QCA99X0_HW_2_0_FW_DIR,
+			.fw = QCA99X0_HW_2_0_FW_FILE,
+			.otp = QCA99X0_HW_2_0_OTP_FILE,
+			.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+			.board_size = QCA99X0_BOARD_DATA_SZ,
+			.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+		},
+	},
 };
 
+static const char *const ath10k_core_fw_feature_str[] = {
+	[ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+	[ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+	[ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+	[ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+	[ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+	[ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+	[ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+	[ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+	[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+	[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+						   size_t buf_len,
+						   enum ath10k_fw_features feat)
+{
+	if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+	    WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+		return scnprintf(buf, buf_len, "bit%d", feat);
+	}
+
+	return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+				     char *buf,
+				     size_t buf_len)
+{
+	unsigned int len = 0;
+	int i;
+
+	for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+		if (test_bit(i, ar->fw_features)) {
+			if (len > 0)
+				len += scnprintf(buf + len, buf_len - len, ",");
+
+			len += ath10k_core_get_fw_feature_str(buf + len,
+							      buf_len - len,
+							      i);
+		}
+	}
+}
+
 static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@@ -164,6 +236,17 @@
 		return ret;
 	}
 
+	/* Some devices have a special sanity check that verifies the PCI
+	 * Device ID is written to this host interest var. It is known to be
+	 * required to boot QCA6164.
+	 */
+	ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext,
+				 ar->dev_id);
+	if (ret) {
+		ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -355,6 +438,7 @@
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
 	u32 result, address = ar->hw_params.patch_load_addr;
+	u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
 	int ret;
 
 	ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@@ -380,7 +464,7 @@
 		return ret;
 	}
 
-	ret = ath10k_bmi_execute(ar, address, 0, &result);
+	ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
 	if (ret) {
 		ath10k_err(ar, "could not execute otp (%d)\n", ret);
 		return ret;
@@ -412,6 +496,13 @@
 		data = ar->firmware_data;
 		data_len = ar->firmware_len;
 		mode_name = "normal";
+		ret = ath10k_swap_code_seg_configure(ar,
+				ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+		if (ret) {
+			ath10k_err(ar, "failed to configure fw code swap: %d\n",
+				   ret);
+			return ret;
+		}
 		break;
 	case ATH10K_FIRMWARE_MODE_UTF:
 		data = ar->testmode.utf->data;
@@ -451,6 +542,8 @@
 	if (!IS_ERR(ar->cal_file))
 		release_firmware(ar->cal_file);
 
+	ath10k_swap_code_seg_release(ar);
+
 	ar->board = NULL;
 	ar->board_data = NULL;
 	ar->board_len = 0;
@@ -464,6 +557,7 @@
 	ar->firmware_len = 0;
 
 	ar->cal_file = NULL;
+
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -737,6 +831,13 @@
 			ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
 				   ar->htt.op_version);
 			break;
+		case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+			ath10k_dbg(ar, ATH10K_DBG_BOOT,
+				   "found fw code swap image ie (%zd B)\n",
+				   ie_len);
+			ar->swap.firmware_codeswap_data = data;
+			ar->swap.firmware_codeswap_len = ie_len;
+			break;
 		default:
 			ath10k_warn(ar, "Unknown FW IE: %u\n",
 				    le32_to_cpu(hdr->id));
@@ -991,6 +1092,46 @@
 		return -EINVAL;
 	}
 
+	ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+	switch (ath10k_cryptmode_param) {
+	case ATH10K_CRYPT_MODE_HW:
+		clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+		clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+		break;
+	case ATH10K_CRYPT_MODE_SW:
+		if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+			      ar->fw_features)) {
+			ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+			return -EINVAL;
+		}
+
+		set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+		set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+		break;
+	default:
+		ath10k_info(ar, "invalid cryptmode: %d\n",
+			    ath10k_cryptmode_param);
+		return -EINVAL;
+	}
+
+	ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+	ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+		/* Workaround:
+		 *
+		 * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+		 * and causes enormous performance issues (malformed frames,
+		 * etc).
+		 *
+		 * Disabling A-MSDU makes RAW mode stable with heavy traffic
+		 * albeit a bit slower compared to regular operation.
+		 */
+		ar->htt.max_num_amsdu = 1;
+	}
+
 	/* Backwards compatibility for firmwares without
 	 * ATH10K_FW_IE_WMI_OP_VERSION.
 	 */
@@ -1014,6 +1155,7 @@
 		ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
 		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
 			WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_1:
 	case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -1023,6 +1165,7 @@
 		ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
 		ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
 		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_TLV:
 		ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@@ -1033,6 +1176,17 @@
 		ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
 		ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
 			WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+		ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+		ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+		ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+		ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+		ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+		ar->fw_stats_req_mask = WMI_STAT_PEER;
+		ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
 		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1056,6 +1210,7 @@
 		case ATH10K_FW_WMI_OP_VERSION_TLV:
 			ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
 			break;
+		case ATH10K_FW_WMI_OP_VERSION_10_4:
 		case ATH10K_FW_WMI_OP_VERSION_UNSET:
 		case ATH10K_FW_WMI_OP_VERSION_MAX:
 			WARN_ON(1);
@@ -1272,13 +1427,13 @@
 void ath10k_core_stop(struct ath10k *ar)
 {
 	lockdep_assert_held(&ar->conf_mutex);
+	ath10k_debug_stop(ar);
 
 	/* try to suspend target */
 	if (ar->state != ATH10K_STATE_RESTARTING &&
 	    ar->state != ATH10K_STATE_UTF)
 		ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
 
-	ath10k_debug_stop(ar);
 	ath10k_hif_stop(ar);
 	ath10k_htt_tx_free(&ar->htt);
 	ath10k_htt_rx_free(&ar->htt);
@@ -1330,6 +1485,13 @@
 		goto err_free_firmware_files;
 	}
 
+	ret = ath10k_swap_code_seg_init(ar);
+	if (ret) {
+		ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+			   ret);
+		goto err_free_firmware_files;
+	}
+
 	mutex_lock(&ar->conf_mutex);
 
 	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1470,9 +1632,15 @@
 	switch (hw_rev) {
 	case ATH10K_HW_QCA988X:
 		ar->regs = &qca988x_regs;
+		ar->hw_values = &qca988x_values;
 		break;
 	case ATH10K_HW_QCA6174:
 		ar->regs = &qca6174_regs;
+		ar->hw_values = &qca6174_values;
+		break;
+	case ATH10K_HW_QCA99X0:
+		ar->regs = &qca99x0_regs;
+		ar->hw_values = &qca99x0_values;
 		break;
 	default:
 		ath10k_err(ar, "unsupported core hardware revision %d\n",
@@ -1497,6 +1665,10 @@
 	if (!ar->workqueue)
 		goto err_free_mac;
 
+	ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+	if (!ar->workqueue_aux)
+		goto err_free_wq;
+
 	mutex_init(&ar->conf_mutex);
 	spin_lock_init(&ar->data_lock);
 
@@ -1517,10 +1689,12 @@
 
 	ret = ath10k_debug_create(ar);
 	if (ret)
-		goto err_free_wq;
+		goto err_free_aux_wq;
 
 	return ar;
 
+err_free_aux_wq:
+	destroy_workqueue(ar->workqueue_aux);
 err_free_wq:
 	destroy_workqueue(ar->workqueue);
 
@@ -1536,6 +1710,9 @@
 	flush_workqueue(ar->workqueue);
 	destroy_workqueue(ar->workqueue);
 
+	flush_workqueue(ar->workqueue_aux);
+	destroy_workqueue(ar->workqueue_aux);
+
 	ath10k_debug_destroy(ar);
 	ath10k_mac_destroy(ar);
 }
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 78094f23c..1254214 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -36,6 +36,7 @@
 #include "spectral.h"
 #include "thermal.h"
 #include "wow.h"
+#include "swap.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -91,6 +92,7 @@
 		u8 tid;
 		u16 freq;
 		bool is_offchan;
+		bool nohwcrypt;
 		struct ath10k_htt_txbuf *txbuf;
 		u32 txbuf_paddr;
 	} __packed htt;
@@ -151,6 +153,7 @@
 	const struct wmi_ops *ops;
 
 	u32 num_mem_chunks;
+	u32 rx_decap_mode;
 	struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
 };
 
@@ -327,8 +330,8 @@
 			u32 uapsd;
 		} sta;
 		struct {
-			/* 127 stations; wmi limit */
-			u8 tim_bitmap[16];
+			/* 512 stations */
+			u8 tim_bitmap[64];
 			u8 tim_len;
 			u32 ssid_len;
 			u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -340,6 +343,7 @@
 	} u;
 
 	bool use_cts_prot;
+	bool nohwcrypt;
 	int num_legacy_stations;
 	int txpower;
 	struct wmi_wmm_params_all_arg wmm_params;
@@ -381,9 +385,6 @@
 	u32 reg_addr;
 	u32 nf_cal_period;
 
-	u8 htt_max_amsdu;
-	u8 htt_max_ampdu;
-
 	struct ath10k_fw_crash_data *fw_crash_data;
 };
 
@@ -452,16 +453,21 @@
 	ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
 
 	/* Don't trust error code from otp.bin */
-	ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+	ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
 
 	/* Some firmware revisions pad 4th hw address to 4 byte boundary making
 	 * it 8 bytes long in Native Wifi Rx decap.
 	 */
-	ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+	ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
 
 	/* Firmware supports bypassing PLL setting on init. */
 	ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
 
+	/* Raw mode support. If supported, FW supports receiving and transmitting
+	 * frames in raw mode.
+	 */
+	ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
 	/* keep last */
 	ATH10K_FW_FEATURE_COUNT,
 };
@@ -475,6 +481,15 @@
 	 * waiters should immediately cancel instead of waiting for a time out.
 	 */
 	ATH10K_FLAG_CRASH_FLUSH,
+
+	/* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+	 * Raw mode supports both hardware and software crypto. Native WiFi only
+	 * supports hardware crypto.
+	 */
+	ATH10K_FLAG_RAW_MODE,
+
+	/* Disable HW crypto engine */
+	ATH10K_FLAG_HW_CRYPTO_DISABLED,
 };
 
 enum ath10k_cal_mode {
@@ -483,6 +498,13 @@
 	ATH10K_CAL_MODE_DT,
 };
 
+enum ath10k_crypt_mode {
+	/* Only use hardware crypto engine */
+	ATH10K_CRYPT_MODE_HW,
+	/* Only use software crypto engine */
+	ATH10K_CRYPT_MODE_SW,
+};
+
 static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
 {
 	switch (mode) {
@@ -532,6 +554,7 @@
 	u8 mac_addr[ETH_ALEN];
 
 	enum ath10k_hw_rev hw_rev;
+	u16 dev_id;
 	u32 chip_id;
 	u32 target_version;
 	u8 fw_version_major;
@@ -545,6 +568,7 @@
 	u32 ht_cap_info;
 	u32 vht_cap_info;
 	u32 num_rf_chains;
+	u32 max_spatial_stream;
 	/* protected by conf_mutex */
 	bool ani_enabled;
 
@@ -560,6 +584,7 @@
 	struct completion target_suspend;
 
 	const struct ath10k_hw_regs *regs;
+	const struct ath10k_hw_values *hw_values;
 	struct ath10k_bmi bmi;
 	struct ath10k_wmi wmi;
 	struct ath10k_htc htc;
@@ -570,6 +595,7 @@
 		const char *name;
 		u32 patch_load_addr;
 		int uart_pin;
+		u32 otp_exe_param;
 
 		/* This is true if given HW chip has a quirky Cycle Counter
 		 * wraparound which resets to 0x7fffffff instead of 0. All
@@ -578,6 +604,14 @@
 		 */
 		bool has_shifted_cc_wraparound;
 
+		/* Some chips expect the fragment descriptor to be in continuous
+		 * memory for any TX operation. Set the continuous_frag_desc flag
+		 * for hardware with this requirement.
+		 */
+		bool continuous_frag_desc;
+
+		u32 channel_counters_freq_hz;
+
 		struct ath10k_hw_params_fw {
 			const char *dir;
 			const char *fw;
@@ -602,6 +636,12 @@
 
 	const struct firmware *cal_file;
 
+	struct {
+		const void *firmware_codeswap_data;
+		size_t firmware_codeswap_len;
+		struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+	} swap;
+
 	char spec_board_id[100];
 	bool spec_board_loaded;
 
@@ -617,6 +657,7 @@
 		bool is_roc;
 		int vdev_id;
 		int roc_freq;
+		bool roc_notify;
 	} scan;
 
 	struct {
@@ -656,6 +697,8 @@
 	struct completion vdev_setup_done;
 
 	struct workqueue_struct *workqueue;
+	/* Auxiliary workqueue */
+	struct workqueue_struct *workqueue_aux;
 
 	/* prevents concurrent FW reconfiguration */
 	struct mutex conf_mutex;
@@ -675,6 +718,11 @@
 	int max_num_stations;
 	int max_num_vdevs;
 	int max_num_tdls_vdevs;
+	int num_active_peers;
+	int num_tids;
+
+	struct work_struct svc_rdy_work;
+	struct sk_buff *svc_rdy_skb;
 
 	struct work_struct offchan_tx_work;
 	struct sk_buff_head offchan_tx_queue;
@@ -749,6 +797,9 @@
 				  enum ath10k_hw_rev hw_rev,
 				  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+				     char *buf,
+				     size_t max_len);
 
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 8fa606a..bf033f4 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -124,7 +124,11 @@
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-	ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+	char fw_features[128] = {};
+
+	ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+	ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d features %s\n",
 		    ar->hw_params.name,
 		    ar->target_version,
 		    ar->chip_id,
@@ -137,8 +141,12 @@
 		    ar->htt.target_version_major,
 		    ar->htt.target_version_minor,
 		    ar->wmi.op_version,
+		    ar->htt.op_version,
 		    ath10k_cal_mode_str(ar->cal_mode),
-		    ar->max_num_stations);
+		    ar->max_num_stations,
+		    test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+		    !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags),
+		    fw_features);
 	ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
 		    config_enabled(CONFIG_ATH10K_DEBUG),
 		    config_enabled(CONFIG_ATH10K_DEBUGFS),
@@ -315,7 +323,7 @@
 	ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
 	if (ret) {
 		ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
-		goto unlock;
+		goto free;
 	}
 
 	/* Stat data may exceed htc-wmi buffer limit. In such case firmware
@@ -378,7 +386,6 @@
 	ath10k_debug_fw_stats_vdevs_free(&stats.vdevs);
 	ath10k_debug_fw_stats_peers_free(&stats.peers);
 
-unlock:
 	spin_unlock_bh(&ar->data_lock);
 }
 
@@ -1357,12 +1364,8 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (ar->debug.htt_max_amsdu)
-		amsdu = ar->debug.htt_max_amsdu;
-
-	if (ar->debug.htt_max_ampdu)
-		ampdu = ar->debug.htt_max_ampdu;
-
+	amsdu = ar->htt.max_num_amsdu;
+	ampdu = ar->htt.max_num_ampdu;
 	mutex_unlock(&ar->conf_mutex);
 
 	len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
@@ -1396,8 +1399,8 @@
 		goto out;
 
 	res = count;
-	ar->debug.htt_max_amsdu = amsdu;
-	ar->debug.htt_max_ampdu = ampdu;
+	ar->htt.max_num_amsdu = amsdu;
+	ar->htt.max_num_ampdu = ampdu;
 
 out:
 	mutex_unlock(&ar->conf_mutex);
@@ -1899,9 +1902,6 @@
 	if (ar->debug.htt_stats_mask != 0)
 		cancel_delayed_work(&ar->debug.htt_stats_dwork);
 
-	ar->debug.htt_max_amsdu = 0;
-	ar->debug.htt_max_ampdu = 0;
-
 	ath10k_wmi_pdev_pktlog_disable(ar);
 }
 
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 85bfa2a..32d9ff1 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -145,8 +145,10 @@
 	skb_cb->eid = eid;
 	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(dev, skb_cb->paddr);
-	if (ret)
+	if (ret) {
+		ret = -EIO;
 		goto err_credits;
+	}
 
 	sg_item.transfer_id = ep->eid;
 	sg_item.transfer_context = skb;
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 6da6ef2..3e6ba63 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -102,6 +102,43 @@
 	[HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
 };
 
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+	[HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+	[HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+	[HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+	[HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+	[HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+	[HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+	[HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+	[HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+	[HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+				HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+	[HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+				HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+	[HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+	[HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+				HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+	[HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+				HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+	[HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+	[HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+	[HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+				HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	[HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+				HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+	[HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+				HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
 int ath10k_htt_connect(struct ath10k_htt *htt)
 {
 	struct ath10k_htc_svc_conn_req conn_req;
@@ -147,6 +184,10 @@
 		2; /* ip4 dscp or ip6 priority */
 
 	switch (ar->htt.op_version) {
+	case ATH10K_FW_HTT_OP_VERSION_10_4:
+		ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+		ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+		break;
 	case ATH10K_FW_HTT_OP_VERSION_10_1:
 		ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
 		ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@@ -205,8 +246,31 @@
 	}
 
 	status = ath10k_htt_verify_version(htt);
+	if (status) {
+		ath10k_warn(ar, "failed to verify htt version: %d\n",
+			    status);
+		return status;
+	}
+
+	status = ath10k_htt_send_frag_desc_bank_cfg(htt);
 	if (status)
 		return status;
 
-	return ath10k_htt_send_rx_ring_cfg_ll(htt);
+	status = ath10k_htt_send_rx_ring_cfg_ll(htt);
+	if (status) {
+		ath10k_warn(ar, "failed to setup rx ring: %d\n",
+			    status);
+		return status;
+	}
+
+	status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+					     htt->max_num_ampdu,
+					     htt->max_num_amsdu);
+	if (status) {
+		ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+			    status);
+		return status;
+	}
+
+	return 0;
 }
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 7e8a0d8..5731875 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -83,10 +83,39 @@
  * around the mask + shift defs.
  */
 struct htt_data_tx_desc_frag {
-	__le32 paddr;
-	__le32 len;
+	union {
+		struct double_word_addr {
+			__le32 paddr;
+			__le32 len;
+		} __packed dword_addr;
+		struct triple_word_addr {
+			__le32 paddr_lo;
+			__le16 paddr_hi;
+			__le16 len_16;
+		} __packed tword_addr;
+	} __packed;
 } __packed;
 
+struct htt_msdu_ext_desc {
+	__le32 tso_flag[3];
+	__le16 ip_identification;
+	u8 flags;
+	u8 reserved;
+	struct htt_data_tx_desc_frag frags[6];
+};
+
+#define	HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE		BIT(0)
+#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE	BIT(1)
+#define	HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE	BIT(2)
+#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE	BIT(3)
+#define	HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE	BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+				 | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
 enum htt_data_tx_desc_flags0 {
 	HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
 	HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -255,6 +284,9 @@
 } __packed;
 
 #define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+	__le32 rate;
+} __packed;
 
 struct htt_mgmt_tx_desc {
 	u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
@@ -263,6 +295,9 @@
 	__le32 len;
 	__le32 vdev_id;
 	u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+	union {
+		struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+	} __packed;
 } __packed;
 
 enum htt_mgmt_tx_status {
@@ -349,6 +384,38 @@
 	HTT_TLV_T2H_NUM_MSGS
 };
 
+enum htt_10_4_t2h_msg_type {
+	HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+	HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
+	HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+	HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+	HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+	HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+	HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+	HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+	HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+	HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+	HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+	HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
+	HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+	HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+	HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
+	HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
+	HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
+	HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
+	HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+	HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
+	HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
+	HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
+	HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF          = 0x17,
+	HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
+	/* 0x19 to 0x2f are reserved */
+	HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND     = 0x30,
+	/* keep this last */
+	HTT_10_4_T2H_NUM_MSGS
+};
+
 enum htt_t2h_msg_type {
 	HTT_T2H_MSG_TYPE_VERSION_CONF,
 	HTT_T2H_MSG_TYPE_RX_IND,
@@ -375,6 +442,10 @@
 	HTT_T2H_MSG_TYPE_AGGR_CONF,
 	HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
 	HTT_T2H_MSG_TYPE_TEST,
+	HTT_T2H_MSG_TYPE_EN_STATS,
+	HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+	HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+	HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
 	/* keep this last */
 	HTT_T2H_NUM_MSGS
 };
@@ -1325,6 +1396,8 @@
 	u8 target_version_minor;
 	struct completion target_version_received;
 	enum ath10k_fw_htt_op_version op_version;
+	u8 max_num_amsdu;
+	u8 max_num_ampdu;
 
 	const enum htt_t2h_msg_type *t2h_msg_types;
 	u32 t2h_msg_types_max;
@@ -1430,6 +1503,11 @@
 
 	/* rx_status template */
 	struct ieee80211_rx_status rx_status;
+
+	struct {
+		dma_addr_t paddr;
+		struct htt_msdu_ext_desc *vaddr;
+	} frag_desc;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1482,6 +1560,12 @@
 #define HTT_LOG2_MAX_CACHE_LINE_SIZE 7	/* 2^7 = 128 */
 #define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
 
+/* These values are the default in most firmware revisions and apparently are
+ * a sweet spot performance-wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
 int ath10k_htt_connect(struct ath10k_htt *htt);
 int ath10k_htt_init(struct ath10k *ar);
 int ath10k_htt_setup(struct ath10k_htt *htt);
@@ -1497,6 +1581,7 @@
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
 				u8 max_subfrms_ampdu,
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 89eb16b..1b7a043 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -368,7 +368,7 @@
 		msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
 					& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
 					   RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
-		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
+		msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
 			      RX_MSDU_START_INFO0_MSDU_LENGTH);
 		msdu_chained = rx_desc->frag_info.ring2_more_count;
 
@@ -394,7 +394,7 @@
 			msdu_chaining = 1;
 		}
 
-		last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
+		last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
 				RX_MSDU_END_INFO0_LAST_MSDU;
 
 		trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
@@ -740,7 +740,7 @@
 	    __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
 		return NULL;
 
-	if (!(rxd->msdu_end.info0 &
+	if (!(rxd->msdu_end.common.info0 &
 	      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
 		return NULL;
 
@@ -991,9 +991,9 @@
 	bool is_last;
 
 	rxd = (void *)msdu->data - sizeof(*rxd);
-	is_first = !!(rxd->msdu_end.info0 &
+	is_first = !!(rxd->msdu_end.common.info0 &
 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-	is_last = !!(rxd->msdu_end.info0 &
+	is_last = !!(rxd->msdu_end.common.info0 &
 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
 
 	/* Delivered decapped frame:
@@ -1017,9 +1017,8 @@
 	skb_trim(msdu, msdu->len - FCS_LEN);
 
 	/* In most cases this will be true for sniffed frames. It makes sense
-	 * to deliver them as-is without stripping the crypto param. This would
-	 * also make sense for software based decryption (which is not
-	 * implemented in ath10k).
+	 * to deliver them as-is without stripping the crypto param. This is
+	 * necessary for software-based decryption.
 	 *
 	 * If there's no error then the frame is decrypted. At least that is
 	 * the case for frames that come in via fragmented rx indication.
@@ -1104,9 +1103,9 @@
 	rxd = (void *)msdu->data - sizeof(*rxd);
 	hdr = (void *)rxd->rx_hdr_status;
 
-	is_first = !!(rxd->msdu_end.info0 &
+	is_first = !!(rxd->msdu_end.common.info0 &
 		      __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
-	is_last = !!(rxd->msdu_end.info0 &
+	is_last = !!(rxd->msdu_end.common.info0 &
 		     __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
 	is_amsdu = !(is_first && is_last);
 
@@ -1201,7 +1200,6 @@
 {
 	struct htt_rx_desc *rxd;
 	enum rx_msdu_decap_format decap;
-	struct ieee80211_hdr *hdr;
 
 	/* First msdu's decapped header:
 	 * [802.11 header] <-- padded to 4 bytes long
@@ -1215,8 +1213,7 @@
 	 */
 
 	rxd = (void *)msdu->data - sizeof(*rxd);
-	hdr = (void *)rxd->rx_hdr_status;
-	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
 	switch (decap) {
@@ -1246,7 +1243,7 @@
 
 	rxd = (void *)skb->data - sizeof(*rxd);
 	flags = __le32_to_cpu(rxd->attention.flags);
-	info = __le32_to_cpu(rxd->msdu_start.info1);
+	info = __le32_to_cpu(rxd->msdu_start.common.info1);
 
 	is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
 	is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
@@ -1439,7 +1436,7 @@
 
 	first = skb_peek(amsdu);
 	rxd = (void *)first->data - sizeof(*rxd);
-	decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
+	decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
 		   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
 	if (!chained)
@@ -1633,8 +1630,6 @@
 	__le16 msdu_id;
 	int i;
 
-	lockdep_assert_held(&htt->tx_lock);
-
 	switch (status) {
 	case HTT_DATA_TX_STATUS_NO_ACK:
 		tx_done.no_ack = true;
@@ -1759,14 +1754,14 @@
 		__skb_queue_tail(amsdu, msdu);
 
 		rxd = (void *)msdu->data - sizeof(*rxd);
-		if (rxd->msdu_end.info0 &
+		if (rxd->msdu_end.common.info0 &
 		    __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
 			break;
 	}
 
 	msdu = skb_peek_tail(amsdu);
 	rxd = (void *)msdu->data - sizeof(*rxd);
-	if (!(rxd->msdu_end.info0 &
+	if (!(rxd->msdu_end.common.info0 &
 	      __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
 		skb_queue_splice_init(amsdu, list);
 		return -EAGAIN;
@@ -2000,15 +1995,11 @@
 			break;
 		}
 
-		spin_lock_bh(&htt->tx_lock);
 		ath10k_txrx_tx_unref(htt, &tx_done);
-		spin_unlock_bh(&htt->tx_lock);
 		break;
 	}
 	case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
-		spin_lock_bh(&htt->tx_lock);
-		__skb_queue_tail(&htt->tx_compl_q, skb);
-		spin_unlock_bh(&htt->tx_lock);
+		skb_queue_tail(&htt->tx_compl_q, skb);
 		tasklet_schedule(&htt->txrx_compl_task);
 		return;
 	case HTT_T2H_MSG_TYPE_SEC_IND: {
@@ -2074,6 +2065,12 @@
 		break;
 	case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
 		break;
+	case HTT_T2H_MSG_TYPE_AGGR_CONF:
+		break;
+	case HTT_T2H_MSG_TYPE_EN_STATS:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+	case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+	case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
 	default:
 		ath10k_warn(ar, "htt event (%d) not handled\n",
 			    resp->hdr.msg_type);
@@ -2093,12 +2090,10 @@
 	struct htt_resp *resp;
 	struct sk_buff *skb;
 
-	spin_lock_bh(&htt->tx_lock);
-	while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+	while ((skb = skb_dequeue(&htt->tx_compl_q))) {
 		ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
 		dev_kfree_skb_any(skb);
 	}
-	spin_unlock_bh(&htt->tx_lock);
 
 	spin_lock_bh(&htt->rx_ring.lock);
 	while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index a60ef7d..43aa5e2 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -63,7 +63,8 @@
 
 	lockdep_assert_held(&htt->tx_lock);
 
-	ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
+	ret = idr_alloc(&htt->pending_tx, skb, 0,
+			htt->max_num_pending_tx, GFP_ATOMIC);
 
 	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
 
@@ -84,6 +85,7 @@
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
+	int ret, size;
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
 		   htt->max_num_pending_tx);
@@ -94,11 +96,31 @@
 	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
 				       sizeof(struct ath10k_htt_txbuf), 4, 0);
 	if (!htt->tx_pool) {
-		idr_destroy(&htt->pending_tx);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto free_idr_pending_tx;
 	}
 
+	if (!ar->hw_params.continuous_frag_desc)
+		goto skip_frag_desc_alloc;
+
+	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+						  &htt->frag_desc.paddr,
+						  GFP_DMA);
+	if (!htt->frag_desc.vaddr) {
+		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+		ret = -ENOMEM;
+		goto free_tx_pool;
+	}
+
+skip_frag_desc_alloc:
 	return 0;
+
+free_tx_pool:
+	dma_pool_destroy(htt->tx_pool);
+free_idr_pending_tx:
+	idr_destroy(&htt->pending_tx);
+	return ret;
 }
 
 static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@@ -112,18 +134,25 @@
 	tx_done.discard = 1;
 	tx_done.msdu_id = msdu_id;
 
-	spin_lock_bh(&htt->tx_lock);
 	ath10k_txrx_tx_unref(htt, &tx_done);
-	spin_unlock_bh(&htt->tx_lock);
 
 	return 0;
 }
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
+	int size;
+
 	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
 	idr_destroy(&htt->pending_tx);
 	dma_pool_destroy(htt->tx_pool);
+
+	if (htt->frag_desc.vaddr) {
+		size = htt->max_num_pending_tx *
+				  sizeof(struct htt_msdu_ext_desc);
+		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+				  htt->frag_desc.paddr);
+	}
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -201,6 +230,49 @@
 	return 0;
 }
 
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+	struct ath10k *ar = htt->ar;
+	struct sk_buff *skb;
+	struct htt_cmd *cmd;
+	int ret, size;
+
+	if (!ar->hw_params.continuous_frag_desc)
+		return 0;
+
+	if (!htt->frag_desc.paddr) {
+		ath10k_warn(ar, "invalid frag desc memory\n");
+		return -EINVAL;
+	}
+
+	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+	skb = ath10k_htc_alloc_skb(ar, size);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_put(skb, size);
+	cmd = (struct htt_cmd *)skb->data;
+	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+	cmd->frag_desc_bank_cfg.info = 0;
+	cmd->frag_desc_bank_cfg.num_banks = 1;
+	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+				__cpu_to_le32(htt->frag_desc.paddr);
+	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
+	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+				__cpu_to_le16(htt->max_num_pending_tx - 1);
+
+	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+	if (ret) {
+		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+			    ret);
+		dev_kfree_skb_any(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 {
 	struct ath10k *ar = htt->ar;
@@ -355,12 +427,11 @@
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0) {
-		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	spin_unlock_bh(&htt->tx_lock);
 
 	txdesc = ath10k_htc_alloc_skb(ar, len);
 	if (!txdesc) {
@@ -371,11 +442,15 @@
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
-	if (res)
+	if (res) {
+		res = -EIO;
 		goto err_free_txdesc;
+	}
 
 	skb_put(txdesc, len);
 	cmd = (struct htt_cmd *)txdesc->data;
+	memset(cmd, 0, len);
+
 	cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
 	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
 	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
@@ -422,6 +497,7 @@
 	u16 msdu_id, flags1 = 0;
 	dma_addr_t paddr = 0;
 	u32 frags_paddr = 0;
+	struct htt_msdu_ext_desc *ext_desc = NULL;
 
 	res = ath10k_htt_tx_inc_pending(htt);
 	if (res)
@@ -429,12 +505,11 @@
 
 	spin_lock_bh(&htt->tx_lock);
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+	spin_unlock_bh(&htt->tx_lock);
 	if (res < 0) {
-		spin_unlock_bh(&htt->tx_lock);
 		goto err_tx_dec;
 	}
 	msdu_id = res;
-	spin_unlock_bh(&htt->tx_lock);
 
 	prefetch_len = min(htt->prefetch_len, msdu->len);
 	prefetch_len = roundup(prefetch_len, 4);
@@ -450,14 +525,20 @@
 	if ((ieee80211_is_action(hdr->frame_control) ||
 	     ieee80211_is_deauth(hdr->frame_control) ||
 	     ieee80211_is_disassoc(hdr->frame_control)) &&
-	     ieee80211_has_protected(hdr->frame_control))
+	     ieee80211_has_protected(hdr->frame_control)) {
 		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	} else if (!skb_cb->htt.nohwcrypt &&
+		   skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
+		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+	}
 
 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
-	if (res)
+	if (res) {
+		res = -EIO;
 		goto err_free_txbuf;
+	}
 
 	switch (skb_cb->txmode) {
 	case ATH10K_HW_TXRX_RAW:
@@ -465,16 +546,30 @@
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
 		/* pass through */
 	case ATH10K_HW_TXRX_ETHERNET:
-		frags = skb_cb->htt.txbuf->frags;
+		if (ar->hw_params.continuous_frag_desc) {
+			memset(&htt->frag_desc.vaddr[msdu_id], 0,
+			       sizeof(struct htt_msdu_ext_desc));
+			frags = (struct htt_data_tx_desc_frag *)
+				&htt->frag_desc.vaddr[msdu_id].frags;
+			ext_desc = &htt->frag_desc.vaddr[msdu_id];
+			frags[0].tword_addr.paddr_lo =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].tword_addr.paddr_hi = 0;
+			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
 
-		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
-		frags[0].len = __cpu_to_le32(msdu->len);
-		frags[1].paddr = 0;
-		frags[1].len = 0;
+			frags_paddr =  htt->frag_desc.paddr +
+				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
+		} else {
+			frags = skb_cb->htt.txbuf->frags;
+			frags[0].dword_addr.paddr =
+				__cpu_to_le32(skb_cb->paddr);
+			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+			frags[1].dword_addr.paddr = 0;
+			frags[1].dword_addr.len = 0;
 
+			frags_paddr = skb_cb->htt.txbuf_paddr;
+		}
 		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-
-		frags_paddr = skb_cb->htt.txbuf_paddr;
 		break;
 	case ATH10K_HW_TXRX_MGMT:
 		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
@@ -508,14 +603,20 @@
 			prefetch_len);
 	skb_cb->htt.txbuf->htc_hdr.flags = 0;
 
+	if (skb_cb->htt.nohwcrypt)
+		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
 	if (!skb_cb->is_protected)
 		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
 
 	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
 	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
-	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
+	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
 		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
 		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+		if (ar->hw_params.continuous_frag_desc)
+			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
 	}
 
 	/* Prevent firmware from sending up tx inspection requests. There's
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 5997f00..7b84d08 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -34,8 +34,15 @@
 	.ce7_base_address		= 0x00059000,
 	.soc_reset_control_si0_rst_mask	= 0x00000001,
 	.soc_reset_control_ce_rst_mask	= 0x00040000,
-	.soc_chip_id_address		= 0x00ec,
-	.scratch_3_address		= 0x0030,
+	.soc_chip_id_address		= 0x000000ec,
+	.scratch_3_address		= 0x00000030,
+	.fw_indicator_address		= 0x00009030,
+	.pcie_local_base_address	= 0x00080000,
+	.ce_wrap_intr_sum_host_msi_lsb	= 0x00000008,
+	.ce_wrap_intr_sum_host_msi_mask	= 0x0000ff00,
+	.pcie_intr_fw_mask		= 0x00000400,
+	.pcie_intr_ce_mask_all		= 0x0007f800,
+	.pcie_intr_clr_address		= 0x00000014,
 };
 
 const struct ath10k_hw_regs qca6174_regs = {
@@ -54,8 +61,79 @@
 	.ce7_base_address			= 0x00036000,
 	.soc_reset_control_si0_rst_mask		= 0x00000000,
 	.soc_reset_control_ce_rst_mask		= 0x00000001,
-	.soc_chip_id_address			= 0x000f0,
-	.scratch_3_address			= 0x0028,
+	.soc_chip_id_address			= 0x000000f0,
+	.scratch_3_address			= 0x00000028,
+	.fw_indicator_address			= 0x0003a028,
+	.pcie_local_base_address		= 0x00080000,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x00000008,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x0000ff00,
+	.pcie_intr_fw_mask			= 0x00000400,
+	.pcie_intr_ce_mask_all			= 0x0007f800,
+	.pcie_intr_clr_address			= 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+	.rtc_state_cold_reset_mask		= 0x00000400,
+	.rtc_soc_base_address			= 0x00080000,
+	.rtc_wmac_base_address			= 0x00000000,
+	.soc_core_base_address			= 0x00082000,
+	.ce_wrapper_base_address		= 0x0004d000,
+	.ce0_base_address			= 0x0004a000,
+	.ce1_base_address			= 0x0004a400,
+	.ce2_base_address			= 0x0004a800,
+	.ce3_base_address			= 0x0004ac00,
+	.ce4_base_address			= 0x0004b000,
+	.ce5_base_address			= 0x0004b400,
+	.ce6_base_address			= 0x0004b800,
+	.ce7_base_address			= 0x0004bc00,
+	/* Note: qca99x0 supports up to 12 Copy Engines. Other than the addresses
+	 * of CE0 and CE1, no other copy engine is directly referred to in the
+	 * code. It is not really necessary to assign addresses for the newly
+	 * supported CEs in this address table.
+	 *	Copy Engine		Address
+	 *	CE8			0x0004c000
+	 *	CE9			0x0004c400
+	 *	CE10			0x0004c800
+	 *	CE11			0x0004cc00
+	 */
+	.soc_reset_control_si0_rst_mask		= 0x00000001,
+	.soc_reset_control_ce_rst_mask		= 0x00000100,
+	.soc_chip_id_address			= 0x000000ec,
+	.scratch_3_address			= 0x00040050,
+	.fw_indicator_address			= 0x00040050,
+	.pcie_local_base_address		= 0x00000000,
+	.ce_wrap_intr_sum_host_msi_lsb		= 0x0000000c,
+	.ce_wrap_intr_sum_host_msi_mask		= 0x00fff000,
+	.pcie_intr_fw_mask			= 0x00100000,
+	.pcie_intr_ce_mask_all			= 0x000fff00,
+	.pcie_intr_clr_address			= 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+	.rtc_state_val_on		= 3,
+	.ce_count			= 8,
+	.msi_assign_ce_max		= 7,
+	.num_target_ce_config_wlan	= 7,
+	.ce_desc_meta_data_mask		= 0xFFFC,
+	.ce_desc_meta_data_lsb		= 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+	.rtc_state_val_on		= 3,
+	.ce_count			= 8,
+	.msi_assign_ce_max		= 7,
+	.num_target_ce_config_wlan	= 7,
+	.ce_desc_meta_data_mask		= 0xFFFC,
+	.ce_desc_meta_data_lsb		= 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+	.rtc_state_val_on		= 5,
+	.ce_count			= 12,
+	.msi_assign_ce_max		= 12,
+	.num_target_ce_config_wlan	= 10,
+	.ce_desc_meta_data_mask		= 0xFFF0,
+	.ce_desc_meta_data_lsb		= 4,
 };
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
@@ -74,6 +152,6 @@
 	cc -= cc_prev - cc_fix;
 	rcc -= rcc_prev;
 
-	survey->time = CCNT_TO_MSEC(cc);
-	survey->time_busy = CCNT_TO_MSEC(rcc);
+	survey->time = CCNT_TO_MSEC(ar, cc);
+	survey->time_busy = CCNT_TO_MSEC(ar, rcc);
 }
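
The qca99x0_regs and qca99x0_values tables above only take effect once core code points ar->regs and ar->hw_values at the entries matching the detected hardware revision; that selection is done during core init and is not part of the hunks shown here. A minimal sketch of what such a selection looks like (ath10k_hw_select() is a made-up name used purely for illustration):

/* illustrative only: the real assignment happens in core init once
 * ar->hw_rev is known, not in a helper with this name
 */
static int ath10k_hw_select(struct ath10k *ar)
{
	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
		ar->regs = &qca988x_regs;
		ar->hw_values = &qca988x_values;
		break;
	case ATH10K_HW_QCA6174:
		ar->regs = &qca6174_regs;
		ar->hw_values = &qca6174_values;
		break;
	case ATH10K_HW_QCA99X0:
		ar->regs = &qca99x0_regs;
		ar->hw_values = &qca99x0_values;
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}
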
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 85cca29..23afcda 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -72,6 +72,18 @@
 #define QCA6174_HW_3_0_BOARD_DATA_FILE	"board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR	0x1234
 
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV     0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
+#define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR	0x1234
+
 #define ATH10K_FW_API2_FILE		"firmware-2.bin"
 #define ATH10K_FW_API3_FILE		"firmware-3.bin"
 
@@ -112,6 +124,9 @@
 	 * FW API 5 and above.
 	 */
 	ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+	/* Code swap image for firmware binary */
+	ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@
 	ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
 	ATH10K_FW_WMI_OP_VERSION_TLV = 4,
 	ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+	ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
 
 	/* keep last */
 	ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@
 
 	ATH10K_FW_HTT_OP_VERSION_TLV = 3,
 
+	ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
 	/* keep last */
 	ATH10K_FW_HTT_OP_VERSION_MAX,
 };
@@ -144,6 +162,7 @@
 enum ath10k_hw_rev {
 	ATH10K_HW_QCA988X,
 	ATH10K_HW_QCA6174,
+	ATH10K_HW_QCA99X0,
 };
 
 struct ath10k_hw_regs {
@@ -164,26 +183,50 @@
 	u32 soc_reset_control_ce_rst_mask;
 	u32 soc_chip_id_address;
 	u32 scratch_3_address;
+	u32 fw_indicator_address;
+	u32 pcie_local_base_address;
+	u32 ce_wrap_intr_sum_host_msi_lsb;
+	u32 ce_wrap_intr_sum_host_msi_mask;
+	u32 pcie_intr_fw_mask;
+	u32 pcie_intr_ce_mask_all;
+	u32 pcie_intr_clr_address;
 };
 
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+	u32 rtc_state_val_on;
+	u8 ce_count;
+	u8 msi_assign_ce_max;
+	u8 num_target_ce_config_wlan;
+	u16 ce_desc_meta_data_mask;
+	u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
 				u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
 
 /* Known peculiarities:
- *  - current FW doesn't support raw rx mode (last tested v599)
- *  - current FW dumps upon raw tx mode (last tested v599)
  *  - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
  *  - raw have FCS, nwifi doesn't
  *  - ethernet frames have 802.11 header decapped and parts (base hdr, cipher
  *    param, llc/snap) are aligned to 4byte boundaries each */
 enum ath10k_hw_txrx_mode {
 	ATH10K_HW_TXRX_RAW = 0,
+
+	/* Native Wifi decap mode is used to align IP frames to 4-byte
+	 * boundaries and avoid a very expensive re-alignment in mac80211.
+	 */
 	ATH10K_HW_TXRX_NATIVE_WIFI = 1,
 	ATH10K_HW_TXRX_ETHERNET = 2,
 
@@ -245,10 +288,6 @@
 #define TARGET_RX_TIMEOUT_LO_PRI		100
 #define TARGET_RX_TIMEOUT_HI_PRI		40
 
-/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
- * avoid a very expensive re-alignment in mac80211. */
-#define TARGET_RX_DECAP_MODE			ATH10K_HW_TXRX_NATIVE_WIFI
-
 #define TARGET_SCAN_MAX_PENDING_REQS		4
 #define TARGET_BMISS_OFFLOAD_MAX_VDEV		3
 #define TARGET_ROAM_OFFLOAD_MAX_VDEV		3
@@ -283,7 +322,6 @@
 #define TARGET_10X_RX_CHAIN_MASK		(BIT(0) | BIT(1) | BIT(2))
 #define TARGET_10X_RX_TIMEOUT_LO_PRI		100
 #define TARGET_10X_RX_TIMEOUT_HI_PRI		40
-#define TARGET_10X_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI
 #define TARGET_10X_SCAN_MAX_PENDING_REQS	4
 #define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV	2
 #define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV	2
@@ -310,8 +348,70 @@
 #define TARGET_TLV_NUM_MSDU_DESC		(1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS		22
 
+/* Diagnostic Window */
+#define CE_DIAG_PIPE	7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS			16
+#define TARGET_10_4_NUM_STATIONS		32
+#define TARGET_10_4_NUM_PEERS			((TARGET_10_4_NUM_STATIONS) + \
+						 (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS		0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX	512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS		50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS		0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS	0
+#define TARGET_10_4_NUM_PEER_KEYS		2
+#define TARGET_10_4_TGT_NUM_TIDS		((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT		32
+#define TARGET_10_4_TX_CHAIN_MASK		(BIT(0) | BIT(1) | \
+						 BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK		(BIT(0) | BIT(1) | \
+						 BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI		100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI		40
+
+#define TARGET_10_4_RX_DECAP_MODE		ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS		4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES   8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS		0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS	0
+#define TARGET_10_4_MCAST2UCAST_MODE		0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE		1024
+#define TARGET_10_4_NUM_WDS_ENTRIES		32
+#define TARGET_10_4_DMA_BURST_SIZE		1
+#define TARGET_10_4_MAC_AGGR_DELIM		0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG			0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV	3
+#define TARGET_10_4_NUM_MSDU_DESC		(1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS		2
+#define TARGET_10_4_MAX_PEER_EXT_STATS		16
+#define TARGET_10_4_SMART_ANT_CAP		0
+#define TARGET_10_4_BK_MIN_FREE			0
+#define TARGET_10_4_BE_MIN_FREE			0
+#define TARGET_10_4_VI_MIN_FREE			0
+#define TARGET_10_4_VO_MIN_FREE			0
+#define TARGET_10_4_RX_BATCH_MODE		1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG	0
+#define TARGET_10_4_ATF_CONFIG			0
+#define TARGET_10_4_IPHDR_PAD_CONFIG		1
+#define TARGET_10_4_QWRAP_CONFIG		0
+
 /* Number of Copy Engines supported */
-#define CE_COUNT 8
+#define CE_COUNT ar->hw_values->ce_count
 
 /*
  * Total number of PCIe MSI interrupts requested for all interrupt sources.
@@ -335,10 +435,10 @@
 
 /* MSIs for Copy Engines */
 #define MSI_ASSIGN_CE_INITIAL	1
-#define MSI_ASSIGN_CE_MAX	7
+#define MSI_ASSIGN_CE_MAX	ar->hw_values->msi_assign_ce_max
 
 /* as of IP3.7.1 */
-#define RTC_STATE_V_ON				3
+#define RTC_STATE_V_ON				ar->hw_values->rtc_state_val_on
 
 #define RTC_STATE_COLD_RESET_MASK		ar->regs->rtc_state_cold_reset_mask
 #define RTC_STATE_V_LSB				0
@@ -374,7 +474,7 @@
 #define CE7_BASE_ADDRESS			ar->regs->ce7_base_address
 #define DBI_BASE_ADDRESS			0x00060000
 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS	0x0006c000
-#define PCIE_LOCAL_BASE_ADDRESS			0x00080000
+#define PCIE_LOCAL_BASE_ADDRESS		ar->regs->pcie_local_base_address
 
 #define SOC_RESET_CONTROL_ADDRESS		0x00000000
 #define SOC_RESET_CONTROL_OFFSET		0x00000000
@@ -448,24 +548,25 @@
 #define CORE_CTRL_ADDRESS			0x0000
 #define PCIE_INTR_ENABLE_ADDRESS		0x0008
 #define PCIE_INTR_CAUSE_ADDRESS			0x000c
-#define PCIE_INTR_CLR_ADDRESS			0x0014
+#define PCIE_INTR_CLR_ADDRESS			ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS			ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS			0x0010
 
-/* Cycle counters are running at 88MHz */
-#define CCNT_TO_MSEC(x) ((x) / 88000)
+#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
 
 /* Firmware indications to the Host via SCRATCH_3 register. */
-#define FW_INDICATOR_ADDRESS	(SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_INDICATOR_ADDRESS			ar->regs->fw_indicator_address
 #define FW_IND_EVENT_PENDING			1
 #define FW_IND_INITIALIZED			2
 
 /* HOST_REG interrupt from firmware */
-#define PCIE_INTR_FIRMWARE_MASK			0x00000400
-#define PCIE_INTR_CE_MASK_ALL			0x0007f800
+#define PCIE_INTR_FIRMWARE_MASK			ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL			ar->regs->pcie_intr_ce_mask_all
 
 #define DRAM_BASE_ADDRESS			0x00400000
 
+#define PCIE_BAR_REG_ADDRESS			0x40030
+
 #define MISSING 0
 
 #define SYSTEM_SLEEP_OFFSET			SOC_SYSTEM_SLEEP_OFFSET
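
Several of the defines above (CE_COUNT, MSI_ASSIGN_CE_MAX, RTC_STATE_V_ON, PCIE_INTR_FIRMWARE_MASK, CCNT_TO_MSEC, ...) now expand to fields of ar->regs or ar->hw_values, so they only compile where a variable named ar is in scope. A short sketch of what a caller looks like after this change (the helper below is illustrative and not part of the patch):

/* illustrative only: CE_COUNT is 8 on qca988x/qca6174 and 12 on qca99x0 */
static void ath10k_example_dump_ce_count(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_dbg(ar, ATH10K_DBG_PCI, "copy engine %d of %d\n",
			   i, CE_COUNT);
}
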
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 218b6af..64674c9 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -197,6 +197,10 @@
 		return -EOPNOTSUPP;
 	}
 
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	}
+
 	if (cmd == DISABLE_KEY) {
 		arg.key_cipher = WMI_CIPHER_NONE;
 		arg.key_data = NULL;
@@ -218,6 +222,9 @@
 
 	reinit_completion(&ar->install_key_done);
 
+	if (arvif->nohwcrypt)
+		return 1;
+
 	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
 	if (ret)
 		return ret;
@@ -240,6 +247,10 @@
 
 	lockdep_assert_held(&ar->conf_mutex);
 
+	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
+		    arvif->vif->type != NL80211_IFTYPE_ADHOC))
+		return -EINVAL;
+
 	spin_lock_bh(&ar->data_lock);
 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
 	spin_unlock_bh(&ar->data_lock);
@@ -251,21 +262,34 @@
 		if (arvif->wep_keys[i] == NULL)
 			continue;
 
-		flags = 0;
-		flags |= WMI_KEY_PAIRWISE;
+		switch (arvif->vif->type) {
+		case NL80211_IFTYPE_AP:
+			flags = WMI_KEY_PAIRWISE;
 
-		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
-					 addr, flags);
-		if (ret)
-			return ret;
+			if (arvif->def_wep_key_idx == i)
+				flags |= WMI_KEY_TX_USAGE;
 
-		flags = 0;
-		flags |= WMI_KEY_GROUP;
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr, flags);
+			if (ret < 0)
+				return ret;
+			break;
+		case NL80211_IFTYPE_ADHOC:
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr,
+						 WMI_KEY_PAIRWISE);
+			if (ret < 0)
+				return ret;
 
-		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
-					 addr, flags);
-		if (ret)
-			return ret;
+			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+						 SET_KEY, addr, WMI_KEY_GROUP);
+			if (ret < 0)
+				return ret;
+			break;
+		default:
+			WARN_ON(1);
+			return -EINVAL;
+		}
 
 		spin_lock_bh(&ar->data_lock);
 		peer->keys[i] = arvif->wep_keys[i];
@@ -280,6 +304,9 @@
 	 *
 	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
 	 */
+	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
+		return 0;
+
 	if (arvif->def_wep_key_idx == -1)
 		return 0;
 
@@ -322,10 +349,10 @@
 		/* key flags are not required to delete the key */
 		ret = ath10k_install_key(arvif, peer->keys[i],
 					 DISABLE_KEY, addr, flags);
-		if (ret && first_errno == 0)
+		if (ret < 0 && first_errno == 0)
 			first_errno = ret;
 
-		if (ret)
+		if (ret < 0)
 			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
 				    i, ret);
 
@@ -398,7 +425,7 @@
 			break;
 		/* key flags are not required to delete the key */
 		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
-		if (ret && first_errno == 0)
+		if (ret < 0 && first_errno == 0)
 			first_errno = ret;
 
 		if (ret)
@@ -591,11 +618,19 @@
 static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
 			      enum wmi_peer_type peer_type)
 {
+	struct ath10k_vif *arvif;
+	int num_peers = 0;
 	int ret;
 
 	lockdep_assert_held(&ar->conf_mutex);
 
-	if (ar->num_peers >= ar->max_num_peers)
+	num_peers = ar->num_peers;
+
+	/* Each vdev consumes a peer entry as well */
+	list_for_each_entry(arvif, &ar->arvifs, list)
+		num_peers++;
+
+	if (num_peers >= ar->max_num_peers)
 		return -ENOBUFS;
 
 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
@@ -671,20 +706,6 @@
 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
-static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
-{
-	struct ath10k *ar = arvif->ar;
-	u32 vdev_param;
-
-	if (value != 0xFFFFFFFF)
-		value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
-				ATH10K_FRAGMT_THRESHOLD_MIN,
-				ATH10K_FRAGMT_THRESHOLD_MAX);
-
-	vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
-	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
-}
-
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 {
 	int ret;
@@ -836,7 +857,7 @@
 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
 {
 	struct cfg80211_chan_def *chandef = NULL;
-	struct ieee80211_channel *channel = chandef->chan;
+	struct ieee80211_channel *channel = NULL;
 	struct wmi_vdev_start_request_arg arg = {};
 	int ret = 0;
 
@@ -1668,7 +1689,7 @@
 	return 0;
 }
 
-static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
 {
 	struct ath10k_vif *arvif;
 	int num = 0;
@@ -1676,7 +1697,7 @@
 	lockdep_assert_held(&ar->conf_mutex);
 
 	list_for_each_entry(arvif, &ar->arvifs, list)
-		if (arvif->ps)
+		if (arvif->is_started)
 			num++;
 
 	return num;
@@ -1700,7 +1721,7 @@
 
 	enable_ps = arvif->ps;
 
-	if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
 		      ar->fw_features)) {
 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@@ -2502,6 +2523,9 @@
 	u32 param;
 	u32 value;
 
+	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+		return 0;
+
 	if (!(ar->vht_cap_info &
 	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
 	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
@@ -2995,6 +3019,8 @@
 						   IEEE80211_IFACE_ITER_RESUME_ALL,
 						   ath10k_mac_tx_unlock_iter,
 						   ar);
+
+	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
 }
 
 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
@@ -3034,38 +3060,16 @@
 
 	lockdep_assert_held(&ar->htt.tx_lock);
 
-	switch (pause_id) {
-	case WMI_TLV_TX_PAUSE_ID_MCC:
-	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
-	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
-	case WMI_TLV_TX_PAUSE_ID_AP_PS:
-	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
-		switch (action) {
-		case WMI_TLV_TX_PAUSE_ACTION_STOP:
-			ath10k_mac_vif_tx_lock(arvif, pause_id);
-			break;
-		case WMI_TLV_TX_PAUSE_ACTION_WAKE:
-			ath10k_mac_vif_tx_unlock(arvif, pause_id);
-			break;
-		default:
-			ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
-				    action, arvif->vdev_id);
-			break;
-		}
+	switch (action) {
+	case WMI_TLV_TX_PAUSE_ACTION_STOP:
+		ath10k_mac_vif_tx_lock(arvif, pause_id);
 		break;
-	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
-	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
-	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
-	case WMI_TLV_TX_PAUSE_ID_HOST:
+	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+		ath10k_mac_vif_tx_unlock(arvif, pause_id);
+		break;
 	default:
-		/* FIXME: Some pause_ids aren't vdev specific. Instead they
-		 * target peer_id and tid. Implementing these could improve
-		 * traffic scheduling fairness across multiple connected
-		 * stations in AP/IBSS modes.
-		 */
-		ath10k_dbg(ar, ATH10K_DBG_MAC,
-			   "mac ignoring unsupported tx pause vdev %i id %d\n",
-			   arvif->vdev_id, pause_id);
+		ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+			    action, arvif->vdev_id);
 		break;
 	}
 }
@@ -3082,12 +3086,15 @@
 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
 	struct ath10k_mac_tx_pause *arg = data;
 
+	if (arvif->vdev_id != arg->vdev_id)
+		return;
+
 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
 }
 
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-				enum wmi_tlv_tx_pause_id pause_id,
-				enum wmi_tlv_tx_pause_action action)
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+				     enum wmi_tlv_tx_pause_id pause_id,
+				     enum wmi_tlv_tx_pause_action action)
 {
 	struct ath10k_mac_tx_pause arg = {
 		.vdev_id = vdev_id,
@@ -3168,13 +3175,30 @@
 	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
 	 * NativeWifi txmode - it selects AP key instead of peer key. It seems
 	 * to work with Ethernet txmode so use it.
+	 *
+	 * FIXME: Check if raw mode works with TDLS.
 	 */
 	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
 		return ATH10K_HW_TXRX_ETHERNET;
 
+	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		return ATH10K_HW_TXRX_RAW;
+
 	return ATH10K_HW_TXRX_NATIVE_WIFI;
 }
 
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+				     struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+			 IEEE80211_TX_CTL_INJECTED;
+
+	if ((info->flags & mask) == mask)
+		return false;
+	if (vif)
+		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+	return true;
+}
+
 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
  * Control in the header.
  */
@@ -3341,6 +3365,7 @@
 	int vdev_id;
 	int ret;
 	unsigned long time_left;
+	bool tmp_peer_created = false;
 
 	/* FW requirement: We must create a peer before FW will send out
 	 * an offchannel frame. Otherwise the frame will be stuck and
@@ -3378,6 +3403,7 @@
 			if (ret)
 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
 					    peer_addr, vdev_id, ret);
+			tmp_peer_created = (ret == 0);
 		}
 
 		spin_lock_bh(&ar->data_lock);
@@ -3393,7 +3419,7 @@
 			ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
 				    skb);
 
-		if (!peer) {
+		if (!peer && tmp_peer_created) {
 			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
 			if (ret)
 				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
@@ -3449,14 +3475,13 @@
 	case ATH10K_SCAN_IDLE:
 		break;
 	case ATH10K_SCAN_RUNNING:
-		if (ar->scan.is_roc)
-			ieee80211_remain_on_channel_expired(ar->hw);
-		/* fall through */
 	case ATH10K_SCAN_ABORTING:
 		if (!ar->scan.is_roc)
 			ieee80211_scan_completed(ar->hw,
 						 (ar->scan.state ==
 						  ATH10K_SCAN_ABORTING));
+		else if (ar->scan.roc_notify)
+			ieee80211_remain_on_channel_expired(ar->hw);
 		/* fall through */
 	case ATH10K_SCAN_STARTING:
 		ar->scan.state = ATH10K_SCAN_IDLE;
@@ -3620,6 +3645,7 @@
 	ATH10K_SKB_CB(skb)->htt.is_offchan = false;
 	ATH10K_SKB_CB(skb)->htt.freq = 0;
 	ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
+	ATH10K_SKB_CB(skb)->htt.nohwcrypt = !ath10k_tx_h_use_hwcrypto(vif, skb);
 	ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif);
 	ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb);
 	ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc);
@@ -3635,12 +3661,11 @@
 		ath10k_tx_h_8023(skb);
 		break;
 	case ATH10K_HW_TXRX_RAW:
-		/* FIXME: Packet injection isn't implemented. It should be
-		 * doable with firmware 10.2 on qca988x.
-		 */
-		WARN_ON_ONCE(1);
-		ieee80211_free_txskb(hw, skb);
-		return;
+		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+			WARN_ON_ONCE(1);
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
 	}
 
 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
@@ -4039,6 +4064,43 @@
 	return 1;
 }
 
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+	u32 value = 0;
+	struct ath10k *ar = arvif->ar;
+
+	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+		return 0;
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+		value |= SM((ar->num_rf_chains - 1), WMI_TXBF_STS_CAP_OFFSET);
+
+	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+		value |= SM((ar->num_rf_chains - 1), WMI_BF_SOUND_DIM_OFFSET);
+
+	if (!value)
+		return 0;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+					 ar->wmi.vdev_param->txbf, value);
+}
+
 /*
  * TODO:
  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
@@ -4080,6 +4142,12 @@
 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
 	}
 
+	if (ar->num_peers >= ar->max_num_peers) {
+		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+		ret = -ENOBUFS;
+		goto err;
+	}
+
 	if (ar->free_vdev_map == 0) {
 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
 		ret = -EBUSY;
@@ -4159,6 +4227,14 @@
 			goto err;
 		}
 	}
+	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+		arvif->nohwcrypt = true;
+
+	if (arvif->nohwcrypt &&
+	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+		ret = -EINVAL;
+		goto err;
+	}
 
 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
@@ -4257,16 +4333,16 @@
 		}
 	}
 
-	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+	ret = ath10k_mac_set_txbf_conf(arvif);
 	if (ret) {
-		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
 		goto err_peer_delete;
 	}
 
-	ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
 	if (ret) {
-		ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
+		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
 		goto err_peer_delete;
 	}
@@ -4287,6 +4363,11 @@
 		}
 	}
 
+	spin_lock_bh(&ar->htt.tx_lock);
+	if (!ar->tx_paused)
+		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+	spin_unlock_bh(&ar->htt.tx_lock);
+
 	mutex_unlock(&ar->conf_mutex);
 	return 0;
 
@@ -4641,9 +4722,6 @@
 	arg.vdev_id = arvif->vdev_id;
 	arg.scan_id = ATH10K_SCAN_ID;
 
-	if (!req->no_cck)
-		arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
-
 	if (req->ie_len) {
 		arg.ie_len = req->ie_len;
 		memcpy(arg.ie, req->ie, arg.ie_len);
@@ -4751,6 +4829,9 @@
 	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
 		return 1;
 
+	if (arvif->nohwcrypt)
+		return 1;
+
 	if (key->keyidx > WMI_MAX_KEY_INDEX)
 		return -ENOSPC;
 
@@ -4820,6 +4901,7 @@
 
 	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
 	if (ret) {
+		WARN_ON(ret > 0);
 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
 			    arvif->vdev_id, peer_addr, ret);
 		goto exit;
@@ -4835,13 +4917,16 @@
 
 		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
 		if (ret) {
+			WARN_ON(ret > 0);
 			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
 				    arvif->vdev_id, peer_addr, ret);
 			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
 						  peer_addr, flags);
-			if (ret2)
+			if (ret2) {
+				WARN_ON(ret2 > 0);
 				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
 					    arvif->vdev_id, peer_addr, ret2);
+			}
 			goto exit;
 		}
 	}
@@ -5462,6 +5547,7 @@
 		ar->scan.is_roc = true;
 		ar->scan.vdev_id = arvif->vdev_id;
 		ar->scan.roc_freq = chan->center_freq;
+		ar->scan.roc_notify = true;
 		ret = 0;
 		break;
 	case ATH10K_SCAN_STARTING:
@@ -5525,7 +5611,13 @@
 	struct ath10k *ar = hw->priv;
 
 	mutex_lock(&ar->conf_mutex);
+
+	spin_lock_bh(&ar->data_lock);
+	ar->scan.roc_notify = false;
+	spin_unlock_bh(&ar->data_lock);
+
 	ath10k_scan_abort(ar);
+
 	mutex_unlock(&ar->conf_mutex);
 
 	cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5561,12 +5653,27 @@
 	return ret;
 }
 
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	/* Even though there's a WMI enum for the fragmentation threshold, no
+	 * known firmware actually implements it. Moreover it is not possible
+	 * to rely on mac80211 for frame fragmentation because firmware clears
+	 * the "more fragments" bit in frame control, making it impossible for
+	 * remote devices to reassemble frames.
+	 *
+	 * Hence implement a dummy callback just to say fragmentation isn't
+	 * supported. This effectively prevents mac80211 from doing frame
+	 * fragmentation in software.
+	 */
+	return -EOPNOTSUPP;
+}
+
 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 u32 queues, bool drop)
 {
 	struct ath10k *ar = hw->priv;
 	bool skip;
-	int ret;
+	long time_left;
 
 	/* mac80211 doesn't care if we really xmit queued frames or not
 	 * we'll collect those frames either way if we stop/delete vdevs */
@@ -5578,7 +5685,7 @@
 	if (ar->state == ATH10K_STATE_WEDGED)
 		goto skip;
 
-	ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
 			bool empty;
 
 			spin_lock_bh(&ar->htt.tx_lock);
@@ -5592,9 +5699,9 @@
 			(empty || skip);
 		}), ATH10K_FLUSH_TIMEOUT_HZ);
 
-	if (ret <= 0 || skip)
-		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
-			    skip, ar->state, ret);
+	if (time_left == 0 || skip)
+		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+			    skip, ar->state, time_left);
 
 skip:
 	mutex_unlock(&ar->conf_mutex);
@@ -6219,6 +6326,13 @@
 
 	arvif->is_started = true;
 
+	ret = ath10k_mac_vif_setup_ps(arvif);
+	if (ret) {
+		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+			    arvif->vdev_id, ret);
+		goto err_stop;
+	}
+
 	if (vif->type == NL80211_IFTYPE_MONITOR) {
 		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
 		if (ret) {
@@ -6236,6 +6350,7 @@
 err_stop:
 	ath10k_vdev_stop(arvif);
 	arvif->is_started = false;
+	ath10k_mac_vif_setup_ps(arvif);
 
 err:
 	mutex_unlock(&ar->conf_mutex);
@@ -6395,6 +6510,7 @@
 	.remain_on_channel		= ath10k_remain_on_channel,
 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
 	.set_rts_threshold		= ath10k_set_rts_threshold,
+	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
 	.flush				= ath10k_flush,
 	.tx_last_beacon			= ath10k_tx_last_beacon,
 	.set_antenna			= ath10k_set_antenna,
@@ -6565,8 +6681,11 @@
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
 	{
 		.max = 2,
-		.types = BIT(NL80211_IFTYPE_STATION) |
-			 BIT(NL80211_IFTYPE_AP) |
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_AP) |
 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
 			 BIT(NL80211_IFTYPE_P2P_GO),
 	},
@@ -6576,6 +6695,26 @@
 	},
 };
 
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP) |
+			 BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
 	{
 		.max = 1,
@@ -6594,7 +6733,7 @@
 	{
 		.limits = ath10k_tlv_if_limit,
 		.num_different_channels = 1,
-		.max_interfaces = 3,
+		.max_interfaces = 4,
 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
 	},
 	{
@@ -6608,11 +6747,17 @@
 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
 	{
 		.limits = ath10k_tlv_if_limit,
-		.num_different_channels = 2,
-		.max_interfaces = 3,
+		.num_different_channels = 1,
+		.max_interfaces = 4,
 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
 	},
 	{
+		.limits = ath10k_tlv_qcs_if_limit,
+		.num_different_channels = 2,
+		.max_interfaces = 4,
+		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+	},
+	{
 		.limits = ath10k_tlv_if_limit_ibss,
 		.num_different_channels = 1,
 		.max_interfaces = 2,
@@ -6620,6 +6765,33 @@
 	},
 };
 
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max	= 16,
+		.types	= BIT(NL80211_IFTYPE_AP)
+	},
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+	{
+		.limits = ath10k_10_4_if_limits,
+		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+		.max_interfaces = 16,
+		.num_different_channels = 1,
+		.beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+					BIT(NL80211_CHAN_WIDTH_20) |
+					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_80),
+#endif
+	},
+};
+
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
 	struct ieee80211_sta_vht_cap vht_cap = {0};
@@ -6844,7 +7016,6 @@
 	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
 	ieee80211_hw_set(ar->hw, AP_LINK_PS);
 	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
-	ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
@@ -6852,6 +7023,9 @@
 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 
+	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
@@ -6902,6 +7076,8 @@
 		goto err_free;
 	}
 
+	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
 	/*
 	 * on LL hardware queues are managed entirely by the FW
 	 * so we only advertise to mac we can do the queues thing
@@ -6941,6 +7117,11 @@
 		ar->hw->wiphy->n_iface_combinations =
 			ARRAY_SIZE(ath10k_10x_if_comb);
 		break;
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+		ar->hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(ath10k_10_4_if_comb);
+		break;
 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
 	case ATH10K_FW_WMI_OP_VERSION_MAX:
 		WARN_ON(1);
@@ -6948,7 +7129,8 @@
 		goto err_free;
 	}
 
-	ar->hw->netdev_features = NETIF_F_HW_CSUM;
+	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+		ar->hw->netdev_features = NETIF_F_HW_CSUM;
 
 	if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		/* Init ath dfs pattern detector */
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index b291f06..e3cefe4 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -61,9 +61,9 @@
 
 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-				enum wmi_tlv_tx_pause_id pause_id,
-				enum wmi_tlv_tx_pause_action action);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+				     enum wmi_tlv_tx_pause_id pause_id,
+				     enum wmi_tlv_tx_pause_action action);
 
 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
 			     u8 hw_rate);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index ea656e0..1046ab6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,11 +58,15 @@
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
 
 #define QCA988X_2_0_DEVICE_ID	(0x003c)
+#define QCA6164_2_1_DEVICE_ID	(0x0041)
 #define QCA6174_2_1_DEVICE_ID	(0x003e)
+#define QCA99X0_2_0_DEVICE_ID	(0x0040)
 
 static const struct pci_device_id ath10k_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
 	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
 	{0}
 };
 
@@ -72,16 +76,25 @@
 	 * because of that.
 	 */
 	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+	{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
 	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+	{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
 };
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
 static int ath10k_pci_cold_reset(struct ath10k *ar);
-static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 static int ath10k_pci_init_irq(struct ath10k *ar);
 static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -90,6 +103,7 @@
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
 			       struct ath10k_ce_pipe *rx_pipe,
 			       struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
 	/* CE0: host->target HTC control and raw streams */
@@ -155,6 +169,38 @@
 		.src_sz_max = DIAG_TRANSFER_LIMIT,
 		.dest_nentries = 2,
 	},
+
+	/* CE8: target->host pktlog */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 2048,
+		.dest_nentries = 128,
+	},
+
+	/* CE9: target autonomous qcache memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE10: target autonomous hif memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
+
+	/* CE11: target autonomous hif memcpy */
+	{
+		.flags = CE_ATTR_FLAGS,
+		.src_nentries = 0,
+		.src_sz_max = 0,
+		.dest_nentries = 0,
+	},
 };
 
 /* Target firmware's Copy Engine configuration. */
@@ -232,6 +278,38 @@
 	},
 
 	/* CE7 used only by Host */
+	{
+		.pipenum = __cpu_to_le32(7),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(0),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE8: target->host pktlog */
+	{
+		.pipenum = __cpu_to_le32(8),
+		.pipedir = __cpu_to_le32(PIPEDIR_IN),
+		.nentries = __cpu_to_le32(64),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* CE9: target autonomous qcache memcpy */
+	{
+		.pipenum = __cpu_to_le32(9),
+		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+		.nentries = __cpu_to_le32(32),
+		.nbytes_max = __cpu_to_le32(2048),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
+
+	/* It is not necessary to send the target wlan configuration for CE10
+	 * and CE11 as these CEs are not actively used in the target.
+	 */
 };
 
 /*
@@ -479,6 +557,12 @@
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	int ret;
 
+	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(value), ar_pci->mem_len);
+		return;
+	}
+
 	ret = ath10k_pci_wake(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +580,12 @@
 	u32 val;
 	int ret;
 
+	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+			    offset, offset + sizeof(val), ar_pci->mem_len);
+		return 0;
+	}
+
 	ret = ath10k_pci_wake(ar);
 	if (ret) {
 		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +768,26 @@
 	ath10k_pci_rx_post(ar);
 }
 
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+	u32 val = 0;
+
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					  CORE_CTRL_ADDRESS) &
+		       0x7ff) << 21;
+		break;
+	case ATH10K_HW_QCA99X0:
+		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+		break;
+	}
+
+	val |= 0x100000 | (addr & 0xfffff);
+	return val;
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
@@ -740,8 +850,7 @@
 		 * convert it from Target CPU virtual address space
 		 * to CE address space
 		 */
-		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
-						     address);
+		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
 		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
 					    0);
@@ -899,7 +1008,7 @@
 	 * to
 	 *    CE address space
 	 */
-	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
 	remaining_bytes = orig_nbytes;
 	ce_data = ce_data_base;
@@ -1331,20 +1440,42 @@
 {
 	u32 val;
 
-	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-	val &= ~CORE_CTRL_PCIE_REG_31_MASK;
-
-	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					CORE_CTRL_ADDRESS);
+		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+				   CORE_CTRL_ADDRESS, val);
+		break;
+	case ATH10K_HW_QCA99X0:
+		/* TODO: Find appropriate register configuration for QCA99X0
+		 *  to mask irq/MSI.
+		 */
+		break;
+	}
 }
 
 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
 {
 	u32 val;
 
-	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-	val |= CORE_CTRL_PCIE_REG_31_MASK;
-
-	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+	switch (ar->hw_rev) {
+	case ATH10K_HW_QCA988X:
+	case ATH10K_HW_QCA6174:
+		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+					CORE_CTRL_ADDRESS);
+		val |= CORE_CTRL_PCIE_REG_31_MASK;
+		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+				   CORE_CTRL_ADDRESS, val);
+		break;
+	case ATH10K_HW_QCA99X0:
+		/* TODO: Find appropriate register configuration for QCA99X0
+		 *  to unmask irq/MSI.
+		 */
+		break;
+	}
 }
 
 static void ath10k_pci_irq_disable(struct ath10k *ar)
@@ -1506,7 +1637,7 @@
 	 * masked. To prevent the device from asserting the interrupt reset it
 	 * before proceeding with cleanup.
 	 */
-	ath10k_pci_warm_reset(ar);
+	ath10k_pci_safe_chip_reset(ar);
 
 	ath10k_pci_irq_disable(ar);
 	ath10k_pci_irq_sync(ar);
@@ -1546,8 +1677,10 @@
 
 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
 	ret = dma_mapping_error(ar->dev, req_paddr);
-	if (ret)
+	if (ret) {
+		ret = -EIO;
 		goto err_dma;
+	}
 
 	if (resp && resp_len) {
 		tresp = kzalloc(*resp_len, GFP_KERNEL);
@@ -1559,8 +1692,10 @@
 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
 					    DMA_FROM_DEVICE);
 		ret = dma_mapping_error(ar->dev, resp_paddr);
-		if (ret)
+		if (ret) {
+			ret = -EIO;
 			goto err_req;
+		}
 
 		xfer.wait_for_resp = true;
 		xfer.resp_len = 0;
@@ -1687,7 +1822,9 @@
 
 	switch (ar_pci->pdev->device) {
 	case QCA988X_2_0_DEVICE_ID:
+	case QCA99X0_2_0_DEVICE_ID:
 		return 1;
+	case QCA6164_2_1_DEVICE_ID:
 	case QCA6174_2_1_DEVICE_ID:
 		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
 		case QCA6174_HW_1_0_CHIP_ID_REV:
@@ -1757,7 +1894,8 @@
 
 	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
 					target_ce_config_wlan,
-					sizeof(target_ce_config_wlan));
+					sizeof(struct ce_pipe_config) *
+					NUM_TARGET_CE_CONFIG_WLAN);
 
 	if (ret != 0) {
 		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1871,7 +2009,7 @@
 		}
 
 		/* Last CE is Diagnostic Window */
-		if (i == CE_COUNT - 1) {
+		if (i == CE_DIAG_PIPE) {
 			ar_pci->ce_diag = pipe->ce_hdl;
 			continue;
 		}
@@ -2016,6 +2154,18 @@
 	return 0;
 }
 
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+	if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+		return ath10k_pci_warm_reset(ar);
+	} else if (QCA_REV_99X0(ar)) {
+		ath10k_pci_irq_disable(ar);
+		return ath10k_pci_qca99x0_chip_reset(ar);
+	} else {
+		return -ENOTSUPP;
+	}
+}
+
 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
 {
 	int i, ret;
@@ -2122,12 +2272,38 @@
 	return 0;
 }
 
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+	int ret;
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+	ret = ath10k_pci_cold_reset(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+		return ret;
+	}
+
+	ret = ath10k_pci_wait_for_target_init(ar);
+	if (ret) {
+		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+			    ret);
+		return ret;
+	}
+
+	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+	return 0;
+}
+
 static int ath10k_pci_chip_reset(struct ath10k *ar)
 {
 	if (QCA_REV_988X(ar))
 		return ath10k_pci_qca988x_chip_reset(ar);
 	else if (QCA_REV_6174(ar))
 		return ath10k_pci_qca6174_chip_reset(ar);
+	else if (QCA_REV_99X0(ar))
+		return ath10k_pci_qca99x0_chip_reset(ar);
 	else
 		return -ENOTSUPP;
 }
@@ -2602,7 +2778,6 @@
 
 static int ath10k_pci_cold_reset(struct ath10k *ar)
 {
-	int i;
 	u32 val;
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
@@ -2618,23 +2793,18 @@
 	val |= 1;
 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-					  RTC_STATE_COLD_RESET_MASK)
-			break;
-		msleep(1);
-	}
+	/* After writing into SOC_GLOBAL_RESET to put the device into
+	 * reset and then pulling it out of reset, PCIe may not be
+	 * stable for immediate register accesses and can cause bus
+	 * errors. Add a delay before any PCIe access to avoid this.
+	 */
+	msleep(20);
 
 	/* Pull Target, including PCIe, out of RESET. */
 	val &= ~1;
 	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
-	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
-					    RTC_STATE_COLD_RESET_MASK))
-			break;
-		msleep(1);
-	}
+	msleep(20);
 
 	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
 
@@ -2679,6 +2849,7 @@
 	pci_set_master(pdev);
 
 	/* Arrange for access to Target SoC registers. */
+	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
 	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
 	if (!ar_pci->mem) {
 		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@@ -2742,9 +2913,13 @@
 	case QCA988X_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA988X;
 		break;
+	case QCA6164_2_1_DEVICE_ID:
 	case QCA6174_2_1_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA6174;
 		break;
+	case QCA99X0_2_0_DEVICE_ID:
+		hw_rev = ATH10K_HW_QCA99X0;
+		break;
 	default:
 		WARN_ON(1);
 		return -ENOTSUPP;
@@ -2763,6 +2938,7 @@
 	ar_pci->pdev = pdev;
 	ar_pci->dev = &pdev->dev;
 	ar_pci->ar = ar;
+	ar->dev_id = pci_dev->device;
 
 	if (pdev->subsystem_vendor || pdev->subsystem_device)
 		scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id),
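
The TARG_CPU_SPACE_TO_CE_SPACE() macro is replaced by ath10k_pci_targ_cpu_to_ce_addr() above because qca99x0 takes the window base from PCIE_BAR_REG_ADDRESS rather than from CORE_CTRL; in both cases the low 20 bits of the target address are kept and bit 20 is set. A worked example with made-up numbers: if the CORE_CTRL read yields 0x2 in its low 11 bits, the window base is 0x2 << 21 = 0x00400000 and a target CPU address of 0x0040e5c4 maps to 0x00400000 | 0x100000 | 0x0e5c4 = 0x0050e5c4. The same math as a stand-alone snippet (illustrative helper, mirroring the qca988x/qca6174 branch):

static u32 example_ce_addr(u32 core_ctrl, u32 targ_addr)
{
	u32 base = (core_ctrl & 0x7ff) << 21;

	return base | 0x100000 | (targ_addr & 0xfffff);
}
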
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d7696dd..8d364fb 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -162,6 +162,7 @@
 	struct device *dev;
 	struct ath10k *ar;
 	void __iomem *mem;
+	size_t mem_len;
 
 	/*
 	 * Number of MSI interrupts granted, 0 --> using legacy PCI line
@@ -236,18 +237,6 @@
 #define CDC_WAR_MAGIC_STR   0xceef0000
 #define CDC_WAR_DATA_CE     4
 
-/*
- * TODO: Should be a function call specific to each Target-type.
- * This convoluted macro converts from Target CPU Virtual Address Space to CE
- * Address Space. As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)			\
-	(((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS |		\
-	  CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |				\
-	 0x100000 | ((addr) & 0xfffff))
-
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 492b5a5..ca8d168 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -422,6 +422,12 @@
 #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
 #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
 
+#define RX_MSDU_START_INFO2_DA_IDX_MASK         0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB          0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB  16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST      BIT(11)
+
 /* The decapped header (rx_hdr_status) contains the following:
  *  a) 802.11 header
  *  [padding to 4 bytes]
@@ -449,12 +455,23 @@
 	RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
 
-struct rx_msdu_start {
+struct rx_msdu_start_common {
 	__le32 info0; /* %RX_MSDU_START_INFO0_ */
 	__le32 flow_id_crc;
 	__le32 info1; /* %RX_MSDU_START_INFO1_ */
 } __packed;
 
+struct rx_msdu_start_qca99x0 {
+	__le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start {
+	struct rx_msdu_start_common common;
+	union {
+		struct rx_msdu_start_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
 /*
  * msdu_length
  *		MSDU length in bytes after decapsulation.  This field is
@@ -540,7 +557,7 @@
 #define RX_MSDU_END_INFO0_PRE_DELIM_ERR             (1 << 30)
 #define RX_MSDU_END_INFO0_RESERVED_3B               (1 << 31)
 
-struct rx_msdu_end {
+struct rx_msdu_end_common {
 	__le16 ip_hdr_cksum;
 	__le16 tcp_hdr_cksum;
 	u8 key_id_octet;
@@ -549,6 +566,36 @@
 	__le32 info0;
 } __packed;
 
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK     0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB      0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK   0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB    10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK  0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB   16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE      BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK    0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB     0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK    0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB     6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK  0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB   12
+
+struct rx_msdu_end_qca99x0 {
+	__le32 ipv6_crc;
+	__le32 tcp_seq_no;
+	__le32 tcp_ack_no;
+	__le32 info1;
+	__le32 info2;
+} __packed;
+
+struct rx_msdu_end {
+	struct rx_msdu_end_common common;
+	union {
+		struct rx_msdu_end_qca99x0 qca99x0;
+	} __packed;
+} __packed;
+
 /*
  *ip_hdr_chksum
  *		This can include the IP header checksum or the pseudo header
@@ -870,7 +917,11 @@
 #define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK (1 << 24)
 #define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL (1 << 25)
 
-#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK       0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB        2
+#define RX_PPDU_END_INFO1_BB_DATA             BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID      BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE           BIT(15)
 
 struct rx_ppdu_end_common {
 	__le32 evm_p0;
@@ -891,13 +942,13 @@
 	__le32 evm_p15;
 	__le32 tsf_timestamp;
 	__le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
 	u8 locationing_timestamp;
 	u8 phy_err_code;
 	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
 	__le32 info0; /* %RX_PPDU_END_INFO0_ */
-} __packed;
-
-struct rx_ppdu_end_qca988x {
 	__le16 bb_length;
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
@@ -909,16 +960,126 @@
 #define RX_PPDU_END_RTT_NORMAL_MODE            BIT(31)
 
 struct rx_ppdu_end_qca6174 {
+	u8 locationing_timestamp;
+	u8 phy_err_code;
+	__le16 flags; /* %RX_PPDU_END_FLAGS_ */
+	__le32 info0; /* %RX_PPDU_END_INFO0_ */
 	__le32 rtt; /* %RX_PPDU_END_RTT_ */
 	__le16 bb_length;
 	__le16 info1; /* %RX_PPDU_END_INFO1_ */
 } __packed;
 
+#define RX_PKT_END_INFO0_RX_SUCCESS              BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX     BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP     BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART        BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP      BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART         BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK       0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB        0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK         0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB          18
+#define RX_LOCATION_INFO_PKT_BW_MASK             0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB              20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB  23
+#define RX_LOCATION_INFO_CIR_STATUS              BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE       BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X     BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE            BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID       BIT(31)
+
+struct rx_pkt_end {
+	__le32 info0; /* %RX_PKT_END_INFO0_ */
+	__le32 phy_timestamp_1;
+	__le32 phy_timestamp_2;
+	__le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+	RX_PHY_PPDU_END_INFO0_ERR_RADAR           = BIT(2),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT        = BIT(3),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_NAP          = BIT(4),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING     = BIT(5),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY     = BIT(6),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE       = BIT(7),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH     = BIT(8),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART    = BIT(9),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE    = BIT(10),
+	RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER     = BIT(12),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING      = BIT(13),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC  = BIT(14),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE        = BIT(15),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH      = BIT(16),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART     = BIT(17),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE     = BIT(18),
+	RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP  = BIT(19),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_CRC          = BIT(20),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH       = BIT(21),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_RATE         = BIT(22),
+	RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF          = BIT(23),
+	RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+	RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD     = BIT(25),
+	RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN   = BIT(26),
+	RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW       = BIT(27),
+	RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC         = BIT(29),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA        = BIT(30),
+	RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG        = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP            = BIT(0),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM           = BIT(1),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM     = BIT(2),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0    = BIT(3),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+	RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63   = BIT(5),
+	RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER  = BIT(6),
+	RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP          = BIT(7),
+	RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT    = BIT(8),
+	RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK     = BIT(9),
+	RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION       = BIT(10),
+	RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK        = BIT(11),
+	RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX    = BIT(12),
+	RX_PHY_PPDU_END_INFO1_ERR_RX_CBF             = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+	__le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+	__le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK          0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB           0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK        0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB         0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK          BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID       BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID  BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL    BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC      BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE       BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+	struct rx_pkt_end rx_pkt_end;
+	struct rx_phy_ppdu_end rx_phy_ppdu_end;
+	__le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+	__le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+	__le16 bb_length;
+	__le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
 struct rx_ppdu_end {
 	struct rx_ppdu_end_common common;
 	union {
 		struct rx_ppdu_end_qca988x qca988x;
 		struct rx_ppdu_end_qca6174 qca6174;
+		struct rx_ppdu_end_qca99x0 qca99x0;
 	} __packed;
 } __packed;
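
rx_msdu_start, rx_msdu_end and rx_ppdu_end are now split into a chip-independent common part plus a per-chip union, so shared words are read through ->common and chip-specific words through the union member matching ar->hw_rev. A sketch of the intended access pattern (the helper name is made up; the field accesses follow the structures above):

static u32 example_msdu_da_idx(struct ath10k *ar,
			       const struct rx_msdu_start *ms)
{
	/* words shared by every chip generation live under ->common */
	if (__le32_to_cpu(ms->common.info1) & RX_MSDU_START_INFO1_IP_FRAG)
		return 0;

	/* chip specific words live in the union member for that chip */
	if (ar->hw_rev == ATH10K_HW_QCA99X0)
		return MS(__le32_to_cpu(ms->qca99x0.info2),
			  RX_MSDU_START_INFO2_DA_IDX);

	return 0;
}
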
 
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
index 8dcd424..4671cfb 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.c
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -57,7 +57,7 @@
 }
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
-				const struct wmi_phyerr *phyerr,
+				struct wmi_phyerr_ev_arg *phyerr,
 				const struct phyerr_fft_report *fftr,
 				size_t bin_len, u64 tsf)
 {
@@ -73,6 +73,15 @@
 	if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
 		return -EINVAL;
 
+	/* qca99x0 reports the bin size as 68 bytes (64 bytes + 4 bytes) in
+	 * report mode 2. The first 64 bytes carry the inband tones (-32 to
+	 * +31) and the last 4 bytes carry the band edge detection data (+32),
+	 * which is mainly used for radar detection. Strip the last 4 bytes
+	 * so that the bin size is valid.
+	 */
+	if (bin_len == 68)
+		bin_len -= 4;
+
 	reg0 = __le32_to_cpu(fftr->reg0);
 	reg1 = __le32_to_cpu(fftr->reg1);
 
@@ -118,15 +127,14 @@
 	fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
 	fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
 
-	freq1 = __le16_to_cpu(phyerr->freq1);
-	freq2 = __le16_to_cpu(phyerr->freq2);
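+	/* The parsed wmi_phyerr_ev_arg fields are already in host byte
+	 * order, so no __le16_to_cpu() conversion is needed here.
+	 */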
+	freq1 = phyerr->freq1;
+	freq2 = phyerr->freq2;
 	fft_sample->freq1 = __cpu_to_be16(freq1);
 	fft_sample->freq2 = __cpu_to_be16(freq2);
 
 	chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
 
-	fft_sample->noise = __cpu_to_be16(
-			__le16_to_cpu(phyerr->nf_chains[chain_idx]));
+	fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
 
 	bins = (u8 *)fftr;
 	bins += sizeof(*fftr);
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
index 042f5b3..89b0ad7 100644
--- a/drivers/net/wireless/ath/ath10k/spectral.h
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -47,7 +47,7 @@
 #ifdef CONFIG_ATH10K_DEBUGFS
 
 int ath10k_spectral_process_fft(struct ath10k *ar,
-				const struct wmi_phyerr *phyerr,
+				struct wmi_phyerr_ev_arg *phyerr,
 				const struct phyerr_fft_report *fftr,
 				size_t bin_len, u64 tsf);
 int ath10k_spectral_start(struct ath10k *ar);
@@ -59,7 +59,7 @@
 
 static inline int
 ath10k_spectral_process_fft(struct ath10k *ar,
-			    const struct wmi_phyerr *phyerr,
+			    struct wmi_phyerr_ev_arg *phyerr,
 			    const struct phyerr_fft_report *fftr,
 			    size_t bin_len, u64 tsf)
 {
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 0000000..3ca3fae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run a firmware binary with a smaller IRAM footprint by
+ * using host memory to store some of the code segments.
+ */
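+
+/* ath10k_swap_code_seg_init() allocates a DMA-coherent buffer and parses the
+ * swap binary into it, ath10k_swap_code_seg_configure() pushes the resulting
+ * segment descriptor to the target over BMI and
+ * ath10k_swap_code_seg_release() frees it all again on teardown.
+ */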
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+				     struct ath10k_swap_code_seg_info *seg_info,
+				     const void *data, size_t data_len)
+{
+	u8 *virt_addr = seg_info->virt_address[0];
+	u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+	const u8 *fw_data = data;
+	union ath10k_swap_code_seg_item *swap_item;
+	u32 length = 0;
+	u32 payload_len;
+	u32 total_payload_len = 0;
+	u32 size_left = data_len;
+
+	/* Parse the swap binary and copy its contents into host-allocated
+	 * memory. The format is a sequence of (address, length, value)
+	 * items; the last 4 bytes are the target write address. The
+	 * address field is currently unused.
+	 */
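+	/* A rough sketch of the expected layout (all fields little-endian;
+	 * the trailing item is recognised by its length field reading zero,
+	 * i.e. the start of the all-zero magic):
+	 *
+	 *   item[0]: { __le32 address, __le32 length, u8 data[length] }
+	 *   ...
+	 *   tail:    { u8 magic[12] = { 0 }, __le32 bmi_write_addr }
+	 */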
+	seg_info->target_addr = -1;
+	while (size_left >= sizeof(*swap_item)) {
+		swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+		payload_len = __le32_to_cpu(swap_item->tlv.length);
+		if ((payload_len > size_left) ||
+		    (payload_len == 0 &&
+		     size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+			ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+				   payload_len);
+			return -EINVAL;
+		}
+
+		if (payload_len == 0) {
+			if (memcmp(swap_item->tail.magic_signature, swap_magic,
+				   ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+				ath10k_err(ar, "refusing an invalid swap file\n");
+				return -EINVAL;
+			}
+			seg_info->target_addr =
+				__le32_to_cpu(swap_item->tail.bmi_write_addr);
+			break;
+		}
+
+		memcpy(virt_addr, swap_item->tlv.data, payload_len);
+		virt_addr += payload_len;
+		length = payload_len +  sizeof(struct ath10k_swap_code_seg_tlv);
+		size_left -= length;
+		fw_data += length;
+		total_payload_len += payload_len;
+	}
+
+	if (seg_info->target_addr == -1) {
+		ath10k_err(ar, "failed to parse invalid swap file\n");
+		return -EINVAL;
+	}
+	seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+	return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+			  struct ath10k_swap_code_seg_info *seg_info)
+{
+	u32 seg_size;
+
+	if (!seg_info)
+		return;
+
+	if (!seg_info->virt_address[0])
+		return;
+
+	seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+	dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+			  seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+	struct ath10k_swap_code_seg_info *seg_info;
+	void *virt_addr;
+	dma_addr_t paddr;
+
+	swap_bin_len = roundup(swap_bin_len, 2);
+	if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+		ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+			   swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+		return NULL;
+	}
+
+	seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+	if (!seg_info)
+		return NULL;
+
+	virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+				       GFP_KERNEL);
+	if (!virt_addr) {
+		ath10k_err(ar, "failed to allocate dma coherent memory\n");
+		return NULL;
+	}
+
+	seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+	seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+	seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+	seg_info->seg_hw_info.num_segs =
+			__cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+	seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+	seg_info->virt_address[0] = virt_addr;
+	seg_info->paddr[0] = paddr;
+
+	return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+				   enum ath10k_swap_code_seg_bin_type type)
+{
+	int ret;
+	struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+	switch (type) {
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+		if (!ar->swap.firmware_swap_code_seg_info)
+			return 0;
+
+		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+		seg_info = ar->swap.firmware_swap_code_seg_info;
+		break;
+	default:
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+	case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+		ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+			    type);
+		return 0;
+	}
+
+	ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+				      &seg_info->seg_hw_info,
+				      sizeof(seg_info->seg_hw_info));
+	if (ret) {
+		ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+			   ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+	ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+	ar->swap.firmware_codeswap_data = NULL;
+	ar->swap.firmware_codeswap_len = 0;
+	ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+	int ret;
+	struct ath10k_swap_code_seg_info *seg_info;
+
+	if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+		return 0;
+
+	seg_info = ath10k_swap_code_seg_alloc(ar,
+					      ar->swap.firmware_codeswap_len);
+	if (!seg_info) {
+		ath10k_err(ar, "failed to allocate fw code swap segment\n");
+		return -ENOMEM;
+	}
+
+	ret = ath10k_swap_code_seg_fill(ar, seg_info,
+					ar->swap.firmware_codeswap_data,
+					ar->swap.firmware_codeswap_len);
+
+	if (ret) {
+		ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+			    ret);
+		ath10k_swap_code_seg_free(ar, seg_info);
+		return ret;
+	}
+
+	ar->swap.firmware_swap_code_seg_info = seg_info;
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 0000000..5c89952
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX	(512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ	12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX		16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED	1
+
+struct ath10k_swap_code_seg_tlv {
+	__le32 address;
+	__le32 length;
+	u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+	u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+	__le32 bmi_write_addr;
+} __packed;
+
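+/* An item in the swap binary is viewed either as a regular TLV or, once its
+ * length field reads zero (the start of the all-zero magic), as the trailing
+ * descriptor carrying the BMI write address.
+ */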
+union ath10k_swap_code_seg_item {
+	struct ath10k_swap_code_seg_tlv tlv;
+	struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+	ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+	ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+	ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+	/* Swap binary image size */
+	__le32 swap_size;
+	__le32 num_segs;
+
+	/* Swap data size */
+	__le32 size;
+	__le32 size_log2;
+	__le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+	__le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+	struct ath10k_swap_code_seg_hw_info seg_hw_info;
+	void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+	u32 target_addr;
+	dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+				   enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index a417aae..768bef6 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -450,4 +450,7 @@
 #define QCA6174_BOARD_DATA_SZ     8192
 #define QCA6174_BOARD_EXT_DATA_SZ 0
 
+#define QCA99X0_BOARD_DATA_SZ	  12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
 #endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 826500b..e4a9c4c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -53,8 +53,6 @@
 	struct ath10k_skb_cb *skb_cb;
 	struct sk_buff *msdu;
 
-	lockdep_assert_held(&htt->tx_lock);
-
 	ath10k_dbg(ar, ATH10K_DBG_HTT,
 		   "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
 		   tx_done->msdu_id, !!tx_done->discard,
@@ -66,12 +64,19 @@
 		return;
 	}
 
+	spin_lock_bh(&htt->tx_lock);
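+	/* The caller no longer holds tx_lock, so take it here; the msdu
+	 * lookup and the id/pending-tx release below are done under it.
+	 */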
 	msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
 	if (!msdu) {
 		ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
 			    tx_done->msdu_id);
+		spin_unlock_bh(&htt->tx_lock);
 		return;
 	}
+	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+	__ath10k_htt_tx_dec_pending(htt);
+	if (htt->num_pending_tx == 0)
+		wake_up(&htt->empty_tx_wq);
+	spin_unlock_bh(&htt->tx_lock);
 
 	skb_cb = ATH10K_SKB_CB(msdu);
 
@@ -90,7 +95,7 @@
 
 	if (tx_done->discard) {
 		ieee80211_free_txskb(htt->ar->hw, msdu);
-		goto exit;
+		return;
 	}
 
 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
@@ -104,12 +109,6 @@
 
 	ieee80211_tx_status(htt->ar->hw, msdu);
 	/* we do not own the msdu anymore */
-
-exit:
-	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-	__ath10k_htt_tx_dec_pending(htt);
-	if (htt->num_pending_tx == 0)
-		wake_up(&htt->empty_tx_wq);
 }
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -147,9 +146,9 @@
 static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
 				       const u8 *addr, bool expect_mapped)
 {
-	int ret;
+	long time_left;
 
-	ret = wait_event_timeout(ar->peer_mapping_wq, ({
+	time_left = wait_event_timeout(ar->peer_mapping_wq, ({
 			bool mapped;
 
 			spin_lock_bh(&ar->data_lock);
@@ -160,7 +159,7 @@
 			 test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
 		}), 3*HZ);
 
-	if (ret <= 0)
+	if (time_left == 0)
 		return -ETIMEDOUT;
 
 	return 0;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 47fe2e7..248ffc3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -37,8 +37,10 @@
 			      struct wmi_peer_kick_ev_arg *arg);
 	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
 			 struct wmi_swba_ev_arg *arg);
-	int (*pull_phyerr)(struct ath10k *ar, struct sk_buff *skb,
-			   struct wmi_phyerr_ev_arg *arg);
+	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
+			       struct wmi_phyerr_hdr_arg *arg);
+	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
+			   int left_len, struct wmi_phyerr_ev_arg *arg);
 	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
 			    struct wmi_svc_rdy_ev_arg *arg);
 	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
@@ -49,6 +51,7 @@
 			    struct wmi_roam_ev_arg *arg);
 	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
 			      struct wmi_wow_ev_arg *arg);
+	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
 
 	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
 	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
@@ -260,13 +263,23 @@
 }
 
 static inline int
-ath10k_wmi_pull_phyerr(struct ath10k *ar, struct sk_buff *skb,
-		       struct wmi_phyerr_ev_arg *arg)
+ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
+			   struct wmi_phyerr_hdr_arg *arg)
+{
+	if (!ar->wmi.ops->pull_phyerr_hdr)
+		return -EOPNOTSUPP;
+
+	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
+		       int left_len, struct wmi_phyerr_ev_arg *arg)
 {
 	if (!ar->wmi.ops->pull_phyerr)
 		return -EOPNOTSUPP;
 
-	return ar->wmi.ops->pull_phyerr(ar, skb, arg);
+	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
 }
 
 static inline int
@@ -319,6 +332,15 @@
 	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
 }
 
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+	if (!ar->wmi.ops->get_txbf_conf_scheme)
+		return WMI_TXBF_CONF_UNSUPPORTED;
+
+	return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
 static inline int
 ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
 {
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 8fdba38..b5849b3 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -377,12 +377,34 @@
 		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
 		   pause_id, action, vdev_map, peer_id, tid_map);
 
-	for (vdev_id = 0; vdev_map; vdev_id++) {
-		if (!(vdev_map & BIT(vdev_id)))
-			continue;
+	switch (pause_id) {
+	case WMI_TLV_TX_PAUSE_ID_MCC:
+	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PS:
+	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+		for (vdev_id = 0; vdev_map; vdev_id++) {
+			if (!(vdev_map & BIT(vdev_id)))
+				continue;
 
-		vdev_map &= ~BIT(vdev_id);
-		ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+			vdev_map &= ~BIT(vdev_id);
+			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+							action);
+		}
+		break;
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+	case WMI_TLV_TX_PAUSE_ID_HOST:
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unsupported tx pause id %d\n",
+			   pause_id);
+		break;
+	default:
+		ath10k_dbg(ar, ATH10K_DBG_MAC,
+			   "mac ignoring unknown tx pause vdev %d\n",
+			   pause_id);
+		break;
 	}
 
 	kfree(tb);
@@ -497,7 +519,7 @@
 		break;
 	case WMI_TLV_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_TLV_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -709,6 +731,8 @@
 					 const void *ptr, void *data)
 {
 	struct wmi_tlv_swba_parse *swba = data;
+	struct wmi_tim_info_arg *tim_info_arg;
+	const struct wmi_tim_info *tim_info_ev = ptr;
 
 	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
 		return -EPROTO;
@@ -716,7 +740,21 @@
 	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
 		return -ENOBUFS;
 
-	swba->arg->tim_info[swba->n_tim++] = ptr;
+	if (__le32_to_cpu(tim_info_ev->tim_len) >
+	     sizeof(tim_info_ev->tim_bitmap)) {
+		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+		return -EPROTO;
+	}
+
+	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+	tim_info_arg->tim_len = tim_info_ev->tim_len;
+	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+	swba->n_tim++;
+
 	return 0;
 }
 
@@ -800,9 +838,9 @@
 	return 0;
 }
 
-static int ath10k_wmi_tlv_op_pull_phyerr_ev(struct ath10k *ar,
-					    struct sk_buff *skb,
-					    struct wmi_phyerr_ev_arg *arg)
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+						struct sk_buff *skb,
+						struct wmi_phyerr_hdr_arg *arg)
 {
 	const void **tb;
 	const struct wmi_tlv_phyerr_ev *ev;
@@ -824,10 +862,10 @@
 		return -EPROTO;
 	}
 
-	arg->num_phyerrs  = ev->num_phyerrs;
-	arg->tsf_l32 = ev->tsf_l32;
-	arg->tsf_u32 = ev->tsf_u32;
-	arg->buf_len = ev->buf_len;
+	arg->num_phyerrs  = __le32_to_cpu(ev->num_phyerrs);
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = __le32_to_cpu(ev->buf_len);
 	arg->phyerrs = phyerrs;
 
 	kfree(tb);
@@ -1241,6 +1279,11 @@
 	return skb;
 }
 
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+	return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
 static struct sk_buff *
 ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
 				     u32 param_value)
@@ -1335,7 +1378,7 @@
 	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
 	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
 	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
-	cfg->rx_decap_mode = __cpu_to_le32(1);
+	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
 	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
 	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
@@ -3151,6 +3194,38 @@
 	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
 	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
 	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -3204,6 +3279,48 @@
 	.burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
 	.burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -3262,6 +3379,22 @@
 	.tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 					WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
@@ -3274,12 +3407,14 @@
 	.pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
-	.pull_phyerr = ath10k_wmi_tlv_op_pull_phyerr_ev,
+	.pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
 	.pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
 	.pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
 	.pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+	.get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
 
 	.gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
 	.gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 6c046c2..ce01107 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -148,6 +148,48 @@
 	.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.X WMI cmd track */
@@ -271,6 +313,48 @@
 	.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.2.4 WMI cmd track */
@@ -393,6 +477,231 @@
 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+	.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+	.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+	.init_cmdid = WMI_10_4_INIT_CMDID,
+	.start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+	.stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+	.scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+	.scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+	.pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+	.pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+	.pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+	.pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+	.pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+	.pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+	.pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+	.pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+	.pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+	.pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	.pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+	.pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+	.vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+	.vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+	.vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+	.vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+	.vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+	.vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+	.vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+	.vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+	.vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+	.peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+	.peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+	.peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+	.peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+	.peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+	.peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+	.peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+	.peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+	.bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+	.pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+	.bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+	.bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+	.prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+	.mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+	.prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+	.addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+	.addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+	.addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+	.delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+	.addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+	.send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+	.sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+	.sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+	.sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+	.pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+	.pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+	.roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+	.roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+	.roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+	.roam_scan_rssi_change_threshold =
+				WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	.roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+	.ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+	.ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+	.p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+	.p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+	.p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+	.p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+	.p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+	.ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+	.ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+	.peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+	.wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+	.wlan_profile_set_hist_intvl_cmdid =
+				WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	.wlan_profile_get_profile_data_cmdid =
+				WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	.wlan_profile_enable_profile_id_cmdid =
+				WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	.wlan_profile_list_profile_id_cmdid =
+				WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	.pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+	.pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+	.add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+	.rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+	.wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+	.wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+	.wow_enable_disable_wake_event_cmdid =
+				WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	.wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+	.wow_hostwakeup_from_sleep_cmdid =
+				WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	.rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+	.rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+	.vdev_spectral_scan_configure_cmdid =
+				WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	.vdev_spectral_scan_enable_cmdid =
+				WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	.request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+	.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+	.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+	.csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+	.csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+	.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+	.echo_cmdid = WMI_10_4_ECHO_CMDID,
+	.pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+	.dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+	.pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+	.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+	.vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+	.force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+	.gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+	.gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+	.pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+	.vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+	.tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+	.tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+	.adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+	.vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+	.vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+	.wlan_peer_caching_add_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+	.wlan_peer_caching_evict_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+	.wlan_peer_caching_restore_peer_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+	.wlan_peer_caching_print_all_peers_info_cmdid =
+			WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+	.peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+	.peer_add_proxy_sta_entry_cmdid =
+			WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+	.rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+	.oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+	.nan_cmdid = WMI_10_4_NAN_CMDID,
+	.vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+	.qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+	.pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+	.pdev_smart_ant_set_rx_antenna_cmdid =
+			WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	.peer_smart_ant_set_tx_antenna_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	.peer_smart_ant_set_train_info_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	.peer_smart_ant_set_node_config_ops_cmdid =
+			WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	.pdev_set_antenna_switch_table_cmdid =
+			WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	.pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+	.pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	.pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+	.pdev_ratepwr_chainmsk_table_cmdid =
+			WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	.pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+	.tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+	.fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+	.vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+	.peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+	.pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+	.pdev_get_ani_ofdm_config_cmdid =
+			WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+	.pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+	.pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+	.pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+	.pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+	.vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+	.pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+	.vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+	.vdev_filter_neighbor_rx_packets_cmdid =
+			WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+	.mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+	.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+	.pdev_bss_chan_info_request_cmdid =
+			WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
 };
 
 /* MAIN WMI VDEV param map */
@@ -452,6 +761,22 @@
 	.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 					WMI_VDEV_PARAM_UNSUPPORTED,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -511,6 +836,22 @@
 	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -569,6 +910,97 @@
 	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+	.cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+	.mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+	.early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+	.proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+	.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+	.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+	.rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+	.fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	.beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+	.listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+	.multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+	.mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+	.slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+	.preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+	.swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+	.wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+	.wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+	.wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+	.dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+	.wmi_vdev_oc_scheduler_air_time_limit =
+	       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	.wds = WMI_10_4_VDEV_PARAM_WDS,
+	.atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+	.bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+	.bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+	.bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+	.feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+	.chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+	.chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+	.disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+	.sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+	.mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+	.protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+	.fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+	.sgi = WMI_10_4_VDEV_PARAM_SGI,
+	.ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+	.tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+	.rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+	.intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+	.def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+	.nss = WMI_10_4_VDEV_PARAM_NSS,
+	.bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+	.mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+	.mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+	.dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+	.unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	.ap_keepalive_min_idle_inactive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_idle_inactive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	.ap_keepalive_max_unresponsive_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	.ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+	.mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+	.enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+	.txbf = WMI_10_4_VDEV_PARAM_TXBF,
+	.packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+	.drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+	.tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+	.ap_detect_out_of_sync_sleeping_sta_time_secs =
+	       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	.rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+	.cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+	.mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+	.rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+	.vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+	.vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+	.early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	.early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	.early_rx_bmiss_sample_cycle =
+	       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	.early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	.early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	.early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	.proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+	.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+	.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+	.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -621,6 +1053,48 @@
 	.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
 	.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
 	.cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -674,6 +1148,48 @@
 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -727,6 +1243,48 @@
 	.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
 	.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
 	.cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+	.peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+	.block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+	.en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+	.noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+	.set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+	.atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+	.ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+	.mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+	.sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+	.cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+	.rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+	.pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+	.arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 /* firmware 10.2 specific mappings */
@@ -849,6 +1407,139 @@
 	.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
 	.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
 	.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+	.nan_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+	.tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+	.rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	.txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	.txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	.txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	.beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	.beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	.resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	.protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+	.dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+	.non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	.agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+	.sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+	.ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	.ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+	.ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	.ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	.ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	.ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	.ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	.ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	.ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+	.ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	.l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+	.dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+	.pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	.pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	.pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	.pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	.pdev_stats_update_period =
+			WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	.vdev_stats_update_period =
+			WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	.peer_stats_update_period =
+			WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	.bcnflt_stats_update_period =
+			WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	.pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+	.arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+	.dcs = WMI_10_4_PDEV_PARAM_DCS,
+	.ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+	.ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+	.ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	.ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+	.ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+	.dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+	.proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+	.idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+	.power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+	.fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+	.burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+	.burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+	.cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+	.aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+	.rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+	.smart_antenna_default_antenna =
+			WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	.igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	.igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+	.antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+	.rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+	.set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+	.proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+	.set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	.set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	.remove_mcast2ucast_buffer =
+			WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	.peer_sta_ps_statechg_enable =
+			WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+	.igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+	.block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+	.set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+	.set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+	.set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+	.txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+	.set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	.set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+	.en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+	.mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+	.noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+	.noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+	.dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+	.set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+	.atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+	.atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+	.ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+	.mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+	.sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+	.signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+	.signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+	.enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+	.enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+	.cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+	.rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+	.pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+	.wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+	.arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+	.arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
 };
 
 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1232,6 +1923,8 @@
 			return "completed [preempted]";
 		case WMI_SCAN_REASON_TIMEDOUT:
 			return "completed [timedout]";
+		case WMI_SCAN_REASON_INTERNAL_FAILURE:
+			return "completed [internal err]";
 		case WMI_SCAN_REASON_MAX:
 			break;
 		}
@@ -1246,6 +1939,10 @@
 		return "preempted";
 	case WMI_SCAN_EVENT_START_FAILED:
 		return "start failed";
+	case WMI_SCAN_EVENT_RESTARTED:
+		return "restarted";
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+		return "foreign channel exit";
 	default:
 		return "unknown";
 	}
@@ -1321,6 +2018,8 @@
 		break;
 	case WMI_SCAN_EVENT_DEQUEUED:
 	case WMI_SCAN_EVENT_PREEMPTED:
+	case WMI_SCAN_EVENT_RESTARTED:
+	case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
 	default:
 		break;
 	}
@@ -1433,6 +2132,40 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct wmi_mgmt_rx_ev_arg *arg)
+{
+	struct wmi_10_4_mgmt_rx_event *ev;
+	struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+	size_t pull_len;
+	u32 msdu_len;
+
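+	/* 10.4 reports a fixed-size mgmt rx header. Copy its fields for the
+	 * common handler and trim any firmware padding past the MSDU.
+	 */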
+	ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+	ev_hdr = &ev->hdr;
+	pull_len = sizeof(*ev);
+
+	if (skb->len < pull_len)
+		return -EPROTO;
+
+	skb_pull(skb, pull_len);
+	arg->channel = ev_hdr->channel;
+	arg->buf_len = ev_hdr->buf_len;
+	arg->status = ev_hdr->status;
+	arg->snr = ev_hdr->snr;
+	arg->phy_mode = ev_hdr->phy_mode;
+	arg->rate = ev_hdr->rate;
+
+	msdu_len = __le32_to_cpu(arg->buf_len);
+	if (skb->len < msdu_len)
+		return -EPROTO;
+
+	/* Make sure bytes added for padding are removed. */
+	skb_trim(skb, msdu_len);
+
+	return 0;
+}
+
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_mgmt_rx_ev_arg arg = {};
@@ -1593,6 +2326,29 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+					      struct sk_buff *skb,
+					      struct wmi_ch_info_ev_arg *arg)
+{
+	struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->err_code = ev->err_code;
+	arg->freq = ev->freq;
+	arg->cmd_flags = ev->cmd_flags;
+	arg->noise_floor = ev->noise_floor;
+	arg->rx_clear_count = ev->rx_clear_count;
+	arg->cycle_count = ev->cycle_count;
+	arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+	arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+	arg->rx_frame_count = ev->rx_frame_count;
+
+	return 0;
+}
+
 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_ch_info_ev_arg arg = {};
@@ -1656,8 +2412,10 @@
 		ar->ch_info_can_report_survey = true;
 	}
 
-	ar->survey_last_rx_clear_count = rx_clear_count;
-	ar->survey_last_cycle_count = cycle_count;
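+	/* Only latch the cumulative counters from the final (complete)
+	 * report; pre-complete snapshots would skew the next survey delta.
+	 */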
+	if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+		ar->survey_last_rx_clear_count = rx_clear_count;
+		ar->survey_last_cycle_count = cycle_count;
+	}
 
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -2149,33 +2907,42 @@
 static void ath10k_wmi_update_tim(struct ath10k *ar,
 				  struct ath10k_vif *arvif,
 				  struct sk_buff *bcn,
-				  const struct wmi_tim_info *tim_info)
+				  const struct wmi_tim_info_arg *tim_info)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
 	struct ieee80211_tim_ie *tim;
 	u8 *ies, *ie;
 	u8 ie_len, pvm_len;
 	__le32 t;
-	u32 v;
+	u32 v, tim_len;
+
+	/* When FW reports 0 in tim_len, ensure at least the first byte
+	 * of tim_bitmap is considered for the pvm calculation.
+	 */
+	tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
 
 	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
 	 * we must copy the bitmap upon change and reuse it later */
 	if (__le32_to_cpu(tim_info->tim_changed)) {
 		int i;
 
-		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
-			     sizeof(tim_info->tim_bitmap));
+		if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+			ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+				    tim_len, sizeof(arvif->u.ap.tim_bitmap));
+			tim_len = sizeof(arvif->u.ap.tim_bitmap);
+		}
 
-		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+		for (i = 0; i < tim_len; i++) {
 			t = tim_info->tim_bitmap[i / 4];
 			v = __le32_to_cpu(t);
 			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
 		}
 
-		/* FW reports either length 0 or 16
-		 * so we calculate this on our own */
+		/* FW reports either length 0 or a length based on the max
+		 * supported stations, so we calculate this on our own.
+		 */
 		arvif->u.ap.tim_len = 0;
-		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+		for (i = 0; i < tim_len; i++)
 			if (arvif->u.ap.tim_bitmap[i])
 				arvif->u.ap.tim_len = i;
 
@@ -2199,7 +2966,7 @@
 	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
 
 	if (pvm_len < arvif->u.ap.tim_len) {
-		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+		int expand_size = tim_len - pvm_len;
 		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
 		void *next_ie = ie + 2 + ie_len;
 
@@ -2214,7 +2981,7 @@
 		}
 	}
 
-	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+	if (pvm_len > tim_len) {
 		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
 		return;
 	}
@@ -2278,7 +3045,21 @@
 		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
 			break;
 
-		arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
 		arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
 		i++;
 	}
@@ -2286,12 +3067,74 @@
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+					   struct sk_buff *skb,
+					   struct wmi_swba_ev_arg *arg)
+{
+	struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+	u32 map, tim_len;
+	size_t i;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	skb_pull(skb, sizeof(*ev));
+	arg->vdev_map = ev->vdev_map;
+
+	for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+		if (!(map & BIT(0)))
+			continue;
+
+		/* If this happens, the firmware has changed and ath10k
+		 * should update the max size of the tim_info array.
+		 */
+		if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+			break;
+
+		if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+		      sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+			ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+			return -EPROTO;
+		}
+
+		tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+		if (tim_len) {
+			/* Exclude 4 byte guard length */
+			tim_len -= 4;
+			arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+		} else {
+			arg->tim_info[i].tim_len = 0;
+		}
+
+		arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+		arg->tim_info[i].tim_bitmap =
+				ev->bcn_info[i].tim_info.tim_bitmap;
+		arg->tim_info[i].tim_changed =
+				ev->bcn_info[i].tim_info.tim_changed;
+		arg->tim_info[i].tim_num_ps_pending =
+				ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+		/* 10.4 firmware doesn't have p2p support. Notice of absence
+		 * info can be ignored for now.
+		 */
+
+		i++;
+	}
+
+	return 0;
+}
+
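+/* For 10.4 firmware the txbf configuration is applied before peer assoc
+ * (WMI_TXBF_CONF_BEFORE_ASSOC).
+ */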
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+	return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct wmi_swba_ev_arg arg = {};
 	u32 map;
 	int i = -1;
-	const struct wmi_tim_info *tim_info;
+	const struct wmi_tim_info_arg *tim_info;
 	const struct wmi_p2p_noa_info *noa_info;
 	struct ath10k_vif *arvif;
 	struct sk_buff *bcn;
@@ -2320,7 +3163,7 @@
 			break;
 		}
 
-		tim_info = arg.tim_info[i];
+		tim_info = &arg.tim_info[i];
 		noa_info = arg.noa_info[i];
 
 		ath10k_dbg(ar, ATH10K_DBG_MGMT,
@@ -2335,6 +3178,10 @@
 			   __le32_to_cpu(tim_info->tim_bitmap[1]),
 			   __le32_to_cpu(tim_info->tim_bitmap[0]));
 
+		/* TODO: Only the first 4 words of tim_bitmap are dumped.
+		 * Extend the debug code to dump the full tim_bitmap.
+		 */
+
 		arvif = ath10k_get_arvif(ar, vdev_id);
 		if (arvif == NULL) {
 			ath10k_warn(ar, "no vif for vdev_id %d found\n",
@@ -2391,6 +3238,7 @@
 				ath10k_warn(ar, "failed to map beacon: %d\n",
 					    ret);
 				dev_kfree_skb_any(bcn);
+				ret = -EIO;
 				goto skip;
 			}
 
@@ -2424,7 +3272,7 @@
 }
 
 static void ath10k_dfs_radar_report(struct ath10k *ar,
-				    const struct wmi_phyerr *phyerr,
+				    struct wmi_phyerr_ev_arg *phyerr,
 				    const struct phyerr_radar_report *rr,
 				    u64 tsf)
 {
@@ -2468,7 +3316,7 @@
 	}
 
 	/* report event to DFS pattern detector */
-	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
+	tsf32l = phyerr->tsf_timestamp;
 	tsf64 = tsf & (~0xFFFFFFFFULL);
 	tsf64 |= tsf32l;
 
@@ -2513,7 +3361,7 @@
 }
 
 static int ath10k_dfs_fft_report(struct ath10k *ar,
-				 const struct wmi_phyerr *phyerr,
+				 struct wmi_phyerr_ev_arg *phyerr,
 				 const struct phyerr_fft_report *fftr,
 				 u64 tsf)
 {
@@ -2551,7 +3399,7 @@
 }
 
 void ath10k_wmi_event_dfs(struct ath10k *ar,
-			  const struct wmi_phyerr *phyerr,
+			  struct wmi_phyerr_ev_arg *phyerr,
 			  u64 tsf)
 {
 	int buf_len, tlv_len, res, i = 0;
@@ -2560,11 +3408,11 @@
 	const struct phyerr_fft_report *fftr;
 	const u8 *tlv_buf;
 
-	buf_len = __le32_to_cpu(phyerr->buf_len);
+	buf_len = phyerr->buf_len;
 	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
 		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
 		   phyerr->phy_err_code, phyerr->rssi_combined,
-		   __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
+		   phyerr->tsf_timestamp, tsf, buf_len);
 
 	/* Skip event if DFS disabled */
 	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
@@ -2616,7 +3464,7 @@
 }
 
 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
-				    const struct wmi_phyerr *phyerr,
+				    struct wmi_phyerr_ev_arg *phyerr,
 				    u64 tsf)
 {
 	int buf_len, tlv_len, res, i = 0;
@@ -2625,7 +3473,7 @@
 	const struct phyerr_fft_report *fftr;
 	size_t fftr_len;
 
-	buf_len = __le32_to_cpu(phyerr->buf_len);
+	buf_len = phyerr->buf_len;
 
 	while (i < buf_len) {
 		if (i + sizeof(*tlv) > buf_len) {
@@ -2658,7 +3506,7 @@
 							  fftr, fftr_len,
 							  tsf);
 			if (res < 0) {
-				ath10k_warn(ar, "failed to process fft report: %d\n",
+				ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
 					    res);
 				return;
 			}
@@ -2669,65 +3517,169 @@
 	}
 }
 
-static int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, struct sk_buff *skb,
-					struct wmi_phyerr_ev_arg *arg)
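+/* Event-level parsing (phyerr count, TSF, total buffer length) is split
+ * from per-record parsing so that 10.4, which uses a different record
+ * layout, can share the dispatch loop in ath10k_wmi_event_phyerr().
+ */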
+static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+					    struct sk_buff *skb,
+					    struct wmi_phyerr_hdr_arg *arg)
 {
 	struct wmi_phyerr_event *ev = (void *)skb->data;
 
 	if (skb->len < sizeof(*ev))
 		return -EPROTO;
 
-	arg->num_phyerrs = ev->num_phyerrs;
-	arg->tsf_l32 = ev->tsf_l32;
-	arg->tsf_u32 = ev->tsf_u32;
-	arg->buf_len = __cpu_to_le32(skb->len - sizeof(*ev));
+	arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = skb->len - sizeof(*ev);
 	arg->phyerrs = ev->phyerrs;
 
 	return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+						 struct sk_buff *skb,
+						 struct wmi_phyerr_hdr_arg *arg)
+{
+	struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
+
+	if (skb->len < sizeof(*ev))
+		return -EPROTO;
+
+	/* 10.4 firmware always reports only one phyerr */
+	arg->num_phyerrs = 1;
+
+	arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+	arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+	arg->buf_len = skb->len;
+	arg->phyerrs = skb->data;
+
+	return 0;
+}
+
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
+				 const void *phyerr_buf,
+				 int left_len,
+				 struct wmi_phyerr_ev_arg *arg)
+{
+	const struct wmi_phyerr *phyerr = phyerr_buf;
+	int i;
+
+	if (left_len < sizeof(*phyerr)) {
+		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+			    left_len, sizeof(*phyerr));
+		return -EINVAL;
+	}
+
+	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+	arg->freq1 = __le16_to_cpu(phyerr->freq1);
+	arg->freq2 = __le16_to_cpu(phyerr->freq2);
+	arg->rssi_combined = phyerr->rssi_combined;
+	arg->chan_width_mhz = phyerr->chan_width_mhz;
+	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+	arg->buf = phyerr->buf;
+	arg->hdr_len = sizeof(*phyerr);
+
+	for (i = 0; i < 4; i++)
+		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
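+	/* Translate the firmware's generic PHY error code into the common
+	 * value used by the DFS and spectral scan handlers.
+	 */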
+	switch (phyerr->phy_err_code) {
+	case PHY_ERROR_GEN_SPECTRAL_SCAN:
+		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+		break;
+	case PHY_ERROR_GEN_FALSE_RADAR_EXT:
+		arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
+		break;
+	case PHY_ERROR_GEN_RADAR:
+		arg->phy_err_code = PHY_ERROR_RADAR;
+		break;
+	default:
+		arg->phy_err_code = PHY_ERROR_UNKNOWN;
+		break;
+	}
+
+	return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
+					     const void *phyerr_buf,
+					     int left_len,
+					     struct wmi_phyerr_ev_arg *arg)
+{
+	const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
+	u32 phy_err_mask;
+	int i;
+
+	if (left_len < sizeof(*phyerr)) {
+		ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+			    left_len, sizeof(*phyerr));
+		return -EINVAL;
+	}
+
+	arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+	arg->freq1 = __le16_to_cpu(phyerr->freq1);
+	arg->freq2 = __le16_to_cpu(phyerr->freq2);
+	arg->rssi_combined = phyerr->rssi_combined;
+	arg->chan_width_mhz = phyerr->chan_width_mhz;
+	arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+	arg->buf = phyerr->buf;
+	arg->hdr_len = sizeof(*phyerr);
+
+	for (i = 0; i < 4; i++)
+		arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
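+	/* 10.4 reports a PHY error bitmask rather than a single error code;
+	 * map the relevant bits onto the common codes.
+	 */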
+	phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
+
+	if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+		arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+	else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
+		arg->phy_err_code = PHY_ERROR_RADAR;
+	else
+		arg->phy_err_code = PHY_ERROR_UNKNOWN;
+
+	return 0;
+}
+
 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
 {
-	struct wmi_phyerr_ev_arg arg = {};
-	const struct wmi_phyerr *phyerr;
+	struct wmi_phyerr_hdr_arg hdr_arg = {};
+	struct wmi_phyerr_ev_arg phyerr_arg = {};
+	const void *phyerr;
 	u32 count, i, buf_len, phy_err_code;
 	u64 tsf;
 	int left_len, ret;
 
 	ATH10K_DFS_STAT_INC(ar, phy_errors);
 
-	ret = ath10k_wmi_pull_phyerr(ar, skb, &arg);
+	ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
 	if (ret) {
-		ath10k_warn(ar, "failed to parse phyerr event: %d\n", ret);
+		ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
 		return;
 	}
 
-	left_len = __le32_to_cpu(arg.buf_len);
-
 	/* Check number of included events */
-	count = __le32_to_cpu(arg.num_phyerrs);
+	count = hdr_arg.num_phyerrs;
 
-	tsf = __le32_to_cpu(arg.tsf_u32);
+	left_len = hdr_arg.buf_len;
+
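+	/* Combine the upper and lower 32-bit TSF halves from the event
+	 * header into a single 64-bit timestamp.
+	 */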
+	tsf = hdr_arg.tsf_u32;
 	tsf <<= 32;
-	tsf |= __le32_to_cpu(arg.tsf_l32);
+	tsf |= hdr_arg.tsf_l32;
 
 	ath10k_dbg(ar, ATH10K_DBG_WMI,
 		   "wmi event phyerr count %d tsf64 0x%llX\n",
 		   count, tsf);
 
-	phyerr = arg.phyerrs;
+	phyerr = hdr_arg.phyerrs;
 	for (i = 0; i < count; i++) {
-		/* Check if we can read event header */
-		if (left_len < sizeof(*phyerr)) {
-			ath10k_warn(ar, "single event (%d) wrong head len\n",
+		ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
+		if (ret) {
+			ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
 				    i);
 			return;
 		}
 
-		left_len -= sizeof(*phyerr);
-
-		buf_len = __le32_to_cpu(phyerr->buf_len);
-		phy_err_code = phyerr->phy_err_code;
+		left_len -= phyerr_arg.hdr_len;
+		buf_len = phyerr_arg.buf_len;
+		phy_err_code = phyerr_arg.phy_err_code;
 
 		if (left_len < buf_len) {
 			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
@@ -2738,20 +3690,20 @@
 
 		switch (phy_err_code) {
 		case PHY_ERROR_RADAR:
-			ath10k_wmi_event_dfs(ar, phyerr, tsf);
+			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
 			break;
 		case PHY_ERROR_SPECTRAL_SCAN:
-			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
 			break;
 		case PHY_ERROR_FALSE_RADAR_EXT:
-			ath10k_wmi_event_dfs(ar, phyerr, tsf);
-			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
+			ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+			ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
 			break;
 		default:
 			break;
 		}
 
-		phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
+		phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
 	}
 }
 
@@ -2949,7 +3901,7 @@
 	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
 							   pool_size,
 							   &paddr,
-							   GFP_ATOMIC);
+							   GFP_KERNEL);
 	if (!ar->wmi.mem_chunks[idx].vaddr) {
 		ath10k_warn(ar, "failed to allocate memory chunk\n");
 		return -ENOMEM;
@@ -3038,12 +3990,19 @@
 	return 0;
 }
 
-void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
 {
+	struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+	struct sk_buff *skb = ar->svc_rdy_skb;
 	struct wmi_svc_rdy_ev_arg arg = {};
 	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
 	int ret;
 
+	if (!skb) {
+		ath10k_warn(ar, "invalid service ready event skb\n");
+		return;
+	}
+
 	ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
 	if (ret) {
 		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
@@ -3075,10 +4034,10 @@
 	if (ar->fw_api == 1 && ar->fw_version_build > 636)
 		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
 
-	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+	if (ar->num_rf_chains > ar->max_spatial_stream) {
 		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
-			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
-		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+			    ar->num_rf_chains, ar->max_spatial_stream);
+		ar->num_rf_chains = ar->max_spatial_stream;
 	}
 
 	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
@@ -3101,20 +4060,39 @@
 		return;
 	}
 
+	if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
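+		/* Firmware with peer caching keeps only a subset of peers
+		 * active at a time, so size the peer and TID pools from the
+		 * qcache limits rather than the defaults.
+		 */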
+		ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+				    TARGET_10_4_NUM_VDEVS;
+		ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+				       TARGET_10_4_NUM_VDEVS;
+		ar->num_tids = ar->num_active_peers * 2;
+		ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+	}
+
+	/* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+	 * and WMI_SERVICE_IRAM_TIDS, etc.
+	 */
+
 	for (i = 0; i < num_mem_reqs; ++i) {
 		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
 		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
 		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
 		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
 
-		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+		if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+			if (ar->num_active_peers)
+				num_units = ar->num_active_peers + 1;
+			else
+				num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
 			/* number of units to allocate is number of
 			 * peers, 1 extra for self peer on target */
 			/* this needs to be tied, host and target
 			 * can get out of sync */
-			num_units = TARGET_10X_NUM_PEERS + 1;
-		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
-			num_units = TARGET_10X_NUM_VDEVS + 1;
+			num_units = ar->max_num_peers + 1;
+		} else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+			num_units = ar->max_num_vdevs + 1;
+		}
 
 		ath10k_dbg(ar, ATH10K_DBG_WMI,
 			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
@@ -3144,9 +4122,17 @@
 		   __le32_to_cpu(arg.eeprom_rd),
 		   __le32_to_cpu(arg.num_mem_reqs));
 
+	dev_kfree_skb(skb);
+	ar->svc_rdy_skb = NULL;
 	complete(&ar->wmi.service_ready);
 }
 
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
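+	/* Defer to a worker so the handler can allocate the firmware's host
+	 * memory chunks with GFP_KERNEL instead of GFP_ATOMIC. The worker
+	 * owns and frees the skb.
+	 */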
+	ar->svc_rdy_skb = skb;
+	queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
 static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
 				     struct wmi_rdy_ev_arg *arg)
 {
@@ -3318,7 +4304,7 @@
 		break;
 	case WMI_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -3439,7 +4425,7 @@
 		break;
 	case WMI_10X_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_10X_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -3550,7 +4536,7 @@
 		break;
 	case WMI_10_2_SERVICE_READY_EVENTID:
 		ath10k_wmi_event_service_ready(ar, skb);
-		break;
+		return;
 	case WMI_10_2_READY_EVENTID:
 		ath10k_wmi_event_ready(ar, skb);
 		break;
@@ -3576,6 +4562,76 @@
 	dev_kfree_skb(skb);
 }
 
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+	struct wmi_cmd_hdr *cmd_hdr;
+	enum wmi_10_4_event_id id;
+
+	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+	if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+		goto out;
+
+	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+	switch (id) {
+	case WMI_10_4_MGMT_RX_EVENTID:
+		ath10k_wmi_event_mgmt_rx(ar, skb);
+		/* mgmt_rx() owns the skb now! */
+		return;
+	case WMI_10_4_ECHO_EVENTID:
+		ath10k_wmi_event_echo(ar, skb);
+		break;
+	case WMI_10_4_DEBUG_MESG_EVENTID:
+		ath10k_wmi_event_debug_mesg(ar, skb);
+		break;
+	case WMI_10_4_SERVICE_READY_EVENTID:
+		ath10k_wmi_event_service_ready(ar, skb);
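+		/* The service ready worker owns the skb now and will free
+		 * it, so don't fall through to dev_kfree_skb() below.
+		 */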
+		return;
+	case WMI_10_4_SCAN_EVENTID:
+		ath10k_wmi_event_scan(ar, skb);
+		break;
+	case WMI_10_4_CHAN_INFO_EVENTID:
+		ath10k_wmi_event_chan_info(ar, skb);
+		break;
+	case WMI_10_4_PHYERR_EVENTID:
+		ath10k_wmi_event_phyerr(ar, skb);
+		break;
+	case WMI_10_4_READY_EVENTID:
+		ath10k_wmi_event_ready(ar, skb);
+		break;
+	case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+		ath10k_wmi_event_peer_sta_kickout(ar, skb);
+		break;
+	case WMI_10_4_HOST_SWBA_EVENTID:
+		ath10k_wmi_event_host_swba(ar, skb);
+		break;
+	case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+		ath10k_wmi_event_tbttoffset_update(ar, skb);
+		break;
+	case WMI_10_4_DEBUG_PRINT_EVENTID:
+		ath10k_wmi_event_debug_print(ar, skb);
+		break;
+	case WMI_10_4_VDEV_START_RESP_EVENTID:
+		ath10k_wmi_event_vdev_start_resp(ar, skb);
+		break;
+	case WMI_10_4_VDEV_STOPPED_EVENTID:
+		ath10k_wmi_event_vdev_stopped(ar, skb);
+		break;
+	case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+		ath10k_dbg(ar, ATH10K_DBG_WMI,
+			   "received event id %d not implemented\n", id);
+		break;
+	default:
+		ath10k_warn(ar, "Unknown eventid: %d\n", id);
+		break;
+	}
+
+out:
+	dev_kfree_skb(skb);
+}
+
 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	int ret;
@@ -3762,8 +4818,7 @@
 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
-	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);
-
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	config.scan_max_pending_reqs =
 		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
 
@@ -3831,8 +4886,7 @@
 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
-
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 	config.scan_max_pending_reqs =
 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
 
@@ -3897,7 +4951,7 @@
 	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
 	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
-	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+	config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
 
 	config.scan_max_pending_reqs =
 		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
@@ -3950,6 +5004,88 @@
 	return buf;
 }
 
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+	struct wmi_init_cmd_10_4 *cmd;
+	struct sk_buff *buf;
+	struct wmi_resource_config_10_4 config = {};
+	u32 len;
+
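+	/* Peer, vdev and TID counts were sized in the service ready handler
+	 * (possibly from the qcache limits); the rest use 10.4 defaults.
+	 */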
+	config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+	config.num_peers = __cpu_to_le32(ar->max_num_peers);
+	config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+	config.num_tids = __cpu_to_le32(ar->num_tids);
+
+	config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+	config.num_offload_reorder_buffs =
+			__cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+	config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+	config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+	config.tx_chain_mask  = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+	config.rx_chain_mask  = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+	config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+	config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+	config.rx_decap_mode	    = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+	config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+	config.bmiss_offload_max_vdev =
+			__cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+	config.roam_offload_max_vdev  =
+			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+	config.roam_offload_max_ap_profiles =
+			__cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+	config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+	config.num_mcast_table_elems =
+			__cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+	config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+	config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+	config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+	config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+	config.rx_skip_defrag_timeout_dup_detection_check =
+	  __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+	config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+	config.gtk_offload_max_vdev =
+			__cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+	config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+	config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+	config.max_peer_ext_stats =
+			__cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+	config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+	config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+	config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+	config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+	config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+	config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+	config.tt_support =
+			__cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+	config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+	config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+	config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+	len = sizeof(*cmd) +
+	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+	buf = ath10k_wmi_alloc_skb(ar, len);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+	memcpy(&cmd->resource_config, &config, sizeof(config));
+	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+	return buf;
+}
+
 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
 {
 	if (arg->ie_len && !arg->ie)
@@ -4172,7 +5308,6 @@
 		| WMI_SCAN_EVENT_BSS_CHANNEL
 		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
 		| WMI_SCAN_EVENT_DEQUEUED;
-	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
 	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
 	arg->n_bssids = 1;
 	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -5170,6 +6305,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
@@ -5241,6 +6377,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5306,6 +6443,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5367,6 +6505,7 @@
 	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
 	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
 	.pull_swba = ath10k_wmi_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
 	.pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
 	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
 	.pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
@@ -5412,9 +6551,73 @@
 	/* .gen_adaptive_qcs not implemented */
 };
 
+static const struct wmi_ops wmi_10_4_ops = {
+	.rx = ath10k_wmi_10_4_op_rx,
+	.map_svc = wmi_10_4_svc_map,
+
+	.pull_scan = ath10k_wmi_op_pull_scan_ev,
+	.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+	.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+	.pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+	.pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+	.pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+	.pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
+	.pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
+	.pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+	.pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+	.get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+	.gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+	.gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+	.gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+	.gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+	.gen_init = ath10k_wmi_10_4_op_gen_init,
+	.gen_start_scan = ath10k_wmi_op_gen_start_scan,
+	.gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+	.gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+	.gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+	.gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+	.gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+	.gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+	.gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+	.gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+	.gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+	.gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+	.gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+	.gen_peer_create = ath10k_wmi_op_gen_peer_create,
+	.gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+	.gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+	.gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+	.gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+	.gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+	.gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+	.gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+	.gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+	.gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+	.gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+	.gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+	.gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+	.gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+	.gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+	.gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+	.gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+	.gen_addba_send = ath10k_wmi_op_gen_addba_send,
+	.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+	.gen_delba_send = ath10k_wmi_op_gen_delba_send,
+
+	/* shared with 10.2 */
+	.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+};
+
 int ath10k_wmi_attach(struct ath10k *ar)
 {
 	switch (ar->wmi.op_version) {
+	case ATH10K_FW_WMI_OP_VERSION_10_4:
+		ar->wmi.ops = &wmi_10_4_ops;
+		ar->wmi.cmd = &wmi_10_4_cmd_map;
+		ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+		ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+		break;
 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
 		ar->wmi.cmd = &wmi_10_2_4_cmd_map;
 		ar->wmi.ops = &wmi_10_2_4_ops;
@@ -5452,6 +6655,8 @@
 	init_completion(&ar->wmi.service_ready);
 	init_completion(&ar->wmi.unified_ready);
 
+	INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+
 	return 0;
 }
 
@@ -5459,6 +6664,11 @@
 {
 	int i;
 
+	cancel_work_sync(&ar->svc_rdy_work);
+
+	if (ar->svc_rdy_skb)
+		dev_kfree_skb(ar->svc_rdy_skb);
+
 	/* free the host memory chunks requested by firmware */
 	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
 		dma_free_coherent(ar->dev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index cf44a3d..52d3503 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -150,6 +150,12 @@
 	WMI_SERVICE_SAP_AUTH_OFFLOAD,
 	WMI_SERVICE_ATF,
 	WMI_SERVICE_COEX_GPIO,
+	WMI_SERVICE_ENHANCED_PROXY_STA,
+	WMI_SERVICE_TT,
+	WMI_SERVICE_PEER_CACHING,
+	WMI_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_SERVICE_BSS_CHANNEL_INFO_64,
 
 	/* keep last */
 	WMI_SERVICE_MAX,
@@ -218,6 +224,51 @@
 	WMI_MAIN_SERVICE_TX_ENCAP,
 };
 
+enum wmi_10_4_service {
+	WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+	WMI_10_4_SERVICE_SCAN_OFFLOAD,
+	WMI_10_4_SERVICE_ROAM_OFFLOAD,
+	WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+	WMI_10_4_SERVICE_STA_PWRSAVE,
+	WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+	WMI_10_4_SERVICE_AP_UAPSD,
+	WMI_10_4_SERVICE_AP_DFS,
+	WMI_10_4_SERVICE_11AC,
+	WMI_10_4_SERVICE_BLOCKACK,
+	WMI_10_4_SERVICE_PHYERR,
+	WMI_10_4_SERVICE_BCN_FILTER,
+	WMI_10_4_SERVICE_RTT,
+	WMI_10_4_SERVICE_RATECTRL,
+	WMI_10_4_SERVICE_WOW,
+	WMI_10_4_SERVICE_RATECTRL_CACHE,
+	WMI_10_4_SERVICE_IRAM_TIDS,
+	WMI_10_4_SERVICE_BURST,
+	WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	WMI_10_4_SERVICE_GTK_OFFLOAD,
+	WMI_10_4_SERVICE_SCAN_SCH,
+	WMI_10_4_SERVICE_CSA_OFFLOAD,
+	WMI_10_4_SERVICE_CHATTER,
+	WMI_10_4_SERVICE_COEX_FREQAVOID,
+	WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+	WMI_10_4_SERVICE_FORCE_FW_HANG,
+	WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	WMI_10_4_SERVICE_GPIO,
+	WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+	WMI_10_4_SERVICE_TX_ENCAP,
+	WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	WMI_10_4_SERVICE_EARLY_RX,
+	WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+	WMI_10_4_SERVICE_TT,
+	WMI_10_4_SERVICE_ATF,
+	WMI_10_4_SERVICE_PEER_CACHING,
+	WMI_10_4_SERVICE_COEX_GPIO,
+	WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+	WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+	WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
 static inline char *wmi_service_name(int service_id)
 {
 #define SVCSTR(x) case x: return #x
@@ -299,6 +350,12 @@
 	SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
 	SVCSTR(WMI_SERVICE_ATF);
 	SVCSTR(WMI_SERVICE_COEX_GPIO);
+	SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+	SVCSTR(WMI_SERVICE_TT);
+	SVCSTR(WMI_SERVICE_PEER_CACHING);
+	SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+	SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+	SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
 	default:
 		return NULL;
 	}
@@ -437,6 +494,95 @@
 	       WMI_SERVICE_TX_ENCAP, len);
 }
 
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+				    size_t len)
+{
+	SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+	       WMI_SERVICE_BEACON_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+	       WMI_SERVICE_SCAN_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+	       WMI_SERVICE_ROAM_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+	       WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+	       WMI_SERVICE_STA_PWRSAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+	       WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+	       WMI_SERVICE_AP_UAPSD, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+	       WMI_SERVICE_AP_DFS, len);
+	SVCMAP(WMI_10_4_SERVICE_11AC,
+	       WMI_SERVICE_11AC, len);
+	SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+	       WMI_SERVICE_BLOCKACK, len);
+	SVCMAP(WMI_10_4_SERVICE_PHYERR,
+	       WMI_SERVICE_PHYERR, len);
+	SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+	       WMI_SERVICE_BCN_FILTER, len);
+	SVCMAP(WMI_10_4_SERVICE_RTT,
+	       WMI_SERVICE_RTT, len);
+	SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+	       WMI_SERVICE_RATECTRL, len);
+	SVCMAP(WMI_10_4_SERVICE_WOW,
+	       WMI_SERVICE_WOW, len);
+	SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+	       WMI_SERVICE_RATECTRL_CACHE, len);
+	SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+	       WMI_SERVICE_IRAM_TIDS, len);
+	SVCMAP(WMI_10_4_SERVICE_BURST,
+	       WMI_SERVICE_BURST, len);
+	SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+	       WMI_SERVICE_GTK_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+	       WMI_SERVICE_SCAN_SCH, len);
+	SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+	       WMI_SERVICE_CSA_OFFLOAD, len);
+	SVCMAP(WMI_10_4_SERVICE_CHATTER,
+	       WMI_SERVICE_CHATTER, len);
+	SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+	       WMI_SERVICE_COEX_FREQAVOID, len);
+	SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+	       WMI_SERVICE_PACKET_POWER_SAVE, len);
+	SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+	       WMI_SERVICE_FORCE_FW_HANG, len);
+	SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+	       WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+	SVCMAP(WMI_10_4_SERVICE_GPIO,
+	       WMI_SERVICE_GPIO, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+	       WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+	SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+	       WMI_SERVICE_STA_KEEP_ALIVE, len);
+	SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+	       WMI_SERVICE_TX_ENCAP, len);
+	SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+	       WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+	SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+	       WMI_SERVICE_EARLY_RX, len);
+	SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+	       WMI_SERVICE_ENHANCED_PROXY_STA, len);
+	SVCMAP(WMI_10_4_SERVICE_TT,
+	       WMI_SERVICE_TT, len);
+	SVCMAP(WMI_10_4_SERVICE_ATF,
+	       WMI_SERVICE_ATF, len);
+	SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+	       WMI_SERVICE_PEER_CACHING, len);
+	SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+	       WMI_SERVICE_COEX_GPIO, len);
+	SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+	       WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+	SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+	       WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+	SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+	       WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
 #undef SVCMAP
 
 /* 2 word representation of MAC addr */
@@ -565,6 +711,48 @@
 	u32 tdls_set_state_cmdid;
 	u32 tdls_peer_update_cmdid;
 	u32 adaptive_qcs_cmdid;
+	u32 scan_update_request_cmdid;
+	u32 vdev_standby_response_cmdid;
+	u32 vdev_resume_response_cmdid;
+	u32 wlan_peer_caching_add_peer_cmdid;
+	u32 wlan_peer_caching_evict_peer_cmdid;
+	u32 wlan_peer_caching_restore_peer_cmdid;
+	u32 wlan_peer_caching_print_all_peers_info_cmdid;
+	u32 peer_update_wds_entry_cmdid;
+	u32 peer_add_proxy_sta_entry_cmdid;
+	u32 rtt_keepalive_cmdid;
+	u32 oem_req_cmdid;
+	u32 nan_cmdid;
+	u32 vdev_ratemask_cmdid;
+	u32 qboost_cfg_cmdid;
+	u32 pdev_smart_ant_enable_cmdid;
+	u32 pdev_smart_ant_set_rx_antenna_cmdid;
+	u32 peer_smart_ant_set_tx_antenna_cmdid;
+	u32 peer_smart_ant_set_train_info_cmdid;
+	u32 peer_smart_ant_set_node_config_ops_cmdid;
+	u32 pdev_set_antenna_switch_table_cmdid;
+	u32 pdev_set_ctl_table_cmdid;
+	u32 pdev_set_mimogain_table_cmdid;
+	u32 pdev_ratepwr_table_cmdid;
+	u32 pdev_ratepwr_chainmsk_table_cmdid;
+	u32 pdev_fips_cmdid;
+	u32 tt_set_conf_cmdid;
+	u32 fwtest_cmdid;
+	u32 vdev_atf_request_cmdid;
+	u32 peer_atf_request_cmdid;
+	u32 pdev_get_ani_cck_config_cmdid;
+	u32 pdev_get_ani_ofdm_config_cmdid;
+	u32 pdev_reserve_ast_entry_cmdid;
+	u32 pdev_get_nfcal_power_cmdid;
+	u32 pdev_get_tpc_cmdid;
+	u32 pdev_get_ast_info_cmdid;
+	u32 vdev_set_dscp_tid_map_cmdid;
+	u32 pdev_get_info_cmdid;
+	u32 vdev_get_info_cmdid;
+	u32 vdev_filter_neighbor_rx_packets_cmdid;
+	u32 mu_cal_start_cmdid;
+	u32 set_cca_params_cmdid;
+	u32 pdev_bss_chan_info_request_cmdid;
 };
 
 /*
@@ -1220,6 +1408,216 @@
 	WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
 };
 
+enum wmi_10_4_cmd_id {
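+	/* Declaring WMI_10_4_INIT_CMDID after WMI_10_4_END_CMDID gives it
+	 * the next enum value (0xa000), while the commands below count up
+	 * from WMI_10_4_START_CMDID (0x9000).
+	 */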
+	WMI_10_4_START_CMDID = 0x9000,
+	WMI_10_4_END_CMDID = 0x9FFF,
+	WMI_10_4_INIT_CMDID,
+	WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+	WMI_10_4_STOP_SCAN_CMDID,
+	WMI_10_4_SCAN_CHAN_LIST_CMDID,
+	WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+	WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+	WMI_10_4_ECHO_CMDID,
+	WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+	WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+	WMI_10_4_PDEV_SET_PARAM_CMDID,
+	WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+	WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+	WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+	WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+	WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+	WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+	WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+	WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+	WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+	WMI_10_4_VDEV_CREATE_CMDID,
+	WMI_10_4_VDEV_DELETE_CMDID,
+	WMI_10_4_VDEV_START_REQUEST_CMDID,
+	WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+	WMI_10_4_VDEV_UP_CMDID,
+	WMI_10_4_VDEV_STOP_CMDID,
+	WMI_10_4_VDEV_DOWN_CMDID,
+	WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+	WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+	WMI_10_4_VDEV_SET_PARAM_CMDID,
+	WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+	WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+	WMI_10_4_PEER_CREATE_CMDID,
+	WMI_10_4_PEER_DELETE_CMDID,
+	WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+	WMI_10_4_PEER_SET_PARAM_CMDID,
+	WMI_10_4_PEER_ASSOC_CMDID,
+	WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+	WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+	WMI_10_4_PEER_MCAST_GROUP_CMDID,
+	WMI_10_4_BCN_TX_CMDID,
+	WMI_10_4_PDEV_SEND_BCN_CMDID,
+	WMI_10_4_BCN_PRB_TMPL_CMDID,
+	WMI_10_4_BCN_FILTER_RX_CMDID,
+	WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+	WMI_10_4_MGMT_TX_CMDID,
+	WMI_10_4_PRB_TMPL_CMDID,
+	WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+	WMI_10_4_ADDBA_SEND_CMDID,
+	WMI_10_4_ADDBA_STATUS_CMDID,
+	WMI_10_4_DELBA_SEND_CMDID,
+	WMI_10_4_ADDBA_SET_RESP_CMDID,
+	WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+	WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+	WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+	WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+	WMI_10_4_DBGLOG_CFG_CMDID,
+	WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+	WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+	WMI_10_4_PDEV_QVIT_CMDID,
+	WMI_10_4_ROAM_SCAN_MODE,
+	WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+	WMI_10_4_ROAM_SCAN_PERIOD,
+	WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+	WMI_10_4_ROAM_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+	WMI_10_4_OFL_SCAN_PERIOD,
+	WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+	WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+	WMI_10_4_P2P_GO_SET_BEACON_IE,
+	WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+	WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+	WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+	WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+	WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+	WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+	WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+	WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+	WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+	WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+	WMI_10_4_PDEV_SUSPEND_CMDID,
+	WMI_10_4_PDEV_RESUME_CMDID,
+	WMI_10_4_ADD_BCN_FILTER_CMDID,
+	WMI_10_4_RMV_BCN_FILTER_CMDID,
+	WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+	WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+	WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+	WMI_10_4_WOW_ENABLE_CMDID,
+	WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+	WMI_10_4_RTT_MEASREQ_CMDID,
+	WMI_10_4_RTT_TSF_CMDID,
+	WMI_10_4_RTT_KEEPALIVE_CMDID,
+	WMI_10_4_OEM_REQ_CMDID,
+	WMI_10_4_NAN_CMDID,
+	WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+	WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+	WMI_10_4_REQUEST_STATS_CMDID,
+	WMI_10_4_GPIO_CONFIG_CMDID,
+	WMI_10_4_GPIO_OUTPUT_CMDID,
+	WMI_10_4_VDEV_RATEMASK_CMDID,
+	WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+	WMI_10_4_GTK_OFFLOAD_CMDID,
+	WMI_10_4_QBOOST_CFG_CMDID,
+	WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+	WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+	WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+	WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+	WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+	WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+	WMI_10_4_FORCE_FW_HANG_CMDID,
+	WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+	WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+	WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+	WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+	WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+	WMI_10_4_PDEV_FIPS_CMDID,
+	WMI_10_4_TT_SET_CONF_CMDID,
+	WMI_10_4_FWTEST_CMDID,
+	WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+	WMI_10_4_PEER_ATF_REQUEST_CMDID,
+	WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+	WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+	WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+	WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+	WMI_10_4_PDEV_GET_TPC_CMDID,
+	WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+	WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+	WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+	WMI_10_4_PDEV_GET_INFO_CMDID,
+	WMI_10_4_VDEV_GET_INFO_CMDID,
+	WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+	WMI_10_4_MU_CAL_START_CMDID,
+	WMI_10_4_SET_CCA_PARAMS_CMDID,
+	WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+	WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+	WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+	WMI_10_4_READY_EVENTID,
+	WMI_10_4_DEBUG_MESG_EVENTID,
+	WMI_10_4_START_EVENTID = 0x9000,
+	WMI_10_4_END_EVENTID = 0x9FFF,
+	WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+	WMI_10_4_ECHO_EVENTID,
+	WMI_10_4_UPDATE_STATS_EVENTID,
+	WMI_10_4_INST_RSSI_STATS_EVENTID,
+	WMI_10_4_VDEV_START_RESP_EVENTID,
+	WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+	WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+	WMI_10_4_VDEV_STOPPED_EVENTID,
+	WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+	WMI_10_4_HOST_SWBA_EVENTID,
+	WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+	WMI_10_4_MGMT_RX_EVENTID,
+	WMI_10_4_CHAN_INFO_EVENTID,
+	WMI_10_4_PHYERR_EVENTID,
+	WMI_10_4_ROAM_EVENTID,
+	WMI_10_4_PROFILE_MATCH,
+	WMI_10_4_DEBUG_PRINT_EVENTID,
+	WMI_10_4_PDEV_QVIT_EVENTID,
+	WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+	WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+	WMI_10_4_RTT_KEEPALIVE_EVENTID,
+	WMI_10_4_OEM_CAPABILITY_EVENTID,
+	WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+	WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+	WMI_10_4_NAN_EVENTID,
+	WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+	WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+	WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+	WMI_10_4_DCS_INTERFERENCE_EVENTID,
+	WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+	WMI_10_4_CSA_HANDLING_EVENTID,
+	WMI_10_4_GPIO_INPUT_EVENTID,
+	WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+	WMI_10_4_GENERIC_BUFFER_EVENTID,
+	WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+	WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+	WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+	WMI_10_4_WDS_PEER_EVENTID,
+	WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+	WMI_10_4_PDEV_FIPS_EVENTID,
+	WMI_10_4_TT_STATS_EVENTID,
+	WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+	WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+	WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+	WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+	WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+	WMI_10_4_PDEV_TPC_EVENTID,
+	WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+	WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+	WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+	WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+	WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
 enum wmi_phy_mode {
 	MODE_11A        = 0,   /* 11a Mode */
 	MODE_11G        = 1,   /* 11b/g Mode */
@@ -1349,7 +1747,8 @@
 /* Indicate reason for channel switch */
 #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
 
-#define WMI_MAX_SPATIAL_STREAM   3
+#define WMI_MAX_SPATIAL_STREAM        3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM   4
 
 /* HT Capabilities*/
 #define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
@@ -1979,8 +2378,224 @@
 	__le32 feature_mask;
 } __packed;
 
-#define NUM_UNITS_IS_NUM_VDEVS   0x1
-#define NUM_UNITS_IS_NUM_PEERS   0x2
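+/* Flags in num_unit_info of a host memory request; they tell the host
+ * whether num_units scales with the number of vdevs, peers or active peers.
+ */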
+#define NUM_UNITS_IS_NUM_VDEVS         BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS         BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS  BIT(2)
+
+struct wmi_resource_config_10_4 {
+	/* Number of virtual devices (VAPs) to support */
+	__le32 num_vdevs;
+
+	/* Number of peer nodes to support */
+	__le32 num_peers;
+
+	/* Number of active peer nodes to support */
+	__le32 num_active_peers;
+
+	/* In offload mode, the target supports features like WOW, chatter and
+	 * other protocol offloads. To support them, some functionality such as
+	 * reorder buffering and PN checking needs to be done in the target.
+	 * This determines the maximum number of peers supported by the target
+	 * in offload mode.
+	 */
+	__le32 num_offload_peers;
+
+	/* Number of reorder buffers available for target-based rx reorder
+	 * buffering
+	 */
+	__le32 num_offload_reorder_buffs;
+
+	/* Number of keys per peer */
+	__le32 num_peer_keys;
+
+	/* Total number of TX/RX data TIDs */
+	__le32 num_tids;
+
+	/* Max skid for resolving hash collisions.
+	 * The address search table is sparse, so that if two MAC addresses
+	 * result in the same hash value, the second of these conflicting
+	 * entries can slide to the next index in the address search table,
+	 * and use it, if it is unoccupied.  This ast_skid_limit parameter
+	 * specifies the upper bound on how many subsequent indices to search
+	 * over to find an unoccupied space.
+	 */
+	__le32 ast_skid_limit;
+
+	/* The nominal chain mask for transmit.
+	 * The chain mask may be modified dynamically, e.g. to operate AP tx
+	 * with a reduced number of chains if no clients are associated.
+	 * This configuration parameter specifies the nominal chain-mask that
+	 * should be used when not operating with a reduced set of tx chains.
+	 */
+	__le32 tx_chain_mask;
+
+	/* The nominal chain mask for receive.
+	 * The chain mask may be modified dynamically, e.g. for a client to use
+	 * a reduced number of chains for receive if the traffic to the client
+	 * is low enough that it doesn't require downlink MIMO or antenna
+	 * diversity. This configuration parameter specifies the nominal
+	 * chain-mask that should be used when not operating with a reduced
+	 * set of rx chains.
+	 */
+	__le32 rx_chain_mask;
+
+	/* What rx reorder timeout (ms) to use for the AC.
+	 * Each WMM access class (voice, video, best-effort, background) will
+	 * have its own timeout value to dictate how long to wait for missing
+	 * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+	 * been received. This parameter specifies the timeout in milliseconds
+	 * for each class.
+	 */
+	__le32 rx_timeout_pri[4];
+
+	/* What mode the rx should decap packets to.
+	 * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+	 * This setting also determines the default TX behavior; however, TX
+	 * behavior can be modified on a per-VAP basis during VAP init.
+	 */
+	__le32 rx_decap_mode;
+
+	__le32 scan_max_pending_req;
+
+	__le32 bmiss_offload_max_vdev;
+
+	__le32 roam_offload_max_vdev;
+
+	__le32 roam_offload_max_ap_profiles;
+
+	/* How many groups to use for mcast->ucast conversion.
+	 * The target's WAL maintains a table to hold information regarding
+	 * which peers belong to a given multicast group, so that if
+	 * multicast->unicast conversion is enabled, the target can convert
+	 * multicast tx frames to a series of unicast tx frames, to each peer
+	 * within the multicast group. This num_mcast_groups configuration
+	 * parameter tells the target how many multicast groups to provide
+	 * storage for within its multicast group membership table.
+	 */
+	__le32 num_mcast_groups;
+
+	/* Size to alloc for the mcast membership table.
+	 * This num_mcast_table_elems configuration parameter tells the target
+	 * how many peer elements it needs to provide storage for in its
+	 * multicast group membership table. These multicast group membership
+	 * table elements are shared by the multicast groups stored within
+	 * the table.
+	 */
+	__le32 num_mcast_table_elems;
+
+	/* Whether/how to do multicast->unicast conversion.
+	 * This configuration parameter specifies whether the target should
+	 * perform multicast --> unicast conversion on transmit, and if so,
+	 * what to do if it finds no entries in its multicast group membership
+	 * table for the multicast IP address in the tx frame.
+	 * Configuration value:
+	 * 0 -> Do not perform multicast to unicast conversion.
+	 * 1 -> Convert multicast frames to unicast, if the IP multicast address
+	 *      from the tx frame is found in the multicast group membership
+	 *      table.  If the IP multicast address is not found, drop the frame
+	 * 2 -> Convert multicast frames to unicast, if the IP multicast address
+	 *      from the tx frame is found in the multicast group membership
+	 *      table.  If the IP multicast address is not found, transmit the
+	 *      frame as multicast.
+	 */
+	__le32 mcast2ucast_mode;
+
+	/* How much memory to allocate for a tx PPDU dbg log.
+	 * This parameter controls how much memory the target will allocate to
+	 * store a log of tx PPDU meta-information (how large the PPDU was,
+	 * when it was sent, whether it was successful, etc.)
+	 */
+	__le32 tx_dbg_log_size;
+
+	/* How many AST entries to be allocated for WDS */
+	__le32 num_wds_entries;
+
+	/* MAC DMA burst size. 0 -default, 1 -256B */
+	__le32 dma_burst_size;
+
+	/* Fixed delimiters to be inserted after every MPDU to account for
+	 * interface latency to avoid underrun.
+	 */
+	__le32 mac_aggr_delim;
+
+	/* Determine whether target is responsible for detecting duplicate
+	 * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+	 * is always performed on the target.
+	 *
+	 * 0: target responsible for frag timeout and dup checking
+	 * 1: host responsible for frag timeout and dup checking
+	 */
+	__le32 rx_skip_defrag_timeout_dup_detection_check;
+
+	/* Configuration for VoW : No of Video nodes to be supported and max
+	 * no of descriptors for each video link (node).
+	 */
+	__le32 vow_config;
+
+	/* Maximum vdev that could use gtk offload */
+	__le32 gtk_offload_max_vdev;
+
+	/* Number of msdu descriptors target should use */
+	__le32 num_msdu_desc;
+
+	/* Max number of tx fragments per MSDU.
+	 * This parameter controls the max number of tx fragments per MSDU.
+	 * This will be passed by the target as part of the WMI_SERVICE_READY
+	 * event and is overridden by the OS shim as required.
+	 */
+	__le32 max_frag_entries;
+
+	/* Max number of extended peer stats.
+	 * This parameter controls the max number of peers for which extended
+	 * statistics are supported by target
+	 */
+	__le32 max_peer_ext_stats;
+
+	/* Smart antenna capabilities information.
+	 * 1 - Smart antenna is enabled
+	 * 0 - Smart antenna is disabled
+	 * In future this can contain smart antenna specific capabilities.
+	 */
+	__le32 smart_ant_cap;
+
+	/* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+	 * during init.
+	 */
+	__le32 bk_minfree;
+	__le32 be_minfree;
+	__le32 vi_minfree;
+	__le32 vo_minfree;
+
+	/* Rx batch mode capability.
+	 * 1 - Rx batch mode enabled
+	 * 0 - Rx batch mode disabled
+	 */
+	__le32 rx_batchmode;
+
+	/* Thermal throttling capability.
+	 * 1 - Capable of thermal throttling
+	 * 0 - Not capable of thermal throttling
+	 */
+	__le32 tt_support;
+
+	/* ATF configuration.
+	 * 1  - Enable ATF
+	 * 0  - Disable ATF
+	 */
+	__le32 atf_config;
+
+	/* Configure padding to manage IP header un-alignment
+	 * 1  - Enable padding
+	 * 0  - Disable padding
+	 */
+	__le32 iphdr_pad_config;
+
+	/* qwrap configuration
+	 * 1  - This is qwrap configuration
+	 * 0  - This is not qwrap
+	 */
+	__le32 qwrap_config;
+} __packed;
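/*
 * [Editor's note] Minimal, hypothetical sketch of how a host driver might
 * fill a few of the little-endian fields above; it is not part of this patch
 * and the names/values are illustrative only.  Field names and the
 * mcast2ucast_mode encoding come from struct wmi_resource_config_10_4 above.
 */
static void example_fill_10_4_resource_config(struct wmi_resource_config_10_4 *cfg,
					      u32 num_vdevs, u32 num_peers)
{
	cfg->num_vdevs = __cpu_to_le32(num_vdevs);
	cfg->num_peers = __cpu_to_le32(num_peers);

	/* 0: no mcast->ucast conversion, 1: convert or drop on lookup miss,
	 * 2: convert or fall back to plain multicast (see comment above)
	 */
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
}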
 
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
@@ -2014,6 +2629,11 @@
 	struct wmi_host_mem_chunks mem_chunks;
 } __packed;
 
+struct wmi_init_cmd_10_4 {
+	struct wmi_resource_config_10_4 resource_config;
+	struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
 struct wmi_chan_list_entry {
 	__le16 freq;
 	u8 phy_mode; /* valid for 10.2 only */
@@ -2260,15 +2880,17 @@
 };
 
 enum wmi_scan_event_type {
-	WMI_SCAN_EVENT_STARTED         = 0x1,
-	WMI_SCAN_EVENT_COMPLETED       = 0x2,
-	WMI_SCAN_EVENT_BSS_CHANNEL     = 0x4,
-	WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
-	WMI_SCAN_EVENT_DEQUEUED        = 0x10,
-	WMI_SCAN_EVENT_PREEMPTED       = 0x20, /* possibly by high-prio scan */
-	WMI_SCAN_EVENT_START_FAILED    = 0x40,
-	WMI_SCAN_EVENT_RESTARTED       = 0x80,
-	WMI_SCAN_EVENT_MAX             = 0x8000
+	WMI_SCAN_EVENT_STARTED              = BIT(0),
+	WMI_SCAN_EVENT_COMPLETED            = BIT(1),
+	WMI_SCAN_EVENT_BSS_CHANNEL          = BIT(2),
+	WMI_SCAN_EVENT_FOREIGN_CHANNEL      = BIT(3),
+	WMI_SCAN_EVENT_DEQUEUED             = BIT(4),
+	/* possibly by high-prio scan */
+	WMI_SCAN_EVENT_PREEMPTED            = BIT(5),
+	WMI_SCAN_EVENT_START_FAILED         = BIT(6),
+	WMI_SCAN_EVENT_RESTARTED            = BIT(7),
+	WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+	WMI_SCAN_EVENT_MAX                  = BIT(15),
 };
 
 enum wmi_scan_completion_reason {
@@ -2276,6 +2898,7 @@
 	WMI_SCAN_REASON_CANCELLED,
 	WMI_SCAN_REASON_PREEMPTED,
 	WMI_SCAN_REASON_TIMEDOUT,
+	WMI_SCAN_REASON_INTERNAL_FAILURE,
 	WMI_SCAN_REASON_MAX,
 };
 
@@ -2329,15 +2952,40 @@
 	u8 buf[0];
 } __packed;
 
+struct wmi_10_4_mgmt_rx_hdr {
+	__le32 channel;
+	__le32 snr;
+	u8 rssi_ctl[4];
+	__le32 rate;
+	__le32 phy_mode;
+	__le32 buf_len;
+	__le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+	struct wmi_10_4_mgmt_rx_hdr hdr;
+	u8 buf[0];
+} __packed;
+
 #define WMI_RX_STATUS_OK			0x00
 #define WMI_RX_STATUS_ERR_CRC			0x01
 #define WMI_RX_STATUS_ERR_DECRYPT		0x08
 #define WMI_RX_STATUS_ERR_MIC			0x10
 #define WMI_RX_STATUS_ERR_KEY_CACHE_MISS	0x20
 
-#define PHY_ERROR_SPECTRAL_SCAN		0x26
-#define PHY_ERROR_FALSE_RADAR_EXT		0x24
-#define PHY_ERROR_RADAR				0x05
+#define PHY_ERROR_GEN_SPECTRAL_SCAN		0x26
+#define PHY_ERROR_GEN_FALSE_RADAR_EXT		0x24
+#define PHY_ERROR_GEN_RADAR			0x05
+
+#define PHY_ERROR_10_4_RADAR_MASK               0x4
+#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK       0x4000000
+
+enum phy_err_type {
+	PHY_ERROR_UNKNOWN,
+	PHY_ERROR_SPECTRAL_SCAN,
+	PHY_ERROR_FALSE_RADAR_EXT,
+	PHY_ERROR_RADAR
+};
 
 struct wmi_phyerr {
 	__le32 tsf_timestamp;
@@ -2360,6 +3008,23 @@
 	struct wmi_phyerr phyerrs[0];
 } __packed;
 
+struct wmi_10_4_phyerr_event {
+	__le32 tsf_l32;
+	__le32 tsf_u32;
+	__le16 freq1;
+	__le16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u8 rsvd0;
+	__le32 rssi_chains[4];
+	__le16 nf_chains[4];
+	__le32 phy_err_mask[2];
+	__le32 tsf_timestamp;
+	__le32 buf_len;
+	u8 buf[0];
+} __packed;
+
 #define PHYERR_TLV_SIG				0xBB
 #define PHYERR_TLV_TAG_SEARCH_FFT_REPORT	0xFB
 #define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY	0xF8
@@ -2613,6 +3278,48 @@
 	u32 burst_dur;
 	u32 burst_enable;
 	u32 cal_period;
+	u32 aggr_burst;
+	u32 rx_decap_mode;
+	u32 smart_antenna_default_antenna;
+	u32 igmpmld_override;
+	u32 igmpmld_tid;
+	u32 antenna_gain;
+	u32 rx_filter;
+	u32 set_mcast_to_ucast_tid;
+	u32 proxy_sta_mode;
+	u32 set_mcast2ucast_mode;
+	u32 set_mcast2ucast_buffer;
+	u32 remove_mcast2ucast_buffer;
+	u32 peer_sta_ps_statechg_enable;
+	u32 igmpmld_ac_override;
+	u32 block_interbss;
+	u32 set_disable_reset_cmdid;
+	u32 set_msdu_ttl_cmdid;
+	u32 set_ppdu_duration_cmdid;
+	u32 txbf_sound_period_cmdid;
+	u32 set_promisc_mode_cmdid;
+	u32 set_burst_mode_cmdid;
+	u32 en_stats;
+	u32 mu_group_policy;
+	u32 noise_detection;
+	u32 noise_threshold;
+	u32 dpd_enable;
+	u32 set_mcast_bcast_echo;
+	u32 atf_strict_sch;
+	u32 atf_sched_duration;
+	u32 ant_plzn;
+	u32 mgmt_retry_limit;
+	u32 sensitivity_level;
+	u32 signed_txpower_2g;
+	u32 signed_txpower_5g;
+	u32 enable_per_tid_amsdu;
+	u32 enable_per_tid_ampdu;
+	u32 cca_threshold;
+	u32 rts_fixed_rate;
+	u32 pdev_reset;
+	u32 wapi_mbssid_offset;
+	u32 arp_srcaddr;
+	u32 arp_dstaddr;
 };
 
 #define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2828,6 +3535,100 @@
 	WMI_10X_PDEV_PARAM_CAL_PERIOD
 };
 
+enum wmi_10_4_pdev_param {
+	WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+	WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+	WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+	WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+	WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+	WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+	WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+	WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+	WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+	WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+	WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+	WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+	WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+	WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+	WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+	WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+	WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+	WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+	WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+	WMI_10_4_PDEV_PARAM_PMF_QOS,
+	WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_DCS,
+	WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+	WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+	WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+	WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+	WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+	WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+	WMI_10_4_PDEV_PARAM_PROXY_STA,
+	WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+	WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+	WMI_10_4_PDEV_PARAM_AGGR_BURST,
+	WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+	WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+	WMI_10_4_PDEV_PARAM_BURST_DUR,
+	WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+	WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+	WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+	WMI_10_4_PDEV_PARAM_RX_FILTER,
+	WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+	WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+	WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+	WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+	WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+	WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+	WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+	WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+	WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+	WMI_10_4_PDEV_PARAM_EN_STATS,
+	WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+	WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+	WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+	WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+	WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+	WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+	WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+	WMI_10_4_PDEV_PARAM_ANT_PLZN,
+	WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+	WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+	WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+	WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+	WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+	WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+	WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+	WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+	WMI_10_4_PDEV_PARAM_PDEV_RESET,
+	WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+	WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+	WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
 struct wmi_pdev_set_param_cmd {
 	__le32 param_id;
 	__le32 param_value;
@@ -3506,6 +4307,22 @@
 	u32 drop_unencry;
 	u32 tx_encap_type;
 	u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+	u32 rc_num_retries;
+	u32 cabq_maxdur;
+	u32 mfptest_set;
+	u32 rts_fixed_rate;
+	u32 vht_sgimask;
+	u32 vht80_ratemask;
+	u32 early_rx_adjust_enable;
+	u32 early_rx_tgt_bmiss_num;
+	u32 early_rx_bmiss_sample_cycle;
+	u32 early_rx_slop_step;
+	u32 early_rx_init_slop;
+	u32 early_rx_adjust_pause;
+	u32 proxy_sta;
+	u32 meru_vc;
+	u32 rx_decap_type;
+	u32 bw_nss_ratemask;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -3764,11 +4581,85 @@
 	WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+enum wmi_10_4_vdev_param {
+	WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+	WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+	WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+	WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+	WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+	WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+	WMI_10_4_VDEV_PARAM_SLOT_TIME,
+	WMI_10_4_VDEV_PARAM_PREAMBLE,
+	WMI_10_4_VDEV_PARAM_SWBA_TIME,
+	WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+	WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+	WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+	WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+	WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+	WMI_10_4_VDEV_PARAM_WDS,
+	WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+	WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+	WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+	WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+	WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+	WMI_10_4_VDEV_PARAM_CHWIDTH,
+	WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+	WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+	WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+	WMI_10_4_VDEV_PARAM_MGMT_RATE,
+	WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+	WMI_10_4_VDEV_PARAM_FIXED_RATE,
+	WMI_10_4_VDEV_PARAM_SGI,
+	WMI_10_4_VDEV_PARAM_LDPC,
+	WMI_10_4_VDEV_PARAM_TX_STBC,
+	WMI_10_4_VDEV_PARAM_RX_STBC,
+	WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+	WMI_10_4_VDEV_PARAM_DEF_KEYID,
+	WMI_10_4_VDEV_PARAM_NSS,
+	WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+	WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+	WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+	WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+	WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+	WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+	WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+	WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+	WMI_10_4_VDEV_PARAM_TXBF,
+	WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+	WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+	WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+	WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+	WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+	WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+	WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+	WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+	WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+	WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+	WMI_10_4_VDEV_PARAM_PROXY_STA,
+	WMI_10_4_VDEV_PARAM_MERU_VC,
+	WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+	WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
+#define WMI_TXBF_STS_CAP_OFFSET_LSB	4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK	0xf0
+#define WMI_BF_SOUND_DIM_OFFSET_LSB	8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK	0xf00
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG		0x1
 /* slot time short */
@@ -4305,6 +5196,14 @@
 	__le32 tim_num_ps_pending;
 } __packed;
 
+struct wmi_tim_info_arg {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	const __le32 *tim_bitmap;
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
 /* Maximum number of NOA Descriptors supported */
 #define WMI_P2P_MAX_NOA_DESCRIPTORS 4
 #define WMI_P2P_OPPPS_ENABLE_BIT	BIT(0)
@@ -4336,6 +5235,47 @@
 	struct wmi_bcn_info bcn_info[0];
 } __packed;
 
+/* 16 words for 512 clients + 1 guard word */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+	__le32 tim_len;
+	__le32 tim_mcast;
+	__le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+	__le32 tim_changed;
+	__le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+	/* Bit 0 - Flag to indicate an update in NOA schedule
+	 * Bits 7-1 - Reserved
+	 */
+	u8 changed;
+	/* NOA index */
+	u8 index;
+	/* Bit 0 - Opp PS state of the AP
+	 * Bits 1-7 - Ctwindow in TUs
+	 */
+	u8 ctwindow_oppps;
+	/* Number of NOA descriptors */
+	u8 num_descriptors;
+
+	struct wmi_p2p_noa_descriptor
+		noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+	struct wmi_10_4_tim_info tim_info;
+	struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+	__le32 vdev_map;
+	struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
 #define WMI_MAX_AP_VDEV 16
 
 struct wmi_tbtt_offset_event {
@@ -4660,11 +5600,24 @@
 	__le32 cycle_count;
 } __packed;
 
+struct wmi_10_4_chan_info_event {
+	__le32 err_code;
+	__le32 freq;
+	__le32 cmd_flags;
+	__le32 noise_floor;
+	__le32 rx_clear_count;
+	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
+} __packed;
+
 struct wmi_peer_sta_kickout_event {
 	struct wmi_mac_addr peer_macaddr;
 } __packed;
 
 #define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1)
 
 /* Beacon filter wmi command info */
 #define BCN_FLT_MAX_SUPPORTED_IES	256
@@ -4840,6 +5793,9 @@
 	__le32 noise_floor;
 	__le32 rx_clear_count;
 	__le32 cycle_count;
+	__le32 chan_tx_pwr_range;
+	__le32 chan_tx_pwr_tp;
+	__le32 rx_frame_count;
 };
 
 struct wmi_vdev_start_ev_arg {
@@ -4855,16 +5811,29 @@
 
 struct wmi_swba_ev_arg {
 	__le32 vdev_map;
-	const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+	struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
 	const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
 };
 
 struct wmi_phyerr_ev_arg {
-	__le32 num_phyerrs;
-	__le32 tsf_l32;
-	__le32 tsf_u32;
-	__le32 buf_len;
-	const struct wmi_phyerr *phyerrs;
+	u32 tsf_timestamp;
+	u16 freq1;
+	u16 freq2;
+	u8 rssi_combined;
+	u8 chan_width_mhz;
+	u8 phy_err_code;
+	u16 nf_chains[4];
+	u32 buf_len;
+	const u8 *buf;
+	u8 hdr_len;
+};
+
+struct wmi_phyerr_hdr_arg {
+	u32 num_phyerrs;
+	u32 tsf_l32;
+	u32 tsf_u32;
+	u32 buf_len;
+	const void *phyerrs;
 };
 
 struct wmi_svc_rdy_ev_arg {
@@ -5085,6 +6054,12 @@
 	u32 pref_offchan_bw;
 };
 
+enum wmi_txbf_conf {
+	WMI_TXBF_CONF_UNSUPPORTED,
+	WMI_TXBF_CONF_BEFORE_ASSOC,
+	WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
 struct ath10k;
 struct ath10k_vif;
 struct ath10k_fw_stats_pdev;
@@ -5136,9 +6111,9 @@
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_dfs(struct ath10k *ar,
-			  const struct wmi_phyerr *phyerr, u64 tsf);
+			  struct wmi_phyerr_ev_arg *phyerr, u64 tsf);
 void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
-				    const struct wmi_phyerr *phyerr,
+				    struct wmi_phyerr_ev_arg *phyerr,
 				    u64 tsf);
 void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
@@ -5167,5 +6142,6 @@
 void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
-
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
+				 int left_len, struct wmi_phyerr_ev_arg *arg);
 #endif /* _WMI_H_ */
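/*
 * [Editor's note] Sketch only, not part of this patch: one plausible way a
 * caller could walk a phyerr buffer record by record using the
 * wmi_phyerr_hdr_arg / wmi_phyerr_ev_arg split and the
 * ath10k_wmi_op_pull_phyerr_ev() helper declared above.  Error handling and
 * the spectral-scan case are trimmed for brevity.
 */
static void example_walk_phyerrs(struct ath10k *ar,
				 const struct wmi_phyerr_hdr_arg *hdr,
				 u64 tsf)
{
	const void *phyerr = hdr->phyerrs;
	int left_len = hdr->buf_len;
	struct wmi_phyerr_ev_arg arg;
	u32 i;

	for (i = 0; i < hdr->num_phyerrs; i++) {
		if (ath10k_wmi_op_pull_phyerr_ev(ar, phyerr, left_len, &arg))
			break;		/* malformed or truncated record */

		if (arg.phy_err_code == PHY_ERROR_RADAR)
			ath10k_wmi_event_dfs(ar, &arg, tsf);

		/* each record is its header followed by buf_len payload bytes */
		phyerr += arg.hdr_len + arg.buf_len;
		left_len -= arg.hdr_len + arg.buf_len;
	}
}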
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index a68d8fd..8e02b38 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -301,8 +301,26 @@
 		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
 
 exit:
+	if (ret) {
+		switch (ar->state) {
+		case ATH10K_STATE_ON:
+			ar->state = ATH10K_STATE_RESTARTING;
+			ret = 1;
+			break;
+		case ATH10K_STATE_OFF:
+		case ATH10K_STATE_RESTARTING:
+		case ATH10K_STATE_RESTARTED:
+		case ATH10K_STATE_UTF:
+		case ATH10K_STATE_WEDGED:
+			ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+				    ar->state);
+			ret = -EIO;
+			break;
+		}
+	}
+
 	mutex_unlock(&ar->conf_mutex);
-	return ret ? 1 : 0;
+	return ret;
 }
 
 int ath10k_wow_init(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 2399a39..b1278f9 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -5,7 +5,6 @@
 	select MAC80211_LEDS
 	select LEDS_CLASS
 	select NEW_LEDS
-	select AVERAGE
 	select ATH5K_AHB if ATH25
 	select ATH5K_PCI if !ATH25
 	---help---
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 5c00875..38be270 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -223,7 +223,7 @@
 ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
 			 bool ofdm_trigger)
 {
-	int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+	int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
 		ofdm_trigger ? "ODFM" : "CCK");
@@ -309,7 +309,7 @@
 static void
 ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
 {
-	int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
+	int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
 
 	ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
 
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index e22b0e7..fa6e89e 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1252,6 +1252,8 @@
 #define ATH5K_TXQ_LEN_MAX	(ATH_TXBUF / 4)		/* bufs per queue */
 #define ATH5K_TXQ_LEN_LOW	(ATH5K_TXQ_LEN_MAX / 2)	/* low mark */
 
+DECLARE_EWMA(beacon_rssi, 1024, 8)
+
 /* Driver state associated with an instance of a device */
 struct ath5k_hw {
 	struct ath_common       common;
@@ -1432,7 +1434,7 @@
 	struct ath5k_nfcal_hist ah_nfcal_hist;
 
 	/* average beacon RSSI in our BSS (used by ANI) */
-	struct ewma		ah_beacon_rssi_avg;
+	struct ewma_beacon_rssi	ah_beacon_rssi_avg;
 
 	/* noise floor from last periodic calibration */
 	s32			ah_noise_floor;
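/*
 * [Editor's note] Not part of this patch: the ewma conversion in this series
 * follows the DECLARE_EWMA() pattern from <linux/average.h>, where the
 * factor/weight arguments move from ewma_init() into the declaration and the
 * macro generates a dedicated struct plus type-safe helpers.  A minimal
 * sketch with an assumed "example_rssi" name:
 */
DECLARE_EWMA(example_rssi, 1024, 8)

static void example_ewma_usage(struct ewma_example_rssi *avg, int rssi)
{
	ewma_example_rssi_init(avg);		/* replaces ewma_init(avg, 1024, 8) */
	ewma_example_rssi_add(avg, rssi);	/* replaces ewma_add(avg, rssi) */
	(void)ewma_example_rssi_read(avg);	/* replaces ewma_read(avg) */
}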
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 23552f4..342563a 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1430,7 +1430,7 @@
 	trace_ath5k_rx(ah, skb);
 
 	if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
-		ewma_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
+		ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
 
 		/* check beacons in IBSS mode */
 		if (ah->opmode == NL80211_IFTYPE_ADHOC)
@@ -2936,7 +2936,7 @@
 	ah->ah_cal_next_short = jiffies +
 		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
 
-	ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
+	ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
 
 	/* clear survey data and cycle counters */
 	memset(&ah->survey, 0, sizeof(ah->survey));
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index c70782e..654a1e3 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -722,7 +722,7 @@
 			st->mib_intr);
 	len += snprintf(buf + len, sizeof(buf) - len,
 			"beacon RSSI average:\t%d\n",
-			(int)ewma_read(&ah->ah_beacon_rssi_avg));
+			(int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
 
 #define CC_PRINT(_struct, _field) \
 	_struct._field, \
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 14cab14..112d8a9 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -427,7 +427,7 @@
 };
 
 /*
- * credit distibution code that is passed into the distrbution function,
+ * credit distribution code that is passed into the distribution function,
  * there are mandatory and optional codes that must be handled
  */
 enum htc_credit_dist_reason {
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index b921005..a5e1de7 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -154,7 +154,7 @@
 }
 
 /*  Performs DIX to 802.3 encapsulation for transmit packets.
- *  Assumes the entire DIX header is contigous and that there is
+ *  Assumes the entire DIX header is contiguous and that there is
  *  enough room in the buffer for a 802.3 mac header and LLC+SNAP headers.
  */
 int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb)
@@ -449,7 +449,7 @@
 
 /*
  * Performs 802.3 to DIX encapsulation for received packets.
- * Assumes the entire 802.3 header is contigous.
+ * Assumes the entire 802.3 header is contiguous.
  */
 int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb)
 {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index fc595b9..c5f8bc4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -455,7 +455,7 @@
 #define AR_PHY_MODE              (AR_SM_BASE + 0x8)
 #define AR_PHY_ACTIVE            (AR_SM_BASE + 0xc)
 #define AR_PHY_SPUR_MASK_A       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + 0x24)
+#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
 #define AR_PHY_SPECTRAL_SCAN     (AR_SM_BASE + 0x28)
 #define AR_PHY_RADAR_BW_FILTER   (AR_SM_BASE + 0x2c)
 #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
@@ -495,7 +495,7 @@
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A                       0x3FF
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S                     0
 
-#define AR_PHY_TEST              (AR_SM_BASE + 0x160)
+#define AR_PHY_TEST              (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
 
 #define AR_PHY_TEST_BBB_OBS_SEL       0x780000
 #define AR_PHY_TEST_BBB_OBS_SEL_S     19
@@ -521,24 +521,29 @@
 #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S	  29
 
 
-#define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
+#define AR_PHY_TSTDAC            (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
 
-#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
 
 #define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ	0x00000008
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S	3
 
-#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + 0x174)
-#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + 0x178)
-#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + 0x17c)
-#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + 0x180)
-#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + 0x190)
-#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + 0x194)
+#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
 
 #define AR_PHY_HEAVYCLIP_CTL     (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
 #define AR_PHY_HEAVYCLIP_20      (AR_SM_BASE + 0x1a8)
 #define AR_PHY_HEAVYCLIP_40      (AR_SM_BASE + 0x1ac)
+#define AR_PHY_HEAVYCLIP_1	 (AR_SM_BASE + 0x19c)
+#define AR_PHY_HEAVYCLIP_2	 (AR_SM_BASE + 0x1a0)
+#define AR_PHY_HEAVYCLIP_3	 (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_4	 (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_5	 (AR_SM_BASE + 0x1ac)
 #define AR_PHY_ILLEGAL_TXRATE    (AR_SM_BASE + 0x1b0)
 
 #define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a7a81b3..c85c479 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -172,14 +172,6 @@
 	struct sk_buff_head complete_q;
 };
 
-struct ath_atx_ac {
-	struct ath_txq *txq;
-	struct list_head list;
-	struct list_head tid_q;
-	bool clear_ps_filter;
-	bool sched;
-};
-
 struct ath_frame_info {
 	struct ath_buf *bf;
 	u16 framelen;
@@ -242,7 +234,7 @@
 	struct sk_buff_head buf_q;
 	struct sk_buff_head retry_q;
 	struct ath_node *an;
-	struct ath_atx_ac *ac;
+	struct ath_txq *txq;
 	unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
 	u16 seq_start;
 	u16 seq_next;
@@ -252,8 +244,8 @@
 	int baw_tail;   /* next unused tx buffer slot */
 
 	s8 bar_index;
-	bool sched;
 	bool active;
+	bool clear_ps_filter;
 };
 
 struct ath_node {
@@ -261,7 +253,6 @@
 	struct ieee80211_sta *sta; /* station struct we're part of */
 	struct ieee80211_vif *vif; /* interface with which we're associated */
 	struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
-	struct ath_atx_ac ac[IEEE80211_NUM_ACS];
 
 	u16 maxampdu;
 	u8 mpdudensity;
@@ -410,6 +401,12 @@
 	ATH_OFFCHANNEL_ROC_DONE,
 };
 
+enum ath_roc_complete_reason {
+	ATH_ROC_COMPLETE_EXPIRE,
+	ATH_ROC_COMPLETE_ABORT,
+	ATH_ROC_COMPLETE_CANCEL,
+};
+
 struct ath_offchannel {
 	struct ath_chanctx chan;
 	struct timer_list timer;
@@ -471,7 +468,8 @@
 void ath_chanctx_set_next(struct ath_softc *sc, bool force);
 void ath_offchannel_next(struct ath_softc *sc);
 void ath_scan_complete(struct ath_softc *sc, bool abort);
-void ath_roc_complete(struct ath_softc *sc, bool abort);
+void ath_roc_complete(struct ath_softc *sc,
+		      enum ath_roc_complete_reason reason);
 struct ath_chanctx* ath_is_go_chanctx_present(struct ath_softc *sc);
 
 #else
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 2066650..90f5773 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -915,18 +915,27 @@
 	}
 }
 
-void ath_roc_complete(struct ath_softc *sc, bool abort)
+void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-	if (abort)
-		ath_dbg(common, CHAN_CTX, "RoC aborted\n");
-	else
-		ath_dbg(common, CHAN_CTX, "RoC expired\n");
-
 	sc->offchannel.roc_vif = NULL;
 	sc->offchannel.roc_chan = NULL;
-	ieee80211_remain_on_channel_expired(sc->hw);
+
+	switch (reason) {
+	case ATH_ROC_COMPLETE_ABORT:
+		ath_dbg(common, CHAN_CTX, "RoC aborted\n");
+		ieee80211_remain_on_channel_expired(sc->hw);
+		break;
+	case ATH_ROC_COMPLETE_EXPIRE:
+		ath_dbg(common, CHAN_CTX, "RoC expired\n");
+		ieee80211_remain_on_channel_expired(sc->hw);
+		break;
+	case ATH_ROC_COMPLETE_CANCEL:
+		ath_dbg(common, CHAN_CTX, "RoC canceled\n");
+		break;
+	}
+
 	ath_offchannel_next(sc);
 	ath9k_ps_restore(sc);
 }
@@ -1058,7 +1067,7 @@
 	case ATH_OFFCHANNEL_ROC_START:
 	case ATH_OFFCHANNEL_ROC_WAIT:
 		sc->offchannel.state = ATH_OFFCHANNEL_ROC_DONE;
-		ath_roc_complete(sc, false);
+		ath_roc_complete(sc, ATH_ROC_COMPLETE_EXPIRE);
 		break;
 	default:
 		break;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index dbf8f49..da32c8f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -765,6 +765,8 @@
 		[RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
 		[RESET_TYPE_MCI] = "MCI Reset",
 		[RESET_TYPE_CALIBRATION] = "Calibration error",
+		[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
+		[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
 	};
 	int i;
 
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index a8e9319..cd68c5f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -50,6 +50,8 @@
 	RESET_TYPE_BEACON_STUCK,
 	RESET_TYPE_MCI,
 	RESET_TYPE_CALIBRATION,
+	RESET_TX_DMA_ERROR,
+	RESET_RX_DMA_ERROR,
 	__RESET_TYPE_MAX
 };
 
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index ffca918..c2ca57a 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -26,12 +26,11 @@
 	struct ath_node *an = file->private_data;
 	struct ath_softc *sc = an->sc;
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	struct ath_txq *txq;
 	u32 len = 0, size = 4096;
 	char *buf;
 	size_t retval;
-	int tidno, acno;
+	int tidno;
 
 	buf = kzalloc(size, GFP_KERNEL);
 	if (buf == NULL)
@@ -49,26 +48,13 @@
 			 an->mpdudensity);
 
 	len += scnprintf(buf + len, size - len,
-			 "%2s%7s\n", "AC", "SCHED");
-
-	for (acno = 0, ac = &an->ac[acno];
-	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
-		txq = ac->txq;
-		ath_txq_lock(sc, txq);
-		len += scnprintf(buf + len, size - len,
-				 "%2d%7d\n",
-				 acno, ac->sched);
-		ath_txq_unlock(sc, txq);
-	}
-
-	len += scnprintf(buf + len, size - len,
 			 "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
 			 "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
 			 "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
 
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
-		txq = tid->ac->txq;
+		txq = tid->txq;
 		ath_txq_lock(sc, txq);
 		if (tid->active) {
 			len += scnprintf(buf + len, size - len,
@@ -80,7 +66,7 @@
 					 tid->baw_head,
 					 tid->baw_tail,
 					 tid->bar_index,
-					 tid->sched);
+					 !list_empty(&tid->list));
 		}
 		ath_txq_unlock(sc, txq);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index e98a9ea..1ece42c 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -30,6 +30,157 @@
 	u8 pulse_length_pri;
 };
 
+/**** begin: CHIRP ************************************************************/
+
+/* min and max gradients for defined FCC chirping pulses, given by
+ * - 20MHz chirp width over a pulse width of  50us
+ * -  5MHz chirp width over a pulse width of 100us
+ */
+static const int BIN_DELTA_MIN		= 1;
+static const int BIN_DELTA_MAX		= 10;
+
+/* we need at least 3 deltas / 4 samples for a reliable chirp detection */
+#define NUM_DIFFS 3
+static const int FFT_NUM_SAMPLES	= (NUM_DIFFS + 1);
+
+/* Threshold for difference of delta peaks */
+static const int MAX_DIFF		= 2;
+
+/* width range to be checked for chirping */
+static const int MIN_CHIRP_PULSE_WIDTH	= 20;
+static const int MAX_CHIRP_PULSE_WIDTH	= 110;
+
+struct ath9k_dfs_fft_20 {
+	u8 bin[28];
+	u8 lower_bins[3];
+} __packed;
+struct ath9k_dfs_fft_40 {
+	u8 bin[64];
+	u8 lower_bins[3];
+	u8 upper_bins[3];
+} __packed;
+
+static inline int fft_max_index(u8 *bins)
+{
+	return (bins[2] & 0xfc) >> 2;
+}
+static inline int fft_max_magnitude(u8 *bins)
+{
+	return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
+}
+static inline u8 fft_bitmap_weight(u8 *bins)
+{
+	return bins[0] & 0x3f;
+}
+
+static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
+				    bool is_ctl, bool is_ext)
+{
+	const int DFS_UPPER_BIN_OFFSET = 64;
+	/* if detected radar on both channels, select the significant one */
+	if (is_ctl && is_ext) {
+		/* first check whether channels have 'strong' bins */
+		is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
+		is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
+
+		/* if still unclear, take higher magnitude */
+		if (is_ctl && is_ext) {
+			int mag_lower = fft_max_magnitude(fft->lower_bins);
+			int mag_upper = fft_max_magnitude(fft->upper_bins);
+			if (mag_upper > mag_lower)
+				is_ctl = false;
+			else
+				is_ext = false;
+		}
+	}
+	if (is_ctl)
+		return fft_max_index(fft->lower_bins);
+	return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET;
+}
+static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
+				 int datalen, bool is_ctl, bool is_ext)
+{
+	int i;
+	int max_bin[FFT_NUM_SAMPLES];
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	int prev_delta;
+
+	if (IS_CHAN_HT40(ah->curchan)) {
+		struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data;
+		int num_fft_packets = datalen / sizeof(*fft);
+		if (num_fft_packets == 0)
+			return false;
+
+		ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
+			datalen, num_fft_packets);
+		if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+			ath_dbg(common, DFS, "not enough packets for chirp\n");
+			return false;
+		}
+		/* HW sometimes adds 2 garbage bytes in front of FFT samples */
+		if ((datalen % sizeof(*fft)) == 2) {
+			fft = (struct ath9k_dfs_fft_40 *) (data + 2);
+			ath_dbg(common, DFS, "fixing datalen by 2\n");
+		}
+		if (IS_CHAN_HT40MINUS(ah->curchan)) {
+			int temp = is_ctl;
+			is_ctl = is_ext;
+			is_ext = temp;
+		}
+		for (i = 0; i < FFT_NUM_SAMPLES; i++)
+			max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
+							      is_ext);
+	} else {
+		struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data;
+		int num_fft_packets = datalen / sizeof(*fft);
+		if (num_fft_packets == 0)
+			return false;
+		ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
+			datalen, num_fft_packets);
+		if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+			ath_dbg(common, DFS, "not enough packets for chirp\n");
+			return false;
+		}
+		/* in ht20, this is a 6-bit signed number => shift it to 0 */
+		for (i = 0; i < FFT_NUM_SAMPLES; i++)
+			max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20;
+	}
+	ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n",
+		max_bin[0], max_bin[1], max_bin[2], max_bin[3]);
+
+	/* Check for chirp attributes within specs
+	 * a) delta of adjacent max_bins is within range
+	 * b) delta of adjacent deltas are within tolerance
+	 */
+	prev_delta = 0;
+	for (i = 0; i < NUM_DIFFS; i++) {
+		int ddelta = -1;
+		int delta = max_bin[i + 1] - max_bin[i];
+
+		/* ensure gradient is within valid range */
+		if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) {
+			ath_dbg(common, DFS, "CHIRP: invalid delta %d "
+				"in sample %d\n", delta, i);
+			return false;
+		}
+		if (i == 0)
+			goto done;
+		ddelta = delta - prev_delta;
+		if (abs(ddelta) > MAX_DIFF) {
+			ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n",
+				ddelta);
+			return false;
+		}
+done:
+		ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n",
+			i, delta, ddelta);
+		prev_delta = delta;
+	}
+	return true;
+}
+/**** end: CHIRP **************************************************************/
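/* [Editor's note] Worked example for the gradient check above (not part of
 * the patch): with peak indices max_bin[] = {12, 16, 21, 25} the adjacent
 * deltas are {4, 5, 4}, each within BIN_DELTA_MIN..BIN_DELTA_MAX, and the
 * deltas of deltas {1, -1} stay within MAX_DIFF, so the pulse is reported as
 * chirping.  A flat sequence such as {12, 12, 13, 13} fails on its first
 * delta (0 < BIN_DELTA_MIN) and is rejected.
 */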
+
 /* convert pulse duration to usecs, considering clock mode */
 static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
 {
@@ -113,12 +264,6 @@
 		return false;
 	}
 
-	/*
-	 * TODO: check chirping pulses
-	 *	 checks for chirping are dependent on the DFS regulatory domain
-	 *	 used, which is yet TBD
-	 */
-
 	/* convert duration to usecs */
 	pe->width = dur_to_usecs(sc->sc_ah, dur);
 	pe->rssi = rssi;
@@ -190,6 +335,16 @@
 	if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
 		return;
 
+	if (pe.width > MIN_CHIRP_PULSE_WIDTH &&
+	    pe.width < MAX_CHIRP_PULSE_WIDTH) {
+		bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND);
+		bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND);
+		int clen = datalen - 3;
+		pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext);
+	} else {
+		pe.chirp = false;
+	}
+
 	ath_dbg(common, DFS,
 		"ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
 		"width=%d, rssi=%d, delta_ts=%llu\n",
@@ -198,7 +353,8 @@
 	sc->dfs_prev_pulse_ts = pe.ts;
 	if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
 		ath9k_dfs_process_radar_pulse(sc, &pe);
-	if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+	if (IS_CHAN_HT40(ah->curchan) &&
+	    ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
 		pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
 		ath9k_dfs_process_radar_pulse(sc, &pe);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 39eaf9b..1e84882 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -74,7 +74,7 @@
 
 static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
 {
-	int time_left;
+	unsigned long time_left;
 
 	if (atomic_read(&priv->htc->tgt_ready) > 0) {
 		atomic_dec(&priv->htc->tgt_ready);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index dab1323..172a9ff4a 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -794,8 +794,11 @@
 		common->ani.longcal_timer = timestamp;
 	}
 
-	/* Short calibration applies only while caldone is false */
-	if (!common->ani.caldone) {
+	/*
+	 * Short calibration applies only while caldone
+	 * is false or -ETIMEDOUT
+	 */
+	if (common->ani.caldone <= 0) {
 		if ((timestamp - common->ani.shortcal_timer) >=
 		    short_cal_interval) {
 			shortcal = true;
@@ -844,7 +847,11 @@
 	*/
 	cal_interval = ATH_LONG_CALINTERVAL;
 	cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
-	if (!common->ani.caldone)
+	/*
+	 * Short calibration applies only while caldone
+	 * is false or -ETIMEDOUT
+	 */
+	if (common->ani.caldone <= 0)
 		cal_interval = min(cal_interval, (u32)short_cal_interval);
 
 	ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d2408da..2294709 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -146,7 +146,8 @@
 {
 	struct sk_buff *skb;
 	struct htc_config_pipe_msg *cp_msg;
-	int ret, time_left;
+	int ret;
+	unsigned long time_left;
 
 	skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
 	if (!skb) {
@@ -184,7 +185,8 @@
 {
 	struct sk_buff *skb;
 	struct htc_comp_msg *comp_msg;
-	int ret = 0, time_left;
+	int ret = 0;
+	unsigned long time_left;
 
 	skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
 	if (!skb) {
@@ -236,7 +238,8 @@
 	struct sk_buff *skb;
 	struct htc_endpoint *endpoint;
 	struct htc_conn_svc_msg *conn_msg;
-	int ret, time_left;
+	int ret;
+	unsigned long time_left;
 
 	/* Find an available endpoint */
 	endpoint = get_next_avail_ep(target->endpoint);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index a31a680..1dd0339 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -3186,6 +3186,7 @@
 	{ AR_SREV_VERSION_9550,         "9550" },
 	{ AR_SREV_VERSION_9565,         "9565" },
 	{ AR_SREV_VERSION_9531,         "9531" },
+	{ AR_SREV_VERSION_9561,         "9561" },
 };
 
 /* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index eff0e53..57f95f2 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -736,13 +736,14 @@
 				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
 				 BIT(NL80211_IFTYPE_P2P_GO) },
 	{ .max = 1,	.types = BIT(NL80211_IFTYPE_ADHOC) },
+	{ .max = 1,	.types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
 };
 
 static const struct ieee80211_iface_combination if_comb_multi[] = {
 	{
 		.limits = if_limits_multi,
 		.n_limits = ARRAY_SIZE(if_limits_multi),
-		.max_interfaces = 2,
+		.max_interfaces = 3,
 		.num_different_channels = 2,
 		.beacon_int_infra_match = true,
 	},
@@ -826,6 +827,7 @@
 	ieee80211_hw_set(hw, SIGNAL_DBM);
 	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
 	ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 
 	if (ath9k_ps_enable)
 		ieee80211_hw_set(hw, SUPPORTS_PS);
@@ -855,6 +857,10 @@
 			BIT(NL80211_IFTYPE_MESH_POINT) |
 			BIT(NL80211_IFTYPE_WDS);
 
+		if (ath9k_is_chanctx_enabled())
+			hw->wiphy->interface_modes |=
+					BIT(NL80211_IFTYPE_P2P_DEVICE);
+
 			hw->wiphy->iface_combinations = if_comb;
 			hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
 	}
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index 90631d7..5ad0fee 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -172,7 +172,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_tx_control txctl;
-	int time_left;
+	unsigned long time_left;
 
 	memset(&txctl, 0, sizeof(txctl));
 	txctl.txq = sc->tx.txq_map[IEEE80211_AC_BE];
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index cfd45cb..c27143b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1459,13 +1459,18 @@
 				   u64 multicast)
 {
 	struct ath_softc *sc = hw->priv;
+	struct ath_chanctx *ctx;
 	u32 rfilt;
 
 	changed_flags &= SUPPORTED_FILTERS;
 	*total_flags &= SUPPORTED_FILTERS;
 
 	spin_lock_bh(&sc->chan_lock);
-	sc->cur_chan->rxfilter = *total_flags;
+	ath_for_each_chanctx(sc, ctx)
+		ctx->rxfilter = *total_flags;
+#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
+	sc->offchannel.chan.rxfilter = *total_flags;
+#endif
 	spin_unlock_bh(&sc->chan_lock);
 
 	ath9k_ps_wakeup(sc);
@@ -2246,7 +2251,7 @@
 
 		del_timer_sync(&sc->offchannel.timer);
 		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-			ath_roc_complete(sc, true);
+			ath_roc_complete(sc, ATH_ROC_COMPLETE_ABORT);
 	}
 
 	if (test_bit(ATH_OP_SCANNING, &common->op_flags)) {
@@ -2355,7 +2360,7 @@
 
 	if (sc->offchannel.roc_vif) {
 		if (sc->offchannel.state >= ATH_OFFCHANNEL_ROC_START)
-			ath_roc_complete(sc, true);
+			ath_roc_complete(sc, ATH_ROC_COMPLETE_CANCEL);
 	}
 
 	mutex_unlock(&sc->mutex);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 6c75fb1..d3189da 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -491,10 +491,9 @@
 
 	if (!(ah->ah_flags & AH_UNPLUGGED) &&
 	    unlikely(!stopped)) {
-		ath_err(ath9k_hw_common(sc->sc_ah),
-			"Could not stop RX, we could be "
-			"confusing the DMA engine when we start RX up\n");
-		ATH_DBG_WARN_ON_ONCE(!stopped);
+		ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+			"Failed to stop Rx DMA\n");
+		RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
 	}
 	return stopped && !reset;
 }
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index ca533b4..9c16e2a 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -299,7 +299,8 @@
 		       sizeof(struct wmi_cmd_hdr);
 	struct sk_buff *skb;
 	u8 *data;
-	int time_left, ret = 0;
+	unsigned long time_left;
+	int ret = 0;
 
 	if (ah->ah_flags & AH_UNPLUGGED)
 		return 0;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 3ad79bb..3e3dac3 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -106,7 +106,6 @@
 static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
 			     struct ath_atx_tid *tid)
 {
-	struct ath_atx_ac *ac = tid->ac;
 	struct list_head *list;
 	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
 	struct ath_chanctx *ctx = avp->chanctx;
@@ -114,19 +113,9 @@
 	if (!ctx)
 		return;
 
-	if (tid->sched)
-		return;
-
-	tid->sched = true;
-	list_add_tail(&tid->list, &ac->tid_q);
-
-	if (ac->sched)
-		return;
-
-	ac->sched = true;
-
 	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
-	list_add_tail(&ac->list, list);
+	if (list_empty(&tid->list))
+		list_add_tail(&tid->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -208,7 +197,7 @@
 static void
 ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = tid->ac->txq;
+	struct ath_txq *txq = tid->txq;
 	struct ieee80211_tx_info *tx_info;
 	struct sk_buff *skb, *tskb;
 	struct ath_buf *bf;
@@ -237,7 +226,7 @@
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = tid->ac->txq;
+	struct ath_txq *txq = tid->txq;
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 	struct list_head bf_head;
@@ -644,7 +633,7 @@
 			ath_tx_queue_tid(sc, txq, tid);
 
 			if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
-				tid->ac->clear_ps_filter = true;
+				tid->clear_ps_filter = true;
 		}
 	}
 
@@ -734,7 +723,7 @@
 	struct ieee80211_tx_rate *rates;
 	u32 max_4ms_framelen, frmlen;
 	u16 aggr_limit, bt_aggr_limit, legacy = 0;
-	int q = tid->ac->txq->mac80211_qnum;
+	int q = tid->txq->mac80211_qnum;
 	int i;
 
 	skb = bf->bf_mpdu;
@@ -1471,8 +1460,8 @@
 	if (list_empty(&bf_q))
 		return false;
 
-	if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
-		tid->ac->clear_ps_filter = false;
+	if (tid->clear_ps_filter || tid->an->no_ps_filter) {
+		tid->clear_ps_filter = false;
 		tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
 	}
 
@@ -1491,7 +1480,7 @@
 
 	an = (struct ath_node *)sta->drv_priv;
 	txtid = ATH_AN_2_TID(an, tid);
-	txq = txtid->ac->txq;
+	txq = txtid->txq;
 
 	ath_txq_lock(sc, txq);
 
@@ -1525,7 +1514,7 @@
 {
 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-	struct ath_txq *txq = txtid->ac->txq;
+	struct ath_txq *txq = txtid->txq;
 
 	ath_txq_lock(sc, txq);
 	txtid->active = false;
@@ -1538,7 +1527,6 @@
 		       struct ath_node *an)
 {
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	struct ath_txq *txq;
 	bool buffered;
 	int tidno;
@@ -1546,25 +1534,18 @@
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-		ac = tid->ac;
-		txq = ac->txq;
+		txq = tid->txq;
 
 		ath_txq_lock(sc, txq);
 
-		if (!tid->sched) {
+		if (list_empty(&tid->list)) {
 			ath_txq_unlock(sc, txq);
 			continue;
 		}
 
 		buffered = ath_tid_has_buffered(tid);
 
-		tid->sched = false;
-		list_del(&tid->list);
-
-		if (ac->sched) {
-			ac->sched = false;
-			list_del(&ac->list);
-		}
+		list_del_init(&tid->list);
 
 		ath_txq_unlock(sc, txq);
 
@@ -1575,18 +1556,16 @@
 void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 {
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	struct ath_txq *txq;
 	int tidno;
 
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-		ac = tid->ac;
-		txq = ac->txq;
+		txq = tid->txq;
 
 		ath_txq_lock(sc, txq);
-		ac->clear_ps_filter = true;
+		tid->clear_ps_filter = true;
 
 		if (ath_tid_has_buffered(tid)) {
 			ath_tx_queue_tid(sc, txq, tid);
@@ -1606,7 +1585,7 @@
 
 	an = (struct ath_node *)sta->drv_priv;
 	tid = ATH_AN_2_TID(an, tidno);
-	txq = tid->ac->txq;
+	txq = tid->txq;
 
 	ath_txq_lock(sc, txq);
 
@@ -1645,7 +1624,7 @@
 
 		tid = ATH_AN_2_TID(an, i);
 
-		ath_txq_lock(sc, tid->ac->txq);
+		ath_txq_lock(sc, tid->txq);
 		while (nframes > 0) {
 			bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
 			if (!bf)
@@ -1669,7 +1648,7 @@
 			if (an->sta && !ath_tid_has_buffered(tid))
 				ieee80211_sta_set_buffered(an->sta, i, false);
 		}
-		ath_txq_unlock_complete(sc, tid->ac->txq);
+		ath_txq_unlock_complete(sc, tid->txq);
 	}
 
 	if (list_empty(&bf_q))
@@ -1883,8 +1862,11 @@
 			npend |= BIT(i);
 	}
 
-	if (npend)
-		ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+	if (npend) {
+		RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
+		ath_dbg(common, RESET,
+			"Failed to stop TX DMA, queues=0x%03x!\n", npend);
+	}
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (!ATH_TXQ_SETUP(sc, i))
@@ -1915,9 +1897,8 @@
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	struct ath_atx_ac *ac, *last_ac;
 	struct ath_atx_tid *tid, *last_tid;
-	struct list_head *ac_list;
+	struct list_head *tid_list;
 	bool sent = false;
 
 	if (txq->mac80211_qnum < 0)
@@ -1927,63 +1908,45 @@
 		return;
 
 	spin_lock_bh(&sc->chan_lock);
-	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+	tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
 
-	if (list_empty(ac_list)) {
+	if (list_empty(tid_list)) {
 		spin_unlock_bh(&sc->chan_lock);
 		return;
 	}
 
 	rcu_read_lock();
 
-	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
-	while (!list_empty(ac_list)) {
+	last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
+	while (!list_empty(tid_list)) {
 		bool stop = false;
 
 		if (sc->cur_chan->stopped)
 			break;
 
-		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
-		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
-		list_del(&ac->list);
-		ac->sched = false;
+		tid = list_first_entry(tid_list, struct ath_atx_tid, list);
+		list_del_init(&tid->list);
 
-		while (!list_empty(&ac->tid_q)) {
+		if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+			sent = true;
 
-			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
-					       list);
-			list_del(&tid->list);
-			tid->sched = false;
-
-			if (ath_tx_sched_aggr(sc, txq, tid, &stop))
-				sent = true;
-
-			/*
-			 * add tid to round-robin queue if more frames
-			 * are pending for the tid
-			 */
-			if (ath_tid_has_buffered(tid))
-				ath_tx_queue_tid(sc, txq, tid);
-
-			if (stop || tid == last_tid)
-				break;
-		}
-
-		if (!list_empty(&ac->tid_q) && !ac->sched) {
-			ac->sched = true;
-			list_add_tail(&ac->list, ac_list);
-		}
+		/*
+		 * add tid to round-robin queue if more frames
+		 * are pending for the tid
+		 */
+		if (ath_tid_has_buffered(tid))
+			ath_tx_queue_tid(sc, txq, tid);
 
 		if (stop)
 			break;
 
-		if (ac == last_ac) {
+		if (tid == last_tid) {
 			if (!sent)
 				break;
 
 			sent = false;
-			last_ac = list_entry(ac_list->prev,
-					     struct ath_atx_ac, list);
+			last_tid = list_entry(tid_list->prev,
+					      struct ath_atx_tid, list);
 		}
 	}
 
@@ -2373,10 +2336,10 @@
 		txq = sc->tx.uapsdq;
 		ath_txq_lock(sc, txq);
 	} else if (txctl->an && queue) {
-		WARN_ON(tid->ac->txq != txctl->txq);
+		WARN_ON(tid->txq != txctl->txq);
 
 		if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
-			tid->ac->clear_ps_filter = true;
+			tid->clear_ps_filter = true;
 
 		/*
 		 * Add this frame to software queue for scheduling later
@@ -2470,8 +2433,8 @@
 	bf = list_first_entry(&bf_q, struct ath_buf, list);
 	hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
 
-	if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
-		hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+	if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+		hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
 		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
 			sizeof(*hdr), DMA_TO_DEVICE);
 	}
@@ -2870,7 +2833,6 @@
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
 	struct ath_atx_tid *tid;
-	struct ath_atx_ac *ac;
 	int tidno, acno;
 
 	for (tidno = 0, tid = &an->tid[tidno];
@@ -2881,26 +2843,18 @@
 		tid->seq_start = tid->seq_next = 0;
 		tid->baw_size  = WME_MAX_BA;
 		tid->baw_head  = tid->baw_tail = 0;
-		tid->sched     = false;
 		tid->active	   = false;
+		tid->clear_ps_filter = true;
 		__skb_queue_head_init(&tid->buf_q);
 		__skb_queue_head_init(&tid->retry_q);
+		INIT_LIST_HEAD(&tid->list);
 		acno = TID_TO_WME_AC(tidno);
-		tid->ac = &an->ac[acno];
-	}
-
-	for (acno = 0, ac = &an->ac[acno];
-	     acno < IEEE80211_NUM_ACS; acno++, ac++) {
-		ac->sched    = false;
-		ac->clear_ps_filter = true;
-		ac->txq = sc->tx.txq_map[acno];
-		INIT_LIST_HEAD(&ac->tid_q);
+		tid->txq = sc->tx.txq_map[acno];
 	}
 }
 
 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 {
-	struct ath_atx_ac *ac;
 	struct ath_atx_tid *tid;
 	struct ath_txq *txq;
 	int tidno;
@@ -2908,20 +2862,12 @@
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
 
-		ac = tid->ac;
-		txq = ac->txq;
+		txq = tid->txq;
 
 		ath_txq_lock(sc, txq);
 
-		if (tid->sched) {
-			list_del(&tid->list);
-			tid->sched = false;
-		}
-
-		if (ac->sched) {
-			list_del(&ac->list);
-			tid->ac->sched = false;
-		}
+		if (!list_empty(&tid->list))
+			list_del_init(&tid->list);
 
 		ath_tid_drain(sc, txq, tid);
 		tid->active = false;
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index 508eccf..d59d83e 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -40,6 +40,8 @@
 		return "P2P-CLIENT";
 	case NL80211_IFTYPE_P2P_GO:
 		return "P2P-GO";
+	case NL80211_IFTYPE_OCB:
+		return "OCB";
 	default:
 		return "UNKNOWN";
 	}
diff --git a/drivers/net/wireless/ath/dfs_pri_detector.c b/drivers/net/wireless/ath/dfs_pri_detector.c
index 1b5ad19..cc5c592 100644
--- a/drivers/net/wireless/ath/dfs_pri_detector.c
+++ b/drivers/net/wireless/ath/dfs_pri_detector.c
@@ -273,7 +273,7 @@
 				tmp_false_count++;
 			}
 		}
-		if (ps.count < min_count)
+		if (ps.count <= min_count)
 			/* did not reach minimum count, drop sequence */
 			continue;
 
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 050506f..64b4326 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -12,6 +12,7 @@
 wil6210-y += rx_reorder.o
 wil6210-y += ioctl.o
 wil6210-y += fw.o
+wil6210-y += pm.o
 wil6210-y += pmc.o
 wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
 wil6210-y += wil_platform.o
diff --git a/drivers/net/wireless/ath/wil6210/boot_loader.h b/drivers/net/wireless/ath/wil6210/boot_loader.h
new file mode 100644
index 0000000..c131b5e
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/boot_loader.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file contains the definitions for the boot loader
+ * for the Qualcomm "Sparrow" 60 Gigabit wireless solution.
+ */
+#ifndef BOOT_LOADER_EXPORT_H_
+#define BOOT_LOADER_EXPORT_H_
+
+struct bl_dedicated_registers_v1 {
+	__le32	boot_loader_ready;		/* 0x880A3C driver will poll
+						 * this Dword until the BL
+						 * sets it to 1 (initial value
+						 * should be 0)
+						 */
+	__le32	boot_loader_struct_version;	/* 0x880A40 BL struct ver. */
+	__le16	rf_type;			/* 0x880A44 connected RF ID */
+	__le16	rf_status;			/* 0x880A46 RF status,
+						 * 0 is OK else error
+						 */
+	__le32	baseband_type;			/* 0x880A48 board type ID */
+	u8	mac_address[6];			/* 0x880A4c BL mac address */
+	u8	bl_version_major;		/* 0x880A52 BL ver. major */
+	u8	bl_version_minor;		/* 0x880A53 BL ver. minor */
+	__le16	bl_version_subminor;		/* 0x880A54 BL ver. subminor */
+	__le16	bl_version_build;		/* 0x880A56 BL ver. build */
+	/* valid only for version 2 and above */
+	__le32  bl_assert_code;         /* 0x880A58 BL Assert code */
+	__le32  bl_assert_blink;        /* 0x880A5C BL Assert Branch */
+	__le32  bl_reserved[22];        /* 0x880A60 - 0x880AB4 */
+	__le32  bl_magic_number;        /* 0x880AB8 BL Magic number */
+} __packed;
+
+/* the following struct is the version 0 struct */
+
+struct bl_dedicated_registers_v0 {
+	__le32	boot_loader_ready;		/* 0x880A3C driver will poll
+						 * this Dword until the BL
+						 * sets it to 1 (initial value
+						 * should be 0)
+						 */
+#define BL_READY (1)	/* ready indication */
+	__le32	boot_loader_struct_version;	/* 0x880A40 BL struct ver. */
+	__le32	rf_type;			/* 0x880A44 connected RF ID */
+	__le32	baseband_type;			/* 0x880A48 board type ID */
+	u8	mac_address[6];			/* 0x880A4c BL mac address */
+} __packed;
+
+#endif /* BOOT_LOADER_EXPORT_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index c79cfe0..20d07ef 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -336,12 +336,9 @@
 	else
 		wil_dbg_misc(wil, "Scan has no IE's\n");
 
-	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len,
-			request->ie);
-	if (rc) {
-		wil_err(wil, "Aborting scan, set_ie failed: %d\n", rc);
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
+	if (rc)
 		goto out;
-	}
 
 	rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
 			cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
@@ -462,10 +459,8 @@
 	 * ies in FW.
 	 */
 	rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_REQ, sme->ie_len, sme->ie);
-	if (rc) {
-		wil_err(wil, "WMI_SET_APPIE_CMD failed\n");
+	if (rc)
 		goto out;
-	}
 
 	/* WMI_CONNECT_CMD */
 	memset(&conn, 0, sizeof(conn));
@@ -722,113 +717,58 @@
 {
 	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
 	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-	int rc = 0;
 
 	if (bcon->probe_resp_len <= hlen)
 		return 0;
 
+/* always use IEs from the full probe frame, they have more info,
+ * notably RSN
+ */
+	bcon->proberesp_ies = f->u.probe_resp.variable;
+	bcon->proberesp_ies_len = bcon->probe_resp_len - hlen;
 	if (!bcon->assocresp_ies) {
-		bcon->assocresp_ies = f->u.probe_resp.variable;
-		bcon->assocresp_ies_len = bcon->probe_resp_len - hlen;
-		rc = 1;
+		bcon->assocresp_ies = bcon->proberesp_ies;
+		bcon->assocresp_ies_len = bcon->proberesp_ies_len;
 	}
 
+	return 1;
+}
+
+/* internal functions for device reset and starting AP */
+static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
+				 struct cfg80211_beacon_data *bcon)
+{
+	int rc;
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
+			bcon->proberesp_ies);
+	if (rc)
+		return rc;
+
+	rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
+			bcon->assocresp_ies);
+#if 0 /* to use beacon IEs, remove this #if 0 */
+	if (rc)
+		return rc;
+
+	rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail);
+#endif
+
 	return rc;
 }
 
-static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
-				      struct net_device *ndev,
-				      struct cfg80211_beacon_data *bcon)
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+				  struct net_device *ndev,
+				  const u8 *ssid, size_t ssid_len, u32 privacy,
+				  int bi, u8 chan,
+				  struct cfg80211_beacon_data *bcon,
+				  u8 hidden_ssid)
 {
 	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-	const u8 *pr_ies = NULL;
-	size_t pr_ies_len = 0;
 	int rc;
-
-	wil_dbg_misc(wil, "%s()\n", __func__);
-	wil_print_bcon_data(bcon);
-
-	if (bcon->probe_resp_len > hlen) {
-		pr_ies = f->u.probe_resp.variable;
-		pr_ies_len = bcon->probe_resp_len - hlen;
-	}
-
-	if (wil_fix_bcon(wil, bcon)) {
-		wil_dbg_misc(wil, "Fixed bcon\n");
-		wil_print_bcon_data(bcon);
-	}
-
-	/* FW do not form regular beacon, so bcon IE's are not set
-	 * For the DMG bcon, when it will be supported, bcon IE's will
-	 * be reused; add something like:
-	 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-	 * bcon->beacon_ies);
-	 */
-	rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-	if (rc) {
-		wil_err(wil, "set_ie(PROBE_RESP) failed\n");
-		return rc;
-	}
-
-	rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
-			bcon->assocresp_ies_len,
-			bcon->assocresp_ies);
-	if (rc) {
-		wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
-		return rc;
-	}
-
-	return 0;
-}
-
-static int wil_cfg80211_start_ap(struct wiphy *wiphy,
-				 struct net_device *ndev,
-				 struct cfg80211_ap_settings *info)
-{
-	int rc = 0;
-	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 	struct wireless_dev *wdev = ndev->ieee80211_ptr;
-	struct ieee80211_channel *channel = info->chandef.chan;
-	struct cfg80211_beacon_data *bcon = &info->beacon;
-	struct cfg80211_crypto_settings *crypto = &info->crypto;
 	u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
-	struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
-	size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
-	const u8 *pr_ies = NULL;
-	size_t pr_ies_len = 0;
-	u8 hidden_ssid;
-
-	wil_dbg_misc(wil, "%s()\n", __func__);
-
-	if (!channel) {
-		wil_err(wil, "AP: No channel???\n");
-		return -EINVAL;
-	}
-
-	wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
-		     channel->center_freq, info->privacy ? "secure" : "open");
-	wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
-		     info->privacy, info->auth_type);
-	wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
-		     info->hidden_ssid);
-	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
-		     info->dtim_period);
-	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
-			     info->ssid, info->ssid_len);
-	wil_print_bcon_data(bcon);
-	wil_print_crypto(wil, crypto);
-
-	if (bcon->probe_resp_len > hlen) {
-		pr_ies = f->u.probe_resp.variable;
-		pr_ies_len = bcon->probe_resp_len - hlen;
-	}
-
-	if (wil_fix_bcon(wil, bcon)) {
-		wil_dbg_misc(wil, "Fixed bcon\n");
-		wil_print_bcon_data(bcon);
-	}
 
 	wil_set_recovery_state(wil, fw_recovery_idle);
 
@@ -839,24 +779,96 @@
 	if (rc)
 		goto out;
 
-	rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
+	rc = wmi_set_ssid(wil, ssid_len, ssid);
 	if (rc)
 		goto out;
 
-	/* IE's */
-	/* bcon 'head IE's are not relevant for 60g band */
-	/*
-	 * FW do not form regular beacon, so bcon IE's are not set
-	 * For the DMG bcon, when it will be supported, bcon IE's will
-	 * be reused; add something like:
-	 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-	 * bcon->beacon_ies);
-	 */
-	wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-	wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
-		   bcon->assocresp_ies);
+	rc = _wil_cfg80211_set_ies(wiphy, bcon);
+	if (rc)
+		goto out;
 
-	wil->privacy = info->privacy;
+	wil->privacy = privacy;
+	wil->channel = chan;
+	wil->hidden_ssid = hidden_ssid;
+
+	netif_carrier_on(ndev);
+
+	rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+	if (rc)
+		goto err_pcp_start;
+
+	rc = wil_bcast_init(wil);
+	if (rc)
+		goto err_bcast;
+
+	goto out; /* success */
+
+err_bcast:
+	wmi_pcp_stop(wil);
+err_pcp_start:
+	netif_carrier_off(ndev);
+out:
+	mutex_unlock(&wil->mutex);
+	return rc;
+}
+
+static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
+				      struct net_device *ndev,
+				      struct cfg80211_beacon_data *bcon)
+{
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	int rc;
+	u32 privacy = 0;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+	wil_print_bcon_data(bcon);
+
+	if (wil_fix_bcon(wil, bcon)) {
+		wil_dbg_misc(wil, "Fixed bcon\n");
+		wil_print_bcon_data(bcon);
+	}
+
+	if (bcon->proberesp_ies &&
+	    cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies,
+			     bcon->proberesp_ies_len))
+		privacy = 1;
+
+	/* in case privacy has changed, need to restart the AP */
+	if (wil->privacy != privacy) {
+		struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+		wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+			     wil->privacy, privacy);
+
+		rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
+					    wdev->ssid_len, privacy,
+					    wdev->beacon_interval,
+					    wil->channel, bcon,
+					    wil->hidden_ssid);
+	} else {
+		rc = _wil_cfg80211_set_ies(wiphy, bcon);
+	}
+
+	return rc;
+}
+
+static int wil_cfg80211_start_ap(struct wiphy *wiphy,
+				 struct net_device *ndev,
+				 struct cfg80211_ap_settings *info)
+{
+	int rc;
+	struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+	struct ieee80211_channel *channel = info->chandef.chan;
+	struct cfg80211_beacon_data *bcon = &info->beacon;
+	struct cfg80211_crypto_settings *crypto = &info->crypto;
+	u8 hidden_ssid;
+
+	wil_dbg_misc(wil, "%s()\n", __func__);
+
+	if (!channel) {
+		wil_err(wil, "AP: No channel???\n");
+		return -EINVAL;
+	}
 
 	switch (info->hidden_ssid) {
 	case NL80211_HIDDEN_SSID_NOT_IN_USE:
@@ -872,28 +884,32 @@
 		break;
 
 	default:
-		rc = -EOPNOTSUPP;
-		goto out;
+		wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+		return -EOPNOTSUPP;
+	}
+	wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
+		     channel->center_freq, info->privacy ? "secure" : "open");
+	wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
+		     info->privacy, info->auth_type);
+	wil_dbg_misc(wil, "Hidden SSID mode: %d\n",
+		     info->hidden_ssid);
+	wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
+		     info->dtim_period);
+	print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
+			     info->ssid, info->ssid_len);
+	wil_print_bcon_data(bcon);
+	wil_print_crypto(wil, crypto);
+
+	if (wil_fix_bcon(wil, bcon)) {
+		wil_dbg_misc(wil, "Fixed bcon\n");
+		wil_print_bcon_data(bcon);
 	}
 
-	netif_carrier_on(ndev);
+	rc = _wil_cfg80211_start_ap(wiphy, ndev,
+				    info->ssid, info->ssid_len, info->privacy,
+				    info->beacon_interval, channel->hw_value,
+				    bcon, hidden_ssid);
 
-	rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
-			   channel->hw_value, hidden_ssid);
-	if (rc)
-		goto err_pcp_start;
-
-	rc = wil_bcast_init(wil);
-	if (rc)
-		goto err_bcast;
-
-	goto out; /* success */
-err_bcast:
-	wmi_pcp_stop(wil);
-err_pcp_start:
-	netif_carrier_off(ndev);
-out:
-	mutex_unlock(&wil->mutex);
 	return rc;
 }
 
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 75219a1b..613ca2b 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -62,7 +62,7 @@
 	seq_printf(s, "  swhead = %d\n", vring->swhead);
 	seq_printf(s, "  hwtail = [0x%08x] -> ", vring->hwtail);
 	if (x) {
-		v = ioread32(x);
+		v = readl(x);
 		seq_printf(s, "0x%08x = %d\n", v, v);
 	} else {
 		seq_puts(s, "???\n");
@@ -268,7 +268,7 @@
 
 static int wil_debugfs_iomem_x32_set(void *data, u64 val)
 {
-	iowrite32(val, (void __iomem *)data);
+	writel(val, (void __iomem *)data);
 	wmb(); /* make sure write propagated to HW */
 
 	return 0;
@@ -276,7 +276,7 @@
 
 static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
 {
-	*val = ioread32((void __iomem *)data);
+	*val = readl((void __iomem *)data);
 
 	return 0;
 }
@@ -306,7 +306,7 @@
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
-			wil_debugfs_ulong_set, "%llu\n");
+			wil_debugfs_ulong_set, "0x%llx\n");
 
 static struct dentry *wil_debugfs_create_ulong(const char *name, umode_t mode,
 					       struct dentry *parent,
@@ -477,7 +477,7 @@
 	void __iomem *a = wmi_buffer(wil, cpu_to_le32(mem_addr));
 
 	if (a)
-		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, ioread32(a));
+		seq_printf(s, "[0x%08x] = 0x%08x\n", mem_addr, readl(a));
 	else
 		seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
 
@@ -1344,6 +1344,7 @@
 {
 	int i;
 	u16 index = ((r->head_seq_num - r->ssn) & 0xfff) % r->buf_size;
+	unsigned long long drop_dup = r->drop_dup, drop_old = r->drop_old;
 
 	seq_printf(s, "([%2d] %3d TU) 0x%03x [", r->buf_size, r->timeout,
 		   r->head_seq_num);
@@ -1353,7 +1354,10 @@
 		else
 			seq_printf(s, "%c", r->reorder_buf[i] ? '*' : '_');
 	}
-	seq_printf(s, "] last drop 0x%03x\n", r->ssn_last_drop);
+	seq_printf(s,
+		   "] total %llu drop %llu (dup %llu + old %llu) last 0x%03x\n",
+		   r->total, drop_dup + drop_old, drop_dup, drop_old,
+		   r->ssn_last_drop);
 }
 
 static int wil_sta_debugfs_show(struct seq_file *s, void *data)
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 0ea695f..7053b62 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -50,19 +50,13 @@
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
-	tx_itr_en = ioread32(wil->csr +
-			     HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL));
+	tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
 	if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
-		tx_itr_val =
-			ioread32(wil->csr +
-				 HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH));
+		tx_itr_val = wil_r(wil, RGF_DMA_ITR_TX_CNT_TRSH);
 
-	rx_itr_en = ioread32(wil->csr +
-			     HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL));
+	rx_itr_en = wil_r(wil, RGF_DMA_ITR_RX_CNT_CTL);
 	if (rx_itr_en & BIT_DMA_ITR_RX_CNT_CTL_EN)
-		rx_itr_val =
-			ioread32(wil->csr +
-				 HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH));
+		rx_itr_val = wil_r(wil, RGF_DMA_ITR_RX_CNT_TRSH);
 
 	cp->tx_coalesce_usecs = tx_itr_val;
 	cp->rx_coalesce_usecs = rx_itr_val;
diff --git a/drivers/net/wireless/ath/wil6210/fw.c b/drivers/net/wireless/ath/wil6210/fw.c
index 4428345..82aae2d 100644
--- a/drivers/net/wireless/ath/wil6210/fw.c
+++ b/drivers/net/wireless/ath/wil6210/fw.c
@@ -22,16 +22,6 @@
 MODULE_FIRMWARE(WIL_FW_NAME);
 MODULE_FIRMWARE(WIL_FW2_NAME);
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static
 void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
 			size_t count)
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 157f5ef..d30657e 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -221,12 +221,12 @@
 
 		FW_ADDR_CHECK(dst, block[i].addr, "address");
 
-		x = ioread32(dst);
+		x = readl(dst);
 		y = (x & m) | (v & ~m);
 		wil_dbg_fw(wil, "write [0x%08x] <== 0x%08x "
 			   "(old 0x%08x val 0x%08x mask 0x%08x)\n",
 			   le32_to_cpu(block[i].addr), y, x, v, m);
-		iowrite32(y, dst);
+		writel(y, dst);
 		wmb(); /* finish before processing next record */
 	}
 
@@ -239,18 +239,18 @@
 {
 	unsigned delay = 0;
 
-	iowrite32(a, gwa_addr);
-	iowrite32(gw_cmd, gwa_cmd);
+	writel(a, gwa_addr);
+	writel(gw_cmd, gwa_cmd);
 	wmb(); /* finish before activate gw */
 
-	iowrite32(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
+	writel(WIL_FW_GW_CTL_RUN, gwa_ctl); /* activate gw */
 	do {
 		udelay(1); /* typical time is few usec */
 		if (delay++ > 100) {
 			wil_err_fw(wil, "gw timeout\n");
 			return -EINVAL;
 		}
-	} while (ioread32(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
+	} while (readl(gwa_ctl) & WIL_FW_GW_CTL_BUSY); /* gw done? */
 
 	return 0;
 }
@@ -305,7 +305,7 @@
 		wil_dbg_fw(wil, "  gw write[%3d] [0x%08x] <== 0x%08x\n",
 			   i, a, v);
 
-		iowrite32(v, gwa_val);
+		writel(v, gwa_val);
 		rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
 		if (rc)
 			return rc;
@@ -372,7 +372,7 @@
 				sizeof(v), false);
 
 		for (k = 0; k < ARRAY_SIZE(block->value); k++)
-			iowrite32(v[k], gwa_val[k]);
+			writel(v[k], gwa_val[k]);
 		rc = gw_write(wil, gwa_addr, gwa_cmd, gwa_ctl, gw_cmd, a);
 		if (rc)
 			return rc;
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 28ffc18..a371f036 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -61,13 +61,13 @@
 
 static inline void wil_icr_clear(u32 x, void __iomem *addr)
 {
-	iowrite32(x, addr);
+	writel(x, addr);
 }
 #endif /* defined(CONFIG_WIL6210_ISR_COR) */
 
 static inline u32 wil_ioread32_and_clear(void __iomem *addr)
 {
-	u32 x = ioread32(addr);
+	u32 x = readl(addr);
 
 	wil_icr_clear(x, addr);
 
@@ -76,54 +76,47 @@
 
 static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_TX_ICR) +
-		  offsetof(struct RGF_ICR, IMS));
+	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_RX_ICR) +
-		  offsetof(struct RGF_ICR, IMS));
+	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-		  offsetof(struct RGF_ICR, IMS));
+	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+	      WIL6210_IRQ_DISABLE);
 }
 
 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
 {
 	wil_dbg_irq(wil, "%s()\n", __func__);
 
-	iowrite32(WIL6210_IRQ_DISABLE, wil->csr +
-		  HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+	wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
 
 	clear_bit(wil_status_irqen, wil->status);
 }
 
 void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IMC_TX, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_TX_ICR) +
-		  offsetof(struct RGF_ICR, IMC));
+	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_TX);
 }
 
 void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IMC_RX, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_RX_ICR) +
-		  offsetof(struct RGF_ICR, IMC));
+	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_RX);
 }
 
 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
 {
-	iowrite32(WIL6210_IMC_MISC, wil->csr +
-		  HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-		  offsetof(struct RGF_ICR, IMC));
+	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+	      WIL6210_IMC_MISC);
 }
 
 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -132,8 +125,7 @@
 
 	set_bit(wil_status_irqen, wil->status);
 
-	iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr +
-		  HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW));
+	wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
 }
 
 void wil_mask_irq(struct wil6210_priv *wil)
@@ -150,12 +142,12 @@
 {
 	wil_dbg_irq(wil, "%s()\n", __func__);
 
-	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
-		  offsetof(struct RGF_ICR, ICC));
-	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
-		  offsetof(struct RGF_ICR, ICC));
-	iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-		  offsetof(struct RGF_ICR, ICC));
+	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
+	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
+	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
+	      WIL_ICR_ICC_VALUE);
 
 	wil6210_unmask_irq_pseudo(wil);
 	wil6210_unmask_irq_tx(wil);
@@ -163,9 +155,6 @@
 	wil6210_unmask_irq_misc(wil);
 }
 
-/* target write operation */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
 	wil_dbg_irq(wil, "%s()\n", __func__);
@@ -177,44 +166,42 @@
 		return;
 
 	/* Disable and clear tx counter before (re)configuration */
-	W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
+	wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
 	wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
 		 wil->tx_max_burst_duration);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_TX_CNT_CTL,
-	  BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
+	      BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);
 
 	/* Disable and clear tx idle counter before (re)configuration */
-	W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
+	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
 	wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
 		 wil->tx_interframe_timeout);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
-				      BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
+	      BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);
 
 	/* Disable and clear rx counter before (re)configuration */
-	W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
+	wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
 	wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
 		 wil->rx_max_burst_duration);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_RX_CNT_CTL,
-	  BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
+	      BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);
 
 	/* Disable and clear rx idle counter before (re)configuration */
-	W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
-	W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
+	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
+	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
 	wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
 		 wil->rx_interframe_timeout);
 	/* Configure TX max burst duration timer to use usec units */
-	W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
-				      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
+	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
+	      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
 }
 
-#undef W
-
 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
 {
 	struct wil6210_priv *wil = cookie;
@@ -452,27 +439,24 @@
 		u32 icr_rx = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_RX_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_rx = ioread32(wil->csr +
-				HOSTADDR(RGF_DMA_EP_RX_ICR) +
-				offsetof(struct RGF_ICR, IMV));
+		u32 imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
+				   offsetof(struct RGF_ICR, IMV));
 		u32 icm_tx = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_TX_ICR) +
 				offsetof(struct RGF_ICR, ICM));
 		u32 icr_tx = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_TX_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_tx = ioread32(wil->csr +
-				HOSTADDR(RGF_DMA_EP_TX_ICR) +
-				offsetof(struct RGF_ICR, IMV));
+		u32 imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
+				   offsetof(struct RGF_ICR, IMV));
 		u32 icm_misc = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 				offsetof(struct RGF_ICR, ICM));
 		u32 icr_misc = wil_ioread32_and_clear(wil->csr +
 				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
 				offsetof(struct RGF_ICR, ICR));
-		u32 imv_misc = ioread32(wil->csr +
-				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
-				offsetof(struct RGF_ICR, IMV));
+		u32 imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
+				     offsetof(struct RGF_ICR, IMV));
 		wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
 				"Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
 				"Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
@@ -492,7 +476,7 @@
 {
 	irqreturn_t rc = IRQ_HANDLED;
 	struct wil6210_priv *wil = cookie;
-	u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE));
+	u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);
 
 	/**
 	 * pseudo_cause is Clear-On-Read, no need to ACK
@@ -541,48 +525,12 @@
 	return rc;
 }
 
-static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
-{
-	int rc;
-	/*
-	 * IRQ's are in the following order:
-	 * - Tx
-	 * - Rx
-	 * - Misc
-	 */
-
-	rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED,
-			 WIL_NAME"_tx", wil);
-	if (rc)
-		return rc;
-
-	rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED,
-			 WIL_NAME"_rx", wil);
-	if (rc)
-		goto free0;
-
-	rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
-				  wil6210_irq_misc_thread,
-				  IRQF_SHARED, WIL_NAME"_misc", wil);
-	if (rc)
-		goto free1;
-
-	return 0;
-	/* error branch */
-free1:
-	free_irq(irq + 1, wil);
-free0:
-	free_irq(irq, wil);
-
-	return rc;
-}
-
 /* can't use wil_ioread32_and_clear because ICC value is not set yet */
 static inline void wil_clear32(void __iomem *addr)
 {
-	u32 x = ioread32(addr);
+	u32 x = readl(addr);
 
-	iowrite32(x, addr);
+	writel(x, addr);
 }
 
 void wil6210_clear_irq(struct wil6210_priv *wil)
@@ -596,19 +544,16 @@
 	wmb(); /* make sure write completed */
 }
 
-int wil6210_init_irq(struct wil6210_priv *wil, int irq)
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 {
 	int rc;
 
-	wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi);
+	wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
 
-	if (wil->n_msi == 3)
-		rc = wil6210_request_3msi(wil, irq);
-	else
-		rc = request_threaded_irq(irq, wil6210_hardirq,
-					  wil6210_thread_irq,
-					  wil->n_msi ? 0 : IRQF_SHARED,
-					  WIL_NAME, wil);
+	rc = request_threaded_irq(irq, wil6210_hardirq,
+				  wil6210_thread_irq,
+				  use_msi ? 0 : IRQF_SHARED,
+				  WIL_NAME, wil);
 	return rc;
 }
 
@@ -618,8 +563,4 @@
 
 	wil_mask_irq(wil);
 	free_irq(irq, wil);
-	if (wil->n_msi == 3) {
-		free_irq(irq + 1, wil);
-		free_irq(irq + 2, wil);
-	}
 }
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index e9c0673..f7f9486 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -76,11 +76,11 @@
 	/* operation */
 	switch (io.op & wil_mmio_op_mask) {
 	case wil_mmio_read:
-		io.val = ioread32(a);
+		io.val = readl(a);
 		need_copy = true;
 		break;
 	case wil_mmio_write:
-		iowrite32(io.val, a);
+		writel(io.val, a);
 		wmb(); /* make sure write propagated to HW */
 		break;
 	default:
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 6ca6193..2fb04c5 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -21,6 +21,7 @@
 #include "wil6210.h"
 #include "txrx.h"
 #include "wmi.h"
+#include "boot_loader.h"
 
 #define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000
 #define WAIT_FOR_DISCONNECT_INTERVAL_MS 10
@@ -270,8 +271,7 @@
 
 	clear_bit(wil_status_fwready, wil->status);
 	wil_err(wil, "Scan timeout detected, start fw error recovery\n");
-	wil->recovery_state = fw_recovery_pending;
-	schedule_work(&wil->fw_error_worker);
+	wil_fw_error_recovery(wil);
 }
 
 static int wil_wait_for_recovery(struct wil6210_priv *wil)
@@ -528,26 +528,16 @@
 	destroy_workqueue(wil->wmi_wq);
 }
 
-/* target operations */
-/* register read */
-#define R(a) ioread32(wil->csr + HOSTADDR(a))
-/* register write. wmb() to make sure it is completed */
-#define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0)
-/* register set = read, OR, write */
-#define S(a, v) W(a, R(a) | v)
-/* register clear = read, AND with inverted, write */
-#define C(a, v) W(a, R(a) & ~v)
-
 static inline void wil_halt_cpu(struct wil6210_priv *wil)
 {
-	W(RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
-	W(RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
+	wil_w(wil, RGF_USER_USER_CPU_0, BIT_USER_USER_CPU_MAN_RST);
+	wil_w(wil, RGF_USER_MAC_CPU_0,  BIT_USER_MAC_CPU_MAN_RST);
 }
 
 static inline void wil_release_cpu(struct wil6210_priv *wil)
 {
 	/* Start CPU */
-	W(RGF_USER_USER_CPU_0, 1);
+	wil_w(wil, RGF_USER_USER_CPU_0, 1);
 }
 
 static int wil_target_reset(struct wil6210_priv *wil)
@@ -558,56 +548,60 @@
 	wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
 
 	/* Clear MAC link up */
-	S(RGF_HP_CTRL, BIT(15));
-	S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
-	S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+	wil_s(wil, RGF_HP_CTRL, BIT(15));
+	wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
+	wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
 
 	wil_halt_cpu(wil);
 
 	/* clear all boot loader "ready" bits */
-	W(RGF_USER_BL + offsetof(struct RGF_BL, ready), 0);
+	wil_w(wil, RGF_USER_BL +
+	      offsetof(struct bl_dedicated_registers_v0, boot_loader_ready), 0);
 	/* Clear Fw Download notification */
-	C(RGF_USER_USAGE_6, BIT(0));
+	wil_c(wil, RGF_USER_USAGE_6, BIT(0));
 
-	S(RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
+	wil_s(wil, RGF_CAF_OSC_CONTROL, BIT_CAF_OSC_XTAL_EN);
 	/* XTAL stabilization should take about 3ms */
 	usleep_range(5000, 7000);
-	x = R(RGF_CAF_PLL_LOCK_STATUS);
+	x = wil_r(wil, RGF_CAF_PLL_LOCK_STATUS);
 	if (!(x & BIT_CAF_OSC_DIG_XTAL_STABLE)) {
 		wil_err(wil, "Xtal stabilization timeout\n"
 			"RGF_CAF_PLL_LOCK_STATUS = 0x%08x\n", x);
 		return -ETIME;
 	}
 	/* switch 10k to XTAL*/
-	C(RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
+	wil_c(wil, RGF_USER_SPARROW_M_4, BIT_SPARROW_M_4_SEL_SLEEP_OR_REF);
 	/* 40 MHz */
-	C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
+	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_CAR_AHB_SW_SEL);
 
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x3ff81f);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0xf);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x000000f0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FE00);
 
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
-	W(RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_0, 0x0);
+	wil_w(wil, RGF_USER_CLKS_CTL_EXT_SW_RST_VEC_1, 0x0);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000); /* reset A2 PCIE AHB */
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000003);
+	/* reset A2 PCIE AHB */
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00008000);
 
-	W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
+	wil_w(wil, RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
 
 	/* wait until device ready. typical time is 20..80 msec */
 	do {
 		msleep(RST_DELAY);
-		x = R(RGF_USER_BL + offsetof(struct RGF_BL, ready));
+		x = wil_r(wil, RGF_USER_BL +
+			  offsetof(struct bl_dedicated_registers_v0,
+				   boot_loader_ready));
 		if (x1 != x) {
 			wil_dbg_misc(wil, "BL.ready 0x%08x => 0x%08x\n", x1, x);
 			x1 = x;
@@ -617,13 +611,13 @@
 				x);
 			return -ETIME;
 		}
-	} while (x != BIT_BL_READY);
+	} while (x != BL_READY);
 
-	C(RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
+	wil_c(wil, RGF_USER_CLKS_CTL_0, BIT_USER_CLKS_RST_PWGD);
 
 	/* enable fix for HW bug related to the SA/DA swap in AP Rx */
-	S(RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
-	  BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
+	wil_s(wil, RGF_DMA_OFUL_NID_0, BIT_DMA_OFUL_NID_0_RX_EXT_TR_EN |
+	      BIT_DMA_OFUL_NID_0_RX_EXT_A3_SRC);
 
 	wil_dbg_misc(wil, "Reset completed in %d ms\n", delay * RST_DELAY);
 	return 0;
@@ -641,29 +635,93 @@
 static int wil_get_bl_info(struct wil6210_priv *wil)
 {
 	struct net_device *ndev = wil_to_ndev(wil);
-	struct RGF_BL bl;
+	union {
+		struct bl_dedicated_registers_v0 bl0;
+		struct bl_dedicated_registers_v1 bl1;
+	} bl;
+	u32 bl_ver;
+	u8 *mac;
+	u16 rf_status;
 
-	wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL), sizeof(bl));
-	le32_to_cpus(&bl.ready);
-	le32_to_cpus(&bl.version);
-	le32_to_cpus(&bl.rf_type);
-	le32_to_cpus(&bl.baseband_type);
+	wil_memcpy_fromio_32(&bl, wil->csr + HOSTADDR(RGF_USER_BL),
+			     sizeof(bl));
+	bl_ver = le32_to_cpu(bl.bl0.boot_loader_struct_version);
+	mac = bl.bl0.mac_address;
 
-	if (!is_valid_ether_addr(bl.mac_address)) {
-		wil_err(wil, "BL: Invalid MAC %pM\n", bl.mac_address);
+	if (bl_ver == 0) {
+		le32_to_cpus(&bl.bl0.rf_type);
+		le32_to_cpus(&bl.bl0.baseband_type);
+		rf_status = 0; /* actually, unknown */
+		wil_info(wil,
+			 "Boot Loader struct v%d: MAC = %pM RF = 0x%08x bband = 0x%08x\n",
+			 bl_ver, mac,
+			 bl.bl0.rf_type, bl.bl0.baseband_type);
+		wil_info(wil, "Boot Loader build unknown for struct v0\n");
+	} else {
+		le16_to_cpus(&bl.bl1.rf_type);
+		rf_status = le16_to_cpu(bl.bl1.rf_status);
+		le32_to_cpus(&bl.bl1.baseband_type);
+		le16_to_cpus(&bl.bl1.bl_version_subminor);
+		le16_to_cpus(&bl.bl1.bl_version_build);
+		wil_info(wil,
+			 "Boot Loader struct v%d: MAC = %pM RF = 0x%04x (status 0x%04x) bband = 0x%08x\n",
+			 bl_ver, mac,
+			 bl.bl1.rf_type, rf_status,
+			 bl.bl1.baseband_type);
+		wil_info(wil, "Boot Loader build %d.%d.%d.%d\n",
+			 bl.bl1.bl_version_major, bl.bl1.bl_version_minor,
+			 bl.bl1.bl_version_subminor, bl.bl1.bl_version_build);
+	}
+
+	if (!is_valid_ether_addr(mac)) {
+		wil_err(wil, "BL: Invalid MAC %pM\n", mac);
 		return -EINVAL;
 	}
 
-	ether_addr_copy(ndev->perm_addr, bl.mac_address);
+	ether_addr_copy(ndev->perm_addr, mac);
 	if (!is_valid_ether_addr(ndev->dev_addr))
-		ether_addr_copy(ndev->dev_addr, bl.mac_address);
-	wil_info(wil,
-		 "Boot Loader: ver = %d MAC = %pM RF = 0x%08x bband = 0x%08x\n",
-		 bl.version, bl.mac_address, bl.rf_type, bl.baseband_type);
+		ether_addr_copy(ndev->dev_addr, mac);
+
+	if (rf_status) { /* bad RF cable? */
+		wil_err(wil, "RF communication error 0x%04x",
+			rf_status);
+		return -EAGAIN;
+	}
 
 	return 0;
 }
 
+static void wil_bl_crash_info(struct wil6210_priv *wil, bool is_err)
+{
+	u32 bl_assert_code, bl_assert_blink, bl_magic_number;
+	u32 bl_ver = wil_r(wil, RGF_USER_BL +
+			   offsetof(struct bl_dedicated_registers_v0,
+				    boot_loader_struct_version));
+
+	if (bl_ver < 2)
+		return;
+
+	bl_assert_code = wil_r(wil, RGF_USER_BL +
+			       offsetof(struct bl_dedicated_registers_v1,
+					bl_assert_code));
+	bl_assert_blink = wil_r(wil, RGF_USER_BL +
+				offsetof(struct bl_dedicated_registers_v1,
+					 bl_assert_blink));
+	bl_magic_number = wil_r(wil, RGF_USER_BL +
+				offsetof(struct bl_dedicated_registers_v1,
+					 bl_magic_number));
+
+	if (is_err) {
+		wil_err(wil,
+			"BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+			bl_assert_code, bl_assert_blink, bl_magic_number);
+	} else {
+		wil_dbg_misc(wil,
+			     "BL assert code 0x%08x blink 0x%08x magic 0x%08x\n",
+			     bl_assert_code, bl_assert_blink, bl_magic_number);
+	}
+}
+
 static int wil_wait_for_fw_ready(struct wil6210_priv *wil)
 {
 	ulong to = msecs_to_jiffies(1000);
@@ -690,9 +748,6 @@
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
-	if (wil->hw_version == HW_VER_UNKNOWN)
-		return -ENODEV;
-
 	WARN_ON(!mutex_is_locked(&wil->mutex));
 	WARN_ON(test_bit(wil_status_napi_en, wil->status));
 
@@ -707,6 +762,9 @@
 		return 0;
 	}
 
+	if (wil->hw_version == HW_VER_UNKNOWN)
+		return -ENODEV;
+
 	cancel_work_sync(&wil->disconnect_worker);
 	wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
 	wil_bcast_fini(wil);
@@ -729,12 +787,17 @@
 	flush_workqueue(wil->wq_service);
 	flush_workqueue(wil->wmi_wq);
 
+	wil_bl_crash_info(wil, false);
 	rc = wil_target_reset(wil);
 	wil_rx_fini(wil);
-	if (rc)
+	if (rc) {
+		wil_bl_crash_info(wil, true);
 		return rc;
+	}
 
 	rc = wil_get_bl_info(wil);
+	if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
+		rc = 0;
 	if (rc)
 		return rc;
 
@@ -752,7 +815,7 @@
 			return rc;
 
 		/* Mark FW as loaded from host */
-		S(RGF_USER_USAGE_6, 1);
+		wil_s(wil, RGF_USER_USAGE_6, 1);
 
 		/* clear any interrupts which on-card-firmware
 		 * may have set
@@ -760,8 +823,8 @@
 		wil6210_clear_irq(wil);
 		/* CAF_ICR - clear and mask */
 		/* it is W1C, clear by writing back same value */
-		S(RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
-		W(RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+		wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+		wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
 
 		wil_release_cpu(wil);
 	}
@@ -785,11 +848,6 @@
 	return rc;
 }
 
-#undef R
-#undef W
-#undef S
-#undef C
-
 void wil_fw_error_recovery(struct wil6210_priv *wil)
 {
 	wil_dbg_misc(wil, "starting fw error recovery\n");
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 8ef18ac..e3b3c8f 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -173,7 +173,10 @@
 	wil_set_ethtoolops(ndev);
 	ndev->ieee80211_ptr = wdev;
 	ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
-			    NETIF_F_SG | NETIF_F_GRO;
+			    NETIF_F_SG | NETIF_F_GRO |
+			    NETIF_F_TSO | NETIF_F_TSO6 |
+			    NETIF_F_RXHASH;
+
 	ndev->features |= ndev->hw_features;
 	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
 	wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index aa3ecc6..feff1ef 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -21,16 +21,14 @@
 
 #include "wil6210.h"
 
-static int use_msi = 1;
-module_param(use_msi, int, S_IRUGO);
-MODULE_PARM_DESC(use_msi,
-		 " Use MSI interrupt: "
-		 "0 - don't, 1 - (default) - single, or 3");
+static bool use_msi = true;
+module_param(use_msi, bool, S_IRUGO);
+MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
-	u32 rev_id = ioread32(wil->csr + HOSTADDR(RGF_USER_JTAG_DEV_ID));
+	u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
 
 	bitmap_zero(wil->hw_capabilities, hw_capability_last);
 
@@ -50,24 +48,12 @@
 
 void wil_disable_irq(struct wil6210_priv *wil)
 {
-	int irq = wil->pdev->irq;
-
-	disable_irq(irq);
-	if (wil->n_msi == 3) {
-		disable_irq(irq + 1);
-		disable_irq(irq + 2);
-	}
+	disable_irq(wil->pdev->irq);
 }
 
 void wil_enable_irq(struct wil6210_priv *wil)
 {
-	int irq = wil->pdev->irq;
-
-	enable_irq(irq);
-	if (wil->n_msi == 3) {
-		enable_irq(irq + 1);
-		enable_irq(irq + 2);
-	}
+	enable_irq(wil->pdev->irq);
 }
 
 /* Bus ops */
@@ -80,6 +66,7 @@
 	 * and only MSI should be used
 	 */
 	int msi_only = pdev->msi_enabled;
+	bool _use_msi = use_msi;
 
 	wil_dbg_misc(wil, "%s()\n", __func__);
 
@@ -87,41 +74,20 @@
 
 	pci_set_master(pdev);
 
-	/*
-	 * how many MSI interrupts to request?
-	 */
-	switch (use_msi) {
-	case 3:
-	case 1:
-		wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
-		break;
-	case 0:
-		wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
-		break;
-	default:
-		wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
-		use_msi = 1;
-	}
+	wil_dbg_misc(wil, "Setup %s interrupt\n", use_msi ? "MSI" : "INTx");
 
-	if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
-		wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
-		use_msi = 1;
-	}
-
-	if (use_msi == 1 && pci_enable_msi(pdev)) {
+	if (use_msi && pci_enable_msi(pdev)) {
 		wil_err(wil, "pci_enable_msi failed, use INTx\n");
-		use_msi = 0;
+		_use_msi = false;
 	}
 
-	wil->n_msi = use_msi;
-
-	if ((wil->n_msi == 0) && msi_only) {
+	if (!_use_msi && msi_only) {
 		wil_err(wil, "Interrupt pin not routed, unable to use INTx\n");
 		rc = -ENODEV;
 		goto stop_master;
 	}
 
-	rc = wil6210_init_irq(wil, pdev->irq);
+	rc = wil6210_init_irq(wil, pdev->irq, _use_msi);
 	if (rc)
 		goto stop_master;
 
@@ -293,11 +259,80 @@
 };
 MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
 
+#ifdef CONFIG_PM
+
+static int wil6210_suspend(struct device *dev, bool is_runtime)
+{
+	int rc = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	rc = wil_can_suspend(wil, is_runtime);
+	if (rc)
+		goto out;
+
+	rc = wil_suspend(wil, is_runtime);
+	if (rc)
+		goto out;
+
+	/* TODO: how do I bring the card into a low power state? */
+
+	/* disable bus mastering */
+	pci_clear_master(pdev);
+	/* PCI will call pci_save_state(pdev) and pci_prepare_to_sleep(pdev) */
+
+out:
+	return rc;
+}
+
+static int wil6210_resume(struct device *dev, bool is_runtime)
+{
+	int rc = 0;
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct wil6210_priv *wil = pci_get_drvdata(pdev);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	/* allow master */
+	pci_set_master(pdev);
+
+	rc = wil_resume(wil, is_runtime);
+	if (rc)
+		pci_clear_master(pdev);
+
+	return rc;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int wil6210_pm_suspend(struct device *dev)
+{
+	return wil6210_suspend(dev, false);
+}
+
+static int wil6210_pm_resume(struct device *dev)
+{
+	return wil6210_resume(dev, false);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops wil6210_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
+};
+
 static struct pci_driver wil6210_driver = {
 	.probe		= wil_pcie_probe,
 	.remove		= wil_pcie_remove,
 	.id_table	= wil6210_pcie_ids,
 	.name		= WIL_NAME,
+	.driver		= {
+		.pm = &wil6210_pm_ops,
+	},
 };
 
 static int __init wil6210_driver_init(void)
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
new file mode 100644
index 0000000..0b7ecbc
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct wireless_dev *wdev = wil->wdev;
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	switch (wdev->iftype) {
+	case NL80211_IFTYPE_MONITOR:
+	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_CLIENT:
+		break;
+	/* AP-like interface - can't suspend */
+	default:
+		wil_dbg_pm(wil, "AP-like interface\n");
+		rc = -EBUSY;
+		break;
+	}
+
+	wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+		   is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
+
+	return rc;
+}
+
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct net_device *ndev = wil_to_ndev(wil);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	/* if netif up, hardware is alive, shut it down */
+	if (ndev->flags & IFF_UP) {
+		rc = wil_down(wil);
+		if (rc) {
+			wil_err(wil, "wil_down : %d\n", rc);
+			goto out;
+		}
+	}
+
+	if (wil->platform_ops.suspend)
+		rc = wil->platform_ops.suspend(wil->platform_handle);
+
+out:
+	wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+		   is_runtime ? "runtime" : "system", rc);
+	return rc;
+}
+
+int wil_resume(struct wil6210_priv *wil, bool is_runtime)
+{
+	int rc = 0;
+	struct net_device *ndev = wil_to_ndev(wil);
+
+	wil_dbg_pm(wil, "%s(%s)\n", __func__,
+		   is_runtime ? "runtime" : "system");
+
+	if (wil->platform_ops.resume) {
+		rc = wil->platform_ops.resume(wil->platform_handle);
+		if (rc) {
+			wil_err(wil, "platform_ops.resume : %d\n", rc);
+			goto out;
+		}
+	}
+
+	/* if netif up, bring hardware up.
+	 * During open(), IFF_UP is set after the actual device method
+	 * invocation. This prevents a recursive call to wil_up()
+	 */
+	if (ndev->flags & IFF_UP)
+		rc = wil_up(wil);
+
+out:
+	wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+		   is_runtime ? "runtime" : "system", rc);
+	return rc;
+}
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index ca10dcf..9238c1a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -121,6 +121,7 @@
 		goto out;
 	}
 
+	r->total++;
 	hseq = r->head_seq_num;
 
 	/** Due to the race between WMI events, where BACK establishment
@@ -153,6 +154,9 @@
 	/* frame with out of date sequence number */
 	if (seq_less(seq, r->head_seq_num)) {
 		r->ssn_last_drop = seq;
+		r->drop_old++;
+		wil_dbg_txrx(wil, "Rx drop: old seq 0x%03x head 0x%03x\n",
+			     seq, r->head_seq_num);
 		dev_kfree_skb(skb);
 		goto out;
 	}
@@ -173,6 +177,8 @@
 
 	/* check if we already stored this frame */
 	if (r->reorder_buf[index]) {
+		r->drop_dup++;
+		wil_dbg_txrx(wil, "Rx drop: dup seq 0x%03x\n", seq);
 		dev_kfree_skb(skb);
 		goto out;
 	}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index aa20af8..6229110 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -509,7 +509,7 @@
 			break;
 		}
 	}
-	iowrite32(v->swtail, wil->csr + HOSTADDR(v->hwtail));
+	wil_w(wil, v->hwtail, v->swtail);
 
 	return rc;
 }
@@ -541,6 +541,14 @@
 		[GRO_DROP]		= "GRO_DROP",
 	};
 
+	if (ndev->features & NETIF_F_RXHASH)
+		/* fake an L4 hash so it won't be re-calculated later;
+		 * set the hash to any non-zero value to activate the RPS
+		 * mechanism, the core will be chosen according
+		 * to the user-level RPS configuration.
+		 */
+		skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
+
 	skb_orphan(skb);
 
 	if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
@@ -1058,14 +1066,52 @@
 static inline
 void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
 {
-	d->mac.d[2] |= ((nr_frags + 1) <<
-		       MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
+	d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
 }
 
-static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
-					 struct vring_tx_desc *d,
-					 struct sk_buff *skb)
+/**
+ * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * @tso_desc_type is the descriptor type for TSO: 0 - header, 1 - first data,
+ * 2 - middle, 3 - last descriptor.
+ */
+
+static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
+					  struct sk_buff *skb,
+					  int tso_desc_type, bool is_ipv4,
+					  int tcp_hdr_len, int skb_net_hdr_len)
 {
+	d->dma.b11 = ETH_HLEN; /* MAC header length */
+	d->dma.b11 |= is_ipv4 << DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS;
+
+	d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+	/* L4 header len: TCP header length */
+	d->dma.d0 |= (tcp_hdr_len & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+
+	/* Setup TSO: bit and desc type */
+	d->dma.d0 |= (BIT(DMA_CFG_DESC_TX_0_TCP_SEG_EN_POS)) |
+		(tso_desc_type << DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS);
+	d->dma.d0 |= (is_ipv4 << DMA_CFG_DESC_TX_0_IPV4_CHECKSUM_EN_POS);
+
+	d->dma.ip_length = skb_net_hdr_len;
+	/* Enable TCP/UDP checksum */
+	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+	/* Calculate pseudo-header */
+	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+}
+
+/**
+ * Sets the descriptor @d up for csum. The corresponding
+ * @skb is used to obtain the protocol and header lengths.
+ * Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
+ * Note: if d == NULL, the function only returns the protocol result.
+ *
+ * It is very similar to the previous wil_tx_desc_offload_setup_tso();
+ * this is "if unrolling" to optimize the critical path.
+ */
+
+static int wil_tx_desc_offload_setup(struct vring_tx_desc *d,
+				     struct sk_buff *skb)
+{
 	int protocol;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1110,6 +1156,305 @@
 	return 0;
 }
 
+static inline void wil_tx_last_desc(struct vring_tx_desc *d)
+{
+	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS) |
+	      BIT(DMA_CFG_DESC_TX_0_CMD_MARK_WB_POS) |
+	      BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
+}
+
+static inline void wil_set_tx_desc_last_tso(volatile struct vring_tx_desc *d)
+{
+	d->dma.d0 |= wil_tso_type_lst <<
+		  DMA_CFG_DESC_TX_0_SEGMENT_BUF_DETAILS_POS;
+}
+
+static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
+			      struct sk_buff *skb)
+{
+	struct device *dev = wil_to_dev(wil);
+
+	/* point to descriptors in shared memory */
+	volatile struct vring_tx_desc *_desc = NULL, *_hdr_desc,
+				      *_first_desc = NULL;
+
+	/* pointers to shadow descriptors */
+	struct vring_tx_desc desc_mem, hdr_desc_mem, first_desc_mem,
+			     *d = &hdr_desc_mem, *hdr_desc = &hdr_desc_mem,
+			     *first_desc = &first_desc_mem;
+
+	/* pointer to shadow descriptors' context */
+	struct wil_ctx *hdr_ctx, *first_ctx = NULL;
+
+	int descs_used = 0; /* total number of used descriptors */
+	int sg_desc_cnt = 0; /* number of descriptors for current mss */
+
+	u32 swhead = vring->swhead;
+	int used, avail = wil_vring_avail_tx(vring);
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int min_desc_required = nr_frags + 1;
+	int mss = skb_shinfo(skb)->gso_size;	/* payload size w/o headers */
+	int f, len, hdrlen, headlen;
+	int vring_index = vring - wil->vring_tx;
+	struct vring_tx_data *txdata = &wil->vring_tx_data[vring_index];
+	uint i = swhead;
+	dma_addr_t pa;
+	const skb_frag_t *frag = NULL;
+	int rem_data = mss;
+	int lenmss;
+	int hdr_compensation_need = true;
+	int desc_tso_type = wil_tso_type_first;
+	bool is_ipv4;
+	int tcp_hdr_len;
+	int skb_net_hdr_len;
+	int gso_type;
+
+	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+		     __func__, skb->len, vring_index);
+
+	if (unlikely(!txdata->enabled))
+		return -EINVAL;
+
+	/* A typical 4K page holds 3-4 payloads; we assume each fragment
+	 * is a full payload, which is how min_desc_required has been
+	 * calculated. In reality we might need more or fewer descriptors;
+	 * this is only the initial check.
+	 */
+	if (unlikely(avail < min_desc_required)) {
+		wil_err_ratelimited(wil,
+				    "TSO: Tx ring[%2d] full. No space for %d fragments\n",
+				    vring_index, min_desc_required);
+		return -ENOMEM;
+	}
+
+	/* Header Length = MAC header len + IP header len + TCP header len */
+	hdrlen = ETH_HLEN +
+		(int)skb_network_header_len(skb) +
+		tcp_hdrlen(skb);
+
+	gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV6 | SKB_GSO_TCPV4);
+	switch (gso_type) {
+	case SKB_GSO_TCPV4:
+		/* TCP v4, zero out the IP length and IPv4 checksum fields
+		 * as required by the offloading doc
+		 */
+		ip_hdr(skb)->tot_len = 0;
+		ip_hdr(skb)->check = 0;
+		is_ipv4 = true;
+		break;
+	case SKB_GSO_TCPV6:
+		/* TCP v6, zero out the payload length */
+		ipv6_hdr(skb)->payload_len = 0;
+		is_ipv4 = false;
+		break;
+	default:
+		/* other than TCPv4 or TCPv6 types are not supported for TSO.
+		 * It is also illegal for both to be set simultaneously
+		 */
+		return -EINVAL;
+	}
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return -EINVAL;
+
+	/* tcp header length and skb network header length are fixed for all
+	 * the packet's descriptors - read them once here
+	 */
+	tcp_hdr_len = tcp_hdrlen(skb);
+	skb_net_hdr_len = skb_network_header_len(skb);
+
+	_hdr_desc = &vring->va[i].tx;
+
+	pa = dma_map_single(dev, skb->data, hdrlen, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, pa))) {
+		wil_err(wil, "TSO: Skb head DMA map error\n");
+		goto err_exit;
+	}
+
+	wil_tx_desc_map(hdr_desc, pa, hdrlen, vring_index);
+	wil_tx_desc_offload_setup_tso(hdr_desc, skb, wil_tso_type_hdr, is_ipv4,
+				      tcp_hdr_len, skb_net_hdr_len);
+	wil_tx_last_desc(hdr_desc);
+
+	vring->ctx[i].mapped_as = wil_mapped_as_single;
+	hdr_ctx = &vring->ctx[i];
+
+	descs_used++;
+	headlen = skb_headlen(skb) - hdrlen;
+
+	for (f = headlen ? -1 : 0; f < nr_frags; f++) {
+		if (headlen) {
+			len = headlen;
+			wil_dbg_txrx(wil, "TSO: process skb head, len %u\n",
+				     len);
+		} else {
+			frag = &skb_shinfo(skb)->frags[f];
+			len = frag->size;
+			wil_dbg_txrx(wil, "TSO: frag[%d]: len %u\n", f, len);
+		}
+
+		while (len) {
+			wil_dbg_txrx(wil,
+				     "TSO: len %d, rem_data %d, descs_used %d\n",
+				     len, rem_data, descs_used);
+
+			if (descs_used == avail) {
+				wil_err(wil, "TSO: ring overflow\n");
+				goto dma_error;
+			}
+
+			lenmss = min_t(int, rem_data, len);
+			i = (swhead + descs_used) % vring->size;
+			wil_dbg_txrx(wil, "TSO: lenmss %d, i %d\n", lenmss, i);
+
+			if (!headlen) {
+				pa = skb_frag_dma_map(dev, frag,
+						      frag->size - len, lenmss,
+						      DMA_TO_DEVICE);
+				vring->ctx[i].mapped_as = wil_mapped_as_page;
+			} else {
+				pa = dma_map_single(dev,
+						    skb->data +
+						    skb_headlen(skb) - headlen,
+						    lenmss,
+						    DMA_TO_DEVICE);
+				vring->ctx[i].mapped_as = wil_mapped_as_single;
+				headlen -= lenmss;
+			}
+
+			if (unlikely(dma_mapping_error(dev, pa)))
+				goto dma_error;
+
+			_desc = &vring->va[i].tx;
+
+			if (!_first_desc) {
+				_first_desc = _desc;
+				first_ctx = &vring->ctx[i];
+				d = first_desc;
+			} else {
+				d = &desc_mem;
+			}
+
+			wil_tx_desc_map(d, pa, lenmss, vring_index);
+			wil_tx_desc_offload_setup_tso(d, skb, desc_tso_type,
+						      is_ipv4, tcp_hdr_len,
+						      skb_net_hdr_len);
+
+			/* use tso_type_first only once */
+			desc_tso_type = wil_tso_type_mid;
+
+			descs_used++;  /* desc used so far */
+			sg_desc_cnt++; /* desc used for this segment */
+			len -= lenmss;
+			rem_data -= lenmss;
+
+			wil_dbg_txrx(wil,
+				     "TSO: len %d, rem_data %d, descs_used %d, sg_desc_cnt %d,\n",
+				     len, rem_data, descs_used, sg_desc_cnt);
+
+			/* Close the segment if mss size reached or last frag */
+			if (rem_data == 0 || (f == nr_frags - 1 && len == 0)) {
+				if (hdr_compensation_need) {
+					/* first segment includes hdr desc for
+					 * release
+					 */
+					hdr_ctx->nr_frags = sg_desc_cnt;
+					wil_tx_desc_set_nr_frags(first_desc,
+								 sg_desc_cnt +
+								 1);
+					hdr_compensation_need = false;
+				} else {
+					wil_tx_desc_set_nr_frags(first_desc,
+								 sg_desc_cnt);
+				}
+				first_ctx->nr_frags = sg_desc_cnt - 1;
+
+				wil_tx_last_desc(d);
+
+				/* first descriptor may also be the last
+				 * for this mss - make sure not to copy
+				 * it twice
+				 */
+				if (first_desc != d)
+					*_first_desc = *first_desc;
+
+				/* last descriptor will be copied at the end
+				 * of this TS processing
+				 */
+				if (f < nr_frags - 1 || len > 0)
+					*_desc = *d;
+
+				rem_data = mss;
+				_first_desc = NULL;
+				sg_desc_cnt = 0;
+			} else if (first_desc != d) /* update mid descriptor */
+					*_desc = *d;
+		}
+	}
+
+	/* first descriptor may also be the last.
+	 * in this case d pointer is invalid
+	 */
+	if (_first_desc == _desc)
+		d = first_desc;
+
+	/* Last data descriptor */
+	wil_set_tx_desc_last_tso(d);
+	*_desc = *d;
+
+	/* Fill the total number of descriptors in first desc (hdr)*/
+	wil_tx_desc_set_nr_frags(hdr_desc, descs_used);
+	*_hdr_desc = *hdr_desc;
+
+	/* hold reference to skb
+	 * to prevent skb release before accounting
+	 * in case of immediate "tx done"
+	 */
+	vring->ctx[i].skb = skb_get(skb);
+
+	/* performance monitoring */
+	used = wil_vring_used_tx(vring);
+	if (wil_val_in_range(vring_idle_trsh,
+			     used, used + descs_used)) {
+		txdata->idle += get_cycles() - txdata->last_idle;
+		wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n",
+			     vring_index, used, used + descs_used);
+	}
+
+	/* advance swhead */
+	wil_vring_advance_head(vring, descs_used);
+	wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
+
+	/* make sure all writes to descriptors (shared memory) are done before
+	 * committing them to HW
+	 */
+	wmb();
+
+	wil_w(wil, vring->hwtail, vring->swhead);
+	return 0;
+
+dma_error:
+	wil_err(wil, "TSO: DMA map page error\n");
+	while (descs_used > 0) {
+		struct wil_ctx *ctx;
+
+		i = (swhead + descs_used) % vring->size;
+		d = (struct vring_tx_desc *)&vring->va[i].tx;
+		_desc = &vring->va[i].tx;
+		*d = *_desc;
+		_desc->dma.status = TX_DMA_STATUS_DU;
+		ctx = &vring->ctx[i];
+		wil_txdesc_unmap(dev, d, ctx);
+		if (ctx->skb)
+			dev_kfree_skb_any(ctx->skb);
+		memset(ctx, 0, sizeof(*ctx));
+		descs_used--;
+	}
+
+err_exit:
+	return -EINVAL;
+}
+
 static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
 			  struct sk_buff *skb)
 {
@@ -1128,7 +1473,8 @@
 	bool mcast = (vring_index == wil->bcast_vring);
 	uint len = skb_headlen(skb);
 
-	wil_dbg_txrx(wil, "%s()\n", __func__);
+	wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
+		     __func__, skb->len, vring_index);
 
 	if (unlikely(!txdata->enabled))
 		return -EINVAL;
@@ -1159,14 +1505,14 @@
 			d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS);
 	}
 	/* Process TCP/UDP checksum offloading */
-	if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) {
+	if (unlikely(wil_tx_desc_offload_setup(d, skb))) {
 		wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n",
 			vring_index);
 		goto dma_error;
 	}
 
 	vring->ctx[i].nr_frags = nr_frags;
-	wil_tx_desc_set_nr_frags(d, nr_frags);
+	wil_tx_desc_set_nr_frags(d, nr_frags + 1);
 
 	/* middle segments */
 	for (; f < nr_frags; f++) {
@@ -1190,7 +1536,7 @@
 		 * if it succeeded for 1-st descriptor,
 		 * it will succeed here too
 		 */
-		wil_tx_desc_offload_cksum_set(wil, d, skb);
+		wil_tx_desc_offload_setup(d, skb);
 	}
 	/* for the last seg only */
 	d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_EOP_POS);
@@ -1221,7 +1567,13 @@
 	wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead,
 		     vring->swhead);
 	trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
-	iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
+
+	/* make sure all writes to descriptors (shared memory) are done before
+	 * committing them to HW
+	 */
+	wmb();
+
+	wil_w(wil, vring->hwtail, vring->swhead);
 
 	return 0;
  dma_error:
@@ -1254,8 +1606,12 @@
 	int rc;
 
 	spin_lock(&txdata->lock);
-	rc = __wil_tx_vring(wil, vring, skb);
+
+	rc = (skb_is_gso(skb) ? __wil_tx_vring_tso : __wil_tx_vring)
+	     (wil, vring, skb);
+
 	spin_unlock(&txdata->lock);
+
 	return rc;
 }
 
@@ -1382,7 +1738,8 @@
 		struct wil_ctx *ctx = &vring->ctx[vring->swtail];
 		/**
 		 * For the fragmented skb, HW will set DU bit only for the
-		 * last fragment. look for it
+		 * last fragment. look for it.
+		 * In TSO the first DU will include hdr desc
 		 */
 		int lf = (vring->swtail + ctx->nr_frags) % vring->size;
 		/* TODO: check we are not past head */
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 0c46384..82a8f9a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -291,6 +291,14 @@
 	__le16 length;
 } __packed;
 
+/* TSO type used in dma descriptor d0 bits 11-12 */
+enum {
+	wil_tso_type_hdr = 0,
+	wil_tso_type_first = 1,
+	wil_tso_type_mid  = 2,
+	wil_tso_type_lst  = 3,
+};
+
 /* Rx descriptor - MAC part
  * [dword 0]
  * bit  0.. 3 : tid:4 The QoS (b3-0) TID Field
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 275355d..dd4ea92 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -127,16 +127,6 @@
 	u32 IMC; /* Mask Clear, write 1 to clear */
 } __packed;
 
-struct RGF_BL {
-	u32 ready;		/* 0x880A3C bit [0] */
-#define BIT_BL_READY	BIT(0)
-	u32 version;		/* 0x880A40 version of the BL struct */
-	u32 rf_type;		/* 0x880A44 ID of the connected RF */
-	u32 baseband_type;	/* 0x880A48 ID of the baseband */
-	u8  mac_address[ETH_ALEN]; /* 0x880A4C permanent MAC */
-	u8 pad[2];
-} __packed;
-
 /* registers - FW addresses */
 #define RGF_USER_USAGE_1		(0x880004)
 #define RGF_USER_USAGE_6		(0x880018)
@@ -262,9 +252,8 @@
 };
 
 /* popular locations */
-#define HOST_MBOX   HOSTADDR(RGF_USER_USER_SCRATCH_PAD)
-#define HOST_SW_INT (HOSTADDR(RGF_USER_USER_ICR) + \
-	offsetof(struct RGF_ICR, ICS))
+#define RGF_MBOX   RGF_USER_USER_SCRATCH_PAD
+#define HOST_MBOX   HOSTADDR(RGF_MBOX)
 #define SW_INT_MBOX BIT_USER_USER_ICR_SW_INT_2
 
 /* ISR register bits */
@@ -434,12 +423,12 @@
  * @ssn: Starting Sequence Number expected to be aggregated.
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
+ * @ssn_last_drop: SSN of the last dropped frame
+ * @total: total number of processed incoming frames
+ * @drop_dup: duplicate frames dropped for this reorder buffer
+ * @drop_old: old frames dropped for this reorder buffer
  * @dialog_token: dialog token for aggregation session
- * @rcu_head: RCU head used for freeing this struct
- *
- * This structure's lifetime is managed by RCU, assignments to
- * the array holding it must hold the aggregation mutex.
- *
+ * @first_time: true when this buffer is used for the first time
  */
 struct wil_tid_ampdu_rx {
 	struct sk_buff **reorder_buf;
@@ -453,6 +442,9 @@
 	u16 buf_size;
 	u16 timeout;
 	u16 ssn_last_drop;
+	unsigned long long total; /* frames processed */
+	unsigned long long drop_dup;
+	unsigned long long drop_old;
 	u8 dialog_token;
 	bool first_time; /* is it 1-st time this buffer used? */
 };
@@ -543,7 +535,6 @@
 
 struct wil6210_priv {
 	struct pci_dev *pdev;
-	int n_msi;
 	struct wireless_dev *wdev;
 	void __iomem *csr;
 	DECLARE_BITMAP(status, wil_status_last);
@@ -559,6 +550,8 @@
 	/* profile */
 	u32 monitor_flags;
 	u32 privacy; /* secure connection? */
+	u8 hidden_ssid; /* relevant in AP mode */
+	u16 channel; /* relevant in AP mode */
 	int sinfo_gen;
 	u32 ap_isolate; /* no intra-BSS communication */
 	/* interrupt moderation */
@@ -654,6 +647,33 @@
 #define wil_dbg_txrx(wil, fmt, arg...) wil_dbg(wil, "DBG[TXRX]" fmt, ##arg)
 #define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
 #define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
+#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+
+/* target operations */
+/* register read */
+static inline u32 wil_r(struct wil6210_priv *wil, u32 reg)
+{
+	return readl(wil->csr + HOSTADDR(reg));
+}
+
+/* register write. wmb() to make sure it is completed */
+static inline void wil_w(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+	writel(val, wil->csr + HOSTADDR(reg));
+	wmb(); /* wait for write to propagate to the HW */
+}
+
+/* register set = read, OR, write */
+static inline void wil_s(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+	wil_w(wil, reg, wil_r(wil, reg) | val);
+}
+
+/* register clear = read, AND with inverted, write */
+static inline void wil_c(struct wil6210_priv *wil, u32 reg, u32 val)
+{
+	wil_w(wil, reg, wil_r(wil, reg) & ~val);
+}
 
 #if defined(CONFIG_DYNAMIC_DEBUG)
 #define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize,	\
@@ -744,7 +764,7 @@
 void wil_back_tx_flush(struct wil6210_priv *wil);
 
 void wil6210_clear_irq(struct wil6210_priv *wil);
-int wil6210_init_irq(struct wil6210_priv *wil, int irq);
+int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
 void wil_mask_irq(struct wil6210_priv *wil);
 void wil_unmask_irq(struct wil6210_priv *wil);
@@ -796,4 +816,8 @@
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name);
 
+int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
+int wil_resume(struct wil6210_priv *wil, bool is_runtime);
+
 #endif /* __WIL6210_H__ */
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index de15f14..2e831bf 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -14,7 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
-#include "linux/device.h"
+#include <linux/device.h>
 #include "wil_platform.h"
 
 int __init wil_platform_modinit(void)
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index c759759..2f35d4c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -228,8 +228,8 @@
 	wil_dbg_wmi(wil, "Head 0x%08x -> 0x%08x\n", r->head, next_head);
 	/* wait till FW finish with previous command */
 	for (retry = 5; retry > 0; retry--) {
-		r->tail = ioread32(wil->csr + HOST_MBOX +
-				   offsetof(struct wil6210_mbox_ctl, tx.tail));
+		r->tail = wil_r(wil, RGF_MBOX +
+				offsetof(struct wil6210_mbox_ctl, tx.tail));
 		if (next_head != r->tail)
 			break;
 		msleep(20);
@@ -254,16 +254,16 @@
 	wil_memcpy_toio_32(dst, &cmd, sizeof(cmd));
 	wil_memcpy_toio_32(dst + sizeof(cmd), buf, len);
 	/* mark entry as full */
-	iowrite32(1, wil->csr + HOSTADDR(r->head) +
-		  offsetof(struct wil6210_mbox_ring_desc, sync));
+	wil_w(wil, r->head + offsetof(struct wil6210_mbox_ring_desc, sync), 1);
 	/* advance next ptr */
-	iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
-		  offsetof(struct wil6210_mbox_ctl, tx.head));
+	wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head),
+	      r->head = next_head);
 
 	trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
 
 	/* interrupt to FW */
-	iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
+	wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
+	      SW_INT_MBOX);
 
 	return 0;
 }
@@ -312,22 +312,44 @@
 	struct wiphy *wiphy = wil_to_wiphy(wil);
 	struct ieee80211_mgmt *rx_mgmt_frame =
 			(struct ieee80211_mgmt *)data->payload;
-	int ch_no = data->info.channel+1;
-	u32 freq = ieee80211_channel_to_frequency(ch_no,
-			IEEE80211_BAND_60GHZ);
-	struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq);
-	s32 signal = data->info.sqi;
-	__le16 fc = rx_mgmt_frame->frame_control;
-	u32 d_len = le32_to_cpu(data->info.len);
-	u16 d_status = le16_to_cpu(data->info.status);
+	int flen = len - offsetof(struct wmi_rx_mgmt_packet_event, payload);
+	int ch_no;
+	u32 freq;
+	struct ieee80211_channel *channel;
+	s32 signal;
+	__le16 fc;
+	u32 d_len;
+	u16 d_status;
 
-	wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d SQI %d%%\n",
+	if (flen < 0) {
+		wil_err(wil, "MGMT Rx: short event, len %d\n", len);
+		return;
+	}
+
+	d_len = le32_to_cpu(data->info.len);
+	if (d_len != flen) {
+		wil_err(wil,
+			"MGMT Rx: length mismatch, d_len %d should be %d\n",
+			d_len, flen);
+		return;
+	}
+
+	ch_no = data->info.channel + 1;
+	freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+	channel = ieee80211_get_channel(wiphy, freq);
+	signal = data->info.sqi;
+	d_status = le16_to_cpu(data->info.status);
+	fc = rx_mgmt_frame->frame_control;
+
+	wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n",
 		    data->info.channel, data->info.mcs, data->info.snr,
 		    data->info.sqi);
 	wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len,
 		    le16_to_cpu(fc));
 	wil_dbg_wmi(wil, "qid %d mid %d cid %d\n",
 		    data->info.qid, data->info.mid, data->info.cid);
+	wil_hex_dump_wmi("MGMT Rx ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
+			 d_len, true);
 
 	if (!channel) {
 		wil_err(wil, "Frame on unsupported channel\n");
@@ -363,6 +385,17 @@
 	}
 }
 
+static void wmi_evt_tx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
+{
+	struct wmi_tx_mgmt_packet_event *data = d;
+	struct ieee80211_mgmt *mgmt_frame =
+			(struct ieee80211_mgmt *)data->payload;
+	int flen = len - offsetof(struct wmi_tx_mgmt_packet_event, payload);
+
+	wil_hex_dump_wmi("MGMT Tx ", DUMP_PREFIX_OFFSET, 16, 1, mgmt_frame,
+			 flen, true);
+}
+
 static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
 				  void *d, int len)
 {
@@ -659,6 +692,7 @@
 	{WMI_READY_EVENTID,		wmi_evt_ready},
 	{WMI_FW_READY_EVENTID,		wmi_evt_fw_ready},
 	{WMI_RX_MGMT_PACKET_EVENTID,	wmi_evt_rx_mgmt},
+	{WMI_TX_MGMT_PACKET_EVENTID,		wmi_evt_tx_mgmt},
 	{WMI_SCAN_COMPLETE_EVENTID,	wmi_evt_scan_complete},
 	{WMI_CONNECT_EVENTID,		wmi_evt_connect},
 	{WMI_DISCONNECT_EVENTID,	wmi_evt_disconnect},
@@ -695,8 +729,8 @@
 		u16 len;
 		bool q;
 
-		r->head = ioread32(wil->csr + HOST_MBOX +
-				   offsetof(struct wil6210_mbox_ctl, rx.head));
+		r->head = wil_r(wil, RGF_MBOX +
+				offsetof(struct wil6210_mbox_ctl, rx.head));
 		if (r->tail == r->head)
 			break;
 
@@ -734,8 +768,8 @@
 		cmd = (void *)&evt->event.wmi;
 		wil_memcpy_fromio_32(cmd, src, len);
 		/* mark entry as empty */
-		iowrite32(0, wil->csr + HOSTADDR(r->tail) +
-			  offsetof(struct wil6210_mbox_ring_desc, sync));
+		wil_w(wil, r->tail +
+		      offsetof(struct wil6210_mbox_ring_desc, sync), 0);
 		/* indicate */
 		if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
 		    (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
@@ -754,8 +788,8 @@
 		/* advance tail */
 		r->tail = r->base + ((r->tail - r->base +
 			  sizeof(struct wil6210_mbox_ring_desc)) % r->size);
-		iowrite32(r->tail, wil->csr + HOST_MBOX +
-			  offsetof(struct wil6210_mbox_ctl, rx.tail));
+		wil_w(wil, RGF_MBOX +
+		      offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
 
 		/* add to the pending list */
 		spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -772,7 +806,7 @@
 	     u16 reply_id, void *reply, u8 reply_size, int to_msec)
 {
 	int rc;
-	int remain;
+	unsigned long remain;
 
 	mutex_lock(&wil->wmi_mutex);
 
@@ -988,12 +1022,21 @@
 
 int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
 {
+	static const char *const names[] = {
+		[WMI_FRAME_BEACON]	= "BEACON",
+		[WMI_FRAME_PROBE_REQ]	= "PROBE_REQ",
+		[WMI_FRAME_PROBE_RESP]	= "PROBE_RESP",
+		[WMI_FRAME_ASSOC_REQ]	= "ASSOC_REQ",
+		[WMI_FRAME_ASSOC_RESP]	= "ASSOC_RESP",
+	};
 	int rc;
 	u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
 	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
 
-	if (!cmd)
-		return -ENOMEM;
+	if (!cmd) {
+		rc = -ENOMEM;
+		goto out;
+	}
 	if (!ie)
 		ie_len = 0;
 
@@ -1003,6 +1046,12 @@
 	memcpy(cmd->ie_info, ie, ie_len);
 	rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
 	kfree(cmd);
+out:
+	if (rc) {
+		const char *name = type < ARRAY_SIZE(names) ?
+				   names[type] : "??";
+		wil_err(wil, "set_ie(%d %s) failed : %d\n", type, name, rc);
+	}
 
 	return rc;
 }
@@ -1129,15 +1178,42 @@
 
 int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason)
 {
+	int rc;
+	u16 reason_code;
 	struct wmi_disconnect_sta_cmd cmd = {
 		.disconnect_reason = cpu_to_le16(reason),
 	};
+	struct {
+		struct wil6210_mbox_hdr_wmi wmi;
+		struct wmi_disconnect_event evt;
+	} __packed reply;
 
 	ether_addr_copy(cmd.dst_mac, mac);
 
 	wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
 
-	return wmi_send(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd));
+	rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
+		      WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+	/* failure to disconnect in reasonable time treated as FW error */
+	if (rc) {
+		wil_fw_error_recovery(wil);
+		return rc;
+	}
+
+	/* call event handler manually after processing wmi_call,
+	 * to avoid deadlock - disconnect event handler acquires wil->mutex
+	 * while it is already held here
+	 */
+	reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
+
+	wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
+		    reply.evt.bssid, reason_code,
+		    reply.evt.disconnect_reason);
+
+	wil->sinfo_gen++;
+	wil6210_disconnect(wil, reply.evt.bssid, reason_code, true);
+
+	return 0;
 }
 
 int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
@@ -1279,7 +1355,7 @@
 		/* search for handler */
 		if (!wmi_evt_call_handler(wil, id, evt_data,
 					  len - sizeof(*wmi))) {
-			wil_err(wil, "Unhandled event 0x%04x\n", id);
+			wil_info(wil, "Unhandled event 0x%04x\n", id);
 		}
 	} else {
 		wil_err(wil, "Unknown event type\n");
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 916123a..a335f94 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -929,8 +929,8 @@
 	b43_lo_write(dev, &cal->ctl);
 }
 
-/* Periodic LO maintanance work */
-void b43_lo_g_maintanance_work(struct b43_wldev *dev)
+/* Periodic LO maintenance work */
+void b43_lo_g_maintenance_work(struct b43_wldev *dev)
 {
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_g *gphy = phy->g;
diff --git a/drivers/net/wireless/b43/lo.h b/drivers/net/wireless/b43/lo.h
index 3b27e20..7b4df38 100644
--- a/drivers/net/wireless/b43/lo.h
+++ b/drivers/net/wireless/b43/lo.h
@@ -80,7 +80,7 @@
 
 void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
 
-void b43_lo_g_maintanance_work(struct b43_wldev *dev);
+void b43_lo_g_maintenance_work(struct b43_wldev *dev);
 void b43_lo_g_cleanup(struct b43_wldev *dev);
 void b43_lo_g_init(struct b43_wldev *dev);
 
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 727ce6e..462310e 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -3004,7 +3004,7 @@
 		   phy->rev == 1) {
 		//TODO: implement rev1 workaround
 	}
-	b43_lo_g_maintanance_work(dev);
+	b43_lo_g_maintenance_work(dev);
 	b43_mac_enable(dev);
 }
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index d86d1f1..a293275 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -469,6 +469,36 @@
 	return NULL;
 }
 
+static int brcmf_vif_change_validate(struct brcmf_cfg80211_info *cfg,
+				     struct brcmf_cfg80211_vif *vif,
+				     enum nl80211_iftype new_type)
+{
+	int iftype_num[NUM_NL80211_IFTYPES];
+	struct brcmf_cfg80211_vif *pos;
+
+	memset(&iftype_num[0], 0, sizeof(iftype_num));
+	list_for_each_entry(pos, &cfg->vif_list, list)
+		if (pos == vif)
+			iftype_num[new_type]++;
+		else
+			iftype_num[pos->wdev.iftype]++;
+
+	return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
+
+static int brcmf_vif_add_validate(struct brcmf_cfg80211_info *cfg,
+				  enum nl80211_iftype new_type)
+{
+	int iftype_num[NUM_NL80211_IFTYPES];
+	struct brcmf_cfg80211_vif *pos;
+
+	memset(&iftype_num[0], 0, sizeof(iftype_num));
+	list_for_each_entry(pos, &cfg->vif_list, list)
+		iftype_num[pos->wdev.iftype]++;
+
+	iftype_num[new_type]++;
+	return cfg80211_check_combinations(cfg->wiphy, 1, 0, iftype_num);
+}
 
 static void convert_key_from_CPU(struct brcmf_wsec_key *key,
 				 struct brcmf_wsec_key_le *key_le)
@@ -663,8 +693,14 @@
 						     struct vif_params *params)
 {
 	struct wireless_dev *wdev;
+	int err;
 
 	brcmf_dbg(TRACE, "enter: %s type %d\n", name, type);
+	err = brcmf_vif_add_validate(wiphy_to_cfg(wiphy), type);
+	if (err) {
+		brcmf_err("iface validation failed: err=%d\n", err);
+		return ERR_PTR(err);
+	}
 	switch (type) {
 	case NL80211_IFTYPE_ADHOC:
 	case NL80211_IFTYPE_STATION:
@@ -823,8 +859,12 @@
 	s32 ap = 0;
 	s32 err = 0;
 
-	brcmf_dbg(TRACE, "Enter, ndev=%p, type=%d\n", ndev, type);
-
+	brcmf_dbg(TRACE, "Enter, idx=%d, type=%d\n", ifp->bssidx, type);
+	err = brcmf_vif_change_validate(wiphy_to_cfg(wiphy), vif, type);
+	if (err) {
+		brcmf_err("iface validation failed: err=%d\n", err);
+		return err;
+	}
 	switch (type) {
 	case NL80211_IFTYPE_MONITOR:
 	case NL80211_IFTYPE_WDS:
@@ -5695,63 +5735,132 @@
 	}
 };
 
+/**
+ * brcmf_setup_ifmodes() - determine interface modes and combinations.
+ *
+ * @wiphy: wiphy object.
+ * @ifp: interface object needed for feat module api.
+ *
+ * The interface modes and combinations are determined dynamically here
+ * based on firmware functionality.
+ *
+ * no p2p and no mbss:
+ *
+ *	#STA <= 1, #AP <= 1, channels = 1, 2 total
+ *
+ * no p2p and mbss:
+ *
+ *	#STA <= 1, #AP <= 1, channels = 1, 2 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, no mchan, and mbss:
+ *
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 1, 3 total
+ *	#STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ *
+ * p2p, mchan, and mbss:
+ *
+ *	#STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 1, channels = 2, 3 total
+ *	#STA <= 1, #P2P-DEV <= 1, #AP <= 1, #P2P-CL <= 1, channels = 1, 4 total
+ *	#AP <= 4, matching BI, channels = 1, 4 total
+ */
 static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
 	struct ieee80211_iface_combination *combo = NULL;
-	struct ieee80211_iface_limit *limits = NULL;
-	int i = 0, max_iface_cnt;
+	struct ieee80211_iface_limit *c0_limits = NULL;
+	struct ieee80211_iface_limit *p2p_limits = NULL;
+	struct ieee80211_iface_limit *mbss_limits = NULL;
+	bool mbss, p2p;
+	int i, c, n_combos;
 
-	combo = kzalloc(sizeof(*combo), GFP_KERNEL);
+	mbss = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS);
+	p2p = brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P);
+
+	n_combos = 1 + !!p2p + !!mbss;
+	combo = kcalloc(n_combos, sizeof(*combo), GFP_KERNEL);
 	if (!combo)
 		goto err;
 
-	limits = kzalloc(sizeof(*limits) * 4, GFP_KERNEL);
-	if (!limits)
+	c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL);
+	if (!c0_limits)
 		goto err;
 
+	if (p2p) {
+		p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL);
+		if (!p2p_limits)
+			goto err;
+	}
+
+	if (mbss) {
+		mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL);
+		if (!mbss_limits)
+			goto err;
+	}
+
 	wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
 				 BIT(NL80211_IFTYPE_ADHOC) |
 				 BIT(NL80211_IFTYPE_AP);
 
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
-		combo->num_different_channels = 2;
-	else
-		combo->num_different_channels = 1;
-
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) {
-		limits[i].max = 1;
-		limits[i++].types = BIT(NL80211_IFTYPE_STATION);
-		limits[i].max = 4;
-		limits[i++].types = BIT(NL80211_IFTYPE_AP);
-		max_iface_cnt = 5;
-	} else {
-		limits[i].max = 2;
-		limits[i++].types = BIT(NL80211_IFTYPE_STATION) |
-				    BIT(NL80211_IFTYPE_AP);
-		max_iface_cnt = 2;
-	}
-
-	if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_P2P)) {
+	c = 0;
+	i = 0;
+	combo[c].num_different_channels = 1;
+	c0_limits[i].max = 1;
+	c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+	if (p2p) {
+		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN))
+			combo[c].num_different_channels = 2;
 		wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
 					  BIT(NL80211_IFTYPE_P2P_GO) |
 					  BIT(NL80211_IFTYPE_P2P_DEVICE);
-		limits[i].max = 1;
-		limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
-				    BIT(NL80211_IFTYPE_P2P_GO);
-		limits[i].max = 1;
-		limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
-		max_iface_cnt += 2;
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
+				       BIT(NL80211_IFTYPE_P2P_GO);
+	} else {
+		c0_limits[i].max = 1;
+		c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
 	}
-	combo->max_interfaces = max_iface_cnt;
-	combo->limits = limits;
-	combo->n_limits = i;
+	combo[c].max_interfaces = i;
+	combo[c].n_limits = i;
+	combo[c].limits = c0_limits;
 
+	if (p2p) {
+		c++;
+		i = 0;
+		combo[c].num_different_channels = 1;
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_AP);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT);
+		p2p_limits[i].max = 1;
+		p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE);
+		combo[c].max_interfaces = i;
+		combo[c].n_limits = i;
+		combo[c].limits = p2p_limits;
+	}
+
+	if (mbss) {
+		c++;
+		combo[c].beacon_int_infra_match = true;
+		combo[c].num_different_channels = 1;
+		mbss_limits[0].max = 4;
+		mbss_limits[0].types = BIT(NL80211_IFTYPE_AP);
+		combo[c].max_interfaces = 4;
+		combo[c].n_limits = 1;
+		combo[c].limits = mbss_limits;
+	}
+	wiphy->n_iface_combinations = n_combos;
 	wiphy->iface_combinations = combo;
-	wiphy->n_iface_combinations = 1;
 	return 0;
 
 err:
-	kfree(limits);
+	kfree(c0_limits);
+	kfree(p2p_limits);
+	kfree(mbss_limits);
 	kfree(combo);
 	return -ENOMEM;
 }
@@ -5785,7 +5894,10 @@
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
+	struct brcmf_pub *drvr = ifp->drvr;
+	const struct ieee80211_iface_combination *combo;
 	struct ieee80211_supported_band *band;
+	u16 max_interfaces = 0;
 	__le32 bandlist[3];
 	u32 n_bands;
 	int err, i;
@@ -5798,6 +5910,24 @@
 	if (err)
 		return err;
 
+	for (i = 0, combo = wiphy->iface_combinations;
+	     i < wiphy->n_iface_combinations; i++, combo++) {
+		max_interfaces = max(max_interfaces, combo->max_interfaces);
+	}
+
+	for (i = 0; i < max_interfaces && i < ARRAY_SIZE(drvr->addresses);
+	     i++) {
+		u8 *addr = drvr->addresses[i].addr;
+
+		memcpy(addr, drvr->mac, ETH_ALEN);
+		if (i) {
+			addr[0] |= BIT(1);
+			addr[ETH_ALEN - 1] ^= i;
+		}
+	}
+	wiphy->addresses = drvr->addresses;
+	wiphy->n_addresses = i;
+
 	wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
 	wiphy->cipher_suites = __wl_cipher_suites;
 	wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
@@ -6059,11 +6189,15 @@
 
 static void brcmf_free_wiphy(struct wiphy *wiphy)
 {
+	int i;
+
 	if (!wiphy)
 		return;
 
-	if (wiphy->iface_combinations)
-		kfree(wiphy->iface_combinations->limits);
+	if (wiphy->iface_combinations) {
+		for (i = 0; i < wiphy->n_iface_combinations; i++)
+			kfree(wiphy->iface_combinations[i].limits);
+	}
 	kfree(wiphy->iface_combinations);
 	if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
 		kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.h b/drivers/net/wireless/brcm80211/brcmfmac/core.h
index fd74a9c..7463041 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.h
@@ -21,6 +21,7 @@
 #ifndef BRCMFMAC_CORE_H
 #define BRCMFMAC_CORE_H
 
+#include <net/cfg80211.h>
 #include "fweh.h"
 
 #define TOE_TX_CSUM_OL		0x00000001
@@ -118,6 +119,8 @@
 	/* Multicast data packets sent to dongle */
 	unsigned long tx_multicast;
 
+	struct mac_address addresses[BRCMF_MAX_IFS];
+
 	struct brcmf_if *iflist[BRCMF_MAX_IFS];
 
 	struct mutex proto_block;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
index 743f16b..971920f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
@@ -19,6 +19,7 @@
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
+#include <linux/bcm47xx_nvram.h>
 
 #include "debug.h"
 #include "firmware.h"
@@ -426,19 +427,33 @@
 	struct brcmf_fw *fwctx = ctx;
 	u32 nvram_length = 0;
 	void *nvram = NULL;
+	u8 *data = NULL;
+	size_t data_len;
+	bool raw_nvram;
 
 	brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
-	if (!fw && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
-		goto fail;
-
-	if (fw) {
-		nvram = brcmf_fw_nvram_strip(fw->data, fw->size, &nvram_length,
-					     fwctx->domain_nr, fwctx->bus_nr);
-		release_firmware(fw);
-		if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+	if (fw && fw->data) {
+		data = (u8 *)fw->data;
+		data_len = fw->size;
+		raw_nvram = false;
+	} else {
+		data = bcm47xx_nvram_get_contents(&data_len);
+		if (!data && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
 			goto fail;
+		raw_nvram = true;
 	}
 
+	if (data)
+		nvram = brcmf_fw_nvram_strip(data, data_len, &nvram_length,
+					     fwctx->domain_nr, fwctx->bus_nr);
+
+	if (raw_nvram)
+		bcm47xx_nvram_release_contents(data);
+	if (fw)
+		release_firmware(fw);
+	if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+		goto fail;
+
 	fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length);
 	kfree(fwctx);
 	return;
@@ -473,15 +488,9 @@
 	if (!ret)
 		return;
 
-	/* when nvram is optional call .done() callback here */
-	if (fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL) {
-		fwctx->done(fwctx->dev, fw, NULL, 0);
-		kfree(fwctx);
-		return;
-	}
+	brcmf_fw_request_nvram_done(NULL, fwctx);
+	return;
 
-	/* failed nvram request */
-	release_firmware(fw);
 fail:
 	brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
 	device_release_driver(fwctx->dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
index 5944063..8d1ab4a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
@@ -194,11 +194,15 @@
 	spin_lock_irqsave(&flow->block_lock, flags);
 
 	ring = flow->rings[flowid];
+	if (ring->blocked == blocked) {
+		spin_unlock_irqrestore(&flow->block_lock, flags);
+		return;
+	}
 	ifidx = brcmf_flowring_ifidx_get(flow, flowid);
 
 	currently_blocked = false;
 	for (i = 0; i < flow->nrofrings; i++) {
-		if (flow->rings[i]) {
+		if ((flow->rings[i]) && (i != flowid)) {
 			ring = flow->rings[i];
 			if ((ring->status == RING_OPEN) &&
 			    (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
@@ -209,8 +213,8 @@
 			}
 		}
 	}
-	ring->blocked = blocked;
-	if (currently_blocked == blocked) {
+	flow->rings[flowid]->blocked = blocked;
+	if (currently_blocked) {
 		spin_unlock_irqrestore(&flow->block_lock, flags);
 		return;
 	}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index cbf033f..1326898 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -85,7 +85,6 @@
 	BRCMF_ENUM_DEF(IF, 54) \
 	BRCMF_ENUM_DEF(P2P_DISC_LISTEN_COMPLETE, 55) \
 	BRCMF_ENUM_DEF(RSSI, 56) \
-	BRCMF_ENUM_DEF(PFN_SCAN_COMPLETE, 57) \
 	BRCMF_ENUM_DEF(EXTLOG_MSG, 58) \
 	BRCMF_ENUM_DEF(ACTION_FRAME, 59) \
 	BRCMF_ENUM_DEF(ACTION_FRAME_COMPLETE, 60) \
@@ -103,8 +102,7 @@
 	BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
 	BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
 	BRCMF_ENUM_DEF(TDLS_PEER_EVENT, 92) \
-	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
-	BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
+	BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
 
 #define BRCMF_ENUM_DEF(id, val) \
 	BRCMF_E_##id = (val),
@@ -112,7 +110,11 @@
 /* firmware event codes sent by the dongle */
 enum brcmf_fweh_event_code {
 	BRCMF_FWEH_EVENT_ENUM_DEFLIST
-	BRCMF_E_LAST
+	/* this determines the event mask length, which must match
+	 * the minimum length check in the device firmware, so it is
+	 * hard-coded here.
+	 */
+	BRCMF_E_LAST = 139
 };
 #undef BRCMF_ENUM_DEF
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 898c380..7b2136c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -1360,6 +1360,60 @@
 	}
 }
 
+#ifdef DEBUG
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	u16 i;
+	struct brcmf_flowring_ring *ring;
+	struct brcmf_flowring_hash *hash;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+	seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+	seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+	seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+	seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+
+	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
+		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+	seq_puts(seq, "Active flowrings:\n");
+	hash = msgbuf->flow->hash;
+	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
+		if (!msgbuf->flow->rings[i])
+			continue;
+		ring = msgbuf->flow->rings[i];
+		if (ring->status != RING_OPEN)
+			continue;
+		commonring = msgbuf->flowrings[i];
+		hash = &msgbuf->flow->hash[ring->hash_id];
+		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
+				"        ifidx %u, fifo %u, da %pM\n",
+				i, commonring->r_ptr, commonring->w_ptr,
+				skb_queue_len(&ring->skblist), ring->blocked,
+				hash->ifidx, hash->fifo, hash->mac);
+	}
+
+	return 0;
+}
+#else
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif
 
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 {
@@ -1460,6 +1514,8 @@
 	spin_lock_init(&msgbuf->flowring_work_lock);
 	INIT_LIST_HEAD(&msgbuf->work_queue);
 
+	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
+
 	return 0;
 
 fail:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
index d36f5f3..f990e3d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
@@ -2564,15 +2564,6 @@
 	}
 }
 
-static void atomic_orr(int val, atomic_t *v)
-{
-	int old_val;
-
-	old_val = atomic_read(v);
-	while (atomic_cmpxchg(v, old_val, val | old_val) != old_val)
-		old_val = atomic_read(v);
-}
-
 static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
 {
 	struct brcmf_core *buscore;
@@ -2595,7 +2586,7 @@
 	if (val) {
 		brcmf_sdiod_regwl(bus->sdiodev, addr, val, &ret);
 		bus->sdcnt.f1regdata++;
-		atomic_orr(val, &bus->intstatus);
+		atomic_or(val, &bus->intstatus);
 	}
 
 	return ret;
@@ -2712,7 +2703,7 @@
 
 	/* Keep still-pending events for next scheduling */
 	if (intstatus)
-		atomic_orr(intstatus, &bus->intstatus);
+		atomic_or(intstatus, &bus->intstatus);
 
 	brcmf_sdio_clrintr(bus);
 
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index ab775a5..d2c5747 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1472,9 +1472,7 @@
 	wl->timers = t;
 
 #ifdef DEBUG
-	t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
-	if (t->name)
-		strcpy(t->name, name);
+	t->name = kstrdup(name, GFP_ATOMIC);
 #endif
 
 	return t;
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 7603546..29185ae 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -467,7 +467,6 @@
 	.remove		= cw1200_spi_disconnect,
 	.driver = {
 		.name		= "cw1200_wlan_spi",
-		.bus            = &spi_bus_type,
 		.owner          = THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm		= &cw1200_pm_ops,
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 01de1a3..80d4228 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -865,7 +865,7 @@
 
 	switch(type) {
 	case HOSTAP_INTERFACE_AP:
-		dev->tx_queue_len = 0;	/* use main radio device queue */
+		dev->priv_flags |= IFF_NO_QUEUE;	/* use main radio device queue */
 		dev->netdev_ops = &hostap_mgmt_netdev_ops;
 		dev->type = ARPHRD_IEEE80211;
 		dev->header_ops = &hostap_80211_ops;
@@ -874,7 +874,7 @@
 		dev->netdev_ops = &hostap_master_ops;
 		break;
 	default:
-		dev->tx_queue_len = 0;	/* use main radio device queue */
+		dev->priv_flags |= IFF_NO_QUEUE;	/* use main radio device queue */
 		dev->netdev_ops = &hostap_netdev_ops;
 	}
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 08eb229..36818c7 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1410,7 +1410,7 @@
 static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
 {
 
-#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
 
 	struct host_command cmd = {
 		.host_command = CARD_DISABLE_PHY_OFF,
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index c6d7879..1939478 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -746,7 +746,7 @@
 #define IPW_REG_GPIO			IPW_REG_DOMAIN_0_OFFSET + 0x0030
 #define IPW_REG_FW_TYPE                 IPW_REG_DOMAIN_1_OFFSET + 0x0188
 #define IPW_REG_FW_VERSION 		IPW_REG_DOMAIN_1_OFFSET + 0x018C
-#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
+#define IPW_REG_FW_COMPATIBILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
 
 #define IPW_REG_INDIRECT_ADDR_MASK	0x00FFFFFC
 
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index 7f4cb69..af1b3e6 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3259,7 +3259,7 @@
 
 	while (size && PAGE_SIZE - len) {
 		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-				   PAGE_SIZE - len, 1);
+				   PAGE_SIZE - len, true);
 		len = strlen(buf);
 		if (PAGE_SIZE - len)
 			buf[len++] = '\n';
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
index 3440101..908b9f4 100644
--- a/drivers/net/wireless/iwlegacy/debug.c
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -515,12 +515,8 @@
 	    scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
 		      eeprom_ver);
 	for (ofs = 0; ofs < eeprom_len; ofs += 16) {
-		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
-				   buf_size - pos, 0);
-		pos += strlen(buf + pos);
-		if (buf_size - pos > 0)
-			buf[pos++] = '\n';
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index c160dad..991def8 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -122,9 +122,8 @@
 void iwl_down(struct iwl_priv *priv);
 void iwl_cancel_deferred_work(struct iwl_priv *priv);
 void iwlagn_prepare_restart(struct iwl_priv *priv);
-int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
-				 struct iwl_rx_cmd_buffer *rxb,
-				 struct iwl_device_cmd *cmd);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb);
 
 bool iwl_check_for_ct_kill(struct iwl_priv *priv);
 
@@ -216,11 +215,9 @@
 		       struct ieee80211_sta *sta, u16 tid);
 int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
 			struct ieee80211_sta *sta, u16 tid);
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-				   struct iwl_rx_cmd_buffer *rxb,
-				   struct iwl_device_cmd *cmd);
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 static inline u32 iwl_tx_status_to_mac80211(u32 status)
 {
@@ -277,9 +274,6 @@
 
 /* bt coex */
 void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd);
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
 void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
 void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
@@ -332,8 +326,7 @@
 
 int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
 		      struct ieee80211_sta *sta);
 
@@ -480,7 +473,7 @@
 } while (0)
 #endif				/* CONFIG_IWLWIFI_DEBUG */
 
-extern const char *const iwl_dvm_cmd_strings[REPLY_MAX];
+extern const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1];
 
 static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
 {
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 0ffb6ff..b15e44f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -310,12 +310,8 @@
 	pos += scnprintf(buf + pos, buf_size - pos,
 			 "NVM version: 0x%x\n", nvm_ver);
 	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
-		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-		hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
-				   buf_size - pos, 0);
-		pos += strlen(buf + pos);
-		if (buf_size - pos > 0)
-			buf[pos++] = '\n';
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
 	}
 
 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 3811878..0ba3e56 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -669,6 +669,8 @@
 	/* ieee device used by generic ieee processing code */
 	struct ieee80211_hw *hw;
 
+	struct napi_struct *napi;
+
 	struct list_head calib_results;
 
 	struct workqueue_struct *workqueue;
@@ -678,9 +680,8 @@
 	enum ieee80211_band band;
 	u8 valid_contexts;
 
-	int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
-				       struct iwl_rx_cmd_buffer *rxb,
-				       struct iwl_device_cmd *cmd);
+	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb);
 
 	struct iwl_notif_wait_data notif_wait;
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 1d2223d..ab45819 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -659,9 +659,8 @@
 	return need_update;
 }
 
-int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd)
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+					 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
@@ -669,7 +668,7 @@
 
 	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
 		/* bt coex disabled */
-		return 0;
+		return;
 	}
 
 	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
@@ -714,7 +713,6 @@
 	/* FIXME: based on notification, adjust the prio_boost */
 
 	priv->bt_ci_compliance = coex->bt_ci_compliance;
-	return 0;
 }
 
 void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 7acaa26..453f7c3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -250,12 +250,24 @@
 		}
 	}
 
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+		goto error;
+	}
+
 	ret = iwl_run_init_ucode(priv);
 	if (ret) {
 		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
 		goto error;
 	}
 
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+		goto error;
+	}
+
 	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
 	if (ret) {
 		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
@@ -432,7 +444,7 @@
 		u32 error_id;
 	} err_info;
 	struct iwl_notification_wait status_wait;
-	static const u8 status_cmd[] = {
+	static const u16 status_cmd[] = {
 		REPLY_WOWLAN_GET_STATUS,
 	};
 	struct iwlagn_wowlan_status status_data = {};
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 234e30f..e7616f0 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -2029,17 +2029,6 @@
 	return false;
 }
 
-static void iwl_napi_add(struct iwl_op_mode *op_mode,
-			 struct napi_struct *napi,
-			 struct net_device *napi_dev,
-			 int (*poll)(struct napi_struct *, int),
-			 int weight)
-{
-	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-
-	ieee80211_napi_add(priv->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_dvm_ops = {
 	.start = iwl_op_mode_dvm_start,
 	.stop = iwl_op_mode_dvm_stop,
@@ -2052,7 +2041,6 @@
 	.cmd_queue_full = iwl_cmd_queue_full,
 	.nic_config = iwl_nic_config,
 	.wimax_active = iwl_wimax_active,
-	.napi_add = iwl_napi_add,
 };
 
 /*****************************************************************************
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 3bd7c86..cef921c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -1416,11 +1416,11 @@
 /*
  * Try to switch to new modulation mode from legacy
  */
-static int rs_move_legacy_other(struct iwl_priv *priv,
-				struct iwl_lq_sta *lq_sta,
-				struct ieee80211_conf *conf,
-				struct ieee80211_sta *sta,
-				int index)
+static void rs_move_legacy_other(struct iwl_priv *priv,
+				 struct iwl_lq_sta *lq_sta,
+				 struct ieee80211_conf *conf,
+				 struct ieee80211_sta *sta,
+				 int index)
 {
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
 	struct iwl_scale_tbl_info *search_tbl =
@@ -1575,7 +1575,7 @@
 
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
 
 out:
 	lq_sta->search_better_tbl = 1;
@@ -1584,17 +1584,15 @@
 		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
-	return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from SISO
  */
-static int rs_move_siso_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+				  struct iwl_lq_sta *lq_sta,
+				  struct ieee80211_conf *conf,
+				  struct ieee80211_sta *sta, int index)
 {
 	u8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1747,7 +1745,7 @@
 			break;
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
 
  out:
 	lq_sta->search_better_tbl = 1;
@@ -1756,17 +1754,15 @@
 		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
-
-	return 0;
 }
 
 /*
  * Try to switch to new modulation mode from MIMO2
  */
-static int rs_move_mimo2_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+				   struct iwl_lq_sta *lq_sta,
+				   struct ieee80211_conf *conf,
+				   struct ieee80211_sta *sta, int index)
 {
 	s8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1917,7 +1913,7 @@
 			break;
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
  out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
@@ -1926,17 +1922,15 @@
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
 
-	return 0;
-
 }
 
 /*
  * Try to switch to new modulation mode from MIMO3
  */
-static int rs_move_mimo3_to_other(struct iwl_priv *priv,
-				 struct iwl_lq_sta *lq_sta,
-				 struct ieee80211_conf *conf,
-				 struct ieee80211_sta *sta, int index)
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+				   struct iwl_lq_sta *lq_sta,
+				   struct ieee80211_conf *conf,
+				   struct ieee80211_sta *sta, int index)
 {
 	s8 is_green = lq_sta->is_green;
 	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -2093,7 +2087,7 @@
 			break;
 	}
 	search_tbl->lq_type = LQ_NONE;
-	return 0;
+	return;
  out:
 	lq_sta->search_better_tbl = 1;
 	tbl->action++;
@@ -2101,9 +2095,6 @@
 		tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
 	if (update_search_tbl_counter)
 		search_tbl->action = tbl->action;
-
-	return 0;
-
 }
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/dvm/rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index debec96..4a45b0b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -39,7 +39,7 @@
 
 #define IWL_CMD_ENTRY(x) [x] = #x
 
-const char *const iwl_dvm_cmd_strings[REPLY_MAX] = {
+const char *const iwl_dvm_cmd_strings[REPLY_MAX + 1] = {
 	IWL_CMD_ENTRY(REPLY_ALIVE),
 	IWL_CMD_ENTRY(REPLY_ERROR),
 	IWL_CMD_ENTRY(REPLY_ECHO),
@@ -123,9 +123,8 @@
  *
  ******************************************************************************/
 
-static int iwlagn_rx_reply_error(struct iwl_priv *priv,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+				  struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -136,11 +135,9 @@
 		err_resp->cmd_id,
 		le16_to_cpu(err_resp->bad_cmd_seq_num),
 		le32_to_cpu(err_resp->error_info));
-	return 0;
 }
 
-static int iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_csa_notification *csa = (void *)pkt->data;
@@ -152,7 +149,7 @@
 	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
 
 	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
-		return 0;
+		return;
 
 	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
 		rxon->channel = csa->channel;
@@ -165,13 +162,11 @@
 			le16_to_cpu(csa->channel));
 		iwl_chswitch_done(priv, false);
 	}
-	return 0;
 }
 
 
-static int iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
-					  struct iwl_rx_cmd_buffer *rxb,
-					  struct iwl_device_cmd *cmd)
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+					     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_spectrum_notification *report = (void *)pkt->data;
@@ -179,17 +174,15 @@
 	if (!report->state) {
 		IWL_DEBUG_11H(priv,
 			"Spectrum Measure Notification: Start\n");
-		return 0;
+		return;
 	}
 
 	memcpy(&priv->measure_report, report, sizeof(*report));
 	priv->measurement_status |= MEASUREMENT_READY;
-	return 0;
 }
 
-static int iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+				     struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -197,24 +190,20 @@
 	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
 		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
 #endif
-	return 0;
 }
 
-static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+						struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 __maybe_unused len = iwl_rx_packet_len(pkt);
 	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
 			"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
 	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
-	return 0;
 }
 
-static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
@@ -232,8 +221,6 @@
 #endif
 
 	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-	return 0;
 }
 
 /**
@@ -448,9 +435,8 @@
 }
 #endif
 
-static int iwlagn_rx_statistics(struct iwl_priv *priv,
-			      struct iwl_rx_cmd_buffer *rxb,
-			      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	unsigned long stamp = jiffies;
 	const int reg_recalib_period = 60;
@@ -505,7 +491,7 @@
 			  len, sizeof(struct iwl_bt_notif_statistics),
 			  sizeof(struct iwl_notif_statistics));
 		spin_unlock(&priv->statistics.lock);
-		return 0;
+		return;
 	}
 
 	change = common->temperature != priv->statistics.common.temperature ||
@@ -550,13 +536,10 @@
 		priv->lib->temperature(priv);
 
 	spin_unlock(&priv->statistics.lock);
-
-	return 0;
 }
 
-static int iwlagn_rx_reply_statistics(struct iwl_priv *priv,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_notif_statistics *stats = (void *)pkt->data;
@@ -572,15 +555,14 @@
 #endif
 		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
 	}
-	iwlagn_rx_statistics(priv, rxb, cmd);
-	return 0;
+
+	iwlagn_rx_statistics(priv, rxb);
 }
 
 /* Handle notification from uCode that card's power state is changing
  * due to software, hardware, or critical temperature RFKILL */
-static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -627,12 +609,10 @@
 	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
 		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
 			test_bit(STATUS_RF_KILL_HW, &priv->status));
-	return 0;
 }
 
-static int iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
-				       struct iwl_rx_cmd_buffer *rxb,
-				       struct iwl_device_cmd *cmd)
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+					  struct iwl_rx_cmd_buffer *rxb)
 
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -649,14 +629,12 @@
 		if (!test_bit(STATUS_SCANNING, &priv->status))
 			iwl_init_sensitivity(priv);
 	}
-	return 0;
 }
 
 /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-static int iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -664,7 +642,6 @@
 	priv->ampdu_ref++;
 	memcpy(&priv->last_phy_res, pkt->data,
 	       sizeof(struct iwl_rx_phy_res));
-	return 0;
 }
 
 /*
@@ -786,7 +763,7 @@
 
 	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
 
-	ieee80211_rx(priv->hw, skb);
+	ieee80211_rx_napi(priv->hw, skb, priv->napi);
 }
 
 static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -890,9 +867,8 @@
 }
 
 /* Called for REPLY_RX_MPDU_CMD */
-static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+			       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status = {};
@@ -906,7 +882,7 @@
 
 	if (!priv->last_phy_res_valid) {
 		IWL_ERR(priv, "MPDU frame without cached PHY data\n");
-		return 0;
+		return;
 	}
 	phy_res = &priv->last_phy_res;
 	amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -919,14 +895,14 @@
 	if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
 		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
 				phy_res->cfg_phy_cnt);
-		return 0;
+		return;
 	}
 
 	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
 	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
 		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
 				le32_to_cpu(rx_pkt_status));
-		return 0;
+		return;
 	}
 
 	/* This will be used in several places later */
@@ -998,12 +974,10 @@
 
 	iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
 				    rxb, &rx_status);
-	return 0;
 }
 
-static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *cmd)
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_wipan_noa_data *new_data, *old_data;
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1041,8 +1015,6 @@
 
 	if (old_data)
 		kfree_rcu(old_data, rcu_head);
-
-	return 0;
 }
 
 /**
@@ -1053,8 +1025,7 @@
  */
 void iwl_setup_rx_handlers(struct iwl_priv *priv)
 {
-	int (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+	void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
 
 	handlers = priv->rx_handlers;
 
@@ -1102,12 +1073,11 @@
 		iwlagn_bt_rx_handler_setup(priv);
 }
 
-int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-		    struct iwl_device_cmd *cmd)
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
-	int err = 0;
 
 	/*
 	 * Do the notification wait before RX handlers so
@@ -1121,12 +1091,11 @@
 	 *   rx_handlers table.  See iwl_setup_rx_handlers() */
 	if (priv->rx_handlers[pkt->hdr.cmd]) {
 		priv->rx_handlers_stats[pkt->hdr.cmd]++;
-		err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
+		priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
 	} else {
 		/* No handling needed */
 		IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
 			     iwl_dvm_get_cmd_string(pkt->hdr.cmd),
 			     pkt->hdr.cmd);
 	}
-	return err;
 }
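
The rx.c hunks above switch every DVM Rx handler from returning int (with an unused iwl_device_cmd argument) to returning void with only the device and the Rx buffer. A minimal sketch of the resulting shape, with a hypothetical handler name and the driver's existing types:

	/* hypothetical handler following the new convention: no cmd
	 * argument, nothing to return, error paths simply return */
	static void iwlagn_rx_example_notif(struct iwl_priv *priv,
					    struct iwl_rx_cmd_buffer *rxb)
	{
		struct iwl_rx_packet *pkt = rxb_addr(rxb);

		IWL_DEBUG_RX(priv, "got notification 0x%02x\n", pkt->hdr.cmd);
	}

Dispatch in iwl_rx_dispatch() then becomes a plain indirect call, priv->rx_handlers[pkt->hdr.cmd](priv, rxb), with no error code left to propagate.
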
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6..85ceceb 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -123,7 +124,7 @@
 	__le32 old_filter = send->filter_flags;
 	u8 old_dev_type = send->dev_type;
 	int ret;
-	static const u8 deactivate_cmd[] = {
+	static const u16 deactivate_cmd[] = {
 		REPLY_WIPAN_DEACTIVATION_COMPLETE
 	};
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 43bef90..6481594 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -247,9 +247,8 @@
 }
 
 /* Service response to REPLY_SCAN_CMD (0x80) */
-static int iwl_rx_reply_scan(struct iwl_priv *priv,
-			      struct iwl_rx_cmd_buffer *rxb,
-			      struct iwl_device_cmd *cmd)
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+			      struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -257,13 +256,11 @@
 
 	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
 #endif
-	return 0;
 }
 
 /* Service SCAN_START_NOTIFICATION (0x82) */
-static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+				    struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scanstart_notification *notif = (void *)pkt->data;
@@ -277,14 +274,11 @@
 		       le32_to_cpu(notif->tsf_high),
 		       le32_to_cpu(notif->tsf_low),
 		       notif->status, notif->beacon_timer);
-
-	return 0;
 }
 
 /* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static int iwl_rx_scan_results_notif(struct iwl_priv *priv,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+				      struct iwl_rx_cmd_buffer *rxb)
 {
 #ifdef CONFIG_IWLWIFI_DEBUG
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -303,13 +297,11 @@
 		       le32_to_cpu(notif->statistics[0]),
 		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
 #endif
-	return 0;
 }
 
 /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static int iwl_rx_scan_complete_notif(struct iwl_priv *priv,
-				       struct iwl_rx_cmd_buffer *rxb,
-				       struct iwl_device_cmd *cmd)
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
@@ -356,7 +348,6 @@
 		queue_work(priv->workqueue,
 			   &priv->bt_traffic_change_work);
 	}
-	return 0;
 }
 
 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 6ec86ad..0fa67d3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -60,41 +60,28 @@
 	return 0;
 }
 
-static int iwl_process_add_sta_resp(struct iwl_priv *priv,
-				    struct iwl_addsta_cmd *addsta,
-				    struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+				     struct iwl_rx_packet *pkt)
 {
 	struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
-	u8 sta_id = addsta->sta.sta_id;
-	int ret = -EIO;
 
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
-			pkt->hdr.flags);
-		return ret;
-	}
-
-	IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
-		       sta_id);
+	IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
 
 	spin_lock_bh(&priv->sta_lock);
 
 	switch (add_sta_resp->status) {
 	case ADD_STA_SUCCESS_MSK:
 		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
-		ret = iwl_sta_ucode_activate(priv, sta_id);
 		break;
 	case ADD_STA_NO_ROOM_IN_TABLE:
-		IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
-			sta_id);
+		IWL_ERR(priv, "Adding station failed, no room in table.\n");
 		break;
 	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
-		IWL_ERR(priv, "Adding station %d failed, no block ack "
-			"resource.\n", sta_id);
+		IWL_ERR(priv,
+			"Adding station failed, no block ack resource.\n");
 		break;
 	case ADD_STA_MODIFY_NON_EXIST_STA:
-		IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
-			sta_id);
+		IWL_ERR(priv, "Attempting to modify non-existing station\n");
 		break;
 	default:
 		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
@@ -102,37 +89,14 @@
 		break;
 	}
 
-	IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
-		       priv->stations[sta_id].sta.mode ==
-		       STA_CONTROL_MODIFY_MSK ?  "Modified" : "Added",
-		       sta_id, priv->stations[sta_id].sta.sta.addr);
-
-	/*
-	 * XXX: The MAC address in the command buffer is often changed from
-	 * the original sent to the device. That is, the MAC address
-	 * written to the command buffer often is not the same MAC address
-	 * read from the command buffer when the command returns. This
-	 * issue has not yet been resolved and this debugging is left to
-	 * observe the problem.
-	 */
-	IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
-		       priv->stations[sta_id].sta.mode ==
-		       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
-		       addsta->sta.addr);
 	spin_unlock_bh(&priv->sta_lock);
-
-	return ret;
 }
 
-int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
-	if (!cmd)
-		return 0;
-
-	return iwl_process_add_sta_resp(priv, (void *)cmd->payload, pkt);
+	iwl_process_add_sta_resp(priv, pkt);
 }
 
 int iwl_send_add_sta(struct iwl_priv *priv,
@@ -146,6 +110,8 @@
 		.len = { sizeof(*sta), },
 	};
 	u8 sta_id __maybe_unused = sta->sta.sta_id;
+	struct iwl_rx_packet *pkt;
+	struct iwl_add_sta_resp *add_sta_resp;
 
 	IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
 		       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
@@ -159,16 +125,22 @@
 
 	if (ret || (flags & CMD_ASYNC))
 		return ret;
-	/*else the command was successfully sent in SYNC mode, need to free
-	 * the reply page */
+
+	pkt = cmd.resp_pkt;
+	add_sta_resp = (void *)pkt->data;
+
+	/* debug messages are printed in the handler */
+	if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+		spin_lock_bh(&priv->sta_lock);
+		ret = iwl_sta_ucode_activate(priv, sta_id);
+		spin_unlock_bh(&priv->sta_lock);
+	} else {
+		ret = -EIO;
+	}
 
 	iwl_free_resp(&cmd);
 
-	if (cmd.handler_status)
-		IWL_ERR(priv, "%s - error in the CMD response %d\n", __func__,
-			cmd.handler_status);
-
-	return cmd.handler_status;
+	return ret;
 }
 
 bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
@@ -452,6 +424,7 @@
 	struct iwl_rx_packet *pkt;
 	int ret;
 	struct iwl_rem_sta_cmd rm_sta_cmd;
+	struct iwl_rem_sta_resp *rem_sta_resp;
 
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_REMOVE_STA,
@@ -471,29 +444,23 @@
 		return ret;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
-			  pkt->hdr.flags);
+	rem_sta_resp = (void *)pkt->data;
+
+	switch (rem_sta_resp->status) {
+	case REM_STA_SUCCESS_MSK:
+		if (!temporary) {
+			spin_lock_bh(&priv->sta_lock);
+			iwl_sta_ucode_deactivate(priv, sta_id);
+			spin_unlock_bh(&priv->sta_lock);
+		}
+		IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+		break;
+	default:
 		ret = -EIO;
+		IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+		break;
 	}
 
-	if (!ret) {
-		struct iwl_rem_sta_resp *rem_sta_resp = (void *)pkt->data;
-		switch (rem_sta_resp->status) {
-		case REM_STA_SUCCESS_MSK:
-			if (!temporary) {
-				spin_lock_bh(&priv->sta_lock);
-				iwl_sta_ucode_deactivate(priv, sta_id);
-				spin_unlock_bh(&priv->sta_lock);
-			}
-			IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
-			break;
-		default:
-			ret = -EIO;
-			IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
-			break;
-		}
-	}
 	iwl_free_resp(&cmd);
 
 	return ret;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 275df12..bddd197 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1128,8 +1128,7 @@
 	}
 }
 
-int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1273,8 +1272,6 @@
 		skb = __skb_dequeue(&skbs);
 		ieee80211_tx_status(priv->hw, skb);
 	}
-
-	return 0;
 }
 
 /**
@@ -1283,9 +1280,8 @@
  * Handles block-acknowledge notification from device, which reports success
  * of frames sent via aggregation.
  */
-int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
-				   struct iwl_rx_cmd_buffer *rxb,
-				   struct iwl_device_cmd *cmd)
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
@@ -1306,7 +1302,7 @@
 	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
 		IWL_ERR(priv,
 			"BUG_ON scd_flow is bigger than number of queues\n");
-		return 0;
+		return;
 	}
 
 	sta_id = ba_resp->sta_id;
@@ -1319,7 +1315,7 @@
 		if (unlikely(ba_resp->bitmap))
 			IWL_ERR(priv, "Received BA when not expected\n");
 		spin_unlock_bh(&priv->sta_lock);
-		return 0;
+		return;
 	}
 
 	if (unlikely(scd_flow != agg->txq_id)) {
@@ -1333,7 +1329,7 @@
 				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
 				    scd_flow, sta_id, tid, agg->txq_id);
 		spin_unlock_bh(&priv->sta_lock);
-		return 0;
+		return;
 	}
 
 	__skb_queue_head_init(&reclaimed_skbs);
@@ -1413,6 +1409,4 @@
 		skb = __skb_dequeue(&reclaimed_skbs);
 		ieee80211_tx_status(priv->hw, skb);
 	}
-
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 5244e43..931a8e4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -3,6 +3,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -327,7 +328,7 @@
 	const struct fw_img *fw;
 	int ret;
 	enum iwl_ucode_type old_type;
-	static const u8 alive_cmd[] = { REPLY_ALIVE };
+	static const u16 alive_cmd[] = { REPLY_ALIVE };
 
 	fw = iwl_get_ucode_image(priv, ucode_type);
 	if (WARN_ON(!fw))
@@ -406,7 +407,7 @@
 int iwl_run_init_ucode(struct iwl_priv *priv)
 {
 	struct iwl_notification_wait calib_wait;
-	static const u8 calib_complete[] = {
+	static const u16 calib_complete[] = {
 		CALIBRATION_RES_NOTIFICATION,
 		CALIBRATION_COMPLETE_NOTIFICATION
 	};
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index cc35f79..6951aba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,14 +69,14 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	15
+#define IWL7260_UCODE_API_MAX	17
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	12
 #define IWL3165_UCODE_API_OK	13
 
 /* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN	10
+#define IWL7260_UCODE_API_MIN	12
 #define IWL3165_UCODE_API_MIN	13
 
 /* NVM versions */
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 72040cd..197abe4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,13 +69,13 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	15
+#define IWL8000_UCODE_API_MAX	17
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	12
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN	10
+#define IWL8000_UCODE_API_MIN	12
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION		0x0a1d
@@ -97,8 +97,9 @@
 #define DEFAULT_NVM_FILE_FAMILY_8000B		"nvmData-8000B"
 #define DEFAULT_NVM_FILE_FAMILY_8000C		"nvmData-8000C"
 
-/* Max SDIO RX aggregation size of the ADDBA request/response */
-#define MAX_RX_AGG_SIZE_8260_SDIO	28
+/* Max SDIO RX/TX aggregation sizes of the ADDBA request/response */
+#define MAX_RX_AGG_SIZE_8260_SDIO	21
+#define MAX_TX_AGG_SIZE_8260_SDIO	40
 
 /* Max A-MPDU exponent for HT and VHT */
 #define MAX_HT_AMPDU_EXPONENT_8260_SDIO	IEEE80211_HT_MAX_AMPDU_32K
@@ -154,6 +155,7 @@
 	.led_mode = IWL_LED_RF_STATE,					\
 	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,		\
 	.d0i3 = true,							\
+	.features = NETIF_F_RXCSUM,					\
 	.non_shared_ant = ANT_A,					\
 	.dccm_offset = IWL8260_DCCM_OFFSET,				\
 	.dccm_len = IWL8260_DCCM_LEN,					\
@@ -203,6 +205,7 @@
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
 	.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
@@ -216,6 +219,7 @@
 	.nvm_ver = IWL8000_NVM_VERSION,
 	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
 	.max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+	.max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
 	.bt_shared_single_ant = true,
 	.disable_dummy_notification = true,
 	.max_ht_ampdu_exponent  = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 08c14af..939fa22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -297,6 +297,7 @@
  *	mode set
  * @d0i3: device uses d0i3 instead of d3
  * @nvm_hw_section_num: the ID of the HW NVM section
+ * @features: hw features, any combination of feature_whitelist
  * @pwr_tx_backoffs: translation table between power limits and backoffs
  * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
  * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
@@ -348,6 +349,7 @@
 	bool no_power_up_nic_in_init;
 	const char *default_nvm_file_B_step;
 	const char *default_nvm_file_C_step;
+	netdev_features_t features;
 	unsigned int max_rx_agg_size;
 	bool disable_dummy_notification;
 	unsigned int max_tx_agg_size;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index faa17f2..543abea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -200,6 +200,7 @@
 #define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
 #define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
 #define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_PAGING       (1 << 24) /* SDIO PAGING */
 #define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
 #define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
 #define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
@@ -210,6 +211,7 @@
 				 CSR_INT_BIT_HW_ERR  | \
 				 CSR_INT_BIT_FH_TX   | \
 				 CSR_INT_BIT_SW_ERR  | \
+				 CSR_INT_BIT_PAGING  | \
 				 CSR_INT_BIT_RF_KILL | \
 				 CSR_INT_BIT_SW_RX   | \
 				 CSR_INT_BIT_WAKEUP  | \
@@ -422,6 +424,7 @@
 
 /* DRAM INT TABLE */
 #define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER	(1 << 28)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
index 04e6649..71a78ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-data.h
@@ -35,8 +35,8 @@
 TRACE_EVENT(iwlwifi_dev_tx_data,
 	TP_PROTO(const struct device *dev,
 		 struct sk_buff *skb,
-		 void *data, size_t data_len),
-	TP_ARGS(dev, skb, data, data_len),
+		 u8 hdr_len, size_t data_len),
+	TP_ARGS(dev, skb, hdr_len, data_len),
 	TP_STRUCT__entry(
 		DEV_ENTRY
 
@@ -45,7 +45,8 @@
 	TP_fast_assign(
 		DEV_ASSIGN;
 		if (iwl_trace_data(skb))
-			memcpy(__get_dynamic_array(data), data, data_len);
+			skb_copy_bits(skb, hdr_len,
+				      __get_dynamic_array(data), data_len);
 	),
 	TP_printk("[%s] TX frame data", __get_str(dev))
 );
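
The tracepoint change above copies the traced payload straight out of the skb at the 802.11 header offset rather than taking a flat data pointer; skb_copy_bits() also works on non-linear (paged) skbs, which a plain memcpy() of the head buffer does not. A hedged sketch of the same call outside the tracepoint (function name and 64-byte buffer are illustrative only):

	#include <linux/skbuff.h>

	/* copy up to 64 payload bytes that follow the 802.11 header,
	 * even when part of the skb data lives in page fragments */
	static void copy_payload_sample(struct sk_buff *skb, u8 hdr_len, u8 *buf)
	{
		int data_len = min_t(int, (int)skb->len - hdr_len, 64);

		if (data_len > 0)
			skb_copy_bits(skb, hdr_len, buf, data_len);
	}
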
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
index 948ce08..eb4b99a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -36,7 +36,7 @@
 TRACE_EVENT(iwlwifi_dev_hcmd,
 	TP_PROTO(const struct device *dev,
 		 struct iwl_host_cmd *cmd, u16 total_size,
-		 struct iwl_cmd_header *hdr),
+		 struct iwl_cmd_header_wide *hdr),
 	TP_ARGS(dev, cmd, total_size, hdr),
 	TP_STRUCT__entry(
 		DEV_ENTRY
@@ -44,11 +44,14 @@
 		__field(u32, flags)
 	),
 	TP_fast_assign(
-		int i, offset = sizeof(*hdr);
+		int i, offset = sizeof(struct iwl_cmd_header);
+
+		if (hdr->group_id)
+			offset = sizeof(struct iwl_cmd_header_wide);
 
 		DEV_ASSIGN;
 		__entry->flags = cmd->flags;
-		memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
+		memcpy(__get_dynamic_array(hcmd), hdr, offset);
 
 		for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 			if (!cmd->len[i])
@@ -58,8 +61,9 @@
 			offset += cmd->len[i];
 		}
 	),
-	TP_printk("[%s] hcmd %#.2x (%ssync)",
-		  __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[0],
+	TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+		  __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+		  ((u8 *)__get_dynamic_array(hcmd))[0],
 		  __entry->flags & CMD_ASYNC ? "a" : "")
 );
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6685259..a86aa5b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -372,6 +372,30 @@
 	return 0;
 }
 
+static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
+				const u32 len)
+{
+	struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
+	struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
+
+	if (len < sizeof(*fw_capa))
+		return -EINVAL;
+
+	capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
+	capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
+	capa->max_ap_cache_per_scan =
+		le32_to_cpu(fw_capa->max_ap_cache_per_scan);
+	capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
+	capa->max_scan_reporting_threshold =
+		le32_to_cpu(fw_capa->max_scan_reporting_threshold);
+	capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
+	capa->max_significant_change_aps =
+		le32_to_cpu(fw_capa->max_significant_change_aps);
+	capa->max_bssid_history_entries =
+		le32_to_cpu(fw_capa->max_bssid_history_entries);
+	return 0;
+}
+
 /*
  * Gets uCode section from tlv.
  */
@@ -573,13 +597,15 @@
 	size_t len = ucode_raw->size;
 	const u8 *data;
 	u32 tlv_len;
+	u32 usniffer_img;
 	enum iwl_ucode_tlv_type tlv_type;
 	const u8 *tlv_data;
 	char buildstr[25];
-	u32 build;
+	u32 build, paging_mem_size;
 	int num_of_cpus;
 	bool usniffer_images = false;
 	bool usniffer_req = false;
+	bool gscan_capa = false;
 
 	if (len < sizeof(*ucode)) {
 		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -955,12 +981,46 @@
 					    IWL_UCODE_REGULAR_USNIFFER,
 					    tlv_len);
 			break;
+		case IWL_UCODE_TLV_PAGING:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+			IWL_DEBUG_FW(drv,
+				     "Paging: paging enabled (size = %u bytes)\n",
+				     paging_mem_size);
+
+			if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+				IWL_ERR(drv,
+					"Paging: driver supports up to %lu bytes for paging image\n",
+					MAX_PAGING_IMAGE_SIZE);
+				return -EINVAL;
+			}
+
+			if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+				IWL_ERR(drv,
+					"Paging: image isn't a multiple of %lu\n",
+					FW_PAGING_SIZE);
+				return -EINVAL;
+			}
+
+			drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+				paging_mem_size;
+			usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+			drv->fw.img[usniffer_img].paging_mem_size =
+				paging_mem_size;
+			break;
 		case IWL_UCODE_TLV_SDIO_ADMA_ADDR:
 			if (tlv_len != sizeof(u32))
 				goto invalid_tlv_len;
 			drv->fw.sdio_adma_addr =
 				le32_to_cpup((__le32 *)tlv_data);
 			break;
+		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+			if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len))
+				goto invalid_tlv_len;
+			gscan_capa = true;
+			break;
 		default:
 			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
 			break;
@@ -979,6 +1039,16 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * If ucode advertises that it supports GSCAN but GSCAN
+	 * capabilities TLV is not present, warn and continue without GSCAN.
+	 */
+	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+	    WARN(!gscan_capa,
+		 "GSCAN is supported but capabilities TLV is unavailable\n"))
+		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
+			    capa->_capa);
+
 	return 0;
 
  invalid_tlv_len:
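
The IWL_UCODE_TLV_PAGING branch above accepts the advertised paging size only if it does not exceed MAX_PAGING_IMAGE_SIZE and is a whole number of FW_PAGING_SIZE (4 KB) pages. Restated as a stand-alone predicate, a sketch using the same macros (defined in iwl-fw.h further down in this series):

	/* FW_PAGING_SIZE is a power of two, so the mask test is the same
	 * as (size % FW_PAGING_SIZE) != 0 without a division */
	static bool paging_mem_size_valid(u32 paging_mem_size)
	{
		if (paging_mem_size > MAX_PAGING_IMAGE_SIZE)
			return false;

		return !(paging_mem_size & (FW_PAGING_SIZE - 1));
	}
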
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 21302b6..acc3d18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -713,12 +713,12 @@
 	struct ieee80211_channel *chan = &data->channels[0];
 	int n = 0, idx = 0;
 
-	while (chan->band != band && idx < n_channels)
+	while (idx < n_channels && chan->band != band)
 		chan = &data->channels[++idx];
 
 	sband->channels = &data->channels[idx];
 
-	while (chan->band == band && idx < n_channels) {
+	while (idx < n_channels && chan->band == band) {
 		chan = &data->channels[++idx];
 		n++;
 	}
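
The two-line change above is a bounds-check ordering fix: testing idx < n_channels first means the channel entry is never dereferenced once the index has walked off the end of the array, which the old operand order allowed on the final iteration. In isolation (illustrative, assuming the same locals as the function above):

	/* safe: && short-circuits, so channels[idx] is only read while
	 * idx is known to be in range */
	while (idx < n_channels && data->channels[idx].band != band)
		idx++;

	/* the old order evaluated the band of channels[idx] first and so
	 * could read channels[n_channels] before checking the bound */
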
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d45dc02..d560648 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,12 +438,6 @@
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
 
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
 /**
  * struct iwl_rb_status - reserve buffer status
  * 	host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
index e57dbd0..af5b320 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-error-dump.h
@@ -84,6 +84,8 @@
  * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
  * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
  *	Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *	&struct iwl_fw_error_dump_rb
  */
 enum iwl_fw_error_dump_type {
 	/* 0 is deprecated */
@@ -97,6 +99,7 @@
 	IWL_FW_ERROR_DUMP_FH_REGS = 8,
 	IWL_FW_ERROR_DUMP_MEM = 9,
 	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+	IWL_FW_ERROR_DUMP_RB = 11,
 
 	IWL_FW_ERROR_DUMP_MAX,
 };
@@ -223,6 +226,20 @@
 };
 
 /**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: reserved
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+	__le32 index;
+	__le32 rxq;
+	__le32 reserved;
+	u8 data[];
+};
+
+/**
  * iwl_fw_error_next_data - advance fw error dump data pointer
  * @data: previous data block
  * Returns: next data block
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index a9b5ae4..84653e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -132,12 +132,14 @@
 	IWL_UCODE_TLV_API_CHANGES_SET	= 29,
 	IWL_UCODE_TLV_ENABLED_CAPABILITIES	= 30,
 	IWL_UCODE_TLV_N_SCAN_CHANNELS		= 31,
+	IWL_UCODE_TLV_PAGING		= 32,
 	IWL_UCODE_TLV_SEC_RT_USNIFFER	= 34,
 	IWL_UCODE_TLV_SDIO_ADMA_ADDR	= 35,
 	IWL_UCODE_TLV_FW_VERSION	= 36,
 	IWL_UCODE_TLV_FW_DBG_DEST	= 38,
 	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
 	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
+	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
 };
 
 struct iwl_ucode_tlv {
@@ -247,9 +249,7 @@
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
  * @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
  * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
- * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
- *	regardless of the band or the number of the probes. FW will calculate
- *	the actual dwell time.
+ * @IWL_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
  * @IWL_UCODE_TLV_API_SCD_CFG: This firmware can configure the scheduler
  *	through the dedicated host command.
  * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
@@ -259,6 +259,8 @@
  * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
  * @IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
  *	instead of 3.
+ * @IWL_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ *	(command version 3) that supports per-chain limits
  */
 enum iwl_ucode_tlv_api {
 	IWL_UCODE_TLV_API_BT_COEX_SPLIT         = (__force iwl_ucode_tlv_api_t)3,
@@ -266,7 +268,7 @@
 	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
 	IWL_UCODE_TLV_API_HDC_PHASE_0		= (__force iwl_ucode_tlv_api_t)10,
 	IWL_UCODE_TLV_API_TX_POWER_DEV		= (__force iwl_ucode_tlv_api_t)11,
-	IWL_UCODE_TLV_API_BASIC_DWELL		= (__force iwl_ucode_tlv_api_t)13,
+	IWL_UCODE_TLV_API_WIDE_CMD_HDR		= (__force iwl_ucode_tlv_api_t)14,
 	IWL_UCODE_TLV_API_SCD_CFG		= (__force iwl_ucode_tlv_api_t)15,
 	IWL_UCODE_TLV_API_SINGLE_SCAN_EBS	= (__force iwl_ucode_tlv_api_t)16,
 	IWL_UCODE_TLV_API_ASYNC_DTM		= (__force iwl_ucode_tlv_api_t)17,
@@ -274,6 +276,7 @@
 	IWL_UCODE_TLV_API_STATS_V10		= (__force iwl_ucode_tlv_api_t)19,
 	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
 	IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY	= (__force iwl_ucode_tlv_api_t)24,
+	IWL_UCODE_TLV_API_TX_POWER_CHAIN	= (__force iwl_ucode_tlv_api_t)27,
 };
 
 typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
@@ -284,6 +287,7 @@
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
  * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
  * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
  * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
  * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
  *	tx power value into TPC Report action frame and Link Measurement Report
@@ -298,6 +302,7 @@
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
@@ -305,12 +310,14 @@
  *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  */
 enum iwl_ucode_tlv_capa {
 	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
 	IWL_UCODE_TLV_CAPA_LAR_SUPPORT			= (__force iwl_ucode_tlv_capa_t)1,
 	IWL_UCODE_TLV_CAPA_UMAC_SCAN			= (__force iwl_ucode_tlv_capa_t)2,
 	IWL_UCODE_TLV_CAPA_BEAMFORMER			= (__force iwl_ucode_tlv_capa_t)3,
+	IWL_UCODE_TLV_CAPA_TOF_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)5,
 	IWL_UCODE_TLV_CAPA_TDLS_SUPPORT			= (__force iwl_ucode_tlv_capa_t)6,
 	IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	= (__force iwl_ucode_tlv_capa_t)8,
 	IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)9,
@@ -320,10 +327,12 @@
 	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= (__force iwl_ucode_tlv_capa_t)13,
 	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)18,
 	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)19,
+	IWL_UCODE_TLV_CAPA_CSUM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)21,
 	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= (__force iwl_ucode_tlv_capa_t)22,
 	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= (__force iwl_ucode_tlv_capa_t)28,
 	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
 	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
+	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
 };
 
 /* The default calibrate table size if not specified by firmware file */
@@ -341,8 +350,9 @@
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 12
+#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION	0xAAAABBBB
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
 #define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
@@ -412,6 +422,12 @@
 	PRPH_ASSIGN,
 	PRPH_SETBIT,
 	PRPH_CLEARBIT,
+
+	INDIRECT_ASSIGN,
+	INDIRECT_SETBIT,
+	INDIRECT_CLEARBIT,
+
+	PRPH_BLOCKBIT,
 };
 
 /**
@@ -485,10 +501,13 @@
  *
  * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
  * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when the trigger occurs, collect
+ *	monitor data only
  */
 enum iwl_fw_dbg_trigger_mode {
 	IWL_FW_DBG_TRIGGER_START = BIT(0),
 	IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+	IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
 };
 
 /**
@@ -718,4 +737,28 @@
 	struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
+/**
+ * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *	change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *	hold.
+ */
+struct iwl_fw_gscan_capabilities {
+	__le32 max_scan_cache_size;
+	__le32 max_scan_buckets;
+	__le32 max_ap_cache_per_scan;
+	__le32 max_rssi_sample_size;
+	__le32 max_scan_reporting_threshold;
+	__le32 max_hotlist_aps;
+	__le32 max_significant_change_aps;
+	__le32 max_bssid_history_entries;
+} __packed;
+
 #endif  /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 3e3c9d8..45e7321 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -133,6 +133,7 @@
 struct fw_img {
 	struct fw_desc sec[IWL_UCODE_SECTION_MAX];
 	bool is_dual_cpus;
+	u32 paging_mem_size;
 };
 
 struct iwl_sf_region {
@@ -140,6 +141,48 @@
 	u32 size;
 };
 
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS	0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: page phy pointer
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+	dma_addr_t fw_paging_phys;
+	struct page *fw_paging_block;
+	u32 fw_paging_size;
+};
+
 /**
  * struct iwl_fw_cscheme_list - a cipher scheme list
  * @size: a number of entries
@@ -151,6 +194,30 @@
 } __packed;
 
 /**
+ * struct iwl_gscan_capabilities - gscan capabilities supported by FW
+ * @max_scan_cache_size: total space allocated for scan results (in bytes).
+ * @max_scan_buckets: maximum number of channel buckets.
+ * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
+ * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
+ * @max_scan_reporting_threshold: max possible report threshold, in percentage.
+ * @max_hotlist_aps: maximum number of entries for hotlist APs.
+ * @max_significant_change_aps: maximum number of entries for significant
+ *	change APs.
+ * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
+ *	hold.
+ */
+struct iwl_gscan_capabilities {
+	u32 max_scan_cache_size;
+	u32 max_scan_buckets;
+	u32 max_ap_cache_per_scan;
+	u32 max_rssi_sample_size;
+	u32 max_scan_reporting_threshold;
+	u32 max_hotlist_aps;
+	u32 max_significant_change_aps;
+	u32 max_bssid_history_entries;
+};
+
+/**
  * struct iwl_fw - variables associated with the firmware
  *
  * @ucode_ver: ucode version from the ucode file
@@ -208,6 +275,7 @@
 	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
+	struct iwl_gscan_capabilities gscan_capa;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
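
The paging constants added in this header encode a fixed geometry: 4 KB pages (2^12), 8 pages per block (32 KB), and up to 32 blocks per image, i.e. at most 1 MB of paged firmware. A compile-time sanity check of that arithmetic, illustrative only:

	#include <linux/bug.h>
	#include <linux/sizes.h>

	static inline void check_paging_geometry(void)
	{
		/* 4 KB * 8 pages/block * 32 blocks/image = 1 MB */
		BUILD_BUG_ON(FW_PAGING_SIZE != SZ_4K);
		BUILD_BUG_ON(PAGING_BLOCK_SIZE != 8 * SZ_4K);
		BUILD_BUG_ON(MAX_PAGING_IMAGE_SIZE != SZ_1M);
	}
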
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index b5bc959..6caf2af 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -98,7 +99,8 @@
 				continue;
 
 			for (i = 0; i < w->n_cmds; i++) {
-				if (w->cmds[i] == pkt->hdr.cmd) {
+				if (w->cmds[i] ==
+				    WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) {
 					found = true;
 					break;
 				}
@@ -136,7 +138,7 @@
 void
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
 			   struct iwl_notification_wait *wait_entry,
-			   const u8 *cmds, int n_cmds,
+			   const u16 *cmds, int n_cmds,
 			   bool (*fn)(struct iwl_notif_wait_data *notif_wait,
 				      struct iwl_rx_packet *pkt, void *data),
 			   void *fn_data)
@@ -147,7 +149,7 @@
 	wait_entry->fn = fn;
 	wait_entry->fn_data = fn_data;
 	wait_entry->n_cmds = n_cmds;
-	memcpy(wait_entry->cmds, cmds, n_cmds);
+	memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
 	wait_entry->triggered = false;
 	wait_entry->aborted = false;
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 95af97a..dbe8234 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -105,7 +106,7 @@
 		   struct iwl_rx_packet *pkt, void *data);
 	void *fn_data;
 
-	u8 cmds[MAX_NOTIF_CMDS];
+	u16 cmds[MAX_NOTIF_CMDS];
 	u8 n_cmds;
 	bool triggered, aborted;
 };
@@ -121,7 +122,7 @@
 void __acquires(wait_entry)
 iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
 			   struct iwl_notification_wait *wait_entry,
-			   const u8 *cmds, int n_cmds,
+			   const u16 *cmds, int n_cmds,
 			   bool (*fn)(struct iwl_notif_wait_data *notif_data,
 				      struct iwl_rx_packet *pkt, void *data),
 			   void *fn_data);
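
With the cmds array widened to u16, a notification-wait entry can carry a full group+opcode pair built with WIDE_ID() (added in iwl-trans.h below), while legacy 8-bit command ids keep working unchanged; the wait code, as shown in the iwl-notif-wait.c hunk, compares each entry against WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd). A usage sketch, with a placeholder completion function and assuming the DVM op-mode's priv->notif_wait:

	/* entries may be plain opcodes (group 0) or WIDE_ID(group, opcode) */
	static const u16 wait_cmds[] = { REPLY_ALIVE };
	struct iwl_notification_wait wait_entry;

	iwl_init_notification_wait(&priv->notif_wait, &wait_entry,
				   wait_cmds, ARRAY_SIZE(wait_cmds),
				   example_wait_fn, NULL);
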
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index ce1cdd7..b47fe9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -116,10 +116,6 @@
  *	May sleep
  * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself, and the
  *	NAPI context is passed along as well. Can't sleep.
- * @napi_add: NAPI initialization. The transport is fully responsible for NAPI,
- *	but the higher layers need to know about it (in particular mac80211 to
- *	to able to call the right NAPI RX functions); this function is needed
- *	to eventually call netif_napi_add() with higher layer involvement.
  * @queue_full: notifies that a HW queue is full.
  *	Must be atomic and called with BH disabled.
  * @queue_not_full: notifies that a HW queue is not full any more.
@@ -148,13 +144,8 @@
 				     const struct iwl_fw *fw,
 				     struct dentry *dbgfs_dir);
 	void (*stop)(struct iwl_op_mode *op_mode);
-	int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
-		  struct iwl_device_cmd *cmd);
-	void (*napi_add)(struct iwl_op_mode *op_mode,
-			 struct napi_struct *napi,
-			 struct net_device *napi_dev,
-			 int (*poll)(struct napi_struct *, int),
-			 int weight);
+	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		   struct iwl_rx_cmd_buffer *rxb);
 	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
 	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
 	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
@@ -188,11 +179,11 @@
 	op_mode->ops->stop(op_mode);
 }
 
-static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd)
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+				  struct napi_struct *napi,
+				  struct iwl_rx_cmd_buffer *rxb)
 {
-	return op_mode->ops->rx(op_mode, rxb, cmd);
+	return op_mode->ops->rx(op_mode, napi, rxb);
 }
 
 static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
@@ -260,15 +251,4 @@
 	return op_mode->ops->exit_d0i3(op_mode);
 }
 
-static inline void iwl_op_mode_napi_add(struct iwl_op_mode *op_mode,
-					struct napi_struct *napi,
-					struct net_device *napi_dev,
-					int (*poll)(struct napi_struct *, int),
-					int weight)
-{
-	if (!op_mode->ops->napi_add)
-		return;
-	op_mode->ops->napi_add(op_mode, napi, napi_dev, poll, weight);
-}
-
 #endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5af1c77..3ab777f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -253,6 +253,7 @@
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
 #define SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
 #define SCD_GP_CTRL_ENABLE_31_QUEUES		BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE		BIT(18)
 
 /* Context Data */
 #define SCD_CONTEXT_MEM_LOWER_BOUND	(SCD_MEM_LOWER_BOUND + 0x600)
@@ -291,6 +292,9 @@
 
 /*********************** END TX SCHEDULER *************************************/
 
+/* tcp checksum offload */
+#define RX_EN_CSUM		(0x00a00d88)
+
 /* Oscillator clock */
 #define OSC_CLK				(0xa04068)
 #define OSC_CLK_FORCE_CONTROL		(0x8)
@@ -379,6 +383,8 @@
 #define AUX_MISC_MASTER1_SMPHR_STATUS	0xA20800
 #define RSA_ENABLE			0xA24B08
 #define PREG_AUX_BUS_WPROT_0		0xA04CC0
+#define SB_CPU_1_STATUS			0xA01E30
+#define SB_CPU_2_STATUS			0xA01E34
 
 /* FW chicken bits */
 #define LMPM_CHICK			0xA01FF8
@@ -386,4 +392,10 @@
 	LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
 };
 
+/* FW chicken bits */
+#define LMPM_PAGE_PASS_NOTIF			0xA03824
+enum {
+	LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
 #endif				/* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 87a230a..c829c50 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -122,6 +122,40 @@
 #define INDEX_TO_SEQ(i)	((i) & 0xff)
 #define SEQ_RX_FRAME	cpu_to_le16(0x8000)
 
+/*
+ * These functions retrieve specific information (the command id, the
+ * group id and the version) from the id field in the iwl_host_cmd
+ * struct, or compose such an id from those parts.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+	return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+	return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+	return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+	return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) ((grp << 8) | opcode)
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP	1
+
 /**
  * struct iwl_cmd_header
  *
@@ -130,7 +164,7 @@
  */
 struct iwl_cmd_header {
 	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
-	u8 flags;	/* 0:5 reserved, 6 abort, 7 internal */
+	u8 group_id;
 	/*
 	 * The driver sets up the sequence number to values of its choosing.
 	 * uCode does not use this value, but passes it back to the driver
@@ -154,9 +188,22 @@
 	__le16 sequence;
 } __packed;
 
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
+/**
+ * struct iwl_cmd_header_wide
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * This is the wide version that contains more information about the
+ * command, such as its length, version and command type.
+ */
+struct iwl_cmd_header_wide {
+	u8 cmd;
+	u8 group_id;
+	__le16 sequence;
+	__le16 length;
+	u8 reserved;
+	u8 version;
+} __packed;
 
 #define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
 #define FH_RSCSR_FRAME_INVALID		0x55550000
@@ -201,6 +248,8 @@
  * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
  * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
  *	(i.e. mark it as non-idle).
+ * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
+ *	check that we leave enough room for the TBs bitmap which needs 20 bits.
  */
 enum CMD_MODE {
 	CMD_ASYNC		= BIT(0),
@@ -210,6 +259,8 @@
 	CMD_SEND_IN_IDLE	= BIT(4),
 	CMD_MAKE_TRANS_IDLE	= BIT(5),
 	CMD_WAKE_UP_TRANS	= BIT(6),
+
+	CMD_TB_BITMAP_POS	= 11,
 };
 
 #define DEF_CMD_PAYLOAD_SIZE 320
@@ -222,8 +273,18 @@
  * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
-	struct iwl_cmd_header hdr;	/* uCode API */
-	u8 payload[DEF_CMD_PAYLOAD_SIZE];
+	union {
+		struct {
+			struct iwl_cmd_header hdr;	/* uCode API */
+			u8 payload[DEF_CMD_PAYLOAD_SIZE];
+		};
+		struct {
+			struct iwl_cmd_header_wide hdr_wide;
+			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+					sizeof(struct iwl_cmd_header_wide) +
+					sizeof(struct iwl_cmd_header)];
+		};
+	};
 } __packed;
 
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
@@ -261,24 +322,22 @@
  * @resp_pkt: response packet, if %CMD_WANT_SKB was set
  * @_rx_page_order: (internally used to free response packet)
  * @_rx_page_addr: (internally used to free response packet)
- * @handler_status: return value of the handler of the command
- *	(put in setup_rx_handlers) - valid for SYNC mode only
  * @flags: can be CMD_*
  * @len: array of the lengths of the chunks in data
  * @dataflags: IWL_HCMD_DFL_*
- * @id: id of the host command
+ * @id: command id of the host command, for wide commands encoding the
+ *	version and group as well
  */
 struct iwl_host_cmd {
 	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
 	struct iwl_rx_packet *resp_pkt;
 	unsigned long _rx_page_addr;
 	u32 _rx_page_order;
-	int handler_status;
 
 	u32 flags;
+	u32 id;
 	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
 	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
-	u8 id;
 };
 
 static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
@@ -379,6 +438,7 @@
  * @bc_table_dword: set to true if the BC table expects the byte count to be
  *	in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: firmware supports wide host command header
  * @command_names: array of command names, must be 256 entries
  *	(one for each command); for debugging only
  * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
@@ -396,6 +456,7 @@
 	bool rx_buf_size_8k;
 	bool bc_table_dword;
 	bool scd_set_active;
+	bool wide_cmd_header;
 	const char *const *command_names;
 
 	u32 sdio_adma_addr;
@@ -544,10 +605,12 @@
 			      u32 value);
 	void (*ref)(struct iwl_trans *trans);
 	void (*unref)(struct iwl_trans *trans);
-	void (*suspend)(struct iwl_trans *trans);
+	int  (*suspend)(struct iwl_trans *trans);
 	void (*resume)(struct iwl_trans *trans);
 
-	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans);
+	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+						 struct iwl_fw_dbg_trigger_tlv
+						 *trigger);
 };
 
 /**
@@ -584,6 +647,8 @@
  * @cfg - pointer to the configuration
  * @status: a bit-mask of transport status flags
  * @dev - pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
  * @hw_id: a u32 with the ID of the device / sub-device.
  *	Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -603,6 +668,12 @@
  * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
  * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
  * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @paging_req_addr: The location where the FW will upload / download the pages
+ *	from. The address is set by the opmode
+ * @paging_db: Pointer to the opmode paging database; the pointer is set by
+ *	the opmode.
+ * @paging_download_buf: Buffer used for copying all of the pages before
+ *	downloading them to the FW. The buffer is allocated in the opmode
  */
 struct iwl_trans {
 	const struct iwl_trans_ops *ops;
@@ -612,6 +683,7 @@
 	unsigned long status;
 
 	struct device *dev;
+	u32 max_skb_frags;
 	u32 hw_rev;
 	u32 hw_id;
 	char hw_id_str[52];
@@ -639,6 +711,14 @@
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
 	u8 dbg_dest_reg_num;
 
+	/*
+	 * Paging parameters - All of the parameters should be set by the
+	 * opmode when paging is enabled
+	 */
+	u32 paging_req_addr;
+	struct iwl_fw_paging *paging_db;
+	void *paging_download_buf;
+
 	enum iwl_d0i3_mode d0i3_mode;
 
 	bool wowlan_d0i3;
@@ -730,7 +810,8 @@
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
 	might_sleep();
-	trans->ops->d3_suspend(trans, test);
+	if (trans->ops->d3_suspend)
+		trans->ops->d3_suspend(trans, test);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
@@ -738,6 +819,9 @@
 				      bool test)
 {
 	might_sleep();
+	if (!trans->ops->d3_resume)
+		return 0;
+
 	return trans->ops->d3_resume(trans, status, test);
 }
 
@@ -753,10 +837,12 @@
 		trans->ops->unref(trans);
 }
 
-static inline void iwl_trans_suspend(struct iwl_trans *trans)
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
 {
-	if (trans->ops->suspend)
-		trans->ops->suspend(trans);
+	if (!trans->ops->suspend)
+		return 0;
+
+	return trans->ops->suspend(trans);
 }
 
 static inline void iwl_trans_resume(struct iwl_trans *trans)
@@ -766,11 +852,12 @@
 }
 
 static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans)
+iwl_trans_dump_data(struct iwl_trans *trans,
+		    struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	if (!trans->ops->dump_data)
 		return NULL;
-	return trans->ops->dump_data(trans);
+	return trans->ops->dump_data(trans, trigger);
 }
 
 static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
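With @id widened to 32 bits and the new @wide_cmd_header flag, an opmode can encode the
command group and version in the command id itself rather than in a separate software
header. A minimal sketch of such an encoding; the helper name and exact bit layout below
are assumptions for illustration, not taken from this patch:

	/* Hypothetical helper: opcode in byte 0, group in byte 1, version in byte 2. */
	static inline u32 example_wide_cmd_id(u8 opcode, u8 group, u8 version)
	{
		return (u32)opcode | ((u32)group << 8) | ((u32)version << 16);
	}

	struct iwl_host_cmd hcmd = {
		.id	= example_wide_cmd_id(0x01, 0x04, 0),	/* ids illustrative only */
		.flags	= CMD_WANT_SKB,
	};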
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 2d7c3ea..8c2c3d1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -6,6 +6,7 @@
 iwlmvm-y += tt.o offloading.o tdls.o
 iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
 iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
 iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
 
 ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index b4737e2..e290ac6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -725,15 +725,17 @@
 	}
 }
 
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-			     struct iwl_rx_cmd_buffer *rxb,
-			     struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_rx_bt_coex_notif_old(mvm, rxb, dev_cmd);
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+		iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
+		return;
+	}
 
 	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
 	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
@@ -748,12 +750,6 @@
 	memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
 
 	iwl_mvm_bt_coex_notif_handle(mvm);
-
-	/*
-	 * This is an async handler for a notification, returning anything other
-	 * than 0 doesn't make sense even if HCMD failed.
-	 */
-	return 0;
 }
 
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -947,9 +943,8 @@
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
@@ -957,20 +952,23 @@
 	u8 __maybe_unused lower_bound, upper_bound;
 	u8 lut;
 
-	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
-		return iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb, dev_cmd);
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
+		iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
+		return;
+	}
 
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
-		return 0;
+		return;
 
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Ignore updates if we are in force mode */
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return 0;
+		return;
 
 	if (ant_isolation ==  mvm->last_ant_isol)
-		return 0;
+		return;
 
 	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
 		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -989,7 +987,7 @@
 	mvm->last_ant_isol = ant_isolation;
 
 	if (mvm->last_corun_lut == lut)
-		return 0;
+		return;
 
 	mvm->last_corun_lut = lut;
 
@@ -1000,6 +998,8 @@
 	memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20,
 	       sizeof(cmd.corun_lut40));
 
-	return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
-				    sizeof(cmd), &cmd);
+	if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0,
+				 sizeof(cmd), &cmd))
+		IWL_ERR(mvm,
+			"failed to send BT_COEX_UPDATE_CORUN_LUT command\n");
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 6ac6de2..61c07b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -1058,9 +1058,8 @@
 		IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
 }
 
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				 struct iwl_rx_cmd_buffer *rxb,
-				 struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+				  struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
@@ -1083,12 +1082,6 @@
 	memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
 
 	iwl_mvm_bt_coex_notif_handle(mvm);
-
-	/*
-	 * This is an async handler for a notification, returning anything other
-	 * than 0 doesn't make sense even if HCMD failed.
-	 */
-	return 0;
 }
 
 static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
@@ -1250,14 +1243,12 @@
 	iwl_mvm_bt_coex_notif_handle(mvm);
 }
 
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *dev_cmd)
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+				       struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u32 ant_isolation = le32_to_cpup((void *)pkt->data);
 	u8 __maybe_unused lower_bound, upper_bound;
-	int ret;
 	u8 lut;
 
 	struct iwl_bt_coex_cmd_old *bt_cmd;
@@ -1268,16 +1259,16 @@
 	};
 
 	if (!iwl_mvm_bt_is_plcr_supported(mvm))
-		return 0;
+		return;
 
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Ignore updates if we are in force mode */
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
-		return 0;
+		return;
 
 	if (ant_isolation ==  mvm->last_ant_isol)
-		return 0;
+		return;
 
 	for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
 		if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
@@ -1296,13 +1287,13 @@
 	mvm->last_ant_isol = ant_isolation;
 
 	if (mvm->last_corun_lut == lut)
-		return 0;
+		return;
 
 	mvm->last_corun_lut = lut;
 
 	bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
 	if (!bt_cmd)
-		return 0;
+		return;
 	cmd.data[0] = bt_cmd;
 
 	bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
@@ -1317,8 +1308,8 @@
 	memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
 	       sizeof(bt_cmd->bt4_corun_lut40));
 
-	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (iwl_mvm_send_cmd(mvm, &cmd))
+		IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
 
 	kfree(bt_cmd);
-	return ret;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index beba375..b8ee312 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -102,6 +102,7 @@
 #define IWL_MVM_QUOTA_THRESHOLD			4
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
 #define IWL_MVM_RS_DISABLE_P2P_MIMO		0
+#define IWL_MVM_TOF_IS_RESPONDER		0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 4165d10..04264e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -1145,7 +1145,7 @@
 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
 {
 	struct iwl_notification_wait wait_d3;
-	static const u8 d3_notif[] = { D3_CONFIG_CMD };
+	static const u16 d3_notif[] = { D3_CONFIG_CMD };
 	int ret;
 
 	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
@@ -1168,13 +1168,17 @@
 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
 
-	iwl_trans_suspend(mvm->trans);
+	ret = iwl_trans_suspend(mvm->trans);
+	if (ret)
+		return ret;
+
 	mvm->trans->wowlan_d0i3 = wowlan->any;
 	if (mvm->trans->wowlan_d0i3) {
 		/* 'any' trigger means d0i3 usage */
 		if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-			int ret = iwl_mvm_enter_d0i3_sync(mvm);
+			ret = iwl_mvm_enter_d0i3_sync(mvm);
 
 			if (ret)
 				return ret;
@@ -1183,6 +1187,9 @@
 		mutex_lock(&mvm->d0i3_suspend_mutex);
 		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
 		mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+		iwl_trans_d3_suspend(mvm->trans, false);
+
 		return 0;
 	}
 
@@ -1935,28 +1942,59 @@
 	return 1;
 }
 
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
+{
+	iwl_trans_resume(mvm->trans);
+
+	return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+	bool exit_now;
+	enum iwl_d3_status d3_status;
+
+	iwl_trans_d3_resume(mvm->trans, &d3_status, false);
+
+	/*
+	 * make sure to clear D0I3_DEFER_WAKEUP before
+	 * calling iwl_trans_resume(), which might wait
+	 * for d0i3 exit completion.
+	 */
+	mutex_lock(&mvm->d0i3_suspend_mutex);
+	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+					&mvm->d0i3_suspend_flags);
+	mutex_unlock(&mvm->d0i3_suspend_mutex);
+	if (exit_now) {
+		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+		_iwl_mvm_exit_d0i3(mvm);
+	}
+
+	iwl_trans_resume(mvm->trans);
+
+	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
+		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
+
+		if (ret)
+			return ret;
+		/*
+		 * d0i3 exit will be deferred until reconfig_complete;
+		 * make sure we are out of d0i3 by then.
+		 */
+	}
+	return 0;
+}
+
 int iwl_mvm_resume(struct ieee80211_hw *hw)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
-	iwl_trans_resume(mvm->trans);
-
-	if (mvm->hw->wiphy->wowlan_config->any) {
-		/* 'any' trigger means d0i3 usage */
-		if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND) {
-			int ret = iwl_mvm_exit_d0i3(hw->priv);
-
-			if (ret)
-				return ret;
-			/*
-			 * d0i3 exit will be deferred until reconfig_complete.
-			 * make sure there we are out of d0i3.
-			 */
-		}
-		return 0;
-	}
-
-	return __iwl_mvm_resume(mvm, false);
+	/* 'any' trigger means d0i3 was used */
+	if (hw->wiphy->wowlan_config->any)
+		return iwl_mvm_resume_d0i3(mvm);
+	else
+		return iwl_mvm_resume_d3(mvm);
 }
 
 void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 5c8a65d..383a316 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -63,6 +63,7 @@
  *
  *****************************************************************************/
 #include "mvm.h"
+#include "fw-api-tof.h"
 #include "debugfs.h"
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -497,6 +498,731 @@
 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+	int len = strlen(name);
+
+	return !strncmp(name, buf, len) ? buf + len : NULL;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+					  char *buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = -EINVAL;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("tof_disabled=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.tof_disabled = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.one_sided_disabled = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.is_debug_mode = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("is_buf=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.is_buf_required = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_config_cmd(mvm);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_config_cmd *cmd;
+
+	cmd = &mvm->tof_data.tof_cfg;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+			 cmd->tof_disabled);
+	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+			 cmd->one_sided_disabled);
+	pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+			 cmd->is_debug_mode);
+	pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+			 cmd->is_buf_required);
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+						    char *buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("burst_period=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (!ret)
+			mvm->tof_data.responder_cfg.burst_period =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.min_delta_ftm = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("burst_duration=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.burst_duration = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("abort_responder=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.abort_responder = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("get_ch_est=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.get_ch_est = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("channel_num=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.channel_num = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("bandwidth=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.bandwidth = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("rate=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.rate = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("bssid=", buf);
+	if (data) {
+		u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("toa_offset=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.toa_offset =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ctrl_ch_position=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ctrl_ch_position = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ftm_per_burst = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("asap_mode=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.asap_mode = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+						   char __user *user_buf,
+						   size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_responder_config_cmd *cmd;
+
+	cmd = &mvm->tof_data.responder_cfg;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+			 le16_to_cpu(cmd->burst_period));
+	pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+			 cmd->burst_duration);
+	pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+			 cmd->bandwidth);
+	pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+			 cmd->channel_num);
+	pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+			 cmd->ctrl_ch_position);
+	pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+			 cmd->bssid);
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+			 cmd->min_delta_ftm);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+			 cmd->num_of_burst_exp);
+	pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+	pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+			 cmd->abort_responder);
+	pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+			 cmd->get_ch_est);
+	pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+			 cmd->recv_sta_req_params);
+	pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+			 cmd->ftm_per_burst);
+	pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+			 cmd->ftm_resp_ts_avail);
+	pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+			 cmd->asap_mode);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "tsf_timer_offset_msecs = %d\n",
+			 le16_to_cpu(cmd->tsf_timer_offset_msecs));
+	pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+			 le16_to_cpu(cmd->toa_offset));
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+						 char *buf, size_t count,
+						 loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("request_id=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.request_id = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("initiator=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.initiator = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.one_sided_los_disable = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("req_timeout=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.req_timeout = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("report_policy=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.report_policy = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_random=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.macaddr_random = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("num_of_ap=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.num_of_ap = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_template=", buf);
+	if (data) {
+		u8 mac[ETH_ALEN];
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+	if (data) {
+		u8 mac[ETH_ALEN];
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+	}
+
+	data = iwl_dbgfs_is_match("ap=", buf);
+	if (data) {
+		struct iwl_tof_range_req_ap_entry ap;
+		int size = sizeof(struct iwl_tof_range_req_ap_entry);
+		u16 burst_period;
+		u8 *mac = ap.bssid;
+		unsigned int i;
+
+		if (sscanf(data, "%u %hhd %hhx %hhx"
+			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx"
+			   "%hhx %hhx %hx"
+			   "%hhx %hhx %x"
+			   "%hhx %hhx %hhx %hhx",
+			   &i, &ap.channel_num, &ap.bandwidth,
+			   &ap.ctrl_ch_position,
+			   mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+			   &ap.measure_type, &ap.num_of_bursts,
+			   &burst_period,
+			   &ap.samples_per_burst, &ap.retries_per_sample,
+			   &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+			   &ap.enable_dyn_ack, &ap.rssi) != 20) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (i >= IWL_MVM_TOF_MAX_APS) {
+			IWL_ERR(mvm, "Invalid AP index %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ap.burst_period = cpu_to_le16(burst_period);
+
+		memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_request=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[512];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_range_req_cmd *cmd;
+	int i;
+
+	cmd = &mvm->tof_data.range_req;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+			 cmd->request_id);
+	pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+			 cmd->initiator);
+	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+			 cmd->one_sided_los_disable);
+	pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+			 cmd->req_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+			 cmd->report_policy);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+			 cmd->macaddr_random);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+			 cmd->macaddr_template);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+			 cmd->macaddr_mask);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+			 cmd->num_of_ap);
+	for (i = 0; i < cmd->num_of_ap; i++) {
+		struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"ap %.2d: channel_num=%hhx bw=%hhx"
+				" control=%hhx bssid=%pM type=%hhx"
+				" num_of_bursts=%hhx burst_period=%hx ftm=%hhx"
+				" retries=%hhx tsf_delta=%x location_req=%hhx "
+				" asap=%hhx enable=%hhx rssi=%hhx\n",
+				i, ap->channel_num, ap->bandwidth,
+				ap->ctrl_ch_position, ap->bssid,
+				ap->measure_type, ap->num_of_bursts,
+				ap->burst_period, ap->samples_per_burst,
+				ap->retries_per_sample, ap->tsf_delta,
+				ap->location_req, ap->asap_mode,
+				ap->enable_dyn_ack, ap->rssi);
+	}
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+						 char *buf,
+						 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.min_delta_ftm = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_range_req_ext_cmd *cmd;
+
+	cmd = &mvm->tof_data.range_req_ext;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "tsf_timer_offset_msec = %hx\n",
+			 cmd->tsf_timer_offset_msec);
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhx\n",
+			 cmd->min_delta_ftm);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw20M = %hhx\n",
+			 cmd->ftm_format_and_bw20M);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw40M = %hhx\n",
+			 cmd->ftm_format_and_bw40M);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw80M = %hhx\n",
+			 cmd->ftm_format_and_bw80M);
+
+	mutex_unlock(&mvm->mutex);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+					       char *buf,
+					       size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int value, ret = 0;
+	int abort_id;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("abort_id=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.last_abort_id = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_abort=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			abort_id = mvm->tof_data.last_abort_id;
+			ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[32];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	int last_abort_id;
+
+	mutex_lock(&mvm->mutex);
+	last_abort_id = mvm->tof_data.last_abort_id;
+	mutex_unlock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+			 last_abort_id);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char *buf;
+	int pos = 0;
+	const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+	struct iwl_tof_range_rsp_ntfy *cmd;
+	int i, ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+	cmd = &mvm->tof_data.range_resp;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+			 cmd->request_id);
+	pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+			 cmd->request_status);
+	pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+			 cmd->last_in_batch);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+			 cmd->num_of_aps);
+	for (i = 0; i < cmd->num_of_aps; i++) {
+		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"ap %.2d: bssid=%pM status=%hhx bw=%hhx"
+				" rtt=%x rtt_var=%x rtt_spread=%x"
+				" rssi=%hhx  rssi_spread=%hhx"
+				" range=%x range_var=%x"
+				" time_stamp=%x\n",
+				i, ap->bssid, ap->measure_status,
+				ap->measure_bw,
+				ap->rtt, ap->rtt_variance, ap->rtt_spread,
+				ap->rssi, ap->rssi_spread, ap->range,
+				ap->range_variance, ap->timestamp);
+	}
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
 static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
 					   size_t count, loff_t *ppos)
 {
@@ -628,6 +1354,12 @@
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -671,6 +1403,25 @@
 		MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir,
 					 S_IRUSR | S_IWUSR);
 
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+	    !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+		if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+			MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+						 mvmvif->dbgfs_dir,
+						 S_IRUSR | S_IWUSR);
+
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+					 S_IRUSR | S_IWUSR);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+					 S_IRUSR);
+	}
+
 	/*
 	 * Create symlink for convenience pointing to interface specific
 	 * debugfs entries for the driver. For example, under
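The new ToF debugfs files all take "name=value" strings, parsed with iwl_dbgfs_is_match()
and kstrtou32(); writing a "send_*=1" line then issues the corresponding firmware command.
A rough userspace sketch, assuming the usual per-vif debugfs layout (the path below is an
assumption):

	/* Illustrative only: set a field, then trigger TOF_CONFIG_CMD. */
	int fd = open("/sys/kernel/debug/.../netdev:wlan0/iwlmvm/tof_enable", O_WRONLY);

	if (fd >= 0) {
		write(fd, "tof_disabled=0", strlen("tof_disabled=0"));
		write(fd, "send_tof_cfg=1", strlen("send_tof_cfg=1"));
		close(fd);
	}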
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index ffb4b5c..7d69a55 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -949,9 +949,10 @@
 					   char *buf, size_t count,
 					   loff_t *ppos)
 {
-	int ret, conf_id;
+	unsigned int conf_id;
+	int ret;
 
-	ret = kstrtoint(buf, 0, &conf_id);
+	ret = kstrtouint(buf, 0, &conf_id);
 	if (ret)
 		return ret;
 
@@ -974,7 +975,7 @@
 	if (ret)
 		return ret;
 
-	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, 0);
+	iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, NULL, 0, NULL);
 
 	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
 
@@ -1200,12 +1201,7 @@
 	if (ptr) {
 		for (ofs = 0; ofs < len; ofs += 16) {
 			pos += scnprintf(buf + pos, bufsz - pos,
-					 "0x%.4x ", ofs);
-			hex_dump_to_buffer(ptr + ofs, 16, 16, 1, buf + pos,
-					   bufsz - pos, false);
-			pos += strlen(buf + pos);
-			if (bufsz - pos > 0)
-				buf[pos++] = '\n';
+					 "0x%.4x %16ph\n", ofs, ptr + ofs);
 		}
 	} else {
 		pos += scnprintf(buf + pos, bufsz - pos,
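The open-coded hex_dump_to_buffer() loop above is replaced with the kernel's "%*ph" printf
extension, which prints a small buffer (up to 64 bytes) as space-separated hex bytes. A
minimal sketch of the equivalent output:

	u8 row[16] = { 0xde, 0xad, 0xbe, 0xef };

	/* Prints "0x0000 de ad be ef 00 00 ..." -- one 16-byte row per line,
	 * the same layout the removed hex_dump_to_buffer() call produced.
	 */
	pr_info("0x%.4x %16ph\n", 0, row);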
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index d7658d1..20521be 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -339,8 +339,13 @@
 	IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE			= BIT(8),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS			= BIT(9),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE		= BIT(10),
-	/* BIT(11) reserved */
+	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL		= BIT(11),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET		= BIT(12),
+	IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET			= BIT(13),
+	IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER		= BIT(14),
+	IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN		= BIT(15),
+	IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN			= BIT(16),
+
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
 struct iwl_wowlan_gtk_status {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index b1baa33..7005fa4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -310,17 +312,22 @@
 	__le16 pwr_restriction;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
 
+enum iwl_dev_tx_power_cmd_mode {
+	IWL_TX_POWER_MODE_SET_MAC = 0,
+	IWL_TX_POWER_MODE_SET_DEVICE = 1,
+	IWL_TX_POWER_MODE_SET_CHAINS = 2,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */
+
 /**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * REDUCE_TX_POWER_CMD = 0x9f
- * @set_mode: 0 - MAC tx power, 1 - device tx power
+ * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command
+ * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
  * @mac_context_id: id of the mac ctx for which we are reducing TX power.
  * @pwr_restriction: TX power restriction in 1/8 dBms.
  * @dev_24: device TX power restriction in 1/8 dBms
  * @dev_52_low: device TX power restriction upper band - low
  * @dev_52_high: device TX power restriction upper band - high
  */
-struct iwl_dev_tx_power_cmd {
+struct iwl_dev_tx_power_cmd_v2 {
 	__le32 set_mode;
 	__le32 mac_context_id;
 	__le16 pwr_restriction;
@@ -329,6 +336,20 @@
 	__le16 dev_52_high;
 } __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
 
+#define IWL_NUM_CHAIN_LIMITS	2
+#define IWL_NUM_SUB_BANDS	5
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v2: version 2 of the command, embedded here for easier software handling
+ * @per_chain_restriction: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd {
+	/* v3 is just an extension of v2 - keep this here */
+	struct iwl_dev_tx_power_cmd_v2 v2;
+	__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
 #define IWL_DEV_MAX_TX_POWER 0x7FFF
 
 /**
@@ -413,7 +434,7 @@
 #define IWL_BF_TEMP_FAST_FILTER_MIN 0
 
 #define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
-#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
 #define IWL_BF_TEMP_SLOW_FILTER_MAX 255
 #define IWL_BF_TEMP_SLOW_FILTER_MIN 0
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 737774a..660cc1c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -87,41 +87,6 @@
 	u8 ssid[IEEE80211_MAX_SSID_LEN];
 } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
 
-/* How many statistics are gathered for each channel */
-#define SCAN_RESULTS_STATISTICS 1
-
-/**
- * enum iwl_scan_complete_status - status codes for scan complete notifications
- * @SCAN_COMP_STATUS_OK:  scan completed successfully
- * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
- * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
- * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
- * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
- * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
- * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
- * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
- * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
- * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
- *	(not an error!)
- * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
- *	asked for
- * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
-*/
-enum iwl_scan_complete_status {
-	SCAN_COMP_STATUS_OK = 0x1,
-	SCAN_COMP_STATUS_ABORT = 0x2,
-	SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
-	SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
-	SCAN_COMP_STATUS_ERR_PROBE = 0x5,
-	SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
-	SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
-	SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
-	SCAN_COMP_STATUS_ERR_COEX = 0x9,
-	SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
-	SCAN_COMP_STATUS_ITERATION_END = 0x0B,
-	SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
-};
-
 /* scan offload */
 #define IWL_SCAN_MAX_BLACKLIST_LEN	64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN	16
@@ -144,71 +109,6 @@
 };
 
 /**
- * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
- * @scan_flags:		see enum iwl_scan_flags
- * @channel_count:	channels in channel list
- * @quiet_time:		dwell time, in milliseconds, on quiet channel
- * @quiet_plcp_th:	quiet channel num of packets threshold
- * @good_CRC_th:	passive to active promotion threshold
- * @rx_chain:		RXON rx chain.
- * @max_out_time:	max TUs to be out of associated channel
- * @suspend_time:	pause scan this TUs when returning to service channel
- * @flags:		RXON flags
- * @filter_flags:	RXONfilter
- * @tx_cmd:		tx command for active scan; for 2GHz and for 5GHz.
- * @direct_scan:	list of SSIDs for directed active scan
- * @scan_type:		see enum iwl_scan_type.
- * @rep_count:		repetition count for each scheduled scan iteration.
- */
-struct iwl_scan_offload_cmd {
-	__le16 len;
-	u8 scan_flags;
-	u8 channel_count;
-	__le16 quiet_time;
-	__le16 quiet_plcp_th;
-	__le16 good_CRC_th;
-	__le16 rx_chain;
-	__le32 max_out_time;
-	__le32 suspend_time;
-	/* RX_ON_FLAGS_API_S_VER_1 */
-	__le32 flags;
-	__le32 filter_flags;
-	struct iwl_tx_cmd tx_cmd[2];
-	/* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-	__le32 scan_type;
-	__le32 rep_count;
-} __packed;
-
-enum iwl_scan_offload_channel_flags {
-	IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE		= BIT(0),
-	IWL_SCAN_OFFLOAD_CHANNEL_NARROW		= BIT(22),
-	IWL_SCAN_OFFLOAD_CHANNEL_FULL		= BIT(24),
-	IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL	= BIT(25),
-};
-
-/* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
- * __le32 type:	bitmap; bits 1-20 are for directed scan to i'th ssid and
- *	see enum iwl_scan_offload_channel_flags.
- * __le16 channel_number: channel number 1-13 etc.
- * __le16 iter_count: repetition count for the channel.
- * __le32 iter_interval: interval between two iterations on one channel.
- * u8 active_dwell.
- * u8 passive_dwell.
- */
-#define IWL_SCAN_CHAN_SIZE 14
-
-/**
- * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
- * @scan_cmd:		scan command fixed part
- * @data:		scan channel configuration and probe request frames
- */
-struct iwl_scan_offload_cfg {
-	struct iwl_scan_offload_cmd scan_cmd;
-	u8 data[0];
-} __packed;
-
-/**
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:		MAC address to filter out
  * @reported_rssi:	AP rssi reported to the host
@@ -298,35 +198,6 @@
 };
 
 /**
- * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
- * @last_schedule_line:		last schedule line executed (fast or regular)
- * @last_schedule_iteration:	last scan iteration executed before scan abort
- * @status:			enum iwl_scan_offload_compleate_status
- * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
- */
-struct iwl_scan_offload_complete {
-	u8 last_schedule_line;
-	u8 last_schedule_iteration;
-	u8 status;
-	u8 ebs_status;
-} __packed;
-
-/**
- * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
- * @ssid_bitmap:	SSIDs indexes found in this iteration
- * @client_bitmap:	clients that are active and wait for this notification
- */
-struct iwl_sched_scan_results {
-	__le16 ssid_bitmap;
-	u8 client_bitmap;
-	u8 reserved;
-};
-
-/* Unified LMAC scan API */
-
-#define IWL_MVM_BASIC_PASSIVE_DWELL 110
-
-/**
  * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
  * @tx_flags: combination of TX_CMD_FLG_*
  * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
@@ -550,18 +421,6 @@
 
 /* UMAC Scan API */
 
-/**
- * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
- * @size:	size of the command (not including header)
- * @reserved0:	for future use and alignment
- * @ver:	API version number
- */
-struct iwl_mvm_umac_cmd_hdr {
-	__le16 size;
-	u8 reserved0;
-	u8 ver;
-} __packed;
-
 /* The maximum of either of these cannot exceed 8, because we use an
  * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
  */
@@ -621,7 +480,6 @@
 
 /**
  * struct iwl_scan_config
- * @hdr: umac command header
  * @flags:			enum scan_config_flags
  * @tx_chains:			valid_tx antenna - ANT_* definitions
  * @rx_chains:			valid_rx antenna - ANT_* definitions
@@ -639,7 +497,6 @@
  * @channel_array:		default supported channels
  */
 struct iwl_scan_config {
-	struct iwl_mvm_umac_cmd_hdr hdr;
 	__le32 flags;
 	__le32 tx_chains;
 	__le32 rx_chains;
@@ -735,7 +592,6 @@
 
 /**
  * struct iwl_scan_req_umac
- * @hdr: umac command header
  * @flags: &enum iwl_umac_scan_flags
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @ooc_priority: out of channel priority - &enum iwl_scan_priority
@@ -754,7 +610,6 @@
  *	&struct iwl_scan_req_umac_tail
  */
 struct iwl_scan_req_umac {
-	struct iwl_mvm_umac_cmd_hdr hdr;
 	__le32 flags;
 	__le32 uid;
 	__le32 ooc_priority;
@@ -776,12 +631,10 @@
 
 /**
  * struct iwl_umac_scan_abort
- * @hdr: umac command header
  * @uid: scan id, &enum iwl_umac_scan_uid_offsets
  * @flags: reserved
  */
 struct iwl_umac_scan_abort {
-	struct iwl_mvm_umac_cmd_hdr hdr;
 	__le32 uid;
 	__le32 flags;
 } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 21dd5b7..493a8bd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -366,8 +366,8 @@
  * ( MGMT_MCAST_KEY = 0x1f )
  * @ctrl_flags: %iwl_sta_key_flag
  * @IGTK:
- * @K1: IGTK master key
- * @K2: IGTK sub key
+ * @K1: unused
+ * @K2: unused
  * @sta_id: station ID that support IGTK
  * @key_id:
  * @receive_seq_cnt: initial RSC/PN needed for replay check
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
new file mode 100644
index 0000000..eed6271
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tof.h
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __fw_api_tof_h__
+#define __fw_api_tof_h__
+
+#include "fw-api.h"
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+	TOF_RANGE_REQ_CMD = 0x1,
+	TOF_CONFIG_CMD = 0x2,
+	TOF_RANGE_ABORT_CMD = 0x3,
+	TOF_RANGE_REQ_EXT_CMD = 0x4,
+	TOF_RESPONDER_CONFIG_CMD = 0x5,
+	TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+	TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+	TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+	TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+	TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+	TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 tof_disabled;
+	u8 one_sided_disabled;
+	u8 is_debug_mode;
+	u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ *		  The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ *		   The minimum delay between two sequential FTM Responses
+ *		   in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ *		   The total time for all FTM handshakes in the same burst.
+ *		   Affects the time events duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ *		   The number of bursts for the current ToF request. Affects
+ *		   the number of event allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ *		     (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ *			 params and use the recommended Initiator params.
+ *			 0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *	     the center frequency.
+ *	     40MHz  0 below center, 1 above center
+ *	     80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *				 1  the far  20MHz to the center
+ *		    bit[2]  as above 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ *		  '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ *		purposes, simulating station movement by adding various values
+ *		to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+	__le32 sub_grp_cmd_id;
+	__le16 burst_period;
+	u8 min_delta_ftm;
+	u8 burst_duration;
+	u8 num_of_burst_exp;
+	u8 get_ch_est;
+	u8 abort_responder;
+	u8 recv_sta_req_params;
+	u8 channel_num;
+	u8 bandwidth;
+	u8 rate;
+	u8 ctrl_ch_position;
+	u8 ftm_per_burst;
+	u8 ftm_resp_ts_avail;
+	u8 asap_mode;
+	u8 sta_id;
+	__le16 tsf_timer_offset_msecs;
+	__le16 toa_offset;
+	u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ *		   in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ *			value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ *			value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ *			value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+	__le32 sub_grp_cmd_id;
+	__le16 tsf_timer_offset_msec;
+	__le16 reserved;
+	u8 min_delta_ftm;
+	u8 ftm_format_and_bw20M;
+	u8 ftm_format_and_bw40M;
+	u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *	     center frequency.
+ *	     40MHz  0 below center, 1 above center
+ *	     80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *				 1  the far  20MHz to the center
+ *		    bit[2]  as above 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. 2's exponent of the
+ *		   number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *		  periodicity in units of 100ms; ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
+ *		       1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ *			in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ *	       The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
+ *			      Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ *	    0  Initiator interacts with regular AP
+ *	    1  Initiator interacts with Responder machine: need to send the
+ *	    Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ *	    use it for its ch est measurement (this flag will be set when we
+ *	    configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ *	  legal values: -128-0 (0x7f); values above 0x0 indicate an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+	u8 channel_num;
+	u8 bandwidth;
+	u8 tsf_delta_direction;
+	u8 ctrl_ch_position;
+	u8 bssid[ETH_ALEN];
+	u8 measure_type;
+	u8 num_of_bursts;
+	__le16 burst_period;
+	u8 samples_per_burst;
+	u8 retries_per_sample;
+	__le32 tsf_delta;
+	u8 location_req;
+	u8 asap_mode;
+	u8 enable_dyn_ack;
+	s8 rssi;
+} __packed;
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ *			      possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ *				 timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ *				  earlier of: measurements completion / timeout
+ *				  expiration.
+ */
+enum iwl_tof_response_mode {
+	IWL_MVM_TOF_RESPOSE_ASAP = 1,
+	IWL_MVM_TOF_RESPOSE_TIMEOUT,
+	IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ *		sent back in the range response
+ * @initiator: 0 - NW initiated, 1 - Client initiated
+ * @one_sided_los_disable: '0' - run ML-Algo for both ToF/OneSided,
+ *			   '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *	     This is equivalent to the session time configured to the
+ *	     LMAC in Initiator Request
+ * @report_policy: Partially supported for this release: the range report
+ *		   will be uploaded as a batch when ready or when the session
+ *		   is done (successfully / partially).
+ *		   One of &enum iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *	            '1' Use MAC Address randomization according to the below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *		  Bits set to 1 shall be randomized by the UMAC
+ */
+struct iwl_tof_range_req_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 request_id;
+	u8 initiator;
+	u8 one_sided_los_disable;
+	u8 req_timeout;
+	u8 report_policy;
+	u8 los_det_disable;
+	u8 num_of_ap;
+	u8 macaddr_random;
+	u8 macaddr_template[ETH_ALEN];
+	u8 macaddr_mask[ETH_ALEN];
+	struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ */
+struct iwl_tof_gen_resp_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @measure_status: current APs measurement status
+ * @measure_bw: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rtt: The Round Trip Time measured in the last measurement for the
+ *	 current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *	       values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *	        measured for current AP in the current session
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 clock value [usec] at which the Channel Estimation
+ *	       notification was uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+	u8 bssid[ETH_ALEN];
+	u8 measure_status;
+	u8 measure_bw;
+	__le32 rtt;
+	__le32 rtt_variance;
+	__le32 rtt_spread;
+	s8 rssi;
+	u8 rssi_spread;
+	__le16 reserved;
+	__le32 range;
+	__le32 range_variance;
+	__le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy -
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ */
+struct iwl_tof_range_rsp_ntfy {
+	u8 request_id;
+	u8 request_status;
+	u8 last_in_batch;
+	u8 num_of_aps;
+	struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE  (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+	u8 token;
+	u8 role;
+	__le16 reserved;
+	u8 initiator_bssid[ETH_ALEN];
+	u8 responder_bssid[ETH_ALEN];
+	u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status:
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+	u8 bssid[ETH_ALEN];
+	u8 request_token;
+	u8 status;
+	__le16 report_ie_len;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ */
+struct iwl_tof_range_abort_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 request_id;
+	u8 reserved[3];
+} __packed;
+
+#endif
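
The structures above define the driver side of the 802.11mc FTM (ToF) range request: one iwl_tof_range_req_ap_entry per target AP, wrapped in an iwl_tof_range_req_cmd that carries up to IWL_MVM_TOF_MAX_APS entries. The sketch below shows how such a request might be filled for a single AP; it is illustrative only - the helper name and the literal values are hypothetical, and the sub-group command id and the command submission path (defined elsewhere in the driver) are omitted.

/*
 * Illustrative sketch only: fill a one-AP range request using the
 * structures above.  Helper name and values are hypothetical.
 */
static void iwl_tof_fill_one_ap_request_sketch(struct iwl_tof_range_req_cmd *cmd,
					       const u8 *bssid)
{
	struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[0];

	memset(cmd, 0, sizeof(*cmd));
	cmd->request_id = 1;		/* echoed back in the range response */
	cmd->initiator = 1;		/* client initiated */
	cmd->req_timeout = 10;		/* 10 * 100ms session timeout */
	cmd->report_policy = IWL_MVM_TOF_RESPOSE_TIMEOUT;
	cmd->num_of_ap = 1;		/* must not exceed IWL_MVM_TOF_MAX_APS */

	memcpy(ap->bssid, bssid, ETH_ALEN);
	ap->channel_num = 36;
	ap->bandwidth = 1;			/* 40MHz, per the field doc above */
	ap->measure_type = 0;			/* two-sided FTM */
	ap->num_of_bursts = 2;			/* 2^2 = 4 bursts */
	ap->burst_period = cpu_to_le16(10);	/* 10 * 100ms between bursts */
	ap->samples_per_burst = 5;		/* FTM pairs per burst (1-31) */
}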
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 81c4ea3..853698a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -124,6 +124,18 @@
 	TX_CMD_FLG_HCCA_CHUNK		= BIT(31)
 }; /* TX_FLAGS_BITS_API_S_VER_1 */
 
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: fw suspends sleep mode for 100 TU
+ * @PM_FRAME_ASSOC: fw suspends sleep mode for 10 seconds
+ */
+enum iwl_tx_pm_timeouts {
+	PM_FRAME_NONE		= 0,
+	PM_FRAME_MGMT		= 2,
+	PM_FRAME_ASSOC		= 3,
+};
+
 /*
  * TX command security control
  */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 16e9ef4..4af7513a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -75,6 +75,7 @@
 #include "fw-api-coex.h"
 #include "fw-api-scan.h"
 #include "fw-api-stats.h"
+#include "fw-api-tof.h"
 
 /* Tx queue numbers */
 enum {
@@ -119,6 +120,9 @@
 	ADD_STA = 0x18,
 	REMOVE_STA = 0x19,
 
+	/* paging get item */
+	FW_GET_ITEM_CMD = 0x1a,
+
 	/* TX */
 	TX_CMD = 0x1c,
 	TXPATH_FLUSH = 0x1e,
@@ -148,6 +152,9 @@
 
 	LQ_CMD = 0x4e,
 
+	/* paging block to FW cpu2 */
+	FW_PAGING_BLOCK_CMD = 0x4f,
+
 	/* Scan offload */
 	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
 	SCAN_OFFLOAD_ABORT_CMD = 0x52,
@@ -163,6 +170,10 @@
 	CALIB_RES_NOTIF_PHY_DB = 0x6b,
 	/* PHY_DB_CMD = 0x6c, */
 
+	/* ToF - 802.11mc FTM */
+	TOF_CMD = 0x10,
+	TOF_NOTIFICATION = 0x11,
+
 	/* Power - legacy power table command */
 	POWER_TABLE_CMD = 0x77,
 	PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
@@ -365,6 +376,50 @@
 	u8 data[];
 } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
 
+#define NUM_OF_FW_PAGING_BLOCKS	33 /* 32 for data and 1 block for CSS */
+
+/*
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * (FW_PAGING_BLOCK_CMD = 0x4f)
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size, expressed as a base-2 exponent
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+*/
+struct iwl_fw_paging_cmd {
+	__le32 flags;
+	__le32 block_size;
+	__le32 block_num;
+	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
+
+/*
+ * FW item IDs
+ *
+ * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload /
+ *	download
+ */
+enum iwl_fw_item_id {
+	IWL_FW_ITEM_ID_PAGING = 3,
+};
+
+/*
+ * struct iwl_fw_get_item_cmd - get an item from the fw
+ */
+struct iwl_fw_get_item_cmd {
+	__le32 item_id;
+} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */
+
+struct iwl_fw_get_item_resp {
+	__le32 item_id;
+	__le32 item_byte_cnt;
+	__le32 item_val;
+} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */
+
 /**
  * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
  * @offset: offset in bytes into the section
@@ -1080,10 +1135,33 @@
 	__le16 frame_time;
 } __packed;
 
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+	CSUM_RXA_RESERVED_MASK	= 0x000f,
+	CSUM_RXA_MICSIZE_MASK	= 0x00f0,
+	CSUM_RXA_HEADERLEN_MASK	= 0x1f00,
+	CSUM_RXA_PADD		= BIT(13),
+	CSUM_RXA_AMSDU		= BIT(14),
+	CSUM_RXA_ENA		= BIT(15)
+};
+
+/**
+ * struct iwl_rx_mpdu_res_start - phy info
+ * @assist: see the CSUM_RXA_* bits above
+ */
 struct iwl_rx_mpdu_res_start {
 	__le16 byte_count;
-	__le16 reserved;
-} __packed;
+	__le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
 
 /**
  * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags
@@ -1136,6 +1214,8 @@
  * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
  * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
  * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
  * @RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
  * @RX_MPDU_RES_STATUS_STA_ID_MSK:
  * @RX_MPDU_RES_STATUS_RRF_KILL:
@@ -1165,6 +1245,8 @@
 	RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP		= BIT(13),
 	RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT		= BIT(14),
 	RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME		= BIT(15),
+	RX_MPDU_RES_STATUS_CSUM_DONE			= BIT(16),
+	RX_MPDU_RES_STATUS_CSUM_OK			= BIT(17),
 	RX_MPDU_RES_STATUS_HASH_INDEX_MSK		= (0x3F0000),
 	RX_MPDU_RES_STATUS_STA_ID_MSK			= (0x1f000000),
 	RX_MPDU_RES_STATUS_RRF_KILL			= BIT(29),
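
The checksum offload additions in this hunk come in two parts: the per-MPDU assist word (enum iwl_csum_rx_assist_info) that replaces the reserved field in iwl_rx_mpdu_res_start, and the two new RX_MPDU_RES_STATUS_CSUM_* status bits. A hedged sketch of how the Rx path could consume them follows; the function name is hypothetical, the masks and bits are the ones defined above.

/* Illustrative only: decode the csum assist word and status bits. */
static int iwl_rx_csum_sketch(__le16 assist, u32 rx_pkt_status)
{
	u16 a = le16_to_cpu(assist);

	if (!(a & CSUM_RXA_ENA))
		return -1;	/* offload not enabled for this frame */

	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CSUM_DONE) ||
	    !(rx_pkt_status & RX_MPDU_RES_STATUS_CSUM_OK))
		return -1;	/* hw did not run or did not validate the checksum */

	/* bits 8:12 of the assist word carry the MAC header length */
	return (a & CSUM_RXA_HEADERLEN_MASK) >> 8;
}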
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index eb10c5e..4a0ce83 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -106,6 +106,306 @@
 				    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static void iwl_free_fw_paging(struct iwl_mvm *mvm)
+{
+	int i;
+
+	if (!mvm->fw_paging_db[0].fw_paging_block)
+		return;
+
+	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+		if (!mvm->fw_paging_db[i].fw_paging_block) {
+			IWL_DEBUG_FW(mvm,
+				     "Paging: block %d already freed, continue to next page\n",
+				     i);
+
+			continue;
+		}
+
+		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
+			     get_order(mvm->fw_paging_db[i].fw_paging_size));
+	}
+	kfree(mvm->trans->paging_download_buf);
+	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
+}
+
+static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
+{
+	int sec_idx, idx;
+	u32 offset = 0;
+
+	/*
+	 * find where the paging image starts:
+	 * if CPU2 exists and it is in paging format, the image looks like:
+	 * CPU1 sections (2 or more)
+	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
+	 * CPU2 sections (not paged)
+	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
+	 * sections from the CPU2 paging section
+	 * CPU2 paging CSS
+	 * CPU2 paging image (including instructions and data)
+	 */
+	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+			sec_idx++;
+			break;
+		}
+	}
+
+	if (sec_idx >= IWL_UCODE_SECTION_MAX) {
+		IWL_ERR(mvm, "driver didn't find paging image\n");
+		iwl_free_fw_paging(mvm);
+		return -EINVAL;
+	}
+
+	/* copy the CSS block to the dram */
+	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
+		     sec_idx);
+
+	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
+	       image->sec[sec_idx].data,
+	       mvm->fw_paging_db[0].fw_paging_size);
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: copied %d CSS bytes to first block\n",
+		     mvm->fw_paging_db[0].fw_paging_size);
+
+	sec_idx++;
+
+	/*
+	 * copy the paging blocks to DRAM.
+	 * the loop index starts from 1 since the CSS block was already copied
+	 * to DRAM and the CSS index is 0.
+	 * the loop stops at num_of_paging_blk since the last block is not full.
+	 */
+	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
+		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		       image->sec[sec_idx].data + offset,
+		       mvm->fw_paging_db[idx].fw_paging_size);
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: copied %d paging bytes to block %d\n",
+			     mvm->fw_paging_db[idx].fw_paging_size,
+			     idx);
+
+		offset += mvm->fw_paging_db[idx].fw_paging_size;
+	}
+
+	/* copy the last paging block */
+	if (mvm->num_of_pages_in_last_blk > 0) {
+		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+		       image->sec[sec_idx].data + offset,
+		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: copied %d pages in the last block %d\n",
+			     mvm->num_of_pages_in_last_blk, idx);
+	}
+
+	return 0;
+}
+
+static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
+				   const struct fw_img *image)
+{
+	struct page *block;
+	dma_addr_t phys = 0;
+	int blk_idx = 0;
+	int order, num_of_pages;
+	int dma_enabled;
+
+	if (mvm->fw_paging_db[0].fw_paging_block)
+		return 0;
+
+	dma_enabled = is_device_dma_capable(mvm->trans->dev);
+
+	/* ensure BLOCK_2_EXP_SIZE is the base-2 exponent of PAGING_BLOCK_SIZE */
+	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+	mvm->num_of_paging_blk = ((num_of_pages - 1) /
+				    NUM_OF_PAGE_PER_GROUP) + 1;
+
+	mvm->num_of_pages_in_last_blk =
+		num_of_pages -
+		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+		     mvm->num_of_paging_blk,
+		     mvm->num_of_pages_in_last_blk);
+
+	/* allocate block of 4Kbytes for paging CSS */
+	order = get_order(FW_PAGING_SIZE);
+	block = alloc_pages(GFP_KERNEL, order);
+	if (!block) {
+		/* free all the previous pages since we failed */
+		iwl_free_fw_paging(mvm);
+		return -ENOMEM;
+	}
+
+	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
+
+	if (dma_enabled) {
+		phys = dma_map_page(mvm->trans->dev, block, 0,
+				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(mvm->trans->dev, phys)) {
+			/*
+			 * free the previous pages and the current one since
+			 * we failed to map_page.
+			 */
+			iwl_free_fw_paging(mvm);
+			return -ENOMEM;
+		}
+		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+	} else {
+		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
+			blk_idx << BLOCK_2_EXP_SIZE;
+	}
+
+	IWL_DEBUG_FW(mvm,
+		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+		     order);
+
+	/*
+	 * allocate blocks in DRAM.
+	 * since the CSS is allocated in fw_paging_db[0], the loop starts
+	 * from index 1
+	 */
+	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		/* allocate block of PAGING_BLOCK_SIZE (32K) */
+		order = get_order(PAGING_BLOCK_SIZE);
+		block = alloc_pages(GFP_KERNEL, order);
+		if (!block) {
+			/* free all the previous pages since we failed */
+			iwl_free_fw_paging(mvm);
+			return -ENOMEM;
+		}
+
+		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
+		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+
+		if (dma_enabled) {
+			phys = dma_map_page(mvm->trans->dev, block, 0,
+					    PAGE_SIZE << order,
+					    DMA_BIDIRECTIONAL);
+			if (dma_mapping_error(mvm->trans->dev, phys)) {
+				/*
+				 * free the previous pages and the current one
+				 * since we failed to map_page.
+				 */
+				iwl_free_fw_paging(mvm);
+				return -ENOMEM;
+			}
+			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
+		} else {
+			mvm->fw_paging_db[blk_idx].fw_paging_phys =
+				PAGING_ADDR_SIG |
+				blk_idx << BLOCK_2_EXP_SIZE;
+		}
+
+		IWL_DEBUG_FW(mvm,
+			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+			     order);
+	}
+
+	return 0;
+}
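
The block math in iwl_alloc_fw_paging_mem() is easiest to see with a worked example; the 4K page size and 8-pages-per-32K-block figures below are taken from the debug strings in this hunk, and the paging_mem_size value is hypothetical.

/*
 * Worked example (hypothetical paging_mem_size of 340KB = 348160 bytes,
 * 4K FW pages, 8 pages per 32K block, per the debug strings above):
 *
 *	num_of_pages             = 348160 / 4096      = 85
 *	num_of_paging_blk        = ((85 - 1) / 8) + 1  = 11
 *	num_of_pages_in_last_blk = 85 - 8 * (11 - 1)   = 5
 *
 * i.e. ten full 32K blocks plus a final block holding 5 pages, with the
 * 4K CSS block allocated separately in fw_paging_db[0].
 */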
+
+static int iwl_save_fw_paging(struct iwl_mvm *mvm,
+			      const struct fw_img *fw)
+{
+	int ret;
+
+	ret = iwl_alloc_fw_paging_mem(mvm, fw);
+	if (ret)
+		return ret;
+
+	return iwl_fill_paging_mem(mvm, fw);
+}
+
+/* send paging cmd to FW in case CPU2 has paging image */
+static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
+{
+	int blk_idx;
+	__le32 dev_phy_addr;
+	struct iwl_fw_paging_cmd fw_paging_cmd = {
+		.flags =
+			cpu_to_le32(PAGING_CMD_IS_SECURED |
+				    PAGING_CMD_IS_ENABLED |
+				    (mvm->num_of_pages_in_last_blk <<
+				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
+	};
+
+	/* loop over all paging blocks + the CSS block */
+	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+		dev_phy_addr =
+			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
+				    PAGE_2_EXP_SIZE);
+		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
+}
+
+/*
+ * Send the get-item command to the FW to retrieve the paging item,
+ * in case CPU2 has a paging image
+ */
+static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
+{
+	int ret;
+	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
+		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
+	};
+
+	struct iwl_fw_get_item_resp *item_resp;
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &fw_get_item_cmd, },
+	};
+
+	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret) {
+		IWL_ERR(mvm,
+			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
+			ret);
+		return ret;
+	}
+
+	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
+	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
+		IWL_ERR(mvm,
+			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
+			le32_to_cpu(item_resp->item_id));
+		ret = -EIO;
+		goto exit;
+	}
+
+	mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+						  GFP_KERNEL);
+	if (!mvm->trans->paging_download_buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
+	mvm->trans->paging_db = mvm->fw_paging_db;
+	IWL_DEBUG_FW(mvm,
+		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
+		     mvm->trans->paging_req_addr);
+
+exit:
+	iwl_free_resp(&cmd);
+
+	return ret;
+}
+
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 			 struct iwl_rx_packet *pkt, void *data)
 {
@@ -213,7 +513,7 @@
 	const struct fw_img *fw;
 	int ret, i;
 	enum iwl_ucode_type old_type = mvm->cur_ucode;
-	static const u8 alive_cmd[] = { MVM_ALIVE };
+	static const u16 alive_cmd[] = { MVM_ALIVE };
 	struct iwl_sf_region st_fwrd_space;
 
 	if (ucode_type == IWL_UCODE_REGULAR &&
@@ -244,6 +544,11 @@
 	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
 				    MVM_UCODE_ALIVE_TIMEOUT);
 	if (ret) {
+		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+			IWL_ERR(mvm,
+				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
+				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
 		mvm->cur_ucode = old_type;
 		return ret;
 	}
@@ -269,6 +574,40 @@
 	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
 	/*
+	 * configure and operate the fw paging mechanism.
+	 * the driver configures the paging flow only once; the CPU2 paging
+	 * image is included in the IWL_UCODE_INIT image.
+	 */
+	if (fw->paging_mem_size) {
+		/*
+		 * When DMA is not enabled, the driver needs to copy / write
+		 * the downloaded / uploaded page to / from the SMEM.
+		 * This gets the location of the place where the pages are
+		 * stored.
+		 */
+		if (!is_device_dma_capable(mvm->trans->dev)) {
+			ret = iwl_trans_get_paging_item(mvm);
+			if (ret) {
+				IWL_ERR(mvm, "failed to get FW paging item\n");
+				return ret;
+			}
+		}
+
+		ret = iwl_save_fw_paging(mvm, fw);
+		if (ret) {
+			IWL_ERR(mvm, "failed to save the FW paging image\n");
+			return ret;
+		}
+
+		ret = iwl_send_paging_cmd(mvm, fw);
+		if (ret) {
+			IWL_ERR(mvm, "failed to send the paging cmd\n");
+			iwl_free_fw_paging(mvm);
+			return ret;
+		}
+	}
+
+	/*
 	 * Note: all the queues are enabled as part of the interface
 	 * initialization, but in firmware restart scenarios they
 	 * could be stopped, so wake them up. In firmware restart,
@@ -314,7 +653,7 @@
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 {
 	struct iwl_notification_wait calib_wait;
-	static const u8 init_complete[] = {
+	static const u16 init_complete[] = {
 		INIT_COMPLETE_NOTIF,
 		CALIB_RES_NOTIF_PHY_DB
 	};
@@ -444,12 +783,6 @@
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from SHARED_MEM_CFG (0x%08X)\n",
-			pkt->hdr.flags);
-		goto exit;
-	}
-
 	mem_cfg = (void *)pkt->data;
 
 	mvm->shared_mem_cfg.shared_mem_addr =
@@ -473,14 +806,18 @@
 		le32_to_cpu(mem_cfg->page_buff_size);
 	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 
-exit:
 	iwl_free_resp(&cmd);
 }
 
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 				struct iwl_mvm_dump_desc *desc,
-				unsigned int delay)
+				struct iwl_fw_dbg_trigger_tlv *trigger)
 {
+	unsigned int delay = 0;
+
+	if (trigger)
+		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
 	if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status))
 		return -EBUSY;
 
@@ -491,6 +828,7 @@
 		 le32_to_cpu(desc->trig_desc.type));
 
 	mvm->fw_dump_desc = desc;
+	mvm->fw_dump_trig = trigger;
 
 	queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
@@ -498,7 +836,8 @@
 }
 
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-			   const char *str, size_t len, unsigned int delay)
+			   const char *str, size_t len,
+			   struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	struct iwl_mvm_dump_desc *desc;
 
@@ -510,14 +849,13 @@
 	desc->trig_desc.type = cpu_to_le32(trig);
 	memcpy(desc->trig_desc.data, str, len);
 
-	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, delay);
+	return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger);
 }
 
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 				struct iwl_fw_dbg_trigger_tlv *trigger,
 				const char *fmt, ...)
 {
-	unsigned int delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
 	u16 occurrences = le16_to_cpu(trigger->occurrences);
 	int ret, len = 0;
 	char buf[64];
@@ -541,8 +879,9 @@
 		len = strlen(buf) + 1;
 	}
 
-	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf,
-				     len, delay);
+	ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len,
+				     trigger);
+
 	if (ret)
 		return ret;
 
@@ -676,8 +1015,7 @@
 		goto error;
 	}
 
-	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10)
-		iwl_mvm_get_shared_mem_conf(mvm);
+	iwl_mvm_get_shared_mem_conf(mvm);
 
 	ret = iwl_mvm_sf_update(mvm, NULL, false);
 	if (ret)
@@ -760,6 +1098,10 @@
 			goto error;
 	}
 
+	if (iwl_mvm_is_csum_supported(mvm) &&
+	    mvm->cfg->features & NETIF_F_RXCSUM)
+		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
+
 	/* allow FW/transport low power modes if not during restart */
 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
 		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -815,9 +1157,8 @@
 	return ret;
 }
 
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
@@ -828,13 +1169,10 @@
 			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
 			  (flags & CT_KILL_CARD_DISABLED) ?
 			  "Reached" : "Not reached");
-
-	return 0;
 }
 
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
@@ -845,5 +1183,4 @@
 		       le32_to_cpu(mfuart_notif->external_ver),
 		       le32_to_cpu(mfuart_notif->status),
 		       le32_to_cpu(mfuart_notif->duration));
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 1812dd0..3424315 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -1312,9 +1312,8 @@
 	}
 }
 
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
@@ -1365,8 +1364,6 @@
 			RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
 		}
 	}
-
-	return 0;
 }
 
 static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
@@ -1415,9 +1412,8 @@
 		iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL);
 }
 
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
@@ -1434,5 +1430,4 @@
 						   IEEE80211_IFACE_ITER_NORMAL,
 						   iwl_mvm_beacon_loss_iterator,
 						   mb);
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dfdab38..aa8c2b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -641,6 +641,7 @@
 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+		ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	}
 
 	if (fw_has_capa(&mvm->fw->ucode_capa,
@@ -649,6 +650,10 @@
 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
 	}
 
+	hw->netdev_features |= mvm->cfg->features;
+	if (!iwl_mvm_is_csum_supported(mvm))
+		hw->netdev_features &= ~NETIF_F_RXCSUM;
+
 	ret = ieee80211_register_hw(mvm->hw);
 	if (ret)
 		iwl_mvm_leds_exit(mvm);
@@ -1120,9 +1125,14 @@
 	u32 file_len, fifo_data_len = 0;
 	u32 smem_len = mvm->cfg->smem_len;
 	u32 sram2_len = mvm->cfg->dccm2_len;
+	bool monitor_dump_only = false;
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (mvm->fw_dump_trig &&
+	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+		monitor_dump_only = true;
+
 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
 	if (!fw_error_dump)
 		return;
@@ -1174,6 +1184,20 @@
 		   fifo_data_len +
 		   sizeof(*dump_info);
 
+	/* Make room for the SMEM, if it exists */
+	if (smem_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
+
+	/* Make room for the secondary SRAM, if it exists */
+	if (sram2_len)
+		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+
+	/* If we only want a monitor dump, reset the file length */
+	if (monitor_dump_only) {
+		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
+			   sizeof(*dump_info);
+	}
+
 	/*
 	 * In 8000 HW family B-step include the ICCM (which resides separately)
 	 */
@@ -1186,14 +1210,6 @@
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    mvm->fw_dump_desc->len;
 
-	/* Make room for the SMEM, if it exists */
-	if (smem_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-	/* Make room for the secondary SRAM, if it exists */
-	if (sram2_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
 		kfree(fw_error_dump);
@@ -1239,6 +1255,10 @@
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
+	/* In case we only want the monitor dump, skip to dumping transport data */
+	if (monitor_dump_only)
+		goto dump_trans_data;
+
 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 	dump_mem = (void *)dump_data->data;
@@ -1282,7 +1302,9 @@
 					 dump_mem->data, IWL8260_ICCM_LEN);
 	}
 
-	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans);
+dump_trans_data:
+	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
+						       mvm->fw_dump_trig);
 	fw_error_dump->op_mode_len = file_len;
 	if (fw_error_dump->trans_ptr)
 		file_len += fw_error_dump->trans_ptr->len;
@@ -1291,6 +1313,7 @@
 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
 
+	mvm->fw_dump_trig = NULL;
 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
 }
 
@@ -1433,22 +1456,9 @@
 
 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
 {
-	bool exit_now;
-
 	if (!iwl_mvm_is_d0i3_supported(mvm))
 		return;
 
-	mutex_lock(&mvm->d0i3_suspend_mutex);
-	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
-	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
-					&mvm->d0i3_suspend_flags);
-	mutex_unlock(&mvm->d0i3_suspend_mutex);
-
-	if (exit_now) {
-		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
-		_iwl_mvm_exit_d0i3(mvm);
-	}
-
 	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
 		if (!wait_event_timeout(mvm->d0i3_exit_waitq,
 					!test_bit(IWL_MVM_STATUS_IN_D0I3,
@@ -1585,20 +1595,23 @@
 				s16 tx_power)
 {
 	struct iwl_dev_tx_power_cmd cmd = {
-		.set_mode = 0,
-		.mac_context_id =
+		.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+		.v2.mac_context_id =
 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
-		.pwr_restriction = cpu_to_le16(8 * tx_power),
+		.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
 	};
+	int len = sizeof(cmd);
 
 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
 		return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 
 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
-		cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+		cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
 
-	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
-				    sizeof(cmd), &cmd);
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
+		len = sizeof(cmd.v2);
+
+	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
@@ -1664,6 +1677,8 @@
 		goto out_unlock;
 	}
 
+	mvmvif->features |= hw->netdev_features;
+
 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
 	if (ret)
 		goto out_release;
@@ -2880,10 +2895,11 @@
 	switch (key->cipher) {
 	case WLAN_CIPHER_SUITE_TKIP:
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-		/* fall-through */
-	case WLAN_CIPHER_SUITE_CCMP:
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		break;
 	case WLAN_CIPHER_SUITE_AES_CMAC:
 		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
 		break;
@@ -3025,7 +3041,7 @@
 	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
-	static const u8 time_event_response[] = { HOT_SPOT_CMD };
+	static const u16 time_event_response[] = { HOT_SPOT_CMD };
 	struct iwl_notification_wait wait_time_event;
 	struct iwl_hs20_roc_req aux_roc_req = {
 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2d4bad5..b95a07e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,6 +80,7 @@
 #include "sta.h"
 #include "fw-api.h"
 #include "constants.h"
+#include "tof.h"
 
 #define IWL_INVALID_MAC80211_QUEUE	0xff
 #define IWL_MVM_MAX_ADDRESSES		5
@@ -122,8 +123,7 @@
  *	be up'ed after the INIT fw asserted. This is useful to be able to use
  *	proprietary tools over testmode to debug the INIT fw.
  * @tfd_q_hang_detect: enabled the detection of hung transmit queues
- * @power_scheme: CAM(Continuous Active Mode)-1, BPS(Balanced Power
- *	Save)-2(default), LP(Low Power)-3
+ * @power_scheme: one of enum iwl_power_scheme
  */
 struct iwl_mvm_mod_params {
 	bool init_dbg;
@@ -357,6 +357,7 @@
  *	# of received beacons accumulated over FW restart, and the current
  *	average signal of beacons retrieved from the firmware
  * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
  */
 struct iwl_mvm_vif {
 	struct iwl_mvm *mvm;
@@ -437,6 +438,9 @@
 	/* Indicates that CSA countdown may be started */
 	bool csa_countdown;
 	bool csa_failed;
+
+	/* TCP Checksum Offload */
+	netdev_features_t features;
 };
 
 static inline struct iwl_mvm_vif *
@@ -606,6 +610,11 @@
 	/* NVM sections */
 	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
 
+	/* Paging section */
+	struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+	u16 num_of_paging_blk;
+	u16 num_of_pages_in_last_blk;
+
 	/* EEPROM MAC addresses */
 	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
 
@@ -686,6 +695,7 @@
 	 * can hold 16 keys at most. Reflect this fact.
 	 */
 	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+	u8 fw_key_deleted[STA_KEY_MAX_NUM];
 
 	/* references taken by the driver and spinlock protecting them */
 	spinlock_t refs_lock;
@@ -698,6 +708,7 @@
 	u8 fw_dbg_conf;
 	struct delayed_work fw_dump_wk;
 	struct iwl_mvm_dump_desc *fw_dump_desc;
+	struct iwl_fw_dbg_trigger_tlv *fw_dump_trig;
 
 #ifdef CONFIG_IWLWIFI_LEDS
 	struct led_classdev led;
@@ -822,6 +833,7 @@
 	struct iwl_mvm_shared_mem_cfg shared_mem_cfg;
 
 	u32 ciphers[6];
+	struct iwl_mvm_tof_data tof_data;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -941,6 +953,12 @@
 		IWL_MVM_BT_COEX_RRC;
 }
 
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+}
+
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
 
 struct iwl_rate_info {
@@ -974,12 +992,12 @@
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
 				  struct iwl_host_cmd *cmd);
-int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
 				      u32 flags, u16 len, const void *data);
 int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 					 struct iwl_host_cmd *cmd,
 					 u32 *status);
-int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id,
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
 					     u16 len, const void *data,
 					     u32 *status);
 int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
@@ -988,10 +1006,6 @@
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 			struct iwl_tx_cmd *tx_cmd,
 			struct ieee80211_tx_info *info, u8 sta_id);
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-			       struct ieee80211_tx_info *info,
-			       struct iwl_tx_cmd *tx_cmd,
-			       struct sk_buff *skb_frag);
 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
 			    struct ieee80211_tx_info *info,
 			    struct ieee80211_sta *sta, __le16 fc);
@@ -1003,6 +1017,17 @@
 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+					   struct iwl_tx_cmd *tx_cmd)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+	memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+}
+
 static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
 {
 	flush_work(&mvm->async_handlers_wk);
@@ -1011,9 +1036,8 @@
 /* Statistics */
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
 				  struct iwl_rx_packet *pkt);
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
 void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
 
@@ -1059,27 +1083,20 @@
  * FW notifications / CMD responses handlers
  * Convention: iwl_mvm_rx_<NAME OF THE CMD>
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		      struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
-				  struct iwl_rx_cmd_buffer *rxb,
-				  struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM PHY */
 int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
@@ -1106,12 +1123,10 @@
 u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
-int iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
-			    struct iwl_rx_cmd_buffer *rxb,
-			    struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
-				    struct iwl_rx_cmd_buffer *rxb,
-				    struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
 				    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1135,29 +1150,24 @@
 void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
 
 /* Scheduled scan */
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
 int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 			     struct ieee80211_vif *vif,
 			     struct cfg80211_sched_scan_request *req,
 			     struct ieee80211_scan_ies *ies,
 			     int type);
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
 
 /* UMAC scan */
 int iwl_mvm_config_scan(struct iwl_mvm *mvm);
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd);
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
 
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1196,9 +1206,8 @@
 				 char *buf, int bufsz);
 
 void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
 
 #ifdef CONFIG_IWLWIFI_LEDS
 int iwl_mvm_leds_init(struct iwl_mvm *mvm);
@@ -1254,9 +1263,8 @@
 
 /* BT Coex */
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
-			     struct iwl_rx_cmd_buffer *rxb,
-			     struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   enum ieee80211_rssi_event_data);
 void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
@@ -1274,9 +1282,8 @@
 bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
 void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
 int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-int iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
-				 struct iwl_rx_cmd_buffer *rxb,
-				 struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
+				  struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			       enum ieee80211_rssi_event_data);
 u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
@@ -1285,9 +1292,8 @@
 					 struct ieee80211_sta *sta);
 bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
 					enum ieee80211_band band);
-int iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
-				      struct iwl_rx_cmd_buffer *rxb,
-				      struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
+				       struct iwl_rx_cmd_buffer *rxb);
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1376,9 +1382,8 @@
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-		       struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+			struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
@@ -1390,9 +1395,8 @@
 iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
 		   enum iwl_mcc_source src_id);
 int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb);
 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
 						  const char *alpha2,
 						  enum iwl_mcc_source src_id,
@@ -1431,8 +1435,7 @@
 void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
 					struct ieee80211_sta *sta);
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
 
 struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
@@ -1442,10 +1445,11 @@
 
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id);
 int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig,
-			   const char *str, size_t len, unsigned int delay);
+			   const char *str, size_t len,
+			   struct iwl_fw_dbg_trigger_tlv *trigger);
 int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 				struct iwl_mvm_dump_desc *desc,
-				unsigned int delay);
+				struct iwl_fw_dbg_trigger_tlv *trigger);
 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm);
 int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
 				struct iwl_fw_dbg_trigger_tlv *trigger,
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 2a6be35..328187d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -139,12 +139,6 @@
 		return ret;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from NVM_ACCES_COMMAND (0x%08X)\n",
-			pkt->hdr.flags);
-		ret = -EIO;
-		goto exit;
-	}
 
 	/* Extract NVM response */
 	nvm_resp = (void *)pkt->data;
@@ -652,12 +646,6 @@
 		return ERR_PTR(ret);
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n",
-			pkt->hdr.flags);
-		ret = -EIO;
-		goto exit;
-	}
 
 	/* Extract MCC response */
 	mcc_resp = (void *)pkt->data;
@@ -839,9 +827,8 @@
 	return retval;
 }
 
-int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
@@ -852,7 +839,7 @@
 	lockdep_assert_held(&mvm->mutex);
 
 	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
-		return 0;
+		return;
 
 	mcc[0] = notif->mcc >> 8;
 	mcc[1] = notif->mcc & 0xff;
@@ -864,10 +851,8 @@
 		      mcc, src);
 	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
 	if (IS_ERR_OR_NULL(regd))
-		return 0;
+		return;
 
 	regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
 	kfree(regd);
-
-	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index e4fa500..a37de3f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -201,14 +201,15 @@
 }
 
 struct iwl_rx_handlers {
-	u8 cmd_id;
+	u16 cmd_id;
 	bool async;
-	int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		  struct iwl_device_cmd *cmd);
+	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 #define RX_HANDLER(_cmd_id, _fn, _async)	\
 	{ .cmd_id = _cmd_id , .fn = _fn , .async = _async }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async)	\
+	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async }
 
 /*
  * Handlers for fw notifications
@@ -221,7 +222,6 @@
  * called from a worker with mvm->mutex held.
  */
 static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
-	RX_HANDLER(REPLY_RX_MPDU_CMD, iwl_mvm_rx_rx_mpdu, false),
 	RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
 	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
 	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
@@ -261,12 +261,14 @@
 	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
 		   true),
 	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
+	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
 
 };
 #undef RX_HANDLER
+#undef RX_HANDLER_GRP
 #define CMD(x) [x] = #x
 
-static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
+static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
 	CMD(MVM_ALIVE),
 	CMD(REPLY_ERROR),
 	CMD(INIT_COMPLETE_NOTIF),
@@ -286,8 +288,10 @@
 	CMD(PHY_CONFIGURATION_CMD),
 	CMD(CALIB_RES_NOTIF_PHY_DB),
 	CMD(SET_CALIB_DEFAULT_CMD),
+	CMD(FW_PAGING_BLOCK_CMD),
 	CMD(ADD_STA_KEY),
 	CMD(ADD_STA),
+	CMD(FW_GET_ITEM_CMD),
 	CMD(REMOVE_STA),
 	CMD(LQ_CMD),
 	CMD(SCAN_OFFLOAD_CONFIG_CMD),
@@ -470,6 +474,8 @@
 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
 	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
+	trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa,
+					       IWL_UCODE_TLV_API_WIDE_CMD_HDR);
 
 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
 		trans_cfg.bc_table_dword = true;
@@ -576,6 +582,8 @@
 	/* rpm starts with a taken ref. only set the appropriate bit here. */
 	mvm->refs[IWL_MVM_REF_UCODE_DOWN] = 1;
 
+	iwl_mvm_tof_init(mvm);
+
 	return op_mode;
 
  out_unregister:
@@ -623,14 +631,15 @@
 	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
 		kfree(mvm->nvm_sections[i].data);
 
+	iwl_mvm_tof_clean(mvm);
+
 	ieee80211_free_hw(mvm->hw);
 }
 
 struct iwl_async_handler_entry {
 	struct list_head list;
 	struct iwl_rx_cmd_buffer rxb;
-	int (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		  struct iwl_device_cmd *cmd);
+	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 };
 
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
@@ -667,9 +676,7 @@
 	spin_unlock_bh(&mvm->async_handlers_lock);
 
 	list_for_each_entry_safe(entry, tmp, &local_list, list) {
-		if (entry->fn(mvm, &entry->rxb, NULL))
-			IWL_WARN(mvm,
-				 "returned value from ASYNC handlers are ignored\n");
+		entry->fn(mvm, &entry->rxb);
 		iwl_free_rxb(&entry->rxb);
 		list_del(&entry->list);
 		kfree(entry);
@@ -698,24 +705,30 @@
 		if (!cmds_trig->cmds[i].cmd_id)
 			break;
 
-		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd)
+		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
 			continue;
 
 		iwl_mvm_fw_dbg_collect_trig(mvm, trig,
-					    "CMD 0x%02x received",
-					    pkt->hdr.cmd);
+					    "CMD 0x%02x.%02x received",
+					    pkt->hdr.group_id, pkt->hdr.cmd);
 		break;
 	}
 }
 
-static int iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
-			       struct iwl_rx_cmd_buffer *rxb,
-			       struct iwl_device_cmd *cmd)
+static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
+				struct napi_struct *napi,
+				struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	u8 i;
 
+	if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+		return;
+	}
+
 	iwl_mvm_rx_check_trigger(mvm, pkt);
 
 	/*
@@ -729,16 +742,18 @@
 		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
 		struct iwl_async_handler_entry *entry;
 
-		if (rx_h->cmd_id != pkt->hdr.cmd)
+		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
 			continue;
 
-		if (!rx_h->async)
-			return rx_h->fn(mvm, rxb, cmd);
+		if (!rx_h->async) {
+			rx_h->fn(mvm, rxb);
+			return;
+		}
 
 		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 		/* we can't do much... */
 		if (!entry)
-			return 0;
+			return;
 
 		entry->rxb._page = rxb_steal_page(rxb);
 		entry->rxb._offset = rxb->_offset;
@@ -750,8 +765,6 @@
 		schedule_work(&mvm->async_handlers_wk);
 		break;
 	}
-
-	return 0;
 }
 
 static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
@@ -903,7 +916,8 @@
 	 * can't recover this since we're already half suspended.
 	 */
 	if (!mvm->restart_fw && fw_error) {
-		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, 0);
+		iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert,
+					    NULL);
 	} else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART,
 				    &mvm->status)) {
 		struct iwl_mvm_reprobe *reprobe;
@@ -1100,9 +1114,7 @@
 
 	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
 
-	/* make sure we have no running tx while configuring the qos */
 	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
-	synchronize_net();
 
 	/*
 	 * iwl_mvm_ref_sync takes a reference before checking the flag.
@@ -1130,6 +1142,9 @@
 		mvm->d0i3_offloading = false;
 	}
 
+	/* make sure we have no running tx while configuring the seqno */
+	synchronize_net();
+
 	iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
 	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
 				   sizeof(wowlan_config_cmd),
@@ -1156,15 +1171,25 @@
 	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
 }
 
-static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
-					 struct ieee80211_vif *vif)
+struct iwl_mvm_wakeup_reason_iter_data {
+	struct iwl_mvm *mvm;
+	u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_wakeup_reason_iter(void *_data, u8 *mac,
+					    struct ieee80211_vif *vif)
 {
-	struct iwl_mvm *mvm = data;
+	struct iwl_mvm_wakeup_reason_iter_data *data = _data;
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
 	if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
-	    mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
-		iwl_mvm_connection_loss(mvm, vif, "D0i3");
+	    data->mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) {
+		if (data->wakeup_reasons &
+		    IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+			iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+		else
+			ieee80211_beacon_loss(vif);
+	}
 }
 
 void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
@@ -1232,7 +1257,7 @@
 	};
 	struct iwl_wowlan_status *status;
 	int ret;
-	u32 disconnection_reasons, wakeup_reasons;
+	u32 handled_reasons, wakeup_reasons;
 	__le16 *qos_seq = NULL;
 
 	mutex_lock(&mvm->mutex);
@@ -1249,13 +1274,18 @@
 
 	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
 
-	disconnection_reasons =
-		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
-		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
-	if (wakeup_reasons & disconnection_reasons)
+	handled_reasons = IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+				IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+	if (wakeup_reasons & handled_reasons) {
+		struct iwl_mvm_wakeup_reason_iter_data data = {
+			.mvm = mvm,
+			.wakeup_reasons = wakeup_reasons,
+		};
+
 		ieee80211_iterate_active_interfaces(
 			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
-			iwl_mvm_d0i3_disconnect_iter, mvm);
+			iwl_mvm_d0i3_wakeup_reason_iter, &data);
+	}
 out:
 	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
@@ -1308,17 +1338,6 @@
 	return _iwl_mvm_exit_d0i3(mvm);
 }
 
-static void iwl_mvm_napi_add(struct iwl_op_mode *op_mode,
-			     struct napi_struct *napi,
-			     struct net_device *napi_dev,
-			     int (*poll)(struct napi_struct *, int),
-			     int weight)
-{
-	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
-
-	ieee80211_napi_add(mvm->hw, napi, napi_dev, poll, weight);
-}
-
 static const struct iwl_op_mode_ops iwl_mvm_ops = {
 	.start = iwl_op_mode_mvm_start,
 	.stop = iwl_op_mode_mvm_stop,
@@ -1332,5 +1351,4 @@
 	.nic_config = iwl_mvm_nic_config,
 	.enter_d0i3 = iwl_mvm_enter_d0i3,
 	.exit_d0i3 = iwl_mvm_exit_d0i3,
-	.napi_add = iwl_mvm_napi_add,
 };
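
The dispatcher in this file now matches handlers against WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) instead of the bare 8-bit command, which is what the new RX_HANDLER_GRP macro feeds into the table. A hedged sketch of the matching follows, assuming WIDE_ID packs the group id into the upper byte of the 16-bit id (consistent with the "CMD 0x%02x.%02x" trigger string above); the helper name is hypothetical.

/* Illustrative only: wide command id matching as used by the dispatcher. */
static bool iwl_cmd_id_matches_sketch(u16 handler_id, u8 group_id, u8 cmd)
{
	u16 wide = ((u16)group_id << 8) | cmd;	/* assumed WIDE_ID(group_id, cmd) */

	return handler_id == wide;
}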
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d2c6ba9..4645877 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -112,11 +112,12 @@
 static
 void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
 					  struct ieee80211_vif *vif,
-					  struct iwl_beacon_filter_cmd *cmd)
+					  struct iwl_beacon_filter_cmd *cmd,
+					  bool d0i3)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-	if (vif->bss_conf.cqm_rssi_thold) {
+	if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
 		cmd->bf_energy_delta =
 			cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
 		/* fw uses an absolute value for this */
@@ -287,27 +288,6 @@
 	return true;
 }
 
-static int iwl_mvm_power_get_skip_over_dtim(int dtimper, int bi)
-{
-	int numerator;
-	int dtim_interval = dtimper * bi;
-
-	if (WARN_ON(!dtim_interval))
-		return 0;
-
-	if (dtimper == 1) {
-		if (bi > 100)
-			numerator = 408;
-		else
-			numerator = 510;
-	} else if (dtimper < 10) {
-		numerator = 612;
-	} else {
-		return 0;
-	}
-	return max(1, (numerator / dtim_interval));
-}
-
 static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
 {
 	struct ieee80211_chanctx_conf *chanctx_conf;
@@ -357,8 +337,8 @@
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
 
-	if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
-	    !mvmvif->pm_enabled)
+	if (!vif->bss_conf.ps || !mvmvif->pm_enabled ||
+	    (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p))
 		return;
 
 	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -377,11 +357,8 @@
 	if (!radar_detect && (dtimper < 10) &&
 	    (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
 	     mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
-		cmd->skip_dtim_periods =
-			iwl_mvm_power_get_skip_over_dtim(dtimper, bi);
-		if (cmd->skip_dtim_periods)
-			cmd->flags |=
-				cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		cmd->skip_dtim_periods = 3;
 	}
 
 	if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
@@ -509,9 +486,8 @@
 		       ETH_ALEN);
 }
 
-int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
@@ -520,8 +496,6 @@
 	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
-
-	return 0;
 }
 
 struct iwl_power_vifs {
@@ -810,7 +784,7 @@
 	    vif->type != NL80211_IFTYPE_STATION || vif->p2p)
 		return 0;
 
-	iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+	iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
 	if (!d0i3)
 		iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
 	ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
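The fixed skip_dtim_periods = 3 above replaces the per-DTIM computation done by the deleted iwl_mvm_power_get_skip_over_dtim() helper. A minimal user-space sketch of that removed heuristic, fed with made-up DTIM period / beacon interval pairs, shows what the constant stands in for:

#include <stdio.h>

/* Re-implementation of the removed heuristic, for illustration only. */
static int skip_over_dtim(int dtimper, int bi)
{
	int numerator;
	int dtim_interval = dtimper * bi;	/* DTIM interval in TU */

	if (!dtim_interval)
		return 0;

	if (dtimper == 1)
		numerator = bi > 100 ? 408 : 510;
	else if (dtimper < 10)
		numerator = 612;
	else
		return 0;			/* long DTIM period: never skip */

	return numerator / dtim_interval > 1 ? numerator / dtim_interval : 1;
}

int main(void)
{
	printf("%d\n", skip_over_dtim(1, 100));		/* 5 */
	printf("%d\n", skip_over_dtim(3, 100));		/* 2 */
	printf("%d\n", skip_over_dtim(10, 100));	/* 0 */
	return 0;
}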
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index daff1d0..5ae9c8a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -177,7 +177,8 @@
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-	if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
+	if (IWL_MVM_RS_DISABLE_P2P_MIMO &&
+	    iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
 		return false;
 
 	if (mvm->nvm_data->sku_cap_mimo_disabled)
@@ -2403,7 +2404,7 @@
 	u8 rate_idx;
 };
 
-static const struct rs_init_rate_info rs_init_rates_24ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
 	{ -60, IWL_RATE_54M_INDEX },
 	{ -64, IWL_RATE_48M_INDEX },
 	{ -68, IWL_RATE_36M_INDEX },
@@ -2416,7 +2417,7 @@
 	{ S8_MIN, IWL_RATE_1M_INDEX },
 };
 
-static const struct rs_init_rate_info rs_init_rates_5ghz[] = {
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
 	{ -60, IWL_RATE_54M_INDEX },
 	{ -64, IWL_RATE_48M_INDEX },
 	{ -72, IWL_RATE_36M_INDEX },
@@ -2427,6 +2428,124 @@
 	{ S8_MIN, IWL_RATE_6M_INDEX },
 };
 
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+	{ -60, IWL_RATE_MCS_7_INDEX },
+	{ -64, IWL_RATE_MCS_6_INDEX },
+	{ -68, IWL_RATE_MCS_5_INDEX },
+	{ -72, IWL_RATE_MCS_4_INDEX },
+	{ -80, IWL_RATE_MCS_3_INDEX },
+	{ -84, IWL_RATE_MCS_2_INDEX },
+	{ -85, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+	{ -60, IWL_RATE_MCS_8_INDEX },
+	{ -64, IWL_RATE_MCS_7_INDEX },
+	{ -68, IWL_RATE_MCS_6_INDEX },
+	{ -72, IWL_RATE_MCS_5_INDEX },
+	{ -80, IWL_RATE_MCS_4_INDEX },
+	{ -84, IWL_RATE_MCS_3_INDEX },
+	{ -85, IWL_RATE_MCS_2_INDEX },
+	{ -87, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = {
+	{ -60, IWL_RATE_MCS_9_INDEX },
+	{ -64, IWL_RATE_MCS_8_INDEX },
+	{ -68, IWL_RATE_MCS_7_INDEX },
+	{ -72, IWL_RATE_MCS_6_INDEX },
+	{ -80, IWL_RATE_MCS_5_INDEX },
+	{ -84, IWL_RATE_MCS_4_INDEX },
+	{ -85, IWL_RATE_MCS_3_INDEX },
+	{ -87, IWL_RATE_MCS_2_INDEX },
+	{ -88, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
+/* Init the optimal rate based on STA caps
+ * This combined with rssi is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+				 struct ieee80211_sta *sta,
+				 struct iwl_lq_sta *lq_sta)
+{
+	struct rs_rate *rate = &lq_sta->optimal_rate;
+
+	if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+	else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+	else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+		rate->type = LQ_LEGACY_A;
+	else
+		rate->type = LQ_LEGACY_G;
+
+	rate->bw = rs_bw_from_sta_bw(sta);
+	rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+	/* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+	if (is_mimo(rate)) {
+		lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+	} else if (is_siso(rate)) {
+		lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+	} else {
+		lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+		if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+			lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+		} else {
+			lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+		}
+	}
+
+	if (is_vht(rate)) {
+		if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+			lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+		} else {
+			lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz);
+		}
+	} else if (is_ht(rate)) {
+		lq_sta->optimal_rates = rs_optimal_rates_ht;
+		lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+	}
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+					   struct iwl_lq_sta *lq_sta)
+{
+	struct rs_rate *rate = &lq_sta->optimal_rate;
+	int i;
+
+	rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+				     BITS_PER_LONG);
+
+	for (i = 0; i < lq_sta->optimal_nentries; i++) {
+		int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+		if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+		    (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+			rate->index = rate_idx;
+			break;
+		}
+	}
+
+	rs_dump_rate(mvm, rate, "OPTIMAL RATE");
+	return rate;
+}
+
 /* Choose an initial legacy rate and antenna to use based on the RSSI
  * of last Rx
  */
@@ -2468,12 +2587,12 @@
 
 	if (band == IEEE80211_BAND_5GHZ) {
 		rate->type = LQ_LEGACY_A;
-		initial_rates = rs_init_rates_5ghz;
-		nentries = ARRAY_SIZE(rs_init_rates_5ghz);
+		initial_rates = rs_optimal_rates_5ghz_legacy;
+		nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
 	} else {
 		rate->type = LQ_LEGACY_G;
-		initial_rates = rs_init_rates_24ghz;
-		nentries = ARRAY_SIZE(rs_init_rates_24ghz);
+		initial_rates = rs_optimal_rates_24ghz_legacy;
+		nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
 	}
 
 	if (IWL_MVM_RS_RSSI_BASED_INIT_RATE) {
@@ -2496,10 +2615,21 @@
 			 struct iwl_lq_sta *lq_sta,
 			 struct ieee80211_rx_status *rx_status)
 {
+	int i;
+
 	lq_sta->pers.chains = rx_status->chains;
 	lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
 	lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
 	lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+	lq_sta->pers.last_rssi = S8_MIN;
+
+	for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+		if (!(lq_sta->pers.chains & BIT(i)))
+			continue;
+
+		if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+			lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+	}
 }
 
 /**
@@ -2538,6 +2668,7 @@
 	rate = &tbl->rate;
 
 	rs_get_initial_rate(mvm, lq_sta, band, rate);
+	rs_init_optimal_rate(mvm, sta, lq_sta);
 
 	WARN_ON_ONCE(rate->ant != ANT_A && rate->ant != ANT_B);
 	if (rate->ant == ANT_A)
@@ -2560,6 +2691,8 @@
 	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_lq_sta *lq_sta = mvm_sta;
+	struct rs_rate *optimal_rate;
+	u32 last_ucode_rate;
 
 	if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
 		/* if vif isn't initialized mvm doesn't know about
@@ -2583,8 +2716,18 @@
 
 	iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
 				  info->band, &info->control.rates[0]);
-
 	info->control.rates[0].count = 1;
+
+	/* Report the optimal rate based on rssi and STA caps if we haven't
+	 * converged yet (too little traffic) or are exploring other modulations
+	 */
+	if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+		optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+		last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+							  optimal_rate);
+		iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+					  &txrc->reported_rate);
+	}
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
@@ -2605,6 +2748,7 @@
 #endif
 	lq_sta->pers.chains = 0;
 	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+	lq_sta->pers.last_rssi = S8_MIN;
 
 	return &sta_priv->lq_sta;
 }
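rs_get_optimal_rate() above boils down to a table walk: take the entries from fastest to slowest and return the first rate whose RSSI threshold the last measured RSSI meets and whose bit is set in the supported-rate mask. A self-contained sketch of that lookup, using an illustrative HT-style table and mask rather than the driver's structures:

#include <stdio.h>

struct rate_entry { int rssi; int rate_idx; };

/* Example thresholds modelled on rs_optimal_rates_ht; not the real arrays. */
static const struct rate_entry table[] = {
	{ -60, 7 }, { -64, 6 }, { -68, 5 }, { -72, 4 },
	{ -80, 3 }, { -84, 2 }, { -85, 1 }, { -128, 0 },
};

static int optimal_rate(int last_rssi, unsigned long supported_mask)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		int idx = table[i].rate_idx;

		if (last_rssi >= table[i].rssi &&
		    (supported_mask & (1UL << idx)))
			return idx;
	}
	return 0;	/* fall back to the lowest rate */
}

int main(void)
{
	/* -70 dBm with MCS 0..5 supported -> MCS 4 */
	printf("MCS %d\n", optimal_rate(-70, 0x3f));
	return 0;
}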
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 2a3da31..81314ad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -1,6 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -316,6 +317,14 @@
 	u8 max_siso_rate_idx;
 	u8 max_mimo2_rate_idx;
 
+	/* Optimal rate based on RSSI and STA caps.
+	 * Used only to reflect link speed to userspace.
+	 */
+	struct rs_rate optimal_rate;
+	unsigned long optimal_rate_mask;
+	const struct rs_init_rate_info *optimal_rates;
+	int optimal_nentries;
+
 	u8 missed_rate_counter;
 
 	struct iwl_lq_cmd lq;
@@ -341,6 +350,7 @@
 #endif
 		u8 chains;
 		s8 chain_signal[IEEE80211_MAX_CHAINS];
+		s8 last_rssi;
 		struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
 		struct iwl_mvm *drv;
 	} pers;
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 8f1d93b..c37c10a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -61,6 +61,7 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *****************************************************************************/
+#include <linux/skbuff.h>
 #include "iwl-trans.h"
 #include "mvm.h"
 #include "fw-api.h"
@@ -71,8 +72,7 @@
  * Copies the phy information in mvm->last_phy_info, it will be used when the
  * actual data will come from the fw in the next packet.
  */
-int iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
@@ -86,8 +86,6 @@
 		spin_unlock(&mvm->drv_stats_lock);
 	}
 #endif
-
-	return 0;
 }
 
 /*
@@ -96,6 +94,7 @@
  * Adds the rxb to a new skb and give it to mac80211
  */
 static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+					    struct napi_struct *napi,
 					    struct sk_buff *skb,
 					    struct ieee80211_hdr *hdr, u16 len,
 					    u32 ampdu_status, u8 crypt_len,
@@ -129,7 +128,7 @@
 				fraglen, rxb->truesize);
 	}
 
-	ieee80211_rx(mvm->hw, skb);
+	ieee80211_rx_napi(mvm->hw, skb, napi);
 }
 
 /*
@@ -237,13 +236,26 @@
 	return 0;
 }
 
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+			    struct sk_buff *skb,
+			    u32 status)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+	if (mvmvif->features & NETIF_F_RXCSUM &&
+	    status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+	    status & RX_MPDU_RES_STATUS_CSUM_OK)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
 /*
  * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
  *
  * Handles the actual data of the Rx packet from the fw
  */
-int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb)
 {
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_rx_status *rx_status;
@@ -271,7 +283,7 @@
 	skb = alloc_skb(128, GFP_ATOMIC);
 	if (!skb) {
 		IWL_ERR(mvm, "alloc_skb failed\n");
-		return 0;
+		return;
 	}
 
 	rx_status = IEEE80211_SKB_RXCB(skb);
@@ -284,14 +296,14 @@
 		IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
 			       rx_pkt_status);
 		kfree_skb(skb);
-		return 0;
+		return;
 	}
 
 	if ((unlikely(phy_info->cfg_phy_cnt > 20))) {
 		IWL_DEBUG_DROP(mvm, "dsp size out of range [0,20]: %d\n",
 			       phy_info->cfg_phy_cnt);
 		kfree_skb(skb);
-		return 0;
+		return;
 	}
 
 	/*
@@ -366,6 +378,9 @@
 		}
 	}
 
+	if (sta && ieee80211_is_data(hdr->frame_control))
+		iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+
 	rcu_read_unlock();
 
 	/* set the preamble flag if appropriate */
@@ -429,9 +444,8 @@
 	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
 				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
 #endif
-	iwl_mvm_pass_packet_to_mac80211(mvm, skb, hdr, len, ampdu_status,
+	iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
 					crypt_len, rxb);
-	return 0;
 }
 
 static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
@@ -623,10 +637,7 @@
 		iwl_rx_packet_payload_len(pkt));
 }
 
-int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
-	return 0;
 }
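The new iwl_mvm_rx_csum() marks a frame as already checksummed only when the interface advertises RX checksum offload and the firmware status carries both the "checksum done" and "checksum ok" bits. A tiny sketch of that decision, with placeholder bit values rather than the real NETIF_F_RXCSUM / RX_MPDU_RES_STATUS_* constants:

#include <stdbool.h>
#include <stdio.h>

#define F_RXCSUM	(1u << 0)	/* placeholder feature flag */
#define CSUM_DONE	(1u << 1)	/* placeholder firmware status bits */
#define CSUM_OK		(1u << 2)

static bool csum_unnecessary(unsigned int vif_features, unsigned int rx_status)
{
	return (vif_features & F_RXCSUM) &&
	       (rx_status & CSUM_DONE) &&
	       (rx_status & CSUM_OK);
}

int main(void)
{
	printf("%d\n", csum_unnecessary(F_RXCSUM, CSUM_DONE | CSUM_OK));	/* 1 */
	printf("%d\n", csum_unnecessary(F_RXCSUM, CSUM_DONE));			/* 0 */
	return 0;
}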
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5514ad6..56559d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,10 +72,60 @@
 #define IWL_DENSE_EBS_SCAN_RATIO 5
 #define IWL_SPARSE_EBS_SCAN_RATIO 1
 
-struct iwl_mvm_scan_params {
-	u32 max_out_time;
+enum iwl_mvm_scan_type {
+	IWL_SCAN_TYPE_UNASSOC,
+	IWL_SCAN_TYPE_WILD,
+	IWL_SCAN_TYPE_MILD,
+	IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_traffic_load {
+	IWL_MVM_TRAFFIC_LOW,
+	IWL_MVM_TRAFFIC_MEDIUM,
+	IWL_MVM_TRAFFIC_HIGH,
+};
+
+struct iwl_mvm_scan_timing_params {
+	u32 dwell_active;
+	u32 dwell_passive;
+	u32 dwell_fragmented;
 	u32 suspend_time;
-	bool passive_fragmented;
+	u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+	[IWL_SCAN_TYPE_UNASSOC] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 0,
+		.max_out_time = 0,
+	},
+	[IWL_SCAN_TYPE_WILD] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 30,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_MILD] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 120,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_FRAGMENTED] = {
+		.dwell_active = 10,
+		.dwell_passive = 110,
+		.dwell_fragmented = 44,
+		.suspend_time = 95,
+		.max_out_time = 44,
+	},
+};
+
+struct iwl_mvm_scan_params {
+	enum iwl_mvm_scan_type type;
 	u32 n_channels;
 	u16 delay;
 	int n_ssids;
@@ -90,15 +140,7 @@
 	int n_match_sets;
 	struct iwl_scan_probe_req preq;
 	struct cfg80211_match_set *match_sets;
-	struct _dwell {
-		u16 passive;
-		u16 active;
-		u16 fragmented;
-	} dwell[IEEE80211_NUM_BANDS];
-	struct {
-		u8 iterations;
-		u8 full_scan_mul; /* not used for UMAC */
-	} schedule[2];
+	u8 iterations[2];
 };
 
 static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
@@ -147,34 +189,6 @@
 		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 }
 
-/*
- * If req->n_ssids > 0, it means we should do an active scan.
- * In case of active scan w/o directed scan, we receive a zero-length SSID
- * just to notify that this scan is active and not passive.
- * In order to notify the FW of the number of SSIDs we wish to scan (including
- * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). If the first SSID is
- * already included in the probe template, so we need to set only
- * req->n_ssids - 1 bits in addition to the first bit.
- */
-static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
-				    enum ieee80211_band band, int n_ssids)
-{
-	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-		return 10;
-	if (band == IEEE80211_BAND_2GHZ)
-		return 20  + 3 * (n_ssids + 1);
-	return 10  + 2 * (n_ssids + 1);
-}
-
-static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
-				     enum ieee80211_band band)
-{
-	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-			return 110;
-	return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
-}
-
 static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
 					    struct ieee80211_vif *vif)
 {
@@ -186,90 +200,39 @@
 		*global_cnt += 1;
 }
 
-static void iwl_mvm_scan_calc_dwell(struct iwl_mvm *mvm,
-				    struct ieee80211_vif *vif,
-				    struct iwl_mvm_scan_params *params)
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+	return IWL_MVM_TRAFFIC_LOW;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct iwl_mvm_scan_params *params)
 {
 	int global_cnt = 0;
-	enum ieee80211_band band;
-	u8 frag_passive_dwell = 0;
+	enum iwl_mvm_traffic_load load;
+	bool low_latency;
 
 	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_cnt);
 	if (!global_cnt)
-		goto not_bound;
+		return IWL_SCAN_TYPE_UNASSOC;
 
-	params->suspend_time = 30;
-	params->max_out_time = 120;
+	load = iwl_mvm_get_traffic_load(mvm);
+	low_latency = iwl_mvm_low_latency(mvm);
 
-	if (iwl_mvm_low_latency(mvm)) {
-		if (fw_has_api(&mvm->fw->ucode_capa,
-			       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
+	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+		return IWL_SCAN_TYPE_FRAGMENTED;
 
-			params->suspend_time = 105;
-			/*
-			 * If there is more than one active interface make
-			 * passive scan more fragmented.
-			 */
-			frag_passive_dwell = 40;
-			params->max_out_time = frag_passive_dwell;
-		} else {
-			params->suspend_time = 120;
-			params->max_out_time = 120;
-		}
-	}
+	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+		return IWL_SCAN_TYPE_MILD;
 
-	if (frag_passive_dwell &&
-	    fw_has_api(&mvm->fw->ucode_capa,
-		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
-		/*
-		 * P2P device scan should not be fragmented to avoid negative
-		 * impact on P2P device discovery. Configure max_out_time to be
-		 * equal to dwell time on passive channel. Take a longest
-		 * possible value, one that corresponds to 2GHz band
-		 */
-		if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-			u32 passive_dwell =
-				iwl_mvm_get_passive_dwell(mvm,
-							  IEEE80211_BAND_2GHZ);
-			params->max_out_time = passive_dwell;
-		} else {
-			params->passive_fragmented = true;
-		}
-	}
-
-	if ((params->flags & NL80211_SCAN_FLAG_LOW_PRIORITY) &&
-	    (params->max_out_time > 200))
-		params->max_out_time = 200;
-
-not_bound:
-
-	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].fragmented = frag_passive_dwell;
-
-		params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
-									band);
-		params->dwell[band].active =
-			iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
-	}
-
-	IWL_DEBUG_SCAN(mvm,
-		       "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
-		       params->max_out_time, params->suspend_time,
-		       params->passive_fragmented);
-	IWL_DEBUG_SCAN(mvm,
-		       "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
-		       params->dwell[IEEE80211_BAND_2GHZ].passive,
-		       params->dwell[IEEE80211_BAND_2GHZ].active,
-		       params->dwell[IEEE80211_BAND_2GHZ].fragmented);
-	IWL_DEBUG_SCAN(mvm,
-		       "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
-		       params->dwell[IEEE80211_BAND_5GHZ].passive,
-		       params->dwell[IEEE80211_BAND_5GHZ].active,
-		       params->dwell[IEEE80211_BAND_5GHZ].fragmented);
+	return IWL_SCAN_TYPE_WILD;
 }
 
 static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@ -327,9 +290,8 @@
 	return buf;
 }
 
-int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
@@ -341,17 +303,13 @@
 		       iwl_mvm_dump_channel_list(notif->results,
 						 notif->scanned_channels, buf,
 						 sizeof(buf)));
-	return 0;
 }
 
-int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
 	ieee80211_sched_scan_results(mvm->hw);
-
-	return 0;
 }
 
 static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
@@ -368,9 +326,8 @@
 	}
 }
 
-int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
@@ -395,6 +352,11 @@
 		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 			       aborted ? "aborted" : "completed",
 			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+		IWL_DEBUG_SCAN(mvm,
+			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
 	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
@@ -406,9 +368,14 @@
 	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
 		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 
-		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s (FW)\n",
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 			       aborted ? "aborted" : "completed",
 			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+		IWL_DEBUG_SCAN(mvm,
+			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
 
 		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 		ieee80211_sched_scan_stopped(mvm->hw);
@@ -426,8 +393,6 @@
 	mvm->last_ebs_successful =
 			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
 			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-
-	return 0;
 }
 
 static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@ -751,13 +716,11 @@
 				    struct iwl_scan_req_lmac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
-	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-	if (params->passive_fragmented)
-		cmd->fragmented_dwell =
-				params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-	cmd->max_out_time = cpu_to_le32(params->max_out_time);
-	cmd->suspend_time = cpu_to_le32(params->suspend_time);
+	cmd->active_dwell = scan_timing[params->type].dwell_active;
+	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 }
 
@@ -794,7 +757,7 @@
 
 static int iwl_mvm_scan_total_iterations(struct iwl_mvm_scan_params *params)
 {
-	return params->schedule[0].iterations + params->schedule[1].iterations;
+	return params->iterations[0] + params->iterations[1];
 }
 
 static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
@@ -808,7 +771,7 @@
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 
-	if (params->passive_fragmented)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -861,11 +824,11 @@
 	ssid_bitmap <<= 1;
 
 	cmd->schedule[0].delay = cpu_to_le16(params->interval);
-	cmd->schedule[0].iterations = params->schedule[0].iterations;
-	cmd->schedule[0].full_scan_mul = params->schedule[0].full_scan_mul;
+	cmd->schedule[0].iterations = params->iterations[0];
+	cmd->schedule[0].full_scan_mul = 1;
 	cmd->schedule[1].delay = cpu_to_le16(params->interval);
-	cmd->schedule[1].iterations = params->schedule[1].iterations;
-	cmd->schedule[1].full_scan_mul = params->schedule[1].iterations;
+	cmd->schedule[1].iterations = params->iterations[1];
+	cmd->schedule[1].full_scan_mul = 1;
 
 	if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) {
 		cmd->channel_opt[0].flags =
@@ -937,9 +900,9 @@
 	int num_channels =
 		mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
 		mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-	int ret, i, j = 0, cmd_size, data_size;
+	int ret, i, j = 0, cmd_size;
 	struct iwl_host_cmd cmd = {
-		.id = SCAN_CFG_CMD,
+		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 	};
 
 	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
@@ -951,8 +914,6 @@
 	if (!scan_config)
 		return -ENOMEM;
 
-	data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
-	scan_config->hdr.size = cpu_to_le16(data_size);
 	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
 					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
 					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@ -1013,13 +974,11 @@
 				    struct iwl_scan_req_umac *cmd,
 				    struct iwl_mvm_scan_params *params)
 {
-	cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-	cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
-	if (params->passive_fragmented)
-		cmd->fragmented_dwell =
-				params->dwell[IEEE80211_BAND_2GHZ].fragmented;
-	cmd->max_out_time = cpu_to_le32(params->max_out_time);
-	cmd->suspend_time = cpu_to_le32(params->suspend_time);
+	cmd->active_dwell = scan_timing[params->type].dwell_active;
+	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
+	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 	cmd->scan_priority =
 		iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
@@ -1059,7 +1018,7 @@
 	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-	if (params->passive_fragmented)
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
 
 	if (iwl_mvm_rrm_scan_needed(mvm))
@@ -1099,8 +1058,6 @@
 		return uid;
 
 	memset(cmd, 0, ksize(cmd));
-	cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-				    sizeof(struct iwl_mvm_umac_cmd_hdr));
 
 	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
 
@@ -1230,17 +1187,15 @@
 	params.n_match_sets = 0;
 	params.match_sets = NULL;
 
-	params.schedule[0].iterations = 1;
-	params.schedule[0].full_scan_mul = 0;
-	params.schedule[1].iterations = 0;
-	params.schedule[1].full_scan_mul = 0;
+	params.iterations[0] = 1;
+	params.iterations[1] = 0;
 
-	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
+	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-		hcmd.id = SCAN_REQ_UMAC;
+		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
 		ret = iwl_mvm_scan_umac(mvm, vif, &params,
 					IWL_MVM_SCAN_REGULAR);
 	} else {
@@ -1313,10 +1268,10 @@
 	params.n_match_sets = req->n_match_sets;
 	params.match_sets = req->match_sets;
 
-	params.schedule[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS;
-	params.schedule[0].full_scan_mul = 1;
-	params.schedule[1].iterations = 0xff;
-	params.schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+	params.iterations[0] = 0;
+	params.iterations[1] = 0xff;
+
+	params.type = iwl_mvm_get_scan_type(mvm, vif, &params);
 
 	if (req->interval > U16_MAX) {
 		IWL_DEBUG_SCAN(mvm,
@@ -1339,8 +1294,6 @@
 		params.delay = req->delay;
 	}
 
-	iwl_mvm_scan_calc_dwell(mvm, vif, &params);
-
 	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
 	if (ret)
 		return ret;
@@ -1348,7 +1301,7 @@
 	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-		hcmd.id = SCAN_REQ_UMAC;
+		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
 		ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
 	} else {
 		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
@@ -1374,9 +1327,8 @@
 	return ret;
 }
 
-int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-					struct iwl_rx_cmd_buffer *rxb,
-					struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
@@ -1384,7 +1336,7 @@
 	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 
 	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
-		return 0;
+		return;
 
 	/* if the scan is already stopping, we don't need to notify mac80211 */
 	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
@@ -1395,26 +1347,26 @@
 	}
 
 	mvm->scan_status &= ~mvm->scan_uid_status[uid];
-
 	IWL_DEBUG_SCAN(mvm,
 		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
 		       uid, mvm->scan_uid_status[uid],
 		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
 				"completed" : "aborted",
 		       iwl_mvm_ebs_status_str(notif->ebs_status));
+	IWL_DEBUG_SCAN(mvm,
+		       "Last line %d, Last iteration %d, Time from last iteration %d\n",
+		       notif->last_schedule, notif->last_iter,
+		       __le32_to_cpu(notif->time_from_last_iter));
 
 	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
 	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
 		mvm->last_ebs_successful = false;
 
 	mvm->scan_uid_status[uid] = 0;
-
-	return 0;
 }
 
-int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-					     struct iwl_rx_cmd_buffer *rxb,
-					     struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
@@ -1426,15 +1378,11 @@
 		       iwl_mvm_dump_channel_list(notif->results,
 						 notif->scanned_channels, buf,
 						 sizeof(buf)));
-	return 0;
 }
 
 static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 {
-	struct iwl_umac_scan_abort cmd = {
-		.hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
-					sizeof(struct iwl_mvm_umac_cmd_hdr)),
-	};
+	struct iwl_umac_scan_abort cmd = {};
 	int uid, ret;
 
 	lockdep_assert_held(&mvm->mutex);
@@ -1451,7 +1399,10 @@
 
 	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   iwl_cmd_id(SCAN_ABORT_UMAC,
+					      IWL_ALWAYS_LONG_GROUP, 0),
+				   0, sizeof(cmd), &cmd);
 	if (!ret)
 		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
@@ -1461,7 +1412,7 @@
 static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 {
 	struct iwl_notification_wait wait_scan_done;
-	static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
 					      SCAN_OFFLOAD_COMPLETE, };
 	int ret;
 
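iwl_mvm_get_scan_type() replaces the old dwell calculation with a small decision tree: unassociated timings if no interface is bound, fragmented only when traffic is high or a low-latency client is active (and the vif is not a P2P device and the firmware supports fragmented scan), mild for medium load or low latency, wild otherwise. A simplified sketch of that decision, with the surrounding types reduced to plain booleans and enums:

#include <stdbool.h>
#include <stdio.h>

enum scan_type { SCAN_UNASSOC, SCAN_WILD, SCAN_MILD, SCAN_FRAGMENTED };
enum traffic_load { LOAD_LOW, LOAD_MEDIUM, LOAD_HIGH };

static enum scan_type get_scan_type(bool any_vif_bound, enum traffic_load load,
				    bool low_latency, bool p2p_device,
				    bool fw_frag_scan)
{
	if (!any_vif_bound)
		return SCAN_UNASSOC;

	if ((load == LOAD_HIGH || low_latency) && !p2p_device && fw_frag_scan)
		return SCAN_FRAGMENTED;

	if (load >= LOAD_MEDIUM || low_latency)
		return SCAN_MILD;

	return SCAN_WILD;
}

int main(void)
{
	/* bound vif, low traffic, no low-latency client -> WILD timings */
	printf("%d\n", get_scan_type(true, LOAD_LOW, false, false, true));
	return 0;
}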
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 26f076e..df216cd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1148,18 +1148,31 @@
 
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 {
-	int i;
+	int i, max = -1, max_offs = -1;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+	/* Pick the unused key offset with the highest 'deleted'
+	 * counter. Every time a key is deleted, all the counters
+	 * are incremented and the one that was just deleted is
+	 * reset to zero. Thus, the highest counter is the one
+	 * that was deleted longest ago. Pick that one.
+	 */
+	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+		if (test_bit(i, mvm->fw_key_table))
+			continue;
+		if (mvm->fw_key_deleted[i] > max) {
+			max = mvm->fw_key_deleted[i];
+			max_offs = i;
+		}
+	}
 
-	if (i == STA_KEY_MAX_NUM)
+	if (max_offs < 0)
 		return STA_KEY_IDX_INVALID;
 
-	__set_bit(i, mvm->fw_key_table);
+	__set_bit(max_offs, mvm->fw_key_table);
 
-	return i;
+	return max_offs;
 }
 
 static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@ -1277,8 +1290,6 @@
 		const u8 *pn;
 
 		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
-		ieee80211_aes_cmac_calculate_k1_k2(keyconf,
-						   igtk_cmd.K1, igtk_cmd.K2);
 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 		pn = seq.aes_cmac.pn;
 		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
@@ -1479,7 +1490,7 @@
 {
 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
 	u8 sta_id;
-	int ret;
+	int ret, i;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1498,6 +1509,13 @@
 		return -ENOENT;
 	}
 
+	/* track which key was deleted last */
+	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+		if (mvm->fw_key_deleted[i] < U8_MAX)
+			mvm->fw_key_deleted[i]++;
+	}
+	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
 	if (sta_id == IWL_MVM_STATION_COUNT) {
 		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
 		return 0;
@@ -1661,9 +1679,8 @@
 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
@@ -1671,15 +1688,13 @@
 	u32 sta_id = le32_to_cpu(notif->sta_id);
 
 	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
-		return 0;
+		return;
 
 	rcu_read_lock();
 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
 	if (!IS_ERR_OR_NULL(sta))
 		ieee80211_sta_eosp(sta);
 	rcu_read_unlock();
-
-	return 0;
 }
 
 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
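The key-offset change above keeps a per-slot "deleted" counter: on every key removal all counters are bumped and the freed slot is reset to zero, so among the free slots the one with the largest counter is the one unused for the longest time and is reused first. A small sketch of that bookkeeping (four slots instead of the driver's STA_KEY_MAX_NUM, everything else simplified):

#include <stdbool.h>
#include <stdio.h>

#define NUM_SLOTS 4

static bool slot_in_use[NUM_SLOTS];
static unsigned char slot_deleted[NUM_SLOTS];

/* Free slot whose deletion happened longest ago, or -1 if none is free. */
static int pick_slot(void)
{
	int i, max = -1, best = -1;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (slot_in_use[i])
			continue;
		if (slot_deleted[i] > max) {
			max = slot_deleted[i];
			best = i;
		}
	}
	return best;
}

static void delete_slot(int idx)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++)
		if (slot_deleted[i] < 255)
			slot_deleted[i]++;
	slot_deleted[idx] = 0;
	slot_in_use[idx] = false;
}

int main(void)
{
	slot_in_use[0] = slot_in_use[1] = true;
	delete_slot(0);			/* slot 0 was freed most recently */
	printf("%d\n", pick_slot());	/* prefers slot 2 over the just-freed 0 */
	return 0;
}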
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 748f5dc..eedb215 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -378,9 +378,8 @@
 			     struct ieee80211_sta *sta, u32 iv32,
 			     u16 *phase1key);
 
-int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-			  struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb);
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tdls.c b/drivers/net/wireless/iwlwifi/mvm/tdls.c
index a87b506..fe2fa56 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tdls.c
@@ -169,18 +169,11 @@
 		return;
 
 	pkt = cmd.resp_pkt;
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(mvm, "Bad return from TDLS_CONFIG_COMMAND (0x%08X)\n",
-			pkt->hdr.flags);
-		goto exit;
-	}
 
-	if (WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp)))
-		goto exit;
+	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
 
 	/* we don't really care about the response at this point */
 
-exit:
 	iwl_free_resp(&cmd);
 }
 
@@ -261,8 +254,7 @@
 		mvm->tdls_cs.cur_sta_id = IWL_MVM_STATION_COUNT;
 }
 
-int iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
@@ -277,17 +269,17 @@
 	/* can fail sometimes */
 	if (!le32_to_cpu(notif->status)) {
 		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
-		goto out;
+		return;
 	}
 
 	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
-		goto out;
+		return;
 
 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
 					lockdep_is_held(&mvm->mutex));
 	/* the station may not be here, but if it is, it must be a TDLS peer */
 	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
-		goto out;
+		return;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	vif = mvmsta->vif;
@@ -301,9 +293,6 @@
 			 msecs_to_jiffies(delay));
 
 	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
-
-out:
-	return 0;
 }
 
 static int
@@ -471,13 +460,19 @@
 	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
 
 	info = IEEE80211_SKB_CB(skb);
-	if (info->control.hw_key)
-		iwl_mvm_set_tx_cmd_crypto(mvm, info, &cmd.frame.tx_cmd, skb);
+	hdr = (void *)skb->data;
+	if (info->control.hw_key) {
+		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+			rcu_read_unlock();
+			ret = -EINVAL;
+			goto out;
+		}
+		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+	}
 
 	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
 			   mvmsta->sta_id);
 
-	hdr = (void *)skb->data;
 	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
 				hdr->frame_control);
 	rcu_read_unlock();
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index e472729..dbd7d54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -410,9 +410,8 @@
 /*
  * The Rx handler for time event notifications
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_time_event_notif *notif = (void *)pkt->data;
@@ -433,8 +432,6 @@
 	}
 unlock:
 	spin_unlock_bh(&mvm->time_event_lock);
-
-	return 0;
 }
 
 static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@ -503,7 +500,7 @@
 				       struct iwl_mvm_time_event_data *te_data,
 				       struct iwl_time_event_cmd *te_cmd)
 {
-	static const u8 time_event_response[] = { TIME_EVENT_CMD };
+	static const u16 time_event_response[] = { TIME_EVENT_CMD };
 	struct iwl_notification_wait wait_time_event;
 	int ret;
 
@@ -566,7 +563,7 @@
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-	const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
 	struct iwl_notification_wait wait_te_notif;
 	struct iwl_time_event_cmd time_cmd = {};
 
@@ -599,8 +596,7 @@
 		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
 	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
 
-	time_cmd.apply_time =
-		cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+	time_cmd.apply_time = cpu_to_le32(0);
 
 	time_cmd.max_frags = TE_V2_FRAG_NONE;
 	time_cmd.max_delay = cpu_to_le32(max_delay);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index de4fbc6..cbdf8e5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -157,9 +157,8 @@
 /*
  * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
  */
-int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-				struct iwl_rx_cmd_buffer *rxb,
-				struct iwl_device_cmd *cmd);
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
 
 /**
  * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.c b/drivers/net/wireless/iwlwifi/mvm/tof.c
new file mode 100644
index 0000000..380972f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.c
@@ -0,0 +1,304 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw-api-tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return;
+
+	memset(tof_data, 0, sizeof(*tof_data));
+
+	tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (IWL_MVM_TOF_IS_RESPONDER) {
+		tof_data->responder_cfg.sub_grp_cmd_id =
+			cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+		tof_data->responder_cfg.sta_id = IWL_MVM_STATION_COUNT;
+	}
+#endif
+
+	tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+	tof_data->range_req.req_timeout = 1;
+	tof_data->range_req.initiator = 1;
+	tof_data->range_req.report_policy = 3;
+
+	tof_data->range_req_ext.sub_grp_cmd_id =
+		cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return;
+
+	memset(tof_data, 0, sizeof(*tof_data));
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+			     struct ieee80211_vif *vif)
+{
+	bool *enabled = _data;
+
+	/* non bss vif exists */
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+		*enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+	struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+	bool enabled = true;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_tof_iterator, &enabled);
+	if (!enabled) {
+		IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+		return -EINVAL;
+	}
+
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+	struct iwl_tof_range_abort_cmd cmd = {
+		.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+		.request_id = id,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (id != mvm->tof_data.active_range_request) {
+		IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+			id, mvm->tof_data.active_range_request);
+		return -EINVAL;
+	}
+
+	/* after abort is sent there's no active request anymore */
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif)
+{
+	struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (vif->p2p || vif->type != NL80211_IFTYPE_AP) {
+		IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+		return -EIO;
+	}
+
+	cmd->sta_id = mvmvif->bcast_sta.sta_id;
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif)
+{
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.len = { sizeof(mvm->tof_data.range_req), },
+		/* no copy because of the command size */
+		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+		IWL_ERR(mvm, "Cannot send range request, not STA mode\n");
+		return -EIO;
+	}
+
+	/* nesting of range requests is not supported in FW */
+	if (mvm->tof_data.active_range_request !=
+		IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+		IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+			mvm->tof_data.active_range_request);
+		return -EIO;
+	}
+
+	mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+	cmd.data[0] = &mvm->tof_data.range_req;
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif)
+{
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+		IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+		return -EIO;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(mvm->tof_data.range_req_ext),
+				    &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+	if (resp->request_id != mvm->tof_data.active_range_request) {
+		IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+			resp->request_id, mvm->tof_data.active_range_request);
+		return -EIO;
+	}
+
+	memcpy(&mvm->tof_data.range_resp, resp,
+	       sizeof(struct iwl_tof_range_rsp_ntfy));
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+	return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+	IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+	return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_neighbor_report *report =
+		(struct iwl_tof_neighbor_report *)data;
+
+	IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+		       report->bssid, report->request_token, report->status);
+	return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+	case TOF_RANGE_RESPONSE_NOTIF:
+		iwl_mvm_tof_range_resp(mvm, resp->data);
+		break;
+	case TOF_MCSI_DEBUG_NOTIF:
+		iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+		break;
+	case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+		iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+		break;
+	default:
+		IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+			le32_to_cpu(resp->sub_grp_cmd_id));
+		break;
+	}
+}
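The new ToF code allows only one outstanding range request: IWL_MVM_TOF_RANGE_REQ_MAX_ID doubles as the "no active request" marker, a second request is refused while one is pending, and a response or abort only clears the state when its id matches. A user-space sketch of that single-request rule (the id handling is simplified and the error codes are illustrative):

#include <stdio.h>

#define NO_ACTIVE_REQ 256	/* mirrors IWL_MVM_TOF_RANGE_REQ_MAX_ID */

static unsigned int active_req = NO_ACTIVE_REQ;

static int send_range_request(unsigned int id)
{
	if (active_req != NO_ACTIVE_REQ)
		return -1;	/* nesting not supported by the firmware */
	active_req = id;
	return 0;
}

static int handle_range_response(unsigned int id)
{
	if (id != active_req)
		return -1;	/* stale or unknown response */
	active_req = NO_ACTIVE_REQ;
	return 0;
}

int main(void)
{
	printf("%d\n", send_range_request(7));		/*  0: accepted */
	printf("%d\n", send_range_request(8));		/* -1: one already pending */
	printf("%d\n", handle_range_response(7));	/*  0: matches, clears state */
	return 0;
}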
diff --git a/drivers/net/wireless/iwlwifi/mvm/tof.h b/drivers/net/wireless/iwlwifi/mvm/tof.h
new file mode 100644
index 0000000..50ae8ad
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/tof.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw-api-tof.h"
+
+struct iwl_mvm_tof_data {
+	struct iwl_tof_config_cmd tof_cfg;
+	struct iwl_tof_range_req_cmd range_req;
+	struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+	struct iwl_tof_range_rsp_ntfy range_resp;
+	u8 last_abort_id;
+	u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 80d07db..fe7145c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,24 +155,20 @@
 	return true;
 }
 
-int iwl_mvm_temp_notif(struct iwl_mvm *mvm,
-		       struct iwl_rx_cmd_buffer *rxb,
-		       struct iwl_device_cmd *cmd)
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	int temp;
 
 	/* the notification is handled synchronously in ctkill, so skip here */
 	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
-		return 0;
+		return;
 
 	temp = iwl_mvm_temp_notif_parse(mvm, pkt);
 	if (temp < 0)
-		return 0;
+		return;
 
 	iwl_mvm_tt_temp_changed(mvm, temp);
-
-	return 0;
 }
 
 static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
@@ -187,7 +184,7 @@
 int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 {
 	struct iwl_notification_wait wait_temp_notif;
-	static const u8 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
+	static const u16 temp_notif[] = { DTS_MEASUREMENT_NOTIFICATION };
 	int ret, temp;
 
 	lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 8911686..6df5aad 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -153,18 +153,20 @@
 
 	if (ieee80211_is_mgmt(fc)) {
 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx_cmd->pm_frame_timeout = cpu_to_le16(3);
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+		else if (ieee80211_is_action(fc))
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 		else
-			tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 
 		/* The spec allows Action frames in A-MPDU, we don't support
 		 * it
 		 */
 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
 	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
-		tx_cmd->pm_frame_timeout = cpu_to_le16(2);
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
 	} else {
-		tx_cmd->pm_frame_timeout = 0;
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
 	}
 
 	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
@@ -268,19 +270,29 @@
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
-void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-			       struct ieee80211_tx_info *info,
-			       struct iwl_tx_cmd *tx_cmd,
-			       struct sk_buff *skb_frag)
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+				      struct ieee80211_tx_info *info,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct sk_buff *skb_frag,
+				      int hdrlen)
 {
 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+	u8 *crypto_hdr = skb_frag->data + hdrlen;
+	u64 pn;
 
 	switch (keyconf->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-		if (info->flags & IEEE80211_TX_CTL_AMPDU)
-			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+	case WLAN_CIPHER_SUITE_CCMP_256:
+		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+		pn = atomic64_inc_return(&keyconf->tx_pn);
+		crypto_hdr[0] = pn;
+		crypto_hdr[2] = 0;
+		crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+		crypto_hdr[1] = pn >> 8;
+		crypto_hdr[4] = pn >> 16;
+		crypto_hdr[5] = pn >> 24;
+		crypto_hdr[6] = pn >> 32;
+		crypto_hdr[7] = pn >> 40;
 		break;
 
 	case WLAN_CIPHER_SUITE_TKIP:
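As an aside on the crypto change above: the new code writes the CCMP packet number straight into the 8-byte CCMP header in front of the payload, following the IEEE 802.11 CCMP header layout (PN0, PN1, reserved, key-id/Ext-IV byte, PN2..PN5). The snippet below is a minimal standalone sketch of that byte layout; ccmp_hdr_build() is a hypothetical helper used only for illustration and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Serialize a 48-bit packet number and key index into an 8-byte CCMP
 * header, mirroring the byte order used in iwl_mvm_set_tx_cmd_crypto(). */
static void ccmp_hdr_build(uint8_t hdr[8], uint64_t pn, uint8_t keyidx)
{
	hdr[0] = pn;			/* PN0 (least significant byte) */
	hdr[1] = pn >> 8;		/* PN1 */
	hdr[2] = 0;			/* reserved */
	hdr[3] = 0x20 | (keyidx << 6);	/* Ext IV flag + key id */
	hdr[4] = pn >> 16;		/* PN2 */
	hdr[5] = pn >> 24;		/* PN3 */
	hdr[6] = pn >> 32;		/* PN4 */
	hdr[7] = pn >> 40;		/* PN5 (most significant byte) */
}

int main(void)
{
	uint8_t hdr[8];
	int i;

	ccmp_hdr_build(hdr, 0x010203040506ULL, 1);
	for (i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);	/* 06 05 00 60 04 03 02 01 */
	printf("\n");
	return 0;
}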
@@ -308,7 +320,7 @@
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-		      struct ieee80211_sta *sta, u8 sta_id)
+		      int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -325,7 +337,7 @@
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	if (info->control.hw_key)
-		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
 
 	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
 
@@ -346,6 +358,7 @@
 	struct iwl_device_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 	u8 sta_id;
+	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
 	if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
 		return -1;
@@ -366,23 +379,34 @@
 		IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
 	/*
-	 * If the interface on which frame is sent is the P2P_DEVICE
+	 * If the interface on which the frame is sent is the P2P_DEVICE
 	 * or an AP/GO interface use the broadcast station associated
-	 * with it; otherwise use the AUX station.
+	 * with it; otherwise if the interface is a managed interface
+	 * use the AP station associated with it for multicast traffic
+	 * (this is not possible for unicast packets as a TDLS discovery
+	 * response is sent without a station entry); otherwise use the
+	 * AUX station.
 	 */
-	if (info->control.vif &&
-	    (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-	     info->control.vif->type == NL80211_IFTYPE_AP)) {
+	sta_id = mvm->aux_sta.sta_id;
+	if (info->control.vif) {
 		struct iwl_mvm_vif *mvmvif =
 			iwl_mvm_vif_from_mac80211(info->control.vif);
-		sta_id = mvmvif->bcast_sta.sta_id;
-	} else {
-		sta_id = mvm->aux_sta.sta_id;
+
+		if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+		    info->control.vif->type == NL80211_IFTYPE_AP)
+			sta_id = mvmvif->bcast_sta.sta_id;
+		else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+			 is_multicast_ether_addr(hdr->addr1)) {
+			u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+
+			if (ap_sta_id != IWL_MVM_STATION_COUNT)
+				sta_id = ap_sta_id;
+		}
 	}
 
 	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
 
-	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
 	if (!dev_cmd)
 		return -1;
 
@@ -390,7 +414,7 @@
 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@ -416,9 +440,11 @@
 	u8 tid = IWL_MAX_TID_COUNT;
 	u8 txq_id = info->hw_queue;
 	bool is_data_qos = false, is_ampdu = false;
+	int hdrlen;
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	fc = hdr->frame_control;
+	hdrlen = ieee80211_hdrlen(fc);
 
 	if (WARN_ON_ONCE(!mvmsta))
 		return -1;
@@ -426,7 +452,7 @@
 	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
 		return -1;
 
-	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
 	if (!dev_cmd)
 		goto drop;
 
@@ -458,7 +484,7 @@
 	}
 
 	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+	memcpy(tx_cmd->hdr, hdr, hdrlen);
 
 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
@@ -911,8 +937,7 @@
 	rcu_read_unlock();
 }
 
-int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-		      struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
@@ -921,8 +946,6 @@
 		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
 	else
 		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-
-	return 0;
 }
 
 static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
@@ -942,8 +965,7 @@
 		(void *)(uintptr_t)tid_data->rate_n_flags;
 }
 
-int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
@@ -965,7 +987,7 @@
 	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
 		      tid >= IWL_MAX_TID_COUNT,
 		      "sta_id %d tid %d", sta_id, tid))
-		return 0;
+		return;
 
 	rcu_read_lock();
 
@@ -974,7 +996,7 @@
 	/* Reclaiming frames for a station that has been deleted ? */
 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 		rcu_read_unlock();
-		return 0;
+		return;
 	}
 
 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -985,7 +1007,7 @@
 			"invalid BA notification: Q %d, tid %d, flow %d\n",
 			tid_data->txq_id, tid, scd_flow);
 		rcu_read_unlock();
-		return 0;
+		return;
 	}
 
 	spin_lock_bh(&mvmsta->lock);
@@ -1072,8 +1094,6 @@
 		skb = __skb_dequeue(&reclaimed_skbs);
 		ieee80211_tx_status(mvm->hw, skb);
 	}
-
-	return 0;
 }
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 03f8e06..a7d4342 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -108,7 +108,7 @@
 	return ret;
 }
 
-int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u8 id,
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
 			 u32 flags, u16 len, const void *data)
 {
 	struct iwl_host_cmd cmd = {
@@ -166,11 +166,6 @@
 		goto out_free_resp;
 	}
 
-	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		ret = -EIO;
-		goto out_free_resp;
-	}
-
 	resp_len = iwl_rx_packet_payload_len(pkt);
 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
 		ret = -EIO;
@@ -187,7 +182,7 @@
 /*
 * We assume that the caller set the status to the success value
  */
-int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u8 id, u16 len,
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
 				const void *data, u32 *status)
 {
 	struct iwl_host_cmd cmd = {
@@ -243,8 +238,7 @@
 	return fw_rate_idx_to_plcp[rate_idx];
 }
 
-int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			  struct iwl_device_cmd *cmd)
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_error_resp *err_resp = (void *)pkt->data;
@@ -256,7 +250,6 @@
 		le32_to_cpu(err_resp->error_service));
 	IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
 		le64_to_cpu(err_resp->timestamp));
-	return 0;
 }
 
 /*
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 9f65c1c..b0825c4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -614,6 +614,7 @@
 {
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 
 	/* Before you put code here, think about WoWLAN. You cannot check here
@@ -631,20 +632,16 @@
 		return 0;
 
 	/*
-	 * On suspend, ict is disabled, and the interrupt mask
-	 * gets cleared. Reconfigure them both in case of d0i3
-	 * image. Otherwise, only enable rfkill interrupt (in
-	 * order to keep track of the rfkill status)
+	 * Enable rfkill interrupt (in order to keep track of
+	 * the rfkill status)
 	 */
-	if (trans->wowlan_d0i3) {
-		iwl_pcie_reset_ict(trans);
-		iwl_enable_interrupts(trans);
-	} else {
-		iwl_enable_rfkill_int(trans);
-	}
+	iwl_enable_rfkill_int(trans);
 
 	hw_rfkill = iwl_is_rfkill_set(trans);
+
+	mutex_lock(&trans_pcie->mutex);
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+	mutex_unlock(&trans_pcie->mutex);
 
 	return 0;
 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 376b84e..feb2f7e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,6 +44,21 @@
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
+#define RX_LOW_WATERMARK 8
+
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
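The IWL_PCIE_MAX_FRAGS definition above is simple budget arithmetic over the TFD's transfer buffers. A tiny sketch of the calculation follows (illustrative only; the IWL_NUM_OF_TBS value of 20 is an assumption for the example, not a value taken from this patch):

#include <stdio.h>

#define IWL_NUM_OF_TBS 20	/* assumed number of TBs per TFD */

/* 2 TBs go to the TX command and the MAC header, and one more may be
 * needed for linear data in the skb head; the rest can carry fragments. */
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)

int main(void)
{
	printf("max skb frags per TFD: %d\n", IWL_PCIE_MAX_FRAGS);	/* 17 */
	return 0;
}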
@@ -77,29 +92,29 @@
  * struct iwl_rxq - Rx queue
  * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @pool:
- * @queue:
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
  * @write_actual:
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
  * @need_update: flag to indicate we need to update read/write index
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
+ * @pool: initial pool of iwl_rx_mem_buffer for the queue
+ * @queue: actual rx queue
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
 	__le32 *bd;
 	dma_addr_t bd_dma;
-	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
-	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 	u32 read;
 	u32 write;
 	u32 free_count;
+	u32 used_count;
 	u32 write_actual;
 	struct list_head rx_free;
 	struct list_head rx_used;
@@ -107,6 +122,32 @@
 	struct iwl_rb_status *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
+	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @pool: initial pool of allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *	the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *	of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+	struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
+	atomic_t req_pending;
+	atomic_t req_ready;
+	struct list_head rbd_allocated;
+	struct list_head rbd_empty;
+	spinlock_t lock;
+	struct workqueue_struct *alloc_wq;
+	struct work_struct rx_alloc;
 };
 
 struct iwl_dma_ptr {
@@ -250,7 +291,7 @@
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
- * @rx_replenish: work that will be called when buffers need to be allocated
+ * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
  * @scd_base_addr: scheduler sram base address in SRAM
@@ -264,8 +305,10 @@
  * @rx_buf_size_8k: 8 kB RX buffer size
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @wide_cmd_header: true when ucode supports wide command header format
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
  * @fw_mon_phys: physical address of the buffer for the firmware monitor
  * @fw_mon_page: points to the first page of the buffer for the firmware monitor
@@ -273,7 +316,7 @@
  */
 struct iwl_trans_pcie {
 	struct iwl_rxq rxq;
-	struct work_struct rx_replenish;
+	struct iwl_rb_allocator rba;
 	struct iwl_trans *trans;
 	struct iwl_drv *drv;
 
@@ -285,9 +328,11 @@
 	dma_addr_t ict_tbl_dma;
 	int ict_index;
 	bool use_ict;
+	bool is_down;
 	struct isr_statistics isr_stats;
 
 	spinlock_t irq_lock;
+	struct mutex mutex;
 	u32 inta_mask;
 	u32 scd_base_addr;
 	struct iwl_dma_ptr scd_bc_tbls;
@@ -314,6 +359,7 @@
 	bool rx_buf_size_8k;
 	bool bc_table_dword;
 	bool scd_set_active;
+	bool wide_cmd_header;
 	u32 rx_page_order;
 
 	const char *const *command_names;
@@ -385,7 +431,7 @@
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
+			    struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 			    struct sk_buff_head *skbs);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index adad8d0..e06591f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -74,16 +74,29 @@
  * resets the Rx queue buffers with new memory.
  *
  * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
- *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- *   to replenish the iwl->rxq->rx_free.
- * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
- *   iwl->rxq is replenished and the READ INDEX is updated (updating the
- *   'processed' and 'read' driver indexes as well)
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When another 6 RBDs are used - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If that fails - it keeps trying to claim
+ *   them until they are ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool of size num_queues*(8-2) - the
+ *   maximum number of missing RBDs per allocation request (a request is
+ *   posted with 2 empty RBDs, there is no guarantee when the other 6 RBDs
+ *   are supplied).
+ *   The queues supply the recycling of the rest of the RBDs.
  * + A received packet is processed and handed to the kernel network stack,
  *   detached from the iwl->rxq.  The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
- *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
  *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
  *   If there were enough free buffers and RX_STALLED is set it is cleared.
  *
@@ -92,18 +105,32 @@
  *
  * iwl_rxq_alloc()            Allocates rx_free
  * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
- *                            iwl_pcie_rxq_restock
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
  * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
  *                            queue, updates firmware pointers, and updates
- *                            the WRITE index.  If insufficient rx_free buffers
- *                            are available, schedules iwl_pcie_rx_replenish
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
  *
  * -- enable interrupts --
  * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
  *                            READ INDEX, detaching the SKB from the pool.
  *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
  *                            Calls iwl_pcie_rxq_restock to refill any empty
  *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
  * ...
  *
  */
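The new buffer-management scheme described in the comment block above (post a request after 2 used RBDs, allocate and claim in batches of 8) can be easier to follow as a tiny single-threaded model. This is a sketch only, not driver code: the variables mirror the driver's used_count, free_count, req_pending and req_ready fields, and the constants are the RX_POST_REQ_ALLOC / RX_CLAIM_REQ_ALLOC values defined in pcie/internal.h earlier in this patch.

#include <stdio.h>

#define RX_POST_REQ_ALLOC  2	/* used RBDs that trigger a new request */
#define RX_CLAIM_REQ_ALLOC 8	/* RBDs allocated and claimed per request */

int main(void)
{
	int used_count = 0;	/* RBDs handed over to the allocator */
	int free_count = 0;	/* RBDs ready to be restocked to the HW */
	int req_pending = 0;	/* posted but not yet processed requests */
	int req_ready = 0;	/* processed requests ready for claiming */
	int pkt;

	/* simulate 24 received packets whose pages were all stolen */
	for (pkt = 1; pkt <= 24; pkt++) {
		used_count++;

		/* every 8 used RBDs, the 2nd one posts an allocation request */
		if (used_count % RX_CLAIM_REQ_ALLOC == RX_POST_REQ_ALLOC) {
			req_pending++;
			printf("pkt %2d: post request (pending=%d)\n",
			       pkt, req_pending);
		}

		/* background work: the allocator fills 8 pages per request */
		while (req_pending) {
			req_pending--;
			req_ready++;
		}

		/* once 8 RBDs are used, the queue claims a completed request */
		if (used_count >= RX_CLAIM_REQ_ALLOC && req_ready) {
			req_ready--;
			used_count -= RX_CLAIM_REQ_ALLOC;
			free_count += RX_CLAIM_REQ_ALLOC;
			printf("pkt %2d: claim 8 RBDs (free=%d used=%d)\n",
			       pkt, free_count, used_count);
		}
	}
	return 0;
}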
@@ -240,10 +267,6 @@
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
-	/* If the pre-allocated buffer pool is dropping low, schedule to
-	 * refill it */
-	if (rxq->free_count <= RX_LOW_WATERMARK)
-		schedule_work(&trans_pcie->rx_replenish);
 
 	/* If we've added more space for the firmware to place data, tell it.
 	 * Increment device's write pointer in multiples of 8. */
@@ -255,6 +278,45 @@
 }
 
 /*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ *
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+					   gfp_t priority)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct page *page;
+	gfp_t gfp_mask = priority;
+
+	if (rxq->free_count > RX_LOW_WATERMARK)
+		gfp_mask |= __GFP_NOWARN;
+
+	if (trans_pcie->rx_page_order > 0)
+		gfp_mask |= __GFP_COMP;
+
+	/* Alloc a new receive buffer */
+	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+	if (!page) {
+		if (net_ratelimit())
+			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+				       trans_pcie->rx_page_order);
+		/* Issue an error if the hardware has consumed more than half
+		 * of its free buffer list and we don't have enough
+		 * pre-allocated buffers.
+		 */
+		if (rxq->free_count <= RX_LOW_WATERMARK &&
+		    iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
+		    net_ratelimit())
+			IWL_CRIT(trans,
+				 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
+				 rxq->free_count);
+		return NULL;
+	}
+	return page;
+}
+
+/*
  * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
  *
  * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -269,7 +331,6 @@
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
 	struct iwl_rx_mem_buffer *rxb;
 	struct page *page;
-	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock(&rxq->lock);
@@ -279,32 +340,10 @@
 		}
 		spin_unlock(&rxq->lock);
 
-		if (rxq->free_count > RX_LOW_WATERMARK)
-			gfp_mask |= __GFP_NOWARN;
-
-		if (trans_pcie->rx_page_order > 0)
-			gfp_mask |= __GFP_COMP;
-
 		/* Alloc a new receive buffer */
-		page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
-		if (!page) {
-			if (net_ratelimit())
-				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
-					   "order: %d\n",
-					   trans_pcie->rx_page_order);
-
-			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
-			    net_ratelimit())
-				IWL_CRIT(trans, "Failed to alloc_pages with %s."
-					 "Only %u free buffers remaining.\n",
-					 priority == GFP_ATOMIC ?
-					 "GFP_ATOMIC" : "GFP_KERNEL",
-					 rxq->free_count);
-			/* We don't reschedule replenish work here -- we will
-			 * call the restock method and if it still needs
-			 * more buffers it will schedule replenish */
+		page = iwl_pcie_rx_alloc_page(trans, priority);
+		if (!page)
 			return;
-		}
 
 		spin_lock(&rxq->lock);
 
@@ -355,7 +394,7 @@
 
 	lockdep_assert_held(&rxq->lock);
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+	for (i = 0; i < RX_QUEUE_SIZE; i++) {
 		if (!rxq->pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -372,32 +411,164 @@
  * When moving to rx_free an page is allocated for the slot.
  *
  * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called as a scheduled work item (except for during initialization)
+ * This is called only during initialization
  */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
+static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 {
-	iwl_pcie_rxq_alloc_rbs(trans, gfp);
+	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
 
 	iwl_pcie_rxq_restock(trans);
 }
 
-static void iwl_pcie_rx_replenish_work(struct work_struct *data)
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received allocation request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie =
-	    container_of(data, struct iwl_trans_pcie, rx_replenish);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	struct list_head local_empty;
+	int pending = atomic_xchg(&rba->req_pending, 0);
 
-	iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
+	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+	/* If we were scheduled - there is at least one request */
+	spin_lock(&rba->lock);
+	/* swap out the rba->rbd_empty to a local list */
+	list_replace_init(&rba->rbd_empty, &local_empty);
+	spin_unlock(&rba->lock);
+
+	while (pending) {
+		int i;
+		struct list_head local_allocated;
+
+		INIT_LIST_HEAD(&local_allocated);
+
+		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+			struct iwl_rx_mem_buffer *rxb;
+			struct page *page;
+
+			/* List should never be empty - each reused RBD is
+			 * returned to the list, and initial pool covers any
+			 * possible gap between the time the page is allocated
+			 * and the time the RBD is added.
+			 */
+			BUG_ON(list_empty(&local_empty));
+			/* Get the first rxb from the rbd list */
+			rxb = list_first_entry(&local_empty,
+					       struct iwl_rx_mem_buffer, list);
+			BUG_ON(rxb->page);
+
+			/* Alloc a new receive buffer */
+			page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+			if (!page)
+				continue;
+			rxb->page = page;
+
+			/* Get physical address of the RB */
+			rxb->page_dma = dma_map_page(trans->dev, page, 0,
+					PAGE_SIZE << trans_pcie->rx_page_order,
+					DMA_FROM_DEVICE);
+			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+				rxb->page = NULL;
+				__free_pages(page, trans_pcie->rx_page_order);
+				continue;
+			}
+			/* dma address must be no more than 36 bits */
+			BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+			/* and also 256 byte aligned! */
+			BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+			/* move the allocated entry to the out list */
+			list_move(&rxb->list, &local_allocated);
+			i++;
+		}
+
+		pending--;
+		if (!pending) {
+			pending = atomic_xchg(&rba->req_pending, 0);
+			IWL_DEBUG_RX(trans,
+				     "Pending allocation requests = %d\n",
+				     pending);
+		}
+
+		spin_lock(&rba->lock);
+		/* add the allocated rbds to the allocator allocated list */
+		list_splice_tail(&local_allocated, &rba->rbd_allocated);
+		/* get more empty RBDs for current pending requests */
+		list_splice_tail_init(&rba->rbd_empty, &local_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_ready);
+	}
+
+	spin_lock(&rba->lock);
+	/* return unused rbds to the allocator empty list */
+	list_splice_tail(&local_empty, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ */
+static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+				     struct iwl_rx_mem_buffer
+				     *out[RX_CLAIM_REQ_ALLOC])
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	/*
+	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+	 * function will return -ENOMEM, as there are no ready requests.
+	 * atomic_dec_if_positive will perform the *actual* decrement only if
+	 * req_ready > 0, i.e. - there are ready requests and the function
+	 * hands one request to the caller.
+	 */
+	if (atomic_dec_if_positive(&rba->req_ready) < 0)
+		return -ENOMEM;
+
+	spin_lock(&rba->lock);
+	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+		/* Get next free Rx buffer, remove it from free list */
+		out[i] = list_first_entry(&rba->rbd_allocated,
+			       struct iwl_rx_mem_buffer, list);
+		list_del(&out[i]->list);
+	}
+	spin_unlock(&rba->lock);
+
+	return 0;
+}
+
+static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+	struct iwl_rb_allocator *rba_p =
+		container_of(data, struct iwl_rb_allocator, rx_alloc);
+	struct iwl_trans_pcie *trans_pcie =
+		container_of(rba_p, struct iwl_trans_pcie, rba);
+
+	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	struct device *dev = trans->dev;
 
 	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
 
 	spin_lock_init(&rxq->lock);
+	spin_lock_init(&rba->lock);
 
 	if (WARN_ON(rxq->bd || rxq->rb_stts))
 		return -EINVAL;
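The claim path in iwl_pcie_rx_allocator_get() above leans on atomic_dec_if_positive(): req_ready is only decremented when it is strictly positive, so a queue can never consume a request the allocator has not completed. The sketch below shows roughly equivalent semantics with a C11 compare-and-swap loop; it is an illustration of the primitive, not the kernel's implementation.

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v only if the result stays non-negative; return old - 1,
 * so a negative return means "nothing to claim". */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;	/* a failed CAS reloads old; retry */

	return old - 1;
}

int main(void)
{
	atomic_int req_ready = 1;

	printf("%d\n", dec_if_positive(&req_ready));	/*  0: request claimed */
	printf("%d\n", dec_if_positive(&req_ready));	/* -1: nothing ready */
	return 0;
}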
@@ -487,15 +658,49 @@
 	INIT_LIST_HEAD(&rxq->rx_free);
 	INIT_LIST_HEAD(&rxq->rx_used);
 	rxq->free_count = 0;
+	rxq->used_count = 0;
 
-	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
 		list_add(&rxq->pool[i].list, &rxq->rx_used);
 }
 
+static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
+{
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	INIT_LIST_HEAD(&rba->rbd_allocated);
+	INIT_LIST_HEAD(&rba->rbd_empty);
+
+	for (i = 0; i < RX_POOL_SIZE; i++)
+		list_add(&rba->pool[i].list, &rba->rbd_empty);
+}
+
+static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	lockdep_assert_held(&rba->lock);
+
+	for (i = 0; i < RX_POOL_SIZE; i++) {
+		if (!rba->pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, rba->pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
+		rba->pool[i].page = NULL;
+	}
+}
+
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, err;
 
 	if (!rxq->bd) {
@@ -503,11 +708,21 @@
 		if (err)
 			return err;
 	}
+	if (!rba->alloc_wq)
+		rba->alloc_wq = alloc_workqueue("rb_allocator",
+						WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+
+	spin_lock(&rba->lock);
+	atomic_set(&rba->req_pending, 0);
+	atomic_set(&rba->req_ready, 0);
+	/* free all first - we might be reconfigured for a different size */
+	iwl_pcie_rx_free_rba(trans);
+	iwl_pcie_rx_init_rba(rba);
+	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 
-	INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
-
 	/* free all first - we might be reconfigured for a different size */
 	iwl_pcie_rxq_free_rbs(trans);
 	iwl_pcie_rx_init_rxb_lists(rxq);
@@ -522,7 +737,7 @@
 	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 	spin_unlock(&rxq->lock);
 
-	iwl_pcie_rx_replenish(trans, GFP_KERNEL);
+	iwl_pcie_rx_replenish(trans);
 
 	iwl_pcie_rx_hw_init(trans, rxq);
 
@@ -537,6 +752,7 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 
 	/*if rxq->bd is NULL, it means that nothing has been allocated,
 	 * exit now */
@@ -545,7 +761,15 @@
 		return;
 	}
 
-	cancel_work_sync(&trans_pcie->rx_replenish);
+	cancel_work_sync(&rba->rx_alloc);
+	if (rba->alloc_wq) {
+		destroy_workqueue(rba->alloc_wq);
+		rba->alloc_wq = NULL;
+	}
+
+	spin_lock(&rba->lock);
+	iwl_pcie_rx_free_rba(trans);
+	spin_unlock(&rba->lock);
 
 	spin_lock(&rxq->lock);
 	iwl_pcie_rxq_free_rbs(trans);
@@ -566,8 +790,49 @@
 	rxq->rb_stts = NULL;
 }
 
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+				  struct iwl_rx_mem_buffer *rxb,
+				  struct iwl_rxq *rxq, bool emergency)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+	/* Move the RBD to the used list; it will be moved to the allocator
+	 * in batches before claiming or posting a request */
+	list_add_tail(&rxb->list, &rxq->rx_used);
+
+	if (unlikely(emergency))
+		return;
+
+	/* Count the allocator owned RBDs */
+	rxq->used_count++;
+
+	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
+	 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
+	 * buffers but still need to post another request.
+	 */
+	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+		/* Move the 2 RBDs to the allocator's ownership.
+		 * The allocator has another 6 from its pool for the
+		 * request completion */
+		spin_lock(&rba->lock);
+		list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_pending);
+		queue_work(rba->alloc_wq, &rba->rx_alloc);
+	}
+}
+
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
-				struct iwl_rx_mem_buffer *rxb)
+				struct iwl_rx_mem_buffer *rxb,
+				bool emergency)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
@@ -583,10 +848,9 @@
 
 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
 		struct iwl_rx_packet *pkt;
-		struct iwl_device_cmd *cmd;
 		u16 sequence;
 		bool reclaim;
-		int index, cmd_index, err, len;
+		int index, cmd_index, len;
 		struct iwl_rx_cmd_buffer rxcb = {
 			._offset = offset,
 			._rx_page_order = trans_pcie->rx_page_order,
@@ -634,12 +898,7 @@
 		index = SEQ_TO_INDEX(sequence);
 		cmd_index = get_cmd_index(&txq->q, index);
 
-		if (reclaim)
-			cmd = txq->entries[cmd_index].cmd;
-		else
-			cmd = NULL;
-
-		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+		iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
 
 		if (reclaim) {
 			kzfree(txq->entries[cmd_index].free_buf);
@@ -657,7 +916,7 @@
 			 * iwl_trans_send_cmd()
 			 * as we reclaim the driver command queue */
 			if (!rxcb._page_stolen)
-				iwl_pcie_hcmd_complete(trans, &rxcb, err);
+				iwl_pcie_hcmd_complete(trans, &rxcb);
 			else
 				IWL_WARN(trans, "Claim null rxb?\n");
 		}
@@ -688,13 +947,13 @@
 			 */
 			__free_pages(rxb->page, trans_pcie->rx_page_order);
 			rxb->page = NULL;
-			list_add_tail(&rxb->list, &rxq->rx_used);
+			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 		} else {
 			list_add_tail(&rxb->list, &rxq->rx_free);
 			rxq->free_count++;
 		}
 	} else
-		list_add_tail(&rxb->list, &rxq->rx_used);
+		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
 /*
@@ -704,10 +963,8 @@
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *rxq = &trans_pcie->rxq;
-	u32 r, i;
-	u8 fill_rx = 0;
-	u32 count = 8;
-	int total_empty;
+	u32 r, i, j, count = 0;
+	bool emergency = false;
 
 restart:
 	spin_lock(&rxq->lock);
@@ -720,36 +977,74 @@
 	if (i == r)
 		IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
 
-	/* calculate total frames need to be restock after handling RX */
-	total_empty = r - rxq->write_actual;
-	if (total_empty < 0)
-		total_empty += RX_QUEUE_SIZE;
-
-	if (total_empty > (RX_QUEUE_SIZE / 2))
-		fill_rx = 1;
-
 	while (i != r) {
 		struct iwl_rx_mem_buffer *rxb;
 
+		if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+			emergency = true;
+
 		rxb = rxq->queue[i];
 		rxq->queue[i] = NULL;
 
 		IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
 			     r, i, rxb);
-		iwl_pcie_rx_handle_rb(trans, rxb);
+		iwl_pcie_rx_handle_rb(trans, rxb, emergency);
 
 		i = (i + 1) & RX_QUEUE_MASK;
-		/* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode wont assert. */
-		if (fill_rx) {
-			count++;
-			if (count >= 8) {
-				rxq->read = i;
-				spin_unlock(&rxq->lock);
-				iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-				count = 0;
-				goto restart;
+
+		/* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+		 * try to claim the pre-allocated buffers from the allocator */
+		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
+			struct iwl_rb_allocator *rba = &trans_pcie->rba;
+			struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
+
+			if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 &&
+			    !emergency) {
+				/* Add the remaining 6 empty RBDs
+				 * for allocator use
+				 */
+				spin_lock(&rba->lock);
+				list_splice_tail_init(&rxq->rx_used,
+						      &rba->rbd_empty);
+				spin_unlock(&rba->lock);
 			}
+
+			/* If not ready - continue, will try to reclaim later.
+			 * No need to reschedule work - allocator exits only on
+			 * success */
+			if (!iwl_pcie_rx_allocator_get(trans, out)) {
+				/* If success - then RX_CLAIM_REQ_ALLOC
+				 * buffers were retrieved and should be added
+				 * to free list */
+				rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+				for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
+					list_add_tail(&out[j]->list,
+						      &rxq->rx_free);
+					rxq->free_count++;
+				}
+			}
+		}
+		if (emergency) {
+			count++;
+			if (count == 8) {
+				count = 0;
+				if (rxq->used_count < RX_QUEUE_SIZE / 3)
+					emergency = false;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+				spin_lock(&rxq->lock);
+			}
+		}
+		/* handle restock for three cases, can be all of them at once:
+		 * - we just pulled buffers from the allocator
+		 * - we have 8+ unstolen pages accumulated
+		 * - we are in emergency and allocated buffers
+		 */
+		if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
+			rxq->read = i;
+			spin_unlock(&rxq->lock);
+			iwl_pcie_rxq_restock(trans);
+			goto restart;
 		}
 	}
 
@@ -757,10 +1052,20 @@
 	rxq->read = i;
 	spin_unlock(&rxq->lock);
 
-	if (fill_rx)
-		iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
-	else
-		iwl_pcie_rxq_restock(trans);
+	/*
+	 * handle a case where in emergency there are some unallocated RBDs.
+	 * those RBDs are in the used list, but are not tracked by the queue's
+	 * used_count which counts allocator owned RBDs.
+	 * unallocated emergency RBDs must be allocated on exit, otherwise
+	 * when called again the function may not be in emergency mode and
+	 * they will be handed to the allocator with no tracking in the RBD
+	 * allocator counters, which will lead to them never being claimed back
+	 * by the queue.
+	 * by allocating them here, they are now in the queue free list, and
+	 * will be restocked by the next call of iwl_pcie_rxq_restock.
+	 */
+	if (unlikely(emergency && count))
+		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
 
 	if (trans_pcie->napi.poll)
 		napi_gro_flush(&trans_pcie->napi, false);
@@ -772,6 +1077,7 @@
 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
 
 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
 	if (trans->cfg->internal_wimax_coex &&
@@ -795,6 +1101,9 @@
 	iwl_trans_fw_error(trans);
 	local_bh_enable();
 
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+		del_timer(&trans_pcie->txq[i].stuck_timer);
+
 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
 	wake_up(&trans_pcie->wait_command_queue);
 }
@@ -1003,7 +1312,9 @@
 
 		isr_stats->rfkill++;
 
+		mutex_lock(&trans_pcie->mutex);
 		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+		mutex_unlock(&trans_pcie->mutex);
 		if (hw_rfkill) {
 			set_bit(STATUS_RFKILL, &trans->status);
 			if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@ -1195,8 +1506,9 @@
 
 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
 
-	val |= CSR_DRAM_INT_TBL_ENABLE;
-	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+	val |= CSR_DRAM_INT_TBL_ENABLE |
+	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
+	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
 
 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
 
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 9e144e7..6ba7d30 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -780,8 +780,15 @@
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
 
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+		 * CPU1 to CPU2.
+		 * PAGING_SEPARATOR_SECTION delimiter - separate between
+		 * CPU2 non paged to CPU2 paging sec.
+		 */
 		if (!image->sec[i].data ||
-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 			IWL_DEBUG_FW(trans,
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     i);
@@ -829,8 +836,15 @@
 	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
 		last_read_idx = i;
 
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+		 * CPU1 to CPU2.
+		 * PAGING_SEPARATOR_SECTION delimiter - separate between
+		 * CPU2 non paged to CPU2 paging sec.
+		 */
 		if (!image->sec[i].data ||
-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
 			IWL_DEBUG_FW(trans,
 				     "Break since Data not valid or Empty section, sec = %d\n",
 				     i);
@@ -897,6 +911,14 @@
 		case PRPH_CLEARBIT:
 			iwl_clear_bits_prph(trans, addr, BIT(val));
 			break;
+		case PRPH_BLOCKBIT:
+			if (iwl_read_prph(trans, addr) & BIT(val)) {
+				IWL_ERR(trans,
+					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+					val, addr);
+				goto monitor;
+			}
+			break;
 		default:
 			IWL_ERR(trans, "FW debug - unknown OP %d\n",
 				dest->reg_ops[i].op);
@@ -904,6 +926,7 @@
 		}
 	}
 
+monitor:
 	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
 		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
 			       trans_pcie->fw_mon_phys >> dest->base_shift);
@@ -998,13 +1021,25 @@
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 				   const struct fw_img *fw, bool run_in_rfkill)
 {
-	int ret;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
+	int ret;
+
+	mutex_lock(&trans_pcie->mutex);
+
+	/* Someone called stop_device, don't try to start_fw */
+	if (trans_pcie->is_down) {
+		IWL_WARN(trans,
+			 "Can't start_fw since the HW hasn't been started\n");
+		ret = -EIO;
+		goto out;
+	}
 
 	/* This may fail if AMT took ownership of the device */
 	if (iwl_pcie_prepare_card_hw(trans)) {
 		IWL_WARN(trans, "Exit HW not ready\n");
-		return -EIO;
+		ret = -EIO;
+		goto out;
 	}
 
 	iwl_enable_rfkill_int(trans);
@@ -1016,15 +1051,17 @@
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-	if (hw_rfkill && !run_in_rfkill)
-		return -ERFKILL;
+	if (hw_rfkill && !run_in_rfkill) {
+		ret = -ERFKILL;
+		goto out;
+	}
 
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
 
 	ret = iwl_pcie_nic_init(trans);
 	if (ret) {
 		IWL_ERR(trans, "Unable to init nic\n");
-		return ret;
+		goto out;
 	}
 
 	/* make sure rfkill handshake bits are cleared */
@@ -1042,9 +1079,13 @@
 
 	/* Load the given image to the HW */
 	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-		return iwl_pcie_load_given_ucode_8000(trans, fw);
+		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
 	else
-		return iwl_pcie_load_given_ucode(trans, fw);
+		ret = iwl_pcie_load_given_ucode(trans, fw);
+
+out:
+	mutex_unlock(&trans_pcie->mutex);
+	return ret;
 }
 
 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
@@ -1053,11 +1094,18 @@
 	iwl_pcie_tx_start(trans, scd_addr);
 }
 
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill, was_hw_rfkill;
 
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->is_down)
+		return;
+
+	trans_pcie->is_down = true;
+
 	was_hw_rfkill = iwl_is_rfkill_set(trans);
 
 	/* tell the device to stop sending interrupts */
@@ -1147,14 +1195,36 @@
 	iwl_pcie_prepare_card_hw(trans);
 }
 
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	mutex_lock(&trans_pcie->mutex);
+	_iwl_trans_pcie_stop_device(trans, low_power);
+	mutex_unlock(&trans_pcie->mutex);
+}
+
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 {
+	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
 	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-		iwl_trans_pcie_stop_device(trans, true);
+		_iwl_trans_pcie_stop_device(trans, true);
 }
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans->wowlan_d0i3) {
+		/* Enable persistence mode to avoid reset */
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+	}
+
 	iwl_disable_interrupts(trans);
 
 	/*
@@ -1166,17 +1236,21 @@
 
 	iwl_pcie_disable_ict(trans);
 
+	synchronize_irq(trans_pcie->pci_dev->irq);
+
 	iwl_clear_bit(trans, CSR_GP_CNTRL,
 		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 	iwl_clear_bit(trans, CSR_GP_CNTRL,
 		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-	/*
-	 * reset TX queues -- some of their registers reset during S3
-	 * so if we don't reset everything here the D3 image would try
-	 * to execute some invalid memory upon resume
-	 */
-	iwl_trans_pcie_tx_reset(trans);
+	if (!trans->wowlan_d0i3) {
+		/*
+		 * reset TX queues -- some of their registers reset during S3
+		 * so if we don't reset everything here the D3 image would try
+		 * to execute some invalid memory upon resume
+		 */
+		iwl_trans_pcie_tx_reset(trans);
+	}
 
 	iwl_pcie_set_pwr(trans, true);
 }
@@ -1218,12 +1292,18 @@
 
 	iwl_pcie_set_pwr(trans, false);
 
-	iwl_trans_pcie_tx_reset(trans);
+	if (trans->wowlan_d0i3) {
+		iwl_clear_bit(trans, CSR_GP_CNTRL,
+			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	} else {
+		iwl_trans_pcie_tx_reset(trans);
 
-	ret = iwl_pcie_rx_init(trans);
-	if (ret) {
-		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
-		return ret;
+		ret = iwl_pcie_rx_init(trans);
+		if (ret) {
+			IWL_ERR(trans,
+				"Failed to resume the device (RX reset)\n");
+			return ret;
+		}
 	}
 
 	val = iwl_read32(trans, CSR_RESET);
@@ -1235,11 +1315,14 @@
 	return 0;
 }
 
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	bool hw_rfkill;
 	int err;
 
+	lockdep_assert_held(&trans_pcie->mutex);
+
 	err = iwl_pcie_prepare_card_hw(trans);
 	if (err) {
 		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
@@ -1256,20 +1339,38 @@
 	/* From now on, the op_mode will be kept updated about RF kill state */
 	iwl_enable_rfkill_int(trans);
 
+	/* Set is_down to false here so that...*/
+	trans_pcie->is_down = false;
+
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	if (hw_rfkill)
 		set_bit(STATUS_RFKILL, &trans->status);
 	else
 		clear_bit(STATUS_RFKILL, &trans->status);
+	/* ... rfkill can call stop_device and set it false if needed */
 	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
 	return 0;
 }
 
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	mutex_lock(&trans_pcie->mutex);
+	ret = _iwl_trans_pcie_start_hw(trans, low_power);
+	mutex_unlock(&trans_pcie->mutex);
+
+	return ret;
+}
+
 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+	mutex_lock(&trans_pcie->mutex);
+
 	/* disable interrupts - don't enable HW RF kill interrupt */
 	spin_lock(&trans_pcie->irq_lock);
 	iwl_disable_interrupts(trans);
@@ -1282,6 +1383,10 @@
 	spin_unlock(&trans_pcie->irq_lock);
 
 	iwl_pcie_disable_ict(trans);
+
+	mutex_unlock(&trans_pcie->mutex);
+
+	synchronize_irq(trans_pcie->pci_dev->irq);
 }
 
 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1342,6 +1447,7 @@
 	else
 		trans_pcie->rx_page_order = get_order(4 * 1024);
 
+	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
 	trans_pcie->command_names = trans_cfg->command_names;
 	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
 	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -1354,11 +1460,10 @@
 	 * As this function may be called again in some corner cases don't
 	 * do anything if NAPI was already initialized.
 	 */
-	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
+	if (!trans_pcie->napi.poll) {
 		init_dummy_netdev(&trans_pcie->napi_dev);
-		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
-				     &trans_pcie->napi_dev,
-				     iwl_pcie_dummy_napi_poll, 64);
+		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+			       iwl_pcie_dummy_napi_poll, 64);
 	}
 }
 
@@ -2185,6 +2290,47 @@
 	return prph_len;
 }
 
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+				   struct iwl_fw_error_dump_data **data,
+				   int allocated_rb_nums)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	struct iwl_rxq *rxq = &trans_pcie->rxq;
+	u32 i, r, j, rb_len = 0;
+
+	spin_lock(&rxq->lock);
+
+	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+
+	for (i = rxq->read, j = 0;
+	     i != r && j < allocated_rb_nums;
+	     i = (i + 1) & RX_QUEUE_MASK, j++) {
+		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+		struct iwl_fw_error_dump_rb *rb;
+
+		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+			       DMA_FROM_DEVICE);
+
+		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+		rb = (void *)(*data)->data;
+		rb->index = cpu_to_le32(i);
+		memcpy(rb->data, page_address(rxb->page), max_len);
+		/* remap the page so it can be safely unmapped and freed later */
+		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+						     max_len,
+						     DMA_FROM_DEVICE);
+
+		*data = iwl_fw_error_next_data(*data);
+	}
+
+	spin_unlock(&rxq->lock);
+
+	return rb_len;
+}
 #define IWL_CSR_TO_DUMP (0x250)
 
 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
@@ -2254,17 +2400,97 @@
 	return monitor_len;
 }
 
-static
-struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+			    struct iwl_fw_error_dump_data **data,
+			    u32 monitor_len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 len = 0;
+
+	if ((trans_pcie->fw_mon_page &&
+	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+	    trans->dbg_dest_tlv) {
+		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+		u32 base, write_ptr, wrap_cnt;
+
+		/* If there was a dest TLV - use the values from there */
+		if (trans->dbg_dest_tlv) {
+			write_ptr =
+				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+		} else {
+			base = MON_BUFF_BASE_ADDR;
+			write_ptr = MON_BUFF_WRPTR;
+			wrap_cnt = MON_BUFF_CYCLE_CNT;
+		}
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+		fw_mon_data = (void *)(*data)->data;
+		fw_mon_data->fw_mon_wr_ptr =
+			cpu_to_le32(iwl_read_prph(trans, write_ptr));
+		fw_mon_data->fw_mon_cycle_cnt =
+			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+		fw_mon_data->fw_mon_base_ptr =
+			cpu_to_le32(iwl_read_prph(trans, base));
+
+		len += sizeof(**data) + sizeof(*fw_mon_data);
+		if (trans_pcie->fw_mon_page) {
+			/*
+			 * The firmware is now asserted, it won't write anything
+			 * to the buffer. CPU can take ownership to fetch the
+			 * data. The buffer will be handed back to the device
+			 * before the firmware will be restarted.
+			 */
+			dma_sync_single_for_cpu(trans->dev,
+						trans_pcie->fw_mon_phys,
+						trans_pcie->fw_mon_size,
+						DMA_FROM_DEVICE);
+			memcpy(fw_mon_data->data,
+			       page_address(trans_pcie->fw_mon_page),
+			       trans_pcie->fw_mon_size);
+
+			monitor_len = trans_pcie->fw_mon_size;
+		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+			/*
+			 * Update pointers to reflect actual values after
+			 * shifting
+			 */
+			base = iwl_read_prph(trans, base) <<
+			       trans->dbg_dest_tlv->base_shift;
+			iwl_trans_read_mem(trans, base, fw_mon_data->data,
+					   monitor_len / sizeof(u32));
+		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+			monitor_len =
+				iwl_trans_pci_dump_marbh_monitor(trans,
+								 fw_mon_data,
+								 monitor_len);
+		} else {
+			/* Didn't match anything - output no monitor data */
+			monitor_len = 0;
+		}
+
+		len += monitor_len;
+		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+	}
+
+	return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+			  struct iwl_fw_dbg_trigger_tlv *trigger)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_fw_error_dump_data *data;
 	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	struct iwl_fw_error_dump_txcmd *txcmd;
 	struct iwl_trans_dump_data *dump_data;
-	u32 len;
+	u32 len, num_rbs;
 	u32 monitor_len;
 	int i, ptr;
+	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
 
 	/* transport dump header */
 	len = sizeof(*dump_data);
@@ -2273,22 +2499,6 @@
 	len += sizeof(*data) +
 		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
 
-	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
-
-	/* PRPH registers */
-	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
-		/* The range includes both boundaries */
-		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
-			iwl_prph_dump_addr[i].start + 4;
-
-		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
-			num_bytes_in_chunk;
-	}
-
-	/* FH registers */
-	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
-
 	/* FW monitor */
 	if (trans_pcie->fw_mon_page) {
 		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
@@ -2316,6 +2526,45 @@
 		monitor_len = 0;
 	}
 
+	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		dump_data = vzalloc(len);
+		if (!dump_data)
+			return NULL;
+
+		data = (void *)dump_data->data;
+		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+		dump_data->len = len;
+
+		return dump_data;
+	}
+
+	/* CSR registers */
+	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+	/* PRPH registers */
+	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+		/* The range includes both boundaries */
+		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+			iwl_prph_dump_addr[i].start + 4;
+
+		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
+		       num_bytes_in_chunk;
+	}
+
+	/* FH registers */
+	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+
+	if (dump_rbs) {
+		/* RBs */
+		num_rbs = le16_to_cpu(ACCESS_ONCE(
+				      trans_pcie->rxq.rb_stts->closed_rb_num))
+				      & 0x0FFF;
+		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+		len += num_rbs * (sizeof(*data) +
+				  sizeof(struct iwl_fw_error_dump_rb) +
+				  (PAGE_SIZE << trans_pcie->rx_page_order));
+	}
+
 	dump_data = vzalloc(len);
 	if (!dump_data)
 		return NULL;
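The RB dump sizing above is plain ring arithmetic: the number of buffers to dump is the distance from the driver's read pointer to the hardware's closed_rb_num, wrapped through the queue mask. A worked sketch of that wrap-around follows (assuming a 256-entry RX queue, i.e. RX_QUEUE_MASK == 0xFF, and 4 KiB receive buffers - both are assumptions for the example, not values taken from this patch):

#include <stdio.h>

#define RX_QUEUE_MASK 0xFF	/* assumed: 256-entry RX queue */
#define RB_SIZE (4 * 1024)	/* assumed: 4 KiB receive buffer */

int main(void)
{
	unsigned int closed_rb_num = 0x23;	/* HW has closed up to entry 0x23 */
	unsigned int read = 0xF0;		/* driver read pointer, already wrapped */

	/* distance on the ring, handling wrap-around */
	unsigned int num_rbs = (closed_rb_num - read) & RX_QUEUE_MASK;

	printf("RBs to dump: %u (%u payload bytes)\n",
	       num_rbs, num_rbs * RB_SIZE);	/* 51 RBs, 208896 bytes */
	return 0;
}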
@@ -2352,74 +2601,10 @@
 	len += iwl_trans_pcie_dump_prph(trans, &data);
 	len += iwl_trans_pcie_dump_csr(trans, &data);
 	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
-	/* data is already pointing to the next section */
+	if (dump_rbs)
+		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
-	if ((trans_pcie->fw_mon_page &&
-	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
-	    trans->dbg_dest_tlv) {
-		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
-		u32 base, write_ptr, wrap_cnt;
-
-		/* If there was a dest TLV - use the values from there */
-		if (trans->dbg_dest_tlv) {
-			write_ptr =
-				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
-			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
-			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
-		} else {
-			base = MON_BUFF_BASE_ADDR;
-			write_ptr = MON_BUFF_WRPTR;
-			wrap_cnt = MON_BUFF_CYCLE_CNT;
-		}
-
-		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
-		fw_mon_data = (void *)data->data;
-		fw_mon_data->fw_mon_wr_ptr =
-			cpu_to_le32(iwl_read_prph(trans, write_ptr));
-		fw_mon_data->fw_mon_cycle_cnt =
-			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
-		fw_mon_data->fw_mon_base_ptr =
-			cpu_to_le32(iwl_read_prph(trans, base));
-
-		len += sizeof(*data) + sizeof(*fw_mon_data);
-		if (trans_pcie->fw_mon_page) {
-			/*
-			 * The firmware is now asserted, it won't write anything
-			 * to the buffer. CPU can take ownership to fetch the
-			 * data. The buffer will be handed back to the device
-			 * before the firmware will be restarted.
-			 */
-			dma_sync_single_for_cpu(trans->dev,
-						trans_pcie->fw_mon_phys,
-						trans_pcie->fw_mon_size,
-						DMA_FROM_DEVICE);
-			memcpy(fw_mon_data->data,
-			       page_address(trans_pcie->fw_mon_page),
-			       trans_pcie->fw_mon_size);
-
-			monitor_len = trans_pcie->fw_mon_size;
-		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
-			/*
-			 * Update pointers to reflect actual values after
-			 * shifting
-			 */
-			base = iwl_read_prph(trans, base) <<
-			       trans->dbg_dest_tlv->base_shift;
-			iwl_trans_read_mem(trans, base, fw_mon_data->data,
-					   monitor_len / sizeof(u32));
-		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
-			monitor_len =
-				iwl_trans_pci_dump_marbh_monitor(trans,
-								 fw_mon_data,
-								 monitor_len);
-		} else {
-			/* Didn't match anything - output no monitor data */
-			monitor_len = 0;
-		}
-
-		len += monitor_len;
-		data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
-	}
+	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 	dump_data->len = len;
 
@@ -2482,12 +2667,15 @@
 	if (!trans)
 		return ERR_PTR(-ENOMEM);
 
+	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+
 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 	trans_pcie->trans = trans;
 	spin_lock_init(&trans_pcie->irq_lock);
 	spin_lock_init(&trans_pcie->reg_lock);
 	spin_lock_init(&trans_pcie->ref_lock);
+	mutex_init(&trans_pcie->mutex);
 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
 	ret = pci_enable_device(pdev);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 607acb5..a8c8a4a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -219,8 +219,6 @@
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
 	sta_id = tx_cmd->sta_id;
 	sec_ctl = tx_cmd->sec_ctl;
 
@@ -239,6 +237,9 @@
 	if (trans_pcie->bc_table_dword)
 		len = DIV_ROUND_UP(len, 4);
 
+	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+		return;
+
 	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -387,11 +388,18 @@
 
 	/* first TB is never freed - it's the scratchbuf data */
 
-	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-				 iwl_pcie_tfd_tb_get_len(tfd, i),
-				 DMA_TO_DEVICE);
-
+	for (i = 1; i < num_tbs; i++) {
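+		/* TBs flagged in the bitmap were mapped with dma_map_page(),
+		 * the others with dma_map_single()
+		 */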
+		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+			dma_unmap_page(trans->dev,
+				       iwl_pcie_tfd_tb_get_addr(tfd, i),
+				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(trans->dev,
+					 iwl_pcie_tfd_tb_get_addr(tfd, i),
+					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 DMA_TO_DEVICE);
+	}
 	tfd->num_tbs = 0;
 }
 
@@ -467,7 +475,7 @@
 
 	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
-	return 0;
+	return num_tbs;
 }
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -915,6 +923,7 @@
 		}
 	}
 
+	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
 	if (trans->cfg->base_params->num_of_queues > 20)
 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1320,13 +1329,24 @@
 	int idx;
 	u16 copy_size, cmd_size, scratch_size;
 	bool had_nocopy = false;
+	u8 group_id = iwl_cmd_groupid(cmd->id);
 	int i, ret;
 	u32 cmd_pos;
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-	copy_size = sizeof(out_cmd->hdr);
-	cmd_size = sizeof(out_cmd->hdr);
+	if (WARN(!trans_pcie->wide_cmd_header &&
+		 group_id > IWL_ALWAYS_LONG_GROUP,
+		 "unsupported wide command %#x\n", cmd->id))
+		return -EINVAL;
+
+	if (group_id != 0) {
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+		cmd_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		copy_size = sizeof(struct iwl_cmd_header);
+		cmd_size = sizeof(struct iwl_cmd_header);
+	}
 
 	/* need one for the header if the first is NOCOPY */
 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1416,16 +1436,32 @@
 		out_meta->source = cmd;
 
 	/* set up the header */
+	if (group_id != 0) {
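+		/* non-zero group id: use the extended (wide) command header */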
+		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr_wide.group_id = group_id;
+		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+		out_cmd->hdr_wide.length =
+			cpu_to_le16(cmd_size -
+				    sizeof(struct iwl_cmd_header_wide));
+		out_cmd->hdr_wide.reserved = 0;
+		out_cmd->hdr_wide.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+						 INDEX_TO_SEQ(q->write_ptr));
 
-	out_cmd->hdr.cmd = cmd->id;
-	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence =
-		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-					 INDEX_TO_SEQ(q->write_ptr));
+		cmd_pos = sizeof(struct iwl_cmd_header_wide);
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+						 INDEX_TO_SEQ(q->write_ptr));
+		out_cmd->hdr.group_id = 0;
+
+		cmd_pos = sizeof(struct iwl_cmd_header);
+		copy_size = sizeof(struct iwl_cmd_header);
+	}
 
 	/* and copy the data that needs to be copied */
-	cmd_pos = offsetof(struct iwl_device_cmd, payload);
-	copy_size = sizeof(out_cmd->hdr);
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		int copy;
 
@@ -1464,9 +1500,10 @@
 	}
 
 	IWL_DEBUG_HC(trans,
-		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
-		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+		     group_id, out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
 	/* start the TFD with the scratchbuf */
@@ -1516,12 +1553,14 @@
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 
+	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+		     sizeof(out_meta->flags) * BITS_PER_BYTE);
 	out_meta->flags = cmd->flags;
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 		kzfree(txq->entries[idx].free_buf);
 	txq->entries[idx].free_buf = dup_buf;
 
-	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1552,15 +1591,13 @@
 /*
  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- *	(put in setup_rx_handlers)
  *
  * If an Rx buffer has an async callback associated with it the callback
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
+			    struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1599,7 +1636,6 @@
 		meta->source->resp_pkt = pkt;
 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
-		meta->source->handler_status = handler_status;
 	}
 
 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1762,7 +1798,7 @@
 		      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr;
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
@@ -1771,9 +1807,10 @@
 	void *tb1_addr;
 	u16 len, tb1_len, tb2_len;
 	bool wait_write_ptr;
-	__le16 fc = hdr->frame_control;
-	u8 hdr_len = ieee80211_hdrlen(fc);
+	__le16 fc;
+	u8 hdr_len;
 	u16 wifi_seq;
+	int i;
 
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
@@ -1782,6 +1819,18 @@
 		      "TX on unused queue %d\n", txq_id))
 		return -EINVAL;
 
+	if (skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
+
+	/* mac80211 always puts the full header into the SKB's head,
+	 * so there's no need to check if it's readable there
+	 */
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+	hdr_len = ieee80211_hdrlen(fc);
+
 	spin_lock(&txq->lock);
 
 	/* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1812,6 +1861,7 @@
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
+	out_meta->flags = 0;
 
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
@@ -1845,9 +1895,9 @@
 
 	/*
 	 * Set up TFD's third entry to point directly to remainder
-	 * of skb, if any (802.11 null frames have no payload).
+	 * of skb's head, if any
 	 */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
@@ -1860,6 +1910,29 @@
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+	}
+
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
@@ -1869,7 +1942,7 @@
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 99e873d..520bef8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2399,6 +2399,7 @@
 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 	ieee80211_hw_set(hw, MFP_CAPABLE);
 	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, TDLS_WIDER_BW);
 	if (rctbl)
 		ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
 
@@ -2676,7 +2677,7 @@
 	dev->netdev_ops = &hwsim_netdev_ops;
 	dev->destructor = free_netdev;
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
 	eth_zero_addr(dev->dev_addr);
 	dev->dev_addr[0] = 0x12;
@@ -3120,8 +3121,10 @@
 		goto failure;
 
 	rc = netlink_register_notifier(&hwsim_netlink_notifier);
-	if (rc)
+	if (rc) {
+		genl_unregister_family(&hwsim_genl_family);
 		goto failure;
+	}
 
 	return 0;
 
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 7217da4..57a80cf 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -112,7 +112,9 @@
 	if (!skb)
 		return;
 
-	ieee80211_rx_ni(dev->hw, skb);
+	spin_lock(&dev->mac_lock);
+	ieee80211_rx(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
 }
 
 static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
@@ -236,23 +238,42 @@
 	skb = q->e[q->start].skb;
 	trace_mt_tx_dma_done(dev, skb);
 
-	mt7601u_tx_status(dev, skb);
+	__skb_queue_tail(&dev->tx_skb_done, skb);
+	tasklet_schedule(&dev->tx_tasklet);
 
 	if (q->used == q->entries - q->entries / 8)
 		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));
 
 	q->start = (q->start + 1) % q->entries;
 	q->used--;
+out:
+	spin_unlock_irqrestore(&dev->tx_lock, flags);
+}
 
-	if (urb->status)
-		goto out;
+static void mt7601u_tx_tasklet(unsigned long data)
+{
+	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
+	struct sk_buff_head skbs;
+	unsigned long flags;
+
+	__skb_queue_head_init(&skbs);
+
+	spin_lock_irqsave(&dev->tx_lock, flags);
 
 	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
 	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
 		queue_delayed_work(dev->stat_wq, &dev->stat_work,
 				   msecs_to_jiffies(10));
-out:
+
+	skb_queue_splice_init(&dev->tx_skb_done, &skbs);
+
 	spin_unlock_irqrestore(&dev->tx_lock, flags);
+
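+	/* report TX status to mac80211 outside of the tx_lock critical section */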
+	while (!skb_queue_empty(&skbs)) {
+		struct sk_buff *skb = __skb_dequeue(&skbs);
+
+		mt7601u_tx_status(dev, skb);
+	}
 }
 
 static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
@@ -475,6 +496,7 @@
 {
 	int ret = -ENOMEM;
 
+	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
 	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);
 
 	ret = mt7601u_alloc_tx(dev);
@@ -502,4 +524,6 @@
 
 	mt7601u_free_rx(dev);
 	mt7601u_free_tx(dev);
+
+	tasklet_kill(&dev->tx_tasklet);
 }
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index df3dd56..26190fd 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -454,8 +454,10 @@
 	spin_lock_init(&dev->tx_lock);
 	spin_lock_init(&dev->rx_lock);
 	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->mac_lock);
 	spin_lock_init(&dev->con_mon_lock);
 	atomic_set(&dev->avg_ampdu_len, 1);
+	skb_queue_head_init(&dev->tx_skb_done);
 
 	dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0);
 	if (!dev->stat_wq) {
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index 7514bce..e21c53e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -181,7 +181,11 @@
 	}
 
 	mt76_mac_fill_tx_status(dev, &info, stat);
+
+	spin_lock_bh(&dev->mac_lock);
 	ieee80211_tx_status_noskb(dev->hw, sta, &info);
+	spin_unlock_bh(&dev->mac_lock);
+
 	rcu_read_unlock();
 }
 
diff --git a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
index 9102be6b..428bd2f 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
+++ b/drivers/net/wireless/mediatek/mt7601u/mt7601u.h
@@ -141,12 +141,13 @@
 /**
  * struct mt7601u_dev - adapter structure
  * @lock:		protects @wcid->tx_rate.
+ * @mac_lock:		locks out mac80211's tx status and rx paths.
  * @tx_lock:		protects @tx_q and changes of MT7601U_STATE_*_STATS
-			flags in @state.
+ *			flags in @state.
  * @rx_lock:		protects @rx_q.
  * @con_mon_lock:	protects @ap_bssid, @bcn_*, @avg_rssi.
  * @mutex:		ensures exclusive access from mac80211 callbacks.
- * @vendor_req_mutex:	ensures atomicity of vendor requests.
+ * @vendor_req_mutex:	protects @vend_buf and ensures atomicity of split writes.
  * @reg_atomic_mutex:	ensures atomicity of indirect register accesses
  *			(accesses to RF and BBP).
  * @hw_atomic_mutex:	ensures exclusive access to HW during critical
@@ -177,6 +178,7 @@
 	struct mt76_wcid __rcu *wcid[N_WCIDS];
 
 	spinlock_t lock;
+	spinlock_t mac_lock;
 
 	const u16 *beacon_offsets;
 
@@ -184,6 +186,8 @@
 	struct mt7601u_eeprom_params *ee;
 
 	struct mutex vendor_req_mutex;
+	void *vend_buf;
+
 	struct mutex reg_atomic_mutex;
 	struct mutex hw_atomic_mutex;
 
@@ -197,7 +201,9 @@
 
 	/* TX */
 	spinlock_t tx_lock;
+	struct tasklet_struct tx_tasklet;
 	struct mt7601u_tx_queue *tx_q;
+	struct sk_buff_head tx_skb_done;
 
 	atomic_t avg_ampdu_len;
 
diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
index 0be2080..a0a33dc 100644
--- a/drivers/net/wireless/mediatek/mt7601u/tx.c
+++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
@@ -116,7 +116,10 @@
 	ieee80211_tx_info_clear_status(info);
 	info->status.rates[0].idx = -1;
 	info->flags |= IEEE80211_TX_STAT_ACK;
+
+	spin_lock(&dev->mac_lock);
 	ieee80211_tx_status(dev->hw, skb);
+	spin_unlock(&dev->mac_lock);
 }
 
 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index 54dba40..416c604 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -92,10 +92,9 @@
 	complete(cmpl);
 }
 
-static int
-__mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-			 const u8 direction, const u16 val, const u16 offset,
-			 void *buf, const size_t buflen)
+int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
+			   const u8 direction, const u16 val, const u16 offset,
+			   void *buf, const size_t buflen)
 {
 	int i, ret;
 	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
@@ -110,6 +109,8 @@
 		trace_mt_vend_req(dev, pipe, req, req_type, val, offset,
 				  buf, buflen, ret);
 
+		if (ret == -ENODEV)
+			set_bit(MT7601U_STATE_REMOVED, &dev->state);
 		if (ret >= 0 || ret == -ENODEV)
 			return ret;
 
@@ -122,25 +123,6 @@
 	return ret;
 }
 
-int
-mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req,
-		       const u8 direction, const u16 val, const u16 offset,
-		       void *buf, const size_t buflen)
-{
-	int ret;
-
-	mutex_lock(&dev->vendor_req_mutex);
-
-	ret = __mt7601u_vendor_request(dev, req, direction, val, offset,
-				       buf, buflen);
-	if (ret == -ENODEV)
-		set_bit(MT7601U_STATE_REMOVED, &dev->state);
-
-	mutex_unlock(&dev->vendor_req_mutex);
-
-	return ret;
-}
-
 void mt7601u_vendor_reset(struct mt7601u_dev *dev)
 {
 	mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT,
@@ -150,19 +132,21 @@
 u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset)
 {
 	int ret;
-	__le32 reg;
-	u32 val;
+	u32 val = ~0;
 
 	WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset);
 
+	mutex_lock(&dev->vendor_req_mutex);
+
 	ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN,
-				     0, offset, &reg, sizeof(reg));
-	val = le32_to_cpu(reg);
-	if (ret > 0 && ret != sizeof(reg)) {
+				     0, offset, dev->vend_buf, MT_VEND_BUF);
+	if (ret == MT_VEND_BUF)
+		val = get_unaligned_le32(dev->vend_buf);
+	else if (ret > 0)
 		dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n",
 			ret, offset);
-		val = ~0;
-	}
+
+	mutex_unlock(&dev->vendor_req_mutex);
 
 	trace_reg_read(dev, offset, val);
 	return val;
@@ -173,12 +157,17 @@
 {
 	int ret;
 
+	mutex_lock(&dev->vendor_req_mutex);
+
 	ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
 				     val & 0xffff, offset, NULL, 0);
-	if (ret)
-		return ret;
-	return mt7601u_vendor_request(dev, req, USB_DIR_OUT,
-				      val >> 16, offset + 2, NULL, 0);
+	if (!ret)
+		ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT,
+					     val >> 16, offset + 2, NULL, 0);
+
+	mutex_unlock(&dev->vendor_req_mutex);
+
+	return ret;
 }
 
 void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val)
@@ -275,6 +264,12 @@
 
 	usb_set_intfdata(usb_intf, dev);
 
+	dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL);
+	if (!dev->vend_buf) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	ret = mt7601u_assign_pipes(usb_intf, dev);
 	if (ret)
 		goto err;
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.h b/drivers/net/wireless/mediatek/mt7601u/usb.h
index 49e188f..bc18202 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.h
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.h
@@ -23,6 +23,8 @@
 
 #define MT_VEND_DEV_MODE_RESET	1
 
+#define MT_VEND_BUF		sizeof(__le32)
+
 enum mt_vendor_req {
 	MT_VEND_DEV_MODE = 1,
 	MT_VEND_WRITE = 2,
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 48edf38..317d991 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -9,36 +9,36 @@
 	  mwifiex.
 
 config MWIFIEX_SDIO
-	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897"
+	tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8997"
 	depends on MWIFIEX && MMC
 	select FW_LOADER
 	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8786/8787/8797/8887/8897 chipsets with SDIO interface.
+	  8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_sdio.
 
 config MWIFIEX_PCIE
-	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897"
+	tristate "Marvell WiFi-Ex Driver for PCIE 8766/8897/8997"
 	depends on MWIFIEX && PCI
 	select FW_LOADER
 	select WANT_DEV_COREDUMP
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8766/8897 chipsets with PCIe interface.
+	  8766/8897/8997 chipsets with PCIe interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_pcie.
 
 config MWIFIEX_USB
-	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897"
+	tristate "Marvell WiFi-Ex Driver for USB8766/8797/8897/8997"
 	depends on MWIFIEX && USB
 	select FW_LOADER
 	---help---
 	  This adds support for wireless adapters based on Marvell
-	  8797/8897 chipset with USB interface.
+	  8797/8897/8997 chipsets with USB interface.
 
 	  If you choose to build it as a module, it will be called
 	  mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index b15e4c7..ff63cb5 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -19,6 +19,7 @@
 
 #include "cfg80211.h"
 #include "main.h"
+#include "11n.h"
 
 static char *reg_alpha2;
 module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@
 	},
 };
 
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
 	.limits = mwifiex_ap_sta_limits,
 	.num_different_channels = 1,
 	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
 	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
 	.beacon_int_infra_match = true,
+	.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				BIT(NL80211_CHAN_WIDTH_20) |
+				BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+	.limits = mwifiex_ap_sta_limits,
+	.num_different_channels = 1,
+	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
+	.beacon_int_infra_match = true,
+	.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+				BIT(NL80211_CHAN_WIDTH_20) |
+				BIT(NL80211_CHAN_WIDTH_40) |
+				BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_drcs = {
+	.limits = mwifiex_ap_sta_limits,
+	.num_different_channels = 2,
+	.n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+	.max_interfaces = MWIFIEX_MAX_BSS_NUM,
+	.beacon_int_infra_match = true,
 };
 
 /*
@@ -441,7 +468,7 @@
  *      - Country codes
  *      - Sub bands (first channel, number of channels, maximum Tx power)
  */
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 {
 	u8 no_of_triplet = 0;
 	struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@
 		priv->bss_type = MWIFIEX_BSS_TYPE_STA;
 		break;
 	case NL80211_IFTYPE_P2P_CLIENT:
-	case NL80211_IFTYPE_P2P_GO:
 		priv->bss_role =  MWIFIEX_BSS_ROLE_STA;
 		priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
 		break;
+	case NL80211_IFTYPE_P2P_GO:
+		priv->bss_role =  MWIFIEX_BSS_ROLE_UAP;
+		priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+		break;
 	case NL80211_IFTYPE_AP:
 		priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
 		priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@
 	case NL80211_IFTYPE_P2P_GO:
 		switch (type) {
 		case NL80211_IFTYPE_STATION:
-			if (mwifiex_cfg80211_init_p2p_client(priv))
+			if (mwifiex_cfg80211_deinit_p2p(priv))
 				return -EFAULT;
+			priv->adapter->curr_iface_comb.p2p_intf--;
+			priv->adapter->curr_iface_comb.sta_intf++;
 			dev->ieee80211_ptr->iftype = type;
 			break;
 		case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@
 {
 	struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
 	struct mwifiex_adapter *adapter = priv->adapter;
+	struct sk_buff *skb, *tmp;
 
 #ifdef CONFIG_DEBUG_FS
 	mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@
 
 	mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
+	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
 	if (netif_carrier_ok(priv->netdev))
 		netif_carrier_off(priv->netdev);
 
@@ -2954,7 +2990,6 @@
 					MWIFIEX_MEF_MAX_BYTESEQ)) {
 			mwifiex_dbg(priv->adapter, ERROR,
 				    "Pattern not supported\n");
-			kfree(mef_entry);
 			return -EOPNOTSUPP;
 		}
 
@@ -3036,9 +3071,12 @@
 
 	mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
 
-	if (wowlan->n_patterns || wowlan->magic_pkt)
+	if (wowlan->n_patterns || wowlan->magic_pkt) {
 		ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
 						   &mef_entry[1], wowlan);
+		if (ret)
+			goto err;
+	}
 
 	if (!mef_cfg.criteria)
 		mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@
 	ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
 			HostCmd_ACT_GEN_SET, 0,
 			&mef_cfg, true);
+
+err:
 	kfree(mef_entry);
 	return ret;
 }
@@ -3360,6 +3400,72 @@
 }
 
 static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+				  const u8 *addr, u8 oper_class,
+				  struct cfg80211_chan_def *chandef)
+{
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+	u16 chan;
+	u8 second_chan_offset, band;
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	sta_ptr = mwifiex_get_sta_entry(priv, addr);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+	if (!sta_ptr) {
+		wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+			  __func__, addr);
+		return -ENOENT;
+	}
+
+	if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+	      WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+		wiphy_err(wiphy, "%pM does not support tdls cs\n", addr);
+		return -ENOENT;
+	}
+
+	if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+	    sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+		wiphy_err(wiphy, "channel switch is running, abort request\n");
+		return -EALREADY;
+	}
+
+	chan = chandef->chan->hw_value;
+	second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+	band = chandef->chan->band;
+	mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+	return 0;
+}
+
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+					 struct net_device *dev,
+					 const u8 *addr)
+{
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+	struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+	sta_ptr = mwifiex_get_sta_entry(priv, addr);
+	spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+	if (!sta_ptr) {
+		wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+			  __func__, addr);
+	} else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+		     sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+		     sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+		wiphy_err(wiphy, "tdls chan switch not initialized by %pM\n",
+			  addr);
+	} else {
+		mwifiex_stop_tdls_cs(priv, addr);
+	}
+}
+
+static int
 mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
 			     const u8 *mac, struct station_parameters *params)
 {
@@ -3575,6 +3681,8 @@
 	.set_coalesce = mwifiex_cfg80211_set_coalesce,
 	.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
 	.tdls_oper = mwifiex_cfg80211_tdls_oper,
+	.tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+	.tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
 	.add_station = mwifiex_cfg80211_add_station,
 	.change_station = mwifiex_cfg80211_change_station,
 	.get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@
 	else
 		wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 
-	wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+	if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+	else if (adapter->is_hw_11ac_capable)
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+	else
+		wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
 	wiphy->n_iface_combinations = 1;
 
 	/* Initialize cipher suits */
@@ -3709,6 +3822,9 @@
 			   NL80211_FEATURE_INACTIVITY_TIMER |
 			   NL80211_FEATURE_NEED_OBSS_SCAN;
 
+	if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+		wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
 	if (adapter->fw_api_ver == MWIFIEX_FW_V15)
 		wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
 
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 207da40..45ae38e 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -167,8 +167,6 @@
 		mwifiex_dbg(adapter, ERROR,
 			    "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
 			cmd_code);
-		if (cmd_node->wait_q_enabled)
-			mwifiex_complete_cmd(adapter, cmd_node);
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 		queue_work(adapter->workqueue, &adapter->main_work);
 		return -1;
@@ -809,17 +807,6 @@
 	adapter->is_cmd_timedout = 0;
 
 	resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
-	if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
-		mwifiex_dbg(adapter, ERROR,
-			    "CMD_RESP: %#x been canceled\n",
-			    le16_to_cpu(resp->command));
-		mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
-		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
-		adapter->curr_cmd = NULL;
-		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
-		return -1;
-	}
-
 	if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
 		/* Copy original response back to response buffer */
 		struct mwifiex_ds_misc_cmd *hostcmd;
@@ -989,12 +976,13 @@
 
 		if (cmd_node->wait_q_enabled) {
 			adapter->cmd_wait_q.status = -ETIMEDOUT;
-			wake_up_interruptible(&adapter->cmd_wait_q.wait);
 			mwifiex_cancel_pending_ioctl(adapter);
 		}
 	}
-	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
 		mwifiex_init_fw_complete(adapter);
+		return;
+	}
 
 	if (adapter->if_ops.device_dump)
 		adapter->if_ops.device_dump(adapter);
@@ -1024,6 +1012,7 @@
 		adapter->curr_cmd->wait_q_enabled = false;
 		adapter->cmd_wait_q.status = -1;
 		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		/* do not recycle; we are probably still waiting for the response */
 	}
 	/* Cancel all pending command */
 	spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
@@ -1032,11 +1021,8 @@
 		list_del(&cmd_node->list);
 		spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
 
-		if (cmd_node->wait_q_enabled) {
+		if (cmd_node->wait_q_enabled)
 			adapter->cmd_wait_q.status = -1;
-			mwifiex_complete_cmd(adapter, cmd_node);
-			cmd_node->wait_q_enabled = false;
-		}
 		mwifiex_recycle_cmd_node(adapter, cmd_node);
 		spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
 	}
@@ -1094,12 +1080,18 @@
 	    (adapter->curr_cmd->wait_q_enabled)) {
 		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
 		cmd_node = adapter->curr_cmd;
-		cmd_node->wait_q_enabled = false;
-		cmd_node->cmd_flag |= CMD_F_CANCELED;
-		mwifiex_recycle_cmd_node(adapter, cmd_node);
-		mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+		/* setting curr_cmd to NULL is quite dangerous, because
+		 * mwifiex_process_cmdresp checks that curr_cmd is != NULL
+		 * at the beginning and then relies on it, dereferencing
+		 * it at will.
+		 * this probably works since mwifiex_cmd_timeout_func
+		 * is the only caller of this function and no response
+		 * will be processed at that point
+		 */
 		adapter->curr_cmd = NULL;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
+
+		mwifiex_recycle_cmd_node(adapter, cmd_node);
 	}
 
 	/* Cancel all pending scan command */
@@ -1129,7 +1121,6 @@
 			}
 		}
 	}
-	adapter->cmd_wait_q.status = -1;
 }
 
 /*
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 51e3447..098e1f1 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -141,6 +141,9 @@
 	TDLS_SETUP_COMPLETE,
 	TDLS_SETUP_FAILURE,
 	TDLS_LINK_TEARDOWN,
+	TDLS_CHAN_SWITCHING,
+	TDLS_IN_BASE_CHAN,
+	TDLS_IN_OFF_CHAN,
 };
 
 enum mwifiex_tdls_error_code {
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index cd09051..3ec2ac8 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -169,14 +169,17 @@
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
 #define TLV_TYPE_CHANNEL_STATS      (PROPRIETARY_TLV_BASE_ID + 198)
 #define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
 #define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE           (PROPRIETARY_TLV_BASE_ID + 206)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -200,6 +203,7 @@
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
 #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP	(IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG			      0x0112
+#define HostCmd_CMD_TDLS_CONFIG                       0x0100
+#define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 
@@ -432,7 +438,6 @@
 
 
 #define CMD_F_HOSTCMD           (1 << 0)
-#define CMD_F_CANCELED          (1 << 1)
 
 #define HostCmd_CMD_ID_MASK             0x0fff
 
@@ -509,8 +514,10 @@
 #define EVENT_TDLS_GENERIC_EVENT        0x00000052
 #define EVENT_RADAR_DETECTED		0x00000053
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
+#define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
+#define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT		0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE	0X00000076
 
@@ -545,7 +552,27 @@
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
 #define ACT_TDLS_CONFIG            0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN  3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN      3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT  7
+#define TDLS_EVENT_START_CHAN_SWITCH   8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL	       0
+#define TDLS_OFF_CHANNEL	       1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT	  0x06
+#define ACT_TDLS_CS_STOP	  0x07
+#define ACT_TDLS_CS_PARAMS	  0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME	2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK	10
+#define MWIFIEX_DEF_THR_DIRECTLINK	0
+#define MWIFIEX_DEF_CS_TIME		10
+#define MWIFIEX_DEF_CS_TIMEOUT		16
+#define MWIFIEX_DEF_CS_REG_CLASS	12
+#define MWIFIEX_DEF_CS_PERIODICITY	1
 
 #define MWIFIEX_FW_V15		   15
 
@@ -658,6 +685,7 @@
 enum mwifiex_chan_scan_mode_bitmasks {
 	MWIFIEX_PASSIVE_SCAN = BIT(0),
 	MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
+	MWIFIEX_HIDDEN_SSID_REPORT = BIT(4),
 };
 
 struct mwifiex_chan_scan_param_set {
@@ -1131,6 +1159,13 @@
 	u8 ht_info;
 } __packed;
 
+struct mwifiex_tx_pause_tlv {
+	struct mwifiex_ie_types_header header;
+	u8 peermac[ETH_ALEN];
+	u8 tx_pause;
+	u8 pkt_cnt;
+} __packed;
+
 enum Host_Sleep_Action {
 	HS_CONFIGURE = 0x0001,
 	HS_ACTIVATE  = 0x0002,
@@ -1249,6 +1284,36 @@
 	u8 peer_mac[ETH_ALEN];
 } __packed;
 
+struct mwifiex_tdls_config {
+	__le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+	u8 unit_time;
+	u8 thr_otherlink;
+	u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+	u8 peer_mac[ETH_ALEN];
+	u8 primary_chan;
+	u8 second_chan_offset;
+	u8 band;
+	__le16 switch_time;
+	__le16 switch_timeout;
+	u8 reg_class;
+	u8 periodicity;
+} __packed;
+
+struct mwifiex_tdls_stop_cs_params {
+	u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+	__le16 tdls_action;
+	u8 tdls_data[1];
+} __packed;
+
 struct mwifiex_chan_desc {
 	__le16 start_freq;
 	u8 chan_width;
@@ -1370,6 +1435,11 @@
 	u8    tlv_buffer[1];
 } __packed;
 
+struct mwifiex_ie_types_bss_mode {
+	struct mwifiex_ie_types_header  header;
+	u8 bss_mode;
+} __packed;
+
 struct mwifiex_ie_types_bss_scan_rsp {
 	struct mwifiex_ie_types_header header;
 	u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@
 	__le32 passed;
 } __packed;
 
+struct mwifiex_ie_types_multi_chan_info {
+	struct mwifiex_ie_types_header header;
+	__le16 status;
+	u8 tlv_buffer[0];
+} __packed;
+
 struct meas_rpt_map {
 	u8 rssi:3;
 	u8 unmeasured:1;
@@ -1927,10 +2003,18 @@
 	__le16 events;
 } __packed;
 
+struct chan_switch_result {
+	u8 cur_chan;
+	u8 status;
+	u8 reason;
+} __packed;
+
 struct mwifiex_tdls_generic_event {
 	__le16 type;
 	u8 peer_mac[ETH_ALEN];
 	union {
+		struct chan_switch_result switch_result;
+		u8 cs_stop_reason;
 		__le16 reason_code;
 		__le16 reserved;
 	} u;
@@ -1971,6 +2055,11 @@
 	struct coalesce_receive_filt_rule rule[0];
 } __packed;
 
+struct host_cmd_ds_multi_chan_policy {
+	__le16 action;
+	__le16 policy;
+} __packed;
+
 struct host_cmd_ds_command {
 	__le16 command;
 	__le16 size;
@@ -2035,9 +2124,11 @@
 		struct host_cmd_ds_sta_list sta_list;
 		struct host_cmd_11ac_vht_cfg vht_cfg;
 		struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+		struct host_cmd_ds_tdls_config tdls_config;
 		struct host_cmd_ds_tdls_oper tdls_oper;
 		struct host_cmd_ds_chan_rpt_req chan_rpt_req;
 		struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+		struct host_cmd_ds_multi_chan_policy mc_policy;
 	} params;
 } __packed;
 
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index 0ba8945..abf52d2 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -409,6 +409,8 @@
 	int ret;
 
 	ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+	if (ret)
 		return ret;
 
 	return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@
 						   ar_ie, &priv->assocresp_idx);
 
 done:
+	kfree(gen_ie);
 	kfree(beacon_ie);
 	kfree(pr_ie);
 	kfree(ar_ie);
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index df7fdc0..5d3ae63 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -77,7 +77,7 @@
 
 	priv->media_connected = false;
 	eth_broadcast_addr(priv->curr_addr);
-
+	priv->port_open = false;
 	priv->pkt_tx_ctrl = 0;
 	priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
 	priv->data_rate = 0;	/* Initially indicate the rate as auto */
@@ -301,7 +301,7 @@
 	adapter->iface_limit.sta_intf = MWIFIEX_MAX_STA_NUM;
 	adapter->iface_limit.uap_intf = MWIFIEX_MAX_UAP_NUM;
 	adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
-
+	adapter->active_scan_triggered = false;
 	setup_timer(&adapter->wakeup_timer, wakeup_timer_fn,
 		    (unsigned long)adapter);
 }
@@ -499,6 +499,7 @@
 		INIT_LIST_HEAD(&priv->sta_list);
 		INIT_LIST_HEAD(&priv->auto_tdls_list);
 		skb_queue_head_init(&priv->tdls_txq);
+		skb_queue_head_init(&priv->bypass_txq);
 
 		spin_lock_init(&priv->tx_ba_stream_tbl_lock);
 		spin_lock_init(&priv->rx_reorder_tbl_lock);
@@ -550,11 +551,6 @@
 		}
 	}
 
-	if (adapter->if_ops.init_fw_port) {
-		if (adapter->if_ops.init_fw_port(adapter))
-			return -1;
-	}
-
 	for (i = 0; i < adapter->priv_num; i++) {
 		if (adapter->priv[i]) {
 			ret = mwifiex_sta_init_cmd(adapter->priv[i], first_sta,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 56b024a..3cda1f9 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -783,6 +783,8 @@
 
 	if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
 		priv->scan_block = true;
+	else
+		priv->port_open = true;
 
 done:
 	/* Need to indicate IOCTL complete */
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3ba4e0e..278dc94 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -276,6 +276,7 @@
 		     !adapter->pm_wakeup_fw_try) &&
 		    (is_command_pending(adapter) ||
 		     !skb_queue_empty(&adapter->tx_data_q) ||
+		     !mwifiex_bypass_txlist_empty(adapter) ||
 		     !mwifiex_wmm_lists_empty(adapter))) {
 			adapter->pm_wakeup_fw_try = true;
 			mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@
 
 			if ((!adapter->scan_chan_gap_enabled &&
 			     adapter->scan_processing) || adapter->data_sent ||
+			     mwifiex_is_tdls_chan_switching
+			     (mwifiex_get_priv(adapter,
+					       MWIFIEX_BSS_ROLE_STA)) ||
 			    (mwifiex_wmm_lists_empty(adapter) &&
+			     mwifiex_bypass_txlist_empty(adapter) &&
 			     skb_queue_empty(&adapter->tx_data_q))) {
 				if (adapter->cmd_sent || adapter->curr_cmd ||
+					!mwifiex_is_send_cmd_allowed
+						(mwifiex_get_priv(adapter,
+						MWIFIEX_BSS_ROLE_STA)) ||
 				    (!is_command_pending(adapter)))
 					break;
 			}
@@ -342,7 +350,9 @@
 			continue;
 		}
 
-		if (!adapter->cmd_sent && !adapter->curr_cmd) {
+		if (!adapter->cmd_sent && !adapter->curr_cmd &&
+		    mwifiex_is_send_cmd_allowed
+		    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			if (mwifiex_exec_next_cmd(adapter) == -1) {
 				ret = -1;
 				break;
@@ -365,7 +375,25 @@
 
 		if ((adapter->scan_chan_gap_enabled ||
 		     !adapter->scan_processing) &&
-		    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+		    !adapter->data_sent &&
+		    !mwifiex_bypass_txlist_empty(adapter) &&
+		    !mwifiex_is_tdls_chan_switching
+			(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+			mwifiex_process_bypass_tx(adapter);
+			if (adapter->hs_activated) {
+				adapter->is_hs_configured = false;
+				mwifiex_hs_activated_event
+					(mwifiex_get_priv
+					 (adapter, MWIFIEX_BSS_ROLE_ANY),
+					 false);
+			}
+		}
+
+		if ((adapter->scan_chan_gap_enabled ||
+		     !adapter->scan_processing) &&
+		    !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+		    !mwifiex_is_tdls_chan_switching
+			(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
 			mwifiex_wmm_process_tx(adapter);
 			if (adapter->hs_activated) {
 				adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@
 		if (adapter->delay_null_pkt && !adapter->cmd_sent &&
 		    !adapter->curr_cmd && !is_command_pending(adapter) &&
 		    (mwifiex_wmm_lists_empty(adapter) &&
+		     mwifiex_bypass_txlist_empty(adapter) &&
 		     skb_queue_empty(&adapter->tx_data_q))) {
 			if (!mwifiex_send_null_packet
 			    (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@
 	return 0;
 }
 
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+			struct sk_buff *skb)
+{
+	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
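+	/* EAPOL frames, management frames and (on STA) TDLS setup frames are
+	 * sent via the bypass TX queue rather than the WMM queues
+	 */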
+	if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+	    mwifiex_is_skb_mgmt_frame(skb) ||
+	    (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+	     ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+	     (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+		mwifiex_dbg(priv->adapter, DATA,
+			    "bypass txqueue; eth type %#x, mgmt %d\n",
+			     ntohs(eth_hdr->h_proto),
+			     mwifiex_is_skb_mgmt_frame(skb));
+		return true;
+	}
+
+	return false;
+}
 /*
  * Add buffer into wmm tx queue and queue work to transmit it.
  */
@@ -666,8 +715,14 @@
 		}
 	}
 
-	atomic_inc(&priv->adapter->tx_pending);
-	mwifiex_wmm_add_buf_txqueue(priv, skb);
+	if (mwifiex_bypass_tx_queue(priv, skb)) {
+		atomic_inc(&priv->adapter->tx_pending);
+		atomic_inc(&priv->adapter->bypass_tx_pending);
+		mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+	} else {
+		atomic_inc(&priv->adapter->tx_pending);
+		mwifiex_wmm_add_buf_txqueue(priv, skb);
+	}
 
 	mwifiex_queue_main_work(priv->adapter);
 
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index ae98b5b..6b95121 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -281,6 +281,7 @@
 	u8 amsdu_in_ampdu;
 	u16 total_pkt_count;
 	bool tdls_link;
+	bool tx_paused;
 };
 
 struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@
 struct mwifiex_wmm_desc {
 	struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
 	u32 packets_out[MAX_NUM_TID];
+	u32 pkts_paused[MAX_NUM_TID];
 	/* spin lock to protect ra_list */
 	spinlock_t ra_list_spinlock;
 	struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@
 	u8 frame_type;
 	u8 curr_addr[ETH_ALEN];
 	u8 media_connected;
+	u8 port_open;
 	u32 num_tx_timeout;
 	/* track consecutive timeout */
 	u8 tx_timeout_cnt;
@@ -662,6 +665,8 @@
 	struct cfg80211_beacon_data beacon_after;
 	struct mwifiex_11h_intf_state state_11h;
 	struct mwifiex_ds_mem_rw mem_rw;
+	struct sk_buff_head bypass_txq;
+	struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
 };
 
 
@@ -768,6 +773,7 @@
 	u8 tdls_status;
 	struct mwifiex_tdls_capab tdls_cap;
 	struct mwifiex_station_stats stats;
+	u8 tx_pause;
 };
 
 struct mwifiex_auto_tdls_peer {
@@ -831,6 +837,7 @@
 	wait_queue_head_t init_wait_q;
 	void *card;
 	struct mwifiex_if_ops if_ops;
+	atomic_t bypass_tx_pending;
 	atomic_t rx_pending;
 	atomic_t tx_pending;
 	atomic_t cmd_pending;
@@ -979,6 +986,8 @@
 	u8 coex_win_size;
 	u8 coex_tx_win_size;
 	u8 coex_rx_win_size;
+	bool drcs_enabled;
+	u8 active_scan_triggered;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1339,21 @@
 	return 0;
 }
 
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+	switch (status) {
+	case TDLS_SETUP_COMPLETE:
+	case TDLS_CHAN_SWITCHING:
+	case TDLS_IN_BASE_CHAN:
+	case TDLS_IN_OFF_CHAN:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
 			     u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1458,6 +1482,9 @@
 mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 struct mwifiex_sta_node *
 mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
 int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
 				 u8 action_code, u8 dialog_token,
 				 u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1515,13 @@
 void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
 void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
 void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+			  u8 primary_chan, u8 second_chan_offset, u8 band);
+
 int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
 					  struct host_cmd_ds_command *cmd,
 					  void *data_buf);
@@ -1522,6 +1556,12 @@
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+				    struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+				      struct sk_buff *event_skb);
+
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 77b9055..408b684 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -266,12 +266,17 @@
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		.driver_data = (unsigned long) &mwifiex_pcie8766,
+		.driver_data = (unsigned long)&mwifiex_pcie8766,
 	},
 	{
 		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8897,
 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
-		.driver_data = (unsigned long) &mwifiex_pcie8897,
+		.driver_data = (unsigned long)&mwifiex_pcie8897,
+	},
+	{
+		PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		.driver_data = (unsigned long)&mwifiex_pcie8997,
 	},
 	{},
 };
@@ -1082,6 +1087,7 @@
 			card->txbd_rdptr++;
 			break;
 		case PCIE_DEVICE_ID_MARVELL_88W8897:
+		case PCIE_DEVICE_ID_MARVELL_88W8997:
 			card->txbd_rdptr += reg->ring_tx_start_ptr;
 			break;
 		}
@@ -1179,6 +1185,7 @@
 			card->txbd_wrptr++;
 			break;
 		case PCIE_DEVICE_ID_MARVELL_88W8897:
+		case PCIE_DEVICE_ID_MARVELL_88W8997:
 			card->txbd_wrptr += reg->ring_tx_start_ptr;
 			break;
 		}
@@ -1807,6 +1814,8 @@
 
 	if (!card->evt_buf_list[rdptr]) {
 		skb_push(skb, INTF_HEADER_LEN);
+		skb_put(skb, MAX_EVENT_SIZE - skb->len);
+		memset(skb->data, 0, MAX_EVENT_SIZE);
 		if (mwifiex_map_pci_memory(adapter, skb,
 					   MAX_EVENT_SIZE,
 					   PCI_DMA_FROMDEVICE))
@@ -2731,3 +2740,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(PCIE8766_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(PCIE8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(PCIE8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h
index 0e7ee8b..48e549c 100644
--- a/drivers/net/wireless/mwifiex/pcie.h
+++ b/drivers/net/wireless/mwifiex/pcie.h
@@ -30,10 +30,12 @@
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
 #define PCIE8897_DEFAULT_FW_NAME "mrvl/pcie8897_uapsta.bin"
+#define PCIE8997_DEFAULT_FW_NAME "mrvl/pcie8997_uapsta.bin"
 
 #define PCIE_VENDOR_ID_MARVELL              (0x11ab)
 #define PCIE_DEVICE_ID_MARVELL_88W8766P		(0x2b30)
 #define PCIE_DEVICE_ID_MARVELL_88W8897		(0x2b38)
+#define PCIE_DEVICE_ID_MARVELL_88W8997		(0x2b42)
 
 /* Constants for Buffer Descriptor (BD) rings */
 #define MWIFIEX_MAX_TXRX_BD			0x20
@@ -197,7 +199,38 @@
 	.sleep_cookie = 0,
 	.fw_dump_ctrl = 0xcf4,
 	.fw_dump_start = 0xcf8,
-	.fw_dump_end = 0xcff
+	.fw_dump_end = 0xcff,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+	.cmd_addr_lo = PCIE_SCRATCH_0_REG,
+	.cmd_addr_hi = PCIE_SCRATCH_1_REG,
+	.cmd_size = PCIE_SCRATCH_2_REG,
+	.fw_status = PCIE_SCRATCH_3_REG,
+	.cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+	.cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+	.tx_rdptr = 0xC1A4,
+	.tx_wrptr = 0xC1A8,
+	.rx_rdptr = 0xC1A8,
+	.rx_wrptr = 0xC1A4,
+	.evt_rdptr = PCIE_SCRATCH_10_REG,
+	.evt_wrptr = PCIE_SCRATCH_11_REG,
+	.drv_rdy = PCIE_SCRATCH_12_REG,
+	.tx_start_ptr = 16,
+	.tx_mask = 0x0FFF0000,
+	.tx_wrap_mask = 0x01FF0000,
+	.rx_mask = 0x00000FFF,
+	.rx_wrap_mask = 0x000001FF,
+	.tx_rollover_ind = BIT(28),
+	.rx_rollover_ind = BIT(12),
+	.evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+	.ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+	.ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+	.ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+	.ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+	.ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+	.pfu_enabled = 1,
+	.sleep_cookie = 0,
 };
 
 struct mwifiex_pcie_device {
@@ -227,6 +260,15 @@
 	.can_ext_scan = true,
 };
 
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+	.firmware       = PCIE8997_DEFAULT_FW_NAME,
+	.reg            = &mwifiex_reg_8997,
+	.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.can_dump_fw = false,
+	.can_ext_scan = true,
+};
+
 struct mwifiex_evt_buf_desc {
 	u64 paddr;
 	u16 len;
@@ -325,6 +367,7 @@
 			return 1;
 		break;
 	case PCIE_DEVICE_ID_MARVELL_88W8897:
+	case PCIE_DEVICE_ID_MARVELL_88W8997:
 		if (((card->txbd_wrptr & reg->tx_mask) !=
 		     (card->txbd_rdptr & reg->tx_mask)) ||
 		    ((card->txbd_wrptr & reg->tx_rollover_ind) ==
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index baf9715..5847863 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -527,7 +527,8 @@
 
 			if (ch->flags & IEEE80211_CHAN_NO_IR)
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
-					|= MWIFIEX_PASSIVE_SCAN;
+					|= (MWIFIEX_PASSIVE_SCAN |
+					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
 				scan_chan_list[chan_idx].chan_scan_mode_bitmap
 					&= ~MWIFIEX_PASSIVE_SCAN;
@@ -823,6 +824,7 @@
 	int i;
 	u8 ssid_filter;
 	struct mwifiex_ie_types_htcap *ht_cap;
+	struct mwifiex_ie_types_bss_mode *bss_mode;
 
 	/* The tlv_buf_len is calculated for each scan command.  The TLVs added
 	   in this routine will be preserved since the routine that sends the
@@ -908,6 +910,10 @@
 				wildcard_ssid_tlv->max_ssid_length =
 							IEEE80211_MAX_SSID_LEN;
 
+			if (!memcmp(user_scan_in->ssid_list[i].ssid,
+				    "DIRECT-", 7))
+				wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
 			memcpy(wildcard_ssid_tlv->ssid,
 			       user_scan_in->ssid_list[i].ssid, ssid_len);
 
@@ -968,6 +974,15 @@
 	else
 		*max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
 
+	if (adapter->ext_scan) {
+		bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+		bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+		bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+		bss_mode->bss_mode = scan_cfg_out->bss_mode;
+		tlv_pos += sizeof(bss_mode->header) +
+			   le16_to_cpu(bss_mode->header.len);
+	}
+
 	/* If the input config or adapter has the number of Probes set,
 	   add tlv */
 	if (num_probes) {
@@ -1035,7 +1050,8 @@
 			if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
 				(scan_chan_list +
 				 chan_idx)->chan_scan_mode_bitmap
-					|= MWIFIEX_PASSIVE_SCAN;
+					|= (MWIFIEX_PASSIVE_SCAN |
+					    MWIFIEX_HIDDEN_SSID_REPORT);
 			else
 				(scan_chan_list +
 				 chan_idx)->chan_scan_mode_bitmap
@@ -1586,6 +1602,62 @@
 	return ret;
 }
 
+/* This function checks if the SSID string contains all zeroes or has zero length */
+static bool mwifiex_is_hidden_ssid(struct cfg80211_ssid *ssid)
+{
+	int idx;
+
+	for (idx = 0; idx < ssid->ssid_len; idx++) {
+		if (ssid->ssid[idx])
+			return false;
+	}
+
+	return true;
+}
+
+/* This function checks if any hidden SSID was found on the passive scan
+ * channels and saves those channels for a subsequent specific-SSID active scan.
+ */
+static int mwifiex_save_hidden_ssid_channels(struct mwifiex_private *priv,
+					     struct cfg80211_bss *bss)
+{
+	struct mwifiex_bssdescriptor *bss_desc;
+	int ret;
+	int chid;
+
+	/* Allocate and fill new bss descriptor */
+	bss_desc = kzalloc(sizeof(*bss_desc), GFP_KERNEL);
+	if (!bss_desc)
+		return -ENOMEM;
+
+	ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
+	if (ret)
+		goto done;
+
+	if (mwifiex_is_hidden_ssid(&bss_desc->ssid)) {
+		mwifiex_dbg(priv->adapter, INFO, "found hidden SSID\n");
+		for (chid = 0; chid < MWIFIEX_USER_SCAN_CHAN_MAX; chid++) {
+			if (priv->hidden_chan[chid].chan_number ==
+			    bss->channel->hw_value)
+				break;
+
+			if (!priv->hidden_chan[chid].chan_number) {
+				priv->hidden_chan[chid].chan_number =
+					bss->channel->hw_value;
+				priv->hidden_chan[chid].radio_type =
+					bss->channel->band;
+				priv->hidden_chan[chid].scan_type =
+					MWIFIEX_SCAN_TYPE_ACTIVE;
+				break;
+			}
+		}
+	}
+
+done:
+	kfree(bss_desc);
+	return 0;
+}
+
 static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
 					  struct cfg80211_bss *bss)
 {
@@ -1775,6 +1847,14 @@
 				    .mac_address, ETH_ALEN))
 				mwifiex_update_curr_bss_params(priv, bss);
 			cfg80211_put_bss(priv->wdev.wiphy, bss);
+
+			if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+			    (chan->flags & IEEE80211_CHAN_NO_IR)) {
+				mwifiex_dbg(adapter, INFO,
+					    "radar or passive channel %d\n",
+					    channel);
+				mwifiex_save_hidden_ssid_channels(priv, bss);
+			}
 		}
 	} else {
 		mwifiex_dbg(adapter, WARN, "missing BSS channel IE\n");
@@ -1798,6 +1878,57 @@
 	}
 }
 
+/* This function checks if any hidden SSID was found on passive scan channels
+ * and performs a specific-SSID active scan on those channels.
+ */
+static int
+mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
+{
+	int ret;
+	struct mwifiex_adapter *adapter = priv->adapter;
+	u8 id = 0;
+	struct mwifiex_user_scan_cfg  *user_scan_cfg;
+
+	if (adapter->active_scan_triggered) {
+		adapter->active_scan_triggered = false;
+		return 0;
+	}
+
+	if (!priv->hidden_chan[0].chan_number) {
+		mwifiex_dbg(adapter, INFO, "No BSS with hidden SSID found on DFS channels\n");
+		return 0;
+	}
+	user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
+
+	if (!user_scan_cfg)
+		return -ENOMEM;
+
+	memset(user_scan_cfg, 0, sizeof(*user_scan_cfg));
+
+	for (id = 0; id < MWIFIEX_USER_SCAN_CHAN_MAX; id++) {
+		if (!priv->hidden_chan[id].chan_number)
+			break;
+		memcpy(&user_scan_cfg->chan_list[id],
+		       &priv->hidden_chan[id],
+		       sizeof(struct mwifiex_user_scan_chan));
+	}
+
+	adapter->active_scan_triggered = true;
+	user_scan_cfg->num_ssids = priv->scan_request->n_ssids;
+	user_scan_cfg->ssid_list = priv->scan_request->ssids;
+
+	ret = mwifiex_scan_networks(priv, user_scan_cfg);
+	kfree(user_scan_cfg);
+
+	memset(&priv->hidden_chan, 0, sizeof(priv->hidden_chan));
+
+	if (ret) {
+		dev_err(priv->adapter->dev, "scan failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
 static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
 {
 	struct mwifiex_adapter *adapter = priv->adapter;
@@ -1811,6 +1942,8 @@
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
+		mwifiex_active_scan_req_for_passive_chan(priv);
+
 		if (!adapter->ext_scan)
 			mwifiex_complete_scan(priv);
 
@@ -1837,15 +1970,17 @@
 		adapter->scan_processing = false;
 		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-		if (priv->scan_request) {
-			mwifiex_dbg(adapter, INFO,
-				    "info: aborting scan\n");
-			cfg80211_scan_done(priv->scan_request, 1);
-			priv->scan_request = NULL;
-		} else {
-			priv->scan_aborting = false;
-			mwifiex_dbg(adapter, INFO,
-				    "info: scan already aborted\n");
+		if (!adapter->active_scan_triggered) {
+			if (priv->scan_request) {
+				mwifiex_dbg(adapter, INFO,
+					    "info: aborting scan\n");
+				cfg80211_scan_done(priv->scan_request, 1);
+				priv->scan_request = NULL;
+			} else {
+				priv->scan_aborting = false;
+				mwifiex_dbg(adapter, INFO,
+					    "info: scan already aborted\n");
+			}
 		}
 	} else {
 		/* Get scan command from scan_pending_q and put to
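The scan.c hunks above remember passive/DFS channels on which a beacon carried an empty (hidden) SSID and, once the regular scan finishes, trigger one more active scan restricted to those channels and the user's SSID list. Below is a minimal userspace sketch of just that bookkeeping; the table size, names and channel number are illustrative, not the driver's.

/* Sketch of the hidden-SSID channel bookkeeping; not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CHANS 4	/* illustrative, stands in for MWIFIEX_USER_SCAN_CHAN_MAX */

struct hidden_chan { uint8_t chan; bool active_scan; };

static bool is_hidden_ssid(const uint8_t *ssid, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (ssid[i])
			return false;
	return true;		/* all zeroes or zero length */
}

/* Remember a channel once; it is later rescanned actively with the
 * user-supplied SSID list so hidden networks on DFS channels show up. */
static void save_hidden_chan(struct hidden_chan *tbl, uint8_t chan)
{
	int i;

	for (i = 0; i < MAX_CHANS; i++) {
		if (tbl[i].chan == chan)
			return;			/* already queued */
		if (!tbl[i].chan) {
			tbl[i].chan = chan;
			tbl[i].active_scan = true;
			return;
		}
	}
}

int main(void)
{
	struct hidden_chan tbl[MAX_CHANS] = { { 0 } };
	const uint8_t ssid[4] = { 0 };

	if (is_hidden_ssid(ssid, sizeof(ssid)))
		save_hidden_chan(tbl, 52);
	printf("chan %d queued for active scan: %d\n",
	       tbl[0].chan, tbl[0].active_scan);
	return 0;
}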
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index a0b121f..5d05c6f 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -51,6 +51,10 @@
 
 static struct semaphore add_remove_card_sem;
 
+static struct memory_type_mapping generic_mem_type_map[] = {
+	{"DUMP", NULL, 0, 0xDD},
+};
+
 static struct memory_type_mapping mem_type_mapping_tbl[] = {
 	{"ITCM", NULL, 0, 0xF0},
 	{"DTCM", NULL, 0, 0xF1},
@@ -91,6 +95,7 @@
 		return -ENOMEM;
 
 	card->func = func;
+	card->device_id = id;
 
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
@@ -107,6 +112,7 @@
 		card->mp_tx_agg_buf_size = data->mp_tx_agg_buf_size;
 		card->mp_rx_agg_buf_size = data->mp_rx_agg_buf_size;
 		card->can_dump_fw = data->can_dump_fw;
+		card->fw_dump_enh = data->fw_dump_enh;
 		card->can_auto_tdls = data->can_auto_tdls;
 		card->can_ext_scan = data->can_ext_scan;
 	}
@@ -287,6 +293,8 @@
 #define SDIO_DEVICE_ID_MARVELL_8887   (0x9135)
 /* Device ID for SD8801 */
 #define SDIO_DEVICE_ID_MARVELL_8801   (0x9139)
+/* Device ID for SD8997 */
+#define SDIO_DEVICE_ID_MARVELL_8997   (0x9141)
 
 
 /* WLAN IDs */
@@ -303,6 +311,8 @@
 		.driver_data = (unsigned long)&mwifiex_sdio_sd8887},
 	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8801),
 		.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
+	{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
+		.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
 	{},
 };
 
@@ -910,6 +920,8 @@
 	if (!fwbuf)
 		return -ENOMEM;
 
+	sdio_claim_host(card->func);
+
 	/* Perform firmware data transfer */
 	do {
 		/* The host polls for the DN_LD_CARD_RDY and CARD_IO_READY
@@ -1014,6 +1026,8 @@
 		offset += txlen;
 	} while (true);
 
+	sdio_release_host(card->func);
+
 	mwifiex_dbg(adapter, MSG,
 		    "info: FW download over, size %d bytes\n", offset);
 
@@ -1964,8 +1978,13 @@
 	adapter->dev = &func->dev;
 
 	strcpy(adapter->fw_name, card->firmware);
-	adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-	adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+	if (card->fw_dump_enh) {
+		adapter->mem_type_mapping_tbl = generic_mem_type_map;
+		adapter->num_mem_types = 1;
+	} else {
+		adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
+		adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+	}
 
 	return 0;
 }
@@ -2107,26 +2126,46 @@
 		    port, card->mp_data_port_mask);
 }
 
+static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+{
+	struct sdio_func *func = card->func;
+	const struct sdio_device_id *device_id = card->device_id;
+
+	/* TODO: mmc_hw_reset does not require destroying and re-probing the
+	 * whole adapter. Hence there was no need for this Rube Goldberg
+	 * design to reload the fw from an external workqueue. If we don't
+	 * destroy the adapter we could reload the fw from
+	 * mwifiex_main_work_queue directly.
+	 * The real difficulty with fw reset is to restore all the user
+	 * settings applied through ioctl. By destroying and recreating the
+	 * adapter, we take the easy way out, since we rely on user space to
+	 * restore them. We assume that user space will treat the new
+	 * incarnation of the adapter (interfaces) as if they had just been
+	 * discovered and initialize them from scratch.
+	 */
+
+	mwifiex_sdio_remove(func);
+
+	/* power cycle the adapter */
+	sdio_claim_host(func);
+	mmc_hw_reset(func->card->host);
+	sdio_release_host(func);
+
+	mwifiex_sdio_probe(func, device_id);
+}
+
 static struct mwifiex_adapter *save_adapter;
 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
 	struct sdio_mmc_card *card = adapter->card;
-	struct mmc_host *target = card->func->card->host;
 
-	/* The actual reset operation must be run outside of driver thread.
-	 * This is because mmc_remove_host() will cause the device to be
-	 * instantly destroyed, and the driver then needs to end its thread,
-	 * leading to a deadlock.
-	 *
-	 * We run it in a totally independent workqueue.
+	/* TODO card pointer is unprotected. If the adapter is removed
+	 * physically, the sdio core might trigger mwifiex_sdio_remove before
+	 * this workqueue is run, which will destroy the adapter struct. When
+	 * this workqueue eventually executes, it will dereference an invalid
+	 * adapter pointer.
 	 */
-
-	mwifiex_dbg(adapter, WARN, "Resetting card...\n");
-	mmc_remove_host(target);
-	/* 200ms delay is based on experiment with sdhci controller */
-	mdelay(200);
-	target->rescan_entered = 0; /* rescan non-removable cards */
-	mmc_add_host(target);
+	mwifiex_recreate_adapter(card);
 }
 
 /* This function read/write firmware */
@@ -2138,8 +2177,8 @@
 	int ret, tries;
 	u8 ctrl_data = 0;
 
-	sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl,
-		    &ret);
+	sdio_writeb(card->func, card->reg->fw_dump_host_ready,
+		    card->reg->fw_dump_ctrl, &ret);
 	if (ret) {
 		mwifiex_dbg(adapter, ERROR, "SDIO Write ERR\n");
 		return RDWR_STATUS_FAILURE;
@@ -2155,10 +2194,10 @@
 			break;
 		if (doneflag && ctrl_data == doneflag)
 			return RDWR_STATUS_DONE;
-		if (ctrl_data != FW_DUMP_HOST_READY) {
+		if (ctrl_data != card->reg->fw_dump_host_ready) {
 			mwifiex_dbg(adapter, WARN,
-				    "The ctrl reg was changed, re-try again!\n");
-			sdio_writeb(card->func, FW_DUMP_HOST_READY,
+				    "The ctrl reg was changed, re-try again\n");
+			sdio_writeb(card->func, card->reg->fw_dump_host_ready,
 				    card->reg->fw_dump_ctrl, &ret);
 			if (ret) {
 				mwifiex_dbg(adapter, ERROR, "SDIO write err\n");
@@ -2167,7 +2206,7 @@
 		}
 		usleep_range(100, 200);
 	}
-	if (ctrl_data == FW_DUMP_HOST_READY) {
+	if (ctrl_data == card->reg->fw_dump_host_ready) {
 		mwifiex_dbg(adapter, ERROR,
 			    "Fail to pull ctrl_data\n");
 		return RDWR_STATUS_FAILURE;
@@ -2300,10 +2339,129 @@
 	sdio_release_host(card->func);
 }
 
+static void mwifiex_sdio_generic_fw_dump(struct mwifiex_adapter *adapter)
+{
+	struct sdio_mmc_card *card = adapter->card;
+	struct memory_type_mapping *entry = &generic_mem_type_map[0];
+	unsigned int reg, reg_start, reg_end;
+	u8 start_flag = 0, done_flag = 0;
+	u8 *dbg_ptr, *end_ptr;
+	enum rdwr_status stat;
+	int ret = -1, tries;
+
+	if (!card->fw_dump_enh)
+		return;
+
+	if (entry->mem_ptr) {
+		vfree(entry->mem_ptr);
+		entry->mem_ptr = NULL;
+	}
+	entry->mem_size = 0;
+
+	mwifiex_pm_wakeup_card(adapter);
+	sdio_claim_host(card->func);
+
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
+
+	stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+	if (stat == RDWR_STATUS_FAILURE)
+		goto done;
+
+	reg_start = card->reg->fw_dump_start;
+	reg_end = card->reg->fw_dump_end;
+	for (reg = reg_start; reg <= reg_end; reg++) {
+		for (tries = 0; tries < MAX_POLL_TRIES; tries++) {
+			start_flag = sdio_readb(card->func, reg, &ret);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR,
+					    "SDIO read err\n");
+				goto done;
+			}
+			if (start_flag == 0)
+				break;
+			if (tries == MAX_POLL_TRIES) {
+				mwifiex_dbg(adapter, ERROR,
+					    "FW not ready to dump\n");
+				ret = -1;
+				goto done;
+			}
+		}
+		usleep_range(100, 200);
+	}
+
+	entry->mem_ptr = vmalloc(0xf0000 + 1);
+	if (!entry->mem_ptr) {
+		ret = -1;
+		goto done;
+	}
+	dbg_ptr = entry->mem_ptr;
+	entry->mem_size = 0xf0000;
+	end_ptr = dbg_ptr + entry->mem_size;
+
+	done_flag = entry->done_flag;
+	mwifiex_dbg(adapter, DUMP,
+		    "Start %s output, please wait...\n", entry->mem_name);
+
+	while (true) {
+		stat = mwifiex_sdio_rdwr_firmware(adapter, done_flag);
+		if (stat == RDWR_STATUS_FAILURE)
+			goto done;
+		for (reg = reg_start; reg <= reg_end; reg++) {
+			*dbg_ptr = sdio_readb(card->func, reg, &ret);
+			if (ret) {
+				mwifiex_dbg(adapter, ERROR,
+					    "SDIO read err\n");
+				goto done;
+			}
+			dbg_ptr++;
+			if (dbg_ptr >= end_ptr) {
+				u8 *tmp_ptr;
+
+				tmp_ptr = vmalloc(entry->mem_size + 0x4000 + 1);
+				if (!tmp_ptr)
+					goto done;
+
+				memcpy(tmp_ptr, entry->mem_ptr,
+				       entry->mem_size);
+				vfree(entry->mem_ptr);
+				entry->mem_ptr = tmp_ptr;
+				tmp_ptr = NULL;
+				dbg_ptr = entry->mem_ptr + entry->mem_size;
+				entry->mem_size += 0x4000;
+				end_ptr = entry->mem_ptr + entry->mem_size;
+			}
+		}
+		if (stat == RDWR_STATUS_DONE) {
+			entry->mem_size = dbg_ptr - entry->mem_ptr;
+			mwifiex_dbg(adapter, DUMP, "dump %s done size=0x%x\n",
+				    entry->mem_name, entry->mem_size);
+			ret = 0;
+			break;
+		}
+	}
+	mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
+
+done:
+	if (ret) {
+		mwifiex_dbg(adapter, ERROR, "firmware dump failed\n");
+		if (entry->mem_ptr) {
+			vfree(entry->mem_ptr);
+			entry->mem_ptr = NULL;
+		}
+		entry->mem_size = 0;
+	}
+	sdio_release_host(card->func);
+}
+
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
+	struct sdio_mmc_card *card = adapter->card;
+
 	mwifiex_drv_info_dump(adapter);
-	mwifiex_sdio_fw_dump(adapter);
+	if (card->fw_dump_enh)
+		mwifiex_sdio_generic_fw_dump(adapter);
+	else
+		mwifiex_sdio_fw_dump(adapter);
 	mwifiex_upload_device_dump(adapter);
 }
 
@@ -2510,3 +2668,4 @@
 MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
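Two themes run through the sdio.c changes: card reset now removes and re-probes only the SDIO function around an mmc_hw_reset() instead of bouncing the whole MMC host, and SD8997-class chips (fw_dump_enh) stream their firmware dump through a small register window into a buffer that is grown in 0x4000-byte steps. The fragment below is a userspace model of that grow-as-you-go buffer, with realloc() standing in for the driver's vmalloc/memcpy/vfree sequence; it is a sketch, not driver code.

/* Userspace model of the enhanced fw dump buffer growth. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dump_buf {
	uint8_t *data;
	size_t size;	/* bytes stored so far */
	size_t cap;	/* allocated capacity */
};

/* Append one byte read from the dump register window, extending the
 * buffer in 0x4000-byte steps when it fills up, as the new dump path does. */
static int dump_append(struct dump_buf *b, uint8_t byte)
{
	if (b->size == b->cap) {
		uint8_t *tmp = realloc(b->data, b->cap + 0x4000);

		if (!tmp)
			return -1;
		b->data = tmp;
		b->cap += 0x4000;
	}
	b->data[b->size++] = byte;
	return 0;
}

int main(void)
{
	struct dump_buf b = { 0 };
	int i;

	for (i = 0; i < 3; i++)
		dump_append(&b, 0xDD);
	printf("stored %zu bytes, capacity 0x%zx\n", b.size, b.cap);
	free(b.data);
	return 0;
}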
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6f645cf..b9fbc5cf 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -35,6 +35,7 @@
 #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin"
 #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
 #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
+#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
 
 #define BLOCK_MODE	1
 #define BYTE_MODE	0
@@ -222,6 +223,7 @@
 	u8 cmd_cfg_1;
 	u8 cmd_cfg_2;
 	u8 cmd_cfg_3;
+	u8 fw_dump_host_ready;
 	u8 fw_dump_ctrl;
 	u8 fw_dump_start;
 	u8 fw_dump_end;
@@ -257,11 +259,15 @@
 	bool supports_sdio_new_mode;
 	bool has_control_mask;
 	bool can_dump_fw;
+	bool fw_dump_enh;
 	bool can_auto_tdls;
 	bool can_ext_scan;
 
 	struct mwifiex_sdio_mpa_tx mpa_tx;
 	struct mwifiex_sdio_mpa_rx mpa_rx;
+
+	/* needed for card reset */
+	const struct sdio_device_id *device_id;
 };
 
 struct mwifiex_sdio_device {
@@ -275,6 +281,7 @@
 	bool supports_sdio_new_mode;
 	bool has_control_mask;
 	bool can_dump_fw;
+	bool fw_dump_enh;
 	bool can_auto_tdls;
 	bool can_ext_scan;
 };
@@ -350,6 +357,7 @@
 	.cmd_cfg_1 = 0xb9,
 	.cmd_cfg_2 = 0xba,
 	.cmd_cfg_3 = 0xbb,
+	.fw_dump_host_ready = 0xee,
 	.fw_dump_ctrl = 0xe2,
 	.fw_dump_start = 0xe3,
 	.fw_dump_end = 0xea,
@@ -361,6 +369,59 @@
 				 0x59, 0x5c, 0x5d},
 };
 
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+	.start_rd_port = 0,
+	.start_wr_port = 0,
+	.base_0_reg = 0xF8,
+	.base_1_reg = 0xF9,
+	.poll_reg = 0x5C,
+	.host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+			CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+	.host_int_rsr_reg = 0x4,
+	.host_int_status_reg = 0x0C,
+	.host_int_mask_reg = 0x08,
+	.status_reg_0 = 0xE8,
+	.status_reg_1 = 0xE9,
+	.sdio_int_mask = 0xff,
+	.data_port_mask = 0xffffffff,
+	.io_port_0_reg = 0xE4,
+	.io_port_1_reg = 0xE5,
+	.io_port_2_reg = 0xE6,
+	.max_mp_regs = 196,
+	.rd_bitmap_l = 0x10,
+	.rd_bitmap_u = 0x11,
+	.rd_bitmap_1l = 0x12,
+	.rd_bitmap_1u = 0x13,
+	.wr_bitmap_l = 0x14,
+	.wr_bitmap_u = 0x15,
+	.wr_bitmap_1l = 0x16,
+	.wr_bitmap_1u = 0x17,
+	.rd_len_p0_l = 0x18,
+	.rd_len_p0_u = 0x19,
+	.card_misc_cfg_reg = 0xd8,
+	.card_cfg_2_1_reg = 0xd9,
+	.cmd_rd_len_0 = 0xc0,
+	.cmd_rd_len_1 = 0xc1,
+	.cmd_rd_len_2 = 0xc2,
+	.cmd_rd_len_3 = 0xc3,
+	.cmd_cfg_0 = 0xc4,
+	.cmd_cfg_1 = 0xc5,
+	.cmd_cfg_2 = 0xc6,
+	.cmd_cfg_3 = 0xc7,
+	.fw_dump_host_ready = 0xcc,
+	.fw_dump_ctrl = 0xf0,
+	.fw_dump_start = 0xf1,
+	.fw_dump_end = 0xf8,
+	.func1_dump_reg_start = 0x10,
+	.func1_dump_reg_end = 0x17,
+	.func1_scratch_reg = 0xe8,
+	.func1_spec_reg_num = 13,
+	.func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+				 0x60, 0x61, 0x62, 0x64,
+				 0x65, 0x66, 0x68, 0x69,
+				 0x6a},
+};
+
 static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
 	.start_rd_port = 0,
 	.start_wr_port = 0,
@@ -469,6 +530,22 @@
 	.can_ext_scan = true,
 };
 
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+	.firmware = SD8997_DEFAULT_FW_NAME,
+	.reg = &mwifiex_reg_sd8997,
+	.max_ports = 32,
+	.mp_agg_pkt_limit = 16,
+	.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+	.mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+	.mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+	.supports_sdio_new_mode = true,
+	.has_control_mask = false,
+	.can_dump_fw = true,
+	.fw_dump_enh = true,
+	.can_auto_tdls = false,
+	.can_ext_scan = true,
+};
+
 static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
 	.firmware = SD8887_DEFAULT_FW_NAME,
 	.reg = &mwifiex_reg_sd8887,
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 037adcd..a49a80d 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -26,6 +26,10 @@
 #include "11n.h"
 #include "11ac.h"
 
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0");
+
 static bool disable_auto_ds;
 module_param(disable_auto_ds, bool, 0);
 MODULE_PARM_DESC(disable_auto_ds,
@@ -1512,6 +1516,22 @@
 }
 
 static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+			  struct host_cmd_ds_command *cmd,
+			  u16 cmd_action, void *data_buf)
+{
+	struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+	const u16 *drcs_info = data_buf;
+
+	mc_pol->action = cpu_to_le16(cmd_action);
+	mc_pol->policy = cpu_to_le16(*drcs_info);
+	cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+	cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+				S_DS_GEN);
+	return 0;
+}
+
+static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
 			 struct host_cmd_ds_command *cmd,
 			 u16 cmd_action, void *data_buf)
@@ -1576,6 +1596,50 @@
 }
 
 static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+			struct host_cmd_ds_command *cmd,
+			u16 cmd_action, void *data_buf)
+{
+	struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+	struct mwifiex_tdls_init_cs_params *config;
+	struct mwifiex_tdls_config *init_config;
+	u16 len;
+
+	cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+	cmd->size = cpu_to_le16(S_DS_GEN);
+	tdls_config->tdls_action = cpu_to_le16(cmd_action);
+	le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+	switch (cmd_action) {
+	case ACT_TDLS_CS_ENABLE_CONFIG:
+		init_config = data_buf;
+		len = sizeof(*init_config);
+		memcpy(tdls_config->tdls_data, init_config, len);
+		break;
+	case ACT_TDLS_CS_INIT:
+		config = data_buf;
+		len = sizeof(*config);
+		memcpy(tdls_config->tdls_data, config, len);
+		break;
+	case ACT_TDLS_CS_STOP:
+		len = sizeof(struct mwifiex_tdls_stop_cs_params);
+		memcpy(tdls_config->tdls_data, data_buf, len);
+		break;
+	case ACT_TDLS_CS_PARAMS:
+		len = sizeof(struct mwifiex_tdls_config_cs_params);
+		memcpy(tdls_config->tdls_data, data_buf, len);
+		break;
+	default:
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "Unknown TDLS configuration\n");
+		return -ENOTSUPP;
+	}
+
+	le16_add_cpu(&cmd->size, len);
+	return 0;
+}
+
+static int
 mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
 		      struct host_cmd_ds_command *cmd,
 		      void *data_buf)
@@ -1933,10 +1997,12 @@
 		if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
 			cmd_ptr->params.bss_mode.con_type =
 				CONNECTION_TYPE_ADHOC;
-		else if (priv->bss_mode == NL80211_IFTYPE_STATION)
+		else if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+			 priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT)
 			cmd_ptr->params.bss_mode.con_type =
 				CONNECTION_TYPE_INFRA;
-		else if (priv->bss_mode == NL80211_IFTYPE_AP)
+		else if (priv->bss_mode == NL80211_IFTYPE_AP ||
+			 priv->bss_mode == NL80211_IFTYPE_P2P_GO)
 			cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
 		cmd_ptr->size = cpu_to_le16(sizeof(struct
 				host_cmd_ds_set_bss_mode) + S_DS_GEN);
@@ -1958,6 +2024,10 @@
 	case HostCmd_CMD_TDLS_OPER:
 		ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
 		break;
+	case HostCmd_CMD_TDLS_CONFIG:
+		ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action,
+					      data_buf);
+		break;
 	case HostCmd_CMD_CHAN_REPORT_REQUEST:
 		ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
 							    data_buf);
@@ -1966,6 +2036,10 @@
 		ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
 						   data_buf);
 		break;
+	case HostCmd_CMD_MC_POLICY:
+		ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+						data_buf);
+		break;
 	default:
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2082,6 +2156,18 @@
 			if (ret)
 				return -1;
 		}
+
+		if (drcs) {
+			adapter->drcs_enabled = true;
+			if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+				ret = mwifiex_send_cmd(priv,
+						       HostCmd_CMD_MC_POLICY,
+						       HostCmd_ACT_GEN_SET, 0,
+						       &adapter->drcs_enabled,
+						       true);
+			if (ret)
+				return -1;
+		}
 	}
 
 	/* get tx rate */
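The new command builders above (HostCmd_CMD_TDLS_CONFIG and HostCmd_CMD_MC_POLICY) follow the usual mwifiex pattern: the command length starts at the fixed header size and grows as the action field and the action-specific payload are appended. A host-order model of that accounting is sketched below; HDR_LEN is a stand-in for S_DS_GEN, and the driver keeps these fields little-endian via cpu_to_le16()/le16_add_cpu().

/* Host-order sketch of variable-length command size accounting. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDR_LEN 8	/* stand-in for S_DS_GEN */

struct cmd {
	uint16_t size;	/* little-endian in the driver; host order here */
	uint16_t action;
	uint8_t data[64];
};

static size_t build_tdls_cfg(struct cmd *c, uint16_t action,
			     const void *payload, size_t len)
{
	c->size = HDR_LEN;
	c->action = action;
	c->size += sizeof(c->action);	/* le16_add_cpu() in the driver */
	memcpy(c->data, payload, len);
	c->size += len;
	return c->size;
}

int main(void)
{
	struct cmd c;
	uint8_t params[12] = { 0 };

	printf("cmd size = %zu\n",
	       build_tdls_cfg(&c, 2, params, sizeof(params)));
	return 0;
}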
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index b645884..87b69d8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -599,6 +599,7 @@
 				    "info: key: GTK is set\n");
 			priv->wpa_is_gtk_set = true;
 			priv->scan_block = false;
+			priv->port_open = true;
 		}
 	}
 
@@ -629,6 +630,7 @@
 			mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
 			priv->wpa_is_gtk_set = true;
 			priv->scan_block = false;
+			priv->port_open = true;
 		}
 	}
 
@@ -893,7 +895,7 @@
 	case ACT_TDLS_DELETE:
 		if (reason) {
 			if (!node || reason == TDLS_ERR_LINK_NONEXISTENT)
-				mwifiex_dbg(priv->adapter, ERROR,
+				mwifiex_dbg(priv->adapter, MSG,
 					    "TDLS link delete for %pM failed: reason %d\n",
 					    cmd_tdls_oper->peer_mac, reason);
 			else
@@ -1191,12 +1193,15 @@
 		break;
 	case HostCmd_CMD_TDLS_OPER:
 		ret = mwifiex_ret_tdls_oper(priv, resp);
+	case HostCmd_CMD_MC_POLICY:
 		break;
 	case HostCmd_CMD_CHAN_REPORT_REQUEST:
 		break;
 	case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
 		ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
 		break;
+	case HostCmd_CMD_TDLS_CONFIG:
+		break;
 	default:
 		mwifiex_dbg(adapter, ERROR,
 			    "CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 848de26..3d18c58 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,7 @@
 	priv->media_connected = false;
 
 	priv->scan_block = false;
+	priv->port_open = false;
 
 	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
 	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
@@ -153,6 +154,7 @@
 	struct mwifiex_sta_node *sta_ptr;
 	struct mwifiex_tdls_generic_event *tdls_evt =
 			(void *)event_skb->data + sizeof(adapter->event_cause);
+	u8 *mac = tdls_evt->peer_mac;
 
 	/* reserved 2 bytes are not mandatory in tdls event */
 	if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
@@ -175,6 +177,59 @@
 					   le16_to_cpu(tdls_evt->u.reason_code),
 					   GFP_KERNEL);
 		break;
+	case TDLS_EVENT_CHAN_SWITCH_RESULT:
+		mwifiex_dbg(adapter, EVENT, "tdls channel switch result:\n");
+		mwifiex_dbg(adapter, EVENT,
+			    "status=0x%x, reason=0x%x cur_chan=%d\n",
+			    tdls_evt->u.switch_result.status,
+			    tdls_evt->u.switch_result.reason,
+			    tdls_evt->u.switch_result.cur_chan);
+
+		/* tdls channel switch failed */
+		if (tdls_evt->u.switch_result.status != 0) {
+			switch (tdls_evt->u.switch_result.cur_chan) {
+			case TDLS_BASE_CHANNEL:
+				sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+				break;
+			case TDLS_OFF_CHANNEL:
+				sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+				break;
+			default:
+				break;
+			}
+			return ret;
+		}
+
+		/* tdls channel switch success */
+		switch (tdls_evt->u.switch_result.cur_chan) {
+		case TDLS_BASE_CHANNEL:
+			if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN)
+				break;
+			mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+								  false);
+			sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+			break;
+		case TDLS_OFF_CHANNEL:
+			if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)
+				break;
+			mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+								  true);
+			sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+			break;
+		default:
+			break;
+		}
+
+		break;
+	case TDLS_EVENT_START_CHAN_SWITCH:
+		mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n");
+		sta_ptr->tdls_status = TDLS_CHAN_SWITCHING;
+		break;
+	case TDLS_EVENT_CHAN_SWITCH_STOPPED:
+		mwifiex_dbg(adapter, EVENT,
+			    "tdls chan switch stopped, reason=%d\n",
+			    tdls_evt->u.cs_stop_reason);
+		break;
 	default:
 		break;
 	}
@@ -182,6 +237,145 @@
 	return ret;
 }
 
+static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
+					 struct mwifiex_ie_types_header *tlv)
+{
+	struct mwifiex_tx_pause_tlv *tp;
+	struct mwifiex_sta_node *sta_ptr;
+	unsigned long flags;
+
+	tp = (void *)tlv;
+	mwifiex_dbg(priv->adapter, EVENT,
+		    "uap tx_pause: %pM pause=%d, pkts=%d\n",
+		    tp->peermac, tp->tx_pause,
+		    tp->pkt_cnt);
+
+	if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) {
+		if (tp->tx_pause)
+			priv->port_open = false;
+		else
+			priv->port_open = true;
+	} else if (is_multicast_ether_addr(tp->peermac)) {
+		mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
+	} else {
+		spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+		sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+		spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+		if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+			sta_ptr->tx_pause = tp->tx_pause;
+			mwifiex_update_ralist_tx_pause(priv, tp->peermac,
+						       tp->tx_pause);
+		}
+	}
+}
+
+static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
+					 struct mwifiex_ie_types_header *tlv)
+{
+	struct mwifiex_tx_pause_tlv *tp;
+	struct mwifiex_sta_node *sta_ptr;
+	int status;
+	unsigned long flags;
+
+	tp = (void *)tlv;
+	mwifiex_dbg(priv->adapter, EVENT,
+		    "sta tx_pause: %pM pause=%d, pkts=%d\n",
+		    tp->peermac, tp->tx_pause,
+		    tp->pkt_cnt);
+
+	if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) {
+		if (tp->tx_pause)
+			priv->port_open = false;
+		else
+			priv->port_open = true;
+	} else {
+		if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+			return;
+
+		status = mwifiex_get_tdls_link_status(priv, tp->peermac);
+		if (mwifiex_is_tdls_link_setup(status)) {
+			spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+			sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+			spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+			if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+				sta_ptr->tx_pause = tp->tx_pause;
+				mwifiex_update_ralist_tx_pause(priv,
+							       tp->peermac,
+							       tp->tx_pause);
+			}
+		}
+	}
+}
+
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+				      struct sk_buff *event_skb)
+{
+	struct mwifiex_ie_types_multi_chan_info *chan_info;
+	u16 status;
+
+	chan_info = (void *)event_skb->data + sizeof(u32);
+
+	if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "unknown TLV in chan_info event\n");
+		return;
+	}
+
+	status = le16_to_cpu(chan_info->status);
+
+	if (status) {
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "multi-channel operation started\n");
+	} else {
+		mwifiex_dbg(priv->adapter, EVENT,
+			    "multi-channel operation over\n");
+	}
+}
+
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+				    struct sk_buff *event_skb)
+{
+	struct mwifiex_ie_types_header *tlv;
+	u16 tlv_type, tlv_len;
+	int tlv_buf_left;
+
+	if (!priv->media_connected) {
+		mwifiex_dbg(priv->adapter, ERROR,
+			    "tx_pause event while disconnected; bss_role=%d\n",
+			    priv->bss_role);
+		return;
+	}
+
+	tlv_buf_left = event_skb->len - sizeof(u32);
+	tlv = (void *)event_skb->data + sizeof(u32);
+
+	while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+		tlv_type = le16_to_cpu(tlv->type);
+		tlv_len  = le16_to_cpu(tlv->len);
+		if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+		    tlv_buf_left) {
+			mwifiex_dbg(priv->adapter, ERROR,
+				    "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n",
+				    tlv_len, tlv_buf_left);
+			break;
+		}
+		if (tlv_type == TLV_TYPE_TX_PAUSE) {
+			if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+				mwifiex_process_sta_tx_pause(priv, tlv);
+			else
+				mwifiex_process_uap_tx_pause(priv, tlv);
+		}
+
+		tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+				tlv_len;
+		tlv = (void *)((u8 *)tlv + tlv_len +
+			       sizeof(struct mwifiex_ie_types_header));
+	}
+}
+
 /*
 * This function handles coex events generated by firmware
 */
@@ -359,7 +553,7 @@
 
 	case EVENT_PS_AWAKE:
 		mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
-		if (!adapter->pps_uapsd_mode &&
+		if (!adapter->pps_uapsd_mode && priv->port_open &&
 		    priv->media_connected && adapter->sleep_period.period) {
 				adapter->pps_uapsd_mode = true;
 				mwifiex_dbg(adapter, EVENT,
@@ -438,6 +632,7 @@
 
 	case EVENT_PORT_RELEASE:
 		mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
+		priv->port_open = true;
 		break;
 
 	case EVENT_EXT_SCAN_REPORT:
@@ -573,6 +768,16 @@
 		ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
 		break;
 
+	case EVENT_TX_DATA_PAUSE:
+		mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+		mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+		break;
+
+	case EVENT_MULTI_CHAN_INFO:
+		mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+		mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+		break;
+
 	case EVENT_TX_STATUS_REPORT:
 		mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
 		mwifiex_parse_tx_status_event(priv, adapter->event_body);
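mwifiex_process_tx_pause_event() above walks the TLVs in the event body: 16-bit type, 16-bit length, then payload, with the loop stopping as soon as a TLV claims more bytes than remain. A self-contained sketch of the same walk follows; the type values are made up and endianness is ignored (the driver uses le16_to_cpu() on the wire format).

/* Bounds-checked TLV walk, modelled on the TX_DATA_PAUSE event parser. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t len;
};

static void walk_tlvs(const uint8_t *buf, int left)
{
	while (left >= (int)sizeof(struct tlv_hdr)) {
		struct tlv_hdr h;

		memcpy(&h, buf, sizeof(h));
		if ((int)(sizeof(h) + h.len) > left) {
			printf("truncated tlv: len=%u left=%d\n", h.len, left);
			break;
		}
		printf("tlv type=0x%x len=%u\n", h.type, h.len);
		left -= sizeof(h) + h.len;
		buf += sizeof(h) + h.len;
	}
}

int main(void)
{
	/* one well-formed TLV followed by one with an impossible length */
	uint8_t buf[] = { 0x14, 0x00, 0x02, 0x00, 0xaa, 0xbb,
			  0x15, 0x00, 0xff, 0x00 };

	walk_tlvs(buf, sizeof(buf));
	return 0;
}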
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d8b7d9c..a6c8a4f 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,8 +66,8 @@
 	if (status <= 0) {
 		if (status == 0)
 			status = -ETIMEDOUT;
-		mwifiex_dbg(adapter, ERROR,
-			    "cmd_wait_q terminated: %d\n", status);
+		mwifiex_dbg(adapter, ERROR, "cmd_wait_q terminated: %d\n",
+			    status);
 		mwifiex_cancel_all_pending_cmd(adapter);
 		return status;
 	}
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index 2faa1bc..b3e163d 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -49,7 +49,7 @@
 		tid = skb->priority;
 		tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
 
-		if (status == TDLS_SETUP_COMPLETE) {
+		if (mwifiex_is_tdls_link_setup(status)) {
 			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
 			ra_list->tdls_link = true;
 			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -164,7 +164,7 @@
 	pos = (void *)skb_put(skb, 4);
 	*pos++ = WLAN_EID_AID;
 	*pos++ = 2;
-	*pos++ = le16_to_cpu(assoc_rsp->a_id);
+	memcpy(pos, &assoc_rsp->a_id, sizeof(assoc_rsp->a_id));
 
 	return;
 }
@@ -355,6 +355,7 @@
 	extcap->ieee_hdr.len = 8;
 	memset(extcap->ext_capab, 0, 8);
 	extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+	extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH;
 
 	if (priv->adapter->is_hw_11ac_capable)
 		extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
@@ -1071,6 +1072,11 @@
 			for (i = 0; i < MAX_NUM_TID; i++)
 				sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
 		}
+		if (sta_ptr->tdls_cap.extcap.ext_capab[3] &
+		    WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) {
+			mwifiex_config_tdls_enable(priv);
+			mwifiex_config_tdls_cs_params(priv);
+		}
 
 		memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
 		mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
@@ -1141,7 +1147,7 @@
 
 	spin_lock_irqsave(&priv->sta_list_spinlock, flags);
 	list_for_each_entry(sta_ptr, &priv->sta_list, list) {
-		if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+		if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
 			ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
 			peer++;
 			count++;
@@ -1295,7 +1301,7 @@
 			if ((link_status == TDLS_NOT_SETUP) &&
 			    (peer->tdls_status == TDLS_SETUP_INPROGRESS))
 				peer->failure_count++;
-			else if (link_status == TDLS_SETUP_COMPLETE)
+			else if (mwifiex_is_tdls_link_setup(link_status))
 				peer->failure_count = 0;
 
 			peer->tdls_status = link_status;
@@ -1367,7 +1373,7 @@
 
 		if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
 		     !tdls_peer->rssi) &&
-		    tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+		    mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) {
 			tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
 			mwifiex_dbg(priv->adapter, MSG,
 				    "teardown TDLS link,peer=%pM rssi=%d\n",
@@ -1416,3 +1422,67 @@
 		mwifiex_flush_auto_tdls_list(priv);
 	}
 }
+
+static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable)
+{
+	struct mwifiex_tdls_config config;
+
+	config.enable = cpu_to_le16(enable);
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true);
+}
+
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv)
+{
+	return mwifiex_config_tdls(priv, true);
+}
+
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv)
+{
+	return mwifiex_config_tdls(priv, false);
+}
+
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv)
+{
+	struct mwifiex_tdls_config_cs_params config_tdls_cs_params;
+
+	config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
+	config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
+	config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_PARAMS, 0,
+				&config_tdls_cs_params, true);
+}
+
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac)
+{
+	struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params;
+
+	ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac);
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_STOP, 0,
+				&stop_tdls_cs_params, true);
+}
+
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+			  u8 primary_chan, u8 second_chan_offset, u8 band)
+{
+	struct mwifiex_tdls_init_cs_params start_tdls_cs_params;
+
+	ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac);
+	start_tdls_cs_params.primary_chan = primary_chan;
+	start_tdls_cs_params.second_chan_offset = second_chan_offset;
+	start_tdls_cs_params.band = band;
+
+	start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
+	start_tdls_cs_params.switch_timeout =
+					cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
+	start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS;
+	start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY;
+
+	return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+				ACT_TDLS_CS_INIT, 0,
+				&start_tdls_cs_params, true);
+}
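With channel switching advertised via ext_capab[3] in the TDLS setup frames, the driver now issues a global enable, the channel-switch timing parameters, and per-peer start/stop requests through HostCmd_CMD_TDLS_CONFIG. The snippet below only illustrates setting and testing that capability bit; the 0x10 value is an assumption standing in for WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH, and everything else is illustrative.

/* Illustration of advertising/testing the TDLS channel-switch capability. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed bit value; the real flag comes from the kernel's ieee80211.h. */
#define TDLS_CHAN_SWITCH_BIT 0x10

static bool peer_supports_cs(const uint8_t ext_capab[8])
{
	return ext_capab[3] & TDLS_CHAN_SWITCH_BIT;
}

int main(void)
{
	uint8_t ext_capab[8];

	memset(ext_capab, 0, sizeof(ext_capab));
	ext_capab[3] |= TDLS_CHAN_SWITCH_BIT;	/* what the setup frame advertises */

	printf("peer supports TDLS channel switch: %d\n",
	       peer_supports_cs(ext_capab));
	return 0;
}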
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 5ed9b79..8b1e5b5 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -370,8 +370,28 @@
 			/* consumes ack_skb */
 			skb_complete_wifi_ack(ack_skb, !tx_status->status);
 		} else {
+			/* Remove broadcast address which was added by driver */
+			memmove(ack_skb->data +
+				sizeof(struct ieee80211_hdr_3addr) +
+				MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16),
+				ack_skb->data +
+				sizeof(struct ieee80211_hdr_3addr) +
+				MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+				ETH_ALEN, ack_skb->len -
+				(sizeof(struct ieee80211_hdr_3addr) +
+				MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+				ETH_ALEN));
+			ack_skb->len = ack_skb->len - ETH_ALEN;
+			/* Remove the driver's proprietary header, including the
+			 * 2-byte packet length, and pass the actual management
+			 * frame buffer to cfg80211.
+			 */
 			cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
-						ack_skb->data, ack_skb->len,
+						ack_skb->data +
+						MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+						sizeof(u16), ack_skb->len -
+						(MWIFIEX_MGMT_FRAME_HEADER_SIZE
+						 + sizeof(u16)),
 						!tx_status->status, GFP_ATOMIC);
 			dev_kfree_skb_any(ack_skb);
 		}
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index b749300..4d5a6e3 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -808,7 +808,7 @@
 			     struct mwifiex_uap_bss_param *bss_cfg,
 			     struct cfg80211_chan_def chandef)
 {
-	u8 config_bands = 0;
+	u8 config_bands = 0, old_bands = priv->adapter->config_bands;
 
 	priv->bss_chandef = chandef;
 
@@ -834,6 +834,11 @@
 	}
 
 	priv->adapter->config_bands = config_bands;
+
+	if (old_bands != config_bands) {
+		mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
+		mwifiex_dnld_txpwr_table(priv);
+	}
 }
 
 int mwifiex_config_start_uap(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7bc1f85..46c972a 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -41,6 +41,8 @@
 	mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
 			 event->data, event->len);
 
+	skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
+
 	while ((evt_len >= sizeof(tlv_hdr->header))) {
 		tlv_hdr = (struct mwifiex_ie_types_data *)curr;
 		tlv_len = le16_to_cpu(tlv_hdr->header.len);
@@ -176,6 +178,7 @@
 		break;
 	case EVENT_UAP_BSS_IDLE:
 		priv->media_connected = false;
+		priv->port_open = false;
 		if (netif_carrier_ok(priv->netdev))
 			netif_carrier_off(priv->netdev);
 		mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -185,6 +188,7 @@
 		break;
 	case EVENT_UAP_BSS_ACTIVE:
 		priv->media_connected = true;
+		priv->port_open = true;
 		if (!netif_carrier_ok(priv->netdev))
 			netif_carrier_on(priv->netdev);
 		mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
@@ -192,6 +196,7 @@
 	case EVENT_UAP_BSS_START:
 		mwifiex_dbg(adapter, EVENT,
 			    "AP EVENT: event id: %#x\n", eventcause);
+		priv->port_open = false;
 		memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
 		       ETH_ALEN);
 		if (priv->hist_data)
@@ -297,6 +302,16 @@
 		mwifiex_bt_coex_wlan_param_update_event(priv,
 							adapter->event_skb);
 		break;
+	case EVENT_TX_DATA_PAUSE:
+		mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+		mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+		break;
+
+	case EVENT_MULTI_CHAN_INFO:
+		mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+		mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+		break;
+
 	default:
 		mwifiex_dbg(adapter, EVENT,
 			    "event: unknown event id: %#x\n", eventcause);
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index aada934..5e789b2 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -47,6 +47,11 @@
 	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8897_PID_2,
 				       USB_CLASS_VENDOR_SPEC,
 				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
+	/* 8997 */
+	{USB_DEVICE(USB8XXX_VID, USB8997_PID_1)},
+	{USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8997_PID_2,
+				       USB_CLASS_VENDOR_SPEC,
+				       USB_SUBCLASS_VENDOR_SPEC, 0xff)},
 	{ }	/* Terminating entry */
 };
 
@@ -244,9 +249,11 @@
 	if (card->rx_cmd_ep == context->ep) {
 		mwifiex_usb_submit_rx_urb(context, size);
 	} else {
-		context->skb = NULL;
-		if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+		if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) {
 			mwifiex_usb_submit_rx_urb(context, size);
+		} else {
+			context->skb = NULL;
+		}
 	}
 
 	return;
@@ -380,12 +387,14 @@
 	case USB8797_PID_1:
 	case USB8801_PID_1:
 	case USB8897_PID_1:
+	case USB8997_PID_1:
 		card->usb_boot_state = USB8XXX_FW_DNLD;
 		break;
 	case USB8766_PID_2:
 	case USB8797_PID_2:
 	case USB8801_PID_2:
 	case USB8897_PID_2:
+	case USB8997_PID_2:
 		card->usb_boot_state = USB8XXX_FW_READY;
 		break;
 	default:
@@ -812,6 +821,12 @@
 	adapter->dev = &card->udev->dev;
 
 	switch (le16_to_cpu(card->udev->descriptor.idProduct)) {
+	case USB8997_PID_1:
+	case USB8997_PID_2:
+		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
+		strcpy(adapter->fw_name, USB8997_DEFAULT_FW_NAME);
+		adapter->ext_scan = true;
+		break;
 	case USB8897_PID_1:
 	case USB8897_PID_2:
 		adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
@@ -868,8 +883,10 @@
 
 	/* Allocate memory for transmit */
 	fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
-	if (!fwdata)
+	if (!fwdata) {
+		ret = -ENOMEM;
 		goto fw_exit;
+	}
 
 	/* Allocate memory for receive */
 	recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
@@ -1119,3 +1136,4 @@
 MODULE_FIRMWARE(USB8797_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8801_DEFAULT_FW_NAME);
 MODULE_FIRMWARE(USB8897_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(USB8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
index 57e1a57..f0051f8 100644
--- a/drivers/net/wireless/mwifiex/usb.h
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -32,6 +32,8 @@
 #define USB8897_PID_2		0x2046
 #define USB8801_PID_1		0x2049
 #define USB8801_PID_2		0x204a
+#define USB8997_PID_1		0x204d
+#define USB8997_PID_2		0x204e
 
 
 #define USB8XXX_FW_DNLD		1
@@ -46,6 +48,7 @@
 #define USB8797_DEFAULT_FW_NAME	"mrvl/usb8797_uapsta.bin"
 #define USB8801_DEFAULT_FW_NAME	"mrvl/usb8801_uapsta.bin"
 #define USB8897_DEFAULT_FW_NAME	"mrvl/usb8897_uapsta.bin"
+#define USB8997_DEFAULT_FW_NAME	"mrvl/usb8997_uapsta.bin"
 
 #define FW_DNLD_TX_BUF_SIZE	620
 #define FW_DNLD_RX_BUF_SIZE	2048
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 790e619..0cec8a6 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -126,6 +126,10 @@
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
 {
 
+	if (adapter->hw_status == MWIFIEX_HW_STATUS_READY)
+		if (adapter->if_ops.init_fw_port)
+			adapter->if_ops.init_fw_port(adapter);
+
 	adapter->init_wait_q_woken = true;
 	wake_up_interruptible(&adapter->init_wait_q);
 	return 0;
@@ -496,16 +500,12 @@
 int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
 			 struct cmd_ctrl_node *cmd_node)
 {
-	mwifiex_dbg(adapter, CMD,
-		    "cmd completed: status=%d\n",
+	WARN_ON(!cmd_node->wait_q_enabled);
+	mwifiex_dbg(adapter, CMD, "cmd completed: status=%d\n",
 		    adapter->cmd_wait_q.status);
 
-	*(cmd_node->condition) = true;
-
-	if (adapter->cmd_wait_q.status == -ETIMEDOUT)
-		mwifiex_dbg(adapter, ERROR, "cmd timeout\n");
-	else
-		wake_up_interruptible(&adapter->cmd_wait_q.wait);
+	*cmd_node->condition = true;
+	wake_up_interruptible(&adapter->cmd_wait_q.wait);
 
 	return 0;
 }
@@ -531,6 +531,65 @@
 	return NULL;
 }
 
+static struct mwifiex_sta_node *
+mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status)
+{
+	struct mwifiex_sta_node *node;
+
+	list_for_each_entry(node, &priv->sta_list, list) {
+		if (node->tdls_status == status)
+			return node;
+	}
+
+	return NULL;
+}
+
+/* If tdls channel switching is ongoing, tx data traffic should be
+ * blocked until the switching stage has completed.
+ */
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
+{
+	struct mwifiex_sta_node *sta_ptr;
+
+	if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+		return false;
+
+	sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING);
+	if (sta_ptr)
+		return true;
+
+	return false;
+}
+
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+{
+	struct mwifiex_sta_node *sta_ptr;
+
+	if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+		return false;
+
+	sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN);
+	if (sta_ptr)
+		return true;
+
+	return false;
+}
+
+/* If tdls channel switching is ongoing or tdls operates on the off-channel,
+ * the cmd path should be blocked until tdls switches back to the base
+ * channel.
+ */
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv)
+{
+	if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+		return true;
+
+	if (mwifiex_is_tdls_chan_switching(priv) ||
+	    mwifiex_is_tdls_off_chan(priv))
+		return false;
+
+	return true;
+}
+
 /* This function will add a sta_node entry to associated station list
  * table with the given mac address.
  * If entry exist already, existing entry is returned.
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index a8ea21c..173d366 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -160,9 +160,10 @@
 		ra_list->tdls_link = false;
 		ra_list->ba_status = BA_SETUP_NONE;
 		ra_list->amsdu_in_ampdu = false;
+		ra_list->tx_paused = false;
 		if (!mwifiex_queuing_ra_based(priv)) {
-			if (mwifiex_get_tdls_link_status(priv, ra) ==
-			    TDLS_SETUP_COMPLETE) {
+			if (mwifiex_is_tdls_link_setup
+				(mwifiex_get_tdls_link_status(priv, ra))) {
 				ra_list->tdls_link = true;
 				ra_list->is_11n_enabled =
 					mwifiex_tdls_peer_11n_enabled(priv, ra);
@@ -448,6 +449,11 @@
 	}
 }
 
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
+{
+	return atomic_read(&adapter->bypass_tx_pending) ? false : true;
+}
+
 /*
  * This function checks if WMM Tx queue is empty.
  */
@@ -459,6 +465,8 @@
 
 	for (i = 0; i < adapter->priv_num; ++i) {
 		priv = adapter->priv[i];
+		if (priv && !priv->port_open)
+			continue;
 		if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
 			return false;
 	}
@@ -580,6 +588,10 @@
 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 
+	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+	atomic_set(&priv->adapter->bypass_tx_pending, 0);
+
 	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
 	idr_destroy(&priv->ack_status_frames);
 }
@@ -603,6 +615,88 @@
 	return NULL;
 }
 
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+				    u8 tx_pause)
+{
+	struct mwifiex_ra_list_tbl *ra_list;
+	u32 pkt_cnt = 0, tx_pkts_queued;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+	for (i = 0; i < MAX_NUM_TID; ++i) {
+		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
+		if (ra_list && ra_list->tx_paused != tx_pause) {
+			pkt_cnt += ra_list->total_pkt_count;
+			ra_list->tx_paused = tx_pause;
+			if (tx_pause)
+				priv->wmm.pkts_paused[i] +=
+					ra_list->total_pkt_count;
+			else
+				priv->wmm.pkts_paused[i] -=
+					ra_list->total_pkt_count;
+		}
+	}
+
+	if (pkt_cnt) {
+		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+		if (tx_pause)
+			tx_pkts_queued -= pkt_cnt;
+		else
+			tx_pkts_queued += pkt_cnt;
+
+		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+	}
+	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/* This function updates the non-tdls peer ralist tx_pause while
+ * tdls channel switching is in progress.
+ */
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+					       u8 *mac, u8 tx_pause)
+{
+	struct mwifiex_ra_list_tbl *ra_list;
+	u32 pkt_cnt = 0, tx_pkts_queued;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+	for (i = 0; i < MAX_NUM_TID; ++i) {
+		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
+				    list) {
+			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
+				continue;
+
+			if (ra_list && ra_list->tx_paused != tx_pause) {
+				pkt_cnt += ra_list->total_pkt_count;
+				ra_list->tx_paused = tx_pause;
+				if (tx_pause)
+					priv->wmm.pkts_paused[i] +=
+						ra_list->total_pkt_count;
+				else
+					priv->wmm.pkts_paused[i] -=
+						ra_list->total_pkt_count;
+			}
+		}
+	}
+
+	if (pkt_cnt) {
+		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+		if (tx_pause)
+			tx_pkts_queued -= pkt_cnt;
+		else
+			tx_pkts_queued += pkt_cnt;
+
+		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+	}
+	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
 /*
  * This function retrieves an RA list node for a given TID and
  * RA address pair.
@@ -670,6 +764,18 @@
 }
 
 /*
+ * This function adds a packet to the bypass TX queue.
+ * This is a special TX queue for packets which can be sent even when
+ * port_open is false.
+ */
+void
+mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+				   struct sk_buff *skb)
+{
+	skb_queue_tail(&priv->bypass_txq, skb);
+}
+
+/*
  * This function adds a packet to WMM queue.
  *
  * In disconnected state the packet is immediately dropped and the
@@ -723,6 +829,9 @@
 	    !mwifiex_is_skb_mgmt_frame(skb)) {
 		switch (tdls_status) {
 		case TDLS_SETUP_COMPLETE:
+		case TDLS_CHAN_SWITCHING:
+		case TDLS_IN_BASE_CHAN:
+		case TDLS_IN_OFF_CHAN:
 			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
 							      ra);
 			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -765,7 +874,10 @@
 		atomic_set(&priv->wmm.highest_queued_prio,
 			   priv->tos_to_tid_inv[tid_down]);
 
-	atomic_inc(&priv->wmm.tx_pkts_queued);
+	if (ra_list->tx_paused)
+		priv->wmm.pkts_paused[tid_down]++;
+	else
+		atomic_inc(&priv->wmm.tx_pkts_queued);
 
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
@@ -970,7 +1082,8 @@
 
 			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+			if (!priv_tmp->port_open ||
+			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
 				continue;
 
 			/* iterate over the WMM queues of the BSS */
@@ -987,7 +1100,8 @@
 				list_for_each_entry(ptr, &tid_ptr->ra_list,
 						    list) {
 
-					if (!skb_queue_empty(&ptr->skb_head))
+					if (!ptr->tx_paused &&
+					    !skb_queue_empty(&ptr->skb_head))
 						/* holds both locks */
 						goto found;
 				}
@@ -1339,6 +1453,38 @@
 	return 0;
 }
 
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
+{
+	struct mwifiex_tx_param tx_param;
+	struct sk_buff *skb;
+	struct mwifiex_txinfo *tx_info;
+	struct mwifiex_private *priv;
+	int i;
+
+	if (adapter->data_sent || adapter->tx_lock_flag)
+		return;
+
+	for (i = 0; i < adapter->priv_num; ++i) {
+		priv = adapter->priv[i];
+
+		if (skb_queue_empty(&priv->bypass_txq))
+			continue;
+
+		skb = skb_dequeue(&priv->bypass_txq);
+		tx_info = MWIFIEX_SKB_TXCB(skb);
+
+		/* no aggregation for bypass packets */
+		tx_param.next_pkt_len = 0;
+
+		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
+			skb_queue_head(&priv->bypass_txq, skb);
+			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
+		} else {
+			atomic_dec(&adapter->bypass_tx_pending);
+		}
+	}
+}
+
 /*
  * This function transmits the highest priority packet awaiting in the
  * WMM Queues.
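The WMM changes above do two things: a per-RA-list tx_paused flag moves packet counts between tx_pkts_queued (which the TX scheduler polls) and the per-TID pkts_paused counters, and a new bypass queue lets selected packets out even while port_open is false. The counter movement is the subtle part; here is a small host-side model of it under simplified names, a sketch rather than the driver's locking-aware code.

/* Simplified model of the tx_pause counter bookkeeping. */
#include <stdbool.h>
#include <stdio.h>

struct ra_list { bool paused; unsigned int pkts; };

static void set_tx_pause(struct ra_list *ra, bool pause,
			 unsigned int *tx_pkts_queued, unsigned int *pkts_paused)
{
	if (ra->paused == pause)
		return;
	ra->paused = pause;
	if (pause) {
		*pkts_paused += ra->pkts;	/* hidden from the scheduler */
		*tx_pkts_queued -= ra->pkts;
	} else {
		*pkts_paused -= ra->pkts;
		*tx_pkts_queued += ra->pkts;	/* visible again */
	}
}

int main(void)
{
	struct ra_list ra = { .paused = false, .pkts = 5 };
	unsigned int queued = 5, paused = 0;

	set_tx_pause(&ra, true, &queued, &paused);
	printf("queued=%u paused=%u\n", queued, paused);
	return 0;
}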
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 48ece0b..38f0976 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -99,12 +99,16 @@
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
 				 struct sk_buff *skb);
+void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+					struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
 			      struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter);
 int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
 			    struct mwifiex_ra_list_tbl *ra_list, int tid);
 
@@ -126,6 +130,10 @@
 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
 			    const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+				    u8 tx_pause);
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+					       u8 *mac, u8 tx_pause);
 
 struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
 					*priv, u8 tid, const u8 *ra_addr);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 77361af..9420fc6 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5019,35 +5019,36 @@
 		memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
 
 		rcu_read_unlock();
-	}
 
-	if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-	    !priv->ap_fw) {
-		rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
-		if (rc)
-			goto out;
+		if (changed & BSS_CHANGED_ASSOC) {
+			if (!priv->ap_fw) {
+				rc = mwl8k_cmd_set_rate(hw, vif,
+							ap_legacy_rates,
+							ap_mcs_rates);
+				if (rc)
+					goto out;
 
-		rc = mwl8k_cmd_use_fixed_rate_sta(hw);
-		if (rc)
-			goto out;
-	} else {
-		if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc &&
-		    priv->ap_fw) {
-			int idx;
-			int rate;
+				rc = mwl8k_cmd_use_fixed_rate_sta(hw);
+				if (rc)
+					goto out;
+			} else {
+				int idx;
+				int rate;
 
-			/* Use AP firmware specific rate command.
-			 */
-			idx = ffs(vif->bss_conf.basic_rates);
-			if (idx)
-				idx--;
+				/* Use AP firmware specific rate command.
+				 */
+				idx = ffs(vif->bss_conf.basic_rates);
+				if (idx)
+					idx--;
 
-			if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
-				rate = mwl8k_rates_24[idx].hw_value;
-			else
-				rate = mwl8k_rates_50[idx].hw_value;
+				if (hw->conf.chandef.chan->band ==
+				    IEEE80211_BAND_2GHZ)
+					rate = mwl8k_rates_24[idx].hw_value;
+				else
+					rate = mwl8k_rates_50[idx].hw_value;
 
-			mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+				mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+			}
 		}
 	}
 
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index c410180..7b5c554 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -2321,8 +2321,6 @@
 	struct orinoco_rx_data *rx_data, *temp;
 	struct orinoco_scan_data *sd, *sdtemp;
 
-	wiphy_unregister(wiphy);
-
 	/* If the tasklet is scheduled when we call tasklet_kill it
 	 * will run one final time. However the tasklet will only
 	 * drain priv->rx_list if the hw is still available. */
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index c0a2737..a956f96 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -118,6 +118,7 @@
 
 	orinoco_cs_release(link);
 
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_orinocodev(priv);
 }				/* orinoco_cs_detach */
 
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 1b543e3..048693b 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -223,13 +223,15 @@
 	err = orinoco_if_add(priv, 0, 0, NULL);
 	if (err) {
 		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-		goto fail;
+		goto fail_wiphy;
 	}
 
 	pci_set_drvdata(pdev, priv);
 
 	return 0;
 
+ fail_wiphy:
+	wiphy_unregister(priv_to_wiphy(priv));
  fail:
 	free_irq(pdev->irq, priv);
 
@@ -263,6 +265,7 @@
 	iowrite16(0, card->bridge_io + 10);
 
 	orinoco_if_del(priv);
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_irq(pdev->irq, priv);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index 74219d5..4938a22 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -173,13 +173,15 @@
 	err = orinoco_if_add(priv, 0, 0, NULL);
 	if (err) {
 		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-		goto fail;
+		goto fail_wiphy;
 	}
 
 	pci_set_drvdata(pdev, priv);
 
 	return 0;
 
+ fail_wiphy:
+	wiphy_unregister(priv_to_wiphy(priv));
  fail:
 	free_irq(pdev->irq, priv);
 
@@ -203,6 +205,7 @@
 	struct orinoco_private *priv = pci_get_drvdata(pdev);
 
 	orinoco_if_del(priv);
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_irq(pdev->irq, priv);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 8b04523..2213520 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -262,13 +262,15 @@
 	err = orinoco_if_add(priv, 0, 0, NULL);
 	if (err) {
 		printk(KERN_ERR PFX "orinoco_if_add() failed\n");
-		goto fail;
+		goto fail_wiphy;
 	}
 
 	pci_set_drvdata(pdev, priv);
 
 	return 0;
 
+ fail_wiphy:
+	wiphy_unregister(priv_to_wiphy(priv));
  fail:
 	free_irq(pdev->irq, priv);
 
@@ -299,6 +301,7 @@
 	struct orinoco_pci_card *card = priv->card;
 
 	orinoco_if_del(priv);
+	wiphy_unregister(priv_to_wiphy(priv));
 	free_irq(pdev->irq, priv);
 	free_orinocodev(priv);
 	pci_iounmap(pdev, priv->hw.iobase);
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 91f0544..26a57d7 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1502,6 +1502,7 @@
 	if (upriv->dev) {
 		struct orinoco_private *priv = ndev_priv(upriv->dev);
 		orinoco_if_del(priv);
+		wiphy_unregister(priv_to_wiphy(upriv));
 		free_orinocodev(priv);
 	}
 }
@@ -1695,6 +1696,7 @@
 	if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
 		upriv->dev = NULL;
 		err("%s: orinoco_if_add() failed", __func__);
+		wiphy_unregister(priv_to_wiphy(priv));
 		goto error;
 	}
 	upriv->dev = priv->ndev;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 1c6788a..40d7231 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -203,8 +203,10 @@
 
 	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
-	if (!fw)
-		return -ENOMEM;
+	if (!fw) {
+		status = -ENOMEM;
+		goto out;
+	}
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -217,6 +219,8 @@
 
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
 	kfree(fw);
+
+out:
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 30c2cf7..de49008 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -148,8 +148,10 @@
 
 	/* Copy firmware into DMA-accessible memory */
 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
-	if (!fw)
-		return -ENOMEM;
+	if (!fw) {
+		status = -ENOMEM;
+		goto out;
+	}
 	len = fw_entry->size;
 
 	if (len % 4)
@@ -162,6 +164,8 @@
 
 	status = rsi_copy_to_card(common, fw, len, num_blocks);
 	kfree(fw);
+
+out:
 	release_firmware(fw_entry);
 	return status;
 }
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2b4ef25..de62f5d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -240,7 +240,6 @@
 
 config RT2X00_LIB
 	tristate
-	select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
 	bool
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index afba073..78cc035 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -54,7 +54,7 @@
 #define CSR_REG_BASE			0x0400
 #define CSR_REG_SIZE			0x0100
 #define EEPROM_BASE			0x0000
-#define EEPROM_SIZE			0x006a
+#define EEPROM_SIZE			0x006e
 #define BBP_BASE			0x0000
 #define BBP_SIZE			0x0060
 #define RF_BASE				0x0004
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 9bb398b..3282ddb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -254,6 +254,8 @@
 	int tx_failed;
 };
 
+DECLARE_EWMA(rssi, 1024, 8)
+
 /*
  * Antenna settings about the currently active link.
  */
@@ -285,7 +287,7 @@
 	 * Similar to the avg_rssi in the link_qual structure
 	 * this value is updated by using the walking average.
 	 */
-	struct ewma rssi_ant;
+	struct ewma_rssi rssi_ant;
 };
 
 /*
@@ -314,7 +316,7 @@
 	/*
 	 * Currently active average RSSI value
 	 */
-	struct ewma avg_rssi;
+	struct ewma_rssi avg_rssi;
 
 	/*
 	 * Work structure for scheduling periodic link tuning.
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 9b941c0..017188e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -33,15 +33,11 @@
  */
 #define DEFAULT_RSSI		-128
 
-/* Constants for EWMA calculations. */
-#define RT2X00_EWMA_FACTOR	1024
-#define RT2X00_EWMA_WEIGHT	8
-
-static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma)
 {
 	unsigned long avg;
 
-	avg = ewma_read(ewma);
+	avg = ewma_rssi_read(ewma);
 	if (avg)
 		return -avg;
 
@@ -76,8 +72,7 @@
 
 static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
 {
-	ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
-		  RT2X00_EWMA_WEIGHT);
+	ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant);
 }
 
 static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -225,12 +220,12 @@
 	/*
 	 * Update global RSSI
 	 */
-	ewma_add(&link->avg_rssi, -rxdesc->rssi);
+	ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi);
 
 	/*
 	 * Update antenna RSSI
 	 */
-	ewma_add(&ant->rssi_ant, -rxdesc->rssi);
+	ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi);
 }
 
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -285,8 +280,7 @@
 	 */
 	rt2x00dev->link.count = 0;
 	memset(qual, 0, sizeof(*qual));
-	ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
-		  RT2X00_EWMA_WEIGHT);
+	ewma_rssi_init(&rt2x00dev->link.avg_rssi);
 
 	/*
 	 * Restore the VGC level as stored in the registers,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
index c8058aa..6291256 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.c
@@ -200,7 +200,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl92c_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -209,7 +209,7 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
 	pfwdata = rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 	RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
@@ -219,10 +219,10 @@
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x), Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
-			  (int)sizeof(struct rtl92c_firmware_header));
+			  (int)sizeof(struct rtlwifi_firmware_header));
 
-		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
index 05e944e..21bd4a5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/fw.h
@@ -37,7 +37,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT		3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)		\
-	((_pfwhdr->signature&0xFFFF) == 0x88E1)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFFF) == 0x88E1)
 #define USE_OLD_WOWLAN_DEBUG_FW			0
 
 #define H2C_88E_RSVDPAGE_LOC_LEN		5
@@ -131,25 +131,6 @@
 #define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define	FW_PWR_STATE_RF_OFF		0
 
-struct rtl92c_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodesize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8188e_h2c_cmd {
 	H2C_88E_RSVDPAGE = 0,
 	H2C_88E_JOINBSSRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 0aca6f4..03cbe4c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -39,6 +39,7 @@
 #define BT_RSSI_STATE_SPECIAL_LOW	BIT_OFFSET_LEN_MASK_32(2, 1)
 #define BT_RSSI_STATE_BG_EDCA_LOW	BIT_OFFSET_LEN_MASK_32(3, 1)
 #define BT_RSSI_STATE_TXPOWER_LOW	BIT_OFFSET_LEN_MASK_32(4, 1)
+#define BT_MASK				0x00ffffff
 
 #define RTLPRIV			(struct rtl_priv *)
 #define GET_UNDECORATED_AVERAGE_RSSI(_priv)	\
@@ -312,7 +313,7 @@
 	struct dig_t *digtable = &rtlpriv->dm_digtable;
 	u32 isbt;
 
-	/* modify DIG lower bound, deal with abnorally large false alarm */
+	/* modify DIG lower bound, deal with abnormally large false alarm */
 	if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
 		digtable->large_fa_hit++;
 		if (digtable->forbidden_igi < digtable->cur_igvalue) {
@@ -1536,13 +1537,11 @@
 		return false;
 
 	bt_state = rtl_read_byte(rtlpriv, 0x4fd);
-	bt_tx = rtl_read_dword(rtlpriv, 0x488);
-	bt_tx = bt_tx & 0x00ffffff;
-	bt_pri = rtl_read_dword(rtlpriv, 0x48c);
-	bt_pri = bt_pri & 0x00ffffff;
+	bt_tx = rtl_read_dword(rtlpriv, 0x488) & BT_MASK;
+	bt_pri = rtl_read_dword(rtlpriv, 0x48c) & BT_MASK;
 	polling = rtl_read_dword(rtlpriv, 0x490);
 
-	if (bt_tx == 0xffffffff && bt_pri == 0xffffffff &&
+	if (bt_tx == BT_MASK && bt_pri == BT_MASK &&
 	    polling == 0xffffffff && bt_state == 0xff)
 		return false;
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 14b819e..43fcb25 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -221,7 +221,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl92c_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -230,19 +230,19 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
 	pfwdata = (u8 *)rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 	if (IS_FW_HEADER_EXIST(pfwheader)) {
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
-			  (int)sizeof(struct rtl92c_firmware_header));
+			  (int)sizeof(struct rtlwifi_firmware_header));
 
-		rtlhal->fw_version = pfwheader->version;
+		rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 		rtlhal->fw_subversion = pfwheader->subversion;
-		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	_rtl92c_enable_fw_download(hw, true);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index e9f4281..864806c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -69,25 +69,6 @@
 	((GET_CVID_CUT_VERSION(version) == \
 		CHIP_VENDOR_UMC_B_CUT) ? true : false) : false)
 
-struct rtl92c_firmware_header {
-	__le16 signature;
-	u8 category;
-	u8 function;
-	__le16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	__le16 ramcodeSize;
-	__le16 rsvd2;
-	__le32 svnindex;
-	__le32 rsvd3;
-	__le32 rsvd4;
-	__le32 rsvd5;
-};
-
 #define pagenum_128(_len)	(u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
 
 #define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val)			\
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
index c940a87..74a479a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/def.h
@@ -32,24 +32,15 @@
 /*-------------------------------------------------------------------------
  *	Chip specific
  *-------------------------------------------------------------------------*/
-#define CHIP_8723			BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV		BIT(3) /* RTL8723 Driver Revised */
 #define NORMAL_CHIP			BIT(4)
 #define CHIP_VENDOR_UMC			BIT(5)
 #define CHIP_VENDOR_UMC_B_CUT		BIT(6)
 
-#define IS_8723_SERIES(version)		\
-	(((version) & CHIP_8723) ? true : false)
-
 #define IS_92C_1T2R(version)		\
 	(((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
 
 #define IS_VENDOR_UMC(version)		\
 	(((version) & CHIP_VENDOR_UMC) ? true : false)
 
-#define IS_VENDOR_8723_A_CUT(version)	\
-	(((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
-	false : true) : false)
-
 #define CHIP_BONDING_92C_1T2R	0x1
 #define CHIP_BONDING_IDENTIFIER(_value)	(((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 767358a..25db369 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -818,26 +818,29 @@
 
 static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
 {
-	u16			value16;
-
+	u16 value16;
+	u32 value32;
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
-	mac->rx_conf = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
-		      RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
-		      RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
-	rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+	value32 = (RCR_APM | RCR_AM | RCR_ADF | RCR_AB | RCR_APPFCS |
+		   RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+		   RCR_APP_MIC | RCR_APP_PHYSTS | RCR_ACRC32);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)(&value32));
 	/* Accept all multicast address */
 	rtl_write_dword(rtlpriv,  REG_MAR, 0xFFFFFFFF);
 	rtl_write_dword(rtlpriv,  REG_MAR + 4, 0xFFFFFFFF);
 	/* Accept all management frames */
 	value16 = 0xFFFF;
-	rtl92c_set_mgt_filter(hw, value16);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
+				      (u8 *)(&value16));
 	/* Reject all control frame - default value is 0 */
-	rtl92c_set_ctrl_filter(hw, 0x0);
+	value16 = 0x0;
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
+				      (u8 *)(&value16));
 	/* Accept all data frames */
 	value16 = 0xFFFF;
-	rtl92c_set_data_filter(hw, value16);
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DATA_FILTER,
+				      (u8 *)(&value16));
 }
 
 static void _rtl92cu_init_beacon_parameters(struct ieee80211_hw *hw)
@@ -988,17 +991,6 @@
 	}
 }
 
-static void _update_mac_setting(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-
-	mac->rx_conf = rtl_read_dword(rtlpriv, REG_RCR);
-	mac->rx_mgt_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-	mac->rx_ctrl_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-	mac->rx_data_filter = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
-}
-
 int rtl92cu_hw_init(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1068,7 +1060,6 @@
 	}
 	_rtl92cu_hw_configure(hw);
 	_InitPABias(hw);
-	_update_mac_setting(hw);
 	rtl92c_dm_init(hw);
 exit:
 	local_irq_restore(flags);
@@ -1620,7 +1611,6 @@
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
 	enum wireless_mode wirelessmode = mac->mode;
 	u8 idx = 0;
 
@@ -1829,63 +1819,10 @@
 						u4b_ac_param);
 				break;
 			default:
-				RT_ASSERT(false,
-					  "SetHwReg8185(): invalid aci: %d !\n",
+				RT_ASSERT(false, "invalid aci: %d !\n",
 					  e_aci);
 				break;
 			}
-			if (rtlusb->acm_method != EACMWAY2_SW)
-				rtlpriv->cfg->ops->set_hw_reg(hw,
-					 HW_VAR_ACM_CTRL, &e_aci);
-			break;
-		}
-	case HW_VAR_ACM_CTRL:{
-			u8 e_aci = *val;
-			union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
-							(&(mac->ac[0].aifs));
-			u8 acm = p_aci_aifsn->f.acm;
-			u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
-
-			acm_ctrl =
-			    acm_ctrl | ((rtlusb->acm_method == 2) ? 0x0 : 0x1);
-			if (acm) {
-				switch (e_aci) {
-				case AC0_BE:
-					acm_ctrl |= AcmHw_BeqEn;
-					break;
-				case AC2_VI:
-					acm_ctrl |= AcmHw_ViqEn;
-					break;
-				case AC3_VO:
-					acm_ctrl |= AcmHw_VoqEn;
-					break;
-				default:
-					RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
-						 "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
-						 acm);
-					break;
-				}
-			} else {
-				switch (e_aci) {
-				case AC0_BE:
-					acm_ctrl &= (~AcmHw_BeqEn);
-					break;
-				case AC2_VI:
-					acm_ctrl &= (~AcmHw_ViqEn);
-					break;
-				case AC3_VO:
-					acm_ctrl &= (~AcmHw_VoqEn);
-					break;
-				default:
-					RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-						 "switch case not processed\n");
-					break;
-				}
-			}
-			RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
-				 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
-				 acm_ctrl);
-			rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
 			break;
 		}
 	case HW_VAR_RCR:{
@@ -1999,12 +1936,15 @@
 		}
 	case HW_VAR_MGT_FILTER:
 		rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *)val);
+		mac->rx_mgt_filter = *(u16 *)val;
 		break;
 	case HW_VAR_CTRL_FILTER:
 		rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *)val);
+		mac->rx_ctrl_filter = *(u16 *)val;
 		break;
 	case HW_VAR_DATA_FILTER:
 		rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *)val);
+		mac->rx_data_filter = *(u16 *)val;
 		break;
 	default:
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
@@ -2280,7 +2220,6 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 	enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
 	u8 u1tmp = 0;
 	bool actuallyset = false;
@@ -2357,20 +2296,7 @@
 		if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
 			/* Enable register area 0x0-0xc. */
 			rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
-			if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
-				/*
-				 * We should configure HW PDn source for WiFi
-				 * ONLY, and then our HW will be set in
-				 * power-down mode if PDn source from all
-				 * functions are configured.
-				 */
-				u1tmp = rtl_read_byte(rtlpriv,
-						      REG_MULTI_FUNC_CTRL);
-				rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
-					       (u1tmp|WL_HWPDN_EN));
-			} else {
-				rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
-			}
+			rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
 		}
 		if (e_rfpowerstate_toset == ERFOFF) {
 			if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 490a7cf..0357133 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -69,8 +69,6 @@
 		chip_version = NORMAL_CHIP;
 		chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
 		chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
-		/* RTL8723 with BT function. */
-		chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
 		if (IS_VENDOR_UMC(chip_version))
 			chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
 					 CHIP_VENDOR_UMC_B_CUT : 0);
@@ -78,10 +76,6 @@
 			value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
 			chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
 				 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
-		} else if (IS_8723_SERIES(chip_version)) {
-			value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
-			chip_version |= ((value32 & RF_RL_ID) ?
-					  CHIP_8723_DRV_REV : 0);
 		}
 	}
 	rtlhal->version  = (enum version_8192c)chip_version;
@@ -114,12 +108,6 @@
 	case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
 		versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
 		break;
-	case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
-		versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
-		break;
-	case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
-		versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
-		break;
 	case VERSION_TEST_CHIP_92C:
 		versionid = "TEST_CHIP_92C";
 		break;
@@ -405,59 +393,9 @@
 void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
-	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-	u32 u4b_ac_param;
 
 	rtl92c_dm_init_edca_turbo(hw);
-	u4b_ac_param = (u32) mac->ac[aci].aifs;
-	u4b_ac_param |=
-	    ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
-	    AC_PARAM_ECW_MIN_OFFSET;
-	u4b_ac_param |=
-	    ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
-	    AC_PARAM_ECW_MAX_OFFSET;
-	u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
-			 AC_PARAM_TXOP_OFFSET;
-	RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD, "queue:%x, ac_param:%x\n",
-		 aci, u4b_ac_param);
-	switch (aci) {
-	case AC1_BK:
-		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
-		break;
-	case AC0_BE:
-		rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
-		break;
-	case AC2_VI:
-		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
-		break;
-	case AC3_VO:
-		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
-		break;
-	default:
-		RT_ASSERT(false, "invalid aci: %d !\n", aci);
-		break;
-	}
-}
-
-/*-------------------------------------------------------------------------
- * HW MAC Address
- *-------------------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
-{
-	u32 i;
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	for (i = 0 ; i < ETH_ALEN ; i++)
-		rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));
-
-	RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
-		 "MAC Address: %02X-%02X-%02X-%02X-%02X-%02X\n",
-		 rtl_read_byte(rtlpriv, REG_MACID),
-		 rtl_read_byte(rtlpriv, REG_MACID+1),
-		 rtl_read_byte(rtlpriv, REG_MACID+2),
-		 rtl_read_byte(rtlpriv, REG_MACID+3),
-		 rtl_read_byte(rtlpriv, REG_MACID+4),
-		 rtl_read_byte(rtlpriv, REG_MACID+5));
+	rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, (u8 *)&aci);
 }
 
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
@@ -656,47 +594,6 @@
 	rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, value);
 }
 
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
-}
-
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
-}
-
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
-}
-
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
-}
-
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	return rtl_read_word(rtlpriv,  REG_RXFLTMAP2);
-}
-
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
-{
-	struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-	rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
-}
 /*==============================================================*/
 
 static u8 _rtl92c_query_rxpwrpercentage(char antpower)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
index e34f0f1..553a4bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.h
@@ -48,7 +48,6 @@
 /*---------------------------------------------------------------
  *	Hardware init functions
  *---------------------------------------------------------------*/
-void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr);
 void rtl92c_init_interrupt(struct ieee80211_hw *hw);
 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size);
 
@@ -73,15 +72,6 @@
 void rtl92c_disable_fast_edca(struct ieee80211_hw *hw);
 void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T);
 
-/* For filter */
-u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw);
-void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw);
-void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter);
-u16 rtl92c_get_data_filter(struct ieee80211_hw *hw);
-void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter);
-
-
 u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw);
 
 struct rx_fwinfo_92c {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 23806c2..fd4a535 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -321,6 +321,7 @@
 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
 	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 587b8c5..7c1db7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -420,7 +420,7 @@
 		 "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
 		 de_digtable->recover_cnt, de_digtable->rx_gain_min);
 
-	/* deal with abnorally large false alarm */
+	/* deal with abnormally large false alarm */
 	if (falsealm_cnt->cnt_all > 10000) {
 		RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
 			 "dm_DIG(): Abnormally false alarm case\n");
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
index 1646e7c..8a38daa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.h
@@ -110,28 +110,6 @@
 #define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val)	\
 	SET_BITS_TO_LE_1BYTE((__ph2ccmd) + 2, 0, 8, __val)
 
-struct rtl92d_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodeSize;
-	u16 rsvd2;
-
-	u32 svnindex;
-	u32 rsvd3;
-
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 int rtl92d_download_fw(struct ieee80211_hw *hw);
 void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
 			 u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 1961b8e..bb06fe8 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3515,14 +3515,14 @@
 	for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
 	     rfpath++) {
 		if (rtlhal->current_bandtype == BAND_ON_2_4G) {
-			/* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+			/* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
 			rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
 				      BIT(18), 0);
 			/* RF0x0b[16:14] =3b'111 */
 			rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
 				      0x1c000, 0x07);
 		} else {
-			/* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+			/* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
 			rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
 				      BIT(16) | BIT(18),
 				      (BIT(16) | BIT(8)) >> 8);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
index 232865c..0708eed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
@@ -198,7 +198,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl92c_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -207,8 +207,8 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
-	rtlhal->fw_version = pfwheader->version;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+	rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 	rtlhal->fw_subversion = pfwheader->subversion;
 	pfwdata = (u8 *)rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
@@ -219,10 +219,10 @@
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware Version(%d), Signature(%#x),Size(%d)\n",
 			  pfwheader->version, pfwheader->signature,
-			  (int)sizeof(struct rtl92c_firmware_header));
+			  (int)sizeof(struct rtlwifi_firmware_header));
 
-		pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	} else {
 		RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
 			 "Firmware no Header, Signature(%#x)\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
index 3e2a48e..069da1e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.h
@@ -33,7 +33,7 @@
 #define FW_8192C_POLLING_TIMEOUT_COUNT		3000
 
 #define IS_FW_HEADER_EXIST(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x92E0)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x92E0)
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
 #define H2C_92E_RSVDPAGE_LOC_LEN		5
@@ -89,25 +89,6 @@
 #define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define	FW_PWR_STATE_RF_OFF	0
 
-struct rtl92c_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodesize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8192e_h2c_cmd {
 	H2C_92E_RSVDPAGE = 0,
 	H2C_92E_MSRRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
index a863a44..018340a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/phy.c
@@ -449,7 +449,7 @@
 				 "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
 				  rate_section, path, txnum);
 			break;
-		};
+		}
 	} else {
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 			 "Invalid Band %d\n", band);
@@ -489,7 +489,7 @@
 				 "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
 				  rate_section, path, txnum);
 			break;
-		};
+		}
 	} else {
 		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 			 "Invalid Band %d()\n", band);
@@ -853,7 +853,7 @@
 		else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
 			index = (u8)((regaddr - 0xE20) / 4);
 		break;
-	};
+	}
 	return index;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
index 8280bab..3859b3e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
@@ -205,9 +205,9 @@
 	return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-	return (hdr->signature & 0xfff0) == 0x2300;
+	return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x2300;
 }
 
 static struct rtl_hal_ops rtl8723e_hal_ops = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 7bf88d9..d091f1d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -209,9 +209,9 @@
 	return true;
 }
 
-static bool is_fw_header(struct rtl8723e_firmware_header *hdr)
+static bool is_fw_header(struct rtlwifi_firmware_header *hdr)
 {
-	return (hdr->signature & 0xfff0) == 0x5300;
+	return (le16_to_cpu(hdr->signature) & 0xfff0) == 0x5300;
 }
 
 static struct rtl_hal_ops rtl8723be_hal_ops = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
index dd698e7..a2f5e89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.c
@@ -253,7 +253,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl8723e_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -263,7 +263,7 @@
 	if (!rtlhal->pfirmware)
 		return 1;
 
-	pfwheader = (struct rtl8723e_firmware_header *)rtlhal->pfirmware;
+	pfwheader = (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
 	pfwdata = rtlhal->pfirmware;
 	fwsize = rtlhal->fwsize;
 
@@ -275,10 +275,10 @@
 		RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
 			 "Firmware Version(%d), Signature(%#x), Size(%d)\n",
 			 pfwheader->version, pfwheader->signature,
-			 (int)sizeof(struct rtl8723e_firmware_header));
+			 (int)sizeof(struct rtlwifi_firmware_header));
 
-		pfwdata = pfwdata + sizeof(struct rtl8723e_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl8723e_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	if (rtl_read_byte(rtlpriv, REG_MCUFWDL)&BIT(7)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
index 3ebafc8..8ea372d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8723com/fw_common.h
@@ -50,25 +50,6 @@
 	VERSION_UNKNOWN = 0xFF,
 };
 
-struct rtl8723e_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodesize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8723be_cmd {
 	H2C_8723BE_RSVDPAGE = 0,
 	H2C_8723BE_JOINBSSRPT = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
index 95e9562..525eb23 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.c
@@ -210,7 +210,7 @@
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	struct rtl8821a_firmware_header *pfwheader;
+	struct rtlwifi_firmware_header *pfwheader;
 	u8 *pfwdata;
 	u32 fwsize;
 	int err;
@@ -228,8 +228,8 @@
 			return 1;
 
 		pfwheader =
-		  (struct rtl8821a_firmware_header *)rtlhal->wowlan_firmware;
-		rtlhal->fw_version = pfwheader->version;
+		  (struct rtlwifi_firmware_header *)rtlhal->wowlan_firmware;
+		rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 		rtlhal->fw_subversion = pfwheader->subversion;
 		pfwdata = (u8 *)rtlhal->wowlan_firmware;
 		fwsize = rtlhal->wowlan_fwsize;
@@ -238,8 +238,8 @@
 			return 1;
 
 		pfwheader =
-		  (struct rtl8821a_firmware_header *)rtlhal->pfirmware;
-		rtlhal->fw_version = pfwheader->version;
+		  (struct rtlwifi_firmware_header *)rtlhal->pfirmware;
+		rtlhal->fw_version = le16_to_cpu(pfwheader->version);
 		rtlhal->fw_subversion = pfwheader->subversion;
 		pfwdata = (u8 *)rtlhal->pfirmware;
 		fwsize = rtlhal->fwsize;
@@ -255,8 +255,8 @@
 			 "Firmware Version(%d), Signature(%#x)\n",
 			 pfwheader->version, pfwheader->signature);
 
-		pfwdata = pfwdata + sizeof(struct rtl8821a_firmware_header);
-		fwsize = fwsize - sizeof(struct rtl8821a_firmware_header);
+		pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
+		fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
 	}
 
 	if (rtlhal->mac_func_enable) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
index 591c14c..8f5b4aa 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/fw.h
@@ -34,10 +34,10 @@
 #define FW_8821AE_POLLING_TIMEOUT_COUNT	6000
 
 #define IS_FW_HEADER_EXIST_8812(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x9500)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x9500)
 
 #define IS_FW_HEADER_EXIST_8821(_pfwhdr)	\
-	((_pfwhdr->signature&0xFFF0) == 0x2100)
+	((le16_to_cpu(_pfwhdr->signature) & 0xFFF0) == 0x2100)
 
 #define USE_OLD_WOWLAN_DEBUG_FW 0
 
@@ -137,25 +137,6 @@
 #define	FW_PWR_STATE_ACTIVE	((FW_PS_RF_ON) | (FW_PS_REGISTER_ACTIVE))
 #define	FW_PWR_STATE_RF_OFF	0
 
-struct rtl8821a_firmware_header {
-	u16 signature;
-	u8 category;
-	u8 function;
-	u16 version;
-	u8 subversion;
-	u8 rsvd1;
-	u8 month;
-	u8 date;
-	u8 hour;
-	u8 minute;
-	u16 ramcodeSize;
-	u16 rsvd2;
-	u32 svnindex;
-	u32 rsvd3;
-	u32 rsvd4;
-	u32 rsvd5;
-};
-
 enum rtl8812_c2h_evt {
 	C2H_8812_DBG = 0,
 	C2H_8812_LB = 1,
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 3236d44..b7f18e21 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -2180,7 +2180,7 @@
 
 	rtl_write_byte(rtlpriv, MSR, bt_msr);
 	rtlpriv->cfg->ops->led_control(hw, ledaction);
-	if ((bt_msr & 0xfc) == MSR_AP)
+	if ((bt_msr & MSR_MASK) == MSR_AP)
 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
 	else
 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
index 53668fc..1d6110f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
@@ -429,6 +429,7 @@
 #define	MSR_ADHOC				0x01
 #define	MSR_INFRA				0x02
 #define	MSR_AP					0x03
+#define MSR_MASK				0x03
 
 #define	RRSR_RSC_OFFSET				21
 #define	RRSR_SHORT_OFFSET			23
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 2b770b5..b90ca61 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -222,6 +222,25 @@
 #define	WOL_REASON_REALWOW_V2_WAKEUPPKT	BIT(9)
 #define	WOL_REASON_REALWOW_V2_ACKLOST	BIT(10)
 
+struct rtlwifi_firmware_header {
+	__le16 signature;
+	u8 category;
+	u8 function;
+	__le16 version;
+	u8 subversion;
+	u8 rsvd1;
+	u8 month;
+	u8 date;
+	u8 hour;
+	u8 minute;
+	__le16 ramcodeSize;
+	__le16 rsvd2;
+	__le32 svnindex;
+	__le32 rsvd3;
+	__le32 rsvd4;
+	__le32 rsvd5;
+};
+
 struct txpower_info_2g {
 	u8 index_cck_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
 	u8 index_bw40_base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
@@ -2064,16 +2083,12 @@
 	bool tx_enable_sw_calc_duration;
 };
 
-struct rtl92c_firmware_header;
-
 struct rtl_wow_pattern {
 	u8 type;
 	u16 crc;
 	u32 mask[4];
 };
 
-struct rtl8723e_firmware_header;
-
 struct rtl_hal_ops {
 	int (*init_sw_vars) (struct ieee80211_hw *hw);
 	void (*deinit_sw_vars) (struct ieee80211_hw *hw);
@@ -2177,7 +2192,7 @@
 	void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id,
 			      u32 cmd_len, u8 *p_cmdbuffer);
 	bool (*get_btc_status) (void);
-	bool (*is_fw_header)(struct rtl8723e_firmware_header *hdr);
+	bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr);
 	u32 (*rx_command_packet)(struct ieee80211_hw *hw,
 				 struct rtl_stats status, struct sk_buff *skb);
 	void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index 0c0d5cd..7c355ff 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -118,7 +118,11 @@
 	if (passive)
 		scan_options |= WL1271_SCAN_OPT_PASSIVE;
 
-	cmd->params.role_id = wlvif->role_id;
+	/* scan on the dev role if the regular one is not started */
+	if (wlcore_is_p2p_mgmt(wlvif))
+		cmd->params.role_id = wlvif->dev_role_id;
+	else
+		cmd->params.role_id = wlvif->role_id;
 
 	if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
 		ret = -EINVAL;
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
index 67f2a0e..4be0409 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.c
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -282,3 +282,30 @@
 	kfree(acx);
 	return ret;
 }
+
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
+{
+	struct acx_dynamic_fw_traces_cfg *acx;
+	int ret;
+
+	wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
+		     wl->dynamic_fw_traces);
+
+	acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+	if (!acx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
+
+	ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
+				   acx, sizeof(*acx));
+	if (ret < 0) {
+		wl1271_warning("acx config dynamic fw traces failed: %d", ret);
+		goto out;
+	}
+out:
+	kfree(acx);
+	return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
index 4afccd4..342a299 100644
--- a/drivers/net/wireless/ti/wl18xx/acx.h
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -35,7 +35,8 @@
 	ACX_PEER_CAP			 = 0x0056,
 	ACX_INTERRUPT_NOTIFY		 = 0x0057,
 	ACX_RX_BA_FILTER		 = 0x0058,
-	ACX_AP_SLEEP_CFG                 = 0x0059
+	ACX_AP_SLEEP_CFG                 = 0x0059,
+	ACX_DYNAMIC_TRACES_CFG		 = 0x005A,
 };
 
 /* numbers of bits the length field takes (add 1 for the actual number) */
@@ -92,27 +93,26 @@
 
 
 struct wl18xx_acx_error_stats {
-	u32 error_frame;
-	u32 error_null_Frame_tx_start;
-	u32 error_numll_frame_cts_start;
-	u32 error_bar_retry;
-	u32 error_frame_cts_nul_flid;
+	u32 error_frame_non_ctrl;
+	u32 error_frame_ctrl;
+	u32 error_frame_during_protection;
+	u32 null_frame_tx_start;
+	u32 null_frame_cts_start;
+	u32 bar_retry;
+	u32 num_frame_cts_nul_flid;
+	u32 tx_abort_failure;
+	u32 tx_resume_failure;
+	u32 rx_cmplt_db_overflow_cnt;
+	u32 elp_while_rx_exch;
+	u32 elp_while_tx_exch;
+	u32 elp_while_tx;
+	u32 elp_while_nvic_pending;
+	u32 rx_excessive_frame_len;
+	u32 burst_mismatch;
+	u32 tbc_exch_mismatch;
 } __packed;
 
-struct wl18xx_acx_debug_stats {
-	u32 debug1;
-	u32 debug2;
-	u32 debug3;
-	u32 debug4;
-	u32 debug5;
-	u32 debug6;
-} __packed;
-
-struct wl18xx_acx_ring_stats {
-	u32 prepared_descs;
-	u32 tx_cmplt;
-} __packed;
-
+#define NUM_OF_RATES_INDEXES 30
 struct wl18xx_acx_tx_stats {
 	u32 tx_prepared_descs;
 	u32 tx_cmplt;
@@ -122,7 +122,7 @@
 	u32 tx_data_programmed;
 	u32 tx_burst_programmed;
 	u32 tx_starts;
-	u32 tx_imm_resp;
+	u32 tx_stop;
 	u32 tx_start_templates;
 	u32 tx_start_int_templates;
 	u32 tx_start_fw_gen;
@@ -131,13 +131,14 @@
 	u32 tx_exch;
 	u32 tx_retry_template;
 	u32 tx_retry_data;
+	u32 tx_retry_per_rate[NUM_OF_RATES_INDEXES];
 	u32 tx_exch_pending;
 	u32 tx_exch_expiry;
 	u32 tx_done_template;
 	u32 tx_done_data;
 	u32 tx_done_int_template;
-	u32 tx_frame_checksum;
-	u32 tx_checksum_result;
+	u32 tx_cfe1;
+	u32 tx_cfe2;
 	u32 frag_called;
 	u32 frag_mpdu_alloc_failed;
 	u32 frag_init_called;
@@ -165,11 +166,8 @@
 	u32 rx_cmplt_task;
 	u32 rx_phy_hdr;
 	u32 rx_timeout;
+	u32 rx_rts_timeout;
 	u32 rx_timeout_wa;
-	u32 rx_wa_density_dropped_frame;
-	u32 rx_wa_ba_not_expected;
-	u32 rx_frame_checksum;
-	u32 rx_checksum_result;
 	u32 defrag_called;
 	u32 defrag_init_called;
 	u32 defrag_in_process_called;
@@ -179,6 +177,7 @@
 	u32 decrypt_key_not_found;
 	u32 defrag_need_decrypt;
 	u32 rx_tkip_replays;
+	u32 rx_xfr;
 } __packed;
 
 struct wl18xx_acx_isr_stats {
@@ -193,21 +192,13 @@
 	u32 connection_out_of_sync;
 	u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
 	u32 rcvd_awake_bcns_cnt;
-} __packed;
-
-struct wl18xx_acx_event_stats {
-	u32 calibration;
-	u32 rx_mismatch;
-	u32 rx_mem_empty;
-} __packed;
-
-struct wl18xx_acx_ps_poll_stats {
-	u32 ps_poll_timeouts;
-	u32 upsd_timeouts;
-	u32 upsd_max_ap_turn;
-	u32 ps_poll_max_ap_turn;
-	u32 ps_poll_utilization;
-	u32 upsd_utilization;
+	u32 sleep_time_count;
+	u32 sleep_time_avg;
+	u32 sleep_cycle_avg;
+	u32 sleep_percent;
+	u32 ap_sleep_active_conf;
+	u32 ap_sleep_user_conf;
+	u32 ap_sleep_counter;
 } __packed;
 
 struct wl18xx_acx_rx_filter_stats {
@@ -227,11 +218,11 @@
 } __packed;
 
 #define AGGR_STATS_TX_AGG	16
-#define AGGR_STATS_TX_RATE	16
 #define AGGR_STATS_RX_SIZE_LEN	16
 
 struct wl18xx_acx_aggr_stats {
-	u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+	u32 tx_agg_rate[AGGR_STATS_TX_AGG];
+	u32 tx_agg_len[AGGR_STATS_TX_AGG];
 	u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
 } __packed;
 
@@ -240,8 +231,6 @@
 struct wl18xx_acx_pipeline_stats {
 	u32 hs_tx_stat_fifo_int;
 	u32 hs_rx_stat_fifo_int;
-	u32 tcp_tx_stat_fifo_int;
-	u32 tcp_rx_stat_fifo_int;
 	u32 enc_tx_stat_fifo_int;
 	u32 enc_rx_stat_fifo_int;
 	u32 rx_complete_stat_fifo_int;
@@ -249,38 +238,61 @@
 	u32 post_proc_swi;
 	u32 sec_frag_swi;
 	u32 pre_to_defrag_swi;
-	u32 defrag_to_csum_swi;
-	u32 csum_to_rx_xfer_swi;
+	u32 defrag_to_rx_xfer_swi;
 	u32 dec_packet_in;
 	u32 dec_packet_in_fifo_full;
 	u32 dec_packet_out;
-	u32 cs_rx_packet_in;
-	u32 cs_rx_packet_out;
 	u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+	u16 padding;
 } __packed;
 
-struct wl18xx_acx_mem_stats {
-	u32 rx_free_mem_blks;
-	u32 tx_free_mem_blks;
-	u32 fwlog_free_mem_blks;
-	u32 fw_gen_free_mem_blks;
+#define DIVERSITY_STATS_NUM_OF_ANT	2
+
+struct wl18xx_acx_diversity_stats {
+	u32 num_of_packets_per_ant[DIVERSITY_STATS_NUM_OF_ANT];
+	u32 total_num_of_toggles;
+} __packed;
+
+struct wl18xx_acx_thermal_stats {
+	u16 irq_thr_low;
+	u16 irq_thr_high;
+	u16 tx_stop;
+	u16 tx_resume;
+	u16 false_irq;
+	u16 adc_source_unexpected;
+} __packed;
+
+#define WL18XX_NUM_OF_CALIBRATIONS_ERRORS 18
+struct wl18xx_acx_calib_failure_stats {
+	u16 fail_count[WL18XX_NUM_OF_CALIBRATIONS_ERRORS];
+	u32 calib_count;
+} __packed;
+
+struct wl18xx_roaming_stats {
+	s32 rssi_level;
+} __packed;
+
+struct wl18xx_dfs_stats {
+	u32 num_of_radar_detections;
 } __packed;
 
 struct wl18xx_acx_statistics {
 	struct acx_header header;
 
 	struct wl18xx_acx_error_stats		error;
-	struct wl18xx_acx_debug_stats		debug;
 	struct wl18xx_acx_tx_stats		tx;
 	struct wl18xx_acx_rx_stats		rx;
 	struct wl18xx_acx_isr_stats		isr;
 	struct wl18xx_acx_pwr_stats		pwr;
-	struct wl18xx_acx_ps_poll_stats		ps_poll;
 	struct wl18xx_acx_rx_filter_stats	rx_filter;
 	struct wl18xx_acx_rx_rate_stats		rx_rate;
 	struct wl18xx_acx_aggr_stats		aggr_size;
 	struct wl18xx_acx_pipeline_stats	pipeline;
-	struct wl18xx_acx_mem_stats		mem;
+	struct wl18xx_acx_diversity_stats	diversity;
+	struct wl18xx_acx_thermal_stats		thermal;
+	struct wl18xx_acx_calib_failure_stats	calib;
+	struct wl18xx_roaming_stats		roaming;
+	struct wl18xx_dfs_stats			dfs;
 } __packed;
 
 struct wl18xx_acx_clear_statistics {
@@ -367,6 +379,15 @@
 	u8 idle_conn_thresh;
 } __packed;
 
+/*
+ * ACX_DYNAMIC_TRACES_CFG
+ * configure the FW dynamic traces
+ */
+struct acx_dynamic_fw_traces_cfg {
+	struct acx_header header;
+	__le32 dynamic_fw_traces;
+} __packed;
+
 int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
 				  u32 sdio_blk_size, u32 extra_mem_blks,
 				  u32 len_field_size);
@@ -380,5 +401,6 @@
 int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl, bool action);
 int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action);
 int wl18xx_acx_ap_sleep(struct wl1271 *wl);
+int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl);
 
 #endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index 5fbd223..4edfe28 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -36,18 +36,23 @@
 	DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
 
 
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
-
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
@@ -57,7 +62,7 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
@@ -66,13 +71,15 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
+				  NUM_OF_RATES_INDEXES);
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
@@ -97,11 +104,8 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
@@ -111,6 +115,7 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
 
@@ -120,14 +125,13 @@
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
 				  PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
 WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
-
-
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
@@ -141,14 +145,14 @@
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
 
-WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
-				  AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
+				  AGGR_STATS_TX_AGG);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
+				  AGGR_STATS_TX_AGG);
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
 				  AGGR_STATS_RX_SIZE_LEN);
 
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
@@ -156,21 +160,32 @@
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
 WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
 
 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
 				  PIPE_STATS_HW_FIFO);
 
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
-WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
+				  DIVERSITY_STATS_NUM_OF_ANT);
+WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
+				  WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
+WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
 
 static ssize_t conf_read(struct file *file, char __user *user_buf,
 			 size_t count, loff_t *ppos)
@@ -281,6 +296,55 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t dynamic_fw_traces_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct wl1271 *wl = file->private_data;
+	unsigned long value;
+	int ret;
+
+	ret = kstrtoul_from_user(user_buf, count, 0, &value);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&wl->mutex);
+
+	wl->dynamic_fw_traces = value;
+
+	if (unlikely(wl->state != WLCORE_STATE_ON))
+		goto out;
+
+	ret = wl1271_ps_elp_wakeup(wl);
+	if (ret < 0)
+		goto out;
+
+	ret = wl18xx_acx_dynamic_fw_traces(wl);
+	if (ret < 0)
+		count = ret;
+
+	wl1271_ps_elp_sleep(wl);
+out:
+	mutex_unlock(&wl->mutex);
+	return count;
+}
+
+static ssize_t dynamic_fw_traces_read(struct file *file,
+					char __user *userbuf,
+					size_t count, loff_t *ppos)
+{
+	struct wl1271 *wl = file->private_data;
+	return wl1271_format_buffer(userbuf, count, ppos,
+				    "%d\n", wl->dynamic_fw_traces);
+}
+
+static const struct file_operations dynamic_fw_traces_ops = {
+	.read = dynamic_fw_traces_read,
+	.write = dynamic_fw_traces_write,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
 int wl18xx_debugfs_add_files(struct wl1271 *wl,
 			     struct dentry *rootdir)
 {
@@ -301,18 +365,23 @@
 
 	DEBUGFS_ADD(clear_fw_stats, stats);
 
-	DEBUGFS_FWSTATS_ADD(debug, debug1);
-	DEBUGFS_FWSTATS_ADD(debug, debug2);
-	DEBUGFS_FWSTATS_ADD(debug, debug3);
-	DEBUGFS_FWSTATS_ADD(debug, debug4);
-	DEBUGFS_FWSTATS_ADD(debug, debug5);
-	DEBUGFS_FWSTATS_ADD(debug, debug6);
-
-	DEBUGFS_FWSTATS_ADD(error, error_frame);
-	DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
-	DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
-	DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
-	DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+	DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
+	DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
+	DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
+	DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
+	DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
+	DEBUGFS_FWSTATS_ADD(error, bar_retry);
+	DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
+	DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
+	DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
+	DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
+	DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
+	DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
+	DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
+	DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
 
 	DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
 	DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
@@ -322,7 +391,7 @@
 	DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
 	DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
 	DEBUGFS_FWSTATS_ADD(tx, tx_starts);
-	DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+	DEBUGFS_FWSTATS_ADD(tx, tx_stop);
 	DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
 	DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
 	DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
@@ -331,13 +400,14 @@
 	DEBUGFS_FWSTATS_ADD(tx, tx_exch);
 	DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
 	DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+	DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
 	DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
 	DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
 	DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
 	DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
 	DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
-	DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
-	DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+	DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
+	DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
 	DEBUGFS_FWSTATS_ADD(tx, frag_called);
 	DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
 	DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
@@ -362,11 +432,8 @@
 	DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
 	DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
 	DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+	DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
 	DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
-	DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
-	DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
-	DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
-	DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_called);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
@@ -376,6 +443,7 @@
 	DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
 	DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
 	DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+	DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
 
 	DEBUGFS_FWSTATS_ADD(isr, irqs);
 
@@ -384,13 +452,13 @@
 	DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
 	DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
 	DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
-
-	DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
-	DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
-	DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
-	DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
-	DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
-	DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
+	DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
+	DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
+	DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
+	DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
 
 	DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
 	DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
@@ -404,12 +472,11 @@
 
 	DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
 
-	DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+	DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
+	DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
 	DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
 
 	DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
-	DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
-	DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
 	DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
 	DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
 	DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
@@ -417,22 +484,33 @@
 	DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
 	DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
 	DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
-	DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
-	DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+	DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
 	DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
 	DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
 	DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
-	DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
-	DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
 	DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
 
-	DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
-	DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
-	DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
-	DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+	DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
+	DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
+
+	DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
+	DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
+	DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
+	DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
+	DEBUGFS_FWSTATS_ADD(thermal, false_irq);
+	DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
+
+	DEBUGFS_FWSTATS_ADD(calib, fail_count);
+
+	DEBUGFS_FWSTATS_ADD(calib, calib_count);
+
+	DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
+
+	DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
 
 	DEBUGFS_ADD(conf, moddir);
 	DEBUGFS_ADD(radar_detection, moddir);
+	DEBUGFS_ADD(dynamic_fw_traces, moddir);
 
 	return 0;
 
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 548bb9e..09c7e09 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -112,6 +112,14 @@
 	return 0;
 }
 
+static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb)
+{
+	u32 clock;
+	/* convert the MSB+LSB to a u32 TSF value */
+	clock = (tsf_msb << 16) | tsf_lsb;
+	wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock);
+}
+
 int wl18xx_process_mailbox_events(struct wl1271 *wl)
 {
 	struct wl18xx_event_mailbox *mbox = wl->mbox;
@@ -128,6 +136,11 @@
 			wl18xx_scan_completed(wl, wl->scan_wlvif);
 	}
 
+	if (vector & TIME_SYNC_EVENT_ID)
+		wlcore_event_time_sync(wl,
+				mbox->time_sync_tsf_msb,
+				mbox->time_sync_tsf_lsb);
+
 	if (vector & RADAR_DETECTED_EVENT_ID) {
 		wl1271_info("radar event: channel %d type %s",
 			    mbox->radar_channel,
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 266ee87..f3d4f13 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,8 +38,9 @@
 	REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID      = BIT(18),
 	DFS_CHANNELS_CONFIG_COMPLETE_EVENT       = BIT(19),
 	PERIODIC_SCAN_REPORT_EVENT_ID            = BIT(20),
-	SMART_CONFIG_SYNC_EVENT_ID		 = BIT(22),
-	SMART_CONFIG_DECODE_EVENT_ID		 = BIT(23),
+	SMART_CONFIG_SYNC_EVENT_ID               = BIT(22),
+	SMART_CONFIG_DECODE_EVENT_ID             = BIT(23),
+	TIME_SYNC_EVENT_ID                       = BIT(24),
 };
 
 enum wl18xx_radar_types {
@@ -95,13 +96,16 @@
 	/* smart config sync channel */
 	u8 sc_sync_channel;
 	u8 sc_sync_band;
-	u8 padding2[2];
 
+	/* time sync msb */
+	u16 time_sync_tsf_msb;
 	/* radar detect */
 	u8 radar_channel;
 	u8 radar_type;
 
-	u8 padding3[2];
+	/* time sync lsb */
+	u16 time_sync_tsf_lsb;
+
 } __packed;
 
 int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 49aca2c..abbf054 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -422,6 +422,8 @@
 		.num_probe_reqs			= 2,
 		.rssi_threshold			= -90,
 		.snr_threshold			= 0,
+		.num_short_intervals		= SCAN_MAX_SHORT_INTERVALS,
+		.long_interval			= 30000,
 	},
 	.ht = {
 		.rx_ba_win_size = 32,
@@ -1026,8 +1028,8 @@
 		CHANNEL_SWITCH_COMPLETE_EVENT_ID |
 		DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
 		SMART_CONFIG_SYNC_EVENT_ID |
-		SMART_CONFIG_DECODE_EVENT_ID;
-;
+		SMART_CONFIG_DECODE_EVENT_ID |
+		TIME_SYNC_EVENT_ID;
 
 	wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
 
@@ -1159,6 +1161,11 @@
 	if (ret < 0)
 		return ret;
 
+	/* set the dynamic fw traces bitmap */
+	ret = wl18xx_acx_dynamic_fw_traces(wl);
+	if (ret < 0)
+		return ret;
+
 	if (checksum_param) {
 		ret = wl18xx_acx_set_checksum_state(wl);
 		if (ret != 0)
@@ -1797,7 +1804,7 @@
 
 static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
 	{
-		.max = 3,
+		.max = 2,
 		.types = BIT(NL80211_IFTYPE_STATION),
 	},
 	{
@@ -1806,6 +1813,10 @@
 			 BIT(NL80211_IFTYPE_P2P_GO) |
 			 BIT(NL80211_IFTYPE_P2P_CLIENT),
 	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
 };
 
 static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
@@ -1813,6 +1824,48 @@
 		.max = 2,
 		.types = BIT(NL80211_IFTYPE_AP),
 	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_cl_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_go_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
 };
 
 static const struct ieee80211_iface_combination
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index 98666f2..c938c49 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -51,7 +51,11 @@
 		goto out;
 	}
 
-	cmd->role_id = wlvif->role_id;
+	/* scan on the dev role if the regular one is not started */
+	if (wlcore_is_p2p_mgmt(wlvif))
+		cmd->role_id = wlvif->dev_role_id;
+	else
+		cmd->role_id = wlvif->role_id;
 
 	if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
 		ret = -EINVAL;
@@ -223,9 +227,20 @@
 				    SCAN_TYPE_PERIODIC);
 	wl18xx_adjust_channels(cmd, cmd_channels);
 
-	cmd->short_cycles_sec = 0;
-	cmd->long_cycles_sec = cpu_to_le16(req->interval);
-	cmd->short_cycles_count = 0;
+	if (c->num_short_intervals && c->long_interval &&
+	    c->long_interval > req->interval) {
+		cmd->short_cycles_msec = cpu_to_le16(req->interval);
+		cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
+		cmd->short_cycles_count = c->num_short_intervals;
+	} else {
+		cmd->short_cycles_msec = 0;
+		cmd->long_cycles_msec = cpu_to_le16(req->interval);
+		cmd->short_cycles_count = 0;
+	}
+	wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
+		     le16_to_cpu(cmd->short_cycles_msec),
+		     le16_to_cpu(cmd->long_cycles_msec),
+		     cmd->short_cycles_count);
 
 	cmd->total_cycles = 0;
 
diff --git a/drivers/net/wireless/ti/wl18xx/scan.h b/drivers/net/wireless/ti/wl18xx/scan.h
index 2e636aa..66a763f 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.h
+++ b/drivers/net/wireless/ti/wl18xx/scan.h
@@ -74,8 +74,8 @@
 	u8 dfs;		   /* number of dfs channels in 5ghz */
 	u8 passive_active; /* number of passive before active channels 2.4ghz */
 
-	__le16 short_cycles_sec;
-	__le16 long_cycles_sec;
+	__le16 short_cycles_msec;
+	__le16 long_cycles_msec;
 	u8 short_cycles_count;
 	u8 total_cycles; /* 0 - infinite */
 	u8 padding[2];
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 68919f8..f01d24b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -2003,12 +2003,15 @@
 		      wlvif->bss_type == BSS_TYPE_IBSS)))
 		return -EINVAL;
 
-	ret = wl12xx_cmd_role_enable(wl,
-				     wl12xx_wlvif_to_vif(wlvif)->addr,
-				     WL1271_ROLE_DEVICE,
-				     &wlvif->dev_role_id);
-	if (ret < 0)
-		goto out;
+	/* the dev role is already started for p2p mgmt interfaces */
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_enable(wl,
+					     wl12xx_wlvif_to_vif(wlvif)->addr,
+					     WL1271_ROLE_DEVICE,
+					     &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+	}
 
 	ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
 	if (ret < 0)
@@ -2023,7 +2026,8 @@
 out_stop:
 	wl12xx_cmd_role_stop_dev(wl, wlvif);
 out_disable:
-	wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+	if (!wlcore_is_p2p_mgmt(wlvif))
+		wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
 out:
 	return ret;
 }
@@ -2052,10 +2056,42 @@
 	if (ret < 0)
 		goto out;
 
-	ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
-	if (ret < 0)
-		goto out;
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+	}
 
 out:
 	return ret;
 }
+
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			   u8 feature, u8 enable, u8 value)
+{
+	struct wlcore_cmd_generic_cfg *cmd;
+	int ret;
+
+	wl1271_debug(DEBUG_CMD,
+		     "cmd generic cfg (role %d feature %d enable %d value %d)",
+		     wlvif->role_id, feature, enable, value);
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->role_id = wlvif->role_id;
+	cmd->feature = feature;
+	cmd->enable = enable;
+	cmd->value = value;
+
+	ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
+	if (ret < 0) {
+		wl1271_error("failed to send generic cfg command");
+		goto out_free;
+	}
+out_free:
+	kfree(cmd);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e14cd40..8dc46c0 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -92,6 +92,8 @@
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
 				     enum ieee80211_band band);
 int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
+int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+			   u8 feature, u8 enable, u8 value);
 int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
 int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
@@ -652,6 +654,19 @@
 	u8 padding[3];
 } __packed;
 
+enum wlcore_generic_cfg_feature {
+	WLCORE_CFG_FEATURE_RADAR_DEBUG = 2,
+};
+
+struct wlcore_cmd_generic_cfg {
+	struct wl1271_cmd_header header;
+
+	u8 role_id;
+	u8 feature;
+	u8 enable;
+	u8 value;
+} __packed;
+
 struct wl12xx_cmd_config_fwlog {
 	struct wl1271_cmd_header header;
 
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 166add0..52a9d1b 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -1186,6 +1186,15 @@
 
 	/* SNR threshold to be used for filtering */
 	s8 snr_threshold;
+
+	/*
+	 * number of short intervals scheduled scan cycles before
+	 * switching to long intervals
+	 */
+	u8 num_short_intervals;
+
+	/* interval between each long scheduled scan cycle (in ms) */
+	u16 long_interval;
 } __packed;
 
 struct conf_ht_setting {
@@ -1352,7 +1361,7 @@
  * version, the two LSB are the lower driver's private conf
  * version.
  */
-#define WLCORE_CONF_VERSION	(0x0006 << 16)
+#define WLCORE_CONF_VERSION	(0x0007 << 16)
 #define WLCORE_CONF_MASK	0xffff0000
 #define WLCORE_CONF_SIZE	(sizeof(struct wlcore_conf_header) +	\
 				 sizeof(struct wlcore_conf))
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 5ca1fb1..e92f263 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -348,7 +348,7 @@
 }
 
 /* generic sta initialization (non vif-specific) */
-static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 {
 	int ret;
 
diff --git a/drivers/net/wireless/ti/wlcore/init.h b/drivers/net/wireless/ti/wlcore/init.h
index a45fbfdd..fd1cdb6 100644
--- a/drivers/net/wireless/ti/wlcore/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -35,5 +35,6 @@
 int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
 int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif);
 
 #endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 337223b..e819369 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1792,6 +1792,9 @@
 
 	wl->wow_enabled = true;
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		ret = wl1271_configure_suspend(wl, wlvif, wow);
 		if (ret < 0) {
 			mutex_unlock(&wl->mutex);
@@ -1901,6 +1904,9 @@
 		goto out;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		wl1271_configure_resume(wl, wlvif);
 	}
 
@@ -2256,6 +2262,7 @@
 		wlvif->p2p = 1;
 		/* fall-through */
 	case NL80211_IFTYPE_STATION:
+	case NL80211_IFTYPE_P2P_DEVICE:
 		wlvif->bss_type = BSS_TYPE_STA_BSS;
 		break;
 	case NL80211_IFTYPE_ADHOC:
@@ -2477,7 +2484,8 @@
 {
 	struct wlcore_hw_queue_iter_data *iter_data = data;
 
-	if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
 		return;
 
 	if (iter_data->cur_running || vif == iter_data->vif) {
@@ -2495,6 +2503,11 @@
 	struct wlcore_hw_queue_iter_data iter_data = {};
 	int i, q_base;
 
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+		return 0;
+	}
+
 	iter_data.vif = vif;
 
 	/* mark all bits taken by active interfaces */
@@ -2618,14 +2631,27 @@
 			goto out;
 	}
 
-	ret = wl12xx_cmd_role_enable(wl, vif->addr,
-				     role_type, &wlvif->role_id);
-	if (ret < 0)
-		goto out;
+	if (!wlcore_is_p2p_mgmt(wlvif)) {
+		ret = wl12xx_cmd_role_enable(wl, vif->addr,
+					     role_type, &wlvif->role_id);
+		if (ret < 0)
+			goto out;
 
-	ret = wl1271_init_vif_specific(wl, vif);
-	if (ret < 0)
-		goto out;
+		ret = wl1271_init_vif_specific(wl, vif);
+		if (ret < 0)
+			goto out;
+
+	} else {
+		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
+					     &wlvif->dev_role_id);
+		if (ret < 0)
+			goto out;
+
+		/* needed mainly for configuring rate policies */
+		ret = wl1271_sta_hw_init(wl, wlvif);
+		if (ret < 0)
+			goto out;
+	}
 
 	list_add(&wlvif->list, &wl->wlvif_list);
 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
@@ -2696,9 +2722,15 @@
 				wl12xx_stop_dev(wl, wlvif);
 		}
 
-		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
-		if (ret < 0)
-			goto deinit;
+		if (!wlcore_is_p2p_mgmt(wlvif)) {
+			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
+			if (ret < 0)
+				goto deinit;
+		} else {
+			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
+			if (ret < 0)
+				goto deinit;
+		}
 
 		wl1271_ps_elp_sleep(wl);
 	}
@@ -3088,6 +3120,9 @@
 {
 	int ret;
 
+	if (wlcore_is_p2p_mgmt(wlvif))
+		return 0;
+
 	if (conf->power_level != wlvif->power_level) {
 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
 		if (ret < 0)
@@ -3207,6 +3242,9 @@
 		goto out;
 
 	wl12xx_for_each_wlvif(wl, wlvif) {
+		if (wlcore_is_p2p_mgmt(wlvif))
+			continue;
+
 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
 			if (*total & FIF_ALLMULTI)
 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
@@ -4837,6 +4875,9 @@
 	u8 ps_scheme;
 	int ret = 0;
 
+	if (wlcore_is_p2p_mgmt(wlvif))
+		return 0;
+
 	mutex_lock(&wl->mutex);
 
 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
@@ -6078,8 +6119,10 @@
 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
+					 BIT(NL80211_IFTYPE_AP) |
+					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
+					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					 BIT(NL80211_IFTYPE_P2P_GO);
 	wl->hw->wiphy->max_scan_ssids = 1;
 	wl->hw->wiphy->max_sched_scan_ssids = 16;
 	wl->hw->wiphy->max_match_sets = 16;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index e125974..5b29273 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -74,7 +74,14 @@
 	if (desc->rate <= wl->hw_min_ht_rate)
 		status->flag |= RX_FLAG_HT;
 
-	status->signal = desc->rssi;
+	/*
+	 * Read the signal level and antenna diversity indication.
+	 * The MSB of the signal level is always set, as the level is a
+	 * negative number.
+	 * The antenna indication is the MSB of the RSSI field.
+	 */
+	status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+	status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
 	/*
 	 * FIXME: In wl1251, the SNR should be divided by two.  In wl1271 we
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index a3b1618..f5a7087c 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -30,6 +30,9 @@
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
+#define RSSI_LEVEL_BITMASK	0x7F
+#define ANT_DIVERSITY_BITMASK	BIT(7)
+
 #define SHORT_PREAMBLE_BIT   BIT(0)
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
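For illustration only, a minimal user-space sketch of how a combined RSSI/antenna byte, as described in the rx.c comment above, can be unpacked with the rx.h masks (the example value of the rssi field is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define RSSI_LEVEL_BITMASK	0x7F
#define ANT_DIVERSITY_BITMASK	0x80	/* BIT(7) */

int main(void)
{
	uint8_t rssi_field = 0xD3;	/* example: antenna 1, level bits 0x53 */

	/* The MSB of the signal is forced on because the level is negative,
	 * mirroring the driver's "| BIT(7)". */
	int8_t signal = (int8_t)((rssi_field & RSSI_LEVEL_BITMASK) | 0x80);
	int antenna = (rssi_field & ANT_DIVERSITY_BITMASK) >> 7;

	printf("signal=%d dBm, antenna=%d\n", signal, antenna);
	return 0;
}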
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 4dadd0c..782eb29 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -83,6 +83,12 @@
 #define MAX_CHANNELS_5GHZ	42
 
 #define SCAN_MAX_CYCLE_INTERVALS 16
+
+/* The FW interval table can hold up to 16 entries.
+ * The 1st entry isn't used (the scan is immediate). The last
+ * entry should be used for the long_interval.
+ */
+#define SCAN_MAX_SHORT_INTERVALS (SCAN_MAX_CYCLE_INTERVALS - 2)
 #define SCAN_MAX_BANDS 3
 
 enum {
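As a standalone sketch of the short/long cycle selection added to the wl18xx sched-scan code above (type and function names here are hypothetical; the logic mirrors the patched branch): run the configured number of short cycles at the requested interval, then fall back to the long interval, or use only the requested interval when no valid long interval is configured.

#include <stdint.h>
#include <stdio.h>

struct sched_scan_cycles {
	uint16_t short_cycles_msec;
	uint16_t long_cycles_msec;
	uint8_t  short_cycles_count;
};

static void pick_cycles(struct sched_scan_cycles *c,
			uint16_t req_interval_msec,
			uint8_t num_short, uint16_t long_interval_msec)
{
	if (num_short && long_interval_msec &&
	    long_interval_msec > req_interval_msec) {
		/* num_short cycles at the requested interval, then long */
		c->short_cycles_msec = req_interval_msec;
		c->long_cycles_msec = long_interval_msec;
		c->short_cycles_count = num_short;
	} else {
		/* everything runs at the requested interval */
		c->short_cycles_msec = 0;
		c->long_cycles_msec = req_interval_msec;
		c->short_cycles_count = 0;
	}
}

int main(void)
{
	struct sched_scan_cycles c;

	pick_cycles(&c, 10000, 14 /* e.g. SCAN_MAX_SHORT_INTERVALS */, 30000);
	printf("short=%d ms x%d, then long=%d ms\n",
	       c.short_cycles_msec, c.short_cycles_count, c.long_cycles_msec);
	return 0;
}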
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index ea7e07a..c172da5 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -293,7 +293,8 @@
 	/* Use block mode for transferring over one block size of data */
 	func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-	if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+	ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+	if (ret)
 		goto out_free_glue;
 
 	/* if sdio can keep power while host is suspended, enable wow */
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 7f363fa..a1b6040 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -500,6 +500,9 @@
 	/* interface combinations supported by the hw */
 	const struct ieee80211_iface_combination *iface_combinations;
 	u8 n_iface_combinations;
+
+	/* dynamic fw traces */
+	u32 dynamic_fw_traces;
 };
 
 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 39efc6d..27c5687 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -503,6 +503,11 @@
 	return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
 }
 
+static inline bool wlcore_is_p2p_mgmt(struct wl12xx_vif *wlvif)
+{
+	return wl12xx_wlvif_to_vif(wlvif)->type == NL80211_IFTYPE_P2P_DEVICE;
+}
+
 #define wl12xx_for_each_wlvif(wl, wlvif) \
 		list_for_each_entry(wlvif, &wl->wlvif_list, list)
 
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a495b3..6dc76c1 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -210,12 +210,22 @@
 	VIF_STATUS_CONNECTED,
 };
 
+struct xenvif_mcast_addr {
+	struct list_head entry;
+	struct rcu_head rcu;
+	u8 addr[6];
+};
+
+#define XEN_NETBK_MCAST_MAX 64
+
 struct xenvif {
 	/* Unique identifier for this interface. */
 	domid_t          domid;
 	unsigned int     handle;
 
 	u8               fe_dev_addr[6];
+	struct list_head fe_mcast_addr;
+	unsigned int     fe_mcast_count;
 
 	/* Frontend feature information. */
 	int gso_mask;
@@ -224,6 +234,7 @@
 	u8 can_sg:1;
 	u8 ip_csum:1;
 	u8 ipv6_csum:1;
+	u8 multicast_control:1;
 
 	/* Is this interface disabled? True when backend discovers
 	 * frontend is rogue.
@@ -325,9 +336,6 @@
 		queue->pending_prod + queue->pending_cons;
 }
 
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
@@ -344,4 +352,8 @@
 				 struct sk_buff *skb);
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
 
+/* Multicast control */
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
+void xenvif_mcast_addr_list_free(struct xenvif *vif);
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 28577a3..e7bd63e 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -171,6 +171,13 @@
 	    !xenvif_schedulable(vif))
 		goto drop;
 
+	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
+		struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+		if (!xenvif_mcast_match(vif, eth->h_dest))
+			goto drop;
+	}
+
 	cb = XENVIF_RX_CB(skb);
 	cb->expires = jiffies + vif->drain_timeout;
 
@@ -427,6 +434,7 @@
 	vif->num_queues = 0;
 
 	spin_lock_init(&vif->lock);
+	INIT_LIST_HEAD(&vif->fe_mcast_addr);
 
 	dev->netdev_ops	= &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
@@ -661,6 +669,8 @@
 
 		xenvif_unmap_frontend_rings(queue);
 	}
+
+	xenvif_mcast_addr_list_free(vif);
 }
 
 /* Reverse the relevant parts of xenvif_init_queue().
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3f44b52..42569b9 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1157,6 +1157,80 @@
 	return false;
 }
 
+/* No locking is required in xenvif_mcast_add/del() as they are
+ * only ever invoked from NAPI poll. An RCU list is used because
+ * xenvif_mcast_match() is called asynchronously, during start_xmit.
+ */
+
+static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
+		if (net_ratelimit())
+			netdev_err(vif->dev,
+				   "Too many multicast addresses\n");
+		return -ENOSPC;
+	}
+
+	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
+	if (!mcast)
+		return -ENOMEM;
+
+	ether_addr_copy(mcast->addr, addr);
+	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
+	vif->fe_mcast_count++;
+
+	return 0;
+}
+
+static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			--vif->fe_mcast_count;
+			list_del_rcu(&mcast->entry);
+			kfree_rcu(mcast, rcu);
+			break;
+		}
+	}
+}
+
+bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
+{
+	struct xenvif_mcast_addr *mcast;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
+		if (ether_addr_equal(addr, mcast->addr)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+	rcu_read_unlock();
+
+	return false;
+}
+
+void xenvif_mcast_addr_list_free(struct xenvif *vif)
+{
+	/* No need for locking or RCU here. NAPI poll and TX queue
+	 * are stopped.
+	 */
+	while (!list_empty(&vif->fe_mcast_addr)) {
+		struct xenvif_mcast_addr *mcast;
+
+		mcast = list_first_entry(&vif->fe_mcast_addr,
+					 struct xenvif_mcast_addr,
+					 entry);
+		--vif->fe_mcast_count;
+		list_del(&mcast->entry);
+		kfree(mcast);
+	}
+}
+
 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 				     int budget,
 				     unsigned *copy_ops,
@@ -1215,6 +1289,31 @@
 				break;
 		}
 
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
+			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq,
+					 (ret == 0) ?
+					 XEN_NETIF_RSP_OKAY :
+					 XEN_NETIF_RSP_ERROR);
+			push_tx_responses(queue);
+			continue;
+		}
+
+		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
+			struct xen_netif_extra_info *extra;
+
+			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
+			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
+
+			make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
+			push_tx_responses(queue);
+			continue;
+		}
+
 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
 		if (unlikely(ret < 0))
 			break;
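Stripped of the RCU machinery and kernel list primitives, the lookup performed by xenvif_mcast_match() is a linear scan of the stored frontend multicast MAC addresses, bounded by XEN_NETBK_MCAST_MAX. A user-space sketch of that core logic (hypothetical types, no locking, not the driver's actual data structure):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MCAST_MAX 64

struct mcast_list {
	uint8_t addr[MCAST_MAX][6];
	unsigned int count;
};

static bool mcast_add(struct mcast_list *l, const uint8_t *addr)
{
	if (l->count == MCAST_MAX)
		return false;		/* mirrors the -ENOSPC case */
	memcpy(l->addr[l->count++], addr, 6);
	return true;
}

static bool mcast_match(const struct mcast_list *l, const uint8_t *addr)
{
	for (unsigned int i = 0; i < l->count; i++)
		if (!memcmp(l->addr[i], addr, 6))
			return true;
	return false;			/* unknown multicast -> drop */
}

int main(void)
{
	struct mcast_list l = { .count = 0 };
	const uint8_t grp[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

	mcast_add(&l, grp);
	printf("match: %d\n", mcast_match(&l, grp));
	return 0;
}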
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index ec383b0..929a6e7 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -327,6 +327,14 @@
 			goto abort_transaction;
 		}
 
+		/* We support multicast-control. */
+		err = xenbus_printf(xbt, dev->nodename,
+				    "feature-multicast-control", "%d", 1);
+		if (err) {
+			message = "writing feature-multicast-control";
+			goto abort_transaction;
+		}
+
 		err = xenbus_transaction_end(xbt, 0);
 	} while (err == -EAGAIN);
 
@@ -1016,6 +1024,11 @@
 		val = 0;
 	vif->ipv6_csum = !!val;
 
+	if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
+			 "%d", &val) < 0)
+		val = 0;
+	vif->multicast_control = !!val;
+
 	return 0;
 }
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f948c46..e27e6d2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1336,7 +1336,7 @@
 
 	netif_carrier_off(info->netdev);
 
-	for (i = 0; i < num_queues; ++i) {
+	for (i = 0; i < num_queues && info->queues; ++i) {
 		struct netfront_queue *queue = &info->queues[i];
 
 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -1348,7 +1348,8 @@
 		queue->tx_evtchn = queue->rx_evtchn = 0;
 		queue->tx_irq = queue->rx_irq = 0;
 
-		napi_synchronize(&queue->napi);
+		if (netif_running(info->netdev))
+			napi_synchronize(&queue->napi);
 
 		xennet_release_tx_bufs(queue);
 		xennet_release_rx_bufs(queue);
@@ -2101,7 +2102,8 @@
 
 	unregister_netdev(info->netdev);
 
-	xennet_destroy_queues(info);
+	if (info->queues)
+		xennet_destroy_queues(info);
 	xennet_free_netdev(info->netdev);
 
 	return 0;
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index 722673c..6639cd1 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -74,4 +74,5 @@
 source "drivers/nfc/st21nfca/Kconfig"
 source "drivers/nfc/st-nci/Kconfig"
 source "drivers/nfc/nxp-nci/Kconfig"
+source "drivers/nfc/s3fwrn5/Kconfig"
 endmenu
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
index 368b6df..2757fe1b 100644
--- a/drivers/nfc/Makefile
+++ b/drivers/nfc/Makefile
@@ -14,3 +14,4 @@
 obj-$(CONFIG_NFC_ST21NFCA)  	+= st21nfca/
 obj-$(CONFIG_NFC_ST_NCI)	+= st-nci/
 obj-$(CONFIG_NFC_NXP_NCI)	+= nxp-nci/
+obj-$(CONFIG_NFC_S3FWRN5)	+= s3fwrn5/
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 2b77ccf..754a9bb 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -355,7 +355,8 @@
 		goto err;
 	}
 
-	r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
+	r = mei_cl_register_event_cb(phy->device, BIT(MEI_CL_EVENT_RX),
+				     nfc_mei_event_cb, phy);
 	if (r) {
 		pr_err("Event cb registration failed %d\n", r);
 		goto err;
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index 2f77f1d..fac80c6 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -318,19 +318,15 @@
 	struct i2c_client *client = phy->i2c_dev;
 	struct gpio_desc *gpiod_en, *gpiod_fw, *gpiod_irq;
 
-	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2);
-	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1);
-	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0);
+	gpiod_en = devm_gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW);
+	gpiod_fw = devm_gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW);
+	gpiod_irq = devm_gpiod_get_index(&client->dev, NULL, 0, GPIOD_IN);
 
 	if (IS_ERR(gpiod_en) || IS_ERR(gpiod_fw) || IS_ERR(gpiod_irq)) {
 		nfc_err(&client->dev, "No GPIOs\n");
 		return -EINVAL;
 	}
 
-	gpiod_direction_output(gpiod_en, 0);
-	gpiod_direction_output(gpiod_fw, 0);
-	gpiod_direction_input(gpiod_irq);
-
 	client->irq = gpiod_to_irq(gpiod_irq);
 	if (client->irq < 0) {
 		nfc_err(&client->dev, "No IRQ\n");
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
new file mode 100644
index 0000000..7e3b255
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -0,0 +1,19 @@
+config NFC_S3FWRN5
+	tristate
+	---help---
+	  Core driver for the Samsung S3FWRN5 NFC chip. It contains the core
+	  utilities of the chip and is intended to be used by PHY drivers to
+	  avoid duplicating lots of common code.
+
+config NFC_S3FWRN5_I2C
+	tristate "Samsung S3FWRN5 I2C support"
+	depends on NFC_NCI && I2C
+	select NFC_S3FWRN5
+	default n
+	---help---
+	  This module adds support for an I2C interface to the S3FWRN5 chip.
+	  Select this if your platform is using the I2C bus.
+
+	  To compile this driver as a module, choose m here. The module will
+	  be called s3fwrn5_i2c.ko.
+	  Say N if unsure.
diff --git a/drivers/nfc/s3fwrn5/Makefile b/drivers/nfc/s3fwrn5/Makefile
new file mode 100644
index 0000000..3381c34
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Samsung S3FWRN5 NFC driver
+#
+
+s3fwrn5-objs = core.o firmware.o nci.o
+s3fwrn5_i2c-objs = i2c.o
+
+obj-$(CONFIG_NFC_S3FWRN5) += s3fwrn5.o
+obj-$(CONFIG_NFC_S3FWRN5_I2C) += s3fwrn5_i2c.o
+
+ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
new file mode 100644
index 0000000..0d866ca
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -0,0 +1,219 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <net/nfc/nci_core.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+#include "nci.h"
+
+#define S3FWRN5_NFC_PROTOCOLS  (NFC_PROTO_JEWEL_MASK | \
+				NFC_PROTO_MIFARE_MASK | \
+				NFC_PROTO_FELICA_MASK | \
+				NFC_PROTO_ISO14443_MASK | \
+				NFC_PROTO_ISO14443_B_MASK | \
+				NFC_PROTO_ISO15693_MASK)
+
+static int s3fwrn5_firmware_update(struct s3fwrn5_info *info)
+{
+	bool need_update;
+	int ret;
+
+	s3fwrn5_fw_init(&info->fw_info, "sec_s3fwrn5_firmware.bin");
+
+	/* Update firmware */
+
+	s3fwrn5_set_wake(info, false);
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_FW);
+
+	ret = s3fwrn5_fw_setup(&info->fw_info);
+	if (ret < 0)
+		return ret;
+
+	need_update = s3fwrn5_fw_check_version(&info->fw_info,
+		info->ndev->manufact_specific_info);
+	if (!need_update)
+		goto out;
+
+	dev_info(&info->ndev->nfc_dev->dev, "Detected new firmware version\n");
+
+	ret = s3fwrn5_fw_download(&info->fw_info);
+	if (ret < 0)
+		goto out;
+
+	/* Update RF configuration */
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+
+	s3fwrn5_set_wake(info, true);
+	ret = s3fwrn5_nci_rf_configure(info, "sec_s3fwrn5_rfreg.bin");
+	s3fwrn5_set_wake(info, false);
+
+out:
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+	s3fwrn5_fw_cleanup(&info->fw_info);
+	return ret;
+}
+
+static int s3fwrn5_nci_open(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+	if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_COLD)
+		return  -EBUSY;
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+	s3fwrn5_set_wake(info, true);
+
+	return 0;
+}
+
+static int s3fwrn5_nci_close(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+	s3fwrn5_set_wake(info, false);
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+	return 0;
+}
+
+static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+	int ret;
+
+	mutex_lock(&info->mutex);
+
+	if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) {
+		mutex_unlock(&info->mutex);
+		return -EINVAL;
+	}
+
+	ret = s3fwrn5_write(info, skb);
+	if (ret < 0)
+		kfree_skb(skb);
+
+	mutex_unlock(&info->mutex);
+	return ret;
+}
+
+static int s3fwrn5_nci_post_setup(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+	int ret;
+
+	ret = s3fwrn5_firmware_update(info);
+	if (ret < 0)
+		goto out;
+
+	/* NCI core reset */
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI);
+	s3fwrn5_set_wake(info, true);
+
+	ret = nci_core_reset(info->ndev);
+	if (ret < 0)
+		goto out;
+
+	ret = nci_core_init(info->ndev);
+
+out:
+	return ret;
+}
+
+static struct nci_ops s3fwrn5_nci_ops = {
+	.open = s3fwrn5_nci_open,
+	.close = s3fwrn5_nci_close,
+	.send = s3fwrn5_nci_send,
+	.post_setup = s3fwrn5_nci_post_setup,
+};
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+	struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload)
+{
+	struct s3fwrn5_info *info;
+	int ret;
+
+	info = devm_kzalloc(pdev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->phy_id = phy_id;
+	info->pdev = pdev;
+	info->phy_ops = phy_ops;
+	info->max_payload = max_payload;
+	mutex_init(&info->mutex);
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+	s3fwrn5_nci_get_prop_ops(&s3fwrn5_nci_ops.prop_ops,
+		&s3fwrn5_nci_ops.n_prop_ops);
+
+	info->ndev = nci_allocate_device(&s3fwrn5_nci_ops,
+		S3FWRN5_NFC_PROTOCOLS, 0, 0);
+	if (!info->ndev)
+		return -ENOMEM;
+
+	nci_set_parent_dev(info->ndev, pdev);
+	nci_set_drvdata(info->ndev, info);
+
+	ret = nci_register_device(info->ndev);
+	if (ret < 0) {
+		nci_free_device(info->ndev);
+		return ret;
+	}
+
+	info->fw_info.ndev = info->ndev;
+
+	*ndev = info->ndev;
+
+	return ret;
+}
+EXPORT_SYMBOL(s3fwrn5_probe);
+
+void s3fwrn5_remove(struct nci_dev *ndev)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+
+	s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD);
+
+	nci_unregister_device(ndev);
+	nci_free_device(ndev);
+}
+EXPORT_SYMBOL(s3fwrn5_remove);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+	enum s3fwrn5_mode mode)
+{
+	switch (mode) {
+	case S3FWRN5_MODE_NCI:
+		return nci_recv_frame(ndev, skb);
+	case S3FWRN5_MODE_FW:
+		return s3fwrn5_fw_recv_frame(ndev, skb);
+	default:
+		return -ENODEV;
+	}
+}
+EXPORT_SYMBOL(s3fwrn5_recv_frame);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Samsung S3FWRN5 NFC driver");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
new file mode 100644
index 0000000..64a9025
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -0,0 +1,511 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+#include "s3fwrn5.h"
+#include "firmware.h"
+
+struct s3fwrn5_fw_version {
+	__u8 major;
+	__u8 build1;
+	__u8 build2;
+	__u8 target;
+};
+
+static int s3fwrn5_fw_send_msg(struct s3fwrn5_fw_info *fw_info,
+	struct sk_buff *msg, struct sk_buff **rsp)
+{
+	struct s3fwrn5_info *info =
+		container_of(fw_info, struct s3fwrn5_info, fw_info);
+	long ret;
+
+	reinit_completion(&fw_info->completion);
+
+	ret = s3fwrn5_write(info, msg);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_for_completion_interruptible_timeout(
+		&fw_info->completion, msecs_to_jiffies(1000));
+	if (ret < 0)
+		return ret;
+	else if (ret == 0)
+		return -ENXIO;
+
+	if (!fw_info->rsp)
+		return -EINVAL;
+
+	*rsp = fw_info->rsp;
+	fw_info->rsp = NULL;
+
+	return 0;
+}
+
+static int s3fwrn5_fw_prep_msg(struct s3fwrn5_fw_info *fw_info,
+	struct sk_buff **msg, u8 type, u8 code, const void *data, u16 len)
+{
+	struct s3fwrn5_fw_header hdr;
+	struct sk_buff *skb;
+
+	hdr.type = type | fw_info->parity;
+	fw_info->parity ^= 0x80;
+	hdr.code = code;
+	hdr.len = len;
+
+	skb = alloc_skb(S3FWRN5_FW_HDR_SIZE + len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, S3FWRN5_FW_HDR_SIZE), &hdr, S3FWRN5_FW_HDR_SIZE);
+	if (len)
+		memcpy(skb_put(skb, len), data, len);
+
+	*msg = skb;
+
+	return 0;
+}
+
+static int s3fwrn5_fw_get_bootinfo(struct s3fwrn5_fw_info *fw_info,
+	struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret;
+
+	/* Send GET_BOOTINFO command */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_GET_BOOTINFO, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	memcpy(bootinfo, rsp->data + S3FWRN5_FW_HDR_SIZE, 10);
+
+out:
+	kfree_skb(rsp);
+	return ret;
+}
+
+static int s3fwrn5_fw_enter_update_mode(struct s3fwrn5_fw_info *fw_info,
+	const void *hash_data, u16 hash_size,
+	const void *sig_data, u16 sig_size)
+{
+	struct s3fwrn5_fw_cmd_enter_updatemode args;
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret;
+
+	/* Send ENTER_UPDATE_MODE command */
+
+	args.hashcode_size = hash_size;
+	args.signature_size = sig_size;
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_ENTER_UPDATE_MODE, &args, sizeof(args));
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EPROTO;
+		goto out;
+	}
+
+	kfree_skb(rsp);
+
+	/* Send hashcode data */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+		hash_data, hash_size);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EPROTO;
+		goto out;
+	}
+
+	kfree_skb(rsp);
+
+	/* Send signature data */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0,
+		sig_data, sig_size);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+		ret = -EPROTO;
+
+out:
+	kfree_skb(rsp);
+	return ret;
+}
+
+static int s3fwrn5_fw_update_sector(struct s3fwrn5_fw_info *fw_info,
+	u32 base_addr, const void *data)
+{
+	struct s3fwrn5_fw_cmd_update_sector args;
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret, i;
+
+	/* Send UPDATE_SECTOR command */
+
+	args.base_address = base_addr;
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_UPDATE_SECTOR, &args, sizeof(args));
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+		ret = -EPROTO;
+		goto err;
+	}
+
+	kfree_skb(rsp);
+
+	/* Send data split into 256-byte packets */
+
+	for (i = 0; i < 16; ++i) {
+		ret = s3fwrn5_fw_prep_msg(fw_info, &msg,
+			S3FWRN5_FW_MSG_DATA, 0, data+256*i, 256);
+		if (ret < 0)
+			break;
+
+		ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+		kfree_skb(msg);
+		if (ret < 0)
+			break;
+
+		hdr = (struct s3fwrn5_fw_header *) rsp->data;
+		if (hdr->code != S3FWRN5_FW_RET_SUCCESS) {
+			ret = -EPROTO;
+			goto err;
+		}
+
+		kfree_skb(rsp);
+	}
+
+	return ret;
+
+err:
+	kfree_skb(rsp);
+	return ret;
+}
+
+static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info)
+{
+	struct sk_buff *msg, *rsp = NULL;
+	struct s3fwrn5_fw_header *hdr;
+	int ret;
+
+	/* Send COMPLETE_UPDATE_MODE command */
+
+	ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD,
+		S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE, NULL, 0);
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp);
+	kfree_skb(msg);
+	if (ret < 0)
+		return ret;
+
+	hdr = (struct s3fwrn5_fw_header *) rsp->data;
+	if (hdr->code != S3FWRN5_FW_RET_SUCCESS)
+		ret = -EPROTO;
+
+	kfree_skb(rsp);
+
+	return ret;
+}
+
+/*
+ * Firmware header structure:
+ *
+ * 0x00 - 0x0B : Date and time string (w/o NUL termination)
+ * 0x10 - 0x13 : Firmware version
+ * 0x14 - 0x17 : Signature address
+ * 0x18 - 0x1B : Signature size
+ * 0x1C - 0x1F : Firmware image address
+ * 0x20 - 0x23 : Firmware sectors count
+ * 0x24 - 0x27 : Custom signature address
+ * 0x28 - 0x2B : Custom signature size
+ */
+
+#define S3FWRN5_FW_IMAGE_HEADER_SIZE 44
+
+static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+	struct s3fwrn5_fw_image *fw = &fw_info->fw;
+	u32 sig_off;
+	u32 image_off;
+	u32 custom_sig_off;
+	int ret;
+
+	ret = request_firmware(&fw->fw, fw_info->fw_name,
+		&fw_info->ndev->nfc_dev->dev);
+	if (ret < 0)
+		return ret;
+
+	if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
+		return -EINVAL;
+
+	memcpy(fw->date, fw->fw->data + 0x00, 12);
+	fw->date[12] = '\0';
+
+	memcpy(&fw->version, fw->fw->data + 0x10, 4);
+
+	memcpy(&sig_off, fw->fw->data + 0x14, 4);
+	fw->sig = fw->fw->data + sig_off;
+	memcpy(&fw->sig_size, fw->fw->data + 0x18, 4);
+
+	memcpy(&image_off, fw->fw->data + 0x1C, 4);
+	fw->image = fw->fw->data + image_off;
+	memcpy(&fw->image_sectors, fw->fw->data + 0x20, 4);
+
+	memcpy(&custom_sig_off, fw->fw->data + 0x24, 4);
+	fw->custom_sig = fw->fw->data + custom_sig_off;
+	memcpy(&fw->custom_sig_size, fw->fw->data + 0x28, 4);
+
+	return 0;
+}
+
+static void s3fwrn5_fw_release_firmware(struct s3fwrn5_fw_info *fw_info)
+{
+	release_firmware(fw_info->fw.fw);
+}
+
+static int s3fwrn5_fw_get_base_addr(
+	struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo, u32 *base_addr)
+{
+	int i;
+	struct {
+		u8 version[4];
+		u32 base_addr;
+	} match[] = {
+		{{0x05, 0x00, 0x00, 0x00}, 0x00005000},
+		{{0x05, 0x00, 0x00, 0x01}, 0x00003000},
+		{{0x05, 0x00, 0x00, 0x02}, 0x00003000},
+		{{0x05, 0x00, 0x00, 0x03}, 0x00003000},
+		{{0x05, 0x00, 0x00, 0x05}, 0x00003000}
+	};
+
+	for (i = 0; i < ARRAY_SIZE(match); ++i)
+		if (bootinfo->hw_version[0] == match[i].version[0] &&
+			bootinfo->hw_version[1] == match[i].version[1] &&
+			bootinfo->hw_version[3] == match[i].version[3]) {
+			*base_addr = match[i].base_addr;
+			return 0;
+		}
+
+	return -EINVAL;
+}
+
+static inline bool
+s3fwrn5_fw_is_custom(struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+{
+	return !!bootinfo->hw_version[2];
+}
+
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
+{
+	struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
+	int ret;
+
+	/* Get firmware data */
+
+	ret = s3fwrn5_fw_request_firmware(fw_info);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Failed to get fw file, ret=%02x\n", ret);
+		return ret;
+	}
+
+	/* Get bootloader info */
+
+	ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Failed to get bootinfo, ret=%02x\n", ret);
+		goto err;
+	}
+
+	/* Match hardware version to obtain firmware base address */
+
+	ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Unknown hardware version\n");
+		goto err;
+	}
+
+	fw_info->sector_size = bootinfo.sector_size;
+
+	fw_info->sig_size = s3fwrn5_fw_is_custom(&bootinfo) ?
+		fw_info->fw.custom_sig_size : fw_info->fw.sig_size;
+	fw_info->sig = s3fwrn5_fw_is_custom(&bootinfo) ?
+		fw_info->fw.custom_sig : fw_info->fw.sig;
+
+	return 0;
+
+err:
+	s3fwrn5_fw_release_firmware(fw_info);
+	return ret;
+}
+
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version)
+{
+	struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version;
+	struct s3fwrn5_fw_version *old = (void *) &version;
+
+	if (new->major > old->major)
+		return true;
+	if (new->build1 > old->build1)
+		return true;
+	if (new->build2 > old->build2)
+		return true;
+
+	return false;
+}
+
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
+{
+	struct s3fwrn5_fw_image *fw = &fw_info->fw;
+	u8 hash_data[SHA1_DIGEST_SIZE];
+	struct scatterlist sg;
+	struct hash_desc desc;
+	u32 image_size, off;
+	int ret;
+
+	image_size = fw_info->sector_size * fw->image_sectors;
+
+	/* Compute SHA of firmware data */
+
+	sg_init_one(&sg, fw->image, image_size);
+	desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+	crypto_hash_init(&desc);
+	crypto_hash_update(&desc, &sg, image_size);
+	crypto_hash_final(&desc, hash_data);
+	crypto_free_hash(desc.tfm);
+
+	/* Firmware update process */
+
+	dev_info(&fw_info->ndev->nfc_dev->dev,
+		"Firmware update: %s\n", fw_info->fw_name);
+
+	ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
+		SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Unable to enter update mode\n");
+		goto out;
+	}
+
+	for (off = 0; off < image_size; off += fw_info->sector_size) {
+		ret = s3fwrn5_fw_update_sector(fw_info,
+			fw_info->base_addr + off, fw->image + off);
+		if (ret < 0) {
+			dev_err(&fw_info->ndev->nfc_dev->dev,
+				"Firmware update error (code=%d)\n", ret);
+			goto out;
+		}
+	}
+
+	ret = s3fwrn5_fw_complete_update_mode(fw_info);
+	if (ret < 0) {
+		dev_err(&fw_info->ndev->nfc_dev->dev,
+			"Unable to complete update mode\n");
+		goto out;
+	}
+
+	dev_info(&fw_info->ndev->nfc_dev->dev,
+		"Firmware update: success\n");
+
+out:
+	return ret;
+}
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name)
+{
+	fw_info->parity = 0x00;
+	fw_info->rsp = NULL;
+	fw_info->fw.fw = NULL;
+	strcpy(fw_info->fw_name, fw_name);
+	init_completion(&fw_info->completion);
+}
+
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info)
+{
+	s3fwrn5_fw_release_firmware(fw_info);
+}
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	struct s3fwrn5_info *info = nci_get_drvdata(ndev);
+	struct s3fwrn5_fw_info *fw_info = &info->fw_info;
+
+	BUG_ON(fw_info->rsp);
+
+	fw_info->rsp = skb;
+
+	complete(&fw_info->completion);
+
+	return 0;
+}
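A minimal sketch of the comparison applied by s3fwrn5_fw_check_version(): the 4-byte version word is viewed field by field, and the image is considered newer if any of major/build1/build2 exceeds the running firmware's value (the byte layout chosen below is an assumption for illustration, not taken from the chip documentation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_version {
	uint8_t major;
	uint8_t build1;
	uint8_t build2;
	uint8_t target;
};

/* "Newer" if any of major/build1/build2 in the candidate image exceeds
 * the value reported by the running firmware; target is ignored. */
static bool fw_is_newer(struct fw_version img, struct fw_version running)
{
	return img.major > running.major ||
	       img.build1 > running.build1 ||
	       img.build2 > running.build2;
}

int main(void)
{
	struct fw_version running = { 5, 2, 7, 0 };
	struct fw_version image   = { 5, 3, 1, 0 };

	printf("update needed: %d\n", fw_is_newer(image, running));
	return 0;
}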
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
new file mode 100644
index 0000000..1ec0647
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/firmware.h
@@ -0,0 +1,111 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_FIRMWARE_H_
+#define __LOCAL_S3FWRN5_FIRMWARE_H_
+
+/* FW Message Types */
+#define S3FWRN5_FW_MSG_CMD			0x00
+#define S3FWRN5_FW_MSG_RSP			0x01
+#define S3FWRN5_FW_MSG_DATA			0x02
+
+/* FW Return Codes */
+#define S3FWRN5_FW_RET_SUCCESS			0x00
+#define S3FWRN5_FW_RET_MESSAGE_TYPE_INVALID	0x01
+#define S3FWRN5_FW_RET_COMMAND_INVALID		0x02
+#define S3FWRN5_FW_RET_PAGE_DATA_OVERFLOW	0x03
+#define S3FWRN5_FW_RET_SECT_DATA_OVERFLOW	0x04
+#define S3FWRN5_FW_RET_AUTHENTICATION_FAIL	0x05
+#define S3FWRN5_FW_RET_FLASH_OPERATION_FAIL	0x06
+#define S3FWRN5_FW_RET_ADDRESS_OUT_OF_RANGE	0x07
+#define S3FWRN5_FW_RET_PARAMETER_INVALID	0x08
+
+/* ---- FW Packet structures ---- */
+#define S3FWRN5_FW_HDR_SIZE 4
+
+struct s3fwrn5_fw_header {
+	__u8 type;
+	__u8 code;
+	__u16 len;
+};
+
+#define S3FWRN5_FW_CMD_RESET			0x00
+
+#define S3FWRN5_FW_CMD_GET_BOOTINFO		0x01
+
+struct s3fwrn5_fw_cmd_get_bootinfo_rsp {
+	__u8 hw_version[4];
+	__u16 sector_size;
+	__u16 page_size;
+	__u16 frame_max_size;
+	__u16 hw_buffer_size;
+};
+
+#define S3FWRN5_FW_CMD_ENTER_UPDATE_MODE	0x02
+
+struct s3fwrn5_fw_cmd_enter_updatemode {
+	__u16 hashcode_size;
+	__u16 signature_size;
+};
+
+#define S3FWRN5_FW_CMD_UPDATE_SECTOR		0x04
+
+struct s3fwrn5_fw_cmd_update_sector {
+	__u32 base_address;
+};
+
+#define S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE	0x05
+
+struct s3fwrn5_fw_image {
+	const struct firmware *fw;
+
+	char date[13];
+	u32 version;
+	const void *sig;
+	u32 sig_size;
+	const void *image;
+	u32 image_sectors;
+	const void *custom_sig;
+	u32 custom_sig_size;
+};
+
+struct s3fwrn5_fw_info {
+	struct nci_dev *ndev;
+	struct s3fwrn5_fw_image fw;
+	char fw_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+
+	const void *sig;
+	u32 sig_size;
+	u32 sector_size;
+	u32 base_addr;
+
+	struct completion completion;
+	struct sk_buff *rsp;
+	char parity;
+};
+
+void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name);
+int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info);
+bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version);
+int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info);
+void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info);
+
+int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
+
+#endif /* __LOCAL_S3FWRN5_FIRMWARE_H_ */
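For illustration, the frame header built by s3fwrn5_fw_prep_msg() toggles a parity bit (0x80) in the type field on every message it sends. A self-contained sketch of that framing, assuming the same 4-byte header layout as firmware.h (context struct and example values are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct fw_header {
	uint8_t  type;
	uint8_t  code;
	uint16_t len;
};

struct fw_ctx {
	uint8_t parity;		/* toggles 0x00 <-> 0x80 per message */
};

static void build_header(struct fw_ctx *ctx, struct fw_header *hdr,
			 uint8_t type, uint8_t code, uint16_t len)
{
	hdr->type = type | ctx->parity;
	ctx->parity ^= 0x80;
	hdr->code = code;
	hdr->len = len;
}

int main(void)
{
	struct fw_ctx ctx = { .parity = 0x00 };
	struct fw_header h;

	build_header(&ctx, &h, 0x00 /* CMD */, 0x01 /* GET_BOOTINFO */, 0);
	printf("type=0x%02x code=0x%02x len=%d\n",
	       (unsigned)h.type, (unsigned)h.code, h.len);
	build_header(&ctx, &h, 0x00, 0x01, 0);
	printf("type=0x%02x code=0x%02x len=%d\n",
	       (unsigned)h.type, (unsigned)h.code, h.len);
	return 0;
}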
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
new file mode 100644
index 0000000..b4dd7dd
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -0,0 +1,306 @@
+/*
+ * I2C Link Layer for Samsung S3FWRN5 NCI based Driver
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/module.h>
+
+#include <net/nfc/nfc.h>
+
+#include "s3fwrn5.h"
+
+#define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
+
+#define S3FWRN5_I2C_MAX_PAYLOAD 32
+#define S3FWRN5_EN_WAIT_TIME 150
+
+struct s3fwrn5_i2c_phy {
+	struct i2c_client *i2c_dev;
+	struct nci_dev *ndev;
+
+	unsigned int gpio_en;
+	unsigned int gpio_fw_wake;
+
+	struct mutex mutex;
+
+	enum s3fwrn5_mode mode;
+	unsigned int irq_skip:1;
+};
+
+static void s3fwrn5_i2c_set_wake(void *phy_id, bool wake)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+
+	mutex_lock(&phy->mutex);
+	gpio_set_value(phy->gpio_fw_wake, wake);
+	msleep(S3FWRN5_EN_WAIT_TIME/2);
+	mutex_unlock(&phy->mutex);
+}
+
+static void s3fwrn5_i2c_set_mode(void *phy_id, enum s3fwrn5_mode mode)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+
+	mutex_lock(&phy->mutex);
+
+	if (phy->mode == mode)
+		goto out;
+
+	phy->mode = mode;
+
+	gpio_set_value(phy->gpio_en, 1);
+	gpio_set_value(phy->gpio_fw_wake, 0);
+	if (mode == S3FWRN5_MODE_FW)
+		gpio_set_value(phy->gpio_fw_wake, 1);
+
+	if (mode != S3FWRN5_MODE_COLD) {
+		msleep(S3FWRN5_EN_WAIT_TIME);
+		gpio_set_value(phy->gpio_en, 0);
+		msleep(S3FWRN5_EN_WAIT_TIME/2);
+	}
+
+	phy->irq_skip = true;
+
+out:
+	mutex_unlock(&phy->mutex);
+}
+
+static enum s3fwrn5_mode s3fwrn5_i2c_get_mode(void *phy_id)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+	enum s3fwrn5_mode mode;
+
+	mutex_lock(&phy->mutex);
+
+	mode = phy->mode;
+
+	mutex_unlock(&phy->mutex);
+
+	return mode;
+}
+
+static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+	int ret;
+
+	mutex_lock(&phy->mutex);
+
+	phy->irq_skip = false;
+
+	ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+	if (ret == -EREMOTEIO) {
+		/* Retry, chip was in standby */
+		usleep_range(110000, 120000);
+		ret  = i2c_master_send(phy->i2c_dev, skb->data, skb->len);
+	}
+
+	mutex_unlock(&phy->mutex);
+
+	if (ret < 0)
+		return ret;
+
+	if (ret != skb->len)
+		return -EREMOTEIO;
+
+	return 0;
+}
+
+static struct s3fwrn5_phy_ops i2c_phy_ops = {
+	.set_wake = s3fwrn5_i2c_set_wake,
+	.set_mode = s3fwrn5_i2c_set_mode,
+	.get_mode = s3fwrn5_i2c_get_mode,
+	.write = s3fwrn5_i2c_write,
+};
+
+static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy)
+{
+	struct sk_buff *skb;
+	size_t hdr_size;
+	size_t data_len;
+	char hdr[4];
+	int ret;
+
+	hdr_size = (phy->mode == S3FWRN5_MODE_NCI) ?
+		NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE;
+	ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size);
+	if (ret < 0)
+		return ret;
+
+	if (ret < hdr_size)
+		return -EBADMSG;
+
+	data_len = (phy->mode == S3FWRN5_MODE_NCI) ?
+		((struct nci_ctrl_hdr *)hdr)->plen :
+		((struct s3fwrn5_fw_header *)hdr)->len;
+
+	skb = alloc_skb(hdr_size + data_len, GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb_put(skb, hdr_size), hdr, hdr_size);
+
+	if (data_len == 0)
+		goto out;
+
+	ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len);
+	if (ret != data_len) {
+		kfree_skb(skb);
+		return -EBADMSG;
+	}
+
+out:
+	return s3fwrn5_recv_frame(phy->ndev, skb, phy->mode);
+}
+
+static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
+{
+	struct s3fwrn5_i2c_phy *phy = phy_id;
+	int ret = 0;
+
+	if (!phy || !phy->ndev) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	mutex_lock(&phy->mutex);
+
+	if (phy->irq_skip)
+		goto out;
+
+	switch (phy->mode) {
+	case S3FWRN5_MODE_NCI:
+	case S3FWRN5_MODE_FW:
+		ret = s3fwrn5_i2c_read(phy);
+		break;
+	case S3FWRN5_MODE_COLD:
+		ret = -EREMOTEIO;
+		break;
+	}
+
+out:
+	mutex_unlock(&phy->mutex);
+
+	return IRQ_HANDLED;
+}
+
+static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
+{
+	struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+	struct device_node *np = client->dev.of_node;
+
+	if (!np)
+		return -ENODEV;
+
+	phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
+	if (!gpio_is_valid(phy->gpio_en))
+		return -ENODEV;
+
+	phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
+	if (!gpio_is_valid(phy->gpio_fw_wake))
+		return -ENODEV;
+
+	return 0;
+}
+
+static int s3fwrn5_i2c_probe(struct i2c_client *client,
+				  const struct i2c_device_id *id)
+{
+	struct s3fwrn5_i2c_phy *phy;
+	int ret;
+
+	phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	mutex_init(&phy->mutex);
+	phy->mode = S3FWRN5_MODE_COLD;
+	phy->irq_skip = true;
+
+	phy->i2c_dev = client;
+	i2c_set_clientdata(client, phy);
+
+	ret = s3fwrn5_i2c_parse_dt(client);
+	if (ret < 0)
+		return ret;
+
+	ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_en,
+		GPIOF_OUT_INIT_HIGH, "s3fwrn5_en");
+	if (ret < 0)
+		return ret;
+
+	ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->gpio_fw_wake,
+		GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake");
+	if (ret < 0)
+		return ret;
+
+	ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
+		S3FWRN5_I2C_MAX_PAYLOAD);
+	if (ret < 0)
+		return ret;
+
+	ret = request_threaded_irq(phy->i2c_dev->irq, NULL,
+		s3fwrn5_i2c_irq_thread_fn, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+		S3FWRN5_I2C_DRIVER_NAME, phy);
+	if (ret)
+		s3fwrn5_remove(phy->ndev);
+
+	return ret;
+}
+
+static int s3fwrn5_i2c_remove(struct i2c_client *client)
+{
+	struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client);
+
+	s3fwrn5_remove(phy->ndev);
+
+	return 0;
+}
+
+static struct i2c_device_id s3fwrn5_i2c_id_table[] = {
+	{S3FWRN5_I2C_DRIVER_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
+
+static const struct of_device_id of_s3fwrn5_i2c_match[] = {
+	{ .compatible = "samsung,s3fwrn5-i2c", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match);
+
+static struct i2c_driver s3fwrn5_i2c_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = S3FWRN5_I2C_DRIVER_NAME,
+		.of_match_table = of_match_ptr(of_s3fwrn5_i2c_match),
+	},
+	.probe = s3fwrn5_i2c_probe,
+	.remove = s3fwrn5_i2c_remove,
+	.id_table = s3fwrn5_i2c_id_table,
+};
+
+module_i2c_driver(s3fwrn5_i2c_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("I2C driver for Samsung S3FWRN5");
+MODULE_AUTHOR("Robert Baldyga <r.baldyga@samsung.com>");
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
new file mode 100644
index 0000000..ace0071
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -0,0 +1,165 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+
+#include "s3fwrn5.h"
+#include "nci.h"
+
+static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
+{
+	__u8 status = skb->data[0];
+
+	nci_req_complete(ndev, status);
+	return 0;
+}
+
+static struct nci_prop_ops s3fwrn5_nci_prop_ops[] = {
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_AGAIN),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_GET_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_SET_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_GET_RFREG_VER),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_SET_RFREG_VER),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_START_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_STOP_RFREG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_FW_CFG),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+	{
+		.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
+				NCI_PROP_WR_RESET),
+		.rsp = s3fwrn5_nci_prop_rsp,
+	},
+};
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n)
+{
+	*ops = s3fwrn5_nci_prop_ops;
+	*n = ARRAY_SIZE(s3fwrn5_nci_prop_ops);
+}
+
+#define S3FWRN5_RFREG_SECTION_SIZE 252
+
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
+{
+	const struct firmware *fw;
+	struct nci_prop_fw_cfg_cmd fw_cfg;
+	struct nci_prop_set_rfreg_cmd set_rfreg;
+	struct nci_prop_stop_rfreg_cmd stop_rfreg;
+	u32 checksum;
+	int i, len;
+	int ret;
+
+	ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+	if (ret < 0)
+		return ret;
+
+	/* Compute rfreg checksum */
+
+	checksum = 0;
+	for (i = 0; i < fw->size; i += 4)
+		checksum += *((u32 *)(fw->data+i));
+
+	/* Set default clock configuration for external crystal */
+
+	fw_cfg.clk_type = 0x01;
+	fw_cfg.clk_speed = 0xff;
+	fw_cfg.clk_req = 0xff;
+	ret = nci_prop_cmd(info->ndev, NCI_PROP_FW_CFG,
+		sizeof(fw_cfg), (__u8 *)&fw_cfg);
+	if (ret < 0)
+		goto out;
+
+	/* Start rfreg configuration */
+
+	dev_info(&info->ndev->nfc_dev->dev,
+		"rfreg configuration update: %s\n", fw_name);
+
+	ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
+	if (ret < 0) {
+		dev_err(&info->ndev->nfc_dev->dev,
+			"Unable to start rfreg update\n");
+		goto out;
+	}
+
+	/* Update rfreg */
+
+	set_rfreg.index = 0;
+	for (i = 0; i < fw->size; i += S3FWRN5_RFREG_SECTION_SIZE) {
+		len = (fw->size - i < S3FWRN5_RFREG_SECTION_SIZE) ?
+			(fw->size - i) : S3FWRN5_RFREG_SECTION_SIZE;
+		memcpy(set_rfreg.data, fw->data+i, len);
+		ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
+			len+1, (__u8 *)&set_rfreg);
+		if (ret < 0) {
+			dev_err(&info->ndev->nfc_dev->dev,
+				"rfreg update error (code=%d)\n", ret);
+			goto out;
+		}
+		set_rfreg.index++;
+	}
+
+	/* Finish rfreg configuration */
+
+	stop_rfreg.checksum = checksum & 0xffff;
+	ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
+		sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
+	if (ret < 0) {
+		dev_err(&info->ndev->nfc_dev->dev,
+			"Unable to stop rfreg update\n");
+		goto out;
+	}
+
+	dev_info(&info->ndev->nfc_dev->dev,
+		"rfreg configuration update: success\n");
+out:
+	release_firmware(fw);
+	return ret;
+}
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
new file mode 100644
index 0000000..0e68d43
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -0,0 +1,89 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_NCI_H_
+#define __LOCAL_S3FWRN5_NCI_H_
+
+#include "s3fwrn5.h"
+
+#define NCI_PROP_AGAIN		0x01
+
+#define NCI_PROP_GET_RFREG	0x21
+#define NCI_PROP_SET_RFREG	0x22
+
+struct nci_prop_set_rfreg_cmd {
+	__u8 index;
+	__u8 data[252];
+};
+
+struct nci_prop_set_rfreg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_GET_RFREG_VER	0x24
+
+struct nci_prop_get_rfreg_ver_rsp {
+	__u8 status;
+	__u8 data[8];
+};
+
+#define NCI_PROP_SET_RFREG_VER	0x25
+
+struct nci_prop_set_rfreg_ver_cmd {
+	__u8 data[8];
+};
+
+struct nci_prop_set_rfreg_ver_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_START_RFREG	0x26
+
+struct nci_prop_start_rfreg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_STOP_RFREG	0x27
+
+struct nci_prop_stop_rfreg_cmd {
+	__u16 checksum;
+};
+
+struct nci_prop_stop_rfreg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_FW_CFG		0x28
+
+struct nci_prop_fw_cfg_cmd {
+	__u8 clk_type;
+	__u8 clk_speed;
+	__u8 clk_req;
+};
+
+struct nci_prop_fw_cfg_rsp {
+	__u8 status;
+};
+
+#define NCI_PROP_WR_RESET	0x2f
+
+void s3fwrn5_nci_get_prop_ops(struct nci_prop_ops **ops, size_t *n);
+int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
+
+#endif /* __LOCAL_S3FWRN5_NCI_H_ */
diff --git a/drivers/nfc/s3fwrn5/s3fwrn5.h b/drivers/nfc/s3fwrn5/s3fwrn5.h
new file mode 100644
index 0000000..89210d4
--- /dev/null
+++ b/drivers/nfc/s3fwrn5/s3fwrn5.h
@@ -0,0 +1,99 @@
+/*
+ * NCI based driver for Samsung S3FWRN5 NFC chip
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Robert Baldyga <r.baldyga@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __LOCAL_S3FWRN5_H_
+#define __LOCAL_S3FWRN5_H_
+
+#include <linux/nfc.h>
+
+#include <net/nfc/nci_core.h>
+
+#include "firmware.h"
+
+enum s3fwrn5_mode {
+	S3FWRN5_MODE_COLD,
+	S3FWRN5_MODE_NCI,
+	S3FWRN5_MODE_FW,
+};
+
+struct s3fwrn5_phy_ops {
+	void (*set_wake)(void *id, bool sleep);
+	void (*set_mode)(void *id, enum s3fwrn5_mode);
+	enum s3fwrn5_mode (*get_mode)(void *id);
+	int (*write)(void *id, struct sk_buff *skb);
+};
+
+struct s3fwrn5_info {
+	struct nci_dev *ndev;
+	void *phy_id;
+	struct device *pdev;
+
+	struct s3fwrn5_phy_ops *phy_ops;
+	unsigned int max_payload;
+
+	struct s3fwrn5_fw_info fw_info;
+
+	struct mutex mutex;
+};
+
+static inline int s3fwrn5_set_mode(struct s3fwrn5_info *info,
+	enum s3fwrn5_mode mode)
+{
+	if (!info->phy_ops->set_mode)
+		return -ENOTSUPP;
+
+	info->phy_ops->set_mode(info->phy_id, mode);
+
+	return 0;
+}
+
+static inline enum s3fwrn5_mode s3fwrn5_get_mode(struct s3fwrn5_info *info)
+{
+	if (!info->phy_ops->get_mode)
+		return -ENOTSUPP;
+
+	return info->phy_ops->get_mode(info->phy_id);
+}
+
+static inline int s3fwrn5_set_wake(struct s3fwrn5_info *info, bool wake)
+{
+	if (!info->phy_ops->set_wake)
+		return -ENOTSUPP;
+
+	info->phy_ops->set_wake(info->phy_id, wake);
+
+	return 0;
+}
+
+static inline int s3fwrn5_write(struct s3fwrn5_info *info, struct sk_buff *skb)
+{
+	if (!info->phy_ops->write)
+		return -ENOTSUPP;
+
+	return info->phy_ops->write(info->phy_id, skb);
+}
+
+int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev,
+	struct s3fwrn5_phy_ops *phy_ops, unsigned int max_payload);
+void s3fwrn5_remove(struct nci_dev *ndev);
+
+int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
+	enum s3fwrn5_mode mode);
+
+#endif /* __LOCAL_S3FWRN5_H_ */
diff --git a/drivers/nfc/st-nci/Kconfig b/drivers/nfc/st-nci/Kconfig
index fc3904c..e7c6db9 100644
--- a/drivers/nfc/st-nci/Kconfig
+++ b/drivers/nfc/st-nci/Kconfig
@@ -21,3 +21,14 @@
 
 	  If you choose to build a module, it'll be called st-nci_i2c.
 	  Say N if unsure.
+
+config NFC_ST_NCI_SPI
+	tristate "NFC ST NCI spi support"
+	depends on NFC_ST_NCI && SPI
+	---help---
+	  This module adds support for an SPI interface to the
+	  STMicroelectronics NFC NCI chip family.
+	  Select this if your platform is using the SPI bus.
+
+	  If you choose to build a module, it'll be called st-nci_spi.
+	  Say N if unsure.
diff --git a/drivers/nfc/st-nci/Makefile b/drivers/nfc/st-nci/Makefile
index 0df157d..348ce76 100644
--- a/drivers/nfc/st-nci/Makefile
+++ b/drivers/nfc/st-nci/Makefile
@@ -7,3 +7,6 @@
 
 st-nci_i2c-objs = i2c.o
 obj-$(CONFIG_NFC_ST_NCI_I2C) += st-nci_i2c.o
+
+st-nci_spi-objs = spi.o
+obj-$(CONFIG_NFC_ST_NCI_SPI) += st-nci_spi.o
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 06175ce..707ed2e 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -25,15 +25,15 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/nfc.h>
-#include <linux/platform_data/st_nci.h>
+#include <linux/platform_data/st-nci.h>
 
 #include "ndlc.h"
 
-#define DRIVER_DESC "NCI NFC driver for ST21NFCB"
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
 
 /* ndlc header */
-#define ST21NFCB_FRAME_HEADROOM	1
-#define ST21NFCB_FRAME_TAILROOM 0
+#define ST_NCI_FRAME_HEADROOM	1
+#define ST_NCI_FRAME_TAILROOM 0
 
 #define ST_NCI_I2C_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
 #define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */
@@ -118,15 +118,10 @@
 /*
  * Reads an ndlc frame and returns it in a newly allocated sk_buff.
  * returns:
- * frame size : if received frame is complete (find ST21NFCB_SOF_EOF at
- * end of read)
- * -EAGAIN : if received frame is incomplete (not find ST21NFCB_SOF_EOF
- * at end of read)
+ * 0 : if received frame is complete
  * -EREMOTEIO : i2c read error (fatal)
  * -EBADMSG : frame was incorrect and discarded
- * (value returned from st_nci_i2c_repack)
- * -EIO : if no ST21NFCB_SOF_EOF is found after reaching
- * the read length end sequence
+ * -ENOMEM : cannot allocate skb, frame dropped
  */
 static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
 				 struct sk_buff **skb)
@@ -179,7 +174,7 @@
 /*
  * Reads an ndlc frame from the chip.
  *
- * On ST21NFCB, IRQ goes in idle state when read starts.
+ * On ST_NCI, IRQ goes in idle state when read starts.
  */
 static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
 {
@@ -325,12 +320,12 @@
 		}
 	} else {
 		nfc_err(&client->dev,
-			"st21nfcb platform resources not available\n");
+			"st_nci platform resources not available\n");
 		return -ENODEV;
 	}
 
 	r = ndlc_probe(phy, &i2c_phy_ops, &client->dev,
-			ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM,
+			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
 			&phy->ndlc);
 	if (r < 0) {
 		nfc_err(&client->dev, "Unable to register ndlc layer\n");
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index 56c6a4c..d2cf84e 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -171,6 +171,8 @@
 		if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) {
 			switch (pcb & PCB_SYNC_MASK) {
 			case PCB_SYNC_ACK:
+				skb = skb_dequeue(&ndlc->ack_pending_q);
+				kfree_skb(skb);
 				del_timer_sync(&ndlc->t1_timer);
 				del_timer_sync(&ndlc->t2_timer);
 				ndlc->t2_active = false;
@@ -192,12 +194,13 @@
 					  msecs_to_jiffies(NDLC_TIMER_T1_WAIT));
 				break;
 			default:
-				pr_err("UNKNOWN Packet Control Byte=%d\n", pcb);
 				kfree_skb(skb);
 				break;
 			}
-		} else {
+		} else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) {
 			nci_recv_frame(ndlc->ndev, skb);
+		} else {
+			kfree_skb(skb);
 		}
 	}
 }
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
new file mode 100644
index 0000000..598a58c
--- /dev/null
+++ b/drivers/nfc/st-nci/spi.c
@@ -0,0 +1,392 @@
+/*
+ * SPI Link Layer for ST NCI based Driver
+ * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nfc.h>
+#include <linux/platform_data/st-nci.h>
+
+#include "ndlc.h"
+
+#define DRIVER_DESC "NCI NFC driver for ST_NCI"
+
+/* ndlc header */
+#define ST_NCI_FRAME_HEADROOM	1
+#define ST_NCI_FRAME_TAILROOM 0
+
+#define ST_NCI_SPI_MIN_SIZE 4   /* PCB(1) + NCI Packet header(3) */
+#define ST_NCI_SPI_MAX_SIZE 250 /* req 4.2.1 */
+
+#define ST_NCI_SPI_DRIVER_NAME "st_nci_spi"
+
+static struct spi_device_id st_nci_spi_id_table[] = {
+	{ST_NCI_SPI_DRIVER_NAME, 0},
+	{}
+};
+MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
+
+struct st_nci_spi_phy {
+	struct spi_device *spi_dev;
+	struct llt_ndlc *ndlc;
+
+	unsigned int gpio_reset;
+	unsigned int irq_polarity;
+};
+
+#define SPI_DUMP_SKB(info, skb)					\
+do {								\
+	pr_debug("%s:\n", info);				\
+	print_hex_dump(KERN_DEBUG, "spi: ", DUMP_PREFIX_OFFSET,	\
+		       16, 1, (skb)->data, (skb)->len, 0);	\
+} while (0)
+
+static int st_nci_spi_enable(void *phy_id)
+{
+	struct st_nci_spi_phy *phy = phy_id;
+
+	gpio_set_value(phy->gpio_reset, 0);
+	usleep_range(10000, 15000);
+	gpio_set_value(phy->gpio_reset, 1);
+	usleep_range(80000, 85000);
+
+	if (phy->ndlc->powered == 0)
+		enable_irq(phy->spi_dev->irq);
+
+	return 0;
+}
+
+static void st_nci_spi_disable(void *phy_id)
+{
+	struct st_nci_spi_phy *phy = phy_id;
+
+	disable_irq_nosync(phy->spi_dev->irq);
+}
+
+/*
+ * Writing a frame must not return the number of written bytes.
+ * It must return either zero for success, or <0 for error.
+ * In addition, it must not alter the skb
+ */
+static int st_nci_spi_write(void *phy_id, struct sk_buff *skb)
+{
+	int r;
+	struct st_nci_spi_phy *phy = phy_id;
+	struct spi_device *dev = phy->spi_dev;
+	struct sk_buff *skb_rx;
+	u8 buf[ST_NCI_SPI_MAX_SIZE];
+	struct spi_transfer spi_xfer = {
+		.tx_buf = skb->data,
+		.rx_buf = buf,
+		.len = skb->len,
+	};
+
+	SPI_DUMP_SKB("st_nci_spi_write", skb);
+
+	if (phy->ndlc->hard_fault != 0)
+		return phy->ndlc->hard_fault;
+
+	r = spi_sync_transfer(dev, &spi_xfer, 1);
+	/*
+	 * We may have received some valuable data on the MISO line.
+	 * Pass it back into the ndlc state machine.
+	 */
+	if (!r) {
+		skb_rx = alloc_skb(skb->len, GFP_KERNEL);
+		if (!skb_rx) {
+			r = -ENOMEM;
+			goto exit;
+		}
+
+		skb_put(skb_rx, skb->len);
+		memcpy(skb_rx->data, buf, skb->len);
+		ndlc_recv(phy->ndlc, skb_rx);
+	}
+
+exit:
+	return r;
+}
+
+/*
+ * Reads an ndlc frame and returns it in a newly allocated sk_buff.
+ * returns:
+ * 0 : if received frame is complete
+ * -EREMOTEIO : i2c read error (fatal)
+ * -EBADMSG : frame was incorrect and discarded
+ * -ENOMEM : cannot allocate skb, frame dropped
+ */
+static int st_nci_spi_read(struct st_nci_spi_phy *phy,
+			struct sk_buff **skb)
+{
+	int r;
+	u8 len;
+	u8 buf[ST_NCI_SPI_MAX_SIZE];
+	struct spi_device *dev = phy->spi_dev;
+	struct spi_transfer spi_xfer = {
+		.rx_buf = buf,
+		.len = ST_NCI_SPI_MIN_SIZE,
+	};
+
+	r = spi_sync_transfer(dev, &spi_xfer, 1);
+	if (r < 0)
+		return -EREMOTEIO;
+
+	len = be16_to_cpu(*(__be16 *) (buf + 2));
+	if (len > ST_NCI_SPI_MAX_SIZE) {
+		nfc_err(&dev->dev, "invalid frame len\n");
+		phy->ndlc->hard_fault = 1;
+		return -EBADMSG;
+	}
+
+	*skb = alloc_skb(ST_NCI_SPI_MIN_SIZE + len, GFP_KERNEL);
+	if (*skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(*skb, ST_NCI_SPI_MIN_SIZE);
+	skb_put(*skb, ST_NCI_SPI_MIN_SIZE);
+	memcpy((*skb)->data, buf, ST_NCI_SPI_MIN_SIZE);
+
+	if (!len)
+		return 0;
+
+	spi_xfer.len = len;
+	r = spi_sync_transfer(dev, &spi_xfer, 1);
+	if (r < 0) {
+		kfree_skb(*skb);
+		return -EREMOTEIO;
+	}
+
+	skb_put(*skb, len);
+	memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len);
+
+	SPI_DUMP_SKB("spi frame read", *skb);
+
+	return 0;
+}
+
+/*
+ * Reads an ndlc frame from the chip.
+ *
+ * On ST_NCI, IRQ goes in idle state when read starts.
+ */
+static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
+{
+	struct st_nci_spi_phy *phy = phy_id;
+	struct spi_device *dev;
+	struct sk_buff *skb = NULL;
+	int r;
+
+	if (!phy || !phy->ndlc || irq != phy->spi_dev->irq) {
+		WARN_ON_ONCE(1);
+		return IRQ_NONE;
+	}
+
+	dev = phy->spi_dev;
+	dev_dbg(&dev->dev, "IRQ\n");
+
+	if (phy->ndlc->hard_fault)
+		return IRQ_HANDLED;
+
+	if (!phy->ndlc->powered) {
+		st_nci_spi_disable(phy);
+		return IRQ_HANDLED;
+	}
+
+	r = st_nci_spi_read(phy, &skb);
+	if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG)
+		return IRQ_HANDLED;
+
+	ndlc_recv(phy->ndlc, skb);
+
+	return IRQ_HANDLED;
+}
+
+static struct nfc_phy_ops spi_phy_ops = {
+	.write = st_nci_spi_write,
+	.enable = st_nci_spi_enable,
+	.disable = st_nci_spi_disable,
+};
+
+#ifdef CONFIG_OF
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+	struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+	struct device_node *pp;
+	int gpio;
+	int r;
+
+	pp = dev->dev.of_node;
+	if (!pp)
+		return -ENODEV;
+
+	/* Get GPIO from device tree */
+	gpio = of_get_named_gpio(pp, "reset-gpios", 0);
+	if (gpio < 0) {
+		nfc_err(&dev->dev,
+			"Failed to retrieve reset-gpios from device tree\n");
+		return gpio;
+	}
+
+	/* GPIO request and configuration */
+	r = devm_gpio_request_one(&dev->dev, gpio,
+				GPIOF_OUT_INIT_HIGH, "clf_reset");
+	if (r) {
+		nfc_err(&dev->dev, "Failed to request reset pin\n");
+		return r;
+	}
+	phy->gpio_reset = gpio;
+
+	phy->irq_polarity = irq_get_trigger_type(dev->irq);
+
+	return 0;
+}
+#else
+static int st_nci_spi_of_request_resources(struct spi_device *dev)
+{
+	return -ENODEV;
+}
+#endif
+
+static int st_nci_spi_request_resources(struct spi_device *dev)
+{
+	struct st_nci_nfc_platform_data *pdata;
+	struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+	int r;
+
+	pdata = dev->dev.platform_data;
+	if (pdata == NULL) {
+		nfc_err(&dev->dev, "No platform data\n");
+		return -EINVAL;
+	}
+
+	/* store for later use */
+	phy->gpio_reset = pdata->gpio_reset;
+	phy->irq_polarity = pdata->irq_polarity;
+
+	r = devm_gpio_request_one(&dev->dev,
+			phy->gpio_reset, GPIOF_OUT_INIT_HIGH, "clf_reset");
+	if (r) {
+		pr_err("%s : reset gpio_request failed\n", __FILE__);
+		return r;
+	}
+
+	return 0;
+}
+
+static int st_nci_spi_probe(struct spi_device *dev)
+{
+	struct st_nci_spi_phy *phy;
+	struct st_nci_nfc_platform_data *pdata;
+	int r;
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+	dev_dbg(&dev->dev, "IRQ: %d\n", dev->irq);
+
+	/* Check SPI platform functionalities */
+	if (!dev) {
+		pr_debug("%s: dev is NULL. Device is not accessible.\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	phy = devm_kzalloc(&dev->dev, sizeof(struct st_nci_spi_phy),
+			   GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->spi_dev = dev;
+
+	spi_set_drvdata(dev, phy);
+
+	pdata = dev->dev.platform_data;
+	if (!pdata && dev->dev.of_node) {
+		r = st_nci_spi_of_request_resources(dev);
+		if (r) {
+			nfc_err(&dev->dev, "No platform data\n");
+			return r;
+		}
+	} else if (pdata) {
+		r = st_nci_spi_request_resources(dev);
+		if (r) {
+			nfc_err(&dev->dev,
+				"Cannot get platform resources\n");
+			return r;
+		}
+	} else {
+		nfc_err(&dev->dev,
+			"st_nci platform resources not available\n");
+		return -ENODEV;
+	}
+
+	r = ndlc_probe(phy, &spi_phy_ops, &dev->dev,
+			ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM,
+			&phy->ndlc);
+	if (r < 0) {
+		nfc_err(&dev->dev, "Unable to register ndlc layer\n");
+		return r;
+	}
+
+	r = devm_request_threaded_irq(&dev->dev, dev->irq, NULL,
+				st_nci_irq_thread_fn,
+				phy->irq_polarity | IRQF_ONESHOT,
+				ST_NCI_SPI_DRIVER_NAME, phy);
+	if (r < 0)
+		nfc_err(&dev->dev, "Unable to register IRQ handler\n");
+
+	return r;
+}
+
+static int st_nci_spi_remove(struct spi_device *dev)
+{
+	struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
+
+	dev_dbg(&dev->dev, "%s\n", __func__);
+
+	ndlc_remove(phy->ndlc);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_st_nci_spi_match[] = {
+	{ .compatible = "st,st21nfcb-spi", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, of_st_nci_spi_match);
+#endif
+
+static struct spi_driver st_nci_spi_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = ST_NCI_SPI_DRIVER_NAME,
+		.of_match_table = of_match_ptr(of_st_nci_spi_match),
+	},
+	.probe = st_nci_spi_probe,
+	.id_table = st_nci_spi_id_table,
+	.remove = st_nci_spi_remove,
+};
+
+module_spi_driver(st_nci_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/st-nci_se.c
index 97addfa..c742ef6 100644
--- a/drivers/nfc/st-nci/st-nci_se.c
+++ b/drivers/nfc/st-nci/st-nci_se.c
@@ -189,14 +189,14 @@
 				ST_NCI_DEVICE_MGNT_GATE,
 				ST_NCI_DEVICE_MGNT_PIPE);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Get pipe list */
 	r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE,
 			ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list),
 			&skb_pipe_list);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Complete the existing gate_pipe table */
 	for (i = 0; i < skb_pipe_list->len; i++) {
@@ -222,6 +222,7 @@
 		    dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) {
 			pr_err("Unexpected apdu_reader pipe on host %x\n",
 			       dm_pipe_info->src_host_id);
+			kfree_skb(skb_pipe_info);
 			continue;
 		}
 
@@ -241,13 +242,12 @@
 			ndev->hci_dev->pipes[st_nci_gates[j].pipe].host =
 						dm_pipe_info->src_host_id;
 		}
+		kfree_skb(skb_pipe_info);
 	}
 
 	memcpy(ndev->hci_dev->init_data.gates, st_nci_gates,
 	       sizeof(st_nci_gates));
 
-free_info:
-	kfree_skb(skb_pipe_info);
 	kfree_skb(skb_pipe_list);
 	return r;
 }
diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
index d251f72..0512865 100644
--- a/drivers/nfc/st21nfca/st21nfca.c
+++ b/drivers/nfc/st21nfca/st21nfca.c
@@ -148,14 +148,14 @@
 				ST21NFCA_DEVICE_MGNT_GATE,
 				ST21NFCA_DEVICE_MGNT_PIPE);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Get pipe list */
 	r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
 			ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
 			&skb_pipe_list);
 	if (r < 0)
-		goto free_info;
+		return r;
 
 	/* Complete the existing gate_pipe table */
 	for (i = 0; i < skb_pipe_list->len; i++) {
@@ -181,6 +181,7 @@
 			info->src_host_id != ST21NFCA_ESE_HOST_ID) {
 			pr_err("Unexpected apdu_reader pipe on host %x\n",
 				info->src_host_id);
+			kfree_skb(skb_pipe_info);
 			continue;
 		}
 
@@ -200,6 +201,7 @@
 			hdev->pipes[st21nfca_gates[j].pipe].dest_host =
 							info->src_host_id;
 		}
+		kfree_skb(skb_pipe_info);
 	}
 
 	/*
@@ -214,13 +216,12 @@
 					st21nfca_gates[i].gate,
 					st21nfca_gates[i].pipe);
 			if (r < 0)
-				goto free_info;
+				goto free_list;
 		}
 	}
 
 	memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
-free_info:
-	kfree_skb(skb_pipe_info);
+free_list:
 	kfree_skb(skb_pipe_list);
 	return r;
 }
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 85b4d86..70b0707 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -336,7 +336,7 @@
 
 #define TRF7970A_NFC_TARGET_LEVEL_RFDET(v)	((v) & 0x07)
 #define TRF7970A_NFC_TARGET_LEVEL_HI_RF		BIT(3)
-#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN	BIT(3)
+#define TRF7970A_NFC_TARGET_LEVEL_SDD_EN	BIT(5)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES	(0x0 << 6)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES	(0x1 << 6)
 #define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES	(0x2 << 6)
@@ -629,7 +629,9 @@
 	}
 
 	if (trf->adjust_resp_len) {
-		skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+		if (trf->rx_skb)
+			skb_trim(trf->rx_skb, trf->rx_skb->len - 1);
+
 		trf->adjust_resp_len = false;
 	}
 
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 4f97b24..0df77cb 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -180,7 +180,7 @@
 	 * another kernel subsystem, and we just pass it through.
 	 */
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		err = -EIO;
+		bio->bi_error = -EIO;
 		goto out;
 	}
 
@@ -199,6 +199,7 @@
 					"io error in %s sector %lld, len %d,\n",
 					(rw == READ) ? "READ" : "WRITE",
 					(unsigned long long) iter.bi_sector, len);
+			bio->bi_error = err;
 			break;
 		}
 	}
@@ -206,7 +207,7 @@
 		nd_iostat_end(bio, start);
 
  out:
-	bio_endio(bio, err);
+	bio_endio(bio);
 }
 
 static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 411c7b2..341202e 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1189,7 +1189,7 @@
 	 * another kernel subsystem, and we just pass it through.
 	 */
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		err = -EIO;
+		bio->bi_error = -EIO;
 		goto out;
 	}
 
@@ -1211,6 +1211,7 @@
 					"io error in %s sector %lld, len %d,\n",
 					(rw == READ) ? "READ" : "WRITE",
 					(unsigned long long) iter.bi_sector, len);
+			bio->bi_error = err;
 			break;
 		}
 	}
@@ -1218,7 +1219,7 @@
 		nd_iostat_end(bio, start);
 
 out:
-	bio_endio(bio, err);
+	bio_endio(bio);
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ade9eb9..4c079d5 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -77,7 +77,7 @@
 	if (bio_data_dir(bio))
 		wmb_pmem();
 
-	bio_endio(bio, 0);
+	bio_endio(bio);
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
new file mode 100644
index 0000000..8db2978
--- /dev/null
+++ b/drivers/nvmem/Kconfig
@@ -0,0 +1,39 @@
+menuconfig NVMEM
+	tristate "NVMEM Support"
+	select REGMAP
+	help
+	  Support for NVMEM (Non Volatile Memory) devices like EEPROM, EFUSES...
+
+	  This framework is designed to provide a generic interface to NVMEM
+	  from both the Linux kernel and userspace.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_core.
+
+	  If unsure, say no.
+
+if NVMEM
+
+config QCOM_QFPROM
+	tristate "QCOM QFPROM Support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	select REGMAP_MMIO
+	help
+	  Say y here to enable QFPROM support. The QFPROM provides access
+	  functions for QFPROM data to rest of the drivers via nvmem interface.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_qfprom.
+
+config NVMEM_SUNXI_SID
+	tristate "Allwinner SoCs SID support"
+	depends on ARCH_SUNXI
+	select REGMAP_MMIO
+	help
+	  This is a driver for the 'security ID' available on various Allwinner
+	  devices.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called nvmem_sunxi_sid.
+
+endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
new file mode 100644
index 0000000..4328b93
--- /dev/null
+++ b/drivers/nvmem/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for nvmem drivers.
+#
+
+obj-$(CONFIG_NVMEM)		+= nvmem_core.o
+nvmem_core-y			:= core.o
+
+# Devices
+obj-$(CONFIG_QCOM_QFPROM)	+= nvmem_qfprom.o
+nvmem_qfprom-y			:= qfprom.o
+obj-$(CONFIG_NVMEM_SUNXI_SID)	+= nvmem_sunxi_sid.o
+nvmem_sunxi_sid-y		:= sunxi_sid.o
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
new file mode 100644
index 0000000..d3c6676
--- /dev/null
+++ b/drivers/nvmem/core.c
@@ -0,0 +1,1083 @@
+/*
+ * nvmem framework core.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct nvmem_device {
+	const char		*name;
+	struct regmap		*regmap;
+	struct module		*owner;
+	struct device		dev;
+	int			stride;
+	int			word_size;
+	int			ncells;
+	int			id;
+	int			users;
+	size_t			size;
+	bool			read_only;
+};
+
+struct nvmem_cell {
+	const char		*name;
+	int			offset;
+	int			bytes;
+	int			bit_offset;
+	int			nbits;
+	struct nvmem_device	*nvmem;
+	struct list_head	node;
+};
+
+static DEFINE_MUTEX(nvmem_mutex);
+static DEFINE_IDA(nvmem_ida);
+
+static LIST_HEAD(nvmem_cells);
+static DEFINE_MUTEX(nvmem_cells_mutex);
+
+#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+
+static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
+				    struct bin_attribute *attr,
+				    char *buf, loff_t pos, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+	int rc;
+
+	/* Stop the user from reading */
+	if (pos > nvmem->size)
+		return 0;
+
+	if (pos + count > nvmem->size)
+		count = nvmem->size - pos;
+
+	count = round_down(count, nvmem->word_size);
+
+	rc = regmap_raw_read(nvmem->regmap, pos, buf, count);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return count;
+}
+
+static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
+				     struct bin_attribute *attr,
+				     char *buf, loff_t pos, size_t count)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+	int rc;
+
+	/* Stop the user from writing */
+	if (pos > nvmem->size)
+		return 0;
+
+	if (pos + count > nvmem->size)
+		count = nvmem->size - pos;
+
+	count = round_down(count, nvmem->word_size);
+
+	rc = regmap_raw_write(nvmem->regmap, pos, buf, count);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return count;
+}
+
+/* default read/write permissions */
+static struct bin_attribute bin_attr_rw_nvmem = {
+	.attr	= {
+		.name	= "nvmem",
+		.mode	= S_IWUSR | S_IRUGO,
+	},
+	.read	= bin_attr_nvmem_read,
+	.write	= bin_attr_nvmem_write,
+};
+
+static struct bin_attribute *nvmem_bin_rw_attributes[] = {
+	&bin_attr_rw_nvmem,
+	NULL,
+};
+
+static const struct attribute_group nvmem_bin_rw_group = {
+	.bin_attrs	= nvmem_bin_rw_attributes,
+};
+
+static const struct attribute_group *nvmem_rw_dev_groups[] = {
+	&nvmem_bin_rw_group,
+	NULL,
+};
+
+/* read only permission */
+static struct bin_attribute bin_attr_ro_nvmem = {
+	.attr	= {
+		.name	= "nvmem",
+		.mode	= S_IRUGO,
+	},
+	.read	= bin_attr_nvmem_read,
+};
+
+static struct bin_attribute *nvmem_bin_ro_attributes[] = {
+	&bin_attr_ro_nvmem,
+	NULL,
+};
+
+static const struct attribute_group nvmem_bin_ro_group = {
+	.bin_attrs	= nvmem_bin_ro_attributes,
+};
+
+static const struct attribute_group *nvmem_ro_dev_groups[] = {
+	&nvmem_bin_ro_group,
+	NULL,
+};
+
+static void nvmem_release(struct device *dev)
+{
+	struct nvmem_device *nvmem = to_nvmem_device(dev);
+
+	ida_simple_remove(&nvmem_ida, nvmem->id);
+	kfree(nvmem);
+}
+
+static const struct device_type nvmem_provider_type = {
+	.release	= nvmem_release,
+};
+
+static struct bus_type nvmem_bus_type = {
+	.name		= "nvmem",
+};
+
+static int of_nvmem_match(struct device *dev, void *nvmem_np)
+{
+	return dev->of_node == nvmem_np;
+}
+
+static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
+{
+	struct device *d;
+
+	if (!nvmem_np)
+		return NULL;
+
+	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);
+
+	if (!d)
+		return NULL;
+
+	return to_nvmem_device(d);
+}
+
+static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
+{
+	struct nvmem_cell *p;
+
+	list_for_each_entry(p, &nvmem_cells, node)
+		if (p && !strcmp(p->name, cell_id))
+			return p;
+
+	return NULL;
+}
+
+static void nvmem_cell_drop(struct nvmem_cell *cell)
+{
+	mutex_lock(&nvmem_cells_mutex);
+	list_del(&cell->node);
+	mutex_unlock(&nvmem_cells_mutex);
+	kfree(cell);
+}
+
+static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
+{
+	struct nvmem_cell *cell;
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &nvmem_cells) {
+		cell = list_entry(p, struct nvmem_cell, node);
+		if (cell->nvmem == nvmem)
+			nvmem_cell_drop(cell);
+	}
+}
+
+static void nvmem_cell_add(struct nvmem_cell *cell)
+{
+	mutex_lock(&nvmem_cells_mutex);
+	list_add_tail(&cell->node, &nvmem_cells);
+	mutex_unlock(&nvmem_cells_mutex);
+}
+
+static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
+				   const struct nvmem_cell_info *info,
+				   struct nvmem_cell *cell)
+{
+	cell->nvmem = nvmem;
+	cell->offset = info->offset;
+	cell->bytes = info->bytes;
+	cell->name = info->name;
+
+	cell->bit_offset = info->bit_offset;
+	cell->nbits = info->nbits;
+
+	if (cell->nbits)
+		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE);
+
+	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+		dev_err(&nvmem->dev,
+			"cell %s unaligned to nvmem stride %d\n",
+			cell->name, nvmem->stride);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int nvmem_add_cells(struct nvmem_device *nvmem,
+			   const struct nvmem_config *cfg)
+{
+	struct nvmem_cell **cells;
+	const struct nvmem_cell_info *info = cfg->cells;
+	int i, rval;
+
+	cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
+	if (!cells)
+		return -ENOMEM;
+
+	for (i = 0; i < cfg->ncells; i++) {
+		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
+		if (!cells[i]) {
+			rval = -ENOMEM;
+			goto err;
+		}
+
+		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
+		if (IS_ERR_VALUE(rval)) {
+			kfree(cells[i]);
+			goto err;
+		}
+
+		nvmem_cell_add(cells[i]);
+	}
+
+	nvmem->ncells = cfg->ncells;
+	/* remove tmp array */
+	kfree(cells);
+
+	return 0;
+err:
+	while (--i >= 0)
+		nvmem_cell_drop(cells[i]);
+
+	return rval;
+}
+
+/**
+ * nvmem_register() - Register an nvmem device for a given nvmem_config.
+ * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
+ *
+ * @config: nvmem device configuration with which nvmem device is created.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
+ * on success.
+ */
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *config)
+{
+	struct nvmem_device *nvmem;
+	struct device_node *np;
+	struct regmap *rm;
+	int rval;
+
+	if (!config->dev)
+		return ERR_PTR(-EINVAL);
+
+	rm = dev_get_regmap(config->dev, NULL);
+	if (!rm) {
+		dev_err(config->dev, "Regmap not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
+	if (!nvmem)
+		return ERR_PTR(-ENOMEM);
+
+	rval  = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
+	if (rval < 0) {
+		kfree(nvmem);
+		return ERR_PTR(rval);
+	}
+
+	nvmem->id = rval;
+	nvmem->regmap = rm;
+	nvmem->owner = config->owner;
+	nvmem->stride = regmap_get_reg_stride(rm);
+	nvmem->word_size = regmap_get_val_bytes(rm);
+	nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
+	nvmem->dev.type = &nvmem_provider_type;
+	nvmem->dev.bus = &nvmem_bus_type;
+	nvmem->dev.parent = config->dev;
+	np = config->dev->of_node;
+	nvmem->dev.of_node = np;
+	dev_set_name(&nvmem->dev, "%s%d",
+		     config->name ? : "nvmem", config->id);
+
+	nvmem->read_only = of_property_read_bool(np, "read-only") |
+			   config->read_only;
+
+	nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
+					       nvmem_rw_dev_groups;
+
+	device_initialize(&nvmem->dev);
+
+	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+	rval = device_add(&nvmem->dev);
+	if (rval) {
+		ida_simple_remove(&nvmem_ida, nvmem->id);
+		kfree(nvmem);
+		return ERR_PTR(rval);
+	}
+
+	if (config->cells)
+		nvmem_add_cells(nvmem, config);
+
+	return nvmem;
+}
+EXPORT_SYMBOL_GPL(nvmem_register);
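(Illustration, not part of the patch: nvmem_register() looks the backing regmap up with dev_get_regmap(), so a provider attaches a regmap to its device first and then fills a struct nvmem_config. A minimal sketch of a hypothetical MMIO-backed provider; the foo_* names, the "foo-efuse" string and the register range are made up.)

static const struct regmap_config foo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 8,
	.reg_stride = 1,
	.max_register = 0xff,
};

static int foo_efuse_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;
	struct regmap *map;
	struct nvmem_config cfg = {};
	struct nvmem_device *nvmem;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* the regmap must exist before nvmem_register() is called */
	map = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	cfg.dev = &pdev->dev;
	cfg.name = "foo-efuse";
	cfg.owner = THIS_MODULE;

	nvmem = nvmem_register(&cfg);
	return PTR_ERR_OR_ZERO(nvmem);
}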
+
+/**
+ * nvmem_unregister() - Unregister previously registered nvmem device
+ *
+ * @nvmem: Pointer to previously registered nvmem device.
+ *
+ * Return: Will be a negative error code on failure or zero on success.
+ */
+int nvmem_unregister(struct nvmem_device *nvmem)
+{
+	mutex_lock(&nvmem_mutex);
+	if (nvmem->users) {
+		mutex_unlock(&nvmem_mutex);
+		return -EBUSY;
+	}
+	mutex_unlock(&nvmem_mutex);
+
+	nvmem_device_remove_all_cells(nvmem);
+	device_del(&nvmem->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister);
+
+static struct nvmem_device *__nvmem_device_get(struct device_node *np,
+					       struct nvmem_cell **cellp,
+					       const char *cell_id)
+{
+	struct nvmem_device *nvmem = NULL;
+
+	mutex_lock(&nvmem_mutex);
+
+	if (np) {
+		nvmem = of_nvmem_find(np);
+		if (!nvmem) {
+			mutex_unlock(&nvmem_mutex);
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+	} else {
+		struct nvmem_cell *cell = nvmem_find_cell(cell_id);
+
+		if (cell) {
+			nvmem = cell->nvmem;
+			*cellp = cell;
+		}
+
+		if (!nvmem) {
+			mutex_unlock(&nvmem_mutex);
+			return ERR_PTR(-ENOENT);
+		}
+	}
+
+	nvmem->users++;
+	mutex_unlock(&nvmem_mutex);
+
+	if (!try_module_get(nvmem->owner)) {
+		dev_err(&nvmem->dev,
+			"could not increase module refcount for cell %s\n",
+			nvmem->name);
+
+		mutex_lock(&nvmem_mutex);
+		nvmem->users--;
+		mutex_unlock(&nvmem_mutex);
+
+		return ERR_PTR(-EINVAL);
+	}
+
+	return nvmem;
+}
+
+static void __nvmem_device_put(struct nvmem_device *nvmem)
+{
+	module_put(nvmem->owner);
+	mutex_lock(&nvmem_mutex);
+	nvmem->users--;
+	mutex_unlock(&nvmem_mutex);
+}
+
+static int nvmem_match(struct device *dev, void *data)
+{
+	return !strcmp(dev_name(dev), data);
+}
+
+static struct nvmem_device *nvmem_find(const char *name)
+{
+	struct device *d;
+
+	d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);
+
+	if (!d)
+		return NULL;
+
+	return to_nvmem_device(d);
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @np: Device tree node that uses the nvmem device.
+ * @id: nvmem name from the nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
+{
+
+	struct device_node *nvmem_np;
+	int index;
+
+	index = of_property_match_string(np, "nvmem-names", id);
+
+	nvmem_np = of_parse_phandle(np, "nvmem", index);
+	if (!nvmem_np)
+		return ERR_PTR(-EINVAL);
+
+	return __nvmem_device_get(nvmem_np, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_device_get);
+#endif
+
+/**
+ * nvmem_device_get() - Get nvmem device from a given id
+ *
+ * @dev: Device that uses the nvmem device.
+ * @dev_name: name of the nvmem device from the nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.
+ */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
+{
+	if (dev->of_node) { /* try dt first */
+		struct nvmem_device *nvmem;
+
+		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
+
+		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
+			return nvmem;
+
+	}
+
+	return nvmem_find(dev_name);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_get);
+
+static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_device **nvmem = res;
+
+	if (WARN_ON(!nvmem || !*nvmem))
+		return 0;
+
+	return *nvmem == data;
+}
+
+static void devm_nvmem_device_release(struct device *dev, void *res)
+{
+	nvmem_device_put(*(struct nvmem_device **)res);
+}
+
+/**
+ * devm_nvmem_device_put() - put an already obtained nvmem device
+ *
+ * @dev: Device that requested the nvmem device.
+ * @nvmem: pointer to nvmem device obtained with devm_nvmem_device_get(),
+ * that needs to be released.
+ */
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_device_release,
+			     devm_nvmem_device_match, nvmem);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
+
+/**
+ * nvmem_device_put() - put an already obtained nvmem device
+ *
+ * @nvmem: pointer to nvmem device that needs to be released.
+ */
+void nvmem_device_put(struct nvmem_device *nvmem)
+{
+	__nvmem_device_put(nvmem);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_put);
+
+/**
+ * devm_nvmem_device_get() - Get nvmem device of a device from a given id
+ *
+ * @dev: Device that requests the nvmem device.
+ * @id: nvmem name from the nvmem-names property.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
+ * on success.  The nvmem_device will be released automatically once the
+ * device is freed.
+ */
+struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
+{
+	struct nvmem_device **ptr, *nvmem;
+
+	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	nvmem = nvmem_device_get(dev, id);
+	if (!IS_ERR(nvmem)) {
+		*ptr = nvmem;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return nvmem;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
+
+static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
+{
+	struct nvmem_cell *cell = NULL;
+	struct nvmem_device *nvmem;
+
+	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
+	if (IS_ERR(nvmem))
+		return ERR_CAST(nvmem);
+
+	return cell;
+}
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+/**
+ * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
+ *
+ * @np: Device tree node that uses the nvmem cell.
+ * @name: nvmem cell name from the nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell must be released
+ * with nvmem_cell_put().
+ */
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+					    const char *name)
+{
+	struct device_node *cell_np, *nvmem_np;
+	struct nvmem_cell *cell;
+	struct nvmem_device *nvmem;
+	const __be32 *addr;
+	int rval, len, index;
+
+	index = of_property_match_string(np, "nvmem-cell-names", name);
+
+	cell_np = of_parse_phandle(np, "nvmem-cells", index);
+	if (!cell_np)
+		return ERR_PTR(-EINVAL);
+
+	nvmem_np = of_get_next_parent(cell_np);
+	if (!nvmem_np)
+		return ERR_PTR(-EINVAL);
+
+	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+	if (IS_ERR(nvmem))
+		return ERR_CAST(nvmem);
+
+	addr = of_get_property(cell_np, "reg", &len);
+	if (!addr || (len < 2 * sizeof(u32))) {
+		dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
+			cell_np->full_name);
+		rval  = -EINVAL;
+		goto err_mem;
+	}
+
+	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+	if (!cell) {
+		rval = -ENOMEM;
+		goto err_mem;
+	}
+
+	cell->nvmem = nvmem;
+	cell->offset = be32_to_cpup(addr++);
+	cell->bytes = be32_to_cpup(addr);
+	cell->name = cell_np->name;
+
+	addr = of_get_property(cell_np, "bits", &len);
+	if (addr && len == (2 * sizeof(u32))) {
+		cell->bit_offset = be32_to_cpup(addr++);
+		cell->nbits = be32_to_cpup(addr);
+	}
+
+	if (cell->nbits)
+		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
+					   BITS_PER_BYTE);
+
+	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+			dev_err(&nvmem->dev,
+				"cell %s unaligned to nvmem stride %d\n",
+				cell->name, nvmem->stride);
+		rval  = -EINVAL;
+		goto err_sanity;
+	}
+
+	nvmem_cell_add(cell);
+
+	return cell;
+
+err_sanity:
+	kfree(cell);
+
+err_mem:
+	__nvmem_device_put(nvmem);
+
+	return ERR_PTR(rval);
+}
+EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
+#endif
+
+/**
+ * nvmem_cell_get() - Get nvmem cell of device from a given cell name
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: nvmem cell name to get.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell must be released
+ * with nvmem_cell_put().
+ */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+{
+	struct nvmem_cell *cell;
+
+	if (dev->of_node) { /* try dt first */
+		cell = of_nvmem_cell_get(dev->of_node, cell_id);
+		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
+			return cell;
+	}
+
+	return nvmem_cell_get_from_list(cell_id);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_get);
+
+static void devm_nvmem_cell_release(struct device *dev, void *res)
+{
+	nvmem_cell_put(*(struct nvmem_cell **)res);
+}
+
+/**
+ * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @id: nvmem cell name from the nvmem-cell-names property.
+ *
+ * Return: Will be an ERR_PTR() on error or a valid pointer
+ * to a struct nvmem_cell.  The nvmem_cell will be released
+ * automatically once the device is freed.
+ */
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
+{
+	struct nvmem_cell **ptr, *cell;
+
+	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	cell = nvmem_cell_get(dev, id);
+	if (!IS_ERR(cell)) {
+		*ptr = cell;
+		devres_add(dev, ptr);
+	} else {
+		devres_free(ptr);
+	}
+
+	return cell;
+}
+EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
+
+static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
+{
+	struct nvmem_cell **c = res;
+
+	if (WARN_ON(!c || !*c))
+		return 0;
+
+	return *c == data;
+}
+
+/**
+ * devm_nvmem_cell_put() - Release previously allocated nvmem cell
+ * from devm_nvmem_cell_get.
+ *
+ * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get()
+ */
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_nvmem_cell_release,
+				devm_nvmem_cell_match, cell);
+
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_nvmem_cell_put);
+
+/**
+ * nvmem_cell_put() - Release previously allocated nvmem cell.
+ *
+ * @cell: Previously allocated nvmem cell by nvmem_cell_get()
+ */
+void nvmem_cell_put(struct nvmem_cell *cell)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+
+	__nvmem_device_put(nvmem);
+	nvmem_cell_drop(cell);
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_put);
+
+static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
+						    void *buf)
+{
+	u8 *p, *b;
+	int i, bit_offset = cell->bit_offset;
+
+	p = b = buf;
+	if (bit_offset) {
+		/* First shift */
+		*b++ >>= bit_offset;
+
+		/* setup rest of the bytes if any */
+		for (i = 1; i < cell->bytes; i++) {
+			/* Get bits from next byte and shift them towards msb */
+			*p |= *b << (BITS_PER_BYTE - bit_offset);
+
+			p = b;
+			*b++ >>= bit_offset;
+		}
+
+		/* result fits in less bytes */
+		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
+			*p-- = 0;
+	}
+	/* clear msb bits if any leftover in the last byte */
+	*p &= GENMASK((cell->nbits%BITS_PER_BYTE) - 1, 0);
+}
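(Worked example, added for illustration: a cell with bit_offset = 2 and nbits = 10 occupies DIV_ROUND_UP(10 + 2, 8) = 2 raw bytes. If the raw read returns {0xab, 0x0c}, i.e. the little-endian word 0x0cab, the shift above rewrites the buffer in place to {0x2a, 0x03}, which is (0x0cab >> 2) & 0x3ff = 0x32a stored little-endian.)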
+
+static int __nvmem_cell_read(struct nvmem_device *nvmem,
+		      struct nvmem_cell *cell,
+		      void *buf, size_t *len)
+{
+	int rc;
+
+	rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	/* shift bits in-place */
+	if (cell->bit_offset || cell->nbits)
+		nvmem_shift_read_buffer_in_place(cell, buf);
+
+	*len = cell->bytes;
+
+	return 0;
+}
+
+/**
+ * nvmem_cell_read() - Read a given nvmem cell
+ *
+ * @cell: nvmem cell to be read.
+ * @len: pointer to length of cell which will be populated on successful read.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
+ * The buffer should be freed by the consumer with kfree().
+ */
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	u8 *buf;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return ERR_PTR(-EINVAL);
+
+	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	rc = __nvmem_cell_read(nvmem, cell, buf, len);
+	if (IS_ERR_VALUE(rc)) {
+		kfree(buf);
+		return ERR_PTR(rc);
+	}
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_read);
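(Illustration, not part of the patch: a typical consumer pairs nvmem_cell_get() with nvmem_cell_read() and frees both the returned buffer and the cell when done. A minimal sketch, assuming a cell named "mac-address" is wired up for the consumer device; example_read_mac is a made-up name.)

static int example_read_mac(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	print_hex_dump_bytes("mac: ", DUMP_PREFIX_NONE, buf, len);
	kfree(buf);
	return 0;
}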
+
+static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
+						    u8 *_buf, int len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	int i, rc, nbits, bit_offset = cell->bit_offset;
+	u8 v, *p, *buf, *b, pbyte, pbits;
+
+	nbits = cell->nbits;
+	buf = kzalloc(cell->bytes, GFP_KERNEL);
+	if (!buf)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(buf, _buf, len);
+	p = b = buf;
+
+	if (bit_offset) {
+		pbyte = *b;
+		*b <<= bit_offset;
+
+		/* setup the first byte with lsb bits from nvmem */
+		rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
+		*b++ |= GENMASK(bit_offset - 1, 0) & v;
+
+		/* setup rest of the byte if any */
+		for (i = 1; i < cell->bytes; i++) {
+			/* Get last byte bits and shift them towards lsb */
+			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
+			pbyte = *b;
+			p = b;
+			*b <<= bit_offset;
+			*b++ |= pbits;
+		}
+	}
+
+	/* if it's not end on byte boundary */
+	if ((nbits + bit_offset) % BITS_PER_BYTE) {
+		/* setup the last byte with msb bits from nvmem */
+		rc = regmap_raw_read(nvmem->regmap,
+				    cell->offset + cell->bytes - 1, &v, 1);
+		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
+
+	}
+
+	return buf;
+}
+
+/**
+ * nvmem_cell_write() - Write to a given nvmem cell
+ *
+ * @cell: nvmem cell to be written.
+ * @buf: Buffer to be written.
+ * @len: length of buffer to be written to nvmem cell.
+ *
+ * Return: number of bytes written, or a negative error code on failure.
+ */
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+{
+	struct nvmem_device *nvmem = cell->nvmem;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap || nvmem->read_only ||
+	    (cell->bit_offset == 0 && len != cell->bytes))
+		return -EINVAL;
+
+	if (cell->bit_offset || cell->nbits) {
+		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
+		if (IS_ERR(buf))
+			return PTR_ERR(buf);
+	}
+
+	rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
+
+	/* free the tmp buffer */
+	if (cell->bit_offset)
+		kfree(buf);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_cell_write);
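+
+/*
+ * Consumer sketch (illustrative only; the two-byte "calibration" cell is a
+ * made-up example): byte-aligned cells must be written with exactly
+ * cell->bytes bytes, and the call returns that length on success:
+ *
+ *	u8 calib[2] = { 0x12, 0x34 };
+ *	int ret = nvmem_cell_write(cell, calib, sizeof(calib));
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */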
+
+/**
+ * nvmem_device_cell_read() - Read a cell from a given nvmem device
+ *
+ * @nvmem: nvmem device to read from.
+ * @info: nvmem cell info to be read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: number of bytes read on success, or a negative error code on
+ * error.
+ */
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+			   struct nvmem_cell_info *info, void *buf)
+{
+	struct nvmem_cell cell;
+	int rc;
+	ssize_t len;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
+
+/**
+ * nvmem_device_cell_write() - Write cell to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @info: nvmem cell info to be written
+ * @buf: buffer to be written to cell.
+ *
+ * Return: number of bytes written, or a negative error code on failure.
+ */
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+			    struct nvmem_cell_info *info, void *buf)
+{
+	struct nvmem_cell cell;
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return nvmem_cell_write(&cell, buf, cell.bytes);
+}
+EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
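+
+/*
+ * Provider-level sketch (illustrative only; the "serial" cell layout is made
+ * up): nvmem_device_cell_read()/nvmem_device_cell_write() take an ad-hoc
+ * struct nvmem_cell_info instead of a looked-up cell, e.g. a six byte field
+ * at offset 0x20:
+ *
+ *	struct nvmem_cell_info info = {
+ *		.name	= "serial",
+ *		.offset	= 0x20,
+ *		.bytes	= 6,
+ *	};
+ *	u8 serial[6];
+ *	ssize_t ret = nvmem_device_cell_read(nvmem, &info, serial);
+ */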
+
+/**
+ * nvmem_device_read() - Read from a given nvmem device
+ *
+ * @nvmem: nvmem device to read from.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to read.
+ * @buf: buffer pointer which will be populated on successful read.
+ *
+ * Return: number of bytes read on success, or a negative error code on
+ * error.
+ */
+int nvmem_device_read(struct nvmem_device *nvmem,
+		      unsigned int offset,
+		      size_t bytes, void *buf)
+{
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_read);
+
+/**
+ * nvmem_device_write() - Write to a given nvmem device
+ *
+ * @nvmem: nvmem device to be written to.
+ * @offset: offset in nvmem device.
+ * @bytes: number of bytes to write.
+ * @buf: buffer to be written.
+ *
+ * Return: number of bytes written, or a negative error code on failure.
+ */
+int nvmem_device_write(struct nvmem_device *nvmem,
+		       unsigned int offset,
+		       size_t bytes, void *buf)
+{
+	int rc;
+
+	if (!nvmem || !nvmem->regmap)
+		return -EINVAL;
+
+	rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes);
+
+	if (IS_ERR_VALUE(rc))
+		return rc;
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(nvmem_device_write);
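+
+/*
+ * Raw access sketch (illustrative only; offset and length are arbitrary):
+ * nvmem_device_read()/nvmem_device_write() address the device by plain byte
+ * offset rather than by cell, e.g. reading 16 bytes at offset 0x10:
+ *
+ *	u8 data[16];
+ *	int ret = nvmem_device_read(nvmem, 0x10, sizeof(data), data);
+ *
+ *	if (ret < 0)
+ *		return ret;
+ */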
+
+static int __init nvmem_init(void)
+{
+	return bus_register(&nvmem_bus_type);
+}
+
+static void __exit nvmem_exit(void)
+{
+	bus_unregister(&nvmem_bus_type);
+}
+
+subsys_initcall(nvmem_init);
+module_exit(nvmem_exit);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("nvmem Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
new file mode 100644
index 0000000..afb67e7
--- /dev/null
+++ b/drivers/nvmem/qfprom.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+static struct regmap_config qfprom_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 8,
+	.reg_stride = 1,
+};
+
+static struct nvmem_config econfig = {
+	.name = "qfprom",
+	.owner = THIS_MODULE,
+};
+
+static int qfprom_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static int qfprom_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct nvmem_device *nvmem;
+	struct regmap *regmap;
+	void __iomem *base;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	qfprom_regmap_config.max_register = resource_size(res) - 1;
+
+	regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(regmap);
+	}
+	econfig.dev = dev;
+	nvmem = nvmem_register(&econfig);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static const struct of_device_id qfprom_of_match[] = {
+	{ .compatible = "qcom,qfprom",},
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qfprom_of_match);
+
+static struct platform_driver qfprom_driver = {
+	.probe = qfprom_probe,
+	.remove = qfprom_remove,
+	.driver = {
+		.name = "qcom,qfprom",
+		.of_match_table = qfprom_of_match,
+	},
+};
+module_platform_driver(qfprom_driver);
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm QFPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
new file mode 100644
index 0000000..14777dd
--- /dev/null
+++ b/drivers/nvmem/sunxi_sid.c
@@ -0,0 +1,171 @@
+/*
+ * Allwinner sunXi SoCs Security ID support.
+ *
+ * Copyright (c) 2013 Oliver Schinagl <oliver@schinagl.nl>
+ * Copyright (C) 2014 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+
+
+static struct nvmem_config econfig = {
+	.name = "sunxi-sid",
+	.read_only = true,
+	.owner = THIS_MODULE,
+};
+
+struct sunxi_sid {
+	void __iomem		*base;
+};
+
+/*
+ * We read the entire key, due to a 32 bit read alignment requirement. Since
+ * we want to return the requested byte, this results in somewhat slower code
+ * and uses 4 times more reads than needed, but it keeps the code simpler.
+ * Since the SID is only very rarely probed, this is not really an issue.
+ */
+static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
+			      const unsigned int offset)
+{
+	u32 sid_key;
+
+	sid_key = ioread32be(sid->base + round_down(offset, 4));
+	sid_key >>= (offset % 4) * 8;
+
+	return sid_key; /* Only return the last byte */
+}
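+
+/*
+ * Illustrative trace: a request for offset 6 big-endian-reads the 32-bit
+ * word at offset 4, shifts it right by (6 % 4) * 8 = 16 bits, and the u8
+ * return type truncates the result to the low byte of the shifted value.
+ */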
+
+static int sunxi_sid_read(void *context,
+			    const void *reg, size_t reg_size,
+			    void *val, size_t val_size)
+{
+	struct sunxi_sid *sid = context;
+	unsigned int offset = *(u32 *)reg;
+	u8 *buf = val;
+
+	while (val_size) {
+		*buf++ = sunxi_sid_read_byte(sid, offset);
+		val_size--;
+		offset++;
+	}
+
+	return 0;
+}
+
+static int sunxi_sid_write(void *context, const void *data, size_t count)
+{
+	/* Unimplemented, dummy to keep regmap core happy */
+	return 0;
+}
+
+static struct regmap_bus sunxi_sid_bus = {
+	.read = sunxi_sid_read,
+	.write = sunxi_sid_write,
+	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+};
+
+static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg)
+{
+	return false;
+}
+
+static struct regmap_config sunxi_sid_regmap_config = {
+	.reg_bits = 32,
+	.val_bits = 8,
+	.reg_stride = 1,
+	.writeable_reg = sunxi_sid_writeable_reg,
+};
+
+static int sunxi_sid_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	struct nvmem_device *nvmem;
+	struct regmap *regmap;
+	struct sunxi_sid *sid;
+	int i, size;
+	char *randomness;
+
+	sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL);
+	if (!sid)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	sid->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(sid->base))
+		return PTR_ERR(sid->base);
+
+	size = resource_size(res) - 1;
+	sunxi_sid_regmap_config.max_register = size;
+
+	regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid,
+				  &sunxi_sid_regmap_config);
+	if (IS_ERR(regmap)) {
+		dev_err(dev, "regmap init failed\n");
+		return PTR_ERR(regmap);
+	}
+
+	econfig.dev = dev;
+	nvmem = nvmem_register(&econfig);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL);
+	for (i = 0; i < size; i++)
+		randomness[i] = sunxi_sid_read_byte(sid, i);
+
+	add_device_randomness(randomness, size);
+	kfree(randomness);
+
+	platform_set_drvdata(pdev, nvmem);
+
+	return 0;
+}
+
+static int sunxi_sid_remove(struct platform_device *pdev)
+{
+	struct nvmem_device *nvmem = platform_get_drvdata(pdev);
+
+	return nvmem_unregister(nvmem);
+}
+
+static const struct of_device_id sunxi_sid_of_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-sid" },
+	{ .compatible = "allwinner,sun7i-a20-sid" },
+	{/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
+
+static struct platform_driver sunxi_sid_driver = {
+	.probe = sunxi_sid_probe,
+	.remove = sunxi_sid_remove,
+	.driver = {
+		.name = "eeprom-sunxi-sid",
+		.of_match_table = sunxi_sid_of_match,
+	},
+};
+module_platform_driver(sunxi_sid_driver);
+
+MODULE_AUTHOR("Oliver Schinagl <oliver@schinagl.nl>");
+MODULE_DESCRIPTION("Allwinner sunxi security id driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 8bfda6a..384574c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -845,10 +845,10 @@
 	struct resource res;
 
 	while (dn) {
-		if (of_address_to_resource(dn, 0, &res))
-			continue;
-		if (res.start == base_address)
+		if (!of_address_to_resource(dn, 0, &res) &&
+		    res.start == base_address)
 			return dn;
+
 		dn = of_find_matching_node(dn, matches);
 	}
 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0749656..6e82bc42 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -967,7 +967,9 @@
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
-#define MAX_PHYS_ADDR	((phys_addr_t)~0)
+#ifndef MAX_MEMBLOCK_ADDR
+#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
+#endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
@@ -984,16 +986,16 @@
 	}
 	size &= PAGE_MASK;
 
-	if (base > MAX_PHYS_ADDR) {
+	if (base > MAX_MEMBLOCK_ADDR) {
 		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
 				base, base + size);
 		return;
 	}
 
-	if (base + size - 1 > MAX_PHYS_ADDR) {
+	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
 		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
-				((u64)MAX_PHYS_ADDR) + 1, base + size);
-		size = MAX_PHYS_ADDR - base + 1;
+				((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
+		size = MAX_MEMBLOCK_ADDR - base + 1;
 	}
 
 	if (base + size < phys_offset) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 3cf7a01..2956d72 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -18,6 +18,7 @@
  * driver.
  */
 
+#include <linux/device.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/module.h>
@@ -576,3 +577,23 @@
 		kfree(desc);
 	}
 }
+
+/**
+ * of_msi_configure - Set the msi_domain field of a device
+ * @dev: device structure to associate with an MSI irq domain
+ * @np: device node for that device
+ */
+void of_msi_configure(struct device *dev, struct device_node *np)
+{
+	struct device_node *msi_np;
+	struct irq_domain *d;
+
+	msi_np = of_parse_phandle(np, "msi-parent", 0);
+	if (!msi_np)
+		return;
+
+	d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI);
+	if (!d)
+		d = irq_find_host(msi_np);
+	dev_set_msi_domain(dev, d);
+}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fdc60db..1350fa2 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -16,6 +16,7 @@
 #include <linux/phy.h>
 #include <linux/phy_fixed.h>
 #include <linux/of.h>
+#include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 #include <linux/module.h>
@@ -266,7 +267,8 @@
 bool of_phy_is_fixed_link(struct device_node *np)
 {
 	struct device_node *dn;
-	int len;
+	int len, err;
+	const char *managed;
 
 	/* New binding */
 	dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +277,10 @@
 		return true;
 	}
 
+	err = of_property_read_string(np, "managed", &managed);
+	if (err == 0 && strcmp(managed, "auto") != 0)
+		return true;
+
 	/* Old binding */
 	if (of_get_property(np, "fixed-link", &len) &&
 	    len == (5 * sizeof(__be32)))
@@ -289,8 +295,19 @@
 	struct fixed_phy_status status = {};
 	struct device_node *fixed_link_node;
 	const __be32 *fixed_link_prop;
-	int len;
+	int link_gpio;
+	int len, err;
 	struct phy_device *phy;
+	const char *managed;
+
+	err = of_property_read_string(np, "managed", &managed);
+	if (err == 0) {
+		if (strcmp(managed, "in-band-status") == 0) {
+			/* status is zeroed, namely its .link member */
+			phy = fixed_phy_register(PHY_POLL, &status, -1, np);
+			return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+		}
+	}
 
 	/* New binding */
 	fixed_link_node = of_get_child_by_name(np, "fixed-link");
@@ -303,8 +320,13 @@
 		status.pause = of_property_read_bool(fixed_link_node, "pause");
 		status.asym_pause = of_property_read_bool(fixed_link_node,
 							  "asym-pause");
+		link_gpio = of_get_named_gpio_flags(fixed_link_node,
+						    "link-gpios", 0, NULL);
 		of_node_put(fixed_link_node);
-		phy = fixed_phy_register(PHY_POLL, &status, np);
+		if (link_gpio == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np);
 		return IS_ERR(phy) ? PTR_ERR(phy) : 0;
 	}
 
@@ -316,7 +338,7 @@
 		status.speed = be32_to_cpu(fixed_link_prop[2]);
 		status.pause = be32_to_cpu(fixed_link_prop[3]);
 		status.asym_pause = be32_to_cpu(fixed_link_prop[4]);
-		phy = fixed_phy_register(PHY_POLL, &status, np);
+		phy = fixed_phy_register(PHY_POLL, &status, -1, np);
 		return IS_ERR(phy) ? PTR_ERR(phy) : 0;
 	}
 
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index ddf8e42..1001efa 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -184,6 +184,7 @@
 	dev->dev.bus = &platform_bus_type;
 	dev->dev.platform_data = platform_data;
 	of_dma_configure(&dev->dev, dev->dev.of_node);
+	of_msi_configure(&dev->dev, dev->dev.of_node);
 
 	if (of_device_add(dev) != 0) {
 		of_dma_deconfigure(&dev->dev);
@@ -456,6 +457,15 @@
 }
 EXPORT_SYMBOL_GPL(of_platform_populate);
 
+int of_platform_default_populate(struct device_node *root,
+				 const struct of_dev_auxdata *lookup,
+				 struct device *parent)
+{
+	return of_platform_populate(root, of_default_bus_match_table, lookup,
+				    parent);
+}
+EXPORT_SYMBOL_GPL(of_platform_default_populate);
+
 static int of_platform_device_destroy(struct device *dev, void *data)
 {
 	/* Do not touch devices not populated from the device tree */
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index a0580af..baec33c 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,9 +560,6 @@
 	} else if (bus->parent) {
 		int i;
 
-		pci_read_bridge_bases(bus);
-
-
 		for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
 			if((bus->self->resource[i].flags & 
 			    (IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 9ee04b4..144c77d 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -691,7 +691,7 @@
 	if (dest_cpu < 0)
 		return -1;
 
-	cpumask_copy(d->affinity, cpumask_of(dest_cpu));
+	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu));
 	vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu);
 
 	spin_lock_irqsave(&iosapic_lock, flags);
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index dceb9dd..901e1a3 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,7 +693,6 @@
 	if (bus->parent) {
 		int i;
 		/* PCI-PCI Bridge */
-		pci_read_bridge_bases(bus);
 		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
 			pci_claim_bridge_resource(bus->self, i);
 	} else {
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 73e4af4..be3f631 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -33,6 +33,7 @@
 #
 obj-$(CONFIG_ALPHA) += setup-irq.o
 obj-$(CONFIG_ARM) += setup-irq.o
+obj-$(CONFIG_ARM64) += setup-irq.o
 obj-$(CONFIG_UNICORE32) += setup-irq.o
 obj-$(CONFIG_SUPERH) += setup-irq.o
 obj-$(CONFIG_MIPS) += setup-irq.o
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d9b64a1..769f7e3 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -439,6 +439,56 @@
 	.release = pci_vpd_pci22_release,
 };
 
+static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+			       void *arg)
+{
+	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+	ssize_t ret;
+
+	if (!tdev)
+		return -ENODEV;
+
+	ret = pci_read_vpd(tdev, pos, count, arg);
+	pci_dev_put(tdev);
+	return ret;
+}
+
+static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+				const void *arg)
+{
+	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+	ssize_t ret;
+
+	if (!tdev)
+		return -ENODEV;
+
+	ret = pci_write_vpd(tdev, pos, count, arg);
+	pci_dev_put(tdev);
+	return ret;
+}
+
+static const struct pci_vpd_ops pci_vpd_f0_ops = {
+	.read = pci_vpd_f0_read,
+	.write = pci_vpd_f0_write,
+	.release = pci_vpd_pci22_release,
+};
+
+static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+{
+	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+	int ret = 0;
+
+	if (!tdev)
+		return -ENODEV;
+	if (!tdev->vpd || !tdev->multifunction ||
+	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
+	    dev->device != tdev->device)
+		ret = -ENODEV;
+
+	pci_dev_put(tdev);
+	return ret;
+}
+
 int pci_vpd_pci22_init(struct pci_dev *dev)
 {
 	struct pci_vpd_pci22 *vpd;
@@ -447,12 +497,21 @@
 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
 	if (!cap)
 		return -ENODEV;
+	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+		int ret = pci_vpd_f0_dev_check(dev);
+
+		if (ret)
+			return ret;
+	}
 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
 	if (!vpd)
 		return -ENOMEM;
 
 	vpd->base.len = PCI_VPD_PCI22_SIZE;
-	vpd->base.ops = &pci_vpd_pci22_ops;
+	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
+		vpd->base.ops = &pci_vpd_f0_ops;
+	else
+		vpd->base.ops = &pci_vpd_pci22_ops;
 	mutex_init(&vpd->lock);
 	vpd->cap = cap;
 	vpd->busy = false;
@@ -531,6 +590,14 @@
 	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
 }
 
+static bool pcie_downstream_port(const struct pci_dev *dev)
+{
+	int type = pci_pcie_type(dev);
+
+	return type == PCI_EXP_TYPE_ROOT_PORT ||
+	       type == PCI_EXP_TYPE_DOWNSTREAM;
+}
+
 bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
 {
 	int type = pci_pcie_type(dev);
@@ -546,10 +613,7 @@
 
 static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
 {
-	int type = pci_pcie_type(dev);
-
-	return (type == PCI_EXP_TYPE_ROOT_PORT ||
-		type == PCI_EXP_TYPE_DOWNSTREAM) &&
+	return pcie_downstream_port(dev) &&
 	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
 }
 
@@ -628,10 +692,9 @@
 	 * State bit in the Slot Status register of Downstream Ports,
 	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
 	 */
-	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
-		 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
+	    pos == PCI_EXP_SLTSTA)
 		*val = PCI_EXP_SLTSTA_PDS;
-	}
 
 	return 0;
 }
@@ -657,10 +720,9 @@
 		return ret;
 	}
 
-	if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
-		 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
+	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
+	    pos == PCI_EXP_SLTSTA)
 		*val = PCI_EXP_SLTSTA_PDS;
-	}
 
 	return 0;
 }
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index a8099d4..eeb9fb2 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -17,34 +17,15 @@
 
 #include "pci.h"
 
-static int ats_alloc_one(struct pci_dev *dev, int ps)
+void pci_ats_init(struct pci_dev *dev)
 {
 	int pos;
-	u16 cap;
-	struct pci_ats *ats;
 
 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
 	if (!pos)
-		return -ENODEV;
+		return;
 
-	ats = kzalloc(sizeof(*ats), GFP_KERNEL);
-	if (!ats)
-		return -ENOMEM;
-
-	ats->pos = pos;
-	ats->stu = ps;
-	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-	ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
-					    PCI_ATS_MAX_QDEP;
-	dev->ats = ats;
-
-	return 0;
-}
-
-static void ats_free_one(struct pci_dev *dev)
-{
-	kfree(dev->ats);
-	dev->ats = NULL;
+	dev->ats_cap = pos;
 }
 
 /**
@@ -56,43 +37,36 @@
  */
 int pci_enable_ats(struct pci_dev *dev, int ps)
 {
-	int rc;
 	u16 ctrl;
+	struct pci_dev *pdev;
 
-	BUG_ON(dev->ats && dev->ats->is_enabled);
+	if (!dev->ats_cap)
+		return -EINVAL;
+
+	if (WARN_ON(dev->ats_enabled))
+		return -EBUSY;
 
 	if (ps < PCI_ATS_MIN_STU)
 		return -EINVAL;
 
-	if (dev->is_physfn || dev->is_virtfn) {
-		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
-		mutex_lock(&pdev->sriov->lock);
-		if (pdev->ats)
-			rc = pdev->ats->stu == ps ? 0 : -EINVAL;
-		else
-			rc = ats_alloc_one(pdev, ps);
-
-		if (!rc)
-			pdev->ats->ref_cnt++;
-		mutex_unlock(&pdev->sriov->lock);
-		if (rc)
-			return rc;
-	}
-
-	if (!dev->is_physfn) {
-		rc = ats_alloc_one(dev, ps);
-		if (rc)
-			return rc;
-	}
-
+	/*
+	 * Note that enabling ATS on a VF fails unless it's already enabled
+	 * with the same STU on the PF.
+	 */
 	ctrl = PCI_ATS_CTRL_ENABLE;
-	if (!dev->is_virtfn)
-		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
-	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+	if (dev->is_virtfn) {
+		pdev = pci_physfn(dev);
+		if (pdev->ats_stu != ps)
+			return -EINVAL;
 
-	dev->ats->is_enabled = 1;
+		atomic_inc(&pdev->ats_ref_cnt);  /* count enabled VFs */
+	} else {
+		dev->ats_stu = ps;
+		ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+	}
+	pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
 
+	dev->ats_enabled = 1;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(pci_enable_ats);
@@ -103,28 +77,25 @@
  */
 void pci_disable_ats(struct pci_dev *dev)
 {
+	struct pci_dev *pdev;
 	u16 ctrl;
 
-	BUG_ON(!dev->ats || !dev->ats->is_enabled);
+	if (WARN_ON(!dev->ats_enabled))
+		return;
 
-	pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
-	ctrl &= ~PCI_ATS_CTRL_ENABLE;
-	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+	if (atomic_read(&dev->ats_ref_cnt))
+		return;		/* VFs still enabled */
 
-	dev->ats->is_enabled = 0;
-
-	if (dev->is_physfn || dev->is_virtfn) {
-		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
-
-		mutex_lock(&pdev->sriov->lock);
-		pdev->ats->ref_cnt--;
-		if (!pdev->ats->ref_cnt)
-			ats_free_one(pdev);
-		mutex_unlock(&pdev->sriov->lock);
+	if (dev->is_virtfn) {
+		pdev = pci_physfn(dev);
+		atomic_dec(&pdev->ats_ref_cnt);
 	}
 
-	if (!dev->is_physfn)
-		ats_free_one(dev);
+	pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
+	ctrl &= ~PCI_ATS_CTRL_ENABLE;
+	pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
+
+	dev->ats_enabled = 0;
 }
 EXPORT_SYMBOL_GPL(pci_disable_ats);
 
@@ -132,16 +103,13 @@
 {
 	u16 ctrl;
 
-	if (!pci_ats_enabled(dev))
+	if (!dev->ats_enabled)
 		return;
-	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
-		BUG();
 
 	ctrl = PCI_ATS_CTRL_ENABLE;
 	if (!dev->is_virtfn)
-		ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
-
-	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+		ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
+	pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
 }
 EXPORT_SYMBOL_GPL(pci_restore_ats_state);
 
@@ -159,23 +127,16 @@
  */
 int pci_ats_queue_depth(struct pci_dev *dev)
 {
-	int pos;
 	u16 cap;
 
+	if (!dev->ats_cap)
+		return -EINVAL;
+
 	if (dev->is_virtfn)
 		return 0;
 
-	if (dev->ats)
-		return dev->ats->qdep;
-
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
-	if (!pos)
-		return -ENODEV;
-
-	pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
-
-	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
-				       PCI_ATS_MAX_QDEP;
+	pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
+	return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
 }
 EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
 
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index c132bdd..d5e58ba 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -53,7 +53,7 @@
 
 config PCI_HOST_GENERIC
 	bool "Generic PCI host controller"
-	depends on ARM && OF
+	depends on (ARM || ARM64) && OF
 	help
 	  Say Y here if you want to support a simple generic PCI host
 	  controller, such as the one emulated by kvmtool.
@@ -117,7 +117,7 @@
 
 config PCIE_IPROC
 	tristate "Broadcom iProc PCIe controller"
-	depends on OF && ARM
+	depends on OF && (ARM || ARM64)
 	default n
 	help
 	  This enables the iProc PCIe core controller support for Broadcom's
@@ -135,8 +135,8 @@
 	  through the generic platform bus interface
 
 config PCIE_IPROC_BCMA
-	bool "Broadcom iProc PCIe BCMA bus driver"
-	depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST)
+	tristate "Broadcom iProc PCIe BCMA bus driver"
+	depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST)
 	select PCIE_IPROC
 	select BCMA
 	select PCI_DOMAINS
diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
index 80db09e..199e29a 100644
--- a/drivers/pci/host/pci-dra7xx.c
+++ b/drivers/pci/host/pci-dra7xx.c
@@ -17,6 +17,7 @@
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_gpio.h>
 #include <linux/pci.h>
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
@@ -83,6 +84,17 @@
 	writel(value, pcie->base + offset);
 }
 
+static inline u32 dra7xx_pcie_readl_rc(struct pcie_port *pp, u32 offset)
+{
+	return readl(pp->dbi_base + offset);
+}
+
+static inline void dra7xx_pcie_writel_rc(struct pcie_port *pp, u32 offset,
+					 u32 value)
+{
+	writel(value, pp->dbi_base + offset);
+}
+
 static int dra7xx_pcie_link_up(struct pcie_port *pp)
 {
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp);
@@ -155,7 +167,6 @@
 {
 	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -325,6 +336,9 @@
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	char name[10];
+	int gpio_sel;
+	enum of_gpio_flags flags;
+	unsigned long gpio_flags;
 
 	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
 	if (!dra7xx)
@@ -382,9 +396,25 @@
 
 	pm_runtime_enable(dev);
 	ret = pm_runtime_get_sync(dev);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "pm_runtime_get_sync failed\n");
-		goto err_phy;
+		goto err_get_sync;
+	}
+
+	gpio_sel = of_get_gpio_flags(dev->of_node, 0, &flags);
+	if (gpio_is_valid(gpio_sel)) {
+		gpio_flags = (flags & OF_GPIO_ACTIVE_LOW) ?
+				GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
+		ret = devm_gpio_request_one(dev, gpio_sel, gpio_flags,
+					    "pcie_reset");
+		if (ret) {
+			dev_err(&pdev->dev, "gpio%d request failed, ret %d\n",
+				gpio_sel, ret);
+			goto err_gpio;
+		}
+	} else if (gpio_sel == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto err_gpio;
 	}
 
 	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
@@ -395,12 +425,14 @@
 
 	ret = dra7xx_add_pcie_port(dra7xx, pdev);
 	if (ret < 0)
-		goto err_add_port;
+		goto err_gpio;
 
 	return 0;
 
-err_add_port:
+err_gpio:
 	pm_runtime_put(dev);
+
+err_get_sync:
 	pm_runtime_disable(dev);
 
 err_phy:
@@ -431,6 +463,85 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int dra7xx_pcie_suspend(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	struct pcie_port *pp = &dra7xx->pp;
+	u32 val;
+
+	/* clear MSE */
+	val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+	val &= ~PCI_COMMAND_MEMORY;
+	dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+
+	return 0;
+}
+
+static int dra7xx_pcie_resume(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	struct pcie_port *pp = &dra7xx->pp;
+	u32 val;
+
+	/* set MSE */
+	val = dra7xx_pcie_readl_rc(pp, PCI_COMMAND);
+	val |= PCI_COMMAND_MEMORY;
+	dra7xx_pcie_writel_rc(pp, PCI_COMMAND, val);
+
+	return 0;
+}
+
+static int dra7xx_pcie_suspend_noirq(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	int count = dra7xx->phy_count;
+
+	while (count--) {
+		phy_power_off(dra7xx->phy[count]);
+		phy_exit(dra7xx->phy[count]);
+	}
+
+	return 0;
+}
+
+static int dra7xx_pcie_resume_noirq(struct device *dev)
+{
+	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
+	int phy_count = dra7xx->phy_count;
+	int ret;
+	int i;
+
+	for (i = 0; i < phy_count; i++) {
+		ret = phy_init(dra7xx->phy[i]);
+		if (ret < 0)
+			goto err_phy;
+
+		ret = phy_power_on(dra7xx->phy[i]);
+		if (ret < 0) {
+			phy_exit(dra7xx->phy[i]);
+			goto err_phy;
+		}
+	}
+
+	return 0;
+
+err_phy:
+	while (--i >= 0) {
+		phy_power_off(dra7xx->phy[i]);
+		phy_exit(dra7xx->phy[i]);
+	}
+
+	return ret;
+}
+#endif
+
+static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
+				      dra7xx_pcie_resume_noirq)
+};
+
 static const struct of_device_id of_dra7xx_pcie_match[] = {
 	{ .compatible = "ti,dra7-pcie", },
 	{},
@@ -442,6 +553,7 @@
 	.driver = {
 		.name	= "dra7-pcie",
 		.of_match_table = of_dra7xx_pcie_match,
+		.pm	= &dra7xx_pcie_pm_ops,
 	},
 };
 
diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c
index ba46e58..265dd25 100644
--- a/drivers/pci/host/pci-host-generic.c
+++ b/drivers/pci/host/pci-host-generic.c
@@ -38,7 +38,16 @@
 	const struct gen_pci_cfg_bus_ops	*ops;
 };
 
+/*
+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI
+ * sysdata.  Add pci_sys_data as the first element in struct gen_pci so
+ * that when we use a gen_pci pointer as sysdata, it is also a pointer to
+ * a struct pci_sys_data.
+ */
 struct gen_pci {
+#ifdef CONFIG_ARM
+	struct pci_sys_data			sys;
+#endif
 	struct pci_host_bridge			host;
 	struct gen_pci_cfg_windows		cfg;
 	struct list_head			resources;
@@ -48,8 +57,7 @@
 					     unsigned int devfn,
 					     int where)
 {
-	struct pci_sys_data *sys = bus->sysdata;
-	struct gen_pci *pci = sys->private_data;
+	struct gen_pci *pci = bus->sysdata;
 	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
 
 	return pci->cfg.win[idx] + ((devfn << 8) | where);
@@ -64,8 +72,7 @@
 					      unsigned int devfn,
 					      int where)
 {
-	struct pci_sys_data *sys = bus->sysdata;
-	struct gen_pci *pci = sys->private_data;
+	struct gen_pci *pci = bus->sysdata;
 	resource_size_t idx = bus->number - pci->cfg.bus_range->start;
 
 	return pci->cfg.win[idx] + ((devfn << 12) | where);
@@ -198,13 +205,6 @@
 	return 0;
 }
 
-static int gen_pci_setup(int nr, struct pci_sys_data *sys)
-{
-	struct gen_pci *pci = sys->private_data;
-	list_splice_init(&pci->resources, &sys->resources);
-	return 1;
-}
-
 static int gen_pci_probe(struct platform_device *pdev)
 {
 	int err;
@@ -214,13 +214,7 @@
 	struct device *dev = &pdev->dev;
 	struct device_node *np = dev->of_node;
 	struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
-	struct hw_pci hw = {
-		.nr_controllers	= 1,
-		.private_data	= (void **)&pci,
-		.setup		= gen_pci_setup,
-		.map_irq	= of_irq_parse_and_map_pci,
-		.ops		= &gen_pci_ops,
-	};
+	struct pci_bus *bus, *child;
 
 	if (!pci)
 		return -ENOMEM;
@@ -258,7 +252,27 @@
 		return err;
 	}
 
-	pci_common_init_dev(dev, &hw);
+	/* Do not reassign resources if probe only */
+	if (!pci_has_flag(PCI_PROBE_ONLY))
+		pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+
+	bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources);
+	if (!bus) {
+		dev_err(dev, "Scanning root bus failed\n");
+		return -ENODEV;
+	}
+
+	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
+
+	if (!pci_has_flag(PCI_PROBE_ONLY)) {
+		pci_bus_size_bridges(bus);
+		pci_bus_assign_resources(bus);
+
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
+	}
+
+	pci_bus_add_devices(bus);
 	return 0;
 }
 
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 233a196..8f3a981 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -117,11 +117,7 @@
 	val = addr << PCIE_PHY_CTRL_DATA_LOC;
 	writel(val, dbi_base + PCIE_PHY_CTRL);
 
-	ret = pcie_phy_poll_ack(dbi_base, 0);
-	if (ret)
-		return ret;
-
-	return 0;
+	return pcie_phy_poll_ack(dbi_base, 0);
 }
 
 /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
@@ -148,11 +144,7 @@
 	/* deassert Read signal */
 	writel(0x00, dbi_base + PCIE_PHY_CTRL);
 
-	ret = pcie_phy_poll_ack(dbi_base, 0);
-	if (ret)
-		return ret;
-
-	return 0;
+	return pcie_phy_poll_ack(dbi_base, 0);
 }
 
 static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index f34892e..e71da99 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -104,14 +104,13 @@
 {
 	u32 offset, reg_offset, bit_pos;
 	struct keystone_pcie *ks_pcie;
-	unsigned int irq = d->irq;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
 
-	msi = irq_get_msi_desc(irq);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	msi = irq_data_get_msi_desc(d);
+	pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 	ks_pcie = to_keystone_pcie(pp);
-	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
 
 	writel(BIT(bit_pos),
@@ -142,15 +141,14 @@
 static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
 {
 	struct keystone_pcie *ks_pcie;
-	unsigned int irq = d->irq;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
 	u32 offset;
 
-	msi = irq_get_msi_desc(irq);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	msi = irq_data_get_msi_desc(d);
+	pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 	ks_pcie = to_keystone_pcie(pp);
-	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
 	/* Mask the end point if PVM implemented */
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -164,15 +162,14 @@
 static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
 {
 	struct keystone_pcie *ks_pcie;
-	unsigned int irq = d->irq;
 	struct msi_desc *msi;
 	struct pcie_port *pp;
 	u32 offset;
 
-	msi = irq_get_msi_desc(irq);
-	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	msi = irq_data_get_msi_desc(d);
+	pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 	ks_pcie = to_keystone_pcie(pp);
-	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
 
 	/* Mask the end point if PVM implemented */
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -196,7 +193,6 @@
 	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
 				 handle_level_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -277,7 +273,6 @@
 	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
 				 handle_level_irq);
 	irq_set_chip_data(irq, d->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 734da58..81253e7 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -110,8 +110,9 @@
 	return -EINVAL;
 }
 
-static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	u32 offset = irq - ks_pcie->msi_host_irqs[0];
 	struct pcie_port *pp = &ks_pcie->pp;
@@ -137,8 +138,10 @@
  * Traverse through pending legacy interrupts and invoke handler for each. Also
  * takes care of interrupt controller level mask/ack operation.
  */
-static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(unsigned int __irq,
+				       struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
 	struct pcie_port *pp = &ks_pcie->pp;
 	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
@@ -212,9 +215,9 @@
 
 	/* Legacy IRQ */
 	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
-		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
-		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
-					ks_pcie_legacy_irq_handler);
+		irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
+						 ks_pcie_legacy_irq_handler,
+						 ks_pcie);
 	}
 	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
 
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 70aa095..67ec5e1 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -879,6 +879,7 @@
 		return;
 
 	pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+	of_node_put(msi_node);
 
 	if (pcie->msi)
 		pcie->msi->dev = &pcie->pdev->dev;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 10c0571..81df0c1 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -1248,7 +1248,6 @@
 {
 	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	tegra_cpuidle_pcie_irqs_in_use();
 
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 2d31d4d..996327c 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -40,8 +40,8 @@
 
 struct xgene_msi {
 	struct device_node	*node;
-	struct msi_controller	mchip;
-	struct irq_domain	*domain;
+	struct irq_domain	*inner_domain;
+	struct irq_domain	*msi_domain;
 	u64			msi_addr;
 	void __iomem		*msi_regs;
 	unsigned long		*bitmap;
@@ -223,7 +223,6 @@
 	irq_domain_set_info(domain, virq, msi_irq,
 			    &xgene_msi_bottom_irq_chip, domain->host_data,
 			    handle_simple_irq, NULL, NULL);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
@@ -252,17 +251,17 @@
 
 static int xgene_allocate_domains(struct xgene_msi *msi)
 {
-	msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
-					    &msi_domain_ops, msi);
-	if (!msi->domain)
+	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
+						  &msi_domain_ops, msi);
+	if (!msi->inner_domain)
 		return -ENOMEM;
 
-	msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node,
-						      &xgene_msi_domain_info,
-						      msi->domain);
+	msi->msi_domain = pci_msi_create_irq_domain(msi->node,
+						    &xgene_msi_domain_info,
+						    msi->inner_domain);
 
-	if (!msi->mchip.domain) {
-		irq_domain_remove(msi->domain);
+	if (!msi->msi_domain) {
+		irq_domain_remove(msi->inner_domain);
 		return -ENOMEM;
 	}
 
@@ -271,10 +270,10 @@
 
 static void xgene_free_domains(struct xgene_msi *msi)
 {
-	if (msi->mchip.domain)
-		irq_domain_remove(msi->mchip.domain);
-	if (msi->domain)
-		irq_domain_remove(msi->domain);
+	if (msi->msi_domain)
+		irq_domain_remove(msi->msi_domain);
+	if (msi->inner_domain)
+		irq_domain_remove(msi->inner_domain);
 }
 
 static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
@@ -340,7 +339,7 @@
 			 * CPU0
 			 */
 			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
-			virq = irq_find_mapping(xgene_msi->domain, hw_irq);
+			virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
 			WARN_ON(!virq);
 			if (virq != 0)
 				generic_handle_irq(virq);
@@ -368,10 +367,8 @@
 
 	for (i = 0; i < NR_HW_IRQS; i++) {
 		virq = msi->msi_groups[i].gic_irq;
-		if (virq != 0) {
-			irq_set_chained_handler(virq, NULL);
-			irq_set_handler_data(virq, NULL);
-		}
+		if (virq != 0)
+			irq_set_chained_handler_and_data(virq, NULL, NULL);
 	}
 	kfree(msi->msi_groups);
 
@@ -421,8 +418,8 @@
 		}
 
 		if (err) {
-			irq_set_chained_handler(msi_group->gic_irq, NULL);
-			irq_set_handler_data(msi_group->gic_irq, NULL);
+			irq_set_chained_handler_and_data(msi_group->gic_irq,
+							 NULL, NULL);
 			return err;
 		}
 	}
@@ -441,8 +438,8 @@
 		if (!msi_group->gic_irq)
 			continue;
 
-		irq_set_chained_handler(msi_group->gic_irq, NULL);
-		irq_set_handler_data(msi_group->gic_irq, NULL);
+		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
+						 NULL);
 	}
 }
 
@@ -497,7 +494,7 @@
 		goto error;
 	}
 	xgene_msi->msi_addr = res->start;
-
+	xgene_msi->node = pdev->dev.of_node;
 	xgene_msi->num_cpus = num_possible_cpus();
 
 	rc = xgene_msi_init_allocator(xgene_msi);
@@ -561,19 +558,10 @@
 
 	cpu_notifier_register_done();
 
-	xgene_msi->mchip.of_node = pdev->dev.of_node;
-	rc = of_pci_msi_chip_add(&xgene_msi->mchip);
-	if (rc) {
-		dev_err(&pdev->dev, "failed to add MSI controller chip\n");
-		goto error_notifier;
-	}
-
 	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");
 
 	return 0;
 
-error_notifier:
-	unregister_hotcpu_notifier(&xgene_msi_cpu_notifier);
 error:
 	xgene_msi_remove(pdev);
 	return rc;
@@ -582,7 +570,6 @@
 static struct platform_driver xgene_msi_driver = {
 	.driver = {
 		.name = "xgene-msi",
-		.owner = THIS_MODULE,
 		.of_match_table = xgene_msi_match_table,
 	},
 	.probe = xgene_msi_probe,
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index a9dfb70..0236ab9 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -321,8 +321,16 @@
 				return ret;
 			break;
 		case IORESOURCE_MEM:
-			xgene_pcie_setup_ob_reg(port, res, OMR1BARL, res->start,
-						res->start - window->offset);
+			if (res->flags & IORESOURCE_PREFETCH)
+				xgene_pcie_setup_ob_reg(port, res, OMR2BARL,
+							res->start,
+							res->start -
+							window->offset);
+			else
+				xgene_pcie_setup_ob_reg(port, res, OMR1BARL,
+							res->start,
+							res->start -
+							window->offset);
 			break;
 		case IORESOURCE_BUS:
 			break;
@@ -514,6 +522,7 @@
 	if (!bus->msi)
 		return -ENODEV;
 
+	of_node_put(msi_node);
 	bus->msi->dev = &bus->dev;
 	return 0;
 }
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 69486be..52aa6e3 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -255,7 +255,7 @@
 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
 {
 	int irq, pos0, i;
-	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc));
 
 	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
 				       order_base_2(no_irqs));
@@ -326,8 +326,8 @@
 static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
-	struct msi_desc *msi = irq_data_get_msi(data);
-	struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata);
+	struct msi_desc *msi = irq_data_get_msi_desc(data);
+	struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 
 	clear_irq_range(pp, irq, 1, data->hwirq);
 }
@@ -350,7 +350,6 @@
 {
 	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -388,7 +387,7 @@
 		addrp = of_get_address(np, index, NULL, NULL);
 		pp->cfg0_mod_base = of_read_number(addrp, ns);
 		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
-	} else {
+	} else if (!pp->va_cfg0_base) {
 		dev_err(pp->dev, "missing *config* reg space\n");
 	}
 
@@ -526,7 +525,6 @@
 
 #ifdef CONFIG_PCI_MSI
 	dw_pcie_msi_chip.dev = pp->dev;
-	dw_pci.msi_ctrl = &dw_pcie_msi_chip;
 #endif
 
 	dw_pci.nr_controllers = 1;
@@ -708,8 +706,15 @@
 	struct pcie_port *pp = sys_to_pcie(sys);
 
 	pp->root_bus_nr = sys->busnr;
-	bus = pci_scan_root_bus(pp->dev, sys->busnr,
-				  &dw_pcie_ops, sys, &sys->resources);
+
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		bus = pci_scan_root_bus_msi(pp->dev, sys->busnr, &dw_pcie_ops,
+					    sys, &sys->resources,
+					    &dw_pcie_msi_chip);
+	else
+		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
+					sys, &sys->resources);
+
 	if (!bus)
 		return NULL;
 
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index d77481e..fe2efb1 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -58,9 +58,17 @@
 #define SYS_RC_INTX_EN               0x330
 #define SYS_RC_INTX_MASK             0xf
 
-static inline struct iproc_pcie *sys_to_pcie(struct pci_sys_data *sys)
+static inline struct iproc_pcie *iproc_data(struct pci_bus *bus)
 {
-	return sys->private_data;
+	struct iproc_pcie *pcie;
+#ifdef CONFIG_ARM
+	struct pci_sys_data *sys = bus->sysdata;
+
+	pcie = sys->private_data;
+#else
+	pcie = bus->sysdata;
+#endif
+	return pcie;
 }
 
 /**
@@ -71,8 +79,7 @@
 					    unsigned int devfn,
 					    int where)
 {
-	struct pci_sys_data *sys = bus->sysdata;
-	struct iproc_pcie *pcie = sys_to_pcie(sys);
+	struct iproc_pcie *pcie = iproc_data(bus);
 	unsigned slot = PCI_SLOT(devfn);
 	unsigned fn = PCI_FUNC(devfn);
 	unsigned busno = bus->number;
@@ -186,32 +193,34 @@
 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 {
 	int ret;
+	void *sysdata;
 	struct pci_bus *bus;
 
 	if (!pcie || !pcie->dev || !pcie->base)
 		return -EINVAL;
 
-	if (pcie->phy) {
-		ret = phy_init(pcie->phy);
-		if (ret) {
-			dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
-			return ret;
-		}
+	ret = phy_init(pcie->phy);
+	if (ret) {
+		dev_err(pcie->dev, "unable to initialize PCIe PHY\n");
+		return ret;
+	}
 
-		ret = phy_power_on(pcie->phy);
-		if (ret) {
-			dev_err(pcie->dev, "unable to power on PCIe PHY\n");
-			goto err_exit_phy;
-		}
-
+	ret = phy_power_on(pcie->phy);
+	if (ret) {
+		dev_err(pcie->dev, "unable to power on PCIe PHY\n");
+		goto err_exit_phy;
 	}
 
 	iproc_pcie_reset(pcie);
 
+#ifdef CONFIG_ARM
 	pcie->sysdata.private_data = pcie;
+	sysdata = &pcie->sysdata;
+#else
+	sysdata = pcie;
+#endif
 
-	bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops,
-				  &pcie->sysdata, res);
+	bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res);
 	if (!bus) {
 		dev_err(pcie->dev, "unable to create PCI root bus\n");
 		ret = -ENOMEM;
@@ -229,7 +238,9 @@
 
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
+#ifdef CONFIG_ARM
 	pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
+#endif
 	pci_bus_add_devices(bus);
 
 	return 0;
@@ -239,12 +250,9 @@
 	pci_remove_root_bus(bus);
 
 err_power_off_phy:
-	if (pcie->phy)
-		phy_power_off(pcie->phy);
+	phy_power_off(pcie->phy);
 err_exit_phy:
-	if (pcie->phy)
-		phy_exit(pcie->phy);
-
+	phy_exit(pcie->phy);
 	return ret;
 }
 EXPORT_SYMBOL(iproc_pcie_setup);
@@ -254,10 +262,8 @@
 	pci_stop_root_bus(pcie->root_bus);
 	pci_remove_root_bus(pcie->root_bus);
 
-	if (pcie->phy) {
-		phy_power_off(pcie->phy);
-		phy_exit(pcie->phy);
-	}
+	phy_power_off(pcie->phy);
+	phy_exit(pcie->phy);
 
 	return 0;
 }
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index ba0a108..c9e4c10 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -21,7 +21,7 @@
  * @dev: pointer to device data structure
  * @base: PCIe host controller I/O register base
  * @resources: linked list of all PCI resources
- * @sysdata: Per PCI controller data
+ * @sysdata: Per PCI controller data (ARM-specific)
  * @root_bus: pointer to root bus
  * @phy: optional PHY device that controls the Serdes
  * @irqs: interrupt IDs
@@ -29,7 +29,9 @@
 struct iproc_pcie {
 	struct device *dev;
 	void __iomem *base;
+#ifdef CONFIG_ARM
 	struct pci_sys_data sysdata;
+#endif
 	struct pci_bus *root_bus;
 	struct phy *phy;
 	int irqs[IPROC_PCIE_MAX_NUM_IRQS];
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
index c086210..7678fe0 100644
--- a/drivers/pci/host/pcie-rcar.c
+++ b/drivers/pci/host/pcie-rcar.c
@@ -664,7 +664,6 @@
 {
 	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index c49fbdc..98d2683 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -223,8 +223,7 @@
 	status = readl(&app_reg->int_sts);
 
 	if (status & MSI_CTRL_INT) {
-		if (!IS_ENABLED(CONFIG_PCI_MSI))
-			BUG();
+		BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI));
 		dw_handle_msi_irq(pp);
 	}
 
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
index f1a06a0..3c7a0d5 100644
--- a/drivers/pci/host/pcie-xilinx.c
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -227,18 +227,16 @@
  */
 static void xilinx_pcie_destroy_msi(unsigned int irq)
 {
-	struct irq_desc *desc;
 	struct msi_desc *msi;
 	struct xilinx_pcie_port *port;
 
-	desc = irq_to_desc(irq);
-	msi = irq_desc_get_msi_desc(desc);
-	port = sys_to_pcie(msi->dev->bus->sysdata);
-
-	if (!test_bit(irq, msi_irq_in_use))
+	if (!test_bit(irq, msi_irq_in_use)) {
+		msi = irq_get_msi_desc(irq);
+		port = sys_to_pcie(msi_desc_to_pci_sysdata(msi));
 		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
-	else
+	} else {
 		clear_bit(irq, msi_irq_in_use);
+	}
 }
 
 /**
@@ -338,7 +336,6 @@
 {
 	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -377,7 +374,6 @@
 {
 	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
 	irq_set_chip_data(irq, domain->host_data);
-	set_irq_flags(irq, IRQF_VALID);
 
 	return 0;
 }
@@ -449,14 +445,17 @@
 			return IRQ_HANDLED;
 		}
 
-		/* Clear interrupt FIFO register 1 */
-		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
-			   XILINX_PCIE_REG_RPIFR1);
+		if (!(val & XILINX_PCIE_RPIFR1_MSI_INTR)) {
+			/* Clear interrupt FIFO register 1 */
+			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+				   XILINX_PCIE_REG_RPIFR1);
 
-		/* Handle INTx Interrupt */
-		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
-			XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
-		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+			/* Handle INTx Interrupt */
+			val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+				XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+			generic_handle_irq(irq_find_mapping(port->irq_domain,
+							    val));
+		}
 	}
 
 	if (status & XILINX_PCIE_INTR_MSI) {
@@ -647,9 +646,15 @@
 	struct pci_bus *bus;
 
 	port->root_busno = sys->busnr;
-	bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
-				sys, &sys->resources);
 
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		bus = pci_scan_root_bus_msi(port->dev, sys->busnr,
+					    &xilinx_pcie_ops, sys,
+					    &sys->resources,
+					    &xilinx_pcie_msi_chip);
+	else
+		bus = pci_scan_root_bus(port->dev, sys->busnr,
+					&xilinx_pcie_ops, sys, &sys->resources);
 	return bus;
 }
 
@@ -847,7 +852,6 @@
 
 #ifdef CONFIG_PCI_MSI
 	xilinx_pcie_msi_chip.dev = port->dev;
-	hw.msi_ctrl = &xilinx_pcie_msi_chip;
 #endif
 	pci_common_init_dev(dev, &hw);
 
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 56d8486..d1fab97 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -83,12 +83,12 @@
 GET_STATUS(latch_status, u8)
 GET_STATUS(adapter_status, u8)
 
-static ssize_t power_read_file(struct pci_slot *slot, char *buf)
+static ssize_t power_read_file(struct pci_slot *pci_slot, char *buf)
 {
 	int retval;
 	u8 value;
 
-	retval = get_power_status(slot->hotplug, &value);
+	retval = get_power_status(pci_slot->hotplug, &value);
 	if (retval)
 		return retval;
 
@@ -140,22 +140,22 @@
 	.store = power_write_file
 };
 
-static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
+static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
 {
 	int retval;
 	u8 value;
 
-	retval = get_attention_status(slot->hotplug, &value);
+	retval = get_attention_status(pci_slot->hotplug, &value);
 	if (retval)
 		return retval;
 
 	return sprintf(buf, "%d\n", value);
 }
 
-static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
+static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
 				    size_t count)
 {
-	struct hotplug_slot_ops *ops = slot->hotplug->ops;
+	struct hotplug_slot_ops *ops = pci_slot->hotplug->ops;
 	unsigned long lattention;
 	u8 attention;
 	int retval = 0;
@@ -169,7 +169,7 @@
 		goto exit;
 	}
 	if (ops->set_attention_status)
-		retval = ops->set_attention_status(slot->hotplug, attention);
+		retval = ops->set_attention_status(pci_slot->hotplug, attention);
 	module_put(ops->owner);
 
 exit:
@@ -184,12 +184,12 @@
 	.store = attention_write_file
 };
 
-static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
+static ssize_t latch_read_file(struct pci_slot *pci_slot, char *buf)
 {
 	int retval;
 	u8 value;
 
-	retval = get_latch_status(slot->hotplug, &value);
+	retval = get_latch_status(pci_slot->hotplug, &value);
 	if (retval)
 		return retval;
 
@@ -201,12 +201,12 @@
 	.show = latch_read_file,
 };
 
-static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
+static ssize_t presence_read_file(struct pci_slot *pci_slot, char *buf)
 {
 	int retval;
 	u8 value;
 
-	retval = get_adapter_status(slot->hotplug, &value);
+	retval = get_adapter_status(pci_slot->hotplug, &value);
 	if (retval)
 		return retval;
 
@@ -307,43 +307,43 @@
 	return false;
 }
 
-static int fs_add_slot(struct pci_slot *slot)
+static int fs_add_slot(struct pci_slot *pci_slot)
 {
 	int retval = 0;
 
 	/* Create symbolic link to the hotplug driver module */
-	pci_hp_create_module_link(slot);
+	pci_hp_create_module_link(pci_slot);
 
-	if (has_power_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
+	if (has_power_file(pci_slot)) {
+		retval = sysfs_create_file(&pci_slot->kobj,
 					   &hotplug_slot_attr_power.attr);
 		if (retval)
 			goto exit_power;
 	}
 
-	if (has_attention_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
+	if (has_attention_file(pci_slot)) {
+		retval = sysfs_create_file(&pci_slot->kobj,
 					   &hotplug_slot_attr_attention.attr);
 		if (retval)
 			goto exit_attention;
 	}
 
-	if (has_latch_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
+	if (has_latch_file(pci_slot)) {
+		retval = sysfs_create_file(&pci_slot->kobj,
 					   &hotplug_slot_attr_latch.attr);
 		if (retval)
 			goto exit_latch;
 	}
 
-	if (has_adapter_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
+	if (has_adapter_file(pci_slot)) {
+		retval = sysfs_create_file(&pci_slot->kobj,
 					   &hotplug_slot_attr_presence.attr);
 		if (retval)
 			goto exit_adapter;
 	}
 
-	if (has_test_file(slot)) {
-		retval = sysfs_create_file(&slot->kobj,
+	if (has_test_file(pci_slot)) {
+		retval = sysfs_create_file(&pci_slot->kobj,
 					   &hotplug_slot_attr_test.attr);
 		if (retval)
 			goto exit_test;
@@ -352,45 +352,45 @@
 	goto exit;
 
 exit_test:
-	if (has_adapter_file(slot))
-		sysfs_remove_file(&slot->kobj,
+	if (has_adapter_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj,
 				  &hotplug_slot_attr_presence.attr);
 exit_adapter:
-	if (has_latch_file(slot))
-		sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+	if (has_latch_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
 exit_latch:
-	if (has_attention_file(slot))
-		sysfs_remove_file(&slot->kobj,
+	if (has_attention_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj,
 				  &hotplug_slot_attr_attention.attr);
 exit_attention:
-	if (has_power_file(slot))
-		sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+	if (has_power_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
 exit_power:
-	pci_hp_remove_module_link(slot);
+	pci_hp_remove_module_link(pci_slot);
 exit:
 	return retval;
 }
 
-static void fs_remove_slot(struct pci_slot *slot)
+static void fs_remove_slot(struct pci_slot *pci_slot)
 {
-	if (has_power_file(slot))
-		sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+	if (has_power_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_power.attr);
 
-	if (has_attention_file(slot))
-		sysfs_remove_file(&slot->kobj,
+	if (has_attention_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj,
 				  &hotplug_slot_attr_attention.attr);
 
-	if (has_latch_file(slot))
-		sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
+	if (has_latch_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_latch.attr);
 
-	if (has_adapter_file(slot))
-		sysfs_remove_file(&slot->kobj,
+	if (has_adapter_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj,
 				  &hotplug_slot_attr_presence.attr);
 
-	if (has_test_file(slot))
-		sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+	if (has_test_file(pci_slot))
+		sysfs_remove_file(&pci_slot->kobj, &hotplug_slot_attr_test.attr);
 
-	pci_hp_remove_module_link(slot);
+	pci_hp_remove_module_link(pci_slot);
 }
 
 static struct hotplug_slot *get_slot_from_name(const char *name)
@@ -467,37 +467,37 @@
 
 /**
  * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
- * @hotplug: pointer to the &struct hotplug_slot to deregister
+ * @slot: pointer to the &struct hotplug_slot to deregister
  *
  * The @slot must have been registered with the pci hotplug subsystem
  * previously with a call to pci_hp_register().
  *
  * Returns 0 if successful, anything else for an error.
  */
-int pci_hp_deregister(struct hotplug_slot *hotplug)
+int pci_hp_deregister(struct hotplug_slot *slot)
 {
 	struct hotplug_slot *temp;
-	struct pci_slot *slot;
+	struct pci_slot *pci_slot;
 
-	if (!hotplug)
+	if (!slot)
 		return -ENODEV;
 
 	mutex_lock(&pci_hp_mutex);
-	temp = get_slot_from_name(hotplug_slot_name(hotplug));
-	if (temp != hotplug) {
+	temp = get_slot_from_name(hotplug_slot_name(slot));
+	if (temp != slot) {
 		mutex_unlock(&pci_hp_mutex);
 		return -ENODEV;
 	}
 
-	list_del(&hotplug->slot_list);
+	list_del(&slot->slot_list);
 
-	slot = hotplug->pci_slot;
-	fs_remove_slot(slot);
-	dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug));
+	pci_slot = slot->pci_slot;
+	fs_remove_slot(pci_slot);
+	dbg("Removed slot %s from the list\n", hotplug_slot_name(slot));
 
-	hotplug->release(hotplug);
-	slot->hotplug = NULL;
-	pci_destroy_slot(slot);
+	slot->release(slot);
+	pci_slot->hotplug = NULL;
+	pci_destroy_slot(pci_slot);
 	mutex_unlock(&pci_hp_mutex);
 
 	return 0;
@@ -506,7 +506,7 @@
 
 /**
  * pci_hp_change_slot_info - changes the slot's information structure in the core
- * @hotplug: pointer to the slot whose info has changed
+ * @slot: pointer to the slot whose info has changed
  * @info: pointer to the info copy into the slot's info structure
  *
  * @slot must have been registered with the pci
@@ -514,13 +514,13 @@
  *
  * Returns 0 if successful, anything else for an error.
  */
-int pci_hp_change_slot_info(struct hotplug_slot *hotplug,
+int pci_hp_change_slot_info(struct hotplug_slot *slot,
 			    struct hotplug_slot_info *info)
 {
-	if (!hotplug || !info)
+	if (!slot || !info)
 		return -ENODEV;
 
-	memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
+	memcpy(slot->info, info, sizeof(struct hotplug_slot_info));
 
 	return 0;
 }
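
Note on the hunks above: this is a pure rename inside the hotplug core. Parameters of type struct pci_slot * are now consistently called pci_slot and parameters of type struct hotplug_slot * are called slot, so the two structures can no longer be confused at a glance; the external interface is unchanged. A minimal sketch of how a hotplug driver still calls into the core after the rename (example_slot_teardown() is hypothetical, not part of the patch):

#include <linux/kernel.h>
#include <linux/pci_hotplug.h>

static void example_slot_teardown(struct hotplug_slot *hp_slot)
{
	int ret;

	/* the core resolves the struct pci_slot internally (fs_remove_slot) */
	ret = pci_hp_deregister(hp_slot);
	if (ret)
		pr_err("failed to deregister slot %s: %d\n",
		       hotplug_slot_name(hp_slot), ret);
}
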
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 57cd132..62d6fe6 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -101,18 +101,12 @@
 	unsigned int power_fault_detected;
 };
 
-#define INT_BUTTON_IGNORE		0
 #define INT_PRESENCE_ON			1
 #define INT_PRESENCE_OFF		2
-#define INT_SWITCH_CLOSE		3
-#define INT_SWITCH_OPEN			4
-#define INT_POWER_FAULT			5
-#define INT_POWER_FAULT_CLEAR		6
-#define INT_BUTTON_PRESS		7
-#define INT_BUTTON_RELEASE		8
-#define INT_BUTTON_CANCEL		9
-#define INT_LINK_UP			10
-#define INT_LINK_DOWN			11
+#define INT_POWER_FAULT			3
+#define INT_BUTTON_PRESS		4
+#define INT_LINK_UP			5
+#define INT_LINK_DOWN			6
 
 #define STATIC_STATE			0
 #define BLINKINGON_STATE		1
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 2913f7e..5c24e93 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -109,21 +109,23 @@
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	u16 slot_status;
 
-	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-	if (slot_status & PCI_EXP_SLTSTA_CC) {
-		pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
-					   PCI_EXP_SLTSTA_CC);
-		return 1;
-	}
-	while (timeout > 0) {
-		msleep(10);
-		timeout -= 10;
+	while (true) {
 		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+		if (slot_status == (u16) ~0) {
+			ctrl_info(ctrl, "%s: no response from device\n",
+				  __func__);
+			return 0;
+		}
+
 		if (slot_status & PCI_EXP_SLTSTA_CC) {
 			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
 						   PCI_EXP_SLTSTA_CC);
 			return 1;
 		}
+		if (timeout < 0)
+			break;
+		msleep(10);
+		timeout -= 10;
 	}
 	return 0;	/* timeout */
 }
@@ -190,6 +192,11 @@
 	pcie_wait_cmd(ctrl);
 
 	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+	if (slot_ctrl == (u16) ~0) {
+		ctrl_info(ctrl, "%s: no response from device\n", __func__);
+		goto out;
+	}
+
 	slot_ctrl &= ~mask;
 	slot_ctrl |= (cmd & mask);
 	ctrl->cmd_busy = 1;
@@ -205,6 +212,7 @@
 	if (wait)
 		pcie_wait_cmd(ctrl);
 
+out:
 	mutex_unlock(&ctrl->ctrl_lock);
 }
 
@@ -535,7 +543,7 @@
 	struct pci_dev *dev;
 	struct slot *slot = ctrl->slot;
 	u16 detected, intr_loc;
-	u8 open, present;
+	u8 present;
 	bool link;
 
 	/*
@@ -546,9 +554,14 @@
 	intr_loc = 0;
 	do {
 		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &detected);
+		if (detected == (u16) ~0) {
+			ctrl_info(ctrl, "%s: no response from device\n",
+				  __func__);
+			return IRQ_HANDLED;
+		}
 
 		detected &= (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
-			     PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+			     PCI_EXP_SLTSTA_PDC |
 			     PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
 		detected &= ~intr_loc;
 		intr_loc |= detected;
@@ -581,15 +594,6 @@
 	if (!(intr_loc & ~PCI_EXP_SLTSTA_CC))
 		return IRQ_HANDLED;
 
-	/* Check MRL Sensor Changed */
-	if (intr_loc & PCI_EXP_SLTSTA_MRLSC) {
-		pciehp_get_latch_status(slot, &open);
-		ctrl_info(ctrl, "Latch %s on Slot(%s)\n",
-			  open ? "open" : "close", slot_name(slot));
-		pciehp_queue_interrupt_event(slot, open ? INT_SWITCH_OPEN :
-					     INT_SWITCH_CLOSE);
-	}
-
 	/* Check Attention Button Pressed */
 	if (intr_loc & PCI_EXP_SLTSTA_ABP) {
 		ctrl_info(ctrl, "Button pressed on Slot(%s)\n",
@@ -649,13 +653,11 @@
 		cmd |= PCI_EXP_SLTCTL_ABPE;
 	else
 		cmd |= PCI_EXP_SLTCTL_PDCE;
-	if (MRL_SENS(ctrl))
-		cmd |= PCI_EXP_SLTCTL_MRLSCE;
 	if (!pciehp_poll_mode)
 		cmd |= PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE;
 
 	mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
-		PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
+		PCI_EXP_SLTCTL_PFDE |
 		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 		PCI_EXP_SLTCTL_DLLSCE);
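
The pciehp changes above make the controller code defensive against a device that has gone away: a PCIe config read to a removed device returns all ones, so a Slot Status/Slot Control value of (u16)~0 is now treated as "no response from device" instead of being acted on as if it were valid. A small sketch of that pattern, assuming a hypothetical helper:

#include <linux/pci.h>

/* Illustration only, not part of the patch. */
static bool example_read_slot_status(struct pci_dev *pdev, u16 *status)
{
	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, status);
	if (*status == (u16)~0) {
		/* all ones: the device no longer responds (surprise removal) */
		dev_info(&pdev->dev, "no response from device\n");
		return false;
	}
	return true;
}
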
 
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f66be86..d449714 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -39,14 +39,13 @@
 
 static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev)
 {
-	struct irq_domain *domain = NULL;
+	struct irq_domain *domain;
 
-	if (dev->bus->msi)
-		domain = dev->bus->msi->domain;
-	if (!domain)
-		domain = arch_get_pci_msi_domain(dev);
+	domain = dev_get_msi_domain(&dev->dev);
+	if (domain)
+		return domain;
 
-	return domain;
+	return arch_get_pci_msi_domain(dev);
 }
 
 static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -77,24 +76,9 @@
 
 /* Arch hooks */
 
-struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev)
-{
-	return NULL;
-}
-
-static struct msi_controller *pci_msi_controller(struct pci_dev *dev)
-{
-	struct msi_controller *msi_ctrl = dev->bus->msi;
-
-	if (msi_ctrl)
-		return msi_ctrl;
-
-	return pcibios_msi_controller(dev);
-}
-
 int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
-	struct msi_controller *chip = pci_msi_controller(dev);
+	struct msi_controller *chip = dev->bus->msi;
 	int err;
 
 	if (!chip || !chip->setup_irq)
@@ -131,7 +115,7 @@
 	if (type == PCI_CAP_ID_MSI && nvec > 1)
 		return 1;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		ret = arch_setup_msi_irq(dev, entry);
 		if (ret < 0)
 			return ret;
@@ -151,7 +135,7 @@
 	int i;
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		if (entry->irq)
 			for (i = 0; i < entry->nvec_used; i++)
 				arch_teardown_msi_irq(entry->irq + i);
@@ -168,7 +152,7 @@
 
 	entry = NULL;
 	if (dev->msix_enabled) {
-		list_for_each_entry(entry, &dev->msi_list, list) {
+		for_each_pci_msi_entry(entry, dev) {
 			if (irq == entry->irq)
 				break;
 		}
@@ -208,7 +192,8 @@
 
 	mask_bits &= ~mask;
 	mask_bits |= flag;
-	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);
+	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
+			       mask_bits);
 
 	return mask_bits;
 }
@@ -249,7 +234,7 @@
 
 static void msi_set_mask_bit(struct irq_data *data, u32 flag)
 {
-	struct msi_desc *desc = irq_data_get_msi(data);
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
 
 	if (desc->msi_attrib.is_msix) {
 		msix_mask_irq(desc, flag);
@@ -282,13 +267,15 @@
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		default_restore_msi_irq(dev, entry->irq);
 }
 
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	BUG_ON(entry->dev->current_state != PCI_D0);
+	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+	BUG_ON(dev->current_state != PCI_D0);
 
 	if (entry->msi_attrib.is_msix) {
 		void __iomem *base = entry->mask_base +
@@ -298,7 +285,6 @@
 		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
 		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
 	} else {
-		struct pci_dev *dev = entry->dev;
 		int pos = dev->msi_cap;
 		u16 data;
 
@@ -318,7 +304,9 @@
 
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
-	if (entry->dev->current_state != PCI_D0) {
+	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+	if (dev->current_state != PCI_D0) {
 		/* Don't touch the hardware now */
 	} else if (entry->msi_attrib.is_msix) {
 		void __iomem *base;
@@ -329,7 +317,6 @@
 		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
 		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
 	} else {
-		struct pci_dev *dev = entry->dev;
 		int pos = dev->msi_cap;
 		u16 msgctl;
 
@@ -363,21 +350,22 @@
 
 static void free_msi_irqs(struct pci_dev *dev)
 {
+	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
 	struct msi_desc *entry, *tmp;
 	struct attribute **msi_attrs;
 	struct device_attribute *dev_attr;
 	int i, count = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		if (entry->irq)
 			for (i = 0; i < entry->nvec_used; i++)
 				BUG_ON(irq_has_action(entry->irq + i));
 
 	pci_msi_teardown_msi_irqs(dev);
 
-	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+	list_for_each_entry_safe(entry, tmp, msi_list, list) {
 		if (entry->msi_attrib.is_msix) {
-			if (list_is_last(&entry->list, &dev->msi_list))
+			if (list_is_last(&entry->list, msi_list))
 				iounmap(entry->mask_base);
 		}
 
@@ -402,18 +390,6 @@
 	}
 }
 
-static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
-{
-	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return NULL;
-
-	INIT_LIST_HEAD(&desc->list);
-	desc->dev = dev;
-
-	return desc;
-}
-
 static void pci_intx_for_msi(struct pci_dev *dev, int enable)
 {
 	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
@@ -448,7 +424,7 @@
 
 	if (!dev->msix_enabled)
 		return;
-	BUG_ON(list_empty(&dev->msi_list));
+	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
 
 	/* route the table */
 	pci_intx_for_msi(dev, 0);
@@ -456,7 +432,7 @@
 				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
 	arch_restore_msi_irqs(dev);
-	list_for_each_entry(entry, &dev->msi_list, list)
+	for_each_pci_msi_entry(entry, dev)
 		msix_mask_irq(entry, entry->masked);
 
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
@@ -501,7 +477,7 @@
 	int count = 0;
 
 	/* Determine how many msi entries we have */
-	list_for_each_entry(entry, &pdev->msi_list, list)
+	for_each_pci_msi_entry(entry, pdev)
 		++num_msi;
 	if (!num_msi)
 		return 0;
@@ -510,7 +486,7 @@
 	msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
 	if (!msi_attrs)
 		return -ENOMEM;
-	list_for_each_entry(entry, &pdev->msi_list, list) {
+	for_each_pci_msi_entry(entry, pdev) {
 		msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
 		if (!msi_dev_attr)
 			goto error_attrs;
@@ -568,7 +544,7 @@
 	struct msi_desc *entry;
 
 	/* MSI Entry Initialization */
-	entry = alloc_msi_entry(dev);
+	entry = alloc_msi_entry(&dev->dev);
 	if (!entry)
 		return NULL;
 
@@ -599,7 +575,7 @@
 {
 	struct msi_desc *entry;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		if (!dev->no_64bit_msi || !entry->msg.address_hi)
 			continue;
 		dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
@@ -636,7 +612,7 @@
 	mask = msi_mask(entry->msi_attrib.multi_cap);
 	msi_mask_irq(entry, mask, mask);
 
-	list_add_tail(&entry->list, &dev->msi_list);
+	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 
 	/* Configure MSI capability structure */
 	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
@@ -665,6 +641,7 @@
 	pci_msi_set_enable(dev, 1);
 	dev->msi_enabled = 1;
 
+	pcibios_free_irq(dev);
 	dev->irq = entry->irq;
 	return 0;
 }
@@ -696,7 +673,7 @@
 	int i;
 
 	for (i = 0; i < nvec; i++) {
-		entry = alloc_msi_entry(dev);
+		entry = alloc_msi_entry(&dev->dev);
 		if (!entry) {
 			if (!i)
 				iounmap(base);
@@ -713,7 +690,7 @@
 		entry->mask_base		= base;
 		entry->nvec_used		= 1;
 
-		list_add_tail(&entry->list, &dev->msi_list);
+		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
 	}
 
 	return 0;
@@ -725,7 +702,7 @@
 	struct msi_desc *entry;
 	int i = 0;
 
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
 						PCI_MSIX_ENTRY_VECTOR_CTRL;
 
@@ -792,9 +769,9 @@
 	/* Set MSI-X enabled bits and unmask the function */
 	pci_intx_for_msi(dev, 0);
 	dev->msix_enabled = 1;
-
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
+	pcibios_free_irq(dev);
 	return 0;
 
 out_avail:
@@ -806,7 +783,7 @@
 		struct msi_desc *entry;
 		int avail = 0;
 
-		list_for_each_entry(entry, &dev->msi_list, list) {
+		for_each_pci_msi_entry(entry, dev) {
 			if (entry->irq != 0)
 				avail++;
 		}
@@ -895,8 +872,8 @@
 	if (!pci_msi_enable || !dev || !dev->msi_enabled)
 		return;
 
-	BUG_ON(list_empty(&dev->msi_list));
-	desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
+	desc = first_pci_msi_entry(dev);
 
 	pci_msi_set_enable(dev, 0);
 	pci_intx_for_msi(dev, 1);
@@ -909,6 +886,7 @@
 
 	/* Restore dev->irq to its default pin-assertion irq */
 	dev->irq = desc->msi_attrib.default_irq;
+	pcibios_alloc_irq(dev);
 }
 
 void pci_disable_msi(struct pci_dev *dev)
@@ -1001,7 +979,7 @@
 		return;
 
 	/* Return the device with MSI-X masked as initial states */
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		/* Keep cached states to be restored */
 		__pci_msix_desc_mask_irq(entry, 1);
 	}
@@ -1009,6 +987,7 @@
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 	pci_intx_for_msi(dev, 1);
 	dev->msix_enabled = 0;
+	pcibios_alloc_irq(dev);
 }
 
 void pci_disable_msix(struct pci_dev *dev)
@@ -1040,7 +1019,6 @@
 
 void pci_msi_init_pci_dev(struct pci_dev *dev)
 {
-	INIT_LIST_HEAD(&dev->msi_list);
 }
 
 /**
@@ -1137,6 +1115,19 @@
 }
 EXPORT_SYMBOL(pci_enable_msix_range);
 
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+	return to_pci_dev(desc->dev);
+}
+
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
+{
+	struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+	return dev->bus->sysdata;
+}
+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
+
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
 /**
  * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
@@ -1145,7 +1136,7 @@
  */
 void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
 {
-	struct msi_desc *desc = irq_data->msi_desc;
+	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
 
 	/*
 	 * For MSI-X desc->irq is always equal to irq_data->irq. For
@@ -1269,12 +1260,19 @@
 					     struct msi_domain_info *info,
 					     struct irq_domain *parent)
 {
+	struct irq_domain *domain;
+
 	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
 		pci_msi_domain_update_dom_ops(info);
 	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
 		pci_msi_domain_update_chip_ops(info);
 
-	return msi_create_irq_domain(node, info, parent);
+	domain = msi_create_irq_domain(node, info, parent);
+	if (!domain)
+		return NULL;
+
+	domain->bus_token = DOMAIN_BUS_PCI_MSI;
+	return domain;
 }
 
 /**
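
The msi.c conversion above moves the MSI descriptor list from struct pci_dev into the generic struct device: open-coded list_for_each_entry() walks over dev->msi_list become for_each_pci_msi_entry(), entry->dev accesses go through msi_desc_to_pci_dev(), and descriptors come from the generic alloc_msi_entry(&dev->dev). A sketch of the new idiom as seen from arch or driver code (example_dump_msi_irqs() is hypothetical):

#include <linux/msi.h>
#include <linux/pci.h>

static void example_dump_msi_irqs(struct pci_dev *pdev)
{
	struct msi_desc *entry;

	/* replaces list_for_each_entry(entry, &pdev->msi_list, list) */
	for_each_pci_msi_entry(entry, pdev)
		dev_info(&pdev->dev, "MSI%s irq %d uses %d vector(s)\n",
			 entry->msi_attrib.is_msix ? "-X" : "",
			 entry->irq, entry->nvec_used);
}
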
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index f092993..2e99a50 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -9,6 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/of.h>
@@ -59,3 +60,32 @@
 		return of_node_get(bus->bridge->parent->of_node);
 	return NULL;
 }
+
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
+{
+#ifdef CONFIG_IRQ_DOMAIN
+	struct device_node *np;
+	struct irq_domain *d;
+
+	if (!bus->dev.of_node)
+		return NULL;
+
+	/* Start looking for a phandle to an MSI controller. */
+	np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0);
+
+	/*
+	 * If we don't have an msi-parent property, look for a domain
+	 * directly attached to the host bridge.
+	 */
+	if (!np)
+		np = bus->dev.of_node;
+
+	d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+	if (d)
+		return d;
+
+	return irq_find_host(np);
+#else
+	return NULL;
+#endif
+}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 314a625..a32ba75 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -594,7 +594,7 @@
 /**
  * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
  * @pdev: the PCI device whose delay is to be updated
- * @adev: the companion ACPI device of this PCI device
+ * @handle: ACPI handle of this device
  *
  * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
  * control method of either the device itself or the PCI host bridge.
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3cb2210..52a880c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -388,18 +388,31 @@
 	return error;
 }
 
+int __weak pcibios_alloc_irq(struct pci_dev *dev)
+{
+	return 0;
+}
+
+void __weak pcibios_free_irq(struct pci_dev *dev)
+{
+}
+
 static int pci_device_probe(struct device *dev)
 {
-	int error = 0;
-	struct pci_driver *drv;
-	struct pci_dev *pci_dev;
+	int error;
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct pci_driver *drv = to_pci_driver(dev->driver);
 
-	drv = to_pci_driver(dev->driver);
-	pci_dev = to_pci_dev(dev);
+	error = pcibios_alloc_irq(pci_dev);
+	if (error < 0)
+		return error;
+
 	pci_dev_get(pci_dev);
 	error = __pci_device_probe(drv, pci_dev);
-	if (error)
+	if (error) {
+		pcibios_free_irq(pci_dev);
 		pci_dev_put(pci_dev);
+	}
 
 	return error;
 }
@@ -415,6 +428,7 @@
 			drv->remove(pci_dev);
 			pm_runtime_put_noidle(dev);
 		}
+		pcibios_free_irq(pci_dev);
 		pci_dev->driver = NULL;
 	}
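
pcibios_alloc_irq() and pcibios_free_irq() are new weak hooks: the defaults above do nothing, and pci_device_probe()/pci_device_remove() call them so an architecture can hand out a legacy IRQ only while a driver is bound (the MSI code releases it again once MSI or MSI-X is enabled). A sketch of what an architecture override might look like; the example_arch_* calls are hypothetical placeholders, not real kernel interfaces:

#include <linux/pci.h>

int example_arch_map_irq(struct pci_dev *dev);		/* hypothetical */
void example_arch_unmap_irq(struct pci_dev *dev);	/* hypothetical */

int pcibios_alloc_irq(struct pci_dev *dev)
{
	int irq = example_arch_map_irq(dev);

	if (irq < 0)
		return irq;

	dev->irq = irq;
	return 0;
}

void pcibios_free_irq(struct pci_dev *dev)
{
	example_arch_unmap_irq(dev);
}
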
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0008c95..6a9a111 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -81,7 +81,7 @@
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
@@ -138,9 +138,22 @@
 	return ioremap_nocache(res->start, resource_size(res));
 }
 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
+
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
+{
+	/*
+	 * Make sure the BAR is actually a memory resource, not an IO resource
+	 */
+	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	return ioremap_wc(pci_resource_start(pdev, bar),
+			  pci_resource_len(pdev, bar));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
 #endif
 
-#define PCI_FIND_CAP_TTL	48
 
 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 				   u8 pos, int cap, int *ttl)
@@ -196,8 +209,6 @@
 		return PCI_CAPABILITY_LIST;
 	case PCI_HEADER_TYPE_CARDBUS:
 		return PCI_CB_CAPABILITY_LIST;
-	default:
-		return 0;
 	}
 
 	return 0;
@@ -972,7 +983,7 @@
 	struct pci_cap_saved_state *save_state;
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
-	if (pos <= 0)
+	if (!pos)
 		return 0;
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
@@ -995,7 +1006,7 @@
 
 	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
-	if (!save_state || pos <= 0)
+	if (!save_state || !pos)
 		return;
 	cap = (u16 *)&save_state->cap.data[0];
 
@@ -1092,6 +1103,9 @@
 
 	pci_restore_pcix_state(dev);
 	pci_restore_msi_state(dev);
+
+	/* Restore ACS and IOV configuration state */
+	pci_enable_acs(dev);
 	pci_restore_iov_state(dev);
 
 	dev->state_saved = false;
@@ -2159,7 +2173,7 @@
 	else
 		pos = pci_find_capability(dev, cap);
 
-	if (pos <= 0)
+	if (!pos)
 		return 0;
 
 	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
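
pci_ioremap_wc_bar() added above is the write-combining counterpart of pci_ioremap_bar(): it refuses non-memory BARs and maps the whole BAR with ioremap_wc(). A usage sketch in a hypothetical driver probe, mapping a prefetchable framebuffer-style BAR (error handling kept minimal; the mapping still needs iounmap() on teardown):

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *fb;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	fb = pci_ioremap_wc_bar(pdev, 1);	/* WC mapping of a MEM BAR */
	if (!fb)
		return -ENOMEM;

	dev_info(&pdev->dev, "BAR 1 mapped write-combined at %p\n", fb);
	return 0;
}
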
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 4ff0ff1..24ba9dc 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -4,6 +4,8 @@
 #define PCI_CFG_SPACE_SIZE	256
 #define PCI_CFG_SPACE_EXP_SIZE	4096
 
+#define PCI_FIND_CAP_TTL	48
+
 extern const unsigned char pcie_link_speed[];
 
 bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 2f0ce66..88122dc 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -448,7 +448,7 @@
 }
 
 /**
- * pcie_port_device_suspend - resume port services associated with a PCIe port
+ * pcie_port_device_resume - resume port services associated with a PCIe port
  * @dev: PCI Express port to handle
  */
 int pcie_port_device_resume(struct device *dev)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b978bbf..8177f3b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -661,6 +661,35 @@
 	}
 }
 
+static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
+{
+	struct irq_domain *d;
+
+	/*
+	 * Any firmware interface that can resolve the msi_domain
+	 * should be called from here.
+	 */
+	d = pci_host_bridge_of_msi_domain(bus);
+
+	return d;
+}
+
+static void pci_set_bus_msi_domain(struct pci_bus *bus)
+{
+	struct irq_domain *d;
+
+	/*
+	 * Either bus is the root, and we must obtain it from the
+	 * firmware, or we inherit it from the bridge device.
+	 */
+	if (pci_is_root_bus(bus))
+		d = pci_host_bridge_msi_domain(bus);
+	else
+		d = dev_get_msi_domain(&bus->self->dev);
+
+	dev_set_msi_domain(&bus->dev, d);
+}
+
 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 					   struct pci_dev *bridge, int busnr)
 {
@@ -714,6 +743,7 @@
 	bridge->subordinate = child;
 
 add_dev:
+	pci_set_bus_msi_domain(child);
 	ret = device_register(&child->dev);
 	WARN_ON(ret < 0);
 
@@ -826,6 +856,9 @@
 			child->bridge_ctl = bctl;
 		}
 
+		/* Read and initialize bridge resources */
+		pci_read_bridge_bases(child);
+
 		cmax = pci_scan_child_bus(child);
 		if (cmax > subordinate)
 			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -886,6 +919,9 @@
 
 		if (!is_cardbus) {
 			child->bridge_ctl = bctl;
+
+			/* Read and initialize bridge resources */
+			pci_read_bridge_bases(child);
 			max = pci_scan_child_bus(child);
 		} else {
 			/*
@@ -1108,7 +1144,7 @@
 
 #define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
 
-static void pci_msi_setup_pci_dev(struct pci_dev *dev)
+void pci_msi_setup_pci_dev(struct pci_dev *dev)
 {
 	/*
 	 * Disable the MSI hardware to avoid screaming interrupts
@@ -1138,7 +1174,6 @@
 {
 	u32 class;
 	u8 hdr_type;
-	struct pci_slot *slot;
 	int pos = 0;
 	struct pci_bus_region region;
 	struct resource *res;
@@ -1154,10 +1189,7 @@
 	dev->error_state = pci_channel_io_normal;
 	set_pcie_port_type(dev);
 
-	list_for_each_entry(slot, &dev->bus->slots, list)
-		if (PCI_SLOT(dev->devfn) == slot->number)
-			dev->slot = slot;
-
+	pci_dev_assign_slot(dev);
 	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
 	   set this higher, assuming the system even supports it.  */
 	dev->dma_mask = 0xffffffff;
@@ -1273,13 +1305,51 @@
 	bad:
 		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
 			dev->class, dev->hdr_type);
-		dev->class = PCI_CLASS_NOT_DEFINED;
+		dev->class = PCI_CLASS_NOT_DEFINED << 8;
 	}
 
 	/* We found a fine healthy device, go go go... */
 	return 0;
 }
 
+static void pci_configure_mps(struct pci_dev *dev)
+{
+	struct pci_dev *bridge = pci_upstream_bridge(dev);
+	int mps, p_mps, rc;
+
+	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+		return;
+
+	mps = pcie_get_mps(dev);
+	p_mps = pcie_get_mps(bridge);
+
+	if (mps == p_mps)
+		return;
+
+	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+			 mps, pci_name(bridge), p_mps);
+		return;
+	}
+
+	/*
+	 * Fancier MPS configuration is done later by
+	 * pcie_bus_configure_settings()
+	 */
+	if (pcie_bus_config != PCIE_BUS_DEFAULT)
+		return;
+
+	rc = pcie_set_mps(dev, p_mps);
+	if (rc) {
+		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+			 p_mps);
+		return;
+	}
+
+	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
+		 p_mps, mps, 128 << dev->pcie_mpss);
+}
+
 static struct hpp_type0 pci_default_type0 = {
 	.revision = 1,
 	.cache_line_size = 8,
@@ -1401,6 +1471,8 @@
 	struct hotplug_params hpp;
 	int ret;
 
+	pci_configure_mps(dev);
+
 	memset(&hpp, 0, sizeof(hpp));
 	ret = pci_get_hp_params(dev, &hpp);
 	if (ret)
@@ -1545,10 +1617,24 @@
 	/* Single Root I/O Virtualization */
 	pci_iov_init(dev);
 
+	/* Address Translation Services */
+	pci_ats_init(dev);
+
 	/* Enable ACS P2P upstream forwarding */
 	pci_enable_acs(dev);
 }
 
+static void pci_set_msi_domain(struct pci_dev *dev)
+{
+	/*
+	 * If no domain has been set through the pcibios_add_device
+	 * callback, inherit the default from the bus device.
+	 */
+	if (!dev_get_msi_domain(&dev->dev))
+		dev_set_msi_domain(&dev->dev,
+				   dev_get_msi_domain(&dev->bus->dev));
+}
+
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
 {
 	int ret;
@@ -1590,6 +1676,9 @@
 	ret = pcibios_add_device(dev);
 	WARN_ON(ret < 0);
 
+	/* Setup MSI irq domain */
+	pci_set_msi_domain(dev);
+
 	/* Notifier could use PCI capabilities */
 	dev->match_driver = false;
 	ret = device_add(&dev->dev);
@@ -1796,22 +1885,6 @@
 		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
 }
 
-static void pcie_bus_detect_mps(struct pci_dev *dev)
-{
-	struct pci_dev *bridge = dev->bus->self;
-	int mps, p_mps;
-
-	if (!bridge)
-		return;
-
-	mps = pcie_get_mps(dev);
-	p_mps = pcie_get_mps(bridge);
-
-	if (mps != p_mps)
-		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
-			 mps, pci_name(bridge), p_mps);
-}
-
 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
 {
 	int mps, orig_mps;
@@ -1819,10 +1892,9 @@
 	if (!pci_is_pcie(dev))
 		return 0;
 
-	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
-		pcie_bus_detect_mps(dev);
+	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
+	    pcie_bus_config == PCIE_BUS_DEFAULT)
 		return 0;
-	}
 
 	mps = 128 << *(u8 *)data;
 	orig_mps = pcie_get_mps(dev);
@@ -1980,6 +2052,7 @@
 	b->bridge = get_device(&bridge->dev);
 	device_enable_async_suspend(b->bridge);
 	pci_set_bus_of_node(b);
+	pci_set_bus_msi_domain(b);
 
 	if (!parent)
 		set_dev_node(b->bridge, pcibus_to_node(b));
@@ -2101,8 +2174,9 @@
 			res, ret ? "can not be" : "is");
 }
 
-struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
-		struct pci_ops *ops, void *sysdata, struct list_head *resources)
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+		struct pci_ops *ops, void *sysdata,
+		struct list_head *resources, struct msi_controller *msi)
 {
 	struct resource_entry *window;
 	bool found = false;
@@ -2119,6 +2193,8 @@
 	if (!b)
 		return NULL;
 
+	b->msi = msi;
+
 	if (!found) {
 		dev_info(&b->dev,
 		 "No busn resource found for root bus, will use [bus %02x-ff]\n",
@@ -2133,6 +2209,13 @@
 
 	return b;
 }
+
+struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
+		struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
+				     NULL);
+}
 EXPORT_SYMBOL(pci_scan_root_bus);
 
 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
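
Two themes in the probe.c hunks above: every bus and device now gets an MSI irq_domain assigned during enumeration (resolved from firmware for the root bus, inherited from the upstream bridge otherwise), and pci_configure_mps() aligns a newly scanned device's Max Payload Size with its upstream bridge when pcie_bus_config is PCIE_BUS_DEFAULT. The new pci_scan_root_bus_msi() additionally lets a host controller pass its msi_controller down to the core; a sketch of a hypothetical host bridge driver using it (the example_ names and the way 'resources' is built are assumptions):

#include <linux/msi.h>
#include <linux/pci.h>

static int example_host_probe(struct device *dev, struct pci_ops *ops,
			      void *sysdata, struct list_head *resources,
			      struct msi_controller *mchip)
{
	struct pci_bus *bus;

	bus = pci_scan_root_bus_msi(dev, 0, ops, sysdata, resources, mchip);
	if (!bus)
		return -ENODEV;

	pci_bus_add_devices(bus);
	return 0;
}
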
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e9fd0e9..6a30252 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -163,7 +163,6 @@
  *	VIA Apollo KT133 needs PCI latency patch
  *	Made according to a windows driver based patch by George E. Breese
  *	see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
- *	and http://www.georgebreese.com/net/software/#PCI
  *	Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
  *	the info on which Mr Breese based his work.
  *
@@ -424,10 +423,12 @@
  */
 static void quirk_amd_nl_class(struct pci_dev *pdev)
 {
-	/*
-	 * Use 'USB Device' (0x0c03fe) instead of PCI header provided
-	 */
-	pdev->class = 0x0c03fe;
+	u32 class = pdev->class;
+
+	/* Use "USB Device (not host controller)" class */
+	pdev->class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe;
+	dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
+		 class, pdev->class);
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
 		quirk_amd_nl_class);
@@ -1569,6 +1570,18 @@
 
 #endif
 
+static void quirk_jmicron_async_suspend(struct pci_dev *dev)
+{
+	if (dev->multifunction) {
+		device_disable_async_suspend(&dev->dev);
+		dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
+	}
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
+
 #ifdef CONFIG_X86_IO_APIC
 static void quirk_alder_ioapic(struct pci_dev *pdev)
 {
@@ -1894,6 +1907,15 @@
 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
 
+static void quirk_f0_vpd_link(struct pci_dev *dev)
+{
+	if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+		return;
+	dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
+
 static void quirk_e100_interrupt(struct pci_dev *dev)
 {
 	u16 command, pmcsr;
@@ -1986,14 +2008,18 @@
 
 static void fixup_rev1_53c810(struct pci_dev *dev)
 {
-	/* rev 1 ncr53c810 chips don't set the class at all which means
+	u32 class = dev->class;
+
+	/*
+	 * rev 1 ncr53c810 chips don't set the class at all which means
 	 * they don't get their resources remapped. Fix that here.
 	 */
+	if (class)
+		return;
 
-	if (dev->class == PCI_CLASS_NOT_DEFINED) {
-		dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
-		dev->class = PCI_CLASS_STORAGE_SCSI;
-	}
+	dev->class = PCI_CLASS_STORAGE_SCSI << 8;
+	dev_info(&dev->dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
+		 class, dev->class);
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
 
@@ -2241,7 +2267,7 @@
  * return 1 if a HT MSI capability is found and enabled */
 static int msi_ht_cap_enabled(struct pci_dev *dev)
 {
-	int pos, ttl = 48;
+	int pos, ttl = PCI_FIND_CAP_TTL;
 
 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 	while (pos && ttl--) {
@@ -2300,7 +2326,7 @@
 /* Force enable MSI mapping capability on HT bridges */
 static void ht_enable_msi_mapping(struct pci_dev *dev)
 {
-	int pos, ttl = 48;
+	int pos, ttl = PCI_FIND_CAP_TTL;
 
 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 	while (pos && ttl--) {
@@ -2379,7 +2405,7 @@
 
 static int ht_check_msi_mapping(struct pci_dev *dev)
 {
-	int pos, ttl = 48;
+	int pos, ttl = PCI_FIND_CAP_TTL;
 	int found = 0;
 
 	/* check if there is HT MSI cap or enabled on this device */
@@ -2504,7 +2530,7 @@
 
 static void ht_disable_msi_mapping(struct pci_dev *dev)
 {
-	int pos, ttl = 48;
+	int pos, ttl = PCI_FIND_CAP_TTL;
 
 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
 	while (pos && ttl--) {
@@ -2829,12 +2855,15 @@
 
 static void fixup_ti816x_class(struct pci_dev *dev)
 {
+	u32 class = dev->class;
+
 	/* TI 816x devices do not have class code set when in PCIe boot mode */
-	dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
-	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
+	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
+	dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
+		 class, dev->class);
 }
 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
-				 PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+			      PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
 
 /* Some PCIe devices do not work reliably with the claimed maximum
  * payload size supported.
@@ -2862,7 +2891,8 @@
 	int err;
 	u16 rcc;
 
-	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
+	    pcie_bus_config == PCIE_BUS_DEFAULT)
 		return;
 
 	/* Intel errata specifies bits to change but does not say what they are.
@@ -3028,7 +3058,16 @@
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
-
+/* Intel Cherrytrail devices do not need 10ms d3_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
 /*
  * Some devices may pass our check in pci_intx_mask_supported if
  * PCI_COMMAND_INTX_DISABLE works though they actually do not properly
@@ -3326,28 +3365,6 @@
  * reset a single function if other methods (e.g. FLR, PM D0->D3) are
  * not available.
  */
-static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
-{
-	int pos;
-
-	/* only implement PCI_CLASS_SERIAL_USB at present */
-	if (dev->class == PCI_CLASS_SERIAL_USB) {
-		pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
-		if (!pos)
-			return -ENOTTY;
-
-		if (probe)
-			return 0;
-
-		pci_write_config_byte(dev, pos + 0x4, 1);
-		msleep(100);
-
-		return 0;
-	} else {
-		return -ENOTTY;
-	}
-}
-
 static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
 {
 	/*
@@ -3506,8 +3523,6 @@
 		reset_ivb_igd },
 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
 		reset_ivb_igd },
-	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
-		reset_intel_generic_dev },
 	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
 		reset_chelsio_generic_dev },
 	{ 0 }
@@ -3655,6 +3670,28 @@
 DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
 /*
+ * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero)
+ * class code.  Fix it.
+ */
+static void quirk_tw686x_class(struct pci_dev *pdev)
+{
+	u32 class = pdev->class;
+
+	/* Use "Multimedia controller" class */
+	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
+	dev_info(&pdev->dev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
+		 class, pdev->class);
+}
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_tw686x_class);
+DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
+			      quirk_tw686x_class);
+
+/*
  * AMD has indicated that the devices below do not support peer-to-peer
  * in any system where they are found in the southbridge with an AMD
  * IOMMU in the system.  Multifunction devices that do not support
@@ -3848,6 +3885,9 @@
 	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
+	/* I219 */
+	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
 	/* Intel PCH root ports */
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
 	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
@@ -4008,3 +4048,88 @@
 		}
 	}
 }
+
+/*
+ * The PCI capabilities list for Intel DH895xCC VFs (device id 0x0443) with
+ * QuickAssist Technology (QAT) is prematurely terminated in hardware.  The
+ * Next Capability pointer in the MSI Capability Structure should point to
+ * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating
+ * the list.
+ */
+static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
+{
+	int pos, i = 0;
+	u8 next_cap;
+	u16 reg16, *cap;
+	struct pci_cap_saved_state *state;
+
+	/* Bail if the hardware bug is fixed */
+	if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
+		return;
+
+	/* Bail if MSI Capability Structure is not found for some reason */
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (!pos)
+		return;
+
+	/*
+	 * Bail if Next Capability pointer in the MSI Capability Structure
+	 * is not the expected incorrect 0x00.
+	 */
+	pci_read_config_byte(pdev, pos + 1, &next_cap);
+	if (next_cap)
+		return;
+
+	/*
+	 * PCIe Capability Structure is expected to be at 0x50 and should
+	 * terminate the list (Next Capability pointer is 0x00).  Verify
+	 * Capability Id and Next Capability pointer is as expected.
+	 * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext()
+	 * to correctly set kernel data structures which have already been
+	 * set incorrectly due to the hardware bug.
+	 */
+	pos = 0x50;
+	pci_read_config_word(pdev, pos, &reg16);
+	if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
+		u32 status;
+#ifndef PCI_EXP_SAVE_REGS
+#define PCI_EXP_SAVE_REGS     7
+#endif
+		int size = PCI_EXP_SAVE_REGS * sizeof(u16);
+
+		pdev->pcie_cap = pos;
+		pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+		pdev->pcie_flags_reg = reg16;
+		pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+
+		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
+		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
+		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
+			pdev->cfg_size = PCI_CFG_SPACE_SIZE;
+
+		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
+			return;
+
+		/*
+		 * Save PCIE cap
+		 */
+		state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
+		if (!state)
+			return;
+
+		state->cap.cap_nr = PCI_CAP_ID_EXP;
+		state->cap.cap_extended = 0;
+		state->cap.size = size;
+		cap = (u16 *)&state->cap.data[0];
+		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
+		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
+		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
+		pcie_capability_read_word(pdev, PCI_EXP_RTCTL,  &cap[i++]);
+		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
+		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
+		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
+		hlist_add_head(&state->next, &pdev->saved_cap_space);
+	}
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
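
The class-code quirks above now keep dev->class in its full 24-bit form (base class and sub-class shifted left by 8, programming interface in the low byte), which is why the fixed-up values are written as PCI_CLASS_... << 8 and why the DECLARE_PCI_FIXUP_CLASS_* entries use a class_shift of 8 to match only the upper 16 bits. A sketch of the pattern with hypothetical vendor/device IDs:

#include <linux/pci.h>
#include <linux/pci_ids.h>

static void example_fix_class(struct pci_dev *pdev)
{
	u32 old = pdev->class;

	/* base/sub class in the upper bits, prog-if 0x01 in the low byte */
	pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
	dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x)\n",
		 old, pdev->class);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(0x1234, 0x5678,	/* hypothetical IDs */
			      PCI_CLASS_NOT_DEFINED, 8, example_fix_class);
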
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 396c200..429d34c 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -14,6 +14,7 @@
 
 struct kset *pci_slots_kset;
 EXPORT_SYMBOL_GPL(pci_slots_kset);
+static DEFINE_MUTEX(pci_slot_mutex);
 
 static ssize_t pci_slot_attr_show(struct kobject *kobj,
 					struct attribute *attr, char *buf)
@@ -106,9 +107,11 @@
 	dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n",
 		slot->number, pci_slot_name(slot));
 
+	down_read(&pci_bus_sem);
 	list_for_each_entry(dev, &slot->bus->devices, bus_list)
 		if (PCI_SLOT(dev->devfn) == slot->number)
 			dev->slot = NULL;
+	up_read(&pci_bus_sem);
 
 	list_del(&slot->list);
 
@@ -191,12 +194,22 @@
 	return result;
 }
 
+void pci_dev_assign_slot(struct pci_dev *dev)
+{
+	struct pci_slot *slot;
+
+	mutex_lock(&pci_slot_mutex);
+	list_for_each_entry(slot, &dev->bus->slots, list)
+		if (PCI_SLOT(dev->devfn) == slot->number)
+			dev->slot = slot;
+	mutex_unlock(&pci_slot_mutex);
+}
+
 static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr)
 {
 	struct pci_slot *slot;
-	/*
-	 * We already hold pci_bus_sem so don't worry
-	 */
+
+	/* We already hold pci_slot_mutex */
 	list_for_each_entry(slot, &parent->slots, list)
 		if (slot->number == slot_nr) {
 			kobject_get(&slot->kobj);
@@ -253,7 +266,7 @@
 	int err = 0;
 	char *slot_name = NULL;
 
-	down_write(&pci_bus_sem);
+	mutex_lock(&pci_slot_mutex);
 
 	if (slot_nr == -1)
 		goto placeholder;
@@ -301,16 +314,18 @@
 	INIT_LIST_HEAD(&slot->list);
 	list_add(&slot->list, &parent->slots);
 
+	down_read(&pci_bus_sem);
 	list_for_each_entry(dev, &parent->devices, bus_list)
 		if (PCI_SLOT(dev->devfn) == slot_nr)
 			dev->slot = slot;
+	up_read(&pci_bus_sem);
 
 	dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n",
 		slot_nr, pci_slot_name(slot));
 
 out:
 	kfree(slot_name);
-	up_write(&pci_bus_sem);
+	mutex_unlock(&pci_slot_mutex);
 	return slot;
 err:
 	kfree(slot);
@@ -332,9 +347,9 @@
 	dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n",
 		slot->number, atomic_read(&slot->kobj.kref.refcount) - 1);
 
-	down_write(&pci_bus_sem);
+	mutex_lock(&pci_slot_mutex);
 	kobject_put(&slot->kobj);
-	up_write(&pci_bus_sem);
+	mutex_unlock(&pci_slot_mutex);
 }
 EXPORT_SYMBOL_GPL(pci_destroy_slot);
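
slot.c above stops write-locking pci_bus_sem for slot creation and destruction and uses a dedicated pci_slot_mutex instead; pci_bus_sem is only read-held while the bus device list is walked, and pci_dev_assign_slot() gives probe.c the same lookup under the new mutex. A lock-ordering sketch (illustration only; example_assign_devices_to_slot() is not in the patch):

#include <linux/pci.h>

static void example_assign_devices_to_slot(struct pci_bus *bus,
					   struct pci_slot *slot)
{
	struct pci_dev *dev;

	/* caller holds pci_slot_mutex, as pci_create_slot() now does */
	down_read(&pci_bus_sem);
	list_for_each_entry(dev, &bus->devices, bus_list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;
	up_read(&pci_bus_sem);
}
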
 
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 8b7a900..c777b97 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -265,7 +265,7 @@
 	}
 
 	i = 0;
-	list_for_each_entry(entry, &dev->msi_list, list) {
+	for_each_pci_msi_entry(entry, dev) {
 		op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
 		/* Vector is useless at this point. */
 		op.msix_entries[i].vector = -1;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 984a8ff..483f919 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -296,20 +296,18 @@
 		goto err0;
 	}
 
-	clk = clk_get(&dev->dev, NULL);
+	clk = devm_clk_get(&dev->dev, NULL);
 	if (IS_ERR(clk))
 		return -ENODEV;
 
 	pxa2xx_drv_pcmcia_ops(ops);
 
-	sinfo = kzalloc(SKT_DEV_INFO_SIZE(ops->nr), GFP_KERNEL);
-	if (!sinfo) {
-		clk_put(clk);
+	sinfo = devm_kzalloc(&dev->dev, SKT_DEV_INFO_SIZE(ops->nr),
+			     GFP_KERNEL);
+	if (!sinfo)
 		return -ENOMEM;
-	}
 
 	sinfo->nskt = ops->nr;
-	sinfo->clk = clk;
 
 	/* Initialize processor specific parameters */
 	for (i = 0; i < ops->nr; i++) {
@@ -332,8 +330,7 @@
 err1:
 	while (--i >= 0)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
-	clk_put(clk);
-	kfree(sinfo);
+
 err0:
 	return ret;
 }
@@ -343,13 +340,9 @@
 	struct skt_dev_info *sinfo = platform_get_drvdata(dev);
 	int i;
 
-	platform_set_drvdata(dev, NULL);
-
 	for (i = 0; i < sinfo->nskt; i++)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
 
-	clk_put(sinfo->clk);
-	kfree(sinfo);
 	return 0;
 }
 
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 8039452..66acdc8 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,8 +93,6 @@
 	for (i = 0; i < sinfo->nskt; i++)
 		soc_pcmcia_remove_one(&sinfo->skt[i]);
 
-	clk_put(sinfo->clk);
-	kfree(sinfo);
 	return 0;
 }
 
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 80b8e9d..a1531fe 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -135,8 +135,13 @@
 	int (*add)(struct soc_pcmcia_socket *))
 {
 	struct sa1111_pcmcia_socket *s;
+	struct clk *clk;
 	int i, ret = 0;
 
+	clk = devm_clk_get(&dev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
 	ops->socket_state = sa1111_pcmcia_socket_state;
 
 	for (i = 0; i < ops->nr; i++) {
@@ -145,12 +150,8 @@
 			return -ENOMEM;
 
 		s->soc.nr = ops->first + i;
-		s->soc.clk = clk_get(&dev->dev, NULL);
-		if (IS_ERR(s->soc.clk)) {
-			ret = PTR_ERR(s->soc.clk);
-			kfree(s);
-			return ret;
-		}
+		s->soc.clk = clk;
+
 		soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
 		s->dev = dev;
 		if (s->soc.nr) {
@@ -226,7 +227,6 @@
 	for (; s; s = next) {
 		next = s->next;
 		soc_pcmcia_remove_one(&s->soc);
-		clk_put(s->soc.clk);
 		kfree(s);
 	}
 
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index cf6de2c..9f6ec87 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -222,18 +222,17 @@
 	int i, ret = 0;
 	struct clk *clk;
 
-	clk = clk_get(dev, NULL);
+	clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
 	sa11xx_drv_pcmcia_ops(ops);
 
-	sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
+	sinfo = devm_kzalloc(dev, SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
 	if (!sinfo)
 		return -ENOMEM;
 
 	sinfo->nskt = nr;
-	sinfo->clk = clk;
 
 	/* Initialize processor specific parameters */
 	for (i = 0; i < nr; i++) {
@@ -251,8 +250,6 @@
 	if (ret) {
 		while (--i >= 0)
 			soc_pcmcia_remove_one(&sinfo->skt[i]);
-		clk_put(clk);
-		kfree(sinfo);
 	} else {
 		dev_set_drvdata(dev, sinfo);
 	}
@@ -261,16 +258,6 @@
 }
 EXPORT_SYMBOL(sa11xx_drv_pcmcia_probe);
 
-static int __init sa11xx_pcmcia_init(void)
-{
-	return 0;
-}
-fs_initcall(sa11xx_pcmcia_init);
-
-static void __exit sa11xx_pcmcia_exit(void) {}
-
-module_exit(sa11xx_pcmcia_exit);
-
 MODULE_AUTHOR("John Dorsey <john+@cs.cmu.edu>");
 MODULE_DESCRIPTION("Linux PCMCIA Card Services: SA-11xx core socket driver");
 MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index e6fcbea..94762a5 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -68,7 +68,6 @@
 
 struct skt_dev_info {
 	int nskt;
-	struct clk *clk;
 	struct soc_pcmcia_socket skt[0];
 };
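
The pcmcia hunks above convert the SA11xx/PXA socket drivers to managed resources: devm_clk_get() and devm_kzalloc() tie the clock and the socket info to the device lifetime, so the explicit clk_put()/kfree() error and remove paths, and the clk member of struct skt_dev_info, can be dropped. A minimal sketch of the devm pattern, assuming a hypothetical example_socket_probe():

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_socket_info {
	struct clk *clk;
	int nskt;
};

static int example_socket_probe(struct platform_device *pdev)
{
	struct example_socket_info *info;

	/* freed automatically on probe failure or device removal */
	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk))
		return PTR_ERR(info->clk);

	platform_set_drvdata(pdev, info);
	return 0;
}
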
 
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
new file mode 100644
index 0000000..d9de36e
--- /dev/null
+++ b/drivers/perf/Kconfig
@@ -0,0 +1,15 @@
+#
+# Performance Monitor Drivers
+#
+
+menu "Performance monitor support"
+
+config ARM_PMU
+	depends on PERF_EVENTS && ARM
+	bool "ARM PMU framework"
+	default y
+	help
+	  Say y if you want to use CPU performance monitors on ARM-based
+	  systems.
+
+endmenu
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
new file mode 100644
index 0000000..acd2397
--- /dev/null
+++ b/drivers/perf/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_PMU) += arm_pmu.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
new file mode 100644
index 0000000..2365a32
--- /dev/null
+++ b/drivers/perf/arm_pmu.c
@@ -0,0 +1,921 @@
+#undef DEBUG
+
+/*
+ * ARM performance counter support.
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based on the sparc64 perf event code, which is in turn based
+ * on the x86 code.
+ */
+#define pr_fmt(fmt) "hw perfevents: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+
+static int
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
+{
+	unsigned int cache_type, cache_op, cache_result, ret;
+
+	cache_type = (config >>  0) & 0xff;
+	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+		return -EINVAL;
+
+	cache_op = (config >>  8) & 0xff;
+	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+		return -EINVAL;
+
+	cache_result = (config >> 16) & 0xff;
+	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+		return -EINVAL;
+
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
+
+	if (ret == CACHE_OP_UNSUPPORTED)
+		return -ENOENT;
+
+	return ret;
+}
+
+static int
+armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
+{
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
+}
+
+static int
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
+{
+	return (int)(config & raw_event_mask);
+}
+
+int
+armpmu_map_event(struct perf_event *event,
+		 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		 const unsigned (*cache_map)
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+	int type = event->attr.type;
+
+	if (type == event->pmu->type)
+		return armpmu_map_raw_event(raw_event_mask, config);
+
+	switch (type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_hw_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
+}
+
+int armpmu_event_set_period(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	s64 left = local64_read(&hwc->period_left);
+	s64 period = hwc->sample_period;
+	int ret = 0;
+
+	if (unlikely(left <= -period)) {
+		left = period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	if (unlikely(left <= 0)) {
+		left += period;
+		local64_set(&hwc->period_left, left);
+		hwc->last_period = period;
+		ret = 1;
+	}
+
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
+
+	local64_set(&hwc->prev_count, (u64)-left);
+
+	armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+
+	perf_event_update_userpage(event);
+
+	return ret;
+}
+
+u64 armpmu_event_update(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	u64 delta, prev_raw_count, new_raw_count;
+
+again:
+	prev_raw_count = local64_read(&hwc->prev_count);
+	new_raw_count = armpmu->read_counter(event);
+
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+			     new_raw_count) != prev_raw_count)
+		goto again;
+
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
+
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
+}
+
+static void
+armpmu_read(struct perf_event *event)
+{
+	armpmu_event_update(event);
+}
+
+static void
+armpmu_stop(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to update the counter, so ignore
+	 * PERF_EF_UPDATE, see comments in armpmu_start().
+	 */
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		armpmu->disable(event);
+		armpmu_event_update(event);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static void armpmu_start(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	/*
+	 * ARM pmu always has to reprogram the period, so ignore
+	 * PERF_EF_RELOAD, see the comment below.
+	 */
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+	/*
+	 * Set the period again. Some counters can't be stopped, so when we
+	 * were stopped we simply disabled the IRQ source and the counter
+	 * may have been left counting. If we don't do this step then we may
+	 * get an interrupt too soon or *way* too late if the overflow has
+	 * happened since disabling.
+	 */
+	armpmu_event_set_period(event);
+	armpmu->enable(event);
+}
+
+static void
+armpmu_del(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	armpmu_stop(event, PERF_EF_UPDATE);
+	hw_events->events[idx] = NULL;
+	clear_bit(idx, hw_events->used_mask);
+	if (armpmu->clear_event_idx)
+		armpmu->clear_event_idx(hw_events, event);
+
+	perf_event_update_userpage(event);
+}
+
+static int
+armpmu_add(struct perf_event *event, int flags)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+	int err = 0;
+
+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
+	perf_pmu_disable(event->pmu);
+
+	/* If we don't have a space for the counter then finish early. */
+	idx = armpmu->get_event_idx(hw_events, event);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
+
+	/*
+	 * If there is an event in the counter we are going to use then make
+	 * sure it is disabled.
+	 */
+	event->hw.idx = idx;
+	armpmu->disable(event);
+	hw_events->events[idx] = event;
+
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		armpmu_start(event, PERF_EF_RELOAD);
+
+	/* Propagate our changes to the userspace mapping. */
+	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
+}
+
+static int
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
+			       struct perf_event *event)
+{
+	struct arm_pmu *armpmu;
+
+	if (is_software_event(event))
+		return 1;
+
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
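+	/*
+	 * Events in an exit/error state, or disabled events that won't be
+	 * enabled on exec, never go onto the hardware, so they don't need
+	 * a counter.
+	 */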
+	if (event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+		return 1;
+
+	armpmu = to_arm_pmu(event->pmu);
+	return armpmu->get_event_idx(hw_events, event) >= 0;
+}
+
+static int
+validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct pmu_hw_events fake_pmu;
+
+	/*
+	 * Initialise the fake PMU. We only need to populate the
+	 * used_mask for the purposes of validation.
+	 */
+	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
+
+	if (!validate_event(event->pmu, &fake_pmu, leader))
+		return -EINVAL;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
+			return -EINVAL;
+	}
+
+	if (!validate_event(event->pmu, &fake_pmu, event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+{
+	struct arm_pmu *armpmu;
+	struct platform_device *plat_device;
+	struct arm_pmu_platdata *plat;
+	int ret;
+	u64 start_clock, finish_clock;
+
+	/*
+	 * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
+	 * the handlers expect a struct arm_pmu*. The percpu_irq framework will
+	 * do any necessary shifting; we just need to perform the first
+	 * dereference.
+	 */
+	armpmu = *(void **)dev;
+	plat_device = armpmu->plat_device;
+	plat = dev_get_platdata(&plat_device->dev);
+
+	start_clock = sched_clock();
+	if (plat && plat->handle_irq)
+		ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
+	else
+		ret = armpmu->handle_irq(irq, armpmu);
+	finish_clock = sched_clock();
+
+	perf_sample_event_took(finish_clock - start_clock);
+	return ret;
+}
+
+static void
+armpmu_release_hardware(struct arm_pmu *armpmu)
+{
+	armpmu->free_irq(armpmu);
+}
+
+static int
+armpmu_reserve_hardware(struct arm_pmu *armpmu)
+{
+	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+	if (err) {
+		armpmu_release_hardware(armpmu);
+		return err;
+	}
+
+	return 0;
+}
+
+static void
+hw_perf_event_destroy(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	atomic_t *active_events	 = &armpmu->active_events;
+	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
+
+	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
+		armpmu_release_hardware(armpmu);
+		mutex_unlock(pmu_reserve_mutex);
+	}
+}
+
+static int
+event_requires_mode_exclusion(struct perf_event_attr *attr)
+{
+	return attr->exclude_idle || attr->exclude_user ||
+	       attr->exclude_kernel || attr->exclude_hv;
+}
+
+static int
+__hw_perf_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+	int mapping;
+
+	mapping = armpmu->map_event(event);
+
+	if (mapping < 0) {
+		pr_debug("event %x:%llx not supported\n", event->attr.type,
+			 event->attr.config);
+		return mapping;
+	}
+
+	/*
+	 * We don't assign an index until we actually place the event onto
+	 * hardware. Use -1 to signify that we haven't decided where to put it
+	 * yet. For SMP systems, each core has its own PMU so we can't do any
+	 * clever allocation or constraints checking at this point.
+	 */
+	hwc->idx		= -1;
+	hwc->config_base	= 0;
+	hwc->config		= 0;
+	hwc->event_base		= 0;
+
+	/*
+	 * Check whether we need to exclude the counter from certain modes.
+	 */
+	if ((!armpmu->set_event_filter ||
+	     armpmu->set_event_filter(hwc, &event->attr)) &&
+	     event_requires_mode_exclusion(&event->attr)) {
+		pr_debug("ARM performance counters do not support "
+			 "mode exclusion\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Store the event encoding into the config_base field.
+	 */
+	hwc->config_base	    |= (unsigned long)mapping;
+
+	if (!is_sampling_event(event)) {
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
+		hwc->last_period    = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	if (event->group_leader != event) {
+		if (validate_group(event) != 0)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int armpmu_event_init(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	int err = 0;
+	atomic_t *active_events = &armpmu->active_events;
+
+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+		!cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	if (armpmu->map_event(event) == -ENOENT)
+		return -ENOENT;
+
+	event->destroy = hw_perf_event_destroy;
+
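+	/*
+	 * The first active event reserves the PMU interrupts; reserve_mutex
+	 * serialises this against the last event releasing them in
+	 * hw_perf_event_destroy().
+	 */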
+	if (!atomic_inc_not_zero(active_events)) {
+		mutex_lock(&armpmu->reserve_mutex);
+		if (atomic_read(active_events) == 0)
+			err = armpmu_reserve_hardware(armpmu);
+
+		if (!err)
+			atomic_inc(active_events);
+		mutex_unlock(&armpmu->reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static void armpmu_enable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
+	if (enabled)
+		armpmu->start(armpmu);
+}
+
+static void armpmu_disable(struct pmu *pmu)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(pmu);
+
+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
+	armpmu->stop(armpmu);
+}
+
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
+{
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	unsigned int cpu = smp_processor_id();
+	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+}
+
+static void armpmu_init(struct arm_pmu *armpmu)
+{
+	atomic_set(&armpmu->active_events, 0);
+	mutex_init(&armpmu->reserve_mutex);
+
+	armpmu->pmu = (struct pmu) {
+		.pmu_enable	= armpmu_enable,
+		.pmu_disable	= armpmu_disable,
+		.event_init	= armpmu_event_init,
+		.add		= armpmu_add,
+		.del		= armpmu_del,
+		.start		= armpmu_start,
+		.stop		= armpmu_stop,
+		.read		= armpmu_read,
+		.filter_match	= armpmu_filter_match,
+	};
+}
+
+int armpmu_register(struct arm_pmu *armpmu, int type)
+{
+	armpmu_init(armpmu);
+	pr_info("enabled with %s PMU driver, %d counters available\n",
+			armpmu->name, armpmu->num_events);
+	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
+}
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+	if (!__oprofile_cpu_pmu)
+		return NULL;
+
+	return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (__oprofile_cpu_pmu != NULL)
+		max_events = __oprofile_cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+		}
+	}
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
+	}
+
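+	/*
+	 * A single percpu (PPI) interrupt is shared by all CPUs; otherwise
+	 * each CPU gets its own SPI, routed via irq_set_affinity() below.
+	 */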
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
+			return err;
+		}
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * use it anyway. Otherwise, skip this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					irq, cpu);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	int err;
+	int cpu;
+	struct pmu_hw_events __percpu *cpu_hw_events;
+
+	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!cpu_hw_events)
+		return -ENOMEM;
+
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		goto out_hw_events;
+
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+		events->percpu_pmu = cpu_pmu;
+	}
+
+	cpu_pmu->hw_events	= cpu_hw_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu->reset)
+		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+			 cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	return 0;
+
+out_hw_events:
+	free_percpu(cpu_hw_events);
+	return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+			     const struct pmu_probe_info *info)
+{
+	int cpu = get_cpu();
+	unsigned int cpuid = read_cpuid_id();
+	int ret = -ENODEV;
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	for (; info->init != NULL; info++) {
+		if ((cpuid & info->mask) != info->cpuid)
+			continue;
+		ret = info->init(pmu);
+		break;
+	}
+
+	put_cpu();
+	return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+	int *irqs, i = 0;
+	bool using_spi = false;
+	struct platform_device *pdev = pmu->plat_device;
+
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	do {
+		struct device_node *dn;
+		int cpu, irq;
+
+		/* See if we have an affinity entry */
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", i);
+		if (!dn)
+			break;
+
+		/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
+		irq = platform_get_irq(pdev, i);
+		if (irq >= 0) {
+			bool spi = !irq_is_percpu(irq);
+
+			if (i > 0 && spi != using_spi) {
+				pr_err("PPI/SPI IRQ type mismatch for %s!\n",
+					dn->name);
+				kfree(irqs);
+				return -EINVAL;
+			}
+
+			using_spi = spi;
+		}
+
+		/* Now look up the logical CPU number */
+		for_each_possible_cpu(cpu)
+			if (dn == of_cpu_device_node_get(cpu))
+				break;
+
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			of_node_put(dn);
+			cpumask_setall(&pmu->supported_cpus);
+			break;
+		}
+		of_node_put(dn);
+
+		/* For SPIs, we need to track the affinity per IRQ */
+		if (using_spi) {
+			if (i >= pdev->num_resources) {
+				of_node_put(dn);
+				break;
+			}
+
+			irqs[i] = cpu;
+		}
+
+		/* Keep track of the CPUs containing this PMU type */
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+		of_node_put(dn);
+		i++;
+	} while (1);
+
+	/* If we didn't manage to parse anything, claim to support all CPUs */
+	if (cpumask_weight(&pmu->supported_cpus) == 0)
+		cpumask_setall(&pmu->supported_cpus);
+
+	/* If we matched up the IRQ affinities, use them to route the SPIs */
+	if (using_spi && i == pdev->num_resources)
+		pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
+	return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table)
+{
+	const struct of_device_id *of_id;
+	const int (*init_fn)(struct arm_pmu *);
+	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
+
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!\n");
+		return -ENOMEM;
+	}
+
+	if (!__oprofile_cpu_pmu)
+		__oprofile_cpu_pmu = pmu;
+
+	pmu->plat_device = pdev;
+
+	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+
+		ret = of_pmu_irq_cfg(pmu);
+		if (!ret)
+			ret = init_fn(pmu);
+	} else {
+		ret = probe_current_pmu(pmu, probe_table);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	if (ret) {
+		pr_info("failed to probe PMU!\n");
+		goto out_free;
+	}
+
+	ret = cpu_pmu_init(pmu);
+	if (ret)
+		goto out_free;
+
+	ret = armpmu_register(pmu, -1);
+	if (ret)
+		goto out_destroy;
+
+	return 0;
+
+out_destroy:
+	cpu_pmu_destroy(pmu);
+out_free:
+	pr_info("failed to register PMU devices!\n");
+	kfree(pmu);
+	return ret;
+}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 6b8dd16..47da573 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -54,6 +54,17 @@
 	  Support for MIPI CSI-2 and MIPI DSI DPHY found on Samsung S5P
 	  and EXYNOS SoCs.
 
+config PHY_LPC18XX_USB_OTG
+	tristate "NXP LPC18xx/43xx SoC USB OTG PHY driver"
+	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+	depends on MFD_SYSCON
+	select GENERIC_PHY
+	help
+	  Enable this to support the NXP LPC18xx/43xx internal USB OTG PHY.
+
+	  This driver is needed for USB0 support on LPC18xx/43xx and takes
+	  care of enabling the PHY and setting up its clock.
+
 config PHY_PXA_28NM_HSIC
 	tristate "Marvell USB HSIC 28nm PHY Driver"
 	depends on HAS_IOMEM
@@ -199,6 +210,8 @@
 	tristate "Allwinner sunxi SoC USB PHY driver"
 	depends on ARCH_SUNXI && HAS_IOMEM && OF
 	depends on RESET_CONTROLLER
+	depends on EXTCON
+	depends on POWER_SUPPLY
 	select GENERIC_PHY
 	help
 	  Enable this to support the transceiver that is part of Allwinner
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index f344e1b..a5b18c1 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_BCM_KONA_USB2_PHY)		+= phy-bcm-kona-usb2.o
 obj-$(CONFIG_PHY_EXYNOS_DP_VIDEO)	+= phy-exynos-dp-video.o
 obj-$(CONFIG_PHY_EXYNOS_MIPI_VIDEO)	+= phy-exynos-mipi-video.o
+obj-$(CONFIG_PHY_LPC18XX_USB_OTG)	+= phy-lpc18xx-usb-otg.o
 obj-$(CONFIG_PHY_PXA_28NM_USB2)		+= phy-pxa-28nm-usb2.o
 obj-$(CONFIG_PHY_PXA_28NM_HSIC)		+= phy-pxa-28nm-hsic.o
 obj-$(CONFIG_PHY_MVEBU_SATA)		+= phy-mvebu-sata.o
diff --git a/drivers/phy/phy-armada375-usb2.c b/drivers/phy/phy-armada375-usb2.c
index 8ccc395..1a3db28 100644
--- a/drivers/phy/phy-armada375-usb2.c
+++ b/drivers/phy/phy-armada375-usb2.c
@@ -51,7 +51,7 @@
 	return 0;
 }
 
-static struct phy_ops armada375_usb_phy_ops = {
+static const struct phy_ops armada375_usb_phy_ops = {
 	.init = armada375_usb_phy_init,
 	.owner = THIS_MODULE,
 };
@@ -149,7 +149,6 @@
 	.driver = {
 		.of_match_table	= of_usb_cluster_table,
 		.name  = "armada-375-usb-cluster",
-		.owner = THIS_MODULE,
 	}
 };
 module_platform_driver(armada375_usb_phy_driver);
diff --git a/drivers/phy/phy-bcm-kona-usb2.c b/drivers/phy/phy-bcm-kona-usb2.c
index ef2dc1a..7b67fe4 100644
--- a/drivers/phy/phy-bcm-kona-usb2.c
+++ b/drivers/phy/phy-bcm-kona-usb2.c
@@ -91,7 +91,7 @@
 	return 0;
 }
 
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
 	.init		= bcm_kona_usb_phy_init,
 	.power_on	= bcm_kona_usb_phy_power_on,
 	.power_off	= bcm_kona_usb_phy_power_off,
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 6f3e06d..0062027 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -176,7 +176,7 @@
 	return priv->phys[i]->phy;
 }
 
-static struct phy_ops phy_berlin_sata_ops = {
+static const struct phy_ops phy_berlin_sata_ops = {
 	.power_on	= phy_berlin_sata_power_on,
 	.power_off	= phy_berlin_sata_power_off,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index 335e06d..797ba17 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -147,12 +147,12 @@
 	return 0;
 }
 
-static struct phy_ops phy_berlin_usb_ops = {
+static const struct phy_ops phy_berlin_usb_ops = {
 	.power_on	= phy_berlin_usb_power_on,
 	.owner		= THIS_MODULE,
 };
 
-static const struct of_device_id phy_berlin_sata_of_match[] = {
+static const struct of_device_id phy_berlin_usb_of_match[] = {
 	{
 		.compatible = "marvell,berlin2-usb-phy",
 		.data = &phy_berlin_pll_dividers[0],
@@ -163,12 +163,12 @@
 	},
 	{ },
 };
-MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match);
+MODULE_DEVICE_TABLE(of, phy_berlin_usb_of_match);
 
 static int phy_berlin_usb_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *match =
-		of_match_device(phy_berlin_sata_of_match, &pdev->dev);
+		of_match_device(phy_berlin_usb_of_match, &pdev->dev);
 	struct phy_berlin_usb_priv *priv;
 	struct resource *res;
 	struct phy *phy;
@@ -207,9 +207,8 @@
 	.probe	= phy_berlin_usb_probe,
 	.driver	= {
 		.name		= "phy-berlin-usb",
-		.owner		= THIS_MODULE,
-		.of_match_table	= phy_berlin_sata_of_match,
-	 },
+		.of_match_table	= phy_berlin_usb_of_match,
+	},
 };
 module_platform_driver(phy_berlin_usb_driver);
 
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
index b7e303d..8a2cb16 100644
--- a/drivers/phy/phy-brcmstb-sata.c
+++ b/drivers/phy/phy-brcmstb-sata.c
@@ -122,7 +122,7 @@
 	return 0;
 }
 
-static struct phy_ops phy_ops_28nm = {
+static const struct phy_ops phy_ops_28nm = {
 	.init		= brcm_sata_phy_init,
 	.owner		= THIS_MODULE,
 };
diff --git a/drivers/phy/phy-dm816x-usb.c b/drivers/phy/phy-dm816x-usb.c
index 7b42555..b4bbef6 100644
--- a/drivers/phy/phy-dm816x-usb.c
+++ b/drivers/phy/phy-dm816x-usb.c
@@ -113,7 +113,7 @@
 	return 0;
 }
 
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
 	.init		= dm816x_usb_phy_init,
 	.owner		= THIS_MODULE,
 };
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 179cbf9..34b0615 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -48,7 +48,7 @@
 				  EXYNOS5_PHY_ENABLE, 0);
 }
 
-static struct phy_ops exynos_dp_video_phy_ops = {
+static const struct phy_ops exynos_dp_video_phy_ops = {
 	.power_on	= exynos_dp_video_phy_power_on,
 	.power_off	= exynos_dp_video_phy_power_off,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index df7519a..2a54cab 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -124,7 +124,7 @@
 	return state->phys[args->args[0]].phy;
 }
 
-static struct phy_ops exynos_mipi_video_phy_ops = {
+static const struct phy_ops exynos_mipi_video_phy_ops = {
 	.power_on	= exynos_mipi_video_phy_power_on,
 	.power_off	= exynos_mipi_video_phy_power_off,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c
index d72ef15..20696f5 100644
--- a/drivers/phy/phy-exynos5-usbdrd.c
+++ b/drivers/phy/phy-exynos5-usbdrd.c
@@ -537,7 +537,7 @@
 	return phy_drd->phys[args->args[0]].phy;
 }
 
-static struct phy_ops exynos5_usbdrd_phy_ops = {
+static const struct phy_ops exynos5_usbdrd_phy_ops = {
 	.init		= exynos5_usbdrd_phy_init,
 	.exit		= exynos5_usbdrd_phy_exit,
 	.power_on	= exynos5_usbdrd_phy_power_on,
diff --git a/drivers/phy/phy-exynos5250-sata.c b/drivers/phy/phy-exynos5250-sata.c
index bc858cc..60e13af 100644
--- a/drivers/phy/phy-exynos5250-sata.c
+++ b/drivers/phy/phy-exynos5250-sata.c
@@ -154,7 +154,7 @@
 	return ret;
 }
 
-static struct phy_ops exynos_sata_phy_ops = {
+static const struct phy_ops exynos_sata_phy_ops = {
 	.init		= exynos_sata_phy_init,
 	.power_on	= exynos_sata_phy_power_on,
 	.power_off	= exynos_sata_phy_power_off,
diff --git a/drivers/phy/phy-hix5hd2-sata.c b/drivers/phy/phy-hix5hd2-sata.c
index d6b2265..e5ab3aa 100644
--- a/drivers/phy/phy-hix5hd2-sata.c
+++ b/drivers/phy/phy-hix5hd2-sata.c
@@ -129,7 +129,7 @@
 	return 0;
 }
 
-static struct phy_ops hix5hd2_sata_phy_ops = {
+static const struct phy_ops hix5hd2_sata_phy_ops = {
 	.init		= hix5hd2_sata_phy_init,
 	.owner		= THIS_MODULE,
 };
diff --git a/drivers/phy/phy-lpc18xx-usb-otg.c b/drivers/phy/phy-lpc18xx-usb-otg.c
new file mode 100644
index 0000000..3b7a71e
--- /dev/null
+++ b/drivers/phy/phy-lpc18xx-usb-otg.c
@@ -0,0 +1,143 @@
+/*
+ * PHY driver for NXP LPC18xx/43xx internal USB OTG PHY
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* USB OTG PHY register offset and bit in CREG */
+#define LPC18XX_CREG_CREG0		0x004
+#define LPC18XX_CREG_CREG0_USB0PHY	BIT(5)
+
+struct lpc18xx_usb_otg_phy {
+	struct phy *phy;
+	struct clk *clk;
+	struct regmap *reg;
+};
+
+static int lpc18xx_usb_otg_phy_init(struct phy *phy)
+{
+	struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+	int ret;
+
+	/* The PHY must be clocked at 480 MHz */
+	ret = clk_set_rate(lpc->clk, 480000000);
+	if (ret)
+		return ret;
+
+	return clk_prepare(lpc->clk);
+}
+
+static int lpc18xx_usb_otg_phy_exit(struct phy *phy)
+{
+	struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+
+	clk_unprepare(lpc->clk);
+
+	return 0;
+}
+
+static int lpc18xx_usb_otg_phy_power_on(struct phy *phy)
+{
+	struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+	int ret;
+
+	ret = clk_enable(lpc->clk);
+	if (ret)
+		return ret;
+
+	/* The bit in CREG is cleared to enable the PHY */
+	return regmap_update_bits(lpc->reg, LPC18XX_CREG_CREG0,
+				  LPC18XX_CREG_CREG0_USB0PHY, 0);
+}
+
+static int lpc18xx_usb_otg_phy_power_off(struct phy *phy)
+{
+	struct lpc18xx_usb_otg_phy *lpc = phy_get_drvdata(phy);
+	int ret;
+
+	ret = regmap_update_bits(lpc->reg, LPC18XX_CREG_CREG0,
+				 LPC18XX_CREG_CREG0_USB0PHY,
+				 LPC18XX_CREG_CREG0_USB0PHY);
+	if (ret)
+		return ret;
+
+	clk_disable(lpc->clk);
+
+	return 0;
+}
+
+static const struct phy_ops lpc18xx_usb_otg_phy_ops = {
+	.init		= lpc18xx_usb_otg_phy_init,
+	.exit		= lpc18xx_usb_otg_phy_exit,
+	.power_on	= lpc18xx_usb_otg_phy_power_on,
+	.power_off	= lpc18xx_usb_otg_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+static int lpc18xx_usb_otg_phy_probe(struct platform_device *pdev)
+{
+	struct phy_provider *phy_provider;
+	struct lpc18xx_usb_otg_phy *lpc;
+
+	lpc = devm_kzalloc(&pdev->dev, sizeof(*lpc), GFP_KERNEL);
+	if (!lpc)
+		return -ENOMEM;
+
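+	/* The USB0 PHY enable bit lives in CREG, our parent syscon block. */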
+	lpc->reg = syscon_node_to_regmap(pdev->dev.of_node->parent);
+	if (IS_ERR(lpc->reg)) {
+		dev_err(&pdev->dev, "failed to get syscon\n");
+		return PTR_ERR(lpc->reg);
+	}
+
+	lpc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(lpc->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(lpc->clk);
+	}
+
+	lpc->phy = devm_phy_create(&pdev->dev, NULL, &lpc18xx_usb_otg_phy_ops);
+	if (IS_ERR(lpc->phy)) {
+		dev_err(&pdev->dev, "failed to create PHY\n");
+		return PTR_ERR(lpc->phy);
+	}
+
+	phy_set_drvdata(lpc->phy, lpc);
+
+	phy_provider = devm_of_phy_provider_register(&pdev->dev,
+						     of_phy_simple_xlate);
+
+	return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id lpc18xx_usb_otg_phy_match[] = {
+	{ .compatible = "nxp,lpc1850-usb-otg-phy" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_usb_otg_phy_match);
+
+static struct platform_driver lpc18xx_usb_otg_phy_driver = {
+	.probe		= lpc18xx_usb_otg_phy_probe,
+	.driver		= {
+		.name	= "lpc18xx-usb-otg-phy",
+		.of_match_table = lpc18xx_usb_otg_phy_match,
+	},
+};
+module_platform_driver(lpc18xx_usb_otg_phy_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("NXP LPC18xx/43xx USB OTG PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index 5e257ef..c47b56b 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1132,7 +1132,7 @@
 	return miphy_phy->phy;
 }
 
-static struct phy_ops miphy28lp_ops = {
+static const struct phy_ops miphy28lp_ops = {
 	.init = miphy28lp_init,
 	.owner = THIS_MODULE,
 };
@@ -1268,7 +1268,6 @@
 	.probe = miphy28lp_probe,
 	.driver = {
 		.name = "miphy28lp-phy",
-		.owner = THIS_MODULE,
 		.of_match_table = miphy28lp_of_match,
 	}
 };
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 0ff354d..00a686a 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -510,7 +510,7 @@
 	return miphy_phy->phy;
 }
 
-static struct phy_ops miphy365x_ops = {
+static const struct phy_ops miphy365x_ops = {
 	.init		= miphy365x_init,
 	.owner		= THIS_MODULE,
 };
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index 03b94f9..768ce92 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -75,7 +75,7 @@
 	return 0;
 }
 
-static struct phy_ops phy_mvebu_sata_ops = {
+static const struct phy_ops phy_mvebu_sata_ops = {
 	.power_on	= phy_mvebu_sata_power_on,
 	.power_off	= phy_mvebu_sata_power_off,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index c1a4686..0fe8058 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -137,7 +137,7 @@
 	return 0;
 }
 
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
 	.init		= omap_usb_init,
 	.power_on	= omap_usb_power_on,
 	.power_off	= omap_usb_power_off,
diff --git a/drivers/phy/phy-qcom-apq8064-sata.c b/drivers/phy/phy-qcom-apq8064-sata.c
index 4b243f7..69ce2af 100644
--- a/drivers/phy/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/phy-qcom-apq8064-sata.c
@@ -204,7 +204,7 @@
 	return 0;
 }
 
-static struct phy_ops qcom_apq8064_sata_phy_ops = {
+static const struct phy_ops qcom_apq8064_sata_phy_ops = {
 	.init		= qcom_apq8064_sata_phy_init,
 	.exit		= qcom_apq8064_sata_phy_exit,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-qcom-ipq806x-sata.c b/drivers/phy/phy-qcom-ipq806x-sata.c
index 6f2fe26..0ad127c 100644
--- a/drivers/phy/phy-qcom-ipq806x-sata.c
+++ b/drivers/phy/phy-qcom-ipq806x-sata.c
@@ -126,7 +126,7 @@
 	return 0;
 }
 
-static struct phy_ops qcom_ipq806x_sata_phy_ops = {
+static const struct phy_ops qcom_ipq806x_sata_phy_ops = {
 	.init		= qcom_ipq806x_sata_phy_init,
 	.exit		= qcom_ipq806x_sata_phy_exit,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 591a391..2bd5ce4 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -150,7 +150,7 @@
 		       struct ufs_qcom_phy *ufs_qcom_phy);
 struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
 			struct ufs_qcom_phy *common_cfg,
-			struct phy_ops *ufs_qcom_phy_gen_ops,
+			const struct phy_ops *ufs_qcom_phy_gen_ops,
 			struct ufs_qcom_phy_specific_ops *phy_spec_ops);
 int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 			struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index f5fc50a..56631e7 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -115,7 +115,7 @@
 	return err;
 }
 
-static struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
+static const struct phy_ops ufs_qcom_phy_qmp_14nm_phy_ops = {
 	.init		= ufs_qcom_phy_qmp_14nm_init,
 	.exit		= ufs_qcom_phy_exit,
 	.power_on	= ufs_qcom_phy_power_on,
@@ -191,7 +191,6 @@
 	.driver = {
 		.of_match_table = ufs_qcom_phy_qmp_14nm_of_match,
 		.name = "ufs_qcom_phy_qmp_14nm",
-		.owner = THIS_MODULE,
 	},
 };
 
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
index 8332f96..b16ea77 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
@@ -171,7 +171,7 @@
 	return err;
 }
 
-static struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
+static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
 	.init		= ufs_qcom_phy_qmp_20nm_init,
 	.exit		= ufs_qcom_phy_exit,
 	.power_on	= ufs_qcom_phy_power_on,
@@ -247,7 +247,6 @@
 	.driver = {
 		.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
 		.name = "ufs_qcom_phy_qmp_20nm",
-		.owner = THIS_MODULE,
 	},
 };
 
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index f9c618f..49a1ed0 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -77,7 +77,7 @@
 
 struct phy *ufs_qcom_phy_generic_probe(struct platform_device *pdev,
 				struct ufs_qcom_phy *common_cfg,
-				struct phy_ops *ufs_qcom_phy_gen_ops,
+				const struct phy_ops *ufs_qcom_phy_gen_ops,
 				struct ufs_qcom_phy_specific_ops *phy_spec_ops)
 {
 	int err;
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 39d9b29..6e0d9fa 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -184,7 +184,7 @@
 	return 0;
 }
 
-static struct phy_ops rcar_gen2_phy_ops = {
+static const struct phy_ops rcar_gen2_phy_ops = {
 	.init		= rcar_gen2_phy_init,
 	.exit		= rcar_gen2_phy_exit,
 	.power_on	= rcar_gen2_phy_power_on,
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 7d4c336..5a5c073 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -84,7 +84,7 @@
 	return 0;
 }
 
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
 	.power_on	= rockchip_usb_phy_power_on,
 	.power_off	= rockchip_usb_phy_power_off,
 	.owner		= THIS_MODULE,
@@ -146,7 +146,6 @@
 	.probe		= rockchip_usb_phy_probe,
 	.driver		= {
 		.name	= "rockchip-usb-phy",
-		.owner	= THIS_MODULE,
 		.of_match_table = rockchip_usb_phy_dt_ids,
 	},
 };
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 55b6994..f278a9c 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -71,7 +71,7 @@
 	return 0;
 }
 
-static struct phy_ops samsung_usb2_phy_ops = {
+static const struct phy_ops samsung_usb2_phy_ops = {
 	.power_on	= samsung_usb2_phy_power_on,
 	.power_off	= samsung_usb2_phy_power_off,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/phy-spear1310-miphy.c
index 45d0005..ed67e98 100644
--- a/drivers/phy/phy-spear1310-miphy.c
+++ b/drivers/phy/phy-spear1310-miphy.c
@@ -179,7 +179,7 @@
 };
 MODULE_DEVICE_TABLE(of, spear1310_miphy_of_match);
 
-static struct phy_ops spear1310_miphy_ops = {
+static const struct phy_ops spear1310_miphy_ops = {
 	.init = spear1310_miphy_init,
 	.exit = spear1310_miphy_exit,
 	.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/phy-spear1340-miphy.c
index 494240d..97280c0 100644
--- a/drivers/phy/phy-spear1340-miphy.c
+++ b/drivers/phy/phy-spear1340-miphy.c
@@ -189,7 +189,7 @@
 };
 MODULE_DEVICE_TABLE(of, spear1340_miphy_of_match);
 
-static struct phy_ops spear1340_miphy_ops = {
+static const struct phy_ops spear1340_miphy_ops = {
 	.init = spear1340_miphy_init,
 	.exit = spear1340_miphy_exit,
 	.owner = THIS_MODULE,
diff --git a/drivers/phy/phy-stih41x-usb.c b/drivers/phy/phy-stih41x-usb.c
index c093b47..0ac7463 100644
--- a/drivers/phy/phy-stih41x-usb.c
+++ b/drivers/phy/phy-stih41x-usb.c
@@ -112,7 +112,7 @@
 	return 0;
 }
 
-static struct phy_ops stih41x_usb_phy_ops = {
+static const struct phy_ops stih41x_usb_phy_ops = {
 	.init		= stih41x_usb_phy_init,
 	.power_on	= stih41x_usb_phy_power_on,
 	.power_off	= stih41x_usb_phy_power_off,
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index 2dad7e8..731b395 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -1,7 +1,7 @@
 /*
  * Allwinner sun4i USB phy driver
  *
- * Copyright (C) 2014 Hans de Goede <hdegoede@redhat.com>
+ * Copyright (C) 2014-2015 Hans de Goede <hdegoede@redhat.com>
  *
  * Based on code from
  * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
@@ -22,23 +22,30 @@
  */
 
 #include <linux/clk.h>
+#include <linux/delay.h>
 #include <linux/err.h>
+#include <linux/extcon.h>
 #include <linux/io.h>
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_gpio.h>
 #include <linux/phy/phy.h>
 #include <linux/phy/phy-sun4i-usb.h>
 #include <linux/platform_device.h>
+#include <linux/power_supply.h>
 #include <linux/regulator/consumer.h>
 #include <linux/reset.h>
+#include <linux/workqueue.h>
 
 #define REG_ISCR			0x00
-#define REG_PHYCTL			0x04
+#define REG_PHYCTL_A10			0x04
 #define REG_PHYBIST			0x08
 #define REG_PHYTUNE			0x0c
+#define REG_PHYCTL_A33			0x10
 
 #define PHYCTL_DATA			BIT(7)
 
@@ -47,6 +54,17 @@
 #define SUNXI_AHB_INCRX_ALIGN_EN	BIT(8)
 #define SUNXI_ULPI_BYPASS_EN		BIT(0)
 
+/* ISCR, Interface Status and Control bits */
+#define ISCR_ID_PULLUP_EN		(1 << 17)
+#define ISCR_DPDM_PULLUP_EN	(1 << 16)
+/* sunxi has the phy id/vbus pins not connected, so we use the force bits */
+#define ISCR_FORCE_ID_MASK	(3 << 14)
+#define ISCR_FORCE_ID_LOW		(2 << 14)
+#define ISCR_FORCE_ID_HIGH	(3 << 14)
+#define ISCR_FORCE_VBUS_MASK	(3 << 12)
+#define ISCR_FORCE_VBUS_LOW	(2 << 12)
+#define ISCR_FORCE_VBUS_HIGH	(3 << 12)
+
 /* Common Control Bits for Both PHYs */
 #define PHY_PLL_BW			0x03
 #define PHY_RES45_CAL_EN		0x0c
@@ -63,60 +81,124 @@
 
 #define MAX_PHYS			3
 
+/*
+ * Note: do not raise the debounce time; we must report Vbus high within
+ * 100ms, otherwise we get Vbus errors.
+ */
+#define DEBOUNCE_TIME			msecs_to_jiffies(50)
+#define POLL_TIME			msecs_to_jiffies(250)
+
 struct sun4i_usb_phy_data {
 	void __iomem *base;
 	struct mutex mutex;
 	int num_phys;
 	u32 disc_thresh;
+	bool has_a33_phyctl;
 	struct sun4i_usb_phy {
 		struct phy *phy;
 		void __iomem *pmu;
 		struct regulator *vbus;
 		struct reset_control *reset;
 		struct clk *clk;
+		bool regulator_on;
 		int index;
 	} phys[MAX_PHYS];
+	/* phy0 / otg related variables */
+	struct extcon_dev *extcon;
+	bool phy0_init;
+	bool phy0_poll;
+	struct gpio_desc *id_det_gpio;
+	struct gpio_desc *vbus_det_gpio;
+	struct power_supply *vbus_power_supply;
+	struct notifier_block vbus_power_nb;
+	bool vbus_power_nb_registered;
+	int id_det_irq;
+	int vbus_det_irq;
+	int id_det;
+	int vbus_det;
+	struct delayed_work detect;
 };
 
 #define to_sun4i_usb_phy_data(phy) \
 	container_of((phy), struct sun4i_usb_phy_data, phys[(phy)->index])
 
+static void sun4i_usb_phy0_update_iscr(struct phy *_phy, u32 clr, u32 set)
+{
+	struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+	struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+	u32 iscr;
+
+	iscr = readl(data->base + REG_ISCR);
+	iscr &= ~clr;
+	iscr |= set;
+	writel(iscr, data->base + REG_ISCR);
+}
+
+static void sun4i_usb_phy0_set_id_detect(struct phy *phy, u32 val)
+{
+	if (val)
+		val = ISCR_FORCE_ID_HIGH;
+	else
+		val = ISCR_FORCE_ID_LOW;
+
+	sun4i_usb_phy0_update_iscr(phy, ISCR_FORCE_ID_MASK, val);
+}
+
+static void sun4i_usb_phy0_set_vbus_detect(struct phy *phy, u32 val)
+{
+	if (val)
+		val = ISCR_FORCE_VBUS_HIGH;
+	else
+		val = ISCR_FORCE_VBUS_LOW;
+
+	sun4i_usb_phy0_update_iscr(phy, ISCR_FORCE_VBUS_MASK, val);
+}
+
 static void sun4i_usb_phy_write(struct sun4i_usb_phy *phy, u32 addr, u32 data,
 				int len)
 {
 	struct sun4i_usb_phy_data *phy_data = to_sun4i_usb_phy_data(phy);
 	u32 temp, usbc_bit = BIT(phy->index * 2);
+	void *phyctl;
 	int i;
 
 	mutex_lock(&phy_data->mutex);
 
+	if (phy_data->has_a33_phyctl) {
+		phyctl = phy_data->base + REG_PHYCTL_A33;
+		/* A33 needs us to set phyctl to 0 explicitly */
+		writel(0, phyctl);
+	} else {
+		phyctl = phy_data->base + REG_PHYCTL_A10;
+	}
+
 	for (i = 0; i < len; i++) {
-		temp = readl(phy_data->base + REG_PHYCTL);
+		temp = readl(phyctl);
 
 		/* clear the address portion */
 		temp &= ~(0xff << 8);
 
 		/* set the address */
 		temp |= ((addr + i) << 8);
-		writel(temp, phy_data->base + REG_PHYCTL);
+		writel(temp, phyctl);
 
 		/* set the data bit and clear usbc bit*/
-		temp = readb(phy_data->base + REG_PHYCTL);
+		temp = readb(phyctl);
 		if (data & 0x1)
 			temp |= PHYCTL_DATA;
 		else
 			temp &= ~PHYCTL_DATA;
 		temp &= ~usbc_bit;
-		writeb(temp, phy_data->base + REG_PHYCTL);
+		writeb(temp, phyctl);
 
 		/* pulse usbc_bit */
-		temp = readb(phy_data->base + REG_PHYCTL);
+		temp = readb(phyctl);
 		temp |= usbc_bit;
-		writeb(temp, phy_data->base + REG_PHYCTL);
+		writeb(temp, phyctl);
 
-		temp = readb(phy_data->base + REG_PHYCTL);
+		temp = readb(phyctl);
 		temp &= ~usbc_bit;
-		writeb(temp, phy_data->base + REG_PHYCTL);
+		writeb(temp, phyctl);
 
 		data >>= 1;
 	}
@@ -171,12 +253,39 @@
 
 	sun4i_usb_phy_passby(phy, 1);
 
+	if (phy->index == 0) {
+		data->phy0_init = true;
+
+		/* Enable pull-ups */
+		sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_DPDM_PULLUP_EN);
+		sun4i_usb_phy0_update_iscr(_phy, 0, ISCR_ID_PULLUP_EN);
+
+		if (data->id_det_gpio) {
+			/* OTG mode, force ISCR and cable state updates */
+			data->id_det = -1;
+			data->vbus_det = -1;
+			queue_delayed_work(system_wq, &data->detect, 0);
+		} else {
+			/* Host only mode */
+			sun4i_usb_phy0_set_id_detect(_phy, 0);
+			sun4i_usb_phy0_set_vbus_detect(_phy, 1);
+		}
+	}
+
 	return 0;
 }
 
 static int sun4i_usb_phy_exit(struct phy *_phy)
 {
 	struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+	struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+
+	if (phy->index == 0) {
+		/* Disable pull-ups */
+		sun4i_usb_phy0_update_iscr(_phy, ISCR_DPDM_PULLUP_EN, 0);
+		sun4i_usb_phy0_update_iscr(_phy, ISCR_ID_PULLUP_EN, 0);
+		data->phy0_init = false;
+	}
 
 	sun4i_usb_phy_passby(phy, 0);
 	reset_control_assert(phy->reset);
@@ -185,23 +294,74 @@
 	return 0;
 }
 
+static int sun4i_usb_phy0_get_vbus_det(struct sun4i_usb_phy_data *data)
+{
+	if (data->vbus_det_gpio)
+		return gpiod_get_value_cansleep(data->vbus_det_gpio);
+
+	if (data->vbus_power_supply) {
+		union power_supply_propval val;
+		int r;
+
+		r = power_supply_get_property(data->vbus_power_supply,
+					      POWER_SUPPLY_PROP_PRESENT, &val);
+		if (r == 0)
+			return val.intval;
+	}
+
+	/* Fallback: report vbus as high */
+	return 1;
+}
+
+static bool sun4i_usb_phy0_have_vbus_det(struct sun4i_usb_phy_data *data)
+{
+	return data->vbus_det_gpio || data->vbus_power_supply;
+}
+
 static int sun4i_usb_phy_power_on(struct phy *_phy)
 {
 	struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
-	int ret = 0;
+	struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
+	int ret;
 
-	if (phy->vbus)
-		ret = regulator_enable(phy->vbus);
+	if (!phy->vbus || phy->regulator_on)
+		return 0;
 
-	return ret;
+	/* For phy0 only turn on Vbus if we don't have an ext. Vbus */
+	if (phy->index == 0 && sun4i_usb_phy0_have_vbus_det(data) &&
+				data->vbus_det)
+		return 0;
+
+	ret = regulator_enable(phy->vbus);
+	if (ret)
+		return ret;
+
+	phy->regulator_on = true;
+
+	/* We must report Vbus high within OTG_TIME_A_WAIT_VRISE msec. */
+	if (phy->index == 0 && data->vbus_det_gpio && data->phy0_poll)
+		mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
+
+	return 0;
 }
 
 static int sun4i_usb_phy_power_off(struct phy *_phy)
 {
 	struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
+	struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
 
-	if (phy->vbus)
-		regulator_disable(phy->vbus);
+	if (!phy->vbus || !phy->regulator_on)
+		return 0;
+
+	regulator_disable(phy->vbus);
+	phy->regulator_on = false;
+
+	/*
+	 * phy0 vbus typically slowly discharges, sometimes this causes the
+	 * Vbus gpio to not trigger an edge irq on Vbus off, so force a rescan.
+	 */
+	if (phy->index == 0 && data->vbus_det_gpio && !data->phy0_poll)
+		mod_delayed_work(system_wq, &data->detect, POLL_TIME);
 
 	return 0;
 }
@@ -214,7 +374,7 @@
 }
 EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
 
-static struct phy_ops sun4i_usb_phy_ops = {
+static const struct phy_ops sun4i_usb_phy_ops = {
 	.init		= sun4i_usb_phy_init,
 	.exit		= sun4i_usb_phy_exit,
 	.power_on	= sun4i_usb_phy_power_on,
@@ -222,6 +382,95 @@
 	.owner		= THIS_MODULE,
 };
 
+static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
+{
+	struct sun4i_usb_phy_data *data =
+		container_of(work, struct sun4i_usb_phy_data, detect.work);
+	struct phy *phy0 = data->phys[0].phy;
+	int id_det, vbus_det, id_notify = 0, vbus_notify = 0;
+
+	id_det = gpiod_get_value_cansleep(data->id_det_gpio);
+	vbus_det = sun4i_usb_phy0_get_vbus_det(data);
+
+	mutex_lock(&phy0->mutex);
+
+	if (!data->phy0_init) {
+		mutex_unlock(&phy0->mutex);
+		return;
+	}
+
+	if (id_det != data->id_det) {
+		/*
+		 * When a host cable (id == 0) gets plugged in on systems
+		 * without vbus detection report vbus low for long enough for
+		 * the musb-ip to end the current device session.
+		 */
+		if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 0) {
+			sun4i_usb_phy0_set_vbus_detect(phy0, 0);
+			msleep(200);
+			sun4i_usb_phy0_set_vbus_detect(phy0, 1);
+		}
+		sun4i_usb_phy0_set_id_detect(phy0, id_det);
+		data->id_det = id_det;
+		id_notify = 1;
+	}
+
+	if (vbus_det != data->vbus_det) {
+		sun4i_usb_phy0_set_vbus_detect(phy0, vbus_det);
+		data->vbus_det = vbus_det;
+		vbus_notify = 1;
+	}
+
+	mutex_unlock(&phy0->mutex);
+
+	if (id_notify) {
+		extcon_set_cable_state_(data->extcon, EXTCON_USB_HOST,
+					!id_det);
+		/*
+		 * When a host cable gets unplugged (id == 1) on systems
+		 * without vbus detection report vbus low for long enough for
+		 * the musb-ip to end the current host session.
+		 */
+		if (!sun4i_usb_phy0_have_vbus_det(data) && id_det == 1) {
+			mutex_lock(&phy0->mutex);
+			sun4i_usb_phy0_set_vbus_detect(phy0, 0);
+			msleep(1000);
+			sun4i_usb_phy0_set_vbus_detect(phy0, 1);
+			mutex_unlock(&phy0->mutex);
+		}
+	}
+
+	if (vbus_notify)
+		extcon_set_cable_state_(data->extcon, EXTCON_USB, vbus_det);
+
+	if (data->phy0_poll)
+		queue_delayed_work(system_wq, &data->detect, POLL_TIME);
+}
+
+static irqreturn_t sun4i_usb_phy0_id_vbus_det_irq(int irq, void *dev_id)
+{
+	struct sun4i_usb_phy_data *data = dev_id;
+
+	/* vbus or id changed, let the pins settle and then scan them */
+	mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
+
+	return IRQ_HANDLED;
+}
+
+static int sun4i_usb_phy0_vbus_notify(struct notifier_block *nb,
+				      unsigned long val, void *v)
+{
+	struct sun4i_usb_phy_data *data =
+		container_of(nb, struct sun4i_usb_phy_data, vbus_power_nb);
+	struct power_supply *psy = v;
+
+	/* Properties on the vbus_power_supply changed, scan vbus_det */
+	if (val == PSY_EVENT_PROP_CHANGED && psy == data->vbus_power_supply)
+		mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
+
+	return NOTIFY_OK;
+}
+
 static struct phy *sun4i_usb_phy_xlate(struct device *dev,
 					struct of_phandle_args *args)
 {
@@ -233,6 +482,29 @@
 	return data->phys[args->args[0]].phy;
 }
 
+static int sun4i_usb_phy_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
+
+	if (data->vbus_power_nb_registered)
+		power_supply_unreg_notifier(&data->vbus_power_nb);
+	if (data->id_det_irq >= 0)
+		devm_free_irq(dev, data->id_det_irq, data);
+	if (data->vbus_det_irq >= 0)
+		devm_free_irq(dev, data->vbus_det_irq, data);
+
+	cancel_delayed_work_sync(&data->detect);
+
+	return 0;
+}
+
+static const unsigned int sun4i_usb_phy0_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_NONE,
+};
+
 static int sun4i_usb_phy_probe(struct platform_device *pdev)
 {
 	struct sun4i_usb_phy_data *data;
@@ -241,35 +513,87 @@
 	struct phy_provider *phy_provider;
 	bool dedicated_clocks;
 	struct resource *res;
-	int i;
+	int i, ret;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
 	mutex_init(&data->mutex);
+	INIT_DELAYED_WORK(&data->detect, sun4i_usb_phy0_id_vbus_det_scan);
+	dev_set_drvdata(dev, data);
 
-	if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy"))
+	if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy") ||
+	    of_device_is_compatible(np, "allwinner,sun8i-a23-usb-phy") ||
+	    of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy"))
 		data->num_phys = 2;
 	else
 		data->num_phys = 3;
 
-	if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
-	    of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
-		data->disc_thresh = 3;
-	else
+	if (of_device_is_compatible(np, "allwinner,sun5i-a13-usb-phy") ||
+	    of_device_is_compatible(np, "allwinner,sun7i-a20-usb-phy"))
 		data->disc_thresh = 2;
+	else
+		data->disc_thresh = 3;
 
-	if (of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
+	if (of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy") ||
+	    of_device_is_compatible(np, "allwinner,sun8i-a23-usb-phy") ||
+	    of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy"))
 		dedicated_clocks = true;
 	else
 		dedicated_clocks = false;
 
+	if (of_device_is_compatible(np, "allwinner,sun8i-a33-usb-phy"))
+		data->has_a33_phyctl = true;
+
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl");
 	data->base = devm_ioremap_resource(dev, res);
 	if (IS_ERR(data->base))
 		return PTR_ERR(data->base);
 
+	data->id_det_gpio = devm_gpiod_get(dev, "usb0_id_det", GPIOD_IN);
+	if (IS_ERR(data->id_det_gpio)) {
+		if (PTR_ERR(data->id_det_gpio) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		data->id_det_gpio = NULL;
+	}
+
+	data->vbus_det_gpio = devm_gpiod_get(dev, "usb0_vbus_det", GPIOD_IN);
+	if (IS_ERR(data->vbus_det_gpio)) {
+		if (PTR_ERR(data->vbus_det_gpio) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		data->vbus_det_gpio = NULL;
+	}
+
+	if (of_find_property(np, "usb0_vbus_power-supply", NULL)) {
+		data->vbus_power_supply = devm_power_supply_get_by_phandle(dev,
+						     "usb0_vbus_power-supply");
+		if (IS_ERR(data->vbus_power_supply))
+			return PTR_ERR(data->vbus_power_supply);
+
+		if (!data->vbus_power_supply)
+			return -EPROBE_DEFER;
+	}
+
+	/* vbus_det without id_det makes no sense, and is not supported */
+	if (sun4i_usb_phy0_have_vbus_det(data) && !data->id_det_gpio) {
+		dev_err(dev, "usb0_id_det missing or invalid\n");
+		return -ENODEV;
+	}
+
+	if (data->id_det_gpio) {
+		data->extcon = devm_extcon_dev_allocate(dev,
+							sun4i_usb_phy0_cable);
+		if (IS_ERR(data->extcon))
+			return PTR_ERR(data->extcon);
+
+		ret = devm_extcon_dev_register(dev, data->extcon);
+		if (ret) {
+			dev_err(dev, "failed to register extcon: %d\n", ret);
+			return ret;
+		}
+	}
+
 	for (i = 0; i < data->num_phys; i++) {
 		struct sun4i_usb_phy *phy = data->phys + i;
 		char name[16];
@@ -319,10 +643,54 @@
 		phy_set_drvdata(phy->phy, &data->phys[i]);
 	}
 
-	dev_set_drvdata(dev, data);
-	phy_provider = devm_of_phy_provider_register(dev, sun4i_usb_phy_xlate);
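+	/* Fall back to polling if either detect GPIO cannot raise an IRQ. */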
+	data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
+	data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
+	if ((data->id_det_gpio && data->id_det_irq < 0) ||
+	    (data->vbus_det_gpio && data->vbus_det_irq < 0))
+		data->phy0_poll = true;
 
-	return PTR_ERR_OR_ZERO(phy_provider);
+	if (data->id_det_irq >= 0) {
+		ret = devm_request_irq(dev, data->id_det_irq,
+				sun4i_usb_phy0_id_vbus_det_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"usb0-id-det", data);
+		if (ret) {
+			dev_err(dev, "Err requesting id-det-irq: %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (data->vbus_det_irq >= 0) {
+		ret = devm_request_irq(dev, data->vbus_det_irq,
+				sun4i_usb_phy0_id_vbus_det_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"usb0-vbus-det", data);
+		if (ret) {
+			dev_err(dev, "Err requesting vbus-det-irq: %d\n", ret);
+			data->vbus_det_irq = -1;
+			sun4i_usb_phy_remove(pdev); /* Stop detect work */
+			return ret;
+		}
+	}
+
+	if (data->vbus_power_supply) {
+		data->vbus_power_nb.notifier_call = sun4i_usb_phy0_vbus_notify;
+		data->vbus_power_nb.priority = 0;
+		ret = power_supply_reg_notifier(&data->vbus_power_nb);
+		if (ret) {
+			sun4i_usb_phy_remove(pdev); /* Stop detect work */
+			return ret;
+		}
+		data->vbus_power_nb_registered = true;
+	}
+
+	phy_provider = devm_of_phy_provider_register(dev, sun4i_usb_phy_xlate);
+	if (IS_ERR(phy_provider)) {
+		sun4i_usb_phy_remove(pdev); /* Stop detect work */
+		return PTR_ERR(phy_provider);
+	}
+
+	return 0;
 }
 
 static const struct of_device_id sun4i_usb_phy_of_match[] = {
@@ -330,12 +698,15 @@
 	{ .compatible = "allwinner,sun5i-a13-usb-phy" },
 	{ .compatible = "allwinner,sun6i-a31-usb-phy" },
 	{ .compatible = "allwinner,sun7i-a20-usb-phy" },
+	{ .compatible = "allwinner,sun8i-a23-usb-phy" },
+	{ .compatible = "allwinner,sun8i-a33-usb-phy" },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
 
 static struct platform_driver sun4i_usb_phy_driver = {
 	.probe	= sun4i_usb_phy_probe,
+	.remove	= sun4i_usb_phy_remove,
 	.driver = {
 		.of_match_table	= sun4i_usb_phy_of_match,
 		.name  = "sun4i-usb-phy",
diff --git a/drivers/phy/phy-sun9i-usb.c b/drivers/phy/phy-sun9i-usb.c
index 0095914..ac4f31a 100644
--- a/drivers/phy/phy-sun9i-usb.c
+++ b/drivers/phy/phy-sun9i-usb.c
@@ -114,7 +114,7 @@
 	return 0;
 }
 
-static struct phy_ops sun9i_usb_phy_ops = {
+static const struct phy_ops sun9i_usb_phy_ops = {
 	.init		= sun9i_usb_phy_init,
 	.exit		= sun9i_usb_phy_exit,
 	.owner		= THIS_MODULE,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 08020dc..93bc112 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -298,7 +298,7 @@
 
 	return 0;
 }
-static struct phy_ops ops = {
+static const struct phy_ops ops = {
 	.init		= ti_pipe3_init,
 	.exit		= ti_pipe3_exit,
 	.power_on	= ti_pipe3_power_on,
diff --git a/drivers/phy/phy-tusb1210.c b/drivers/phy/phy-tusb1210.c
index 07efdd3..4f6d5e7 100644
--- a/drivers/phy/phy-tusb1210.c
+++ b/drivers/phy/phy-tusb1210.c
@@ -53,7 +53,7 @@
 	return 0;
 }
 
-static struct phy_ops phy_ops = {
+static const struct phy_ops phy_ops = {
 	.power_on = tusb1210_power_on,
 	.power_off = tusb1210_power_off,
 	.owner = THIS_MODULE,
@@ -61,32 +61,26 @@
 
 static int tusb1210_probe(struct ulpi *ulpi)
 {
-	struct gpio_desc *gpio;
 	struct tusb1210 *tusb;
 	u8 val, reg;
-	int ret;
 
 	tusb = devm_kzalloc(&ulpi->dev, sizeof(*tusb), GFP_KERNEL);
 	if (!tusb)
 		return -ENOMEM;
 
-	gpio = devm_gpiod_get(&ulpi->dev, "reset");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		gpiod_set_value_cansleep(gpio, 1);
-		tusb->gpio_reset = gpio;
-	}
+	tusb->gpio_reset = devm_gpiod_get_optional(&ulpi->dev, "reset",
+						   GPIOD_OUT_LOW);
+	if (IS_ERR(tusb->gpio_reset))
+		return PTR_ERR(tusb->gpio_reset);
 
-	gpio = devm_gpiod_get(&ulpi->dev, "cs");
-	if (!IS_ERR(gpio)) {
-		ret = gpiod_direction_output(gpio, 0);
-		if (ret)
-			return ret;
-		gpiod_set_value_cansleep(gpio, 1);
-		tusb->gpio_cs = gpio;
-	}
+	gpiod_set_value_cansleep(tusb->gpio_reset, 1);
+
+	tusb->gpio_cs = devm_gpiod_get_optional(&ulpi->dev, "cs",
+						GPIOD_OUT_LOW);
+	if (IS_ERR(tusb->gpio_cs))
+		return PTR_ERR(tusb->gpio_cs);
+
+	gpiod_set_value_cansleep(tusb->gpio_cs, 1);
 
 	/*
 	 * VENDOR_SPECIFIC2 register in TUSB1210 can be used for configuring eye
diff --git a/drivers/phy/ulpi_phy.h b/drivers/phy/ulpi_phy.h
index ac49fb6..f2ebe49 100644
--- a/drivers/phy/ulpi_phy.h
+++ b/drivers/phy/ulpi_phy.h
@@ -5,7 +5,7 @@
  * and it's controller, which is always the parent.
  */
 static inline struct phy
-*ulpi_phy_create(struct ulpi *ulpi, struct phy_ops *ops)
+*ulpi_phy_create(struct ulpi *ulpi, const struct phy_ops *ops)
 {
 	struct phy *phy;
 	int ret;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 100d9ac..84dd2ed 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -82,6 +82,12 @@
 	  Requires ACPI/FDT device enumeration code to set up a platform
 	  device.
 
+config PINCTRL_DIGICOLOR
+	bool
+	depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
+	select PINMUX
+	select GENERIC_PINCONF
+
 config PINCTRL_LANTIQ
 	bool
 	depends on LANTIQ
@@ -240,6 +246,7 @@
 source "drivers/pinctrl/sh-pfc/Kconfig"
 source "drivers/pinctrl/spear/Kconfig"
 source "drivers/pinctrl/sunxi/Kconfig"
+source "drivers/pinctrl/uniphier/Kconfig"
 source "drivers/pinctrl/vt8500/Kconfig"
 source "drivers/pinctrl/mediatek/Kconfig"
 
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index f4216d9..cad077c 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -2,12 +2,10 @@
 
 subdir-ccflags-$(CONFIG_DEBUG_PINCTRL)	+= -DDEBUG
 
-obj-$(CONFIG_PINCTRL)		+= core.o pinctrl-utils.o
+obj-y				+= core.o pinctrl-utils.o
 obj-$(CONFIG_PINMUX)		+= pinmux.o
 obj-$(CONFIG_PINCONF)		+= pinconf.o
-ifeq ($(CONFIG_OF),y)
-obj-$(CONFIG_PINCTRL)		+= devicetree.o
-endif
+obj-$(CONFIG_OF)		+= devicetree.o
 obj-$(CONFIG_GENERIC_PINCONF)	+= pinconf-generic.o
 obj-$(CONFIG_PINCTRL_ADI2)	+= pinctrl-adi2.o
 obj-$(CONFIG_PINCTRL_AS3722)	+= pinctrl-as3722.o
@@ -15,6 +13,7 @@
 obj-$(CONFIG_PINCTRL_BF60x)	+= pinctrl-adi2-bf60x.o
 obj-$(CONFIG_PINCTRL_AT91)	+= pinctrl-at91.o
 obj-$(CONFIG_PINCTRL_AMD)	+= pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_DIGICOLOR)	+= pinctrl-digicolor.o
 obj-$(CONFIG_PINCTRL_FALCON)	+= pinctrl-falcon.o
 obj-$(CONFIG_PINCTRL_MESON)	+= meson/
 obj-$(CONFIG_PINCTRL_PALMAS)	+= pinctrl-palmas.o
@@ -51,5 +50,6 @@
 obj-$(CONFIG_PINCTRL_SH_PFC)	+= sh-pfc/
 obj-$(CONFIG_PLAT_SPEAR)	+= spear/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
+obj-$(CONFIG_ARCH_UNIPHIER)	+= uniphier/
 obj-$(CONFIG_ARCH_VT8500)	+= vt8500/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 6177315..8efa235 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -586,9 +586,9 @@
 		ret = __bcm2835_gpio_irq_set_type_disabled(pc, gpio, type);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(data->irq, handle_edge_irq);
+		irq_set_handler_locked(data, handle_edge_irq);
 	else
-		__irq_set_handler_locked(data->irq, handle_level_irq);
+		irq_set_handler_locked(data, handle_level_irq);
 
 	spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
 
@@ -989,7 +989,6 @@
 		irq_set_chip_and_handler(irq, &bcm2835_gpio_irq_chip,
 				handle_level_irq);
 		irq_set_chip_data(irq, pc);
-		set_irq_flags(irq, IRQF_VALID);
 	}
 
 	for (i = 0; i < BCM2835_NUM_BANKS; i++) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 8b8f3a0..69723e0 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -231,8 +231,7 @@
 
 	pindesc = pin_desc_get(pctldev, number);
 	if (pindesc != NULL) {
-		pr_err("pin %d already registered on %s\n", number,
-		       pctldev->desc->name);
+		dev_err(pctldev->dev, "pin %d already registered\n", number);
 		return -EINVAL;
 	}
 
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index 0bbf7d7..fe04e74 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -97,13 +97,7 @@
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 {
-	struct pinctrl_dev *pctldev;
-
-	pctldev = get_pinctrl_dev_from_of_node(np);
-	if (!pctldev)
-		return NULL;
-
-	return pctldev;
+	return get_pinctrl_dev_from_of_node(np);
 }
 
 static int dt_to_map_one_config(struct pinctrl *p, const char *statename,
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 12ef544..debe121 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -87,6 +87,13 @@
 	help
 	  Say Y here to enable the imx6sx pinctrl driver
 
+config PINCTRL_IMX6UL
+	bool "IMX6UL pinctrl driver"
+	depends on SOC_IMX6UL
+	select PINCTRL_IMX
+	help
+	  Say Y here to enable the imx6ul pinctrl driver
+
 config PINCTRL_IMX7D
 	bool "IMX7D pinctrl driver"
 	depends on SOC_IMX7D
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index 343cb43..d44c9e2 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -12,6 +12,7 @@
 obj-$(CONFIG_PINCTRL_IMX6Q)	+= pinctrl-imx6dl.o
 obj-$(CONFIG_PINCTRL_IMX6SL)	+= pinctrl-imx6sl.o
 obj-$(CONFIG_PINCTRL_IMX6SX)	+= pinctrl-imx6sx.o
+obj-$(CONFIG_PINCTRL_IMX6UL)	+= pinctrl-imx6ul.o
 obj-$(CONFIG_PINCTRL_IMX7D)	+= pinctrl-imx7d.o
 obj-$(CONFIG_PINCTRL_VF610)	+= pinctrl-vf610.o
 obj-$(CONFIG_PINCTRL_MXS)	+= pinctrl-mxs.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx6ul.c b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
new file mode 100644
index 0000000..08e7576
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imx6ul.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx.h"
+
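+/*
+ * Pad IDs for the i.MX6UL IOMUXC; the RESERVE* entries are placeholders
+ * that keep the pad numbering contiguous.
+ */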
+enum imx6ul_pads {
+	MX6UL_PAD_RESERVE0 = 0,
+	MX6UL_PAD_RESERVE1 = 1,
+	MX6UL_PAD_RESERVE2 = 2,
+	MX6UL_PAD_RESERVE3 = 3,
+	MX6UL_PAD_RESERVE4 = 4,
+	MX6UL_PAD_RESERVE5 = 5,
+	MX6UL_PAD_RESERVE6 = 6,
+	MX6UL_PAD_RESERVE7 = 7,
+	MX6UL_PAD_RESERVE8 = 8,
+	MX6UL_PAD_RESERVE9 = 9,
+	MX6UL_PAD_RESERVE10 = 10,
+	MX6UL_PAD_SNVS_TAMPER4 = 11,
+	MX6UL_PAD_RESERVE12 = 12,
+	MX6UL_PAD_RESERVE13 = 13,
+	MX6UL_PAD_RESERVE14 = 14,
+	MX6UL_PAD_RESERVE15 = 15,
+	MX6UL_PAD_RESERVE16 = 16,
+	MX6UL_PAD_JTAG_MOD = 17,
+	MX6UL_PAD_JTAG_TMS = 18,
+	MX6UL_PAD_JTAG_TDO = 19,
+	MX6UL_PAD_JTAG_TDI = 20,
+	MX6UL_PAD_JTAG_TCK = 21,
+	MX6UL_PAD_JTAG_TRST_B = 22,
+	MX6UL_PAD_GPIO1_IO00 = 23,
+	MX6UL_PAD_GPIO1_IO01 = 24,
+	MX6UL_PAD_GPIO1_IO02 = 25,
+	MX6UL_PAD_GPIO1_IO03 = 26,
+	MX6UL_PAD_GPIO1_IO04 = 27,
+	MX6UL_PAD_GPIO1_IO05 = 28,
+	MX6UL_PAD_GPIO1_IO06 = 29,
+	MX6UL_PAD_GPIO1_IO07 = 30,
+	MX6UL_PAD_GPIO1_IO08 = 31,
+	MX6UL_PAD_GPIO1_IO09 = 32,
+	MX6UL_PAD_UART1_TX_DATA = 33,
+	MX6UL_PAD_UART1_RX_DATA = 34,
+	MX6UL_PAD_UART1_CTS_B = 35,
+	MX6UL_PAD_UART1_RTS_B = 36,
+	MX6UL_PAD_UART2_TX_DATA = 37,
+	MX6UL_PAD_UART2_RX_DATA = 38,
+	MX6UL_PAD_UART2_CTS_B = 39,
+	MX6UL_PAD_UART2_RTS_B = 40,
+	MX6UL_PAD_UART3_TX_DATA = 41,
+	MX6UL_PAD_UART3_RX_DATA = 42,
+	MX6UL_PAD_UART3_CTS_B = 43,
+	MX6UL_PAD_UART3_RTS_B = 44,
+	MX6UL_PAD_UART4_TX_DATA = 45,
+	MX6UL_PAD_UART4_RX_DATA = 46,
+	MX6UL_PAD_UART5_TX_DATA = 47,
+	MX6UL_PAD_UART5_RX_DATA = 48,
+	MX6UL_PAD_ENET1_RX_DATA0 = 49,
+	MX6UL_PAD_ENET1_RX_DATA1 = 50,
+	MX6UL_PAD_ENET1_RX_EN = 51,
+	MX6UL_PAD_ENET1_TX_DATA0 = 52,
+	MX6UL_PAD_ENET1_TX_DATA1 = 53,
+	MX6UL_PAD_ENET1_TX_EN = 54,
+	MX6UL_PAD_ENET1_TX_CLK = 55,
+	MX6UL_PAD_ENET1_RX_ER = 56,
+	MX6UL_PAD_ENET2_RX_DATA0 = 57,
+	MX6UL_PAD_ENET2_RX_DATA1 = 58,
+	MX6UL_PAD_ENET2_RX_EN = 59,
+	MX6UL_PAD_ENET2_TX_DATA0 = 60,
+	MX6UL_PAD_ENET2_TX_DATA1 = 61,
+	MX6UL_PAD_ENET2_TX_EN = 62,
+	MX6UL_PAD_ENET2_TX_CLK = 63,
+	MX6UL_PAD_ENET2_RX_ER = 64,
+	MX6UL_PAD_LCD_CLK = 65,
+	MX6UL_PAD_LCD_ENABLE = 66,
+	MX6UL_PAD_LCD_HSYNC = 67,
+	MX6UL_PAD_LCD_VSYNC = 68,
+	MX6UL_PAD_LCD_RESET = 69,
+	MX6UL_PAD_LCD_DATA00 = 70,
+	MX6UL_PAD_LCD_DATA01 = 71,
+	MX6UL_PAD_LCD_DATA02 = 72,
+	MX6UL_PAD_LCD_DATA03 = 73,
+	MX6UL_PAD_LCD_DATA04 = 74,
+	MX6UL_PAD_LCD_DATA05 = 75,
+	MX6UL_PAD_LCD_DATA06 = 76,
+	MX6UL_PAD_LCD_DATA07 = 77,
+	MX6UL_PAD_LCD_DATA08 = 78,
+	MX6UL_PAD_LCD_DATA09 = 79,
+	MX6UL_PAD_LCD_DATA10 = 80,
+	MX6UL_PAD_LCD_DATA11 = 81,
+	MX6UL_PAD_LCD_DATA12 = 82,
+	MX6UL_PAD_LCD_DATA13 = 83,
+	MX6UL_PAD_LCD_DATA14 = 84,
+	MX6UL_PAD_LCD_DATA15 = 85,
+	MX6UL_PAD_LCD_DATA16 = 86,
+	MX6UL_PAD_LCD_DATA17 = 87,
+	MX6UL_PAD_LCD_DATA18 = 88,
+	MX6UL_PAD_LCD_DATA19 = 89,
+	MX6UL_PAD_LCD_DATA20 = 90,
+	MX6UL_PAD_LCD_DATA21 = 91,
+	MX6UL_PAD_LCD_DATA22 = 92,
+	MX6UL_PAD_LCD_DATA23 = 93,
+	MX6UL_PAD_NAND_RE_B = 94,
+	MX6UL_PAD_NAND_WE_B = 95,
+	MX6UL_PAD_NAND_DATA00 = 96,
+	MX6UL_PAD_NAND_DATA01 = 97,
+	MX6UL_PAD_NAND_DATA02 = 98,
+	MX6UL_PAD_NAND_DATA03 = 99,
+	MX6UL_PAD_NAND_DATA04 = 100,
+	MX6UL_PAD_NAND_DATA05 = 101,
+	MX6UL_PAD_NAND_DATA06 = 102,
+	MX6UL_PAD_NAND_DATA07 = 103,
+	MX6UL_PAD_NAND_ALE = 104,
+	MX6UL_PAD_NAND_WP_B = 105,
+	MX6UL_PAD_NAND_READY_B = 106,
+	MX6UL_PAD_NAND_CE0_B = 107,
+	MX6UL_PAD_NAND_CE1_B = 108,
+	MX6UL_PAD_NAND_CLE = 109,
+	MX6UL_PAD_NAND_DQS = 110,
+	MX6UL_PAD_SD1_CMD = 111,
+	MX6UL_PAD_SD1_CLK = 112,
+	MX6UL_PAD_SD1_DATA0 = 113,
+	MX6UL_PAD_SD1_DATA1 = 114,
+	MX6UL_PAD_SD1_DATA2 = 115,
+	MX6UL_PAD_SD1_DATA3 = 116,
+	MX6UL_PAD_CSI_MCLK = 117,
+	MX6UL_PAD_CSI_PIXCLK = 118,
+	MX6UL_PAD_CSI_VSYNC = 119,
+	MX6UL_PAD_CSI_HSYNC = 120,
+	MX6UL_PAD_CSI_DATA00 = 121,
+	MX6UL_PAD_CSI_DATA01 = 122,
+	MX6UL_PAD_CSI_DATA02 = 123,
+	MX6UL_PAD_CSI_DATA03 = 124,
+	MX6UL_PAD_CSI_DATA04 = 125,
+	MX6UL_PAD_CSI_DATA05 = 126,
+	MX6UL_PAD_CSI_DATA06 = 127,
+	MX6UL_PAD_CSI_DATA07 = 128,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx6ul_pinctrl_pads[] = {
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE2),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE3),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE4),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE5),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE6),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE7),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE8),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE9),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE10),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SNVS_TAMPER4),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE12),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE13),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE14),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE15),
+	IMX_PINCTRL_PIN(MX6UL_PAD_RESERVE16),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_MOD),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TMS),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDO),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TDI),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TCK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_JTAG_TRST_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO08),
+	IMX_PINCTRL_PIN(MX6UL_PAD_GPIO1_IO09),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART1_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART2_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_CTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART3_RTS_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART4_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_TX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_UART5_RX_DATA),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_TX_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET1_RX_ER),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_EN),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_TX_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_ENET2_RX_ER),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_ENABLE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_HSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_VSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_RESET),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA08),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA09),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA10),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA11),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA12),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA13),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA14),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA15),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA16),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA17),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA18),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA19),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA20),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA21),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA22),
+	IMX_PINCTRL_PIN(MX6UL_PAD_LCD_DATA23),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_RE_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WE_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DATA07),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_ALE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_WP_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_READY_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE0_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CE1_B),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_CLE),
+	IMX_PINCTRL_PIN(MX6UL_PAD_NAND_DQS),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CMD),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_CLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA0),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA1),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA2),
+	IMX_PINCTRL_PIN(MX6UL_PAD_SD1_DATA3),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_MCLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_PIXCLK),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_VSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_HSYNC),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA00),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA01),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA02),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA03),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA04),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA05),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA06),
+	IMX_PINCTRL_PIN(MX6UL_PAD_CSI_DATA07),
+};
+
+static struct imx_pinctrl_soc_info imx6ul_pinctrl_info = {
+	.pins = imx6ul_pinctrl_pads,
+	.npins = ARRAY_SIZE(imx6ul_pinctrl_pads),
+};
+
+static const struct of_device_id imx6ul_pinctrl_of_match[] = {
+	{ .compatible = "fsl,imx6ul-iomuxc", },
+	{ /* sentinel */ }
+};
+
+static int imx6ul_pinctrl_probe(struct platform_device *pdev)
+{
+	return imx_pinctrl_probe(pdev, &imx6ul_pinctrl_info);
+}
+
+static struct platform_driver imx6ul_pinctrl_driver = {
+	.driver = {
+		.name = "imx6ul-pinctrl",
+		.of_match_table = of_match_ptr(imx6ul_pinctrl_of_match),
+	},
+	.probe = imx6ul_pinctrl_probe,
+	.remove = imx_pinctrl_remove,
+};
+
+static int __init imx6ul_pinctrl_init(void)
+{
+	return platform_driver_register(&imx6ul_pinctrl_driver);
+}
+arch_initcall(imx6ul_pinctrl_init);
+
+static void __exit imx6ul_pinctrl_exit(void)
+{
+	platform_driver_unregister(&imx6ul_pinctrl_driver);
+}
+module_exit(imx6ul_pinctrl_exit);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@freescale.com>");
+MODULE_DESCRIPTION("Freescale imx6ul pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 2062c22..dac4865 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -12,11 +12,6 @@
  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
  */
 
 #include <linux/kernel.h>
@@ -146,7 +141,7 @@
 struct byt_gpio {
 	struct gpio_chip		chip;
 	struct platform_device		*pdev;
-	spinlock_t			lock;
+	raw_spinlock_t			lock;
 	void __iomem			*reg_base;
 	struct pinctrl_gpio_range	*range;
 	struct byt_gpio_pin_context	*saved_context;
@@ -174,11 +169,11 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
 	writel(value, reg);
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
@@ -201,6 +196,9 @@
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
 	u32 value, gpio_mux;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	/*
 	 * In most cases, func pin mux 000 means GPIO function.
@@ -214,18 +212,16 @@
 	value = readl(reg) & BYT_PIN_MUX;
 	gpio_mux = byt_get_gpio_mux(vg, offset);
 	if (WARN_ON(gpio_mux != value)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&vg->lock, flags);
 		value = readl(reg) & ~BYT_PIN_MUX;
 		value |= gpio_mux;
 		writel(value, reg);
-		spin_unlock_irqrestore(&vg->lock, flags);
 
 		dev_warn(&vg->pdev->dev,
 			 "pin %u forcibly re-configured as GPIO\n", offset);
 	}
 
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
 	pm_runtime_get(&vg->pdev->dev);
 
 	return 0;
@@ -250,7 +246,7 @@
 	if (offset >= vg->chip.ngpio)
 		return -EINVAL;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 
 	WARN(value & BYT_DIRECT_IRQ_EN,
@@ -265,11 +261,11 @@
 	writel(value, reg);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -277,7 +273,15 @@
 static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
-	return readl(reg) & BYT_LEVEL;
+	struct byt_gpio *vg = to_byt_gpio(chip);
+	unsigned long flags;
+	u32 val;
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
+	val = readl(reg);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
+
+	return val & BYT_LEVEL;
 }
 
 static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -287,7 +291,7 @@
 	unsigned long flags;
 	u32 old_val;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	old_val = readl(reg);
 
@@ -296,7 +300,7 @@
 	else
 		writel(old_val & ~BYT_LEVEL, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -306,13 +310,13 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	value = readl(reg) | BYT_DIR_MASK;
 	value &= ~BYT_INPUT_EN;		/* active low */
 	writel(value, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -326,7 +330,7 @@
 	unsigned long flags;
 	u32 reg_val;
 
-	spin_lock_irqsave(&vg->lock, flags);
+	raw_spin_lock_irqsave(&vg->lock, flags);
 
 	/*
 	 * Before making any direction modifications, do a check if gpio
@@ -345,7 +349,7 @@
 	else
 		writel(reg_val & ~BYT_LEVEL, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 	return 0;
 }
@@ -354,18 +358,19 @@
 {
 	struct byt_gpio *vg = to_byt_gpio(chip);
 	int i;
-	unsigned long flags;
 	u32 conf0, val, offs;
 
-	spin_lock_irqsave(&vg->lock, flags);
-
 	for (i = 0; i < vg->chip.ngpio; i++) {
 		const char *pull_str = NULL;
 		const char *pull = NULL;
+		unsigned long flags;
 		const char *label;
 		offs = vg->range->pins[i] * 16;
+
+		raw_spin_lock_irqsave(&vg->lock, flags);
 		conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
 		val = readl(vg->reg_base + offs + BYT_VAL_REG);
+		raw_spin_unlock_irqrestore(&vg->lock, flags);
 
 		label = gpiochip_is_requested(chip, i);
 		if (!label)
@@ -418,7 +423,6 @@
 
 		seq_puts(s, "\n");
 	}
-	spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -450,8 +454,10 @@
 	unsigned offset = irqd_to_hwirq(d);
 	void __iomem *reg;
 
+	raw_spin_lock(&vg->lock);
 	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
 	writel(BIT(offset % 32), reg);
+	raw_spin_unlock(&vg->lock);
 }
 
 static void byt_irq_unmask(struct irq_data *d)
@@ -463,9 +469,9 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&vg->lock, flags);
-
 	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
+
+	raw_spin_lock_irqsave(&vg->lock, flags);
 	value = readl(reg);
 
 	switch (irqd_get_trigger_type(d)) {
@@ -486,7 +492,7 @@
 
 	writel(value, reg);
 
-	spin_unlock_irqrestore(&vg->lock, flags);
+	raw_spin_unlock_irqrestore(&vg->lock, flags);
 }
 
 static void byt_irq_mask(struct irq_data *d)
@@ -578,7 +584,7 @@
 	if (IS_ERR(vg->reg_base))
 		return PTR_ERR(vg->reg_base);
 
-	spin_lock_init(&vg->lock);
+	raw_spin_lock_init(&vg->lock);
 
 	gc = &vg->chip;
 	gc->label = dev_name(&pdev->dev);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 3f737da..2d5d3dd 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -174,7 +174,7 @@
 	struct pinctrl_dev *pctldev;
 	struct gpio_chip chip;
 	void __iomem *regs;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	unsigned intr_lines[16];
 	const struct chv_community *community;
 	u32 saved_intmask;
@@ -720,13 +720,13 @@
 	u32 ctrl0, ctrl1;
 	bool locked;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
 	ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
 	locked = chv_pad_locked(pctrl, offset);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
 		seq_puts(s, "GPIO ");
@@ -789,14 +789,14 @@
 
 	grp = &pctrl->community->groups[group];
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	/* Check first that the pad is not locked */
 	for (i = 0; i < grp->npins; i++) {
 		if (chv_pad_locked(pctrl, grp->pins[i])) {
 			dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
 				 grp->pins[i]);
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EBUSY;
 		}
 	}
@@ -839,7 +839,7 @@
 			pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
 	}
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -853,13 +853,13 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	if (chv_pad_locked(pctrl, offset)) {
 		value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
 		if (!(value & CHV_PADCTRL0_GPIOEN)) {
 			/* Locked so cannot enable */
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EBUSY;
 		}
 	} else {
@@ -899,7 +899,7 @@
 		chv_writel(value, reg);
 	}
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -913,13 +913,13 @@
 	void __iomem *reg;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
 	value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
 	chv_writel(value, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -931,7 +931,7 @@
 	unsigned long flags;
 	u32 ctrl0;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
 	if (input)
@@ -940,7 +940,7 @@
 		ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
 	chv_writel(ctrl0, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -965,10 +965,10 @@
 	u16 arg = 0;
 	u32 term;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
 
@@ -1042,7 +1042,7 @@
 	unsigned long flags;
 	u32 ctrl0, pull;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(reg);
 
 	switch (param) {
@@ -1065,7 +1065,7 @@
 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
 			break;
 		default:
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EINVAL;
 		}
 
@@ -1083,7 +1083,7 @@
 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
 			break;
 		default:
-			spin_unlock_irqrestore(&pctrl->lock, flags);
+			raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 			return -EINVAL;
 		}
 
@@ -1091,12 +1091,12 @@
 		break;
 
 	default:
-		spin_unlock_irqrestore(&pctrl->lock, flags);
+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 		return -EINVAL;
 	}
 
 	chv_writel(ctrl0, reg);
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -1169,9 +1169,12 @@
 {
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
 	int pin = chv_gpio_offset_to_pin(pctrl, offset);
+	unsigned long flags;
 	u32 ctrl0, cfg;
 
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1189,7 +1192,7 @@
 	void __iomem *reg;
 	u32 ctrl0;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
 	ctrl0 = readl(reg);
@@ -1201,7 +1204,7 @@
 
 	chv_writel(ctrl0, reg);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -1209,8 +1212,11 @@
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
 	unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
 	u32 ctrl0, direction;
+	unsigned long flags;
 
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
 	direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
@@ -1248,14 +1254,14 @@
 	int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
 	u32 intr_line;
 
-	spin_lock(&pctrl->lock);
+	raw_spin_lock(&pctrl->lock);
 
 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
 	intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
 	chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
 
-	spin_unlock(&pctrl->lock);
+	raw_spin_unlock(&pctrl->lock);
 }
 
 static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
@@ -1266,7 +1272,7 @@
 	u32 value, intr_line;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
@@ -1279,7 +1285,7 @@
 		value |= BIT(intr_line);
 	chv_writel(value, pctrl->regs + CHV_INTMASK);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
 static void chv_gpio_irq_mask(struct irq_data *d)
@@ -1313,6 +1319,7 @@
 		unsigned long flags;
 		u32 intsel, value;
 
+		raw_spin_lock_irqsave(&pctrl->lock, flags);
 		intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
 		intsel &= CHV_PADCTRL0_INTSEL_MASK;
 		intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
@@ -1323,12 +1330,11 @@
 		else
 			handler = handle_edge_irq;
 
-		spin_lock_irqsave(&pctrl->lock, flags);
 		if (!pctrl->intr_lines[intsel]) {
-			__irq_set_handler_locked(d->irq, handler);
+			irq_set_handler_locked(d, handler);
 			pctrl->intr_lines[intsel] = offset;
 		}
-		spin_unlock_irqrestore(&pctrl->lock, flags);
+		raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 	}
 
 	chv_gpio_irq_unmask(d);
@@ -1344,7 +1350,7 @@
 	unsigned long flags;
 	u32 value;
 
-	spin_lock_irqsave(&pctrl->lock, flags);
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
 
 	/*
 	 * Pins which can be used as shared interrupt are configured in
@@ -1389,11 +1395,11 @@
 	pctrl->intr_lines[value] = offset;
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
-	spin_unlock_irqrestore(&pctrl->lock, flags);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	return 0;
 }
@@ -1412,7 +1418,7 @@
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long pending;
 	u32 intr_line;
 
@@ -1505,7 +1511,7 @@
 	if (i == ARRAY_SIZE(chv_communities))
 		return -ENODEV;
 
-	spin_lock_init(&pctrl->lock);
+	raw_spin_lock_init(&pctrl->lock);
 	pctrl->dev = &pdev->dev;
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index f9ee0d6..bb377c1 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -758,9 +758,9 @@
 	writel(value, reg);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else if (type & IRQ_TYPE_LEVEL_MASK)
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
@@ -840,7 +840,7 @@
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int i;
 
 	chained_irq_enter(chip, desc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
index d0c811d..ad27184 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c
@@ -385,6 +385,7 @@
 	.driver = {
 		.name = "mediatek-mt8173-pinctrl",
 		.of_match_table = mt8173_pctrl_match,
+		.pm = &mtk_eint_pm_ops,
 	},
 };
 
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index ad1ea16..7726c6c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -33,6 +33,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <dt-bindings/pinctrl/mt65xx.h>
 
 #include "../core.h"
@@ -702,7 +703,7 @@
 
 	ret = mtk_pctrl_is_function_valid(pctl, g->pin, function);
 	if (!ret) {
-		dev_err(pctl->dev, "invaild function %d on group %d .\n",
+		dev_err(pctl->dev, "invalid function %d on group %d .\n",
 				function, group);
 		return -EINVAL;
 	}
@@ -1062,6 +1063,77 @@
 	return 0;
 }
 
+static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+	struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+	int shift = d->hwirq & 0x1f;
+	int reg = d->hwirq >> 5;
+
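+	/* Just record the wake source; the mask is applied at suspend */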
+	if (on)
+		pctl->wake_mask[reg] |= BIT(shift);
+	else
+		pctl->wake_mask[reg] &= ~BIT(shift);
+
+	return 0;
+}
+
+static void mtk_eint_chip_write_mask(const struct mtk_eint_offsets *chip,
+		void __iomem *eint_reg_base, u32 *buf)
+{
+	int port;
+	void __iomem *reg;
+
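+	/* buf holds the bits to leave unmasked; everything else gets masked */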
+	for (port = 0; port < chip->ports; port++) {
+		reg = eint_reg_base + (port << 2);
+		writel_relaxed(~buf[port], reg + chip->mask_set);
+		writel_relaxed(buf[port], reg + chip->mask_clr);
+	}
+}
+
+static void mtk_eint_chip_read_mask(const struct mtk_eint_offsets *chip,
+		void __iomem *eint_reg_base, u32 *buf)
+{
+	int port;
+	void __iomem *reg;
+
+	for (port = 0; port < chip->ports; port++) {
+		reg = eint_reg_base + chip->mask + (port << 2);
+		buf[port] = ~readl_relaxed(reg);
+		/* Mask is 0 when irq is enabled, and 1 when disabled. */
+	}
+}
+
+static int mtk_eint_suspend(struct device *device)
+{
+	void __iomem *reg;
+	struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+	const struct mtk_eint_offsets *eint_offsets =
+			&pctl->devdata->eint_offsets;
+
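+	/* Save the live mask, then leave only wakeup sources unmasked */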
+	reg = pctl->eint_reg_base;
+	mtk_eint_chip_read_mask(eint_offsets, reg, pctl->cur_mask);
+	mtk_eint_chip_write_mask(eint_offsets, reg, pctl->wake_mask);
+
+	return 0;
+}
+
+static int mtk_eint_resume(struct device *device)
+{
+	struct mtk_pinctrl *pctl = dev_get_drvdata(device);
+	const struct mtk_eint_offsets *eint_offsets =
+			&pctl->devdata->eint_offsets;
+
+	mtk_eint_chip_write_mask(eint_offsets,
+			pctl->eint_reg_base, pctl->cur_mask);
+
+	return 0;
+}
+
+const struct dev_pm_ops mtk_eint_pm_ops = {
+	.suspend = mtk_eint_suspend,
+	.resume = mtk_eint_resume,
+};
+
 static void mtk_eint_ack(struct irq_data *d)
 {
 	struct mtk_pinctrl *pctl = irq_data_get_irq_chip_data(d);
@@ -1076,10 +1148,12 @@
 
 static struct irq_chip mtk_pinctrl_irq_chip = {
 	.name = "mt-eint",
+	.irq_disable = mtk_eint_mask,
 	.irq_mask = mtk_eint_mask,
 	.irq_unmask = mtk_eint_unmask,
 	.irq_ack = mtk_eint_ack,
 	.irq_set_type = mtk_eint_set_type,
+	.irq_set_wake = mtk_eint_irq_set_wake,
 	.irq_request_resources = mtk_pinctrl_irq_request_resources,
 	.irq_release_resources = mtk_pinctrl_irq_release_resources,
 };
@@ -1118,8 +1192,8 @@
 
 static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct mtk_pinctrl *pctl = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
 	unsigned int status, eint_num;
 	int offset, index, virq;
 	const struct mtk_eint_offsets *eint_offsets =
@@ -1202,12 +1276,6 @@
 	return 0;
 }
 
-static struct pinctrl_desc mtk_pctrl_desc = {
-	.confops	= &mtk_pconf_ops,
-	.pctlops	= &mtk_pctrl_ops,
-	.pmxops		= &mtk_pmx_ops,
-};
-
 int mtk_pctrl_init(struct platform_device *pdev,
 		const struct mtk_pinctrl_devdata *data,
 		struct regmap *regmap)
@@ -1217,7 +1285,7 @@
 	struct device_node *np = pdev->dev.of_node, *node;
 	struct property *prop;
 	struct resource *res;
-	int i, ret, irq;
+	int i, ret, irq, ports_buf;
 
 	pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
 	if (!pctl)
@@ -1265,12 +1333,17 @@
 
 	for (i = 0; i < pctl->devdata->npins; i++)
 		pins[i] = pctl->devdata->pins[i].pin;
-	mtk_pctrl_desc.name = dev_name(&pdev->dev);
-	mtk_pctrl_desc.owner = THIS_MODULE;
-	mtk_pctrl_desc.pins = pins;
-	mtk_pctrl_desc.npins = pctl->devdata->npins;
+
+	pctl->pctl_desc.name = dev_name(&pdev->dev);
+	pctl->pctl_desc.owner = THIS_MODULE;
+	pctl->pctl_desc.pins = pins;
+	pctl->pctl_desc.npins = pctl->devdata->npins;
+	pctl->pctl_desc.confops = &mtk_pconf_ops;
+	pctl->pctl_desc.pctlops = &mtk_pctrl_ops;
+	pctl->pctl_desc.pmxops = &mtk_pmx_ops;
 	pctl->dev = &pdev->dev;
-	pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl);
+
+	pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl);
 	if (IS_ERR(pctl->pctl_dev)) {
 		dev_err(&pdev->dev, "couldn't register pinctrl driver\n");
 		return PTR_ERR(pctl->pctl_dev);
@@ -1319,6 +1392,21 @@
 		goto chip_error;
 	}
 
+	ports_buf = pctl->devdata->eint_offsets.ports;
+	pctl->wake_mask = devm_kcalloc(&pdev->dev, ports_buf,
+					sizeof(*pctl->wake_mask), GFP_KERNEL);
+	if (!pctl->wake_mask) {
+		ret = -ENOMEM;
+		goto chip_error;
+	}
+
+	pctl->cur_mask = devm_kcalloc(&pdev->dev, ports_buf,
+					sizeof(*pctl->cur_mask), GFP_KERNEL);
+	if (!pctl->cur_mask) {
+		ret = -ENOMEM;
+		goto chip_error;
+	}
+
 	pctl->eint_dual_edges = devm_kcalloc(&pdev->dev, pctl->devdata->ap_num,
 					     sizeof(int), GFP_KERNEL);
 	if (!pctl->eint_dual_edges) {
@@ -1348,11 +1436,9 @@
 		irq_set_chip_and_handler(virq, &mtk_pinctrl_irq_chip,
 			handle_level_irq);
 		irq_set_chip_data(virq, pctl);
-		set_irq_flags(virq, IRQF_VALID);
 	};
 
 	irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl);
-	set_irq_flags(irq, IRQF_VALID);
 	return 0;
 
 chip_error:
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
index 30213e5..55a5343 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h
@@ -256,6 +256,7 @@
 struct mtk_pinctrl {
 	struct regmap	*regmap1;
 	struct regmap	*regmap2;
+	struct pinctrl_desc pctl_desc;
 	struct device           *dev;
 	struct gpio_chip	*chip;
 	struct mtk_pinctrl_group	*groups;
@@ -266,6 +267,8 @@
 	void __iomem		*eint_reg_base;
 	struct irq_domain	*domain;
 	int			*eint_dual_edges;
+	u32 *wake_mask;
+	u32 *cur_mask;
 };
 
 int mtk_pctrl_init(struct platform_device *pdev,
@@ -281,4 +284,6 @@
 		const struct mtk_pin_ies_smt_set *ies_smt_infos, unsigned int info_num,
 		unsigned int pin, unsigned char align, int value);
 
+extern const struct dev_pm_ops mtk_eint_pm_ops;
+
 #endif /* __PINCTRL_MTK_COMMON_H */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index c748407..8392083 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
@@ -355,25 +355,6 @@
 	PINCTRL_PIN(DB8500_PIN_AC27, "GPIO267_AC27"),
 };
 
-#define DB8500_GPIO_RANGE(a, b, c) { .name = "DB8500", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8500_ranges[] = {
-	DB8500_GPIO_RANGE(0, 0, 32),
-	DB8500_GPIO_RANGE(1, 32, 5),
-	DB8500_GPIO_RANGE(2, 64, 32),
-	DB8500_GPIO_RANGE(3, 96, 2),
-	DB8500_GPIO_RANGE(4, 128, 32),
-	DB8500_GPIO_RANGE(5, 160, 12),
-	DB8500_GPIO_RANGE(6, 192, 32),
-	DB8500_GPIO_RANGE(7, 224, 7),
-	DB8500_GPIO_RANGE(8, 256, 12),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first groups of pins for uart0 on alt function a
@@ -1238,8 +1219,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_db8500_soc = {
-	.gpio_ranges = nmk_db8500_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_db8500_ranges),
 	.pins = nmk_db8500_pins,
 	.npins = ARRAY_SIZE(nmk_db8500_pins),
 	.functions = nmk_db8500_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
index d7ba544..2860eaf 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
@@ -341,28 +341,6 @@
 	PINCTRL_PIN(DB8540_PIN_D17, "GPIO267_D17"),
 };
 
-#define DB8540_GPIO_RANGE(a, b, c) { .name = "db8540", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_db8540_ranges[] = {
-	DB8540_GPIO_RANGE(0, 0, 18),
-	DB8540_GPIO_RANGE(0, 22, 7),
-	DB8540_GPIO_RANGE(1, 33, 6),
-	DB8540_GPIO_RANGE(2, 64, 4),
-	DB8540_GPIO_RANGE(2, 70, 18),
-	DB8540_GPIO_RANGE(3, 116, 12),
-	DB8540_GPIO_RANGE(4, 128, 32),
-	DB8540_GPIO_RANGE(5, 160, 9),
-	DB8540_GPIO_RANGE(6, 192, 23),
-	DB8540_GPIO_RANGE(6, 219, 5),
-	DB8540_GPIO_RANGE(7, 224, 9),
-	DB8540_GPIO_RANGE(8, 256, 12),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first groups of pins for uart0 on alt function a
@@ -1247,8 +1225,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_db8540_soc = {
-	.gpio_ranges = nmk_db8540_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_db8540_ranges),
 	.pins = nmk_db8540_pins,
 	.npins = ARRAY_SIZE(nmk_db8540_pins),
 	.functions = nmk_db8540_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index 2cd7147..587b222 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
@@ -264,20 +264,6 @@
 	PINCTRL_PIN(STN8815_PIN_J22, "GPIO123_J22"),
 };
 
-#define STN8815_GPIO_RANGE(a, b, c) { .name = "STN8815", .id = a, .base = b, \
-			.pin_base = b, .npins = c }
-
-/*
- * This matches the 32-pin gpio chips registered by the GPIO portion. This
- * cannot be const since we assign the struct gpio_chip * pointer at runtime.
- */
-static struct pinctrl_gpio_range nmk_stn8815_ranges[] = {
-	STN8815_GPIO_RANGE(0, 0, 32),
-	STN8815_GPIO_RANGE(1, 32, 32),
-	STN8815_GPIO_RANGE(2, 64, 32),
-	STN8815_GPIO_RANGE(3, 96, 28),
-};
-
 /*
  * Read the pin group names like this:
  * u0_a_1    = first groups of pins for uart0 on alt function a
@@ -285,9 +271,11 @@
  */
 
 /* Altfunction A */
-static const unsigned u0_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5,
-	STN8815_PIN_C5, STN8815_PIN_A4, STN8815_PIN_B5, STN8815_PIN_D6,
-	STN8815_PIN_C6, STN8815_PIN_B6 };
+static const unsigned u0txrx_a_1_pins[] = { STN8815_PIN_B4, STN8815_PIN_D5 };
+static const unsigned u0ctsrts_a_1_pins[] = { STN8815_PIN_C5, STN8815_PIN_B6 };
+/* Modem pins: DCD, DSR, RI, DTR */
+static const unsigned u0modem_a_1_pins[] = { STN8815_PIN_A4, STN8815_PIN_B5,
+	STN8815_PIN_D6, STN8815_PIN_C6 };
 static const unsigned mmcsd_a_1_pins[] = { STN8815_PIN_B10, STN8815_PIN_A10,
 	STN8815_PIN_C11, STN8815_PIN_B11, STN8815_PIN_A11, STN8815_PIN_C12,
 	STN8815_PIN_B12, STN8815_PIN_A12, STN8815_PIN_C13, STN8815_PIN_C15 };
@@ -304,7 +292,9 @@
 			.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
 
 static const struct nmk_pingroup nmk_stn8815_groups[] = {
-	STN8815_PIN_GROUP(u0_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0txrx_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0ctsrts_a_1, NMK_GPIO_ALT_A),
+	STN8815_PIN_GROUP(u0modem_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(mmcsd_a_1, NMK_GPIO_ALT_A),
 	STN8815_PIN_GROUP(mmcsd_b_1, NMK_GPIO_ALT_B),
 	STN8815_PIN_GROUP(u1_a_1, NMK_GPIO_ALT_A),
@@ -318,7 +308,7 @@
 #define STN8815_FUNC_GROUPS(a, b...)	   \
 static const char * const a##_groups[] = { b };
 
-STN8815_FUNC_GROUPS(u0, "u0_a_1");
+STN8815_FUNC_GROUPS(u0, "u0txrx_a_1", "u0ctsrts_a_1", "u0modem_a_1");
 STN8815_FUNC_GROUPS(mmcsd, "mmcsd_a_1", "mmcsd_b_1");
 STN8815_FUNC_GROUPS(u1, "u1_a_1", "u1_b_1");
 STN8815_FUNC_GROUPS(i2c1, "i2c1_a_1");
@@ -342,8 +332,6 @@
 };
 
 static const struct nmk_pinctrl_soc_data nmk_stn8815_soc = {
-	.gpio_ranges = nmk_stn8815_ranges,
-	.gpio_num_ranges = ARRAY_SIZE(nmk_stn8815_ranges),
 	.pins = nmk_stn8815_pins,
 	.npins = ARRAY_SIZE(nmk_stn8815_pins),
 	.functions = nmk_stn8815_functions,
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 809d884..352ede1 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -203,6 +203,7 @@
 
 #define GPIO_BLOCK_SHIFT 5
 #define NMK_GPIO_PER_CHIP (1 << GPIO_BLOCK_SHIFT)
+#define NMK_MAX_BANKS DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)
 
 /* Register in the logic block */
 #define NMK_GPIO_DAT	0x00
@@ -282,8 +283,7 @@
 	void __iomem *prcm_base;
 };
 
-static struct nmk_gpio_chip *
-nmk_gpio_chips[DIV_ROUND_UP(ARCH_NR_GPIOS, NMK_GPIO_PER_CHIP)];
+static struct nmk_gpio_chip *nmk_gpio_chips[NMK_MAX_BANKS];
 
 static DEFINE_SPINLOCK(nmk_gpio_slpm_lock);
 
@@ -843,10 +843,9 @@
 	clk_disable(nmk_chip->clk);
 }
 
-static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
-				   u32 status)
+static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
 {
-	struct irq_chip *host_chip = irq_get_chip(irq);
+	struct irq_chip *host_chip = irq_desc_get_chip(desc);
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 
 	chained_irq_enter(host_chip, desc);
@@ -871,17 +870,16 @@
 	status = readl(nmk_chip->addr + NMK_GPIO_IS);
 	clk_disable(nmk_chip->clk);
 
-	__nmk_gpio_irq_handler(irq, desc, status);
+	__nmk_gpio_irq_handler(desc, status);
 }
 
-static void nmk_gpio_latent_irq_handler(unsigned int irq,
-					   struct irq_desc *desc)
+static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 	u32 status = nmk_chip->get_latent_status(nmk_chip->bank);
 
-	__nmk_gpio_irq_handler(irq, desc, status);
+	__nmk_gpio_irq_handler(desc, status);
 }
 
 /* I/O Functions */
@@ -1012,6 +1010,7 @@
 		int irq = gpio_to_irq(gpio);
 		struct irq_desc	*desc = irq_to_desc(irq);
 		int pullidx = 0;
+		int val;
 
 		if (pull)
 			pullidx = data_out ? 1 : 2;
@@ -1021,6 +1020,10 @@
 			   label ?: "(none)",
 			   pulls[pullidx],
 			   (mode < 0) ? "unknown" : modes[mode]);
+
+		val = nmk_gpio_get_input(chip, offset);
+		seq_printf(s, " VAL %d", val);
+
 		/*
 		 * This races with request_irq(), set_irq_type(),
 		 * and set_irq_wake() ... but those are "rare".
@@ -1162,29 +1165,90 @@
 	}
 }
 
+/*
+ * We will allocate memory for the state container using devm* allocators
+ * binding to the first device reaching this point; it doesn't matter whether
+ * it is the pin controller or the GPIO driver. However, we need to use the
+ * right platform device when looking up resources, so pay attention to pdev.
+ */
+static struct nmk_gpio_chip *nmk_gpio_populate_chip(struct device_node *np,
+						struct platform_device *pdev)
+{
+	struct nmk_gpio_chip *nmk_chip;
+	struct platform_device *gpio_pdev;
+	struct gpio_chip *chip;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *base;
+	u32 id;
+
+	gpio_pdev = of_find_device_by_node(np);
+	if (!gpio_pdev) {
+		pr_err("populate \"%s\": device not found\n", np->name);
+		return ERR_PTR(-ENODEV);
+	}
+	if (of_property_read_u32(np, "gpio-bank", &id)) {
+		dev_err(&pdev->dev, "populate: gpio-bank property not found\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Already populated? */
+	nmk_chip = nmk_gpio_chips[id];
+	if (nmk_chip)
+		return nmk_chip;
+
+	nmk_chip = devm_kzalloc(&pdev->dev, sizeof(*nmk_chip), GFP_KERNEL);
+	if (!nmk_chip)
+		return ERR_PTR(-ENOMEM);
+
+	nmk_chip->bank = id;
+	chip = &nmk_chip->chip;
+	chip->base = id * NMK_GPIO_PER_CHIP;
+	chip->ngpio = NMK_GPIO_PER_CHIP;
+	chip->label = dev_name(&gpio_pdev->dev);
+	chip->dev = &gpio_pdev->dev;
+
+	res = platform_get_resource(gpio_pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return base;
+	nmk_chip->addr = base;
+
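+	/*
+	 * The clock belongs to the GPIO device; an error pointer from the
+	 * lookup is handed straight back to the caller.
+	 */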
+	clk = clk_get(&gpio_pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return (void *) clk;
+	clk_prepare(clk);
+	nmk_chip->clk = clk;
+
+	BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
+	nmk_gpio_chips[id] = nmk_chip;
+	return nmk_chip;
+}
+
 static int nmk_gpio_probe(struct platform_device *dev)
 {
 	struct device_node *np = dev->dev.of_node;
 	struct nmk_gpio_chip *nmk_chip;
 	struct gpio_chip *chip;
 	struct irq_chip *irqchip;
-	struct resource *res;
-	struct clk *clk;
 	int latent_irq;
 	bool supports_sleepmode;
-	void __iomem *base;
 	int irq;
 	int ret;
 
+	nmk_chip = nmk_gpio_populate_chip(np, dev);
+	if (IS_ERR(nmk_chip)) {
+		dev_err(&dev->dev, "could not populate nmk chip struct\n");
+		return PTR_ERR(nmk_chip);
+	}
+
 	if (of_get_property(np, "st,supports-sleepmode", NULL))
 		supports_sleepmode = true;
 	else
 		supports_sleepmode = false;
 
-	if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
-		dev_err(&dev->dev, "gpio-bank property not found\n");
-		return -EINVAL;
-	}
+	/* Correct platform device ID */
+	dev->id = nmk_chip->bank;
 
 	irq = platform_get_irq(dev, 0);
 	if (irq < 0)
@@ -1193,27 +1257,10 @@
 	/* It's OK for this IRQ not to be present */
 	latent_irq = platform_get_irq(dev, 1);
 
-	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&dev->dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
-
-	clk = devm_clk_get(&dev->dev, NULL);
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
-	clk_prepare(clk);
-
-	nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
-	if (!nmk_chip)
-		return -ENOMEM;
-
 	/*
 	 * The virt address in nmk_chip->addr is in the nomadik register space,
 	 * so we can simply convert the resource address, without remapping
 	 */
-	nmk_chip->bank = dev->id;
-	nmk_chip->clk = clk;
-	nmk_chip->addr = base;
 	nmk_chip->parent_irq = irq;
 	nmk_chip->latent_parent_irq = latent_irq;
 	nmk_chip->sleepmode = supports_sleepmode;
@@ -1228,10 +1275,6 @@
 	chip->set = nmk_gpio_set_output;
 	chip->dbg_show = nmk_gpio_dbg_show;
 	chip->can_sleep = false;
-	chip->base = dev->id * NMK_GPIO_PER_CHIP;
-	chip->ngpio = NMK_GPIO_PER_CHIP;
-	chip->label = dev_name(&dev->dev);
-	chip->dev = &dev->dev;
 	chip->owner = THIS_MODULE;
 
 	irqchip = &nmk_chip->irqchip;
@@ -1253,14 +1296,10 @@
 	clk_disable(nmk_chip->clk);
 	chip->of_node = np;
 
-	ret = gpiochip_add(&nmk_chip->chip);
+	ret = gpiochip_add(chip);
 	if (ret)
 		return ret;
 
-	BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
-
-	nmk_gpio_chips[nmk_chip->bank] = nmk_chip;
-
 	platform_set_drvdata(dev, nmk_chip);
 
 	/*
@@ -1320,35 +1359,40 @@
 	return 0;
 }
 
-static struct pinctrl_gpio_range *
-nmk_match_gpio_range(struct pinctrl_dev *pctldev, unsigned offset)
+static struct nmk_gpio_chip *find_nmk_gpio_from_pin(unsigned pin)
 {
-	struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
 	int i;
+	struct nmk_gpio_chip *nmk_gpio;
 
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
-		struct pinctrl_gpio_range *range;
-
-		range = &npct->soc->gpio_ranges[i];
-		if (offset >= range->pin_base &&
-		    offset <= (range->pin_base + range->npins - 1))
-			return range;
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+		nmk_gpio = nmk_gpio_chips[i];
+		if (!nmk_gpio)
+			continue;
+		if (pin >= nmk_gpio->chip.base &&
+			pin < nmk_gpio->chip.base + nmk_gpio->chip.ngpio)
+			return nmk_gpio;
 	}
 	return NULL;
 }
 
+static struct gpio_chip *find_gc_from_pin(unsigned pin)
+{
+	struct nmk_gpio_chip *nmk_gpio = find_nmk_gpio_from_pin(pin);
+
+	if (nmk_gpio)
+		return &nmk_gpio->chip;
+	return NULL;
+}
+
 static void nmk_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
 		   unsigned offset)
 {
-	struct pinctrl_gpio_range *range;
-	struct gpio_chip *chip;
+	struct gpio_chip *chip = find_gc_from_pin(offset);
 
-	range = nmk_match_gpio_range(pctldev, offset);
-	if (!range || !range->gc) {
+	if (!chip) {
 		seq_printf(s, "invalid pin offset");
 		return;
 	}
-	chip = range->gc;
 	nmk_gpio_dbg_show_one(s, pctldev, chip, offset - chip->base, offset);
 }
 
@@ -1693,25 +1737,16 @@
 	}
 
 	for (i = 0; i < g->npins; i++) {
-		struct pinctrl_gpio_range *range;
 		struct nmk_gpio_chip *nmk_chip;
-		struct gpio_chip *chip;
 		unsigned bit;
 
-		range = nmk_match_gpio_range(pctldev, g->pins[i]);
-		if (!range) {
+		nmk_chip = find_nmk_gpio_from_pin(g->pins[i]);
+		if (!nmk_chip) {
 			dev_err(npct->dev,
 				"invalid pin offset %d in group %s at index %d\n",
 				g->pins[i], g->name, i);
 			goto out_glitch;
 		}
-		if (!range->gc) {
-			dev_err(npct->dev, "GPIO chip missing in range for pin offset %d in group %s at index %d\n",
-				g->pins[i], g->name, i);
-			goto out_glitch;
-		}
-		chip = range->gc;
-		nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 		dev_dbg(npct->dev, "setting pin %d to altsetting %d\n", g->pins[i], g->altsetting);
 
 		clk_enable(nmk_chip->clk);
@@ -1827,25 +1862,17 @@
 	};
 	struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
 	struct nmk_gpio_chip *nmk_chip;
-	struct pinctrl_gpio_range *range;
-	struct gpio_chip *chip;
 	unsigned bit;
 	pin_cfg_t cfg;
 	int pull, slpm, output, val, i;
 	bool lowemi, gpiomode, sleep;
 
-	range = nmk_match_gpio_range(pctldev, pin);
-	if (!range) {
-		dev_err(npct->dev, "invalid pin offset %d\n", pin);
+	nmk_chip = find_nmk_gpio_from_pin(pin);
+	if (!nmk_chip) {
+		dev_err(npct->dev,
+			"invalid pin offset %d\n", pin);
 		return -EINVAL;
 	}
-	if (!range->gc) {
-		dev_err(npct->dev, "GPIO chip missing in range for pin %d\n",
-			pin);
-		return -EINVAL;
-	}
-	chip = range->gc;
-	nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
 
 	for (i = 0; i < num_configs; i++) {
 		/*
@@ -1997,6 +2024,31 @@
 	if (version == PINCTRL_NMK_DB8540)
 		nmk_pinctrl_db8540_init(&npct->soc);
 
+	/*
+	 * Since we depend on the GPIO chips to provide clock and register base
+	 * for the pin control operations, make sure that we have these
+	 * populated before we continue. Follow the phandles to instantiate
+	 * them. The GPIO portion of the actual hardware may be probed before
+	 * or after this point: it shouldn't matter as the APIs are orthogonal.
+	 */
+	for (i = 0; i < NMK_MAX_BANKS; i++) {
+		struct device_node *gpio_np;
+		struct nmk_gpio_chip *nmk_chip;
+
+		gpio_np = of_parse_phandle(np, "nomadik-gpio-chips", i);
+		if (gpio_np) {
+			dev_info(&pdev->dev,
+				 "populate NMK GPIO %d \"%s\"\n",
+				 i, gpio_np->name);
+			nmk_chip = nmk_gpio_populate_chip(gpio_np, pdev);
+			if (IS_ERR(nmk_chip))
+				dev_err(&pdev->dev,
+					"could not populate nmk chip struct - continue anyway\n");
+			of_node_put(gpio_np);
+		}
+	}
+
 	prcm_np = of_parse_phandle(np, "prcm", 0);
 	if (prcm_np)
 		npct->prcm_base = of_iomap(prcm_np, 0);
@@ -2011,19 +2063,6 @@
 		}
 	}
 
-	/*
-	 * We need all the GPIO drivers to probe FIRST, or we will not be able
-	 * to obtain references to the struct gpio_chip * for them, and we
-	 * need this to proceed.
-	 */
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++) {
-		if (!nmk_gpio_chips[npct->soc->gpio_ranges[i].id]) {
-			dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-			return -EPROBE_DEFER;
-		}
-		npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[npct->soc->gpio_ranges[i].id]->chip;
-	}
-
 	nmk_pinctrl_desc.pins = npct->soc->pins;
 	nmk_pinctrl_desc.npins = npct->soc->npins;
 	npct->dev = &pdev->dev;
@@ -2034,10 +2073,6 @@
 		return PTR_ERR(npct->pctl);
 	}
 
-	/* We will handle a range of GPIO pins */
-	for (i = 0; i < npct->soc->gpio_num_ranges; i++)
-		pinctrl_add_gpio_range(npct->pctl, &npct->soc->gpio_ranges[i]);
-
 	platform_set_drvdata(pdev, npct);
 	dev_info(&pdev->dev, "initialized Nomadik pin control driver\n");
 
@@ -2072,15 +2107,15 @@
 
 static int __init nmk_gpio_init(void)
 {
-	int ret;
+	return platform_driver_register(&nmk_gpio_driver);
+}
+subsys_initcall(nmk_gpio_init);
 
-	ret = platform_driver_register(&nmk_gpio_driver);
-	if (ret)
-		return ret;
+static int __init nmk_pinctrl_init(void)
+{
 	return platform_driver_register(&nmk_pinctrl_driver);
 }
-
-core_initcall(nmk_gpio_init);
+core_initcall(nmk_pinctrl_init);
 
 MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
 MODULE_DESCRIPTION("Nomadik GPIO Driver");
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index d8215f1..30bba2a 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
@@ -121,8 +121,6 @@
 
 /**
  * struct nmk_pinctrl_soc_data - Nomadik pin controller per-SoC configuration
- * @gpio_ranges: An array of GPIO ranges for this SoC
- * @gpio_num_ranges: The number of GPIO ranges for this SoC
  * @pins:	An array describing all pins the pin controller affects.
  *		All pins which are also GPIOs must be listed first within the
  *		array, and be numbered identically to the GPIO controller's
@@ -137,8 +135,6 @@
  * @prcm_gpiocr_registers: The array of PRCM GPIOCR registers on this SoC
  */
 struct nmk_pinctrl_soc_data {
-	struct pinctrl_gpio_range *gpio_ranges;
-	unsigned gpio_num_ranges;
 	const struct pinctrl_pin_desc *pins;
 	unsigned npins;
 	const struct nmk_function *functions;
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index 1fc09dc..29a7bb1 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -61,8 +61,8 @@
 	const struct pinconf_ops *ops = pctldev->desc->confops;
 
 	if (!ops || !ops->pin_config_get) {
-		dev_dbg(pctldev->dev, "cannot get pin configuration, missing "
-			"pin_config_get() function in driver\n");
+		dev_dbg(pctldev->dev,
+			"cannot get pin configuration, .pin_config_get missing in driver\n");
 		return -ENOTSUPP;
 	}
 
@@ -202,18 +202,34 @@
 
 #ifdef CONFIG_DEBUG_FS
 
-void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+static void pinconf_show_config(struct seq_file *s, struct pinctrl_dev *pctldev,
+		      unsigned long *configs, unsigned num_configs)
 {
-	struct pinctrl_dev *pctldev;
 	const struct pinconf_ops *confops;
 	int i;
 
-	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
 	if (pctldev)
 		confops = pctldev->desc->confops;
 	else
 		confops = NULL;
 
+	for (i = 0; i < num_configs; i++) {
+		seq_puts(s, "config ");
+		if (confops && confops->pin_config_config_dbg_show)
+			confops->pin_config_config_dbg_show(pctldev, s,
+							    configs[i]);
+		else
+			seq_printf(s, "%08lx", configs[i]);
+		seq_puts(s, "\n");
+	}
+}
+
+void pinconf_show_map(struct seq_file *s, struct pinctrl_map const *map)
+{
+	struct pinctrl_dev *pctldev;
+
+	pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name);
+
 	switch (map->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
 		seq_printf(s, "pin ");
@@ -227,15 +243,8 @@
 
 	seq_printf(s, "%s\n", map->data.configs.group_or_pin);
 
-	for (i = 0; i < map->data.configs.num_configs; i++) {
-		seq_printf(s, "config ");
-		if (confops && confops->pin_config_config_dbg_show)
-			confops->pin_config_config_dbg_show(pctldev, s,
-						map->data.configs.configs[i]);
-		else
-			seq_printf(s, "%08lx", map->data.configs.configs[i]);
-		seq_printf(s, "\n");
-	}
+	pinconf_show_config(s, pctldev, map->data.configs.configs,
+			    map->data.configs.num_configs);
 }
 
 void pinconf_show_setting(struct seq_file *s,
@@ -243,9 +252,7 @@
 {
 	struct pinctrl_dev *pctldev = setting->pctldev;
 	const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
-	const struct pinconf_ops *confops = pctldev->desc->confops;
 	struct pin_desc *desc;
-	int i;
 
 	switch (setting->type) {
 	case PIN_MAP_TYPE_CONFIGS_PIN:
@@ -269,17 +276,8 @@
 	 * FIXME: We should really get the pin controller to dump the config
 	 * values, so they can be decoded to something meaningful.
 	 */
-	for (i = 0; i < setting->data.configs.num_configs; i++) {
-		seq_printf(s, " ");
-		if (confops && confops->pin_config_config_dbg_show)
-			confops->pin_config_config_dbg_show(pctldev, s,
-				setting->data.configs.configs[i]);
-		else
-			seq_printf(s, "%08lx",
-				   setting->data.configs.configs[i]);
-	}
-
-	seq_printf(s, "\n");
+	pinconf_show_config(s, pctldev, setting->data.configs.configs,
+			    setting->data.configs.num_configs);
 }
 
 static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
@@ -412,10 +410,8 @@
 	const struct pinctrl_map *map;
 	const struct pinctrl_map *found = NULL;
 	struct pinctrl_dev *pctldev;
-	const struct pinconf_ops *confops = NULL;
 	struct dbg_cfg *dbg = &pinconf_dbg_conf;
 	int i, j;
-	unsigned long config;
 
 	mutex_lock(&pinctrl_maps_mutex);
 
@@ -449,16 +445,10 @@
 	}
 
 	pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
-	config = *found->data.configs.configs;
-	seq_printf(s, "Dev %s has config of %s in state %s: 0x%08lX\n",
-			dbg->dev_name, dbg->pin_name,
-			dbg->state_name, config);
-
-	if (pctldev)
-		confops = pctldev->desc->confops;
-
-	if (confops && confops->pin_config_config_dbg_show)
-		confops->pin_config_config_dbg_show(pctldev, s, config);
+	seq_printf(s, "Dev %s has config of %s in state %s:\n",
+		   dbg->dev_name, dbg->pin_name, dbg->state_name);
+	pinconf_show_config(s, pctldev, found->data.configs.configs,
+			    found->data.configs.num_configs);
 
 exit:
 	mutex_unlock(&pinctrl_maps_mutex);
@@ -470,10 +460,12 @@
  * pinconf_dbg_config_write() - modify the pinctrl config in the pinctrl
  * map, of a dev/pin/state entry based on user entries to pinconf-config
  * @user_buf: contains the modification request with expected format:
- *     modify config_pin <devicename> <state> <pinname> <newvalue>
+ *     modify <config> <devicename> <state> <name> <newvalue>
  * modify is literal string, alternatives like add/delete not supported yet
- * config_pin is literal, alternatives like config_mux not supported yet
- * <devicename> <state> <pinname> are values that should match the pinctrl-maps
+ * <config> is the configuration to be changed. Supported configs are
+ *     "config_pin" or "config_group", alternatives like config_mux are not
+ *     supported yet.
+ * <devicename> <state> <name> are values that should match the pinctrl-maps
 * <newvalue> reflects the new config and is driver dependent
  */
 static ssize_t pinconf_dbg_config_write(struct file *file,
@@ -511,13 +503,19 @@
 	if (strcmp(token, "modify"))
 		return -EINVAL;
 
-	/* Get arg type: "config_pin" type supported so far */
+	/*
+	 * Get arg type: "config_pin" and "config_group"
+	 *                types are supported so far
+	 */
 	token = strsep(&b, " ");
 	if (!token)
 		return -EINVAL;
-	if (strcmp(token, "config_pin"))
+	if (!strcmp(token, "config_pin"))
+		dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
+	else if (!strcmp(token, "config_group"))
+		dbg->map_type = PIN_MAP_TYPE_CONFIGS_GROUP;
+	else
 		return -EINVAL;
-	dbg->map_type = PIN_MAP_TYPE_CONFIGS_PIN;
 
 	/* get arg 'device_name' */
 	token = strsep(&b, " ");
diff --git a/drivers/pinctrl/pinctrl-adi2-bf60x.c b/drivers/pinctrl/pinctrl-adi2-bf60x.c
index 4cb59fe..fcfa008 100644
--- a/drivers/pinctrl/pinctrl-adi2-bf60x.c
+++ b/drivers/pinctrl/pinctrl-adi2-bf60x.c
@@ -394,25 +394,25 @@
 static const unsigned short lp0_mux[] = {
 	P_LP0_CLK, P_LP0_ACK, P_LP0_D0, P_LP0_D1, P_LP0_D2,
 	P_LP0_D3, P_LP0_D4, P_LP0_D5, P_LP0_D6, P_LP0_D7,
-        0
+	0
 };
 
 static const unsigned short lp1_mux[] = {
 	P_LP1_CLK, P_LP1_ACK, P_LP1_D0, P_LP1_D1, P_LP1_D2,
 	P_LP1_D3, P_LP1_D4, P_LP1_D5, P_LP1_D6, P_LP1_D7,
-        0
+	0
 };
 
 static const unsigned short lp2_mux[] = {
 	P_LP2_CLK, P_LP2_ACK, P_LP2_D0, P_LP2_D1, P_LP2_D2,
 	P_LP2_D3, P_LP2_D4, P_LP2_D5, P_LP2_D6, P_LP2_D7,
-        0
+	0
 };
 
 static const unsigned short lp3_mux[] = {
 	P_LP3_CLK, P_LP3_ACK, P_LP3_D0, P_LP3_D1, P_LP3_D2,
 	P_LP3_D3, P_LP3_D4, P_LP3_D5, P_LP3_D6, P_LP3_D7,
-        0
+	0
 };
 
 static const struct adi_pin_group adi_pin_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index c3c3d23..a5976eb 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -427,10 +427,10 @@
 
 	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
 		writel(pintmask, &pint_regs->edge_set);
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	} else {
 		writel(pintmask, &pint_regs->edge_clear);
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	}
 
 out:
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index d8e3f7c..5e86bb8 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -420,7 +420,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_FALLING:
@@ -428,7 +428,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_EDGE_BOTH:
@@ -436,7 +436,7 @@
 		pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
 		pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
 		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_HIGH:
@@ -445,7 +445,7 @@
 		pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
 		pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_LEVEL_LOW:
@@ -454,7 +454,7 @@
 		pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
 		pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
 		pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		break;
 
 	case IRQ_TYPE_NONE:
@@ -492,8 +492,9 @@
 	.irq_set_type = amd_gpio_irq_set_type,
 };
 
-static void amd_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	u32 i;
 	u32 off;
 	u32 reg;
@@ -501,7 +502,7 @@
 	u64 reg64;
 	int handled = 0;
 	unsigned long flags;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct amd_gpio *gpio_dev = to_amd_gpio(gc);
 
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index a082447..bae0012 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -320,6 +320,9 @@
 static void __iomem *pin_to_controller(struct at91_pinctrl *info,
 				 unsigned int bank)
 {
+	if (!gpio_chips[bank])
+		return NULL;
+
 	return gpio_chips[bank]->regbase;
 }
 
@@ -729,6 +732,10 @@
 		pin = &pins_conf[i];
 		at91_pin_dbg(info->dev, pin);
 		pio = pin_to_controller(info, pin->bank);
+
+		if (!pio)
+			continue;
+
 		mask = pin_to_mask(pin->pin);
 		at91_mux_disable_interrupt(pio, mask);
 		switch (pin->mux) {
@@ -848,6 +855,10 @@
 	*config = 0;
 	dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
 	pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+	if (!pio)
+		return -EINVAL;
+
 	pin = pin_id % MAX_NB_GPIO_PER_BANK;
 
 	if (at91_mux_get_multidrive(pio, pin))
@@ -889,6 +900,10 @@
 			"%s:%d, pin_id=%d, config=0x%lx",
 			__func__, __LINE__, pin_id, config);
 		pio = pin_to_controller(info, pin_to_bank(pin_id));
+
+		if (!pio)
+			return -EINVAL;
+
 		pin = pin_id % MAX_NB_GPIO_PER_BANK;
 		mask = pin_to_mask(pin);
 
@@ -1444,22 +1459,22 @@
 
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_ESR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_FELLSR);
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 		writel_relaxed(mask, pio + PIO_LSR);
 		writel_relaxed(mask, pio + PIO_REHLSR);
 		break;
@@ -1468,7 +1483,7 @@
 		 * disable additional interrupt modes:
 		 * fall back to default behavior
 		 */
-		__irq_set_handler_locked(d->irq, handle_simple_irq);
+		irq_set_handler_locked(d, handle_simple_irq);
 		writel_relaxed(mask, pio + PIO_AIMDR);
 		return 0;
 	case IRQ_TYPE_NONE:
@@ -1488,28 +1503,6 @@
 	/* the interrupt is already cleared before by reading ISR */
 }
 
-static int gpio_irq_request_res(struct irq_data *d)
-{
-	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
-	unsigned	pin = d->hwirq;
-	int ret;
-
-	ret = gpiochip_lock_as_irq(&at91_gpio->chip, pin);
-	if (ret)
-		dev_err(at91_gpio->chip.dev, "unable to lock pind %lu IRQ\n",
-			d->hwirq);
-
-	return ret;
-}
-
-static void gpio_irq_release_res(struct irq_data *d)
-{
-	struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
-	unsigned	pin = d->hwirq;
-
-	gpiochip_unlock_as_irq(&at91_gpio->chip, pin);
-}
-
 #ifdef CONFIG_PM
 
 static u32 wakeups[MAX_GPIO_BANKS];
@@ -1585,8 +1578,6 @@
 static struct irq_chip gpio_irqchip = {
 	.name		= "GPIO",
 	.irq_ack	= gpio_irq_ack,
-	.irq_request_resources = gpio_irq_request_res,
-	.irq_release_resources = gpio_irq_release_res,
 	.irq_disable	= gpio_irq_mask,
 	.irq_mask	= gpio_irq_mask,
 	.irq_unmask	= gpio_irq_unmask,
@@ -1596,7 +1587,7 @@
 
 static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
 	struct at91_gpio_chip *at91_gpio = container_of(gpio_chip,
 					   struct at91_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 29cbbab..3731cc6 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,10 +519,11 @@
 	.irq_set_type		= u300_gpio_irq_type,
 };
 
-static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
 {
-	struct irq_chip *parent_chip = irq_get_chip(irq);
-	struct gpio_chip *chip = irq_get_handler_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
+	struct gpio_chip *chip = irq_desc_get_handler_data(desc);
 	struct u300_gpio *gpio = to_u300_gpio(chip);
 	struct u300_gpio_port *port = &gpio->ports[irq - chip->base];
 	int pinoffset = port->number << 3; /* get the right stride */
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
new file mode 100644
index 0000000..461fffc
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -0,0 +1,378 @@
+/*
+ *  Driver for Conexant Digicolor General Purpose Pin Mapping
+ *
+ * Author: Baruch Siach <baruch@tkos.co.il>
+ *
+ * Copyright (C) 2015 Paradox Innovation Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * TODO:
+ * - GPIO interrupt support
+ * - Pin pad configuration (pull up/down, strength)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/spinlock.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include "pinctrl-utils.h"
+
+#define DRIVER_NAME	"pinctrl-digicolor"
+
+#define GP_CLIENTSEL(clct)	((clct)*8 + 0x20)
+#define GP_DRIVE0(clct)		(GP_CLIENTSEL(clct) + 2)
+#define GP_OUTPUT0(clct)	(GP_CLIENTSEL(clct) + 3)
+#define GP_INPUT(clct)		(GP_CLIENTSEL(clct) + 6)
+
+#define PIN_COLLECTIONS		('R' - 'A' + 1)
+#define PINS_PER_COLLECTION	8
+#define PINS_COUNT		(PIN_COLLECTIONS * PINS_PER_COLLECTION)
+
+struct dc_pinmap {
+	void __iomem		*regs;
+	struct device		*dev;
+	struct pinctrl_dev	*pctl;
+
+	struct pinctrl_desc	*desc;
+	const char		*pin_names[PINS_COUNT];
+
+	struct gpio_chip	chip;
+	spinlock_t		lock;
+};
+
+static int dc_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	return PINS_COUNT;
+}
+
+static const char *dc_get_group_name(struct pinctrl_dev *pctldev,
+				     unsigned selector)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	/* Exactly one group per pin */
+	return pmap->desc->pins[selector].name;
+}
+
+static int dc_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+			     const unsigned **pins,
+			     unsigned *num_pins)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pmap->desc->pins[selector].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static struct pinctrl_ops dc_pinctrl_ops = {
+	.get_groups_count	= dc_get_groups_count,
+	.get_group_name		= dc_get_group_name,
+	.get_group_pins		= dc_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_pin,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static const char *const dc_functions[] = {
+	"gpio",
+	"client_a",
+	"client_b",
+	"client_c",
+};
+
+static int dc_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(dc_functions);
+}
+
+static const char *dc_get_fname(struct pinctrl_dev *pctldev, unsigned selector)
+{
+	return dc_functions[selector];
+}
+
+static int dc_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+			 const char * const **groups,
+			 unsigned * const num_groups)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pmap->pin_names;
+	*num_groups = PINS_COUNT;
+
+	return 0;
+}
+
+static void dc_client_sel(int pin_num, int *reg, int *bit)
+{
+	*bit = (pin_num % PINS_PER_COLLECTION) * 2;
+	*reg = GP_CLIENTSEL(pin_num/PINS_PER_COLLECTION);
+
+	if (*bit >= PINS_PER_COLLECTION) {
+		*bit -= PINS_PER_COLLECTION;
+		*reg += 1;
+	}
+}
+
+static int dc_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+		      unsigned group)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pctldev);
+	int bit_off, reg_off;
+	u8 reg;
+
+	dc_client_sel(group, &reg_off, &bit_off);
+
+	reg = readb_relaxed(pmap->regs + reg_off);
+	reg &= ~(3 << bit_off);
+	reg |= (selector << bit_off);
+	writeb_relaxed(reg, pmap->regs + reg_off);
+
+	return 0;
+}
+
+static int dc_pmx_request_gpio(struct pinctrl_dev *pcdev,
+			       struct pinctrl_gpio_range *range,
+			       unsigned offset)
+{
+	struct dc_pinmap *pmap = pinctrl_dev_get_drvdata(pcdev);
+	int bit_off, reg_off;
+	u8 reg;
+
+	dc_client_sel(offset, &reg_off, &bit_off);
+
+	reg = readb_relaxed(pmap->regs + reg_off);
+	if ((reg & (3 << bit_off)) != 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static struct pinmux_ops dc_pmxops = {
+	.get_functions_count	= dc_get_functions_count,
+	.get_function_name	= dc_get_fname,
+	.get_function_groups	= dc_get_groups,
+	.set_mux		= dc_set_mux,
+	.gpio_request_enable	= dc_pmx_request_gpio,
+};
+
+static int dc_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+	return pinctrl_request_gpio(chip->base + gpio);
+}
+
+static void dc_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+	pinctrl_free_gpio(chip->base + gpio);
+}
+
+static int dc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 drive;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	drive = readb_relaxed(pmap->regs + reg_off);
+	drive &= ~BIT(bit_off);
+	writeb_relaxed(drive, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+
+	return 0;
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value);
+
+static int dc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+				    int value)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_DRIVE0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 drive;
+	unsigned long flags;
+
+	dc_gpio_set(chip, gpio, value);
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	drive = readb_relaxed(pmap->regs + reg_off);
+	drive |= BIT(bit_off);
+	writeb_relaxed(drive, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+
+	return 0;
+}
+
+static int dc_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_INPUT(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 input;
+
+	input = readb_relaxed(pmap->regs + reg_off);
+
+	return !!(input & BIT(bit_off));
+}
+
+static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
+{
+	struct dc_pinmap *pmap = container_of(chip, struct dc_pinmap, chip);
+	int reg_off = GP_OUTPUT0(gpio/PINS_PER_COLLECTION);
+	int bit_off = gpio % PINS_PER_COLLECTION;
+	u8 output;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pmap->lock, flags);
+	output = readb_relaxed(pmap->regs + reg_off);
+	if (value)
+		output |= BIT(bit_off);
+	else
+		output &= ~BIT(bit_off);
+	writeb_relaxed(output, pmap->regs + reg_off);
+	spin_unlock_irqrestore(&pmap->lock, flags);
+}
+
+static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
+{
+	struct gpio_chip *chip = &pmap->chip;
+	int ret;
+
+	chip->label		= DRIVER_NAME;
+	chip->dev		= pmap->dev;
+	chip->request		= dc_gpio_request;
+	chip->free		= dc_gpio_free;
+	chip->direction_input	= dc_gpio_direction_input;
+	chip->direction_output	= dc_gpio_direction_output;
+	chip->get		= dc_gpio_get;
+	chip->set		= dc_gpio_set;
+	chip->base		= -1;
+	chip->ngpio		= PINS_COUNT;
+	chip->of_node		= np;
+	chip->of_gpio_n_cells	= 2;
+
+	spin_lock_init(&pmap->lock);
+
+	ret = gpiochip_add(chip);
+	if (ret < 0)
+		return ret;
+
+	ret = gpiochip_add_pin_range(chip, dev_name(pmap->dev), 0, 0,
+				     PINS_COUNT);
+	if (ret < 0) {
+		gpiochip_remove(chip);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dc_pinctrl_probe(struct platform_device *pdev)
+{
+	struct dc_pinmap *pmap;
+	struct resource *r;
+	struct pinctrl_pin_desc *pins;
+	struct pinctrl_desc *pctl_desc;
+	char *pin_names;
+	int name_len = strlen("GP_xx") + 1;
+	int i, j, ret;
+
+	pmap = devm_kzalloc(&pdev->dev, sizeof(*pmap), GFP_KERNEL);
+	if (!pmap)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pmap->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(pmap->regs))
+		return PTR_ERR(pmap->regs);
+
+	pins = devm_kzalloc(&pdev->dev, sizeof(*pins)*PINS_COUNT, GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+	pin_names = devm_kzalloc(&pdev->dev, name_len * PINS_COUNT,
+				 GFP_KERNEL);
+	if (!pin_names)
+		return -ENOMEM;
+
+	for (i = 0; i < PIN_COLLECTIONS; i++) {
+		for (j = 0; j < PINS_PER_COLLECTION; j++) {
+			int pin_id = i*PINS_PER_COLLECTION + j;
+			char *name = &pin_names[pin_id * name_len];
+
+			snprintf(name, name_len, "GP_%c%c", 'A'+i, '0'+j);
+
+			pins[pin_id].number = pin_id;
+			pins[pin_id].name = name;
+			pmap->pin_names[pin_id] = name;
+		}
+	}
+
+	pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+	if (!pctl_desc)
+		return -ENOMEM;
+
+	pctl_desc->name = DRIVER_NAME;
+	pctl_desc->owner = THIS_MODULE;
+	pctl_desc->pctlops = &dc_pinctrl_ops;
+	pctl_desc->pmxops = &dc_pmxops;
+	pctl_desc->npins = PINS_COUNT;
+	pctl_desc->pins = pins;
+	pmap->desc = pctl_desc;
+
+	pmap->dev = &pdev->dev;
+
+	pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
+	if (!pmap->pctl) {
+		dev_err(&pdev->dev, "pinctrl driver registration failed\n");
+		return -EINVAL;
+	}
+
+	ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
+	if (ret < 0) {
+		pinctrl_unregister(pmap->pctl);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int dc_pinctrl_remove(struct platform_device *pdev)
+{
+	struct dc_pinmap *pmap = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(pmap->pctl);
+	gpiochip_remove(&pmap->chip);
+
+	return 0;
+}
+
+static const struct of_device_id dc_pinctrl_ids[] = {
+	{ .compatible = "cnxt,cx92755-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pinctrl_ids);
+
+static struct platform_driver dc_pinctrl_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = dc_pinctrl_ids,
+	},
+	.probe = dc_pinctrl_probe,
+	.remove = dc_pinctrl_remove,
+};
+module_platform_driver(dc_pinctrl_driver);
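
One note on the mapping above: dc_client_sel() packs two client-select bits per pin, so the eight pins of a collection straddle two byte-wide CLIENTSEL registers. The stand-alone sketch below (not part of the driver) reproduces that arithmetic for collections A and B to show which register and bit field each pin lands in.

#include <stdio.h>

/* Constants mirror the driver above. */
#define GP_CLIENTSEL(clct)	((clct) * 8 + 0x20)
#define PINS_PER_COLLECTION	8

static void dc_client_sel(int pin_num, int *reg, int *bit)
{
	*bit = (pin_num % PINS_PER_COLLECTION) * 2;
	*reg = GP_CLIENTSEL(pin_num / PINS_PER_COLLECTION);

	/* Bits 8..15 of a collection spill into the next byte register. */
	if (*bit >= PINS_PER_COLLECTION) {
		*bit -= PINS_PER_COLLECTION;
		*reg += 1;
	}
}

int main(void)
{
	int pin, reg, bit;

	for (pin = 0; pin < 16; pin++) {	/* GP_A0..GP_A7, GP_B0..GP_B7 */
		dc_client_sel(pin, &reg, &bit);
		printf("pin %2d -> reg 0x%02x, bits %d-%d\n",
		       pin, reg, bit, bit + 1);
	}
	return 0;
}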
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index 347c763..f0bebbe 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -37,6 +37,9 @@
 #define LPC18XX_SCU_PIN_EHD_MASK	0x300
 #define LPC18XX_SCU_PIN_EHD_POS		8
 
+#define LPC18XX_SCU_USB1_EPD		BIT(2)
+#define LPC18XX_SCU_USB1_EPWR		BIT(4)
+
 #define LPC18XX_SCU_I2C0_EFP		BIT(0)
 #define LPC18XX_SCU_I2C0_EHD		BIT(2)
 #define LPC18XX_SCU_I2C0_EZI		BIT(3)
@@ -617,8 +620,31 @@
 
 static int lpc18xx_pconf_get_usb1(enum pin_config_param param, int *arg, u32 reg)
 {
-	/* TODO */
-	return -ENOTSUPP;
+	switch (param) {
+	case PIN_CONFIG_LOW_POWER_MODE:
+		if (reg & LPC18XX_SCU_USB1_EPWR)
+			*arg = 0;
+		else
+			*arg = 1;
+		break;
+
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (reg & LPC18XX_SCU_USB1_EPD)
+			return -EINVAL;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (reg & LPC18XX_SCU_USB1_EPD)
+			*arg = 1;
+		else
+			return -EINVAL;
+		break;
+
+	default:
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static int lpc18xx_pconf_get_i2c0(enum pin_config_param param, int *arg, u32 reg,
@@ -782,8 +808,28 @@
 				  enum pin_config_param param,
 				  u16 param_val, u32 *reg)
 {
-	/* TODO */
-	return -ENOTSUPP;
+	switch (param) {
+	case PIN_CONFIG_LOW_POWER_MODE:
+		if (param_val)
+			*reg &= ~LPC18XX_SCU_USB1_EPWR;
+		else
+			*reg |= LPC18XX_SCU_USB1_EPWR;
+		break;
+
+	case PIN_CONFIG_BIAS_DISABLE:
+		*reg &= ~LPC18XX_SCU_USB1_EPD;
+		break;
+
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		*reg |= LPC18XX_SCU_USB1_EPD;
+		break;
+
+	default:
+		dev_err(pctldev->dev, "Property not supported\n");
+		return -ENOTSUPP;
+	}
+
+	return 0;
 }
 
 static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 63100be..3dc2ae1 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1310,9 +1310,11 @@
 	return 0;
 }
 
-static void pistachio_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pistachio_gpio_irq_handler(unsigned int __irq,
+				       struct irq_desc *desc)
 {
-	struct gpio_chip *gc = irq_get_handler_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct pistachio_gpio_bank *bank = gc_to_bank(gc);
 	struct irq_chip *chip = irq_get_chip(irq);
 	unsigned long pending;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 9affcd7..c5246c0 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -945,6 +945,7 @@
 	if (ret < 0)
 		return ret;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -956,6 +957,7 @@
 	writel_relaxed(data, bank->reg_base + GPIO_SWPORT_DDR);
 
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -1389,6 +1391,7 @@
 	unsigned long flags;
 	u32 data;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl(reg);
@@ -1398,6 +1401,7 @@
 	writel(data, reg);
 
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 }
 
 /*
@@ -1409,7 +1413,9 @@
 	struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
 	u32 data;
 
+	clk_enable(bank->clk);
 	data = readl(bank->reg_base + GPIO_EXT_PORT);
+	clk_disable(bank->clk);
 	data >>= offset;
 	data &= 1;
 	return data;
@@ -1469,10 +1475,10 @@
  * Interrupt handling
  */
 
-static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
 	u32 pend;
 
 	dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1482,7 +1488,7 @@
 	pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int irq, virq;
 
 		irq = __ffs(pend);
 		pend &= ~BIT(irq);
@@ -1546,6 +1552,7 @@
 	if (ret < 0)
 		return ret;
 
+	clk_enable(bank->clk);
 	spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl_relaxed(bank->reg_base + GPIO_SWPORT_DDR);
@@ -1555,9 +1562,9 @@
 	spin_unlock_irqrestore(&bank->slock, flags);
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 
 	spin_lock_irqsave(&bank->slock, flags);
 	irq_gc_lock(gc);
@@ -1603,6 +1610,7 @@
 	default:
 		irq_gc_unlock(gc);
 		spin_unlock_irqrestore(&bank->slock, flags);
+		clk_disable(bank->clk);
 		return -EINVAL;
 	}
 
@@ -1611,6 +1619,7 @@
 
 	irq_gc_unlock(gc);
 	spin_unlock_irqrestore(&bank->slock, flags);
+	clk_disable(bank->clk);
 
 	return 0;
 }
@@ -1620,8 +1629,10 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct rockchip_pin_bank *bank = gc->private;
 
+	clk_enable(bank->clk);
 	bank->saved_masks = irq_reg_readl(gc, GPIO_INTMASK);
 	irq_reg_writel(gc, ~gc->wake_active, GPIO_INTMASK);
+	clk_disable(bank->clk);
 }
 
 static void rockchip_irq_resume(struct irq_data *d)
@@ -1629,7 +1640,27 @@
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct rockchip_pin_bank *bank = gc->private;
 
+	clk_enable(bank->clk);
 	irq_reg_writel(gc, bank->saved_masks, GPIO_INTMASK);
+	clk_disable(bank->clk);
+}
+
+static void rockchip_irq_gc_mask_clr_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct rockchip_pin_bank *bank = gc->private;
+
+	clk_enable(bank->clk);
+	irq_gc_mask_clr_bit(d);
+}
+
+static void rockchip_irq_gc_mask_set_bit(struct irq_data *d)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+	struct rockchip_pin_bank *bank = gc->private;
+
+	irq_gc_mask_set_bit(d);
+	clk_disable(bank->clk);
 }
 
 static int rockchip_interrupts_register(struct platform_device *pdev,
@@ -1640,7 +1671,7 @@
 	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
 	struct irq_chip_generic *gc;
 	int ret;
-	int i;
+	int i, j;
 
 	for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
 		if (!bank->valid) {
@@ -1649,11 +1680,19 @@
 			continue;
 		}
 
+		ret = clk_enable(bank->clk);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to enable clock for bank %s\n",
+				bank->name);
+			continue;
+		}
+
 		bank->domain = irq_domain_add_linear(bank->of_node, 32,
 						&irq_generic_chip_ops, NULL);
 		if (!bank->domain) {
 			dev_warn(&pdev->dev, "could not initialize irq domain for bank %s\n",
 				 bank->name);
+			clk_disable(bank->clk);
 			continue;
 		}
 
@@ -1664,6 +1703,7 @@
 			dev_err(&pdev->dev, "could not alloc generic chips for bank %s\n",
 				bank->name);
 			irq_domain_remove(bank->domain);
+			clk_disable(bank->clk);
 			continue;
 		}
 
@@ -1681,16 +1721,23 @@
 		gc->chip_types[0].regs.mask = GPIO_INTMASK;
 		gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
 		gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
-		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
-		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+		gc->chip_types[0].chip.irq_mask = rockchip_irq_gc_mask_set_bit;
+		gc->chip_types[0].chip.irq_unmask =
+						  rockchip_irq_gc_mask_clr_bit;
 		gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
 		gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
 		gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
 		gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
 		gc->wake_enabled = IRQ_MSK(bank->nr_pins);
 
-		irq_set_handler_data(bank->irq, bank);
-		irq_set_chained_handler(bank->irq, rockchip_irq_demux);
+		irq_set_chained_handler_and_data(bank->irq,
+						 rockchip_irq_demux, bank);
+
+		/* map the gpio irqs here, when the clock is still running */
+		for (j = 0 ; j < 32 ; j++)
+			irq_create_mapping(bank->domain, j);
+
+		clk_disable(bank->clk);
 	}
 
 	return 0;
@@ -1808,7 +1855,7 @@
 	if (IS_ERR(bank->clk))
 		return PTR_ERR(bank->clk);
 
-	return clk_prepare_enable(bank->clk);
+	return clk_prepare(bank->clk);
 }
 
 static const struct of_device_id rockchip_pinctrl_dt_match[];
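
The rockchip changes above stop keeping the bank clock enabled permanently: clk_prepare_enable() at probe time becomes clk_prepare() only, and each register access is now bracketed by clk_enable()/clk_disable(). The split matters because clk_enable()/clk_disable() may be called from atomic context (for example under bank->slock or in the irq path) while clk_prepare()/clk_unprepare() may sleep. A minimal sketch of the resulting pattern follows, using hypothetical names rather than the driver's own structures.

#include <linux/clk.h>
#include <linux/io.h>

/* Hypothetical stand-in for rockchip_pin_bank. */
struct example_bank {
	struct clk *clk;
	void __iomem *reg_base;
};

static int example_probe_clk(struct example_bank *bank)
{
	/* May sleep: done once at probe, outside any spinlock. */
	return clk_prepare(bank->clk);
}

static u32 example_read_reg(struct example_bank *bank, unsigned int off)
{
	u32 val;

	/* Atomic-safe: gate the clock only for the duration of the access. */
	clk_enable(bank->clk);
	val = readl_relaxed(bank->reg_base + off);
	clk_disable(bank->clk);

	return val;
}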
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 0b8d480..bf548c2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1684,7 +1684,7 @@
 	struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip;
 
-	chip = irq_get_chip(irq);
+	chip = irq_desc_get_chip(desc);
 	chained_irq_enter(chip, desc);
 	pcs_irq_handle(pcs_soc);
 	/* REVISIT: export and add handle_bad_irq(irq, desc)? */
@@ -1716,12 +1716,7 @@
 	irq_set_chip_data(irq, pcs_soc);
 	irq_set_chip_and_handler(irq, &pcs->chip,
 				 handle_level_irq);
-
-#ifdef CONFIG_ARM
-	set_irq_flags(irq, IRQF_VALID);
-#else
 	irq_set_noprobe(irq);
-#endif
 
 	return 0;
 }
@@ -1768,9 +1763,9 @@
 			return res;
 		}
 	} else {
-		irq_set_handler_data(pcs_soc->irq, pcs_soc);
-		irq_set_chained_handler(pcs_soc->irq,
-					pcs_irq_chain_handler);
+		irq_set_chained_handler_and_data(pcs_soc->irq,
+						 pcs_irq_chain_handler,
+						 pcs_soc);
 	}
 
 	/*
@@ -1983,7 +1978,6 @@
 };
 
 static const struct pcs_soc_data pinctrl_single_dra7 = {
-	.flags = PCS_QUIRK_SHARED_IRQ,
 	.irq_enable_mask = (1 << 24),	/* WAKEUPENABLE */
 	.irq_status_mask = (1 << 25),	/* WAKEUPEVENT */
 };
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index c262e5f..f8338d2 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1463,7 +1463,7 @@
 static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 {
 	/* interrupt dedicated per bank */
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct st_gpio_bank *bank = gpio_chip_to_bank(gc);
 
@@ -1474,8 +1474,8 @@
 
 static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct st_pinctrl *info = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct st_pinctrl *info = irq_desc_get_handler_data(desc);
 	unsigned long status;
 	int n;
 
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 0f982b8..0fd7fd2 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -624,6 +624,22 @@
 	.owner = THIS_MODULE,
 };
 
+static bool gpio_node_has_range(void)
+{
+	struct device_node *np;
+	bool has_prop = false;
+
+	np = of_find_compatible_node(NULL, NULL, "nvidia,tegra30-gpio");
+	if (!np)
+		return has_prop;
+
+	has_prop = of_find_property(np, "gpio-ranges", NULL);
+
+	of_node_put(np);
+
+	return has_prop;
+}
+
 int tegra_pinctrl_probe(struct platform_device *pdev,
 			const struct tegra_pinctrl_soc_data *soc_data)
 {
@@ -708,7 +724,8 @@
 		return PTR_ERR(pmx->pctl);
 	}
 
-	pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
+	if (!gpio_node_has_range())
+		pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
 
 	platform_set_drvdata(pdev, pmx);
 
diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
index 7ce23b6..5aafea8 100644
--- a/drivers/pinctrl/pinctrl-zynq.c
+++ b/drivers/pinctrl/pinctrl-zynq.c
@@ -706,10 +706,10 @@
 		"gpio0_39_grp", "gpio0_41_grp", "gpio0_43_grp",
 		"gpio0_45_grp", "gpio0_47_grp", "gpio0_49_grp",
 		"gpio0_51_grp", "gpio0_53_grp", "sdio1_emio_wp_grp"};
-static const char * const smc0_nor_groups[] = {"smc0_nor"};
+static const char * const smc0_nor_groups[] = {"smc0_nor_grp"};
 static const char * const smc0_nor_cs1_groups[] = {"smc0_nor_cs1_grp"};
 static const char * const smc0_nor_addr25_groups[] = {"smc0_nor_addr25_grp"};
-static const char * const smc0_nand_groups[] = {"smc0_nand"};
+static const char * const smc0_nand_groups[] = {"smc0_nand_grp"};
 static const char * const can0_groups[] = {"can0_0_grp", "can0_1_grp",
 		"can0_2_grp", "can0_3_grp", "can0_4_grp", "can0_5_grp",
 		"can0_6_grp", "can0_7_grp", "can0_8_grp", "can0_9_grp",
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index e7ae890..67e08cb 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -322,8 +322,7 @@
 		selector++;
 	}
 
-	pr_err("%s does not support function %s\n",
-	       pinctrl_dev_get_name(pctldev), function);
+	dev_err(pctldev->dev, "function '%s' not supported\n", function);
 	return -EINVAL;
 }
 
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 58f5632..383263a 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -63,6 +63,14 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found on the Qualcomm 8916 platform.
 
+config PINCTRL_QDF2XXX
+	tristate "Qualcomm Technologies QDF2xxx pin controller driver"
+	depends on GPIOLIB && ACPI
+	select PINCTRL_MSM
+	help
+	  This is the GPIO driver for the TLMM block found on the
+	  Qualcomm Technologies QDF2xxx SOCs.
+
 config PINCTRL_QCOM_SPMI_PMIC
        tristate "Qualcomm SPMI PMIC pin controller driver"
        depends on GPIOLIB && OF && SPMI
@@ -76,4 +84,16 @@
          which are using SPMI for communication with SoC. Example PMIC's
          devices are pm8841, pm8941 and pma8084.
 
+config PINCTRL_QCOM_SSBI_PMIC
+       tristate "Qualcomm SSBI PMIC pin controller driver"
+       depends on GPIOLIB && OF
+       select PINMUX
+       select PINCONF
+       select GENERIC_PINCONF
+       help
+         This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+         Qualcomm GPIO and MPP blocks found in Qualcomm PMIC chips, which
+         use SSBI for communication with the SoC. Example PMIC devices
+         are pm8058 and pm8921.
+
 endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 3666c70..13b190e 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -7,5 +7,8 @@
 obj-$(CONFIG_PINCTRL_MSM8960)	+= pinctrl-msm8960.o
 obj-$(CONFIG_PINCTRL_MSM8X74)	+= pinctrl-msm8x74.o
 obj-$(CONFIG_PINCTRL_MSM8916)	+= pinctrl-msm8916.o
+obj-$(CONFIG_PINCTRL_QDF2XXX)	+= pinctrl-qdf2xxx.o
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
+obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e457d52..492cdd5 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -28,6 +28,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/reboot.h>
+#include <linux/pm.h>
 
 #include "../core.h"
 #include "../pinconf.h"
@@ -733,9 +734,9 @@
 	spin_unlock_irqrestore(&pctrl->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__irq_set_handler_locked(d->irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__irq_set_handler_locked(d->irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 
 	return 0;
 }
@@ -764,12 +765,13 @@
 	.irq_set_wake   = msm_gpio_irq_set_wake,
 };
 
-static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	const struct msm_pingroup *g;
 	struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int irq_pin;
 	int handled = 0;
 	u32 val;
@@ -855,6 +857,13 @@
 	return NOTIFY_DONE;
 }
 
+static struct msm_pinctrl *poweroff_pctrl;
+
+static void msm_ps_hold_poweroff(void)
+{
+	msm_ps_hold_restart(&poweroff_pctrl->restart_nb, 0, NULL);
+}
+
 static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
 {
 	int i;
@@ -867,6 +876,8 @@
 			if (register_restart_handler(&pctrl->restart_nb))
 				dev_err(pctrl->dev,
 					"failed to setup restart handler.\n");
+			poweroff_pctrl = pctrl;
+			pm_power_off = msm_ps_hold_poweroff;
 			break;
 		}
 }
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
new file mode 100644
index 0000000..e9ff3bc
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * GPIO and pin control functions on this SOC are handled by the "TLMM"
+ * device.  The driver which controls this device is pinctrl-msm.c.  Each
+ * SOC with a TLMM is expected to create a client driver that registers
+ * with pinctrl-msm.c.  This means that all TLMM drivers are pin control
+ * drivers.
+ *
+ * This pin control driver is intended to be used only on an ACPI-enabled
+ * system.  As such, UEFI will handle all pin control configuration, so
+ * this driver does not provide pin control functions.  It is effectively
+ * a GPIO-only driver.  The alternative is to duplicate the GPIO code of
+ * pinctrl-msm.c into another driver.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/acpi.h>
+
+#include "pinctrl-msm.h"
+
+static struct msm_pinctrl_soc_data qdf2xxx_pinctrl;
+
+static int qdf2xxx_pinctrl_probe(struct platform_device *pdev)
+{
+	struct pinctrl_pin_desc *pins;
+	struct msm_pingroup *groups;
+	unsigned int i;
+	u32 num_gpios;
+	int ret;
+
+	/* Query the number of GPIOs from ACPI */
+	ret = device_property_read_u32(&pdev->dev, "num-gpios", &num_gpios);
+	if (ret < 0)
+		return ret;
+
+	if (!num_gpios) {
+		dev_warn(&pdev->dev, "missing num-gpios property\n");
+		return -ENODEV;
+	}
+
+	pins = devm_kcalloc(&pdev->dev, num_gpios,
+		sizeof(struct pinctrl_pin_desc), GFP_KERNEL);
+	groups = devm_kcalloc(&pdev->dev, num_gpios,
+		sizeof(struct msm_pingroup), GFP_KERNEL);
+	if (!pins || !groups)
+		return -ENOMEM;
+
+	for (i = 0; i < num_gpios; i++) {
+		pins[i].number = i;
+
+		groups[i].npins = 1;
+		groups[i].pins = &pins[i].number;
+		groups[i].ctl_reg = 0x10000 * i;
+		groups[i].io_reg = 0x04 + 0x10000 * i;
+		groups[i].intr_cfg_reg = 0x08 + 0x10000 * i;
+		groups[i].intr_status_reg = 0x0c + 0x10000 * i;
+		groups[i].intr_target_reg = 0x08 + 0x10000 * i;
+
+		groups[i].mux_bit = 2;
+		groups[i].pull_bit = 0;
+		groups[i].drv_bit = 6;
+		groups[i].oe_bit = 9;
+		groups[i].in_bit = 0;
+		groups[i].out_bit = 1;
+		groups[i].intr_enable_bit = 0;
+		groups[i].intr_status_bit = 0;
+		groups[i].intr_target_bit = 5;
+		groups[i].intr_target_kpss_val = 1;
+		groups[i].intr_raw_status_bit = 4;
+		groups[i].intr_polarity_bit = 1;
+		groups[i].intr_detection_bit = 2;
+		groups[i].intr_detection_width = 2;
+	}
+
+	qdf2xxx_pinctrl.pins = pins;
+	qdf2xxx_pinctrl.groups = groups;
+	qdf2xxx_pinctrl.npins = num_gpios;
+	qdf2xxx_pinctrl.ngroups = num_gpios;
+	qdf2xxx_pinctrl.ngpios = num_gpios;
+
+	return msm_pinctrl_probe(pdev, &qdf2xxx_pinctrl);
+}
+
+static const struct acpi_device_id qdf2xxx_acpi_ids[] = {
+	{"QCOM8001"},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, qdf2xxx_acpi_ids);
+
+static struct platform_driver qdf2xxx_pinctrl_driver = {
+	.driver = {
+		.name = "qdf2xxx-pinctrl",
+		.acpi_match_table = ACPI_PTR(qdf2xxx_acpi_ids),
+	},
+	.probe = qdf2xxx_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init qdf2xxx_pinctrl_init(void)
+{
+	return platform_driver_register(&qdf2xxx_pinctrl_driver);
+}
+arch_initcall(qdf2xxx_pinctrl_init);
+
+static void __exit qdf2xxx_pinctrl_exit(void)
+{
+	platform_driver_unregister(&qdf2xxx_pinctrl_driver);
+}
+module_exit(qdf2xxx_pinctrl_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies QDF2xxx pin control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 3121de9..e3be3ce 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -61,7 +61,9 @@
 #define PMIC_MPP_REG_DIG_PULL_CTL		0x42
 #define PMIC_MPP_REG_DIG_IN_CTL			0x43
 #define PMIC_MPP_REG_EN_CTL			0x46
+#define PMIC_MPP_REG_AOUT_CTL			0x48
 #define PMIC_MPP_REG_AIN_CTL			0x4a
+#define PMIC_MPP_REG_SINK_CTL			0x4c
 
 /* PMIC_MPP_REG_MODE_CTL */
 #define PMIC_MPP_REG_MODE_VALUE_MASK		0x1
@@ -85,11 +87,25 @@
 #define PMIC_MPP_REG_AIN_ROUTE_SHIFT		0
 #define PMIC_MPP_REG_AIN_ROUTE_MASK		0x7
 
+#define PMIC_MPP_MODE_DIGITAL_INPUT		0
+#define PMIC_MPP_MODE_DIGITAL_OUTPUT		1
+#define PMIC_MPP_MODE_DIGITAL_BIDIR		2
+#define PMIC_MPP_MODE_ANALOG_BIDIR		3
+#define PMIC_MPP_MODE_ANALOG_INPUT		4
+#define PMIC_MPP_MODE_ANALOG_OUTPUT		5
+#define PMIC_MPP_MODE_CURRENT_SINK		6
+
+#define PMIC_MPP_SELECTOR_NORMAL		0
+#define PMIC_MPP_SELECTOR_PAIRED		1
+#define PMIC_MPP_SELECTOR_DTEST_FIRST		4
+
 #define PMIC_MPP_PHYSICAL_OFFSET		1
 
 /* Qualcomm specific pin configurations */
 #define PMIC_MPP_CONF_AMUX_ROUTE		(PIN_CONFIG_END + 1)
-#define PMIC_MPP_CONF_ANALOG_MODE		(PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_ANALOG_LEVEL		(PIN_CONFIG_END + 2)
+#define PMIC_MPP_CONF_DTEST_SELECTOR		(PIN_CONFIG_END + 3)
+#define PMIC_MPP_CONF_PAIRED			(PIN_CONFIG_END + 4)
 
 /**
  * struct pmic_mpp_pad - keep current MPP settings
@@ -99,13 +115,15 @@
  * @out_value: Cached pin output value.
  * @output_enabled: Set to true if MPP output logic is enabled.
  * @input_enabled: Set to true if MPP input buffer logic is enabled.
- * @analog_mode: Set to true when MPP should operate in Analog Input, Analog
- *	Output or Bidirectional Analog mode.
+ * @paired: Pin operates in paired mode
  * @num_sources: Number of power-sources supported by this MPP.
  * @power_source: Current power-source used.
  * @amux_input: Set the source for analog input.
+ * @aout_level: Analog output level
  * @pullup: Pullup resistor value. Valid in Bidirectional mode only.
  * @function: See pmic_mpp_functions[].
+ * @drive_strength: Amount of current in sink mode
+ * @dtest: DTEST route selector
  */
 struct pmic_mpp_pad {
 	u16		base;
@@ -114,12 +132,15 @@
 	bool		out_value;
 	bool		output_enabled;
 	bool		input_enabled;
-	bool		analog_mode;
+	bool		paired;
 	unsigned int	num_sources;
 	unsigned int	power_source;
 	unsigned int	amux_input;
+	unsigned int	aout_level;
 	unsigned int	pullup;
 	unsigned int	function;
+	unsigned int	drive_strength;
+	unsigned int	dtest;
 };
 
 struct pmic_mpp_state {
@@ -129,25 +150,32 @@
 	struct gpio_chip chip;
 };
 
-struct pmic_mpp_bindings {
-	const char	*property;
-	unsigned	param;
+static const struct pinconf_generic_params pmic_mpp_bindings[] = {
+	{"qcom,amux-route",	PMIC_MPP_CONF_AMUX_ROUTE,	0},
+	{"qcom,analog-level",	PMIC_MPP_CONF_ANALOG_LEVEL,	0},
+	{"qcom,dtest",		PMIC_MPP_CONF_DTEST_SELECTOR,	0},
+	{"qcom,paired",		PMIC_MPP_CONF_PAIRED,		0},
 };
 
-static struct pmic_mpp_bindings pmic_mpp_bindings[] = {
-	{"qcom,amux-route",	PMIC_MPP_CONF_AMUX_ROUTE},
-	{"qcom,analog-mode",	PMIC_MPP_CONF_ANALOG_MODE},
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pmic_conf_items[] = {
+	PCONFDUMP(PMIC_MPP_CONF_AMUX_ROUTE, "analog mux", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true),
+	PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false),
 };
+#endif
 
 static const char *const pmic_mpp_groups[] = {
 	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
 };
 
+#define PMIC_MPP_DIGITAL	0
+#define PMIC_MPP_ANALOG		1
+#define PMIC_MPP_SINK		2
+
 static const char *const pmic_mpp_functions[] = {
-	PMIC_MPP_FUNC_NORMAL, PMIC_MPP_FUNC_PAIRED,
-	"reserved1", "reserved2",
-	PMIC_MPP_FUNC_DTEST1, PMIC_MPP_FUNC_DTEST2,
-	PMIC_MPP_FUNC_DTEST3, PMIC_MPP_FUNC_DTEST4,
+	"digital", "analog", "sink"
 };
 
 static inline struct pmic_mpp_state *to_mpp_state(struct gpio_chip *chip)
@@ -204,118 +232,11 @@
 	return 0;
 }
 
-static int pmic_mpp_parse_dt_config(struct device_node *np,
-				    struct pinctrl_dev *pctldev,
-				    unsigned long **configs,
-				    unsigned int *nconfs)
-{
-	struct pmic_mpp_bindings *par;
-	unsigned long cfg;
-	int ret, i;
-	u32 val;
-
-	for (i = 0; i < ARRAY_SIZE(pmic_mpp_bindings); i++) {
-		par = &pmic_mpp_bindings[i];
-		ret = of_property_read_u32(np, par->property, &val);
-
-		/* property not found */
-		if (ret == -EINVAL)
-			continue;
-
-		/* use zero as default value, when no value is specified */
-		if (ret)
-			val = 0;
-
-		dev_dbg(pctldev->dev, "found %s with value %u\n",
-			par->property, val);
-
-		cfg = pinconf_to_config_packed(par->param, val);
-
-		ret = pinctrl_utils_add_config(pctldev, configs, nconfs, cfg);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int pmic_mpp_dt_subnode_to_map(struct pinctrl_dev *pctldev,
-				      struct device_node *np,
-				      struct pinctrl_map **map,
-				      unsigned *reserv, unsigned *nmaps,
-				      enum pinctrl_map_type type)
-{
-	unsigned long *configs = NULL;
-	unsigned nconfs = 0;
-	struct property *prop;
-	const char *group;
-	int ret;
-
-	ret = pmic_mpp_parse_dt_config(np, pctldev, &configs, &nconfs);
-	if (ret < 0)
-		return ret;
-
-	if (!nconfs)
-		return 0;
-
-	ret = of_property_count_strings(np, "pins");
-	if (ret < 0)
-		goto exit;
-
-	ret = pinctrl_utils_reserve_map(pctldev, map, reserv, nmaps, ret);
-	if (ret < 0)
-		goto exit;
-
-	of_property_for_each_string(np, "pins", prop, group) {
-		ret = pinctrl_utils_add_map_configs(pctldev, map,
-						    reserv, nmaps, group,
-						    configs, nconfs, type);
-		if (ret < 0)
-			break;
-	}
-exit:
-	kfree(configs);
-	return ret;
-}
-
-static int pmic_mpp_dt_node_to_map(struct pinctrl_dev *pctldev,
-				   struct device_node *np_config,
-				   struct pinctrl_map **map, unsigned *nmaps)
-{
-	struct device_node *np;
-	enum pinctrl_map_type type;
-	unsigned reserv;
-	int ret;
-
-	ret = 0;
-	*map = NULL;
-	*nmaps = 0;
-	reserv = 0;
-	type = PIN_MAP_TYPE_CONFIGS_GROUP;
-
-	for_each_child_of_node(np_config, np) {
-		ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
-							&reserv, nmaps, type);
-		if (ret)
-			break;
-
-		ret = pmic_mpp_dt_subnode_to_map(pctldev, np, map, &reserv,
-						 nmaps, type);
-		if (ret)
-			break;
-	}
-
-	if (ret < 0)
-		pinctrl_utils_dt_free_map(pctldev, *map, *nmaps);
-
-	return ret;
-}
-
 static const struct pinctrl_ops pmic_mpp_pinctrl_ops = {
 	.get_groups_count	= pmic_mpp_get_groups_count,
 	.get_group_name		= pmic_mpp_get_group_name,
 	.get_group_pins		= pmic_mpp_get_group_pins,
-	.dt_node_to_map		= pmic_mpp_dt_node_to_map,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
 	.dt_free_map		= pinctrl_utils_dt_free_map,
 };
 
@@ -340,6 +261,53 @@
 	return 0;
 }
 
+static int pmic_mpp_write_mode_ctl(struct pmic_mpp_state *state,
+				   struct pmic_mpp_pad *pad)
+{
+	unsigned int mode;
+	unsigned int sel;
+	unsigned int val;
+	unsigned int en;
+
+	switch (pad->function) {
+	case PMIC_MPP_ANALOG:
+		if (pad->input_enabled && pad->output_enabled)
+			mode = PMIC_MPP_MODE_ANALOG_BIDIR;
+		else if (pad->input_enabled)
+			mode = PMIC_MPP_MODE_ANALOG_INPUT;
+		else
+			mode = PMIC_MPP_MODE_ANALOG_OUTPUT;
+		break;
+	case PMIC_MPP_DIGITAL:
+		if (pad->input_enabled && pad->output_enabled)
+			mode = PMIC_MPP_MODE_DIGITAL_BIDIR;
+		else if (pad->input_enabled)
+			mode = PMIC_MPP_MODE_DIGITAL_INPUT;
+		else
+			mode = PMIC_MPP_MODE_DIGITAL_OUTPUT;
+		break;
+	case PMIC_MPP_SINK:
+	default:
+		mode = PMIC_MPP_MODE_CURRENT_SINK;
+		break;
+	}
+
+	if (pad->dtest)
+		sel = PMIC_MPP_SELECTOR_DTEST_FIRST + pad->dtest - 1;
+	else if (pad->paired)
+		sel = PMIC_MPP_SELECTOR_PAIRED;
+	else
+		sel = PMIC_MPP_SELECTOR_NORMAL;
+
+	en = !!pad->out_value;
+
+	val = mode << PMIC_MPP_REG_MODE_DIR_SHIFT |
+	      sel << PMIC_MPP_REG_MODE_FUNCTION_SHIFT |
+	      en;
+
+	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+}
+
 static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
 				unsigned pin)
 {
@@ -352,31 +320,7 @@
 
 	pad->function = function;
 
-	if (!pad->analog_mode) {
-		val = 0;	/* just digital input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 2; /* digital input and output */
-			else
-				val = 1; /* just digital output */
-		}
-	} else {
-		val = 4;	/* just analog input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 3; /* analog input and output */
-			else
-				val = 5; /* just analog output */
-		}
-	}
-
-	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
-
-	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
-	if (ret < 0)
-		return ret;
+	ret = pmic_mpp_write_mode_ctl(state, pad);
 
 	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
 
@@ -433,11 +377,20 @@
 	case PIN_CONFIG_OUTPUT:
 		arg = pad->out_value;
 		break;
+	case PMIC_MPP_CONF_DTEST_SELECTOR:
+		arg = pad->dtest;
+		break;
 	case PMIC_MPP_CONF_AMUX_ROUTE:
 		arg = pad->amux_input;
 		break;
-	case PMIC_MPP_CONF_ANALOG_MODE:
-		arg = pad->analog_mode;
+	case PMIC_MPP_CONF_PAIRED:
+		arg = pad->paired;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		arg = pad->drive_strength;
+		break;
+	case PMIC_MPP_CONF_ANALOG_LEVEL:
+		arg = pad->aout_level;
 		break;
 	default:
 		return -EINVAL;
@@ -459,6 +412,9 @@
 
 	pad = pctldev->desc->pins[pin].drv_data;
 
+	/* Make it possible to enable the pin, by not setting high impedance */
+	pad->is_enabled = true;
+
 	for (i = 0; i < nconfs; i++) {
 		param = pinconf_to_config_param(configs[i]);
 		arg = pinconf_to_config_argument(configs[i]);
@@ -497,13 +453,22 @@
 			pad->output_enabled = true;
 			pad->out_value = arg;
 			break;
+		case PMIC_MPP_CONF_DTEST_SELECTOR:
+			pad->dtest = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pad->drive_strength = arg;
+			break;
 		case PMIC_MPP_CONF_AMUX_ROUTE:
 			if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
 				return -EINVAL;
 			pad->amux_input = arg;
 			break;
-		case PMIC_MPP_CONF_ANALOG_MODE:
-			pad->analog_mode = true;
+		case PMIC_MPP_CONF_ANALOG_LEVEL:
+			pad->aout_level = arg;
+			break;
+		case PMIC_MPP_CONF_PAIRED:
+			pad->paired = !!arg;
 			break;
 		default:
 			return -EINVAL;
@@ -528,29 +493,17 @@
 	if (ret < 0)
 		return ret;
 
-	if (!pad->analog_mode) {
-		val = 0;	/* just digital input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 2; /* digital input and output */
-			else
-				val = 1; /* just digital output */
-		}
-	} else {
-		val = 4;	/* just analog input */
-		if (pad->output_enabled) {
-			if (pad->input_enabled)
-				val = 3; /* analog input and output */
-			else
-				val = 5; /* just analog output */
-		}
-	}
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_AOUT_CTL, pad->aout_level);
+	if (ret < 0)
+		return ret;
 
-	val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
+	ret = pmic_mpp_write_mode_ctl(state, pad);
+	if (ret < 0)
+		return ret;
 
-	return pmic_mpp_write(state, pad, PMIC_MPP_REG_MODE_CTL, val);
+	val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
+
+	return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
 }
 
 static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
@@ -558,20 +511,17 @@
 {
 	struct pmic_mpp_state *state = pinctrl_dev_get_drvdata(pctldev);
 	struct pmic_mpp_pad *pad;
-	int ret, val;
+	int ret;
 
 	static const char *const biases[] = {
 		"0.6kOhm", "10kOhm", "30kOhm", "Disabled"
 	};
 
-
 	pad = pctldev->desc->pins[pin].drv_data;
 
 	seq_printf(s, " mpp%-2d:", pin + PMIC_MPP_PHYSICAL_OFFSET);
 
-	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
-
-	if (val < 0 || !(val >> PMIC_MPP_REG_MASTER_EN_SHIFT)) {
+	if (!pad->is_enabled) {
 		seq_puts(s, " ---");
 	} else {
 
@@ -585,15 +535,20 @@
 		}
 
 		seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
-		seq_printf(s, " %-4s", pad->analog_mode ? "ana" : "dig");
 		seq_printf(s, " %-7s", pmic_mpp_functions[pad->function]);
 		seq_printf(s, " vin-%d", pad->power_source);
+		seq_printf(s, " %d", pad->aout_level);
 		seq_printf(s, " %-8s", biases[pad->pullup]);
 		seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
+		if (pad->dtest)
+			seq_printf(s, " dtest%d", pad->dtest);
+		if (pad->paired)
+			seq_puts(s, " paired");
 	}
 }
 
 static const struct pinconf_ops pmic_mpp_pinconf_ops = {
+	.is_generic = true,
 	.pin_config_group_get		= pmic_mpp_config_get,
 	.pin_config_group_set		= pmic_mpp_config_set,
 	.pin_config_group_dbg_show	= pmic_mpp_config_dbg_show,
@@ -709,6 +664,7 @@
 			     struct pmic_mpp_pad *pad)
 {
 	int type, subtype, val, dir;
+	unsigned int sel;
 
 	type = pmic_mpp_read(state, pad, PMIC_MPP_REG_TYPE);
 	if (type < 0)
@@ -751,43 +707,53 @@
 	dir &= PMIC_MPP_REG_MODE_DIR_MASK;
 
 	switch (dir) {
-	case 0:
+	case PMIC_MPP_MODE_DIGITAL_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 1:
+	case PMIC_MPP_MODE_DIGITAL_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 2:
+	case PMIC_MPP_MODE_DIGITAL_BIDIR:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
-		pad->analog_mode = false;
+		pad->function = PMIC_MPP_DIGITAL;
 		break;
-	case 3:
+	case PMIC_MPP_MODE_ANALOG_BIDIR:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
 		break;
-	case 4:
+	case PMIC_MPP_MODE_ANALOG_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
 		break;
-	case 5:
+	case PMIC_MPP_MODE_ANALOG_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
-		pad->analog_mode = true;
+		pad->function = PMIC_MPP_ANALOG;
+		break;
+	case PMIC_MPP_MODE_CURRENT_SINK:
+		pad->input_enabled = false;
+		pad->output_enabled = true;
+		pad->function = PMIC_MPP_SINK;
 		break;
 	default:
 		dev_err(state->dev, "unknown MPP direction\n");
 		return -ENODEV;
 	}
 
-	pad->function = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
-	pad->function &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+	sel = val >> PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
+	sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
+
+	if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST)
+		pad->dtest = sel + 1;
+	else if (sel == PMIC_MPP_SELECTOR_PAIRED)
+		pad->paired = true;
 
 	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_VIN_CTL);
 	if (val < 0)
@@ -810,8 +776,24 @@
 	pad->amux_input = val >> PMIC_MPP_REG_AIN_ROUTE_SHIFT;
 	pad->amux_input &= PMIC_MPP_REG_AIN_ROUTE_MASK;
 
-	/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
-	pad->is_enabled = true;
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_SINK_CTL);
+	if (val < 0)
+		return val;
+
+	pad->drive_strength = val;
+
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL);
+	if (val < 0)
+		return val;
+
+	pad->aout_level = val;
+
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_EN_CTL);
+	if (val < 0)
+		return val;
+
+	pad->is_enabled = !!val;
+
 	return 0;
 }
 
@@ -866,6 +848,12 @@
 	pctrldesc->pins = pindesc;
 	pctrldesc->npins = npins;
 
+	pctrldesc->num_custom_params = ARRAY_SIZE(pmic_mpp_bindings);
+	pctrldesc->custom_params = pmic_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrldesc->custom_conf_items = pmic_conf_items;
+#endif
+
 	for (i = 0; i < npins; i++, pindesc++) {
 		pad = &pads[i];
 		pindesc->drv_data = pad;
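
The direction decode above replaces the bare numeric case labels 0-5 of the old code with named PMIC_MPP_MODE_* constants and folds the result into pad->function (digital/analog/sink). As a quick orientation aid, a stand-alone sketch of that mapping follows; the values 0-5 come straight from the removed case labels, while the value used here for the current-sink mode is only an assumption for illustration, since this hunk does not show it.

/*
 * Illustrative mapping of the MODE_CTL direction field; values 0-5
 * mirror the case labels removed in the hunk above, 6 is assumed for
 * the current-sink case and may not match the hardware.
 * Not part of the driver.
 */
#include <stdio.h>

static const char * const mpp_dir_names[] = {
	[0] = "digital input",
	[1] = "digital output",
	[2] = "digital bidirectional",
	[3] = "analog bidirectional",
	[4] = "analog input",
	[5] = "analog output",
	[6] = "current sink (assumed encoding)",
};

int main(void)
{
	unsigned int dir;

	for (dir = 0; dir < 7; dir++)
		printf("dir %u -> %s\n", dir, mpp_dir_names[dir]);
	return 0;
}
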
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
new file mode 100644
index 0000000..c978b31
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* mode */
+#define PM8XXX_GPIO_MODE_ENABLED	BIT(0)
+#define PM8XXX_GPIO_MODE_INPUT		0
+#define PM8XXX_GPIO_MODE_OUTPUT		2
+
+/* output buffer */
+#define PM8XXX_GPIO_PUSH_PULL		0
+#define PM8XXX_GPIO_OPEN_DRAIN		1
+
+/* bias */
+#define PM8XXX_GPIO_BIAS_PU_30		0
+#define PM8XXX_GPIO_BIAS_PU_1P5		1
+#define PM8XXX_GPIO_BIAS_PU_31P5	2
+#define PM8XXX_GPIO_BIAS_PU_1P5_30	3
+#define PM8XXX_GPIO_BIAS_PD		4
+#define PM8XXX_GPIO_BIAS_NP		5
+
+/* GPIO registers */
+#define SSBI_REG_ADDR_GPIO_BASE		0x150
+#define SSBI_REG_ADDR_GPIO(n)		(SSBI_REG_ADDR_GPIO_BASE + n)
+
+#define PM8XXX_BANK_WRITE		BIT(7)
+
+#define PM8XXX_MAX_GPIOS               44
+
+/* custom pinconf parameters */
+#define PM8XXX_QCOM_DRIVE_STRENGH      (PIN_CONFIG_END + 1)
+#define PM8XXX_QCOM_PULL_UP_STRENGTH   (PIN_CONFIG_END + 2)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg:               address of the control register
+ * @irq:               IRQ from the PMIC interrupt controller
+ * @power_source:      logical selected voltage source, mapping in static data
+ *                     is used to translate to register values
+ * @mode:              operating mode for the pin (input/output)
+ * @open_drain:        output buffer configured as open-drain (vs push-pull)
+ * @output_value:      configured output value
+ * @bias:              register view of configured bias
+ * @pull_up_strength:  placeholder for selected pull up strength
+ *                     only used to configure bias when pull up is selected
+ * @output_strength:   selector of output-strength
+ * @disable:           pin disabled / configured as tristate
+ * @function:          pinmux selector
+ * @inverted:          pin logic is inverted
+ */
+struct pm8xxx_pin_data {
+	unsigned reg;
+	int irq;
+	u8 power_source;
+	u8 mode;
+	bool open_drain;
+	bool output_value;
+	u8 bias;
+	u8 pull_up_strength;
+	u8 output_strength;
+	bool disable;
+	u8 function;
+	bool inverted;
+};
+
+struct pm8xxx_gpio {
+	struct device *dev;
+	struct regmap *regmap;
+	struct pinctrl_dev *pctrl;
+	struct gpio_chip chip;
+
+	struct pinctrl_desc desc;
+	unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_gpio_bindings[] = {
+	{"qcom,drive-strength",		PM8XXX_QCOM_DRIVE_STRENGH,	0},
+	{"qcom,pull-up-strength",	PM8XXX_QCOM_PULL_UP_STRENGTH,	0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[ARRAY_SIZE(pm8xxx_gpio_bindings)] = {
+	PCONFDUMP(PM8XXX_QCOM_DRIVE_STRENGH, "drive-strength", NULL, true),
+	PCONFDUMP(PM8XXX_QCOM_PULL_UP_STRENGTH,  "pull up strength", NULL, true),
+};
+#endif
+
+static const char * const pm8xxx_groups[PM8XXX_MAX_GPIOS] = {
+	"gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio8",
+	"gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15",
+	"gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
+	"gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28", "gpio29",
+	"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
+	"gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+	"gpio44",
+};
+
+static const char * const pm8xxx_gpio_functions[] = {
+	PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
+	PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
+	PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
+	PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+};
+
+static int pm8xxx_read_bank(struct pm8xxx_gpio *pctrl,
+			    struct pm8xxx_pin_data *pin, int bank)
+{
+	unsigned int val = bank << 4;
+	int ret;
+
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to select bank %d\n", bank);
+		return ret;
+	}
+
+	ret = regmap_read(pctrl->regmap, pin->reg, &val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to read register %d\n", bank);
+		return ret;
+	}
+
+	return val;
+}
+
+static int pm8xxx_write_bank(struct pm8xxx_gpio *pctrl,
+			     struct pm8xxx_pin_data *pin,
+			     int bank,
+			     u8 val)
+{
+	int ret;
+
+	val |= PM8XXX_BANK_WRITE;
+	val |= bank << 4;
+
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret)
+		dev_err(pctrl->dev, "failed to write register\n");
+
+	return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+					 unsigned group)
+{
+	return pm8xxx_groups[group];
+}
+
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 const unsigned **pins,
+				 unsigned *num_pins)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctrl->desc.pins[group].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+	.get_groups_count	= pm8xxx_get_groups_count,
+	.get_group_name		= pm8xxx_get_group_name,
+	.get_group_pins         = pm8xxx_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(pm8xxx_gpio_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+					    unsigned function)
+{
+	return pm8xxx_gpio_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+				      unsigned function,
+				      const char * const **groups,
+				      unsigned * const num_groups)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pm8xxx_groups;
+	*num_groups = pctrl->npins;
+	return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+				 unsigned function,
+				 unsigned group)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+	u8 val;
+
+	pin->function = function;
+	val = pin->function << 1;
+
+	pm8xxx_write_bank(pctrl, pin, 4, val);
+
+	return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+	.get_functions_count	= pm8xxx_get_functions_count,
+	.get_function_name	= pm8xxx_get_function_name,
+	.get_function_groups	= pm8xxx_get_function_groups,
+	.set_mux		= pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *config)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param = pinconf_to_config_param(*config);
+	unsigned arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
+		break;
+	case PM8XXX_QCOM_PULL_UP_STRENGTH:
+		arg = pin->pull_up_strength;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = pin->disable;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
+			arg = pin->output_value;
+		else
+			arg = 0;
+		break;
+	case PIN_CONFIG_POWER_SOURCE:
+		arg = pin->power_source;
+		break;
+	case PM8XXX_QCOM_DRIVE_STRENGH:
+		arg = pin->output_strength;
+		break;
+	case PIN_CONFIG_DRIVE_PUSH_PULL:
+		arg = !pin->open_drain;
+		break;
+	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+		arg = pin->open_drain;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *configs,
+				 unsigned num_configs)
+{
+	struct pm8xxx_gpio *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param;
+	unsigned arg;
+	unsigned i;
+	u8 banks = 0;
+	u8 val;
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+			pin->bias = PM8XXX_GPIO_BIAS_NP;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+			pin->bias = PM8XXX_GPIO_BIAS_PD;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PM8XXX_QCOM_PULL_UP_STRENGTH:
+			if (arg > PM8XXX_GPIO_BIAS_PU_1P5_30) {
+				dev_err(pctrl->dev, "invalid pull-up strength\n");
+				return -EINVAL;
+			}
+			pin->pull_up_strength = arg;
+			/* FALLTHROUGH */
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pin->bias = pin->pull_up_strength;
+			banks |= BIT(2);
+			pin->disable = 0;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pin->disable = 1;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pin->mode = PM8XXX_GPIO_MODE_INPUT;
+			banks |= BIT(0) | BIT(1);
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+			pin->output_value = !!arg;
+			banks |= BIT(0) | BIT(1);
+			break;
+		case PIN_CONFIG_POWER_SOURCE:
+			pin->power_source = arg;
+			banks |= BIT(0);
+			break;
+		case PM8XXX_QCOM_DRIVE_STRENGH:
+			if (arg > PMIC_GPIO_STRENGTH_LOW) {
+				dev_err(pctrl->dev, "invalid drive strength\n");
+				return -EINVAL;
+			}
+			pin->output_strength = arg;
+			banks |= BIT(3);
+			break;
+		case PIN_CONFIG_DRIVE_PUSH_PULL:
+			pin->open_drain = 0;
+			banks |= BIT(1);
+			break;
+		case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+			pin->open_drain = 1;
+			banks |= BIT(1);
+			break;
+		default:
+			dev_err(pctrl->dev,
+				"unsupported config parameter: %x\n",
+				param);
+			return -EINVAL;
+		}
+	}
+
+	if (banks & BIT(0)) {
+		val = pin->power_source << 1;
+		val |= PM8XXX_GPIO_MODE_ENABLED;
+		pm8xxx_write_bank(pctrl, pin, 0, val);
+	}
+
+	if (banks & BIT(1)) {
+		val = pin->mode << 2;
+		val |= pin->open_drain << 1;
+		val |= pin->output_value;
+		pm8xxx_write_bank(pctrl, pin, 1, val);
+	}
+
+	if (banks & BIT(2)) {
+		val = pin->bias << 1;
+		pm8xxx_write_bank(pctrl, pin, 2, val);
+	}
+
+	if (banks & BIT(3)) {
+		val = pin->output_strength << 2;
+		val |= pin->disable;
+		pm8xxx_write_bank(pctrl, pin, 3, val);
+	}
+
+	if (banks & BIT(4)) {
+		val = pin->function << 1;
+		pm8xxx_write_bank(pctrl, pin, 4, val);
+	}
+
+	if (banks & BIT(5)) {
+		val = 0;
+		if (!pin->inverted)
+			val |= BIT(3);
+		pm8xxx_write_bank(pctrl, pin, 5, val);
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_group_get = pm8xxx_pin_config_get,
+	.pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+	.name = "pm8xxx_gpio",
+	.pctlops = &pm8xxx_pinctrl_ops,
+	.pmxops = &pm8xxx_pinmux_ops,
+	.confops = &pm8xxx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_gpio_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->mode = PM8XXX_GPIO_MODE_INPUT;
+	val = pin->mode << 2;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+
+	return 0;
+}
+
+static int pm8xxx_gpio_direction_output(struct gpio_chip *chip,
+					unsigned offset,
+					int value)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->mode = PM8XXX_GPIO_MODE_OUTPUT;
+	pin->output_value = !!value;
+
+	val = pin->mode << 2;
+	val |= pin->open_drain << 1;
+	val |= pin->output_value;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+
+	return 0;
+}
+
+static int pm8xxx_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	bool state;
+	int ret;
+
+	if (pin->mode == PM8XXX_GPIO_MODE_OUTPUT) {
+		ret = pin->output_value;
+	} else {
+		ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+		if (!ret)
+			ret = !!state;
+	}
+
+	return ret;
+}
+
+static void pm8xxx_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	u8 val;
+
+	pin->output_value = !!value;
+
+	val = pin->mode << 2;
+	val |= pin->open_drain << 1;
+	val |= pin->output_value;
+
+	pm8xxx_write_bank(pctrl, pin, 1, val);
+}
+
+static int pm8xxx_gpio_of_xlate(struct gpio_chip *chip,
+				const struct of_phandle_args *gpio_desc,
+				u32 *flags)
+{
+	if (chip->of_gpio_n_cells < 2)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpio_desc->args[1];
+
+	return gpio_desc->args[0] - 1;
+}
+
+
+static int pm8xxx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_gpio_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned offset,
+				  unsigned gpio)
+{
+	struct pm8xxx_gpio *pctrl = container_of(chip, struct pm8xxx_gpio, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	static const char * const modes[] = {
+		"in", "both", "out", "off"
+	};
+	static const char * const biases[] = {
+		"pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
+		"pull-up 1.5uA + 30uA boost", "pull-down 10uA", "no pull"
+	};
+	static const char * const buffer_types[] = {
+		"push-pull", "open-drain"
+	};
+	static const char * const strengths[] = {
+		"no", "high", "medium", "low"
+	};
+
+	seq_printf(s, " gpio%-2d:", offset + 1);
+	if (pin->disable) {
+		seq_puts(s, " ---");
+	} else {
+		seq_printf(s, " %-4s", modes[pin->mode]);
+		seq_printf(s, " %-7s", pm8xxx_gpio_functions[pin->function]);
+		seq_printf(s, " VIN%d", pin->power_source);
+		seq_printf(s, " %-27s", biases[pin->bias]);
+		seq_printf(s, " %-10s", buffer_types[pin->open_drain]);
+		seq_printf(s, " %-4s", pin->output_value ? "high" : "low");
+		seq_printf(s, " %-7s", strengths[pin->output_strength]);
+		if (pin->inverted)
+			seq_puts(s, " inverted");
+	}
+}
+
+static void pm8xxx_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	unsigned gpio = chip->base;
+	unsigned i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		pm8xxx_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define pm8xxx_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_gpio_template = {
+	.direction_input = pm8xxx_gpio_direction_input,
+	.direction_output = pm8xxx_gpio_direction_output,
+	.get = pm8xxx_gpio_get,
+	.set = pm8xxx_gpio_set,
+	.of_xlate = pm8xxx_gpio_of_xlate,
+	.to_irq = pm8xxx_gpio_to_irq,
+	.dbg_show = pm8xxx_gpio_dbg_show,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_pin_populate(struct pm8xxx_gpio *pctrl,
+			       struct pm8xxx_pin_data *pin)
+{
+	int val;
+
+	val = pm8xxx_read_bank(pctrl, pin, 0);
+	if (val < 0)
+		return val;
+
+	pin->power_source = (val >> 1) & 0x7;
+
+	val = pm8xxx_read_bank(pctrl, pin, 1);
+	if (val < 0)
+		return val;
+
+	pin->mode = (val >> 2) & 0x3;
+	pin->open_drain = !!(val & BIT(1));
+	pin->output_value = val & BIT(0);
+
+	val = pm8xxx_read_bank(pctrl, pin, 2);
+	if (val < 0)
+		return val;
+
+	pin->bias = (val >> 1) & 0x7;
+	if (pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30)
+		pin->pull_up_strength = pin->bias;
+	else
+		pin->pull_up_strength = PM8XXX_GPIO_BIAS_PU_30;
+
+	val = pm8xxx_read_bank(pctrl, pin, 3);
+	if (val < 0)
+		return val;
+
+	pin->output_strength = (val >> 2) & 0x3;
+	pin->disable = val & BIT(0);
+
+	val = pm8xxx_read_bank(pctrl, pin, 4);
+	if (val < 0)
+		return val;
+
+	pin->function = (val >> 1) & 0x7;
+
+	val = pm8xxx_read_bank(pctrl, pin, 5);
+	if (val < 0)
+		return val;
+
+	pin->inverted = !(val & BIT(3));
+
+	return 0;
+}
+
+static const struct of_device_id pm8xxx_gpio_of_match[] = {
+	{ .compatible = "qcom,pm8018-gpio", .data = (void *)6 },
+	{ .compatible = "qcom,pm8038-gpio", .data = (void *)12 },
+	{ .compatible = "qcom,pm8058-gpio", .data = (void *)40 },
+	{ .compatible = "qcom,pm8917-gpio", .data = (void *)38 },
+	{ .compatible = "qcom,pm8921-gpio", .data = (void *)44 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_gpio_of_match);
+
+static int pm8xxx_gpio_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pin_data *pin_data;
+	struct pinctrl_pin_desc *pins;
+	struct pm8xxx_gpio *pctrl;
+	int ret;
+	int i;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+
+	pctrl->dev = &pdev->dev;
+	pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+
+	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pctrl->regmap) {
+		dev_err(&pdev->dev, "parent regmap unavailable\n");
+		return -ENXIO;
+	}
+
+	pctrl->desc = pm8xxx_pinctrl_desc;
+	pctrl->desc.npins = pctrl->npins;
+
+	pins = devm_kcalloc(&pdev->dev,
+			    pctrl->desc.npins,
+			    sizeof(struct pinctrl_pin_desc),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	pin_data = devm_kcalloc(&pdev->dev,
+				pctrl->desc.npins,
+				sizeof(struct pm8xxx_pin_data),
+				GFP_KERNEL);
+	if (!pin_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pctrl->desc.npins; i++) {
+		pin_data[i].reg = SSBI_REG_ADDR_GPIO(i);
+		pin_data[i].irq = platform_get_irq(pdev, i);
+		if (pin_data[i].irq < 0) {
+			dev_err(&pdev->dev,
+				"missing interrupts for pin %d\n", i);
+			return pin_data[i].irq;
+		}
+
+		ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+		if (ret)
+			return ret;
+
+		pins[i].number = i;
+		pins[i].name = pm8xxx_groups[i];
+		pins[i].drv_data = &pin_data[i];
+	}
+	pctrl->desc.pins = pins;
+
+	pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_gpio_bindings);
+	pctrl->desc.custom_params = pm8xxx_gpio_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	if (!pctrl->pctrl) {
+		dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
+		return -ENODEV;
+	}
+
+	pctrl->chip = pm8xxx_gpio_template;
+	pctrl->chip.base = -1;
+	pctrl->chip.dev = &pdev->dev;
+	pctrl->chip.of_node = pdev->dev.of_node;
+	pctrl->chip.of_gpio_n_cells = 2;
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.ngpio = pctrl->npins;
+	ret = gpiochip_add(&pctrl->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register gpiochip\n");
+		goto unregister_pinctrl;
+	}
+
+	ret = gpiochip_add_pin_range(&pctrl->chip,
+				     dev_name(pctrl->dev),
+				     0, 0, pctrl->chip.ngpio);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to add pin range\n");
+		goto unregister_gpiochip;
+	}
+
+	platform_set_drvdata(pdev, pctrl);
+
+	dev_dbg(&pdev->dev, "Qualcomm pm8xxx gpio driver probed\n");
+
+	return 0;
+
+unregister_gpiochip:
+	gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+	pinctrl_unregister(pctrl->pctrl);
+
+	return ret;
+}
+
+static int pm8xxx_gpio_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_gpio *pctrl = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&pctrl->chip);
+
+	pinctrl_unregister(pctrl->pctrl);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_gpio_driver = {
+	.driver = {
+		.name = "qcom-ssbi-gpio",
+		.of_match_table = pm8xxx_gpio_of_match,
+	},
+	.probe = pm8xxx_gpio_probe,
+	.remove = pm8xxx_gpio_remove,
+};
+
+module_platform_driver(pm8xxx_gpio_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx GPIO driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
new file mode 100644
index 0000000..2d1b69f
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/of_device.h>
+
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+
+/* MPP registers */
+#define SSBI_REG_ADDR_MPP_BASE		0x50
+#define SSBI_REG_ADDR_MPP(n)		(SSBI_REG_ADDR_MPP_BASE + n)
+
+/* MPP Type: type */
+#define PM8XXX_MPP_TYPE_D_INPUT         0
+#define PM8XXX_MPP_TYPE_D_OUTPUT        1
+#define PM8XXX_MPP_TYPE_D_BI_DIR        2
+#define PM8XXX_MPP_TYPE_A_INPUT         3
+#define PM8XXX_MPP_TYPE_A_OUTPUT        4
+#define PM8XXX_MPP_TYPE_SINK            5
+#define PM8XXX_MPP_TYPE_DTEST_SINK      6
+#define PM8XXX_MPP_TYPE_DTEST_OUTPUT    7
+
+/* Digital Input: control */
+#define PM8XXX_MPP_DIN_TO_INT           0
+#define PM8XXX_MPP_DIN_TO_DBUS1         1
+#define PM8XXX_MPP_DIN_TO_DBUS2         2
+#define PM8XXX_MPP_DIN_TO_DBUS3         3
+
+/* Digital Output: control */
+#define PM8XXX_MPP_DOUT_CTRL_LOW        0
+#define PM8XXX_MPP_DOUT_CTRL_HIGH       1
+#define PM8XXX_MPP_DOUT_CTRL_MPP        2
+#define PM8XXX_MPP_DOUT_CTRL_INV_MPP    3
+
+/* Bidirectional: control */
+#define PM8XXX_MPP_BI_PULLUP_1KOHM      0
+#define PM8XXX_MPP_BI_PULLUP_OPEN       1
+#define PM8XXX_MPP_BI_PULLUP_10KOHM     2
+#define PM8XXX_MPP_BI_PULLUP_30KOHM     3
+
+/* Analog Output: control */
+#define PM8XXX_MPP_AOUT_CTRL_DISABLE            0
+#define PM8XXX_MPP_AOUT_CTRL_ENABLE             1
+#define PM8XXX_MPP_AOUT_CTRL_MPP_HIGH_EN        2
+#define PM8XXX_MPP_AOUT_CTRL_MPP_LOW_EN         3
+
+/* Current Sink: control */
+#define PM8XXX_MPP_CS_CTRL_DISABLE      0
+#define PM8XXX_MPP_CS_CTRL_ENABLE       1
+#define PM8XXX_MPP_CS_CTRL_MPP_HIGH_EN  2
+#define PM8XXX_MPP_CS_CTRL_MPP_LOW_EN   3
+
+/* DTEST Current Sink: control */
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN1    0
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN2    1
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN3    2
+#define PM8XXX_MPP_DTEST_CS_CTRL_EN4    3
+
+/* DTEST Digital Output: control */
+#define PM8XXX_MPP_DTEST_DBUS1          0
+#define PM8XXX_MPP_DTEST_DBUS2          1
+#define PM8XXX_MPP_DTEST_DBUS3          2
+#define PM8XXX_MPP_DTEST_DBUS4          3
+
+/* custom pinconf parameters */
+#define PM8XXX_CONFIG_AMUX		(PIN_CONFIG_END + 1)
+#define PM8XXX_CONFIG_DTEST_SELECTOR	(PIN_CONFIG_END + 2)
+#define PM8XXX_CONFIG_ALEVEL		(PIN_CONFIG_END + 3)
+#define PM8XXX_CONFIG_PAIRED		(PIN_CONFIG_END + 4)
+
+/**
+ * struct pm8xxx_pin_data - dynamic configuration for a pin
+ * @reg:		address of the control register
+ * @irq:		IRQ from the PMIC interrupt controller
+ * @mode:		operating mode for the pin (digital, analog or current sink)
+ * @input:		pin is input
+ * @output:		pin is output
+ * @high_z:		pin is floating
+ * @paired:		mpp operates in paired mode
+ * @output_value:	logical output value of the mpp
+ * @power_source:	selected power source
+ * @dtest:		DTEST route selector
+ * @amux:		input muxing in analog mode
+ * @aout_level:		selector of the output in analog mode
+ * @drive_strength:	drive strength of the current sink
+ * @pullup:		pull up value, when in digital bidirectional mode
+ */
+struct pm8xxx_pin_data {
+	unsigned reg;
+	int irq;
+
+	u8 mode;
+
+	bool input;
+	bool output;
+	bool high_z;
+	bool paired;
+	bool output_value;
+
+	u8 power_source;
+	u8 dtest;
+	u8 amux;
+	u8 aout_level;
+	u8 drive_strength;
+	unsigned pullup;
+};
+
+struct pm8xxx_mpp {
+	struct device *dev;
+	struct regmap *regmap;
+	struct pinctrl_dev *pctrl;
+	struct gpio_chip chip;
+
+	struct pinctrl_desc desc;
+	unsigned npins;
+};
+
+static const struct pinconf_generic_params pm8xxx_mpp_bindings[] = {
+	{"qcom,amux-route",	PM8XXX_CONFIG_AMUX,		0},
+	{"qcom,analog-level",	PM8XXX_CONFIG_ALEVEL,		0},
+	{"qcom,dtest",		PM8XXX_CONFIG_DTEST_SELECTOR,	0},
+	{"qcom,paired",		PM8XXX_CONFIG_PAIRED,		0},
+};
+
+#ifdef CONFIG_DEBUG_FS
+static const struct pin_config_item pm8xxx_conf_items[] = {
+	PCONFDUMP(PM8XXX_CONFIG_AMUX, "analog mux", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_ALEVEL, "analog level", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_DTEST_SELECTOR, "dtest", NULL, true),
+	PCONFDUMP(PM8XXX_CONFIG_PAIRED, "paired", NULL, false),
+};
+#endif
+
+#define PM8XXX_MAX_MPPS	12
+static const char * const pm8xxx_groups[PM8XXX_MAX_MPPS] = {
+	"mpp1", "mpp2", "mpp3", "mpp4", "mpp5", "mpp6", "mpp7", "mpp8",
+	"mpp9", "mpp10", "mpp11", "mpp12",
+};
+
+#define PM8XXX_MPP_DIGITAL	0
+#define PM8XXX_MPP_ANALOG	1
+#define PM8XXX_MPP_SINK		2
+
+static const char * const pm8xxx_mpp_functions[] = {
+	"digital", "analog", "sink",
+};
+
+static int pm8xxx_mpp_update(struct pm8xxx_mpp *pctrl,
+			     struct pm8xxx_pin_data *pin)
+{
+	unsigned level;
+	unsigned ctrl;
+	unsigned type;
+	int ret;
+	u8 val;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		if (pin->dtest) {
+			type = PM8XXX_MPP_TYPE_DTEST_OUTPUT;
+			ctrl = pin->dtest - 1;
+		} else if (pin->input && pin->output) {
+			type = PM8XXX_MPP_TYPE_D_BI_DIR;
+			if (pin->high_z)
+				ctrl = PM8XXX_MPP_BI_PULLUP_OPEN;
+			else if (pin->pullup == 600)
+				ctrl = PM8XXX_MPP_BI_PULLUP_1KOHM;
+			else if (pin->pullup == 10000)
+				ctrl = PM8XXX_MPP_BI_PULLUP_10KOHM;
+			else
+				ctrl = PM8XXX_MPP_BI_PULLUP_30KOHM;
+		} else if (pin->input) {
+			type = PM8XXX_MPP_TYPE_D_INPUT;
+			if (pin->dtest)
+				ctrl = pin->dtest;
+			else
+				ctrl = PM8XXX_MPP_DIN_TO_INT;
+		} else {
+			type = PM8XXX_MPP_TYPE_D_OUTPUT;
+			ctrl = !!pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		}
+
+		level = pin->power_source;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		if (pin->output) {
+			type = PM8XXX_MPP_TYPE_A_OUTPUT;
+			level = pin->aout_level;
+			ctrl = pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		} else {
+			type = PM8XXX_MPP_TYPE_A_INPUT;
+			level = pin->amux;
+			ctrl = 0;
+		}
+		break;
+	case PM8XXX_MPP_SINK:
+		level = (pin->drive_strength / 5) - 1;
+		if (pin->dtest) {
+			type = PM8XXX_MPP_TYPE_DTEST_SINK;
+			ctrl = pin->dtest - 1;
+		} else {
+			type = PM8XXX_MPP_TYPE_SINK;
+			ctrl = pin->output_value;
+			if (pin->paired)
+				ctrl |= BIT(1);
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	val = type << 5 | level << 2 | ctrl;
+	ret = regmap_write(pctrl->regmap, pin->reg, val);
+	if (ret)
+		dev_err(pctrl->dev, "failed to write register\n");
+
+	return ret;
+}
+
+static int pm8xxx_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	return pctrl->npins;
+}
+
+static const char *pm8xxx_get_group_name(struct pinctrl_dev *pctldev,
+					 unsigned group)
+{
+	return pm8xxx_groups[group];
+}
+
+
+static int pm8xxx_get_group_pins(struct pinctrl_dev *pctldev,
+				 unsigned group,
+				 const unsigned **pins,
+				 unsigned *num_pins)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = &pctrl->desc.pins[group].number;
+	*num_pins = 1;
+
+	return 0;
+}
+
+static const struct pinctrl_ops pm8xxx_pinctrl_ops = {
+	.get_groups_count	= pm8xxx_get_groups_count,
+	.get_group_name		= pm8xxx_get_group_name,
+	.get_group_pins         = pm8xxx_get_group_pins,
+	.dt_node_to_map		= pinconf_generic_dt_node_to_map_group,
+	.dt_free_map		= pinctrl_utils_dt_free_map,
+};
+
+static int pm8xxx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(pm8xxx_mpp_functions);
+}
+
+static const char *pm8xxx_get_function_name(struct pinctrl_dev *pctldev,
+					    unsigned function)
+{
+	return pm8xxx_mpp_functions[function];
+}
+
+static int pm8xxx_get_function_groups(struct pinctrl_dev *pctldev,
+				      unsigned function,
+				      const char * const **groups,
+				      unsigned * const num_groups)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = pm8xxx_groups;
+	*num_groups = pctrl->npins;
+	return 0;
+}
+
+static int pm8xxx_pinmux_set_mux(struct pinctrl_dev *pctldev,
+				 unsigned function,
+				 unsigned group)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[group].drv_data;
+
+	pin->mode = function;
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static const struct pinmux_ops pm8xxx_pinmux_ops = {
+	.get_functions_count	= pm8xxx_get_functions_count,
+	.get_function_name	= pm8xxx_get_function_name,
+	.get_function_groups	= pm8xxx_get_function_groups,
+	.set_mux		= pm8xxx_pinmux_set_mux,
+};
+
+static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *config)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param = pinconf_to_config_param(*config);
+	unsigned arg;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_PULL_UP:
+		arg = pin->pullup;
+		break;
+	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+		arg = pin->high_z;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		arg = pin->input;
+		break;
+	case PIN_CONFIG_OUTPUT:
+		arg = pin->output_value;
+		break;
+	case PIN_CONFIG_POWER_SOURCE:
+		arg = pin->power_source;
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		arg = pin->drive_strength;
+		break;
+	case PM8XXX_CONFIG_DTEST_SELECTOR:
+		arg = pin->dtest;
+		break;
+	case PM8XXX_CONFIG_AMUX:
+		arg = pin->amux;
+		break;
+	case PM8XXX_CONFIG_ALEVEL:
+		arg = pin->aout_level;
+		break;
+	case PM8XXX_CONFIG_PAIRED:
+		arg = pin->paired;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	*config = pinconf_to_config_packed(param, arg);
+
+	return 0;
+}
+
+static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev,
+				 unsigned int offset,
+				 unsigned long *configs,
+				 unsigned num_configs)
+{
+	struct pm8xxx_mpp *pctrl = pinctrl_dev_get_drvdata(pctldev);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	unsigned param;
+	unsigned arg;
+	unsigned i;
+
+	for (i = 0; i < num_configs; i++) {
+		param = pinconf_to_config_param(configs[i]);
+		arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_PULL_UP:
+			pin->pullup = arg;
+			break;
+		case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+			pin->high_z = true;
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			pin->input = true;
+			break;
+		case PIN_CONFIG_OUTPUT:
+			pin->output = true;
+			pin->output_value = !!arg;
+			break;
+		case PIN_CONFIG_POWER_SOURCE:
+			pin->power_source = arg;
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			pin->drive_strength = arg;
+			break;
+		case PM8XXX_CONFIG_DTEST_SELECTOR:
+			pin->dtest = arg;
+			break;
+		case PM8XXX_CONFIG_AMUX:
+			pin->amux = arg;
+			break;
+		case PM8XXX_CONFIG_ALEVEL:
+			pin->aout_level = arg;
+			break;
+		case PM8XXX_CONFIG_PAIRED:
+			pin->paired = !!arg;
+			break;
+		default:
+			dev_err(pctrl->dev,
+				"unsupported config parameter: %x\n",
+				param);
+			return -EINVAL;
+		}
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static const struct pinconf_ops pm8xxx_pinconf_ops = {
+	.is_generic = true,
+	.pin_config_group_get = pm8xxx_pin_config_get,
+	.pin_config_group_set = pm8xxx_pin_config_set,
+};
+
+static struct pinctrl_desc pm8xxx_pinctrl_desc = {
+	.name = "pm8xxx_mpp",
+	.pctlops = &pm8xxx_pinctrl_ops,
+	.pmxops = &pm8xxx_pinmux_ops,
+	.confops = &pm8xxx_pinconf_ops,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_mpp_direction_input(struct gpio_chip *chip,
+				       unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		pin->input = true;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		pin->input = true;
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_SINK:
+		return -EINVAL;
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static int pm8xxx_mpp_direction_output(struct gpio_chip *chip,
+					unsigned offset,
+					int value)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_ANALOG:
+		pin->input = false;
+		pin->output = true;
+		break;
+	case PM8XXX_MPP_SINK:
+		pin->input = false;
+		pin->output = true;
+		break;
+	}
+
+	pm8xxx_mpp_update(pctrl, pin);
+
+	return 0;
+}
+
+static int pm8xxx_mpp_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+	bool state;
+	int ret;
+
+	if (!pin->input)
+		return pin->output_value;
+
+	ret = irq_get_irqchip_state(pin->irq, IRQCHIP_STATE_LINE_LEVEL, &state);
+	if (!ret)
+		ret = !!state;
+
+	return ret;
+}
+
+static void pm8xxx_mpp_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	pin->output_value = !!value;
+
+	pm8xxx_mpp_update(pctrl, pin);
+}
+
+static int pm8xxx_mpp_of_xlate(struct gpio_chip *chip,
+				const struct of_phandle_args *gpio_desc,
+				u32 *flags)
+{
+	if (chip->of_gpio_n_cells < 2)
+		return -EINVAL;
+
+	if (flags)
+		*flags = gpio_desc->args[1];
+
+	return gpio_desc->args[0] - 1;
+}
+
+
+static int pm8xxx_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	return pin->irq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void pm8xxx_mpp_dbg_show_one(struct seq_file *s,
+				  struct pinctrl_dev *pctldev,
+				  struct gpio_chip *chip,
+				  unsigned offset,
+				  unsigned gpio)
+{
+	struct pm8xxx_mpp *pctrl = container_of(chip, struct pm8xxx_mpp, chip);
+	struct pm8xxx_pin_data *pin = pctrl->desc.pins[offset].drv_data;
+
+	static const char * const aout_lvls[] = {
+		"1v25", "1v25_2", "0v625", "0v3125", "mpp", "abus1", "abus2",
+		"abus3"
+	};
+
+	static const char * const amuxs[] = {
+		"amux5", "amux6", "amux7", "amux8", "amux9", "abus1", "abus2",
+		"abus3",
+	};
+
+	seq_printf(s, " mpp%-2d:", offset + 1);
+
+	switch (pin->mode) {
+	case PM8XXX_MPP_DIGITAL:
+		seq_puts(s, " digital ");
+		if (pin->dtest) {
+			seq_printf(s, "dtest%d\n", pin->dtest);
+		} else if (pin->input && pin->output) {
+			if (pin->high_z)
+				seq_puts(s, "bi-dir high-z");
+			else
+				seq_printf(s, "bi-dir %dOhm", pin->pullup);
+		} else if (pin->input) {
+			if (pin->dtest)
+				seq_printf(s, "in dtest%d", pin->dtest);
+			else
+				seq_puts(s, "in gpio");
+		} else if (pin->output) {
+			seq_puts(s, "out ");
+
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		}
+		break;
+	case PM8XXX_MPP_ANALOG:
+		seq_puts(s, " analog ");
+		if (pin->output) {
+			seq_printf(s, "out %s ", aout_lvls[pin->aout_level]);
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		} else {
+			seq_printf(s, "input mux %s", amuxs[pin->amux]);
+		}
+		break;
+	case PM8XXX_MPP_SINK:
+		seq_printf(s, " sink %dmA ", pin->drive_strength);
+		if (pin->dtest) {
+			seq_printf(s, "dtest%d", pin->dtest);
+		} else {
+			if (!pin->paired) {
+				seq_puts(s, pin->output_value ?
+					 "high" : "low");
+			} else {
+				seq_puts(s, pin->output_value ?
+					 "inverted" : "follow");
+			}
+		}
+		break;
+	}
+
+}
+
+static void pm8xxx_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+	unsigned gpio = chip->base;
+	unsigned i;
+
+	for (i = 0; i < chip->ngpio; i++, gpio++) {
+		pm8xxx_mpp_dbg_show_one(s, NULL, chip, i, gpio);
+		seq_puts(s, "\n");
+	}
+}
+
+#else
+#define pm8xxx_mpp_dbg_show NULL
+#endif
+
+static struct gpio_chip pm8xxx_mpp_template = {
+	.direction_input = pm8xxx_mpp_direction_input,
+	.direction_output = pm8xxx_mpp_direction_output,
+	.get = pm8xxx_mpp_get,
+	.set = pm8xxx_mpp_set,
+	.of_xlate = pm8xxx_mpp_of_xlate,
+	.to_irq = pm8xxx_mpp_to_irq,
+	.dbg_show = pm8xxx_mpp_dbg_show,
+	.owner = THIS_MODULE,
+};
+
+static int pm8xxx_pin_populate(struct pm8xxx_mpp *pctrl,
+			       struct pm8xxx_pin_data *pin)
+{
+	unsigned int val;
+	unsigned level;
+	unsigned ctrl;
+	unsigned type;
+	int ret;
+
+	ret = regmap_read(pctrl->regmap, pin->reg, &val);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to read register\n");
+		return ret;
+	}
+
+	type = (val >> 5) & 7;
+	level = (val >> 2) & 7;
+	ctrl = (val) & 3;
+
+	switch (type) {
+	case PM8XXX_MPP_TYPE_D_INPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->input = true;
+		pin->power_source = level;
+		pin->dtest = ctrl;
+		break;
+	case PM8XXX_MPP_TYPE_D_OUTPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->output = true;
+		pin->power_source = level;
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_D_BI_DIR:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->input = true;
+		pin->output = true;
+		pin->power_source = level;
+		switch (ctrl) {
+		case PM8XXX_MPP_BI_PULLUP_1KOHM:
+			pin->pullup = 600;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_OPEN:
+			pin->high_z = true;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_10KOHM:
+			pin->pullup = 10000;
+			break;
+		case PM8XXX_MPP_BI_PULLUP_30KOHM:
+			pin->pullup = 30000;
+			break;
+		}
+		break;
+	case PM8XXX_MPP_TYPE_A_INPUT:
+		pin->mode = PM8XXX_MPP_ANALOG;
+		pin->input = true;
+		pin->amux = level;
+		break;
+	case PM8XXX_MPP_TYPE_A_OUTPUT:
+		pin->mode = PM8XXX_MPP_ANALOG;
+		pin->output = true;
+		pin->aout_level = level;
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_SINK:
+		pin->mode = PM8XXX_MPP_SINK;
+		pin->drive_strength = 5 * (level + 1);
+		pin->output_value = !!(ctrl & BIT(0));
+		pin->paired = !!(ctrl & BIT(1));
+		break;
+	case PM8XXX_MPP_TYPE_DTEST_SINK:
+		pin->mode = PM8XXX_MPP_SINK;
+		pin->dtest = ctrl + 1;
+		pin->drive_strength = 5 * (level + 1);
+		break;
+	case PM8XXX_MPP_TYPE_DTEST_OUTPUT:
+		pin->mode = PM8XXX_MPP_DIGITAL;
+		pin->power_source = level;
+		if (ctrl >= 1)
+			pin->dtest = ctrl;
+		break;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id pm8xxx_mpp_of_match[] = {
+	{ .compatible = "qcom,pm8018-mpp", .data = (void *)6 },
+	{ .compatible = "qcom,pm8038-mpp", .data = (void *)6 },
+	{ .compatible = "qcom,pm8917-mpp", .data = (void *)10 },
+	{ .compatible = "qcom,pm8821-mpp", .data = (void *)4 },
+	{ .compatible = "qcom,pm8921-mpp", .data = (void *)12 },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pm8xxx_mpp_of_match);
+
+static int pm8xxx_mpp_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_pin_data *pin_data;
+	struct pinctrl_pin_desc *pins;
+	struct pm8xxx_mpp *pctrl;
+	int ret;
+	int i;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+
+	pctrl->dev = &pdev->dev;
+	pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev);
+
+	pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pctrl->regmap) {
+		dev_err(&pdev->dev, "parent regmap unavailable\n");
+		return -ENXIO;
+	}
+
+	pctrl->desc = pm8xxx_pinctrl_desc;
+	pctrl->desc.npins = pctrl->npins;
+
+	pins = devm_kcalloc(&pdev->dev,
+			    pctrl->desc.npins,
+			    sizeof(struct pinctrl_pin_desc),
+			    GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	pin_data = devm_kcalloc(&pdev->dev,
+				pctrl->desc.npins,
+				sizeof(struct pm8xxx_pin_data),
+				GFP_KERNEL);
+	if (!pin_data)
+		return -ENOMEM;
+
+	for (i = 0; i < pctrl->desc.npins; i++) {
+		pin_data[i].reg = SSBI_REG_ADDR_MPP(i);
+		pin_data[i].irq = platform_get_irq(pdev, i);
+		if (pin_data[i].irq < 0) {
+			dev_err(&pdev->dev,
+				"missing interrupts for pin %d\n", i);
+			return pin_data[i].irq;
+		}
+
+		ret = pm8xxx_pin_populate(pctrl, &pin_data[i]);
+		if (ret)
+			return ret;
+
+		pins[i].number = i;
+		pins[i].name = pm8xxx_groups[i];
+		pins[i].drv_data = &pin_data[i];
+	}
+	pctrl->desc.pins = pins;
+
+	pctrl->desc.num_custom_params = ARRAY_SIZE(pm8xxx_mpp_bindings);
+	pctrl->desc.custom_params = pm8xxx_mpp_bindings;
+#ifdef CONFIG_DEBUG_FS
+	pctrl->desc.custom_conf_items = pm8xxx_conf_items;
+#endif
+
+	pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
+	if (!pctrl->pctrl) {
+		dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
+		return -ENODEV;
+	}
+
+	pctrl->chip = pm8xxx_mpp_template;
+	pctrl->chip.base = -1;
+	pctrl->chip.dev = &pdev->dev;
+	pctrl->chip.of_node = pdev->dev.of_node;
+	pctrl->chip.of_gpio_n_cells = 2;
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.ngpio = pctrl->npins;
+	ret = gpiochip_add(&pctrl->chip);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register gpiochip\n");
+		goto unregister_pinctrl;
+	}
+
+	ret = gpiochip_add_pin_range(&pctrl->chip,
+				     dev_name(pctrl->dev),
+				     0, 0, pctrl->chip.ngpio);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to add pin range\n");
+		goto unregister_gpiochip;
+	}
+
+	platform_set_drvdata(pdev, pctrl);
+
+	dev_dbg(&pdev->dev, "Qualcomm pm8xxx mpp driver probed\n");
+
+	return 0;
+
+unregister_gpiochip:
+	gpiochip_remove(&pctrl->chip);
+
+unregister_pinctrl:
+	pinctrl_unregister(pctrl->pctrl);
+
+	return ret;
+}
+
+static int pm8xxx_mpp_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_mpp *pctrl = platform_get_drvdata(pdev);
+
+	gpiochip_remove(&pctrl->chip);
+
+	pinctrl_unregister(pctrl->pctrl);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_mpp_driver = {
+	.driver = {
+		.name = "qcom-ssbi-mpp",
+		.of_match_table = pm8xxx_mpp_of_match,
+	},
+	.probe = pm8xxx_mpp_probe,
+	.remove = pm8xxx_mpp_remove,
+};
+
+module_platform_driver(pm8xxx_mpp_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm PM8xxx MPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index b18dabb..5f45caa 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -148,9 +148,9 @@
 	}
 
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irqd->irq, handle_edge_irq);
+		irq_set_handler_locked(irqd, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irqd->irq, handle_level_irq);
+		irq_set_handler_locked(irqd, handle_level_irq);
 
 	con = readl(d->virt_base + reg_con);
 	con &= ~(EXYNOS_EINT_CON_MASK << shift);
@@ -256,7 +256,6 @@
 	irq_set_chip_data(virq, b);
 	irq_set_chip_and_handler(virq, &b->irq_chip->chip,
 					handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -422,9 +421,9 @@
 /* interrupt handler for wakeup interrupts 0..15 */
 static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
 {
-	struct exynos_weint_data *eintd = irq_get_handler_data(irq);
+	struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pin_bank *bank = eintd->bank;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int eint_irq;
 
 	chained_irq_enter(chip, desc);
@@ -454,8 +453,8 @@
 /* interrupt handler for wakeup interrupt 16 */
 static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct exynos_muxed_weint_data *eintd = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata;
 	unsigned long pend;
 	unsigned long mask;
@@ -542,8 +541,9 @@
 			}
 			weint_data[idx].irq = idx;
 			weint_data[idx].bank = bank;
-			irq_set_handler_data(irq, &weint_data[idx]);
-			irq_set_chained_handler(irq, exynos_irq_eint0_15);
+			irq_set_chained_handler_and_data(irq,
+							 exynos_irq_eint0_15,
+							 &weint_data[idx]);
 		}
 	}
 
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index f5619fb..9ce0b86 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -44,9 +44,7 @@
 #define PIN_NAME_LENGTH		10
 
 #define GROUP_SUFFIX		"-grp"
-#define GSUFFIX_LEN		sizeof(GROUP_SUFFIX)
 #define FUNCTION_SUFFIX		"-mux"
-#define FSUFFIX_LEN		sizeof(FUNCTION_SUFFIX)
 
 /*
  * pin configuration type and its value are packed together into a 16-bits.
@@ -205,22 +203,17 @@
 
 	/* Allocate memory for pin-map entries */
 	map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
-	if (!map) {
-		dev_err(dev, "could not alloc memory for pin-maps\n");
+	if (!map)
 		return -ENOMEM;
-	}
 	*nmaps = 0;
 
 	/*
 	 * Allocate memory for pin group name. The pin group name is derived
 	 * from the node name from which these map entries are created.
 	 */
-	gname = kzalloc(strlen(np->name) + GSUFFIX_LEN, GFP_KERNEL);
-	if (!gname) {
-		dev_err(dev, "failed to alloc memory for group name\n");
+	gname = kasprintf(GFP_KERNEL, "%s%s", np->name, GROUP_SUFFIX);
+	if (!gname)
 		goto free_map;
-	}
-	snprintf(gname, strlen(np->name) + 4, "%s%s", np->name, GROUP_SUFFIX);
 
 	/*
 	 * don't have config options? then skip over to creating function
@@ -231,10 +224,8 @@
 
 	/* Allocate memory for config entries */
 	cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
-	if (!cfg) {
-		dev_err(dev, "failed to alloc memory for configs\n");
+	if (!cfg)
 		goto free_gname;
-	}
 
 	/* Prepare a list of config settings */
 	for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
@@ -254,13 +245,10 @@
 skip_cfgs:
 	/* create the function map entry */
 	if (of_find_property(np, "samsung,exynos5440-pin-function", NULL)) {
-		fname = kzalloc(strlen(np->name) + FSUFFIX_LEN,	GFP_KERNEL);
-		if (!fname) {
-			dev_err(dev, "failed to alloc memory for func name\n");
+		fname = kasprintf(GFP_KERNEL,
+				  "%s%s", np->name, FUNCTION_SUFFIX);
+		if (!fname)
 			goto free_cfg;
-		}
-		snprintf(fname, strlen(np->name) + 4, "%s%s", np->name,
-			 FUNCTION_SUFFIX);
 
 		map[*nmaps].data.mux.group = gname;
 		map[*nmaps].data.mux.function = fname;
@@ -651,10 +639,8 @@
 	}
 
 	*pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
-	if (!*pin_list) {
-		dev_err(dev, "failed to allocate memory for pin list\n");
+	if (!*pin_list)
 		return -ENOMEM;
-	}
 
 	return of_property_read_u32_array(cfg_np, "samsung,exynos5440-pins",
 			*pin_list, *npins);
@@ -682,17 +668,15 @@
 		return -EINVAL;
 
 	groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
-	if (!groups) {
-		dev_err(dev, "failed allocate memory for ping group list\n");
+	if (!groups)
 		return -EINVAL;
-	}
+
 	grp = groups;
 
 	functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
-	if (!functions) {
-		dev_err(dev, "failed to allocate memory for function list\n");
+	if (!functions)
 		return -EINVAL;
-	}
+
 	func = functions;
 
 	/*
@@ -710,14 +694,10 @@
 		}
 
 		/* derive pin group name from the node name */
-		gname = devm_kzalloc(dev, strlen(cfg_np->name) + GSUFFIX_LEN,
-					GFP_KERNEL);
-		if (!gname) {
-			dev_err(dev, "failed to alloc memory for group name\n");
+		gname = devm_kasprintf(dev, GFP_KERNEL,
+				       "%s%s", cfg_np->name, GROUP_SUFFIX);
+		if (!gname)
 			return -ENOMEM;
-		}
-		snprintf(gname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
-			 GROUP_SUFFIX);
 
 		grp->name = gname;
 		grp->pins = pin_list;
@@ -731,22 +711,15 @@
 			continue;
 
 		/* derive function name from the node name */
-		fname = devm_kzalloc(dev, strlen(cfg_np->name) + FSUFFIX_LEN,
-					GFP_KERNEL);
-		if (!fname) {
-			dev_err(dev, "failed to alloc memory for func name\n");
+		fname = devm_kasprintf(dev, GFP_KERNEL,
+				       "%s%s", cfg_np->name, FUNCTION_SUFFIX);
+		if (!fname)
 			return -ENOMEM;
-		}
-		snprintf(fname, strlen(cfg_np->name) + 4, "%s%s", cfg_np->name,
-			 FUNCTION_SUFFIX);
 
 		func->name = fname;
 		func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
-		if (!func->groups) {
-			dev_err(dev, "failed to alloc memory for group list "
-					"in pin function");
+		if (!func->groups)
 			return -ENOMEM;
-		}
 		func->groups[0] = gname;
 		func->num_groups = gname ? 1 : 0;
 		func->function = function;
@@ -774,10 +747,8 @@
 	int pin, ret;
 
 	ctrldesc = devm_kzalloc(dev, sizeof(*ctrldesc), GFP_KERNEL);
-	if (!ctrldesc) {
-		dev_err(dev, "could not allocate memory for pinctrl desc\n");
+	if (!ctrldesc)
 		return -ENOMEM;
-	}
 
 	ctrldesc->name = "exynos5440-pinctrl";
 	ctrldesc->owner = THIS_MODULE;
@@ -787,10 +758,8 @@
 
 	pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
 				EXYNOS5440_MAX_PINS, GFP_KERNEL);
-	if (!pindesc) {
-		dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
+	if (!pindesc)
 		return -ENOMEM;
-	}
 	ctrldesc->pins = pindesc;
 	ctrldesc->npins = EXYNOS5440_MAX_PINS;
 
@@ -804,10 +773,8 @@
 	 */
 	pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
 					ctrldesc->npins, GFP_KERNEL);
-	if (!pin_names) {
-		dev_err(&pdev->dev, "mem alloc for pin names failed\n");
+	if (!pin_names)
 		return -ENOMEM;
-	}
 
 	/* for each pin, set the name of the pin */
 	for (pin = 0; pin < ctrldesc->npins; pin++) {
@@ -844,10 +811,8 @@
 	int ret;
 
 	gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
-	if (!gc) {
-		dev_err(&pdev->dev, "mem alloc for gpio_chip failed\n");
+	if (!gc)
 		return -ENOMEM;
-	}
 
 	priv->gc = gc;
 	gc->base = 0;
@@ -929,7 +894,6 @@
 	irq_set_chip_data(virq, d);
 	irq_set_chip_and_handler(virq, &exynos5440_gpio_irq_chip,
 					handle_level_irq);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -949,10 +913,8 @@
 
 	intd = devm_kzalloc(dev, sizeof(*intd) * EXYNOS5440_MAX_GPIO_INT,
 					GFP_KERNEL);
-	if (!intd) {
-		dev_err(dev, "failed to allocate memory for gpio intr data\n");
+	if (!intd)
 		return -ENOMEM;
-	}
 
 	for (i = 0; i < EXYNOS5440_MAX_GPIO_INT; i++) {
 		irq = irq_of_parse_and_map(dev->of_node, i);
@@ -995,10 +957,8 @@
 	}
 
 	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		dev_err(dev, "could not allocate memory for private data\n");
+	if (!priv)
 		return -ENOMEM;
-	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 01b43db..019844d 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -131,13 +131,13 @@
 	}
 }
 
-static void s3c24xx_eint_set_handler(unsigned int irq, unsigned int type)
+static void s3c24xx_eint_set_handler(struct irq_data *d, unsigned int type)
 {
 	/* Edge- and level-triggered interrupts need different handlers */
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 }
 
 static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
@@ -181,7 +181,7 @@
 		return -EINVAL;
 	}
 
-	s3c24xx_eint_set_handler(data->irq, type);
+	s3c24xx_eint_set_handler(data, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINT_REG(index);
@@ -243,7 +243,7 @@
 static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	unsigned int virq;
 
 	/* the first 4 eints have a simple 1 to 1 mapping */
@@ -297,9 +297,9 @@
 
 static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
 	struct irq_data *data = irq_desc_get_irq_data(desc);
-	struct s3c24xx_eint_data *eint_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	unsigned int virq;
 
 	chained_irq_enter(chip, desc);
@@ -357,11 +357,11 @@
 	.irq_set_type	= s3c24xx_eint_type,
 };
 
-static inline void s3c24xx_demux_eint(unsigned int irq, struct irq_desc *desc,
+static inline void s3c24xx_demux_eint(struct irq_desc *desc,
 				      u32 offset, u32 range)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c24xx_eint_data *data = irq_get_handler_data(irq);
+	struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_irq_chip(desc);
 	struct samsung_pinctrl_drv_data *d = data->drvdata;
 	unsigned int pend, mask;
 
@@ -374,7 +374,7 @@
 	pend &= range;
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int virq, irq;
 
 		irq = __ffs(pend);
 		pend &= ~(1 << irq);
@@ -390,12 +390,12 @@
 
 static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
 {
-	s3c24xx_demux_eint(irq, desc, 0, 0xf0);
+	s3c24xx_demux_eint(desc, 0, 0xf0);
 }
 
 static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
 {
-	s3c24xx_demux_eint(irq, desc, 8, 0xffff00);
+	s3c24xx_demux_eint(desc, 8, 0xffff00);
 }
 
 static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = {
@@ -437,7 +437,6 @@
 					 handle_edge_irq);
 	}
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
@@ -457,7 +456,6 @@
 
 	irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq);
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 	return 0;
 }
 
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index ec8cc3b..f5ea40a 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -260,13 +260,13 @@
 	return trigger;
 }
 
-static void s3c64xx_irq_set_handler(unsigned int irq, unsigned int type)
+static void s3c64xx_irq_set_handler(struct irq_data *d, unsigned int type)
 {
 	/* Edge- and level-triggered interrupts need different handlers */
 	if (type & IRQ_TYPE_EDGE_BOTH)
-		__irq_set_handler_locked(irq, handle_edge_irq);
+		irq_set_handler_locked(d, handle_edge_irq);
 	else
-		__irq_set_handler_locked(irq, handle_level_irq);
+		irq_set_handler_locked(d, handle_level_irq);
 }
 
 static void s3c64xx_irq_set_function(struct samsung_pinctrl_drv_data *d,
@@ -356,7 +356,7 @@
 		return -EINVAL;
 	}
 
-	s3c64xx_irq_set_handler(irqd->irq, type);
+	s3c64xx_irq_set_handler(irqd, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINTCON_REG(bank->eint_offset);
@@ -395,7 +395,6 @@
 	irq_set_chip_and_handler(virq,
 				&s3c64xx_gpio_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, bank);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
@@ -410,8 +409,8 @@
 
 static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c64xx_eint_gpio_data *data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
 
 	chained_irq_enter(chip, desc);
@@ -567,7 +566,7 @@
 		return -EINVAL;
 	}
 
-	s3c64xx_irq_set_handler(irqd->irq, type);
+	s3c64xx_irq_set_handler(irqd, type);
 
 	/* Set up interrupt trigger */
 	reg = d->virt_base + EINT0CON0_REG;
@@ -599,11 +598,10 @@
 	.irq_set_type	= s3c64xx_eint0_irq_set_type,
 };
 
-static inline void s3c64xx_irq_demux_eint(unsigned int irq,
-					struct irq_desc *desc, u32 range)
+static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct s3c64xx_eint0_data *data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct s3c64xx_eint0_data *data = irq_desc_get_handler_data(desc);
 	struct samsung_pinctrl_drv_data *drvdata = data->drvdata;
 	unsigned int pend, mask;
 
@@ -616,11 +614,10 @@
 	pend &= range;
 
 	while (pend) {
-		unsigned int virq;
+		unsigned int virq, irq;
 
 		irq = fls(pend) - 1;
 		pend &= ~(1 << irq);
-
 		virq = irq_linear_revmap(data->domains[irq], data->pins[irq]);
 		/*
 		 * Something must be really wrong if an unmapped EINT
@@ -636,22 +633,22 @@
 
 static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xf);
+	s3c64xx_irq_demux_eint(desc, 0xf);
 }
 
 static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff0);
+	s3c64xx_irq_demux_eint(desc, 0xff0);
 }
 
 static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff000);
+	s3c64xx_irq_demux_eint(desc, 0xff000);
 }
 
 static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
 {
-	s3c64xx_irq_demux_eint(irq, desc, 0xff00000);
+	s3c64xx_irq_demux_eint(desc, 0xff00000);
 }
 
 static irq_flow_handler_t s3c64xx_eint0_handlers[NUM_EINT0_IRQ] = {
@@ -673,7 +670,6 @@
 	irq_set_chip_and_handler(virq,
 				&s3c64xx_eint0_irq_chip, handle_level_irq);
 	irq_set_chip_data(virq, ddata);
-	set_irq_flags(virq, IRQF_VALID);
 
 	return 0;
 }
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 865d235..fb9c448 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -29,24 +29,25 @@
 static int sh_pfc_map_resources(struct sh_pfc *pfc,
 				struct platform_device *pdev)
 {
-	unsigned int num_windows = 0;
-	unsigned int num_irqs = 0;
+	unsigned int num_windows, num_irqs;
 	struct sh_pfc_window *windows;
 	unsigned int *irqs = NULL;
 	struct resource *res;
 	unsigned int i;
+	int irq;
 
 	/* Count the MEM and IRQ resources. */
-	for (i = 0; i < pdev->num_resources; ++i) {
-		switch (resource_type(&pdev->resource[i])) {
-		case IORESOURCE_MEM:
-			num_windows++;
+	for (num_windows = 0;; num_windows++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, num_windows);
+		if (!res)
 			break;
-
-		case IORESOURCE_IRQ:
-			num_irqs++;
+	}
+	for (num_irqs = 0;; num_irqs++) {
+		irq = platform_get_irq(pdev, num_irqs);
+		if (irq == -EPROBE_DEFER)
+			return irq;
+		if (irq < 0)
 			break;
-		}
 	}
 
 	if (num_windows == 0)
@@ -72,22 +73,17 @@
 	}
 
 	/* Fill them. */
-	for (i = 0, res = pdev->resource; i < pdev->num_resources; i++, res++) {
-		switch (resource_type(res)) {
-		case IORESOURCE_MEM:
-			windows->phys = res->start;
-			windows->size = resource_size(res);
-			windows->virt = devm_ioremap_resource(pfc->dev, res);
-			if (IS_ERR(windows->virt))
-				return -ENOMEM;
-			windows++;
-			break;
-
-		case IORESOURCE_IRQ:
-			*irqs++ = res->start;
-			break;
-		}
+	for (i = 0; i < num_windows; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		windows->phys = res->start;
+		windows->size = resource_size(res);
+		windows->virt = devm_ioremap_resource(pfc->dev, res);
+		if (IS_ERR(windows->virt))
+			return -ENOMEM;
+		windows++;
 	}
+	for (i = 0; i < num_irqs; i++)
+		*irqs++ = platform_get_irq(pdev, i);
 
 	return 0;
 }
@@ -591,9 +587,6 @@
 }
 
 static const struct platform_device_id sh_pfc_id_table[] = {
-#ifdef CONFIG_PINCTRL_PFC_R8A7740
-	{ "pfc-r8a7740", (kernel_ulong_t)&r8a7740_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_R8A7778
 	{ "pfc-r8a7778", (kernel_ulong_t)&r8a7778_pinmux_info },
 #endif
@@ -609,9 +602,6 @@
 #ifdef CONFIG_PINCTRL_PFC_SH7269
 	{ "pfc-sh7269", (kernel_ulong_t)&sh7269_pinmux_info },
 #endif
-#ifdef CONFIG_PINCTRL_PFC_SH73A0
-	{ "pfc-sh73a0", (kernel_ulong_t)&sh73a0_pinmux_info },
-#endif
 #ifdef CONFIG_PINCTRL_PFC_SH7720
 	{ "pfc-sh7720", (kernel_ulong_t)&sh7720_pinmux_info },
 #endif
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index d0bb145..82ef186 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -22,10 +22,6 @@
 #include <linux/kernel.h>
 #include <linux/pinctrl/pinconf-generic.h>
 
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
 #include "core.h"
 #include "sh_pfc.h"
 
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index baab81e..fc344a7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -27,10 +27,27 @@
 #include "core.h"
 #include "sh_pfc.h"
 
+#define PORT_GP_30(bank, fn, sfx)					\
+	PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),	\
+	PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),	\
+	PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),	\
+	PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),	\
+	PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),	\
+	PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),	\
+	PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),	\
+	PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),	\
+	PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),	\
+	PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),	\
+	PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),	\
+	PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),	\
+	PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx),	\
+	PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx),	\
+	PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx)
+
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
-	PORT_GP_32(1, fn, sfx),						\
-	PORT_GP_32(2, fn, sfx),						\
+	PORT_GP_30(1, fn, sfx),						\
+	PORT_GP_30(2, fn, sfx),						\
 	PORT_GP_32(3, fn, sfx),						\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx)
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 3ddf23e..25e8117 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -14,15 +14,30 @@
 #include "core.h"
 #include "sh_pfc.h"
 
+#define PORT_GP_26(bank, fn, sfx)					\
+	PORT_GP_1(bank, 0,  fn, sfx), PORT_GP_1(bank, 1,  fn, sfx),	\
+	PORT_GP_1(bank, 2,  fn, sfx), PORT_GP_1(bank, 3,  fn, sfx),	\
+	PORT_GP_1(bank, 4,  fn, sfx), PORT_GP_1(bank, 5,  fn, sfx),	\
+	PORT_GP_1(bank, 6,  fn, sfx), PORT_GP_1(bank, 7,  fn, sfx),	\
+	PORT_GP_1(bank, 8,  fn, sfx), PORT_GP_1(bank, 9,  fn, sfx),	\
+	PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx),	\
+	PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx),	\
+	PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx),	\
+	PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx),	\
+	PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx),	\
+	PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx),	\
+	PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx),	\
+	PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx)
+
 #define CPU_ALL_PORT(fn, sfx)						\
 	PORT_GP_32(0, fn, sfx),						\
-	PORT_GP_32(1, fn, sfx),						\
+	PORT_GP_26(1, fn, sfx),						\
 	PORT_GP_32(2, fn, sfx),						\
 	PORT_GP_32(3, fn, sfx),						\
 	PORT_GP_32(4, fn, sfx),						\
 	PORT_GP_32(5, fn, sfx),						\
 	PORT_GP_32(6, fn, sfx),						\
-	PORT_GP_32(7, fn, sfx)
+	PORT_GP_26(7, fn, sfx)
 
 enum {
 	PINMUX_RESERVED = 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
index bfdcac4..5248685 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
@@ -2770,6 +2770,24 @@
 static const unsigned int sdhi2_wp_mux[] = {
 	SD2_WP_MARK,
 };
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+	RCAR_GP_PIN(5, 24), /* PWEN */
+	RCAR_GP_PIN(5, 25), /* OVC */
+};
+static const unsigned int usb0_mux[] = {
+	USB0_PWEN_MARK,
+	USB0_OVC_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+	RCAR_GP_PIN(5, 26), /* PWEN */
+	RCAR_GP_PIN(5, 27), /* OVC */
+};
+static const unsigned int usb1_mux[] = {
+	USB1_PWEN_MARK,
+	USB1_OVC_MARK,
+};
 
 static const struct sh_pfc_pin_group pinmux_groups[] = {
 	SH_PFC_PIN_GROUP(eth_link),
@@ -2945,6 +2963,8 @@
 	SH_PFC_PIN_GROUP(sdhi2_ctrl),
 	SH_PFC_PIN_GROUP(sdhi2_cd),
 	SH_PFC_PIN_GROUP(sdhi2_wp),
+	SH_PFC_PIN_GROUP(usb0),
+	SH_PFC_PIN_GROUP(usb1),
 };
 
 static const char * const eth_groups[] = {
@@ -3219,6 +3239,14 @@
 	"sdhi2_wp",
 };
 
+static const char * const usb0_groups[] = {
+	"usb0",
+};
+
+static const char * const usb1_groups[] = {
+	"usb1",
+};
+
 static const struct sh_pfc_function pinmux_functions[] = {
 	SH_PFC_FUNCTION(eth),
 	SH_PFC_FUNCTION(hscif0),
@@ -3253,6 +3281,8 @@
 	SH_PFC_FUNCTION(sdhi0),
 	SH_PFC_FUNCTION(sdhi1),
 	SH_PFC_FUNCTION(sdhi2),
+	SH_PFC_FUNCTION(usb0),
+	SH_PFC_FUNCTION(usb1),
 };
 
 static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index d2efbfb..0975265 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -26,10 +26,6 @@
 #include <linux/regulator/machine.h>
 #include <linux/slab.h>
 
-#ifndef CONFIG_ARCH_MULTIPLATFORM
-#include <mach/irqs.h>
-#endif
-
 #include "core.h"
 #include "sh_pfc.h"
 
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index ff67896..863c3e3 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -40,6 +40,10 @@
 
 	struct pinctrl_pin_desc *pins;
 	struct sh_pfc_pin_config *configs;
+
+	const char *func_prop_name;
+	const char *groups_prop_name;
+	const char *pins_prop_name;
 };
 
 static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
@@ -96,10 +100,13 @@
 	return 0;
 }
 
-static int sh_pfc_dt_subnode_to_map(struct device *dev, struct device_node *np,
+static int sh_pfc_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+				    struct device_node *np,
 				    struct pinctrl_map **map,
 				    unsigned int *num_maps, unsigned int *index)
 {
+	struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+	struct device *dev = pmx->pfc->dev;
 	struct pinctrl_map *maps = *map;
 	unsigned int nmaps = *num_maps;
 	unsigned int idx = *index;
@@ -113,10 +120,27 @@
 	const char *pin;
 	int ret;
 
+	/* Support both the old Renesas-specific properties and the new standard
+	 * properties. Mixing old and new properties isn't allowed, either
+	 * within a subnode or across subnodes.
+	 */
+	if (!pmx->func_prop_name) {
+		if (of_find_property(np, "groups", NULL) ||
+		    of_find_property(np, "pins", NULL)) {
+			pmx->func_prop_name = "function";
+			pmx->groups_prop_name = "groups";
+			pmx->pins_prop_name = "pins";
+		} else {
+			pmx->func_prop_name = "renesas,function";
+			pmx->groups_prop_name = "renesas,groups";
+			pmx->pins_prop_name = "renesas,pins";
+		}
+	}
+
 	/* Parse the function and configuration properties. At least a function
 	 * or one configuration must be specified.
 	 */
-	ret = of_property_read_string(np, "renesas,function", &function);
+	ret = of_property_read_string(np, pmx->func_prop_name, &function);
 	if (ret < 0 && ret != -EINVAL) {
 		dev_err(dev, "Invalid function in DT\n");
 		return ret;
@@ -129,11 +153,12 @@
 	if (!function && num_configs == 0) {
 		dev_err(dev,
 			"DT node must contain at least a function or config\n");
+		ret = -ENODEV;
 		goto done;
 	}
 
 	/* Count the number of pins and groups and reallocate mappings. */
-	ret = of_property_count_strings(np, "renesas,pins");
+	ret = of_property_count_strings(np, pmx->pins_prop_name);
 	if (ret == -EINVAL) {
 		num_pins = 0;
 	} else if (ret < 0) {
@@ -143,7 +168,7 @@
 		num_pins = ret;
 	}
 
-	ret = of_property_count_strings(np, "renesas,groups");
+	ret = of_property_count_strings(np, pmx->groups_prop_name);
 	if (ret == -EINVAL) {
 		num_groups = 0;
 	} else if (ret < 0) {
@@ -174,7 +199,7 @@
 	*num_maps = nmaps;
 
 	/* Iterate over pins and groups and create the mappings. */
-	of_property_for_each_string(np, "renesas,groups", prop, group) {
+	of_property_for_each_string(np, pmx->groups_prop_name, prop, group) {
 		if (function) {
 			maps[idx].type = PIN_MAP_TYPE_MUX_GROUP;
 			maps[idx].data.mux.group = group;
@@ -198,7 +223,7 @@
 		goto done;
 	}
 
-	of_property_for_each_string(np, "renesas,pins", prop, pin) {
+	of_property_for_each_string(np, pmx->pins_prop_name, prop, pin) {
 		ret = sh_pfc_map_add_config(&maps[idx], pin,
 					    PIN_MAP_TYPE_CONFIGS_PIN,
 					    configs, num_configs);
@@ -246,7 +271,7 @@
 	index = 0;
 
 	for_each_child_of_node(np, child) {
-		ret = sh_pfc_dt_subnode_to_map(dev, child, map, num_maps,
+		ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps,
 					       &index);
 		if (ret < 0)
 			goto done;
@@ -254,7 +279,8 @@
 
 	/* If no mapping has been found in child nodes try the config node. */
 	if (*num_maps == 0) {
-		ret = sh_pfc_dt_subnode_to_map(dev, np, map, num_maps, &index);
+		ret = sh_pfc_dt_subnode_to_map(pctldev, np, map, num_maps,
+					       &index);
 		if (ret < 0)
 			goto done;
 	}
@@ -465,6 +491,9 @@
 	case PIN_CONFIG_BIAS_PULL_DOWN:
 		return pin->configs & SH_PFC_PIN_CFG_PULL_DOWN;
 
+	case PIN_CONFIG_POWER_SOURCE:
+		return pin->configs & SH_PFC_PIN_CFG_IO_VOLTAGE;
+
 	default:
 		return false;
 	}
@@ -477,7 +506,6 @@
 	struct sh_pfc *pfc = pmx->pfc;
 	enum pin_config_param param = pinconf_to_config_param(*config);
 	unsigned long flags;
-	unsigned int bias;
 
 	if (!sh_pfc_pinconf_validate(pfc, _pin, param))
 		return -ENOTSUPP;
@@ -485,7 +513,9 @@
 	switch (param) {
 	case PIN_CONFIG_BIAS_DISABLE:
 	case PIN_CONFIG_BIAS_PULL_UP:
-	case PIN_CONFIG_BIAS_PULL_DOWN:
+	case PIN_CONFIG_BIAS_PULL_DOWN: {
+		unsigned int bias;
+
 		if (!pfc->info->ops || !pfc->info->ops->get_bias)
 			return -ENOTSUPP;
 
@@ -498,6 +528,24 @@
 
 		*config = 0;
 		break;
+	}
+
+	case PIN_CONFIG_POWER_SOURCE: {
+		int ret;
+
+		if (!pfc->info->ops || !pfc->info->ops->get_io_voltage)
+			return -ENOTSUPP;
+
+		spin_lock_irqsave(&pfc->lock, flags);
+		ret = pfc->info->ops->get_io_voltage(pfc, _pin);
+		spin_unlock_irqrestore(&pfc->lock, flags);
+
+		if (ret < 0)
+			return ret;
+
+		*config = ret;
+		break;
+	}
 
 	default:
 		return -ENOTSUPP;
@@ -534,6 +582,24 @@
 
 			break;
 
+		case PIN_CONFIG_POWER_SOURCE: {
+			unsigned int arg =
+				pinconf_to_config_argument(configs[i]);
+			int ret;
+
+			if (!pfc->info->ops || !pfc->info->ops->set_io_voltage)
+				return -ENOTSUPP;
+
+			spin_lock_irqsave(&pfc->lock, flags);
+			ret = pfc->info->ops->set_io_voltage(pfc, _pin, arg);
+			spin_unlock_irqrestore(&pfc->lock, flags);
+
+			if (ret)
+				return ret;
+
+			break;
+		}
+
 		default:
 			return -ENOTSUPP;
 		}
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 0874cfe..15afd49 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -12,6 +12,7 @@
 #define __SH_PFC_H
 
 #include <linux/bug.h>
+#include <linux/pinctrl/pinconf-generic.h>
 #include <linux/stringify.h>
 
 enum {
@@ -26,6 +27,7 @@
 #define SH_PFC_PIN_CFG_OUTPUT		(1 << 1)
 #define SH_PFC_PIN_CFG_PULL_UP		(1 << 2)
 #define SH_PFC_PIN_CFG_PULL_DOWN	(1 << 3)
+#define SH_PFC_PIN_CFG_IO_VOLTAGE	(1 << 4)
 #define SH_PFC_PIN_CFG_NO_GPIO		(1 << 31)
 
 struct sh_pfc_pin {
@@ -121,6 +123,9 @@
 	unsigned int (*get_bias)(struct sh_pfc *pfc, unsigned int pin);
 	void (*set_bias)(struct sh_pfc *pfc, unsigned int pin,
 			 unsigned int bias);
+	int (*get_io_voltage)(struct sh_pfc *pfc, unsigned int pin);
+	int (*set_io_voltage)(struct sh_pfc *pfc, unsigned int pin,
+			      u16 voltage_mV);
 };
 
 struct sh_pfc_soc_info {
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9384e0a..9df0c5f 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -148,6 +148,19 @@
 #define DIV_DISABLE	0x1
 #define DIV_ENABLE	0x0
 
+/* Number of Function input disable registers */
+#define NUM_OF_IN_DISABLE_REG	0x2
+
+/* Offset of Function input disable registers */
+#define IN_DISABLE_0_REG_SET		0x0A00
+#define IN_DISABLE_0_REG_CLR		0x0A04
+#define IN_DISABLE_1_REG_SET		0x0A08
+#define IN_DISABLE_1_REG_CLR		0x0A0C
+#define IN_DISABLE_VAL_0_REG_SET	0x0A80
+#define IN_DISABLE_VAL_0_REG_CLR	0x0A84
+#define IN_DISABLE_VAL_1_REG_SET	0x0A88
+#define IN_DISABLE_VAL_1_REG_CLR	0x0A8C
+
 struct dt_params {
 	const char *property;
 	int value;
@@ -197,6 +210,16 @@
 	}
 
 /**
+ * struct atlas7_pad_status - Atlas7 Pad status
+ */
+struct atlas7_pad_status {
+	u8 func;
+	u8 pull;
+	u8 dstr;
+	u8 reserved;
+};
+
+/**
  * struct atlas7_pad_mux - Atlas7 mux
 * @bank:		The bank this pad's registers are in.
 * @pin:		The ID of this Pad.
@@ -285,6 +308,9 @@
 /* Platform info of atlas7 pinctrl */
 #define ATLAS7_PINCTRL_REG_BANKS	2
 #define ATLAS7_PINCTRL_BANK_0_PINS	18
+#define ATLAS7_PINCTRL_BANK_1_PINS	141
+#define ATLAS7_PINCTRL_TOTAL_PINS	\
+	(ATLAS7_PINCTRL_BANK_0_PINS + ATLAS7_PINCTRL_BANK_1_PINS)
 
 /**
  * Atlas7 GPIO Chip
@@ -316,6 +342,7 @@
 	unsigned int gpio_offset;
 	unsigned int ngpio;
 	const unsigned int *gpio_pins;
+	u32 sleep_data[NGPIO_OF_BANK];
 };
 
 struct atlas7_gpio_chip {
@@ -343,6 +370,9 @@
 	struct pinctrl_desc pctl_desc;
 	struct atlas7_pinctrl_data *pctl_data;
 	void __iomem *regs[ATLAS7_PINCTRL_REG_BANKS];
+	u32 status_ds[NUM_OF_IN_DISABLE_REG];
+	u32 status_dsv[NUM_OF_IN_DISABLE_REG];
+	struct atlas7_pad_status sleep_data[ATLAS7_PINCTRL_TOTAL_PINS];
 };
 
 /*
@@ -3480,6 +3510,160 @@
 	.confs_cnt = ARRAY_SIZE(atlas7_ioc_pad_confs),
 };
 
+/* Simple map data structure */
+struct map_data {
+	u8 idx;
+	u8 data;
+};
+
+/**
+ * struct atlas7_pull_info - Atlas7 Pad pull info
+ * @pad_type: The type of this Pad.
+ * @mask: The mask value of this pin's pull bits.
+ * @v2s: The map of pull register value to pull status.
+ * @s2v: The map of pull status to pull register value.
+ */
+struct atlas7_pull_info {
+	u8 pad_type;
+	u8 mask;
+	const struct map_data *v2s;
+	const struct map_data *s2v;
+};
+
+/* Pull Register value map to status */
+static const struct map_data p4we_pull_v2s[] = {
+	{ P4WE_PULL_UP, PULL_UP },
+	{ P4WE_HIGH_HYSTERESIS, HIGH_HYSTERESIS },
+	{ P4WE_HIGH_Z, HIGH_Z },
+	{ P4WE_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data p16st_pull_v2s[] = {
+	{ P16ST_PULL_UP, PULL_UP },
+	{ PD, PULL_UNKNOWN },
+	{ P16ST_HIGH_Z, HIGH_Z },
+	{ P16ST_PULL_DOWN, PULL_DOWN },
+};
+
+static const struct map_data pm31_pull_v2s[] = {
+	{ PM31_PULL_DISABLED, PULL_DOWN },
+	{ PM31_PULL_ENABLED, PULL_UP },
+};
+
+static const struct map_data pangd_pull_v2s[] = {
+	{ PANGD_PULL_UP, PULL_UP },
+	{ PD, PULL_UNKNOWN },
+	{ PANGD_HIGH_Z, HIGH_Z },
+	{ PANGD_PULL_DOWN, PULL_DOWN },
+};
+
+/* Pull status map to register value */
+static const struct map_data p4we_pull_s2v[] = {
+	{ PULL_UP, P4WE_PULL_UP },
+	{ HIGH_HYSTERESIS, P4WE_HIGH_HYSTERESIS },
+	{ HIGH_Z, P4WE_HIGH_Z },
+	{ PULL_DOWN, P4WE_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data p16st_pull_s2v[] = {
+	{ PULL_UP, P16ST_PULL_UP },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, P16ST_HIGH_Z },
+	{ PULL_DOWN, P16ST_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data pm31_pull_s2v[] = {
+	{ PULL_UP, PM31_PULL_ENABLED },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, -1 },
+	{ PULL_DOWN, PM31_PULL_DISABLED },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct map_data pangd_pull_s2v[] = {
+	{ PULL_UP, PANGD_PULL_UP },
+	{ HIGH_HYSTERESIS, -1 },
+	{ HIGH_Z, PANGD_HIGH_Z },
+	{ PULL_DOWN, PANGD_PULL_DOWN },
+	{ PULL_DISABLE, -1 },
+	{ PULL_ENABLE, -1 },
+};
+
+static const struct atlas7_pull_info atlas7_pull_map[] = {
+	{ PAD_T_4WE_PD, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+	{ PAD_T_4WE_PU, P4WE_PULL_MASK, p4we_pull_v2s, p4we_pull_s2v },
+	{ PAD_T_16ST, P16ST_PULL_MASK, p16st_pull_v2s, p16st_pull_s2v },
+	{ PAD_T_M31_0204_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0204_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0610_PD, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_M31_0610_PU, PM31_PULL_MASK, pm31_pull_v2s, pm31_pull_s2v },
+	{ PAD_T_AD, PANGD_PULL_MASK, pangd_pull_v2s, pangd_pull_s2v },
+};
+
+/**
+ * struct atlas7_ds_ma_info - Atlas7 Pad DriveStrength & currents info
+ * @ma:		The drive strength as a current value, in mA.
+ * @ds_16st:	The corresponding raw value for a 16st pad.
+ * @ds_4we:	The corresponding raw value for a 4we pad.
+ * @ds_0204m31:	The corresponding raw value for a 0204m31 pad.
+ * @ds_0610m31:	The corresponding raw value for a 0610m31 pad.
+ */
+struct atlas7_ds_ma_info {
+	u32 ma;
+	u32 ds_16st;
+	u32 ds_4we;
+	u32 ds_0204m31;
+	u32 ds_0610m31;
+};
+
+static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = {
+	{ 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL },
+	{ 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL },
+	{ 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0 },
+	{ 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL },
+	{ 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1 },
+	{ 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL },
+	{ 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL },
+	{ 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL },
+	{ 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL },
+	{ 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL },
+	{ 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL },
+	{ 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL },
+	{ 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL },
+	{ 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL },
+	{ 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL },
+	{ 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL },
+};
+
+/**
+ * struct atlas7_ds_info - Atlas7 Pad DriveStrength info
+ * @type:		The type of this Pad.
+ * @mask:		The mask value of this pin's drive strength bits.
+ * @imval:		The immediate value of the drive strength register.
+ */
+struct atlas7_ds_info {
+	u8 type;
+	u8 mask;
+	u8 imval;
+	u8 reserved;
+};
+
+static const struct atlas7_ds_info atlas7_ds_map[] = {
+	{ PAD_T_4WE_PD, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+	{ PAD_T_4WE_PU, DS_2BIT_MASK, DS_2BIT_IM_VAL },
+	{ PAD_T_16ST, DS_4BIT_MASK, DS_4BIT_IM_VAL },
+	{ PAD_T_M31_0204_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0204_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0610_PD, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_M31_0610_PU, DS_1BIT_MASK, DS_1BIT_IM_VAL },
+	{ PAD_T_AD, DS_NULL, DS_NULL },
+};
+
 static inline u32 atlas7_pin_to_bank(u32 pin)
 {
 	return (pin >= ATLAS7_PINCTRL_BANK_0_PINS) ? 1 : 0;
@@ -3682,49 +3866,22 @@
 	return 0;
 }
 
-struct atlas7_ds_info {
-	u32 ma;
-	u32 ds_16st;
-	u32 ds_4we;
-	u32 ds_0204m31;
-	u32 ds_0610m31;
-};
-
-const struct atlas7_ds_info atlas7_ds_map[] = {
-	{ 2, DS_16ST_0, DS_4WE_0, DS_M31_0, DS_NULL},
-	{ 4, DS_16ST_1, DS_NULL, DS_M31_1, DS_NULL},
-	{ 6, DS_16ST_2, DS_NULL, DS_NULL, DS_M31_0},
-	{ 8, DS_16ST_3, DS_4WE_1, DS_NULL, DS_NULL},
-	{ 10, DS_16ST_4, DS_NULL, DS_NULL, DS_M31_1},
-	{ 12, DS_16ST_5, DS_NULL, DS_NULL, DS_NULL},
-	{ 14, DS_16ST_6, DS_NULL, DS_NULL, DS_NULL},
-	{ 16, DS_16ST_7, DS_4WE_2, DS_NULL, DS_NULL},
-	{ 18, DS_16ST_8, DS_NULL, DS_NULL, DS_NULL},
-	{ 20, DS_16ST_9, DS_NULL, DS_NULL, DS_NULL},
-	{ 22, DS_16ST_10, DS_NULL, DS_NULL, DS_NULL},
-	{ 24, DS_16ST_11, DS_NULL, DS_NULL, DS_NULL},
-	{ 26, DS_16ST_12, DS_NULL, DS_NULL, DS_NULL},
-	{ 28, DS_16ST_13, DS_4WE_3, DS_NULL, DS_NULL},
-	{ 30, DS_16ST_14, DS_NULL, DS_NULL, DS_NULL},
-	{ 32, DS_16ST_15, DS_NULL, DS_NULL, DS_NULL},
-};
-
 static u32 convert_current_to_drive_strength(u32 type, u32 ma)
 {
 	int idx;
 
-	for (idx = 0; idx < ARRAY_SIZE(atlas7_ds_map); idx++) {
-		if (atlas7_ds_map[idx].ma != ma)
+	for (idx = 0; idx < ARRAY_SIZE(atlas7_ma2ds_map); idx++) {
+		if (atlas7_ma2ds_map[idx].ma != ma)
 			continue;
 
 		if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU)
-			return atlas7_ds_map[idx].ds_4we;
+			return atlas7_ma2ds_map[idx].ds_4we;
 		else if (type == PAD_T_16ST)
-			return atlas7_ds_map[idx].ds_16st;
+			return atlas7_ma2ds_map[idx].ds_16st;
 		else if (type == PAD_T_M31_0204_PD || type == PAD_T_M31_0204_PU)
-			return atlas7_ds_map[idx].ds_0204m31;
+			return atlas7_ma2ds_map[idx].ds_0204m31;
 		else if (type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU)
-			return atlas7_ds_map[idx].ds_0610m31;
+			return atlas7_ma2ds_map[idx].ds_0610m31;
 	}
 
 	return DS_NULL;
@@ -3735,78 +3892,21 @@
 {
 	struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 	struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
-	u32 type = conf->type;
-	u32 shift = conf->pupd_bit;
-	u32 bank = atlas7_pin_to_bank(pin);
-	void __iomem *pull_sel_reg, *pull_clr_reg;
+	const struct atlas7_pull_info *pull_info;
+	u32 bank;
+	unsigned long regv;
+	void __iomem *pull_sel_reg;
 
+	bank = atlas7_pin_to_bank(pin);
+	pull_info = &atlas7_pull_map[conf->type];
 	pull_sel_reg = pmx->regs[bank] + conf->pupd_reg;
-	pull_clr_reg = CLR_REG(pull_sel_reg);
 
-	if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
-		writel(P4WE_PULL_MASK << shift, pull_clr_reg);
+	/* Retrieve the corresponding register value from the table by sel */
+	regv = pull_info->s2v[sel].data & pull_info->mask;
 
-		if (sel == PULL_UP)
-			writel(P4WE_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_HYSTERESIS)
-			writel(P4WE_HIGH_HYSTERESIS << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(P4WE_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(P4WE_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for 4WEPAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_16ST) {
-		writel(P16ST_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(P16ST_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(P16ST_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(P16ST_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for 16STPAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_M31_0204_PD ||
-		type == PAD_T_M31_0204_PU ||
-		type == PAD_T_M31_0610_PD ||
-		type == PAD_T_M31_0610_PU) {
-		writel(PM31_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(PM31_PULL_ENABLED << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(PM31_PULL_DISABLED << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for M31PAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else if (type == PAD_T_AD) {
-		writel(PANGD_PULL_MASK << shift, pull_clr_reg);
-
-		if (sel == PULL_UP)
-			writel(PANGD_PULL_UP << shift, pull_sel_reg);
-		else if (sel == HIGH_Z)
-			writel(PANGD_HIGH_Z << shift, pull_sel_reg);
-		else if (sel == PULL_DOWN)
-			writel(PANGD_PULL_DOWN << shift, pull_sel_reg);
-		else {
-			pr_err("Unknown Pull select type for A/D PAD#%d\n",
-				pin);
-			return -ENOTSUPP;
-		}
-	} else {
-			pr_err("Unknown Pad type[%d] for pull select PAD#%d\n",
-				type, pin);
-			return -ENOTSUPP;
-	}
+	/* Clear the old value and set the new one in the pull register */
+	writel(pull_info->mask << conf->pupd_bit, CLR_REG(pull_sel_reg));
+	writel(regv << conf->pupd_bit, pull_sel_reg);
 
 	pr_debug("PIN_CFG ### SET PIN#%d PULL SELECTOR:%d == OK ####\n",
 		pin, sel);
@@ -3818,43 +3918,25 @@
 {
 	struct atlas7_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
 	struct atlas7_pad_config *conf = &pmx->pctl_data->confs[pin];
-	u32 type = conf->type;
-	u32 shift = conf->drvstr_bit;
-	u32 bank = atlas7_pin_to_bank(pin);
-	void __iomem *ds_sel_reg, *ds_clr_reg;
+	const struct atlas7_ds_info *ds_info;
+	u32 bank;
+	void __iomem *ds_sel_reg;
 
+	ds_info = &atlas7_ds_map[conf->type];
+	if (sel & (~(ds_info->mask)))
+		goto unsupport;
+
+	bank = atlas7_pin_to_bank(pin);
 	ds_sel_reg = pmx->regs[bank] + conf->drvstr_reg;
-	ds_clr_reg = CLR_REG(ds_sel_reg);
-	if (type == PAD_T_4WE_PD || type == PAD_T_4WE_PU) {
-		if (sel & (~DS_2BIT_MASK))
-			goto unsupport;
 
-		writel(DS_2BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
+	writel(ds_info->imval << conf->drvstr_bit, CLR_REG(ds_sel_reg));
+	writel(sel << conf->drvstr_bit, ds_sel_reg);
 
-		return 0;
-	} else if (type == PAD_T_16ST) {
-		if (sel & (~DS_4BIT_MASK))
-			goto unsupport;
-
-		writel(DS_4BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
-
-		return 0;
-	} else if (type == PAD_T_M31_0204_PD ||	type == PAD_T_M31_0204_PU ||
-		type == PAD_T_M31_0610_PD || type == PAD_T_M31_0610_PU) {
-		if (sel & (~DS_1BIT_MASK))
-			goto unsupport;
-
-		writel(DS_1BIT_IM_VAL << shift, ds_clr_reg);
-		writel(sel << shift, ds_sel_reg);
-
-		return 0;
-	}
+	return 0;
 
 unsupport:
 	pr_err("Pad#%d type[%d] doesn't support ds code[%d]!\n",
-		pin, type, sel);
+		pin, conf->type, sel);
 	return -ENOTSUPP;
 }
 
@@ -4101,14 +4183,135 @@
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_pinmux_suspend_noirq(struct device *dev)
+{
+	struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+	struct atlas7_pad_status *status;
+	struct atlas7_pad_config *conf;
+	const struct atlas7_ds_info *ds_info;
+	const struct atlas7_pull_info *pull_info;
+	int idx;
+	u32 bank;
+	unsigned long regv;
+
+	for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+		/* Get this Pad's descriptor from PINCTRL */
+		conf = &pmx->pctl_data->confs[idx];
+		bank = atlas7_pin_to_bank(idx);
+		status = &pmx->sleep_data[idx];
+
+		/* Save Function selector */
+		regv = readl(pmx->regs[bank] + conf->mux_reg);
+		status->func = (regv >> conf->mux_bit) & FUNC_CLEAR_MASK;
+
+		/* Check if Pad is in Analogue selector */
+		if (conf->ad_ctrl_reg == -1)
+			goto save_ds_sel;
+
+		regv = readl(pmx->regs[bank] + conf->ad_ctrl_reg);
+		if (!(regv & (conf->ad_ctrl_bit << ANA_CLEAR_MASK)))
+			status->func = FUNC_ANALOGUE;
+
+save_ds_sel:
+		if (conf->drvstr_reg == -1)
+			goto save_pull_sel;
+
+		/* Save Drive Strength selector */
+		ds_info = &atlas7_ds_map[conf->type];
+		regv = readl(pmx->regs[bank] + conf->drvstr_reg);
+		status->dstr = (regv >> conf->drvstr_bit) & ds_info->mask;
+
+save_pull_sel:
+		/* Save Pull selector */
+		pull_info = &atlas7_pull_map[conf->type];
+		regv = readl(pmx->regs[bank] + conf->pupd_reg);
+		regv = (regv >> conf->pupd_bit) & pull_info->mask;
+		status->pull = pull_info->v2s[regv].data;
+	}
+
+	/*
+	 * Save the input disable selector; this selector is not per Pin,
+	 * but per Mux function.
+	 */
+	for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+		pmx->status_ds[idx] = readl(pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_SET + 0x8 * idx);
+		pmx->status_dsv[idx] = readl(pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+	}
+
+	return 0;
+}
+
+static int atlas7_pinmux_resume_noirq(struct device *dev)
+{
+	struct atlas7_pmx *pmx = dev_get_drvdata(dev);
+	struct atlas7_pad_status *status;
+	struct atlas7_pad_config *conf;
+	int idx;
+	u32 bank;
+
+	for (idx = 0; idx < pmx->pctl_desc.npins; idx++) {
+		/* Get this Pad's descriptor from PINCTRL */
+		conf = &pmx->pctl_data->confs[idx];
+		bank = atlas7_pin_to_bank(idx);
+		status = &pmx->sleep_data[idx];
+
+		/* Restore Function selector */
+		__atlas7_pmx_pin_enable(pmx, idx, (u32)status->func & 0xff);
+
+		if (FUNC_ANALOGUE == status->func)
+			goto restore_pull_sel;
+
+		/* Restore Drive Strength selector */
+		__altas7_pinctrl_set_drive_strength_sel(pmx->pctl, idx,
+						(u32)status->dstr & 0xff);
+
+restore_pull_sel:
+		/* Restore Pull selector */
+		altas7_pinctrl_set_pull_sel(pmx->pctl, idx,
+						(u32)status->pull & 0xff);
+	}
+
+	/*
+	 * Restore the input disable selector; this selector is not per Pin,
+	 * but per Mux function.
+	 */
+	for (idx = 0; idx < NUM_OF_IN_DISABLE_REG; idx++) {
+		writel(~0, pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_CLR + 0x8 * idx);
+		writel(pmx->status_ds[idx], pmx->regs[BANK_DS] +
+					IN_DISABLE_0_REG_SET + 0x8 * idx);
+		writel(~0, pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_CLR + 0x8 * idx);
+		writel(pmx->status_dsv[idx], pmx->regs[BANK_DS] +
+					IN_DISABLE_VAL_0_REG_SET + 0x8 * idx);
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops atlas7_pinmux_pm_ops = {
+	.suspend_noirq = atlas7_pinmux_suspend_noirq,
+	.resume_noirq = atlas7_pinmux_resume_noirq,
+	.freeze_noirq = atlas7_pinmux_suspend_noirq,
+	.restore_noirq = atlas7_pinmux_resume_noirq,
+};
+#endif
+
 static const struct of_device_id atlas7_pinmux_ids[] = {
 	{ .compatible = "sirf,atlas7-ioc",},
+	{},
 };
 
 static struct platform_driver atlas7_pinmux_driver = {
 	.driver = {
 		.name = "atlas7-ioc",
 		.of_match_table = atlas7_pinmux_ids,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &atlas7_pinmux_pm_ops,
+#endif
 	},
 	.probe = atlas7_pinmux_probe,
 };
@@ -4286,14 +4489,15 @@
 	.irq_set_type = atlas7_gpio_irq_type,
 };
 
-static void atlas7_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
 	struct atlas7_gpio_bank *bank = NULL;
 	u32 status, ctrl;
 	int pin_in_bank = 0, idx;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 
 	for (idx = 0; idx < a7gc->nbank; idx++) {
 		bank = &a7gc->banks[idx];
@@ -4496,6 +4700,7 @@
 
 static const struct of_device_id atlas7_gpio_ids[] = {
 	{ .compatible = "sirf,atlas7-gpio", },
+	{},
 };
 
 static int atlas7_gpio_probe(struct platform_device *pdev)
@@ -4612,17 +4817,65 @@
 		BUG_ON(!bank->pctldev);
 	}
 
+	platform_set_drvdata(pdev, a7gc);
 	dev_info(&pdev->dev, "add to system.\n");
 	return 0;
 failed:
 	return ret;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int atlas7_gpio_suspend_noirq(struct device *dev)
+{
+	struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+	struct atlas7_gpio_bank *bank;
+	void __iomem *ctrl_reg;
+	u32 idx, pin;
+
+	for (idx = 0; idx < a7gc->nbank; idx++) {
+		bank = &a7gc->banks[idx];
+		for (pin = 0; pin < bank->ngpio; pin++) {
+			ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+			bank->sleep_data[pin] = readl(ctrl_reg);
+		}
+	}
+
+	return 0;
+}
+
+static int atlas7_gpio_resume_noirq(struct device *dev)
+{
+	struct atlas7_gpio_chip *a7gc = dev_get_drvdata(dev);
+	struct atlas7_gpio_bank *bank;
+	void __iomem *ctrl_reg;
+	u32 idx, pin;
+
+	for (idx = 0; idx < a7gc->nbank; idx++) {
+		bank = &a7gc->banks[idx];
+		for (pin = 0; pin < bank->ngpio; pin++) {
+			ctrl_reg = ATLAS7_GPIO_CTRL(bank, pin);
+			writel(bank->sleep_data[pin], ctrl_reg);
+		}
+	}
+
+	return 0;
+}
+
+static const struct dev_pm_ops atlas7_gpio_pm_ops = {
+	.suspend_noirq = atlas7_gpio_suspend_noirq,
+	.resume_noirq = atlas7_gpio_resume_noirq,
+	.freeze_noirq = atlas7_gpio_suspend_noirq,
+	.restore_noirq = atlas7_gpio_resume_noirq,
+};
+#endif
+
 static struct platform_driver atlas7_gpio_driver = {
 	.driver = {
 		.name = "atlas7-gpio",
-		.owner = THIS_MODULE,
 		.of_match_table = atlas7_gpio_ids,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &atlas7_gpio_pm_ops,
+#endif
 	},
 	.probe = atlas7_gpio_probe,
 };
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 8ba26e4..f8bd9fb 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,14 +545,15 @@
 	.irq_set_type = sirfsoc_gpio_irq_type,
 };
 
-static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
 	struct sirfsoc_gpio_chip *sgpio = to_sirfsoc_gpio(gc);
 	struct sirfsoc_gpio_bank *bank;
 	u32 status, ctrl;
 	int idx = 0;
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	int i;
 
 	for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 7376a97..862a096 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -135,7 +135,14 @@
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "ir0")),		/* TX */
+		  SUNXI_FUNCTION(0x2, "ir0"),		/* TX */
+		/*
+		 * The SPDIF block is not referenced at all in the A10 user
+		 * manual. However, it is described in the leaked code, and the
+		 * pin descriptions are declared in the A20 user manual, which
+		 * is pin-compatible with this device.
+		 */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF MCLK */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
@@ -176,11 +183,15 @@
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
 		  SUNXI_FUNCTION(0x2, "i2s"),		/* DI */
-		  SUNXI_FUNCTION(0x3, "ac97")),		/* DI */
+		  SUNXI_FUNCTION(0x3, "ac97"),		/* DI */
+		/* Undocumented mux function - See SPDIF MCLK above */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF IN */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 13),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
-		  SUNXI_FUNCTION(0x2, "spi2")),		/* CS1 */
+		  SUNXI_FUNCTION(0x2, "spi2"),		/* CS1 */
+		/* Undocumented mux function - See SPDIF MCLK above */
+		  SUNXI_FUNCTION(0x4, "spdif")),        /* SPDIF OUT */
 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 14),
 		  SUNXI_FUNCTION(0x0, "gpio_in"),
 		  SUNXI_FUNCTION(0x1, "gpio_out"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f09573e..fb4669c0 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -588,7 +588,6 @@
 static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
 {
 	struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
-	struct irq_desc *desc = container_of(d, struct irq_desc, irq_data);
 	u32 reg = sunxi_irq_cfg_reg(d->hwirq);
 	u8 index = sunxi_irq_cfg_offset(d->hwirq);
 	unsigned long flags;
@@ -615,16 +614,17 @@
 		return -EINVAL;
 	}
 
-	if (type & IRQ_TYPE_LEVEL_MASK) {
-		d->chip = &sunxi_pinctrl_level_irq_chip;
-		desc->handle_irq = handle_fasteoi_irq;
-	} else {
-		d->chip = &sunxi_pinctrl_edge_irq_chip;
-		desc->handle_irq = handle_edge_irq;
-	}
-
 	spin_lock_irqsave(&pctl->lock, flags);
 
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		__irq_set_chip_handler_name_locked(d->irq,
+						   &sunxi_pinctrl_level_irq_chip,
+						   handle_fasteoi_irq, NULL);
+	else
+		__irq_set_chip_handler_name_locked(d->irq,
+						   &sunxi_pinctrl_edge_irq_chip,
+						   handle_edge_irq, NULL);
+
 	regval = readl(pctl->membase + reg);
 	regval &= ~(IRQ_CFG_IRQ_MASK << index);
 	writel(regval | (mode << index), pctl->membase + reg);
@@ -685,6 +685,7 @@
 }
 
 static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
+	.name		= "sunxi_pio_edge",
 	.irq_ack	= sunxi_pinctrl_irq_ack,
 	.irq_mask	= sunxi_pinctrl_irq_mask,
 	.irq_unmask	= sunxi_pinctrl_irq_unmask,
@@ -695,6 +696,7 @@
 };
 
 static struct irq_chip sunxi_pinctrl_level_irq_chip = {
+	.name		= "sunxi_pio_level",
 	.irq_eoi	= sunxi_pinctrl_irq_ack,
 	.irq_mask	= sunxi_pinctrl_irq_mask,
 	.irq_unmask	= sunxi_pinctrl_irq_unmask,
@@ -709,10 +711,42 @@
 			  IRQCHIP_EOI_IF_HANDLED,
 };
 
-static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
+static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
+				      struct device_node *node,
+				      const u32 *intspec,
+				      unsigned int intsize,
+				      unsigned long *out_hwirq,
+				      unsigned int *out_type)
 {
-	struct irq_chip *chip = irq_get_chip(irq);
-	struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
+	struct sunxi_desc_function *desc;
+	int pin, base;
+
+	if (intsize < 3)
+		return -EINVAL;
+
+	base = PINS_PER_BANK * intspec[0];
+	pin = base + intspec[1];
+
+	desc = sunxi_pinctrl_desc_find_function_by_pin(d->host_data,
+						       pin, "irq");
+	if (!desc)
+		return -EINVAL;
+
+	*out_hwirq = desc->irqbank * PINS_PER_BANK + desc->irqnum;
+	*out_type = intspec[2];
+
+	return 0;
+}
+
+static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
+	.xlate		= sunxi_pinctrl_irq_of_xlate,
+};
+
+static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct sunxi_pinctrl *pctl = irq_desc_get_handler_data(desc);
 	unsigned long bank, reg, val;
 
 	for (bank = 0; bank < pctl->desc->irq_banks; bank++)
@@ -983,8 +1017,8 @@
 
 	pctl->domain = irq_domain_add_linear(node,
 					     pctl->desc->irq_banks * IRQ_PER_BANK,
-					     &irq_domain_simple_ops,
-					     NULL);
+					     &sunxi_pinctrl_irq_domain_ops,
+					     pctl);
 	if (!pctl->domain) {
 		dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
 		ret = -ENOMEM;
diff --git a/drivers/pinctrl/uniphier/Kconfig b/drivers/pinctrl/uniphier/Kconfig
new file mode 100644
index 0000000..eab23ef
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Kconfig
@@ -0,0 +1,32 @@
+if ARCH_UNIPHIER
+
+config PINCTRL_UNIPHIER_CORE
+	bool
+	select PINMUX
+	select GENERIC_PINCONF
+
+config PINCTRL_UNIPHIER_PH1_LD4
+	tristate "UniPhier PH1-LD4 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO4
+	tristate "UniPhier PH1-Pro4 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_SLD8
+	tristate "UniPhier PH1-sLD8 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_PRO5
+	tristate "UniPhier PH1-Pro5 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PROXSTREAM2
+	tristate "UniPhier ProXstream2 SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+config PINCTRL_UNIPHIER_PH1_LD6B
+	tristate "UniPhier PH1-LD6b SoC pinctrl driver"
+	select PINCTRL_UNIPHIER_CORE
+
+endif
diff --git a/drivers/pinctrl/uniphier/Makefile b/drivers/pinctrl/uniphier/Makefile
new file mode 100644
index 0000000..e215b10
--- /dev/null
+++ b/drivers/pinctrl/uniphier/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_PINCTRL_UNIPHIER_CORE)		+= pinctrl-uniphier-core.o
+
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD4)		+= pinctrl-ph1-ld4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO4)		+= pinctrl-ph1-pro4.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_SLD8)		+= pinctrl-ph1-sld8.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_PRO5)		+= pinctrl-ph1-pro5.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PROXSTREAM2)	+= pinctrl-proxstream2.o
+obj-$(CONFIG_PINCTRL_UNIPHIER_PH1_LD6B)		+= pinctrl-ph1-ld6b.o
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
new file mode 100644
index 0000000..7beb87e
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld4.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld4-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_ld4_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "EA1", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "EA2", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "EA3", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "EA4", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "EA5", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "EA6", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "EA7", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "EA8", 0,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "EA9", 0,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "EA10", 0,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "EA11", 0,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "EA12", 0,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "EA13", 0,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "EA14", 0,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "EA15", 0,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "ECLK", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(18, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(19, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(20, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(22, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     146, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(23, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     147, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(24, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     148, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     149, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     150, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     151, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_8_12_16_20,
+			     152, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_8_12_16_20,
+			     153, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_8_12_16_20,
+			     154, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_8_12_16_20,
+			     155, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "RMII_RXD0", 6,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "RMII_RXD1", 6,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "RMII_CRS_DV", 6,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "RMII_RXER", 6,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(36, "RMII_REFCLK", 6,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(37, "RMII_TXD0", 6,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(38, "RMII_TXD1", 6,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "RMII_TXEN", 6,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "MDC", 6,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "MDIO", 6,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "MDIO_INTL", 6,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "PHYRSTL", 6,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     156, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     157, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     158, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     159, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     160, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     161, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(50, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(53, "USB0VBUS", 0,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "USB0OD", 0,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "USB1VBUS", 0,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB1OD", 0,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "PCRESET", 0,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "PCREG", 0,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "PCCE2", 0,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "PCVS1", 0,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "PCCD2", 0,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "PCCD1", 0,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "PCREADY", 0,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "PCDOE", 0,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "PCCE1", 0,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "PCWE", 0,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "PCOE", 0,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "PCWAIT", 0,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "PCIOWR", 0,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "PCIORD", 0,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HS0DIN0", 0,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HS0DIN1", 0,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HS0DIN2", 0,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HS0DIN3", 0,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HS0DIN4", 0,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HS0DIN5", 0,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HS0DIN6", 0,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HS0DIN7", 0,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HS0BCLKIN", 0,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HS0VALIN", 0,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS0SYNCIN", 0,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HSDOUT0", 0,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HSDOUT1", 0,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HSDOUT2", 0,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HSDOUT3", 0,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HSDOUT4", 0,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HSDOUT5", 0,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HSDOUT6", 0,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HSDOUT7", 0,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HSBCLKOUT", 0,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HSVALOUT", 0,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "HSSYNCOUT", 0,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "AGCI", 3,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "AGCR", 4,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "AGCBS", 5,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "IECOUT", 0,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "ASMCK", 0,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "HIN", 1,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(113, "VIN", 2,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(114, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(115, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(116, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(117, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "TCON6", 0,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "TCON7", 0,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "PWMA", 0,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "XIRQ1", 0,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XIRQ2", 0,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "XIRQ3", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "XIRQ4", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "XIRQ5", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ6", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ7", 0,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ8", 0,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ9", 0,
+			     119, UNIPHIER_PIN_DRV_4_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ10", 0,
+			     120, UNIPHIER_PIN_DRV_4_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ11", 0,
+			     121, UNIPHIER_PIN_DRV_4_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ14", 0,
+			     122, UNIPHIER_PIN_DRV_4_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "PORT00", 0,
+			     123, UNIPHIER_PIN_DRV_4_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "PORT01", 0,
+			     124, UNIPHIER_PIN_DRV_4_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "PORT02", 0,
+			     125, UNIPHIER_PIN_DRV_4_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "PORT03", 0,
+			     126, UNIPHIER_PIN_DRV_4_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "PORT04", 0,
+			     127, UNIPHIER_PIN_DRV_4_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "PORT05", 0,
+			     128, UNIPHIER_PIN_DRV_4_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "PORT06", 0,
+			     129, UNIPHIER_PIN_DRV_4_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "PORT07", 0,
+			     130, UNIPHIER_PIN_DRV_4_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "PORT10", 0,
+			     131, UNIPHIER_PIN_DRV_4_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "PORT11", 0,
+			     132, UNIPHIER_PIN_DRV_4_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "PORT12", 0,
+			     133, UNIPHIER_PIN_DRV_4_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PORT13", 0,
+			     134, UNIPHIER_PIN_DRV_4_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "PORT14", 0,
+			     135, UNIPHIER_PIN_DRV_4_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "PORT15", 0,
+			     136, UNIPHIER_PIN_DRV_4_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "PORT16", 0,
+			     137, UNIPHIER_PIN_DRV_4_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+			     138, UNIPHIER_PIN_DRV_4_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "PORT20", 0,
+			     139, UNIPHIER_PIN_DRV_4_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "PORT21", 0,
+			     140, UNIPHIER_PIN_DRV_4_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "PORT22", 0,
+			     141, UNIPHIER_PIN_DRV_4_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "PORT23", 0,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "PORT24", UNIPHIER_PIN_IECTRL_NONE,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "PORT25", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "PORT26", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(159, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(160, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(164, "NANDRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+};
+
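+/*
+ * Pin group tables: each *_pins[] array below lists the pins that make up a
+ * group, and the parallel *_muxvals[] array holds the corresponding pin-mux
+ * select value for the pin at the same index.
+ */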
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {0, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {24, 25, 26, 27, 28, 29, 30, 31, 158, 159,
+				     160, 161, 162, 163, 164};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {85, 88};
+static const unsigned uart0_muxvals[] = {1, 1};
+static const unsigned uart1_pins[] = {155, 156};
+static const unsigned uart1_muxvals[] = {13, 13};
+static const unsigned uart1b_pins[] = {69, 70};
+static const unsigned uart1b_muxvals[] = {23, 23};
+static const unsigned uart2_pins[] = {128, 129};
+static const unsigned uart2_muxvals[] = {13, 13};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {53, 54};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {55, 56};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {155, 156};
+static const unsigned usb2_muxvals[] = {4, 4};
+static const unsigned usb2b_pins[] = {67, 68};
+static const unsigned usb2b_muxvals[] = {23, 23};
+static const unsigned port_range0_pins[] = {
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT0x */
+	143, 144, 145, 146, 147, 148, 149, 150,		/* PORT1x */
+	151, 152, 153, 154, 155, 156, 157, 0,		/* PORT2x */
+	1, 2, 3, 4, 5, 120, 121, 122,			/* PORT3x */
+	24, 25, 26, 27, 28, 29, 30, 31,			/* PORT4x */
+	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT5x */
+	48, 49, 50, 51, 52, 53, 54, 55,			/* PORT6x */
+	56, 85, 84, 59, 82, 61, 64, 65,			/* PORT7x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT8x */
+	66, 67, 68, 69, 70, 71, 72, 73,			/* PORT9x */
+	74, 75, 89, 86, 78, 79, 80, 81,			/* PORT10x */
+	60, 83, 58, 57, 88, 87, 77, 76,			/* PORT11x */
+	90, 91, 92, 93, 94, 95, 96, 97,			/* PORT12x */
+	98, 99, 100, 6, 101, 114, 115, 116,		/* PORT13x */
+	103, 108, 21, 22, 23, 117, 118, 119,		/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,				/* PORT0x */
+	0, 0, 0, 0, 0, 0, 0, 0,				/* PORT1x */
+	0, 0, 0, 0, 0, 0, 0, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	7,						/* PORT165 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15,						/* PORT165 */
+};
+static const unsigned xirq_range0_pins[] = {
+	151, 123, 124, 125, 126, 127, 128, 129,		/* XIRQ0-7 */
+	130, 131, 132, 133, 62,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+	14, 0, 0, 0, 0, 0, 0, 0,			/* XIRQ0-7 */
+	0, 0, 0, 0, 14,					/* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+	134, 63,					/* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+	0, 14,						/* XIRQ14-15 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld4_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart1b),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb2b),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2", "usb2b"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-164 missing */
+	/* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_ld4_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_ld4_pindata = {
+	.groups = ph1_ld4_groups,
+	.groups_count = ARRAY_SIZE(ph1_ld4_groups),
+	.functions = ph1_ld4_functions,
+	.functions_count = ARRAY_SIZE(ph1_ld4_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld4_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_ld4_pins,
+	.npins = ARRAY_SIZE(ph1_ld4_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_ld4_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_ld4_pinctrl_desc,
+				      &ph1_ld4_pindata);
+}
+
+static const struct of_device_id ph1_ld4_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-ld4-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld4_pinctrl_match);
+
+static struct platform_driver ph1_ld4_pinctrl_driver = {
+	.probe = ph1_ld4_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_ld4_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_ld4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
new file mode 100644
index 0000000..9720e697
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-ld6b-pinctrl"
+
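+/*
+ * Pin table: each UNIPHIER_PINCTRL_PIN() entry describes one pin by number
+ * and name, followed by its input-enable control, drive-strength setting,
+ * and default pull-up/pull-down bias.
+ */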
+static const struct pinctrl_pin_desc ph1_ld6b_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(113, "SBO0", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "SBI0", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "A_D_HS0SYNCIN", 0,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0,
+			     192, UNIPHIER_PIN_DRV_4_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned adinter_pins[] = {
+	159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172,
+	173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186,
+	187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200,
+	201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
+	229, 230, 231, 232, 233, 234,
+};
+static const unsigned adinter_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0,
+};
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {115, 116};
+static const unsigned i2c2_muxvals[] = {1, 1};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {1, 1};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+				     42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {135, 136};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart0b_pins[] = {11, 12};
+static const unsigned uart0b_muxvals[] = {2, 2};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart1b_pins[] = {113, 114};
+static const unsigned uart1b_muxvals[] = {1, 1};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {2, 2};
+static const unsigned uart2b_pins[] = {86, 87};
+static const unsigned uart2b_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
+	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
+	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
+	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
+	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
+	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
+	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+	81, 82, 83, 84, 85, 86, 87, 88,			/* PORT12x */
+	89, 90, 95, 96, 97, 98, 99, 100,		/* PORT13x */
+	101, 102, 103, 104, 105, 106, 107, 108,		/* PORT14x */
+	118, 119, 120, 121, 122, 123, 124, 125,		/* PORT15x */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* PORT16x */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* PORT17x */
+	109, 110, 111, 112, 113, 114, 115, 116,		/* PORT18x */
+	117, 143, 144, 145, 146, 147, 148, 149,		/* PORT19x */
+	150, 151, 152, 153, 154, 155, 156, 157,		/* PORT20x */
+	158, 159, 160, 161, 162, 163, 164, 165,		/* PORT21x */
+	166, 178, 179, 180, 181, 182, 183, 184,		/* PORT22x */
+	185, 187, 188, 189, 190, 191, 192, 193,		/* PORT23x */
+	194, 195, 196, 197, 198, 199, 200, 201,		/* PORT24x */
+	202, 203, 204, 205, 206, 207, 208, 209,		/* PORT25x */
+	210, 211, 212, 213, 214, 215, 216, 217,		/* PORT26x */
+	218, 219, 220, 221, 223, 224, 225, 226,		/* PORT27x */
+	227, 228, 229, 230, 231, 232, 233, 234,		/* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT15x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT16x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT17x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+	118, 119, 120, 121, 122, 123, 124, 125,		/* XIRQ0-7 */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* XIRQ8-15 */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ16-23 */
+};
+
+static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(adinter),
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart1b),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart2b),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const adinter_groups[] = {"adinter"};
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1", "uart1b"};
+static const char * const uart2_groups[] = {"uart2", "uart2b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	/* port110-117 missing */
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	"port150", "port151", "port152", "port153",
+	"port154", "port155", "port156", "port157",
+	"port160", "port161", "port162", "port163",
+	"port164", "port165", "port166", "port167",
+	"port170", "port171", "port172", "port173",
+	"port174", "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = {
+	.groups = ph1_ld6b_groups,
+	.groups_count = ARRAY_SIZE(ph1_ld6b_groups),
+	.functions = ph1_ld6b_functions,
+	.functions_count = ARRAY_SIZE(ph1_ld6b_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_ld6b_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_ld6b_pins,
+	.npins = ARRAY_SIZE(ph1_ld6b_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc,
+				      &ph1_ld6b_pindata);
+}
+
+static const struct of_device_id ph1_ld6b_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-ld6b-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);
+
+static struct platform_driver ph1_ld6b_pinctrl_driver = {
+	.probe = ph1_ld6b_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_ld6b_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_ld6b_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
new file mode 100644
index 0000000..96921e4
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro4.c
@@ -0,0 +1,1554 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro4-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro4_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "CK24O", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "VC27A", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "CK27AI", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "CK27AO", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "CKSEL", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(5, "CK27AV", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "AEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ASEL", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "ARCRESET", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "ARCUNLOCK", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "XSRST", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "XNMIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(12, "XSCIRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(13, "EXTRG", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "TRCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "TRCCTL", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "TRCD0", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "TRCD1", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "TRCD2", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "TRCD3", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "TRCD4", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "TRCD5", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "TRCD6", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "TRCD7", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(31, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(32, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(36, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(37, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(38, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "BOOTSWAP", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_NONE,
+			     39, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(40, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_8_12_16_20,
+			     40, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(41, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_8_12_16_20,
+			     41, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(42, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     42, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(43, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_8_12_16_20,
+			     43, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(44, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_8_12_16_20,
+			     44, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_8_12_16_20,
+			     45, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     46, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_8_12_16_20,
+			     47, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(49, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(50, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(53, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_8_12_16_20,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(54, "NRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(55, "DMDSCLTST", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_NONE,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(56, "DMDSDATST", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(57, "AGCI0", 3,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(59, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(60, "AGCBS0", 5,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(62, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(63, "ANTSHORT", UNIPHIER_PIN_IECTRL_NONE,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "CKFEO", UNIPHIER_PIN_IECTRL_NONE,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "XFERST", UNIPHIER_PIN_IECTRL_NONE,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "P_FE_ON", UNIPHIER_PIN_IECTRL_NONE,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "P_TU0_ON", UNIPHIER_PIN_IECTRL_NONE,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "XFEIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "XFEIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "XFEIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "XFEIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "XFEIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "XFEIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "XFEIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(106, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(107, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(109, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(114, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "XINTM", UNIPHIER_PIN_IECTRL_NONE,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "SCLKM", UNIPHIER_PIN_IECTRL_NONE,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "SBMTP", UNIPHIER_PIN_IECTRL_NONE,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(118, "SBPTM", UNIPHIER_PIN_IECTRL_NONE,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(119, "XMPREQ", UNIPHIER_PIN_IECTRL_NONE,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(120, "XINTP", UNIPHIER_PIN_IECTRL_NONE,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(121, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "SDBOOT", UNIPHIER_PIN_IECTRL_NONE,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(123, "BFAIL", UNIPHIER_PIN_IECTRL_NONE,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "XFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(125, "RF_COM_RDY", UNIPHIER_PIN_IECTRL_NONE,
+			     119, UNIPHIER_PIN_DRV_4_8,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(126, "XDIAG0", UNIPHIER_PIN_IECTRL_NONE,
+			     120, UNIPHIER_PIN_DRV_4_8,
+			     120, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(127, "RXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     121, UNIPHIER_PIN_DRV_4_8,
+			     121, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(128, "TXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     122, UNIPHIER_PIN_DRV_4_8,
+			     122, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(129, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     123, UNIPHIER_PIN_DRV_4_8,
+			     123, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(130, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     124, UNIPHIER_PIN_DRV_4_8,
+			     124, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(131, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     125, UNIPHIER_PIN_DRV_4_8,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(132, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     126, UNIPHIER_PIN_DRV_4_8,
+			     126, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(133, "SS0CS", UNIPHIER_PIN_IECTRL_NONE,
+			     127, UNIPHIER_PIN_DRV_4_8,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(134, "SS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     128, UNIPHIER_PIN_DRV_4_8,
+			     128, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(135, "SS0DO", UNIPHIER_PIN_IECTRL_NONE,
+			     129, UNIPHIER_PIN_DRV_4_8,
+			     129, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(136, "SS0DI", UNIPHIER_PIN_IECTRL_NONE,
+			     130, UNIPHIER_PIN_DRV_4_8,
+			     130, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(137, "MS0CS0", UNIPHIER_PIN_IECTRL_NONE,
+			     131, UNIPHIER_PIN_DRV_4_8,
+			     131, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(138, "MS0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     132, UNIPHIER_PIN_DRV_4_8,
+			     132, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(139, "MS0DI", UNIPHIER_PIN_IECTRL_NONE,
+			     133, UNIPHIER_PIN_DRV_4_8,
+			     133, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(140, "MS0DO", UNIPHIER_PIN_IECTRL_NONE,
+			     134, UNIPHIER_PIN_DRV_4_8,
+			     134, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(141, "XMDMRST", UNIPHIER_PIN_IECTRL_NONE,
+			     135, UNIPHIER_PIN_DRV_4_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(143, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(144, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(145, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(146, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(147, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(148, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(149, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(150, "SD0DAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     136, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(151, "SD0DAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_8_12_16_20,
+			     137, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(152, "SD0DAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_8_12_16_20,
+			     138, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(153, "SD0DAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_8_12_16_20,
+			     139, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(154, "SD0CMD", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_8_12_16_20,
+			     141, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(155, "SD0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_8_12_16_20,
+			     140, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(156, "SD0CD", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(157, "SD0WP", UNIPHIER_PIN_IECTRL_NONE,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(158, "SD0VTCG", UNIPHIER_PIN_IECTRL_NONE,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(159, "CK25O", UNIPHIER_PIN_IECTRL_NONE,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(160, "RGMII_TXCLK", 6,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "RGMII_TXD0", 6,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "RGMII_TXD1", 6,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "RGMII_TXD2", 6,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "RGMII_TXD3", 6,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "RGMII_TXCTL", 6,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "MII_TXER", UNIPHIER_PIN_IECTRL_NONE,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "RGMII_RXCLK", 6,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "RGMII_RXD0", 6,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "RGMII_RXD1", 6,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "RGMII_RXD2", 6,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "RGMII_RXD3", 6,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "RGMII_RXCTL", 6,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "MII_RXER", 6,
+			     159, UNIPHIER_PIN_DRV_4_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "MII_CRS", 6,
+			     160, UNIPHIER_PIN_DRV_4_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "MII_COL", 6,
+			     161, UNIPHIER_PIN_DRV_4_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "MDC", 6,
+			     162, UNIPHIER_PIN_DRV_4_8,
+			     162, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(177, "MDIO", 6,
+			     163, UNIPHIER_PIN_DRV_4_8,
+			     163, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(178, "MDIO_INTL", 6,
+			     164, UNIPHIER_PIN_DRV_4_8,
+			     164, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(179, "XETH_RST", 6,
+			     165, UNIPHIER_PIN_DRV_4_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     166, UNIPHIER_PIN_DRV_4_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     167, UNIPHIER_PIN_DRV_4_8,
+			     167, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(182, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     168, UNIPHIER_PIN_DRV_4_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     169, UNIPHIER_PIN_DRV_4_8,
+			     169, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(184, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     170, UNIPHIER_PIN_DRV_4_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     171, UNIPHIER_PIN_DRV_4_8,
+			     171, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(186, "USB2ID", UNIPHIER_PIN_IECTRL_NONE,
+			     172, UNIPHIER_PIN_DRV_4_8,
+			     172, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(187, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     173, UNIPHIER_PIN_DRV_4_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     174, UNIPHIER_PIN_DRV_4_8,
+			     174, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(189, "LINKCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     175, UNIPHIER_PIN_DRV_4_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "LINKREQ", UNIPHIER_PIN_IECTRL_NONE,
+			     176, UNIPHIER_PIN_DRV_4_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "LINKCTL0", UNIPHIER_PIN_IECTRL_NONE,
+			     177, UNIPHIER_PIN_DRV_4_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "LINKCTL1", UNIPHIER_PIN_IECTRL_NONE,
+			     178, UNIPHIER_PIN_DRV_4_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "LINKDT0", UNIPHIER_PIN_IECTRL_NONE,
+			     179, UNIPHIER_PIN_DRV_4_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "LINKDT1", UNIPHIER_PIN_IECTRL_NONE,
+			     180, UNIPHIER_PIN_DRV_4_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "LINKDT2", UNIPHIER_PIN_IECTRL_NONE,
+			     181, UNIPHIER_PIN_DRV_4_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "LINKDT3", UNIPHIER_PIN_IECTRL_NONE,
+			     182, UNIPHIER_PIN_DRV_4_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "LINKDT4", UNIPHIER_PIN_IECTRL_NONE,
+			     183, UNIPHIER_PIN_DRV_4_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "LINKDT5", UNIPHIER_PIN_IECTRL_NONE,
+			     184, UNIPHIER_PIN_DRV_4_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "LINKDT6", UNIPHIER_PIN_IECTRL_NONE,
+			     185, UNIPHIER_PIN_DRV_4_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "LINKDT7", UNIPHIER_PIN_IECTRL_NONE,
+			     186, UNIPHIER_PIN_DRV_4_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "CKDVO", UNIPHIER_PIN_IECTRL_NONE,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "PHY_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "X1394_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "VOUT_MUTE_L", UNIPHIER_PIN_IECTRL_NONE,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "CLK54O", UNIPHIER_PIN_IECTRL_NONE,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "CLK54I", UNIPHIER_PIN_IECTRL_NONE,
+			     192, UNIPHIER_PIN_DRV_NONE,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "YIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "YIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "YIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "YIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "YIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "YIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "CIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "CIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "CIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "CIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "CIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     203, UNIPHIER_PIN_DRV_4_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "CIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     204, UNIPHIER_PIN_DRV_4_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "GCP", UNIPHIER_PIN_IECTRL_NONE,
+			     205, UNIPHIER_PIN_DRV_4_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "ADFLG", UNIPHIER_PIN_IECTRL_NONE,
+			     206, UNIPHIER_PIN_DRV_4_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "CK27AIOF", UNIPHIER_PIN_IECTRL_NONE,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "DACOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "DAFLG", UNIPHIER_PIN_IECTRL_NONE,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "VBIH", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "VBIL", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "XSUB_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "XADC_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     213, UNIPHIER_PIN_DRV_4_8,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     214, UNIPHIER_PIN_DRV_4_8,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "AI1DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "CK27HD", UNIPHIER_PIN_IECTRL_NONE,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "XHD_RST", UNIPHIER_PIN_IECTRL_NONE,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "INTHD", UNIPHIER_PIN_IECTRL_NONE,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(235, "VO1HDCK", UNIPHIER_PIN_IECTRL_NONE,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(236, "VO1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(237, "VO1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(238, "VO1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(239, "VO1Y0", UNIPHIER_PIN_IECTRL_NONE,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(240, "VO1Y1", UNIPHIER_PIN_IECTRL_NONE,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(241, "VO1Y2", UNIPHIER_PIN_IECTRL_NONE,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(242, "VO1Y3", UNIPHIER_PIN_IECTRL_NONE,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(243, "VO1Y4", UNIPHIER_PIN_IECTRL_NONE,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(244, "VO1Y5", UNIPHIER_PIN_IECTRL_NONE,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(245, "VO1Y6", UNIPHIER_PIN_IECTRL_NONE,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(246, "VO1Y7", UNIPHIER_PIN_IECTRL_NONE,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(247, "VO1Y8", UNIPHIER_PIN_IECTRL_NONE,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(248, "VO1Y9", UNIPHIER_PIN_IECTRL_NONE,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(249, "VO1Y10", UNIPHIER_PIN_IECTRL_NONE,
+			     235, UNIPHIER_PIN_DRV_4_8,
+			     235, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(250, "VO1Y11", UNIPHIER_PIN_IECTRL_NONE,
+			     236, UNIPHIER_PIN_DRV_4_8,
+			     236, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(251, "VO1CB0", UNIPHIER_PIN_IECTRL_NONE,
+			     237, UNIPHIER_PIN_DRV_4_8,
+			     237, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(252, "VO1CB1", UNIPHIER_PIN_IECTRL_NONE,
+			     238, UNIPHIER_PIN_DRV_4_8,
+			     238, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(253, "VO1CB2", UNIPHIER_PIN_IECTRL_NONE,
+			     239, UNIPHIER_PIN_DRV_4_8,
+			     239, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(254, "VO1CB3", UNIPHIER_PIN_IECTRL_NONE,
+			     240, UNIPHIER_PIN_DRV_4_8,
+			     240, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(255, "VO1CB4", UNIPHIER_PIN_IECTRL_NONE,
+			     241, UNIPHIER_PIN_DRV_4_8,
+			     241, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(256, "VO1CB5", UNIPHIER_PIN_IECTRL_NONE,
+			     242, UNIPHIER_PIN_DRV_4_8,
+			     242, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(257, "VO1CB6", UNIPHIER_PIN_IECTRL_NONE,
+			     243, UNIPHIER_PIN_DRV_4_8,
+			     243, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(258, "VO1CB7", UNIPHIER_PIN_IECTRL_NONE,
+			     244, UNIPHIER_PIN_DRV_4_8,
+			     244, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(259, "VO1CB8", UNIPHIER_PIN_IECTRL_NONE,
+			     245, UNIPHIER_PIN_DRV_4_8,
+			     245, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(260, "VO1CB9", UNIPHIER_PIN_IECTRL_NONE,
+			     246, UNIPHIER_PIN_DRV_4_8,
+			     246, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(261, "VO1CB10", UNIPHIER_PIN_IECTRL_NONE,
+			     247, UNIPHIER_PIN_DRV_4_8,
+			     247, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(262, "VO1CB11", UNIPHIER_PIN_IECTRL_NONE,
+			     248, UNIPHIER_PIN_DRV_4_8,
+			     248, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(263, "VO1CR0", UNIPHIER_PIN_IECTRL_NONE,
+			     249, UNIPHIER_PIN_DRV_4_8,
+			     249, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(264, "VO1CR1", UNIPHIER_PIN_IECTRL_NONE,
+			     250, UNIPHIER_PIN_DRV_4_8,
+			     250, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(265, "VO1CR2", UNIPHIER_PIN_IECTRL_NONE,
+			     251, UNIPHIER_PIN_DRV_4_8,
+			     251, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(266, "VO1CR3", UNIPHIER_PIN_IECTRL_NONE,
+			     252, UNIPHIER_PIN_DRV_4_8,
+			     252, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(267, "VO1CR4", UNIPHIER_PIN_IECTRL_NONE,
+			     253, UNIPHIER_PIN_DRV_4_8,
+			     253, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(268, "VO1CR5", UNIPHIER_PIN_IECTRL_NONE,
+			     254, UNIPHIER_PIN_DRV_4_8,
+			     254, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(269, "VO1CR6", UNIPHIER_PIN_IECTRL_NONE,
+			     255, UNIPHIER_PIN_DRV_4_8,
+			     255, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(270, "VO1CR7", UNIPHIER_PIN_IECTRL_NONE,
+			     256, UNIPHIER_PIN_DRV_4_8,
+			     256, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(271, "VO1CR8", UNIPHIER_PIN_IECTRL_NONE,
+			     257, UNIPHIER_PIN_DRV_4_8,
+			     257, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(272, "VO1CR9", UNIPHIER_PIN_IECTRL_NONE,
+			     258, UNIPHIER_PIN_DRV_4_8,
+			     258, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(273, "VO1CR10", UNIPHIER_PIN_IECTRL_NONE,
+			     259, UNIPHIER_PIN_DRV_4_8,
+			     259, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(274, "VO1CR11", UNIPHIER_PIN_IECTRL_NONE,
+			     260, UNIPHIER_PIN_DRV_4_8,
+			     260, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(275, "VO1EX0", UNIPHIER_PIN_IECTRL_NONE,
+			     261, UNIPHIER_PIN_DRV_4_8,
+			     261, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(276, "VO1EX1", UNIPHIER_PIN_IECTRL_NONE,
+			     262, UNIPHIER_PIN_DRV_4_8,
+			     262, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(277, "VO1EX2", UNIPHIER_PIN_IECTRL_NONE,
+			     263, UNIPHIER_PIN_DRV_4_8,
+			     263, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(278, "VO1EX3", UNIPHIER_PIN_IECTRL_NONE,
+			     264, UNIPHIER_PIN_DRV_4_8,
+			     264, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(279, "VEXCKA", UNIPHIER_PIN_IECTRL_NONE,
+			     265, UNIPHIER_PIN_DRV_4_8,
+			     265, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(280, "VSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     266, UNIPHIER_PIN_DRV_4_8,
+			     266, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(281, "VSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     267, UNIPHIER_PIN_DRV_4_8,
+			     267, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(282, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     268, UNIPHIER_PIN_DRV_4_8,
+			     268, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(283, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     269, UNIPHIER_PIN_DRV_4_8,
+			     269, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(284, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     270, UNIPHIER_PIN_DRV_4_8,
+			     270, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(285, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     271, UNIPHIER_PIN_DRV_4_8,
+			     271, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(286, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     272, UNIPHIER_PIN_DRV_4_8,
+			     272, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(287, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     273, UNIPHIER_PIN_DRV_4_8,
+			     273, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(288, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     274, UNIPHIER_PIN_DRV_4_8,
+			     274, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(289, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     275, UNIPHIER_PIN_DRV_4_8,
+			     275, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(290, "XDAC_PD", UNIPHIER_PIN_IECTRL_NONE,
+			     276, UNIPHIER_PIN_DRV_4_8,
+			     276, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(291, "EX_A_MUTE", UNIPHIER_PIN_IECTRL_NONE,
+			     277, UNIPHIER_PIN_DRV_4_8,
+			     277, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(292, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     278, UNIPHIER_PIN_DRV_4_8,
+			     278, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(293, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     279, UNIPHIER_PIN_DRV_4_8,
+			     279, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(294, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     280, UNIPHIER_PIN_DRV_4_8,
+			     280, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(295, "AO2DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     281, UNIPHIER_PIN_DRV_4_8,
+			     281, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(296, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     282, UNIPHIER_PIN_DRV_4_8,
+			     282, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(297, "HTHPD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(298, "HTSCL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(299, "HTSDA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(300, "PORT00", UNIPHIER_PIN_IECTRL_NONE,
+			     284, UNIPHIER_PIN_DRV_4_8,
+			     284, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(301, "PORT01", UNIPHIER_PIN_IECTRL_NONE,
+			     285, UNIPHIER_PIN_DRV_4_8,
+			     285, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(302, "PORT02", UNIPHIER_PIN_IECTRL_NONE,
+			     286, UNIPHIER_PIN_DRV_4_8,
+			     286, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(303, "PORT03", UNIPHIER_PIN_IECTRL_NONE,
+			     287, UNIPHIER_PIN_DRV_4_8,
+			     287, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(304, "PORT04", UNIPHIER_PIN_IECTRL_NONE,
+			     288, UNIPHIER_PIN_DRV_4_8,
+			     288, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(305, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     289, UNIPHIER_PIN_DRV_4_8,
+			     289, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(306, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     290, UNIPHIER_PIN_DRV_4_8,
+			     290, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(307, "PORT07", UNIPHIER_PIN_IECTRL_NONE,
+			     291, UNIPHIER_PIN_DRV_4_8,
+			     291, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(308, "PORT10", UNIPHIER_PIN_IECTRL_NONE,
+			     292, UNIPHIER_PIN_DRV_4_8,
+			     292, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(309, "PORT11", UNIPHIER_PIN_IECTRL_NONE,
+			     293, UNIPHIER_PIN_DRV_4_8,
+			     293, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(310, "PORT12", UNIPHIER_PIN_IECTRL_NONE,
+			     294, UNIPHIER_PIN_DRV_4_8,
+			     294, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(311, "PORT13", UNIPHIER_PIN_IECTRL_NONE,
+			     295, UNIPHIER_PIN_DRV_4_8,
+			     295, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(312, "PORT14", UNIPHIER_PIN_IECTRL_NONE,
+			     296, UNIPHIER_PIN_DRV_4_8,
+			     296, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(313, "PORT15", UNIPHIER_PIN_IECTRL_NONE,
+			     297, UNIPHIER_PIN_DRV_4_8,
+			     297, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(314, "PORT16", UNIPHIER_PIN_IECTRL_NONE,
+			     298, UNIPHIER_PIN_DRV_4_8,
+			     298, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(315, "PORT17", UNIPHIER_PIN_IECTRL_NONE,
+			     299, UNIPHIER_PIN_DRV_4_8,
+			     299, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(316, "PORT20", UNIPHIER_PIN_IECTRL_NONE,
+			     300, UNIPHIER_PIN_DRV_4_8,
+			     300, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(317, "PORT21", UNIPHIER_PIN_IECTRL_NONE,
+			     301, UNIPHIER_PIN_DRV_4_8,
+			     301, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(318, "PORT22", UNIPHIER_PIN_IECTRL_NONE,
+			     302, UNIPHIER_PIN_DRV_4_8,
+			     302, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(319, "SD1DAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     303, UNIPHIER_PIN_DRV_4_8,
+			     303, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(320, "SD1DAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     304, UNIPHIER_PIN_DRV_4_8,
+			     304, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(321, "SD1DAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     305, UNIPHIER_PIN_DRV_4_8,
+			     305, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(322, "SD1DAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     306, UNIPHIER_PIN_DRV_4_8,
+			     306, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(323, "SD1CMD", UNIPHIER_PIN_IECTRL_NONE,
+			     307, UNIPHIER_PIN_DRV_4_8,
+			     307, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(324, "SD1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     308, UNIPHIER_PIN_DRV_4_8,
+			     308, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(325, "SD1CD", UNIPHIER_PIN_IECTRL_NONE,
+			     309, UNIPHIER_PIN_DRV_4_8,
+			     309, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(326, "SD1WP", UNIPHIER_PIN_IECTRL_NONE,
+			     310, UNIPHIER_PIN_DRV_4_8,
+			     310, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(327, "SD1VTCG", UNIPHIER_PIN_IECTRL_NONE,
+			     311, UNIPHIER_PIN_DRV_4_8,
+			     311, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(328, "DMDISO", UNIPHIER_PIN_IECTRL_NONE,
+			     312, UNIPHIER_PIN_DRV_NONE,
+			     312, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {40, 41, 42, 43, 51, 52, 53};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {44, 45, 46, 47};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {142, 143};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {144, 145};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {146, 147};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {148, 149};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c6_pins[] = {308, 309};
+static const unsigned i2c6_muxvals[] = {6, 6};
+static const unsigned nand_pins[] = {40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+				     50, 51, 52, 53, 54};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {131, 132};
+static const unsigned nand_cs1_muxvals[] = {1, 1};
+static const unsigned uart0_pins[] = {127, 128};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart1_pins[] = {129, 130};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {131, 132};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {88, 89};
+static const unsigned uart3_muxvals[] = {2, 2};
+static const unsigned usb0_pins[] = {180, 181};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {182, 183};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {184, 185};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned usb3_pins[] = {186, 187};
+static const unsigned usb3_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	300, 301, 302, 303, 304, 305, 306, 307,		/* PORT0x */
+	308, 309, 310, 311, 312, 313, 314, 315,		/* PORT1x */
+	316, 317, 318, 16, 17, 18, 19, 20,		/* PORT2x */
+	21, 22, 23, 4, 93, 94, 95, 63,			/* PORT3x */
+	123, 122, 124, 125, 126, 141, 202, 203,		/* PORT4x */
+	204, 226, 227, 290, 291, 233, 280, 281,		/* PORT5x */
+	8, 7, 10, 29, 30, 48, 49, 50,			/* PORT6x */
+	40, 41, 42, 43, 44, 45, 46, 47,			/* PORT7x */
+	54, 51, 52, 53, 127, 128, 129, 130,		/* PORT8x */
+	131, 132, 57, 60, 134, 133, 135, 136,		/* PORT9x */
+	138, 137, 140, 139, 64, 65, 66, 67,		/* PORT10x */
+	107, 106, 105, 104, 113, 112, 111, 110,		/* PORT11x */
+	68, 69, 70, 71, 72, 73, 74, 75,			/* PORT12x */
+	76, 77, 78, 79, 80, 81, 82, 83,			/* PORT13x */
+	84, 85, 86, 87, 88, 89, 90, 91,			/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT0x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT1x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT2x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT3x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT4x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT5x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT6x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT7x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT8x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT9x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT10x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT11x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT12x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT13x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	13, 14, 15,					/* PORT175-177 */
+	157, 158, 156, 154, 150, 151, 152, 153,		/* PORT18x */
+	326, 327, 325, 323, 319, 320, 321, 322,		/* PORT19x */
+	160, 161, 162, 163, 164, 165, 166, 167,		/* PORT20x */
+	168, 169, 170, 171, 172, 173, 174, 175,		/* PORT21x */
+	180, 181, 182, 183, 184, 185, 187, 188,		/* PORT22x */
+	193, 194, 195, 196, 197, 198, 199, 200,		/* PORT23x */
+	191, 192, 215, 216, 217, 218, 219, 220,		/* PORT24x */
+	222, 223, 224, 225, 228, 229, 230, 231,		/* PORT25x */
+	282, 283, 284, 285, 286, 287, 288, 289,		/* PORT26x */
+	292, 293, 294, 295, 296, 236, 237, 238,		/* PORT27x */
+	275, 276, 277, 278, 239, 240, 249, 250,		/* PORT28x */
+	251, 252, 261, 262, 263, 264, 273, 274,		/* PORT29x */
+	31, 32, 33, 34, 35, 36, 37, 38,			/* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+	7, 7, 7,					/* PORT175-177 */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT18x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT19x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT20x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT21x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT22x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT23x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT24x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT25x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT26x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT27x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT28x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT29x */
+	7, 7, 7, 7, 7, 7, 7, 7,				/* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+	11, 9, 12, 96, 97, 98, 108, 114,		/* XIRQ0-7 */
+	234, 186, 99, 100, 101, 102, 184, 301,		/* XIRQ8-15 */
+	302, 303, 304, 305, 306,			/* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+	7, 7, 7, 7, 7, 7, 7, 7,				/* XIRQ0-7 */
+	7, 7, 7, 7, 7, 7, 2, 2,				/* XIRQ8-15 */
+	2, 2, 2, 2, 2,					/* XIRQ16-20 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+	184, 310, 316,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+	2, 2, 2,
+};
+
+static const struct uniphier_pinctrl_group ph1_pro4_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14b, xirq_alternatives, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 2),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-174 missing */
+	/* none */ "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+	"port290", "port291", "port292", "port293",
+	"port294", "port295", "port296", "port297",
+	"port300", "port301", "port302", "port303",
+	"port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20",
+	"xirq14b", "xirq17b", "xirq18b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro4_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_pro4_pindata = {
+	.groups = ph1_pro4_groups,
+	.groups_count = ARRAY_SIZE(ph1_pro4_groups),
+	.functions = ph1_pro4_functions,
+	.functions_count = ARRAY_SIZE(ph1_pro4_functions),
+	.mux_bits = 4,
+	.reg_stride = 8,
+	.load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro4_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_pro4_pins,
+	.npins = ARRAY_SIZE(ph1_pro4_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_pro4_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_pro4_pinctrl_desc,
+				      &ph1_pro4_pindata);
+}
+
+static const struct of_device_id ph1_pro4_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-pro4-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro4_pinctrl_match);
+
+static struct platform_driver ph1_pro4_pinctrl_driver = {
+	.probe = ph1_pro4_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_pro4_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_pro4_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro4 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
new file mode 100644
index 0000000..9af4559
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-pro5.c
@@ -0,0 +1,1351 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-pro5-pinctrl"
+
+static const struct pinctrl_pin_desc ph1_pro5_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "AEXCKA1", 0,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "AEXCKA2", 0,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "CK27EXI", 0,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "CK54EXI", 0,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(13, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(14, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(15, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(16, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(18, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(19, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(20, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(33, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(34, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(35, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "XERST", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "MMCCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "MMCCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "MMCDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(40, "MMCDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(41, "MMCDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(42, "MMCDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(43, "MMCDAT4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(44, "MMCDAT5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(45, "MMCDAT6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(46, "MMCDAT7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(47, "TXD0", 0,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(48, "RXD0", 0,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(49, "TXD1", 0,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(50, "RXD1", 0,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(51, "TXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(52, "RXD2", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(53, "TXD3", 0,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(54, "RXD3", 0,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(55, "MS0CS0", 0,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "MS0DO", 0,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "MS0DI", 0,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "MS0CLK", 0,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "CSCLK", 0,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "CSBPTM", 0,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "CSBMTP", 0,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "XCINTP", 0,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "XCINTM", 0,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "XCMPREQ", 0,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "XSRST", 0,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "PWMA", 0,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "XIRQ0", 0,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "XIRQ1", 0,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "XIRQ2", 0,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "XIRQ3", 0,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "XIRQ4", 0,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "XIRQ5", 0,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "XIRQ6", 0,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "XIRQ7", 0,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "XIRQ8", 0,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "XIRQ9", 0,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "XIRQ10", 0,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "XIRQ11", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "XIRQ12", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "XIRQ13", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "XIRQ14", 0,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "XIRQ15", 0,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "XIRQ16", 0,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "XIRQ19", 0,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "XIRQ20", 0,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "PORT00", 0,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "PORT01", 0,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "PORT02", 0,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "PORT03", 0,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "PORT04", 0,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "PORT05", 0,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "PORT06", 0,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "PORT07", 0,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "PORT10", 0,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "PORT11", 0,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "PORT12", 0,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "PORT13", 0,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "PORT14", 0,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "PORT15", 0,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "PORT16", 0,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "PORT17", 0,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "T0HPD", 0,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "T1HPD", 0,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "R0HPD", 0,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "R1HPD", 0,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "XPERST", 0,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(110, "XPEWAKE", 0,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "XPECLKRQ", 0,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(118, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     118, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(119, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(120, "SPISYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "SPISCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "SPITXD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "SPIRXD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(126, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(128, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(130, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "SMTCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "SMTCLK1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "CH7CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "CH7PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "CH7VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "CH7DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "AI1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "AI1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "AI1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "AI3ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "AI3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "AI3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "AI3D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "AO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "AO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "AO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     196, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(197, "AO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     197, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(198, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "AO2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "AO2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "AO2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "AO4DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "AO4BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "AO4LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "AO4DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "VI1C0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "VI1C1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "VI1C2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "VI1C3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "VI1C4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "VI1C5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "VI1C6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "VI1C7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "VI1C8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "VI1C9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "VI1Y0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "VI1Y1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "VI1Y2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "VI1Y3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "VI1Y4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "VI1Y5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "VI1Y6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "VI1Y7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "VI1Y8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "VI1Y9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(235, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     235, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(236, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     236, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(237, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     237, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(238, "VO1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     238, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(239, "VO1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     239, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(240, "VO1D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     240, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(241, "VO1D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     241, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(242, "VO1D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     242, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(243, "VO1D4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     243, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(244, "VO1D5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     244, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(245, "VO1D6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     245, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(246, "VO1D7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     246, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(247, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     247, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(248, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     248, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(249, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     249, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(250, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(251, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(252, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(253, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(254, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(255, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+};
+
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {0, 0, 0, 0, 0, 0, 0};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {0, 0, 0, 0};
+static const unsigned i2c0_pins[] = {112, 113};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {114, 115};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {116, 117};
+static const unsigned i2c2_muxvals[] = {0, 0};
+static const unsigned i2c3_pins[] = {118, 119};
+static const unsigned i2c3_muxvals[] = {0, 0};
+static const unsigned i2c5_pins[] = {87, 88};
+static const unsigned i2c5_muxvals[] = {2, 2};
+static const unsigned i2c5b_pins[] = {196, 197};
+static const unsigned i2c5b_muxvals[] = {2, 2};
+static const unsigned i2c5c_pins[] = {215, 216};
+static const unsigned i2c5c_muxvals[] = {2, 2};
+static const unsigned i2c6_pins[] = {101, 102};
+static const unsigned i2c6_muxvals[] = {2, 2};
+static const unsigned nand_pins[] = {19, 20, 21, 22, 23, 24, 25, 28, 29, 30,
+				     31, 32, 33, 34, 35};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {26, 27};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {47, 48};
+static const unsigned uart0_muxvals[] = {0, 0};
+static const unsigned uart0b_pins[] = {227, 228};
+static const unsigned uart0b_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {49, 50};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {51, 52};
+static const unsigned uart2_muxvals[] = {0, 0};
+static const unsigned uart3_pins[] = {53, 54};
+static const unsigned uart3_muxvals[] = {0, 0};
+static const unsigned usb0_pins[] = {124, 125};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {126, 127};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {128, 129};
+static const unsigned usb2_muxvals[] = {0, 0};
+static const unsigned port_range0_pins[] = {
+	89, 90, 91, 92, 93, 94, 95, 96,			/* PORT0x */
+	97, 98, 99, 100, 101, 102, 103, 104,		/* PORT1x */
+	251, 252, 253, 254, 255, 247, 248, 249,		/* PORT2x */
+	39, 40, 41, 42, 43, 44, 45, 46,			/* PORT3x */
+	156, 157, 158, 159, 160, 161, 162, 163,		/* PORT4x */
+	164, 165, 166, 167, 168, 169, 170, 171,		/* PORT5x */
+	190, 191, 192, 193, 194, 195, 196, 197,		/* PORT6x */
+	198, 199, 200, 201, 202, 203, 204, 205,		/* PORT7x */
+	120, 121, 122, 123, 55, 56, 57, 58,		/* PORT8x */
+	124, 125, 126, 127, 49, 50, 53, 54,		/* PORT9x */
+	148, 149, 150, 151, 152, 153, 154, 155,		/* PORT10x */
+	133, 134, 131, 130, 138, 139, 136, 135,		/* PORT11x */
+	28, 29, 30, 31, 32, 33, 34, 35,			/* PORT12x */
+	179, 180, 181, 182, 186, 187, 188, 189,		/* PORT13x */
+	4, 5, 6, 7, 8, 9, 10, 11,			/* PORT14x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range1_pins[] = {
+	109, 110, 111,					/* PORT175-177 */
+	206, 207, 208, 209, 210, 211, 212, 213,		/* PORT18x */
+	12, 13, 14, 15, 16, 17, 107, 108,		/* PORT19x */
+	140, 141, 142, 143, 144, 145, 146, 147,		/* PORT20x */
+	59, 60, 61, 62, 63, 64, 65, 66,			/* PORT21x */
+	214, 215, 216, 217, 218, 219, 220, 221,		/* PORT22x */
+	222, 223, 224, 225, 226, 227, 228, 229,		/* PORT23x */
+	19, 20, 21, 22, 23, 24, 25, 26,			/* PORT24x */
+	230, 231, 232, 233, 234, 235, 236, 237,		/* PORT25x */
+	239, 240, 241, 242, 243, 244, 245, 246,		/* PORT26x */
+	172, 173, 174, 175, 176, 177, 178, 129,		/* PORT27x */
+	0, 1, 2, 67, 85, 86, 87, 88,			/* PORT28x */
+	105, 106, 18, 27, 36, 128, 132, 137,		/* PORT29x */
+	183, 184, 185, 84, 47, 48, 51, 52,		/* PORT30x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15,					/* PORT175-177 */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT29x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT30x */
+};
+static const unsigned xirq_pins[] = {
+	68, 69, 70, 71, 72, 73, 74, 75,			/* XIRQ0-7 */
+	76, 77, 78, 79, 80, 81, 82, 83,			/* XIRQ8-15 */
+	84, 85, 86, 87, 88,				/* XIRQ16-20 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14,				/* XIRQ16-20 */
+};
+static const unsigned xirq_alternatives_pins[] = {
+	91, 92, 239, 144, 240, 156, 241, 106, 128,
+};
+static const unsigned xirq_alternatives_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14, 14,
+};
+
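+/*
+ * Note (editorial, assumed from the table layout above): each *_pins[]
+ * array appears to list the pin numbers that make up a pin group, and the
+ * *_muxvals[] array with the same stem supplies, index for index, the mux
+ * value used to select that function on the corresponding pin.
+ */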
+static const struct uniphier_pinctrl_group ph1_pro5_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c5),
+	UNIPHIER_PINCTRL_GROUP(i2c5b),
+	UNIPHIER_PINCTRL_GROUP(i2c5c),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_alternatives),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range0, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range0, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range0, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range0, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range0, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range0, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range0, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range0, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range0, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range0, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range0, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range0, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range0, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range0, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range0, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range0, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range0, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range0, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range0, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port290, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port291, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port292, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port293, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port294, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port295, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port296, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port297, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port300, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port301, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port302, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port303, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port304, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port305, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port306, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port307, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3b, xirq_alternatives, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4b, xirq_alternatives, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16b, xirq_alternatives, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17b, xirq_alternatives, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17c, xirq_alternatives, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18b, xirq_alternatives, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18c, xirq_alternatives, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19b, xirq_alternatives, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20b, xirq_alternatives, 8),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5", "i2c5b", "i2c5c"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-174 missing */
+	/* none */ "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+	"port290", "port291", "port292", "port293",
+	"port294", "port295", "port296", "port297",
+	"port300", "port301", "port302", "port303",
+	"port304", "port305", "port306", "port307",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20",
+	"xirq3b", "xirq4b", "xirq16b", "xirq17b", "xirq17c",
+	"xirq18b", "xirq18c", "xirq19b", "xirq20b",
+};
+
+static const struct uniphier_pinmux_function ph1_pro5_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c5),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata ph1_pro5_pindata = {
+	.groups = ph1_pro5_groups,
+	.groups_count = ARRAY_SIZE(ph1_pro5_groups),
+	.functions = ph1_pro5_functions,
+	.functions_count = ARRAY_SIZE(ph1_pro5_functions),
+	.mux_bits = 4,
+	.reg_stride = 8,
+	.load_pinctrl = true,
+};
+
+static struct pinctrl_desc ph1_pro5_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_pro5_pins,
+	.npins = ARRAY_SIZE(ph1_pro5_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_pro5_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_pro5_pinctrl_desc,
+				      &ph1_pro5_pindata);
+}
+
+static const struct of_device_id ph1_pro5_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-pro5-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_pro5_pinctrl_match);
+
+static struct platform_driver ph1_pro5_pinctrl_driver = {
+	.probe = ph1_pro5_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_pro5_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_pro5_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-Pro5 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
new file mode 100644
index 0000000..7e9dae5
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "ph1-sld8-pinctrl"
+
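+/*
+ * Note (editorial, assumed from the surrounding entries): each
+ * UNIPHIER_PINCTRL_PIN() descriptor seems to carry, in order, the pin
+ * number, pin name, input-enable control (or UNIPHIER_PIN_IECTRL_NONE),
+ * a drive-control index (-1 where the drive strength is fixed) with its
+ * drive-strength setting, and a pull-control index (-1 where the bias is
+ * fixed) with its pull direction.
+ */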
+static const struct pinctrl_pin_desc ph1_sld8_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE,
+			     15, UNIPHIER_PIN_DRV_4_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_4_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE,
+			     17, UNIPHIER_PIN_DRV_4_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE,
+			     18, UNIPHIER_PIN_DRV_4_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE,
+			     19, UNIPHIER_PIN_DRV_4_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_4_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE,
+			     21, UNIPHIER_PIN_DRV_4_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE,
+			     22, UNIPHIER_PIN_DRV_4_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE,
+			     23, UNIPHIER_PIN_DRV_4_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_4_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE,
+			     25, UNIPHIER_PIN_DRV_4_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE,
+			     26, UNIPHIER_PIN_DRV_4_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE,
+			     27, UNIPHIER_PIN_DRV_4_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_4_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE,
+			     29, UNIPHIER_PIN_DRV_4_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(16, "XNFWE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(17, "NFALE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "NFCLE_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "XNFWP_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "XNFCE0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(21, "NANDRYBY0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(22, "XNFCE1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     119, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(23, "NANDRYBY1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     120, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(24, "NFD0_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     121, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(25, "NFD1_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     122, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(26, "NFD2_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     123, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(27, "NFD3_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     124, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(28, "NFD4_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     24, UNIPHIER_PIN_DRV_8_12_16_20,
+			     125, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(29, "NFD5_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     28, UNIPHIER_PIN_DRV_8_12_16_20,
+			     126, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(30, "NFD6_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_8_12_16_20,
+			     127, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_8_12_16_20,
+			     128, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE,
+			     47, UNIPHIER_PIN_DRV_4_8,
+			     47, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE,
+			     48, UNIPHIER_PIN_DRV_4_8,
+			     48, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     49, UNIPHIER_PIN_DRV_4_8,
+			     49, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE,
+			     50, UNIPHIER_PIN_DRV_4_8,
+			     50, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE,
+			     51, UNIPHIER_PIN_DRV_4_8,
+			     51, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE,
+			     52, UNIPHIER_PIN_DRV_4_8,
+			     52, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE,
+			     53, UNIPHIER_PIN_DRV_4_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE,
+			     54, UNIPHIER_PIN_DRV_4_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     55, UNIPHIER_PIN_DRV_4_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     56, UNIPHIER_PIN_DRV_4_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     57, UNIPHIER_PIN_DRV_4_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     58, UNIPHIER_PIN_DRV_4_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     59, UNIPHIER_PIN_DRV_4_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     60, UNIPHIER_PIN_DRV_4_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     61, UNIPHIER_PIN_DRV_4_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     62, UNIPHIER_PIN_DRV_4_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     63, UNIPHIER_PIN_DRV_4_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     64, UNIPHIER_PIN_DRV_4_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     65, UNIPHIER_PIN_DRV_4_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     66, UNIPHIER_PIN_DRV_4_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     67, UNIPHIER_PIN_DRV_4_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE,
+			     68, UNIPHIER_PIN_DRV_4_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE,
+			     69, UNIPHIER_PIN_DRV_4_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE,
+			     70, UNIPHIER_PIN_DRV_4_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE,
+			     71, UNIPHIER_PIN_DRV_4_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE,
+			     72, UNIPHIER_PIN_DRV_4_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE,
+			     73, UNIPHIER_PIN_DRV_4_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     74, UNIPHIER_PIN_DRV_4_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     75, UNIPHIER_PIN_DRV_4_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     76, UNIPHIER_PIN_DRV_4_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE,
+			     77, UNIPHIER_PIN_DRV_4_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE,
+			     78, UNIPHIER_PIN_DRV_4_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE,
+			     79, UNIPHIER_PIN_DRV_4_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE,
+			     80, UNIPHIER_PIN_DRV_4_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE,
+			     81, UNIPHIER_PIN_DRV_4_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE,
+			     82, UNIPHIER_PIN_DRV_4_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE,
+			     83, UNIPHIER_PIN_DRV_4_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE,
+			     84, UNIPHIER_PIN_DRV_4_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE,
+			     85, UNIPHIER_PIN_DRV_4_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE,
+			     86, UNIPHIER_PIN_DRV_4_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE,
+			     87, UNIPHIER_PIN_DRV_4_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     88, UNIPHIER_PIN_DRV_4_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE,
+			     89, UNIPHIER_PIN_DRV_4_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     90, UNIPHIER_PIN_DRV_4_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "ALRCKO", UNIPHIER_PIN_IECTRL_NONE,
+			     91, UNIPHIER_PIN_DRV_4_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "ASDOUT0", UNIPHIER_PIN_IECTRL_NONE,
+			     92, UNIPHIER_PIN_DRV_4_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE,
+			     93, UNIPHIER_PIN_DRV_4_8,
+			     93, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE,
+			     94, UNIPHIER_PIN_DRV_4_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE,
+			     95, UNIPHIER_PIN_DRV_4_8,
+			     95, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE,
+			     96, UNIPHIER_PIN_DRV_4_8,
+			     96, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE,
+			     97, UNIPHIER_PIN_DRV_4_8,
+			     97, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE,
+			     98, UNIPHIER_PIN_DRV_4_8,
+			     98, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     99, UNIPHIER_PIN_DRV_4_8,
+			     99, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE,
+			     100, UNIPHIER_PIN_DRV_4_8,
+			     100, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_5,
+			     -1, UNIPHIER_PIN_PULL_NONE),
+	UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE,
+			     101, UNIPHIER_PIN_DRV_4_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE,
+			     102, UNIPHIER_PIN_DRV_4_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE,
+			     103, UNIPHIER_PIN_DRV_4_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE,
+			     104, UNIPHIER_PIN_DRV_4_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE,
+			     105, UNIPHIER_PIN_DRV_4_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE,
+			     106, UNIPHIER_PIN_DRV_4_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE,
+			     107, UNIPHIER_PIN_DRV_4_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE,
+			     108, UNIPHIER_PIN_DRV_4_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE,
+			     109, UNIPHIER_PIN_DRV_4_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE,
+			     110, UNIPHIER_PIN_DRV_4_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE,
+			     111, UNIPHIER_PIN_DRV_4_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE,
+			     112, UNIPHIER_PIN_DRV_4_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE,
+			     117, UNIPHIER_PIN_DRV_4_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE,
+			     118, UNIPHIER_PIN_DRV_4_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+};
+
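+/*
+ * Pin group tables: each <group>_pins[] array below lists the pin numbers
+ * that make up a group, and the matching <group>_muxvals[] array gives the
+ * mux selector value programmed for each of those pins when the group is
+ * selected (a reading of the naming convention; the arrays are consumed via
+ * the UNIPHIER_PINCTRL_GROUP() macros further down).
+ */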
+static const unsigned emmc_pins[] = {21, 22, 23, 24, 25, 26, 27};
+static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
+static const unsigned emmc_dat8_pins[] = {28, 29, 30, 31};
+static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
+static const unsigned i2c0_pins[] = {102, 103};
+static const unsigned i2c0_muxvals[] = {0, 0};
+static const unsigned i2c1_pins[] = {104, 105};
+static const unsigned i2c1_muxvals[] = {0, 0};
+static const unsigned i2c2_pins[] = {108, 109};
+static const unsigned i2c2_muxvals[] = {2, 2};
+static const unsigned i2c3_pins[] = {108, 109};
+static const unsigned i2c3_muxvals[] = {3, 3};
+static const unsigned nand_pins[] = {15, 16, 17, 18, 19, 20, 21, 24, 25, 26,
+				     27, 28, 29, 30, 31};
+static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0};
+static const unsigned nand_cs1_pins[] = {22, 23};
+static const unsigned nand_cs1_muxvals[] = {0, 0};
+static const unsigned uart0_pins[] = {70, 71};
+static const unsigned uart0_muxvals[] = {3, 3};
+static const unsigned uart1_pins[] = {114, 115};
+static const unsigned uart1_muxvals[] = {0, 0};
+static const unsigned uart2_pins[] = {112, 113};
+static const unsigned uart2_muxvals[] = {1, 1};
+static const unsigned uart3_pins[] = {110, 111};
+static const unsigned uart3_muxvals[] = {1, 1};
+static const unsigned usb0_pins[] = {41, 42};
+static const unsigned usb0_muxvals[] = {0, 0};
+static const unsigned usb1_pins[] = {43, 44};
+static const unsigned usb1_muxvals[] = {0, 0};
+static const unsigned usb2_pins[] = {114, 115};
+static const unsigned usb2_muxvals[] = {1, 1};
+static const unsigned port_range0_pins[] = {
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT0x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT1x */
+	32, 33, 34, 35, 36, 37, 38, 39,			/* PORT2x */
+	59, 60, 61, 62, 63, 64, 65, 66,			/* PORT3x */
+	95, 96, 97, 98, 99, 100, 101, 57,		/* PORT4x */
+	70, 71, 72, 73, 74, 75, 76, 77,			/* PORT5x */
+	81, 83, 84, 85, 86, 89, 90, 91,			/* PORT6x */
+	118, 119, 120, 121, 122, 53, 54, 55,		/* PORT7x */
+	41, 42, 43, 44, 79, 80, 18, 19,			/* PORT8x */
+	110, 111, 112, 113, 114, 115, 16, 17,		/* PORT9x */
+	40, 67, 68, 69, 78, 92, 93, 94,			/* PORT10x */
+	48, 49, 46, 45, 123, 124, 125, 126,		/* PORT11x */
+	47, 127, 20, 56, 22,				/* PORT120-124 */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT11x */
+	15, 15, 15, 15, 15,				/* PORT120-124 */
+};
+static const unsigned port_range1_pins[] = {
+	116, 117,					/* PORT130-131 */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15,						/* PORT130-131 */
+};
+static const unsigned port_range2_pins[] = {
+	102, 103, 104, 105, 106, 107, 108, 109,		/* PORT14x */
+};
+static const unsigned port_range2_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+};
+static const unsigned port_range3_pins[] = {
+	23,						/* PORT166 */
+};
+static const unsigned port_range3_muxvals[] = {
+	15,						/* PORT166 */
+};
+static const unsigned xirq_range0_pins[] = {
+	128, 129, 130, 131, 132, 133, 134, 135,		/* XIRQ0-7 */
+	82, 87, 88, 50, 51,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range0_muxvals[] = {
+	0, 0, 0, 0, 0, 0, 0, 0,				/* XIRQ0-7 */
+	14, 14, 14, 14, 14,				/* XIRQ8-12 */
+};
+static const unsigned xirq_range1_pins[] = {
+	52, 58,						/* XIRQ14-15 */
+};
+static const unsigned xirq_range1_muxvals[] = {
+	14, 14,						/* XIRQ14-15 */
+};
+
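+/*
+ * Group descriptors: UNIPHIER_PINCTRL_GROUP(x) presumably ties the x_pins[]
+ * and x_muxvals[] arrays above into a group named "x",
+ * UNIPHIER_PINCTRL_GROUP_SINGLE(name, range, offset) exposes one entry of a
+ * range array as a standalone single-pin group, and the GPIO_RANGE_PORT/IRQ
+ * variants additionally mark the group as a GPIO port / external IRQ range.
+ */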
+static const struct uniphier_pinctrl_group ph1_sld8_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range2),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq_range1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port110, port_range0, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port111, port_range0, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port112, port_range0, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port113, port_range0, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port114, port_range0, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port115, port_range0, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port116, port_range0, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port117, port_range0, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range0, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range0, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range0, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range0, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range0, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range2, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range2, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range2, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range2, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range2, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range2, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range2, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range2, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range3, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq_range1, 1),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	"port110", "port111", "port112", "port113",
+	"port114", "port115", "port116", "port117",
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	/* port150-164 missing */
+	/* none */ "port165",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", /* none*/ "xirq14", "xirq15",
+};
+
+static const struct uniphier_pinmux_function ph1_sld8_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
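+/*
+ * SoC data: with .mux_bits = 8 and .reg_stride = 4, each pin's mux selector
+ * appears to occupy an 8-bit field, so one 32-bit mux register (4-byte
+ * register stride) covers four pins; the muxvals above are written into
+ * these fields by the common UniPhier pinctrl core.
+ */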
+static struct uniphier_pinctrl_socdata ph1_sld8_pindata = {
+	.groups = ph1_sld8_groups,
+	.groups_count = ARRAY_SIZE(ph1_sld8_groups),
+	.functions = ph1_sld8_functions,
+	.functions_count = ARRAY_SIZE(ph1_sld8_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc ph1_sld8_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = ph1_sld8_pins,
+	.npins = ARRAY_SIZE(ph1_sld8_pins),
+	.owner = THIS_MODULE,
+};
+
+static int ph1_sld8_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &ph1_sld8_pinctrl_desc,
+				      &ph1_sld8_pindata);
+}
+
+static const struct of_device_id ph1_sld8_pinctrl_match[] = {
+	{ .compatible = "socionext,ph1-sld8-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ph1_sld8_pinctrl_match);
+
+static struct platform_driver ph1_sld8_pinctrl_driver = {
+	.probe = ph1_sld8_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = ph1_sld8_pinctrl_match,
+	},
+};
+module_platform_driver(ph1_sld8_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PH1-sLD8 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-proxstream2.c b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
new file mode 100644
index 0000000..3f036e2
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-proxstream2.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-uniphier.h"
+
+#define DRIVER_NAME "proxstream2-pinctrl"
+
+static const struct pinctrl_pin_desc proxstream2_pins[] = {
+	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_4_8,
+			     0, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE,
+			     1, UNIPHIER_PIN_DRV_4_8,
+			     1, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE,
+			     2, UNIPHIER_PIN_DRV_4_8,
+			     2, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE,
+			     3, UNIPHIER_PIN_DRV_4_8,
+			     3, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_4_8,
+			     4, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE,
+			     5, UNIPHIER_PIN_DRV_4_8,
+			     5, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE,
+			     6, UNIPHIER_PIN_DRV_4_8,
+			     6, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE,
+			     7, UNIPHIER_PIN_DRV_4_8,
+			     7, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_4_8,
+			     8, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE,
+			     9, UNIPHIER_PIN_DRV_4_8,
+			     9, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE,
+			     10, UNIPHIER_PIN_DRV_4_8,
+			     10, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE,
+			     11, UNIPHIER_PIN_DRV_4_8,
+			     11, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_4_8,
+			     12, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE,
+			     13, UNIPHIER_PIN_DRV_4_8,
+			     13, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE,
+			     14, UNIPHIER_PIN_DRV_4_8,
+			     14, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(15, "SMTRST0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     15, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(16, "SMTCMD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     16, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(17, "SMTD0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     17, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(18, "SMTSEL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     18, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(19, "SMTCLK0CG", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     19, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(20, "SMTDET0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     20, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(21, "SMTRST1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     21, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(22, "SMTCMD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     22, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(23, "SMTD1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     23, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(24, "SMTSEL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     24, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(25, "SMTCLK1CG", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     25, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(26, "SMTDET1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     26, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(27, "XIRQ18", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     27, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(28, "XIRQ19", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     28, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(29, "XIRQ20", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     29, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE,
+			     30, UNIPHIER_PIN_DRV_4_8,
+			     30, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE,
+			     31, UNIPHIER_PIN_DRV_4_8,
+			     31, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE,
+			     32, UNIPHIER_PIN_DRV_4_8,
+			     32, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE,
+			     33, UNIPHIER_PIN_DRV_4_8,
+			     33, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE,
+			     34, UNIPHIER_PIN_DRV_4_8,
+			     34, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE,
+			     35, UNIPHIER_PIN_DRV_4_8,
+			     35, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE,
+			     36, UNIPHIER_PIN_DRV_4_8,
+			     36, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE,
+			     37, UNIPHIER_PIN_DRV_4_8,
+			     37, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE,
+			     38, UNIPHIER_PIN_DRV_4_8,
+			     38, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE,
+			     39, UNIPHIER_PIN_DRV_4_8,
+			     39, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE,
+			     40, UNIPHIER_PIN_DRV_4_8,
+			     40, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE,
+			     41, UNIPHIER_PIN_DRV_4_8,
+			     41, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE,
+			     42, UNIPHIER_PIN_DRV_4_8,
+			     42, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE,
+			     43, UNIPHIER_PIN_DRV_4_8,
+			     43, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE,
+			     44, UNIPHIER_PIN_DRV_4_8,
+			     44, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE,
+			     45, UNIPHIER_PIN_DRV_4_8,
+			     45, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE,
+			     46, UNIPHIER_PIN_DRV_4_8,
+			     46, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE,
+			     0, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE,
+			     4, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE,
+			     8, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE,
+			     12, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE,
+			     16, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE,
+			     20, UNIPHIER_PIN_DRV_8_12_16_20,
+			     -1, UNIPHIER_PIN_PULL_UP_FIXED),
+	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     53, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     54, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     55, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     56, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     57, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     58, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     59, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     60, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     61, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     62, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     63, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(64, "CH0CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     64, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(65, "CH0PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     65, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(66, "CH0VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     66, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(67, "CH0DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     67, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(68, "CH1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     68, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(69, "CH1PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     69, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(70, "CH1VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     70, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(71, "CH1DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     71, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(72, "XIRQ9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     72, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(73, "XIRQ10", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     73, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(74, "XIRQ16", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     74, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(75, "CH4CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     75, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(76, "CH4PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     76, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(77, "CH4VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     77, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(78, "CH4DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     78, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(79, "CH5CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     79, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(80, "CH5PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     80, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(81, "CH5VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     81, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(82, "CH5DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     82, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(83, "CH6CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     83, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(84, "CH6PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     84, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(85, "CH6VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     85, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(86, "CH6DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     86, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(87, "STS0CLKO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     87, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(88, "STS0SYNCO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     88, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(89, "STS0VALO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     89, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(90, "STS0DATAO", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     90, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(91, "XIRQ17", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     91, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(92, "PORT163", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     92, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(93, "PORT165", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     93, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(94, "PORT166", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     94, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(95, "PORT132", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     95, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(96, "PORT133", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     96, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(97, "AO2IEC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     97, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(98, "AI2ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     98, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(99, "AI2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     99, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(100, "AI2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     100, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(101, "AI2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     101, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(102, "AI2D1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     102, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(103, "AI2D2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     103, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(104, "AI2D3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     104, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(105, "AO3DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     105, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(106, "AO3BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     106, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(107, "AO3LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     107, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(108, "AO3DMIX", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     108, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     109, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     110, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     111, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     112, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(113, "TXD2", 0,
+			     113, UNIPHIER_PIN_DRV_4_8,
+			     113, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(114, "RXD2", 0,
+			     114, UNIPHIER_PIN_DRV_4_8,
+			     114, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0,
+			     115, UNIPHIER_PIN_DRV_4_8,
+			     115, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0,
+			     116, UNIPHIER_PIN_DRV_4_8,
+			     116, UNIPHIER_PIN_PULL_UP),
+	UNIPHIER_PINCTRL_PIN(117, "PORT190", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     117, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(118, "VI1HSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     118, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(119, "VI1VSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     119, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(120, "VI1DE", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     120, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     121, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     122, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(123, "VI1G2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     123, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(124, "VI1G3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     124, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(125, "VI1G4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     125, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(126, "VI1G5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     126, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(127, "VI1G6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     127, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(128, "VI1G7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     128, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(129, "VI1G8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     129, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(130, "VI1G9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     130, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(131, "VI1CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     131, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     132, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     133, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(134, "VI1R2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     134, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(135, "VI1R3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     135, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(136, "VI1R4", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     136, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(137, "VI1R5", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     137, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(138, "VI1R6", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     138, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(139, "VI1R7", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     139, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(140, "VI1R8", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     140, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(141, "VI1R9", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     141, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE,
+			     142, UNIPHIER_PIN_DRV_4_8,
+			     142, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(143, "MDC", 0,
+			     143, UNIPHIER_PIN_DRV_4_8,
+			     143, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0,
+			     144, UNIPHIER_PIN_DRV_4_8,
+			     144, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0,
+			     145, UNIPHIER_PIN_DRV_4_8,
+			     145, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0,
+			     146, UNIPHIER_PIN_DRV_4_8,
+			     146, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0,
+			     147, UNIPHIER_PIN_DRV_4_8,
+			     147, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0,
+			     148, UNIPHIER_PIN_DRV_4_8,
+			     148, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0,
+			     149, UNIPHIER_PIN_DRV_4_8,
+			     149, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0,
+			     150, UNIPHIER_PIN_DRV_4_8,
+			     150, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0,
+			     151, UNIPHIER_PIN_DRV_4_8,
+			     151, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0,
+			     152, UNIPHIER_PIN_DRV_4_8,
+			     152, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0,
+			     153, UNIPHIER_PIN_DRV_4_8,
+			     153, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0,
+			     154, UNIPHIER_PIN_DRV_4_8,
+			     154, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0,
+			     155, UNIPHIER_PIN_DRV_4_8,
+			     155, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0,
+			     156, UNIPHIER_PIN_DRV_4_8,
+			     156, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0,
+			     157, UNIPHIER_PIN_DRV_4_8,
+			     157, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0,
+			     158, UNIPHIER_PIN_DRV_4_8,
+			     158, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(159, "SDA3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     159, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(160, "SCL3", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     160, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(161, "AI1ADCCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     161, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(162, "AI1BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     162, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(163, "CH2CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     163, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(164, "CH2PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     164, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(165, "CH2VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     165, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(166, "CH2DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     166, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(167, "CH3CLK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     167, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(168, "CH3PSYNC", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     168, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(169, "CH3VAL", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     169, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(170, "CH3DATA", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     170, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(171, "SDA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     171, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(172, "SCL2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     172, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(173, "AI1LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     173, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(174, "AI1D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     174, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(175, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     175, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(176, "AO2D0", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     176, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(177, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     177, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(178, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     178, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(179, "PORT222", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     179, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(180, "PORT223", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     180, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(181, "PORT224", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     181, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(182, "PORT225", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     182, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(183, "PORT226", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     183, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(184, "PORT227", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     184, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(185, "PORT230", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     185, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(186, "FANPWM", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     186, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(187, "HRDDCSDA0", 0,
+			     187, UNIPHIER_PIN_DRV_4_8,
+			     187, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(188, "HRDDCSCL0", 0,
+			     188, UNIPHIER_PIN_DRV_4_8,
+			     188, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(189, "HRDDCSDA1", 0,
+			     189, UNIPHIER_PIN_DRV_4_8,
+			     189, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(190, "HRDDCSCL1", 0,
+			     190, UNIPHIER_PIN_DRV_4_8,
+			     190, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(191, "HTDDCSDA0", 0,
+			     191, UNIPHIER_PIN_DRV_4_8,
+			     191, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(192, "HTDDCSCL0", 0,
+			     192, UNIPHIER_PIN_DRV_4_8,
+			     192, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(193, "HTDDCSDA1", 0,
+			     193, UNIPHIER_PIN_DRV_4_8,
+			     193, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(194, "HTDDCSCL1", 0,
+			     194, UNIPHIER_PIN_DRV_4_8,
+			     194, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(195, "PORT241", 0,
+			     195, UNIPHIER_PIN_DRV_4_8,
+			     195, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(196, "PORT242", 0,
+			     196, UNIPHIER_PIN_DRV_4_8,
+			     196, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(197, "PORT243", 0,
+			     197, UNIPHIER_PIN_DRV_4_8,
+			     197, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(198, "MVSYNC", 0,
+			     198, UNIPHIER_PIN_DRV_4_8,
+			     198, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(199, "SPISYNC0", UNIPHIER_PIN_IECTRL_NONE,
+			     199, UNIPHIER_PIN_DRV_4_8,
+			     199, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(200, "SPISCLK0", UNIPHIER_PIN_IECTRL_NONE,
+			     200, UNIPHIER_PIN_DRV_4_8,
+			     200, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(201, "SPITXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     201, UNIPHIER_PIN_DRV_4_8,
+			     201, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(202, "SPIRXD0", UNIPHIER_PIN_IECTRL_NONE,
+			     202, UNIPHIER_PIN_DRV_4_8,
+			     202, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(203, "CK54EXI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     203, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(204, "AEXCKA1", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     204, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(205, "AEXCKA2", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     205, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(206, "CK27EXI", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_8,
+			     206, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(207, "STCDIN", 0,
+			     207, UNIPHIER_PIN_DRV_4_8,
+			     207, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(208, "PHSYNI", 0,
+			     208, UNIPHIER_PIN_DRV_4_8,
+			     208, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(209, "PVSYNI", 0,
+			     209, UNIPHIER_PIN_DRV_4_8,
+			     209, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(210, "MVSYN", UNIPHIER_PIN_IECTRL_NONE,
+			     210, UNIPHIER_PIN_DRV_4_8,
+			     210, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(211, "STCV", UNIPHIER_PIN_IECTRL_NONE,
+			     211, UNIPHIER_PIN_DRV_4_8,
+			     211, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(212, "PORT262", UNIPHIER_PIN_IECTRL_NONE,
+			     212, UNIPHIER_PIN_DRV_4_8,
+			     212, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(213, "USB0VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     213, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(214, "USB1VBUS_IRQ", UNIPHIER_PIN_IECTRL_NONE,
+			     -1, UNIPHIER_PIN_DRV_FIXED_4,
+			     214, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(215, "PORT265", UNIPHIER_PIN_IECTRL_NONE,
+			     215, UNIPHIER_PIN_DRV_4_8,
+			     215, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(216, "CK25O", 0,
+			     216, UNIPHIER_PIN_DRV_4_8,
+			     216, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(217, "TXD0", 0,
+			     217, UNIPHIER_PIN_DRV_4_8,
+			     217, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(218, "RXD0", 0,
+			     218, UNIPHIER_PIN_DRV_4_8,
+			     218, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(219, "TXD3", 0,
+			     219, UNIPHIER_PIN_DRV_4_8,
+			     219, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(220, "RXD3", 0,
+			     220, UNIPHIER_PIN_DRV_4_8,
+			     220, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(221, "PORT273", 0,
+			     221, UNIPHIER_PIN_DRV_4_8,
+			     221, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(222, "STCDOUTC", 0,
+			     222, UNIPHIER_PIN_DRV_4_8,
+			     222, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(223, "PORT274", 0,
+			     223, UNIPHIER_PIN_DRV_4_8,
+			     223, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(224, "PORT275", 0,
+			     224, UNIPHIER_PIN_DRV_4_8,
+			     224, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(225, "PORT276", 0,
+			     225, UNIPHIER_PIN_DRV_4_8,
+			     225, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(226, "PORT277", 0,
+			     226, UNIPHIER_PIN_DRV_4_8,
+			     226, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(227, "PORT280", 0,
+			     227, UNIPHIER_PIN_DRV_4_8,
+			     227, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(228, "PORT281", 0,
+			     228, UNIPHIER_PIN_DRV_4_8,
+			     228, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(229, "PORT282", 0,
+			     229, UNIPHIER_PIN_DRV_4_8,
+			     229, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(230, "PORT283", 0,
+			     230, UNIPHIER_PIN_DRV_4_8,
+			     230, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(231, "PORT284", 0,
+			     231, UNIPHIER_PIN_DRV_4_8,
+			     231, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(232, "PORT285", 0,
+			     232, UNIPHIER_PIN_DRV_4_8,
+			     232, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(233, "T0HPD", 0,
+			     233, UNIPHIER_PIN_DRV_4_8,
+			     233, UNIPHIER_PIN_PULL_DOWN),
+	UNIPHIER_PINCTRL_PIN(234, "T1HPD", 0,
+			     234, UNIPHIER_PIN_DRV_4_8,
+			     234, UNIPHIER_PIN_PULL_DOWN),
+};
+
+static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
+static const unsigned emmc_muxvals[] = {9, 9, 9, 9, 9, 9, 9};
+static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
+static const unsigned emmc_dat8_muxvals[] = {9, 9, 9, 9};
+static const unsigned i2c0_pins[] = {109, 110};
+static const unsigned i2c0_muxvals[] = {8, 8};
+static const unsigned i2c1_pins[] = {111, 112};
+static const unsigned i2c1_muxvals[] = {8, 8};
+static const unsigned i2c2_pins[] = {171, 172};
+static const unsigned i2c2_muxvals[] = {8, 8};
+static const unsigned i2c3_pins[] = {159, 160};
+static const unsigned i2c3_muxvals[] = {8, 8};
+static const unsigned i2c5_pins[] = {183, 184};
+static const unsigned i2c5_muxvals[] = {11, 11};
+static const unsigned i2c6_pins[] = {185, 186};
+static const unsigned i2c6_muxvals[] = {11, 11};
+static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
+				     42, 43, 44, 45, 46};
+static const unsigned nand_muxvals[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+					8, 8};
+static const unsigned nand_cs1_pins[] = {37, 38};
+static const unsigned nand_cs1_muxvals[] = {8, 8};
+static const unsigned uart0_pins[] = {217, 218};
+static const unsigned uart0_muxvals[] = {8, 8};
+static const unsigned uart0b_pins[] = {179, 180};
+static const unsigned uart0b_muxvals[] = {10, 10};
+static const unsigned uart1_pins[] = {115, 116};
+static const unsigned uart1_muxvals[] = {8, 8};
+static const unsigned uart2_pins[] = {113, 114};
+static const unsigned uart2_muxvals[] = {8, 8};
+static const unsigned uart3_pins[] = {219, 220};
+static const unsigned uart3_muxvals[] = {8, 8};
+static const unsigned uart3b_pins[] = {181, 182};
+static const unsigned uart3b_muxvals[] = {10, 10};
+static const unsigned usb0_pins[] = {56, 57};
+static const unsigned usb0_muxvals[] = {8, 8};
+static const unsigned usb1_pins[] = {58, 59};
+static const unsigned usb1_muxvals[] = {8, 8};
+static const unsigned usb2_pins[] = {60, 61};
+static const unsigned usb2_muxvals[] = {8, 8};
+static const unsigned usb3_pins[] = {62, 63};
+static const unsigned usb3_muxvals[] = {8, 8};
+static const unsigned port_range0_pins[] = {
+	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
+	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
+	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
+	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
+	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
+	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
+	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
+	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
+	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
+	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
+	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
+};
+static const unsigned port_range0_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
+};
+static const unsigned port_range1_pins[] = {
+	81, 82, 83, 84, 85, 86, 87, 88,			/* PORT12x */
+	89, 90, 95, 96, 97, 98, 99, 100,		/* PORT13x */
+	101, 102, 103, 104, 105, 106, 107, 108,		/* PORT14x */
+	118, 119, 120, 121, 122, 123, 124, 125,		/* PORT15x */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* PORT16x */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* PORT17x */
+	109, 110, 111, 112, 113, 114, 115, 116,		/* PORT18x */
+	117, 143, 144, 145, 146, 147, 148, 149,		/* PORT19x */
+	150, 151, 152, 153, 154, 155, 156, 157,		/* PORT20x */
+	158, 159, 160, 161, 162, 163, 164, 165,		/* PORT21x */
+	166, 178, 179, 180, 181, 182, 183, 184,		/* PORT22x */
+	185, 187, 188, 189, 190, 191, 192, 193,		/* PORT23x */
+	194, 195, 196, 197, 198, 199, 200, 201,		/* PORT24x */
+	202, 203, 204, 205, 206, 207, 208, 209,		/* PORT25x */
+	210, 211, 212, 213, 214, 215, 216, 217,		/* PORT26x */
+	218, 219, 220, 221, 223, 224, 225, 226,		/* PORT27x */
+	227, 228, 229, 230, 231, 232, 233, 234,		/* PORT28x */
+};
+static const unsigned port_range1_muxvals[] = {
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT12x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT13x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT14x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT15x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT16x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT17x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT18x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT19x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT20x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT21x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT22x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT23x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT24x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT25x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT26x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT27x */
+	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT28x */
+};
+static const unsigned xirq_pins[] = {
+	118, 119, 120, 121, 122, 123, 124, 125,		/* XIRQ0-7 */
+	126, 72, 73, 92, 177, 93, 94, 176,		/* XIRQ8-15 */
+	74, 91, 27, 28, 29, 75, 20, 26,			/* XIRQ16-23 */
+};
+static const unsigned xirq_muxvals[] = {
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ0-7 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ8-15 */
+	14, 14, 14, 14, 14, 14, 14, 14,			/* XIRQ16-23 */
+};
+
+static const struct uniphier_pinctrl_group proxstream2_groups[] = {
+	UNIPHIER_PINCTRL_GROUP(emmc),
+	UNIPHIER_PINCTRL_GROUP(emmc_dat8),
+	UNIPHIER_PINCTRL_GROUP(i2c0),
+	UNIPHIER_PINCTRL_GROUP(i2c1),
+	UNIPHIER_PINCTRL_GROUP(i2c2),
+	UNIPHIER_PINCTRL_GROUP(i2c3),
+	UNIPHIER_PINCTRL_GROUP(i2c5),
+	UNIPHIER_PINCTRL_GROUP(i2c6),
+	UNIPHIER_PINCTRL_GROUP(nand),
+	UNIPHIER_PINCTRL_GROUP(nand_cs1),
+	UNIPHIER_PINCTRL_GROUP(uart0),
+	UNIPHIER_PINCTRL_GROUP(uart0b),
+	UNIPHIER_PINCTRL_GROUP(uart1),
+	UNIPHIER_PINCTRL_GROUP(uart2),
+	UNIPHIER_PINCTRL_GROUP(uart3),
+	UNIPHIER_PINCTRL_GROUP(uart3b),
+	UNIPHIER_PINCTRL_GROUP(usb0),
+	UNIPHIER_PINCTRL_GROUP(usb1),
+	UNIPHIER_PINCTRL_GROUP(usb2),
+	UNIPHIER_PINCTRL_GROUP(usb3),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1),
+	UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port66, port_range0, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 106),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22),
+	UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23),
+};
+
+static const char * const emmc_groups[] = {"emmc", "emmc_dat8"};
+static const char * const i2c0_groups[] = {"i2c0"};
+static const char * const i2c1_groups[] = {"i2c1"};
+static const char * const i2c2_groups[] = {"i2c2"};
+static const char * const i2c3_groups[] = {"i2c3"};
+static const char * const i2c5_groups[] = {"i2c5"};
+static const char * const i2c6_groups[] = {"i2c6"};
+static const char * const nand_groups[] = {"nand", "nand_cs1"};
+static const char * const uart0_groups[] = {"uart0", "uart0b"};
+static const char * const uart1_groups[] = {"uart1"};
+static const char * const uart2_groups[] = {"uart2"};
+static const char * const uart3_groups[] = {"uart3", "uart3b"};
+static const char * const usb0_groups[] = {"usb0"};
+static const char * const usb1_groups[] = {"usb1"};
+static const char * const usb2_groups[] = {"usb2"};
+static const char * const usb3_groups[] = {"usb3"};
+static const char * const port_groups[] = {
+	"port00",  "port01",  "port02",  "port03",
+	"port04",  "port05",  "port06",  "port07",
+	"port10",  "port11",  "port12",  "port13",
+	"port14",  "port15",  "port16",  "port17",
+	"port20",  "port21",  "port22",  "port23",
+	"port24",  "port25",  "port26",  "port27",
+	"port30",  "port31",  "port32",  "port33",
+	"port34",  "port35",  "port36",  "port37",
+	"port40",  "port41",  "port42",  "port43",
+	"port44",  "port45",  "port46",  "port47",
+	"port50",  "port51",  "port52",  "port53",
+	"port54",  "port55",  "port56",  "port57",
+	"port60",  "port61",  "port62",  "port63",
+	"port64",  "port65",  "port66",  "port67",
+	"port70",  "port71",  "port72",  "port73",
+	"port74",  "port75",  "port76",  "port77",
+	"port80",  "port81",  "port82",  "port83",
+	"port84",  "port85",  "port86",  "port87",
+	"port90",  "port91",  "port92",  "port93",
+	"port94",  "port95",  "port96",  "port97",
+	"port100", "port101", "port102", "port103",
+	"port104", "port105", "port106", "port107",
+	/* port110-117 missing */
+	"port120", "port121", "port122", "port123",
+	"port124", "port125", "port126", "port127",
+	"port130", "port131", "port132", "port133",
+	"port134", "port135", "port136", "port137",
+	"port140", "port141", "port142", "port143",
+	"port144", "port145", "port146", "port147",
+	"port150", "port151", "port152", "port153",
+	"port154", "port155", "port156", "port157",
+	"port160", "port161", "port162", "port163",
+	"port164", "port165", "port166", "port167",
+	"port170", "port171", "port172", "port173",
+	"port174", "port175", "port176", "port177",
+	"port180", "port181", "port182", "port183",
+	"port184", "port185", "port186", "port187",
+	"port190", "port191", "port192", "port193",
+	"port194", "port195", "port196", "port197",
+	"port200", "port201", "port202", "port203",
+	"port204", "port205", "port206", "port207",
+	"port210", "port211", "port212", "port213",
+	"port214", "port215", "port216", "port217",
+	"port220", "port221", "port222", "port223",
+	"port224", "port225", "port226", "port227",
+	"port230", "port231", "port232", "port233",
+	"port234", "port235", "port236", "port237",
+	"port240", "port241", "port242", "port243",
+	"port244", "port245", "port246", "port247",
+	"port250", "port251", "port252", "port253",
+	"port254", "port255", "port256", "port257",
+	"port260", "port261", "port262", "port263",
+	"port264", "port265", "port266", "port267",
+	"port270", "port271", "port272", "port273",
+	"port274", "port275", "port276", "port277",
+	"port280", "port281", "port282", "port283",
+	"port284", "port285", "port286", "port287",
+};
+static const char * const xirq_groups[] = {
+	"xirq0",  "xirq1",  "xirq2",  "xirq3",
+	"xirq4",  "xirq5",  "xirq6",  "xirq7",
+	"xirq8",  "xirq9",  "xirq10", "xirq11",
+	"xirq12", "xirq13", "xirq14", "xirq15",
+	"xirq16", "xirq17", "xirq18", "xirq19",
+	"xirq20", "xirq21", "xirq22", "xirq23",
+};
+
+static const struct uniphier_pinmux_function proxstream2_functions[] = {
+	UNIPHIER_PINMUX_FUNCTION(emmc),
+	UNIPHIER_PINMUX_FUNCTION(i2c0),
+	UNIPHIER_PINMUX_FUNCTION(i2c1),
+	UNIPHIER_PINMUX_FUNCTION(i2c2),
+	UNIPHIER_PINMUX_FUNCTION(i2c3),
+	UNIPHIER_PINMUX_FUNCTION(i2c5),
+	UNIPHIER_PINMUX_FUNCTION(i2c6),
+	UNIPHIER_PINMUX_FUNCTION(nand),
+	UNIPHIER_PINMUX_FUNCTION(uart0),
+	UNIPHIER_PINMUX_FUNCTION(uart1),
+	UNIPHIER_PINMUX_FUNCTION(uart2),
+	UNIPHIER_PINMUX_FUNCTION(uart3),
+	UNIPHIER_PINMUX_FUNCTION(usb0),
+	UNIPHIER_PINMUX_FUNCTION(usb1),
+	UNIPHIER_PINMUX_FUNCTION(usb2),
+	UNIPHIER_PINMUX_FUNCTION(usb3),
+	UNIPHIER_PINMUX_FUNCTION(port),
+	UNIPHIER_PINMUX_FUNCTION(xirq),
+};
+
+static struct uniphier_pinctrl_socdata proxstream2_pindata = {
+	.groups = proxstream2_groups,
+	.groups_count = ARRAY_SIZE(proxstream2_groups),
+	.functions = proxstream2_functions,
+	.functions_count = ARRAY_SIZE(proxstream2_functions),
+	.mux_bits = 8,
+	.reg_stride = 4,
+	.load_pinctrl = false,
+};
+
+static struct pinctrl_desc proxstream2_pinctrl_desc = {
+	.name = DRIVER_NAME,
+	.pins = proxstream2_pins,
+	.npins = ARRAY_SIZE(proxstream2_pins),
+	.owner = THIS_MODULE,
+};
+
+static int proxstream2_pinctrl_probe(struct platform_device *pdev)
+{
+	return uniphier_pinctrl_probe(pdev, &proxstream2_pinctrl_desc,
+				      &proxstream2_pindata);
+}
+
+static const struct of_device_id proxstream2_pinctrl_match[] = {
+	{ .compatible = "socionext,proxstream2-pinctrl" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, proxstream2_pinctrl_match);
+
+static struct platform_driver proxstream2_pinctrl_driver = {
+	.probe = proxstream2_pinctrl_probe,
+	.remove = uniphier_pinctrl_remove,
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = proxstream2_pinctrl_match,
+	},
+};
+module_platform_driver(proxstream2_pinctrl_driver);
+
+MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
+MODULE_DESCRIPTION("UniPhier ProXstream2 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
new file mode 100644
index 0000000..918f3b6
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-core.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/mfd/syscon.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "pinctrl-uniphier.h"
+
+struct uniphier_pinctrl_priv {
+	struct pinctrl_dev *pctldev;
+	struct regmap *regmap;
+	struct uniphier_pinctrl_socdata *socdata;
+};
+
+static int uniphier_pctl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->groups_count;
+}
+
+static const char *uniphier_pctl_get_group_name(struct pinctrl_dev *pctldev,
+						unsigned selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->groups[selector].name;
+}
+
+static int uniphier_pctl_get_group_pins(struct pinctrl_dev *pctldev,
+					unsigned selector,
+					const unsigned **pins,
+					unsigned *num_pins)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*pins = priv->socdata->groups[selector].pins;
+	*num_pins = priv->socdata->groups[selector].num_pins;
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void uniphier_pctl_pin_dbg_show(struct pinctrl_dev *pctldev,
+				       struct seq_file *s, unsigned offset)
+{
+	const struct pinctrl_pin_desc *pin = &pctldev->desc->pins[offset];
+	const char *pull_dir, *drv_str;
+
+	switch (uniphier_pin_get_pull_dir(pin->drv_data)) {
+	case UNIPHIER_PIN_PULL_UP:
+		pull_dir = "UP";
+		break;
+	case UNIPHIER_PIN_PULL_DOWN:
+		pull_dir = "DOWN";
+		break;
+	case UNIPHIER_PIN_PULL_NONE:
+		pull_dir = "NONE";
+		break;
+	default:
+		BUG();
+	}
+
+	switch (uniphier_pin_get_drv_str(pin->drv_data)) {
+	case UNIPHIER_PIN_DRV_4_8:
+		drv_str = "4/8(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		drv_str = "8/12/16/20(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_4:
+		drv_str = "4(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_5:
+		drv_str = "5(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_8:
+		drv_str = "8(mA)";
+		break;
+	case UNIPHIER_PIN_DRV_NONE:
+		drv_str = "NONE";
+		break;
+	default:
+		BUG();
+	}
+
+	seq_printf(s, " PULL_DIR=%s  DRV_STR=%s", pull_dir, drv_str);
+}
+#endif
+
+static const struct pinctrl_ops uniphier_pctlops = {
+	.get_groups_count = uniphier_pctl_get_groups_count,
+	.get_group_name = uniphier_pctl_get_group_name,
+	.get_group_pins = uniphier_pctl_get_group_pins,
+#ifdef CONFIG_DEBUG_FS
+	.pin_dbg_show = uniphier_pctl_pin_dbg_show,
+#endif
+	.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+	.dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int uniphier_conf_pin_bias_get(struct pinctrl_dev *pctldev,
+				      const struct pinctrl_pin_desc *pin,
+				      enum pin_config_param param)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_pull_dir pull_dir =
+				uniphier_pin_get_pull_dir(pin->drv_data);
+	unsigned int pupdctrl, reg, shift, val;
+	unsigned int expected = 1;
+	int ret;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+			return 0;
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+		    pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+			return -EINVAL;
+		expected = 0;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_UP)
+			return -EINVAL;
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_DOWN)
+			return -EINVAL;
+		break;
+	default:
+		BUG();
+	}
+
+	pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
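+	/* one pull enable bit per pin, so 32 pins per PUPDCTRL register */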
+	reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+	shift = pupdctrl % 32;
+
+	ret = regmap_read(priv->regmap, reg, &val);
+	if (ret)
+		return ret;
+
+	val = (val >> shift) & 1;
+
+	return (val == expected) ? 0 : -EINVAL;
+}
+
+static int uniphier_conf_pin_drive_get(struct pinctrl_dev *pctldev,
+				       const struct pinctrl_pin_desc *pin,
+				       u16 *strength)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_drv_str drv_str =
+				uniphier_pin_get_drv_str(pin->drv_data);
+	const unsigned int strength_4_8[] = {4, 8};
+	const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20};
+	const unsigned int *supported_strength;
+	unsigned int drvctrl, reg, shift, mask, width, val;
+	int ret;
+
+	switch (drv_str) {
+	case UNIPHIER_PIN_DRV_4_8:
+		supported_strength = strength_4_8;
+		width = 1;
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		supported_strength = strength_8_12_16_20;
+		width = 2;
+		break;
+	case UNIPHIER_PIN_DRV_FIXED_4:
+		*strength = 4;
+		return 0;
+	case UNIPHIER_PIN_DRV_FIXED_5:
+		*strength = 5;
+		return 0;
+	case UNIPHIER_PIN_DRV_FIXED_8:
+		*strength = 8;
+		return 0;
+	default:
+		/* drive strength control is not supported for this pin */
+		return -EINVAL;
+	}
+
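+	/*
+	 * Each 32-bit DRVCTRL/DRV2CTRL register packs 32 / width fields;
+	 * drvctrl below is the index of this pin's field.
+	 */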
+	drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+	drvctrl *= width;
+
+	reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+			     UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+	reg += drvctrl / 32 * 4;
+	shift = drvctrl % 32;
+	mask = (1U << width) - 1;
+
+	ret = regmap_read(priv->regmap, reg, &val);
+	if (ret)
+		return ret;
+
+	*strength = supported_strength[(val >> shift) & mask];
+
+	return 0;
+}
+
+static int uniphier_conf_pin_input_enable_get(struct pinctrl_dev *pctldev,
+					const struct pinctrl_pin_desc *pin)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+	unsigned int val;
+	int ret;
+
+	if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+		/* This pin is always input-enabled. */
+		return 0;
+
+	ret = regmap_read(priv->regmap, UNIPHIER_PINCTRL_IECTRL, &val);
+	if (ret)
+		return ret;
+
+	return val & BIT(iectrl) ? 0 : -EINVAL;
+}
+
+static int uniphier_conf_pin_config_get(struct pinctrl_dev *pctldev,
+					unsigned pin,
+					unsigned long *configs)
+{
+	const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+	enum pin_config_param param = pinconf_to_config_param(*configs);
+	bool has_arg = false;
+	u16 arg;
+	int ret;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+	case PIN_CONFIG_BIAS_PULL_UP:
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		ret = uniphier_conf_pin_bias_get(pctldev, pin_desc, param);
+		break;
+	case PIN_CONFIG_DRIVE_STRENGTH:
+		ret = uniphier_conf_pin_drive_get(pctldev, pin_desc, &arg);
+		has_arg = true;
+		break;
+	case PIN_CONFIG_INPUT_ENABLE:
+		ret = uniphier_conf_pin_input_enable_get(pctldev, pin_desc);
+		break;
+	default:
+		/* unsupported parameter */
+		ret = -EINVAL;
+		break;
+	}
+
+	if (ret == 0 && has_arg)
+		*configs = pinconf_to_config_packed(param, arg);
+
+	return ret;
+}
+
+static int uniphier_conf_pin_bias_set(struct pinctrl_dev *pctldev,
+				      const struct pinctrl_pin_desc *pin,
+				      enum pin_config_param param,
+				      u16 arg)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_pull_dir pull_dir =
+				uniphier_pin_get_pull_dir(pin->drv_data);
+	unsigned int pupdctrl, reg, shift;
+	unsigned int val = 1;
+
+	switch (param) {
+	case PIN_CONFIG_BIAS_DISABLE:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE)
+			return 0;
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED ||
+		    pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED) {
+			dev_err(pctldev->dev,
+				"can not disable pull register for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		val = 0;
+		break;
+	case PIN_CONFIG_BIAS_PULL_UP:
+		if (pull_dir == UNIPHIER_PIN_PULL_UP_FIXED && arg != 0)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_UP) {
+			dev_err(pctldev->dev,
+				"pull-up is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		if (arg == 0) {
+			dev_err(pctldev->dev, "pull-up can not be total\n");
+			return -EINVAL;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_DOWN:
+		if (pull_dir == UNIPHIER_PIN_PULL_DOWN_FIXED && arg != 0)
+			return 0;
+		if (pull_dir != UNIPHIER_PIN_PULL_DOWN) {
+			dev_err(pctldev->dev,
+				"pull-down is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+		if (arg == 0) {
+			dev_err(pctldev->dev, "pull-down can not be total\n");
+			return -EINVAL;
+		}
+		break;
+	case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+		if (pull_dir == UNIPHIER_PIN_PULL_NONE) {
+			dev_err(pctldev->dev,
+				"pull-up/down is unsupported for pin %u (%s)\n",
+				pin->number, pin->name);
+			return -EINVAL;
+		}
+
+		if (arg == 0)
+			return 0; /* configuration ignored */
+		break;
+	default:
+		BUG();
+	}
+
+	pupdctrl = uniphier_pin_get_pupdctrl(pin->drv_data);
+
+	reg = UNIPHIER_PINCTRL_PUPDCTRL_BASE + pupdctrl / 32 * 4;
+	shift = pupdctrl % 32;
+
+	return regmap_update_bits(priv->regmap, reg, 1 << shift, val << shift);
+}
+
+static int uniphier_conf_pin_drive_set(struct pinctrl_dev *pctldev,
+				       const struct pinctrl_pin_desc *pin,
+				       u16 strength)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	enum uniphier_pin_drv_str drv_str =
+				uniphier_pin_get_drv_str(pin->drv_data);
+	const unsigned int strength_4_8[] = {4, 8, -1};
+	const unsigned int strength_8_12_16_20[] = {8, 12, 16, 20, -1};
+	const unsigned int *supported_strength;
+	unsigned int drvctrl, reg, shift, mask, width, val;
+
+	switch (drv_str) {
+	case UNIPHIER_PIN_DRV_4_8:
+		supported_strength = strength_4_8;
+		width = 1;
+		break;
+	case UNIPHIER_PIN_DRV_8_12_16_20:
+		supported_strength = strength_8_12_16_20;
+		width = 2;
+		break;
+	default:
+		dev_err(pctldev->dev,
+			"cannot change drive strength for pin %u (%s)\n",
+			pin->number, pin->name);
+		return -EINVAL;
+	}
+
+	for (val = 0; supported_strength[val] > 0; val++) {
+		if (supported_strength[val] > strength)
+			break;
+	}
+
+	if (val == 0) {
+		dev_err(pctldev->dev,
+			"unsupported drive strength %u mA for pin %u (%s)\n",
+			strength, pin->number, pin->name);
+		return -EINVAL;
+	}
+
+	val--;
+
+	drvctrl = uniphier_pin_get_drvctrl(pin->drv_data);
+	drvctrl *= width;
+
+	reg = (width == 2) ? UNIPHIER_PINCTRL_DRV2CTRL_BASE :
+			     UNIPHIER_PINCTRL_DRVCTRL_BASE;
+
+	reg += drvctrl / 32 * 4;
+	shift = drvctrl % 32;
+	mask = (1U << width) - 1;
+
+	return regmap_update_bits(priv->regmap, reg,
+				  mask << shift, val << shift);
+}
+
+static int uniphier_conf_pin_input_enable(struct pinctrl_dev *pctldev,
+					  const struct pinctrl_pin_desc *pin,
+					  u16 enable)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned int iectrl = uniphier_pin_get_iectrl(pin->drv_data);
+
+	if (enable == 0) {
+		/*
+		 * Multiple pins share one input enable, so per-pin disabling
+		 * is impossible.
+		 */
+		dev_err(pctldev->dev, "unable to disable input\n");
+		return -EINVAL;
+	}
+
+	if (iectrl == UNIPHIER_PIN_IECTRL_NONE)
+		/* This pin is always input-enabled. Nothing to do. */
+		return 0;
+
+	return regmap_update_bits(priv->regmap, UNIPHIER_PINCTRL_IECTRL,
+				  BIT(iectrl), BIT(iectrl));
+}
+
+static int uniphier_conf_pin_config_set(struct pinctrl_dev *pctldev,
+					unsigned pin,
+					unsigned long *configs,
+					unsigned num_configs)
+{
+	const struct pinctrl_pin_desc *pin_desc = &pctldev->desc->pins[pin];
+	int i, ret;
+
+	for (i = 0; i < num_configs; i++) {
+		enum pin_config_param param =
+					pinconf_to_config_param(configs[i]);
+		u16 arg = pinconf_to_config_argument(configs[i]);
+
+		switch (param) {
+		case PIN_CONFIG_BIAS_DISABLE:
+		case PIN_CONFIG_BIAS_PULL_UP:
+		case PIN_CONFIG_BIAS_PULL_DOWN:
+		case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+			ret = uniphier_conf_pin_bias_set(pctldev, pin_desc,
+							 param, arg);
+			break;
+		case PIN_CONFIG_DRIVE_STRENGTH:
+			ret = uniphier_conf_pin_drive_set(pctldev, pin_desc,
+							  arg);
+			break;
+		case PIN_CONFIG_INPUT_ENABLE:
+			ret = uniphier_conf_pin_input_enable(pctldev,
+							     pin_desc, arg);
+			break;
+		default:
+			dev_err(pctldev->dev,
+				"unsupported configuration parameter %u\n",
+				param);
+			return -EINVAL;
+		}
+
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
+					      unsigned selector,
+					      unsigned long *configs,
+					      unsigned num_configs)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const unsigned *pins = priv->socdata->groups[selector].pins;
+	unsigned num_pins = priv->socdata->groups[selector].num_pins;
+	int i, ret;
+
+	for (i = 0; i < num_pins; i++) {
+		ret = uniphier_conf_pin_config_set(pctldev, pins[i],
+						   configs, num_configs);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static const struct pinconf_ops uniphier_confops = {
+	.is_generic = true,
+	.pin_config_get = uniphier_conf_pin_config_get,
+	.pin_config_set = uniphier_conf_pin_config_set,
+	.pin_config_group_set = uniphier_conf_pin_config_group_set,
+};
+
+static int uniphier_pmx_get_functions_count(struct pinctrl_dev *pctldev)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->functions_count;
+}
+
+static const char *uniphier_pmx_get_function_name(struct pinctrl_dev *pctldev,
+						  unsigned selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->socdata->functions[selector].name;
+}
+
+static int uniphier_pmx_get_function_groups(struct pinctrl_dev *pctldev,
+					    unsigned selector,
+					    const char * const **groups,
+					    unsigned *num_groups)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = priv->socdata->functions[selector].groups;
+	*num_groups = priv->socdata->functions[selector].num_groups;
+
+	return 0;
+}
+
+static int uniphier_pmx_set_one_mux(struct pinctrl_dev *pctldev, unsigned pin,
+				    unsigned muxval)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	unsigned mux_bits = priv->socdata->mux_bits;
+	unsigned reg_stride = priv->socdata->reg_stride;
+	unsigned reg, reg_end, shift, mask;
+	int ret;
+
+	reg = UNIPHIER_PINCTRL_PINMUX_BASE + pin * mux_bits / 32 * reg_stride;
+	reg_end = reg + reg_stride;
+	shift = pin * mux_bits % 32;
+	mask = (1U << mux_bits) - 1;
+
+	/*
+	 * If reg_stride is greater than 4, the upper bits of each pinsel
+	 * value are stored at offset + 4.
+	 */
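+	/*
+	 * Illustration with hypothetical values: for mux_bits = 8 and
+	 * reg_stride = 8, pin 5 keeps its lower 8 bits in bits [15:8] of
+	 * offset 0x8 and its upper 8 bits in bits [15:8] of offset 0xc.
+	 */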
+	for (; reg < reg_end; reg += 4) {
+		ret = regmap_update_bits(priv->regmap, reg,
+					 mask << shift, muxval << shift);
+		if (ret)
+			return ret;
+		muxval >>= mux_bits;
+	}
+
+	if (priv->socdata->load_pinctrl) {
+		ret = regmap_write(priv->regmap,
+				   UNIPHIER_PINCTRL_LOAD_PINMUX, 1);
+		if (ret)
+			return ret;
+	}
+
+	/* some pins need input-enabling */
+	return uniphier_conf_pin_input_enable(pctldev,
+					      &pctldev->desc->pins[pin], 1);
+}
+
+static int uniphier_pmx_set_mux(struct pinctrl_dev *pctldev,
+				unsigned func_selector,
+				unsigned group_selector)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const struct uniphier_pinctrl_group *grp =
+					&priv->socdata->groups[group_selector];
+	int i;
+	int ret;
+
+	for (i = 0; i < grp->num_pins; i++) {
+		ret = uniphier_pmx_set_one_mux(pctldev, grp->pins[i],
+					       grp->muxvals[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int uniphier_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+					    struct pinctrl_gpio_range *range,
+					    unsigned offset)
+{
+	struct uniphier_pinctrl_priv *priv = pinctrl_dev_get_drvdata(pctldev);
+	const struct uniphier_pinctrl_group *groups = priv->socdata->groups;
+	int groups_count = priv->socdata->groups_count;
+	enum uniphier_pinmux_gpio_range_type range_type;
+	int i, j;
+
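+	/* GPIO ranges whose name contains "irq" map to the XIRQ pins */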
+	if (strstr(range->name, "irq"))
+		range_type = UNIPHIER_PINMUX_GPIO_RANGE_IRQ;
+	else
+		range_type = UNIPHIER_PINMUX_GPIO_RANGE_PORT;
+
+	for (i = 0; i < groups_count; i++) {
+		if (groups[i].range_type != range_type)
+			continue;
+
+		for (j = 0; j < groups[i].num_pins; j++)
+			if (groups[i].pins[j] == offset)
+				goto found;
+	}
+
+	dev_err(pctldev->dev, "pin %u does not support GPIO\n", offset);
+	return -EINVAL;
+
+found:
+	return uniphier_pmx_set_one_mux(pctldev, offset, groups[i].muxvals[j]);
+}
+
+static const struct pinmux_ops uniphier_pmxops = {
+	.get_functions_count = uniphier_pmx_get_functions_count,
+	.get_function_name = uniphier_pmx_get_function_name,
+	.get_function_groups = uniphier_pmx_get_function_groups,
+	.set_mux = uniphier_pmx_set_mux,
+	.gpio_request_enable = uniphier_pmx_gpio_request_enable,
+	.strict = true,
+};
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+			   struct pinctrl_desc *desc,
+			   struct uniphier_pinctrl_socdata *socdata)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_pinctrl_priv *priv;
+
+	if (!socdata ||
+	    !socdata->groups ||
+	    !socdata->groups_count ||
+	    !socdata->functions ||
+	    !socdata->functions_count ||
+	    !socdata->mux_bits ||
+	    !socdata->reg_stride) {
+		dev_err(dev, "pinctrl socdata lacks necessary members\n");
+		return -EINVAL;
+	}
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regmap = syscon_node_to_regmap(dev->of_node);
+	if (IS_ERR(priv->regmap)) {
+		dev_err(dev, "failed to get regmap\n");
+		return PTR_ERR(priv->regmap);
+	}
+
+	priv->socdata = socdata;
+	desc->pctlops = &uniphier_pctlops;
+	desc->pmxops = &uniphier_pmxops;
+	desc->confops = &uniphier_confops;
+
+	priv->pctldev = pinctrl_register(desc, dev, priv);
+	if (IS_ERR(priv->pctldev)) {
+		dev_err(dev, "failed to register UniPhier pinctrl driver\n");
+		return PTR_ERR(priv->pctldev);
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_probe);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev)
+{
+	struct uniphier_pinctrl_priv *priv = platform_get_drvdata(pdev);
+
+	pinctrl_unregister(priv->pctldev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uniphier_pinctrl_remove);
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier.h b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
new file mode 100644
index 0000000..e1e98b8
--- /dev/null
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PINCTRL_UNIPHIER_H__
+#define __PINCTRL_UNIPHIER_H__
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define UNIPHIER_PINCTRL_PINMUX_BASE	0x0
+#define UNIPHIER_PINCTRL_LOAD_PINMUX	0x700
+#define UNIPHIER_PINCTRL_DRVCTRL_BASE	0x800
+#define UNIPHIER_PINCTRL_DRV2CTRL_BASE	0x900
+#define UNIPHIER_PINCTRL_PUPDCTRL_BASE	0xa00
+#define UNIPHIER_PINCTRL_IECTRL		0xd00
+
+/* input enable control register bit */
+#define UNIPHIER_PIN_IECTRL_SHIFT	0
+#define UNIPHIER_PIN_IECTRL_BITS	8
+#define UNIPHIER_PIN_IECTRL_MASK	((1UL << (UNIPHIER_PIN_IECTRL_BITS)) \
+					 - 1)
+
+/* drive strength control register number */
+#define UNIPHIER_PIN_DRVCTRL_SHIFT	((UNIPHIER_PIN_IECTRL_SHIFT) + \
+					(UNIPHIER_PIN_IECTRL_BITS))
+#define UNIPHIER_PIN_DRVCTRL_BITS	9
+#define UNIPHIER_PIN_DRVCTRL_MASK	((1UL << (UNIPHIER_PIN_DRVCTRL_BITS)) \
+					 - 1)
+
+/* supported drive strength (mA) */
+#define UNIPHIER_PIN_DRV_STR_SHIFT	((UNIPHIER_PIN_DRVCTRL_SHIFT) + \
+					 (UNIPHIER_PIN_DRVCTRL_BITS))
+#define UNIPHIER_PIN_DRV_STR_BITS	3
+#define UNIPHIER_PIN_DRV_STR_MASK	((1UL << (UNIPHIER_PIN_DRV_STR_BITS)) \
+					 - 1)
+
+/* pull-up / pull-down register number */
+#define UNIPHIER_PIN_PUPDCTRL_SHIFT	((UNIPHIER_PIN_DRV_STR_SHIFT) + \
+					 (UNIPHIER_PIN_DRV_STR_BITS))
+#define UNIPHIER_PIN_PUPDCTRL_BITS	9
+#define UNIPHIER_PIN_PUPDCTRL_MASK	((1UL << (UNIPHIER_PIN_PUPDCTRL_BITS))\
+					 - 1)
+
+/* direction of pull register */
+#define UNIPHIER_PIN_PULL_DIR_SHIFT	((UNIPHIER_PIN_PUPDCTRL_SHIFT) + \
+					 (UNIPHIER_PIN_PUPDCTRL_BITS))
+#define UNIPHIER_PIN_PULL_DIR_BITS	3
+#define UNIPHIER_PIN_PULL_DIR_MASK	((1UL << (UNIPHIER_PIN_PULL_DIR_BITS))\
+					 - 1)
+
+#if UNIPHIER_PIN_PULL_DIR_SHIFT + UNIPHIER_PIN_PULL_DIR_BITS > BITS_PER_LONG
+#error "unable to pack pin attributes."
+#endif
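+
+/*
+ * Resulting drv_data bit layout (derived from the shifts and widths above):
+ *   [31:29] pull direction, [28:20] pupdctrl, [19:17] drv_str,
+ *   [16:8] drvctrl, [7:0] iectrl
+ */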
+
+#define UNIPHIER_PIN_IECTRL_NONE	(UNIPHIER_PIN_IECTRL_MASK)
+
+/* selectable drive strength */
+enum uniphier_pin_drv_str {
+	UNIPHIER_PIN_DRV_4_8,		/* 2 level control: 4/8 mA */
+	UNIPHIER_PIN_DRV_8_12_16_20,	/* 4 level control: 8/12/16/20 mA */
+	UNIPHIER_PIN_DRV_FIXED_4,	/* fixed to 4mA */
+	UNIPHIER_PIN_DRV_FIXED_5,	/* fixed to 5mA */
+	UNIPHIER_PIN_DRV_FIXED_8,	/* fixed to 8mA */
+	UNIPHIER_PIN_DRV_NONE,		/* no support (input only pin) */
+};
+
+/* direction of pull register (no pin supports bi-directional pull biasing) */
+enum uniphier_pin_pull_dir {
+	UNIPHIER_PIN_PULL_UP,		/* pull-up or disabled */
+	UNIPHIER_PIN_PULL_DOWN,		/* pull-down or disabled */
+	UNIPHIER_PIN_PULL_UP_FIXED,	/* always pull-up */
+	UNIPHIER_PIN_PULL_DOWN_FIXED,	/* always pull-down */
+	UNIPHIER_PIN_PULL_NONE,		/* no pull register */
+};
+
+#define UNIPHIER_PIN_IECTRL(x) \
+	(((x) & (UNIPHIER_PIN_IECTRL_MASK)) << (UNIPHIER_PIN_IECTRL_SHIFT))
+#define UNIPHIER_PIN_DRVCTRL(x) \
+	(((x) & (UNIPHIER_PIN_DRVCTRL_MASK)) << (UNIPHIER_PIN_DRVCTRL_SHIFT))
+#define UNIPHIER_PIN_DRV_STR(x) \
+	(((x) & (UNIPHIER_PIN_DRV_STR_MASK)) << (UNIPHIER_PIN_DRV_STR_SHIFT))
+#define UNIPHIER_PIN_PUPDCTRL(x) \
+	(((x) & (UNIPHIER_PIN_PUPDCTRL_MASK)) << (UNIPHIER_PIN_PUPDCTRL_SHIFT))
+#define UNIPHIER_PIN_PULL_DIR(x) \
+	(((x) & (UNIPHIER_PIN_PULL_DIR_MASK)) << (UNIPHIER_PIN_PULL_DIR_SHIFT))
+
+#define UNIPHIER_PIN_ATTR_PACKED(iectrl, drvctrl, drv_str, pupdctrl, pull_dir)\
+				(UNIPHIER_PIN_IECTRL(iectrl) |		\
+				 UNIPHIER_PIN_DRVCTRL(drvctrl) |	\
+				 UNIPHIER_PIN_DRV_STR(drv_str) |	\
+				 UNIPHIER_PIN_PUPDCTRL(pupdctrl) |	\
+				 UNIPHIER_PIN_PULL_DIR(pull_dir))
+
+static inline unsigned int uniphier_pin_get_iectrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_IECTRL_SHIFT) &
+						UNIPHIER_PIN_IECTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drvctrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_DRVCTRL_SHIFT) &
+						UNIPHIER_PIN_DRVCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_drv_str(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_DRV_STR_SHIFT) &
+						UNIPHIER_PIN_DRV_STR_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pupdctrl(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_PUPDCTRL_SHIFT) &
+						UNIPHIER_PIN_PUPDCTRL_MASK;
+}
+
+static inline unsigned int uniphier_pin_get_pull_dir(void *drv_data)
+{
+	return ((unsigned long)drv_data >> UNIPHIER_PIN_PULL_DIR_SHIFT) &
+						UNIPHIER_PIN_PULL_DIR_MASK;
+}
+
+enum uniphier_pinmux_gpio_range_type {
+	UNIPHIER_PINMUX_GPIO_RANGE_PORT,
+	UNIPHIER_PINMUX_GPIO_RANGE_IRQ,
+	UNIPHIER_PINMUX_GPIO_RANGE_NONE,
+};
+
+struct uniphier_pinctrl_group {
+	const char *name;
+	const unsigned *pins;
+	unsigned num_pins;
+	const unsigned *muxvals;
+	enum uniphier_pinmux_gpio_range_type range_type;
+};
+
+struct uniphier_pinmux_function {
+	const char *name;
+	const char * const *groups;
+	unsigned num_groups;
+};
+
+struct uniphier_pinctrl_socdata {
+	const struct uniphier_pinctrl_group *groups;
+	int groups_count;
+	const struct uniphier_pinmux_function *functions;
+	int functions_count;
+	unsigned mux_bits;
+	unsigned reg_stride;
+	bool load_pinctrl;
+};
+
+#define UNIPHIER_PINCTRL_PIN(a, b, c, d, e, f, g)			\
+{									\
+	.number = a,							\
+	.name = b,							\
+	.drv_data = (void *)UNIPHIER_PIN_ATTR_PACKED(c, d, e, f, g),	\
+}
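+
+/*
+ * Example with illustrative values: pin 0 named "XIRQ0", input enable in
+ * IECTRL bit 0, drive control register 4 providing the 8/12/16/20 mA
+ * levels, and pull-up/down register 7 acting as a pull-up:
+ *
+ *   UNIPHIER_PINCTRL_PIN(0, "XIRQ0", 0, 4, UNIPHIER_PIN_DRV_8_12_16_20,
+ *                        7, UNIPHIER_PIN_PULL_UP)
+ */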
+
+#define __UNIPHIER_PINCTRL_GROUP(grp, type)				\
+	{								\
+		.name = #grp,						\
+		.pins = grp##_pins,					\
+		.num_pins = ARRAY_SIZE(grp##_pins),			\
+		.muxvals = grp##_muxvals +				\
+			BUILD_BUG_ON_ZERO(ARRAY_SIZE(grp##_pins) !=	\
+					  ARRAY_SIZE(grp##_muxvals)),	\
+		.range_type = type,					\
+	}
+
+#define UNIPHIER_PINCTRL_GROUP(grp)					\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_NONE)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(grp)			\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_PORT)
+
+#define UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(grp)			\
+	__UNIPHIER_PINCTRL_GROUP(grp, UNIPHIER_PINMUX_GPIO_RANGE_IRQ)
+
+#define UNIPHIER_PINCTRL_GROUP_SINGLE(grp, array, ofst)			\
+	{								\
+		.name = #grp,						\
+		.pins = array##_pins + ofst,				\
+		.num_pins = 1,						\
+		.muxvals = array##_muxvals + ofst,			\
+	}
+
+#define UNIPHIER_PINMUX_FUNCTION(func)					\
+	{								\
+		.name = #func,						\
+		.groups = func##_groups,				\
+		.num_groups = ARRAY_SIZE(func##_groups),		\
+	}
+
+struct platform_device;
+struct pinctrl_desc;
+
+int uniphier_pinctrl_probe(struct platform_device *pdev,
+			   struct pinctrl_desc *desc,
+			   struct uniphier_pinctrl_socdata *socdata);
+
+int uniphier_pinctrl_remove(struct platform_device *pdev);
+
+#endif /* __PINCTRL_UNIPHIER_H__ */
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index a04019a..0207274 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -23,7 +23,7 @@
 
 #include <linux/dmi.h>
 #include <linux/i2c.h>
-#include <linux/i2c/atmel_mxt_ts.h>
+#include <linux/platform_data/atmel_mxt_ts.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -111,6 +111,7 @@
 	.irqflags		= IRQF_TRIGGER_FALLING,
 	.t19_num_keys		= ARRAY_SIZE(mxt_t19_keys),
 	.t19_keymap		= mxt_t19_keys,
+	.suspend_mode		= MXT_SUSPEND_T9_CTRL,
 };
 
 static struct i2c_board_info atmel_224s_tp_device = {
@@ -121,6 +122,7 @@
 
 static struct mxt_platform_data atmel_1664s_platform_data = {
 	.irqflags		= IRQF_TRIGGER_FALLING,
+	.suspend_mode		= MXT_SUSPEND_T9_CTRL,
 };
 
 static struct i2c_board_info atmel_1664s_device = {
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index 105cfff..28b2a12 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,7 +33,7 @@
 #include <linux/suspend.h>
 #include <linux/acpi.h>
 #include <asm/intel_pmc_ipc.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 /*
  * IPC registers
@@ -473,9 +473,9 @@
 	},
 };
 
-static struct lpc_ich_info tco_info = {
+static struct itco_wdt_platform_data tco_info = {
 	.name = "Apollo Lake SoC",
-	.iTCO_version = 3,
+	.version = 3,
 };
 
 static int ipc_create_punit_device(void)
@@ -552,8 +552,7 @@
 		goto err;
 	}
 
-	ret = platform_device_add_data(pdev, &tco_info,
-				       sizeof(struct lpc_ich_info));
+	ret = platform_device_add_data(pdev, &tco_info, sizeof(tco_info));
 	if (ret) {
 		dev_err(ipcdev.dev, "Failed to add tco platform data\n");
 		goto err;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 08beeed..f8758d6 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -333,7 +333,7 @@
 
 config CHARGER_GPIO
 	tristate "GPIO charger"
-	depends on GPIOLIB
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y to include support for chargers which report their online status
 	  through a GPIO pin.
@@ -391,26 +391,30 @@
 
 config CHARGER_BQ24190
 	tristate "TI BQ24190 battery charger driver"
-	depends on I2C && GPIOLIB
+	depends on I2C
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y to enable support for the TI BQ24190 battery charger.
 
 config CHARGER_BQ24257
 	tristate "TI BQ24257 battery charger driver"
-	depends on I2C && GPIOLIB
+	depends on I2C
+	depends on GPIOLIB || COMPILE_TEST
 	depends on REGMAP_I2C
 	help
 	  Say Y to enable support for the TI BQ24257 battery charger.
 
 config CHARGER_BQ24735
 	tristate "TI BQ24735 battery charger support"
-	depends on I2C && GPIOLIB
+	depends on I2C
+	depends on GPIOLIB || COMPILE_TEST
 	help
 	  Say Y to enable support for the TI BQ24735 battery charger.
 
 config CHARGER_BQ25890
 	tristate "TI BQ25890 battery charger driver"
-	depends on I2C && GPIOLIB
+	depends on I2C
+	depends on GPIOLIB || COMPILE_TEST
 	select REGMAP_I2C
 	help
 	  Say Y to enable support for the TI BQ25890 battery charger.
@@ -462,7 +466,8 @@
 
 config CHARGER_RT9455
 	tristate "Richtek RT9455 battery charger driver"
-	depends on I2C && GPIOLIB
+	depends on I2C
+	depends on GPIOLIB || COMPILE_TEST
 	select REGMAP_I2C
 	help
 	  Say Y to enable support for Richtek RT9455 battery charger.
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index 7f3d389..a67eeac 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -13,7 +13,7 @@
 
 config ROCKCHIP_IODOMAIN
         tristate "Rockchip IO domain support"
-        depends on ARCH_ROCKCHIP && OF
+        depends on POWER_AVS && ARCH_ROCKCHIP && OF
         help
           Say y here to enable support io domains on Rockchip SoCs. It is
           necessary for the io domain setting of the SoC to match the
diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c
index 3ae35d0..2e30002 100644
--- a/drivers/power/avs/rockchip-io-domain.c
+++ b/drivers/power/avs/rockchip-io-domain.c
@@ -43,6 +43,10 @@
 #define RK3288_SOC_CON2_FLASH0		BIT(7)
 #define RK3288_SOC_FLASH_SUPPLY_NUM	2
 
+#define RK3368_SOC_CON15		0x43c
+#define RK3368_SOC_CON15_FLASH0		BIT(14)
+#define RK3368_SOC_FLASH_SUPPLY_NUM	2
+
 struct rockchip_iodomain;
 
 /**
@@ -158,6 +162,25 @@
 		dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
 }
 
+static void rk3368_iodomain_init(struct rockchip_iodomain *iod)
+{
+	int ret;
+	u32 val;
+
+	/* if no flash supply we should leave things alone */
+	if (!iod->supplies[RK3368_SOC_FLASH_SUPPLY_NUM].reg)
+		return;
+
+	/*
+	 * set flash0 iodomain to also use this framework
+	 * instead of a special gpio.
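+	 * The upper 16 bits of this GRF register act as a write-enable
+	 * mask on Rockchip SoCs, hence the bit is also set at << 16.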
+	 */
+	val = RK3368_SOC_CON15_FLASH0 | (RK3368_SOC_CON15_FLASH0 << 16);
+	ret = regmap_write(iod->grf, RK3368_SOC_CON15, val);
+	if (ret < 0)
+		dev_warn(iod->dev, "couldn't update flash0 ctrl\n");
+}
+
 /*
  * On the rk3188 the io-domains are handled by a shared register with the
  * lower 8 bits being still being continuing drive-strength settings.
@@ -201,6 +224,34 @@
 	.init = rk3288_iodomain_init,
 };
 
+static const struct rockchip_iodomain_soc_data soc_data_rk3368 = {
+	.grf_offset = 0x900,
+	.supply_names = {
+		NULL,		/* reserved */
+		"dvp",		/* DVPIO_VDD */
+		"flash0",	/* FLASH0_VDD (emmc) */
+		"wifi",		/* APIO2_VDD (sdio0) */
+		NULL,
+		"audio",	/* APIO3_VDD */
+		"sdcard",	/* SDMMC0_VDD (sdmmc) */
+		"gpio30",	/* APIO1_VDD */
+		"gpio1830",	/* APIO4_VDD (gpujtag) */
+	},
+	.init = rk3368_iodomain_init,
+};
+
+static const struct rockchip_iodomain_soc_data soc_data_rk3368_pmu = {
+	.grf_offset = 0x100,
+	.supply_names = {
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+		"pmu",		/* PMU IO domain */
+		"vop",		/* LCDC IO domain */
+	},
+};
+
 static const struct of_device_id rockchip_iodomain_match[] = {
 	{
 		.compatible = "rockchip,rk3188-io-voltage-domain",
@@ -210,6 +261,14 @@
 		.compatible = "rockchip,rk3288-io-voltage-domain",
 		.data = (void *)&soc_data_rk3288
 	},
+	{
+		.compatible = "rockchip,rk3368-io-voltage-domain",
+		.data = (void *)&soc_data_rk3368
+	},
+	{
+		.compatible = "rockchip,rk3368-pmu-io-voltage-domain",
+		.data = (void *)&soc_data_rk3368_pmu
+	},
 	{ /* sentinel */ },
 };
 
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index e98dcb6..ec212b5 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -170,7 +170,7 @@
 	struct power_supply *charger;
 	struct power_supply_desc charger_desc;
 	struct delayed_work work;
-	struct power_supply *notify_psy;
+	struct device_node *notify_node;
 	struct notifier_block nb;
 	enum bq2415x_mode reported_mode;/* mode reported by hook function */
 	enum bq2415x_mode mode;		/* currently configured mode */
@@ -792,31 +792,9 @@
 
 }
 
-static int bq2415x_notifier_call(struct notifier_block *nb,
-		unsigned long val, void *v)
+static bool bq2415x_update_reported_mode(struct bq2415x_device *bq, int mA)
 {
-	struct bq2415x_device *bq =
-		container_of(nb, struct bq2415x_device, nb);
-	struct power_supply *psy = v;
 	enum bq2415x_mode mode;
-	union power_supply_propval prop;
-	int ret;
-	int mA;
-
-	if (val != PSY_EVENT_PROP_CHANGED)
-		return NOTIFY_OK;
-
-	if (psy != bq->notify_psy)
-		return NOTIFY_OK;
-
-	dev_dbg(bq->dev, "notifier call was called\n");
-
-	ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX,
-			&prop);
-	if (ret != 0)
-		return NOTIFY_OK;
-
-	mA = prop.intval;
 
 	if (mA == 0)
 		mode = BQ2415X_MODE_OFF;
@@ -828,9 +806,43 @@
 		mode = BQ2415X_MODE_DEDICATED_CHARGER;
 
 	if (bq->reported_mode == mode)
-		return NOTIFY_OK;
+		return false;
 
 	bq->reported_mode = mode;
+	return true;
+}
+
+static int bq2415x_notifier_call(struct notifier_block *nb,
+		unsigned long val, void *v)
+{
+	struct bq2415x_device *bq =
+		container_of(nb, struct bq2415x_device, nb);
+	struct power_supply *psy = v;
+	union power_supply_propval prop;
+	int ret;
+
+	if (val != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	/* Ignore event if it was not sent by notify_node/notify_device */
+	if (bq->notify_node) {
+		if (!psy->dev.parent ||
+		    psy->dev.parent->of_node != bq->notify_node)
+			return NOTIFY_OK;
+	} else if (bq->init_data.notify_device) {
+		if (strcmp(psy->desc->name, bq->init_data.notify_device) != 0)
+			return NOTIFY_OK;
+	}
+
+	dev_dbg(bq->dev, "notifier call was called\n");
+
+	ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_CURRENT_MAX,
+			&prop);
+	if (ret != 0)
+		return NOTIFY_OK;
+
+	if (!bq2415x_update_reported_mode(bq, prop.intval))
+		return NOTIFY_OK;
 
 	/* if automode is not enabled do not tell about reported_mode */
 	if (bq->automode < 1)
@@ -1536,6 +1548,8 @@
 	struct device_node *np = client->dev.of_node;
 	struct bq2415x_platform_data *pdata = client->dev.platform_data;
 	const struct acpi_device_id *acpi_id = NULL;
+	struct power_supply *notify_psy = NULL;
+	union power_supply_propval prop;
 
 	if (!np && !pdata && !ACPI_HANDLE(&client->dev)) {
 		dev_err(&client->dev, "Neither devicetree, nor platform data, nor ACPI support\n");
@@ -1569,25 +1583,6 @@
 		goto error_2;
 	}
 
-	if (np) {
-		bq->notify_psy = power_supply_get_by_phandle(np,
-						"ti,usb-charger-detection");
-
-		if (IS_ERR(bq->notify_psy)) {
-			dev_info(&client->dev,
-				 "no 'ti,usb-charger-detection' property (err=%ld)\n",
-				PTR_ERR(bq->notify_psy));
-			bq->notify_psy = NULL;
-		} else if (!bq->notify_psy) {
-			ret = -EPROBE_DEFER;
-			goto error_2;
-		}
-	} else if (pdata && pdata->notify_device) {
-		bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
-	} else {
-		bq->notify_psy = NULL;
-	}
-
 	i2c_set_clientdata(client, bq);
 
 	bq->id = num;
@@ -1607,32 +1602,35 @@
 					       "ti,current-limit",
 					       &bq->init_data.current_limit);
 		if (ret)
-			goto error_3;
+			goto error_2;
 		ret = device_property_read_u32(bq->dev,
 					"ti,weak-battery-voltage",
 					&bq->init_data.weak_battery_voltage);
 		if (ret)
-			goto error_3;
+			goto error_2;
 		ret = device_property_read_u32(bq->dev,
 				"ti,battery-regulation-voltage",
 				&bq->init_data.battery_regulation_voltage);
 		if (ret)
-			goto error_3;
+			goto error_2;
 		ret = device_property_read_u32(bq->dev,
 					       "ti,charge-current",
 					       &bq->init_data.charge_current);
 		if (ret)
-			goto error_3;
+			goto error_2;
 		ret = device_property_read_u32(bq->dev,
 				"ti,termination-current",
 				&bq->init_data.termination_current);
 		if (ret)
-			goto error_3;
+			goto error_2;
 		ret = device_property_read_u32(bq->dev,
 					       "ti,resistor-sense",
 					       &bq->init_data.resistor_sense);
 		if (ret)
-			goto error_3;
+			goto error_2;
+		if (np)
+			bq->notify_node = of_parse_phandle(np,
+						"ti,usb-charger-detection", 0);
 	} else {
 		memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
 	}
@@ -1642,56 +1640,72 @@
 	ret = bq2415x_power_supply_init(bq);
 	if (ret) {
 		dev_err(bq->dev, "failed to register power supply: %d\n", ret);
-		goto error_3;
+		goto error_2;
 	}
 
 	ret = bq2415x_sysfs_init(bq);
 	if (ret) {
 		dev_err(bq->dev, "failed to create sysfs entries: %d\n", ret);
-		goto error_4;
+		goto error_3;
 	}
 
 	ret = bq2415x_set_defaults(bq);
 	if (ret) {
 		dev_err(bq->dev, "failed to set default values: %d\n", ret);
-		goto error_5;
+		goto error_4;
 	}
 
-	if (bq->notify_psy) {
+	if (bq->notify_node || bq->init_data.notify_device) {
 		bq->nb.notifier_call = bq2415x_notifier_call;
 		ret = power_supply_reg_notifier(&bq->nb);
 		if (ret) {
 			dev_err(bq->dev, "failed to reg notifier: %d\n", ret);
-			goto error_6;
+			goto error_4;
 		}
 
-		/* Query for initial reported_mode and set it */
-		bq2415x_notifier_call(&bq->nb, PSY_EVENT_PROP_CHANGED,
-				      bq->notify_psy);
-		bq2415x_set_mode(bq, bq->reported_mode);
-
 		bq->automode = 1;
-		dev_info(bq->dev, "automode enabled\n");
+		dev_info(bq->dev, "automode supported, waiting for events\n");
 	} else {
 		bq->automode = -1;
 		dev_info(bq->dev, "automode not supported\n");
 	}
 
+	/* Query for initial reported_mode and set it */
+	if (bq->nb.notifier_call) {
+		if (np) {
+			notify_psy = power_supply_get_by_phandle(np,
+						"ti,usb-charger-detection");
+			if (IS_ERR(notify_psy))
+				notify_psy = NULL;
+		} else if (bq->init_data.notify_device) {
+			notify_psy = power_supply_get_by_name(
+						bq->init_data.notify_device);
+		}
+	}
+	if (notify_psy) {
+		ret = power_supply_get_property(notify_psy,
+					POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+		power_supply_put(notify_psy);
+
+		if (ret == 0) {
+			bq2415x_update_reported_mode(bq, prop.intval);
+			bq2415x_set_mode(bq, bq->reported_mode);
+		}
+	}
+
 	INIT_DELAYED_WORK(&bq->work, bq2415x_timer_work);
 	bq2415x_set_autotimer(bq, 1);
 
 	dev_info(bq->dev, "driver registered\n");
 	return 0;
 
-error_6:
-error_5:
-	bq2415x_sysfs_exit(bq);
 error_4:
-	bq2415x_power_supply_exit(bq);
+	bq2415x_sysfs_exit(bq);
 error_3:
-	if (bq->notify_psy)
-		power_supply_put(bq->notify_psy);
+	bq2415x_power_supply_exit(bq);
 error_2:
+	if (bq->notify_node)
+		of_node_put(bq->notify_node);
 	kfree(name);
 error_1:
 	mutex_lock(&bq2415x_id_mutex);
@@ -1707,10 +1721,11 @@
 {
 	struct bq2415x_device *bq = i2c_get_clientdata(client);
 
-	if (bq->notify_psy) {
+	if (bq->nb.notifier_call)
 		power_supply_unreg_notifier(&bq->nb);
-		power_supply_put(bq->notify_psy);
-	}
+
+	if (bq->notify_node)
+		of_node_put(bq->notify_node);
 
 	bq2415x_sysfs_exit(bq);
 	bq2415x_power_supply_exit(bq);
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index 052db78..469a452 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -902,7 +902,7 @@
 }
 
 static enum power_supply_property bq24190_charger_properties[] = {
-	POWER_SUPPLY_PROP_TYPE,
+	POWER_SUPPLY_PROP_CHARGE_TYPE,
 	POWER_SUPPLY_PROP_HEALTH,
 	POWER_SUPPLY_PROP_ONLINE,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
@@ -1515,6 +1515,7 @@
 	{ "bq24190", BQ24190_REG_VPRS_PN_24190 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, bq24190_i2c_ids);
 
 #ifdef CONFIG_OF
 static const struct of_device_id bq24190_of_match[] = {
@@ -1534,7 +1535,6 @@
 	.id_table	= bq24190_i2c_ids,
 	.driver = {
 		.name		= "bq24190-charger",
-		.owner		= THIS_MODULE,
 		.pm		= &bq24190_pm_ops,
 		.of_match_table	= of_match_ptr(bq24190_of_match),
 	},
diff --git a/drivers/power/bq24735-charger.c b/drivers/power/bq24735-charger.c
index 961a189..eb2b368 100644
--- a/drivers/power/bq24735-charger.c
+++ b/drivers/power/bq24735-charger.c
@@ -267,8 +267,9 @@
 
 	name = (char *)charger->pdata->name;
 	if (!name) {
-		name = kasprintf(GFP_KERNEL, "bq24735@%s",
-				 dev_name(&client->dev));
+		name = devm_kasprintf(&client->dev, GFP_KERNEL,
+				      "bq24735@%s",
+				      dev_name(&client->dev));
 		if (!name) {
 			dev_err(&client->dev, "Failed to alloc device name\n");
 			return -ENOMEM;
@@ -296,23 +297,21 @@
 	if (ret < 0) {
 		dev_err(&client->dev, "Failed to read manufacturer id : %d\n",
 			ret);
-		goto err_free_name;
+		return ret;
 	} else if (ret != 0x0040) {
 		dev_err(&client->dev,
 			"manufacturer id mismatch. 0x0040 != 0x%04x\n", ret);
-		ret = -ENODEV;
-		goto err_free_name;
+		return -ENODEV;
 	}
 
 	ret = bq24735_read_word(client, BQ24735_DEVICE_ID);
 	if (ret < 0) {
 		dev_err(&client->dev, "Failed to read device id : %d\n", ret);
-		goto err_free_name;
+		return ret;
 	} else if (ret != 0x000B) {
 		dev_err(&client->dev,
 			"device id mismatch. 0x000b != 0x%04x\n", ret);
-		ret = -ENODEV;
-		goto err_free_name;
+		return -ENODEV;
 	}
 
 	if (gpio_is_valid(charger->pdata->status_gpio)) {
@@ -331,7 +330,7 @@
 	ret = bq24735_config_charger(charger);
 	if (ret < 0) {
 		dev_err(&client->dev, "failed in configuring charger");
-		goto err_free_name;
+		return ret;
 	}
 
 	/* check for AC adapter presence */
@@ -339,17 +338,17 @@
 		ret = bq24735_enable_charging(charger);
 		if (ret < 0) {
 			dev_err(&client->dev, "Failed to enable charging\n");
-			goto err_free_name;
+			return ret;
 		}
 	}
 
-	charger->charger = power_supply_register(&client->dev, supply_desc,
-						 &psy_cfg);
+	charger->charger = devm_power_supply_register(&client->dev, supply_desc,
+						      &psy_cfg);
 	if (IS_ERR(charger->charger)) {
 		ret = PTR_ERR(charger->charger);
 		dev_err(&client->dev, "Failed to register power supply: %d\n",
 			ret);
-		goto err_free_name;
+		return ret;
 	}
 
 	if (client->irq) {
@@ -364,34 +363,11 @@
 			dev_err(&client->dev,
 				"Unable to register IRQ %d err %d\n",
 				client->irq, ret);
-			goto err_unregister_supply;
+			return ret;
 		}
 	}
 
 	return 0;
-err_unregister_supply:
-	power_supply_unregister(charger->charger);
-err_free_name:
-	if (name != charger->pdata->name)
-		kfree(name);
-
-	return ret;
-}
-
-static int bq24735_charger_remove(struct i2c_client *client)
-{
-	struct bq24735 *charger = i2c_get_clientdata(client);
-
-	if (charger->client->irq)
-		devm_free_irq(&charger->client->dev, charger->client->irq,
-			      &charger->charger);
-
-	power_supply_unregister(charger->charger);
-
-	if (charger->charger_desc.name != charger->pdata->name)
-		kfree(charger->charger_desc.name);
-
-	return 0;
 }
 
 static const struct i2c_device_id bq24735_charger_id[] = {
@@ -409,11 +385,9 @@
 static struct i2c_driver bq24735_charger_driver = {
 	.driver = {
 		.name = "bq24735-charger",
-		.owner = THIS_MODULE,
 		.of_match_table = bq24735_match_ids,
 	},
 	.probe = bq24735_charger_probe,
-	.remove = bq24735_charger_remove,
 	.id_table = bq24735_charger_id,
 };
 
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index b6b9837..8287261f 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -39,47 +39,49 @@
 
 #include <linux/power/bq27x00_battery.h>
 
-#define DRIVER_VERSION			"1.2.0"
+#define DRIVER_VERSION		"1.2.0"
 
-#define BQ27x00_REG_TEMP		0x06
-#define BQ27x00_REG_VOLT		0x08
-#define BQ27x00_REG_AI			0x14
-#define BQ27x00_REG_FLAGS		0x0A
-#define BQ27x00_REG_TTE			0x16
-#define BQ27x00_REG_TTF			0x18
-#define BQ27x00_REG_TTECP		0x26
-#define BQ27x00_REG_NAC			0x0C /* Nominal available capacity */
-#define BQ27x00_REG_LMD			0x12 /* Last measured discharge */
-#define BQ27x00_REG_CYCT		0x2A /* Cycle count total */
-#define BQ27x00_REG_AE			0x22 /* Available energy */
-#define BQ27x00_POWER_AVG		0x24
+#define BQ27XXX_MANUFACTURER	"Texas Instruments"
 
-#define BQ27000_REG_RSOC		0x0B /* Relative State-of-Charge */
-#define BQ27000_REG_ILMD		0x76 /* Initial last measured discharge */
-#define BQ27000_FLAG_EDVF		BIT(0) /* Final End-of-Discharge-Voltage flag */
-#define BQ27000_FLAG_EDV1		BIT(1) /* First End-of-Discharge-Voltage flag */
-#define BQ27000_FLAG_CI			BIT(4) /* Capacity Inaccurate flag */
-#define BQ27000_FLAG_FC			BIT(5)
-#define BQ27000_FLAG_CHGS		BIT(7) /* Charge state flag */
+#define BQ27x00_REG_TEMP	0x06
+#define BQ27x00_REG_VOLT	0x08
+#define BQ27x00_REG_AI		0x14
+#define BQ27x00_REG_FLAGS	0x0A
+#define BQ27x00_REG_TTE		0x16
+#define BQ27x00_REG_TTF		0x18
+#define BQ27x00_REG_TTECP	0x26
+#define BQ27x00_REG_NAC		0x0C /* Nominal available capacity */
+#define BQ27x00_REG_LMD		0x12 /* Last measured discharge */
+#define BQ27x00_REG_CYCT	0x2A /* Cycle count total */
+#define BQ27x00_REG_AE		0x22 /* Available energy */
+#define BQ27x00_POWER_AVG	0x24
 
-#define BQ27500_REG_SOC			0x2C
-#define BQ27500_REG_DCAP		0x3C /* Design capacity */
-#define BQ27500_FLAG_DSC		BIT(0)
-#define BQ27500_FLAG_SOCF		BIT(1) /* State-of-Charge threshold final */
-#define BQ27500_FLAG_SOC1		BIT(2) /* State-of-Charge threshold 1 */
-#define BQ27500_FLAG_FC			BIT(9)
-#define BQ27500_FLAG_OTC		BIT(15)
+#define BQ27000_REG_RSOC	0x0B /* Relative State-of-Charge */
+#define BQ27000_REG_ILMD	0x76 /* Initial last measured discharge */
+#define BQ27000_FLAG_EDVF	BIT(0) /* Final End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_EDV1	BIT(1) /* First End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_CI		BIT(4) /* Capacity Inaccurate flag */
+#define BQ27000_FLAG_FC		BIT(5)
+#define BQ27000_FLAG_CHGS	BIT(7) /* Charge state flag */
 
-#define BQ27742_POWER_AVG		0x76
+#define BQ27500_REG_SOC		0x2C
+#define BQ27500_REG_DCAP	0x3C /* Design capacity */
+#define BQ27500_FLAG_DSC	BIT(0)
+#define BQ27500_FLAG_SOCF	BIT(1) /* State-of-Charge threshold final */
+#define BQ27500_FLAG_SOC1	BIT(2) /* State-of-Charge threshold 1 */
+#define BQ27500_FLAG_FC		BIT(9)
+#define BQ27500_FLAG_OTC	BIT(15)
 
-#define BQ27510_REG_SOC			0x20
-#define BQ27510_REG_DCAP		0x2E /* Design capacity */
-#define BQ27510_REG_CYCT		0x1E /* Cycle count total */
+#define BQ27742_POWER_AVG	0x76
+
+#define BQ27510_REG_SOC		0x20
+#define BQ27510_REG_DCAP	0x2E /* Design capacity */
+#define BQ27510_REG_CYCT	0x1E /* Cycle count total */
 
 /* bq27425 register addresses are same as bq27x00 addresses minus 4 */
-#define BQ27425_REG_OFFSET		0x04
+#define BQ27425_REG_OFFSET	0x04
 #define BQ27425_REG_SOC		(0x1C + BQ27425_REG_OFFSET)
-#define BQ27425_REG_DCAP		(0x3C + BQ27425_REG_OFFSET)
+#define BQ27425_REG_DCAP	(0x3C + BQ27425_REG_OFFSET)
 
 #define BQ27000_RS			20 /* Resistor sense */
 #define BQ27x00_POWER_CONSTANT		(256 * 29200 / 1000)
@@ -106,7 +108,7 @@
 };
 
 struct bq27x00_device_info {
-	struct device 		*dev;
+	struct device		*dev;
 	int			id;
 	enum bq27x00_chip	chip;
 
@@ -142,6 +144,7 @@
 	POWER_SUPPLY_PROP_ENERGY_NOW,
 	POWER_SUPPLY_PROP_POWER_AVG,
 	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
 static enum power_supply_property bq27425_battery_props[] = {
@@ -156,6 +159,7 @@
 	POWER_SUPPLY_PROP_CHARGE_FULL,
 	POWER_SUPPLY_PROP_CHARGE_NOW,
 	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+	POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
 static enum power_supply_property bq27742_battery_props[] = {
@@ -174,6 +178,7 @@
 	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_POWER_AVG,
 	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
 static enum power_supply_property bq27510_battery_props[] = {
@@ -192,19 +197,20 @@
 	POWER_SUPPLY_PROP_CYCLE_COUNT,
 	POWER_SUPPLY_PROP_POWER_AVG,
 	POWER_SUPPLY_PROP_HEALTH,
+	POWER_SUPPLY_PROP_MANUFACTURER,
 };
 
 static unsigned int poll_interval = 360;
 module_param(poll_interval, uint, 0644);
-MODULE_PARM_DESC(poll_interval, "battery poll interval in seconds - " \
-				"0 disables polling");
+MODULE_PARM_DESC(poll_interval,
+		 "battery poll interval in seconds - 0 disables polling");
 
 /*
  * Common code for BQ27x00 devices
  */
 
 static inline int bq27x00_read(struct bq27x00_device_info *di, u8 reg,
-		bool single)
+			       bool single)
 {
 	if (di->chip == BQ27425)
 		return di->bus.read(di, reg - BQ27425_REG_OFFSET, single);
@@ -313,8 +319,9 @@
 			ilmd = bq27x00_read(di, BQ27510_REG_DCAP, false);
 		else
 			ilmd = bq27x00_read(di, BQ27500_REG_DCAP, false);
-	} else
+	} else {
 		ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
+	}
 
 	if (ilmd < 0) {
 		dev_dbg(di->dev, "error reading initial last measured discharge\n");
@@ -445,7 +452,7 @@
 		return tval;
 	}
 
-	if ((di->chip == BQ27500)) {
+	if (di->chip == BQ27500) {
 		if (tval & BQ27500_FLAG_SOCF)
 			tval = POWER_SUPPLY_HEALTH_DEAD;
 		else if (tval & BQ27500_FLAG_OTC)
@@ -559,7 +566,7 @@
  * Or 0 if something fails.
  */
 static int bq27x00_battery_current(struct bq27x00_device_info *di,
-	union power_supply_propval *val)
+				   union power_supply_propval *val)
 {
 	int curr;
 	int flags;
@@ -587,7 +594,7 @@
 }
 
 static int bq27x00_battery_status(struct bq27x00_device_info *di,
-	union power_supply_propval *val)
+				  union power_supply_propval *val)
 {
 	int status;
 
@@ -615,7 +622,7 @@
 }
 
 static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
-	union power_supply_propval *val)
+					  union power_supply_propval *val)
 {
 	int level;
 
@@ -649,7 +656,7 @@
  * Or < 0 if something fails.
  */
 static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
-	union power_supply_propval *val)
+				   union power_supply_propval *val)
 {
 	int volt;
 
@@ -665,7 +672,7 @@
 }
 
 static int bq27x00_simple_value(int value,
-	union power_supply_propval *val)
+				union power_supply_propval *val)
 {
 	if (value < 0)
 		return value;
@@ -749,6 +756,9 @@
 	case POWER_SUPPLY_PROP_HEALTH:
 		ret = bq27x00_simple_value(di->cache.health, val);
 		break;
+	case POWER_SUPPLY_PROP_MANUFACTURER:
+		val->strval = BQ27XXX_MANUFACTURER;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -827,7 +837,6 @@
 	mutex_destroy(&di->lock);
 }
 
-
 /* i2c specific code */
 #ifdef CONFIG_BATTERY_BQ27X00_I2C
 
@@ -888,14 +897,12 @@
 
 	name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
 	if (!name) {
-		dev_err(&client->dev, "failed to allocate device name\n");
 		retval = -ENOMEM;
 		goto batt_failed;
 	}
 
 	di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
 	if (!di) {
-		dev_err(&client->dev, "failed to allocate device info data\n");
 		retval = -ENOMEM;
 		goto batt_failed;
 	}
@@ -956,8 +963,9 @@
 static inline int bq27x00_battery_i2c_init(void)
 {
 	int ret = i2c_add_driver(&bq27x00_battery_driver);
+
 	if (ret)
-		printk(KERN_ERR "Unable to register BQ27x00 i2c driver\n");
+		pr_err("Unable to register BQ27x00 i2c driver\n");
 
 	return ret;
 }
@@ -978,7 +986,7 @@
 #ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
 
 static int bq27000_read_platform(struct bq27x00_device_info *di, u8 reg,
-			bool single)
+				 bool single)
 {
 	struct device *dev = di->dev;
 	struct bq27000_platform_data *pdata = dev->platform_data;
@@ -1028,10 +1036,8 @@
 	}
 
 	di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
-	if (!di) {
-		dev_err(&pdev->dev, "failed to allocate device info data\n");
+	if (!di)
 		return -ENOMEM;
-	}
 
 	platform_set_drvdata(pdev, di);
 
@@ -1064,8 +1070,9 @@
 static inline int bq27x00_battery_platform_init(void)
 {
 	int ret = platform_driver_register(&bq27000_battery_driver);
+
 	if (ret)
-		printk(KERN_ERR "Unable to register BQ27000 platform driver\n");
+		pr_err("Unable to register BQ27000 platform driver\n");
 
 	return ret;
 }
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
index a7a0427..d3743d0 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/ds2780_battery.c
@@ -637,10 +637,6 @@
 	struct power_supply *psy = to_power_supply(dev);
 	struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
 
-	count = min_t(loff_t, count,
-		DS2780_EEPROM_BLOCK1_END -
-		DS2780_EEPROM_BLOCK1_START + 1 - off);
-
 	return ds2780_read_block(dev_info, buf,
 				DS2780_EEPROM_BLOCK1_START + off, count);
 }
@@ -655,10 +651,6 @@
 	struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
 	int ret;
 
-	count = min_t(loff_t, count,
-		DS2780_EEPROM_BLOCK1_END -
-		DS2780_EEPROM_BLOCK1_START + 1 - off);
-
 	ret = ds2780_write(dev_info, buf,
 				DS2780_EEPROM_BLOCK1_START + off, count);
 	if (ret < 0)
@@ -676,7 +668,7 @@
 		.name = "param_eeprom",
 		.mode = S_IRUGO | S_IWUSR,
 	},
-	.size = DS2780_EEPROM_BLOCK1_END - DS2780_EEPROM_BLOCK1_START + 1,
+	.size = DS2780_PARAM_EEPROM_SIZE,
 	.read = ds2780_read_param_eeprom_bin,
 	.write = ds2780_write_param_eeprom_bin,
 };
@@ -690,10 +682,6 @@
 	struct power_supply *psy = to_power_supply(dev);
 	struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
 
-	count = min_t(loff_t, count,
-		DS2780_EEPROM_BLOCK0_END -
-		DS2780_EEPROM_BLOCK0_START + 1 - off);
-
 	return ds2780_read_block(dev_info, buf,
 				DS2780_EEPROM_BLOCK0_START + off, count);
 }
@@ -708,10 +696,6 @@
 	struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
 	int ret;
 
-	count = min_t(loff_t, count,
-		DS2780_EEPROM_BLOCK0_END -
-		DS2780_EEPROM_BLOCK0_START + 1 - off);
-
 	ret = ds2780_write(dev_info, buf,
 				DS2780_EEPROM_BLOCK0_START + off, count);
 	if (ret < 0)
@@ -729,7 +713,7 @@
 		.name = "user_eeprom",
 		.mode = S_IRUGO | S_IWUSR,
 	},
-	.size = DS2780_EEPROM_BLOCK0_END - DS2780_EEPROM_BLOCK0_START + 1,
+	.size = DS2780_USER_EEPROM_SIZE,
 	.read = ds2780_read_user_eeprom_bin,
 	.write = ds2780_write_user_eeprom_bin,
 };
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index 56d583d..c368002 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -639,8 +639,6 @@
 	struct power_supply *psy = to_power_supply(dev);
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-	count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
-
 	return ds2781_read_block(dev_info, buf,
 				DS2781_EEPROM_BLOCK1_START + off, count);
 }
@@ -655,8 +653,6 @@
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 	int ret;
 
-	count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
-
 	ret = ds2781_write(dev_info, buf,
 				DS2781_EEPROM_BLOCK1_START + off, count);
 	if (ret < 0)
@@ -688,8 +684,6 @@
 	struct power_supply *psy = to_power_supply(dev);
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 
-	count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
-
 	return ds2781_read_block(dev_info, buf,
 				DS2781_EEPROM_BLOCK0_START + off, count);
 
@@ -705,8 +699,6 @@
 	struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
 	int ret;
 
-	count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
-
 	ret = ds2781_write(dev_info, buf,
 				DS2781_EEPROM_BLOCK0_START + off, count);
 	if (ret < 0)
diff --git a/drivers/power/ltc2941-battery-gauge.c b/drivers/power/ltc2941-battery-gauge.c
index daeb086..4adf2ba 100644
--- a/drivers/power/ltc2941-battery-gauge.c
+++ b/drivers/power/ltc2941-battery-gauge.c
@@ -14,7 +14,6 @@
 #include <linux/swab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
-#include <linux/idr.h>
 #include <linux/power_supply.h>
 #include <linux/slab.h>
 
@@ -63,15 +62,11 @@
 	struct power_supply_desc supply_desc;	/* Supply description */
 	struct delayed_work work;	/* Work scheduler */
 	int num_regs;	/* Number of registers (chip type) */
-	int id;		/* Identifier of ltc294x chip */
 	int charge;	/* Last charge register content */
 	int r_sense;	/* mOhm */
 	int Qlsb;	/* nAh */
 };
 
-static DEFINE_IDR(ltc294x_id);
-static DEFINE_MUTEX(ltc294x_lock);
-
 static inline int convert_bin_to_uAh(
 	const struct ltc294x_info *info, int Q)
 {
@@ -371,10 +366,6 @@
 
 	cancel_delayed_work(&info->work);
 	power_supply_unregister(info->supply);
-	kfree(info->supply_desc.name);
-	mutex_lock(&ltc294x_lock);
-	idr_remove(&ltc294x_id, info->id);
-	mutex_unlock(&ltc294x_lock);
 	return 0;
 }
 
@@ -384,44 +375,28 @@
 	struct power_supply_config psy_cfg = {};
 	struct ltc294x_info *info;
 	int ret;
-	int num;
 	u32 prescaler_exp;
 	s32 r_sense;
 	struct device_node *np;
 
-	mutex_lock(&ltc294x_lock);
-	ret = idr_alloc(&ltc294x_id, client, 0, 0, GFP_KERNEL);
-	mutex_unlock(&ltc294x_lock);
-	if (ret < 0)
-		goto fail_id;
-
-	num = ret;
-
 	info = devm_kzalloc(&client->dev, sizeof(*info), GFP_KERNEL);
-	if (info == NULL) {
-		ret = -ENOMEM;
-		goto fail_info;
-	}
+	if (info == NULL)
+		return -ENOMEM;
 
 	i2c_set_clientdata(client, info);
 
-	info->num_regs = id->driver_data;
-	info->supply_desc.name = kasprintf(GFP_KERNEL, "%s-%d", client->name,
-					   num);
-	if (!info->supply_desc.name) {
-		ret = -ENOMEM;
-		goto fail_name;
-	}
-
 	np = of_node_get(client->dev.of_node);
 
+	info->num_regs = id->driver_data;
+	info->supply_desc.name = np->name;
+
 	/* r_sense can be negative, when sense+ is connected to the battery
 	 * instead of the sense-. This results in reversed measurements. */
 	ret = of_property_read_u32(np, "lltc,resistor-sense", &r_sense);
 	if (ret < 0) {
 		dev_err(&client->dev,
 			"Could not find lltc,resistor-sense in devicetree\n");
-		goto fail_name;
+		return ret;
 	}
 	info->r_sense = r_sense;
 
@@ -446,7 +421,6 @@
 	}
 
 	info->client = client;
-	info->id = num;
 	info->supply_desc.type = POWER_SUPPLY_TYPE_BATTERY;
 	info->supply_desc.properties = ltc294x_properties;
 	if (info->num_regs >= LTC294X_REG_TEMPERATURE_LSB)
@@ -473,31 +447,19 @@
 	ret = ltc294x_reset(info, prescaler_exp);
 	if (ret < 0) {
 		dev_err(&client->dev, "Communication with chip failed\n");
-		goto fail_comm;
+		return ret;
 	}
 
 	info->supply = power_supply_register(&client->dev, &info->supply_desc,
 					     &psy_cfg);
 	if (IS_ERR(info->supply)) {
 		dev_err(&client->dev, "failed to register ltc2941\n");
-		ret = PTR_ERR(info->supply);
-		goto fail_register;
+		return PTR_ERR(info->supply);
 	} else {
 		schedule_delayed_work(&info->work, LTC294X_WORK_DELAY * HZ);
 	}
 
 	return 0;
-
-fail_register:
-	kfree(info->supply_desc.name);
-fail_comm:
-fail_name:
-fail_info:
-	mutex_lock(&ltc294x_lock);
-	idr_remove(&ltc294x_id, num);
-	mutex_unlock(&ltc294x_lock);
-fail_id:
-	return ret;
 }
 
 #ifdef CONFIG_PM_SLEEP
diff --git a/drivers/power/max77693_charger.c b/drivers/power/max77693_charger.c
index 754879e..060cab5 100644
--- a/drivers/power/max77693_charger.c
+++ b/drivers/power/max77693_charger.c
@@ -20,6 +20,7 @@
 #include <linux/power_supply.h>
 #include <linux/regmap.h>
 #include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77693-private.h>
 
 #define MAX77693_CHARGER_NAME				"max77693-charger"
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index a944338..9e29b13 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -521,11 +521,6 @@
 	int ret;
 	int i;
 
-	if (off >= EEPROM_SIZE)
-		return 0;
-	if (off + count > EEPROM_SIZE)
-		count = EEPROM_SIZE - off;
-
 	for (i = 0; i < count; i++) {
 		ec_byte = EEPROM_START + off + i;
 		ret = olpc_ec_cmd(EC_BAT_EEPROM, &ec_byte, 1, &buf[i], 1);
@@ -545,7 +540,7 @@
 		.name = "eeprom",
 		.mode = S_IRUGO,
 	},
-	.size = 0,
+	.size = EEPROM_SIZE,
 	.read = olpc_bat_eeprom_read,
 };
 
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index cc0893f..3a45cc0 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -1244,7 +1244,6 @@
 	.remove = pm2xxx_wall_charger_remove,
 	.driver = {
 		.name = "pm2xxx-wall_charger",
-		.owner = THIS_MODULE,
 		.pm = PM2XXX_PM_OPS,
 	},
 	.id_table = pm2xxx_id,
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 17d93a7..5a0189b 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -166,5 +166,12 @@
 	help
 	  Reboot support for Renesas R-Mobile and SH-Mobile SoCs.
 
+config POWER_RESET_ZX
+	tristate "ZTE SoCs reset driver"
+	depends on ARCH_ZX || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  Reboot support for ZTE SoCs.
+
 endif
 
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index dbe06c3..096fa67 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -19,3 +19,4 @@
 obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o
 obj-$(CONFIG_POWER_RESET_SYSCON_POWEROFF) += syscon-poweroff.o
 obj-$(CONFIG_POWER_RESET_RMOBILE) += rmobile-reset.o
+obj-$(CONFIG_POWER_RESET_ZX) += zx-reboot.o
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 36dc52f..c378d4e 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -123,6 +123,15 @@
 	return NOTIFY_DONE;
 }
 
+static int sama5d3_restart(struct notifier_block *this, unsigned long mode,
+			   void *cmd)
+{
+	writel(cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST),
+	       at91_rstc_base);
+
+	return NOTIFY_DONE;
+}
+
 static void __init at91_reset_status(struct platform_device *pdev)
 {
 	u32 reg = readl(at91_rstc_base + AT91_RSTC_SR);
@@ -155,13 +164,13 @@
 static const struct of_device_id at91_ramc_of_match[] = {
 	{ .compatible = "atmel,at91sam9260-sdramc", },
 	{ .compatible = "atmel,at91sam9g45-ddramc", },
-	{ .compatible = "atmel,sama5d3-ddramc", },
 	{ /* sentinel */ }
 };
 
 static const struct of_device_id at91_reset_of_match[] = {
 	{ .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart },
 	{ .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart },
+	{ .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart },
 	{ /* sentinel */ }
 };
 
@@ -181,13 +190,16 @@
 		return -ENODEV;
 	}
 
-	for_each_matching_node(np, at91_ramc_of_match) {
-		at91_ramc_base[idx] = of_iomap(np, 0);
-		if (!at91_ramc_base[idx]) {
-			dev_err(&pdev->dev, "Could not map ram controller address\n");
-			return -ENODEV;
+	if (!of_device_is_compatible(pdev->dev.of_node, "atmel,sama5d3-rstc")) {
+		/* we need to shut down the DDR controller, so get the ramc base */
+		for_each_matching_node(np, at91_ramc_of_match) {
+			at91_ramc_base[idx] = of_iomap(np, 0);
+			if (!at91_ramc_base[idx]) {
+				dev_err(&pdev->dev, "Could not map ram controller address\n");
+				return -ENODEV;
+			}
+			idx++;
 		}
-		idx++;
 	}
 
 	match = of_match_node(at91_reset_of_match, pdev->dev.of_node);
diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
new file mode 100644
index 0000000..a5b0096
--- /dev/null
+++ b/drivers/power/reset/zx-reboot.c
@@ -0,0 +1,80 @@
+/*
+ * ZTE zx296702 SoC reset code
+ *
+ * Copyright (c) 2015 Linaro Ltd.
+ *
+ * Author: Jun Nie <jun.nie@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+static void __iomem *base;
+static void __iomem *pcu_base;
+
+static int zx_restart_handler(struct notifier_block *this,
+			      unsigned long mode, void *cmd)
+{
+	writel_relaxed(1, base + 0xb0);
+	writel_relaxed(1, pcu_base + 0x34);
+
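+	/* give the reset up to 50ms to take effect before complaining */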
+	mdelay(50);
+	pr_emerg("Unable to restart system\n");
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block zx_restart_nb = {
+	.notifier_call = zx_restart_handler,
+	.priority = 128,
+};
+
+static int zx_reboot_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int err;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		WARN(1, "failed to map base address");
+		return -ENODEV;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "zte,zx296702-pcu");
+	pcu_base = of_iomap(np, 0);
+	if (!pcu_base) {
+		iounmap(base);
+		WARN(1, "failed to map pcu_base address");
+		return -ENODEV;
+	}
+
+	err = register_restart_handler(&zx_restart_nb);
+	if (err)
+		dev_err(&pdev->dev, "Register restart handler failed(err=%d)\n",
+			err);
+
+	return err;
+}
+
+static const struct of_device_id zx_reboot_of_match[] = {
+	{ .compatible = "zte,sysctrl" },
+	{}
+};
+
+static struct platform_driver zx_reboot_driver = {
+	.probe = zx_reboot_probe,
+	.driver = {
+		.name = "zx-reboot",
+		.of_match_table = zx_reboot_of_match,
+	},
+};
+module_platform_driver(zx_reboot_driver);
diff --git a/drivers/power/rt5033_battery.c b/drivers/power/rt5033_battery.c
index a7a6877..bcdd830 100644
--- a/drivers/power/rt5033_battery.c
+++ b/drivers/power/rt5033_battery.c
@@ -165,7 +165,7 @@
 	{ "rt5033-battery", },
 	{ }
 };
-MODULE_DEVICE_TABLE(platform, rt5033_battery_id);
+MODULE_DEVICE_TABLE(i2c, rt5033_battery_id);
 
 static struct i2c_driver rt5033_battery_driver = {
 	.driver = {
diff --git a/drivers/power/rt9455_charger.c b/drivers/power/rt9455_charger.c
index 08baac6..a49a9d4 100644
--- a/drivers/power/rt9455_charger.c
+++ b/drivers/power/rt9455_charger.c
@@ -973,7 +973,6 @@
 
 	if (irq2 & GET_MASK(F_CHRVPI)) {
 		dev_dbg(dev, "Charger fault occurred\n");
-		alert_userspace = true;
 		/*
 		 * CHRVPI bit is set in 2 cases:
 		 * 1. when the power source is connected to the charger.
@@ -981,6 +980,9 @@
 		 * To identify the case, PWR_RDY bit is checked. Because
 		 * PWR_RDY bit is set / cleared after CHRVPI interrupt is
 		 * triggered, it is used delayed_work to later read PWR_RDY bit.
+		 * Also, do not set alert_userspace to true here, because there
+		 * is no need to notify userspace when the CHRVPI interrupt
+		 * occurs. Userspace will be notified after the PWR_RDY bit is
+		 * read.
 		 */
 		queue_delayed_work(system_power_efficient_wq,
 				   &info->pwr_rdy_work,
@@ -1178,7 +1180,7 @@
 		/*
 		 * Sometimes, an interrupt occurs while rt9455_probe() function
 		 * is executing and power_supply_register() is not yet called.
-		 * Do not call power_supply_charged() in this case.
+		 * Do not call power_supply_changed() in this case.
 		 */
 		if (info->charger)
 			power_supply_changed(info->charger);
@@ -1478,6 +1480,11 @@
 				   RT9455_MAX_CHARGING_TIME * HZ);
 		break;
 	}
+	/*
+	 * Notify userspace that the charger has been either connected to or
+	 * disconnected from the power source.
+	 */
+	power_supply_changed(info->charger);
 }
 
 static void rt9455_max_charging_time_work_callback(struct work_struct *work)
@@ -1533,6 +1540,11 @@
 			if (ret)
 				dev_err(dev, "Failed to unmask BATAB interrupt\n");
 		}
+		/*
+		 * Notify userspace that the battery is now connected to the
+		 * charger.
+		 */
+		power_supply_changed(info->charger);
 	}
 }
 
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index ac62069..af9383d 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -215,7 +215,7 @@
 	platform_set_drvdata(pdev, di);
 
 	di->dev = &pdev->dev;
-	di->bat_desc.name = dev_name(&pdev->dev);
+	di->bat_desc.name = "rx51-battery";
 	di->bat_desc.type = POWER_SUPPLY_TYPE_BATTERY;
 	di->bat_desc.properties = rx51_battery_props;
 	di->bat_desc.num_properties = ARRAY_SIZE(rx51_battery_props);
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index 022b891..f4f2c1f 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -22,8 +22,10 @@
 #include <linux/power_supply.h>
 #include <linux/notifier.h>
 #include <linux/usb/otg.h>
-#include <linux/regulator/machine.h>
+#include <linux/i2c/twl4030-madc.h>
 
+#define TWL4030_BCIMDEN		0x00
+#define TWL4030_BCIMDKEY	0x01
 #define TWL4030_BCIMSTATEC	0x02
 #define TWL4030_BCIICHG		0x08
 #define TWL4030_BCIVAC		0x0a
@@ -32,11 +34,19 @@
 #define TWL4030_BCIMFSTS4	0x10
 #define TWL4030_BCICTL1		0x23
 #define TWL4030_BB_CFG		0x12
+#define TWL4030_BCIIREF1	0x27
+#define TWL4030_BCIIREF2	0x28
+#define TWL4030_BCIMFKEY	0x11
+#define TWL4030_BCIMFEN3	0x14
+#define TWL4030_BCIMFTH8	0x1d
+#define TWL4030_BCIMFTH9	0x1e
+#define TWL4030_BCIWDKEY	0x21
 
 #define TWL4030_BCIMFSTS1	0x01
 
 #define TWL4030_BCIAUTOWEN	BIT(5)
 #define TWL4030_CONFIG_DONE	BIT(4)
+#define TWL4030_CVENAC		BIT(2)
 #define TWL4030_BCIAUTOUSB	BIT(1)
 #define TWL4030_BCIAUTOAC	BIT(0)
 #define TWL4030_CGAIN		BIT(5)
@@ -81,6 +91,21 @@
 #define TWL4030_MSTATEC_COMPLETE1	0x0b
 #define TWL4030_MSTATEC_COMPLETE4	0x0e
 
+#if IS_ENABLED(CONFIG_TWL4030_MADC)
+/*
+ * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
+ * then AC is available.
+ */
+static inline int ac_available(void)
+{
+	return twl4030_get_madc_conversion(11) > 4500;
+}
+#else
+static inline int ac_available(void)
+{
+	return 0;
+}
+#endif
 static bool allow_usb;
 module_param(allow_usb, bool, 0644);
 MODULE_PARM_DESC(allow_usb, "Allow USB charge drawing default current");
@@ -94,12 +119,39 @@
 	struct work_struct	work;
 	int			irq_chg;
 	int			irq_bci;
-	struct regulator	*usb_reg;
 	int			usb_enabled;
 
+	/*
+	 * ichg_* and *_cur values in uA. If any are 'large', we set
+	 * CGAIN to '1' which doubles the range for half the
+	 * precision.
+	 */
+	unsigned int		ichg_eoc, ichg_lo, ichg_hi;
+	unsigned int		usb_cur, ac_cur;
+	bool			ac_is_active;
+	int			usb_mode, ac_mode; /* charging mode requested */
+#define	CHARGE_OFF	0
+#define	CHARGE_AUTO	1
+#define	CHARGE_LINEAR	2
+
+	/* When setting the USB current we slowly increase the
+	 * requested current until target is reached or the voltage
+	 * drops below 4.75V.  In the latter case we step back one
+	 * step.
+	 */
+	unsigned int		usb_cur_target;
+	struct delayed_work	current_worker;
+#define	USB_CUR_STEP	20000	/* 20mA at a time */
+#define	USB_MIN_VOLT	4750000	/* 4.75V */
+#define	USB_CUR_DELAY	msecs_to_jiffies(100)
+#define	USB_MAX_CURRENT	1700000 /* TWL4030 caps at 1.7A */
+
 	unsigned long		event;
 };
 
+/* strings for 'usb_mode' values */
+static char *modes[] = { "off", "auto", "continuous" };
+
 /*
  * clear and set bits on an given register on a given module
  */
@@ -180,27 +232,233 @@
 }
 
 /*
- * Check if VBUS power is present
+ * TI provided formulas:
+ * CGAIN == 0: ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85
+ * CGAIN == 1: ICHG = (BCIICHG * 3.4) / (2^10 - 1) - 1.7
+ * Here we use an integer approximation of:
+ * CGAIN == 0: val * 1.6618 - 0.85 * 1000
+ * CGAIN == 1: (val * 1.6618 - 0.85 * 1000) * 2
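+ * For example, with CGAIN == 0 the full-scale value 0x3ff converts to
+ * (1023 * 16618 - 8500 * 1000) / 10 = 850021 uA, close to the 0.85 A
+ * full-scale result of the exact formula.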
  */
-static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
+/*
+ * convert twl register value for currents into uA
+ */
+static int regval2ua(int regval, bool cgain)
+{
+	if (cgain)
+		return (regval * 16618 - 8500 * 1000) / 5;
+	else
+		return (regval * 16618 - 8500 * 1000) / 10;
+}
+
+/*
+ * convert uA currents into twl register value
+ */
+static int ua2regval(int ua, bool cgain)
 {
 	int ret;
-	u8 hwsts;
+	if (cgain)
+		ua /= 2;
+	ret = (ua * 10 + 8500 * 1000) / 16618;
+	/* rounding problems */
+	if (ret < 512)
+		ret = 512;
+	return ret;
+}
 
-	ret = twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &hwsts,
-			      TWL4030_PM_MASTER_STS_HW_CONDITIONS);
-	if (ret < 0)
-		return 0;
+static int twl4030_charger_update_current(struct twl4030_bci *bci)
+{
+	int status;
+	int cur;
+	unsigned reg, cur_reg;
+	u8 bcictl1, oldreg, fullreg;
+	bool cgain = false;
+	u8 boot_bci;
 
-	dev_dbg(bci->dev, "check_vbus: HW_CONDITIONS %02x\n", hwsts);
+	/*
+	 * If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
+	 * and AC is enabled, set current for 'ac'
+	 */
+	if (ac_available()) {
+		cur = bci->ac_cur;
+		bci->ac_is_active = true;
+	} else {
+		cur = bci->usb_cur;
+		bci->ac_is_active = false;
+		if (cur > bci->usb_cur_target) {
+			cur = bci->usb_cur_target;
+			bci->usb_cur = cur;
+		}
+		if (cur < bci->usb_cur_target)
+			schedule_delayed_work(&bci->current_worker, USB_CUR_DELAY);
+	}
 
-	/* in case we also have STS_USB_ID, VBUS is driven by TWL itself */
-	if ((hwsts & TWL4030_STS_VBUS) && !(hwsts & TWL4030_STS_USB_ID))
-		return 1;
+	/* First, check thresholds and see if cgain is needed */
+	if (bci->ichg_eoc >= 200000)
+		cgain = true;
+	if (bci->ichg_lo >= 400000)
+		cgain = true;
+	if (bci->ichg_hi >= 820000)
+		cgain = true;
+	if (cur > 852000)
+		cgain = true;
 
+	status = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
+	if (status < 0)
+		return status;
+	if (twl_i2c_read_u8(TWL_MODULE_PM_MASTER, &boot_bci,
+			    TWL4030_PM_MASTER_BOOT_BCI) < 0)
+		boot_bci = 0;
+	boot_bci &= 7;
+
+	if ((!!cgain) != !!(bcictl1 & TWL4030_CGAIN))
+		/* Need to turn off charging while we change the
+		 * CGAIN bit.  Leave it off while everything is
+		 * updated.
+		 */
+		twl4030_clear_set_boot_bci(boot_bci, 0);
+
+	/*
+	 * For ichg_eoc, the hardware only supports reg values matching
+	 * 100XXXX000, and requires the XXXX be stored in the high nibble
+	 * of TWL4030_BCIMFTH8.
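+	 * (e.g. 0x278 = 0b1001111000 carries XXXX = 0b1111, stored as 0xf0).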
+	 */
+	reg = ua2regval(bci->ichg_eoc, cgain);
+	if (reg > 0x278)
+		reg = 0x278;
+	if (reg < 0x200)
+		reg = 0x200;
+	reg = (reg >> 3) & 0xf;
+	fullreg = reg << 4;
+
+	/*
+	 * For ichg_lo, reg value must match 10XXXX0000.
+	 * XXXX is stored in low nibble of TWL4030_BCIMFTH8.
+	 */
+	reg = ua2regval(bci->ichg_lo, cgain);
+	if (reg > 0x2F0)
+		reg = 0x2F0;
+	if (reg < 0x200)
+		reg = 0x200;
+	reg = (reg >> 4) & 0xf;
+	fullreg |= reg;
+
+	/* ichg_eoc and ichg_lo live in same register */
+	status = twl4030_bci_read(TWL4030_BCIMFTH8, &oldreg);
+	if (status < 0)
+		return status;
+	if (oldreg != fullreg) {
+		status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xF4,
+					  TWL4030_BCIMFKEY);
+		if (status < 0)
+			return status;
+		twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+				 fullreg, TWL4030_BCIMFTH8);
+	}
+
+	/* ichg_hi threshold must be 1XXXX01100 (I think) */
+	reg = ua2regval(bci->ichg_hi, cgain);
+	if (reg > 0x3E0)
+		reg = 0x3E0;
+	if (reg < 0x200)
+		reg = 0x200;
+	fullreg = (reg >> 5) & 0xF;
+	fullreg <<= 4;
+	status = twl4030_bci_read(TWL4030_BCIMFTH9, &oldreg);
+	if (status < 0)
+		return status;
+	if ((oldreg & 0xF0) != fullreg) {
+		fullreg |= (oldreg & 0x0F);
+		status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
+					  TWL4030_BCIMFKEY);
+		if (status < 0)
+			return status;
+		twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+				 fullreg, TWL4030_BCIMFTH9);
+	}
+
+	/*
+	 * And finally, set the current.  This is stored in
+	 * two registers.
+	 */
+	reg = ua2regval(cur, cgain);
+	/* we have only 10 bits */
+	if (reg > 0x3ff)
+		reg = 0x3ff;
+	status = twl4030_bci_read(TWL4030_BCIIREF1, &oldreg);
+	if (status < 0)
+		return status;
+	cur_reg = oldreg;
+	status = twl4030_bci_read(TWL4030_BCIIREF2, &oldreg);
+	if (status < 0)
+		return status;
+	cur_reg |= oldreg << 8;
+	if (reg != oldreg) {
+		/* disable write protection for one write access for
+		 * BCIIREF */
+		status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
+					  TWL4030_BCIMFKEY);
+		if (status < 0)
+			return status;
+		status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+					  (reg & 0x100) ? 3 : 2,
+					  TWL4030_BCIIREF2);
+		if (status < 0)
+			return status;
+		/* disable write protection for one write access for
+		 * BCIIREF */
+		status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xE7,
+					  TWL4030_BCIMFKEY);
+		if (status < 0)
+			return status;
+		status = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+					  reg & 0xff,
+					  TWL4030_BCIIREF1);
+	}
+	if ((!!cgain) != !!(bcictl1 & TWL4030_CGAIN)) {
+		/* Flip CGAIN and re-enable charging */
+		bcictl1 ^= TWL4030_CGAIN;
+		twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE,
+				 bcictl1, TWL4030_BCICTL1);
+		twl4030_clear_set_boot_bci(0, boot_bci);
+	}
 	return 0;
 }
 
+static int twl4030_charger_get_current(void);
+
+static void twl4030_current_worker(struct work_struct *data)
+{
+	int v, curr;
+	int res;
+	struct twl4030_bci *bci = container_of(data, struct twl4030_bci,
+					       current_worker.work);
+
+	res = twl4030bci_read_adc_val(TWL4030_BCIVBUS);
+	if (res < 0)
+		v = 0;
+	else
+		/* BCIVBUS uses ADCIN8, 7/1023 V/step */
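+		/* 7/1023 V is roughly 6843 uV per LSB, so v is in uV */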
+		v = res * 6843;
+	curr = twl4030_charger_get_current();
+
+	dev_dbg(bci->dev, "v=%d cur=%d limit=%d target=%d\n", v, curr,
+		bci->usb_cur, bci->usb_cur_target);
+
+	if (v < USB_MIN_VOLT) {
+		/* Back up and stop adjusting. */
+		bci->usb_cur -= USB_CUR_STEP;
+		bci->usb_cur_target = bci->usb_cur;
+	} else if (bci->usb_cur >= bci->usb_cur_target ||
+		   bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) {
+		/* Reached target and voltage is OK - stop */
+		return;
+	} else {
+		bci->usb_cur += USB_CUR_STEP;
+		schedule_delayed_work(&bci->current_worker, USB_CUR_DELAY);
+	}
+	twl4030_charger_update_current(bci);
+}
+
 /*
  * Enable/Disable USB Charge functionality.
  */
@@ -208,45 +466,60 @@
 {
 	int ret;
 
-	if (enable) {
-		/* Check for USB charger connected */
-		if (!twl4030_bci_have_vbus(bci))
-			return -ENODEV;
+	if (bci->usb_mode == CHARGE_OFF)
+		enable = false;
+	if (enable && !IS_ERR_OR_NULL(bci->transceiver)) {
 
-		/*
-		 * Until we can find out what current the device can provide,
-		 * require a module param to enable USB charging.
-		 */
-		if (!allow_usb) {
-			dev_warn(bci->dev, "USB charging is disabled.\n");
-			return -EACCES;
-		}
+		twl4030_charger_update_current(bci);
 
-		/* Need to keep regulator on */
+		/* Need to keep phy powered */
 		if (!bci->usb_enabled) {
-			ret = regulator_enable(bci->usb_reg);
-			if (ret) {
-				dev_err(bci->dev,
-					"Failed to enable regulator\n");
-				return ret;
-			}
+			pm_runtime_get_sync(bci->transceiver->dev);
 			bci->usb_enabled = 1;
 		}
 
-		/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
-		ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
-		if (ret < 0)
-			return ret;
+		if (bci->usb_mode == CHARGE_AUTO)
+			/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
+			ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
 
 		/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
 		ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
 			TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
+		if (bci->usb_mode == CHARGE_LINEAR) {
+			twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0);
+			/* Watch dog key: WOVF acknowledge */
+			ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33,
+					       TWL4030_BCIWDKEY);
+			/* 0x24 + EKEY6: off mode */
+			ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x2a,
+					       TWL4030_BCIMDKEY);
+			/* EKEY2: Linear charge: USB path */
+			ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x26,
+					       TWL4030_BCIMDKEY);
+			/* WDKEY5: stop watchdog count */
+			ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xf3,
+					       TWL4030_BCIWDKEY);
+			/* enable MFEN3 access */
+			ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x9c,
+					       TWL4030_BCIMFKEY);
+			/* ICHGEOCEN - end-of-charge monitor (current < 80mA)
+			 *             (charging continues)
+			 * ICHGLOWEN - current level monitor (charge continues)
+			 * don't monitor over-current or heat save
+			 */
+			ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0xf0,
+					       TWL4030_BCIMFEN3);
+		}
 	} else {
 		ret = twl4030_clear_set_boot_bci(TWL4030_BCIAUTOUSB, 0);
+		ret |= twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x2a,
+					TWL4030_BCIMDKEY);
 		if (bci->usb_enabled) {
-			regulator_disable(bci->usb_reg);
+			pm_runtime_mark_last_busy(bci->transceiver->dev);
+			pm_runtime_put_autosuspend(bci->transceiver->dev);
 			bci->usb_enabled = 0;
 		}
+		bci->usb_cur = 0;
 	}
 
 	return ret;
@@ -255,10 +528,13 @@
 /*
  * Enable/Disable AC Charge functionality.
  */
-static int twl4030_charger_enable_ac(bool enable)
+static int twl4030_charger_enable_ac(struct twl4030_bci *bci, bool enable)
 {
 	int ret;
 
+	if (bci->ac_mode == CHARGE_OFF)
+		enable = false;
+
 	if (enable)
 		ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOAC);
 	else
@@ -318,6 +594,9 @@
 	struct twl4030_bci *bci = arg;
 
 	dev_dbg(bci->dev, "CHG_PRES irq\n");
+	/* reset current on each 'plug' event */
+	bci->ac_cur = 500000;
+	twl4030_charger_update_current(bci);
 	power_supply_changed(bci->ac);
 	power_supply_changed(bci->usb);
 
@@ -350,6 +629,7 @@
 		power_supply_changed(bci->ac);
 		power_supply_changed(bci->usb);
 	}
+	twl4030_charger_update_current(bci);
 
 	/* various monitoring events, for now we just log them here */
 	if (irqs1 & (TWL4030_TBATOR2 | TWL4030_TBATOR1))
@@ -370,6 +650,63 @@
 	return IRQ_HANDLED;
 }
 
+/*
+ * Provide "max_current" attribute in sysfs.
+ */
+static ssize_t
+twl4030_bci_max_current_store(struct device *dev, struct device_attribute *attr,
+	const char *buf, size_t n)
+{
+	struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+	int cur = 0;
+	int status = 0;
+	status = kstrtoint(buf, 10, &cur);
+	if (status)
+		return status;
+	if (cur < 0)
+		return -EINVAL;
+	if (dev == &bci->ac->dev)
+		bci->ac_cur = cur;
+	else
+		bci->usb_cur_target = cur;
+
+	twl4030_charger_update_current(bci);
+	return n;
+}
+
+/*
+ * sysfs max_current show
+ */
+static ssize_t twl4030_bci_max_current_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	int status = 0;
+	int cur = -1;
+	u8 bcictl1;
+	struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+
+	if (dev == &bci->ac->dev) {
+		if (!bci->ac_is_active)
+			cur = bci->ac_cur;
+	} else {
+		if (bci->ac_is_active)
+			cur = bci->usb_cur_target;
+	}
+	if (cur < 0) {
+		cur = twl4030bci_read_adc_val(TWL4030_BCIIREF1);
+		if (cur < 0)
+			return cur;
+		status = twl4030_bci_read(TWL4030_BCICTL1, &bcictl1);
+		if (status < 0)
+			return status;
+		cur = regval2ua(cur, bcictl1 & TWL4030_CGAIN);
+	}
+	return scnprintf(buf, PAGE_SIZE, "%u\n", cur);
+}
+
+static DEVICE_ATTR(max_current, 0644, twl4030_bci_max_current_show,
+			twl4030_bci_max_current_store);
+
 static void twl4030_bci_usb_work(struct work_struct *data)
 {
 	struct twl4030_bci *bci = container_of(data, struct twl4030_bci, work);
@@ -392,6 +729,12 @@
 
 	dev_dbg(bci->dev, "OTG notify %lu\n", val);
 
+	/* reset current on each 'plug' event */
+	if (allow_usb)
+		bci->usb_cur_target = 500000;
+	else
+		bci->usb_cur_target = 100000;
+
 	bci->event = val;
 	schedule_work(&bci->work);
 
@@ -399,13 +742,66 @@
 }
 
 /*
- * TI provided formulas:
- * CGAIN == 0: ICHG = (BCIICHG * 1.7) / (2^10 - 1) - 0.85
- * CGAIN == 1: ICHG = (BCIICHG * 3.4) / (2^10 - 1) - 1.7
- * Here we use integer approximation of:
- * CGAIN == 0: val * 1.6618 - 0.85
- * CGAIN == 1: (val * 1.6618 - 0.85) * 2
+ * sysfs charger enabled store
  */
+static ssize_t
+twl4030_bci_mode_store(struct device *dev, struct device_attribute *attr,
+			  const char *buf, size_t n)
+{
+	struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+	int mode;
+	int status;
+
+	if (sysfs_streq(buf, modes[0]))
+		mode = 0;
+	else if (sysfs_streq(buf, modes[1]))
+		mode = 1;
+	else if (sysfs_streq(buf, modes[2]))
+		mode = 2;
+	else
+		return -EINVAL;
+	if (dev == &bci->ac->dev) {
+		if (mode == 2)
+			return -EINVAL;
+		twl4030_charger_enable_ac(bci, false);
+		bci->ac_mode = mode;
+		status = twl4030_charger_enable_ac(bci, true);
+	} else {
+		twl4030_charger_enable_usb(bci, false);
+		bci->usb_mode = mode;
+		status = twl4030_charger_enable_usb(bci, true);
+	}
+	return (status == 0) ? n : status;
+}
+
+/*
+ * sysfs charger enabled show
+ */
+static ssize_t
+twl4030_bci_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct twl4030_bci *bci = dev_get_drvdata(dev->parent);
+	int len = 0;
+	int i;
+	int mode = bci->usb_mode;
+
+	if (dev == &bci->ac->dev)
+		mode = bci->ac_mode;
+
+	for (i = 0; i < ARRAY_SIZE(modes); i++)
+		if (mode == i)
+			len += snprintf(buf+len, PAGE_SIZE-len,
+					"[%s] ", modes[i]);
+		else
+			len += snprintf(buf+len, PAGE_SIZE-len,
+					"%s ", modes[i]);
+	buf[len-1] = '\n';
+	return len;
+}
+static DEVICE_ATTR(mode, 0644, twl4030_bci_mode_show,
+		   twl4030_bci_mode_store);
+
 static int twl4030_charger_get_current(void)
 {
 	int curr;
@@ -420,11 +816,7 @@
 	if (ret)
 		return ret;
 
-	ret = (curr * 16618 - 850 * 10000) / 10;
-	if (bcictl1 & TWL4030_CGAIN)
-		ret *= 2;
-
-	return ret;
+	return regval2ua(curr, bcictl1 & TWL4030_CGAIN);
 }
 
 /*
@@ -476,6 +868,17 @@
 		is_charging = state & TWL4030_MSTATEC_USB;
 	else
 		is_charging = state & TWL4030_MSTATEC_AC;
+	if (!is_charging) {
+		u8 s;
+		twl4030_bci_read(TWL4030_BCIMDEN, &s);
+		if (psy->desc->type == POWER_SUPPLY_TYPE_USB)
+			is_charging = s & 1;
+		else
+			is_charging = s & 2;
+		if (is_charging)
+			/* A little white lie */
+			state = TWL4030_MSTATEC_QUICK1;
+	}
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
@@ -574,20 +977,31 @@
 	.get_property	= twl4030_bci_get_property,
 };
 
-static int __init twl4030_bci_probe(struct platform_device *pdev)
+static int twl4030_bci_probe(struct platform_device *pdev)
 {
 	struct twl4030_bci *bci;
 	const struct twl4030_bci_platform_data *pdata = pdev->dev.platform_data;
 	int ret;
 	u32 reg;
 
-	bci = kzalloc(sizeof(*bci), GFP_KERNEL);
+	bci = devm_kzalloc(&pdev->dev, sizeof(*bci), GFP_KERNEL);
 	if (bci == NULL)
 		return -ENOMEM;
 
 	if (!pdata)
 		pdata = twl4030_bci_parse_dt(&pdev->dev);
 
+	bci->ichg_eoc = 80100; /* Stop charging when current drops below this */
+	bci->ichg_lo = 241000; /* Low threshold */
+	bci->ichg_hi = 500000; /* High threshold */
+	bci->ac_cur = 500000; /* 500mA */
+	if (allow_usb)
+		bci->usb_cur_target = 500000;  /* 500mA */
+	else
+		bci->usb_cur_target = 100000;  /* 100mA */
+	bci->usb_mode = CHARGE_AUTO;
+	bci->ac_mode = CHARGE_AUTO;
+
 	bci->dev = &pdev->dev;
 	bci->irq_chg = platform_get_irq(pdev, 0);
 	bci->irq_bci = platform_get_irq(pdev, 1);
@@ -596,47 +1010,46 @@
 	ret = twl4030_is_battery_present(bci);
 	if  (ret) {
 		dev_crit(&pdev->dev, "Battery was not detected:%d\n", ret);
-		goto fail_no_battery;
+		return ret;
 	}
 
 	platform_set_drvdata(pdev, bci);
 
-	bci->ac = power_supply_register(&pdev->dev, &twl4030_bci_ac_desc,
-					NULL);
+	bci->ac = devm_power_supply_register(&pdev->dev, &twl4030_bci_ac_desc,
+					     NULL);
 	if (IS_ERR(bci->ac)) {
 		ret = PTR_ERR(bci->ac);
 		dev_err(&pdev->dev, "failed to register ac: %d\n", ret);
-		goto fail_register_ac;
+		return ret;
 	}
 
-	bci->usb_reg = regulator_get(bci->dev, "bci3v1");
-
-	bci->usb = power_supply_register(&pdev->dev, &twl4030_bci_usb_desc,
-					 NULL);
+	bci->usb = devm_power_supply_register(&pdev->dev, &twl4030_bci_usb_desc,
+					      NULL);
 	if (IS_ERR(bci->usb)) {
 		ret = PTR_ERR(bci->usb);
 		dev_err(&pdev->dev, "failed to register usb: %d\n", ret);
-		goto fail_register_usb;
+		return ret;
 	}
 
-	ret = request_threaded_irq(bci->irq_chg, NULL,
+	ret = devm_request_threaded_irq(&pdev->dev, bci->irq_chg, NULL,
 			twl4030_charger_interrupt, IRQF_ONESHOT, pdev->name,
 			bci);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "could not request irq %d, status %d\n",
 			bci->irq_chg, ret);
-		goto fail_chg_irq;
+		return ret;
 	}
 
-	ret = request_threaded_irq(bci->irq_bci, NULL,
+	ret = devm_request_threaded_irq(&pdev->dev, bci->irq_bci, NULL,
 			twl4030_bci_interrupt, IRQF_ONESHOT, pdev->name, bci);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "could not request irq %d, status %d\n",
 			bci->irq_bci, ret);
-		goto fail_bci_irq;
+		return ret;
 	}
 
 	INIT_WORK(&bci->work, twl4030_bci_usb_work);
+	INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker);
 
 	bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
 	if (bci->dev->of_node) {
@@ -644,9 +1057,13 @@
 
 		phynode = of_find_compatible_node(bci->dev->of_node->parent,
 						  NULL, "ti,twl4030-usb");
-		if (phynode)
+		if (phynode) {
 			bci->transceiver = devm_usb_get_phy_by_node(
 				bci->dev, phynode, &bci->usb_nb);
+			if (IS_ERR(bci->transceiver) &&
+			    PTR_ERR(bci->transceiver) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+		}
 	}
 
 	/* Enable interrupts now. */
@@ -656,7 +1073,7 @@
 			       TWL4030_INTERRUPTS_BCIIMR1A);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
-		goto fail_unmask_interrupts;
+		return ret;
 	}
 
 	reg = ~(u32)(TWL4030_VBATOV | TWL4030_VBUSOV | TWL4030_ACCHGOV);
@@ -665,8 +1082,23 @@
 	if (ret < 0)
 		dev_warn(&pdev->dev, "failed to unmask interrupts: %d\n", ret);
 
-	twl4030_charger_enable_ac(true);
-	twl4030_charger_enable_usb(bci, true);
+	twl4030_charger_update_current(bci);
+	if (device_create_file(&bci->usb->dev, &dev_attr_max_current))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+	if (device_create_file(&bci->usb->dev, &dev_attr_mode))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+	if (device_create_file(&bci->ac->dev, &dev_attr_mode))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+	if (device_create_file(&bci->ac->dev, &dev_attr_max_current))
+		dev_warn(&pdev->dev, "could not create sysfs file\n");
+
+	twl4030_charger_enable_ac(bci, true);
+	if (!IS_ERR_OR_NULL(bci->transceiver))
+		twl4030_bci_usb_ncb(&bci->usb_nb,
+				    bci->transceiver->last_event,
+				    NULL);
+	else
+		twl4030_charger_enable_usb(bci, false);
 	if (pdata)
 		twl4030_charger_enable_backup(pdata->bb_uvolt,
 					      pdata->bb_uamp);
@@ -674,42 +1106,26 @@
 		twl4030_charger_enable_backup(0, 0);
 
 	return 0;
-
-fail_unmask_interrupts:
-	free_irq(bci->irq_bci, bci);
-fail_bci_irq:
-	free_irq(bci->irq_chg, bci);
-fail_chg_irq:
-	power_supply_unregister(bci->usb);
-fail_register_usb:
-	power_supply_unregister(bci->ac);
-fail_register_ac:
-fail_no_battery:
-	kfree(bci);
-
-	return ret;
 }
 
 static int __exit twl4030_bci_remove(struct platform_device *pdev)
 {
 	struct twl4030_bci *bci = platform_get_drvdata(pdev);
 
-	twl4030_charger_enable_ac(false);
+	twl4030_charger_enable_ac(bci, false);
 	twl4030_charger_enable_usb(bci, false);
 	twl4030_charger_enable_backup(0, 0);
 
+	device_remove_file(&bci->usb->dev, &dev_attr_max_current);
+	device_remove_file(&bci->usb->dev, &dev_attr_mode);
+	device_remove_file(&bci->ac->dev, &dev_attr_max_current);
+	device_remove_file(&bci->ac->dev, &dev_attr_mode);
 	/* mask interrupts */
 	twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
 			 TWL4030_INTERRUPTS_BCIIMR1A);
 	twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, 0xff,
 			 TWL4030_INTERRUPTS_BCIIMR2A);
 
-	free_irq(bci->irq_bci, bci);
-	free_irq(bci->irq_chg, bci);
-	power_supply_unregister(bci->usb);
-	power_supply_unregister(bci->ac);
-	kfree(bci);
-
 	return 0;
 }
 
@@ -720,14 +1136,14 @@
 MODULE_DEVICE_TABLE(of, twl_bci_of_match);
 
 static struct platform_driver twl4030_bci_driver = {
+	.probe = twl4030_bci_probe,
 	.driver	= {
 		.name	= "twl4030_bci",
 		.of_match_table = of_match_ptr(twl_bci_of_match),
 	},
 	.remove	= __exit_p(twl4030_bci_remove),
 };
-
-module_platform_driver_probe(twl4030_bci_driver, twl4030_bci_probe);
+module_platform_driver(twl4030_bci_driver);
 
 MODULE_AUTHOR("Gražvydas Ignotas");
 MODULE_DESCRIPTION("TWL4030 Battery Charger Interface driver");
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 482b22d..5efacd0 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1096,11 +1096,13 @@
 	RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
 	RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
 	RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
+	RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
 	RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
 	RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
 	RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
 	RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
 	RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
+	RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
 	RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
 	{}
 };
@@ -1145,9 +1147,11 @@
 			pr_debug("remove package, undo power limit on %d: %s\n",
 				rp->id, rd->name);
 			rapl_write_data_raw(rd, PL1_ENABLE, 0);
-			rapl_write_data_raw(rd, PL2_ENABLE, 0);
 			rapl_write_data_raw(rd, PL1_CLAMP, 0);
-			rapl_write_data_raw(rd, PL2_CLAMP, 0);
+			if (find_nr_power_limit(rd) > 1) {
+				rapl_write_data_raw(rd, PL2_ENABLE, 0);
+				rapl_write_data_raw(rd, PL2_CLAMP, 0);
+			}
 			if (rd->id == RAPL_DOMAIN_PACKAGE) {
 				rd_package = rd;
 				continue;
diff --git a/drivers/ras/Kconfig b/drivers/ras/Kconfig
index f9da613..4c3c67d 100644
--- a/drivers/ras/Kconfig
+++ b/drivers/ras/Kconfig
@@ -1,2 +1,35 @@
-config RAS
-	bool
+menuconfig RAS
+	bool "Reliability, Availability and Serviceability (RAS) features"
+	help
+	  Reliability, availability and serviceability (RAS) is a computer
+	  hardware engineering term. Computers designed with higher levels
+	  of RAS have a multitude of features that protect data integrity
+	  and help them stay available for long periods of time without
+	  failure.
+
+	  Reliability can be defined as the probability that the system will
+	  produce correct outputs up to some given time. Reliability is
+	  enhanced by features that help to avoid, detect and repair hardware
+	  faults.
+
+	  Availability is the probability a system is operational at a given
+	  time, i.e. the amount of time a device is actually operating as the
+	  percentage of total time it should be operating.
+
+	  Serviceability or maintainability is the simplicity and speed with
+	  which a system can be repaired or maintained; if the time to repair
+	  a failed system increases, then availability will decrease.
+
+	  Note that Reliability and Availability are distinct concepts:
+	  Reliability is a measure of the ability of a system to function
+	  correctly, including avoiding data corruption, whereas Availability
+	  measures how often it is available for use, even though it may not
+	  be functioning correctly. For example, a server may run forever and
+	  so have ideal availability, but may be unreliable, with frequent
+	  data corruption.
+
+if RAS
+
+source arch/x86/ras/Kconfig
+
+endif
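As a side note on the help text above (not part of the Kconfig source): the availability it describes is conventionally quantified as steady-state availability, expressed in terms of mean time between failures and mean time to repair:

	A = \frac{\mathrm{MTBF}}{\mathrm{MTBF} + \mathrm{MTTR}}
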
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 7fd4f51..a62a896 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -78,7 +78,6 @@
 };
 
 struct pm800_regulators {
-	struct regulator_dev *regulators[PM800_ID_RG_MAX];
 	struct pm80x_chip *chip;
 	struct regmap *map;
 };
@@ -92,14 +91,16 @@
  * not the constant voltage table.
  * n_volt - Number of available selectors
  */
-#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt)		\
+#define PM800_BUCK(match, vreg, ereg, ebit, amax, volt_ranges, n_volt)	\
 {									\
 	.desc	= {							\
-		.name	= #vreg,					\
-		.ops	= &pm800_volt_range_ops,			\
-		.type	= REGULATOR_VOLTAGE,				\
-		.id	= PM800_ID_##vreg,				\
-		.owner	= THIS_MODULE,					\
+		.name			= #vreg,			\
+		.of_match		= of_match_ptr(#match),		\
+		.regulators_node	= of_match_ptr("regulators"),	\
+		.ops			= &pm800_volt_range_ops,	\
+		.type			= REGULATOR_VOLTAGE,		\
+		.id			= PM800_ID_##vreg,		\
+		.owner			= THIS_MODULE,			\
 		.n_voltages		= n_volt,			\
 		.linear_ranges		= volt_ranges,			\
 		.n_linear_ranges	= ARRAY_SIZE(volt_ranges),	\
@@ -108,7 +109,7 @@
 		.enable_reg		= PM800_##ereg,			\
 		.enable_mask		= 1 << (ebit),			\
 	},								\
-	.max_ua		= (amax),					\
+	.max_ua	= (amax),						\
 }
 
 /*
@@ -120,22 +121,24 @@
  * For all the LDOes, there are too many ranges. Using volt_table will be
  * simpler and faster.
  */
-#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table)		\
+#define PM800_LDO(match, vreg, ereg, ebit, amax, ldo_volt_table)	\
 {									\
 	.desc	= {							\
-		.name	= #vreg,					\
-		.ops	= &pm800_volt_table_ops,			\
-		.type	= REGULATOR_VOLTAGE,				\
-		.id	= PM800_ID_##vreg,				\
-		.owner	= THIS_MODULE,					\
-		.n_voltages = ARRAY_SIZE(ldo_volt_table),		\
-		.vsel_reg	= PM800_##vreg##_VOUT,			\
-		.vsel_mask	= 0xf,					\
-		.enable_reg	= PM800_##ereg,				\
-		.enable_mask	= 1 << (ebit),				\
-		.volt_table	= ldo_volt_table,			\
+		.name			= #vreg,			\
+		.of_match		= of_match_ptr(#match),		\
+		.regulators_node	= of_match_ptr("regulators"),	\
+		.ops			= &pm800_volt_table_ops,	\
+		.type			= REGULATOR_VOLTAGE,		\
+		.id			= PM800_ID_##vreg,		\
+		.owner			= THIS_MODULE,			\
+		.n_voltages		= ARRAY_SIZE(ldo_volt_table),	\
+		.vsel_reg		= PM800_##vreg##_VOUT,		\
+		.vsel_mask		= 0xf,				\
+		.enable_reg		= PM800_##ereg,			\
+		.enable_mask		= 1 << (ebit),			\
+		.volt_table		= ldo_volt_table,		\
 	},								\
-	.max_ua		= (amax),					\
+	.max_ua	= (amax),						\
 }
 
 /* Ranges are sorted in ascending order. */
@@ -178,122 +181,66 @@
 }
 
 static struct regulator_ops pm800_volt_range_ops = {
-	.list_voltage = regulator_list_voltage_linear_range,
-	.map_voltage = regulator_map_voltage_linear_range,
-	.set_voltage_sel = regulator_set_voltage_sel_regmap,
-	.get_voltage_sel = regulator_get_voltage_sel_regmap,
-	.enable = regulator_enable_regmap,
-	.disable = regulator_disable_regmap,
-	.is_enabled = regulator_is_enabled_regmap,
-	.get_current_limit = pm800_get_current_limit,
+	.list_voltage		= regulator_list_voltage_linear_range,
+	.map_voltage		= regulator_map_voltage_linear_range,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.get_current_limit	= pm800_get_current_limit,
 };
 
 static struct regulator_ops pm800_volt_table_ops = {
-	.list_voltage = regulator_list_voltage_table,
-	.map_voltage = regulator_map_voltage_iterate,
-	.set_voltage_sel = regulator_set_voltage_sel_regmap,
-	.get_voltage_sel = regulator_get_voltage_sel_regmap,
-	.enable = regulator_enable_regmap,
-	.disable = regulator_disable_regmap,
-	.is_enabled = regulator_is_enabled_regmap,
-	.get_current_limit = pm800_get_current_limit,
+	.list_voltage		= regulator_list_voltage_table,
+	.map_voltage		= regulator_map_voltage_iterate,
+	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
+	.enable			= regulator_enable_regmap,
+	.disable		= regulator_disable_regmap,
+	.is_enabled		= regulator_is_enabled_regmap,
+	.get_current_limit	= pm800_get_current_limit,
 };
 
 /* The array is indexed by id(PM800_ID_XXX) */
 static struct pm800_regulator_info pm800_regulator_info[] = {
-	PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
-	PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
-	PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
-	PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
-	PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
+	PM800_BUCK(buck1, BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
+	PM800_BUCK(buck2, BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
+	PM800_BUCK(buck3, BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
+	PM800_BUCK(buck4, BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
+	PM800_BUCK(buck5, BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
 
-	PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
-	PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
-	PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
-	PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
-	PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
+	PM800_LDO(ldo1, LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
+	PM800_LDO(ldo2, LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
+	PM800_LDO(ldo3, LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo4, LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo5, LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo6, LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo7, LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo8, LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo9, LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo10, LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo11, LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo12, LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo13, LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo14, LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo15, LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo16, LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo17, LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
+	PM800_LDO(ldo18, LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
+	PM800_LDO(ldo19, LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
 };
 
-#define PM800_REGULATOR_OF_MATCH(_name, _id)				\
-	[PM800_ID_##_id] = {						\
-		.name = #_name,						\
-		.driver_data = &pm800_regulator_info[PM800_ID_##_id],	\
-	}
-
-static struct of_regulator_match pm800_regulator_matches[] = {
-	PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
-	PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
-	PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
-	PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
-	PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
-	PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
-	PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
-	PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
-	PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
-	PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
-	PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
-	PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
-	PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
-	PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
-	PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
-	PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
-	PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
-	PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
-	PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
-	PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
-	PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
-	PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
-	PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
-	PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
-};
-
-static int pm800_regulator_dt_init(struct platform_device *pdev)
-{
-	struct device_node *np = pdev->dev.of_node;
-	int ret;
-
-	ret = of_regulator_match(&pdev->dev, np,
-				 pm800_regulator_matches,
-				 ARRAY_SIZE(pm800_regulator_matches));
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
 static int pm800_regulator_probe(struct platform_device *pdev)
 {
 	struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
 	struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
 	struct pm800_regulators *pm800_data;
-	struct pm800_regulator_info *info;
 	struct regulator_config config = { };
 	struct regulator_init_data *init_data;
 	int i, ret;
 
-	if (!pdata || pdata->num_regulators == 0) {
-		if (IS_ENABLED(CONFIG_OF)) {
-			ret = pm800_regulator_dt_init(pdev);
-			if (ret)
-				return ret;
-		} else {
-			return -ENODEV;
-		}
-	} else if (pdata->num_regulators) {
+	if (pdata && pdata->num_regulators) {
 		unsigned int count = 0;
 
 		/* Check whether num_regulator is valid. */
@@ -303,8 +250,6 @@
 		}
 		if (count != pdata->num_regulators)
 			return -EINVAL;
-	} else {
-		return -EINVAL;
 	}
 
 	pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
@@ -317,30 +262,27 @@
 
 	platform_set_drvdata(pdev, pm800_data);
 
+	config.dev = chip->dev;
+	config.regmap = pm800_data->map;
 	for (i = 0; i < PM800_ID_RG_MAX; i++) {
-		if (!pdata || pdata->num_regulators == 0)
-			init_data = pm800_regulator_matches[i].init_data;
-		else
+		struct regulator_dev *regulator;
+
+		if (pdata && pdata->num_regulators) {
 			init_data = pdata->regulators[i];
-		if (!init_data)
-			continue;
-		info = pm800_regulator_matches[i].driver_data;
-		config.dev = &pdev->dev;
-		config.init_data = init_data;
-		config.driver_data = info;
-		config.regmap = pm800_data->map;
-		config.of_node = pm800_regulator_matches[i].of_node;
+			if (!init_data)
+				continue;
 
-		pm800_data->regulators[i] =
-				regulator_register(&info->desc, &config);
-		if (IS_ERR(pm800_data->regulators[i])) {
-			ret = PTR_ERR(pm800_data->regulators[i]);
+			config.init_data = init_data;
+		}
+
+		config.driver_data = &pm800_regulator_info[i];
+
+		regulator = devm_regulator_register(&pdev->dev,
+				&pm800_regulator_info[i].desc, &config);
+		if (IS_ERR(regulator)) {
+			ret = PTR_ERR(regulator);
 			dev_err(&pdev->dev, "Failed to register %s\n",
-				info->desc.name);
-
-			while (--i >= 0)
-				regulator_unregister(pm800_data->regulators[i]);
-
+					pm800_regulator_info[i].desc.name);
 			return ret;
 		}
 	}
@@ -348,23 +290,11 @@
 	return 0;
 }
 
-static int pm800_regulator_remove(struct platform_device *pdev)
-{
-	struct pm800_regulators *pm800_data = platform_get_drvdata(pdev);
-	int i;
-
-	for (i = 0; i < PM800_ID_RG_MAX; i++)
-		regulator_unregister(pm800_data->regulators[i]);
-
-	return 0;
-}
-
 static struct platform_driver pm800_regulator_driver = {
 	.driver		= {
 		.name	= "88pm80x-regulator",
 	},
 	.probe		= pm800_regulator_probe,
-	.remove		= pm800_regulator_remove,
 };
 
 module_platform_driver(pm800_regulator_driver);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index bef3bde..64bccff 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -209,13 +209,13 @@
 	  interface.
 
 config REGULATOR_DA9211
-	tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214 regulator"
+	tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator"
 	depends on I2C
 	select REGMAP_I2C
 	help
 	  Say y here to add support for the Dialog Semiconductor DA9211/DA9212
-	  /DA9213/DA9214.
-	  The DA9211/DA9212/DA9213/DA9214 is a multi-phase synchronous
+	  /DA9213/DA9214/DA9215.
+	  The DA9211/DA9212/DA9213/DA9214/DA9215 is a multi-phase synchronous
 	  step down converter 12A or 16A DC-DC Buck controlled through an I2C
 	  interface.
 
@@ -407,13 +407,13 @@
 	  Exynos-4 chips to control VARM and VINT voltages.
 
 config REGULATOR_MAX77693
-	tristate "Maxim MAX77693 regulator"
-	depends on MFD_MAX77693
+	tristate "Maxim 77693/77843 regulator"
+	depends on (MFD_MAX77693 || MFD_MAX77843)
 	help
-	  This driver controls a Maxim 77693 regulator via I2C bus.
+	  This driver controls the Maxim 77693/77843 regulators via the I2C bus.
 	  The regulators include two LDOs, 'SAFEOUT1', 'SAFEOUT2'
 	  and one current regulator 'CHARGER'. This is suitable for
-	  Exynos-4x12 chips.
+	  Exynos-4x12 (MAX77693) or Exynos5433 (MAX77843) SoC chips.
 
 config REGULATOR_MAX77802
 	tristate "Maxim 77802 regulator"
@@ -424,14 +424,6 @@
 	  Exynos5420/Exynos5800 SoCs to control various voltages.
 	  It includes support for control of voltage and ramp speed.
 
-config REGULATOR_MAX77843
-	tristate "Maxim 77843 regulator"
-	depends on MFD_MAX77843
-	help
-	  This driver controls a Maxim 77843 regulator.
-	  The regulator include two 'SAFEOUT' for USB(Universal Serial Bus)
-	  This is suitable for Exynos5433 SoC chips.
-
 config REGULATOR_MC13XXX_CORE
 	tristate
 
@@ -451,6 +443,15 @@
 	  Say y here to support the regulators found on the Freescale MC13892
 	  PMIC.
 
+config REGULATOR_MT6311
+	tristate "MediaTek MT6311 PMIC"
+	depends on I2C
+	help
+	  Say y here to enable support for the power regulator of the
+	  MediaTek MT6311 PMIC.
+	  This driver supports control of the device's power rails
+	  through the regulator interface.
+
 config REGULATOR_MT6397
 	tristate "MediaTek MT6397 PMIC"
 	depends on MFD_MT6397
@@ -522,6 +523,18 @@
 	  Qualcomm RPM as a module. The module will be named
 	  "qcom_rpm-regulator".
 
+config REGULATOR_QCOM_SMD_RPM
+	tristate "Qualcomm SMD based RPM regulator driver"
+	depends on QCOM_SMD_RPM
+	help
+	  If you say yes to this option, support will be included for the
+	  regulators exposed by the Resource Power Manager found in Qualcomm
+	  8974 based devices.
+
+	  Say M here if you want to include support for the regulators on the
+	  Qualcomm RPM as a module. The module will be named
+	  "qcom_smd-regulator".
+
 config REGULATOR_QCOM_SPMI
 	tristate "Qualcomm SPMI regulator driver"
 	depends on SPMI || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 91bf762..0f81749 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -56,12 +56,13 @@
 obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
 obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
 obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
-obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
 obj-$(CONFIG_REGULATOR_MT6397)	+= mt6397-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
 obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
index 2ff73d7..896db16 100644
--- a/drivers/regulator/act8865-regulator.c
+++ b/drivers/regulator/act8865-regulator.c
@@ -530,7 +530,6 @@
 static struct i2c_driver act8865_pmic_driver = {
 	.driver	= {
 		.name	= "act8865",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= act8865_pmic_probe,
 	.id_table	= act8865_ids,
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 48016a0..ea50a88 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -275,4 +275,3 @@
 MODULE_DESCRIPTION("AD5398 and AD5821 current regulator driver");
 MODULE_AUTHOR("Sonic Zhang");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad5398-regulator");
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 6468291..01bf347 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -405,3 +405,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
 MODULE_DESCRIPTION("Regulator Driver for AXP20X PMIC");
+MODULE_ALIAS("platform:axp20x-regulator");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 78387a6..de9f272 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -111,6 +111,11 @@
 					  const char *supply_name);
 static void _regulator_put(struct regulator *regulator);
 
+static struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+	return container_of(dev, struct regulator_dev, dev);
+}
+
 static const char *rdev_get_name(struct regulator_dev *rdev)
 {
 	if (rdev->constraints && rdev->constraints->name)
@@ -296,7 +301,7 @@
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
-		rdev_err(rdev, "operation not allowed\n");
+		rdev_dbg(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	return 0;
@@ -641,6 +646,8 @@
 	int current_uA = 0, output_uV, input_uV, err;
 	unsigned int mode;
 
+	lockdep_assert_held_once(&rdev->mutex);
+
 	/*
 	 * first check to see if we can set modes at all, otherwise just
 	 * tell the consumer everything is OK.
@@ -761,6 +768,8 @@
 /* locks held by caller */
 static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
 {
+	lockdep_assert_held_once(&rdev->mutex);
+
 	if (!rdev->constraints)
 		return -EINVAL;
 
@@ -1082,6 +1091,15 @@
 		}
 	}
 
+	if (rdev->constraints->over_current_protection
+		&& ops->set_over_current_protection) {
+		ret = ops->set_over_current_protection(rdev);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to set over current protection\n");
+			goto out;
+		}
+	}
+
 	print_constraints(rdev);
 	return 0;
 out:
@@ -1595,9 +1613,11 @@
 {
 	struct regulator_dev *rdev;
 
-	if (regulator == NULL || IS_ERR(regulator))
+	if (IS_ERR_OR_NULL(regulator))
 		return;
 
+	lockdep_assert_held_once(&regulator_list_mutex);
+
 	rdev = regulator->rdev;
 
 	debugfs_remove_recursive(regulator->debugfs);
@@ -1606,14 +1626,15 @@
 	if (regulator->dev)
 		sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
 	mutex_lock(&rdev->mutex);
-	kfree(regulator->supply_name);
 	list_del(&regulator->list);
-	kfree(regulator);
 
 	rdev->open_count--;
 	rdev->exclusive = 0;
 	mutex_unlock(&rdev->mutex);
 
+	kfree(regulator->supply_name);
+	kfree(regulator);
+
 	module_put(rdev->owner);
 }
 
@@ -1976,6 +1997,8 @@
 {
 	int ret;
 
+	lockdep_assert_held_once(&rdev->mutex);
+
 	/* check voltage and requested load before enabling */
 	if (rdev->constraints &&
 	    (rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS))
@@ -2076,6 +2099,8 @@
 {
 	int ret = 0;
 
+	lockdep_assert_held_once(&rdev->mutex);
+
 	if (WARN(rdev->use_count <= 0,
 		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
 		return -EIO;
@@ -2154,6 +2179,8 @@
 {
 	int ret = 0;
 
+	lockdep_assert_held_once(&rdev->mutex);
+
 	ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
 			REGULATOR_EVENT_PRE_DISABLE, NULL);
 	if (ret & NOTIFY_STOP_MASK)
@@ -2722,7 +2749,7 @@
 		goto out;
 
 	/* If we're trying to set a range that overlaps the current voltage,
-	 * return succesfully even though the regulator does not support
+	 * return successfully even though the regulator does not support
 	 * changing the voltage.
 	 */
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
@@ -3450,6 +3477,8 @@
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
 				  unsigned long event, void *data)
 {
+	lockdep_assert_held_once(&rdev->mutex);
+
 	_notifier_call_chain(rdev, event, data);
 	return NOTIFY_DONE;
 
@@ -3594,6 +3623,9 @@
 static void regulator_dev_release(struct device *dev)
 {
 	struct regulator_dev *rdev = dev_get_drvdata(dev);
+
+	kfree(rdev->constraints);
+	of_node_put(rdev->dev.of_node);
 	kfree(rdev);
 }
 
@@ -3824,11 +3856,9 @@
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
-	kfree(rdev->constraints);
-	regulator_ena_gpio_free(rdev);
-	of_node_put(rdev->dev.of_node);
-	device_unregister(&rdev->dev);
 	mutex_unlock(&regulator_list_mutex);
+	regulator_ena_gpio_free(rdev);
+	device_unregister(&rdev->dev);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
 
@@ -4147,13 +4177,57 @@
 /* init early to allow our consumers to complete system booting */
 core_initcall(regulator_init);
 
-static int __init regulator_init_complete(void)
+static int __init regulator_late_cleanup(struct device *dev, void *data)
 {
-	struct regulator_dev *rdev;
-	const struct regulator_ops *ops;
-	struct regulation_constraints *c;
+	struct regulator_dev *rdev = dev_to_rdev(dev);
+	const struct regulator_ops *ops = rdev->desc->ops;
+	struct regulation_constraints *c = rdev->constraints;
 	int enabled, ret;
 
+	if (c && c->always_on)
+		return 0;
+
+	if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+		return 0;
+
+	mutex_lock(&rdev->mutex);
+
+	if (rdev->use_count)
+		goto unlock;
+
+	/* If we can't read the status assume it's on. */
+	if (ops->is_enabled)
+		enabled = ops->is_enabled(rdev);
+	else
+		enabled = 1;
+
+	if (!enabled)
+		goto unlock;
+
+	if (have_full_constraints()) {
+		/* We log since this may kill the system if it goes
+		 * wrong. */
+		rdev_info(rdev, "disabling\n");
+		ret = _regulator_do_disable(rdev);
+		if (ret != 0)
+			rdev_err(rdev, "couldn't disable: %d\n", ret);
+	} else {
+		/* The intention is that in future we will
+		 * assume that full constraints are provided
+		 * so warn even if we aren't going to do
+		 * anything here.
+		 */
+		rdev_warn(rdev, "incomplete constraints, leaving on\n");
+	}
+
+unlock:
+	mutex_unlock(&rdev->mutex);
+
+	return 0;
+}
+
+static int __init regulator_init_complete(void)
+{
 	/*
 	 * Since DT doesn't provide an idiomatic mechanism for
 	 * enabling full constraints and since it's much more natural
@@ -4163,58 +4237,13 @@
 	if (of_have_populated_dt())
 		has_full_constraints = true;
 
-	mutex_lock(&regulator_list_mutex);
-
 	/* If we have a full configuration then disable any regulators
 	 * we have permission to change the status for and which are
 	 * not in use or always_on.  This is effectively the default
 	 * for DT and ACPI as they have full constraints.
 	 */
-	list_for_each_entry(rdev, &regulator_list, list) {
-		ops = rdev->desc->ops;
-		c = rdev->constraints;
-
-		if (c && c->always_on)
-			continue;
-
-		if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
-			continue;
-
-		mutex_lock(&rdev->mutex);
-
-		if (rdev->use_count)
-			goto unlock;
-
-		/* If we can't read the status assume it's on. */
-		if (ops->is_enabled)
-			enabled = ops->is_enabled(rdev);
-		else
-			enabled = 1;
-
-		if (!enabled)
-			goto unlock;
-
-		if (have_full_constraints()) {
-			/* We log since this may kill the system if it
-			 * goes wrong. */
-			rdev_info(rdev, "disabling\n");
-			ret = _regulator_do_disable(rdev);
-			if (ret != 0)
-				rdev_err(rdev, "couldn't disable: %d\n", ret);
-		} else {
-			/* The intention is that in future we will
-			 * assume that full constraints are provided
-			 * so warn even if we aren't going to do
-			 * anything here.
-			 */
-			rdev_warn(rdev, "incomplete constraints, leaving on\n");
-		}
-
-unlock:
-		mutex_unlock(&rdev->mutex);
-	}
-
-	mutex_unlock(&regulator_list_mutex);
+	class_for_each_device(&regulator_class, NULL, NULL,
+			      regulator_late_cleanup);
 
 	return 0;
 }
diff --git a/drivers/regulator/da9062-regulator.c b/drivers/regulator/da9062-regulator.c
index dd76da0..5638fe8 100644
--- a/drivers/regulator/da9062-regulator.c
+++ b/drivers/regulator/da9062-regulator.c
@@ -818,7 +818,6 @@
 static struct platform_driver da9062_regulator_driver = {
 	.driver = {
 		.name = "da9062-regulators",
-		.owner = THIS_MODULE,
 	},
 	.probe = da9062_regulator_probe,
 };
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index f0489cb..b351783 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -22,6 +22,8 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
@@ -120,6 +122,55 @@
 	return da9210_buck_limits[sel];
 }
 
+static irqreturn_t da9210_irq_handler(int irq, void *data)
+{
+	struct da9210 *chip = data;
+	unsigned int val, handled = 0;
+	int error, ret = IRQ_NONE;
+
+	error = regmap_read(chip->regmap, DA9210_REG_EVENT_B, &val);
+	if (error < 0)
+		goto error_i2c;
+
+	if (val & DA9210_E_OVCURR) {
+		regulator_notifier_call_chain(chip->rdev,
+					      REGULATOR_EVENT_OVER_CURRENT,
+					      NULL);
+		handled |= DA9210_E_OVCURR;
+	}
+	if (val & DA9210_E_NPWRGOOD) {
+		regulator_notifier_call_chain(chip->rdev,
+					      REGULATOR_EVENT_UNDER_VOLTAGE,
+					      NULL);
+		handled |= DA9210_E_NPWRGOOD;
+	}
+	if (val & (DA9210_E_TEMP_WARN | DA9210_E_TEMP_CRIT)) {
+		regulator_notifier_call_chain(chip->rdev,
+					      REGULATOR_EVENT_OVER_TEMP, NULL);
+		handled |= val & (DA9210_E_TEMP_WARN | DA9210_E_TEMP_CRIT);
+	}
+	if (val & DA9210_E_VMAX) {
+		regulator_notifier_call_chain(chip->rdev,
+					      REGULATOR_EVENT_REGULATION_OUT,
+					      NULL);
+		handled |= DA9210_E_VMAX;
+	}
+	if (handled) {
+		/* Clear handled events */
+		error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
+		if (error < 0)
+			goto error_i2c;
+
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+
+error_i2c:
+	dev_err(regmap_get_device(chip->regmap), "I2C error : %d\n", error);
+	return ret;
+}
+
 /*
  * I2C driver interface functions
  */
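The handler above only forwards DA9210 events into the regulator notifier chain; a consumer that wants to react to them registers its own notifier block against the regulator it holds. A rough sketch under the standard consumer API — the names here are hypothetical and not part of this patch:

#include <linux/printk.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer callback: log over-current and under-voltage events. */
static int my_supply_event(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	if (event & REGULATOR_EVENT_OVER_CURRENT)
		pr_warn("supply reported over-current\n");
	if (event & REGULATOR_EVENT_UNDER_VOLTAGE)
		pr_warn("supply reported under-voltage\n");
	return NOTIFY_OK;
}

static struct notifier_block my_supply_nb = {
	.notifier_call = my_supply_event,
};

/* In probe, with a struct regulator *supply already obtained via
 * devm_regulator_get(), the block would be hooked up with:
 *	regulator_register_notifier(supply, &my_supply_nb);
 */
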
@@ -168,6 +219,30 @@
 	}
 
 	chip->rdev = rdev;
+	if (i2c->irq) {
+		error = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+						  da9210_irq_handler,
+						  IRQF_TRIGGER_LOW |
+						  IRQF_ONESHOT | IRQF_SHARED,
+						  "da9210", chip);
+		if (error) {
+			dev_err(&i2c->dev, "Failed to request IRQ%u: %d\n",
+				i2c->irq, error);
+			return error;
+		}
+
+		error = regmap_update_bits(chip->regmap, DA9210_REG_MASK_B,
+					 DA9210_M_OVCURR | DA9210_M_NPWRGOOD |
+					 DA9210_M_TEMP_WARN |
+					 DA9210_M_TEMP_CRIT | DA9210_M_VMAX, 0);
+		if (error < 0) {
+			dev_err(&i2c->dev, "Failed to update mask reg: %d\n",
+				error);
+			return error;
+		}
+	} else {
+		dev_warn(&i2c->dev, "No IRQ configured\n");
+	}
 
 	i2c_set_clientdata(i2c, chip);
 
@@ -184,7 +259,6 @@
 static struct i2c_driver da9210_regulator_driver = {
 	.driver = {
 		.name = "da9210",
-		.owner = THIS_MODULE,
 	},
 	.probe = da9210_i2c_probe,
 	.id_table = da9210_i2c_id,
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index df79e4b..04ef65b 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -1,6 +1,6 @@
 /*
- * da9211-regulator.c - Regulator device driver for DA9211/DA9213
- * Copyright (C) 2014  Dialog Semiconductor Ltd.
+ * da9211-regulator.c - Regulator device driver for DA9211/DA9213/DA9215
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Library General Public
@@ -32,6 +32,7 @@
 /* DEVICE IDs */
 #define DA9211_DEVICE_ID	0x22
 #define DA9213_DEVICE_ID	0x23
+#define DA9215_DEVICE_ID	0x24
 
 #define DA9211_BUCK_MODE_SLEEP	1
 #define DA9211_BUCK_MODE_SYNC	2
@@ -90,6 +91,13 @@
 	3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
 	4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
 };
+/* Current limits for the DA9215 buck (uA); indices
+ * correspond to register values
+ */
+static const int da9215_current_limits[] = {
+	4000000, 4200000, 4400000, 4600000, 4800000, 5000000, 5200000, 5400000,
+	5600000, 5800000, 6000000, 6200000, 6400000, 6600000, 6800000, 7000000
+};
 
 static unsigned int da9211_buck_get_mode(struct regulator_dev *rdev)
 {
@@ -157,6 +165,10 @@
 		current_limits = da9213_current_limits;
 		max_size = ARRAY_SIZE(da9213_current_limits)-1;
 		break;
+	case DA9215:
+		current_limits = da9215_current_limits;
+		max_size = ARRAY_SIZE(da9215_current_limits)-1;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -189,6 +201,9 @@
 	case DA9213:
 		current_limits = da9213_current_limits;
 		break;
+	case DA9215:
+		current_limits = da9215_current_limits;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -350,13 +365,11 @@
 	/* If configuration for 1/2 bucks is different between platform data
 	 * and the register, driver should exit.
 	 */
-	if ((chip->pdata->num_buck == 2 && data == 0x40)
-		|| (chip->pdata->num_buck == 1 && data == 0x00)) {
-		if (data == 0)
-			chip->num_regulator = 1;
-		else
-			chip->num_regulator = 2;
-	} else {
+	if (chip->pdata->num_buck == 1 && data == 0x00) {
+		chip->num_regulator = 1;
+	} else if (chip->pdata->num_buck == 2 && data != 0x00) {
+		chip->num_regulator = 2;
+	} else {
 		dev_err(chip->dev, "Configuration is mismatched\n");
 		return -EINVAL;
 	}
@@ -438,6 +451,9 @@
 	case DA9213_DEVICE_ID:
 		chip->chip_id = DA9213;
 		break;
+	case DA9215_DEVICE_ID:
+		chip->chip_id = DA9215;
+		break;
 	default:
 		dev_err(chip->dev, "Unsupported device id = 0x%x.\n", data);
 		return -ENODEV;
@@ -478,6 +494,7 @@
 static const struct i2c_device_id da9211_i2c_id[] = {
 	{"da9211", DA9211},
 	{"da9213", DA9213},
+	{"da9215", DA9215},
 	{},
 };
 MODULE_DEVICE_TABLE(i2c, da9211_i2c_id);
@@ -486,6 +503,7 @@
 static const struct of_device_id da9211_dt_ids[] = {
 	{ .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] },
 	{ .compatible = "dlg,da9213", .data = &da9211_i2c_id[1] },
+	{ .compatible = "dlg,da9215", .data = &da9211_i2c_id[2] },
 	{},
 };
 MODULE_DEVICE_TABLE(of, da9211_dt_ids);
@@ -494,7 +512,6 @@
 static struct i2c_driver da9211_regulator_driver = {
 	.driver = {
 		.name = "da9211",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9211_dt_ids),
 	},
 	.probe = da9211_i2c_probe,
@@ -504,5 +521,5 @@
 module_i2c_driver(da9211_regulator_driver);
 
 MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>");
-MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213");
-MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Regulator device driver for Dialog DA9211/DA9213/DA9215");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h
index 93fa9df..d6ad96f 100644
--- a/drivers/regulator/da9211-regulator.h
+++ b/drivers/regulator/da9211-regulator.h
@@ -1,16 +1,16 @@
 /*
- * da9211-regulator.h - Regulator definitions for DA9211/DA9213
- * Copyright (C) 2014  Dialog Semiconductor Ltd.
+ * da9211-regulator.h - Regulator definitions for DA9211/DA9213/DA9215
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  */
 
 #ifndef __DA9211_REGISTERS_H__
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index 4286568..4940e82 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -439,6 +439,7 @@
 	},
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, fan53555_id);
 
 static struct i2c_driver fan53555_regulator_driver = {
 	.driver = {
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 6e5da95..4abd8e9 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -156,7 +156,6 @@
 static struct i2c_driver isl6271a_i2c_driver = {
 	.driver = {
 		.name = "isl6271a",
-		.owner = THIS_MODULE,
 	},
 	.probe = isl6271a_probe,
 	.id_table = isl6271a_id,
diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c
index 6e3a15f..257c194 100644
--- a/drivers/regulator/isl9305.c
+++ b/drivers/regulator/isl9305.c
@@ -183,6 +183,7 @@
 	{ .compatible = "isil,isl9305h" },
 	{},
 };
+MODULE_DEVICE_TABLE(of, isl9305_dt_ids);
 #endif
 
 static const struct i2c_device_id isl9305_i2c_id[] = {
@@ -195,7 +196,6 @@
 static struct i2c_driver isl9305_regulator_driver = {
 	.driver = {
 		.name = "isl9305",
-		.owner = THIS_MODULE,
 		.of_match_table	= of_match_ptr(isl9305_dt_ids),
 	},
 	.probe = isl9305_i2c_probe,
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 66fd233..15c25c6 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -452,7 +452,6 @@
 static struct i2c_driver lp3971_i2c_driver = {
 	.driver = {
 		.name = "LP3971",
-		.owner = THIS_MODULE,
 	},
 	.probe    = lp3971_i2c_probe,
 	.id_table = lp3971_i2c_id,
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index aea485a..3a7e96e 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -550,7 +550,6 @@
 static struct i2c_driver lp3972_i2c_driver = {
 	.driver = {
 		.name = "lp3972",
-		.owner = THIS_MODULE,
 	},
 	.probe    = lp3972_i2c_probe,
 	.id_table = lp3972_i2c_id,
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index 3de328a..e5af072 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -849,7 +849,7 @@
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	of_property_read_u8(np, "ti,general-config", &pdata->general_config);
 	if (of_find_property(np, "ti,update-config", NULL))
@@ -857,7 +857,7 @@
 
 	pdata->dvs = devm_kzalloc(dev, sizeof(struct lp872x_dvs), GFP_KERNEL);
 	if (!pdata->dvs)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	pdata->dvs->gpio = of_get_named_gpio(np, "ti,dvs-gpio", 0);
 	of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel);
@@ -903,15 +903,21 @@
 static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 {
 	struct lp872x *lp;
+	struct lp872x_platform_data *pdata;
 	int ret;
 	const int lp872x_num_regulators[] = {
 		[LP8720] = LP8720_NUM_REGULATORS,
 		[LP8725] = LP8725_NUM_REGULATORS,
 	};
 
-	if (cl->dev.of_node)
-		cl->dev.platform_data = lp872x_populate_pdata_from_dt(&cl->dev,
+	if (cl->dev.of_node) {
+		pdata = lp872x_populate_pdata_from_dt(&cl->dev,
 					      (enum lp872x_id)id->driver_data);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+	} else {
+		pdata = dev_get_platdata(&cl->dev);
+	}
 
 	lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
 	if (!lp)
@@ -927,7 +933,7 @@
 	}
 
 	lp->dev = &cl->dev;
-	lp->pdata = dev_get_platdata(&cl->dev);
+	lp->pdata = pdata;
 	lp->chipid = id->driver_data;
 	i2c_set_clientdata(cl, lp);
 
@@ -955,7 +961,6 @@
 static struct i2c_driver lp872x_driver = {
 	.driver = {
 		.name = "lp872x",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(lp872x_dt_ids),
 	},
 	.probe = lp872x_probe,
diff --git a/drivers/regulator/ltc3589.c b/drivers/regulator/ltc3589.c
index 0ce8e4e..972c386 100644
--- a/drivers/regulator/ltc3589.c
+++ b/drivers/regulator/ltc3589.c
@@ -378,7 +378,7 @@
 	return false;
 }
 
-static struct reg_default ltc3589_reg_defaults[] = {
+static const struct reg_default ltc3589_reg_defaults[] = {
 	{ LTC3589_SCR1,   0x00 },
 	{ LTC3589_OVEN,   0x00 },
 	{ LTC3589_SCR2,   0x00 },
@@ -542,7 +542,6 @@
 static struct i2c_driver ltc3589_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
-		.owner = THIS_MODULE,
 	},
 	.probe = ltc3589_probe,
 	.id_table = ltc3589_i2c_id,
@@ -552,4 +551,3 @@
 MODULE_AUTHOR("Philipp Zabel <p.zabel@pengutronix.de>");
 MODULE_DESCRIPTION("Regulator driver for Linear Technology LTC3589(-1,2)");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:ltc3589");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index d2a8c64c..2c1228d 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -304,7 +304,6 @@
 	.probe = max1586_pmic_probe,
 	.driver		= {
 		.name	= "max1586",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(max1586_of_match),
 	},
 	.id_table	= max1586_id,
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index 38722c8..de730fd 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -1,8 +1,9 @@
 /*
- * max77693.c - Regulator driver for the Maxim 77693
+ * max77693.c - Regulator driver for the Maxim 77693 and 77843
  *
- * Copyright (C) 2013 Samsung Electronics
+ * Copyright (C) 2013-2015 Samsung Electronics
  * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ * Krzysztof Kozlowski <k.kozlowski.k@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -29,38 +30,64 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 #include <linux/mfd/max77693.h>
+#include <linux/mfd/max77693-common.h>
 #include <linux/mfd/max77693-private.h>
+#include <linux/mfd/max77843-private.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regmap.h>
 
-#define CHGIN_ILIM_STEP_20mA			20000
+/*
+ * ID for MAX77843 regulators.
+ * There is no need for these on the MAX77693.
+ */
+enum max77843_regulator_type {
+	MAX77843_SAFEOUT1 = 0,
+	MAX77843_SAFEOUT2,
+	MAX77843_CHARGER,
+
+	MAX77843_NUM,
+};
+
+/* Register differences between chargers: MAX77693 and MAX77843 */
+struct chg_reg_data {
+	unsigned int linear_reg;
+	unsigned int linear_mask;
+	unsigned int uA_step;
+	unsigned int min_sel;
+};
 
 /*
- * CHARGER regulator - Min : 20mA, Max : 2580mA, step : 20mA
+ * MAX77693 CHARGER regulator - Min : 20mA, Max : 2580mA, step : 20mA
  * 0x00, 0x01, 0x02, 0x03	= 60 mA
  * 0x04 ~ 0x7E			= (60 + (X - 3) * 20) mA
+ * Actually for MAX77693 the driver manipulates the maximum input current,
+ * not the fast charge current (output). This should be fixed.
+ *
+ * On MAX77843 the calculation formula is the same (except values).
+ * Fortunately it properly manipulates the fast charge current.
  */
 static int max77693_chg_get_current_limit(struct regulator_dev *rdev)
 {
+	const struct chg_reg_data *reg_data = rdev_get_drvdata(rdev);
 	unsigned int chg_min_uA = rdev->constraints->min_uA;
 	unsigned int chg_max_uA = rdev->constraints->max_uA;
 	unsigned int reg, sel;
 	unsigned int val;
 	int ret;
 
-	ret = regmap_read(rdev->regmap, MAX77693_CHG_REG_CHG_CNFG_09, &reg);
+	ret = regmap_read(rdev->regmap, reg_data->linear_reg, &reg);
 	if (ret < 0)
 		return ret;
 
-	sel = reg & CHG_CNFG_09_CHGIN_ILIM_MASK;
+	sel = reg & reg_data->linear_mask;
 
 	/* the first four codes for charger current are all 60mA */
-	if (sel <= 3)
+	if (sel <= reg_data->min_sel)
 		sel = 0;
 	else
-		sel -= 3;
+		sel -= reg_data->min_sel;
 
-	val = chg_min_uA + CHGIN_ILIM_STEP_20mA * sel;
+	val = chg_min_uA + reg_data->uA_step * sel;
 	if (val > chg_max_uA)
 		return -EINVAL;
 
@@ -70,23 +97,43 @@
 static int max77693_chg_set_current_limit(struct regulator_dev *rdev,
 						int min_uA, int max_uA)
 {
+	const struct chg_reg_data *reg_data = rdev_get_drvdata(rdev);
 	unsigned int chg_min_uA = rdev->constraints->min_uA;
 	int sel = 0;
 
-	while (chg_min_uA + CHGIN_ILIM_STEP_20mA * sel < min_uA)
+	while (chg_min_uA + reg_data->uA_step * sel < min_uA)
 		sel++;
 
-	if (chg_min_uA + CHGIN_ILIM_STEP_20mA * sel > max_uA)
+	if (chg_min_uA + reg_data->uA_step * sel > max_uA)
 		return -EINVAL;
 
 	/* the first four codes for charger current are all 60mA */
-	sel += 3;
+	sel += reg_data->min_sel;
 
-	return regmap_write(rdev->regmap,
-				MAX77693_CHG_REG_CHG_CNFG_09, sel);
+	return regmap_write(rdev->regmap, reg_data->linear_reg, sel);
 }
 /* end of CHARGER regulator ops */
 
+/* Returns regmap suitable for given regulator on chosen device */
+static struct regmap *max77693_get_regmap(enum max77693_types type,
+					  struct max77693_dev *max77693,
+					  int reg_id)
+{
+	if (type == TYPE_MAX77693)
+		return max77693->regmap;
+
+	/* Else: TYPE_MAX77843 */
+	switch (reg_id) {
+	case MAX77843_SAFEOUT1:
+	case MAX77843_SAFEOUT2:
+		return max77693->regmap;
+	case MAX77843_CHARGER:
+		return max77693->regmap_chg;
+	default:
+		return max77693->regmap;
+	}
+}
+
 static const unsigned int max77693_safeout_table[] = {
 	4850000,
 	4900000,
@@ -111,7 +158,7 @@
 	.set_current_limit	= max77693_chg_set_current_limit,
 };
 
-#define regulator_desc_esafeout(_num)	{			\
+#define max77693_regulator_desc_esafeout(_num)	{		\
 	.name		= "ESAFEOUT"#_num,			\
 	.id		= MAX77693_ESAFEOUT##_num,		\
 	.of_match	= of_match_ptr("ESAFEOUT"#_num),	\
@@ -127,9 +174,9 @@
 	.enable_mask	= SAFEOUT_CTRL_ENSAFEOUT##_num##_MASK ,	\
 }
 
-static const struct regulator_desc regulators[] = {
-	regulator_desc_esafeout(1),
-	regulator_desc_esafeout(2),
+static const struct regulator_desc max77693_supported_regulators[] = {
+	max77693_regulator_desc_esafeout(1),
+	max77693_regulator_desc_esafeout(2),
 	{
 		.name = "CHARGER",
 		.id = MAX77693_CHARGER,
@@ -145,18 +192,86 @@
 	},
 };
 
+static const struct chg_reg_data max77693_chg_reg_data = {
+	.linear_reg	= MAX77693_CHG_REG_CHG_CNFG_09,
+	.linear_mask	= CHG_CNFG_09_CHGIN_ILIM_MASK,
+	.uA_step	= 20000,
+	.min_sel	= 3,
+};
+
+#define	max77843_regulator_desc_esafeout(num)	{			\
+	.name		= "SAFEOUT" # num,				\
+	.id		= MAX77843_SAFEOUT ## num,			\
+	.ops		= &max77693_safeout_ops,			\
+	.of_match	= of_match_ptr("SAFEOUT" # num),		\
+	.regulators_node = of_match_ptr("regulators"),			\
+	.type		= REGULATOR_VOLTAGE,				\
+	.owner		= THIS_MODULE,					\
+	.n_voltages	= ARRAY_SIZE(max77693_safeout_table),		\
+	.volt_table	= max77693_safeout_table,			\
+	.enable_reg	= MAX77843_SYS_REG_SAFEOUTCTRL,			\
+	.enable_mask	= MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT ## num,	\
+	.vsel_reg	= MAX77843_SYS_REG_SAFEOUTCTRL,			\
+	.vsel_mask	= MAX77843_REG_SAFEOUTCTRL_SAFEOUT ## num ## _MASK, \
+}
+
+static const struct regulator_desc max77843_supported_regulators[] = {
+	[MAX77843_SAFEOUT1] = max77843_regulator_desc_esafeout(1),
+	[MAX77843_SAFEOUT2] = max77843_regulator_desc_esafeout(2),
+	[MAX77843_CHARGER] = {
+		.name		= "CHARGER",
+		.id		= MAX77843_CHARGER,
+		.ops		= &max77693_charger_ops,
+		.of_match	= of_match_ptr("CHARGER"),
+		.regulators_node = of_match_ptr("regulators"),
+		.type		= REGULATOR_CURRENT,
+		.owner		= THIS_MODULE,
+		.enable_reg	= MAX77843_CHG_REG_CHG_CNFG_00,
+		.enable_mask	= MAX77843_CHG_MASK,
+		.enable_val	= MAX77843_CHG_MASK,
+	},
+};
+
+static const struct chg_reg_data max77843_chg_reg_data = {
+	.linear_reg	= MAX77843_CHG_REG_CHG_CNFG_02,
+	.linear_mask	= MAX77843_CHG_FAST_CHG_CURRENT_MASK,
+	.uA_step	= MAX77843_CHG_FAST_CHG_CURRENT_STEP,
+	.min_sel	= 2,
+};
+
 static int max77693_pmic_probe(struct platform_device *pdev)
 {
+	enum max77693_types type = platform_get_device_id(pdev)->driver_data;
 	struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+	const struct regulator_desc *regulators;
+	unsigned int regulators_size;
 	int i;
 	struct regulator_config config = { };
 
 	config.dev = iodev->dev;
-	config.regmap = iodev->regmap;
 
-	for (i = 0; i < ARRAY_SIZE(regulators); i++) {
+	switch (type) {
+	case TYPE_MAX77693:
+		regulators = max77693_supported_regulators;
+		regulators_size = ARRAY_SIZE(max77693_supported_regulators);
+		config.driver_data = (void *)&max77693_chg_reg_data;
+		break;
+	case TYPE_MAX77843:
+		regulators = max77843_supported_regulators;
+		regulators_size = ARRAY_SIZE(max77843_supported_regulators);
+		config.driver_data = (void *)&max77843_chg_reg_data;
+		break;
+	default:
+		dev_err(&pdev->dev, "Unsupported device type: %u\n", type);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < regulators_size; i++) {
 		struct regulator_dev *rdev;
 
+		config.regmap = max77693_get_regmap(type, iodev,
+						    regulators[i].id);
+
 		rdev = devm_regulator_register(&pdev->dev,
 						&regulators[i], &config);
 		if (IS_ERR(rdev)) {
@@ -170,7 +285,8 @@
 }
 
 static const struct platform_device_id max77693_pmic_id[] = {
-	{"max77693-pmic", 0},
+	{ "max77693-pmic", TYPE_MAX77693 },
+	{ "max77843-regulator", TYPE_MAX77843 },
 	{},
 };
 
@@ -184,8 +300,19 @@
 	.id_table = max77693_pmic_id,
 };
 
-module_platform_driver(max77693_pmic_driver);
+static int __init max77693_pmic_init(void)
+{
+	return platform_driver_register(&max77693_pmic_driver);
+}
+subsys_initcall(max77693_pmic_init);
 
-MODULE_DESCRIPTION("MAXIM MAX77693 regulator driver");
+static void __exit max77693_pmic_cleanup(void)
+{
+	platform_driver_unregister(&max77693_pmic_driver);
+}
+module_exit(max77693_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 77693/77843 regulator driver");
 MODULE_AUTHOR("Jonghwa Lee <jonghwa3.lee@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <k.kozlowski.k@gmail.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
deleted file mode 100644
index f4fd0d3..0000000
--- a/drivers/regulator/max77843.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*
- * max77843.c - Regulator driver for the Maxim MAX77843
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- * Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/mfd/max77843-private.h>
-#include <linux/regulator/of_regulator.h>
-
-enum max77843_regulator_type {
-	MAX77843_SAFEOUT1 = 0,
-	MAX77843_SAFEOUT2,
-	MAX77843_CHARGER,
-
-	MAX77843_NUM,
-};
-
-static const unsigned int max77843_safeout_voltage_table[] = {
-	4850000,
-	4900000,
-	4950000,
-	3300000,
-};
-
-static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
-{
-	struct regmap *regmap = rdev->regmap;
-	unsigned int chg_min_uA = rdev->constraints->min_uA;
-	unsigned int chg_max_uA = rdev->constraints->max_uA;
-	unsigned int val;
-	int ret;
-	unsigned int reg, sel;
-
-	ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
-	if (ret) {
-		dev_err(&rdev->dev, "Failed to read charger register\n");
-		return ret;
-	}
-
-	sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
-
-	if (sel < 0x03)
-		sel = 0;
-	else
-		sel -= 2;
-
-	val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
-	if (val > chg_max_uA)
-		return -EINVAL;
-
-	return val;
-}
-
-static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
-		int min_uA, int max_uA)
-{
-	struct regmap *regmap = rdev->regmap;
-	unsigned int chg_min_uA = rdev->constraints->min_uA;
-	int sel = 0;
-
-	while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
-		sel++;
-
-	if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
-		return -EINVAL;
-
-	sel += 2;
-
-	return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
-}
-
-static struct regulator_ops max77843_charger_ops = {
-	.is_enabled		= regulator_is_enabled_regmap,
-	.enable			= regulator_enable_regmap,
-	.disable		= regulator_disable_regmap,
-	.get_current_limit	= max77843_reg_get_current_limit,
-	.set_current_limit	= max77843_reg_set_current_limit,
-};
-
-static struct regulator_ops max77843_regulator_ops = {
-	.is_enabled             = regulator_is_enabled_regmap,
-	.enable                 = regulator_enable_regmap,
-	.disable                = regulator_disable_regmap,
-	.list_voltage		= regulator_list_voltage_table,
-	.get_voltage_sel        = regulator_get_voltage_sel_regmap,
-	.set_voltage_sel        = regulator_set_voltage_sel_regmap,
-};
-
-#define	MAX77843_SAFEOUT(num)	{ \
-	.name		= "SAFEOUT" # num, \
-	.id		= MAX77843_SAFEOUT ## num, \
-	.ops		= &max77843_regulator_ops, \
-	.of_match	= of_match_ptr("SAFEOUT" # num), \
-	.regulators_node = of_match_ptr("regulators"), \
-	.type		= REGULATOR_VOLTAGE, \
-	.owner		= THIS_MODULE, \
-	.n_voltages	= ARRAY_SIZE(max77843_safeout_voltage_table), \
-	.volt_table	= max77843_safeout_voltage_table, \
-	.enable_reg	= MAX77843_SYS_REG_SAFEOUTCTRL, \
-	.enable_mask	= MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT ## num, \
-	.vsel_reg	= MAX77843_SYS_REG_SAFEOUTCTRL, \
-	.vsel_mask	= MAX77843_REG_SAFEOUTCTRL_SAFEOUT ## num ## _MASK, \
-}
-
-static const struct regulator_desc max77843_supported_regulators[] = {
-	[MAX77843_SAFEOUT1] = MAX77843_SAFEOUT(1),
-	[MAX77843_SAFEOUT2] = MAX77843_SAFEOUT(2),
-	[MAX77843_CHARGER] = {
-		.name		= "CHARGER",
-		.id		= MAX77843_CHARGER,
-		.ops		= &max77843_charger_ops,
-		.of_match	= of_match_ptr("CHARGER"),
-		.regulators_node = of_match_ptr("regulators"),
-		.type		= REGULATOR_CURRENT,
-		.owner		= THIS_MODULE,
-		.enable_reg	= MAX77843_CHG_REG_CHG_CNFG_00,
-		.enable_mask	= MAX77843_CHG_MASK | MAX77843_CHG_BUCK_MASK,
-		.enable_val	= MAX77843_CHG_MASK | MAX77843_CHG_BUCK_MASK,
-	},
-};
-
-static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
-{
-	switch (reg_id) {
-	case MAX77843_SAFEOUT1:
-	case MAX77843_SAFEOUT2:
-		return max77843->regmap;
-	case MAX77843_CHARGER:
-		return max77843->regmap_chg;
-	default:
-		return max77843->regmap;
-	}
-}
-
-static int max77843_regulator_probe(struct platform_device *pdev)
-{
-	struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
-	struct regulator_config config = {};
-	int i;
-
-	config.dev = max77843->dev;
-	config.driver_data = max77843;
-
-	for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
-		struct regulator_dev *regulator;
-
-		config.regmap = max77843_get_regmap(max77843,
-				max77843_supported_regulators[i].id);
-
-		regulator = devm_regulator_register(&pdev->dev,
-				&max77843_supported_regulators[i], &config);
-		if (IS_ERR(regulator)) {
-			dev_err(&pdev->dev,
-					"Failed to regiser regulator-%d\n", i);
-			return PTR_ERR(regulator);
-		}
-	}
-
-	return 0;
-}
-
-static const struct platform_device_id max77843_regulator_id[] = {
-	{ "max77843-regulator", },
-	{ /* sentinel */ },
-};
-
-static struct platform_driver max77843_regulator_driver = {
-	.driver	= {
-		.name = "max77843-regulator",
-	},
-	.probe		= max77843_regulator_probe,
-	.id_table	= max77843_regulator_id,
-};
-
-static int __init max77843_regulator_init(void)
-{
-	return platform_driver_register(&max77843_regulator_driver);
-}
-subsys_initcall(max77843_regulator_init);
-
-static void __exit max77843_regulator_exit(void)
-{
-	platform_driver_unregister(&max77843_regulator_driver);
-}
-module_exit(max77843_regulator_exit);
-
-MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
-MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
-MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 4071d74..b87f62d 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -518,7 +518,6 @@
 	.probe = max8660_probe,
 	.driver		= {
 		.name	= "max8660",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= max8660_id,
 };
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index e94ddcf..5b75b7c 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -75,6 +75,7 @@
 #define MAX8973_DISCH_ENBABLE				BIT(5)
 #define MAX8973_FT_ENABLE				BIT(4)
 
+#define MAX8973_CKKADV_TRIP_MASK			0xC
 #define MAX8973_CKKADV_TRIP_DISABLE			0xC
 #define MAX8973_CKKADV_TRIP_75mV_PER_US			0x0
 #define MAX8973_CKKADV_TRIP_150mV_PER_US		0x4
@@ -282,6 +283,55 @@
 	return ret;
 }
 
+static int max8973_set_current_limit(struct regulator_dev *rdev,
+		int min_ua, int max_ua)
+{
+	struct max8973_chip *max = rdev_get_drvdata(rdev);
+	unsigned int val;
+	int ret;
+
+	if (max_ua <= 9000000)
+		val = MAX8973_CKKADV_TRIP_75mV_PER_US;
+	else if (max_ua <= 12000000)
+		val = MAX8973_CKKADV_TRIP_150mV_PER_US;
+	else
+		val = MAX8973_CKKADV_TRIP_DISABLE;
+
+	ret = regmap_update_bits(max->regmap, MAX8973_CONTROL2,
+			MAX8973_CKKADV_TRIP_MASK, val);
+	if (ret < 0) {
+		dev_err(max->dev, "register %d update failed: %d\n",
+				MAX8973_CONTROL2, ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int max8973_get_current_limit(struct regulator_dev *rdev)
+{
+	struct max8973_chip *max = rdev_get_drvdata(rdev);
+	unsigned int control2;
+	int ret;
+
+	ret = regmap_read(max->regmap, MAX8973_CONTROL2, &control2);
+	if (ret < 0) {
+		dev_err(max->dev, "register %d read failed: %d\n",
+				MAX8973_CONTROL2, ret);
+		return ret;
+	}
+	switch (control2 & MAX8973_CKKADV_TRIP_MASK) {
+	case MAX8973_CKKADV_TRIP_DISABLE:
+		return 15000000;
+	case MAX8973_CKKADV_TRIP_150mV_PER_US:
+		return 12000000;
+	case MAX8973_CKKADV_TRIP_75mV_PER_US:
+		return 9000000;
+	default:
+		break;
+	}
+	return 9000000;
+}
+
 static const struct regulator_ops max8973_dcdc_ops = {
 	.get_voltage_sel	= max8973_dcdc_get_voltage_sel,
 	.set_voltage_sel	= max8973_dcdc_set_voltage_sel,
@@ -421,6 +471,8 @@
 	struct device_node *np = dev->of_node;
 	int ret;
 	u32 pval;
+	bool etr_enable;
+	bool etr_sensitivity_high;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
@@ -452,6 +504,23 @@
 	if (of_property_read_bool(np, "maxim,enable-bias-control"))
 		pdata->control_flags  |= MAX8973_CONTROL_BIAS_ENABLE;
 
+	etr_enable = of_property_read_bool(np, "maxim,enable-etr");
+	etr_sensitivity_high = of_property_read_bool(np,
+				"maxim,enable-high-etr-sensitivity");
+	if (etr_sensitivity_high)
+		etr_enable = true;
+
+	if (etr_enable) {
+		if (etr_sensitivity_high)
+			pdata->control_flags |=
+				MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US;
+		else
+			pdata->control_flags |=
+				MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US;
+	} else {
+		pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED;
+	}
+
 	return pdata;
 }
 
@@ -568,6 +637,15 @@
 			max->lru_index[i] = i;
 		max->lru_index[0] = max->curr_vout_reg;
 		max->lru_index[max->curr_vout_reg] = 0;
+	} else {
+		/*
+		 * If there is no DVS GPIO, the VOUT register
+		 * address is fixed.
+		 */
+		max->ops.set_voltage_sel = regulator_set_voltage_sel_regmap;
+		max->ops.get_voltage_sel = regulator_get_voltage_sel_regmap;
+		max->desc.vsel_reg = max->curr_vout_reg;
+		max->desc.vsel_mask = MAX8973_VOUT_MASK;
 	}
 
 	if (pdata_from_dt)
@@ -613,6 +691,8 @@
 		max->ops.enable = regulator_enable_regmap;
 		max->ops.disable = regulator_disable_regmap;
 		max->ops.is_enabled = regulator_is_enabled_regmap;
+		max->ops.set_current_limit = max8973_set_current_limit;
+		max->ops.get_current_limit = max8973_get_current_limit;
 		break;
 	default:
 		break;
@@ -652,7 +732,6 @@
 	.driver = {
 		.name = "max8973",
 		.of_match_table = of_max8973_match_tbl,
-		.owner = THIS_MODULE,
 	},
 	.probe = max8973_probe,
 	.id_table = max8973_id,
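
For reference, a stand-alone sketch of the new max8973 current-limit mapping: the two-bit
CKKADV trip field in CONTROL2 encodes the three supported ceilings (9 A, 12 A, 15 A with
the trip disabled). This is not driver code; the register constants are copied from the
patch and main() only exercises the round trip.

#include <stdio.h>

#define MAX8973_CKKADV_TRIP_MASK		0xC
#define MAX8973_CKKADV_TRIP_DISABLE		0xC
#define MAX8973_CKKADV_TRIP_75mV_PER_US		0x0
#define MAX8973_CKKADV_TRIP_150mV_PER_US	0x4

static unsigned int limit_to_trip(int max_ua)
{
	if (max_ua <= 9000000)
		return MAX8973_CKKADV_TRIP_75mV_PER_US;
	if (max_ua <= 12000000)
		return MAX8973_CKKADV_TRIP_150mV_PER_US;
	return MAX8973_CKKADV_TRIP_DISABLE;
}

static int trip_to_limit(unsigned int control2)
{
	switch (control2 & MAX8973_CKKADV_TRIP_MASK) {
	case MAX8973_CKKADV_TRIP_DISABLE:	return 15000000;
	case MAX8973_CKKADV_TRIP_150mV_PER_US:	return 12000000;
	default:				return 9000000;
	}
}

int main(void)
{
	printf("10 A request -> trip 0x%x -> reported %d uA\n",
	       limit_to_trip(10000000), trip_to_limit(limit_to_trip(10000000)));
	return 0;
}

A 10 A request selects the 150 mV/us trip and is reported back as the 12 A ceiling, which
mirrors how the driver's set/get callbacks interact.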
diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c
new file mode 100644
index 0000000..02c4e5f
--- /dev/null
+++ b/drivers/regulator/mt6311-regulator.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/mt6311.h>
+#include <linux/slab.h>
+#include "mt6311-regulator.h"
+
+static const struct regmap_config mt6311_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = MT6311_FQMTR_CON4,
+};
+
+/* Default buck voltage limits, in microvolts */
+#define MT6311_MIN_UV		600000
+#define MT6311_MAX_UV		1393750
+#define MT6311_STEP_UV		6250
+
+static const struct regulator_linear_range buck_volt_range[] = {
+	REGULATOR_LINEAR_RANGE(MT6311_MIN_UV, 0, 0x7f, MT6311_STEP_UV),
+};
+
+static const struct regulator_ops mt6311_buck_ops = {
+	.list_voltage = regulator_list_voltage_linear_range,
+	.map_voltage = regulator_map_voltage_linear_range,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = regulator_get_voltage_sel_regmap,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+};
+
+static const struct regulator_ops mt6311_ldo_ops = {
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+};
+
+#define MT6311_BUCK(_id) \
+{\
+	.name = #_id,\
+	.ops = &mt6311_buck_ops,\
+	.of_match = of_match_ptr(#_id),\
+	.regulators_node = of_match_ptr("regulators"),\
+	.type = REGULATOR_VOLTAGE,\
+	.id = MT6311_ID_##_id,\
+	.n_voltages = (MT6311_MAX_UV - MT6311_MIN_UV) / MT6311_STEP_UV + 1,\
+	.min_uV = MT6311_MIN_UV,\
+	.uV_step = MT6311_STEP_UV,\
+	.owner = THIS_MODULE,\
+	.linear_ranges = buck_volt_range, \
+	.n_linear_ranges = ARRAY_SIZE(buck_volt_range), \
+	.enable_reg = MT6311_VDVFS11_CON9,\
+	.enable_mask = MT6311_PMIC_VDVFS11_EN_MASK,\
+	.vsel_reg = MT6311_VDVFS11_CON12,\
+	.vsel_mask = MT6311_PMIC_VDVFS11_VOSEL_MASK,\
+}
+
+#define MT6311_LDO(_id) \
+{\
+	.name = #_id,\
+	.ops = &mt6311_ldo_ops,\
+	.of_match = of_match_ptr(#_id),\
+	.regulators_node = of_match_ptr("regulators"),\
+	.type = REGULATOR_VOLTAGE,\
+	.id = MT6311_ID_##_id,\
+	.owner = THIS_MODULE,\
+	.enable_reg = MT6311_LDO_CON3,\
+	.enable_mask = MT6311_PMIC_RG_VBIASN_EN_MASK,\
+}
+
+static const struct regulator_desc mt6311_regulators[] = {
+	MT6311_BUCK(VDVFS),
+	MT6311_LDO(VBIASN),
+};
+
+/*
+ * I2C driver interface functions
+ */
+static int mt6311_i2c_probe(struct i2c_client *i2c,
+		const struct i2c_device_id *id)
+{
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+	struct regmap *regmap;
+	int i, ret;
+	unsigned int data;
+
+	regmap = devm_regmap_init_i2c(i2c, &mt6311_regmap_config);
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	ret = regmap_read(regmap, MT6311_SWCID, &data);
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Failed to read DEVICE_ID reg: %d\n", ret);
+		return ret;
+	}
+
+	switch (data) {
+	case MT6311_E1_CID_CODE:
+	case MT6311_E2_CID_CODE:
+	case MT6311_E3_CID_CODE:
+		break;
+	default:
+		dev_err(&i2c->dev, "Unsupported device id = 0x%x.\n", data);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < MT6311_MAX_REGULATORS; i++) {
+		config.dev = &i2c->dev;
+		config.regmap = regmap;
+
+		rdev = devm_regulator_register(&i2c->dev,
+			&mt6311_regulators[i], &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&i2c->dev,
+				"Failed to register MT6311 regulator\n");
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static const struct i2c_device_id mt6311_i2c_id[] = {
+	{"mt6311", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(i2c, mt6311_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mt6311_dt_ids[] = {
+	{ .compatible = "mediatek,mt6311-regulator",
+	  .data = &mt6311_i2c_id[0] },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mt6311_dt_ids);
+#endif
+
+static struct i2c_driver mt6311_regulator_driver = {
+	.driver = {
+		.name = "mt6311",
+		.of_match_table = of_match_ptr(mt6311_dt_ids),
+	},
+	.probe = mt6311_i2c_probe,
+	.id_table = mt6311_i2c_id,
+};
+
+module_i2c_driver(mt6311_regulator_driver);
+
+MODULE_AUTHOR("Henry Chen <henryc.chen@mediatek.com>");
+MODULE_DESCRIPTION("Regulator device driver for Mediatek MT6311");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/mt6311-regulator.h b/drivers/regulator/mt6311-regulator.h
new file mode 100644
index 0000000..5218db4
--- /dev/null
+++ b/drivers/regulator/mt6311-regulator.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MT6311_REGULATOR_H__
+#define __MT6311_REGULATOR_H__
+
+#define MT6311_SWCID              0x01
+
+#define MT6311_TOP_INT_CON        0x18
+#define MT6311_TOP_INT_MON        0x19
+
+#define MT6311_VDVFS11_CON0       0x87
+#define MT6311_VDVFS11_CON7       0x88
+#define MT6311_VDVFS11_CON8       0x89
+#define MT6311_VDVFS11_CON9       0x8A
+#define MT6311_VDVFS11_CON10      0x8B
+#define MT6311_VDVFS11_CON11      0x8C
+#define MT6311_VDVFS11_CON12      0x8D
+#define MT6311_VDVFS11_CON13      0x8E
+#define MT6311_VDVFS11_CON14      0x8F
+#define MT6311_VDVFS11_CON15      0x90
+#define MT6311_VDVFS11_CON16      0x91
+#define MT6311_VDVFS11_CON17      0x92
+#define MT6311_VDVFS11_CON18      0x93
+#define MT6311_VDVFS11_CON19      0x94
+
+#define MT6311_LDO_CON0           0xCC
+#define MT6311_LDO_OCFB0          0xCD
+#define MT6311_LDO_CON2           0xCE
+#define MT6311_LDO_CON3           0xCF
+#define MT6311_LDO_CON4           0xD0
+#define MT6311_FQMTR_CON0         0xD1
+#define MT6311_FQMTR_CON1         0xD2
+#define MT6311_FQMTR_CON2         0xD3
+#define MT6311_FQMTR_CON3         0xD4
+#define MT6311_FQMTR_CON4         0xD5
+
+#define MT6311_PMIC_RG_INT_POL_MASK                      0x1
+#define MT6311_PMIC_RG_INT_EN_MASK                       0x2
+#define MT6311_PMIC_RG_BUCK_OC_INT_STATUS_MASK           0x10
+
+#define MT6311_PMIC_VDVFS11_EN_CTRL_MASK                 0x1
+#define MT6311_PMIC_VDVFS11_VOSEL_CTRL_MASK              0x2
+#define MT6311_PMIC_VDVFS11_EN_SEL_MASK                  0x3
+#define MT6311_PMIC_VDVFS11_VOSEL_SEL_MASK               0xc
+#define MT6311_PMIC_VDVFS11_EN_MASK                      0x1
+#define MT6311_PMIC_VDVFS11_VOSEL_MASK                   0x7F
+#define MT6311_PMIC_VDVFS11_VOSEL_ON_MASK                0x7F
+#define MT6311_PMIC_VDVFS11_VOSEL_SLEEP_MASK             0x7F
+#define MT6311_PMIC_NI_VDVFS11_VOSEL_MASK                0x7F
+
+#define MT6311_PMIC_RG_VBIASN_EN_MASK                    0x1
+
+#endif
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index b1c485b..250700c 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -107,6 +107,9 @@
 	if (!of_property_read_u32(np, "regulator-system-load", &pval))
 		constraints->system_load = pval;
 
+	constraints->over_current_protection = of_property_read_bool(np,
+					"regulator-over-current-protection");
+
 	for (i = 0; i < ARRAY_SIZE(regulator_states); i++) {
 		switch (i) {
 		case PM_SUSPEND_MEM:
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index bd2b75c..4fa7bca 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -30,6 +30,7 @@
 struct pbias_reg_info {
 	u32 enable;
 	u32 enable_mask;
+	u32 disable_val;
 	u32 vmode;
 	unsigned int enable_time;
 	char *name;
@@ -62,6 +63,7 @@
 	.enable = BIT(1),
 	.enable_mask = BIT(1),
 	.vmode = BIT(0),
+	.disable_val = 0,
 	.enable_time = 100,
 	.name = "pbias_mmc_omap2430"
 };
@@ -77,6 +79,7 @@
 static const struct pbias_reg_info pbias_mmc_omap4 = {
 	.enable = BIT(26) | BIT(22),
 	.enable_mask = BIT(26) | BIT(25) | BIT(22),
+	.disable_val = BIT(25),
 	.vmode = BIT(21),
 	.enable_time = 100,
 	.name = "pbias_mmc_omap4"
@@ -85,6 +88,7 @@
 static const struct pbias_reg_info pbias_mmc_omap5 = {
 	.enable = BIT(27) | BIT(26),
 	.enable_mask = BIT(27) | BIT(25) | BIT(26),
+	.disable_val = BIT(25),
 	.vmode = BIT(21),
 	.enable_time = 100,
 	.name = "pbias_mmc_omap5"
@@ -159,6 +163,7 @@
 		drvdata[data_idx].desc.enable_reg = res->start;
 		drvdata[data_idx].desc.enable_mask = info->enable_mask;
 		drvdata[data_idx].desc.enable_val = info->enable;
+		drvdata[data_idx].desc.disable_val = info->disable_val;
 
 		cfg.init_data = pbias_matches[idx].init_data;
 		cfg.driver_data = &drvdata[data_idx];
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 8cc8d18..2a44e5d 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -643,7 +643,6 @@
 	.id_table = pfuze_device_id,
 	.driver = {
 		.name = "pfuze100-regulator",
-		.owner = THIS_MODULE,
 		.of_match_table = pfuze_dt_ids,
 	},
 	.probe = pfuze100_regulator_probe,
@@ -653,4 +652,3 @@
 MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
 MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100/PFUZE200 PMIC");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:pfuze100-regulator");
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index ffa9612..fc3166d 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -10,6 +10,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
@@ -21,9 +22,15 @@
 #include <linux/pwm.h>
 
 struct pwm_regulator_data {
-	struct pwm_voltages *duty_cycle_table;
+	/*  Shared */
 	struct pwm_device *pwm;
+
+	/* Voltage table */
+	struct pwm_voltages *duty_cycle_table;
 	int state;
+
+	/* Continuous voltage */
+	int volt_uV;
 };
 
 struct pwm_voltages {
@@ -31,6 +38,9 @@
 	unsigned int dutycycle;
 };
 
+/**
+ * Voltage table call-backs
+ */
 static int pwm_regulator_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
@@ -79,29 +89,129 @@
 	return drvdata->duty_cycle_table[selector].uV;
 }
 
-static struct regulator_ops pwm_regulator_voltage_ops = {
+/**
+ * Continuous voltage call-backs
+ */
+static int pwm_voltage_to_duty_cycle_percentage(struct regulator_dev *rdev, int req_uV)
+{
+	int min_uV = rdev->constraints->min_uV;
+	int max_uV = rdev->constraints->max_uV;
+	int diff = max_uV - min_uV;
+
+	return 100 - (((req_uV * 100) - (min_uV * 100)) / diff);
+}
+
+static int pwm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+
+	return drvdata->volt_uV;
+}
+
+static int pwm_regulator_set_voltage(struct regulator_dev *rdev,
+					int min_uV, int max_uV,
+					unsigned *selector)
+{
+	struct pwm_regulator_data *drvdata = rdev_get_drvdata(rdev);
+	unsigned int ramp_delay = rdev->constraints->ramp_delay;
+	unsigned int period = pwm_get_period(drvdata->pwm);
+	int duty_cycle;
+	int ret;
+
+	duty_cycle = pwm_voltage_to_duty_cycle_percentage(rdev, min_uV);
+
+	ret = pwm_config(drvdata->pwm, (period / 100) * duty_cycle, period);
+	if (ret) {
+		dev_err(&rdev->dev, "Failed to configure PWM\n");
+		return ret;
+	}
+
+	ret = pwm_enable(drvdata->pwm);
+	if (ret) {
+		dev_err(&rdev->dev, "Failed to enable PWM\n");
+		return ret;
+	}
+	drvdata->volt_uV = min_uV;
+
+	/* Delay required by PWM regulator to settle to the new voltage */
+	usleep_range(ramp_delay, ramp_delay + 1000);
+
+	return 0;
+}
+
+static struct regulator_ops pwm_regulator_voltage_table_ops = {
 	.set_voltage_sel = pwm_regulator_set_voltage_sel,
 	.get_voltage_sel = pwm_regulator_get_voltage_sel,
 	.list_voltage    = pwm_regulator_list_voltage,
 	.map_voltage     = regulator_map_voltage_iterate,
 };
 
+static struct regulator_ops pwm_regulator_voltage_continuous_ops = {
+	.get_voltage = pwm_regulator_get_voltage,
+	.set_voltage = pwm_regulator_set_voltage,
+};
+
 static struct regulator_desc pwm_regulator_desc = {
 	.name		= "pwm-regulator",
-	.ops		= &pwm_regulator_voltage_ops,
 	.type		= REGULATOR_VOLTAGE,
 	.owner		= THIS_MODULE,
 	.supply_name    = "pwm",
 };
 
+static int pwm_regulator_init_table(struct platform_device *pdev,
+				    struct pwm_regulator_data *drvdata)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct pwm_voltages *duty_cycle_table;
+	unsigned int length = 0;
+	int ret;
+
+	of_find_property(np, "voltage-table", &length);
+
+	if ((length < sizeof(*duty_cycle_table)) ||
+	    (length % sizeof(*duty_cycle_table))) {
+		dev_err(&pdev->dev,
+			"voltage-table length(%d) is invalid\n",
+			length);
+		return -EINVAL;
+	}
+
+	duty_cycle_table = devm_kzalloc(&pdev->dev, length, GFP_KERNEL);
+	if (!duty_cycle_table)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(np, "voltage-table",
+					 (u32 *)duty_cycle_table,
+					 length / sizeof(u32));
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to read voltage-table\n");
+		return ret;
+	}
+
+	drvdata->duty_cycle_table	= duty_cycle_table;
+	pwm_regulator_desc.ops		= &pwm_regulator_voltage_table_ops;
+	pwm_regulator_desc.n_voltages	= length / sizeof(*duty_cycle_table);
+
+	return 0;
+}
+
+static int pwm_regulator_init_continuous(struct platform_device *pdev,
+					 struct pwm_regulator_data *drvdata)
+{
+	pwm_regulator_desc.ops = &pwm_regulator_voltage_continuous_ops;
+	pwm_regulator_desc.continuous_voltage_range = true;
+
+	return 0;
+}
+
 static int pwm_regulator_probe(struct platform_device *pdev)
 {
+	const struct regulator_init_data *init_data;
 	struct pwm_regulator_data *drvdata;
-	struct property *prop;
 	struct regulator_dev *regulator;
 	struct regulator_config config = { };
 	struct device_node *np = pdev->dev.of_node;
-	int length, ret;
+	int ret;
 
 	if (!np) {
 		dev_err(&pdev->dev, "Device Tree node missing\n");
@@ -112,44 +222,22 @@
 	if (!drvdata)
 		return -ENOMEM;
 
-	/* determine the number of voltage-table */
-	prop = of_find_property(np, "voltage-table", &length);
-	if (!prop) {
-		dev_err(&pdev->dev, "No voltage-table\n");
-		return -EINVAL;
-	}
-
-	if ((length < sizeof(*drvdata->duty_cycle_table)) ||
-	    (length % sizeof(*drvdata->duty_cycle_table))) {
-		dev_err(&pdev->dev, "voltage-table length(%d) is invalid\n",
-			length);
-		return -EINVAL;
-	}
-
-	pwm_regulator_desc.n_voltages = length / sizeof(*drvdata->duty_cycle_table);
-
-	drvdata->duty_cycle_table = devm_kzalloc(&pdev->dev,
-						 length, GFP_KERNEL);
-	if (!drvdata->duty_cycle_table)
-		return -ENOMEM;
-
-	/* read voltage table from DT property */
-	ret = of_property_read_u32_array(np, "voltage-table",
-					 (u32 *)drvdata->duty_cycle_table,
-					 length / sizeof(u32));
-	if (ret < 0) {
-		dev_err(&pdev->dev, "read voltage-table failed\n");
+	if (of_find_property(np, "voltage-table", NULL))
+		ret = pwm_regulator_init_table(pdev, drvdata);
+	else
+		ret = pwm_regulator_init_continuous(pdev, drvdata);
+	if (ret)
 		return ret;
-	}
 
-	config.init_data = of_get_regulator_init_data(&pdev->dev, np,
-						      &pwm_regulator_desc);
-	if (!config.init_data)
+	init_data = of_get_regulator_init_data(&pdev->dev, np,
+					       &pwm_regulator_desc);
+	if (!init_data)
 		return -ENOMEM;
 
 	config.of_node = np;
 	config.dev = &pdev->dev;
 	config.driver_data = drvdata;
+	config.init_data = init_data;
 
 	drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
 	if (IS_ERR(drvdata->pwm)) {
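
The continuous-voltage mode added above maps a requested voltage linearly onto a duty
cycle, with the minimum voltage corresponding to 100%. A small stand-alone sketch of that
formula follows; the 0.9 V..1.4 V constraint window is a made-up example, not something
taken from the patch.

#include <stdio.h>

static int volt_to_duty_pct(int req_uV, int min_uV, int max_uV)
{
	int diff = max_uV - min_uV;

	return 100 - (((req_uV * 100) - (min_uV * 100)) / diff);
}

int main(void)
{
	int min_uV = 900000, max_uV = 1400000;	/* hypothetical constraints */

	printf("%d uV -> %d%%\n", min_uV, volt_to_duty_pct(min_uV, min_uV, max_uV));
	printf("%d uV -> %d%%\n", 1150000, volt_to_duty_pct(1150000, min_uV, max_uV));
	printf("%d uV -> %d%%\n", max_uV, volt_to_duty_pct(max_uV, min_uV, max_uV));
	return 0;
}

With these numbers the minimum maps to 100%, the midpoint to 50% and the maximum to 0%,
which is the inverted relationship pwm_regulator_set_voltage() then programs via
pwm_config().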
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
new file mode 100644
index 0000000..9c6167d
--- /dev/null
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+struct qcom_rpm_reg {
+	struct device *dev;
+
+	struct qcom_smd_rpm *rpm;
+
+	u32 type;
+	u32 id;
+
+	struct regulator_desc desc;
+
+	int is_enabled;
+	int uV;
+};
+
+struct rpm_regulator_req {
+	u32 key;
+	u32 nbytes;
+	u32 value;
+};
+
+#define RPM_KEY_SWEN	0x6e657773 /* "swen" */
+#define RPM_KEY_UV	0x00007675 /* "uv" */
+#define RPM_KEY_MA	0x0000616d /* "ma" */
+
+static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
+				struct rpm_regulator_req *req,
+				size_t size)
+{
+	return qcom_rpm_smd_write(vreg->rpm,
+				  QCOM_SMD_RPM_ACTIVE_STATE,
+				  vreg->type,
+				  vreg->id,
+				  req, size);
+}
+
+static int rpm_reg_enable(struct regulator_dev *rdev)
+{
+	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+	struct rpm_regulator_req req;
+	int ret;
+
+	req.key = RPM_KEY_SWEN;
+	req.nbytes = sizeof(u32);
+	req.value = 1;
+
+	ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+	if (!ret)
+		vreg->is_enabled = 1;
+
+	return ret;
+}
+
+static int rpm_reg_is_enabled(struct regulator_dev *rdev)
+{
+	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->is_enabled;
+}
+
+static int rpm_reg_disable(struct regulator_dev *rdev)
+{
+	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+	struct rpm_regulator_req req;
+	int ret;
+
+	req.key = RPM_KEY_SWEN;
+	req.nbytes = sizeof(u32);
+	req.value = 0;
+
+	ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+	if (!ret)
+		vreg->is_enabled = 0;
+
+	return ret;
+}
+
+static int rpm_reg_get_voltage(struct regulator_dev *rdev)
+{
+	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->uV;
+}
+
+static int rpm_reg_set_voltage(struct regulator_dev *rdev,
+			       int min_uV,
+			       int max_uV,
+			       unsigned *selector)
+{
+	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+	struct rpm_regulator_req req;
+	int ret = 0;
+
+	req.key = RPM_KEY_UV;
+	req.nbytes = sizeof(u32);
+	req.value = min_uV;
+
+	ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+	if (!ret)
+		vreg->uV = min_uV;
+
+	return ret;
+}
+
+static int rpm_reg_set_load(struct regulator_dev *rdev, int load_uA)
+{
+	struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+	struct rpm_regulator_req req;
+
+	req.key = RPM_KEY_MA;
+	req.nbytes = sizeof(u32);
+	req.value = load_uA;
+
+	return rpm_reg_write_active(vreg, &req, sizeof(req));
+}
+
+static const struct regulator_ops rpm_smps_ldo_ops = {
+	.enable = rpm_reg_enable,
+	.disable = rpm_reg_disable,
+	.is_enabled = rpm_reg_is_enabled,
+
+	.get_voltage = rpm_reg_get_voltage,
+	.set_voltage = rpm_reg_set_voltage,
+
+	.set_load = rpm_reg_set_load,
+};
+
+static const struct regulator_ops rpm_switch_ops = {
+	.enable = rpm_reg_enable,
+	.disable = rpm_reg_disable,
+	.is_enabled = rpm_reg_is_enabled,
+};
+
+static const struct regulator_desc pm8x41_hfsmps = {
+	.linear_ranges = (struct regulator_linear_range[]) {
+		REGULATOR_LINEAR_RANGE( 375000,  0,  95, 12500),
+		REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+	},
+	.n_linear_ranges = 2,
+	.n_voltages = 159,
+	.ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8841_ftsmps = {
+	.linear_ranges = (struct regulator_linear_range[]) {
+		REGULATOR_LINEAR_RANGE(350000,  0, 184, 5000),
+		REGULATOR_LINEAR_RANGE(700000, 185, 339, 10000),
+	},
+	.n_linear_ranges = 2,
+	.n_voltages = 340,
+	.ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_boost = {
+	.linear_ranges = (struct regulator_linear_range[]) {
+		REGULATOR_LINEAR_RANGE(4000000, 0, 15, 100000),
+	},
+	.n_linear_ranges = 1,
+	.n_voltages = 16,
+	.ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_pldo = {
+	.linear_ranges = (struct regulator_linear_range[]) {
+		REGULATOR_LINEAR_RANGE( 750000,  0,  30, 25000),
+		REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+	},
+	.n_linear_ranges = 2,
+	.n_voltages = 100,
+	.ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_nldo = {
+	.linear_ranges = (struct regulator_linear_range[]) {
+		REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+	},
+	.n_linear_ranges = 1,
+	.n_voltages = 64,
+	.ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_lnldo = {
+	.fixed_uV = 1740000,
+	.n_voltages = 1,
+	.ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8941_switch = {
+	.ops = &rpm_switch_ops,
+};
+
+struct rpm_regulator_data {
+	const char *name;
+	u32 type;
+	u32 id;
+	const struct regulator_desc *desc;
+	const char *supply;
+};
+
+static const struct rpm_regulator_data rpm_pm8841_regulators[] = {
+	{ "s1", QCOM_SMD_RPM_SMPB, 1, &pm8x41_hfsmps, "vdd_s1" },
+	{ "s2", QCOM_SMD_RPM_SMPB, 2, &pm8841_ftsmps, "vdd_s2" },
+	{ "s3", QCOM_SMD_RPM_SMPB, 3, &pm8x41_hfsmps, "vdd_s3" },
+	{ "s4", QCOM_SMD_RPM_SMPB, 4, &pm8841_ftsmps, "vdd_s4" },
+	{ "s5", QCOM_SMD_RPM_SMPB, 5, &pm8841_ftsmps, "vdd_s5" },
+	{ "s6", QCOM_SMD_RPM_SMPB, 6, &pm8841_ftsmps, "vdd_s6" },
+	{ "s7", QCOM_SMD_RPM_SMPB, 7, &pm8841_ftsmps, "vdd_s7" },
+	{ "s8", QCOM_SMD_RPM_SMPB, 8, &pm8841_ftsmps, "vdd_s8" },
+	{}
+};
+
+static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
+	{ "s1", QCOM_SMD_RPM_SMPA, 1, &pm8x41_hfsmps, "vdd_s1" },
+	{ "s2", QCOM_SMD_RPM_SMPA, 2, &pm8x41_hfsmps, "vdd_s2" },
+	{ "s3", QCOM_SMD_RPM_SMPA, 3, &pm8x41_hfsmps, "vdd_s3" },
+	{ "s4", QCOM_SMD_RPM_BOOST, 1, &pm8941_boost },
+
+	{ "l1", QCOM_SMD_RPM_LDOA, 1, &pm8941_nldo, "vdd_l1_l3" },
+	{ "l2", QCOM_SMD_RPM_LDOA, 2, &pm8941_nldo, "vdd_l2_lvs1_2_3" },
+	{ "l3", QCOM_SMD_RPM_LDOA, 3, &pm8941_nldo, "vdd_l1_l3" },
+	{ "l4", QCOM_SMD_RPM_LDOA, 4, &pm8941_nldo, "vdd_l4_l11" },
+	{ "l5", QCOM_SMD_RPM_LDOA, 5, &pm8941_lnldo, "vdd_l5_l7" },
+	{ "l6", QCOM_SMD_RPM_LDOA, 6, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+	{ "l7", QCOM_SMD_RPM_LDOA, 7, &pm8941_lnldo, "vdd_l5_l7" },
+	{ "l8", QCOM_SMD_RPM_LDOA, 8, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+	{ "l9", QCOM_SMD_RPM_LDOA, 9, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+	{ "l10", QCOM_SMD_RPM_LDOA, 10, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+	{ "l11", QCOM_SMD_RPM_LDOA, 11, &pm8941_nldo, "vdd_l4_l11" },
+	{ "l12", QCOM_SMD_RPM_LDOA, 12, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+	{ "l13", QCOM_SMD_RPM_LDOA, 13, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+	{ "l14", QCOM_SMD_RPM_LDOA, 14, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+	{ "l15", QCOM_SMD_RPM_LDOA, 15, &pm8941_pldo, "vdd_l6_l12_l14_l15" },
+	{ "l16", QCOM_SMD_RPM_LDOA, 16, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+	{ "l17", QCOM_SMD_RPM_LDOA, 17, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+	{ "l18", QCOM_SMD_RPM_LDOA, 18, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+	{ "l19", QCOM_SMD_RPM_LDOA, 19, &pm8941_pldo, "vdd_l8_l16_l18_l19" },
+	{ "l20", QCOM_SMD_RPM_LDOA, 20, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+	{ "l21", QCOM_SMD_RPM_LDOA, 21, &pm8941_pldo, "vdd_l21" },
+	{ "l22", QCOM_SMD_RPM_LDOA, 22, &pm8941_pldo, "vdd_l9_l10_l17_l22" },
+	{ "l23", QCOM_SMD_RPM_LDOA, 23, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+	{ "l24", QCOM_SMD_RPM_LDOA, 24, &pm8941_pldo, "vdd_l13_l20_l23_l24" },
+
+	{ "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+	{ "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+	{ "lvs3", QCOM_SMD_RPM_VSA, 3, &pm8941_switch, "vdd_l2_lvs1_2_3" },
+
+	{ "5vs1", QCOM_SMD_RPM_VSA, 4, &pm8941_switch, "vin_5vs" },
+	{ "5vs2", QCOM_SMD_RPM_VSA, 5, &pm8941_switch, "vin_5vs" },
+
+	{}
+};
+
+static const struct of_device_id rpm_of_match[] = {
+	{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
+	{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+	{}
+};
+MODULE_DEVICE_TABLE(of, rpm_of_match);
+
+static int rpm_reg_probe(struct platform_device *pdev)
+{
+	const struct rpm_regulator_data *reg;
+	const struct of_device_id *match;
+	struct regulator_config config = { };
+	struct regulator_dev *rdev;
+	struct qcom_rpm_reg *vreg;
+	struct qcom_smd_rpm *rpm;
+
+	rpm = dev_get_drvdata(pdev->dev.parent);
+	if (!rpm) {
+		dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+		return -ENODEV;
+	}
+
+	match = of_match_device(rpm_of_match, &pdev->dev);
+	for (reg = match->data; reg->name; reg++) {
+		vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+		if (!vreg)
+			return -ENOMEM;
+
+		vreg->dev = &pdev->dev;
+		vreg->type = reg->type;
+		vreg->id = reg->id;
+		vreg->rpm = rpm;
+
+		memcpy(&vreg->desc, reg->desc, sizeof(vreg->desc));
+
+		vreg->desc.id = -1;
+		vreg->desc.owner = THIS_MODULE;
+		vreg->desc.type = REGULATOR_VOLTAGE;
+		vreg->desc.name = reg->name;
+		vreg->desc.supply_name = reg->supply;
+		vreg->desc.of_match = reg->name;
+
+		config.dev = &pdev->dev;
+		config.driver_data = vreg;
+		rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config);
+		if (IS_ERR(rdev)) {
+			dev_err(&pdev->dev, "failed to register %s\n", reg->name);
+			return PTR_ERR(rdev);
+		}
+	}
+
+	return 0;
+}
+
+static struct platform_driver rpm_reg_driver = {
+	.probe = rpm_reg_probe,
+	.driver = {
+		.name  = "qcom_rpm_smd_regulator",
+		.of_match_table = rpm_of_match,
+	},
+};
+
+static int __init rpm_reg_init(void)
+{
+	return platform_driver_register(&rpm_reg_driver);
+}
+subsys_initcall(rpm_reg_init);
+
+static void __exit rpm_reg_exit(void)
+{
+	platform_driver_unregister(&rpm_reg_driver);
+}
+module_exit(rpm_reg_exit)
+
+MODULE_DESCRIPTION("Qualcomm RPM regulator driver");
+MODULE_LICENSE("GPL v2");
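
The RPM request keys in the new driver are four-character ASCII tags packed little-endian,
as the hex comments suggest. The sketch below (host-side C, assumes a little-endian
machine, not part of the driver) simply prints them back as strings.

#include <stdio.h>
#include <string.h>

#define RPM_KEY_SWEN	0x6e657773 /* "swen" */
#define RPM_KEY_UV	0x00007675 /* "uv" */
#define RPM_KEY_MA	0x0000616d /* "ma" */

static void print_key(unsigned int key)
{
	char buf[5];

	memcpy(buf, &key, 4);	/* byte order matches a little-endian host */
	buf[4] = '\0';
	printf("0x%08x -> \"%s\"\n", key, buf);
}

int main(void)
{
	print_key(RPM_KEY_SWEN);
	print_key(RPM_KEY_UV);
	print_key(RPM_KEY_MA);
	return 0;
}

This yields "swen", "uv" and "ma", i.e. the switch-enable, microvolt and milliamp
parameters that rpm_reg_enable(), rpm_reg_set_voltage() and rpm_reg_set_load() send to the
RPM over SMD.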
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 850a30a..88a5dc8 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -26,6 +26,70 @@
 #include <linux/regmap.h>
 #include <linux/list.h>
 
+/* Pin control enable input pins. */
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_NONE		0x00
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN0		0x01
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN1		0x02
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN2		0x04
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_EN3		0x08
+#define SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT	0x10
+
+/* Pin control high power mode input pins. */
+#define SPMI_REGULATOR_PIN_CTRL_HPM_NONE		0x00
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN0			0x01
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN1			0x02
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN2			0x04
+#define SPMI_REGULATOR_PIN_CTRL_HPM_EN3			0x08
+#define SPMI_REGULATOR_PIN_CTRL_HPM_SLEEP_B		0x10
+#define SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT		0x20
+
+/*
+ * Used with enable parameters to specify that hardware default register values
+ * should be left unaltered.
+ */
+#define SPMI_REGULATOR_USE_HW_DEFAULT			2
+
+/* Soft start strength of a voltage switch type regulator */
+enum spmi_vs_soft_start_str {
+	SPMI_VS_SOFT_START_STR_0P05_UA = 0,
+	SPMI_VS_SOFT_START_STR_0P25_UA,
+	SPMI_VS_SOFT_START_STR_0P55_UA,
+	SPMI_VS_SOFT_START_STR_0P75_UA,
+	SPMI_VS_SOFT_START_STR_HW_DEFAULT,
+};
+
+/**
+ * struct spmi_regulator_init_data - spmi-regulator initialization data
+ * @pin_ctrl_enable:        Bit mask specifying which hardware pins should be
+ *				used to enable the regulator, if any
+ *			    Value should be an ORing of
+ *				SPMI_REGULATOR_PIN_CTRL_ENABLE_* constants.  If
+ *				the bit specified by
+ *				SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
+ *				set, then pin control enable hardware registers
+ *				will not be modified.
+ * @pin_ctrl_hpm:           Bit mask specifying which hardware pins should be
+ *				used to force the regulator into high power
+ *				mode, if any
+ *			    Value should be an ORing of
+ *				SPMI_REGULATOR_PIN_CTRL_HPM_* constants.  If
+ *				the bit specified by
+ *				SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
+ *				set, then pin control mode hardware registers
+ *				will not be modified.
+ * @vs_soft_start_strength: This parameter sets the soft start strength for
+ *				voltage switch type regulators.  Its value
+ *				should be one of SPMI_VS_SOFT_START_STR_*.  If
+ *				its value is SPMI_VS_SOFT_START_STR_HW_DEFAULT,
+ *				then the soft start strength will be left at its
+ *				default hardware value.
+ */
+struct spmi_regulator_init_data {
+	unsigned				pin_ctrl_enable;
+	unsigned				pin_ctrl_hpm;
+	enum spmi_vs_soft_start_str		vs_soft_start_strength;
+};
+
 /* These types correspond to unique register layouts. */
 enum spmi_regulator_logical_type {
 	SPMI_REGULATOR_LOGICAL_TYPE_SMPS,
@@ -458,6 +522,14 @@
 	return spmi_regulator_common_enable(rdev);
 }
 
+static int spmi_regulator_vs_ocp(struct regulator_dev *rdev)
+{
+	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+	u8 reg = SPMI_VS_OCP_OVERRIDE;
+
+	return spmi_vreg_write(vreg, SPMI_VS_REG_OCP, &reg, 1);
+}
+
 static int spmi_regulator_common_disable(struct regulator_dev *rdev)
 {
 	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
@@ -504,8 +576,7 @@
 	 * Force uV to be an allowed set point by applying a ceiling function to
 	 * the uV value.
 	 */
-	*voltage_sel = (uV - range->min_uV + range->step_uV - 1)
-			/ range->step_uV;
+	*voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
 	uV = *voltage_sel * range->step_uV + range->min_uV;
 
 	if (uV > max_uV) {
@@ -792,6 +863,9 @@
 	if (reg & SPMI_COMMON_MODE_HPM_MASK)
 		return REGULATOR_MODE_NORMAL;
 
+	if (reg & SPMI_COMMON_MODE_AUTO_MASK)
+		return REGULATOR_MODE_FAST;
+
 	return REGULATOR_MODE_IDLE;
 }
 
@@ -799,11 +873,13 @@
 spmi_regulator_common_set_mode(struct regulator_dev *rdev, unsigned int mode)
 {
 	struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
-	u8 mask = SPMI_COMMON_MODE_HPM_MASK;
+	u8 mask = SPMI_COMMON_MODE_HPM_MASK | SPMI_COMMON_MODE_AUTO_MASK;
 	u8 val = 0;
 
 	if (mode == REGULATOR_MODE_NORMAL)
-		val = mask;
+		val = SPMI_COMMON_MODE_HPM_MASK;
+	else if (mode == REGULATOR_MODE_FAST)
+		val = SPMI_COMMON_MODE_AUTO_MASK;
 
 	return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_MODE, val, mask);
 }
@@ -973,6 +1049,7 @@
 	.is_enabled		= spmi_regulator_common_is_enabled,
 	.set_pull_down		= spmi_regulator_common_set_pull_down,
 	.set_soft_start		= spmi_regulator_common_set_soft_start,
+	.set_over_current_protection = spmi_regulator_vs_ocp,
 };
 
 static struct regulator_ops spmi_boost_ops = {
@@ -1203,10 +1280,111 @@
 	return ret;
 }
 
+static int spmi_regulator_init_registers(struct spmi_regulator *vreg,
+				const struct spmi_regulator_init_data *data)
+{
+	int ret;
+	enum spmi_regulator_logical_type type;
+	u8 ctrl_reg[8], reg, mask;
+
+	type = vreg->logical_type;
+
+	ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
+	if (ret)
+		return ret;
+
+	/* Set up enable pin control. */
+	if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
+	     || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO
+	     || type == SPMI_REGULATOR_LOGICAL_TYPE_VS)
+	    && !(data->pin_ctrl_enable
+			& SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
+		ctrl_reg[SPMI_COMMON_IDX_ENABLE] &=
+			~SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+		ctrl_reg[SPMI_COMMON_IDX_ENABLE] |=
+		    data->pin_ctrl_enable & SPMI_COMMON_ENABLE_FOLLOW_ALL_MASK;
+	}
+
+	/* Set up mode pin control. */
+	if ((type == SPMI_REGULATOR_LOGICAL_TYPE_SMPS
+	    || type == SPMI_REGULATOR_LOGICAL_TYPE_LDO)
+		&& !(data->pin_ctrl_hpm
+			& SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+		ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+			~SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
+		ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+			data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_ALL_MASK;
+	}
+
+	if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS
+	   && !(data->pin_ctrl_hpm & SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+		ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+			~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+		ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+		       data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+	}
+
+	if ((type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+		|| type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+		|| type == SPMI_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+		&& !(data->pin_ctrl_hpm
+			& SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+		ctrl_reg[SPMI_COMMON_IDX_MODE] &=
+			~SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+		ctrl_reg[SPMI_COMMON_IDX_MODE] |=
+		       data->pin_ctrl_hpm & SPMI_COMMON_MODE_FOLLOW_AWAKE_MASK;
+	}
+
+	/* Write back any control register values that were modified. */
+	ret = spmi_vreg_write(vreg, SPMI_COMMON_REG_VOLTAGE_RANGE, ctrl_reg, 8);
+	if (ret)
+		return ret;
+
+	/* Set soft start strength and over current protection for VS. */
+	if (type == SPMI_REGULATOR_LOGICAL_TYPE_VS) {
+		if (data->vs_soft_start_strength
+				!= SPMI_VS_SOFT_START_STR_HW_DEFAULT) {
+			reg = data->vs_soft_start_strength
+				& SPMI_VS_SOFT_START_SEL_MASK;
+			mask = SPMI_VS_SOFT_START_SEL_MASK;
+			return spmi_vreg_update_bits(vreg,
+						     SPMI_VS_REG_SOFT_START,
+						     reg, mask);
+		}
+	}
+
+	return 0;
+}
+
+static void spmi_regulator_get_dt_config(struct spmi_regulator *vreg,
+		struct device_node *node, struct spmi_regulator_init_data *data)
+{
+	/*
+	 * Initialize configuration parameters to use hardware default in case
+	 * no value is specified via device tree.
+	 */
+	data->pin_ctrl_enable	    = SPMI_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
+	data->pin_ctrl_hpm	    = SPMI_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
+	data->vs_soft_start_strength	= SPMI_VS_SOFT_START_STR_HW_DEFAULT;
+
+	/* These bindings are optional, so it is okay if they aren't found. */
+	of_property_read_u32(node, "qcom,ocp-max-retries",
+		&vreg->ocp_max_retries);
+	of_property_read_u32(node, "qcom,ocp-retry-delay",
+		&vreg->ocp_retry_delay_ms);
+	of_property_read_u32(node, "qcom,pin-ctrl-enable",
+		&data->pin_ctrl_enable);
+	of_property_read_u32(node, "qcom,pin-ctrl-hpm", &data->pin_ctrl_hpm);
+	of_property_read_u32(node, "qcom,vs-soft-start-strength",
+		&data->vs_soft_start_strength);
+}
+
 static unsigned int spmi_regulator_of_map_mode(unsigned int mode)
 {
-	if (mode)
+	if (mode == 1)
 		return REGULATOR_MODE_NORMAL;
+	if (mode == 2)
+		return REGULATOR_MODE_FAST;
 
 	return REGULATOR_MODE_IDLE;
 }
@@ -1215,12 +1393,23 @@
 			    const struct regulator_desc *desc,
 			    struct regulator_config *config)
 {
+	struct spmi_regulator_init_data data = { };
 	struct spmi_regulator *vreg = config->driver_data;
 	struct device *dev = config->dev;
 	int ret;
 
-	vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES;
-	vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+	spmi_regulator_get_dt_config(vreg, node, &data);
+
+	if (!vreg->ocp_max_retries)
+		vreg->ocp_max_retries = SPMI_VS_OCP_DEFAULT_MAX_RETRIES;
+	if (!vreg->ocp_retry_delay_ms)
+		vreg->ocp_retry_delay_ms = SPMI_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+
+	ret = spmi_regulator_init_registers(vreg, &data);
+	if (ret) {
+		dev_err(dev, "common initialization failed, ret=%d\n", ret);
+		return ret;
+	}
 
 	if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
 		ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
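
The DIV_ROUND_UP() conversion in the set-point code above is a pure cleanup: it computes
the same ceiling as the open-coded expression it replaces. A tiny stand-alone check
follows; the 750000 uV / 12500 uV range values are assumed for illustration, not taken
from a specific set-point table.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int min_uV = 750000, step_uV = 12500;	/* assumed range */
	unsigned int uV;

	for (uV = 760000; uV <= 800000; uV += 10000) {
		unsigned int sel_old = (uV - min_uV + step_uV - 1) / step_uV;
		unsigned int sel_new = DIV_ROUND_UP(uV - min_uV, step_uV);

		printf("%u uV -> old %u, new %u, rounded to %u uV\n",
		       uV, sel_old, sel_new, min_uV + sel_new * step_uV);
	}
	return 0;
}

Both expressions produce identical selectors for every request, and the rounded-up voltage
is always the smallest set point at or above the requested minimum.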
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index 3fd4435..d86a3dc 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -16,12 +16,16 @@
  * more details.
  */
 
-#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
-#include <linux/mfd/rk808.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/mfd/rk808.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/of_regulator.h>
+#include <linux/gpio/consumer.h>
 
 /* Field Definitions */
 #define RK808_BUCK_VSEL_MASK	0x3f
@@ -36,12 +40,25 @@
 #define RK808_RAMP_RATE_6MV_PER_US	(2 << RK808_RAMP_RATE_OFFSET)
 #define RK808_RAMP_RATE_10MV_PER_US	(3 << RK808_RAMP_RATE_OFFSET)
 
+#define RK808_DVS2_POL		BIT(2)
+#define RK808_DVS1_POL		BIT(1)
+
 /* Offset from XXX_ON_VSEL to XXX_SLP_VSEL */
 #define RK808_SLP_REG_OFFSET 1
 
+/* Offset from XXX_ON_VSEL to XXX_DVS_VSEL */
+#define RK808_DVS_REG_OFFSET 2
+
 /* Offset from XXX_EN_REG to SLEEP_SET_OFF_XXX */
 #define RK808_SLP_SET_OFF_REG_OFFSET 2
 
+/* Max steps per write when raising Buck1/2 voltage (8 steps = 100 mV) */
+#define MAX_STEPS_ONE_TIME 8
+
+struct rk808_regulator_data {
+	struct gpio_desc *dvs_gpio[2];
+};
+
 static const int rk808_buck_config_regs[] = {
 	RK808_BUCK1_CONFIG_REG,
 	RK808_BUCK2_CONFIG_REG,
@@ -70,6 +87,131 @@
 	REGULATOR_LINEAR_RANGE(800000, 0, 17, 100000),
 };
 
+static int rk808_buck1_2_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
+	int id = rdev->desc->id - RK808_ID_DCDC1;
+	struct gpio_desc *gpio = pdata->dvs_gpio[id];
+	unsigned int val;
+	int ret;
+
+	if (!gpio || gpiod_get_value(gpio) == 0)
+		return regulator_get_voltage_sel_regmap(rdev);
+
+	ret = regmap_read(rdev->regmap,
+			  rdev->desc->vsel_reg + RK808_DVS_REG_OFFSET,
+			  &val);
+	if (ret != 0)
+		return ret;
+
+	val &= rdev->desc->vsel_mask;
+	val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+	return val;
+}
+
+static int rk808_buck1_2_i2c_set_voltage_sel(struct regulator_dev *rdev,
+					     unsigned sel)
+{
+	int ret, delta_sel;
+	unsigned int old_sel, tmp, val, mask = rdev->desc->vsel_mask;
+
+	ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+	if (ret != 0)
+		return ret;
+
+	tmp = val & ~mask;
+	old_sel = val & mask;
+	old_sel >>= ffs(mask) - 1;
+	delta_sel = sel - old_sel;
+
+	/*
+	 * Writing the final selector in a single register update risks
+	 * overshooting the output.  Raising the voltage in several steps,
+	 * 100 mV per write here, effectively avoids that problem.
+	 */
+	while (delta_sel > MAX_STEPS_ONE_TIME) {
+		old_sel += MAX_STEPS_ONE_TIME;
+		val = old_sel << (ffs(mask) - 1);
+		val |= tmp;
+
+		/*
+		 * i2c is 400kHz (2.5us per bit) and we must transmit _at least_
+		 * 3 bytes (24 bits) plus start and stop so 26 bits.  So we've
+		 * got more than 65 us between each voltage change and thus
+		 * won't ramp faster than ~1500 uV / us.
+		 */
+		ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, val);
+		delta_sel = sel - old_sel;
+	}
+
+	sel <<= ffs(mask) - 1;
+	val = tmp | sel;
+	ret = regmap_write(rdev->regmap, rdev->desc->vsel_reg, val);
+
+	/*
+	 * A direct register write ramps at roughly 100000 uV/us; waiting
+	 * 1 us is enough for the output to settle, so no extra delay is
+	 * needed afterwards.
+	 */
+	udelay(1);
+
+	return ret;
+}
+
+static int rk808_buck1_2_set_voltage_sel(struct regulator_dev *rdev,
+					 unsigned sel)
+{
+	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
+	int id = rdev->desc->id - RK808_ID_DCDC1;
+	struct gpio_desc *gpio = pdata->dvs_gpio[id];
+	unsigned int reg = rdev->desc->vsel_reg;
+	unsigned old_sel;
+	int ret, gpio_level;
+
+	if (!gpio)
+		return rk808_buck1_2_i2c_set_voltage_sel(rdev, sel);
+
+	gpio_level = gpiod_get_value(gpio);
+	if (gpio_level == 0) {
+		reg += RK808_DVS_REG_OFFSET;
+		ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &old_sel);
+	} else {
+		ret = regmap_read(rdev->regmap,
+				  reg + RK808_DVS_REG_OFFSET,
+				  &old_sel);
+	}
+
+	if (ret != 0)
+		return ret;
+
+	sel <<= ffs(rdev->desc->vsel_mask) - 1;
+	sel |= old_sel & ~rdev->desc->vsel_mask;
+
+	ret = regmap_write(rdev->regmap, reg, sel);
+	if (ret)
+		return ret;
+
+	gpiod_set_value(gpio, !gpio_level);
+
+	return ret;
+}
+
+static int rk808_buck1_2_set_voltage_time_sel(struct regulator_dev *rdev,
+				       unsigned int old_selector,
+				       unsigned int new_selector)
+{
+	struct rk808_regulator_data *pdata = rdev_get_drvdata(rdev);
+	int id = rdev->desc->id - RK808_ID_DCDC1;
+	struct gpio_desc *gpio = pdata->dvs_gpio[id];
+
+	/* If there is no dvs1/2 pin, we don't need to wait extra time here. */
+	if (!gpio)
+		return 0;
+
+	return regulator_set_voltage_time_sel(rdev, old_selector, new_selector);
+}
+
 static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
 {
 	unsigned int ramp_value = RK808_RAMP_RATE_10MV_PER_US;
@@ -137,8 +279,9 @@
 static struct regulator_ops rk808_buck1_2_ops = {
 	.list_voltage		= regulator_list_voltage_linear_range,
 	.map_voltage		= regulator_map_voltage_linear_range,
-	.get_voltage_sel	= regulator_get_voltage_sel_regmap,
-	.set_voltage_sel	= regulator_set_voltage_sel_regmap,
+	.get_voltage_sel	= rk808_buck1_2_get_voltage_sel_regmap,
+	.set_voltage_sel	= rk808_buck1_2_set_voltage_sel,
+	.set_voltage_time_sel	= rk808_buck1_2_set_voltage_time_sel,
 	.enable			= regulator_enable_regmap,
 	.disable		= regulator_disable_regmap,
 	.is_enabled		= regulator_is_enabled_regmap,
@@ -380,25 +523,69 @@
 	[RK808_ID_SWITCH2]	= { .name = "SWITCH_REG2" },
 };
 
+static int rk808_regulator_dt_parse_pdata(struct device *dev,
+				   struct device *client_dev,
+				   struct regmap *map,
+				   struct rk808_regulator_data *pdata)
+{
+	struct device_node *np;
+	int tmp, ret, i;
+
+	np = of_get_child_by_name(client_dev->of_node, "regulators");
+	if (!np)
+		return -ENXIO;
+
+	ret = of_regulator_match(dev, np, rk808_reg_matches,
+				 RK808_NUM_REGULATORS);
+	if (ret < 0)
+		goto dt_parse_end;
+
+	for (i = 0; i < ARRAY_SIZE(pdata->dvs_gpio); i++) {
+		pdata->dvs_gpio[i] =
+			devm_gpiod_get_index_optional(client_dev, "dvs", i,
+						      GPIOD_OUT_LOW);
+		if (IS_ERR(pdata->dvs_gpio[i])) {
+			ret = PTR_ERR(pdata->dvs_gpio[i]);
+			dev_err(dev, "failed to get dvs%d gpio (%d)\n", i, ret);
+			goto dt_parse_end;
+		}
+
+		if (!pdata->dvs_gpio[i]) {
+			dev_warn(dev, "there is no dvs%d gpio\n", i);
+			continue;
+		}
+
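+		/* match the PMIC's DVS pin polarity to the GPIO polarity from DT */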
+		tmp = i ? RK808_DVS2_POL : RK808_DVS1_POL;
+		ret = regmap_update_bits(map, RK808_IO_POL_REG, tmp,
+				gpiod_is_active_low(pdata->dvs_gpio[i]) ?
+				0 : tmp);
+	}
+
+dt_parse_end:
+	of_node_put(np);
+	return ret;
+}
+
 static int rk808_regulator_probe(struct platform_device *pdev)
 {
 	struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
 	struct i2c_client *client = rk808->i2c;
-	struct device_node *reg_np;
 	struct regulator_config config = {};
 	struct regulator_dev *rk808_rdev;
+	struct rk808_regulator_data *pdata;
 	int ret, i;
 
-	reg_np = of_get_child_by_name(client->dev.of_node, "regulators");
-	if (!reg_np)
-		return -ENXIO;
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
 
-	ret = of_regulator_match(&pdev->dev, reg_np, rk808_reg_matches,
-				 RK808_NUM_REGULATORS);
-	of_node_put(reg_np);
+	ret = rk808_regulator_dt_parse_pdata(&pdev->dev, &client->dev,
+					     rk808->regmap, pdata);
 	if (ret < 0)
 		return ret;
 
+	platform_set_drvdata(pdev, pdata);
+
 	/* Instantiate the regulators */
 	for (i = 0; i < RK808_NUM_REGULATORS; i++) {
 		if (!rk808_reg_matches[i].init_data ||
@@ -406,7 +593,7 @@
 			continue;
 
 		config.dev = &client->dev;
-		config.driver_data = rk808;
+		config.driver_data = pdata;
 		config.regmap = rk808->regmap;
 		config.of_node = rk808_reg_matches[i].of_node;
 		config.init_data = rk808_reg_matches[i].init_data;
@@ -427,6 +614,7 @@
 	.probe = rk808_regulator_probe,
 	.driver = {
 		.name = "rk808-regulator",
+		.owner = THIS_MODULE,
 	},
 };
 
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index c213e37..572816e 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -362,7 +362,6 @@
 static struct i2c_driver tps51632_i2c_driver = {
 	.driver = {
 		.name = "tps51632",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tps51632_of_match),
 	},
 	.probe = tps51632_probe,
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index a1fd626..f6a6d36 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -515,7 +515,6 @@
 static struct i2c_driver tps62360_i2c_driver = {
 	.driver = {
 		.name = "tps62360",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tps62360_of_match),
 	},
 	.probe = tps62360_probe,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index b941e56..5cc19b4 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -410,7 +410,6 @@
 static struct i2c_driver tps_65023_i2c_driver = {
 	.driver = {
 		.name = "tps65023",
-		.owner = THIS_MODULE,
 	},
 	.probe = tps_65023_probe,
 	.id_table = tps_65023_id,
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 2852de0..9e9d220 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -422,12 +422,12 @@
 		return NULL;
 
 	for (i = 0; i < num; i++) {
-		int id;
+		uintptr_t id;
 		if (!tps6586x_matches[i].init_data)
 			continue;
 
 		pdata->reg_init_data[i] = tps6586x_matches[i].init_data;
-		id = (int)tps6586x_matches[i].driver_data;
+		id = (uintptr_t)tps6586x_matches[i].driver_data;
 		if (id == TPS6586X_ID_SYS)
 			sys_rail = pdata->reg_init_data[i]->constraints.name;
 
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 157d421..85d5904 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,5 +1,8 @@
 obj-$(CONFIG_RESET_CONTROLLER) += core.o
+obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o
 obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o
 obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o
 obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o
 obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o
+obj-$(CONFIG_ATH79) += reset-ath79.o
diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c
new file mode 100644
index 0000000..d2d2904
--- /dev/null
+++ b/drivers/reset/reset-ath79.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2015 Alban Bedel <albeu@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct ath79_reset {
+	struct reset_controller_dev rcdev;
+	void __iomem *base;
+	spinlock_t lock;
+};
+
+static int ath79_reset_update(struct reset_controller_dev *rcdev,
+			unsigned long id, bool assert)
+{
+	struct ath79_reset *ath79_reset =
+		container_of(rcdev, struct ath79_reset, rcdev);
+	unsigned long flags;
+	u32 val;
+
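+	/* read-modify-write of the shared reset register under the lock */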
+	spin_lock_irqsave(&ath79_reset->lock, flags);
+	val = readl(ath79_reset->base);
+	if (assert)
+		val |= BIT(id);
+	else
+		val &= ~BIT(id);
+	writel(val, ath79_reset->base);
+	spin_unlock_irqrestore(&ath79_reset->lock, flags);
+
+	return 0;
+}
+
+static int ath79_reset_assert(struct reset_controller_dev *rcdev,
+			unsigned long id)
+{
+	return ath79_reset_update(rcdev, id, true);
+}
+
+static int ath79_reset_deassert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	return ath79_reset_update(rcdev, id, false);
+}
+
+static int ath79_reset_status(struct reset_controller_dev *rcdev,
+			unsigned long id)
+{
+	struct ath79_reset *ath79_reset =
+		container_of(rcdev, struct ath79_reset, rcdev);
+	u32 val;
+
+	val = readl(ath79_reset->base);
+
+	return !!(val & BIT(id));
+}
+
+static struct reset_control_ops ath79_reset_ops = {
+	.assert = ath79_reset_assert,
+	.deassert = ath79_reset_deassert,
+	.status = ath79_reset_status,
+};
+
+static int ath79_reset_probe(struct platform_device *pdev)
+{
+	struct ath79_reset *ath79_reset;
+	struct resource *res;
+
+	ath79_reset = devm_kzalloc(&pdev->dev,
+				sizeof(*ath79_reset), GFP_KERNEL);
+	if (!ath79_reset)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ath79_reset);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ath79_reset->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(ath79_reset->base))
+		return PTR_ERR(ath79_reset->base);
+
+	ath79_reset->rcdev.ops = &ath79_reset_ops;
+	ath79_reset->rcdev.owner = THIS_MODULE;
+	ath79_reset->rcdev.of_node = pdev->dev.of_node;
+	ath79_reset->rcdev.of_reset_n_cells = 1;
+	ath79_reset->rcdev.nr_resets = 32;
+
+	return reset_controller_register(&ath79_reset->rcdev);
+}
+
+static int ath79_reset_remove(struct platform_device *pdev)
+{
+	struct ath79_reset *ath79_reset = platform_get_drvdata(pdev);
+
+	reset_controller_unregister(&ath79_reset->rcdev);
+
+	return 0;
+}
+
+static const struct of_device_id ath79_reset_dt_ids[] = {
+	{ .compatible = "qca,ar7100-reset", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ath79_reset_dt_ids);
+
+static struct platform_driver ath79_reset_driver = {
+	.probe	= ath79_reset_probe,
+	.remove = ath79_reset_remove,
+	.driver = {
+		.name		= "ath79-reset",
+		.of_match_table	= ath79_reset_dt_ids,
+	},
+};
+module_platform_driver(ath79_reset_driver);
+
+MODULE_AUTHOR("Alban Bedel <albeu@free.fr>");
+MODULE_DESCRIPTION("AR71xx Reset Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
new file mode 100644
index 0000000..70922e9
--- /dev/null
+++ b/drivers/reset/reset-lpc18xx.c
@@ -0,0 +1,258 @@
+/*
+ * Reset driver for NXP LPC18xx/43xx Reset Generation Unit (RGU).
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+/* LPC18xx RGU registers */
+#define LPC18XX_RGU_CTRL0		0x100
+#define LPC18XX_RGU_CTRL1		0x104
+#define LPC18XX_RGU_ACTIVE_STATUS0	0x150
+#define LPC18XX_RGU_ACTIVE_STATUS1	0x154
+
+#define LPC18XX_RGU_RESETS_PER_REG	32
+
+/* Internal reset outputs */
+#define LPC18XX_RGU_CORE_RST	0
+#define LPC43XX_RGU_M0SUB_RST	12
+#define LPC43XX_RGU_M0APP_RST	56
+
+struct lpc18xx_rgu_data {
+	struct reset_controller_dev rcdev;
+	struct clk *clk_delay;
+	struct clk *clk_reg;
+	void __iomem *base;
+	spinlock_t lock;
+	u32 delay_us;
+};
+
+#define to_rgu_data(p) container_of(p, struct lpc18xx_rgu_data, rcdev)
+
+static void __iomem *rgu_base;
+
+static int lpc18xx_rgu_restart(struct notifier_block *this, unsigned long mode,
+			       void *cmd)
+{
+	writel(BIT(LPC18XX_RGU_CORE_RST), rgu_base + LPC18XX_RGU_CTRL0);
+	mdelay(2000);
+
+	pr_emerg("%s: unable to restart system\n", __func__);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block lpc18xx_rgu_restart_nb = {
+	.notifier_call = lpc18xx_rgu_restart,
+	.priority = 192,
+};
+
+/*
+ * The LPC18xx RGU has mostly self-deasserting resets except for the
+ * two reset lines going to the internal Cortex-M0 cores.
+ *
+ * To prevent the M0 core resets from accidentally getting deasserted,
+ * the status register must be checked and the corresponding bits in the
+ * control register set to preserve the state.
+ */
+static int lpc18xx_rgu_setclear_reset(struct reset_controller_dev *rcdev,
+				      unsigned long id, bool set)
+{
+	struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
+	u32 stat_offset = LPC18XX_RGU_ACTIVE_STATUS0;
+	u32 ctrl_offset = LPC18XX_RGU_CTRL0;
+	unsigned long flags;
+	u32 stat, rst_bit;
+
+	stat_offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
+	ctrl_offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
+	rst_bit = 1 << (id % LPC18XX_RGU_RESETS_PER_REG);
+
+	spin_lock_irqsave(&rc->lock, flags);
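+	/*
+	 * In the active status register a 1 means "out of reset", so invert
+	 * it to get the currently asserted resets and preserve them in the
+	 * control register write.
+	 */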
+	stat = ~readl(rc->base + stat_offset);
+	if (set)
+		writel(stat | rst_bit, rc->base + ctrl_offset);
+	else
+		writel(stat & ~rst_bit, rc->base + ctrl_offset);
+	spin_unlock_irqrestore(&rc->lock, flags);
+
+	return 0;
+}
+
+static int lpc18xx_rgu_assert(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	return lpc18xx_rgu_setclear_reset(rcdev, id, true);
+}
+
+static int lpc18xx_rgu_deassert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	return lpc18xx_rgu_setclear_reset(rcdev, id, false);
+}
+
+/* Only M0 cores require explicit reset deassert */
+static int lpc18xx_rgu_reset(struct reset_controller_dev *rcdev,
+			     unsigned long id)
+{
+	struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
+
+	lpc18xx_rgu_assert(rcdev, id);
+	udelay(rc->delay_us);
+
+	switch (id) {
+	case LPC43XX_RGU_M0SUB_RST:
+	case LPC43XX_RGU_M0APP_RST:
+		lpc18xx_rgu_setclear_reset(rcdev, id, false);
+	}
+
+	return 0;
+}
+
+static int lpc18xx_rgu_status(struct reset_controller_dev *rcdev,
+			      unsigned long id)
+{
+	struct lpc18xx_rgu_data *rc = to_rgu_data(rcdev);
+	u32 bit, offset = LPC18XX_RGU_ACTIVE_STATUS0;
+
+	offset += (id / LPC18XX_RGU_RESETS_PER_REG) * sizeof(u32);
+	bit = 1 << (id % LPC18XX_RGU_RESETS_PER_REG);
+
+	return !(readl(rc->base + offset) & bit);
+}
+
+static struct reset_control_ops lpc18xx_rgu_ops = {
+	.reset		= lpc18xx_rgu_reset,
+	.assert		= lpc18xx_rgu_assert,
+	.deassert	= lpc18xx_rgu_deassert,
+	.status		= lpc18xx_rgu_status,
+};
+
+static int lpc18xx_rgu_probe(struct platform_device *pdev)
+{
+	struct lpc18xx_rgu_data *rc;
+	struct resource *res;
+	u32 fcclk, firc;
+	int ret;
+
+	rc = devm_kzalloc(&pdev->dev, sizeof(*rc), GFP_KERNEL);
+	if (!rc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	rc->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(rc->base))
+		return PTR_ERR(rc->base);
+
+	rc->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	if (IS_ERR(rc->clk_reg)) {
+		dev_err(&pdev->dev, "reg clock not found\n");
+		return PTR_ERR(rc->clk_reg);
+	}
+
+	rc->clk_delay = devm_clk_get(&pdev->dev, "delay");
+	if (IS_ERR(rc->clk_delay)) {
+		dev_err(&pdev->dev, "delay clock not found\n");
+		return PTR_ERR(rc->clk_delay);
+	}
+
+	ret = clk_prepare_enable(rc->clk_reg);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable reg clock\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(rc->clk_delay);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable delay clock\n");
+		goto dis_clk_reg;
+	}
+
+	fcclk = clk_get_rate(rc->clk_reg) / USEC_PER_SEC;
+	firc = clk_get_rate(rc->clk_delay) / USEC_PER_SEC;
+	if (fcclk == 0 || firc == 0)
+		rc->delay_us = 2;
+	else
+		rc->delay_us = DIV_ROUND_UP(fcclk, firc * firc);
+
+	spin_lock_init(&rc->lock);
+
+	rc->rcdev.owner = THIS_MODULE;
+	rc->rcdev.nr_resets = 64;
+	rc->rcdev.ops = &lpc18xx_rgu_ops;
+	rc->rcdev.of_node = pdev->dev.of_node;
+
+	platform_set_drvdata(pdev, rc);
+
+	ret = reset_controller_register(&rc->rcdev);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to register device\n");
+		goto dis_clks;
+	}
+
+	rgu_base = rc->base;
+	ret = register_restart_handler(&lpc18xx_rgu_restart_nb);
+	if (ret)
+		dev_warn(&pdev->dev, "failed to register restart handler\n");
+
+	return 0;
+
+dis_clks:
+	clk_disable_unprepare(rc->clk_delay);
+dis_clk_reg:
+	clk_disable_unprepare(rc->clk_reg);
+
+	return ret;
+}
+
+static int lpc18xx_rgu_remove(struct platform_device *pdev)
+{
+	struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = unregister_restart_handler(&lpc18xx_rgu_restart_nb);
+	if (ret)
+		dev_warn(&pdev->dev, "failed to unregister restart handler\n");
+
+	reset_controller_unregister(&rc->rcdev);
+
+	clk_disable_unprepare(rc->clk_delay);
+	clk_disable_unprepare(rc->clk_reg);
+
+	return 0;
+}
+
+static const struct of_device_id lpc18xx_rgu_match[] = {
+	{ .compatible = "nxp,lpc1850-rgu" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
+
+static struct platform_driver lpc18xx_rgu_driver = {
+	.probe	= lpc18xx_rgu_probe,
+	.remove	= lpc18xx_rgu_remove,
+	.driver	= {
+		.name		= "lpc18xx-reset",
+		.of_match_table	= lpc18xx_rgu_match,
+	},
+};
+module_platform_driver(lpc18xx_rgu_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 0a8def3..1a6c5d6 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -24,11 +24,11 @@
 #include <linux/types.h>
 
 #define NR_BANKS		4
-#define OFFSET_MODRST		0x10
 
 struct socfpga_reset_data {
 	spinlock_t			lock;
 	void __iomem			*membase;
+	u32				modrst_offset;
 	struct reset_controller_dev	rcdev;
 };
 
@@ -45,8 +45,8 @@
 
 	spin_lock_irqsave(&data->lock, flags);
 
-	reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
-	writel(reg | BIT(offset), data->membase + OFFSET_MODRST +
+	reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+	writel(reg | BIT(offset), data->membase + data->modrst_offset +
 				 (bank * NR_BANKS));
 	spin_unlock_irqrestore(&data->lock, flags);
 
@@ -67,8 +67,8 @@
 
 	spin_lock_irqsave(&data->lock, flags);
 
-	reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
-	writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST +
+	reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
+	writel(reg & ~BIT(offset), data->membase + data->modrst_offset +
 				  (bank * NR_BANKS));
 
 	spin_unlock_irqrestore(&data->lock, flags);
@@ -85,7 +85,7 @@
 	int offset = id % BITS_PER_LONG;
 	u32 reg;
 
-	reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS));
+	reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS));
 
 	return !(reg & BIT(offset));
 }
@@ -100,6 +100,8 @@
 {
 	struct socfpga_reset_data *data;
 	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
 
 	/*
 	 * The binding was mainlined without the required property.
@@ -120,6 +122,11 @@
 	if (IS_ERR(data->membase))
 		return PTR_ERR(data->membase);
 
+	if (of_property_read_u32(np, "altr,modrst-offset", &data->modrst_offset)) {
+		dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n");
+		data->modrst_offset = 0x10;
+	}
+
 	spin_lock_init(&data->lock);
 
 	data->rcdev.owner = THIS_MODULE;
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
new file mode 100644
index 0000000..89318a5
--- /dev/null
+++ b/drivers/reset/reset-zynq.c
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015, National Instruments Corp.
+ *
+ * Xilinx Zynq Reset controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+struct zynq_reset_data {
+	struct regmap *slcr;
+	struct reset_controller_dev rcdev;
+	u32 offset;
+};
+
+#define to_zynq_reset_data(p)		\
+	container_of((p), struct zynq_reset_data, rcdev)
+
+static int zynq_reset_assert(struct reset_controller_dev *rcdev,
+			     unsigned long id)
+{
+	struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
+
+	int bank = id / BITS_PER_LONG;
+	int offset = id % BITS_PER_LONG;
+
+	pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
+		 bank, offset);
+
+	return regmap_update_bits(priv->slcr,
+				  priv->offset + (bank * 4),
+				  BIT(offset),
+				  BIT(offset));
+}
+
+static int zynq_reset_deassert(struct reset_controller_dev *rcdev,
+			       unsigned long id)
+{
+	struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
+
+	int bank = id / BITS_PER_LONG;
+	int offset = id % BITS_PER_LONG;
+
+	pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
+		 bank, offset);
+
+	return regmap_update_bits(priv->slcr,
+				  priv->offset + (bank * 4),
+				  BIT(offset),
+				  ~BIT(offset));
+}
+
+static int zynq_reset_status(struct reset_controller_dev *rcdev,
+			     unsigned long id)
+{
+	struct zynq_reset_data *priv = to_zynq_reset_data(rcdev);
+
+	int bank = id / BITS_PER_LONG;
+	int offset = id % BITS_PER_LONG;
+	int ret;
+	u32 reg;
+
+	pr_debug("%s: %s reset bank %u offset %u\n", KBUILD_MODNAME, __func__,
+		 bank, offset);
+
+	ret = regmap_read(priv->slcr, priv->offset + (bank * 4), &reg);
+	if (ret)
+		return ret;
+
+	return !!(reg & BIT(offset));
+}
+
+static struct reset_control_ops zynq_reset_ops = {
+	.assert		= zynq_reset_assert,
+	.deassert	= zynq_reset_deassert,
+	.status		= zynq_reset_status,
+};
+
+static int zynq_reset_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct zynq_reset_data *priv;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, priv);
+
+	priv->slcr = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						     "syscon");
+	if (IS_ERR(priv->slcr)) {
+		dev_err(&pdev->dev, "unable to get zynq-slcr regmap");
+		return PTR_ERR(priv->slcr);
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "missing IO resource\n");
+		return -ENODEV;
+	}
+
+	priv->offset = res->start;
+
+	priv->rcdev.owner = THIS_MODULE;
+	priv->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_LONG;
+	priv->rcdev.ops = &zynq_reset_ops;
+	priv->rcdev.of_node = pdev->dev.of_node;
+	reset_controller_register(&priv->rcdev);
+
+	return 0;
+}
+
+static int zynq_reset_remove(struct platform_device *pdev)
+{
+	struct zynq_reset_data *priv = platform_get_drvdata(pdev);
+
+	reset_controller_unregister(&priv->rcdev);
+
+	return 0;
+}
+
+static const struct of_device_id zynq_reset_dt_ids[] = {
+	{ .compatible = "xlnx,zynq-reset", },
+	{ /* sentinel */ },
+};
+
+static struct platform_driver zynq_reset_driver = {
+	.probe	= zynq_reset_probe,
+	.remove	= zynq_reset_remove,
+	.driver = {
+		.name		= KBUILD_MODNAME,
+		.of_match_table	= zynq_reset_dt_ids,
+	},
+};
+module_platform_driver(zynq_reset_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
+MODULE_DESCRIPTION("Zynq Reset Controller Driver");
diff --git a/drivers/reset/sti/reset-stih407.c b/drivers/reset/sti/reset-stih407.c
index d83db5d7..827eb3d 100644
--- a/drivers/reset/sti/reset-stih407.c
+++ b/drivers/reset/sti/reset-stih407.c
@@ -11,7 +11,7 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
-#include <dt-bindings/reset-controller/stih407-resets.h>
+#include <dt-bindings/reset/stih407-resets.h>
 #include "reset-syscfg.h"
 
 /* STiH407 Peripheral powerdown definitions. */
@@ -126,7 +126,7 @@
 	.channels = stih407_picophyresets,
 };
 
-static struct of_device_id stih407_reset_match[] = {
+static const struct of_device_id stih407_reset_match[] = {
 	{
 		.compatible = "st,stih407-powerdown",
 		.data = &stih407_powerdown_controller,
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
index 8dad603..6f220cd 100644
--- a/drivers/reset/sti/reset-stih415.c
+++ b/drivers/reset/sti/reset-stih415.c
@@ -13,7 +13,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 
-#include <dt-bindings/reset-controller/stih415-resets.h>
+#include <dt-bindings/reset/stih415-resets.h>
 
 #include "reset-syscfg.h"
 
@@ -89,7 +89,7 @@
 	.channels = stih415_softresets,
 };
 
-static struct of_device_id stih415_reset_match[] = {
+static const struct of_device_id stih415_reset_match[] = {
 	{ .compatible = "st,stih415-powerdown",
 	  .data = &stih415_powerdown_controller, },
 	{ .compatible = "st,stih415-softreset",
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
index 79aed70..c581d60 100644
--- a/drivers/reset/sti/reset-stih416.c
+++ b/drivers/reset/sti/reset-stih416.c
@@ -13,7 +13,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 
-#include <dt-bindings/reset-controller/stih416-resets.h>
+#include <dt-bindings/reset/stih416-resets.h>
 
 #include "reset-syscfg.h"
 
@@ -120,7 +120,7 @@
 	.channels = stih416_softresets,
 };
 
-static struct of_device_id stih416_reset_match[] = {
+static const struct of_device_id stih416_reset_match[] = {
 	{ .compatible = "st,stih416-powerdown",
 	  .data = &stih416_powerdown_controller, },
 	{ .compatible = "st,stih416-softreset",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 83b4b89..533bfa3 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1523,6 +1523,7 @@
 
 config RTC_DRV_SNVS
 	tristate "Freescale SNVS RTC support"
+	select REGMAP_MMIO
 	depends on HAS_IOMEM
 	depends on OF
 	help
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 167783f..72c9333 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -666,9 +666,8 @@
 #ifdef CONFIG_RTC_DRV_DS1374_WDT
 	int res;
 
-	res = misc_deregister(&ds1374_miscdev);
-	if (!res)
-		ds1374_miscdev.parent = NULL;
+	misc_deregister(&ds1374_miscdev);
+	ds1374_miscdev.parent = NULL;
 	unregister_reboot_notifier(&ds1374_wdt_notifier);
 #endif
 
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 5fc292c..7bd89d9 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -16,6 +16,8 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #define RTC_INPUT_CLK_32768HZ	(0x00 << 5)
 #define RTC_INPUT_CLK_32000HZ	(0x01 << 5)
@@ -79,7 +81,8 @@
 	struct rtc_device *rtc;
 	void __iomem *ioaddr;
 	int irq;
-	struct clk *clk;
+	struct clk *clk_ref;
+	struct clk *clk_ipg;
 	struct rtc_time g_rtc_alarm;
 	enum imx_rtc_type devtype;
 };
@@ -97,6 +100,15 @@
 };
 MODULE_DEVICE_TABLE(platform, imx_rtc_devtype);
 
+#ifdef CONFIG_OF
+static const struct of_device_id imx_rtc_dt_ids[] = {
+	{ .compatible = "fsl,imx1-rtc", .data = (const void *)IMX1_RTC },
+	{ .compatible = "fsl,imx21-rtc", .data = (const void *)IMX21_RTC },
+	{}
+};
+MODULE_DEVICE_TABLE(of, imx_rtc_dt_ids);
+#endif
+
 static inline int is_imx1_rtc(struct rtc_plat_data *data)
 {
 	return data->devtype == IMX1_RTC;
@@ -361,29 +373,45 @@
 	u32 reg;
 	unsigned long rate;
 	int ret;
+	const struct of_device_id *of_id;
 
 	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata)
 		return -ENOMEM;
 
-	pdata->devtype = pdev->id_entry->driver_data;
+	of_id = of_match_device(imx_rtc_dt_ids, &pdev->dev);
+	if (of_id)
+		pdata->devtype = (enum imx_rtc_type)of_id->data;
+	else
+		pdata->devtype = pdev->id_entry->driver_data;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(pdata->ioaddr))
 		return PTR_ERR(pdata->ioaddr);
 
-	pdata->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(pdata->clk)) {
-		dev_err(&pdev->dev, "unable to get clock!\n");
-		return PTR_ERR(pdata->clk);
+	pdata->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(pdata->clk_ipg)) {
+		dev_err(&pdev->dev, "unable to get ipg clock!\n");
+		return PTR_ERR(pdata->clk_ipg);
 	}
 
-	ret = clk_prepare_enable(pdata->clk);
+	ret = clk_prepare_enable(pdata->clk_ipg);
 	if (ret)
 		return ret;
 
-	rate = clk_get_rate(pdata->clk);
+	pdata->clk_ref = devm_clk_get(&pdev->dev, "ref");
+	if (IS_ERR(pdata->clk_ref)) {
+		dev_err(&pdev->dev, "unable to get ref clock!\n");
+		ret = PTR_ERR(pdata->clk_ref);
+		goto exit_put_clk_ipg;
+	}
+
+	ret = clk_prepare_enable(pdata->clk_ref);
+	if (ret)
+		goto exit_put_clk_ipg;
+
+	rate = clk_get_rate(pdata->clk_ref);
 
 	if (rate == 32768)
 		reg = RTC_INPUT_CLK_32768HZ;
@@ -394,7 +422,7 @@
 	else {
 		dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate);
 		ret = -EINVAL;
-		goto exit_put_clk;
+		goto exit_put_clk_ref;
 	}
 
 	reg |= RTC_ENABLE_BIT;
@@ -402,7 +430,7 @@
 	if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) {
 		dev_err(&pdev->dev, "hardware module can't be enabled!\n");
 		ret = -EIO;
-		goto exit_put_clk;
+		goto exit_put_clk_ref;
 	}
 
 	platform_set_drvdata(pdev, pdata);
@@ -424,15 +452,17 @@
 				  THIS_MODULE);
 	if (IS_ERR(rtc)) {
 		ret = PTR_ERR(rtc);
-		goto exit_put_clk;
+		goto exit_put_clk_ref;
 	}
 
 	pdata->rtc = rtc;
 
 	return 0;
 
-exit_put_clk:
-	clk_disable_unprepare(pdata->clk);
+exit_put_clk_ref:
+	clk_disable_unprepare(pdata->clk_ref);
+exit_put_clk_ipg:
+	clk_disable_unprepare(pdata->clk_ipg);
 
 	return ret;
 }
@@ -441,7 +471,8 @@
 {
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 
-	clk_disable_unprepare(pdata->clk);
+	clk_disable_unprepare(pdata->clk_ref);
+	clk_disable_unprepare(pdata->clk_ipg);
 
 	return 0;
 }
@@ -473,6 +504,7 @@
 static struct platform_driver mxc_rtc_driver = {
 	.driver = {
 		   .name	= "mxc_rtc",
+		   .of_match_table = of_match_ptr(imx_rtc_dt_ids),
 		   .pm		= &mxc_rtc_pm_ops,
 	},
 	.id_table = imx_rtc_devtype,
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index d87a85c..950c5d0 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -18,6 +18,10 @@
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
 #include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define SNVS_LPREGISTER_OFFSET	0x34
 
 /* These register offsets are relative to LP (Low Power) range */
 #define SNVS_LPCR		0x04
@@ -37,31 +41,36 @@
 
 struct snvs_rtc_data {
 	struct rtc_device *rtc;
-	void __iomem *ioaddr;
+	struct regmap *regmap;
+	int offset;
 	int irq;
-	spinlock_t lock;
 	struct clk *clk;
 };
 
-static u32 rtc_read_lp_counter(void __iomem *ioaddr)
+static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
 {
 	u64 read1, read2;
+	u32 val;
 
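+	/* sample the 47-bit counter twice and retry until both reads agree */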
 	do {
-		read1 = readl(ioaddr + SNVS_LPSRTCMR);
+		regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+		read1 = val;
 		read1 <<= 32;
-		read1 |= readl(ioaddr + SNVS_LPSRTCLR);
+		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+		read1 |= val;
 
-		read2 = readl(ioaddr + SNVS_LPSRTCMR);
+		regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
+		read2 = val;
 		read2 <<= 32;
-		read2 |= readl(ioaddr + SNVS_LPSRTCLR);
+		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
+		read2 |= val;
 	} while (read1 != read2);
 
 	/* Convert 47-bit counter to 32-bit raw second count */
 	return (u32) (read1 >> CNTR_TO_SECS_SH);
 }
 
-static void rtc_write_sync_lp(void __iomem *ioaddr)
+static void rtc_write_sync_lp(struct snvs_rtc_data *data)
 {
 	u32 count1, count2, count3;
 	int i;
@@ -69,15 +78,15 @@
 	/* Wait for 3 CKIL cycles */
 	for (i = 0; i < 3; i++) {
 		do {
-			count1 = readl(ioaddr + SNVS_LPSRTCLR);
-			count2 = readl(ioaddr + SNVS_LPSRTCLR);
+			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
 		} while (count1 != count2);
 
 		/* Now wait until counter value changes */
 		do {
 			do {
-				count2 = readl(ioaddr + SNVS_LPSRTCLR);
-				count3 = readl(ioaddr + SNVS_LPSRTCLR);
+				regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
+				regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
 			} while (count2 != count3);
 		} while (count3 == count1);
 	}
@@ -85,23 +94,14 @@
 
 static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
 {
-	unsigned long flags;
 	int timeout = 1000;
 	u32 lpcr;
 
-	spin_lock_irqsave(&data->lock, flags);
-
-	lpcr = readl(data->ioaddr + SNVS_LPCR);
-	if (enable)
-		lpcr |= SNVS_LPCR_SRTC_ENV;
-	else
-		lpcr &= ~SNVS_LPCR_SRTC_ENV;
-	writel(lpcr, data->ioaddr + SNVS_LPCR);
-
-	spin_unlock_irqrestore(&data->lock, flags);
+	regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_SRTC_ENV,
+			   enable ? SNVS_LPCR_SRTC_ENV : 0);
 
 	while (--timeout) {
-		lpcr = readl(data->ioaddr + SNVS_LPCR);
+		regmap_read(data->regmap, data->offset + SNVS_LPCR, &lpcr);
 
 		if (enable) {
 			if (lpcr & SNVS_LPCR_SRTC_ENV)
@@ -121,7 +121,7 @@
 static int snvs_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
-	unsigned long time = rtc_read_lp_counter(data->ioaddr);
+	unsigned long time = rtc_read_lp_counter(data);
 
 	rtc_time_to_tm(time, tm);
 
@@ -139,8 +139,8 @@
 	snvs_rtc_enable(data, false);
 
 	/* Write 32-bit time to 47-bit timer, leaving 15 LSBs blank */
-	writel(time << CNTR_TO_SECS_SH, data->ioaddr + SNVS_LPSRTCLR);
-	writel(time >> (32 - CNTR_TO_SECS_SH), data->ioaddr + SNVS_LPSRTCMR);
+	regmap_write(data->regmap, data->offset + SNVS_LPSRTCLR, time << CNTR_TO_SECS_SH);
+	regmap_write(data->regmap, data->offset + SNVS_LPSRTCMR, time >> (32 - CNTR_TO_SECS_SH));
 
 	/* Enable RTC again */
 	snvs_rtc_enable(data, true);
@@ -153,10 +153,10 @@
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
 	u32 lptar, lpsr;
 
-	lptar = readl(data->ioaddr + SNVS_LPTAR);
+	regmap_read(data->regmap, data->offset + SNVS_LPTAR, &lptar);
 	rtc_time_to_tm(lptar, &alrm->time);
 
-	lpsr = readl(data->ioaddr + SNVS_LPSR);
+	regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
 	alrm->pending = (lpsr & SNVS_LPSR_LPTA) ? 1 : 0;
 
 	return 0;
@@ -165,21 +165,12 @@
 static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
-	u32 lpcr;
-	unsigned long flags;
 
-	spin_lock_irqsave(&data->lock, flags);
+	regmap_update_bits(data->regmap, data->offset + SNVS_LPCR,
+			   (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
+			   enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
 
-	lpcr = readl(data->ioaddr + SNVS_LPCR);
-	if (enable)
-		lpcr |= (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN);
-	else
-		lpcr &= ~(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN);
-	writel(lpcr, data->ioaddr + SNVS_LPCR);
-
-	spin_unlock_irqrestore(&data->lock, flags);
-
-	rtc_write_sync_lp(data->ioaddr);
+	rtc_write_sync_lp(data);
 
 	return 0;
 }
@@ -189,24 +180,14 @@
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
 	struct rtc_time *alrm_tm = &alrm->time;
 	unsigned long time;
-	unsigned long flags;
-	u32 lpcr;
 
 	rtc_tm_to_time(alrm_tm, &time);
 
-	spin_lock_irqsave(&data->lock, flags);
-
-	/* Have to clear LPTA_EN before programming new alarm time in LPTAR */
-	lpcr = readl(data->ioaddr + SNVS_LPCR);
-	lpcr &= ~SNVS_LPCR_LPTA_EN;
-	writel(lpcr, data->ioaddr + SNVS_LPCR);
-
-	spin_unlock_irqrestore(&data->lock, flags);
-
-	writel(time, data->ioaddr + SNVS_LPTAR);
+	regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
+	regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
 
 	/* Clear alarm interrupt status bit */
-	writel(SNVS_LPSR_LPTA, data->ioaddr + SNVS_LPSR);
+	regmap_write(data->regmap, data->offset + SNVS_LPSR, SNVS_LPSR_LPTA);
 
 	return snvs_rtc_alarm_irq_enable(dev, alrm->enabled);
 }
@@ -226,7 +207,7 @@
 	u32 lpsr;
 	u32 events = 0;
 
-	lpsr = readl(data->ioaddr + SNVS_LPSR);
+	regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
 
 	if (lpsr & SNVS_LPSR_LPTA) {
 		events |= (RTC_AF | RTC_IRQF);
@@ -238,25 +219,48 @@
 	}
 
 	/* clear interrupt status */
-	writel(lpsr, data->ioaddr + SNVS_LPSR);
+	regmap_write(data->regmap, data->offset + SNVS_LPSR, lpsr);
 
 	return events ? IRQ_HANDLED : IRQ_NONE;
 }
 
+static const struct regmap_config snvs_rtc_config = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.reg_stride = 4,
+};
+
 static int snvs_rtc_probe(struct platform_device *pdev)
 {
 	struct snvs_rtc_data *data;
 	struct resource *res;
 	int ret;
+	void __iomem *mmio;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	data->ioaddr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(data->ioaddr))
-		return PTR_ERR(data->ioaddr);
+	data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
+
+	if (IS_ERR(data->regmap)) {
+		dev_warn(&pdev->dev, "snvs rtc: you use old dts file, please update it\n");
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+		mmio = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(mmio))
+			return PTR_ERR(mmio);
+
+		data->regmap = devm_regmap_init_mmio(&pdev->dev, mmio, &snvs_rtc_config);
+	} else {
+		data->offset = SNVS_LPREGISTER_OFFSET;
+		of_property_read_u32(pdev->dev.of_node, "offset", &data->offset);
+	}
+
+	if (!data->regmap) {
+		dev_err(&pdev->dev, "Can't find snvs syscon\n");
+		return -ENODEV;
+	}
 
 	data->irq = platform_get_irq(pdev, 0);
 	if (data->irq < 0)
@@ -276,13 +280,11 @@
 
 	platform_set_drvdata(pdev, data);
 
-	spin_lock_init(&data->lock);
-
 	/* Initialize glitch detect */
-	writel(SNVS_LPPGDR_INIT, data->ioaddr + SNVS_LPPGDR);
+	regmap_write(data->regmap, data->offset + SNVS_LPPGDR, SNVS_LPPGDR_INIT);
 
 	/* Clear interrupt status */
-	writel(0xffffffff, data->ioaddr + SNVS_LPSR);
+	regmap_write(data->regmap, data->offset + SNVS_LPSR, 0xffffffff);
 
 	/* Enable RTC */
 	snvs_rtc_enable(data, true);
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 3f9d0ac..74c0a33 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -208,7 +208,7 @@
 		return -EINVAL;
 	}
 
-	/* LPC can either run in RTC or WDT mode */
+	/* LPC can either run as a Clocksource or in RTC or WDT mode */
 	if (mode != ST_LPC_MODE_RTC)
 		return -ENODEV;
 
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index ee3a6fa..fe07f31 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -58,7 +58,7 @@
 		    && !strncmp(pos->uid.serial, uid->serial,
 				sizeof(uid->serial)))
 			return pos;
-	};
+	}
 	return NULL;
 }
 
@@ -69,7 +69,7 @@
 	list_for_each_entry(pos, &server->lculist, lcu) {
 		if (pos->uid.ssid == uid->ssid)
 			return pos;
-	};
+	}
 	return NULL;
 }
 
@@ -97,7 +97,7 @@
 		if (pos->uid.base_unit_addr == search_unit_addr &&
 		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
 			return pos;
-	};
+	}
 	return NULL;
 }
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6215f64..62a3235 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1036,7 +1036,7 @@
 {
 	void *conf_data;
 	int conf_len, conf_data_saved;
-	int rc, path_err;
+	int rc, path_err, pos;
 	__u8 lpm, opm;
 	struct dasd_eckd_private *private, path_private;
 	struct dasd_path *path_data;
@@ -1068,6 +1068,17 @@
 			path_data->opm |= lpm;
 			continue;	/* no error */
 		}
+		/* translate path mask to position in mask */
+		pos = 8 - ffs(lpm);
+		kfree(private->path_conf_data[pos]);
+		if ((__u8 *)private->path_conf_data[pos] ==
+		    private->conf_data) {
+			private->conf_data = NULL;
+			private->conf_len = 0;
+			conf_data_saved = 0;
+		}
+		private->path_conf_data[pos] =
+			(struct dasd_conf_data *) conf_data;
 		/* save first valid configuration data */
 		if (!conf_data_saved) {
 			kfree(private->conf_data);
@@ -1095,7 +1106,6 @@
 				kfree(conf_data);
 				continue;
 			}
-
 			if (dasd_eckd_compare_path_uid(
 				    device, &path_private)) {
 				uid = &path_private.uid;
@@ -1157,9 +1167,6 @@
 		path_data->cablepm &= ~lpm;
 		path_data->hpfpm &= ~lpm;
 		path_data->cuirpm &= ~lpm;
-
-		if (conf_data != private->conf_data)
-			kfree(conf_data);
 	}
 
 	return path_err;
@@ -1259,7 +1266,11 @@
 		schedule_work(work);
 		return;
 	}
-
+	/* check if path verification is already running and delay if so */
+	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
+		schedule_work(work);
+		return;
+	}
 	opm = 0;
 	npm = 0;
 	ppm = 0;
@@ -1402,7 +1413,7 @@
 		device->path_data.hpfpm |= hpfpm;
 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 	}
-
+	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
 	dasd_put_device(device);
 	if (data->isglobal)
 		mutex_unlock(&dasd_path_verification_mutex);
@@ -1810,6 +1821,7 @@
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
 	struct dasd_eckd_private *private;
+	int i;
 
 	private = (struct dasd_eckd_private *) device->private;
 	dasd_alias_disconnect_device_from_lcu(device);
@@ -1818,6 +1830,15 @@
 	private->vdsneq = NULL;
 	private->gneq = NULL;
 	private->conf_len = 0;
+	for (i = 0; i < 8; i++) {
+		kfree(private->path_conf_data[i]);
+		if ((__u8 *)private->path_conf_data[i] ==
+		    private->conf_data) {
+			private->conf_data = NULL;
+			private->conf_len = 0;
+		}
+		private->path_conf_data[i] = NULL;
+	}
 	kfree(private->conf_data);
 	private->conf_data = NULL;
 }
@@ -3968,7 +3989,7 @@
 	rc = -EFAULT;
 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
 		goto out;
-	if (is_compat_task() || sizeof(long) == 4) {
+	if (is_compat_task()) {
 		/* Make sure pointers are sane even on 31 bit. */
 		rc = -EINVAL;
 		if ((usrparm.psf_data >> 32) != 0)
@@ -4525,12 +4546,13 @@
 	cqr->startdev = device;
 	cqr->memdev = device;
 	cqr->block = NULL;
-	cqr->retries = 256;
 	cqr->expires = 10 * HZ;
-
-	/* we need to check for messages on exactly this path */
 	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
-	cqr->lpm = lpum;
+	/* dasd_sleep_on_immediatly does not do complex error
+	 * recovery, so clear the ERP flag and set the retry counter
+	 * to do basic ERP */
+	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
+	cqr->retries = 256;
 
 	/* Prepare for Read Subsystem Data */
 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
@@ -4605,10 +4627,10 @@
 	psf_cuir->message_id = message_id;
 	psf_cuir->cssid = sch_id.cssid;
 	psf_cuir->ssid = sch_id.ssid;
-
 	ccw = cqr->cpaddr;
 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
 	ccw->cda = (__u32)(addr_t)psf_cuir;
+	ccw->flags = CCW_FLAG_SLI;
 	ccw->count = sizeof(struct dasd_psf_cuir_response);
 
 	cqr->startdev = device;
@@ -4618,6 +4640,7 @@
 	cqr->expires = 10*HZ;
 	cqr->buildclk = get_tod_clock();
 	cqr->status = DASD_CQR_FILLED;
+	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
 
 	rc = dasd_sleep_on(cqr);
 
@@ -4625,118 +4648,252 @@
 	return rc;
 }
 
-static int dasd_eckd_cuir_change_state(struct dasd_device *device, __u8 lpum)
+/*
+ * return configuration data that is referenced by record selector
+ * if a record selector is specified or per default return the
+ * conf_data pointer for the path specified by lpum
+ */
+static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
+						     __u8 lpum,
+						     struct dasd_cuir_message *cuir)
 {
-	unsigned long flags;
-	__u8 tbcpm;
+	struct dasd_eckd_private *private;
+	struct dasd_conf_data *conf_data;
+	int path, pos;
 
-	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-	tbcpm = device->path_data.opm & ~lpum;
-	if (tbcpm) {
-		device->path_data.opm = tbcpm;
-		device->path_data.cuirpm |= lpum;
+	private = (struct dasd_eckd_private *) device->private;
+	if (cuir->record_selector == 0)
+		goto out;
+	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
+		conf_data = private->path_conf_data[pos];
+		if (conf_data->gneq.record_selector ==
+		    cuir->record_selector)
+			return conf_data;
 	}
-	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
-	return tbcpm ? 0 : PSF_CUIR_LAST_PATH;
+out:
+	return private->path_conf_data[8 - ffs(lpum)];
 }
 
 /*
- * walk through all devices and quiesce them
- * if it is the last path return error
+ * This function determines the scope of a reconfiguration request by
+ * analysing the path and device selection data provided in the CUIR request.
+ * Returns a path mask containing the CUIR-affected paths for the given device.
+ *
+ * If the CUIR request does not contain the required information, return the
+ * path mask of the path the attention message for the CUIR request was
+ * received on.
+ */
+static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
+				struct dasd_cuir_message *cuir)
+{
+	struct dasd_conf_data *ref_conf_data;
+	unsigned long bitmask = 0, mask = 0;
+	struct dasd_eckd_private *private;
+	struct dasd_conf_data *conf_data;
+	unsigned int pos, path;
+	char *ref_gneq, *gneq;
+	char *ref_ned, *ned;
+	int tbcpm = 0;
+
+	/* if the CUIR request does not specify the scope, use the path
+	   the attention message was presented on */
+	if (!cuir->ned_map ||
+	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
+		return lpum;
+
+	private = (struct dasd_eckd_private *) device->private;
+	/* get reference conf data */
+	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
+	/* reference ned is determined by ned_map field */
+	pos = 8 - ffs(cuir->ned_map);
+	ref_ned = (char *)&ref_conf_data->neds[pos];
+	ref_gneq = (char *)&ref_conf_data->gneq;
+	/* transfer 24 bit neq_map to mask */
+	mask = cuir->neq_map[2];
+	mask |= cuir->neq_map[1] << 8;
+	mask |= cuir->neq_map[0] << 16;
+
+	for (path = 0x80; path; path >>= 1) {
+		/* initialise data per path */
+		bitmask = mask;
+		pos = 8 - ffs(path);
+		conf_data = private->path_conf_data[pos];
+		pos = 8 - ffs(cuir->ned_map);
+		ned = (char *) &conf_data->neds[pos];
+		/* compare reference ned and per path ned */
+		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
+			continue;
+		gneq = (char *)&conf_data->gneq;
+		/* compare reference gneq and per_path gneq under
+		   24 bit mask where mask bit 0 equals byte 7 of
+		   the gneq and mask bit 24 equals byte 31 */
+		while (bitmask) {
+			pos = ffs(bitmask) - 1;
+			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
+			    != 0)
+				break;
+			clear_bit(pos, &bitmask);
+		}
+		if (bitmask)
+			continue;
+		/* device and path match the reference values,
+		   add the path to the CUIR scope */
+		tbcpm |= path;
+	}
+	return tbcpm;
+}
+
+static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
+				       unsigned long paths,
+				       struct subchannel_id sch_id, int action)
+{
+	struct channel_path_desc *desc;
+	int pos;
+
+	while (paths) {
+		/* get position of bit in mask */
+		pos = ffs(paths) - 1;
+		/* get channel path descriptor from this position */
+		desc = ccw_device_get_chp_desc(device->cdev, 7 - pos);
+		if (action == CUIR_QUIESCE)
+			pr_warn("Service on the storage server caused path "
+				"%x.%02x to go offline", sch_id.cssid,
+				desc ? desc->chpid : 0);
+		else if (action == CUIR_RESUME)
+			pr_info("Path %x.%02x is back online after service "
+				"on the storage server", sch_id.cssid,
+				desc ? desc->chpid : 0);
+		kfree(desc);
+		clear_bit(pos, &paths);
+	}
+}
+
+static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
+				      struct dasd_cuir_message *cuir)
+{
+	unsigned long tbcpm;
+
+	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
+	/* nothing to do if path is not in use */
+	if (!(device->path_data.opm & tbcpm))
+		return 0;
+	if (!(device->path_data.opm & ~tbcpm)) {
+		/* no path would be left if the CUIR action is taken
+		   return error */
+		return -EINVAL;
+	}
+	/* remove device from operational path mask */
+	device->path_data.opm &= ~tbcpm;
+	device->path_data.cuirpm |= tbcpm;
+	return tbcpm;
+}
+
+/*
+ * walk through all devices and build a path mask to quiesce them
+ * return an error if the last path to a device would be removed
  *
  * if only part of the devices are quiesced and an error
  * occurs no onlining necessary, the storage server will
  * notify the already set offline devices again
  */
 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
-				 struct channel_path_desc *desc,
-				 struct subchannel_id sch_id)
+				  struct subchannel_id sch_id,
+				  struct dasd_cuir_message *cuir)
 {
 	struct alias_pav_group *pavgroup, *tempgroup;
 	struct dasd_eckd_private *private;
 	struct dasd_device *dev, *n;
-	int rc;
+	unsigned long paths = 0;
+	unsigned long flags;
+	int tbcpm;
 
 	private = (struct dasd_eckd_private *) device->private;
-	rc = 0;
-
 	/* active devices */
-	list_for_each_entry_safe(dev, n,
-				 &private->lcu->active_devices,
+	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
 				 alias_list) {
-		rc = dasd_eckd_cuir_change_state(dev, lpum);
-		if (rc)
-			goto out;
+		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+		if (tbcpm < 0)
+			goto out_err;
+		paths |= tbcpm;
 	}
-
 	/* inactive devices */
-	list_for_each_entry_safe(dev, n,
-				 &private->lcu->inactive_devices,
+	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
 				 alias_list) {
-		rc = dasd_eckd_cuir_change_state(dev, lpum);
-		if (rc)
-			goto out;
+		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
+		if (tbcpm < 0)
+			goto out_err;
+		paths |= tbcpm;
 	}
-
 	/* devices in PAV groups */
 	list_for_each_entry_safe(pavgroup, tempgroup,
 				 &private->lcu->grouplist, group) {
 		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
 					 alias_list) {
-			rc = dasd_eckd_cuir_change_state(dev, lpum);
-			if (rc)
-				goto out;
+			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(dev->cdev), flags);
+			if (tbcpm < 0)
+				goto out_err;
+			paths |= tbcpm;
 		}
 		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
 					 alias_list) {
-			rc = dasd_eckd_cuir_change_state(dev, lpum);
-			if (rc)
-				goto out;
+			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
+			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
+			spin_unlock_irqrestore(
+				get_ccwdev_lock(dev->cdev), flags);
+			if (tbcpm < 0)
+				goto out_err;
+			paths |= tbcpm;
 		}
 	}
-
-	pr_warn("Service on the storage server caused path %x.%02x to go offline",
-		sch_id.cssid, desc ? desc->chpid : 0);
-	rc = PSF_CUIR_COMPLETED;
-out:
-	return rc;
+	/* notify user about all paths affected by CUIR action */
+	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_QUIESCE);
+	return 0;
+out_err:
+	return tbcpm;
 }
 
 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
-				 struct channel_path_desc *desc,
-				 struct subchannel_id sch_id)
+				 struct subchannel_id sch_id,
+				 struct dasd_cuir_message *cuir)
 {
 	struct alias_pav_group *pavgroup, *tempgroup;
 	struct dasd_eckd_private *private;
 	struct dasd_device *dev, *n;
+	unsigned long paths = 0;
+	int tbcpm;
 
-	pr_info("Path %x.%02x is back online after service on the storage server",
-		sch_id.cssid, desc ? desc->chpid : 0);
 	private = (struct dasd_eckd_private *) device->private;
-
 	/*
 	 * the path may have been added through a generic path event before
 	 * only trigger path verification if the path is not already in use
 	 */
-
 	list_for_each_entry_safe(dev, n,
 				 &private->lcu->active_devices,
 				 alias_list) {
-		if (!(dev->path_data.opm & lpum)) {
-			dev->path_data.tbvpm |= lpum;
+		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+		paths |= tbcpm;
+		if (!(dev->path_data.opm & tbcpm)) {
+			dev->path_data.tbvpm |= tbcpm;
 			dasd_schedule_device_bh(dev);
 		}
 	}
-
 	list_for_each_entry_safe(dev, n,
 				 &private->lcu->inactive_devices,
 				 alias_list) {
-		if (!(dev->path_data.opm & lpum)) {
-			dev->path_data.tbvpm |= lpum;
+		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+		paths |= tbcpm;
+		if (!(dev->path_data.opm & tbcpm)) {
+			dev->path_data.tbvpm |= tbcpm;
 			dasd_schedule_device_bh(dev);
 		}
 	}
-
 	/* devices in PAV groups */
 	list_for_each_entry_safe(pavgroup, tempgroup,
 				 &private->lcu->grouplist,
@@ -4744,21 +4901,27 @@
 		list_for_each_entry_safe(dev, n,
 					 &pavgroup->baselist,
 					 alias_list) {
-			if (!(dev->path_data.opm & lpum)) {
-				dev->path_data.tbvpm |= lpum;
+			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+			paths |= tbcpm;
+			if (!(dev->path_data.opm & tbcpm)) {
+				dev->path_data.tbvpm |= tbcpm;
 				dasd_schedule_device_bh(dev);
 			}
 		}
 		list_for_each_entry_safe(dev, n,
 					 &pavgroup->aliaslist,
 					 alias_list) {
-			if (!(dev->path_data.opm & lpum)) {
-				dev->path_data.tbvpm |= lpum;
+			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
+			paths |= tbcpm;
+			if (!(dev->path_data.opm & tbcpm)) {
+				dev->path_data.tbvpm |= tbcpm;
 				dasd_schedule_device_bh(dev);
 			}
 		}
 	}
-	return PSF_CUIR_COMPLETED;
+	/* notify user about all paths affected by CUIR action */
+	dasd_eckd_cuir_notify_user(device, paths, sch_id, CUIR_RESUME);
+	return 0;
 }
 
 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
@@ -4768,8 +4931,12 @@
 	struct channel_path_desc *desc;
 	struct subchannel_id sch_id;
 	int pos, response;
-	ccw_device_get_schid(device->cdev, &sch_id);
 
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      "CUIR request: %016llx %016llx %016llx %08x",
+		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
+		      ((u32 *)cuir)[3]);
+	ccw_device_get_schid(device->cdev, &sch_id);
 	/* get position of path in mask */
 	pos = 8 - ffs(lpum);
 	/* get channel path descriptor from this position */
@@ -4777,18 +4944,26 @@
 
 	if (cuir->code == CUIR_QUIESCE) {
 		/* quiesce */
-		response = dasd_eckd_cuir_quiesce(device, lpum, desc, sch_id);
+		if (dasd_eckd_cuir_quiesce(device, lpum, sch_id, cuir))
+			response = PSF_CUIR_LAST_PATH;
+		else
+			response = PSF_CUIR_COMPLETED;
 	} else if (cuir->code == CUIR_RESUME) {
 		/* resume */
-		response = dasd_eckd_cuir_resume(device, lpum, desc, sch_id);
+		dasd_eckd_cuir_resume(device, lpum, sch_id, cuir);
+		response = PSF_CUIR_COMPLETED;
 	} else
 		response = PSF_CUIR_NOT_SUPPORTED;
 
-	dasd_eckd_psf_cuir_response(device, response, cuir->message_id,
-				    desc, sch_id);
-
+	dasd_eckd_psf_cuir_response(device, response,
+				    cuir->message_id, desc, sch_id);
+	DBF_DEV_EVENT(DBF_WARNING, device,
+		      "CUIR response: %d on message ID %08x", response,
+		      cuir->message_id);
 	/* free descriptor copy */
 	kfree(desc);
+	/* to make sure no attention message is left pending, schedule the work again */
+	device->discipline->check_attention(device, lpum);
 }
 
 static void dasd_eckd_check_attention_work(struct work_struct *work)
@@ -4800,22 +4975,18 @@
 
 	data = container_of(work, struct check_attention_work_data, worker);
 	device = data->device;
-
 	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
 	if (!messages) {
 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
 			      "Could not allocate attention message buffer");
 		goto out;
 	}
-
 	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
 	if (rc)
 		goto out;
-
 	if (messages->length == ATTENTION_LENGTH_CUIR &&
 	    messages->format == ATTENTION_FORMAT_CUIR)
 		dasd_eckd_handle_cuir(device, messages, data->lpum);
-
 out:
 	dasd_put_device(device);
 	kfree(messages);
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index ddab7df..f8f91ee 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -355,7 +355,8 @@
 		__u8 identifier:2;
 		__u8 reserved:6;
 	} __attribute__ ((packed)) flags;
-	__u8 reserved[5];
+	__u8 record_selector;
+	__u8 reserved[4];
 	struct {
 		__u8 value:2;
 		__u8 number:6;
@@ -492,10 +493,18 @@
 	struct dasd_device *next;
 };
 
+struct dasd_conf_data {
+	struct dasd_ned neds[5];
+	u8 reserved[64];
+	struct dasd_gneq gneq;
+} __packed;
+
 struct dasd_eckd_private {
 	struct dasd_eckd_characteristics rdc_data;
 	u8 *conf_data;
 	int conf_len;
+	/* per path configuration data */
+	struct dasd_conf_data *path_conf_data[8];
 	/* pointers to specific parts in the conf_data */
 	struct dasd_ned *ned;
 	struct dasd_sneq *sneq;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 227e3de..4aed5ed 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -534,6 +534,7 @@
 #define DASD_FLAG_SAFE_OFFLINE	10	/* safe offline processing requested*/
 #define DASD_FLAG_SAFE_OFFLINE_RUNNING	11	/* safe offline running */
 #define DASD_FLAG_ABORTALL	12	/* Abort all noretry requests */
+#define DASD_FLAG_PATH_VERIFY	13	/* Path verification worker running */
 
 #define DASD_SLEEPON_START_TAG	((void *) 1)
 #define DASD_SLEEPON_END_TAG	((void *) 2)
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index da21281..2b744fb 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -548,10 +548,10 @@
 	 */
 	num_of_segments = 0;
 	for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
-		for (j = i; (buf[j] != ':') &&
+		for (j = i; j < count &&
+			(buf[j] != ':') &&
 			(buf[j] != '\0') &&
-			(buf[j] != '\n') &&
-			j < count; j++) {
+			(buf[j] != '\n'); j++) {
 			local_buf[j-i] = toupper(buf[j]);
 		}
 		local_buf[j-i] = '\0';
@@ -723,7 +723,7 @@
 	/*
 	 * parse input
 	 */
-	for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
+	for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
 		local_buf[i] = toupper(buf[i]);
 	}
 	local_buf[i] = '\0';
@@ -826,6 +826,8 @@
 	unsigned long source_addr;
 	unsigned long bytes_done;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	bytes_done = 0;
 	dev_info = bio->bi_bdev->bd_disk->private_data;
 	if (dev_info == NULL)
@@ -871,7 +873,7 @@
 		}
 		bytes_done += bvec.bv_len;
 	}
-	bio_endio(bio, 0);
+	bio_endio(bio);
 	return;
 fail:
 	bio_io_error(bio);
@@ -904,10 +906,10 @@
 
 	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
 	     i++) {
-		for (j = i; (dcssblk_segments[j] != ',')  &&
+		for (j = i; (j < DCSSBLK_PARM_LEN) &&
+			    (dcssblk_segments[j] != ',')  &&
 			    (dcssblk_segments[j] != '\0') &&
-			    (dcssblk_segments[j] != '(')  &&
-			    (j < DCSSBLK_PARM_LEN); j++)
+			    (dcssblk_segments[j] != '('); j++)
 		{
 			buf[j-i] = dcssblk_segments[j];
 		}
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 7d4e939..02871f1 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -190,6 +190,8 @@
 	unsigned long page_addr;
 	unsigned long bytes;
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if ((bio->bi_iter.bi_sector & 7) != 0 ||
 	    (bio->bi_iter.bi_size & 4095) != 0)
 		/* Request is not page-aligned. */
@@ -220,8 +222,7 @@
 			index++;
 		}
 	}
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio_endio(bio, 0);
+	bio_endio(bio);
 	return;
 fail:
 	bio_io_error(bio);
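
The dcssblk and xpram hunks above track two block-layer interface changes in this cycle: a make_request function is now expected to split over-sized bios itself via blk_queue_split(), and bio_endio() no longer takes an error argument (failures are signalled with bio_io_error() instead). A minimal sketch of the resulting driver pattern, with my_dev and my_transfer() as hypothetical stand-ins for the per-driver pieces:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static void my_make_request(struct request_queue *q, struct bio *bio)
    {
    	struct my_dev *dev = q->queuedata;	/* hypothetical driver data */
    	struct bio_vec bvec;
    	struct bvec_iter iter;

    	/* Let the core split bios that exceed the queue limits. */
    	blk_queue_split(q, &bio, q->bio_split);

    	bio_for_each_segment(bvec, bio, iter) {
    		if (my_transfer(dev, &bvec, iter.bi_sector))	/* hypothetical helper */
    			goto fail;
    	}
    	bio_endio(bio);		/* success path: no error argument anymore */
    	return;
    fail:
    	bio_io_error(bio);	/* completes the bio with an I/O error */
    }
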
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 75ffe99..7c511ad 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -413,6 +413,10 @@
 		else
 			/* Normal end. Copy residual count. */
 			rq->rescnt = irb->scsw.cmd.count;
+	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Interrupt without an outstanding request -> update all */
+		cp->update_flags = CON_UPDATE_ALL;
+		con3270_set_timer(cp, 1);
 	}
 	return RAW3270_IO_DONE;
 }
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 8de2deb..f7d9258 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -14,15 +14,21 @@
 #include "ctrlchar.h"
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static int ctrlchar_sysrq_key;
+static struct sysrq_work ctrlchar_sysrq;
 
 static void
 ctrlchar_handle_sysrq(struct work_struct *work)
 {
-	handle_sysrq(ctrlchar_sysrq_key);
+	struct sysrq_work *sysrq = container_of(work, struct sysrq_work, work);
+
+	handle_sysrq(sysrq->key);
 }
 
-static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
+void schedule_sysrq_work(struct sysrq_work *sw)
+{
+	INIT_WORK(&sw->work, ctrlchar_handle_sysrq);
+	schedule_work(&sw->work);
+}
 #endif
 
 
@@ -51,8 +57,8 @@
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* racy */
 	if (len == 3 && buf[1] == '-') {
-		ctrlchar_sysrq_key = buf[2];
-		schedule_work(&ctrlchar_work);
+		ctrlchar_sysrq.key = buf[2];
+		schedule_sysrq_work(&ctrlchar_sysrq);
 		return CTRLCHAR_SYSRQ;
 	}
 #endif
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
index 1a53552..59c2d6e 100644
--- a/drivers/s390/char/ctrlchar.h
+++ b/drivers/s390/char/ctrlchar.h
@@ -7,6 +7,8 @@
  */
 
 #include <linux/tty.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
 
 extern unsigned int
 ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty);
@@ -17,3 +19,13 @@
 #define CTRLCHAR_SYSRQ (3 << 8)
 
 #define CTRLCHAR_MASK (~0xffu)
+
+
+#ifdef CONFIG_MAGIC_SYSRQ
+struct sysrq_work {
+	int key;
+	struct work_struct work;
+};
+
+void schedule_sysrq_work(struct sysrq_work *sw);
+#endif
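
With the sysrq key now carried in a per-caller struct sysrq_work, each s390 console driver can queue its own sysrq request instead of sharing one static key. A minimal usage sketch, mirroring the sclp_vt220 hunk further down (handle_console_char() is a hypothetical caller on the input path; assumes CONFIG_MAGIC_SYSRQ):

    #include "ctrlchar.h"

    static struct sysrq_work console_sysrq;

    static void handle_console_char(char key)
    {
    	console_sysrq.key = key;
    	/* handle_sysrq() then runs from the system workqueue, in process context */
    	schedule_sysrq_work(&console_sysrq);
    }
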
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 9388963..12db8db 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -223,7 +223,7 @@
 	if (rc)
 		return rc;
 
-	ctl_set_bit(0, 63 - 22);
+	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	return 0;
 }
 
@@ -232,6 +232,6 @@
  */
 void diag_ftp_shutdown(void)
 {
-	ctl_clear_bit(0, 63 - 22);
+	irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
 	unregister_external_irq(EXT_IRQ_CP_SERVICE, diag_ftp_handler);
 }
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 0da3ae3..b7d6030 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -95,7 +95,7 @@
 		if (ascii_name[i] == '\0')
 			break;
 		ebcdic_name[i] = toupper(ascii_name[i]);
-	};
+	}
 	for (; i < 8; i++)
 		ebcdic_name[i] = ' ';
 	ASCEBC(ebcdic_name, 8);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 5e20513..f58bf4c 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -53,7 +53,7 @@
 /* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
 int sclp_console_pages = SCLP_CONSOLE_PAGES;
 /* Flag to indicate if buffer pages are dropped on buffer full condition */
-int sclp_console_drop = 0;
+int sclp_console_drop = 1;
 /* Number of times the console dropped buffer pages */
 unsigned long sclp_console_full;
 
@@ -79,8 +79,8 @@
 	int drop, rc;
 
 	rc = kstrtoint(str, 0, &drop);
-	if (!rc && drop)
-		sclp_console_drop = 1;
+	if (!rc)
+		sclp_console_drop = drop;
 	return 1;
 }
 
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index e9485fb..806239c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -25,6 +25,7 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/sclp.h>
+#include <asm/numa.h>
 
 #include "sclp.h"
 
@@ -388,11 +389,11 @@
 };
 
 static void __init align_to_block_size(unsigned long long *start,
-				       unsigned long long *size)
+				       unsigned long long *size,
+				       unsigned long long alignment)
 {
-	unsigned long long start_align, size_align, alignment;
+	unsigned long long start_align, size_align;
 
-	alignment = memory_block_size_bytes();
 	start_align = roundup(*start, alignment);
 	size_align = rounddown(*start + *size, alignment) - start_align;
 
@@ -404,8 +405,8 @@
 
 static void __init add_memory_merged(u16 rn)
 {
+	unsigned long long start, size, addr, block_size;
 	static u16 first_rn, num;
-	unsigned long long start, size;
 
 	if (rn && first_rn && (first_rn + num == rn)) {
 		num++;
@@ -423,9 +424,12 @@
 		goto skip_add;
 	if (memory_end_set && (start + size > memory_end))
 		size = memory_end - start;
-	align_to_block_size(&start, &size);
-	if (size)
-		add_memory(0, start, size);
+	block_size = memory_block_size_bytes();
+	align_to_block_size(&start, &size, block_size);
+	if (!size)
+		goto skip_add;
+	for (addr = start; addr < start + size; addr += block_size)
+		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
 skip_add:
 	first_rn = rn;
 	num = 1;
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index ae67386..68d6ee7 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -12,6 +12,7 @@
 #include <linux/wait.h>
 #include <linux/timer.h>
 #include <linux/kernel.h>
+#include <linux/sysrq.h>
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/tty_flip.h>
@@ -27,6 +28,7 @@
 
 #include <asm/uaccess.h>
 #include "sclp.h"
+#include "ctrlchar.h"
 
 #define SCLP_VT220_MAJOR		TTY_MAJOR
 #define SCLP_VT220_MINOR		65
@@ -477,6 +479,53 @@
 #define	SCLP_VT220_SESSION_STARTED	0x80
 #define SCLP_VT220_SESSION_DATA		0x00
 
+#ifdef CONFIG_MAGIC_SYSRQ
+
+static int sysrq_pressed;
+static struct sysrq_work sysrq;
+
+static void sclp_vt220_reset_session(void)
+{
+	sysrq_pressed = 0;
+}
+
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		/* Handle magic sys request */
+		if (buffer[i] == ('O' ^ 0100)) { /* CTRL-O */
+			/*
+			 * If pressed again, reset sysrq_pressed
+			 * and flip CTRL-O character
+			 */
+			sysrq_pressed = !sysrq_pressed;
+			if (sysrq_pressed)
+				continue;
+		} else if (sysrq_pressed) {
+			sysrq.key = buffer[i];
+			schedule_sysrq_work(&sysrq);
+			sysrq_pressed = 0;
+			continue;
+		}
+		tty_insert_flip_char(&sclp_vt220_port, buffer[i], 0);
+	}
+}
+
+#else
+
+static void sclp_vt220_reset_session(void)
+{
+}
+
+static void sclp_vt220_handle_input(const char *buffer, unsigned int count)
+{
+	tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+}
+
+#endif
+
 /*
  * Called by the SCLP to report incoming event buffers.
  */
@@ -492,12 +541,13 @@
 	switch (*buffer) {
 	case SCLP_VT220_SESSION_ENDED:
 	case SCLP_VT220_SESSION_STARTED:
+		sclp_vt220_reset_session();
 		break;
 	case SCLP_VT220_SESSION_DATA:
 		/* Send input to line discipline */
 		buffer++;
 		count--;
-		tty_insert_flip_string(&sclp_vt220_port, buffer, count);
+		sclp_vt220_handle_input(buffer, count);
 		tty_flip_buffer_push(&sclp_vt220_port);
 		break;
 	}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index e91b89d..e96fc7f 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -659,6 +659,10 @@
 		else
 			/* Normal end. Copy residual count. */
 			rq->rescnt = irb->scsw.cmd.count;
+	} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
+		/* Interrupt without an outstanding request -> update all */
+		tp->update_flags = TTY_UPDATE_ALL;
+		tty3270_set_timer(tp, 1);
 	}
 	return RAW3270_IO_DONE;
 }
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index e3bf885..548a189 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -21,6 +21,7 @@
 #include <asm/chsc.h>
 #include <asm/crw.h>
 #include <asm/isc.h>
+#include <asm/ebcdic.h>
 
 #include "css.h"
 #include "cio.h"
@@ -272,36 +273,6 @@
 	css_schedule_reprobe();
 }
 
-static int
-__get_chpid_from_lir(void *data)
-{
-	struct lir {
-		u8  iq;
-		u8  ic;
-		u16 sci;
-		/* incident-node descriptor */
-		u32 indesc[28];
-		/* attached-node descriptor */
-		u32 andesc[28];
-		/* incident-specific information */
-		u32 isinfo[28];
-	} __attribute__ ((packed)) *lir;
-
-	lir = data;
-	if (!(lir->iq&0x80))
-		/* NULL link incident record */
-		return -EINVAL;
-	if (!(lir->indesc[0]&0xc0000000))
-		/* node descriptor not valid */
-		return -EINVAL;
-	if (!(lir->indesc[0]&0x10000000))
-		/* don't handle device-type nodes - FIXME */
-		return -EINVAL;
-	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
-
-	return (u16) (lir->indesc[0]&0x000000ff);
-}
-
 struct chsc_sei_nt0_area {
 	u8  flags;
 	u8  vf;				/* validity flags */
@@ -341,22 +312,132 @@
 	} u;
 } __packed;
 
+/*
+ * Node Descriptor as defined in SA22-7204, "Common I/O-Device Commands"
+ */
+
+#define ND_VALIDITY_VALID	0
+#define ND_VALIDITY_OUTDATED	1
+#define ND_VALIDITY_INVALID	2
+
+struct node_descriptor {
+	/* Flags. */
+	union {
+		struct {
+			u32 validity:3;
+			u32 reserved:5;
+		} __packed;
+		u8 byte0;
+	} __packed;
+
+	/* Node parameters. */
+	u32 params:24;
+
+	/* Node ID. */
+	char type[6];
+	char model[3];
+	char manufacturer[3];
+	char plant[2];
+	char seq[12];
+	u16 tag;
+} __packed;
+
+/*
+ * Link Incident Record as defined in SA22-7202, "ESCON I/O Interface"
+ */
+
+#define LIR_IQ_CLASS_INFO		0
+#define LIR_IQ_CLASS_DEGRADED		1
+#define LIR_IQ_CLASS_NOT_OPERATIONAL	2
+
+struct lir {
+	struct {
+		u32 null:1;
+		u32 reserved:3;
+		u32 class:2;
+		u32 reserved2:2;
+	} __packed iq;
+	u32 ic:8;
+	u32 reserved:16;
+	struct node_descriptor incident_node;
+	struct node_descriptor attached_node;
+	u8 reserved2[32];
+} __packed;
+
+#define PARAMS_LEN	10	/* PARAMS=xx,xxxxxx */
+#define NODEID_LEN	35	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+
+/* Copy EBCDIC text, convert to ASCII and optionally add delimiter. */
+static char *store_ebcdic(char *dest, const char *src, unsigned long len,
+			  char delim)
+{
+	memcpy(dest, src, len);
+	EBCASC(dest, len);
+
+	if (delim)
+		dest[len++] = delim;
+
+	return dest + len;
+}
+
+/* Format node ID and parameters for output in LIR log message. */
+static void format_node_data(char *params, char *id, struct node_descriptor *nd)
+{
+	memset(params, 0, PARAMS_LEN);
+	memset(id, 0, NODEID_LEN);
+
+	if (nd->validity != ND_VALIDITY_VALID) {
+		strncpy(params, "n/a", PARAMS_LEN - 1);
+		strncpy(id, "n/a", NODEID_LEN - 1);
+		return;
+	}
+
+	/* PARAMS=xx,xxxxxx */
+	snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
+	/* NODEID=tttttt/mdl,mmm.ppssssssssssss,xxxx */
+	id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
+	id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
+	id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
+	id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
+	id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
+	sprintf(id, "%04X", nd->tag);
+}
+
 static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
 {
-	struct chp_id chpid;
-	int id;
+	struct lir *lir = (struct lir *) &sei_area->ccdf;
+	char iuparams[PARAMS_LEN], iunodeid[NODEID_LEN], auparams[PARAMS_LEN],
+	     aunodeid[NODEID_LEN];
 
-	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
-		      sei_area->rs, sei_area->rsid);
-	if (sei_area->rs != 4)
+	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
+		      sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
+
+	/* Ignore NULL Link Incident Records. */
+	if (lir->iq.null)
 		return;
-	id = __get_chpid_from_lir(sei_area->ccdf);
-	if (id < 0)
-		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
-	else {
-		chp_id_init(&chpid);
-		chpid.id = id;
-		chsc_chp_offline(chpid);
+
+	/* Inform user that a link requires maintenance actions because it has
+	 * become degraded or not operational. Note that this log message is
+	 * the primary intention behind a Link Incident Record. */
+
+	format_node_data(iuparams, iunodeid, &lir->incident_node);
+	format_node_data(auparams, aunodeid, &lir->attached_node);
+
+	switch (lir->iq.class) {
+	case LIR_IQ_CLASS_DEGRADED:
+		pr_warn("Link degraded: RS=%02x RSID=%04x IC=%02x "
+			"IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+			sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+			iunodeid, auparams, aunodeid);
+		break;
+	case LIR_IQ_CLASS_NOT_OPERATIONAL:
+		pr_err("Link stopped: RS=%02x RSID=%04x IC=%02x "
+		       "IUPARAMS=%s IUNODEID=%s AUPARAMS=%s AUNODEID=%s\n",
+		       sei_area->rs, sei_area->rsid, lir->ic, iuparams,
+		       iunodeid, auparams, aunodeid);
+		break;
+	default:
+		break;
 	}
 }
 
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index f3c4179..6acd0b5 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -540,7 +540,7 @@
 	if (rc)
 		goto out_unlock;
 	/* Perform operation. */
-	cdev->private->state = DEV_STATE_STEAL_LOCK,
+	cdev->private->state = DEV_STATE_STEAL_LOCK;
 	ccw_device_stlck_start(cdev, &data, &buffer[0], &buffer[32]);
 	spin_unlock_irq(sch->lock);
 	/* Wait for operation to finish. */
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index bee8c11..b3f44bc 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -336,7 +336,6 @@
 {
 	struct eadm_private *private;
 	unsigned long flags;
-	int ret = 0;
 
 	spin_lock_irqsave(sch->lock, flags);
 	if (!device_is_registered(&sch->dev))
@@ -356,7 +355,7 @@
 out_unlock:
 	spin_unlock_irqrestore(sch->lock, flags);
 
-	return ret;
+	return 0;
 }
 
 static struct css_device_id eadm_subchannel_ids[] = {
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 559a9dc..d78b3d6 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1372,7 +1372,7 @@
 
 	/* Wait for the test message to complete. */
 	for (i = 0; i < 6; i++) {
-		mdelay(300);
+		msleep(300);
 		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
 		if (status.response_code == AP_RESPONSE_NORMAL &&
 		    psmid == 0x0102030405060708ULL)
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index 899ffa1..f418527 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -182,7 +182,7 @@
 
 	/* Wait for the test message to complete. */
 	for (i = 0; i < 6; i++) {
-		mdelay(300);
+		msleep(300);
 		rc = ap_recv(ap_dev->qid, &psmid, reply, 4096);
 		if (rc == 0 && psmid == 0x0102030405060708ULL)
 			break;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2e65b98..a855669 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -390,10 +390,8 @@
 	return rc;
 }
 
-static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
 {
-	int rc = 0;
-
 	QETH_DBF_TEXT(SETUP , 2, "stopcard");
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
@@ -427,7 +425,6 @@
 		qeth_clear_cmd_buffers(&card->read);
 		qeth_clear_cmd_buffers(&card->write);
 	}
-	return rc;
 }
 
 static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 70eb2f61..a1aaa36 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2158,10 +2158,8 @@
 	return card ;
 }
 
-static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
+static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
 {
-	int rc = 0;
-
 	QETH_DBF_TEXT(SETUP, 2, "stopcard");
 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
@@ -2196,7 +2194,6 @@
 		qeth_clear_cmd_buffers(&card->read);
 		qeth_clear_cmd_buffers(&card->write);
 	}
-	return rc;
 }
 
 /*
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 01a7339..c00ac46 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -529,7 +529,7 @@
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
 
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 
 	return port;
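
The atomic_set_mask()/atomic_clear_mask() calls converted here and throughout the zfcp hunks that follow are a mechanical rename to the generic atomic bit helpers; the semantics are unchanged. A sketch of the mapping, assuming v is any atomic_t status word:

    atomic_t v = ATOMIC_INIT(0);

    /* formerly atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &v) */
    atomic_or(ZFCP_STATUS_COMMON_OPEN, &v);

    /* formerly atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &v) */
    atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &v);
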
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index acde3f5..3fb4109 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -190,7 +190,7 @@
 		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
 			if (scsi_device_get(sdev))
 				return NULL;
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
 				&zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
@@ -206,7 +206,7 @@
 		if (!get_device(&port->dev))
 			return NULL;
 		zfcp_erp_action_dismiss_port(port);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		erp_action->port = port;
@@ -217,7 +217,7 @@
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
 		kref_get(&adapter->ref);
 		zfcp_erp_action_dismiss_adapter(adapter);
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
+		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
 		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 		if (!(atomic_read(&adapter->status) &
@@ -254,7 +254,7 @@
 	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
 	if (!act)
 		goto out;
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
@@ -486,14 +486,14 @@
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
 		zfcp_dbf_rec_run("eraubl1", &adapter->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
 		zfcp_dbf_rec_run("erpubl1", &port->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
 static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
@@ -502,7 +502,7 @@
 
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
 		zfcp_dbf_rec_run("erlubl1", &sdev_to_zfcp(sdev)->erp_action);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
+	atomic_or(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -642,7 +642,7 @@
 	read_lock_irqsave(&adapter->erp_lock, flags);
 	if (list_empty(&adapter->erp_ready_head) &&
 	    list_empty(&adapter->erp_running_head)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_ERP_PENDING,
 					  &adapter->status);
 			wake_up(&adapter->erp_done_wqh);
 	}
@@ -665,16 +665,16 @@
 	int sleep = 1;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK, &adapter->status);
 
 	for (retries = 7; retries; retries--) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 				  &adapter->status);
 		write_lock_irq(&adapter->erp_lock);
 		zfcp_erp_action_to_running(erp_action);
 		write_unlock_irq(&adapter->erp_lock);
 		if (zfcp_fsf_exchange_config_data(erp_action)) {
-			atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+			atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 					  &adapter->status);
 			return ZFCP_ERP_FAILED;
 		}
@@ -692,7 +692,7 @@
 		sleep *= 2;
 	}
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 			  &adapter->status);
 
 	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_XCONFIG_OK))
@@ -764,7 +764,7 @@
 	/* all ports and LUNs are closed */
 	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+	atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 }
 
@@ -773,7 +773,7 @@
 	struct zfcp_adapter *adapter = act->adapter;
 
 	if (zfcp_qdio_open(adapter->qdio)) {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
+		atomic_andnot(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 				  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED,
 				  &adapter->status);
 		return ZFCP_ERP_FAILED;
@@ -784,7 +784,7 @@
 		return ZFCP_ERP_FAILED;
 	}
 
-	atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
+	atomic_or(ZFCP_STATUS_COMMON_OPEN, &adapter->status);
 
 	return ZFCP_ERP_SUCCEEDED;
 }
@@ -948,7 +948,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED,
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED,
 			  &zfcp_sdev->status);
 }
 
@@ -1187,18 +1187,18 @@
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &zfcp_sdev->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &erp_action->port->status);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+		atomic_andnot(ZFCP_STATUS_COMMON_ERP_INUSE,
 				  &erp_action->adapter->status);
 		break;
 	}
@@ -1422,19 +1422,19 @@
 	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
-	atomic_set_mask(mask, &adapter->status);
+	atomic_or(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list)
-		atomic_set_mask(common_mask, &port->status);
+		atomic_or(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host)
-		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_or(common_mask, &sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
@@ -1453,7 +1453,7 @@
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 
-	atomic_clear_mask(mask, &adapter->status);
+	atomic_andnot(mask, &adapter->status);
 
 	if (!common_mask)
 		return;
@@ -1463,7 +1463,7 @@
 
 	read_lock_irqsave(&adapter->port_list_lock, flags);
 	list_for_each_entry(port, &adapter->port_list, list) {
-		atomic_clear_mask(common_mask, &port->status);
+		atomic_andnot(common_mask, &port->status);
 		if (clear_counter)
 			atomic_set(&port->erp_counter, 0);
 	}
@@ -1471,7 +1471,7 @@
 
 	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, adapter->scsi_host) {
-		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		atomic_andnot(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
@@ -1491,7 +1491,7 @@
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	unsigned long flags;
 
-	atomic_set_mask(mask, &port->status);
+	atomic_or(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1499,7 +1499,7 @@
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
-			atomic_set_mask(common_mask,
+			atomic_or(common_mask,
 					&sdev_to_zfcp(sdev)->status);
 	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
@@ -1518,7 +1518,7 @@
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 	unsigned long flags;
 
-	atomic_clear_mask(mask, &port->status);
+	atomic_andnot(mask, &port->status);
 
 	if (!common_mask)
 		return;
@@ -1529,7 +1529,7 @@
 	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
 	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
-			atomic_clear_mask(common_mask,
+			atomic_andnot(common_mask,
 					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
@@ -1546,7 +1546,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_set_mask(mask, &zfcp_sdev->status);
+	atomic_or(mask, &zfcp_sdev->status);
 }
 
 /**
@@ -1558,7 +1558,7 @@
 {
 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(mask, &zfcp_sdev->status);
+	atomic_andnot(mask, &zfcp_sdev->status);
 
 	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 		atomic_set(&zfcp_sdev->erp_counter, 0);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 25d49f3..237688a 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -508,7 +508,7 @@
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
  out:
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	put_device(&port->dev);
 	kmem_cache_free(zfcp_fc_req_cache, fc_req);
 }
@@ -564,14 +564,14 @@
 	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
 		goto out;
 
-	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_or(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 
 	retval = zfcp_fc_adisc(port);
 	if (retval == 0)
 		return;
 
 	/* send of ADISC was not possible */
-	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+	atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");
 
 out:
@@ -640,7 +640,7 @@
 	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
 		return;
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);
+	atomic_andnot(ZFCP_STATUS_COMMON_NOESC, &port->status);
 
 	if ((port->supported_classes != 0) ||
 	    !list_empty(&port->unit_list))
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 21ec5e2..522a633 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -114,7 +114,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
 		return;
 
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 
 	zfcp_scsi_schedule_rports_block(adapter);
 
@@ -204,7 +204,7 @@
 		break;
 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
 		zfcp_fsf_link_down_info_eval(req, NULL);
-	};
+	}
 }
 
 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
@@ -345,7 +345,7 @@
 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
 		break;
 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
+		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
 				&adapter->status);
 		break;
 	case FSF_PROT_DUPLICATE_REQUEST_ID:
@@ -554,7 +554,7 @@
 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
 			return;
 		}
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		break;
 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
@@ -567,7 +567,7 @@
 
 		/* avoids adapter shutdown to be able to recognize
 		 * events such as LINK UP */
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
+		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
 				&adapter->status);
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
@@ -1394,9 +1394,9 @@
 		break;
 	case FSF_GOOD:
 		port->handle = header->port_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
+		atomic_or(ZFCP_STATUS_COMMON_OPEN |
 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
-		atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_BOXED,
+		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
 		                  &port->status);
 		/* check whether D_ID has changed during open */
 		/*
@@ -1677,10 +1677,10 @@
 	case FSF_PORT_BOXED:
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
 						  &sdev_to_zfcp(sdev)->status);
 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -1700,10 +1700,10 @@
 		/* can't use generic zfcp_erp_modify_port_status because
 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
 		 */
-		atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
+		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
 		shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
-				atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
+				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
 						  &sdev_to_zfcp(sdev)->status);
 		break;
 	}
@@ -1766,7 +1766,7 @@
 
 	zfcp_sdev = sdev_to_zfcp(sdev);
 
-	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
+	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
 			  &zfcp_sdev->status);
 
@@ -1822,7 +1822,7 @@
 
 	case FSF_GOOD:
 		zfcp_sdev->lun_handle = header->lun_handle;
-		atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
@@ -1913,7 +1913,7 @@
 		}
 		break;
 	case FSF_GOOD:
-		atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
+		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
 		break;
 	}
 }
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 495e1cb..dbf2b547 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -349,7 +349,7 @@
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
 	spin_lock_irq(&qdio->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
+	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	spin_unlock_irq(&qdio->req_q_lock);
 
 	wake_up(&qdio->req_q_wq);
@@ -384,7 +384,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 			  &qdio->adapter->status);
 
 	zfcp_qdio_setup_init_data(&init_data, qdio);
@@ -396,14 +396,14 @@
 		goto failed_qdio;
 
 	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
+		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
 				&qdio->adapter->status);
 
 	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
 	} else {
-		atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
+		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
 		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
 	}
 
@@ -427,7 +427,7 @@
 	/* set index of first available SBALS / number of available SBALS */
 	qdio->req_q_idx = 0;
 	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
 
 	if (adapter->scsi_host) {
 		adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
@@ -499,6 +499,6 @@
 
 	rc = ccw_device_siosl(adapter->ccw_device);
 	if (!rc)
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
+		atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
 				&adapter->status);
 }
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 82abfce..a209c34 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -299,7 +299,7 @@
 	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
 				       &pScript, GFP_KERNEL);
 	if(memory == NULL) {
-		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detatching\n");
+		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
 		return NULL;
 	}
 
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 456e1567..95f7a76 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -345,6 +345,7 @@
 source "drivers/scsi/bnx2i/Kconfig"
 source "drivers/scsi/bnx2fc/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
+source "drivers/scsi/cxlflash/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 91209e3..471d0879 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -102,6 +102,7 @@
 obj-$(CONFIG_SCSI_EATA)		+= eata.o
 obj-$(CONFIG_SCSI_DC395x)	+= dc395x.o
 obj-$(CONFIG_SCSI_AM53C974)	+= esp_scsi.o	am53c974.o
+obj-$(CONFIG_CXLFLASH)		+= cxlflash/
 obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index cac6b37..8086bd0 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -888,7 +888,7 @@
 	scb->sense_len = SENSE_SIZE;
 	scb->cdb_len = cmd->cmd_len;
 	if (scb->cdb_len >= IMAX_CDB) {
-		printk("max cdb length= %x\b", cmd->cmd_len);
+		printk("max cdb length= %x\n", cmd->cmd_len);
 		scb->cdb_len = IMAX_CDB;
 	}
 	scb->ident = (u8)(cmd->device->lun & 0xff) | DISC_ALLOW;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index c4829d8..64ab9ea 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -79,7 +79,7 @@
 
 static const struct ahc_hard_error_entry ahc_hard_errors[] = {
 	{ ILLHADDR,	"Illegal Host Access" },
-	{ ILLSADDR,	"Illegal Sequencer Address referrenced" },
+	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
 	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
 	{ SQPARERR,	"Sequencer Parity Error" },
 	{ DPARERR,	"Data-path Parity Error" },
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 4b135cc..31e8576 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -109,6 +109,7 @@
 		if (!io_handle->addr) {
 			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
 				   pci_name(asd_ha->pcidev));
+			err = -ENOMEM;
 			goto Err_unreq;
 		}
 	}
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 914c39f..6ac74fb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -3264,7 +3264,7 @@
 		reg->doneq_index = 0;
 		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
 		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
-			printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
+			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
 				acb->host->host_no);
 			return 1;
 		}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 7223b00..8367c11 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -851,6 +851,8 @@
 
 	if (bfad_im_scsi_vport_transport_template)
 		fc_release_transport(bfad_im_scsi_vport_transport_template);
+
+	idr_destroy(&bfad_im_port_index);
 }
 
 void
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 98d06d1..d5cdc47 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2051,9 +2051,49 @@
 	return rc;
 }
 
+static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
+				      struct cnic_fc_npiv_tbl *npiv_tbl)
+{
+	struct fc_vport_identifiers vpid;
+	uint i, created = 0;
+
+	if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
+		BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
+		goto done;
+	}
+
+	/* Sanity check the first entry to make sure it's not 0 */
+	if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 &&
+	    wwn_to_u64(npiv_tbl->wwpn[0]) == 0) {
+		BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n");
+		goto done;
+	}
+
+	vpid.roles = FC_PORT_ROLE_FCP_INITIATOR;
+	vpid.vport_type = FC_PORTTYPE_NPIV;
+	vpid.disable = false;
+
+	for (i = 0; i < npiv_tbl->count; i++) {
+		vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+		vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
+		scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
+		    "NPIV[%u]:%016llx-%016llx",
+		    created, vpid.port_name, vpid.node_name);
+		if (fc_vport_create(lport->host, 0, &vpid))
+			created++;
+		else
+			BNX2FC_HBA_DBG(lport, "Failed to create vport\n");
+	}
+done:
+	return created;
+}
+
 static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
 {
 	struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+	struct bnx2fc_hba *hba;
+	struct cnic_fc_npiv_tbl npiv_tbl;
+	struct fc_lport *lport;
 
 	if (interface->enabled == false) {
 		if (!ctlr->lp) {
@@ -2064,6 +2104,32 @@
 			interface->enabled = true;
 		}
 	}
+
+	/* Create static NPIV ports if any are contained in NVRAM */
+	hba = interface->hba;
+	lport = ctlr->lp;
+
+	if (!hba)
+		goto done;
+
+	if (!hba->cnic)
+		goto done;
+
+	if (!lport)
+		goto done;
+
+	if (!lport->host)
+		goto done;
+
+	if (!hba->cnic->get_fc_npiv_tbl)
+		goto done;
+
+	memset(&npiv_tbl, 0, sizeof(npiv_tbl));
+	if (hba->cnic->get_fc_npiv_tbl(hba->cnic, &npiv_tbl))
+		goto done;
+
+	bnx2fc_npiv_create_vports(lport, &npiv_tbl);
+done:
 	return 0;
 }
 
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
new file mode 100644
index 0000000..c052104
--- /dev/null
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -0,0 +1,11 @@
+#
+# IBM CXL-attached Flash Accelerator SCSI Driver
+#
+
+config CXLFLASH
+	tristate "Support for IBM CAPI Flash"
+	depends on PCI && SCSI && CXL && EEH
+	default m
+	help
+	  Allows CAPI Accelerated IO to Flash.
+	  If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
new file mode 100644
index 0000000..9e39866
--- /dev/null
+++ b/drivers/scsi/cxlflash/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CXLFLASH) += cxlflash.o
+cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
new file mode 100644
index 0000000..1c56037
--- /dev/null
+++ b/drivers/scsi/cxlflash/common.h
@@ -0,0 +1,208 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_COMMON_H
+#define _CXLFLASH_COMMON_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+
+#define MAX_CONTEXT  CXLFLASH_MAX_CONTEXT       /* num contexts per afu */
+
+#define CXLFLASH_BLOCK_SIZE	4096	/* 4K blocks */
+#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */
+#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)	/* SCSI wants
+								   max_sectors
+								   in units of
+								   512 byte
+								   sectors
+								*/
+
+#define NUM_RRQ_ENTRY    16     /* for master issued cmds */
+#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
+
+/* AFU command retry limit */
+#define MC_RETRY_CNT         5	/* sufficient for SCSI check and
+				   certain AFU errors */
+
+/* Command management definitions */
+#define CXLFLASH_NUM_CMDS	(2 * CXLFLASH_MAX_CMDS)	/* Must be a pow2 for
+							   alignment and more
+							   efficient array
+							   index derivation
+							 */
+
+#define CXLFLASH_MAX_CMDS               16
+#define CXLFLASH_MAX_CMDS_PER_LUN       CXLFLASH_MAX_CMDS
+
+static inline void check_sizes(void)
+{
+	BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
+}
+
+/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
+#define CMD_BUFSIZE     SIZE_4K
+
+/* flags in IOA status area for host use */
+#define B_DONE       0x01
+#define B_ERROR      0x02	/* set with B_DONE */
+#define B_TIMEOUT    0x04	/* set with B_DONE & B_ERROR */
+
+enum cxlflash_lr_state {
+	LINK_RESET_INVALID,
+	LINK_RESET_REQUIRED,
+	LINK_RESET_COMPLETE
+};
+
+enum cxlflash_init_state {
+	INIT_STATE_NONE,
+	INIT_STATE_PCI,
+	INIT_STATE_AFU,
+	INIT_STATE_SCSI
+};
+
+enum cxlflash_state {
+	STATE_NORMAL,	/* Normal running state, everything good */
+	STATE_LIMBO,	/* Limbo running state, trying to reset/recover */
+	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
+};
+
+/*
+ * Each context has its own set of resource handles that is visible
+ * only from that context.
+ */
+
+struct cxlflash_cfg {
+	struct afu *afu;
+	struct cxl_context *mcctx;
+
+	struct pci_dev *dev;
+	struct pci_device_id *dev_id;
+	struct Scsi_Host *host;
+
+	ulong cxlflash_regs_pci;
+
+	struct work_struct work_q;
+	enum cxlflash_init_state init_state;
+	enum cxlflash_lr_state lr_state;
+	int lr_port;
+
+	struct cxl_afu *cxl_afu;
+
+	struct pci_pool *cxlflash_cmd_pool;
+	struct pci_dev *parent_dev;
+
+	atomic_t recovery_threads;
+	struct mutex ctx_recovery_mutex;
+	struct mutex ctx_tbl_list_mutex;
+	struct ctx_info *ctx_tbl[MAX_CONTEXT];
+	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
+	struct file_operations cxl_fops;
+
+	atomic_t num_user_contexts;
+
+	/* Parameters that are LUN table related */
+	int last_lun_index[CXLFLASH_NUM_FC_PORTS];
+	int promote_lun_index;
+	struct list_head lluns; /* list of llun_info structs */
+
+	wait_queue_head_t tmf_waitq;
+	bool tmf_active;
+	wait_queue_head_t limbo_waitq;
+	enum cxlflash_state state;
+};
+
+struct afu_cmd {
+	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
+	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
+	spinlock_t slock;
+	struct completion cevent;
+	char *buf;		/* per command buffer */
+	struct afu *parent;
+	int slot;
+	atomic_t free;
+
+	u8 cmd_tmf:1;
+
+	/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
+	 * However for performance reasons the IOARCB/IOASA should be
+	 * cache line aligned.
+	 */
+} __aligned(cache_line_size());
+
+struct afu {
+	/* Fields requiring alignment go first. */
+
+	u64 rrq_entry[NUM_RRQ_ENTRY];	/* 128B RRQ */
+	/*
+	 * Command & data for AFU commands.
+	 */
+	struct afu_cmd cmd[CXLFLASH_NUM_CMDS];
+
+	/* Beware of alignment until here. Preferably introduce new
+	 * fields after this point.
+	 */
+
+	/* AFU HW */
+	struct cxl_ioctl_start_work work;
+	struct cxlflash_afu_map *afu_map;	/* entire MMIO map */
+	struct sisl_host_map *host_map;		/* MC host map */
+	struct sisl_ctrl_map *ctrl_map;		/* MC control map */
+
+	ctx_hndl_t ctx_hndl;	/* master's context handle */
+	u64 *hrrq_start;
+	u64 *hrrq_end;
+	u64 *hrrq_curr;
+	bool toggle;
+	bool read_room;
+	atomic64_t room;
+	u64 hb;
+	u32 cmd_couts;		/* Number of command checkouts */
+	u32 internal_lun;	/* User-desired LUN mode for this AFU */
+
+	char version[8];
+	u64 interface_version;
+
+	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
+
+};
+
+static inline u64 lun_to_lunid(u64 lun)
+{
+	u64 lun_id;
+
+	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
+	return swab64(lun_id);
+}
+
+int cxlflash_send_cmd(struct afu *, struct afu_cmd *);
+void cxlflash_wait_resp(struct afu *, struct afu_cmd *);
+int cxlflash_afu_reset(struct cxlflash_cfg *);
+struct afu_cmd *cxlflash_cmd_checkout(struct afu *);
+void cxlflash_cmd_checkin(struct afu_cmd *);
+int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
+void cxlflash_list_init(void);
+void cxlflash_term_global_luns(void);
+void cxlflash_free_errpage(void);
+int cxlflash_ioctl(struct scsi_device *, int, void __user *);
+void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
+int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
+void cxlflash_term_local_luns(struct cxlflash_cfg *);
+void cxlflash_restore_luntable(struct cxlflash_cfg *);
+
+#endif /* ifndef _CXLFLASH_COMMON_H */
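
CXLFLASH_NUM_CMDS must be a power of two (check_sizes() enforces this at build time) so that the round-robin slot in cxlflash_cmd_checkout() in main.c below can be derived with a mask instead of a modulo. An illustration of that index derivation, under the assumption that afu->cmd_couts is a free-running checkout counter:

    /* Wraps into [0, CXLFLASH_NUM_CMDS) without a division. */
    u32 slot = afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1);
    struct afu_cmd *cmd = &afu->cmd[slot];
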
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
new file mode 100644
index 0000000..d98ad0f
--- /dev/null
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -0,0 +1,266 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <misc/cxl.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi_host.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "sislite.h"
+#include "common.h"
+#include "vlun.h"
+#include "superpipe.h"
+
+/**
+ * create_local() - allocate and initialize a local LUN information structure
+ * @sdev:	SCSI device associated with LUN.
+ * @wwid:	World Wide Node Name for LUN.
+ *
+ * Return: Allocated local llun_info structure on success, NULL on failure
+ */
+static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
+{
+	struct llun_info *lli = NULL;
+
+	lli = kzalloc(sizeof(*lli), GFP_KERNEL);
+	if (unlikely(!lli)) {
+		pr_err("%s: could not allocate lli\n", __func__);
+		goto out;
+	}
+
+	lli->sdev = sdev;
+	lli->newly_created = true;
+	lli->host_no = sdev->host->host_no;
+	lli->in_table = false;
+
+	memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
+out:
+	return lli;
+}
+
+/**
+ * create_global() - allocate and initialize a global LUN information structure
+ * @sdev:	SCSI device associated with LUN.
+ * @wwid:	World Wide Node Name for LUN.
+ *
+ * Return: Allocated global glun_info structure on success, NULL on failure
+ */
+static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
+{
+	struct glun_info *gli = NULL;
+
+	gli = kzalloc(sizeof(*gli), GFP_KERNEL);
+	if (unlikely(!gli)) {
+		pr_err("%s: could not allocate gli\n", __func__);
+		goto out;
+	}
+
+	mutex_init(&gli->mutex);
+	memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
+out:
+	return gli;
+}
+
+/**
+ * refresh_local() - find and update local LUN information structure by WWID
+ * @cfg:	Internal structure associated with the host.
+ * @wwid:	WWID associated with LUN.
+ *
+ * When a LUN with the given WWID is found in the list, refresh its state by
+ * clearing its newly_created field.
+ *
+ * Return: Found local lun_info structure on success, NULL on failure
+ */
+static struct llun_info *refresh_local(struct cxlflash_cfg *cfg, u8 *wwid)
+{
+	struct llun_info *lli, *temp;
+
+	list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
+		if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) {
+			lli->newly_created = false;
+			return lli;
+		}
+
+	return NULL;
+}
+
+/**
+ * lookup_global() - find a global LUN information structure by WWID
+ * @wwid:	WWID associated with LUN.
+ *
+ * Return: Found global lun_info structure on success, NULL on failure
+ */
+static struct glun_info *lookup_global(u8 *wwid)
+{
+	struct glun_info *gli, *temp;
+
+	list_for_each_entry_safe(gli, temp, &global.gluns, list)
+		if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
+			return gli;
+
+	return NULL;
+}
+
+/**
+ * find_and_create_lun() - find or create a local LUN information structure
+ * @sdev:	SCSI device associated with LUN.
+ * @wwid:	WWID associated with LUN.
+ *
+ * The LUN is kept both in a local list (per adapter) and in a global list
+ * (across all adapters). Certain attributes of the LUN are local to the
+ * adapter (such as index, port selection mask, etc.).
+ * The block allocation map is shared across all adapters (i.e. associated
+ * with the global list). Since different attributes are associated with
+ * the per-adapter and global entries, allocate two separate structures for
+ * each LUN (one local, one global).
+ *
+ * Keep a pointer back from the local to the global entry.
+ *
+ * Return: Found/Allocated local lun_info structure on success, NULL on failure
+ */
+static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
+{
+	struct llun_info *lli = NULL;
+	struct glun_info *gli = NULL;
+	struct Scsi_Host *shost = sdev->host;
+	struct cxlflash_cfg *cfg = shost_priv(shost);
+
+	mutex_lock(&global.mutex);
+	if (unlikely(!wwid))
+		goto out;
+
+	lli = refresh_local(cfg, wwid);
+	if (lli)
+		goto out;
+
+	lli = create_local(sdev, wwid);
+	if (unlikely(!lli))
+		goto out;
+
+	gli = lookup_global(wwid);
+	if (gli) {
+		lli->parent = gli;
+		list_add(&lli->list, &cfg->lluns);
+		goto out;
+	}
+
+	gli = create_global(sdev, wwid);
+	if (unlikely(!gli)) {
+		kfree(lli);
+		lli = NULL;
+		goto out;
+	}
+
+	lli->parent = gli;
+	list_add(&lli->list, &cfg->lluns);
+
+	list_add(&gli->list, &global.gluns);
+
+out:
+	mutex_unlock(&global.mutex);
+	pr_debug("%s: returning %p\n", __func__, lli);
+	return lli;
+}
+
+/**
+ * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
+ * @cfg:	Internal structure associated with the host.
+ */
+void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
+{
+	struct llun_info *lli, *temp;
+
+	mutex_lock(&global.mutex);
+	list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
+		list_del(&lli->list);
+		kfree(lli);
+	}
+	mutex_unlock(&global.mutex);
+}
+
+/**
+ * cxlflash_list_init() - initializes the global LUN list
+ */
+void cxlflash_list_init(void)
+{
+	INIT_LIST_HEAD(&global.gluns);
+	mutex_init(&global.mutex);
+	global.err_page = NULL;
+}
+
+/**
+ * cxlflash_term_global_luns() - frees resources associated with global LUN list
+ */
+void cxlflash_term_global_luns(void)
+{
+	struct glun_info *gli, *temp;
+
+	mutex_lock(&global.mutex);
+	list_for_each_entry_safe(gli, temp, &global.gluns, list) {
+		list_del(&gli->list);
+		cxlflash_ba_terminate(&gli->blka.ba_lun);
+		kfree(gli);
+	}
+	mutex_unlock(&global.mutex);
+}
+
+/**
+ * cxlflash_manage_lun() - handles LUN management activities
+ * @sdev:	SCSI device associated with LUN.
+ * @manage:	Manage ioctl data structure.
+ *
+ * This routine is used to notify the driver about a LUN's WWID and associate
+ * SCSI devices (sdev) with a global LUN instance. Additionally it serves to
+ * change a LUN's operating mode: legacy or superpipe.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_manage_lun(struct scsi_device *sdev,
+			struct dk_cxlflash_manage_lun *manage)
+{
+	int rc = 0;
+	struct llun_info *lli = NULL;
+	u64 flags = manage->hdr.flags;
+	u32 chan = sdev->channel;
+
+	lli = find_and_create_lun(sdev, manage->wwid);
+	pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
+		 __func__, get_unaligned_le64(&manage->wwid[0]),
+		 get_unaligned_le64(&manage->wwid[8]),
+		 manage->hdr.flags, lli);
+	if (unlikely(!lli)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
+		if (lli->newly_created)
+			lli->port_sel = CHAN2PORT(chan);
+		else
+			lli->port_sel = BOTH_PORTS;
+		/* Store off lun in unpacked, AFU-friendly format */
+		lli->lun_id[chan] = lun_to_lunid(sdev->lun);
+		sdev->hostdata = lli;
+	} else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
+		if (lli->parent->mode != MODE_NONE)
+			rc = -EBUSY;
+		else
+			sdev->hostdata = NULL;
+	}
+
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
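
find_and_create_lun() keeps the per-adapter bookkeeping (llun_info) separate from the state shared across adapters (glun_info): repeated calls for the same WWID on one host reuse the cached local entry, while a second adapter that sees the same disk gets its own local entry pointing at the shared global one. A rough sketch of that fall-through, with list insertion, locking and error handling omitted:

    lli = refresh_local(cfg, wwid);		/* same adapter, already known? */
    if (!lli) {
    	lli = create_local(sdev, wwid);
    	gli = lookup_global(wwid);		/* seen via another adapter? */
    	if (!gli)
    		gli = create_global(sdev, wwid);
    	lli->parent = gli;			/* local entry points at shared state */
    }
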
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
new file mode 100644
index 0000000..3e3ccf1
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.c
@@ -0,0 +1,2494 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <asm/unaligned.h>
+
+#include <misc/cxl.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "main.h"
+#include "sislite.h"
+#include "common.h"
+
+MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
+MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
+MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
+MODULE_LICENSE("GPL");
+
+/**
+ * cxlflash_cmd_checkout() - checks out an AFU command
+ * @afu:	AFU to checkout from.
+ *
+ * Commands are checked out in a round-robin fashion. Note that since
+ * the command pool is larger than the hardware queue, the majority of
+ * times we will only loop once or twice before getting a command. The
+ * buffer and CDB within the command are initialized (zeroed) prior to
+ * returning.
+ *
+ * Return: The checked out command or NULL when command pool is empty.
+ */
+struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
+{
+	int k, dec = CXLFLASH_NUM_CMDS;
+	struct afu_cmd *cmd;
+
+	while (dec--) {
+		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));
+
+		cmd = &afu->cmd[k];
+
+		if (!atomic_dec_if_positive(&cmd->free)) {
+			pr_debug("%s: returning found index=%d\n",
+				 __func__, cmd->slot);
+			memset(cmd->buf, 0, CMD_BUFSIZE);
+			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+			return cmd;
+		}
+	}
+
+	return NULL;
+}
+
+/**
+ * cxlflash_cmd_checkin() - checks in an AFU command
+ * @cmd:	AFU command to checkin.
+ *
+ * Safe to pass commands that have already been checked in. Several
+ * internal tracking fields are reset as part of the checkin. Note
+ * that these are intentionally reset prior to toggling the free bit
+ * to avoid clobbering values in the event that the command is checked
+ * out right away.
+ */
+void cxlflash_cmd_checkin(struct afu_cmd *cmd)
+{
+	cmd->rcb.scp = NULL;
+	cmd->rcb.timeout = 0;
+	cmd->sa.ioasc = 0;
+	cmd->cmd_tmf = false;
+	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */
+
+	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
+		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
+		       __func__, cmd->slot);
+		return;
+	}
+
+	pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
+}
+
+/**
+ * process_cmd_err() - command error handler
+ * @cmd:	AFU command that experienced the error.
+ * @scp:	SCSI command associated with the AFU command in error.
+ *
+ * Translates error bits from AFU command to SCSI command results.
+ */
+static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
+{
+	struct sisl_ioarcb *ioarcb;
+	struct sisl_ioasa *ioasa;
+
+	if (unlikely(!cmd))
+		return;
+
+	ioarcb = &(cmd->rcb);
+	ioasa = &(cmd->sa);
+
+	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
+		pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
+			 __func__, cmd, scp);
+		scp->result = (DID_ERROR << 16);
+	}
+
+	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
+		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
+			 __func__, cmd, scp);
+		scp->result = (DID_ERROR << 16);
+	}
+
+	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
+		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
+		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
+		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
+		 ioasa->fc_extra);
+
+	if (ioasa->rc.scsi_rc) {
+		/* We have a SCSI status */
+		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
+			memcpy(scp->sense_buffer, ioasa->sense_data,
+			       SISL_SENSE_DATA_LEN);
+			scp->result = ioasa->rc.scsi_rc;
+		} else
+			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
+	}
+
+	/*
+	 * We encountered an error. Set scp->result based on nature
+	 * of error.
+	 */
+	if (ioasa->rc.fc_rc) {
+		/* We have an FC status */
+		switch (ioasa->rc.fc_rc) {
+		case SISL_FC_RC_LINKDOWN:
+			scp->result = (DID_REQUEUE << 16);
+			break;
+		case SISL_FC_RC_RESID:
+			/* This indicates an FCP resid underrun */
+			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
+				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
+				 * this error is handled elsewhere. If not,
+				 * it must be handled here; this is probably
+				 * an AFU bug and a retry is attempted to see
+				 * if that resolves it.
+				 */
+				scp->result = (DID_ERROR << 16);
+			}
+			break;
+		case SISL_FC_RC_RESIDERR:
+			/* Resid mismatch between adapter and device */
+		case SISL_FC_RC_TGTABORT:
+		case SISL_FC_RC_ABORTOK:
+		case SISL_FC_RC_ABORTFAIL:
+		case SISL_FC_RC_NOLOGI:
+		case SISL_FC_RC_ABORTPEND:
+		case SISL_FC_RC_WRABORTPEND:
+		case SISL_FC_RC_NOEXP:
+		case SISL_FC_RC_INUSE:
+			scp->result = (DID_ERROR << 16);
+			break;
+		}
+	}
+
+	if (ioasa->rc.afu_rc) {
+		/* We have an AFU error */
+		switch (ioasa->rc.afu_rc) {
+		case SISL_AFU_RC_NO_CHANNELS:
+			scp->result = (DID_MEDIUM_ERROR << 16);
+			break;
+		case SISL_AFU_RC_DATA_DMA_ERR:
+			switch (ioasa->afu_extra) {
+			case SISL_AFU_DMA_ERR_PAGE_IN:
+				/* Retry */
+				scp->result = (DID_IMM_RETRY << 16);
+				break;
+			case SISL_AFU_DMA_ERR_INVALID_EA:
+			default:
+				scp->result = (DID_ERROR << 16);
+			}
+			break;
+		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
+			/* Retry */
+			scp->result = (DID_ALLOC_FAILURE << 16);
+			break;
+		default:
+			scp->result = (DID_ERROR << 16);
+		}
+	}
+}
+
+/**
+ * cmd_complete() - command completion handler
+ * @cmd:	AFU command that has completed.
+ *
+ * Prepares and submits command that has either completed or timed out to
+ * the SCSI stack. Checks AFU command back into command pool for non-internal
+ * (rcb.scp populated) commands.
+ */
+static void cmd_complete(struct afu_cmd *cmd)
+{
+	struct scsi_cmnd *scp;
+	u32 resid;
+	ulong lock_flags;
+	struct afu *afu = cmd->parent;
+	struct cxlflash_cfg *cfg = afu->parent;
+	bool cmd_is_tmf;
+
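+	/* Mark the command done under its lock so that a racing timeout
+	 * path (cxlflash_context_reset()) sees B_DONE and backs off.
+	 */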
+	spin_lock_irqsave(&cmd->slock, lock_flags);
+	cmd->sa.host_use_b[0] |= B_DONE;
+	spin_unlock_irqrestore(&cmd->slock, lock_flags);
+
+	if (cmd->rcb.scp) {
+		scp = cmd->rcb.scp;
+		if (unlikely(cmd->sa.rc.afu_rc ||
+			     cmd->sa.rc.scsi_rc ||
+			     cmd->sa.rc.fc_rc))
+			process_cmd_err(cmd, scp);
+		else
+			scp->result = (DID_OK << 16);
+
+		resid = cmd->sa.resid;
+		cmd_is_tmf = cmd->cmd_tmf;
+		cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */
+
+		pr_debug("%s: calling scsi_set_resid, scp=%p "
+			 "result=%X resid=%d\n", __func__,
+			 scp, scp->result, resid);
+
+		scsi_set_resid(scp, resid);
+		scsi_dma_unmap(scp);
+		scp->scsi_done(scp);
+
+		if (cmd_is_tmf) {
+			spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+			cfg->tmf_active = false;
+			wake_up_all_locked(&cfg->tmf_waitq);
+			spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
+					       lock_flags);
+		}
+	} else
+		complete(&cmd->cevent);
+}
+
+/**
+ * send_tmf() - sends a Task Management Function (TMF)
+ * @afu:	AFU to checkout from.
+ * @scp:	SCSI command from stack.
+ * @tmfcmd:	TMF command to send.
+ *
+ * Return:
+ *	0 on success
+ *	SCSI_MLQUEUE_HOST_BUSY when host is busy
+ */
+static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
+{
+	struct afu_cmd *cmd;
+
+	u32 port_sel = scp->device->channel + 1;
+	short lflag = 0;
+	struct Scsi_Host *host = scp->device->host;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	ulong lock_flags;
+	int rc = 0;
+
+	cmd = cxlflash_cmd_checkout(afu);
+	if (unlikely(!cmd)) {
+		pr_err("%s: could not get a free command\n", __func__);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	/* If a Task Management Function is already active, wait for it to
+	 * complete before marking this one active.
+	 */
+	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	if (cfg->tmf_active)
+		wait_event_interruptible_locked_irq(cfg->tmf_waitq,
+						    !cfg->tmf_active);
+	cfg->tmf_active = true;
+	cmd->cmd_tmf = true;
+	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+
+	cmd->rcb.ctx_id = afu->ctx_hndl;
+	cmd->rcb.port_sel = port_sel;
+	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+
+	lflag = SISL_REQ_FLAGS_TMF_CMD;
+
+	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
+			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
+
+	/* Stash the scp in the reserved field, for reuse during interrupt */
+	cmd->rcb.scp = scp;
+
+	/* Copy the CDB from the cmd passed in */
+	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
+
+	/* Send the command */
+	rc = cxlflash_send_cmd(afu, cmd);
+	if (unlikely(rc)) {
+		cxlflash_cmd_checkin(cmd);
+		spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+		cfg->tmf_active = false;
+		spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+		goto out;
+	}
+
+	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
+	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+out:
+	return rc;
+}
+
+/**
+ * cxlflash_driver_info() - information handler for this host driver
+ * @host:	SCSI host associated with device.
+ *
+ * Return: A string describing the device.
+ */
+static const char *cxlflash_driver_info(struct Scsi_Host *host)
+{
+	return CXLFLASH_ADAPTER_NAME;
+}
+
+/**
+ * cxlflash_queuecommand() - sends a mid-layer request
+ * @host:	SCSI host associated with device.
+ * @scp:	SCSI command to send.
+ *
+ * Return:
+ *	0 on success
+ *	SCSI_MLQUEUE_HOST_BUSY when host is busy
+ */
+static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct afu *afu = cfg->afu;
+	struct pci_dev *pdev = cfg->dev;
+	struct afu_cmd *cmd;
+	u32 port_sel = scp->device->channel + 1;
+	int nseg, i, ncount;
+	struct scatterlist *sg;
+	ulong lock_flags;
+	short lflag = 0;
+	int rc = 0;
+
+	pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
+		 __func__, scp, host->host_no, scp->device->channel,
+		 scp->device->id, scp->device->lun,
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+	/* If a Task Management Function is active, wait for it to complete
+	 * before continuing with regular commands.
+	 */
+	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	if (cfg->tmf_active) {
+		spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+
+	switch (cfg->state) {
+	case STATE_LIMBO:
+		dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
+				    __func__);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	case STATE_FAILTERM:
+		dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
+				    __func__);
+		scp->result = (DID_NO_CONNECT << 16);
+		scp->scsi_done(scp);
+		rc = 0;
+		goto out;
+	default:
+		break;
+	}
+
+	cmd = cxlflash_cmd_checkout(afu);
+	if (unlikely(!cmd)) {
+		pr_err("%s: could not get a free command\n", __func__);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	cmd->rcb.ctx_id = afu->ctx_hndl;
+	cmd->rcb.port_sel = port_sel;
+	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
+
+	if (scp->sc_data_direction == DMA_TO_DEVICE)
+		lflag = SISL_REQ_FLAGS_HOST_WRITE;
+	else
+		lflag = SISL_REQ_FLAGS_HOST_READ;
+
+	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
+			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);
+
+	/* Stash the scp in the reserved field, for reuse during interrupt */
+	cmd->rcb.scp = scp;
+
+	nseg = scsi_dma_map(scp);
+	if (unlikely(nseg < 0)) {
+		dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
+			__func__, nseg);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
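+	/* The host template advertises no scatter-gather support
+	 * (sg_tablesize = SG_NONE), so at most a single segment is
+	 * expected here and one data_len/data_ea pair suffices.
+	 */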
+	ncount = scsi_sg_count(scp);
+	scsi_for_each_sg(scp, sg, ncount, i) {
+		cmd->rcb.data_len = sg_dma_len(sg);
+		cmd->rcb.data_ea = sg_dma_address(sg);
+	}
+
+	/* Copy the CDB from the scsi_cmnd passed in */
+	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
+
+	/* Send the command */
+	rc = cxlflash_send_cmd(afu, cmd);
+	if (unlikely(rc)) {
+		cxlflash_cmd_checkin(cmd);
+		scsi_dma_unmap(scp);
+	}
+
+out:
+	return rc;
+}
+
+/**
+ * cxlflash_eh_device_reset_handler() - reset a single LUN
+ * @scp:	SCSI command to send.
+ *
+ * Return:
+ *	SUCCESS as defined in scsi/scsi.h
+ *	FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
+{
+	int rc = SUCCESS;
+	struct Scsi_Host *host = scp->device->host;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct afu *afu = cfg->afu;
+	int rcr = 0;
+
+	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+		 host->host_no, scp->device->channel,
+		 scp->device->id, scp->device->lun,
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+	switch (cfg->state) {
+	case STATE_NORMAL:
+		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
+		if (unlikely(rcr))
+			rc = FAILED;
+		break;
+	case STATE_LIMBO:
+		wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
+		if (cfg->state == STATE_NORMAL)
+			break;
+		/* fall through */
+	default:
+		rc = FAILED;
+		break;
+	}
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_eh_host_reset_handler() - reset the host adapter
+ * @scp:	SCSI command from stack identifying host.
+ *
+ * Return:
+ *	SUCCESS as defined in scsi/scsi.h
+ *	FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
+{
+	int rc = SUCCESS;
+	int rcr = 0;
+	struct Scsi_Host *host = scp->device->host;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+
+	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+		 host->host_no, scp->device->channel,
+		 scp->device->id, scp->device->lun,
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+	switch (cfg->state) {
+	case STATE_NORMAL:
+		cfg->state = STATE_LIMBO;
+		scsi_block_requests(cfg->host);
+		cxlflash_mark_contexts_error(cfg);
+		rcr = cxlflash_afu_reset(cfg);
+		if (rcr) {
+			rc = FAILED;
+			cfg->state = STATE_FAILTERM;
+		} else
+			cfg->state = STATE_NORMAL;
+		wake_up_all(&cfg->limbo_waitq);
+		scsi_unblock_requests(cfg->host);
+		break;
+	case STATE_LIMBO:
+		wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
+		if (cfg->state == STATE_NORMAL)
+			break;
+		/* fall through */
+	default:
+		rc = FAILED;
+		break;
+	}
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_change_queue_depth() - change the queue depth for the device
+ * @sdev:	SCSI device destined for queue depth change.
+ * @qdepth:	Requested queue depth value to set.
+ *
+ * The requested queue depth is capped to the maximum supported value.
+ *
+ * Return: The actual queue depth set.
+ */
+static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
+		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
+
+	scsi_change_queue_depth(sdev, qdepth);
+	return sdev->queue_depth;
+}
+
+/**
+ * cxlflash_show_port_status() - queries and presents the current port status
+ * @dev:	Generic device associated with the host owning the port.
+ * @attr:	Device attribute representing the port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_port_status(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	char *disp_status;
+	int rc;
+	u32 port;
+	u64 status;
+	u64 *fc_regs;
+
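+	/* The attribute name is expected to be of the form "portN";
+	 * skip the "port" prefix and parse the port number.
+	 */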
+	rc = kstrtouint((attr->attr.name + 4), 10, &port);
+	if (rc || (port >= NUM_FC_PORTS))
+		return 0;
+
+	fc_regs = &afu->afu_map->global.fc_regs[port][0];
+	status =
+	    (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);
+
+	if (status == FC_MTIP_STATUS_ONLINE)
+		disp_status = "online";
+	else if (status == FC_MTIP_STATUS_OFFLINE)
+		disp_status = "offline";
+	else
+		disp_status = "unknown";
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
+}
+
+/**
+ * cxlflash_show_lun_mode() - presents the current LUN mode of the host
+ * @dev:	Generic device associated with the host.
+ * @attr:	Device attribute representing the lun mode.
+ * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_lun_mode(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
+}
+
+/**
+ * cxlflash_store_lun_mode() - sets the LUN mode of the host
+ * @dev:	Generic device associated with the host.
+ * @attr:	Device attribute representing the lun mode.
+ * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
+ * @count:	Length of data residing in @buf.
+ *
+ * The CXL Flash AFU supports a dummy LUN mode where the external
+ * links and storage are not required. Space on the FPGA is used
+ * to create 1 or 2 small LUNs which are presented to the system
+ * as if they were a normal storage device. This feature is useful
+ * during development and also provides manufacturing with a way
+ * to test the AFU without an actual device.
+ *
+ * 0 = external LUN[s] (default)
+ * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
+ * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
+ * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
+ * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
+ *
+ * Return: The number of bytes consumed from @buf (i.e. @count).
+ */
+static ssize_t cxlflash_store_lun_mode(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+	int rc;
+	u32 lun_mode;
+
+	rc = kstrtouint(buf, 10, &lun_mode);
+	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
+		afu->internal_lun = lun_mode;
+		cxlflash_afu_reset(cfg);
+		scsi_scan_host(cfg->host);
+	}
+
+	return count;
+}
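+
+/*
+ * Usage sketch: assuming this adapter is SCSI host 0, the LUN mode can be
+ * changed from user space via sysfs, which triggers the AFU reset and host
+ * rescan performed above:
+ *
+ *   echo 1 > /sys/class/scsi_host/host0/lun_mode
+ */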
+
+/**
+ * cxlflash_show_ioctl_version() - presents the current ioctl version of the host
+ * @dev:	Generic device associated with the host.
+ * @attr:	Device attribute representing the ioctl version.
+ * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_ioctl_version(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
+}
+
+/**
+ * cxlflash_show_dev_mode() - presents the current mode of the device
+ * @dev:	Generic device associated with the device.
+ * @attr:	Device attribute representing the device mode.
+ * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_dev_mode(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			sdev->hostdata ? "superpipe" : "legacy");
+}
+
+/**
+ * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
+ * @cfg:	Internal structure associated with the host.
+ */
+static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
+{
+	struct pci_dev *pdev = cfg->dev;
+
+	if (pci_channel_offline(pdev))
+		wait_event_timeout(cfg->limbo_waitq,
+				   !pci_channel_offline(pdev),
+				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
+}
+
+/*
+ * Host attributes
+ */
+static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
+static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
+static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
+		   cxlflash_store_lun_mode);
+static DEVICE_ATTR(ioctl_version, S_IRUGO, cxlflash_show_ioctl_version, NULL);
+
+static struct device_attribute *cxlflash_host_attrs[] = {
+	&dev_attr_port0,
+	&dev_attr_port1,
+	&dev_attr_lun_mode,
+	&dev_attr_ioctl_version,
+	NULL
+};
+
+/*
+ * Device attributes
+ */
+static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);
+
+static struct device_attribute *cxlflash_dev_attrs[] = {
+	&dev_attr_mode,
+	NULL
+};
+
+/*
+ * Host template
+ */
+static struct scsi_host_template driver_template = {
+	.module = THIS_MODULE,
+	.name = CXLFLASH_ADAPTER_NAME,
+	.info = cxlflash_driver_info,
+	.ioctl = cxlflash_ioctl,
+	.proc_name = CXLFLASH_NAME,
+	.queuecommand = cxlflash_queuecommand,
+	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
+	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
+	.change_queue_depth = cxlflash_change_queue_depth,
+	.cmd_per_lun = 16,
+	.can_queue = CXLFLASH_MAX_CMDS,
+	.this_id = -1,
+	.sg_tablesize = SG_NONE,	/* No scatter gather support. */
+	.max_sectors = CXLFLASH_MAX_SECTORS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = cxlflash_host_attrs,
+	.sdev_attrs = cxlflash_dev_attrs,
+};
+
+/*
+ * Device dependent values
+ */
+static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+
+/*
+ * PCI device binding table
+ */
+static struct pci_device_id cxlflash_pci_table[] = {
+	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
+	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
+
+/**
+ * free_mem() - free memory associated with the AFU
+ * @cfg:	Internal structure associated with the host.
+ */
+static void free_mem(struct cxlflash_cfg *cfg)
+{
+	int i;
+	char *buf = NULL;
+	struct afu *afu = cfg->afu;
+
+	if (cfg->afu) {
+		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+			buf = afu->cmd[i].buf;
+			if (!((u64)buf & (PAGE_SIZE - 1)))
+				free_page((ulong)buf);
+		}
+
+		free_pages((ulong)afu, get_order(sizeof(struct afu)));
+		cfg->afu = NULL;
+	}
+}
+
+/**
+ * stop_afu() - stops the AFU command timers and unmaps the MMIO space
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Safe to call with AFU in a partially allocated/initialized state.
+ */
+static void stop_afu(struct cxlflash_cfg *cfg)
+{
+	int i;
+	struct afu *afu = cfg->afu;
+
+	if (likely(afu)) {
+		for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
+			complete(&afu->cmd[i].cevent);
+
+		if (likely(afu->afu_map)) {
+			cxl_psa_unmap((void *)afu->afu_map);
+			afu->afu_map = NULL;
+		}
+	}
+}
+
+/**
+ * term_mc() - terminates the master context
+ * @cfg:	Internal structure associated with the host.
+ * @level:	Depth of allocation, where to begin waterfall tear down.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
+{
+	int rc = 0;
+	struct afu *afu = cfg->afu;
+
+	if (!afu || !cfg->mcctx) {
+		pr_err("%s: returning from term_mc with NULL afu or MC\n",
+		       __func__);
+		return;
+	}
+
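+	/* Tear down in a waterfall fashion: each case below intentionally
+	 * falls through to the next so that starting at @level undoes that
+	 * step and everything beneath it.
+	 */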
+	switch (level) {
+	case UNDO_START:
+		rc = cxl_stop_context(cfg->mcctx);
+		BUG_ON(rc);
+	case UNMAP_THREE:
+		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
+	case UNMAP_TWO:
+		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
+	case UNMAP_ONE:
+		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
+	case FREE_IRQ:
+		cxl_free_afu_irqs(cfg->mcctx);
+	case RELEASE_CONTEXT:
+		cfg->mcctx = NULL;
+	}
+}
+
+/**
+ * term_afu() - terminates the AFU
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Safe to call with AFU/MC in partially allocated/initialized state.
+ */
+static void term_afu(struct cxlflash_cfg *cfg)
+{
+	term_mc(cfg, UNDO_START);
+
+	if (cfg->afu)
+		stop_afu(cfg);
+
+	pr_debug("%s: returning\n", __func__);
+}
+
+/**
+ * cxlflash_remove() - PCI entry point to tear down host
+ * @pdev:	PCI device associated with the host.
+ *
+ * Safe to use as a cleanup in partially allocated/initialized state.
+ */
+static void cxlflash_remove(struct pci_dev *pdev)
+{
+	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+	ulong lock_flags;
+
+	/* If a Task Management Function is active, wait for it to complete
+	 * before continuing with remove.
+	 */
+	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	if (cfg->tmf_active)
+		wait_event_interruptible_locked_irq(cfg->tmf_waitq,
+						    !cfg->tmf_active);
+	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+
+	cfg->state = STATE_FAILTERM;
+	cxlflash_stop_term_user_contexts(cfg);
+
+	switch (cfg->init_state) {
+	case INIT_STATE_SCSI:
+		cxlflash_term_local_luns(cfg);
+		scsi_remove_host(cfg->host);
+		scsi_host_put(cfg->host);
+		/* Fall through */
+	case INIT_STATE_AFU:
+		term_afu(cfg);
+		/* Fall through */
+	case INIT_STATE_PCI:
+		pci_release_regions(cfg->dev);
+		pci_disable_device(pdev);
+		/* Fall through */
+	case INIT_STATE_NONE:
+		flush_work(&cfg->work_q);
+		free_mem(cfg);
+		break;
+	}
+
+	pr_debug("%s: returning\n", __func__);
+}
+
+/**
+ * alloc_mem() - allocates the AFU and its command pool
+ * @cfg:	Internal structure associated with the host.
+ *
+ * A partially allocated state remains on failure.
+ *
+ * Return:
+ *	0 on success
+ *	-ENOMEM on failure to allocate memory
+ */
+static int alloc_mem(struct cxlflash_cfg *cfg)
+{
+	int rc = 0;
+	int i;
+	char *buf = NULL;
+
+	/* This allocation is about 12K, i.e. only 1 64k page
+	 * and up to 4 4k pages
+	 */
+	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					    get_order(sizeof(struct afu)));
+	if (unlikely(!cfg->afu)) {
+		pr_err("%s: cannot get %d free pages\n",
+		       __func__, get_order(sizeof(struct afu)));
+		rc = -ENOMEM;
+		goto out;
+	}
+	cfg->afu->parent = cfg;
+	cfg->afu->afu_map = NULL;
+
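+	/* Command buffers are CMD_BUFSIZE each and are packed back to back;
+	 * a fresh page is allocated whenever the running buffer pointer is
+	 * page aligned (i.e. the previous page has been fully consumed).
+	 */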
+	for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
+		if (!((u64)buf & (PAGE_SIZE - 1))) {
+			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+			if (unlikely(!buf)) {
+				pr_err("%s: Allocate command buffers fail!\n",
+				       __func__);
+				rc = -ENOMEM;
+				free_mem(cfg);
+				goto out;
+			}
+		}
+
+		cfg->afu->cmd[i].buf = buf;
+		atomic_set(&cfg->afu->cmd[i].free, 1);
+		cfg->afu->cmd[i].slot = i;
+	}
+
+out:
+	return rc;
+}
+
+/**
+ * init_pci() - initializes the host as a PCI device
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return:
+ *	0 on success
+ *	-EIO on unable to communicate with device
+ *	A return code from the PCI sub-routines
+ */
+static int init_pci(struct cxlflash_cfg *cfg)
+{
+	struct pci_dev *pdev = cfg->dev;
+	int rc = 0;
+
+	cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
+	rc = pci_request_regions(pdev, CXLFLASH_NAME);
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"%s: Couldn't register memory range of registers\n",
+			__func__);
+		goto out;
+	}
+
+	rc = pci_enable_device(pdev);
+	if (rc || pci_channel_offline(pdev)) {
+		if (pci_channel_offline(pdev)) {
+			cxlflash_wait_for_pci_err_recovery(cfg);
+			rc = pci_enable_device(pdev);
+		}
+
+		if (rc) {
+			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
+				__func__);
+			cxlflash_wait_for_pci_err_recovery(cfg);
+			goto out_release_regions;
+		}
+	}
+
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc < 0) {
+		dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
+			__func__);
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	}
+
+	if (rc < 0) {
+		dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
+			__func__);
+		goto out_disable;
+	}
+
+	pci_set_master(pdev);
+
+	if (pci_channel_offline(pdev)) {
+		cxlflash_wait_for_pci_err_recovery(cfg);
+		if (pci_channel_offline(pdev)) {
+			rc = -EIO;
+			goto out_msi_disable;
+		}
+	}
+
+	rc = pci_save_state(pdev);
+
+	if (rc != PCIBIOS_SUCCESSFUL) {
+		dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
+			__func__);
+		rc = -EIO;
+		goto cleanup_nolog;
+	}
+
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+
+cleanup_nolog:
+out_msi_disable:
+	cxlflash_wait_for_pci_err_recovery(cfg);
+out_disable:
+	pci_disable_device(pdev);
+out_release_regions:
+	pci_release_regions(pdev);
+	goto out;
+
+}
+
+/**
+ * init_scsi() - adds the host to the SCSI stack and kicks off host scan
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return:
+ *	0 on success
+ *	A return code from adding the host
+ */
+static int init_scsi(struct cxlflash_cfg *cfg)
+{
+	struct pci_dev *pdev = cfg->dev;
+	int rc = 0;
+
+	rc = scsi_add_host(cfg->host, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
+			__func__, rc);
+		goto out;
+	}
+
+	scsi_scan_host(cfg->host);
+
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * set_port_online() - transitions the specified host FC port to online state
+ * @fc_regs:	Top of MMIO region defined for specified port.
+ *
+ * The provided MMIO region must be mapped prior to call. Online state means
+ * that the FC link layer has synced, completed the handshaking process, and
+ * is ready for login to start.
+ */
+static void set_port_online(u64 *fc_regs)
+{
+	u64 cmdcfg;
+
+	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
+	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
+	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
+	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
+}
+
+/**
+ * set_port_offline() - transitions the specified host FC port to offline state
+ * @fc_regs:	Top of MMIO region defined for specified port.
+ *
+ * The provided MMIO region must be mapped prior to call.
+ */
+static void set_port_offline(u64 *fc_regs)
+{
+	u64 cmdcfg;
+
+	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
+	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
+	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
+	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
+}
+
+/**
+ * wait_port_online() - waits for the specified host FC port to come online
+ * @fc_regs:	Top of MMIO region defined for specified port.
+ * @delay_us:	Number of microseconds to delay between reading port status.
+ * @nretry:	Number of cycles to retry reading port status.
+ *
+ * The provided MMIO region must be mapped prior to call. This will timeout
+ * when the cable is not plugged in.
+ *
+ * Return:
+ *	TRUE (1) when the specified port is online
+ *	FALSE (0) when the specified port fails to come online after timeout
+ *	-EINVAL when @delay_us is less than 1000
+ */
+static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
+{
+	u64 status;
+
+	if (delay_us < 1000) {
+		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
+		return -EINVAL;
+	}
+
+	do {
+		msleep(delay_us / 1000);
+		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
+		 nretry--);
+
+	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
+}
+
+/**
+ * wait_port_offline() - waits for the specified host FC port to go offline
+ * @fc_regs:	Top of MMIO region defined for specified port.
+ * @delay_us:	Number of microseconds to delay between reading port status.
+ * @nretry:	Number of cycles to retry reading port status.
+ *
+ * The provided MMIO region must be mapped prior to call.
+ *
+ * Return:
+ *	TRUE (1) when the specified port is offline
+ *	FALSE (0) when the specified port fails to go offline after timeout
+ *	-EINVAL when @delay_us is less than 1000
+ */
+static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
+{
+	u64 status;
+
+	if (delay_us < 1000) {
+		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
+		return -EINVAL;
+	}
+
+	do {
+		msleep(delay_us / 1000);
+		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
+		 nretry--);
+
+	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
+}
+
+/**
+ * afu_set_wwpn() - configures the WWPN for the specified host FC port
+ * @afu:	AFU associated with the host that owns the specified FC port.
+ * @port:	Port number being configured.
+ * @fc_regs:	Top of MMIO region defined for specified port.
+ * @wwpn:	The world-wide-port-number previously discovered for port.
+ *
+ * The provided MMIO region must be mapped prior to call. As part of the
+ * sequence to configure the WWPN, the port is toggled offline and then back
+ * online. This toggling action can cause this routine to delay up to a few
+ * seconds. When configured to use the internal LUN feature of the AFU, a
+ * failure to come online is overridden.
+ *
+ * Return:
+ *	0 when the WWPN is successfully written and the port comes back online
+ *	-1 when the port fails to go offline or come back up online
+ */
+static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
+{
+	int ret = 0;
+
+	set_port_offline(fc_regs);
+
+	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+			       FC_PORT_STATUS_RETRY_CNT)) {
+		pr_debug("%s: wait on port %d to go offline timed out\n",
+			 __func__, port);
+		ret = -1; /* but continue on to leave the port back online */
+	}
+
+	if (ret == 0)
+		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
+
+	set_port_online(fc_regs);
+
+	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+			      FC_PORT_STATUS_RETRY_CNT)) {
+		pr_debug("%s: wait on port %d to go online timed out\n",
+			 __func__, port);
+		ret = -1;
+
+		/*
+		 * Override for internal lun!!!
+		 */
+		if (afu->internal_lun) {
+			pr_debug("%s: Overriding port %d online timeout!!!\n",
+				 __func__, port);
+			ret = 0;
+		}
+	}
+
+	pr_debug("%s: returning rc=%d\n", __func__, ret);
+
+	return ret;
+}
+
+/**
+ * afu_link_reset() - resets the specified host FC port
+ * @afu:	AFU associated with the host that owns the specified FC port.
+ * @port:	Port number being configured.
+ * @fc_regs:	Top of MMIO region defined for specified port.
+ *
+ * The provided MMIO region must be mapped prior to call. The sequence to
+ * reset the port involves toggling it offline and then back online. This
+ * action can cause this routine to delay up to a few seconds. An effort
+ * is made to maintain the link with the device by switching the host to
+ * use the alternate port exclusively while the reset takes place.
+ */
+static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
+{
+	u64 port_sel;
+
+	/* first switch the AFU to the other links, if any */
+	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
+	port_sel &= ~(1ULL << port);
+	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
+	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
+
+	set_port_offline(fc_regs);
+	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+			       FC_PORT_STATUS_RETRY_CNT))
+		pr_err("%s: wait on port %d to go offline timed out\n",
+		       __func__, port);
+
+	set_port_online(fc_regs);
+	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
+			      FC_PORT_STATUS_RETRY_CNT))
+		pr_err("%s: wait on port %d to go online timed out\n",
+		       __func__, port);
+
+	/* switch back to include this port */
+	port_sel |= (1ULL << port);
+	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
+	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
+
+	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
+}
+
+/*
+ * Asynchronous interrupt information table
+ */
+static const struct asyc_intr_info ainfo[] = {
+	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
+	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
+	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
+	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
+	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
+	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
+	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
+	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
+	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
+	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
+	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
+	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
+	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
+	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
+	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
+	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
+	{0x0, "", 0, 0}		/* terminator */
+};
+
+/**
+ * find_ainfo() - locates and returns asynchronous interrupt information
+ * @status:	Status code set by AFU on error.
+ *
+ * Return: The located information or NULL when the status code is invalid.
+ */
+static const struct asyc_intr_info *find_ainfo(u64 status)
+{
+	const struct asyc_intr_info *info;
+
+	for (info = &ainfo[0]; info->status; info++)
+		if (info->status == status)
+			return info;
+
+	return NULL;
+}
+
+/**
+ * afu_err_intr_init() - clears and initializes the AFU for error interrupts
+ * @afu:	AFU associated with the host.
+ */
+static void afu_err_intr_init(struct afu *afu)
+{
+	int i;
+	u64 reg;
+
+	/* Global async interrupts: the AFU clears afu_ctrl on context exit
+	 * if async interrupts were sent to that context. This prevents the
+	 * AFU from sending further async interrupts when there is nobody
+	 * to receive them.
+	 */
+
+	/* mask all */
+	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
+	/* set LISN# to send and point to master context */
+	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
+
+	if (afu->internal_lun)
+		reg |= 1;	/* Bit 63 indicates local lun */
+	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
+	/* clear all */
+	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
+	/* unmask bits that are of interest */
+	/* note: afu can send an interrupt after this step */
+	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
+	/* clear again in case a bit came on after previous clear but before */
+	/* unmask */
+	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
+
+	/* Clear/Set internal lun bits */
+	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
+	reg &= SISL_FC_INTERNAL_MASK;
+	if (afu->internal_lun)
+		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
+	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
+
+	/* now clear FC errors */
+	for (i = 0; i < NUM_FC_PORTS; i++) {
+		writeq_be(0xFFFFFFFFU,
+			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
+		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
+	}
+
+	/* Sync interrupts for master's IOARRIN write. Note that unlike
+	 * asyncs, there can be no pending sync interrupts at this time
+	 * (this is a fresh context and master has not written IOARRIN
+	 * yet), so there is nothing to clear.
+	 */
+
+	/* set LISN#, it is always sent to the context that wrote IOARRIN */
+	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
+	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
+}
+
+/**
+ * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
+ * @irq:	Interrupt number.
+ * @data:	Private data provided at interrupt registration, the AFU.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
+{
+	struct afu *afu = (struct afu *)data;
+	u64 reg;
+	u64 reg_unmasked;
+
+	reg = readq_be(&afu->host_map->intr_status);
+	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
+
+	if (reg_unmasked == 0UL) {
+		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
+		       __func__, (u64)afu, reg);
+		goto cxlflash_sync_err_irq_exit;
+	}
+
+	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
+	       __func__, (u64)afu, reg);
+
+	writeq_be(reg_unmasked, &afu->host_map->intr_clear);
+
+cxlflash_sync_err_irq_exit:
+	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
+	return IRQ_HANDLED;
+}
+
+/**
+ * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
+ * @irq:	Interrupt number.
+ * @data:	Private data provided at interrupt registration, the AFU.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
+{
+	struct afu *afu = (struct afu *)data;
+	struct afu_cmd *cmd;
+	bool toggle = afu->toggle;
+	u64 entry,
+	    *hrrq_start = afu->hrrq_start,
+	    *hrrq_end = afu->hrrq_end,
+	    *hrrq_curr = afu->hrrq_curr;
+
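+	/* An RRQ entry is valid when its toggle bit matches the software
+	 * toggle; the sense of the bit flips each time the queue wraps,
+	 * which is how stale entries from the previous pass are skipped.
+	 */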
+	/* Process however many RRQ entries that are ready */
+	while (true) {
+		entry = *hrrq_curr;
+
+		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
+			break;
+
+		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
+		cmd_complete(cmd);
+
+		/* Advance to next entry or wrap and flip the toggle bit */
+		if (hrrq_curr < hrrq_end)
+			hrrq_curr++;
+		else {
+			hrrq_curr = hrrq_start;
+			toggle ^= SISL_RESP_HANDLE_T_BIT;
+		}
+	}
+
+	afu->hrrq_curr = hrrq_curr;
+	afu->toggle = toggle;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
+ * @irq:	Interrupt number.
+ * @data:	Private data provided at interrupt registration, the AFU.
+ *
+ * Return: Always return IRQ_HANDLED.
+ */
+static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
+{
+	struct afu *afu = (struct afu *)data;
+	struct cxlflash_cfg *cfg;
+	u64 reg_unmasked;
+	const struct asyc_intr_info *info;
+	struct sisl_global_map *global = &afu->afu_map->global;
+	u64 reg;
+	u8 port;
+	int i;
+
+	cfg = afu->parent;
+
+	reg = readq_be(&global->regs.aintr_status);
+	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
+
+	if (reg_unmasked == 0) {
+		pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
+		       __func__, reg);
+		goto out;
+	}
+
+	/* it is OK to clear AFU status before FC_ERROR */
+	writeq_be(reg_unmasked, &global->regs.aintr_clear);
+
+	/* check each bit that is on */
+	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
+		info = find_ainfo(1ULL << i);
+		if (!(reg_unmasked & 0x1) || !info)
+			continue;
+
+		port = info->port;
+
+		pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+		       __func__, port, info->desc,
+		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
+
+		/*
+		 * do link reset first, some OTHER errors will set FC_ERROR
+		 * again if cleared before or w/o a reset
+		 */
+		if (info->action & LINK_RESET) {
+			pr_err("%s: FC Port %d: resetting link\n",
+			       __func__, port);
+			cfg->lr_state = LINK_RESET_REQUIRED;
+			cfg->lr_port = port;
+			schedule_work(&cfg->work_q);
+		}
+
+		if (info->action & CLR_FC_ERROR) {
+			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
+
+			/*
+			 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
+			 * should be the same and tracing one is sufficient.
+			 */
+
+			pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
+			       __func__, port, reg);
+
+			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
+			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
+		}
+	}
+
+out:
+	pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
+	return IRQ_HANDLED;
+}
+
+/**
+ * start_context() - starts the master context
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return: A success or failure value from CXL services.
+ */
+static int start_context(struct cxlflash_cfg *cfg)
+{
+	int rc = 0;
+
+	rc = cxl_start_context(cfg->mcctx,
+			       cfg->afu->work.work_element_descriptor,
+			       NULL);
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * read_vpd() - obtains the WWPNs from VPD
+ * @cfg:	Internal structure associated with the host.
+ * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
+ *
+ * Return:
+ *	0 on success
+ *	-ENODEV when VPD or WWPN keywords not found
+ */
+static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
+{
+	struct pci_dev *dev = cfg->parent_dev;
+	int rc = 0;
+	int ro_start, ro_size, i, j, k;
+	ssize_t vpd_size;
+	char vpd_data[CXLFLASH_VPD_LEN];
+	char tmp_buf[WWPN_BUF_LEN] = { 0 };
+	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
+
+	/* Get the VPD data from the device */
+	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
+	if (unlikely(vpd_size <= 0)) {
+		pr_err("%s: Unable to read VPD (size = %ld)\n",
+		       __func__, vpd_size);
+		rc = -ENODEV;
+		goto out;
+	}
+
+	/* Get the read only section offset */
+	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
+				    PCI_VPD_LRDT_RO_DATA);
+	if (unlikely(ro_start < 0)) {
+		pr_err("%s: VPD Read-only data not found\n", __func__);
+		rc = -ENODEV;
+		goto out;
+	}
+
+	/* Get the read only section size, cap when extends beyond read VPD */
+	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
+	j = ro_size;
+	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+	if (unlikely((i + j) > vpd_size)) {
+		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
+			 __func__, (i + j), vpd_size);
+		ro_size = vpd_size - i;
+	}
+
+	/*
+	 * Find the offset of the WWPN tag within the read only
+	 * VPD data and validate the found field (partials are
+	 * no good to us). Convert the ASCII data to an integer
+	 * value. Note that we must copy to a temporary buffer
+	 * because the conversion service requires that the ASCII
+	 * string be terminated.
+	 */
+	for (k = 0; k < NUM_FC_PORTS; k++) {
+		j = ro_size;
+		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
+
+		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
+		if (unlikely(i < 0)) {
+			pr_err("%s: Port %d WWPN not found in VPD\n",
+			       __func__, k);
+			rc = -ENODEV;
+			goto out;
+		}
+
+		j = pci_vpd_info_field_size(&vpd_data[i]);
+		i += PCI_VPD_INFO_FLD_HDR_SIZE;
+		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
+			pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
+			       __func__, k);
+			rc = -ENODEV;
+			goto out;
+		}
+
+		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
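+		/* WWPN_LEN doubles as the conversion base here, i.e. the
+		 * ASCII WWPN is parsed as a hexadecimal value (assuming
+		 * WWPN_LEN is 16).
+		 */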
+		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
+		if (unlikely(rc)) {
+			pr_err("%s: Fail to convert port %d WWPN to integer\n",
+			       __func__, k);
+			rc = -ENODEV;
+			goto out;
+		}
+	}
+
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_context_reset() - timeout handler for AFU commands
+ * @cmd:	AFU command that timed out.
+ *
+ * Sends a reset to the AFU.
+ */
+void cxlflash_context_reset(struct afu_cmd *cmd)
+{
+	int nretry = 0;
+	u64 rrin = 0x1;
+	u64 room = 0;
+	struct afu *afu = cmd->parent;
+	ulong lock_flags;
+
+	pr_debug("%s: cmd=%p\n", __func__, cmd);
+
+	spin_lock_irqsave(&cmd->slock, lock_flags);
+
+	/* Already completed? */
+	if (cmd->sa.host_use_b[0] & B_DONE) {
+		spin_unlock_irqrestore(&cmd->slock, lock_flags);
+		return;
+	}
+
+	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
+	spin_unlock_irqrestore(&cmd->slock, lock_flags);
+
+	/*
+	 * We really want to send this reset at all costs, so spread
+	 * out wait time on successive retries for available room.
+	 */
+	do {
+		room = readq_be(&afu->host_map->cmd_room);
+		atomic64_set(&afu->room, room);
+		if (room)
+			goto write_rrin;
+		udelay(nretry);
+	} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+	pr_err("%s: no cmd_room to send reset\n", __func__);
+	return;
+
+write_rrin:
+	nretry = 0;
+	writeq_be(rrin, &afu->host_map->ioarrin);
+	do {
+		rrin = readq_be(&afu->host_map->ioarrin);
+		if (rrin != 0x1)
+			break;
+		/* Double delay each time */
+		udelay(1 << nretry);
+	} while (nretry++ < MC_ROOM_RETRY_CNT);
+}
+
+/**
+ * init_pcr() - initialize the provisioning and control registers
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Also sets up fast access to the mapped registers and initializes AFU
+ * command fields that never change.
+ */
+void init_pcr(struct cxlflash_cfg *cfg)
+{
+	struct afu *afu = cfg->afu;
+	struct sisl_ctrl_map *ctrl_map;
+	int i;
+
+	for (i = 0; i < MAX_CONTEXT; i++) {
+		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
+		/* Disrupt any clients that could be running, e.g. clients
+		 * that survived a master restart.
+		 */
+		writeq_be(0, &ctrl_map->rht_start);
+		writeq_be(0, &ctrl_map->rht_cnt_id);
+		writeq_be(0, &ctrl_map->ctx_cap);
+	}
+
+	/* copy frequently used fields into afu */
+	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
+	/* ctx_hndl is 16 bits in CAIA */
+	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
+	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
+
+	/* Program the Endian Control for the master context */
+	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
+
+	/* initialize cmd fields that never change */
+	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
+		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
+		afu->cmd[i].rcb.rrq = 0x0;
+	}
+}
+
+/**
+ * init_global() - initialize AFU global registers
+ * @cfg:	Internal structure associated with the host.
+ */
+int init_global(struct cxlflash_cfg *cfg)
+{
+	struct afu *afu = cfg->afu;
+	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
+	int i = 0, num_ports = 0;
+	int rc = 0;
+	u64 reg;
+
+	rc = read_vpd(cfg, &wwpn[0]);
+	if (rc) {
+		pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
+		goto out;
+	}
+
+	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
+
+	/* set up RRQ in AFU for master issued cmds */
+	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
+	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
+
+	/* AFU configuration */
+	reg = readq_be(&afu->afu_map->global.regs.afu_config);
+	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
+	/* Enable all auto retry options and control endianness. Leave
+	 * others at default: CTX_CAP write protected, mbox_r does not
+	 * clear on read, and checker on if dual AFU.
+	 */
+	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
+
+	/* global port select: select either port */
+	if (afu->internal_lun) {
+		/* only use port 0 */
+		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
+		num_ports = NUM_FC_PORTS - 1;
+	} else {
+		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
+		num_ports = NUM_FC_PORTS;
+	}
+
+	for (i = 0; i < num_ports; i++) {
+		/* unmask all errors (but they are still masked at AFU) */
+		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
+		/* clear CRC error cnt & set a threshold */
+		(void)readq_be(&afu->afu_map->global.
+			       fc_regs[i][FC_CNT_CRCERR / 8]);
+		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
+			  [FC_CRC_THRESH / 8]);
+
+		/* set WWPNs. If already programmed, wwpn[i] is 0 */
+		if (wwpn[i] != 0 &&
+		    afu_set_wwpn(afu, i,
+				 &afu->afu_map->global.fc_regs[i][0],
+				 wwpn[i])) {
+			pr_err("%s: failed to set WWPN on port %d\n",
+			       __func__, i);
+			rc = -EIO;
+			goto out;
+		}
+		/* Programming WWPN back to back causes additional
+		 * offline/online transitions and a PLOGI
+		 */
+		msleep(100);
+
+	}
+
+	/* Set up master's own CTX_CAP to allow real mode, host translation
+	 * tables, AFU commands and read/write GSCSI commands.
+	 */
+	/* First, unlock ctx_cap write by reading mbox */
+	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
+	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
+		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
+		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
+		  &afu->ctrl_map->ctx_cap);
+	/* init heartbeat */
+	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
+
+out:
+	return rc;
+}
+
+/**
+ * start_afu() - initializes and starts the AFU
+ * @cfg:	Internal structure associated with the host.
+ */
+static int start_afu(struct cxlflash_cfg *cfg)
+{
+	struct afu *afu = cfg->afu;
+	struct afu_cmd *cmd;
+
+	int i = 0;
+	int rc = 0;
+
+	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+		cmd = &afu->cmd[i];
+
+		init_completion(&cmd->cevent);
+		spin_lock_init(&cmd->slock);
+		cmd->parent = afu;
+	}
+
+	init_pcr(cfg);
+
+	/* initialize RRQ pointers */
+	afu->hrrq_start = &afu->rrq_entry[0];
+	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
+	afu->hrrq_curr = afu->hrrq_start;
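+	/* The first pass of RRQ entries is expected with the toggle bit set,
+	 * hence the initial value of 1 (see cxlflash_rrq_irq()).
+	 */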
+	afu->toggle = 1;
+
+	rc = init_global(cfg);
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * init_mc() - create and register as the master context
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return:
+ *	0 on success
+ *	-ENOMEM when unable to obtain a context from CXL services
+ *	A failure value from CXL services.
+ */
+static int init_mc(struct cxlflash_cfg *cfg)
+{
+	struct cxl_context *ctx;
+	struct device *dev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	int rc = 0;
+	enum undo_level level;
+
+	ctx = cxl_get_context(cfg->dev);
+	if (unlikely(!ctx))
+		return -ENOMEM;
+	cfg->mcctx = ctx;
+
+	/* Set it up as a master with the CXL */
+	cxl_set_master(ctx);
+
+	/* During initialization reset the AFU to start from a clean slate */
+	rc = cxl_afu_reset(cfg->mcctx);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
+			__func__, rc);
+		level = RELEASE_CONTEXT;
+		goto out;
+	}
+
+	rc = cxl_allocate_afu_irqs(ctx, 3);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
+			__func__, rc);
+		level = RELEASE_CONTEXT;
+		goto out;
+	}
+
+	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
+			     "SISL_MSI_SYNC_ERROR");
+	if (unlikely(rc <= 0)) {
+		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
+			__func__);
+		level = FREE_IRQ;
+		goto out;
+	}
+
+	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
+			     "SISL_MSI_RRQ_UPDATED");
+	if (unlikely(rc <= 0)) {
+		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
+			__func__);
+		level = UNMAP_ONE;
+		goto out;
+	}
+
+	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
+			     "SISL_MSI_ASYNC_ERROR");
+	if (unlikely(rc <= 0)) {
+		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
+			__func__);
+		level = UNMAP_TWO;
+		goto out;
+	}
+
+	rc = 0;
+
+	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
+	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
+	 * element (pe) that is embedded in the context (ctx)
+	 */
+	rc = start_context(cfg);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
+		level = UNMAP_THREE;
+		goto out;
+	}
+ret:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+out:
+	term_mc(cfg, level);
+	goto ret;
+}
+
+/**
+ * init_afu() - setup as master context and start AFU
+ * @cfg:	Internal structure associated with the host.
+ *
+ * This routine is a higher level of control for configuring the
+ * AFU on probe and reset paths.
+ *
+ * Return:
+ *	0 on success
+ *	-ENOMEM when unable to map the AFU MMIO space
+ *	A failure value from internal services.
+ */
+static int init_afu(struct cxlflash_cfg *cfg)
+{
+	u64 reg;
+	int rc = 0;
+	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
+
+	cxl_perst_reloads_same_image(cfg->cxl_afu, true);
+
+	rc = init_mc(cfg);
+	if (rc) {
+		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
+			__func__, rc);
+		goto err1;
+	}
+
+	/* Map the entire MMIO space of the AFU.
+	 */
+	afu->afu_map = cxl_psa_map(cfg->mcctx);
+	if (!afu->afu_map) {
+		rc = -ENOMEM;
+		term_mc(cfg, UNDO_START);
+		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
+		goto err1;
+	}
+
+	/* Don't byte reverse on reading afu_version, else the string form
+	 * will be backwards.
+	 */
+	reg = afu->afu_map->global.regs.afu_version;
+	memcpy(afu->version, &reg, 8);
+	afu->interface_version =
+	    readq_be(&afu->afu_map->global.regs.interface_version);
+	pr_debug("%s: afu version %s, interface version 0x%llX\n",
+		 __func__, afu->version, afu->interface_version);
+
+	rc = start_afu(cfg);
+	if (rc) {
+		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
+			__func__, rc);
+		term_mc(cfg, UNDO_START);
+		cxl_psa_unmap((void *)afu->afu_map);
+		afu->afu_map = NULL;
+		goto err1;
+	}
+
+	afu_err_intr_init(cfg->afu);
+	atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+
+	/* Restore the LUN mappings */
+	cxlflash_restore_luntable(cfg);
+err1:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_send_cmd() - sends an AFU command
+ * @afu:	AFU associated with the host.
+ * @cmd:	AFU command to send.
+ *
+ * Return:
+ *	0 on success
+ *	-1 on failure
+ */
+int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd)
+{
+	struct cxlflash_cfg *cfg = afu->parent;
+	int nretry = 0;
+	int rc = 0;
+	u64 room;
+	long newval;
+
+	/*
+	 * This routine is used by critical users such an AFU sync and to
+	 * send a task management function (TMF). Thus we want to retry a
+	 * bit before returning an error. To avoid the performance penalty
+	 * of MMIO, we spread the update of 'room' over multiple commands.
+	 */
+retry:
+	newval = atomic64_dec_if_positive(&afu->room);
+	if (!newval) {
+		do {
+			room = readq_be(&afu->host_map->cmd_room);
+			atomic64_set(&afu->room, room);
+			if (room)
+				goto write_ioarrin;
+			udelay(nretry);
+		} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+		pr_err("%s: no cmd_room to send 0x%X\n",
+		       __func__, cmd->rcb.cdb[0]);
+
+		goto no_room;
+	} else if (unlikely(newval < 0)) {
+		/* This should be rare. i.e. Only if two threads race and
+		 * decrement before the MMIO read is done. In this case
+		 * just benefit from the other thread having updated
+		 * afu->room.
+		 */
+		if (nretry++ < MC_ROOM_RETRY_CNT) {
+			udelay(nretry);
+			goto retry;
+		}
+
+		goto no_room;
+	}
+
+write_ioarrin:
+	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
+out:
+	pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
+		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+	return rc;
+
+no_room:
+	afu->read_room = true;
+	schedule_work(&cfg->work_q);
+	rc = SCSI_MLQUEUE_HOST_BUSY;
+	goto out;
+}
+
+/**
+ * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command
+ * @afu:	AFU associated with the host.
+ * @cmd:	AFU command that was sent.
+ */
+void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
+{
+	/* wait_for_completion_timeout() takes a relative timeout in jiffies */
+	ulong timeout = cmd->rcb.timeout * 2 * HZ;
+
+	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
+	if (!timeout)
+		cxlflash_context_reset(cmd);
+
+	if (unlikely(cmd->sa.ioasc != 0))
+		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
+		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
+		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
+		       cmd->sa.rc.fc_rc);
+}
+
+/**
+ * cxlflash_afu_sync() - builds and sends an AFU sync command
+ * @afu:	AFU associated with the host.
+ * @ctx_hndl_u:	Identifies context requesting sync.
+ * @res_hndl_u:	Identifies resource requesting sync.
+ * @mode:	Type of sync to issue (lightweight, heavyweight, global).
+ *
+ * The AFU can only take one sync command at a time. This routine enforces this
+ * limitation by using a mutex to provide exclusive access to the AFU during
+ * the sync. This design point requires calling threads to not be in interrupt
+ * context, due to the possibility of sleeping during concurrent sync operations.
+ *
+ * AFU sync operations are only necessary and allowed when the device is
+ * operating normally. When not operating normally, sync requests can occur as
+ * part of cleaning up resources associated with an adapter prior to removal.
+ * In this scenario, these requests are simply ignored (safe due to the AFU
+ * going away).
+ *
+ * Return:
+ *	0 on success
+ *	-1 on failure
+ */
+int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
+		      res_hndl_t res_hndl_u, u8 mode)
+{
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct afu_cmd *cmd = NULL;
+	int rc = 0;
+	int retry_cnt = 0;
+	static DEFINE_MUTEX(sync_active);
+
+	if (cfg->state != STATE_NORMAL) {
+		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
+		return 0;
+	}
+
+	mutex_lock(&sync_active);
+retry:
+	cmd = cxlflash_cmd_checkout(afu);
+	if (unlikely(!cmd)) {
+		retry_cnt++;
+		udelay(1000 * retry_cnt);
+		if (retry_cnt < MC_RETRY_CNT)
+			goto retry;
+		pr_err("%s: could not get a free command\n", __func__);
+		rc = -1;
+		goto out;
+	}
+
+	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
+
+	memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
+
+	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
+	cmd->rcb.port_sel = 0x0;	/* NA */
+	cmd->rcb.lun_id = 0x0;	/* NA */
+	cmd->rcb.data_len = 0x0;
+	cmd->rcb.data_ea = 0x0;
+	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
+
+	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
+	cmd->rcb.cdb[1] = mode;
+
+	/* The cdb is aligned, no unaligned accessors required */
+	*((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
+	*((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
+
+	rc = cxlflash_send_cmd(afu, cmd);
+	if (unlikely(rc))
+		goto out;
+
+	cxlflash_wait_resp(afu, cmd);
+
+	/* set on timeout */
+	if (unlikely((cmd->sa.ioasc != 0) ||
+		     (cmd->sa.host_use_b[0] & B_ERROR)))
+		rc = -1;
+out:
+	mutex_unlock(&sync_active);
+	if (cmd)
+		cxlflash_cmd_checkin(cmd);
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
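+
+/*
+ * Usage sketch (illustrative, mirroring _cxlflash_disk_release() in
+ * superpipe.c): a caller holding a valid context and resource handle can
+ * request a heavyweight sync after tearing down an RHT entry with
+ *
+ *	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+ *
+ * where the mode constants (AFU_LW_SYNC, AFU_HW_SYNC, AFU_GSYNC) come from
+ * sislite.h.
+ */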
+
+/**
+ * cxlflash_afu_reset() - resets the AFU
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Return:
+ *	0 on success
+ *	A failure value from internal services.
+ */
+int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
+{
+	int rc = 0;
+	/* Stop the context before the reset. Since the context is
+	 * no longer available, restart it after the reset is complete.
+	 */
+
+	term_afu(cfg);
+
+	rc = init_afu(cfg);
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_worker_thread() - work thread handler for the AFU
+ * @work:	Work structure contained within the cxlflash structure associated with the host.
+ *
+ * Handles the following events:
+ * - Link reset, which cannot be performed in interrupt context because it
+ *   can block for up to a few seconds
+ * - Read AFU command room
+ */
+static void cxlflash_worker_thread(struct work_struct *work)
+{
+	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
+						work_q);
+	struct afu *afu = cfg->afu;
+	int port;
+	ulong lock_flags;
+
+	/* Avoid MMIO if the device has failed */
+
+	if (cfg->state != STATE_NORMAL)
+		return;
+
+	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
+
+	if (cfg->lr_state == LINK_RESET_REQUIRED) {
+		port = cfg->lr_port;
+		if (port < 0)
+			pr_err("%s: invalid port index %d\n", __func__, port);
+		else {
+			spin_unlock_irqrestore(cfg->host->host_lock,
+					       lock_flags);
+
+			/* The reset can block... */
+			afu_link_reset(afu, port,
+				       &afu->afu_map->
+				       global.fc_regs[port][0]);
+			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
+		}
+
+		cfg->lr_state = LINK_RESET_COMPLETE;
+	}
+
+	if (afu->read_room) {
+		atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
+		afu->read_room = false;
+	}
+
+	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
+}
+
+/**
+ * cxlflash_probe() - PCI entry point to add host
+ * @pdev:	PCI device associated with the host.
+ * @dev_id:	PCI device id associated with device.
+ *
+ * Return: 0 on success / non-zero on failure
+ */
+static int cxlflash_probe(struct pci_dev *pdev,
+			  const struct pci_device_id *dev_id)
+{
+	struct Scsi_Host *host;
+	struct cxlflash_cfg *cfg = NULL;
+	struct device *phys_dev;
+	struct dev_dependent_vals *ddv;
+	int rc = 0;
+
+	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
+		__func__, pdev->irq);
+
+	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
+	driver_template.max_sectors = ddv->max_sectors;
+
+	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
+	if (!host) {
+		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
+			__func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
+	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
+	host->max_channel = NUM_FC_PORTS - 1;
+	host->unique_id = host->host_no;
+	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
+
+	cfg = (struct cxlflash_cfg *)host->hostdata;
+	cfg->host = host;
+	rc = alloc_mem(cfg);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
+			__func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	cfg->init_state = INIT_STATE_NONE;
+	cfg->dev = pdev;
+
+	/*
+	 * The promoted LUNs move to the top of the LUN table. The rest stay
+	 * on the bottom half. The bottom half grows from the end
+	 * (index = 255), whereas the top half grows from the beginning
+	 * (index = 0).
+	 */
+	cfg->promote_lun_index  = 0;
+	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
+	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
+
+	cfg->dev_id = (struct pci_device_id *)dev_id;
+	cfg->mcctx = NULL;
+
+	init_waitqueue_head(&cfg->tmf_waitq);
+	init_waitqueue_head(&cfg->limbo_waitq);
+
+	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
+	cfg->lr_state = LINK_RESET_INVALID;
+	cfg->lr_port = -1;
+	mutex_init(&cfg->ctx_tbl_list_mutex);
+	mutex_init(&cfg->ctx_recovery_mutex);
+	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
+	INIT_LIST_HEAD(&cfg->lluns);
+
+	pci_set_drvdata(pdev, cfg);
+
+	/* Use the special service provided to look up the physical
+	 * PCI device, since we are called on the probe of the virtual
+	 * PCI host bus (vphb)
+	 */
+	phys_dev = cxl_get_phys_dev(pdev);
+	if (!dev_is_pci(phys_dev)) {
+		pr_err("%s: not a pci dev\n", __func__);
+		rc = -ENODEV;
+		goto out_remove;
+	}
+	cfg->parent_dev = to_pci_dev(phys_dev);
+
+	cfg->cxl_afu = cxl_pci_to_afu(pdev);
+
+	rc = init_pci(cfg);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: call to init_pci "
+			"failed rc=%d!\n", __func__, rc);
+		goto out_remove;
+	}
+	cfg->init_state = INIT_STATE_PCI;
+
+	rc = init_afu(cfg);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: call to init_afu "
+			"failed rc=%d!\n", __func__, rc);
+		goto out_remove;
+	}
+	cfg->init_state = INIT_STATE_AFU;
+
+	rc = init_scsi(cfg);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: call to init_scsi "
+			"failed rc=%d!\n", __func__, rc);
+		goto out_remove;
+	}
+	cfg->init_state = INIT_STATE_SCSI;
+
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+
+out_remove:
+	cxlflash_remove(pdev);
+	goto out;
+}
+
+/**
+ * cxlflash_pci_error_detected() - called when a PCI error is detected
+ * @pdev:	PCI device struct.
+ * @state:	PCI channel state.
+ *
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
+						    pci_channel_state_t state)
+{
+	int rc = 0;
+	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+	struct device *dev = &cfg->dev->dev;
+
+	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
+
+	switch (state) {
+	case pci_channel_io_frozen:
+		cfg->state = STATE_LIMBO;
+
+		/* Turn off legacy I/O */
+		scsi_block_requests(cfg->host);
+		rc = cxlflash_mark_contexts_error(cfg);
+		if (unlikely(rc))
+			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
+				__func__, rc);
+		term_mc(cfg, UNDO_START);
+		stop_afu(cfg);
+
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		cfg->state = STATE_FAILTERM;
+		wake_up_all(&cfg->limbo_waitq);
+		scsi_unblock_requests(cfg->host);
+		return PCI_ERS_RESULT_DISCONNECT;
+	default:
+		break;
+	}
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * cxlflash_pci_slot_reset() - called when PCI slot has been reset
+ * @pdev:	PCI device struct.
+ *
+ * This routine is called by the pci error recovery code after the PCI
+ * slot has been reset, just before we should resume normal operations.
+ *
+ * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
+{
+	int rc = 0;
+	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+	struct device *dev = &cfg->dev->dev;
+
+	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
+
+	rc = init_afu(cfg);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * cxlflash_pci_resume() - called when normal operation can resume
+ * @pdev:	PCI device struct
+ */
+static void cxlflash_pci_resume(struct pci_dev *pdev)
+{
+	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
+	struct device *dev = &cfg->dev->dev;
+
+	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
+
+	cfg->state = STATE_NORMAL;
+	wake_up_all(&cfg->limbo_waitq);
+	scsi_unblock_requests(cfg->host);
+}
+
+static const struct pci_error_handlers cxlflash_err_handler = {
+	.error_detected = cxlflash_pci_error_detected,
+	.slot_reset = cxlflash_pci_slot_reset,
+	.resume = cxlflash_pci_resume,
+};
+
+/*
+ * PCI device structure
+ */
+static struct pci_driver cxlflash_driver = {
+	.name = CXLFLASH_NAME,
+	.id_table = cxlflash_pci_table,
+	.probe = cxlflash_probe,
+	.remove = cxlflash_remove,
+	.err_handler = &cxlflash_err_handler,
+};
+
+/**
+ * init_cxlflash() - module entry point
+ *
+ * Return: 0 on success / non-zero on failure
+ */
+static int __init init_cxlflash(void)
+{
+	pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
+		__func__, CXLFLASH_DRIVER_DATE);
+
+	cxlflash_list_init();
+
+	return pci_register_driver(&cxlflash_driver);
+}
+
+/**
+ * exit_cxlflash() - module exit point
+ */
+static void __exit exit_cxlflash(void)
+{
+	cxlflash_term_global_luns();
+	cxlflash_free_errpage();
+
+	pci_unregister_driver(&cxlflash_driver);
+}
+
+module_init(init_cxlflash);
+module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
new file mode 100644
index 0000000..cf0e809
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.h
@@ -0,0 +1,108 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_MAIN_H
+#define _CXLFLASH_MAIN_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+
+#define CXLFLASH_NAME		"cxlflash"
+#define CXLFLASH_ADAPTER_NAME	"IBM POWER CXL Flash Adapter"
+#define CXLFLASH_DRIVER_DATE	"(August 13, 2015)"
+
+#define PCI_DEVICE_ID_IBM_CORSA	0x04F0
+#define CXLFLASH_SUBS_DEV_ID	0x04F0
+
+/* Since there is only one target, make it 0 */
+#define CXLFLASH_TARGET		0
+#define CXLFLASH_MAX_CDB_LEN	16
+
+/* Really only one target per bus since the Texan is directly attached */
+#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS	1
+#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET	65536
+
+#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT	(120 * HZ)
+
+#define NUM_FC_PORTS	CXLFLASH_NUM_FC_PORTS	/* ports per AFU */
+
+/* FC defines */
+#define FC_MTIP_CMDCONFIG 0x010
+#define FC_MTIP_STATUS 0x018
+
+#define FC_PNAME 0x300
+#define FC_CONFIG 0x320
+#define FC_CONFIG2 0x328
+#define FC_STATUS 0x330
+#define FC_ERROR 0x380
+#define FC_ERRCAP 0x388
+#define FC_ERRMSK 0x390
+#define FC_CNT_CRCERR 0x538
+#define FC_CRC_THRESH 0x580
+
+#define FC_MTIP_CMDCONFIG_ONLINE	0x20ULL
+#define FC_MTIP_CMDCONFIG_OFFLINE	0x40ULL
+
+#define FC_MTIP_STATUS_MASK		0x30ULL
+#define FC_MTIP_STATUS_ONLINE		0x20ULL
+#define FC_MTIP_STATUS_OFFLINE		0x10ULL
+
+/* TIMEOUT and RETRY definitions */
+
+/* AFU command timeout values */
+#define MC_AFU_SYNC_TIMEOUT	5	/* 5 secs */
+
+/* AFU command room retry limit */
+#define MC_ROOM_RETRY_CNT	10
+
+/* FC CRC clear periodic timer */
+#define MC_CRC_THRESH 100	/* threshold in 5 mins */
+
+#define FC_PORT_STATUS_RETRY_CNT 100	/* 100 100ms retries = 10 seconds */
+#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000	/* microseconds */
+
+/* VPD defines */
+#define CXLFLASH_VPD_LEN	256
+#define WWPN_LEN	16
+#define WWPN_BUF_LEN	(WWPN_LEN + 1)
+
+enum undo_level {
+	RELEASE_CONTEXT = 0,
+	FREE_IRQ,
+	UNMAP_ONE,
+	UNMAP_TWO,
+	UNMAP_THREE,
+	UNDO_START
+};
+
+struct dev_dependent_vals {
+	u64 max_sectors;
+};
+
+struct asyc_intr_info {
+	u64 status;
+	char *desc;
+	u8 port;
+	u8 action;
+#define CLR_FC_ERROR	0x01
+#define LINK_RESET	0x02
+};
+
+#ifndef CONFIG_CXL_EEH
+#define cxl_perst_reloads_same_image(_a, _b) do { } while (0)
+#endif
+
+#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
new file mode 100644
index 0000000..63bf394
--- /dev/null
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -0,0 +1,472 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _SISLITE_H
+#define _SISLITE_H
+
+#include <linux/types.h>
+
+typedef u16 ctx_hndl_t;
+typedef u32 res_hndl_t;
+
+#define SIZE_4K		4096
+#define SIZE_64K	65536
+
+/*
+ * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
+ * except for SCSI CDB which remains big endian per SCSI standards.
+ */
+struct sisl_ioarcb {
+	u16 ctx_id;		/* ctx_hndl_t */
+	u16 req_flags;
+#define SISL_REQ_FLAGS_RES_HNDL       0x8000U	/* bit 0 (MSB) */
+#define SISL_REQ_FLAGS_PORT_LUN_ID    0x0000U
+
+#define SISL_REQ_FLAGS_SUP_UNDERRUN   0x4000U	/* bit 1 */
+
+#define SISL_REQ_FLAGS_TIMEOUT_SECS   0x0000U	/* bits 8,9 */
+#define SISL_REQ_FLAGS_TIMEOUT_MSECS  0x0040U
+#define SISL_REQ_FLAGS_TIMEOUT_USECS  0x0080U
+#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
+
+#define SISL_REQ_FLAGS_TMF_CMD        0x0004u	/* bit 13 */
+
+#define SISL_REQ_FLAGS_AFU_CMD        0x0002U	/* bit 14 */
+
+#define SISL_REQ_FLAGS_HOST_WRITE     0x0001U	/* bit 15 (LSB) */
+#define SISL_REQ_FLAGS_HOST_READ      0x0000U
+
+	union {
+		u32 res_hndl;	/* res_hndl_t */
+		u32 port_sel;	/* this is a selection mask:
+				 * 0x1 -> port#0 can be selected,
+				 * 0x2 -> port#1 can be selected.
+				 * Can be bitwise ORed.
+				 */
+	};
+	u64 lun_id;
+	u32 data_len;		/* 4K for read/write */
+	u32 ioadl_len;
+	union {
+		u64 data_ea;	/* min 16 byte aligned */
+		u64 ioadl_ea;
+	};
+	u8 msi;			/* LISN to send on RRQ write */
+#define SISL_MSI_CXL_PFAULT        0	/* reserved for CXL page faults */
+#define SISL_MSI_SYNC_ERROR        1	/* recommended for AFU sync error */
+#define SISL_MSI_RRQ_UPDATED       2	/* recommended for IO completion */
+#define SISL_MSI_ASYNC_ERROR       3	/* master only - for AFU async error */
+
+	u8 rrq;			/* 0 for a single RRQ */
+	u16 timeout;		/* in units specified by req_flags */
+	u32 rsvd1;
+	u8 cdb[16];		/* must be in big endian */
+	struct scsi_cmnd *scp;
+} __packed;
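+
+/*
+ * Illustrative example (not mandated by the structure above): req_flags is
+ * composed by OR-ing one value from each group, e.g. a host read addressed
+ * by port/LUN id with underrun support and a timeout in seconds could be
+ * written as
+ *
+ *	rcb.req_flags = SISL_REQ_FLAGS_PORT_LUN_ID |
+ *			SISL_REQ_FLAGS_SUP_UNDERRUN |
+ *			SISL_REQ_FLAGS_TIMEOUT_SECS |
+ *			SISL_REQ_FLAGS_HOST_READ;
+ */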
+
+struct sisl_rc {
+	u8 flags;
+#define SISL_RC_FLAGS_SENSE_VALID         0x80U
+#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID  0x40U
+#define SISL_RC_FLAGS_OVERRUN             0x20U
+#define SISL_RC_FLAGS_UNDERRUN            0x10U
+
+	u8 afu_rc;
+#define SISL_AFU_RC_RHT_INVALID           0x01U	/* user error */
+#define SISL_AFU_RC_RHT_UNALIGNED         0x02U	/* should never happen */
+#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS     0x03u	/* user error */
+#define SISL_AFU_RC_RHT_DMA_ERR           0x04u	/* see afu_extra
+						   may retry if afu_retry is off
+						   possible on master exit
+						 */
+#define SISL_AFU_RC_RHT_RW_PERM           0x05u	/* no RW perms, user error */
+#define SISL_AFU_RC_LXT_UNALIGNED         0x12U	/* should never happen */
+#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS     0x13u	/* user error */
+#define SISL_AFU_RC_LXT_DMA_ERR           0x14u	/* see afu_extra
+						   may retry if afu_retry is off
+						   possible on master exit
+						 */
+#define SISL_AFU_RC_LXT_RW_PERM           0x15u	/* no RW perms, user error */
+
+#define SISL_AFU_RC_NOT_XLATE_HOST        0x1au	/* possible if master exited */
+
+	/* NO_CHANNELS means the FC ports selected by dest_port in
+	 * IOARCB or in the LXT entry are down when the AFU tried to select
+	 * an FC port. If the port went down on an active IO, it will set
+	 * fc_rc to 0x54 (NOLOGI) or 0x57 (LINKDOWN) instead.
+	 */
+#define SISL_AFU_RC_NO_CHANNELS           0x20U	/* see afu_extra, may retry */
+#define SISL_AFU_RC_CAP_VIOLATION         0x21U	/* either user error or
+						   afu reset/master restart
+						 */
+#define SISL_AFU_RC_OUT_OF_DATA_BUFS      0x30U	/* always retry */
+#define SISL_AFU_RC_DATA_DMA_ERR          0x31U	/* see afu_extra
+						   may retry if afu_retry is off
+						 */
+
+	u8 scsi_rc;		/* SCSI status byte, retry as appropriate */
+#define SISL_SCSI_RC_CHECK                0x02U
+#define SISL_SCSI_RC_BUSY                 0x08u
+
+	u8 fc_rc;		/* retry */
+	/*
+	 * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
+	 * commands that are in flight when a link goes down or is logged out.
+	 * If the link is down or logged out before AFU selects the port, either
+	 * it will choose the other port or we will get afu_rc=0x20 (no_channel)
+	 * if there is no valid port to use.
+	 *
+	 * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
+	 * would happen if a frame is dropped and something times out.
+	 * NOLOGI or LINKDOWN can be retried if the other port is up.
+	 * RESIDERR can be retried as well.
+	 *
+	 * ABORTFAIL might indicate that lots of frames are getting CRC errors.
+	 * So it may be retried once and the link reset if it happens again.
+	 * The link can also be reset on the CRC error threshold interrupt.
+	 */
+#define SISL_FC_RC_ABORTPEND	0x52	/* exchange timeout or abort request */
+#define SISL_FC_RC_WRABORTPEND	0x53	/* due to write XFER_RDY invalid */
+#define SISL_FC_RC_NOLOGI	0x54	/* port not logged in, in-flight cmds */
+#define SISL_FC_RC_NOEXP	0x55	/* FC protocol error or HW bug */
+#define SISL_FC_RC_INUSE	0x56	/* tag already in use, HW bug */
+#define SISL_FC_RC_LINKDOWN	0x57	/* link down, in-flight cmds */
+#define SISL_FC_RC_ABORTOK	0x58	/* pending abort completed w/success */
+#define SISL_FC_RC_ABORTFAIL	0x59	/* pending abort completed w/fail */
+#define SISL_FC_RC_RESID	0x5A	/* ioasa underrun/overrun flags set */
+#define SISL_FC_RC_RESIDERR	0x5B	/* actual data len does not match SCSI
+					   reported len, possibly due to dropped
+					   frames */
+#define SISL_FC_RC_TGTABORT	0x5C	/* command aborted by target */
+};
+
+#define SISL_SENSE_DATA_LEN     20	/* Sense data length         */
+
+/*
+ * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
+ * host native endianness
+ */
+struct sisl_ioasa {
+	union {
+		struct sisl_rc rc;
+		u32 ioasc;
+#define SISL_IOASC_GOOD_COMPLETION        0x00000000U
+	};
+	u32 resid;
+	u8 port;
+	u8 afu_extra;
+	/* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
+	 * afu_extra contains the PSL response code. Useful codes are:
+	 */
+#define SISL_AFU_DMA_ERR_PAGE_IN	0x0A	/* AFU_retry_on_pagein Action
+						 *  Enabled            N/A
+						 *  Disabled           retry
+						 */
+#define SISL_AFU_DMA_ERR_INVALID_EA	0x0B	/* this is a hard error
+						 * afu_rc	Implies
+						 * 0x04, 0x14	master exit.
+						 * 0x31         user error.
+						 */
+	/* when afu_rc=0x20 (no channels):
+	 * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
+	 */
+#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
+#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
+
+	u8 scsi_extra;
+	u8 fc_extra;
+	u8 sense_data[SISL_SENSE_DATA_LEN];
+
+	/* These fields are defined by the SISlite architecture for the
+	 * host to use as they see fit for their implementation.
+	 */
+	union {
+		u64 host_use[4];
+		u8 host_use_b[32];
+	};
+} __packed;
+
+#define SISL_RESP_HANDLE_T_BIT        0x1ULL	/* Toggle bit */
+
+/* MMIO space is required to support only 64-bit access */
+
+/*
+ * This AFU has two mechanisms to deal with endian-ness.
+ * One is a global configuration (in the afu_config) register
+ * below that specifies the endian-ness of the host.
+ * The other is a per context (i.e. application) specification
+ * controlled by the endian_ctrl field here. Since the master
+ * context is one such application the master context's
+ * endian-ness is set to be the same as the host.
+ *
+ * As per the SISlite spec, the MMIO registers are always
+ * big endian.
+ */
+#define SISL_ENDIAN_CTRL_BE           0x8000000000000080ULL
+#define SISL_ENDIAN_CTRL_LE           0x0000000000000000ULL
+
+#ifdef __BIG_ENDIAN
+#define SISL_ENDIAN_CTRL              SISL_ENDIAN_CTRL_BE
+#else
+#define SISL_ENDIAN_CTRL              SISL_ENDIAN_CTRL_LE
+#endif
+
+/* per context host transport MMIO  */
+struct sisl_host_map {
+	__be64 endian_ctrl;     /* Per context Endian Control. The AFU will
+				 * operate using whatever endianness the host
+				 * application's context specifies.
+				 */
+
+	__be64 intr_status;	/* this sends LISN# programmed in ctx_ctrl.
+				 * Only recovery in a PERM_ERR is a context
+				 * exit since there is no way to tell which
+				 * command caused the error.
+				 */
+#define SISL_ISTATUS_PERM_ERR_CMDROOM    0x0010ULL	/* b59, user error */
+#define SISL_ISTATUS_PERM_ERR_RCB_READ   0x0008ULL	/* b60, user error */
+#define SISL_ISTATUS_PERM_ERR_SA_WRITE   0x0004ULL	/* b61, user error */
+#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE  0x0002ULL	/* b62, user error */
+	/* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
+	 * Same error in data/LXT/RHT access is reported via IOASA.
+	 */
+#define SISL_ISTATUS_TEMP_ERR_PAGEIN     0x0001ULL	/* b63, can be generated
+							 * only when AFU auto
+							 * retry is disabled.
+							 * If user can determine
+							 * the command that
+							 * caused the error, it
+							 * can be retried.
+							 */
+#define SISL_ISTATUS_UNMASK  (0x001FULL)	/* 1 means unmasked */
+#define SISL_ISTATUS_MASK    ~(SISL_ISTATUS_UNMASK)	/* 1 means masked */
+
+	__be64 intr_clear;
+	__be64 intr_mask;
+	__be64 ioarrin;		/* only write what cmd_room permits */
+	__be64 rrq_start;	/* start & end are both inclusive */
+	__be64 rrq_end;		/* write sequence: start followed by end */
+	__be64 cmd_room;
+	__be64 ctx_ctrl;	/* least significant byte or b56:63 is LISN# */
+	__be64 mbox_w;		/* restricted use */
+};
+
+/* per context provisioning & control MMIO */
+struct sisl_ctrl_map {
+	__be64 rht_start;
+	__be64 rht_cnt_id;
+	/* both cnt & ctx_id args must be ULL */
+#define SISL_RHT_CNT_ID(cnt, ctx_id)  (((cnt) << 48) | ((ctx_id) << 32))
+
+	__be64 ctx_cap;	/* afu_rc below is when the capability is violated */
+#define SISL_CTX_CAP_PROXY_ISSUE       0x8000000000000000ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_REAL_MODE         0x4000000000000000ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_HOST_XLATE        0x2000000000000000ULL /* afu_rc 0x1a */
+#define SISL_CTX_CAP_PROXY_TARGET      0x1000000000000000ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_AFU_CMD           0x0000000000000008ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_GSCSI_CMD         0x0000000000000004ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_WRITE_CMD         0x0000000000000002ULL /* afu_rc 0x21 */
+#define SISL_CTX_CAP_READ_CMD          0x0000000000000001ULL /* afu_rc 0x21 */
+	__be64 mbox_r;
+};
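+
+/*
+ * Illustrative example: afu_attach() in superpipe.c programs rht_cnt_id as
+ *
+ *	SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)afu->ctx_hndl)
+ *
+ * i.e. the RHT entry count lands in bits 63:48 and the owning context id in
+ * bits 47:32, per the macro above (both arguments must be u64).
+ */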
+
+/* single copy global regs */
+struct sisl_global_regs {
+	__be64 aintr_status;
+	/* In cxlflash, each FC port/link gets a byte of status */
+#define SISL_ASTATUS_FC0_OTHER	 0x8000ULL /* b48, other err,
+					      FC_ERRCAP[31:20] */
+#define SISL_ASTATUS_FC0_LOGO    0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
+						   while logged in */
+#define SISL_ASTATUS_FC0_CRC_T   0x2000ULL /* b50, CRC threshold exceeded */
+#define SISL_ASTATUS_FC0_LOGI_R  0x1000ULL /* b51, login state machine timed out
+						   and retrying */
+#define SISL_ASTATUS_FC0_LOGI_F  0x0800ULL /* b52, login failed,
+					      FC_ERROR[19:0] */
+#define SISL_ASTATUS_FC0_LOGI_S  0x0400ULL /* b53, login succeeded */
+#define SISL_ASTATUS_FC0_LINK_DN 0x0200ULL /* b54, link online to offline */
+#define SISL_ASTATUS_FC0_LINK_UP 0x0100ULL /* b55, link offline to online */
+
+#define SISL_ASTATUS_FC1_OTHER   0x0080ULL /* b56 */
+#define SISL_ASTATUS_FC1_LOGO    0x0040ULL /* b57 */
+#define SISL_ASTATUS_FC1_CRC_T   0x0020ULL /* b58 */
+#define SISL_ASTATUS_FC1_LOGI_R  0x0010ULL /* b59 */
+#define SISL_ASTATUS_FC1_LOGI_F  0x0008ULL /* b60 */
+#define SISL_ASTATUS_FC1_LOGI_S  0x0004ULL /* b61 */
+#define SISL_ASTATUS_FC1_LINK_DN 0x0002ULL /* b62 */
+#define SISL_ASTATUS_FC1_LINK_UP 0x0001ULL /* b63 */
+
+#define SISL_FC_INTERNAL_UNMASK	0x0000000300000000ULL	/* 1 means unmasked */
+#define SISL_FC_INTERNAL_MASK	~(SISL_FC_INTERNAL_UNMASK)
+#define SISL_FC_INTERNAL_SHIFT	32
+
+#define SISL_ASTATUS_UNMASK	0xFFFFULL		/* 1 means unmasked */
+#define SISL_ASTATUS_MASK	~(SISL_ASTATUS_UNMASK)	/* 1 means masked */
+
+	__be64 aintr_clear;
+	__be64 aintr_mask;
+	__be64 afu_ctrl;
+	__be64 afu_hb;
+	__be64 afu_scratch_pad;
+	__be64 afu_port_sel;
+#define SISL_AFUCONF_AR_IOARCB	0x4000ULL
+#define SISL_AFUCONF_AR_LXT	0x2000ULL
+#define SISL_AFUCONF_AR_RHT	0x1000ULL
+#define SISL_AFUCONF_AR_DATA	0x0800ULL
+#define SISL_AFUCONF_AR_RSRC	0x0400ULL
+#define SISL_AFUCONF_AR_IOASA	0x0200ULL
+#define SISL_AFUCONF_AR_RRQ	0x0100ULL
+/* Aggregate all Auto Retry Bits */
+#define SISL_AFUCONF_AR_ALL	(SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
+				 SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA|   \
+				 SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
+				 SISL_AFUCONF_AR_RRQ)
+#ifdef __BIG_ENDIAN
+#define SISL_AFUCONF_ENDIAN            0x0000ULL
+#else
+#define SISL_AFUCONF_ENDIAN            0x0020ULL
+#endif
+#define SISL_AFUCONF_MBOX_CLR_READ     0x0010ULL
+	__be64 afu_config;
+	__be64 rsvd[0xf8];
+	__be64 afu_version;
+	__be64 interface_version;
+};
+
+#define CXLFLASH_NUM_FC_PORTS   2
+#define CXLFLASH_MAX_CONTEXT  512	/* how many contexts per afu */
+#define CXLFLASH_NUM_VLUNS    512
+
+struct sisl_global_map {
+	union {
+		struct sisl_global_regs regs;
+		char page0[SIZE_4K];	/* page 0 */
+	};
+
+	char page1[SIZE_4K];	/* page 1 */
+
+	/* pages 2 & 3 */
+	__be64 fc_regs[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
+
+	/* pages 4 & 5 (lun tbl) */
+	__be64 fc_port[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
+
+};
+
+/*
+ * CXL Flash Memory Map
+ *
+ *	+-------------------------------+
+ *	|    512 * 64 KB User MMIO      |
+ *	|        (per context)          |
+ *	|       User Accessible         |
+ *	+-------------------------------+
+ *	|    512 * 128 B per context    |
+ *	|    Provisioning and Control   |
+ *	|   Trusted Process accessible  |
+ *	+-------------------------------+
+ *	|         64 KB Global          |
+ *	|   Trusted Process accessible  |
+ *	+-------------------------------+
+ */
+struct cxlflash_afu_map {
+	union {
+		struct sisl_host_map host;
+		char harea[SIZE_64K];	/* 64KB each */
+	} hosts[CXLFLASH_MAX_CONTEXT];
+
+	union {
+		struct sisl_ctrl_map ctrl;
+		char carea[cache_line_size()];	/* 128B each */
+	} ctrls[CXLFLASH_MAX_CONTEXT];
+
+	union {
+		struct sisl_global_map global;
+		char garea[SIZE_64K];	/* 64KB single block */
+	};
+};
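+
+/*
+ * Size check (informational): with CXLFLASH_MAX_CONTEXT = 512, the map above
+ * comprises 512 * 64KB (32MB) of per-context host MMIO, 512 * 128B (64KB) of
+ * per-context provisioning/control space and a single 64KB global block,
+ * matching the memory map diagram above.
+ */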
+
+/*
+ * LXT - LBA Translation Table
+ * LXT control blocks
+ */
+struct sisl_lxt_entry {
+	u64 rlba_base;	/* bits 0:47 is base
+			 * b48:55 is lun index
+			 * b58:59 is write & read perms
+			 * (if no perm, afu_rc=0x15)
+			 * b60:63 is port_sel mask
+			 */
+};
+
+/*
+ * RHT - Resource Handle Table
+ * Per the SISlite spec, RHT entries are to be 16-byte aligned
+ */
+struct sisl_rht_entry {
+	struct sisl_lxt_entry *lxt_start;
+	u32 lxt_cnt;
+	u16 rsvd;
+	u8 fp;			/* format & perm nibbles.
+				 * (if no perm, afu_rc=0x05)
+				 */
+	u8 nmask;
+} __packed __aligned(16);
+
+struct sisl_rht_entry_f1 {
+	u64 lun_id;
+	union {
+		struct {
+			u8 valid;
+			u8 rsvd[5];
+			u8 fp;
+			u8 port_sel;
+		};
+
+		u64 dw;
+	};
+} __packed __aligned(16);
+
+/* make the fp byte */
+#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
+
+/* make the fp byte for a clone from a source fp and clone flags;
+ * the clone flags must occupy only the 2 LSB bits.
+ */
+#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
+
+#define RHT_PERM_READ  0x01U
+#define RHT_PERM_WRITE 0x02U
+#define RHT_PERM_RW    (RHT_PERM_READ | RHT_PERM_WRITE)
+
+/* extract the perm bits from a fp */
+#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
+
+#define PORT0  0x01U
+#define PORT1  0x02U
+#define BOTH_PORTS    (PORT0 | PORT1)
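+
+/*
+ * Illustrative example: a format 1 (direct/physical LUN) entry with
+ * read/write permission on both ports could be described with
+ *
+ *	fp = SISL_RHT_FP(1U, RHT_PERM_RW);	(yields 0x13)
+ *	port_sel = BOTH_PORTS;
+ *
+ * which mirrors what rht_format1() in superpipe.c programs into the RHTE.
+ */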
+
+/* AFU Sync Mode byte */
+#define AFU_LW_SYNC 0x0U
+#define AFU_HW_SYNC 0x1U
+#define AFU_GSYNC   0x2U
+
+/* Special Task Management Function CDB */
+#define TMF_LUN_RESET  0x1U
+#define TMF_CLEAR_ACA  0x2U
+
+
+#define SISLITE_MAX_WS_BLOCKS 512
+
+#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
new file mode 100644
index 0000000..f1b62ce
--- /dev/null
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -0,0 +1,2084 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/syscalls.h>
+#include <misc/cxl.h>
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "sislite.h"
+#include "common.h"
+#include "vlun.h"
+#include "superpipe.h"
+
+struct cxlflash_global global;
+
+/**
+ * marshal_rele_to_resize() - translate release to resize structure
+ * @release:	Source structure from which to translate/copy.
+ * @resize:	Destination structure for the translate/copy.
+ */
+static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
+				   struct dk_cxlflash_resize *resize)
+{
+	resize->hdr = release->hdr;
+	resize->context_id = release->context_id;
+	resize->rsrc_handle = release->rsrc_handle;
+}
+
+/**
+ * marshal_det_to_rele() - translate detach to release structure
+ * @detach:	Source structure from which to translate/copy.
+ * @release:	Destination structure for the translate/copy.
+ */
+static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
+				struct dk_cxlflash_release *release)
+{
+	release->hdr = detach->hdr;
+	release->context_id = detach->context_id;
+}
+
+/**
+ * cxlflash_free_errpage() - frees resources associated with global error page
+ */
+void cxlflash_free_errpage(void)
+{
+
+	mutex_lock(&global.mutex);
+	if (global.err_page) {
+		__free_page(global.err_page);
+		global.err_page = NULL;
+	}
+	mutex_unlock(&global.mutex);
+}
+
+/**
+ * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
+ * @cfg:	Internal structure associated with the host.
+ *
+ * When the host needs to go down, all users must be quiesced and their
+ * memory freed. This is accomplished by putting the contexts in error
+ * state which will notify the user and let them 'drive' the tear-down.
+ * Meanwhile, this routine camps until all user contexts have been removed.
+ */
+void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
+{
+	struct device *dev = &cfg->dev->dev;
+	int i, found;
+
+	cxlflash_mark_contexts_error(cfg);
+
+	while (true) {
+		found = false;
+
+		for (i = 0; i < MAX_CONTEXT; i++)
+			if (cfg->ctx_tbl[i]) {
+				found = true;
+				break;
+			}
+
+		if (!found && list_empty(&cfg->ctx_err_recovery))
+			return;
+
+		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
+			__func__);
+		wake_up_all(&cfg->limbo_waitq);
+		ssleep(1);
+	}
+}
+
+/**
+ * find_error_context() - locates a context by cookie on the error recovery list
+ * @cfg:	Internal structure associated with the host.
+ * @rctxid:	Desired context by id.
+ * @file:	Desired context by file.
+ *
+ * Return: Found context on success, NULL on failure
+ */
+static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
+					   struct file *file)
+{
+	struct ctx_info *ctxi;
+
+	list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
+		if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
+			return ctxi;
+
+	return NULL;
+}
+
+/**
+ * get_context() - obtains a validated and locked context reference
+ * @cfg:	Internal structure associated with the host.
+ * @rctxid:	Desired context (raw, un-decoded format).
+ * @arg:	LUN information or file associated with request.
+ * @ctx_ctrl:	Control information to 'steer' desired lookup.
+ *
+ * NOTE: despite the name pid, in Linux, current->pid actually refers
+ * to the lightweight process id (tid) and can change if the process is
+ * multithreaded. The tgid remains constant for the process and only changes
+ * across a fork. For all intents and purposes, think of tgid
+ * as a pid in the traditional sense.
+ *
+ * Return: Validated context on success, NULL on failure
+ */
+struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
+			     void *arg, enum ctx_ctrl ctx_ctrl)
+{
+	struct device *dev = &cfg->dev->dev;
+	struct ctx_info *ctxi = NULL;
+	struct lun_access *lun_access = NULL;
+	struct file *file = NULL;
+	struct llun_info *lli = arg;
+	u64 ctxid = DECODE_CTXID(rctxid);
+	int rc;
+	pid_t pid = current->tgid, ctxpid = 0;
+
+	if (ctx_ctrl & CTX_CTRL_FILE) {
+		lli = NULL;
+		file = (struct file *)arg;
+	}
+
+	if (ctx_ctrl & CTX_CTRL_CLONE)
+		pid = current->parent->tgid;
+
+	if (likely(ctxid < MAX_CONTEXT)) {
+		while (true) {
+			rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
+			if (rc)
+				goto out;
+
+			ctxi = cfg->ctx_tbl[ctxid];
+			if (ctxi)
+				if ((file && (ctxi->file != file)) ||
+				    (!file && (ctxi->ctxid != rctxid)))
+					ctxi = NULL;
+
+			if ((ctx_ctrl & CTX_CTRL_ERR) ||
+			    (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
+				ctxi = find_error_context(cfg, rctxid, file);
+			if (!ctxi) {
+				mutex_unlock(&cfg->ctx_tbl_list_mutex);
+				goto out;
+			}
+
+			/*
+			 * Need to acquire ownership of the context while still
+			 * under the table/list lock to serialize with a remove
+			 * thread. Use the 'try' to avoid stalling the
+			 * table/list lock for a single context.
+			 *
+			 * Note that the lock order is:
+			 *
+			 *	cfg->ctx_tbl_list_mutex -> ctxi->mutex
+			 *
+			 * Therefore release ctx_tbl_list_mutex before retrying.
+			 */
+			rc = mutex_trylock(&ctxi->mutex);
+			mutex_unlock(&cfg->ctx_tbl_list_mutex);
+			if (rc)
+				break; /* got the context's lock! */
+		}
+
+		if (ctxi->unavail)
+			goto denied;
+
+		ctxpid = ctxi->pid;
+		if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
+			if (pid != ctxpid)
+				goto denied;
+
+		if (lli) {
+			list_for_each_entry(lun_access, &ctxi->luns, list)
+				if (lun_access->lli == lli)
+					goto out;
+			goto denied;
+		}
+	}
+
+out:
+	dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
+		"ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
+		ctx_ctrl);
+
+	return ctxi;
+
+denied:
+	mutex_unlock(&ctxi->mutex);
+	ctxi = NULL;
+	goto out;
+}
+
+/**
+ * put_context() - release a context that was retrieved from get_context()
+ * @ctxi:	Context to release.
+ *
+ * For now, releasing the context equates to unlocking its mutex.
+ */
+void put_context(struct ctx_info *ctxi)
+{
+	mutex_unlock(&ctxi->mutex);
+}
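+
+/*
+ * Typical pairing (sketch): callers that were not handed a context look one
+ * up, operate under its mutex and then release it, as _cxlflash_disk_release()
+ * and _cxlflash_disk_detach() below do:
+ *
+ *	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+ *	if (unlikely(!ctxi))
+ *		return -EINVAL;
+ *	(use ctxi)
+ *	put_context(ctxi);
+ */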
+
+/**
+ * afu_attach() - attach a context to the AFU
+ * @cfg:	Internal structure associated with the host.
+ * @ctxi:	Context to attach.
+ *
+ * Upon setting the context capabilities, they must be confirmed with
+ * a read back operation as the context might have been closed since
+ * the mailbox was unlocked. When this occurs, registration fails.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+{
+	struct device *dev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
+	int rc = 0;
+	u64 val;
+
+	/* Unlock cap and restrict user to read/write cmds in translated mode */
+	readq_be(&ctrl_map->mbox_r);
+	val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
+	writeq_be(val, &ctrl_map->ctx_cap);
+	val = readq_be(&ctrl_map->ctx_cap);
+	if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
+		dev_err(dev, "%s: ctx may be closed val=%016llX\n",
+			__func__, val);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	/* Set up MMIO registers pointing to the RHT */
+	writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
+	val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
+	writeq_be(val, &ctrl_map->rht_cnt_id);
+out:
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * read_cap16() - issues a SCSI READ_CAP16 command
+ * @sdev:	SCSI device associated with LUN.
+ * @lli:	LUN destined for capacity request.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct glun_info *gli = lli->parent;
+	u8 *cmd_buf = NULL;
+	u8 *scsi_cmd = NULL;
+	u8 *sense_buf = NULL;
+	int rc = 0;
+	int result = 0;
+	int retry_cnt = 0;
+	u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);
+
+retry:
+	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
+	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
+	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	scsi_cmd[0] = SERVICE_ACTION_IN_16;	/* read cap(16) */
+	scsi_cmd[1] = SAI_READ_CAPACITY_16;	/* service action */
+	put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
+
+	dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
+		retry_cnt ? "re" : "", scsi_cmd[0]);
+
+	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
+			      CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+
+	if (driver_byte(result) == DRIVER_SENSE) {
+		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
+		if (result & SAM_STAT_CHECK_CONDITION) {
+			struct scsi_sense_hdr sshdr;
+
+			scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
+					    &sshdr);
+			switch (sshdr.sense_key) {
+			case NO_SENSE:
+			case RECOVERED_ERROR:
+				/* fall through */
+			case NOT_READY:
+				result &= ~SAM_STAT_CHECK_CONDITION;
+				break;
+			case UNIT_ATTENTION:
+				switch (sshdr.asc) {
+				case 0x29: /* Power on Reset or Device Reset */
+					/* fall through */
+				case 0x2A: /* Device capacity changed */
+				case 0x3F: /* Report LUNs changed */
+					/* Retry the command once more */
+					if (retry_cnt++ < 1) {
+						kfree(cmd_buf);
+						kfree(scsi_cmd);
+						kfree(sense_buf);
+						goto retry;
+					}
+				}
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	if (result) {
+		dev_err(dev, "%s: command failed, result=0x%x\n",
+			__func__, result);
+		rc = -EIO;
+		goto out;
+	}
+
+	/*
+	 * Read cap was successful, grab values from the buffer;
+	 * note that we don't need to worry about unaligned access
+	 * as the buffer is allocated on an aligned boundary.
+	 */
+	mutex_lock(&gli->mutex);
+	gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
+	gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
+	mutex_unlock(&gli->mutex);
+
+out:
+	kfree(cmd_buf);
+	kfree(scsi_cmd);
+	kfree(sense_buf);
+
+	dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
+		__func__, gli->max_lba, gli->blk_len, rc);
+	return rc;
+}
+
+/**
+ * get_rhte() - obtains validated resource handle table entry reference
+ * @ctxi:	Context owning the resource handle.
+ * @rhndl:	Resource handle associated with entry.
+ * @lli:	LUN associated with request.
+ *
+ * Return: Validated RHTE on success, NULL on failure
+ */
+struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
+				struct llun_info *lli)
+{
+	struct sisl_rht_entry *rhte = NULL;
+
+	if (unlikely(!ctxi->rht_start)) {
+		pr_debug("%s: Context does not have allocated RHT!\n",
+			 __func__);
+		goto out;
+	}
+
+	if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
+		pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
+		goto out;
+	}
+
+	if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
+		pr_debug("%s: Bad resource handle LUN! (%d)\n",
+			 __func__, rhndl);
+		goto out;
+	}
+
+	rhte = &ctxi->rht_start[rhndl];
+	if (unlikely(rhte->nmask == 0)) {
+		pr_debug("%s: Unopened resource handle! (%d)\n",
+			 __func__, rhndl);
+		rhte = NULL;
+		goto out;
+	}
+
+out:
+	return rhte;
+}
+
+/**
+ * rhte_checkout() - obtains free/empty resource handle table entry
+ * @ctxi:	Context owning the resource handle.
+ * @lli:	LUN associated with request.
+ *
+ * Return: Free RHTE on success, NULL on failure
+ */
+struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
+				     struct llun_info *lli)
+{
+	struct sisl_rht_entry *rhte = NULL;
+	int i;
+
+	/* Find a free RHT entry */
+	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
+		if (ctxi->rht_start[i].nmask == 0) {
+			rhte = &ctxi->rht_start[i];
+			ctxi->rht_out++;
+			break;
+		}
+
+	if (likely(rhte))
+		ctxi->rht_lun[i] = lli;
+
+	pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
+	return rhte;
+}
+
+/**
+ * rhte_checkin() - releases a resource handle table entry
+ * @ctxi:	Context owning the resource handle.
+ * @rhte:	RHTE to release.
+ */
+void rhte_checkin(struct ctx_info *ctxi,
+		  struct sisl_rht_entry *rhte)
+{
+	u32 rsrc_handle = rhte - ctxi->rht_start;
+
+	rhte->nmask = 0;
+	rhte->fp = 0;
+	ctxi->rht_out--;
+	ctxi->rht_lun[rsrc_handle] = NULL;
+	ctxi->rht_needs_ws[rsrc_handle] = false;
+}
+
+/**
+ * rht_format1() - populates a RHTE for format 1
+ * @rhte:	RHTE to populate.
+ * @lun_id:	LUN ID of LUN associated with RHTE.
+ * @perm:	Desired permissions for RHTE.
+ * @port_sel:	Port selection mask
+ */
+static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
+			u32 port_sel)
+{
+	/*
+	 * Populate the Format 1 RHT entry for direct access (physical
+	 * LUN) using the synchronization sequence defined in the
+	 * SISLite specification.
+	 */
+	struct sisl_rht_entry_f1 dummy = { 0 };
+	struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
+
+	memset(rhte_f1, 0, sizeof(*rhte_f1));
+	rhte_f1->fp = SISL_RHT_FP(1U, 0);
+	dma_wmb(); /* Make setting of format bit visible */
+
+	rhte_f1->lun_id = lun_id;
+	dma_wmb(); /* Make setting of LUN id visible */
+
+	/*
+	 * Use a dummy RHT Format 1 entry to build the second dword
+	 * of the entry that must be populated in a single write when
+	 * enabled (valid bit set to TRUE).
+	 */
+	dummy.valid = 0x80;
+	dummy.fp = SISL_RHT_FP(1U, perm);
+	dummy.port_sel = port_sel;
+	rhte_f1->dw = dummy.dw;
+
+	dma_wmb(); /* Make remaining RHT entry fields visible */
+}
+
+/**
+ * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
+ * @gli:	LUN to attach.
+ * @mode:	Desired mode of the LUN.
+ * @locked:	Mutex status on current thread.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
+{
+	int rc = 0;
+
+	if (!locked)
+		mutex_lock(&gli->mutex);
+
+	if (gli->mode == MODE_NONE)
+		gli->mode = mode;
+	else if (gli->mode != mode) {
+		pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
+			 __func__, gli->mode, mode);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	gli->users++;
+	WARN_ON(gli->users <= 0);
+out:
+	pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
+		 __func__, rc, gli->mode, gli->users);
+	if (!locked)
+		mutex_unlock(&gli->mutex);
+	return rc;
+}
+
+/**
+ * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
+ * @gli:	LUN to detach.
+ *
+ * When resetting the mode, terminate block allocation resources as they
+ * are no longer required (service is safe to call even when block allocation
+ * resources were not present - such as when transitioning from physical mode).
+ * These resources will be reallocated when needed (subsequent transition to
+ * virtual mode).
+ */
+void cxlflash_lun_detach(struct glun_info *gli)
+{
+	mutex_lock(&gli->mutex);
+	WARN_ON(gli->mode == MODE_NONE);
+	if (--gli->users == 0) {
+		gli->mode = MODE_NONE;
+		cxlflash_ba_terminate(&gli->blka.ba_lun);
+	}
+	pr_debug("%s: gli->users=%u\n", __func__, gli->users);
+	WARN_ON(gli->users < 0);
+	mutex_unlock(&gli->mutex);
+}
+
+/**
+ * _cxlflash_disk_release() - releases the specified resource entry
+ * @sdev:	SCSI device associated with LUN.
+ * @ctxi:	Context owning resources.
+ * @release:	Release ioctl data structure.
+ *
+ * For LUNs in virtual mode, the virtual LUN associated with the specified
+ * resource handle is resized to 0 prior to releasing the RHTE. Note that the
+ * AFU sync should _not_ be performed when the context is sitting on the error
+ * recovery list. A context on the error recovery list is not known to the AFU
+ * due to reset. When the context is recovered, it will be reattached and made
+ * known again to the AFU.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int _cxlflash_disk_release(struct scsi_device *sdev,
+			   struct ctx_info *ctxi,
+			   struct dk_cxlflash_release *release)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct afu *afu = cfg->afu;
+	bool put_ctx = false;
+
+	struct dk_cxlflash_resize size;
+	res_hndl_t rhndl = release->rsrc_handle;
+
+	int rc = 0;
+	u64 ctxid = DECODE_CTXID(release->context_id),
+	    rctxid = release->context_id;
+
+	struct sisl_rht_entry *rhte;
+	struct sisl_rht_entry_f1 *rhte_f1;
+
+	dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
+		__func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
+
+	if (!ctxi) {
+		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+		if (unlikely(!ctxi)) {
+			dev_dbg(dev, "%s: Bad context! (%llu)\n",
+				__func__, ctxid);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		put_ctx = true;
+	}
+
+	rhte = get_rhte(ctxi, rhndl, lli);
+	if (unlikely(!rhte)) {
+		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+			__func__, rhndl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * For virtual LUNs, resize to 0. This will clear
+	 * the LXT_START and LXT_CNT fields
+	 * in the RHT entry and properly sync with the AFU.
+	 *
+	 * Afterwards we clear the remaining fields.
+	 */
+	switch (gli->mode) {
+	case MODE_VIRTUAL:
+		marshal_rele_to_resize(release, &size);
+		size.req_size = 0;
+		rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
+		if (rc) {
+			dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
+			goto out;
+		}
+
+		break;
+	case MODE_PHYSICAL:
+		/*
+		 * Clear the Format 1 RHT entry for direct access
+		 * (physical LUN) using the synchronization sequence
+		 * defined in the SISLite specification.
+		 */
+		rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
+
+		rhte_f1->valid = 0;
+		dma_wmb(); /* Make revocation of RHT entry visible */
+
+		rhte_f1->lun_id = 0;
+		dma_wmb(); /* Make clearing of LUN id visible */
+
+		rhte_f1->dw = 0;
+		dma_wmb(); /* Make RHT entry bottom-half clearing visible */
+
+		if (!ctxi->err_recovery_active)
+			cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+		break;
+	default:
+		WARN(1, "Unsupported LUN mode!");
+		goto out;
+	}
+
+	rhte_checkin(ctxi, rhte);
+	cxlflash_lun_detach(gli);
+
+out:
+	if (put_ctx)
+		put_context(ctxi);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int cxlflash_disk_release(struct scsi_device *sdev,
+			  struct dk_cxlflash_release *release)
+{
+	return _cxlflash_disk_release(sdev, NULL, release);
+}
+
+/**
+ * destroy_context() - releases a context
+ * @cfg:	Internal structure associated with the host.
+ * @ctxi:	Context to release.
+ *
+ * Note that the context's rht_lun and rht_needs_ws tables are separate
+ * allocations that are freed here along with the context itself. Also
+ * note that we conditionally check for the
+ * existence of the context control map before clearing the RHT registers
+ * and context capabilities because it is possible to destroy a context
+ * while the context is in the error state (previous mapping was removed
+ * [so we don't have to worry about clearing] and context is waiting for
+ * a new mapping).
+ */
+static void destroy_context(struct cxlflash_cfg *cfg,
+			    struct ctx_info *ctxi)
+{
+	struct afu *afu = cfg->afu;
+
+	WARN_ON(!list_empty(&ctxi->luns));
+
+	/* Clear RHT registers and drop all capabilities for this context */
+	if (afu->afu_map && ctxi->ctrl_map) {
+		writeq_be(0, &ctxi->ctrl_map->rht_start);
+		writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
+		writeq_be(0, &ctxi->ctrl_map->ctx_cap);
+	}
+
+	/* Free memory associated with context */
+	free_page((ulong)ctxi->rht_start);
+	kfree(ctxi->rht_needs_ws);
+	kfree(ctxi->rht_lun);
+	kfree(ctxi);
+	atomic_dec_if_positive(&cfg->num_user_contexts);
+}
+
+/**
+ * create_context() - allocates and initializes a context
+ * @cfg:	Internal structure associated with the host.
+ * @ctx:	Previously obtained CXL context reference.
+ * @ctxid:	Previously obtained process element associated with CXL context.
+ * @adap_fd:	Previously obtained adapter fd associated with CXL context.
+ * @file:	Previously obtained file associated with CXL context.
+ * @perms:	User-specified permissions.
+ *
+ * The context's mutex is locked when an allocated context is returned.
+ *
+ * Return: Allocated context on success, NULL on failure
+ */
+static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
+				       struct cxl_context *ctx, int ctxid,
+				       int adap_fd, struct file *file,
+				       u32 perms)
+{
+	struct device *dev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	struct ctx_info *ctxi = NULL;
+	struct llun_info **lli = NULL;
+	bool *ws = NULL;
+	struct sisl_rht_entry *rhte;
+
+	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
+	lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
+	ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
+	if (unlikely(!ctxi || !lli || !ws)) {
+		dev_err(dev, "%s: Unable to allocate context!\n", __func__);
+		goto err;
+	}
+
+	rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
+	if (unlikely(!rhte)) {
+		dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
+		goto err;
+	}
+
+	ctxi->rht_lun = lli;
+	ctxi->rht_needs_ws = ws;
+	ctxi->rht_start = rhte;
+	ctxi->rht_perms = perms;
+
+	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
+	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+	ctxi->lfd = adap_fd;
+	ctxi->pid = current->tgid; /* tgid = pid */
+	ctxi->ctx = ctx;
+	ctxi->file = file;
+	mutex_init(&ctxi->mutex);
+	INIT_LIST_HEAD(&ctxi->luns);
+	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
+
+	atomic_inc(&cfg->num_user_contexts);
+	mutex_lock(&ctxi->mutex);
+out:
+	return ctxi;
+
+err:
+	kfree(ws);
+	kfree(lli);
+	kfree(ctxi);
+	ctxi = NULL;
+	goto out;
+}
+
+/**
+ * _cxlflash_disk_detach() - detaches a LUN from a context
+ * @sdev:	SCSI device associated with LUN.
+ * @ctxi:	Context owning resources.
+ * @detach:	Detach ioctl data structure.
+ *
+ * As part of the detach, all per-context resources associated with the LUN
+ * are cleaned up. When detaching the last LUN for a context, the context
+ * itself is cleaned up and released.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _cxlflash_disk_detach(struct scsi_device *sdev,
+				 struct ctx_info *ctxi,
+				 struct dk_cxlflash_detach *detach)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	struct lun_access *lun_access, *t;
+	struct dk_cxlflash_release rel;
+	bool put_ctx = false;
+
+	int i;
+	int rc = 0;
+	int lfd;
+	u64 ctxid = DECODE_CTXID(detach->context_id),
+	    rctxid = detach->context_id;
+
+	dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
+
+	if (!ctxi) {
+		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+		if (unlikely(!ctxi)) {
+			dev_dbg(dev, "%s: Bad context! (%llu)\n",
+				__func__, ctxid);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		put_ctx = true;
+	}
+
+	/* Cleanup outstanding resources tied to this LUN */
+	if (ctxi->rht_out) {
+		marshal_det_to_rele(detach, &rel);
+		for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
+			if (ctxi->rht_lun[i] == lli) {
+				rel.rsrc_handle = i;
+				_cxlflash_disk_release(sdev, ctxi, &rel);
+			}
+
+			/* No need to loop further if we're done */
+			if (ctxi->rht_out == 0)
+				break;
+		}
+	}
+
+	/* Take our LUN out of context, free the node */
+	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
+		if (lun_access->lli == lli) {
+			list_del(&lun_access->list);
+			kfree(lun_access);
+			lun_access = NULL;
+			break;
+		}
+
+	/* Tear down context following last LUN cleanup */
+	if (list_empty(&ctxi->luns)) {
+		ctxi->unavail = true;
+		mutex_unlock(&ctxi->mutex);
+		mutex_lock(&cfg->ctx_tbl_list_mutex);
+		mutex_lock(&ctxi->mutex);
+
+		/* Might not have been in error list so conditionally remove */
+		if (!list_empty(&ctxi->list))
+			list_del(&ctxi->list);
+		cfg->ctx_tbl[ctxid] = NULL;
+		mutex_unlock(&cfg->ctx_tbl_list_mutex);
+		mutex_unlock(&ctxi->mutex);
+
+		lfd = ctxi->lfd;
+		destroy_context(cfg, ctxi);
+		ctxi = NULL;
+		put_ctx = false;
+
+		/*
+		 * As a last step, clean up external resources when not
+		 * already on an external cleanup thread, i.e.: close(adap_fd).
+		 *
+		 * NOTE: this will free up the context from the CXL services,
+		 * allowing it to dole out the same context_id on a future
+		 * (or even currently in-flight) disk_attach operation.
+		 */
+		if (lfd != -1)
+			sys_close(lfd);
+	}
+
+out:
+	if (put_ctx)
+		put_context(ctxi);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int cxlflash_disk_detach(struct scsi_device *sdev,
+				struct dk_cxlflash_detach *detach)
+{
+	return _cxlflash_disk_detach(sdev, NULL, detach);
+}
+
+/**
+ * cxlflash_cxl_release() - release handler for adapter file descriptor
+ * @inode:	File-system inode associated with fd.
+ * @file:	File installed with adapter file descriptor.
+ *
+ * This routine is the release handler for the fops registered with
+ * the CXL services on an initial attach for a context. It is called
+ * when a close is performed on the adapter file descriptor returned
+ * to the user. Programmatically, the user is not required to perform
+ * the close, as it is handled internally via the detach ioctl when
+ * a context is being removed. Note that nothing prevents the user
+ * from performing a close, but the user should be aware that doing
+ * so is considered catastrophic and subsequent usage of the superpipe
+ * API with previously saved off tokens will fail.
+ *
+ * When initiated from an external close (either by the user or via
+ * a process tear down), the routine derives the context reference
+ * and calls detach for each LUN associated with the context. The
+ * final detach operation will cause the context itself to be freed.
+ * Note that the saved off lfd is reset prior to calling detach to
+ * signify that the final detach should not perform a close.
+ *
+ * When initiated from a detach operation as part of the tear down
+ * of a context, the context is first completely freed and then the
+ * close is performed. This routine will fail to derive the context
+ * reference (due to the context having already been freed) and then
+ * call into the CXL release entry point.
+ *
+ * Thus, with the exception of when the CXL process element (context id)
+ * lookup fails (a case that should theoretically never occur), every
+ * call into this routine results in a complete freeing of a context.
+ *
+ * As part of the detach, all per-context resources associated with the LUN
+ * are cleaned up. When detaching the last LUN for a context, the context
+ * itself is cleaned up and released.
+ *
+ * Return: 0 on success
+ */
+static int cxlflash_cxl_release(struct inode *inode, struct file *file)
+{
+	struct cxl_context *ctx = cxl_fops_get_context(file);
+	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
+						cxl_fops);
+	struct device *dev = &cfg->dev->dev;
+	struct ctx_info *ctxi = NULL;
+	struct dk_cxlflash_detach detach = { { 0 }, 0 };
+	struct lun_access *lun_access, *t;
+	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
+	int ctxid;
+
+	ctxid = cxl_process_element(ctx);
+	if (unlikely(ctxid < 0)) {
+		dev_err(dev, "%s: Context %p was closed! (%d)\n",
+			__func__, ctx, ctxid);
+		goto out;
+	}
+
+	ctxi = get_context(cfg, ctxid, file, ctrl);
+	if (unlikely(!ctxi)) {
+		ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
+		if (!ctxi) {
+			dev_dbg(dev, "%s: Context %d already free!\n",
+				__func__, ctxid);
+			goto out_release;
+		}
+
+		dev_dbg(dev, "%s: Another process owns context %d!\n",
+			__func__, ctxid);
+		put_context(ctxi);
+		goto out;
+	}
+
+	dev_dbg(dev, "%s: close(%d) for context %d\n",
+		__func__, ctxi->lfd, ctxid);
+
+	/* Reset the file descriptor to indicate we're on a close() thread */
+	ctxi->lfd = -1;
+	detach.context_id = ctxi->ctxid;
+	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
+		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
+out_release:
+	cxl_fd_release(inode, file);
+out:
+	dev_dbg(dev, "%s: returning\n", __func__);
+	return 0;
+}
+
+/**
+ * unmap_context() - clears a previously established mapping
+ * @ctxi:	Context owning the mapping.
+ *
+ * This routine is used to switch between the error notification page
+ * (dummy page of all 1's) and the real mapping (established by the CXL
+ * fault handler).
+ */
+static void unmap_context(struct ctx_info *ctxi)
+{
+	unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
+}
+
+/**
+ * get_err_page() - obtains and allocates the error notification page
+ *
+ * Return: error notification page on success, NULL on failure
+ */
+static struct page *get_err_page(void)
+{
+	struct page *err_page = global.err_page;
+
+	if (unlikely(!err_page)) {
+		err_page = alloc_page(GFP_KERNEL);
+		if (unlikely(!err_page)) {
+			pr_err("%s: Unable to allocate err_page!\n", __func__);
+			goto out;
+		}
+
+		memset(page_address(err_page), -1, PAGE_SIZE);
+
+		/* Serialize update w/ other threads to avoid a leak */
+		mutex_lock(&global.mutex);
+		if (likely(!global.err_page))
+			global.err_page = err_page;
+		else {
+			__free_page(err_page);
+			err_page = global.err_page;
+		}
+		mutex_unlock(&global.mutex);
+	}
+
+out:
+	pr_debug("%s: returning err_page=%p\n", __func__, err_page);
+	return err_page;
+}
+
+/**
+ * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
+ * @vma:	VM area associated with mapping.
+ * @vmf:	VM fault associated with current fault.
+ *
+ * To support error notification via MMIO, faults are 'caught' by this routine
+ * that was inserted before passing back the adapter file descriptor on attach.
+ * When a fault occurs, this routine evaluates if error recovery is active and
+ * if so, installs the error page to 'notify' the user about the error state.
+ * During normal operation, the fault is simply handled by the original fault
+ * handler that was installed by CXL services as part of initializing the
+ * adapter file descriptor. The VMA's page protection bits are toggled to
+ * indicate cached/not-cached depending on the memory backing the fault.
+ *
+ * Return: 0 on success, VM_FAULT_SIGBUS on failure
+ */
+static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct file *file = vma->vm_file;
+	struct cxl_context *ctx = cxl_fops_get_context(file);
+	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
+						cxl_fops);
+	struct device *dev = &cfg->dev->dev;
+	struct ctx_info *ctxi = NULL;
+	struct page *err_page = NULL;
+	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
+	int rc = 0;
+	int ctxid;
+
+	ctxid = cxl_process_element(ctx);
+	if (unlikely(ctxid < 0)) {
+		dev_err(dev, "%s: Context %p was closed! (%d)\n",
+			__func__, ctx, ctxid);
+		goto err;
+	}
+
+	ctxi = get_context(cfg, ctxid, file, ctrl);
+	if (unlikely(!ctxi)) {
+		dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+		goto err;
+	}
+
+	dev_dbg(dev, "%s: fault(%d) for context %d\n",
+		__func__, ctxi->lfd, ctxid);
+
+	if (likely(!ctxi->err_recovery_active)) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
+	} else {
+		dev_dbg(dev, "%s: err recovery active, use err_page!\n",
+			__func__);
+
+		err_page = get_err_page();
+		if (unlikely(!err_page)) {
+			dev_err(dev, "%s: Could not obtain error page!\n",
+				__func__);
+			rc = VM_FAULT_RETRY;
+			goto out;
+		}
+
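+		/* Install the all-ones error page so the user observes the error state */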
+		get_page(err_page);
+		vmf->page = err_page;
+		vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+	}
+
+out:
+	if (likely(ctxi))
+		put_context(ctxi);
+	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
+	return rc;
+
+err:
+	rc = VM_FAULT_SIGBUS;
+	goto out;
+}
+
+/*
+ * Local MMAP vmops to 'catch' faults
+ */
+static const struct vm_operations_struct cxlflash_mmap_vmops = {
+	.fault = cxlflash_mmap_fault,
+};
+
+/**
+ * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
+ * @file:	File installed with adapter file descriptor.
+ * @vma:	VM area associated with mapping.
+ *
+ * Installs local mmap vmops to 'catch' faults for error notification support.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct cxl_context *ctx = cxl_fops_get_context(file);
+	struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
+						cxl_fops);
+	struct device *dev = &cfg->dev->dev;
+	struct ctx_info *ctxi = NULL;
+	enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
+	int ctxid;
+	int rc = 0;
+
+	ctxid = cxl_process_element(ctx);
+	if (unlikely(ctxid < 0)) {
+		dev_err(dev, "%s: Context %p was closed! (%d)\n",
+			__func__, ctx, ctxid);
+		rc = -EIO;
+		goto out;
+	}
+
+	ctxi = get_context(cfg, ctxid, file, ctrl);
+	if (unlikely(!ctxi)) {
+		dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
+		rc = -EIO;
+		goto out;
+	}
+
+	dev_dbg(dev, "%s: mmap(%d) for context %d\n",
+		__func__, ctxi->lfd, ctxid);
+
+	rc = cxl_fd_mmap(file, vma);
+	if (likely(!rc)) {
+		/* Insert ourself in the mmap fault handler path */
+		ctxi->cxl_mmap_vmops = vma->vm_ops;
+		vma->vm_ops = &cxlflash_mmap_vmops;
+	}
+
+out:
+	if (likely(ctxi))
+		put_context(ctxi);
+	return rc;
+}
+
+/*
+ * Local fops for adapter file descriptor
+ */
+static const struct file_operations cxlflash_cxl_fops = {
+	.owner = THIS_MODULE,
+	.mmap = cxlflash_cxl_mmap,
+	.release = cxlflash_cxl_release,
+};
+
+/**
+ * cxlflash_mark_contexts_error() - move contexts to error state and list
+ * @cfg:	Internal structure associated with the host.
+ *
+ * A context is only moved over to the error list when there are no outstanding
+ * references to it. This ensures that a running operation has completed.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
+{
+	int i, rc = 0;
+	struct ctx_info *ctxi = NULL;
+
+	mutex_lock(&cfg->ctx_tbl_list_mutex);
+
+	for (i = 0; i < MAX_CONTEXT; i++) {
+		ctxi = cfg->ctx_tbl[i];
+		if (ctxi) {
+			mutex_lock(&ctxi->mutex);
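+			/* Pull the context from the table and park it on the error recovery list */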
+			cfg->ctx_tbl[i] = NULL;
+			list_add(&ctxi->list, &cfg->ctx_err_recovery);
+			ctxi->err_recovery_active = true;
+			ctxi->ctrl_map = NULL;
+			unmap_context(ctxi);
+			mutex_unlock(&ctxi->mutex);
+		}
+	}
+
+	mutex_unlock(&cfg->ctx_tbl_list_mutex);
+	return rc;
+}
+
+/*
+ * Dummy NULL fops
+ */
+static const struct file_operations null_fops = {
+	.owner = THIS_MODULE,
+};
+
+/**
+ * cxlflash_disk_attach() - attach a LUN to a context
+ * @sdev:	SCSI device associated with LUN.
+ * @attach:	Attach ioctl data structure.
+ *
+ * Creates a context and attaches LUN to it. A LUN can only be attached
+ * one time to a context (subsequent attaches for the same context/LUN pair
+ * are not supported). Additional LUNs can be attached to a context by
+ * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_disk_attach(struct scsi_device *sdev,
+				struct dk_cxlflash_attach *attach)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct cxl_ioctl_start_work *work;
+	struct ctx_info *ctxi = NULL;
+	struct lun_access *lun_access = NULL;
+	int rc = 0;
+	u32 perms;
+	int ctxid = -1;
+	u64 rctxid = 0UL;
+	struct file *file;
+
+	struct cxl_context *ctx;
+
+	int fd = -1;
+
+	/* On first attach set fileops */
+	if (atomic_read(&cfg->num_user_contexts) == 0)
+		cfg->cxl_fops = cxlflash_cxl_fops;
+
+	if (attach->num_interrupts > 4) {
+		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
+			__func__, attach->num_interrupts);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (gli->max_lba == 0) {
+		dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
+			__func__, lli->lun_id[sdev->channel]);
+		rc = read_cap16(sdev, lli);
+		if (rc) {
+			dev_err(dev, "%s: Invalid device! (%d)\n",
+				__func__, rc);
+			rc = -ENODEV;
+			goto out;
+		}
+		dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
+		dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
+	}
+
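+	/* When reusing an existing context, reject a duplicate attach of this LUN */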
+	if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
+		rctxid = attach->context_id;
+		ctxi = get_context(cfg, rctxid, NULL, 0);
+		if (!ctxi) {
+			dev_dbg(dev, "%s: Bad context! (%016llX)\n",
+				__func__, rctxid);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		list_for_each_entry(lun_access, &ctxi->luns, list)
+			if (lun_access->lli == lli) {
+				dev_dbg(dev, "%s: Already attached!\n",
+					__func__);
+				rc = -EINVAL;
+				goto out;
+			}
+	}
+
+	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
+	if (unlikely(!lun_access)) {
+		dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	lun_access->lli = lli;
+	lun_access->sdev = sdev;
+
+	/* Non-NULL context indicates reuse */
+	if (ctxi) {
+		dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
+			__func__, rctxid);
+		list_add(&lun_access->list, &ctxi->luns);
+		fd = ctxi->lfd;
+		goto out_attach;
+	}
+
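+	/* Create a fresh CXL context for this attach */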
+	ctx = cxl_dev_context_init(cfg->dev);
+	if (unlikely(IS_ERR_OR_NULL(ctx))) {
+		dev_err(dev, "%s: Could not initialize context %p\n",
+			__func__, ctx);
+		rc = -ENODEV;
+		goto err0;
+	}
+
+	ctxid = cxl_process_element(ctx);
+	if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+		rc = -EPERM;
+		goto err1;
+	}
+
+	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+	if (unlikely(fd < 0)) {
+		rc = -ENODEV;
+		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
+		goto err1;
+	}
+
+	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
+	perms = SISL_RHT_PERM(attach->hdr.flags + 1);
+
+	ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
+	if (unlikely(!ctxi)) {
+		dev_err(dev, "%s: Failed to create context! (%d)\n",
+			__func__, ctxid);
+		goto err2;
+	}
+
+	work = &ctxi->work;
+	work->num_interrupts = attach->num_interrupts;
+	work->flags = CXL_START_WORK_NUM_IRQS;
+
+	rc = cxl_start_work(ctx, work);
+	if (unlikely(rc)) {
+		dev_dbg(dev, "%s: Could not start context rc=%d\n",
+			__func__, rc);
+		goto err3;
+	}
+
+	rc = afu_attach(cfg, ctxi);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
+		goto err4;
+	}
+
+	/*
+	 * No error paths after this point. Once the fd is installed it's
+	 * visible to user space and can't be undone safely on this thread.
+	 * There is no need to worry about a deadlock here because no one
+	 * knows about us yet; we can be the only one holding our mutex.
+	 */
+	list_add(&lun_access->list, &ctxi->luns);
+	mutex_unlock(&ctxi->mutex);
+	mutex_lock(&cfg->ctx_tbl_list_mutex);
+	mutex_lock(&ctxi->mutex);
+	cfg->ctx_tbl[ctxid] = ctxi;
+	mutex_unlock(&cfg->ctx_tbl_list_mutex);
+	fd_install(fd, file);
+
+out_attach:
+	attach->hdr.return_flags = 0;
+	attach->context_id = ctxi->ctxid;
+	attach->block_size = gli->blk_len;
+	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
+	attach->last_lba = gli->max_lba;
+	attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;
+
+out:
+	attach->adap_fd = fd;
+
+	if (ctxi)
+		put_context(ctxi);
+
+	dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
+		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
+	return rc;
+
+err4:
+	cxl_stop_context(ctx);
+err3:
+	put_context(ctxi);
+	destroy_context(cfg, ctxi);
+	ctxi = NULL;
+err2:
+	/*
+	 * Here, we're overriding the fops with a dummy all-NULL fops because
+	 * fput() calls the release fop, which will cause us to mistakenly
+	 * call into the CXL code. Rather than try to add yet more complexity
+	 * to that routine (cxlflash_cxl_release) we should try to fix the
+	 * issue here.
+	 */
+	file->f_op = &null_fops;
+	fput(file);
+	put_unused_fd(fd);
+	fd = -1;
+err1:
+	cxl_release_context(ctx);
+err0:
+	kfree(lun_access);
+	goto out;
+}
+
+/**
+ * recover_context() - recovers a context in error
+ * @cfg:	Internal structure associated with the host.
+ * @ctxi:	Context to recover.
+ *
+ * Re-establishes the state for a context-in-error.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+{
+	struct device *dev = &cfg->dev->dev;
+	int rc = 0;
+	int old_fd, fd = -1;
+	int ctxid = -1;
+	struct file *file;
+	struct cxl_context *ctx;
+	struct afu *afu = cfg->afu;
+
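+	/* Allocate a replacement CXL context and adapter fd */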
+	ctx = cxl_dev_context_init(cfg->dev);
+	if (unlikely(IS_ERR_OR_NULL(ctx))) {
+		dev_err(dev, "%s: Could not initialize context %p\n",
+			__func__, ctx);
+		rc = -ENODEV;
+		goto out;
+	}
+
+	ctxid = cxl_process_element(ctx);
+	if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
+		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
+		rc = -EPERM;
+		goto err1;
+	}
+
+	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
+	if (unlikely(fd < 0)) {
+		rc = -ENODEV;
+		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
+		goto err1;
+	}
+
+	rc = cxl_start_work(ctx, &ctxi->work);
+	if (unlikely(rc)) {
+		dev_dbg(dev, "%s: Could not start context rc=%d\n",
+			__func__, rc);
+		goto err2;
+	}
+
+	/* Update with new MMIO area based on updated context id */
+	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
+
+	rc = afu_attach(cfg, ctxi);
+	if (rc) {
+		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
+		goto err3;
+	}
+
+	/*
+	 * No error paths after this point. Once the fd is installed it's
+	 * visible to user space and can't be undone safely on this thread.
+	 */
+	old_fd = ctxi->lfd;
+	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
+	ctxi->lfd = fd;
+	ctxi->ctx = ctx;
+	ctxi->file = file;
+
+	/*
+	 * Put context back in table (note the reinit of the context list);
+	 * we must first drop the context's mutex and then acquire it in
+	 * order with the table/list mutex to avoid a deadlock - safe to do
+	 * here because no one can find us at this moment in time.
+	 */
+	mutex_unlock(&ctxi->mutex);
+	mutex_lock(&cfg->ctx_tbl_list_mutex);
+	mutex_lock(&ctxi->mutex);
+	list_del_init(&ctxi->list);
+	cfg->ctx_tbl[ctxid] = ctxi;
+	mutex_unlock(&cfg->ctx_tbl_list_mutex);
+	fd_install(fd, file);
+
+	/* Release the original adapter fd and associated CXL resources */
+	sys_close(old_fd);
+out:
+	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
+		__func__, ctxid, fd, rc);
+	return rc;
+
+err3:
+	cxl_stop_context(ctx);
+err2:
+	fput(file);
+	put_unused_fd(fd);
+err1:
+	cxl_release_context(ctx);
+	goto out;
+}
+
+/**
+ * check_state() - checks and responds to the current adapter state
+ * @cfg:	Internal structure associated with the host.
+ *
+ * This routine can block and should only be used in process context.
+ * Note that when waking up from waiting in limbo, the state is unknown
+ * and must be checked again before proceeding.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int check_state(struct cxlflash_cfg *cfg)
+{
+	struct device *dev = &cfg->dev->dev;
+	int rc = 0;
+
+retry:
+	switch (cfg->state) {
+	case STATE_LIMBO:
+		dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
+		rc = wait_event_interruptible(cfg->limbo_waitq,
+					      cfg->state != STATE_LIMBO);
+		if (unlikely(rc))
+			break;
+		goto retry;
+	case STATE_FAILTERM:
+		dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
+		rc = -ENODEV;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * cxlflash_afu_recover() - initiates AFU recovery
+ * @sdev:	SCSI device associated with LUN.
+ * @recover:	Recover ioctl data structure.
+ *
+ * Only a single recovery is allowed at a time to avoid exhausting CXL
+ * resources (leading to recovery failure) in the event that we're up
+ * against the maximum number of contexts limit. For similar reasons,
+ * a context recovery is retried if there are multiple recoveries taking
+ * place at the same time and the failure was due to CXL services being
+ * unable to keep up.
+ *
+ * Because a user can detect an error condition before the kernel, it is
+ * quite possible for this routine to act as the kernel's EEH detection
+ * source (MMIO read of mbox_r). Because of this, there is a window of
+ * time where an EEH might have been detected but not yet 'serviced'
+ * (callback invoked, causing the device to enter limbo state). To avoid
+ * looping in this routine during that window, a 1 second sleep is in place
+ * between the time the MMIO failure is detected and the time a wait on the
+ * limbo wait queue is attempted via check_state().
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_afu_recover(struct scsi_device *sdev,
+				struct dk_cxlflash_recover_afu *recover)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	struct afu *afu = cfg->afu;
+	struct ctx_info *ctxi = NULL;
+	struct mutex *mutex = &cfg->ctx_recovery_mutex;
+	u64 ctxid = DECODE_CTXID(recover->context_id),
+	    rctxid = recover->context_id;
+	long reg;
+	int lretry = 20; /* up to 2 seconds */
+	int rc = 0;
+
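+	/* Track concurrent recovery threads and serialize the actual recovery */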
+	atomic_inc(&cfg->recovery_threads);
+	rc = mutex_lock_interruptible(mutex);
+	if (rc)
+		goto out;
+
+	dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
+		__func__, recover->reason, rctxid);
+
+retry:
+	/* Ensure that this process is attached to the context */
+	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+	if (unlikely(!ctxi)) {
+		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (ctxi->err_recovery_active) {
+retry_recover:
+		rc = recover_context(cfg, ctxi);
+		if (unlikely(rc)) {
+			dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
+				__func__, ctxid, rc);
+			if ((rc == -ENODEV) &&
+			    ((atomic_read(&cfg->recovery_threads) > 1) ||
+			     (lretry--))) {
+				dev_dbg(dev, "%s: Going to try again!\n",
+					__func__);
+				mutex_unlock(mutex);
+				msleep(100);
+				rc = mutex_lock_interruptible(mutex);
+				if (rc)
+					goto out;
+				goto retry_recover;
+			}
+
+			goto out;
+		}
+
+		ctxi->err_recovery_active = false;
+		recover->context_id = ctxi->ctxid;
+		recover->adap_fd = ctxi->lfd;
+		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
+		recover->hdr.return_flags |=
+			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
+		goto out;
+	}
+
+	/* Test if in error state */
+	reg = readq_be(&afu->ctrl_map->mbox_r);
+	if (reg == -1) {
+		dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
+			__func__);
+		mutex_unlock(&ctxi->mutex);
+		ctxi = NULL;
+		ssleep(1);
+		rc = check_state(cfg);
+		if (unlikely(rc))
+			goto out;
+		goto retry;
+	}
+
+	dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
+out:
+	if (likely(ctxi))
+		put_context(ctxi);
+	mutex_unlock(mutex);
+	atomic_dec_if_positive(&cfg->recovery_threads);
+	return rc;
+}
+
+/**
+ * process_sense() - evaluates and processes sense data
+ * @sdev:	SCSI device associated with LUN.
+ * @verify:	Verify ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int process_sense(struct scsi_device *sdev,
+			 struct dk_cxlflash_verify *verify)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	u64 prev_lba = gli->max_lba;
+	struct scsi_sense_hdr sshdr = { 0 };
+	int rc = 0;
+
+	rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
+				  DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
+	if (!rc) {
+		dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	switch (sshdr.sense_key) {
+	case NO_SENSE:
+	case RECOVERED_ERROR:
+		/* fall through */
+	case NOT_READY:
+		break;
+	case UNIT_ATTENTION:
+		switch (sshdr.asc) {
+		case 0x29: /* Power on Reset or Device Reset */
+			/* fall through */
+		case 0x2A: /* Device settings/capacity changed */
+			rc = read_cap16(sdev, lli);
+			if (rc) {
+				rc = -ENODEV;
+				break;
+			}
+			if (prev_lba != gli->max_lba)
+				dev_dbg(dev, "%s: Capacity changed old=%lld "
+					"new=%lld\n", __func__, prev_lba,
+					gli->max_lba);
+			break;
+		case 0x3F: /* Report LUNs changed, Rescan. */
+			scsi_scan_host(cfg->host);
+			break;
+		default:
+			rc = -EIO;
+			break;
+		}
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+out:
+	dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
+		sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_disk_verify() - verifies a LUN is the same and handles size changes
+ * @sdev:	SCSI device associated with LUN.
+ * @verify:	Verify ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_disk_verify(struct scsi_device *sdev,
+				struct dk_cxlflash_verify *verify)
+{
+	int rc = 0;
+	struct ctx_info *ctxi = NULL;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct sisl_rht_entry *rhte = NULL;
+	res_hndl_t rhndl = verify->rsrc_handle;
+	u64 ctxid = DECODE_CTXID(verify->context_id),
+	    rctxid = verify->context_id;
+	u64 last_lba = 0;
+
+	dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
+		"flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
+		verify->hint, verify->hdr.flags);
+
+	ctxi = get_context(cfg, rctxid, lli, 0);
+	if (unlikely(!ctxi)) {
+		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rhte = get_rhte(ctxi, rhndl, lli);
+	if (unlikely(!rhte)) {
+		dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
+			__func__, rhndl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Look at the hint/sense to see if it requires us to redrive
+	 * inquiry (i.e. the Unit attention is due to the WWN changing).
+	 */
+	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
+		rc = process_sense(sdev, verify);
+		if (unlikely(rc)) {
+			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
+				__func__, rc);
+			goto out;
+		}
+	}
+
+	switch (gli->mode) {
+	case MODE_PHYSICAL:
+		last_lba = gli->max_lba;
+		break;
+	case MODE_VIRTUAL:
+		/* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
+		last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
+		last_lba /= CXLFLASH_BLOCK_SIZE;
+		last_lba--;
+		break;
+	default:
+		WARN(1, "Unsupported LUN mode!");
+	}
+
+	verify->last_lba = last_lba;
+
+out:
+	if (likely(ctxi))
+		put_context(ctxi);
+	dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
+		__func__, rc, verify->last_lba);
+	return rc;
+}
+
+/**
+ * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
+ * @cmd:	The ioctl command to decode.
+ *
+ * Return: A string identifying the decoded ioctl.
+ */
+static char *decode_ioctl(int cmd)
+{
+	switch (cmd) {
+	case DK_CXLFLASH_ATTACH:
+		return __stringify_1(DK_CXLFLASH_ATTACH);
+	case DK_CXLFLASH_USER_DIRECT:
+		return __stringify_1(DK_CXLFLASH_USER_DIRECT);
+	case DK_CXLFLASH_USER_VIRTUAL:
+		return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
+	case DK_CXLFLASH_VLUN_RESIZE:
+		return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
+	case DK_CXLFLASH_RELEASE:
+		return __stringify_1(DK_CXLFLASH_RELEASE);
+	case DK_CXLFLASH_DETACH:
+		return __stringify_1(DK_CXLFLASH_DETACH);
+	case DK_CXLFLASH_VERIFY:
+		return __stringify_1(DK_CXLFLASH_VERIFY);
+	case DK_CXLFLASH_VLUN_CLONE:
+		return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
+	case DK_CXLFLASH_RECOVER_AFU:
+		return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
+	case DK_CXLFLASH_MANAGE_LUN:
+		return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
+	}
+
+	return "UNKNOWN";
+}
+
+/**
+ * cxlflash_disk_direct_open() - opens a direct (physical) disk
+ * @sdev:	SCSI device associated with LUN.
+ * @arg:	UDirect ioctl data structure.
+ *
+ * On successful return, the user is informed of the resource handle
+ * to be used to identify the direct lun and the size (in blocks) of
+ * the direct lun in last LBA format.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+
+	struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
+
+	u64 ctxid = DECODE_CTXID(pphys->context_id),
+	    rctxid = pphys->context_id;
+	u64 lun_size = 0;
+	u64 last_lba = 0;
+	u64 rsrc_handle = -1;
+	u32 port = CHAN2PORT(sdev->channel);
+
+	int rc = 0;
+
+	struct ctx_info *ctxi = NULL;
+	struct sisl_rht_entry *rhte = NULL;
+
+	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+
+	rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
+	if (unlikely(rc)) {
+		dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
+			__func__);
+		goto out;
+	}
+
+	ctxi = get_context(cfg, rctxid, lli, 0);
+	if (unlikely(!ctxi)) {
+		dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+		rc = -EINVAL;
+		goto err1;
+	}
+
+	rhte = rhte_checkout(ctxi, lli);
+	if (unlikely(!rhte)) {
+		dev_dbg(dev, "%s: too many opens for this context\n", __func__);
+		rc = -EMFILE;	/* too many opens  */
+		goto err1;
+	}
+
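+	/* The resource handle is the RHTE's index within the context's RHT */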
+	rsrc_handle = (rhte - ctxi->rht_start);
+
+	rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
+	cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
+
+	last_lba = gli->max_lba;
+	pphys->hdr.return_flags = 0;
+	pphys->last_lba = last_lba;
+	pphys->rsrc_handle = rsrc_handle;
+
+out:
+	if (likely(ctxi))
+		put_context(ctxi);
+	dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
+		__func__, rsrc_handle, rc, last_lba);
+	return rc;
+
+err1:
+	cxlflash_lun_detach(gli);
+	goto out;
+}
+
+/**
+ * ioctl_common() - common IOCTL handler for driver
+ * @sdev:	SCSI device associated with LUN.
+ * @cmd:	IOCTL command.
+ *
+ * Handles common fencing operations that are valid for multiple ioctls. Always
+ * allow through ioctls that are cleanup oriented in nature, even when operating
+ * in a failed/terminating state.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ioctl_common(struct scsi_device *sdev, int cmd)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	int rc = 0;
+
+	if (unlikely(!lli)) {
+		dev_dbg(dev, "%s: Unknown LUN\n", __func__);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = check_state(cfg);
+	if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
+		switch (cmd) {
+		case DK_CXLFLASH_VLUN_RESIZE:
+		case DK_CXLFLASH_RELEASE:
+		case DK_CXLFLASH_DETACH:
+			dev_dbg(dev, "%s: Command override! (%d)\n",
+				__func__, rc);
+			rc = 0;
+			break;
+		}
+	}
+out:
+	return rc;
+}
+
+/**
+ * cxlflash_ioctl() - IOCTL handler for driver
+ * @sdev:	SCSI device associated with LUN.
+ * @cmd:	IOCTL command.
+ * @arg:	Userspace ioctl data structure.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	typedef int (*sioctl) (struct scsi_device *, void *);
+
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct afu *afu = cfg->afu;
+	struct dk_cxlflash_hdr *hdr;
+	char buf[sizeof(union cxlflash_ioctls)];
+	size_t size = 0;
+	bool known_ioctl = false;
+	int idx;
+	int rc = 0;
+	struct Scsi_Host *shost = sdev->host;
+	sioctl do_ioctl = NULL;
+
+	static const struct {
+		size_t size;
+		sioctl ioctl;
+	} ioctl_tbl[] = {	/* NOTE: order matters here */
+	{sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
+	{sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
+	{sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
+	{sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
+	{sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
+	{sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
+	{sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
+	{sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
+	{sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
+	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
+	};
+
+	/* Restrict command set to physical support only for internal LUN */
+	if (afu->internal_lun)
+		switch (cmd) {
+		case DK_CXLFLASH_RELEASE:
+		case DK_CXLFLASH_USER_VIRTUAL:
+		case DK_CXLFLASH_VLUN_RESIZE:
+		case DK_CXLFLASH_VLUN_CLONE:
+			dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
+				__func__, decode_ioctl(cmd), afu->internal_lun);
+			rc = -EINVAL;
+			goto cxlflash_ioctl_exit;
+		}
+
+	switch (cmd) {
+	case DK_CXLFLASH_ATTACH:
+	case DK_CXLFLASH_USER_DIRECT:
+	case DK_CXLFLASH_RELEASE:
+	case DK_CXLFLASH_DETACH:
+	case DK_CXLFLASH_VERIFY:
+	case DK_CXLFLASH_RECOVER_AFU:
+	case DK_CXLFLASH_USER_VIRTUAL:
+	case DK_CXLFLASH_VLUN_RESIZE:
+	case DK_CXLFLASH_VLUN_CLONE:
+		dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
+			__func__, decode_ioctl(cmd), cmd, shost->host_no,
+			sdev->channel, sdev->id, sdev->lun);
+		rc = ioctl_common(sdev, cmd);
+		if (unlikely(rc))
+			goto cxlflash_ioctl_exit;
+
+		/* fall through */
+
+	case DK_CXLFLASH_MANAGE_LUN:
+		known_ioctl = true;
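+		/* Index into ioctl_tbl using the command number relative to ATTACH */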
+		idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
+		size = ioctl_tbl[idx].size;
+		do_ioctl = ioctl_tbl[idx].ioctl;
+
+		if (likely(do_ioctl))
+			break;
+
+		/* fall through */
+	default:
+		rc = -EINVAL;
+		goto cxlflash_ioctl_exit;
+	}
+
+	if (unlikely(copy_from_user(&buf, arg, size))) {
+		dev_err(dev, "%s: copy_from_user() fail! "
+			"size=%lu cmd=%d (%s) arg=%p\n",
+			__func__, size, cmd, decode_ioctl(cmd), arg);
+		rc = -EFAULT;
+		goto cxlflash_ioctl_exit;
+	}
+
+	hdr = (struct dk_cxlflash_hdr *)&buf;
+	if (hdr->version != DK_CXLFLASH_VERSION_0) {
+		dev_dbg(dev, "%s: Version %u not supported for %s\n",
+			__func__, hdr->version, decode_ioctl(cmd));
+		rc = -EINVAL;
+		goto cxlflash_ioctl_exit;
+	}
+
+	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
+		dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
+		rc = -EINVAL;
+		goto cxlflash_ioctl_exit;
+	}
+
+	rc = do_ioctl(sdev, (void *)&buf);
+	if (likely(!rc))
+		if (unlikely(copy_to_user(arg, &buf, size))) {
+			dev_err(dev, "%s: copy_to_user() fail! "
+				"size=%lu cmd=%d (%s) arg=%p\n",
+				__func__, size, cmd, decode_ioctl(cmd), arg);
+			rc = -EFAULT;
+		}
+
+	/* fall through to exit */
+
+cxlflash_ioctl_exit:
+	if (unlikely(rc && known_ioctl))
+		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
+			"returned rc %d\n", __func__,
+			decode_ioctl(cmd), cmd, shost->host_no,
+			sdev->channel, sdev->id, sdev->lun, rc);
+	else
+		dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
+			"returned rc %d\n", __func__, decode_ioctl(cmd),
+			cmd, shost->host_no, sdev->channel, sdev->id,
+			sdev->lun, rc);
+	return rc;
+}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
new file mode 100644
index 0000000..d7dc88b
--- /dev/null
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -0,0 +1,147 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_SUPERPIPE_H
+#define _CXLFLASH_SUPERPIPE_H
+
+extern struct cxlflash_global global;
+
+/*
+ * Terminology: use afu (and not adapter) to refer to the HW.
+ * Adapter is the entire slot and includes PSL out of which
+ * only the AFU is visible to user space.
+ */
+
+/*
+ * Chunk size parms: note sislite minimum chunk size is
+ * 0x10000 LBAs, corresponding to an NMASK of 16.
+ */
+#define MC_CHUNK_SIZE     (1 << MC_RHT_NMASK)	/* in LBAs */
+
+#define MC_DISCOVERY_TIMEOUT 5  /* 5 secs */
+
+#define CHAN2PORT(_x)	((_x) + 1)
+#define PORT2CHAN(_x)	((_x) - 1)
+
+enum lun_mode {
+	MODE_NONE = 0,
+	MODE_VIRTUAL,
+	MODE_PHYSICAL
+};
+
+/* Global (entire driver, spans adapters) lun_info structure */
+struct glun_info {
+	u64 max_lba;		/* from read cap(16) */
+	u32 blk_len;		/* from read cap(16) */
+	enum lun_mode mode;	/* NONE, VIRTUAL, PHYSICAL */
+	int users;		/* Number of users w/ references to LUN */
+
+	u8 wwid[16];
+
+	struct mutex mutex;
+
+	struct blka blka;
+	struct list_head list;
+};
+
+/* Local (per-adapter) lun_info structure */
+struct llun_info {
+	u64 lun_id[CXLFLASH_NUM_FC_PORTS]; /* from REPORT_LUNS */
+	u32 lun_index;		/* Index in the LUN table */
+	u32 host_no;		/* host_no from Scsi_host */
+	u32 port_sel;		/* What port to use for this LUN */
+	bool newly_created;	/* Whether the LUN was just discovered */
+	bool in_table;		/* Whether a LUN table entry was created */
+
+	u8 wwid[16];		/* Keep a duplicate copy here? */
+
+	struct glun_info *parent; /* Pointer to entry in global LUN structure */
+	struct scsi_device *sdev;
+	struct list_head list;
+};
+
+struct lun_access {
+	struct llun_info *lli;
+	struct scsi_device *sdev;
+	struct list_head list;
+};
+
+enum ctx_ctrl {
+	CTX_CTRL_CLONE		= (1 << 1),
+	CTX_CTRL_ERR		= (1 << 2),
+	CTX_CTRL_ERR_FALLBACK	= (1 << 3),
+	CTX_CTRL_NOPID		= (1 << 4),
+	CTX_CTRL_FILE		= (1 << 5)
+};
+
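+/*
+ * Context IDs given to user space encode bits of the ctx_info pointer in the
+ * upper 32 bits and the CXL process element (context id) in the lower 32 bits.
+ */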
+#define ENCODE_CTXID(_ctx, _id)	(((((u64)_ctx) & 0xFFFFFFFF0) << 28) | _id)
+#define DECODE_CTXID(_val)	(_val & 0xFFFFFFFF)
+
+struct ctx_info {
+	struct sisl_ctrl_map *ctrl_map; /* initialized at startup */
+	struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
+					     alloc/free on attach/detach */
+	u32 rht_out;		/* Number of checked out RHT entries */
+	u32 rht_perms;		/* User-defined permissions for RHT entries */
+	struct llun_info **rht_lun;       /* Mapping of RHT entries to LUNs */
+	bool *rht_needs_ws;	/* User-desired write-same function per RHTE */
+
+	struct cxl_ioctl_start_work work;
+	u64 ctxid;
+	int lfd;
+	pid_t pid;
+	bool unavail;
+	bool err_recovery_active;
+	struct mutex mutex; /* Context protection */
+	struct cxl_context *ctx;
+	struct list_head luns;	/* LUNs attached to this context */
+	const struct vm_operations_struct *cxl_mmap_vmops;
+	struct file *file;
+	struct list_head list; /* Link contexts in error recovery */
+};
+
+struct cxlflash_global {
+	struct mutex mutex;
+	struct list_head gluns; /* list of glun_info structs */
+	struct page *err_page; /* One page of all 0xFF for error notification */
+};
+
+int cxlflash_vlun_resize(struct scsi_device *, struct dk_cxlflash_resize *);
+int _cxlflash_vlun_resize(struct scsi_device *, struct ctx_info *,
+			  struct dk_cxlflash_resize *);
+
+int cxlflash_disk_release(struct scsi_device *, struct dk_cxlflash_release *);
+int _cxlflash_disk_release(struct scsi_device *, struct ctx_info *,
+			   struct dk_cxlflash_release *);
+
+int cxlflash_disk_clone(struct scsi_device *, struct dk_cxlflash_clone *);
+
+int cxlflash_disk_virtual_open(struct scsi_device *, void *);
+
+int cxlflash_lun_attach(struct glun_info *, enum lun_mode, bool);
+void cxlflash_lun_detach(struct glun_info *);
+
+struct ctx_info *get_context(struct cxlflash_cfg *, u64, void *, enum ctx_ctrl);
+void put_context(struct ctx_info *);
+
+struct sisl_rht_entry *get_rhte(struct ctx_info *, res_hndl_t,
+				struct llun_info *);
+
+struct sisl_rht_entry *rhte_checkout(struct ctx_info *, struct llun_info *);
+void rhte_checkin(struct ctx_info *, struct sisl_rht_entry *);
+
+void cxlflash_ba_terminate(struct ba_lun *);
+
+int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
+
+#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
new file mode 100644
index 0000000..6155cb1
--- /dev/null
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -0,0 +1,1243 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/syscalls.h>
+#include <misc/cxl.h>
+#include <asm/unaligned.h>
+#include <asm/bitsperlong.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <uapi/scsi/cxlflash_ioctl.h>
+
+#include "sislite.h"
+#include "common.h"
+#include "vlun.h"
+#include "superpipe.h"
+
+/**
+ * marshal_virt_to_resize() - translate uvirtual to resize structure
+ * @virt:	Source structure from which to translate/copy.
+ * @resize:	Destination structure for the translate/copy.
+ */
+static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
+				   struct dk_cxlflash_resize *resize)
+{
+	resize->hdr = virt->hdr;
+	resize->context_id = virt->context_id;
+	resize->rsrc_handle = virt->rsrc_handle;
+	resize->req_size = virt->lun_size;
+	resize->last_lba = virt->last_lba;
+}
+
+/**
+ * marshal_clone_to_rele() - translate clone to release structure
+ * @clone:	Source structure from which to translate/copy.
+ * @release:	Destination structure for the translate/copy.
+ */
+static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
+				  struct dk_cxlflash_release *release)
+{
+	release->hdr = clone->hdr;
+	release->context_id = clone->context_id_dst;
+}
+
+/**
+ * ba_init() - initializes a block allocator
+ * @ba_lun:	Block allocator to initialize.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int ba_init(struct ba_lun *ba_lun)
+{
+	struct ba_lun_info *bali = NULL;
+	int lun_size_au = 0, i = 0;
+	int last_word_underflow = 0;
+	u64 *lam;
+
+	pr_debug("%s: Initializing LUN: lun_id = %llX, "
+		 "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n",
+		__func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
+
+	/* Calculate bit map size */
+	lun_size_au = ba_lun->lsize / ba_lun->au_size;
+	if (lun_size_au == 0) {
+		pr_debug("%s: Requested LUN size of 0!\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Allocate lun information container */
+	bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
+	if (unlikely(!bali)) {
+		pr_err("%s: Failed to allocate lun_info for lun_id %llX\n",
+		       __func__, ba_lun->lun_id);
+		return -ENOMEM;
+	}
+
+	bali->total_aus = lun_size_au;
+	bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
+
+	if (lun_size_au % BITS_PER_LONG)
+		bali->lun_bmap_size++;
+
+	/* Allocate bitmap space */
+	bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
+				      GFP_KERNEL);
+	if (unlikely(!bali->lun_alloc_map)) {
+		pr_err("%s: Failed to allocate lun allocation map: "
+		       "lun_id = %llX\n", __func__, ba_lun->lun_id);
+		kfree(bali);
+		return -ENOMEM;
+	}
+
+	/* Initialize the bit map size and set all bits to '1' */
+	bali->free_aun_cnt = lun_size_au;
+
+	for (i = 0; i < bali->lun_bmap_size; i++)
+		bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
+
+	/* If the last word is not fully utilized, mark extra bits as allocated */
+	last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
+	last_word_underflow -= bali->free_aun_cnt;
+	if (last_word_underflow > 0) {
+		lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
+		for (i = (HIBIT - last_word_underflow + 1);
+		     i < BITS_PER_LONG;
+		     i++)
+			clear_bit(i, (ulong *)lam);
+	}
+
+	/* Initialize high elevator index, low/curr already at 0 from kzalloc */
+	bali->free_high_idx = bali->lun_bmap_size;
+
+	/* Allocate clone map */
+	bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
+				      GFP_KERNEL);
+	if (unlikely(!bali->aun_clone_map)) {
+		pr_err("%s: Failed to allocate clone map: lun_id = %llX\n",
+		       __func__, ba_lun->lun_id);
+		kfree(bali->lun_alloc_map);
+		kfree(bali);
+		return -ENOMEM;
+	}
+
+	/* Pass the allocated lun info as a handle to the user */
+	ba_lun->ba_lun_handle = bali;
+
+	pr_debug("%s: Successfully initialized the LUN: "
+		 "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n",
+		__func__, ba_lun->lun_id, bali->lun_bmap_size,
+		bali->free_aun_cnt);
+	return 0;
+}
+
+/**
+ * find_free_range() - locates a free bit within the block allocator
+ * @low:	First word in block allocator to start search.
+ * @high:	Last word in block allocator to search.
+ * @bali:	LUN information structure owning the block allocator to search.
+ * @bit_word:	Passes back the word in the block allocator owning the free bit.
+ *
+ * Return: The bit position within the passed back word, -1 on failure
+ */
+static int find_free_range(u32 low,
+			   u32 high,
+			   struct ba_lun_info *bali, int *bit_word)
+{
+	int i;
+	u64 bit_pos = -1;
+	ulong *lam, num_bits;
+
+	for (i = low; i < high; i++)
+		if (bali->lun_alloc_map[i] != 0) {
+			lam = (ulong *)&bali->lun_alloc_map[i];
+			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
+			bit_pos = find_first_bit(lam, num_bits);
+
+			pr_devel("%s: Found free bit %llX in lun "
+				 "map entry %llX at bitmap index = %X\n",
+				 __func__, bit_pos, bali->lun_alloc_map[i],
+				 i);
+
+			*bit_word = i;
+			bali->free_aun_cnt--;
+			clear_bit(bit_pos, lam);
+			break;
+		}
+
+	return bit_pos;
+}
+
+/**
+ * ba_alloc() - allocates a block from the block allocator
+ * @ba_lun:	Block allocator from which to allocate a block.
+ *
+ * Return: The allocated block, -1 on failure
+ */
+static u64 ba_alloc(struct ba_lun *ba_lun)
+{
+	u64 bit_pos = -1;
+	int bit_word = 0;
+	struct ba_lun_info *bali = NULL;
+
+	bali = ba_lun->ba_lun_handle;
+
+	pr_debug("%s: Received block allocation request: "
+		 "lun_id = %llX, free_aun_cnt = %llX\n",
+		 __func__, ba_lun->lun_id, bali->free_aun_cnt);
+
+	if (bali->free_aun_cnt == 0) {
+		pr_debug("%s: No space left on LUN: lun_id = %llX\n",
+			 __func__, ba_lun->lun_id);
+		return -1ULL;
+	}
+
+	/* Search to find a free entry, curr->high then low->curr */
+	bit_pos = find_free_range(bali->free_curr_idx,
+				  bali->free_high_idx, bali, &bit_word);
+	if (bit_pos == -1) {
+		bit_pos = find_free_range(bali->free_low_idx,
+					  bali->free_curr_idx,
+					  bali, &bit_word);
+		if (bit_pos == -1) {
+			pr_debug("%s: Could not find an allocation unit on LUN:"
+				 " lun_id = %llX\n", __func__, ba_lun->lun_id);
+			return -1ULL;
+		}
+	}
+
+	/* Update the free_curr_idx */
+	if (bit_pos == HIBIT)
+		bali->free_curr_idx = bit_word + 1;
+	else
+		bali->free_curr_idx = bit_word;
+
+	pr_debug("%s: Allocating AU number %llX, on lun_id %llX, "
+		 "free_aun_cnt = %llX\n", __func__,
+		 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
+		 bali->free_aun_cnt);
+
+	return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
+}
+
+/**
+ * validate_alloc() - validates the specified block has been allocated
+ * @bali:		LUN info owning the block allocator.
+ * @aun:		Block to validate.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int validate_alloc(struct ba_lun_info *bali, u64 aun)
+{
+	int idx = 0, bit_pos = 0;
+
+	idx = aun / BITS_PER_LONG;
+	bit_pos = aun % BITS_PER_LONG;
+
+	if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
+		return -1;
+
+	return 0;
+}
+
+/**
+ * ba_free() - frees a block from the block allocator
+ * @ba_lun:	Block allocator from which to free a block.
+ * @to_free:	Block to free.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int ba_free(struct ba_lun *ba_lun, u64 to_free)
+{
+	int idx = 0, bit_pos = 0;
+	struct ba_lun_info *bali = NULL;
+
+	bali = ba_lun->ba_lun_handle;
+
+	if (validate_alloc(bali, to_free)) {
+		pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n",
+			 __func__, to_free, ba_lun->lun_id);
+		return -1;
+	}
+
+	pr_debug("%s: Received a request to free AU %llX on lun_id %llX, "
+		 "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id,
+		 bali->free_aun_cnt);
+
+	if (bali->aun_clone_map[to_free] > 0) {
+		pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone "
+			 "count = %X\n", __func__, to_free, ba_lun->lun_id,
+			 bali->aun_clone_map[to_free]);
+		bali->aun_clone_map[to_free]--;
+		return 0;
+	}
+
+	idx = to_free / BITS_PER_LONG;
+	bit_pos = to_free % BITS_PER_LONG;
+
+	set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
+	bali->free_aun_cnt++;
+
+	if (idx < bali->free_low_idx)
+		bali->free_low_idx = idx;
+	else if (idx > bali->free_high_idx)
+		bali->free_high_idx = idx;
+
+	pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on "
+		 "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx,
+		 ba_lun->lun_id, bali->free_aun_cnt);
+
+	return 0;
+}
+
+/**
+ * ba_clone() - Clone a chunk of the block allocation table
+ * @ba_lun:	Block allocator owning the block to clone.
+ * @to_clone:	Block to clone.
+ *
+ * Return: 0 on success, -1 on failure
+ */
+static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
+{
+	struct ba_lun_info *bali = ba_lun->ba_lun_handle;
+
+	if (validate_alloc(bali, to_clone)) {
+		pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n",
+			 __func__, to_clone, ba_lun->lun_id);
+		return -1;
+	}
+
+	pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n",
+		 __func__, to_clone, ba_lun->lun_id);
+
+	if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
+		pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n",
+			 __func__, to_clone, ba_lun->lun_id);
+		return -1;
+	}
+
+	bali->aun_clone_map[to_clone]++;
+
+	return 0;
+}
+
+/**
+ * ba_space() - returns the amount of free space left in the block allocator
+ * @ba_lun:	Block allocator.
+ *
+ * Return: Amount of free space in block allocator
+ */
+static u64 ba_space(struct ba_lun *ba_lun)
+{
+	struct ba_lun_info *bali = ba_lun->ba_lun_handle;
+
+	return bali->free_aun_cnt;
+}
+
+/**
+ * cxlflash_ba_terminate() - frees resources associated with the block allocator
+ * @ba_lun:	Block allocator.
+ *
+ * Safe to call in a partially allocated state.
+ */
+void cxlflash_ba_terminate(struct ba_lun *ba_lun)
+{
+	struct ba_lun_info *bali = ba_lun->ba_lun_handle;
+
+	if (bali) {
+		kfree(bali->aun_clone_map);
+		kfree(bali->lun_alloc_map);
+		kfree(bali);
+		ba_lun->ba_lun_handle = NULL;
+	}
+}
+
+/**
+ * init_vlun() - initializes a LUN for virtual use
+ * @lli:	Local LUN information structure that owns the block allocator.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_vlun(struct llun_info *lli)
+{
+	int rc = 0;
+	struct glun_info *gli = lli->parent;
+	struct blka *blka = &gli->blka;
+
+	memset(blka, 0, sizeof(*blka));
+	mutex_init(&blka->mutex);
+
+	/* LUN IDs are unique per port, save the index instead */
+	blka->ba_lun.lun_id = lli->lun_index;
+	blka->ba_lun.lsize = gli->max_lba + 1;
+	blka->ba_lun.lba_size = gli->blk_len;
+
+	blka->ba_lun.au_size = MC_CHUNK_SIZE;
+	blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
+
+	rc = ba_init(&blka->ba_lun);
+	if (unlikely(rc))
+		pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
+
+	pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
+	return rc;
+}
+
+/**
+ * write_same16() - sends a SCSI WRITE_SAME(16) zero-fill command to the specified LUN
+ * @sdev:	SCSI device associated with LUN.
+ * @lba:	Logical block address to start write same.
+ * @nblks:	Number of logical blocks to write same.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int write_same16(struct scsi_device *sdev,
+			u64 lba,
+			u32 nblks)
+{
+	u8 *cmd_buf = NULL;
+	u8 *scsi_cmd = NULL;
+	u8 *sense_buf = NULL;
+	int rc = 0;
+	int result = 0;
+	int ws_limit = SISLITE_MAX_WS_BLOCKS;
+	u64 offset = lba;
+	int left = nblks;
+	u32 tout = sdev->request_queue->rq_timeout;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+
+	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
+	scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
+	sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
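+	/* Issue the WRITE_SAME(16) in chunks of at most ws_limit blocks */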
+	while (left > 0) {
+
+		scsi_cmd[0] = WRITE_SAME_16;
+		put_unaligned_be64(offset, &scsi_cmd[2]);
+		put_unaligned_be32(ws_limit < left ? ws_limit : left,
+				   &scsi_cmd[10]);
+
+		result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
+				      CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+		if (result) {
+			dev_err_ratelimited(dev, "%s: command failed for "
+					    "offset %lld result=0x%x\n",
+					    __func__, offset, result);
+			rc = -EIO;
+			goto out;
+		}
+		left -= ws_limit;
+		offset += ws_limit;
+	}
+
+out:
+	kfree(cmd_buf);
+	kfree(scsi_cmd);
+	kfree(sense_buf);
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * grow_lxt() - expands the translation table associated with the specified RHTE
+ * @afu:	AFU associated with the host.
+ * @sdev:	SCSI device associated with LUN.
+ * @ctxid:	Context ID of context owning the RHTE.
+ * @rhndl:	Resource handle associated with the RHTE.
+ * @rhte:	Resource handle entry (RHTE).
+ * @new_size:	Number of translation entries associated with RHTE.
+ *
+ * By design, this routine employs a 'best attempt' allocation and will
+ * truncate the requested size down if there is not sufficient space in
+ * the block allocator to satisfy the request but there does exist some
+ * amount of space. The user is made aware of this by returning the size
+ * allocated.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int grow_lxt(struct afu *afu,
+		    struct scsi_device *sdev,
+		    ctx_hndl_t ctxid,
+		    res_hndl_t rhndl,
+		    struct sisl_rht_entry *rhte,
+		    u64 *new_size)
+{
+	struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct blka *blka = &gli->blka;
+	u32 av_size;
+	u32 ngrps, ngrps_old;
+	u64 aun;		/* chunk# allocated by block allocator */
+	u64 delta = *new_size - rhte->lxt_cnt;
+	u64 my_new_size;
+	int i, rc = 0;
+
+	/*
+	 * Check what is available in the block allocator before re-allocating
+	 * LXT array. This is done up front under the mutex which must not be
+	 * released until after allocation is complete.
+	 */
+	mutex_lock(&blka->mutex);
+	av_size = ba_space(&blka->ba_lun);
+	if (unlikely(av_size <= 0)) {
+		pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size);
+		mutex_unlock(&blka->mutex);
+		rc = -ENOSPC;
+		goto out;
+	}
+
+	if (av_size < delta)
+		delta = av_size;
+
+	lxt_old = rhte->lxt_start;
+	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
+	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
+
+	if (ngrps != ngrps_old) {
+		/* reallocate to fit new size */
+		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
+			      GFP_KERNEL);
+		if (unlikely(!lxt)) {
+			mutex_unlock(&blka->mutex);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		/* copy over all old entries */
+		memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
+	} else
+		lxt = lxt_old;
+
+	/* nothing can fail from now on */
+	my_new_size = rhte->lxt_cnt + delta;
+
+	/* add new entries to the end */
+	for (i = rhte->lxt_cnt; i < my_new_size; i++) {
+		/*
+		 * Due to the earlier check of available space, ba_alloc
+		 * cannot fail here. If it does fail due to an internal
+		 * error, leave an rlba_base of -1u which will likely be
+		 * an invalid LUN (too large).
+		 */
+		aun = ba_alloc(&blka->ba_lun);
+		if ((aun == -1ULL) || (aun >= blka->nchunk))
+			pr_debug("%s: ba_alloc error: allocated chunk# %llX, "
+				 "max %llX\n", __func__, aun, blka->nchunk - 1);
+
+		/* select both ports, use r/w perms from RHT */
+		lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
+				    (lli->lun_index << LXT_LUNIDX_SHIFT) |
+				    (RHT_PERM_RW << LXT_PERM_SHIFT |
+				     lli->port_sel));
+	}
+
+	mutex_unlock(&blka->mutex);
+
+	/*
+	 * The following sequence is prescribed in the SISlite spec
+	 * for syncing up with the AFU when adding LXT entries.
+	 */
+	dma_wmb(); /* Make LXT updates visible */
+
+	rhte->lxt_start = lxt;
+	dma_wmb(); /* Make RHT entry's LXT table update visible */
+
+	rhte->lxt_cnt = my_new_size;
+	dma_wmb(); /* Make RHT entry's LXT table size update visible */
+
+	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+
+	/* free old lxt if reallocated */
+	if (lxt != lxt_old)
+		kfree(lxt_old);
+	*new_size = my_new_size;
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * shrink_lxt() - reduces translation table associated with the specified RHTE
+ * @afu:	AFU associated with the host.
+ * @sdev:	SCSI device associated with LUN.
+ * @rhndl:	Resource handle associated with the RHTE.
+ * @rhte:	Resource handle entry (RHTE).
+ * @ctxi:	Context owning resources.
+ * @new_size:	Number of translation entries associated with RHTE.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int shrink_lxt(struct afu *afu,
+		      struct scsi_device *sdev,
+		      res_hndl_t rhndl,
+		      struct sisl_rht_entry *rhte,
+		      struct ctx_info *ctxi,
+		      u64 *new_size)
+{
+	struct sisl_lxt_entry *lxt, *lxt_old;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct blka *blka = &gli->blka;
+	ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
+	bool needs_ws = ctxi->rht_needs_ws[rhndl];
+	bool needs_sync = !ctxi->err_recovery_active;
+	u32 ngrps, ngrps_old;
+	u64 aun;		/* chunk# allocated by block allocator */
+	u64 delta = rhte->lxt_cnt - *new_size;
+	u64 my_new_size;
+	int i, rc = 0;
+
+	lxt_old = rhte->lxt_start;
+	ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
+	ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
+
+	if (ngrps != ngrps_old) {
+		/* Reallocate to fit new size unless new size is 0 */
+		if (ngrps) {
+			lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
+				      GFP_KERNEL);
+			if (unlikely(!lxt)) {
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			/* Copy over old entries that will remain */
+			memcpy(lxt, lxt_old,
+			       (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
+		} else
+			lxt = NULL;
+	} else
+		lxt = lxt_old;
+
+	/* Nothing can fail from now on */
+	my_new_size = rhte->lxt_cnt - delta;
+
+	/*
+	 * The following sequence is prescribed in the SISlite spec
+	 * for syncing up with the AFU when removing LXT entries.
+	 */
+	rhte->lxt_cnt = my_new_size;
+	dma_wmb(); /* Make RHT entry's LXT table size update visible */
+
+	rhte->lxt_start = lxt;
+	dma_wmb(); /* Make RHT entry's LXT table update visible */
+
+	if (needs_sync)
+		cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
+
+	if (needs_ws) {
+		/*
+		 * Mark the context as unavailable, so that we can release
+		 * the mutex safely.
+		 */
+		ctxi->unavail = true;
+		mutex_unlock(&ctxi->mutex);
+	}
+
+	/* Free LBAs allocated to freed chunks */
+	mutex_lock(&blka->mutex);
+	for (i = delta - 1; i >= 0; i--) {
+		/*
+		 * Mask the higher 48 bits before shifting, even though
+		 * it is a noop.
+		 */
+		aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK);
+		aun = (aun >> MC_CHUNK_SHIFT);
+		if (needs_ws)
+			write_same16(sdev, aun, MC_CHUNK_SIZE);
+		ba_free(&blka->ba_lun, aun);
+	}
+	mutex_unlock(&blka->mutex);
+
+	if (needs_ws) {
+		/* Make the context visible again */
+		mutex_lock(&ctxi->mutex);
+		ctxi->unavail = false;
+	}
+
+	/* Free old lxt if reallocated */
+	if (lxt != lxt_old)
+		kfree(lxt_old);
+	*new_size = my_new_size;
+out:
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * _cxlflash_vlun_resize() - changes the size of a virtual lun
+ * @sdev:	SCSI device associated with LUN owning virtual LUN.
+ * @ctxi:	Context owning resources.
+ * @resize:	Resize ioctl data structure.
+ *
+ * On successful return, the user is informed of the new size (in blocks)
+ * of the virtual lun in last LBA format. When the size of the virtual
+ * lun is zero, the last LBA is reflected as -1. See comment in the
+ * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
+ * on the error recovery list.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int _cxlflash_vlun_resize(struct scsi_device *sdev,
+			  struct ctx_info *ctxi,
+			  struct dk_cxlflash_resize *resize)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct afu *afu = cfg->afu;
+	bool put_ctx = false;
+
+	res_hndl_t rhndl = resize->rsrc_handle;
+	u64 new_size;
+	u64 nsectors;
+	u64 ctxid = DECODE_CTXID(resize->context_id),
+	    rctxid = resize->context_id;
+
+	struct sisl_rht_entry *rhte;
+
+	int rc = 0;
+
+	/*
+	 * The requested size (req_size) is always assumed to be in 4k blocks,
+	 * so we have to convert it here from 4k to chunk size.
+	 */
+	nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
+	new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
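+	/*
+	 * Worked example (hypothetical values): a req_size of 16 4K blocks on
+	 * a LUN with a 512 byte blk_len gives nsectors = 128; assuming
+	 * MC_CHUNK_SIZE is (1 << MC_CHUNK_SHIFT) LBAs, DIV_ROUND_UP yields a
+	 * new_size of a single chunk.
+	 */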
+
+	pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx, "
+		 "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle,
+		 resize->req_size, new_size);
+
+	if (unlikely(gli->mode != MODE_VIRTUAL)) {
+		pr_debug("%s: LUN mode does not support resize! (%d)\n",
+			 __func__, gli->mode);
+		rc = -EINVAL;
+		goto out;
+
+	}
+
+	if (!ctxi) {
+		ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
+		if (unlikely(!ctxi)) {
+			pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid);
+			rc = -EINVAL;
+			goto out;
+		}
+
+		put_ctx = true;
+	}
+
+	rhte = get_rhte(ctxi, rhndl, lli);
+	if (unlikely(!rhte)) {
+		pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (new_size > rhte->lxt_cnt)
+		rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
+	else if (new_size < rhte->lxt_cnt)
+		rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
+
+	resize->hdr.return_flags = 0;
+	resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
+	resize->last_lba /= CXLFLASH_BLOCK_SIZE;
+	resize->last_lba--;
+
+out:
+	if (put_ctx)
+		put_context(ctxi);
+	pr_debug("%s: resized to %lld returning rc=%d\n",
+		 __func__, resize->last_lba, rc);
+	return rc;
+}
+
+int cxlflash_vlun_resize(struct scsi_device *sdev,
+			 struct dk_cxlflash_resize *resize)
+{
+	return _cxlflash_vlun_resize(sdev, NULL, resize);
+}
+
+/**
+ * cxlflash_restore_luntable() - Restore LUN table to prior state
+ * @cfg:	Internal structure associated with the host.
+ */
+void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
+{
+	struct llun_info *lli, *temp;
+	u32 chan;
+	u32 lind;
+	struct afu *afu = cfg->afu;
+	struct sisl_global_map *agm = &afu->afu_map->global;
+
+	mutex_lock(&global.mutex);
+
+	list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
+		if (!lli->in_table)
+			continue;
+
+		lind = lli->lun_index;
+
+		if (lli->port_sel == BOTH_PORTS) {
+			writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
+			writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
+			pr_debug("%s: Virtual LUN on slot %d  id0=%llx, "
+				 "id1=%llx\n", __func__, lind,
+				 lli->lun_id[0], lli->lun_id[1]);
+		} else {
+			chan = PORT2CHAN(lli->port_sel);
+			writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
+			pr_debug("%s: Virtual LUN on slot %d chan=%d, "
+				 "id=%llx\n", __func__, lind, chan,
+				 lli->lun_id[chan]);
+		}
+	}
+
+	mutex_unlock(&global.mutex);
+}
+
+/**
+ * init_luntable() - write an entry in the LUN table
+ * @cfg:	Internal structure associated with the host.
+ * @lli:	Per adapter LUN information structure.
+ *
+ * On successful return, a LUN table entry is created: at the top of the
+ * table for LUNs visible on both ports, and at the bottom for LUNs visible
+ * on only one port.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
+{
+	u32 chan;
+	u32 lind;
+	int rc = 0;
+	struct afu *afu = cfg->afu;
+	struct sisl_global_map *agm = &afu->afu_map->global;
+
+	mutex_lock(&global.mutex);
+
+	if (lli->in_table)
+		goto out;
+
+	if (lli->port_sel == BOTH_PORTS) {
+		/*
+		 * If this LUN is visible from both ports, we will put
+		 * it in the top half of the LUN table.
+		 */
+		if ((cfg->promote_lun_index == cfg->last_lun_index[0]) ||
+		    (cfg->promote_lun_index == cfg->last_lun_index[1])) {
+			rc = -ENOSPC;
+			goto out;
+		}
+
+		lind = lli->lun_index = cfg->promote_lun_index;
+		writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
+		writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
+		cfg->promote_lun_index++;
+		pr_debug("%s: Virtual LUN on slot %d  id0=%llx, id1=%llx\n",
+			 __func__, lind, lli->lun_id[0], lli->lun_id[1]);
+	} else {
+		/*
+		 * If this LUN is visible only from one port, we will put
+		 * it in the bottom half of the LUN table.
+		 */
+		chan = PORT2CHAN(lli->port_sel);
+		if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
+			rc = -ENOSPC;
+			goto out;
+		}
+
+		lind = lli->lun_index = cfg->last_lun_index[chan];
+		writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
+		cfg->last_lun_index[chan]--;
+		pr_debug("%s: Virtual LUN on slot %d  chan=%d, id=%llx\n",
+			 __func__, lind, chan, lli->lun_id[chan]);
+	}
+
+	lli->in_table = true;
+out:
+	mutex_unlock(&global.mutex);
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_disk_virtual_open() - open a virtual disk of specified size
+ * @sdev:	SCSI device associated with LUN owning virtual LUN.
+ * @arg:	UVirtual ioctl data structure.
+ *
+ * On successful return, the user is informed of the resource handle
+ * to be used to identify the virtual lun and the size (in blocks) of
+ * the virtual lun in last LBA format. When the size of the virtual lun
+ * is zero, the last LBA is reflected as -1.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct device *dev = &cfg->dev->dev;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+
+	struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
+	struct dk_cxlflash_resize resize;
+
+	u64 ctxid = DECODE_CTXID(virt->context_id),
+	    rctxid = virt->context_id;
+	u64 lun_size = virt->lun_size;
+	u64 last_lba = 0;
+	u64 rsrc_handle = -1;
+
+	int rc = 0;
+
+	struct ctx_info *ctxi = NULL;
+	struct sisl_rht_entry *rhte = NULL;
+
+	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
+
+	mutex_lock(&gli->mutex);
+	if (gli->mode == MODE_NONE) {
+		/* Set up the LUN table and block allocator on first call */
+		rc = init_luntable(cfg, lli);
+		if (rc) {
+			dev_err(dev, "%s: call to init_luntable failed "
+				"rc=%d!\n", __func__, rc);
+			goto err0;
+		}
+
+		rc = init_vlun(lli);
+		if (rc) {
+			dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
+				__func__, rc);
+			rc = -ENOMEM;
+			goto err0;
+		}
+	}
+
+	rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n",
+			__func__);
+		goto err0;
+	}
+	mutex_unlock(&gli->mutex);
+
+	ctxi = get_context(cfg, rctxid, lli, 0);
+	if (unlikely(!ctxi)) {
+		dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
+		rc = -EINVAL;
+		goto err1;
+	}
+
+	rhte = rhte_checkout(ctxi, lli);
+	if (unlikely(!rhte)) {
+		dev_err(dev, "%s: too many opens for this context\n", __func__);
+		rc = -EMFILE;	/* too many opens  */
+		goto err1;
+	}
+
+	rsrc_handle = (rhte - ctxi->rht_start);
+
+	/* Populate RHT format 0 */
+	rhte->nmask = MC_RHT_NMASK;
+	rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
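+
+	/*
+	 * An nmask of MC_RHT_NMASK (16 bits) means the low MC_CHUNK_SHIFT bits
+	 * of a virtual LBA are the offset within a chunk, so each LXT entry
+	 * covers (1 << MC_CHUNK_SHIFT) LBAs.
+	 */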
+
+	/* Resize even if requested size is 0 */
+	marshal_virt_to_resize(virt, &resize);
+	resize.rsrc_handle = rsrc_handle;
+	rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
+	if (rc) {
+		dev_err(dev, "%s: resize failed rc %d\n", __func__, rc);
+		goto err2;
+	}
+	last_lba = resize.last_lba;
+
+	if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
+		ctxi->rht_needs_ws[rsrc_handle] = true;
+
+	virt->hdr.return_flags = 0;
+	virt->last_lba = last_lba;
+	virt->rsrc_handle = rsrc_handle;
+
+out:
+	if (likely(ctxi))
+		put_context(ctxi);
+	pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n",
+		 __func__, rsrc_handle, rc, last_lba);
+	return rc;
+
+err2:
+	rhte_checkin(ctxi, rhte);
+err1:
+	cxlflash_lun_detach(gli);
+	goto out;
+err0:
+	/* Common cleanup for errors that occur before a successful LUN attach */
+	cxlflash_ba_terminate(&gli->blka.ba_lun);
+	mutex_unlock(&gli->mutex);
+	goto out;
+}
+
+/**
+ * clone_lxt() - copies translation tables from source to destination RHTE
+ * @afu:	AFU associated with the host.
+ * @blka:	Block allocator associated with LUN.
+ * @ctxid:	Context ID of context owning the RHTE.
+ * @rhndl:	Resource handle associated with the RHTE.
+ * @rhte:	Destination resource handle entry (RHTE).
+ * @rhte_src:	Source resource handle entry (RHTE).
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int clone_lxt(struct afu *afu,
+		     struct blka *blka,
+		     ctx_hndl_t ctxid,
+		     res_hndl_t rhndl,
+		     struct sisl_rht_entry *rhte,
+		     struct sisl_rht_entry *rhte_src)
+{
+	struct sisl_lxt_entry *lxt;
+	u32 ngrps;
+	u64 aun;		/* chunk# allocated by block allocator */
+	int i, j;
+
+	ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
+
+	if (ngrps) {
+		/* allocate new LXTs for clone */
+		lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
+				GFP_KERNEL);
+		if (unlikely(!lxt))
+			return -ENOMEM;
+
+		/* copy over */
+		memcpy(lxt, rhte_src->lxt_start,
+		       (sizeof(*lxt) * rhte_src->lxt_cnt));
+
+		/* clone the LBAs in block allocator via ref_cnt */
+		mutex_lock(&blka->mutex);
+		for (i = 0; i < rhte_src->lxt_cnt; i++) {
+			aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
+			if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
+				/* free the clones already made */
+				for (j = 0; j < i; j++) {
+					aun = (lxt[j].rlba_base >>
+					       MC_CHUNK_SHIFT);
+					ba_free(&blka->ba_lun, aun);
+				}
+
+				mutex_unlock(&blka->mutex);
+				kfree(lxt);
+				return -EIO;
+			}
+		}
+		mutex_unlock(&blka->mutex);
+	} else {
+		lxt = NULL;
+	}
+
+	/*
+	 * The following sequence is prescribed in the SISlite spec
+	 * for syncing up with the AFU when adding LXT entries.
+	 */
+	dma_wmb(); /* Make LXT updates visible */
+
+	rhte->lxt_start = lxt;
+	dma_wmb(); /* Make RHT entry's LXT table update visible */
+
+	rhte->lxt_cnt = rhte_src->lxt_cnt;
+	dma_wmb(); /* Make RHT entry's LXT table size update visible */
+
+	cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
+
+	pr_debug("%s: returning\n", __func__);
+	return 0;
+}
+
+/**
+ * cxlflash_disk_clone() - clone a context by making snapshot of another
+ * @sdev:	SCSI device associated with LUN owning virtual LUN.
+ * @clone:	Clone ioctl data structure.
+ *
+ * This routine effectively performs cxlflash_disk_open operation for each
+ * in-use virtual resource in the source context. Note that the destination
+ * context must be in pristine state and cannot have any resource handles
+ * open at the time of the clone.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int cxlflash_disk_clone(struct scsi_device *sdev,
+			struct dk_cxlflash_clone *clone)
+{
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
+	struct llun_info *lli = sdev->hostdata;
+	struct glun_info *gli = lli->parent;
+	struct blka *blka = &gli->blka;
+	struct afu *afu = cfg->afu;
+	struct dk_cxlflash_release release = { { 0 }, 0 };
+
+	struct ctx_info *ctxi_src = NULL,
+			*ctxi_dst = NULL;
+	struct lun_access *lun_access_src, *lun_access_dst;
+	u32 perms;
+	u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
+	    ctxid_dst = DECODE_CTXID(clone->context_id_dst),
+	    rctxid_src = clone->context_id_src,
+	    rctxid_dst = clone->context_id_dst;
+	int adap_fd_src = clone->adap_fd_src;
+	int i, j;
+	int rc = 0;
+	bool found;
+	LIST_HEAD(sidecar);
+
+	pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n",
+		 __func__, ctxid_src, ctxid_dst, adap_fd_src);
+
+	/* Do not clone yourself */
+	if (unlikely(rctxid_src == rctxid_dst)) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (unlikely(gli->mode != MODE_VIRTUAL)) {
+		rc = -EINVAL;
+		pr_debug("%s: Clone not supported on physical LUNs! (%d)\n",
+			 __func__, gli->mode);
+		goto out;
+	}
+
+	ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
+	ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
+	if (unlikely(!ctxi_src || !ctxi_dst)) {
+		pr_debug("%s: Bad context! (%llu,%llu)\n", __func__,
+			 ctxid_src, ctxid_dst);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (unlikely(adap_fd_src != ctxi_src->lfd)) {
+		pr_debug("%s: Invalid source adapter fd! (%d)\n",
+			 __func__, adap_fd_src);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Verify there is no open resource handle in the destination context */
+	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
+		if (ctxi_dst->rht_start[i].nmask != 0) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+	/* Clone LUN access list */
+	list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
+		found = false;
+		list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
+			if (lun_access_dst->sdev == lun_access_src->sdev) {
+				found = true;
+				break;
+			}
+
+		if (!found) {
+			lun_access_dst = kzalloc(sizeof(*lun_access_dst),
+						 GFP_KERNEL);
+			if (unlikely(!lun_access_dst)) {
+				pr_err("%s: Unable to allocate lun_access!\n",
+				       __func__);
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			*lun_access_dst = *lun_access_src;
+			list_add(&lun_access_dst->list, &sidecar);
+		}
+	}
+
+	if (unlikely(!ctxi_src->rht_out)) {
+		pr_debug("%s: Nothing to clone!\n", __func__);
+		goto out_success;
+	}
+
+	/* User specified permission on attach */
+	perms = ctxi_dst->rht_perms;
+
+	/*
+	 * Copy over checked-out RHT (and their associated LXT) entries by
+	 * hand, stopping after we've copied all outstanding entries and
+	 * cleaning up if the clone fails.
+	 *
+	 * Note: This loop is equivalent to performing cxlflash_disk_open and
+	 * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
+	 * account by attaching after each successful RHT entry clone. In the
+	 * event that a clone failure is experienced, the LUN detach is handled
+	 * via the cleanup performed by _cxlflash_disk_release.
+	 */
+	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
+		if (ctxi_src->rht_out == ctxi_dst->rht_out)
+			break;
+		if (ctxi_src->rht_start[i].nmask == 0)
+			continue;
+
+		/* Consume a destination RHT entry */
+		ctxi_dst->rht_out++;
+		ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
+		ctxi_dst->rht_start[i].fp =
+		    SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
+		ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
+
+		rc = clone_lxt(afu, blka, ctxid_dst, i,
+			       &ctxi_dst->rht_start[i],
+			       &ctxi_src->rht_start[i]);
+		if (rc) {
+			marshal_clone_to_rele(clone, &release);
+			for (j = 0; j < i; j++) {
+				release.rsrc_handle = j;
+				_cxlflash_disk_release(sdev, ctxi_dst,
+						       &release);
+			}
+
+			/* Put back the one we failed on */
+			rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
+			goto err;
+		}
+
+		cxlflash_lun_attach(gli, gli->mode, false);
+	}
+
+out_success:
+	list_splice(&sidecar, &ctxi_dst->luns);
+	sys_close(adap_fd_src);
+
+	/* fall through */
+out:
+	if (ctxi_src)
+		put_context(ctxi_src);
+	if (ctxi_dst)
+		put_context(ctxi_dst);
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+
+err:
+	list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
+		kfree(lun_access_src);
+	goto out;
+}
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
new file mode 100644
index 0000000..8b29a74
--- /dev/null
+++ b/drivers/scsi/cxlflash/vlun.h
@@ -0,0 +1,86 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_VLUN_H
+#define _CXLFLASH_VLUN_H
+
+/* RHT - Resource Handle Table */
+#define MC_RHT_NMASK      16	/* in bits */
+#define MC_CHUNK_SHIFT    MC_RHT_NMASK	/* shift to go from LBA to chunk# */
+
+#define HIBIT             (BITS_PER_LONG - 1)
+
+#define MAX_AUN_CLONE_CNT 0xFF
+
+/*
+ * LXT - LBA Translation Table
+ *
+ * +-------+-------+-------+-------+-------+-------+-------+---+---+
+ * | RLBA_BASE                                     |LUN_IDX| P |SEL|
+ * +-------+-------+-------+-------+-------+-------+-------+---+---+
+ *
+ * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
+ * The AFU ORs the low order bits of the virtual LBA (offset into the chunk)
+ * with RLBA_BASE. The result is the physical LBA to be sent to storage.
+ * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
+ * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
+ * with a global port select bit-mask maintained by the driver.
+ * In addition, it has permission bits that are ANDed with the
+ * RHT permissions to arrive at the final permissions for the chunk.
+ *
+ * LXT tables are allocated dynamically in groups. This is done to avoid
+ * a malloc/free overhead each time the LXT has to grow or shrink.
+ *
+ * Based on the current lxt_cnt (used), it is always possible to know
+ * how many are allocated (used+free). The number of allocated entries is
+ * not stored anywhere.
+ *
+ * The LXT table is re-allocated whenever it needs to cross into another group.
+ */
+#define LXT_GROUP_SIZE          8
+#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8)	/* alloc'ed groups */
+#define LXT_LUNIDX_SHIFT  8	/* LXT entry, shift for LUN index */
+#define LXT_PERM_SHIFT    4	/* LXT entry, shift for permission bits */
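+
+/*
+ * Example: with LXT_GROUP_SIZE of 8, an lxt_cnt of 9 used entries gives
+ * LXT_NUM_GROUPS(9) == 2, i.e. 16 entries allocated. Each entry is composed
+ * as in grow_lxt():
+ *
+ *   rlba_base = (aun << MC_CHUNK_SHIFT) | (lun_index << LXT_LUNIDX_SHIFT) |
+ *               (RHT_PERM_RW << LXT_PERM_SHIFT) | port_sel
+ */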
+
+struct ba_lun_info {
+	u64 *lun_alloc_map;
+	u32 lun_bmap_size;
+	u32 total_aus;
+	u64 free_aun_cnt;
+
+	/* indices to be used for elevator lookup of free map */
+	u32 free_low_idx;
+	u32 free_curr_idx;
+	u32 free_high_idx;
+
+	u8 *aun_clone_map;
+};
+
+struct ba_lun {
+	u64 lun_id;
+	u64 wwpn;
+	size_t lsize;		/* LUN size in number of LBAs             */
+	size_t lba_size;	/* LBA size in number of bytes            */
+	size_t au_size;		/* Allocation Unit size in number of LBAs */
+	struct ba_lun_info *ba_lun_handle;
+};
+
+/* Block Allocator */
+struct blka {
+	struct ba_lun ba_lun;
+	u64 nchunk;		/* number of chunks */
+	struct mutex mutex;
+};
+
+#endif /* ifndef _CXLFLASH_VLUN_H */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index f35ed53..d4cda5e 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1924,6 +1924,9 @@
 #endif
 
 #if defined __i386__
+
+#include <uapi/asm/vm86.h>
+
 static void adpt_i386_info(sysInfo_S* si)
 {
 	// This is all the info we need for now
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index bdc8989..d7597c0 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -58,7 +58,7 @@
 module_param_call(create, fcoe_transport_create, NULL,
 		  (void *)FIP_MODE_FABRIC, S_IWUSR);
 __MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an ethernet interface");
 
 module_param_call(create_vn2vn, fcoe_transport_create, NULL,
 		  (void *)FIP_MODE_VN2VN, S_IWUSR);
@@ -68,15 +68,15 @@
 
 module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an ethernet interface");
 
 module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(enable, "string");
-MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
+MODULE_PARM_DESC(enable, " Enables fcoe on an ethernet interface.");
 
 module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(disable, "string");
-MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
+MODULE_PARM_DESC(disable, " Disables fcoe on an ethernet interface.");
 
 /* notification function for packets from net device */
 static struct notifier_block libfcoe_notifier = {
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1dafeb4..40669f8 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,7 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2014-2015 PMC-Sierra, Inc.
+ *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *    Questions/Comments/Bugfixes to storagedev@pmcs.com
  *
  */
 
@@ -132,6 +129,11 @@
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
 	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
+	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
+	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
+	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
+	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
+	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
 	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -190,6 +192,11 @@
 	{0x21CD103C, "Smart Array", &SA5_access},
 	{0x21CE103C, "Smart HBA", &SA5_access},
 	{0x05809005, "SmartHBA-SA", &SA5_access},
+	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
+	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
+	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
+	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
+	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
 	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
 	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
 	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -267,6 +274,7 @@
 static void hpsa_command_resubmit_worker(struct work_struct *work);
 static u32 lockup_detected(struct ctlr_info *h);
 static int detect_controller_lockup(struct ctlr_info *h);
+static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -325,7 +333,7 @@
 
 	decode_sense_data(c->err_info->SenseInfo, sense_len,
 				&sense_key, &asc, &ascq);
-	if (sense_key != UNIT_ATTENTION || asc == -1)
+	if (sense_key != UNIT_ATTENTION || asc == 0xff)
 		return 0;
 
 	switch (asc) {
@@ -717,12 +725,107 @@
 	return snprintf(buf, 20, "%d\n", offload_enabled);
 }
 
+#define MAX_PATHS 8
+#define PATH_STRING_LEN 50
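+
+/*
+ * Illustrative path_info output for a physical disk reachable over two paths
+ * (hypothetical values; format follows the snprintf() calls below):
+ *
+ *   [1:0:3:0]        Direct-Access PORT: 01 BAY: 5 Active
+ *   [1:0:3:0]        Direct-Access PORT: 02 BAY: 5 Inactive
+ */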
+
+static ssize_t path_info_show(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	int i;
+	int output_len = 0;
+	u8 box;
+	u8 bay;
+	u8 path_map_index = 0;
+	char *active;
+	unsigned char phys_connector[2];
+	unsigned char path[MAX_PATHS][PATH_STRING_LEN];
+
+	memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->devlock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->devlock, flags);
+		return -ENODEV;
+	}
+
+	bay = hdev->bay;
+	for (i = 0; i < MAX_PATHS; i++) {
+		path_map_index = 1<<i;
+		if (i == hdev->active_path_index)
+			active = "Active";
+		else if (hdev->path_map & path_map_index)
+			active = "Inactive";
+		else
+			continue;
+
+		output_len = snprintf(path[i],
+				PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
+				h->scsi_host->host_no,
+				hdev->bus, hdev->target, hdev->lun,
+				scsi_device_type(hdev->devtype));
+
+		if (is_ext_target(h, hdev) ||
+			(hdev->devtype == TYPE_RAID) ||
+			is_logical_dev_addr_mode(hdev->scsi3addr)) {
+			output_len += snprintf(path[i] + output_len,
+						PATH_STRING_LEN, "%s\n",
+						active);
+			continue;
+		}
+
+		box = hdev->box[i];
+		memcpy(&phys_connector, &hdev->phys_connector[i],
+			sizeof(phys_connector));
+		if (phys_connector[0] < '0')
+			phys_connector[0] = '0';
+		if (phys_connector[1] < '0')
+			phys_connector[1] = '0';
+		if (hdev->phys_connector[i] > 0)
+			output_len += snprintf(path[i] + output_len,
+				PATH_STRING_LEN,
+				"PORT: %.2s ",
+				phys_connector);
+		if (hdev->devtype == TYPE_DISK &&
+			hdev->expose_state != HPSA_DO_NOT_EXPOSE) {
+			if (box == 0 || box == 0xFF) {
+				output_len += snprintf(path[i] + output_len,
+					PATH_STRING_LEN,
+					"BAY: %hhu %s\n",
+					bay, active);
+			} else {
+				output_len += snprintf(path[i] + output_len,
+					PATH_STRING_LEN,
+					"BOX: %hhu BAY: %hhu %s\n",
+					box, bay, active);
+			}
+		} else if (box != 0 && box != 0xFF) {
+			output_len += snprintf(path[i] + output_len,
+				PATH_STRING_LEN, "BOX: %hhu %s\n",
+				box, active);
+		} else
+			output_len += snprintf(path[i] + output_len,
+				PATH_STRING_LEN, "%s\n", active);
+	}
+
+	spin_unlock_irqrestore(&h->devlock, flags);
+	return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
+		path[0], path[1], path[2], path[3],
+		path[4], path[5], path[6], path[7]);
+}
+
 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
 			host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
 		host_show_hp_ssd_smart_path_status,
 		host_store_hp_ssd_smart_path_status);
@@ -744,6 +847,7 @@
 	&dev_attr_lunid,
 	&dev_attr_unique_id,
 	&dev_attr_hp_ssd_smart_path_enabled,
+	&dev_attr_path_info,
 	&dev_attr_lockup_detected,
 	NULL,
 };
@@ -1083,17 +1187,19 @@
 
 	/* This is a non-zero lun of a multi-lun device.
 	 * Search through our list and find the device which
-	 * has the same 8 byte LUN address, excepting byte 4.
+	 * has the same 8 byte LUN address, excepting bytes 4 and 5.
 	 * Assign the same bus and target for this new LUN.
 	 * Use the logical unit number from the firmware.
 	 */
 	memcpy(addr1, device->scsi3addr, 8);
 	addr1[4] = 0;
+	addr1[5] = 0;
 	for (i = 0; i < n; i++) {
 		sd = h->dev[i];
 		memcpy(addr2, sd->scsi3addr, 8);
 		addr2[4] = 0;
-		/* differ only in byte 4? */
+		addr2[5] = 0;
+		/* differ only in bytes 4 and 5? */
 		if (memcmp(addr1, addr2, 8) == 0) {
 			device->bus = sd->bus;
 			device->target = sd->target;
@@ -1286,8 +1392,9 @@
 		return 1;
 	if (dev1->offload_enabled != dev2->offload_enabled)
 		return 1;
-	if (dev1->queue_depth != dev2->queue_depth)
-		return 1;
+	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
+		if (dev1->queue_depth != dev2->queue_depth)
+			return 1;
 	return 0;
 }
 
@@ -1376,17 +1483,23 @@
 			h->scsi_host->host_no,
 			sd->bus, sd->target, sd->lun);
 		break;
+	case HPSA_LV_NOT_AVAILABLE:
+		dev_info(&h->pdev->dev,
+			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
+		break;
 	case HPSA_LV_UNDERGOING_RPI:
 		dev_info(&h->pdev->dev,
-			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
 			h->scsi_host->host_no,
 			sd->bus, sd->target, sd->lun);
 		break;
 	case HPSA_LV_PENDING_RPI:
 		dev_info(&h->pdev->dev,
-				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
-				h->scsi_host->host_no,
-				sd->bus, sd->target, sd->lun);
+			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+			h->scsi_host->host_no,
+			sd->bus, sd->target, sd->lun);
 		break;
 	case HPSA_LV_ENCRYPTED_NO_KEY:
 		dev_info(&h->pdev->dev,
@@ -2585,34 +2698,6 @@
 	return rc;
 }
 
-static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
-		unsigned char *scsi3addr, unsigned char page,
-		struct bmic_controller_parameters *buf, size_t bufsize)
-{
-	int rc = IO_OK;
-	struct CommandList *c;
-	struct ErrorInfo *ei;
-
-	c = cmd_alloc(h);
-	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
-			page, scsi3addr, TYPE_CMD)) {
-		rc = -1;
-		goto out;
-	}
-	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
-			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
-	if (rc)
-		goto out;
-	ei = c->err_info;
-	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		hpsa_scsi_interpret_error(h, c);
-		rc = -1;
-	}
-out:
-	cmd_free(h, c);
-	return rc;
-}
-
 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
 	u8 reset_type, int reply_queue)
 {
@@ -2749,11 +2834,10 @@
 			lockup_detected(h));
 
 	if (unlikely(lockup_detected(h))) {
-			dev_warn(&h->pdev->dev,
-				 "Controller lockup detected during reset wait\n");
-			mutex_unlock(&h->reset_mutex);
-			rc = -ENODEV;
-		}
+		dev_warn(&h->pdev->dev,
+			 "Controller lockup detected during reset wait\n");
+		rc = -ENODEV;
+	}
 
 	if (unlikely(rc))
 		atomic_set(&dev->reset_cmds_out, 0);
@@ -3186,6 +3270,7 @@
 	/* Keep volume offline in certain cases: */
 	switch (ldstat) {
 	case HPSA_LV_UNDERGOING_ERASE:
+	case HPSA_LV_NOT_AVAILABLE:
 	case HPSA_LV_UNDERGOING_RPI:
 	case HPSA_LV_PENDING_RPI:
 	case HPSA_LV_ENCRYPTED_NO_KEY:
@@ -3562,29 +3647,6 @@
 	return NULL;
 }
 
-static int hpsa_hba_mode_enabled(struct ctlr_info *h)
-{
-	int rc;
-	int hba_mode_enabled;
-	struct bmic_controller_parameters *ctlr_params;
-	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
-		GFP_KERNEL);
-
-	if (!ctlr_params)
-		return -ENOMEM;
-	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
-		sizeof(struct bmic_controller_parameters));
-	if (rc) {
-		kfree(ctlr_params);
-		return rc;
-	}
-
-	hba_mode_enabled =
-		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
-	kfree(ctlr_params);
-	return hba_mode_enabled;
-}
-
 /* get physical drive ioaccel handle and queue depth */
 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
 		struct hpsa_scsi_dev_t *dev,
@@ -3615,6 +3677,31 @@
 	atomic_set(&dev->reset_cmds_out, 0);
 }
 
+static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
+	u8 *lunaddrbytes,
+	struct bmic_identify_physical_device *id_phys)
+{
+	if (PHYS_IOACCEL(lunaddrbytes)
+		&& this_device->ioaccel_handle)
+		this_device->hba_ioaccel_enabled = 1;
+
+	memcpy(&this_device->active_path_index,
+		&id_phys->active_path_number,
+		sizeof(this_device->active_path_index));
+	memcpy(&this_device->path_map,
+		&id_phys->redundant_path_present_map,
+		sizeof(this_device->path_map));
+	memcpy(&this_device->box,
+		&id_phys->alternate_paths_phys_box_on_port,
+		sizeof(this_device->box));
+	memcpy(&this_device->phys_connector,
+		&id_phys->alternate_paths_phys_connector,
+		sizeof(this_device->phys_connector));
+	memcpy(&this_device->bay,
+		&id_phys->phys_bay_in_box,
+		sizeof(this_device->bay));
+}
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
 	/* the idea here is we could get notified
@@ -3637,7 +3724,6 @@
 	int ncurrent = 0;
 	int i, n_ext_target_devs, ndevs_to_allocate;
 	int raid_ctlr_position;
-	int rescan_hba_mode;
 	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
 	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3653,17 +3739,6 @@
 	}
 	memset(lunzerobits, 0, sizeof(lunzerobits));
 
-	rescan_hba_mode = hpsa_hba_mode_enabled(h);
-	if (rescan_hba_mode < 0)
-		goto out;
-
-	if (!h->hba_mode_enabled && rescan_hba_mode)
-		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
-	else if (h->hba_mode_enabled && !rescan_hba_mode)
-		dev_warn(&h->pdev->dev, "HBA mode disabled\n");
-
-	h->hba_mode_enabled = rescan_hba_mode;
-
 	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
 			logdev_list, &nlogicals))
 		goto out;
@@ -3739,9 +3814,6 @@
 		/* do not expose masked devices */
 		if (MASKED_DEVICE(lunaddrbytes) &&
 			i < nphysicals + (raid_ctlr_position == 0)) {
-			if (h->hba_mode_enabled)
-				dev_warn(&h->pdev->dev,
-					"Masked physical device detected\n");
 			this_device->expose_state = HPSA_DO_NOT_EXPOSE;
 		} else {
 			this_device->expose_state =
@@ -3761,30 +3833,21 @@
 				ncurrent++;
 			break;
 		case TYPE_DISK:
-			if (i >= nphysicals) {
-				ncurrent++;
-				break;
-			}
-
-			if (h->hba_mode_enabled)
-				/* never use raid mapper in HBA mode */
+			if (i < nphysicals + (raid_ctlr_position == 0)) {
+				/* The disk is in HBA mode. */
+				/* Never use RAID mapper in HBA mode. */
 				this_device->offload_enabled = 0;
-			else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
-				h->transMethod & CFGTBL_Trans_io_accel2))
-				break;
-
-			hpsa_get_ioaccel_drive_info(h, this_device,
-						lunaddrbytes, id_phys);
-			atomic_set(&this_device->ioaccel_cmds_out, 0);
+				hpsa_get_ioaccel_drive_info(h, this_device,
+					lunaddrbytes, id_phys);
+				hpsa_get_path_info(this_device, lunaddrbytes,
+							id_phys);
+			}
 			ncurrent++;
 			break;
 		case TYPE_TAPE:
 		case TYPE_MEDIUM_CHANGER:
-			ncurrent++;
-			break;
 		case TYPE_ENCLOSURE:
-			if (h->hba_mode_enabled)
-				ncurrent++;
+			ncurrent++;
 			break;
 		case TYPE_RAID:
 			/* Only present the Smartarray HBA as a RAID controller.
@@ -5104,7 +5167,7 @@
 	int rc;
 	struct ctlr_info *h;
 	struct hpsa_scsi_dev_t *dev;
-	char msg[40];
+	char msg[48];
 
 	/* find the controller to which the command to be aborted was sent */
 	h = sdev_to_hba(scsicmd->device);
@@ -5122,16 +5185,18 @@
 
 	/* if controller locked up, we can guarantee command won't complete */
 	if (lockup_detected(h)) {
-		sprintf(msg, "cmd %d RESET FAILED, lockup detected",
-				hpsa_get_cmd_index(scsicmd));
+		snprintf(msg, sizeof(msg),
+			 "cmd %d RESET FAILED, lockup detected",
+			 hpsa_get_cmd_index(scsicmd));
 		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 		return FAILED;
 	}
 
 	/* this reset request might be the result of a lockup; check */
 	if (detect_controller_lockup(h)) {
-		sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
-				hpsa_get_cmd_index(scsicmd));
+		snprintf(msg, sizeof(msg),
+			 "cmd %d RESET FAILED, new lockup detected",
+			 hpsa_get_cmd_index(scsicmd));
 		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 		return FAILED;
 	}
@@ -5145,7 +5210,8 @@
 	/* send a reset to the SCSI LUN which the command was sent to */
 	rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
 			   DEFAULT_REPLY_QUEUE);
-	sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
+	snprintf(msg, sizeof(msg), "reset %s",
+		 rc == 0 ? "completed successfully" : "failed");
 	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
 	return rc == 0 ? SUCCESS : FAILED;
 }
@@ -7989,7 +8055,6 @@
 
 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
-	h->hba_mode_enabled = 0;
 
 	spin_lock_init(&h->devlock);
 	rc = hpsa_put_ctlr_into_performant_mode(h);
@@ -8054,7 +8119,7 @@
 		rc = hpsa_kdump_soft_reset(h);
 		if (rc)
 			/* Neither hard nor soft reset worked, we're hosed. */
-			goto clean9;
+			goto clean7;
 
 		dev_info(&h->pdev->dev, "Board READY.\n");
 		dev_info(&h->pdev->dev,
@@ -8100,8 +8165,6 @@
 				h->heartbeat_sample_interval);
 	return 0;
 
-clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
-	kfree(h->hba_inquiry_data);
 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
 	hpsa_free_performant_mode(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
@@ -8209,6 +8272,14 @@
 	destroy_workqueue(h->rescan_ctlr_wq);
 	destroy_workqueue(h->resubmit_wq);
 
+	/*
+	 * Call before disabling interrupts.
+	 * scsi_remove_host can trigger I/O operations especially
+	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
+	 * operations which cannot complete and will hang the system.
+	 */
+	if (h->scsi_host)
+		scsi_remove_host(h->scsi_host);		/* init_one 8 */
 	/* includes hpsa_free_irqs - init_one 4 */
 	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
 	hpsa_shutdown(pdev);
@@ -8217,8 +8288,6 @@
 
 	kfree(h->hba_inquiry_data);			/* init_one 10 */
 	h->hba_inquiry_data = NULL;			/* init_one 10 */
-	if (h->scsi_host)
-		scsi_remove_host(h->scsi_host);		/* init_one 8 */
 	hpsa_free_ioaccel2_sg_chain_blocks(h);
 	hpsa_free_performant_mode(h);			/* init_one 7 */
 	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6ee4da6..27debb3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,7 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2014-2015 PMC-Sierra, Inc.
+ *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *    Questions/Comments/Bugfixes to storagedev@pmcs.com
  *
  */
 #ifndef HPSA_H
@@ -53,6 +50,11 @@
 					 * device via "ioaccel" path.
 					 */
 	u32 ioaccel_handle;
+	u8 active_path_index;
+	u8 path_map;
+	u8 bay;
+	u8 box[8];
+	u16 phys_connector[8];
 	int offload_config;		/* I/O accel RAID offload configured */
 	int offload_enabled;		/* I/O accel RAID offload enabled */
 	int offload_to_be_enabled;
@@ -114,7 +116,6 @@
 	u8   automatic_drive_slamming;
 	u8   reserved1;
 	u8   nvram_flags;
-#define HBA_MODE_ENABLED_FLAG (1 << 3)
 	u8   cache_nvram_flags;
 	u8   drive_config_flags;
 	u16  reserved2;
@@ -153,7 +154,6 @@
 	unsigned int msi_vector;
 	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
 	struct access_method access;
-	char hba_mode_enabled;
 
 	/* queue and queue Info */
 	unsigned int Qdepth;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index c601622..47c756b 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,7 @@
 /*
  *    Disk Array driver for HP Smart Array SAS controllers
- *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
+ *    Copyright 2014-2015 PMC-Sierra, Inc.
+ *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *    Questions/Comments/Bugfixes to storagedev@pmcs.com
  *
  */
 #ifndef HPSA_CMD_H
@@ -167,6 +164,7 @@
 /* Logical volume states */
 #define HPSA_VPD_LV_STATUS_UNSUPPORTED			0xff
 #define HPSA_LV_OK                                      0x0
+#define HPSA_LV_NOT_AVAILABLE				0x0b
 #define HPSA_LV_UNDERGOING_ERASE			0x0F
 #define HPSA_LV_UNDERGOING_RPI				0x12
 #define HPSA_LV_PENDING_RPI				0x13
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index e995218..a83f705 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
 /*
  * HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@
 
 static char driver_name[] = "hptiop";
 static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.8";
+static const char driver_ver[] = "v1.10.0";
 
 static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
 static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -764,9 +764,7 @@
 		scsi_set_resid(scp,
 			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
 		scp->result = SAM_STAT_CHECK_CONDITION;
-		memcpy(scp->sense_buffer, &req->sg_list,
-				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
-					le32_to_cpu(req->dataxfer_length)));
+		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
 		goto skip_resid;
 		break;
 
@@ -1037,8 +1035,9 @@
 
 	scp->result = 0;
 
-	if (scp->device->channel || scp->device->lun ||
-			scp->device->id > hba->max_devices) {
+	if (scp->device->channel ||
+			(scp->device->id > hba->max_devices) ||
+			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
 		scp->result = DID_BAD_TARGET << 16;
 		free_req(hba, _req);
 		goto cmd_done;
@@ -1168,6 +1167,14 @@
 	NULL
 };
 
+static int hptiop_slave_config(struct scsi_device *sdev)
+{
+	if (sdev->type == TYPE_TAPE)
+		blk_queue_max_hw_sectors(sdev->request_queue, 8192);
+
+	return 0;
+}
+
 static struct scsi_host_template driver_template = {
 	.module                     = THIS_MODULE,
 	.name                       = driver_name,
@@ -1179,6 +1186,7 @@
 	.use_clustering             = ENABLE_CLUSTERING,
 	.proc_name                  = driver_name,
 	.shost_attrs                = hptiop_attrs,
+	.slave_configure            = hptiop_slave_config,
 	.this_id                    = -1,
 	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
 };
@@ -1323,6 +1331,7 @@
 	}
 
 	hba = (struct hptiop_hba *)host->hostdata;
+	memset(hba, 0, sizeof(struct hptiop_hba));
 
 	hba->ops = iop_ops;
 	hba->pcidev = pcidev;
@@ -1336,7 +1345,7 @@
 	init_waitqueue_head(&hba->reset_wq);
 	init_waitqueue_head(&hba->ioctl_wq);
 
-	host->max_lun = 1;
+	host->max_lun = 128;
 	host->max_channel = 0;
 	host->io_port = 0;
 	host->n_io_port = 0;
@@ -1428,34 +1437,33 @@
 	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
 
 	hba->req_size = req_size;
-	start_virt = dma_alloc_coherent(&pcidev->dev,
-				hba->req_size*hba->max_requests + 0x20,
-				&start_phy, GFP_KERNEL);
-
-	if (!start_virt) {
-		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
-					hba->host->host_no);
-		goto free_request_irq;
-	}
-
-	hba->dma_coherent = start_virt;
-	hba->dma_coherent_handle = start_phy;
-
-	if ((start_phy & 0x1f) != 0) {
-		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
-		start_phy += offset;
-		start_virt += offset;
-	}
-
 	hba->req_list = NULL;
+
 	for (i = 0; i < hba->max_requests; i++) {
+		start_virt = dma_alloc_coherent(&pcidev->dev,
+					hba->req_size + 0x20,
+					&start_phy, GFP_KERNEL);
+
+		if (!start_virt) {
+			printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
+						hba->host->host_no);
+			goto free_request_mem;
+		}
+
+		hba->dma_coherent[i] = start_virt;
+		hba->dma_coherent_handle[i] = start_phy;
+
+		if ((start_phy & 0x1f) != 0) {
+			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
+			start_phy += offset;
+			start_virt += offset;
+		}
+
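+		/*
+		 * Each request owns its own coherent buffer, over-allocated by
+		 * 0x20 bytes so its start can be rounded up to a 32 byte
+		 * boundary; req_shifted_phy stores that physical address >> 5.
+		 */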
 		hba->reqs[i].next = NULL;
 		hba->reqs[i].req_virt = start_virt;
 		hba->reqs[i].req_shifted_phy = start_phy >> 5;
 		hba->reqs[i].index = i;
 		free_req(hba, &hba->reqs[i]);
-		start_virt = (char *)start_virt + hba->req_size;
-		start_phy = start_phy + hba->req_size;
 	}
 
 	/* Enable Interrupt and start background task */
@@ -1474,11 +1482,16 @@
 	return 0;
 
 free_request_mem:
-	dma_free_coherent(&hba->pcidev->dev,
-			hba->req_size * hba->max_requests + 0x20,
-			hba->dma_coherent, hba->dma_coherent_handle);
+	for (i = 0; i < hba->max_requests; i++) {
+		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
+			dma_free_coherent(&hba->pcidev->dev,
+					hba->req_size + 0x20,
+					hba->dma_coherent[i],
+					hba->dma_coherent_handle[i]);
+		else
+			break;
+	}
 
-free_request_irq:
 	free_irq(hba->pcidev->irq, hba);
 
 unmap_pci_bar:
@@ -1546,6 +1559,7 @@
 {
 	struct Scsi_Host *host = pci_get_drvdata(pcidev);
 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
+	u32 i;
 
 	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
 
@@ -1555,10 +1569,15 @@
 
 	free_irq(hba->pcidev->irq, hba);
 
-	dma_free_coherent(&hba->pcidev->dev,
-			hba->req_size * hba->max_requests + 0x20,
-			hba->dma_coherent,
-			hba->dma_coherent_handle);
+	for (i = 0; i < hba->max_requests; i++) {
+		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
+			dma_free_coherent(&hba->pcidev->dev,
+					hba->req_size + 0x20,
+					hba->dma_coherent[i],
+					hba->dma_coherent_handle[i]);
+		else
+			break;
+	}
 
 	hba->ops->internal_memfree(hba);
 
@@ -1653,6 +1672,14 @@
 	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
 	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
 	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
 	{},
 };
 
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 020619d..4d1c511 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
 /*
  * HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -327,8 +327,8 @@
 	struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
 
 	/* used to free allocated dma area */
-	void        *dma_coherent;
-	dma_addr_t  dma_coherent_handle;
+	void        *dma_coherent[HPTIOP_MAX_REQUESTS];
+	dma_addr_t  dma_coherent_handle[HPTIOP_MAX_REQUESTS];
 
 	atomic_t    reset_count;
 	atomic_t    resetting;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a9aa389..3411919 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1165,7 +1165,8 @@
 
 	if (ioa_cfg->sis64) {
 		proto = cfgtew->u.cfgte64->proto;
-		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
+		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
 		res->qmodel = IPR_QUEUEING_MODEL64(res);
 		res->type = cfgtew->u.cfgte64->res_type;
 
@@ -1313,8 +1314,8 @@
 	int new_path = 0;
 
 	if (res->ioa_cfg->sis64) {
-		res->flags = cfgtew->u.cfgte64->flags;
-		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
+		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
 		res->type = cfgtew->u.cfgte64->res_type;
 
 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
@@ -1900,7 +1901,7 @@
  * Return value:
  * 	none
  **/
-static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
+static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
 {
 	int i;
 
@@ -2270,7 +2271,7 @@
 			((unsigned long)fabric + be16_to_cpu(fabric->length));
 	}
 
-	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
 }
 
 /**
@@ -2364,7 +2365,7 @@
 			((unsigned long)fabric + be16_to_cpu(fabric->length));
 	}
 
-	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
 }
 
 /**
@@ -4455,7 +4456,7 @@
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = (struct ipr_resource_entry *)sdev->hostdata;
 	if (res && ioa_cfg->sis64)
-		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
+		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
 	else if (res)
 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
 
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6b97ee4..e4fb17a 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -39,8 +39,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.6.1"
-#define IPR_DRIVER_DATE "(March 12, 2015)"
+#define IPR_DRIVER_VERSION "2.6.2"
+#define IPR_DRIVER_DATE "(June 11, 2015)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1005,13 +1005,13 @@
 struct ipr_hostrcb_type_07_error {
 	u8 failure_reason[64];
 	struct ipr_vpd vpd;
-	u32 data[222];
+	__be32 data[222];
 }__attribute__((packed, aligned (4)));
 
 struct ipr_hostrcb_type_17_error {
 	u8 failure_reason[64];
 	struct ipr_ext_vpd vpd;
-	u32 data[476];
+	__be32 data[476];
 }__attribute__((packed, aligned (4)));
 
 struct ipr_hostrcb_config_element {
@@ -1289,18 +1289,17 @@
 	(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
 
 	u8 ata_class;
-
-	u8 flags;
-	__be16 res_flags;
-
 	u8 type;
 
+	u16 flags;
+	u16 res_flags;
+
 	u8 qmodel;
 	struct ipr_std_inq_data std_inq_data;
 
 	__be32 res_handle;
 	__be64 dev_id;
-	__be64 lun_wwn;
+	u64 lun_wwn;
 	struct scsi_lun dev_lun;
 	u8 res_path[8];
 
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2d5909c..5121272 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -191,7 +191,7 @@
 }
 
 /**
- * fc_fcp_pkt_destory() - Release hold on a fcp_pkt
+ * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
  * @seq: The sequence that the FCP packet is on (required by destructor API)
  * @fsp: The FCP packet to be released
  *
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ce96d5b..759cbeb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -701,7 +701,7 @@
 								HA_RXMASK));
 			}
 		}
-		if ((phba->sli_rev == LPFC_SLI_REV4) &
+		if ((phba->sli_rev == LPFC_SLI_REV4) &&
 				 (!list_empty(&pring->txq)))
 			lpfc_drain_txq(phba);
 		/*
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index bc7b34c..9d05302 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -268,8 +268,8 @@
 		raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;	/* i.e. 0x0E */
 
 		if ((retval = issue_scb_block(adapter, raw_mbox)))
-			printk(KERN_WARNING
-			"megaraid: Product_info cmd failed with error: %d\n",
+			dev_warn(&adapter->dev->dev,
+				"Product_info cmd failed with error: %d\n",
 				retval);
 
 		pci_unmap_single(adapter->dev, prod_info_dma_handle,
@@ -334,7 +334,7 @@
 		adapter->bios_version[4] = 0;
 	}
 
-	printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n",
+	dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
 		adapter->fw_version, adapter->bios_version, adapter->numldrv);
 
 	/*
@@ -342,7 +342,7 @@
 	 */
 	adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
 	if (adapter->support_ext_cdb)
-		printk(KERN_NOTICE "megaraid: supports extended CDBs.\n");
+		dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
 
 
 	return 0;
@@ -678,11 +678,11 @@
 
 			if(!(adapter->flag & (1L << cmd->device->channel))) {
 
-				printk(KERN_NOTICE
-					"scsi%d: scanning scsi channel %d ",
+				dev_notice(&adapter->dev->dev,
+					"scsi%d: scanning scsi channel %d "
+					"for logical drives\n",
 						adapter->host->host_no,
 						cmd->device->channel);
-				printk("for logical drives.\n");
 
 				adapter->flag |= (1L << cmd->device->channel);
 			}
@@ -983,11 +983,11 @@
 	case READ_CAPACITY:
 		if(!(adapter->flag & (1L << cmd->device->channel))) {
 
-			printk(KERN_NOTICE
-				"scsi%d: scanning scsi channel %d [P%d] ",
+			dev_notice(&adapter->dev->dev,
+				"scsi%d: scanning scsi channel %d [P%d] "
+				"for physical devices\n",
 					adapter->host->host_no,
 					cmd->device->channel, channel);
-			printk("for physical devices.\n");
 
 			adapter->flag |= (1L << cmd->device->channel);
 		}
@@ -1045,11 +1045,11 @@
 	case READ_CAPACITY:
 		if(!(adapter->flag & (1L << cmd->device->channel))) {
 
-			printk(KERN_NOTICE
-				"scsi%d: scanning scsi channel %d [P%d] ",
+			dev_notice(&adapter->dev->dev,
+				"scsi%d: scanning scsi channel %d [P%d] "
+				"for physical devices\n",
 					adapter->host->host_no,
 					cmd->device->channel, channel);
-			printk("for physical devices.\n");
 
 			adapter->flag |= (1L << cmd->device->channel);
 		}
@@ -1241,7 +1241,7 @@
 	return mbox->m_in.status;
 
 bug_blocked_mailbox:
-	printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n");
+	dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
 	udelay (1000);
 	return -1;
 }
@@ -1454,9 +1454,8 @@
 			 * Make sure f/w has completed a valid command
 			 */
 			if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
-				printk(KERN_CRIT
-					"megaraid: invalid command ");
-				printk("Id %d, scb->state:%x, scsi cmd:%p\n",
+				dev_crit(&adapter->dev->dev, "invalid command "
+					"Id %d, scb->state:%x, scsi cmd:%p\n",
 					cmdid, scb->state, scb->cmd);
 
 				continue;
@@ -1467,8 +1466,8 @@
 			 */
 			if( scb->state & SCB_ABORT ) {
 
-				printk(KERN_WARNING
-				"megaraid: aborted cmd [%x] complete.\n",
+				dev_warn(&adapter->dev->dev,
+					"aborted cmd [%x] complete\n",
 					scb->idx);
 
 				scb->cmd->result = (DID_ABORT << 16);
@@ -1486,8 +1485,8 @@
 			 */
 			if( scb->state & SCB_RESET ) {
 
-				printk(KERN_WARNING
-				"megaraid: reset cmd [%x] complete.\n",
+				dev_warn(&adapter->dev->dev,
+					"reset cmd [%x] complete\n",
 					scb->idx);
 
 				scb->cmd->result = (DID_RESET << 16);
@@ -1553,8 +1552,7 @@
 			if( sg_page(sgl) ) {
 				c = *(unsigned char *) sg_virt(&sgl[0]);
 			} else {
-				printk(KERN_WARNING
-				       "megaraid: invalid sg.\n");
+				dev_warn(&adapter->dev->dev, "invalid sg\n");
 				c = 0;
 			}
 
@@ -1902,11 +1900,10 @@
 	mc.opcode = MEGA_RESET_RESERVATIONS;
 
 	if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
-		printk(KERN_WARNING
-				"megaraid: reservation reset failed.\n");
+		dev_warn(&adapter->dev->dev, "reservation reset failed\n");
 	}
 	else {
-		printk(KERN_INFO "megaraid: reservation reset.\n");
+		dev_info(&adapter->dev->dev, "reservation reset\n");
 	}
 #endif
 
@@ -1939,7 +1936,7 @@
 	struct list_head	*pos, *next;
 	scb_t			*scb;
 
-	printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
+	dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
 	     (aor == SCB_ABORT)? "ABORTING":"RESET",
 	     cmd->cmnd[0], cmd->device->channel,
 	     cmd->device->id, (u32)cmd->device->lun);
@@ -1963,8 +1960,8 @@
 			 */
 			if( scb->state & SCB_ISSUED ) {
 
-				printk(KERN_WARNING
-					"megaraid: %s[%x], fw owner.\n",
+				dev_warn(&adapter->dev->dev,
+					"%s[%x], fw owner\n",
 					(aor==SCB_ABORT) ? "ABORTING":"RESET",
 					scb->idx);
 
@@ -1976,8 +1973,8 @@
 				 * Not yet issued! Remove from the pending
 				 * list
 				 */
-				printk(KERN_WARNING
-					"megaraid: %s-[%x], driver owner.\n",
+				dev_warn(&adapter->dev->dev,
+					"%s-[%x], driver owner\n",
 					(aor==SCB_ABORT) ? "ABORTING":"RESET",
 					scb->idx);
 
@@ -2197,7 +2194,7 @@
 
 	if( mega_adapinq(adapter, dma_handle) != 0 ) {
 		seq_puts(m, "Adapter inquiry failed.\n");
-		printk(KERN_WARNING "megaraid: inquiry failed.\n");
+		dev_warn(&adapter->dev->dev, "inquiry failed\n");
 		goto free_inquiry;
 	}
 
@@ -2241,7 +2238,7 @@
 
 	if( mega_adapinq(adapter, dma_handle) != 0 ) {
 		seq_puts(m, "Adapter inquiry failed.\n");
-		printk(KERN_WARNING "megaraid: inquiry failed.\n");
+		dev_warn(&adapter->dev->dev, "inquiry failed\n");
 		goto free_inquiry;
 	}
 
@@ -2350,7 +2347,7 @@
 
 	if( mega_adapinq(adapter, dma_handle) != 0 ) {
 		seq_puts(m, "Adapter inquiry failed.\n");
-		printk(KERN_WARNING "megaraid: inquiry failed.\n");
+		dev_warn(&adapter->dev->dev, "inquiry failed\n");
 		goto free_inquiry;
 	}
 
@@ -2525,7 +2522,7 @@
 
 	if( mega_adapinq(adapter, dma_handle) != 0 ) {
 		seq_puts(m, "Adapter inquiry failed.\n");
-		printk(KERN_WARNING "megaraid: inquiry failed.\n");
+		dev_warn(&adapter->dev->dev, "inquiry failed\n");
 		goto free_inquiry;
 	}
 
@@ -2799,7 +2796,7 @@
 	dir = adapter->controller_proc_dir_entry =
 		proc_mkdir_data(string, 0, parent, adapter);
 	if(!dir) {
-		printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n");
+		dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
 		return;
 	}
 
@@ -2807,7 +2804,7 @@
 		de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops,
 				      f->show);
 		if (!de) {
-			printk(KERN_WARNING "\nmegaraid: proc_create failed\n");
+			dev_warn(&adapter->dev->dev, "proc_create failed\n");
 			return;
 		}
 
@@ -2874,9 +2871,9 @@
 				return rval;
 		}
 
-		printk(KERN_INFO
-		"megaraid: invalid partition on this disk on channel %d\n",
-				sdev->channel);
+		dev_info(&adapter->dev->dev,
+			 "invalid partition on this disk on channel %d\n",
+			 sdev->channel);
 
 		/* Default heads (64) & sectors (32) */
 		heads = 64;
@@ -2936,7 +2933,7 @@
 		scb->sgl = (mega_sglist *)scb->sgl64;
 
 		if( !scb->sgl ) {
-			printk(KERN_WARNING "RAID: Can't allocate sglist.\n");
+			dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
 			mega_free_sgl(adapter);
 			return -1;
 		}
@@ -2946,7 +2943,7 @@
 				&scb->pthru_dma_addr);
 
 		if( !scb->pthru ) {
-			printk(KERN_WARNING "RAID: Can't allocate passthru.\n");
+			dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
 			mega_free_sgl(adapter);
 			return -1;
 		}
@@ -2956,8 +2953,8 @@
 				&scb->epthru_dma_addr);
 
 		if( !scb->epthru ) {
-			printk(KERN_WARNING
-				"Can't allocate extended passthru.\n");
+			dev_warn(&adapter->dev->dev,
+				"Can't allocate extended passthru\n");
 			mega_free_sgl(adapter);
 			return -1;
 		}
@@ -3154,8 +3151,8 @@
 			 * Do we support this feature
 			 */
 			if( !adapter->support_random_del ) {
-				printk(KERN_WARNING "megaraid: logdrv ");
-				printk("delete on non-supporting F/W.\n");
+				dev_warn(&adapter->dev->dev, "logdrv "
+					"delete on non-supporting F/W\n");
 
 				return (-EINVAL);
 			}
@@ -3179,7 +3176,7 @@
 		if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
 			uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
 
-			printk(KERN_WARNING "megaraid: rejected passthru.\n");
+			dev_warn(&adapter->dev->dev, "rejected passthru\n");
 
 			return (-EINVAL);
 		}
@@ -3683,11 +3680,11 @@
 
 	for( i = 0; i < adapter->product_info.nchannels; i++ ) { 
 		if( (adapter->mega_ch_class >> i) & 0x01 ) {
-			printk(KERN_INFO "megaraid: channel[%d] is raid.\n",
+			dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
 					i);
 		}
 		else {
-			printk(KERN_INFO "megaraid: channel[%d] is scsi.\n",
+			dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
 					i);
 		}
 	}
@@ -3893,7 +3890,7 @@
 
 	/* log this event */
 	if(rval) {
-		printk(KERN_WARNING "megaraid: Delete LD-%d failed.", logdrv);
+		dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
 		return rval;
 	}
 
@@ -4161,7 +4158,7 @@
 	 * this information.
 	 */
 	if (rval && trace_level) {
-		printk("megaraid: cmd [%x, %x, %x] status:[%x]\n",
+		dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
 			mc->cmd, mc->opcode, mc->subopcode, rval);
 	}
 
@@ -4244,11 +4241,8 @@
 	subsysvid = pdev->subsystem_vendor;
 	subsysid = pdev->subsystem_device;
 
-	printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:",
-		id->vendor, id->device, pci_bus);
-
-	printk("slot %d:func %d\n",
-		PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func));
+	dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
+		id->vendor, id->device);
 
 	/* Read the base port and IRQ from PCI */
 	mega_baseport = pci_resource_start(pdev, 0);
@@ -4259,14 +4253,13 @@
 		flag |= BOARD_MEMMAP;
 
 		if (!request_mem_region(mega_baseport, 128, "megaraid")) {
-			printk(KERN_WARNING "megaraid: mem region busy!\n");
+			dev_warn(&pdev->dev, "mem region busy!\n");
 			goto out_disable_device;
 		}
 
 		mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
 		if (!mega_baseport) {
-			printk(KERN_WARNING
-			       "megaraid: could not map hba memory\n");
+			dev_warn(&pdev->dev, "could not map hba memory\n");
 			goto out_release_region;
 		}
 	} else {
@@ -4285,7 +4278,7 @@
 	adapter = (adapter_t *)host->hostdata;
 	memset(adapter, 0, sizeof(adapter_t));
 
-	printk(KERN_NOTICE
+	dev_notice(&pdev->dev,
 		"scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
 		host->host_no, mega_baseport, irq);
 
@@ -4323,21 +4316,20 @@
 	adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
 		MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
 	if (!adapter->mega_buffer) {
-		printk(KERN_WARNING "megaraid: out of RAM.\n");
+		dev_warn(&pdev->dev, "out of RAM\n");
 		goto out_host_put;
 	}
 
 	adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL);
 	if (!adapter->scb_list) {
-		printk(KERN_WARNING "megaraid: out of RAM.\n");
+		dev_warn(&pdev->dev, "out of RAM\n");
 		goto out_free_cmd_buffer;
 	}
 
 	if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
 				megaraid_isr_memmapped : megaraid_isr_iomapped,
 					IRQF_SHARED, "megaraid", adapter)) {
-		printk(KERN_WARNING
-			"megaraid: Couldn't register IRQ %d!\n", irq);
+		dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
 		goto out_free_scb_list;
 	}
 
@@ -4357,9 +4349,9 @@
 		if (!strcmp(adapter->fw_version, "3.00") ||
 				!strcmp(adapter->fw_version, "3.01")) {
 
-			printk( KERN_WARNING
-				"megaraid: Your  card is a Dell PERC "
-				"2/SC RAID controller with  "
+			dev_warn(&pdev->dev,
+				"Your card is a Dell PERC "
+				"2/SC RAID controller with "
 				"firmware\nmegaraid: 3.00 or 3.01.  "
 				"This driver is known to have "
 				"corruption issues\nmegaraid: with "
@@ -4390,12 +4382,12 @@
 		if (!strcmp(adapter->fw_version, "H01.07") ||
 		    !strcmp(adapter->fw_version, "H01.08") ||
 		    !strcmp(adapter->fw_version, "H01.09") ) {
-			printk(KERN_WARNING
-				"megaraid: Firmware H.01.07, "
+			dev_warn(&pdev->dev,
+				"Firmware H.01.07, "
 				"H.01.08, and H.01.09 on 1M/2M "
 				"controllers\n"
-				"megaraid: do not support 64 bit "
-				"addressing.\nmegaraid: DISABLING "
+				"do not support 64 bit "
+				"addressing.\nDISABLING "
 				"64 bit support.\n");
 			adapter->flag &= ~BOARD_64BIT;
 		}
@@ -4503,8 +4495,8 @@
 	 */
 	adapter->has_cluster = mega_support_cluster(adapter);
 	if (adapter->has_cluster) {
-		printk(KERN_NOTICE
-			"megaraid: Cluster driver, initiator id:%d\n",
+		dev_notice(&pdev->dev,
+			"Cluster driver, initiator id:%d\n",
 			adapter->this_id);
 	}
 #endif
@@ -4571,7 +4563,7 @@
 	issue_scb_block(adapter, raw_mbox);
 	
 	if (atomic_read(&adapter->pend_cmds) > 0)
-		printk(KERN_WARNING "megaraid: pending commands!!\n");
+		dev_warn(&adapter->dev->dev, "pending commands!!\n");
 
 	/*
 	 * Have a delibrate delay to make sure all the caches are
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71b884d..eaa81e5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -216,7 +216,7 @@
 				 struct megasas_cmd, list);
 		list_del_init(&cmd->list);
 	} else {
-		printk(KERN_ERR "megasas: Command pool empty!\n");
+		dev_err(&instance->pdev->dev, "Command pool empty!\n");
 	}
 
 	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
@@ -273,6 +273,7 @@
 megasas_enable_intr_xscale(struct megasas_instance *instance)
 {
 	struct megasas_register_set __iomem *regs;
+
 	regs = instance->reg_set;
 	writel(0, &(regs)->outbound_intr_mask);
 
@@ -289,6 +290,7 @@
 {
 	struct megasas_register_set __iomem *regs;
 	u32 mask = 0x1f;
+
 	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
@@ -313,6 +315,7 @@
 {
 	u32 status;
 	u32 mfiStatus = 0;
+
 	/*
 	 * Check if it is our interrupt
 	 */
@@ -348,6 +351,7 @@
 		struct megasas_register_set __iomem *regs)
 {
 	unsigned long flags;
+
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	writel((frame_phys_addr >> 3)|(frame_count),
 	       &(regs)->inbound_queue_port);
@@ -364,15 +368,16 @@
 {
 	u32 i;
 	u32 pcidata;
+
 	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
 
 	for (i = 0; i < 3; i++)
 		msleep(1000); /* sleep for 3 secs */
 	pcidata  = 0;
 	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
-	printk(KERN_NOTICE "pcidata = %x\n", pcidata);
+	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
 	if (pcidata & 0x2) {
-		printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata);
+		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
 		pcidata &= ~0x2;
 		pci_write_config_dword(instance->pdev,
 				MFI_1068_PCSR_OFFSET, pcidata);
@@ -383,9 +388,9 @@
 		pcidata  = 0;
 		pci_read_config_dword(instance->pdev,
 				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
-		printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata);
+		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
 		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
-			printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata);
+			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
 			pcidata = 0;
 			pci_write_config_dword(instance->pdev,
 				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
@@ -402,7 +407,6 @@
 megasas_check_reset_xscale(struct megasas_instance *instance,
 		struct megasas_register_set __iomem *regs)
 {
-
 	if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
 	    (le32_to_cpu(*instance->consumer) ==
 		MEGASAS_ADPRESET_INPROG_SIGN))
@@ -433,7 +437,7 @@
 
 /**
 *	The following functions are defined for ppc (deviceid : 0x60)
-* 	controllers
+*	controllers
 */
 
 /**
@@ -444,6 +448,7 @@
 megasas_enable_intr_ppc(struct megasas_instance *instance)
 {
 	struct megasas_register_set __iomem *regs;
+
 	regs = instance->reg_set;
 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
 
@@ -462,6 +467,7 @@
 {
 	struct megasas_register_set __iomem *regs;
 	u32 mask = 0xFFFFFFFF;
+
 	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
@@ -522,6 +528,7 @@
 		struct megasas_register_set __iomem *regs)
 {
 	unsigned long flags;
+
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	writel((frame_phys_addr | (frame_count<<1))|1,
 			&(regs)->inbound_queue_port);
@@ -566,6 +573,7 @@
 megasas_enable_intr_skinny(struct megasas_instance *instance)
 {
 	struct megasas_register_set __iomem *regs;
+
 	regs = instance->reg_set;
 	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
 
@@ -584,6 +592,7 @@
 {
 	struct megasas_register_set __iomem *regs;
 	u32 mask = 0xFFFFFFFF;
+
 	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
@@ -634,8 +643,8 @@
 	writel(status, &regs->outbound_intr_status);
 
 	/*
-	* dummy read to flush PCI
-	*/
+	 * dummy read to flush PCI
+	 */
 	readl(&regs->outbound_intr_status);
 
 	return mfiStatus;
@@ -654,6 +663,7 @@
 			struct megasas_register_set __iomem *regs)
 {
 	unsigned long flags;
+
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	writel(upper_32_bits(frame_phys_addr),
 	       &(regs)->inbound_high_queue_port);
@@ -706,6 +716,7 @@
 megasas_enable_intr_gen2(struct megasas_instance *instance)
 {
 	struct megasas_register_set __iomem *regs;
+
 	regs = instance->reg_set;
 	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
 
@@ -725,6 +736,7 @@
 {
 	struct megasas_register_set __iomem *regs;
 	u32 mask = 0xFFFFFFFF;
+
 	regs = instance->reg_set;
 	writel(mask, &regs->outbound_intr_mask);
 	/* Dummy readl to force pci flush */
@@ -750,6 +762,7 @@
 {
 	u32 status;
 	u32 mfiStatus = 0;
+
 	/*
 	 * Check if it is our interrupt
 	 */
@@ -786,6 +799,7 @@
 			struct megasas_register_set __iomem *regs)
 {
 	unsigned long flags;
+
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	writel((frame_phys_addr | (frame_count<<1))|1,
 			&(regs)->inbound_queue_port);
@@ -800,10 +814,10 @@
 megasas_adp_reset_gen2(struct megasas_instance *instance,
 			struct megasas_register_set __iomem *reg_set)
 {
-	u32			retry = 0 ;
-	u32			HostDiag;
-	u32 __iomem		*seq_offset = &reg_set->seq_offset;
-	u32 __iomem		*hostdiag_offset = &reg_set->host_diag;
+	u32 retry = 0;
+	u32 HostDiag;
+	u32 __iomem *seq_offset = &reg_set->seq_offset;
+	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
 
 	if (instance->instancet == &megasas_instance_template_skinny) {
 		seq_offset = &reg_set->fusion_seq_offset;
@@ -821,10 +835,10 @@
 
 	HostDiag = (u32)readl(hostdiag_offset);
 
-	while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
+	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
 		msleep(100);
 		HostDiag = (u32)readl(hostdiag_offset);
-		printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
+		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
 					retry, HostDiag);
 
 		if (retry++ >= 100)
@@ -832,17 +846,17 @@
 
 	}
 
-	printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
+	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
 
 	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
 
 	ssleep(10);
 
 	HostDiag = (u32)readl(hostdiag_offset);
-	while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
+	while (HostDiag & DIAG_RESET_ADAPTER) {
 		msleep(100);
 		HostDiag = (u32)readl(hostdiag_offset);
-		printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
+		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
 				retry, HostDiag);
 
 		if (retry++ >= 1000)
@@ -904,7 +918,6 @@
 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
 	int seconds;
-
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
 
 	frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
@@ -940,6 +953,7 @@
 			  struct megasas_cmd *cmd, int timeout)
 {
 	int ret = 0;
+
 	cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 
 	instance->instancet->issue_dcmd(instance, cmd);
@@ -1120,7 +1134,7 @@
 	int num_cnt;
 	int sge_bytes;
 	u32 sge_sz;
-	u32 frame_count=0;
+	u32 frame_count = 0;
 
 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
 	    sizeof(struct megasas_sge32);
@@ -1151,14 +1165,14 @@
 			num_cnt = sge_count - 3;
 	}
 
-	if(num_cnt>0){
+	if (num_cnt > 0) {
 		sge_bytes = sge_sz * num_cnt;
 
 		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
 		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
 	}
 	/* Main frame */
-	frame_count +=1;
+	frame_count += 1;
 
 	if (frame_count > 7)
 		frame_count = 8;
@@ -1215,9 +1229,9 @@
 	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
 
 	/*
-	* If the command is for the tape device, set the
-	* pthru timeout to the os layer timeout value.
-	*/
+	 * If the command is for the tape device, set the
+	 * pthru timeout to the os layer timeout value.
+	 */
 	if (scp->device->type == TYPE_TAPE) {
 		if ((scp->request->timeout / HZ) > 0xFFFF)
 			pthru->timeout = cpu_to_le16(0xFFFF);
@@ -1241,7 +1255,7 @@
 						      &pthru->sgl);
 
 	if (pthru->sge_count > instance->max_num_sge) {
-		printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n",
+		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
 			pthru->sge_count);
 		return 0;
 	}
@@ -1382,7 +1396,7 @@
 		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
 
 	if (ldio->sge_count > instance->max_num_sge) {
-		printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n",
+		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
 			ldio->sge_count);
 		return 0;
 	}
@@ -1435,7 +1449,7 @@
 
  /**
  * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
- *                              	in FW
+ *					in FW
  * @instance:				Adapter soft state
  */
 static inline void
@@ -1449,63 +1463,60 @@
 	u32 sgcount;
 	u32 max_cmd = instance->max_fw_cmds;
 
-	printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
-	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
 	if (IS_DMA64)
-		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
 	else
-		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
 
-	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
 	for (i = 0; i < max_cmd; i++) {
 		cmd = instance->cmd_list[i];
-		if(!cmd->scmd)
+		if (!cmd->scmd)
 			continue;
-		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
 			ldio = (struct megasas_io_frame *)cmd->frame;
 			mfi_sgl = &ldio->sgl;
 			sgcount = ldio->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
-		}
-		else {
+		} else {
 			pthru = (struct megasas_pthru_frame *) cmd->frame;
 			mfi_sgl = &pthru->sgl;
 			sgcount = pthru->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
 		}
-	if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
-		for (n = 0; n < sgcount; n++){
-			if (IS_DMA64)
-				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
-					le32_to_cpu(mfi_sgl->sge64[n].length),
-					le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
-			else
-				printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
-					le32_to_cpu(mfi_sgl->sge32[n].length),
-					le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
+		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
+			for (n = 0; n < sgcount; n++) {
+				if (IS_DMA64)
+					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
+						le32_to_cpu(mfi_sgl->sge64[n].length),
+						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
+				else
+					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
+						le32_to_cpu(mfi_sgl->sge32[n].length),
+						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
 			}
 		}
-		printk(KERN_ERR "\n");
 	} /*for max_cmd*/
-	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
 	for (i = 0; i < max_cmd; i++) {
 
 		cmd = instance->cmd_list[i];
 
-		if(cmd->sync_cmd == 1){
-			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
-		}
+		if (cmd->sync_cmd == 1)
+			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
 	}
-	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
 }
 
 u32
@@ -1623,7 +1634,7 @@
 	}
 
 	if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
-		printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n");
+		dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n");
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
 
@@ -1651,8 +1662,8 @@
 static int megasas_slave_configure(struct scsi_device *sdev)
 {
 	/*
-	* The RAID firmware may require extended timeouts.
-	*/
+	 * The RAID firmware may require extended timeouts.
+	 */
 	blk_queue_rq_timeout(sdev->request_queue,
 		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
 
@@ -1661,8 +1672,9 @@
 
 static int megasas_slave_alloc(struct scsi_device *sdev)
 {
-	u16             pd_index = 0;
+	u16 pd_index = 0;
 	struct megasas_instance *instance ;
+
 	instance = megasas_lookup_instance(sdev->host->host_no);
 	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
 		/*
@@ -1728,8 +1740,7 @@
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
-		writel(MFI_STOP_ADP,
-			&instance->reg_set->doorbell);
+		writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
 		/* Flush */
 		readl(&instance->reg_set->doorbell);
 		if (instance->mpio && instance->requestorId)
@@ -1783,7 +1794,7 @@
 	unsigned long flags;
 
 	/* If we have already declared adapter dead, donot complete cmds */
-	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR )
+	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
 		return;
 
 	spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1794,7 +1805,7 @@
 	while (consumer != producer) {
 		context = le32_to_cpu(instance->reply_queue[consumer]);
 		if (context >= instance->max_fw_cmds) {
-			printk(KERN_ERR "Unexpected context value %x\n",
+			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
 				context);
 			BUG();
 		}
@@ -1873,8 +1884,8 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
-		       "Failed to get cmd for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
+		       "Failed to get cmd for scsi%d\n",
 			instance->host->host_no);
 		return -ENOMEM;
 	}
@@ -1882,8 +1893,8 @@
 	dcmd = &cmd->frame->dcmd;
 
 	if (!instance->vf_affiliation_111) {
-		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
-		       "affiliation for scsi%d.\n", instance->host->host_no);
+		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d\n", instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -1897,8 +1908,8 @@
 					     sizeof(struct MR_LD_VF_AFFILIATION_111),
 					     &new_affiliation_111_h);
 		if (!new_affiliation_111) {
-			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
-			       "memory for new affiliation for scsi%d.\n",
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d\n",
 			       instance->host->host_no);
 			megasas_return_cmd(instance, cmd);
 			return -ENOMEM;
@@ -1929,14 +1940,14 @@
 	dcmd->sgl.sge32[0].length = cpu_to_le32(
 		sizeof(struct MR_LD_VF_AFFILIATION_111));
 
-	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
 	       "scsi%d\n", instance->host->host_no);
 
 	megasas_issue_blocked_cmd(instance, cmd, 0);
 
 	if (dcmd->cmd_status) {
-		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
-		       " failed with status 0x%x for scsi%d.\n",
+		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d\n",
 		       dcmd->cmd_status, instance->host->host_no);
 		retval = 1; /* Do a scan if we couldn't get affiliation */
 		goto out;
@@ -1947,9 +1958,8 @@
 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
 			    new_affiliation_111->map[ld].policy[thisVf]) {
-				printk(KERN_WARNING "megasas: SR-IOV: "
-				       "Got new LD/VF affiliation "
-				       "for scsi%d.\n",
+				dev_warn(&instance->pdev->dev, "SR-IOV: "
+				       "Got new LD/VF affiliation for scsi%d\n",
 				       instance->host->host_no);
 				memcpy(instance->vf_affiliation_111,
 				       new_affiliation_111,
@@ -1985,8 +1995,8 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
-		       "Failed to get cmd for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
+		       "Failed to get cmd for scsi%d\n",
 		       instance->host->host_no);
 		return -ENOMEM;
 	}
@@ -1994,8 +2004,8 @@
 	dcmd = &cmd->frame->dcmd;
 
 	if (!instance->vf_affiliation) {
-		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
-		       "affiliation for scsi%d.\n", instance->host->host_no);
+		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d\n", instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -2010,8 +2020,8 @@
 					     sizeof(struct MR_LD_VF_AFFILIATION),
 					     &new_affiliation_h);
 		if (!new_affiliation) {
-			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
-			       "memory for new affiliation for scsi%d.\n",
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d\n",
 			       instance->host->host_no);
 			megasas_return_cmd(instance, cmd);
 			return -ENOMEM;
@@ -2042,14 +2052,14 @@
 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
 		sizeof(struct MR_LD_VF_AFFILIATION));
 
-	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
 	       "scsi%d\n", instance->host->host_no);
 
 	megasas_issue_blocked_cmd(instance, cmd, 0);
 
 	if (dcmd->cmd_status) {
-		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
-		       " failed with status 0x%x for scsi%d.\n",
+		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d\n",
 		       dcmd->cmd_status, instance->host->host_no);
 		retval = 1; /* Do a scan if we couldn't get affiliation */
 		goto out;
@@ -2057,8 +2067,8 @@
 
 	if (!initial) {
 		if (!new_affiliation->ldCount) {
-			printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
-			       "affiliation for passive path for scsi%d.\n",
+			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+			       "affiliation for passive path for scsi%d\n",
 			       instance->host->host_no);
 			retval = 1;
 			goto out;
@@ -2123,8 +2133,8 @@
 	}
 out:
 	if (doscan) {
-		printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
-		       "affiliation for scsi%d.\n", instance->host->host_no);
+		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+		       "affiliation for scsi%d\n", instance->host->host_no);
 		memcpy(instance->vf_affiliation, new_affiliation,
 		       new_affiliation->size);
 		retval = 1;
@@ -2164,8 +2174,8 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
-		       "Failed to get cmd for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
+		       "Failed to get cmd for scsi%d\n",
 		       instance->host->host_no);
 		return -ENOMEM;
 	}
@@ -2178,9 +2188,9 @@
 					      sizeof(struct MR_CTRL_HB_HOST_MEM),
 					      &instance->hb_host_mem_h);
 		if (!instance->hb_host_mem) {
-			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
-			       " memory for heartbeat host memory for "
-			       "scsi%d.\n", instance->host->host_no);
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
+			       " memory for heartbeat host memory for scsi%d\n",
+			       instance->host->host_no);
 			retval = -ENOMEM;
 			goto out;
 		}
@@ -2200,7 +2210,7 @@
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
 
-	printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
 	       instance->host->host_no);
 
 	if (instance->ctrl_context && !instance->mask_interrupts)
@@ -2236,7 +2246,7 @@
 		mod_timer(&instance->sriov_heartbeat_timer,
 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
 	} else {
-		printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
 		       "completed for scsi%d\n", instance->host->host_no);
 		schedule_work(&instance->work_init);
 	}
@@ -2274,7 +2284,7 @@
 				&clist_local);
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-		printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
 		for (i = 0; i < wait_time; i++) {
 			msleep(1000);
 			spin_lock_irqsave(&instance->hba_lock, flags);
@@ -2285,28 +2295,28 @@
 		}
 
 		if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
-			printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
 			spin_lock_irqsave(&instance->hba_lock, flags);
-			instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
+			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 			spin_unlock_irqrestore(&instance->hba_lock, flags);
 			return FAILED;
 		}
 
-		reset_index	= 0;
+		reset_index = 0;
 		while (!list_empty(&clist_local)) {
-			reset_cmd	= list_entry((&clist_local)->next,
+			reset_cmd = list_entry((&clist_local)->next,
 						struct megasas_cmd, list);
 			list_del_init(&reset_cmd->list);
 			if (reset_cmd->scmd) {
 				reset_cmd->scmd->result = DID_RESET << 16;
-				printk(KERN_NOTICE "%d:%p reset [%02x]\n",
+				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
 					reset_index, reset_cmd,
 					reset_cmd->scmd->cmnd[0]);
 
 				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
 				megasas_return_cmd(instance, reset_cmd);
 			} else if (reset_cmd->sync_cmd) {
-				printk(KERN_NOTICE "megasas:%p synch cmds"
+				dev_notice(&instance->pdev->dev, "%p synch cmds "
 						"reset queue\n",
 						reset_cmd);
 
@@ -2315,7 +2325,7 @@
 						reset_cmd->frame_phys_addr,
 						0, instance->reg_set);
 			} else {
-				printk(KERN_NOTICE "megasas: %p unexpected"
+				dev_notice(&instance->pdev->dev, "%p unexpected "
 					"cmds lst\n",
 					reset_cmd);
 			}
@@ -2326,14 +2336,13 @@
 	}
 
 	for (i = 0; i < resetwaittime; i++) {
-
 		int outstanding = atomic_read(&instance->fw_outstanding);
 
 		if (!outstanding)
 			break;
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
 			       "commands to complete\n",i,outstanding);
 			/*
 			 * Call cmd completion routine. Cmd to be
@@ -2365,10 +2374,8 @@
 		i++;
 	} while (i <= 3);
 
-	if (atomic_read(&instance->fw_outstanding) &&
-					!kill_adapter_flag) {
+	if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
 		if (instance->disableOnlineCtrlReset == 0) {
-
 			megasas_do_ocr(instance);
 
 			/* wait for 5 secs to let FW finish the pending cmds */
@@ -2384,11 +2391,11 @@
 
 	if (atomic_read(&instance->fw_outstanding) ||
 					(kill_adapter_flag == 2)) {
-		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+		dev_notice(&instance->pdev->dev, "pending cmds after reset\n");
 		/*
-		* Send signal to FW to stop processing any pending cmds.
-		* The controller will be taken offline by the OS now.
-		*/
+		 * Send signal to FW to stop processing any pending cmds.
+		 * The controller will be taken offline by the OS now.
+		 */
 		if ((instance->pdev->device ==
 			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 			(instance->pdev->device ==
@@ -2401,12 +2408,12 @@
 		}
 		megasas_dump_pending_frames(instance);
 		spin_lock_irqsave(&instance->hba_lock, flags);
-		instance->adprecovery	= MEGASAS_HW_CRITICAL_ERROR;
+		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 		return FAILED;
 	}
 
-	printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+	dev_notice(&instance->pdev->dev, "no pending cmds after reset\n");
 
 	return SUCCESS;
 }
@@ -2430,16 +2437,15 @@
 		 scmd->cmnd[0], scmd->retries);
 
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		printk(KERN_ERR "megasas: cannot recover from previous reset "
-		       "failures\n");
+		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
 		return FAILED;
 	}
 
 	ret_val = megasas_wait_for_outstanding(instance);
 	if (ret_val == SUCCESS)
-		printk(KERN_NOTICE "megasas: reset successful \n");
+		dev_notice(&instance->pdev->dev, "reset successful\n");
 	else
-		printk(KERN_ERR "megasas: failed to do reset\n");
+		dev_err(&instance->pdev->dev, "failed to do reset\n");
 
 	return ret_val;
 }
@@ -2481,14 +2487,10 @@
  */
 static int megasas_reset_device(struct scsi_cmnd *scmd)
 {
-	int ret;
-
 	/*
 	 * First wait for all commands to complete
 	 */
-	ret = megasas_generic_reset(scmd);
-
-	return ret;
+	return megasas_generic_reset(scmd);
 }
 
 /**
@@ -2498,6 +2500,7 @@
 {
 	int ret;
 	struct megasas_instance *instance;
+
 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
 
 	/*
@@ -2516,7 +2519,7 @@
 
 /**
  * megasas_bios_param - Returns disk geometry for a disk
- * @sdev: 		device handle
+ * @sdev:		device handle
  * @bdev:		block device
  * @capacity:		drive capacity
  * @geom:		geometry parameters
@@ -2529,6 +2532,7 @@
 	int sectors;
 	sector_t cylinders;
 	unsigned long tmp;
+
 	/* Default heads (64) & sectors (32) */
 	heads = 64;
 	sectors = 32;
@@ -2575,6 +2579,7 @@
 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
 	unsigned long flags;
+
 	/*
 	 * Don't signal app if it is just an aborted previously registered aen
 	 */
@@ -2595,9 +2600,10 @@
 	if ((instance->unload == 0) &&
 		((instance->issuepend_done == 1))) {
 		struct megasas_aen_event *ev;
+
 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 		if (!ev) {
-			printk(KERN_ERR "megasas_service_aen: out of memory\n");
+			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
 		} else {
 			ev->instance = instance;
 			instance->ev = ev;
@@ -2654,8 +2660,7 @@
 
 	buff_addr = (unsigned long) buf;
 
-	if (buff_offset >
-		(instance->fw_crash_buffer_size * dmachunk)) {
+	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump offset is out of range\n");
 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
@@ -2667,7 +2672,7 @@
 
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
 		(buff_offset % dmachunk);
-	memcpy(buf, (void *)src_addr,  size);
+	memcpy(buf, (void *)src_addr, size);
 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
 
 	return size;
@@ -2727,6 +2732,7 @@
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
 }
 
@@ -2811,8 +2817,6 @@
 		cmd->cmd_status_drv = 0;
 		wake_up(&instance->abort_cmd_wait_q);
 	}
-
-	return;
 }
 
 /**
@@ -2820,10 +2824,10 @@
  * @instance:			Adapter soft state
  * @cmd:			Command to be completed
  * @alt_status:			If non-zero, use this value as status to
- * 				SCSI mid-layer instead of the value returned
- * 				by the FW. This should be used if caller wants
- * 				an alternate status (as in the case of aborted
- * 				commands)
+ *				SCSI mid-layer instead of the value returned
+ *				by the FW. This should be used if caller wants
+ *				an alternate status (as in the case of aborted
+ *				commands)
  */
 void
 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
@@ -2847,10 +2851,10 @@
 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
 		   when booting the kdump kernel.  Ignore this command to
 		   prevent a kernel panic on shutdown of the kdump kernel. */
-		printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
-		       "completed.\n");
-		printk(KERN_WARNING "megaraid_sas: If you have a controller "
-		       "other than PERC5, please upgrade your firmware.\n");
+		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
+		       "completed\n");
+		dev_warn(&instance->pdev->dev, "If you have a controller "
+		       "other than PERC5, please upgrade your firmware\n");
 		break;
 	case MFI_CMD_PD_SCSI_IO:
 	case MFI_CMD_LD_SCSI_IO:
@@ -2918,7 +2922,7 @@
 			break;
 
 		default:
-			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
 			       hdr->cmd_status);
 			cmd->scmd->result = DID_ERROR << 16;
 			break;
@@ -2944,8 +2948,7 @@
 			if (cmd->frame->hdr.cmd_status != 0) {
 				if (cmd->frame->hdr.cmd_status !=
 				    MFI_STAT_NOT_FOUND)
-					printk(KERN_WARNING "megasas: map sync"
-					       "failed, status = 0x%x.\n",
+					dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
 					       cmd->frame->hdr.cmd_status);
 				else {
 					megasas_return_cmd(instance, cmd);
@@ -2997,7 +3000,7 @@
 		break;
 
 	default:
-		printk("megasas: Unknown command completed! [0x%X]\n",
+		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
 		       hdr->cmd);
 		break;
 	}
@@ -3005,7 +3008,7 @@
 
 /**
  * megasas_issue_pending_cmds_again -	issue all pending cmds
- *                              	in FW again because of the fw reset
+ *					in FW again because of the fw reset
  * @instance:				Adapter soft state
  */
 static inline void
@@ -3023,19 +3026,19 @@
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	while (!list_empty(&clist_local)) {
-		cmd	= list_entry((&clist_local)->next,
+		cmd = list_entry((&clist_local)->next,
 					struct megasas_cmd, list);
 		list_del_init(&cmd->list);
 
 		if (cmd->sync_cmd || cmd->scmd) {
-			printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
-				"detected to be pending while HBA reset.\n",
+			dev_notice(&instance->pdev->dev, "command %p, %p:%d "
+				"detected to be pending while HBA reset\n",
 					cmd, cmd->scmd, cmd->sync_cmd);
 
 			cmd->retry_for_fw_reset++;
 
 			if (cmd->retry_for_fw_reset == 3) {
-				printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
+				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
 					"was tried multiple times during reset."
 					"Shutting down the HBA\n",
 					cmd, cmd->scmd, cmd->sync_cmd);
@@ -3048,18 +3051,18 @@
 
 		if (cmd->sync_cmd == 1) {
 			if (cmd->scmd) {
-				printk(KERN_NOTICE "megaraid_sas: unexpected"
+				dev_notice(&instance->pdev->dev, "unexpected "
 					"cmd attached to internal command!\n");
 			}
-			printk(KERN_NOTICE "megasas: %p synchronous cmd"
+			dev_notice(&instance->pdev->dev, "%p synchronous cmd "
 						"on the internal reset queue,"
 						"issue it again.\n", cmd);
 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 			instance->instancet->fire_cmd(instance,
-							cmd->frame_phys_addr ,
+							cmd->frame_phys_addr,
 							0, instance->reg_set);
 		} else if (cmd->scmd) {
-			printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
+			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
 			"detected on the internal queue, issue again.\n",
 			cmd, cmd->scmd->cmnd[0]);
 
@@ -3068,22 +3071,22 @@
 					cmd->frame_phys_addr,
 					cmd->frame_count-1, instance->reg_set);
 		} else {
-			printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
+			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
 				"internal reset defer list while re-issue!!\n",
 				cmd);
 		}
 	}
 
 	if (instance->aen_cmd) {
-		printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
 		megasas_return_cmd(instance, instance->aen_cmd);
 
-		instance->aen_cmd	= NULL;
+		instance->aen_cmd = NULL;
 	}
 
 	/*
-	* Initiate AEN (Asynchronous Event Notification)
-	*/
+	 * Initiate AEN (Asynchronous Event Notification)
+	 */
 	seq_num = instance->last_seq_num;
 	class_locale.members.reserved = 0;
 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
@@ -3110,17 +3113,17 @@
 	u32 defer_index;
 	unsigned long flags;
 
-	defer_index     = 0;
+	defer_index = 0;
 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
 	for (i = 0; i < max_cmd; i++) {
 		cmd = instance->cmd_list[i];
 		if (cmd->sync_cmd == 1 || cmd->scmd) {
-			printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
+			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
 					"on the defer queue as internal\n",
 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
 
 			if (!list_empty(&cmd->list)) {
-				printk(KERN_NOTICE "megaraid_sas: ERROR while"
+				dev_notice(&instance->pdev->dev, "ERROR while"
 					" moving this cmd:%p, %d %p, it was"
 					"discovered on some list?\n",
 					cmd, cmd->sync_cmd, cmd->scmd);
@@ -3145,13 +3148,13 @@
 	unsigned long flags;
 
 	if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
-		printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
+		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
 				instance->adprecovery);
 		return ;
 	}
 
 	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
-		printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
+		dev_notice(&instance->pdev->dev, "FW detected to be in fault "
 					"state, restarting it...\n");
 
 		instance->instancet->disable_intr(instance);
@@ -3159,21 +3162,21 @@
 
 		atomic_set(&instance->fw_reset_no_pci_access, 1);
 		instance->instancet->adp_reset(instance, instance->reg_set);
-		atomic_set(&instance->fw_reset_no_pci_access, 0 );
+		atomic_set(&instance->fw_reset_no_pci_access, 0);
 
-		printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
+		dev_notice(&instance->pdev->dev, "FW restarted successfully, "
 					"initiating next stage...\n");
 
-		printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
+		dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
 					"state 2 starting...\n");
 
-		/*waitting for about 20 second before start the second init*/
+		/* wait for about 20 seconds before starting the second init */
 		for (wait = 0; wait < 30; wait++) {
 			msleep(1000);
 		}
 
 		if (megasas_transition_to_ready(instance, 1)) {
-			printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+			dev_notice(&instance->pdev->dev, "adapter not ready\n");
 
 			atomic_set(&instance->fw_reset_no_pci_access, 1);
 			megaraid_sas_kill_hba(instance);
@@ -3200,15 +3203,14 @@
 		megasas_issue_pending_cmds_again(instance);
 		instance->issuepend_done = 1;
 	}
-	return ;
 }
 
 /**
  * megasas_deplete_reply_queue -	Processes all completed commands
  * @instance:				Adapter soft state
  * @alt_status:				Alternate status to be returned to
- * 					SCSI mid-layer instead of the status
- * 					returned by the FW
+ *					SCSI mid-layer instead of the status
+ *					returned by the FW
  * Note: this must be called with hba lock held
  */
 static int
@@ -3238,13 +3240,13 @@
 				instance->reg_set) & MFI_STATE_MASK;
 
 		if (fw_state != MFI_STATE_FAULT) {
-			printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+			dev_notice(&instance->pdev->dev, "fw state:%x\n",
 						fw_state);
 		}
 
 		if ((fw_state == MFI_STATE_FAULT) &&
 				(instance->disableOnlineCtrlReset == 0)) {
-			printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+			dev_notice(&instance->pdev->dev, "wait adp restart\n");
 
 			if ((instance->pdev->device ==
 					PCI_DEVICE_ID_LSI_SAS1064R) ||
@@ -3265,14 +3267,14 @@
 			atomic_set(&instance->fw_outstanding, 0);
 			megasas_internal_reset_defer_cmds(instance);
 
-			printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
 					fw_state, instance->adprecovery);
 
 			schedule_work(&instance->work_init);
 			return IRQ_HANDLED;
 
 		} else {
-			printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
 				fw_state, instance->disableOnlineCtrlReset);
 		}
 	}
@@ -3288,13 +3290,13 @@
 	struct megasas_irq_context *irq_context = devp;
 	struct megasas_instance *instance = irq_context->instance;
 	unsigned long flags;
-	irqreturn_t	rc;
+	irqreturn_t rc;
 
 	if (atomic_read(&instance->fw_reset_no_pci_access))
 		return IRQ_HANDLED;
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
-	rc =  megasas_deplete_reply_queue(instance, DID_OK);
+	rc = megasas_deplete_reply_queue(instance, DID_OK);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	return rc;
@@ -3322,7 +3324,7 @@
 	fw_state = abs_state & MFI_STATE_MASK;
 
 	if (fw_state != MFI_STATE_READY)
-		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
 		       " state\n");
 
 	while (fw_state != MFI_STATE_READY) {
@@ -3330,7 +3332,7 @@
 		switch (fw_state) {
 
 		case MFI_STATE_FAULT:
-			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
 			if (ocr) {
 				max_wait = MEGASAS_RESET_WAIT_TIME;
 				cur_state = MFI_STATE_FAULT;
@@ -3469,7 +3471,7 @@
 			break;
 
 		default:
-			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
 			       fw_state);
 			return -ENODEV;
 		}
@@ -3491,7 +3493,7 @@
 		 * Return error if fw_state hasn't changed after max_wait
 		 */
 		if (curr_abs_state == abs_state) {
-			printk(KERN_DEBUG "FW state [%d] hasn't changed "
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
 			       "in %d secs\n", fw_state, max_wait);
 			return -ENODEV;
 		}
@@ -3499,7 +3501,7 @@
 		abs_state = curr_abs_state;
 		fw_state = curr_abs_state & MFI_STATE_MASK;
 	}
-	printk(KERN_INFO "megasas: FW now in Ready state\n");
+	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
 
 	return 0;
 }
@@ -3570,9 +3572,8 @@
 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
 	    sizeof(struct megasas_sge32);
 
-	if (instance->flag_ieee) {
+	if (instance->flag_ieee)
 		sge_sz = sizeof(struct megasas_sge_skinny);
-	}
 
 	/*
 	 * For MFI controllers.
@@ -3594,7 +3595,7 @@
 					instance->pdev, total_sz, 256, 0);
 
 	if (!instance->frame_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
 		return -ENOMEM;
 	}
 
@@ -3602,7 +3603,7 @@
 						   instance->pdev, 128, 4, 0);
 
 	if (!instance->sense_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
 
 		pci_pool_destroy(instance->frame_dma_pool);
 		instance->frame_dma_pool = NULL;
@@ -3630,7 +3631,7 @@
 		 * whatever has been allocated
 		 */
 		if (!cmd->frame || !cmd->sense) {
-			printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
 			megasas_teardown_frame_pool(instance);
 			return -ENOMEM;
 		}
@@ -3656,6 +3657,7 @@
 void megasas_free_cmds(struct megasas_instance *instance)
 {
 	int i;
+
 	/* First free the MFI frame pool */
 	megasas_teardown_frame_pool(instance);
 
@@ -3708,7 +3710,7 @@
 	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
 
 	if (!instance->cmd_list) {
-		printk(KERN_DEBUG "megasas: out of memory\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
 		return -ENOMEM;
 	}
 
@@ -3744,7 +3746,7 @@
 	 * Create a frame pool and assign one frame to each cmd
 	 */
 	if (megasas_create_frame_pool(instance)) {
-		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
 		megasas_free_cmds(instance);
 	}
 
@@ -3773,7 +3775,7 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
 		return -ENOMEM;
 	}
 
@@ -3783,7 +3785,7 @@
 		  MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -3811,12 +3813,12 @@
 		ret = megasas_issue_polled(instance, cmd);
 
 	/*
-	* the following function will get the instance PD LIST.
-	*/
+	 * the following function will get the instance PD LIST.
+	 */
 
 	pd_addr = ci->addr;
 
-	if ( ret == 0 &&
+	if (ret == 0 &&
 	     (le32_to_cpu(ci->count) <
 		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
 
@@ -3868,7 +3870,7 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
 		return -ENOMEM;
 	}
 
@@ -3879,7 +3881,7 @@
 				&ci_h);
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -3954,8 +3956,8 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_WARNING
-		       "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+		dev_warn(&instance->pdev->dev,
+		         "megasas_ld_list_query: Failed to get cmd\n");
 		return -ENOMEM;
 	}
 
@@ -3965,8 +3967,8 @@
 				  sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
 
 	if (!ci) {
-		printk(KERN_WARNING
-		       "megasas: Failed to alloc mem for ld_list_query\n");
+		dev_warn(&instance->pdev->dev,
+		         "Failed to alloc mem for ld_list_query\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -4052,11 +4054,11 @@
 		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
 		"Legacy(64 VD) firmware");
 
-	old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
+	old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
 				(sizeof(struct MR_LD_SPAN_MAP) *
 				(instance->fw_supported_vd_count - 1));
-	new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
-	fusion->drv_map_sz =  sizeof(struct MR_DRV_RAID_MAP) +
+	new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
+	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
 				(sizeof(struct MR_LD_SPAN_MAP) *
 				(instance->drv_supported_vd_count - 1));
 
@@ -4067,7 +4069,6 @@
 		fusion->current_map_sz = new_map_sz;
 	else
 		fusion->current_map_sz = old_map_sz;
-
 }
 
 /**
@@ -4093,7 +4094,7 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
 		return -ENOMEM;
 	}
 
@@ -4103,7 +4104,7 @@
 				  sizeof(struct megasas_ctrl_info), &ci_h);
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -4214,9 +4215,7 @@
 megasas_issue_init_mfi(struct megasas_instance *instance)
 {
 	__le32 context;
-
 	struct megasas_cmd *cmd;
-
 	struct megasas_init_frame *init_frame;
 	struct megasas_init_queue_info *initq_info;
 	dma_addr_t init_frame_h;
@@ -4269,7 +4268,7 @@
 	 */
 
 	if (megasas_issue_polled(instance, cmd)) {
-		printk(KERN_ERR "megasas: Failed to init firmware\n");
+		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
 		megasas_return_cmd(instance, cmd);
 		goto fail_fw_init;
 	}
@@ -4342,7 +4341,7 @@
 						     &instance->reply_queue_h);
 
 	if (!instance->reply_queue) {
-		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
 		goto fail_reply_queue;
 	}
 
@@ -4361,7 +4360,7 @@
 		(instance->instancet->read_fw_status_reg(reg_set) &
 		0x04000000);
 
-	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
+	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
 			instance->fw_support_ieee);
 
 	if (instance->fw_support_ieee)
@@ -4505,7 +4504,7 @@
 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
 	if (pci_request_selected_regions(instance->pdev, instance->bar,
 					 "megasas: LSI")) {
-		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
 		return -EBUSY;
 	}
 
@@ -4513,7 +4512,7 @@
 	instance->reg_set = ioremap_nocache(base_addr, 8192);
 
 	if (!instance->reg_set) {
-		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
 		goto fail_ioremap;
 	}
 
@@ -4551,7 +4550,7 @@
 			(instance, instance->reg_set);
 		atomic_set(&instance->fw_reset_no_pci_access, 0);
 		dev_info(&instance->pdev->dev,
-			"megasas: FW restarted successfully from %s!\n",
+			"FW restarted successfully from %s!\n",
 			__func__);
 
 		/*waitting for about 30 second before retry*/
@@ -4652,16 +4651,15 @@
 
 	instance->instancet->enable_intr(instance);
 
-	printk(KERN_ERR "megasas: INIT adapter done\n");
+	dev_err(&instance->pdev->dev, "INIT adapter done\n");
 
 	/** for passthrough
-	* the following function will get the PD LIST.
-	*/
-
-	memset(instance->pd_list, 0 ,
+	 * the following function will get the PD LIST.
+	 */
+	memset(instance->pd_list, 0,
 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
 	if (megasas_get_pd_list(instance) < 0) {
-		printk(KERN_ERR "megasas: failed to get PD list\n");
+		dev_err(&instance->pdev->dev, "failed to get PD list\n");
 		goto fail_get_pd_list;
 	}
 
@@ -4686,7 +4684,7 @@
 		le16_to_cpu(ctrl_info->max_strips_per_io);
 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
 
-	tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
 
 	instance->disableOnlineCtrlReset =
 	ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
@@ -4960,7 +4958,7 @@
 								  aen_cmd, 30);
 
 			if (ret_val) {
-				printk(KERN_DEBUG "megasas: Failed to abort "
+				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
 				       "previous AEN command\n");
 				return ret_val;
 			}
@@ -5051,7 +5049,7 @@
 static int megasas_io_attach(struct megasas_instance *instance)
 {
 	struct Scsi_Host *host = instance->host;
-	u32		error;
+	u32 error;
 
 	/*
 	 * Export parameters required by SCSI mid-layer
@@ -5079,7 +5077,7 @@
 				(max_sectors <= MEGASAS_MAX_SECTORS)) {
 				instance->max_sectors_per_req = max_sectors;
 			} else {
-			printk(KERN_INFO "megasas: max_sectors should be > 0"
+			dev_info(&instance->pdev->dev, "max_sectors should be > 0"
 				"and <= %d (or < 1MB for GEN2 controller)\n",
 				instance->max_sectors_per_req);
 			}
@@ -5126,7 +5124,7 @@
 megasas_set_dma_mask(struct pci_dev *pdev)
 {
 	/*
-	 * All our contollers are capable of performing 64-bit DMA
+	 * All our controllers are capable of performing 64-bit DMA
 	 */
 	if (IS_DMA64) {
 		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
@@ -5206,13 +5204,13 @@
 			       sizeof(struct megasas_instance));
 
 	if (!host) {
-		printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n");
+		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
 		goto fail_alloc_instance;
 	}
 
 	instance = (struct megasas_instance *)host->hostdata;
 	memset(instance, 0, sizeof(*instance));
-	atomic_set( &instance->fw_reset_no_pci_access, 0 );
+	atomic_set(&instance->fw_reset_no_pci_access, 0);
 	instance->pdev = pdev;
 
 	switch (instance->pdev->device) {
@@ -5226,7 +5224,7 @@
 		instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
 				instance->ctrl_context_pages);
 		if (!instance->ctrl_context) {
-			printk(KERN_DEBUG "megasas: Failed to allocate "
+			dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
 			       "memory for Fusion context info\n");
 			goto fail_alloc_dma_buf;
 		}
@@ -5245,7 +5243,7 @@
 					     &instance->consumer_h);
 
 		if (!instance->producer || !instance->consumer) {
-			printk(KERN_DEBUG "megasas: Failed to allocate"
+			dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate"
 			       "memory for producer, consumer\n");
 			goto fail_alloc_dma_buf;
 		}
@@ -5276,7 +5274,7 @@
 						CRASH_DMA_BUF_SIZE,
 						&instance->crash_dump_h);
 	if (!instance->crash_dump_buf)
-		dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+		dev_err(&pdev->dev, "Can't allocate Firmware "
 			"crash dump DMA buffer\n");
 
 	megasas_poll_wait_aen = 0;
@@ -5292,7 +5290,7 @@
 						    &instance->evt_detail_h);
 
 	if (!instance->evt_detail) {
-		printk(KERN_DEBUG "megasas: Failed to allocate memory for "
+		dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
 		       "event detail structure\n");
 		goto fail_alloc_dma_buf;
 	}
@@ -5356,7 +5354,7 @@
 				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
 						     &instance->vf_affiliation_111_h);
 			if (!instance->vf_affiliation_111)
-				printk(KERN_WARNING "megasas: Can't allocate "
+				dev_warn(&pdev->dev, "Can't allocate "
 				       "memory for VF affiliation buffer\n");
 		} else {
 			instance->vf_affiliation =
@@ -5365,7 +5363,7 @@
 						     sizeof(struct MR_LD_VF_AFFILIATION),
 						     &instance->vf_affiliation_h);
 			if (!instance->vf_affiliation)
-				printk(KERN_WARNING "megasas: Can't allocate "
+				dev_warn(&pdev->dev, "Can't allocate "
 				       "memory for VF affiliation buffer\n");
 		}
 	}
@@ -5399,7 +5397,7 @@
 	 * Initiate AEN (Asynchronous Event Notification)
 	 */
 	if (megasas_start_aen(instance)) {
-		printk(KERN_DEBUG "megasas: start aen failed\n");
+		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
 		goto fail_start_aen;
 	}
 
@@ -5409,8 +5407,8 @@
 
 	return 0;
 
-      fail_start_aen:
-      fail_io_attach:
+fail_start_aen:
+fail_io_attach:
 	megasas_mgmt_info.count--;
 	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
 	megasas_mgmt_info.max_index--;
@@ -5428,7 +5426,7 @@
 	if (instance->msix_vectors)
 		pci_disable_msix(instance->pdev);
 fail_init_mfi:
-      fail_alloc_dma_buf:
+fail_alloc_dma_buf:
 	if (instance->evt_detail)
 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
 				    instance->evt_detail,
@@ -5442,8 +5440,8 @@
 				    instance->consumer_h);
 	scsi_host_put(host);
 
-      fail_alloc_instance:
-      fail_set_dma_mask:
+fail_alloc_instance:
+fail_set_dma_mask:
 	pci_disable_device(pdev);
 
 	return -ENODEV;
@@ -5485,8 +5483,6 @@
 			" from %s\n", __func__);
 
 	megasas_return_cmd(instance, cmd);
-
-	return;
 }
 
 /**
@@ -5532,8 +5528,6 @@
 			"from %s\n", __func__);
 
 	megasas_return_cmd(instance, cmd);
-
-	return;
 }
 
 #ifdef CONFIG_PM
@@ -5607,7 +5601,7 @@
 	rval = pci_enable_device_mem(pdev);
 
 	if (rval) {
-		printk(KERN_ERR "megasas: Enable device failed\n");
+		dev_err(&pdev->dev, "Enable device failed\n");
 		return rval;
 	}
 
@@ -5686,7 +5680,7 @@
 	 * Initiate AEN (Asynchronous Event Notification)
 	 */
 	if (megasas_start_aen(instance))
-		printk(KERN_ERR "megasas: Start AEN failed\n");
+		dev_err(&instance->pdev->dev, "Start AEN failed\n");
 
 	return 0;
 
@@ -5839,8 +5833,6 @@
 	scsi_host_put(host);
 
 	pci_disable_device(pdev);
-
-	return;
 }
 
 /**
@@ -5909,11 +5901,11 @@
 {
 	unsigned int mask;
 	unsigned long flags;
+
 	poll_wait(file, &megasas_poll_wait, wait);
 	spin_lock_irqsave(&poll_aen_lock, flags);
 	if (megasas_poll_wait_aen)
-		mask =   (POLLIN | POLLRDNORM);
-
+		mask = (POLLIN | POLLRDNORM);
 	else
 		mask = 0;
 	megasas_poll_wait_aen = 0;
@@ -5927,8 +5919,7 @@
  * @cmd:	MFI command frame
  */
 
-static int megasas_set_crash_dump_params_ioctl(
-	struct megasas_cmd *cmd)
+static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
 {
 	struct megasas_instance *local_instance;
 	int i, error = 0;
@@ -5982,14 +5973,14 @@
 	memset(kbuff_arr, 0, sizeof(kbuff_arr));
 
 	if (ioc->sge_count > MAX_IOCTL_SGE) {
-		printk(KERN_DEBUG "megasas: SGE count [%d] >  max limit [%d]\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
 		       ioc->sge_count, MAX_IOCTL_SGE);
 		return -EINVAL;
 	}
 
 	cmd = megasas_get_cmd(instance);
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
 		return -ENOMEM;
 	}
 
@@ -6034,8 +6025,8 @@
 						    ioc->sgl[i].iov_len,
 						    &buf_handle, GFP_KERNEL);
 		if (!kbuff_arr[i]) {
-			printk(KERN_DEBUG "megasas: Failed to alloc "
-			       "kernel SGL buffer for IOCTL \n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
+			       "kernel SGL buffer for IOCTL\n");
 			error = -ENOMEM;
 			goto out;
 		}
@@ -6108,7 +6099,7 @@
 
 		if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
 				 sense, ioc->sense_len)) {
-			printk(KERN_ERR "megasas: Failed to copy out to user "
+			dev_err(&instance->pdev->dev, "Failed to copy out to user "
 					"sense data\n");
 			error = -EFAULT;
 			goto out;
@@ -6120,11 +6111,11 @@
 	 */
 	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
 			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
-		printk(KERN_DEBUG "megasas: Error copying out cmd_status\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
 		error = -EFAULT;
 	}
 
-      out:
+out:
 	if (sense) {
 		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
 				    sense, sense_handle);
@@ -6180,7 +6171,7 @@
 	}
 
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		printk(KERN_ERR "Controller in crit error\n");
+		dev_err(&instance->pdev->dev, "Controller in crit error\n");
 		error = -ENODEV;
 		goto out_kfree_ioc;
 	}
@@ -6205,7 +6196,7 @@
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: waiting"
+			dev_notice(&instance->pdev->dev, "waiting"
 				"for controller reset to finish\n");
 		}
 
@@ -6216,7 +6207,7 @@
 	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-		printk(KERN_ERR "megaraid_sas: timed out while"
+		dev_err(&instance->pdev->dev, "timed out while"
 			"waiting for HBA to recover\n");
 		error = -ENODEV;
 		goto out_up;
@@ -6224,10 +6215,10 @@
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
-      out_up:
+out_up:
 	up(&instance->ioctl_sem);
 
-      out_kfree_ioc:
+out_kfree_ioc:
 	kfree(ioc);
 	return error;
 }
@@ -6275,7 +6266,7 @@
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: waiting for"
+			dev_notice(&instance->pdev->dev, "waiting for"
 				"controller reset to finish\n");
 		}
 
@@ -6285,8 +6276,8 @@
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
-		printk(KERN_ERR "megaraid_sas: timed out while waiting"
-				"for HBA to recover.\n");
+		dev_err(&instance->pdev->dev, "timed out while waiting"
+				"for HBA to recover\n");
 		return -ENODEV;
 	}
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
@@ -6462,7 +6453,8 @@
 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
 {
 	int retval = count;
-	if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){
+
+	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
 		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
 		retval = -EINVAL;
 	}
@@ -6502,7 +6494,7 @@
 		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
 			break;
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: %s waiting for "
+			dev_notice(&instance->pdev->dev, "%s waiting for "
 			       "controller reset to finish for scsi%d\n",
 			       __func__, instance->host->host_no);
 		}
@@ -6524,14 +6516,12 @@
 				pd_index =
 				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
-				sdev1 =
-				scsi_device_lookup(host, i, j, 0);
+				sdev1 = scsi_device_lookup(host, i, j, 0);
 
 				if (instance->pd_list[pd_index].driveState
 						== MR_PD_STATE_SYSTEM) {
-						if (!sdev1) {
+					if (!sdev1)
 						scsi_add_device(host, i, j, 0);
-						}
 
 					if (sdev1)
 						scsi_device_put(sdev1);
@@ -6552,14 +6542,12 @@
 				pd_index =
 				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
 
-				sdev1 =
-				scsi_device_lookup(host, i, j, 0);
+				sdev1 = scsi_device_lookup(host, i, j, 0);
 
 				if (instance->pd_list[pd_index].driveState
 					== MR_PD_STATE_SYSTEM) {
-					if (sdev1) {
+					if (sdev1)
 						scsi_device_put(sdev1);
-					}
 				} else {
 					if (sdev1) {
 						scsi_remove_device(sdev1);
@@ -6644,13 +6632,13 @@
 			break;
 		}
 	} else {
-		printk(KERN_ERR "invalid evt_detail!\n");
+		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
 		kfree(ev);
 		return;
 	}
 
 	if (doscan) {
-		printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n",
+		dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
 		       instance->host->host_no);
 		if (megasas_get_pd_list(instance) == 0) {
 			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -6705,7 +6693,7 @@
 		}
 	}
 
-	if ( instance->aen_cmd != NULL ) {
+	if (instance->aen_cmd != NULL) {
 		kfree(ev);
 		return ;
 	}
@@ -6722,7 +6710,7 @@
 	mutex_unlock(&instance->aen_mutex);
 
 	if (error)
-		printk(KERN_ERR "register aen failed error %x\n", error);
+		dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
 
 	kfree(ev);
 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 46a0f8f..f0837cc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -221,7 +221,7 @@
 	struct megasas_cmd_fusion *cmd;
 
 	if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
-		printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, "
+		dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
 		       "sense pool : %p\n", fusion->sg_dma_pool,
 		       fusion->sense_dma_pool);
 		return;
@@ -332,8 +332,7 @@
 					      total_sz_chain_frame, 4,
 					      0);
 	if (!fusion->sg_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup request pool "
-		       "fusion\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
 		return -ENOMEM;
 	}
 	fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
@@ -341,8 +340,7 @@
 						 SCSI_SENSE_BUFFERSIZE, 64, 0);
 
 	if (!fusion->sense_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup sense pool "
-		       "fusion\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
 		pci_pool_destroy(fusion->sg_dma_pool);
 		fusion->sg_dma_pool = NULL;
 		return -ENOMEM;
@@ -366,7 +364,7 @@
 		 * whatever has been allocated
 		 */
 		if (!cmd->sg_frame || !cmd->sense) {
-			printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
 			megasas_teardown_frame_pool_fusion(instance);
 			return -ENOMEM;
 		}
@@ -412,7 +410,7 @@
 				   &fusion->req_frames_desc_phys, GFP_KERNEL);
 
 	if (!fusion->req_frames_desc) {
-		printk(KERN_ERR "megasas; Could not allocate memory for "
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
 		       "request_frames\n");
 		goto fail_req_desc;
 	}
@@ -423,7 +421,7 @@
 				fusion->reply_alloc_sz * count, 16, 0);
 
 	if (!fusion->reply_frames_desc_pool) {
-		printk(KERN_ERR "megasas; Could not allocate memory for "
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
 		       "reply_frame pool\n");
 		goto fail_reply_desc;
 	}
@@ -432,7 +430,7 @@
 		pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
 			       &fusion->reply_frames_desc_phys);
 	if (!fusion->reply_frames_desc) {
-		printk(KERN_ERR "megasas; Could not allocate memory for "
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
 		       "reply_frame pool\n");
 		pci_pool_destroy(fusion->reply_frames_desc_pool);
 		goto fail_reply_desc;
@@ -449,7 +447,7 @@
 				fusion->io_frames_alloc_sz, 16, 0);
 
 	if (!fusion->io_request_frames_pool) {
-		printk(KERN_ERR "megasas: Could not allocate memory for "
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
 		       "io_request_frame pool\n");
 		goto fail_io_frames;
 	}
@@ -458,7 +456,7 @@
 		pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
 			       &fusion->io_request_frames_phys);
 	if (!fusion->io_request_frames) {
-		printk(KERN_ERR "megasas: Could not allocate memory for "
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
 		       "io_request_frames frames\n");
 		pci_pool_destroy(fusion->io_request_frames_pool);
 		goto fail_io_frames;
@@ -473,7 +471,7 @@
 				   * max_cmd, GFP_KERNEL);
 
 	if (!fusion->cmd_list) {
-		printk(KERN_DEBUG "megasas: out of memory. Could not alloc "
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
 		       "memory for cmd_list_fusion\n");
 		goto fail_cmd_list;
 	}
@@ -483,7 +481,7 @@
 		fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
 					      GFP_KERNEL);
 		if (!fusion->cmd_list[i]) {
-			printk(KERN_ERR "Could not alloc cmd list fusion\n");
+			dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
 
 			for (j = 0; j < i; j++)
 				kfree(fusion->cmd_list[j]);
@@ -527,7 +525,7 @@
 	 * Create a frame pool and assign one frame to each cmd
 	 */
 	if (megasas_create_frame_pool_fusion(instance)) {
-		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
 		megasas_free_cmds_fusion(instance);
 		goto fail_req_desc;
 	}
@@ -613,7 +611,7 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_ERR "Could not allocate cmd for INIT Frame\n");
+		dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
 		ret = 1;
 		goto fail_get_cmd;
 	}
@@ -624,7 +622,7 @@
 			     &ioc_init_handle, GFP_KERNEL);
 
 	if (!IOCInitMessage) {
-		printk(KERN_ERR "Could not allocate memory for "
+		dev_err(&instance->pdev->dev, "Could not allocate memory for "
 		       "IOCInitMessage\n");
 		ret = 1;
 		goto fail_fw_init;
@@ -714,7 +712,7 @@
 		ret = 1;
 		goto fail_fw_init;
 	}
-	printk(KERN_ERR "megasas:IOC Init cmd success\n");
+	dev_err(&instance->pdev->dev, "Init cmd success\n");
 
 	ret = 0;
 
@@ -757,7 +755,7 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
 		return -ENOMEM;
 	}
 
@@ -776,7 +774,7 @@
 	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -851,8 +849,7 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get cmd for sync"
-		       "info.\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
 		return -ENOMEM;
 	}
 
@@ -1097,7 +1094,7 @@
 						       &fusion->ld_map_phys[i],
 						       GFP_KERNEL);
 		if (!fusion->ld_map[i]) {
-			printk(KERN_ERR "megasas: Could not allocate memory "
+			dev_err(&instance->pdev->dev, "Could not allocate memory "
 			       "for map info\n");
 			goto fail_map_info;
 		}
@@ -1162,7 +1159,7 @@
 		cmd->scmd->result = DID_IMM_RETRY << 16;
 		break;
 	default:
-		printk(KERN_DEBUG "megasas: FW status %#x\n", status);
+		dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
 		cmd->scmd->result = DID_ERROR << 16;
 		break;
 	}
@@ -1851,7 +1848,7 @@
 					&io_request->SGL, cmd);
 
 	if (sge_count > instance->max_num_sge) {
-		printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds "
+		dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
 		       "max (0x%x) allowed\n", sge_count,
 		       instance->max_num_sge);
 		return 1;
@@ -1885,7 +1882,7 @@
 	struct fusion_context *fusion;
 
 	if (index >= instance->max_fw_cmds) {
-		printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for "
+		dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
 		       "descriptor for scsi%d\n", index,
 			instance->host->host_no);
 		return NULL;
@@ -1927,7 +1924,7 @@
 
 	if (megasas_build_io_fusion(instance, scmd, cmd)) {
 		megasas_return_cmd_fusion(instance, cmd);
-		printk(KERN_ERR "megasas: Error building command.\n");
+		dev_err(&instance->pdev->dev, "Error building command\n");
 		cmd->request_desc = NULL;
 		return 1;
 	}
@@ -1937,7 +1934,7 @@
 
 	if (cmd->io_request->ChainOffset != 0 &&
 	    cmd->io_request->ChainOffset != 0xF)
-		printk(KERN_ERR "megasas: The chain offset value is not "
+		dev_err(&instance->pdev->dev, "The chain offset value is not "
 		       "correct : %x\n", cmd->io_request->ChainOffset);
 
 	/*
@@ -2025,7 +2022,7 @@
 			if (reply_descript_type ==
 			    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
 				if (megasas_dbg_lvl == 5)
-					printk(KERN_ERR "\nmegasas: FAST Path "
+					dev_err(&instance->pdev->dev, "\nFAST Path "
 					       "IO Success\n");
 			}
 			/* Fall thru and complete IO */
@@ -2186,7 +2183,7 @@
 			else if (fw_state == MFI_STATE_FAULT)
 				schedule_work(&instance->work_init);
 		} else if (fw_state == MFI_STATE_FAULT) {
-			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
+			dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
 			       "for scsi%d\n", instance->host->host_no);
 			schedule_work(&instance->work_init);
 		}
@@ -2269,7 +2266,7 @@
 	u16 index;
 
 	if (build_mpt_mfi_pass_thru(instance, cmd)) {
-		printk(KERN_ERR "Couldn't build MFI pass thru cmd\n");
+		dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
 		return NULL;
 	}
 
@@ -2303,7 +2300,7 @@
 
 	req_desc = build_mpt_cmd(instance, cmd);
 	if (!req_desc) {
-		printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
+		dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
 		return;
 	}
 	megasas_fire_cmd_fusion(instance, req_desc);
@@ -2413,7 +2410,7 @@
 		fw_state = instance->instancet->read_fw_status_reg(
 			instance->reg_set) & MFI_STATE_MASK;
 		if (fw_state == MFI_STATE_FAULT) {
-			printk(KERN_WARNING "megasas: Found FW in FAULT state,"
+			dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
 			       " will reset adapter scsi%d.\n",
 				instance->host->host_no);
 			retval = 1;
@@ -2436,7 +2433,7 @@
 				hb_seconds_missed++;
 				if (hb_seconds_missed ==
 				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
-					printk(KERN_WARNING "megasas: SR-IOV:"
+					dev_warn(&instance->pdev->dev, "SR-IOV:"
 					       " Heartbeat never completed "
 					       " while polling during I/O "
 					       " timeout handling for "
@@ -2454,7 +2451,7 @@
 			goto out;
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
 			       "commands to complete for scsi%d\n", i,
 			       outstanding, instance->host->host_no);
 			megasas_complete_cmd_dpc_fusion(
@@ -2464,7 +2461,7 @@
 	}
 
 	if (atomic_read(&instance->fw_outstanding)) {
-		printk("megaraid_sas: pending commands remain after waiting, "
+		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
 		       "will reset adapter scsi%d.\n",
 		       instance->host->host_no);
 		retval = 1;
@@ -2564,7 +2561,7 @@
 	mutex_lock(&instance->reset_mutex);
 
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
+		dev_warn(&instance->pdev->dev, "Hardware critical error, "
 		       "returning FAILED for scsi%d.\n",
 			instance->host->host_no);
 		mutex_unlock(&instance->reset_mutex);
@@ -2618,7 +2615,7 @@
 	if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
 						&convert)) {
 		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
-		printk(KERN_WARNING "megaraid_sas: resetting fusion "
+		dev_warn(&instance->pdev->dev, "resetting fusion "
 		       "adapter scsi%d.\n", instance->host->host_no);
 		if (convert)
 			iotimeout = 0;
@@ -2645,7 +2642,7 @@
 		if (instance->disableOnlineCtrlReset ||
 		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
 			/* Reset not supported, kill adapter */
-			printk(KERN_WARNING "megaraid_sas: Reset not supported"
+			dev_warn(&instance->pdev->dev, "Reset not supported"
 			       ", killing adapter scsi%d.\n",
 				instance->host->host_no);
 			megaraid_sas_kill_hba(instance);
@@ -2663,7 +2660,7 @@
 			     instance->hb_host_mem->HB.driverCounter)) {
 					instance->hb_host_mem->HB.driverCounter =
 						instance->hb_host_mem->HB.fwCounter;
-					printk(KERN_WARNING "megasas: SR-IOV:"
+					dev_warn(&instance->pdev->dev, "SR-IOV:"
 					       "Late FW heartbeat update for "
 					       "scsi%d.\n",
 					       instance->host->host_no);
@@ -2679,8 +2676,8 @@
 					abs_state = status_reg &
 						MFI_STATE_MASK;
 					if (abs_state == MFI_STATE_READY) {
-						printk(KERN_WARNING "megasas"
-						       ": SR-IOV: FW was found"
+						dev_warn(&instance->pdev->dev,
+						       "SR-IOV: FW was found"
 						       "to be in ready state "
 						       "for scsi%d.\n",
 						       instance->host->host_no);
@@ -2689,7 +2686,7 @@
 					msleep(20);
 				}
 				if (abs_state != MFI_STATE_READY) {
-					printk(KERN_WARNING "megasas: SR-IOV: "
+					dev_warn(&instance->pdev->dev, "SR-IOV: "
 					       "FW not in ready state after %d"
 					       " seconds for scsi%d, status_reg = "
 					       "0x%x.\n",
@@ -2731,7 +2728,7 @@
 				host_diag =
 				readl(&instance->reg_set->fusion_host_diag);
 				if (retry++ == 100) {
-					printk(KERN_WARNING "megaraid_sas: "
+					dev_warn(&instance->pdev->dev,
 					       "Host diag unlock failed! "
 					       "for scsi%d\n",
 						instance->host->host_no);
@@ -2754,7 +2751,7 @@
 				host_diag =
 				readl(&instance->reg_set->fusion_host_diag);
 				if (retry++ == 1000) {
-					printk(KERN_WARNING "megaraid_sas: "
+					dev_warn(&instance->pdev->dev,
 					       "Diag reset adapter never "
 					       "cleared for scsi%d!\n",
 						instance->host->host_no);
@@ -2777,7 +2774,7 @@
 					instance->reg_set) & MFI_STATE_MASK;
 			}
 			if (abs_state <= MFI_STATE_FW_INIT) {
-				printk(KERN_WARNING "megaraid_sas: firmware "
+				dev_warn(&instance->pdev->dev, "firmware "
 				       "state < MFI_STATE_FW_INIT, state = "
 				       "0x%x for scsi%d\n", abs_state,
 					instance->host->host_no);
@@ -2786,7 +2783,7 @@
 
 			/* Wait for FW to become ready */
 			if (megasas_transition_to_ready(instance, 1)) {
-				printk(KERN_WARNING "megaraid_sas: Failed to "
+				dev_warn(&instance->pdev->dev, "Failed to "
 				       "transition controller to ready "
 				       "for scsi%d.\n",
 				       instance->host->host_no);
@@ -2795,7 +2792,7 @@
 
 			megasas_reset_reply_desc(instance);
 			if (megasas_ioc_init_fusion(instance)) {
-				printk(KERN_WARNING "megaraid_sas: "
+				dev_warn(&instance->pdev->dev,
 				       "megasas_ioc_init_fusion() failed!"
 				       " for scsi%d\n",
 				       instance->host->host_no);
@@ -2836,7 +2833,7 @@
 			}
 
 			/* Adapter reset completed successfully */
-			printk(KERN_WARNING "megaraid_sas: Reset "
+			dev_warn(&instance->pdev->dev, "Reset "
 			       "successful for scsi%d.\n",
 				instance->host->host_no);
 
@@ -2852,7 +2849,7 @@
 			goto out;
 		}
 		/* Reset failed, kill the adapter */
-		printk(KERN_WARNING "megaraid_sas: Reset failed, killing "
+		dev_warn(&instance->pdev->dev, "Reset failed, killing "
 		       "adapter scsi%d.\n", instance->host->host_no);
 		megaraid_sas_kill_hba(instance);
 		instance->skip_heartbeat_timer_del = 1;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 11248de..6dec7cf 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1557,7 +1557,8 @@
 		goto out_fail;
 	}
 
-	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
+	     (!memap_sz || !pio_sz); i++) {
 		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 			if (pio_sz)
 				continue;
@@ -1572,16 +1573,17 @@
 				chip_phys = (u64)ioc->chip_phys;
 				memap_sz = pci_resource_len(pdev, i);
 				ioc->chip = ioremap(ioc->chip_phys, memap_sz);
-				if (ioc->chip == NULL) {
-					printk(MPT2SAS_ERR_FMT "unable to map "
-					    "adapter memory!\n", ioc->name);
-					r = -EINVAL;
-					goto out_fail;
-				}
 			}
 		}
 	}
 
+	if (ioc->chip == NULL) {
+		printk(MPT2SAS_ERR_FMT "unable to map adapter memory! "
+		       "or resource not found\n", ioc->name);
+		r = -EINVAL;
+		goto out_fail;
+	}
+
 	_base_mask_interrupts(ioc);
 
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 14a781b..43f87e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1843,7 +1843,8 @@
 		goto out_fail;
 	}
 
-	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
+	     (!memap_sz || !pio_sz); i++) {
 		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
 			if (pio_sz)
 				continue;
@@ -1856,15 +1857,16 @@
 			chip_phys = (u64)ioc->chip_phys;
 			memap_sz = pci_resource_len(pdev, i);
 			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
-			if (ioc->chip == NULL) {
-				pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
-					ioc->name);
-				r = -EINVAL;
-				goto out_fail;
-			}
 		}
 	}
 
+	if (ioc->chip == NULL) {
+		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
+			" or resource not found\n", ioc->name);
+		r = -EINVAL;
+		goto out_fail;
+	}
+
 	_base_mask_interrupts(ioc);
 
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index d40d734..f466a6a 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -338,8 +338,11 @@
 
 	res_start = pci_resource_start(pdev, bar);
 	res_len = pci_resource_len(pdev, bar);
-	if (!res_start || !res_len)
+	if (!res_start || !res_len) {
+		iounmap(mvi->regs_ex);
+		mvi->regs_ex = NULL;
 		goto err_out;
+	}
 
 	res_flag = pci_resource_flags(pdev, bar);
 	if (res_flag & IORESOURCE_CACHEABLE)
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 74a4bb9..f14ec6e 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -49,13 +49,15 @@
 	chip_8019,
 	chip_8074,
 	chip_8076,
-	chip_8077
+	chip_8077,
+	chip_8006,
 };
 
 enum phy_speed {
 	PHY_SPEED_15 = 0x01,
 	PHY_SPEED_30 = 0x02,
 	PHY_SPEED_60 = 0x04,
+	PHY_SPEED_120 = 0x08,
 };
 
 enum data_direction {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 96dcc09..39306b1 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3263,6 +3263,10 @@
 	struct sas_phy *sas_phy = phy->sas_phy.phy;
 
 	switch (link_rate) {
+	case PHY_SPEED_120:
+		phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
+		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
+		break;
 	case PHY_SPEED_60:
 		phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
 		phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a132f26..5c0356f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -57,6 +57,7 @@
 	[chip_8074] = {0,  8, &pm8001_80xx_dispatch,},
 	[chip_8076] = {0,  16, &pm8001_80xx_dispatch,},
 	[chip_8077] = {0,  16, &pm8001_80xx_dispatch,},
+	[chip_8006] = {0,  16, &pm8001_80xx_dispatch,},
 };
 static int pm8001_id;
 
@@ -1107,6 +1108,8 @@
  */
 static struct pci_device_id pm8001_pci_table[] = {
 	{ PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
+	{ PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
 	{ PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
 	/* Support for SPC/SPCv/SPCve controllers */
 	{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
@@ -1217,7 +1220,7 @@
 MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
 MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
 MODULE_DESCRIPTION(
-		"PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 "
+		"PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077 "
 		"SAS/SATA controller driver");
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b93f289..949198c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -790,6 +790,7 @@
 		ccb->device = pm8001_dev;
 		ccb->ccb_tag = ccb_tag;
 		ccb->task = task;
+		ccb->n_elem = 0;
 
 		res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
 			pm8001_dev, flag, task_tag, ccb_tag);
@@ -975,19 +976,27 @@
 	phy = sas_get_local_phy(dev);
 
 	if (dev_is_sata(dev)) {
-		DECLARE_COMPLETION_ONSTACK(completion_setstate);
 		if (scsi_is_sas_phy_local(phy)) {
 			rc = 0;
 			goto out;
 		}
 		rc = sas_phy_reset(phy, 1);
+		if (rc) {
+			PM8001_EH_DBG(pm8001_ha,
+			pm8001_printk("phy reset failed for device %x\n"
+			"with rc %d\n", pm8001_dev->device_id, rc));
+			rc = TMF_RESP_FUNC_FAILED;
+			goto out;
+		}
 		msleep(2000);
 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
 			dev, 1, 0);
-		pm8001_dev->setds_completion = &completion_setstate;
-		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
-			pm8001_dev, 0x01);
-		wait_for_completion(&completion_setstate);
+		if (rc) {
+			PM8001_EH_DBG(pm8001_ha,
+			pm8001_printk("task abort failed %x\n"
+			"with rc %d\n", pm8001_dev->device_id, rc));
+			rc = TMF_RESP_FUNC_FAILED;
+		}
 	} else {
 		rc = sas_phy_reset(phy, 1);
 		msleep(2000);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 8dd8b78..e2e97db 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
 #include "pm8001_defs.h"
 
 #define DRV_NAME		"pm80xx"
-#define DRV_VERSION		"0.1.37"
+#define DRV_VERSION		"0.1.38"
 #define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING	0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
@@ -241,7 +241,7 @@
 struct pm8001_port {
 	struct asd_sas_port	sas_port;
 	u8			port_attached;
-	u8			wide_port_phymap;
+	u16			wide_port_phymap;
 	u8			port_state;
 	struct list_head	list;
 };
@@ -569,6 +569,14 @@
 #define	NCQ_READ_LOG_FLAG			0x80000000
 #define	NCQ_ABORT_ALL_FLAG			0x40000000
 #define	NCQ_2ND_RLE_FLAG			0x20000000
+
+/* Device states */
+#define DS_OPERATIONAL				0x01
+#define DS_PORT_IN_RESET			0x02
+#define DS_IN_RECOVERY				0x03
+#define DS_IN_ERROR				0x04
+#define DS_NON_OPERATIONAL			0x07
+
 /**
  * brief param structure for firmware flash update.
  */
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 05cce46..0e1628f 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -309,6 +309,9 @@
 		pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
 		pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+	/* read port recovery and reset timeout */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
+		pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
 }
 
 /**
@@ -585,6 +588,12 @@
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
 	pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
+							PORT_RECOVERY_TIMEOUT;
+	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+			pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
 }
 
 /**
@@ -843,6 +852,7 @@
 	int rc;
 	u32 tag;
 	u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+	u32 page_code;
 
 	memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -851,8 +861,14 @@
 
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	payload.tag = cpu_to_le32(tag);
+
+	if (IS_SPCV_12G(pm8001_ha->pdev))
+		page_code = THERMAL_PAGE_CODE_7H;
+	else
+		page_code = THERMAL_PAGE_CODE_8H;
+
 	payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
-			(THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+				(THERMAL_ENABLE << 8) | page_code;
 	payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
 
 	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
@@ -1593,6 +1609,13 @@
 		ts->stat = SAS_OPEN_REJECT;
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 		break;
+	case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
 	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
 		PM8001_IO_DBG(pm8001_ha,
 		pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
@@ -2829,6 +2852,32 @@
 static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 	u32 phyId, u32 phy_op);
 
+static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
+					void *piomb)
+{
+	struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+	u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u8 deviceType = pPayload->sas_identify.dev_type;
+	u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+
+	if (deviceType == SAS_END_DEVICE) {
+		pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+					PHY_NOTIFY_ENABLE_SPINUP);
+	}
+
+	port->wide_port_phymap |= (1U << phy_id);
+	pm8001_get_lrate_mode(phy, link_rate);
+	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+	phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+	phy->phy_attached = 1;
+}
+
 /**
  * hw_event_sas_phy_up -FW tells me a SAS phy up event.
  * @pm8001_ha: our hba card information
@@ -2856,6 +2905,7 @@
 	unsigned long flags;
 	u8 deviceType = pPayload->sas_identify.dev_type;
 	port->port_state = portstate;
+	port->wide_port_phymap |= (1U << phy_id);
 	phy->phy_state = PHY_STATE_LINK_UP_SPCV;
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
 		"portid:%d; phyid:%d; linkrate:%d; "
@@ -2981,7 +3031,6 @@
 	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	port->port_state = portstate;
-	phy->phy_type = 0;
 	phy->identify.device_type = 0;
 	phy->phy_attached = 0;
 	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
@@ -2993,9 +3042,13 @@
 			pm8001_printk(" PortInvalid portID %d\n", port_id));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
-		port->port_attached = 0;
-		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
-			port_id, phy_id, 0, 0);
+		if (phy->phy_type & PORT_TYPE_SATA) {
+			phy->phy_type = 0;
+			port->port_attached = 0;
+			pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+					port_id, phy_id, 0, 0);
+		}
+		sas_phy_disconnected(&phy->sas_phy);
 		break;
 	case PORT_IN_RESET:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3003,22 +3056,26 @@
 		break;
 	case PORT_NOT_ESTABLISHED:
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+			pm8001_printk(" Phy Down and PORT_NOT_ESTABLISHED\n"));
 		port->port_attached = 0;
 		break;
 	case PORT_LOSTCOMM:
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+			pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
-		port->port_attached = 0;
-		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
-			port_id, phy_id, 0, 0);
+		if (phy->phy_type & PORT_TYPE_SATA) {
+			port->port_attached = 0;
+			phy->phy_type = 0;
+			pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+					port_id, phy_id, 0, 0);
+		}
+		sas_phy_disconnected(&phy->sas_phy);
 		break;
 	default:
 		port->port_attached = 0;
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" phy Down and(default) = 0x%x\n",
+			pm8001_printk(" Phy Down and(default) = 0x%x\n",
 			portstate));
 		break;
 
@@ -3084,7 +3141,7 @@
  */
 static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
-	unsigned long flags;
+	unsigned long flags, i;
 	struct hw_event_resp *pPayload =
 		(struct hw_event_resp *)(piomb + 4);
 	u32 lr_status_evt_portid =
@@ -3097,9 +3154,9 @@
 		(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
 	u8 status =
 		(u8)((lr_status_evt_portid & 0x0F000000) >> 24);
-
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
 	PM8001_MSG_DBG(pm8001_ha,
 		pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
@@ -3125,7 +3182,9 @@
 	case HW_EVENT_PHY_DOWN:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("HW_EVENT_PHY_DOWN\n"));
-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+		if (phy->phy_type & PORT_TYPE_SATA)
+			sas_ha->notify_phy_event(&phy->sas_phy,
+				PHYE_LOSS_OF_SIGNAL);
 		phy->phy_attached = 0;
 		phy->phy_state = 0;
 		hw_event_phy_down(pm8001_ha, piomb);
@@ -3169,9 +3228,6 @@
 			pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3179,9 +3235,6 @@
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3189,9 +3242,6 @@
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_CODE_VIOLATION,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
 		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
@@ -3199,9 +3249,6 @@
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_MALFUNCTION:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3257,13 +3304,19 @@
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_PORT_RECOVERY_TIMER_TMO,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+			if (port->wide_port_phymap & (1 << i)) {
+				phy = &pm8001_ha->phy[i];
+				sas_ha->notify_phy_event(&phy->sas_phy,
+						PHYE_LOSS_OF_SIGNAL);
+				port->wide_port_phymap &= ~(1 << i);
+			}
+		}
 		break;
 	case HW_EVENT_PORT_RECOVER:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+		hw_event_port_recover(pm8001_ha, piomb);
 		break;
 	case HW_EVENT_PORT_RESET_COMPLETE:
 		PM8001_MSG_DBG(pm8001_ha,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 9970a38..7a443ba 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -177,7 +177,8 @@
 /* Thermal related */
 #define	THERMAL_ENABLE			0x1
 #define	THERMAL_LOG_ENABLE		0x1
-#define THERMAL_OP_CODE			0x6
+#define THERMAL_PAGE_CODE_7H		0x6
+#define THERMAL_PAGE_CODE_8H		0x7
 #define LTEMPHIL			 70
 #define RTEMPHIL			100
 
@@ -1174,7 +1175,7 @@
 #define IO_XFER_ERROR_INTERNAL_CRC_ERROR	0x54
 #define MPI_IO_RQE_BUSY_FULL			0x55
 #define IO_XFER_ERR_EOB_DATA_OVERRUN		0x56
-#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME	0x57
+#define IO_XFER_ERROR_INVALID_SSP_RSP_FRAME	0x57
 #define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED	0x58
 
 #define MPI_ERR_IO_RESOURCE_UNAVAILABLE		0x1004
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 437254e..6b942d9 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -884,7 +884,6 @@
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
 	int rval;
-	uint16_t actual_size;
 
 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
 		return 0;
@@ -901,7 +900,6 @@
 	}
 
 do_read:
-	actual_size = 0;
 	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
 
 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
@@ -1079,8 +1077,7 @@
 			char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    vha->hw->model_desc ? vha->hw->model_desc : "");
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
 }
 
 static ssize_t
@@ -1348,7 +1345,8 @@
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
+	    !IS_QLA27XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1537,6 +1535,20 @@
 	return strlen(buf);
 }
 
+static ssize_t
+qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
+	char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +1593,7 @@
 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
 		   qla2x00_allow_cna_fw_dump_show,
 		   qla2x00_allow_cna_fw_dump_store);
+static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1614,6 +1627,7 @@
 	&dev_attr_diag_megabytes,
 	&dev_attr_fw_dump_size,
 	&dev_attr_allow_cna_fw_dump,
+	&dev_attr_pep_version,
 	NULL,
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2e2bb6f..c26acde 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -405,7 +405,7 @@
 	return rval;
 }
 
-inline uint16_t
+static inline uint16_t
 qla24xx_calc_ct_iocbs(uint16_t dsds)
 {
 	uint16_t iocbs;
@@ -1733,7 +1733,6 @@
 	struct Scsi_Host *host = bsg_job->shost;
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
-	uint16_t thread_id;
 	uint32_t rval = EXT_STATUS_OK;
 	uint16_t req_sg_cnt = 0;
 	uint16_t rsp_sg_cnt = 0;
@@ -1790,8 +1789,6 @@
 		goto done;
 	}
 
-	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
-
 	mutex_lock(&ha->selflogin_lock);
 	if (vha->self_login_loop_id == 0) {
 		/* Initialize all required  fields of fcport */
@@ -2174,7 +2171,6 @@
 {
 	int ret = -EINVAL;
 	struct fc_rport *rport;
-	fc_port_t *fcport = NULL;
 	struct Scsi_Host *host;
 	scsi_qla_host_t *vha;
 
@@ -2183,7 +2179,6 @@
 
 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
 		rport = bsg_job->rport;
-		fcport = *(fc_port_t **) rport->dd_data;
 		host = rport_to_shost(rport);
 		vha = shost_priv(host);
 	} else {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 8b011ae..34dc9a3 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -19,14 +19,14 @@
  * | Device Discovery             |       0x2016       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2099-0x20a4  |
- * | Queue Command and IO tracing |       0x3059       | 0x300b         |
+ * | Queue Command and IO tracing |       0x3075       | 0x300b         |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
  * |                              |                    | 0x302d,0x3033  |
  * |                              |                    | 0x3036,0x3038  |
  * |                              |                    | 0x303a		|
  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
- * | Async Events                 |       0x5087       | 0x502b-0x502f  |
+ * | Async Events                 |       0x508a       | 0x502b-0x502f  |
  * |                              |                    | 0x5047		|
  * |                              |                    | 0x5084,0x5075	|
  * |                              |                    | 0x503d,0x5044  |
@@ -117,7 +117,7 @@
 {
 	int rval;
 	uint32_t cnt, stat, timer, dwords, idx;
-	uint16_t mb0, mb1;
+	uint16_t mb0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	dma_addr_t dump_dma = ha->gid_list_dma;
 	uint32_t *dump = (uint32_t *)ha->gid_list;
@@ -161,7 +161,7 @@
 					    &ha->mbx_cmd_flags);
 
 					mb0 = RD_REG_WORD(&reg->mailbox0);
-					mb1 = RD_REG_WORD(&reg->mailbox1);
+					RD_REG_WORD(&reg->mailbox1);
 
 					WRT_REG_DWORD(&reg->hccr,
 					    HCCRX_CLR_RISC_INT);
@@ -486,7 +486,7 @@
 		return ptr;
 
 	*last_chain = &fcec->type;
-	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
+	fcec->type = htonl(DUMP_CHAIN_FCE);
 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
 	    fce_calc_size(ha->fce_bufs));
 	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
@@ -527,7 +527,7 @@
 		/* aqp = ha->atio_q_map[que]; */
 		q = ptr;
 		*last_chain = &q->type;
-		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->type = htonl(DUMP_CHAIN_QUEUE);
 		q->chain_size = htonl(
 		    sizeof(struct qla2xxx_mqueue_chain) +
 		    sizeof(struct qla2xxx_mqueue_header) +
@@ -536,7 +536,7 @@
 
 		/* Add header. */
 		qh = ptr;
-		qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+		qh->queue = htonl(TYPE_ATIO_QUEUE);
 		qh->number = htonl(que);
 		qh->size = htonl(aqp->length * sizeof(request_t));
 		ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -571,7 +571,7 @@
 		/* Add chain. */
 		q = ptr;
 		*last_chain = &q->type;
-		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->type = htonl(DUMP_CHAIN_QUEUE);
 		q->chain_size = htonl(
 		    sizeof(struct qla2xxx_mqueue_chain) +
 		    sizeof(struct qla2xxx_mqueue_header) +
@@ -580,7 +580,7 @@
 
 		/* Add header. */
 		qh = ptr;
-		qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+		qh->queue = htonl(TYPE_REQUEST_QUEUE);
 		qh->number = htonl(que);
 		qh->size = htonl(req->length * sizeof(request_t));
 		ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -599,7 +599,7 @@
 		/* Add chain. */
 		q = ptr;
 		*last_chain = &q->type;
-		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->type = htonl(DUMP_CHAIN_QUEUE);
 		q->chain_size = htonl(
 		    sizeof(struct qla2xxx_mqueue_chain) +
 		    sizeof(struct qla2xxx_mqueue_header) +
@@ -608,7 +608,7 @@
 
 		/* Add header. */
 		qh = ptr;
-		qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
 		qh->number = htonl(que);
 		qh->size = htonl(rsp->length * sizeof(response_t));
 		ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -627,15 +627,15 @@
 	uint32_t cnt, que_idx;
 	uint8_t que_cnt;
 	struct qla2xxx_mq_chain *mq = ptr;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 
 	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		return ptr;
 
 	mq = ptr;
 	*last_chain = &mq->type;
-	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
-	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
+	mq->type = htonl(DUMP_CHAIN_MQ);
+	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
 
 	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
 		ha->max_req_queues : ha->max_rsp_queues;
@@ -695,8 +695,10 @@
 
 	flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd002,
@@ -832,8 +834,12 @@
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla2300_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 /**
@@ -859,8 +865,10 @@
 	mb0 = mb2 = 0;
 	flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd004,
@@ -1030,8 +1038,12 @@
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla2100_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
@@ -1039,7 +1051,6 @@
 {
 	int		rval;
 	uint32_t	cnt;
-	uint32_t	risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1047,7 +1058,6 @@
 	uint16_t __iomem *mbx_reg;
 	unsigned long	flags;
 	struct qla24xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt;
 	void		*nxt_chain;
 	uint32_t	*last_chain = NULL;
@@ -1056,12 +1066,13 @@
 	if (IS_P3P_TYPE(ha))
 		return;
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd006,
@@ -1274,8 +1285,8 @@
 	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -1285,8 +1296,12 @@
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla24xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
@@ -1294,7 +1309,6 @@
 {
 	int		rval;
 	uint32_t	cnt;
-	uint32_t	risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1302,17 +1316,17 @@
 	uint16_t __iomem *mbx_reg;
 	unsigned long	flags;
 	struct qla25xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt, *nxt_chain;
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd008,
@@ -1329,7 +1343,7 @@
 	}
 	fw = &ha->fw_dump->isp.isp25;
 	qla2xxx_prep_dump(ha, ha->fw_dump);
-	ha->fw_dump->version = __constant_htonl(2);
+	ha->fw_dump->version = htonl(2);
 
 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
@@ -1593,8 +1607,8 @@
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -1604,8 +1618,12 @@
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla25xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
@@ -1613,7 +1631,6 @@
 {
 	int		rval;
 	uint32_t	cnt;
-	uint32_t	risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1621,17 +1638,17 @@
 	uint16_t __iomem *mbx_reg;
 	unsigned long	flags;
 	struct qla81xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt, *nxt_chain;
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd00a,
@@ -1914,8 +1931,8 @@
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -1925,16 +1942,19 @@
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla81xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int		rval;
-	uint32_t	cnt, reg_data;
-	uint32_t	risc_address;
+	uint32_t	cnt;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1942,17 +1962,17 @@
 	uint16_t __iomem *mbx_reg;
 	unsigned long	flags;
 	struct qla83xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt, *nxt_chain;
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd00c,
@@ -1979,16 +1999,16 @@
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
 	dmp_reg = &reg->iobase_window;
-	reg_data = RD_REG_DWORD(dmp_reg);
+	RD_REG_DWORD(dmp_reg);
 	WRT_REG_DWORD(dmp_reg, 0);
 
 	dmp_reg = &reg->unused_4_1[0];
-	reg_data = RD_REG_DWORD(dmp_reg);
+	RD_REG_DWORD(dmp_reg);
 	WRT_REG_DWORD(dmp_reg, 0);
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
 	dmp_reg = &reg->unused_4_1[2];
-	reg_data = RD_REG_DWORD(dmp_reg);
+	RD_REG_DWORD(dmp_reg);
 	WRT_REG_DWORD(dmp_reg, 0);
 
 	/* select PCR and disable ecc checking and correction */
@@ -2420,8 +2440,8 @@
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -2431,8 +2451,12 @@
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla83xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 /****************************************************************************/
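For context on the #ifndef __CHECKER__ guards added around the conditional lock/unlock pairs in these dump routines: sparse defines __CHECKER__ and cannot model a spinlock that is taken only when !hardware_locked, so it would report a context imbalance; compiling the pair out of checker builds avoids the false warning. A minimal sketch of the pattern, using a hypothetical demo_lock and demo_fw_dump() rather than the driver's own names:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */

	static void demo_fw_dump(int hardware_locked)
	{
		unsigned long flags = 0;

	#ifndef __CHECKER__
		/* Take the lock only when the caller does not already hold it;
		 * sparse cannot follow this, so hide it from checker builds.
		 */
		if (!hardware_locked)
			spin_lock_irqsave(&demo_lock, flags);
	#endif

		/* ... capture hardware state ... */

	#ifndef __CHECKER__
		if (!hardware_locked)
			spin_unlock_irqrestore(&demo_lock, flags);
	#endif
	}

The lone ';' kept in the driver's #else branches is the null statement that labels such as qla25xx_fw_dump_failed: still need once the guarded unlock is compiled away.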
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9ad819e..388d790 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3061,6 +3061,7 @@
 #define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
 #define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
 #define PCI_DEVICE_ID_QLOGIC_ISP2271	0x2271
+#define PCI_DEVICE_ID_QLOGIC_ISP2261	0x2261
 
 	uint32_t	device_type;
 #define DT_ISP2100                      BIT_0
@@ -3084,7 +3085,8 @@
 #define DT_ISP8044			BIT_18
 #define DT_ISP2071			BIT_19
 #define DT_ISP2271			BIT_20
-#define DT_ISP_LAST			(DT_ISP2271 << 1)
+#define DT_ISP2261			BIT_21
+#define DT_ISP_LAST			(DT_ISP2261 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -3116,6 +3118,7 @@
 #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
 #define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
 #define IS_QLA2271(ha)	(DT_MASK(ha) & DT_ISP2271)
+#define IS_QLA2261(ha)	(DT_MASK(ha) & DT_ISP2261)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -3124,7 +3127,7 @@
 #define IS_QLA25XX(ha)  (IS_QLA2532(ha))
 #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
-#define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha))
+#define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 				IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3166,6 +3169,7 @@
 #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
 #define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
 #define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_FAWWN_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -3288,6 +3292,7 @@
 	uint8_t		mpi_version[3];
 	uint32_t	mpi_capabilities;
 	uint8_t		phy_version[3];
+	uint8_t		pep_version[3];
 
 	/* Firmware dump template */
 	void		*fw_dump_template;
@@ -3420,9 +3425,9 @@
 	mempool_t       *ctx_mempool;
 #define FCP_CMND_DMA_POOL_SIZE 512
 
-	unsigned long	nx_pcibase;		/* Base I/O address */
-	uint8_t		*nxdb_rd_ptr;		/* Doorbell read pointer */
-	unsigned long	nxdb_wr_ptr;		/* Door bell write pointer */
+	void __iomem	*nx_pcibase;		/* Base I/O address */
+	void __iomem	*nxdb_rd_ptr;		/* Doorbell read pointer */
+	void __iomem	*nxdb_wr_ptr;		/* Door bell write pointer */
 
 	uint32_t	crb_win;
 	uint32_t	curr_window;
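Switching nx_pcibase and the two doorbell pointers to void __iomem * above lets sparse check that these MMIO mappings are only touched through the register accessors. The general convention, sketched with a hypothetical device structure and register offset (not the driver's real layout):

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/types.h>

	struct demo_hw {
		void __iomem *regs;	/* MMIO cookie, only used via readl()/writel() */
	};

	static int demo_map_bar(struct demo_hw *hw, phys_addr_t bar, size_t len)
	{
		hw->regs = ioremap(bar, len);
		if (!hw->regs)
			return -ENOMEM;

		writel(0, hw->regs + 0x10);	/* ring a doorbell at offset 0x10 */
		(void)readl(hw->regs + 0x10);	/* read back to flush the posted write */
		return 0;
	}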
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index dccc4dc..94e8a85 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -35,10 +35,10 @@
 	ms_pkt->entry_type = MS_IOCB_TYPE;
 	ms_pkt->entry_count = 1;
 	SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
-	ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
+	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
-	ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
-	ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
+	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ms_pkt->total_dsd_count = cpu_to_le16(2);
 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
 	ms_pkt->req_bytecount = cpu_to_le32(req_size);
 
@@ -74,10 +74,10 @@
 
 	ct_pkt->entry_type = CT_IOCB_TYPE;
 	ct_pkt->entry_count = 1;
-	ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS);
+	ct_pkt->nport_handle = cpu_to_le16(NPH_SNS);
 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
-	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
-	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 
@@ -142,7 +142,7 @@
 		case CS_DATA_UNDERRUN:
 		case CS_DATA_OVERRUN:		/* Overrun? */
 			if (ct_rsp->header.response !=
-			    __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
+			    cpu_to_be16(CT_ACCEPT_RESPONSE)) {
 				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
 				    "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
 				    routine, vha->d_id.b.domain,
@@ -1153,10 +1153,10 @@
 	ms_pkt->entry_type = MS_IOCB_TYPE;
 	ms_pkt->entry_count = 1;
 	SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
-	ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG);
+	ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
 	ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
-	ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
-	ms_pkt->total_dsd_count = __constant_cpu_to_le16(2);
+	ms_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ms_pkt->total_dsd_count = cpu_to_le16(2);
 	ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
 	ms_pkt->req_bytecount = cpu_to_le32(req_size);
 
@@ -1193,8 +1193,8 @@
 	ct_pkt->entry_count = 1;
 	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
-	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
-	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 
@@ -1281,19 +1281,19 @@
 
 	/* Prepare FDMI command arguments -- attribute block, attributes. */
 	memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
-	ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1);
+	ct_req->req.rhba.entry_count = cpu_to_be32(1);
 	memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
 	size = 2 * WWN_SIZE + 4 + 4;
 
 	/* Attributes */
 	ct_req->req.rhba.attrs.count =
-	    __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT);
+	    cpu_to_be32(FDMI_HBA_ATTR_COUNT);
 	entries = ct_req->req.rhba.hba_identifier;
 
 	/* Nodename. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
-	eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
+	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
 	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
 	size += 4 + WWN_SIZE;
 
@@ -1302,7 +1302,7 @@
 
 	/* Manufacturer. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
+	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
 	alen = strlen(QLA2XXX_MANUFACTURER);
 	snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
 	    "%s", "QLogic Corporation");
@@ -1315,7 +1315,7 @@
 
 	/* Serial number. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
 	if (IS_FWI2_CAPABLE(ha))
 		qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
 		    sizeof(eiter->a.serial_num));
@@ -1335,7 +1335,7 @@
 
 	/* Model name. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
 	snprintf(eiter->a.model, sizeof(eiter->a.model),
 	    "%s", ha->model_number);
 	alen = strlen(eiter->a.model);
@@ -1348,7 +1348,7 @@
 
 	/* Model description. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
 	snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
 	    "%s", ha->model_desc);
 	alen = strlen(eiter->a.model_desc);
@@ -1361,7 +1361,7 @@
 
 	/* Hardware version. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
 	if (!IS_FWI2_CAPABLE(ha)) {
 		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
 		    "HW:%s", ha->adapter_id);
@@ -1385,7 +1385,7 @@
 
 	/* Driver version. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
 	snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
 	    "%s", qla2x00_version_str);
 	alen = strlen(eiter->a.driver_version);
@@ -1398,7 +1398,7 @@
 
 	/* Option ROM version. */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
 	snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
 	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
 	alen = strlen(eiter->a.orom_version);
@@ -1411,7 +1411,7 @@
 
 	/* Firmware version */
 	eiter = entries + size;
-	eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
 	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
 	    sizeof(eiter->a.fw_version));
 	alen = strlen(eiter->a.fw_version);
@@ -2484,8 +2484,8 @@
 	ct_pkt->entry_count = 1;
 	ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
 	ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
-	ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1);
-	ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1);
+	ct_pkt->cmd_dsd_count = cpu_to_le16(1);
+	ct_pkt->rsp_dsd_count = cpu_to_le16(1);
 	ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
 	ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 
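The bulk of the qla2xxx churn in this series drops the long-deprecated __constant_cpu_to_le16/32 and __constant_cpu_to_be16/32 spellings. The plain cpu_to_leXX/beXX macros already resolve constant arguments at compile time (the generic swab path tests __builtin_constant_p), so the __constant_ forms add nothing and checkpatch warns about them. A trivial sketch with a hypothetical IOCB layout:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct demo_iocb {		/* hypothetical on-the-wire layout */
		__le16 dsd_count;
		__le32 byte_count;
	};

	static void demo_fill(struct demo_iocb *p, u32 len)
	{
		/* Constant and variable arguments use the same helper; the
		 * constant case folds at build time, exactly as the old
		 * __constant_cpu_to_le16() did.
		 */
		p->dsd_count = cpu_to_le16(1);
		p->byte_count = cpu_to_le32(len);
	}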
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 11f2f32..16a1935c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1132,7 +1132,7 @@
 	unsigned long flags = 0;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-	uint32_t cnt, d2;
+	uint32_t cnt;
 	uint16_t wd;
 	static int abts_cnt; /* ISP abort retry counts */
 	int rval = QLA_SUCCESS;
@@ -1164,7 +1164,7 @@
 	udelay(100);
 
 	/* Wait for firmware to complete NVRAM accesses. */
-	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+	RD_REG_WORD(&reg->mailbox0);
 	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
 	    rval == QLA_SUCCESS; cnt--) {
 		barrier();
@@ -1183,7 +1183,7 @@
 	    RD_REG_DWORD(&reg->mailbox0));
 
 	/* Wait for soft-reset to complete. */
-	d2 = RD_REG_DWORD(&reg->ctrl_status);
+	RD_REG_DWORD(&reg->ctrl_status);
 	for (cnt = 0; cnt < 6000000; cnt++) {
 		barrier();
 		if ((RD_REG_DWORD(&reg->ctrl_status) &
@@ -1226,7 +1226,7 @@
 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
 	RD_REG_DWORD(&reg->hccr);
 
-	d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
+	RD_REG_WORD(&reg->mailbox0);
 	for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
 	    rval == QLA_SUCCESS; cnt--) {
 		barrier();
@@ -1277,16 +1277,19 @@
 static void
 qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
 {
-	struct qla_hw_data *ha = vha->hw;
 	uint32_t wd32 = 0;
 	uint delta_msec = 100;
 	uint elapsed_msec = 0;
 	uint timeout_msec;
 	ulong n;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
+	if (vha->hw->pdev->subsystem_device != 0x0175 &&
+	    vha->hw->pdev->subsystem_device != 0x0240)
 		return;
 
+	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
+	udelay(100);
+
 attempt:
 	timeout_msec = TIMEOUT_SEMAPHORE;
 	n = timeout_msec / delta_msec;
@@ -1690,7 +1693,7 @@
 	ha->fw_dump->signature[1] = 'L';
 	ha->fw_dump->signature[2] = 'G';
 	ha->fw_dump->signature[3] = 'C';
-	ha->fw_dump->version = __constant_htonl(1);
+	ha->fw_dump->version = htonl(1);
 
 	ha->fw_dump->fixed_size = htonl(fixed_size);
 	ha->fw_dump->mem_size = htonl(mem_size);
@@ -2070,8 +2073,8 @@
 	struct rsp_que *rsp = ha->rsp_q_map[0];
 
 	/* Setup ring parameters in initialization control block. */
-	ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0);
-	ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0);
+	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
+	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
 	ha->init_cb->request_q_length = cpu_to_le16(req->length);
 	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
 	ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -2090,7 +2093,7 @@
 qla24xx_config_rings(struct scsi_qla_host *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0);
+	device_reg_t *reg = ISP_QUE_REG(ha, 0);
 	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
 	struct qla_msix_entry *msix;
 	struct init_cb_24xx *icb;
@@ -2100,8 +2103,8 @@
 
 	/* Setup ring parameters in initialization control block. */
 	icb = (struct init_cb_24xx *)ha->init_cb;
-	icb->request_q_outpointer = __constant_cpu_to_le16(0);
-	icb->response_q_inpointer = __constant_cpu_to_le16(0);
+	icb->request_q_outpointer = cpu_to_le16(0);
+	icb->response_q_inpointer = cpu_to_le16(0);
 	icb->request_q_length = cpu_to_le16(req->length);
 	icb->response_q_length = cpu_to_le16(rsp->length);
 	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -2110,18 +2113,17 @@
 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
 	/* Setup ATIO queue dma pointers for target mode */
-	icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+	icb->atio_q_inpointer = cpu_to_le16(0);
 	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
 	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
 	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
 
 	if (IS_SHADOW_REG_CAPABLE(ha))
-		icb->firmware_options_2 |=
-		    __constant_cpu_to_le32(BIT_30|BIT_29);
+		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
 
 	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
-		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
-		icb->rid = __constant_cpu_to_le16(rid);
+		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+		icb->rid = cpu_to_le16(rid);
 		if (ha->flags.msix_enabled) {
 			msix = &ha->msix_entries[1];
 			ql_dbg(ql_dbg_init, vha, 0x00fd,
@@ -2131,26 +2133,22 @@
 		}
 		/* Use alternate PCI bus number */
 		if (MSB(rid))
-			icb->firmware_options_2 |=
-				__constant_cpu_to_le32(BIT_19);
+			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
 		/* Use alternate PCI devfn */
 		if (LSB(rid))
-			icb->firmware_options_2 |=
-				__constant_cpu_to_le32(BIT_18);
+			icb->firmware_options_2 |= cpu_to_le32(BIT_18);
 
 		/* Use Disable MSIX Handshake mode for capable adapters */
 		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
 		    (ha->flags.msix_enabled)) {
-			icb->firmware_options_2 &=
-				__constant_cpu_to_le32(~BIT_22);
+			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
 			ha->flags.disable_msix_handshake = 1;
 			ql_dbg(ql_dbg_init, vha, 0x00fe,
 			    "MSIX Handshake Disable Mode turned on.\n");
 		} else {
-			icb->firmware_options_2 |=
-				__constant_cpu_to_le32(BIT_22);
+			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
 		}
-		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
+		icb->firmware_options_2 |= cpu_to_le32(BIT_23);
 
 		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
 		WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
@@ -2248,7 +2246,7 @@
 	}
 
 	if (IS_FWI2_CAPABLE(ha)) {
-		mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
+		mid_init_cb->options = cpu_to_le16(BIT_1);
 		mid_init_cb->init_cb.execution_throttle =
 		    cpu_to_le16(ha->fw_xcb_count);
 		/* D-Port Status */
@@ -2677,8 +2675,8 @@
 			nv->frame_payload_size = 1024;
 		}
 
-		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
-		nv->execution_throttle = __constant_cpu_to_le16(16);
+		nv->max_iocb_allocation = cpu_to_le16(256);
+		nv->execution_throttle = cpu_to_le16(16);
 		nv->retry_count = 8;
 		nv->retry_delay = 1;
 
@@ -2696,7 +2694,7 @@
 		nv->host_p[1] = BIT_2;
 		nv->reset_delay = 5;
 		nv->port_down_retry_count = 8;
-		nv->max_luns_per_target = __constant_cpu_to_le16(8);
+		nv->max_luns_per_target = cpu_to_le16(8);
 		nv->link_down_timeout = 60;
 
 		rval = 1;
@@ -2824,7 +2822,7 @@
 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
 
-	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+	icb->execution_throttle = cpu_to_le16(0xFFFF);
 
 	ha->retry_count = nv->retry_count;
 
@@ -2876,10 +2874,10 @@
 	if (ql2xloginretrycount)
 		ha->login_retry_count = ql2xloginretrycount;
 
-	icb->lun_enables = __constant_cpu_to_le16(0);
+	icb->lun_enables = cpu_to_le16(0);
 	icb->command_resource_count = 0;
 	icb->immediate_notify_resource_count = 0;
-	icb->timeout = __constant_cpu_to_le16(0);
+	icb->timeout = cpu_to_le16(0);
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
 		/* Enable RIO */
@@ -3958,12 +3956,10 @@
     uint16_t *next_loopid)
 {
 	int	rval;
-	int	retry;
 	uint8_t opts;
 	struct qla_hw_data *ha = vha->hw;
 
 	rval = QLA_SUCCESS;
-	retry = 0;
 
 	if (IS_ALOGIO_CAPABLE(ha)) {
 		if (fcport->flags & FCF_ASYNC_SENT)
@@ -5117,7 +5113,7 @@
 	/* Bad NVRAM data, set defaults parameters. */
 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
 	    || nv->id[3] != ' ' ||
-	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_warn, vha, 0x006b,
 		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
@@ -5130,12 +5126,12 @@
 		 * Set default initialization control block.
 		 */
 		memset(nv, 0, ha->nvram_size);
-		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
-		nv->version = __constant_cpu_to_le16(ICB_VERSION);
+		nv->nvram_version = cpu_to_le16(ICB_VERSION);
+		nv->version = cpu_to_le16(ICB_VERSION);
 		nv->frame_payload_size = 2048;
-		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
-		nv->exchange_count = __constant_cpu_to_le16(0);
-		nv->hard_address = __constant_cpu_to_le16(124);
+		nv->execution_throttle = cpu_to_le16(0xFFFF);
+		nv->exchange_count = cpu_to_le16(0);
+		nv->hard_address = cpu_to_le16(124);
 		nv->port_name[0] = 0x21;
 		nv->port_name[1] = 0x00 + ha->port_no + 1;
 		nv->port_name[2] = 0x00;
@@ -5153,29 +5149,29 @@
 		nv->node_name[6] = 0x55;
 		nv->node_name[7] = 0x86;
 		qla24xx_nvram_wwn_from_ofw(vha, nv);
-		nv->login_retry_count = __constant_cpu_to_le16(8);
-		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
-		nv->login_timeout = __constant_cpu_to_le16(0);
+		nv->login_retry_count = cpu_to_le16(8);
+		nv->interrupt_delay_timer = cpu_to_le16(0);
+		nv->login_timeout = cpu_to_le16(0);
 		nv->firmware_options_1 =
-		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
-		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
-		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
-		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
-		nv->efi_parameters = __constant_cpu_to_le32(0);
+		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+		nv->firmware_options_2 = cpu_to_le32(2 << 4);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		nv->firmware_options_3 = cpu_to_le32(2 << 13);
+		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
+		nv->efi_parameters = cpu_to_le32(0);
 		nv->reset_delay = 5;
-		nv->max_luns_per_target = __constant_cpu_to_le16(128);
-		nv->port_down_retry_count = __constant_cpu_to_le16(30);
-		nv->link_down_timeout = __constant_cpu_to_le16(30);
+		nv->max_luns_per_target = cpu_to_le16(128);
+		nv->port_down_retry_count = cpu_to_le16(30);
+		nv->link_down_timeout = cpu_to_le16(30);
 
 		rval = 1;
 	}
 
 	if (!qla_ini_mode_enabled(vha)) {
 		/* Don't enable full login after initial LIP */
-		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
 		/* Don't enable LIP full login for initiator */
-		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+		nv->host_p &= cpu_to_le32(~BIT_10);
 	}
 
 	qlt_24xx_config_nvram_stage1(vha, nv);
@@ -5209,14 +5205,14 @@
 
 	qlt_24xx_config_nvram_stage2(vha, icb);
 
-	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+	if (nv->host_p & cpu_to_le32(BIT_15)) {
 		/* Use alternate WWN? */
 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
 	}
 
 	/* Prepare nodename */
-	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
 		/*
 		 * Firmware will apply the following mask if the nodename was
 		 * not provided.
@@ -5248,7 +5244,7 @@
 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
 
-	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+	icb->execution_throttle = cpu_to_le16(0xFFFF);
 
 	ha->retry_count = le16_to_cpu(nv->login_retry_count);
 
@@ -5256,7 +5252,7 @@
 	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
 		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
 	if (le16_to_cpu(nv->login_timeout) < 4)
-		nv->login_timeout = __constant_cpu_to_le16(4);
+		nv->login_timeout = cpu_to_le16(4);
 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
 	icb->login_timeout = nv->login_timeout;
 
@@ -5307,7 +5303,7 @@
 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
 		    le16_to_cpu(icb->interrupt_delay_timer): 2;
 	}
-	icb->firmware_options_2 &= __constant_cpu_to_le32(
+	icb->firmware_options_2 &= cpu_to_le32(
 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
 	vha->flags.process_response_queue = 0;
 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
@@ -6063,7 +6059,7 @@
 	/* Bad NVRAM data, set defaults parameters. */
 	if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
 	    || nv->id[3] != ' ' ||
-	    nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
+	    nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
 		/* Reset NVRAM data. */
 		ql_log(ql_log_info, vha, 0x0073,
 		    "Inconsistent NVRAM detected: checksum=0x%x id=%c "
@@ -6077,11 +6073,11 @@
 		 * Set default initialization control block.
 		 */
 		memset(nv, 0, ha->nvram_size);
-		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
-		nv->version = __constant_cpu_to_le16(ICB_VERSION);
+		nv->nvram_version = cpu_to_le16(ICB_VERSION);
+		nv->version = cpu_to_le16(ICB_VERSION);
 		nv->frame_payload_size = 2048;
-		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
-		nv->exchange_count = __constant_cpu_to_le16(0);
+		nv->execution_throttle = cpu_to_le16(0xFFFF);
+		nv->exchange_count = cpu_to_le16(0);
 		nv->port_name[0] = 0x21;
 		nv->port_name[1] = 0x00 + ha->port_no + 1;
 		nv->port_name[2] = 0x00;
@@ -6098,20 +6094,20 @@
 		nv->node_name[5] = 0x1c;
 		nv->node_name[6] = 0x55;
 		nv->node_name[7] = 0x86;
-		nv->login_retry_count = __constant_cpu_to_le16(8);
-		nv->interrupt_delay_timer = __constant_cpu_to_le16(0);
-		nv->login_timeout = __constant_cpu_to_le16(0);
+		nv->login_retry_count = cpu_to_le16(8);
+		nv->interrupt_delay_timer = cpu_to_le16(0);
+		nv->login_timeout = cpu_to_le16(0);
 		nv->firmware_options_1 =
-		    __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
-		nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4);
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
-		nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13);
-		nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10);
-		nv->efi_parameters = __constant_cpu_to_le32(0);
+		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
+		nv->firmware_options_2 = cpu_to_le32(2 << 4);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		nv->firmware_options_3 = cpu_to_le32(2 << 13);
+		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
+		nv->efi_parameters = cpu_to_le32(0);
 		nv->reset_delay = 5;
-		nv->max_luns_per_target = __constant_cpu_to_le16(128);
-		nv->port_down_retry_count = __constant_cpu_to_le16(30);
-		nv->link_down_timeout = __constant_cpu_to_le16(180);
+		nv->max_luns_per_target = cpu_to_le16(128);
+		nv->port_down_retry_count = cpu_to_le16(30);
+		nv->link_down_timeout = cpu_to_le16(180);
 		nv->enode_mac[0] = 0x00;
 		nv->enode_mac[1] = 0xC0;
 		nv->enode_mac[2] = 0xDD;
@@ -6170,13 +6166,13 @@
 	qlt_81xx_config_nvram_stage2(vha, icb);
 
 	/* Use alternate WWN? */
-	if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+	if (nv->host_p & cpu_to_le32(BIT_15)) {
 		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
 		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
 	}
 
 	/* Prepare nodename */
-	if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) {
+	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
 		/*
 		 * Firmware will apply the following mask if the nodename was
 		 * not provided.
@@ -6205,7 +6201,7 @@
 	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
 	memcpy(vha->port_name, icb->port_name, WWN_SIZE);
 
-	icb->execution_throttle = __constant_cpu_to_le16(0xFFFF);
+	icb->execution_throttle = cpu_to_le16(0xFFFF);
 
 	ha->retry_count = le16_to_cpu(nv->login_retry_count);
 
@@ -6213,7 +6209,7 @@
 	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
 		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
 	if (le16_to_cpu(nv->login_timeout) < 4)
-		nv->login_timeout = __constant_cpu_to_le16(4);
+		nv->login_timeout = cpu_to_le16(4);
 	ha->login_timeout = le16_to_cpu(nv->login_timeout);
 	icb->login_timeout = nv->login_timeout;
 
@@ -6259,7 +6255,7 @@
 
 	/* if not running MSI-X we need handshaking on interrupts */
 	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
-		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
+		icb->firmware_options_2 |= cpu_to_le32(BIT_22);
 
 	/* Enable ZIO. */
 	if (!vha->flags.init_done) {
@@ -6268,7 +6264,7 @@
 		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
 		    le16_to_cpu(icb->interrupt_delay_timer): 2;
 	}
-	icb->firmware_options_2 &= __constant_cpu_to_le32(
+	icb->firmware_options_2 &= cpu_to_le32(
 	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
 	vha->flags.process_response_queue = 0;
 	if (ha->zio_mode != QLA_ZIO_DISABLED) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 6f02b26..c49df34 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -108,8 +108,7 @@
 	cont_pkt = (cont_entry_t *)req->ring_ptr;
 
 	/* Load packet defaults. */
-	*((uint32_t *)(&cont_pkt->entry_type)) =
-	    __constant_cpu_to_le32(CONTINUE_TYPE);
+	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
 
 	return (cont_pkt);
 }
@@ -138,8 +137,8 @@
 
 	/* Load packet defaults. */
 	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
-	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
-	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
+	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
+	    cpu_to_le32(CONTINUE_A64_TYPE);
 
 	return (cont_pkt);
 }
@@ -204,11 +203,11 @@
 
 	/* Update entry type to indicate Command Type 2 IOCB */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE);
+	    cpu_to_le32(COMMAND_TYPE);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
 
@@ -261,12 +260,11 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
 
@@ -310,7 +308,7 @@
 int
 qla2x00_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	scsi_qla_host_t	*vha;
 	struct scsi_cmnd *cmd;
@@ -327,7 +325,6 @@
 	struct rsp_que *rsp;
 
 	/* Setup device pointers. */
-	ret = 0;
 	vha = sp->fcport->vha;
 	ha = vha->hw;
 	reg = &ha->iobase->isp;
@@ -403,7 +400,7 @@
 	/* Set target ID and LUN number*/
 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
 	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
-	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
 
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -454,7 +451,7 @@
 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
 {
 	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
 
 	if (IS_P3P_TYPE(ha)) {
 		qla82xx_start_iocbs(vha);
@@ -597,12 +594,11 @@
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-		__constant_cpu_to_le32(COMMAND_TYPE_6);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return 0;
 	}
 
@@ -611,13 +607,11 @@
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_WRITE_DATA);
+		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_READ_DATA);
+		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 		vha->qla_stats.input_requests++;
 	}
@@ -680,7 +674,7 @@
  *
  * Returns the number of dsd list needed to store @dsds.
  */
-inline uint16_t
+static inline uint16_t
 qla24xx_calc_dsd_lists(uint16_t dsds)
 {
 	uint16_t dsd_lists = 0;
@@ -700,7 +694,7 @@
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
  */
-inline void
+static inline void
 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     uint16_t tot_dsds)
 {
@@ -710,32 +704,27 @@
 	struct scsi_cmnd *cmd;
 	struct scatterlist *sg;
 	int i;
-	struct req_que *req;
 
 	cmd = GET_CMD_SP(sp);
 
 	/* Update entry type to indicate Command Type 3 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE_7);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
 
 	vha = sp->fcport->vha;
-	req = vha->req;
 
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
-		cmd_pkt->task_mgmt_flags =
-		    __constant_cpu_to_le16(TMF_WRITE_DATA);
+		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
 		vha->qla_stats.output_requests++;
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
-		cmd_pkt->task_mgmt_flags =
-		    __constant_cpu_to_le16(TMF_READ_DATA);
+		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
 		vha->qla_stats.input_requests++;
 	}
@@ -809,7 +798,7 @@
 	 * match LBA in CDB + N
 	 */
 	case SCSI_PROT_DIF_TYPE2:
-		pkt->app_tag = __constant_cpu_to_le16(0);
+		pkt->app_tag = cpu_to_le16(0);
 		pkt->app_tag_mask[0] = 0x0;
 		pkt->app_tag_mask[1] = 0x0;
 
@@ -840,7 +829,7 @@
 	case SCSI_PROT_DIF_TYPE1:
 		pkt->ref_tag = cpu_to_le32((uint32_t)
 		    (0xffffffff & scsi_get_lba(cmd)));
-		pkt->app_tag = __constant_cpu_to_le16(0);
+		pkt->app_tag = cpu_to_le16(0);
 		pkt->app_tag_mask[0] = 0x0;
 		pkt->app_tag_mask[1] = 0x0;
 
@@ -933,11 +922,9 @@
 	dma_addr_t	sle_dma;
 	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
 	struct scsi_cmnd *cmd;
-	struct scsi_qla_host *vha;
 
 	memset(&sgx, 0, sizeof(struct qla2_sgx));
 	if (sp) {
-		vha = sp->fcport->vha;
 		cmd = GET_CMD_SP(sp);
 		prot_int = cmd->device->sector_size;
 
@@ -947,7 +934,6 @@
 
 		sg_prot = scsi_prot_sglist(cmd);
 	} else if (tc) {
-		vha = tc->vha;
 		prot_int      = tc->blk_sz;
 		sgx.tot_bytes = tc->bufflen;
 		sgx.cur_sg    = tc->sg;
@@ -1047,15 +1033,12 @@
 	int	i;
 	uint16_t	used_dsds = tot_dsds;
 	struct scsi_cmnd *cmd;
-	struct scsi_qla_host *vha;
 
 	if (sp) {
 		cmd = GET_CMD_SP(sp);
 		sgl = scsi_sglist(cmd);
-		vha = sp->fcport->vha;
 	} else if (tc) {
 		sgl = tc->sg;
-		vha = tc->vha;
 	} else {
 		BUG();
 		return 1;
@@ -1231,7 +1214,6 @@
 	uint32_t		*cur_dsd, *fcp_dl;
 	scsi_qla_host_t		*vha;
 	struct scsi_cmnd	*cmd;
-	int			sgc;
 	uint32_t		total_bytes = 0;
 	uint32_t		data_bytes;
 	uint32_t		dif_bytes;
@@ -1247,10 +1229,8 @@
 
 	cmd = GET_CMD_SP(sp);
 
-	sgc = 0;
 	/* Update entry type to indicate Command Type CRC_2 IOCB */
-	*((uint32_t *)(&cmd_pkt->entry_type)) =
-	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
+	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
 
 	vha = sp->fcport->vha;
 	ha = vha->hw;
@@ -1258,7 +1238,7 @@
 	/* No data transfer */
 	data_bytes = scsi_bufflen(cmd);
 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return QLA_SUCCESS;
 	}
 
@@ -1267,10 +1247,10 @@
 	/* Set transfer direction */
 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
 		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_WRITE_DATA);
+		    cpu_to_le16(CF_WRITE_DATA);
 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		cmd_pkt->control_flags =
-		    __constant_cpu_to_le16(CF_READ_DATA);
+		    cpu_to_le16(CF_READ_DATA);
 	}
 
 	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
@@ -1392,7 +1372,7 @@
 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
-	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 	/* Fibre channel byte count */
 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
@@ -1400,13 +1380,12 @@
 	*fcp_dl = htonl(total_bytes);
 
 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
-		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		cmd_pkt->byte_count = cpu_to_le32(0);
 		return QLA_SUCCESS;
 	}
 	/* Walks data segments */
 
-	cmd_pkt->control_flags |=
-	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
 
 	if (!bundling && tot_prot_dsds) {
 		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
@@ -1418,8 +1397,7 @@
 
 	if (bundling && tot_prot_dsds) {
 		/* Walks dif segments */
-		cmd_pkt->control_flags |=
-			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
+		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
 				tot_prot_dsds, NULL))
@@ -1442,7 +1420,7 @@
 int
 qla24xx_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	uint32_t	*clr_ptr;
 	uint32_t        index;
@@ -1458,8 +1436,6 @@
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Setup device pointers. */
-	ret = 0;
-
 	qla25xx_set_que(sp, &rsp);
 	req = vha->req;
 
@@ -1753,7 +1729,7 @@
 	cmd_pkt->entry_count = (uint8_t)req_cnt;
 	/* Specify response queue number where completion should happen */
 	cmd_pkt->entry_status = (uint8_t) rsp->id;
-	cmd_pkt->timeout = __constant_cpu_to_le16(0);
+	cmd_pkt->timeout = cpu_to_le16(0);
 	wmb();
 
 	/* Adjust ring index. */
@@ -1819,7 +1795,7 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
-	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
 	uint32_t index, handle;
 	request_t *pkt;
 	uint16_t cnt, req_cnt;
@@ -2044,10 +2020,10 @@
         els_iocb->entry_status = 0;
         els_iocb->handle = sp->handle;
         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
-        els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	els_iocb->vp_index = sp->fcport->vha->vp_idx;
         els_iocb->sof_type = EST_SOFI3;
-        els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
 
 	els_iocb->opcode =
 	    sp->type == SRB_ELS_CMD_RPT ?
@@ -2091,7 +2067,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
-	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
 
 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -2099,13 +2074,13 @@
 	ct_iocb->entry_status = 0;
 	ct_iocb->handle1 = sp->handle;
 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
-	ct_iocb->status = __constant_cpu_to_le16(0);
-	ct_iocb->control_flags = __constant_cpu_to_le16(0);
+	ct_iocb->status = cpu_to_le16(0);
+	ct_iocb->control_flags = cpu_to_le16(0);
 	ct_iocb->timeout = 0;
 	ct_iocb->cmd_dsd_count =
-	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	ct_iocb->total_dsd_count =
-	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
+	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
 	ct_iocb->req_bytecount =
 	    cpu_to_le32(bsg_job->request_payload.payload_len);
 	ct_iocb->rsp_bytecount =
@@ -2142,7 +2117,6 @@
 			    vha->hw->req_q_map[0]);
 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
 			avail_dsds = 5;
-			cont_iocb_prsnt = 1;
 			entry_count++;
 		}
 
@@ -2170,7 +2144,6 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
 	int loop_iterartion = 0;
-	int cont_iocb_prsnt = 0;
 	int entry_count = 1;
 
 	ct_iocb->entry_type = CT_IOCB_TYPE;
@@ -2180,13 +2153,13 @@
 
 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
 	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
-        ct_iocb->comp_status = __constant_cpu_to_le16(0);
+	ct_iocb->comp_status = cpu_to_le16(0);
 
 	ct_iocb->cmd_dsd_count =
-            __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
+		cpu_to_le16(bsg_job->request_payload.sg_cnt);
         ct_iocb->timeout = 0;
         ct_iocb->rsp_dsd_count =
-            __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
+		cpu_to_le16(bsg_job->reply_payload.sg_cnt);
         ct_iocb->rsp_byte_count =
             cpu_to_le32(bsg_job->reply_payload.payload_len);
         ct_iocb->cmd_byte_count =
@@ -2217,7 +2190,6 @@
 			    ha->req_q_map[0]);
 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
 			avail_dsds = 5;
-			cont_iocb_prsnt = 1;
 			entry_count++;
 		}
 
@@ -2240,7 +2212,7 @@
 int
 qla82xx_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	struct scsi_cmnd *cmd;
 	uint32_t	*clr_ptr;
@@ -2260,7 +2232,6 @@
 	struct rsp_que *rsp = NULL;
 
 	/* Setup device pointers. */
-	ret = 0;
 	reg = &ha->iobase->isp82;
 	cmd = GET_CMD_SP(sp);
 	req = vha->req;
@@ -2539,16 +2510,12 @@
 	/* write, read and verify logic */
 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
 	if (ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
 	else {
-		WRT_REG_DWORD(
-			(unsigned long __iomem *)ha->nxdb_wr_ptr,
-			dbval);
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 		wmb();
-		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD(
-				(unsigned long __iomem *)ha->nxdb_wr_ptr,
-				dbval);
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 			wmb();
 		}
 	}
@@ -2682,7 +2649,7 @@
 
 	/*Update entry type to indicate bidir command */
 	*((uint32_t *)(&cmd_pkt->entry_type)) =
-		__constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
+		cpu_to_le32(COMMAND_BIDIRECTIONAL);
 
 	/* Set the transfer direction, in this set both flags
 	 * Also set the BD_WRAP_BACK flag, firmware will take care
@@ -2690,8 +2657,7 @@
 	 */
 	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
 	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
-	cmd_pkt->control_flags =
-			__constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
+	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
 							BD_WRAP_BACK);
 
 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
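Where the qla82xx doorbell path above still needs the mapping as a plain integer (qla82xx_wr_32() takes a ulong), the pointer is cast with (uintptr_t __force); __force is the sparse annotation that marks an address-space conversion as intentional rather than accidental. A one-function sketch with a hypothetical name:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* Deliberately strip the __iomem address space so the mapping cookie
	 * can be handed to an interface that wants a plain integer.
	 */
	static uintptr_t demo_doorbell_cookie(void __iomem *doorbell)
	{
		return (uintptr_t __force)doorbell;
	}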
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5559d5e..ccf6a7f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -116,7 +116,7 @@
 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
 {
 	/* Check for PCI disconnection */
-	if (reg == 0xffffffff) {
+	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
 		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
 		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
 		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
@@ -560,6 +560,17 @@
 	return ret;
 }
 
+static inline fc_port_t *
+qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
+{
+	fc_port_t *fcport;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list)
+		if (fcport->loop_id == loop_id)
+			return fcport;
+	return NULL;
+}
+
 /**
  * qla2x00_async_event() - Process asynchronous events.
  * @ha: SCSI driver HA context
@@ -575,7 +586,7 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-	uint32_t	rscn_entry, host_pid, tmp_pid;
+	uint32_t	rscn_entry, host_pid;
 	unsigned long	flags;
 	fc_port_t	*fcport = NULL;
 
@@ -897,11 +908,29 @@
 			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
 			break;
 
-		/* Global event -- port logout or port unavailable. */
-		if (mb[1] == 0xffff && mb[2] == 0x7) {
+		if (mb[2] == 0x7) {
 			ql_dbg(ql_dbg_async, vha, 0x5010,
-			    "Port unavailable %04x %04x %04x.\n",
+			    "Port %s %04x %04x %04x.\n",
+			    mb[1] == 0xffff ? "unavailable" : "logout",
 			    mb[1], mb[2], mb[3]);
+
+			if (mb[1] == 0xffff)
+				goto global_port_update;
+
+			/* Port logout */
+			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
+			if (!fcport)
+				break;
+			if (atomic_read(&fcport->state) != FCS_ONLINE)
+				break;
+			ql_dbg(ql_dbg_async, vha, 0x508a,
+			    "Marking port lost loopid=%04x portid=%06x.\n",
+			    fcport->loop_id, fcport->d_id.b24);
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+			break;
+
+global_port_update:
+			/* Port unavailable. */
 			ql_log(ql_log_warn, vha, 0x505e,
 			    "Link is offline.\n");
 
@@ -998,7 +1027,6 @@
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (atomic_read(&fcport->state) != FCS_ONLINE)
 				continue;
-			tmp_pid = fcport->d_id.b24;
 			if (fcport->d_id.b24 == rscn_entry) {
 				qla2x00_mark_device_lost(vha, fcport, 0, 0);
 				break;
@@ -1565,7 +1593,7 @@
 		    "Async-%s error - hdl=%x entry-status(%x).\n",
 		    type, sp->handle, sts->entry_status);
 		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
-	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
 		ql_log(ql_log_warn, fcport->vha, 0x5039,
 		    "Async-%s error - hdl=%x completion status(%x).\n",
 		    type, sp->handle, sts->comp_status);
@@ -2045,14 +2073,18 @@
 	}
 
 	/* Validate handle. */
-	if (handle < req->num_outstanding_cmds)
+	if (handle < req->num_outstanding_cmds) {
 		sp = req->outstanding_cmds[handle];
-	else
-		sp = NULL;
-
-	if (sp == NULL) {
+		if (!sp) {
+			ql_dbg(ql_dbg_io, vha, 0x3075,
+			    "%s(%ld): Already returned command for status handle (0x%x).\n",
+			    __func__, vha->host_no, sts->handle);
+			return;
+		}
+	} else {
 		ql_dbg(ql_dbg_io, vha, 0x3017,
-		    "Invalid status handle (0x%x).\n", sts->handle);
+		    "Invalid status handle, out of range (0x%x).\n",
+		    sts->handle);
 
 		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
 			if (IS_P3P_TYPE(ha))
@@ -2339,12 +2371,12 @@
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
 		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
 		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
-		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
+		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
 		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
-		    resid_len, fw_resid_len);
+		    resid_len, fw_resid_len, sp, cp);
 
 	if (rsp->status_srb == NULL)
 		sp->done(ha, sp, res);
@@ -2441,13 +2473,7 @@
 	}
 fatal:
 	ql_log(ql_log_warn, vha, 0x5030,
-	    "Error entry - invalid handle/queue.\n");
-
-	if (IS_P3P_TYPE(ha))
-		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
-	else
-		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-	qla2xxx_wake_dpc(vha);
+	    "Error entry - invalid handle/queue (%04x).\n", que);
 }
 
 /**
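The qla2x00_find_fcport_by_loopid() helper introduced above is a plain list_for_each_entry() walk over vha->vp_fcports. The same idiom in a self-contained form, with a hypothetical demo_port standing in for fc_port_t:

	#include <linux/list.h>
	#include <linux/types.h>

	struct demo_port {
		struct list_head list;		/* linkage on the per-host list */
		u16 loop_id;
	};

	static struct demo_port *
	demo_find_by_loopid(struct list_head *ports, u16 loop_id)
	{
		struct demo_port *p;

		list_for_each_entry(p, ports, list)
			if (p->loop_id == loop_id)
				return p;	/* first match wins */
		return NULL;
	}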
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b2f713a..cb11e04 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -555,7 +555,9 @@
 	if (IS_FWI2_CAPABLE(ha))
 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
 	if (IS_QLA27XX(ha))
-		mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
+		mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 |
+		    MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8;
+
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -571,6 +573,7 @@
 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
 	else
 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+
 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
 		ha->mpi_version[1] = mcp->mb[11] >> 8;
@@ -580,6 +583,7 @@
 		ha->phy_version[1] = mcp->mb[9] >> 8;
 		ha->phy_version[2] = mcp->mb[9] & 0xff;
 	}
+
 	if (IS_FWI2_CAPABLE(ha)) {
 		ha->fw_attributes_h = mcp->mb[15];
 		ha->fw_attributes_ext[0] = mcp->mb[16];
@@ -591,7 +595,14 @@
 		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
 		    __func__, mcp->mb[17], mcp->mb[16]);
 	}
+
 	if (IS_QLA27XX(ha)) {
+		ha->mpi_version[0] = mcp->mb[10] & 0xff;
+		ha->mpi_version[1] = mcp->mb[11] >> 8;
+		ha->mpi_version[2] = mcp->mb[11] & 0xff;
+		ha->pep_version[0] = mcp->mb[13] & 0xff;
+		ha->pep_version[1] = mcp->mb[14] >> 8;
+		ha->pep_version[2] = mcp->mb[14] & 0xff;
 		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
 		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
 	}
@@ -1135,20 +1146,22 @@
 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
 		}
 		/* If FA-WWN supported */
-		if (mcp->mb[7] & BIT_14) {
-			vha->port_name[0] = MSB(mcp->mb[16]);
-			vha->port_name[1] = LSB(mcp->mb[16]);
-			vha->port_name[2] = MSB(mcp->mb[17]);
-			vha->port_name[3] = LSB(mcp->mb[17]);
-			vha->port_name[4] = MSB(mcp->mb[18]);
-			vha->port_name[5] = LSB(mcp->mb[18]);
-			vha->port_name[6] = MSB(mcp->mb[19]);
-			vha->port_name[7] = LSB(mcp->mb[19]);
-			fc_host_port_name(vha->host) =
-			    wwn_to_u64(vha->port_name);
-			ql_dbg(ql_dbg_mbx, vha, 0x10ca,
-			    "FA-WWN acquired %016llx\n",
-			    wwn_to_u64(vha->port_name));
+		if (IS_FAWWN_CAPABLE(vha->hw)) {
+			if (mcp->mb[7] & BIT_14) {
+				vha->port_name[0] = MSB(mcp->mb[16]);
+				vha->port_name[1] = LSB(mcp->mb[16]);
+				vha->port_name[2] = MSB(mcp->mb[17]);
+				vha->port_name[3] = LSB(mcp->mb[17]);
+				vha->port_name[4] = MSB(mcp->mb[18]);
+				vha->port_name[5] = LSB(mcp->mb[18]);
+				vha->port_name[6] = MSB(mcp->mb[19]);
+				vha->port_name[7] = LSB(mcp->mb[19]);
+				fc_host_port_name(vha->host) =
+				    wwn_to_u64(vha->port_name);
+				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+				    "FA-WWN acquired %016llx\n",
+				    wwn_to_u64(vha->port_name));
+			}
 		}
 	}
 
@@ -1239,7 +1252,7 @@
 	    "Entered %s.\n", __func__);
 
 	if (IS_P3P_TYPE(ha) && ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
+		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
 			(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
 
 	if (ha->flags.npiv_supported)
@@ -1865,7 +1878,6 @@
 	uint32_t	iop[2];
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
-	struct rsp_que *rsp;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
 	    "Entered %s.\n", __func__);
@@ -1874,7 +1886,6 @@
 		req = ha->req_q_map[0];
 	else
 		req = vha->req;
-	rsp = req->rsp;
 
 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
 	if (lg == NULL) {
@@ -1888,11 +1899,11 @@
 	lg->entry_count = 1;
 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
 	lg->nport_handle = cpu_to_le16(loop_id);
-	lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
+	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
 	if (opt & BIT_0)
-		lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
+		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
 	if (opt & BIT_1)
-		lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
+		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
@@ -1907,7 +1918,7 @@
 		    "Failed to complete IOCB -- error status (%x).\n",
 		    lg->entry_status);
 		rval = QLA_FUNCTION_FAILED;
-	} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
 		iop[0] = le32_to_cpu(lg->io_parameter[0]);
 		iop[1] = le32_to_cpu(lg->io_parameter[1]);
 
@@ -1961,7 +1972,7 @@
 			mb[10] |= BIT_0;	/* Class 2. */
 		if (lg->io_parameter[9] || lg->io_parameter[10])
 			mb[10] |= BIT_1;	/* Class 3. */
-		if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
 			mb[10] |= BIT_7;	/* Confirmed Completion
 						 * Allowed
 						 */
@@ -2142,7 +2153,6 @@
 	dma_addr_t	lg_dma;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
-	struct rsp_que *rsp;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
 	    "Entered %s.\n", __func__);
@@ -2159,13 +2169,12 @@
 		req = ha->req_q_map[0];
 	else
 		req = vha->req;
-	rsp = req->rsp;
 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	lg->entry_count = 1;
 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
 	lg->nport_handle = cpu_to_le16(loop_id);
 	lg->control_flags =
-	    __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
+	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
 		LCF_FREE_NPORT);
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
@@ -2181,7 +2190,7 @@
 		    "Failed to complete IOCB -- error status (%x).\n",
 		    lg->entry_status);
 		rval = QLA_FUNCTION_FAILED;
-	} else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+	} else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1071,
 		    "Failed to complete IOCB -- completion status (%x) "
 		    "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
@@ -2673,7 +2682,7 @@
 		    "Failed to complete IOCB -- error status (%x).\n",
 		    abt->entry_status);
 		rval = QLA_FUNCTION_FAILED;
-	} else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
+	} else if (abt->nport_handle != cpu_to_le16(0)) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
 		    "Failed to complete IOCB -- completion status (%x).\n",
 		    le16_to_cpu(abt->nport_handle));
@@ -2756,8 +2765,7 @@
 		    "Failed to complete IOCB -- error status (%x).\n",
 		    sts->entry_status);
 		rval = QLA_FUNCTION_FAILED;
-	} else if (sts->comp_status !=
-	    __constant_cpu_to_le16(CS_COMPLETE)) {
+	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
 		    "Failed to complete IOCB -- completion status (%x).\n",
 		    le16_to_cpu(sts->comp_status));
@@ -2853,7 +2861,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -2891,7 +2900,8 @@
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -3483,7 +3493,7 @@
 		    "Failed to complete IOCB -- error status (%x).\n",
 		    vpmod->comp_status);
 		rval = QLA_FUNCTION_FAILED;
-	} else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
 		    "Failed to complete IOCB -- completion status (%x).\n",
 		    le16_to_cpu(vpmod->comp_status));
@@ -3542,7 +3552,7 @@
 	vce->entry_type = VP_CTRL_IOCB_TYPE;
 	vce->entry_count = 1;
 	vce->command = cpu_to_le16(cmd);
-	vce->vp_count = __constant_cpu_to_le16(1);
+	vce->vp_count = cpu_to_le16(1);
 
 	/* index map in firmware starts with 1; decrement index
 	 * this is ok as we never use index 0
@@ -3562,7 +3572,7 @@
 		    "Failed to complete IOCB -- error status (%x).\n",
 		    vce->entry_status);
 		rval = QLA_FUNCTION_FAILED;
-	} else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
+	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
 		ql_dbg(ql_dbg_mbx, vha, 0x10c5,
 		    "Failed to complet IOCB -- completion status (%x).\n",
 		    le16_to_cpu(vce->comp_status));
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cc94192..c5dd594 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -371,7 +371,6 @@
 void
 qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 {
-	int ret;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *vp;
 	unsigned long flags = 0;
@@ -392,7 +391,7 @@
 			atomic_inc(&vp->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-			ret = qla2x00_do_dpc_vp(vp);
+			qla2x00_do_dpc_vp(vp);
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vp->vref_count);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 6d190b4..b5029e5 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -862,7 +862,7 @@
 	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
 
 	req->length = ha->req_que_len;
-	req->ring = (void *)ha->iobase + ha->req_que_off;
+	req->ring = (void __force *)ha->iobase + ha->req_que_off;
 	req->dma = bar2_hdl + ha->req_que_off;
 	if ((!req->ring) || (req->length == 0)) {
 		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
@@ -877,7 +877,7 @@
 	    ha->req_que_off, (u64)req->dma);
 
 	rsp->length = ha->rsp_que_len;
-	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+	rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
 	rsp->dma = bar2_hdl + ha->rsp_que_off;
 	if ((!rsp->ring) || (rsp->length == 0)) {
 		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
@@ -1317,10 +1317,10 @@
 qlafx00_configure_devices(scsi_qla_host_t *vha)
 {
 	int  rval;
-	unsigned long flags, save_flags;
+	unsigned long flags;
 	rval = QLA_SUCCESS;
 
-	save_flags = flags = vha->dpc_flags;
+	flags = vha->dpc_flags;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2090,
 	    "Configure devices -- dpc flags =0x%lx\n", flags);
@@ -1425,7 +1425,7 @@
 	pkt = rsp->ring_ptr;
 	for (cnt = 0; cnt < rsp->length; cnt++) {
 		pkt->signature = RESPONSE_PROCESSED;
-		WRT_REG_DWORD((void __iomem *)&pkt->signature,
+		WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
 		    RESPONSE_PROCESSED);
 		pkt++;
 	}
@@ -2279,7 +2279,6 @@
 	struct sts_entry_fx00 *sts;
 	__le16		comp_status;
 	__le16		scsi_status;
-	uint16_t	ox_id;
 	__le16		lscsi_status;
 	int32_t		resid;
 	uint32_t	sense_len, par_sense_len, rsp_info_len, resid_len,
@@ -2344,7 +2343,6 @@
 
 	fcport = sp->fcport;
 
-	ox_id = 0;
 	sense_len = par_sense_len = rsp_info_len = resid_len =
 		fw_resid_len = 0;
 	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
@@ -2528,12 +2526,12 @@
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
 		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
 		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
-		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+		    "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
 		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->tgt_id,
 		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
-		    rsp_info_len, resid_len, fw_resid_len, sense_len,
+		    rsp_info, resid_len, fw_resid_len, sense_len,
 		    par_sense_len, rsp_info_len);
 
 	if (rsp->status_srb == NULL)
@@ -3009,7 +3007,7 @@
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		lcmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
 
@@ -3071,7 +3069,7 @@
 int
 qlafx00_start_scsi(srb_t *sp)
 {
-	int		ret, nseg;
+	int		nseg;
 	unsigned long   flags;
 	uint32_t        index;
 	uint32_t	handle;
@@ -3088,8 +3086,6 @@
 	struct scsi_lun llun;
 
 	/* Setup device pointers. */
-	ret = 0;
-
 	rsp = ha->rsp_q_map[0];
 	req = vha->req;
 
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1620b0e..eb0cc54 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -347,32 +347,31 @@
 }
 
 /*
- * In: 'off' is offset from CRB space in 128M pci map
- * Out: 'off' is 2M pci map addr
+ * In: 'off_in' is offset from CRB space in 128M pci map
+ * Out: 'off_out' is 2M pci map addr
  * side effect: lock crb window
  */
 static void
-qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
+			     void __iomem **off_out)
 {
 	u32 win_read;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-	ha->crb_win = CRB_HI(*off);
-	writel(ha->crb_win,
-		(void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	ha->crb_win = CRB_HI(off_in);
+	writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);
 
 	/* Read back value to make sure write has gone through before trying
 	 * to use it.
 	 */
-	win_read = RD_REG_DWORD((void __iomem *)
-	    (CRB_WINDOW_2M + ha->nx_pcibase));
+	win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
 	if (win_read != ha->crb_win) {
 		ql_dbg(ql_dbg_p3p, vha, 0xb000,
 		    "%s: Written crbwin (0x%x) "
 		    "!= Read crbwin (0x%x), off=0x%lx.\n",
-		    __func__, ha->crb_win, win_read, *off);
+		    __func__, ha->crb_win, win_read, off_in);
 	}
-	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+	*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
 }
 
 static inline unsigned long
@@ -417,29 +416,30 @@
 }
 
 static int
-qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
+			    void __iomem **off_out)
 {
 	struct crb_128M_2M_sub_block_map *m;
 
-	if (*off >= QLA82XX_CRB_MAX)
+	if (off_in >= QLA82XX_CRB_MAX)
 		return -1;
 
-	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
-		*off = (*off - QLA82XX_PCI_CAMQM) +
+	if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
+		*off_out = (off_in - QLA82XX_PCI_CAMQM) +
 		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
 		return 0;
 	}
 
-	if (*off < QLA82XX_PCI_CRBSPACE)
+	if (off_in < QLA82XX_PCI_CRBSPACE)
 		return -1;
 
-	*off -= QLA82XX_PCI_CRBSPACE;
+	*off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
 
 	/* Try direct map */
-	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
 
-	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
-		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+	if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
+		*off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
 		return 0;
 	}
 	/* Not in direct map, use crb window */
@@ -465,51 +465,61 @@
 }
 
 int
-qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
 {
+	void __iomem *off;
 	unsigned long flags = 0;
 	int rv;
 
-	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
 
 	BUG_ON(rv == -1);
 
 	if (rv == 1) {
+#ifndef __CHECKER__
 		write_lock_irqsave(&ha->hw_lock, flags);
+#endif
 		qla82xx_crb_win_lock(ha);
-		qla82xx_pci_set_crbwindow_2M(ha, &off);
+		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
 	}
 
 	writel(data, (void __iomem *)off);
 
 	if (rv == 1) {
 		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
 		write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
 	}
 	return 0;
 }
 
 int
-qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
 {
+	void __iomem *off;
 	unsigned long flags = 0;
 	int rv;
 	u32 data;
 
-	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
 
 	BUG_ON(rv == -1);
 
 	if (rv == 1) {
+#ifndef __CHECKER__
 		write_lock_irqsave(&ha->hw_lock, flags);
+#endif
 		qla82xx_crb_win_lock(ha);
-		qla82xx_pci_set_crbwindow_2M(ha, &off);
+		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
 	}
-	data = RD_REG_DWORD((void __iomem *)off);
+	data = RD_REG_DWORD(off);
 
 	if (rv == 1) {
 		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
 		write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
 	}
 	return data;
 }
@@ -547,9 +557,6 @@
 	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
 }
 
-/*  PCI Windowing for DDR regions.  */
-#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
-	(((addr) <= (high)) && ((addr) >= (low)))
 /*
  * check memory access boundary.
  * used by test agent. support ddr access only for now
@@ -558,9 +565,9 @@
 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
 	unsigned long long addr, int size)
 {
-	if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+	if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
 		QLA82XX_ADDR_DDR_NET_MAX) ||
-		!QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+		!addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET,
 		QLA82XX_ADDR_DDR_NET_MAX) ||
 		((size != 1) && (size != 2) && (size != 4) && (size != 8)))
 			return 0;
@@ -577,7 +584,7 @@
 	u32 win_read;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
 		QLA82XX_ADDR_DDR_NET_MAX)) {
 		/* DDR network side */
 		window = MN_WIN(addr);
@@ -592,7 +599,7 @@
 			    __func__, window, win_read);
 		}
 		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
-	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+	} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
 		QLA82XX_ADDR_OCM0_MAX)) {
 		unsigned int temp1;
 		if ((addr & 0x00ff800) == 0xff800) {
@@ -615,7 +622,7 @@
 		}
 		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
 
-	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+	} else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET,
 		QLA82XX_P3_ADDR_QDR_NET_MAX)) {
 		/* QDR network side */
 		window = MS_WIN(addr);
@@ -656,16 +663,16 @@
 	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
 
 	/* DDR network side */
-	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
 		QLA82XX_ADDR_DDR_NET_MAX))
 		BUG();
-	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+	else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
 		QLA82XX_ADDR_OCM0_MAX))
 		return 1;
-	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+	else if (addr_in_range(addr, QLA82XX_ADDR_OCM1,
 		QLA82XX_ADDR_OCM1_MAX))
 		return 1;
-	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+	else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
 		/* QDR network side */
 		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
 		if (ha->qdr_sn_window == window)
@@ -922,20 +929,18 @@
 {
 	uint32_t  off_value, rval = 0;
 
-	WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
-	    (off & 0xFFFF0000));
+	WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
 
 	/* Read back value to make sure write has gone through */
-	RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
 	off_value  = (off & 0x0000FFFF);
 
 	if (flag)
-		WRT_REG_DWORD((void __iomem *)
-		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
-		    data);
+		WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+			      data);
 	else
-		rval = RD_REG_DWORD((void __iomem *)
-		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+		rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+				    ha->nx_pcibase);
 
 	return rval;
 }
@@ -1663,8 +1668,7 @@
 	}
 
 	len = pci_resource_len(ha->pdev, 0);
-	ha->nx_pcibase =
-	    (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+	ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len);
 	if (!ha->nx_pcibase) {
 		ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
 		    "Cannot remap pcibase MMIO, aborting.\n");
@@ -1673,17 +1677,13 @@
 
 	/* Mapping of IO base pointer */
 	if (IS_QLA8044(ha)) {
-		ha->iobase =
-		    (device_reg_t *)((uint8_t *)ha->nx_pcibase);
+		ha->iobase = ha->nx_pcibase;
 	} else if (IS_QLA82XX(ha)) {
-		ha->iobase =
-		    (device_reg_t *)((uint8_t *)ha->nx_pcibase +
-			0xbc000 + (ha->pdev->devfn << 11));
+		ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11);
 	}
 
 	if (!ql2xdbwr) {
-		ha->nxdb_wr_ptr =
-		    (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+		ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) +
 		    (ha->pdev->devfn << 12)), 4);
 		if (!ha->nxdb_wr_ptr) {
 			ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
@@ -1694,10 +1694,10 @@
 		/* Mapping of IO base pointer,
 		 * door bell read and write pointer
 		 */
-		ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+		ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) +
 		    (ha->pdev->devfn * 8);
 	} else {
-		ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+		ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ?
 			QLA82XX_CAMRAM_DB1 :
 			QLA82XX_CAMRAM_DB2);
 	}
@@ -1707,12 +1707,12 @@
 	ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
 	    "nx_pci_base=%p iobase=%p "
 	    "max_req_queues=%d msix_count=%d.\n",
-	    (void *)ha->nx_pcibase, ha->iobase,
+	    ha->nx_pcibase, ha->iobase,
 	    ha->max_req_queues, ha->msix_count);
 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
 	    "nx_pci_base=%p iobase=%p "
 	    "max_req_queues=%d msix_count=%d.\n",
-	    (void *)ha->nx_pcibase, ha->iobase,
+	    ha->nx_pcibase, ha->iobase,
 	    ha->max_req_queues, ha->msix_count);
 	return 0;
 
@@ -1740,8 +1740,8 @@
 	ret = pci_set_mwi(ha->pdev);
 	ha->chip_revision = ha->pdev->revision;
 	ql_dbg(ql_dbg_init, vha, 0x0043,
-	    "Chip revision:%d.\n",
-	    ha->chip_revision);
+	    "Chip revision:%d; pci_set_mwi() returned %d.\n",
+	    ha->chip_revision, ret);
 	return 0;
 }
 
@@ -1768,8 +1768,8 @@
 
 	/* Setup ring parameters in initialization control block. */
 	icb = (struct init_cb_81xx *)ha->init_cb;
-	icb->request_q_outpointer = __constant_cpu_to_le16(0);
-	icb->response_q_inpointer = __constant_cpu_to_le16(0);
+	icb->request_q_outpointer = cpu_to_le16(0);
+	icb->response_q_inpointer = cpu_to_le16(0);
 	icb->request_q_length = cpu_to_le16(req->length);
 	icb->response_q_length = cpu_to_le16(rsp->length);
 	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -1777,9 +1777,9 @@
 	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
-	WRT_REG_DWORD((unsigned long  __iomem *)&reg->req_q_out[0], 0);
-	WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_in[0], 0);
-	WRT_REG_DWORD((unsigned long  __iomem *)&reg->rsp_q_out[0], 0);
+	WRT_REG_DWORD(&reg->req_q_out[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
 }
 
 static int
@@ -2298,7 +2298,7 @@
 	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
 }
 
-inline void
+static inline void
 qla82xx_set_idc_version(scsi_qla_host_t *vha)
 {
 	int idc_ver;
@@ -2481,14 +2481,12 @@
 		ql_log(ql_log_info, vha, 0x00a5,
 		    "Firmware loaded successfully from binary blob.\n");
 		return QLA_SUCCESS;
-	} else {
-		ql_log(ql_log_fatal, vha, 0x00a6,
-		    "Firmware load failed for binary blob.\n");
-		blob->fw = NULL;
-		blob = NULL;
-		goto fw_load_failed;
 	}
-	return QLA_SUCCESS;
+
+	ql_log(ql_log_fatal, vha, 0x00a6,
+	       "Firmware load failed for binary blob.\n");
+	blob->fw = NULL;
+	blob = NULL;
 
 fw_load_failed:
 	return QLA_FUNCTION_FAILED;
@@ -2549,7 +2547,7 @@
 			    "Do ROM fast read failed.\n");
 			goto done_read;
 		}
-		dwptr[i] = __constant_cpu_to_le32(val);
+		dwptr[i] = cpu_to_le32(val);
 	}
 done_read:
 	return dwptr;
@@ -2671,7 +2669,7 @@
 {
 	int ret;
 	uint32_t liter;
-	uint32_t sec_mask, rest_addr;
+	uint32_t rest_addr;
 	dma_addr_t optrom_dma;
 	void *optrom = NULL;
 	int page_mode = 0;
@@ -2693,7 +2691,6 @@
 	}
 
 	rest_addr = ha->fdt_block_size - 1;
-	sec_mask = ~rest_addr;
 
 	ret = qla82xx_unprotect_flash(ha);
 	if (ret) {
@@ -2789,7 +2786,6 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
-	struct device_reg_82xx __iomem *reg;
 	uint32_t dbval;
 
 	/* Adjust ring index. */
@@ -2800,18 +2796,16 @@
 	} else
 		req->ring_ptr++;
 
-	reg = &ha->iobase->isp82;
 	dbval = 0x04 | (ha->portnum << 5);
 
 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
 	if (ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+		qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
 	else {
-		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 		wmb();
-		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD((unsigned long  __iomem *)ha->nxdb_wr_ptr,
-				dbval);
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 			wmb();
 		}
 	}
@@ -3842,8 +3836,7 @@
 	loop_cnt = ocm_hdr->op_count;
 
 	for (i = 0; i < loop_cnt; i++) {
-		r_value = RD_REG_DWORD((void __iomem *)
-		    (r_addr + ha->nx_pcibase));
+		r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
 		*data_ptr++ = cpu_to_le32(r_value);
 		r_addr += r_stride;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 000c57e..007192d 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -462,12 +462,11 @@
 static void
 qla8044_flash_unlock(scsi_qla_host_t *vha)
 {
-	int ret_val;
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Reading FLASH_UNLOCK register unlocks the Flash */
 	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
-	ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
 }
 
 
@@ -561,7 +560,7 @@
 	return buf;
 }
 
-inline int
+static inline int
 qla8044_need_reset(struct scsi_qla_host *vha)
 {
 	uint32_t drv_state, drv_active;
@@ -1130,9 +1129,9 @@
 	}
 
 	for (i = 0; i < count; i++, addr += 16) {
-		if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+		if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
 		    QLA8044_ADDR_QDR_NET_MAX)) ||
-		    (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+		    (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
 			QLA8044_ADDR_DDR_NET_MAX)))) {
 			ret_val = QLA_FUNCTION_FAILED;
 			goto exit_ms_mem_write_unlock;
@@ -1605,7 +1604,7 @@
 	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
 }
 
-inline void
+static inline void
 qla8044_set_rst_ready(struct scsi_qla_host *vha)
 {
 	uint32_t drv_state;
@@ -2992,7 +2991,7 @@
 	uint32_t addr1, addr2, value, data, temp, wrVal;
 	uint8_t stride, stride2;
 	uint16_t count;
-	uint32_t poll, mask, data_size, modify_mask;
+	uint32_t poll, mask, modify_mask;
 	uint32_t wait_count = 0;
 
 	uint32_t *data_ptr = *d_ptr;
@@ -3009,7 +3008,6 @@
 	poll = rddfe->poll;
 	mask = rddfe->mask;
 	modify_mask = rddfe->modify_mask;
-	data_size = rddfe->data_size;
 
 	addr2 = addr1 + stride;
 
@@ -3091,7 +3089,7 @@
 	uint8_t stride1, stride2;
 	uint32_t addr3, addr4, addr5, addr6, addr7;
 	uint16_t count, loop_cnt;
-	uint32_t poll, mask;
+	uint32_t mask;
 	uint32_t *data_ptr = *d_ptr;
 
 	struct qla8044_minidump_entry_rdmdio *rdmdio;
@@ -3105,7 +3103,6 @@
 	stride2 = rdmdio->stride_2;
 	count = rdmdio->count;
 
-	poll = rdmdio->poll;
 	mask = rdmdio->mask;
 	value2 = rdmdio->value_2;
 
@@ -3164,7 +3161,7 @@
 static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
 		struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
 {
-	uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+	uint32_t addr1, addr2, value1, value2, poll, r_value;
 	uint32_t wait_count = 0;
 	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
 
@@ -3175,7 +3172,6 @@
 	value2 = pollwr_hdr->value_2;
 
 	poll = pollwr_hdr->poll;
-	mask = pollwr_hdr->mask;
 
 	while (wait_count < poll) {
 		qla8044_rd_reg_indirect(vha, addr1, &r_value);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index ada3605..02fe3c4 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -58,8 +58,10 @@
 #define QLA8044_PCI_QDR_NET_MAX		((unsigned long)0x043fffff)
 
 /*  PCI Windowing for DDR regions.  */
-#define QLA8044_ADDR_IN_RANGE(addr, low, high)		\
-	(((addr) <= (high)) && ((addr) >= (low)))
+static inline bool addr_in_range(u64 addr, u64 low, u64 high)
+{
+	return addr <= high && addr >= low;
+}
 
 /* Indirectly Mapped Registers */
 #define QLA8044_FLASH_SPI_STATUS	0x2808E010
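
For illustration only, not part of the patch: the standalone sketch below shows why the QLA8044_ADDR_IN_RANGE / QLA82XX_ADDR_IN_RANGE macros removed above are better expressed as the shared addr_in_range() inline. The inline gets real parameter types and evaluates each argument exactly once, while the macro may expand an argument twice. Every name below other than addr_in_range() is invented for the demo.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Old style: textual substitution, arguments may be evaluated twice. */
#define ADDR_IN_RANGE(addr, low, high) \
	(((addr) <= (high)) && ((addr) >= (low)))

/* New style: typed parameters, each evaluated exactly once. */
static inline bool addr_in_range(uint64_t addr, uint64_t low, uint64_t high)
{
	return addr <= high && addr >= low;
}

/* Helper with a visible side effect, to expose the double evaluation. */
static uint64_t next_addr(uint64_t *cursor)
{
	return (*cursor)++;
}

int main(void)
{
	uint64_t a = 0x1000, b = 0x1000;

	(void)ADDR_IN_RANGE(next_addr(&a), 0x0, 0x2000); /* cursor advances twice */
	(void)addr_in_range(next_addr(&b), 0x0, 0x2000); /* cursor advances once  */

	printf("macro left cursor at %#llx, inline at %#llx\n",
	       (unsigned long long)a, (unsigned long long)b);
	return 0;
}
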
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8a5cac8..c2dd17b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -656,7 +656,7 @@
 		    "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
 		    sp, GET_CMD_SP(sp));
 		if (ql2xextended_error_logging & ql_dbg_io)
-			BUG();
+			WARN_ON(atomic_read(&sp->ref_count) == 0);
 		return;
 	}
 	if (!atomic_dec_and_test(&sp->ref_count))
@@ -958,8 +958,8 @@
 	}
 
 	ql_dbg(ql_dbg_taskm, vha, 0x8002,
-	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n",
-	    vha->host_no, id, lun, sp, cmd);
+	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
+	    vha->host_no, id, lun, sp, cmd, sp->handle);
 
 	/* Get a reference to the sp and drop the lock.*/
 	sp_get(sp);
@@ -967,14 +967,9 @@
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	rval = ha->isp_ops->abort_command(sp);
 	if (rval) {
-		if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
-			/*
-			 * Decrement the ref_count since we can't find the
-			 * command
-			 */
-			atomic_dec(&sp->ref_count);
+		if (rval == QLA_FUNCTION_PARAMETER_ERROR)
 			ret = SUCCESS;
-		} else
+		else
 			ret = FAILED;
 
 		ql_dbg(ql_dbg_taskm, vha, 0x8003,
@@ -986,12 +981,6 @@
 	}
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	/*
-	 * Clear the slot in the oustanding_cmds array if we can't find the
-	 * command to reclaim the resources.
-	 */
-	if (rval == QLA_FUNCTION_PARAMETER_ERROR)
-		vha->req->outstanding_cmds[sp->handle] = NULL;
 	sp->done(ha, sp, 0);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2219,6 +2208,13 @@
 		ha->device_type |= DT_IIDMA;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2261:
+		ha->device_type |= DT_ISP2261;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	}
 
 	if (IS_QLA82XX(ha))
@@ -2296,7 +2292,8 @@
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
 	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
-	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
+	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
 		mem_only = 1;
 		ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2974,7 +2971,6 @@
 static void
 qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
 {
-	struct Scsi_Host *scsi_host;
 	scsi_qla_host_t *vha;
 	unsigned long flags;
 
@@ -2985,7 +2981,7 @@
 		BUG_ON(base_vha->list.next == &ha->vp_list);
 		/* This assumes first entry in ha->vp_list is always base vha */
 		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
-		scsi_host = scsi_host_get(vha->host);
+		scsi_host_get(vha->host);
 
 		spin_unlock_irqrestore(&ha->vport_slock, flags);
 		mutex_unlock(&ha->vport_lock);
@@ -3275,9 +3271,10 @@
 	if (!do_login)
 		return;
 
+	set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+
 	if (fcport->login_retry == 0) {
 		fcport->login_retry = vha->hw->login_retry_count;
-		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
 
 		ql_dbg(ql_dbg_disc, vha, 0x2067,
 		    "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
@@ -4801,7 +4798,6 @@
 static int
 qla2x00_do_dpc(void *data)
 {
-	int		rval;
 	scsi_qla_host_t *base_vha;
 	struct qla_hw_data *ha;
 
@@ -5033,7 +5029,7 @@
 			if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
 			    &base_vha->dpc_flags))) {
 
-				rval = qla2x00_loop_resync(base_vha);
+				qla2x00_loop_resync(base_vha);
 
 				clear_bit(LOOP_RESYNC_ACTIVE,
 						&base_vha->dpc_flags);
@@ -5717,6 +5713,7 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
 	{ 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 2feb5f3..3272ed5b 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -316,7 +316,7 @@
 
 	wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
 	stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
-	    __constant_cpu_to_le16(0x1234), 100000);
+					    cpu_to_le16(0x1234), 100000);
 	wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
 	if (stat != QLA_SUCCESS || wprot != 0x1234) {
 		/* Write enable. */
@@ -691,9 +691,9 @@
 	region = (struct qla_flt_region *)&flt[1];
 	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    flt_addr << 2, OPTROM_BURST_SIZE);
-	if (*wptr == __constant_cpu_to_le16(0xffff))
+	if (*wptr == cpu_to_le16(0xffff))
 		goto no_flash_data;
-	if (flt->version != __constant_cpu_to_le16(1)) {
+	if (flt->version != cpu_to_le16(1)) {
 		ql_log(ql_log_warn, vha, 0x0047,
 		    "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
 		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -892,7 +892,7 @@
 	fdt = (struct qla_fdt_layout *)req->ring;
 	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
-	if (*wptr == __constant_cpu_to_le16(0xffff))
+	if (*wptr == cpu_to_le16(0xffff))
 		goto no_flash_data;
 	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
 	    fdt->sig[3] != 'D')
@@ -991,7 +991,7 @@
 	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 		QLA82XX_IDC_PARAM_ADDR , 8);
 
-	if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+	if (*wptr == cpu_to_le32(0xffffffff)) {
 		ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
 		ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
 	} else {
@@ -1051,9 +1051,9 @@
 
 	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
 	    ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
-	if (hdr.version == __constant_cpu_to_le16(0xffff))
+	if (hdr.version == cpu_to_le16(0xffff))
 		return;
-	if (hdr.version != __constant_cpu_to_le16(1)) {
+	if (hdr.version != cpu_to_le16(1)) {
 		ql_dbg(ql_dbg_user, vha, 0x7090,
 		    "Unsupported NPIV-Config "
 		    "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 58651ec..75514a1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1141,7 +1141,7 @@
 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
 		nack->u.isp24.flags = ntfy->u.isp24.flags &
-			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
 	}
 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
 	nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1199,7 +1199,7 @@
 	resp->sof_type = abts->sof_type;
 	resp->exchange_address = abts->exchange_address;
 	resp->fcp_hdr_le = abts->fcp_hdr_le;
-	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
 	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
 	    F_CTL_SEQ_INITIATIVE);
 	p = (uint8_t *)&f_ctl;
@@ -1274,15 +1274,14 @@
 	ctio->entry_count = 1;
 	ctio->nport_handle = entry->nport_handle;
 	ctio->handle = QLA_TGT_SKIP_HANDLE |	CTIO_COMPLETION_HANDLE_MARK;
-	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = vha->vp_idx;
 	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
 	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
 	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
 	ctio->exchange_addr = entry->exchange_addr_to_abort;
-	ctio->u.status1.flags =
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
-		CTIO7_FLAGS_TERMINATE);
+	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+					    CTIO7_FLAGS_TERMINATE);
 	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
 
 	/* Memory Barrier */
@@ -1522,20 +1521,19 @@
 	ctio->entry_count = 1;
 	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
 	ctio->nport_handle = mcmd->sess->loop_id;
-	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = ha->vp_idx;
 	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	ctio->exchange_addr = atio->u.isp24.exchange_addr;
 	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
-		CTIO7_FLAGS_SEND_STATUS);
+	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
 	ctio->u.status1.ox_id = cpu_to_le16(temp);
 	ctio->u.status1.scsi_status =
-	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
-	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+	ctio->u.status1.response_len = cpu_to_le16(8);
 	ctio->u.status1.sense_data[0] = resp_code;
 
 	/* Memory Barrier */
@@ -1786,7 +1784,7 @@
 
 	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
 	pkt->nport_handle = prm->cmd->loop_id;
-	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -2087,10 +2085,9 @@
 {
 	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
 	    (uint32_t)sizeof(ctio->u.status1.sense_data));
-	ctio->u.status0.flags |=
-	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
 	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
-		ctio->u.status0.flags |= __constant_cpu_to_le16(
+		ctio->u.status0.flags |= cpu_to_le16(
 		    CTIO7_FLAGS_EXPLICIT_CONFORM |
 		    CTIO7_FLAGS_CONFORM_REQ);
 	}
@@ -2107,17 +2104,17 @@
 				    "non GOOD status\n");
 				goto skip_explict_conf;
 			}
-			ctio->u.status1.flags |= __constant_cpu_to_le16(
+			ctio->u.status1.flags |= cpu_to_le16(
 			    CTIO7_FLAGS_EXPLICIT_CONFORM |
 			    CTIO7_FLAGS_CONFORM_REQ);
 		}
 skip_explict_conf:
 		ctio->u.status1.flags &=
-		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 		ctio->u.status1.flags |=
-		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 		ctio->u.status1.scsi_status |=
-		    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+		    cpu_to_le16(SS_SENSE_LEN_VALID);
 		ctio->u.status1.sense_length =
 		    cpu_to_le16(prm->sense_buffer_len);
 		for (i = 0; i < prm->sense_buffer_len/4; i++)
@@ -2137,9 +2134,9 @@
 #endif
 	} else {
 		ctio->u.status1.flags &=
-		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 		ctio->u.status1.flags |=
-		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 		ctio->u.status1.sense_length = 0;
 		memset(ctio->u.status1.sense_data, 0,
 		    sizeof(ctio->u.status1.sense_data));
@@ -2261,7 +2258,6 @@
 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 {
 	uint32_t		*cur_dsd;
-	int			sgc;
 	uint32_t		transfer_length = 0;
 	uint32_t		data_bytes;
 	uint32_t		dif_bytes;
@@ -2278,7 +2274,6 @@
 	struct atio_from_isp *atio = &prm->cmd->atio;
 	uint16_t t16;
 
-	sgc = 0;
 	ha = vha->hw;
 
 	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
@@ -2368,7 +2363,7 @@
 
 	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
 	pkt->nport_handle = prm->cmd->loop_id;
-	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -2384,9 +2379,9 @@
 
 	/* Set transfer direction */
 	if (cmd->dma_data_direction == DMA_TO_DEVICE)
-		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
 	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
-		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
 
 
 	pkt->dseg_count = prm->tot_dsds;
@@ -2438,11 +2433,11 @@
 	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
 	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
-	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 
 
 	/* Walks data segments */
-	pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
 
 	if (!bundling && prm->prot_seg_cnt) {
 		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
@@ -2548,7 +2543,7 @@
 
 	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
 		pkt->u.status0.flags |=
-		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
 			CTIO7_FLAGS_STATUS_MODE_0);
 
 		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2560,11 +2555,11 @@
 				    cpu_to_le16(prm.rq_result);
 				pkt->u.status0.residual =
 				    cpu_to_le32(prm.residual);
-				pkt->u.status0.flags |= __constant_cpu_to_le16(
+				pkt->u.status0.flags |= cpu_to_le16(
 				    CTIO7_FLAGS_SEND_STATUS);
 				if (qlt_need_explicit_conf(ha, cmd, 0)) {
 					pkt->u.status0.flags |=
-					    __constant_cpu_to_le16(
+					    cpu_to_le16(
 						CTIO7_FLAGS_EXPLICIT_CONFORM |
 						CTIO7_FLAGS_CONFORM_REQ);
 				}
@@ -2592,12 +2587,12 @@
 			ctio->entry_count = 1;
 			ctio->entry_type = CTIO_TYPE7;
 			ctio->dseg_count = 0;
-			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+			ctio->u.status1.flags &= ~cpu_to_le16(
 			    CTIO7_FLAGS_DATA_IN);
 
 			/* Real finish is ctio_m1's finish */
 			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
-			pkt->u.status0.flags |= __constant_cpu_to_le16(
+			pkt->u.status0.flags |= cpu_to_le16(
 			    CTIO7_FLAGS_DONT_RET_CTIO);
 
 			/* qlt_24xx_init_ctio_to_isp will correct
@@ -2687,7 +2682,7 @@
 	}
 
 	pkt = (struct ctio7_to_24xx *)prm.pkt;
-	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
 	    CTIO7_FLAGS_STATUS_MODE_0);
 
 	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2762,7 +2757,7 @@
 
 		/* Update protection tag */
 		if (cmd->prot_sg_cnt) {
-			uint32_t i, j = 0, k = 0, num_ent;
+			uint32_t i, k = 0, num_ent;
 			struct scatterlist *sg, *sgl;
 
 
@@ -2775,7 +2770,6 @@
 					k += num_ent;
 					continue;
 				}
-				j = blocks_done - k - 1;
 				k = blocks_done;
 				break;
 			}
@@ -2969,14 +2963,14 @@
 	ctio24 = (struct ctio7_to_24xx *)pkt;
 	ctio24->entry_type = CTIO_TYPE7;
 	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
-	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio24->vp_index = vha->vp_idx;
 	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
 		CTIO7_FLAGS_TERMINATE);
 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
 	ctio24->u.status1.ox_id = cpu_to_le16(temp);
@@ -3216,7 +3210,7 @@
 	if (ctio != NULL) {
 		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
 		term = !(c->flags &
-		    __constant_cpu_to_le16(OF_TERM_EXCH));
+		    cpu_to_le16(OF_TERM_EXCH));
 	} else
 		term = 1;
 
@@ -3364,7 +3358,6 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct se_cmd *se_cmd;
-	const struct target_core_fabric_ops *tfo;
 	struct qla_tgt_cmd *cmd;
 
 	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
@@ -3382,7 +3375,6 @@
 		return;
 
 	se_cmd = &cmd->se_cmd;
-	tfo = se_cmd->se_tfo;
 	cmd->cmd_sent_to_fw = 0;
 
 	qlt_unmap_sg(vha, cmd);
@@ -3480,13 +3472,9 @@
 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
 		cmd->cmd_flags |= BIT_12;
 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-		int rx_status = 0;
-
 		cmd->state = QLA_TGT_STATE_DATA_IN;
 
-		if (unlikely(status != CTIO_SUCCESS))
-			rx_status = -EIO;
-		else
+		if (status == CTIO_SUCCESS)
 			cmd->write_data_transferred = 1;
 
 		ha->tgt.tgt_ops->handle_data(cmd);
@@ -3928,12 +3916,11 @@
 	struct qla_tgt *tgt;
 	struct qla_tgt_sess *sess;
 	uint32_t lun, unpacked_lun;
-	int lun_size, fn;
+	int fn;
 
 	tgt = vha->vha_tgt.qla_tgt;
 
 	lun = a->u.isp24.fcp_cmnd.lun;
-	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
 	    a->u.isp24.fcp_hdr.s_id);
@@ -4578,16 +4565,20 @@
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;
 
+#ifndef __CHECKER__
 	if (!ha_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
 	    NOTIFY_ACK_SRR_FLAGS_REJECT,
 	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
 	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
 
+#ifndef __CHECKER__
 	if (!ha_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#endif
 
 	kfree(imm);
 }
@@ -4931,14 +4922,14 @@
 	ctio24 = (struct ctio7_to_24xx *)pkt;
 	ctio24->entry_type = CTIO_TYPE7;
 	ctio24->nport_handle = sess->loop_id;
-	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio24->vp_index = vha->vp_idx;
 	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
-	    __constant_cpu_to_le16(
+	    cpu_to_le16(
 		CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
 		CTIO7_FLAGS_DONT_RET_CTIO);
 	/*
@@ -5266,7 +5257,7 @@
 		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
 		int rc;
 		if (atio->u.isp2x.status !=
-		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+		    cpu_to_le16(ATIO_CDB_VALID)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
 			    "qla_target(%d): ATIO with error "
 			    "status %x received\n", vha->vp_idx,
@@ -5340,7 +5331,7 @@
 			    le16_to_cpu(entry->u.isp2x.status));
 			tgt->notify_ack_expected--;
 			if (entry->u.isp2x.status !=
-			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
 				ql_dbg(ql_dbg_tgt, vha, 0xe061,
 				    "qla_target(%d): NOTIFY_ACK "
 				    "failed %x\n", vha->vp_idx,
@@ -5659,7 +5650,7 @@
 	uint8_t *s_id = NULL; /* to hide compiler warnings */
 	int rc;
 	uint32_t lun, unpacked_lun;
-	int lun_size, fn;
+	int fn;
 	void *iocb;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5691,7 +5682,6 @@
 
 	iocb = a;
 	lun = a->u.isp24.fcp_cmnd.lun;
-	lun_size = sizeof(lun);
 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 
@@ -6215,19 +6205,19 @@
 			ha->tgt.saved_set = 1;
 		}
 
-		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+		nv->exchange_count = cpu_to_le16(0xFFFF);
 
 		/* Enable target mode */
-		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
 
 		/* Disable ini mode, if requested */
 		if (!qla_ini_mode_enabled(vha))
-			nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
 
 		/* Disable Full Login after LIP */
-		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
 		/* Enable initial LIP */
-		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
 		if (ql2xtgt_tape_enable)
 			/* Enable FC Tape support */
 			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6236,9 +6226,9 @@
 			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
 
 		/* Disable Full Login after LIP */
-		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+		nv->host_p &= cpu_to_le32(~BIT_10);
 		/* Enable target PRLI control */
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
 	} else {
 		if (ha->tgt.saved_set) {
 			nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6260,12 +6250,12 @@
 			fc_host_supported_classes(vha->host) =
 				FC_COS_CLASS2 | FC_COS_CLASS3;
 
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
 	} else {
 		if (vha->flags.init_done)
 			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
 
-		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
 	}
 }
 
@@ -6277,7 +6267,7 @@
 
 	if (ha->tgt.node_name_set) {
 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
-		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
 	}
 }
 
@@ -6302,20 +6292,19 @@
 			ha->tgt.saved_set = 1;
 		}
 
-		nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+		nv->exchange_count = cpu_to_le16(0xFFFF);
 
 		/* Enable target mode */
-		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+		nv->firmware_options_1 |= cpu_to_le32(BIT_4);
 
 		/* Disable ini mode, if requested */
 		if (!qla_ini_mode_enabled(vha))
-			nv->firmware_options_1 |=
-			    __constant_cpu_to_le32(BIT_5);
+			nv->firmware_options_1 |= cpu_to_le32(BIT_5);
 
 		/* Disable Full Login after LIP */
-		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
 		/* Enable initial LIP */
-		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+		nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
 		if (ql2xtgt_tape_enable)
 			/* Enable FC tape support */
 			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6324,9 +6313,9 @@
 			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
 
 		/* Disable Full Login after LIP */
-		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+		nv->host_p &= cpu_to_le32(~BIT_10);
 		/* Enable target PRLI control */
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
 	} else {
 		if (ha->tgt.saved_set) {
 			nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6348,12 +6337,12 @@
 			fc_host_supported_classes(vha->host) =
 				FC_COS_CLASS2 | FC_COS_CLASS3;
 
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
 	} else {
 		if (vha->flags.init_done)
 			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
 
-		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
 	}
 }
 
@@ -6368,7 +6357,7 @@
 
 	if (ha->tgt.node_name_set) {
 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
-		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
 	}
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 962cb89..ddbe2e7 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -137,39 +137,39 @@
 }
 
 static inline void
-qla27xx_read8(void *window, void *buf, ulong *len)
+qla27xx_read8(void __iomem *window, void *buf, ulong *len)
 {
 	uint8_t value = ~0;
 
 	if (buf) {
-		value = RD_REG_BYTE((__iomem void *)window);
+		value = RD_REG_BYTE(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }
 
 static inline void
-qla27xx_read16(void *window, void *buf, ulong *len)
+qla27xx_read16(void __iomem *window, void *buf, ulong *len)
 {
 	uint16_t value = ~0;
 
 	if (buf) {
-		value = RD_REG_WORD((__iomem void *)window);
+		value = RD_REG_WORD(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }
 
 static inline void
-qla27xx_read32(void *window, void *buf, ulong *len)
+qla27xx_read32(void __iomem *window, void *buf, ulong *len)
 {
 	uint32_t value = ~0;
 
 	if (buf) {
-		value = RD_REG_DWORD((__iomem void *)window);
+		value = RD_REG_DWORD(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }
 
-static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
 {
 	return
 	    (width == 1) ? qla27xx_read8 :
@@ -181,7 +181,7 @@
 qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
 	uint offset, void *buf, ulong *len)
 {
-	void *window = (void *)reg + offset;
+	void __iomem *window = (void __iomem *)reg + offset;
 
 	qla27xx_read32(window, buf, len);
 }
@@ -202,8 +202,8 @@
 	uint32_t addr, uint offset, uint count, uint width, void *buf,
 	ulong *len)
 {
-	void *window = (void *)reg + offset;
-	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+	void __iomem *window = (void __iomem *)reg + offset;
+	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
 
 	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
 	while (count--) {
@@ -805,9 +805,8 @@
 qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
 {
 	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
-	int rval = 0;
 
-	rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
 	    v+0, v+1, v+2, v+3, v+4, v+5);
 
 	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
@@ -940,8 +939,10 @@
 {
 	ulong flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+#endif
 
 	if (!vha->hw->fw_dump)
 		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
@@ -954,6 +955,8 @@
 	else
 		qla27xx_execute_fwdt_template(vha);
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
 }
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2ed9ab9..6d31faa 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.18-k"
+#define QLA2XXX_VERSION      "8.07.00.26-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 9224a06..7ed7bae 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -420,6 +420,12 @@
 
 static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
 {
+	if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+		struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+		return cmd->state;
+	}
+
 	return 0;
 }
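
For illustration only, not part of the patch: the tcm_qla2xxx_get_cmd_state() change above recovers the driver's command from the embedded se_cmd with container_of(). The toy structures below (the *_stub names are invented) show how that walk from an embedded member back to its enclosing structure works.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd_stub {			/* plays the role of struct se_cmd */
	int flags;
};

struct tgt_cmd_stub {			/* plays the role of struct qla_tgt_cmd */
	int state;
	struct se_cmd_stub se_cmd;	/* embedded member */
};

static int get_cmd_state(struct se_cmd_stub *se_cmd)
{
	/* Subtract the member offset to get back to the enclosing command. */
	struct tgt_cmd_stub *cmd =
		container_of(se_cmd, struct tgt_cmd_stub, se_cmd);

	return cmd->state;
}

int main(void)
{
	struct tgt_cmd_stub cmd = { .state = 3 };

	printf("state = %d\n", get_cmd_state(&cmd.se_cmd));	/* prints 3 */
	return 0;
}
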
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6457a8a..afd34a6 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -420,6 +420,10 @@
 			evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
 			sdev_printk(KERN_WARNING, sdev,
 				    "Mode parameters changed");
+		} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
+			evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
+			sdev_printk(KERN_WARNING, sdev,
+				    "Asymmetric access state changed");
 		} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
 			evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
 			sdev_printk(KERN_WARNING, sdev,
@@ -1155,8 +1159,13 @@
 	struct Scsi_Host *shost;
 	int rtn;
 
+	/*
+	 * If SCSI_EH_ABORT_SCHEDULED has been set, this is a timed-out
+	 * command and we should not request sense for it.
+	 */
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
+		    (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
 		    SCSI_SENSE_VALID(scmd))
 			continue;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 448ebda..882864f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2423,7 +2423,7 @@
 	unsigned char cmd[12];
 	int use_10_for_ms;
 	int header_length;
-	int result;
+	int result, retry_count = retries;
 	struct scsi_sense_hdr my_sshdr;
 
 	memset(data, 0, sizeof(*data));
@@ -2502,6 +2502,11 @@
 			data->block_descriptor_length = buffer[3];
 		}
 		data->header_length = header_length;
+	} else if ((status_byte(result) == CHECK_CONDITION) &&
+		   scsi_sense_valid(sshdr) &&
+		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
+		retry_count--;
+		goto retry;
 	}
 
 	return result;
@@ -2707,6 +2712,9 @@
 	case SDEV_EVT_LUN_CHANGE_REPORTED:
 		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
 		break;
+	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
+		break;
 	default:
 		/* do nothing */
 		break;
@@ -2810,6 +2818,7 @@
 	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
 	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
 	case SDEV_EVT_LUN_CHANGE_REPORTED:
+	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
 	default:
 		/* do nothing */
 		break;
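
For illustration only, not part of the patch and not the kernel API: the sketch below, built around an invented device model, mirrors the retry logic the scsi_mode_sense() hunk adds above. A CHECK CONDITION carrying UNIT ATTENTION is treated as transient, so the command is reissued until a small retry budget runs out.

#include <stdbool.h>
#include <stdio.h>

enum status { STATUS_GOOD, STATUS_UNIT_ATTENTION, STATUS_ERROR };

/* Pretend device: reports UNIT ATTENTION exactly once after a reset. */
static bool pending_unit_attention = true;

static enum status issue_mode_sense(void)
{
	if (pending_unit_attention) {
		pending_unit_attention = false;
		return STATUS_UNIT_ATTENTION;
	}
	return STATUS_GOOD;
}

static enum status mode_sense_with_retries(int retries)
{
	int retry_count = retries;
	enum status result;

retry:
	result = issue_mode_sense();
	if (result == STATUS_UNIT_ATTENTION && retry_count) {
		retry_count--;
		goto retry;	/* transient condition: try again */
	}
	return result;
}

int main(void)
{
	/* First attempt hits the unit attention, the retry succeeds. */
	printf("result = %d\n", mode_sense_with_retries(3)); /* prints 0 */
	return 0;
}
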
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 55647aa..e4b3d8f 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2042,6 +2042,7 @@
 	session->transport = transport;
 	session->creator = -1;
 	session->recovery_tmo = 120;
+	session->recovery_tmo_sysfs_override = false;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
 	INIT_LIST_HEAD(&session->sess_list);
@@ -2786,7 +2787,8 @@
 	switch (ev->u.set_param.param) {
 	case ISCSI_PARAM_SESS_RECOVERY_TMO:
 		sscanf(data, "%d", &value);
-		session->recovery_tmo = value;
+		if (!session->recovery_tmo_sysfs_override)
+			session->recovery_tmo = value;
 		break;
 	default:
 		err = transport->set_param(conn, ev->u.set_param.param,
@@ -3037,7 +3039,7 @@
 
 	shost = scsi_host_lookup(ev->u.get_chap.host_no);
 	if (!shost) {
-		printk(KERN_ERR "%s: failed. Cound not find host no %u\n",
+		printk(KERN_ERR "%s: failed. Could not find host no %u\n",
 		       __func__, ev->u.get_chap.host_no);
 		return -ENODEV;
 	}
@@ -4049,13 +4051,15 @@
 	if ((session->state == ISCSI_SESSION_FREE) ||			\
 	    (session->state == ISCSI_SESSION_FAILED))			\
 		return -EBUSY;						\
-	if (strncmp(buf, "off", 3) == 0)				\
+	if (strncmp(buf, "off", 3) == 0) {				\
 		session->field = -1;					\
-	else {								\
+		session->field##_sysfs_override = true;			\
+	} else {							\
 		val = simple_strtoul(buf, &cp, 0);			\
 		if (*cp != '\0' && *cp != '\n')				\
 			return -EINVAL;					\
 		session->field = val;					\
+		session->field##_sysfs_override = true;			\
 	}								\
 	return count;							\
 }
@@ -4066,6 +4070,7 @@
 static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR,		\
 			show_priv_session_##field,			\
 			store_priv_session_##field)
+
 iscsi_priv_session_rw_attr(recovery_tmo, "%d");
 
 static struct attribute *iscsi_session_attrs[] = {
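
For illustration only, not part of the patch: a minimal sketch with made-up setter names showing the effect of the recovery_tmo_sysfs_override flag introduced above. Once an administrator writes the timeout via sysfs, later values arriving from the transport's set_param path no longer overwrite it.

#include <stdbool.h>
#include <stdio.h>

struct session {
	int  recovery_tmo;
	bool recovery_tmo_sysfs_override;
};

/* Path taken by a sysfs write: the admin's value always wins. */
static void set_tmo_from_sysfs(struct session *s, int value)
{
	s->recovery_tmo = value;
	s->recovery_tmo_sysfs_override = true;
}

/* Path taken by the transport: honoured only while no override is set. */
static void set_tmo_from_transport(struct session *s, int value)
{
	if (!s->recovery_tmo_sysfs_override)
		s->recovery_tmo = value;
}

int main(void)
{
	struct session s = { .recovery_tmo = 120 };

	set_tmo_from_transport(&s, 30);	/* accepted: no override yet   */
	set_tmo_from_sysfs(&s, 600);	/* admin decision, raises flag */
	set_tmo_from_transport(&s, 30);	/* ignored from now on         */

	printf("recovery_tmo = %d\n", s.recovery_tmo);	/* prints 600 */
	return 0;
}
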
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 31bbb0d..319868f 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -786,10 +786,10 @@
 		 * IU, then QAS (if we can control them), then finally
 		 * fall down the periods */
 		if (i->f->set_iu && spi_iu(starget)) {
-			starget_printk(KERN_ERR, starget, "Domain Validation Disabing Information Units\n");
+			starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n");
 			DV_SET(iu, 0);
 		} else if (i->f->set_qas && spi_qas(starget)) {
-			starget_printk(KERN_ERR, starget, "Domain Validation Disabing Quick Arbitration and Selection\n");
+			starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n");
 			DV_SET(qas, 0);
 		} else {
 			newperiod = spi_period(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a20da8c..3f37022 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -647,7 +647,7 @@
 	switch (mode) {
 
 	case SD_LBP_DISABLE:
-		q->limits.max_discard_sectors = 0;
+		blk_queue_max_discard_sectors(q, 0);
 		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
 		return;
 
@@ -675,7 +675,7 @@
 		break;
 	}
 
-	q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 871f355..b37b9b0 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -85,6 +85,7 @@
 
 static struct class st_sysfs_class;
 static const struct attribute_group *st_dev_groups[];
+static const struct attribute_group *st_drv_groups[];
 
 MODULE_AUTHOR("Kai Makisara");
 MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -198,15 +199,13 @@
 static int st_probe(struct device *);
 static int st_remove(struct device *);
 
-static int do_create_sysfs_files(void);
-static void do_remove_sysfs_files(void);
-
 static struct scsi_driver st_template = {
 	.gendrv = {
 		.name		= "st",
 		.owner		= THIS_MODULE,
 		.probe		= st_probe,
 		.remove		= st_remove,
+		.groups		= st_drv_groups,
 	},
 };
 
@@ -4404,14 +4403,8 @@
 	if (err)
 		goto err_chrdev;
 
-	err = do_create_sysfs_files();
-	if (err)
-		goto err_scsidrv;
-
 	return 0;
 
-err_scsidrv:
-	scsi_unregister_driver(&st_template.gendrv);
 err_chrdev:
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
@@ -4422,11 +4415,11 @@
 
 static void __exit exit_st(void)
 {
-	do_remove_sysfs_files();
 	scsi_unregister_driver(&st_template.gendrv);
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
 	class_unregister(&st_sysfs_class);
+	idr_destroy(&st_index_idr);
 	printk(KERN_INFO "st: Unloaded.\n");
 }
 
@@ -4435,68 +4428,38 @@
 
 
 /* The sysfs driver interface. Read-only at the moment */
-static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf)
+static ssize_t try_direct_io_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
 }
-static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL);
+static DRIVER_ATTR_RO(try_direct_io);
 
-static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf)
+static ssize_t fixed_buffer_size_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
 }
-static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL);
+static DRIVER_ATTR_RO(fixed_buffer_size);
 
-static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf)
+static ssize_t max_sg_segs_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
 }
-static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL);
+static DRIVER_ATTR_RO(max_sg_segs);
 
-static ssize_t st_version_show(struct device_driver *ddd, char *buf)
+static ssize_t version_show(struct device_driver *ddd, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
+	return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
 }
-static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
+static DRIVER_ATTR_RO(version);
 
-static int do_create_sysfs_files(void)
-{
-	struct device_driver *sysfs = &st_template.gendrv;
-	int err;
-
-	err = driver_create_file(sysfs, &driver_attr_try_direct_io);
-	if (err)
-		return err;
-	err = driver_create_file(sysfs, &driver_attr_fixed_buffer_size);
-	if (err)
-		goto err_try_direct_io;
-	err = driver_create_file(sysfs, &driver_attr_max_sg_segs);
-	if (err)
-		goto err_attr_fixed_buf;
-	err = driver_create_file(sysfs, &driver_attr_version);
-	if (err)
-		goto err_attr_max_sg;
-
-	return 0;
-
-err_attr_max_sg:
-	driver_remove_file(sysfs, &driver_attr_max_sg_segs);
-err_attr_fixed_buf:
-	driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
-err_try_direct_io:
-	driver_remove_file(sysfs, &driver_attr_try_direct_io);
-	return err;
-}
-
-static void do_remove_sysfs_files(void)
-{
-	struct device_driver *sysfs = &st_template.gendrv;
-
-	driver_remove_file(sysfs, &driver_attr_version);
-	driver_remove_file(sysfs, &driver_attr_max_sg_segs);
-	driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
-	driver_remove_file(sysfs, &driver_attr_try_direct_io);
-}
+static struct attribute *st_drv_attrs[] = {
+	&driver_attr_try_direct_io.attr,
+	&driver_attr_fixed_buffer_size.attr,
+	&driver_attr_max_sg_segs.attr,
+	&driver_attr_version.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(st_drv);
 
 /* The sysfs simple class interface */
 static ssize_t
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3c6584f..40c43ae 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -56,15 +56,18 @@
  * V1 RC > 2008/1/31:  2.0
  * Win7: 4.2
  * Win8: 5.1
+ * Win8.1: 6.0
+ * Win10: 6.2
  */
 
+#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_)	((((MAJOR_) & 0xff) << 8) | \
+						(((MINOR_) & 0xff)))
 
-#define VMSTOR_WIN7_MAJOR 4
-#define VMSTOR_WIN7_MINOR 2
-
-#define VMSTOR_WIN8_MAJOR 5
-#define VMSTOR_WIN8_MINOR 1
-
+#define VMSTOR_PROTO_VERSION_WIN6	VMSTOR_PROTO_VERSION(2, 0)
+#define VMSTOR_PROTO_VERSION_WIN7	VMSTOR_PROTO_VERSION(4, 2)
+#define VMSTOR_PROTO_VERSION_WIN8	VMSTOR_PROTO_VERSION(5, 1)
+#define VMSTOR_PROTO_VERSION_WIN8_1	VMSTOR_PROTO_VERSION(6, 0)
+#define VMSTOR_PROTO_VERSION_WIN10	VMSTOR_PROTO_VERSION(6, 2)
 
 /*  Packet structure describing virtual storage requests. */
 enum vstor_packet_operation {
@@ -148,21 +151,18 @@
 
 /*
  * Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use.
+ * variable to track the size we should use.  This value will
+ * likely change during protocol negotiation but it is valid
+ * to start by assuming pre-Win8.
  */
-static int sense_buffer_size;
+static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
 
 /*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply.
- */
-
-static int vmscsi_size_delta;
-static int vmstor_current_major;
-static int vmstor_current_minor;
+ * The storage protocol version is determined during the
+ * initial exchange with the host.  It will indicate which
+ * storage functionality is available in the host.
+ */
+static int vmstor_proto_version;
 
 struct vmscsi_win8_extension {
 	/*
@@ -207,6 +207,56 @@
 
 
 /*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply. This value
+ * will likely change during protocol negotiation but it is
+ * valid to start by assuming pre-Win8.
+ */
+static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+
+/*
+ * The list of storage protocols in order of preference.
+ */
+struct vmstor_protocol {
+	int protocol_version;
+	int sense_buffer_size;
+	int vmscsi_size_delta;
+};
+
+
+static const struct vmstor_protocol vmstor_protocols[] = {
+	{
+		VMSTOR_PROTO_VERSION_WIN10,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN8_1,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN8,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN7,
+		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+		sizeof(struct vmscsi_win8_extension),
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN6,
+		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+		sizeof(struct vmscsi_win8_extension),
+	}
+};
+
+
+/*
  * This structure is sent during the intialization phase to get the different
  * properties of the channel.
  */
@@ -426,7 +476,6 @@
 	struct storvsc_scan_work *wrk;
 	struct Scsi_Host *host;
 	struct scsi_device *sdev;
-	unsigned long flags;
 
 	wrk = container_of(work, struct storvsc_scan_work, work);
 	host = wrk->host;
@@ -443,14 +492,8 @@
 	 * may have been removed this way.
 	 */
 	mutex_lock(&host->scan_mutex);
-	spin_lock_irqsave(host->host_lock, flags);
-	list_for_each_entry(sdev, &host->__devices, siblings) {
-		spin_unlock_irqrestore(host->host_lock, flags);
+	shost_for_each_device(sdev, host)
 		scsi_test_unit_ready(sdev, 1, 1, NULL);
-		spin_lock_irqsave(host->host_lock, flags);
-		continue;
-	}
-	spin_unlock_irqrestore(host->host_lock, flags);
 	mutex_unlock(&host->scan_mutex);
 	/*
 	 * Now scan the host to discover LUNs that may have been added.
@@ -481,18 +524,6 @@
 	kfree(wrk);
 }
 
-/*
- * Major/minor macros.  Minor version is in LSB, meaning that earlier flat
- * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
- */
-
-static inline u16 storvsc_get_version(u8 major, u8 minor)
-{
-	u16 version;
-
-	version = ((major << 8) | minor);
-	return version;
-}
 
 /*
  * We can get incoming messages from the host that are not in response to
@@ -885,7 +916,7 @@
 	struct storvsc_device *stor_device;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
-	int ret, t;
+	int ret, t, i;
 	int max_chns;
 	bool process_sub_channels = false;
 
@@ -921,41 +952,65 @@
 	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 
-	/* reuse the packet for version range supported */
-	memset(vstor_packet, 0, sizeof(struct vstor_packet));
-	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
-	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+	for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+		/* reuse the packet for version range supported */
+		memset(vstor_packet, 0, sizeof(struct vstor_packet));
+		vstor_packet->operation =
+			VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+		vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
-	vstor_packet->version.major_minor =
-		storvsc_get_version(vmstor_current_major, vmstor_current_minor);
+		vstor_packet->version.major_minor =
+			vmstor_protocols[i].protocol_version;
 
-	/*
-	 * The revision number is only used in Windows; set it to 0.
-	 */
-	vstor_packet->version.revision = 0;
+		/*
+		 * The revision number is only used in Windows; set it to 0.
+		 */
+		vstor_packet->version.revision = 0;
 
-	ret = vmbus_sendpacket(device->channel, vstor_packet,
+		ret = vmbus_sendpacket(device->channel, vstor_packet,
 			       (sizeof(struct vstor_packet) -
 				vmscsi_size_delta),
 			       (unsigned long)request,
 			       VM_PKT_DATA_INBAND,
 			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret != 0)
-		goto cleanup;
+		if (ret != 0)
+			goto cleanup;
 
-	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
-	if (t == 0) {
-		ret = -ETIMEDOUT;
-		goto cleanup;
+		t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+		if (t == 0) {
+			ret = -ETIMEDOUT;
+			goto cleanup;
+		}
+
+		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
+		if (vstor_packet->status == 0) {
+			vmstor_proto_version =
+				vmstor_protocols[i].protocol_version;
+
+			sense_buffer_size =
+				vmstor_protocols[i].sense_buffer_size;
+
+			vmscsi_size_delta =
+				vmstor_protocols[i].vmscsi_size_delta;
+
+			break;
+		}
 	}
 
-	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	if (vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 
 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
@@ -979,8 +1034,10 @@
 	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 	/*
 	 * Check to see if multi-channel support is there.
@@ -988,8 +1045,7 @@
 	 * support multi-channel.
 	 */
 	max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
-	if ((vmbus_proto_version != VERSION_WIN7) &&
-	   (vmbus_proto_version != VERSION_WS2008))  {
+	if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
 		if (vstor_packet->storage_channel_properties.flags &
 		    STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
 			process_sub_channels = true;
@@ -1018,8 +1074,10 @@
 	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 	if (process_sub_channels)
 		handle_multichannel_storage(device, max_chns);
@@ -1428,15 +1486,19 @@
 
 	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
-	 * if the device is a MSFT virtual device.
+	 * if the device is a MSFT virtual device.  If the host is
+	 * WIN10 or newer, allow write_same.
 	 */
 	if (!strncmp(sdevice->vendor, "Msft", 4)) {
-		switch (vmbus_proto_version) {
-		case VERSION_WIN8:
-		case VERSION_WIN8_1:
+		switch (vmstor_proto_version) {
+		case VMSTOR_PROTO_VERSION_WIN8:
+		case VMSTOR_PROTO_VERSION_WIN8_1:
 			sdevice->scsi_level = SCSI_SPC_3;
 			break;
 		}
+
+		if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
+			sdevice->no_write_same = 0;
 	}
 
 	return 0;
@@ -1563,7 +1625,7 @@
 	u32 payload_sz;
 	u32 length;
 
-	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
+	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
 		/*
 		 * On legacy hosts filter unimplemented commands.
 		 * Future hosts are expected to correctly handle
@@ -1598,10 +1660,18 @@
 		vm_srb->data_in = READ_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
 		break;
-	default:
+	case DMA_NONE:
 		vm_srb->data_in = UNKNOWN_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
 		break;
+	default:
+		/*
+		 * This is DMA_BIDIRECTIONAL or something else we are never
+		 * supposed to see here.
+		 */
+		WARN(1, "Unexpected data direction: %d\n",
+		     scmnd->sc_data_direction);
+		return -EINVAL;
 	}
 
 
@@ -1758,22 +1828,11 @@
 	 * set state to properly communicate with the host.
 	 */
 
-	switch (vmbus_proto_version) {
-	case VERSION_WS2008:
-	case VERSION_WIN7:
-		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
-		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
-		vmstor_current_major = VMSTOR_WIN7_MAJOR;
-		vmstor_current_minor = VMSTOR_WIN7_MINOR;
+	if (vmbus_proto_version < VERSION_WIN8) {
 		max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
 		max_targets = STORVSC_IDE_MAX_TARGETS;
 		max_channels = STORVSC_IDE_MAX_CHANNELS;
-		break;
-	default:
-		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
-		vmscsi_size_delta = 0;
-		vmstor_current_major = VMSTOR_WIN8_MAJOR;
-		vmstor_current_minor = VMSTOR_WIN8_MINOR;
+	} else {
 		max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
 		max_targets = STORVSC_MAX_TARGETS;
 		max_channels = STORVSC_MAX_CHANNELS;
@@ -1783,7 +1842,6 @@
 		 * VCPUs in the guest.
 		 */
 		max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
-		break;
 	}
 
 	scsi_driver.can_queue = (max_outstanding_req_per_channel *
diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c
index 61346aa..e3da1a2 100644
--- a/drivers/scsi/wd719x.c
+++ b/drivers/scsi/wd719x.c
@@ -590,7 +590,7 @@
 		dev_dbg(&wd->pdev->dev, "selection timeout\n");
 		break;
 	case WD719X_SUE_RESET:
-		dev_dbg(&wd->pdev->dev, "bus reset occured\n");
+		dev_dbg(&wd->pdev->dev, "bus reset occurred\n");
 		result = DID_RESET;
 		break;
 	case WD719X_SUE_BUSERROR:
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index 46427b4..358df75 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -22,7 +22,7 @@
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
 #ifdef CONFIG_SMP
-		if (!cpumask_test_cpu(cpu, data->affinity))
+		if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
 			continue;
 #endif
 		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
@@ -50,7 +50,7 @@
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
 #ifdef CONFIG_SMP
-		if (!cpumask_test_cpu(cpu, data->affinity))
+		if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data)))
 			continue;
 #endif
 		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
@@ -72,7 +72,7 @@
 	if (!cpumask_intersects(cpumask, cpu_online_mask))
 		return -1;
 
-	cpumask_copy(data->affinity, cpumask);
+	cpumask_copy(irq_data_get_affinity_mask(data), cpumask);
 
 	return IRQ_SET_MASK_OK_NOCOPY;
 }
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 156b790..043419d 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -67,7 +67,7 @@
 
 static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
 {
-	generic_handle_irq((unsigned int)irq_get_handler_data(irq));
+	generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
 }
 
 static void __init intc_register_irq(struct intc_desc *desc,
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index f5f1b82..bafc51c 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -83,12 +83,11 @@
 
 static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
 {
-	struct intc_virq_list **last, *entry;
-	struct irq_data *data = irq_get_irq_data(irq);
+	struct intc_virq_list *entry;
+	struct intc_virq_list **last = NULL;
 
 	/* scan for duplicates */
-	last = (struct intc_virq_list **)&data->handler_data;
-	for_each_virq(entry, data->handler_data) {
+	for_each_virq(entry, irq_get_handler_data(irq)) {
 		if (entry->irq == virq)
 			return 0;
 		last = &entry->next;
@@ -102,14 +101,18 @@
 
 	entry->irq = virq;
 
-	*last = entry;
+	if (last)
+		*last = entry;
+	else
+		irq_set_handler_data(irq, entry);
 
 	return 0;
 }
 
-static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
+static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
 {
-	struct irq_data *data = irq_get_irq_data(irq);
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irq_data *data = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
 	struct intc_desc_int *d = get_intc_desc(irq);
@@ -118,12 +121,14 @@
 
 	for_each_virq(entry, vlist) {
 		unsigned long addr, handle;
+		struct irq_desc *vdesc = irq_to_desc(entry->irq);
 
-		handle = (unsigned long)irq_get_handler_data(entry->irq);
-		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
-
-		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
-			generic_handle_irq(entry->irq);
+		if (vdesc) {
+			handle = (unsigned long)irq_desc_get_handler_data(vdesc);
+			addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
+			if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
+				generic_handle_irq_desc(entry->irq, vdesc);
+		}
 	}
 
 	chip->irq_unmask(data);
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 7dc7c0d..0b12d77 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -2,6 +2,7 @@
 # Makefile for the Linux Kernel SOC specific device drivers.
 #
 
+obj-$(CONFIG_MACH_DOVE)		+= dove/
 obj-$(CONFIG_ARCH_MEDIATEK)	+= mediatek/
 obj-$(CONFIG_ARCH_QCOM)		+= qcom/
 obj-$(CONFIG_ARCH_SUNXI)	+= sunxi/
diff --git a/drivers/soc/dove/Makefile b/drivers/soc/dove/Makefile
new file mode 100644
index 0000000..2db8e65
--- /dev/null
+++ b/drivers/soc/dove/Makefile
@@ -0,0 +1 @@
+obj-y		+= pmu.o
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
new file mode 100644
index 0000000..6792aae
--- /dev/null
+++ b/drivers/soc/dove/pmu.c
@@ -0,0 +1,412 @@
+/*
+ * Marvell Dove PMU support
+ */
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/reset.h>
+#include <linux/reset-controller.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/dove/pmu.h>
+#include <linux/spinlock.h>
+
+#define NR_PMU_IRQS		7
+
+#define PMC_SW_RST		0x30
+#define PMC_IRQ_CAUSE		0x50
+#define PMC_IRQ_MASK		0x54
+
+#define PMU_PWR			0x10
+#define PMU_ISO			0x58
+
+struct pmu_data {
+	spinlock_t lock;
+	struct device_node *of_node;
+	void __iomem *pmc_base;
+	void __iomem *pmu_base;
+	struct irq_chip_generic *irq_gc;
+	struct irq_domain *irq_domain;
+#ifdef CONFIG_RESET_CONTROLLER
+	struct reset_controller_dev reset;
+#endif
+};
+
+/*
+ * The PMU contains a register to reset various subsystems within the
+ * SoC.  Export this as a reset controller.
+ */
+#ifdef CONFIG_RESET_CONTROLLER
+#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)
+
+static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
+{
+	struct pmu_data *pmu = rcdev_to_pmu(rc);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&pmu->lock, flags);
+	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
+	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
+	spin_unlock_irqrestore(&pmu->lock, flags);
+
+	return 0;
+}
+
+static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+	struct pmu_data *pmu = rcdev_to_pmu(rc);
+	unsigned long flags;
+	u32 val = ~BIT(id);
+
+	spin_lock_irqsave(&pmu->lock, flags);
+	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+	spin_unlock_irqrestore(&pmu->lock, flags);
+
+	return 0;
+}
+
+static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
+{
+	struct pmu_data *pmu = rcdev_to_pmu(rc);
+	unsigned long flags;
+	u32 val = BIT(id);
+
+	spin_lock_irqsave(&pmu->lock, flags);
+	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
+	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
+	spin_unlock_irqrestore(&pmu->lock, flags);
+
+	return 0;
+}
+
+static struct reset_control_ops pmu_reset_ops = {
+	.reset = pmu_reset_reset,
+	.assert = pmu_reset_assert,
+	.deassert = pmu_reset_deassert,
+};
+
+static struct reset_controller_dev pmu_reset __initdata = {
+	.ops = &pmu_reset_ops,
+	.owner = THIS_MODULE,
+	.nr_resets = 32,
+};
+
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+	int ret;
+
+	pmu->reset = pmu_reset;
+	pmu->reset.of_node = pmu->of_node;
+
+	ret = reset_controller_register(&pmu->reset);
+	if (ret)
+		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
+}
+#else
+static void __init pmu_reset_init(struct pmu_data *pmu)
+{
+}
+#endif
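+
+/*
+ * A consumer picks up one of these reset lines through the generic reset
+ * framework; a minimal sketch (the device handling is assumed, not taken
+ * from this driver):
+ *
+ *	struct reset_control *rst = devm_reset_control_get(&pdev->dev, NULL);
+ *
+ *	if (!IS_ERR(rst))
+ *		reset_control_reset(rst);
+ *
+ * which ends up pulsing the matching PMC_SW_RST bit via pmu_reset_reset().
+ */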
+
+struct pmu_domain {
+	struct pmu_data *pmu;
+	u32 pwr_mask;
+	u32 rst_mask;
+	u32 iso_mask;
+	struct generic_pm_domain base;
+};
+
+#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)
+
+/*
+ * This deals with the "old" Marvell sequence of bringing a power domain
+ * down/up, which is: apply power, release reset, disable isolators.
+ *
+ * Later devices apparently use a different sequence: power up, disable
+ * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
+ * enable module clock, deassert reset.
+ *
+ * Note: reading the assembly, it seems that the IO accessors have an
+ * unfortunate side-effect - they cause values already read into registers
+ * for the if () test to be re-read for the bit-set or bit-clear operation.
+ * The code is written to avoid this.
+ */
+static int pmu_domain_power_off(struct generic_pm_domain *domain)
+{
+	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+	struct pmu_data *pmu = pmu_dom->pmu;
+	unsigned long flags;
+	unsigned int val;
+	void __iomem *pmu_base = pmu->pmu_base;
+	void __iomem *pmc_base = pmu->pmc_base;
+
+	spin_lock_irqsave(&pmu->lock, flags);
+
+	/* Enable isolators */
+	if (pmu_dom->iso_mask) {
+		val = ~pmu_dom->iso_mask;
+		val &= readl_relaxed(pmu_base + PMU_ISO);
+		writel_relaxed(val, pmu_base + PMU_ISO);
+	}
+
+	/* Reset unit */
+	if (pmu_dom->rst_mask) {
+		val = ~pmu_dom->rst_mask;
+		val &= readl_relaxed(pmc_base + PMC_SW_RST);
+		writel_relaxed(val, pmc_base + PMC_SW_RST);
+	}
+
+	/* Power down */
+	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
+	writel_relaxed(val, pmu_base + PMU_PWR);
+
+	spin_unlock_irqrestore(&pmu->lock, flags);
+
+	return 0;
+}
+
+static int pmu_domain_power_on(struct generic_pm_domain *domain)
+{
+	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
+	struct pmu_data *pmu = pmu_dom->pmu;
+	unsigned long flags;
+	unsigned int val;
+	void __iomem *pmu_base = pmu->pmu_base;
+	void __iomem *pmc_base = pmu->pmc_base;
+
+	spin_lock_irqsave(&pmu->lock, flags);
+
+	/* Power on */
+	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
+	writel_relaxed(val, pmu_base + PMU_PWR);
+
+	/* Release reset */
+	if (pmu_dom->rst_mask) {
+		val = pmu_dom->rst_mask;
+		val |= readl_relaxed(pmc_base + PMC_SW_RST);
+		writel_relaxed(val, pmc_base + PMC_SW_RST);
+	}
+
+	/* Disable isolators */
+	if (pmu_dom->iso_mask) {
+		val = pmu_dom->iso_mask;
+		val |= readl_relaxed(pmu_base + PMU_ISO);
+		writel_relaxed(val, pmu_base + PMU_ISO);
+	}
+
+	spin_unlock_irqrestore(&pmu->lock, flags);
+
+	return 0;
+}
+
+static void __pmu_domain_register(struct pmu_domain *domain,
+	struct device_node *np)
+{
+	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);
+
+	domain->base.power_off = pmu_domain_power_off;
+	domain->base.power_on = pmu_domain_power_on;
+
+	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));
+
+	if (np)
+		of_genpd_add_provider_simple(np, &domain->base);
+}
+
+/* PMU IRQ controller */
+static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct pmu_data *pmu = irq_get_handler_data(irq);
+	struct irq_chip_generic *gc = pmu->irq_gc;
+	struct irq_domain *domain = pmu->irq_domain;
+	void __iomem *base = gc->reg_base;
+	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
+	u32 done = ~0;
+
+	if (stat == 0) {
+		handle_bad_irq(irq, desc);
+		return;
+	}
+
+	while (stat) {
+		u32 hwirq = fls(stat) - 1;
+
+		stat &= ~(1 << hwirq);
+		done &= ~(1 << hwirq);
+
+		generic_handle_irq(irq_find_mapping(domain, hwirq));
+	}
+
+	/*
+	 * The PMU mask register is not RW0C: it is RW.  This means that
+	 * the bits take whatever value is written to them; if you write
+	 * a '1', you will set the interrupt.
+	 *
+	 * Unfortunately this means there is NO race free way to clear
+	 * these interrupts.
+	 *
+	 * So, let's structure the code so that the window is as small as
+	 * possible.
+	 */
+	irq_gc_lock(gc);
+	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
+	writel_relaxed(done, base + PMC_IRQ_CAUSE);
+	irq_gc_unlock(gc);
+}
+
+static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
+{
+	const char *name = "pmu_irq";
+	struct irq_chip_generic *gc;
+	struct irq_domain *domain;
+	int ret;
+
+	/* mask and clear all interrupts */
+	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
+	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);
+
+	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
+				       &irq_generic_chip_ops, NULL);
+	if (!domain) {
+		pr_err("%s: unable to add irq domain\n", name);
+		return -ENOMEM;
+	}
+
+	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
+					     handle_level_irq,
+					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
+					     IRQ_GC_INIT_MASK_CACHE);
+	if (ret) {
+		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
+		irq_domain_remove(domain);
+		return ret;
+	}
+
+	gc = irq_get_domain_generic_chip(domain, 0);
+	gc->reg_base = pmu->pmc_base;
+	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
+	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
+	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+
+	pmu->irq_domain = domain;
+	pmu->irq_gc = gc;
+
+	irq_set_handler_data(irq, pmu);
+	irq_set_chained_handler(irq, pmu_irq_handler);
+
+	return 0;
+}
+
+/*
+ * pmu: power-manager@d0000 {
+ *	compatible = "marvell,dove-pmu";
+ *	reg = <0xd0000 0x8000> <0xd8000 0x8000>;
+ *	interrupts = <33>;
+ *	interrupt-controller;
+ *	#reset-cells = <1>;
+ *	vpu_domain: vpu-domain {
+ *		#power-domain-cells = <0>;
+ *		marvell,pmu_pwr_mask = <0x00000008>;
+ *		marvell,pmu_iso_mask = <0x00000001>;
+ *		resets = <&pmu 16>;
+ *	};
+ *	gpu_domain: gpu-domain {
+ *		#power-domain-cells = <0>;
+ *		marvell,pmu_pwr_mask = <0x00000004>;
+ *		marvell,pmu_iso_mask = <0x00000002>;
+ *		resets = <&pmu 18>;
+ *	};
+ * };
+ */
+int __init dove_init_pmu(void)
+{
+	struct device_node *np_pmu, *domains_node, *np;
+	struct pmu_data *pmu;
+	int ret, parent_irq;
+
+	/* Lookup the PMU node */
+	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
+	if (!np_pmu)
+		return 0;
+
+	domains_node = of_get_child_by_name(np_pmu, "domains");
+	if (!domains_node) {
+		pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+		return 0;
+	}
+
+	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+	if (!pmu)
+		return -ENOMEM;
+
+	spin_lock_init(&pmu->lock);
+	pmu->of_node = np_pmu;
+	pmu->pmc_base = of_iomap(pmu->of_node, 0);
+	pmu->pmu_base = of_iomap(pmu->of_node, 1);
+	if (!pmu->pmc_base || !pmu->pmu_base) {
+		pr_err("%s: failed to map PMU\n", np_pmu->name);
+		iounmap(pmu->pmu_base);
+		iounmap(pmu->pmc_base);
+		kfree(pmu);
+		return -ENOMEM;
+	}
+
+	pmu_reset_init(pmu);
+
+	for_each_available_child_of_node(domains_node, np) {
+		struct of_phandle_args args;
+		struct pmu_domain *domain;
+
+		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+		if (!domain)
+			break;
+
+		domain->pmu = pmu;
+		domain->base.name = kstrdup(np->name, GFP_KERNEL);
+		if (!domain->base.name) {
+			kfree(domain);
+			break;
+		}
+
+		of_property_read_u32(np, "marvell,pmu_pwr_mask",
+				     &domain->pwr_mask);
+		of_property_read_u32(np, "marvell,pmu_iso_mask",
+				     &domain->iso_mask);
+
+		/*
+		 * We parse the reset controller property directly here
+		 * to ensure that we can operate when the reset controller
+		 * support is not configured into the kernel.
+		 */
+		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+						 0, &args);
+		if (ret == 0) {
+			if (args.np == pmu->of_node)
+				domain->rst_mask = BIT(args.args[0]);
+			of_node_put(args.np);
+		}
+
+		__pmu_domain_register(domain, np);
+	}
+	pm_genpd_poweroff_unused();
+
+	/* Loss of the interrupt controller is not a fatal error. */
+	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
+	if (!parent_irq) {
+		pr_err("%s: no interrupt specified\n", np_pmu->name);
+	} else {
+		ret = dove_init_pmu_irq(pmu, parent_irq);
+		if (ret)
+			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
+	}
+
+	return 0;
+}
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 3c18503..9d50682 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -1,6 +1,15 @@
 #
 # MediaTek SoC drivers
 #
+config MTK_INFRACFG
+	bool "MediaTek INFRACFG Support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select REGMAP
+	help
+	  Say yes here to add support for the MediaTek INFRACFG controller. The
+	  INFRACFG controller contains various infrastructure registers not
+	  directly associated with any device.
+
 config MTK_PMIC_WRAP
 	tristate "MediaTek PMIC Wrapper Support"
 	depends on ARCH_MEDIATEK
@@ -10,3 +19,13 @@
 	  Say yes here to add support for MediaTek PMIC Wrapper found
 	  on different MediaTek SoCs. The PMIC wrapper is a proprietary
 	  hardware to connect the PMIC.
+
+config MTK_SCPSYS
+	bool "MediaTek SCPSYS Support"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	select REGMAP
+	select MTK_INFRACFG
+	select PM_GENERIC_DOMAINS if PM
+	help
+	  Say yes here to add support for the MediaTek SCPSYS power domain
+	  driver.
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index ecaf4de..12998b0 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1 +1,3 @@
+obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
 obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
+obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
new file mode 100644
index 0000000..dba3055
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <asm/processor.h>
+
+#define INFRA_TOPAXI_PROTECTEN		0x0220
+#define INFRA_TOPAXI_PROTECTSTA1	0x0228
+
+/**
+ * mtk_infracfg_set_bus_protection - enable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be enabled.
+ *
+ * This function enables the bus protection bits for disabled power
+ * domains so that the system does not hang when some unit accesses the
+ * bus while in power down.
+ */
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask)
+{
+	unsigned long expired;
+	u32 val;
+	int ret;
+
+	regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, mask);
+
+	expired = jiffies + HZ;
+
+	while (1) {
+		ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val);
+		if (ret)
+			return ret;
+
+		if ((val & mask) == mask)
+			break;
+
+		cpu_relax();
+		if (time_after(jiffies, expired))
+			return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * mtk_infracfg_clear_bus_protection - disable bus protection
+ * @infracfg: The infracfg regmap
+ * @mask: The mask containing the protection bits to be disabled.
+ *
+ * This function disables the bus protection bits previously enabled with
+ * mtk_infracfg_set_bus_protection.
+ */
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask)
+{
+	unsigned long expired;
+	int ret;
+
+	regmap_update_bits(infracfg, INFRA_TOPAXI_PROTECTEN, mask, 0);
+
+	expired = jiffies + HZ;
+
+	while (1) {
+		u32 val;
+
+		ret = regmap_read(infracfg, INFRA_TOPAXI_PROTECTSTA1, &val);
+		if (ret)
+			return ret;
+
+		if (!(val & mask))
+			break;
+
+		cpu_relax();
+		if (time_after(jiffies, expired))
+			return -EIO;
+	}
+
+	return 0;
+}
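+
+/*
+ * Expected pairing, as used by the scpsys power-domain driver added later in
+ * this series (sketch only):
+ *
+ *	mtk_infracfg_set_bus_protection(infracfg, mask);	before power-down
+ *	...power the domain down, later power it back up...
+ *	mtk_infracfg_clear_bus_protection(infracfg, mask);	after power-up
+ */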
diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c
index f432291..8bc7b41 100644
--- a/drivers/soc/mediatek/mtk-pmic-wrap.c
+++ b/drivers/soc/mediatek/mtk-pmic-wrap.c
@@ -926,7 +926,6 @@
 static struct platform_driver pwrap_drv = {
 	.driver = {
 		.name = "mt-pmic-pwrap",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(of_pwrap_match_tbl),
 	},
 	.probe = pwrap_probe,
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
new file mode 100644
index 0000000..164a7d8
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -0,0 +1,488 @@
+/*
+ * Copyright (c) 2015 Pengutronix, Sascha Hauer <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+#include <dt-bindings/power/mt8173-power.h>
+
+#define SPM_VDE_PWR_CON			0x0210
+#define SPM_MFG_PWR_CON			0x0214
+#define SPM_VEN_PWR_CON			0x0230
+#define SPM_ISP_PWR_CON			0x0238
+#define SPM_DIS_PWR_CON			0x023c
+#define SPM_VEN2_PWR_CON		0x0298
+#define SPM_AUDIO_PWR_CON		0x029c
+#define SPM_MFG_2D_PWR_CON		0x02c0
+#define SPM_MFG_ASYNC_PWR_CON		0x02c4
+#define SPM_USB_PWR_CON			0x02cc
+#define SPM_PWR_STATUS			0x060c
+#define SPM_PWR_STATUS_2ND		0x0610
+
+#define PWR_RST_B_BIT			BIT(0)
+#define PWR_ISO_BIT			BIT(1)
+#define PWR_ON_BIT			BIT(2)
+#define PWR_ON_2ND_BIT			BIT(3)
+#define PWR_CLK_DIS_BIT			BIT(4)
+
+#define PWR_STATUS_DISP			BIT(3)
+#define PWR_STATUS_MFG			BIT(4)
+#define PWR_STATUS_ISP			BIT(5)
+#define PWR_STATUS_VDEC			BIT(7)
+#define PWR_STATUS_VENC_LT		BIT(20)
+#define PWR_STATUS_VENC			BIT(21)
+#define PWR_STATUS_MFG_2D		BIT(22)
+#define PWR_STATUS_MFG_ASYNC		BIT(23)
+#define PWR_STATUS_AUDIO		BIT(24)
+#define PWR_STATUS_USB			BIT(25)
+
+enum clk_id {
+	MT8173_CLK_MM,
+	MT8173_CLK_MFG,
+	MT8173_CLK_NONE,
+	MT8173_CLK_MAX = MT8173_CLK_NONE,
+};
+
+struct scp_domain_data {
+	const char *name;
+	u32 sta_mask;
+	int ctl_offs;
+	u32 sram_pdn_bits;
+	u32 sram_pdn_ack_bits;
+	u32 bus_prot_mask;
+	enum clk_id clk_id;
+};
+
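+/*
+ * The sram_pdn_bits/sram_pdn_ack_bits fields below are GENMASK() ranges;
+ * e.g. GENMASK(11, 8) selects bits 11..8, i.e. 0x00000f00.
+ */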
+static const struct scp_domain_data scp_domain_data[] __initconst = {
+	[MT8173_POWER_DOMAIN_VDEC] = {
+		.name = "vdec",
+		.sta_mask = PWR_STATUS_VDEC,
+		.ctl_offs = SPM_VDE_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = MT8173_CLK_MM,
+	},
+	[MT8173_POWER_DOMAIN_VENC] = {
+		.name = "venc",
+		.sta_mask = PWR_STATUS_VENC,
+		.ctl_offs = SPM_VEN_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = MT8173_CLK_MM,
+	},
+	[MT8173_POWER_DOMAIN_ISP] = {
+		.name = "isp",
+		.sta_mask = PWR_STATUS_ISP,
+		.ctl_offs = SPM_ISP_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = MT8173_CLK_MM,
+	},
+	[MT8173_POWER_DOMAIN_MM] = {
+		.name = "mm",
+		.sta_mask = PWR_STATUS_DISP,
+		.ctl_offs = SPM_DIS_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(12, 12),
+		.clk_id = MT8173_CLK_MM,
+		.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+			MT8173_TOP_AXI_PROT_EN_MM_M1,
+	},
+	[MT8173_POWER_DOMAIN_VENC_LT] = {
+		.name = "venc_lt",
+		.sta_mask = PWR_STATUS_VENC_LT,
+		.ctl_offs = SPM_VEN2_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = MT8173_CLK_MM,
+	},
+	[MT8173_POWER_DOMAIN_AUDIO] = {
+		.name = "audio",
+		.sta_mask = PWR_STATUS_AUDIO,
+		.ctl_offs = SPM_AUDIO_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = MT8173_CLK_NONE,
+	},
+	[MT8173_POWER_DOMAIN_USB] = {
+		.name = "usb",
+		.sta_mask = PWR_STATUS_USB,
+		.ctl_offs = SPM_USB_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(15, 12),
+		.clk_id = MT8173_CLK_NONE,
+	},
+	[MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+		.name = "mfg_async",
+		.sta_mask = PWR_STATUS_MFG_ASYNC,
+		.ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = 0,
+		.clk_id = MT8173_CLK_MFG,
+	},
+	[MT8173_POWER_DOMAIN_MFG_2D] = {
+		.name = "mfg_2d",
+		.sta_mask = PWR_STATUS_MFG_2D,
+		.ctl_offs = SPM_MFG_2D_PWR_CON,
+		.sram_pdn_bits = GENMASK(11, 8),
+		.sram_pdn_ack_bits = GENMASK(13, 12),
+		.clk_id = MT8173_CLK_NONE,
+	},
+	[MT8173_POWER_DOMAIN_MFG] = {
+		.name = "mfg",
+		.sta_mask = PWR_STATUS_MFG,
+		.ctl_offs = SPM_MFG_PWR_CON,
+		.sram_pdn_bits = GENMASK(13, 8),
+		.sram_pdn_ack_bits = GENMASK(21, 16),
+		.clk_id = MT8173_CLK_NONE,
+		.bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+			MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+			MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+			MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+	},
+};
+
+#define NUM_DOMAINS	ARRAY_SIZE(scp_domain_data)
+
+struct scp;
+
+struct scp_domain {
+	struct generic_pm_domain genpd;
+	struct scp *scp;
+	struct clk *clk;
+	u32 sta_mask;
+	void __iomem *ctl_addr;
+	u32 sram_pdn_bits;
+	u32 sram_pdn_ack_bits;
+	u32 bus_prot_mask;
+};
+
+struct scp {
+	struct scp_domain domains[NUM_DOMAINS];
+	struct genpd_onecell_data pd_data;
+	struct device *dev;
+	void __iomem *base;
+	struct regmap *infracfg;
+};
+
+static int scpsys_domain_is_on(struct scp_domain *scpd)
+{
+	struct scp *scp = scpd->scp;
+
+	u32 status = readl(scp->base + SPM_PWR_STATUS) & scpd->sta_mask;
+	u32 status2 = readl(scp->base + SPM_PWR_STATUS_2ND) & scpd->sta_mask;
+
+	/*
+	 * A domain is on when both status bits are set. If only one is set,
+	 * return an error; this can happen transiently while a domain is
+	 * powering up.
+	 */
+
+	if (status && status2)
+		return true;
+	if (!status && !status2)
+		return false;
+
+	return -EINVAL;
+}
+
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+	struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+	struct scp *scp = scpd->scp;
+	unsigned long timeout;
+	bool expired;
+	void __iomem *ctl_addr = scpd->ctl_addr;
+	u32 sram_pdn_ack = scpd->sram_pdn_ack_bits;
+	u32 val;
+	int ret;
+
+	if (scpd->clk) {
+		ret = clk_prepare_enable(scpd->clk);
+		if (ret)
+			goto err_clk;
+	}
+
+	val = readl(ctl_addr);
+	val |= PWR_ON_BIT;
+	writel(val, ctl_addr);
+	val |= PWR_ON_2ND_BIT;
+	writel(val, ctl_addr);
+
+	/* wait until PWR_ACK = 1 */
+	timeout = jiffies + HZ;
+	expired = false;
+	while (1) {
+		ret = scpsys_domain_is_on(scpd);
+		if (ret > 0)
+			break;
+
+		if (expired) {
+			ret = -ETIMEDOUT;
+			goto err_pwr_ack;
+		}
+
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			expired = true;
+	}
+
+	val &= ~PWR_CLK_DIS_BIT;
+	writel(val, ctl_addr);
+
+	val &= ~PWR_ISO_BIT;
+	writel(val, ctl_addr);
+
+	val |= PWR_RST_B_BIT;
+	writel(val, ctl_addr);
+
+	val &= ~scpd->sram_pdn_bits;
+	writel(val, ctl_addr);
+
+	/* wait until SRAM_PDN_ACK all 0 */
+	timeout = jiffies + HZ;
+	expired = false;
+	while (sram_pdn_ack && (readl(ctl_addr) & sram_pdn_ack)) {
+
+		if (expired) {
+			ret = -ETIMEDOUT;
+			goto err_pwr_ack;
+		}
+
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			expired = true;
+	}
+
+	if (scpd->bus_prot_mask) {
+		ret = mtk_infracfg_clear_bus_protection(scp->infracfg,
+				scpd->bus_prot_mask);
+		if (ret)
+			goto err_pwr_ack;
+	}
+
+	return 0;
+
+err_pwr_ack:
+	clk_disable_unprepare(scpd->clk);
+err_clk:
+	dev_err(scp->dev, "Failed to power on domain %s\n", genpd->name);
+
+	return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+	struct scp_domain *scpd = container_of(genpd, struct scp_domain, genpd);
+	struct scp *scp = scpd->scp;
+	unsigned long timeout;
+	bool expired;
+	void __iomem *ctl_addr = scpd->ctl_addr;
+	u32 pdn_ack = scpd->sram_pdn_ack_bits;
+	u32 val;
+	int ret;
+
+	if (scpd->bus_prot_mask) {
+		ret = mtk_infracfg_set_bus_protection(scp->infracfg,
+				scpd->bus_prot_mask);
+		if (ret)
+			goto out;
+	}
+
+	val = readl(ctl_addr);
+	val |= scpd->sram_pdn_bits;
+	writel(val, ctl_addr);
+
+	/* wait until SRAM_PDN_ACK all 1 */
+	timeout = jiffies + HZ;
+	expired = false;
+	while (pdn_ack && (readl(ctl_addr) & pdn_ack) != pdn_ack) {
+		if (expired) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			expired = true;
+	}
+
+	val |= PWR_ISO_BIT;
+	writel(val, ctl_addr);
+
+	val &= ~PWR_RST_B_BIT;
+	writel(val, ctl_addr);
+
+	val |= PWR_CLK_DIS_BIT;
+	writel(val, ctl_addr);
+
+	val &= ~PWR_ON_BIT;
+	writel(val, ctl_addr);
+
+	val &= ~PWR_ON_2ND_BIT;
+	writel(val, ctl_addr);
+
+	/* wait until PWR_ACK = 0 */
+	timeout = jiffies + HZ;
+	expired = false;
+	while (1) {
+		ret = scpsys_domain_is_on(scpd);
+		if (ret == 0)
+			break;
+
+		if (expired) {
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+
+		cpu_relax();
+
+		if (time_after(jiffies, timeout))
+			expired = true;
+	}
+
+	if (scpd->clk)
+		clk_disable_unprepare(scpd->clk);
+
+	return 0;
+
+out:
+	dev_err(scp->dev, "Failed to power off domain %s\n", genpd->name);
+
+	return ret;
+}
+
+static int __init scpsys_probe(struct platform_device *pdev)
+{
+	struct genpd_onecell_data *pd_data;
+	struct resource *res;
+	int i, ret;
+	struct scp *scp;
+	struct clk *clk[MT8173_CLK_MAX];
+
+	scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
+	if (!scp)
+		return -ENOMEM;
+
+	scp->dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	scp->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(scp->base))
+		return PTR_ERR(scp->base);
+
+	pd_data = &scp->pd_data;
+
+	pd_data->domains = devm_kzalloc(&pdev->dev,
+			sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+	if (!pd_data->domains)
+		return -ENOMEM;
+
+	clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
+	if (IS_ERR(clk[MT8173_CLK_MM]))
+		return PTR_ERR(clk[MT8173_CLK_MM]);
+
+	clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
+	if (IS_ERR(clk[MT8173_CLK_MFG]))
+		return PTR_ERR(clk[MT8173_CLK_MFG]);
+
+	scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+			"infracfg");
+	if (IS_ERR(scp->infracfg)) {
+		dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
+				PTR_ERR(scp->infracfg));
+		return PTR_ERR(scp->infracfg);
+	}
+
+	pd_data->num_domains = NUM_DOMAINS;
+
+	for (i = 0; i < NUM_DOMAINS; i++) {
+		struct scp_domain *scpd = &scp->domains[i];
+		struct generic_pm_domain *genpd = &scpd->genpd;
+		const struct scp_domain_data *data = &scp_domain_data[i];
+
+		pd_data->domains[i] = genpd;
+		scpd->scp = scp;
+
+		scpd->sta_mask = data->sta_mask;
+		scpd->ctl_addr = scp->base + data->ctl_offs;
+		scpd->sram_pdn_bits = data->sram_pdn_bits;
+		scpd->sram_pdn_ack_bits = data->sram_pdn_ack_bits;
+		scpd->bus_prot_mask = data->bus_prot_mask;
+		if (data->clk_id != MT8173_CLK_NONE)
+			scpd->clk = clk[data->clk_id];
+
+		genpd->name = data->name;
+		genpd->power_off = scpsys_power_off;
+		genpd->power_on = scpsys_power_on;
+
+		/*
+		 * Initially turn on all domains to make the domains usable
+		 * with !CONFIG_PM and to get the hardware in sync with the
+		 * software.  The unused domains will be switched off during
+		 * late_init time.
+		 */
+		genpd->power_on(genpd);
+
+		pm_genpd_init(genpd, NULL, false);
+	}
+
+	/*
+	 * We are not allowed to fail here since there is no way to unregister
+	 * a power domain. Once registered above we have to keep the domains
+	 * valid.
+	 */
+
+	ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
+		pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
+	if (ret && IS_ENABLED(CONFIG_PM))
+		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+	ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D],
+		pd_data->domains[MT8173_POWER_DOMAIN_MFG]);
+	if (ret && IS_ENABLED(CONFIG_PM))
+		dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
+
+	ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+
+	return 0;
+}
+
+static const struct of_device_id of_scpsys_match_tbl[] = {
+	{
+		.compatible = "mediatek,mt8173-scpsys",
+	}, {
+		/* sentinel */
+	}
+};
+
+static struct platform_driver scpsys_drv = {
+	.driver = {
+		.name = "mtk-scpsys",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(of_scpsys_match_tbl),
+	},
+};
+
+module_platform_driver_probe(scpsys_drv, scpsys_probe);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 5eea374..ba47b70 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -13,7 +13,38 @@
 config QCOM_PM
 	bool "Qualcomm Power Management"
 	depends on ARCH_QCOM && !ARM64
+	select QCOM_SCM
 	help
 	  QCOM Platform specific power driver to manage cores and L2 low power
 	  modes. It interface with various system drivers to put the cores in
 	  low power modes.
+
+config QCOM_SMD
+	tristate "Qualcomm Shared Memory Driver (SMD)"
+	depends on QCOM_SMEM
+	help
+	  Say y here to enable support for the Qualcomm Shared Memory Driver
+	  providing communication channels to remote processors in Qualcomm
+	  platforms.
+
+config QCOM_SMD_RPM
+	tristate "Qualcomm Resource Power Manager (RPM) over SMD"
+	depends on QCOM_SMD && OF
+	help
+	  If you say yes to this option, support will be included for the
+	  Resource Power Manager system found in the Qualcomm 8974 based
+	  devices.
+
+	  This is required to access many regulators, clocks and bus
+	  frequencies controlled by the RPM on these devices.
+
+	  Say M here if you want to include support for the Qualcomm RPM as a
+	  module. This will build a module called "qcom-smd-rpm".
+
+config QCOM_SMEM
+	tristate "Qualcomm Shared Memory Manager (SMEM)"
+	depends on ARCH_QCOM
+	help
+	  Say y here to enable support for the Qualcomm Shared Memory Manager.
+	  The driver provides an interface to items in a heap shared among all
+	  processors in a Qualcomm platform.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 931d385..10a93d1 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,2 +1,5 @@
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
+obj-$(CONFIG_QCOM_SMD) +=	smd.o
+obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
+obj-$(CONFIG_QCOM_SMEM) +=	smem.o
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
new file mode 100644
index 0000000..1392ccf
--- /dev/null
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#define RPM_REQUEST_TIMEOUT     (5 * HZ)
+
+/**
+ * struct qcom_smd_rpm - state of the rpm device driver
+ * @rpm_channel:	reference to the smd channel
+ * @ack:		completion for acks
+ * @lock:		mutual exclusion around the send/complete pair
+ * @ack_status:		result of the rpm request
+ */
+struct qcom_smd_rpm {
+	struct qcom_smd_channel *rpm_channel;
+
+	struct completion ack;
+	struct mutex lock;
+	int ack_status;
+};
+
+/**
+ * struct qcom_rpm_header - header for all rpm requests and responses
+ * @service_type:	identifier of the service
+ * @length:		length of the payload
+ */
+struct qcom_rpm_header {
+	u32 service_type;
+	u32 length;
+};
+
+/**
+ * struct qcom_rpm_request - request message to the rpm
+ * @msg_id:	identifier of the outgoing message
+ * @flags:	active/sleep state flags
+ * @type:	resource type
+ * @id:		resource id
+ * @data_len:	length of the payload following this header
+ */
+struct qcom_rpm_request {
+	u32 msg_id;
+	u32 flags;
+	u32 type;
+	u32 id;
+	u32 data_len;
+};
+
+/**
+ * struct qcom_rpm_message - response message from the rpm
+ * @msg_type:	indicator of the type of message
+ * @length:	the size of this message, including the message header
+ * @msg_id:	message id
+ * @message:	textual message from the rpm
+ *
+ * Multiple of these messages can be stacked in an rpm message.
+ */
+struct qcom_rpm_message {
+	u32 msg_type;
+	u32 length;
+	union {
+		u32 msg_id;
+		u8 message[0];
+	};
+};
+
+#define RPM_SERVICE_TYPE_REQUEST	0x00716572 /* "req\0" */
+
+#define RPM_MSG_TYPE_ERR		0x00727265 /* "err\0" */
+#define RPM_MSG_TYPE_MSG_ID		0x2367736d /* "msg#" */
+
+/**
+ * qcom_rpm_smd_write - write @buf to @type:@id
+ * @rpm:	rpm handle
+ * @state:	active/sleep state flags
+ * @type:	resource type
+ * @id:		resource identifier
+ * @buf:	the data to be written
+ * @count:	number of bytes in @buf
+ */
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+		       int state,
+		       u32 type, u32 id,
+		       void *buf,
+		       size_t count)
+{
+	static unsigned msg_id = 1;
+	int left;
+	int ret;
+
+	struct {
+		struct qcom_rpm_header hdr;
+		struct qcom_rpm_request req;
+		u8 payload[count];
+	} pkt;
+
+	/* SMD packets to the RPM may not exceed 256 bytes */
+	if (WARN_ON(sizeof(pkt) >= 256))
+		return -EINVAL;
+
+	mutex_lock(&rpm->lock);
+
+	pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST;
+	pkt.hdr.length = sizeof(struct qcom_rpm_request) + count;
+
+	pkt.req.msg_id = msg_id++;
+	pkt.req.flags = BIT(state);
+	pkt.req.type = type;
+	pkt.req.id = id;
+	pkt.req.data_len = count;
+	memcpy(pkt.payload, buf, count);
+
+	ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt));
+	if (ret)
+		goto out;
+
+	left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT);
+	if (!left)
+		ret = -ETIMEDOUT;
+	else
+		ret = rpm->ack_status;
+
+out:
+	mutex_unlock(&rpm->lock);
+	return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_smd_write);
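+
+/*
+ * A hypothetical caller looks roughly like this (state, type, id and the
+ * value written are placeholders, not taken from a real consumer; 0 stands
+ * in for the active state here):
+ *
+ *	u32 val = 1;
+ *	int ret;
+ *
+ *	ret = qcom_rpm_smd_write(rpm, 0, type, id, &val, sizeof(val));
+ *	if (ret)
+ *		dev_err(dev, "RPM request failed: %d\n", ret);
+ */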
+
+static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+				 const void *data,
+				 size_t count)
+{
+	const struct qcom_rpm_header *hdr = data;
+	const struct qcom_rpm_message *msg;
+	struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
+	const u8 *buf = data + sizeof(struct qcom_rpm_header);
+	const u8 *end = buf + hdr->length;
+	char msgbuf[32];
+	int status = 0;
+	u32 len;
+
+	if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST ||
+	    hdr->length < sizeof(struct qcom_rpm_message)) {
+		dev_err(&qsdev->dev, "invalid request\n");
+		return 0;
+	}
+
+	while (buf < end) {
+		msg = (struct qcom_rpm_message *)buf;
+		switch (msg->msg_type) {
+		case RPM_MSG_TYPE_MSG_ID:
+			break;
+		case RPM_MSG_TYPE_ERR:
+			len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf));
+			memcpy_fromio(msgbuf, msg->message, len);
+			msgbuf[len - 1] = 0;
+
+			if (!strcmp(msgbuf, "resource does not exist"))
+				status = -ENXIO;
+			else
+				status = -EINVAL;
+			break;
+		}
+
+		buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4);
+	}
+
+	rpm->ack_status = status;
+	complete(&rpm->ack);
+	return 0;
+}
+
+static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
+{
+	struct qcom_smd_rpm *rpm;
+
+	rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm)
+		return -ENOMEM;
+
+	mutex_init(&rpm->lock);
+	init_completion(&rpm->ack);
+
+	rpm->rpm_channel = sdev->channel;
+
+	dev_set_drvdata(&sdev->dev, rpm);
+
+	return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+}
+
+static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
+{
+	of_platform_depopulate(&sdev->dev);
+}
+
+static const struct of_device_id qcom_smd_rpm_of_match[] = {
+	{ .compatible = "qcom,rpm-msm8974" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+
+static struct qcom_smd_driver qcom_smd_rpm_driver = {
+	.probe = qcom_smd_rpm_probe,
+	.remove = qcom_smd_rpm_remove,
+	.callback = qcom_smd_rpm_callback,
+	.driver  = {
+		.name  = "qcom_smd_rpm",
+		.owner = THIS_MODULE,
+		.of_match_table = qcom_smd_rpm_of_match,
+	},
+};
+
+static int __init qcom_smd_rpm_init(void)
+{
+	return qcom_smd_driver_register(&qcom_smd_rpm_driver);
+}
+arch_initcall(qcom_smd_rpm_init);
+
+static void __exit qcom_smd_rpm_exit(void)
+{
+	qcom_smd_driver_unregister(&qcom_smd_rpm_driver);
+}
+module_exit(qcom_smd_rpm_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
new file mode 100644
index 0000000..327adcf
--- /dev/null
+++ b/drivers/soc/qcom/smd.c
@@ -0,0 +1,1319 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/wait.h>
+
+/*
+ * The Qualcomm Shared Memory communication solution provides point-to-point
+ * channels for clients to send and receive streaming or packet based data.
+ *
+ * Each channel consists of a control item (channel info) and a ring buffer
+ * pair. The channel info carries information related to channel state, flow
+ * control and the offsets within the ring buffer.
+ *
+ * All allocated channels are listed in an allocation table, identifying the
+ * pair of items by name, type and remote processor.
+ *
+ * Upon creating a new channel the remote processor allocates channel info and
+ * ring buffer items from the smem heap and populates the allocation table. An
+ * interrupt is sent to the other end of the channel and a scan for new
+ * channels should be done. A channel never goes away, it will only change
+ * state.
+ *
+ * The remote processor signals its intent to bring up the communication
+ * channel by setting the state of its end of the channel to "opening" and
+ * sending out an interrupt. We detect this change and register an smd device to
+ * consume the channel. Upon finding a consumer we finish the handshake and the
+ * channel is up.
+ *
+ * Upon closing a channel, the remote processor will update the state of its
+ * end of the channel and signal us; we will then unregister any attached
+ * device and close our end of the channel.
+ *
+ * Devices attached to a channel can use the qcom_smd_send function to push
+ * data to the channel; this is done by copying the data into the tx ring
+ * buffer, updating the pointers in the channel info and signaling the remote
+ * processor.
+ *
+ * The remote processor does the equivalent when it transfers data and, upon
+ * receiving the interrupt, we check the channel info for new data and deliver
+ * it to the attached device. If the device is not ready to receive the data,
+ * we leave it in the ring buffer for now.
+ */
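+
+/*
+ * A minimal client sketch (names purely illustrative), following the same
+ * pattern as the smd-rpm driver earlier in this series:
+ *
+ *	static int my_probe(struct qcom_smd_device *sdev)
+ *	{
+ *		return qcom_smd_send(sdev->channel, "hello", 5);
+ *	}
+ *
+ *	static struct qcom_smd_driver my_driver = {
+ *		.probe = my_probe,
+ *		.callback = my_callback,
+ *		.driver = { .name = "my_smd_client" },
+ *	};
+ *
+ *	qcom_smd_driver_register(&my_driver);
+ */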
+
+struct smd_channel_info;
+struct smd_channel_info_word;
+
+#define SMD_ALLOC_TBL_COUNT	2
+#define SMD_ALLOC_TBL_SIZE	64
+
+/*
+ * This lists the various smem heap items relevant for the allocation table and
+ * smd channel entries.
+ */
+static const struct {
+	unsigned alloc_tbl_id;
+	unsigned info_base_id;
+	unsigned fifo_base_id;
+} smem_items[SMD_ALLOC_TBL_COUNT] = {
+	{
+		.alloc_tbl_id = 13,
+		.info_base_id = 14,
+		.fifo_base_id = 338
+	},
+	{
+		.alloc_tbl_id = 14,
+		.info_base_id = 266,
+		.fifo_base_id = 202,
+	},
+};
+
+/**
+ * struct qcom_smd_edge - representing a remote processor
+ * @smd:		handle to qcom_smd
+ * @of_node:		of_node handle for information related to this edge
+ * @edge_id:		identifier of this edge
+ * @irq:		interrupt for signals on this edge
+ * @ipc_regmap:		regmap handle holding the outgoing ipc register
+ * @ipc_offset:		offset within @ipc_regmap of the register for ipc
+ * @ipc_bit:		bit in the register at @ipc_offset of @ipc_regmap
+ * @channels:		list of all channels detected on this edge
+ * @channels_lock:	guard for modifications of @channels
+ * @allocated:		array of bitmaps representing already allocated channels
+ * @need_rescan:	flag that the @work needs to scan smem for new channels
+ * @smem_available:	last available amount of smem triggering a channel scan
+ * @work:		work item for edge housekeeping
+ */
+struct qcom_smd_edge {
+	struct qcom_smd *smd;
+	struct device_node *of_node;
+	unsigned edge_id;
+
+	int irq;
+
+	struct regmap *ipc_regmap;
+	int ipc_offset;
+	int ipc_bit;
+
+	struct list_head channels;
+	spinlock_t channels_lock;
+
+	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
+
+	bool need_rescan;
+	unsigned smem_available;
+
+	struct work_struct work;
+};
+
+/*
+ * SMD channel states.
+ */
+enum smd_channel_state {
+	SMD_CHANNEL_CLOSED,
+	SMD_CHANNEL_OPENING,
+	SMD_CHANNEL_OPENED,
+	SMD_CHANNEL_FLUSHING,
+	SMD_CHANNEL_CLOSING,
+	SMD_CHANNEL_RESET,
+	SMD_CHANNEL_RESET_OPENING
+};
+
+/**
+ * struct qcom_smd_channel - smd channel struct
+ * @edge:		qcom_smd_edge this channel is living on
+ * @qsdev:		reference to an associated smd client device
+ * @name:		name of the channel
+ * @state:		local state of the channel
+ * @remote_state:	remote state of the channel
+ * @tx_info:		byte aligned outgoing channel info
+ * @rx_info:		byte aligned incoming channel info
+ * @tx_info_word:	word aligned outgoing channel info
+ * @rx_info_word:	word aligned incoming channel info
+ * @tx_lock:		lock to make writes to the channel mutually exclusive
+ * @fblockread_event:	wakeup event tied to tx fBLOCKREADINTR
+ * @tx_fifo:		pointer to the outgoing ring buffer
+ * @rx_fifo:		pointer to the incoming ring buffer
+ * @fifo_size:		size of each ring buffer
+ * @bounce_buffer:	bounce buffer for reading wrapped packets
+ * @cb:			callback function registered for this channel
+ * @recv_lock:		guard for rx info modifications and cb pointer
+ * @pkt_size:		size of the currently handled packet
+ * @list:		list entry for @channels in qcom_smd_edge
+ */
+struct qcom_smd_channel {
+	struct qcom_smd_edge *edge;
+
+	struct qcom_smd_device *qsdev;
+
+	char *name;
+	enum smd_channel_state state;
+	enum smd_channel_state remote_state;
+
+	struct smd_channel_info *tx_info;
+	struct smd_channel_info *rx_info;
+
+	struct smd_channel_info_word *tx_info_word;
+	struct smd_channel_info_word *rx_info_word;
+
+	struct mutex tx_lock;
+	wait_queue_head_t fblockread_event;
+
+	void *tx_fifo;
+	void *rx_fifo;
+	int fifo_size;
+
+	void *bounce_buffer;
+	int (*cb)(struct qcom_smd_device *, const void *, size_t);
+
+	spinlock_t recv_lock;
+
+	int pkt_size;
+
+	struct list_head list;
+};
+
+/**
+ * struct qcom_smd - smd struct
+ * @dev:	device struct
+ * @num_edges:	number of entries in @edges
+ * @edges:	array of edges to be handled
+ */
+struct qcom_smd {
+	struct device *dev;
+
+	unsigned num_edges;
+	struct qcom_smd_edge edges[0];
+};
+
+/*
+ * Format of the smd_info smem items, for byte aligned channels.
+ */
+struct smd_channel_info {
+	u32 state;
+	u8  fDSR;
+	u8  fCTS;
+	u8  fCD;
+	u8  fRI;
+	u8  fHEAD;
+	u8  fTAIL;
+	u8  fSTATE;
+	u8  fBLOCKREADINTR;
+	u32 tail;
+	u32 head;
+};
+
+/*
+ * Format of the smd_info smem items, for word aligned channels.
+ */
+struct smd_channel_info_word {
+	u32 state;
+	u32 fDSR;
+	u32 fCTS;
+	u32 fCD;
+	u32 fRI;
+	u32 fHEAD;
+	u32 fTAIL;
+	u32 fSTATE;
+	u32 fBLOCKREADINTR;
+	u32 tail;
+	u32 head;
+};
+
+#define GET_RX_CHANNEL_INFO(channel, param) \
+	(channel->rx_info_word ? \
+		channel->rx_info_word->param : \
+		channel->rx_info->param)
+
+#define SET_RX_CHANNEL_INFO(channel, param, value) \
+	(channel->rx_info_word ? \
+		(channel->rx_info_word->param = value) : \
+		(channel->rx_info->param = value))
+
+#define GET_TX_CHANNEL_INFO(channel, param) \
+	(channel->tx_info_word ? \
+		channel->tx_info_word->param : \
+		channel->tx_info->param)
+
+#define SET_TX_CHANNEL_INFO(channel, param, value) \
+	(channel->tx_info_word ? \
+		(channel->tx_info_word->param = value) : \
+		(channel->tx_info->param = value))
+
+/**
+ * struct qcom_smd_alloc_entry - channel allocation entry
+ * @name:	channel name
+ * @cid:	channel index
+ * @flags:	channel flags and edge id
+ * @ref_count:	reference count of the channel
+ */
+struct qcom_smd_alloc_entry {
+	u8 name[20];
+	u32 cid;
+	u32 flags;
+	u32 ref_count;
+} __packed;
+
+#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
+#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
+#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)
+
+/*
+ * Each smd packet contains a 20 byte header, with the first 4 bytes holding
+ * the length of the packet.
+ */
+#define SMD_PACKET_HEADER_LEN	20
+
+/*
+ * Signal the remote processor associated with 'channel'.
+ */
+static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
+{
+	struct qcom_smd_edge *edge = channel->edge;
+
+	regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
+}
+
+/*
+ * Initialize the tx channel info
+ */
+static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
+{
+	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+	SET_TX_CHANNEL_INFO(channel, fDSR, 0);
+	SET_TX_CHANNEL_INFO(channel, fCTS, 0);
+	SET_TX_CHANNEL_INFO(channel, fCD, 0);
+	SET_TX_CHANNEL_INFO(channel, fRI, 0);
+	SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
+	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+	SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+	SET_TX_CHANNEL_INFO(channel, head, 0);
+	SET_TX_CHANNEL_INFO(channel, tail, 0);
+
+	qcom_smd_signal_channel(channel);
+
+	channel->state = SMD_CHANNEL_CLOSED;
+	channel->pkt_size = 0;
+}
+
+/*
+ * Calculate the amount of data available in the rx fifo
+ */
+static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
+{
+	unsigned head;
+	unsigned tail;
+
+	head = GET_RX_CHANNEL_INFO(channel, head);
+	tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+	return (head - tail) & (channel->fifo_size - 1);
+}
+
+/*
+ * Set tx channel state and inform the remote processor
+ */
+static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
+				       int state)
+{
+	struct qcom_smd_edge *edge = channel->edge;
+	bool is_open = state == SMD_CHANNEL_OPENED;
+
+	if (channel->state == state)
+		return;
+
+	dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
+
+	SET_TX_CHANNEL_INFO(channel, fDSR, is_open);
+	SET_TX_CHANNEL_INFO(channel, fCTS, is_open);
+	SET_TX_CHANNEL_INFO(channel, fCD, is_open);
+
+	SET_TX_CHANNEL_INFO(channel, state, state);
+	SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
+
+	channel->state = state;
+	qcom_smd_signal_channel(channel);
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that's required.
+ */
+static void smd_copy_to_fifo(void __iomem *_dst,
+			     const void *_src,
+			     size_t count,
+			     bool word_aligned)
+{
+	u32 *dst = (u32 *)_dst;
+	u32 *src = (u32 *)_src;
+
+	if (word_aligned) {
+		count /= sizeof(u32);
+		while (count--)
+			writel_relaxed(*src++, dst++);
+	} else {
+		memcpy_toio(_dst, _src, count);
+	}
+}
+
+/*
+ * Copy count bytes of data using 32bit accesses, if that is required.
+ */
+static void smd_copy_from_fifo(void *_dst,
+			       const void __iomem *_src,
+			       size_t count,
+			       bool word_aligned)
+{
+	u32 *dst = (u32 *)_dst;
+	u32 *src = (u32 *)_src;
+
+	if (word_aligned) {
+		count /= sizeof(u32);
+		while (count--)
+			*dst++ = readl_relaxed(src++);
+	} else {
+		memcpy_fromio(_dst, _src, count);
+	}
+}
+
+/*
+ * Read count bytes of data from the rx fifo into buf, but don't advance the
+ * tail.
+ */
+static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
+				    void *buf, size_t count)
+{
+	bool word_aligned;
+	unsigned tail;
+	size_t len;
+
+	word_aligned = channel->rx_info_word != NULL;
+	tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+	len = min_t(size_t, count, channel->fifo_size - tail);
+	if (len) {
+		smd_copy_from_fifo(buf,
+				   channel->rx_fifo + tail,
+				   len,
+				   word_aligned);
+	}
+
+	if (len != count) {
+		smd_copy_from_fifo(buf + len,
+				   channel->rx_fifo,
+				   count - len,
+				   word_aligned);
+	}
+
+	return count;
+}
+
+/*
+ * Advance the rx tail by count bytes.
+ */
+static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
+				     size_t count)
+{
+	unsigned tail;
+
+	tail = GET_RX_CHANNEL_INFO(channel, tail);
+	tail += count;
+	tail &= (channel->fifo_size - 1);
+	SET_RX_CHANNEL_INFO(channel, tail, tail);
+}
+
+/*
+ * Read out a single packet from the rx fifo and deliver it to the device
+ */
+static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
+{
+	struct qcom_smd_device *qsdev = channel->qsdev;
+	unsigned tail;
+	size_t len;
+	void *ptr;
+	int ret;
+
+	if (!channel->cb)
+		return 0;
+
+	tail = GET_RX_CHANNEL_INFO(channel, tail);
+
+	/* Use bounce buffer if the data wraps */
+	if (tail + channel->pkt_size >= channel->fifo_size) {
+		ptr = channel->bounce_buffer;
+		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
+	} else {
+		ptr = channel->rx_fifo + tail;
+		len = channel->pkt_size;
+	}
+
+	ret = channel->cb(qsdev, ptr, len);
+	if (ret < 0)
+		return ret;
+
+	/* Only forward the tail if the client consumed the data */
+	qcom_smd_channel_advance(channel, len);
+
+	channel->pkt_size = 0;
+
+	return 0;
+}
+
+/*
+ * Per channel interrupt handling
+ */
+static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
+{
+	bool need_state_scan = false;
+	int remote_state;
+	u32 pktlen;
+	int avail;
+	int ret;
+
+	/* Handle state changes */
+	remote_state = GET_RX_CHANNEL_INFO(channel, state);
+	if (remote_state != channel->remote_state) {
+		channel->remote_state = remote_state;
+		need_state_scan = true;
+	}
+	/* Indicate that we have seen any state change */
+	SET_RX_CHANNEL_INFO(channel, fSTATE, 0);
+
+	/* Signal waiting qcom_smd_send() about the interrupt */
+	if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR))
+		wake_up_interruptible(&channel->fblockread_event);
+
+	/* Don't consume any data until we've opened the channel */
+	if (channel->state != SMD_CHANNEL_OPENED)
+		goto out;
+
+	/* Indicate that we've seen the new data */
+	SET_RX_CHANNEL_INFO(channel, fHEAD, 0);
+
+	/* Consume data */
+	for (;;) {
+		avail = qcom_smd_channel_get_rx_avail(channel);
+
+		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
+			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
+			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
+			channel->pkt_size = pktlen;
+		} else if (channel->pkt_size && avail >= channel->pkt_size) {
+			ret = qcom_smd_channel_recv_single(channel);
+			if (ret)
+				break;
+		} else {
+			break;
+		}
+	}
+
+	/* Indicate that we have seen and updated tail */
+	SET_RX_CHANNEL_INFO(channel, fTAIL, 1);
+
+	/* Signal the remote that we've consumed the data (if requested) */
+	if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) {
+		/* Ensure ordering of channel info updates */
+		wmb();
+
+		qcom_smd_signal_channel(channel);
+	}
+
+out:
+	return need_state_scan;
+}
+
+/*
+ * The edge interrupts are triggered by the remote processor on state changes,
+ * channel info updates or when new channels are created.
+ */
+static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
+{
+	struct qcom_smd_edge *edge = data;
+	struct qcom_smd_channel *channel;
+	unsigned available;
+	bool kick_worker = false;
+
+	/*
+	 * Handle state changes or data on each of the channels on this edge
+	 */
+	spin_lock(&edge->channels_lock);
+	list_for_each_entry(channel, &edge->channels, list) {
+		spin_lock(&channel->recv_lock);
+		kick_worker |= qcom_smd_channel_intr(channel);
+		spin_unlock(&channel->recv_lock);
+	}
+	spin_unlock(&edge->channels_lock);
+
+	/*
+	 * Creating a new channel requires allocating an smem entry, so we only
+	 * have to scan if the amount of available space in smem has changed
+	 * since the last scan.
+	 */
+	available = qcom_smem_get_free_space(edge->edge_id);
+	if (available != edge->smem_available) {
+		edge->smem_available = available;
+		edge->need_rescan = true;
+		kick_worker = true;
+	}
+
+	if (kick_worker)
+		schedule_work(&edge->work);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Deliver any outstanding packets in the rx fifo; can be used after probe of
+ * the clients to deliver any packets that weren't delivered before the client
+ * was set up.
+ */
+static void qcom_smd_channel_resume(struct qcom_smd_channel *channel)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&channel->recv_lock, flags);
+	qcom_smd_channel_intr(channel);
+	spin_unlock_irqrestore(&channel->recv_lock, flags);
+}
+
+/*
+ * Calculate how much space is available in the tx fifo.
+ */
+static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
+{
+	unsigned head;
+	unsigned tail;
+	unsigned mask = channel->fifo_size - 1;
+
+	head = GET_TX_CHANNEL_INFO(channel, head);
+	tail = GET_TX_CHANNEL_INFO(channel, tail);
+
+	return mask - ((head - tail) & mask);
+}
+
+/*
+ * Write count bytes of data into channel, possibly wrapping in the ring buffer
+ */
+static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
+			       const void *data,
+			       size_t count)
+{
+	bool word_aligned;
+	unsigned head;
+	size_t len;
+
+	word_aligned = channel->tx_info_word != NULL;
+	head = GET_TX_CHANNEL_INFO(channel, head);
+
+	len = min_t(size_t, count, channel->fifo_size - head);
+	if (len) {
+		smd_copy_to_fifo(channel->tx_fifo + head,
+				 data,
+				 len,
+				 word_aligned);
+	}
+
+	if (len != count) {
+		smd_copy_to_fifo(channel->tx_fifo,
+				 data + len,
+				 count - len,
+				 word_aligned);
+	}
+
+	head += count;
+	head &= (channel->fifo_size - 1);
+	SET_TX_CHANNEL_INFO(channel, head, head);
+
+	return count;
+}
+
+/**
+ * qcom_smd_send - write data to smd channel
+ * @channel:	channel handle
+ * @data:	buffer of data to write
+ * @len:	number of bytes to write
+ *
+ * This is a blocking write of len bytes into the channel's tx ring buffer that
+ * signals the remote end. It will sleep until there is enough space available
+ * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid
+ * polling.
+ */
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
+{
+	u32 hdr[5] = {len,};
+	int tlen = sizeof(hdr) + len;
+	int ret;
+
+	/* Word aligned channels only accept word size aligned data */
+	if (channel->rx_info_word != NULL && len % 4)
+		return -EINVAL;
+
+	ret = mutex_lock_interruptible(&channel->tx_lock);
+	if (ret)
+		return ret;
+
+	while (qcom_smd_get_tx_avail(channel) < tlen) {
+		if (channel->state != SMD_CHANNEL_OPENED) {
+			ret = -EPIPE;
+			goto out;
+		}
+
+		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+
+		ret = wait_event_interruptible(channel->fblockread_event,
+				       qcom_smd_get_tx_avail(channel) >= tlen ||
+				       channel->state != SMD_CHANNEL_OPENED);
+		if (ret)
+			goto out;
+
+		SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+	}
+
+	SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+
+	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
+	qcom_smd_write_fifo(channel, data, len);
+
+	SET_TX_CHANNEL_INFO(channel, fHEAD, 1);
+
+	/* Ensure ordering of channel info updates */
+	wmb();
+
+	qcom_smd_signal_channel(channel);
+
+out:
+	mutex_unlock(&channel->tx_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smd_send);
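+
+/*
+ * Usage sketch (hypothetical client; struct and field names are made up for
+ * illustration): a client driver typically calls qcom_smd_send() from process
+ * context with a small request buffer, e.g.
+ *
+ *	struct my_request req = { .command = 1 };
+ *	int ret = qcom_smd_send(qsdev->channel, &req, sizeof(req));
+ *	if (ret < 0)
+ *		dev_err(&qsdev->dev, "failed to send request: %d\n", ret);
+ *
+ * Note that word aligned channels only accept lengths that are a multiple of
+ * four bytes.
+ */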
+
+static struct qcom_smd_device *to_smd_device(struct device *dev)
+{
+	return container_of(dev, struct qcom_smd_device, dev);
+}
+
+static struct qcom_smd_driver *to_smd_driver(struct device *dev)
+{
+	struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+	return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
+}
+
+static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
+{
+	return of_driver_match_device(dev, drv);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+	struct qcom_smd_device *qsdev = to_smd_device(dev);
+	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+	struct qcom_smd_channel *channel = qsdev->channel;
+	size_t bb_size;
+	int ret;
+
+	/*
+	 * Packets are maximum 4k, but reduce if the fifo is smaller
+	 */
+	bb_size = min(channel->fifo_size, SZ_4K);
+	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
+	if (!channel->bounce_buffer)
+		return -ENOMEM;
+
+	channel->cb = qsdrv->callback;
+
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
+
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+
+	ret = qsdrv->probe(qsdev);
+	if (ret)
+		goto err;
+
+	qcom_smd_channel_resume(channel);
+
+	return 0;
+
+err:
+	dev_err(&qsdev->dev, "probe failed\n");
+
+	channel->cb = NULL;
+	kfree(channel->bounce_buffer);
+	channel->bounce_buffer = NULL;
+
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+	return ret;
+}
+
+/*
+ * Remove the smd client.
+ *
+ * The channel is going away, for some reason, so remove the smd client and
+ * reset the channel state.
+ */
+static int qcom_smd_dev_remove(struct device *dev)
+{
+	struct qcom_smd_device *qsdev = to_smd_device(dev);
+	struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+	struct qcom_smd_channel *channel = qsdev->channel;
+	unsigned long flags;
+
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
+
+	/*
+	 * Make sure we don't race with the code receiving data.
+	 */
+	spin_lock_irqsave(&channel->recv_lock, flags);
+	channel->cb = NULL;
+	spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+	/* Wake up any sleepers in qcom_smd_send() */
+	wake_up_interruptible(&channel->fblockread_event);
+
+	/*
+	 * We expect that the client might block in remove() waiting for any
+	 * outstanding calls to qcom_smd_send() to wake up and finish.
+	 */
+	if (qsdrv->remove)
+		qsdrv->remove(qsdev);
+
+	/*
+	 * The client is now gone, cleanup and reset the channel state.
+	 */
+	channel->qsdev = NULL;
+	kfree(channel->bounce_buffer);
+	channel->bounce_buffer = NULL;
+
+	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+
+	qcom_smd_channel_reset(channel);
+
+	return 0;
+}
+
+static struct bus_type qcom_smd_bus = {
+	.name = "qcom_smd",
+	.match = qcom_smd_dev_match,
+	.probe = qcom_smd_dev_probe,
+	.remove = qcom_smd_dev_remove,
+};
+
+/*
+ * Release function for the qcom_smd_device object.
+ */
+static void qcom_smd_release_device(struct device *dev)
+{
+	struct qcom_smd_device *qsdev = to_smd_device(dev);
+
+	kfree(qsdev);
+}
+
+/*
+ * Finds the device_node for the smd child interested in this channel.
+ */
+static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
+						  const char *channel)
+{
+	struct device_node *child;
+	const char *name;
+	const char *key;
+	int ret;
+
+	for_each_available_child_of_node(edge_node, child) {
+		key = "qcom,smd-channels";
+		ret = of_property_read_string(child, key, &name);
+		if (ret)
+			continue;
+
+		if (strcmp(name, channel) == 0)
+			return child;
+	}
+
+	return NULL;
+}
+
+/*
+ * Create a smd client device for a channel that is being opened.
+ */
+static int qcom_smd_create_device(struct qcom_smd_channel *channel)
+{
+	struct qcom_smd_device *qsdev;
+	struct qcom_smd_edge *edge = channel->edge;
+	struct device_node *node;
+	struct qcom_smd *smd = edge->smd;
+	int ret;
+
+	if (channel->qsdev)
+		return -EEXIST;
+
+	node = qcom_smd_match_channel(edge->of_node, channel->name);
+	if (!node) {
+		dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
+		return -ENXIO;
+	}
+
+	dev_dbg(smd->dev, "registering '%s'\n", channel->name);
+
+	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
+	if (!qsdev)
+		return -ENOMEM;
+
+	dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
+	qsdev->dev.parent = smd->dev;
+	qsdev->dev.bus = &qcom_smd_bus;
+	qsdev->dev.release = qcom_smd_release_device;
+	qsdev->dev.of_node = node;
+
+	qsdev->channel = channel;
+
+	channel->qsdev = qsdev;
+
+	ret = device_register(&qsdev->dev);
+	if (ret) {
+		dev_err(smd->dev, "device_register failed: %d\n", ret);
+		put_device(&qsdev->dev);
+	}
+
+	return ret;
+}
+
+/*
+ * Destroy a smd client device for a channel that's going away.
+ */
+static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
+{
+	struct device *dev;
+
+	BUG_ON(!channel->qsdev);
+
+	dev = &channel->qsdev->dev;
+
+	device_unregister(dev);
+	of_node_put(dev->of_node);
+	put_device(dev);
+}
+
+/**
+ * qcom_smd_driver_register - register a smd driver
+ * @qsdrv:	qcom_smd_driver struct
+ */
+int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
+{
+	qsdrv->driver.bus = &qcom_smd_bus;
+	return driver_register(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_register);
+
+/**
+ * qcom_smd_driver_unregister - unregister a smd driver
+ * @qsdrv:	qcom_smd_driver struct
+ */
+void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
+{
+	driver_unregister(&qsdrv->driver);
+}
+EXPORT_SYMBOL(qcom_smd_driver_unregister);
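+
+/*
+ * Minimal client sketch (hypothetical driver; all names below are made up for
+ * illustration): a smd client supplies a probe callback and a receive
+ * callback and binds to its channel's of_node via the driver's match table:
+ *
+ *	static int my_probe(struct qcom_smd_device *qsdev)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static int my_callback(struct qcom_smd_device *qsdev,
+ *			       const void *data, size_t count)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct qcom_smd_driver my_driver = {
+ *		.probe = my_probe,
+ *		.callback = my_callback,
+ *		.driver = {
+ *			.name = "my_smd_client",
+ *			.of_match_table = my_of_match,
+ *		},
+ *	};
+ *
+ *	ret = qcom_smd_driver_register(&my_driver);
+ */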
+
+/*
+ * Allocate the qcom_smd_channel object for a newly found smd channel,
+ * retrieving and validating the smem items involved.
+ */
+static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
+							unsigned smem_info_item,
+							unsigned smem_fifo_item,
+							char *name)
+{
+	struct qcom_smd_channel *channel;
+	struct qcom_smd *smd = edge->smd;
+	size_t fifo_size;
+	size_t info_size;
+	void *fifo_base;
+	void *info;
+	int ret;
+
+	channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return ERR_PTR(-ENOMEM);
+
+	channel->edge = edge;
+	channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
+	if (!channel->name)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&channel->tx_lock);
+	spin_lock_init(&channel->recv_lock);
+	init_waitqueue_head(&channel->fblockread_event);
+
+	ret = qcom_smem_get(edge->edge_id, smem_info_item, (void **)&info, &info_size);
+	if (ret)
+		goto free_name_and_channel;
+
+	/*
+	 * Use the size of the item to figure out which channel info struct to
+	 * use.
+	 */
+	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
+		channel->tx_info_word = info;
+		channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
+	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
+		channel->tx_info = info;
+		channel->rx_info = info + sizeof(struct smd_channel_info);
+	} else {
+		dev_err(smd->dev,
+			"channel info of size %zu not supported\n", info_size);
+		ret = -EINVAL;
+		goto free_name_and_channel;
+	}
+
+	ret = qcom_smem_get(edge->edge_id, smem_fifo_item, &fifo_base, &fifo_size);
+	if (ret)
+		goto free_name_and_channel;
+
+	/* The channel consists of an rx and a tx fifo of equal size */
+	fifo_size /= 2;
+
+	dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+			  name, info_size, fifo_size);
+
+	channel->tx_fifo = fifo_base;
+	channel->rx_fifo = fifo_base + fifo_size;
+	channel->fifo_size = fifo_size;
+
+	qcom_smd_channel_reset(channel);
+
+	return channel;
+
+free_name_and_channel:
+	devm_kfree(smd->dev, channel->name);
+	devm_kfree(smd->dev, channel);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * Scans the allocation table for any newly allocated channels, calls
+ * qcom_smd_create_channel() to create representations of these and adds
+ * them to the edge's list of channels.
+ */
+static void qcom_discover_channels(struct qcom_smd_edge *edge)
+{
+	struct qcom_smd_alloc_entry *alloc_tbl;
+	struct qcom_smd_alloc_entry *entry;
+	struct qcom_smd_channel *channel;
+	struct qcom_smd *smd = edge->smd;
+	unsigned long flags;
+	unsigned fifo_id;
+	unsigned info_id;
+	int ret;
+	int tbl;
+	int i;
+
+	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
+		ret = qcom_smem_get(edge->edge_id,
+				    smem_items[tbl].alloc_tbl_id,
+				    (void **)&alloc_tbl,
+				    NULL);
+		if (ret < 0)
+			continue;
+
+		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
+			entry = &alloc_tbl[i];
+			if (test_bit(i, edge->allocated[tbl]))
+				continue;
+
+			if (entry->ref_count == 0)
+				continue;
+
+			if (!entry->name[0])
+				continue;
+
+			if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET))
+				continue;
+
+			if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
+				continue;
+
+			info_id = smem_items[tbl].info_base_id + entry->cid;
+			fifo_id = smem_items[tbl].fifo_base_id + entry->cid;
+
+			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
+			if (IS_ERR(channel))
+				continue;
+
+			spin_lock_irqsave(&edge->channels_lock, flags);
+			list_add(&channel->list, &edge->channels);
+			spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+			dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
+			set_bit(i, edge->allocated[tbl]);
+		}
+	}
+
+	schedule_work(&edge->work);
+}
+
+/*
+ * This per-edge worker scans smem for any new channels and registers these. It
+ * then scans all registered channels for state changes that should be handled
+ * by creating or destroying smd client devices for the registered channels.
+ *
+ * LOCKING: edge->channels_lock need not be held during the traversal of the
+ * channels list, as it's done synchronously with the only writer.
+ */
+static void qcom_channel_state_worker(struct work_struct *work)
+{
+	struct qcom_smd_channel *channel;
+	struct qcom_smd_edge *edge = container_of(work,
+						  struct qcom_smd_edge,
+						  work);
+	unsigned remote_state;
+
+	/*
+	 * Rescan smem if we have reason to believe that there are new channels.
+	 */
+	if (edge->need_rescan) {
+		edge->need_rescan = false;
+		qcom_discover_channels(edge);
+	}
+
+	/*
+	 * Register a device for any closed channel where the remote processor
+	 * is showing interest in opening the channel.
+	 */
+	list_for_each_entry(channel, &edge->channels, list) {
+		if (channel->state != SMD_CHANNEL_CLOSED)
+			continue;
+
+		remote_state = GET_RX_CHANNEL_INFO(channel, state);
+		if (remote_state != SMD_CHANNEL_OPENING &&
+		    remote_state != SMD_CHANNEL_OPENED)
+			continue;
+
+		qcom_smd_create_device(channel);
+	}
+
+	/*
+	 * Unregister the device for any channel that is opened where the
+	 * remote processor is closing the channel.
+	 */
+	list_for_each_entry(channel, &edge->channels, list) {
+		if (channel->state != SMD_CHANNEL_OPENING &&
+		    channel->state != SMD_CHANNEL_OPENED)
+			continue;
+
+		remote_state = GET_RX_CHANNEL_INFO(channel, state);
+		if (remote_state == SMD_CHANNEL_OPENING ||
+		    remote_state == SMD_CHANNEL_OPENED)
+			continue;
+
+		qcom_smd_destroy_device(channel);
+	}
+}
+
+/*
+ * Parses an of_node describing an edge.
+ */
+static int qcom_smd_parse_edge(struct device *dev,
+			       struct device_node *node,
+			       struct qcom_smd_edge *edge)
+{
+	struct device_node *syscon_np;
+	const char *key;
+	int irq;
+	int ret;
+
+	INIT_LIST_HEAD(&edge->channels);
+	spin_lock_init(&edge->channels_lock);
+
+	INIT_WORK(&edge->work, qcom_channel_state_worker);
+
+	edge->of_node = of_node_get(node);
+
+	irq = irq_of_parse_and_map(node, 0);
+	if (!irq) {
+		dev_err(dev, "required smd interrupt missing\n");
+		return -EINVAL;
+	}
+
+	ret = devm_request_irq(dev, irq,
+			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
+			       node->name, edge);
+	if (ret) {
+		dev_err(dev, "failed to request smd irq\n");
+		return ret;
+	}
+
+	edge->irq = irq;
+
+	key = "qcom,smd-edge";
+	ret = of_property_read_u32(node, key, &edge->edge_id);
+	if (ret) {
+		dev_err(dev, "edge missing %s property\n", key);
+		return -EINVAL;
+	}
+
+	syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+	if (!syscon_np) {
+		dev_err(dev, "no qcom,ipc node\n");
+		return -ENODEV;
+	}
+
+	edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
+	if (IS_ERR(edge->ipc_regmap))
+		return PTR_ERR(edge->ipc_regmap);
+
+	key = "qcom,ipc";
+	ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
+	if (ret < 0) {
+		dev_err(dev, "no offset in %s\n", key);
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
+	if (ret < 0) {
+		dev_err(dev, "no bit in %s\n", key);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int qcom_smd_probe(struct platform_device *pdev)
+{
+	struct qcom_smd_edge *edge;
+	struct device_node *node;
+	struct qcom_smd *smd;
+	size_t array_size;
+	int num_edges;
+	int ret;
+	int i = 0;
+
+	/* Wait for smem */
+	ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL);
+	if (ret == -EPROBE_DEFER)
+		return ret;
+
+	num_edges = of_get_available_child_count(pdev->dev.of_node);
+	array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
+	smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL);
+	if (!smd)
+		return -ENOMEM;
+	smd->dev = &pdev->dev;
+
+	smd->num_edges = num_edges;
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		edge = &smd->edges[i++];
+		edge->smd = smd;
+
+		ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
+		if (ret)
+			continue;
+
+		edge->need_rescan = true;
+		schedule_work(&edge->work);
+	}
+
+	platform_set_drvdata(pdev, smd);
+
+	return 0;
+}
+
+/*
+ * Shut down all smd clients by making sure that each edge stops processing
+ * events and scanning for new channels, then call destroy on the devices.
+ */
+static int qcom_smd_remove(struct platform_device *pdev)
+{
+	struct qcom_smd_channel *channel;
+	struct qcom_smd_edge *edge;
+	struct qcom_smd *smd = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < smd->num_edges; i++) {
+		edge = &smd->edges[i];
+
+		disable_irq(edge->irq);
+		cancel_work_sync(&edge->work);
+
+		list_for_each_entry(channel, &edge->channels, list) {
+			if (!channel->qsdev)
+				continue;
+
+			qcom_smd_destroy_device(channel);
+		}
+	}
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smd_of_match[] = {
+	{ .compatible = "qcom,smd" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smd_of_match);
+
+static struct platform_driver qcom_smd_driver = {
+	.probe = qcom_smd_probe,
+	.remove = qcom_smd_remove,
+	.driver = {
+		.name = "qcom-smd",
+		.of_match_table = qcom_smd_of_match,
+	},
+};
+
+static int __init qcom_smd_init(void)
+{
+	int ret;
+
+	ret = bus_register(&qcom_smd_bus);
+	if (ret) {
+		pr_err("failed to register smd bus: %d\n", ret);
+		return ret;
+	}
+
+	return platform_driver_register(&qcom_smd_driver);
+}
+postcore_initcall(qcom_smd_init);
+
+static void __exit qcom_smd_exit(void)
+{
+	platform_driver_unregister(&qcom_smd_driver);
+	bus_unregister(&qcom_smd_bus);
+}
+module_exit(qcom_smd_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
new file mode 100644
index 0000000..7c2c324c
--- /dev/null
+++ b/drivers/soc/qcom/smem.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/hwspinlock.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Qualcomm shared memory system is an allocate-only heap structure that
+ * consists of one or more memory areas that can be accessed by the processors
+ * in the SoC.
+ *
+ * All systems contain a global heap, accessible by all processors in the SoC,
+ * with a table of contents data structure (@smem_header) at the beginning of
+ * the main shared memory block.
+ *
+ * The global header contains meta data for allocations as well as a fixed list
+ * of 512 entries (@smem_global_entry) that can be initialized to reference
+ * parts of the shared memory space.
+ *
+ *
+ * In addition to this global heap a set of "private" heaps can be set up at
+ * boot time with access restrictions so that only certain processor pairs can
+ * access the data.
+ *
+ * These partitions are referenced from an optional partition table
+ * (@smem_ptable), that is found 4kB from the end of the main smem region. The
+ * partition table entries (@smem_ptable_entry) lists the involved processors
+ * (or hosts) and their location in the main shared memory region.
+ *
+ * Each partition starts with a header (@smem_partition_header) that identifies
+ * the partition and holds properties for the two internal memory regions. The
+ * two regions are cached and non-cached memory respectively. Each region
+ * contains a linked list of allocation headers (@smem_private_entry) followed by
+ * their data.
+ *
+ * Items in the non-cached region are allocated from the start of the partition
+ * while items in the cached region are allocated from the end. The free area
+ * is hence the region between the cached and non-cached offsets.
+ *
+ *
+ * To synchronize allocations in the shared memory heaps a remote spinlock must
+ * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
+ * platforms.
+ *
+ */
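+
+/*
+ * Worked example (all numbers are illustrative only): in a 1 MB main region
+ * the partition table sits at offset 1 MB - 4 kB. A partition entry listing
+ * hosts (0, 1) points at a smem_partition_header; uncached items are carved
+ * out upwards from just after that header and cached items downwards from its
+ * end, so the free space available to that host pair is
+ * offset_free_cached - offset_free_uncached.
+ */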
+
+/*
+ * Item 3 of the global heap contains an array of versions for the various
+ * software components in the SoC. We verify that the boot loader version
+ * matches the expected version (SMEM_EXPECTED_VERSION) as a sanity check.
+ */
+#define SMEM_ITEM_VERSION	3
+#define  SMEM_MASTER_SBL_VERSION_INDEX	7
+#define  SMEM_EXPECTED_VERSION		11
+
+/*
+ * The first 8 items are only to be allocated by the boot loader while
+ * initializing the heap.
+ */
+#define SMEM_ITEM_LAST_FIXED	8
+
+/* Highest accepted item number, for both global and private heaps */
+#define SMEM_ITEM_COUNT		512
+
+/* Processor/host identifier for the application processor */
+#define SMEM_HOST_APPS		0
+
+/* Max number of processors/hosts in a system */
+#define SMEM_HOST_COUNT		9
+
+/**
+ * struct smem_proc_comm - proc_comm communication struct (legacy)
+ * @command:	current command to be executed
+ * @status:	status of the currently requested command
+ * @params:	parameters to the command
+ */
+struct smem_proc_comm {
+	u32 command;
+	u32 status;
+	u32 params[2];
+};
+
+/**
+ * struct smem_global_entry - entry to reference smem items on the heap
+ * @allocated:	boolean to indicate if this entry is used
+ * @offset:	offset to the allocated space
+ * @size:	size of the allocated space, 8 byte aligned
+ * @aux_base:	base address for the memory region used by this unit, or 0 for
+ *		the default region. bits 0,1 are reserved
+ */
+struct smem_global_entry {
+	u32 allocated;
+	u32 offset;
+	u32 size;
+	u32 aux_base; /* bits 1:0 reserved */
+};
+#define AUX_BASE_MASK		0xfffffffc
+
+/**
+ * struct smem_header - header found in beginning of primary smem region
+ * @proc_comm:		proc_comm communication interface (legacy)
+ * @version:		array of versions for the various subsystems
+ * @initialized:	boolean to indicate that smem is initialized
+ * @free_offset:	index of the first unallocated byte in smem
+ * @available:		number of bytes available for allocation
+ * @reserved:		reserved field, must be 0
+ * @toc:		array of references to items
+ */
+struct smem_header {
+	struct smem_proc_comm proc_comm[4];
+	u32 version[32];
+	u32 initialized;
+	u32 free_offset;
+	u32 available;
+	u32 reserved;
+	struct smem_global_entry toc[SMEM_ITEM_COUNT];
+};
+
+/**
+ * struct smem_ptable_entry - one entry in the @smem_ptable list
+ * @offset:	offset, within the main shared memory region, of the partition
+ * @size:	size of the partition
+ * @flags:	flags for the partition (currently unused)
+ * @host0:	first processor/host with access to this partition
+ * @host1:	second processor/host with access to this partition
+ * @reserved:	reserved entries for later use
+ */
+struct smem_ptable_entry {
+	u32 offset;
+	u32 size;
+	u32 flags;
+	u16 host0;
+	u16 host1;
+	u32 reserved[8];
+};
+
+/**
+ * struct smem_ptable - partition table for the private partitions
+ * @magic:	magic number, must be SMEM_PTABLE_MAGIC
+ * @version:	version of the partition table
+ * @num_entries: number of partitions in the table
+ * @reserved:	for now reserved entries
+ * @entry:	list of @smem_ptable_entry for the @num_entries partitions
+ */
+struct smem_ptable {
+	u32 magic;
+	u32 version;
+	u32 num_entries;
+	u32 reserved[5];
+	struct smem_ptable_entry entry[];
+};
+#define SMEM_PTABLE_MAGIC	0x434f5424 /* "$TOC" */
+
+/**
+ * struct smem_partition_header - header of the partitions
+ * @magic:	magic number, must be SMEM_PART_MAGIC
+ * @host0:	first processor/host with access to this partition
+ * @host1:	second processor/host with access to this partition
+ * @size:	size of the partition
+ * @offset_free_uncached: offset to the first free byte of uncached memory in
+ *		this partition
+ * @offset_free_cached: offset to the first free byte of cached memory in this
+ *		partition
+ * @reserved:	for now reserved entries
+ */
+struct smem_partition_header {
+	u32 magic;
+	u16 host0;
+	u16 host1;
+	u32 size;
+	u32 offset_free_uncached;
+	u32 offset_free_cached;
+	u32 reserved[3];
+};
+#define SMEM_PART_MAGIC		0x54525024 /* "$PRT" */
+
+/**
+ * struct smem_private_entry - header of each item in the private partition
+ * @canary:	magic number, must be SMEM_PRIVATE_CANARY
+ * @item:	identifying number of the smem item
+ * @size:	size of the data, including padding bytes
+ * @padding_data: number of bytes of padding of data
+ * @padding_hdr: number of bytes of padding between the header and the data
+ * @reserved:	for now reserved entry
+ */
+struct smem_private_entry {
+	u16 canary;
+	u16 item;
+	u32 size; /* includes padding bytes */
+	u16 padding_data;
+	u16 padding_hdr;
+	u32 reserved;
+};
+#define SMEM_PRIVATE_CANARY	0xa5a5
+
+/**
+ * struct smem_region - representation of a chunk of memory used for smem
+ * @aux_base:	identifier of aux_mem base
+ * @virt_base:	virtual base address of memory with this aux_mem identifier
+ * @size:	size of the memory region
+ */
+struct smem_region {
+	u32 aux_base;
+	void __iomem *virt_base;
+	size_t size;
+};
+
+/**
+ * struct qcom_smem - device data for the smem device
+ * @dev:	device pointer
+ * @hwlock:	reference to a hwspinlock
+ * @partitions:	list of pointers to partitions affecting the current
+ *		processor/host
+ * @num_regions: number of @regions
+ * @regions:	list of the memory regions defining the shared memory
+ */
+struct qcom_smem {
+	struct device *dev;
+
+	struct hwspinlock *hwlock;
+
+	struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+
+	unsigned num_regions;
+	struct smem_region regions[0];
+};
+
+/* Pointer to the one and only smem handle */
+static struct qcom_smem *__smem;
+
+/* Timeout (ms) for the trylock of remote spinlocks */
+#define HWSPINLOCK_TIMEOUT	1000
+
+static int qcom_smem_alloc_private(struct qcom_smem *smem,
+				   unsigned host,
+				   unsigned item,
+				   size_t size)
+{
+	struct smem_partition_header *phdr;
+	struct smem_private_entry *hdr;
+	size_t alloc_size;
+	void *p;
+
+	/* We're not going to find it if there's no matching partition */
+	if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
+		return -ENOENT;
+
+	phdr = smem->partitions[host];
+
+	p = (void *)phdr + sizeof(*phdr);
+	while (p < (void *)phdr + phdr->offset_free_uncached) {
+		hdr = p;
+
+		if (hdr->canary != SMEM_PRIVATE_CANARY) {
+			dev_err(smem->dev,
+				"Found invalid canary in host %d partition\n",
+				host);
+			return -EINVAL;
+		}
+
+		if (hdr->item == item)
+			return -EEXIST;
+
+		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+	}
+
+	/* Check that we don't grow into the cached region */
+	alloc_size = sizeof(*hdr) + ALIGN(size, 8);
+	if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) {
+		dev_err(smem->dev, "Out of memory\n");
+		return -ENOSPC;
+	}
+
+	hdr = p;
+	hdr->canary = SMEM_PRIVATE_CANARY;
+	hdr->item = item;
+	hdr->size = ALIGN(size, 8);
+	hdr->padding_data = hdr->size - size;
+	hdr->padding_hdr = 0;
+
+	/*
+	 * Ensure the header is written before we advance the free offset, so
+	 * that remote processors that do not take the remote spinlock still
+	 * get a consistent view of the linked list.
+	 */
+	wmb();
+	phdr->offset_free_uncached += alloc_size;
+
+	return 0;
+}
+
+static int qcom_smem_alloc_global(struct qcom_smem *smem,
+				  unsigned item,
+				  size_t size)
+{
+	struct smem_header *header;
+	struct smem_global_entry *entry;
+
+	if (WARN_ON(item >= SMEM_ITEM_COUNT))
+		return -EINVAL;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (entry->allocated)
+		return -EEXIST;
+
+	size = ALIGN(size, 8);
+	if (WARN_ON(size > header->available))
+		return -ENOMEM;
+
+	entry->offset = header->free_offset;
+	entry->size = size;
+
+	/*
+	 * Ensure the header is consistent before we mark the item allocated,
+	 * so that remote processors will get a consistent view of the item
+	 * even though they do not take the spinlock on read.
+	 */
+	wmb();
+	entry->allocated = 1;
+
+	header->free_offset += size;
+	header->available -= size;
+
+	return 0;
+}
+
+/**
+ * qcom_smem_alloc() - allocate space for a smem item
+ * @host:	remote processor id, or -1
+ * @item:	smem item handle
+ * @size:	number of bytes to be allocated
+ *
+ * Allocate space for a given smem item of size @size, given that the item is
+ * not yet allocated.
+ */
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (item < SMEM_ITEM_LAST_FIXED) {
+		dev_err(__smem->dev,
+			"Rejecting allocation of static entry %d\n", item);
+		return -EINVAL;
+	}
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return ret;
+
+	ret = qcom_smem_alloc_private(__smem, host, item, size);
+	if (ret == -ENOENT)
+		ret = qcom_smem_alloc_global(__smem, item, size);
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_alloc);
+
+static int qcom_smem_get_global(struct qcom_smem *smem,
+				unsigned item,
+				void **ptr,
+				size_t *size)
+{
+	struct smem_header *header;
+	struct smem_region *area;
+	struct smem_global_entry *entry;
+	u32 aux_base;
+	unsigned i;
+
+	if (WARN_ON(item >= SMEM_ITEM_COUNT))
+		return -EINVAL;
+
+	header = smem->regions[0].virt_base;
+	entry = &header->toc[item];
+	if (!entry->allocated)
+		return -ENXIO;
+
+	if (ptr != NULL) {
+		aux_base = entry->aux_base & AUX_BASE_MASK;
+
+		for (i = 0; i < smem->num_regions; i++) {
+			area = &smem->regions[i];
+
+			if (area->aux_base == aux_base || !aux_base) {
+				*ptr = area->virt_base + entry->offset;
+				break;
+			}
+		}
+	}
+	if (size != NULL)
+		*size = entry->size;
+
+	return 0;
+}
+
+static int qcom_smem_get_private(struct qcom_smem *smem,
+				 unsigned host,
+				 unsigned item,
+				 void **ptr,
+				 size_t *size)
+{
+	struct smem_partition_header *phdr;
+	struct smem_private_entry *hdr;
+	void *p;
+
+	/* We're not going to find it if there's no matching partition */
+	if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
+		return -ENOENT;
+
+	phdr = smem->partitions[host];
+
+	p = (void *)phdr + sizeof(*phdr);
+	while (p < (void *)phdr + phdr->offset_free_uncached) {
+		hdr = p;
+
+		if (hdr->canary != SMEM_PRIVATE_CANARY) {
+			dev_err(smem->dev,
+				"Found invalid canary in host %d partition\n",
+				host);
+			return -EINVAL;
+		}
+
+		if (hdr->item == item) {
+			if (ptr != NULL)
+				*ptr = p + sizeof(*hdr) + hdr->padding_hdr;
+
+			if (size != NULL)
+				*size = hdr->size - hdr->padding_data;
+
+			return 0;
+		}
+
+		p += sizeof(*hdr) + hdr->padding_hdr + hdr->size;
+	}
+
+	return -ENOENT;
+}
+
+/**
+ * qcom_smem_get() - resolve ptr and size of a smem item
+ * @host:	the remote processor, or -1
+ * @item:	smem item handle
+ * @ptr:	pointer to be filled out with address of the item
+ * @size:	pointer to be filled out with size of the item
+ *
+ * Looks up pointer and size of a smem item.
+ */
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
+{
+	unsigned long flags;
+	int ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+					  HWSPINLOCK_TIMEOUT,
+					  &flags);
+	if (ret)
+		return ret;
+
+	ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+	if (ret == -ENOENT)
+		ret = qcom_smem_get_global(__smem, item, ptr, size);
+
+	hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get);
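+
+/*
+ * Usage sketch (hypothetical item number, for illustration only): a client
+ * first allocates an item and then resolves its address and size, e.g.
+ *
+ *	void *ptr;
+ *	size_t size;
+ *
+ *	ret = qcom_smem_alloc(remote_host, 437, 128);
+ *	if (ret < 0 && ret != -EEXIST)
+ *		return ret;
+ *
+ *	ret = qcom_smem_get(remote_host, 437, &ptr, &size);
+ *
+ * Items below SMEM_ITEM_LAST_FIXED are reserved for the boot loader and are
+ * rejected by qcom_smem_alloc().
+ */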
+
+/**
+ * qcom_smem_get_free_space() - retrieve amount of free space in a partition
+ * @host:	the remote processor identifying a partition, or -1
+ *
+ * To be used by smem clients as a quick way to determine if any new
+ * allocations have been made.
+ */
+int qcom_smem_get_free_space(unsigned host)
+{
+	struct smem_partition_header *phdr;
+	struct smem_header *header;
+	unsigned ret;
+
+	if (!__smem)
+		return -EPROBE_DEFER;
+
+	if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
+		phdr = __smem->partitions[host];
+		ret = phdr->offset_free_cached - phdr->offset_free_uncached;
+	} else {
+		header = __smem->regions[0].virt_base;
+		ret = header->available;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qcom_smem_get_free_space);
+
+static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
+{
+	unsigned *versions;
+	size_t size;
+	int ret;
+
+	ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION,
+				   (void **)&versions, &size);
+	if (ret < 0) {
+		dev_err(smem->dev, "Unable to read the version item\n");
+		return -ENOENT;
+	}
+
+	if (size < sizeof(unsigned) * (SMEM_MASTER_SBL_VERSION_INDEX + 1)) {
+		dev_err(smem->dev, "Version item is too small\n");
+		return -EINVAL;
+	}
+
+	return versions[SMEM_MASTER_SBL_VERSION_INDEX];
+}
+
+static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
+					  unsigned local_host)
+{
+	struct smem_partition_header *header;
+	struct smem_ptable_entry *entry;
+	struct smem_ptable *ptable;
+	unsigned remote_host;
+	int i;
+
+	ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
+	if (ptable->magic != SMEM_PTABLE_MAGIC)
+		return 0;
+
+	if (ptable->version != 1) {
+		dev_err(smem->dev,
+			"Unsupported partition header version %d\n",
+			ptable->version);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ptable->num_entries; i++) {
+		entry = &ptable->entry[i];
+
+		if (entry->host0 != local_host && entry->host1 != local_host)
+			continue;
+
+		if (!entry->offset)
+			continue;
+
+		if (!entry->size)
+			continue;
+
+		if (entry->host0 == local_host)
+			remote_host = entry->host1;
+		else
+			remote_host = entry->host0;
+
+		if (remote_host >= SMEM_HOST_COUNT) {
+			dev_err(smem->dev,
+				"Invalid remote host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		if (smem->partitions[remote_host]) {
+			dev_err(smem->dev,
+				"Already found a partition for host %d\n",
+				remote_host);
+			return -EINVAL;
+		}
+
+		header = smem->regions[0].virt_base + entry->offset;
+
+		if (header->magic != SMEM_PART_MAGIC) {
+			dev_err(smem->dev,
+				"Partition %d has invalid magic\n", i);
+			return -EINVAL;
+		}
+
+		if (header->host0 != local_host && header->host1 != local_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (header->host0 != remote_host && header->host1 != remote_host) {
+			dev_err(smem->dev,
+				"Partition %d hosts are invalid\n", i);
+			return -EINVAL;
+		}
+
+		if (header->size != entry->size) {
+			dev_err(smem->dev,
+				"Partition %d has invalid size\n", i);
+			return -EINVAL;
+		}
+
+		if (header->offset_free_uncached > header->size) {
+			dev_err(smem->dev,
+				"Partition %d has invalid free pointer\n", i);
+			return -EINVAL;
+		}
+
+		smem->partitions[remote_host] = header;
+	}
+
+	return 0;
+}
+
+static int qcom_smem_count_mem_regions(struct platform_device *pdev)
+{
+	struct resource *res;
+	int num_regions = 0;
+	int i;
+
+	for (i = 0; i < pdev->num_resources; i++) {
+		res = &pdev->resource[i];
+
+		if (resource_type(res) == IORESOURCE_MEM)
+			num_regions++;
+	}
+
+	return num_regions;
+}
+
+static int qcom_smem_probe(struct platform_device *pdev)
+{
+	struct smem_header *header;
+	struct device_node *np;
+	struct qcom_smem *smem;
+	struct resource *res;
+	struct resource r;
+	size_t array_size;
+	int num_regions = 0;
+	int hwlock_id;
+	u32 version;
+	int ret;
+	int i;
+
+	num_regions = qcom_smem_count_mem_regions(pdev) + 1;
+
+	array_size = num_regions * sizeof(struct smem_region);
+	smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
+	if (!smem)
+		return -ENOMEM;
+
+	smem->dev = &pdev->dev;
+	smem->num_regions = num_regions;
+
+	np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	if (!np) {
+		dev_err(&pdev->dev, "No memory-region specified\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(np, 0, &r);
+	of_node_put(np);
+	if (ret)
+		return ret;
+
+	smem->regions[0].aux_base = (u32)r.start;
+	smem->regions[0].size = resource_size(&r);
+	smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev,
+							  r.start,
+							  resource_size(&r));
+	if (!smem->regions[0].virt_base)
+		return -ENOMEM;
+
+	for (i = 1; i < num_regions; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
+
+		smem->regions[i].aux_base = (u32)res->start;
+		smem->regions[i].size = resource_size(res);
+		smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
+								  res->start,
+								  resource_size(res));
+		if (!smem->regions[i].virt_base)
+			return -ENOMEM;
+	}
+
+	header = smem->regions[0].virt_base;
+	if (header->initialized != 1 || header->reserved) {
+		dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
+		return -EINVAL;
+	}
+
+	version = qcom_smem_get_sbl_version(smem);
+	if (version >> 16 != SMEM_EXPECTED_VERSION) {
+		dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+		return -EINVAL;
+	}
+
+	ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
+	if (ret < 0)
+		return ret;
+
+	hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
+	if (hwlock_id < 0) {
+		dev_err(&pdev->dev, "failed to retrieve hwlock\n");
+		return hwlock_id;
+	}
+
+	smem->hwlock = hwspin_lock_request_specific(hwlock_id);
+	if (!smem->hwlock)
+		return -ENXIO;
+
+	__smem = smem;
+
+	return 0;
+}
+
+static int qcom_smem_remove(struct platform_device *pdev)
+{
+	hwspin_lock_free(__smem->hwlock);
+	__smem = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id qcom_smem_of_match[] = {
+	{ .compatible = "qcom,smem" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_smem_of_match);
+
+static struct platform_driver qcom_smem_driver = {
+	.probe = qcom_smem_probe,
+	.remove = qcom_smem_remove,
+	.driver  = {
+		.name = "qcom-smem",
+		.of_match_table = qcom_smem_of_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init qcom_smem_init(void)
+{
+	return platform_driver_register(&qcom_smem_driver);
+}
+arch_initcall(qcom_smem_init);
+
+static void __exit qcom_smem_exit(void)
+{
+	platform_driver_unregister(&qcom_smem_driver);
+}
+module_exit(qcom_smem_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm Shared Memory Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index cdaad9d..ae857ff 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_ARCH_TEGRA) += fuse/
+obj-y += fuse/
 
-obj-$(CONFIG_ARCH_TEGRA) += common.o
-obj-$(CONFIG_ARCH_TEGRA) += pmc.o
+obj-y += common.o
+obj-y += pmc.o
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index a71cb74..cd8f413 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -15,6 +15,8 @@
 	{ .compatible = "nvidia,tegra30", },
 	{ .compatible = "nvidia,tegra114", },
 	{ .compatible = "nvidia,tegra124", },
+	{ .compatible = "nvidia,tegra132", },
+	{ .compatible = "nvidia,tegra210", },
 	{ }
 };
 
diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile
index 3af357d..21bc275 100644
--- a/drivers/soc/tegra/fuse/Makefile
+++ b/drivers/soc/tegra/fuse/Makefile
@@ -6,3 +6,5 @@
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)		+= speedo-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)	+= speedo-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)	+= speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC)	+= speedo-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_210_SOC)	+= speedo-tegra210.o
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index c0d660f..de2c1bf 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -15,9 +15,10 @@
  *
  */
 
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/kobject.h>
-#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -28,8 +29,6 @@
 
 #include "fuse.h"
 
-static u32 (*fuse_readl)(const unsigned int offset);
-static int fuse_size;
 struct tegra_sku_info tegra_sku_info;
 EXPORT_SYMBOL(tegra_sku_info);
 
@@ -42,11 +41,11 @@
 	[TEGRA_REVISION_A04]     = "A04",
 };
 
-static u8 fuse_readb(const unsigned int offset)
+static u8 fuse_readb(struct tegra_fuse *fuse, unsigned int offset)
 {
 	u32 val;
 
-	val = fuse_readl(round_down(offset, 4));
+	val = fuse->read(fuse, round_down(offset, 4));
 	val >>= (offset % 4) * 8;
 	val &= 0xff;
 
@@ -54,19 +53,21 @@
 }
 
 static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
-			struct bin_attribute *attr, char *buf,
-			loff_t pos, size_t size)
+			 struct bin_attribute *attr, char *buf,
+			 loff_t pos, size_t size)
 {
+	struct device *dev = kobj_to_dev(kobj);
+	struct tegra_fuse *fuse = dev_get_drvdata(dev);
 	int i;
 
-	if (pos < 0 || pos >= fuse_size)
+	if (pos < 0 || pos >= attr->size)
 		return 0;
 
-	if (size > fuse_size - pos)
-		size = fuse_size - pos;
+	if (size > attr->size - pos)
+		size = attr->size - pos;
 
 	for (i = 0; i < size; i++)
-		buf[i] = fuse_readb(pos + i);
+		buf[i] = fuse_readb(fuse, pos + i);
 
 	return i;
 }
@@ -76,15 +77,122 @@
 	.read = fuse_read,
 };
 
+static int tegra_fuse_create_sysfs(struct device *dev, unsigned int size,
+				   const struct tegra_fuse_info *info)
+{
+	fuse_bin_attr.size = size;
+
+	return device_create_bin_file(dev, &fuse_bin_attr);
+}
+
 static const struct of_device_id car_match[] __initconst = {
 	{ .compatible = "nvidia,tegra20-car", },
 	{ .compatible = "nvidia,tegra30-car", },
 	{ .compatible = "nvidia,tegra114-car", },
 	{ .compatible = "nvidia,tegra124-car", },
 	{ .compatible = "nvidia,tegra132-car", },
+	{ .compatible = "nvidia,tegra210-car", },
 	{},
 };
 
+static struct tegra_fuse *fuse = &(struct tegra_fuse) {
+	.base = NULL,
+	.soc = NULL,
+};
+
+static const struct of_device_id tegra_fuse_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+	{ .compatible = "nvidia,tegra210-efuse", .data = &tegra210_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+	{ .compatible = "nvidia,tegra132-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+	{ .compatible = "nvidia,tegra124-efuse", .data = &tegra124_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+	{ .compatible = "nvidia,tegra114-efuse", .data = &tegra114_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+	{ .compatible = "nvidia,tegra30-efuse", .data = &tegra30_fuse_soc },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+	{ .compatible = "nvidia,tegra20-efuse", .data = &tegra20_fuse_soc },
+#endif
+	{ /* sentinel */ }
+};
+
+static int tegra_fuse_probe(struct platform_device *pdev)
+{
+	void __iomem *base = fuse->base;
+	struct resource *res;
+	int err;
+
+	/* take over the memory region from the early initialization */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	fuse->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(fuse->base))
+		return PTR_ERR(fuse->base);
+
+	fuse->clk = devm_clk_get(&pdev->dev, "fuse");
+	if (IS_ERR(fuse->clk)) {
+		dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
+			PTR_ERR(fuse->clk));
+		return PTR_ERR(fuse->clk);
+	}
+
+	platform_set_drvdata(pdev, fuse);
+	fuse->dev = &pdev->dev;
+
+	if (fuse->soc->probe) {
+		err = fuse->soc->probe(fuse);
+		if (err < 0)
+			return err;
+	}
+
+	if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
+				    fuse->soc->info))
+		return -ENODEV;
+
+	/* release the early I/O memory mapping */
+	iounmap(base);
+
+	return 0;
+}
+
+static struct platform_driver tegra_fuse_driver = {
+	.driver = {
+		.name = "tegra-fuse",
+		.of_match_table = tegra_fuse_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = tegra_fuse_probe,
+};
+module_platform_driver(tegra_fuse_driver);
+
+bool __init tegra_fuse_read_spare(unsigned int spare)
+{
+	unsigned int offset = fuse->soc->info->spare + spare * 4;
+
+	return fuse->read_early(fuse, offset) & 1;
+}
+
+u32 __init tegra_fuse_read_early(unsigned int offset)
+{
+	return fuse->read_early(fuse, offset);
+}
+
+int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+	if (!fuse->read)
+		return -EPROBE_DEFER;
+
+	*value = fuse->read(fuse, offset);
+
+	return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_readl);
+
 static void tegra_enable_fuse_clk(void __iomem *base)
 {
 	u32 reg;
@@ -102,63 +210,106 @@
 	writel(reg, base + 0x14);
 }
 
-int tegra_fuse_readl(unsigned long offset, u32 *value)
-{
-	if (!fuse_readl)
-		return -EPROBE_DEFER;
-
-	*value = fuse_readl(offset);
-
-	return 0;
-}
-EXPORT_SYMBOL(tegra_fuse_readl);
-
-int tegra_fuse_create_sysfs(struct device *dev, int size,
-		     u32 (*readl)(const unsigned int offset))
-{
-	if (fuse_size)
-		return -ENODEV;
-
-	fuse_bin_attr.size = size;
-	fuse_bin_attr.read = fuse_read;
-
-	fuse_size = size;
-	fuse_readl = readl;
-
-	return device_create_bin_file(dev, &fuse_bin_attr);
-}
-
 static int __init tegra_init_fuse(void)
 {
+	const struct of_device_id *match;
 	struct device_node *np;
-	void __iomem *car_base;
-
-	if (!soc_is_tegra())
-		return 0;
+	struct resource regs;
 
 	tegra_init_apbmisc();
 
-	np = of_find_matching_node(NULL, car_match);
-	car_base = of_iomap(np, 0);
-	if (car_base) {
-		tegra_enable_fuse_clk(car_base);
-		iounmap(car_base);
+	np = of_find_matching_node_and_match(NULL, tegra_fuse_match, &match);
+	if (!np) {
+		/*
+		 * Fall back to legacy initialization for 32-bit ARM only. All
+		 * 64-bit ARM device tree files for Tegra are required to have
+		 * a FUSE node.
+		 *
+		 * This is for backwards-compatibility with old device trees
+		 * that didn't contain a FUSE node.
+		 */
+		if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+			u8 chip = tegra_get_chip_id();
+
+			regs.start = 0x7000f800;
+			regs.end = 0x7000fbff;
+			regs.flags = IORESOURCE_MEM;
+
+			switch (chip) {
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+			case TEGRA20:
+				fuse->soc = &tegra20_fuse_soc;
+				break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+			case TEGRA30:
+				fuse->soc = &tegra30_fuse_soc;
+				break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+			case TEGRA114:
+				fuse->soc = &tegra114_fuse_soc;
+				break;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+			case TEGRA124:
+				fuse->soc = &tegra124_fuse_soc;
+				break;
+#endif
+
+			default:
+				pr_warn("Unsupported SoC: %02x\n", chip);
+				break;
+			}
+		} else {
+			/*
+			 * At this point we're not running on Tegra, so play
+			 * nice with multi-platform kernels.
+			 */
+			return 0;
+		}
 	} else {
-		pr_err("Could not enable fuse clk. ioremap tegra car failed.\n");
+		/*
+		 * Extract information from the device tree if we've found a
+		 * matching node.
+		 */
+		if (of_address_to_resource(np, 0, &regs) < 0) {
+			pr_err("failed to get FUSE register\n");
+			return -ENXIO;
+		}
+
+		fuse->soc = match->data;
+	}
+
+	np = of_find_matching_node(NULL, car_match);
+	if (np) {
+		void __iomem *base = of_iomap(np, 0);
+		if (base) {
+			tegra_enable_fuse_clk(base);
+			iounmap(base);
+		} else {
+			pr_err("failed to map clock registers\n");
+			return -ENXIO;
+		}
+	}
+
+	fuse->base = ioremap_nocache(regs.start, resource_size(&regs));
+	if (!fuse->base) {
+		pr_err("failed to map FUSE registers\n");
 		return -ENXIO;
 	}
 
-	if (tegra_get_chip_id() == TEGRA20)
-		tegra20_init_fuse_early();
-	else
-		tegra30_init_fuse_early();
+	fuse->soc->init(fuse);
 
-	pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
+	pr_info("Tegra Revision: %s SKU: %d CPU Process: %d SoC Process: %d\n",
 		tegra_revision_name[tegra_sku_info.revision],
 		tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
-		tegra_sku_info.core_process_id);
-	pr_debug("Tegra CPU Speedo ID %d, Soc Speedo ID %d\n",
-		tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+		tegra_sku_info.soc_process_id);
+	pr_debug("Tegra CPU Speedo ID %d, SoC Speedo ID %d\n",
+		 tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
 
 	return 0;
 }
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
index 6acc2c4..294413a 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra20.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -34,159 +34,107 @@
 #include "fuse.h"
 
 #define FUSE_BEGIN	0x100
-#define FUSE_SIZE	0x1f8
 #define FUSE_UID_LOW	0x08
 #define FUSE_UID_HIGH	0x0c
 
-static phys_addr_t fuse_phys;
-static struct clk *fuse_clk;
-static void __iomem __initdata *fuse_base;
-
-static DEFINE_MUTEX(apb_dma_lock);
-static DECLARE_COMPLETION(apb_dma_wait);
-static struct dma_chan *apb_dma_chan;
-static struct dma_slave_config dma_sconfig;
-static u32 *apb_buffer;
-static dma_addr_t apb_buffer_phys;
+static u32 tegra20_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
+{
+	return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
+}
 
 static void apb_dma_complete(void *args)
 {
-	complete(&apb_dma_wait);
+	struct tegra_fuse *fuse = args;
+
+	complete(&fuse->apbdma.wait);
 }
 
-static u32 tegra20_fuse_readl(const unsigned int offset)
+static u32 tegra20_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
 {
-	int ret;
-	u32 val = 0;
+	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 	struct dma_async_tx_descriptor *dma_desc;
 	unsigned long time_left;
+	u32 value = 0;
+	int err;
 
-	mutex_lock(&apb_dma_lock);
+	mutex_lock(&fuse->apbdma.lock);
 
-	dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
-	ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
-	if (ret)
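+	/* DMA from the FUSE register window into the coherent bounce buffer */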
+	fuse->apbdma.config.src_addr = fuse->phys + FUSE_BEGIN + offset;
+
+	err = dmaengine_slave_config(fuse->apbdma.chan, &fuse->apbdma.config);
+	if (err)
 		goto out;
 
-	dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
-			sizeof(u32), DMA_DEV_TO_MEM,
-			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	dma_desc = dmaengine_prep_slave_single(fuse->apbdma.chan,
+					       fuse->apbdma.phys,
+					       sizeof(u32), DMA_DEV_TO_MEM,
+					       flags);
 	if (!dma_desc)
 		goto out;
 
 	dma_desc->callback = apb_dma_complete;
-	dma_desc->callback_param = NULL;
+	dma_desc->callback_param = fuse;
 
-	reinit_completion(&apb_dma_wait);
+	reinit_completion(&fuse->apbdma.wait);
 
-	clk_prepare_enable(fuse_clk);
+	clk_prepare_enable(fuse->clk);
 
 	dmaengine_submit(dma_desc);
-	dma_async_issue_pending(apb_dma_chan);
-	time_left = wait_for_completion_timeout(&apb_dma_wait,
+	dma_async_issue_pending(fuse->apbdma.chan);
+	time_left = wait_for_completion_timeout(&fuse->apbdma.wait,
 						msecs_to_jiffies(50));
 
 	if (WARN(time_left == 0, "apb read dma timed out"))
-		dmaengine_terminate_all(apb_dma_chan);
+		dmaengine_terminate_all(fuse->apbdma.chan);
 	else
-		val = *apb_buffer;
+		value = *fuse->apbdma.virt;
 
-	clk_disable_unprepare(fuse_clk);
+	clk_disable_unprepare(fuse->clk);
+
 out:
-	mutex_unlock(&apb_dma_lock);
-
-	return val;
+	mutex_unlock(&fuse->apbdma.lock);
+	return value;
 }
 
-static const struct of_device_id tegra20_fuse_of_match[] = {
-	{ .compatible = "nvidia,tegra20-efuse" },
-	{},
-};
-
-static int apb_dma_init(void)
+static int tegra20_fuse_probe(struct tegra_fuse *fuse)
 {
 	dma_cap_mask_t mask;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
-	apb_dma_chan = dma_request_channel(mask, NULL, NULL);
-	if (!apb_dma_chan)
+
+	fuse->apbdma.chan = dma_request_channel(mask, NULL, NULL);
+	if (!fuse->apbdma.chan)
 		return -EPROBE_DEFER;
 
-	apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
-					GFP_KERNEL);
-	if (!apb_buffer) {
-		dma_release_channel(apb_dma_chan);
+	fuse->apbdma.virt = dma_alloc_coherent(fuse->dev, sizeof(u32),
+					       &fuse->apbdma.phys,
+					       GFP_KERNEL);
+	if (!fuse->apbdma.virt) {
+		dma_release_channel(fuse->apbdma.chan);
 		return -ENOMEM;
 	}
 
-	dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	dma_sconfig.src_maxburst = 1;
-	dma_sconfig.dst_maxburst = 1;
+	fuse->apbdma.config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	fuse->apbdma.config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	fuse->apbdma.config.src_maxburst = 1;
+	fuse->apbdma.config.dst_maxburst = 1;
+
+	init_completion(&fuse->apbdma.wait);
+	mutex_init(&fuse->apbdma.lock);
+	fuse->read = tegra20_fuse_read;
 
 	return 0;
 }
 
-static int tegra20_fuse_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	int err;
-
-	fuse_clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(fuse_clk)) {
-		dev_err(&pdev->dev, "missing clock");
-		return PTR_ERR(fuse_clk);
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
-	fuse_phys = res->start;
-
-	err = apb_dma_init();
-	if (err)
-		return err;
-
-	if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl))
-		return -ENODEV;
-
-	dev_dbg(&pdev->dev, "loaded\n");
-
-	return 0;
-}
-
-static struct platform_driver tegra20_fuse_driver = {
-	.probe = tegra20_fuse_probe,
-	.driver = {
-		.name = "tegra20_fuse",
-		.of_match_table = tegra20_fuse_of_match,
-	}
+static const struct tegra_fuse_info tegra20_fuse_info = {
+	.read = tegra20_fuse_read,
+	.size = 0x1f8,
+	.spare = 0x100,
 };
 
-static int __init tegra20_fuse_init(void)
-{
-	return platform_driver_register(&tegra20_fuse_driver);
-}
-postcore_initcall(tegra20_fuse_init);
-
 /* Early boot code. This code is called before the devices are created */
 
-u32 __init tegra20_fuse_early(const unsigned int offset)
-{
-	return readl_relaxed(fuse_base + FUSE_BEGIN + offset);
-}
-
-bool __init tegra20_spare_fuse_early(int spare_bit)
-{
-	u32 offset = spare_bit * 4;
-	bool value;
-
-	value = tegra20_fuse_early(offset + 0x100);
-
-	return value;
-}
-
 static void __init tegra20_fuse_add_randomness(void)
 {
 	u32 randomness[7];
@@ -195,22 +143,27 @@
 	randomness[1] = tegra_read_straps();
 	randomness[2] = tegra_read_chipid();
 	randomness[3] = tegra_sku_info.cpu_process_id << 16;
-	randomness[3] |= tegra_sku_info.core_process_id;
+	randomness[3] |= tegra_sku_info.soc_process_id;
 	randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
 	randomness[4] |= tegra_sku_info.soc_speedo_id;
-	randomness[5] = tegra20_fuse_early(FUSE_UID_LOW);
-	randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH);
+	randomness[5] = tegra_fuse_read_early(FUSE_UID_LOW);
+	randomness[6] = tegra_fuse_read_early(FUSE_UID_HIGH);
 
 	add_device_randomness(randomness, sizeof(randomness));
 }
 
-void __init tegra20_init_fuse_early(void)
+static void __init tegra20_fuse_init(struct tegra_fuse *fuse)
 {
-	fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+	fuse->read_early = tegra20_fuse_read_early;
 
 	tegra_init_revision();
-	tegra20_init_speedo_data(&tegra_sku_info);
+	fuse->soc->speedo_init(&tegra_sku_info);
 	tegra20_fuse_add_randomness();
-
-	iounmap(fuse_base);
 }
+
+const struct tegra_fuse_soc tegra20_fuse_soc = {
+	.init = tegra20_fuse_init,
+	.speedo_init = tegra20_init_speedo_data,
+	.probe = tegra20_fuse_probe,
+	.info = &tegra20_fuse_info,
+};
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index 4d2f71b..882607b 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -42,114 +42,34 @@
 
 #define FUSE_HAS_REVISION_INFO	BIT(0)
 
-enum speedo_idx {
-	SPEEDO_TEGRA30 = 0,
-	SPEEDO_TEGRA114,
-	SPEEDO_TEGRA124,
-};
-
-struct tegra_fuse_info {
-	int		size;
-	int		spare_bit;
-	enum speedo_idx	speedo_idx;
-};
-
-static void __iomem *fuse_base;
-static struct clk *fuse_clk;
-static const struct tegra_fuse_info *fuse_info;
-
-u32 tegra30_fuse_readl(const unsigned int offset)
+#if defined(CONFIG_ARCH_TEGRA_3x_SOC) || \
+    defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+    defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+    defined(CONFIG_ARCH_TEGRA_132_SOC) || \
+    defined(CONFIG_ARCH_TEGRA_210_SOC)
+static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
 {
-	u32 val;
-
-	/*
-	 * early in the boot, the fuse clock will be enabled by
-	 * tegra_init_fuse()
-	 */
-
-	if (fuse_clk)
-		clk_prepare_enable(fuse_clk);
-
-	val = readl_relaxed(fuse_base + FUSE_BEGIN + offset);
-
-	if (fuse_clk)
-		clk_disable_unprepare(fuse_clk);
-
-	return val;
+	return readl_relaxed(fuse->base + FUSE_BEGIN + offset);
 }
 
-static const struct tegra_fuse_info tegra30_info = {
-	.size			= 0x2a4,
-	.spare_bit		= 0x144,
-	.speedo_idx		= SPEEDO_TEGRA30,
-};
-
-static const struct tegra_fuse_info tegra114_info = {
-	.size			= 0x2a0,
-	.speedo_idx		= SPEEDO_TEGRA114,
-};
-
-static const struct tegra_fuse_info tegra124_info = {
-	.size			= 0x300,
-	.speedo_idx		= SPEEDO_TEGRA124,
-};
-
-static const struct of_device_id tegra30_fuse_of_match[] = {
-	{ .compatible = "nvidia,tegra30-efuse", .data = &tegra30_info },
-	{ .compatible = "nvidia,tegra114-efuse", .data = &tegra114_info },
-	{ .compatible = "nvidia,tegra124-efuse", .data = &tegra124_info },
-	{},
-};
-
-static int tegra30_fuse_probe(struct platform_device *pdev)
+static u32 tegra30_fuse_read(struct tegra_fuse *fuse, unsigned int offset)
 {
-	const struct of_device_id *of_dev_id;
+	u32 value;
+	int err;
 
-	of_dev_id = of_match_device(tegra30_fuse_of_match, &pdev->dev);
-	if (!of_dev_id)
-		return -ENODEV;
-
-	fuse_clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(fuse_clk)) {
-		dev_err(&pdev->dev, "missing clock");
-		return PTR_ERR(fuse_clk);
+	err = clk_prepare_enable(fuse->clk);
+	if (err < 0) {
+		dev_err(fuse->dev, "failed to enable FUSE clock: %d\n", err);
+		return 0;
 	}
 
-	platform_set_drvdata(pdev, NULL);
+	value = readl_relaxed(fuse->base + FUSE_BEGIN + offset);
 
-	if (tegra_fuse_create_sysfs(&pdev->dev, fuse_info->size,
-				    tegra30_fuse_readl))
-		return -ENODEV;
+	clk_disable_unprepare(fuse->clk);
 
-	dev_dbg(&pdev->dev, "loaded\n");
-
-	return 0;
+	return value;
 }
 
-static struct platform_driver tegra30_fuse_driver = {
-	.probe = tegra30_fuse_probe,
-	.driver = {
-		.name = "tegra_fuse",
-		.of_match_table = tegra30_fuse_of_match,
-	}
-};
-
-static int __init tegra30_fuse_init(void)
-{
-	return platform_driver_register(&tegra30_fuse_driver);
-}
-postcore_initcall(tegra30_fuse_init);
-
-/* Early boot code. This code is called before the devices are created */
-
-typedef void (*speedo_f)(struct tegra_sku_info *sku_info);
-
-static speedo_f __initdata speedo_tbl[] = {
-	[SPEEDO_TEGRA30]	= tegra30_init_speedo_data,
-	[SPEEDO_TEGRA114]	= tegra114_init_speedo_data,
-	[SPEEDO_TEGRA124]	= tegra124_init_speedo_data,
-};
-
 static void __init tegra30_fuse_add_randomness(void)
 {
 	u32 randomness[12];
@@ -158,67 +78,83 @@
 	randomness[1] = tegra_read_straps();
 	randomness[2] = tegra_read_chipid();
 	randomness[3] = tegra_sku_info.cpu_process_id << 16;
-	randomness[3] |= tegra_sku_info.core_process_id;
+	randomness[3] |= tegra_sku_info.soc_process_id;
 	randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
 	randomness[4] |= tegra_sku_info.soc_speedo_id;
-	randomness[5] = tegra30_fuse_readl(FUSE_VENDOR_CODE);
-	randomness[6] = tegra30_fuse_readl(FUSE_FAB_CODE);
-	randomness[7] = tegra30_fuse_readl(FUSE_LOT_CODE_0);
-	randomness[8] = tegra30_fuse_readl(FUSE_LOT_CODE_1);
-	randomness[9] = tegra30_fuse_readl(FUSE_WAFER_ID);
-	randomness[10] = tegra30_fuse_readl(FUSE_X_COORDINATE);
-	randomness[11] = tegra30_fuse_readl(FUSE_Y_COORDINATE);
+	randomness[5] = tegra_fuse_read_early(FUSE_VENDOR_CODE);
+	randomness[6] = tegra_fuse_read_early(FUSE_FAB_CODE);
+	randomness[7] = tegra_fuse_read_early(FUSE_LOT_CODE_0);
+	randomness[8] = tegra_fuse_read_early(FUSE_LOT_CODE_1);
+	randomness[9] = tegra_fuse_read_early(FUSE_WAFER_ID);
+	randomness[10] = tegra_fuse_read_early(FUSE_X_COORDINATE);
+	randomness[11] = tegra_fuse_read_early(FUSE_Y_COORDINATE);
 
 	add_device_randomness(randomness, sizeof(randomness));
 }
 
-static void __init legacy_fuse_init(void)
+static void __init tegra30_fuse_init(struct tegra_fuse *fuse)
 {
-	switch (tegra_get_chip_id()) {
-	case TEGRA30:
-		fuse_info = &tegra30_info;
-		break;
-	case TEGRA114:
-		fuse_info = &tegra114_info;
-		break;
-	case TEGRA124:
-	case TEGRA132:
-		fuse_info = &tegra124_info;
-		break;
-	default:
-		return;
-	}
-
-	fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
-}
-
-bool __init tegra30_spare_fuse(int spare_bit)
-{
-	u32 offset = fuse_info->spare_bit + spare_bit * 4;
-
-	return tegra30_fuse_readl(offset) & 1;
-}
-
-void __init tegra30_init_fuse_early(void)
-{
-	struct device_node *np;
-	const struct of_device_id *of_match;
-
-	np = of_find_matching_node_and_match(NULL, tegra30_fuse_of_match,
-						&of_match);
-	if (np) {
-		fuse_base = of_iomap(np, 0);
-		fuse_info = (struct tegra_fuse_info *)of_match->data;
-	} else
-		legacy_fuse_init();
-
-	if (!fuse_base) {
-		pr_warn("fuse DT node missing and unknown chip id: 0x%02x\n",
-			tegra_get_chip_id());
-		return;
-	}
+	fuse->read_early = tegra30_fuse_read_early;
+	fuse->read = tegra30_fuse_read;
 
 	tegra_init_revision();
-	speedo_tbl[fuse_info->speedo_idx](&tegra_sku_info);
+	fuse->soc->speedo_init(&tegra_sku_info);
 	tegra30_fuse_add_randomness();
 }
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+static const struct tegra_fuse_info tegra30_fuse_info = {
+	.read = tegra30_fuse_read,
+	.size = 0x2a4,
+	.spare = 0x144,
+};
+
+const struct tegra_fuse_soc tegra30_fuse_soc = {
+	.init = tegra30_fuse_init,
+	.speedo_init = tegra30_init_speedo_data,
+	.info = &tegra30_fuse_info,
+};
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+static const struct tegra_fuse_info tegra114_fuse_info = {
+	.read = tegra30_fuse_read,
+	.size = 0x2a0,
+	.spare = 0x180,
+};
+
+const struct tegra_fuse_soc tegra114_fuse_soc = {
+	.init = tegra30_fuse_init,
+	.speedo_init = tegra114_init_speedo_data,
+	.info = &tegra114_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+static const struct tegra_fuse_info tegra124_fuse_info = {
+	.read = tegra30_fuse_read,
+	.size = 0x300,
+	.spare = 0x200,
+};
+
+const struct tegra_fuse_soc tegra124_fuse_soc = {
+	.init = tegra30_fuse_init,
+	.speedo_init = tegra124_init_speedo_data,
+	.info = &tegra124_fuse_info,
+};
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_210_SOC)
+static const struct tegra_fuse_info tegra210_fuse_info = {
+	.read = tegra30_fuse_read,
+	.size = 0x300,
+	.spare = 0x280,
+};
+
+const struct tegra_fuse_soc tegra210_fuse_soc = {
+	.init = tegra30_fuse_init,
+	.speedo_init = tegra210_init_speedo_data,
+	.info = &tegra210_fuse_info,
+};
+#endif
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
index 3a398bf3..10c2076 100644
--- a/drivers/soc/tegra/fuse/fuse.h
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -19,53 +19,90 @@
 #ifndef __DRIVERS_MISC_TEGRA_FUSE_H
 #define __DRIVERS_MISC_TEGRA_FUSE_H
 
-#define TEGRA_FUSE_BASE	0x7000f800
-#define TEGRA_FUSE_SIZE	0x400
+#include <linux/dmaengine.h>
+#include <linux/types.h>
 
-int tegra_fuse_create_sysfs(struct device *dev, int size,
-		     u32 (*readl)(const unsigned int offset));
+struct tegra_fuse;
 
-bool tegra30_spare_fuse(int bit);
-u32 tegra30_fuse_readl(const unsigned int offset);
-void tegra30_init_fuse_early(void);
+struct tegra_fuse_info {
+	u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+	unsigned int size;
+	unsigned int spare;
+};
+
+struct tegra_fuse_soc {
+	void (*init)(struct tegra_fuse *fuse);
+	void (*speedo_init)(struct tegra_sku_info *info);
+	int (*probe)(struct tegra_fuse *fuse);
+
+	const struct tegra_fuse_info *info;
+};
+
+struct tegra_fuse {
+	struct device *dev;
+	void __iomem *base;
+	phys_addr_t phys;
+	struct clk *clk;
+
+	u32 (*read_early)(struct tegra_fuse *fuse, unsigned int offset);
+	u32 (*read)(struct tegra_fuse *fuse, unsigned int offset);
+	const struct tegra_fuse_soc *soc;
+
+	/* APBDMA on Tegra20 */
+	struct {
+		struct mutex lock;
+		struct completion wait;
+		struct dma_chan *chan;
+		struct dma_slave_config config;
+		dma_addr_t phys;
+		u32 *virt;
+	} apbdma;
+};
+
 void tegra_init_revision(void);
 void tegra_init_apbmisc(void);
 
+bool __init tegra_fuse_read_spare(unsigned int spare);
+u32 __init tegra_fuse_read_early(unsigned int offset);
+
 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
 void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
-bool tegra20_spare_fuse_early(int spare_bit);
-void tegra20_init_fuse_early(void);
-u32 tegra20_fuse_early(const unsigned int offset);
-#else
-static inline void tegra20_init_speedo_data(struct tegra_sku_info *sku_info) {}
-static inline bool tegra20_spare_fuse_early(int spare_bit)
-{
-	return false;
-}
-static inline void tegra20_init_fuse_early(void) {}
-static inline u32 tegra20_fuse_early(const unsigned int offset)
-{
-	return 0;
-}
 #endif
 
-
 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
 void tegra30_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra30_init_speedo_data(struct tegra_sku_info *sku_info) {}
 #endif
 
 #ifdef CONFIG_ARCH_TEGRA_114_SOC
 void tegra114_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra114_init_speedo_data(struct tegra_sku_info *sku_info) {}
 #endif
 
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
 void tegra124_init_speedo_data(struct tegra_sku_info *sku_info);
-#else
-static inline void tegra124_init_speedo_data(struct tegra_sku_info *sku_info) {}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+void tegra210_init_speedo_data(struct tegra_sku_info *sku_info);
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+extern const struct tegra_fuse_soc tegra20_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+extern const struct tegra_fuse_soc tegra30_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+extern const struct tegra_fuse_soc tegra114_fuse_soc;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
+extern const struct tegra_fuse_soc tegra124_fuse_soc;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_fuse_soc tegra210_fuse_soc;
 #endif
 
 #endif
diff --git a/drivers/soc/tegra/fuse/speedo-tegra114.c b/drivers/soc/tegra/fuse/speedo-tegra114.c
index 2a6ca03..1ba41eb 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra114.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra114.c
@@ -22,7 +22,7 @@
 
 #include "fuse.h"
 
-#define CORE_PROCESS_CORNERS	2
+#define SOC_PROCESS_CORNERS	2
 #define CPU_PROCESS_CORNERS	2
 
 enum {
@@ -31,7 +31,7 @@
 	THRESHOLD_INDEX_COUNT,
 };
 
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
 	{1123,     UINT_MAX},
 	{0,        UINT_MAX},
 };
@@ -74,8 +74,8 @@
 	}
 
 	if (rev == TEGRA_REVISION_A01) {
-		tmp = tegra30_fuse_readl(0x270) << 1;
-		tmp |= tegra30_fuse_readl(0x26c);
+		tmp = tegra_fuse_read_early(0x270) << 1;
+		tmp |= tegra_fuse_read_early(0x26c);
 		if (!tmp)
 			sku_info->cpu_speedo_id = 0;
 	}
@@ -84,27 +84,27 @@
 void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info)
 {
 	u32 cpu_speedo_val;
-	u32 core_speedo_val;
+	u32 soc_speedo_val;
 	int threshold;
 	int i;
 
 	BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
 			THRESHOLD_INDEX_COUNT);
-	BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+	BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
 			THRESHOLD_INDEX_COUNT);
 
 	rev_sku_to_speedo_ids(sku_info, &threshold);
 
-	cpu_speedo_val = tegra30_fuse_readl(0x12c) + 1024;
-	core_speedo_val = tegra30_fuse_readl(0x134);
+	cpu_speedo_val = tegra_fuse_read_early(0x12c) + 1024;
+	soc_speedo_val = tegra_fuse_read_early(0x134);
 
 	for (i = 0; i < CPU_PROCESS_CORNERS; i++)
 		if (cpu_speedo_val < cpu_process_speedos[threshold][i])
 			break;
 	sku_info->cpu_process_id = i;
 
-	for (i = 0; i < CORE_PROCESS_CORNERS; i++)
-		if (core_speedo_val < core_process_speedos[threshold][i])
+	for (i = 0; i < SOC_PROCESS_CORNERS; i++)
+		if (soc_speedo_val < soc_process_speedos[threshold][i])
 			break;
-	sku_info->core_process_id = i;
+	sku_info->soc_process_id = i;
 }
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
index 4636238..a63a134 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra124.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -24,7 +24,7 @@
 
 #define CPU_PROCESS_CORNERS	2
 #define GPU_PROCESS_CORNERS	2
-#define CORE_PROCESS_CORNERS	2
+#define SOC_PROCESS_CORNERS	2
 
 #define FUSE_CPU_SPEEDO_0	0x14
 #define FUSE_CPU_SPEEDO_1	0x2c
@@ -53,7 +53,7 @@
 	{0,	UINT_MAX},
 };
 
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
 	{2101,	UINT_MAX},
 	{0,	UINT_MAX},
 };
@@ -119,19 +119,19 @@
 			THRESHOLD_INDEX_COUNT);
 	BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
 			THRESHOLD_INDEX_COUNT);
-	BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+	BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
 			THRESHOLD_INDEX_COUNT);
 
-	cpu_speedo_0_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_0);
+	cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
 
 	/* GPU Speedo is stored in CPU_SPEEDO_2 */
-	sku_info->gpu_speedo_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_2);
+	sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
 
-	soc_speedo_0_value = tegra30_fuse_readl(FUSE_SOC_SPEEDO_0);
+	soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
 
-	cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
-	soc_iddq_value = tegra30_fuse_readl(FUSE_SOC_IDDQ);
-	gpu_iddq_value = tegra30_fuse_readl(FUSE_GPU_IDDQ);
+	cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
+	soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
+	gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
 
 	sku_info->cpu_speedo_value = cpu_speedo_0_value;
 
@@ -143,7 +143,7 @@
 
 	rev_sku_to_speedo_ids(sku_info, &threshold);
 
-	sku_info->cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
+	sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
 
 	for (i = 0; i < GPU_PROCESS_CORNERS; i++)
 		if (sku_info->gpu_speedo_value <
@@ -157,11 +157,11 @@
 				break;
 	sku_info->cpu_process_id = i;
 
-	for (i = 0; i < CORE_PROCESS_CORNERS; i++)
+	for (i = 0; i < SOC_PROCESS_CORNERS; i++)
 		if (soc_speedo_0_value <
-			core_process_speedos[threshold][i])
+			soc_process_speedos[threshold][i])
 			break;
-	sku_info->core_process_id = i;
+	sku_info->soc_process_id = i;
 
 	pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
 		 sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
diff --git a/drivers/soc/tegra/fuse/speedo-tegra20.c b/drivers/soc/tegra/fuse/speedo-tegra20.c
index eff1b63..5f7818b 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra20.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra20.c
@@ -28,11 +28,11 @@
 #define CPU_SPEEDO_REDUND_MSBIT		39
 #define CPU_SPEEDO_REDUND_OFFS	(CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
 
-#define CORE_SPEEDO_LSBIT		40
-#define CORE_SPEEDO_MSBIT		47
-#define CORE_SPEEDO_REDUND_LSBIT	48
-#define CORE_SPEEDO_REDUND_MSBIT	55
-#define CORE_SPEEDO_REDUND_OFFS	(CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT)
+#define SOC_SPEEDO_LSBIT		40
+#define SOC_SPEEDO_MSBIT		47
+#define SOC_SPEEDO_REDUND_LSBIT		48
+#define SOC_SPEEDO_REDUND_MSBIT		55
+#define SOC_SPEEDO_REDUND_OFFS	(SOC_SPEEDO_REDUND_MSBIT - SOC_SPEEDO_MSBIT)
 
 #define SPEEDO_MULT			4
 
@@ -56,7 +56,7 @@
 	{316, 331, 383, UINT_MAX},
 };
 
-static const u32 __initconst core_process_speedos[][PROCESS_CORNERS_NUM] = {
+static const u32 __initconst soc_process_speedos[][PROCESS_CORNERS_NUM] = {
 	{165, 195, 224, UINT_MAX},
 	{165, 195, 224, UINT_MAX},
 	{165, 195, 224, UINT_MAX},
@@ -69,7 +69,7 @@
 	int i;
 
 	BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT);
-	BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != SPEEDO_ID_COUNT);
+	BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) != SPEEDO_ID_COUNT);
 
 	if (SPEEDO_ID_SELECT_0(sku_info->revision))
 		sku_info->soc_speedo_id = SPEEDO_ID_0;
@@ -80,8 +80,8 @@
 
 	val = 0;
 	for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) {
-		reg = tegra20_spare_fuse_early(i) |
-			tegra20_spare_fuse_early(i + CPU_SPEEDO_REDUND_OFFS);
+		reg = tegra_fuse_read_spare(i) |
+			tegra_fuse_read_spare(i + CPU_SPEEDO_REDUND_OFFS);
 		val = (val << 1) | (reg & 0x1);
 	}
 	val = val * SPEEDO_MULT;
@@ -94,17 +94,17 @@
 	sku_info->cpu_process_id = i;
 
 	val = 0;
-	for (i = CORE_SPEEDO_MSBIT; i >= CORE_SPEEDO_LSBIT; i--) {
-		reg = tegra20_spare_fuse_early(i) |
-			tegra20_spare_fuse_early(i + CORE_SPEEDO_REDUND_OFFS);
+	for (i = SOC_SPEEDO_MSBIT; i >= SOC_SPEEDO_LSBIT; i--) {
+		reg = tegra_fuse_read_spare(i) |
+			tegra_fuse_read_spare(i + SOC_SPEEDO_REDUND_OFFS);
 		val = (val << 1) | (reg & 0x1);
 	}
 	val = val * SPEEDO_MULT;
 	pr_debug("Core speedo value %u\n", val);
 
 	for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
-		if (val <= core_process_speedos[sku_info->soc_speedo_id][i])
+		if (val <= soc_process_speedos[sku_info->soc_speedo_id][i])
 			break;
 	}
-	sku_info->core_process_id = i;
+	sku_info->soc_process_id = i;
 }
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
new file mode 100644
index 0000000..5373f4c
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013-2015, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS	2
+#define GPU_PROCESS_CORNERS	2
+#define SOC_PROCESS_CORNERS	3
+
+#define FUSE_CPU_SPEEDO_0	0x014
+#define FUSE_CPU_SPEEDO_1	0x02c
+#define FUSE_CPU_SPEEDO_2	0x030
+#define FUSE_SOC_SPEEDO_0	0x034
+#define FUSE_SOC_SPEEDO_1	0x038
+#define FUSE_SOC_SPEEDO_2	0x03c
+#define FUSE_CPU_IDDQ		0x018
+#define FUSE_SOC_IDDQ		0x040
+#define FUSE_GPU_IDDQ		0x128
+#define FUSE_FT_REV		0x028
+
+enum {
+	THRESHOLD_INDEX_0,
+	THRESHOLD_INDEX_1,
+	THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+	{ 2119, UINT_MAX },
+	{ 2119, UINT_MAX },
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+	{ UINT_MAX, UINT_MAX },
+	{ UINT_MAX, UINT_MAX },
+};
+
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
+	{ 1950, 2100, UINT_MAX },
+	{ 1950, 2100, UINT_MAX },
+};
+
+static u8 __init get_speedo_revision(void)
+{
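+	/* the speedo revision is fused into spare bits 2 (LSB) through 4 (MSB) */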
+	return tegra_fuse_read_spare(4) << 2 |
+	       tegra_fuse_read_spare(3) << 1 |
+	       tegra_fuse_read_spare(2) << 0;
+}
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+					 u8 speedo_rev, int *threshold)
+{
+	int sku = sku_info->sku_id;
+
+	/* Assign to default */
+	sku_info->cpu_speedo_id = 0;
+	sku_info->soc_speedo_id = 0;
+	sku_info->gpu_speedo_id = 0;
+	*threshold = THRESHOLD_INDEX_0;
+
+	switch (sku) {
+	case 0x00: /* Engineering SKU */
+	case 0x01: /* Engineering SKU */
+	case 0x07:
+	case 0x17:
+	case 0x27:
+		if (speedo_rev >= 2)
+			sku_info->gpu_speedo_id = 1;
+		break;
+
+	case 0x13:
+		if (speedo_rev >= 2)
+			sku_info->gpu_speedo_id = 1;
+
+		sku_info->cpu_speedo_id = 1;
+		break;
+
+	default:
+		pr_err("Tegra210: unknown SKU %#04x\n", sku);
+		/* Using the default for the error case */
+		break;
+	}
+}
+
+static int get_process_id(int value, const u32 *speedos, unsigned int num)
+{
+	unsigned int i;
+
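+	/* return the first process corner whose threshold exceeds the fused value */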
+	for (i = 0; i < num; i++)
+		if (value < speedos[i])
+			return i;
+
+	return -EINVAL;
+}
+
+void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+	int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+	unsigned int index;
+	u8 speedo_revision;
+
+	BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+			THRESHOLD_INDEX_COUNT);
+	BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+			THRESHOLD_INDEX_COUNT);
+	BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
+			THRESHOLD_INDEX_COUNT);
+
+	/* Read speedo/IDDQ fuses */
+	cpu_speedo[0] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
+	cpu_speedo[1] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_1);
+	cpu_speedo[2] = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+
+	soc_speedo[0] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+	soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
+	soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
+
+	cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
+	soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
+	gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
+
+	/*
+	 * Determine CPU, GPU and SoC speedo values depending on speedo fusing
+	 * revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
+	 */
+	speedo_revision = get_speedo_revision();
+	pr_info("Speedo Revision %u\n", speedo_revision);
+
+	if (speedo_revision >= 3) {
+		sku_info->cpu_speedo_value = cpu_speedo[0];
+		sku_info->gpu_speedo_value = cpu_speedo[2];
+		sku_info->soc_speedo_value = soc_speedo[0];
+	} else if (speedo_revision == 2) {
+		sku_info->cpu_speedo_value = (-1938 + (1095 * cpu_speedo[0] / 100)) / 10;
+		sku_info->gpu_speedo_value = (-1662 + (1082 * cpu_speedo[2] / 100)) / 10;
+		sku_info->soc_speedo_value = ( -705 + (1037 * soc_speedo[0] / 100)) / 10;
+	} else {
+		sku_info->cpu_speedo_value = 2100;
+		sku_info->gpu_speedo_value = cpu_speedo[2] - 75;
+		sku_info->soc_speedo_value = 1900;
+	}
+
+	if ((sku_info->cpu_speedo_value <= 0) ||
+	    (sku_info->gpu_speedo_value <= 0) ||
+	    (sku_info->soc_speedo_value <= 0)) {
+		WARN(1, "speedo value not fused\n");
+		return;
+	}
+
+	rev_sku_to_speedo_ids(sku_info, speedo_revision, &index);
+
+	sku_info->gpu_process_id = get_process_id(sku_info->gpu_speedo_value,
+						  gpu_process_speedos[index],
+						  GPU_PROCESS_CORNERS);
+
+	sku_info->cpu_process_id = get_process_id(sku_info->cpu_speedo_value,
+						  cpu_process_speedos[index],
+						  CPU_PROCESS_CORNERS);
+
+	sku_info->soc_process_id = get_process_id(sku_info->soc_speedo_value,
+						  soc_process_speedos[index],
+						  SOC_PROCESS_CORNERS);
+
+	pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+		 sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra30.c b/drivers/soc/tegra/fuse/speedo-tegra30.c
index b17f0dc..9b010b3 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra30.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra30.c
@@ -22,7 +22,7 @@
 
 #include "fuse.h"
 
-#define CORE_PROCESS_CORNERS	1
+#define SOC_PROCESS_CORNERS	1
 #define CPU_PROCESS_CORNERS	6
 
 #define FUSE_SPEEDO_CALIB_0	0x14
@@ -54,7 +54,7 @@
 	THRESHOLD_INDEX_COUNT,
 };
 
-static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+static const u32 __initconst soc_process_speedos[][SOC_PROCESS_CORNERS] = {
 	{180},
 	{170},
 	{195},
@@ -93,25 +93,25 @@
 	int bit_minus1;
 	int bit_minus2;
 
-	reg = tegra30_fuse_readl(FUSE_SPEEDO_CALIB_0);
+	reg = tegra_fuse_read_early(FUSE_SPEEDO_CALIB_0);
 
 	*speedo_lp = (reg & 0xFFFF) * 4;
 	*speedo_g = ((reg >> 16) & 0xFFFF) * 4;
 
-	ate_ver = tegra30_fuse_readl(FUSE_TEST_PROG_VER);
+	ate_ver = tegra_fuse_read_early(FUSE_TEST_PROG_VER);
 	pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver/10, ate_ver%10);
 
 	if (ate_ver >= 26) {
-		bit_minus1 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1);
-		bit_minus1 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1_R);
-		bit_minus2 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2);
-		bit_minus2 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2_R);
+		bit_minus1 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1);
+		bit_minus1 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS1_R);
+		bit_minus2 = tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2);
+		bit_minus2 |= tegra_fuse_read_spare(LP_SPEEDO_BIT_MINUS2_R);
 		*speedo_lp |= (bit_minus1 << 1) | bit_minus2;
 
-		bit_minus1 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1);
-		bit_minus1 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1_R);
-		bit_minus2 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2);
-		bit_minus2 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2_R);
+		bit_minus1 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1);
+		bit_minus1 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS1_R);
+		bit_minus2 = tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2);
+		bit_minus2 |= tegra_fuse_read_spare(G_SPEEDO_BIT_MINUS2_R);
 		*speedo_g |= (bit_minus1 << 1) | bit_minus2;
 	} else {
 		*speedo_lp |= 0x3;
@@ -121,7 +121,7 @@
 
 static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
 {
-	int package_id = tegra30_fuse_readl(FUSE_PACKAGE_INFO) & 0x0F;
+	int package_id = tegra_fuse_read_early(FUSE_PACKAGE_INFO) & 0x0F;
 
 	switch (sku_info->revision) {
 	case TEGRA_REVISION_A01:
@@ -246,19 +246,19 @@
 void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
 {
 	u32 cpu_speedo_val;
-	u32 core_speedo_val;
+	u32 soc_speedo_val;
 	int i;
 
 	BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
 			THRESHOLD_INDEX_COUNT);
-	BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+	BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
 			THRESHOLD_INDEX_COUNT);
 
 
 	rev_sku_to_speedo_ids(sku_info);
-	fuse_speedo_calib(&cpu_speedo_val, &core_speedo_val);
+	fuse_speedo_calib(&cpu_speedo_val, &soc_speedo_val);
 	pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val);
-	pr_debug("Tegra Core speedo value %u\n", core_speedo_val);
+	pr_debug("Tegra Core speedo value %u\n", soc_speedo_val);
 
 	for (i = 0; i < CPU_PROCESS_CORNERS; i++) {
 		if (cpu_speedo_val < cpu_process_speedos[threshold_index][i])
@@ -273,16 +273,16 @@
 		sku_info->cpu_speedo_id = 1;
 	}
 
-	for (i = 0; i < CORE_PROCESS_CORNERS; i++) {
-		if (core_speedo_val < core_process_speedos[threshold_index][i])
+	for (i = 0; i < SOC_PROCESS_CORNERS; i++) {
+		if (soc_speedo_val < soc_process_speedos[threshold_index][i])
 			break;
 	}
-	sku_info->core_process_id = i - 1;
+	sku_info->soc_process_id = i - 1;
 
-	if (sku_info->core_process_id == -1) {
-		pr_warn("Tegra CORE speedo value %3d out of range",
-				 core_speedo_val);
-		sku_info->core_process_id = 0;
+	if (sku_info->soc_process_id == -1) {
+		pr_warn("Tegra SoC speedo value %3d out of range",
+			soc_speedo_val);
+		sku_info->soc_process_id = 0;
 		sku_info->soc_speedo_id = 1;
 	}
 }
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
index 73fad05..5b18f6f 100644
--- a/drivers/soc/tegra/fuse/tegra-apbmisc.c
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -21,11 +21,10 @@
 #include <linux/io.h>
 
 #include <soc/tegra/fuse.h>
+#include <soc/tegra/common.h>
 
 #include "fuse.h"
 
-#define APBMISC_BASE	0x70000800
-#define APBMISC_SIZE	0x64
 #define FUSE_SKU_INFO	0x10
 
 #define PMC_STRAPPING_OPT_A_RAM_CODE_SHIFT	4
@@ -95,8 +94,8 @@
 		rev = TEGRA_REVISION_A02;
 		break;
 	case 3:
-		if (chip_id == TEGRA20 && (tegra20_spare_fuse_early(18) ||
-					   tegra20_spare_fuse_early(19)))
+		if (chip_id == TEGRA20 && (tegra_fuse_read_spare(18) ||
+					   tegra_fuse_read_spare(19)))
 			rev = TEGRA_REVISION_A03p;
 		else
 			rev = TEGRA_REVISION_A03;
@@ -110,27 +109,74 @@
 
 	tegra_sku_info.revision = rev;
 
-	if (chip_id == TEGRA20)
-		tegra_sku_info.sku_id = tegra20_fuse_early(FUSE_SKU_INFO);
-	else
-		tegra_sku_info.sku_id = tegra30_fuse_readl(FUSE_SKU_INFO);
+	tegra_sku_info.sku_id = tegra_fuse_read_early(FUSE_SKU_INFO);
 }
 
 void __init tegra_init_apbmisc(void)
 {
+	struct resource apbmisc, straps;
 	struct device_node *np;
 
 	np = of_find_matching_node(NULL, apbmisc_match);
-	apbmisc_base = of_iomap(np, 0);
-	if (!apbmisc_base) {
-		pr_warn("ioremap tegra apbmisc failed. using %08x instead\n",
-			APBMISC_BASE);
-		apbmisc_base = ioremap(APBMISC_BASE, APBMISC_SIZE);
+	if (!np) {
+		/*
+		 * Fall back to legacy initialization for 32-bit ARM only. All
+		 * 64-bit ARM device tree files for Tegra are required to have
+		 * an APBMISC node.
+		 *
+		 * This is for backwards-compatibility with old device trees
+		 * that didn't contain an APBMISC node.
+		 */
+		if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+			/* APBMISC registers (chip revision, ...) */
+			apbmisc.start = 0x70000800;
+			apbmisc.end = 0x70000863;
+			apbmisc.flags = IORESOURCE_MEM;
+
+			/* strapping options */
+			if (tegra_get_chip_id() == TEGRA124) {
+				straps.start = 0x7000e864;
+				straps.end = 0x7000e867;
+			} else {
+				straps.start = 0x70000008;
+				straps.end = 0x7000000b;
+			}
+
+			straps.flags = IORESOURCE_MEM;
+
+			pr_warn("Using APBMISC region %pR\n", &apbmisc);
+			pr_warn("Using strapping options registers %pR\n",
+				&straps);
+		} else {
+			/*
+			 * At this point we're not running on Tegra, so play
+			 * nice with multi-platform kernels.
+			 */
+			return;
+		}
+	} else {
+		/*
+		 * Extract information from the device tree if we've found a
+		 * matching node.
+		 */
+		if (of_address_to_resource(np, 0, &apbmisc) < 0) {
+			pr_err("failed to get APBMISC registers\n");
+			return;
+		}
+
+		if (of_address_to_resource(np, 1, &straps) < 0) {
+			pr_err("failed to get strapping options registers\n");
+			return;
+		}
 	}
 
-	strapping_base = of_iomap(np, 1);
+	apbmisc_base = ioremap_nocache(apbmisc.start, resource_size(&apbmisc));
+	if (!apbmisc_base)
+		pr_err("failed to map APBMISC registers\n");
+
+	strapping_base = ioremap_nocache(straps.start, resource_size(&straps));
 	if (!strapping_base)
-		pr_err("ioremap tegra strapping_base failed\n");
+		pr_err("failed to map strapping options registers\n");
 
 	long_ram_code = of_property_read_bool(np, "nvidia,long-ram-code");
 }
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 75d0457..bc34cf7 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -17,6 +17,8 @@
  *
  */
 
+#define pr_fmt(fmt) "tegra-pmc: " fmt
+
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/clk/tegra.h>
@@ -457,7 +459,6 @@
 				 unsigned long *status, unsigned int *bit)
 {
 	unsigned long rate, value;
-	struct clk *clk;
 
 	*bit = id % 32;
 
@@ -476,12 +477,7 @@
 		*request = IO_DPD2_REQ;
 	}
 
-	clk = clk_get_sys(NULL, "pclk");
-	if (IS_ERR(clk))
-		return PTR_ERR(clk);
-
-	rate = clk_get_rate(clk);
-	clk_put(clk);
+	rate = clk_get_rate(pmc->clk);
 
 	tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
 
@@ -535,8 +531,10 @@
 	tegra_pmc_writel(value, request);
 
 	err = tegra_io_rail_poll(status, mask, 0, 250);
-	if (err < 0)
+	if (err < 0) {
+		pr_info("tegra_io_rail_poll() failed: %d\n", err);
 		return err;
+	}
 
 	tegra_io_rail_unprepare();
 
@@ -551,8 +549,10 @@
 	int err;
 
 	err = tegra_io_rail_prepare(id, &request, &status, &bit);
-	if (err < 0)
+	if (err < 0) {
+		pr_info("tegra_io_rail_prepare() failed: %d\n", err);
 		return err;
+	}
 
 	mask = 1 << bit;
 
@@ -736,12 +736,12 @@
 	u32 value, checksum;
 
 	if (!pmc->soc->has_tsense_reset)
-		goto out;
+		return;
 
 	np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
 	if (!np) {
 		dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
-		goto out;
+		return;
 	}
 
 	if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) {
@@ -801,7 +801,6 @@
 
 out:
 	of_node_put(np);
-	return;
 }
 
 static int tegra_pmc_probe(struct platform_device *pdev)
@@ -1002,7 +1001,56 @@
 	.has_gpu_clamps = true,
 };
 
+static const char * const tegra210_powergates[] = {
+	[TEGRA_POWERGATE_CPU] = "crail",
+	[TEGRA_POWERGATE_3D] = "3d",
+	[TEGRA_POWERGATE_VENC] = "venc",
+	[TEGRA_POWERGATE_PCIE] = "pcie",
+	[TEGRA_POWERGATE_L2] = "l2",
+	[TEGRA_POWERGATE_MPE] = "mpe",
+	[TEGRA_POWERGATE_HEG] = "heg",
+	[TEGRA_POWERGATE_SATA] = "sata",
+	[TEGRA_POWERGATE_CPU1] = "cpu1",
+	[TEGRA_POWERGATE_CPU2] = "cpu2",
+	[TEGRA_POWERGATE_CPU3] = "cpu3",
+	[TEGRA_POWERGATE_CELP] = "celp",
+	[TEGRA_POWERGATE_CPU0] = "cpu0",
+	[TEGRA_POWERGATE_C0NC] = "c0nc",
+	[TEGRA_POWERGATE_C1NC] = "c1nc",
+	[TEGRA_POWERGATE_SOR] = "sor",
+	[TEGRA_POWERGATE_DIS] = "dis",
+	[TEGRA_POWERGATE_DISB] = "disb",
+	[TEGRA_POWERGATE_XUSBA] = "xusba",
+	[TEGRA_POWERGATE_XUSBB] = "xusbb",
+	[TEGRA_POWERGATE_XUSBC] = "xusbc",
+	[TEGRA_POWERGATE_VIC] = "vic",
+	[TEGRA_POWERGATE_IRAM] = "iram",
+	[TEGRA_POWERGATE_NVDEC] = "nvdec",
+	[TEGRA_POWERGATE_NVJPG] = "nvjpg",
+	[TEGRA_POWERGATE_AUD] = "aud",
+	[TEGRA_POWERGATE_DFD] = "dfd",
+	[TEGRA_POWERGATE_VE2] = "ve2",
+};
+
+static const u8 tegra210_cpu_powergates[] = {
+	TEGRA_POWERGATE_CPU0,
+	TEGRA_POWERGATE_CPU1,
+	TEGRA_POWERGATE_CPU2,
+	TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra210_pmc_soc = {
+	.num_powergates = ARRAY_SIZE(tegra210_powergates),
+	.powergates = tegra210_powergates,
+	.num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
+	.cpu_powergates = tegra210_cpu_powergates,
+	.has_tsense_reset = true,
+	.has_gpu_clamps = true,
+};
+
 static const struct of_device_id tegra_pmc_match[] = {
+	{ .compatible = "nvidia,tegra210-pmc", .data = &tegra210_pmc_soc },
+	{ .compatible = "nvidia,tegra132-pmc", .data = &tegra124_pmc_soc },
 	{ .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc },
 	{ .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc },
 	{ .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc },
@@ -1035,25 +1083,44 @@
 	bool invert;
 	u32 value;
 
-	if (!soc_is_tegra())
-		return 0;
-
 	np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
 	if (!np) {
-		pr_warn("PMC device node not found, disabling powergating\n");
+		/*
+		 * Fall back to legacy initialization for 32-bit ARM only. All
+		 * 64-bit ARM device tree files for Tegra are required to have
+		 * a PMC node.
+		 *
+		 * This is for backwards-compatibility with old device trees
+		 * that didn't contain a PMC node. Note that in this case the
+		 * SoC data can't be matched and therefore powergating is
+		 * disabled.
+		 */
+		if (IS_ENABLED(CONFIG_ARM) && soc_is_tegra()) {
+			pr_warn("DT node not found, powergating disabled\n");
 
-		regs.start = 0x7000e400;
-		regs.end = 0x7000e7ff;
-		regs.flags = IORESOURCE_MEM;
+			regs.start = 0x7000e400;
+			regs.end = 0x7000e7ff;
+			regs.flags = IORESOURCE_MEM;
 
-		pr_warn("Using memory region %pR\n", &regs);
+			pr_warn("Using memory region %pR\n", &regs);
+		} else {
+			/*
+			 * At this point we're not running on Tegra, so play
+			 * nice with multi-platform kernels.
+			 */
+			return 0;
+		}
 	} else {
-		pmc->soc = match->data;
-	}
+		/*
+		 * Extract information from the device tree if we've found a
+		 * matching node.
+		 */
+		if (of_address_to_resource(np, 0, &regs) < 0) {
+			pr_err("failed to get PMC registers\n");
+			return -ENXIO;
+		}
 
-	if (of_address_to_resource(np, 0, &regs) < 0) {
-		pr_err("failed to get PMC registers\n");
-		return -ENXIO;
+		pmc->soc = match->data;
 	}
 
 	pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
@@ -1064,6 +1131,10 @@
 
 	mutex_init(&pmc->powergates_lock);
 
+	/*
+	 * Invert the interrupt polarity if a PMC device tree node exists and
+	 * contains the nvidia,invert-interrupt property.
+	 */
 	invert = of_property_read_bool(np, "nvidia,invert-interrupt");
 
 	value = tegra_pmc_readl(PMC_CNTRL);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index b0f30fb..4887f31 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -326,6 +326,15 @@
 	  This enables master mode support for the SPIFC (SPI flash
 	  controller) available in Amlogic Meson SoCs.
 
+config SPI_MT65XX
+	tristate "MediaTek SPI controller"
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	help
+	  This selects the MediaTek(R) SPI bus driver for MediaTek
+	  MT65XX and MT81XX series ARM SoCs.
+	  If you want to use the MediaTek(R) SPI interface, say Y or M
+	  here. If you are not sure, say N.
+
 config SPI_OC_TINY
 	tristate "OpenCores tiny SPI"
 	depends on GPIOLIB || COMPILE_TEST
@@ -598,6 +607,17 @@
 
 	  Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
 
+config SPI_XLP
+	tristate "Netlogic XLP SPI controller driver"
+	depends on CPU_XLP || COMPILE_TEST
+	help
+	  Enable support for the SPI controller on the Netlogic XLP SoCs.
+	  Currently supported XLP variants are XLP8XX, XLP3XX, XLP2XX, XLP9XX
+	  and XLP5XX.
+
+	  If you have a Netlogic XLP platform, say Y here.
+	  If unsure, say N.
+
 config SPI_XTENSA_XTFPGA
 	tristate "Xtensa SPI controller for xtfpga"
 	depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 1154dba..6a7f6f9 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_SPI_MPC512x_PSC)		+= spi-mpc512x-psc.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)		+= spi-mpc52xx-psc.o
 obj-$(CONFIG_SPI_MPC52xx)		+= spi-mpc52xx.o
+obj-$(CONFIG_SPI_MT65XX)		+= spi-mt65xx.o
 obj-$(CONFIG_SPI_MXS)			+= spi-mxs.o
 obj-$(CONFIG_SPI_NUC900)		+= spi-nuc900.o
 obj-$(CONFIG_SPI_OC_TINY)		+= spi-oc-tiny.o
@@ -88,5 +89,6 @@
 obj-$(CONFIG_SPI_TXX9)			+= spi-txx9.o
 obj-$(CONFIG_SPI_XCOMM)		+= spi-xcomm.o
 obj-$(CONFIG_SPI_XILINX)		+= spi-xilinx.o
+obj-$(CONFIG_SPI_XLP)			+= spi-xlp.o
 obj-$(CONFIG_SPI_XTENSA_XTFPGA)		+= spi-xtensa-xtfpga.o
 obj-$(CONFIG_SPI_ZYNQMP_GQSPI)		+= spi-zynqmp-gqspi.o
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index c9eca34..bf9ed38 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -19,7 +19,6 @@
 #include <linux/interrupt.h>
 #include <linux/spi/spi.h>
 #include <linux/slab.h>
-#include <linux/platform_data/atmel.h>
 #include <linux/platform_data/dma-atmel.h>
 #include <linux/of.h>
 
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 59705ab..e7874a6 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -480,7 +480,7 @@
 					 struct spi_device *spi,
 					 struct spi_transfer *tfr,
 					 u32 cs,
-					 unsigned long xfer_time_us)
+					 unsigned long long xfer_time_us)
 {
 	struct bcm2835_spi *bs = spi_master_get_devdata(master);
 	unsigned long timeout;
@@ -531,7 +531,8 @@
 {
 	struct bcm2835_spi *bs = spi_master_get_devdata(master);
 	unsigned long spi_hz, clk_hz, cdiv;
-	unsigned long spi_used_hz, xfer_time_us;
+	unsigned long spi_used_hz;
+	unsigned long long xfer_time_us;
 	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
 
 	/* set clock */
@@ -553,13 +554,11 @@
 	spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
-	/* handle all the modes */
+	/* handle 3-wire mode */
 	if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
 		cs |= BCM2835_SPI_CS_REN;
-	if (spi->mode & SPI_CPOL)
-		cs |= BCM2835_SPI_CS_CPOL;
-	if (spi->mode & SPI_CPHA)
-		cs |= BCM2835_SPI_CS_CPHA;
+	else
+		cs &= ~BCM2835_SPI_CS_REN;
 
 	/* for gpio_cs set dummy CS so that no HW-CS get changed
 	 * we can not run this in bcm2835_spi_set_cs, as it does
@@ -575,9 +574,10 @@
 	bs->rx_len = tfr->len;
 
 	/* calculate the estimated time in us the transfer runs */
-	xfer_time_us = tfr->len
+	xfer_time_us = (unsigned long long)tfr->len
 		* 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
-		* 1000000 / spi_used_hz;
+		* 1000000;
+	do_div(xfer_time_us, spi_used_hz);
 
 	/* for short requests run polling*/
 	if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
@@ -592,6 +592,25 @@
 	return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
 }
 
+static int bcm2835_spi_prepare_message(struct spi_master *master,
+				       struct spi_message *msg)
+{
+	struct spi_device *spi = msg->spi;
+	struct bcm2835_spi *bs = spi_master_get_devdata(master);
+	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
+
+	if (spi->mode & SPI_CPOL)
+		cs |= BCM2835_SPI_CS_CPOL;
+	if (spi->mode & SPI_CPHA)
+		cs |= BCM2835_SPI_CS_CPHA;
+
+	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+	return 0;
+}
+
 static void bcm2835_spi_handle_err(struct spi_master *master,
 				   struct spi_message *msg)
 {
@@ -739,6 +758,7 @@
 	master->set_cs = bcm2835_spi_set_cs;
 	master->transfer_one = bcm2835_spi_transfer_one;
 	master->handle_err = bcm2835_spi_handle_err;
+	master->prepare_message = bcm2835_spi_prepare_message;
 	master->dev.of_node = pdev->dev.of_node;
 
 	bs = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index f5ca6dc..55789f7 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -76,6 +76,7 @@
 #define HSSPI_FIFO_REG(x)			(0x200 + (x) * 0x200)
 
 
+#define HSSPI_OP_MULTIBIT			BIT(11)
 #define HSSPI_OP_CODE_SHIFT			13
 #define HSSPI_OP_SLEEP				(0 << HSSPI_OP_CODE_SHIFT)
 #define HSSPI_OP_READ_WRITE			(1 << HSSPI_OP_CODE_SHIFT)
@@ -171,9 +172,12 @@
 	if (opcode != HSSPI_OP_READ)
 		step_size -= HSSPI_OPCODE_LEN;
 
-	__raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
-		     2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
-		     2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+	if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+	    (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
+		opcode |= HSSPI_OP_MULTIBIT;
+
+	__raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
+		     1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
 		     bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
 
 	while (pending > 0) {
@@ -374,7 +378,8 @@
 	master->num_chipselect = 8;
 	master->setup = bcm63xx_hsspi_setup;
 	master->transfer_one_message = bcm63xx_hsspi_transfer_one;
-	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+			    SPI_RX_DUAL | SPI_TX_DUAL;
 	master->bits_per_word_mask = SPI_BPW_MASK(8);
 	master->auto_runtime_pm = true;
 
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 06b34e5..47bb9b89 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -49,7 +49,7 @@
 {
 	/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
 
-	bool oldbit = !(word & 1);
+	u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
 	/* clock starts at inactive polarity */
 	for (word <<= (32 - bits); likely(bits); bits--) {
 
@@ -81,7 +81,7 @@
 {
 	/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
 
-	bool oldbit = !(word & (1 << 31));
+	u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
 	/* clock starts at inactive polarity */
 	for (word <<= (32 - bits); likely(bits); bits--) {
 
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 987afeb..3cf9faa 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -139,6 +139,8 @@
 	u32			(*get_tx)(struct davinci_spi *);
 
 	u8			*bytes_per_word;
+
+	u8			prescaler_limit;
 };
 
 static struct davinci_spi_config davinci_spi_default_cfg;
@@ -255,7 +257,7 @@
  * This function calculates the prescale value that generates a clock rate
  * less than or equal to the specified maximum.
  *
- * Returns: calculated prescale - 1 for easy programming into SPI registers
+ * Returns: calculated prescale value for easy programming into SPI registers
  * or negative error number if valid prescalar cannot be updated.
  */
 static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
@@ -263,12 +265,13 @@
 {
 	int ret;
 
-	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
+	/* Subtract 1 to match what will be programmed into SPI register. */
+	ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz) - 1;
 
-	if (ret < 1 || ret > 256)
+	if (ret < dspi->prescaler_limit || ret > 255)
 		return -EINVAL;
 
-	return ret - 1;
+	return ret;
 }
 
 /**
@@ -832,13 +835,40 @@
 }
 
 #if defined(CONFIG_OF)
+
+/* OF SPI data structure */
+struct davinci_spi_of_data {
+	u8	version;
+	u8	prescaler_limit;
+};
+
+static const struct davinci_spi_of_data dm6441_spi_data = {
+	.version = SPI_VERSION_1,
+	.prescaler_limit = 2,
+};
+
+static const struct davinci_spi_of_data da830_spi_data = {
+	.version = SPI_VERSION_2,
+	.prescaler_limit = 2,
+};
+
+static const struct davinci_spi_of_data keystone_spi_data = {
+	.version = SPI_VERSION_1,
+	.prescaler_limit = 0,
+};
+
 static const struct of_device_id davinci_spi_of_match[] = {
 	{
 		.compatible = "ti,dm6441-spi",
+		.data = &dm6441_spi_data,
 	},
 	{
 		.compatible = "ti,da830-spi",
-		.data = (void *)SPI_VERSION_2,
+		.data = &da830_spi_data,
+	},
+	{
+		.compatible = "ti,keystone-spi",
+		.data = &keystone_spi_data,
 	},
 	{ },
 };
@@ -857,21 +887,21 @@
 			struct davinci_spi *dspi)
 {
 	struct device_node *node = pdev->dev.of_node;
+	struct davinci_spi_of_data *spi_data;
 	struct davinci_spi_platform_data *pdata;
 	unsigned int num_cs, intr_line = 0;
 	const struct of_device_id *match;
 
 	pdata = &dspi->pdata;
 
-	pdata->version = SPI_VERSION_1;
 	match = of_match_device(davinci_spi_of_match, &pdev->dev);
 	if (!match)
 		return -ENODEV;
 
-	/* match data has the SPI version number for SPI_VERSION_2 */
-	if (match->data == (void *)SPI_VERSION_2)
-		pdata->version = SPI_VERSION_2;
+	spi_data = (struct davinci_spi_of_data *)match->data;
 
+	pdata->version = spi_data->version;
+	pdata->prescaler_limit = spi_data->prescaler_limit;
 	/*
 	 * default num_cs is 1 and all chipsel are internal to the chip
 	 * indicated by chip_sel being NULL or cs_gpios being NULL or
@@ -991,7 +1021,7 @@
 
 	dspi->bitbang.chipselect = davinci_spi_chipselect;
 	dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
-
+	dspi->prescaler_limit = pdata->prescaler_limit;
 	dspi->version = pdata->version;
 
 	dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index eb03e12..7edede6 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -74,6 +74,9 @@
 
 	dws->max_freq = clk_get_rate(dwsmmio->clk);
 
+	of_property_read_u32(pdev->dev.of_node, "reg-io-width",
+			     &dws->reg_io_width);
+
 	num_cs = 4;
 
 	if (pdev->dev.of_node)
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 8d67d03..4fbfcdc 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -194,7 +194,7 @@
 			else
 				txw = *(u16 *)(dws->tx);
 		}
-		dw_writel(dws, DW_SPI_DR, txw);
+		dw_write_io_reg(dws, DW_SPI_DR, txw);
 		dws->tx += dws->n_bytes;
 	}
 }
@@ -205,7 +205,7 @@
 	u16 rxw;
 
 	while (max--) {
-		rxw = dw_readl(dws, DW_SPI_DR);
+		rxw = dw_read_io_reg(dws, DW_SPI_DR);
 		/* Care rx only if the transfer's original "rx" is not null */
 		if (dws->rx_end - dws->len) {
 			if (dws->n_bytes == 1)
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 6c91391..b75ed32 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -109,6 +109,7 @@
 	u32			fifo_len;	/* depth of the FIFO buffer */
 	u32			max_freq;	/* max bus freq supported */
 
+	u32			reg_io_width;	/* DR I/O width in bytes */
 	u16			bus_num;
 	u16			num_cs;		/* supported slave numbers */
 
@@ -145,11 +146,45 @@
 	return __raw_readl(dws->regs + offset);
 }
 
+static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
+{
+	return __raw_readw(dws->regs + offset);
+}
+
 static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
 {
 	__raw_writel(val, dws->regs + offset);
 }
 
+static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
+{
+	__raw_writew(val, dws->regs + offset);
+}
+
+static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
+{
+	switch (dws->reg_io_width) {
+	case 2:
+		return dw_readw(dws, offset);
+	case 4:
+	default:
+		return dw_readl(dws, offset);
+	}
+}
+
+static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
+{
+	switch (dws->reg_io_width) {
+	case 2:
+		dw_writew(dws, offset, val);
+		break;
+	case 4:
+	default:
+		dw_writel(dws, offset, val);
+		break;
+	}
+}
+
 static inline void spi_enable_chip(struct dw_spi *dws, int enable)
 {
 	dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d3f05a0..c27124a 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -21,6 +21,7 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
 #include <sysdev/fsl_soc.h>
 
 #include "spi-fsl-lib.h"
@@ -85,6 +86,8 @@
 #define SPCOM_TRANLEN(x)	((x) << 0)
 #define	SPCOM_TRANLEN_MAX	0xFFFF	/* Max transaction length */
 
+#define AUTOSUSPEND_TIMEOUT 2000
+
 static void fsl_espi_change_mode(struct spi_device *spi)
 {
 	struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
@@ -485,6 +488,8 @@
 	mpc8xxx_spi = spi_master_get_devdata(spi->master);
 	reg_base = mpc8xxx_spi->reg_base;
 
+	pm_runtime_get_sync(mpc8xxx_spi->dev);
+
 	hw_mode = cs->hw_mode; /* Save original settings */
 	cs->hw_mode = mpc8xxx_spi_read_reg(
 			&reg_base->csmode[spi->chip_select]);
@@ -507,6 +512,10 @@
 	mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);
 
 	retval = fsl_espi_setup_transfer(spi, NULL);
+
+	pm_runtime_mark_last_busy(mpc8xxx_spi->dev);
+	pm_runtime_put_autosuspend(mpc8xxx_spi->dev);
+
 	if (retval < 0) {
 		cs->hw_mode = hw_mode; /* Restore settings */
 		return retval;
@@ -604,20 +613,14 @@
 	return ret;
 }
 
-static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
+#ifdef CONFIG_PM
+static int fsl_espi_runtime_suspend(struct device *dev)
 {
-	iounmap(mspi->reg_base);
-}
-
-static int fsl_espi_suspend(struct spi_master *master)
-{
-	struct mpc8xxx_spi *mpc8xxx_spi;
-	struct fsl_espi_reg *reg_base;
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+	struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
 	u32 regval;
 
-	mpc8xxx_spi = spi_master_get_devdata(master);
-	reg_base = mpc8xxx_spi->reg_base;
-
 	regval = mpc8xxx_spi_read_reg(&reg_base->mode);
 	regval &= ~SPMODE_ENABLE;
 	mpc8xxx_spi_write_reg(&reg_base->mode, regval);
@@ -625,21 +628,20 @@
 	return 0;
 }
 
-static int fsl_espi_resume(struct spi_master *master)
+static int fsl_espi_runtime_resume(struct device *dev)
 {
-	struct mpc8xxx_spi *mpc8xxx_spi;
-	struct fsl_espi_reg *reg_base;
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+	struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
 	u32 regval;
 
-	mpc8xxx_spi = spi_master_get_devdata(master);
-	reg_base = mpc8xxx_spi->reg_base;
-
 	regval = mpc8xxx_spi_read_reg(&reg_base->mode);
 	regval |= SPMODE_ENABLE;
 	mpc8xxx_spi_write_reg(&reg_base->mode, regval);
 
 	return 0;
 }
+#endif
 
 static struct spi_master * fsl_espi_probe(struct device *dev,
 		struct resource *mem, unsigned int irq)
@@ -667,25 +669,23 @@
 	master->setup = fsl_espi_setup;
 	master->cleanup = fsl_espi_cleanup;
 	master->transfer_one_message = fsl_espi_do_one_msg;
-	master->prepare_transfer_hardware = fsl_espi_resume;
-	master->unprepare_transfer_hardware = fsl_espi_suspend;
+	master->auto_runtime_pm = true;
 
 	mpc8xxx_spi = spi_master_get_devdata(master);
-	mpc8xxx_spi->spi_remove = fsl_espi_remove;
 
-	mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
-	if (!mpc8xxx_spi->reg_base) {
-		ret = -ENOMEM;
+	mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(mpc8xxx_spi->reg_base)) {
+		ret = PTR_ERR(mpc8xxx_spi->reg_base);
 		goto err_probe;
 	}
 
 	reg_base = mpc8xxx_spi->reg_base;
 
 	/* Register for SPI Interrupt */
-	ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
+	ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_espi_irq,
 			  0, "fsl_espi", mpc8xxx_spi);
 	if (ret)
-		goto free_irq;
+		goto err_probe;
 
 	if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
 		mpc8xxx_spi->rx_shift = 16;
@@ -731,18 +731,27 @@
 
 	mpc8xxx_spi_write_reg(&reg_base->mode, regval);
 
-	ret = spi_register_master(master);
+	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_TIMEOUT);
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_set_active(dev);
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
+
+	ret = devm_spi_register_master(dev, master);
 	if (ret < 0)
-		goto unreg_master;
+		goto err_pm;
 
 	dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);
 
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
 	return master;
 
-unreg_master:
-	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
-free_irq:
-	iounmap(mpc8xxx_spi->reg_base);
+err_pm:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
 err_probe:
 	spi_master_put(master);
 err:
@@ -809,7 +818,9 @@
 
 static int of_fsl_espi_remove(struct platform_device *dev)
 {
-	return mpc8xxx_spi_remove(&dev->dev);
+	pm_runtime_disable(&dev->dev);
+
+	return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -824,7 +835,11 @@
 		return ret;
 	}
 
-	return fsl_espi_suspend(master);
+	ret = pm_runtime_force_suspend(dev);
+	if (ret < 0)
+		return ret;
+
+	return 0;
 }
 
 static int of_fsl_espi_resume(struct device *dev)
@@ -834,7 +849,7 @@
 	struct mpc8xxx_spi *mpc8xxx_spi;
 	struct fsl_espi_reg *reg_base;
 	u32 regval;
-	int i;
+	int i, ret;
 
 	mpc8xxx_spi = spi_master_get_devdata(master);
 	reg_base = mpc8xxx_spi->reg_base;
@@ -854,11 +869,17 @@
 
 	mpc8xxx_spi_write_reg(&reg_base->mode, regval);
 
+	ret = pm_runtime_force_resume(dev);
+	if (ret < 0)
+		return ret;
+
 	return spi_master_resume(master);
 }
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct dev_pm_ops espi_pm = {
+	SET_RUNTIME_PM_OPS(fsl_espi_runtime_suspend,
+			   fsl_espi_runtime_resume, NULL)
 	SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
 };
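The conversion above follows the usual runtime-PM autosuspend idiom: take a reference before touching the hardware, then mark the device busy and release with autosuspend so it can power down AUTOSUSPEND_TIMEOUT ms after the last access. A minimal sketch of that idiom, separate from the patch (my_touch_hw() and its device pointer are placeholders):

	#include <linux/pm_runtime.h>

	static int my_touch_hw(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... access registers here ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}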
 
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index cb35d2f..1e43412 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -114,25 +114,6 @@
 }
 EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
 
-int mpc8xxx_spi_remove(struct device *dev)
-{
-	struct mpc8xxx_spi *mpc8xxx_spi;
-	struct spi_master *master;
-
-	master = dev_get_drvdata(dev);
-	mpc8xxx_spi = spi_master_get_devdata(master);
-
-	spi_unregister_master(master);
-
-	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
-
-	if (mpc8xxx_spi->spi_remove)
-		mpc8xxx_spi->spi_remove(mpc8xxx_spi);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
-
 int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
 {
 	struct device *dev = &ofdev->dev;
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 1326a39..84f5dcb 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -54,9 +54,6 @@
 	void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
 	u32(*get_tx) (struct mpc8xxx_spi *);
 
-	/* hooks for different controller driver */
-	void (*spi_remove) (struct mpc8xxx_spi *mspi);
-
 	unsigned int count;
 	unsigned int irq;
 
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 60c5907..8b290d9 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -559,12 +559,6 @@
 	return ret;
 }
 
-static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
-{
-	iounmap(mspi->reg_base);
-	fsl_spi_cpm_free(mspi);
-}
-
 static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
 {
 	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -631,7 +625,6 @@
 	master->transfer_one_message = fsl_spi_do_one_msg;
 
 	mpc8xxx_spi = spi_master_get_devdata(master);
-	mpc8xxx_spi->spi_remove = fsl_spi_remove;
 	mpc8xxx_spi->max_bits_per_word = 32;
 	mpc8xxx_spi->type = fsl_spi_get_type(dev);
 
@@ -639,10 +632,10 @@
 	if (ret)
 		goto err_cpm_init;
 
-	mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
-	if (mpc8xxx_spi->reg_base == NULL) {
-		ret = -ENOMEM;
-		goto err_ioremap;
+	mpc8xxx_spi->reg_base = devm_ioremap_resource(dev, mem);
+	if (IS_ERR(mpc8xxx_spi->reg_base)) {
+		ret = PTR_ERR(mpc8xxx_spi->reg_base);
+		goto err_probe;
 	}
 
 	if (mpc8xxx_spi->type == TYPE_GRLIB)
@@ -661,11 +654,11 @@
 					&mpc8xxx_spi->tx_shift, 8, 1);
 
 	/* Register for SPI Interrupt */
-	ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
-			  0, "fsl_spi", mpc8xxx_spi);
+	ret = devm_request_irq(dev, mpc8xxx_spi->irq, fsl_spi_irq,
+			       0, "fsl_spi", mpc8xxx_spi);
 
 	if (ret != 0)
-		goto free_irq;
+		goto err_probe;
 
 	reg_base = mpc8xxx_spi->reg_base;
 
@@ -686,20 +679,16 @@
 
 	mpc8xxx_spi_write_reg(&reg_base->mode, regval);
 
-	ret = spi_register_master(master);
+	ret = devm_spi_register_master(dev, master);
 	if (ret < 0)
-		goto unreg_master;
+		goto err_probe;
 
 	dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
 		 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
 
 	return master;
 
-unreg_master:
-	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
-free_irq:
-	iounmap(mpc8xxx_spi->reg_base);
-err_ioremap:
+err_probe:
 	fsl_spi_cpm_free(mpc8xxx_spi);
 err_cpm_init:
 	spi_master_put(master);
@@ -866,11 +855,8 @@
 {
 	struct spi_master *master = platform_get_drvdata(ofdev);
 	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
-	int ret;
 
-	ret = mpc8xxx_spi_remove(&ofdev->dev);
-	if (ret)
-		return ret;
+	fsl_spi_cpm_free(mpc8xxx_spi);
 	if (mpc8xxx_spi->type == TYPE_FSL)
 		of_fsl_spi_free_chipselects(&ofdev->dev);
 	return 0;
@@ -916,7 +902,12 @@
 
 static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
 {
-	return mpc8xxx_spi_remove(&pdev->dev);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+
+	fsl_spi_cpm_free(mpc8xxx_spi);
+
+	return 0;
 }
 
 MODULE_ALIAS("platform:mpc8xxx_spi");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index acce90a..823cbc9 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -105,6 +105,10 @@
 	bool rx_dma_busy;
 };
 
+struct img_spfi_device_data {
+	bool gpio_requested;
+};
+
 static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
 {
 	return readl(spfi->regs + reg);
@@ -267,15 +271,15 @@
 		cpu_relax();
 	}
 
-	ret = spfi_wait_all_done(spfi);
-	if (ret < 0)
-		return ret;
-
 	if (rx_bytes > 0 || tx_bytes > 0) {
 		dev_err(spfi->dev, "PIO transfer timed out\n");
 		return -ETIMEDOUT;
 	}
 
+	ret = spfi_wait_all_done(spfi);
+	if (ret < 0)
+		return ret;
+
 	return 0;
 }
 
@@ -440,21 +444,50 @@
 
 static int img_spfi_setup(struct spi_device *spi)
 {
-	int ret;
+	int ret = -EINVAL;
+	struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
 
-	ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
-			       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
-			       dev_name(&spi->dev));
-	if (ret)
-		dev_err(&spi->dev, "can't request chipselect gpio %d\n",
+	if (!spfi_data) {
+		spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
+		if (!spfi_data)
+			return -ENOMEM;
+		spfi_data->gpio_requested = false;
+		spi_set_ctldata(spi, spfi_data);
+	}
+	if (!spfi_data->gpio_requested) {
+		ret = gpio_request_one(spi->cs_gpio,
+				       (spi->mode & SPI_CS_HIGH) ?
+				       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
+				       dev_name(&spi->dev));
+		if (ret)
+			dev_err(&spi->dev, "can't request chipselect gpio %d\n",
 				spi->cs_gpio);
+		else
+			spfi_data->gpio_requested = true;
+	} else {
+		if (gpio_is_valid(spi->cs_gpio)) {
+			int mode = ((spi->mode & SPI_CS_HIGH) ?
+				    GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
 
+			ret = gpio_direction_output(spi->cs_gpio, mode);
+			if (ret)
+				dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
+					spi->cs_gpio, ret);
+		}
+	}
 	return ret;
 }
 
 static void img_spfi_cleanup(struct spi_device *spi)
 {
-	gpio_free(spi->cs_gpio);
+	struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
+
+	if (spfi_data) {
+		if (spfi_data->gpio_requested)
+			gpio_free(spi->cs_gpio);
+		kfree(spfi_data);
+		spi_set_ctldata(spi, NULL);
+	}
 }
 
 static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
@@ -548,6 +581,7 @@
 	struct img_spfi *spfi;
 	struct resource *res;
 	int ret;
+	u32 max_speed_hz;
 
 	master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
 	if (!master)
@@ -612,6 +646,19 @@
 	master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
 	master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
 
+	/*
+	 * The maximum speed supported by spfi is limited to the lower of
+	 * 1/4 of the SPFI clock and the "spfi-max-frequency" value
+	 * defined in the device tree.
+	 * If no value is defined in the device tree, assume the maximum
+	 * supported speed to be 1/4 of the SPFI clock.
+	 */
+	if (!of_property_read_u32(spfi->dev->of_node, "spfi-max-frequency",
+				  &max_speed_hz)) {
+		if (master->max_speed_hz > max_speed_hz)
+			master->max_speed_hz = max_speed_hz;
+	}
+
 	master->setup = img_spfi_setup;
 	master->cleanup = img_spfi_cleanup;
 	master->transfer_one = img_spfi_transfer_one;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 965d2bd..1e75341 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -30,11 +30,37 @@
 #include <linux/gpio.h>
 #include <asm/mpc52xx_psc.h>
 
+enum {
+	TYPE_MPC5121,
+	TYPE_MPC5125,
+};
+
+/*
+ * This macro abstracts the differences in the PSC register layout between
+ * MPC5121 (which uses a struct mpc52xx_psc) and MPC5125 (using mpc5125_psc).
+ */
+#define psc_addr(mps, regname) ({					\
+	void *__ret = NULL;						\
+	switch (mps->type) {						\
+	case TYPE_MPC5121: {						\
+			struct mpc52xx_psc __iomem *psc = mps->psc;	\
+			__ret = &psc->regname;				\
+		};							\
+		break;							\
+	case TYPE_MPC5125: {						\
+			struct mpc5125_psc __iomem *psc = mps->psc;	\
+			__ret = &psc->regname;				\
+		};							\
+		break;							\
+	}								\
+	__ret; })
+
 struct mpc512x_psc_spi {
 	void (*cs_control)(struct spi_device *spi, bool on);
 
 	/* driver internal data */
-	struct mpc52xx_psc __iomem *psc;
+	int type;
+	void __iomem *psc;
 	struct mpc512x_psc_fifo __iomem *fifo;
 	unsigned int irq;
 	u8 bits_per_word;
@@ -71,13 +97,12 @@
 {
 	struct mpc512x_psc_spi_cs *cs = spi->controller_state;
 	struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
-	struct mpc52xx_psc __iomem *psc = mps->psc;
 	u32 sicr;
 	u32 ccr;
 	int speed;
 	u16 bclkdiv;
 
-	sicr = in_be32(&psc->sicr);
+	sicr = in_be32(psc_addr(mps, sicr));
 
 	/* Set clock phase and polarity */
 	if (spi->mode & SPI_CPHA)
@@ -94,9 +119,9 @@
 		sicr |= 0x10000000;
 	else
 		sicr &= ~0x10000000;
-	out_be32(&psc->sicr, sicr);
+	out_be32(psc_addr(mps, sicr), sicr);
 
-	ccr = in_be32(&psc->ccr);
+	ccr = in_be32(psc_addr(mps, ccr));
 	ccr &= 0xFF000000;
 	speed = cs->speed_hz;
 	if (!speed)
@@ -104,7 +129,7 @@
 	bclkdiv = (mps->mclk_rate / speed) - 1;
 
 	ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
-	out_be32(&psc->ccr, ccr);
+	out_be32(psc_addr(mps, ccr), ccr);
 	mps->bits_per_word = cs->bits_per_word;
 
 	if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
@@ -315,16 +340,15 @@
 static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
 {
 	struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
-	struct mpc52xx_psc __iomem *psc = mps->psc;
 
 	dev_dbg(&master->dev, "%s()\n", __func__);
 
 	/* Zero MR2 */
-	in_8(&psc->mode);
-	out_8(&psc->mode, 0x0);
+	in_8(psc_addr(mps, mr2));
+	out_8(psc_addr(mps, mr2), 0x0);
 
 	/* enable transmitter/receiver */
-	out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+	out_8(psc_addr(mps, command), MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
 
 	return 0;
 }
@@ -332,13 +356,12 @@
 static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
 {
 	struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
-	struct mpc52xx_psc __iomem *psc = mps->psc;
 	struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
 
 	dev_dbg(&master->dev, "%s()\n", __func__);
 
 	/* disable transmitter/receiver and fifo interrupt */
-	out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+	out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
 	out_be32(&fifo->tximr, 0);
 
 	return 0;
@@ -388,7 +411,6 @@
 static int mpc512x_psc_spi_port_config(struct spi_master *master,
 				       struct mpc512x_psc_spi *mps)
 {
-	struct mpc52xx_psc __iomem *psc = mps->psc;
 	struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
 	u32 sicr;
 	u32 ccr;
@@ -396,12 +418,12 @@
 	u16 bclkdiv;
 
 	/* Reset the PSC into a known state */
-	out_8(&psc->command, MPC52xx_PSC_RST_RX);
-	out_8(&psc->command, MPC52xx_PSC_RST_TX);
-	out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+	out_8(psc_addr(mps, command), MPC52xx_PSC_RST_RX);
+	out_8(psc_addr(mps, command), MPC52xx_PSC_RST_TX);
+	out_8(psc_addr(mps, command), MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
 
 	/* Disable psc interrupts all useful interrupts are in fifo */
-	out_be16(&psc->isr_imr.imr, 0);
+	out_be16(psc_addr(mps, isr_imr.imr), 0);
 
 	/* Disable fifo interrupts, will be enabled later */
 	out_be32(&fifo->tximr, 0);
@@ -417,18 +439,18 @@
 		0x00004000 |	/* MSTR = 1   -- SPI master */
 		0x00000800;	/* UseEOF = 1 -- SS low until EOF */
 
-	out_be32(&psc->sicr, sicr);
+	out_be32(psc_addr(mps, sicr), sicr);
 
-	ccr = in_be32(&psc->ccr);
+	ccr = in_be32(psc_addr(mps, ccr));
 	ccr &= 0xFF000000;
 	speed = 1000000;	/* default 1MHz */
 	bclkdiv = (mps->mclk_rate / speed) - 1;
 	ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
-	out_be32(&psc->ccr, ccr);
+	out_be32(psc_addr(mps, ccr), ccr);
 
 	/* Set 2ms DTL delay */
-	out_8(&psc->ctur, 0x00);
-	out_8(&psc->ctlr, 0x82);
+	out_8(psc_addr(mps, ctur), 0x00);
+	out_8(psc_addr(mps, ctlr), 0x82);
 
 	/* we don't use the alarms */
 	out_be32(&fifo->rxalarm, 0xfff);
@@ -482,6 +504,7 @@
 
 	dev_set_drvdata(dev, master);
 	mps = spi_master_get_devdata(master);
+	mps->type = (int)of_device_get_match_data(dev);
 	mps->irq = irq;
 
 	if (pdata == NULL) {
@@ -589,7 +612,8 @@
 }
 
 static const struct of_device_id mpc512x_psc_spi_of_match[] = {
-	{ .compatible = "fsl,mpc5121-psc-spi", },
+	{ .compatible = "fsl,mpc5121-psc-spi", .data = (void *)TYPE_MPC5121 },
+	{ .compatible = "fsl,mpc5125-psc-spi", .data = (void *)TYPE_MPC5125 },
 	{},
 };
 
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
new file mode 100644
index 0000000..5f6315c
--- /dev/null
+++ b/drivers/spi/spi-mt65xx.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/spi-mt65xx.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define SPI_CFG0_REG                      0x0000
+#define SPI_CFG1_REG                      0x0004
+#define SPI_TX_SRC_REG                    0x0008
+#define SPI_RX_DST_REG                    0x000c
+#define SPI_TX_DATA_REG                   0x0010
+#define SPI_RX_DATA_REG                   0x0014
+#define SPI_CMD_REG                       0x0018
+#define SPI_STATUS0_REG                   0x001c
+#define SPI_PAD_SEL_REG                   0x0024
+
+#define SPI_CFG0_SCK_HIGH_OFFSET          0
+#define SPI_CFG0_SCK_LOW_OFFSET           8
+#define SPI_CFG0_CS_HOLD_OFFSET           16
+#define SPI_CFG0_CS_SETUP_OFFSET          24
+
+#define SPI_CFG1_CS_IDLE_OFFSET           0
+#define SPI_CFG1_PACKET_LOOP_OFFSET       8
+#define SPI_CFG1_PACKET_LENGTH_OFFSET     16
+#define SPI_CFG1_GET_TICK_DLY_OFFSET      30
+
+#define SPI_CFG1_CS_IDLE_MASK             0xff
+#define SPI_CFG1_PACKET_LOOP_MASK         0xff00
+#define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
+
+#define SPI_CMD_ACT                  BIT(0)
+#define SPI_CMD_RESUME               BIT(1)
+#define SPI_CMD_RST                  BIT(2)
+#define SPI_CMD_PAUSE_EN             BIT(4)
+#define SPI_CMD_DEASSERT             BIT(5)
+#define SPI_CMD_CPHA                 BIT(8)
+#define SPI_CMD_CPOL                 BIT(9)
+#define SPI_CMD_RX_DMA               BIT(10)
+#define SPI_CMD_TX_DMA               BIT(11)
+#define SPI_CMD_TXMSBF               BIT(12)
+#define SPI_CMD_RXMSBF               BIT(13)
+#define SPI_CMD_RX_ENDIAN            BIT(14)
+#define SPI_CMD_TX_ENDIAN            BIT(15)
+#define SPI_CMD_FINISH_IE            BIT(16)
+#define SPI_CMD_PAUSE_IE             BIT(17)
+
+#define MT8173_SPI_MAX_PAD_SEL 3
+
+#define MTK_SPI_PAUSE_INT_STATUS 0x2
+
+#define MTK_SPI_IDLE 0
+#define MTK_SPI_PAUSED 1
+
+#define MTK_SPI_MAX_FIFO_SIZE 32
+#define MTK_SPI_PACKET_SIZE 1024
+
+struct mtk_spi_compatible {
+	bool need_pad_sel;
+	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
+	bool must_tx;
+};
+
+struct mtk_spi {
+	void __iomem *base;
+	u32 state;
+	u32 pad_sel;
+	struct clk *spi_clk, *parent_clk;
+	struct spi_transfer *cur_transfer;
+	u32 xfer_len;
+	struct scatterlist *tx_sgl, *rx_sgl;
+	u32 tx_sgl_len, rx_sgl_len;
+	const struct mtk_spi_compatible *dev_comp;
+};
+
+static const struct mtk_spi_compatible mt6589_compat;
+static const struct mtk_spi_compatible mt8135_compat;
+static const struct mtk_spi_compatible mt8173_compat = {
+	.need_pad_sel = true,
+	.must_tx = true,
+};
+
+/*
+ * Default chip configuration, used unless the platform
+ * supplies its own.
+ */
+static const struct mtk_chip_config mtk_default_chip_info = {
+	.rx_mlsb = 1,
+	.tx_mlsb = 1,
+};
+
+static const struct of_device_id mtk_spi_of_match[] = {
+	{ .compatible = "mediatek,mt6589-spi", .data = (void *)&mt6589_compat },
+	{ .compatible = "mediatek,mt8135-spi", .data = (void *)&mt8135_compat },
+	{ .compatible = "mediatek,mt8173-spi", .data = (void *)&mt8173_compat },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
+
+static void mtk_spi_reset(struct mtk_spi *mdata)
+{
+	u32 reg_val;
+
+	/* set the software reset bit in SPI_CMD_REG. */
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+	reg_val |= SPI_CMD_RST;
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+	reg_val &= ~SPI_CMD_RST;
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+}
+
+static void mtk_spi_config(struct mtk_spi *mdata,
+			   struct mtk_chip_config *chip_config)
+{
+	u32 reg_val;
+
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+
+	/* set the tx/rx bit order from tx_mlsb/rx_mlsb */
+	if (chip_config->tx_mlsb)
+		reg_val |= SPI_CMD_TXMSBF;
+	else
+		reg_val &= ~SPI_CMD_TXMSBF;
+	if (chip_config->rx_mlsb)
+		reg_val |= SPI_CMD_RXMSBF;
+	else
+		reg_val &= ~SPI_CMD_RXMSBF;
+
+	/* set the tx/rx endian */
+#ifdef __LITTLE_ENDIAN
+	reg_val &= ~SPI_CMD_TX_ENDIAN;
+	reg_val &= ~SPI_CMD_RX_ENDIAN;
+#else
+	reg_val |= SPI_CMD_TX_ENDIAN;
+	reg_val |= SPI_CMD_RX_ENDIAN;
+#endif
+
+	/* always enable the finish and pause interrupts */
+	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
+
+	/* disable dma mode */
+	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
+
+	/* disable deassert mode */
+	reg_val &= ~SPI_CMD_DEASSERT;
+
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+
+	/* pad select */
+	if (mdata->dev_comp->need_pad_sel)
+		writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
+}
+
+static int mtk_spi_prepare_hardware(struct spi_master *master)
+{
+	struct spi_transfer *trans;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+	struct spi_message *msg = master->cur_msg;
+
+	trans = list_first_entry(&msg->transfers, struct spi_transfer,
+				 transfer_list);
+	if (!trans->cs_change) {
+		mdata->state = MTK_SPI_IDLE;
+		mtk_spi_reset(mdata);
+	}
+
+	return 0;
+}
+
+static int mtk_spi_prepare_message(struct spi_master *master,
+				   struct spi_message *msg)
+{
+	u32 reg_val;
+	u8 cpha, cpol;
+	struct mtk_chip_config *chip_config;
+	struct spi_device *spi = msg->spi;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	cpha = spi->mode & SPI_CPHA ? 1 : 0;
+	cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+	if (cpha)
+		reg_val |= SPI_CMD_CPHA;
+	else
+		reg_val &= ~SPI_CMD_CPHA;
+	if (cpol)
+		reg_val |= SPI_CMD_CPOL;
+	else
+		reg_val &= ~SPI_CMD_CPOL;
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+
+	chip_config = spi->controller_data;
+	if (!chip_config) {
+		chip_config = (void *)&mtk_default_chip_info;
+		spi->controller_data = chip_config;
+	}
+	mtk_spi_config(mdata, chip_config);
+
+	return 0;
+}
+
+static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+{
+	u32 reg_val;
+	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+
+	reg_val = readl(mdata->base + SPI_CMD_REG);
+	if (!enable)
+		reg_val |= SPI_CMD_PAUSE_EN;
+	else
+		reg_val &= ~SPI_CMD_PAUSE_EN;
+	writel(reg_val, mdata->base + SPI_CMD_REG);
+}
+
+static void mtk_spi_prepare_transfer(struct spi_master *master,
+				     struct spi_transfer *xfer)
+{
+	u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	spi_clk_hz = clk_get_rate(mdata->spi_clk);
+	if (xfer->speed_hz < spi_clk_hz / 2)
+		div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
+	else
+		div = 1;
+
+	sck_time = (div + 1) / 2;
+	cs_time = sck_time * 2;
+
+	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_HIGH_OFFSET);
+	reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
+	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
+	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_SETUP_OFFSET);
+	writel(reg_val, mdata->base + SPI_CFG0_REG);
+
+	reg_val = readl(mdata->base + SPI_CFG1_REG);
+	reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
+	reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
+	writel(reg_val, mdata->base + SPI_CFG1_REG);
+}
+
+static void mtk_spi_setup_packet(struct spi_master *master)
+{
+	u32 packet_size, packet_loop, reg_val;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
+	packet_loop = mdata->xfer_len / packet_size;
+
+	reg_val = readl(mdata->base + SPI_CFG1_REG);
+	reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
+	reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
+	reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
+	writel(reg_val, mdata->base + SPI_CFG1_REG);
+}
+
+static void mtk_spi_enable_transfer(struct spi_master *master)
+{
+	u32 cmd;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	cmd = readl(mdata->base + SPI_CMD_REG);
+	if (mdata->state == MTK_SPI_IDLE)
+		cmd |= SPI_CMD_ACT;
+	else
+		cmd |= SPI_CMD_RESUME;
+	writel(cmd, mdata->base + SPI_CMD_REG);
+}
+
+static int mtk_spi_get_mult_delta(u32 xfer_len)
+{
+	u32 mult_delta;
+
+	if (xfer_len > MTK_SPI_PACKET_SIZE)
+		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
+	else
+		mult_delta = 0;
+
+	return mult_delta;
+}
+
+static void mtk_spi_update_mdata_len(struct spi_master *master)
+{
+	int mult_delta;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
+		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
+			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+			mdata->rx_sgl_len = mult_delta;
+			mdata->tx_sgl_len -= mdata->xfer_len;
+		} else {
+			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+			mdata->tx_sgl_len = mult_delta;
+			mdata->rx_sgl_len -= mdata->xfer_len;
+		}
+	} else if (mdata->tx_sgl_len) {
+		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
+		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+		mdata->tx_sgl_len = mult_delta;
+	} else if (mdata->rx_sgl_len) {
+		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
+		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+		mdata->rx_sgl_len = mult_delta;
+	}
+}
+
+static void mtk_spi_setup_dma_addr(struct spi_master *master,
+				   struct spi_transfer *xfer)
+{
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	if (mdata->tx_sgl)
+		writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
+	if (mdata->rx_sgl)
+		writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
+}
+
+static int mtk_spi_fifo_transfer(struct spi_master *master,
+				 struct spi_device *spi,
+				 struct spi_transfer *xfer)
+{
+	int cnt;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	mdata->cur_transfer = xfer;
+	mdata->xfer_len = xfer->len;
+	mtk_spi_prepare_transfer(master, xfer);
+	mtk_spi_setup_packet(master);
+
+	if (xfer->len % 4)
+		cnt = xfer->len / 4 + 1;
+	else
+		cnt = xfer->len / 4;
+	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+
+	mtk_spi_enable_transfer(master);
+
+	return 1;
+}
+
+static int mtk_spi_dma_transfer(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	int cmd;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	mdata->tx_sgl = NULL;
+	mdata->rx_sgl = NULL;
+	mdata->tx_sgl_len = 0;
+	mdata->rx_sgl_len = 0;
+	mdata->cur_transfer = xfer;
+
+	mtk_spi_prepare_transfer(master, xfer);
+
+	cmd = readl(mdata->base + SPI_CMD_REG);
+	if (xfer->tx_buf)
+		cmd |= SPI_CMD_TX_DMA;
+	if (xfer->rx_buf)
+		cmd |= SPI_CMD_RX_DMA;
+	writel(cmd, mdata->base + SPI_CMD_REG);
+
+	if (xfer->tx_buf)
+		mdata->tx_sgl = xfer->tx_sg.sgl;
+	if (xfer->rx_buf)
+		mdata->rx_sgl = xfer->rx_sg.sgl;
+
+	if (mdata->tx_sgl) {
+		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
+		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
+	}
+	if (mdata->rx_sgl) {
+		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
+		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
+	}
+
+	mtk_spi_update_mdata_len(master);
+	mtk_spi_setup_packet(master);
+	mtk_spi_setup_dma_addr(master, xfer);
+	mtk_spi_enable_transfer(master);
+
+	return 1;
+}
+
+static int mtk_spi_transfer_one(struct spi_master *master,
+				struct spi_device *spi,
+				struct spi_transfer *xfer)
+{
+	if (master->can_dma(master, spi, xfer))
+		return mtk_spi_dma_transfer(master, spi, xfer);
+	else
+		return mtk_spi_fifo_transfer(master, spi, xfer);
+}
+
+static bool mtk_spi_can_dma(struct spi_master *master,
+			    struct spi_device *spi,
+			    struct spi_transfer *xfer)
+{
+	return xfer->len > MTK_SPI_MAX_FIFO_SIZE;
+}
+
+static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+{
+	u32 cmd, reg_val, cnt;
+	struct spi_master *master = dev_id;
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+	struct spi_transfer *trans = mdata->cur_transfer;
+
+	reg_val = readl(mdata->base + SPI_STATUS0_REG);
+	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
+		mdata->state = MTK_SPI_PAUSED;
+	else
+		mdata->state = MTK_SPI_IDLE;
+
+	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+		if (trans->rx_buf) {
+			if (mdata->xfer_len % 4)
+				cnt = mdata->xfer_len / 4 + 1;
+			else
+				cnt = mdata->xfer_len / 4;
+			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
+				     trans->rx_buf, cnt);
+		}
+		spi_finalize_current_transfer(master);
+		return IRQ_HANDLED;
+	}
+
+	if (mdata->tx_sgl)
+		trans->tx_dma += mdata->xfer_len;
+	if (mdata->rx_sgl)
+		trans->rx_dma += mdata->xfer_len;
+
+	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
+		mdata->tx_sgl = sg_next(mdata->tx_sgl);
+		if (mdata->tx_sgl) {
+			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
+			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
+		}
+	}
+	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
+		mdata->rx_sgl = sg_next(mdata->rx_sgl);
+		if (mdata->rx_sgl) {
+			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
+			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
+		}
+	}
+
+	if (!mdata->tx_sgl && !mdata->rx_sgl) {
+		/* spi disable dma */
+		cmd = readl(mdata->base + SPI_CMD_REG);
+		cmd &= ~SPI_CMD_TX_DMA;
+		cmd &= ~SPI_CMD_RX_DMA;
+		writel(cmd, mdata->base + SPI_CMD_REG);
+
+		spi_finalize_current_transfer(master);
+		return IRQ_HANDLED;
+	}
+
+	mtk_spi_update_mdata_len(master);
+	mtk_spi_setup_packet(master);
+	mtk_spi_setup_dma_addr(master, trans);
+	mtk_spi_enable_transfer(master);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct mtk_spi *mdata;
+	const struct of_device_id *of_id;
+	struct resource *res;
+	int irq, ret;
+
+	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
+	if (!master) {
+		dev_err(&pdev->dev, "failed to alloc spi master\n");
+		return -ENOMEM;
+	}
+
+	master->auto_runtime_pm = true;
+	master->dev.of_node = pdev->dev.of_node;
+	master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+	master->set_cs = mtk_spi_set_cs;
+	master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
+	master->prepare_message = mtk_spi_prepare_message;
+	master->transfer_one = mtk_spi_transfer_one;
+	master->can_dma = mtk_spi_can_dma;
+
+	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
+	if (!of_id) {
+		dev_err(&pdev->dev, "failed to probe of_node\n");
+		ret = -EINVAL;
+		goto err_put_master;
+	}
+
+	mdata = spi_master_get_devdata(master);
+	mdata->dev_comp = of_id->data;
+	if (mdata->dev_comp->must_tx)
+		master->flags = SPI_MASTER_MUST_TX;
+
+	if (mdata->dev_comp->need_pad_sel) {
+		ret = of_property_read_u32(pdev->dev.of_node,
+					   "mediatek,pad-select",
+					   &mdata->pad_sel);
+		if (ret) {
+			dev_err(&pdev->dev, "failed to read pad select: %d\n",
+				ret);
+			goto err_put_master;
+		}
+
+		if (mdata->pad_sel > MT8173_SPI_MAX_PAD_SEL) {
+			dev_err(&pdev->dev, "wrong pad-select: %u\n",
+				mdata->pad_sel);
+			ret = -EINVAL;
+			goto err_put_master;
+		}
+	}
+
+	platform_set_drvdata(pdev, master);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "failed to determine base address\n");
+		goto err_put_master;
+	}
+
+	mdata->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mdata->base)) {
+		ret = PTR_ERR(mdata->base);
+		goto err_put_master;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
+		ret = irq;
+		goto err_put_master;
+	}
+
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
+			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
+		goto err_put_master;
+	}
+
+	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
+	if (IS_ERR(mdata->spi_clk)) {
+		ret = PTR_ERR(mdata->spi_clk);
+		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+		goto err_put_master;
+	}
+
+	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+	if (IS_ERR(mdata->parent_clk)) {
+		ret = PTR_ERR(mdata->parent_clk);
+		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
+		goto err_put_master;
+	}
+
+	ret = clk_prepare_enable(mdata->spi_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
+		goto err_put_master;
+	}
+
+	ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
+		goto err_disable_clk;
+	}
+
+	clk_disable_unprepare(mdata->spi_clk);
+
+	pm_runtime_enable(&pdev->dev);
+
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+		goto err_put_master;
+	}
+
+	return 0;
+
+err_disable_clk:
+	clk_disable_unprepare(mdata->spi_clk);
+err_put_master:
+	spi_master_put(master);
+
+	return ret;
+}
+
+static int mtk_spi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	pm_runtime_disable(&pdev->dev);
+
+	mtk_spi_reset(mdata);
+	clk_disable_unprepare(mdata->spi_clk);
+	spi_master_put(master);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_spi_suspend(struct device *dev)
+{
+	int ret;
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	ret = spi_master_suspend(master);
+	if (ret)
+		return ret;
+
+	if (!pm_runtime_suspended(dev))
+		clk_disable_unprepare(mdata->spi_clk);
+
+	return ret;
+}
+
+static int mtk_spi_resume(struct device *dev)
+{
+	int ret;
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	if (!pm_runtime_suspended(dev)) {
+		ret = clk_prepare_enable(mdata->spi_clk);
+		if (ret < 0) {
+			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	ret = spi_master_resume(master);
+	if (ret < 0)
+		clk_disable_unprepare(mdata->spi_clk);
+
+	return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int mtk_spi_runtime_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+	clk_disable_unprepare(mdata->spi_clk);
+
+	return 0;
+}
+
+static int mtk_spi_runtime_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct mtk_spi *mdata = spi_master_get_devdata(master);
+	int ret;
+
+	ret = clk_prepare_enable(mdata->spi_clk);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops mtk_spi_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
+	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
+			   mtk_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver mtk_spi_driver = {
+	.driver = {
+		.name = "mtk-spi",
+		.pm	= &mtk_spi_pm,
+		.of_match_table = mtk_spi_of_match,
+	},
+	.probe = mtk_spi_probe,
+	.remove = mtk_spi_remove,
+};
+
+module_platform_driver(mtk_spi_driver);
+
+MODULE_DESCRIPTION("MTK SPI Controller driver");
+MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mtk-spi");
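As a usage note for the new driver (a sketch, not taken from this patch): a slave device that needs LSB-first shifting can override the defaults above by pointing spi_device->controller_data at its own mtk_chip_config before its first message; mtk_spi_prepare_message() then programs SPI_CMD_TXMSBF/SPI_CMD_RXMSBF from it. The helper below is hypothetical board/slave-driver code:

	#include <linux/platform_data/spi-mt65xx.h>
	#include <linux/spi/spi.h>

	static struct mtk_chip_config example_lsb_first_cfg = {
		.tx_mlsb = 0,	/* transmit LSB first */
		.rx_mlsb = 0,	/* receive LSB first */
	};

	static void example_attach_chip_config(struct spi_device *spi)
	{
		spi->controller_data = &example_lsb_first_cfg;
	}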
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 5867384..3d09e0b 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -245,6 +245,7 @@
 
 static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
 {
+	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
 	u32 l;
 
 	/* The controller handles the inverted chip selects
@@ -255,6 +256,12 @@
 		enable = !enable;
 
 	if (spi->controller_state) {
+		int err = pm_runtime_get_sync(mcspi->dev);
+		if (err < 0) {
+			dev_err(mcspi->dev, "failed to get sync: %d\n", err);
+			return;
+		}
+
 		l = mcspi_cached_chconf0(spi);
 
 		if (enable)
@@ -263,6 +270,9 @@
 			l |= OMAP2_MCSPI_CHCONF_FORCE;
 
 		mcspi_write_chconf0(spi, l);
+
+		pm_runtime_mark_last_busy(mcspi->dev);
+		pm_runtime_put_autosuspend(mcspi->dev);
 	}
 }
 
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 8cad107..a87cfd4 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -41,6 +41,11 @@
 #define ORION_SPI_DATA_OUT_REG		0x08
 #define ORION_SPI_DATA_IN_REG		0x0c
 #define ORION_SPI_INT_CAUSE_REG		0x10
+#define ORION_SPI_TIMING_PARAMS_REG	0x18
+
+#define ORION_SPI_TMISO_SAMPLE_MASK	(0x3 << 6)
+#define ORION_SPI_TMISO_SAMPLE_1	(1 << 6)
+#define ORION_SPI_TMISO_SAMPLE_2	(2 << 6)
 
 #define ORION_SPI_MODE_CPOL		(1 << 11)
 #define ORION_SPI_MODE_CPHA		(1 << 12)
@@ -70,6 +75,7 @@
 	unsigned int		min_divisor;
 	unsigned int		max_divisor;
 	u32			prescale_mask;
+	bool			is_errata_50mhz_ac;
 };
 
 struct orion_spi {
@@ -195,6 +201,41 @@
 	writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
 }
 
+static void
+orion_spi_50mhz_ac_timing_erratum(struct spi_device *spi, unsigned int speed)
+{
+	u32 reg;
+	struct orion_spi *orion_spi;
+
+	orion_spi = spi_master_get_devdata(spi->master);
+
+	/*
+	 * Erratum description: (Erratum NO. FE-9144572) The device
+	 * SPI interface supports frequencies of up to 50 MHz.
+	 * However, due to this erratum, when the device core clock is
+	 * 250 MHz and the SPI interface is configured for a 50 MHz SPI
+	 * clock with CPOL=CPHA=1, data corruption might occur on reads
+	 * from the SPI device.
+	 * Erratum Workaround:
+	 * Work in one of the following configurations:
+	 * 1. Set CPOL=CPHA=0 in "SPI Interface Configuration
+	 * Register".
+	 * 2. Set TMISO_SAMPLE value to 0x2 in "SPI Timing Parameters 1
+	 * Register" before setting the interface.
+	 */
+	reg = readl(spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
+	reg &= ~ORION_SPI_TMISO_SAMPLE_MASK;
+
+	if (clk_get_rate(orion_spi->clk) == 250000000 &&
+			speed == 50000000 && spi->mode & SPI_CPOL &&
+			spi->mode & SPI_CPHA)
+		reg |= ORION_SPI_TMISO_SAMPLE_2;
+	else
+		reg |= ORION_SPI_TMISO_SAMPLE_1; /* This is the default value */
+
+	writel(reg, spi_reg(orion_spi, ORION_SPI_TIMING_PARAMS_REG));
+}
+
 /*
  * called only when no transfer is active on the bus
  */
@@ -216,6 +257,9 @@
 
 	orion_spi_mode_set(spi);
 
+	if (orion_spi->devdata->is_errata_50mhz_ac)
+		orion_spi_50mhz_ac_timing_erratum(spi, speed);
+
 	rc = orion_spi_baudrate_set(spi, speed);
 	if (rc)
 		return rc;
@@ -413,6 +457,14 @@
 	.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
 };
 
+static const struct orion_spi_dev armada_380_spi_dev_data = {
+	.typ = ARMADA_SPI,
+	.max_hz = 50000000,
+	.max_divisor = 1920,
+	.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+	.is_errata_50mhz_ac = true,
+};
+
 static const struct of_device_id orion_spi_of_match_table[] = {
 	{
 		.compatible = "marvell,orion-spi",
@@ -428,7 +480,7 @@
 	},
 	{
 		.compatible = "marvell,armada-380-spi",
-		.data = &armada_xp_spi_dev_data,
+		.data = &armada_380_spi_dev_data,
 	},
 	{
 		.compatible = "marvell,armada-390-spi",
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 3cfd435..d19d7f2 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -7,7 +7,6 @@
 #include <linux/of_device.h>
 #include <linux/module.h>
 #include <linux/spi/pxa2xx_spi.h>
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 
 #include <linux/dmaengine.h>
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 7293d6d..fdd79197 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -21,6 +21,7 @@
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 #include <linux/spi/spi.h>
@@ -97,6 +98,15 @@
 		.tx_threshold_lo = 160,
 		.tx_threshold_hi = 224,
 	},
+	{	/* LPSS_SPT_SSP */
+		.offset = 0x200,
+		.reg_general = -1,
+		.reg_ssp = 0x20,
+		.reg_cs_ctrl = 0x24,
+		.rx_threshold = 1,
+		.tx_threshold_lo = 32,
+		.tx_threshold_hi = 56,
+	},
 };
 
 static inline const struct lpss_config
@@ -110,6 +120,7 @@
 	switch (drv_data->ssp_type) {
 	case LPSS_LPT_SSP:
 	case LPSS_BYT_SSP:
+	case LPSS_SPT_SSP:
 		return true;
 	default:
 		return false;
@@ -1107,6 +1118,7 @@
 		break;
 	case LPSS_LPT_SSP:
 	case LPSS_BYT_SSP:
+	case LPSS_SPT_SSP:
 		config = lpss_get_config(drv_data);
 		tx_thres = config->tx_threshold_lo;
 		tx_hi_thres = config->tx_threshold_hi;
@@ -1276,6 +1288,31 @@
 };
 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
 
+/*
+ * PCI IDs of compound devices that integrate both the host controller and a
+ * private integrated DMA engine. Note that these IDs are not used for module
+ * autoloading or probing in this module, only for matching the LPSS SSP type.
+ */
+static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
+	/* SPT-LP */
+	{ PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
+	{ PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
+	/* SPT-H */
+	{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
+	{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
+	{ },
+};
+
+static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
+{
+	struct device *dev = param;
+
+	if (dev != chan->device->dev->parent)
+		return false;
+
+	return true;
+}
+
 static struct pxa2xx_spi_master *
 pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
 {
@@ -1283,16 +1320,25 @@
 	struct acpi_device *adev;
 	struct ssp_device *ssp;
 	struct resource *res;
-	const struct acpi_device_id *id;
+	const struct acpi_device_id *adev_id = NULL;
+	const struct pci_device_id *pcidev_id = NULL;
 	int devid, type;
 
 	if (!ACPI_HANDLE(&pdev->dev) ||
 	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
 		return NULL;
 
-	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
-	if (id)
-		type = (int)id->driver_data;
+	if (dev_is_pci(pdev->dev.parent))
+		pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
+					 to_pci_dev(pdev->dev.parent));
+	else
+		adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
+					    &pdev->dev);
+
+	if (adev_id)
+		type = (int)adev_id->driver_data;
+	else if (pcidev_id)
+		type = (int)pcidev_id->driver_data;
 	else
 		return NULL;
 
@@ -1311,6 +1357,12 @@
 	if (IS_ERR(ssp->mmio_base))
 		return NULL;
 
+	if (pcidev_id) {
+		pdata->tx_param = pdev->dev.parent;
+		pdata->rx_param = pdev->dev.parent;
+		pdata->dma_filter = pxa2xx_spi_idma_filter;
+	}
+
 	ssp->clk = devm_clk_get(&pdev->dev, NULL);
 	ssp->irq = platform_get_irq(pdev, 0);
 	ssp->type = type;
@@ -1362,8 +1414,7 @@
 		return -ENODEV;
 	}
 
-	/* Allocate master with space for drv_data and null dma buffer */
-	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
+	master = spi_alloc_master(dev, sizeof(struct driver_data));
 	if (!master) {
 		dev_err(&pdev->dev, "cannot alloc spi_master\n");
 		pxa_ssp_free(ssp);
@@ -1390,7 +1441,6 @@
 	master->auto_runtime_pm = true;
 
 	drv_data->ssp_type = ssp->type;
-	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
 
 	drv_data->ioaddr = ssp->mmio_base;
 	drv_data->ssdr_physical = ssp->phys_base + SSDR;
@@ -1424,8 +1474,6 @@
 	}
 
 	/* Setup DMA if requested */
-	drv_data->tx_channel = -1;
-	drv_data->rx_channel = -1;
 	if (platform_info->enable_dma) {
 		status = pxa2xx_spi_dma_setup(drv_data);
 		if (status) {
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 9f01e9c..0a9b639 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -36,11 +36,6 @@
 	/* PXA hookup */
 	struct pxa2xx_spi_master *master_info;
 
-	/* PXA private DMA setup stuff */
-	int rx_channel;
-	int tx_channel;
-	u32 *null_dma_buf;
-
 	/* SSP register addresses */
 	void __iomem *ioaddr;
 	u32 ssdr_physical;
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 68e7efe..79a8bc4 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -645,7 +645,6 @@
 	platform_set_drvdata(pdev, master);
 
 	rs = spi_master_get_devdata(master);
-	memset(rs, 0, sizeof(struct rockchip_spi));
 
 	/* Get basic io resource and map it */
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index f9189a0..8188433 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -383,7 +383,8 @@
 	rspi_write8(rspi, data, reg);
 }
 
-static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len)
+static unsigned int qspi_set_send_trigger(struct rspi_data *rspi,
+					  unsigned int len)
 {
 	unsigned int n;
 
@@ -724,25 +725,25 @@
 static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
 					u8 *rx, unsigned int len)
 {
-	int i, n, ret;
-	int error;
+	unsigned int i, n;
+	int ret;
 
 	while (len > 0) {
 		n = qspi_set_send_trigger(rspi, len);
 		qspi_set_receive_trigger(rspi, len);
 		if (n == QSPI_BUFFER_SIZE) {
-			error = rspi_wait_for_tx_empty(rspi);
-			if (error < 0) {
+			ret = rspi_wait_for_tx_empty(rspi);
+			if (ret < 0) {
 				dev_err(&rspi->master->dev, "transmit timeout\n");
-				return error;
+				return ret;
 			}
 			for (i = 0; i < n; i++)
 				rspi_write_data(rspi, *tx++);
 
-			error = rspi_wait_for_rx_full(rspi);
-			if (error < 0) {
+			ret = rspi_wait_for_rx_full(rspi);
+			if (ret < 0) {
 				dev_err(&rspi->master->dev, "receive timeout\n");
-				return error;
+				return ret;
 			}
 			for (i = 0; i < n; i++)
 				*rx++ = rspi_read_data(rspi);
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index f747ca2..f36bc32 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -501,7 +501,6 @@
 	}
 
 	hw = spi_master_get_devdata(master);
-	memset(hw, 0, sizeof(struct s3c24xx_spi));
 
 	hw->master = master;
 	hw->pdata = pdata = dev_get_platdata(&pdev->dev);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 2a8c513..cd1cfac 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1191,8 +1191,8 @@
 
 	dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
 					sdd->port_id, master->num_chipselect);
-	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
-					mem_res,
+	dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tFIFO %dbytes\tDMA=[Rx-%d, Tx-%d]\n",
+					mem_res, (FIFO_LVL_MASK(sdd) >> 1) + 1,
 					sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
 	return 0;
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d3370a6..a7934ab 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -48,8 +48,8 @@
 	const struct sh_msiof_chipdata *chipdata;
 	struct sh_msiof_spi_info *info;
 	struct completion done;
-	int tx_fifo_size;
-	int rx_fifo_size;
+	unsigned int tx_fifo_size;
+	unsigned int rx_fifo_size;
 	void *tx_dma_page;
 	void *rx_dma_page;
 	dma_addr_t tx_dma_addr;
@@ -95,8 +95,6 @@
 #define MDR2_WDLEN1(i)	(((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
 #define MDR2_GRPMASK1	0x00000001 /* Group Output Mask 1 (SH, A1) */
 
-#define MAX_WDLEN	256U
-
 /* TSCR and RSCR */
 #define SCR_BRPS_MASK	    0x1f00 /* Prescaler Setting (1-32) */
 #define SCR_BRPS(i)	(((i) - 1) << 8)
@@ -850,7 +848,12 @@
 		 *  DMA supports 32-bit words only, hence pack 8-bit and 16-bit
 		 *  words, with byte resp. word swapping.
 		 */
-		unsigned int l = min(len, MAX_WDLEN * 4);
+		unsigned int l = 0;
+
+		if (tx_buf)
+			l = min(len, p->tx_fifo_size * 4);
+		if (rx_buf)
+			l = min(len, p->rx_fifo_size * 4);
 
 		if (bits <= 8) {
 			if (l & 3)
@@ -963,7 +966,7 @@
 
 static const struct sh_msiof_chipdata r8a779x_data = {
 	.tx_fifo_size = 64,
-	.rx_fifo_size = 256,
+	.rx_fifo_size = 64,
 	.master_flags = SPI_MASTER_MUST_TX,
 };
 
@@ -1265,11 +1268,6 @@
 
 static const struct platform_device_id spi_driver_ids[] = {
 	{ "spi_sh_msiof",	(kernel_ulong_t)&sh_data },
-	{ "spi_r8a7790_msiof",	(kernel_ulong_t)&r8a779x_data },
-	{ "spi_r8a7791_msiof",	(kernel_ulong_t)&r8a779x_data },
-	{ "spi_r8a7792_msiof",	(kernel_ulong_t)&r8a779x_data },
-	{ "spi_r8a7793_msiof",	(kernel_ulong_t)&r8a779x_data },
-	{ "spi_r8a7794_msiof",	(kernel_ulong_t)&r8a779x_data },
 	{},
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 5c06168..aa6d284 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -99,6 +99,8 @@
 #define QSPI_INVAL			(4 << 16)
 #define QSPI_WC_CMD_INT_EN			(1 << 14)
 #define QSPI_FLEN(n)			((n - 1) << 0)
+#define QSPI_WLEN_MAX_BITS		128
+#define QSPI_WLEN_MAX_BYTES		16
 
 /* STATUS REGISTER */
 #define BUSY				0x01
@@ -217,14 +219,16 @@
 
 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
-	int wlen, count;
+	int wlen, count, xfer_len;
 	unsigned int cmd;
 	const u8 *txbuf;
+	u32 data;
 
 	txbuf = t->tx_buf;
 	cmd = qspi->cmd | QSPI_WR_SNGL;
 	count = t->len;
 	wlen = t->bits_per_word >> 3;	/* in bytes */
+	xfer_len = wlen;
 
 	while (count) {
 		if (qspi_is_busy(qspi))
@@ -234,7 +238,29 @@
 		case 1:
 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
 					cmd, qspi->dc, *txbuf);
-			writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+			if (count >= QSPI_WLEN_MAX_BYTES) {
+				u32 *txp = (u32 *)txbuf;
+
+				data = cpu_to_be32(*txp++);
+				writel(data, qspi->base +
+				       QSPI_SPI_DATA_REG_3);
+				data = cpu_to_be32(*txp++);
+				writel(data, qspi->base +
+				       QSPI_SPI_DATA_REG_2);
+				data = cpu_to_be32(*txp++);
+				writel(data, qspi->base +
+				       QSPI_SPI_DATA_REG_1);
+				data = cpu_to_be32(*txp++);
+				writel(data, qspi->base +
+				       QSPI_SPI_DATA_REG);
+				xfer_len = QSPI_WLEN_MAX_BYTES;
+				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
+			} else {
+				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+				cmd = qspi->cmd | QSPI_WR_SNGL;
+				xfer_len = wlen;
+				cmd |= QSPI_WLEN(wlen);
+			}
 			break;
 		case 2:
 			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
@@ -254,8 +280,8 @@
 			dev_err(qspi->dev, "write timed out\n");
 			return -ETIMEDOUT;
 		}
-		txbuf += wlen;
-		count -= wlen;
+		txbuf += xfer_len;
+		count -= xfer_len;
 	}
 
 	return 0;
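For orientation, a worked example of the 8-bit path above (not from the patch itself): with bits_per_word = 8, a 100-byte write is now issued as six 16-byte bursts through QSPI_SPI_DATA_REG_3..QSPI_SPI_DATA_REG with QSPI_WLEN set to 128 bits (6 * 16 = 96 bytes), and the remaining 4 bytes fall back to single-byte writes, since the burst path is only taken while at least QSPI_WLEN_MAX_BYTES remain.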
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index bb478dc..3c28e24 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -237,11 +237,11 @@
 	{ "spi-xcomm" },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, spi_xcomm_ids);
 
 static struct i2c_driver spi_xcomm_driver = {
 	.driver = {
 		.name	= "spi-xcomm",
-		.owner	= THIS_MODULE,
 	},
 	.id_table	= spi_xcomm_ids,
 	.probe		= spi_xcomm_probe,
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 133f53a..a339c1e 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -249,19 +249,23 @@
 	xspi->tx_ptr = t->tx_buf;
 	xspi->rx_ptr = t->rx_buf;
 	remaining_words = t->len / xspi->bytes_per_word;
-	reinit_completion(&xspi->done);
 
 	if (xspi->irq >= 0 &&  remaining_words > xspi->buffer_size) {
+		u32 isr;
 		use_irq = true;
-		xspi->write_fn(XSPI_INTR_TX_EMPTY,
-				xspi->regs + XIPIF_V123B_IISR_OFFSET);
-		/* Enable the global IPIF interrupt */
-		xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
-				xspi->regs + XIPIF_V123B_DGIER_OFFSET);
 		/* Inhibit irq to avoid spurious irqs on tx_empty*/
 		cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
 		xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
 			       xspi->regs + XSPI_CR_OFFSET);
+		/* ACK old irqs (if any) */
+		isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+		if (isr)
+			xspi->write_fn(isr,
+				       xspi->regs + XIPIF_V123B_IISR_OFFSET);
+		/* Enable the global IPIF interrupt */
+		xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+				xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+		reinit_completion(&xspi->done);
 	}
 
 	while (remaining_words) {
@@ -302,8 +306,10 @@
 		remaining_words -= n_words;
 	}
 
-	if (use_irq)
+	if (use_irq) {
 		xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+		xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+	}
 
 	return t->len;
 }
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
new file mode 100644
index 0000000..8f04fec
--- /dev/null
+++ b/drivers/spi/spi-xlp.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright (C) 2003-2015 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 (GPL v2)
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+
+/* SPI Configuration Register */
+#define XLP_SPI_CONFIG			0x00
+#define XLP_SPI_CPHA			BIT(0)
+#define XLP_SPI_CPOL			BIT(1)
+#define XLP_SPI_CS_POL			BIT(2)
+#define XLP_SPI_TXMISO_EN		BIT(3)
+#define XLP_SPI_TXMOSI_EN		BIT(4)
+#define XLP_SPI_RXMISO_EN		BIT(5)
+#define XLP_SPI_CS_LSBFE		BIT(10)
+#define XLP_SPI_RXCAP_EN		BIT(11)
+
+/* SPI Frequency Divider Register */
+#define XLP_SPI_FDIV			0x04
+
+/* SPI Command Register */
+#define XLP_SPI_CMD			0x08
+#define XLP_SPI_CMD_IDLE_MASK		0x0
+#define XLP_SPI_CMD_TX_MASK		0x1
+#define XLP_SPI_CMD_RX_MASK		0x2
+#define XLP_SPI_CMD_TXRX_MASK		0x3
+#define XLP_SPI_CMD_CONT		BIT(4)
+#define XLP_SPI_XFR_BITCNT_SHIFT	16
+
+/* SPI Status Register */
+#define XLP_SPI_STATUS			0x0c
+#define XLP_SPI_XFR_PENDING		BIT(0)
+#define XLP_SPI_XFR_DONE		BIT(1)
+#define XLP_SPI_TX_INT			BIT(2)
+#define XLP_SPI_RX_INT			BIT(3)
+#define XLP_SPI_TX_UF			BIT(4)
+#define XLP_SPI_RX_OF			BIT(5)
+#define XLP_SPI_STAT_MASK		0x3f
+
+/* SPI Interrupt Enable Register */
+#define XLP_SPI_INTR_EN			0x10
+#define XLP_SPI_INTR_DONE		BIT(0)
+#define XLP_SPI_INTR_TXTH		BIT(1)
+#define XLP_SPI_INTR_RXTH		BIT(2)
+#define XLP_SPI_INTR_TXUF		BIT(3)
+#define XLP_SPI_INTR_RXOF		BIT(4)
+
+/* SPI FIFO Threshold Register */
+#define XLP_SPI_FIFO_THRESH		0x14
+
+/* SPI FIFO Word Count Register */
+#define XLP_SPI_FIFO_WCNT		0x18
+#define XLP_SPI_RXFIFO_WCNT_MASK	0xf
+#define XLP_SPI_TXFIFO_WCNT_MASK	0xf0
+#define XLP_SPI_TXFIFO_WCNT_SHIFT	4
+
+/* SPI Transmit Data FIFO Register */
+#define XLP_SPI_TXDATA_FIFO		0x1c
+
+/* SPI Receive Data FIFO Register */
+#define XLP_SPI_RXDATA_FIFO		0x20
+
+/* SPI System Control Register */
+#define XLP_SPI_SYSCTRL			0x100
+#define XLP_SPI_SYS_RESET		BIT(0)
+#define XLP_SPI_SYS_CLKDIS		BIT(1)
+#define XLP_SPI_SYS_PMEN		BIT(8)
+
+#define SPI_CS_OFFSET			0x40
+#define XLP_SPI_TXRXTH			0x80
+#define XLP_SPI_FIFO_SIZE		8
+#define XLP_SPI_MAX_CS			4
+#define XLP_SPI_DEFAULT_FREQ		133333333
+#define XLP_SPI_FDIV_MIN		4
+#define XLP_SPI_FDIV_MAX		65535
+/*
+ * The SPI controller can transfer only 28 bytes reliably at a time,
+ * so split larger transfers into chunks of at most 28 bytes.
+ */
+#define XLP_SPI_XFER_SIZE		28
+
+struct xlp_spi_priv {
+	struct device		dev;		/* device structure */
+	void __iomem		*base;		/* spi registers base address */
+	const u8		*tx_buf;	/* tx data buffer */
+	u8			*rx_buf;	/* rx data buffer */
+	int			tx_len;		/* tx xfer length */
+	int			rx_len;		/* rx xfer length */
+	int			txerrors;	/* TXFIFO underflow count */
+	int			rxerrors;	/* RXFIFO overflow count */
+	int			cs;		/* slave device chip select */
+	u32			spi_clk;	/* spi clock frequency */
+	bool			cmd_cont;	/* cs active */
+	struct completion	done;		/* completion notification */
+};
+
+static inline u32 xlp_spi_reg_read(struct xlp_spi_priv *priv,
+				int cs, int regoff)
+{
+	return readl(priv->base + regoff + cs * SPI_CS_OFFSET);
+}
+
+static inline void xlp_spi_reg_write(struct xlp_spi_priv *priv, int cs,
+				int regoff, u32 val)
+{
+	writel(val, priv->base + regoff + cs * SPI_CS_OFFSET);
+}
+
+static inline void xlp_spi_sysctl_write(struct xlp_spi_priv *priv,
+				int regoff, u32 val)
+{
+	writel(val, priv->base + regoff);
+}
+
+/*
+ * Setup global SPI_SYSCTRL register for all SPI channels.
+ */
+static void xlp_spi_sysctl_setup(struct xlp_spi_priv *xspi)
+{
+	int cs;
+
+	for (cs = 0; cs < XLP_SPI_MAX_CS; cs++)
+		xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL,
+				XLP_SPI_SYS_RESET << cs);
+	xlp_spi_sysctl_write(xspi, XLP_SPI_SYSCTRL, XLP_SPI_SYS_PMEN);
+}
+
+static int xlp_spi_setup(struct spi_device *spi)
+{
+	struct xlp_spi_priv *xspi;
+	u32 fdiv, cfg;
+	int cs;
+
+	xspi = spi_master_get_devdata(spi->master);
+	cs = spi->chip_select;
+	/*
+	 * The value of fdiv must be between 4 and 65535.
+	 */
+	fdiv = DIV_ROUND_UP(xspi->spi_clk, spi->max_speed_hz);
+	if (fdiv > XLP_SPI_FDIV_MAX)
+		fdiv = XLP_SPI_FDIV_MAX;
+	else if (fdiv < XLP_SPI_FDIV_MIN)
+		fdiv = XLP_SPI_FDIV_MIN;
+
+	xlp_spi_reg_write(xspi, cs, XLP_SPI_FDIV, fdiv);
+	xlp_spi_reg_write(xspi, cs, XLP_SPI_FIFO_THRESH, XLP_SPI_TXRXTH);
+	cfg = xlp_spi_reg_read(xspi, cs, XLP_SPI_CONFIG);
+	if (spi->mode & SPI_CPHA)
+		cfg |= XLP_SPI_CPHA;
+	else
+		cfg &= ~XLP_SPI_CPHA;
+	if (spi->mode & SPI_CPOL)
+		cfg |= XLP_SPI_CPOL;
+	else
+		cfg &= ~XLP_SPI_CPOL;
+	if (!(spi->mode & SPI_CS_HIGH))
+		cfg |= XLP_SPI_CS_POL;
+	else
+		cfg &= ~XLP_SPI_CS_POL;
+	if (spi->mode & SPI_LSB_FIRST)
+		cfg |= XLP_SPI_CS_LSBFE;
+	else
+		cfg &= ~XLP_SPI_CS_LSBFE;
+
+	cfg |= XLP_SPI_TXMOSI_EN | XLP_SPI_RXMISO_EN;
+	if (fdiv == 4)
+		cfg |= XLP_SPI_RXCAP_EN;
+	xlp_spi_reg_write(xspi, cs, XLP_SPI_CONFIG, cfg);
+
+	return 0;
+}
+
+static void xlp_spi_read_rxfifo(struct xlp_spi_priv *xspi)
+{
+	u32 rx_data, rxfifo_cnt;
+	int i, j, nbytes;
+
+	rxfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
+	rxfifo_cnt &= XLP_SPI_RXFIFO_WCNT_MASK;
+	while (rxfifo_cnt) {
+		rx_data = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_RXDATA_FIFO);
+		j = 0;
+		nbytes = min(xspi->rx_len, 4);
+		for (i = nbytes - 1; i >= 0; i--, j++)
+			xspi->rx_buf[i] = (rx_data >> (j * 8)) & 0xff;
+
+		xspi->rx_len -= nbytes;
+		xspi->rx_buf += nbytes;
+		rxfifo_cnt--;
+	}
+}
+
+static void xlp_spi_fill_txfifo(struct xlp_spi_priv *xspi)
+{
+	u32 tx_data, txfifo_cnt;
+	int i, j, nbytes;
+
+	txfifo_cnt = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_FIFO_WCNT);
+	txfifo_cnt &= XLP_SPI_TXFIFO_WCNT_MASK;
+	txfifo_cnt >>= XLP_SPI_TXFIFO_WCNT_SHIFT;
+	while (xspi->tx_len && (txfifo_cnt < XLP_SPI_FIFO_SIZE)) {
+		j = 0;
+		tx_data = 0;
+		nbytes = min(xspi->tx_len, 4);
+		for (i = nbytes - 1; i >= 0; i--, j++)
+			tx_data |= xspi->tx_buf[i] << (j * 8);
+
+		xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_TXDATA_FIFO, tx_data);
+		xspi->tx_len -= nbytes;
+		xspi->tx_buf += nbytes;
+		txfifo_cnt++;
+	}
+}
+
+static irqreturn_t xlp_spi_interrupt(int irq, void *dev_id)
+{
+	struct xlp_spi_priv *xspi = dev_id;
+	u32 stat;
+
+	stat = xlp_spi_reg_read(xspi, xspi->cs, XLP_SPI_STATUS) &
+		XLP_SPI_STAT_MASK;
+	if (!stat)
+		return IRQ_NONE;
+
+	if (stat & XLP_SPI_TX_INT) {
+		if (xspi->tx_len)
+			xlp_spi_fill_txfifo(xspi);
+		if (stat & XLP_SPI_TX_UF)
+			xspi->txerrors++;
+	}
+
+	if (stat & XLP_SPI_RX_INT) {
+		if (xspi->rx_len)
+			xlp_spi_read_rxfifo(xspi);
+		if (stat & XLP_SPI_RX_OF)
+			xspi->rxerrors++;
+	}
+
+	/* write status back to clear interrupts */
+	xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_STATUS, stat);
+	if (stat & XLP_SPI_XFR_DONE)
+		complete(&xspi->done);
+
+	return IRQ_HANDLED;
+}
+
+static void xlp_spi_send_cmd(struct xlp_spi_priv *xspi, int xfer_len,
+			int cmd_cont)
+{
+	u32 cmd = 0;
+
+	if (xspi->tx_buf)
+		cmd |= XLP_SPI_CMD_TX_MASK;
+	if (xspi->rx_buf)
+		cmd |= XLP_SPI_CMD_RX_MASK;
+	if (cmd_cont)
+		cmd |= XLP_SPI_CMD_CONT;
+	cmd |= ((xfer_len * 8 - 1) << XLP_SPI_XFR_BITCNT_SHIFT);
+	xlp_spi_reg_write(xspi, xspi->cs, XLP_SPI_CMD, cmd);
+}
+
+static int xlp_spi_xfer_block(struct  xlp_spi_priv *xs,
+		const unsigned char *tx_buf,
+		unsigned char *rx_buf, int xfer_len, int cmd_cont)
+{
+	int timeout;
+	u32 intr_mask = 0;
+
+	xs->tx_buf = tx_buf;
+	xs->rx_buf = rx_buf;
+	xs->tx_len = (xs->tx_buf == NULL) ? 0 : xfer_len;
+	xs->rx_len = (xs->rx_buf == NULL) ? 0 : xfer_len;
+	xs->txerrors = xs->rxerrors = 0;
+
+	/* fill TXDATA_FIFO, then send the CMD */
+	if (xs->tx_len)
+		xlp_spi_fill_txfifo(xs);
+
+	xlp_spi_send_cmd(xs, xfer_len, cmd_cont);
+
+	/*
+	 * Spurious tx interrupts have been observed, so avoid enabling
+	 * tx interrupts when only a receive is in progress.
+	 * Enable all interrupts when transmitting.
+	 */
+	if (xs->tx_len)
+		intr_mask |= XLP_SPI_INTR_TXTH | XLP_SPI_INTR_TXUF |
+				XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
+	else
+		intr_mask |= XLP_SPI_INTR_RXTH | XLP_SPI_INTR_RXOF;
+
+	intr_mask |= XLP_SPI_INTR_DONE;
+	xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, intr_mask);
+
+	timeout = wait_for_completion_timeout(&xs->done,
+				msecs_to_jiffies(1000));
+	/* Disable interrupts */
+	xlp_spi_reg_write(xs, xs->cs, XLP_SPI_INTR_EN, 0x0);
+	if (!timeout) {
+		dev_err(&xs->dev, "xfer timed out!\n");
+		goto out;
+	}
+	if (xs->txerrors || xs->rxerrors)
+		dev_err(&xs->dev, "Over/Underflow rx %d tx %d xfer %d!\n",
+				xs->rxerrors, xs->txerrors, xfer_len);
+
+	return xfer_len;
+out:
+	return -ETIMEDOUT;
+}
+
+static int xlp_spi_txrx_bufs(struct xlp_spi_priv *xs, struct spi_transfer *t)
+{
+	int bytesleft, sz;
+	unsigned char *rx_buf;
+	const unsigned char *tx_buf;
+
+	tx_buf = t->tx_buf;
+	rx_buf = t->rx_buf;
+	bytesleft = t->len;
+	while (bytesleft) {
+		if (bytesleft > XLP_SPI_XFER_SIZE)
+			sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
+					XLP_SPI_XFER_SIZE, 1);
+		else
+			sz = xlp_spi_xfer_block(xs, tx_buf, rx_buf,
+					bytesleft, xs->cmd_cont);
+		if (sz < 0)
+			return sz;
+		bytesleft -= sz;
+		if (tx_buf)
+			tx_buf += sz;
+		if (rx_buf)
+			rx_buf += sz;
+	}
+	return bytesleft;
+}
+
+static int xlp_spi_transfer_one(struct spi_master *master,
+					struct spi_device *spi,
+					struct spi_transfer *t)
+{
+	struct xlp_spi_priv *xspi = spi_master_get_devdata(master);
+	int ret = 0;
+
+	xspi->cs = spi->chip_select;
+	xspi->dev = spi->dev;
+
+	if (spi_transfer_is_last(master, t))
+		xspi->cmd_cont = 0;
+	else
+		xspi->cmd_cont = 1;
+
+	if (xlp_spi_txrx_bufs(xspi, t))
+		ret = -EIO;
+
+	spi_finalize_current_transfer(master);
+	return ret;
+}
+
+static int xlp_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master *master;
+	struct xlp_spi_priv *xspi;
+	struct resource *res;
+	struct clk *clk;
+	int irq, err;
+
+	xspi = devm_kzalloc(&pdev->dev, sizeof(*xspi), GFP_KERNEL);
+	if (!xspi)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	xspi->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(xspi->base))
+		return PTR_ERR(xspi->base);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no IRQ resource found\n");
+		return -EINVAL;
+	}
+	err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
+			pdev->name, xspi);
+	if (err) {
+		dev_err(&pdev->dev, "unable to request irq %d\n", irq);
+		return err;
+	}
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "could not get spi clock\n");
+		return -ENODEV;
+	}
+	xspi->spi_clk = clk_get_rate(clk);
+
+	master = spi_alloc_master(&pdev->dev, 0);
+	if (!master) {
+		dev_err(&pdev->dev, "could not alloc master\n");
+		return -ENOMEM;
+	}
+
+	master->bus_num = 0;
+	master->num_chipselect = XLP_SPI_MAX_CS;
+	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+	master->setup = xlp_spi_setup;
+	master->transfer_one = xlp_spi_transfer_one;
+	master->dev.of_node = pdev->dev.of_node;
+
+	init_completion(&xspi->done);
+	spi_master_set_devdata(master, xspi);
+	xlp_spi_sysctl_setup(xspi);
+
+	/* register spi controller */
+	err = devm_spi_register_master(&pdev->dev, master);
+	if (err) {
+		dev_err(&pdev->dev, "spi register master failed!\n");
+		spi_master_put(master);
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id xlp_spi_dt_id[] = {
+	{ .compatible = "netlogic,xlp832-spi" },
+	{ },
+};
+
+static struct platform_driver xlp_spi_driver = {
+	.probe	= xlp_spi_probe,
+	.driver = {
+		.name	= "xlp-spi",
+		.of_match_table = xlp_spi_dt_id,
+	},
+};
+module_platform_driver(xlp_spi_driver);
+
+MODULE_AUTHOR("Kamlakant Patel <kamlakant.patel@broadcom.com>");
+MODULE_DESCRIPTION("Netlogic XLP SPI controller driver");
+MODULE_LICENSE("GPL v2");
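
The new driver above caps each hardware transaction at XLP_SPI_XFER_SIZE (28 bytes) and moves data through 32-bit FIFO words, packing up to four message bytes per word with the first byte in the most significant used lane. The stand-alone sketch below (user-space C, not part of the patch; names and the test vector are invented for illustration) mirrors the byte ordering used by xlp_spi_fill_txfifo() and xlp_spi_read_rxfifo():

#include <stdint.h>
#include <stdio.h>

/* Mirrors xlp_spi_fill_txfifo(): pack up to 4 bytes into one FIFO word,
 * with the first buffer byte ending up in the highest used byte lane. */
static uint32_t pack_fifo_word(const uint8_t *buf, int nbytes)
{
	uint32_t word = 0;
	int i, j;

	for (i = nbytes - 1, j = 0; i >= 0; i--, j++)
		word |= (uint32_t)buf[i] << (j * 8);
	return word;
}

/* Mirrors xlp_spi_read_rxfifo(): unpack the same word back into bytes. */
static void unpack_fifo_word(uint32_t word, uint8_t *buf, int nbytes)
{
	int i, j;

	for (i = nbytes - 1, j = 0; i >= 0; i--, j++)
		buf[i] = (word >> (j * 8)) & 0xff;
}

int main(void)
{
	const uint8_t tx[4] = { 0x12, 0x34, 0x56, 0x78 };
	uint8_t rx[4];

	uint32_t w = pack_fifo_word(tx, 4);	/* 0x12345678 */
	unpack_fifo_word(w, rx, 4);		/* rx matches tx again */
	printf("packed word: 0x%08x\n", w);
	return 0;
}

Packing 0x12 0x34 0x56 0x78 yields the word 0x12345678 and unpacking recovers the original bytes, which is the round trip the TX and RX FIFO paths rely on.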
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index cf8b91b..3abb390 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -67,11 +67,141 @@
 }
 static DEVICE_ATTR_RO(modalias);
 
+#define SPI_STATISTICS_ATTRS(field, file)				\
+static ssize_t spi_master_##field##_show(struct device *dev,		\
+					 struct device_attribute *attr,	\
+					 char *buf)			\
+{									\
+	struct spi_master *master = container_of(dev,			\
+						 struct spi_master, dev); \
+	return spi_statistics_##field##_show(&master->statistics, buf);	\
+}									\
+static struct device_attribute dev_attr_spi_master_##field = {		\
+	.attr = { .name = file, .mode = S_IRUGO },			\
+	.show = spi_master_##field##_show,				\
+};									\
+static ssize_t spi_device_##field##_show(struct device *dev,		\
+					 struct device_attribute *attr,	\
+					char *buf)			\
+{									\
+	struct spi_device *spi = container_of(dev,			\
+					      struct spi_device, dev);	\
+	return spi_statistics_##field##_show(&spi->statistics, buf);	\
+}									\
+static struct device_attribute dev_attr_spi_device_##field = {		\
+	.attr = { .name = file, .mode = S_IRUGO },			\
+	.show = spi_device_##field##_show,				\
+}
+
+#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
+static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
+					    char *buf)			\
+{									\
+	unsigned long flags;						\
+	ssize_t len;							\
+	spin_lock_irqsave(&stat->lock, flags);				\
+	len = sprintf(buf, format_string, stat->field);			\
+	spin_unlock_irqrestore(&stat->lock, flags);			\
+	return len;							\
+}									\
+SPI_STATISTICS_ATTRS(name, file)
+
+#define SPI_STATISTICS_SHOW(field, format_string)			\
+	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
+				 field, format_string)
+
+SPI_STATISTICS_SHOW(messages, "%lu");
+SPI_STATISTICS_SHOW(transfers, "%lu");
+SPI_STATISTICS_SHOW(errors, "%lu");
+SPI_STATISTICS_SHOW(timedout, "%lu");
+
+SPI_STATISTICS_SHOW(spi_sync, "%lu");
+SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
+SPI_STATISTICS_SHOW(spi_async, "%lu");
+
+SPI_STATISTICS_SHOW(bytes, "%llu");
+SPI_STATISTICS_SHOW(bytes_rx, "%llu");
+SPI_STATISTICS_SHOW(bytes_tx, "%llu");
+
 static struct attribute *spi_dev_attrs[] = {
 	&dev_attr_modalias.attr,
 	NULL,
 };
-ATTRIBUTE_GROUPS(spi_dev);
+
+static const struct attribute_group spi_dev_group = {
+	.attrs  = spi_dev_attrs,
+};
+
+static struct attribute *spi_device_statistics_attrs[] = {
+	&dev_attr_spi_device_messages.attr,
+	&dev_attr_spi_device_transfers.attr,
+	&dev_attr_spi_device_errors.attr,
+	&dev_attr_spi_device_timedout.attr,
+	&dev_attr_spi_device_spi_sync.attr,
+	&dev_attr_spi_device_spi_sync_immediate.attr,
+	&dev_attr_spi_device_spi_async.attr,
+	&dev_attr_spi_device_bytes.attr,
+	&dev_attr_spi_device_bytes_rx.attr,
+	&dev_attr_spi_device_bytes_tx.attr,
+	NULL,
+};
+
+static const struct attribute_group spi_device_statistics_group = {
+	.name  = "statistics",
+	.attrs  = spi_device_statistics_attrs,
+};
+
+static const struct attribute_group *spi_dev_groups[] = {
+	&spi_dev_group,
+	&spi_device_statistics_group,
+	NULL,
+};
+
+static struct attribute *spi_master_statistics_attrs[] = {
+	&dev_attr_spi_master_messages.attr,
+	&dev_attr_spi_master_transfers.attr,
+	&dev_attr_spi_master_errors.attr,
+	&dev_attr_spi_master_timedout.attr,
+	&dev_attr_spi_master_spi_sync.attr,
+	&dev_attr_spi_master_spi_sync_immediate.attr,
+	&dev_attr_spi_master_spi_async.attr,
+	&dev_attr_spi_master_bytes.attr,
+	&dev_attr_spi_master_bytes_rx.attr,
+	&dev_attr_spi_master_bytes_tx.attr,
+	NULL,
+};
+
+static const struct attribute_group spi_master_statistics_group = {
+	.name  = "statistics",
+	.attrs  = spi_master_statistics_attrs,
+};
+
+static const struct attribute_group *spi_master_groups[] = {
+	&spi_master_statistics_group,
+	NULL,
+};
+
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+				       struct spi_transfer *xfer,
+				       struct spi_master *master)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&stats->lock, flags);
+
+	stats->transfers++;
+
+	stats->bytes += xfer->len;
+	if ((xfer->tx_buf) &&
+	    (xfer->tx_buf != master->dummy_tx))
+		stats->bytes_tx += xfer->len;
+	if ((xfer->rx_buf) &&
+	    (xfer->rx_buf != master->dummy_rx))
+		stats->bytes_rx += xfer->len;
+
+	spin_unlock_irqrestore(&stats->lock, flags);
+}
+EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
 
 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
  * and the sysfs version makes coldplug work too.
@@ -249,6 +379,9 @@
 	spi->dev.bus = &spi_bus_type;
 	spi->dev.release = spidev_release;
 	spi->cs_gpio = -ENOENT;
+
+	spin_lock_init(&spi->statistics.lock);
+
 	device_initialize(&spi->dev);
 	return spi;
 }
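
For reference, one instantiation of the statistics macros introduced above, SPI_STATISTICS_SHOW(messages, "%lu"), expands to roughly the following (an illustrative expansion, not extra patch content; the real preprocessor output differs only in layout):

static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
					    char *buf)
{
	unsigned long flags;
	ssize_t len;

	spin_lock_irqsave(&stat->lock, flags);
	len = sprintf(buf, "%lu", stat->messages);
	spin_unlock_irqrestore(&stat->lock, flags);
	return len;
}

static ssize_t spi_master_messages_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct spi_master *master = container_of(dev, struct spi_master, dev);

	return spi_statistics_messages_show(&master->statistics, buf);
}
/* ...plus the analogous spi_device_messages_show() and the two
 * dev_attr_spi_{master,device}_messages attributes named "messages". */

Because the attribute groups are named "statistics", each counter ends up as a statistics/<name> file on both the controller and the client device in sysfs.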
@@ -476,21 +609,30 @@
 		       enum dma_data_direction dir)
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
-	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
-	const int sgs = DIV_ROUND_UP(len, desc_len);
+	int desc_len;
+	int sgs;
 	struct page *vm_page;
 	void *sg_buf;
 	size_t min;
 	int i, ret;
 
+	if (vmalloced_buf) {
+		desc_len = PAGE_SIZE;
+		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
+	} else {
+		desc_len = master->max_dma_len;
+		sgs = DIV_ROUND_UP(len, desc_len);
+	}
+
 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
 	if (ret != 0)
 		return ret;
 
 	for (i = 0; i < sgs; i++) {
-		min = min_t(size_t, len, desc_len);
 
 		if (vmalloced_buf) {
+			min = min_t(size_t,
+				    len, desc_len - offset_in_page(buf));
 			vm_page = vmalloc_to_page(buf);
 			if (!vm_page) {
 				sg_free_table(sgt);
@@ -499,6 +641,7 @@
 			sg_set_page(&sgt->sgl[i], vm_page,
 				    min, offset_in_page(buf));
 		} else {
+			min = min_t(size_t, len, desc_len);
 			sg_buf = buf;
 			sg_set_buf(&sgt->sgl[i], sg_buf, min);
 		}
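
A quick worked example (hypothetical numbers, not from the patch) of why the spi_map_buf() hunks above fold the intra-page offset into the segment count for vmalloc'd buffers: a buffer starting 3000 bytes into a page with len = 2000 crosses a page boundary, yet DIV_ROUND_UP(len, PAGE_SIZE) alone would allocate only one scatterlist entry.

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long offset = 3000;	/* offset_in_page(buf), assumed */
	unsigned long len = 2000;

	unsigned long sgs_old = DIV_ROUND_UP(len, PAGE_SIZE);		/* 1 */
	unsigned long sgs_new = DIV_ROUND_UP(len + offset, PAGE_SIZE);	/* 2 */
	unsigned long first_seg = PAGE_SIZE - offset;			/* 1096 */

	printf("old sgs=%lu, new sgs=%lu, first segment=%lu bytes\n",
	       sgs_old, sgs_new, first_seg);
	return 0;
}

The first segment is therefore capped at desc_len - offset_in_page(buf) (1096 bytes here), and the remainder lands in the second scatterlist entry.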
@@ -539,8 +682,15 @@
 	if (!master->can_dma)
 		return 0;
 
-	tx_dev = master->dma_tx->device->dev;
-	rx_dev = master->dma_rx->device->dev;
+	if (master->dma_tx)
+		tx_dev = master->dma_tx->device->dev;
+	else
+		tx_dev = &master->dev;
+
+	if (master->dma_rx)
+		rx_dev = master->dma_rx->device->dev;
+	else
+		rx_dev = &master->dev;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		if (!master->can_dma(master, msg->spi, xfer))
@@ -579,8 +729,15 @@
 	if (!master->cur_msg_mapped || !master->can_dma)
 		return 0;
 
-	tx_dev = master->dma_tx->device->dev;
-	rx_dev = master->dma_rx->device->dev;
+	if (master->dma_tx)
+		tx_dev = master->dma_tx->device->dev;
+	else
+		tx_dev = &master->dev;
+
+	if (master->dma_rx)
+		rx_dev = master->dma_rx->device->dev;
+	else
+		rx_dev = &master->dev;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		if (!master->can_dma(master, msg->spi, xfer))
@@ -689,17 +846,29 @@
 	bool keep_cs = false;
 	int ret = 0;
 	unsigned long ms = 1;
+	struct spi_statistics *statm = &master->statistics;
+	struct spi_statistics *stats = &msg->spi->statistics;
 
 	spi_set_cs(msg->spi, true);
 
+	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
+	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
+
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 		trace_spi_transfer_start(msg, xfer);
 
+		spi_statistics_add_transfer_stats(statm, xfer, master);
+		spi_statistics_add_transfer_stats(stats, xfer, master);
+
 		if (xfer->tx_buf || xfer->rx_buf) {
 			reinit_completion(&master->xfer_completion);
 
 			ret = master->transfer_one(master, msg->spi, xfer);
 			if (ret < 0) {
+				SPI_STATISTICS_INCREMENT_FIELD(statm,
+							       errors);
+				SPI_STATISTICS_INCREMENT_FIELD(stats,
+							       errors);
 				dev_err(&msg->spi->dev,
 					"SPI transfer failed: %d\n", ret);
 				goto out;
@@ -715,6 +884,10 @@
 			}
 
 			if (ms == 0) {
+				SPI_STATISTICS_INCREMENT_FIELD(statm,
+							       timedout);
+				SPI_STATISTICS_INCREMENT_FIELD(stats,
+							       timedout);
 				dev_err(&msg->spi->dev,
 					"SPI transfer timed out\n");
 				msg->status = -ETIMEDOUT;
@@ -1416,10 +1589,10 @@
 	.name		= "spi_master",
 	.owner		= THIS_MODULE,
 	.dev_release	= spi_master_release,
+	.dev_groups	= spi_master_groups,
 };
 
 
-
 /**
  * spi_alloc_master - allocate SPI master controller
  * @dev: the controller, possibly using the platform_bus
@@ -1585,6 +1758,8 @@
 			goto done;
 		}
 	}
+	/* add statistics */
+	spin_lock_init(&master->statistics.lock);
 
 	mutex_lock(&board_lock);
 	list_add_tail(&master->list, &spi_master_list);
@@ -1740,6 +1915,20 @@
  * other core methods are currently defined as inline functions.
  */
 
+static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
+{
+	if (master->bits_per_word_mask) {
+		/* Only 32 bits fit in the mask */
+		if (bits_per_word > 32)
+			return -EINVAL;
+		if (!(master->bits_per_word_mask &
+				SPI_BPW_MASK(bits_per_word)))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * spi_setup - setup SPI mode and clock rate
  * @spi: the device whose settings are being modified
@@ -1798,6 +1987,9 @@
 	if (!spi->bits_per_word)
 		spi->bits_per_word = 8;
 
+	if (__spi_validate_bits_per_word(spi->master, spi->bits_per_word))
+		return -EINVAL;
+
 	if (!spi->max_speed_hz)
 		spi->max_speed_hz = spi->master->max_speed_hz;
 
@@ -1860,19 +2052,15 @@
 
 		if (!xfer->speed_hz)
 			xfer->speed_hz = spi->max_speed_hz;
+		if (!xfer->speed_hz)
+			xfer->speed_hz = master->max_speed_hz;
 
 		if (master->max_speed_hz &&
 		    xfer->speed_hz > master->max_speed_hz)
 			xfer->speed_hz = master->max_speed_hz;
 
-		if (master->bits_per_word_mask) {
-			/* Only 32 bits fit in the mask */
-			if (xfer->bits_per_word > 32)
-				return -EINVAL;
-			if (!(master->bits_per_word_mask &
-					BIT(xfer->bits_per_word - 1)))
-				return -EINVAL;
-		}
+		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
+			return -EINVAL;
 
 		/*
 		 * SPI transfer length should be multiple of SPI word size
@@ -1939,6 +2127,9 @@
 
 	message->spi = spi;
 
+	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
+	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
+
 	trace_spi_message_submit(message);
 
 	return master->transfer(spi, message);
@@ -2075,6 +2266,9 @@
 	message->context = &done;
 	message->spi = spi;
 
+	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
+	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
+
 	if (!bus_locked)
 		mutex_lock(&master->bus_lock_mutex);
 
@@ -2102,8 +2296,13 @@
 		/* Push out the messages in the calling context if we
 		 * can.
 		 */
-		if (master->transfer == spi_queued_transfer)
+		if (master->transfer == spi_queued_transfer) {
+			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+						       spi_sync_immediate);
+			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
+						       spi_sync_immediate);
 			__spi_pump_messages(master, false);
+		}
 
 		wait_for_completion(&done);
 		status = message->status;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index c7de641..fba92a5 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -602,11 +602,11 @@
 	if (!spidev->tx_buffer) {
 		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
 		if (!spidev->tx_buffer) {
-				dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
-				status = -ENOMEM;
+			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+			status = -ENOMEM;
 			goto err_find_dev;
-			}
 		}
+	}
 
 	if (!spidev->rx_buffer) {
 		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
@@ -709,7 +709,7 @@
 
 	/*
 	 * spidev should never be referenced in DT without a specific
-	 * compatbile string, it is a Linux implementation thing
+	 * compatible string, it is a Linux implementation thing
 	 * rather than a description of the hardware.
 	 */
 	if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 982580a..0d3b70b 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -12,7 +12,7 @@
 
 config SPMI_MSM_PMIC_ARB
 	tristate "Qualcomm MSM SPMI Controller (PMIC Arbiter)"
-	depends on IRQ_DOMAIN
+	select IRQ_DOMAIN
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on HAS_IOMEM
 	default ARCH_QCOM
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index d7119db..bdfb3c8 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -453,8 +453,8 @@
 
 static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq);
-	struct irq_chip *chip = irq_get_chip(irq);
+	struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	void __iomem *intr = pa->intr;
 	int first = pa->min_apid >> 5;
 	int last = pa->max_apid >> 5;
@@ -575,6 +575,22 @@
 	return 0;
 }
 
+static int qpnpint_get_irqchip_state(struct irq_data *d,
+				     enum irqchip_irq_state which,
+				     bool *state)
+{
+	u8 irq = d->hwirq >> 8;
+	u8 status = 0;
+
+	if (which != IRQCHIP_STATE_LINE_LEVEL)
+		return -EINVAL;
+
+	qpnpint_spmi_read(d, QPNPINT_REG_RT_STS, &status, 1);
+	*state = !!(status & BIT(irq));
+
+	return 0;
+}
+
 static struct irq_chip pmic_arb_irqchip = {
 	.name		= "pmic_arb",
 	.irq_enable	= qpnpint_irq_enable,
@@ -582,6 +598,7 @@
 	.irq_mask	= qpnpint_irq_mask,
 	.irq_unmask	= qpnpint_irq_unmask,
 	.irq_set_type	= qpnpint_irq_set_type,
+	.irq_get_irqchip_state	= qpnpint_get_irqchip_state,
 	.flags		= IRQCHIP_MASK_ON_SUSPEND
 			| IRQCHIP_SKIP_SET_WAKE,
 };
@@ -928,8 +945,7 @@
 		goto err_put_ctrl;
 	}
 
-	irq_set_handler_data(pa->irq, pa);
-	irq_set_chained_handler(pa->irq, pmic_arb_chained_irq);
+	irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa);
 
 	err = spmi_controller_add(ctrl);
 	if (err)
@@ -938,8 +954,7 @@
 	return 0;
 
 err_domain_remove:
-	irq_set_chained_handler(pa->irq, NULL);
-	irq_set_handler_data(pa->irq, NULL);
+	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
 	irq_domain_remove(pa->domain);
 err_put_ctrl:
 	spmi_controller_put(ctrl);
@@ -951,8 +966,7 @@
 	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
 	struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl);
 	spmi_controller_remove(ctrl);
-	irq_set_chained_handler(pa->irq, NULL);
-	irq_set_handler_data(pa->irq, NULL);
+	irq_set_chained_handler_and_data(pa->irq, NULL, NULL);
 	irq_domain_remove(pa->domain);
 	spmi_controller_put(ctrl);
 	return 0;
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 9493843..11467e1 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -22,6 +22,8 @@
 #include <linux/pm_runtime.h>
 
 #include <dt-bindings/spmi/spmi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/spmi.h>
 
 static DEFINE_IDA(ctrl_ida);
 
@@ -96,28 +98,42 @@
 static inline int
 spmi_cmd(struct spmi_controller *ctrl, u8 opcode, u8 sid)
 {
+	int ret;
+
 	if (!ctrl || !ctrl->cmd || ctrl->dev.type != &spmi_ctrl_type)
 		return -EINVAL;
 
-	return ctrl->cmd(ctrl, opcode, sid);
+	ret = ctrl->cmd(ctrl, opcode, sid);
+	trace_spmi_cmd(opcode, sid, ret);
+	return ret;
 }
 
 static inline int spmi_read_cmd(struct spmi_controller *ctrl, u8 opcode,
 				u8 sid, u16 addr, u8 *buf, size_t len)
 {
+	int ret;
+
 	if (!ctrl || !ctrl->read_cmd || ctrl->dev.type != &spmi_ctrl_type)
 		return -EINVAL;
 
-	return ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+	trace_spmi_read_begin(opcode, sid, addr);
+	ret = ctrl->read_cmd(ctrl, opcode, sid, addr, buf, len);
+	trace_spmi_read_end(opcode, sid, addr, ret, len, buf);
+	return ret;
 }
 
 static inline int spmi_write_cmd(struct spmi_controller *ctrl, u8 opcode,
 				 u8 sid, u16 addr, const u8 *buf, size_t len)
 {
+	int ret;
+
 	if (!ctrl || !ctrl->write_cmd || ctrl->dev.type != &spmi_ctrl_type)
 		return -EINVAL;
 
-	return ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+	trace_spmi_write_begin(opcode, sid, addr, len, buf);
+	ret = ctrl->write_cmd(ctrl, opcode, sid, addr, buf, len);
+	trace_spmi_write_end(opcode, sid, addr, ret);
+	return ret;
 }
 
 /**
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7f6cae5..e29293c 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -56,8 +56,6 @@
 
 source "drivers/staging/iio/Kconfig"
 
-source "drivers/staging/sm7xxfb/Kconfig"
-
 source "drivers/staging/sm750fb/Kconfig"
 
 source "drivers/staging/xgifb/Kconfig"
@@ -78,8 +76,6 @@
 
 source "drivers/staging/board/Kconfig"
 
-source "drivers/staging/ozwpan/Kconfig"
-
 source "drivers/staging/gdm72xx/Kconfig"
 
 source "drivers/staging/gdm724x/Kconfig"
@@ -112,4 +108,6 @@
 
 source "drivers/staging/wilc1000/Kconfig"
 
+source "drivers/staging/most/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 347f647..50824dd 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -22,7 +22,6 @@
 obj-$(CONFIG_VT6656)		+= vt6656/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_IIO)		+= iio/
-obj-$(CONFIG_FB_SM7XX)		+= sm7xxfb/
 obj-$(CONFIG_FB_SM750)		+= sm750fb/
 obj-$(CONFIG_FB_XGI)		+= xgifb/
 obj-$(CONFIG_USB_EMXX)		+= emxx_udc/
@@ -32,7 +31,6 @@
 obj-$(CONFIG_MFD_NVEC)		+= nvec/
 obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_STAGING_BOARD)	+= board/
-obj-$(CONFIG_USB_WPAN_HCD)	+= ozwpan/
 obj-$(CONFIG_WIMAX_GDM72XX)	+= gdm72xx/
 obj-$(CONFIG_LTE_GDM724X)	+= gdm724x/
 obj-$(CONFIG_FIREWIRE_SERIAL)	+= fwserial/
@@ -48,3 +46,4 @@
 obj-$(CONFIG_FB_TFT)		+= fbtft/
 obj-$(CONFIG_FSL_MC_BUS)	+= fsl-mc/
 obj-$(CONFIG_WILC1000)		+= wilc1000/
+obj-$(CONFIG_MOST)		+= most/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 24d657b..6830712 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -20,7 +20,8 @@
 
 config ANDROID_TIMED_GPIO
 	tristate "Android timed gpio driver"
-	depends on GPIOLIB && ANDROID_TIMED_OUTPUT
+	depends on GPIOLIB || COMPILE_TEST
+	depends on ANDROID_TIMED_OUTPUT
 	default n
         ---help---
 	  Unlike generic gpio is to allow programs to access and manipulate gpio
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 06954cd..20288fc 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -2,16 +2,8 @@
 	- checkpatch.pl cleanups
 	- sparse fixes
 	- rename files to be not so "generic"
-	- make sure things build as modules properly
 	- add proper arch dependencies as needed
 	- audit userspace interfaces to make sure they are sane
-	- kuid_t should never be exposed to user space as it is
-          kernel internal type. Data structure for this kuid_t is:
-          typedef struct {
-          	uid_t val;
-          } kuid_t;
-	- This bug is introduced by Xiong Zhou in the patch bd471258f2e09
-	- ("staging: android: logger: use kuid_t instead of uid_t")
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
-Brian Swetland <swetland@google.com>
+Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index c5c037c..60200a3 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -388,7 +388,7 @@
 
 		/* ... and allocate the backing shmem file */
 		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
-		if (unlikely(IS_ERR(vmfile))) {
+		if (IS_ERR(vmfile)) {
 			ret = PTR_ERR(vmfile);
 			goto out;
 		}
@@ -660,7 +660,7 @@
 		if (page_range_subsumed_by_range(range, pgstart, pgend))
 			return 0;
 		if (page_range_in_range(range, pgstart, pgend)) {
-			pgstart = min_t(size_t, range->pgstart, pgstart),
+			pgstart = min_t(size_t, range->pgstart, pgstart);
 			pgend = max_t(size_t, range->pgend, pgend);
 			purged |= range->purged;
 			range_del(range);
@@ -863,14 +863,9 @@
 
 static void __exit ashmem_exit(void)
 {
-	int ret;
-
 	unregister_shrinker(&ashmem_shrinker);
 
-	ret = misc_deregister(&ashmem_misc);
-	if (unlikely(ret))
-		pr_err("failed to unregister misc device!\n");
-
+	misc_deregister(&ashmem_misc);
 	kmem_cache_destroy(ashmem_range_cachep);
 	kmem_cache_destroy(ashmem_area_cachep);
 
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 6f48112..eec878e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1103,10 +1103,10 @@
 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
 						struct ion_handle *handle)
 {
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct ion_buffer *buffer;
 	struct dma_buf *dmabuf;
 	bool valid_handle;
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
 	mutex_lock(&client->lock);
 	valid_handle = ion_handle_validate(client, handle);
@@ -1466,7 +1466,6 @@
 	.release = single_release,
 };
 
-#ifdef DEBUG_HEAP_SHRINKER
 static int debug_shrink_set(void *data, u64 val)
 {
 	struct ion_heap *heap = data;
@@ -1474,15 +1473,14 @@
 	int objs;
 
 	sc.gfp_mask = -1;
-	sc.nr_to_scan = 0;
+	sc.nr_to_scan = val;
 
-	if (!val)
-		return 0;
+	if (!val) {
+		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
+		sc.nr_to_scan = objs;
+	}
 
-	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-	sc.nr_to_scan = objs;
-
-	heap->shrinker.shrink(&heap->shrinker, &sc);
+	heap->shrinker.scan_objects(&heap->shrinker, &sc);
 	return 0;
 }
 
@@ -1495,14 +1493,13 @@
 	sc.gfp_mask = -1;
 	sc.nr_to_scan = 0;
 
-	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
 	*val = objs;
 	return 0;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 			debug_shrink_set, "%llu\n");
-#endif
 
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
@@ -1540,8 +1537,7 @@
 			path, heap->name);
 	}
 
-#ifdef DEBUG_HEAP_SHRINKER
-	if (heap->shrinker.shrink) {
+	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
 		char debug_name[64];
 
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
@@ -1556,7 +1552,7 @@
 				path, debug_name);
 		}
 	}
-#endif
+
 	up_write(&dev->lock);
 }
 
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 5474615..195c41d 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@
 err:
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 		sg = sg_next(sg);
 	}
@@ -109,7 +109,7 @@
 							DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
 			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
@@ -173,8 +173,8 @@
 	chunk_heap->heap.ops = &chunk_heap_ops;
 	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
 	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-	pr_debug("%s: base %lu size %zu align %ld\n", __func__, chunk_heap->base,
-		heap_data->size, heap_data->align);
+	pr_debug("%s: base %lu size %zu align %ld\n", __func__,
+		chunk_heap->base, heap_data->size, heap_data->align);
 
 	return &chunk_heap->heap;
 
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index f4211f1..0b2448c 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -73,8 +73,8 @@
 	if (!info->table)
 		goto free_mem;
 
-	if (dma_common_get_sgtable
-	    (dev, info->table, info->cpu_addr, info->handle, len))
+	if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
+			    len))
 		goto free_table;
 	/* keep this for memory release */
 	buffer->priv_virt = info;
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index 4b88f11..19ad3ab 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -116,7 +116,7 @@
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 				int nr_to_scan)
 {
-	int freed;
+	int freed = 0;
 	bool high;
 
 	if (current_is_kswapd())
@@ -127,7 +127,7 @@
 	if (nr_to_scan == 0)
 		return ion_page_pool_total(pool, high);
 
-	for (freed = 0; freed < nr_to_scan; freed++) {
+	while (freed < nr_to_scan) {
 		struct page *page;
 
 		mutex_lock(&pool->mutex);
@@ -141,6 +141,7 @@
 		}
 		mutex_unlock(&pool->mutex);
 		ion_page_pool_free_pages(pool, page);
+		freed += (1 << pool->order);
 	}
 
 	return freed;
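
The hunk above fixes the accounting in ion_page_pool_shrink(): freed now advances by the number of base pages behind each pool entry instead of by one per loop iteration. A tiny arithmetic sketch with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int order = 2;		/* pool holds order-2 allocations */
	unsigned int entries_freed = 3;	/* iterations that released an entry */

	unsigned int old_freed = entries_freed;			/* 3  (pre-patch)  */
	unsigned int new_freed = entries_freed * (1 << order);	/* 12 (post-patch) */

	printf("old=%u pages reported, new=%u pages reported\n",
	       old_freed, new_freed);
	return 0;
}

With the old accounting, an order-2 pool that released three entries reported 3, underselling the 12 pages actually returned to the system.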
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index da2a63c..7a7a9a0 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -212,14 +212,26 @@
 {
 	struct ion_system_heap *sys_heap;
 	int nr_total = 0;
-	int i;
+	int i, nr_freed;
+	int only_scan = 0;
 
 	sys_heap = container_of(heap, struct ion_system_heap, heap);
 
+	if (!nr_to_scan)
+		only_scan = 1;
+
 	for (i = 0; i < num_orders; i++) {
 		struct ion_page_pool *pool = sys_heap->pools[i];
 
-		nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+		nr_freed = ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+		nr_total += nr_freed;
+
+		if (!only_scan) {
+			nr_to_scan -= nr_freed;
+			/* shrink completed */
+			if (nr_to_scan <= 0)
+				break;
+		}
 	}
 
 	return nr_total;
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 7d6e6b6b..b8dcf5a 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -269,7 +269,8 @@
 	if (!testdev)
 		return -ENODATA;
 
-	return misc_deregister(&testdev->misc);
+	misc_deregister(&testdev->misc);
+	return 0;
 }
 
 static struct platform_device *ion_test_pdev;
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index a21b79f..61f8a3a 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -337,11 +337,11 @@
 
 #ifdef CONFIG_DEBUG_FS
 
-extern void sync_timeline_debug_add(struct sync_timeline *obj);
-extern void sync_timeline_debug_remove(struct sync_timeline *obj);
-extern void sync_fence_debug_add(struct sync_fence *fence);
-extern void sync_fence_debug_remove(struct sync_fence *fence);
-extern void sync_dump(void);
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_fence_debug_add(struct sync_fence *fence);
+void sync_fence_debug_remove(struct sync_fence *fence);
+void sync_dump(void);
 
 #else
 # define sync_timeline_debug_add(obj)
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index 938a35c..ce11726 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -61,9 +61,9 @@
 
 static void gpio_enable(struct timed_output_dev *dev, int value)
 {
-	struct timed_gpio_data	*data =
+	struct timed_gpio_data *data =
 		container_of(dev, struct timed_gpio_data, dev);
-	unsigned long	flags;
+	unsigned long flags;
 
 	spin_lock_irqsave(&data->lock, flags);
 
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 5455bf3..b8e2f61 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/slab.h>
 #include <linux/io.h>
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7dee73d..57e71f9 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -2,7 +2,7 @@
 	tristate "Data acquisition support (comedi)"
 	depends on m
 	---help---
-	  Enable support a wide range of data acquisition devices
+	  Enable support for a wide range of data acquisition devices
 	  for Linux.
 
 if COMEDI
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 2584824..f356386 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -202,7 +202,8 @@
 	err |= __get_user(temp.uint, &cmd32->stop_arg);
 	err |= __put_user(temp.uint, &cmd->stop_arg);
 	err |= __get_user(temp.uptr, &cmd32->chanlist);
-	err |= __put_user(compat_ptr(temp.uptr), &cmd->chanlist);
+	err |= __put_user((unsigned int __force *)compat_ptr(temp.uptr),
+			&cmd->chanlist);
 	err |= __get_user(temp.uint, &cmd32->chanlist_len);
 	err |= __put_user(temp.uint, &cmd->chanlist_len);
 	err |= __get_user(temp.uptr, &cmd32->data);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 985d94b..fd54d09 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -215,7 +215,6 @@
 	struct comedi_subdevice *s;
 	unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
 
-	BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
 	mutex_lock(&comedi_subdevice_minor_table_lock);
 	s = comedi_subdevice_minor_table[i];
 	if (s && s->device != dev)
@@ -228,7 +227,6 @@
 {
 	struct comedi_device *dev;
 
-	BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
 	mutex_lock(&comedi_board_minor_table_lock);
 	dev = comedi_dev_get(comedi_board_minor_table[minor]);
 	mutex_unlock(&comedi_board_minor_table_lock);
@@ -241,7 +239,6 @@
 	struct comedi_subdevice *s;
 	unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
 
-	BUG_ON(i >= COMEDI_NUM_SUBDEVICE_MINORS);
 	mutex_lock(&comedi_subdevice_minor_table_lock);
 	s = comedi_subdevice_minor_table[i];
 	dev = comedi_dev_get(s ? s->device : NULL);
@@ -2599,14 +2596,14 @@
 	cfp->dev = dev;
 
 	mutex_lock(&dev->mutex);
-	if (!dev->attached && !capable(CAP_NET_ADMIN)) {
-		dev_dbg(dev->class_dev, "not attached and not CAP_NET_ADMIN\n");
+	if (!dev->attached && !capable(CAP_SYS_ADMIN)) {
+		dev_dbg(dev->class_dev, "not attached and not CAP_SYS_ADMIN\n");
 		rc = -ENODEV;
 		goto out;
 	}
 	if (dev->attached && dev->use_count == 0) {
 		if (!try_module_get(dev->driver->module)) {
-			rc = -ENOSYS;
+			rc = -ENXIO;
 			goto out;
 		}
 		if (dev->open) {
@@ -2777,12 +2774,6 @@
 	return dev;
 }
 
-static void comedi_free_board_minor(unsigned minor)
-{
-	BUG_ON(minor >= COMEDI_NUM_BOARD_MINORS);
-	comedi_free_board_dev(comedi_clear_board_minor(minor));
-}
-
 void comedi_release_hardware_device(struct device *hardware_device)
 {
 	int minor;
@@ -2838,12 +2829,10 @@
 
 	if (!s)
 		return;
-	if (s->minor < 0)
+	if (s->minor < COMEDI_NUM_BOARD_MINORS ||
+	    s->minor >= COMEDI_NUM_MINORS)
 		return;
 
-	BUG_ON(s->minor >= COMEDI_NUM_MINORS);
-	BUG_ON(s->minor < COMEDI_NUM_BOARD_MINORS);
-
 	i = s->minor - COMEDI_NUM_BOARD_MINORS;
 	mutex_lock(&comedi_subdevice_minor_table_lock);
 	if (s == comedi_subdevice_minor_table[i])
@@ -2857,10 +2846,13 @@
 
 static void comedi_cleanup_board_minors(void)
 {
+	struct comedi_device *dev;
 	unsigned i;
 
-	for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++)
-		comedi_free_board_minor(i);
+	for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
+		dev = comedi_clear_board_minor(i);
+		comedi_free_board_dev(dev);
+	}
 }
 
 static int __init comedi_init(void)
@@ -2932,14 +2924,7 @@
 
 static void __exit comedi_cleanup(void)
 {
-	int i;
-
 	comedi_cleanup_board_minors();
-	for (i = 0; i < COMEDI_NUM_BOARD_MINORS; ++i)
-		BUG_ON(comedi_board_minor_table[i]);
-	for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i)
-		BUG_ON(comedi_subdevice_minor_table[i]);
-
 	class_destroy(comedi_class);
 	cdev_del(&comedi_cdev);
 	unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0), COMEDI_NUM_MINORS);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index ed0b60c..b03bc66 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -820,7 +820,7 @@
 			 "driver '%s' does not support attach using comedi_config\n",
 			 driv->driver_name);
 		module_put(driv->module);
-		ret = -ENOSYS;
+		ret = -EIO;
 		goto out;
 	}
 	dev->driver = driv;
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
index fa99c8c..f0c0d58 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
@@ -1,22 +1,3 @@
-/* Digital Input IRQ Function Selection */
-#define APCI1564_DI_INT_OR				(0 << 1)
-#define APCI1564_DI_INT_AND				(1 << 1)
-
-/* Digital Input Interrupt Enable Disable. */
-#define APCI1564_DI_INT_ENABLE				0x4
-#define APCI1564_DI_INT_DISABLE				0xfffffffb
-
-/* Digital Output Interrupt Enable Disable. */
-#define APCI1564_DO_VCC_INT_ENABLE			0x1
-#define APCI1564_DO_VCC_INT_DISABLE			0xfffffffe
-#define APCI1564_DO_CC_INT_ENABLE			0x2
-#define APCI1564_DO_CC_INT_DISABLE			0xfffffffd
-
-/* TIMER COUNTER WATCHDOG DEFINES */
-#define ADDIDATA_TIMER					0
-#define ADDIDATA_COUNTER				1
-#define ADDIDATA_WATCHDOG				2
-
 static int apci1564_timer_insn_config(struct comedi_device *dev,
 				      struct comedi_subdevice *s,
 				      struct comedi_insn *insn,
@@ -27,15 +8,16 @@
 
 	devpriv->tsk_current = current;
 
-	/* First Stop The Timer */
+	/* Stop the timer */
 	ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
-	ctrl &= 0xfffff9fe;
-	/* Stop The Timer */
+	ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+		  ADDI_TCW_CTRL_ENA);
 	outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
 
 	if (data[1] == 1) {
 		/* Enable timer int & disable all the other int sources */
-		outl(0x02, devpriv->timer + ADDI_TCW_CTRL_REG);
+		outl(ADDI_TCW_CTRL_IRQ_ENA,
+		     devpriv->timer + ADDI_TCW_CTRL_REG);
 		outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
 		outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
 		outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
@@ -59,9 +41,11 @@
 	outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
 
 	ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
-	ctrl &= 0xfff719e2;
-	ctrl |= (2 << 13) | 0x10;
-	/* mode 2 */
+	ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
+		  ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+		  ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
+		  ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
+	ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
 	outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
 
 	return insn->n;
@@ -76,13 +60,13 @@
 	unsigned int ctrl;
 
 	ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+	ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
 	switch (data[1]) {
 	case 0:	/* Stop The Timer */
-		ctrl &= 0xfffff9fe;
+		ctrl &= ~ADDI_TCW_CTRL_ENA;
 		break;
 	case 1:	/* Enable the Timer */
-		ctrl &= 0xfffff9ff;
-		ctrl |= 0x1;
+		ctrl |= ADDI_TCW_CTRL_ENA;
 		break;
 	}
 	outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
@@ -98,7 +82,8 @@
 	struct apci1564_private *devpriv = dev->private;
 
 	/* Stores the status of the Timer */
-	data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) & 0x1;
+	data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) &
+		  ADDI_TCW_STATUS_OVERFLOW;
 
 	/* Stores the Actual value of the Timer */
 	data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
@@ -118,35 +103,34 @@
 
 	devpriv->tsk_current = current;
 
-	/* First Stop The Counter */
-	ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
-	ctrl &= 0xfffff9fe;
 	/* Stop The Timer */
+	ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+	ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+		  ADDI_TCW_CTRL_ENA);
 	outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
 
 	/* Set the reload value */
 	outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
 
-	/* Set the mode :             */
-	/* - Disable the hardware     */
-	/* - Disable the counter mode */
-	/* - Disable the warning      */
-	/* - Disable the reset        */
-	/* - Disable the timer mode   */
-	/* - Enable the counter mode  */
-
-	ctrl &= 0xfffc19e2;
-	ctrl |= 0x80000 | (data[4] << 16);
+	/* Set the mode */
+	ctrl &= ~(ADDI_TCW_CTRL_EXT_CLK_MASK | ADDI_TCW_CTRL_MODE_MASK |
+		  ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
+		  ADDI_TCW_CTRL_WARN_ENA);
+	ctrl |= ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE(data[4]);
 	outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
 
 	/* Enable or Disable Interrupt */
-	ctrl &= 0xfffff9fd;
-	ctrl |= (data[1] << 1);
+	if (data[1])
+		ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
+	else
+		ctrl &= ~ADDI_TCW_CTRL_IRQ_ENA;
 	outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
 
 	/* Set the Up/Down selection */
-	ctrl &= 0xfffbf9ff;
-	ctrl |= (data[6] << 18);
+	if (data[6])
+		ctrl |= ADDI_TCW_CTRL_CNT_UP;
+	else
+		ctrl &= ~ADDI_TCW_CTRL_CNT_UP;
 	outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
 
 	return insn->n;
@@ -163,17 +147,16 @@
 	unsigned int ctrl;
 
 	ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
+	ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
 	switch (data[1]) {
 	case 0:	/* Stops the Counter subdevice */
 		ctrl = 0;
 		break;
 	case 1:	/* Start the Counter subdevice */
-		ctrl &= 0xfffff9ff;
-		ctrl |= 0x1;
+		ctrl |= ADDI_TCW_CTRL_ENA;
 		break;
 	case 2:	/* Clears the Counter subdevice */
-		ctrl &= 0xfffff9ff;
-		ctrl |= 0x400;
+		ctrl |= ADDI_TCW_CTRL_GATE;
 		break;
 	}
 	outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
@@ -195,10 +178,10 @@
 	data[0] = inl(iobase + ADDI_TCW_VAL_REG);
 
 	status = inl(iobase + ADDI_TCW_STATUS_REG);
-	data[1] = (status >> 1) & 1;	/* software trigger status */
-	data[2] = (status >> 2) & 1;	/* hardware trigger status */
-	data[3] = (status >> 3) & 1;	/* software clear status */
-	data[4] = (status >> 0) & 1;	/* overflow status */
+	data[1] = (status & ADDI_TCW_STATUS_SOFT_TRIG) ? 1 : 0;
+	data[2] = (status & ADDI_TCW_STATUS_HARDWARE_TRIG) ? 1 : 0;
+	data[3] = (status & ADDI_TCW_STATUS_SOFT_CLR) ? 1 : 0;
+	data[4] = (status & ADDI_TCW_STATUS_OVERFLOW) ? 1 : 0;
 
 	return insn->n;
 }
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
index 1f2f781..3757074 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3501.c
@@ -22,54 +22,50 @@
 				      unsigned int *data)
 {
 	struct apci3501_private *devpriv = dev->private;
-	unsigned int ul_Command1 = 0;
+	unsigned int ctrl;
+
+	if (data[0] != ADDIDATA_WATCHDOG &&
+	    data[0] != ADDIDATA_TIMER)
+		return -EINVAL;
 
 	devpriv->tsk_Current = current;
-	if (data[0] == ADDIDATA_WATCHDOG) {
 
-		devpriv->b_TimerSelectMode = ADDIDATA_WATCHDOG;
-		/* Disable the watchdog */
-		outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
+	devpriv->timer_mode = data[0];
 
-		if (data[1] == 1) {
-			/* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
-			outl(0x02, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		} else {
-			/* disable Timer interrupt */
-			outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		}
-
-		outl(data[2], dev->iobase + APCI3501_TIMER_TIMEBASE_REG);
-		outl(data[3], dev->iobase + APCI3501_TIMER_RELOAD_REG);
-
-		/* Set the mode (e2->e0) */
-		ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG) | 0xFFF819E0UL;
-		outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+	/* first, disable the watchdog or stop the timer */
+	if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
+		ctrl = 0;
+	} else {
+		ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+		ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+			  ADDI_TCW_CTRL_ENA);
 	}
+	outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
 
-	else if (data[0] == ADDIDATA_TIMER) {
-		/* First Stop The Timer */
-		ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-		ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
-		outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		devpriv->b_TimerSelectMode = ADDIDATA_TIMER;
-		if (data[1] == 1) {
-			/* Enable TIMER int & DISABLE ALL THE OTHER int SOURCES */
-			outl(0x02, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		} else {
-			/* disable Timer interrupt */
-			outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		}
+	/* enable/disable the timer interrupt */
+	ctrl = (data[1] == 1) ? ADDI_TCW_CTRL_IRQ_ENA : 0;
+	outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
 
-		outl(data[2], dev->iobase + APCI3501_TIMER_TIMEBASE_REG);
-		outl(data[3], dev->iobase + APCI3501_TIMER_RELOAD_REG);
+	outl(data[2], devpriv->tcw + ADDI_TCW_TIMEBASE_REG);
+	outl(data[3], devpriv->tcw + ADDI_TCW_RELOAD_REG);
 
+	ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+	if (devpriv->timer_mode == ADDIDATA_WATCHDOG) {
+		/* Set the mode (e2->e0) NOTE: this doesn't look correct */
+		ctrl |= ~(ADDI_TCW_CTRL_CNT_UP | ADDI_TCW_CTRL_EXT_CLK_MASK |
+			  ADDI_TCW_CTRL_MODE_MASK | ADDI_TCW_CTRL_GATE |
+			  ADDI_TCW_CTRL_TRIG | ADDI_TCW_CTRL_TIMER_ENA |
+			  ADDI_TCW_CTRL_RESET_ENA | ADDI_TCW_CTRL_WARN_ENA |
+			  ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_ENA);
+	} else {
 		/* mode 2 */
-		ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-		ul_Command1 =
-			(ul_Command1 & 0xFFF719E2UL) | 2UL << 13UL | 0x10UL;
-		outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+		ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
+			  ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+			  ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
+			  ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
+		ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
 	}
+	outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
 
 	return insn->n;
 }
@@ -92,49 +88,27 @@
 				     unsigned int *data)
 {
 	struct apci3501_private *devpriv = dev->private;
-	unsigned int ul_Command1 = 0;
+	unsigned int ctrl;
 
-	if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
+	if (devpriv->timer_mode == ADDIDATA_WATCHDOG ||
+	    devpriv->timer_mode == ADDIDATA_TIMER) {
+		ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+		ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
 
-		if (data[1] == 1) {
-			ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-			ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
-			/* Enable the Watchdog */
-			outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		} else if (data[1] == 0) { /* Stop The Watchdog */
-			ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-			ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
-			outl(0x0, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		} else if (data[1] == 2) {
-			ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-			ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x200UL;
-			outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+		if (data[1] == 1) {		/* enable */
+			ctrl |= ADDI_TCW_CTRL_ENA;
+		} else if (data[1] == 0) {	/* stop */
+			if (devpriv->timer_mode == ADDIDATA_WATCHDOG)
+				ctrl = 0;
+			else
+				ctrl &= ~ADDI_TCW_CTRL_ENA;
+		} else if (data[1] == 2) {	/* trigger */
+			ctrl |= ADDI_TCW_CTRL_TRIG;
 		}
+		outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
 	}
 
-	if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
-		if (data[1] == 1) {
-
-			ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-			ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x1UL;
-			/* Enable the Timer */
-			outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		} else if (data[1] == 0) {
-			/* Stop The Timer */
-			ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-			ul_Command1 = ul_Command1 & 0xFFFFF9FEUL;
-			outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		}
-
-		else if (data[1] == 2) {
-			/* Trigger the Timer */
-			ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-			ul_Command1 = (ul_Command1 & 0xFFFFF9FFUL) | 0x200UL;
-			outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
-		}
-	}
-
-	inl(dev->iobase + APCI3501_TIMER_STATUS_REG);
+	inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
 	return insn->n;
 }
 
@@ -155,19 +129,13 @@
 {
 	struct apci3501_private *devpriv = dev->private;
 
-	if (devpriv->b_TimerSelectMode == ADDIDATA_WATCHDOG) {
-		data[0] = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
-		data[1] = inl(dev->iobase + APCI3501_TIMER_SYNC_REG);
-	}
+	if (devpriv->timer_mode != ADDIDATA_TIMER &&
+	    devpriv->timer_mode != ADDIDATA_WATCHDOG)
+		return -EINVAL;
 
-	else if (devpriv->b_TimerSelectMode == ADDIDATA_TIMER) {
-		data[0] = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
-		data[1] = inl(dev->iobase + APCI3501_TIMER_SYNC_REG);
-	}
+	data[0] = inl(devpriv->tcw + ADDI_TCW_STATUS_REG) &
+		  ADDI_TCW_STATUS_OVERFLOW;
+	data[1] = inl(devpriv->tcw + ADDI_TCW_VAL_REG);
 
-	else if ((devpriv->b_TimerSelectMode != ADDIDATA_TIMER)
-		&& (devpriv->b_TimerSelectMode != ADDIDATA_WATCHDOG)) {
-		dev_err(dev->class_dev, "Invalid subdevice.\n");
-	}
 	return insn->n;
 }
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 33e58b9..f1ccfbd 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -44,12 +44,12 @@
  *   0x48 - 0x64  Timer 12-Bit
  */
 #define APCI1564_EEPROM_REG			0x00
-#define APCI1564_EEPROM_VCC_STATUS		(1 << 8)
+#define APCI1564_EEPROM_VCC_STATUS		BIT(8)
 #define APCI1564_EEPROM_TO_REV(x)		(((x) >> 4) & 0xf)
-#define APCI1564_EEPROM_DI			(1 << 3)
-#define APCI1564_EEPROM_DO			(1 << 2)
-#define APCI1564_EEPROM_CS			(1 << 1)
-#define APCI1564_EEPROM_CLK			(1 << 0)
+#define APCI1564_EEPROM_DI			BIT(3)
+#define APCI1564_EEPROM_DO			BIT(2)
+#define APCI1564_EEPROM_CS			BIT(1)
+#define APCI1564_EEPROM_CLK			BIT(0)
 #define APCI1564_REV1_TIMER_IOBASE		0x04
 #define APCI1564_REV2_MAIN_IOBASE		0x04
 #define APCI1564_REV2_TIMER_IOBASE		0x48
@@ -79,10 +79,17 @@
 #define APCI1564_DI_INT_MODE2_REG		0x08
 #define APCI1564_DI_INT_STATUS_REG		0x0c
 #define APCI1564_DI_IRQ_REG			0x10
+#define APCI1564_DI_IRQ_ENA			BIT(2)
+#define APCI1564_DI_IRQ_MODE			BIT(1)	/* 1=AND, 0=OR */
 #define APCI1564_DO_REG				0x14
 #define APCI1564_DO_INT_CTRL_REG		0x18
+#define APCI1564_DO_INT_CTRL_CC_INT_ENA		BIT(1)
+#define APCI1564_DO_INT_CTRL_VCC_INT_ENA	BIT(0)
 #define APCI1564_DO_INT_STATUS_REG		0x1c
+#define APCI1564_DO_INT_STATUS_CC		BIT(1)
+#define APCI1564_DO_INT_STATUS_VCC		BIT(0)
 #define APCI1564_DO_IRQ_REG			0x20
+#define APCI1564_DO_IRQ_INTR			BIT(0)
 #define APCI1564_WDOG_REG			0x24
 #define APCI1564_WDOG_RELOAD_REG		0x28
 #define APCI1564_WDOG_TIMEBASE_REG		0x2c
@@ -159,9 +166,9 @@
 	unsigned int chan;
 
 	status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
-	if (status & APCI1564_DI_INT_ENABLE) {
+	if (status & APCI1564_DI_IRQ_ENA) {
 		/* disable the interrupt */
-		outl(status & APCI1564_DI_INT_DISABLE,
+		outl(status & ~APCI1564_DI_IRQ_ENA,
 		     dev->iobase + APCI1564_DI_IRQ_REG);
 
 		s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
@@ -300,11 +307,9 @@
 			outl(0x0, dev->iobase + APCI1564_DI_INT_MODE2_REG);
 			break;
 		case COMEDI_DIGITAL_TRIG_ENABLE_EDGES:
-			if (devpriv->ctrl != (APCI1564_DI_INT_ENABLE |
-					      APCI1564_DI_INT_OR)) {
+			if (devpriv->ctrl != APCI1564_DI_IRQ_ENA) {
 				/* switching to 'OR' mode */
-				devpriv->ctrl = APCI1564_DI_INT_ENABLE |
-						APCI1564_DI_INT_OR;
+				devpriv->ctrl = APCI1564_DI_IRQ_ENA;
 				/* wipe old channels */
 				devpriv->mode1 = 0;
 				devpriv->mode2 = 0;
@@ -318,11 +323,11 @@
 			devpriv->mode2 |= data[5] << shift;
 			break;
 		case COMEDI_DIGITAL_TRIG_ENABLE_LEVELS:
-			if (devpriv->ctrl != (APCI1564_DI_INT_ENABLE |
-					      APCI1564_DI_INT_AND)) {
+			if (devpriv->ctrl != (APCI1564_DI_IRQ_ENA |
+					      APCI1564_DI_IRQ_MODE)) {
 				/* switching to 'AND' mode */
-				devpriv->ctrl = APCI1564_DI_INT_ENABLE |
-						APCI1564_DI_INT_AND;
+				devpriv->ctrl = APCI1564_DI_IRQ_ENA |
+						APCI1564_DI_IRQ_MODE;
 				/* wipe old channels */
 				devpriv->mode1 = 0;
 				devpriv->mode2 = 0;
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index 73786a3..40ff914 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -27,27 +27,21 @@
 #include <linux/sched.h>
 
 #include "../comedi_pci.h"
+#include "addi_tcw.h"
 #include "amcc_s5933.h"
 
 /*
  * PCI bar 1 register I/O map
  */
 #define APCI3501_AO_CTRL_STATUS_REG		0x00
-#define APCI3501_AO_CTRL_BIPOLAR		(1 << 0)
-#define APCI3501_AO_STATUS_READY		(1 << 8)
+#define APCI3501_AO_CTRL_BIPOLAR		BIT(0)
+#define APCI3501_AO_STATUS_READY		BIT(8)
 #define APCI3501_AO_DATA_REG			0x04
 #define APCI3501_AO_DATA_CHAN(x)		((x) << 0)
 #define APCI3501_AO_DATA_VAL(x)			((x) << 8)
-#define APCI3501_AO_DATA_BIPOLAR		(1 << 31)
+#define APCI3501_AO_DATA_BIPOLAR		BIT(31)
 #define APCI3501_AO_TRIG_SCS_REG		0x08
-#define APCI3501_TIMER_SYNC_REG			0x20
-#define APCI3501_TIMER_RELOAD_REG		0x24
-#define APCI3501_TIMER_TIMEBASE_REG		0x28
-#define APCI3501_TIMER_CTRL_REG			0x2c
-#define APCI3501_TIMER_STATUS_REG		0x30
-#define APCI3501_TIMER_IRQ_REG			0x34
-#define APCI3501_TIMER_WARN_RELOAD_REG		0x38
-#define APCI3501_TIMER_WARN_TIMEBASE_REG	0x3c
+#define APCI3501_TIMER_BASE			0x20
 #define APCI3501_DO_REG				0x40
 #define APCI3501_DI_REG				0x50
 
@@ -72,9 +66,10 @@
 #define EEPROM_TIMER_WATCHDOG_COUNTER	10
 
 struct apci3501_private {
-	int i_IobaseAmcc;
+	unsigned long amcc;
+	unsigned long tcw;
 	struct task_struct *tsk_Current;
-	unsigned char b_TimerSelectMode;
+	unsigned char timer_mode;
 };
 
 static struct comedi_lrange apci3501_ao_range = {
@@ -222,11 +217,10 @@
 static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
 {
 	struct apci3501_private *devpriv = dev->private;
-	unsigned long iobase = devpriv->i_IobaseAmcc;
 	unsigned char nfuncs;
 	int i;
 
-	nfuncs = apci3501_eeprom_readw(iobase, 10) & 0xff;
+	nfuncs = apci3501_eeprom_readw(devpriv->amcc, 10) & 0xff;
 
 	/* Read functionality details */
 	for (i = 0; i < nfuncs; i++) {
@@ -235,11 +229,11 @@
 		unsigned char func;
 		unsigned short val;
 
-		func = apci3501_eeprom_readw(iobase, 12 + offset) & 0x3f;
-		addr = apci3501_eeprom_readw(iobase, 14 + offset);
+		func = apci3501_eeprom_readw(devpriv->amcc, 12 + offset) & 0x3f;
+		addr = apci3501_eeprom_readw(devpriv->amcc, 14 + offset);
 
 		if (func == EEPROM_ANALOGOUTPUT) {
-			val = apci3501_eeprom_readw(iobase, addr + 10);
+			val = apci3501_eeprom_readw(devpriv->amcc, addr + 10);
 			return (val >> 4) & 0x3ff;
 		}
 	}
@@ -254,7 +248,7 @@
 	struct apci3501_private *devpriv = dev->private;
 	unsigned short addr = CR_CHAN(insn->chanspec);
 
-	data[0] = apci3501_eeprom_readw(devpriv->i_IobaseAmcc, 2 * addr);
+	data[0] = apci3501_eeprom_readw(devpriv->amcc, 2 * addr);
 
 	return insn->n;
 }
@@ -263,26 +257,29 @@
 {
 	struct comedi_device *dev = d;
 	struct apci3501_private *devpriv = dev->private;
-	unsigned int ui_Timer_AOWatchdog;
-	unsigned long ul_Command1;
+	unsigned int status;
+	unsigned int ctrl;
 
 	/*  Disable Interrupt */
-	ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-	ul_Command1 = ul_Command1 & 0xFFFFF9FDul;
-	outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
+	ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+	ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+		  ADDI_TCW_CTRL_IRQ_ENA);
+	outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
 
-	ui_Timer_AOWatchdog = inl(dev->iobase + APCI3501_TIMER_IRQ_REG) & 0x1;
-	if ((!ui_Timer_AOWatchdog)) {
+	status = inl(devpriv->tcw + ADDI_TCW_IRQ_REG);
+	if (!(status & ADDI_TCW_IRQ)) {
 		dev_err(dev->class_dev, "IRQ from unknown source\n");
 		return IRQ_NONE;
 	}
 
 	/* Enable the interrupt and send a signal from kernel to user space */
 	send_sig(SIGIO, devpriv->tsk_Current, 0);
-	ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
-	ul_Command1 = (ul_Command1 & 0xFFFFF9FDul) | 1 << 1;
-	outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
-	inl(dev->iobase + APCI3501_TIMER_STATUS_REG);
+	ctrl = inl(devpriv->tcw + ADDI_TCW_CTRL_REG);
+	ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
+		  ADDI_TCW_CTRL_IRQ_ENA);
+	ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
+	outl(ctrl, devpriv->tcw + ADDI_TCW_CTRL_REG);
+	inl(devpriv->tcw + ADDI_TCW_STATUS_REG);
 
 	return IRQ_HANDLED;
 }
@@ -334,8 +331,9 @@
 	if (ret)
 		return ret;
 
+	devpriv->amcc = pci_resource_start(pcidev, 0);
 	dev->iobase = pci_resource_start(pcidev, 1);
-	devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
+	devpriv->tcw = dev->iobase + APCI3501_TIMER_BASE;
 
 	ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
 
diff --git a/drivers/staging/comedi/drivers/addi_tcw.h b/drivers/staging/comedi/drivers/addi_tcw.h
index 8794d4c..db6d5a4 100644
--- a/drivers/staging/comedi/drivers/addi_tcw.h
+++ b/drivers/staging/comedi/drivers/addi_tcw.h
@@ -10,44 +10,51 @@
 #define ADDI_TCW_VAL_REG		0x00
 
 #define ADDI_TCW_SYNC_REG		0x00
-#define ADDI_TCW_SYNC_CTR_TRIG		(1 << 8)
-#define ADDI_TCW_SYNC_CTR_DIS		(1 << 7)
-#define ADDI_TCW_SYNC_CTR_ENA		(1 << 6)
-#define ADDI_TCW_SYNC_TIMER_TRIG	(1 << 5)
-#define ADDI_TCW_SYNC_TIMER_DIS		(1 << 4)
-#define ADDI_TCW_SYNC_TIMER_ENA		(1 << 3)
-#define ADDI_TCW_SYNC_WDOG_TRIG		(1 << 2)
-#define ADDI_TCW_SYNC_WDOG_DIS		(1 << 1)
-#define ADDI_TCW_SYNC_WDOG_ENA		(1 << 0)
+#define ADDI_TCW_SYNC_CTR_TRIG		BIT(8)
+#define ADDI_TCW_SYNC_CTR_DIS		BIT(7)
+#define ADDI_TCW_SYNC_CTR_ENA		BIT(6)
+#define ADDI_TCW_SYNC_TIMER_TRIG	BIT(5)
+#define ADDI_TCW_SYNC_TIMER_DIS		BIT(4)
+#define ADDI_TCW_SYNC_TIMER_ENA		BIT(3)
+#define ADDI_TCW_SYNC_WDOG_TRIG		BIT(2)
+#define ADDI_TCW_SYNC_WDOG_DIS		BIT(1)
+#define ADDI_TCW_SYNC_WDOG_ENA		BIT(0)
 
 #define ADDI_TCW_RELOAD_REG		0x04
 
 #define ADDI_TCW_TIMEBASE_REG		0x08
 
 #define ADDI_TCW_CTRL_REG		0x0c
-#define ADDI_TCW_CTRL_EXT_CLK_STATUS	(1 << 21)
-#define ADDI_TCW_CTRL_CASCADE		(1 << 20)
-#define ADDI_TCW_CTRL_CNTR_ENA		(1 << 19)
-#define ADDI_TCW_CTRL_CNT_UP		(1 << 18)
-#define ADDI_TCW_CTRL_EXT_CLK(x)	((x) << 16)
-#define ADDI_TCW_CTRL_OUT(x)		((x) << 11)
-#define ADDI_TCW_CTRL_GATE		(1 << 10)
-#define ADDI_TCW_CTRL_TRIG		(1 << 9)
-#define ADDI_TCW_CTRL_EXT_GATE(x)	((x) << 7)
-#define ADDI_TCW_CTRL_EXT_TRIG(x)	((x) << 5)
-#define ADDI_TCW_CTRL_TIMER_ENA		(1 << 4)
-#define ADDI_TCW_CTRL_RESET_ENA		(1 << 3)
-#define ADDI_TCW_CTRL_WARN_ENA		(1 << 2)
-#define ADDI_TCW_CTRL_IRQ_ENA		(1 << 1)
-#define ADDI_TCW_CTRL_ENA		(1 << 0)
+#define ADDI_TCW_CTRL_EXT_CLK_STATUS	BIT(21)
+#define ADDI_TCW_CTRL_CASCADE		BIT(20)
+#define ADDI_TCW_CTRL_CNTR_ENA		BIT(19)
+#define ADDI_TCW_CTRL_CNT_UP		BIT(18)
+#define ADDI_TCW_CTRL_EXT_CLK(x)	(((x) & 3) << 16)
+#define ADDI_TCW_CTRL_EXT_CLK_MASK	ADDI_TCW_CTRL_EXT_CLK(3)
+#define ADDI_TCW_CTRL_MODE(x)		(((x) & 7) << 13)
+#define ADDI_TCW_CTRL_MODE_MASK		ADDI_TCW_CTRL_MODE(7)
+#define ADDI_TCW_CTRL_OUT(x)		(((x) & 3) << 11)
+#define ADDI_TCW_CTRL_OUT_MASK		ADDI_TCW_CTRL_OUT(3)
+#define ADDI_TCW_CTRL_GATE		BIT(10)
+#define ADDI_TCW_CTRL_TRIG		BIT(9)
+#define ADDI_TCW_CTRL_EXT_GATE(x)	(((x) & 3) << 7)
+#define ADDI_TCW_CTRL_EXT_GATE_MASK	ADDI_TCW_CTRL_EXT_GATE(3)
+#define ADDI_TCW_CTRL_EXT_TRIG(x)	(((x) & 3) << 5)
+#define ADDI_TCW_CTRL_EXT_TRIG_MASK	ADDI_TCW_CTRL_EXT_TRIG(3)
+#define ADDI_TCW_CTRL_TIMER_ENA		BIT(4)
+#define ADDI_TCW_CTRL_RESET_ENA		BIT(3)
+#define ADDI_TCW_CTRL_WARN_ENA		BIT(2)
+#define ADDI_TCW_CTRL_IRQ_ENA		BIT(1)
+#define ADDI_TCW_CTRL_ENA		BIT(0)
 
 #define ADDI_TCW_STATUS_REG		0x10
-#define ADDI_TCW_STATUS_SOFT_CLR	(1 << 3)
-#define ADDI_TCW_STATUS_SOFT_TRIG	(1 << 1)
-#define ADDI_TCW_STATUS_OVERFLOW	(1 << 0)
+#define ADDI_TCW_STATUS_SOFT_CLR	BIT(3)
+#define ADDI_TCW_STATUS_HARDWARE_TRIG	BIT(2)
+#define ADDI_TCW_STATUS_SOFT_TRIG	BIT(1)
+#define ADDI_TCW_STATUS_OVERFLOW	BIT(0)
 
 #define ADDI_TCW_IRQ_REG		0x14
-#define ADDI_TCW_IRQ			(1 << 0)
+#define ADDI_TCW_IRQ			BIT(0)
 
 #define ADDI_TCW_WARN_TIMEVAL_REG	0x18
 
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 934af3f..b0fc027 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -120,8 +120,20 @@
 {
 	unsigned long reg = (unsigned long)s->private;
 
-	if (comedi_dio_update_state(s, data))
-		outl(s->state, dev->iobase + reg);
+	if (comedi_dio_update_state(s, data)) {
+		unsigned int val = s->state;
+
+		if (s->n_chan == 16) {
+			/*
+			 * It seems the PCI-7230 needs the 16-bit DO state
+			 * to be shifted left by 16 bits before being written
+			 * to the 32-bit register.  Set the value in both
+			 * halves of the register to be sure.
+			 */
+			val |= val << 16;
+		}
+		outl(val, dev->iobase + reg);
+	}
 
 	data[1] = s->state;
 
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 4ebf5aa..47e3839 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -141,11 +141,13 @@
  * jumper-settable on the board. The settings are not software-readable.
  */
 static const struct comedi_lrange cb_pcimdas_ao_range = {
-	4, {
+	6, {
 		BIP_RANGE(10),
 		BIP_RANGE(5),
 		UNI_RANGE(10),
-		UNI_RANGE(5)
+		UNI_RANGE(5),
+		RANGE_ext(-1, 1),
+		RANGE_ext(0, 1)
 	}
 };
 
diff --git a/drivers/staging/comedi/drivers/dac02.c b/drivers/staging/comedi/drivers/dac02.c
index a6798ad..a562df4 100644
--- a/drivers/staging/comedi/drivers/dac02.c
+++ b/drivers/staging/comedi/drivers/dac02.c
@@ -130,11 +130,7 @@
 	s->range_table	= &das02_ao_ranges;
 	s->insn_write	= dac02_ao_insn_write;
 
-	ret = comedi_alloc_subdev_readback(s);
-	if (ret)
-		return ret;
-
-	return 0;
+	return comedi_alloc_subdev_readback(s);
 }
 
 static struct comedi_driver dac02_driver = {
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 93fab68..9c02b17 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -108,7 +108,7 @@
 };
 module_comedi_pcmcia_driver(driver_das08_cs, das08_cs_driver);
 
-MODULE_AUTHOR("David A. Schleef <ds@schleef.org>, "
-	      "Frank Mori Hess <fmhess@users.sourceforge.net>");
+MODULE_AUTHOR("David A. Schleef <ds@schleef.org>");
+MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
 MODULE_DESCRIPTION("Comedi driver for ComputerBoards DAS-08 PCMCIA boards");
 MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index d7cf4b1..056bca9 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -1032,8 +1032,7 @@
 
 	/*  check that clock setting is valid */
 	if (it->options[3]) {
-		if (it->options[3] != 0 &&
-		    it->options[3] != 1 && it->options[3] != 10) {
+		if (it->options[3] != 1 && it->options[3] != 10) {
 			dev_err(dev->class_dev,
 				"Invalid option. Master clock must be set to 1 or 10 (MHz)\n");
 			return -EINVAL;
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index a18a887..3a37373 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -69,18 +69,18 @@
 
     "cio-das16/m1"
 
-  0	a/d bits 0-3, mux		start 12 bit
-  1	a/d bits 4-11		unused
-  2	status		control
-  3	di 4 bit		do 4 bit
-  4	unused			clear interrupt
-  5	interrupt, pacer
-  6	channel/gain queue address
-  7	channel/gain queue data
-  89ab	8254
-  cdef	8254
-  400	8255
-  404-407 	8254
+  0		a/d bits 0-3, mux		start 12 bit
+  1		a/d bits 4-11		unused
+  2		status		control
+  3		di 4 bit		do 4 bit
+  4		unused			clear interrupt
+  5		interrupt, pacer
+  6		channel/gain queue address
+  7		channel/gain queue data
+  89ab		8254
+  cdef		8254
+  400		8255
+  404-407	8254
 
 */
 
@@ -411,15 +411,18 @@
 	hw_counter = comedi_8254_read(devpriv->counter, 1);
 	/* make sure hardware counter reading is not bogus due to initial value
 	 * not having been loaded yet */
-	if (devpriv->adc_count == 0 && hw_counter == devpriv->initial_hw_count) {
+	if (devpriv->adc_count == 0 &&
+	    hw_counter == devpriv->initial_hw_count) {
 		num_samples = 0;
 	} else {
-		/* The calculation of num_samples looks odd, but it uses the following facts.
-		 * 16 bit hardware counter is initialized with value of zero (which really
-		 * means 0x1000).  The counter decrements by one on each conversion
-		 * (when the counter decrements from zero it goes to 0xffff).  num_samples
-		 * is a 16 bit variable, so it will roll over in a similar fashion to the
-		 * hardware counter.  Work it out, and this is what you get. */
+		/* The calculation of num_samples looks odd, but it uses the
+		 * following facts. 16 bit hardware counter is initialized with
+		 * value of zero (which really means 0x1000).  The counter
+		 * decrements by one on each conversion (when the counter
+		 * decrements from zero it goes to 0xffff).  num_samples is a
+		 * 16 bit variable, so it will roll over in a similar fashion
+		 * to the hardware counter.  Work it out, and this is what you
+		 * get. */
 		num_samples = -hw_counter - devpriv->adc_count;
 	}
 	/*  check if we only need some of the points */
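For reference, a minimal standalone sketch of the wrap-around arithmetic described in the comment above (the values are made up for illustration; this is not driver code):

#include <stdio.h>

int main(void)
{
	/* the hardware counter decrements from its initial load on every conversion */
	unsigned short hw_counter = (unsigned short)(0 - 300);	/* after 300 conversions */
	unsigned short adc_count = 256;				/* samples already consumed */
	/*
	 * In 16-bit unsigned arithmetic -hw_counter recovers the total number
	 * of conversions; subtracting the samples already consumed leaves the
	 * new ones, exactly as in the driver's num_samples calculation.
	 */
	unsigned short num_samples = -hw_counter - adc_count;

	printf("%u new samples\n", num_samples);	/* prints 44 */
	return 0;
}
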
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index bb2883c..958c0d4 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -607,11 +607,7 @@
 
 	/* Digital I/O subdevice */
 	s = &dev->subdevices[2];
-	ret = subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
-	if (ret)
-		return ret;
-
-	return 0;
+	return subdev_8255_init(dev, s, dmm32at_8255_io, DMM32AT_8255_IOBASE);
 }
 
 static struct comedi_driver dmm32at_driver = {
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index e1f4932..55cae61 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -136,11 +136,7 @@
 	s->range_table	= &range_fl512;
 	s->insn_write	= fl512_ao_insn_write;
 
-	ret = comedi_alloc_subdev_readback(s);
-	if (ret)
-		return ret;
-
-	return 0;
+	return comedi_alloc_subdev_readback(s);
 }
 
 static struct comedi_driver fl512_driver = {
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index a8f3ca4..15a5320 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -1,43 +1,41 @@
 /*
-   comedi/drivers/me4000.c
-   Source code for the Meilhaus ME-4000 board family.
-
-   COMEDI - Linux Control and Measurement Device Interface
-   Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
+ * me4000.c
+ * Source code for the Meilhaus ME-4000 board family.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  */
+
 /*
-Driver: me4000
-Description: Meilhaus ME-4000 series boards
-Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i, ME-4680is
-Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
-Updated: Mon, 18 Mar 2002 15:34:01 -0800
-Status: broken (no support for loading firmware)
-
-Supports:
-
-    - Analog Input
-    - Analog Output
-    - Digital I/O
-    - Counter
-
-Configuration Options: not applicable, uses PCI auto config
-
-The firmware required by these boards is available in the
-comedi_nonfree_firmware tarball available from
-http://www.comedi.org.  However, the driver's support for
-loading the firmware through comedi_config is currently
-broken.
-
+ * Driver: me4000
+ * Description: Meilhaus ME-4000 series boards
+ * Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i,
+ *	    ME-4680is
+ * Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
+ * Updated: Mon, 18 Mar 2002 15:34:01 -0800
+ * Status: untested
+ *
+ * Supports:
+ *	- Analog Input
+ *	- Analog Output
+ *	- Digital I/O
+ *	- Counter
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ *
+ * The firmware required by these boards is available in the
+ * comedi_nonfree_firmware tarball available from
+ * http://www.comedi.org.
  */
 
 #include <linux/module.h>
@@ -57,66 +55,61 @@
 #define ME4000_AO_CHAN(x)			((x) * 0x18)
 
 #define ME4000_AO_CTRL_REG(x)			(0x00 + ME4000_AO_CHAN(x))
-#define ME4000_AO_CTRL_BIT_MODE_0		(1 << 0)
-#define ME4000_AO_CTRL_BIT_MODE_1		(1 << 1)
-#define ME4000_AO_CTRL_MASK_MODE		(3 << 0)
-#define ME4000_AO_CTRL_BIT_STOP			(1 << 2)
-#define ME4000_AO_CTRL_BIT_ENABLE_FIFO		(1 << 3)
-#define ME4000_AO_CTRL_BIT_ENABLE_EX_TRIG	(1 << 4)
-#define ME4000_AO_CTRL_BIT_EX_TRIG_EDGE		(1 << 5)
-#define ME4000_AO_CTRL_BIT_IMMEDIATE_STOP	(1 << 7)
-#define ME4000_AO_CTRL_BIT_ENABLE_DO		(1 << 8)
-#define ME4000_AO_CTRL_BIT_ENABLE_IRQ		(1 << 9)
-#define ME4000_AO_CTRL_BIT_RESET_IRQ		(1 << 10)
+#define ME4000_AO_CTRL_MODE_0			BIT(0)
+#define ME4000_AO_CTRL_MODE_1			BIT(1)
+#define ME4000_AO_CTRL_STOP			BIT(2)
+#define ME4000_AO_CTRL_ENABLE_FIFO		BIT(3)
+#define ME4000_AO_CTRL_ENABLE_EX_TRIG		BIT(4)
+#define ME4000_AO_CTRL_EX_TRIG_EDGE		BIT(5)
+#define ME4000_AO_CTRL_IMMEDIATE_STOP		BIT(7)
+#define ME4000_AO_CTRL_ENABLE_DO		BIT(8)
+#define ME4000_AO_CTRL_ENABLE_IRQ		BIT(9)
+#define ME4000_AO_CTRL_RESET_IRQ		BIT(10)
 #define ME4000_AO_STATUS_REG(x)			(0x04 + ME4000_AO_CHAN(x))
-#define ME4000_AO_STATUS_BIT_FSM		(1 << 0)
-#define ME4000_AO_STATUS_BIT_FF			(1 << 1)
-#define ME4000_AO_STATUS_BIT_HF			(1 << 2)
-#define ME4000_AO_STATUS_BIT_EF			(1 << 3)
+#define ME4000_AO_STATUS_FSM			BIT(0)
+#define ME4000_AO_STATUS_FF			BIT(1)
+#define ME4000_AO_STATUS_HF			BIT(2)
+#define ME4000_AO_STATUS_EF			BIT(3)
 #define ME4000_AO_FIFO_REG(x)			(0x08 + ME4000_AO_CHAN(x))
 #define ME4000_AO_SINGLE_REG(x)			(0x0c + ME4000_AO_CHAN(x))
 #define ME4000_AO_TIMER_REG(x)			(0x10 + ME4000_AO_CHAN(x))
 #define ME4000_AI_CTRL_REG			0x74
 #define ME4000_AI_STATUS_REG			0x74
-#define ME4000_AI_CTRL_BIT_MODE_0		(1 << 0)
-#define ME4000_AI_CTRL_BIT_MODE_1		(1 << 1)
-#define ME4000_AI_CTRL_BIT_MODE_2		(1 << 2)
-#define ME4000_AI_CTRL_BIT_SAMPLE_HOLD		(1 << 3)
-#define ME4000_AI_CTRL_BIT_IMMEDIATE_STOP	(1 << 4)
-#define ME4000_AI_CTRL_BIT_STOP			(1 << 5)
-#define ME4000_AI_CTRL_BIT_CHANNEL_FIFO		(1 << 6)
-#define ME4000_AI_CTRL_BIT_DATA_FIFO		(1 << 7)
-#define ME4000_AI_CTRL_BIT_FULLSCALE		(1 << 8)
-#define ME4000_AI_CTRL_BIT_OFFSET		(1 << 9)
-#define ME4000_AI_CTRL_BIT_EX_TRIG_ANALOG	(1 << 10)
-#define ME4000_AI_CTRL_BIT_EX_TRIG		(1 << 11)
-#define ME4000_AI_CTRL_BIT_EX_TRIG_FALLING	(1 << 12)
-#define ME4000_AI_CTRL_BIT_EX_IRQ		(1 << 13)
-#define ME4000_AI_CTRL_BIT_EX_IRQ_RESET		(1 << 14)
-#define ME4000_AI_CTRL_BIT_LE_IRQ		(1 << 15)
-#define ME4000_AI_CTRL_BIT_LE_IRQ_RESET		(1 << 16)
-#define ME4000_AI_CTRL_BIT_HF_IRQ		(1 << 17)
-#define ME4000_AI_CTRL_BIT_HF_IRQ_RESET		(1 << 18)
-#define ME4000_AI_CTRL_BIT_SC_IRQ		(1 << 19)
-#define ME4000_AI_CTRL_BIT_SC_IRQ_RESET		(1 << 20)
-#define ME4000_AI_CTRL_BIT_SC_RELOAD		(1 << 21)
-#define ME4000_AI_STATUS_BIT_EF_CHANNEL		(1 << 22)
-#define ME4000_AI_STATUS_BIT_HF_CHANNEL		(1 << 23)
-#define ME4000_AI_STATUS_BIT_FF_CHANNEL		(1 << 24)
-#define ME4000_AI_STATUS_BIT_EF_DATA		(1 << 25)
-#define ME4000_AI_STATUS_BIT_HF_DATA		(1 << 26)
-#define ME4000_AI_STATUS_BIT_FF_DATA		(1 << 27)
-#define ME4000_AI_STATUS_BIT_LE			(1 << 28)
-#define ME4000_AI_STATUS_BIT_FSM		(1 << 29)
-#define ME4000_AI_CTRL_BIT_EX_TRIG_BOTH		(1 << 31)
+#define ME4000_AI_CTRL_MODE_0			BIT(0)
+#define ME4000_AI_CTRL_MODE_1			BIT(1)
+#define ME4000_AI_CTRL_MODE_2			BIT(2)
+#define ME4000_AI_CTRL_SAMPLE_HOLD		BIT(3)
+#define ME4000_AI_CTRL_IMMEDIATE_STOP		BIT(4)
+#define ME4000_AI_CTRL_STOP			BIT(5)
+#define ME4000_AI_CTRL_CHANNEL_FIFO		BIT(6)
+#define ME4000_AI_CTRL_DATA_FIFO		BIT(7)
+#define ME4000_AI_CTRL_FULLSCALE		BIT(8)
+#define ME4000_AI_CTRL_OFFSET			BIT(9)
+#define ME4000_AI_CTRL_EX_TRIG_ANALOG		BIT(10)
+#define ME4000_AI_CTRL_EX_TRIG			BIT(11)
+#define ME4000_AI_CTRL_EX_TRIG_FALLING		BIT(12)
+#define ME4000_AI_CTRL_EX_IRQ			BIT(13)
+#define ME4000_AI_CTRL_EX_IRQ_RESET		BIT(14)
+#define ME4000_AI_CTRL_LE_IRQ			BIT(15)
+#define ME4000_AI_CTRL_LE_IRQ_RESET		BIT(16)
+#define ME4000_AI_CTRL_HF_IRQ			BIT(17)
+#define ME4000_AI_CTRL_HF_IRQ_RESET		BIT(18)
+#define ME4000_AI_CTRL_SC_IRQ			BIT(19)
+#define ME4000_AI_CTRL_SC_IRQ_RESET		BIT(20)
+#define ME4000_AI_CTRL_SC_RELOAD		BIT(21)
+#define ME4000_AI_STATUS_EF_CHANNEL		BIT(22)
+#define ME4000_AI_STATUS_HF_CHANNEL		BIT(23)
+#define ME4000_AI_STATUS_FF_CHANNEL		BIT(24)
+#define ME4000_AI_STATUS_EF_DATA		BIT(25)
+#define ME4000_AI_STATUS_HF_DATA		BIT(26)
+#define ME4000_AI_STATUS_FF_DATA		BIT(27)
+#define ME4000_AI_STATUS_LE			BIT(28)
+#define ME4000_AI_STATUS_FSM			BIT(29)
+#define ME4000_AI_CTRL_EX_TRIG_BOTH		BIT(31)
 #define ME4000_AI_CHANNEL_LIST_REG		0x78
-#define ME4000_AI_LIST_INPUT_SINGLE_ENDED	(0 << 5)
-#define ME4000_AI_LIST_INPUT_DIFFERENTIAL	(1 << 5)
-#define ME4000_AI_LIST_RANGE_BIPOLAR_10		(0 << 6)
-#define ME4000_AI_LIST_RANGE_BIPOLAR_2_5	(1 << 6)
-#define ME4000_AI_LIST_RANGE_UNIPOLAR_10	(2 << 6)
-#define ME4000_AI_LIST_RANGE_UNIPOLAR_2_5	(3 << 6)
-#define ME4000_AI_LIST_LAST_ENTRY		(1 << 8)
+#define ME4000_AI_LIST_INPUT_DIFFERENTIAL	BIT(5)
+#define ME4000_AI_LIST_RANGE(x)			((3 - ((x) & 3)) << 6)
+#define ME4000_AI_LIST_LAST_ENTRY		BIT(8)
 #define ME4000_AI_DATA_REG			0x7c
 #define ME4000_AI_CHAN_TIMER_REG		0x80
 #define ME4000_AI_CHAN_PRE_TIMER_REG		0x84
@@ -126,14 +119,14 @@
 #define ME4000_AI_SCAN_PRE_TIMER_HIGH_REG	0x94
 #define ME4000_AI_START_REG			0x98
 #define ME4000_IRQ_STATUS_REG			0x9c
-#define ME4000_IRQ_STATUS_BIT_EX		(1 << 0)
-#define ME4000_IRQ_STATUS_BIT_LE		(1 << 1)
-#define ME4000_IRQ_STATUS_BIT_AI_HF		(1 << 2)
-#define ME4000_IRQ_STATUS_BIT_AO_0_HF		(1 << 3)
-#define ME4000_IRQ_STATUS_BIT_AO_1_HF		(1 << 4)
-#define ME4000_IRQ_STATUS_BIT_AO_2_HF		(1 << 5)
-#define ME4000_IRQ_STATUS_BIT_AO_3_HF		(1 << 6)
-#define ME4000_IRQ_STATUS_BIT_SC		(1 << 7)
+#define ME4000_IRQ_STATUS_EX			BIT(0)
+#define ME4000_IRQ_STATUS_LE			BIT(1)
+#define ME4000_IRQ_STATUS_AI_HF			BIT(2)
+#define ME4000_IRQ_STATUS_AO_0_HF		BIT(3)
+#define ME4000_IRQ_STATUS_AO_1_HF		BIT(4)
+#define ME4000_IRQ_STATUS_AO_2_HF		BIT(5)
+#define ME4000_IRQ_STATUS_AO_3_HF		BIT(6)
+#define ME4000_IRQ_STATUS_SC			BIT(7)
 #define ME4000_DIO_PORT_0_REG			0xa0
 #define ME4000_DIO_PORT_1_REG			0xa4
 #define ME4000_DIO_PORT_2_REG			0xa8
@@ -141,20 +134,20 @@
 #define ME4000_DIO_DIR_REG			0xb0
 #define ME4000_AO_LOADSETREG_XX			0xb4
 #define ME4000_DIO_CTRL_REG			0xb8
-#define ME4000_DIO_CTRL_BIT_MODE_0		(1 << 0)
-#define ME4000_DIO_CTRL_BIT_MODE_1		(1 << 1)
-#define ME4000_DIO_CTRL_BIT_MODE_2		(1 << 2)
-#define ME4000_DIO_CTRL_BIT_MODE_3		(1 << 3)
-#define ME4000_DIO_CTRL_BIT_MODE_4		(1 << 4)
-#define ME4000_DIO_CTRL_BIT_MODE_5		(1 << 5)
-#define ME4000_DIO_CTRL_BIT_MODE_6		(1 << 6)
-#define ME4000_DIO_CTRL_BIT_MODE_7		(1 << 7)
-#define ME4000_DIO_CTRL_BIT_FUNCTION_0		(1 << 8)
-#define ME4000_DIO_CTRL_BIT_FUNCTION_1		(1 << 9)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_0		(1 << 10)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_1		(1 << 11)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_2		(1 << 12)
-#define ME4000_DIO_CTRL_BIT_FIFO_HIGH_3		(1 << 13)
+#define ME4000_DIO_CTRL_MODE_0			BIT(0)
+#define ME4000_DIO_CTRL_MODE_1			BIT(1)
+#define ME4000_DIO_CTRL_MODE_2			BIT(2)
+#define ME4000_DIO_CTRL_MODE_3			BIT(3)
+#define ME4000_DIO_CTRL_MODE_4			BIT(4)
+#define ME4000_DIO_CTRL_MODE_5			BIT(5)
+#define ME4000_DIO_CTRL_MODE_6			BIT(6)
+#define ME4000_DIO_CTRL_MODE_7			BIT(7)
+#define ME4000_DIO_CTRL_FUNCTION_0		BIT(8)
+#define ME4000_DIO_CTRL_FUNCTION_1		BIT(9)
+#define ME4000_DIO_CTRL_FIFO_HIGH_0		BIT(10)
+#define ME4000_DIO_CTRL_FIFO_HIGH_1		BIT(11)
+#define ME4000_DIO_CTRL_FIFO_HIGH_2		BIT(12)
+#define ME4000_DIO_CTRL_FIFO_HIGH_3		BIT(13)
 #define ME4000_AO_DEMUX_ADJUST_REG		0xbc
 #define ME4000_AO_DEMUX_ADJUST_VALUE		0x4c
 #define ME4000_AI_SAMPLE_COUNTER_REG		0xc0
@@ -166,8 +159,12 @@
 
 #define ME4000_AI_CHANNEL_LIST_COUNT		1024
 
-struct me4000_info {
+struct me4000_private {
 	unsigned long plx_regbase;
+	unsigned int ai_ctrl_mode;
+	unsigned int ai_init_ticks;
+	unsigned int ai_scan_ticks;
+	unsigned int ai_chan_ticks;
 };
 
 enum me4000_boardid {
@@ -188,134 +185,126 @@
 
 struct me4000_board {
 	const char *name;
-	int ao_nchan;
-	int ao_fifo;
 	int ai_nchan;
-	int ai_diff_nchan;
-	int ai_sh_nchan;
-	int ex_trig_analog;
-	int dio_nchan;
-	int has_counter;
+	unsigned int can_do_diff_ai:1;
+	unsigned int can_do_sh_ai:1;	/* sample & hold (8 channels) */
+	unsigned int ex_trig_analog:1;
+	unsigned int has_ao:1;
+	unsigned int has_ao_fifo:1;
+	unsigned int has_counter:1;
 };
 
 static const struct me4000_board me4000_boards[] = {
 	[BOARD_ME4650] = {
 		.name		= "ME-4650",
 		.ai_nchan	= 16,
-		.dio_nchan	= 32,
 	},
 	[BOARD_ME4660] = {
 		.name		= "ME-4660",
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.dio_nchan	= 32,
+		.can_do_diff_ai	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4660I] = {
 		.name		= "ME-4660i",
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.dio_nchan	= 32,
+		.can_do_diff_ai	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4660S] = {
 		.name		= "ME-4660s",
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.ai_sh_nchan	= 8,
-		.dio_nchan	= 32,
+		.can_do_diff_ai	= 1,
+		.can_do_sh_ai	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4660IS] = {
 		.name		= "ME-4660is",
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.ai_sh_nchan	= 8,
-		.dio_nchan	= 32,
+		.can_do_diff_ai	= 1,
+		.can_do_sh_ai	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4670] = {
 		.name		= "ME-4670",
-		.ao_nchan	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
+		.can_do_diff_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4670I] = {
 		.name		= "ME-4670i",
-		.ao_nchan	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
+		.can_do_diff_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4670S] = {
 		.name		= "ME-4670s",
-		.ao_nchan	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.ai_sh_nchan	= 8,
+		.can_do_diff_ai	= 1,
+		.can_do_sh_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4670IS] = {
 		.name		= "ME-4670is",
-		.ao_nchan	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.ai_sh_nchan	= 8,
+		.can_do_diff_ai	= 1,
+		.can_do_sh_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4680] = {
 		.name		= "ME-4680",
-		.ao_nchan	= 4,
-		.ao_fifo	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
+		.can_do_diff_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
+		.has_ao_fifo	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4680I] = {
 		.name		= "ME-4680i",
-		.ao_nchan	= 4,
-		.ao_fifo	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
+		.can_do_diff_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
+		.has_ao_fifo	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4680S] = {
 		.name		= "ME-4680s",
-		.ao_nchan	= 4,
-		.ao_fifo	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.ai_sh_nchan	= 8,
+		.can_do_diff_ai	= 1,
+		.can_do_sh_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
+		.has_ao_fifo	= 1,
 		.has_counter	= 1,
 	},
 	[BOARD_ME4680IS] = {
 		.name		= "ME-4680is",
-		.ao_nchan	= 4,
-		.ao_fifo	= 4,
 		.ai_nchan	= 32,
-		.ai_diff_nchan	= 16,
-		.ai_sh_nchan	= 8,
+		.can_do_diff_ai	= 1,
+		.can_do_sh_ai	= 1,
 		.ex_trig_analog	= 1,
-		.dio_nchan	= 32,
+		.has_ao		= 1,
+		.has_ao_fifo	= 1,
 		.has_counter	= 1,
 	},
 };
 
+/*
+ * NOTE: the ranges here are inverted compared to the values
+ * written to the ME4000_AI_CHANNEL_LIST_REG.
+ *
+ * The ME4000_AI_LIST_RANGE() macro handles the inversion.
+ */
 static const struct comedi_lrange me4000_ai_range = {
 	4, {
 		UNI_RANGE(2.5),
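To make the inversion mentioned in the NOTE concrete, here is a small standalone sketch (illustration only, reusing the macro definition from this patch) showing how comedi range indices 0-3 map back to the old hardware range codes 3-0 in bits 7:6 of the channel list entry:

#include <stdio.h>

#define ME4000_AI_LIST_RANGE(x)		((3 - ((x) & 3)) << 6)

int main(void)
{
	unsigned int range;

	/* indices 0..3 give 0x0c0, 0x080, 0x040, 0x000, i.e. the values of
	 * the old *_UNIPOLAR_2_5 .. *_BIPOLAR_10 defines in that order */
	for (range = 0; range < 4; range++)
		printf("range %u -> 0x%03x\n", range, ME4000_AI_LIST_RANGE(range));
	return 0;
}
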
@@ -330,7 +319,7 @@
 				  unsigned long context)
 {
 	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
-	struct me4000_info *info = dev->private;
+	struct me4000_private *devpriv = dev->private;
 	unsigned long xilinx_iobase = pci_resource_start(pcidev, 5);
 	unsigned int file_length;
 	unsigned int val;
@@ -343,42 +332,42 @@
 	 * Set PLX local interrupt 2 polarity to high.
 	 * Interrupt is thrown by init pin of xilinx.
 	 */
-	outl(PLX9052_INTCSR_LI2POL, info->plx_regbase + PLX9052_INTCSR);
+	outl(PLX9052_INTCSR_LI2POL, devpriv->plx_regbase + PLX9052_INTCSR);
 
 	/* Set /CS and /WRITE of the Xilinx */
-	val = inl(info->plx_regbase + PLX9052_CNTRL);
+	val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
 	val |= PLX9052_CNTRL_UIO2_DATA;
-	outl(val, info->plx_regbase + PLX9052_CNTRL);
+	outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
 
 	/* Init Xilinx with CS1 */
 	inb(xilinx_iobase + 0xC8);
 
 	/* Wait until /INIT pin is set */
-	udelay(20);
-	val = inl(info->plx_regbase + PLX9052_INTCSR);
+	usleep_range(20, 1000);
+	val = inl(devpriv->plx_regbase + PLX9052_INTCSR);
 	if (!(val & PLX9052_INTCSR_LI2STAT)) {
 		dev_err(dev->class_dev, "Can't init Xilinx\n");
 		return -EIO;
 	}
 
 	/* Reset /CS and /WRITE of the Xilinx */
-	val = inl(info->plx_regbase + PLX9052_CNTRL);
+	val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
 	val &= ~PLX9052_CNTRL_UIO2_DATA;
-	outl(val, info->plx_regbase + PLX9052_CNTRL);
+	outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
 
 	/* Download Xilinx firmware */
 	file_length = (((unsigned int)data[0] & 0xff) << 24) +
 		      (((unsigned int)data[1] & 0xff) << 16) +
 		      (((unsigned int)data[2] & 0xff) << 8) +
 		      ((unsigned int)data[3] & 0xff);
-	udelay(10);
+	usleep_range(10, 1000);
 
 	for (i = 0; i < file_length; i++) {
 		outb(data[16 + i], xilinx_iobase);
-		udelay(10);
+		usleep_range(10, 1000);
 
 		/* Check if BUSY flag is low */
-		val = inl(info->plx_regbase + PLX9052_CNTRL);
+		val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
 		if (val & PLX9052_CNTRL_UIO1_DATA) {
 			dev_err(dev->class_dev,
 				"Xilinx is still busy (i = %d)\n", i);
@@ -387,7 +376,7 @@
 	}
 
 	/* If done flag is high download was successful */
-	val = inl(info->plx_regbase + PLX9052_CNTRL);
+	val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
 	if (!(val & PLX9052_CNTRL_UIO0_DATA)) {
 		dev_err(dev->class_dev, "DONE flag is not set\n");
 		dev_err(dev->class_dev, "Download not successful\n");
@@ -395,44 +384,53 @@
 	}
 
 	/* Set /CS and /WRITE */
-	val = inl(info->plx_regbase + PLX9052_CNTRL);
+	val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
 	val |= PLX9052_CNTRL_UIO2_DATA;
-	outl(val, info->plx_regbase + PLX9052_CNTRL);
+	outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
 
 	return 0;
 }
 
+static void me4000_ai_reset(struct comedi_device *dev)
+{
+	unsigned int ctrl;
+
+	/* Stop any running conversion */
+	ctrl = inl(dev->iobase + ME4000_AI_CTRL_REG);
+	ctrl |= ME4000_AI_CTRL_STOP | ME4000_AI_CTRL_IMMEDIATE_STOP;
+	outl(ctrl, dev->iobase + ME4000_AI_CTRL_REG);
+
+	/* Clear the control register */
+	outl(0x0, dev->iobase + ME4000_AI_CTRL_REG);
+}
+
 static void me4000_reset(struct comedi_device *dev)
 {
-	struct me4000_info *info = dev->private;
+	struct me4000_private *devpriv = dev->private;
 	unsigned int val;
 	int chan;
 
-	/* Make a hardware reset */
-	val = inl(info->plx_regbase + PLX9052_CNTRL);
+	/* Disable interrupts on the PLX */
+	outl(0, devpriv->plx_regbase + PLX9052_INTCSR);
+
+	/* Software reset the PLX */
+	val = inl(devpriv->plx_regbase + PLX9052_CNTRL);
 	val |= PLX9052_CNTRL_PCI_RESET;
-	outl(val, info->plx_regbase + PLX9052_CNTRL);
+	outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
 	val &= ~PLX9052_CNTRL_PCI_RESET;
-	outl(val, info->plx_regbase + PLX9052_CNTRL);
+	outl(val, devpriv->plx_regbase + PLX9052_CNTRL);
 
 	/* 0x8000 to the DACs means an output voltage of 0V */
 	for (chan = 0; chan < 4; chan++)
 		outl(0x8000, dev->iobase + ME4000_AO_SINGLE_REG(chan));
 
-	/* Set both stop bits in the analog input control register */
-	outl(ME4000_AI_CTRL_BIT_IMMEDIATE_STOP | ME4000_AI_CTRL_BIT_STOP,
-	     dev->iobase + ME4000_AI_CTRL_REG);
+	me4000_ai_reset(dev);
 
 	/* Set both stop bits in the analog output control register */
-	val = ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP;
+	val = ME4000_AO_CTRL_IMMEDIATE_STOP | ME4000_AO_CTRL_STOP;
 	for (chan = 0; chan < 4; chan++)
 		outl(val, dev->iobase + ME4000_AO_CTRL_REG(chan));
 
-	/* Enable interrupts on the PLX */
-	outl(PLX9052_INTCSR_LI1ENAB |
-	     PLX9052_INTCSR_LI1POL |
-	     PLX9052_INTCSR_PCIENAB, info->plx_regbase + PLX9052_INTCSR);
-
 	/* Set the adjustment register for AO demux */
 	outl(ME4000_AO_DEMUX_ADJUST_VALUE,
 	     dev->iobase + ME4000_AO_DEMUX_ADJUST_REG);
@@ -445,96 +443,68 @@
 		outl(0x1, dev->iobase + ME4000_DIO_CTRL_REG);
 }
 
-/*=============================================================================
-  Analog input section
-  ===========================================================================*/
+static unsigned int me4000_ai_get_sample(struct comedi_device *dev,
+					 struct comedi_subdevice *s)
+{
+	unsigned int val;
+
+	/* read two's complement value and munge to offset binary */
+	val = inl(dev->iobase + ME4000_AI_DATA_REG);
+	return comedi_offset_munge(s, val);
+}
+
+static int me4000_ai_eoc(struct comedi_device *dev,
+			 struct comedi_subdevice *s,
+			 struct comedi_insn *insn,
+			 unsigned long context)
+{
+	unsigned int status;
+
+	status = inl(dev->iobase + ME4000_AI_STATUS_REG);
+	if (status & ME4000_AI_STATUS_EF_DATA)
+		return 0;
+	return -EBUSY;
+}
 
 static int me4000_ai_insn_read(struct comedi_device *dev,
-			       struct comedi_subdevice *subdevice,
-			       struct comedi_insn *insn, unsigned int *data)
+			       struct comedi_subdevice *s,
+			       struct comedi_insn *insn,
+			       unsigned int *data)
 {
-	const struct me4000_board *board = dev->board_ptr;
-	int chan = CR_CHAN(insn->chanspec);
-	int rang = CR_RANGE(insn->chanspec);
-	int aref = CR_AREF(insn->chanspec);
+	unsigned int chan = CR_CHAN(insn->chanspec);
+	unsigned int range = CR_RANGE(insn->chanspec);
+	unsigned int aref = CR_AREF(insn->chanspec);
+	unsigned int entry;
+	int ret = 0;
+	int i;
 
-	unsigned int entry = 0;
-	unsigned int tmp;
-	unsigned int lval;
-
-	if (insn->n == 0) {
-		return 0;
-	} else if (insn->n > 1) {
-		dev_err(dev->class_dev, "Invalid instruction length %d\n",
-			insn->n);
-		return -EINVAL;
-	}
-
-	switch (rang) {
-	case 0:
-		entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
-		break;
-	case 1:
-		entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
-		break;
-	case 2:
-		entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
-		break;
-	case 3:
-		entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
-		break;
-	default:
-		dev_err(dev->class_dev, "Invalid range specified\n");
-		return -EINVAL;
-	}
-
-	switch (aref) {
-	case AREF_GROUND:
-	case AREF_COMMON:
-		if (chan >= board->ai_nchan) {
+	entry = chan | ME4000_AI_LIST_RANGE(range);
+	if (aref == AREF_DIFF) {
+		if (!(s->subdev_flags & SDF_DIFF)) {
 			dev_err(dev->class_dev,
-				"Analog input is not available\n");
+				"Differential inputs are not available\n");
 			return -EINVAL;
 		}
-		entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED | chan;
-		break;
 
-	case AREF_DIFF:
-		if (rang == 0 || rang == 1) {
+		if (!comedi_range_is_bipolar(s, range)) {
 			dev_err(dev->class_dev,
 				"Range must be bipolar when aref = diff\n");
 			return -EINVAL;
 		}
 
-		if (chan >= board->ai_diff_nchan) {
+		if (chan >= (s->n_chan / 2)) {
 			dev_err(dev->class_dev,
 				"Analog input is not available\n");
 			return -EINVAL;
 		}
-		entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL | chan;
-		break;
-	default:
-		dev_err(dev->class_dev, "Invalid aref specified\n");
-		return -EINVAL;
+		entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
 	}
 
 	entry |= ME4000_AI_LIST_LAST_ENTRY;
 
-	/* Clear channel list, data fifo and both stop bits */
-	tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
-	tmp &= ~(ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
-		 ME4000_AI_CTRL_BIT_DATA_FIFO |
-		 ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
-	outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-	/* Set the acquisition mode to single */
-	tmp &= ~(ME4000_AI_CTRL_BIT_MODE_0 | ME4000_AI_CTRL_BIT_MODE_1 |
-		 ME4000_AI_CTRL_BIT_MODE_2);
-	outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-	/* Enable channel list and data fifo */
-	tmp |= ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO;
-	outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+	/* Enable channel list and data fifo for single acquisition mode */
+	outl(ME4000_AI_CTRL_CHANNEL_FIFO | ME4000_AI_CTRL_DATA_FIFO,
+	     dev->iobase + ME4000_AI_CTRL_REG);
 
 	/* Generate channel list entry */
 	outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
@@ -543,36 +513,29 @@
 	outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
 	outl(ME4000_AI_MIN_TICKS, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
 
-	/* Start conversion by dummy read */
-	inl(dev->iobase + ME4000_AI_START_REG);
+	for (i = 0; i < insn->n; i++) {
+		unsigned int val;
 
-	/* Wait until ready */
-	udelay(10);
-	if (!(inl(dev->iobase + ME4000_AI_STATUS_REG) &
-	     ME4000_AI_STATUS_BIT_EF_DATA)) {
-		dev_err(dev->class_dev, "Value not available after wait\n");
-		return -EIO;
+		/* start conversion by dummy read */
+		inl(dev->iobase + ME4000_AI_START_REG);
+
+		ret = comedi_timeout(dev, s, insn, me4000_ai_eoc, 0);
+		if (ret)
+			break;
+
+		val = me4000_ai_get_sample(dev, s);
+		data[i] = val;
 	}
 
-	/* Read value from data fifo */
-	lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
-	data[0] = lval ^ 0x8000;
+	me4000_ai_reset(dev);
 
-	return 1;
+	return ret ? ret : insn->n;
 }
 
 static int me4000_ai_cancel(struct comedi_device *dev,
 			    struct comedi_subdevice *s)
 {
-	unsigned int tmp;
-
-	/* Stop any running conversion */
-	tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
-	tmp &= ~(ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
-	outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-	/* Clear the control register */
-	outl(0x0, dev->iobase + ME4000_AI_CTRL_REG);
+	me4000_ai_reset(dev);
 
 	return 0;
 }
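A quick standalone sketch of the two's-complement to offset-binary munge performed by me4000_ai_get_sample() above (so the caller stores the returned value directly), equivalent to the explicit lval ^ 0x8000 done by the old code for these 16-bit samples; illustration only, not driver code:

#include <stdio.h>

int main(void)
{
	/* a 16-bit two's complement reading of -1 (just below mid-scale on a
	 * bipolar range) becomes 0x7fff in offset binary by flipping the
	 * sign bit */
	unsigned short raw = 0xffff;
	unsigned short offset_binary = raw ^ 0x8000;

	printf("0x%04x\n", (unsigned int)offset_binary);	/* prints 0x7fff */
	return 0;
}
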
@@ -581,8 +544,6 @@
 				    struct comedi_subdevice *s,
 				    struct comedi_cmd *cmd)
 {
-	const struct me4000_board *board = dev->board_ptr;
-	unsigned int max_diff_chan = board->ai_diff_nchan;
 	unsigned int aref0 = CR_AREF(cmd->chanlist[0]);
 	int i;
 
@@ -598,7 +559,13 @@
 		}
 
 		if (aref == AREF_DIFF) {
-			if (chan >= max_diff_chan) {
+			if (!(s->subdev_flags & SDF_DIFF)) {
+				dev_err(dev->class_dev,
+					"Differential inputs are not available\n");
+				return -EINVAL;
+			}
+
+			if (chan >= (s->n_chan / 2)) {
 				dev_dbg(dev->class_dev,
 					"Channel number too high\n");
 				return -EINVAL;
@@ -615,202 +582,127 @@
 	return 0;
 }
 
-static int ai_round_cmd_args(struct comedi_device *dev,
-			     struct comedi_subdevice *s,
-			     struct comedi_cmd *cmd,
-			     unsigned int *init_ticks,
-			     unsigned int *scan_ticks, unsigned int *chan_ticks)
+static void me4000_ai_round_cmd_args(struct comedi_device *dev,
+				     struct comedi_subdevice *s,
+				     struct comedi_cmd *cmd)
 {
+	struct me4000_private *devpriv = dev->private;
 	int rest;
 
-	*init_ticks = 0;
-	*scan_ticks = 0;
-	*chan_ticks = 0;
+	devpriv->ai_init_ticks = 0;
+	devpriv->ai_scan_ticks = 0;
+	devpriv->ai_chan_ticks = 0;
 
 	if (cmd->start_arg) {
-		*init_ticks = (cmd->start_arg * 33) / 1000;
+		devpriv->ai_init_ticks = (cmd->start_arg * 33) / 1000;
 		rest = (cmd->start_arg * 33) % 1000;
 
 		if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
 			if (rest > 33)
-				(*init_ticks)++;
+				devpriv->ai_init_ticks++;
 		} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
 			if (rest)
-				(*init_ticks)++;
+				devpriv->ai_init_ticks++;
 		}
 	}
 
 	if (cmd->scan_begin_arg) {
-		*scan_ticks = (cmd->scan_begin_arg * 33) / 1000;
+		devpriv->ai_scan_ticks = (cmd->scan_begin_arg * 33) / 1000;
 		rest = (cmd->scan_begin_arg * 33) % 1000;
 
 		if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
 			if (rest > 33)
-				(*scan_ticks)++;
+				devpriv->ai_scan_ticks++;
 		} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
 			if (rest)
-				(*scan_ticks)++;
+				devpriv->ai_scan_ticks++;
 		}
 	}
 
 	if (cmd->convert_arg) {
-		*chan_ticks = (cmd->convert_arg * 33) / 1000;
+		devpriv->ai_chan_ticks = (cmd->convert_arg * 33) / 1000;
 		rest = (cmd->convert_arg * 33) % 1000;
 
 		if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_NEAREST) {
 			if (rest > 33)
-				(*chan_ticks)++;
+				devpriv->ai_chan_ticks++;
 		} else if ((cmd->flags & CMDF_ROUND_MASK) == CMDF_ROUND_UP) {
 			if (rest)
-				(*chan_ticks)++;
+				devpriv->ai_chan_ticks++;
 		}
 	}
-
-	return 0;
 }
 
-static void ai_write_timer(struct comedi_device *dev,
-			   unsigned int init_ticks,
-			   unsigned int scan_ticks, unsigned int chan_ticks)
+static void me4000_ai_write_chanlist(struct comedi_device *dev,
+				     struct comedi_subdevice *s,
+				     struct comedi_cmd *cmd)
 {
-	outl(init_ticks - 1, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG);
-	outl(0x0, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG);
-
-	if (scan_ticks) {
-		outl(scan_ticks - 1, dev->iobase + ME4000_AI_SCAN_TIMER_LOW_REG);
-		outl(0x0, dev->iobase + ME4000_AI_SCAN_TIMER_HIGH_REG);
-	}
-
-	outl(chan_ticks - 1, dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
-	outl(chan_ticks - 1, dev->iobase + ME4000_AI_CHAN_TIMER_REG);
-}
-
-static int ai_write_chanlist(struct comedi_device *dev,
-			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
-{
-	unsigned int entry;
-	unsigned int chan;
-	unsigned int rang;
-	unsigned int aref;
 	int i;
 
 	for (i = 0; i < cmd->chanlist_len; i++) {
-		chan = CR_CHAN(cmd->chanlist[i]);
-		rang = CR_RANGE(cmd->chanlist[i]);
-		aref = CR_AREF(cmd->chanlist[i]);
+		unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+		unsigned int range = CR_RANGE(cmd->chanlist[i]);
+		unsigned int aref = CR_AREF(cmd->chanlist[i]);
+		unsigned int entry;
 
-		entry = chan;
-
-		if (rang == 0)
-			entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
-		else if (rang == 1)
-			entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
-		else if (rang == 2)
-			entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
-		else
-			entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
+		entry = chan | ME4000_AI_LIST_RANGE(range);
 
 		if (aref == AREF_DIFF)
 			entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL;
-		else
-			entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED;
+
+		if (i == (cmd->chanlist_len - 1))
+			entry |= ME4000_AI_LIST_LAST_ENTRY;
 
 		outl(entry, dev->iobase + ME4000_AI_CHANNEL_LIST_REG);
 	}
-
-	return 0;
-}
-
-static int ai_prepare(struct comedi_device *dev,
-		      struct comedi_subdevice *s,
-		      struct comedi_cmd *cmd,
-		      unsigned int init_ticks,
-		      unsigned int scan_ticks, unsigned int chan_ticks)
-{
-	unsigned int tmp = 0;
-
-	/* Write timer arguments */
-	ai_write_timer(dev, init_ticks, scan_ticks, chan_ticks);
-
-	/* Reset control register */
-	outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-	/* Start sources */
-	if ((cmd->start_src == TRIG_EXT &&
-	     cmd->scan_begin_src == TRIG_TIMER &&
-	     cmd->convert_src == TRIG_TIMER) ||
-	    (cmd->start_src == TRIG_EXT &&
-	     cmd->scan_begin_src == TRIG_FOLLOW &&
-	     cmd->convert_src == TRIG_TIMER)) {
-		tmp = ME4000_AI_CTRL_BIT_MODE_1 |
-		    ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
-		    ME4000_AI_CTRL_BIT_DATA_FIFO;
-	} else if (cmd->start_src == TRIG_EXT &&
-		   cmd->scan_begin_src == TRIG_EXT &&
-		   cmd->convert_src == TRIG_TIMER) {
-		tmp = ME4000_AI_CTRL_BIT_MODE_2 |
-		    ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
-		    ME4000_AI_CTRL_BIT_DATA_FIFO;
-	} else if (cmd->start_src == TRIG_EXT &&
-		   cmd->scan_begin_src == TRIG_EXT &&
-		   cmd->convert_src == TRIG_EXT) {
-		tmp = ME4000_AI_CTRL_BIT_MODE_0 |
-		    ME4000_AI_CTRL_BIT_MODE_1 |
-		    ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
-		    ME4000_AI_CTRL_BIT_DATA_FIFO;
-	} else {
-		tmp = ME4000_AI_CTRL_BIT_MODE_0 |
-		    ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
-		    ME4000_AI_CTRL_BIT_DATA_FIFO;
-	}
-
-	/* Stop triggers */
-	if (cmd->stop_src == TRIG_COUNT) {
-		outl(cmd->chanlist_len * cmd->stop_arg,
-		     dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
-		tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ;
-	} else if (cmd->stop_src == TRIG_NONE &&
-		   cmd->scan_end_src == TRIG_COUNT) {
-		outl(cmd->scan_end_arg,
-		     dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
-		tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ;
-	} else {
-		tmp |= ME4000_AI_CTRL_BIT_HF_IRQ;
-	}
-
-	/* Write the setup to the control register */
-	outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-	/* Write the channel list */
-	ai_write_chanlist(dev, s, cmd);
-
-	return 0;
 }
 
 static int me4000_ai_do_cmd(struct comedi_device *dev,
 			    struct comedi_subdevice *s)
 {
-	int err;
-	unsigned int init_ticks = 0;
-	unsigned int scan_ticks = 0;
-	unsigned int chan_ticks = 0;
+	struct me4000_private *devpriv = dev->private;
 	struct comedi_cmd *cmd = &s->async->cmd;
+	unsigned int ctrl;
 
-	/* Reset the analog input */
-	err = me4000_ai_cancel(dev, s);
-	if (err)
-		return err;
+	/* Write timer arguments */
+	outl(devpriv->ai_init_ticks - 1,
+	     dev->iobase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG);
+	outl(0x0, dev->iobase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG);
 
-	/* Round the timer arguments */
-	err = ai_round_cmd_args(dev,
-				s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
-	if (err)
-		return err;
+	if (devpriv->ai_scan_ticks) {
+		outl(devpriv->ai_scan_ticks - 1,
+		     dev->iobase + ME4000_AI_SCAN_TIMER_LOW_REG);
+		outl(0x0, dev->iobase + ME4000_AI_SCAN_TIMER_HIGH_REG);
+	}
 
-	/* Prepare the AI for acquisition */
-	err = ai_prepare(dev, s, cmd, init_ticks, scan_ticks, chan_ticks);
-	if (err)
-		return err;
+	outl(devpriv->ai_chan_ticks - 1,
+	     dev->iobase + ME4000_AI_CHAN_PRE_TIMER_REG);
+	outl(devpriv->ai_chan_ticks - 1,
+	     dev->iobase + ME4000_AI_CHAN_TIMER_REG);
+
+	/* Start sources */
+	ctrl = devpriv->ai_ctrl_mode |
+	       ME4000_AI_CTRL_CHANNEL_FIFO |
+	       ME4000_AI_CTRL_DATA_FIFO;
+
+	/* Stop triggers */
+	if (cmd->stop_src == TRIG_COUNT) {
+		outl(cmd->chanlist_len * cmd->stop_arg,
+		     dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
+		ctrl |= ME4000_AI_CTRL_SC_IRQ;
+	} else if (cmd->stop_src == TRIG_NONE &&
+		   cmd->scan_end_src == TRIG_COUNT) {
+		outl(cmd->scan_end_arg,
+		     dev->iobase + ME4000_AI_SAMPLE_COUNTER_REG);
+		ctrl |= ME4000_AI_CTRL_SC_IRQ;
+	}
+	ctrl |= ME4000_AI_CTRL_HF_IRQ;
+
+	/* Write the setup to the control register */
+	outl(ctrl, dev->iobase + ME4000_AI_CTRL_REG);
+
+	/* Write the channel list */
+	me4000_ai_write_chanlist(dev, s, cmd);
 
 	/* Start acquisition by dummy read */
 	inl(dev->iobase + ME4000_AI_START_REG);
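As a quick illustration of the tick conversion done by me4000_ai_round_cmd_args() above: the nanosecond command arguments are scaled by the 33 MHz timer base as (ns * 33) / 1000, with the remainder driving the CMDF_ROUND handling, which is why the 66-tick minimum corresponds to the 2000 ns defaults used in the checks. Standalone sketch, not driver code:

#include <stdio.h>

int main(void)
{
	unsigned int start_arg = 2000;			/* ns, the default used above */
	unsigned int ticks = (start_arg * 33) / 1000;	/* ticks of the 33 MHz base */
	unsigned int rest = (start_arg * 33) % 1000;	/* remainder used for rounding */

	printf("%u ticks, remainder %u\n", ticks, rest);	/* 66 ticks, remainder 0 */
	return 0;
}
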
@@ -822,14 +714,9 @@
 				 struct comedi_subdevice *s,
 				 struct comedi_cmd *cmd)
 {
-	unsigned int init_ticks;
-	unsigned int chan_ticks;
-	unsigned int scan_ticks;
+	struct me4000_private *devpriv = dev->private;
 	int err = 0;
 
-	/* Round the timer arguments */
-	ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks);
-
 	/* Step 1 : check if triggers are trivially valid */
 
 	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
@@ -857,21 +744,28 @@
 	if (cmd->start_src == TRIG_NOW &&
 	    cmd->scan_begin_src == TRIG_TIMER &&
 	    cmd->convert_src == TRIG_TIMER) {
+		devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0;
 	} else if (cmd->start_src == TRIG_NOW &&
 		   cmd->scan_begin_src == TRIG_FOLLOW &&
 		   cmd->convert_src == TRIG_TIMER) {
+		devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0;
 	} else if (cmd->start_src == TRIG_EXT &&
 		   cmd->scan_begin_src == TRIG_TIMER &&
 		   cmd->convert_src == TRIG_TIMER) {
+		devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_1;
 	} else if (cmd->start_src == TRIG_EXT &&
 		   cmd->scan_begin_src == TRIG_FOLLOW &&
 		   cmd->convert_src == TRIG_TIMER) {
+		devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_1;
 	} else if (cmd->start_src == TRIG_EXT &&
 		   cmd->scan_begin_src == TRIG_EXT &&
 		   cmd->convert_src == TRIG_TIMER) {
+		devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_2;
 	} else if (cmd->start_src == TRIG_EXT &&
 		   cmd->scan_begin_src == TRIG_EXT &&
 		   cmd->convert_src == TRIG_EXT) {
+		devpriv->ai_ctrl_mode = ME4000_AI_CTRL_MODE_0 |
+					ME4000_AI_CTRL_MODE_1;
 	} else {
 		err |= -EINVAL;
 	}
@@ -887,15 +781,19 @@
 		cmd->chanlist_len = 1;
 		err |= -EINVAL;
 	}
-	if (init_ticks < 66) {
+
+	/* Round the timer arguments */
+	me4000_ai_round_cmd_args(dev, s, cmd);
+
+	if (devpriv->ai_init_ticks < 66) {
 		cmd->start_arg = 2000;
 		err |= -EINVAL;
 	}
-	if (scan_ticks && scan_ticks < 67) {
+	if (devpriv->ai_scan_ticks && devpriv->ai_scan_ticks < 67) {
 		cmd->scan_begin_arg = 2031;
 		err |= -EINVAL;
 	}
-	if (chan_ticks < 66) {
+	if (devpriv->ai_chan_ticks < 66) {
 		cmd->convert_arg = 2000;
 		err |= -EINVAL;
 	}
@@ -915,17 +813,18 @@
 	    cmd->scan_begin_src == TRIG_TIMER &&
 	    cmd->convert_src == TRIG_TIMER) {
 		/* Check timer arguments */
-		if (init_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid start arg\n");
 			cmd->start_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (chan_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid convert arg\n");
 			cmd->convert_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
+		if (devpriv->ai_scan_ticks <=
+		    cmd->chanlist_len * devpriv->ai_chan_ticks) {
 			dev_err(dev->class_dev, "Invalid scan end arg\n");
 
 			/*  At least one tick more */
@@ -936,12 +835,12 @@
 		   cmd->scan_begin_src == TRIG_FOLLOW &&
 		   cmd->convert_src == TRIG_TIMER) {
 		/* Check timer arguments */
-		if (init_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid start arg\n");
 			cmd->start_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (chan_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid convert arg\n");
 			cmd->convert_arg = 2000;	/*  66 ticks at least */
 			err++;
@@ -950,17 +849,18 @@
 		   cmd->scan_begin_src == TRIG_TIMER &&
 		   cmd->convert_src == TRIG_TIMER) {
 		/* Check timer arguments */
-		if (init_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid start arg\n");
 			cmd->start_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (chan_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid convert arg\n");
 			cmd->convert_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
+		if (devpriv->ai_scan_ticks <=
+		    cmd->chanlist_len * devpriv->ai_chan_ticks) {
 			dev_err(dev->class_dev, "Invalid scan end arg\n");
 
 			/*  At least one tick more */
@@ -971,12 +871,12 @@
 		   cmd->scan_begin_src == TRIG_FOLLOW &&
 		   cmd->convert_src == TRIG_TIMER) {
 		/* Check timer arguments */
-		if (init_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid start arg\n");
 			cmd->start_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (chan_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid convert arg\n");
 			cmd->convert_arg = 2000;	/*  66 ticks at least */
 			err++;
@@ -985,12 +885,12 @@
 		   cmd->scan_begin_src == TRIG_EXT &&
 		   cmd->convert_src == TRIG_TIMER) {
 		/* Check timer arguments */
-		if (init_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid start arg\n");
 			cmd->start_arg = 2000;	/*  66 ticks at least */
 			err++;
 		}
-		if (chan_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_chan_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid convert arg\n");
 			cmd->convert_arg = 2000;	/*  66 ticks at least */
 			err++;
@@ -999,7 +899,7 @@
 		   cmd->scan_begin_src == TRIG_EXT &&
 		   cmd->convert_src == TRIG_EXT) {
 		/* Check timer arguments */
-		if (init_ticks < ME4000_AI_MIN_TICKS) {
+		if (devpriv->ai_init_ticks < ME4000_AI_MIN_TICKS) {
 			dev_err(dev->class_dev, "Invalid start arg\n");
 			cmd->start_arg = 2000;	/*  66 ticks at least */
 			err++;
@@ -1039,103 +939,57 @@
 		return IRQ_NONE;
 
 	if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
-	    ME4000_IRQ_STATUS_BIT_AI_HF) {
+	    ME4000_IRQ_STATUS_AI_HF) {
 		/* Read status register to find out what happened */
-		tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
+		tmp = inl(dev->iobase + ME4000_AI_STATUS_REG);
 
-		if (!(tmp & ME4000_AI_STATUS_BIT_FF_DATA) &&
-		    !(tmp & ME4000_AI_STATUS_BIT_HF_DATA) &&
-		    (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
-			c = ME4000_AI_FIFO_COUNT;
-
-			/*
-			 * FIFO overflow, so stop conversion
-			 * and disable all interrupts
-			 */
-			tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
-			tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
-				 ME4000_AI_CTRL_BIT_SC_IRQ);
-			outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-			s->async->events |= COMEDI_CB_ERROR;
-
+		if (!(tmp & ME4000_AI_STATUS_FF_DATA) &&
+		    !(tmp & ME4000_AI_STATUS_HF_DATA) &&
+		    (tmp & ME4000_AI_STATUS_EF_DATA)) {
 			dev_err(dev->class_dev, "FIFO overflow\n");
-		} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA)
-			   && !(tmp & ME4000_AI_STATUS_BIT_HF_DATA)
-			   && (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
+			s->async->events |= COMEDI_CB_ERROR;
+			c = ME4000_AI_FIFO_COUNT;
+		} else if ((tmp & ME4000_AI_STATUS_FF_DATA) &&
+			   !(tmp & ME4000_AI_STATUS_HF_DATA) &&
+			   (tmp & ME4000_AI_STATUS_EF_DATA)) {
 			c = ME4000_AI_FIFO_COUNT / 2;
 		} else {
-			dev_err(dev->class_dev,
-				"Can't determine state of fifo\n");
-			c = 0;
-
-			/*
-			 * Undefined state, so stop conversion
-			 * and disable all interrupts
-			 */
-			tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
-			tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
-				 ME4000_AI_CTRL_BIT_SC_IRQ);
-			outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-			s->async->events |= COMEDI_CB_ERROR;
-
 			dev_err(dev->class_dev, "Undefined FIFO state\n");
+			s->async->events |= COMEDI_CB_ERROR;
+			c = 0;
 		}
 
 		for (i = 0; i < c; i++) {
-			/* Read value from data fifo */
-			lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
-			lval ^= 0x8000;
-
-			if (!comedi_buf_write_samples(s, &lval, 1)) {
-				/*
-				 * Buffer overflow, so stop conversion
-				 * and disable all interrupts
-				 */
-				tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
-				tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
-					 ME4000_AI_CTRL_BIT_SC_IRQ);
-				outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-				break;
-			}
-		}
-
-		/* Work is done, so reset the interrupt */
-		tmp |= ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
-		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-		tmp &= ~ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
-		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-	}
-
-	if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
-	    ME4000_IRQ_STATUS_BIT_SC) {
-		s->async->events |= COMEDI_CB_EOA;
-
-		/*
-		 * Acquisition is complete, so stop
-		 * conversion and disable all interrupts
-		 */
-		tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
-		tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
-		tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ);
-		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-
-		/* Poll data until fifo empty */
-		while (inl(dev->iobase + ME4000_AI_CTRL_REG) &
-		       ME4000_AI_STATUS_BIT_EF_DATA) {
-			/* Read value from data fifo */
-			lval = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xFFFF;
-			lval ^= 0x8000;
-
+			lval = me4000_ai_get_sample(dev, s);
 			if (!comedi_buf_write_samples(s, &lval, 1))
 				break;
 		}
 
 		/* Work is done, so reset the interrupt */
-		tmp |= ME4000_AI_CTRL_BIT_SC_IRQ_RESET;
+		tmp |= ME4000_AI_CTRL_HF_IRQ_RESET;
 		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
-		tmp &= ~ME4000_AI_CTRL_BIT_SC_IRQ_RESET;
+		tmp &= ~ME4000_AI_CTRL_HF_IRQ_RESET;
+		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+	}
+
+	if (inl(dev->iobase + ME4000_IRQ_STATUS_REG) &
+	    ME4000_IRQ_STATUS_SC) {
+		/* Acquisition is complete */
+		s->async->events |= COMEDI_CB_EOA;
+
+		/* Poll data until fifo empty */
+		while (inl(dev->iobase + ME4000_AI_STATUS_REG) &
+		       ME4000_AI_STATUS_EF_DATA) {
+			lval = me4000_ai_get_sample(dev, s);
+			if (!comedi_buf_write_samples(s, &lval, 1))
+				break;
+		}
+
+		/* Work is done, so reset the interrupt */
+		tmp = inl(dev->iobase + ME4000_AI_CTRL_REG);
+		tmp |= ME4000_AI_CTRL_SC_IRQ_RESET;
+		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
+		tmp &= ~ME4000_AI_CTRL_SC_IRQ_RESET;
 		outl(tmp, dev->iobase + ME4000_AI_CTRL_REG);
 	}
 
@@ -1149,12 +1003,12 @@
 				struct comedi_insn *insn,
 				unsigned int *data)
 {
-	int chan = CR_CHAN(insn->chanspec);
+	unsigned int chan = CR_CHAN(insn->chanspec);
 	unsigned int tmp;
 
 	/* Stop any running conversion */
 	tmp = inl(dev->iobase + ME4000_AO_CTRL_REG(chan));
-	tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP;
+	tmp |= ME4000_AO_CTRL_IMMEDIATE_STOP;
 	outl(tmp, dev->iobase + ME4000_AO_CTRL_REG(chan));
 
 	/* Clear control register and set to single mode */
@@ -1217,18 +1071,18 @@
 		return ret;
 
 	tmp = inl(dev->iobase + ME4000_DIO_CTRL_REG);
-	tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1 |
-		 ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3 |
-		 ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5 |
-		 ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7);
+	tmp &= ~(ME4000_DIO_CTRL_MODE_0 | ME4000_DIO_CTRL_MODE_1 |
+		 ME4000_DIO_CTRL_MODE_2 | ME4000_DIO_CTRL_MODE_3 |
+		 ME4000_DIO_CTRL_MODE_4 | ME4000_DIO_CTRL_MODE_5 |
+		 ME4000_DIO_CTRL_MODE_6 | ME4000_DIO_CTRL_MODE_7);
 	if (s->io_bits & 0x000000ff)
-		tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
+		tmp |= ME4000_DIO_CTRL_MODE_0;
 	if (s->io_bits & 0x0000ff00)
-		tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
+		tmp |= ME4000_DIO_CTRL_MODE_2;
 	if (s->io_bits & 0x00ff0000)
-		tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
+		tmp |= ME4000_DIO_CTRL_MODE_4;
 	if (s->io_bits & 0xff000000)
-		tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
+		tmp |= ME4000_DIO_CTRL_MODE_6;
 
 	/*
 	 * Check for optoisolated ME-4000 version.
@@ -1238,9 +1092,8 @@
 	if (inl(dev->iobase + ME4000_DIO_DIR_REG)) {
 		s->io_bits |= 0x000000ff;
 		s->io_bits &= ~0x0000ff00;
-		tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
-		tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
-			 ME4000_DIO_CTRL_BIT_MODE_3);
+		tmp |= ME4000_DIO_CTRL_MODE_0;
+		tmp &= ~(ME4000_DIO_CTRL_MODE_2 | ME4000_DIO_CTRL_MODE_3);
 	}
 
 	outl(tmp, dev->iobase + ME4000_DIO_CTRL_REG);
@@ -1253,7 +1106,7 @@
 {
 	struct pci_dev *pcidev = comedi_to_pci_dev(dev);
 	const struct me4000_board *board = NULL;
-	struct me4000_info *info;
+	struct me4000_private *devpriv;
 	struct comedi_subdevice *s;
 	int result;
 
@@ -1264,17 +1117,17 @@
 	dev->board_ptr = board;
 	dev->board_name = board->name;
 
-	info = comedi_alloc_devpriv(dev, sizeof(*info));
-	if (!info)
+	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+	if (!devpriv)
 		return -ENOMEM;
 
 	result = comedi_pci_enable(dev);
 	if (result)
 		return result;
 
-	info->plx_regbase = pci_resource_start(pcidev, 1);
+	devpriv->plx_regbase = pci_resource_start(pcidev, 1);
 	dev->iobase = pci_resource_start(pcidev, 2);
-	if (!info->plx_regbase || !dev->iobase)
+	if (!devpriv->plx_regbase || !dev->iobase)
 		return -ENODEV;
 
 	result = comedi_load_firmware(dev, &pcidev->dev, ME4000_FIRMWARE,
@@ -1287,79 +1140,66 @@
 	if (pcidev->irq > 0) {
 		result = request_irq(pcidev->irq, me4000_ai_isr, IRQF_SHARED,
 				     dev->board_name, dev);
-		if (result == 0)
+		if (result == 0) {
 			dev->irq = pcidev->irq;
+
+			/* Enable interrupts on the PLX */
+			outl(PLX9052_INTCSR_LI1ENAB | PLX9052_INTCSR_LI1POL |
+			     PLX9052_INTCSR_PCIENAB,
+			     devpriv->plx_regbase + PLX9052_INTCSR);
+		}
 	}
 
 	result = comedi_alloc_subdevices(dev, 4);
 	if (result)
 		return result;
 
-    /*=========================================================================
-      Analog input subdevice
-      ========================================================================*/
-
+	/* Analog Input subdevice */
 	s = &dev->subdevices[0];
+	s->type		= COMEDI_SUBD_AI;
+	s->subdev_flags	= SDF_READABLE | SDF_COMMON | SDF_GROUND;
+	if (board->can_do_diff_ai)
+		s->subdev_flags	|= SDF_DIFF;
+	s->n_chan	= board->ai_nchan;
+	s->maxdata	= 0xffff;
+	s->len_chanlist	= ME4000_AI_CHANNEL_LIST_COUNT;
+	s->range_table	= &me4000_ai_range;
+	s->insn_read	= me4000_ai_insn_read;
 
-	if (board->ai_nchan) {
-		s->type = COMEDI_SUBD_AI;
-		s->subdev_flags =
-		    SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF;
-		s->n_chan = board->ai_nchan;
-		s->maxdata = 0xFFFF;	/*  16 bit ADC */
-		s->len_chanlist = ME4000_AI_CHANNEL_LIST_COUNT;
-		s->range_table = &me4000_ai_range;
-		s->insn_read = me4000_ai_insn_read;
-
-		if (dev->irq) {
-			dev->read_subdev = s;
-			s->subdev_flags |= SDF_CMD_READ;
-			s->cancel = me4000_ai_cancel;
-			s->do_cmdtest = me4000_ai_do_cmd_test;
-			s->do_cmd = me4000_ai_do_cmd;
-		}
-	} else {
-		s->type = COMEDI_SUBD_UNUSED;
+	if (dev->irq) {
+		dev->read_subdev = s;
+		s->subdev_flags	|= SDF_CMD_READ;
+		s->cancel	= me4000_ai_cancel;
+		s->do_cmdtest	= me4000_ai_do_cmd_test;
+		s->do_cmd	= me4000_ai_do_cmd;
 	}
 
-    /*=========================================================================
-      Analog output subdevice
-      ========================================================================*/
-
+	/* Analog Output subdevice */
 	s = &dev->subdevices[1];
-
-	if (board->ao_nchan) {
-		s->type = COMEDI_SUBD_AO;
-		s->subdev_flags = SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
-		s->n_chan = board->ao_nchan;
-		s->maxdata = 0xFFFF;	/*  16 bit DAC */
-		s->range_table = &range_bipolar10;
-		s->insn_write = me4000_ao_insn_write;
+	if (board->has_ao) {
+		s->type		= COMEDI_SUBD_AO;
+		s->subdev_flags	= SDF_WRITABLE | SDF_COMMON | SDF_GROUND;
+		s->n_chan	= 4;
+		s->maxdata	= 0xffff;
+		s->range_table	= &range_bipolar10;
+		s->insn_write	= me4000_ao_insn_write;
 
 		result = comedi_alloc_subdev_readback(s);
 		if (result)
 			return result;
 	} else {
-		s->type = COMEDI_SUBD_UNUSED;
+		s->type		= COMEDI_SUBD_UNUSED;
 	}
 
-    /*=========================================================================
-      Digital I/O subdevice
-      ========================================================================*/
-
+	/* Digital I/O subdevice */
 	s = &dev->subdevices[2];
-
-	if (board->dio_nchan) {
-		s->type = COMEDI_SUBD_DIO;
-		s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
-		s->n_chan = board->dio_nchan;
-		s->maxdata = 1;
-		s->range_table = &range_digital;
-		s->insn_bits = me4000_dio_insn_bits;
-		s->insn_config = me4000_dio_insn_config;
-	} else {
-		s->type = COMEDI_SUBD_UNUSED;
-	}
+	s->type		= COMEDI_SUBD_DIO;
+	s->subdev_flags	= SDF_READABLE | SDF_WRITABLE;
+	s->n_chan	= 32;
+	s->maxdata	= 1;
+	s->range_table	= &range_digital;
+	s->insn_bits	= me4000_dio_insn_bits;
+	s->insn_config	= me4000_dio_insn_config;
 
 	/*
 	 * Check for optoisolated ME-4000 version. If one the first
@@ -1367,7 +1207,7 @@
 	 */
 	if (!inl(dev->iobase + ME4000_DIO_DIR_REG)) {
 		s->io_bits |= 0xFF;
-		outl(ME4000_DIO_CTRL_BIT_MODE_0,
+		outl(ME4000_DIO_CTRL_MODE_0,
 		     dev->iobase + ME4000_DIO_DIR_REG);
 	}
 
@@ -1393,8 +1233,12 @@
 
 static void me4000_detach(struct comedi_device *dev)
 {
-	if (dev->iobase)
-		me4000_reset(dev);
+	if (dev->irq) {
+		struct me4000_private *devpriv = dev->private;
+
+		/* Disable interrupts on the PLX */
+		outl(0, devpriv->plx_regbase + PLX9052_INTCSR);
+	}
 	comedi_pci_detach(dev);
 }
 
@@ -1438,6 +1282,6 @@
 module_comedi_pci_driver(me4000_driver, me4000_pci_driver);
 
 MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Meilhaus ME-4000 series boards");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(ME4000_FIRMWARE);
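
The loops above now call a me4000_ai_get_sample() helper that is introduced elsewhere in this patch. Based on the inline code being removed here (a 16-bit read from the data FIFO followed by flipping the sign bit), a plausible sketch of that helper is shown below; the exact body in the patch may differ.

	static unsigned int me4000_ai_get_sample(struct comedi_device *dev,
						 struct comedi_subdevice *s)
	{
		unsigned int val;

		/* 's' is unused in this sketch; the real helper may use it */

		/* read a 16-bit sample from the analog input data FIFO */
		val = inl(dev->iobase + ME4000_AI_DATA_REG) & 0xffff;

		/* flip the sign bit: two's complement -> offset binary */
		return val ^ 0x8000;
	}
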
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index a208cb3..d9de83a 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -55,11 +55,7 @@
 
 	/* 8255 dio */
 	s = &dev->subdevices[0];
-	ret = subdev_8255_init(dev, s, NULL, 0x00);
-	if (ret)
-		return ret;
-
-	return 0;
+	return subdev_8255_init(dev, s, NULL, 0x00);
 }
 
 static struct comedi_driver driver_dio24 = {
diff --git a/drivers/staging/comedi/drivers/ni_usb6501.c b/drivers/staging/comedi/drivers/ni_usb6501.c
index 5f649f8..88de8da 100644
--- a/drivers/staging/comedi/drivers/ni_usb6501.c
+++ b/drivers/staging/comedi/drivers/ni_usb6501.c
@@ -172,7 +172,7 @@
 };
 
 static int ni6501_port_command(struct comedi_device *dev, int command,
-			       const u8 *port, u8 *bitmap)
+			       unsigned int val, u8 *bitmap)
 {
 	struct usb_device *usb = comedi_to_usb_dev(dev);
 	struct ni6501_private *devpriv = dev->private;
@@ -190,22 +190,22 @@
 		request_size = sizeof(READ_PORT_REQUEST);
 		response_size = sizeof(READ_PORT_RESPONSE);
 		memcpy(tx, READ_PORT_REQUEST, request_size);
-		tx[14] = port[0];
+		tx[14] = val & 0xff;
 		break;
 	case WRITE_PORT:
 		request_size = sizeof(WRITE_PORT_REQUEST);
 		response_size = sizeof(GENERIC_RESPONSE);
 		memcpy(tx, WRITE_PORT_REQUEST, request_size);
-		tx[14] = port[0];
-		tx[17] = bitmap[0];
+		tx[14] = val & 0xff;
+		tx[17] = *bitmap;
 		break;
 	case SET_PORT_DIR:
 		request_size = sizeof(SET_PORT_DIR_REQUEST);
 		response_size = sizeof(GENERIC_RESPONSE);
 		memcpy(tx, SET_PORT_DIR_REQUEST, request_size);
-		tx[14] = port[0];
-		tx[15] = port[1];
-		tx[16] = port[2];
+		tx[14] = val & 0xff;
+		tx[15] = (val >> 8) & 0xff;
+		tx[16] = (val >> 16) & 0xff;
 		break;
 	default:
 		ret = -EINVAL;
@@ -235,7 +235,7 @@
 	/* Check if results are valid */
 
 	if (command == READ_PORT) {
-		bitmap[0] = devpriv->usb_rx_buf[14];
+		*bitmap = devpriv->usb_rx_buf[14];
 		/* mask bitmap for comparing */
 		devpriv->usb_rx_buf[14] = 0x00;
 
@@ -349,17 +349,12 @@
 				  unsigned int *data)
 {
 	int ret;
-	u8 port[3];
 
 	ret = comedi_dio_insn_config(dev, s, insn, data, 0);
 	if (ret)
 		return ret;
 
-	port[0] = (s->io_bits) & 0xff;
-	port[1] = (s->io_bits >> 8) & 0xff;
-	port[2] = (s->io_bits >> 16) & 0xff;
-
-	ret = ni6501_port_command(dev, SET_PORT_DIR, port, NULL);
+	ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL);
 	if (ret)
 		return ret;
 
@@ -382,7 +377,7 @@
 		if (mask & (0xFF << port * 8)) {
 			bitmap = (s->state >> port * 8) & 0xFF;
 			ret = ni6501_port_command(dev, WRITE_PORT,
-						  &port, &bitmap);
+						  port, &bitmap);
 			if (ret)
 				return ret;
 		}
@@ -391,7 +386,7 @@
 	data[1] = 0;
 
 	for (port = 0; port < 3; port++) {
-		ret = ni6501_port_command(dev, READ_PORT, &port, &bitmap);
+		ret = ni6501_port_command(dev, READ_PORT, port, &bitmap);
 		if (ret)
 			return ret;
 		data[1] |= bitmap << port * 8;
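
With the signature change above, the third argument is a plain integer whose meaning depends on the command: for READ_PORT/WRITE_PORT it is just the port number (0-2), while for SET_PORT_DIR it carries all 24 direction bits at once. A worked example (the io_bits value is chosen purely for illustration):

	/*
	 * With s->io_bits == 0x0000ffff (ports 0 and 1 output, port 2 input),
	 * ni6501_port_command() splits 'val' into the request bytes:
	 *
	 *   tx[14] = val & 0xff;          -> 0xff
	 *   tx[15] = (val >> 8) & 0xff;   -> 0xff
	 *   tx[16] = (val >> 16) & 0xff;  -> 0x00
	 */
	ret = ni6501_port_command(dev, SET_PORT_DIR, s->io_bits, NULL);
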
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 781b321..a353d1b 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -305,7 +305,7 @@
 		chansegment[0] = chanlist[0];
 		for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
 			/*  we detect loop, this must by finish */
-			    if (chanlist[0] == chanlist[i])
+			if (chanlist[0] == chanlist[i])
 				break;
 			nowmustbechan =
 			    (CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 781918d..35f0f67 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -2852,11 +2852,7 @@
 	s->insn_read	= s626_enc_insn_read;
 	s->insn_write	= s626_enc_insn_write;
 
-	ret = s626_initialize(dev);
-	if (ret)
-		return ret;
-
-	return 0;
+	return s626_initialize(dev);
 }
 
 static void s626_detach(struct comedi_device *dev)
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index 83da162..5f19374 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -32,6 +32,7 @@
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/ktime.h>
 
 #include <linux/termios.h>
 #include <asm/ioctls.h>
@@ -121,9 +122,9 @@
 static void serial2002_tty_read_poll_wait(struct file *f, int timeout)
 {
 	struct poll_wqueues table;
-	struct timeval start, now;
+	ktime_t start, now;
 
-	do_gettimeofday(&start);
+	start = ktime_get();
 	poll_initwait(&table);
 	while (1) {
 		long elapsed;
@@ -134,9 +135,8 @@
 			    POLLHUP | POLLERR)) {
 			break;
 		}
-		do_gettimeofday(&now);
-		elapsed = 1000000 * (now.tv_sec - start.tv_sec) +
-			  now.tv_usec - start.tv_usec;
+		now = ktime_get();
+		elapsed = ktime_us_delta(now, start);
 		if (elapsed > timeout)
 			break;
 		set_current_state(TASK_INTERRUPTIBLE);
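
The timeout bookkeeping above now uses the monotonic ktime API instead of do_gettimeofday(). The pattern in isolation, using the same helpers as in the hunk:

	ktime_t start = ktime_get();
	s64 elapsed_us;

	/* ... wait for the condition being polled ... */

	/* microseconds elapsed since 'start' */
	elapsed_us = ktime_us_delta(ktime_get(), start);
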
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index eaa9add..649cf47 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1,6 +1,6 @@
 /*
  * usbduxsigma.c
- * Copyright (C) 2011-2014 Bernd Porr, mail@berndporr.me.uk
+ * Copyright (C) 2011-2015 Bernd Porr, mail@berndporr.me.uk
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -18,7 +18,7 @@
  * Description: University of Stirling USB DAQ & INCITE Technology Limited
  * Devices: [ITL] USB-DUX-SIGMA (usbduxsigma)
  * Author: Bernd Porr <mail@berndporr.me.uk>
- * Updated: 10 Oct 2014
+ * Updated: 20 July 2015
  * Status: stable
  */
 
@@ -39,6 +39,7 @@
  *   0.4: fixed D/A voltage range
  *   0.5: various bug fixes, health check at startup
  *   0.6: corrected wrong input range
+ *   0.7: rewrite code so that urb->interval is always 1
  */
 
 #include <linux/kernel.h>
@@ -122,7 +123,7 @@
 #define RETRIES 10
 
 /* bulk transfer commands to usbduxsigma */
-#define USBBUXSIGMA_AD_CMD		0
+#define USBBUXSIGMA_AD_CMD		9
 #define USBDUXSIGMA_DA_CMD		1
 #define USBDUXSIGMA_DIO_CFG_CMD		2
 #define USBDUXSIGMA_DIO_BITS_CMD	3
@@ -217,24 +218,28 @@
 	int ret;
 	int i;
 
-	devpriv->ai_counter--;
-	if (devpriv->ai_counter == 0) {
-		devpriv->ai_counter = devpriv->ai_timer;
+	if ((urb->actual_length > 0) && (urb->status != -EXDEV)) {
+		devpriv->ai_counter--;
+		if (devpriv->ai_counter == 0) {
+			devpriv->ai_counter = devpriv->ai_timer;
 
-		/* get the data from the USB bus and hand it over to comedi */
-		for (i = 0; i < cmd->chanlist_len; i++) {
-			/* transfer data, note first byte is the DIO state */
-			val = be32_to_cpu(devpriv->in_buf[i+1]);
-			val &= 0x00ffffff;	/* strip status byte */
-			val ^= 0x00800000;	/* convert to unsigned */
+			/* get the data from the USB bus
+			   and hand it over to comedi */
+			for (i = 0; i < cmd->chanlist_len; i++) {
+				/* transfer data,
+				   note first byte is the DIO state */
+				val = be32_to_cpu(devpriv->in_buf[i+1]);
+				val &= 0x00ffffff; /* strip status byte */
+				val ^= 0x00800000; /* convert to unsigned */
 
-			if (!comedi_buf_write_samples(s, &val, 1))
-				return;
+				if (!comedi_buf_write_samples(s, &val, 1))
+					return;
+			}
+
+			if (cmd->stop_src == TRIG_COUNT &&
+			    async->scans_done >= cmd->stop_arg)
+				async->events |= COMEDI_CB_EOA;
 		}
-
-		if (cmd->stop_src == TRIG_COUNT &&
-		    async->scans_done >= cmd->stop_arg)
-			async->events |= COMEDI_CB_EOA;
 	}
 
 	/* if command is still running, resubmit urb */
@@ -374,10 +379,7 @@
 		urb->transfer_buffer_length = SIZEOUTBUF;
 		urb->dev = comedi_to_usb_dev(dev);
 		urb->status = 0;
-		if (devpriv->high_speed)
-			urb->interval = 8;	/* uframes */
-		else
-			urb->interval = 1;	/* frames */
+		urb->interval = 1;	/* (u)frames */
 		urb->number_of_packets = 1;
 		urb->iso_frame_desc[0].offset = 0;
 		urb->iso_frame_desc[0].length = SIZEOUTBUF;
@@ -441,7 +443,6 @@
 				   int input_urb)
 {
 	struct usb_device *usb = comedi_to_usb_dev(dev);
-	struct usbduxsigma_private *devpriv = dev->private;
 	struct urb *urb;
 	int ret;
 	int i;
@@ -452,7 +453,7 @@
 
 		/* in case of a resubmission after an unlink... */
 		if (input_urb)
-			urb->interval = devpriv->ai_interval;
+			urb->interval = 1;
 		urb->context = dev;
 		urb->dev = usb;
 		urb->status = 0;
@@ -481,6 +482,7 @@
 	struct usbduxsigma_private *devpriv = dev->private;
 	int high_speed = devpriv->high_speed;
 	int interval = usbduxsigma_chans_to_interval(cmd->chanlist_len);
+	unsigned int tmp;
 	int err = 0;
 
 	/* Step 1 : check if triggers are trivially valid */
@@ -508,35 +510,20 @@
 
 	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
 
-	if (cmd->scan_begin_src == TRIG_FOLLOW)	/* internal trigger */
-		err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
-	if (cmd->scan_begin_src == TRIG_TIMER) {
-		unsigned int tmp;
-
-		if (high_speed) {
-			/*
-			 * In high speed mode microframes are possible.
-			 * However, during one microframe we can roughly
-			 * sample two channels. Thus, the more channels
-			 * are in the channel list the more time we need.
-			 */
-			err |= comedi_check_trigger_arg_min(&cmd->
-							    scan_begin_arg,
-							    (1000000 / 8 *
-							     interval));
-
-			tmp = (cmd->scan_begin_arg / 125000) * 125000;
-		} else {
-			/* full speed */
-			/* 1kHz scans every USB frame */
-			err |= comedi_check_trigger_arg_min(&cmd->
-							    scan_begin_arg,
-							    1000000);
-
-			tmp = (cmd->scan_begin_arg / 1000000) * 1000000;
-		}
-		err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
+	if (high_speed) {
+		/*
+		 * In high speed mode microframes are possible.
+		 * However, during one microframe we can roughly
+		 * sample two channels. Thus, the more channels
+		 * are in the channel list the more time we need.
+		 */
+		err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
+						    (125000 * interval));
+	} else {
+		/* full speed */
+		/* 1kHz scans every USB frame */
+		err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
+						    1000000);
 	}
 
 	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
@@ -552,21 +539,8 @@
 
 	/* Step 4: fix up any arguments */
 
-	if (high_speed) {
-		/*
-		 * every 2 channels get a time window of 125us. Thus, if we
-		 * sample all 16 channels we need 1ms. If we sample only one
-		 * channel we need only 125us
-		 */
-		devpriv->ai_interval = interval;
-		devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
-	} else {
-		/* interval always 1ms */
-		devpriv->ai_interval = 1;
-		devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
-	}
-	if (devpriv->ai_timer < 1)
-		err |= -EINVAL;
+	tmp = rounddown(cmd->scan_begin_arg, high_speed ? 125000 : 1000000);
+	err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
 
 	if (err)
 		return 4;
@@ -668,19 +642,36 @@
 
 	down(&devpriv->sem);
 
+	if (devpriv->high_speed) {
+		/*
+		 * every 2 channels get a time window of 125us. Thus, if we
+		 * sample all 16 channels we need 1ms. If we sample only one
+		 * channel we need only 125us
+		 */
+		unsigned int interval = usbduxsigma_chans_to_interval(len);
+
+		devpriv->ai_interval = interval;
+		devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
+	} else {
+		/* interval always 1ms */
+		devpriv->ai_interval = 1;
+		devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
+	}
+
 	for (i = 0; i < len; i++) {
 		unsigned int chan  = CR_CHAN(cmd->chanlist[i]);
 
 		create_adc_command(chan, &muxsg0, &muxsg1);
 	}
 
-	devpriv->dux_commands[1] = len;  /* num channels per time step */
-	devpriv->dux_commands[2] = 0x12; /* CONFIG0 */
-	devpriv->dux_commands[3] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */
-	devpriv->dux_commands[4] = 0x00; /* CONFIG3: diff. channels off */
-	devpriv->dux_commands[5] = muxsg0;
-	devpriv->dux_commands[6] = muxsg1;
-	devpriv->dux_commands[7] = sysred;
+	devpriv->dux_commands[1] = devpriv->ai_interval;
+	devpriv->dux_commands[2] = len;  /* num channels per time step */
+	devpriv->dux_commands[3] = 0x12; /* CONFIG0 */
+	devpriv->dux_commands[4] = 0x03; /* CONFIG1: 23kHz sample, delay 0us */
+	devpriv->dux_commands[5] = 0x00; /* CONFIG3: diff. channels off */
+	devpriv->dux_commands[6] = muxsg0;
+	devpriv->dux_commands[7] = muxsg1;
+	devpriv->dux_commands[8] = sysred;
 
 	ret = usbbuxsigma_send_cmd(dev, USBBUXSIGMA_AD_CMD);
 	if (ret < 0) {
@@ -848,29 +839,22 @@
 				  struct comedi_cmd *cmd)
 {
 	struct usbduxsigma_private *devpriv = dev->private;
+	unsigned int tmp;
 	int err = 0;
-	int high_speed;
-	unsigned int flags;
-
-	/* high speed conversions are not used yet */
-	high_speed = 0;		/* (devpriv->high_speed) */
 
 	/* Step 1 : check if triggers are trivially valid */
 
 	err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
 
-	if (high_speed) {
-		/*
-		 * start immediately a new scan
-		 * the sampling rate is set by the coversion rate
-		 */
-		flags = TRIG_FOLLOW;
-	} else {
-		/* start a new scan (output at once) with a timer */
-		flags = TRIG_TIMER;
-	}
-	err |= comedi_check_trigger_src(&cmd->scan_begin_src, flags);
-
+	/*
+	 * For now, always use "scan" timing with all channels updated at once
+	 * (cmd->scan_begin_src == TRIG_TIMER, cmd->convert_src == TRIG_NOW).
+	 *
+	 * In a future version, "convert" timing with channels updated
+	 * individually may be supported in high speed mode
+	 * (cmd->scan_begin_src == TRIG_FOLLOW, cmd->convert_src == TRIG_TIMER).
+	 */
+	err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
 	err |= comedi_check_trigger_src(&cmd->convert_src, TRIG_NOW);
 	err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
 	err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
@@ -894,17 +878,7 @@
 
 	err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
 
-	if (cmd->scan_begin_src == TRIG_FOLLOW)	/* internal trigger */
-		err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
-	if (cmd->scan_begin_src == TRIG_TIMER) {
-		err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg,
-						    1000000);
-	}
-
-	/* not used now, is for later use */
-	if (cmd->convert_src == TRIG_TIMER)
-		err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 125000);
+	err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, 1000000);
 
 	err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
 					   cmd->chanlist_len);
@@ -919,19 +893,8 @@
 
 	/* Step 4: fix up any arguments */
 
-	/* we count in timer steps */
-	if (high_speed) {
-		/* timing of the conversion itself: every 125 us */
-		devpriv->ao_timer = cmd->convert_arg / 125000;
-	} else {
-		/*
-		 * timing of the scan: every 1ms
-		 * we get all channels at once
-		 */
-		devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
-	}
-	if (devpriv->ao_timer < 1)
-		err |= -EINVAL;
+	tmp = rounddown(cmd->scan_begin_arg, 1000000);
+	err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
 
 	if (err)
 		return 4;
@@ -948,6 +911,14 @@
 
 	down(&devpriv->sem);
 
+	/*
+	 * For now, only "scan" timing is supported.  A future version may
+	 * support "convert" timing in high speed mode.
+	 *
+	 * Timing of the scan: every 1ms all channels updated at once.
+	 */
+	devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+
 	devpriv->ao_counter = devpriv->ao_timer;
 
 	if (cmd->start_src == TRIG_NOW) {
@@ -1427,10 +1398,7 @@
 		urb->transfer_buffer_length = SIZEOUTBUF;
 		urb->iso_frame_desc[0].offset = 0;
 		urb->iso_frame_desc[0].length = SIZEOUTBUF;
-		if (devpriv->high_speed)
-			urb->interval = 8;	/* uframes */
-		else
-			urb->interval = 1;	/* frames */
+		urb->interval = 1;	/* (u)frames */
 	}
 
 	if (devpriv->pwm_buf_sz) {
@@ -1653,7 +1621,7 @@
 };
 module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver);
 
-MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
-MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com");
+MODULE_AUTHOR("Bernd Porr, mail@berndporr.me.uk");
+MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- mail@berndporr.me.uk");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE);
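
Both cmdtest routines now normalise cmd->scan_begin_arg in step 4 with rounddown() plus comedi_check_trigger_arg_is() instead of keeping private ai/ao timer checks. A worked example for the full-speed case, with values chosen here purely for illustration:

	unsigned int arg = 2500000;	/* requested scan period: 2.5 ms */
	unsigned int tmp;
	int err = 0;

	/* full speed: one scan per 1 ms USB frame, so round down to 1 ms */
	tmp = rounddown(arg, 1000000);	/* -> 2000000 */

	/* rewrites 'arg' to 2000000 and reports the mismatch as -EINVAL */
	err |= comedi_check_trigger_arg_is(&arg, tmp);
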
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 6a393b2..ce3a58a 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -102,7 +102,18 @@
  * @s: comedi_subdevice struct
  * @n: number of elements in the chanlist
  * @chanlist: the chanlist to validate
-*/
+ *
+ * Each element consists of a channel number, a range index, an analog
+ * reference type and some flags, all packed into an unsigned int.
+ *
+ * This checks that the channel number and range index are supported by
+ * the comedi subdevice.  It does not check whether the analog reference
+ * type and the flags are supported.  Drivers that care should check those
+ * themselves.
+ *
+ * Return: %0 if all @chanlist elements are valid (success),
+ *         %-EINVAL if one or more elements are invalid.
+ */
 int comedi_check_chanlist(struct comedi_subdevice *s, int n,
 			  unsigned int *chanlist)
 {
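
For reference, chanlist elements of the form described in the new kernel-doc are normally built and taken apart with the CR_* macros from comedi.h; a small illustration (not part of this patch, channel and range values are arbitrary):

	unsigned int spec = CR_PACK(3, 0, AREF_GROUND);

	unsigned int chan  = CR_CHAN(spec);	/* -> 3 */
	unsigned int range = CR_RANGE(spec);	/* -> 0 */
	unsigned int aref  = CR_AREF(spec);	/* -> AREF_GROUND */
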
diff --git a/drivers/staging/dgap/dgap.c b/drivers/staging/dgap/dgap.c
index 26b0446..9112dd2 100644
--- a/drivers/staging/dgap/dgap.c
+++ b/drivers/staging/dgap/dgap.c
@@ -4953,9 +4953,8 @@
 		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
 		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
 
-		rc = put_user(C_CLOCAL(tty) ? 1 : 0,
+		return put_user(C_CLOCAL(tty) ? 1 : 0,
 				(unsigned long __user *) arg);
-		return rc;
 
 	case TIOCSSOFTCAR:
 		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
@@ -7004,25 +7003,29 @@
 	kfree(brd);
 }
 
-static void dgap_remove_one(struct pci_dev *dev)
+static void dgap_stop(bool removesys, struct pci_driver *drv)
 {
-	unsigned int i;
-	ulong lock_flags;
-	struct pci_driver *drv = to_pci_driver(dev->dev.driver);
+	unsigned long lock_flags;
 
 	spin_lock_irqsave(&dgap_poll_lock, lock_flags);
 	dgap_poll_stop = 1;
 	spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
 
-	/* Turn off poller right away. */
 	del_timer_sync(&dgap_poll_timer);
-
-	dgap_remove_driver_sysfiles(drv);
+	if (removesys)
+		dgap_remove_driver_sysfiles(drv);
 
 	device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
 	class_destroy(dgap_class);
 	unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+}
 
+static void dgap_remove_one(struct pci_dev *dev)
+{
+	unsigned int i;
+	struct pci_driver *drv = to_pci_driver(dev->dev.driver);
+
+	dgap_stop(true, drv);
 	for (i = 0; i < dgap_numboards; ++i) {
 		dgap_remove_ports_sysfiles(dgap_board[i]);
 		dgap_cleanup_tty(dgap_board[i]);
@@ -7096,21 +7099,6 @@
 	return rc;
 }
 
-static void dgap_stop(void)
-{
-	unsigned long lock_flags;
-
-	spin_lock_irqsave(&dgap_poll_lock, lock_flags);
-	dgap_poll_stop = 1;
-	spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);
-
-	del_timer_sync(&dgap_poll_timer);
-
-	device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
-	class_destroy(dgap_class);
-	unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
-}
-
 /************************************************************************
  *
  * Driver load/unload functions
@@ -7133,8 +7121,10 @@
 		return rc;
 
 	rc = pci_register_driver(&dgap_driver);
-	if (rc)
-		goto err_stop;
+	if (rc) {
+		dgap_stop(false, NULL);
+		return rc;
+	}
 
 	rc = dgap_create_driver_sysfiles(&dgap_driver);
 	if (rc)
@@ -7146,9 +7136,6 @@
 
 err_unregister:
 	pci_unregister_driver(&dgap_driver);
-err_stop:
-	dgap_stop();
-
 	return rc;
 }
 
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index d04671f..06ece51 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -21,9 +21,9 @@
 #ifndef __DGNC_DRIVER_H
 #define __DGNC_DRIVER_H
 
-#include <linux/types.h>	/* To pick up the varions Linux types */
-#include <linux/tty.h>	  /* To pick up the various tty structs/defines */
-#include <linux/interrupt.h>	/* For irqreturn_t type */
+#include <linux/types.h>
+#include <linux/tty.h>
+#include <linux/interrupt.h>
 
 #include "digi.h"		/* Digi specific ioctl header */
 #include "dgnc_sysfs.h"		/* Support for SYSFS */
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
index be0f90a..7be7d55 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.h
+++ b/drivers/staging/dgnc/dgnc_sysfs.h
@@ -25,16 +25,16 @@
 struct pci_driver;
 struct class_device;
 
-extern void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
-extern void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
+void dgnc_create_ports_sysfiles(struct dgnc_board *bd);
+void dgnc_remove_ports_sysfiles(struct dgnc_board *bd);
 
-extern void dgnc_create_driver_sysfiles(struct pci_driver *);
-extern void dgnc_remove_driver_sysfiles(struct pci_driver *);
+void dgnc_create_driver_sysfiles(struct pci_driver *);
+void dgnc_remove_driver_sysfiles(struct pci_driver *);
 
-extern int dgnc_tty_class_init(void);
-extern int dgnc_tty_class_destroy(void);
+int dgnc_tty_class_init(void);
+int dgnc_tty_class_destroy(void);
 
-extern void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
-extern void dgnc_remove_tty_sysfs(struct device *c);
+void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
+void dgnc_remove_tty_sysfs(struct device *c);
 
 #endif
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 4178d96..b6b76ff 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -3153,36 +3153,46 @@
 	.ioctl			= nbu2ss_gad_ioctl,
 };
 
-static const char g_ep0_name[] = "ep0";
-static const char g_ep1_name[] = "ep1-bulk";
-static const char g_ep2_name[] = "ep2-bulk";
-static const char g_ep3_name[] = "ep3in-int";
-static const char g_ep4_name[] = "ep4-iso";
-static const char g_ep5_name[] = "ep5-iso";
-static const char g_ep6_name[] = "ep6-bulk";
-static const char g_ep7_name[] = "ep7-bulk";
-static const char g_ep8_name[] = "ep8in-int";
-static const char g_ep9_name[] = "ep9-iso";
-static const char g_epa_name[] = "epa-iso";
-static const char g_epb_name[] = "epb-bulk";
-static const char g_epc_name[] = "epc-nulk";
-static const char g_epd_name[] = "epdin-int";
+static const struct {
+	const char *name;
+	const struct usb_ep_caps caps;
+} ep_info[NUM_ENDPOINTS] = {
+#define EP_INFO(_name, _caps) \
+	{ \
+		.name = _name, \
+		.caps = _caps, \
+	}
 
-static const char *gp_ep_name[NUM_ENDPOINTS] = {
-	g_ep0_name,
-	g_ep1_name,
-	g_ep2_name,
-	g_ep3_name,
-	g_ep4_name,
-	g_ep5_name,
-	g_ep6_name,
-	g_ep7_name,
-	g_ep8_name,
-	g_ep9_name,
-	g_epa_name,
-	g_epb_name,
-	g_epc_name,
-	g_epd_name,
+	EP_INFO("ep0",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep1-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep2-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep3in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep4-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep5-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep6-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep7-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep8in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep9-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("epa-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("epb-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("epc-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("epdin-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+
+#undef EP_INFO
 };
 
 /*-------------------------------------------------------------------------*/
@@ -3200,10 +3210,12 @@
 		ep->desc = NULL;
 
 		ep->ep.driver_data = NULL;
-		ep->ep.name = gp_ep_name[i];
+		ep->ep.name = ep_info[i].name;
+		ep->ep.caps = ep_info[i].caps;
 		ep->ep.ops = &nbu2ss_ep_ops;
 
-		ep->ep.maxpacket = (i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
+		usb_ep_set_maxpacket_limit(&ep->ep,
+				i == 0 ? EP0_PACKETSIZE : EP_PACKETSIZE);
 
 		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 		INIT_LIST_HEAD(&ep->queue);
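
The switch from a direct ep->ep.maxpacket assignment to usb_ep_set_maxpacket_limit() also records the hardware limit for the gadget core; the helper is essentially the following (paraphrased from include/linux/usb/gadget.h):

	static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
						      unsigned maxpacket_limit)
	{
		ep->maxpacket_limit = maxpacket_limit;
		ep->maxpacket = maxpacket_limit;
	}
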
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index d401878..d473010 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -1,6 +1,7 @@
 menuconfig FB_TFT
 	tristate "Support for small TFT LCD display modules"
-	depends on FB && SPI && GPIOLIB
+	depends on FB && SPI
+	depends on GPIOLIB || COMPILE_TEST
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
@@ -152,6 +153,12 @@
 	help
 	  Generic Framebuffer support for TLS8204
 
+config FB_TFT_UC1611
+	tristate "FB driver for the UC1611 LCD controller"
+	depends on FB_TFT
+	help
+	  Generic Framebuffer support for UC1611
+
 config FB_TFT_UC1701
 	tristate "FB driver for the UC1701 LCD Controller"
 	depends on FB_TFT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index 554b526..b26efdc 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_FB_TFT_ST7735R)     += fb_st7735r.o
 obj-$(CONFIG_FB_TFT_TINYLCD)     += fb_tinylcd.o
 obj-$(CONFIG_FB_TFT_TLS8204)     += fb_tls8204.o
+obj-$(CONFIG_FB_TFT_UC1611)      += fb_uc1611.o
 obj-$(CONFIG_FB_TFT_UC1701)      += fb_uc1701.o
 obj-$(CONFIG_FB_TFT_UPD161704)   += fb_upd161704.o
 obj-$(CONFIG_FB_TFT_WATTEROTT)   += fb_watterott.o
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
new file mode 100644
index 0000000..32f3a9d
--- /dev/null
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -0,0 +1,350 @@
+/*
+ * FB driver for the UltraChip UC1611 LCD controller
+ *
+ * The display is 4-bit grayscale (16 shades) 240x160.
+ *
+ * Copyright (C) 2015 Henri Chain
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include "fbtft.h"
+
+#define DRVNAME		"fb_uc1611"
+#define WIDTH		240
+#define HEIGHT		160
+#define BPP		8
+#define FPS		40
+
+/*
+ * LCD voltage is a combination of ratio, gain, pot and temp
+ *
+ * V_LCD = V_BIAS * ratio
+ * V_LCD = (C_V0 + C_PM × pot) * (1 + (T - 25) * temp)
+ * C_V0 and C_PM depend on ratio and gain
+ * T is ambient temperature
+ */
+
+/* BR -> actual ratio: 0-3 -> 5, 10, 11, 13 */
+static unsigned ratio = 2;
+module_param(ratio, uint, 0);
+MODULE_PARM_DESC(ratio, "BR[1:0] Bias voltage ratio: 0-3 (default: 2)");
+
+static unsigned gain = 3;
+module_param(gain, uint, 0);
+MODULE_PARM_DESC(gain, "GN[1:0] Bias voltage gain: 0-3 (default: 3)");
+
+static unsigned pot = 16;
+module_param(pot, uint, 0);
+MODULE_PARM_DESC(pot, "PM[6:0] Bias voltage pot.: 0-63 (default: 16)");
+
+/* TC -> % compensation per deg C: 0-3 -> -.05, -.10, -.15, -.20 */
+static unsigned temp;
+module_param(temp, uint, 0);
+MODULE_PARM_DESC(temp, "TC[1:0] Temperature compensation: 0-3 (default: 0)");
+
+/* PC[1:0] -> LCD capacitance: 0-3 -> <20nF, 20-28 nF, 29-40 nF, 40-56 nF */
+static unsigned load = 1;
+module_param(load, uint, 0);
+MODULE_PARM_DESC(load, "PC[1:0] Panel Loading: 0-3 (default: 1)");
+
+/* PC[3:2] -> V_LCD: 0, 1, 3 -> ext., int. with ratio = 5, int. standard */
+static unsigned pump = 3;
+module_param(pump, uint, 0);
+MODULE_PARM_DESC(pump, "PC[3:2] Pump control: 0,1,3 (default: 3)");
+
+static int init_display(struct fbtft_par *par)
+{
+	int ret;
+
+	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+	/* Set CS active high */
+	par->spi->mode |= SPI_CS_HIGH;
+	ret = par->spi->master->setup(par->spi);
+	if (ret) {
+		dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
+		return ret;
+	}
+
+	/* Reset controller */
+	write_reg(par, 0xE2);
+
+	/* Set bias ratio */
+	write_reg(par, 0xE8 | (ratio & 0x03));
+
+	/* Set bias gain and potentiometer */
+	write_reg(par, 0x81);
+	write_reg(par, (gain & 0x03) << 6 | (pot & 0x3F));
+
+	/* Set temperature compensation */
+	write_reg(par, 0x24 | (temp & 0x03));
+
+	/* Set panel loading */
+	write_reg(par, 0x28 | (load & 0x03));
+
+	/* Set pump control */
+	write_reg(par, 0x2C | (pump & 0x03));
+
+	/* Set inverse display */
+	write_reg(par, 0xA6 | (0x01 & 0x01));
+
+	/* Set 4-bit grayscale mode */
+	write_reg(par, 0xD0 | (0x02 & 0x03));
+
+	/* Set Display enable */
+	write_reg(par, 0xA8 | 0x07);
+
+	return 0;
+}
+
+static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
+{
+	fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
+		      "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n",
+		      __func__, xs, ys, xe, ye);
+
+	switch (par->info->var.rotate) {
+	case 90:
+	case 270:
+		/* Set column address */
+		write_reg(par, ys & 0x0F);
+		write_reg(par, 0x10 | (ys >> 4));
+
+		/* Set page address (divide xs by 2) (not used by driver) */
+		write_reg(par, 0x60 | ((xs >> 1) & 0x0F));
+		write_reg(par, 0x70 | (xs >> 5));
+		break;
+	default:
+		/* Set column address (not used by driver) */
+		write_reg(par, xs & 0x0F);
+		write_reg(par, 0x10 | (xs >> 4));
+
+		/* Set page address (divide ys by 2) */
+		write_reg(par, 0x60 | ((ys >> 1) & 0x0F));
+		write_reg(par, 0x70 | (ys >> 5));
+		break;
+	}
+}
+
+static int blank(struct fbtft_par *par, bool on)
+{
+	fbtft_par_dbg(DEBUG_BLANK, par, "%s(blank=%s)\n",
+		      __func__, on ? "true" : "false");
+
+	if (on)
+		write_reg(par, 0xA8 | 0x00);
+	else
+		write_reg(par, 0xA8 | 0x07);
+	return 0;
+}
+
+static int set_var(struct fbtft_par *par)
+{
+	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "%s()\n", __func__);
+
+	/* par->info->fix.visual = FB_VISUAL_PSEUDOCOLOR; */
+	par->info->var.grayscale = 1;
+	par->info->var.red.offset    = 0;
+	par->info->var.red.length    = 8;
+	par->info->var.green.offset  = 0;
+	par->info->var.green.length  = 8;
+	par->info->var.blue.offset   = 0;
+	par->info->var.blue.length   = 8;
+	par->info->var.transp.offset = 0;
+	par->info->var.transp.length = 0;
+
+	switch (par->info->var.rotate) {
+	case 90:
+		/* Set RAM address control */
+		write_reg(par, 0x88
+			| (0x0 & 0x1) << 2 /* Increment positively */
+			| (0x1 & 0x1) << 1 /* Increment page first */
+			| (0x1 & 0x1));    /* Wrap around (default) */
+
+		/* Set LCD mapping */
+		write_reg(par, 0xC0
+			| (0x0 & 0x1) << 2 /* Mirror Y OFF */
+			| (0x0 & 0x1) << 1 /* Mirror X OFF */
+			| (0x0 & 0x1));    /* MS nibble last (default) */
+		break;
+	case 180:
+		/* Set RAM address control */
+		write_reg(par, 0x88
+			| (0x0 & 0x1) << 2 /* Increment positively */
+			| (0x0 & 0x1) << 1 /* Increment column first */
+			| (0x1 & 0x1));    /* Wrap around (default) */
+
+		/* Set LCD mapping */
+		write_reg(par, 0xC0
+			| (0x1 & 0x1) << 2 /* Mirror Y ON */
+			| (0x0 & 0x1) << 1 /* Mirror X OFF */
+			| (0x0 & 0x1));    /* MS nibble last (default) */
+		break;
+	case 270:
+		/* Set RAM address control */
+		write_reg(par, 0x88
+			| (0x0 & 0x1) << 2 /* Increment positively */
+			| (0x1 & 0x1) << 1 /* Increment page first */
+			| (0x1 & 0x1));    /* Wrap around (default) */
+
+		/* Set LCD mapping */
+		write_reg(par, 0xC0
+			| (0x1 & 0x1) << 2 /* Mirror Y ON */
+			| (0x1 & 0x1) << 1 /* Mirror X ON */
+			| (0x0 & 0x1));    /* MS nibble last (default) */
+		break;
+	default:
+		/* Set RAM address control */
+		write_reg(par, 0x88
+			| (0x0 & 0x1) << 2 /* Increment positively */
+			| (0x0 & 0x1) << 1 /* Increment column first */
+			| (0x1 & 0x1));    /* Wrap around (default) */
+
+		/* Set LCD mapping */
+		write_reg(par, 0xC0
+			| (0x0 & 0x1) << 2 /* Mirror Y OFF */
+			| (0x1 & 0x1) << 1 /* Mirror X ON */
+			| (0x0 & 0x1));    /* MS nibble last (default) */
+		break;
+	}
+
+	return 0;
+}
+
+static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
+{
+	u8 *vmem8 = (u8 *)(par->info->screen_base);
+	u8 *buf8 = (u8 *)(par->txbuf.buf);
+	u16 *buf16 = (u16 *)(par->txbuf.buf);
+	int line_length = par->info->fix.line_length;
+	int y_start = (offset / line_length);
+	int y_end = (offset + len - 1) / line_length;
+	int x, y, i;
+	int ret = 0;
+
+	fbtft_par_dbg(DEBUG_WRITE_VMEM, par, "%s()\n", __func__);
+
+	switch (par->pdata->display.buswidth) {
+	case 8:
+		switch (par->info->var.rotate) {
+		case 90:
+		case 270:
+			i = y_start * line_length;
+			for (y = y_start; y <= y_end; y++) {
+				for (x = 0; x < line_length; x += 2) {
+					*buf8 = vmem8[i] >> 4;
+					*buf8 |= vmem8[i + 1] & 0xF0;
+					buf8++;
+					i += 2;
+				}
+			}
+			break;
+		default:
+			/* Must be even because pages are two lines */
+			y_start &= 0xFE;
+			i = y_start * line_length;
+			for (y = y_start; y <= y_end; y += 2) {
+				for (x = 0; x < line_length; x++) {
+					*buf8 = vmem8[i] >> 4;
+					*buf8 |= vmem8[i + line_length] & 0xF0;
+					buf8++;
+					i++;
+				}
+				i += line_length;
+			}
+			break;
+		}
+		gpio_set_value(par->gpio.dc, 1);
+
+		/* Write data */
+		ret = par->fbtftops.write(par, par->txbuf.buf, len / 2);
+		break;
+	case 9:
+		switch (par->info->var.rotate) {
+		case 90:
+		case 270:
+			i = y_start * line_length;
+			for (y = y_start; y <= y_end; y++) {
+				for (x = 0; x < line_length; x += 2) {
+					*buf16 = 0x100;
+					*buf16 |= vmem8[i] >> 4;
+					*buf16 |= vmem8[i + 1] & 0xF0;
+					buf16++;
+					i += 2;
+				}
+			}
+			break;
+		default:
+			/* Must be even because pages are two lines */
+			y_start &= 0xFE;
+			i = y_start * line_length;
+			for (y = y_start; y <= y_end; y += 2) {
+				for (x = 0; x < line_length; x++) {
+					*buf16 = 0x100;
+					*buf16 |= vmem8[i] >> 4;
+					*buf16 |= vmem8[i + line_length] & 0xF0;
+					buf16++;
+					i++;
+				}
+				i += line_length;
+			}
+			break;
+		}
+
+		/* Write data */
+		ret = par->fbtftops.write(par, par->txbuf.buf, len);
+		break;
+	default:
+		dev_err(par->info->device, "unsupported buswidth %d\n",
+			par->pdata->display.buswidth);
+	}
+
+	if (ret < 0)
+		dev_err(par->info->device, "write failed and returned: %d\n",
+			ret);
+
+	return ret;
+}
+
+static struct fbtft_display display = {
+	.txbuflen = -1,
+	.regwidth = 8,
+	.width = WIDTH,
+	.height = HEIGHT,
+	.bpp = BPP,
+	.fps = FPS,
+	.fbtftops = {
+		.write_vmem = write_vmem,
+		.init_display = init_display,
+		.set_addr_win = set_addr_win,
+		.set_var = set_var,
+		.blank = blank,
+	},
+};
+
+FBTFT_REGISTER_DRIVER(DRVNAME, "ultrachip,uc1611", &display);
+
+MODULE_ALIAS("spi:" DRVNAME);
+MODULE_ALIAS("platform:" DRVNAME);
+MODULE_ALIAS("spi:uc1611");
+MODULE_ALIAS("platform:uc1611");
+
+MODULE_DESCRIPTION("FB driver for the UC1611 LCD controller");
+MODULE_AUTHOR("Henri Chain");
+MODULE_LICENSE("GPL");
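
In write_vmem() above, each output byte carries two 4-bit pixels. For the default (non-rotated) orientation a page spans two adjacent lines, so the packing of one pixel pair reduces to the following standalone restatement of the loop body (the function name is chosen here only for illustration):

	/* low nibble from the upper line, high nibble from the line below */
	static u8 uc1611_pack_pair(u8 upper, u8 lower)
	{
		return (upper >> 4) | (lower & 0xF0);
	}
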
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 9cc8141..23392eb 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -677,13 +677,13 @@
  *
  */
 struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
-					struct device *dev)
+					struct device *dev,
+					struct fbtft_platform_data *pdata)
 {
 	struct fb_info *info;
 	struct fbtft_par *par;
 	struct fb_ops *fbops = NULL;
 	struct fb_deferred_io *fbdefio = NULL;
-	struct fbtft_platform_data *pdata = dev->platform_data;
 	u8 *vmem = NULL;
 	void *txbuf = NULL;
 	void *buf = NULL;
@@ -828,7 +828,7 @@
 
 	par = info->par;
 	par->info = info;
-	par->pdata = dev->platform_data;
+	par->pdata = pdata;
 	par->debug = display->debug;
 	par->buf = buf;
 	spin_lock_init(&par->dirty_lock);
@@ -1076,6 +1076,11 @@
 	p = of_prop_next_u32(prop, NULL, &val);
 	if (!p)
 		return -EINVAL;
+
+	par->fbtftops.reset(par);
+	if (par->gpio.cs != -1)
+		gpio_set_value(par->gpio.cs, 0);  /* Activate chip */
+
 	while (p) {
 		if (val & FBTFT_OF_INIT_CMD) {
 			val &= 0xFFFF;
@@ -1260,12 +1265,11 @@
  */
 static int fbtft_verify_gpios(struct fbtft_par *par)
 {
-	struct fbtft_platform_data *pdata;
+	struct fbtft_platform_data *pdata = par->pdata;
 	int i;
 
 	fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
 
-	pdata = par->info->device->platform_data;
 	if (pdata->display.buswidth != 9 && par->startbyte == 0 &&
 							par->gpio.dc < 0) {
 		dev_err(par->info->device,
@@ -1383,10 +1387,9 @@
 		pdata = fbtft_probe_dt(dev);
 		if (IS_ERR(pdata))
 			return PTR_ERR(pdata);
-		dev->platform_data = pdata;
 	}
 
-	info = fbtft_framebuffer_alloc(display, dev);
+	info = fbtft_framebuffer_alloc(display, dev, pdata);
 	if (!info)
 		return -ENOMEM;
 
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 7d817eb..7e9a506 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -262,39 +262,38 @@
 	par->fbtftops.write_register(par, NUMARGS(__VA_ARGS__), __VA_ARGS__)
 
 /* fbtft-core.c */
-extern void fbtft_dbg_hex(const struct device *dev,
-	int groupsize, void *buf, size_t len, const char *fmt, ...);
-extern struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
-	struct device *dev);
-extern void fbtft_framebuffer_release(struct fb_info *info);
-extern int fbtft_register_framebuffer(struct fb_info *fb_info);
-extern int fbtft_unregister_framebuffer(struct fb_info *fb_info);
-extern void fbtft_register_backlight(struct fbtft_par *par);
-extern void fbtft_unregister_backlight(struct fbtft_par *par);
-extern int fbtft_init_display(struct fbtft_par *par);
-extern int fbtft_probe_common(struct fbtft_display *display,
-	struct spi_device *sdev, struct platform_device *pdev);
-extern int fbtft_remove_common(struct device *dev, struct fb_info *info);
+void fbtft_dbg_hex(const struct device *dev, int groupsize,
+		   void *buf, size_t len, const char *fmt, ...);
+struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
+					struct device *dev,
+					struct fbtft_platform_data *pdata);
+void fbtft_framebuffer_release(struct fb_info *info);
+int fbtft_register_framebuffer(struct fb_info *fb_info);
+int fbtft_unregister_framebuffer(struct fb_info *fb_info);
+void fbtft_register_backlight(struct fbtft_par *par);
+void fbtft_unregister_backlight(struct fbtft_par *par);
+int fbtft_init_display(struct fbtft_par *par);
+int fbtft_probe_common(struct fbtft_display *display, struct spi_device *sdev,
+		       struct platform_device *pdev);
+int fbtft_remove_common(struct device *dev, struct fb_info *info);
 
 /* fbtft-io.c */
-extern int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_spi_emulate_9(struct fbtft_par *par,
-	void *buf, size_t len);
-extern int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len);
-extern int fbtft_write_gpio16_wr_latched(struct fbtft_par *par,
-	void *buf, size_t len);
+int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_spi_emulate_9(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_read_spi(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len);
+int fbtft_write_gpio16_wr_latched(struct fbtft_par *par, void *buf, size_t len);
 
 /* fbtft-bus.c */
-extern int fbtft_write_vmem8_bus8(struct fbtft_par *par, size_t offset, size_t len);
-extern int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len);
-extern int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len);
-extern int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len);
-extern void fbtft_write_reg8_bus8(struct fbtft_par *par, int len, ...);
-extern void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
-extern void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
-extern void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
+int fbtft_write_vmem8_bus8(struct fbtft_par *par, size_t offset, size_t len);
+int fbtft_write_vmem16_bus16(struct fbtft_par *par, size_t offset, size_t len);
+int fbtft_write_vmem16_bus8(struct fbtft_par *par, size_t offset, size_t len);
+int fbtft_write_vmem16_bus9(struct fbtft_par *par, size_t offset, size_t len);
+void fbtft_write_reg8_bus8(struct fbtft_par *par, int len, ...);
+void fbtft_write_reg8_bus9(struct fbtft_par *par, int len, ...);
+void fbtft_write_reg16_bus8(struct fbtft_par *par, int len, ...);
+void fbtft_write_reg16_bus16(struct fbtft_par *par, int len, ...);
 
 
 #define FBTFT_REGISTER_DRIVER(_name, _compatible, _display)                \
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 211d504..fa916e8 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -397,6 +397,37 @@
 			}
 		}
 	}, {
+		.name = "ew24ha0",
+		.spi = &(struct spi_board_info) {
+			.modalias = "fb_uc1611",
+			.max_speed_hz = 32000000,
+			.mode = SPI_MODE_3,
+			.platform_data = &(struct fbtft_platform_data) {
+				.display = {
+					.buswidth = 8,
+				},
+				.gpios = (const struct fbtft_gpio []) {
+					{ "dc", 24 },
+					{},
+				},
+			}
+		}
+	}, {
+		.name = "ew24ha0_9bit",
+		.spi = &(struct spi_board_info) {
+			.modalias = "fb_uc1611",
+			.max_speed_hz = 32000000,
+			.mode = SPI_MODE_3,
+			.platform_data = &(struct fbtft_platform_data) {
+				.display = {
+					.buswidth = 9,
+				},
+				.gpios = (const struct fbtft_gpio []) {
+					{},
+				},
+			}
+		}
+	}, {
 		.name = "flexfb",
 		.spi = &(struct spi_board_info) {
 			.modalias = "flexfb",
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index 2c4ce07..c763efc 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -12,10 +12,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/module.h>
@@ -30,7 +26,6 @@
 
 #define DRVNAME	    "flexfb"
 
-
 static char *chip;
 module_param(chip, charp, 0);
 MODULE_PARM_DESC(chip, "LCD controller");
@@ -68,7 +63,6 @@
 module_param(latched, bool, 0);
 MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
 
-
 static int *initp;
 static int initp_num;
 
@@ -132,14 +126,115 @@
 			      -1, 0xab, 0x01, -1, 0xb1, 0x32, -1, 0xb4, 0xa0, 0xb5, 0x55, -1, 0xbb, 0x17, -1, 0xbe, 0x05,
 			      -1, 0xc1, 0xc8, 0x80, 0xc8, -1, 0xc7, 0x0f, -1, 0xb6, 0x01, -1, 0xa6, -1, 0xaf, -3 };
 
+/**
+ * struct flexfb_lcd_controller - Describes the LCD controller properties
+ * @name: Model name of the chip
+ * @width: Width of display in pixels
+ * @height: Height of display in pixels
+ * @setaddrwin: Which set_addr_win() implementation to use
+ * @regwidth: LCD Controller Register width in bits
+ * @init_seq: LCD initialization sequence
+ * @init_seq_sz: Size of LCD initialization sequence
+ */
+struct flexfb_lcd_controller {
+	const char *name;
+	unsigned int width;
+	unsigned int height;
+	unsigned int setaddrwin;
+	unsigned int regwidth;
+	int *init_seq;
+	int init_seq_sz;
+};
+
+static const struct flexfb_lcd_controller flexfb_chip_table[] = {
+	{
+		.name = "st7735r",
+		.width = 128,
+		.height = 160,
+		.init_seq = st7735r_init,
+		.init_seq_sz = ARRAY_SIZE(st7735r_init),
+	},
+	{
+		.name = "hx8340bn",
+		.width = 176,
+		.height = 220,
+		.init_seq = hx8340bn_init,
+		.init_seq_sz = ARRAY_SIZE(hx8340bn_init),
+	},
+	{
+		.name = "ili9225",
+		.width = 176,
+		.height = 220,
+		.regwidth = 16,
+		.init_seq = ili9225_init,
+		.init_seq_sz = ARRAY_SIZE(ili9225_init),
+	},
+	{
+		.name = "ili9225",
+		.width = 176,
+		.height = 220,
+		.regwidth = 16,
+		.init_seq = ili9225_init,
+		.init_seq_sz = ARRAY_SIZE(ili9225_init),
+	},
+	{
+		.name = "ili9225",
+		.width = 176,
+		.height = 220,
+		.regwidth = 16,
+		.init_seq = ili9225_init,
+		.init_seq_sz = ARRAY_SIZE(ili9225_init),
+	},
+	{
+		.name = "ili9320",
+		.width = 240,
+		.height = 320,
+		.setaddrwin = 1,
+		.regwidth = 16,
+		.init_seq = ili9320_init,
+		.init_seq_sz = ARRAY_SIZE(ili9320_init),
+	},
+	{
+		.name = "ili9325",
+		.width = 240,
+		.height = 320,
+		.setaddrwin = 1,
+		.regwidth = 16,
+		.init_seq = ili9325_init,
+		.init_seq_sz = ARRAY_SIZE(ili9325_init),
+	},
+	{
+		.name = "ili9341",
+		.width = 240,
+		.height = 320,
+		.init_seq = ili9341_init,
+		.init_seq_sz = ARRAY_SIZE(ili9341_init),
+	},
+	{
+		.name = "ssd1289",
+		.width = 240,
+		.height = 320,
+		.setaddrwin = 2,
+		.regwidth = 16,
+		.init_seq = ssd1289_init,
+		.init_seq_sz = ARRAY_SIZE(ssd1289_init),
+	},
+	{
+		.name = "ssd1351",
+		.width = 128,
+		.height = 128,
+		.setaddrwin = 3,
+		.init_seq = ssd1351_init,
+		.init_seq_sz = ARRAY_SIZE(ssd1351_init),
+	},
+};
 
 /* ili9320, ili9325 */
 static void flexfb_set_addr_win_1(struct fbtft_par *par,
 				  int xs, int ys, int xe, int ye)
 {
-	fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par,
-		     "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n",
-		     __func__, xs, ys, xe, ye);
+	fbtft_par_dbg(DEBUG_SET_ADDR_WIN, par, "%s(xs=%d, ys=%d, xe=%d, ye=%d)\n",
+		      __func__, xs, ys, xe, ye);
 	switch (par->info->var.rotate) {
 	/* R20h = Horizontal GRAM Start Address */
 	/* R21h = Vertical GRAM Start Address */
@@ -242,7 +337,7 @@
 		return -EINVAL;
 	}
 	if (latched)
-		num_db = buswidth/2;
+		num_db = buswidth / 2;
 	for (i = 0; i < num_db; i++) {
 		if (par->gpio.db[i] < 0) {
 			dev_err(par->info->device,
@@ -255,8 +350,38 @@
 	return 0;
 }
 
+static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
+{
+	if (!width)
+		width = chip->width;
+	if (!height)
+		height = chip->height;
+	setaddrwin = chip->setaddrwin;
+	if (chip->regwidth)
+		regwidth = chip->regwidth;
+	if (!init_num) {
+		initp = chip->init_seq;
+		initp_num = chip->init_seq_sz;
+	}
+}
+
 static struct fbtft_display flex_display = { };
 
+static int flexfb_chip_init(const struct device *dev)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
+		if (!strcmp(chip, flexfb_chip_table[i].name)) {
+			flexfb_chip_load_param(&flexfb_chip_table[i]);
+			return 0;
+		}
+
+	dev_err(dev, "chip=%s is not supported\n", chip);
+
+	return -EINVAL;
+}
+
 static int flexfb_probe_common(struct spi_device *sdev,
 			       struct platform_device *pdev)
 {
@@ -277,110 +402,9 @@
 		       sdev ? "'SPI device'" : "'Platform device'");
 
 	if (chip) {
-
-		if (!strcmp(chip, "st7735r")) {
-			if (!width)
-				width = 128;
-			if (!height)
-				height = 160;
-			if (init_num == 0) {
-				initp = st7735r_init;
-				initp_num = ARRAY_SIZE(st7735r_init);
-			}
-
-
-		} else if (!strcmp(chip, "hx8340bn")) {
-			if (!width)
-				width = 176;
-			if (!height)
-				height = 220;
-			setaddrwin = 0;
-			if (init_num == 0) {
-				initp = hx8340bn_init;
-				initp_num = ARRAY_SIZE(hx8340bn_init);
-			}
-
-
-		} else if (!strcmp(chip, "ili9225")) {
-			if (!width)
-				width = 176;
-			if (!height)
-				height = 220;
-			setaddrwin = 0;
-			regwidth = 16;
-			if (init_num == 0) {
-				initp = ili9225_init;
-				initp_num = ARRAY_SIZE(ili9225_init);
-			}
-
-
-
-		} else if (!strcmp(chip, "ili9320")) {
-			if (!width)
-				width = 240;
-			if (!height)
-				height = 320;
-			setaddrwin = 1;
-			regwidth = 16;
-			if (init_num == 0) {
-				initp = ili9320_init;
-				initp_num = ARRAY_SIZE(ili9320_init);
-			}
-
-
-		} else if (!strcmp(chip, "ili9325")) {
-			if (!width)
-				width = 240;
-			if (!height)
-				height = 320;
-			setaddrwin = 1;
-			regwidth = 16;
-			if (init_num == 0) {
-				initp = ili9325_init;
-				initp_num = ARRAY_SIZE(ili9325_init);
-			}
-
-		} else if (!strcmp(chip, "ili9341")) {
-			if (!width)
-				width = 240;
-			if (!height)
-				height = 320;
-			setaddrwin = 0;
-			regwidth = 8;
-			if (init_num == 0) {
-				initp = ili9341_init;
-				initp_num = ARRAY_SIZE(ili9341_init);
-			}
-
-
-		} else if (!strcmp(chip, "ssd1289")) {
-			if (!width)
-				width = 240;
-			if (!height)
-				height = 320;
-			setaddrwin = 2;
-			regwidth = 16;
-			if (init_num == 0) {
-				initp = ssd1289_init;
-				initp_num = ARRAY_SIZE(ssd1289_init);
-			}
-
-
-
-		} else if (!strcmp(chip, "ssd1351")) {
-			if (!width)
-				width = 128;
-			if (!height)
-				height = 128;
-			setaddrwin = 3;
-			if (init_num == 0) {
-				initp = ssd1351_init;
-				initp_num = ARRAY_SIZE(ssd1351_init);
-			}
-		} else {
-			dev_err(dev, "chip=%s is not supported\n", chip);
-			return -EINVAL;
-		}
+		ret = flexfb_chip_init(dev);
+		if (ret)
+			return ret;
 	}
 
 	if (width == 0 || height == 0) {
@@ -395,7 +419,7 @@
 	fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
 	fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
 
-	info = fbtft_framebuffer_alloc(&flex_display, dev);
+	info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
 	if (!info)
 		return -ENOMEM;
 
@@ -527,8 +551,8 @@
 		return -EINVAL;
 	par = info->par;
 	if (par)
-		fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
-			"%s()\n", __func__);
+		fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
+			      __func__);
 	fbtft_unregister_framebuffer(info);
 	fbtft_framebuffer_release(info);
 
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
new file mode 100644
index 0000000..8214102
--- /dev/null
+++ b/drivers/staging/fsl-mc/README.txt
@@ -0,0 +1,364 @@
+Copyright (C) 2015 Freescale Semiconductor Inc.
+
+DPAA2 (Data Path Acceleration Architecture Gen2)
+------------------------------------------------
+
+This document provides an overview of the Freescale DPAA2 architecture
+and how it is integrated into the Linux kernel.
+
+Contents summary
+   -DPAA2 overview
+   -Overview of DPAA2 objects
+   -DPAA2 Linux driver architecture overview
+        -bus driver
+        -dprc driver
+        -allocator
+        -dpio driver
+        -Ethernet
+        -mac
+
+DPAA2 Overview
+--------------
+
+DPAA2 is a hardware architecture designed for high-speed network
+packet processing.  DPAA2 consists of sophisticated mechanisms for
+processing Ethernet packets, queue management, buffer management,
+autonomous L2 switching, virtual Ethernet bridging, and accelerator
+(e.g. crypto) sharing.
+
+A DPAA2 hardware component called the Management Complex (or MC) manages the
+DPAA2 hardware resources.  The MC provides an object-based abstraction for
+software drivers to use the DPAA2 hardware.
+
+The MC uses DPAA2 hardware resources such as queues, buffer pools, and
+network ports to create functional objects/devices such as network
+interfaces, an L2 switch, or accelerator instances.
+
+The MC provides memory-mapped I/O command interfaces (MC portals)
+which DPAA2 software drivers use to operate on DPAA2 objects:
+
+         +--------------------------------------+
+         |                  OS                  |
+         |                        DPAA2 drivers |
+         |                             |        |
+         +-----------------------------|--------+
+                                       |
+                                       | (create,discover,connect
+                                       |  config,use,destroy)
+                                       |
+                         DPAA2         |
+         +------------------------| mc portal |-+
+         |                             |        |
+         |   +- - - - - - - - - - - - -V- - -+  |
+         |   |                               |  |
+         |   |   Management Complex (MC)     |  |
+         |   |                               |  |
+         |   +- - - - - - - - - - - - - - - -+  |
+         |                                      |
+         | Hardware                  Hardware   |
+         | Resources                 Objects    |
+         | ---------                 -------    |
+         | -queues                   -DPRC      |
+         | -buffer pools             -DPMCP     |
+         | -Eth MACs/ports           -DPIO      |
+         | -network interface        -DPNI      |
+         |  profiles                 -DPMAC     |
+         | -queue portals            -DPBP      |
+         | -MC portals                ...       |
+         |  ...                                 |
+         |                                      |
+         +--------------------------------------+
+
+The MC mediates operations such as create, discover,
+connect, configuration, and destroy.  Fast-path operations
+on data, such as packet transmit/receive, are not mediated by
+the MC and are done directly using memory mapped regions in
+DPIO objects.
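+
+As a rough illustration of what operating on an object through an MC
+portal looks like from software: a command is a small fixed-size
+structure whose parameters are written into the portal's MMIO region,
+the header is written last to hand the command to the MC, and the
+caller then polls the header until the MC reports completion.  The
+sketch below is illustrative only; the names are hypothetical and are
+not claimed to match the exact in-tree fsl-mc interfaces.
+
+    #include <linux/io.h>
+    #include <linux/types.h>
+
+    /* Hypothetical portal command layout (illustrative sketch). */
+    struct mc_cmd_sketch {
+            u64 header;     /* command id, token, completion status */
+            u64 params[7];  /* command-specific parameters */
+    };
+
+    static void mc_portal_send_sketch(void __iomem *portal,
+                                      const struct mc_cmd_sketch *cmd)
+    {
+            int i;
+
+            /* parameters first; writing the header last submits the
+             * command to the MC for execution */
+            for (i = 0; i < 7; i++)
+                    writeq(cmd->params[i], portal + (i + 1) * sizeof(u64));
+            writeq(cmd->header, portal);
+
+            /* a real implementation would now poll the header word
+             * until the MC updates its completion status field */
+    }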
+
+Overview of DPAA2 Objects
+-------------------------
+This section provides a brief overview of some key objects
+in the DPAA2 hardware.  A simple scenario is described illustrating
+the objects involved in creating a network interface.
+
+-DPRC (Datapath Resource Container)
+
+    A DPRC is a container object that holds all the other
+    types of DPAA2 objects.  In the example diagram below there
+    are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
+    in the container.
+
+    +---------------------------------------------------------+
+    | DPRC                                                    |
+    |                                                         |
+    |  +-------+  +-------+  +-------+  +-------+  +-------+  |
+    |  | DPMCP |  | DPIO  |  | DPBP  |  | DPNI  |  | DPMAC |  |
+    |  +-------+  +-------+  +-------+  +---+---+  +---+---+  |
+    |  | DPMCP |  | DPIO  |                                   |
+    |  +-------+  +-------+                                   |
+    |  | DPMCP |                                              |
+    |  +-------+                                              |
+    |                                                         |
+    +---------------------------------------------------------+
+
+    From the point of view of an OS, a DPRC is bus-like.  As with
+    a plug-and-play bus such as PCI, DPRC commands can be used to
+    enumerate the contents of the DPRC and discover the hardware
+    objects present (including their mappable regions and interrupts).
+
+     dprc.1 (bus)
+       |
+       +--+--------+-------+-------+-------+
+          |        |       |       |       |
+        dpmcp.1  dpio.1  dpbp.1  dpni.1  dpmac.1
+        dpmcp.2  dpio.2
+        dpmcp.3
+
+    Hardware objects can be created and destroyed dynamically, providing
+    the ability to hot plug/unplug objects in and out of the DPRC.
+
+    A DPRC has a mappable mmio region (an MC portal) that can be used
+    to send MC commands.  It has an interrupt for status events (like
+    hotplug).
+
+    All objects in a container share the same hardware "isolation context".
+    This means that with respect to an IOMMU the isolation granularity
+    is at the DPRC (container) level, not at the individual object
+    level.
+
+    DPRCs can be defined statically and populated with objects
+    via a config file passed to the MC when firmware starts
+    it.  There is also a Linux user space tool called "restool"
+    that can be used to create/destroy containers and objects
+    dynamically.
+
+-DPAA2 Objects for an Ethernet Network Interface
+
+    A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
+    queuing mechanisms, configuration mechanisms, buffer management,
+    physical ports, and interrupts.  DPAA2 uses a more granular approach
+    utilizing multiple hardware objects.  Each object has specialized
+    functions, and the objects are used together by software to provide
+    Ethernet network interface functionality.  This approach provides
+    efficient use of finite hardware resources, flexibility, and
+    performance advantages.
+
+    The diagram below shows the objects needed for a simple
+    network interface configuration on a system with 2 CPUs.
+
+              +---+---+ +---+---+
+                 CPU0     CPU1
+              +---+---+ +---+---+
+                  |         |
+              +---+---+ +---+---+
+                 DPIO     DPIO
+              +---+---+ +---+---+
+                    \     /
+                     \   /
+                      \ /
+                   +---+---+
+                      DPNI  --- DPBP,DPMCP
+                   +---+---+
+                       |
+                       |
+                   +---+---+
+                     DPMAC
+                   +---+---+
+                       |
+                    port/PHY
+
+    The objects are described below.  For each object a brief description
+    is provided along with a summary of the kinds of operations the object
+    supports and a summary of key resources of the object (mmio regions
+    and irqs).
+
+       -DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
+        hardware device that connects to an Ethernet PHY and allows
+        physical transmission and reception of Ethernet frames.
+           -mmio regions: none
+           -irqs: dpni link change
+           -commands: set link up/down, link config, get stats,
+             irq config, enable, reset
+
+       -DPNI (Datapath Network Interface): contains TX/RX queues,
+        network interface configuration, and rx buffer pool configuration
+        mechanisms.
+           -mmio regions: none
+           -irqs: link state
+           -commands: port config, offload config, queue config,
+            parse/classify config, irq config, enable, reset
+
+       -DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
+        packets and do hardware buffer pool management operations.  For
+        optimum performance there is typically one DPIO per CPU.  This allows
+        each CPU to perform simultaneous enqueue/dequeue operations.
+           -mmio regions: queue operations, buffer mgmt
+           -irqs: data availability, congestion notification, buffer
+                  pool depletion
+           -commands: irq config, enable, reset
+
+       -DPBP (Datapath Buffer Pool): represents a hardware buffer
+        pool.
+           -mmio regions: none
+           -irqs: none
+           -commands: enable, reset
+
+       -DPMCP (Datapath MC Portal): provides an MC command portal.
+        Used by drivers to send commands to the MC to manage
+        objects.
+           -mmio regions: MC command portal
+           -irqs: command completion
+           -commands: irq config, enable, reset
+
+    Object Connections
+    ------------------
+    Some objects have explicit relationships that must
+    be configured:
+
+       -DPNI <--> DPMAC
+       -DPNI <--> DPNI
+       -DPNI <--> L2-switch-port
+          A DPNI must be connected to something such as a DPMAC,
+          another DPNI, or L2 switch port.  The DPNI connection
+          is made via a DPRC command.
+
+              +-------+  +-------+
+              | DPNI  |  | DPMAC |
+              +---+---+  +---+---+
+                  |          |
+                  +==========+
+
+       -DPNI <--> DPBP
+          A network interface requires a 'buffer pool' (DPBP
+          object) which provides a list of pointers to memory
+          where received Ethernet data is to be copied.  The
+          Ethernet driver configures the DPBPs associated with
+          the network interface.
+
+    Interrupts
+    ----------
+    All interrupts generated by DPAA2 objects are message
+    interrupts.  At the hardware level message interrupts
+    generated by devices will normally have 3 components--
+    1) a non-spoofable 'device-id' expressed on the hardware
+    bus, 2) an address, 3) a data value.
+
+    In the case of DPAA2 devices/objects, all objects in the
+    same container/DPRC share the same 'device-id'.
+    For ARM-based SoCs this is the same as the stream ID.
+
+
+DPAA2 Linux Driver Overview
+---------------------------
+
+This section provides an overview of the Linux kernel drivers for
+DPAA2-- 1) the bus driver and associated "DPAA2 infrastructure"
+drivers and 2) functional object drivers (such as Ethernet).
+
+As described previously, a DPRC is a container that holds the other
+types of DPAA2 objects.  It is functionally similar to a plug-and-play
+bus controller.
+
+Each object in the DPRC is a Linux "device" and is bound to a driver.
+The diagram below shows the Linux drivers involved in a networking
+scenario and the objects bound to each driver.  A brief description
+of each driver follows.
+
+                                             +------------+
+                                             | OS Network |
+                                             |   Stack    |
+                 +------------+              +------------+
+                 | Allocator  |. . . . . . . |  Ethernet  |
+                 |(dpmcp,dpbp)|              |   (dpni)   |
+                 +-.----------+              +---+---+----+
+                  .          .                   ^   |
+                 .            .     <data avail, |   |<enqueue,
+                .              .     tx confirm> |   | dequeue>
+    +-------------+             .                |   |
+    | DPRC driver |              .           +---+---V----+     +---------+
+    |   (dprc)    |               . . . . . .| DPIO driver|     |   MAC   |
+    +----------+--+                          |  (dpio)    |     | (dpmac) |
+               |                             +------+-----+     +-----+---+
+               |<dev add/remove>                    |                 |
+               |                                    |                 |
+          +----+--------------+                     |              +--+---+
+          |   mc-bus driver   |                     |              | PHY  |
+          |                   |                     |              |driver|
+          | /fsl-mc@80c000000 |                     |              +--+---+
+          +-------------------+                     |                 |
+                                                    |                 |
+ ================================ HARDWARE =========|=================|======
+                                                  DPIO                |
+                                                    |                 |
+                                                  DPNI---DPBP         |
+                                                    |                 |
+                                                  DPMAC               |
+                                                    |                 |
+                                                   PHY ---------------+
+ ===================================================|========================
+
+A brief description of each driver is provided below.
+
+    mc-bus driver
+    -------------
+    The mc-bus driver is a platform driver and is probed from an
+    "/fsl-mc@xxxx" node in the device tree passed in by boot firmware.
+    It is responsible for bootstrapping the DPAA2 kernel infrastructure.
+    Key functions include:
+       -registering a new bus type named "fsl-mc" with the kernel,
+        and implementing bus call-backs (e.g. match/uevent/dev_groups)
+       -implementing APIs for DPAA2 driver registration and for device
+        add/remove
+       -creating an MSI irq domain
+       -doing a device add of the 'root' DPRC device, which is needed
+        to bootstrap things
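+
+    A hedged sketch of how a DPAA2 object driver might register with
+    this bus follows.  The names below are illustrative approximations
+    rather than the exact in-tree fsl-mc API.
+
+        /* illustrative only: a driver claiming "dpni" objects */
+        static const struct fsl_mc_device_match_id sketch_dpni_ids[] = {
+                { .vendor = 0x1957 /* Freescale */, .obj_type = "dpni" },
+                { }
+        };
+
+        static int sketch_dpni_probe(struct fsl_mc_device *mc_dev)
+        {
+                /* map the object's regions, request its irqs, ... */
+                return 0;
+        }
+
+        static struct fsl_mc_driver sketch_dpni_driver = {
+                .driver         = { .name = "sketch-dpni" },
+                .probe          = sketch_dpni_probe,
+                .match_id_table = sketch_dpni_ids,
+        };
+
+        /* registered from module init, e.g. with fsl_mc_driver_register() */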
+
+    DPRC driver
+    -----------
+    The dprc-driver is bound to DPRC objects and does runtime management
+    of a bus instance.  It performs the initial bus scan of the DPRC
+    and handles interrupts for container events such as hot plug.
+
+    Allocator
+    ----------
+    Certain objects such as DPMCP and DPBP are generic and fungible,
+    and are intended to be used by other drivers.  For example,
+    the DPAA2 Ethernet driver needs:
+       -DPMCPs to send MC commands, to configure network interfaces
+       -DPBPs for network buffer pools
+
+    The allocator driver registers for these allocatable object types
+    and those objects are bound to the allocator when the bus is probed.
+    The allocator maintains a pool of objects that are available for
+    allocation by other DPAA2 drivers.
+
+    DPIO driver
+    -----------
+    The DPIO driver is bound to DPIO objects and provides services that allow
+    other drivers such as the Ethernet driver to receive and transmit data.
+    Key services include:
+        -data availability notifications
+        -hardware queuing operations (enqueue and dequeue of data)
+        -hardware buffer pool management
+
+    There is typically one DPIO object per physical CPU for optimum
+    performance, allowing each CPU to simultaneously enqueue
+    and dequeue data.
+
+    The DPIO driver operates on behalf of all DPAA2 drivers
+    active in the kernel--  Ethernet, crypto, compression,
+    etc.
+
+    Ethernet
+    --------
+    The Ethernet driver is bound to a DPNI and implements the kernel
+    interfaces needed to connect the DPAA2 network interface to
+    the network stack.
+
+    Each DPNI corresponds to a Linux network interface.
+
+    MAC driver
+    ----------
+    An Ethernet PHY is an off-chip, board-specific component and is managed
+    by the appropriate PHY driver via an mdio bus.  The MAC driver
+    acts as a proxy between the PHY driver and the
+    MC.  It does this by sending MC commands to a DPMAC object.
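+
+    To make the proxy role concrete, a hedged sketch follows.  Every
+    name in it is hypothetical; only the shape of the flow (PHY driver
+    callback -> MC command on a DPMAC object) is the point.  It assumes
+    the declarations from <linux/phy.h> and the fsl-mc MC I/O headers.
+
+        struct sketch_dpmac_priv {
+                struct fsl_mc_io *mc_io; /* portal used for MC commands */
+                u16 dpmac_token;         /* handle to the opened DPMAC */
+        };
+
+        /* hypothetical wrapper that marshals link state into an MC
+         * command and sends it through the portal */
+        int sketch_dpmac_set_link_state(struct fsl_mc_io *mc_io, u16 token,
+                                        int up, int speed, int duplex);
+
+        /* link-change callback: forward the PHY-reported state to the
+         * MC-managed DPMAC object */
+        static void sketch_adjust_link(struct phy_device *phydev,
+                                       struct sketch_dpmac_priv *priv)
+        {
+                sketch_dpmac_set_link_state(priv->mc_io, priv->dpmac_token,
+                                            phydev->link, phydev->speed,
+                                            phydev->duplex);
+        }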
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
index d78288b..3894368 100644
--- a/drivers/staging/fsl-mc/TODO
+++ b/drivers/staging/fsl-mc/TODO
@@ -1,13 +1,31 @@
-* Add README file (with ASCII art) describing relationships between
-  DPAA2 objects and how combine them to make a NIC, an LS2 switch, etc.
-  Also, define all acronyms used.
-
 * Decide if multiple root fsl-mc buses will be supported per Linux instance,
   and if so add support for this.
 
 * Add at least one device driver for a DPAA2 object (child device of the
-  fsl-mc bus).
+  fsl-mc bus).  Most likely candidate for this is adding DPAA2 Ethernet
+  driver support, which depends on drivers for several objects: DPNI,
+  DPIO, DPMAC.  Other pre-requisites include:
+
+     * Interrupt support.  For meaningful driver support we need
+       interrupts, and thus need message interrupt support by the bus
+       driver.
+          -Note: this has dependencies on generic MSI support work
+           in progress upstream, see [1] and [2].
+
+     * Management Complex (MC) command serialization.  Locking mechanisms
+       are needed by drivers to serialize commands sent to the MC, including
+       from atomic context.
+
+     * MC firmware uprev.  The MC firmware upon which the fsl-mc
+       bus driver and DPAA2 object drivers are based is continuing
+       to evolve, so minor updates are needed to keep in sync with binary
+       interface changes to the MC.
+
+* Cleanup
 
 Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
 german.rivera@freescale.com, devel@driverdev.osuosl.org,
 linux-kernel@vger.kernel.org
+
+[1] https://lkml.org/lkml/2015/7/9/93
+[2] https://lkml.org/lkml/2015/7/7/712
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
index 5992670..e1861cf 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000.h
@@ -42,17 +42,16 @@
 
 struct pcmcia_device;
 struct net_device;
-extern struct net_device *init_ft1000_card(struct pcmcia_device *link,
-					   void *ft1000_reset);
-extern void stop_ft1000_card(struct net_device *dev);
-extern int card_download(struct net_device *dev, const u8 *pFileStart,
-			 size_t FileLength);
+struct net_device *init_ft1000_card(struct pcmcia_device *link,
+				    void *ft1000_reset);
+void stop_ft1000_card(struct net_device *dev);
+int card_download(struct net_device *dev, const u8 *pFileStart,
+		  size_t FileLength);
 
-extern u16 ft1000_read_dpram(struct net_device *dev, int offset);
-extern void card_bootload(struct net_device *dev);
-extern u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset,
-				    int Index);
-extern u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
+u16 ft1000_read_dpram(struct net_device *dev, int offset);
+void card_bootload(struct net_device *dev);
+u16 ft1000_read_dpram_mag_16(struct net_device *dev, int offset, int Index);
+u32 ft1000_read_dpram_mag_32(struct net_device *dev, int offset);
 void ft1000_write_dpram_mag_32(struct net_device *dev, int offset, u32 value);
 
 /* Read the value of a given ASIC register. */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
index 409266b..f241a3a 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_debug.c
@@ -260,7 +260,8 @@
 		/* Make sure we free any memory reserve for slow Queue */
 		for (i = 0; i < MAX_NUM_APP; i++) {
 			while (list_empty(&dev->app_info[i].app_sqlist) == 0) {
-				pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+				pdpram_blk = list_entry(dev->app_info[i].app_sqlist.next,
+							struct dpram_blk, list);
 				list_del(&pdpram_blk->list);
 				ft1000_free_buffer(pdpram_blk, &freercvpool);
 
@@ -415,12 +416,19 @@
 	struct timeval tv;
 	struct IOCTL_GET_VER get_ver_data;
 	struct IOCTL_GET_DSP_STAT get_stat_data;
-	u8 ConnectionMsg[] = {0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
-			      0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
-			      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			      0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
-			      0x00, 0x01, 0x00, 0x00};
+	u8 ConnectionMsg[] = {
+		0x00, 0x44, 0x10, 0x20, 0x80, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x93, 0x64,
+		0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x0a,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12,
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x02, 0x37, 0x00, 0x00, 0x00, 0x08,
+		0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x7f, 0x00,
+		0x00, 0x01, 0x00, 0x00
+	};
 
 	unsigned short ledStat = 0;
 	unsigned short conStat = 0;
@@ -495,10 +503,12 @@
 		memcpy(get_stat_data.eui64, info->eui64, EUISZ);
 
 		if (info->ProgConStat != 0xFF) {
-			ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED, (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
+			ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_LED,
+					    (u8 *)&ledStat, FT1000_MAG_DSP_LED_INDX);
 			get_stat_data.LedStat = ntohs(ledStat);
 			pr_debug("LedStat = 0x%x\n", get_stat_data.LedStat);
-			ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE, (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
+			ft1000_read_dpram16(ft1000dev, FT1000_MAG_DSP_CON_STATE,
+					    (u8 *)&conStat, FT1000_MAG_DSP_CON_STATE_INDX);
 			get_stat_data.ConStat = ntohs(conStat);
 			pr_debug("ConStat = 0x%x\n", get_stat_data.ConStat);
 		} else {
@@ -689,7 +699,8 @@
 		if (list_empty(&ft1000dev->app_info[i].app_sqlist) == 0) {
 			/* pr_debug("Message detected in slow queue\n"); */
 			spin_lock_irqsave(&free_buff_lock, flags);
-			pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next, struct dpram_blk, list);
+			pdpram_blk = list_entry(ft1000dev->app_info[i].app_sqlist.next,
+						struct dpram_blk, list);
 			list_del(&pdpram_blk->list);
 			ft1000dev->app_info[i].NumOfMsg--;
 			/* pr_debug("NumOfMsg for app %d = %d\n", i, ft1000dev->app_info[i].NumOfMsg); */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
index 5def347..297b7ae 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_download.c
@@ -95,7 +95,6 @@
 	long              nDspImages;          /* Number of DSP images in file. */
 };
 
-#pragma pack(1)
 struct dsp_image_info {
 	long              coff_date;           /* Date/time when DSP Coff image was built. */
 	long              begin_offset;        /* Offset in file where image begins. */
@@ -105,7 +104,7 @@
 	long              version;             /* Embedded version # of DSP code. */
 	unsigned short    checksum;            /* DSP File checksum */
 	unsigned short    pad1;
-};
+} __packed;
 
 
 /* checks if the doorbell register is cleared */
@@ -180,7 +179,8 @@
 		}
 
 		status = ft1000_read_dpram16(ft1000dev,
-					     DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1);
+					     DWNLD_MAG1_HANDSHAKE_LOC,
+					     (u8 *)&handshake, 1);
 		handshake = ntohs(handshake);
 
 		if (status)
@@ -281,12 +281,14 @@
 
 	if (ft1000dev->bootmode == 1) {
 		status = fix_ft1000_read_dpram32(ft1000dev,
-						 DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+						 DWNLD_MAG1_TYPE_LOC,
+						 (u8 *)&tempx);
 		tempx = ntohl(tempx);
 	} else {
 		tempx = 0;
 		status = ft1000_read_dpram16(ft1000dev,
-					     DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1);
+					     DWNLD_MAG1_TYPE_LOC,
+					     (u8 *)&tempword, 1);
 		tempx |= (tempword << 16);
 		tempx = ntohl(tempx);
 	}
@@ -304,7 +306,8 @@
 
 	if (ft1000dev->bootmode == 1) {
 		status = fix_ft1000_read_dpram32(ft1000dev,
-						 DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx);
+						 DWNLD_MAG1_TYPE_LOC,
+						 (u8 *)&tempx);
 		tempx = ntohl(tempx);
 	} else {
 		if (ft1000dev->usbboot == 2) {
@@ -332,14 +335,17 @@
 
 	if (ft1000dev->bootmode == 1) {
 		status = fix_ft1000_read_dpram32(ft1000dev,
-						 DWNLD_MAG1_SIZE_LOC, (u8 *)&value);
+						 DWNLD_MAG1_SIZE_LOC,
+						 (u8 *)&value);
 		value = ntohl(value);
 	} else	{
 		status = ft1000_read_dpram16(ft1000dev,
-					     DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0);
+					     DWNLD_MAG1_SIZE_LOC,
+					     (u8 *)&tempword, 0);
 		value = tempword;
 		status = ft1000_read_dpram16(ft1000dev,
-					     DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1);
+					     DWNLD_MAG1_SIZE_LOC,
+					     (u8 *)&tempword, 1);
 		value |= (tempword << 16);
 		value = ntohl(value);
 	}
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index e6b5976..9620970 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -842,7 +842,6 @@
 	skb = dev_alloc_skb(len + 12 + 2);
 
 	if (skb == NULL) {
-		pr_debug("No Network buffers available\n");
 		info->stats.rx_errors++;
 		ft1000_submit_rx_urb(info);
 		return -1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
index fea60d5..9b5050f 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.h
@@ -134,8 +134,8 @@
 
 int ft1000_create_dev(struct ft1000_usb *dev);
 void ft1000_destroy_dev(struct net_device *dev);
-extern int card_send_command(struct ft1000_usb *ft1000dev,
-			     void *ptempbuffer, int size);
+int card_send_command(struct ft1000_usb *ft1000dev,
+		      void *ptempbuffer, int size);
 
 struct dpram_blk *ft1000_get_buffer(struct list_head *bufflist);
 void ft1000_free_buffer(struct dpram_blk *pdpram_blk, struct list_head *plist);
diff --git a/drivers/staging/gdm72xx/usb_ids.h b/drivers/staging/gdm72xx/usb_ids.h
index 8ce544d..7afb9ba 100644
--- a/drivers/staging/gdm72xx/usb_ids.h
+++ b/drivers/staging/gdm72xx/usb_ids.h
@@ -32,7 +32,9 @@
 #define BL_PID_MASK		0xffc0
 
 #define USB_DEVICE_BOOTLOADER(vid, pid)	\
-	{USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)},	\
+	{USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD)}
+
+#define USB_DEVICE_BOOTLOADER_DRV(vid, pid)	\
 	{USB_DEVICE((vid), ((pid)&BL_PID_MASK)|B_DOWNLOAD|B_DIFF_DL_DRV)}
 
 #define USB_DEVICE_CDC_DATA(vid, pid)	\
@@ -40,6 +42,7 @@
 
 static const struct usb_device_id id_table[] = {
 	USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID1),
+	USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID1),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x1),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0x2),
@@ -58,6 +61,7 @@
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID1+0xf),
 
 	USB_DEVICE_BOOTLOADER(GCT_VID, GCT_PID2),
+	USB_DEVICE_BOOTLOADER_DRV(GCT_VID, GCT_PID2),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x1),
 	USB_DEVICE_CDC_DATA(GCT_VID, GCT_PID2+0x2),
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index d7c5223..3f7715c 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1,5 +1,5 @@
 /*
- * Freescale i.MX28 LRADC driver
+ * Freescale MXS LRADC driver
  *
  * Copyright (c) 2012 DENX Software Engineering, GmbH.
  * Marek Vasut <marex@denx.de>
@@ -15,34 +15,30 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/err.h>
-#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
 #include <linux/device.h>
+#include <linux/err.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/io.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/stmp_device.h>
-#include <linux/bitops.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/clk.h>
+#include <linux/sysfs.h>
 
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
 #include <linux/iio/trigger.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
+#include <linux/iio/sysfs.h>
 
 #define DRIVER_NAME		"mxs-lradc"
 
@@ -65,14 +61,14 @@
  * Once the pen touches the touchscreen, the touchscreen switches from
  * IRQ-driven mode to polling mode to prevent interrupt storm. The polling
  * is realized by worker thread, which is called every 20 or so milliseconds.
- * This gives the touchscreen enough fluence and does not strain the system
+ * This gives the touchscreen enough fluency and does not strain the system
  * too much.
  */
 #define LRADC_TS_SAMPLE_DELAY_MS	5
 
 /*
  * The LRADC reads the following amount of samples from each touchscreen
- * channel and the driver then computes avarage of these.
+ * channel and the driver then computes average of these.
  */
 #define LRADC_TS_SAMPLE_AMOUNT		4
 
@@ -238,7 +234,7 @@
 	 * CH5 -- Touch screen YNLR
 	 * CH6 -- Touch screen WIPER (5-wire only)
 	 *
-	 * The bitfields below represents which parts of the LRADC block are
+	 * The bit fields below represent which parts of the LRADC block are
 	 * switched into special mode of operation. These channels can not
 	 * be sampled as regular LRADC channels. The driver will refuse any
 	 * attempt to sample these channels.
@@ -252,7 +248,7 @@
 	struct input_dev	*ts_input;
 
 	enum mxs_lradc_id	soc;
-	enum lradc_ts_plate	cur_plate; /* statemachine */
+	enum lradc_ts_plate	cur_plate; /* state machine */
 	bool			ts_valid;
 	unsigned		ts_x_pos;
 	unsigned		ts_y_pos;
@@ -812,7 +808,7 @@
 	int ret;
 
 	/*
-	 * See if there is no buffered operation in progess. If there is, simply
+	 * See if there is no buffered operation in progress. If there is, simply
 	 * bail out. This can be improved to support both buffered and raw IO at
 	 * the same time, yet the code becomes horribly complicated. Therefore I
 	 * applied KISS principle here.
@@ -1369,7 +1365,7 @@
  * Driver initialization
  */
 
-#define MXS_ADC_CHAN(idx, chan_type) {				\
+#define MXS_ADC_CHAN(idx, chan_type, name) {			\
 	.type = (chan_type),					\
 	.indexed = 1,						\
 	.scan_index = (idx),					\
@@ -1382,17 +1378,18 @@
 		.realbits = LRADC_RESOLUTION,			\
 		.storagebits = 32,				\
 	},							\
+	.datasheet_name = (name),				\
 }
 
-static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
-	MXS_ADC_CHAN(0, IIO_VOLTAGE),
-	MXS_ADC_CHAN(1, IIO_VOLTAGE),
-	MXS_ADC_CHAN(2, IIO_VOLTAGE),
-	MXS_ADC_CHAN(3, IIO_VOLTAGE),
-	MXS_ADC_CHAN(4, IIO_VOLTAGE),
-	MXS_ADC_CHAN(5, IIO_VOLTAGE),
-	MXS_ADC_CHAN(6, IIO_VOLTAGE),
-	MXS_ADC_CHAN(7, IIO_VOLTAGE),	/* VBATT */
+static const struct iio_chan_spec mx23_lradc_chan_spec[] = {
+	MXS_ADC_CHAN(0, IIO_VOLTAGE, "LRADC0"),
+	MXS_ADC_CHAN(1, IIO_VOLTAGE, "LRADC1"),
+	MXS_ADC_CHAN(2, IIO_VOLTAGE, "LRADC2"),
+	MXS_ADC_CHAN(3, IIO_VOLTAGE, "LRADC3"),
+	MXS_ADC_CHAN(4, IIO_VOLTAGE, "LRADC4"),
+	MXS_ADC_CHAN(5, IIO_VOLTAGE, "LRADC5"),
+	MXS_ADC_CHAN(6, IIO_VOLTAGE, "VDDIO"),
+	MXS_ADC_CHAN(7, IIO_VOLTAGE, "VBATT"),
 	/* Combined Temperature sensors */
 	{
 		.type = IIO_TEMP,
@@ -1403,6 +1400,7 @@
 				      BIT(IIO_CHAN_INFO_SCALE),
 		.channel = 8,
 		.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+		.datasheet_name = "TEMP_DIE",
 	},
 	/* Hidden channel to keep indexes */
 	{
@@ -1411,12 +1409,48 @@
 		.scan_index = -1,
 		.channel = 9,
 	},
-	MXS_ADC_CHAN(10, IIO_VOLTAGE),	/* VDDIO */
-	MXS_ADC_CHAN(11, IIO_VOLTAGE),	/* VTH */
-	MXS_ADC_CHAN(12, IIO_VOLTAGE),	/* VDDA */
-	MXS_ADC_CHAN(13, IIO_VOLTAGE),	/* VDDD */
-	MXS_ADC_CHAN(14, IIO_VOLTAGE),	/* VBG */
-	MXS_ADC_CHAN(15, IIO_VOLTAGE),	/* VDD5V */
+	MXS_ADC_CHAN(10, IIO_VOLTAGE, NULL),
+	MXS_ADC_CHAN(11, IIO_VOLTAGE, NULL),
+	MXS_ADC_CHAN(12, IIO_VOLTAGE, "USB_DP"),
+	MXS_ADC_CHAN(13, IIO_VOLTAGE, "USB_DN"),
+	MXS_ADC_CHAN(14, IIO_VOLTAGE, "VBG"),
+	MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
+};
+
+static const struct iio_chan_spec mx28_lradc_chan_spec[] = {
+	MXS_ADC_CHAN(0, IIO_VOLTAGE, "LRADC0"),
+	MXS_ADC_CHAN(1, IIO_VOLTAGE, "LRADC1"),
+	MXS_ADC_CHAN(2, IIO_VOLTAGE, "LRADC2"),
+	MXS_ADC_CHAN(3, IIO_VOLTAGE, "LRADC3"),
+	MXS_ADC_CHAN(4, IIO_VOLTAGE, "LRADC4"),
+	MXS_ADC_CHAN(5, IIO_VOLTAGE, "LRADC5"),
+	MXS_ADC_CHAN(6, IIO_VOLTAGE, "LRADC6"),
+	MXS_ADC_CHAN(7, IIO_VOLTAGE, "VBATT"),
+	/* Combined Temperature sensors */
+	{
+		.type = IIO_TEMP,
+		.indexed = 1,
+		.scan_index = 8,
+		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+				      BIT(IIO_CHAN_INFO_OFFSET) |
+				      BIT(IIO_CHAN_INFO_SCALE),
+		.channel = 8,
+		.scan_type = {.sign = 'u', .realbits = 18, .storagebits = 32,},
+		.datasheet_name = "TEMP_DIE",
+	},
+	/* Hidden channel to keep indexes */
+	{
+		.type = IIO_TEMP,
+		.indexed = 1,
+		.scan_index = -1,
+		.channel = 9,
+	},
+	MXS_ADC_CHAN(10, IIO_VOLTAGE, "VDDIO"),
+	MXS_ADC_CHAN(11, IIO_VOLTAGE, "VTH"),
+	MXS_ADC_CHAN(12, IIO_VOLTAGE, "VDDA"),
+	MXS_ADC_CHAN(13, IIO_VOLTAGE, "VDDD"),
+	MXS_ADC_CHAN(14, IIO_VOLTAGE, "VBG"),
+	MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
 };
 
 static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
@@ -1612,10 +1646,16 @@
 	iio->dev.parent = &pdev->dev;
 	iio->info = &mxs_lradc_iio_info;
 	iio->modes = INDIO_DIRECT_MODE;
-	iio->channels = mxs_lradc_chan_spec;
-	iio->num_channels = ARRAY_SIZE(mxs_lradc_chan_spec);
 	iio->masklength = LRADC_MAX_TOTAL_CHANS;
 
+	if (lradc->soc == IMX23_LRADC) {
+		iio->channels = mx23_lradc_chan_spec;
+		iio->num_channels = ARRAY_SIZE(mx23_lradc_chan_spec);
+	} else {
+		iio->channels = mx28_lradc_chan_spec;
+		iio->num_channels = ARRAY_SIZE(mx28_lradc_chan_spec);
+	}
+
 	ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
 				&mxs_lradc_trigger_handler,
 				&mxs_lradc_buffer_ops);
@@ -1707,6 +1747,6 @@
 module_platform_driver(mxs_lradc_driver);
 
 MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
-MODULE_DESCRIPTION("Freescale i.MX28 LRADC driver");
+MODULE_DESCRIPTION("Freescale MXS LRADC driver");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 75ddd4f..78fe0b5 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -124,7 +124,6 @@
 	.driver = {
 		.name = "adt7316",
 		.pm = ADT7316_PM_OPS,
-		.owner  = THIS_MODULE,
 	},
 	.probe = adt7316_i2c_probe,
 	.id_table = adt7316_i2c_id,
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index c54d5b5..6d38854 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -214,6 +214,7 @@
 	.groups = iio_evgen_groups,
 	.release = &iio_evgen_release,
 };
+
 static __init int iio_dummy_evgen_init(void)
 {
 	int ret = iio_dummy_evgen_create();
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index 1629a8a..381f90f 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -611,7 +611,6 @@
 	 */
 	iio_dummy_devs[index] = indio_dev;
 
-
 	/*
 	 * Set the device name.
 	 *
@@ -675,7 +674,6 @@
 	 */
 	struct iio_dev *indio_dev = iio_dummy_devs[index];
 
-
 	/* Unregister the device */
 	iio_device_unregister(indio_dev);
 
diff --git a/drivers/staging/iio/iio_simple_dummy.h b/drivers/staging/iio/iio_simple_dummy.h
index e877a99..8d00224 100644
--- a/drivers/staging/iio/iio_simple_dummy.h
+++ b/drivers/staging/iio/iio_simple_dummy.h
@@ -119,6 +119,7 @@
 {
 	return 0;
 };
+
 static inline
 void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
 {};
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index a651b89..00ed774 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -32,6 +32,7 @@
 	[diffvoltage3m4] = -2,
 	[accelx] = 344,
 };
+
 /**
  * iio_simple_dummy_trigger_h() - the trigger handler function
  * @irq: the interrupt number
@@ -178,7 +179,6 @@
 	iio_kfifo_free(indio_dev->buffer);
 error_ret:
 	return ret;
-
 }
 
 /**
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index ecc563c..73108ba 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -120,7 +120,7 @@
 				      const struct iio_chan_spec *chan,
 				      enum iio_event_type type,
 				      enum iio_event_direction dir,
-					  enum iio_event_info info,
+				      enum iio_event_info info,
 				      int *val, int *val2)
 {
 	struct iio_dummy_state *st = iio_priv(indio_dev);
@@ -143,7 +143,7 @@
 				       const struct iio_chan_spec *chan,
 				       enum iio_event_type type,
 				       enum iio_event_direction dir,
-					   enum iio_event_info info,
+				       enum iio_event_info info,
 				       int val, int val2)
 {
 	struct iio_dummy_state *st = iio_priv(indio_dev);
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index e646c5d..019ba52 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -838,7 +838,6 @@
 			.name = "isl29018",
 			.acpi_match_table = ACPI_PTR(isl29018_acpi_match),
 			.pm = ISL29018_PM_OPS,
-			.owner = THIS_MODULE,
 			.of_match_table = isl29018_of_match,
 		    },
 	.probe	 = isl29018_probe,
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index e5b2fdc..cd6f272 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -547,7 +547,6 @@
 	.class	= I2C_CLASS_HWMON,
 	.driver  = {
 		.name = "isl29028",
-		.owner = THIS_MODULE,
 		.of_match_table = isl29028_of_match,
 	},
 	.probe	 = isl29028_probe,
diff --git a/drivers/staging/iio/meter/ade7854.h b/drivers/staging/iio/meter/ade7854.h
index 52ca541..52f4195 100644
--- a/drivers/staging/iio/meter/ade7854.h
+++ b/drivers/staging/iio/meter/ade7854.h
@@ -168,7 +168,7 @@
 
 };
 
-extern int ade7854_probe(struct iio_dev *indio_dev, struct device *dev);
-extern int ade7854_remove(struct iio_dev *indio_dev);
+int ade7854_probe(struct iio_dev *indio_dev, struct device *dev);
+int ade7854_remove(struct iio_dev *indio_dev);
 
 #endif
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 3c1c8c6..9fe48ef 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -79,7 +79,8 @@
 }
 
 static ssize_t iio_bfin_tmr_frequency_store(struct device *dev,
-		struct device_attribute *attr, const char *buf, size_t count)
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
 {
 	struct iio_trigger *trig = to_iio_trigger(dev);
 	struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
@@ -116,8 +117,8 @@
 }
 
 static ssize_t iio_bfin_tmr_frequency_show(struct device *dev,
-				 struct device_attribute *attr,
-				 char *buf)
+					   struct device_attribute *attr,
+					   char *buf)
 {
 	struct iio_trigger *trig = to_iio_trigger(dev);
 	struct bfin_tmr_state *st = iio_trigger_get_drvdata(trig);
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index 0c1976d..2db8857 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -37,7 +37,7 @@
 	if (trig_info->frequency == 0 && state)
 		return -EINVAL;
 	dev_dbg(&trig_info->rtc->dev, "trigger frequency is %u\n",
-			trig_info->frequency);
+		trig_info->frequency);
 	ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, state);
 	if (ret == 0)
 		trig_info->state = state;
@@ -74,8 +74,9 @@
 		if (ret == 0 && trig_info->state && trig_info->frequency == 0)
 			ret = rtc_irq_set_state(trig_info->rtc,
 						&trig_info->task, 1);
-	} else
+	} else {
 		ret = rtc_irq_set_state(trig_info->rtc, &trig_info->task, 0);
+	}
 	if (ret)
 		goto error_ret;
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 5dd9cdf..01961d9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -134,7 +134,7 @@
 /* container_of depends on "likely" which is defined in libcfs_private.h */
 static inline void *__container_of(void *ptr, unsigned long shift)
 {
-	if (unlikely(IS_ERR(ptr) || ptr == NULL))
+	if (IS_ERR_OR_NULL(ptr))
 		return ptr;
 	return (char *)ptr - shift;
 }
@@ -148,4 +148,17 @@
 void *libcfs_kvzalloc_cpt(struct cfs_cpt_table *cptab, int cpt, size_t size,
 			  gfp_t flags);
 
+extern struct miscdevice libcfs_dev;
+/**
+ * The path of debug log dump upcall script.
+ */
+extern char lnet_upcall[1024];
+extern char lnet_debug_log_upcall[1024];
+
+extern void libcfs_init_nidstrings(void);
+
+extern struct cfs_psdev_ops libcfs_psdev_ops;
+
+extern struct cfs_wi_sched *cfs_sched_rehash;
+
 #endif /* _LIBCFS_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 8251ac9..a3aa644 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -50,7 +50,6 @@
 extern unsigned int libcfs_debug;
 extern unsigned int libcfs_printk;
 extern unsigned int libcfs_console_ratelimit;
-extern unsigned int libcfs_watchdog_ratelimit;
 extern unsigned int libcfs_console_max_delay;
 extern unsigned int libcfs_console_min_delay;
 extern unsigned int libcfs_console_backoff;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index eea55d9..aa69c6a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -79,14 +79,16 @@
 {
 	int ret = 0;
 
-	if (unlikely(CFS_FAIL_PRECHECK(id) &&
-		     (ret = __cfs_fail_check_set(id, value, set)))) {
-		if (quiet) {
-			CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
-			       id, value);
-		} else {
-			LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
-				      id, value);
+	if (unlikely(CFS_FAIL_PRECHECK(id))) {
+		ret = __cfs_fail_check_set(id, value, set);
+		if (ret) {
+			if (quiet) {
+				CDEBUG(D_INFO, "*** cfs_fail_loc=%x, val=%u***\n",
+				       id, value);
+			} else {
+				LCONSOLE_INFO("*** cfs_fail_loc=%x, val=%u***\n",
+					      id, value);
+			}
 		}
 	}
 
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index ed37d26..9544860 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -87,24 +87,6 @@
 	lbug_with_loc(&msgdata);					\
 } while (0)
 
-extern atomic_t libcfs_kmemory;
-/*
- * Memory
- */
-
-# define libcfs_kmem_inc(ptr, size)		\
-do {						\
-	atomic_add(size, &libcfs_kmemory);	\
-} while (0)
-
-# define libcfs_kmem_dec(ptr, size)		\
-do {						\
-	atomic_sub(size, &libcfs_kmemory);	\
-} while (0)
-
-# define libcfs_kmem_read()			\
-	atomic_read(&libcfs_kmemory)
-
 #ifndef LIBCFS_VMALLOC_SIZE
 #define LIBCFS_VMALLOC_SIZE	(2 << PAGE_CACHE_SHIFT) /* 2 pages */
 #endif
@@ -121,14 +103,9 @@
 	if (unlikely((ptr) == NULL)) {					    \
 		CERROR("LNET: out of memory at %s:%d (tried to alloc '"	    \
 		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size));  \
-		CERROR("LNET: %d total bytes allocated by lnet\n",	    \
-		       libcfs_kmem_read());				    \
 	} else {							    \
 		memset((ptr), 0, (size));				    \
-		libcfs_kmem_inc((ptr), (size));				    \
-		CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %d).\n",  \
-		       (int)(size), (ptr), libcfs_kmem_read());		    \
-	}								   \
+	}								    \
 } while (0)
 
 /**
@@ -180,9 +157,6 @@
 		       "%s:%d\n", s, __FILE__, __LINE__);	       \
 		break;						  \
 	}							       \
-	libcfs_kmem_dec((ptr), s);				      \
-	CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n",     \
-	       s, (ptr), libcfs_kmem_read());				\
 	if (unlikely(s > LIBCFS_VMALLOC_SIZE))			  \
 		vfree(ptr);				    \
 	else							    \
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
index 509dc1e..478e958 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
@@ -102,6 +102,4 @@
 int cfs_ip_addr_match(__u32 addr, struct list_head *list);
 void cfs_ip_addr_free(struct list_head *list);
 
-#define	strtoul(str, endp, base)	simple_strtoul(str, endp, base)
-
 #endif
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 4eb24a1..c29d2ce 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -1390,7 +1390,7 @@
 		.max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
 		.page_shift        = PAGE_SHIFT,
 		.access            = (IB_ACCESS_LOCAL_WRITE |
-		                      IB_ACCESS_REMOTE_WRITE),
+				      IB_ACCESS_REMOTE_WRITE),
 		.pool_size         = fps->fps_pool_size,
 		.dirty_watermark   = fps->fps_flush_trigger,
 		.flush_function    = NULL,
@@ -1789,140 +1789,6 @@
 	goto again;
 }
 
-void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
-{
-	kib_pmr_pool_t *ppo = pmr->pmr_pool;
-	struct ib_mr *mr = pmr->pmr_mr;
-
-	pmr->pmr_mr = NULL;
-	kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
-	if (mr != NULL)
-		ib_dereg_mr(mr);
-}
-
-int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
-		    kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
-{
-	kib_phys_mr_t *pmr;
-	struct list_head *node;
-	int rc;
-	int i;
-
-	node = kiblnd_pool_alloc_node(&pps->pps_poolset);
-	if (node == NULL) {
-		CERROR("Failed to allocate PMR descriptor\n");
-		return -ENOMEM;
-	}
-
-	pmr = container_of(node, kib_phys_mr_t, pmr_list);
-	if (pmr->pmr_pool->ppo_hdev != hdev) {
-		kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
-		return -EAGAIN;
-	}
-
-	for (i = 0; i < rd->rd_nfrags; i++) {
-		pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
-		pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
-	}
-
-	pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
-				     pmr->pmr_ipb, rd->rd_nfrags,
-				     IB_ACCESS_LOCAL_WRITE |
-				     IB_ACCESS_REMOTE_WRITE,
-				     iova);
-	if (!IS_ERR(pmr->pmr_mr)) {
-		pmr->pmr_iova = *iova;
-		*pp_pmr = pmr;
-		return 0;
-	}
-
-	rc = PTR_ERR(pmr->pmr_mr);
-	CERROR("Failed ib_reg_phys_mr: %d\n", rc);
-
-	pmr->pmr_mr = NULL;
-	kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
-
-	return rc;
-}
-
-static void kiblnd_destroy_pmr_pool(kib_pool_t *pool)
-{
-	kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
-	kib_phys_mr_t *pmr;
-	kib_phys_mr_t *tmp;
-
-	LASSERT(pool->po_allocated == 0);
-
-	list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) {
-		LASSERT(pmr->pmr_mr == NULL);
-		list_del(&pmr->pmr_list);
-
-		if (pmr->pmr_ipb != NULL) {
-			LIBCFS_FREE(pmr->pmr_ipb,
-				    IBLND_MAX_RDMA_FRAGS *
-				    sizeof(struct ib_phys_buf));
-		}
-
-		LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
-	}
-
-	kiblnd_fini_pool(pool);
-	if (ppo->ppo_hdev != NULL)
-		kiblnd_hdev_decref(ppo->ppo_hdev);
-
-	LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
-}
-
-static inline int kiblnd_pmr_pool_size(int ncpts)
-{
-	int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts;
-
-	return max(IBLND_PMR_POOL, size);
-}
-
-static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size,
-				  kib_pool_t **pp_po)
-{
-	struct kib_pmr_pool *ppo;
-	struct kib_pool *pool;
-	kib_phys_mr_t *pmr;
-	int i;
-
-	LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
-			 ps->ps_cpt, sizeof(kib_pmr_pool_t));
-	if (ppo == NULL) {
-		CERROR("Failed to allocate PMR pool\n");
-		return -ENOMEM;
-	}
-
-	pool = &ppo->ppo_pool;
-	kiblnd_init_pool(ps, pool, size);
-
-	for (i = 0; i < size; i++) {
-		LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(),
-				 ps->ps_cpt, sizeof(kib_phys_mr_t));
-		if (pmr == NULL)
-			break;
-
-		pmr->pmr_pool = ppo;
-		LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt,
-				 IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb));
-		if (pmr->pmr_ipb == NULL)
-			break;
-
-		list_add(&pmr->pmr_list, &pool->po_free_list);
-	}
-
-	if (i < size) {
-		ps->ps_pool_destroy(pool);
-		return -ENOMEM;
-	}
-
-	ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
-	*pp_po = pool;
-	return 0;
-}
-
 static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
 {
 	kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
@@ -2078,7 +1944,6 @@
 	cfs_cpt_for_each(i, lnet_cpt_table()) {
 		kib_tx_poolset_t *tps;
 		kib_fmr_poolset_t *fps;
-		kib_pmr_poolset_t *pps;
 
 		if (net->ibn_tx_ps != NULL) {
 			tps = net->ibn_tx_ps[i];
@@ -2089,11 +1954,6 @@
 			fps = net->ibn_fmr_ps[i];
 			kiblnd_fini_fmr_poolset(fps);
 		}
-
-		if (net->ibn_pmr_ps != NULL) {
-			pps = net->ibn_pmr_ps[i];
-			kiblnd_fini_poolset(&pps->pps_poolset);
-		}
 	}
 
 	if (net->ibn_tx_ps != NULL) {
@@ -2105,18 +1965,13 @@
 		cfs_percpt_free(net->ibn_fmr_ps);
 		net->ibn_fmr_ps = NULL;
 	}
-
-	if (net->ibn_pmr_ps != NULL) {
-		cfs_percpt_free(net->ibn_pmr_ps);
-		net->ibn_pmr_ps = NULL;
-	}
 }
 
 static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
 {
 	unsigned long flags;
 	int cpt;
-	int rc;
+	int rc = 0;
 	int i;
 
 	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -2137,12 +1992,16 @@
 		goto failed;
 	}
 
-	/* TX pool must be created later than FMR/PMR, see LU-2268
-	 * for details */
+	/*
+	 * TX pool must be created later than FMR, see LU-2268
+	 * for details
+	 */
 	LASSERT(net->ibn_tx_ps == NULL);
 
-	/* premapping can fail if ibd_nmr > 1, so we always create
-	 * FMR/PMR pool and map-on-demand if premapping failed */
+	/*
+	 * premapping can fail if ibd_nmr > 1, so we always create
+	 * FMR pool and map-on-demand if premapping failed
+	 */
 
 	net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
 					   sizeof(kib_fmr_poolset_t));
@@ -2158,7 +2017,7 @@
 					     kiblnd_fmr_pool_size(ncpts),
 					     kiblnd_fmr_flush_trigger(ncpts));
 		if (rc == -ENOSYS && i == 0) /* no FMR */
-			break; /* create PMR pool */
+			break;
 
 		if (rc != 0) { /* a real error */
 			CERROR("Can't initialize FMR pool for CPT %d: %d\n",
@@ -2175,38 +2034,8 @@
 	cfs_percpt_free(net->ibn_fmr_ps);
 	net->ibn_fmr_ps = NULL;
 
-	CWARN("Device does not support FMR, failing back to PMR\n");
-
-	if (*kiblnd_tunables.kib_pmr_pool_size <
-	    *kiblnd_tunables.kib_ntx / 4) {
-		CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n",
-		       *kiblnd_tunables.kib_pmr_pool_size,
-		       *kiblnd_tunables.kib_ntx / 4);
-		rc = -EINVAL;
-		goto failed;
-	}
-
-	net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
-					   sizeof(kib_pmr_poolset_t));
-	if (net->ibn_pmr_ps == NULL) {
-		CERROR("Failed to allocate PMR pool array\n");
-		rc = -ENOMEM;
-		goto failed;
-	}
-
-	for (i = 0; i < ncpts; i++) {
-		cpt = (cpts == NULL) ? i : cpts[i];
-		rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset,
-					 cpt, net, "PMR",
-					 kiblnd_pmr_pool_size(ncpts),
-					 kiblnd_create_pmr_pool,
-					 kiblnd_destroy_pmr_pool, NULL, NULL);
-		if (rc != 0) {
-			CERROR("Can't initialize PMR pool for CPT %d: %d\n",
-			       cpt, rc);
+	CWARN("Device does not support FMR\n");
 			goto failed;
-		}
-	}
 
  create_tx_pool:
 	net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
@@ -2318,17 +2147,13 @@
 static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
 {
 	struct ib_mr *mr;
-	int i;
 	int rc;
-	__u64 mm_size;
-	__u64 mr_size;
 	int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
 
 	rc = kiblnd_hdev_get_attr(hdev);
 	if (rc != 0)
 		return rc;
 
-	if (hdev->ibh_mr_shift == 64) {
 		LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
 		if (hdev->ibh_mrs == NULL) {
 			CERROR("Failed to allocate MRs table\n");
@@ -2347,53 +2172,6 @@
 
 		hdev->ibh_mrs[0] = mr;
 
-		goto out;
-	}
-
-	mr_size = 1ULL << hdev->ibh_mr_shift;
-	mm_size = (unsigned long)high_memory - PAGE_OFFSET;
-
-	hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
-
-	if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
-		/* it's 4T..., assume we will re-code at that time */
-		CERROR("Can't support memory size: x%#llx with MR size: x%#llx\n",
-		       mm_size, mr_size);
-		return -EINVAL;
-	}
-
-	/* create an array of MRs to cover all memory */
-	LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
-	if (hdev->ibh_mrs == NULL) {
-		CERROR("Failed to allocate MRs' table\n");
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < hdev->ibh_nmrs; i++) {
-		struct ib_phys_buf ipb;
-		__u64 iova;
-
-		ipb.size = hdev->ibh_mr_size;
-		ipb.addr = i * mr_size;
-		iova = ipb.addr;
-
-		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
-		if (IS_ERR(mr)) {
-			CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
-			       ipb.addr, ipb.size, PTR_ERR(mr));
-			kiblnd_hdev_cleanup_mrs(hdev);
-			return PTR_ERR(mr);
-		}
-
-		LASSERT(iova == ipb.addr);
-
-		hdev->ibh_mrs[i] = mr;
-	}
-
-out:
-	if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
-		LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
-			      hdev->ibh_mr_size, hdev->ibh_nmrs);
 	return 0;
 }
 
@@ -2564,14 +2342,9 @@
 			kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
 					    &zombie_tpo);
 
-			if (net->ibn_fmr_ps != NULL) {
+			if (net->ibn_fmr_ps)
 				kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
 							&zombie_fpo);
-
-			} else if (net->ibn_pmr_ps != NULL) {
-				kiblnd_fail_poolset(&net->ibn_pmr_ps[i]->
-						    pps_poolset, &zombie_ppo);
-			}
 		}
 	}
 
@@ -2667,9 +2440,6 @@
 
 	LASSERT(list_empty(&kiblnd_data.kib_devs));
 
-	CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
-
 	switch (kiblnd_data.kib_init) {
 	default:
 		LBUG();
@@ -2720,9 +2490,6 @@
 	if (kiblnd_data.kib_scheds != NULL)
 		cfs_percpt_free(kiblnd_data.kib_scheds);
 
-	CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
-
 	kiblnd_data.kib_init = IBLND_INIT_NOTHING;
 	module_put(THIS_MODULE);
 }
@@ -2739,9 +2506,6 @@
 	if (net == NULL)
 		goto out;
 
-	CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
-
 	write_lock_irqsave(g_lock, flags);
 	net->ibn_shutdown = 1;
 	write_unlock_irqrestore(g_lock, flags);
@@ -2786,9 +2550,6 @@
 		break;
 	}
 
-	CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
-
 	net->ibn_init = IBLND_INIT_NOTHING;
 	ni->ni_data = NULL;
 
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index f5d1d9f..f4b6c33 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -48,7 +48,7 @@
 #include <linux/uio.h>
 #include <linux/uaccess.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <linux/fs.h>
 #include <linux/file.h>
@@ -104,7 +104,6 @@
 	int          *kib_map_on_demand;     /* map-on-demand if RD has more
 					      * fragments than this value, 0
 					      * disable map-on-demand */
-	int          *kib_pmr_pool_size;     /* # physical MR in pool */
 	int          *kib_fmr_pool_size;     /* # FMRs in pool */
 	int          *kib_fmr_flush_trigger; /* When to trigger FMR flush */
 	int          *kib_fmr_cache;         /* enable FMR pool cache? */
@@ -120,7 +119,7 @@
 #define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
 
 #define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
+#define IBLND_CREDITS_MAX	  ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
 #define IBLND_MSG_QUEUE_SIZE(v)    ((v) == IBLND_MSG_VERSION_1 ? \
 				     IBLND_MSG_QUEUE_SIZE_V1 :   \
@@ -163,7 +162,6 @@
 /* Pools (shared by connections on each CPT) */
 /* These pools can grow at runtime, so don't need give a very large value */
 #define IBLND_TX_POOL			256
-#define IBLND_PMR_POOL			256
 #define IBLND_FMR_POOL			256
 #define IBLND_FMR_POOL_FLUSH		192
 
@@ -232,17 +230,6 @@
 	struct page        *ibp_pages[0];       /* page array */
 } kib_pages_t;
 
-struct kib_pmr_pool;
-
-typedef struct {
-	struct list_head    pmr_list;           /* chain node */
-	struct ib_phys_buf  *pmr_ipb;           /* physical buffer */
-	struct ib_mr        *pmr_mr;            /* IB MR */
-	struct kib_pmr_pool *pmr_pool;          /* owner of this MR */
-	__u64               pmr_iova;           /* Virtual I/O address */
-	int                 pmr_refcount;       /* reference count */
-} kib_phys_mr_t;
-
 struct kib_pool;
 struct kib_poolset;
 
@@ -299,15 +286,6 @@
 } kib_tx_pool_t;
 
 typedef struct {
-	kib_poolset_t         pps_poolset;        /* pool-set */
-} kib_pmr_poolset_t;
-
-typedef struct kib_pmr_pool {
-	struct kib_hca_dev    *ppo_hdev;          /* device for this pool */
-	kib_pool_t            ppo_pool;           /* pool */
-} kib_pmr_pool_t;
-
-typedef struct {
 	spinlock_t            fps_lock;            /* serialize */
 	struct kib_net        *fps_net;            /* IB network */
 	struct list_head      fps_pool_list;       /* FMR pool list */
@@ -347,7 +325,6 @@
 
 	kib_tx_poolset_t      **ibn_tx_ps;    /* tx pool-set */
 	kib_fmr_poolset_t     **ibn_fmr_ps;   /* fmr pool-set */
-	kib_pmr_poolset_t     **ibn_pmr_ps;   /* pmr pool-set */
 
 	kib_dev_t             *ibn_dev;       /* underlying IB device */
 } kib_net_t;
@@ -519,7 +496,7 @@
 	enum ib_wc_status      rx_status;     /* completion status */
 	kib_msg_t              *rx_msg;       /* message buffer (host vaddr) */
 	__u64                  rx_msgaddr;    /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+	DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);  /* for dma_unmap_single() */
 	struct ib_recv_wr      rx_wrq;        /* receive work item... */
 	struct ib_sge          rx_sge;        /* ...and its memory */
 } kib_rx_t;
@@ -546,7 +523,7 @@
 					       * completion */
 	kib_msg_t              *tx_msg;       /* message buffer (host vaddr) */
 	__u64                  tx_msgaddr;    /* message buffer (I/O addr) */
-	DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+	DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);  /* for dma_unmap_single() */
 	int                    tx_nwrq;       /* # send work items */
 	struct ib_send_wr      *tx_wrq;       /* send work items... */
 	struct ib_sge          *tx_sge;       /* ...and their memory */
@@ -554,10 +531,7 @@
 	int                    tx_nfrags;     /* # entries in... */
 	struct scatterlist     *tx_frags;     /* dma_map_sg descriptor */
 	__u64                  *tx_pages;     /* rdma phys page addrs */
-	union {
-		kib_phys_mr_t  *pmr;          /* MR for physical buffer */
-		kib_fmr_t      fmr;           /* FMR */
-	}                      tx_u;
+	kib_fmr_t		fmr;	      /* FMR */
 	int                    tx_dmadir;     /* dma direction */
 } kib_tx_t;
 
@@ -642,19 +616,19 @@
 
 extern kib_data_t kiblnd_data;
 
-extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
 
 static inline void
 kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
 {
-	LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
 	atomic_inc(&hdev->ibh_ref);
 }
 
 static inline void
 kiblnd_hdev_decref(kib_hca_dev_t *hdev)
 {
-	LASSERT (atomic_read(&hdev->ibh_ref) > 0);
+	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
 	if (atomic_dec_and_test(&hdev->ibh_ref))
 		kiblnd_hdev_destroy(hdev);
 }
@@ -701,7 +675,7 @@
 do {							    \
 	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",		\
 	       (peer), libcfs_nid2str((peer)->ibp_nid),	 \
-	       atomic_read (&(peer)->ibp_refcount));	\
+	       atomic_read(&(peer)->ibp_refcount));	\
 	atomic_inc(&(peer)->ibp_refcount);		  \
 } while (0)
 
@@ -709,32 +683,32 @@
 do {							    \
 	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",		\
 	       (peer), libcfs_nid2str((peer)->ibp_nid),	 \
-	       atomic_read (&(peer)->ibp_refcount));	\
+	       atomic_read(&(peer)->ibp_refcount));	\
 	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);	      \
 	if (atomic_dec_and_test(&(peer)->ibp_refcount))     \
 		kiblnd_destroy_peer(peer);		      \
 } while (0)
 
 static inline struct list_head *
-kiblnd_nid2peerlist (lnet_nid_t nid)
+kiblnd_nid2peerlist(lnet_nid_t nid)
 {
 	unsigned int hash =
 		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
 
-	return (&kiblnd_data.kib_peers [hash]);
+	return &kiblnd_data.kib_peers[hash];
 }
 
 static inline int
-kiblnd_peer_active (kib_peer_t *peer)
+kiblnd_peer_active(kib_peer_t *peer)
 {
 	/* Am I in the peer hash table? */
-	return (!list_empty(&peer->ibp_list));
+	return !list_empty(&peer->ibp_list);
 }
 
 static inline kib_conn_t *
-kiblnd_get_conn_locked (kib_peer_t *peer)
+kiblnd_get_conn_locked(kib_peer_t *peer)
 {
-	LASSERT (!list_empty(&peer->ibp_conns));
+	LASSERT(!list_empty(&peer->ibp_conns));
 
 	/* just return the first connection */
 	return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
@@ -751,7 +725,7 @@
 static inline int
 kiblnd_need_noop(kib_conn_t *conn)
 {
-	LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
 	if (conn->ibc_outstanding_credits <
 	    IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
@@ -788,7 +762,7 @@
 }
 
 static inline const char *
-kiblnd_queue2str (kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
 {
 	if (q == &conn->ibc_tx_queue)
 		return "tx_queue";
@@ -815,43 +789,43 @@
 #define IBLND_WID_MASK  3UL
 
 static inline __u64
-kiblnd_ptr2wreqid (void *ptr, int type)
+kiblnd_ptr2wreqid(void *ptr, int type)
 {
 	unsigned long lptr = (unsigned long)ptr;
 
-	LASSERT ((lptr & IBLND_WID_MASK) == 0);
-	LASSERT ((type & ~IBLND_WID_MASK) == 0);
+	LASSERT((lptr & IBLND_WID_MASK) == 0);
+	LASSERT((type & ~IBLND_WID_MASK) == 0);
 	return (__u64)(lptr | type);
 }
 
 static inline void *
-kiblnd_wreqid2ptr (__u64 wreqid)
+kiblnd_wreqid2ptr(__u64 wreqid)
 {
 	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
 }
 
 static inline int
-kiblnd_wreqid2type (__u64 wreqid)
+kiblnd_wreqid2type(__u64 wreqid)
 {
-	return (wreqid & IBLND_WID_MASK);
+	return wreqid & IBLND_WID_MASK;
 }
 
 static inline void
-kiblnd_set_conn_state (kib_conn_t *conn, int state)
+kiblnd_set_conn_state(kib_conn_t *conn, int state)
 {
 	conn->ibc_state = state;
 	mb();
 }
 
 static inline void
-kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
 {
 	msg->ibm_type = type;
 	msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
 }
 
 static inline int
-kiblnd_rd_size (kib_rdma_desc_t *rd)
+kiblnd_rd_size(kib_rdma_desc_t *rd)
 {
 	int   i;
 	int   size;
@@ -887,7 +861,7 @@
 		rd->rd_frags[index].rf_addr += nob;
 		rd->rd_frags[index].rf_nob  -= nob;
 	} else {
-		index ++;
+		index++;
 	}
 
 	return index;
@@ -896,8 +870,8 @@
 static inline int
 kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
 {
-	LASSERT (msgtype == IBLND_MSG_GET_REQ ||
-		 msgtype == IBLND_MSG_PUT_ACK);
+	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
+		msgtype == IBLND_MSG_PUT_ACK);
 
 	return msgtype == IBLND_MSG_GET_REQ ?
 	       offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
@@ -978,57 +952,53 @@
 			 int npages, __u64 iov, kib_fmr_t *fmr);
 void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
 
-int  kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
-			 kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
-void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr);
-
-int  kiblnd_startup (lnet_ni_t *ni);
-void kiblnd_shutdown (lnet_ni_t *ni);
-int  kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+int  kiblnd_startup(lnet_ni_t *ni);
+void kiblnd_shutdown(lnet_ni_t *ni);
+int  kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
+void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
 
 int  kiblnd_tunables_init(void);
 void kiblnd_tunables_fini(void);
 
-int  kiblnd_connd (void *arg);
+int  kiblnd_connd(void *arg);
 int  kiblnd_scheduler(void *arg);
 int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
-int  kiblnd_failover_thread (void *arg);
+int  kiblnd_failover_thread(void *arg);
 
 int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages (kib_pages_t *p);
+void kiblnd_free_pages(kib_pages_t *p);
 
 int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
 			struct rdma_cm_event *event);
 int  kiblnd_translate_mtu(int value);
 
 int  kiblnd_dev_failover(kib_dev_t *dev);
-int  kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer (kib_peer_t *peer);
-void kiblnd_destroy_dev (kib_dev_t *dev);
-void kiblnd_unlink_peer_locked (kib_peer_t *peer);
-void kiblnd_peer_alive (kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid);
-void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error);
-int  kiblnd_close_stale_conns_locked (kib_peer_t *peer,
+int  kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(kib_peer_t *peer);
+void kiblnd_destroy_dev(kib_dev_t *dev);
+void kiblnd_unlink_peer_locked(kib_peer_t *peer);
+void kiblnd_peer_alive(kib_peer_t *peer);
+kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
+void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+int  kiblnd_close_stale_conns_locked(kib_peer_t *peer,
 				      int version, __u64 incarnation);
-int  kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why);
+int  kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
 
 void kiblnd_connreq_done(kib_conn_t *conn, int status);
-kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid,
+kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 				int state, int version);
-void kiblnd_destroy_conn (kib_conn_t *conn);
-void kiblnd_close_conn (kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked (kib_conn_t *conn, int error);
+void kiblnd_destroy_conn(kib_conn_t *conn);
+void kiblnd_close_conn(kib_conn_t *conn, int error);
+void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
 
-int  kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
+int  kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 		       int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
 
-void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
-void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist,
+void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
+void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
+void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
 			 int status);
 void kiblnd_check_sends (kib_conn_t *conn);
 
@@ -1036,10 +1006,10 @@
 void kiblnd_cq_event(struct ib_event *event, void *arg);
 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
 
-void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
 		      int credits, lnet_nid_t dstnid, __u64 dststamp);
 int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int  kiblnd_post_rx (kib_rx_t *rx, int credit);
+int  kiblnd_post_rx(kib_rx_t *rx, int credit);
 
 int  kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
 int  kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 477aa8b..a23a6d9 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -121,7 +121,6 @@
 	LASSERT(tx->tx_conn == NULL);
 	LASSERT(tx->tx_lntmsg[0] == NULL);
 	LASSERT(tx->tx_lntmsg[1] == NULL);
-	LASSERT(tx->tx_u.pmr == NULL);
 	LASSERT(tx->tx_nfrags == 0);
 
 	return tx;
@@ -575,7 +574,7 @@
 	cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
 
 	fps = net->ibn_fmr_ps[cpt];
-	rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr);
+	rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
 	if (rc != 0) {
 		CERROR("Can't map %d pages: %d\n", npages, rc);
 		return rc;
@@ -583,8 +582,8 @@
 
 	/* If rd is not tx_rd, it's going to get sent to a peer, who will need
 	 * the rkey */
-	rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
-					 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
+	rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
+					 tx->fmr.fmr_pfmr->fmr->lkey;
 	rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
 	rd->rd_frags[0].rf_nob = nob;
 	rd->rd_nfrags = 1;
@@ -592,42 +591,6 @@
 	return 0;
 }
 
-static int
-kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
-{
-	kib_hca_dev_t *hdev;
-	kib_pmr_poolset_t *pps;
-	__u64 iova;
-	int cpt;
-	int rc;
-
-	LASSERT(tx->tx_pool != NULL);
-	LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
-
-	hdev = tx->tx_pool->tpo_hdev;
-
-	iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask;
-
-	cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
-
-	pps = net->ibn_pmr_ps[cpt];
-	rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr);
-	if (rc != 0) {
-		CERROR("Failed to create MR by phybuf: %d\n", rc);
-		return rc;
-	}
-
-	/* If rd is not tx_rd, it's going to get sent to a peer, who will need
-	 * the rkey */
-	rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
-					 tx->tx_u.pmr->pmr_mr->lkey;
-	rd->rd_nfrags = 1;
-	rd->rd_frags[0].rf_addr = iova;
-	rd->rd_frags[0].rf_nob = nob;
-
-	return 0;
-}
-
 void
 kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
 {
@@ -635,13 +598,9 @@
 
 	LASSERT(net != NULL);
 
-	if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) {
-		kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
-		tx->tx_u.fmr.fmr_pfmr = NULL;
-
-	} else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) {
-		kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
-		tx->tx_u.pmr = NULL;
+	if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
+		kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
+		tx->fmr.fmr_pfmr = NULL;
 	}
 
 	if (tx->tx_nfrags != 0) {
@@ -687,8 +646,6 @@
 
 	if (net->ibn_fmr_ps != NULL)
 		return kiblnd_fmr_map_tx(net, tx, rd, nob);
-	else if (net->ibn_pmr_ps != NULL)
-		return kiblnd_pmr_map_tx(net, tx, rd, nob);
 
 	return -EINVAL;
 }
@@ -3133,8 +3090,7 @@
 		dropped_lock = 0;
 
 		if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
-			conn = list_entry(kiblnd_data. \
-					      kib_connd_zombies.next,
+			conn = list_entry(kiblnd_data.kib_connd_zombies.next,
 					      kib_conn_t, ibc_list);
 			list_del(&conn->ibc_list);
 
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b0e0036..b3d1b5d 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -126,11 +126,6 @@
 module_param(fmr_cache, int, 0444);
 MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
 
-/* NB: this value is shared by all CPTs, it can grow at runtime */
-static int pmr_pool_size = 512;
-module_param(pmr_pool_size, int, 0444);
-MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
-
 /*
  * 0: disable failover
  * 1: enable failover if necessary
@@ -170,7 +165,6 @@
 	.kib_fmr_pool_size     = &fmr_pool_size,
 	.kib_fmr_flush_trigger = &fmr_flush_trigger,
 	.kib_fmr_cache         = &fmr_cache,
-	.kib_pmr_pool_size     = &pmr_pool_size,
 	.kib_require_priv_port = &require_privileged_port,
 	.kib_use_priv_port     = &use_privileged_port,
 	.kib_nscheds           = &nscheds
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 4128a92..d8bfcad 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2252,8 +2252,6 @@
 	int i;
 	int j;
 
-	CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
 	LASSERT(ksocknal_data.ksnd_nnets == 0);
 
 	switch (ksocknal_data.ksnd_init) {
@@ -2331,9 +2329,6 @@
 		break;
 	}
 
-	CDEBUG(D_MALLOC, "after NAL cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
-
 	module_put(THIS_MODULE);
 }
 
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index 8a9d4a0..a0fcbc3 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -46,6 +46,7 @@
 #include <linux/sysctl.h>
 #include <linux/uio.h>
 #include <linux/unistd.h>
+#include <asm/irq.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 
@@ -519,8 +520,8 @@
 	atomic_inc(&conn->ksnc_conn_refcount);
 }
 
-extern void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
-extern void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
+void ksocknal_finalize_zcreq(ksock_conn_t *conn);
 
 static inline void
 ksocknal_conn_decref(ksock_conn_t *conn)
@@ -565,8 +566,8 @@
 	atomic_inc(&tx->tx_refcount);
 }
 
-extern void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
-extern void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
+void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
+void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
 
 static inline void
 ksocknal_tx_decref(ksock_tx_t *tx)
@@ -583,7 +584,7 @@
 	atomic_inc(&route->ksnr_refcount);
 }
 
-extern void ksocknal_destroy_route(ksock_route_t *route);
+void ksocknal_destroy_route(ksock_route_t *route);
 
 static inline void
 ksocknal_route_decref(ksock_route_t *route)
@@ -600,7 +601,7 @@
 	atomic_inc(&peer->ksnp_refcount);
 }
 
-extern void ksocknal_destroy_peer(ksock_peer_t *peer);
+void ksocknal_destroy_peer(ksock_peer_t *peer);
 
 static inline void
 ksocknal_peer_decref(ksock_peer_t *peer)
@@ -620,70 +621,69 @@
 		  unsigned int offset, unsigned int mlen, unsigned int rlen);
 int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
 
-extern int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-extern ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-extern ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
-extern void ksocknal_peer_failed(ksock_peer_t *peer);
-extern int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
-				 struct socket *sock, int type);
-extern void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
-extern void ksocknal_terminate_conn(ksock_conn_t *conn);
-extern void ksocknal_destroy_conn(ksock_conn_t *conn);
-extern int  ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
-					      __u32 ipaddr, int why);
-extern int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
-extern int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
-extern ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
-					       ksock_tx_t *tx, int nonblk);
+int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
+ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
+void ksocknal_peer_failed(ksock_peer_t *peer);
+int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+			 struct socket *sock, int type);
+void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
+void ksocknal_terminate_conn(ksock_conn_t *conn);
+void ksocknal_destroy_conn(ksock_conn_t *conn);
+int  ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
+				      __u32 ipaddr, int why);
+int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
+int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
+ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
+					ksock_tx_t *tx, int nonblk);
 
-extern int  ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
-				   lnet_process_id_t id);
-extern ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-extern void ksocknal_free_tx(ksock_tx_t *tx);
-extern ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-extern void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
-extern void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
-				  int error);
-extern void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
-extern void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-extern int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
-extern void ksocknal_thread_fini(void);
-extern void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-extern ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
-extern ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
-extern int ksocknal_new_packet(ksock_conn_t *conn, int skip);
-extern int ksocknal_scheduler(void *arg);
-extern int ksocknal_connd(void *arg);
-extern int ksocknal_reaper(void *arg);
-extern int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
-				lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-extern int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
-				ksock_hello_msg_t *hello, lnet_process_id_t *id,
-				__u64 *incarnation);
-extern void ksocknal_read_callback(ksock_conn_t *conn);
-extern void ksocknal_write_callback(ksock_conn_t *conn);
+int  ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
+			    lnet_process_id_t id);
+ksock_tx_t *ksocknal_alloc_tx(int type, int size);
+void ksocknal_free_tx(ksock_tx_t *tx);
+ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+void ksocknal_next_tx_carrier(ksock_conn_t *conn);
+void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
+void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
+void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
+void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
+int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
+void ksocknal_thread_fini(void);
+void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
+ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
+ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
+int ksocknal_new_packet(ksock_conn_t *conn, int skip);
+int ksocknal_scheduler(void *arg);
+int ksocknal_connd(void *arg);
+int ksocknal_reaper(void *arg);
+int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+			lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
+int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+			ksock_hello_msg_t *hello, lnet_process_id_t *id,
+			__u64 *incarnation);
+void ksocknal_read_callback(ksock_conn_t *conn);
+void ksocknal_write_callback(ksock_conn_t *conn);
 
-extern int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-extern void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-extern void ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn);
-extern void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
-extern void ksocknal_lib_push_conn(ksock_conn_t *conn);
-extern int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
-extern int ksocknal_lib_setup_sock(struct socket *so);
-extern int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-extern int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-extern void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-extern int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-extern int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-extern int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
-					   int *rxmem, int *nagle);
+int ksocknal_lib_zc_capable(ksock_conn_t *conn);
+void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
+void ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn);
+void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
+void ksocknal_lib_push_conn(ksock_conn_t *conn);
+int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
+int ksocknal_lib_setup_sock(struct socket *so);
+int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
+int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
+void ksocknal_lib_eager_ack(ksock_conn_t *conn);
+int ksocknal_lib_recv_iov(ksock_conn_t *conn);
+int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
+int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
+				   int *rxmem, int *nagle);
 
-extern int ksocknal_tunables_init(void);
+int ksocknal_tunables_init(void);
 
-extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+void ksocknal_lib_csum_tx(ksock_tx_t *tx);
 
-extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
-extern int ksocknal_lib_bind_thread_to_cpu(int id);
+int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+int ksocknal_lib_bind_thread_to_cpu(int id);
 
 #endif /* _SOCKLND_SOCKLND_H_ */
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index fe2a83a..0d5aac6 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -526,8 +526,7 @@
 
 		counter++;   /* exponential backoff warnings */
 		if ((counter & (-counter)) == counter)
-			CWARN("%u ENOMEM tx %p (%u allocated)\n",
-			      counter, conn, atomic_read(&libcfs_kmemory));
+			CWARN("%u ENOMEM tx %p\n", counter, conn);
 
 		/* Queue on ksnd_enomem_conns for retry after a timeout */
 		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index ee902dc..40f418b 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -32,17 +32,6 @@
 
 static struct ctl_table_header *lnet_table_header;
 
-#define CTL_LNET	 (0x100)
-enum {
-	PSDEV_LNET_STATS = 100,
-	PSDEV_LNET_ROUTES,
-	PSDEV_LNET_ROUTERS,
-	PSDEV_LNET_PEERS,
-	PSDEV_LNET_BUFFERS,
-	PSDEV_LNET_NIS,
-	PSDEV_LNET_PTL_ROTOR,
-};
-
 #define LNET_LOFFT_BITS		(sizeof(loff_t) * 8)
 /*
  * NB: max allowed LNET_CPT_BITS is 8 on 64-bit system and 2 on 32-bit system
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index c4cf0ae..cdce2dd 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -187,51 +187,49 @@
 int lstcon_console_init(void);
 int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data);
 int lstcon_console_fini(void);
-extern int lstcon_session_match(lst_sid_t sid);
-extern int lstcon_session_new(char *name, int key, unsigned version,
-			      int timeout, int flags, lst_sid_t *sid_up);
-extern int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
-			       lstcon_ndlist_ent_t *entp, char *name_up, int len);
-extern int lstcon_session_end(void);
-extern int lstcon_session_debug(int timeout, struct list_head *result_up);
-extern int lstcon_session_feats_check(unsigned feats);
-extern int lstcon_batch_debug(int timeout, char *name,
-			      int client, struct list_head *result_up);
-extern int lstcon_group_debug(int timeout, char *name,
-			      struct list_head *result_up);
-extern int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
-			      struct list_head *result_up);
-extern int lstcon_group_add(char *name);
-extern int lstcon_group_del(char *name);
-extern int lstcon_group_clean(char *name, int args);
-extern int lstcon_group_refresh(char *name, struct list_head *result_up);
-extern int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
-			    unsigned *featp, struct list_head *result_up);
-extern int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
-			       struct list_head *result_up);
-extern int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
-			     int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
-extern int lstcon_group_list(int idx, int len, char *name_up);
-extern int lstcon_batch_add(char *name);
-extern int lstcon_batch_run(char *name, int timeout,
+int lstcon_session_match(lst_sid_t sid);
+int lstcon_session_new(char *name, int key, unsigned version,
+		       int timeout, int flags, lst_sid_t *sid_up);
+int lstcon_session_info(lst_sid_t *sid_up, int *key, unsigned *verp,
+			lstcon_ndlist_ent_t *entp, char *name_up, int len);
+int lstcon_session_end(void);
+int lstcon_session_debug(int timeout, struct list_head *result_up);
+int lstcon_session_feats_check(unsigned feats);
+int lstcon_batch_debug(int timeout, char *name,
+		       int client, struct list_head *result_up);
+int lstcon_group_debug(int timeout, char *name,
+		       struct list_head *result_up);
+int lstcon_nodes_debug(int timeout, int nnd, lnet_process_id_t *nds_up,
+		       struct list_head *result_up);
+int lstcon_group_add(char *name);
+int lstcon_group_del(char *name);
+int lstcon_group_clean(char *name, int args);
+int lstcon_group_refresh(char *name, struct list_head *result_up);
+int lstcon_nodes_add(char *name, int nnd, lnet_process_id_t *nds_up,
+		     unsigned *featp, struct list_head *result_up);
+int lstcon_nodes_remove(char *name, int nnd, lnet_process_id_t *nds_up,
+			struct list_head *result_up);
+int lstcon_group_info(char *name, lstcon_ndlist_ent_t *gent_up,
+		      int *index_p, int *ndent_p, lstcon_node_ent_t *ndents_up);
+int lstcon_group_list(int idx, int len, char *name_up);
+int lstcon_batch_add(char *name);
+int lstcon_batch_run(char *name, int timeout, struct list_head *result_up);
+int lstcon_batch_stop(char *name, int force, struct list_head *result_up);
+int lstcon_test_batch_query(char *name, int testidx,
+			    int client, int timeout,
 			    struct list_head *result_up);
-extern int lstcon_batch_stop(char *name, int force,
-			     struct list_head *result_up);
-extern int lstcon_test_batch_query(char *name, int testidx,
-				   int client, int timeout,
-				   struct list_head *result_up);
-extern int lstcon_batch_del(char *name);
-extern int lstcon_batch_list(int idx, int namelen, char *name_up);
-extern int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
-			     int server, int testidx, int *index_p,
-			     int *ndent_p, lstcon_node_ent_t *dents_up);
-extern int lstcon_group_stat(char *grp_name, int timeout,
-			     struct list_head *result_up);
-extern int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
-			     int timeout, struct list_head *result_up);
-extern int lstcon_test_add(char *batch_name, int type, int loop,
-			   int concur, int dist, int span,
-			   char *src_name, char *dst_name,
-			   void *param, int paramlen, int *retp,
-			   struct list_head *result_up);
+int lstcon_batch_del(char *name);
+int lstcon_batch_list(int idx, int namelen, char *name_up);
+int lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up,
+		      int server, int testidx, int *index_p,
+		      int *ndent_p, lstcon_node_ent_t *dents_up);
+int lstcon_group_stat(char *grp_name, int timeout,
+		      struct list_head *result_up);
+int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up,
+		      int timeout, struct list_head *result_up);
+int lstcon_test_add(char *batch_name, int type, int loop,
+		    int concur, int dist, int span,
+		    char *src_name, char *dst_name,
+		    void *param, int paramlen, int *retp,
+		    struct list_head *result_up);
 #endif
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index 7c5185a..257de35 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -203,7 +203,8 @@
 	sfw_batch_t *tsb;
 	sfw_test_case_t *tsc;
 
-	if (sn == NULL) return;
+	if (sn == NULL)
+		return;
 
 	LASSERT(!sn->sn_timer_active);
 
@@ -613,7 +614,8 @@
 	srpc_client_rpc_t *rpc;
 	sfw_test_unit_t *tsu;
 
-	if (!tsi->tsi_is_client) goto clean;
+	if (!tsi->tsi_is_client)
+		goto clean;
 
 	tsi->tsi_ops->tso_fini(tsi);
 
@@ -1700,7 +1702,8 @@
 
 	for (i = 0; ; i++) {
 		sv = &sfw_services[i];
-		if (sv->sv_name == NULL) break;
+		if (sv->sv_name == NULL)
+			break;
 
 		sv->sv_bulk_ready = NULL;
 		sv->sv_handler    = sfw_handle_server_rpc;
@@ -1717,7 +1720,8 @@
 		}
 
 		/* about to sfw_shutdown, no need to add buffer */
-		if (error) continue;
+		if (error)
+			continue;
 
 		rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
 		if (rc != 0) {
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 1362783..a16d577 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -498,11 +498,11 @@
 	int rc;
 
 	cli->cl_seq = kzalloc(sizeof(*cli->cl_seq), GFP_NOFS);
-	if (cli->cl_seq == NULL)
+	if (!cli->cl_seq)
 		return -ENOMEM;
 
 	prefix = kzalloc(MAX_OBD_NAME + 5, GFP_NOFS);
-	if (prefix == NULL) {
+	if (!prefix) {
 		rc = -ENOMEM;
 		goto out_free_seq;
 	}
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index ec2fc43..1b1066b 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -70,7 +70,7 @@
 	LASSERT(cache_threshold < cache_size);
 
 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
-	if (cache == NULL)
+	if (!cache)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&cache->fci_entries_head);
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index c3b47f2..1e450bf 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -222,7 +222,7 @@
 			fld->lcf_name, name, tar->ft_idx);
 
 	target = kzalloc(sizeof(*target), GFP_NOFS);
-	if (target == NULL)
+	if (!target)
 		return -ENOMEM;
 
 	spin_lock(&fld->lcf_lock);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index 513c81f..6b14406 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -64,98 +64,6 @@
 
 #define LTIME_S(time)		   (time.tv_sec)
 
-/* inode_dio_wait(i) use as-is for write lock */
-# define inode_dio_write_done(i)	do {} while (0) /* for write unlock */
-# define inode_dio_read(i)		atomic_inc(&(i)->i_dio_count)
-/* inode_dio_done(i) use as-is for read unlock */
-
-
-#ifndef FS_HAS_FIEMAP
-#define FS_HAS_FIEMAP			(0)
-#endif
-
-#define ll_vfs_rmdir(dir, entry, mnt)	     vfs_rmdir(dir, entry)
-#define ll_vfs_mkdir(inode, dir, mnt, mode)	vfs_mkdir(inode, dir, mode)
-#define ll_vfs_link(old, mnt, dir, new, mnt1)       vfs_link(old, dir, new)
-#define ll_vfs_unlink(inode, entry, mnt)	  vfs_unlink(inode, entry)
-#define ll_vfs_mknod(dir, entry, mnt, mode, dev) \
-		     vfs_mknod(dir, entry, mode, dev)
-#define ll_security_inode_unlink(dir, entry, mnt) \
-				 security_inode_unlink(dir, entry)
-#define ll_vfs_rename(old, old_dir, mnt, new, new_dir, mnt1) \
-		vfs_rename(old, old_dir, new, new_dir, NULL, 0)
-
-#define cfs_bio_io_error(a, b)   bio_io_error((a))
-#define cfs_bio_endio(a, b, c)    bio_endio((a), (c))
-
-#define cfs_path_put(nd)     path_put(&(nd)->path)
-
-
-#ifndef SLAB_DESTROY_BY_RCU
-#define SLAB_DESTROY_BY_RCU 0
-#endif
-
-
-
-static inline int
-ll_quota_on(struct super_block *sb, int off, int ver, char *name, int remount)
-{
-	int rc;
-
-	if (sb->s_qcop->quota_on) {
-		struct path path;
-
-		rc = kern_path(name, LOOKUP_FOLLOW, &path);
-		if (!rc)
-			return rc;
-		rc = sb->s_qcop->quota_on(sb, off, ver
-					    , &path
-					   );
-		path_put(&path);
-		return rc;
-	} else
-		return -ENOSYS;
-}
-
-static inline int ll_quota_off(struct super_block *sb, int off, int remount)
-{
-	if (sb->s_qcop->quota_off) {
-		return sb->s_qcop->quota_off(sb, off
-					    );
-	} else
-		return -ENOSYS;
-}
-
-
-# define ll_vfs_dq_init	     dquot_initialize
-# define ll_vfs_dq_drop	     dquot_drop
-# define ll_vfs_dq_transfer	 dquot_transfer
-# define ll_vfs_dq_off(sb, remount) dquot_suspend(sb, -1)
-
-
-
-
-
-#define queue_max_phys_segments(rq)       queue_max_segments(rq)
-#define queue_max_hw_segments(rq)	 queue_max_segments(rq)
-
-
-#define ll_d_hlist_node hlist_node
-#define ll_d_hlist_empty(list) hlist_empty(list)
-#define ll_d_hlist_entry(ptr, type, name) hlist_entry(ptr.first, type, name)
-#define ll_d_hlist_for_each(tmp, i_dentry) hlist_for_each(tmp, i_dentry)
-#define ll_d_hlist_for_each_entry(dentry, p, i_dentry, alias) \
-	p = NULL; hlist_for_each_entry(dentry, i_dentry, alias)
-
-
-#define bio_hw_segments(q, bio) 0
-
-
-#define ll_pagevec_init(pv, cold)       do {} while (0)
-#define ll_pagevec_add(pv, pg)	  (0)
-#define ll_pagevec_lru_add_file(pv)     do {} while (0)
-
-
 #ifndef QUOTA_OK
 # define QUOTA_OK 0
 #endif
@@ -163,17 +71,6 @@
 # define NO_QUOTA (-EDQUOT)
 #endif
 
-#ifndef SEEK_DATA
-#define SEEK_DATA      3       /* seek to the next data */
-#endif
-#ifndef SEEK_HOLE
-#define SEEK_HOLE      4       /* seek to the next hole */
-#endif
-
-#ifndef FMODE_UNSIGNED_OFFSET
-#define FMODE_UNSIGNED_OFFSET	((__force fmode_t)0x2000)
-#endif
-
 #if !defined(_ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_) && !defined(ext2_set_bit)
 # define ext2_set_bit	     __test_and_set_bit_le
 # define ext2_clear_bit	   __test_and_clear_bit_le
@@ -182,20 +79,4 @@
 # define ext2_find_next_zero_bit  find_next_zero_bit_le
 #endif
 
-#ifdef ATTR_TIMES_SET
-# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
-#else
-# define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET)
-#endif
-
-
-#include <linux/version.h>
-#include <linux/fs.h>
-
-# define ll_umode_t	umode_t
-
-#include <linux/dcache.h>
-
-# define ll_dirty_inode(inode, flag)	(inode)->i_sb->s_op->dirty_inode((inode), flag)
-
 #endif /* _COMPAT25_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index 1456278..ebe8d68 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -60,18 +60,6 @@
 	ll_delete_from_page_cache(page);
 }
 
-#ifdef ATTR_OPEN
-# define ATTR_FROM_OPEN ATTR_OPEN
-#else
-# ifndef ATTR_FROM_OPEN
-#  define ATTR_FROM_OPEN 0
-# endif
-#endif /* ATTR_OPEN */
-
-#ifndef ATTR_RAW
-#define ATTR_RAW 0
-#endif
-
 #ifndef ATTR_CTIME_SET
 /*
  * set ATTR_CTIME_SET to a high value to avoid any risk of collision with other
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 8ede2a0..fd3c4df 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -375,12 +375,11 @@
 #define JOBSTATS_PROCNAME_UID		"procname_uid"
 #define JOBSTATS_NODELOCAL		"nodelocal"
 
-extern int lprocfs_write_frac_helper(const char __user *buffer,
-				     unsigned long count, int *val, int mult);
-extern int lprocfs_read_frac_helper(char *buffer, unsigned long count,
-				    long val, int mult);
-extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
-				   unsigned int cpuid);
+int lprocfs_write_frac_helper(const char __user *buffer,
+			      unsigned long count, int *val, int mult);
+int lprocfs_read_frac_helper(char *buffer, unsigned long count,
+			     long val, int mult);
+int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid);
 /*
  * \return value
  *      < 0     : on error (only possible for opc as LPROCFS_GET_SMP_ID)
@@ -497,20 +496,18 @@
  * count itself to reside within a single cache line.
  */
 
-extern void lprocfs_counter_add(struct lprocfs_stats *stats, int idx,
-				long amount);
-extern void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx,
-				long amount);
+void lprocfs_counter_add(struct lprocfs_stats *stats, int idx, long amount);
+void lprocfs_counter_sub(struct lprocfs_stats *stats, int idx, long amount);
 
 #define lprocfs_counter_incr(stats, idx) \
 	lprocfs_counter_add(stats, idx, 1)
 #define lprocfs_counter_decr(stats, idx) \
 	lprocfs_counter_sub(stats, idx, 1)
 
-extern __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
-				 struct lprocfs_counter_header *header,
-				 enum lprocfs_stats_flags flags,
-				 enum lprocfs_fields_flags field);
+__s64 lprocfs_read_helper(struct lprocfs_counter *lc,
+			  struct lprocfs_counter_header *header,
+			  enum lprocfs_stats_flags flags,
+			  enum lprocfs_fields_flags field);
 static inline __u64 lprocfs_stats_collector(struct lprocfs_stats *stats,
 					    int idx,
 					    enum lprocfs_fields_flags field)
@@ -537,107 +534,103 @@
 
 extern struct lprocfs_stats *
 lprocfs_alloc_stats(unsigned int num, enum lprocfs_stats_flags flags);
-extern void lprocfs_clear_stats(struct lprocfs_stats *stats);
-extern void lprocfs_free_stats(struct lprocfs_stats **stats);
-extern void lprocfs_init_ops_stats(int num_private_stats,
-				   struct lprocfs_stats *stats);
-extern void lprocfs_init_mps_stats(int num_private_stats,
-				   struct lprocfs_stats *stats);
-extern void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats);
-extern int lprocfs_alloc_obd_stats(struct obd_device *obddev,
-				   unsigned int num_private_stats);
-extern int lprocfs_alloc_md_stats(struct obd_device *obddev,
-				  unsigned int num_private_stats);
-extern void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
-				 unsigned conf, const char *name,
-				 const char *units);
-extern void lprocfs_free_obd_stats(struct obd_device *obddev);
-extern void lprocfs_free_md_stats(struct obd_device *obddev);
+void lprocfs_clear_stats(struct lprocfs_stats *stats);
+void lprocfs_free_stats(struct lprocfs_stats **stats);
+void lprocfs_init_ops_stats(int num_private_stats, struct lprocfs_stats *stats);
+void lprocfs_init_mps_stats(int num_private_stats, struct lprocfs_stats *stats);
+void lprocfs_init_ldlm_stats(struct lprocfs_stats *ldlm_stats);
+int lprocfs_alloc_obd_stats(struct obd_device *obddev,
+			    unsigned int num_private_stats);
+int lprocfs_alloc_md_stats(struct obd_device *obddev,
+			   unsigned int num_private_stats);
+void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
+			  unsigned conf, const char *name, const char *units);
+void lprocfs_free_obd_stats(struct obd_device *obddev);
+void lprocfs_free_md_stats(struct obd_device *obddev);
 struct obd_export;
-extern int lprocfs_exp_cleanup(struct obd_export *exp);
-extern struct dentry *ldebugfs_add_simple(struct dentry *root,
-					  char *name,
-					  void *data,
-					  struct file_operations *fops);
-extern struct dentry *
+int lprocfs_exp_cleanup(struct obd_export *exp);
+struct dentry *ldebugfs_add_simple(struct dentry *root,
+				   char *name,
+				   void *data,
+				   struct file_operations *fops);
+struct dentry *
 ldebugfs_add_symlink(const char *name, struct dentry *parent,
-		    const char *format, ...);
+		     const char *format, ...);
 
-extern int ldebugfs_register_stats(struct dentry *parent,
-				   const char *name,
-				   struct lprocfs_stats *stats);
+int ldebugfs_register_stats(struct dentry *parent,
+			    const char *name,
+			    struct lprocfs_stats *stats);
 
 /* lprocfs_status.c */
-extern int ldebugfs_add_vars(struct dentry *parent,
-			     struct lprocfs_vars *var,
-			     void *data);
+int ldebugfs_add_vars(struct dentry *parent,
+		      struct lprocfs_vars *var,
+		      void *data);
 
-extern struct dentry *ldebugfs_register(const char *name,
-					struct dentry *parent,
-					struct lprocfs_vars *list,
-					void *data);
+struct dentry *ldebugfs_register(const char *name,
+				 struct dentry *parent,
+				 struct lprocfs_vars *list,
+				 void *data);
 
-extern void ldebugfs_remove(struct dentry **entryp);
+void ldebugfs_remove(struct dentry **entryp);
 
-extern int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
-			     struct attribute_group *attrs);
-extern int lprocfs_obd_cleanup(struct obd_device *obd);
+int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
+		      struct attribute_group *attrs);
+int lprocfs_obd_cleanup(struct obd_device *obd);
 
-extern int ldebugfs_seq_create(struct dentry *parent,
-			       const char *name,
-			       umode_t mode,
-			       const struct file_operations *seq_fops,
-			       void *data);
-extern int ldebugfs_obd_seq_create(struct obd_device *dev,
-				   const char *name,
-				   umode_t mode,
-				   const struct file_operations *seq_fops,
-				   void *data);
+int ldebugfs_seq_create(struct dentry *parent,
+			const char *name,
+			umode_t mode,
+			const struct file_operations *seq_fops,
+			void *data);
+int ldebugfs_obd_seq_create(struct obd_device *dev,
+			    const char *name,
+			    umode_t mode,
+			    const struct file_operations *seq_fops,
+			    void *data);
 
 /* Generic callbacks */
 
-extern int lprocfs_rd_u64(struct seq_file *m, void *data);
-extern int lprocfs_rd_atomic(struct seq_file *m, void *data);
-extern int lprocfs_wr_atomic(struct file *file, const char __user *buffer,
-			     unsigned long count, void *data);
-extern int lprocfs_rd_uint(struct seq_file *m, void *data);
-extern int lprocfs_wr_uint(struct file *file, const char __user *buffer,
-			   unsigned long count, void *data);
-extern int lprocfs_rd_name(struct seq_file *m, void *data);
-extern int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
-extern int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
-extern int lprocfs_rd_import(struct seq_file *m, void *data);
-extern int lprocfs_rd_state(struct seq_file *m, void *data);
-extern int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
+int lprocfs_rd_u64(struct seq_file *m, void *data);
+int lprocfs_rd_atomic(struct seq_file *m, void *data);
+int lprocfs_wr_atomic(struct file *file, const char __user *buffer,
+		      unsigned long count, void *data);
+int lprocfs_rd_uint(struct seq_file *m, void *data);
+int lprocfs_wr_uint(struct file *file, const char __user *buffer,
+		    unsigned long count, void *data);
+int lprocfs_rd_name(struct seq_file *m, void *data);
+int lprocfs_rd_server_uuid(struct seq_file *m, void *data);
+int lprocfs_rd_conn_uuid(struct seq_file *m, void *data);
+int lprocfs_rd_import(struct seq_file *m, void *data);
+int lprocfs_rd_state(struct seq_file *m, void *data);
+int lprocfs_rd_connect_flags(struct seq_file *m, void *data);
 
 struct adaptive_timeout;
-extern int lprocfs_at_hist_helper(struct seq_file *m,
-				  struct adaptive_timeout *at);
-extern int lprocfs_rd_timeouts(struct seq_file *m, void *data);
-extern int lprocfs_wr_timeouts(struct file *file, const char __user *buffer,
-			       unsigned long count, void *data);
-extern int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
+int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at);
+int lprocfs_rd_timeouts(struct seq_file *m, void *data);
+int lprocfs_wr_timeouts(struct file *file, const char __user *buffer,
+			unsigned long count, void *data);
+int lprocfs_wr_evict_client(struct file *file, const char __user *buffer,
 			    size_t count, loff_t *off);
-extern int lprocfs_wr_ping(struct file *file, const char __user *buffer,
-			   size_t count, loff_t *off);
-extern int lprocfs_wr_import(struct file *file, const char __user *buffer,
+int lprocfs_wr_ping(struct file *file, const char __user *buffer,
+		    size_t count, loff_t *off);
+int lprocfs_wr_import(struct file *file, const char __user *buffer,
 		      size_t count, loff_t *off);
-extern int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
-extern int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
-				   size_t count, loff_t *off);
+int lprocfs_rd_pinger_recov(struct seq_file *m, void *n);
+int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
+			    size_t count, loff_t *off);
 
 /* Statfs helpers */
 
-extern int lprocfs_write_helper(const char __user *buffer, unsigned long count,
-				int *val);
-extern int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
-extern int lprocfs_write_u64_helper(const char __user *buffer,
-				unsigned long count, __u64 *val);
-extern int lprocfs_write_frac_u64_helper(const char *buffer,
-					 unsigned long count,
-					 __u64 *val, int mult);
-extern char *lprocfs_find_named_value(const char *buffer, const char *name,
-				      size_t *count);
+int lprocfs_write_helper(const char __user *buffer, unsigned long count,
+			 int *val);
+int lprocfs_seq_read_frac_helper(struct seq_file *m, long val, int mult);
+int lprocfs_write_u64_helper(const char __user *buffer,
+			     unsigned long count, __u64 *val);
+int lprocfs_write_frac_u64_helper(const char *buffer,
+				  unsigned long count,
+				  __u64 *val, int mult);
+char *lprocfs_find_named_value(const char *buffer, const char *name,
+			       size_t *count);
 void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
 void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
 void lprocfs_oh_clear(struct obd_histogram *oh);
@@ -646,8 +639,8 @@
 void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
 			   struct lprocfs_counter *cnt);
 
-extern int lprocfs_single_release(struct inode *, struct file *);
-extern int lprocfs_seq_release(struct inode *, struct file *);
+int lprocfs_single_release(struct inode *, struct file *);
+int lprocfs_seq_release(struct inode *, struct file *);
 
 /* You must use these macros when you want to refer to
  * the import in a client obd_device for a lprocfs entry */
@@ -746,7 +739,7 @@
 
 /* lproc_ptlrpc.c */
 struct ptlrpc_request;
-extern void target_print_req(void *seq_file, struct ptlrpc_request *req);
+void target_print_req(void *seq_file, struct ptlrpc_request *req);
 
 /* lproc_status.c */
 int lprocfs_obd_rd_max_pages_per_rpc(struct seq_file *m, void *data);
@@ -754,62 +747,62 @@
 				     size_t count, loff_t *off);
 
 /* all quota proc functions */
-extern int lprocfs_quota_rd_bunit(char *page, char **start,
-				  loff_t off, int count,
-				  int *eof, void *data);
-extern int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
-				  unsigned long count, void *data);
-extern int lprocfs_quota_rd_btune(char *page, char **start,
-				  loff_t off, int count,
-				  int *eof, void *data);
-extern int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
-				  unsigned long count, void *data);
-extern int lprocfs_quota_rd_iunit(char *page, char **start,
-				  loff_t off, int count,
-				  int *eof, void *data);
-extern int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
-				  unsigned long count, void *data);
-extern int lprocfs_quota_rd_itune(char *page, char **start,
-				  loff_t off, int count,
-				  int *eof, void *data);
-extern int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
-				  unsigned long count, void *data);
-extern int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
-				 int *eof, void *data);
-extern int lprocfs_quota_wr_type(struct file *file, const char *buffer,
-				 unsigned long count, void *data);
-extern int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
-					   int count, int *eof, void *data);
-extern int lprocfs_quota_wr_switch_seconds(struct file *file,
-					   const char *buffer,
-					   unsigned long count, void *data);
-extern int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
+int lprocfs_quota_rd_bunit(char *page, char **start,
+			   loff_t off, int count,
+			   int *eof, void *data);
+int lprocfs_quota_wr_bunit(struct file *file, const char *buffer,
+			   unsigned long count, void *data);
+int lprocfs_quota_rd_btune(char *page, char **start,
+			   loff_t off, int count,
+			   int *eof, void *data);
+int lprocfs_quota_wr_btune(struct file *file, const char *buffer,
+			   unsigned long count, void *data);
+int lprocfs_quota_rd_iunit(char *page, char **start,
+			   loff_t off, int count,
+			   int *eof, void *data);
+int lprocfs_quota_wr_iunit(struct file *file, const char *buffer,
+			   unsigned long count, void *data);
+int lprocfs_quota_rd_itune(char *page, char **start,
+			   loff_t off, int count,
+			   int *eof, void *data);
+int lprocfs_quota_wr_itune(struct file *file, const char *buffer,
+			   unsigned long count, void *data);
+int lprocfs_quota_rd_type(char *page, char **start, loff_t off, int count,
+			  int *eof, void *data);
+int lprocfs_quota_wr_type(struct file *file, const char *buffer,
+			  unsigned long count, void *data);
+int lprocfs_quota_rd_switch_seconds(char *page, char **start, loff_t off,
+				    int count, int *eof, void *data);
+int lprocfs_quota_wr_switch_seconds(struct file *file,
+				    const char *buffer,
+				    unsigned long count, void *data);
+int lprocfs_quota_rd_sync_blk(char *page, char **start, loff_t off,
+			      int count, int *eof, void *data);
+int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
+			      unsigned long count, void *data);
+int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
+			       int count, int *eof, void *data);
+int lprocfs_quota_wr_switch_qs(struct file *file,
+			       const char *buffer, unsigned long count,
+			       void *data);
+int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
 				     int count, int *eof, void *data);
-extern int lprocfs_quota_wr_sync_blk(struct file *file, const char *buffer,
-				     unsigned long count, void *data);
-extern int lprocfs_quota_rd_switch_qs(char *page, char **start, loff_t off,
-				      int count, int *eof, void *data);
-extern int lprocfs_quota_wr_switch_qs(struct file *file,
-				      const char *buffer,
-				      unsigned long count, void *data);
-extern int lprocfs_quota_rd_boundary_factor(char *page, char **start, loff_t off,
-					    int count, int *eof, void *data);
-extern int lprocfs_quota_wr_boundary_factor(struct file *file,
-					    const char *buffer,
-					    unsigned long count, void *data);
-extern int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
-					int count, int *eof, void *data);
-extern int lprocfs_quota_wr_least_bunit(struct file *file,
-					const char *buffer,
-					unsigned long count, void *data);
-extern int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
-					int count, int *eof, void *data);
-extern int lprocfs_quota_wr_least_iunit(struct file *file,
-					const char *buffer,
-					unsigned long count, void *data);
-extern int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
-				      int count, int *eof, void *data);
-extern int lprocfs_quota_wr_qs_factor(struct file *file,
-				      const char *buffer,
-				      unsigned long count, void *data);
+int lprocfs_quota_wr_boundary_factor(struct file *file,
+				     const char *buffer, unsigned long count,
+				     void *data);
+int lprocfs_quota_rd_least_bunit(char *page, char **start, loff_t off,
+				 int count, int *eof, void *data);
+int lprocfs_quota_wr_least_bunit(struct file *file,
+				 const char *buffer, unsigned long count,
+				 void *data);
+int lprocfs_quota_rd_least_iunit(char *page, char **start, loff_t off,
+				 int count, int *eof, void *data);
+int lprocfs_quota_wr_least_iunit(struct file *file,
+				 const char *buffer, unsigned long count,
+				 void *data);
+int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
+			       int count, int *eof, void *data);
+int lprocfs_quota_wr_qs_factor(struct file *file,
+			       const char *buffer, unsigned long count,
+			       void *data);
 #endif /* LPROCFS_SNMP_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 4d72d6e..ac78dbc 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -747,7 +747,7 @@
 	return fid_seq(fid);
 }
 
-extern void lustre_swab_ost_id(struct ost_id *oid);
+void lustre_swab_ost_id(struct ost_id *oid);
 
 /**
  * Get inode generation from a igif.
@@ -814,8 +814,8 @@
 	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
 }
 
-extern void lustre_swab_lu_fid(struct lu_fid *fid);
-extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
+void lustre_swab_lu_fid(struct lu_fid *fid);
+void lustre_swab_lu_seq_range(struct lu_seq_range *range);
 
 static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
 {
@@ -1131,7 +1131,7 @@
 	__u64 pb_padding[4];
 };
 
-extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
+void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
 
 /* message body offset for lustre_msg_v2 */
 /* ptlrpc body offset in all request/reply messages */
@@ -1395,7 +1395,7 @@
  * reserve the flag for future use. */
 
 
-extern void lustre_swab_connect(struct obd_connect_data *ocd);
+void lustre_swab_connect(struct obd_connect_data *ocd);
 
 /*
  * Supported checksum algorithms. Up to 32 checksum types are supported.
@@ -1737,10 +1737,10 @@
 	__u64	hss_clearmask;
 };
 
-extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
 
-extern void lustre_swab_obd_statfs (struct obd_statfs *os);
+void lustre_swab_obd_statfs(struct obd_statfs *os);
 
 /* ost_body.data values for OST_BRW */
 
@@ -1780,7 +1780,7 @@
 #define ioobj_max_brw_set(ioo, num)					\
 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
 
-extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
+void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
 
 /* multiple of 8 bytes => can array */
 struct niobuf_remote {
@@ -1789,7 +1789,7 @@
 	__u32 flags;
 };
 
-extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);
+void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
 
 /* lock value block communicated between the filter and llite */
 
@@ -1811,7 +1811,7 @@
 	__u64		lvb_blocks;
 };
 
-extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
+void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
 
 struct ost_lvb {
 	__u64		lvb_size;
@@ -1825,7 +1825,7 @@
 	__u32		lvb_padding;
 };
 
-extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
+void lustre_swab_ost_lvb(struct ost_lvb *lvb);
 
 /*
  *   lquota data structures
@@ -1864,7 +1864,7 @@
 	struct obd_dqblk	qc_dqblk;
 };
 
-extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
+void lustre_swab_obd_quotactl(struct obd_quotactl *q);
 
 #define Q_QUOTACHECK	0x800100 /* deprecated as of 2.4 */
 #define Q_INITQUOTA	0x800101 /* deprecated as of 2.4  */
@@ -1913,7 +1913,7 @@
 #define QUOTA_DQACQ_FL_REL	0x4  /* release quota */
 #define QUOTA_DQACQ_FL_REPORT	0x8  /* report usage */
 
-extern void lustre_swab_quota_body(struct quota_body *b);
+void lustre_swab_quota_body(struct quota_body *b);
 
 /* Quota types currently supported */
 enum {
@@ -1993,7 +1993,7 @@
 	__u64	lvb_pad1;
 };
 
-extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
+void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
 
 /* LVB used with global quota lock */
 #define lvb_glb_ver  lvb_id_may_rel /* current version of the global index */
@@ -2072,7 +2072,7 @@
 	REINT_MAX
 } mds_reint_t, mdt_reint_t;
 
-extern void lustre_swab_generic_32s (__u32 *val);
+void lustre_swab_generic_32s(__u32 *val);
 
 /* the disposition of the intent outlines what was executed */
 #define DISP_IT_EXECD	0x00000001
@@ -2112,7 +2112,7 @@
 /* This FULL lock is useful to take on unlink sort of operations */
 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
 
-extern void lustre_swab_ll_fid (struct ll_fid *fid);
+void lustre_swab_ll_fid(struct ll_fid *fid);
 
 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
  * but was moved into name[1] along with the OID to avoid consuming the
@@ -2231,7 +2231,7 @@
 	__u64	  padding_10;
 }; /* 216 */
 
-extern void lustre_swab_mdt_body (struct mdt_body *b);
+void lustre_swab_mdt_body(struct mdt_body *b);
 
 struct mdt_ioepoch {
 	struct lustre_handle handle;
@@ -2240,7 +2240,7 @@
 	__u32  padding;
 };
 
-extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
+void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
 
 /* permissions for md_perm.mp_perm */
 enum {
@@ -2264,7 +2264,7 @@
 	__u32	   rp_padding;
 };
 
-extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
+void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
 
 struct mdt_rec_setattr {
 	__u32	   sa_opcode;
@@ -2294,7 +2294,7 @@
 	__u32	   sa_padding_5;
 };
 
-extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
+void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
 
 /*
  * Attribute flags used in mdt_rec_setattr::sa_valid.
@@ -2584,7 +2584,7 @@
 	__u32	   rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
 };
 
-extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
 
 struct lmv_desc {
 	__u32 ld_tgt_count;		/* how many MDS's */
@@ -2600,7 +2600,7 @@
 	struct obd_uuid ld_uuid;
 };
 
-extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
+void lustre_swab_lmv_desc(struct lmv_desc *ld);
 
 /* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
 struct lmv_stripe_md {
@@ -2612,7 +2612,7 @@
 	struct lu_fid mea_ids[0];
 };
 
-extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
+void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
 
 /* lmv structures */
 #define MEA_MAGIC_LAST_CHAR      0xb2221ca1
@@ -2670,7 +2670,7 @@
 
 #define ld_magic ld_active_tgt_count       /* for swabbing from llogs */
 
-extern void lustre_swab_lov_desc (struct lov_desc *ld);
+void lustre_swab_lov_desc(struct lov_desc *ld);
 
 /*
  *   LDLM requests:
@@ -2697,7 +2697,7 @@
 #define PLDLMRES(res)	(res)->lr_name.name[0], (res)->lr_name.name[1], \
 			(res)->lr_name.name[2], (res)->lr_name.name[3]
 
-extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
+void lustre_swab_ldlm_res_id(struct ldlm_res_id *id);
 
 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
 			      const struct ldlm_res_id *res1)
@@ -2774,19 +2774,19 @@
 	struct ldlm_inodebits l_inodebits;
 } ldlm_wire_policy_data_t;
 
-extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
+void lustre_swab_ldlm_policy_data(ldlm_wire_policy_data_t *d);
 
 union ldlm_gl_desc {
 	struct ldlm_gl_lquota_desc	lquota_desc;
 };
 
-extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
+void lustre_swab_gl_desc(union ldlm_gl_desc *);
 
 struct ldlm_intent {
 	__u64 opc;
 };
 
-extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
+void lustre_swab_ldlm_intent(struct ldlm_intent *i);
 
 struct ldlm_resource_desc {
 	ldlm_type_t lr_type;
@@ -2794,7 +2794,7 @@
 	struct ldlm_res_id lr_name;
 };
 
-extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
+void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r);
 
 struct ldlm_lock_desc {
 	struct ldlm_resource_desc l_resource;
@@ -2803,7 +2803,7 @@
 	ldlm_wire_policy_data_t l_policy_data;
 };
 
-extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
+void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
 
 #define LDLM_LOCKREQ_HANDLES 2
 #define LDLM_ENQUEUE_CANCEL_OFF 1
@@ -2815,7 +2815,7 @@
 	struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
 };
 
-extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
+void lustre_swab_ldlm_request(struct ldlm_request *rq);
 
 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
  * Otherwise, 2 are available. */
@@ -2837,7 +2837,7 @@
 	__u64  lock_policy_res2;
 };
 
-extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
+void lustre_swab_ldlm_reply(struct ldlm_reply *r);
 
 #define ldlm_flags_to_wire(flags)    ((__u32)(flags))
 #define ldlm_flags_from_wire(flags)  ((__u64)(flags))
@@ -2881,7 +2881,8 @@
 	__u64	    mti_nids[MTI_NIDS_MAX];     /* host nids (lnet_nid_t)*/
 	char	     mti_params[MTI_PARAM_MAXLEN];
 };
-extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
+
+void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
 
 struct mgs_nidtbl_entry {
 	__u64	   mne_version;    /* table version of this entry */
@@ -2896,7 +2897,8 @@
 		lnet_nid_t nids[0];     /* variable size buffer for NIDs. */
 	} u;
 };
-extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
+
+void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
 
 struct mgs_config_body {
 	char     mcb_name[MTI_NAME_MAXLEN]; /* logname */
@@ -2906,13 +2908,15 @@
 	__u8     mcb_bits;      /* bits unit size of config log */
 	__u32    mcb_units;     /* # of units for bulk transfer */
 };
-extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
+
+void lustre_swab_mgs_config_body(struct mgs_config_body *body);
 
 struct mgs_config_res {
 	__u64    mcr_offset;    /* index of last config log */
 	__u64    mcr_size;      /* size of the log */
 };
-extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
+
+void lustre_swab_mgs_config_res(struct mgs_config_res *body);
 
 /* Config marker flags (in config log) */
 #define CM_START       0x01
@@ -2933,8 +2937,7 @@
 	char	      cm_comment[MTI_NAME_MAXLEN];
 };
 
-extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
-				   int swab, int size);
+void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
 
 /*
  * Opcodes for multiple servers.
@@ -3334,7 +3337,7 @@
 	}
 }
 
-extern void lustre_swab_obdo (struct obdo *o);
+void lustre_swab_obdo(struct obdo *o);
 
 /* request structure for OST's */
 struct ost_body {
@@ -3348,25 +3351,25 @@
 	struct  ll_user_fiemap fiemap;
 };
 
-extern void lustre_swab_ost_body (struct ost_body *b);
-extern void lustre_swab_ost_last_id(__u64 *id);
-extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
+void lustre_swab_ost_body(struct ost_body *b);
+void lustre_swab_ost_last_id(__u64 *id);
+void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
 
-extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
-extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
-extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
-					    int stripe_count);
-extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
+void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
+void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
+void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
+				     int stripe_count);
+void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
 
 /* llog_swab.c */
-extern void lustre_swab_llogd_body (struct llogd_body *d);
-extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
-extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
-extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
-extern void lustre_swab_llog_id(struct llog_logid *lid);
+void lustre_swab_llogd_body(struct llogd_body *d);
+void lustre_swab_llog_hdr(struct llog_log_hdr *h);
+void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
+void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
+void lustre_swab_llog_id(struct llog_logid *lid);
 
 struct lustre_cfg;
-extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
+void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
 
 /* Functions for dumping PTLRPC fields */
 void dump_rniobuf(struct niobuf_remote *rnb);
@@ -3418,7 +3421,8 @@
 	__u64		ii_pad2;
 	__u64		ii_pad3;
 };
-extern void lustre_swab_idx_info(struct idx_info *ii);
+
+void lustre_swab_idx_info(struct idx_info *ii);
 
 #define II_END_OFF	MDS_DIR_END_OFF /* all entries have been read */
 
@@ -3450,7 +3454,8 @@
 	 * For the time being, we only support fixed-size key & record. */
 	char	lip_entries[0];
 };
-extern void lustre_swab_lip_header(struct lu_idxpage *lip);
+
+void lustre_swab_lip_header(struct lu_idxpage *lip);
 
 #define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
 
@@ -3490,7 +3495,7 @@
 	__u8	    lc_hmac[CAPA_HMAC_MAX_LEN];   /** HMAC */
 } __attribute__((packed));
 
-extern void lustre_swab_lustre_capa(struct lustre_capa *c);
+void lustre_swab_lustre_capa(struct lustre_capa *c);
 
 /** lustre_capa::lc_opc */
 enum {
@@ -3548,7 +3553,7 @@
 	__u8    lk_key[CAPA_HMAC_KEY_MAX_LEN];    /**< key */
 } __attribute__((packed));
 
-extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
+void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
 
 /** The link ea holds 1 \a link_ea_entry for each hardlink */
 #define LINK_EA_MAGIC 0x11EAF1DFUL
@@ -3620,12 +3625,12 @@
 	__u64			hpk_padding2;
 } __attribute__((packed));
 
-extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
-extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
-extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
-extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
-extern void lustre_swab_hsm_request(struct hsm_request *hr);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_current_action(struct hsm_current_action *action);
+void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
+void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
+void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
+void lustre_swab_hsm_request(struct hsm_request *hr);
 
 /**
  * These are object update opcode under UPDATE_OBJ, which is currently
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index e095ada..9b1bb23 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -406,7 +406,7 @@
 		      stripes * sizeof(struct lmv_user_mds_data);
 }
 
-extern void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
+void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
 
 struct ll_recreate_obj {
 	__u64 lrc_id;
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index f6f4c03..3552546 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -1017,7 +1017,7 @@
 
 extern char *ldlm_lockname[];
 extern char *ldlm_typename[];
-extern char *ldlm_it2str(int it);
+char *ldlm_it2str(int it);
 
 /**
  * Just a fancy CDEBUG call with log level preset to LDLM_DEBUG.
diff --git a/drivers/staging/lustre/lustre/include/lustre_export.h b/drivers/staging/lustre/lustre/include/lustre_export.h
index 3b992b4..5189fad 100644
--- a/drivers/staging/lustre/lustre/include/lustre_export.h
+++ b/drivers/staging/lustre/lustre/include/lustre_export.h
@@ -368,8 +368,8 @@
 	return ocd->ocd_connect_flags & OBD_CONNECT_DISP_STRIPE;
 }
 
-extern struct obd_export *class_conn2export(struct lustre_handle *conn);
-extern struct obd_device *class_conn2obd(struct lustre_handle *conn);
+struct obd_export *class_conn2export(struct lustre_handle *conn);
+struct obd_device *class_conn2obd(struct lustre_handle *conn);
 
 /** @} export */
 
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index dcc8076..5a38f3d 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -375,8 +375,8 @@
 
 /* genops.c */
 struct obd_export;
-extern struct obd_import *class_exp2cliimp(struct obd_export *);
-extern struct obd_import *class_conn2cliimp(struct lustre_handle *);
+struct obd_import *class_exp2cliimp(struct obd_export *);
+struct obd_import *class_conn2cliimp(struct lustre_handle *);
 
 /** @} import */
 
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 77a7de9..48ad60b 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -2183,7 +2183,7 @@
 	 */
 	struct ptlrpc_request_set  *pc_set;
 	/**
-	 * Thread name used in cfs_daemonize()
+	 * Thread name used in kthread_run()
 	 */
 	char			pc_name[16];
 	/**
@@ -2277,18 +2277,18 @@
 
 /* ptlrpc/events.c */
 extern lnet_handle_eq_t ptlrpc_eq_h;
-extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
-			       lnet_process_id_t *peer, lnet_nid_t *self);
+int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
+			lnet_process_id_t *peer, lnet_nid_t *self);
 /**
  * These callbacks are invoked by LNet when something happened to
  * underlying buffer
  * @{
  */
-extern void request_out_callback(lnet_event_t *ev);
-extern void reply_in_callback(lnet_event_t *ev);
-extern void client_bulk_callback(lnet_event_t *ev);
-extern void request_in_callback(lnet_event_t *ev);
-extern void reply_out_callback(lnet_event_t *ev);
+void request_out_callback(lnet_event_t *ev);
+void reply_in_callback(lnet_event_t *ev);
+void client_bulk_callback(lnet_event_t *ev);
+void request_in_callback(lnet_event_t *ev);
+void reply_out_callback(lnet_event_t *ev);
 /** @} */
 
 /* ptlrpc/connection.c */
@@ -2299,7 +2299,7 @@
 struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
 int ptlrpc_connection_init(void);
 void ptlrpc_connection_fini(void);
-extern lnet_pid_t ptl_get_pid(void);
+lnet_pid_t ptl_get_pid(void);
 
 /* ptlrpc/niobuf.c */
 /**
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 55452e5..9ad8c26 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -1472,7 +1472,7 @@
 	}
 	/* we have an idx, read it */
 	start = name + LUSTRE_VOLATILE_HDR_LEN + 1;
-	*idx = strtoul(start, &end, 0);
+	*idx = simple_strtoul(start, &end, 0);
 	/* error cases:
 	 * no digit, no trailing :, negative value
 	 */
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 36ed781..87bb2ce 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -64,10 +64,10 @@
 extern rwlock_t obd_dev_lock;
 
 /* OBD Operations Declarations */
-extern struct obd_device *class_conn2obd(struct lustre_handle *);
-extern struct obd_device *class_exp2obd(struct obd_export *);
-extern int class_handle_ioctl(unsigned int cmd, unsigned long arg);
-extern int lustre_get_jobid(char *jobid);
+struct obd_device *class_conn2obd(struct lustre_handle *);
+struct obd_device *class_exp2obd(struct obd_export *);
+int class_handle_ioctl(unsigned int cmd, unsigned long arg);
+int lustre_get_jobid(char *jobid);
 
 struct lu_device_type;
 
@@ -139,7 +139,7 @@
 int class_add_uuid(const char *uuid, __u64 nid);
 
 /*obdecho*/
-extern void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
+void lprocfs_echo_init_vars(struct lprocfs_static_vars *lvars);
 
 #define CFG_F_START     0x01   /* Set when we start updating from a log */
 #define CFG_F_MARKER    0x02   /* We are within a maker */
@@ -1823,8 +1823,8 @@
 
 /* OBD Metadata Support */
 
-extern int obd_init_caches(void);
-extern void obd_cleanup_caches(void);
+int obd_init_caches(void);
+void obd_cleanup_caches(void);
 
 /* support routines */
 extern struct kmem_cache *obdo_cachep;
@@ -1869,8 +1869,7 @@
 /* obd_mount.c */
 
 /* sysctl.c */
-extern void obd_sysctl_init (void);
-extern void obd_sysctl_clean (void);
+int obd_sysctl_init(void);
 
 /* uuid.c  */
 typedef __u8 class_uuid_t[16];
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index 73e2d48..18aec79 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -56,9 +56,7 @@
 /* obd_timeout should only be used for recovery, not for
    networking / disk / timings affected by load (use Adaptive Timeouts) */
 extern unsigned int obd_timeout;	  /* seconds */
-extern unsigned int ldlm_timeout;	 /* seconds */
 extern unsigned int obd_timeout_set;
-extern unsigned int ldlm_timeout_set;
 extern unsigned int at_min;
 extern unsigned int at_max;
 extern unsigned int at_history;
@@ -105,8 +103,6 @@
 
 /* Timeout definitions */
 #define OBD_TIMEOUT_DEFAULT	     100
-#define LDLM_TIMEOUT_DEFAULT	    20
-#define MDS_LDLM_TIMEOUT_DEFAULT	6
 /* Time to wait for all clients to reconnect during recovery (hard limit) */
 #define OBD_RECOVERY_TIME_HARD	  (obd_timeout * 9)
 /* Time to wait for all clients to reconnect during recovery (soft limit) */
@@ -505,9 +501,7 @@
 #define OBD_FAIL_ONCE			   CFS_FAIL_ONCE
 #define OBD_FAILED			      CFS_FAILED
 
-extern atomic_t libcfs_kmemory;
-
-extern void obd_update_maxusage(void);
+void obd_update_maxusage(void);
 
 #define obd_memory_add(size)						  \
 	lprocfs_counter_add(obd_memory, OBD_MEMORY_STAT, (long)(size))
@@ -526,8 +520,8 @@
 	lprocfs_stats_collector(obd_memory, OBD_MEMORY_PAGES_STAT,	    \
 				LPROCFS_FIELDS_FLAGS_SUM)
 
-extern __u64 obd_memory_max(void);
-extern __u64 obd_pages_max(void);
+__u64 obd_memory_max(void);
+__u64 obd_pages_max(void);
 
 #define OBD_DEBUG_MEMUSAGE (1)
 
@@ -622,8 +616,8 @@
 	if (unlikely((ptr) == NULL)) {					\
 		CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n",	   \
 		       (int)(size));					  \
-		CERROR("%llu total bytes allocated by Lustre, %d by LNET\n", \
-		       obd_memory_sum(), atomic_read(&libcfs_kmemory));   \
+		CERROR("%llu total bytes allocated by Lustre\n",	      \
+		       obd_memory_sum());				      \
 	} else {							      \
 		OBD_ALLOC_POST(ptr, size, "vmalloced");		       \
 	}								     \
@@ -769,12 +763,10 @@
 		       "failed\n", (int)1,				    \
 		       (__u64)(1 << PAGE_CACHE_SHIFT));			 \
 		CERROR("%llu total bytes and %llu total pages "	   \
-		       "(%llu bytes) allocated by Lustre, "		\
-		       "%d total bytes by LNET\n",			    \
+		       "(%llu bytes) allocated by Lustre\n",		      \
 		       obd_memory_sum(),				      \
 		       obd_pages_sum() << PAGE_CACHE_SHIFT,		     \
-		       obd_pages_sum(),				       \
-		       atomic_read(&libcfs_kmemory));		     \
+		       obd_pages_sum());				       \
 	} else {							      \
 		obd_pages_add(0);					     \
 		CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / "      \
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index e0c1cca..9053f81 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -203,7 +203,7 @@
 	int rc;
 
 	vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
-	if (vdv == NULL)
+	if (!vdv)
 		return ERR_PTR(-ENOMEM);
 
 	lud = &vdv->cdv_cl.cd_lu_dev;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 6601e6b..fa4b7c7 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -209,7 +209,7 @@
 
 /* interval tree, for LDLM_EXTENT. */
 extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */
-extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
+void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l);
 struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l);
 struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock);
 void ldlm_interval_free(struct ldlm_interval *node);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 764f986..badd227 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -656,7 +656,8 @@
 }
 EXPORT_SYMBOL(target_pack_pool_reply);
 
-int target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
+static int
+target_send_reply_msg(struct ptlrpc_request *req, int rc, int fail_id)
 {
 	if (OBD_FAIL_CHECK_ORSET(fail_id & ~OBD_FAIL_ONCE, OBD_FAIL_ONCE)) {
 		DEBUG_REQ(D_ERROR, req, "dropping reply");
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index bb2246d..cd340fc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -1528,7 +1528,7 @@
 	if (lvb_len) {
 		lock->l_lvb_len = lvb_len;
 		lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
-		if (lock->l_lvb_data == NULL)
+		if (!lock->l_lvb_data)
 			goto out;
 	}
 
@@ -1813,7 +1813,7 @@
 		return 0;
 
 	arg = kzalloc(sizeof(*arg), GFP_NOFS);
-	if (arg == NULL)
+	if (!arg)
 		return -ENOMEM;
 
 	atomic_set(&arg->restart, 0);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index b7b6ca1..ac79db9 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -76,15 +76,6 @@
 	return cfs_time_seconds((int)cfs_duration_sec(cfs_time_sub(timeout, 0)) + 1);
 }
 
-/* timeout for initial callback (AST) reply (bz10399) */
-static inline unsigned int ldlm_get_rq_timeout(void)
-{
-	/* Non-AT value */
-	unsigned int timeout = min(ldlm_timeout, obd_timeout / 3);
-
-	return timeout < 1 ? 1 : timeout;
-}
-
 #define ELT_STOPPED   0
 #define ELT_READY     1
 #define ELT_TERMINATE 2
@@ -225,7 +216,7 @@
 			void *lvb_data;
 
 			lvb_data = kzalloc(lvb_len, GFP_NOFS);
-			if (lvb_data == NULL) {
+			if (!lvb_data) {
 				LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
 				rc = -ENOMEM;
 				goto out;
@@ -453,7 +444,7 @@
 		struct ldlm_bl_work_item *blwi;
 
 		blwi = kzalloc(sizeof(*blwi), GFP_NOFS);
-		if (blwi == NULL)
+		if (!blwi)
 			return -ENOMEM;
 		init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
 
@@ -1053,7 +1044,7 @@
 		return -EALREADY;
 
 	ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
-	if (ldlm_state == NULL)
+	if (!ldlm_state)
 		return -ENOMEM;
 
 	ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
@@ -1123,7 +1114,7 @@
 
 
 	blp = kzalloc(sizeof(*blp), GFP_NOFS);
-	if (blp == NULL) {
+	if (!blp) {
 		rc = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 1605b9c..c234acb 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -1422,7 +1422,7 @@
 		return -EALREADY;
 
 	ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
-	if (ldlm_pools_thread == NULL)
+	if (!ldlm_pools_thread)
 		return -ENOMEM;
 
 	init_completion(&ldlm_pools_comp);
@@ -1486,8 +1486,10 @@
 
 void ldlm_pools_fini(void)
 {
-	unregister_shrinker(&ldlm_pools_srv_shrinker);
-	unregister_shrinker(&ldlm_pools_cli_shrinker);
+	if (ldlm_pools_thread) {
+		unregister_shrinker(&ldlm_pools_srv_shrinker);
+		unregister_shrinker(&ldlm_pools_cli_shrinker);
+	}
 	ldlm_pools_thread_stop();
 }
 EXPORT_SYMBOL(ldlm_pools_fini);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index cdb6366..4bb3173 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -939,6 +939,7 @@
 	ldlm_pool_fini(&ns->ns_pool);
 
 	ldlm_namespace_debugfs_unregister(ns);
+	ldlm_namespace_sysfs_unregister(ns);
 	cfs_hash_putref(ns->ns_rs_hash);
 	/* Namespace \a ns should be not on list at this time, otherwise
 	 * this will cause issues related to using freed \a ns in poold
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 021c92f..e93f556 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -57,8 +57,42 @@
 MODULE_PARM_DESC(libcfs_debug, "Lustre kernel debug mask");
 EXPORT_SYMBOL(libcfs_debug);
 
+static int libcfs_param_debug_mb_set(const char *val,
+				     const struct kernel_param *kp)
+{
+	int rc;
+	unsigned num;
+
+	rc = kstrtouint(val, 0, &num);
+	if (rc < 0)
+		return rc;
+
+	if (!*((unsigned int *)kp->arg)) {
+		*((unsigned int *)kp->arg) = num;
+		return 0;
+	}
+
+	rc = cfs_trace_set_debug_mb(num);
+
+	if (!rc)
+		*((unsigned int *)kp->arg) = cfs_trace_get_debug_mb();
+
+	return rc;
+}
+
+/* While the debug_mb setting looks like an unsigned int, it in fact
+ * needs quite a bit of extra processing, so we define a special
+ * debugmb parameter type with corresponding methods to handle this case */
+static struct kernel_param_ops param_ops_debugmb = {
+	.set = libcfs_param_debug_mb_set,
+	.get = param_get_uint,
+};
+
+#define param_check_debugmb(name, p) \
+		__param_check(name, p, unsigned int)
+
 static unsigned int libcfs_debug_mb;
-module_param(libcfs_debug_mb, uint, 0644);
+module_param(libcfs_debug_mb, debugmb, 0644);
 MODULE_PARM_DESC(libcfs_debug_mb, "Total debug buffer size.");
 EXPORT_SYMBOL(libcfs_debug_mb);
 
@@ -72,18 +106,106 @@
 MODULE_PARM_DESC(libcfs_console_ratelimit, "Lustre kernel debug console ratelimit (0 to disable)");
 EXPORT_SYMBOL(libcfs_console_ratelimit);
 
-unsigned int libcfs_console_max_delay;
-module_param(libcfs_console_max_delay, uint, 0644);
-MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
-EXPORT_SYMBOL(libcfs_console_max_delay);
+static int param_set_delay_minmax(const char *val,
+				  const struct kernel_param *kp,
+				  long min, long max)
+{
+	long d;
+	int sec;
+	int rc;
 
+	rc = kstrtoint(val, 0, &sec);
+	if (rc)
+		return -EINVAL;
+
+	d = cfs_time_seconds(sec) / 100;
+	if (d < min || d > max)
+		return -EINVAL;
+
+	*((unsigned int *)kp->arg) = d;
+
+	return 0;
+}
+
+static int param_get_delay(char *buffer, const struct kernel_param *kp)
+{
+	unsigned int d = *(unsigned int *)kp->arg;
+
+	return sprintf(buffer, "%u", (unsigned int)cfs_duration_sec(d * 100));
+}
+
+unsigned int libcfs_console_max_delay;
+EXPORT_SYMBOL(libcfs_console_max_delay);
 unsigned int libcfs_console_min_delay;
-module_param(libcfs_console_min_delay, uint, 0644);
-MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
 EXPORT_SYMBOL(libcfs_console_min_delay);
 
+static int param_set_console_max_delay(const char *val,
+				       const struct kernel_param *kp)
+{
+	return param_set_delay_minmax(val, kp,
+				      libcfs_console_min_delay, INT_MAX);
+}
+
+static struct kernel_param_ops param_ops_console_max_delay = {
+	.set = param_set_console_max_delay,
+	.get = param_get_delay,
+};
+
+#define param_check_console_max_delay(name, p) \
+		__param_check(name, p, unsigned int)
+
+module_param(libcfs_console_max_delay, console_max_delay, 0644);
+MODULE_PARM_DESC(libcfs_console_max_delay, "Lustre kernel debug console max delay (jiffies)");
+
+static int param_set_console_min_delay(const char *val,
+				       const struct kernel_param *kp)
+{
+	return param_set_delay_minmax(val, kp,
+				      1, libcfs_console_max_delay);
+}
+
+static struct kernel_param_ops param_ops_console_min_delay = {
+	.set = param_set_console_min_delay,
+	.get = param_get_delay,
+};
+
+#define param_check_console_min_delay(name, p) \
+		__param_check(name, p, unsigned int)
+
+module_param(libcfs_console_min_delay, console_min_delay, 0644);
+MODULE_PARM_DESC(libcfs_console_min_delay, "Lustre kernel debug console min delay (jiffies)");
+
+static int param_set_uint_minmax(const char *val,
+				 const struct kernel_param *kp,
+				 unsigned int min, unsigned int max)
+{
+	unsigned int num;
+	int ret;
+
+	if (!val)
+		return -EINVAL;
+	ret = kstrtouint(val, 0, &num);
+	if (ret < 0 || num < min || num > max)
+		return -EINVAL;
+	*((unsigned int *)kp->arg) = num;
+	return 0;
+}
+
+static int param_set_uintpos(const char *val, const struct kernel_param *kp)
+{
+	return param_set_uint_minmax(val, kp, 1, -1);
+}
+
+static struct kernel_param_ops param_ops_uintpos = {
+	.set = param_set_uintpos,
+	.get = param_get_uint,
+};
+
+#define param_check_uintpos(name, p) \
+		__param_check(name, p, unsigned int)
+
 unsigned int libcfs_console_backoff = CDEBUG_DEFAULT_BACKOFF;
-module_param(libcfs_console_backoff, uint, 0644);
+module_param(libcfs_console_backoff, uintpos, 0644);
 MODULE_PARM_DESC(libcfs_console_backoff, "Lustre kernel debug console backoff factor");
 EXPORT_SYMBOL(libcfs_console_backoff);
 
@@ -93,23 +215,14 @@
 unsigned int libcfs_stack = 3 * THREAD_SIZE / 4;
 EXPORT_SYMBOL(libcfs_stack);
 
-static unsigned int portal_enter_debugger;
-EXPORT_SYMBOL(portal_enter_debugger);
-
 unsigned int libcfs_catastrophe;
 EXPORT_SYMBOL(libcfs_catastrophe);
 
-unsigned int libcfs_watchdog_ratelimit = 300;
-EXPORT_SYMBOL(libcfs_watchdog_ratelimit);
-
 unsigned int libcfs_panic_on_lbug = 1;
 module_param(libcfs_panic_on_lbug, uint, 0644);
 MODULE_PARM_DESC(libcfs_panic_on_lbug, "Lustre kernel panic on LBUG");
 EXPORT_SYMBOL(libcfs_panic_on_lbug);
 
-atomic_t libcfs_kmemory = ATOMIC_INIT(0);
-EXPORT_SYMBOL(libcfs_kmemory);
-
 static wait_queue_head_t debug_ctlwq;
 
 char libcfs_debug_file_path_arr[PATH_MAX] = LIBCFS_DEBUG_FILE_PATH_DEFAULT;
@@ -414,8 +527,10 @@
 	}
 	rc = cfs_tracefile_init(max);
 
-	if (rc == 0)
+	if (rc == 0) {
 		libcfs_register_panic_notifier();
+		libcfs_debug_mb = cfs_trace_get_debug_mb();
+	}
 
 	return rc;
 }
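For readers following the libcfs_debug_mb conversion above: the change swaps a plain uint module parameter for a custom "debugmb" parameter type so that runtime writes are routed through cfs_trace_set_debug_mb(). Below is a minimal, self-contained sketch of that kernel_param_ops pattern; it is illustrative only and not part of this patch, and the names example_mb, param_set_examplemb and param_ops_examplemb are hypothetical.

/* Illustrative sketch only -- not part of this patch.  It mirrors the
 * kernel_param_ops pattern used for libcfs_debug_mb above; the names
 * example_mb, param_set_examplemb and param_ops_examplemb are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static unsigned int example_mb;

static int param_set_examplemb(const char *val, const struct kernel_param *kp)
{
	unsigned int num;
	int rc;

	rc = kstrtouint(val, 0, &num);
	if (rc)
		return rc;

	/* any extra validation or side effects would go here */
	*(unsigned int *)kp->arg = num;
	return 0;
}

static struct kernel_param_ops param_ops_examplemb = {
	.set = param_set_examplemb,
	.get = param_get_uint,
};

#define param_check_examplemb(name, p) __param_check(name, p, unsigned int)

module_param(example_mb, examplemb, 0644);
MODULE_PARM_DESC(example_mb, "example parameter with a custom set handler");

With this in place, writes to /sys/module/<module>/parameters/example_mb go through param_set_examplemb(), which is how the patch lets libcfs_debug_mb resize the trace buffer on the fly instead of silently storing a stale value.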
diff --git a/drivers/staging/lustre/lustre/libcfs/fail.c b/drivers/staging/lustre/lustre/libcfs/fail.c
index 7b7fc21..42d615f 100644
--- a/drivers/staging/lustre/lustre/libcfs/fail.c
+++ b/drivers/staging/lustre/lustre/libcfs/fail.c
@@ -123,7 +123,7 @@
 
 int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
 {
-	int ret = 0;
+	int ret;
 
 	ret = __cfs_fail_check_set(id, value, set);
 	if (ret) {
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
index 31a5581..933525c 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -78,7 +78,7 @@
 int
 cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
 {
-	int	rc = 0;
+	int	rc;
 
 	rc = snprintf(buf, len, "%d\t: %d\n", 0, 0);
 	len -= rc;
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index 76d4392..efe5e66 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -231,7 +231,7 @@
 	char	*endp;
 
 	str = cfs_trimwhite(str);
-	*num = strtoul(str, &endp, 0);
+	*num = simple_strtoul(str, &endp, 0);
 	if (endp == str)
 		return 0;
 
@@ -400,7 +400,7 @@
 		struct cfs_range_expr *expr;
 
 		expr = list_entry(expr_list->el_exprs.next,
-				      struct cfs_range_expr, re_link),
+				      struct cfs_range_expr, re_link);
 		list_del(&expr->re_link);
 		LIBCFS_FREE(expr, sizeof(*expr));
 	}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
index aa3fffe..fbbc8a7 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
@@ -114,7 +114,7 @@
 		crypto_free_hash(hdesc.tfm);
 		return -ENOSPC;
 	}
-	sg_init_one(&sl, (void *)buf, buf_len);
+	sg_init_one(&sl, buf, buf_len);
 
 	hdesc.flags = 0;
 	err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
@@ -165,7 +165,7 @@
 {
 	struct scatterlist sl;
 
-	sg_init_one(&sl, (void *)buf, buf_len);
+	sg_init_one(&sl, buf, buf_len);
 
 	return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
 }
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
index e962f89..64a984b 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -49,7 +49,7 @@
 	hdr = (struct libcfs_ioctl_hdr *)buf;
 	data = (struct libcfs_ioctl_data *)buf;
 
-	if (copy_from_user(buf, (void *)arg, sizeof(*hdr)))
+	if (copy_from_user(buf, arg, sizeof(*hdr)))
 		return -EFAULT;
 
 	if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
@@ -69,7 +69,7 @@
 	}
 
 	orig_len = hdr->ioc_len;
-	if (copy_from_user(buf, (void *)arg, hdr->ioc_len))
+	if (copy_from_user(buf, arg, hdr->ioc_len))
 		return -EFAULT;
 	if (orig_len != data->ioc_len)
 		return -EINVAL;
@@ -96,8 +96,6 @@
 	return 0;
 }
 
-extern struct cfs_psdev_ops	  libcfs_psdev_ops;
-
 static int
 libcfs_psdev_open(struct inode *inode, struct file *file)
 {
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index e60b2e9..806f974 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -50,6 +50,7 @@
 #include <linux/list.h>
 
 #include <linux/sysctl.h>
+#include <linux/debugfs.h>
 
 # define DEBUG_SUBSYSTEM S_LNET
 
@@ -65,48 +66,12 @@
 MODULE_DESCRIPTION("Portals v3.1");
 MODULE_LICENSE("GPL");
 
-extern struct miscdevice libcfs_dev;
-extern struct cfs_wi_sched *cfs_sched_rehash;
-extern void libcfs_init_nidstrings(void);
+static void insert_debugfs(void);
+static void remove_debugfs(void);
 
-static int insert_proc(void);
-static void remove_proc(void);
+static struct dentry *lnet_debugfs_root;
 
-static struct ctl_table_header *lnet_table_header;
-extern char lnet_upcall[1024];
-/**
- * The path of debug log dump upcall script.
- */
-extern char lnet_debug_log_upcall[1024];
-
-#define CTL_LNET	(0x100)
-
-enum {
-	PSDEV_DEBUG = 1,	  /* control debugging */
-	PSDEV_SUBSYSTEM_DEBUG,    /* control debugging */
-	PSDEV_PRINTK,	     /* force all messages to console */
-	PSDEV_CONSOLE_RATELIMIT,  /* ratelimit console messages */
-	PSDEV_CONSOLE_MAX_DELAY_CS, /* maximum delay over which we skip messages */
-	PSDEV_CONSOLE_MIN_DELAY_CS, /* initial delay over which we skip messages */
-	PSDEV_CONSOLE_BACKOFF,    /* delay increase factor */
-	PSDEV_DEBUG_PATH,	 /* crashdump log location */
-	PSDEV_DEBUG_DUMP_PATH,    /* crashdump tracelog location */
-	PSDEV_CPT_TABLE,	  /* information about cpu partitions */
-	PSDEV_LNET_UPCALL,	/* User mode upcall script  */
-	PSDEV_LNET_MEMUSED,       /* bytes currently PORTAL_ALLOCated */
-	PSDEV_LNET_CATASTROPHE,   /* if we have LBUGged or panic'd */
-	PSDEV_LNET_PANIC_ON_LBUG, /* flag to panic on LBUG */
-	PSDEV_LNET_DUMP_KERNEL,   /* snapshot kernel debug buffer to file */
-	PSDEV_LNET_DAEMON_FILE,   /* spool kernel debug buffer to file */
-	PSDEV_LNET_DEBUG_MB,      /* size of debug buffer */
-	PSDEV_LNET_DEBUG_LOG_UPCALL, /* debug log upcall script */
-	PSDEV_LNET_WATCHDOG_RATELIMIT,  /* ratelimit watchdog messages  */
-	PSDEV_LNET_FORCE_LBUG,    /* hook to force an LBUG */
-	PSDEV_LNET_FAIL_LOC,      /* control test failures instrumentation */
-	PSDEV_LNET_FAIL_VAL,      /* userdata for fail loc */
-};
-
-static void kportal_memhog_free (struct libcfs_device_userstate *ldu)
+static void kportal_memhog_free(struct libcfs_device_userstate *ldu)
 {
 	struct page **level0p = &ldu->ldu_memhog_root_page;
 	struct page **level1p;
@@ -146,7 +111,7 @@
 		*level0p = NULL;
 	}
 
-	LASSERT (ldu->ldu_memhog_pages == 0);
+	LASSERT(ldu->ldu_memhog_pages == 0);
 }
 
 static int kportal_memhog_alloc(struct libcfs_device_userstate *ldu, int npages,
@@ -158,8 +123,8 @@
 	int	   count1;
 	int	   count2;
 
-	LASSERT (ldu->ldu_memhog_pages == 0);
-	LASSERT (ldu->ldu_memhog_root_page == NULL);
+	LASSERT(ldu->ldu_memhog_pages == 0);
+	LASSERT(ldu->ldu_memhog_root_page == NULL);
 
 	if (npages < 0)
 		return -EINVAL;
@@ -338,7 +303,7 @@
 			if (err != -EINVAL) {
 				if (err == 0)
 					err = libcfs_ioctl_popdata(arg,
-							data, sizeof (*data));
+							data, sizeof(*data));
 				break;
 			}
 		}
@@ -361,7 +326,7 @@
 		return -ENOMEM;
 
 	/* 'cmd' and permissions get checked in our arch-specific caller */
-	if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
+	if (libcfs_ioctl_getdata(buf, buf + 800, arg)) {
 		CERROR("PORTALS ioctl: data error\n");
 		err = -EINVAL;
 		goto out;
@@ -428,17 +393,10 @@
 		goto cleanup_wi;
 	}
 
+	insert_debugfs();
 
-	rc = insert_proc();
-	if (rc) {
-		CERROR("insert_proc: error %d\n", rc);
-		goto cleanup_crypto;
-	}
-
-	CDEBUG (D_OTHER, "portals setup OK\n");
+	CDEBUG(D_OTHER, "portals setup OK\n");
 	return 0;
- cleanup_crypto:
-	cfs_crypto_unregister();
  cleanup_wi:
 	cfs_wi_shutdown();
  cleanup_deregister:
@@ -454,10 +412,7 @@
 {
 	int rc;
 
-	remove_proc();
-
-	CDEBUG(D_MALLOC, "before Portals cleanup: kmem %d\n",
-	       atomic_read(&libcfs_kmemory));
+	remove_debugfs();
 
 	if (cfs_sched_rehash != NULL) {
 		cfs_wi_sched_destroy(cfs_sched_rehash);
@@ -467,16 +422,10 @@
 	cfs_crypto_unregister();
 	cfs_wi_shutdown();
 
-	rc = misc_deregister(&libcfs_dev);
-	if (rc)
-		CERROR("misc_deregister error %d\n", rc);
+	misc_deregister(&libcfs_dev);
 
 	cfs_cpu_fini();
 
-	if (atomic_read(&libcfs_kmemory) != 0)
-		CERROR("Portals memory leaked: %d bytes\n",
-		       atomic_read(&libcfs_kmemory));
-
 	rc = libcfs_debug_cleanup();
 	if (rc)
 		pr_err("LustreError: libcfs_debug_cleanup: %d\n", rc);
@@ -551,9 +500,6 @@
 				 __proc_dobitmasks);
 }
 
-static int min_watchdog_ratelimit;	  /* disable ratelimiting */
-static int max_watchdog_ratelimit = (24*60*60); /* limit to once per day */
-
 static int __proc_dump_kernel(void *data, int write,
 			      loff_t pos, void __user *buffer, int nob)
 {
@@ -593,125 +539,6 @@
 				 __proc_daemon_file);
 }
 
-static int __proc_debug_mb(void *data, int write,
-			   loff_t pos, void __user *buffer, int nob)
-{
-	if (!write) {
-		char tmpstr[32];
-		int  len = snprintf(tmpstr, sizeof(tmpstr), "%d",
-				    cfs_trace_get_debug_mb());
-
-		if (pos >= len)
-			return 0;
-
-		return cfs_trace_copyout_string(buffer, nob, tmpstr + pos,
-		       "\n");
-	}
-
-	return cfs_trace_set_debug_mb_usrstr(buffer, nob);
-}
-
-static int proc_debug_mb(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	return proc_call_handler(table->data, write, ppos, buffer, lenp,
-				 __proc_debug_mb);
-}
-
-static int proc_console_max_delay_cs(struct ctl_table *table, int write,
-				     void __user *buffer, size_t *lenp,
-				     loff_t *ppos)
-{
-	int rc, max_delay_cs;
-	struct ctl_table dummy = *table;
-	long d;
-
-	dummy.data = &max_delay_cs;
-	dummy.proc_handler = &proc_dointvec;
-
-	if (!write) { /* read */
-		max_delay_cs = cfs_duration_sec(libcfs_console_max_delay * 100);
-		rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
-		return rc;
-	}
-
-	/* write */
-	max_delay_cs = 0;
-	rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
-	if (rc < 0)
-		return rc;
-	if (max_delay_cs <= 0)
-		return -EINVAL;
-
-	d = cfs_time_seconds(max_delay_cs) / 100;
-	if (d == 0 || d < libcfs_console_min_delay)
-		return -EINVAL;
-	libcfs_console_max_delay = d;
-
-	return rc;
-}
-
-static int proc_console_min_delay_cs(struct ctl_table *table, int write,
-				     void __user *buffer, size_t *lenp,
-				     loff_t *ppos)
-{
-	int rc, min_delay_cs;
-	struct ctl_table dummy = *table;
-	long d;
-
-	dummy.data = &min_delay_cs;
-	dummy.proc_handler = &proc_dointvec;
-
-	if (!write) { /* read */
-		min_delay_cs = cfs_duration_sec(libcfs_console_min_delay * 100);
-		rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
-		return rc;
-	}
-
-	/* write */
-	min_delay_cs = 0;
-	rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
-	if (rc < 0)
-		return rc;
-	if (min_delay_cs <= 0)
-		return -EINVAL;
-
-	d = cfs_time_seconds(min_delay_cs) / 100;
-	if (d == 0 || d > libcfs_console_max_delay)
-		return -EINVAL;
-	libcfs_console_min_delay = d;
-
-	return rc;
-}
-
-static int proc_console_backoff(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int rc, backoff;
-	struct ctl_table dummy = *table;
-
-	dummy.data = &backoff;
-	dummy.proc_handler = &proc_dointvec;
-
-	if (!write) { /* read */
-		backoff = libcfs_console_backoff;
-		rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
-		return rc;
-	}
-
-	/* write */
-	backoff = 0;
-	rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
-	if (rc < 0)
-		return rc;
-	if (backoff <= 0)
-		return -EINVAL;
-
-	libcfs_console_backoff = backoff;
-
-	return rc;
-}
-
 static int libcfs_force_lbug(struct ctl_table *table, int write,
 			     void __user *buffer,
 			     size_t *lenp, loff_t *ppos)
@@ -809,40 +636,6 @@
 		.proc_handler = &proc_dobitmasks,
 	},
 	{
-		.procname = "console_ratelimit",
-		.data     = &libcfs_console_ratelimit,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec
-	},
-	{
-		.procname = "console_max_delay_centisecs",
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_console_max_delay_cs
-	},
-	{
-		.procname = "console_min_delay_centisecs",
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_console_min_delay_cs
-	},
-	{
-		.procname = "console_backoff",
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_console_backoff
-	},
-
-	{
-		.procname = "debug_path",
-		.data     = libcfs_debug_file_path_arr,
-		.maxlen   = sizeof(libcfs_debug_file_path_arr),
-		.mode     = 0644,
-		.proc_handler = &proc_dostring,
-	},
-
-	{
 		.procname = "cpu_partition_table",
 		.maxlen   = 128,
 		.mode     = 0444,
@@ -864,13 +657,6 @@
 		.proc_handler = &proc_dostring,
 	},
 	{
-		.procname = "lnet_memused",
-		.data     = (int *)&libcfs_kmemory.counter,
-		.maxlen   = sizeof(int),
-		.mode     = 0444,
-		.proc_handler = &proc_dointvec,
-	},
-	{
 		.procname = "catastrophe",
 		.data     = &libcfs_catastrophe,
 		.maxlen   = sizeof(int),
@@ -878,13 +664,6 @@
 		.proc_handler = &proc_dointvec,
 	},
 	{
-		.procname = "panic_on_lbug",
-		.data     = &libcfs_panic_on_lbug,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec,
-	},
-	{
 		.procname = "dump_kernel",
 		.maxlen   = 256,
 		.mode     = 0200,
@@ -897,20 +676,6 @@
 		.proc_handler = &proc_daemon_file,
 	},
 	{
-		.procname = "debug_mb",
-		.mode     = 0644,
-		.proc_handler = &proc_debug_mb,
-	},
-	{
-		.procname = "watchdog_ratelimit",
-		.data     = &libcfs_watchdog_ratelimit,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.extra1   = &min_watchdog_ratelimit,
-		.extra2   = &max_watchdog_ratelimit,
-	},
-	{
 		.procname = "force_lbug",
 		.data     = NULL,
 		.maxlen   = 0,
@@ -935,31 +700,93 @@
 	}
 };
 
-static struct ctl_table top_table[] = {
-	{
-		.procname = "lnet",
-		.mode     = 0555,
-		.data     = NULL,
-		.maxlen   = 0,
-		.child    = lnet_table,
-	},
-	{
-	}
+struct lnet_debugfs_symlink_def {
+	char *name;
+	char *target;
 };
 
-static int insert_proc(void)
+static const struct lnet_debugfs_symlink_def lnet_debugfs_symlinks[] = {
+	{ "console_ratelimit",
+	  "/sys/module/libcfs/parameters/libcfs_console_ratelimit"},
+	{ "debug_path",
+	  "/sys/module/libcfs/parameters/libcfs_debug_file_path"},
+	{ "panic_on_lbug",
+	  "/sys/module/libcfs/parameters/libcfs_panic_on_lbug"},
+	{ "libcfs_console_backoff",
+	  "/sys/module/libcfs/parameters/libcfs_console_backoff"},
+	{ "debug_mb",
+	  "/sys/module/libcfs/parameters/libcfs_debug_mb"},
+	{ "console_min_delay_centisecs",
+	  "/sys/module/libcfs/parameters/libcfs_console_min_delay"},
+	{ "console_max_delay_centisecs",
+	  "/sys/module/libcfs/parameters/libcfs_console_max_delay"},
+	{},
+};
+
+static ssize_t lnet_debugfs_read(struct file *filp, char __user *buf,
+				 size_t count, loff_t *ppos)
 {
-	if (lnet_table_header == NULL)
-		lnet_table_header = register_sysctl_table(top_table);
-	return 0;
+	struct ctl_table *table = filp->private_data;
+	int error;
+
+	error = table->proc_handler(table, 0, (void __user *)buf, &count, ppos);
+	if (!error)
+		error = count;
+
+	return error;
 }
 
-static void remove_proc(void)
+static ssize_t lnet_debugfs_write(struct file *filp, const char __user *buf,
+				  size_t count, loff_t *ppos)
 {
-	if (lnet_table_header != NULL)
-		unregister_sysctl_table(lnet_table_header);
+	struct ctl_table *table = filp->private_data;
+	int error;
 
-	lnet_table_header = NULL;
+	error = table->proc_handler(table, 1, (void __user *)buf, &count, ppos);
+	if (!error)
+		error = count;
+
+	return error;
+}
+
+static const struct file_operations lnet_debugfs_file_operations = {
+	.open		= simple_open,
+	.read		= lnet_debugfs_read,
+	.write		= lnet_debugfs_write,
+	.llseek		= default_llseek,
+};
+
+static void insert_debugfs(void)
+{
+	struct ctl_table *table;
+	struct dentry *entry;
+	const struct lnet_debugfs_symlink_def *symlinks;
+
+	if (lnet_debugfs_root == NULL)
+		lnet_debugfs_root = debugfs_create_dir("lnet", NULL);
+
+	/* Even if we cannot create, just ignore it altogether */
+	if (IS_ERR_OR_NULL(lnet_debugfs_root))
+		return;
+
+	for (table = lnet_table; table->procname; table++)
+		entry = debugfs_create_file(table->procname, table->mode,
+					    lnet_debugfs_root, table,
+					    &lnet_debugfs_file_operations);
+
+	for (symlinks = lnet_debugfs_symlinks; symlinks->name; symlinks++)
+		entry = debugfs_create_symlink(symlinks->name,
+					       lnet_debugfs_root,
+					       symlinks->target);
+
+}
+
+static void remove_debugfs(void)
+{
+	if (lnet_debugfs_root != NULL)
+		debugfs_remove_recursive(lnet_debugfs_root);
+
+	lnet_debugfs_root = NULL;
 }
 
 MODULE_VERSION("1.0.0");
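The module.c changes above move the lnet ctl_table entries from /proc/sys/lnet into debugfs by reusing each entry's existing proc_handler behind plain debugfs file operations. The following sketch distils that delegation; it is purely illustrative, not part of the patch, and example_val, example_table, example_read and example_fops are hypothetical names.

/* Illustrative sketch only -- not part of this patch.  It shows the same
 * delegation idea as lnet_debugfs_read()/insert_debugfs() above: a debugfs
 * file whose read path calls the ctl_table proc_handler.  All names here
 * (example_val, example_table, example_read, example_fops) are hypothetical.
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/sysctl.h>

static int example_val;

static struct ctl_table example_table = {
	.procname	= "example_val",
	.data		= &example_val,
	.maxlen		= sizeof(int),
	.mode		= 0644,
	.proc_handler	= proc_dointvec,
};

static ssize_t example_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct ctl_table *table = filp->private_data;
	int rc;

	rc = table->proc_handler(table, 0, (void __user *)buf, &count, ppos);
	return rc ? rc : count;
}

static const struct file_operations example_fops = {
	.open	= simple_open,
	.read	= example_read,
	.llseek	= default_llseek,
};

/* registration, e.g. from module init:
 *	debugfs_create_file(example_table.procname, example_table.mode,
 *			    NULL, &example_table, &example_fops);
 */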
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 6ee2adc..effa2af 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -937,18 +937,6 @@
 	return 0;
 }
 
-int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob)
-{
-	char     str[32];
-	int      rc;
-
-	rc = cfs_trace_copyin_string(str, sizeof(str), usr_str, usr_str_nob);
-	if (rc < 0)
-		return rc;
-
-	return cfs_trace_set_debug_mb(simple_strtoul(str, NULL, 0));
-}
-
 int cfs_trace_get_debug_mb(void)
 {
 	int i;
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.h b/drivers/staging/lustre/lustre/libcfs/tracefile.h
index 0601476..e931f6d 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.h
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.h
@@ -47,7 +47,7 @@
 extern char      cfs_tracefile[TRACEFILE_NAME_SIZE];
 extern long long cfs_tracefile_size;
 
-extern void libcfs_run_debug_log_upcall(char *file);
+void libcfs_run_debug_log_upcall(char *file);
 
 int  cfs_tracefile_init_arch(void);
 void cfs_tracefile_fini_arch(void);
@@ -77,14 +77,13 @@
 int cfs_trace_daemon_command(char *str);
 int cfs_trace_daemon_command_usrstr(void __user *usr_str, int usr_str_nob);
 int cfs_trace_set_debug_mb(int mb);
-int cfs_trace_set_debug_mb_usrstr(void __user *usr_str, int usr_str_nob);
 int cfs_trace_get_debug_mb(void);
 
-extern void libcfs_debug_dumplog_internal(void *arg);
-extern void libcfs_register_panic_notifier(void);
-extern void libcfs_unregister_panic_notifier(void);
+void libcfs_debug_dumplog_internal(void *arg);
+void libcfs_register_panic_notifier(void);
+void libcfs_unregister_panic_notifier(void);
 extern int  libcfs_panic_in_progress;
-extern int  cfs_trace_max_debug_mb(void);
+int cfs_trace_max_debug_mb(void);
 
 #define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
@@ -253,15 +252,15 @@
 	unsigned short       type;
 };
 
-extern void cfs_set_ptldebug_header(struct ptldebug_header *header,
-				    struct libcfs_debug_msg_data *m,
-				    unsigned long stack);
-extern void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
-				 const char *buf, int len, const char *file,
-				 const char *fn);
+void cfs_set_ptldebug_header(struct ptldebug_header *header,
+			     struct libcfs_debug_msg_data *m,
+			     unsigned long stack);
+void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
+			  const char *buf, int len, const char *file,
+			  const char *fn);
 
-extern int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
-extern void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
+void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking);
 
 /**
  * trace_buf_type_t, trace_buf_idx_get() and trace_console_buffers[][]
@@ -271,7 +270,7 @@
  */
 
 extern char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-extern cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
+cfs_trace_buf_type_t cfs_trace_buf_idx_get(void);
 
 static inline char *
 cfs_trace_get_console_buffer(void)
@@ -314,8 +313,8 @@
 int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
 		      struct cfs_trace_page *tage);
 
-extern void cfs_trace_assertion_failed(const char *str,
-				       struct libcfs_debug_msg_data *m);
+void cfs_trace_assertion_failed(const char *str,
+				struct libcfs_debug_msg_data *m);
 
 /* ASSERTION that is safe to use within the debug system */
 #define __LASSERT(cond)						 \
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index 7b008a6..b866859 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -250,7 +250,6 @@
 void ll_invalidate_aliases(struct inode *inode)
 {
 	struct dentry *dentry;
-	struct ll_d_hlist_node *p;
 
 	LASSERT(inode != NULL);
 
@@ -258,7 +257,7 @@
 	       inode->i_ino, inode->i_generation, inode);
 
 	ll_lock_dcache(inode);
-	ll_d_hlist_for_each_entry(dentry, p, &inode->i_dentry, d_u.d_alias) {
+	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
 		CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
 		       dentry, dentry, dentry->d_parent,
 		       d_inode(dentry), dentry->d_flags);
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 3d746a9..769b611 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -203,7 +203,6 @@
 
 	CDEBUG(D_VFSTRACE, "read %d/%d pages\n", nrdpgs, npages);
 
-	ll_pagevec_init(&lru_pvec, 0);
 	for (i = 1; i < npages; i++) {
 		unsigned long offset;
 		int ret;
@@ -228,15 +227,12 @@
 					    GFP_KERNEL);
 		if (ret == 0) {
 			unlock_page(page);
-			if (ll_pagevec_add(&lru_pvec, page) == 0)
-				ll_pagevec_lru_add_file(&lru_pvec);
 		} else {
 			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
 			       offset, ret);
 		}
 		page_cache_release(page);
 	}
-	ll_pagevec_lru_add_file(&lru_pvec);
 
 	if (page_pool != &page0)
 		kfree(page_pool);
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 3075db2..dcd0c6d 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -702,8 +702,7 @@
 out_openerr:
 		if (opendir_set != 0)
 			ll_stop_statahead(inode, lli->lli_opendir_key);
-		if (fd != NULL)
-			ll_file_data_put(fd);
+		ll_file_data_put(fd);
 	} else {
 		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_OPEN, 1);
 	}
@@ -3005,7 +3004,7 @@
 	struct inode *inode = d_inode(de);
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 	struct ll_inode_info *lli = ll_i2info(inode);
-	int res = 0;
+	int res;
 
 	res = ll_inode_revalidate(de, MDS_INODELOCK_UPDATE |
 				      MDS_INODELOCK_LOOKUP);
diff --git a/drivers/staging/lustre/lustre/llite/llite_capa.c b/drivers/staging/lustre/lustre/llite/llite_capa.c
index a626871..24590ae 100644
--- a/drivers/staging/lustre/lustre/llite/llite_capa.c
+++ b/drivers/staging/lustre/lustre/llite/llite_capa.c
@@ -70,7 +70,8 @@
 
 static int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa);
 
-static inline void update_capa_timer(struct obd_capa *ocapa, unsigned long expiry)
+static inline void update_capa_timer(struct obd_capa *ocapa,
+				     unsigned long expiry)
 {
 	if (time_before(expiry, ll_capa_timer.expires) ||
 	    !timer_pending(&ll_capa_timer)) {
@@ -102,13 +103,13 @@
 	spin_lock(&capa_lock);
 	if (!list_empty(ll_capa_list)) {
 		ocapa = list_entry(ll_capa_list->next, struct obd_capa,
-				       c_list);
+				   c_list);
 		expired = capa_is_to_expire(ocapa);
 		if (!expired)
 			update_capa_timer(ocapa, capa_renewal_time(ocapa));
 	} else if (!list_empty(&ll_idle_capas)) {
 		ocapa = list_entry(ll_idle_capas.next, struct obd_capa,
-				       c_list);
+				   c_list);
 		expired = capa_is_expired(ocapa);
 		if (!expired)
 			update_capa_timer(ocapa, ocapa->c_expiry);
@@ -165,7 +166,8 @@
 /* three places where client capa is deleted:
  * 1. capa_thread_main(), main place to delete expired capa.
  * 2. ll_clear_inode_capas() in ll_clear_inode().
- * 3. ll_truncate_free_capa() delete truncate capa explicitly in ll_setattr_ost().
+ * 3. ll_truncate_free_capa() delete truncate capa explicitly in
+ *    ll_setattr_ost().
  */
 static int capa_thread_main(void *unused)
 {
@@ -206,7 +208,8 @@
 			 * lock.
 			 */
 			/* ibits may be changed by ll_have_md_lock() so we have
-			 * to set it each time */
+			 * to set it each time
+			 */
 			ibits = MDS_INODELOCK_LOOKUP;
 			if (capa_for_mds(&ocapa->c_capa) &&
 			    !S_ISDIR(ocapa->u.cli.inode->i_mode) &&
@@ -225,14 +228,15 @@
 			if (capa_for_oss(&ocapa->c_capa) &&
 			    obd_capa_open_count(ocapa) == 0) {
 				/* oss capa with open count == 0 won't renew,
-				 * move to idle list */
+				 * move to idle list
+				 */
 				sort_add_capa(ocapa, &ll_idle_capas);
 				continue;
 			}
 
 			/* NB iput() is in ll_update_capa() */
 			inode = igrab(ocapa->u.cli.inode);
-			if (inode == NULL) {
+			if (!inode) {
 				DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
 					   "igrab failed for");
 				continue;
@@ -255,7 +259,7 @@
 			update_capa_timer(next, capa_renewal_time(next));
 
 		list_for_each_entry_safe(ocapa, tmp, &ll_idle_capas,
-					     c_list) {
+					 c_list) {
 			if (!capa_is_expired(ocapa)) {
 				if (!next)
 					update_capa_timer(ocapa,
@@ -299,11 +303,11 @@
 	task = kthread_run(capa_thread_main, NULL, "ll_capa");
 	if (IS_ERR(task)) {
 		CERROR("cannot start expired capa thread: rc %ld\n",
-			PTR_ERR(task));
+		       PTR_ERR(task));
 		return PTR_ERR(task);
 	}
 	wait_event(ll_capa_thread.t_ctl_waitq,
-		       thread_is_running(&ll_capa_thread));
+		   thread_is_running(&ll_capa_thread));
 
 	return 0;
 }
@@ -313,7 +317,7 @@
 	thread_set_flags(&ll_capa_thread, SVC_STOPPING);
 	wake_up(&ll_capa_thread.t_ctl_waitq);
 	wait_event(ll_capa_thread.t_ctl_waitq,
-		       thread_is_stopped(&ll_capa_thread));
+		   thread_is_stopped(&ll_capa_thread));
 }
 
 struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
@@ -360,7 +364,7 @@
 		ocapa = NULL;
 
 		if (atomic_read(&ll_capa_debug)) {
-			CERROR("no capability for "DFID" opc %#llx\n",
+			CERROR("no capability for " DFID " opc %#llx\n",
 			       PFID(&lli->lli_fid), opc);
 			atomic_set(&ll_capa_debug, 0);
 		}
@@ -376,7 +380,7 @@
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct obd_capa *ocapa;
 
-	LASSERT(inode != NULL);
+	LASSERT(inode);
 
 	if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
 		return NULL;
@@ -385,7 +389,7 @@
 	ocapa = capa_get(lli->lli_mds_capa);
 	spin_unlock(&capa_lock);
 	if (!ocapa && atomic_read(&ll_capa_debug)) {
-		CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
+		CERROR("no mds capability for " DFID "\n", PFID(&lli->lli_fid));
 		atomic_set(&ll_capa_debug, 0);
 	}
 
@@ -447,7 +451,8 @@
 	struct list_head *next = NULL;
 
 	/* capa is sorted in lli_oss_capas so lookup can always find the
-	 * latest one */
+	 * latest one
+	 */
 	list_for_each_entry(tmp, &lli->lli_oss_capas, u.cli.lli_list) {
 		if (cfs_time_after(ocapa->c_expiry, tmp->c_expiry)) {
 			next = &tmp->u.cli.lli_list;
@@ -537,7 +542,8 @@
 			ll_capa_renewal_failed++;
 
 			/* failed capa won't be renewed any longer, but if -EIO,
-			 * client might be doing recovery, retry in 2 min. */
+			 * client might be doing recovery, retry in 2 min.
+			 */
 			if (rc == -EIO && !capa_is_expired(ocapa)) {
 				delay_capa_renew(ocapa, 120);
 				DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
@@ -638,7 +644,7 @@
 		ll_delete_capa(ocapa);
 
 	list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
-				     u.cli.lli_list)
+				 u.cli.lli_list)
 		ll_delete_capa(ocapa);
 	spin_unlock(&capa_lock);
 }
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index f097d4d..ec8fff4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -456,7 +456,6 @@
 };
 
 struct ll_sb_info {
-	struct list_head		  ll_list;
 	/* this protects pglist and ra_info.  It isn't safe to
 	 * grab from interrupt contexts */
 	spinlock_t		  ll_lock;
@@ -711,11 +710,11 @@
 extern struct file_operations ll_file_operations_flock;
 extern struct file_operations ll_file_operations_noflock;
 extern struct inode_operations ll_file_inode_operations;
-extern int ll_have_md_lock(struct inode *inode, __u64 *bits,
-			   ldlm_mode_t l_req_mode);
-extern ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
-				   struct lustre_handle *lockh, __u64 flags,
-				   ldlm_mode_t mode);
+int ll_have_md_lock(struct inode *inode, __u64 *bits,
+		    ldlm_mode_t l_req_mode);
+ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
+			    struct lustre_handle *lockh, __u64 flags,
+			    ldlm_mode_t mode);
 int ll_file_open(struct inode *inode, struct file *file);
 int ll_file_release(struct inode *inode, struct file *file);
 int ll_glimpse_ioctl(struct ll_sb_info *sbi,
@@ -1376,9 +1375,9 @@
 	ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
 }
 
-extern ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
-				  int rw, struct inode *inode,
-				  struct ll_dio_pages *pv);
+ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
+			   int rw, struct inode *inode,
+			   struct ll_dio_pages *pv);
 
 static inline int ll_file_nolock(const struct file *file)
 {
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index 2513988..b4ed6c8 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -60,9 +60,6 @@
 struct dentry *llite_root;
 struct kset *llite_kset;
 
-static LIST_HEAD(ll_super_blocks);
-static DEFINE_SPINLOCK(ll_sb_lock);
-
 #ifndef log2
 #define log2(n) ffz(~(n))
 #endif
@@ -112,10 +109,6 @@
 	class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
 	CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
 
-	spin_lock(&ll_sb_lock);
-	list_add_tail(&sbi->ll_list, &ll_super_blocks);
-	spin_unlock(&ll_sb_lock);
-
 	sbi->ll_flags |= LL_SBI_VERBOSE;
 	sbi->ll_flags |= LL_SBI_CHECKSUM;
 
@@ -144,12 +137,7 @@
 {
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 
-	if (sbi != NULL) {
-		spin_lock(&ll_sb_lock);
-		list_del(&sbi->ll_list);
-		spin_unlock(&ll_sb_lock);
-		kfree(sbi);
-	}
+	kfree(sbi);
 }
 
 static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
@@ -1114,7 +1102,7 @@
 	if (lli->lli_mds_read_och)
 		ll_md_real_close(inode, FMODE_READ);
 
-	if (S_ISLNK(inode->i_mode) && lli->lli_symlink_name) {
+	if (S_ISLNK(inode->i_mode)) {
 		kfree(lli->lli_symlink_name);
 		lli->lli_symlink_name = NULL;
 	}
@@ -1150,6 +1138,8 @@
 	lli->lli_has_smd = false;
 }
 
+#define TIMES_SET_FLAGS (ATTR_MTIME_SET | ATTR_ATIME_SET | ATTR_TIMES_SET)
+
 static int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
 		  struct md_open_data **mod)
 {
@@ -1354,11 +1344,8 @@
 	if (!op_data)
 		return -ENOMEM;
 
-	if (!S_ISDIR(inode->i_mode)) {
-		if (attr->ia_valid & ATTR_SIZE)
-			inode_dio_write_done(inode);
+	if (!S_ISDIR(inode->i_mode))
 		mutex_unlock(&inode->i_mutex);
-	}
 
 	memcpy(&op_data->op_attr, attr, sizeof(*attr));
 
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index cc00fd1..5f0d80c 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -162,7 +162,7 @@
 static struct lloop_device *loop_dev;
 static struct gendisk **disks;
 static struct mutex lloop_mutex;
-static void *ll_iocontrol_magic = NULL;
+static void *ll_iocontrol_magic;
 
 static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
 {
@@ -340,6 +340,8 @@
 	int rw = bio_rw(old_bio);
 	int inactive;
 
+	blk_queue_split(q, &old_bio, q->bio_split);
+
 	if (!lo)
 		goto err;
 
@@ -365,7 +367,7 @@
 	loop_add_bio(lo, old_bio);
 	return;
 err:
-	cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
+	bio_io_error(old_bio);
 }
 
 
@@ -376,7 +378,8 @@
 	while (bio) {
 		struct bio *tmp = bio->bi_next;
 		bio->bi_next = NULL;
-		cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
+		bio->bi_error = ret;
+		bio_endio(bio);
 		bio = tmp;
 	}
 }
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 72ce6e7..05e7dc8 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -144,10 +144,9 @@
 static void ll_invalidate_negative_children(struct inode *dir)
 {
 	struct dentry *dentry, *tmp_subdir;
-	struct ll_d_hlist_node *p;
 
 	ll_lock_dcache(dir);
-	ll_d_hlist_for_each_entry(dentry, p, &dir->i_dentry, d_u.d_alias) {
+	hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
 		spin_lock(&dentry->d_lock);
 		if (!list_empty(&dentry->d_subdirs)) {
 			struct dentry *child;
@@ -334,15 +333,14 @@
 static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
 {
 	struct dentry *alias, *discon_alias, *invalid_alias;
-	struct ll_d_hlist_node *p;
 
-	if (ll_d_hlist_empty(&inode->i_dentry))
+	if (hlist_empty(&inode->i_dentry))
 		return NULL;
 
 	discon_alias = invalid_alias = NULL;
 
 	ll_lock_dcache(inode);
-	ll_d_hlist_for_each_entry(alias, p, &inode->i_dentry, d_u.d_alias) {
+	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
 		LASSERT(alias != dentry);
 
 		spin_lock(&alias->d_lock);
@@ -690,7 +688,7 @@
 		goto out;
 	}
 
-	LASSERT(ll_d_hlist_empty(&inode->i_dentry));
+	LASSERT(hlist_empty(&inode->i_dentry));
 
 	/* We asked for a lock on the directory, but were granted a
 	 * lock on the inode.  Since we finally have an inode pointer,
@@ -1008,7 +1006,7 @@
 	return rc;
 }
 
-static int ll_mkdir(struct inode *dir, struct dentry *dentry, ll_umode_t mode)
+static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int err;
 
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
index a581826..39022ea 100644
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -54,8 +54,8 @@
 #include "../include/lustre_param.h"
 #include "llite_internal.h"
 
-struct kmem_cache *ll_remote_perm_cachep = NULL;
-struct kmem_cache *ll_rmtperm_hash_cachep = NULL;
+struct kmem_cache *ll_remote_perm_cachep;
+struct kmem_cache *ll_rmtperm_hash_cachep;
 
 static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
 {
@@ -104,8 +104,7 @@
 		return;
 
 	for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
-		hlist_for_each_entry_safe(lrp, next, hash + i,
-					      lrp_list)
+		hlist_for_each_entry_safe(lrp, next, hash + i, lrp_list)
 			free_ll_remote_perm(lrp);
 	OBD_SLAB_FREE(hash, ll_rmtperm_hash_cachep,
 		      REMOTE_PERM_HASHSIZE * sizeof(*hash));
@@ -117,7 +116,8 @@
 }
 
 /* NB: setxid permission is not checked here, instead it's done on
- * MDT when client get remote permission. */
+ * MDT when client get remote permission.
+ */
 static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
 {
 	struct hlist_head *head;
@@ -184,7 +184,7 @@
 
 	if (!lli->lli_remote_perms) {
 		perm_hash = alloc_rmtperm_hash();
-		if (perm_hash == NULL) {
+		if (!perm_hash) {
 			CERROR("alloc lli_remote_perms failed!\n");
 			return -ENOMEM;
 		}
@@ -287,7 +287,7 @@
 
 		perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
 						   lustre_swab_mdt_remote_perm);
-		if (unlikely(perm == NULL)) {
+		if (unlikely(!perm)) {
 			mutex_unlock(&lli->lli_rmtperm_mutex);
 			rc = -EPROTO;
 			break;
@@ -321,8 +321,7 @@
 	spin_lock(&lli->lli_lock);
 
 	for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
-		hlist_for_each_entry_safe(lrp, node, next, hash + i,
-					      lrp_list)
+		hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
 			free_ll_remote_perm(lrp);
 	}
 
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 91bba79..a659962 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -455,12 +455,11 @@
 	struct cl_io *io    = ios->cis_io;
 	struct inode *inode = ccc_object_inode(io->ci_obj);
 
-	if (cl_io_is_trunc(io)) {
+	if (cl_io_is_trunc(io))
 		/* Truncate in memory pages - they must be clean pages
 		 * because osc has already notified to destroy osc_extents. */
 		vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
-		inode_dio_write_done(inode);
-	}
+
 	mutex_unlock(&inode->i_mutex);
 }
 
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 954ed08..a3cf5ad 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -227,11 +227,16 @@
 			       struct cl_io *unused)
 {
 	struct page *vmpage = cl2vm_page(slice);
+	struct cl_page *pg = slice->cpl_page;
 
 	LASSERT(PageLocked(vmpage));
 	LASSERT(!PageDirty(vmpage));
 
-	set_page_writeback(vmpage);
+	/* ll_writepage path is not a sync write, so need to set page writeback
+	 * flag */
+	if (!pg->cp_sync_io)
+		set_page_writeback(vmpage);
+
 	vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
 
 	return 0;
@@ -298,9 +303,6 @@
 	struct cl_page  *pg     = slice->cpl_page;
 	struct page      *vmpage = cp->cpg_page;
 
-	LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
-	LASSERT(PageWriteback(vmpage));
-
 	CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
 	/*
@@ -316,14 +318,19 @@
 	cp->cpg_write_queued = 0;
 	vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
 
-	/*
-	 * Only mark the page error only when it's an async write because
-	 * applications won't wait for IO to finish.
-	 */
-	if (pg->cp_sync_io == NULL)
+	if (pg->cp_sync_io != NULL) {
+		LASSERT(PageLocked(vmpage));
+		LASSERT(!PageWriteback(vmpage));
+	} else {
+		LASSERT(PageWriteback(vmpage));
+		/*
+		 * Only mark the page error only when it's an async write
+		 * because applications won't wait for IO to finish.
+		 */
 		vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
 
-	end_page_writeback(vmpage);
+		end_page_writeback(vmpage);
+	}
 }
 
 /**
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 6956dec..9e763ce 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -357,7 +357,7 @@
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct mdt_body *body;
 	__u32 *xsizes;
-	int rc = 0, i;
+	int rc, i;
 
 
 
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index cb35f63..eebe45b 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -100,7 +100,7 @@
 	}
 
 	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
-	if (op_data == NULL) {
+	if (!op_data) {
 		rc = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index ac5053c..c9e0536e 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -716,7 +716,7 @@
 	if (remote_gf == NULL) {
 		remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
 		remote_gf = kzalloc(remote_gf_size, GFP_NOFS);
-		if (remote_gf == NULL) {
+		if (!remote_gf) {
 			rc = -ENOMEM;
 			goto out_fid2path;
 		}
@@ -1398,7 +1398,7 @@
 		return rc;
 
 	temp = kzalloc(sizeof(*temp), GFP_NOFS);
-	if (temp == NULL)
+	if (!temp)
 		return -ENOMEM;
 
 	for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
@@ -1730,7 +1730,7 @@
 	}
 
 	rdata = kzalloc(sizeof(*rdata), GFP_NOFS);
-	if (rdata == NULL) {
+	if (!rdata) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1993,7 +1993,7 @@
 	struct obd_device       *obd = exp->exp_obd;
 	struct lmv_obd	  *lmv = &obd->u.lmv;
 	struct lmv_tgt_desc     *tgt;
-	int		      rc = 0;
+	int		      rc;
 
 	rc = lmv_check_connect(obd);
 	if (rc)
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 504b24a..8c3bbe5 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -478,7 +478,7 @@
 	int rc;
 
 	ld = kzalloc(sizeof(*ld), GFP_NOFS);
-	if (ld == NULL)
+	if (!ld)
 		return ERR_PTR(-ENOMEM);
 
 	cl_device_init(&ld->ld_cl, t);
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 11c1081..bf36291 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -181,7 +181,7 @@
 			} else {
 				sub->sub_io = kzalloc(sizeof(*sub->sub_io),
 						      GFP_NOFS);
-				if (sub->sub_io == NULL)
+				if (!sub->sub_io)
 					result = -ENOMEM;
 			}
 		}
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index b7e7bfa..dd1cf3d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -123,6 +123,7 @@
 	if (shrink) {
 		for (; stripe < lsm->lsm_stripe_count; stripe++) {
 			struct lov_oinfo *loi = lsm->lsm_oinfo[stripe];
+
 			kms = lov_size_to_stripe(lsm, size, stripe);
 			CDEBUG(D_INODE,
 			       "stripe %d KMS %sing %llu->%llu\n",
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 96c55ac..c5c67d9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -107,6 +107,10 @@
 			/* Disconnect */
 			__lov_del_obd(obd, tgt);
 		}
+
+		if (lov->lov_tgts_kobj)
+			kobject_put(lov->lov_tgts_kobj);
+
 	} else {
 		mutex_unlock(&lov->lov_lock);
 	}
@@ -322,9 +326,6 @@
 		}
 	}
 
-	if (lov->lov_tgts_kobj)
-		kobject_put(lov->lov_tgts_kobj);
-
 	obd_putref(obd);
 
 out:
@@ -976,7 +977,7 @@
 		src_oa->o_flags & OBD_FL_RECREATE_OBJS);
 
 	obj_mdp = kzalloc(sizeof(*obj_mdp), GFP_NOFS);
-	if (obj_mdp == NULL)
+	if (!obj_mdp)
 		return -ENOMEM;
 
 	ost_idx = src_oa->o_nlink;
@@ -1439,7 +1440,7 @@
 		__u32 *genp;
 
 		len = 0;
-		if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
+		if (obd_ioctl_getdata(&buf, &len, uarg))
 			return -EINVAL;
 
 		data = (struct obd_ioctl_data *)buf;
@@ -1472,7 +1473,7 @@
 			*genp = lov->lov_tgts[i]->ltd_gen;
 		}
 
-		if (copy_to_user((void *)uarg, buf, len))
+		if (copy_to_user(uarg, buf, len))
 			rc = -EFAULT;
 		obd_ioctl_freedata(buf, len);
 		break;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 1e4d3fb..c59b140 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -431,7 +431,7 @@
 		return -ENAMETOOLONG;
 
 	new_pool = kzalloc(sizeof(*new_pool), GFP_NOFS);
-	if (new_pool == NULL)
+	if (!new_pool)
 		return -ENOMEM;
 
 	strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index f4de8b8..416e42e 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -275,7 +275,7 @@
 	int rc = 0, i;
 
 	set = kzalloc(sizeof(*set), GFP_NOFS);
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 	lov_init_set(set);
 
@@ -301,7 +301,7 @@
 		}
 
 		req = kzalloc(sizeof(*req), GFP_NOFS);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto out_set;
 		}
@@ -358,7 +358,7 @@
 	int rc = 0, i;
 
 	set = kzalloc(sizeof(*set), GFP_NOFS);
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 	lov_init_set(set);
 
@@ -384,7 +384,7 @@
 		}
 
 		req = kzalloc(sizeof(*req), GFP_NOFS);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto out_set;
 		}
@@ -477,7 +477,7 @@
 	int rc = 0, i;
 
 	set = kzalloc(sizeof(*set), GFP_NOFS);
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 	lov_init_set(set);
 
@@ -500,7 +500,7 @@
 		}
 
 		req = kzalloc(sizeof(*req), GFP_NOFS);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto out_set;
 		}
@@ -704,7 +704,7 @@
 	int rc = 0, i;
 
 	set = kzalloc(sizeof(*set), GFP_NOFS);
-	if (set == NULL)
+	if (!set)
 		return -ENOMEM;
 	lov_init_set(set);
 
@@ -730,14 +730,14 @@
 		}
 
 		req = kzalloc(sizeof(*req), GFP_NOFS);
-		if (req == NULL) {
+		if (!req) {
 			rc = -ENOMEM;
 			goto out_set;
 		}
 
 		req->rq_oi.oi_osfs = kzalloc(sizeof(*req->rq_oi.oi_osfs),
 					     GFP_NOFS);
-		if (req->rq_oi.oi_osfs == NULL) {
+		if (!req->rq_oi.oi_osfs) {
 			kfree(req);
 			rc = -ENOMEM;
 			goto out_set;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index d3234cb..1a850ea 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -286,7 +286,7 @@
 		sa_valid |= MDS_ATTR_KILL_SGID;
 	if (ia_valid & ATTR_CTIME_SET)
 		sa_valid |= MDS_ATTR_CTIME_SET;
-	if (ia_valid & ATTR_FROM_OPEN)
+	if (ia_valid & ATTR_OPEN)
 		sa_valid |= MDS_ATTR_FROM_OPEN;
 	if (ia_valid & ATTR_BLOCKS)
 		sa_valid |= MDS_ATTR_BLOCKS;
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 7f208a6..204d512 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1202,7 +1202,7 @@
 	/* Key is KEY_FID2PATH + getinfo_fid2path description */
 	keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
 	key = kzalloc(keylen, GFP_NOFS);
-	if (key == NULL)
+	if (!key)
 		return -ENOMEM;
 	memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
 	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
@@ -1605,7 +1605,7 @@
 	       cs->cs_fp, cs->cs_startrec);
 
 	cs->cs_buf = kzalloc(KUC_CHANGELOG_MSG_MAXSIZE, GFP_NOFS);
-	if (cs->cs_buf == NULL) {
+	if (!cs->cs_buf) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1934,7 +1934,7 @@
 		struct obd_quotactl *oqctl;
 
 		oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
-		if (oqctl == NULL) {
+		if (!oqctl) {
 			rc = -ENOMEM;
 			goto out;
 		}
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 174dfc3..019ee2f 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -1128,7 +1128,7 @@
 	LASSERT(cfg->cfg_sb == cfg->cfg_instance);
 
 	inst = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-	if (inst == NULL)
+	if (!inst)
 		return -ENOMEM;
 
 	if (!IS_SERVER(lsi)) {
@@ -1232,7 +1232,7 @@
 		pos += sprintf(obdname + pos, "-%s%04x",
 				  is_ost ? "OST" : "MDT", entry->mne_index);
 
-		cname = is_ost ? "osc" : "mdc",
+		cname = is_ost ? "osc" : "mdc";
 		pos += sprintf(obdname + pos, "-%s-%s", cname, inst);
 		lustre_cfg_bufs_reset(&bufs, obdname);
 
@@ -1493,7 +1493,7 @@
 		lsi = s2lsi(cld->cld_cfg.cfg_sb);
 
 	env = kzalloc(sizeof(*env), GFP_NOFS);
-	if (env == NULL)
+	if (!env)
 		return -ENOMEM;
 
 	rc = lu_env_init(env, LCT_MG_THREAD);
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index bc3fc47..933456c 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -104,11 +104,10 @@
 	if (unlikely(old_count <= new_count))
 		return old_size;
 
-	new = kzalloc(new_size, GFP_NOFS);
+	new = kmemdup(*header, new_size, GFP_NOFS);
 	if (unlikely(new == NULL))
 		return -ENOMEM;
 
-	memcpy(new, *header, new_size);
 	kfree(*header);
 	*header = new;
 	return new_size;
@@ -125,11 +124,10 @@
 	if (unlikely(old_count <= ext_count))
 		return 0;
 
-	new = kzalloc(ext_size, GFP_NOFS);
+	new = kmemdup(*header, ext_size, GFP_NOFS);
 	if (unlikely(new == NULL))
 		return -ENOMEM;
 
-	memcpy(new, *header, ext_size);
 	kfree(*header);
 	*header = new;
 	return 0;
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index a7f3032..d5fb81f 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -51,13 +51,13 @@
 static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
 			    int radix);
 
-# define PASSERT(env, page, expr)				       \
-  do {								    \
-	  if (unlikely(!(expr))) {				      \
-		  CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n");    \
-		  LASSERT(0);					   \
-	  }							     \
-  } while (0)
+# define PASSERT(env, page, expr)					   \
+	do {								   \
+		if (unlikely(!(expr))) {				   \
+			CL_PAGE_DEBUG(D_ERROR, (env), (page), #expr "\n"); \
+			LASSERT(0);					   \
+		}							   \
+	} while (0)
 
 # define PINVRNT(env, page, exp) \
 	((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
@@ -169,6 +169,7 @@
 	while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
 					    idx, CLT_PVEC_SIZE)) > 0) {
 		int end_of_region = 0;
+
 		idx = pvec[nr - 1]->cp_index + 1;
 		for (i = 0, j = 0; i < nr; ++i) {
 			page = pvec[i];
@@ -286,6 +287,7 @@
 			GFP_NOFS);
 	if (page != NULL) {
 		int result = 0;
+
 		atomic_set(&page->cp_ref, 1);
 		if (type == CPT_CACHEABLE) /* for radix tree */
 			atomic_inc(&page->cp_ref);
@@ -352,8 +354,10 @@
 	       idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
 	/* fast path. */
 	if (type == CPT_CACHEABLE) {
-		/* vmpage lock is used to protect the child/parent
-		 * relationship */
+		/*
+		 * vmpage lock is used to protect the child/parent
+		 * relationship
+		 */
 		KLASSERT(PageLocked(vmpage));
 		/*
 		 * cl_vmpage_page() can be called here without any locks as
@@ -372,9 +376,8 @@
 						       idx) == page));
 	}
 
-	if (page != NULL) {
+	if (page != NULL)
 		return page;
-	}
 
 	/* allocate and initialize cl_page */
 	page = cl_page_alloc(env, o, idx, vmpage, type);
@@ -1189,9 +1192,6 @@
 	if (result == 0)
 		cl_page_io_start(env, pg, crt);
 
-	KLASSERT(ergo(crt == CRT_WRITE && pg->cp_type == CPT_CACHEABLE,
-		      equi(result == 0,
-			   PageWriteback(cl_page_vmpage(env, pg)))));
 	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
 	return result;
 }
@@ -1425,7 +1425,7 @@
 	CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", from, to);
 	CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_clip),
 		       (const struct lu_env *,
-			const struct cl_page_slice *,int, int),
+			const struct cl_page_slice *, int, int),
 		       from, to);
 }
 EXPORT_SYMBOL(cl_page_clip);
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index 1bc3756..2c705d7 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -78,12 +78,8 @@
 EXPORT_SYMBOL(obd_dirty_pages);
 unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT;   /* seconds */
 EXPORT_SYMBOL(obd_timeout);
-unsigned int ldlm_timeout = LDLM_TIMEOUT_DEFAULT; /* seconds */
-EXPORT_SYMBOL(ldlm_timeout);
 unsigned int obd_timeout_set;
 EXPORT_SYMBOL(obd_timeout_set);
-unsigned int ldlm_timeout_set;
-EXPORT_SYMBOL(ldlm_timeout_set);
 /* Adaptive timeout defs here instead of ptlrpc module for /proc/sys/ access */
 unsigned int at_min = 0;
 EXPORT_SYMBOL(at_min);
@@ -144,11 +140,11 @@
 		CERROR("%s%salloc of %s (%llu bytes) failed at %s:%d\n",
 		       ptr ? "force " :"", type, name, (__u64)size, file,
 		       line);
-		CERROR("%llu total bytes and %llu total pages (%llu bytes) allocated by Lustre, %d total bytes by LNET\n",
+		CERROR("%llu total bytes and %llu total pages"
+			" (%llu bytes) allocated by Lustre\n",
 		       obd_memory_sum(),
 		       obd_pages_sum() << PAGE_CACHE_SHIFT,
-		       obd_pages_sum(),
-		       atomic_read(&libcfs_kmemory));
+		       obd_pages_sum());
 		return 1;
 	}
 	return 0;
@@ -232,7 +228,7 @@
 			goto out;
 		}
 		lcfg = kzalloc(data->ioc_plen1, GFP_NOFS);
-		if (lcfg == NULL) {
+		if (!lcfg) {
 			err = -ENOMEM;
 			goto out;
 		}
@@ -571,12 +567,14 @@
 	if (err)
 		return err;
 
-	obd_sysctl_init();
-
 	err = class_procfs_init();
 	if (err)
 		return err;
 
+	err = obd_sysctl_init();
+	if (err)
+		return err;
+
 	err = lu_global_init();
 	if (err)
 		return err;
@@ -661,7 +659,6 @@
 	lu_global_fini();
 
 	obd_cleanup_caches();
-	obd_sysctl_clean();
 
 	class_procfs_clean();
 
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index 978c3c5..0ca73094 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -45,7 +45,7 @@
 
 spinlock_t obd_types_lock;
 
-struct kmem_cache *obd_device_cachep;
+static struct kmem_cache *obd_device_cachep;
 struct kmem_cache *obdo_cachep;
 EXPORT_SYMBOL(obdo_cachep);
 static struct kmem_cache *import_cachep;
@@ -71,9 +71,8 @@
 	struct obd_device *obd;
 
 	OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, GFP_NOFS);
-	if (obd != NULL) {
+	if (obd != NULL)
 		obd->obd_magic = OBD_DEVICE_MAGIC;
-	}
 	return obd;
 }
 
@@ -172,7 +171,7 @@
 
 	rc = -ENOMEM;
 	type = kzalloc(sizeof(*type), GFP_NOFS);
-	if (type == NULL)
+	if (!type)
 		return rc;
 
 	type->typ_dt_ops = kzalloc(sizeof(*type->typ_dt_ops), GFP_NOFS);
@@ -294,7 +293,7 @@
 	}
 
 	type = class_get_type(type_name);
-	if (type == NULL){
+	if (type == NULL) {
 		CERROR("OBD: unknown type: %s\n", type_name);
 		return ERR_PTR(-ENODEV);
 	}
@@ -999,7 +998,8 @@
 }
 EXPORT_SYMBOL(class_import_put);
 
-static void init_imp_at(struct imp_at *at) {
+static void init_imp_at(struct imp_at *at)
+{
 	int i;
 	at_init(&at->iat_net_latency, 0, 0);
 	for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
@@ -1016,7 +1016,7 @@
 	struct obd_import *imp;
 
 	imp = kzalloc(sizeof(*imp), GFP_NOFS);
-	if (imp == NULL)
+	if (!imp)
 		return NULL;
 
 	INIT_LIST_HEAD(&imp->imp_pinger_chain);
@@ -1642,7 +1642,8 @@
 /**
  * Add export to the obd_zombie thread and notify it.
  */
-static void obd_zombie_export_add(struct obd_export *exp) {
+static void obd_zombie_export_add(struct obd_export *exp)
+{
 	spin_lock(&exp->exp_obd->obd_dev_lock);
 	LASSERT(!list_empty(&exp->exp_obd_chain));
 	list_del_init(&exp->exp_obd_chain);
@@ -1658,7 +1659,8 @@
 /**
  * Add import to the obd_zombie thread and notify it.
  */
-static void obd_zombie_import_add(struct obd_import *imp) {
+static void obd_zombie_import_add(struct obd_import *imp)
+{
 	LASSERT(imp->imp_sec == NULL);
 	LASSERT(imp->imp_rq_pool == NULL);
 	spin_lock(&obd_zombie_impexp_lock);
@@ -1819,7 +1821,7 @@
 	int len = kuc_len(payload_len);
 
 	lh = kzalloc(len, GFP_NOFS);
-	if (lh == NULL)
+	if (!lh)
 		return ERR_PTR(-ENOMEM);
 
 	lh->kuc_magic = KUC_MAGIC;
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 84f75dc..6218ef3 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -385,7 +385,7 @@
 	return 0;
 }
 
-struct seq_operations obd_device_list_sops = {
+static const struct seq_operations obd_device_list_sops = {
 	.start = obd_device_list_seq_start,
 	.stop = obd_device_list_seq_stop,
 	.next = obd_device_list_seq_next,
@@ -406,7 +406,7 @@
 	return 0;
 }
 
-struct file_operations obd_device_list_fops = {
+static const struct file_operations obd_device_list_fops = {
 	.owner   = THIS_MODULE,
 	.open    = obd_device_list_open,
 	.read    = seq_read,
@@ -423,7 +423,7 @@
 
 int class_procfs_init(void)
 {
-	int rc = 0;
+	int rc = -ENOMEM;
 	struct dentry *file;
 
 	lustre_kobj = kobject_create_and_add("lustre", fs_kobj);
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 54f0a81..1515163 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -50,331 +50,119 @@
 #include "../../include/obd_support.h"
 #include "../../include/lprocfs_status.h"
 
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *obd_table_header;
-#endif
+struct static_lustre_uintvalue_attr {
+	struct {
+		struct attribute attr;
+		ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+				char *buf);
+		ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+				 const char *buf, size_t len);
+	} u;
+	int *value;
+};
 
-#ifdef CONFIG_SYSCTL
-static int proc_set_timeout(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
+static ssize_t static_uintvalue_show(struct kobject *kobj,
+				    struct attribute *attr,
+				    char *buf)
+{
+	struct static_lustre_uintvalue_attr *lattr = (void *)attr;
+
+	return sprintf(buf, "%d\n", *lattr->value);
+}
+
+static ssize_t static_uintvalue_store(struct kobject *kobj,
+				     struct attribute *attr,
+				     const char *buffer, size_t count)
+{
+	struct static_lustre_uintvalue_attr *lattr  = (void *)attr;
+	int rc;
+	unsigned int val;
+
+	rc = kstrtouint(buffer, 10, &val);
+	if (rc)
+		return rc;
+
+	*lattr->value = val;
+
+	return count;
+}
+
+#define LUSTRE_STATIC_UINT_ATTR(name, value) \
+static struct static_lustre_uintvalue_attr lustre_sattr_##name =	\
+					{__ATTR(name, 0644,		\
+						static_uintvalue_show,	\
+						static_uintvalue_store),\
+					  value }
+
+LUSTRE_STATIC_UINT_ATTR(timeout, &obd_timeout);
+
+static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
+				 char *buf)
+{
+	return sprintf(buf, "%ul\n",
+			obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
+}
+
+static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
+				  const char *buffer, size_t count)
 {
 	int rc;
+	unsigned long val;
 
-	rc = proc_dointvec(table, write, buffer, lenp, ppos);
-	if (ldlm_timeout >= obd_timeout)
-		ldlm_timeout = max(obd_timeout / 3, 1U);
-	return rc;
-}
+	rc = kstrtoul(buffer, 10, &val);
+	if (rc)
+		return rc;
 
-static int proc_memory_alloc(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	char buf[22];
-	int len;
+	val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
 
-	if (!*lenp || (*ppos && !write)) {
-		*lenp = 0;
-		return 0;
-	}
-	if (write)
+	if (val > ((totalram_pages / 10) * 9)) {
+		/* Somebody wants to assign too much memory to dirty pages */
 		return -EINVAL;
-
-	len = snprintf(buf, sizeof(buf), "%llu\n", obd_memory_sum());
-	if (len > *lenp)
-		len = *lenp;
-	buf[len] = '\0';
-	if (copy_to_user(buffer, buf, len))
-		return -EFAULT;
-	*lenp = len;
-	*ppos += *lenp;
-	return 0;
-}
-
-static int proc_pages_alloc(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	char buf[22];
-	int len;
-
-	if (!*lenp || (*ppos && !write)) {
-		*lenp = 0;
-		return 0;
 	}
-	if (write)
+
+	if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
+		/* Less than 4 Mb for dirty cache is also bad */
 		return -EINVAL;
+	}
 
-	len = snprintf(buf, sizeof(buf), "%llu\n", obd_pages_sum());
-	if (len > *lenp)
-		len = *lenp;
-	buf[len] = '\0';
-	if (copy_to_user(buffer, buf, len))
-		return -EFAULT;
-	*lenp = len;
-	*ppos += *lenp;
-	return 0;
+	obd_max_dirty_pages = val;
+
+	return count;
 }
+LUSTRE_RW_ATTR(max_dirty_mb);
 
-static int proc_mem_max(struct ctl_table *table, int write, void __user *buffer,
-		 size_t *lenp, loff_t *ppos)
-{
-	char buf[22];
-	int len;
+LUSTRE_STATIC_UINT_ATTR(debug_peer_on_timeout, &obd_debug_peer_on_timeout);
+LUSTRE_STATIC_UINT_ATTR(dump_on_timeout, &obd_dump_on_timeout);
+LUSTRE_STATIC_UINT_ATTR(dump_on_eviction, &obd_dump_on_eviction);
+LUSTRE_STATIC_UINT_ATTR(at_min, &at_min);
+LUSTRE_STATIC_UINT_ATTR(at_max, &at_max);
+LUSTRE_STATIC_UINT_ATTR(at_extra, &at_extra);
+LUSTRE_STATIC_UINT_ATTR(at_early_margin, &at_early_margin);
+LUSTRE_STATIC_UINT_ATTR(at_history, &at_history);
 
-	if (!*lenp || (*ppos && !write)) {
-		*lenp = 0;
-		return 0;
-	}
-	if (write)
-		return -EINVAL;
-
-	len = snprintf(buf, sizeof(buf), "%llu\n", obd_memory_max());
-	if (len > *lenp)
-		len = *lenp;
-	buf[len] = '\0';
-	if (copy_to_user(buffer, buf, len))
-		return -EFAULT;
-	*lenp = len;
-	*ppos += *lenp;
-	return 0;
-}
-
-static int proc_pages_max(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	char buf[22];
-	int len;
-
-	if (!*lenp || (*ppos && !write)) {
-		*lenp = 0;
-		return 0;
-	}
-	if (write)
-		return -EINVAL;
-
-	len = snprintf(buf, sizeof(buf), "%llu\n", obd_pages_max());
-	if (len > *lenp)
-		len = *lenp;
-	buf[len] = '\0';
-	if (copy_to_user(buffer, buf, len))
-		return -EFAULT;
-	*lenp = len;
-	*ppos += *lenp;
-	return 0;
-}
-
-static int proc_max_dirty_pages_in_mb(struct ctl_table *table, int write,
-			       void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int rc = 0;
-
-	if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
-		*lenp = 0;
-		return 0;
-	}
-	if (write) {
-		rc = lprocfs_write_frac_helper(buffer, *lenp,
-					       (unsigned int *)table->data,
-					       1 << (20 - PAGE_CACHE_SHIFT));
-		/* Don't allow them to let dirty pages exceed 90% of system
-		 * memory and set a hard minimum of 4MB. */
-		if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
-			CERROR("Refusing to set max dirty pages to %u, which is more than 90%% of available RAM; setting to %lu\n",
-			       obd_max_dirty_pages,
-			       ((totalram_pages / 10) * 9));
-			obd_max_dirty_pages = (totalram_pages / 10) * 9;
-		} else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
-			obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
-		}
-	} else {
-		char buf[21];
-		int len;
-
-		len = lprocfs_read_frac_helper(buf, sizeof(buf),
-					       *(unsigned int *)table->data,
-					       1 << (20 - PAGE_CACHE_SHIFT));
-		if (len > *lenp)
-			len = *lenp;
-		buf[len] = '\0';
-		if (copy_to_user(buffer, buf, len))
-			return -EFAULT;
-		*lenp = len;
-	}
-	*ppos += *lenp;
-	return rc;
-}
-
-static int proc_alloc_fail_rate(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
-{
-	int rc	  = 0;
-
-	if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
-		*lenp = 0;
-		return 0;
-	}
-	if (write) {
-		rc = lprocfs_write_frac_helper(buffer, *lenp,
-					       (unsigned int *)table->data,
-					       OBD_ALLOC_FAIL_MULT);
-	} else {
-		char buf[21];
-		int  len;
-
-		len = lprocfs_read_frac_helper(buf, 21,
-					       *(unsigned int *)table->data,
-					       OBD_ALLOC_FAIL_MULT);
-		if (len > *lenp)
-			len = *lenp;
-		buf[len] = '\0';
-		if (copy_to_user(buffer, buf, len))
-			return -EFAULT;
-		*lenp = len;
-	}
-	*ppos += *lenp;
-	return rc;
-}
-
-static struct ctl_table obd_table[] = {
-	{
-		.procname = "timeout",
-		.data     = &obd_timeout,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_set_timeout
-	},
-	{
-		.procname = "debug_peer_on_timeout",
-		.data     = &obd_debug_peer_on_timeout,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec
-	},
-	{
-		.procname = "dump_on_timeout",
-		.data     = &obd_dump_on_timeout,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec
-	},
-	{
-		.procname = "dump_on_eviction",
-		.data     = &obd_dump_on_eviction,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec
-	},
-	{
-		.procname = "memused",
-		.data     = NULL,
-		.maxlen   = 0,
-		.mode     = 0444,
-		.proc_handler = &proc_memory_alloc
-	},
-	{
-		.procname = "pagesused",
-		.data     = NULL,
-		.maxlen   = 0,
-		.mode     = 0444,
-		.proc_handler = &proc_pages_alloc
-	},
-	{
-		.procname = "memused_max",
-		.data     = NULL,
-		.maxlen   = 0,
-		.mode     = 0444,
-		.proc_handler = &proc_mem_max
-	},
-	{
-		.procname = "pagesused_max",
-		.data     = NULL,
-		.maxlen   = 0,
-		.mode     = 0444,
-		.proc_handler = &proc_pages_max
-	},
-	{
-		.procname = "ldlm_timeout",
-		.data     = &ldlm_timeout,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_set_timeout
-	},
-	{
-		.procname = "alloc_fail_rate",
-		.data     = &obd_alloc_fail_rate,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_alloc_fail_rate
-	},
-	{
-		.procname = "max_dirty_mb",
-		.data     = &obd_max_dirty_pages,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_max_dirty_pages_in_mb
-	},
-	{
-		.procname = "at_min",
-		.data     = &at_min,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec,
-	},
-	{
-		.procname = "at_max",
-		.data     = &at_max,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec,
-	},
-	{
-		.procname = "at_extra",
-		.data     = &at_extra,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec,
-	},
-	{
-		.procname = "at_early_margin",
-		.data     = &at_early_margin,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec,
-	},
-	{
-		.procname = "at_history",
-		.data     = &at_history,
-		.maxlen   = sizeof(int),
-		.mode     = 0644,
-		.proc_handler = &proc_dointvec,
-	},
-	{}
+static struct attribute *lustre_attrs[] = {
+	&lustre_sattr_timeout.u.attr,
+	&lustre_attr_max_dirty_mb.attr,
+	&lustre_sattr_debug_peer_on_timeout.u.attr,
+	&lustre_sattr_dump_on_timeout.u.attr,
+	&lustre_sattr_dump_on_eviction.u.attr,
+	&lustre_sattr_at_min.u.attr,
+	&lustre_sattr_at_max.u.attr,
+	&lustre_sattr_at_extra.u.attr,
+	&lustre_sattr_at_early_margin.u.attr,
+	&lustre_sattr_at_history.u.attr,
+	NULL,
 };
 
-static struct ctl_table parent_table[] = {
-	{
-		.procname = "lustre",
-		.data     = NULL,
-		.maxlen   = 0,
-		.mode     = 0555,
-		.child    = obd_table
-	},
-	{}
+static struct attribute_group lustre_attr_group = {
+	.attrs = lustre_attrs,
 };
-#endif
 
-void obd_sysctl_init(void)
+int obd_sysctl_init(void)
 {
-#ifdef CONFIG_SYSCTL
-	if (!obd_table_header)
-		obd_table_header = register_sysctl_table(parent_table);
-#endif
+	return sysfs_create_group(lustre_kobj, &lustre_attr_group);
 }
 
 void obd_sysctl_clean(void)
 {
-#ifdef CONFIG_SYSCTL
-	if (obd_table_header)
-		unregister_sysctl_table(obd_table_header);
-	obd_table_header = NULL;
-#endif
 }
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 4fa52d1..facc835 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -61,7 +61,7 @@
 	struct llog_handle *loghandle;
 
 	loghandle = kzalloc(sizeof(*loghandle), GFP_NOFS);
-	if (loghandle == NULL)
+	if (!loghandle)
 		return NULL;
 
 	init_rwsem(&loghandle->lgh_lock);
@@ -208,7 +208,7 @@
 	LASSERT(handle->lgh_hdr == NULL);
 
 	llh = kzalloc(sizeof(*llh), GFP_NOFS);
-	if (llh == NULL)
+	if (!llh)
 		return -ENOMEM;
 	handle->lgh_hdr = llh;
 	/* first assign flags to use llog_client_ops */
@@ -435,7 +435,7 @@
 	int		      rc;
 
 	lpi = kzalloc(sizeof(*lpi), GFP_NOFS);
-	if (lpi == NULL) {
+	if (!lpi) {
 		CERROR("cannot alloc pointer\n");
 		return -ENOMEM;
 	}
@@ -907,7 +907,7 @@
 		  char *name)
 {
 	struct llog_handle	*llh;
-	int			 rc = 0;
+	int			 rc;
 
 	rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
 	if (rc < 0) {
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index 17e7c18..08d1f0e 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -275,7 +275,7 @@
 		return NULL;
 
 	dest = kzalloc(MAX_STRING_SIZE + 1, GFP_KERNEL);
-	if (dest == NULL)
+	if (!dest)
 		return NULL;
 
 	va_start(ap, format);
@@ -329,7 +329,7 @@
 
 void ldebugfs_remove(struct dentry **entryp)
 {
-	debugfs_remove(*entryp);
+	debugfs_remove_recursive(*entryp);
 	*entryp = NULL;
 }
 EXPORT_SYMBOL(ldebugfs_remove);
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 4d9b633..8e47232 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -602,7 +602,7 @@
 	struct lu_site_bkt_data *bkt;
 
 	o = lu_object_alloc(env, dev, f, conf);
-	if (unlikely(IS_ERR(o)))
+	if (IS_ERR(o))
 		return o;
 
 	hs = dev->ld_site->ls_obj_hash;
@@ -666,7 +666,7 @@
 	 * operations, including fld queries, inode loading, etc.
 	 */
 	o = lu_object_alloc(env, dev, f, conf);
-	if (unlikely(IS_ERR(o)))
+	if (IS_ERR(o))
 		return o;
 
 	LASSERT(lu_fid_eq(lu_object_fid(o), f));
@@ -674,7 +674,7 @@
 	cfs_hash_bd_lock(hs, &bd, 1);
 
 	shadow = htable_lookup(s, &bd, f, waiter, &version);
-	if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
+	if (likely(PTR_ERR(shadow) == -ENOENT)) {
 		struct lu_site_bkt_data *bkt;
 
 		bkt = cfs_hash_bd_extra_get(hs, &bd);
@@ -1558,7 +1558,7 @@
 			LINVRNT(key->lct_index == i);
 
 			value = key->lct_init(ctx, key);
-			if (unlikely(IS_ERR(value)))
+			if (IS_ERR(value))
 				return PTR_ERR(value);
 
 			if (!(ctx->lc_tags & LCT_NOREF))
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5cc6435..d6184f8 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -105,7 +105,7 @@
 		return -EOVERFLOW;
 
 	data = kzalloc(sizeof(*data), GFP_NOFS);
-	if (data == NULL)
+	if (!data)
 		return -ENOMEM;
 
 	obd_str2uuid(&data->un_uuid, uuid);
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index fbdb748..93805ac 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -835,7 +835,7 @@
 	CDEBUG(D_CONFIG, "Add profile %s\n", prof);
 
 	lprof = kzalloc(sizeof(*lprof), GFP_NOFS);
-	if (lprof == NULL)
+	if (!lprof)
 		return -ENOMEM;
 	INIT_LIST_HEAD(&lprof->lp_list);
 
@@ -979,7 +979,7 @@
 	new_len = LUSTRE_CFG_BUFLEN(cfg, 1) + strlen(new_name) - name_len;
 
 	new_param = kzalloc(new_len, GFP_NOFS);
-	if (new_param == NULL)
+	if (!new_param)
 		return ERR_PTR(-ENOMEM);
 
 	strcpy(new_param, new_name);
@@ -987,7 +987,7 @@
 		strcat(new_param, value);
 
 	bufs = kzalloc(sizeof(*bufs), GFP_NOFS);
-	if (bufs == NULL) {
+	if (!bufs) {
 		kfree(new_param);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -1123,12 +1123,7 @@
 		goto out;
 	}
 	case LCFG_SET_LDLM_TIMEOUT: {
-		CDEBUG(D_IOCTL, "changing lustre ldlm_timeout from %d to %d\n",
-		       ldlm_timeout, lcfg->lcfg_num);
-		ldlm_timeout = max(lcfg->lcfg_num, 1U);
-		if (ldlm_timeout >= obd_timeout)
-			ldlm_timeout = max(obd_timeout / 3, 1U);
-		ldlm_timeout_set = 1;
+		/* ldlm_timeout is not used on the client */
 		err = 0;
 		goto out;
 	}
@@ -1461,7 +1456,7 @@
 			inst_len = LUSTRE_CFG_BUFLEN(lcfg, 0) +
 				   sizeof(clli->cfg_instance) * 2 + 4;
 			inst_name = kzalloc(inst_len, GFP_NOFS);
-			if (inst_name == NULL) {
+			if (!inst_name) {
 				rc = -ENOMEM;
 				goto out;
 			}
@@ -1639,7 +1634,7 @@
 	int	 rc = 0;
 
 	outstr = kzalloc(256, GFP_NOFS);
-	if (outstr == NULL)
+	if (!outstr)
 		return -ENOMEM;
 
 	if (rec->lrh_type == OBD_CFG_REC) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index ce4a71f..7c5bab3 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -85,7 +85,7 @@
 	LASSERT(cfg);
 
 	bufs = kzalloc(sizeof(*bufs), GFP_NOFS);
-	if (bufs == NULL)
+	if (!bufs)
 		return -ENOMEM;
 
 	/* mgc_process_config */
@@ -247,18 +247,18 @@
 	mutex_lock(&mgc_start_lock);
 
 	len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
-	mgcname = kzalloc(len, GFP_NOFS);
-	niduuid = kzalloc(len + 2, GFP_NOFS);
+	mgcname = kasprintf(GFP_NOFS,
+			    "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
+	niduuid = kasprintf(GFP_NOFS, "%s_%x", mgcname, i);
 	if (!mgcname || !niduuid) {
 		rc = -ENOMEM;
 		goto out_free;
 	}
-	sprintf(mgcname, "%s%s", LUSTRE_MGC_OBDNAME, libcfs_nid2str(nid));
 
 	mgssec = lsi->lsi_lmd->lmd_mgssec ? lsi->lsi_lmd->lmd_mgssec : "";
 
 	data = kzalloc(sizeof(*data), GFP_NOFS);
-	if (data == NULL) {
+	if (!data) {
 		rc = -ENOMEM;
 		goto out_free;
 	}
@@ -326,7 +326,6 @@
 
 	/* Add the primary nids for the MGS */
 	i = 0;
-	sprintf(niduuid, "%s_%x", mgcname, i);
 	if (IS_SERVER(lsi)) {
 		ptr = lsi->lsi_lmd->lmd_mgs;
 		if (IS_MGS(lsi)) {
@@ -885,7 +884,7 @@
 		length = tail - ptr;
 
 	lmd->lmd_mgssec = kzalloc(length + 1, GFP_NOFS);
-	if (lmd->lmd_mgssec == NULL)
+	if (!lmd->lmd_mgssec)
 		return -ENOMEM;
 
 	memcpy(lmd->lmd_mgssec, ptr, length);
@@ -911,7 +910,7 @@
 		length = tail - ptr;
 
 	*handle = kzalloc(length + 1, GFP_NOFS);
-	if (*handle == NULL)
+	if (!*handle)
 		return -ENOMEM;
 
 	memcpy(*handle, ptr, length);
@@ -941,7 +940,7 @@
 		oldlen = strlen(lmd->lmd_mgs) + 1;
 
 	mgsnid = kzalloc(oldlen + length + 1, GFP_NOFS);
-	if (mgsnid == NULL)
+	if (!mgsnid)
 		return -ENOMEM;
 
 	if (lmd->lmd_mgs != NULL) {
@@ -983,7 +982,7 @@
 	lmd->lmd_magic = LMD_MAGIC;
 
 	lmd->lmd_params = kzalloc(4096, GFP_NOFS);
-	if (lmd->lmd_params == NULL)
+	if (!lmd->lmd_params)
 		return -ENOMEM;
 	lmd->lmd_params[0] = '\0';
 
@@ -1120,10 +1119,9 @@
 		/* Remove leading /s from fsname */
 		while (*++s1 == '/') ;
 		/* Freed in lustre_free_lsi */
-		lmd->lmd_profile = kzalloc(strlen(s1) + 8, GFP_NOFS);
+		lmd->lmd_profile = kasprintf(GFP_NOFS, "%s-client", s1);
 		if (!lmd->lmd_profile)
 			return -ENOMEM;
-		sprintf(lmd->lmd_profile, "%s-client", s1);
 	}
 
 	/* Freed in lustre_free_lsi */
@@ -1281,7 +1279,7 @@
 	.mount	= lustre_mount,
 	.kill_sb      = lustre_kill_super,
 	.fs_flags     = FS_BINARY_MOUNTDATA | FS_REQUIRES_DEV |
-			FS_HAS_FIEMAP | FS_RENAME_DOES_D_MOVE,
+			FS_RENAME_DOES_D_MOVE,
 };
 MODULE_ALIAS_FS("lustre");
 
diff --git a/drivers/staging/lustre/lustre/obdclass/uuid.c b/drivers/staging/lustre/lustre/obdclass/uuid.c
index ff0a01b..b0b0157 100644
--- a/drivers/staging/lustre/lustre/obdclass/uuid.c
+++ b/drivers/staging/lustre/lustre/obdclass/uuid.c
@@ -43,40 +43,8 @@
 #include "../include/obd_support.h"
 #include "../include/obd_class.h"
 
-
-static inline __u32 consume(int nob, __u8 **ptr)
-{
-	__u32 value;
-
-	LASSERT(nob <= sizeof(value));
-
-	for (value = 0; nob > 0; --nob)
-		value = (value << 8) | *((*ptr)++);
-	return value;
-}
-
-#define CONSUME(val, ptr) (val) = consume(sizeof(val), (ptr))
-
-static void uuid_unpack(class_uuid_t in, __u16 *uu, int nr)
-{
-	__u8 *ptr = in;
-
-	LASSERT(nr * sizeof(*uu) == sizeof(class_uuid_t));
-
-	while (nr-- > 0)
-		CONSUME(uu[nr], &ptr);
-}
-
 void class_uuid_unparse(class_uuid_t uu, struct obd_uuid *out)
 {
-	/* uu as an array of __u16's */
-	__u16 uuid[sizeof(class_uuid_t) / sizeof(__u16)];
-
-	CLASSERT(ARRAY_SIZE(uuid) == 8);
-
-	uuid_unpack(uu, uuid, ARRAY_SIZE(uuid));
-	sprintf(out->uuid, "%04x%04x-%04x-%04x-%04x-%04x%04x%04x",
-		uuid[0], uuid[1], uuid[2], uuid[3],
-		uuid[4], uuid[5], uuid[6], uuid[7]);
+	sprintf(out->uuid, "%pU", uu);
 }
 EXPORT_SYMBOL(class_uuid_unparse);
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 0222fd2..27bd170 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -480,11 +480,11 @@
 
 	LASSERT(*lsmp == NULL);
 	*lsmp = kzalloc(lsm_size, GFP_NOFS);
-	if (*lsmp == NULL)
+	if (!*lsmp)
 		return -ENOMEM;
 
 	(*lsmp)->lsm_oinfo[0] = kzalloc(sizeof(struct lov_oinfo), GFP_NOFS);
-	if ((*lsmp)->lsm_oinfo[0] == NULL) {
+	if (!(*lsmp)->lsm_oinfo[0]) {
 		kfree(*lsmp);
 		return -ENOMEM;
 	}
@@ -701,7 +701,7 @@
 	int cleanup = 0;
 
 	ed = kzalloc(sizeof(*ed), GFP_NOFS);
-	if (ed == NULL) {
+	if (!ed) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -1878,7 +1878,7 @@
 		return rc;
 
 	env = kzalloc(sizeof(*env), GFP_NOFS);
-	if (env == NULL)
+	if (!env)
 		return -ENOMEM;
 
 	rc = lu_env_init(env, LCT_DT_THREAD);
@@ -2049,7 +2049,7 @@
 	ec->ec_nstripes = 0;
 
 	ocd = kzalloc(sizeof(*ocd), GFP_NOFS);
-	if (ocd == NULL) {
+	if (!ocd) {
 		CERROR("Can't alloc ocd connecting to %s\n",
 		       lustre_cfg_string(lcfg, 1));
 		return -ENOMEM;
@@ -2139,7 +2139,7 @@
 	.o_disconnect  = echo_client_disconnect
 };
 
-int echo_client_init(void)
+static int echo_client_init(void)
 {
 	int rc;
 
@@ -2154,7 +2154,7 @@
 	return rc;
 }
 
-void echo_client_exit(void)
+static void echo_client_exit(void)
 {
 	class_unregister_type(LUSTRE_ECHO_CLIENT_NAME);
 	lu_kmem_fini(echo_caches);
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5592d32..c72035e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1837,12 +1837,6 @@
 		oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
 					oap_pending_item);
 		EASSERT(tmp->oe_owner == current, tmp);
-#if 0
-		if (overlapped(tmp, ext)) {
-			OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
-			EASSERT(0, ext);
-		}
-#endif
 		if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
 			CDEBUG(D_CACHE, "Do not permit different type of IO"
 					" for a same RPC\n");
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 9222c9f..91fdec4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -218,7 +218,7 @@
 	int rc;
 
 	od = kzalloc(sizeof(*od), GFP_NOFS);
-	if (od == NULL)
+	if (!od)
 		return ERR_PTR(-ENOMEM);
 
 	cl_device_init(&od->od_cl, t);
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 43dfa73..f9cf5ce 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -471,7 +471,7 @@
 			  struct cl_io *io)
 {
 	struct osc_page *opg = cl2osc_page(slice);
-	int rc = 0;
+	int rc;
 
 	rc = osc_flush_async_page(env, io, opg);
 	return rc;
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index f84b4c7..12113df 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -119,7 +119,7 @@
 
 	if (*lmmp == NULL) {
 		*lmmp = kzalloc(lmm_size, GFP_NOFS);
-		if (*lmmp == NULL)
+		if (!*lmmp)
 			return -ENOMEM;
 	}
 
@@ -1909,7 +1909,7 @@
 		mpflag = cfs_memory_pressure_get_and_set();
 
 	crattr = kzalloc(sizeof(*crattr), GFP_NOFS);
-	if (crattr == NULL) {
+	if (!crattr) {
 		rc = -ENOMEM;
 		goto out;
 	}
@@ -2665,7 +2665,7 @@
 
 		buf = NULL;
 		len = 0;
-		if (obd_ioctl_getdata(&buf, &len, (void *)uarg)) {
+		if (obd_ioctl_getdata(&buf, &len, uarg)) {
 			err = -EINVAL;
 			goto out;
 		}
@@ -2695,7 +2695,7 @@
 
 		memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
 
-		err = copy_to_user((void *)uarg, buf, len);
+		err = copy_to_user(uarg, buf, len);
 		if (err)
 			err = -EFAULT;
 		obd_ioctl_freedata(buf, len);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index a12cd66..c83a34a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -971,7 +971,7 @@
 	struct ptlrpc_set_cbdata *cbdata;
 
 	cbdata = kzalloc(sizeof(*cbdata), GFP_NOFS);
-	if (cbdata == NULL)
+	if (!cbdata)
 		return -ENOMEM;
 
 	cbdata->psc_interpret = fn;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 8cb1929..c8ef9e5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -485,7 +485,7 @@
 	return rc;
 }
 
-void ptlrpc_ni_fini(void)
+static void ptlrpc_ni_fini(void)
 {
 	wait_queue_head_t waitq;
 	struct l_wait_info lwi;
@@ -529,7 +529,7 @@
 	return pid;
 }
 
-int ptlrpc_ni_init(void)
+static int ptlrpc_ni_init(void)
 {
 	int rc;
 	lnet_pid_t pid;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index c9b8481..1eae389 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -555,14 +555,12 @@
 	imp_conn->oic_last_attempt = cfs_time_current_64();
 
 	/* switch connection, don't mind if it's same as the current one */
-	if (imp->imp_connection)
-		ptlrpc_connection_put(imp->imp_connection);
+	ptlrpc_connection_put(imp->imp_connection);
 	imp->imp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
 
 	dlmexp = class_conn2export(&imp->imp_dlm_handle);
 	LASSERT(dlmexp != NULL);
-	if (dlmexp->exp_connection)
-		ptlrpc_connection_put(dlmexp->exp_connection);
+	ptlrpc_connection_put(dlmexp->exp_connection);
 	dlmexp->exp_connection = ptlrpc_connection_addref(imp_conn->oic_conn);
 	class_export_put(dlmexp);
 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index aaaabbf..53f9af1 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -652,7 +652,7 @@
 		return -EINVAL;
 
 	cmd = kzalloc(LPROCFS_NRS_WR_MAX_CMD, GFP_NOFS);
-	if (cmd == NULL)
+	if (!cmd)
 		return -ENOMEM;
 	/**
 	 * strsep() modifies its argument, so keep a copy
@@ -819,7 +819,7 @@
 	}
 
 	srhi = kzalloc(sizeof(*srhi), GFP_NOFS);
-	if (srhi == NULL)
+	if (!srhi)
 		return NULL;
 
 	srhi->srhi_seq = 0;
@@ -1219,7 +1219,7 @@
 	char *tmpbuf;
 
 	kbuf = kzalloc(BUFLEN, GFP_NOFS);
-	if (kbuf == NULL)
+	if (!kbuf)
 		return -ENOMEM;
 
 	/*
@@ -1303,7 +1303,7 @@
 		return -EINVAL;
 
 	kbuf = kzalloc(count + 1, GFP_NOFS);
-	if (kbuf == NULL)
+	if (!kbuf)
 		return -ENOMEM;
 
 	if (copy_from_user(kbuf, buffer, count)) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 9516aca..d37cdd5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -1156,7 +1156,7 @@
 	}
 
 	desc = kzalloc(sizeof(*desc), GFP_NOFS);
-	if (desc == NULL) {
+	if (!desc) {
 		rc = -ENOMEM;
 		goto fail;
 	}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 2787bfd..84937ad 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -52,6 +52,8 @@
 #include "../include/obd_cksum.h"
 #include "../include/lustre/ll_fiemap.h"
 
+#include "ptlrpc_internal.h"
+
 static inline int lustre_msg_hdr_size_v2(int count)
 {
 	return cfs_size_round(offsetof(struct lustre_msg_v2,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index d05c37c..f8edb79 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -318,8 +318,6 @@
 
 	strcpy(pinger_thread.t_name, "ll_ping");
 
-	/* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
-	 * just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
 	rc = PTR_ERR(kthread_run(ptlrpc_pinger_main, &pinger_thread,
 				 "%s", pinger_thread.t_name));
 	if (IS_ERR_VALUE(rc)) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index 5268887..ae99180 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -51,7 +51,7 @@
 extern struct mutex pinger_mutex;
 extern struct mutex ptlrpcd_mutex;
 
-__init int ptlrpc_init(void)
+static int __init ptlrpc_init(void)
 {
 	int rc, cleanup_phase = 0;
 
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index e591cff..17cc81d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -739,7 +739,7 @@
 
 	size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
 	ptlrpcds = kzalloc(size, GFP_NOFS);
-	if (ptlrpcds == NULL) {
+	if (!ptlrpcds) {
 		rc = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index 69d73c4..2ee3e8b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -415,7 +415,7 @@
 
 	for (i = 0; i < npools; i++) {
 		pools[i] = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-		if (pools[i] == NULL)
+		if (!pools[i])
 			goto out_pools;
 
 		for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 31da43e..e7f2f33 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -564,7 +564,7 @@
 		return NULL;
 
 	conf = kzalloc(sizeof(*conf), GFP_NOFS);
-	if (conf == NULL)
+	if (!conf)
 		return NULL;
 
 	strcpy(conf->sc_fsname, fsname);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 53ce0d1..a243db6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -444,7 +444,7 @@
 	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
 
 	plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
-	if (plsec == NULL)
+	if (!plsec)
 		return NULL;
 
 	/*
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 9117f1c..003344c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -43,7 +43,7 @@
 #include "ptlrpc_internal.h"
 
 /* The following are visible and mutable through /sys/module/ptlrpc */
-int test_req_buffer_pressure = 0;
+int test_req_buffer_pressure;
 module_param(test_req_buffer_pressure, int, 0444);
 MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
 module_param(at_min, int, 0644);
@@ -69,7 +69,7 @@
 /** Used to protect the \e ptlrpc_all_services list */
 struct mutex ptlrpc_all_services_mutex;
 
-struct ptlrpc_request_buffer_desc *
+static struct ptlrpc_request_buffer_desc *
 ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
 {
 	struct ptlrpc_service *svc = svcpt->scp_service;
@@ -101,7 +101,7 @@
 	return rqbd;
 }
 
-void
+static void
 ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
 {
 	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
@@ -118,7 +118,7 @@
 	kfree(rqbd);
 }
 
-int
+static int
 ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
 {
 	struct ptlrpc_service *svc = svcpt->scp_service;
@@ -732,7 +732,7 @@
 
 	service = kzalloc(offsetof(struct ptlrpc_service, srv_parts[ncpts]),
 			  GFP_NOFS);
-	if (service == NULL) {
+	if (!service) {
 		kfree(cpts);
 		return ERR_PTR(-ENOMEM);
 	}
@@ -2298,7 +2298,7 @@
 	}
 
 	env = kzalloc(sizeof(*env), GFP_NOFS);
-	if (env == NULL) {
+	if (!env) {
 		rc = -ENOMEM;
 		goto out_srv_fini;
 	}
@@ -2826,9 +2826,7 @@
 	ptlrpc_stop_hr_threads();
 
 	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
-		if (hrp->hrp_thrs != NULL) {
-			kfree(hrp->hrp_thrs);
-		}
+		kfree(hrp->hrp_thrs);
 	}
 
 	cfs_percpt_free(ptlrpc_hr.hr_partitions);
@@ -3054,7 +3052,7 @@
  * Right now, it just checks to make sure that requests aren't languishing
  * in the queue.  We'll use this health check to govern whether a node needs
  * to be shot, so it's intentionally non-aggressive. */
-int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
+static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
 {
 	struct ptlrpc_request *request = NULL;
 	struct timeval right_now;
diff --git a/drivers/staging/lustre/sysfs-fs-lustre b/drivers/staging/lustre/sysfs-fs-lustre
index 1e302e8..873e2cf 100644
--- a/drivers/staging/lustre/sysfs-fs-lustre
+++ b/drivers/staging/lustre/sysfs-fs-lustre
@@ -40,6 +40,109 @@
 			       e.g. dd.1253
 		nodelocal - use jobid_name value from above.
 
+What:		/sys/fs/lustre/timeout
+Date:		June 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls the "lustre timeout" variable, also known as
+		obd_timeout in older manuals. In the past obd_timeout was of
+		paramount importance, as it was the timeout value used
+		everywhere and the one other timeouts were derived from.
+		These days it is much less important, as network timeouts
+		are mostly determined by AT (adaptive timeouts).
+		Unit: seconds, default: 100
+
+What:		/sys/fs/lustre/max_dirty_mb
+Date:		June 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls the total amount of dirty cache (in megabytes)
+		allowed across all mounted lustre filesystems.
+		Since writeout of dirty pages in Lustre is somewhat expensive,
+		allowing too many dirty pages might lead to performance
+		degradation as the kernel desperately tries to find pages to
+		free or write out.
+		Default 1/2 RAM. Min value 4, max value 9/10 of RAM.
+
+What:		/sys/fs/lustre/debug_peer_on_timeout
+Date:		June 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls whether LNet debug information should be printed
+		when an RPC timeout occurs.
+		0 disabled (default)
+		1 enabled
+
+What:		/sys/fs/lustre/dump_on_timeout
+Date:		June 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls whether the Lustre debug log should be dumped when an
+		RPC timeout occurs. This is useful if your debug buffer
+		typically rolls over by the time you notice RPC timeouts.
+
+What:		/sys/fs/lustre/dump_on_eviction
+Date:		June 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls whether the Lustre debug log should be dumped when
+		this client is evicted from one of the servers.
+		This is useful if your debug buffer typically rolls over
+		by the time you notice the eviction event.
+
+What:		/sys/fs/lustre/at_min
+Date:		July 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls the minimum adaptive timeout in seconds. If you
+		encounter a case where clients time out because the
+		server-reported processing time is too short, you might
+		consider increasing this value.
+		One common cause of this is an underlying network with
+		unpredictably long delays.
+		Default: 0
+
+What:		/sys/fs/lustre/at_max
+Date:		July 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls the maximum adaptive timeout in seconds. If the
+		at_max timeout is reached for an RPC, the RPC will time out.
+		Some genuinely slow network hardware might warrant increasing
+		this value.
+		Setting this value to 0 disables the Adaptive Timeouts
+		functionality and the old-style obd_timeout value is used
+		instead.
+		Default: 600
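+		For example, to read the current limit and raise it
+		(illustrative value; assumes the file is writable on the
+		system):
+			cat /sys/fs/lustre/at_max
+			echo 1200 > /sys/fs/lustre/at_max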
+
+What:		/sys/fs/lustre/at_extra
+Date:		July 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls how much extra time, in seconds, to request for
+		requests that are still being processed. Normally a
+		server-side parameter, it is also used on the client for
+		responses to various LDLM ASTs that are handled with a special
+		server thread on the client.
+		This is a way for the servers to ask the clients not to time
+		out a request that has already reached the current servicing
+		time estimate and to give it some more time.
+		Default: 30
+
+What:		/sys/fs/lustre/at_early_margin
+Date:		July 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls when to send the early reply for requests that are
+		about to time out, expressed as an offset to the estimated
+		service time in seconds.
+		Default: 5
+
+What:		/sys/fs/lustre/at_history
+Date:		July 2015
+Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+		Controls for how many seconds to remember the slowest events
+		encountered by the adaptive timeouts code.
+		Default: 600
+
 What:		/sys/fs/lustre/llite/<fsname>-<uuid>/blocksize
 Date:		May 2015
 Contact:	"Oleg Drokin" <oleg.drokin@intel.com>
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index dc79844..465796a 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -327,9 +327,6 @@
  * time
  */
 
-/* So send_pulse can quickly convert microseconds to clocks */
-static unsigned long conv_us_to_clocks;
-
 static int init_timing_params(unsigned int new_duty_cycle,
 		unsigned int new_freq)
 {
@@ -344,7 +341,6 @@
 	/* How many clocks in a microsecond?, avoiding long long divide */
 	work = loops_per_sec;
 	work *= 4295;  /* 4295 = 2^32 / 1e6 */
-	conv_us_to_clocks = work >> 32;
 
 	/*
 	 * Carrier period in clocks, approach good up to 32GHz clock,
@@ -357,10 +353,9 @@
 	pulse_width = period * duty_cycle / 100;
 	space_width = period - pulse_width;
 	dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
-		"clk/jiffy=%ld, pulse=%ld, space=%ld, "
-		"conv_us_to_clocks=%ld\n",
+		"clk/jiffy=%ld, pulse=%ld, space=%ld\n",
 		freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
-		pulse_width, space_width, conv_us_to_clocks);
+		pulse_width, space_width);
 	return 0;
 }
 #else /* ! USE_RDTSC */
@@ -431,63 +426,14 @@
 	return ret;
 }
 
-#ifdef USE_RDTSC
-/* Version that uses Pentium rdtsc instruction to measure clocks */
-
-/*
- * This version does sub-microsecond timing using rdtsc instruction,
- * and does away with the fudged LIRC_SERIAL_TRANSMITTER_LATENCY
- * Implicitly i586 architecture...  - Steve
- */
-
-static long send_pulse_homebrew_softcarrier(unsigned long length)
-{
-	int flag;
-	unsigned long target, start, now;
-
-	/* Get going quick as we can */
-	rdtscl(start);
-	on();
-	/* Convert length from microseconds to clocks */
-	length *= conv_us_to_clocks;
-	/* And loop till time is up - flipping at right intervals */
-	now = start;
-	target = pulse_width;
-	flag = 1;
-	/*
-	 * FIXME: This looks like a hard busy wait, without even an occasional,
-	 * polite, cpu_relax() call.  There's got to be a better way?
-	 *
-	 * The i2c code has the result of a lot of bit-banging work, I wonder if
-	 * there's something there which could be helpful here.
-	 */
-	while ((now - start) < length) {
-		/* Delay till flip time */
-		do {
-			rdtscl(now);
-		} while ((now - start) < target);
-
-		/* flip */
-		if (flag) {
-			rdtscl(now);
-			off();
-			target += space_width;
-		} else {
-			rdtscl(now); on();
-			target += pulse_width;
-		}
-		flag = !flag;
-	}
-	rdtscl(now);
-	return ((now - start) - length) / conv_us_to_clocks;
-}
-#else /* ! USE_RDTSC */
 /* Version using udelay() */
 
 /*
  * here we use fixed point arithmetic, with 8
  * fractional bits.  that gets us within 0.1% or so of the right average
  * frequency, albeit with some jitter in pulse length - Steve
+ *
+ * This should use ndelay instead.
  */
 
 /* To match 8 fractional bits used for pulse/space length */
@@ -520,7 +466,6 @@
 	}
 	return (actual-length) >> 8;
 }
-#endif /* USE_RDTSC */
 
 static long send_pulse_homebrew(unsigned long length)
 {
diff --git a/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
new file mode 100644
index 0000000..380c137
--- /dev/null
+++ b/drivers/staging/most/Documentation/ABI/sysfs-class-most.txt
@@ -0,0 +1,181 @@
+What:		/sys/class/most/mostcore/aims
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		List of AIMs that have been loaded.
+Users:
+
+What:		/sys/class/most/mostcore/aims/<aim>/add_link
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is used to establish a connection between a channel and
+		the current AIM.
+Users:
+
+What:		/sys/class/most/mostcore/aims/<aim>/remove_link
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is used to remove a connected channel from the
+		current AIM.
+Users:
+
+What:		/sys/class/most/mostcore/devices
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		List of attached MOST interfaces.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/description
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Provides information about the interface type and the physical
+		location of the device. Hardware attached via USB, for instance,
+		might return <usb_device 1-1.1:1.0>
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/interface
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the type of peripheral interface the current device
+		uses.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		For every channel of the device a directory is created, whose
+		name is dictated by the HDM. This enables an application to
+		collect information about the channel's capabilities and
+		configure it.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/available_datatypes
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the data types the current channel can transport.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/available_directions
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the directions the current channel is capable of.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/number_of_packet_buffers
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the number of packet buffers the current channel can
+		handle.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/number_of_stream_buffers
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the number of streaming buffers the current channel can
+		handle.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/size_of_packet_buffer
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the size of a packet buffer the current channel can
+		handle.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/size_of_stream_buffer
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates the size of a streaming buffer the current channel can
+		handle.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/set_number_of_buffers
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the number of buffers of the current channel.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/set_buffer_size
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the size of a buffer of the current channel.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/set_direction
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the direction of the current channel.
+		The following strings will be accepted:
+			'dir_tx',
+			'dir_rx'
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/set_datatype
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the data type of the current channel.
+		The following strings will be accepted:
+			'control',
+			'async',
+			'sync',
+			'isoc_avp'
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/set_subbuffer_size
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the subbuffer size of the current channel.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/set_packets_per_xact
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		This is to configure the number of packets per transaction of
+		the current channel. This is only needed if the network
+		interface controller is attached via USB.
+Users:
+
+What:		/sys/class/most/mostcore/devices/<mdev>/<channel>/channel_starving
+Date:		June 2015
+KernelVersion:	4.3
+Contact:	Christian Gromm <christian.gromm@microchip.com>
+Description:
+		Indicates whether the current channel has run out of buffers.
+Users:
diff --git a/drivers/staging/most/Documentation/driver_usage.txt b/drivers/staging/most/Documentation/driver_usage.txt
new file mode 100644
index 0000000..a4dc0c3
--- /dev/null
+++ b/drivers/staging/most/Documentation/driver_usage.txt
@@ -0,0 +1,180 @@
+
+		Section 1 Overview
+
+The Media Oriented Systems Transport (MOST) driver gives Linux applications
+access to a MOST network: the Automotive Information Backbone and the
+de-facto standard for high-bandwidth automotive multimedia networking.
+
+MOST defines the protocol, hardware and software layers necessary to allow
+for the efficient and low-cost transport of control, real-time and packet
+data using a single medium (physical layer). Media currently in use are
+fiber optics, unshielded twisted pair cables (UTP) and coax cables. MOST
+also supports various speed grades up to 150 Mbps.
+For more information on MOST, visit the MOST Cooperation website:
+www.mostcooperation.com.
+
+Cars continue to evolve into sophisticated consumer electronics platforms,
+increasing the demand for reliable and simple solutions to support audio,
+video and data communications. MOST can be used to connect multiple
+consumer devices via optical or electrical physical layers directly to one
+another or in a network configuration. As a synchronous network, MOST
+provides excellent Quality of Service and seamless connectivity for
+audio/video streaming. Therefore, the driver perfectly fits the mission
+of Automotive Grade Linux to create open source software solutions for
+automotive applications.
+
+The driver consists basically of three layers. The hardware layer, the
+core layer and the application layer. The core layer consists of the core
+module only. This module handles the communication flow through all three
+layers, the configuration of the driver, the configuration interface
+representation in sysfs, and the buffer management.
+For each of the other two layers a selection of modules is provided. These
+modules can arbitrarily be combined to meet the needs of the desired
+system architecture. A module of the hardware layer is referred to as an
+HDM (hardware dependent module). Each module of this layer handles exactly
+one of the peripheral interfaces of a network interface controller (e.g.
+USB, MediaLB, I2C). A module of the application layer is referred to as an
+AIM (application interfacing module). The modules of this layer give access
+to MOST via one of the following ways: character devices, ALSA, Networking or
+V4L2.
+
+To physically access MOST, an Intelligent Network Interface Controller
+(INIC) is needed. For more information on available controllers visit:
+www.microchip.com
+
+
+
+		Section 1.1 Hardware Layer
+
+The hardware layer contains so-called hardware dependent modules (HDMs). For
+each peripheral interface the hardware supports, the driver has a suitable
+module that handles the interface.
+
+The HDMs encapsulate the peripheral interface specific knowledge of the driver
+and provide an easy way of extending the number of supported interfaces.
+Currently the following HDMs are available:
+
+	1) MediaLB (DIM2)
+	   Host wants to communicate with hardware via MediaLB.
+
+	2) I2C
+	   Host wants to communicate with the hardware via I2C.
+
+	3) USB
+	   Host wants to communicate with the hardware via USB.
+
+
+		Section 1.2 Core Layer
+
+The core layer contains the mostcore module only, which handles the driver
+configuration via sysfs, the buffer management and the data forwarding.
+
+
+
+		Section 1.3 Application Layer
+
+The application layer contains so-called application interfacing modules (AIMs).
+Depending on how the driver should interface to the application, one or more
+suitable modules can be selected.
+
+The AIMs encapsulate the application interface specific knowledge of the driver
+and provide access to user space or other kernel subsystems.
+Currently the following AIMs are available:
+
+	1) Character Device
+	   Applications can access the driver by means of character devices.
+
+	2) Networking
+	   Standard networking applications (e.g. iperf) can be used to access
+	   the driver via the networking subsystem.
+
+	3) Video4Linux (v4l2)
+	   Standard video applications (e.g. VLC) can be used to access the
+	   driver via the V4L subsystem.
+
+	4) Advanced Linux Sound Architecture (ALSA)
+	   Standard sound applications (e.g. aplay, arecord, audacity) can be
+	   used to access the driver via the ALSA subsystem.
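+
+To use the driver when everything is built modular, the mostcore module, at
+least one AIM and the HDM matching the attached hardware have to be loaded.
+A rough sketch follows; except for aim_cdev and aim_network, whose module
+names are given in their Kconfig help texts, the module names shown here are
+assumptions:
+
+        $ modprobe mostcore
+        $ modprobe aim_cdev
+        $ modprobe hdm_usb
+
+where "mostcore" and "hdm_usb" are assumed names; substitute the HDM that
+matches the hardware actually in use.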
+
+
+
+		Section 2 Configuration
+
+See ABI/sysfs-class-most.txt
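+
+A minimal configuration sketch (values are purely illustrative; the device
+and channel names follow the Cdev example in Section 4, and the attribute
+files are the ones documented in the ABI file referenced above):
+
+        $ cd /sys/class/most/mostcore/devices/mdev0/ep_81
+        $ echo 16 >set_number_of_buffers
+        $ echo 1024 >set_buffer_size
+        $ echo dir_rx >set_direction
+        $ echo sync >set_datatype
+        $ echo 4 >set_subbuffer_size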
+
+
+
+		Section 3 USB Padding
+
+When transceiving synchronous or isochronous data, the number of packets per
+USB transaction and the sub-buffer size need to be configured. The driver
+needs these values to apply the buffer padding expected by the hardware,
+which is done to optimize the USB transmission.
+
+When transmitting synchronous data the allocated channel width needs to be
+written to 'set_subbuffer_size'. Additionally, the number of MOST frames that
+should travel to the host within one USB transaction needs to be written to
+'packets_per_xact'.
+
+Internally the synchronous threshold is calculated as follows:
+
+	frame_size = set_subbuffer_size * packets_per_xact
+
+In case 'packets_per_xact' is set to 0xFF, the maximum number of packets
+allocated within one MOST frame is calculated such that they fit into _one_
+512 byte USB full packet.
+
+	frame_size = floor(MTU_USB / bandwidth_sync) * bandwidth_sync
+
+This frame_size is the number of synchronous data bytes within a USB
+transaction, which leaves MTU_USB - frame_size bytes for padding.
+
+When transmitting isochronous AVP data the desired packet size needs to be
+written to 'set_subbuffer_size' and hardware will always expect two isochronous
+packets within one USB transaction. This renders
+
+	MTU_USB - (2 * set_subbuffer_size)
+
+bytes for padding.
+
+Note that at least 2 times set_subbuffer_size bytes for isochronous data or
+set_subbuffer_size times packets_per_xact bytes for synchronous data need to
+be put in the transmission buffer and passed to the driver.
+
+Since HDMs are allowed to change a chosen configuration to best fit their
+constraints, it is recommended to always double-check the configuration by
+reading back the previously written files.
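+
+As a worked example with purely illustrative numbers, take a synchronous
+channel with 'set_subbuffer_size' = 12 (the allocated channel width in bytes)
+and MTU_USB = 512 (the full packet size mentioned above). With
+'packets_per_xact' = 32 the threshold is simply
+
+	frame_size = 12 * 32 = 384
+
+leaving 512 - 384 = 128 bytes for padding. With 'packets_per_xact' = 0xFF,
+and taking bandwidth_sync to be the configured channel width, this gives
+
+	frame_size = floor(512 / 12) * 12 = 504
+
+so only 512 - 504 = 8 bytes per USB packet are used for padding.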
+
+
+
+		Section 4 Routing Channels
+
+To connect a channel that has been configured as outlined above to an AIM and
+make it accessible to user space applications, the attribute file 'add_link' is
+used. To actually bind a channel to the AIM a string needs to be written to the
+file that complies with the following syntax:
+
+	"most_device:channel_name:link_name[.param]"
+
+The example above links the channel "channel_name" of the device "most_device"
+to the AIM. In case the AIM interfaces the VFS this would also create a device
+node "link_name" in the /dev directory. The parameter "param" is an AIM dependent
+string, which can be omitted in case the used AIM does not make any use of it.
+
+Cdev AIM example:
+        $ echo "mdev0:ep_81:my_rx_channel" >add_link
+        $ echo "mdev0:ep_81" >add_link
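+
+Once the link is established, the created node can be used like any other
+character device; for instance (illustrative):
+
+        $ cat /dev/my_rx_channel >dump.bin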
+
+
+Sound/ALSA AIM example:
+
+The sound/ALSA AIM needs an additional parameter to determine the audio resolution
+that is going to be used. The following strings can be used:
+
+	- "1x8" (Mono)
+	- "2x16" (16-bit stereo)
+	- "2x24" (24-bit stereo)
+	- "2x32" (32-bit stereo)
+
+        $ echo "mdev0:ep_81:audio_rx.2x16" >add_link
+        $ echo "mdev0:ep_81" >add_link
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
new file mode 100644
index 0000000..d50de03
--- /dev/null
+++ b/drivers/staging/most/Kconfig
@@ -0,0 +1,30 @@
+menuconfig MOST
+        tristate "MOST driver"
+        select MOSTCORE
+        default n
+        ---help---
+          This option allows you to enable support for MOST Network transceivers.
+
+          If in doubt, say N here.
+
+
+
+if MOST
+
+source "drivers/staging/most/mostcore/Kconfig"
+
+source "drivers/staging/most/aim-cdev/Kconfig"
+
+source "drivers/staging/most/aim-network/Kconfig"
+
+source "drivers/staging/most/aim-sound/Kconfig"
+
+source "drivers/staging/most/aim-v4l2/Kconfig"
+
+source "drivers/staging/most/hdm-dim2/Kconfig"
+
+source "drivers/staging/most/hdm-i2c/Kconfig"
+
+source "drivers/staging/most/hdm-usb/Kconfig"
+
+endif
diff --git a/drivers/staging/most/Makefile b/drivers/staging/most/Makefile
new file mode 100644
index 0000000..9ee981c
--- /dev/null
+++ b/drivers/staging/most/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_MOSTCORE)	+= mostcore/
+obj-$(CONFIG_AIM_CDEV)	+= aim-cdev/
+obj-$(CONFIG_AIM_NETWORK)	+= aim-network/
+obj-$(CONFIG_AIM_SOUND)	+= aim-sound/
+obj-$(CONFIG_AIM_V4L2)	+= aim-v4l2/
+obj-$(CONFIG_HDM_DIM2)	+= hdm-dim2/
+obj-$(CONFIG_HDM_I2C)	+= hdm-i2c/
+obj-$(CONFIG_HDM_USB)	+= hdm-usb/
diff --git a/drivers/staging/most/TODO b/drivers/staging/most/TODO
new file mode 100644
index 0000000..4fa11a9
--- /dev/null
+++ b/drivers/staging/most/TODO
@@ -0,0 +1,8 @@
+* Get through code review with Greg Kroah-Hartman
+
+Contact:
+To:
+Christian Gromm <christian.gromm@microchip.com>
+Cc:
+Michael Fabry <Michael.Fabry@microchip.com>
+Christian Gromm <chris@engineersdelight.de>
diff --git a/drivers/staging/most/aim-cdev/Kconfig b/drivers/staging/most/aim-cdev/Kconfig
new file mode 100644
index 0000000..3c59f1b
--- /dev/null
+++ b/drivers/staging/most/aim-cdev/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST Cdev configuration
+#
+
+config AIM_CDEV
+	tristate "Cdev AIM"
+
+	---help---
+	  Say Y here if you want to communicate via character devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called aim_cdev.
\ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/Makefile b/drivers/staging/most/aim-cdev/Makefile
new file mode 100644
index 0000000..0bcc6c6
--- /dev/null
+++ b/drivers/staging/most/aim-cdev/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_AIM_CDEV) += aim_cdev.o
+
+aim_cdev-objs := cdev.o
+ccflags-y += -Idrivers/staging/most/mostcore/
\ No newline at end of file
diff --git a/drivers/staging/most/aim-cdev/cdev.c b/drivers/staging/most/aim-cdev/cdev.c
new file mode 100644
index 0000000..0a13d8d
--- /dev/null
+++ b/drivers/staging/most/aim-cdev/cdev.c
@@ -0,0 +1,528 @@
+/*
+ * cdev.c - Application interfacing module for character devices
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/kfifo.h>
+#include <linux/uaccess.h>
+#include <linux/idr.h>
+#include "mostcore.h"
+
+static dev_t aim_devno;
+static struct class *aim_class;
+static struct ida minor_id;
+static unsigned int major;
+
+struct aim_channel {
+	wait_queue_head_t wq;
+	struct cdev cdev;
+	struct device *dev;
+	struct mutex io_mutex;
+	struct most_interface *iface;
+	struct most_channel_config *cfg;
+	unsigned int channel_id;
+	dev_t devno;
+	bool keep_mbo;
+	unsigned int mbo_offs;
+	struct mbo *stacked_mbo;
+	DECLARE_KFIFO_PTR(fifo, typeof(struct mbo *));
+	atomic_t access_ref;
+	struct list_head list;
+};
+#define to_channel(d) container_of(d, struct aim_channel, cdev)
+static struct list_head channel_list;
+static spinlock_t ch_list_lock;
+
+
+static struct aim_channel *get_channel(struct most_interface *iface, int id)
+{
+	struct aim_channel *channel, *tmp;
+	unsigned long flags;
+	int found_channel = 0;
+
+	spin_lock_irqsave(&ch_list_lock, flags);
+	list_for_each_entry_safe(channel, tmp, &channel_list, list) {
+		if ((channel->iface == iface) && (channel->channel_id == id)) {
+			found_channel = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch_list_lock, flags);
+	if (!found_channel)
+		return NULL;
+	return channel;
+}
+
+/**
+ * aim_open - implements the syscall to open the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stores the channel pointer in the private data field of
+ * the file structure and activates the channel within the core.
+ */
+static int aim_open(struct inode *inode, struct file *filp)
+{
+	struct aim_channel *channel;
+	int ret;
+
+	channel = to_channel(inode->i_cdev);
+	filp->private_data = channel;
+
+	if (((channel->cfg->direction == MOST_CH_RX) &&
+	     ((filp->f_flags & O_ACCMODE) != O_RDONLY))
+	    || ((channel->cfg->direction == MOST_CH_TX) &&
+		((filp->f_flags & O_ACCMODE) != O_WRONLY))) {
+		pr_info("WARN: Access flags mismatch\n");
+		return -EACCES;
+	}
+	if (!atomic_inc_and_test(&channel->access_ref)) {
+		pr_info("WARN: Device is busy\n");
+		atomic_dec(&channel->access_ref);
+		return -EBUSY;
+	}
+
+	ret = most_start_channel(channel->iface, channel->channel_id);
+	if (ret)
+		atomic_dec(&channel->access_ref);
+	return ret;
+}
+
+/**
+ * aim_close - implements the syscall to close the device
+ * @inode: inode pointer
+ * @filp: file pointer
+ *
+ * This stops the channel within the core.
+ */
+static int aim_close(struct inode *inode, struct file *filp)
+{
+	int ret;
+	struct mbo *mbo;
+	struct aim_channel *channel = to_channel(inode->i_cdev);
+
+	mutex_lock(&channel->io_mutex);
+	if (!channel->dev) {
+		mutex_unlock(&channel->io_mutex);
+		atomic_dec(&channel->access_ref);
+		device_destroy(aim_class, channel->devno);
+		cdev_del(&channel->cdev);
+		kfifo_free(&channel->fifo);
+		list_del(&channel->list);
+		ida_simple_remove(&minor_id, MINOR(channel->devno));
+		wake_up_interruptible(&channel->wq);
+		kfree(channel);
+		return 0;
+	}
+	mutex_unlock(&channel->io_mutex);
+
+	while (0 != kfifo_out((struct kfifo *)&channel->fifo, &mbo, 1))
+		most_put_mbo(mbo);
+	if (channel->keep_mbo == true)
+		most_put_mbo(channel->stacked_mbo);
+	ret = most_stop_channel(channel->iface, channel->channel_id);
+	atomic_dec(&channel->access_ref);
+	wake_up_interruptible(&channel->wq);
+	return ret;
+}
+
+/**
+ * aim_write - implements the syscall to write to the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to write
+ * @offset: offset from where to start writing
+ */
+static ssize_t aim_write(struct file *filp, const char __user *buf,
+			 size_t count, loff_t *offset)
+{
+	int ret, err;
+	size_t actual_len = 0;
+	size_t max_len = 0;
+	ssize_t retval;
+	struct mbo *mbo;
+	struct aim_channel *channel = filp->private_data;
+
+	mutex_lock(&channel->io_mutex);
+	if (unlikely(!channel->dev)) {
+		mutex_unlock(&channel->io_mutex);
+		return -EPIPE;
+	}
+	mutex_unlock(&channel->io_mutex);
+
+	mbo = most_get_mbo(channel->iface, channel->channel_id);
+
+	if (!mbo && channel->dev) {
+		if ((filp->f_flags & O_NONBLOCK))
+			return -EAGAIN;
+		if (wait_event_interruptible(
+			    channel->wq,
+			    (mbo = most_get_mbo(channel->iface,
+						channel->channel_id)) ||
+			    (channel->dev == NULL)))
+			return -ERESTARTSYS;
+	}
+
+	mutex_lock(&channel->io_mutex);
+	if (unlikely(!channel->dev)) {
+		mutex_unlock(&channel->io_mutex);
+		err = -EPIPE;
+		goto error;
+	}
+	mutex_unlock(&channel->io_mutex);
+
+	max_len = channel->cfg->buffer_size;
+	actual_len = min(count, max_len);
+	mbo->buffer_length = actual_len;
+
+	retval = copy_from_user(mbo->virt_address, buf, mbo->buffer_length);
+	if (retval) {
+		err = -EIO;
+		goto error;
+	}
+
+	ret = most_submit_mbo(mbo);
+	if (ret) {
+		pr_info("submitting MBO to core failed\n");
+		err = ret;
+		goto error;
+	}
+	return actual_len - retval;
+error:
+	if (mbo)
+		most_put_mbo(mbo);
+	return err;
+}
+
+/**
+ * aim_read - implements the syscall to read from the device
+ * @filp: file pointer
+ * @buf: pointer to user buffer
+ * @count: number of bytes to read
+ * @offset: offset from where to start reading
+ */
+static ssize_t
+aim_read(struct file *filp, char __user *buf, size_t count, loff_t *offset)
+{
+	ssize_t retval;
+	size_t not_copied, proc_len;
+	struct mbo *mbo;
+	struct aim_channel *channel = filp->private_data;
+
+	if (channel->keep_mbo == true) {
+		mbo = channel->stacked_mbo;
+		channel->keep_mbo = false;
+		goto start_copy;
+	}
+	while ((0 == kfifo_out(&channel->fifo, &mbo, 1))
+	       && (channel->dev != NULL)) {
+		if (filp->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+		if (wait_event_interruptible(channel->wq,
+					     (!kfifo_is_empty(&channel->fifo) ||
+					      (channel->dev == NULL))))
+			return -ERESTARTSYS;
+	}
+
+start_copy:
+	/* make sure we don't submit to gone devices */
+	mutex_lock(&channel->io_mutex);
+	if (unlikely(!channel->dev)) {
+		mutex_unlock(&channel->io_mutex);
+		return -EIO;
+	}
+
+	if (count < mbo->processed_length)
+		channel->keep_mbo = true;
+
+	proc_len = min((int)count,
+		       (int)(mbo->processed_length - channel->mbo_offs));
+
+	not_copied = copy_to_user(buf,
+				  mbo->virt_address + channel->mbo_offs,
+				  proc_len);
+
+	retval = not_copied ? proc_len - not_copied : proc_len;
+
+	if (channel->keep_mbo == true) {
+		channel->mbo_offs = retval;
+		channel->stacked_mbo = mbo;
+	} else {
+		most_put_mbo(mbo);
+		channel->mbo_offs = 0;
+	}
+	mutex_unlock(&channel->io_mutex);
+	return retval;
+}
+
+/**
+ * Initialization of struct file_operations
+ */
+static const struct file_operations channel_fops = {
+	.owner = THIS_MODULE,
+	.read = aim_read,
+	.write = aim_write,
+	.open = aim_open,
+	.release = aim_close,
+};
+
+/**
+ * aim_disconnect_channel - disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the cdev that represents this
+ * channel in user space.
+ */
+static int aim_disconnect_channel(struct most_interface *iface, int channel_id)
+{
+	struct aim_channel *channel;
+	unsigned long flags;
+
+	if (!iface) {
+		pr_info("Bad interface pointer\n");
+		return -EINVAL;
+	}
+
+	channel = get_channel(iface, channel_id);
+	if (channel == NULL)
+		return -ENXIO;
+
+	mutex_lock(&channel->io_mutex);
+	channel->dev = NULL;
+	mutex_unlock(&channel->io_mutex);
+
+	if (atomic_read(&channel->access_ref)) {
+		device_destroy(aim_class, channel->devno);
+		cdev_del(&channel->cdev);
+		kfifo_free(&channel->fifo);
+		ida_simple_remove(&minor_id, MINOR(channel->devno));
+		spin_lock_irqsave(&ch_list_lock, flags);
+		list_del(&channel->list);
+		spin_unlock_irqrestore(&ch_list_lock, flags);
+		kfree(channel);
+	} else {
+		wake_up_interruptible(&channel->wq);
+	}
+	return 0;
+}
+
+/**
+ * aim_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel linked to this MBO and stores it in the local
+ * fifo buffer.
+ */
+static int aim_rx_completion(struct mbo *mbo)
+{
+	struct aim_channel *channel;
+
+	if (!mbo)
+		return -EINVAL;
+
+	channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
+	if (channel == NULL)
+		return -ENXIO;
+
+	kfifo_in(&channel->fifo, &mbo, 1);
+#ifdef DEBUG_MESG
+	if (kfifo_is_full(&channel->fifo))
+		pr_info("WARN: Fifo is full\n");
+#endif
+	wake_up_interruptible(&channel->wq);
+	return 0;
+}
+
+/**
+ * aim_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This wakes sleeping processes in the wait-queue.
+ */
+static int aim_tx_completion(struct most_interface *iface, int channel_id)
+{
+	struct aim_channel *channel;
+
+	if (!iface) {
+		pr_info("Bad interface pointer\n");
+		return -EINVAL;
+	}
+	if ((channel_id < 0) || (channel_id >= iface->num_channels)) {
+		pr_info("Channel ID out of range\n");
+		return -EINVAL;
+	}
+
+	channel = get_channel(iface, channel_id);
+	if (channel == NULL)
+		return -ENXIO;
+	wake_up_interruptible(&channel->wq);
+	return 0;
+}
+
+static struct most_aim cdev_aim;
+
+/**
+ * aim_probe - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @name: name of the device to be created
+ *
+ * This allocates a channel object and creates the device node in /dev
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int aim_probe(struct most_interface *iface, int channel_id,
+		     struct most_channel_config *cfg,
+		     struct kobject *parent, char *name)
+{
+	struct aim_channel *channel;
+	unsigned long cl_flags;
+	int retval;
+	int current_minor;
+
+	if ((!iface) || (!cfg) || (!parent) || (!name)) {
+		pr_info("Probing AIM with bad arguments");
+		return -EINVAL;
+	}
+	channel = get_channel(iface, channel_id);
+	if (channel)
+		return -EEXIST;
+
+	current_minor = ida_simple_get(&minor_id, 0, 0, GFP_KERNEL);
+	if (current_minor < 0)
+		return current_minor;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel) {
+		pr_info("failed to alloc channel object\n");
+		retval = -ENOMEM;
+		goto error_alloc_channel;
+	}
+
+	channel->devno = MKDEV(major, current_minor);
+	cdev_init(&channel->cdev, &channel_fops);
+	channel->cdev.owner = THIS_MODULE;
+	cdev_add(&channel->cdev, channel->devno, 1);
+	channel->iface = iface;
+	channel->cfg = cfg;
+	channel->channel_id = channel_id;
+	channel->mbo_offs = 0;
+	atomic_set(&channel->access_ref, -1);
+	INIT_KFIFO(channel->fifo);
+	retval = kfifo_alloc(&channel->fifo, cfg->num_buffers, GFP_KERNEL);
+	if (retval) {
+		pr_info("failed to alloc channel kfifo");
+		goto error_alloc_kfifo;
+	}
+	init_waitqueue_head(&channel->wq);
+	mutex_init(&channel->io_mutex);
+	spin_lock_irqsave(&ch_list_lock, cl_flags);
+	list_add_tail(&channel->list, &channel_list);
+	spin_unlock_irqrestore(&ch_list_lock, cl_flags);
+	channel->dev = device_create(aim_class,
+				     NULL,
+				     channel->devno,
+				     NULL,
+				     "%s", name);
+
+	retval = IS_ERR(channel->dev);
+	if (retval) {
+		pr_info("failed to create new device node %s\n", name);
+		goto error_create_device;
+	}
+	kobject_uevent(&channel->dev->kobj, KOBJ_ADD);
+	return 0;
+
+error_create_device:
+	kfifo_free(&channel->fifo);
+	list_del(&channel->list);
+error_alloc_kfifo:
+	cdev_del(&channel->cdev);
+	kfree(channel);
+error_alloc_channel:
+	ida_simple_remove(&minor_id, current_minor);
+	return retval;
+}
+
+static struct most_aim cdev_aim = {
+	.name = "cdev",
+	.probe_channel = aim_probe,
+	.disconnect_channel = aim_disconnect_channel,
+	.rx_completion = aim_rx_completion,
+	.tx_completion = aim_tx_completion,
+};
+
+static int __init mod_init(void)
+{
+	pr_info("init()\n");
+
+	INIT_LIST_HEAD(&channel_list);
+	spin_lock_init(&ch_list_lock);
+	ida_init(&minor_id);
+
+	if (alloc_chrdev_region(&aim_devno, 0, 50, "cdev") < 0)
+		return -EIO;
+	major = MAJOR(aim_devno);
+
+	aim_class = class_create(THIS_MODULE, "most_cdev_aim");
+	if (IS_ERR(aim_class)) {
+		pr_err("no udev support\n");
+		goto free_cdev;
+	}
+
+	if (most_register_aim(&cdev_aim))
+		goto dest_class;
+	return 0;
+
+dest_class:
+	class_destroy(aim_class);
+free_cdev:
+	unregister_chrdev_region(aim_devno, 1);
+	return -EIO;
+}
+
+static void __exit mod_exit(void)
+{
+	struct aim_channel *channel, *tmp;
+
+	pr_info("exit module\n");
+
+	most_deregister_aim(&cdev_aim);
+
+	list_for_each_entry_safe(channel, tmp, &channel_list, list) {
+		device_destroy(aim_class, channel->devno);
+		cdev_del(&channel->cdev);
+		kfifo_free(&channel->fifo);
+		list_del(&channel->list);
+		ida_simple_remove(&minor_id, MINOR(channel->devno));
+		kfree(channel);
+	}
+	class_destroy(aim_class);
+	unregister_chrdev_region(aim_devno, 1);
+	ida_destroy(&minor_id);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("character device AIM for mostcore");
diff --git a/drivers/staging/most/aim-network/Kconfig b/drivers/staging/most/aim-network/Kconfig
new file mode 100644
index 0000000..4c66b24
--- /dev/null
+++ b/drivers/staging/most/aim-network/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST Networking configuration
+#
+
+config AIM_NETWORK
+	tristate "Networking AIM"
+	depends on NET
+
+	---help---
+	  Say Y here if you want to communicate via a networking device.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called aim_network.
diff --git a/drivers/staging/most/aim-network/Makefile b/drivers/staging/most/aim-network/Makefile
new file mode 100644
index 0000000..840c1dd
--- /dev/null
+++ b/drivers/staging/most/aim-network/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_AIM_NETWORK) += aim_network.o
+
+aim_network-objs := networking.o
+ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-network/networking.c b/drivers/staging/most/aim-network/networking.c
new file mode 100644
index 0000000..c8ab239
--- /dev/null
+++ b/drivers/staging/most/aim-network/networking.c
@@ -0,0 +1,567 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include "mostcore.h"
+#include "networking.h"
+
+
+#define MEP_HDR_LEN 8
+#define MDP_HDR_LEN 16
+#define MAMAC_DATA_LEN (1024 - MDP_HDR_LEN)
+
+#define PMHL 5
+
+#define PMS_TELID_UNSEGM_MAMAC	0x0A
+#define PMS_FIFONO_MDP		0x01
+#define PMS_FIFONO_MEP		0x04
+#define PMS_MSGTYPE_DATA	0x04
+#define PMS_DEF_PRIO		0
+#define MEP_DEF_RETRY		15
+
+#define PMS_FIFONO_MASK		0x07
+#define PMS_FIFONO_SHIFT	3
+#define PMS_RETRY_SHIFT		4
+#define PMS_TELID_MASK		0x0F
+#define PMS_TELID_SHIFT		4
+
+#define HB(value)		((u8)((u16)(value) >> 8))
+#define LB(value)		((u8)(value))
+
+
+
+#define EXTRACT_BIT_SET(bitset_name, value) \
+	(((value) >> bitset_name##_SHIFT) & bitset_name##_MASK)
+
+#define PMS_IS_MEP(buf, len) \
+	((len) > MEP_HDR_LEN && \
+	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MEP)
+
+#define PMS_IS_MAMAC(buf, len) \
+	((len) > MDP_HDR_LEN && \
+	 EXTRACT_BIT_SET(PMS_FIFONO, (buf)[3]) == PMS_FIFONO_MDP && \
+	 EXTRACT_BIT_SET(PMS_TELID, (buf)[14]) == PMS_TELID_UNSEGM_MAMAC)
+
+struct net_dev_channel {
+	bool linked;
+	int ch_id;
+};
+
+struct net_dev_context {
+	struct most_interface *iface;
+	bool channels_opened;
+	bool is_mamac;
+	unsigned char link_stat;
+	struct net_device *dev;
+	struct net_dev_channel rx;
+	struct net_dev_channel tx;
+	struct list_head list;
+};
+
+static struct list_head net_devices = LIST_HEAD_INIT(net_devices);
+static struct spinlock list_lock;
+static struct most_aim aim;
+
+
+static int skb_to_mamac(const struct sk_buff *skb, struct mbo *mbo)
+{
+	u8 *buff = mbo->virt_address;
+	const u8 broadcast[] = { 0x03, 0xFF };
+	const u8 *dest_addr = skb->data + 4;
+	const u8 *eth_type = skb->data + 12;
+	unsigned int payload_len = skb->len - ETH_HLEN;
+	unsigned int mdp_len = payload_len + MDP_HDR_LEN;
+
+	if (mbo->buffer_length < mdp_len) {
+		pr_err("drop: too small buffer! (%d for %d)\n",
+		       mbo->buffer_length, mdp_len);
+		return -EINVAL;
+	}
+
+	if (skb->len < ETH_HLEN) {
+		pr_err("drop: too small packet! (%d)\n", skb->len);
+		return -EINVAL;
+	}
+
+	if (dest_addr[0] == 0xFF && dest_addr[1] == 0xFF)
+		dest_addr = broadcast;
+
+	*buff++ = HB(mdp_len - 2);
+	*buff++ = LB(mdp_len - 2);
+
+	*buff++ = PMHL;
+	*buff++ = (PMS_FIFONO_MDP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+	*buff++ = PMS_DEF_PRIO;
+	*buff++ = dest_addr[0];
+	*buff++ = dest_addr[1];
+	*buff++ = 0x00;
+
+	*buff++ = HB(payload_len + 6);
+	*buff++ = LB(payload_len + 6);
+
+	/* end of FPH here */
+
+	*buff++ = eth_type[0];
+	*buff++ = eth_type[1];
+	*buff++ = 0;
+	*buff++ = 0;
+
+	*buff++ = PMS_TELID_UNSEGM_MAMAC << 4 | HB(payload_len);
+	*buff++ = LB(payload_len);
+
+	memcpy(buff, skb->data + ETH_HLEN, payload_len);
+	mbo->buffer_length = mdp_len;
+	return 0;
+}
+
+static int skb_to_mep(const struct sk_buff *skb, struct mbo *mbo)
+{
+	u8 *buff = mbo->virt_address;
+	unsigned int mep_len = skb->len + MEP_HDR_LEN;
+
+	if (mbo->buffer_length < mep_len) {
+		pr_err("drop: too small buffer! (%d for %d)\n",
+		       mbo->buffer_length, mep_len);
+		return -EINVAL;
+	}
+
+	*buff++ = HB(mep_len - 2);
+	*buff++ = LB(mep_len - 2);
+
+	*buff++ = PMHL;
+	*buff++ = (PMS_FIFONO_MEP << PMS_FIFONO_SHIFT) | PMS_MSGTYPE_DATA;
+	*buff++ = (MEP_DEF_RETRY << PMS_RETRY_SHIFT) | PMS_DEF_PRIO;
+	*buff++ = 0;
+	*buff++ = 0;
+	*buff++ = 0;
+
+	memcpy(buff, skb->data, skb->len);
+	mbo->buffer_length = mep_len;
+	return 0;
+}
+
+static int most_nd_set_mac_address(struct net_device *dev, void *p)
+{
+	struct net_dev_context *nd = dev->ml_priv;
+	int err = eth_mac_addr(dev, p);
+
+	if (err)
+		return err;
+
+	BUG_ON(nd->dev != dev);
+
+	nd->is_mamac =
+		(dev->dev_addr[0] == 0 && dev->dev_addr[1] == 0 &&
+		 dev->dev_addr[2] == 0 && dev->dev_addr[3] == 0);
+
+	/*
+	 * Set default MTU for the given packet type.
+	 * It is still possible to change MTU using ip tools afterwards.
+	 */
+	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
+
+	return 0;
+}
+
+static int most_nd_open(struct net_device *dev)
+{
+	struct net_dev_context *nd = dev->ml_priv;
+
+	pr_info("open net device %s\n", dev->name);
+
+	BUG_ON(nd->dev != dev);
+
+	if (nd->channels_opened)
+		return -EFAULT;
+
+	BUG_ON(!nd->tx.linked || !nd->rx.linked);
+
+	if (most_start_channel(nd->iface, nd->rx.ch_id)) {
+		pr_err("most_start_channel() failed\n");
+		return -EBUSY;
+	}
+
+	if (most_start_channel(nd->iface, nd->tx.ch_id)) {
+		pr_err("most_start_channel() failed\n");
+		most_stop_channel(nd->iface, nd->rx.ch_id);
+		return -EBUSY;
+	}
+
+	nd->channels_opened = true;
+
+	if (nd->is_mamac) {
+		nd->link_stat = 1;
+		netif_wake_queue(dev);
+	} else {
+		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id);
+	}
+
+	return 0;
+}
+
+static int most_nd_stop(struct net_device *dev)
+{
+	struct net_dev_context *nd = dev->ml_priv;
+
+	pr_info("stop net device %s\n", dev->name);
+
+	BUG_ON(nd->dev != dev);
+	netif_stop_queue(dev);
+
+	if (nd->channels_opened) {
+		most_stop_channel(nd->iface, nd->rx.ch_id);
+		most_stop_channel(nd->iface, nd->tx.ch_id);
+		nd->channels_opened = false;
+	}
+
+	return 0;
+}
+
+static netdev_tx_t most_nd_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev)
+{
+	struct net_dev_context *nd = dev->ml_priv;
+	struct mbo *mbo;
+	int ret;
+
+	BUG_ON(nd->dev != dev);
+
+	mbo = most_get_mbo(nd->iface, nd->tx.ch_id);
+
+	if (!mbo) {
+		netif_stop_queue(dev);
+		dev->stats.tx_fifo_errors++;
+		return NETDEV_TX_BUSY;
+	}
+
+	if (nd->is_mamac)
+		ret = skb_to_mamac(skb, mbo);
+	else
+		ret = skb_to_mep(skb, mbo);
+
+	if (ret) {
+		most_put_mbo(mbo);
+		dev->stats.tx_dropped++;
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	most_submit_mbo(mbo);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops most_nd_ops = {
+	.ndo_open = most_nd_open,
+	.ndo_stop = most_nd_stop,
+	.ndo_start_xmit = most_nd_start_xmit,
+	.ndo_set_mac_address = most_nd_set_mac_address,
+};
+
+static void most_nd_setup(struct net_device *dev)
+{
+	pr_info("setup net device %s\n", dev->name);
+	ether_setup(dev);
+	dev->netdev_ops = &most_nd_ops;
+}
+
+static void most_net_rm_netdev_safe(struct net_dev_context *nd)
+{
+	if (!nd->dev)
+		return;
+
+	pr_info("remove net device %p\n", nd->dev);
+
+	unregister_netdev(nd->dev);
+	free_netdev(nd->dev);
+	nd->dev = 0;
+}
+
+static struct net_dev_context *get_net_dev_context(
+	struct most_interface *iface)
+{
+	struct net_dev_context *nd, *tmp;
+
+	spin_lock(&list_lock);
+	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
+		if (nd->iface == iface) {
+			spin_unlock(&list_lock);
+			return nd;
+		}
+	}
+	spin_unlock(&list_lock);
+	return NULL;
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *ccfg,
+			     struct kobject *parent, char *name)
+{
+	struct net_dev_context *nd;
+	struct net_dev_channel *ch;
+
+	if (!iface)
+		return -EINVAL;
+
+	if (ccfg->data_type != MOST_CH_ASYNC)
+		return -EINVAL;
+
+	nd = get_net_dev_context(iface);
+
+	if (!nd) {
+		nd = kzalloc(sizeof(*nd), GFP_KERNEL);
+		if (!nd)
+			return -ENOMEM;
+
+		nd->iface = iface;
+
+		spin_lock(&list_lock);
+		list_add(&nd->list, &net_devices);
+		spin_unlock(&list_lock);
+	}
+
+	ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
+	if (ch->linked) {
+		pr_err("only one channel per instance & direction allowed\n");
+		return -EINVAL;
+	}
+
+	if (nd->tx.linked || nd->rx.linked) {
+		struct net_device *dev =
+			alloc_netdev(0, "meth%d", NET_NAME_UNKNOWN, most_nd_setup);
+
+		if (!dev) {
+			pr_err("no memory for net_device\n");
+			return -ENOMEM;
+		}
+
+		nd->dev = dev;
+
+		dev->ml_priv = nd;
+		if (register_netdev(dev)) {
+			pr_err("registering net device failed\n");
+			free_netdev(dev);
+			return -EINVAL;
+		}
+	}
+
+	ch->ch_id = channel_idx;
+	ch->linked = true;
+
+	return 0;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+				  int channel_idx)
+{
+	struct net_dev_context *nd;
+	struct net_dev_channel *ch;
+
+	nd = get_net_dev_context(iface);
+	if (!nd)
+		return -EINVAL;
+
+	if (nd->rx.linked && channel_idx == nd->rx.ch_id)
+		ch = &nd->rx;
+	else if (nd->tx.linked && channel_idx == nd->tx.ch_id)
+		ch = &nd->tx;
+	else
+		return -EINVAL;
+
+	ch->linked = false;
+
+	/*
+	 * do not call most_stop_channel() here, because channels are
+	 * going to be closed in ndo_stop() after unregister_netdev()
+	 */
+	most_net_rm_netdev_safe(nd);
+
+	if (!nd->rx.linked && !nd->tx.linked) {
+		spin_lock(&list_lock);
+		list_del(&nd->list);
+		spin_unlock(&list_lock);
+		kfree(nd);
+	}
+
+	return 0;
+}
+
+static int aim_resume_tx_channel(struct most_interface *iface,
+				 int channel_idx)
+{
+	struct net_dev_context *nd;
+
+	nd = get_net_dev_context(iface);
+	if (!nd || !nd->channels_opened || nd->tx.ch_id != channel_idx)
+		return 0;
+
+	if (!nd->dev)
+		return 0;
+
+	netif_wake_queue(nd->dev);
+	return 0;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+	const u32 zero = 0;
+	struct net_dev_context *nd;
+	char *buf = mbo->virt_address;
+	uint32_t len = mbo->processed_length;
+	struct sk_buff *skb;
+	struct net_device *dev;
+
+	nd = get_net_dev_context(mbo->ifp);
+	if (!nd || !nd->channels_opened || nd->rx.ch_id != mbo->hdm_channel_id)
+		return -EIO;
+
+	dev = nd->dev;
+	if (!dev) {
+		pr_err_once("drop packet: missing net_device\n");
+		return -EIO;
+	}
+
+	if (nd->is_mamac) {
+		if (!PMS_IS_MAMAC(buf, len))
+			return -EIO;
+
+		skb = dev_alloc_skb(len - MDP_HDR_LEN + 2 * ETH_ALEN + 2);
+	} else {
+		if (!PMS_IS_MEP(buf, len))
+			return -EIO;
+
+		skb = dev_alloc_skb(len - MEP_HDR_LEN);
+	}
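+	/*
+	 * For MAMAC frames the Ethernet header is not part of the payload, so
+	 * room for a rebuilt 14-byte header (2 * ETH_ALEN MAC addresses plus a
+	 * 2-byte EtherType) is reserved in addition to the
+	 * len - MDP_HDR_LEN payload bytes.
+	 */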
+
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		pr_err_once("drop packet: no memory for skb\n");
+		goto out;
+	}
+
+	skb->dev = dev;
+
+	if (nd->is_mamac) {
+		/* dest */
+		memcpy(skb_put(skb, ETH_ALEN), dev->dev_addr, ETH_ALEN);
+
+		/* src */
+		memcpy(skb_put(skb, 4), &zero, 4);
+		memcpy(skb_put(skb, 2), buf + 5, 2);
+
+		/* eth type */
+		memcpy(skb_put(skb, 2), buf + 10, 2);
+
+		buf += MDP_HDR_LEN;
+		len -= MDP_HDR_LEN;
+	} else {
+		buf += MEP_HDR_LEN;
+		len -= MEP_HDR_LEN;
+	}
+
+	memcpy(skb_put(skb, len), buf, len);
+	skb->protocol = eth_type_trans(skb, dev);
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+	netif_rx(skb);
+
+out:
+	most_put_mbo(mbo);
+	return 0;
+}
+
+static int __init most_net_init(void)
+{
+	pr_info("most_net_init()\n");
+	spin_lock_init(&list_lock);
+	aim.name = "networking";
+	aim.probe_channel = aim_probe_channel;
+	aim.disconnect_channel = aim_disconnect_channel;
+	aim.tx_completion = aim_resume_tx_channel;
+	aim.rx_completion = aim_rx_data;
+	return most_register_aim(&aim);
+}
+
+static void __exit most_net_exit(void)
+{
+	struct net_dev_context *nd, *tmp;
+
+	spin_lock(&list_lock);
+	list_for_each_entry_safe(nd, tmp, &net_devices, list) {
+		list_del(&nd->list);
+		spin_unlock(&list_lock);
+		/*
+		 * do not call most_stop_channel() here, because channels are
+		 * going to be closed in ndo_stop() after unregister_netdev()
+		 */
+		most_net_rm_netdev_safe(nd);
+		kfree(nd);
+		spin_lock(&list_lock);
+	}
+	spin_unlock(&list_lock);
+
+	most_deregister_aim(&aim);
+	pr_info("most_net_exit()\n");
+}
+
+/**
+ * most_deliver_netinfo - called by the HDM to report the HW's MAC and link status
+ * @iface: most interface instance
+ * @link_stat: link status
+ * @mac_addr: MAC address
+ */
+void most_deliver_netinfo(struct most_interface *iface,
+			  unsigned char link_stat, unsigned char *mac_addr)
+{
+	struct net_dev_context *nd;
+	struct net_device *dev;
+
+	pr_info("Received netinfo from %s\n", iface->description);
+
+	nd = get_net_dev_context(iface);
+	if (!nd)
+		return;
+
+	dev = nd->dev;
+	if (!dev)
+		return;
+
+	if (mac_addr)
+		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+
+	if (nd->link_stat != link_stat) {
+		nd->link_stat = link_stat;
+		if (nd->link_stat)
+			netif_wake_queue(dev);
+		else
+			netif_stop_queue(dev);
+	}
+}
+EXPORT_SYMBOL(most_deliver_netinfo);
+
+module_init(most_net_init);
+module_exit(most_net_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("Networking Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-network/networking.h b/drivers/staging/most/aim-network/networking.h
new file mode 100644
index 0000000..1b8b434
--- /dev/null
+++ b/drivers/staging/most/aim-network/networking.h
@@ -0,0 +1,23 @@
+/*
+ * Networking AIM - Networking Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+#ifndef _NETWORKING_H_
+#define _NETWORKING_H_
+
+#include "mostcore.h"
+
+
+void most_deliver_netinfo(struct most_interface *iface,
+			  unsigned char link_stat, unsigned char *mac_addr);
+
+
+#endif
diff --git a/drivers/staging/most/aim-sound/Kconfig b/drivers/staging/most/aim-sound/Kconfig
new file mode 100644
index 0000000..3194c21
--- /dev/null
+++ b/drivers/staging/most/aim-sound/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOST ALSA configuration
+#
+
+config AIM_SOUND
+	tristate "ALSA AIM"
+	depends on SND
+	select SND_PCM
+	---help---
+	  Say Y here if you want to communicate via ALSA/sound devices.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called aim_sound.
diff --git a/drivers/staging/most/aim-sound/Makefile b/drivers/staging/most/aim-sound/Makefile
new file mode 100644
index 0000000..beba958
--- /dev/null
+++ b/drivers/staging/most/aim-sound/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_AIM_SOUND) += aim_sound.o
+
+aim_sound-objs := sound.o
+ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/aim-sound/sound.c b/drivers/staging/most/aim-sound/sound.c
new file mode 100644
index 0000000..860302e
--- /dev/null
+++ b/drivers/staging/most/aim-sound/sound.c
@@ -0,0 +1,758 @@
+/*
+ * sound.c - Audio Application Interface Module for Mostcore
+ *
+ * Copyright (C) 2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <mostcore.h>
+
+#define DRIVER_NAME "sound"
+
+static struct list_head dev_list;
+
+/**
+ * struct channel - private structure to keep channel specific data
+ * @substream: stores the substream structure
+ * @iface: interface to which the channel belongs
+ * @cfg: channel configuration
+ * @card: registered sound card
+ * @list: list for private use
+ * @id: channel index
+ * @period_pos: current period position (ring buffer)
+ * @buffer_pos: current buffer position (ring buffer)
+ * @is_stream_running: identifies whether a stream is running or not
+ * @opened: set when the stream is opened
+ * @playback_task: playback thread
+ * @playback_waitq: waitq used by playback thread
+ */
+struct channel {
+	struct snd_pcm_substream *substream;
+	struct most_interface *iface;
+	struct most_channel_config *cfg;
+	struct snd_card *card;
+	struct list_head list;
+	int id;
+	unsigned int period_pos;
+	unsigned int buffer_pos;
+	bool is_stream_running;
+
+	struct task_struct *playback_task;
+	wait_queue_head_t playback_waitq;
+
+	void (*copy_fn)(void *alsa, void *most, unsigned int bytes);
+};
+
+#define MOST_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
+		       SNDRV_PCM_INFO_MMAP_VALID | \
+		       SNDRV_PCM_INFO_BATCH | \
+		       SNDRV_PCM_INFO_INTERLEAVED | \
+		       SNDRV_PCM_INFO_BLOCK_TRANSFER)
+
+/**
+ * Initialization of struct snd_pcm_hardware
+ */
+static struct snd_pcm_hardware pcm_hardware_template = {
+	.info               = MOST_PCM_INFO,
+	.rates              = SNDRV_PCM_RATE_48000,
+	.rate_min           = 48000,
+	.rate_max           = 48000,
+	.channels_min       = 1,
+	.channels_max       = 8,
+};
+
+#define swap16(val) ( \
+	(((u16)(val) << 8) & (u16)0xFF00) | \
+	(((u16)(val) >> 8) & (u16)0x00FF))
+
+#define swap32(val) ( \
+	(((u32)(val) << 24) & (u32)0xFF000000) | \
+	(((u32)(val) <<  8) & (u32)0x00FF0000) | \
+	(((u32)(val) >>  8) & (u32)0x0000FF00) | \
+	(((u32)(val) >> 24) & (u32)0x000000FF))
+
+static void swap_copy16(u16 *dest, const u16 *source, unsigned int bytes)
+{
+	unsigned int i = 0;
+
+	while (i < (bytes / 2)) {
+		dest[i] = swap16(source[i]);
+		i++;
+	}
+}
+
+static void swap_copy24(u8 *dest, const u8 *source, unsigned int bytes)
+{
+	unsigned int i = 0;
+
+	while (i < bytes - 2) {
+		dest[i] = source[i + 2];
+		dest[i + 1] = source[i + 1];
+		dest[i + 2] = source[i];
+		i += 3;
+	}
+}
+
+static void swap_copy32(u32 *dest, const u32 *source, unsigned int bytes)
+{
+	unsigned int i = 0;
+
+	while (i < bytes / 4) {
+		dest[i] = swap32(source[i]);
+		i++;
+	}
+}
+
+static void alsa_to_most_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+	memcpy(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy16(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy16(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy24(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy24(most, alsa, bytes);
+}
+
+static void alsa_to_most_copy32(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy32(most, alsa, bytes);
+}
+
+static void most_to_alsa_memcpy(void *alsa, void *most, unsigned int bytes)
+{
+	memcpy(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy16(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy16(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy24(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy24(alsa, most, bytes);
+}
+
+static void most_to_alsa_copy32(void *alsa, void *most, unsigned int bytes)
+{
+	swap_copy32(alsa, most, bytes);
+}
+
+/**
+ * get_channel - get pointer to channel
+ * @iface: interface structure
+ * @channel_id: channel ID
+ *
+ * This traverses the channel list and returns the channel matching the
+ * ID and interface.
+ *
+ * Returns pointer to channel on success or NULL otherwise.
+ */
+static struct channel *get_channel(struct most_interface *iface,
+				   int channel_id)
+{
+	struct channel *channel, *tmp;
+
+	list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+		if ((channel->iface == iface) && (channel->id == channel_id))
+			return channel;
+	}
+
+	return NULL;
+}
+
+/**
+ * copy_data - implements data copying function
+ * @channel: channel
+ * @mbo: MBO from core
+ *
+ * Copy data between the ring buffer and the MBO and update the buffer position
+ */
+static bool copy_data(struct channel *channel, struct mbo *mbo)
+{
+	struct snd_pcm_runtime *const runtime = channel->substream->runtime;
+	unsigned int const frame_bytes = channel->cfg->subbuffer_size;
+	unsigned int const buffer_size = runtime->buffer_size;
+	unsigned int frames;
+	unsigned int fr0;
+
+	if (channel->cfg->direction & MOST_CH_RX)
+		frames = mbo->processed_length / frame_bytes;
+	else
+		frames = mbo->buffer_length / frame_bytes;
+	fr0 = min(buffer_size - channel->buffer_pos, frames);
+
+	channel->copy_fn(runtime->dma_area + channel->buffer_pos * frame_bytes,
+			 mbo->virt_address,
+			 fr0 * frame_bytes);
+
+	if (frames > fr0) {
+		/* wrap around at end of ring buffer */
+		channel->copy_fn(runtime->dma_area,
+				 mbo->virt_address + fr0 * frame_bytes,
+				 (frames - fr0) * frame_bytes);
+	}
+
+	channel->buffer_pos += frames;
+	if (channel->buffer_pos >= buffer_size)
+		channel->buffer_pos -= buffer_size;
+	channel->period_pos += frames;
+	if (channel->period_pos >= runtime->period_size) {
+		channel->period_pos -= runtime->period_size;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * playback_thread - function implements the playback thread
+ * @data: private data
+ *
+ * Thread that implements the playback functionality in a loop. It waits for a
+ * free MBO from mostcore for a particular channel, copies the data from the
+ * ring buffer into the MBO and submits the MBO back to mostcore.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int playback_thread(void *data)
+{
+	struct channel *const channel = data;
+
+	pr_info("playback thread started\n");
+
+	while (!kthread_should_stop()) {
+		struct mbo *mbo = NULL;
+		bool period_elapsed = false;
+		int ret;
+
+		wait_event_interruptible(
+			channel->playback_waitq,
+			kthread_should_stop() ||
+			(mbo = most_get_mbo(channel->iface, channel->id)));
+
+		if (!mbo)
+			continue;
+
+		if (channel->is_stream_running)
+			period_elapsed = copy_data(channel, mbo);
+		else
+			memset(mbo->virt_address, 0, mbo->buffer_length);
+
+		ret = most_submit_mbo(mbo);
+		if (ret)
+			channel->is_stream_running = false;
+
+		if (period_elapsed)
+			snd_pcm_period_elapsed(channel->substream);
+	}
+
+	return 0;
+}
+
+/**
+ * pcm_open - implements open callback function for PCM middle layer
+ * @substream: pointer to ALSA PCM substream
+ *
+ * This is called when a PCM substream is opened. At a minimum, the function
+ * should initialize the runtime->hw record.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_open(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct most_channel_config *cfg = channel->cfg;
+
+	pr_info("pcm_open(), %s\n", substream->name);
+
+	channel->substream = substream;
+
+	if (cfg->direction == MOST_CH_TX) {
+		init_waitqueue_head(&channel->playback_waitq);
+		channel->playback_task = kthread_run(&playback_thread, channel,
+						     "most_audio_playback");
+		if (IS_ERR(channel->playback_task))
+			return PTR_ERR(channel->playback_task);
+	}
+
+	if (most_start_channel(channel->iface, channel->id)) {
+		pr_err("most_start_channel() failed!\n");
+		if (cfg->direction == MOST_CH_TX)
+			kthread_stop(channel->playback_task);
+		return -EBUSY;
+	}
+
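+	/*
+	 * Derive the ALSA constraints from the MOST channel configuration:
+	 * each MBO carries exactly one ALSA period (the period size equals
+	 * the configured buffer_size) and the ring holds at most num_buffers
+	 * periods.
+	 */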
+	runtime->hw = pcm_hardware_template;
+	runtime->hw.buffer_bytes_max = cfg->num_buffers * cfg->buffer_size;
+	runtime->hw.period_bytes_min = cfg->buffer_size;
+	runtime->hw.period_bytes_max = cfg->buffer_size;
+	runtime->hw.periods_min = 1;
+	runtime->hw.periods_max = cfg->num_buffers;
+
+	return 0;
+}
+
+/**
+ * pcm_close - implements close callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ *
+ * Obviously, this is called when a PCM substream is closed. Any private
+ * instance for a PCM substream allocated in the open callback will be
+ * released here.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_close(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+
+	pr_info("pcm_close(), %s\n", substream->name);
+
+	if (channel->cfg->direction == MOST_CH_TX)
+		kthread_stop(channel->playback_task);
+	most_stop_channel(channel->iface, channel->id);
+
+	return 0;
+}
+
+/**
+ * pcm_hw_params - implements hw_params callback function for PCM middle layer
+ * @substream: sub-stream pointer
+ * @hw_params: contains the hardware parameters set by the application
+ *
+ * This is called when the hardware parameters are set by the application,
+ * that is, once the buffer size, the period size, the format, etc. are
+ * defined for the PCM substream. Much of the hardware setup should be done
+ * in this callback, including the allocation of buffers.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_params(struct snd_pcm_substream *substream,
+			 struct snd_pcm_hw_params *hw_params)
+{
+	pr_info("pcm_hw_params()\n");
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+						params_buffer_bytes(hw_params));
+}
+
+/**
+ * pcm_hw_free - implements hw_free callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This is called to release the resources allocated via hw_params.
+ * This function is always called before the close callback.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	pr_info("pcm_hw_free()\n");
+
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+/**
+ * pcm_prepare - implements prepare callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM is "prepared". The format, sample rate,
+ * etc., can be set here. This callback can be called many times at each setup.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct most_channel_config *cfg = channel->cfg;
+	int width = snd_pcm_format_physical_width(runtime->format);
+
+	channel->copy_fn = NULL;
+
+	if (cfg->direction == MOST_CH_TX) {
+		if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+			channel->copy_fn = alsa_to_most_memcpy;
+		else if (width == 16)
+			channel->copy_fn = alsa_to_most_copy16;
+		else if (width == 24)
+			channel->copy_fn = alsa_to_most_copy24;
+		else if (width == 32)
+			channel->copy_fn = alsa_to_most_copy32;
+	} else {
+		if (snd_pcm_format_big_endian(runtime->format) || width == 8)
+			channel->copy_fn = most_to_alsa_memcpy;
+		else if (width == 16)
+			channel->copy_fn = most_to_alsa_copy16;
+		else if (width == 24)
+			channel->copy_fn = most_to_alsa_copy24;
+		else if (width == 32)
+			channel->copy_fn = most_to_alsa_copy32;
+	}
+
+	if (!channel->copy_fn) {
+		pr_err("unsupported format\n");
+		return -EINVAL;
+	}
+
+	channel->period_pos = 0;
+	channel->buffer_pos = 0;
+
+	return 0;
+}
+
+/**
+ * pcm_trigger - implements trigger callback function for PCM middle layer
+ * @substream: substream pointer
+ * @cmd: action to perform
+ *
+ * This is called when the PCM is started, stopped or paused. The action will be
+ * specified in the second argument, SNDRV_PCM_TRIGGER_XXX
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct channel *channel = substream->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		channel->is_stream_running = true;
+		return 0;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+		channel->is_stream_running = false;
+		return 0;
+
+	default:
+		pr_info("pcm_trigger(), invalid\n");
+		return -EINVAL;
+	}
+}
+
+/**
+ * pcm_pointer - implements pointer callback function for PCM middle layer
+ * @substream: substream pointer
+ *
+ * This callback is called when the PCM middle layer inquires the current
+ * hardware position on the buffer. The position must be returned in frames,
+ * ranging from 0 to buffer_size-1.
+ */
+static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct channel *channel = substream->private_data;
+
+	return channel->buffer_pos;
+}
+
+/**
+ * Initialization of struct snd_pcm_ops
+ */
+static struct snd_pcm_ops pcm_ops = {
+	.open       = pcm_open,
+	.close      = pcm_close,
+	.ioctl      = snd_pcm_lib_ioctl,
+	.hw_params  = pcm_hw_params,
+	.hw_free    = pcm_hw_free,
+	.prepare    = pcm_prepare,
+	.trigger    = pcm_trigger,
+	.pointer    = pcm_pointer,
+	.page       = snd_pcm_lib_get_vmalloc_page,
+	.mmap       = snd_pcm_lib_mmap_vmalloc,
+};
+
+
+int split_arg_list(char *buf, char **card_name, char **pcm_format)
+{
+	*card_name = strsep(&buf, ".");
+	if (!*card_name)
+		return -EIO;
+	*pcm_format = strsep(&buf, ".\n");
+	if (!*pcm_format)
+		return -EIO;
+	return 0;
+}
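+
+/*
+ * Example (illustrative): an arg_list of "mycard.2x16" is split into the card
+ * name "mycard" and the PCM format string "2x16"; the format string is then
+ * checked against the channel's subbuffer size by audio_set_pcm_format()
+ * below.
+ */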
+
+int audio_set_pcm_format(char *pcm_format, struct most_channel_config *cfg)
+{
+	if (!strcmp(pcm_format, "1x8")) {
+		if (cfg->subbuffer_size != 1)
+			goto error;
+		pr_info("PCM format is 8-bit mono\n");
+		pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S8;
+	} else if (!strcmp(pcm_format, "2x16")) {
+		if (cfg->subbuffer_size != 4)
+			goto error;
+		pr_info("PCM format is 16-bit stereo\n");
+		pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S16_LE |
+						SNDRV_PCM_FMTBIT_S16_BE;
+	} else if (!strcmp(pcm_format, "2x24")) {
+		if (cfg->subbuffer_size != 6)
+			goto error;
+		pr_info("PCM format is 24-bit stereo\n");
+		pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S24_3LE |
+						SNDRV_PCM_FMTBIT_S24_3BE;
+	} else if (!strcmp(pcm_format, "2x32")) {
+		if (cfg->subbuffer_size != 8)
+			goto error;
+		pr_info("PCM format is 32-bit stereo\n");
+		pcm_hardware_template.formats = SNDRV_PCM_FMTBIT_S32_LE |
+						SNDRV_PCM_FMTBIT_S32_BE;
+	} else {
+		pr_err("PCM format %s not supported\n", pcm_format);
+		return -EIO;
+	}
+	return 0;
+error:
+	pr_err("Audio resolution doesn't fit subbuffer size\n");
+	return -EINVAL;
+}
+
+/**
+ * audio_probe_channel - probe function of the driver module
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ * @cfg: pointer to actual channel configuration
+ * @parent: pointer to kobject (needed for sysfs hook-up)
+ * @arg_list: string that provides the desired sound card name plus the
+ *	      desired PCM format (audio resolution), e.g. "<card>.2x16"
+ *
+ * Creates a sound card and PCM device, sets the PCM ops and registers the
+ * sound card.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_probe_channel(struct most_interface *iface, int channel_id,
+			       struct most_channel_config *cfg,
+			       struct kobject *parent, char *arg_list)
+{
+	struct channel *channel;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int playback_count = 0;
+	int capture_count = 0;
+	int ret;
+	int direction;
+	char *card_name;
+	char *pcm_format;
+
+	pr_info("sound_probe_channel()\n");
+
+	if (!iface)
+		return -EINVAL;
+
+	if (cfg->data_type != MOST_CH_SYNC) {
+		pr_err("Incompatible channel type\n");
+		return -EINVAL;
+	}
+
+	if (get_channel(iface, channel_id)) {
+		pr_err("channel (%s:%d) is already linked\n",
+		       iface->description, channel_id);
+		return -EINVAL;
+	}
+
+	if (cfg->direction == MOST_CH_TX) {
+		playback_count = 1;
+		direction = SNDRV_PCM_STREAM_PLAYBACK;
+	} else {
+		capture_count = 1;
+		direction = SNDRV_PCM_STREAM_CAPTURE;
+	}
+
+	ret = split_arg_list(arg_list, &card_name, &pcm_format);
+	if (ret < 0) {
+		pr_info("PCM format missing\n");
+		return ret;
+	}
+	ret = audio_set_pcm_format(pcm_format, cfg);
+	if (ret)
+		return ret;
+
+	ret = snd_card_new(NULL, -1, card_name, THIS_MODULE,
+			   sizeof(*channel), &card);
+	if (ret < 0)
+		return ret;
+
+	channel = card->private_data;
+	channel->card = card;
+	channel->cfg = cfg;
+	channel->iface = iface;
+	channel->id = channel_id;
+
+	snprintf(card->driver, sizeof(card->driver), "%s", DRIVER_NAME);
+	snprintf(card->shortname, sizeof(card->shortname), "MOST:%d",
+		 card->number);
+	snprintf(card->longname, sizeof(card->longname), "%s at %s, ch %d",
+		 card->shortname, iface->description, channel_id);
+
+	ret = snd_pcm_new(card, card_name, 0, playback_count,
+			  capture_count, &pcm);
+	if (ret < 0)
+		goto err_free_card;
+
+	pcm->private_data = channel;
+
+	snd_pcm_set_ops(pcm, direction, &pcm_ops);
+
+	ret = snd_card_register(card);
+	if (ret < 0)
+		goto err_free_card;
+
+	list_add_tail(&channel->list, &dev_list);
+
+	return 0;
+
+err_free_card:
+	snd_card_free(card);
+	return ret;
+}
+
+/**
+ * audio_disconnect_channel - function to disconnect a channel
+ * @iface: pointer to interface instance
+ * @channel_id: channel index
+ *
+ * This frees allocated memory and removes the sound card from ALSA
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_disconnect_channel(struct most_interface *iface,
+				    int channel_id)
+{
+	struct channel *channel;
+
+	pr_info("sound_disconnect_channel()\n");
+
+	channel = get_channel(iface, channel_id);
+	if (!channel) {
+		pr_err("sound_disconnect_channel(), invalid channel %d\n",
+		       channel_id);
+		return -EINVAL;
+	}
+
+	list_del(&channel->list);
+	snd_card_free(channel->card);
+
+	return 0;
+}
+
+/**
+ * audio_rx_completion - completion handler for rx channels
+ * @mbo: pointer to buffer object that has completed
+ *
+ * This searches for the channel this MBO belongs to and copies the data from
+ * the MBO to the ring buffer.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_rx_completion(struct mbo *mbo)
+{
+	struct channel *channel = get_channel(mbo->ifp, mbo->hdm_channel_id);
+	bool period_elapsed = false;
+
+	if (!channel) {
+		pr_err("sound_rx_completion(), invalid channel %d\n",
+		       mbo->hdm_channel_id);
+		return -EINVAL;
+	}
+
+	if (channel->is_stream_running)
+		period_elapsed = copy_data(channel, mbo);
+
+	most_put_mbo(mbo);
+
+	if (period_elapsed)
+		snd_pcm_period_elapsed(channel->substream);
+
+	return 0;
+}
+
+/**
+ * audio_tx_completion - completion handler for tx channels
+ * @iface: pointer to interface instance
+ * @channel_id: channel index/ID
+ *
+ * This searches for the channel that belongs to this combination of interface
+ * pointer and channel ID and wakes up the process waiting on this channel's
+ * wait queue.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int audio_tx_completion(struct most_interface *iface, int channel_id)
+{
+	struct channel *channel = get_channel(iface, channel_id);
+
+	if (!channel) {
+		pr_err("sound_tx_completion(), invalid channel %d\n",
+		       channel_id);
+		return -EINVAL;
+	}
+
+	wake_up_interruptible(&channel->playback_waitq);
+
+	return 0;
+}
+
+/**
+ * Initialization of the struct most_aim
+ */
+static struct most_aim audio_aim = {
+	.name = DRIVER_NAME,
+	.probe_channel = audio_probe_channel,
+	.disconnect_channel = audio_disconnect_channel,
+	.rx_completion = audio_rx_completion,
+	.tx_completion = audio_tx_completion,
+};
+
+static int __init audio_init(void)
+{
+	pr_info("init()\n");
+
+	INIT_LIST_HEAD(&dev_list);
+
+	return most_register_aim(&audio_aim);
+}
+
+static void __exit audio_exit(void)
+{
+	struct channel *channel, *tmp;
+
+	pr_info("exit()\n");
+
+	list_for_each_entry_safe(channel, tmp, &dev_list, list) {
+		list_del(&channel->list);
+		snd_card_free(channel->card);
+	}
+
+	most_deregister_aim(&audio_aim);
+}
+
+module_init(audio_init);
+module_exit(audio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Audio Application Interface Module for MostCore");
diff --git a/drivers/staging/most/aim-v4l2/Kconfig b/drivers/staging/most/aim-v4l2/Kconfig
new file mode 100644
index 0000000..d70eaaf
--- /dev/null
+++ b/drivers/staging/most/aim-v4l2/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST V4L2 configuration
+#
+
+config AIM_V4L2
+	tristate "V4L2 AIM"
+	depends on VIDEO_V4L2
+	---help---
+	  Say Y here if you want to communicate via Video4Linux.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called aim_v4l2.
\ No newline at end of file
diff --git a/drivers/staging/most/aim-v4l2/Makefile b/drivers/staging/most/aim-v4l2/Makefile
new file mode 100644
index 0000000..28aa948
--- /dev/null
+++ b/drivers/staging/most/aim-v4l2/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_AIM_V4L2) += aim_v4l2.o
+
+aim_v4l2-objs := video.o
+
+ccflags-y += -Idrivers/staging/most/mostcore/
+ccflags-y += -Idrivers/media/video
diff --git a/drivers/staging/most/aim-v4l2/video.c b/drivers/staging/most/aim-v4l2/video.c
new file mode 100644
index 0000000..d968791
--- /dev/null
+++ b/drivers/staging/most/aim-v4l2/video.c
@@ -0,0 +1,635 @@
+/*
+ * V4L2 AIM - V4L2 Application Interface Module for MostCore
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+#include <linux/videodev2.h>
+#include <linux/mutex.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fh.h>
+
+#include "mostcore.h"
+
+
+#define V4L2_AIM_MAX_INPUT  1
+
+
+struct most_video_dev {
+	struct most_interface *iface;
+	int ch_idx;
+	struct list_head list;
+	bool mute;
+
+	struct list_head pending_mbos;
+	spinlock_t list_lock;
+
+	struct v4l2_device v4l2_dev;
+	atomic_t access_ref;
+	struct video_device *vdev;
+	unsigned int ctrl_input;
+
+	struct mutex lock;
+
+	wait_queue_head_t wait_data;
+};
+
+struct aim_fh {
+	/* must be the first field of this struct! */
+	struct v4l2_fh fh;
+	struct most_video_dev *mdev;
+	u32 offs;
+};
+
+
+static struct list_head video_devices = LIST_HEAD_INIT(video_devices);
+static struct spinlock list_lock;
+static struct most_aim aim_info;
+
+
+static inline bool data_ready(struct most_video_dev *mdev)
+{
+	return !list_empty(&mdev->pending_mbos);
+}
+
+static inline struct mbo *get_top_mbo(struct most_video_dev *mdev)
+{
+	return list_first_entry(&mdev->pending_mbos, struct mbo, list);
+}
+
+
+static int aim_vdev_open(struct file *filp)
+{
+	int ret;
+	struct video_device *vdev = video_devdata(filp);
+	struct most_video_dev *mdev = video_drvdata(filp);
+	struct aim_fh *fh;
+
+	pr_info("aim_vdev_open()\n");
+
+	switch (vdev->vfl_type) {
+	case VFL_TYPE_GRABBER:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	fh = kzalloc(sizeof(struct aim_fh), GFP_KERNEL);
+	if (!fh)
+		return -ENOMEM;
+
+	if (!atomic_inc_and_test(&mdev->access_ref)) {
+		pr_err("too many clients\n");
+		ret = -EBUSY;
+		goto err_dec;
+	}
+
+	fh->mdev = mdev;
+	v4l2_fh_init(&fh->fh, vdev);
+	filp->private_data = fh;
+
+	v4l2_fh_add(&fh->fh);
+
+	ret = most_start_channel(mdev->iface, mdev->ch_idx);
+	if (ret) {
+		pr_err("most_start_channel() failed\n");
+		goto err_rm;
+	}
+
+	return 0;
+
+err_rm:
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
+
+err_dec:
+	atomic_dec(&mdev->access_ref);
+	kfree(fh);
+	return ret;
+}
+
+static int aim_vdev_close(struct file *filp)
+{
+	struct aim_fh *fh = filp->private_data;
+	struct most_video_dev *mdev = fh->mdev;
+	struct mbo *mbo, *tmp;
+
+	pr_info("aim_vdev_close()\n");
+
+	/*
+	 * We need to put MBOs back before we call most_stop_channel()
+	 * to deallocate MBOs.
+	 * On the other hand, mostcore keeps calling rx_completion()
+	 * to deliver MBOs until most_stop_channel() is called.
+	 * Use mute to work around this issue.
+	 * This must be implemented in core.
+	 */
+
+	spin_lock(&mdev->list_lock);
+	mdev->mute = true;
+	list_for_each_entry_safe(mbo, tmp, &mdev->pending_mbos, list) {
+		list_del(&mbo->list);
+		spin_unlock(&mdev->list_lock);
+		most_put_mbo(mbo);
+		spin_lock(&mdev->list_lock);
+	}
+	spin_unlock(&mdev->list_lock);
+	most_stop_channel(mdev->iface, mdev->ch_idx);
+	mdev->mute = false;
+
+	v4l2_fh_del(&fh->fh);
+	v4l2_fh_exit(&fh->fh);
+
+	atomic_dec(&mdev->access_ref);
+	kfree(fh);
+	return 0;
+}
+
+static ssize_t aim_vdev_read(struct file *filp, char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	struct aim_fh *fh = filp->private_data;
+	struct most_video_dev *mdev = fh->mdev;
+	int ret = 0;
+
+	if (*pos)
+		return -ESPIPE;
+
+	if (!mdev)
+		return -ENODEV;
+
+	/* wait for the first buffer */
+	if (!(filp->f_flags & O_NONBLOCK)) {
+		if (wait_event_interruptible(mdev->wait_data, data_ready(mdev)))
+			return -ERESTARTSYS;
+	}
+
+	if (!data_ready(mdev))
+		return -EAGAIN;
+
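+	/*
+	 * Drain pending MBOs into the user buffer; fh->offs tracks how much
+	 * of the current MBO has already been consumed so that a partially
+	 * read MBO stays at the head of the list for the next read() call.
+	 */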
+	while (count > 0 && data_ready(mdev)) {
+		struct mbo *const mbo = get_top_mbo(mdev);
+		int const rem = mbo->processed_length - fh->offs;
+		int const cnt = rem < count ? rem : count;
+
+		if (copy_to_user(buf, mbo->virt_address + fh->offs, cnt)) {
+			pr_err("read: copy_to_user failed\n");
+			if (!ret)
+				ret = -EFAULT;
+			return ret;
+		}
+
+		fh->offs += cnt;
+		count -= cnt;
+		buf += cnt;
+		ret += cnt;
+
+		if (cnt >= rem) {
+			fh->offs = 0;
+			spin_lock(&mdev->list_lock);
+			list_del(&mbo->list);
+			spin_unlock(&mdev->list_lock);
+			most_put_mbo(mbo);
+		}
+	}
+	return ret;
+}
+
+static unsigned int aim_vdev_poll(struct file *filp, poll_table *wait)
+{
+	struct aim_fh *fh = filp->private_data;
+	struct most_video_dev *mdev = fh->mdev;
+	unsigned int mask = 0;
+
+	/* only wait if no data is available */
+	if (!data_ready(mdev))
+		poll_wait(filp, &mdev->wait_data, wait);
+	if (data_ready(mdev))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
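+/*
+ * The driver exposes a single fixed capture format: an MPEG transport stream
+ * (V4L2_PIX_FMT_MPEG) with sizeimage set to two 188-byte TS packets and
+ * placeholder 8x8 width/height values.
+ */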
+static void aim_set_format_struct(struct v4l2_format *f)
+{
+	f->fmt.pix.width = 8;
+	f->fmt.pix.height = 8;
+	f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+	f->fmt.pix.bytesperline = 0;
+	f->fmt.pix.sizeimage = 188 * 2;
+	f->fmt.pix.colorspace = V4L2_COLORSPACE_REC709;
+	f->fmt.pix.field = V4L2_FIELD_NONE;
+	f->fmt.pix.priv = 0;
+}
+
+static int aim_set_format(struct most_video_dev *mdev, unsigned int cmd,
+			  struct v4l2_format *format)
+{
+#if 0
+	u32 const pixfmt = format->fmt.pix.pixelformat;
+	const char *fmt;
+
+	if (pixfmt != V4L2_PIX_FMT_MPEG) {
+		if (cmd == VIDIOC_TRY_FMT)
+			fmt = KERN_ERR "try %c%c%c%c failed\n";
+		else
+			fmt = KERN_ERR "set %c%c%c%c failed\n";
+	} else {
+		if (cmd == VIDIOC_TRY_FMT)
+			fmt = KERN_ERR "try %c%c%c%c\n";
+		else
+			fmt = KERN_ERR "set %c%c%c%c\n";
+	}
+	printk(fmt,
+	       (pixfmt) & 255,
+	       (pixfmt >> 8) & 255,
+	       (pixfmt >> 16) & 255,
+	       (pixfmt >> 24) & 255);
+#endif
+
+	if (format->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG)
+		return -EINVAL;
+
+	if (cmd == VIDIOC_TRY_FMT)
+		return 0;
+
+	aim_set_format_struct(format);
+
+	return 0;
+}
+
+
+static int vidioc_querycap(struct file *file, void  *priv,
+			   struct v4l2_capability *cap)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	pr_info("vidioc_querycap()\n");
+
+	strlcpy(cap->driver, "v4l2_most_aim", sizeof(cap->driver));
+	strlcpy(cap->card, "my_card", sizeof(cap->card));
+	snprintf(cap->bus_info, sizeof(cap->bus_info),
+		 "%s", mdev->iface->description);
+
+	cap->capabilities =
+		V4L2_CAP_READWRITE |
+		V4L2_CAP_TUNER |
+		V4L2_CAP_VIDEO_CAPTURE;
+	return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv,
+				   struct v4l2_fmtdesc *f)
+{
+	pr_info("vidioc_enum_fmt_vid_cap() %d\n", f->index);
+
+	if (f->index)
+		return -EINVAL;
+
+	strcpy(f->description, "MPEG");
+	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	f->flags = V4L2_FMT_FLAG_COMPRESSED;
+	f->pixelformat = V4L2_PIX_FMT_MPEG;
+
+	return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	pr_info("vidioc_g_fmt_vid_cap()\n");
+
+	aim_set_format_struct(f);
+	return 0;
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+				  struct v4l2_format *f)
+{
+	struct aim_fh *fh  = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	return aim_set_format(mdev, VIDIOC_TRY_FMT, f);
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+				struct v4l2_format *f)
+{
+	struct aim_fh *fh  = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	return aim_set_format(mdev, VIDIOC_S_FMT, f);
+}
+
+static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+{
+	pr_info("vidioc_g_std()\n");
+
+	*norm = V4L2_STD_UNKNOWN;
+	return 0;
+}
+
+static int vidioc_enum_input(struct file *file, void *priv,
+			     struct v4l2_input *input)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	if (input->index >= V4L2_AIM_MAX_INPUT)
+		return -EINVAL;
+
+	strcpy(input->name, "MOST Video");
+	input->type |= V4L2_INPUT_TYPE_CAMERA;
+	input->audioset = 0;
+
+	input->std = mdev->vdev->tvnorms;
+
+	return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+	*i = mdev->ctrl_input;
+	return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *priv, unsigned int index)
+{
+	struct aim_fh *fh = priv;
+	struct most_video_dev *mdev = fh->mdev;
+
+	pr_info("vidioc_s_input(%d)\n", index);
+
+	if (index >= V4L2_AIM_MAX_INPUT)
+		return -EINVAL;
+	mdev->ctrl_input = index;
+	return 0;
+}
+
+static struct v4l2_file_operations aim_fops = {
+	.owner      = THIS_MODULE,
+	.open       = aim_vdev_open,
+	.release    = aim_vdev_close,
+	.read       = aim_vdev_read,
+	.poll       = aim_vdev_poll,
+	.unlocked_ioctl = video_ioctl2,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+	.vidioc_querycap            = vidioc_querycap,
+	.vidioc_enum_fmt_vid_cap    = vidioc_enum_fmt_vid_cap,
+	.vidioc_g_fmt_vid_cap       = vidioc_g_fmt_vid_cap,
+	.vidioc_try_fmt_vid_cap     = vidioc_try_fmt_vid_cap,
+	.vidioc_s_fmt_vid_cap       = vidioc_s_fmt_vid_cap,
+	.vidioc_g_std               = vidioc_g_std,
+	.vidioc_enum_input          = vidioc_enum_input,
+	.vidioc_g_input             = vidioc_g_input,
+	.vidioc_s_input             = vidioc_s_input,
+};
+
+static const struct video_device aim_videodev_template = {
+	.fops = &aim_fops,
+	.release = video_device_release,
+	.ioctl_ops = &video_ioctl_ops,
+	.tvnorms = V4L2_STD_UNKNOWN,
+};
+
+/**************************************************************************/
+
+static struct most_video_dev *get_aim_dev(
+	struct most_interface *iface, int channel_idx)
+{
+	struct most_video_dev *mdev, *tmp;
+
+	spin_lock(&list_lock);
+	list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+		if (mdev->iface == iface && mdev->ch_idx == channel_idx) {
+			spin_unlock(&list_lock);
+			return mdev;
+		}
+	}
+	spin_unlock(&list_lock);
+	return NULL;
+}
+
+static int aim_rx_data(struct mbo *mbo)
+{
+	struct most_video_dev *mdev =
+		get_aim_dev(mbo->ifp, mbo->hdm_channel_id);
+
+	if (!mdev)
+		return -EIO;
+
+	spin_lock(&mdev->list_lock);
+	if (unlikely(mdev->mute)) {
+		spin_unlock(&mdev->list_lock);
+		return -EIO;
+	}
+
+	list_add_tail(&mbo->list, &mdev->pending_mbos);
+	spin_unlock(&mdev->list_lock);
+	wake_up_interruptible(&mdev->wait_data);
+	return 0;
+}
+
+static int aim_register_videodev(struct most_video_dev *mdev)
+{
+	int retval = -ENOMEM;
+	int ret;
+
+	pr_info("aim_register_videodev()\n");
+
+	init_waitqueue_head(&mdev->wait_data);
+
+	/* allocate and fill v4l2 video struct */
+	mdev->vdev = video_device_alloc();
+	if (!mdev->vdev)
+		return -ENOMEM;
+
+	/* Fill the video capture device struct */
+	*mdev->vdev = aim_videodev_template;
+	mdev->vdev->v4l2_dev = &mdev->v4l2_dev;
+	mdev->vdev->lock = &mdev->lock;
+	strcpy(mdev->vdev->name, "most v4l2 aim video");
+
+	/* Register the v4l2 device */
+	video_set_drvdata(mdev->vdev, mdev);
+	retval = video_register_device(mdev->vdev, VFL_TYPE_GRABBER, -1);
+	if (retval != 0) {
+		pr_err("video_register_device failed (%d)\n", retval);
+		ret = -ENODEV;
+		goto err_vbi_dev;
+	}
+
+	return 0;
+
+err_vbi_dev:
+	video_device_release(mdev->vdev);
+	return ret;
+}
+
+static void aim_unregister_videodev(struct most_video_dev *mdev)
+{
+	pr_info("aim_unregister_videodev()\n");
+
+	video_unregister_device(mdev->vdev);
+}
+
+
+static void aim_v4l2_dev_release(struct v4l2_device *v4l2_dev)
+{
+	struct most_video_dev *mdev =
+		container_of(v4l2_dev, struct most_video_dev, v4l2_dev);
+
+	v4l2_device_unregister(v4l2_dev);
+	kfree(mdev);
+}
+
+static int aim_probe_channel(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *ccfg,
+			     struct kobject *parent, char *name)
+{
+	int ret;
+	struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+	pr_info("aim_probe_channel()\n");
+
+	if (mdev) {
+		pr_err("channel already linked\n");
+		return -EEXIST;
+	}
+
+	if (ccfg->direction != MOST_CH_RX) {
+		pr_err("wrong direction, expect rx\n");
+		return -EINVAL;
+	}
+
+	if (ccfg->data_type != MOST_CH_SYNC &&
+	    ccfg->data_type != MOST_CH_ISOC_AVP) {
+		pr_err("wrong channel type, expect sync or isoc_avp\n");
+		return -EINVAL;
+	}
+
+	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		return -ENOMEM;
+
+	mutex_init(&mdev->lock);
+	atomic_set(&mdev->access_ref, -1);
+	spin_lock_init(&mdev->list_lock);
+	INIT_LIST_HEAD(&mdev->pending_mbos);
+	mdev->iface = iface;
+	mdev->ch_idx = channel_idx;
+	mdev->v4l2_dev.release = aim_v4l2_dev_release;
+
+	/* Create the v4l2_device */
+	strlcpy(mdev->v4l2_dev.name, "most_video_device",
+		sizeof(mdev->v4l2_dev.name));
+	ret = v4l2_device_register(NULL, &mdev->v4l2_dev);
+	if (ret) {
+		pr_err("v4l2_device_register() failed\n");
+		kfree(mdev);
+		return ret;
+	}
+
+	ret = aim_register_videodev(mdev);
+	if (ret)
+		goto err_unreg;
+
+	spin_lock(&list_lock);
+	list_add(&mdev->list, &video_devices);
+	spin_unlock(&list_lock);
+	return 0;
+
+err_unreg:
+	v4l2_device_disconnect(&mdev->v4l2_dev);
+	v4l2_device_put(&mdev->v4l2_dev);
+	return ret;
+}
+
+static int aim_disconnect_channel(struct most_interface *iface,
+				  int channel_idx)
+{
+	struct most_video_dev *mdev = get_aim_dev(iface, channel_idx);
+
+	pr_info("aim_disconnect_channel()\n");
+
+	if (!mdev) {
+		pr_err("no such channel is linked\n");
+		return -ENOENT;
+	}
+
+	spin_lock(&list_lock);
+	list_del(&mdev->list);
+	spin_unlock(&list_lock);
+
+	aim_unregister_videodev(mdev);
+	v4l2_device_disconnect(&mdev->v4l2_dev);
+	v4l2_device_put(&mdev->v4l2_dev);
+	return 0;
+}
+
+static int __init aim_init(void)
+{
+	spin_lock_init(&list_lock);
+
+	aim_info.name = "v4l";
+	aim_info.probe_channel = aim_probe_channel;
+	aim_info.disconnect_channel = aim_disconnect_channel;
+	aim_info.rx_completion = aim_rx_data;
+	return most_register_aim(&aim_info);
+}
+
+static void __exit aim_exit(void)
+{
+	struct most_video_dev *mdev, *tmp;
+
+	/*
+	 * Since mostcore currently doesn't call disconnect_channel()
+	 * for channels that are still linked when most_deregister_aim()
+	 * is called, we simulate that call here.
+	 * This must be fixed in core.
+	 */
+	spin_lock(&list_lock);
+	list_for_each_entry_safe(mdev, tmp, &video_devices, list) {
+		list_del(&mdev->list);
+		spin_unlock(&list_lock);
+
+		aim_unregister_videodev(mdev);
+		v4l2_device_disconnect(&mdev->v4l2_dev);
+		v4l2_device_put(&mdev->v4l2_dev);
+		spin_lock(&list_lock);
+	}
+	spin_unlock(&list_lock);
+
+	most_deregister_aim(&aim_info);
+	BUG_ON(!list_empty(&video_devices));
+}
+
+module_init(aim_init);
+module_exit(aim_exit);
+
+MODULE_DESCRIPTION("V4L2 Application Interface Module for MostCore");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
new file mode 100644
index 0000000..1d4ad1d
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -0,0 +1,16 @@
+#
+# MediaLB configuration
+#
+
+config HDM_DIM2
+	tristate "DIM2 HDM"
+	depends on AIM_NETWORK
+
+	---help---
+	  Say Y here if you want to connect via MediaLB to a network transceiver.
+	  This device driver is platform dependent and needs an additional
+	  platform driver to be installed. For more information, contact the
+	  maintainer of this driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hdm_dim2.
diff --git a/drivers/staging/most/hdm-dim2/Makefile b/drivers/staging/most/hdm-dim2/Makefile
new file mode 100644
index 0000000..6bbee87
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HDM_DIM2) += hdm_dim2.o
+
+hdm_dim2-objs := dim2_hdm.o dim2_hal.o dim2_sysfs.o
+ccflags-y += -Idrivers/staging/most/mostcore/
+ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
new file mode 100644
index 0000000..314f7de
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_errors.h
@@ -0,0 +1,67 @@
+/*
+ * dim2_errors.h - Definitions of errors for DIM2 HAL API
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _MOST_DIM_ERRORS_H
+#define _MOST_DIM_ERRORS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * MOST DIM errors.
+ */
+enum dim_errors_t {
+	/** Not an error */
+	DIM_NO_ERROR = 0,
+
+	/** Bad base address for DIM2 IP */
+	DIM_INIT_ERR_DIM_ADDR = 0x10,
+
+	/** Bad MediaLB clock */
+	DIM_INIT_ERR_MLB_CLOCK,
+
+	/** Bad channel address */
+	DIM_INIT_ERR_CHANNEL_ADDRESS,
+
+	/** Out of DBR memory */
+	DIM_INIT_ERR_OUT_OF_MEMORY,
+
+	/** DIM API is called while DIM is not initialized successfully */
+	DIM_ERR_DRIVER_NOT_INITIALIZED = 0x20,
+
+	/**
+	 * Configuration does not respect hardware limitations
+	 * for isochronous or synchronous channels
+	 */
+	DIM_ERR_BAD_CONFIG,
+
+	/**
+	 * Buffer size does not respect hardware limitations
+	 * for isochronous or synchronous channels
+	 */
+	DIM_ERR_BAD_BUFFER_SIZE,
+
+	DIM_ERR_UNDERFLOW,
+
+	DIM_ERR_OVERFLOW,
+};
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.c b/drivers/staging/most/hdm-dim2/dim2_hal.c
new file mode 100644
index 0000000..a54cf2c
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.c
@@ -0,0 +1,919 @@
+/*
+ * dim2_hal.c - DIM2 HAL implementation
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#include "dim2_hal.h"
+#include "dim2_errors.h"
+#include "dim2_reg.h"
+#include <linux/stddef.h>
+
+
+/*
+ * The number of frames per sub-buffer for synchronous channels.
+ * Allowed values: 1, 2, 4, 8, 16, 32, 64.
+ */
+#define FRAMES_PER_SUBBUFF 16
+
+/*
+ * Size factor for synchronous DBR buffer.
+ * Minimal value is 4*FRAMES_PER_SUBBUFF.
+ */
+#define SYNC_DBR_FACTOR (4u * (u16)FRAMES_PER_SUBBUFF)
+
+/*
+ * Size factor for isochronous DBR buffer.
+ * Minimal value is 3.
+ */
+#define ISOC_DBR_FACTOR 3u
+
+/*
+ * Number of 32-bit units for DBR map.
+ *
+ * 1: block size is 512, max allocation is 16K
+ * 2: block size is 256, max allocation is 8K
+ * 4: block size is 128, max allocation is 4K
+ * 8: block size is 64, max allocation is 2K
+ *
+ * Min allocated space is block size.
+ * Max possible allocated space is 32 blocks.
+ */
+#define DBR_MAP_SIZE 2
+
+
+/* -------------------------------------------------------------------------- */
+/* not configurable area */
+
+#define CDT 0x00
+#define ADT 0x40
+#define MLB_CAT 0x80
+#define AHB_CAT 0x88
+
+#define DBR_SIZE  (16*1024) /* specified by IP */
+#define DBR_BLOCK_SIZE  (DBR_SIZE / 32 / DBR_MAP_SIZE)
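+
+/*
+ * With DBR_MAP_SIZE == 2 this yields a block size of 16K / 32 / 2 = 256 bytes
+ * and a maximum allocation of 32 blocks, i.e. 8K, matching the DBR_MAP_SIZE
+ * table above.
+ */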
+
+
+/* -------------------------------------------------------------------------- */
+/* generic helper functions and macros */
+
+#define MLBC0_FCNT_VAL_MACRO(n) MLBC0_FCNT_VAL_ ## n ## FPSB
+#define MLBC0_FCNT_VAL(fpsb) MLBC0_FCNT_VAL_MACRO(fpsb)
+
+static inline u32 bit_mask(u8 position)
+{
+	return (u32)1 << position;
+}
+
+static inline bool dim_on_error(u8 error_id, const char *error_message)
+{
+	DIMCB_OnError(error_id, error_message);
+	return false;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* types and local variables */
+
+struct lld_global_vars_t {
+	bool dim_is_initialized;
+	bool mcm_is_initialized;
+	struct dim2_regs *dim2; /* DIM2 core base address */
+	u32 dbr_map[DBR_MAP_SIZE];
+};
+
+static struct lld_global_vars_t g = { false };
+
+
+/* -------------------------------------------------------------------------- */
+
+static int dbr_get_mask_size(u16 size)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		if (size <= (DBR_BLOCK_SIZE << i))
+			return 1 << i;
+	return 0;
+}
+
+/**
+ * Allocates DBR memory.
+ * @param size Size of the memory to allocate.
+ * @return Offset into DBR memory on success or DBR_SIZE if out of memory.
+ */
+static int alloc_dbr(u16 size)
+{
+	int mask_size;
+	int i, block_idx = 0;
+
+	if (size <= 0)
+		return DBR_SIZE; /* out of memory */
+
+	mask_size = dbr_get_mask_size(size);
+	if (mask_size == 0)
+		return DBR_SIZE; /* out of memory */
+
+	for (i = 0; i < DBR_MAP_SIZE; i++) {
+		u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+		u32 mask = ~((~(u32)0) << blocks);
+
+		do {
+			if ((g.dbr_map[i] & mask) == 0) {
+				g.dbr_map[i] |= mask;
+				return block_idx * DBR_BLOCK_SIZE;
+			}
+			block_idx += mask_size;
+			/* shift left in two steps to handle the case mask_size == 32 */
+			mask <<= mask_size - 1;
+		} while ((mask <<= 1) != 0);
+	}
+
+	return DBR_SIZE; /* out of memory */
+}
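+
+/*
+ * Illustrative example: with 256-byte blocks a request for 600 bytes gives
+ * mask_size = 4 and blocks = 3, so the bitmap is scanned in steps of four
+ * blocks for three consecutive free blocks, which are then marked as used;
+ * free_dbr() below clears the same bits again.
+ */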
+
+static void free_dbr(int offs, int size)
+{
+	int block_idx = offs / DBR_BLOCK_SIZE;
+	u32 const blocks = (size + DBR_BLOCK_SIZE - 1) / DBR_BLOCK_SIZE;
+	u32 mask = ~((~(u32)0) << blocks);
+
+	mask <<= block_idx % 32;
+	g.dbr_map[block_idx / 32] &= ~mask;
+}
+
+/* -------------------------------------------------------------------------- */
+
+static u32 dim2_read_ctr(u32 ctr_addr, u16 mdat_idx)
+{
+	DIMCB_IoWrite(&g.dim2->MADR, ctr_addr);
+
+	/* wait till transfer is completed */
+	while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
+		continue;
+
+	DIMCB_IoWrite(&g.dim2->MCTL, 0);   /* clear transfer complete */
+
+	return DIMCB_IoRead((&g.dim2->MDAT0) + mdat_idx);
+}
+
+static void dim2_write_ctr_mask(u32 ctr_addr, const u32 *mask, const u32 *value)
+{
+	enum { MADR_WNR_BIT = 31 };
+
+	DIMCB_IoWrite(&g.dim2->MCTL, 0);   /* clear transfer complete */
+
+	if (mask[0] != 0)
+		DIMCB_IoWrite(&g.dim2->MDAT0, value[0]);
+	if (mask[1] != 0)
+		DIMCB_IoWrite(&g.dim2->MDAT1, value[1]);
+	if (mask[2] != 0)
+		DIMCB_IoWrite(&g.dim2->MDAT2, value[2]);
+	if (mask[3] != 0)
+		DIMCB_IoWrite(&g.dim2->MDAT3, value[3]);
+
+	DIMCB_IoWrite(&g.dim2->MDWE0, mask[0]);
+	DIMCB_IoWrite(&g.dim2->MDWE1, mask[1]);
+	DIMCB_IoWrite(&g.dim2->MDWE2, mask[2]);
+	DIMCB_IoWrite(&g.dim2->MDWE3, mask[3]);
+
+	DIMCB_IoWrite(&g.dim2->MADR, bit_mask(MADR_WNR_BIT) | ctr_addr);
+
+	/* wait till transfer is completed */
+	while ((DIMCB_IoRead(&g.dim2->MCTL) & 1) != 1)
+		continue;
+
+	DIMCB_IoWrite(&g.dim2->MCTL, 0);   /* clear transfer complete */
+}
+
+static inline void dim2_write_ctr(u32 ctr_addr, const u32 *value)
+{
+	u32 const mask[4] = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+	dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static inline void dim2_clear_ctr(u32 ctr_addr)
+{
+	u32 const value[4] = { 0, 0, 0, 0 };
+
+	dim2_write_ctr(ctr_addr, value);
+}
+
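+/*
+ * Each CAT entry is 16 bits wide and eight entries share one CTR entry (four
+ * 32-bit data words), so for channel address ch_addr the CTR address is
+ * cat_base + ch_addr / 8, the data word index is (ch_addr % 8) / 2 and the
+ * 16-bit half-word is selected by (ch_addr % 2) * 16; e.g. ch_addr 5 lands in
+ * the upper half of word 2 of the first CAT entry.
+ */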
+static void dim2_configure_cat(u8 cat_base, u8 ch_addr, u8 ch_type,
+			       bool read_not_write, bool sync_mfe)
+{
+	u16 const cat =
+		(read_not_write << CAT_RNW_BIT) |
+		(ch_type << CAT_CT_SHIFT) |
+		(ch_addr << CAT_CL_SHIFT) |
+		(sync_mfe << CAT_MFE_BIT) |
+		(false << CAT_MT_BIT) |
+		(true << CAT_CE_BIT);
+	u8 const ctr_addr = cat_base + ch_addr / 8;
+	u8 const idx = (ch_addr % 8) / 2;
+	u8 const shift = (ch_addr % 2) * 16;
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 value[4] = { 0, 0, 0, 0 };
+
+	mask[idx] = (u32)0xFFFF << shift;
+	value[idx] = cat << shift;
+	dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_clear_cat(u8 cat_base, u8 ch_addr)
+{
+	u8 const ctr_addr = cat_base + ch_addr / 8;
+	u8 const idx = (ch_addr % 8) / 2;
+	u8 const shift = (ch_addr % 2) * 16;
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 value[4] = { 0, 0, 0, 0 };
+
+	mask[idx] = (u32)0xFFFF << shift;
+	dim2_write_ctr_mask(ctr_addr, mask, value);
+}
+
+static void dim2_configure_cdt(u8 ch_addr, u16 dbr_address, u16 hw_buffer_size,
+			       u16 packet_length)
+{
+	u32 cdt[4] = { 0, 0, 0, 0 };
+
+	if (packet_length)
+		cdt[1] = ((packet_length - 1) << CDT1_BS_ISOC_SHIFT);
+
+	cdt[3] =
+		((hw_buffer_size - 1) << CDT3_BD_SHIFT) |
+		(dbr_address << CDT3_BA_SHIFT);
+	dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_clear_cdt(u8 ch_addr)
+{
+	u32 cdt[4] = { 0, 0, 0, 0 };
+
+	dim2_write_ctr(CDT + ch_addr, cdt);
+}
+
+static void dim2_configure_adt(u8 ch_addr)
+{
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	adt[0] =
+		(true << ADT0_CE_BIT) |
+		(true << ADT0_LE_BIT) |
+		(0 << ADT0_PG_BIT);
+
+	dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_clear_adt(u8 ch_addr)
+{
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	dim2_write_ctr(ADT + ch_addr, adt);
+}
+
+static void dim2_start_ctrl_async(u8 ch_addr, u8 idx, u32 buf_addr,
+				  u16 buffer_size)
+{
+	u8 const shift = idx * 16;
+
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	mask[1] =
+		bit_mask(ADT1_PS_BIT + shift) |
+		bit_mask(ADT1_RDY_BIT + shift) |
+		(ADT1_CTRL_ASYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+	adt[1] =
+		(true << (ADT1_PS_BIT + shift)) |
+		(true << (ADT1_RDY_BIT + shift)) |
+		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+	mask[idx + 2] = 0xFFFFFFFF;
+	adt[idx + 2] = buf_addr;
+
+	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+static void dim2_start_isoc_sync(u8 ch_addr, u8 idx, u32 buf_addr,
+				 u16 buffer_size)
+{
+	u8 const shift = idx * 16;
+
+	u32 mask[4] = { 0, 0, 0, 0 };
+	u32 adt[4] = { 0, 0, 0, 0 };
+
+	mask[1] =
+		bit_mask(ADT1_RDY_BIT + shift) |
+		(ADT1_ISOC_SYNC_BD_MASK << (ADT1_BD_SHIFT + shift));
+	adt[1] =
+		(true << (ADT1_RDY_BIT + shift)) |
+		((buffer_size - 1) << (ADT1_BD_SHIFT + shift));
+
+	mask[idx + 2] = 0xFFFFFFFF;
+	adt[idx + 2] = buf_addr;
+
+	dim2_write_ctr_mask(ADT + ch_addr, mask, adt);
+}
+
+
+static void dim2_clear_ctram(void)
+{
+	u32 ctr_addr;
+
+	for (ctr_addr = 0; ctr_addr < 0x90; ctr_addr++)
+		dim2_clear_ctr(ctr_addr);
+}
+
+static void dim2_configure_channel(
+	u8 ch_addr, u8 type, u8 is_tx, u16 dbr_address, u16 hw_buffer_size,
+	u16 packet_length, bool sync_mfe)
+{
+	dim2_configure_cdt(ch_addr, dbr_address, hw_buffer_size, packet_length);
+	dim2_configure_cat(MLB_CAT, ch_addr, type, is_tx ? 1 : 0, sync_mfe);
+
+	dim2_configure_adt(ch_addr);
+	dim2_configure_cat(AHB_CAT, ch_addr, type, is_tx ? 0 : 1, sync_mfe);
+
+	/* unmask interrupt for used channel, enable mlb_sys_int[0] interrupt */
+	DIMCB_IoWrite(&g.dim2->ACMR0,
+		      DIMCB_IoRead(&g.dim2->ACMR0) | bit_mask(ch_addr));
+}
+
+static void dim2_clear_channel(u8 ch_addr)
+{
+	/* mask interrupt for used channel, disable mlb_sys_int[0] interrupt */
+	DIMCB_IoWrite(&g.dim2->ACMR0,
+		      DIMCB_IoRead(&g.dim2->ACMR0) & ~bit_mask(ch_addr));
+
+	dim2_clear_cat(AHB_CAT, ch_addr);
+	dim2_clear_adt(ch_addr);
+
+	dim2_clear_cat(MLB_CAT, ch_addr);
+	dim2_clear_cdt(ch_addr);
+}
+
+/* -------------------------------------------------------------------------- */
+/* channel state helpers */
+
+static void state_init(struct int_ch_state *state)
+{
+	state->request_counter = 0;
+	state->service_counter = 0;
+
+	state->idx1 = 0;
+	state->idx2 = 0;
+	state->level = 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* macro helper functions */
+
+static inline bool check_channel_address(u32 ch_address)
+{
+	return ch_address > 0 && (ch_address % 2) == 0 &&
+	       (ch_address / 2) <= (u32)CAT_CL_MASK;
+}
+
+static inline bool check_packet_length(u32 packet_length)
+{
+	u16 const max_size = ((u16)CDT3_BD_ISOC_MASK + 1u) / ISOC_DBR_FACTOR;
+
+	if (packet_length <= 0)
+		return false; /* too small */
+
+	if (packet_length > max_size)
+		return false; /* too big */
+
+	if (packet_length - 1u > (u32)CDT1_BS_ISOC_MASK)
+		return false; /* too big */
+
+	return true;
+}
+
+static inline bool check_bytes_per_frame(u32 bytes_per_frame)
+{
+	u16 const max_size = ((u16)CDT3_BD_MASK + 1u) / SYNC_DBR_FACTOR;
+
+	if (bytes_per_frame <= 0)
+		return false; /* too small */
+
+	if (bytes_per_frame > max_size)
+		return false; /* too big */
+
+	return true;
+}
+
+static inline u16 norm_ctrl_async_buffer_size(u16 buf_size)
+{
+	u16 const max_size = (u16)ADT1_CTRL_ASYNC_BD_MASK + 1u;
+
+	if (buf_size > max_size)
+		return max_size;
+
+	return buf_size;
+}
+
+static inline u16 norm_isoc_buffer_size(u16 buf_size, u16 packet_length)
+{
+	u16 n;
+	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+
+	if (buf_size > max_size)
+		buf_size = max_size;
+
+	n = buf_size / packet_length;
+
+	if (n < 2u)
+		return 0; /* too small buffer for given packet_length */
+
+	return packet_length * n;
+}
+
+static inline u16 norm_sync_buffer_size(u16 buf_size, u16 bytes_per_frame)
+{
+	u16 n;
+	u16 const max_size = (u16)ADT1_ISOC_SYNC_BD_MASK + 1u;
+	u32 const unit = bytes_per_frame * (u16)FRAMES_PER_SUBBUFF;
+
+	if (buf_size > max_size)
+		buf_size = max_size;
+
+	n = buf_size / unit;
+
+	if (n < 1u)
+		return 0; /* too small buffer for given bytes_per_frame */
+
+	return unit * n;
+}
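+
+/*
+ * Illustrative example: for a synchronous channel with 4 bytes per frame and
+ * FRAMES_PER_SUBBUFF == 16 the unit is 64 bytes, so a requested buffer size
+ * of 200 bytes (within the hardware limit) is rounded down to 192 bytes;
+ * anything below 64 bytes yields 0.
+ */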
+
+static void dim2_cleanup(void)
+{
+	/* disable MediaLB */
+	DIMCB_IoWrite(&g.dim2->MLBC0, false << MLBC0_MLBEN_BIT);
+
+	dim2_clear_ctram();
+
+	/* disable mlb_int interrupt */
+	DIMCB_IoWrite(&g.dim2->MIEN, 0);
+
+	/* clear status for all dma channels */
+	DIMCB_IoWrite(&g.dim2->ACSR0, 0xFFFFFFFF);
+	DIMCB_IoWrite(&g.dim2->ACSR1, 0xFFFFFFFF);
+
+	/* mask interrupts for all channels */
+	DIMCB_IoWrite(&g.dim2->ACMR0, 0);
+	DIMCB_IoWrite(&g.dim2->ACMR1, 0);
+}
+
+static void dim2_initialize(bool enable_6pin, u8 mlb_clock)
+{
+	dim2_cleanup();
+
+	/* configure and enable MediaLB */
+	DIMCB_IoWrite(&g.dim2->MLBC0,
+		      enable_6pin << MLBC0_MLBPEN_BIT |
+		      mlb_clock << MLBC0_MLBCLK_SHIFT |
+		      MLBC0_FCNT_VAL(FRAMES_PER_SUBBUFF) << MLBC0_FCNT_SHIFT |
+		      true << MLBC0_MLBEN_BIT);
+
+	/* activate all HBI channels */
+	DIMCB_IoWrite(&g.dim2->HCMR0, 0xFFFFFFFF);
+	DIMCB_IoWrite(&g.dim2->HCMR1, 0xFFFFFFFF);
+
+	/* enable HBI */
+	DIMCB_IoWrite(&g.dim2->HCTL, bit_mask(HCTL_EN_BIT));
+
+	/* configure DMA */
+	DIMCB_IoWrite(&g.dim2->ACTL,
+		      ACTL_DMA_MODE_VAL_DMA_MODE_1 << ACTL_DMA_MODE_BIT |
+		      true << ACTL_SCE_BIT);
+
+#if 0
+	DIMCB_IoWrite(&g.dim2->MIEN,
+		      bit_mask(MIEN_CTX_BREAK_BIT) |
+		      bit_mask(MIEN_CTX_PE_BIT) |
+		      bit_mask(MIEN_CTX_DONE_BIT) |
+		      bit_mask(MIEN_CRX_BREAK_BIT) |
+		      bit_mask(MIEN_CRX_PE_BIT) |
+		      bit_mask(MIEN_CRX_DONE_BIT) |
+		      bit_mask(MIEN_ATX_BREAK_BIT) |
+		      bit_mask(MIEN_ATX_PE_BIT) |
+		      bit_mask(MIEN_ATX_DONE_BIT) |
+		      bit_mask(MIEN_ARX_BREAK_BIT) |
+		      bit_mask(MIEN_ARX_PE_BIT) |
+		      bit_mask(MIEN_ARX_DONE_BIT));
+#endif
+}
+
+static bool dim2_is_mlb_locked(void)
+{
+	u32 const mask0 = bit_mask(MLBC0_MLBLK_BIT);
+	u32 const mask1 = bit_mask(MLBC1_CLKMERR_BIT) |
+			  bit_mask(MLBC1_LOCKERR_BIT);
+	u32 const c1 = DIMCB_IoRead(&g.dim2->MLBC1);
+	u32 const nda_mask = (u32)MLBC1_NDA_MASK << MLBC1_NDA_SHIFT;
+
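+	/*
+	 * Write back only the NDA field; this clears the sticky clock/lock
+	 * error flags so the subsequent read reflects the current state.
+	 */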
+	DIMCB_IoWrite(&g.dim2->MLBC1, c1 & nda_mask);
+	return (DIMCB_IoRead(&g.dim2->MLBC1) & mask1) == 0 &&
+	       (DIMCB_IoRead(&g.dim2->MLBC0) & mask0) != 0;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* channel help routines */
+
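+/*
+ * Each ADT entry holds two buffer descriptors; their RDY/DNE/ERR flags live
+ * in the two 16-bit halves of ADT word 1, so idx selects which descriptor
+ * is checked and acknowledged here.
+ */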
+static inline bool service_channel(u8 ch_addr, u8 idx)
+{
+	u8 const shift = idx * 16;
+	u32 const adt1 = dim2_read_ctr(ADT + ch_addr, 1);
+
+	if (((adt1 >> (ADT1_DNE_BIT + shift)) & 1) == 0)
+		return false;
+
+	{
+		u32 mask[4] = { 0, 0, 0, 0 };
+		u32 adt_w[4] = { 0, 0, 0, 0 };
+
+		mask[1] =
+			bit_mask(ADT1_DNE_BIT + shift) |
+			bit_mask(ADT1_ERR_BIT + shift) |
+			bit_mask(ADT1_RDY_BIT + shift);
+		dim2_write_ctr_mask(ADT + ch_addr, mask, adt_w);
+	}
+
+	/* clear channel status bit */
+	DIMCB_IoWrite(&g.dim2->ACSR0, bit_mask(ch_addr));
+
+	return true;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* channel init routines */
+
+static void isoc_init(struct dim_channel *ch, u8 ch_addr, u16 packet_length)
+{
+	state_init(&ch->state);
+
+	ch->addr = ch_addr;
+
+	ch->packet_length = packet_length;
+	ch->bytes_per_frame = 0;
+	ch->done_sw_buffers_number = 0;
+}
+
+static void sync_init(struct dim_channel *ch, u8 ch_addr, u16 bytes_per_frame)
+{
+	state_init(&ch->state);
+
+	ch->addr = ch_addr;
+
+	ch->packet_length = 0;
+	ch->bytes_per_frame = bytes_per_frame;
+	ch->done_sw_buffers_number = 0;
+}
+
+static void channel_init(struct dim_channel *ch, u8 ch_addr)
+{
+	state_init(&ch->state);
+
+	ch->addr = ch_addr;
+
+	ch->packet_length = 0;
+	ch->bytes_per_frame = 0;
+	ch->done_sw_buffers_number = 0;
+}
+
+/* returns true if channel interrupt state is cleared */
+static bool channel_service_interrupt(struct dim_channel *ch)
+{
+	struct int_ch_state *const state = &ch->state;
+
+	if (!service_channel(ch->addr, state->idx2))
+		return false;
+
+	state->idx2 ^= 1;
+	state->request_counter++;
+	return true;
+}
+
+static bool channel_start(struct dim_channel *ch, u32 buf_addr, u16 buf_size)
+{
+	struct int_ch_state *const state = &ch->state;
+
+	if (buf_size <= 0)
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE, "Bad buffer size");
+
+	if (ch->packet_length == 0 && ch->bytes_per_frame == 0 &&
+	    buf_size != norm_ctrl_async_buffer_size(buf_size))
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+				    "Bad control/async buffer size");
+
+	if (ch->packet_length &&
+	    buf_size != norm_isoc_buffer_size(buf_size, ch->packet_length))
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+				    "Bad isochronous buffer size");
+
+	if (ch->bytes_per_frame &&
+	    buf_size != norm_sync_buffer_size(buf_size, ch->bytes_per_frame))
+		return dim_on_error(DIM_ERR_BAD_BUFFER_SIZE,
+				    "Bad synchronous buffer size");
+
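+	/* the hardware provides two buffer descriptors per channel, so at
+	 * most two buffers may be queued at a time (double buffering) */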
+	if (state->level >= 2u)
+		return dim_on_error(DIM_ERR_OVERFLOW, "Channel overflow");
+
+	++state->level;
+
+	if (ch->packet_length || ch->bytes_per_frame)
+		dim2_start_isoc_sync(ch->addr, state->idx1, buf_addr, buf_size);
+	else
+		dim2_start_ctrl_async(ch->addr, state->idx1, buf_addr, buf_size);
+	state->idx1 ^= 1;
+
+	return true;
+}
+
+static u8 channel_service(struct dim_channel *ch)
+{
+	struct int_ch_state *const state = &ch->state;
+
+	if (state->service_counter != state->request_counter) {
+		state->service_counter++;
+		if (state->level == 0)
+			return DIM_ERR_UNDERFLOW;
+
+		--state->level;
+		ch->done_sw_buffers_number++;
+	}
+
+	return DIM_NO_ERROR;
+}
+
+static bool channel_detach_buffers(struct dim_channel *ch, u16 buffers_number)
+{
+	if (buffers_number > ch->done_sw_buffers_number)
+		return dim_on_error(DIM_ERR_UNDERFLOW, "Channel underflow");
+
+	ch->done_sw_buffers_number -= buffers_number;
+	return true;
+}
+
+
+/* -------------------------------------------------------------------------- */
+/* API */
+
+u8 DIM_Startup(void *dim_base_address, u32 mlb_clock)
+{
+	g.dim_is_initialized = false;
+
+	if (!dim_base_address)
+		return DIM_INIT_ERR_DIM_ADDR;
+
+	/* MediaLB clock: 0 - 256 fs, 1 - 512 fs, 2 - 1024 fs, 3 - 2048 fs */
+	/* MediaLB clock: 4 - 3072 fs, 5 - 4096 fs, 6 - 6144 fs, 7 - 8192 fs */
+	if (mlb_clock >= 8)
+		return DIM_INIT_ERR_MLB_CLOCK;
+
+	g.dim2 = dim_base_address;
+	g.dbr_map[0] = g.dbr_map[1] = 0;
+
+	dim2_initialize(mlb_clock >= 3, mlb_clock);
+
+	g.dim_is_initialized = true;
+
+	return DIM_NO_ERROR;
+}
+
+void DIM_Shutdown(void)
+{
+	g.dim_is_initialized = false;
+	dim2_cleanup();
+}
+
+bool DIM_GetLockState(void)
+{
+	return dim2_is_mlb_locked();
+}
+
+static u8 init_ctrl_async(struct dim_channel *ch, u8 type, u8 is_tx,
+			  u16 ch_address, u16 hw_buffer_size)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (!check_channel_address(ch_address))
+		return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+	ch->dbr_size = hw_buffer_size;
+	ch->dbr_addr = alloc_dbr(ch->dbr_size);
+	if (ch->dbr_addr >= DBR_SIZE)
+		return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+	channel_init(ch, ch_address / 2);
+
+	dim2_configure_channel(ch->addr, type, is_tx,
+			       ch->dbr_addr, ch->dbr_size, 0, false);
+
+	return DIM_NO_ERROR;
+}
+
+u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size)
+{
+	return norm_ctrl_async_buffer_size(buf_size);
+}
+
+/**
+ * Retrieves the maximum valid buffer size for the isochronous data type that
+ * conforms to the given packet length and does not exceed the given buffer
+ * size.
+ *
+ * Returns the non-zero corrected buffer size or zero on error.
+ */
+u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length)
+{
+	if (!check_packet_length(packet_length))
+		return 0;
+
+	return norm_isoc_buffer_size(buf_size, packet_length);
+}
+
+/**
+ * Retrieves the maximum valid buffer size for the synchronous data type that
+ * conforms to the given bytes per frame and does not exceed the given buffer
+ * size.
+ *
+ * Returns the non-zero corrected buffer size or zero on error.
+ */
+u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame)
+{
+	if (!check_bytes_per_frame(bytes_per_frame))
+		return 0;
+
+	return norm_sync_buffer_size(buf_size, bytes_per_frame);
+}
+
+u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		   u16 max_buffer_size)
+{
+	return init_ctrl_async(ch, CAT_CT_VAL_CONTROL, is_tx, ch_address,
+			       max_buffer_size * 2);
+}
+
+u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		 u16 max_buffer_size)
+{
+	return init_ctrl_async(ch, CAT_CT_VAL_ASYNC, is_tx, ch_address,
+			       max_buffer_size * 2);
+}
+
+u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		u16 packet_length)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (!check_channel_address(ch_address))
+		return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+	if (!check_packet_length(packet_length))
+		return DIM_ERR_BAD_CONFIG;
+
+	ch->dbr_size = packet_length * ISOC_DBR_FACTOR;
+	ch->dbr_addr = alloc_dbr(ch->dbr_size);
+	if (ch->dbr_addr >= DBR_SIZE)
+		return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+	isoc_init(ch, ch_address / 2, packet_length);
+
+	dim2_configure_channel(ch->addr, CAT_CT_VAL_ISOC, is_tx, ch->dbr_addr,
+			       ch->dbr_size, packet_length, false);
+
+	return DIM_NO_ERROR;
+}
+
+u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		u16 bytes_per_frame)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	if (!check_channel_address(ch_address))
+		return DIM_INIT_ERR_CHANNEL_ADDRESS;
+
+	if (!check_bytes_per_frame(bytes_per_frame))
+		return DIM_ERR_BAD_CONFIG;
+
+	ch->dbr_size = bytes_per_frame * SYNC_DBR_FACTOR;
+	ch->dbr_addr = alloc_dbr(ch->dbr_size);
+	if (ch->dbr_addr >= DBR_SIZE)
+		return DIM_INIT_ERR_OUT_OF_MEMORY;
+
+	sync_init(ch, ch_address / 2, bytes_per_frame);
+
+	dim2_configure_channel(ch->addr, CAT_CT_VAL_SYNC, is_tx,
+			       ch->dbr_addr, ch->dbr_size, 0, true);
+
+	return DIM_NO_ERROR;
+}
+
+u8 DIM_DestroyChannel(struct dim_channel *ch)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	dim2_clear_channel(ch->addr);
+	if (ch->dbr_addr < DBR_SIZE)
+		free_dbr(ch->dbr_addr, ch->dbr_size);
+	ch->dbr_addr = DBR_SIZE;
+
+	return DIM_NO_ERROR;
+}
+
+void DIM_ServiceIrq(struct dim_channel *const *channels)
+{
+	bool state_changed;
+
+	if (!g.dim_is_initialized) {
+		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED,
+			     "DIM is not initialized");
+		return;
+	}
+
+	if (!channels) {
+		dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channels");
+		return;
+	}
+
+	/*
+	 * Use a loop and a flag to make sure the age is toggled back at least
+	 * once; otherwise the interrupt may never come if the hardware raises
+	 * it only on an age change.
+	 *
+	 * This loop runs no more than the number of channels, because
+	 * channel_service_interrupt() does not start the channel again.
+	 */
+	do {
+		struct dim_channel *const *ch = channels;
+
+		state_changed = false;
+
+		while (*ch) {
+			state_changed |= channel_service_interrupt(*ch);
+			++ch;
+		}
+	} while (state_changed);
+
+	/* clear pending Interrupts */
+	DIMCB_IoWrite(&g.dim2->MS0, 0);
+	DIMCB_IoWrite(&g.dim2->MS1, 0);
+}
+
+u8 DIM_ServiceChannel(struct dim_channel *ch)
+{
+	if (!g.dim_is_initialized || !ch)
+		return DIM_ERR_DRIVER_NOT_INITIALIZED;
+
+	return channel_service(ch);
+}
+
+struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
+		struct dim_ch_state_t *state_ptr)
+{
+	if (!ch || !state_ptr)
+		return NULL;
+
+	state_ptr->ready = ch->state.level < 2;
+	state_ptr->done_buffers = ch->done_sw_buffers_number;
+
+	return state_ptr;
+}
+
+bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr, u16 buffer_size)
+{
+	if (!ch)
+		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channel");
+
+	return channel_start(ch, buffer_addr, buffer_size);
+}
+
+bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number)
+{
+	if (!ch)
+		return dim_on_error(DIM_ERR_DRIVER_NOT_INITIALIZED, "Bad channel");
+
+	return channel_detach_buffers(ch, buffers_number);
+}
+
+u32 DIM_ReadRegister(u8 register_index)
+{
+	return DIMCB_IoRead((u32 *)g.dim2 + register_index);
+}
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
new file mode 100644
index 0000000..8929af9
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -0,0 +1,124 @@
+/*
+ * dim2_hal.h - DIM2 HAL interface
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef _DIM2_HAL_H
+#define _DIM2_HAL_H
+
+#include <linux/types.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * The values below are specified in the hardware specification.
+ * So, they should not be changed until the hardware specification changes.
+ */
+enum mlb_clk_speed {
+	CLK_256FS = 0,
+	CLK_512FS = 1,
+	CLK_1024FS = 2,
+	CLK_2048FS = 3,
+	CLK_3072FS = 4,
+	CLK_4096FS = 5,
+	CLK_6144FS = 6,
+	CLK_8192FS = 7,
+};
+
+struct dim_ch_state_t {
+	bool ready; /* Shows readiness to enqueue next buffer */
+	u16 done_buffers; /* Number of completed buffers */
+};
+
+typedef int atomic_counter_t;
+
+struct int_ch_state {
+	/* changed only in interrupt context */
+	volatile atomic_counter_t request_counter;
+
+	/* changed only in task context */
+	volatile atomic_counter_t service_counter;
+
+	u8 idx1;
+	u8 idx2;
+	u8 level; /* [0..2], buffering level */
+};
+
+struct dim_channel {
+	struct int_ch_state state;
+	u8 addr;
+	u16 dbr_addr;
+	u16 dbr_size;
+	u16 packet_length; /* isochronous packet length in bytes */
+	u16 bytes_per_frame; /* synchronous bytes per frame */
+	u16 done_sw_buffers_number; /* number of completed software buffers */
+};
+
+
+u8 DIM_Startup(void *dim_base_address, u32 mlb_clock);
+
+void DIM_Shutdown(void);
+
+bool DIM_GetLockState(void);
+
+u16 DIM_NormCtrlAsyncBufferSize(u16 buf_size);
+
+u16 DIM_NormIsocBufferSize(u16 buf_size, u16 packet_length);
+
+u16 DIM_NormSyncBufferSize(u16 buf_size, u16 bytes_per_frame);
+
+u8 DIM_InitControl(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		   u16 max_buffer_size);
+
+u8 DIM_InitAsync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		 u16 max_buffer_size);
+
+u8 DIM_InitIsoc(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		u16 packet_length);
+
+u8 DIM_InitSync(struct dim_channel *ch, u8 is_tx, u16 ch_address,
+		u16 bytes_per_frame);
+
+u8 DIM_DestroyChannel(struct dim_channel *ch);
+
+void DIM_ServiceIrq(struct dim_channel *const *channels);
+
+u8 DIM_ServiceChannel(struct dim_channel *ch);
+
+struct dim_ch_state_t *DIM_GetChannelState(struct dim_channel *ch,
+		struct dim_ch_state_t *dim_ch_state_ptr);
+
+bool DIM_EnqueueBuffer(struct dim_channel *ch, u32 buffer_addr,
+		       u16 buffer_size);
+
+bool DIM_DetachBuffers(struct dim_channel *ch, u16 buffers_number);
+
+u32 DIM_ReadRegister(u8 register_index);
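+
+/*
+ * A minimal sketch of the intended call sequence, inferred from this header;
+ * the channel address, buffer size and dma_addr below are hypothetical:
+ *
+ *	struct dim_channel ch;
+ *
+ *	DIM_Startup(io_base, CLK_3072FS);
+ *	DIM_InitControl(&ch, false, 2, 64);    (rx control channel, address 2)
+ *	DIM_EnqueueBuffer(&ch, dma_addr, 64);
+ *	DIM_ServiceIrq(channels);              (interrupt context; channels is
+ *	                                        a NULL-terminated array)
+ *	DIM_ServiceChannel(&ch);               (bottom half)
+ *	DIM_DestroyChannel(&ch);
+ *	DIM_Shutdown();
+ */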
+
+
+u32 DIMCB_IoRead(u32 *ptr32);
+
+void DIMCB_IoWrite(u32 *ptr32, u32 value);
+
+void DIMCB_OnError(u8 error_id, const char *error_message);
+
+void DIMCB_OnFail(const char *filename, int linenum);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.c b/drivers/staging/most/hdm-dim2/dim2_hdm.c
new file mode 100644
index 0000000..6a5a3a2
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.c
@@ -0,0 +1,964 @@
+/*
+ * dim2_hdm.c - MediaLB DIM2 Hardware Dependent Module
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#include <mostcore.h>
+#include <networking.h>
+#include "dim2_hal.h"
+#include "dim2_hdm.h"
+#include "dim2_errors.h"
+#include "dim2_sysfs.h"
+
+#define DMA_CHANNELS (32 - 1)  /* channel 0 is a system channel */
+
+#define MAX_BUFFERS_PACKET      32
+#define MAX_BUFFERS_STREAMING   32
+#define MAX_BUF_SIZE_PACKET     2048
+#define MAX_BUF_SIZE_STREAMING  (8*1024)
+
+/* command line parameter to select clock speed */
+static char *clock_speed;
+module_param(clock_speed, charp, 0);
+MODULE_PARM_DESC(clock_speed, "MediaLB Clock Speed");
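+/*
+ * Example (hypothetical): load the module with clock_speed=1024fs;
+ * the accepted strings are parsed in startup_dim() below.
+ */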
+
+/*
+ * #############################################################################
+ *
+ * The define below activates a utility function used by HAL-simu
+ * for calling the DIM interrupt handler.
+ * It is used for TEST PURPOSES only and must stay commented out in releases.
+ *
+ * #############################################################################
+ */
+/* #define ENABLE_HDM_TEST */
+
+static DEFINE_SPINLOCK(dim_lock);
+
+static void dim2_tasklet_fn(unsigned long data);
+static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+
+/**
+ * struct hdm_channel - private structure to keep channel specific data
+ * @name: channel name
+ * @is_initialized: identifier to know whether the channel is initialized
+ * @ch: HAL specific channel data
+ * @pending_list: list to keep MBOs before starting transfer
+ * @started_list: list to keep MBOs after starting transfer
+ * @direction: channel direction (TX or RX)
+ * @data_type: channel data type
+ */
+struct hdm_channel {
+	char name[sizeof "caNNN"];
+	bool is_initialized;
+	struct dim_channel ch;
+	struct list_head pending_list;	/* before DIM_EnqueueBuffer() */
+	struct list_head started_list;	/* after DIM_EnqueueBuffer() */
+	enum most_channel_direction direction;
+	enum most_channel_data_type data_type;
+};
+
+/**
+ * struct dim2_hdm - private structure to keep interface specific data
+ * @hch: an array of channel specific data
+ * @most_iface: most interface structure
+ * @capabilities: an array of channel capability data
+ * @name: device name used as the interface description
+ * @io_base: I/O register base address
+ * @irq_ahb0: dim2 AHB0 irq number
+ * @clk_speed: user selectable (through command line parameter) clock speed
+ * @netinfo_task: thread to deliver network status
+ * @netinfo_waitq: waitq for the thread to sleep
+ * @deliver_netinfo: to identify whether network status received
+ * @mac_addrs: INIC mac address
+ * @link_state: network link state
+ * @atx_idx: index of async tx channel
+ * @bus: medialb bus object for sysfs
+ */
+struct dim2_hdm {
+	struct hdm_channel hch[DMA_CHANNELS];
+	struct most_channel_capability capabilities[DMA_CHANNELS];
+	struct most_interface most_iface;
+	char name[16 + sizeof "dim2-"];
+	void *io_base;
+	unsigned int irq_ahb0;
+	int clk_speed;
+	struct task_struct *netinfo_task;
+	wait_queue_head_t netinfo_waitq;
+	int deliver_netinfo;
+	unsigned char mac_addrs[6];
+	unsigned char link_state;
+	int atx_idx;
+	struct medialb_bus bus;
+};
+
+#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
+
+/* Macro to identify a network status message */
+#define PACKET_IS_NET_INFO(p)  \
+	(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
+	 ((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
+
+#if defined(ENABLE_HDM_TEST)
+static struct dim2_hdm *test_dev;
+#endif
+
+bool dim2_sysfs_get_state_cb(void)
+{
+	bool state;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	state = DIM_GetLockState();
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	return state;
+}
+
+/**
+ * DIMCB_IoRead - callback from HAL to read an I/O register
+ * @ptr32: register address
+ */
+u32 DIMCB_IoRead(u32 *ptr32)
+{
+	return __raw_readl(ptr32);
+}
+
+/**
+ * DIMCB_IoWrite - callback from HAL to write value to an I/O register
+ * @ptr32: register address
+ * @value: value to write
+ */
+void DIMCB_IoWrite(u32 *ptr32, u32 value)
+{
+	__raw_writel(value, ptr32);
+}
+
+/**
+ * DIMCB_OnError - callback from HAL to report miscommunication between
+ * HDM and HAL
+ * @error_id: Error ID
+ * @error_message: Error message. Some text in a free format
+ */
+void DIMCB_OnError(u8 error_id, const char *error_message)
+{
+	pr_err("DIMCB_OnError: error_id - %d, error_message - %s\n", error_id,
+	       error_message);
+}
+
+/**
+ * DIMCB_OnFail - callback from HAL to report unrecoverable errors
+ * @filename: Source file where the error happened
+ * @linenum: Line number of the file where the error happened
+ */
+void DIMCB_OnFail(const char *filename, int linenum)
+{
+	pr_err("DIMCB_OnFail: file - %s, line no. - %d\n", filename, linenum);
+}
+
+/**
+ * startup_dim - initialize the dim2 interface
+ * @pdev: platform device
+ *
+ * Get the value of command line parameter "clock_speed" if given or use the
+ * default value, enable the clock and PLL, and initialize the dim2 interface.
+ */
+static int startup_dim(struct platform_device *pdev)
+{
+	struct dim2_hdm *dev = platform_get_drvdata(pdev);
+	struct dim2_platform_data *pdata = pdev->dev.platform_data;
+	u8 hal_ret;
+
+	dev->clk_speed = -1;
+
+	if (clock_speed) {
+		if (!strcmp(clock_speed, "256fs"))
+			dev->clk_speed = CLK_256FS;
+		else if (!strcmp(clock_speed, "512fs"))
+			dev->clk_speed = CLK_512FS;
+		else if (!strcmp(clock_speed, "1024fs"))
+			dev->clk_speed = CLK_1024FS;
+		else if (!strcmp(clock_speed, "2048fs"))
+			dev->clk_speed = CLK_2048FS;
+		else if (!strcmp(clock_speed, "3072fs"))
+			dev->clk_speed = CLK_3072FS;
+		else if (!strcmp(clock_speed, "4096fs"))
+			dev->clk_speed = CLK_4096FS;
+		else if (!strcmp(clock_speed, "6144fs"))
+			dev->clk_speed = CLK_6144FS;
+		else if (!strcmp(clock_speed, "8192fs"))
+			dev->clk_speed = CLK_8192FS;
+	}
+
+	if (dev->clk_speed == -1) {
+		pr_info("Bad or missing clock speed parameter, using default value: 3072fs\n");
+		dev->clk_speed = CLK_3072FS;
+	} else {
+		pr_info("Selected clock speed: %s\n", clock_speed);
+	}
+
+	if (pdata && pdata->init) {
+		int ret = pdata->init(pdata, dev->io_base, dev->clk_speed);
+
+		if (ret)
+			return ret;
+	}
+
+	hal_ret = DIM_Startup(dev->io_base, dev->clk_speed);
+	if (hal_ret != DIM_NO_ERROR) {
+		pr_err("DIM_Startup failed: %d\n", hal_ret);
+		if (pdata && pdata->destroy)
+			pdata->destroy(pdata);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * try_start_dim_transfer - try to transfer a buffer on a channel
+ * @hdm_ch: channel specific data
+ *
+ * Transfer a buffer from pending_list if the channel is ready
+ */
+static int try_start_dim_transfer(struct hdm_channel *hdm_ch)
+{
+	u16 buf_size;
+	struct list_head *head = &hdm_ch->pending_list;
+	struct mbo *mbo;
+	unsigned long flags;
+	struct dim_ch_state_t st;
+
+	BUG_ON(!hdm_ch);
+	BUG_ON(!hdm_ch->is_initialized);
+
+	spin_lock_irqsave(&dim_lock, flags);
+	if (list_empty(head)) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return -EAGAIN;
+	}
+
+	if (!DIM_GetChannelState(&hdm_ch->ch, &st)->ready) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return -EAGAIN;
+	}
+
+	mbo = list_entry(head->next, struct mbo, list);
+	buf_size = mbo->buffer_length;
+
+	BUG_ON(mbo->bus_address == 0);
+	if (!DIM_EnqueueBuffer(&hdm_ch->ch, mbo->bus_address, buf_size)) {
+		list_del(head->next);
+		spin_unlock_irqrestore(&dim_lock, flags);
+		mbo->processed_length = 0;
+		mbo->status = MBO_E_INVAL;
+		mbo->complete(mbo);
+		return -EFAULT;
+	}
+
+	list_move_tail(head->next, &hdm_ch->started_list);
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	return 0;
+}
+
+/**
+ * deliver_netinfo_thread - thread to deliver network status to mostcore
+ * @data: private data
+ *
+ * Wait for network status and deliver it to mostcore once it is received
+ */
+static int deliver_netinfo_thread(void *data)
+{
+	struct dim2_hdm *dev = (struct dim2_hdm *)data;
+
+	while (!kthread_should_stop()) {
+		wait_event_interruptible(dev->netinfo_waitq,
+					 dev->deliver_netinfo ||
+					 kthread_should_stop());
+
+		if (dev->deliver_netinfo) {
+			dev->deliver_netinfo--;
+			most_deliver_netinfo(&dev->most_iface, dev->link_state,
+					     dev->mac_addrs);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * retrieve_netinfo - retrieve network status from received buffer
+ * @dev: private data
+ * @mbo: received MBO
+ *
+ * Parse the message in the buffer and extract the node address, link state
+ * and MAC address. Wake up the thread that delivers this status to mostcore.
+ */
+static void retrieve_netinfo(struct dim2_hdm *dev, struct mbo *mbo)
+{
+	u8 *data = mbo->virt_address;
+	u8 *mac = dev->mac_addrs;
+
+	pr_info("Node Address: 0x%03x\n", (u16)data[16] << 8 | data[17]);
+	dev->link_state = data[18];
+	pr_info("NIState: %d\n", dev->link_state);
+	memcpy(mac, data + 19, 6);
+	pr_info("MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
+		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+	dev->deliver_netinfo++;
+	wake_up_interruptible(&dev->netinfo_waitq);
+}
+
+/**
+ * service_done_flag - handle completed buffers
+ * @dev: private data
+ * @ch_idx: channel index
+ *
+ * Return the completed buffers to mostcore using the completion callback.
+ */
+static void service_done_flag(struct dim2_hdm *dev, int ch_idx)
+{
+	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+	struct dim_ch_state_t st;
+	struct list_head *head;
+	struct mbo *mbo;
+	int done_buffers;
+	unsigned long flags;
+	u8 *data;
+
+	BUG_ON(!hdm_ch);
+	BUG_ON(!hdm_ch->is_initialized);
+
+	spin_lock_irqsave(&dim_lock, flags);
+
+	done_buffers = DIM_GetChannelState(&hdm_ch->ch, &st)->done_buffers;
+	if (!done_buffers) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return;
+	}
+
+	if (!DIM_DetachBuffers(&hdm_ch->ch, done_buffers)) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	head = &hdm_ch->started_list;
+
+	while (done_buffers) {
+		spin_lock_irqsave(&dim_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&dim_lock, flags);
+			pr_crit("hard error: started_mbo list is empty whereas DIM2 has sent buffers\n");
+			break;
+		}
+
+		mbo = list_entry(head->next, struct mbo, list);
+		list_del(head->next);
+		spin_unlock_irqrestore(&dim_lock, flags);
+
+		data = mbo->virt_address;
+
+		if (hdm_ch->data_type == MOST_CH_ASYNC &&
+		    hdm_ch->direction == MOST_CH_RX &&
+		    PACKET_IS_NET_INFO(data)) {
+
+			retrieve_netinfo(dev, mbo);
+
+			spin_lock_irqsave(&dim_lock, flags);
+			list_add_tail(&mbo->list, &hdm_ch->pending_list);
+			spin_unlock_irqrestore(&dim_lock, flags);
+		} else {
+			if (hdm_ch->data_type == MOST_CH_CONTROL ||
+			    hdm_ch->data_type == MOST_CH_ASYNC) {
+
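+				/*
+				 * PML occupies the first two bytes (big
+				 * endian); the full message size is PML + 2
+				 * bytes for the PML field itself.
+				 */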
+				u32 const data_size =
+					(u32)data[0] * 256 + data[1] + 2;
+
+				mbo->processed_length =
+					min(data_size, (u32)mbo->buffer_length);
+			} else {
+				mbo->processed_length = mbo->buffer_length;
+			}
+			mbo->status = MBO_SUCCESS;
+			mbo->complete(mbo);
+		}
+
+		done_buffers--;
+	}
+}
+
+static struct dim_channel **get_active_channels(struct dim2_hdm *dev,
+		struct dim_channel **buffer)
+{
+	int idx = 0;
+	int ch_idx;
+
+	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+		if (dev->hch[ch_idx].is_initialized)
+			buffer[idx++] = &dev->hch[ch_idx].ch;
+	}
+	buffer[idx++] = NULL;
+
+	return buffer;
+}
+
+/**
+ * dim2_tasklet_fn - tasklet function
+ * @data: private data
+ *
+ * Service each initialized channel, if needed
+ */
+static void dim2_tasklet_fn(unsigned long data)
+{
+	struct dim2_hdm *dev = (struct dim2_hdm *)data;
+	unsigned long flags;
+	int ch_idx;
+
+	for (ch_idx = 0; ch_idx < DMA_CHANNELS; ch_idx++) {
+		if (!dev->hch[ch_idx].is_initialized)
+			continue;
+
+		spin_lock_irqsave(&dim_lock, flags);
+		DIM_ServiceChannel(&(dev->hch[ch_idx].ch));
+		spin_unlock_irqrestore(&dim_lock, flags);
+
+		service_done_flag(dev, ch_idx);
+		while (!try_start_dim_transfer(dev->hch + ch_idx))
+			continue;
+	}
+}
+
+/**
+ * dim2_ahb_isr - interrupt service routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Acknowledge the interrupt and schedule a tasklet to service channels.
+ * Return IRQ_HANDLED.
+ */
+static irqreturn_t dim2_ahb_isr(int irq, void *_dev)
+{
+	struct dim2_hdm *dev = (struct dim2_hdm *)_dev;
+	struct dim_channel *buffer[DMA_CHANNELS + 1];
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	DIM_ServiceIrq(get_active_channels(dev, buffer));
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+#if !defined(ENABLE_HDM_TEST)
+	dim2_tasklet.data = (unsigned long)dev;
+	tasklet_schedule(&dim2_tasklet);
+#else
+	dim2_tasklet_fn((unsigned long)dev);
+#endif
+	return IRQ_HANDLED;
+}
+
+#if defined(ENABLE_HDM_TEST)
+
+/*
+ * Utility function used by HAL-simu for calling DIM interrupt handler.
+ * It is used only for TEST PURPOSE.
+ */
+void raise_dim_interrupt(void)
+{
+	(void)dim2_ahb_isr(0, test_dev);
+}
+#endif
+
+/**
+ * complete_all_mbos - complete MBOs in a list
+ * @head: list head
+ *
+ * Delete all entries from the list and return the MBOs to mostcore using
+ * the completion callback.
+ */
+static void complete_all_mbos(struct list_head *head)
+{
+	unsigned long flags;
+	struct mbo *mbo;
+
+	for (;;) {
+		spin_lock_irqsave(&dim_lock, flags);
+		if (list_empty(head)) {
+			spin_unlock_irqrestore(&dim_lock, flags);
+			break;
+		}
+
+		mbo = list_entry(head->next, struct mbo, list);
+		list_del(head->next);
+		spin_unlock_irqrestore(&dim_lock, flags);
+
+		mbo->processed_length = 0;
+		mbo->status = MBO_E_CLOSE;
+		mbo->complete(mbo);
+	}
+}
+
+/**
+ * configure_channel - initialize a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: index of the channel to be configured
+ * @ccfg: structure that holds the configuration information
+ *
+ * Receives configuration information from mostcore and initializes
+ * the corresponding channel. Return 0 on success, negative on failure.
+ */
+static int configure_channel(struct most_interface *most_iface, int ch_idx,
+			     struct most_channel_config *ccfg)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	bool const is_tx = ccfg->direction == MOST_CH_TX;
+	u16 const sub_size = ccfg->subbuffer_size;
+	u16 const buf_size = ccfg->buffer_size;
+	u16 new_size;
+	unsigned long flags;
+	u8 hal_ret;
+	int const ch_addr = ch_idx * 2 + 2;
+	struct hdm_channel *const hdm_ch = dev->hch + ch_idx;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+	if (hdm_ch->is_initialized)
+		return -EPERM;
+
+	switch (ccfg->data_type) {
+	case MOST_CH_CONTROL:
+		new_size = DIM_NormCtrlAsyncBufferSize(buf_size);
+		if (new_size == 0) {
+			pr_err("%s: too small buffer size\n", hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = DIM_InitControl(&hdm_ch->ch, is_tx, ch_addr, buf_size);
+		break;
+	case MOST_CH_ASYNC:
+		new_size = DIM_NormCtrlAsyncBufferSize(buf_size);
+		if (new_size == 0) {
+			pr_err("%s: too small buffer size\n", hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = DIM_InitAsync(&hdm_ch->ch, is_tx, ch_addr, buf_size);
+		break;
+	case MOST_CH_ISOC_AVP:
+		new_size = DIM_NormIsocBufferSize(buf_size, sub_size);
+		if (new_size == 0) {
+			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+			       hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = DIM_InitIsoc(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+		break;
+	case MOST_CH_SYNC:
+		new_size = DIM_NormSyncBufferSize(buf_size, sub_size);
+		if (new_size == 0) {
+			pr_err("%s: invalid sub-buffer size or too small buffer size\n",
+			       hdm_ch->name);
+			return -EINVAL;
+		}
+		ccfg->buffer_size = new_size;
+		if (new_size != buf_size)
+			pr_warn("%s: fixed buffer size (%d -> %d)\n",
+				hdm_ch->name, buf_size, new_size);
+		spin_lock_irqsave(&dim_lock, flags);
+		hal_ret = DIM_InitSync(&hdm_ch->ch, is_tx, ch_addr, sub_size);
+		break;
+	default:
+		pr_err("%s: configure failed, bad channel type: %d\n",
+		       hdm_ch->name, ccfg->data_type);
+		return -EINVAL;
+	}
+
+	if (hal_ret != DIM_NO_ERROR) {
+		spin_unlock_irqrestore(&dim_lock, flags);
+		pr_err("%s: configure failed (%d), type: %d, is_tx: %d\n",
+		       hdm_ch->name, hal_ret, ccfg->data_type, (int)is_tx);
+		return -ENODEV;
+	}
+
+	hdm_ch->data_type = ccfg->data_type;
+	hdm_ch->direction = ccfg->direction;
+	hdm_ch->is_initialized = true;
+
+	if (hdm_ch->data_type == MOST_CH_ASYNC &&
+	    hdm_ch->direction == MOST_CH_TX &&
+	    dev->atx_idx < 0)
+		dev->atx_idx = ch_idx;
+
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	return 0;
+}
+
+/**
+ * enqueue - enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: index of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Push the buffer into pending_list and try to transfer one buffer from
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int enqueue(struct most_interface *most_iface, int ch_idx,
+		   struct mbo *mbo)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+	unsigned long flags;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+	if (!hdm_ch->is_initialized)
+		return -EPERM;
+
+	if (mbo->bus_address == 0)
+		return -EFAULT;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	list_add_tail(&mbo->list, &hdm_ch->pending_list);
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	(void)try_start_dim_transfer(hdm_ch);
+
+	return 0;
+}
+
+/**
+ * request_netinfo - triggers retrieving of network info
+ * @most_iface: pointer to the interface
+ * @ch_idx: corresponding channel index
+ *
+ * Send a command to the INIC which triggers retrieving of network info by
+ * means of "Message exchange over MDP/MEP".
+ */
+static void request_netinfo(struct most_interface *most_iface, int ch_idx)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	struct mbo *mbo;
+	u8 *data;
+
+	if (dev->atx_idx < 0) {
+		pr_err("Async Tx Not initialized\n");
+		return;
+	}
+
+	mbo = most_get_mbo(&dev->most_iface, dev->atx_idx);
+	if (!mbo)
+		return;
+
+	mbo->buffer_length = 5;
+
+	data = mbo->virt_address;
+
+	data[0] = 0x00; /* PML High byte */
+	data[1] = 0x03; /* PML Low byte */
+	data[2] = 0x02; /* PMHL */
+	data[3] = 0x08; /* FPH */
+	data[4] = 0x40; /* FMF (FIFO cmd msg - Triggers NAOverMDP) */
+
+	most_submit_mbo(mbo);
+}
+
+/**
+ * poison_channel - poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel index
+ *
+ * Destroy a channel and complete all the buffers in both started_list &
+ * pending_list. Return 0 on success, negative on failure.
+ */
+static int poison_channel(struct most_interface *most_iface, int ch_idx)
+{
+	struct dim2_hdm *dev = iface_to_hdm(most_iface);
+	struct hdm_channel *hdm_ch = dev->hch + ch_idx;
+	unsigned long flags;
+	u8 hal_ret;
+	int ret = 0;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= DMA_CHANNELS);
+
+	if (!hdm_ch->is_initialized)
+		return -EPERM;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	hal_ret = DIM_DestroyChannel(&hdm_ch->ch);
+	hdm_ch->is_initialized = false;
+	if (ch_idx == dev->atx_idx)
+		dev->atx_idx = -1;
+	spin_unlock_irqrestore(&dim_lock, flags);
+	if (hal_ret != DIM_NO_ERROR) {
+		pr_err("HAL Failed to close channel %s\n", hdm_ch->name);
+		ret = -EFAULT;
+	}
+
+	complete_all_mbos(&hdm_ch->started_list);
+	complete_all_mbos(&hdm_ch->pending_list);
+
+	return ret;
+}
+
+/*
+ * dim2_probe - dim2 probe handler
+ * @pdev: platform device structure
+ *
+ * Register the dim2 interface with mostcore and initialize it.
+ * Return 0 on success, negative on failure.
+ */
+static int dim2_probe(struct platform_device *pdev)
+{
+	struct dim2_hdm *dev;
+	struct resource *res;
+	int ret, i;
+	struct kobject *kobj;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->atx_idx = -1;
+
+	platform_set_drvdata(pdev, dev);
+#if defined(ENABLE_HDM_TEST)
+	test_dev = dev;
+#else
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("no memory region defined\n");
+		ret = -ENOENT;
+		goto err_free_dev;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+		pr_err("failed to request mem region\n");
+		ret = -EBUSY;
+		goto err_free_dev;
+	}
+
+	dev->io_base = ioremap(res->start, resource_size(res));
+	if (!dev->io_base) {
+		pr_err("failed to ioremap\n");
+		ret = -ENOMEM;
+		goto err_release_mem;
+	}
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0) {
+		pr_err("failed to get irq\n");
+		goto err_unmap_io;
+	}
+	dev->irq_ahb0 = ret;
+
+	ret = request_irq(dev->irq_ahb0, dim2_ahb_isr, 0, "mlb_ahb0", dev);
+	if (ret) {
+		pr_err("failed to request IRQ: %d, err: %d\n",
+		       dev->irq_ahb0, ret);
+		goto err_unmap_io;
+	}
+#endif
+	init_waitqueue_head(&dev->netinfo_waitq);
+	dev->deliver_netinfo = 0;
+	dev->netinfo_task = kthread_run(&deliver_netinfo_thread, (void *)dev,
+					"dim2_netinfo");
+	if (IS_ERR(dev->netinfo_task)) {
+		ret = PTR_ERR(dev->netinfo_task);
+		goto err_free_irq;
+	}
+
+	for (i = 0; i < DMA_CHANNELS; i++) {
+		struct most_channel_capability *cap = dev->capabilities + i;
+		struct hdm_channel *hdm_ch = dev->hch + i;
+
+		INIT_LIST_HEAD(&hdm_ch->pending_list);
+		INIT_LIST_HEAD(&hdm_ch->started_list);
+		hdm_ch->is_initialized = false;
+		snprintf(hdm_ch->name, sizeof(hdm_ch->name), "ca%d", i * 2 + 2);
+
+		cap->name_suffix = hdm_ch->name;
+		cap->direction = MOST_CH_RX | MOST_CH_TX;
+		cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+				 MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+		cap->num_buffers_packet = MAX_BUFFERS_PACKET;
+		cap->buffer_size_packet = MAX_BUF_SIZE_PACKET;
+		cap->num_buffers_streaming = MAX_BUFFERS_STREAMING;
+		cap->buffer_size_streaming = MAX_BUF_SIZE_STREAMING;
+	}
+
+	{
+		const char *fmt;
+
+		if (sizeof(res->start) == sizeof(long long))
+			fmt = "dim2-%016llx";
+		else if (sizeof(res->start) == sizeof(long))
+			fmt = "dim2-%016lx";
+		else
+			fmt = "dim2-%016x";
+
+		snprintf(dev->name, sizeof(dev->name), fmt, res->start);
+	}
+
+	dev->most_iface.interface = ITYPE_MEDIALB_DIM2;
+	dev->most_iface.description = dev->name;
+	dev->most_iface.num_channels = DMA_CHANNELS;
+	dev->most_iface.channel_vector = dev->capabilities;
+	dev->most_iface.configure = configure_channel;
+	dev->most_iface.enqueue = enqueue;
+	dev->most_iface.poison_channel = poison_channel;
+	dev->most_iface.request_netinfo = request_netinfo;
+
+	kobj = most_register_interface(&dev->most_iface);
+	if (IS_ERR(kobj)) {
+		ret = PTR_ERR(kobj);
+		pr_err("failed to register MOST interface\n");
+		goto err_stop_thread;
+	}
+
+	ret = dim2_sysfs_probe(&dev->bus, kobj);
+	if (ret)
+		goto err_unreg_iface;
+
+	ret = startup_dim(pdev);
+	if (ret) {
+		pr_err("failed to initialize DIM2\n");
+		goto err_destroy_bus;
+	}
+
+	return 0;
+
+err_destroy_bus:
+	dim2_sysfs_destroy(&dev->bus);
+err_unreg_iface:
+	most_deregister_interface(&dev->most_iface);
+err_stop_thread:
+	kthread_stop(dev->netinfo_task);
+err_free_irq:
+#if !defined(ENABLE_HDM_TEST)
+	free_irq(dev->irq_ahb0, dev);
+err_unmap_io:
+	iounmap(dev->io_base);
+err_release_mem:
+	release_mem_region(res->start, resource_size(res));
+err_free_dev:
+#endif
+	kfree(dev);
+
+	return ret;
+}
+
+/**
+ * dim2_remove - dim2 remove handler
+ * @pdev: platform device structure
+ *
+ * Unregister the interface from mostcore
+ */
+static int dim2_remove(struct platform_device *pdev)
+{
+	struct dim2_hdm *dev = platform_get_drvdata(pdev);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	struct dim2_platform_data *pdata = pdev->dev.platform_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dim_lock, flags);
+	DIM_Shutdown();
+	spin_unlock_irqrestore(&dim_lock, flags);
+
+	if (pdata && pdata->destroy)
+		pdata->destroy(pdata);
+
+	dim2_sysfs_destroy(&dev->bus);
+	most_deregister_interface(&dev->most_iface);
+	kthread_stop(dev->netinfo_task);
+#if !defined(ENABLE_HDM_TEST)
+	free_irq(dev->irq_ahb0, dev);
+	iounmap(dev->io_base);
+	release_mem_region(res->start, resource_size(res));
+#endif
+	kfree(dev);
+	platform_set_drvdata(pdev, NULL);
+
+	/*
+	 * break the link to the local platform_device_id struct
+	 * to prevent a crash when the platform device module is unloaded
+	 */
+	pdev->id_entry = NULL;
+
+	return 0;
+}
+
+static struct platform_device_id dim2_id[] = {
+	{ "medialb_dim2" },
+	{ }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(platform, dim2_id);
+
+static struct platform_driver dim2_driver = {
+	.probe = dim2_probe,
+	.remove = dim2_remove,
+	.id_table = dim2_id,
+	.driver = {
+		.name = "hdm_dim2",
+		.owner = THIS_MODULE,
+	},
+};
+
+/**
+ * dim2_hdm_init - Driver Registration Routine
+ */
+static int __init dim2_hdm_init(void)
+{
+	pr_info("dim2_hdm_init()\n");
+	return platform_driver_register(&dim2_driver);
+}
+
+/**
+ * dim2_hdm_exit - Driver Cleanup Routine
+ */
+static void __exit dim2_hdm_exit(void)
+{
+	pr_info("dim2_hdm_exit()\n");
+	platform_driver_unregister(&dim2_driver);
+}
+
+module_init(dim2_hdm_init);
+module_exit(dim2_hdm_exit);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("MediaLB DIM2 Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-dim2/dim2_hdm.h b/drivers/staging/most/hdm-dim2/dim2_hdm.h
new file mode 100644
index 0000000..6e68832
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_hdm.h
@@ -0,0 +1,26 @@
+/*
+ * dim2_hdm.h - MediaLB DIM2 HDM Header
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_HDM_H
+#define	DIM2_HDM_H
+
+struct device;
+
+/* platform dependent data for dim2 interface */
+struct dim2_platform_data {
+	int (*init)(struct dim2_platform_data *pd, void *io_base, int clk_speed);
+	void (*destroy)(struct dim2_platform_data *pd);
+	void *priv;
+};
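+
+/*
+ * A board support file is expected to provide this structure as platform
+ * data. A minimal sketch (my_enable_mlb_clock() is a hypothetical helper):
+ *
+ *	static int my_dim2_init(struct dim2_platform_data *pd,
+ *				void *io_base, int clk_speed)
+ *	{
+ *		return my_enable_mlb_clock(clk_speed);
+ *	}
+ *
+ *	static struct dim2_platform_data dim2_pdata = {
+ *		.init = my_dim2_init,
+ *	};
+ */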
+
+#endif	/* DIM2_HDM_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
new file mode 100644
index 0000000..476f66f
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -0,0 +1,176 @@
+/*
+ * dim2_reg.h - Definitions for registers of DIM2
+ * (MediaLB, Device Interface Macro IP, OS62420)
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#ifndef DIM2_OS62420_H
+#define	DIM2_OS62420_H
+
+#include <linux/types.h>
+
+#ifdef	__cplusplus
+extern "C" {
+#endif
+
+
+struct dim2_regs {
+	/* 0x00 */ u32 MLBC0;
+	/* 0x01 */ u32 rsvd0[1];
+	/* 0x02 */ u32 MLBPC0;
+	/* 0x03 */ u32 MS0;
+	/* 0x04 */ u32 rsvd1[1];
+	/* 0x05 */ u32 MS1;
+	/* 0x06 */ u32 rsvd2[2];
+	/* 0x08 */ u32 MSS;
+	/* 0x09 */ u32 MSD;
+	/* 0x0A */ u32 rsvd3[1];
+	/* 0x0B */ u32 MIEN;
+	/* 0x0C */ u32 rsvd4[1];
+	/* 0x0D */ u32 MLBPC2;
+	/* 0x0E */ u32 MLBPC1;
+	/* 0x0F */ u32 MLBC1;
+	/* 0x10 */ u32 rsvd5[0x10];
+	/* 0x20 */ u32 HCTL;
+	/* 0x21 */ u32 rsvd6[1];
+	/* 0x22 */ u32 HCMR0;
+	/* 0x23 */ u32 HCMR1;
+	/* 0x24 */ u32 HCER0;
+	/* 0x25 */ u32 HCER1;
+	/* 0x26 */ u32 HCBR0;
+	/* 0x27 */ u32 HCBR1;
+	/* 0x28 */ u32 rsvd7[8];
+	/* 0x30 */ u32 MDAT0;
+	/* 0x31 */ u32 MDAT1;
+	/* 0x32 */ u32 MDAT2;
+	/* 0x33 */ u32 MDAT3;
+	/* 0x34 */ u32 MDWE0;
+	/* 0x35 */ u32 MDWE1;
+	/* 0x36 */ u32 MDWE2;
+	/* 0x37 */ u32 MDWE3;
+	/* 0x38 */ u32 MCTL;
+	/* 0x39 */ u32 MADR;
+	/* 0x3A */ u32 rsvd8[0xB6];
+	/* 0xF0 */ u32 ACTL;
+	/* 0xF1 */ u32 rsvd9[3];
+	/* 0xF4 */ u32 ACSR0;
+	/* 0xF5 */ u32 ACSR1;
+	/* 0xF6 */ u32 ACMR0;
+	/* 0xF7 */ u32 ACMR1;
+};
+
+
+#define DIM2_MASK(n)  (~((~(u32)0)<<(n)))
+
+enum {
+	MLBC0_MLBLK_BIT = 7,
+
+	MLBC0_MLBPEN_BIT = 5,
+
+	MLBC0_MLBCLK_SHIFT = 2,
+	MLBC0_MLBCLK_VAL_256FS = 0,
+	MLBC0_MLBCLK_VAL_512FS = 1,
+	MLBC0_MLBCLK_VAL_1024FS = 2,
+	MLBC0_MLBCLK_VAL_2048FS = 3,
+
+	MLBC0_FCNT_SHIFT = 15,
+	MLBC0_FCNT_MASK = 7,
+	MLBC0_FCNT_VAL_1FPSB = 0,
+	MLBC0_FCNT_VAL_2FPSB = 1,
+	MLBC0_FCNT_VAL_4FPSB = 2,
+	MLBC0_FCNT_VAL_8FPSB = 3,
+	MLBC0_FCNT_VAL_16FPSB = 4,
+	MLBC0_FCNT_VAL_32FPSB = 5,
+	MLBC0_FCNT_VAL_64FPSB = 6,
+
+	MLBC0_MLBEN_BIT = 0,
+
+	MIEN_CTX_BREAK_BIT = 29,
+	MIEN_CTX_PE_BIT = 28,
+	MIEN_CTX_DONE_BIT = 27,
+
+	MIEN_CRX_BREAK_BIT = 26,
+	MIEN_CRX_PE_BIT = 25,
+	MIEN_CRX_DONE_BIT = 24,
+
+	MIEN_ATX_BREAK_BIT = 22,
+	MIEN_ATX_PE_BIT = 21,
+	MIEN_ATX_DONE_BIT = 20,
+
+	MIEN_ARX_BREAK_BIT = 19,
+	MIEN_ARX_PE_BIT = 18,
+	MIEN_ARX_DONE_BIT = 17,
+
+	MIEN_SYNC_PE_BIT = 16,
+
+	MIEN_ISOC_BUFO_BIT = 1,
+	MIEN_ISOC_PE_BIT = 0,
+
+	MLBC1_NDA_SHIFT = 8,
+	MLBC1_NDA_MASK = 0xFF,
+
+	MLBC1_CLKMERR_BIT = 7,
+	MLBC1_LOCKERR_BIT = 6,
+
+	ACTL_DMA_MODE_BIT = 2,
+	ACTL_DMA_MODE_VAL_DMA_MODE_0 = 0,
+	ACTL_DMA_MODE_VAL_DMA_MODE_1 = 1,
+	ACTL_SCE_BIT = 0,
+
+	HCTL_EN_BIT = 15
+};
+
+enum {
+	CDT1_BS_ISOC_SHIFT = 0,
+	CDT1_BS_ISOC_MASK = DIM2_MASK(9),
+
+	CDT3_BD_SHIFT = 0,
+	CDT3_BD_MASK = DIM2_MASK(12),
+	CDT3_BD_ISOC_MASK = DIM2_MASK(13),
+	CDT3_BA_SHIFT = 16,
+
+	ADT0_CE_BIT = 15,
+	ADT0_LE_BIT = 14,
+	ADT0_PG_BIT = 13,
+
+	ADT1_RDY_BIT = 15,
+	ADT1_DNE_BIT = 14,
+	ADT1_ERR_BIT = 13,
+	ADT1_PS_BIT = 12,
+	ADT1_MEP_BIT = 11,
+	ADT1_BD_SHIFT = 0,
+	ADT1_CTRL_ASYNC_BD_MASK = DIM2_MASK(11),
+	ADT1_ISOC_SYNC_BD_MASK = DIM2_MASK(13),
+
+	CAT_MFE_BIT = 14,
+
+	CAT_MT_BIT = 13,
+
+	CAT_RNW_BIT = 12,
+
+	CAT_CE_BIT = 11,
+
+	CAT_CT_SHIFT = 8,
+	CAT_CT_VAL_SYNC = 0,
+	CAT_CT_VAL_CONTROL = 1,
+	CAT_CT_VAL_ASYNC = 2,
+	CAT_CT_VAL_ISOC = 3,
+
+	CAT_CL_SHIFT = 0,
+	CAT_CL_MASK = DIM2_MASK(6)
+};
+
+
+#ifdef	__cplusplus
+}
+#endif
+
+#endif	/* DIM2_OS62420_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.c b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
new file mode 100644
index 0000000..8e331a2
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.c
@@ -0,0 +1,116 @@
+/*
+ * dim2_sysfs.c - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include "dim2_sysfs.h"
+
+struct bus_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct medialb_bus *bus, char *buf);
+	ssize_t (*store)(struct medialb_bus *bus, const char *buf, size_t count);
+};
+
+static ssize_t state_show(struct medialb_bus *bus, char *buf)
+{
+	bool state = dim2_sysfs_get_state_cb();
+
+	return sprintf(buf, "%s\n", state ? "locked" : "");
+}
+
+static struct bus_attr state_attr = __ATTR_RO(state);
+
+static struct attribute *bus_default_attrs[] = {
+	&state_attr.attr,
+	NULL,
+};
+
+static struct attribute_group bus_attr_group = {
+	.attrs = bus_default_attrs,
+};
+
+static void bus_kobj_release(struct kobject *kobj)
+{
+}
+
+static ssize_t bus_kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+				  char *buf)
+{
+	struct medialb_bus *bus =
+		container_of(kobj, struct medialb_bus, kobj_group);
+	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+	if (!xattr->show)
+		return -EIO;
+
+	return xattr->show(bus, buf);
+}
+
+static ssize_t bus_kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+				   const char *buf, size_t count)
+{
+	ssize_t ret;
+	struct medialb_bus *bus =
+		container_of(kobj, struct medialb_bus, kobj_group);
+	struct bus_attr *xattr = container_of(attr, struct bus_attr, attr);
+
+	if (!xattr->store)
+		return -EIO;
+
+	ret = xattr->store(bus, buf, count);
+	return ret;
+}
+
+static struct sysfs_ops const bus_kobj_sysfs_ops = {
+	.show = bus_kobj_attr_show,
+	.store = bus_kobj_attr_store,
+};
+
+static struct kobj_type bus_ktype = {
+	.release = bus_kobj_release,
+	.sysfs_ops = &bus_kobj_sysfs_ops,
+};
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj)
+{
+	int err;
+
+	kobject_init(&bus->kobj_group, &bus_ktype);
+	err = kobject_add(&bus->kobj_group, parent_kobj, "bus");
+	if (err) {
+		pr_err("kobject_add() failed: %d\n", err);
+		goto err_kobject_add;
+	}
+
+	err = sysfs_create_group(&bus->kobj_group, &bus_attr_group);
+	if (err) {
+		pr_err("sysfs_create_group() failed: %d\n", err);
+		goto err_create_group;
+	}
+
+	return 0;
+
+err_create_group:
+	kobject_put(&bus->kobj_group);
+
+err_kobject_add:
+	return err;
+}
+
+void dim2_sysfs_destroy(struct medialb_bus *bus)
+{
+	kobject_put(&bus->kobj_group);
+}
diff --git a/drivers/staging/most/hdm-dim2/dim2_sysfs.h b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
new file mode 100644
index 0000000..e719691
--- /dev/null
+++ b/drivers/staging/most/hdm-dim2/dim2_sysfs.h
@@ -0,0 +1,39 @@
+/*
+ * dim2_sysfs.h - MediaLB sysfs information
+ *
+ * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
+
+#ifndef DIM2_SYSFS_H
+#define	DIM2_SYSFS_H
+
+
+#include <linux/kobject.h>
+
+
+struct medialb_bus {
+	struct kobject kobj_group;
+};
+
+struct dim2_hdm;
+
+int dim2_sysfs_probe(struct medialb_bus *bus, struct kobject *parent_kobj);
+void dim2_sysfs_destroy(struct medialb_bus *bus);
+
+/*
+ * callback,
+ * must deliver MediaLB state as true if locked or false if unlocked
+ */
+bool dim2_sysfs_get_state_cb(void);
+
+
+#endif	/* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/hdm-i2c/Kconfig b/drivers/staging/most/hdm-i2c/Kconfig
new file mode 100644
index 0000000..6fd7983
--- /dev/null
+++ b/drivers/staging/most/hdm-i2c/Kconfig
@@ -0,0 +1,12 @@
+#
+# MOST I2C configuration
+#
+
+config HDM_I2C
+	tristate "I2C HDM"
+	depends on I2C
+	---help---
+	  Say Y here if you want to connect via I2C to a network transceiver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hdm_i2c.
diff --git a/drivers/staging/most/hdm-i2c/Makefile b/drivers/staging/most/hdm-i2c/Makefile
new file mode 100644
index 0000000..03a4a59
--- /dev/null
+++ b/drivers/staging/most/hdm-i2c/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HDM_I2C) += hdm_i2c.o
+
+ccflags-y += -Idrivers/staging/most/mostcore/
diff --git a/drivers/staging/most/hdm-i2c/hdm_i2c.c b/drivers/staging/most/hdm-i2c/hdm_i2c.c
new file mode 100644
index 0000000..029ded3
--- /dev/null
+++ b/drivers/staging/most/hdm-i2c/hdm_i2c.c
@@ -0,0 +1,451 @@
+/*
+ * hdm_i2c.c - Hardware Dependent Module for I2C Interface
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+
+#include <mostcore.h>
+
+enum { CH_RX, CH_TX, NUM_CHANNELS };
+
+#define MAX_BUFFERS_CONTROL 32
+#define MAX_BUF_SIZE_CONTROL 256
+
+/**
+ * list_first_mbo - get the first mbo from a list
+ * @ptr:	the list head to take the mbo from.
+ */
+#define list_first_mbo(ptr) \
+	list_first_entry(ptr, struct mbo, list)
+
+
+/* IRQ / Polling option */
+static bool polling_req;
+module_param(polling_req, bool, S_IRUGO);
+MODULE_PARM_DESC(polling_req, "Request Polling. Default = 0 (use irq)");
+
+/* Polling Rate */
+static int scan_rate = 100;
+module_param(scan_rate, int, 0644);
+MODULE_PARM_DESC(scan_rate, "Polling rate in times/sec. Default = 100");
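+/*
+ * Example (hypothetical): polling_req=1 scan_rate=50 selects polling mode
+ * with 50 reads per second instead of using the interrupt line.
+ */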
+
+struct hdm_i2c {
+	bool is_open[NUM_CHANNELS];
+	bool polling_mode;
+	struct most_interface most_iface;
+	struct most_channel_capability capabilities[NUM_CHANNELS];
+	struct i2c_client *client;
+	struct rx {
+		struct delayed_work dwork;
+		wait_queue_head_t waitq;
+		struct list_head list;
+		struct mutex list_mutex;
+	} rx;
+	char name[64];
+};
+
+#define to_hdm(iface) container_of(iface, struct hdm_i2c, most_iface)
+
+/**
+ * configure_channel - called from MOST core to configure a channel
+ * @most_iface: interface the channel belongs to
+ * @ch_idx: index of the channel to be configured
+ * @channel_config: structure that holds the configuration information
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Receives configuration information from MOST core and initializes the
+ * corresponding channel.
+ */
+static int configure_channel(struct most_interface *most_iface,
+			     int ch_idx,
+			     struct most_channel_config *channel_config)
+{
+	struct hdm_i2c *dev = to_hdm(most_iface);
+
+	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+	BUG_ON(dev->is_open[ch_idx]);
+
+	if (channel_config->data_type != MOST_CH_CONTROL) {
+		pr_err("bad data type for channel %d\n", ch_idx);
+		return -EPERM;
+	}
+
+	if (channel_config->direction != dev->capabilities[ch_idx].direction) {
+		pr_err("bad direction for channel %d\n", ch_idx);
+		return -EPERM;
+	}
+
+	if (channel_config->direction == MOST_CH_RX) {
+		if (dev->polling_mode)
+			schedule_delayed_work(&dev->rx.dwork,
+					      msecs_to_jiffies(MSEC_PER_SEC / 4));
+	}
+	dev->is_open[ch_idx] = true;
+
+	return 0;
+}
+
+/**
+ * enqueue - called from MOST core to enqueue a buffer for data transfer
+ * @most_iface: intended interface
+ * @ch_idx: index of the channel the buffer is intended for
+ * @mbo: pointer to the buffer object
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Transmit the data over I2C if it is a "write" request, or push the buffer
+ * into the RX list if it is a "read" request.
+ */
+static int enqueue(struct most_interface *most_iface,
+		   int ch_idx, struct mbo *mbo)
+{
+	struct hdm_i2c *dev = to_hdm(most_iface);
+	int ret;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+	BUG_ON(!dev->is_open[ch_idx]);
+
+	if (ch_idx == CH_RX) {
+		/* RX */
+		mutex_lock(&dev->rx.list_mutex);
+		list_add_tail(&mbo->list, &dev->rx.list);
+		mutex_unlock(&dev->rx.list_mutex);
+		wake_up_interruptible(&dev->rx.waitq);
+	} else {
+		/* TX */
+		ret = i2c_master_send(dev->client, mbo->virt_address,
+				      mbo->buffer_length);
+		if (ret <= 0) {
+			mbo->processed_length = 0;
+			mbo->status = MBO_E_INVAL;
+		} else {
+			mbo->processed_length = mbo->buffer_length;
+			mbo->status = MBO_SUCCESS;
+		}
+		mbo->complete(mbo);
+	}
+
+	return 0;
+}
+
+/**
+ * poison_channel - called from MOST core to poison buffers of a channel
+ * @most_iface: pointer to the interface the channel to be poisoned belongs to
+ * @ch_idx: corresponding channel index
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * If the channel direction is RX, complete the buffers in the list with
+ * status MBO_E_CLOSE.
+ */
+static int poison_channel(struct most_interface *most_iface,
+			  int ch_idx)
+{
+	struct hdm_i2c *dev = to_hdm(most_iface);
+	struct mbo *mbo;
+
+	BUG_ON(ch_idx < 0 || ch_idx >= NUM_CHANNELS);
+	BUG_ON(!dev->is_open[ch_idx]);
+
+	dev->is_open[ch_idx] = false;
+
+	if (ch_idx == CH_RX) {
+		mutex_lock(&dev->rx.list_mutex);
+		while (!list_empty(&dev->rx.list)) {
+			mbo = list_first_mbo(&dev->rx.list);
+			list_del(&mbo->list);
+			mutex_unlock(&dev->rx.list_mutex);
+
+			mbo->processed_length = 0;
+			mbo->status = MBO_E_CLOSE;
+			mbo->complete(mbo);
+
+			mutex_lock(&dev->rx.list_mutex);
+		}
+		mutex_unlock(&dev->rx.list_mutex);
+		wake_up_interruptible(&dev->rx.waitq);
+	}
+
+	return 0;
+}
+
+static void request_netinfo(struct most_interface *most_iface,
+			    int ch_idx)
+{
+	pr_info("request_netinfo()\n");
+}
+
+static void do_rx_work(struct hdm_i2c *dev)
+{
+	struct mbo *mbo;
+	unsigned char msg[MAX_BUF_SIZE_CONTROL];
+	int ret, ch_idx = CH_RX;
+	u16 pml, data_size;
+
+	/* Read PML (2 bytes) */
+	ret = i2c_master_recv(dev->client, msg, 2);
+	if (ret <= 0) {
+		pr_err("Failed to receive PML\n");
+		return;
+	}
+
+	pml = (msg[0] << 8) | msg[1];
+	if (!pml)
+		return;
+
+	data_size = pml + 2;
+
+	/* Read the whole message, including PML */
+	ret = i2c_master_recv(dev->client, msg, data_size);
+	if (ret <= 0) {
+		pr_err("Failed to receive a Port Message\n");
+		return;
+	}
+
+	for (;;) {
+		/*
+		 * Conditions to wait for: poisoned channel or free buffer
+		 * available for reading
+		 */
+		if (wait_event_interruptible(dev->rx.waitq,
+					     !dev->is_open[ch_idx] ||
+					     !list_empty(&dev->rx.list))) {
+			pr_err("wait_event_interruptible() failed\n");
+			return;
+		}
+
+		if (!dev->is_open[ch_idx])
+			return;
+
+		mutex_lock(&dev->rx.list_mutex);
+
+		/* list may be empty if poison or remove is called */
+		if (!list_empty(&dev->rx.list))
+			break;
+
+		mutex_unlock(&dev->rx.list_mutex);
+	}
+
+	mbo = list_first_mbo(&dev->rx.list);
+	list_del(&mbo->list);
+	mutex_unlock(&dev->rx.list_mutex);
+
+	mbo->processed_length = min(data_size, mbo->buffer_length);
+	memcpy(mbo->virt_address, msg, mbo->processed_length);
+	mbo->status = MBO_SUCCESS;
+	mbo->complete(mbo);
+}
+
+/**
+ * pending_rx_work - Read pending messages through I2C
+ * @work: definition of this work item
+ *
+ * Scheduled by the interrupt service routine, most_irq_handler(), or
+ * re-scheduled periodically when polling mode is used.
+ */
+static void pending_rx_work(struct work_struct *work)
+{
+	struct hdm_i2c *dev = container_of(work, struct hdm_i2c, rx.dwork.work);
+
+	do_rx_work(dev);
+
+	if (dev->polling_mode) {
+		if (dev->is_open[CH_RX])
+			schedule_delayed_work(&dev->rx.dwork,
+					      msecs_to_jiffies(MSEC_PER_SEC
+							       / scan_rate));
+	} else {
+		enable_irq(dev->client->irq);
+	}
+}
+
+/*
+ * most_irq_handler - Interrupt Service Routine
+ * @irq: irq number
+ * @_dev: private data
+ *
+ * Schedules a delayed work
+ *
+ * By default the interrupt line is active low. Once the device generates an
+ * interrupt, it keeps the line low until the driver clears the interrupt by
+ * reading the PMP message. Since the I2C read is done in a work queue, the
+ * interrupt line must be disabled temporarily to avoid the ISR being called
+ * repeatedly. The interrupt is re-enabled in the work queue after the message
+ * has been read.
+ *
+ * Note: if the interrupt line were used in falling-edge mode, interrupts
+ * could be missed while the ISR is executing.
+ *
+ */
+static irqreturn_t most_irq_handler(int irq, void *_dev)
+{
+	struct hdm_i2c *dev = _dev;
+
+	disable_irq_nosync(irq);
+
+	schedule_delayed_work(&dev->rx.dwork, 0);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * i2c_probe - i2c probe handler
+ * @client: i2c client device structure
+ * @id: i2c client device id
+ *
+ * Return 0 on success, negative on failure.
+ *
+ * Register the i2c client device as a MOST interface
+ */
+static int i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+	struct hdm_i2c *dev;
+	int ret, i;
+	struct kobject *kobj;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	/* ID format: i2c-<bus>-<address> */
+	snprintf(dev->name, sizeof(dev->name), "i2c-%d-%04x",
+		 client->adapter->nr, client->addr);
+
+	for (i = 0; i < NUM_CHANNELS; i++) {
+		dev->is_open[i] = false;
+		dev->capabilities[i].data_type = MOST_CH_CONTROL;
+		dev->capabilities[i].num_buffers_packet = MAX_BUFFERS_CONTROL;
+		dev->capabilities[i].buffer_size_packet = MAX_BUF_SIZE_CONTROL;
+	}
+	dev->capabilities[CH_RX].direction = MOST_CH_RX;
+	dev->capabilities[CH_RX].name_suffix = "rx";
+	dev->capabilities[CH_TX].direction = MOST_CH_TX;
+	dev->capabilities[CH_TX].name_suffix = "tx";
+
+	dev->most_iface.interface = ITYPE_I2C;
+	dev->most_iface.description = dev->name;
+	dev->most_iface.num_channels = NUM_CHANNELS;
+	dev->most_iface.channel_vector = dev->capabilities;
+	dev->most_iface.configure = configure_channel;
+	dev->most_iface.enqueue = enqueue;
+	dev->most_iface.poison_channel = poison_channel;
+	dev->most_iface.request_netinfo = request_netinfo;
+
+	INIT_LIST_HEAD(&dev->rx.list);
+	mutex_init(&dev->rx.list_mutex);
+	init_waitqueue_head(&dev->rx.waitq);
+
+	INIT_DELAYED_WORK(&dev->rx.dwork, pending_rx_work);
+
+	dev->client = client;
+	i2c_set_clientdata(client, dev);
+
+	kobj = most_register_interface(&dev->most_iface);
+	if (IS_ERR(kobj)) {
+		pr_err("Failed to register i2c as a MOST interface\n");
+		kfree(dev);
+		return PTR_ERR(kobj);
+	}
+
+	dev->polling_mode = polling_req || client->irq <= 0;
+	if (!dev->polling_mode) {
+		pr_info("Requesting IRQ: %d\n", client->irq);
+		ret = request_irq(client->irq, most_irq_handler, IRQF_SHARED,
+				  client->name, dev);
+		if (ret) {
+			pr_info("IRQ request failed: %d, falling back to polling\n",
+				ret);
+			dev->polling_mode = true;
+		}
+	}
+
+	if (dev->polling_mode)
+		pr_info("Using polling at rate: %d times/sec\n", scan_rate);
+
+	return 0;
+}
+
+/*
+ * i2c_remove - i2c remove handler
+ * @client: i2c client device structure
+ *
+ * Return 0 on success.
+ *
+ * Unregister the i2c client device as a MOST interface
+ */
+static int i2c_remove(struct i2c_client *client)
+{
+	struct hdm_i2c *dev = i2c_get_clientdata(client);
+	int i;
+
+	if (!dev->polling_mode)
+		free_irq(client->irq, dev);
+
+	most_deregister_interface(&dev->most_iface);
+
+	for (i = 0 ; i < NUM_CHANNELS; i++)
+		if (dev->is_open[i])
+			poison_channel(&dev->most_iface, i);
+	cancel_delayed_work_sync(&dev->rx.dwork);
+	kfree(dev);
+
+	return 0;
+}
+
+static const struct i2c_device_id i2c_id[] = {
+	{ "most_i2c", 0 },
+	{ }, /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(i2c, i2c_id);
+
+static struct i2c_driver i2c_driver = {
+	.driver = {
+		.name = "hdm_i2c",
+		.owner = THIS_MODULE,
+	},
+	.probe = i2c_probe,
+	.remove = i2c_remove,
+	.id_table = i2c_id,
+};
+
+/**
+ * hdm_i2c_init - Driver Registration Routine
+ */
+static int __init hdm_i2c_init(void)
+{
+	pr_info("hdm_i2c_init()\n");
+
+	return i2c_add_driver(&i2c_driver);
+}
+
+/**
+ * hdm_i2c_exit - Driver Cleanup Routine
+ */
+static void __exit hdm_i2c_exit(void)
+{
+	i2c_del_driver(&i2c_driver);
+	pr_info("hdm_i2c_exit()\n");
+}
+
+module_init(hdm_i2c_init);
+module_exit(hdm_i2c_exit);
+
+MODULE_AUTHOR("Jain Roy Ambi <JainRoy.Ambi@microchip.com>");
+MODULE_AUTHOR("Andrey Shvetsov <andrey.shvetsov@k2l.de>");
+MODULE_DESCRIPTION("I2C Hardware Dependent Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
new file mode 100644
index 0000000..a482c3f
--- /dev/null
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -0,0 +1,14 @@
+#
+# MOST USB configuration
+#
+
+config HDM_USB
+	tristate "USB HDM"
+	depends on USB
+	select AIM_NETWORK
+	---help---
+	  Say Y here if you want to connect via USB to a network transceiver.
+	  This device driver depends on the networking AIM.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hdm_usb.
diff --git a/drivers/staging/most/hdm-usb/Makefile b/drivers/staging/most/hdm-usb/Makefile
new file mode 100644
index 0000000..6bbacb4
--- /dev/null
+++ b/drivers/staging/most/hdm-usb/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_HDM_USB) += hdm_usb.o
+
+ccflags-y += -Idrivers/staging/most/mostcore/
+ccflags-y += -Idrivers/staging/most/aim-network/
diff --git a/drivers/staging/most/hdm-usb/hdm_usb.c b/drivers/staging/most/hdm-usb/hdm_usb.c
new file mode 100644
index 0000000..305303f
--- /dev/null
+++ b/drivers/staging/most/hdm-usb/hdm_usb.c
@@ -0,0 +1,1454 @@
+/*
+ * hdm_usb.c - Hardware dependent module for USB
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/usb.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include "mostcore.h"
+#include "networking.h"
+
+#define USB_MTU			512
+#define NO_ISOCHRONOUS_URB	0
+#define AV_PACKETS_PER_XACT	2
+#define BUF_CHAIN_SIZE		0xFFFF
+#define MAX_NUM_ENDPOINTS	30
+#define MAX_SUFFIX_LEN		10
+#define MAX_STRING_LEN		80
+#define MAX_BUF_SIZE		0xFFFF
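+/* integer ceiling of x/y, used to round sizes up to whole USB transactions */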
+#define CEILING(x, y)		(((x) + (y) - 1) / (y))
+
+#define USB_VENDOR_ID_SMSC	0x0424  /* VID: SMSC */
+#define USB_DEV_ID_BRDG		0xC001  /* PID: USB Bridge */
+#define USB_DEV_ID_INIC		0xCF18  /* PID: USB INIC */
+#define HW_RESYNC		0x0000
+/* DRCI Addresses */
+#define DRCI_REG_NI_STATE	0x0100
+#define DRCI_REG_PACKET_BW	0x0101
+#define DRCI_REG_NODE_ADDR	0x0102
+#define DRCI_REG_NODE_POS	0x0103
+#define DRCI_REG_MEP_FILTER	0x0140
+#define DRCI_REG_HASH_TBL0	0x0141
+#define DRCI_REG_HASH_TBL1	0x0142
+#define DRCI_REG_HASH_TBL2	0x0143
+#define DRCI_REG_HASH_TBL3	0x0144
+#define DRCI_REG_HW_ADDR_HI	0x0145
+#define DRCI_REG_HW_ADDR_MI	0x0146
+#define DRCI_REG_HW_ADDR_LO	0x0147
+#define DRCI_READ_REQ		0xA0
+#define DRCI_WRITE_REQ		0xA1
+
+/**
+ * struct buf_anchor - used to create a list of pending URBs
+ * @urb: pointer to USB request block
+ * @clear_work_obj: work item used to clear a halted endpoint
+ * @list: linked list
+ * @urb_compl: completion signalled when the URB has been given back
+ */
+struct buf_anchor {
+	struct urb *urb;
+	struct work_struct clear_work_obj;
+	struct list_head list;
+	struct completion urb_compl;
+};
+#define to_buf_anchor(w) container_of(w, struct buf_anchor, clear_work_obj)
+
+/**
+ * struct most_dci_obj - Direct Communication Interface
+ * @kobj: position in sysfs
+ * @usb_device: pointer to the usb device
+ */
+struct most_dci_obj {
+	struct kobject kobj;
+	struct usb_device *usb_device;
+};
+#define to_dci_obj(p) container_of(p, struct most_dci_obj, kobj)
+
+/**
+ * struct most_dev - holds all usb interface specific stuff
+ * @parent: parent object in sysfs
+ * @usb_device: pointer to usb device
+ * @iface: hardware interface
+ * @cap: channel capabilities
+ * @conf: channel configuration
+ * @dci: direct communication interface of hardware
+ * @hw_addr: MAC address of hardware
+ * @ep_address: endpoint address table
+ * @link_stat: link status of hardware
+ * @description: device description
+ * @suffix: suffix for channel name
+ * @anchor_list_lock: locks list access
+ * @padding_active: indicates channel uses padding
+ * @is_channel_healthy: health status table of each channel
+ * @anchor_list: list of anchored items
+ * @io_mutex: synchronize I/O with disconnect
+ * @link_stat_timer: timer for link status reports
+ * @poll_work_obj: work for polling link status
+ */
+struct most_dev {
+	struct kobject *parent;
+	struct usb_device *usb_device;
+	struct most_interface iface;
+	struct most_channel_capability *cap;
+	struct most_channel_config *conf;
+	struct most_dci_obj *dci;
+	u8 hw_addr[6];
+	u8 *ep_address;
+	u16 link_stat;
+	char description[MAX_STRING_LEN];
+	char suffix[MAX_NUM_ENDPOINTS][MAX_SUFFIX_LEN];
+	spinlock_t anchor_list_lock[MAX_NUM_ENDPOINTS];
+	bool padding_active[MAX_NUM_ENDPOINTS];
+	bool is_channel_healthy[MAX_NUM_ENDPOINTS];
+	struct list_head *anchor_list;
+	struct mutex io_mutex;
+	struct timer_list link_stat_timer;
+	struct work_struct poll_work_obj;
+};
+#define to_mdev(d) container_of(d, struct most_dev, iface)
+#define to_mdev_from_work(w) container_of(w, struct most_dev, poll_work_obj)
+
+static struct workqueue_struct *schedule_usb_work;
+static void wq_clear_halt(struct work_struct *wq_obj);
+static void wq_netinfo(struct work_struct *wq_obj);
+
+/**
+ * trigger_resync_vr - Vendor request to trigger HW re-sync mechanism
+ * @dev: usb device
+ *
+ */
+static void trigger_resync_vr(struct usb_device *dev)
+{
+	int retval;
+	u8 request_type = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
+	int *data = kzalloc(sizeof(*data), GFP_KERNEL);
+
+	if (!data)
+		goto error;
+	*data = HW_RESYNC;
+	retval = usb_control_msg(dev,
+				 usb_sndctrlpipe(dev, 0),
+				 0,
+				 request_type,
+				 0,
+				 0,
+				 data,
+				 0,
+				 5 * HZ);
+	kfree(data);
+	if (retval >= 0)
+		return;
+error:
+	dev_err(&dev->dev, "Vendor request \"stall\" failed\n");
+}
+
+/**
+ * drci_rd_reg - read a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @buf: buffer to store data
+ *
+ * This reads data from the INIC's direct register communication interface
+ */
+static inline int drci_rd_reg(struct usb_device *dev, u16 reg, void *buf)
+{
+	return usb_control_msg(dev,
+			       usb_rcvctrlpipe(dev, 0),
+			       DRCI_READ_REQ,
+			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       0x0000,
+			       reg,
+			       buf,
+			       2,
+			       5 * HZ);
+}
+
+/**
+ * drci_wr_reg - write a DCI register
+ * @dev: usb device
+ * @reg: register address
+ * @data: data to write
+ *
+ * This writes data to the INIC's direct register communication interface
+ */
+static inline int drci_wr_reg(struct usb_device *dev, u16 reg, u16 data)
+{
+	return usb_control_msg(dev,
+			       usb_sndctrlpipe(dev, 0),
+			       DRCI_WRITE_REQ,
+			       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			       data,
+			       reg,
+			       NULL,
+			       0,
+			       5 * HZ);
+}
+
+/**
+ * free_anchored_buffers - free device's anchored items
+ * @mdev: the device
+ * @channel: channel ID
+ */
+static void free_anchored_buffers(struct most_dev *mdev, unsigned int channel)
+{
+	struct mbo *mbo;
+	struct buf_anchor *anchor, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+	list_for_each_entry_safe(anchor, tmp, &mdev->anchor_list[channel], list) {
+		struct urb *urb = anchor->urb;
+
+		spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+		if (likely(urb)) {
+			mbo = urb->context;
+			if (!irqs_disabled()) {
+				usb_kill_urb(urb);
+			} else {
+				usb_unlink_urb(urb);
+				wait_for_completion(&anchor->urb_compl);
+			}
+			if ((mbo) && (mbo->complete)) {
+				mbo->status = MBO_E_CLOSE;
+				mbo->processed_length = 0;
+				mbo->complete(mbo);
+			}
+			usb_free_urb(urb);
+		}
+		spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+		list_del(&anchor->list);
+		kfree(anchor);
+	}
+	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+}
+
+/**
+ * get_stream_frame_size - calculate frame size of current configuration
+ * @cfg: channel configuration
+ */
+static unsigned int get_stream_frame_size(struct most_channel_config *cfg)
+{
+	unsigned int frame_size = 0;
+	unsigned int sub_size = cfg->subbuffer_size;
+
+	if (!sub_size) {
+		pr_warn("Misconfig: Subbuffer size zero.\n");
+		return frame_size;
+	}
+	switch (cfg->data_type) {
+	case MOST_CH_ISOC_AVP:
+		frame_size = AV_PACKETS_PER_XACT * sub_size;
+		break;
+	case MOST_CH_SYNC:
+		if (cfg->packets_per_xact == 0) {
+			pr_warn("Misconfig: Packets per XACT zero\n");
+			frame_size = 0;
+		} else if (cfg->packets_per_xact == 0xFF)
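+			/* 0xFF: fill one USB transaction with sub-buffers */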
+			frame_size = (USB_MTU / sub_size) * sub_size;
+		else
+			frame_size = cfg->packets_per_xact * sub_size;
+		break;
+	default:
+		pr_warn("Query frame size of non-streaming channel\n");
+		break;
+	}
+	return frame_size;
+}
+
+/**
+ * hdm_poison_channel - mark buffers of this channel as invalid
+ * @iface: pointer to the interface
+ * @channel: channel ID
+ *
+ * This unlinks all URBs submitted to the HCD,
+ * calls the associated completion function of the core and removes
+ * them from the list.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+static int hdm_poison_channel(struct most_interface *iface, int channel)
+{
+	struct most_dev *mdev;
+
+	if (unlikely(!iface)) {
+		pr_warn("Poison: Bad interface.\n");
+		return -EIO;
+	}
+	mdev = to_mdev(iface);
+	if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
+		dev_warn(&mdev->usb_device->dev, "Channel ID out of range.\n");
+		return -ECHRNG;
+	}
+
+	mdev->is_channel_healthy[channel] = false;
+
+	mutex_lock(&mdev->io_mutex);
+	free_anchored_buffers(mdev, channel);
+	mdev->padding_active[channel] = false;
+
+	if (mdev->conf[channel].data_type == MOST_CH_ASYNC) {
+		del_timer_sync(&mdev->link_stat_timer);
+		cancel_work_sync(&mdev->poll_work_obj);
+	}
+	mutex_unlock(&mdev->io_mutex);
+	return 0;
+}
+
+/**
+ * hdm_add_padding - add padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This inserts the INIC hardware specific padding bytes into a streaming
+ * channel's buffer
+ */
+static int hdm_add_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
+{
+	struct most_channel_config *conf = &mdev->conf[channel];
+	unsigned int j, num_frames, frame_size;
+	u16 rd_addr, wr_addr;
+
+	frame_size = get_stream_frame_size(conf);
+	if (!frame_size)
+		return -EIO;
+	num_frames = mbo->buffer_length / frame_size;
+
+	if (num_frames < 1) {
+		dev_err(&mdev->usb_device->dev,
+			"Missed minimal transfer unit.\n");
+		return -EIO;
+	}
+
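+	/*
+	 * Move the frames out to USB_MTU boundaries, starting with the last
+	 * frame so no data is overwritten before it has been moved.
+	 */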
+	for (j = 1; j < num_frames; j++) {
+		wr_addr = (num_frames - j) * USB_MTU;
+		rd_addr = (num_frames - j) * frame_size;
+		memmove(mbo->virt_address + wr_addr,
+			mbo->virt_address + rd_addr,
+			frame_size);
+	}
+	mbo->buffer_length = num_frames * USB_MTU;
+	return 0;
+}
+
+/**
+ * hdm_remove_padding - remove padding bytes
+ * @mdev: most device
+ * @channel: channel ID
+ * @mbo: buffer object
+ *
+ * This takes the INIC hardware specific padding bytes off a streaming
+ * channel's buffer.
+ */
+static int hdm_remove_padding(struct most_dev *mdev, int channel, struct mbo *mbo)
+{
+	unsigned int j, num_frames, frame_size;
+	struct most_channel_config *const conf = &mdev->conf[channel];
+
+	frame_size = get_stream_frame_size(conf);
+	if (!frame_size)
+		return -EIO;
+	num_frames = mbo->processed_length / USB_MTU;
+
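+	/*
+	 * Compact the frames towards the start of the buffer, dropping the
+	 * padding at the end of each USB transaction.
+	 */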
+	for (j = 1; j < num_frames; j++)
+		memmove(mbo->virt_address + frame_size * j,
+			mbo->virt_address + USB_MTU * j,
+			frame_size);
+
+	mbo->processed_length = frame_size * num_frames;
+	return 0;
+}
+
+/**
+ * hdm_write_completion - completion function for submitted Tx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. In case the URB has been
+ * unlinked before, it is immediately freed. On any other error the MBO
+ * transfer flag is set. On success it frees allocated resources and calls
+ * the completion function.
+ *
+ * Context: interrupt!
+ */
+static void hdm_write_completion(struct urb *urb)
+{
+	struct mbo *mbo;
+	struct buf_anchor *anchor;
+	struct most_dev *mdev;
+	struct device *dev;
+	unsigned int channel;
+	unsigned long flags;
+
+	mbo = urb->context;
+	anchor = mbo->priv;
+	mdev = to_mdev(mbo->ifp);
+	channel = mbo->hdm_channel_id;
+	dev = &mdev->usb_device->dev;
+
+	if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
+	    (mdev->is_channel_healthy[channel] == false)) {
+		complete(&anchor->urb_compl);
+		return;
+	}
+
+	if (unlikely(urb->status && !(urb->status == -ENOENT ||
+				      urb->status == -ECONNRESET ||
+				      urb->status == -ESHUTDOWN))) {
+		mbo->processed_length = 0;
+		switch (urb->status) {
+		case -EPIPE:
+			dev_warn(dev, "Broken OUT pipe detected\n");
+			most_stop_enqueue(&mdev->iface, channel);
+			mbo->status = MBO_E_INVAL;
+			usb_unlink_urb(urb);
+			INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
+			queue_work(schedule_usb_work, &anchor->clear_work_obj);
+			return;
+		case -ENODEV:
+		case -EPROTO:
+			mbo->status = MBO_E_CLOSE;
+			break;
+		default:
+			mbo->status = MBO_E_INVAL;
+			break;
+		}
+	} else {
+		mbo->status = MBO_SUCCESS;
+		mbo->processed_length = urb->actual_length;
+	}
+
+	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+	list_del(&anchor->list);
+	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+	kfree(anchor);
+
+	if (likely(mbo->complete))
+		mbo->complete(mbo);
+	usb_free_urb(urb);
+}
+
+/**
+ * hdm_read_completion - completion function for submitted Rx URBs
+ * @urb: the URB that has been completed
+ *
+ * This checks the status of the completed URB. In case the URB has been
+ * unlinked before, it is immediately freed. On any other error the MBO transfer
+ * flag is set. On success it frees allocated resources, removes
+ * padding bytes (if necessary) and calls the completion function.
+ *
+ * Context: interrupt!
+ *
+ * **************************************************************************
+ *                   Error codes returned by in urb->status
+ *                   or in iso_frame_desc[n].status (for ISO)
+ * *************************************************************************
+ *
+ * USB device drivers may only test urb status values in completion handlers.
+ * This is because otherwise there would be a race between HCDs updating
+ * these values on one CPU, and device drivers testing them on another CPU.
+ *
+ * A transfer's actual_length may be positive even when an error has been
+ * reported.  That's because transfers often involve several packets, so that
+ * one or more packets could finish before an error stops further endpoint I/O.
+ *
+ * For isochronous URBs, the urb status value is non-zero only if the URB is
+ * unlinked, the device is removed, the host controller is disabled or the total
+ * transferred length is less than the requested length and the URB_SHORT_NOT_OK
+ * flag is set.  Completion handlers for isochronous URBs should only see
+ * urb->status set to zero, -ENOENT, -ECONNRESET, -ESHUTDOWN, or -EREMOTEIO.
+ * Individual frame descriptor status fields may report more status codes.
+ *
+ *
+ * 0			Transfer completed successfully
+ *
+ * -ENOENT		URB was synchronously unlinked by usb_unlink_urb
+ *
+ * -EINPROGRESS		URB still pending, no results yet
+ *			(That is, if drivers see this it's a bug.)
+ *
+ * -EPROTO (*, **)	a) bitstuff error
+ *			b) no response packet received within the
+ *			   prescribed bus turn-around time
+ *			c) unknown USB error
+ *
+ * -EILSEQ (*, **)	a) CRC mismatch
+ *			b) no response packet received within the
+ *			   prescribed bus turn-around time
+ *			c) unknown USB error
+ *
+ *			Note that often the controller hardware does not
+ *			distinguish among cases a), b), and c), so a
+ *			driver cannot tell whether there was a protocol
+ *			error, a failure to respond (often caused by
+ *			device disconnect), or some other fault.
+ *
+ * -ETIME (**)		No response packet received within the prescribed
+ *			bus turn-around time.  This error may instead be
+ *			reported as -EPROTO or -EILSEQ.
+ *
+ * -ETIMEDOUT		Synchronous USB message functions use this code
+ *			to indicate timeout expired before the transfer
+ *			completed, and no other error was reported by HC.
+ *
+ * -EPIPE (**)		Endpoint stalled.  For non-control endpoints,
+ *			reset this status with usb_clear_halt().
+ *
+ * -ECOMM		During an IN transfer, the host controller
+ *			received data from an endpoint faster than it
+ *			could be written to system memory
+ *
+ * -ENOSR		During an OUT transfer, the host controller
+ *			could not retrieve data from system memory fast
+ *			enough to keep up with the USB data rate
+ *
+ * -EOVERFLOW (*)	The amount of data returned by the endpoint was
+ *			greater than either the max packet size of the
+ *			endpoint or the remaining buffer size.  "Babble".
+ *
+ * -EREMOTEIO		The data read from the endpoint did not fill the
+ *			specified buffer, and URB_SHORT_NOT_OK was set in
+ *			urb->transfer_flags.
+ *
+ * -ENODEV		Device was removed.  Often preceded by a burst of
+ *			other errors, since the hub driver doesn't detect
+ *			device removal events immediately.
+ *
+ * -EXDEV		ISO transfer only partially completed
+ *			(only set in iso_frame_desc[n].status, not urb->status)
+ *
+ * -EINVAL		ISO madness, if this happens: Log off and go home
+ *
+ * -ECONNRESET		URB was asynchronously unlinked by usb_unlink_urb
+ *
+ * -ESHUTDOWN		The device or host controller has been disabled due
+ *			to some problem that could not be worked around,
+ *			such as a physical disconnect.
+ *
+ *
+ * (*) Error codes like -EPROTO, -EILSEQ and -EOVERFLOW normally indicate
+ * hardware problems such as bad devices (including firmware) or cables.
+ *
+ * (**) This is also one of several codes that different kinds of host
+ * controller use to indicate a transfer has failed because of device
+ * disconnect.  In the interval before the hub driver starts disconnect
+ * processing, devices may receive such fault reports for every request.
+ *
+ * See <https://www.kernel.org/doc/Documentation/usb/error-codes.txt>
+ */
+static void hdm_read_completion(struct urb *urb)
+{
+	struct mbo *mbo;
+	struct buf_anchor *anchor;
+	struct most_dev *mdev;
+	struct device *dev;
+	unsigned long flags;
+	unsigned int channel;
+	struct most_channel_config *conf;
+
+	mbo = urb->context;
+	anchor = mbo->priv;
+	mdev = to_mdev(mbo->ifp);
+	channel = mbo->hdm_channel_id;
+	dev = &mdev->usb_device->dev;
+
+	if ((urb->status == -ENOENT) || (urb->status == -ECONNRESET) ||
+	    (mdev->is_channel_healthy[channel] == false)) {
+		complete(&anchor->urb_compl);
+		return;
+	}
+
+	conf = &mdev->conf[channel];
+
+	if (unlikely(urb->status && !(urb->status == -ENOENT ||
+				      urb->status == -ECONNRESET ||
+				      urb->status == -ESHUTDOWN))) {
+		mbo->processed_length = 0;
+		switch (urb->status) {
+		case -EPIPE:
+			dev_warn(dev, "Broken IN pipe detected\n");
+			mbo->status = MBO_E_INVAL;
+			usb_unlink_urb(urb);
+			INIT_WORK(&anchor->clear_work_obj, wq_clear_halt);
+			queue_work(schedule_usb_work, &anchor->clear_work_obj);
+			return;
+		case -ENODEV:
+		case -EPROTO:
+			mbo->status = MBO_E_CLOSE;
+			break;
+		case -EOVERFLOW:
+			dev_warn(dev, "Babble on IN pipe detected\n");
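+			/* fall through: treat babble as an invalid transfer */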
+		default:
+			mbo->status = MBO_E_INVAL;
+			break;
+		}
+	} else {
+		mbo->processed_length = urb->actual_length;
+		if (mdev->padding_active[channel] == false) {
+			mbo->status = MBO_SUCCESS;
+		} else {
+			if (hdm_remove_padding(mdev, channel, mbo)) {
+				mbo->processed_length = 0;
+				mbo->status = MBO_E_INVAL;
+			} else {
+				mbo->status = MBO_SUCCESS;
+			}
+		}
+	}
+	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+	list_del(&anchor->list);
+	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+	kfree(anchor);
+
+	if (likely(mbo->complete))
+		mbo->complete(mbo);
+	usb_free_urb(urb);
+}
+
+/**
+ * hdm_enqueue - receive a buffer to be used for data transfer
+ * @iface: interface to enqueue to
+ * @channel: ID of the channel
+ * @mbo: pointer to the buffer object
+ *
+ * This allocates a new URB and fills it according to the channel
+ * that is being used for transmission of data. Before the URB is
+ * submitted it is stored in the private anchor list.
+ *
+ * Returns 0 on success. On any error the URB is freed and an error code
+ * is returned.
+ *
+ * Context: Could in _some_ cases be interrupt!
+ */
+static int hdm_enqueue(struct most_interface *iface, int channel, struct mbo *mbo)
+{
+	struct most_dev *mdev;
+	struct buf_anchor *anchor;
+	struct most_channel_config *conf;
+	struct device *dev;
+	int retval = 0;
+	struct urb *urb;
+	unsigned long flags;
+	unsigned long length;
+	void *virt_address;
+
+	if (unlikely(!iface || !mbo))
+		return -EIO;
+	if (unlikely((channel < 0) || (channel >= iface->num_channels)))
+		return -ECHRNG;
+
+	mdev = to_mdev(iface);
+	conf = &mdev->conf[channel];
+	dev = &mdev->usb_device->dev;
+
+	if (!mdev->usb_device)
+		return -ENODEV;
+
+	urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
+	if (!urb) {
+		dev_err(dev, "Failed to allocate URB\n");
+		return -ENOMEM;
+	}
+
+	anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
+	if (!anchor) {
+		retval = -ENOMEM;
+		goto _error;
+	}
+
+	anchor->urb = urb;
+	init_completion(&anchor->urb_compl);
+	mbo->priv = anchor;
+
+	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+	list_add_tail(&anchor->list, &mdev->anchor_list[channel]);
+	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+
+	if ((mdev->padding_active[channel] == true) &&
+	    (conf->direction & MOST_CH_TX))
+		if (hdm_add_padding(mdev, channel, mbo)) {
+			retval = -EIO;
+			goto _error_1;
+		}
+
+	urb->transfer_dma = mbo->bus_address;
+	virt_address = mbo->virt_address;
+	length = mbo->buffer_length;
+
+	if (conf->direction & MOST_CH_TX) {
+		usb_fill_bulk_urb(urb, mdev->usb_device,
+				  usb_sndbulkpipe(mdev->usb_device,
+						  mdev->ep_address[channel]),
+				  virt_address,
+				  length,
+				  hdm_write_completion,
+				  mbo);
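+		/*
+		 * Non-isochronous TX buffers that are an exact multiple of the
+		 * endpoint's max packet size are terminated with a
+		 * zero-length packet.
+		 */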
+		if (conf->data_type != MOST_CH_ISOC_AVP)
+			urb->transfer_flags |= URB_ZERO_PACKET;
+	} else {
+		usb_fill_bulk_urb(urb, mdev->usb_device,
+				  usb_rcvbulkpipe(mdev->usb_device,
+						  mdev->ep_address[channel]),
+				  virt_address,
+				  length,
+				  hdm_read_completion,
+				  mbo);
+	}
+	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+	retval = usb_submit_urb(urb, GFP_KERNEL);
+	if (retval) {
+		dev_err(dev, "URB submit failed with error %d.\n", retval);
+		goto _error_1;
+	}
+	return 0;
+
+_error_1:
+	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+	list_del(&anchor->list);
+	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+	kfree(anchor);
+_error:
+	usb_free_urb(urb);
+	return retval;
+}
+
+/**
+ * hdm_configure_channel - receive channel configuration from core
+ * @iface: interface
+ * @channel: channel ID
+ * @conf: structure that holds the configuration information
+ */
+static int hdm_configure_channel(struct most_interface *iface, int channel,
+				 struct most_channel_config *conf)
+{
+	unsigned int num_frames;
+	unsigned int frame_size;
+	unsigned int temp_size;
+	unsigned int tail_space;
+	struct most_dev *mdev;
+	struct device *dev;
+
+	if (unlikely(!iface || !conf)) {
+		pr_err("Bad interface or config pointer.\n");
+		return -EINVAL;
+	}
+	mdev = to_mdev(iface);
+	dev = &mdev->usb_device->dev;
+
+	if (unlikely((channel < 0) || (channel >= iface->num_channels))) {
+		dev_err(dev, "Channel ID out of range.\n");
+		return -EINVAL;
+	}
+	mdev->is_channel_healthy[channel] = true;
+
+	if ((!conf->num_buffers) || (!conf->buffer_size)) {
+		dev_err(dev, "Misconfig: buffer size or #buffers zero.\n");
+		return -EINVAL;
+	}
+
+	if (!(conf->data_type == MOST_CH_SYNC) &&
+	    !((conf->data_type == MOST_CH_ISOC_AVP) &&
+	      (conf->packets_per_xact != 0xFF))) {
+		mdev->padding_active[channel] = false;
+		goto exit;
+	}
+
+	mdev->padding_active[channel] = true;
+	temp_size = conf->buffer_size;
+
+	if ((conf->data_type != MOST_CH_SYNC) &&
+	    (conf->data_type != MOST_CH_ISOC_AVP)) {
+		dev_warn(dev, "Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	frame_size = get_stream_frame_size(conf);
+	if ((frame_size == 0) || (frame_size > USB_MTU)) {
+		dev_warn(dev, "Misconfig: frame size wrong\n");
+		return -EINVAL;
+	}
+
+	if (conf->buffer_size % frame_size) {
+		u16 tmp_val;
+
+		tmp_val = conf->buffer_size / frame_size;
+		conf->buffer_size = tmp_val * frame_size;
+		dev_notice(dev,
+			   "Channel %d - rounding buffer size to %d bytes, channel config says %d bytes\n",
+			   channel,
+			   conf->buffer_size,
+			   temp_size);
+	}
+
+	num_frames = conf->buffer_size / frame_size;
+	tail_space = num_frames * (USB_MTU - frame_size);
+	temp_size += tail_space;
+
+	/* calculate extra length to comply w/ HW padding */
+	conf->extra_len = (CEILING(temp_size, USB_MTU) * USB_MTU)
+			  - conf->buffer_size;
+exit:
+	mdev->conf[channel] = *conf;
+	return 0;
+}
+
+/**
+ * hdm_update_netinfo - retrieve latest networking information
+ * @mdev: device interface
+ *
+ * This triggers the USB vendor requests to read the hardware address and
+ * the current link status of the attached device.
+ */
+static int hdm_update_netinfo(struct most_dev *mdev)
+{
+	struct device *dev = &mdev->usb_device->dev;
+	int i;
+	u16 link;
+	u8 addr[6];
+
+	if (!is_valid_ether_addr(mdev->hw_addr)) {
+		if (0 > drci_rd_reg(mdev->usb_device,
+				    DRCI_REG_HW_ADDR_HI, addr)) {
+			dev_err(dev, "Vendor request \"hw_addr_hi\" failed\n");
+			return -1;
+		}
+		if (0 > drci_rd_reg(mdev->usb_device,
+				    DRCI_REG_HW_ADDR_MI, addr + 2)) {
+			dev_err(dev, "Vendor request \"hw_addr_mid\" failed\n");
+			return -1;
+		}
+		if (0 > drci_rd_reg(mdev->usb_device,
+				    DRCI_REG_HW_ADDR_LO, addr + 4)) {
+			dev_err(dev, "Vendor request \"hw_addr_low\" failed\n");
+			return -1;
+		}
+		mutex_lock(&mdev->io_mutex);
+		for (i = 0; i < 6; i++)
+			mdev->hw_addr[i] = addr[i];
+		mutex_unlock(&mdev->io_mutex);
+
+	}
+	if (0 > drci_rd_reg(mdev->usb_device, DRCI_REG_NI_STATE, &link)) {
+		dev_err(dev, "Vendor request \"link status\" failed\n");
+		return -1;
+	}
+	le16_to_cpus(&link);
+	mutex_lock(&mdev->io_mutex);
+	mdev->link_stat = link;
+	mutex_unlock(&mdev->io_mutex);
+	return 0;
+}
+
+/**
+ * hdm_request_netinfo - request network information
+ * @iface: pointer to interface
+ * @channel: channel ID
+ *
+ * This is used as a trigger to set up the link status timer that
+ * polls for the NI state of the INIC every 2 seconds.
+ *
+ */
+static void hdm_request_netinfo(struct most_interface *iface, int channel)
+{
+	struct most_dev *mdev;
+
+	BUG_ON(!iface);
+	mdev = to_mdev(iface);
+	mdev->link_stat_timer.expires = jiffies + HZ;
+	mod_timer(&mdev->link_stat_timer, mdev->link_stat_timer.expires);
+}
+
+/**
+ * link_stat_timer_handler - add work to link_stat work queue
+ * @data: pointer to USB device instance
+ *
+ * The handler runs in interrupt context. That's why we need to defer the
+ * tasks to a work queue.
+ */
+static void link_stat_timer_handler(unsigned long data)
+{
+	struct most_dev *mdev = (struct most_dev *)data;
+
+	queue_work(schedule_usb_work, &mdev->poll_work_obj);
+	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+	add_timer(&mdev->link_stat_timer);
+}
+
+/**
+ * wq_netinfo - work queue function
+ * @wq_obj: object that holds data for our deferred work to do
+ *
+ * This retrieves the network interface status of the USB INIC
+ * and compares it with the current status. If the status has
+ * changed, it updates the status of the core.
+ */
+static void wq_netinfo(struct work_struct *wq_obj)
+{
+	struct most_dev *mdev;
+	int i, prev_link_stat;
+	u8 prev_hw_addr[6];
+
+	mdev = to_mdev_from_work(wq_obj);
+	prev_link_stat = mdev->link_stat;
+
+	for (i = 0; i < 6; i++)
+		prev_hw_addr[i] = mdev->hw_addr[i];
+
+	if (0 > hdm_update_netinfo(mdev))
+		return;
+	if ((prev_link_stat != mdev->link_stat) ||
+	    (prev_hw_addr[0] != mdev->hw_addr[0]) ||
+	    (prev_hw_addr[1] != mdev->hw_addr[1]) ||
+	    (prev_hw_addr[2] != mdev->hw_addr[2]) ||
+	    (prev_hw_addr[3] != mdev->hw_addr[3]) ||
+	    (prev_hw_addr[4] != mdev->hw_addr[4]) ||
+	    (prev_hw_addr[5] != mdev->hw_addr[5]))
+		most_deliver_netinfo(&mdev->iface, mdev->link_stat,
+				     &mdev->hw_addr[0]);
+}
+
+/**
+ * wq_clear_halt - work queue function
+ * @wq_obj: work_struct object to execute
+ *
+ * This sends a clear_halt to the given USB pipe.
+ */
+static void wq_clear_halt(struct work_struct *wq_obj)
+{
+	struct buf_anchor *anchor;
+	struct most_dev *mdev;
+	struct mbo *mbo;
+	struct urb *urb;
+	unsigned int channel;
+	unsigned long flags;
+
+	anchor = to_buf_anchor(wq_obj);
+	urb = anchor->urb;
+	mbo = urb->context;
+	mdev = to_mdev(mbo->ifp);
+	channel = mbo->hdm_channel_id;
+
+	if (usb_clear_halt(urb->dev, urb->pipe))
+		dev_warn(&mdev->usb_device->dev, "Failed to reset endpoint.\n");
+
+	usb_free_urb(urb);
+	spin_lock_irqsave(&mdev->anchor_list_lock[channel], flags);
+	list_del(&anchor->list);
+	spin_unlock_irqrestore(&mdev->anchor_list_lock[channel], flags);
+
+	if (likely(mbo->complete))
+		mbo->complete(mbo);
+	if (mdev->conf[channel].direction & MOST_CH_TX)
+		most_resume_enqueue(&mdev->iface, channel);
+
+	kfree(anchor);
+}
+
+/**
+ * hdm_usb_fops - file operation table for USB driver
+ */
+static const struct file_operations hdm_usb_fops = {
+	.owner = THIS_MODULE,
+};
+
+/**
+ * usb_device_id - ID table for HCD device probing
+ */
+static struct usb_device_id usbid[] = {
+	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_BRDG), },
+	{ USB_DEVICE(USB_VENDOR_ID_SMSC, USB_DEV_ID_INIC), },
+	{ } /* Terminating entry */
+};
+
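+/* declare read-only and read/write sysfs attributes for DCI registers */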
+#define MOST_DCI_RO_ATTR(_name) \
+	struct most_dci_attribute most_dci_attr_##_name = \
+		__ATTR(_name, S_IRUGO, show_value, NULL)
+
+#define MOST_DCI_ATTR(_name) \
+	struct most_dci_attribute most_dci_attr_##_name = \
+		__ATTR(_name, S_IRUGO | S_IWUSR, show_value, store_value)
+
+/**
+ * struct most_dci_attribute - to access the attributes of a dci object
+ * @attr: attributes of a dci object
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_dci_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct most_dci_obj *d,
+			struct most_dci_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct most_dci_obj *d,
+			 struct most_dci_attribute *attr,
+			 const char *buf,
+			 size_t count);
+};
+#define to_dci_attr(a) container_of(a, struct most_dci_attribute, attr)
+
+/**
+ * dci_attr_show - show function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t dci_attr_show(struct kobject *kobj, struct attribute *attr,
+			     char *buf)
+{
+	struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+	if (!dci_attr->show)
+		return -EIO;
+
+	return dci_attr->show(dci_obj, dci_attr, buf);
+}
+
+/**
+ * dci_attr_store - store function for dci object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t dci_attr_store(struct kobject *kobj,
+			      struct attribute *attr,
+			      const char *buf,
+			      size_t len)
+{
+	struct most_dci_attribute *dci_attr = to_dci_attr(attr);
+	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+	if (!dci_attr->store)
+		return -EIO;
+
+	return dci_attr->store(dci_obj, dci_attr, buf, len);
+}
+
+static const struct sysfs_ops most_dci_sysfs_ops = {
+	.show = dci_attr_show,
+	.store = dci_attr_store,
+};
+
+/**
+ * most_dci_release - release function for dci object
+ * @kobj: pointer to kobject
+ *
+ * This frees the memory allocated for the dci object
+ */
+static void most_dci_release(struct kobject *kobj)
+{
+	struct most_dci_obj *dci_obj = to_dci_obj(kobj);
+
+	kfree(dci_obj);
+}
+
+static ssize_t show_value(struct most_dci_obj *dci_obj,
+			  struct most_dci_attribute *attr, char *buf)
+{
+	u16 tmp_val;
+	u16 reg_addr;
+	int err;
+
+	if (!strcmp(attr->attr.name, "ni_state"))
+		reg_addr = DRCI_REG_NI_STATE;
+	else if (!strcmp(attr->attr.name, "packet_bandwidth"))
+		reg_addr = DRCI_REG_PACKET_BW;
+	else if (!strcmp(attr->attr.name, "node_address"))
+		reg_addr = DRCI_REG_NODE_ADDR;
+	else if (!strcmp(attr->attr.name, "node_position"))
+		reg_addr = DRCI_REG_NODE_POS;
+	else if (!strcmp(attr->attr.name, "mep_filter"))
+		reg_addr = DRCI_REG_MEP_FILTER;
+	else if (!strcmp(attr->attr.name, "mep_hash0"))
+		reg_addr = DRCI_REG_HASH_TBL0;
+	else if (!strcmp(attr->attr.name, "mep_hash1"))
+		reg_addr = DRCI_REG_HASH_TBL1;
+	else if (!strcmp(attr->attr.name, "mep_hash2"))
+		reg_addr = DRCI_REG_HASH_TBL2;
+	else if (!strcmp(attr->attr.name, "mep_hash3"))
+		reg_addr = DRCI_REG_HASH_TBL3;
+	else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
+		reg_addr = DRCI_REG_HW_ADDR_HI;
+	else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
+		reg_addr = DRCI_REG_HW_ADDR_MI;
+	else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
+		reg_addr = DRCI_REG_HW_ADDR_LO;
+	else
+		return -EIO;
+
+	err = drci_rd_reg(dci_obj->usb_device, reg_addr, &tmp_val);
+	if (err < 0)
+		return err;
+
+	return snprintf(buf, PAGE_SIZE, "%04x\n", le16_to_cpu(tmp_val));
+}
+
+static ssize_t store_value(struct most_dci_obj *dci_obj,
+			   struct most_dci_attribute *attr,
+			   const char *buf, size_t count)
+{
+	u16 v16;
+	u16 reg_addr;
+	int err;
+
+	if (!strcmp(attr->attr.name, "mep_filter"))
+		reg_addr = DRCI_REG_MEP_FILTER;
+	else if (!strcmp(attr->attr.name, "mep_hash0"))
+		reg_addr = DRCI_REG_HASH_TBL0;
+	else if (!strcmp(attr->attr.name, "mep_hash1"))
+		reg_addr = DRCI_REG_HASH_TBL1;
+	else if (!strcmp(attr->attr.name, "mep_hash2"))
+		reg_addr = DRCI_REG_HASH_TBL2;
+	else if (!strcmp(attr->attr.name, "mep_hash3"))
+		reg_addr = DRCI_REG_HASH_TBL3;
+	else if (!strcmp(attr->attr.name, "mep_eui48_hi"))
+		reg_addr = DRCI_REG_HW_ADDR_HI;
+	else if (!strcmp(attr->attr.name, "mep_eui48_mi"))
+		reg_addr = DRCI_REG_HW_ADDR_MI;
+	else if (!strcmp(attr->attr.name, "mep_eui48_lo"))
+		reg_addr = DRCI_REG_HW_ADDR_LO;
+	else
+		return -EIO;
+
+	err = kstrtou16(buf, 16, &v16);
+	if (err)
+		return err;
+
+	err = drci_wr_reg(dci_obj->usb_device, reg_addr, cpu_to_le16(v16));
+	if (err < 0)
+		return err;
+
+	return count;
+}
+
+static MOST_DCI_RO_ATTR(ni_state);
+static MOST_DCI_RO_ATTR(packet_bandwidth);
+static MOST_DCI_RO_ATTR(node_address);
+static MOST_DCI_RO_ATTR(node_position);
+static MOST_DCI_ATTR(mep_filter);
+static MOST_DCI_ATTR(mep_hash0);
+static MOST_DCI_ATTR(mep_hash1);
+static MOST_DCI_ATTR(mep_hash2);
+static MOST_DCI_ATTR(mep_hash3);
+static MOST_DCI_ATTR(mep_eui48_hi);
+static MOST_DCI_ATTR(mep_eui48_mi);
+static MOST_DCI_ATTR(mep_eui48_lo);
+
+/**
+ * most_dci_def_attrs - array of default attribute files of the dci object
+ */
+static struct attribute *most_dci_def_attrs[] = {
+	&most_dci_attr_ni_state.attr,
+	&most_dci_attr_packet_bandwidth.attr,
+	&most_dci_attr_node_address.attr,
+	&most_dci_attr_node_position.attr,
+	&most_dci_attr_mep_filter.attr,
+	&most_dci_attr_mep_hash0.attr,
+	&most_dci_attr_mep_hash1.attr,
+	&most_dci_attr_mep_hash2.attr,
+	&most_dci_attr_mep_hash3.attr,
+	&most_dci_attr_mep_eui48_hi.attr,
+	&most_dci_attr_mep_eui48_mi.attr,
+	&most_dci_attr_mep_eui48_lo.attr,
+	NULL,
+};
+
+/**
+ * DCI ktype
+ */
+static struct kobj_type most_dci_ktype = {
+	.sysfs_ops = &most_dci_sysfs_ops,
+	.release = most_dci_release,
+	.default_attrs = most_dci_def_attrs,
+};
+
+/**
+ * create_most_dci_obj - allocates a dci object
+ * @parent: parent kobject
+ *
+ * This creates a dci object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_dci_obj *create_most_dci_obj(struct kobject *parent)
+{
+	struct most_dci_obj *most_dci;
+	int retval;
+
+	most_dci = kzalloc(sizeof(*most_dci), GFP_KERNEL);
+	if (!most_dci)
+		return NULL;
+
+	retval = kobject_init_and_add(&most_dci->kobj, &most_dci_ktype, parent,
+				      "dci");
+	if (retval) {
+		kobject_put(&most_dci->kobj);
+		return NULL;
+	}
+	return most_dci;
+}
+
+/**
+ * destroy_most_dci_obj - DCI object release function
+ * @p: pointer to dci object
+ */
+static void destroy_most_dci_obj(struct most_dci_obj *p)
+{
+	kobject_put(&p->kobj);
+}
+
+/**
+ * hdm_probe - probe function of USB device driver
+ * @interface: Interface of the attached USB device
+ * @id: Pointer to the USB ID table.
+ *
+ * This allocates and initializes the device instance, adds the new
+ * entry to the internal list, scans the USB descriptors and registers
+ * the interface with the core.
+ * Additionally, the DCI objects are created and the hardware is sync'd.
+ *
+ * Return 0 on success. In case of an error a negative number is returned.
+ */
+static int
+hdm_probe(struct usb_interface *interface, const struct usb_device_id *id)
+{
+	unsigned int i;
+	unsigned int num_endpoints;
+	struct most_channel_capability *tmp_cap;
+	struct most_dev *mdev;
+	struct usb_device *usb_dev;
+	struct device *dev;
+	struct usb_host_interface *usb_iface_desc;
+	struct usb_endpoint_descriptor *ep_desc;
+	int ret = 0;
+
+	usb_iface_desc = interface->cur_altsetting;
+	usb_dev = interface_to_usbdev(interface);
+	dev = &usb_dev->dev;
+	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+	if (!mdev)
+		goto exit_ENOMEM;
+
+	usb_set_intfdata(interface, mdev);
+	num_endpoints = usb_iface_desc->desc.bNumEndpoints;
+	mutex_init(&mdev->io_mutex);
+	INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
+	init_timer(&mdev->link_stat_timer);
+
+	mdev->usb_device = usb_dev;
+	mdev->link_stat_timer.function = link_stat_timer_handler;
+	mdev->link_stat_timer.data = (unsigned long)mdev;
+	mdev->link_stat_timer.expires = jiffies + (2 * HZ);
+
+	mdev->iface.mod = hdm_usb_fops.owner;
+	mdev->iface.interface = ITYPE_USB;
+	mdev->iface.configure = hdm_configure_channel;
+	mdev->iface.request_netinfo = hdm_request_netinfo;
+	mdev->iface.enqueue = hdm_enqueue;
+	mdev->iface.poison_channel = hdm_poison_channel;
+	mdev->iface.description = mdev->description;
+	mdev->iface.num_channels = num_endpoints;
+
+	snprintf(mdev->description, sizeof(mdev->description),
+		 "usb_device %d-%s:%d.%d",
+		 usb_dev->bus->busnum,
+		 usb_dev->devpath,
+		 usb_dev->config->desc.bConfigurationValue,
+		 usb_iface_desc->desc.bInterfaceNumber);
+
+	mdev->conf = kcalloc(num_endpoints, sizeof(*mdev->conf), GFP_KERNEL);
+	if (!mdev->conf)
+		goto exit_free;
+
+	mdev->cap = kcalloc(num_endpoints, sizeof(*mdev->cap), GFP_KERNEL);
+	if (!mdev->cap)
+		goto exit_free1;
+
+	mdev->iface.channel_vector = mdev->cap;
+	mdev->iface.priv = NULL;
+
+	mdev->ep_address =
+		kcalloc(num_endpoints, sizeof(*mdev->ep_address), GFP_KERNEL);
+	if (!mdev->ep_address)
+		goto exit_free2;
+
+	mdev->anchor_list =
+		kcalloc(num_endpoints, sizeof(*mdev->anchor_list), GFP_KERNEL);
+	if (!mdev->anchor_list)
+		goto exit_free3;
+
+	tmp_cap = mdev->cap;
+	for (i = 0; i < num_endpoints; i++) {
+		ep_desc = &usb_iface_desc->endpoint[i].desc;
+		mdev->ep_address[i] = ep_desc->bEndpointAddress;
+		mdev->padding_active[i] = false;
+		mdev->is_channel_healthy[i] = true;
+
+		snprintf(&mdev->suffix[i][0], MAX_SUFFIX_LEN, "ep%02x",
+			 mdev->ep_address[i]);
+
+		tmp_cap->name_suffix = &mdev->suffix[i][0];
+		tmp_cap->buffer_size_packet = MAX_BUF_SIZE;
+		tmp_cap->buffer_size_streaming = MAX_BUF_SIZE;
+		tmp_cap->num_buffers_packet = BUF_CHAIN_SIZE;
+		tmp_cap->num_buffers_streaming = BUF_CHAIN_SIZE;
+		tmp_cap->data_type = MOST_CH_CONTROL | MOST_CH_ASYNC |
+				     MOST_CH_ISOC_AVP | MOST_CH_SYNC;
+		if (ep_desc->bEndpointAddress & USB_DIR_IN)
+			tmp_cap->direction = MOST_CH_RX;
+		else
+			tmp_cap->direction = MOST_CH_TX;
+		tmp_cap++;
+		INIT_LIST_HEAD(&mdev->anchor_list[i]);
+		spin_lock_init(&mdev->anchor_list_lock[i]);
+	}
+	dev_notice(dev, "claimed gadget: Vendor=%4.4x ProdID=%4.4x Bus=%02x Device=%02x\n",
+		   le16_to_cpu(usb_dev->descriptor.idVendor),
+		   le16_to_cpu(usb_dev->descriptor.idProduct),
+		   usb_dev->bus->busnum,
+		   usb_dev->devnum);
+
+	dev_notice(dev, "device path: /sys/bus/usb/devices/%d-%s:%d.%d\n",
+		   usb_dev->bus->busnum,
+		   usb_dev->devpath,
+		   usb_dev->config->desc.bConfigurationValue,
+		   usb_iface_desc->desc.bInterfaceNumber);
+
+	mdev->parent = most_register_interface(&mdev->iface);
+	if (IS_ERR(mdev->parent)) {
+		ret = PTR_ERR(mdev->parent);
+		goto exit_free4;
+	}
+
+	mutex_lock(&mdev->io_mutex);
+	if (le16_to_cpu(usb_dev->descriptor.idProduct) == USB_DEV_ID_INIC) {
+		/* this increments the reference count of the instance
+		 * object of the core
+		 */
+		mdev->dci = create_most_dci_obj(mdev->parent);
+		if (!mdev->dci) {
+			mutex_unlock(&mdev->io_mutex);
+			most_deregister_interface(&mdev->iface);
+			ret = -ENOMEM;
+			goto exit_free4;
+		}
+
+		kobject_uevent(&mdev->dci->kobj, KOBJ_ADD);
+		mdev->dci->usb_device = mdev->usb_device;
+		trigger_resync_vr(usb_dev);
+	}
+	mutex_unlock(&mdev->io_mutex);
+	return 0;
+
+exit_free4:
+	kfree(mdev->anchor_list);
+exit_free3:
+	kfree(mdev->ep_address);
+exit_free2:
+	kfree(mdev->cap);
+exit_free1:
+	kfree(mdev->conf);
+exit_free:
+	kfree(mdev);
+exit_ENOMEM:
+	if (ret == 0 || ret == -ENOMEM) {
+		ret = -ENOMEM;
+		dev_err(dev, "out of memory\n");
+	}
+	return ret;
+}
+
+/**
+ * hdm_disconnect - disconnect function of USB device driver
+ * @interface: Interface of the attached USB device
+ *
+ * This deregisters the interface with the core, removes the kernel timer
+ * and frees resources.
+ *
+ * Context: hub kernel thread
+ */
+static void hdm_disconnect(struct usb_interface *interface)
+{
+	struct most_dev *mdev;
+
+	mdev = usb_get_intfdata(interface);
+	mutex_lock(&mdev->io_mutex);
+	usb_set_intfdata(interface, NULL);
+	mdev->usb_device = NULL;
+	mutex_unlock(&mdev->io_mutex);
+
+	del_timer_sync(&mdev->link_stat_timer);
+	cancel_work_sync(&mdev->poll_work_obj);
+
+	destroy_most_dci_obj(mdev->dci);
+	most_deregister_interface(&mdev->iface);
+
+	kfree(mdev->anchor_list);
+	kfree(mdev->cap);
+	kfree(mdev->conf);
+	kfree(mdev->ep_address);
+	kfree(mdev);
+}
+
+static struct usb_driver hdm_usb = {
+	.name = "hdm_usb",
+	.id_table = usbid,
+	.probe = hdm_probe,
+	.disconnect = hdm_disconnect,
+};
+
+static int __init hdm_usb_init(void)
+{
+	pr_info("hdm_usb_init()\n");
+	if (usb_register(&hdm_usb)) {
+		pr_err("could not register hdm_usb driver\n");
+		return -EIO;
+	}
+	schedule_usb_work = create_workqueue("hdmu_work");
+	if (schedule_usb_work == NULL) {
+		pr_err("could not create workqueue\n");
+		usb_deregister(&hdm_usb);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void __exit hdm_usb_exit(void)
+{
+	pr_info("hdm_usb_exit()\n");
+	destroy_workqueue(schedule_usb_work);
+	usb_deregister(&hdm_usb);
+}
+
+module_init(hdm_usb_init);
+module_exit(hdm_usb_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("HDM_4_USB");
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
new file mode 100644
index 0000000..38abf1b
--- /dev/null
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -0,0 +1,13 @@
+#
+# MOSTCore configuration
+#
+
+config MOSTCORE
+	tristate "MOST Core"
+
+	  Say Y here if you want to enable MOST support.
+	  This device driver needs at least an additional AIM and HDM to work.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mostcore.
diff --git a/drivers/staging/most/mostcore/Makefile b/drivers/staging/most/mostcore/Makefile
new file mode 100644
index 0000000..a078f01
--- /dev/null
+++ b/drivers/staging/most/mostcore/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MOSTCORE) += mostcore.o
+
+mostcore-objs := core.o
diff --git a/drivers/staging/most/mostcore/core.c b/drivers/staging/most/mostcore/core.c
new file mode 100644
index 0000000..7bb16db
--- /dev/null
+++ b/drivers/staging/most/mostcore/core.c
@@ -0,0 +1,1932 @@
+/*
+ * core.c - Implementation of core module of MOST Linux driver stack
+ *
+ * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/sysfs.h>
+#include <linux/kthread.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include "mostcore.h"
+
+#define MAX_CHANNELS	64
+#define STRING_SIZE	80
+
+static struct class *most_class;
+static struct device *class_glue_dir;
+static struct ida mdev_id;
+static int modref;
+
+struct most_c_obj {
+	struct kobject kobj;
+	struct completion cleanup;
+	atomic_t mbo_ref;
+	atomic_t mbo_nq_level;
+	uint16_t channel_id;
+	bool is_poisoned;
+	bool is_started;
+	int is_starving;
+	struct most_interface *iface;
+	struct most_inst_obj *inst;
+	struct most_channel_config cfg;
+	bool keep_mbo;
+	bool enqueue_halt;
+	struct list_head fifo;
+	spinlock_t fifo_lock;
+	struct list_head halt_fifo;
+	struct list_head list;
+	struct most_aim *first_aim;
+	struct most_aim *second_aim;
+	struct list_head trash_fifo;
+	struct task_struct *hdm_enqueue_task;
+	struct mutex stop_task_mutex;
+	wait_queue_head_t hdm_fifo_wq;
+};
+#define to_c_obj(d) container_of(d, struct most_c_obj, kobj)
+
+struct most_inst_obj {
+	int dev_id;
+	atomic_t tainted;
+	struct most_interface *iface;
+	struct list_head channel_list;
+	struct most_c_obj *channel[MAX_CHANNELS];
+	struct kobject kobj;
+	struct list_head list;
+};
+#define to_inst_obj(d) container_of(d, struct most_inst_obj, kobj)
+
+/**
+ * list_pop_mbo - retrieves the first MBO of the list and removes it
+ * @ptr: the list head to grab the MBO from.
+ */
+#define list_pop_mbo(ptr)						\
+({									\
+	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
+	list_del(&_mbo->list);						\
+	_mbo;								\
+})
+
+static struct mutex deregister_mutex;
+
+/*		     ___	     ___
+ *		     ___C H A N N E L___
+ */
+
+/**
+ * struct most_c_attr - to access the attributes of a channel object
+ * @attr: attributes of a channel
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_c_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct most_c_obj *d,
+			struct most_c_attr *attr,
+			char *buf);
+	ssize_t (*store)(struct most_c_obj *d,
+			 struct most_c_attr *attr,
+			 const char *buf,
+			 size_t count);
+};
+#define to_channel_attr(a) container_of(a, struct most_c_attr, attr)
+
+#define MOST_CHNL_ATTR(_name, _mode, _show, _store) \
+		struct most_c_attr most_chnl_attr_##_name = \
+		__ATTR(_name, _mode, _show, _store)
+
+/**
+ * channel_attr_show - show function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ */
+static ssize_t channel_attr_show(struct kobject *kobj, struct attribute *attr,
+				 char *buf)
+{
+	struct most_c_attr *channel_attr = to_channel_attr(attr);
+	struct most_c_obj *c_obj = to_c_obj(kobj);
+
+	if (!channel_attr->show)
+		return -EIO;
+
+	return channel_attr->show(c_obj, channel_attr, buf);
+}
+
+/**
+ * channel_attr_store - store function of channel object
+ * @kobj: pointer to its kobject
+ * @attr: pointer to its attributes
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t channel_attr_store(struct kobject *kobj,
+				  struct attribute *attr,
+				  const char *buf,
+				  size_t len)
+{
+	struct most_c_attr *channel_attr = to_channel_attr(attr);
+	struct most_c_obj *c_obj = to_c_obj(kobj);
+
+	if (!channel_attr->store)
+		return -EIO;
+	return channel_attr->store(c_obj, channel_attr, buf, len);
+}
+
+static const struct sysfs_ops most_channel_sysfs_ops = {
+	.show = channel_attr_show,
+	.store = channel_attr_store,
+};
+
+/**
+ * most_free_mbo_coherent - free an MBO and its coherent buffer
+ * @mbo: buffer to be released
+ *
+ */
+static void most_free_mbo_coherent(struct mbo *mbo)
+{
+	struct most_c_obj *c = mbo->context;
+	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+	dma_free_coherent(NULL, coherent_buf_size, mbo->virt_address,
+			  mbo->bus_address);
+	kfree(mbo);
+	if (atomic_sub_and_test(1, &c->mbo_ref))
+		complete(&c->cleanup);
+}
+
+/**
+ * flush_channel_fifos - clear the channel fifos
+ * @c: pointer to channel object
+ */
+static void flush_channel_fifos(struct most_c_obj *c)
+{
+	unsigned long flags, hf_flags;
+	struct mbo *mbo, *tmp;
+
+	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
+		return;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
+		list_del(&mbo->list);
+		spin_unlock_irqrestore(&c->fifo_lock, flags);
+		if (likely(mbo))
+			most_free_mbo_coherent(mbo);
+		spin_lock_irqsave(&c->fifo_lock, flags);
+	}
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+	spin_lock_irqsave(&c->fifo_lock, hf_flags);
+	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
+		list_del(&mbo->list);
+		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+		if (likely(mbo))
+			most_free_mbo_coherent(mbo);
+		spin_lock_irqsave(&c->fifo_lock, hf_flags);
+	}
+	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
+
+	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
+		pr_info("WARN: fifo | halt fifo not empty\n");
+}
+
+/**
+ * flush_trash_fifo - clear the trash fifo
+ * @c: pointer to channel object
+ */
+static int flush_trash_fifo(struct most_c_obj *c)
+{
+	struct mbo *mbo, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
+		list_del(&mbo->list);
+		spin_unlock_irqrestore(&c->fifo_lock, flags);
+		most_free_mbo_coherent(mbo);
+		spin_lock_irqsave(&c->fifo_lock, flags);
+	}
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	return 0;
+}
+
+/**
+ * most_channel_release - release function of channel object
+ * @kobj: pointer to channel's kobject
+ */
+static void most_channel_release(struct kobject *kobj)
+{
+	struct most_c_obj *c = to_c_obj(kobj);
+
+	kfree(c);
+}
+
+static ssize_t show_available_directions(struct most_c_obj *c,
+		struct most_c_attr *attr,
+		char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	strcpy(buf, "");
+	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
+		strcat(buf, "dir_rx ");
+	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
+		strcat(buf, "dir_tx ");
+	strcat(buf, "\n");
+	return strlen(buf) + 1;
+}
+
+static ssize_t show_available_datatypes(struct most_c_obj *c,
+					struct most_c_attr *attr,
+					char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	strcpy(buf, "");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
+		strcat(buf, "control ");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
+		strcat(buf, "async ");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
+		strcat(buf, "sync ");
+	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC_AVP)
+		strcat(buf, "isoc_avp ");
+	strcat(buf, "\n");
+	return strlen(buf) + 1;
+}
+
+static
+ssize_t show_number_of_packet_buffers(struct most_c_obj *c,
+				      struct most_c_attr *attr,
+				      char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].num_buffers_packet);
+}
+
+static
+ssize_t show_number_of_stream_buffers(struct most_c_obj *c,
+				      struct most_c_attr *attr,
+				      char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].num_buffers_streaming);
+}
+
+static
+ssize_t show_size_of_packet_buffer(struct most_c_obj *c,
+				   struct most_c_attr *attr,
+				   char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].buffer_size_packet);
+}
+
+static
+ssize_t show_size_of_stream_buffer(struct most_c_obj *c,
+				   struct most_c_attr *attr,
+				   char *buf)
+{
+	unsigned int i = c->channel_id;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			c->iface->channel_vector[i].buffer_size_streaming);
+}
+
+static ssize_t show_channel_starving(struct most_c_obj *c,
+				     struct most_c_attr *attr,
+				     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
+}
+
+
+#define create_show_channel_attribute(val) \
+	static MOST_CHNL_ATTR(val, S_IRUGO, show_##val, NULL)
+
+create_show_channel_attribute(available_directions);
+create_show_channel_attribute(available_datatypes);
+create_show_channel_attribute(number_of_packet_buffers);
+create_show_channel_attribute(number_of_stream_buffers);
+create_show_channel_attribute(size_of_stream_buffer);
+create_show_channel_attribute(size_of_packet_buffer);
+create_show_channel_attribute(channel_starving);
+
+static ssize_t show_set_number_of_buffers(struct most_c_obj *c,
+					  struct most_c_attr *attr,
+					  char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
+}
+
+static ssize_t store_set_number_of_buffers(struct most_c_obj *c,
+					   struct most_c_attr *attr,
+					   const char *buf,
+					   size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t show_set_buffer_size(struct most_c_obj *c,
+				    struct most_c_attr *attr,
+				    char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
+}
+
+static ssize_t store_set_buffer_size(struct most_c_obj *c,
+				     struct most_c_attr *attr,
+				     const char *buf,
+				     size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t show_set_direction(struct most_c_obj *c,
+				  struct most_c_attr *attr,
+				  char *buf)
+{
+	if (c->cfg.direction & MOST_CH_TX)
+		return snprintf(buf, PAGE_SIZE, "dir_tx\n");
+	else if (c->cfg.direction & MOST_CH_RX)
+		return snprintf(buf, PAGE_SIZE, "dir_rx\n");
+	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t store_set_direction(struct most_c_obj *c,
+				   struct most_c_attr *attr,
+				   const char *buf,
+				   size_t count)
+{
+	if (!strcmp(buf, "dir_rx\n"))
+		c->cfg.direction = MOST_CH_RX;
+	else if (!strcmp(buf, "dir_tx\n"))
+		c->cfg.direction = MOST_CH_TX;
+	else {
+		pr_info("WARN: invalid attribute settings\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t show_set_datatype(struct most_c_obj *c,
+				 struct most_c_attr *attr,
+				 char *buf)
+{
+	if (c->cfg.data_type & MOST_CH_CONTROL)
+		return snprintf(buf, PAGE_SIZE, "control\n");
+	else if (c->cfg.data_type & MOST_CH_ASYNC)
+		return snprintf(buf, PAGE_SIZE, "async\n");
+	else if (c->cfg.data_type & MOST_CH_SYNC)
+		return snprintf(buf, PAGE_SIZE, "sync\n");
+	else if (c->cfg.data_type & MOST_CH_ISOC_AVP)
+		return snprintf(buf, PAGE_SIZE, "isoc_avp\n");
+	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
+}
+
+static ssize_t store_set_datatype(struct most_c_obj *c,
+				  struct most_c_attr *attr,
+				  const char *buf,
+				  size_t count)
+{
+	if (!strcmp(buf, "control\n"))
+		c->cfg.data_type = MOST_CH_CONTROL;
+	else if (!strcmp(buf, "async\n"))
+		c->cfg.data_type = MOST_CH_ASYNC;
+	else if (!strcmp(buf, "sync\n"))
+		c->cfg.data_type = MOST_CH_SYNC;
+	else if (!strcmp(buf, "isoc_avp\n"))
+		c->cfg.data_type = MOST_CH_ISOC_AVP;
+	else {
+		pr_info("WARN: invalid attribute settings\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static ssize_t show_set_subbuffer_size(struct most_c_obj *c,
+				       struct most_c_attr *attr,
+				       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
+}
+
+static ssize_t store_set_subbuffer_size(struct most_c_obj *c,
+					struct most_c_attr *attr,
+					const char *buf,
+					size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t show_set_packets_per_xact(struct most_c_obj *c,
+					 struct most_c_attr *attr,
+					 char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
+}
+
+static ssize_t store_set_packets_per_xact(struct most_c_obj *c,
+					  struct most_c_attr *attr,
+					  const char *buf,
+					  size_t count)
+{
+	int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+#define create_channel_attribute(value) \
+	static MOST_CHNL_ATTR(value, S_IRUGO | S_IWUSR, \
+			      show_##value, \
+			      store_##value)
+
+create_channel_attribute(set_buffer_size);
+create_channel_attribute(set_number_of_buffers);
+create_channel_attribute(set_direction);
+create_channel_attribute(set_datatype);
+create_channel_attribute(set_subbuffer_size);
+create_channel_attribute(set_packets_per_xact);
+
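+/*
+ * Typical usage sketch (illustrative only, not enforced by this file):
+ * before linking a channel to an AIM, user space is expected to configure
+ * the channel through the attribute files created above, e.g.
+ *
+ *	echo 16 >set_number_of_buffers
+ *	echo 1024 >set_buffer_size
+ *	echo dir_rx >set_direction
+ *	echo sync >set_datatype
+ *
+ * The values given here are made-up examples; valid ranges depend on the
+ * capabilities reported by the HDM for the respective channel.
+ */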
+
+/**
+ * most_channel_def_attrs - array of default attributes of channel object
+ */
+static struct attribute *most_channel_def_attrs[] = {
+	&most_chnl_attr_available_directions.attr,
+	&most_chnl_attr_available_datatypes.attr,
+	&most_chnl_attr_number_of_packet_buffers.attr,
+	&most_chnl_attr_number_of_stream_buffers.attr,
+	&most_chnl_attr_size_of_packet_buffer.attr,
+	&most_chnl_attr_size_of_stream_buffer.attr,
+	&most_chnl_attr_set_number_of_buffers.attr,
+	&most_chnl_attr_set_buffer_size.attr,
+	&most_chnl_attr_set_direction.attr,
+	&most_chnl_attr_set_datatype.attr,
+	&most_chnl_attr_set_subbuffer_size.attr,
+	&most_chnl_attr_set_packets_per_xact.attr,
+	&most_chnl_attr_channel_starving.attr,
+	NULL,
+};
+
+static struct kobj_type most_channel_ktype = {
+	.sysfs_ops = &most_channel_sysfs_ops,
+	.release = most_channel_release,
+	.default_attrs = most_channel_def_attrs,
+};
+
+static struct kset *most_channel_kset;
+
+/**
+ * create_most_c_obj - allocates a channel object
+ * @name: name of the channel object
+ * @parent: parent kobject
+ *
+ * This creates a channel object and registers it with sysfs.
+ * Returns a pointer to the object or NULL when something went wrong.
+ */
+static struct most_c_obj *
+create_most_c_obj(const char *name, struct kobject *parent)
+{
+	struct most_c_obj *c;
+	int retval;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return NULL;
+	c->kobj.kset = most_channel_kset;
+	retval = kobject_init_and_add(&c->kobj, &most_channel_ktype, parent,
+				      "%s", name);
+	if (retval) {
+		kobject_put(&c->kobj);
+		return NULL;
+	}
+	kobject_uevent(&c->kobj, KOBJ_ADD);
+	return c;
+}
+
+/**
+ * destroy_most_c_obj - channel release function
+ * @c: pointer to channel object
+ *
+ * This decrements the reference counter of the channel object.
+ * If the reference count turns zero, its release function is called.
+ */
+static void destroy_most_c_obj(struct most_c_obj *c)
+{
+	if (c->first_aim)
+		c->first_aim->disconnect_channel(c->iface, c->channel_id);
+	if (c->second_aim)
+		c->second_aim->disconnect_channel(c->iface, c->channel_id);
+	c->first_aim = NULL;
+	c->second_aim = NULL;
+
+	mutex_lock(&deregister_mutex);
+	flush_trash_fifo(c);
+	flush_channel_fifos(c);
+	mutex_unlock(&deregister_mutex);
+	kobject_put(&c->kobj);
+}
+
+/*		     ___	       ___
+ *		     ___I N S T A N C E___
+ */
+#define MOST_INST_ATTR(_name, _mode, _show, _store) \
+		struct most_inst_attribute most_inst_attr_##_name = \
+		__ATTR(_name, _mode, _show, _store)
+
+static struct list_head instance_list;
+
+/**
+ * struct most_inst_attribute - to access the attributes of instance object
+ * @attr: attributes of an instance
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_inst_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct most_inst_obj *d,
+			struct most_inst_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct most_inst_obj *d,
+			 struct most_inst_attribute *attr,
+			 const char *buf,
+			 size_t count);
+};
+#define to_instance_attr(a) \
+	container_of(a, struct most_inst_attribute, attr)
+
+/**
+ * instance_attr_show - show function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t instance_attr_show(struct kobject *kobj,
+				  struct attribute *attr,
+				  char *buf)
+{
+	struct most_inst_attribute *instance_attr;
+	struct most_inst_obj *instance_obj;
+
+	instance_attr = to_instance_attr(attr);
+	instance_obj = to_inst_obj(kobj);
+
+	if (!instance_attr->show)
+		return -EIO;
+
+	return instance_attr->show(instance_obj, instance_attr, buf);
+}
+
+/**
+ * instance_attr_store - store function for an instance object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t instance_attr_store(struct kobject *kobj,
+				   struct attribute *attr,
+				   const char *buf,
+				   size_t len)
+{
+	struct most_inst_attribute *instance_attr;
+	struct most_inst_obj *instance_obj;
+
+	instance_attr = to_instance_attr(attr);
+	instance_obj = to_inst_obj(kobj);
+
+	if (!instance_attr->store)
+		return -EIO;
+
+	return instance_attr->store(instance_obj, instance_attr, buf, len);
+}
+
+static const struct sysfs_ops most_inst_sysfs_ops = {
+	.show = instance_attr_show,
+	.store = instance_attr_store,
+};
+
+/**
+ * most_inst_release - release function for instance object
+ * @kobj: pointer to instance's kobject
+ *
+ * This frees the allocated memory for the instance object
+ */
+static void most_inst_release(struct kobject *kobj)
+{
+	struct most_inst_obj *inst = to_inst_obj(kobj);
+
+	kfree(inst);
+}
+
+static ssize_t show_description(struct most_inst_obj *instance_obj,
+				struct most_inst_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			instance_obj->iface->description);
+}
+
+static ssize_t show_interface(struct most_inst_obj *instance_obj,
+			      struct most_inst_attribute *attr,
+			      char *buf)
+{
+	switch (instance_obj->iface->interface) {
+	case ITYPE_LOOPBACK:
+		return snprintf(buf, PAGE_SIZE, "loopback\n");
+	case ITYPE_I2C:
+		return snprintf(buf, PAGE_SIZE, "i2c\n");
+	case ITYPE_I2S:
+		return snprintf(buf, PAGE_SIZE, "i2s\n");
+	case ITYPE_TSI:
+		return snprintf(buf, PAGE_SIZE, "tsi\n");
+	case ITYPE_HBI:
+		return snprintf(buf, PAGE_SIZE, "hbi\n");
+	case ITYPE_MEDIALB_DIM:
+		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
+	case ITYPE_MEDIALB_DIM2:
+		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
+	case ITYPE_USB:
+		return snprintf(buf, PAGE_SIZE, "usb\n");
+	case ITYPE_PCIE:
+		return snprintf(buf, PAGE_SIZE, "pcie\n");
+	}
+	return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+#define create_inst_attribute(value) \
+	static MOST_INST_ATTR(value, S_IRUGO, show_##value, NULL)
+
+create_inst_attribute(description);
+create_inst_attribute(interface);
+
+static struct attribute *most_inst_def_attrs[] = {
+	&most_inst_attr_description.attr,
+	&most_inst_attr_interface.attr,
+	NULL,
+};
+
+static struct kobj_type most_inst_ktype = {
+	.sysfs_ops = &most_inst_sysfs_ops,
+	.release = most_inst_release,
+	.default_attrs = most_inst_def_attrs,
+};
+
+static struct kset *most_inst_kset;
+
+
+/**
+ * create_most_inst_obj - creates an instance object
+ * @name: name of the object to be created
+ *
+ * This allocates memory for an instance structure, assigns the proper kset
+ * and registers it with sysfs.
+ *
+ * Returns a pointer to the instance object or NULL when something went wrong.
+ */
+static struct most_inst_obj *create_most_inst_obj(const char *name)
+{
+	struct most_inst_obj *inst;
+	int retval;
+
+	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+	if (!inst)
+		return NULL;
+	inst->kobj.kset = most_inst_kset;
+	retval = kobject_init_and_add(&inst->kobj, &most_inst_ktype, NULL,
+				      "%s", name);
+	if (retval) {
+		kobject_put(&inst->kobj);
+		return NULL;
+	}
+	kobject_uevent(&inst->kobj, KOBJ_ADD);
+	return inst;
+}
+
+/**
+ * destroy_most_inst_obj - MOST instance release function
+ * @inst: pointer to the instance object
+ *
+ * This decrements the reference counter of the instance object.
+ * If the reference count turns zero, its release function is called
+ */
+static void destroy_most_inst_obj(struct most_inst_obj *inst)
+{
+	struct most_c_obj *c, *tmp;
+
+	/* need to destroy channels first, since
+	 * each channel incremented the
+	 * reference count of the inst->kobj
+	 */
+	list_for_each_entry_safe(c, tmp, &inst->channel_list, list) {
+		destroy_most_c_obj(c);
+	}
+	kobject_put(&inst->kobj);
+}
+
+/*		     ___     ___
+ *		     ___A I M___
+ */
+struct most_aim_obj {
+	struct kobject kobj;
+	struct list_head list;
+	struct most_aim *driver;
+	char add_link[STRING_SIZE];
+	char remove_link[STRING_SIZE];
+};
+#define to_aim_obj(d) container_of(d, struct most_aim_obj, kobj)
+
+static struct list_head aim_list;
+
+
+/**
+ * struct most_aim_attribute - to access the attributes of AIM object
+ * @attr: attributes of an AIM
+ * @show: pointer to the show function
+ * @store: pointer to the store function
+ */
+struct most_aim_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct most_aim_obj *d,
+			struct most_aim_attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct most_aim_obj *d,
+			 struct most_aim_attribute *attr,
+			 const char *buf,
+			 size_t count);
+};
+#define to_aim_attr(a) container_of(a, struct most_aim_attribute, attr)
+
+/**
+ * aim_attr_show - show function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ */
+static ssize_t aim_attr_show(struct kobject *kobj,
+			     struct attribute *attr,
+			     char *buf)
+{
+	struct most_aim_attribute *aim_attr;
+	struct most_aim_obj *aim_obj;
+
+	aim_attr = to_aim_attr(attr);
+	aim_obj = to_aim_obj(kobj);
+
+	if (!aim_attr->show)
+		return -EIO;
+
+	return aim_attr->show(aim_obj, aim_attr, buf);
+}
+
+/**
+ * aim_attr_store - store function of an AIM object
+ * @kobj: pointer to kobject
+ * @attr: pointer to attribute struct
+ * @buf: buffer
+ * @len: length of buffer
+ */
+static ssize_t aim_attr_store(struct kobject *kobj,
+			      struct attribute *attr,
+			      const char *buf,
+			      size_t len)
+{
+	struct most_aim_attribute *aim_attr;
+	struct most_aim_obj *aim_obj;
+
+	aim_attr = to_aim_attr(attr);
+	aim_obj = to_aim_obj(kobj);
+
+	if (!aim_attr->store)
+		return -EIO;
+	return aim_attr->store(aim_obj, aim_attr, buf, len);
+}
+
+static const struct sysfs_ops most_aim_sysfs_ops = {
+	.show = aim_attr_show,
+	.store = aim_attr_store,
+};
+
+/**
+ * most_aim_release - AIM release function
+ * @kobj: pointer to AIM's kobject
+ */
+static void most_aim_release(struct kobject *kobj)
+{
+	struct most_aim_obj *aim_obj = to_aim_obj(kobj);
+
+	kfree(aim_obj);
+}
+
+static ssize_t show_add_link(struct most_aim_obj *aim_obj,
+			     struct most_aim_attribute *attr,
+			     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->add_link);
+}
+
+/**
+ * split_string - parses and modifies the string in the buffer buf and
+ * splits it into two mandatory substrings and one optional substring.
+ *
+ * @buf: complete string from attribute 'add_channel'
+ * @a: address of pointer to 1st substring (=instance name)
+ * @b: address of pointer to 2nd substring (=channel name)
+ * @c: optional address of pointer to 3rd substring (=user defined name)
+ *
+ * Examples:
+ *
+ * Input: "mdev0:ch0@ep_81:my_channel\n" or
+ *        "mdev0:ch0@ep_81:my_channel"
+ *
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> "my_channel"
+ *
+ * Input: "mdev0:ch0@ep_81\n"
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c -> ""
+ *
+ * Input: "mdev0:ch0@ep_81"
+ * Output: *a -> "mdev0", *b -> "ch0@ep_81", *c == NULL
+ */
+static int split_string(char *buf, char **a, char **b, char **c)
+{
+	*a = strsep(&buf, ":");
+	if (!*a)
+		return -EIO;
+
+	*b = strsep(&buf, ":\n");
+	if (!*b)
+		return -EIO;
+
+	if (c)
+		*c = strsep(&buf, ":\n");
+
+	return 0;
+}
+
+/**
+ * get_channel_by_name - get pointer to channel object
+ * @mdev: name of the device instance
+ * @mdev_ch: name of the respective channel
+ *
+ * This retrieves the pointer to a channel object.
+ */
+static struct
+most_c_obj *get_channel_by_name(char *mdev, char *mdev_ch)
+{
+	struct most_c_obj *c, *tmp;
+	struct most_inst_obj *i, *i_tmp;
+	int found = 0;
+
+	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+		if (!strcmp(kobject_name(&i->kobj), mdev)) {
+			found++;
+			break;
+		}
+	}
+	if (unlikely(!found))
+		return ERR_PTR(-EIO);
+
+	list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+		if (!strcmp(kobject_name(&c->kobj), mdev_ch)) {
+			found++;
+			break;
+		}
+	}
+	if (unlikely(2 > found))
+		return ERR_PTR(-EIO);
+	return c;
+}
+
+/**
+ * store_add_link - store() function for add_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * This parses the string given by buf and splits it into
+ * three substrings. Note: the third substring is optional. If a cdev
+ * AIM is loaded, the optional third substring makes up the name of the
+ * device node in the /dev directory. If omitted, the device node will
+ * inherit the channel's name within sysfs.
+ *
+ * Searches for a pair of device and channel and probes the AIM
+ *
+ * Example:
+ * (1) echo -n -e "mdev0:ch0@ep_81:my_rxchannel\n" >add_link
+ * (2) echo -n -e "mdev0:ch0@ep_81\n" >add_link
+ *
+ * (1) would create the device node /dev/my_rxchannel
+ * (2) would create the device node /dev/mdev0-ch0@ep_81
+ */
+static ssize_t store_add_link(struct most_aim_obj *aim_obj,
+			      struct most_aim_attribute *attr,
+			      const char *buf,
+			      size_t len)
+{
+	struct most_c_obj *c;
+	struct most_aim **aim_ptr;
+	char buffer[STRING_SIZE];
+	char *mdev;
+	char *mdev_ch;
+	char *mdev_devnod;
+	char devnod_buf[STRING_SIZE];
+	int ret;
+	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+	strlcpy(buffer, buf, max_len);
+	strlcpy(aim_obj->add_link, buf, max_len);
+
+	ret = split_string(buffer, &mdev, &mdev_ch, &mdev_devnod);
+	if (ret)
+		return ret;
+
+	if (!mdev_devnod || *mdev_devnod == 0) {
+		snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev, mdev_ch);
+		mdev_devnod = devnod_buf;
+	}
+
+	c = get_channel_by_name(mdev, mdev_ch);
+	if (IS_ERR(c))
+		return -ENODEV;
+
+	if (!c->first_aim)
+		aim_ptr = &c->first_aim;
+	else if (!c->second_aim)
+		aim_ptr = &c->second_aim;
+	else
+		return -ENOSPC;
+
+	ret = aim_obj->driver->probe_channel(c->iface, c->channel_id,
+					     &c->cfg, &c->kobj, mdev_devnod);
+	if (ret)
+		return ret;
+	*aim_ptr = aim_obj->driver;
+	return len;
+}
+
+static struct most_aim_attribute most_aim_attr_add_link =
+	__ATTR(add_link, S_IRUGO | S_IWUSR, show_add_link, store_add_link);
+
+static ssize_t show_remove_link(struct most_aim_obj *aim_obj,
+				struct most_aim_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", aim_obj->remove_link);
+}
+
+/**
+ * store_remove_link - store function for remove_link attribute
+ * @aim_obj: pointer to AIM object
+ * @attr: its attributes
+ * @buf: buffer
+ * @len: buffer length
+ *
+ * Example:
+ * echo -n -e "mdev0:ch0@ep_81\n" >remove_link
+ */
+static ssize_t store_remove_link(struct most_aim_obj *aim_obj,
+				 struct most_aim_attribute *attr,
+				 const char *buf,
+				 size_t len)
+{
+	struct most_c_obj *c;
+	char buffer[STRING_SIZE];
+	char *mdev;
+	char *mdev_ch;
+	int ret;
+	size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
+
+	strlcpy(buffer, buf, max_len);
+	strlcpy(aim_obj->remove_link, buf, max_len);
+	ret = split_string(buffer, &mdev, &mdev_ch, NULL);
+	if (ret)
+		return ret;
+
+	c = get_channel_by_name(mdev, mdev_ch);
+	if (IS_ERR(c))
+		return -ENODEV;
+
+	if (c->first_aim == aim_obj->driver)
+		c->first_aim = NULL;
+	if (c->second_aim == aim_obj->driver)
+		c->second_aim = NULL;
+	if (aim_obj->driver->disconnect_channel(c->iface, c->channel_id))
+		return -EIO;
+	return len;
+}
+
+static struct most_aim_attribute most_aim_attr_remove_link =
+	__ATTR(remove_link, S_IRUGO | S_IWUSR, show_remove_link, store_remove_link);
+
+static struct attribute *most_aim_def_attrs[] = {
+	&most_aim_attr_add_link.attr,
+	&most_aim_attr_remove_link.attr,
+	NULL,
+};
+
+static struct kobj_type most_aim_ktype = {
+	.sysfs_ops = &most_aim_sysfs_ops,
+	.release = most_aim_release,
+	.default_attrs = most_aim_def_attrs,
+};
+
+static struct kset *most_aim_kset;
+
+/**
+ * create_most_aim_obj - creates an AIM object
+ * @name: name of the AIM
+ *
+ * This creates an AIM object, assigns the proper kset and registers
+ * it with sysfs.
+ * Returns a pointer to the object or NULL if something went wrong.
+ */
+static struct most_aim_obj *create_most_aim_obj(const char *name)
+{
+	struct most_aim_obj *most_aim;
+	int retval;
+
+	most_aim = kzalloc(sizeof(*most_aim), GFP_KERNEL);
+	if (!most_aim)
+		return NULL;
+	most_aim->kobj.kset = most_aim_kset;
+	retval = kobject_init_and_add(&most_aim->kobj, &most_aim_ktype,
+				      NULL, "%s", name);
+	if (retval) {
+		kobject_put(&most_aim->kobj);
+		return NULL;
+	}
+	kobject_uevent(&most_aim->kobj, KOBJ_ADD);
+	return most_aim;
+}
+
+/**
+ * destroy_most_aim_obj - AIM release function
+ * @p: pointer to AIM object
+ *
+ * This decrements the reference counter of the AIM object. If the
+ * reference count turns zero, its release function will be called.
+ */
+static void destroy_most_aim_obj(struct most_aim_obj *p)
+{
+	kobject_put(&p->kobj);
+}
+
+
+/*		     ___       ___
+ *		     ___C O R E___
+ */
+
+/**
+ * Instantiation of the MOST bus
+ */
+static struct bus_type most_bus = {
+	.name = "most",
+};
+
+/**
+ * Instantiation of the core driver
+ */
+static struct device_driver mostcore = {
+	.name = "mostcore",
+	.bus = &most_bus,
+};
+
+static inline void trash_mbo(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_c_obj *c = mbo->context;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_add(&mbo->list, &c->trash_fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+}
+
+static struct mbo *get_hdm_mbo(struct most_c_obj *c)
+{
+	unsigned long flags;
+	struct mbo *mbo;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	if (c->enqueue_halt || list_empty(&c->halt_fifo))
+		mbo = NULL;
+	else
+		mbo = list_pop_mbo(&c->halt_fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	return mbo;
+}
+
+static void nq_hdm_mbo(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_c_obj *c = mbo->context;
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_add_tail(&mbo->list, &c->halt_fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	wake_up_interruptible(&c->hdm_fifo_wq);
+}
+
+static int hdm_enqueue_thread(void *data)
+{
+	struct most_c_obj *c = data;
+	struct mbo *mbo;
+	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
+
+	while (likely(!kthread_should_stop())) {
+		wait_event_interruptible(c->hdm_fifo_wq,
+					 (mbo = get_hdm_mbo(c))
+					 || kthread_should_stop());
+
+		if (unlikely(!mbo))
+			continue;
+
+		if (c->cfg.direction == MOST_CH_RX)
+			mbo->buffer_length = c->cfg.buffer_size;
+
+		if (unlikely(enqueue(mbo->ifp, mbo->hdm_channel_id, mbo))) {
+			pr_err("hdm enqueue failed\n");
+			nq_hdm_mbo(mbo);
+			c->hdm_enqueue_task = NULL;
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+static int run_enqueue_thread(struct most_c_obj *c, int channel_id)
+{
+	struct task_struct *task =
+		kthread_run(&hdm_enqueue_thread, c, "hdm_fifo_%d", channel_id);
+
+	if (IS_ERR(task))
+		return PTR_ERR(task);
+
+	c->hdm_enqueue_task = task;
+	return 0;
+}
+
+/**
+ * arm_mbo - recycle MBO for further usage
+ * @mbo: buffer object
+ *
+ * This puts an MBO back to the list to have it ready for upcoming
+ * tx transactions.
+ *
+ * In case the MBO belongs to a channel that recently has been
+ * poisoned, the MBO is scheduled to be trashed.
+ * Calls the completion handler of an attached AIM.
+ */
+static void arm_mbo(struct mbo *mbo)
+{
+	unsigned long flags;
+	struct most_c_obj *c;
+
+	BUG_ON((!mbo) || (!mbo->context));
+	c = mbo->context;
+
+	if (c->is_poisoned) {
+		trash_mbo(mbo);
+		return;
+	}
+
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	list_add_tail(&mbo->list, &c->fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+
+	if (c->second_aim && c->second_aim->tx_completion)
+		c->second_aim->tx_completion(c->iface, c->channel_id);
+	if (c->first_aim && c->first_aim->tx_completion)
+		c->first_aim->tx_completion(c->iface, c->channel_id);
+}
+
+/**
+ * arm_mbo_chain - helper function that arms an MBO chain for the HDM
+ * @c: pointer to interface channel
+ * @dir: direction of the channel
+ * @compl: pointer to completion function
+ *
+ * This allocates buffer objects including the containing DMA coherent
+ * buffer and puts them in the fifo.
+ * Buffers of Rx channels are put in the kthread fifo, hence immediately
+ * submitted to the HDM.
+ *
+ * Returns the number of allocated and enqueued MBOs.
+ */
+static int arm_mbo_chain(struct most_c_obj *c, int dir,
+			 void (*compl)(struct mbo *))
+{
+	unsigned int i;
+	int retval;
+	struct mbo *mbo;
+	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
+
+	atomic_set(&c->mbo_nq_level, 0);
+
+	for (i = 0; i < c->cfg.num_buffers; i++) {
+		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
+		if (!mbo) {
+			pr_info("WARN: Allocation of MBO failed.\n");
+			retval = i;
+			goto _exit;
+		}
+		mbo->context = c;
+		mbo->ifp = c->iface;
+		mbo->hdm_channel_id = c->channel_id;
+		mbo->virt_address = dma_alloc_coherent(NULL,
+						       coherent_buf_size,
+						       &mbo->bus_address,
+						       GFP_KERNEL);
+		if (!mbo->virt_address) {
+			pr_info("WARN: No DMA coherent buffer.\n");
+			retval = i;
+			goto _error1;
+		}
+		mbo->complete = compl;
+		if (dir == MOST_CH_RX) {
+			nq_hdm_mbo(mbo);
+			atomic_inc(&c->mbo_nq_level);
+		} else {
+			arm_mbo(mbo);
+		}
+	}
+	return i;
+
+_error1:
+	kfree(mbo);
+_exit:
+	return retval;
+}
+
+/**
+ * most_submit_mbo - submits an MBO to fifo
+ * @mbo: pointer to the MBO
+ *
+ */
+int most_submit_mbo(struct mbo *mbo)
+{
+	struct most_c_obj *c;
+	struct most_inst_obj *i;
+
+	if (unlikely((!mbo) || (!mbo->context))) {
+		pr_err("Bad MBO or missing channel reference\n");
+		return -EINVAL;
+	}
+	c = mbo->context;
+	i = c->inst;
+
+	if (unlikely(atomic_read(&i->tainted)))
+		return -ENODEV;
+
+	nq_hdm_mbo(mbo);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_submit_mbo);
+
+/**
+ * most_write_completion - write completion handler
+ * @mbo: pointer to MBO
+ *
+ * This recycles the MBO for further usage. In case the channel has been
+ * poisoned, the MBO is scheduled to be trashed.
+ */
+static void most_write_completion(struct mbo *mbo)
+{
+	struct most_c_obj *c;
+
+	BUG_ON((!mbo) || (!mbo->context));
+
+	c = mbo->context;
+	if (mbo->status == MBO_E_INVAL)
+		pr_info("WARN: Tx MBO status: invalid\n");
+	if (unlikely((c->is_poisoned == true) || (mbo->status == MBO_E_CLOSE)))
+		trash_mbo(mbo);
+	else
+		arm_mbo(mbo);
+}
+
+/**
+ * get_channel_by_iface - get pointer to channel object
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This retrieves a pointer to a channel of the given interface and channel ID.
+ */
+static struct
+most_c_obj *get_channel_by_iface(struct most_interface *iface, int id)
+{
+	struct most_inst_obj *i;
+
+	if (unlikely(!iface)) {
+		pr_err("Bad interface\n");
+		return NULL;
+	}
+	if (unlikely((id < 0) || (id >= iface->num_channels))) {
+		pr_err("Channel index (%d) out of range\n", id);
+		return NULL;
+	}
+	i = iface->priv;
+	if (unlikely(!i)) {
+		pr_err("interface is not registered\n");
+		return NULL;
+	}
+	return i->channel[id];
+}
+
+/**
+ * most_get_mbo - get pointer to an MBO from the pool
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This attempts to get a free buffer out of the channel fifo.
+ * Returns a pointer to MBO on success or NULL otherwise.
+ */
+struct mbo *most_get_mbo(struct most_interface *iface, int id)
+{
+	struct mbo *mbo;
+	struct most_c_obj *c;
+	unsigned long flags;
+
+	c = get_channel_by_iface(iface, id);
+	if (unlikely(!c))
+		return NULL;
+	spin_lock_irqsave(&c->fifo_lock, flags);
+	if (list_empty(&c->fifo)) {
+		spin_unlock_irqrestore(&c->fifo_lock, flags);
+		return NULL;
+	}
+	mbo = list_pop_mbo(&c->fifo);
+	spin_unlock_irqrestore(&c->fifo_lock, flags);
+	mbo->buffer_length = c->cfg.buffer_size;
+	return mbo;
+}
+EXPORT_SYMBOL_GPL(most_get_mbo);
+
+
+/**
+ * most_put_mbo - return buffer to pool
+ * @mbo: buffer object
+ */
+void most_put_mbo(struct mbo *mbo)
+{
+	struct most_c_obj *c;
+	struct most_inst_obj *i;
+
+	c = mbo->context;
+	i = c->inst;
+
+	if (unlikely(atomic_read(&i->tainted))) {
+		mbo->status = MBO_E_CLOSE;
+		trash_mbo(mbo);
+		return;
+	}
+	if (c->cfg.direction == MOST_CH_TX) {
+		arm_mbo(mbo);
+		return;
+	}
+	nq_hdm_mbo(mbo);
+	atomic_inc(&c->mbo_nq_level);
+}
+EXPORT_SYMBOL_GPL(most_put_mbo);
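+
+/*
+ * Usage sketch for the MBO pool API above (illustrative only; names such
+ * as my_aim_send() are hypothetical and not part of this driver):
+ *
+ *	static int my_aim_send(struct most_interface *iface, int id,
+ *			       const void *data, u16 len)
+ *	{
+ *		struct mbo *mbo = most_get_mbo(iface, id);
+ *
+ *		if (!mbo)
+ *			return -ENOMEM;
+ *		if (len > mbo->buffer_length)
+ *			len = mbo->buffer_length;
+ *		memcpy(mbo->virt_address, data, len);
+ *		mbo->buffer_length = len;
+ *		return most_submit_mbo(mbo);
+ *	}
+ *
+ * An Rx AIM would instead hand each received buffer back to the pool with
+ * most_put_mbo() once it has consumed the data.
+ */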
+
+/**
+ * most_read_completion - read completion handler
+ * @mbo: pointer to MBO
+ *
+ * This function is called by the HDM when data has been received from the
+ * hardware and copied to the buffer of the MBO.
+ *
+ * In case the channel has been poisoned it puts the buffer in the trash queue.
+ * Otherwise, it passes the buffer to an AIM for further processing.
+ */
+static void most_read_completion(struct mbo *mbo)
+{
+	struct most_c_obj *c;
+
+	c = mbo->context;
+	if (unlikely((c->is_poisoned == true) || (mbo->status == MBO_E_CLOSE)))
+		goto release_mbo;
+
+	if (mbo->status == MBO_E_INVAL) {
+		nq_hdm_mbo(mbo);
+		atomic_inc(&c->mbo_nq_level);
+		return;
+	}
+
+	if (atomic_sub_and_test(1, &c->mbo_nq_level)) {
+		pr_info("WARN: rx device out of buffers\n");
+		c->is_starving = 1;
+	}
+
+	if (c->first_aim && c->first_aim->rx_completion &&
+	    c->first_aim->rx_completion(mbo) == 0)
+		return;
+	if (c->second_aim && c->second_aim->rx_completion &&
+	    c->second_aim->rx_completion(mbo) == 0)
+		return;
+	pr_info("WARN: no driver linked with this channel\n");
+	mbo->status = MBO_E_CLOSE;
+release_mbo:
+	trash_mbo(mbo);
+}
+
+/**
+ * most_start_channel - prepares a channel for communication
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ *
+ * This prepares the channel for usage. Cross-checks whether the
+ * channel's been properly configured.
+ *
+ * Returns 0 on success or error code otherwise.
+ */
+int most_start_channel(struct most_interface *iface, int id)
+{
+	int num_buffer;
+	int ret;
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+	if (unlikely(!c))
+		return -EINVAL;
+
+	if (c->is_started)
+		return -EBUSY;
+
+	if (!try_module_get(iface->mod)) {
+		pr_info("failed to acquire HDM lock\n");
+		return -ENOLCK;
+	}
+	modref++;
+
+	c->cfg.extra_len = 0;
+	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
+		pr_info("channel configuration failed. Go check settings...\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+	init_waitqueue_head(&c->hdm_fifo_wq);
+
+	if (c->cfg.direction == MOST_CH_RX)
+		num_buffer = arm_mbo_chain(c, c->cfg.direction,
+					   most_read_completion);
+	else
+		num_buffer = arm_mbo_chain(c, c->cfg.direction,
+					   most_write_completion);
+	if (unlikely(0 == num_buffer)) {
+		pr_info("failed to allocate memory\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ret = run_enqueue_thread(c, id);
+	if (ret)
+		goto error;
+
+	c->is_started = true;
+	c->is_starving = 0;
+	atomic_set(&c->mbo_ref, num_buffer);
+	return 0;
+error:
+	if (iface->mod)
+		module_put(iface->mod);
+	modref--;
+	return ret;
+}
+EXPORT_SYMBOL_GPL(most_start_channel);
+
+/**
+ * most_stop_channel - stops a running channel
+ * @iface: pointer to interface instance
+ * @id: channel ID
+ */
+int most_stop_channel(struct most_interface *iface, int id)
+{
+	struct most_c_obj *c;
+
+	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
+		pr_err("Bad interface or index out of range\n");
+		return -EINVAL;
+	}
+	c = get_channel_by_iface(iface, id);
+	if (unlikely(!c))
+		return -EINVAL;
+
+	if (!c->is_started)
+		return 0;
+
+	/* FIXME: we need to know calling AIM to reset only one link */
+	c->first_aim = NULL;
+	c->second_aim = NULL;
+	/* do not go into recursion calling aim->disconnect_channel */
+
+	mutex_lock(&c->stop_task_mutex);
+	if (c->hdm_enqueue_task)
+		kthread_stop(c->hdm_enqueue_task);
+	c->hdm_enqueue_task = NULL;
+	mutex_unlock(&c->stop_task_mutex);
+
+	mutex_lock(&deregister_mutex);
+	if (atomic_read(&c->inst->tainted)) {
+		mutex_unlock(&deregister_mutex);
+		return -ENODEV;
+	}
+	mutex_unlock(&deregister_mutex);
+
+	if (iface->mod && modref) {
+		module_put(iface->mod);
+		modref--;
+	}
+
+	c->is_poisoned = true;
+	if (c->iface->poison_channel(c->iface, c->channel_id)) {
+		pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
+		       c->iface->description);
+		return -EAGAIN;
+	}
+	flush_trash_fifo(c);
+	flush_channel_fifos(c);
+
+#ifdef CMPL_INTERRUPTIBLE
+	if (wait_for_completion_interruptible(&c->cleanup)) {
+		pr_info("Interrupted while clean up ch %d\n", c->channel_id);
+		return -EINTR;
+	}
+#else
+	wait_for_completion(&c->cleanup);
+#endif
+	c->is_poisoned = false;
+	c->is_started = false;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_stop_channel);
+
+/**
+ * most_register_aim - registers an AIM (driver) with the core
+ * @aim: instance of AIM to be registered
+ */
+int most_register_aim(struct most_aim *aim)
+{
+	struct most_aim_obj *aim_obj;
+
+	if (!aim) {
+		pr_err("Bad driver\n");
+		return -EINVAL;
+	}
+	aim_obj = create_most_aim_obj(aim->name);
+	if (!aim_obj) {
+		pr_info("failed to alloc driver object\n");
+		return -ENOMEM;
+	}
+	aim_obj->driver = aim;
+	aim->context = aim_obj;
+	pr_info("registered new application interfacing module %s\n",
+		aim->name);
+	list_add_tail(&aim_obj->list, &aim_list);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_register_aim);
+
+/**
+ * most_deregister_aim - deregisters an AIM (driver) with the core
+ * @aim: AIM to be removed
+ */
+int most_deregister_aim(struct most_aim *aim)
+{
+	struct most_aim_obj *aim_obj;
+	struct most_c_obj *c, *tmp;
+	struct most_inst_obj *i, *i_tmp;
+
+	if (!aim) {
+		pr_err("Bad driver\n");
+		return -EINVAL;
+	}
+
+	aim_obj = aim->context;
+	if (!aim_obj) {
+		pr_info("driver not registered.\n");
+		return -EINVAL;
+	}
+	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+		list_for_each_entry_safe(c, tmp, &i->channel_list, list) {
+			if (c->first_aim == aim || c->second_aim == aim)
+				aim->disconnect_channel(
+					c->iface, c->channel_id);
+			if (c->first_aim == aim)
+				c->first_aim = NULL;
+			if (c->second_aim == aim)
+				c->second_aim = NULL;
+		}
+	}
+	list_del(&aim_obj->list);
+	destroy_most_aim_obj(aim_obj);
+	pr_info("deregistering application interfacing module %s\n", aim->name);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(most_deregister_aim);
+
+/**
+ * most_register_interface - registers an interface with core
+ * @iface: pointer to the instance of the interface description.
+ *
+ * Allocates and initializes a new interface instance and all of its channels.
+ * Returns a pointer to kobject or an error pointer.
+ */
+struct kobject *most_register_interface(struct most_interface *iface)
+{
+	unsigned int i;
+	int id;
+	char name[STRING_SIZE];
+	char channel_name[STRING_SIZE];
+	struct most_c_obj *c;
+	struct most_inst_obj *inst;
+
+	if (!iface || !iface->enqueue || !iface->configure ||
+	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
+		pr_err("Bad interface or channel overflow\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
+	if (id < 0) {
+		pr_info("Failed to alloc mdev ID\n");
+		return ERR_PTR(id);
+	}
+	snprintf(name, STRING_SIZE, "mdev%d", id);
+
+	inst = create_most_inst_obj(name);
+	if (!inst) {
+		pr_info("Failed to allocate interface instance\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	iface->priv = inst;
+	INIT_LIST_HEAD(&inst->channel_list);
+	inst->iface = iface;
+	inst->dev_id = id;
+	atomic_set(&inst->tainted, 0);
+	list_add_tail(&inst->list, &instance_list);
+
+	for (i = 0; i < iface->num_channels; i++) {
+		const char *name_suffix = iface->channel_vector[i].name_suffix;
+
+		if (!name_suffix)
+			snprintf(channel_name, STRING_SIZE, "ch%d", i);
+		else if (name_suffix[0] == '@')
+			snprintf(channel_name, STRING_SIZE, "ch%d%s", i,
+				 name_suffix);
+		else
+			snprintf(channel_name, STRING_SIZE, "%s", name_suffix);
+
+		/* this increments the reference count of this instance */
+		c = create_most_c_obj(channel_name, &inst->kobj);
+		if (!c)
+			goto free_instance;
+		inst->channel[i] = c;
+		c->is_starving = 0;
+		c->iface = iface;
+		c->inst = inst;
+		c->channel_id = i;
+		c->keep_mbo = false;
+		c->enqueue_halt = false;
+		c->is_poisoned = false;
+		c->is_started = false;
+		c->cfg.direction = 0;
+		c->cfg.data_type = 0;
+		c->cfg.num_buffers = 0;
+		c->cfg.buffer_size = 0;
+		c->cfg.subbuffer_size = 0;
+		c->cfg.packets_per_xact = 0;
+		spin_lock_init(&c->fifo_lock);
+		INIT_LIST_HEAD(&c->fifo);
+		INIT_LIST_HEAD(&c->trash_fifo);
+		INIT_LIST_HEAD(&c->halt_fifo);
+		init_completion(&c->cleanup);
+		atomic_set(&c->mbo_ref, 0);
+		mutex_init(&c->stop_task_mutex);
+		list_add_tail(&c->list, &inst->channel_list);
+	}
+	pr_info("registered new MOST device mdev%d (%s)\n",
+		inst->dev_id, iface->description);
+	return &inst->kobj;
+
+free_instance:
+	pr_info("Failed allocate channel(s)\n");
+	list_del(&inst->list);
+	destroy_most_inst_obj(inst);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(most_register_interface);
+
+/**
+ * most_deregister_interface - deregisters an interface with core
+ * @iface: pointer to the interface instance description.
+ *
+ * Before removing an interface instance from the list, all running
+ * channels are stopped and poisoned.
+ */
+void most_deregister_interface(struct most_interface *iface)
+{
+	struct most_inst_obj *i = iface->priv;
+	struct most_c_obj *c;
+
+	mutex_lock(&deregister_mutex);
+	if (unlikely(!i)) {
+		pr_info("Bad Interface\n");
+		mutex_unlock(&deregister_mutex);
+		return;
+	}
+	pr_info("deregistering MOST device %s (%s)\n", i->kobj.name,
+		iface->description);
+
+	atomic_set(&i->tainted, 1);
+	mutex_unlock(&deregister_mutex);
+
+	while (modref) {
+		if (iface->mod && modref)
+			module_put(iface->mod);
+		modref--;
+	}
+
+	list_for_each_entry(c, &i->channel_list, list) {
+		if (!c->is_started)
+			continue;
+
+		mutex_lock(&c->stop_task_mutex);
+		if (c->hdm_enqueue_task)
+			kthread_stop(c->hdm_enqueue_task);
+		c->hdm_enqueue_task = NULL;
+		mutex_unlock(&c->stop_task_mutex);
+
+		if (iface->poison_channel(iface, c->channel_id))
+			pr_err("Can't poison channel %d\n", c->channel_id);
+	}
+	ida_simple_remove(&mdev_id, i->dev_id);
+	list_del(&i->list);
+	destroy_most_inst_obj(i);
+}
+EXPORT_SYMBOL_GPL(most_deregister_interface);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This is called by an HDM that _cannot_ attend to its duties and
+ * is about to be overrun by the core. The core will not
+ * enqueue any further packets unless the flagging HDM calls
+ * most_resume_enqueue().
+ */
+void most_stop_enqueue(struct most_interface *iface, int id)
+{
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+	if (likely(c))
+		c->enqueue_halt = true;
+}
+EXPORT_SYMBOL_GPL(most_stop_enqueue);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @id: channel id
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * sitting in the wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int id)
+{
+	struct most_c_obj *c = get_channel_by_iface(iface, id);
+
+	if (unlikely(!c))
+		return;
+	c->enqueue_halt = false;
+
+	wake_up_interruptible(&c->hdm_fifo_wq);
+}
+EXPORT_SYMBOL_GPL(most_resume_enqueue);
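+
+/*
+ * Flow-control sketch for an HDM (illustrative only; the my_hdm_* names,
+ * dev and its fields are hypothetical): when its hardware fifo fills up,
+ * an HDM may throttle the core and later release it again:
+ *
+ *	if (my_hdm_tx_fifo_full(dev))
+ *		most_stop_enqueue(&dev->most_iface, channel_id);
+ *	...
+ *	// later, e.g. from the tx-done interrupt path
+ *	most_resume_enqueue(&dev->most_iface, channel_id);
+ */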
+
+static int __init most_init(void)
+{
+	pr_info("init()\n");
+	INIT_LIST_HEAD(&instance_list);
+	INIT_LIST_HEAD(&aim_list);
+	mutex_init(&deregister_mutex);
+	ida_init(&mdev_id);
+
+	if (bus_register(&most_bus)) {
+		pr_info("Cannot register most bus\n");
+		goto exit;
+	}
+
+	most_class = class_create(THIS_MODULE, "most");
+	if (IS_ERR(most_class)) {
+		pr_info("No udev support.\n");
+		goto exit_bus;
+	}
+	if (driver_register(&mostcore)) {
+		pr_info("Cannot register core driver\n");
+		goto exit_class;
+	}
+
+	class_glue_dir =
+		device_create(most_class, NULL, 0, NULL, "mostcore");
+	if (!class_glue_dir)
+		goto exit_driver;
+
+	most_aim_kset =
+		kset_create_and_add("aims", NULL, &class_glue_dir->kobj);
+	if (!most_aim_kset)
+		goto exit_class_container;
+
+	most_inst_kset =
+		kset_create_and_add("devices", NULL, &class_glue_dir->kobj);
+	if (!most_inst_kset)
+		goto exit_driver_kset;
+
+	return 0;
+
+exit_driver_kset:
+	kset_unregister(most_aim_kset);
+exit_class_container:
+	device_destroy(most_class, 0);
+exit_driver:
+	driver_unregister(&mostcore);
+exit_class:
+	class_destroy(most_class);
+exit_bus:
+	bus_unregister(&most_bus);
+exit:
+	return -ENOMEM;
+}
+
+static void __exit most_exit(void)
+{
+	struct most_inst_obj *i, *i_tmp;
+	struct most_aim_obj *d, *d_tmp;
+
+	pr_info("exit core module\n");
+	list_for_each_entry_safe(d, d_tmp, &aim_list, list) {
+		destroy_most_aim_obj(d);
+	}
+
+	list_for_each_entry_safe(i, i_tmp, &instance_list, list) {
+		list_del(&i->list);
+		destroy_most_inst_obj(i);
+	}
+	kset_unregister(most_inst_kset);
+	kset_unregister(most_aim_kset);
+	device_destroy(most_class, 0);
+	driver_unregister(&mostcore);
+	class_destroy(most_class);
+	bus_unregister(&most_bus);
+	ida_destroy(&mdev_id);
+}
+
+module_init(most_init);
+module_exit(most_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
+MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
diff --git a/drivers/staging/most/mostcore/mostcore.h b/drivers/staging/most/mostcore/mostcore.h
new file mode 100644
index 0000000..299c7d5
--- /dev/null
+++ b/drivers/staging/most/mostcore/mostcore.h
@@ -0,0 +1,316 @@
+/*
+ * mostcore.h - Interface between MostCore,
+ *   Hardware Dependent Module (HDM) and Application Interface Module (AIM).
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * This file is licensed under GPLv2.
+ */
+
+/*
+ * Authors:
+ *   Andrey Shvetsov <andrey.shvetsov@k2l.de>
+ *   Christian Gromm <christian.gromm@microchip.com>
+ *   Sebastian Graf
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+
+struct kobject;
+struct module;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+	ITYPE_LOOPBACK = 1,
+	ITYPE_I2C,
+	ITYPE_I2S,
+	ITYPE_TSI,
+	ITYPE_HBI,
+	ITYPE_MEDIALB_DIM,
+	ITYPE_MEDIALB_DIM2,
+	ITYPE_USB,
+	ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+	MOST_CH_RX = 1 << 0,
+	MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+	MOST_CH_CONTROL = 1 << 0,
+	MOST_CH_ASYNC = 1 << 1,
+	MOST_CH_ISOC_AVP = 1 << 2,
+	MOST_CH_SYNC = 1 << 5,
+};
+
+
+enum mbo_status_flags {
+	/* MBO was processed successfully (data was sent or received) */
+	MBO_SUCCESS = 0,
+	/* The MBO contains wrong or missing information.  */
+	MBO_E_INVAL,
+	/* MBO was completed as HDM Channel will be closed */
+	MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is a bitwise OR-combination of the values from the
+ * enumeration most_channel_direction. Zero is an allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is a bitwise OR-combination of the values from the
+ * enumeration most_channel_data_type. Zero is an allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is attached to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MostCore channel like supported Data Types
+ * and directions. This information is provided by an HDM for the MostCore.
+ *
+ * The Core creates read only sysfs attribute files in
+ * /sys/devices/virtual/most/mostcore/devices/mdev-#/mdev#-ch#/ with the
+ * following attributes:
+ *	-available_directions
+ *	-available_datatypes
+ *	-number_of_packet_buffers
+ *	-number_of_stream_buffers
+ *	-size_of_packet_buffer
+ *	-size_of_stream_buffer
+ * where content of each file is a string with all supported properties of this
+ * very channel attribute.
+ */
+struct most_channel_capability {
+	u16 direction;
+	u16 data_type;
+	u16 num_buffers_packet;
+	u16 buffer_size_packet;
+	u16 num_buffers_streaming;
+	u16 buffer_size_streaming;
+	char *name_suffix;
+};
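+
+/*
+ * Example (illustrative sketch only, not part of this interface): an HDM
+ * exposing one synchronous rx/tx channel might describe it as follows;
+ * all numbers are made up and depend on the actual hardware.
+ *
+ *	static struct most_channel_capability my_caps[] = {
+ *		{
+ *			.direction = MOST_CH_RX | MOST_CH_TX,
+ *			.data_type = MOST_CH_SYNC,
+ *			.num_buffers_streaming = 32,
+ *			.buffer_size_streaming = 4096,
+ *			.name_suffix = "@sync",
+ *		},
+ *	};
+ */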
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for AIM.
+ * Buffer size may be cut down by the HDM in a configure callback
+ * to match a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ *		      packet. This is USB specific.
+ *
+ * Describes the configuration for a MostCore channel. This information is
+ * provided from the MostCore to an HDM (like the Medusa PCIe Interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+	enum most_channel_direction direction;
+	enum most_channel_data_type data_type;
+	u16 num_buffers;
+	u16 buffer_size;
+	u16 extra_len;
+	u16 subbuffer_size;
+	u16 packets_per_xact;
+};
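+
+/*
+ * Sketch of an HDM "configure" callback using this structure (illustrative
+ * only; MY_HW_MAX_BUF, MY_HW_PADDING and the function name are hypothetical):
+ *
+ *	static int my_hdm_configure(struct most_interface *iface, int ch_idx,
+ *				    struct most_channel_config *cfg)
+ *	{
+ *		// clamp the AIM's request to what the hardware can handle
+ *		if (cfg->buffer_size > MY_HW_MAX_BUF)
+ *			cfg->buffer_size = MY_HW_MAX_BUF;
+ *		// request extra room for hardware-specific padding
+ *		cfg->extra_len = MY_HW_PADDING;
+ *		return 0;
+ *	}
+ */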
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ *	public: documented fields that are used for the communications
+ *	between MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The MostCore allocates and initializes the MBO.
+ *
+ * The HDM receives MBO for transfer from MostCore with the call to enqueue().
+ * The HDM copies the data to or from the buffer depending on the configured
+ * channel direction, sets "processed_length" and "status" and completes
+ * the transfer procedure by calling the completion routine.
+ *
+ * At the end the MostCore deallocates the MBO or recycles it for further
+ * transfers for the same or different HDM.
+ *
+ * Directions of usage:
+ *					I.
+ * The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ *					II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this rule
+ * is violated, memory leaks will occur, since the core driver does _not_ track
+ * MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+	void *context;
+	void *priv;
+	struct list_head list;
+	struct most_interface *ifp;
+	u16 hdm_channel_id;
+	void *virt_address;
+	dma_addr_t bus_address;
+	u16 buffer_length;
+	u16 processed_length;
+	enum mbo_status_flags status;
+	void (*complete)(struct mbo *);
+};
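+
+/*
+ * Completion sketch (illustrative only; bytes_done is a hypothetical local
+ * variable): once the hardware has finished an MBO handed over via
+ * enqueue(), the HDM fills in the out-fields and returns the buffer to
+ * the core:
+ *
+ *	mbo->processed_length = bytes_done;
+ *	mbo->status = MBO_SUCCESS;
+ *	mbo->complete(mbo);
+ */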
+
+/**
+ * Interface instance description.
+ *
+ * Describes one instance of an interface like Medusa PCIe or Vantage USB.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @interface: Interface type. \sa most_interface_type.
+ * @description: PRELIMINARY.
+ *   Unique description of the device instance from point of view of the
+ *   interface in free text form (ASCII).
+ *   It may be a hexadecimal presentation of the memory address for the MediaLB
+ *   IP or USB device ID with USB properties for USB interface, etc.
+ * @num_channels: Number of channels and size of the channel_vector.
+ * @channel_vector: Properties of the channels.
+ *   Array index represents the channel ID used by the driver.
+ * @configure: Callback to change data type for the channel of the
+ *   interface instance. May be zero if the instance of the interface is not
+ *   configurable. Parameter channel_config describes direction and data
+ *   type for the channel, configured by the higher level.
+ * @enqueue: Delivers MBO to the HDM for processing.
+ *   After HDM completes Rx- or Tx- operation the processed MBO shall
+ *   be returned back to the MostCore using completion routine.
+ *   The reason to get the MBO delivered from the MostCore after the channel
+ *   is poisoned is the re-opening of the channel by the application.
+ *   In this case the HDM shall hold MBOs and service the channel as usual.
+ *   The HDM must be able to hold at least one MBO for each channel.
+ *   The callback returns a negative value on error, otherwise 0.
+ * @poison_channel: Informs HDM about closing the channel. The HDM shall
+ *   cancel all transfers and synchronously or asynchronously return
+ *   all enqueued for this channel MBOs using the completion routine.
+ *   The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieving of network info from the HDM by
+ *   means of "Message exchange over MDP/MEP"
+ * @priv: Private field used by mostcore to store context information.
+ */
+struct most_interface {
+	struct module *mod;
+	enum most_interface_type interface;
+	const char *description;
+	int num_channels;
+	struct most_channel_capability *channel_vector;
+	int (*configure)(struct most_interface *iface, int channel_idx,
+			 struct most_channel_config *channel_config);
+	int (*enqueue)(struct most_interface *iface, int channel_idx,
+		       struct mbo *mbo);
+	int (*poison_channel)(struct most_interface *iface, int channel_idx);
+	void (*request_netinfo)(struct most_interface *iface, int channel_idx);
+	void *priv;
+};
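+
+/*
+ * Registration sketch (illustrative only; the my_* identifiers are
+ * hypothetical and not part of this interface):
+ *
+ *	static struct most_interface my_iface = {
+ *		.mod = THIS_MODULE,
+ *		.interface = ITYPE_USB,
+ *		.description = "usb device 1-1.2",
+ *		.num_channels = ARRAY_SIZE(my_caps),
+ *		.channel_vector = my_caps,
+ *		.configure = my_hdm_configure,
+ *		.enqueue = my_hdm_enqueue,
+ *		.poison_channel = my_hdm_poison,
+ *		.request_netinfo = my_hdm_request_netinfo,
+ *	};
+ *
+ *	kobj = most_register_interface(&my_iface);
+ */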
+
+/**
+ * struct most_aim - identifies MOST device driver to mostcore
+ * @name: Driver name
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: notification that a certain channel isn't available anymore
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ * @context: context pointer to be used by mostcore
+ */
+struct most_aim {
+	const char *name;
+	int (*probe_channel)(struct most_interface *iface, int channel_idx,
+			     struct most_channel_config *cfg,
+			     struct kobject *parent, char *name);
+	int (*disconnect_channel)(struct most_interface *iface,
+				  int channel_idx);
+	int (*rx_completion)(struct mbo *mbo);
+	int (*tx_completion)(struct most_interface *iface, int channel_idx);
+	void *context;
+};
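+
+/*
+ * Registration sketch (illustrative only; the my_aim_* callbacks are
+ * hypothetical and must be supplied by the AIM):
+ *
+ *	static struct most_aim my_aim = {
+ *		.name = "my_aim",
+ *		.probe_channel = my_aim_probe,
+ *		.disconnect_channel = my_aim_disconnect,
+ *		.rx_completion = my_aim_rx_completion,
+ *		.tx_completion = my_aim_tx_completion,
+ *	};
+ *
+ *	ret = most_register_aim(&my_aim);
+ */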
+
+/**
+ * most_register_interface - Registers instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns a pointer to the kobject of the generated instance.
+ *
+ * Note: HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+struct kobject *most_register_interface(struct most_interface *iface);
+
+/**
+ * most_deregister_interface - Deregisters an instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ */
+void most_deregister_interface(struct most_interface *iface);
+int most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents core from enqueueing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * in wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_aim(struct most_aim *aim);
+int most_deregister_aim(struct most_aim *aim);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx);
+void most_put_mbo(struct mbo *mbo);
+int most_start_channel(struct most_interface *iface, int channel_idx);
+int most_stop_channel(struct most_interface *iface, int channel_idx);
+
+
+#endif /* __MOST_CORE_H__ */
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
index 7285c64..ad30ce4 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
@@ -948,7 +948,6 @@
 static struct spi_driver spinand_driver = {
 	.driver = {
 		.name		= "mt29f",
-		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
 		.of_match_table	= spinand_dt,
 	},
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
index 7f2c24d..6c8e413 100644
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ b/drivers/staging/mt29f_spinand/mt29f_spinand.h
@@ -101,7 +101,7 @@
 	u8		*rx_buf;	/* Rx buf */
 };
 
-extern int spinand_mtd(struct mtd_info *mtd);
-extern void spinand_mtd_release(struct mtd_info *mtd);
+int spinand_mtd(struct mtd_info *mtd);
+void spinand_mtd_release(struct mtd_info *mtd);
 
 #endif /* __LINUX_MTD_SPI_NAND_H */
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
index 77c3c35..e914147 100644
--- a/drivers/staging/netlogic/platform_net.c
+++ b/drivers/staging/netlogic/platform_net.c
@@ -163,7 +163,7 @@
 	switch (nlm_prom_info.board_major_version) {
 	case 12:
 		/* first block RGMII or XAUI, use RGMII */
-		ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII,
+		ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII;
 		ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0;
 		ndata0.phy_addr[0] = 0;
 
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
index 13e03f0..2f65ec5 100644
--- a/drivers/staging/netlogic/xlr_net.h
+++ b/drivers/staging/netlogic/xlr_net.h
@@ -1102,4 +1102,4 @@
 	u64 *class_3_spill;
 };
 
-extern void xlr_set_gmac_speed(struct xlr_net_priv *priv);
+void xlr_set_gmac_speed(struct xlr_net_priv *priv);
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index e271375..2ec9de9 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -165,19 +165,18 @@
 	int state;
 };
 
-extern int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
-			     short size);
+int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
+		     short size);
 
-extern struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
-					const unsigned char *data, short size);
+struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
+				 const unsigned char *data, short size);
 
-extern int nvec_register_notifier(struct nvec_chip *nvec,
-				  struct notifier_block *nb,
-				  unsigned int events);
+int nvec_register_notifier(struct nvec_chip *nvec,
+			   struct notifier_block *nb,
+			   unsigned int events);
 
-extern int nvec_unregister_notifier(struct nvec_chip *dev,
-				    struct notifier_block *nb);
+int nvec_unregister_notifier(struct nvec_chip *dev, struct notifier_block *nb);
 
-extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
+void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
 
 #endif
diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h
index a530b55..5ed8483 100644
--- a/drivers/staging/octeon/ethernet-mdio.h
+++ b/drivers/staging/octeon/ethernet-mdio.h
@@ -25,7 +25,7 @@
 
 extern const struct ethtool_ops cvm_oct_ethtool_ops;
 
-extern void octeon_mdiobus_force_mod_depencency(void);
+void octeon_mdiobus_force_mod_depencency(void);
 
 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 int cvm_oct_phy_setup_device(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index beb7aac..51dcb61 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -118,9 +118,10 @@
 	}
 
 	/* Since the 10Mbps preamble workaround is allowed we need to enable
-	   preamble checking, FCS stripping, and clear error bits on
-	   every speed change. If errors occur during 10Mbps operation
-	   the above code will change this stuff */
+	 * preamble checking, FCS stripping, and clear error bits on
+	 * every speed change. If errors occur during 10Mbps operation
+	 * the above code will change this stuff
+	 */
 	cvm_oct_set_hw_preamble(priv, true);
 
 	if (priv->phydev == NULL) {
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 22853d3..d1a33a9 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -70,7 +70,14 @@
  */
 static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
 {
-	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
+	int port;
+
+	if (octeon_has_feature(OCTEON_FEATURE_PKND))
+		port = work->word0.pip.cn68xx.pknd;
+	else
+		port = work->word1.cn38xx.ipprt;
+
+	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
 		/*
 		 * Ignore length errors on min size packets. Some
 		 * equipment incorrectly pads packets to 64+4FCS
@@ -87,8 +94,8 @@
 		 * packet to determine if we can remove a non spec
 		 * preamble and generate a correct packet.
 		 */
-		int interface = cvmx_helper_get_interface_num(work->ipprt);
-		int index = cvmx_helper_get_interface_index_num(work->ipprt);
+		int interface = cvmx_helper_get_interface_num(port);
+		int index = cvmx_helper_get_interface_index_num(port);
 		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;
 
 		gmxx_rxx_frm_ctl.u64 =
@@ -99,7 +106,7 @@
 			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
 			int i = 0;
 
-			while (i < work->len - 1) {
+			while (i < work->word1.len - 1) {
 				if (*ptr != 0x55)
 					break;
 				ptr++;
@@ -109,18 +116,18 @@
 			if (*ptr == 0xd5) {
 				/*
 				  printk_ratelimited("Port %d received 0xd5 preamble\n",
-					  work->ipprt);
+					  port);
 				 */
 				work->packet_ptr.s.addr += i + 1;
-				work->len -= i + 5;
+				work->word1.len -= i + 5;
 			} else if ((*ptr & 0xf) == 0xd) {
 				/*
 				  printk_ratelimited("Port %d received 0x?d preamble\n",
-					  work->ipprt);
+					  port);
 				 */
 				work->packet_ptr.s.addr += i;
-				work->len -= i + 4;
-				for (i = 0; i < work->len; i++) {
+				work->word1.len -= i + 4;
+				for (i = 0; i < work->word1.len; i++) {
 					*ptr =
 					    ((*ptr & 0xf0) >> 4) |
 					    ((*(ptr + 1) & 0xf) << 4);
@@ -128,7 +135,7 @@
 				}
 			} else {
 				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
-						   work->ipprt);
+						   port);
 				/*
 				   cvmx_helper_dump_packet(work);
 				 */
@@ -138,7 +145,7 @@
 		}
 	} else {
 		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
-				   work->ipprt, work->word2.snoip.err_code);
+				   port, work->word2.snoip.err_code);
 		cvm_oct_free_work(work);
 		return 1;
 	}
@@ -172,9 +179,16 @@
 	}
 
 	/* Only allow work for our group (and preserve priorities) */
-	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
-	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
-		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
+		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
+				1ull << pow_receive_group);
+		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+	} else {
+		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
+		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
+			(old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+	}
 
 	if (USE_ASYNC_IOBDMA) {
 		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
@@ -186,6 +200,7 @@
 		struct sk_buff **pskb = NULL;
 		int skb_in_hw;
 		cvmx_wqe_t *work;
+		int port;
 
 		if (USE_ASYNC_IOBDMA && did_work_request)
 			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
@@ -195,12 +210,19 @@
 		prefetch(work);
 		did_work_request = 0;
 		if (work == NULL) {
-			union cvmx_pow_wq_int wq_int;
+			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
+					       1ull << pow_receive_group);
+				cvmx_write_csr(CVMX_SSO_WQ_INT,
+					       1ull << pow_receive_group);
+			} else {
+				union cvmx_pow_wq_int wq_int;
 
-			wq_int.u64 = 0;
-			wq_int.s.iq_dis = 1 << pow_receive_group;
-			wq_int.s.wq_int = 1 << pow_receive_group;
-			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+				wq_int.u64 = 0;
+				wq_int.s.iq_dis = 1 << pow_receive_group;
+				wq_int.s.wq_int = 1 << pow_receive_group;
+				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
+			}
 			break;
 		}
 		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
@@ -220,7 +242,13 @@
 			prefetch(&skb->head);
 			prefetch(&skb->len);
 		}
-		prefetch(cvm_oct_device[work->ipprt]);
+
+		if (octeon_has_feature(OCTEON_FEATURE_PKND))
+			port = work->word0.pip.cn68xx.pknd;
+		else
+			port = work->word1.cn38xx.ipprt;
+
+		prefetch(cvm_oct_device[port]);
 
 		/* Immediately throw away all packets with receive errors */
 		if (unlikely(work->word2.snoip.rcv_error)) {
@@ -237,7 +265,7 @@
 			skb->data = skb->head + work->packet_ptr.s.addr -
 				cvmx_ptr_to_phys(skb->head);
 			prefetch(skb->data);
-			skb->len = work->len;
+			skb->len = work->word1.len;
 			skb_set_tail_pointer(skb, skb->len);
 			packet_not_copied = 1;
 		} else {
@@ -245,7 +273,7 @@
 			 * We have to copy the packet. First allocate
 			 * an skbuff for it.
 			 */
-			skb = dev_alloc_skb(work->len);
+			skb = dev_alloc_skb(work->word1.len);
 			if (!skb) {
 				cvm_oct_free_work(work);
 				continue;
@@ -268,13 +296,14 @@
 					else
 						ptr += 6;
 				}
-				memcpy(skb_put(skb, work->len), ptr, work->len);
+				memcpy(skb_put(skb, work->word1.len), ptr,
+				       work->word1.len);
 				/* No packet buffers to free */
 			} else {
 				int segments = work->word2.s.bufs;
 				union cvmx_buf_ptr segment_ptr =
 				    work->packet_ptr;
-				int len = work->len;
+				int len = work->word1.len;
 
 				while (segments--) {
 					union cvmx_buf_ptr next_ptr =
@@ -310,10 +339,9 @@
 			}
 			packet_not_copied = 0;
 		}
-
-		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
-			   cvm_oct_device[work->ipprt])) {
-			struct net_device *dev = cvm_oct_device[work->ipprt];
+		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
+			   cvm_oct_device[port])) {
+			struct net_device *dev = cvm_oct_device[port];
 			struct octeon_ethernet *priv = netdev_priv(dev);
 
 			/*
@@ -333,7 +361,7 @@
 					skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 				/* Increment RX stats for virtual ports */
-				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
+				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
 #ifdef CONFIG_64BIT
 					atomic64_add(1,
 						     (atomic64_t *)&priv->stats.rx_packets);
@@ -368,7 +396,7 @@
 			 * doesn't exist.
 			 */
 			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
-				   work->ipprt);
+				   port);
 			dev_kfree_skb_irq(skb);
 		}
 		/*
@@ -390,7 +418,13 @@
 		}
 	}
 	/* Restore the original POW group mask */
-	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
+		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
+	} else {
+		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
+	}
+
 	if (USE_ASYNC_IOBDMA) {
 		/* Restore the scratch area */
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
@@ -422,8 +456,6 @@
 {
 	int i;
 	struct net_device *dev_for_napi = NULL;
-	union cvmx_pow_wq_int_thrx int_thr;
-	union cvmx_pow_wq_int_pc int_pc;
 
 	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
 		if (cvm_oct_device[i]) {
@@ -449,15 +481,34 @@
 
 	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
 
-	int_thr.u64 = 0;
-	int_thr.s.tc_en = 1;
-	int_thr.s.tc_thr = 1;
 	/* Enable POW interrupt when our port has at least one packet */
-	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
+		union cvmx_sso_wq_int_thrx int_thr;
+		union cvmx_pow_wq_int_pc int_pc;
 
-	int_pc.u64 = 0;
-	int_pc.s.pc_thr = 5;
-	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+		int_thr.u64 = 0;
+		int_thr.s.tc_en = 1;
+		int_thr.s.tc_thr = 1;
+		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
+			       int_thr.u64);
+
+		int_pc.u64 = 0;
+		int_pc.s.pc_thr = 5;
+		cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
+	} else {
+		union cvmx_pow_wq_int_thrx int_thr;
+		union cvmx_pow_wq_int_pc int_pc;
+
+		int_thr.u64 = 0;
+		int_thr.s.tc_en = 1;
+		int_thr.s.tc_thr = 1;
+		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
+			       int_thr.u64);
+
+		int_pc.u64 = 0;
+		int_pc.s.pc_thr = 5;
+		cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
+	}
 
 	/* Schedule NAPI now. This will indirectly enable the interrupt. */
 	napi_schedule(&cvm_oct_napi);
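The same two-way port lookup now appears both in cvm_oct_check_rcv_error() and
in the NAPI poll loop above. A possible follow-up (sketch only, not part of
this patch; the helper name is invented) would hoist it into one place:

	/* Hypothetical helper: resolve the input port of a work-queue entry. */
	static inline int cvm_oct_wqe_port(cvmx_wqe_t *work)
	{
		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			return work->word0.pip.cn68xx.pknd;	/* CN68XX port kind */

		return work->word1.cn38xx.ipprt;	/* pre-CN68XX IPD port */
	}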
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 7c1c1b0..9e2116f 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -396,10 +396,12 @@
 
 	/* Check if we can use the hardware checksumming */
 	if ((skb->protocol == htons(ETH_P_IP)) &&
-	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
-	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == htons(1 << 14)))
-	    && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
-		|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
+	    (ip_hdr(skb)->version == 4) &&
+	    (ip_hdr(skb)->ihl == 5) &&
+	    ((ip_hdr(skb)->frag_off == 0) ||
+	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
+	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
 		/* Use hardware checksum calc */
 		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
 	}
@@ -589,13 +591,14 @@
 	 * Fill in some of the work queue fields. We may need to add
 	 * more if the software at the other end needs them.
 	 */
-	work->hw_chksum = skb->csum;
-	work->len = skb->len;
-	work->ipprt = priv->port;
-	work->qos = priv->port & 0x7;
-	work->grp = pow_send_group;
-	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
-	work->tag = pow_send_group;	/* FIXME */
+	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
+		work->word0.pip.cn38xx.hw_chksum = skb->csum;
+	work->word1.len = skb->len;
+	cvmx_wqe_set_port(work, priv->port);
+	cvmx_wqe_set_qos(work, priv->port & 0x7);
+	cvmx_wqe_set_grp(work, pow_send_group);
+	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
+	work->word1.tag = pow_send_group;	/* FIXME */
 	/* Default to zero. Sets of zero later are commented out */
 	work->word2.u64 = 0;
 	work->word2.s.bufs = 1;
@@ -675,8 +678,8 @@
 	}
 
 	/* Submit the packet to the POW */
-	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
-			     work->grp);
+	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
+			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
 	priv->stats.tx_packets++;
 	priv->stats.tx_bytes += skb->len;
 	dev_consume_skb_any(skb);
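The hardware-checksum test reflowed earlier in this file's diff encodes a
specific set of conditions (IPv4, no IP options, not fragmented apart from a
bare DF bit, TCP or UDP). As a readability aid it could live in a small
predicate; this is a sketch under that assumption, not part of the patch:

	/* Hypothetical helper mirroring the condition above: can the PKO
	 * engine compute the L4 checksum for this skb?
	 */
	static inline bool cvm_oct_can_use_hw_csum(struct sk_buff *skb)
	{
		const struct iphdr *ih = ip_hdr(skb);

		return skb->protocol == htons(ETH_P_IP) &&
		       ih->version == 4 && ih->ihl == 5 &&
		       (ih->frag_off == 0 || ih->frag_off == htons(1 << 14)) &&
		       (ih->protocol == IPPROTO_TCP ||
			ih->protocol == IPPROTO_UDP);
	}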
diff --git a/drivers/staging/octeon/ethernet-util.h b/drivers/staging/octeon/ethernet-util.h
index 1ba789a..45f024b 100644
--- a/drivers/staging/octeon/ethernet-util.h
+++ b/drivers/staging/octeon/ethernet-util.h
@@ -8,6 +8,10 @@
  * published by the Free Software Foundation.
  */
 
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-helper-util.h>
+
 /**
  * cvm_oct_get_buffer_ptr - convert packet data address to pointer
  * @packet_ptr: Packet data hardware address
@@ -28,14 +32,12 @@
  */
 static inline int INTERFACE(int ipd_port)
 {
-	if (ipd_port < 32)	/* Interface 0 or 1 for RGMII,GMII,SPI, etc */
-		return ipd_port >> 4;
-	else if (ipd_port < 36)	/* Interface 2 for NPI */
-		return 2;
-	else if (ipd_port < 40)	/* Interface 3 for loopback */
-		return 3;
-	else if (ipd_port == 40)	/* Non existent interface for POW0 */
-		return 4;
+	int interface = cvmx_helper_get_interface_num(ipd_port);
+
+	if (interface >= 0)
+		return interface;
+	else if (ipd_port == CVMX_PIP_NUM_INPUT_PORTS)
+		return 10;
 	panic("Illegal ipd_port %d passed to INTERFACE\n", ipd_port);
 }
 
@@ -47,7 +49,5 @@
  */
 static inline int INDEX(int ipd_port)
 {
-	if (ipd_port < 32)
-		return ipd_port & 15;
-	return ipd_port & 3;
+	return cvmx_helper_get_interface_index_num(ipd_port);
 }
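With INTERFACE() and INDEX() now delegating to the generic cvmx helpers, the
port-number layout is no longer open-coded in this driver. A minimal usage
sketch (hypothetical caller and array, not part of the patch):

	/* Hypothetical per-port bookkeeping keyed by (interface, index). */
	int interface = INTERFACE(ipd_port);	/* panics on an illegal port */
	int index = INDEX(ipd_port);

	my_port_stats[interface][index].rx_errors++;	/* made-up array */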
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index f9dba23..7274fda 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -152,11 +152,12 @@
 			     num_packet_buffers);
 	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
 		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
-				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
+				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);
 
 #ifdef __LITTLE_ENDIAN
 	{
 		union cvmx_ipd_ctl_status ipd_ctl_status;
+
 		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
 		ipd_ctl_status.s.pkt_lend = 1;
 		ipd_ctl_status.s.wqe_lend = 1;
@@ -859,7 +860,10 @@
 	int port;
 
 	/* Disable POW interrupt */
-	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
+	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
+	else
+		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);
 
 	cvmx_ipd_disable();
 
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index e9d3e9a..a242c70 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -53,20 +53,20 @@
 
 int cvm_oct_free_work(void *work_queue_entry);
 
-extern int cvm_oct_rgmii_init(struct net_device *dev);
-extern void cvm_oct_rgmii_uninit(struct net_device *dev);
-extern int cvm_oct_rgmii_open(struct net_device *dev);
+int cvm_oct_rgmii_init(struct net_device *dev);
+void cvm_oct_rgmii_uninit(struct net_device *dev);
+int cvm_oct_rgmii_open(struct net_device *dev);
 
-extern int cvm_oct_sgmii_init(struct net_device *dev);
-extern int cvm_oct_sgmii_open(struct net_device *dev);
+int cvm_oct_sgmii_init(struct net_device *dev);
+int cvm_oct_sgmii_open(struct net_device *dev);
 
-extern int cvm_oct_spi_init(struct net_device *dev);
-extern void cvm_oct_spi_uninit(struct net_device *dev);
-extern int cvm_oct_xaui_init(struct net_device *dev);
-extern int cvm_oct_xaui_open(struct net_device *dev);
+int cvm_oct_spi_init(struct net_device *dev);
+void cvm_oct_spi_uninit(struct net_device *dev);
+int cvm_oct_xaui_init(struct net_device *dev);
+int cvm_oct_xaui_open(struct net_device *dev);
 
-extern int cvm_oct_common_init(struct net_device *dev);
-extern void cvm_oct_common_uninit(struct net_device *dev);
+int cvm_oct_common_init(struct net_device *dev);
+void cvm_oct_common_uninit(struct net_device *dev);
 void cvm_oct_adjust_link(struct net_device *dev);
 int cvm_oct_common_stop(struct net_device *dev);
 int cvm_oct_common_open(struct net_device *dev,
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index aec9895..d06e19d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -98,7 +98,7 @@
 
 #include <linux/interrupt.h>
 
-extern irqreturn_t dcon_interrupt(int irq, void *id);
+irqreturn_t dcon_interrupt(int irq, void *id);
 
 #ifdef CONFIG_FB_OLPC_DCON_1
 extern struct dcon_platform_data dcon_pdata_xo_1;
diff --git a/drivers/staging/ozwpan/Kconfig b/drivers/staging/ozwpan/Kconfig
deleted file mode 100644
index 7904cae..0000000
--- a/drivers/staging/ozwpan/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config USB_WPAN_HCD
-	tristate "USB over WiFi Host Controller"
-	depends on USB && NET
-	help
-	  A driver for USB Host Controllers that are compatible with
-	  Ozmo Devices USB over WiFi technology.
-
-	  To compile this driver a module, choose M here: the module
-	  will be called "ozwpan".
diff --git a/drivers/staging/ozwpan/Makefile b/drivers/staging/ozwpan/Makefile
deleted file mode 100644
index 29529c1..0000000
--- a/drivers/staging/ozwpan/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# -----------------------------------------------------------------------------
-# Copyright (c) 2011 Ozmo Inc
-# Released under the GNU General Public License Version 2 (GPLv2).
-# -----------------------------------------------------------------------------
-
-obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
-ozwpan-y := \
-	ozmain.o \
-	ozpd.o \
-	ozusbsvc.o \
-	ozusbsvc1.o \
-	ozhcd.o \
-	ozeltbuf.o \
-	ozproto.o \
-	ozcdev.o \
-	ozurbparanoia.o
diff --git a/drivers/staging/ozwpan/README b/drivers/staging/ozwpan/README
deleted file mode 100644
index 7c055ec..0000000
--- a/drivers/staging/ozwpan/README
+++ /dev/null
@@ -1,25 +0,0 @@
-OZWPAN USB Host Controller Driver
----------------------------------
-This driver is a USB HCD driver that does not have an associated a physical
-device but instead uses Wi-Fi to communicate with the wireless peripheral.
-The USB requests are converted into a layer 2 network protocol and transmitted
-on the network using an ethertype (0x892e) regestered to Ozmo Device Inc.
-This driver is compatible with existing wireless devices that use Ozmo Devices
-technology.
-
-To operate the driver must be bound to a suitable network interface. This can
-be done when the module is loaded (specifying the name of the network interface
-as a parameter - e.g. 'insmod ozwpan g_net_dev=go0') or can be bound after
-loading using an ioctl call. See the ozappif.h file and the ioctls
-OZ_IOCTL_ADD_BINDING and OZ_IOCTL_REMOVE_BINDING.
-
-The devices connect to the host use Wi-Fi Direct so a network card that supports
-Wi-Fi direct is required. A recent version (0.8.x or later) version of the
-wpa_supplicant can be used to setup the network interface to create a persistent
-autonomous group (for older pre-WFD peripherals) or put in a listen state to
-allow group negotiation to occur for more recent devices that support WFD.
-
-The protocol used over the network does not directly mimic the USB bus
-transactions as this would be rather busy and inefficient. Instead the chapter 9
-requests are converted into a request/response pair of messages. (See
-ozprotocol.h for data structures used in the protocol).
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
deleted file mode 100644
index f32c1c0..0000000
--- a/drivers/staging/ozwpan/TODO
+++ /dev/null
@@ -1,14 +0,0 @@
-TODO:
-	- Convert event tracing code to in-kernel tracing infrastructure
-	- Check for remaining ioctl & check if that can be converted into
-	  sysfs entries
-	- Convert debug prints to appropriate dev_debug or something better
-	- Modify Kconfig to add CONFIG option for enabling/disabling event
-	  tracing.
-	- check USB HCD implementation is complete and correct.
-	- code review by USB developer community.
-	- testing with as many devices as possible.
-
-Please send any patches for this driver to
-Shigekatsu Tateno <shigekatsu.tateno@atmel.com>
-and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ozwpan/ozappif.h b/drivers/staging/ozwpan/ozappif.h
deleted file mode 100644
index ea1b271..0000000
--- a/drivers/staging/ozwpan/ozappif.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZAPPIF_H
-#define _OZAPPIF_H
-
-#define OZ_IOCTL_MAGIC	0xf4
-
-struct oz_mac_addr {
-	__u8 a[6];
-};
-
-#define OZ_MAX_PDS	8
-
-struct oz_pd_list {
-	__u32 count;
-	struct oz_mac_addr addr[OZ_MAX_PDS];
-};
-
-#define OZ_MAX_BINDING_LEN	32
-
-struct oz_binding_info {
-	char name[OZ_MAX_BINDING_LEN];
-};
-
-#define OZ_IOCTL_GET_PD_LIST	_IOR(OZ_IOCTL_MAGIC, 0, struct oz_pd_list)
-#define OZ_IOCTL_SET_ACTIVE_PD	_IOW(OZ_IOCTL_MAGIC, 1, struct oz_mac_addr)
-#define OZ_IOCTL_GET_ACTIVE_PD	_IOR(OZ_IOCTL_MAGIC, 2, struct oz_mac_addr)
-#define OZ_IOCTL_ADD_BINDING	_IOW(OZ_IOCTL_MAGIC, 3, struct oz_binding_info)
-#define OZ_IOCTL_REMOVE_BINDING	_IOW(OZ_IOCTL_MAGIC, 4, struct oz_binding_info)
-#define OZ_IOCTL_MAX		5
-
-
-#endif /* _OZAPPIF_H */
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
deleted file mode 100644
index da0e1fd..0000000
--- a/drivers/staging/ozwpan/ozcdev.c
+++ /dev/null
@@ -1,554 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozappif.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozcdev.h"
-
-#define OZ_RD_BUF_SZ	256
-struct oz_cdev {
-	dev_t devnum;
-	struct cdev cdev;
-	wait_queue_head_t rdq;
-	spinlock_t lock;
-	u8 active_addr[ETH_ALEN];
-	struct oz_pd *active_pd;
-};
-
-/* Per PD context for the serial service stored in the PD. */
-struct oz_serial_ctx {
-	atomic_t ref_count;
-	u8 tx_seq_num;
-	u8 rx_seq_num;
-	u8 rd_buf[OZ_RD_BUF_SZ];
-	int rd_in;
-	int rd_out;
-};
-
-static struct oz_cdev g_cdev;
-static struct class *g_oz_class;
-
-/*
- * Context: process and softirq
- */
-static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
-{
-	struct oz_serial_ctx *ctx;
-
-	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-	ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL];
-	if (ctx)
-		atomic_inc(&ctx->ref_count);
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-	return ctx;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
-{
-	if (atomic_dec_and_test(&ctx->ref_count)) {
-		oz_dbg(ON, "Dealloc serial context\n");
-		kfree(ctx);
-	}
-}
-
-/*
- * Context: process
- */
-static int oz_cdev_open(struct inode *inode, struct file *filp)
-{
-	struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
-
-	oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode));
-
-	filp->private_data = dev;
-	return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_cdev_release(struct inode *inode, struct file *filp)
-{
-	return 0;
-}
-
-/*
- * Context: process
- */
-static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
-		loff_t *fpos)
-{
-	int n;
-	int ix;
-
-	struct oz_pd *pd;
-	struct oz_serial_ctx *ctx;
-
-	spin_lock_bh(&g_cdev.lock);
-	pd = g_cdev.active_pd;
-	if (pd)
-		oz_pd_get(pd);
-	spin_unlock_bh(&g_cdev.lock);
-	if (pd == NULL)
-		return -1;
-	ctx = oz_cdev_claim_ctx(pd);
-	if (ctx == NULL)
-		goto out2;
-	n = ctx->rd_in - ctx->rd_out;
-	if (n < 0)
-		n += OZ_RD_BUF_SZ;
-	if (count > n)
-		count = n;
-	ix = ctx->rd_out;
-	n = OZ_RD_BUF_SZ - ix;
-	if (n > count)
-		n = count;
-	if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
-		count = 0;
-		goto out1;
-	}
-	ix += n;
-	if (ix == OZ_RD_BUF_SZ)
-		ix = 0;
-	if (n < count) {
-		if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
-			count = 0;
-			goto out1;
-		}
-		ix = count-n;
-	}
-	ctx->rd_out = ix;
-out1:
-	oz_cdev_release_ctx(ctx);
-out2:
-	oz_pd_put(pd);
-	return count;
-}
-
-/*
- * Context: process
- */
-static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
-		size_t count, loff_t *fpos)
-{
-	struct oz_pd *pd;
-	struct oz_elt_buf *eb;
-	struct oz_elt_info *ei;
-	struct oz_elt *elt;
-	struct oz_app_hdr *app_hdr;
-	struct oz_serial_ctx *ctx;
-
-	if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
-		return -EINVAL;
-
-	spin_lock_bh(&g_cdev.lock);
-	pd = g_cdev.active_pd;
-	if (pd)
-		oz_pd_get(pd);
-	spin_unlock_bh(&g_cdev.lock);
-	if (pd == NULL)
-		return -ENXIO;
-	if (!(pd->state & OZ_PD_S_CONNECTED))
-		return -EAGAIN;
-	eb = &pd->elt_buff;
-	ei = oz_elt_info_alloc(eb);
-	if (ei == NULL) {
-		count = 0;
-		goto out;
-	}
-	elt = (struct oz_elt *)ei->data;
-	app_hdr = (struct oz_app_hdr *)(elt+1);
-	elt->length = sizeof(struct oz_app_hdr) + count;
-	elt->type = OZ_ELT_APP_DATA;
-	ei->app_id = OZ_APPID_SERIAL;
-	ei->length = elt->length + sizeof(struct oz_elt);
-	app_hdr->app_id = OZ_APPID_SERIAL;
-	if (copy_from_user(app_hdr+1, buf, count))
-		goto out;
-	spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-	ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL];
-	if (ctx) {
-		app_hdr->elt_seq_num = ctx->tx_seq_num++;
-		if (ctx->tx_seq_num == 0)
-			ctx->tx_seq_num = 1;
-		spin_lock(&eb->lock);
-		if (oz_queue_elt_info(eb, 0, 0, ei) == 0)
-			ei = NULL;
-		spin_unlock(&eb->lock);
-	}
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-out:
-	if (ei) {
-		count = 0;
-		spin_lock_bh(&eb->lock);
-		oz_elt_info_free(eb, ei);
-		spin_unlock_bh(&eb->lock);
-	}
-	oz_pd_put(pd);
-	return count;
-}
-
-/*
- * Context: process
- */
-static int oz_set_active_pd(const u8 *addr)
-{
-	int rc = 0;
-	struct oz_pd *pd;
-	struct oz_pd *old_pd;
-
-	pd = oz_pd_find(addr);
-	if (pd) {
-		spin_lock_bh(&g_cdev.lock);
-		ether_addr_copy(g_cdev.active_addr, addr);
-		old_pd = g_cdev.active_pd;
-		g_cdev.active_pd = pd;
-		spin_unlock_bh(&g_cdev.lock);
-		if (old_pd)
-			oz_pd_put(old_pd);
-	} else {
-		if (is_zero_ether_addr(addr)) {
-			spin_lock_bh(&g_cdev.lock);
-			pd = g_cdev.active_pd;
-			g_cdev.active_pd = NULL;
-			memset(g_cdev.active_addr, 0,
-				sizeof(g_cdev.active_addr));
-			spin_unlock_bh(&g_cdev.lock);
-			if (pd)
-				oz_pd_put(pd);
-		} else {
-			rc = -1;
-		}
-	}
-	return rc;
-}
-
-/*
- * Context: process
- */
-static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
-			  unsigned long arg)
-{
-	int rc = 0;
-
-	if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
-		return -ENOTTY;
-	if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
-		return -ENOTTY;
-	if (_IOC_DIR(cmd) & _IOC_READ)
-		rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
-			_IOC_SIZE(cmd));
-	else if (_IOC_DIR(cmd) & _IOC_WRITE)
-		rc = !access_ok(VERIFY_READ, (void __user *)arg,
-			_IOC_SIZE(cmd));
-	if (rc)
-		return -EFAULT;
-	switch (cmd) {
-	case OZ_IOCTL_GET_PD_LIST: {
-			struct oz_pd_list list;
-
-			oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n");
-			memset(&list, 0, sizeof(list));
-			list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
-			if (copy_to_user((void __user *)arg, &list,
-				sizeof(list)))
-				return -EFAULT;
-		}
-		break;
-	case OZ_IOCTL_SET_ACTIVE_PD: {
-			u8 addr[ETH_ALEN];
-
-			oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n");
-			if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
-				return -EFAULT;
-			rc = oz_set_active_pd(addr);
-		}
-		break;
-	case OZ_IOCTL_GET_ACTIVE_PD: {
-			u8 addr[ETH_ALEN];
-
-			oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
-			spin_lock_bh(&g_cdev.lock);
-			ether_addr_copy(addr, g_cdev.active_addr);
-			spin_unlock_bh(&g_cdev.lock);
-			if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
-				return -EFAULT;
-		}
-		break;
-	case OZ_IOCTL_ADD_BINDING:
-	case OZ_IOCTL_REMOVE_BINDING: {
-			struct oz_binding_info b;
-
-			if (copy_from_user(&b, (void __user *)arg,
-				sizeof(struct oz_binding_info))) {
-				return -EFAULT;
-			}
-			/* Make sure name is null terminated. */
-			b.name[OZ_MAX_BINDING_LEN-1] = 0;
-			if (cmd == OZ_IOCTL_ADD_BINDING)
-				oz_binding_add(b.name);
-			else
-				oz_binding_remove(b.name);
-		}
-		break;
-	}
-	return rc;
-}
-
-/*
- * Context: process
- */
-static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
-{
-	unsigned int ret = 0;
-	struct oz_cdev *dev = filp->private_data;
-
-	oz_dbg(ON, "Poll called wait = %p\n", wait);
-	spin_lock_bh(&dev->lock);
-	if (dev->active_pd) {
-		struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
-
-		if (ctx) {
-			if (ctx->rd_in != ctx->rd_out)
-				ret |= POLLIN | POLLRDNORM;
-			oz_cdev_release_ctx(ctx);
-		}
-	}
-	spin_unlock_bh(&dev->lock);
-	if (wait)
-		poll_wait(filp, &dev->rdq, wait);
-	return ret;
-}
-
-/*
- */
-static const struct file_operations oz_fops = {
-	.owner =	THIS_MODULE,
-	.open =		oz_cdev_open,
-	.release =	oz_cdev_release,
-	.read =		oz_cdev_read,
-	.write =	oz_cdev_write,
-	.unlocked_ioctl = oz_cdev_ioctl,
-	.poll =		oz_cdev_poll
-};
-
-/*
- * Context: process
- */
-int oz_cdev_register(void)
-{
-	int err;
-	struct device *dev;
-
-	memset(&g_cdev, 0, sizeof(g_cdev));
-	err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
-	if (err < 0)
-		return err;
-	oz_dbg(ON, "Alloc dev number %d:%d\n",
-	       MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum));
-	cdev_init(&g_cdev.cdev, &oz_fops);
-	g_cdev.cdev.owner = THIS_MODULE;
-	spin_lock_init(&g_cdev.lock);
-	init_waitqueue_head(&g_cdev.rdq);
-	err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
-	if (err < 0) {
-		oz_dbg(ON, "Failed to add cdev\n");
-		goto unregister;
-	}
-	g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
-	if (IS_ERR(g_oz_class)) {
-		oz_dbg(ON, "Failed to register ozmo_wpan class\n");
-		err = PTR_ERR(g_oz_class);
-		goto delete;
-	}
-	dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
-	if (IS_ERR(dev)) {
-		oz_dbg(ON, "Failed to create sysfs entry for cdev\n");
-		err = PTR_ERR(dev);
-		goto delete;
-	}
-	return 0;
-
-delete:
-	cdev_del(&g_cdev.cdev);
-unregister:
-	unregister_chrdev_region(g_cdev.devnum, 1);
-	return err;
-}
-
-/*
- * Context: process
- */
-int oz_cdev_deregister(void)
-{
-	cdev_del(&g_cdev.cdev);
-	unregister_chrdev_region(g_cdev.devnum, 1);
-	if (g_oz_class) {
-		device_destroy(g_oz_class, g_cdev.devnum);
-		class_destroy(g_oz_class);
-	}
-	return 0;
-}
-
-/*
- * Context: process
- */
-int oz_cdev_init(void)
-{
-	oz_app_enable(OZ_APPID_SERIAL, 1);
-	return 0;
-}
-
-/*
- * Context: process
- */
-void oz_cdev_term(void)
-{
-	oz_app_enable(OZ_APPID_SERIAL, 0);
-}
-
-/*
- * Context: softirq-serialized
- */
-int oz_cdev_start(struct oz_pd *pd, int resume)
-{
-	struct oz_serial_ctx *ctx;
-	struct oz_serial_ctx *old_ctx;
-
-	if (resume) {
-		oz_dbg(ON, "Serial service resumed\n");
-		return 0;
-	}
-	ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
-	if (ctx == NULL)
-		return -ENOMEM;
-	atomic_set(&ctx->ref_count, 1);
-	ctx->tx_seq_num = 1;
-	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-	old_ctx = pd->app_ctx[OZ_APPID_SERIAL];
-	if (old_ctx) {
-		spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-		kfree(ctx);
-	} else {
-		pd->app_ctx[OZ_APPID_SERIAL] = ctx;
-		spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-	}
-	spin_lock(&g_cdev.lock);
-	if ((g_cdev.active_pd == NULL) &&
-		ether_addr_equal(pd->mac_addr, g_cdev.active_addr)) {
-		oz_pd_get(pd);
-		g_cdev.active_pd = pd;
-		oz_dbg(ON, "Active PD arrived\n");
-	}
-	spin_unlock(&g_cdev.lock);
-	oz_dbg(ON, "Serial service started\n");
-	return 0;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_cdev_stop(struct oz_pd *pd, int pause)
-{
-	struct oz_serial_ctx *ctx;
-
-	if (pause) {
-		oz_dbg(ON, "Serial service paused\n");
-		return;
-	}
-	spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-	ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL];
-	pd->app_ctx[OZ_APPID_SERIAL] = NULL;
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL]);
-	if (ctx)
-		oz_cdev_release_ctx(ctx);
-	spin_lock(&g_cdev.lock);
-	if (pd == g_cdev.active_pd)
-		g_cdev.active_pd = NULL;
-	else
-		pd = NULL;
-	spin_unlock(&g_cdev.lock);
-	if (pd) {
-		oz_pd_put(pd);
-		oz_dbg(ON, "Active PD departed\n");
-	}
-	oz_dbg(ON, "Serial service stopped\n");
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
-{
-	struct oz_serial_ctx *ctx;
-	struct oz_app_hdr *app_hdr;
-	u8 *data;
-	int len;
-	int space;
-	int copy_sz;
-	int ix;
-
-	ctx = oz_cdev_claim_ctx(pd);
-	if (ctx == NULL) {
-		oz_dbg(ON, "Cannot claim serial context\n");
-		return;
-	}
-
-	app_hdr = (struct oz_app_hdr *)(elt+1);
-	/* If sequence number is non-zero then check it is not a duplicate.
-	 */
-	if (app_hdr->elt_seq_num != 0) {
-		if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
-			/* Reject duplicate element. */
-			oz_dbg(ON, "Duplicate element:%02x %02x\n",
-			       app_hdr->elt_seq_num, ctx->rx_seq_num);
-			goto out;
-		}
-	}
-	ctx->rx_seq_num = app_hdr->elt_seq_num;
-	len = elt->length - sizeof(struct oz_app_hdr);
-	data = ((u8 *)(elt+1)) + sizeof(struct oz_app_hdr);
-	if (len <= 0)
-		goto out;
-	space = ctx->rd_out - ctx->rd_in - 1;
-	if (space < 0)
-		space += OZ_RD_BUF_SZ;
-	if (len > space) {
-		oz_dbg(ON, "Not enough space:%d %d\n", len, space);
-		len = space;
-	}
-	ix = ctx->rd_in;
-	copy_sz = OZ_RD_BUF_SZ - ix;
-	if (copy_sz > len)
-		copy_sz = len;
-	memcpy(&ctx->rd_buf[ix], data, copy_sz);
-	len -= copy_sz;
-	ix += copy_sz;
-	if (ix == OZ_RD_BUF_SZ)
-		ix = 0;
-	if (len) {
-		memcpy(ctx->rd_buf, data+copy_sz, len);
-		ix = len;
-	}
-	ctx->rd_in = ix;
-	wake_up(&g_cdev.rdq);
-out:
-	oz_cdev_release_ctx(ctx);
-}
diff --git a/drivers/staging/ozwpan/ozcdev.h b/drivers/staging/ozwpan/ozcdev.h
deleted file mode 100644
index dd11935..0000000
--- a/drivers/staging/ozwpan/ozcdev.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZCDEV_H
-#define _OZCDEV_H
-
-int oz_cdev_register(void);
-int oz_cdev_deregister(void);
-int oz_cdev_init(void);
-void oz_cdev_term(void);
-int oz_cdev_start(struct oz_pd *pd, int resume);
-void oz_cdev_stop(struct oz_pd *pd, int pause);
-void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt);
-
-#endif /* _OZCDEV_H */
diff --git a/drivers/staging/ozwpan/ozdbg.h b/drivers/staging/ozwpan/ozdbg.h
deleted file mode 100644
index b86a2b7..0000000
--- a/drivers/staging/ozwpan/ozdbg.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-
-#ifndef _OZDBG_H
-#define _OZDBG_H
-
-#define OZ_WANT_DBG 0
-#define OZ_WANT_VERBOSE_DBG 1
-
-#define OZ_DBG_ON		0x0
-#define OZ_DBG_STREAM		0x1
-#define OZ_DBG_URB		0x2
-#define OZ_DBG_CTRL_DETAIL	0x4
-#define OZ_DBG_HUB		0x8
-#define OZ_DBG_RX_FRAMES	0x10
-#define OZ_DBG_TX_FRAMES	0x20
-
-#define OZ_DEFAULT_DBG_MASK			\
-	(					\
-	/* OZ_DBG_STREAM | */			\
-	/* OZ_DBG_URB | */			\
-	/* OZ_DBG_CTRL_DETAIL | */		\
-	OZ_DBG_HUB |				\
-	/* OZ_DBG_RX_FRAMES | */		\
-	/* OZ_DBG_TX_FRAMES | */		\
-	0)
-
-extern unsigned int oz_dbg_mask;
-
-#define oz_want_dbg(mask)						\
-	((OZ_WANT_DBG && (OZ_DBG_##mask == OZ_DBG_ON)) ||		\
-	 (OZ_WANT_VERBOSE_DBG && (OZ_DBG_##mask & oz_dbg_mask)))
-
-#define oz_dbg(mask, fmt, ...)						\
-do {									\
-	if (oz_want_dbg(mask))						\
-		pr_debug(fmt, ##__VA_ARGS__);				\
-} while (0)
-
-#define oz_cdev_dbg(cdev, mask, fmt, ...)				\
-do {									\
-	if (oz_want_dbg(mask))						\
-		netdev_dbg((cdev)->dev, fmt, ##__VA_ARGS__);		\
-} while (0)
-
-#define oz_pd_dbg(pd, mask, fmt, ...)					\
-do {									\
-	if (oz_want_dbg(mask))						\
-		pr_debug(fmt, ##__VA_ARGS__);				\
-} while (0)
-
-#endif /* _OZDBG_H */
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
deleted file mode 100644
index 01b25da..0000000
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ /dev/null
@@ -1,252 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-
-/*
- * Context: softirq-serialized
- */
-void oz_elt_buf_init(struct oz_elt_buf *buf)
-{
-	memset(buf, 0, sizeof(struct oz_elt_buf));
-	INIT_LIST_HEAD(&buf->stream_list);
-	INIT_LIST_HEAD(&buf->order_list);
-	INIT_LIST_HEAD(&buf->isoc_list);
-	spin_lock_init(&buf->lock);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_elt_buf_term(struct oz_elt_buf *buf)
-{
-	struct oz_elt_info *ei, *n;
-
-	list_for_each_entry_safe(ei, n, &buf->isoc_list, link_order)
-		kfree(ei);
-	list_for_each_entry_safe(ei, n, &buf->order_list, link_order)
-		kfree(ei);
-}
-
-/*
- * Context: softirq or process
- */
-struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
-{
-	struct oz_elt_info *ei;
-
-	ei = kmem_cache_zalloc(oz_elt_info_cache, GFP_ATOMIC);
-	if (ei) {
-		INIT_LIST_HEAD(&ei->link);
-		INIT_LIST_HEAD(&ei->link_order);
-	}
-	return ei;
-}
-
-/*
- * Precondition: oz_elt_buf.lock must be held.
- * Context: softirq or process
- */
-void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
-{
-	if (ei)
-		kmem_cache_free(oz_elt_info_cache, ei);
-}
-
-/*------------------------------------------------------------------------------
- * Context: softirq
- */
-void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
-{
-	struct oz_elt_info *ei, *n;
-
-	spin_lock_bh(&buf->lock);
-	list_for_each_entry_safe(ei, n, list->next, link)
-		oz_elt_info_free(buf, ei);
-	spin_unlock_bh(&buf->lock);
-}
-
-int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
-{
-	struct oz_elt_stream *st;
-
-	oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
-
-	st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC);
-	if (st == NULL)
-		return -ENOMEM;
-	atomic_set(&st->ref_count, 1);
-	st->id = id;
-	st->max_buf_count = max_buf_count;
-	INIT_LIST_HEAD(&st->elt_list);
-	spin_lock_bh(&buf->lock);
-	list_add_tail(&st->link, &buf->stream_list);
-	spin_unlock_bh(&buf->lock);
-	return 0;
-}
-
-int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
-{
-	struct list_head *e, *n;
-	struct oz_elt_stream *st = NULL;
-
-	oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
-	spin_lock_bh(&buf->lock);
-	list_for_each(e, &buf->stream_list) {
-		st = list_entry(e, struct oz_elt_stream, link);
-		if (st->id == id) {
-			list_del(e);
-			break;
-		}
-		st = NULL;
-	}
-	if (!st) {
-		spin_unlock_bh(&buf->lock);
-		return -1;
-	}
-	list_for_each_safe(e, n, &st->elt_list) {
-		struct oz_elt_info *ei =
-			list_entry(e, struct oz_elt_info, link);
-		list_del_init(&ei->link);
-		list_del_init(&ei->link_order);
-		st->buf_count -= ei->length;
-		oz_dbg(STREAM, "Stream down: %d %d %d\n",
-		       st->buf_count, ei->length, atomic_read(&st->ref_count));
-		oz_elt_stream_put(st);
-		oz_elt_info_free(buf, ei);
-	}
-	spin_unlock_bh(&buf->lock);
-	oz_elt_stream_put(st);
-	return 0;
-}
-
-void oz_elt_stream_get(struct oz_elt_stream *st)
-{
-	atomic_inc(&st->ref_count);
-}
-
-void oz_elt_stream_put(struct oz_elt_stream *st)
-{
-	if (atomic_dec_and_test(&st->ref_count)) {
-		oz_dbg(ON, "Stream destroyed\n");
-		kfree(st);
-	}
-}
-
-/*
- * Precondition: Element buffer lock must be held.
- * If this function fails the caller is responsible for deallocating the elt
- * info structure.
- */
-int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
-	struct oz_elt_info *ei)
-{
-	struct oz_elt_stream *st = NULL;
-	struct list_head *e;
-
-	if (id) {
-		list_for_each(e, &buf->stream_list) {
-			st = list_entry(e, struct oz_elt_stream, link);
-			if (st->id == id)
-				break;
-		}
-		if (e == &buf->stream_list) {
-			/* Stream specified but stream not known so fail.
-			 * Caller deallocates element info. */
-			return -1;
-		}
-	}
-	if (st) {
-		/* If this is an ISOC fixed element that needs a frame number
-		 * then insert that now. Earlier we stored the unit count in
-		 * this field.
-		 */
-		struct oz_isoc_fixed *body = (struct oz_isoc_fixed *)
-			&ei->data[sizeof(struct oz_elt)];
-		if ((body->app_id == OZ_APPID_USB) && (body->type
-			== OZ_USB_ENDPOINT_DATA) &&
-			(body->format == OZ_DATA_F_ISOC_FIXED)) {
-			u8 unit_count = body->frame_number;
-
-			body->frame_number = st->frame_number;
-			st->frame_number += unit_count;
-		}
-		/* Claim stream and update accounts */
-		oz_elt_stream_get(st);
-		ei->stream = st;
-		st->buf_count += ei->length;
-		/* Add to list in stream. */
-		list_add_tail(&ei->link, &st->elt_list);
-		oz_dbg(STREAM, "Stream up: %d %d\n", st->buf_count, ei->length);
-		/* Check if we have too much buffered for this stream. If so
-		 * start dropping elements until we are back in bounds.
-		 */
-		while ((st->buf_count > st->max_buf_count) &&
-			!list_empty(&st->elt_list)) {
-			struct oz_elt_info *ei2 =
-				list_first_entry(&st->elt_list,
-					struct oz_elt_info, link);
-			list_del_init(&ei2->link);
-			list_del_init(&ei2->link_order);
-			st->buf_count -= ei2->length;
-			oz_elt_info_free(buf, ei2);
-			oz_elt_stream_put(st);
-		}
-	}
-	list_add_tail(&ei->link_order, isoc ?
-		&buf->isoc_list : &buf->order_list);
-	return 0;
-}
-
-int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
-		unsigned max_len, struct list_head *list)
-{
-	int count = 0;
-	struct list_head *el;
-	struct oz_elt_info *ei, *n;
-
-	spin_lock_bh(&buf->lock);
-	if (isoc)
-		el = &buf->isoc_list;
-	else
-		el = &buf->order_list;
-
-	list_for_each_entry_safe(ei, n, el, link_order) {
-		if ((*len + ei->length) <= max_len) {
-			struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)
-				&ei->data[sizeof(struct oz_elt)];
-			app_hdr->elt_seq_num = buf->tx_seq_num[ei->app_id]++;
-			if (buf->tx_seq_num[ei->app_id] == 0)
-				buf->tx_seq_num[ei->app_id] = 1;
-			*len += ei->length;
-			list_del(&ei->link);
-			list_del(&ei->link_order);
-			if (ei->stream) {
-				ei->stream->buf_count -= ei->length;
-				oz_dbg(STREAM, "Stream down: %d %d\n",
-				       ei->stream->buf_count, ei->length);
-				oz_elt_stream_put(ei->stream);
-				ei->stream = NULL;
-			}
-			INIT_LIST_HEAD(&ei->link_order);
-			list_add_tail(&ei->link, list);
-			count++;
-		} else {
-			break;
-		}
-	}
-	spin_unlock_bh(&buf->lock);
-	return count;
-}
-
-int oz_are_elts_available(struct oz_elt_buf *buf)
-{
-	return !list_empty(&buf->order_list);
-}
diff --git a/drivers/staging/ozwpan/ozeltbuf.h b/drivers/staging/ozwpan/ozeltbuf.h
deleted file mode 100644
index f09f5fe..0000000
--- a/drivers/staging/ozwpan/ozeltbuf.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZELTBUF_H
-#define _OZELTBUF_H
-
-#include "ozprotocol.h"
-
-/*-----------------------------------------------------------------------------
- */
-struct oz_pd;
-typedef void (*oz_elt_callback_t)(struct oz_pd *pd, long context);
-
-struct oz_elt_stream {
-	struct list_head link;
-	struct list_head elt_list;
-	atomic_t ref_count;
-	unsigned buf_count;
-	unsigned max_buf_count;
-	u8 frame_number;
-	u8 id;
-};
-
-#define OZ_MAX_ELT_PAYLOAD	255
-struct oz_elt_info {
-	struct list_head link;
-	struct list_head link_order;
-	u8 flags;
-	u8 app_id;
-	oz_elt_callback_t callback;
-	long context;
-	struct oz_elt_stream *stream;
-	u8 data[sizeof(struct oz_elt) + OZ_MAX_ELT_PAYLOAD];
-	int length;
-};
-/* Flags values */
-#define OZ_EI_F_MARKED		0x1
-
-struct oz_elt_buf {
-	spinlock_t lock;
-	struct list_head stream_list;
-	struct list_head order_list;
-	struct list_head isoc_list;
-	u8 tx_seq_num[OZ_NB_APPS];
-};
-
-void oz_elt_buf_init(struct oz_elt_buf *buf);
-void oz_elt_buf_term(struct oz_elt_buf *buf);
-struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf);
-void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei);
-void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list);
-int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count);
-int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id);
-void oz_elt_stream_get(struct oz_elt_stream *st);
-void oz_elt_stream_put(struct oz_elt_stream *st);
-int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
-		struct oz_elt_info *ei);
-int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
-		unsigned max_len, struct list_head *list);
-int oz_are_elts_available(struct oz_elt_buf *buf);
-
-#endif /* _OZELTBUF_H */
-
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
deleted file mode 100644
index 784b5ec..0000000
--- a/drivers/staging/ozwpan/ozhcd.c
+++ /dev/null
@@ -1,2301 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- *
- * This file provides the implementation of a USB host controller device that
- * does not have any associated hardware. Instead the virtual device is
- * connected to the WiFi network and emulates the operation of a USB hcd by
- * receiving and sending network frames.
- * Note:
- * We take great pains to reduce the amount of code where interrupts need to be
- * disabled and in this respect we are different from standard HCD's. In
- * particular we don't want in_irq() code bleeding over to the protocol side of
- * the driver.
- * The troublesome functions are the urb enqueue and dequeue functions both of
- * which can be called in_irq(). So for these functions we put the urbs into a
- * queue and request a tasklet to process them. This means that a spinlock with
- * interrupts disabled must be held for insertion and removal but most code is
- * is in tasklet or soft irq context. The lock that protects this list is called
- * the tasklet lock and serves the purpose of the 'HCD lock' which must be held
- * when calling the following functions.
- *   usb_hcd_link_urb_to_ep()
- *   usb_hcd_unlink_urb_from_ep()
- *   usb_hcd_flush_endpoint()
- *   usb_hcd_check_unlink_urb()
- * -----------------------------------------------------------------------------
- */
-#include <linux/platform_device.h>
-#include <linux/usb.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include "linux/usb/hcd.h"
-#include <asm/unaligned.h>
-#include "ozdbg.h"
-#include "ozusbif.h"
-#include "ozurbparanoia.h"
-#include "ozhcd.h"
-
-/*
- * Number of units of buffering to capture for an isochronous IN endpoint before
- * allowing data to be indicated up.
- */
-#define OZ_IN_BUFFERING_UNITS	100
-
-/* Name of our platform device.
- */
-#define OZ_PLAT_DEV_NAME	"ozwpan"
-
-/*EP0 timeout before ep0 request is again added to TX queue. (13*8 = 98mSec)
- */
-#define EP0_TIMEOUT_COUNTER 13
-
-/* Debounce time HCD driver should wait before unregistering.
- */
-#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
-
-/*
- * Used to link urbs together and also store some status information for each
- * urb.
- * A cache of these are kept in a pool to reduce number of calls to kmalloc.
- */
-struct oz_urb_link {
-	struct list_head link;
-	struct urb *urb;
-	struct oz_port *port;
-	u8 req_id;
-	u8 ep_num;
-	unsigned submit_counter;
-};
-
-static struct kmem_cache *oz_urb_link_cache;
-
-/* Holds state information about a USB endpoint.
- */
-#define OZ_EP_BUFFER_SIZE_ISOC  (1024 * 24)
-#define OZ_EP_BUFFER_SIZE_INT   512
-struct oz_endpoint {
-	struct list_head urb_list;	/* List of oz_urb_link items. */
-	struct list_head link;		/* For isoc ep, links in to isoc
-					   lists of oz_port. */
-	struct timespec timestamp;
-	int credit;
-	int credit_ceiling;
-	u8 ep_num;
-	u8 attrib;
-	u8 *buffer;
-	int buffer_size;
-	int in_ix;
-	int out_ix;
-	int buffered_units;
-	unsigned flags;
-	int start_frame;
-};
-
-/* Bits in the flags field. */
-#define OZ_F_EP_BUFFERING	0x1
-#define OZ_F_EP_HAVE_STREAM	0x2
-
-/* Holds state information about a USB interface.
- */
-struct oz_interface {
-	unsigned ep_mask;
-	u8 alt;
-};
-
-/* Holds state information about an hcd port.
- */
-#define OZ_NB_ENDPOINTS	16
-struct oz_port {
-	unsigned flags;
-	unsigned status;
-	void *hpd;
-	struct oz_hcd *ozhcd;
-	spinlock_t port_lock;
-	u8 bus_addr;
-	u8 next_req_id;
-	u8 config_num;
-	int num_iface;
-	struct oz_interface *iface;
-	struct oz_endpoint *out_ep[OZ_NB_ENDPOINTS];
-	struct oz_endpoint *in_ep[OZ_NB_ENDPOINTS];
-	struct list_head isoc_out_ep;
-	struct list_head isoc_in_ep;
-};
-
-#define OZ_PORT_F_PRESENT	0x1
-#define OZ_PORT_F_CHANGED	0x2
-#define OZ_PORT_F_DYING		0x4
-
-/* Data structure in the private context area of struct usb_hcd.
- */
-#define OZ_NB_PORTS	8
-struct oz_hcd {
-	spinlock_t hcd_lock;
-	struct list_head urb_pending_list;
-	struct list_head urb_cancel_list;
-	struct list_head orphanage;
-	int conn_port; /* Port that is currently connecting, -1 if none.*/
-	struct oz_port ports[OZ_NB_PORTS];
-	uint flags;
-	struct usb_hcd *hcd;
-};
-
-/* Bits in flags field.
- */
-#define OZ_HDC_F_SUSPENDED	0x1
-
-/*
- * Static function prototypes.
- */
-static int oz_hcd_start(struct usb_hcd *hcd);
-static void oz_hcd_stop(struct usb_hcd *hcd);
-static void oz_hcd_shutdown(struct usb_hcd *hcd);
-static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
-				gfp_t mem_flags);
-static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
-static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
-				struct usb_host_endpoint *ep);
-static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
-				struct usb_host_endpoint *ep);
-static int oz_hcd_get_frame_number(struct usb_hcd *hcd);
-static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf);
-static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
-				u16 windex, char *buf, u16 wlength);
-static int oz_hcd_bus_suspend(struct usb_hcd *hcd);
-static int oz_hcd_bus_resume(struct usb_hcd *hcd);
-static int oz_plat_probe(struct platform_device *dev);
-static int oz_plat_remove(struct platform_device *dev);
-static void oz_plat_shutdown(struct platform_device *dev);
-static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg);
-static int oz_plat_resume(struct platform_device *dev);
-static void oz_urb_process_tasklet(unsigned long unused);
-static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
-		struct oz_port *port, struct usb_host_config *config,
-		gfp_t mem_flags);
-static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
-				struct oz_port *port);
-static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
-			struct oz_port *port,
-			struct usb_host_interface *intf, gfp_t mem_flags);
-static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
-			struct oz_port *port, int if_ix);
-static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
-		gfp_t mem_flags);
-static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
-		struct urb *urb);
-static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
-
-/*
- * Static external variables.
- */
-static struct platform_device *g_plat_dev;
-static struct oz_hcd *g_ozhcd;
-static DEFINE_SPINLOCK(g_hcdlock);	/* Guards g_ozhcd. */
-static const char g_hcd_name[] = "Ozmo WPAN";
-static DEFINE_SPINLOCK(g_tasklet_lock);
-static struct tasklet_struct g_urb_process_tasklet;
-static struct tasklet_struct g_urb_cancel_tasklet;
-static atomic_t g_pending_urbs = ATOMIC_INIT(0);
-static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
-static const struct hc_driver g_oz_hc_drv = {
-	.description =		g_hcd_name,
-	.product_desc =		"Ozmo Devices WPAN",
-	.hcd_priv_size =	sizeof(struct oz_hcd),
-	.flags =		HCD_USB11,
-	.start =		oz_hcd_start,
-	.stop =			oz_hcd_stop,
-	.shutdown =		oz_hcd_shutdown,
-	.urb_enqueue =		oz_hcd_urb_enqueue,
-	.urb_dequeue =		oz_hcd_urb_dequeue,
-	.endpoint_disable =	oz_hcd_endpoint_disable,
-	.endpoint_reset =	oz_hcd_endpoint_reset,
-	.get_frame_number =	oz_hcd_get_frame_number,
-	.hub_status_data =	oz_hcd_hub_status_data,
-	.hub_control =		oz_hcd_hub_control,
-	.bus_suspend =		oz_hcd_bus_suspend,
-	.bus_resume =		oz_hcd_bus_resume,
-};
-
-static struct platform_driver g_oz_plat_drv = {
-	.probe = oz_plat_probe,
-	.remove = oz_plat_remove,
-	.shutdown = oz_plat_shutdown,
-	.suspend = oz_plat_suspend,
-	.resume = oz_plat_resume,
-	.driver = {
-		.name = OZ_PLAT_DEV_NAME,
-	},
-};
-
-/*
- * Gets our private context area (which is of type struct oz_hcd) from the
- * usb_hcd structure.
- * Context: any
- */
-static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
-{
-	return (struct oz_hcd *)hcd->hcd_priv;
-}
-
-/*
- * Searches list of ports to find the index of the one with a specified  USB
- * bus address. If none of the ports has the bus address then the connection
- * port is returned, if there is one or -1 otherwise.
- * Context: any
- */
-static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
-{
-	int i;
-
-	for (i = 0; i < OZ_NB_PORTS; i++) {
-		if (ozhcd->ports[i].bus_addr == bus_addr)
-			return i;
-	}
-	return ozhcd->conn_port;
-}
-
-/*
- * Context: any
- */
-static struct oz_urb_link *oz_alloc_urb_link(void)
-{
-	return kmem_cache_alloc(oz_urb_link_cache, GFP_ATOMIC);
-}
-
-/*
- * Context: any
- */
-static void oz_free_urb_link(struct oz_urb_link *urbl)
-{
-	if (!urbl)
-		return;
-
-	kmem_cache_free(oz_urb_link_cache, urbl);
-}
-
-/*
- * Allocates endpoint structure and optionally a buffer. If a buffer is
- * allocated it immediately follows the endpoint structure.
- * Context: softirq
- */
-static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
-{
-	struct oz_endpoint *ep;
-
-	ep = kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
-	if (!ep)
-		return NULL;
-
-	INIT_LIST_HEAD(&ep->urb_list);
-	INIT_LIST_HEAD(&ep->link);
-	ep->credit = -1;
-	if (buffer_size) {
-		ep->buffer_size = buffer_size;
-		ep->buffer = (u8 *)(ep+1);
-	}
-
-	return ep;
-}
-
-/*
- * Pre-condition: Must be called with g_tasklet_lock held and interrupts
- * disabled.
- * Context: softirq or process
- */
-static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd,
-		struct urb *urb)
-{
-	struct oz_urb_link *urbl;
-
-	list_for_each_entry(urbl, &ozhcd->urb_cancel_list, link) {
-		if (urb == urbl->urb) {
-			list_del_init(&urbl->link);
-			return urbl;
-		}
-	}
-	return NULL;
-}
-
-/*
- * This is called when we have finished processing an urb. It unlinks it from
- * the ep and returns it to the core.
- * Context: softirq or process
- */
-static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
-		int status)
-{
-	struct oz_hcd *ozhcd = oz_hcd_private(hcd);
-	unsigned long irq_state;
-	struct oz_urb_link *cancel_urbl;
-
-	spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	usb_hcd_unlink_urb_from_ep(hcd, urb);
-	/* Clear hcpriv which will prevent it being put in the cancel list
-	 * in the event that an attempt is made to cancel it.
-	 */
-	urb->hcpriv = NULL;
-	/* Walk the cancel list in case the urb is already sitting there.
-	 * Since we process the cancel list in a tasklet rather than in
-	 * the dequeue function this could happen.
-	 */
-	cancel_urbl = oz_uncancel_urb(ozhcd, urb);
-	/* Note: we release lock but do not enable local irqs.
-	 * It appears that usb_hcd_giveback_urb() expects irqs to be disabled,
-	 * or at least other host controllers disable interrupts at this point
-	 * so we do the same. We must, however, release the lock otherwise a
-	 * deadlock will occur if an urb is submitted to our driver in the urb
-	 * completion function. Because we disable interrupts it is possible
-	 * that the urb_enqueue function can be called with them disabled.
-	 */
-	spin_unlock(&g_tasklet_lock);
-	if (oz_forget_urb(urb)) {
-		oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
-	} else {
-		atomic_dec(&g_pending_urbs);
-		usb_hcd_giveback_urb(hcd, urb, status);
-	}
-	spin_lock(&g_tasklet_lock);
-	spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-	oz_free_urb_link(cancel_urbl);
-}
-
-/*
- * Deallocates an endpoint including deallocating any associated stream and
- * returning any queued urbs to the core.
- * Context: softirq
- */
-static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
-{
-	if (port) {
-		LIST_HEAD(list);
-		struct oz_hcd *ozhcd = port->ozhcd;
-
-		if (ep->flags & OZ_F_EP_HAVE_STREAM)
-			oz_usb_stream_delete(port->hpd, ep->ep_num);
-		/* Transfer URBs to the orphanage while we hold the lock. */
-		spin_lock_bh(&ozhcd->hcd_lock);
-		/* Note: this works even if ep->urb_list is empty.*/
-		list_replace_init(&ep->urb_list, &list);
-		/* Put the URBs in the orphanage. */
-		list_splice_tail(&list, &ozhcd->orphanage);
-		spin_unlock_bh(&ozhcd->hcd_lock);
-	}
-	oz_dbg(ON, "Freeing endpoint memory\n");
-	kfree(ep);
-}
-
-/*
- * Context: softirq
- */
-static void oz_complete_buffered_urb(struct oz_port *port,
-			struct oz_endpoint *ep,
-			struct urb *urb)
-{
-	int data_len, available_space, copy_len;
-
-	data_len = ep->buffer[ep->out_ix];
-	if (data_len <= urb->transfer_buffer_length)
-		available_space = data_len;
-	else
-		available_space = urb->transfer_buffer_length;
-
-	if (++ep->out_ix == ep->buffer_size)
-		ep->out_ix = 0;
-	copy_len = ep->buffer_size - ep->out_ix;
-	if (copy_len >= available_space)
-		copy_len = available_space;
-	memcpy(urb->transfer_buffer, &ep->buffer[ep->out_ix], copy_len);
-
-	if (copy_len < available_space) {
-		memcpy((urb->transfer_buffer + copy_len), ep->buffer,
-						(available_space - copy_len));
-		ep->out_ix = available_space - copy_len;
-	} else {
-		ep->out_ix += copy_len;
-	}
-	urb->actual_length = available_space;
-	if (ep->out_ix == ep->buffer_size)
-		ep->out_ix = 0;
-
-	ep->buffered_units--;
-	oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
-	       available_space);
-	oz_complete_urb(port->ozhcd->hcd, urb, 0);
-}
-
-/*
- * Context: softirq
- */
-static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
-			struct urb *urb, u8 req_id)
-{
-	struct oz_urb_link *urbl;
-	struct oz_endpoint *ep = NULL;
-	int err = 0;
-
-	if (ep_addr >= OZ_NB_ENDPOINTS) {
-		oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
-		return -EINVAL;
-	}
-	urbl = oz_alloc_urb_link();
-	if (!urbl)
-		return -ENOMEM;
-	urbl->submit_counter = 0;
-	urbl->urb = urb;
-	urbl->req_id = req_id;
-	urbl->ep_num = ep_addr;
-	/* Hold lock while we insert the URB into the list within the
-	 * endpoint structure.
-	 */
-	spin_lock_bh(&port->ozhcd->hcd_lock);
-	/* If the urb has been unlinked while out of any list then
-	 * complete it now.
-	 */
-	if (urb->unlinked) {
-		spin_unlock_bh(&port->ozhcd->hcd_lock);
-		oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
-		oz_complete_urb(port->ozhcd->hcd, urb, 0);
-		oz_free_urb_link(urbl);
-		return 0;
-	}
-
-	if (in_dir)
-		ep = port->in_ep[ep_addr];
-	else
-		ep = port->out_ep[ep_addr];
-	if (!ep) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* For an interrupt endpoint, check for buffered data and complete
-	 * the urb from it if any is present.
-	 */
-	if (((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT)
-						 && ep->buffered_units > 0) {
-		oz_free_urb_link(urbl);
-		spin_unlock_bh(&port->ozhcd->hcd_lock);
-		oz_complete_buffered_urb(port, ep, urb);
-		return 0;
-	}
-
-	if (port->hpd) {
-		list_add_tail(&urbl->link, &ep->urb_list);
-		if (!in_dir && ep_addr && (ep->credit < 0)) {
-			getrawmonotonic(&ep->timestamp);
-			ep->credit = 0;
-		}
-	} else {
-		err = -EPIPE;
-	}
-out:
-	spin_unlock_bh(&port->ozhcd->hcd_lock);
-	if (err)
-		oz_free_urb_link(urbl);
-	return err;
-}
-
-/*
- * Removes an urb from the queue in the endpoint.
- * Returns 0 if it is found and -EIDRM otherwise.
- * Context: softirq
- */
-static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
-			struct urb *urb)
-{
-	struct oz_urb_link *urbl = NULL;
-	struct oz_endpoint *ep;
-
-	spin_lock_bh(&port->ozhcd->hcd_lock);
-	if (in_dir)
-		ep = port->in_ep[ep_addr];
-	else
-		ep = port->out_ep[ep_addr];
-	if (ep) {
-		struct list_head *e;
-
-		list_for_each(e, &ep->urb_list) {
-			urbl = list_entry(e, struct oz_urb_link, link);
-			if (urbl->urb == urb) {
-				list_del_init(e);
-				break;
-			}
-			urbl = NULL;
-		}
-	}
-	spin_unlock_bh(&port->ozhcd->hcd_lock);
-	oz_free_urb_link(urbl);
-	return urbl ? 0 : -EIDRM;
-}
-
-/*
- * Finds an urb given its request id.
- * Context: softirq
- */
-static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
-		u8 req_id)
-{
-	struct oz_hcd *ozhcd = port->ozhcd;
-	struct urb *urb = NULL;
-	struct oz_urb_link *urbl;
-	struct oz_endpoint *ep;
-
-	spin_lock_bh(&ozhcd->hcd_lock);
-	ep = port->out_ep[ep_ix];
-	if (ep) {
-		struct list_head *e;
-
-		list_for_each(e, &ep->urb_list) {
-			urbl = list_entry(e, struct oz_urb_link, link);
-			if (urbl->req_id == req_id) {
-				urb = urbl->urb;
-				list_del_init(e);
-				break;
-			}
-		}
-	}
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	/* If urb is non-NULL then we must have an urb link to delete.
-	 */
-	if (urb)
-		oz_free_urb_link(urbl);
-	return urb;
-}
-
-/*
- * Pre-condition: Port lock must be held.
- * Context: softirq
- */
-static void oz_acquire_port(struct oz_port *port, void *hpd)
-{
-	INIT_LIST_HEAD(&port->isoc_out_ep);
-	INIT_LIST_HEAD(&port->isoc_in_ep);
-	port->flags |= OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED;
-	port->status |= USB_PORT_STAT_CONNECTION |
-			(USB_PORT_STAT_C_CONNECTION << 16);
-	oz_usb_get(hpd);
-	port->hpd = hpd;
-}
-
-/*
- * Context: softirq
- */
-static struct oz_hcd *oz_hcd_claim(void)
-{
-	struct oz_hcd *ozhcd;
-
-	spin_lock_bh(&g_hcdlock);
-	ozhcd = g_ozhcd;
-	if (ozhcd)
-		usb_get_hcd(ozhcd->hcd);
-	spin_unlock_bh(&g_hcdlock);
-	return ozhcd;
-}
-
-/*
- * Context: softirq
- */
-static inline void oz_hcd_put(struct oz_hcd *ozhcd)
-{
-	if (ozhcd)
-		usb_put_hcd(ozhcd->hcd);
-}
-
-/*
- * This is called by the protocol handler to notify that a PD has arrived.
- * We allocate a port to associate with the PD and create a structure for
- * endpoint 0. This port is made the connection port.
- * In the event that one of the other ports is already a connection port then
- * we fail.
- * TODO We should be able to do better than fail and should be able to remember
- * that this port needs configuring and make it the connection port once the
- * current connection port has been assigned an address. Collisions here are
- * probably very rare indeed.
- * Context: softirq
- */
-struct oz_port *oz_hcd_pd_arrived(void *hpd)
-{
-	int i;
-	struct oz_port *hport;
-	struct oz_hcd *ozhcd;
-	struct oz_endpoint *ep;
-
-	ozhcd = oz_hcd_claim();
-	if (!ozhcd)
-		return NULL;
-	/* Allocate an endpoint object in advance (before holding hcd lock) to
-	 * use for out endpoint 0.
-	 */
-	ep = oz_ep_alloc(0, GFP_ATOMIC);
-	if (!ep)
-		goto err_put;
-
-	spin_lock_bh(&ozhcd->hcd_lock);
-	if (ozhcd->conn_port >= 0)
-		goto err_unlock;
-
-	for (i = 0; i < OZ_NB_PORTS; i++) {
-		struct oz_port *port = &ozhcd->ports[i];
-
-		spin_lock(&port->port_lock);
-		if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
-			oz_acquire_port(port, hpd);
-			spin_unlock(&port->port_lock);
-			break;
-		}
-		spin_unlock(&port->port_lock);
-	}
-	if (i == OZ_NB_PORTS)
-		goto err_unlock;
-
-	ozhcd->conn_port = i;
-	hport = &ozhcd->ports[i];
-	hport->out_ep[0] = ep;
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
-		usb_hcd_resume_root_hub(ozhcd->hcd);
-	usb_hcd_poll_rh_status(ozhcd->hcd);
-	oz_hcd_put(ozhcd);
-
-	return hport;
-
-err_unlock:
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	oz_ep_free(NULL, ep);
-err_put:
-	oz_hcd_put(ozhcd);
-	return NULL;
-}
-
-/*
- * This is called by the protocol handler to notify that the PD has gone away.
- * We need to deallocate all resources and then request that the root hub is
- * polled. We release the reference we hold on the PD.
- * Context: softirq
- */
-void oz_hcd_pd_departed(struct oz_port *port)
-{
-	struct oz_hcd *ozhcd;
-	void *hpd;
-	struct oz_endpoint *ep = NULL;
-
-	if (port == NULL) {
-		oz_dbg(ON, "%s: port = 0\n", __func__);
-		return;
-	}
-	ozhcd = port->ozhcd;
-	if (ozhcd == NULL)
-		return;
-	/* Check if this is the connection port - if so clear it.
-	 */
-	spin_lock_bh(&ozhcd->hcd_lock);
-	if ((ozhcd->conn_port >= 0) &&
-		(port == &ozhcd->ports[ozhcd->conn_port])) {
-		oz_dbg(ON, "Clearing conn_port\n");
-		ozhcd->conn_port = -1;
-	}
-	spin_lock(&port->port_lock);
-	port->flags |= OZ_PORT_F_DYING;
-	spin_unlock(&port->port_lock);
-	spin_unlock_bh(&ozhcd->hcd_lock);
-
-	oz_clean_endpoints_for_config(ozhcd->hcd, port);
-	spin_lock_bh(&port->port_lock);
-	hpd = port->hpd;
-	port->hpd = NULL;
-	port->bus_addr = 0xff;
-	port->config_num = 0;
-	port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
-	port->flags |= OZ_PORT_F_CHANGED;
-	port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE);
-	port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
-	/* If there is an endpoint 0 then clear the pointer while we hold
-	 * the spinlock, but deallocate it after releasing the lock.
-	 */
-	if (port->out_ep[0]) {
-		ep = port->out_ep[0];
-		port->out_ep[0] = NULL;
-	}
-	spin_unlock_bh(&port->port_lock);
-	if (ep)
-		oz_ep_free(port, ep);
-	usb_hcd_poll_rh_status(ozhcd->hcd);
-	oz_usb_put(hpd);
-}
-
-/*
- * Context: softirq
- */
-void oz_hcd_pd_reset(void *hpd, void *hport)
-{
-	/* Cleanup the current configuration and report reset to the core.
-	 */
-	struct oz_port *port = hport;
-	struct oz_hcd *ozhcd = port->ozhcd;
-
-	oz_dbg(ON, "PD Reset\n");
-	spin_lock_bh(&port->port_lock);
-	port->flags |= OZ_PORT_F_CHANGED;
-	port->status |= USB_PORT_STAT_RESET;
-	port->status |= (USB_PORT_STAT_C_RESET << 16);
-	spin_unlock_bh(&port->port_lock);
-	oz_clean_endpoints_for_config(ozhcd->hcd, port);
-	usb_hcd_poll_rh_status(ozhcd->hcd);
-}
-
-/*
- * Context: softirq
- */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
-			u8 length, u16 offset, u16 total_size)
-{
-	struct oz_port *port = hport;
-	struct urb *urb;
-	int err = 0;
-
-	oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
-	       length, offset, total_size);
-	urb = oz_find_urb_by_id(port, 0, req_id);
-	if (!urb)
-		return;
-	if (status == 0) {
-		unsigned int copy_len;
-		unsigned int required_size = urb->transfer_buffer_length;
-
-		if (required_size > total_size)
-			required_size = total_size;
-		copy_len = required_size-offset;
-		if (length <= copy_len)
-			copy_len = length;
-		memcpy(urb->transfer_buffer+offset, desc, copy_len);
-		offset += copy_len;
-		if (offset < required_size) {
-			struct usb_ctrlrequest *setup =
-				(struct usb_ctrlrequest *)urb->setup_packet;
-			unsigned wvalue = le16_to_cpu(setup->wValue);
-
-			if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
-				err = -ENOMEM;
-			else if (oz_usb_get_desc_req(port->hpd, req_id,
-					setup->bRequestType, (u8)(wvalue>>8),
-					(u8)wvalue, setup->wIndex, offset,
-					required_size-offset)) {
-				oz_dequeue_ep_urb(port, 0, 0, urb);
-				err = -ENOMEM;
-			}
-			if (err == 0)
-				return;
-		}
-	}
-	urb->actual_length = total_size;
-	oz_complete_urb(port->ozhcd->hcd, urb, 0);
-}
-
-/*
- * Context: softirq
- */
-static void oz_display_conf_type(u8 t)
-{
-	switch (t) {
-	case USB_REQ_GET_STATUS:
-		oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
-		break;
-	case USB_REQ_CLEAR_FEATURE:
-		oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
-		break;
-	case USB_REQ_SET_FEATURE:
-		oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
-		break;
-	case USB_REQ_SET_ADDRESS:
-		oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
-		break;
-	case USB_REQ_GET_DESCRIPTOR:
-		oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
-		break;
-	case USB_REQ_SET_DESCRIPTOR:
-		oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
-		break;
-	case USB_REQ_GET_CONFIGURATION:
-		oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
-		break;
-	case USB_REQ_SET_CONFIGURATION:
-		oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
-		break;
-	case USB_REQ_GET_INTERFACE:
-		oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
-		break;
-	case USB_REQ_SET_INTERFACE:
-		oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
-		break;
-	case USB_REQ_SYNCH_FRAME:
-		oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
-		break;
-	}
-}
-
-/*
- * Context: softirq
- */
-static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
-		u8 rcode, u8 config_num)
-{
-	int rc = 0;
-	struct usb_hcd *hcd = port->ozhcd->hcd;
-
-	if (rcode == 0) {
-		port->config_num = config_num;
-		oz_clean_endpoints_for_config(hcd, port);
-		if (oz_build_endpoints_for_config(hcd, port,
-			&urb->dev->config[port->config_num-1], GFP_ATOMIC)) {
-			rc = -ENOMEM;
-		}
-	} else {
-		rc = -ENOMEM;
-	}
-	oz_complete_urb(hcd, urb, rc);
-}
-
-/*
- * Context: softirq
- */
-static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
-		u8 rcode, u8 if_num, u8 alt)
-{
-	struct usb_hcd *hcd = port->ozhcd->hcd;
-	int rc = 0;
-
-	if ((rcode == 0) && (port->config_num > 0)) {
-		struct usb_host_config *config;
-		struct usb_host_interface *intf;
-
-		oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
-		oz_clean_endpoints_for_interface(hcd, port, if_num);
-		config = &urb->dev->config[port->config_num-1];
-		intf = &config->intf_cache[if_num]->altsetting[alt];
-		if (oz_build_endpoints_for_interface(hcd, port, intf,
-			GFP_ATOMIC))
-			rc = -ENOMEM;
-		else
-			port->iface[if_num].alt = alt;
-	} else {
-		rc = -ENOMEM;
-	}
-	oz_complete_urb(hcd, urb, rc);
-}
-
-/*
- * Context: softirq
- */
-void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
-	int data_len)
-{
-	struct oz_port *port = hport;
-	struct urb *urb;
-	struct usb_ctrlrequest *setup;
-	struct usb_hcd *hcd = port->ozhcd->hcd;
-	unsigned windex;
-	unsigned wvalue;
-
-	oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
-	urb = oz_find_urb_by_id(port, 0, req_id);
-	if (!urb) {
-		oz_dbg(ON, "URB not found\n");
-		return;
-	}
-	setup = (struct usb_ctrlrequest *)urb->setup_packet;
-	windex = le16_to_cpu(setup->wIndex);
-	wvalue = le16_to_cpu(setup->wValue);
-	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-		/* Standard requests */
-		oz_display_conf_type(setup->bRequest);
-		switch (setup->bRequest) {
-		case USB_REQ_SET_CONFIGURATION:
-			oz_hcd_complete_set_config(port, urb, rcode,
-				(u8)wvalue);
-			break;
-		case USB_REQ_SET_INTERFACE:
-			oz_hcd_complete_set_interface(port, urb, rcode,
-				(u8)windex, (u8)wvalue);
-			break;
-		default:
-			oz_complete_urb(hcd, urb, 0);
-		}
-
-	} else {
-		int copy_len;
-
-		oz_dbg(ON, "VENDOR-CLASS - cnf\n");
-		if (data_len) {
-			if (data_len <= urb->transfer_buffer_length)
-				copy_len = data_len;
-			else
-				copy_len = urb->transfer_buffer_length;
-			memcpy(urb->transfer_buffer, data, copy_len);
-			urb->actual_length = copy_len;
-		}
-		oz_complete_urb(hcd, urb, 0);
-	}
-}
-
-/*
- * Context: softirq-serialized
- */
-static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
-			      int data_len)
-{
-	int space;
-	int copy_len;
-
-	if (!ep->buffer)
-		return -1;
-	space = ep->out_ix-ep->in_ix-1;
-	if (space < 0)
-		space += ep->buffer_size;
-	if (space < (data_len+1)) {
-		oz_dbg(ON, "Buffer full\n");
-		return -1;
-	}
-	ep->buffer[ep->in_ix] = (u8)data_len;
-	if (++ep->in_ix == ep->buffer_size)
-		ep->in_ix = 0;
-	copy_len = ep->buffer_size - ep->in_ix;
-	if (copy_len > data_len)
-		copy_len = data_len;
-	memcpy(&ep->buffer[ep->in_ix], data, copy_len);
-
-	if (copy_len < data_len) {
-		memcpy(ep->buffer, data+copy_len, data_len-copy_len);
-		ep->in_ix = data_len-copy_len;
-	} else {
-		ep->in_ix += copy_len;
-	}
-	if (ep->in_ix == ep->buffer_size)
-		ep->in_ix = 0;
-	ep->buffered_units++;
-	return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
-{
-	struct oz_port *port = (struct oz_port *)hport;
-	struct oz_endpoint *ep;
-	struct oz_hcd *ozhcd = port->ozhcd;
-
-	spin_lock_bh(&ozhcd->hcd_lock);
-	ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
-	if (ep == NULL)
-		goto done;
-	switch (ep->attrib & USB_ENDPOINT_XFERTYPE_MASK) {
-	case USB_ENDPOINT_XFER_INT:
-	case USB_ENDPOINT_XFER_BULK:
-		if (!list_empty(&ep->urb_list)) {
-			struct oz_urb_link *urbl =
-				list_first_entry(&ep->urb_list,
-					struct oz_urb_link, link);
-			struct urb *urb;
-			int copy_len;
-
-			list_del_init(&urbl->link);
-			spin_unlock_bh(&ozhcd->hcd_lock);
-			urb = urbl->urb;
-			oz_free_urb_link(urbl);
-			if (data_len <= urb->transfer_buffer_length)
-				copy_len = data_len;
-			else
-				copy_len = urb->transfer_buffer_length;
-			memcpy(urb->transfer_buffer, data, copy_len);
-			urb->actual_length = copy_len;
-			oz_complete_urb(port->ozhcd->hcd, urb, 0);
-			return;
-		}
-		oz_dbg(ON, "buffering frame as URB is not available\n");
-		oz_hcd_buffer_data(ep, data, data_len);
-		break;
-	case USB_ENDPOINT_XFER_ISOC:
-		oz_hcd_buffer_data(ep, data, data_len);
-		break;
-	}
-done:
-	spin_unlock_bh(&ozhcd->hcd_lock);
-}
-
-/*
- * Context: unknown
- */
-static inline int oz_usb_get_frame_number(void)
-{
-	return atomic_inc_return(&g_usb_frame_number);
-}
-
-/*
- * Context: softirq
- */
-int oz_hcd_heartbeat(void *hport)
-{
-	int rc = 0;
-	struct oz_port *port = hport;
-	struct oz_hcd *ozhcd = port->ozhcd;
-	struct oz_urb_link *urbl, *n;
-	LIST_HEAD(xfr_list);
-	struct urb *urb;
-	struct oz_endpoint *ep;
-	struct timespec ts, delta;
-
-	getrawmonotonic(&ts);
-	/* Check the OUT isoc endpoints to see if any URB data can be sent.
-	 */
-	spin_lock_bh(&ozhcd->hcd_lock);
-	list_for_each_entry(ep, &port->isoc_out_ep, link) {
-		if (ep->credit < 0)
-			continue;
-		delta = timespec_sub(ts, ep->timestamp);
-		ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
-		if (ep->credit > ep->credit_ceiling)
-			ep->credit = ep->credit_ceiling;
-		ep->timestamp = ts;
-		while (ep->credit && !list_empty(&ep->urb_list)) {
-			urbl = list_first_entry(&ep->urb_list,
-				struct oz_urb_link, link);
-			urb = urbl->urb;
-			if ((ep->credit + 1) < urb->number_of_packets)
-				break;
-			ep->credit -= urb->number_of_packets;
-			if (ep->credit < 0)
-				ep->credit = 0;
-			list_move_tail(&urbl->link, &xfr_list);
-		}
-	}
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	/* Send to PD and complete URBs.
-	 */
-	list_for_each_entry_safe(urbl, n, &xfr_list, link) {
-		urb = urbl->urb;
-		list_del_init(&urbl->link);
-		urb->error_count = 0;
-		urb->start_frame = oz_usb_get_frame_number();
-		oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
-		oz_free_urb_link(urbl);
-		oz_complete_urb(port->ozhcd->hcd, urb, 0);
-	}
-	/* Check the IN isoc endpoints to see if any URBs can be completed.
-	 */
-	spin_lock_bh(&ozhcd->hcd_lock);
-	list_for_each_entry(ep, &port->isoc_in_ep, link) {
-		if (ep->flags & OZ_F_EP_BUFFERING) {
-			if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
-				ep->flags &= ~OZ_F_EP_BUFFERING;
-				ep->credit = 0;
-				ep->timestamp = ts;
-				ep->start_frame = 0;
-			}
-			continue;
-		}
-		delta = timespec_sub(ts, ep->timestamp);
-		ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
-		ep->timestamp = ts;
-		list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
-			struct urb *urb = urbl->urb;
-			int len = 0;
-			int copy_len;
-			int i;
-
-			if (ep->credit  < urb->number_of_packets)
-				break;
-			if (ep->buffered_units < urb->number_of_packets)
-				break;
-			urb->actual_length = 0;
-			for (i = 0; i < urb->number_of_packets; i++) {
-				len = ep->buffer[ep->out_ix];
-				if (++ep->out_ix == ep->buffer_size)
-					ep->out_ix = 0;
-				copy_len = ep->buffer_size - ep->out_ix;
-				if (copy_len > len)
-					copy_len = len;
-				memcpy(urb->transfer_buffer,
-					&ep->buffer[ep->out_ix], copy_len);
-				if (copy_len < len) {
-					memcpy(urb->transfer_buffer+copy_len,
-						ep->buffer, len-copy_len);
-					ep->out_ix = len-copy_len;
-				} else
-					ep->out_ix += copy_len;
-				if (ep->out_ix == ep->buffer_size)
-					ep->out_ix = 0;
-				urb->iso_frame_desc[i].offset =
-					urb->actual_length;
-				urb->actual_length += len;
-				urb->iso_frame_desc[i].actual_length = len;
-				urb->iso_frame_desc[i].status = 0;
-			}
-			ep->buffered_units -= urb->number_of_packets;
-			urb->error_count = 0;
-			urb->start_frame = ep->start_frame;
-			ep->start_frame += urb->number_of_packets;
-			list_move_tail(&urbl->link, &xfr_list);
-			ep->credit -= urb->number_of_packets;
-		}
-	}
-	if (!list_empty(&port->isoc_out_ep) || !list_empty(&port->isoc_in_ep))
-		rc = 1;
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	/* Complete the filled URBs.
-	 */
-	list_for_each_entry_safe(urbl, n, &xfr_list, link) {
-		urb = urbl->urb;
-		list_del_init(&urbl->link);
-		oz_free_urb_link(urbl);
-		oz_complete_urb(port->ozhcd->hcd, urb, 0);
-	}
-	/* Check if there are any ep0 requests that have timed out.
-	 * If so, resend them to the PD.
-	 */
-	ep = port->out_ep[0];
-	if (ep) {
-		spin_lock_bh(&ozhcd->hcd_lock);
-		list_for_each_entry_safe(urbl, n, &ep->urb_list, link) {
-			if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
-				oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
-				list_move_tail(&urbl->link, &xfr_list);
-				urbl->submit_counter = 0;
-			} else {
-				urbl->submit_counter++;
-			}
-		}
-		if (!list_empty(&ep->urb_list))
-			rc = 1;
-		spin_unlock_bh(&ozhcd->hcd_lock);
-		list_for_each_entry_safe(urbl, n, &xfr_list, link) {
-			oz_dbg(ON, "Resending request to PD\n");
-			oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
-			oz_free_urb_link(urbl);
-		}
-	}
-	return rc;
-}
-
-/*
- * Context: softirq
- */
-static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
-		struct oz_port *port,
-		struct usb_host_interface *intf, gfp_t mem_flags)
-{
-	struct oz_hcd *ozhcd = port->ozhcd;
-	int i;
-	int if_ix = intf->desc.bInterfaceNumber;
-	int request_heartbeat = 0;
-
-	oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
-	if (if_ix >= port->num_iface || port->iface == NULL)
-		return -ENOMEM;
-	for (i = 0; i < intf->desc.bNumEndpoints; i++) {
-		struct usb_host_endpoint *hep = &intf->endpoint[i];
-		u8 ep_addr = hep->desc.bEndpointAddress;
-		u8 ep_num = ep_addr & USB_ENDPOINT_NUMBER_MASK;
-		struct oz_endpoint *ep;
-		int buffer_size = 0;
-
-		oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
-		if (ep_addr & USB_ENDPOINT_DIR_MASK) {
-			switch (hep->desc.bmAttributes &
-						USB_ENDPOINT_XFERTYPE_MASK) {
-			case USB_ENDPOINT_XFER_ISOC:
-				buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
-				break;
-			case USB_ENDPOINT_XFER_INT:
-				buffer_size = OZ_EP_BUFFER_SIZE_INT;
-				break;
-			}
-		}
-
-		ep = oz_ep_alloc(buffer_size, mem_flags);
-		if (!ep) {
-			oz_clean_endpoints_for_interface(hcd, port, if_ix);
-			return -ENOMEM;
-		}
-		ep->attrib = hep->desc.bmAttributes;
-		ep->ep_num = ep_num;
-		if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
-			== USB_ENDPOINT_XFER_ISOC) {
-			oz_dbg(ON, "wMaxPacketSize = %d\n",
-			       usb_endpoint_maxp(&hep->desc));
-			ep->credit_ceiling = 200;
-			if (ep_addr & USB_ENDPOINT_DIR_MASK) {
-				ep->flags |= OZ_F_EP_BUFFERING;
-			} else {
-				ep->flags |= OZ_F_EP_HAVE_STREAM;
-				if (oz_usb_stream_create(port->hpd, ep_num))
-					ep->flags &= ~OZ_F_EP_HAVE_STREAM;
-			}
-		}
-		spin_lock_bh(&ozhcd->hcd_lock);
-		if (ep_addr & USB_ENDPOINT_DIR_MASK) {
-			port->in_ep[ep_num] = ep;
-			port->iface[if_ix].ep_mask |=
-				(1<<(ep_num+OZ_NB_ENDPOINTS));
-			if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
-				 == USB_ENDPOINT_XFER_ISOC) {
-				list_add_tail(&ep->link, &port->isoc_in_ep);
-				request_heartbeat = 1;
-			}
-		} else {
-			port->out_ep[ep_num] = ep;
-			port->iface[if_ix].ep_mask |= (1<<ep_num);
-			if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
-				== USB_ENDPOINT_XFER_ISOC) {
-				list_add_tail(&ep->link, &port->isoc_out_ep);
-				request_heartbeat = 1;
-			}
-		}
-		spin_unlock_bh(&ozhcd->hcd_lock);
-		if (request_heartbeat && port->hpd)
-			oz_usb_request_heartbeat(port->hpd);
-	}
-	return 0;
-}
-
-/*
- * Context: softirq
- */
-static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
-			struct oz_port *port, int if_ix)
-{
-	struct oz_hcd *ozhcd = port->ozhcd;
-	unsigned mask;
-	int i;
-	LIST_HEAD(ep_list);
-	struct oz_endpoint *ep, *n;
-
-	oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
-	if (if_ix >= port->num_iface)
-		return;
-	spin_lock_bh(&ozhcd->hcd_lock);
-	mask = port->iface[if_ix].ep_mask;
-	port->iface[if_ix].ep_mask = 0;
-	for (i = 0; i < OZ_NB_ENDPOINTS; i++) {
-		struct list_head *e;
-		/* Gather OUT endpoints.
-		 */
-		if ((mask & (1<<i)) && port->out_ep[i]) {
-			e = &port->out_ep[i]->link;
-			port->out_ep[i] = NULL;
-			/* Remove from isoc list if present.
-			 */
-			list_move_tail(e, &ep_list);
-		}
-		/* Gather IN endpoints.
-		 */
-		if ((mask & (1<<(i+OZ_NB_ENDPOINTS))) && port->in_ep[i]) {
-			e = &port->in_ep[i]->link;
-			port->in_ep[i] = NULL;
-			list_move_tail(e, &ep_list);
-		}
-	}
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	list_for_each_entry_safe(ep, n, &ep_list, link) {
-		list_del_init(&ep->link);
-		oz_ep_free(port, ep);
-	}
-}
-
-/*
- * Context: softirq
- */
-static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
-		struct oz_port *port, struct usb_host_config *config,
-		gfp_t mem_flags)
-{
-	struct oz_hcd *ozhcd = port->ozhcd;
-	int i;
-	int num_iface = config->desc.bNumInterfaces;
-
-	if (num_iface) {
-		struct oz_interface *iface;
-
-		iface = kmalloc_array(num_iface, sizeof(struct oz_interface),
-					mem_flags | __GFP_ZERO);
-		if (!iface)
-			return -ENOMEM;
-		spin_lock_bh(&ozhcd->hcd_lock);
-		port->iface = iface;
-		port->num_iface = num_iface;
-		spin_unlock_bh(&ozhcd->hcd_lock);
-	}
-	for (i = 0; i < num_iface; i++) {
-		struct usb_host_interface *intf =
-			&config->intf_cache[i]->altsetting[0];
-		if (oz_build_endpoints_for_interface(hcd, port, intf,
-			mem_flags))
-			goto fail;
-	}
-	return 0;
-fail:
-	oz_clean_endpoints_for_config(hcd, port);
-	return -1;
-}
-
-/*
- * Context: softirq
- */
-static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
-			struct oz_port *port)
-{
-	struct oz_hcd *ozhcd = port->ozhcd;
-	int i;
-
-	oz_dbg(ON, "Deleting endpoints for configuration\n");
-	for (i = 0; i < port->num_iface; i++)
-		oz_clean_endpoints_for_interface(hcd, port, i);
-	spin_lock_bh(&ozhcd->hcd_lock);
-	if (port->iface) {
-		oz_dbg(ON, "Freeing interfaces object\n");
-		kfree(port->iface);
-		port->iface = NULL;
-	}
-	port->num_iface = 0;
-	spin_unlock_bh(&ozhcd->hcd_lock);
-}
-
-/*
- * Context: tasklet
- */
-static void *oz_claim_hpd(struct oz_port *port)
-{
-	void *hpd;
-	struct oz_hcd *ozhcd = port->ozhcd;
-
-	spin_lock_bh(&ozhcd->hcd_lock);
-	hpd = port->hpd;
-	if (hpd)
-		oz_usb_get(hpd);
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	return hpd;
-}
-
-/*
- * Context: tasklet
- */
-static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
-		gfp_t mem_flags)
-{
-	struct usb_ctrlrequest *setup;
-	unsigned windex;
-	unsigned wvalue;
-	unsigned wlength;
-	void *hpd;
-	u8 req_id;
-	int rc = 0;
-	unsigned complete = 0;
-
-	int port_ix = -1;
-	struct oz_port *port = NULL;
-
-	oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
-	port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
-	if (port_ix < 0) {
-		rc = -EPIPE;
-		goto out;
-	}
-	port =  &ozhcd->ports[port_ix];
-	if (((port->flags & OZ_PORT_F_PRESENT) == 0)
-		|| (port->flags & OZ_PORT_F_DYING)) {
-		oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
-		       port_ix, urb->dev->devnum);
-		rc = -EPIPE;
-		goto out;
-	}
-	/* Store port in private context data.
-	 */
-	urb->hcpriv = port;
-	setup = (struct usb_ctrlrequest *)urb->setup_packet;
-	windex = le16_to_cpu(setup->wIndex);
-	wvalue = le16_to_cpu(setup->wValue);
-	wlength = le16_to_cpu(setup->wLength);
-	oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
-	oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
-	oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
-	oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
-	oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
-
-	req_id = port->next_req_id++;
-	hpd = oz_claim_hpd(port);
-	if (hpd == NULL) {
-		oz_dbg(ON, "Cannot claim port\n");
-		rc = -EPIPE;
-		goto out;
-	}
-
-	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-		/* Standard requests
-		 */
-		switch (setup->bRequest) {
-		case USB_REQ_GET_DESCRIPTOR:
-			oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
-			break;
-		case USB_REQ_SET_ADDRESS:
-			oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
-			oz_dbg(ON, "Port %d address is 0x%x\n",
-			       ozhcd->conn_port,
-			       (u8)le16_to_cpu(setup->wValue));
-			spin_lock_bh(&ozhcd->hcd_lock);
-			if (ozhcd->conn_port >= 0) {
-				ozhcd->ports[ozhcd->conn_port].bus_addr =
-					(u8)le16_to_cpu(setup->wValue);
-				oz_dbg(ON, "Clearing conn_port\n");
-				ozhcd->conn_port = -1;
-			}
-			spin_unlock_bh(&ozhcd->hcd_lock);
-			complete = 1;
-			break;
-		case USB_REQ_SET_CONFIGURATION:
-			oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
-			break;
-		case USB_REQ_GET_CONFIGURATION:
-			/* We short circuit this case and reply directly since
-			 * we have the selected configuration number cached.
-			 */
-			oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
-			if (urb->transfer_buffer_length >= 1) {
-				urb->actual_length = 1;
-				*((u8 *)urb->transfer_buffer) =
-					port->config_num;
-				complete = 1;
-			} else {
-				rc = -EPIPE;
-			}
-			break;
-		case USB_REQ_GET_INTERFACE:
-			/* We short circuit this case and reply directly since
-			 * we have the selected interface alternative cached.
-			 */
-			oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
-			if (urb->transfer_buffer_length >= 1) {
-				urb->actual_length = 1;
-				*((u8 *)urb->transfer_buffer) =
-					port->iface[(u8)windex].alt;
-				oz_dbg(ON, "interface = %d alt = %d\n",
-				       windex, port->iface[(u8)windex].alt);
-				complete = 1;
-			} else {
-				rc = -EPIPE;
-			}
-			break;
-		case USB_REQ_SET_INTERFACE:
-			oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
-			break;
-		}
-	}
-	if (!rc && !complete) {
-		int data_len = 0;
-
-		if ((setup->bRequestType & USB_DIR_IN) == 0)
-			data_len = wlength;
-		urb->actual_length = data_len;
-		if (oz_usb_control_req(port->hpd, req_id, setup,
-				urb->transfer_buffer, data_len)) {
-			rc = -ENOMEM;
-		} else {
-			/* Note: we are queuing the request after we have
-			 * submitted it to be transmitted. If the request were
-			 * to complete before we queued it then it would not
-			 * be found in the queue. It seems impossible for
-			 * this to happen but if it did the request would
-			 * be resubmitted so the problem would hopefully
-			 * resolve itself. Putting the request into the
-			 * queue before it has been sent is worse since the
-			 * urb could be cancelled while we are using it
-			 * to build the request.
-			 */
-			if (oz_enqueue_ep_urb(port, 0, 0, urb, req_id))
-				rc = -ENOMEM;
-		}
-	}
-	oz_usb_put(hpd);
-out:
-	if (rc || complete) {
-		oz_dbg(ON, "Completing request locally\n");
-		oz_complete_urb(ozhcd->hcd, urb, rc);
-	} else {
-		oz_usb_request_heartbeat(port->hpd);
-	}
-}
-
-/*
- * Context: tasklet
- */
-static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
-{
-	int rc = 0;
-	struct oz_port *port = urb->hcpriv;
-	u8 ep_addr;
-
-	/* When we are paranoid we keep a list of urbs which we check against
-	 * before handing one back. This is just for debugging during
-	 * development and should be turned off in the released driver.
-	 */
-	oz_remember_urb(urb);
-	/* Check buffer is valid.
-	 */
-	if (!urb->transfer_buffer && urb->transfer_buffer_length)
-		return -EINVAL;
-	/* Check if there is a device at the port - refuse if not.
-	 */
-	if ((port->flags & OZ_PORT_F_PRESENT) == 0)
-		return -EPIPE;
-	ep_addr = usb_pipeendpoint(urb->pipe);
-	if (ep_addr) {
-		/* If the request is not for EP0 then queue it.
-		 */
-		if (oz_enqueue_ep_urb(port, ep_addr, usb_pipein(urb->pipe),
-			urb, 0))
-			rc = -EPIPE;
-	} else {
-		oz_process_ep0_urb(ozhcd, urb, GFP_ATOMIC);
-	}
-	return rc;
-}
-
-/*
- * Context: tasklet
- */
-static void oz_urb_process_tasklet(unsigned long unused)
-{
-	unsigned long irq_state;
-	struct urb *urb;
-	struct oz_hcd *ozhcd = oz_hcd_claim();
-	struct oz_urb_link *urbl, *n;
-	int rc = 0;
-
-	if (ozhcd == NULL)
-		return;
-	/* This is called from a tasklet so is in softirq context but the urb
-	 * list is filled from any context so we need to lock
-	 * appropriately while removing urbs.
-	 */
-	spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	list_for_each_entry_safe(urbl, n, &ozhcd->urb_pending_list, link) {
-		list_del_init(&urbl->link);
-		spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-		urb = urbl->urb;
-		oz_free_urb_link(urbl);
-		rc = oz_urb_process(ozhcd, urb);
-		if (rc)
-			oz_complete_urb(ozhcd->hcd, urb, rc);
-		spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	}
-	spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-	oz_hcd_put(ozhcd);
-}
-
-/*
- * This function searches for the urb in any of the lists it could be in.
- * If it is found it is removed from the list and completed. If the urb is
- * being processed then it won't be in a list so won't be found. However, the
- * call to usb_hcd_check_unlink_urb() will set the value of the unlinked field
- * to a non-zero value. When an attempt is made to put the urb back in a list
- * the unlinked field will be checked and the urb will then be completed.
- * Context: tasklet
- */
-static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
-{
-	struct oz_urb_link *urbl = NULL;
-	struct list_head *e;
-	struct oz_hcd *ozhcd;
-	unsigned long irq_state;
-	u8 ix;
-
-	if (port == NULL) {
-		oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
-		return;
-	}
-	ozhcd = port->ozhcd;
-	if (ozhcd == NULL) {
-		oz_dbg(ON, "%s: ERROR: (%p) ozhcd is null\n", __func__, urb);
-		return;
-	}
-
-	/* Look in the tasklet queue.
-	 */
-	spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	list_for_each(e, &ozhcd->urb_cancel_list) {
-		urbl = list_entry(e, struct oz_urb_link, link);
-		if (urb == urbl->urb) {
-			list_del_init(e);
-			spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-			goto out2;
-		}
-	}
-	spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-	urbl = NULL;
-
-	/* Look in the orphanage.
-	 */
-	spin_lock_irqsave(&ozhcd->hcd_lock, irq_state);
-	list_for_each(e, &ozhcd->orphanage) {
-		urbl = list_entry(e, struct oz_urb_link, link);
-		if (urbl->urb == urb) {
-			list_del(e);
-			oz_dbg(ON, "Found urb in orphanage\n");
-			goto out;
-		}
-	}
-	ix = (ep_num & 0xf);
-	urbl = NULL;
-	if ((ep_num & USB_DIR_IN) && ix)
-		urbl = oz_remove_urb(port->in_ep[ix], urb);
-	else
-		urbl = oz_remove_urb(port->out_ep[ix], urb);
-out:
-	spin_unlock_irqrestore(&ozhcd->hcd_lock, irq_state);
-out2:
-	if (urbl) {
-		urb->actual_length = 0;
-		oz_free_urb_link(urbl);
-		oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
-	}
-}
-
-/*
- * Context: tasklet
- */
-static void oz_urb_cancel_tasklet(unsigned long unused)
-{
-	unsigned long irq_state;
-	struct urb *urb;
-	struct oz_urb_link *urbl, *n;
-	struct oz_hcd *ozhcd = oz_hcd_claim();
-
-	if (ozhcd == NULL)
-		return;
-	spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	list_for_each_entry_safe(urbl, n, &ozhcd->urb_cancel_list, link) {
-		list_del_init(&urbl->link);
-		spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-		urb = urbl->urb;
-		if (urb->unlinked)
-			oz_urb_cancel(urbl->port, urbl->ep_num, urb);
-		oz_free_urb_link(urbl);
-		spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	}
-	spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-	oz_hcd_put(ozhcd);
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
-{
-	if (ozhcd) {
-		struct oz_urb_link *urbl, *n;
-
-		list_for_each_entry_safe(urbl, n, &ozhcd->orphanage, link) {
-			list_del(&urbl->link);
-			oz_complete_urb(ozhcd->hcd, urbl->urb, status);
-			oz_free_urb_link(urbl);
-		}
-	}
-}
-
-/*
- * Context: unknown
- */
-static int oz_hcd_start(struct usb_hcd *hcd)
-{
-	hcd->power_budget = 200;
-	hcd->state = HC_STATE_RUNNING;
-	hcd->uses_new_polling = 1;
-	return 0;
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_stop(struct usb_hcd *hcd)
-{
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_shutdown(struct usb_hcd *hcd)
-{
-}
-
-/*
- * Called to queue an urb for the device.
- * This function should return a non-zero error code if it fails the urb but
- * should not call usb_hcd_giveback_urb().
- * Context: any
- */
-static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
-				gfp_t mem_flags)
-{
-	struct oz_hcd *ozhcd = oz_hcd_private(hcd);
-	int rc;
-	int port_ix;
-	struct oz_port *port;
-	unsigned long irq_state;
-	struct oz_urb_link *urbl;
-
-	oz_dbg(URB, "%s: (%p)\n",  __func__, urb);
-	if (unlikely(ozhcd == NULL)) {
-		oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
-		return -EPIPE;
-	}
-	if (unlikely(hcd->state != HC_STATE_RUNNING)) {
-		oz_dbg(URB, "Refused urb(%p) not running\n", urb);
-		return -EPIPE;
-	}
-	port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
-	if (port_ix < 0)
-		return -EPIPE;
-	port =  &ozhcd->ports[port_ix];
-	if (port == NULL)
-		return -EPIPE;
-	if (!(port->flags & OZ_PORT_F_PRESENT) ||
-				(port->flags & OZ_PORT_F_CHANGED)) {
-		oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
-		       port_ix, urb->dev->devnum);
-		return -EPIPE;
-	}
-	urb->hcpriv = port;
-	/* Put request in queue for processing by tasklet.
-	 */
-	urbl = oz_alloc_urb_link();
-	if (unlikely(urbl == NULL))
-		return -ENOMEM;
-	urbl->urb = urb;
-	spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	rc = usb_hcd_link_urb_to_ep(hcd, urb);
-	if (unlikely(rc)) {
-		spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-		oz_free_urb_link(urbl);
-		return rc;
-	}
-	list_add_tail(&urbl->link, &ozhcd->urb_pending_list);
-	spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-	tasklet_schedule(&g_urb_process_tasklet);
-	atomic_inc(&g_pending_urbs);
-	return 0;
-}
-
-/*
- * Context: tasklet
- */
-static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
-				struct urb *urb)
-{
-	struct oz_urb_link *urbl;
-
-	if (unlikely(ep == NULL))
-		return NULL;
-
-	list_for_each_entry(urbl, &ep->urb_list, link) {
-		if (urbl->urb == urb) {
-			list_del_init(&urbl->link);
-			if (usb_pipeisoc(urb->pipe)) {
-				ep->credit -= urb->number_of_packets;
-				if (ep->credit < 0)
-					ep->credit = 0;
-			}
-			return urbl;
-		}
-	}
-	return NULL;
-}
-
-/*
- * Called to dequeue a previously submitted urb for the device.
- * Context: any
- */
-static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
-{
-	struct oz_hcd *ozhcd = oz_hcd_private(hcd);
-	struct oz_urb_link *urbl;
-	int rc;
-	unsigned long irq_state;
-
-	oz_dbg(URB, "%s: (%p)\n",  __func__, urb);
-	urbl = oz_alloc_urb_link();
-	if (unlikely(urbl == NULL))
-		return -ENOMEM;
-	spin_lock_irqsave(&g_tasklet_lock, irq_state);
-	/* The following function checks the urb is still in the queue
-	 * maintained by the core and that the unlinked field is zero.
-	 * If both are true the function sets the unlinked field and returns
-	 * zero. Otherwise it returns an error.
-	 */
-	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
-	/* We have to check we haven't completed the urb or are about
-	 * to complete it. When we do we set hcpriv to 0 so if this has
-	 * already happened we don't put the urb in the cancel queue.
-	 */
-	if ((rc == 0) && urb->hcpriv) {
-		urbl->urb = urb;
-		urbl->port = (struct oz_port *)urb->hcpriv;
-		urbl->ep_num = usb_pipeendpoint(urb->pipe);
-		if (usb_pipein(urb->pipe))
-			urbl->ep_num |= USB_DIR_IN;
-		list_add_tail(&urbl->link, &ozhcd->urb_cancel_list);
-		spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-		tasklet_schedule(&g_urb_cancel_tasklet);
-	} else {
-		spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
-		oz_free_urb_link(urbl);
-	}
-	return rc;
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
-				struct usb_host_endpoint *ep)
-{
-}
-
-/*
- * Context: unknown
- */
-static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
-				struct usb_host_endpoint *ep)
-{
-}
-
-/*
- * Context: unknown
- */
-static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
-{
-	oz_dbg(ON, "oz_hcd_get_frame_number\n");
-	return oz_usb_get_frame_number();
-}
-
-/*
- * Context: softirq
- * This is called as a consequence of us calling usb_hcd_poll_rh_status(), and
- * we always do that in softirq context.
- */
-static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
-{
-	struct oz_hcd *ozhcd = oz_hcd_private(hcd);
-	int i;
-
-	buf[0] = 0;
-	buf[1] = 0;
-
-	spin_lock_bh(&ozhcd->hcd_lock);
-	for (i = 0; i < OZ_NB_PORTS; i++) {
-		if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
-			oz_dbg(HUB, "Port %d changed\n", i);
-			ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
-			if (i < 7)
-				buf[0] |= 1 << (i + 1);
-			else
-				buf[1] |= 1 << (i - 7);
-		}
-	}
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	if (buf[0] != 0 || buf[1] != 0)
-		return 2;
-	return 0;
-}
-
-/*
- * Context: process
- */
-static void oz_get_hub_descriptor(struct usb_hcd *hcd,
-				struct usb_hub_descriptor *desc)
-{
-	memset(desc, 0, sizeof(*desc));
-	desc->bDescriptorType = 0x29;
-	desc->bDescLength = 9;
-	desc->wHubCharacteristics = cpu_to_le16(0x0001);
-	desc->bNbrPorts = OZ_NB_PORTS;
-}
-
-/*
- * Context: process
- */
-static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
-{
-	struct oz_port *port;
-	u8 port_id = (u8)windex;
-	struct oz_hcd *ozhcd = oz_hcd_private(hcd);
-	unsigned set_bits = 0;
-	unsigned clear_bits = 0;
-
-	if ((port_id < 1) || (port_id > OZ_NB_PORTS))
-		return -EPIPE;
-	port = &ozhcd->ports[port_id-1];
-	switch (wvalue) {
-	case USB_PORT_FEAT_CONNECTION:
-		oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
-		break;
-	case USB_PORT_FEAT_ENABLE:
-		oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
-		break;
-	case USB_PORT_FEAT_SUSPEND:
-		oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
-		break;
-	case USB_PORT_FEAT_OVER_CURRENT:
-		oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
-		break;
-	case USB_PORT_FEAT_RESET:
-		oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
-		set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
-		clear_bits = USB_PORT_STAT_RESET;
-		ozhcd->ports[port_id-1].bus_addr = 0;
-		break;
-	case USB_PORT_FEAT_POWER:
-		oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
-		set_bits |= USB_PORT_STAT_POWER;
-		break;
-	case USB_PORT_FEAT_LOWSPEED:
-		oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
-		break;
-	case USB_PORT_FEAT_C_CONNECTION:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
-		break;
-	case USB_PORT_FEAT_C_ENABLE:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
-		break;
-	case USB_PORT_FEAT_C_SUSPEND:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
-		break;
-	case USB_PORT_FEAT_C_OVER_CURRENT:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
-		break;
-	case USB_PORT_FEAT_C_RESET:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
-		break;
-	case USB_PORT_FEAT_TEST:
-		oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
-		break;
-	case USB_PORT_FEAT_INDICATOR:
-		oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
-		break;
-	default:
-		oz_dbg(HUB, "Other %d\n", wvalue);
-		break;
-	}
-	if (set_bits || clear_bits) {
-		spin_lock_bh(&port->port_lock);
-		port->status &= ~clear_bits;
-		port->status |= set_bits;
-		spin_unlock_bh(&port->port_lock);
-	}
-	oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
-	return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
-{
-	struct oz_port *port;
-	u8 port_id = (u8)windex;
-	struct oz_hcd *ozhcd = oz_hcd_private(hcd);
-	unsigned clear_bits = 0;
-
-	if ((port_id < 1) || (port_id > OZ_NB_PORTS))
-		return -EPIPE;
-	port = &ozhcd->ports[port_id-1];
-	switch (wvalue) {
-	case USB_PORT_FEAT_CONNECTION:
-		oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
-		break;
-	case USB_PORT_FEAT_ENABLE:
-		oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
-		clear_bits = USB_PORT_STAT_ENABLE;
-		break;
-	case USB_PORT_FEAT_SUSPEND:
-		oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
-		break;
-	case USB_PORT_FEAT_OVER_CURRENT:
-		oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
-		break;
-	case USB_PORT_FEAT_RESET:
-		oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
-		break;
-	case USB_PORT_FEAT_POWER:
-		oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
-		clear_bits |= USB_PORT_STAT_POWER;
-		break;
-	case USB_PORT_FEAT_LOWSPEED:
-		oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
-		break;
-	case USB_PORT_FEAT_C_CONNECTION:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
-		clear_bits = USB_PORT_STAT_C_CONNECTION << 16;
-		break;
-	case USB_PORT_FEAT_C_ENABLE:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
-		clear_bits = USB_PORT_STAT_C_ENABLE << 16;
-		break;
-	case USB_PORT_FEAT_C_SUSPEND:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
-		break;
-	case USB_PORT_FEAT_C_OVER_CURRENT:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
-		break;
-	case USB_PORT_FEAT_C_RESET:
-		oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
-		clear_bits = USB_PORT_STAT_C_RESET << 16;
-		break;
-	case USB_PORT_FEAT_TEST:
-		oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
-		break;
-	case USB_PORT_FEAT_INDICATOR:
-		oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
-		break;
-	default:
-		oz_dbg(HUB, "Other %d\n", wvalue);
-		break;
-	}
-	if (clear_bits) {
-		spin_lock_bh(&port->port_lock);
-		port->status &= ~clear_bits;
-		spin_unlock_bh(&port->port_lock);
-	}
-	oz_dbg(HUB, "Port[%d] status = 0x%x\n",
-	       port_id, ozhcd->ports[port_id-1].status);
-	return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
-{
-	struct oz_hcd *ozhcd;
-	u32 status;
-
-	if ((windex < 1) || (windex > OZ_NB_PORTS))
-		return -EPIPE;
-	ozhcd = oz_hcd_private(hcd);
-	oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
-	status = ozhcd->ports[windex-1].status;
-	put_unaligned(cpu_to_le32(status), (__le32 *)buf);
-	oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
-	return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
-				u16 windex, char *buf, u16 wlength)
-{
-	int err = 0;
-
-	switch (req_type) {
-	case ClearHubFeature:
-		oz_dbg(HUB, "ClearHubFeature: %d\n", req_type);
-		break;
-	case ClearPortFeature:
-		err = oz_clear_port_feature(hcd, wvalue, windex);
-		break;
-	case GetHubDescriptor:
-		oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
-		break;
-	case GetHubStatus:
-		oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
-		put_unaligned(cpu_to_le32(0), (__le32 *)buf);
-		break;
-	case GetPortStatus:
-		err = oz_get_port_status(hcd, windex, buf);
-		break;
-	case SetHubFeature:
-		oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
-		break;
-	case SetPortFeature:
-		err = oz_set_port_feature(hcd, wvalue, windex);
-		break;
-	default:
-		oz_dbg(HUB, "Other: %d\n", req_type);
-		break;
-	}
-	return err;
-}
-
-/*
- * Context: process
- */
-static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
-{
-	struct oz_hcd *ozhcd;
-
-	ozhcd = oz_hcd_private(hcd);
-	spin_lock_bh(&ozhcd->hcd_lock);
-	hcd->state = HC_STATE_SUSPENDED;
-	ozhcd->flags |= OZ_HDC_F_SUSPENDED;
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	return 0;
-}
-
-/*
- * Context: process
- */
-static int oz_hcd_bus_resume(struct usb_hcd *hcd)
-{
-	struct oz_hcd *ozhcd;
-
-	ozhcd = oz_hcd_private(hcd);
-	spin_lock_bh(&ozhcd->hcd_lock);
-	ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
-	hcd->state = HC_STATE_RUNNING;
-	spin_unlock_bh(&ozhcd->hcd_lock);
-	return 0;
-}
-
-static void oz_plat_shutdown(struct platform_device *dev)
-{
-}
-
-/*
- * Context: process
- */
-static int oz_plat_probe(struct platform_device *dev)
-{
-	int i;
-	int err;
-	struct usb_hcd *hcd;
-	struct oz_hcd *ozhcd;
-
-	hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
-	if (hcd == NULL) {
-		oz_dbg(ON, "Failed to create hcd object\n");
-		return -ENOMEM;
-	}
-	ozhcd = oz_hcd_private(hcd);
-	memset(ozhcd, 0, sizeof(*ozhcd));
-	INIT_LIST_HEAD(&ozhcd->urb_pending_list);
-	INIT_LIST_HEAD(&ozhcd->urb_cancel_list);
-	INIT_LIST_HEAD(&ozhcd->orphanage);
-	ozhcd->hcd = hcd;
-	ozhcd->conn_port = -1;
-	spin_lock_init(&ozhcd->hcd_lock);
-	for (i = 0; i < OZ_NB_PORTS; i++) {
-		struct oz_port *port = &ozhcd->ports[i];
-
-		port->ozhcd = ozhcd;
-		port->flags = 0;
-		port->status = 0;
-		port->bus_addr = 0xff;
-		spin_lock_init(&port->port_lock);
-	}
-	err = usb_add_hcd(hcd, 0, 0);
-	if (err) {
-		oz_dbg(ON, "Failed to add hcd object\n");
-		usb_put_hcd(hcd);
-		return -1;
-	}
-	device_wakeup_enable(hcd->self.controller);
-
-	spin_lock_bh(&g_hcdlock);
-	g_ozhcd = ozhcd;
-	spin_unlock_bh(&g_hcdlock);
-	return 0;
-}
-
-/*
- * Context: unknown
- */
-static int oz_plat_remove(struct platform_device *dev)
-{
-	struct usb_hcd *hcd = platform_get_drvdata(dev);
-	struct oz_hcd *ozhcd;
-
-	if (hcd == NULL)
-		return -1;
-	ozhcd = oz_hcd_private(hcd);
-	spin_lock_bh(&g_hcdlock);
-	if (ozhcd == g_ozhcd)
-		g_ozhcd = NULL;
-	spin_unlock_bh(&g_hcdlock);
-	oz_dbg(ON, "Clearing orphanage\n");
-	oz_hcd_clear_orphanage(ozhcd, -EPIPE);
-	oz_dbg(ON, "Removing hcd\n");
-	usb_remove_hcd(hcd);
-	usb_put_hcd(hcd);
-	return 0;
-}
-
-/*
- * Context: unknown
- */
-static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
-{
-	return 0;
-}
-
-
-/*
- * Context: unknown
- */
-static int oz_plat_resume(struct platform_device *dev)
-{
-	return 0;
-}
-
-/*
- * Context: process
- */
-int oz_hcd_init(void)
-{
-	int err;
-
-	if (usb_disabled())
-		return -ENODEV;
-
-	oz_urb_link_cache = KMEM_CACHE(oz_urb_link, 0);
-	if (!oz_urb_link_cache)
-		return -ENOMEM;
-
-	tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
-	tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
-	err = platform_driver_register(&g_oz_plat_drv);
-	oz_dbg(ON, "platform_driver_register() returned %d\n", err);
-	if (err)
-		goto error;
-	g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
-	if (g_plat_dev == NULL) {
-		err = -ENOMEM;
-		goto error1;
-	}
-	oz_dbg(ON, "platform_device_alloc() succeeded\n");
-	err = platform_device_add(g_plat_dev);
-	if (err)
-		goto error2;
-	oz_dbg(ON, "platform_device_add() succeeded\n");
-	return 0;
-error2:
-	platform_device_put(g_plat_dev);
-error1:
-	platform_driver_unregister(&g_oz_plat_drv);
-error:
-	tasklet_disable(&g_urb_process_tasklet);
-	tasklet_disable(&g_urb_cancel_tasklet);
-	oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
-	return err;
-}
-
-/*
- * Context: process
- */
-void oz_hcd_term(void)
-{
-	msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
-	tasklet_kill(&g_urb_process_tasklet);
-	tasklet_kill(&g_urb_cancel_tasklet);
-	platform_device_unregister(g_plat_dev);
-	platform_driver_unregister(&g_oz_plat_drv);
-	oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));
-	kmem_cache_destroy(oz_urb_link_cache);
-}
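
For reference, the length-prefixed ring buffer that oz_hcd_buffer_data() and oz_complete_buffered_urb() implement above reduces to the small standalone sketch below (plain C; the names ring, ring_put, ring_get and the RING_SIZE constant are illustrative, not part of the driver). The driver keeps the same state per endpoint (buffer, in_ix, out_ix, buffered_units) but sizes the buffer per transfer type. Each unit is stored as a one-byte length followed by the payload, so a single unit can never exceed 255 bytes.

#include <string.h>

#define RING_SIZE 64	/* illustrative; the driver sizes this per endpoint */

struct ring {
	unsigned char buf[RING_SIZE];
	int in_ix;		/* next byte to write */
	int out_ix;		/* next byte to read */
	int units;		/* number of complete units buffered */
};

/* Store one unit as a one-byte length followed by the payload, wrapping. */
static int ring_put(struct ring *r, const unsigned char *data, int len)
{
	int space = r->out_ix - r->in_ix - 1;
	int chunk;

	if (space < 0)
		space += RING_SIZE;
	if (space < len + 1)
		return -1;			/* full */
	r->buf[r->in_ix] = (unsigned char)len;
	if (++r->in_ix == RING_SIZE)
		r->in_ix = 0;
	chunk = RING_SIZE - r->in_ix;
	if (chunk > len)
		chunk = len;
	memcpy(&r->buf[r->in_ix], data, chunk);
	if (chunk < len) {			/* wrapped around */
		memcpy(r->buf, data + chunk, len - chunk);
		r->in_ix = len - chunk;
	} else {
		r->in_ix += chunk;
		if (r->in_ix == RING_SIZE)
			r->in_ix = 0;
	}
	r->units++;
	return 0;
}

/* Consume one unit, copying at most dst_len bytes; returns bytes copied. */
static int ring_get(struct ring *r, unsigned char *dst, int dst_len)
{
	int len, copy, i;

	if (!r->units)
		return -1;			/* empty */
	len = r->buf[r->out_ix];
	if (++r->out_ix == RING_SIZE)
		r->out_ix = 0;
	copy = len < dst_len ? len : dst_len;
	for (i = 0; i < len; i++) {		/* always consume the whole unit */
		if (i < copy)
			dst[i] = r->buf[r->out_ix];
		if (++r->out_ix == RING_SIZE)
			r->out_ix = 0;
	}
	r->units--;
	return copy;
}

Note that the sketch always consumes a whole unit even when the destination buffer is smaller, which keeps the length-prefix framing intact on the read side.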
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
deleted file mode 100644
index 55e97b1..0000000
--- a/drivers/staging/ozwpan/ozhcd.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-#ifndef _OZHCD_H
-#define _OZHCD_H
-
-int oz_hcd_init(void);
-void oz_hcd_term(void);
-struct oz_port *oz_hcd_pd_arrived(void *ctx);
-void oz_hcd_pd_departed(struct oz_port *hport);
-void oz_hcd_pd_reset(void *hpd, void *hport);
-
-#endif /* _OZHCD_H */
-
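
The URB submission path above (oz_hcd_urb_enqueue() adding to urb_pending_list under g_tasklet_lock, with oz_urb_process_tasklet() draining the list in softirq context) is an instance of a common deferred-work pattern. A minimal kernel-style sketch follows; the names (work_item, work_queue, work_tasklet and so on) are invented for illustration, and unlike the driver it splices the whole list out under the lock rather than re-taking the lock around each item.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct work_item {
	struct list_head link;
	void (*fn)(void *arg);
	void *arg;
};

static LIST_HEAD(work_pending);
static DEFINE_SPINLOCK(work_lock);
static struct tasklet_struct work_tasklet;

/* Producer side: safe to call from any context. */
static int work_queue(void (*fn)(void *), void *arg)
{
	struct work_item *w = kmalloc(sizeof(*w), GFP_ATOMIC);
	unsigned long flags;

	if (!w)
		return -ENOMEM;
	w->fn = fn;
	w->arg = arg;
	spin_lock_irqsave(&work_lock, flags);
	list_add_tail(&w->link, &work_pending);
	spin_unlock_irqrestore(&work_lock, flags);
	tasklet_schedule(&work_tasklet);
	return 0;
}

/* Consumer side: runs in softirq context. */
static void work_tasklet_fn(unsigned long unused)
{
	LIST_HEAD(todo);
	struct work_item *w, *n;
	unsigned long flags;

	/* Take everything in one go, then run the callbacks unlocked. */
	spin_lock_irqsave(&work_lock, flags);
	list_splice_init(&work_pending, &todo);
	spin_unlock_irqrestore(&work_lock, flags);

	list_for_each_entry_safe(w, n, &todo, link) {
		list_del(&w->link);
		w->fn(w->arg);
		kfree(w);
	}
}

/* Must be called once before the first work_queue(). */
static void work_setup(void)
{
	tasklet_init(&work_tasklet, work_tasklet_fn, 0);
}

The driver instead holds g_tasklet_lock across the iteration and drops it around each oz_urb_process() call, because completing a URB can re-enter the enqueue path; splicing as above avoids the per-item re-locking, and items queued while the batch runs are picked up on the next tasklet run.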
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
deleted file mode 100644
index 74ef348..0000000
--- a/drivers/staging/ozwpan/ozmain.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/errno.h>
-#include <linux/ieee80211.h>
-#include "ozdbg.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozcdev.h"
-
-unsigned int oz_dbg_mask = OZ_DEFAULT_DBG_MASK;
-
-/*
- * The name of the 802.11 MAC device to bind to. The default is the empty
- * string, but a value can be supplied as a module parameter. An empty string
- * means bind to nothing; '*' means bind to all netcards, including
- * non-802.11 ones. Bindings can be added later using an IOCTL.
- */
-static char *g_net_dev = "";
-module_param(g_net_dev, charp, S_IRUGO);
-MODULE_PARM_DESC(g_net_dev, "The device(s) to bind to; "
-	"'*' means all, '' (empty string; default) means none.");
-
-/*
- * Context: process
- */
-static int __init ozwpan_init(void)
-{
-	int err;
-
-	err = oz_cdev_register();
-	if (err)
-		return err;
-	err = oz_protocol_init(g_net_dev);
-	if (err)
-		goto err_protocol;
-	oz_app_enable(OZ_APPID_USB, 1);
-	oz_apps_init();
-	return 0;
-
-err_protocol:
-	oz_cdev_deregister();
-	return err;
-}
-
-/*
- * Context: process
- */
-static void __exit ozwpan_exit(void)
-{
-	oz_protocol_term();
-	oz_apps_term();
-	oz_cdev_deregister();
-}
-
-module_init(ozwpan_init);
-module_exit(ozwpan_exit);
-
-MODULE_AUTHOR("Chris Kelly");
-MODULE_DESCRIPTION("Ozmo Devices USB over WiFi hcd driver");
-MODULE_VERSION("1.0.13");
-MODULE_LICENSE("GPL");
-
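
ozwpan_init() above and oz_hcd_init() earlier follow the usual goto-based unwind for multi-step initialisation: each step that succeeds gains a cleanup label, and a later failure jumps to the label that undoes only what has already been done. A generic sketch is below; the step functions are placeholders standing in for calls such as oz_cdev_register() and oz_protocol_init(), not real APIs.

static int register_chardev(void)	{ return 0; }	/* placeholder step 1 */
static void unregister_chardev(void)	{ }
static int register_protocol(void)	{ return 0; }	/* placeholder step 2 */
static void unregister_protocol(void)	{ }
static int register_platform(void)	{ return 0; }	/* placeholder step 3 */

static int example_init(void)
{
	int err;

	err = register_chardev();
	if (err)
		return err;		/* nothing to undo yet */

	err = register_protocol();
	if (err)
		goto err_chardev;

	err = register_platform();
	if (err)
		goto err_protocol;

	return 0;

err_protocol:
	unregister_protocol();		/* undo step 2 */
err_chardev:
	unregister_chardev();		/* undo step 1 */
	return err;
}

Reading the unwind path top to bottom mirrors tearing the completed steps down in reverse order, which is why the labels are named after the step they undo.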
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
deleted file mode 100644
index 021d74a..0000000
--- a/drivers/staging/ozwpan/ozpd.c
+++ /dev/null
@@ -1,886 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/errno.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozcdev.h"
-#include "ozusbsvc.h"
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#include <net/psnap.h>
-
-static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
-static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
-static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
-static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
-static int oz_send_isoc_frame(struct oz_pd *pd);
-static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
-static void oz_isoc_stream_free(struct oz_isoc_stream *st);
-static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
-static void oz_isoc_destructor(struct sk_buff *skb);
-
-/*
- * Counts the uncompleted isoc frames submitted to netcard.
- */
-static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
-
-/* Application handler functions.
- */
-static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
-	[OZ_APPID_USB] = {
-		.init      = oz_usb_init,
-		.term      = oz_usb_term,
-		.start     = oz_usb_start,
-		.stop      = oz_usb_stop,
-		.rx        = oz_usb_rx,
-		.heartbeat = oz_usb_heartbeat,
-		.farewell  = oz_usb_farewell,
-	},
-	[OZ_APPID_SERIAL] = {
-		.init      = oz_cdev_init,
-		.term      = oz_cdev_term,
-		.start     = oz_cdev_start,
-		.stop      = oz_cdev_stop,
-		.rx        = oz_cdev_rx,
-	},
-};
-
-
-/*
- * Context: softirq or process
- */
-void oz_pd_set_state(struct oz_pd *pd, unsigned state)
-{
-	pd->state = state;
-	switch (state) {
-	case OZ_PD_S_IDLE:
-		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
-		break;
-	case OZ_PD_S_CONNECTED:
-		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
-		break;
-	case OZ_PD_S_STOPPED:
-		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
-		break;
-	case OZ_PD_S_SLEEP:
-		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
-		break;
-	}
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_get(struct oz_pd *pd)
-{
-	atomic_inc(&pd->ref_count);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_put(struct oz_pd *pd)
-{
-	if (atomic_dec_and_test(&pd->ref_count))
-		oz_pd_destroy(pd);
-}
-
-/*
- * Context: softirq-serialized
- */
-struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
-{
-	struct oz_pd *pd;
-	int i;
-
-	pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
-	if (!pd)
-		return NULL;
-
-	atomic_set(&pd->ref_count, 2);
-	for (i = 0; i < OZ_NB_APPS; i++)
-		spin_lock_init(&pd->app_lock[i]);
-	pd->last_rx_pkt_num = 0xffffffff;
-	oz_pd_set_state(pd, OZ_PD_S_IDLE);
-	pd->max_tx_size = OZ_MAX_TX_SIZE;
-	ether_addr_copy(pd->mac_addr, mac_addr);
-	oz_elt_buf_init(&pd->elt_buff);
-	spin_lock_init(&pd->tx_frame_lock);
-	INIT_LIST_HEAD(&pd->tx_queue);
-	INIT_LIST_HEAD(&pd->farewell_list);
-	pd->last_sent_frame = &pd->tx_queue;
-	spin_lock_init(&pd->stream_lock);
-	INIT_LIST_HEAD(&pd->stream_list);
-	tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
-						(unsigned long)pd);
-	tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
-						(unsigned long)pd);
-	hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	pd->heartbeat.function = oz_pd_heartbeat_event;
-	pd->timeout.function = oz_pd_timeout_event;
-
-	return pd;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_pd_free(struct work_struct *work)
-{
-	struct list_head *e, *n;
-	struct oz_pd *pd;
-
-	pd = container_of(work, struct oz_pd, workitem);
-	oz_pd_dbg(pd, ON, "Destroying PD\n");
-	/*Disable timer tasklets*/
-	tasklet_kill(&pd->heartbeat_tasklet);
-	tasklet_kill(&pd->timeout_tasklet);
-
-	/* Free streams, queued tx frames and farewells. */
-
-	list_for_each_safe(e, n, &pd->stream_list)
-		oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));
-
-	list_for_each_safe(e, n, &pd->tx_queue) {
-		struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);
-
-		if (f->skb != NULL)
-			kfree_skb(f->skb);
-		oz_retire_frame(pd, f);
-	}
-
-	oz_elt_buf_term(&pd->elt_buff);
-
-	list_for_each_safe(e, n, &pd->farewell_list)
-		kfree(list_entry(e, struct oz_farewell, link));
-
-	if (pd->net_dev)
-		dev_put(pd->net_dev);
-	kfree(pd);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_destroy(struct oz_pd *pd)
-{
-	if (hrtimer_active(&pd->timeout))
-		hrtimer_cancel(&pd->timeout);
-	if (hrtimer_active(&pd->heartbeat))
-		hrtimer_cancel(&pd->heartbeat);
-
-	INIT_WORK(&pd->workitem, oz_pd_free);
-	if (!schedule_work(&pd->workitem))
-		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
-}
-
-/*
- * Context: softirq-serialized
- */
-int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
-{
-	int i, rc = 0;
-
-	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
-	for (i = 0; i < OZ_NB_APPS; i++) {
-		if (g_app_if[i].start && (apps & (1 << i))) {
-			if (g_app_if[i].start(pd, resume)) {
-				rc = -1;
-				oz_pd_dbg(pd, ON,
-					  "Unable to start service %d\n", i);
-				break;
-			}
-			spin_lock_bh(&g_polling_lock);
-			pd->total_apps |= (1 << i);
-			if (resume)
-				pd->paused_apps &= ~(1 << i);
-			spin_unlock_bh(&g_polling_lock);
-		}
-	}
-	return rc;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
-{
-	int i;
-
-	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
-	for (i = 0; i < OZ_NB_APPS; i++) {
-		if (g_app_if[i].stop && (apps & (1 << i))) {
-			spin_lock_bh(&g_polling_lock);
-			if (pause) {
-				pd->paused_apps |=  (1 << i);
-			} else {
-				pd->total_apps  &= ~(1 << i);
-				pd->paused_apps &= ~(1 << i);
-			}
-			spin_unlock_bh(&g_polling_lock);
-			g_app_if[i].stop(pd, pause);
-		}
-	}
-}
-
-/*
- * Context: softirq
- */
-void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
-{
-	int i, more = 0;
-
-	for (i = 0; i < OZ_NB_APPS; i++) {
-		if (g_app_if[i].heartbeat && (apps & (1 << i))) {
-			if (g_app_if[i].heartbeat(pd))
-				more = 1;
-		}
-	}
-	if ((!more) && (hrtimer_active(&pd->heartbeat)))
-		hrtimer_cancel(&pd->heartbeat);
-	if (pd->mode & OZ_F_ISOC_ANYTIME) {
-		int count = 8;
-
-		while (count-- && (oz_send_isoc_frame(pd) >= 0))
-			;
-	}
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_stop(struct oz_pd *pd)
-{
-	u16 stop_apps;
-
-	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
-	oz_pd_indicate_farewells(pd);
-	spin_lock_bh(&g_polling_lock);
-	stop_apps = pd->total_apps;
-	pd->total_apps = 0;
-	pd->paused_apps = 0;
-	spin_unlock_bh(&g_polling_lock);
-	oz_services_stop(pd, stop_apps, 0);
-	spin_lock_bh(&g_polling_lock);
-	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
-	/* Remove from PD list.*/
-	list_del(&pd->link);
-	spin_unlock_bh(&g_polling_lock);
-	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
-	oz_pd_put(pd);
-}
-
-/*
- * Context: softirq
- */
-int oz_pd_sleep(struct oz_pd *pd)
-{
-	int do_stop = 0;
-	u16 stop_apps;
-
-	spin_lock_bh(&g_polling_lock);
-	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
-		spin_unlock_bh(&g_polling_lock);
-		return 0;
-	}
-	if (pd->keep_alive && pd->session_id)
-		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
-	else
-		do_stop = 1;
-
-	stop_apps = pd->total_apps;
-	spin_unlock_bh(&g_polling_lock);
-	if (do_stop) {
-		oz_pd_stop(pd);
-	} else {
-		oz_services_stop(pd, stop_apps, 1);
-		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
-	}
-	return do_stop;
-}
-
-/*
- * Context: softirq
- */
-static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
-{
-	struct oz_tx_frame *f;
-
-	f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
-	if (f) {
-		f->total_size = sizeof(struct oz_hdr);
-		INIT_LIST_HEAD(&f->link);
-		INIT_LIST_HEAD(&f->elt_list);
-	}
-	return f;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
-{
-	pd->nb_queued_isoc_frames--;
-	list_del_init(&f->link);
-
-	kmem_cache_free(oz_tx_frame_cache, f);
-
-	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
-	       pd->nb_queued_isoc_frames);
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
-{
-	kmem_cache_free(oz_tx_frame_cache, f);
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_set_more_bit(struct sk_buff *skb)
-{
-	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
-
-	oz_hdr->control |= OZ_F_MORE_DATA;
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
-{
-	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
-
-	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
-}
-
-/*
- * Context: softirq
- */
-int oz_prepare_frame(struct oz_pd *pd, int empty)
-{
-	struct oz_tx_frame *f;
-
-	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
-		return -1;
-	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
-		return -1;
-	if (!empty && !oz_are_elts_available(&pd->elt_buff))
-		return -1;
-	f = oz_tx_frame_alloc(pd);
-	if (f == NULL)
-		return -1;
-	f->skb = NULL;
-	f->hdr.control =
-		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
-	++pd->last_tx_pkt_num;
-	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
-	if (empty == 0) {
-		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
-			pd->max_tx_size, &f->elt_list);
-	}
-	spin_lock(&pd->tx_frame_lock);
-	list_add_tail(&f->link, &pd->tx_queue);
-	pd->nb_queued_frames++;
-	spin_unlock(&pd->tx_frame_lock);
-	return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
-{
-	struct sk_buff *skb;
-	struct net_device *dev = pd->net_dev;
-	struct oz_hdr *oz_hdr;
-	struct oz_elt *elt;
-	struct oz_elt_info *ei;
-
-	/* Allocate skb with enough space for the lower layers as well
-	 * as the space we need.
-	 */
-	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
-	if (skb == NULL)
-		return NULL;
-	/* Reserve the head room for lower layers.
-	 */
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
-	skb_reset_network_header(skb);
-	skb->dev = dev;
-	skb->protocol = htons(OZ_ETHERTYPE);
-	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
-		dev->dev_addr, skb->len) < 0)
-		goto fail;
-	/* Push the tail to the end of the area we are going to copy to.
-	 */
-	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
-	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
-	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
-	/* Copy the elements into the frame body.
-	 */
-	elt = (struct oz_elt *)(oz_hdr+1);
-	list_for_each_entry(ei, &f->elt_list, link) {
-		memcpy(elt, ei->data, ei->length);
-		elt = oz_next_elt(elt);
-	}
-	return skb;
-fail:
-	kfree_skb(skb);
-	return NULL;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
-{
-	struct oz_elt_info *ei, *n;
-
-	list_for_each_entry_safe(ei, n, &f->elt_list, link) {
-		list_del_init(&ei->link);
-		if (ei->callback)
-			ei->callback(pd, ei->context);
-		spin_lock_bh(&pd->elt_buff.lock);
-		oz_elt_info_free(&pd->elt_buff, ei);
-		spin_unlock_bh(&pd->elt_buff.lock);
-	}
-	oz_tx_frame_free(pd, f);
-}
-
-/*
- * Context: softirq-serialized
- */
-static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
-{
-	struct sk_buff *skb;
-	struct oz_tx_frame *f;
-	struct list_head *e;
-
-	spin_lock(&pd->tx_frame_lock);
-	e = pd->last_sent_frame->next;
-	if (e == &pd->tx_queue) {
-		spin_unlock(&pd->tx_frame_lock);
-		return -1;
-	}
-	f = list_entry(e, struct oz_tx_frame, link);
-
-	if (f->skb != NULL) {
-		skb = f->skb;
-		oz_tx_isoc_free(pd, f);
-		spin_unlock(&pd->tx_frame_lock);
-		if (more_data)
-			oz_set_more_bit(skb);
-		oz_set_last_pkt_nb(pd, skb);
-		if ((int)atomic_read(&g_submitted_isoc) <
-							OZ_MAX_SUBMITTED_ISOC) {
-			if (dev_queue_xmit(skb) < 0) {
-				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
-				return -1;
-			}
-			atomic_inc(&g_submitted_isoc);
-			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
-			       pd->nb_queued_isoc_frames);
-			return 0;
-		}
-		kfree_skb(skb);
-		oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
-		return -1;
-	}
-
-	pd->last_sent_frame = e;
-	skb = oz_build_frame(pd, f);
-	spin_unlock(&pd->tx_frame_lock);
-	if (!skb)
-		return -1;
-	if (more_data)
-		oz_set_more_bit(skb);
-	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
-	if (dev_queue_xmit(skb) < 0)
-		return -1;
-
-	return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_send_queued_frames(struct oz_pd *pd, int backlog)
-{
-	while (oz_prepare_frame(pd, 0) >= 0)
-		backlog++;
-
-	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
-
-		case OZ_F_ISOC_NO_ELTS: {
-			backlog += pd->nb_queued_isoc_frames;
-			if (backlog <= 0)
-				goto out;
-			if (backlog > OZ_MAX_SUBMITTED_ISOC)
-				backlog = OZ_MAX_SUBMITTED_ISOC;
-			break;
-		}
-		case OZ_NO_ELTS_ANYTIME: {
-			if ((backlog <= 0) && (pd->isoc_sent == 0))
-				goto out;
-			break;
-		}
-		default: {
-			if (backlog <= 0)
-				goto out;
-			break;
-		}
-	}
-	while (backlog--) {
-		if (oz_send_next_queued_frame(pd, backlog) < 0)
-			break;
-	}
-	return;
-
-out:	oz_prepare_frame(pd, 1);
-	oz_send_next_queued_frame(pd, 0);
-}
-
-/*
- * Context: softirq
- */
-static int oz_send_isoc_frame(struct oz_pd *pd)
-{
-	struct sk_buff *skb;
-	struct net_device *dev = pd->net_dev;
-	struct oz_hdr *oz_hdr;
-	struct oz_elt *elt;
-	struct oz_elt_info *ei;
-	LIST_HEAD(list);
-	int total_size = sizeof(struct oz_hdr);
-
-	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
-		pd->max_tx_size, &list);
-	if (list_empty(&list))
-		return 0;
-	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
-	if (skb == NULL) {
-		oz_dbg(ON, "Cannot alloc skb\n");
-		oz_elt_info_free_chain(&pd->elt_buff, &list);
-		return -1;
-	}
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
-	skb_reset_network_header(skb);
-	skb->dev = dev;
-	skb->protocol = htons(OZ_ETHERTYPE);
-	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
-		dev->dev_addr, skb->len) < 0) {
-		kfree_skb(skb);
-		return -1;
-	}
-	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
-	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
-	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
-	elt = (struct oz_elt *)(oz_hdr+1);
-
-	list_for_each_entry(ei, &list, link) {
-		memcpy(elt, ei->data, ei->length);
-		elt = oz_next_elt(elt);
-	}
-	dev_queue_xmit(skb);
-	oz_elt_info_free_chain(&pd->elt_buff, &list);
-	return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
-{
-	struct oz_tx_frame *f, *tmp = NULL;
-	u8 diff;
-	u32 pkt_num;
-
-	LIST_HEAD(list);
-
-	spin_lock(&pd->tx_frame_lock);
-	list_for_each_entry(f, &pd->tx_queue, link) {
-		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
-		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
-		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
-			break;
-		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
-		       pkt_num, pd->nb_queued_frames);
-		tmp = f;
-		pd->nb_queued_frames--;
-	}
-	if (tmp)
-		list_cut_position(&list, &pd->tx_queue, &tmp->link);
-	pd->last_sent_frame = &pd->tx_queue;
-	spin_unlock(&pd->tx_frame_lock);
-
-	list_for_each_entry_safe(f, tmp, &list, link)
-		oz_retire_frame(pd, f);
-}
-
-/*
- * Precondition: stream_lock must be held.
- * Context: softirq
- */
-static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
-{
-	struct oz_isoc_stream *st;
-
-	list_for_each_entry(st, &pd->stream_list, link) {
-		if (st->ep_num == ep_num)
-			return st;
-	}
-	return NULL;
-}
-
-/*
- * Context: softirq
- */
-int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
-{
-	struct oz_isoc_stream *st;
-
-	st = kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
-	if (!st)
-		return -ENOMEM;
-	st->ep_num = ep_num;
-	spin_lock_bh(&pd->stream_lock);
-	if (!pd_stream_find(pd, ep_num)) {
-		list_add(&st->link, &pd->stream_list);
-		st = NULL;
-	}
-	spin_unlock_bh(&pd->stream_lock);
-	kfree(st);
-	return 0;
-}
-
-/*
- * Context: softirq or process
- */
-static void oz_isoc_stream_free(struct oz_isoc_stream *st)
-{
-	kfree_skb(st->skb);
-	kfree(st);
-}
-
-/*
- * Context: softirq
- */
-int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
-{
-	struct oz_isoc_stream *st;
-
-	spin_lock_bh(&pd->stream_lock);
-	st = pd_stream_find(pd, ep_num);
-	if (st)
-		list_del(&st->link);
-	spin_unlock_bh(&pd->stream_lock);
-	if (st)
-		oz_isoc_stream_free(st);
-	return 0;
-}
-
-/*
- * Context: any
- */
-static void oz_isoc_destructor(struct sk_buff *skb)
-{
-	atomic_dec(&g_submitted_isoc);
-}
-
-/*
- * Context: softirq
- */
-int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
-{
-	struct net_device *dev = pd->net_dev;
-	struct oz_isoc_stream *st;
-	u8 nb_units = 0;
-	struct sk_buff *skb = NULL;
-	struct oz_hdr *oz_hdr = NULL;
-	int size = 0;
-
-	spin_lock_bh(&pd->stream_lock);
-	st = pd_stream_find(pd, ep_num);
-	if (st) {
-		skb = st->skb;
-		st->skb = NULL;
-		nb_units = st->nb_units;
-		st->nb_units = 0;
-		oz_hdr = st->oz_hdr;
-		size = st->size;
-	}
-	spin_unlock_bh(&pd->stream_lock);
-	if (!st)
-		return 0;
-	if (!skb) {
-		/* Allocate enough space for max size frame. */
-		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
-				GFP_ATOMIC);
-		if (skb == NULL)
-			return 0;
-		/* Reserve the head room for lower layers. */
-		skb_reserve(skb, LL_RESERVED_SPACE(dev));
-		skb_reset_network_header(skb);
-		skb->dev = dev;
-		skb->protocol = htons(OZ_ETHERTYPE);
-		/* For audio packet set priority to AC_VO */
-		skb->priority = 0x7;
-		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
-		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
-	}
-	memcpy(skb_put(skb, len), data, len);
-	size += len;
-	if (++nb_units < pd->ms_per_isoc) {
-		spin_lock_bh(&pd->stream_lock);
-		st->skb = skb;
-		st->nb_units = nb_units;
-		st->oz_hdr = oz_hdr;
-		st->size = size;
-		spin_unlock_bh(&pd->stream_lock);
-	} else {
-		struct oz_hdr oz;
-		struct oz_isoc_large iso;
-
-		spin_lock_bh(&pd->stream_lock);
-		iso.frame_number = st->frame_num;
-		st->frame_num += nb_units;
-		spin_unlock_bh(&pd->stream_lock);
-		oz.control =
-			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
-		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
-		oz.pkt_num = 0;
-		iso.endpoint = ep_num;
-		iso.format = OZ_DATA_F_ISOC_LARGE;
-		iso.ms_data = nb_units;
-		memcpy(oz_hdr, &oz, sizeof(oz));
-		memcpy(oz_hdr+1, &iso, sizeof(iso));
-		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
-				dev->dev_addr, skb->len) < 0)
-			goto out;
-
-		skb->destructor = oz_isoc_destructor;
-		/*Queue for Xmit if mode is not ANYTIME*/
-		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
-			struct oz_tx_frame *isoc_unit = NULL;
-			int nb = pd->nb_queued_isoc_frames;
-
-			if (nb >= pd->isoc_latency) {
-				struct oz_tx_frame *f;
-
-				oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
-				       nb);
-				spin_lock(&pd->tx_frame_lock);
-				list_for_each_entry(f, &pd->tx_queue, link) {
-					if (f->skb != NULL) {
-						oz_tx_isoc_free(pd, f);
-						break;
-					}
-				}
-				spin_unlock(&pd->tx_frame_lock);
-			}
-			isoc_unit = oz_tx_frame_alloc(pd);
-			if (isoc_unit == NULL)
-				goto out;
-			isoc_unit->hdr = oz;
-			isoc_unit->skb = skb;
-			spin_lock_bh(&pd->tx_frame_lock);
-			list_add_tail(&isoc_unit->link, &pd->tx_queue);
-			pd->nb_queued_isoc_frames++;
-			spin_unlock_bh(&pd->tx_frame_lock);
-			oz_dbg(TX_FRAMES,
-			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
-			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
-			return 0;
-		}
-
-		/*In ANYTIME mode Xmit unit immediately*/
-		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
-			atomic_inc(&g_submitted_isoc);
-			if (dev_queue_xmit(skb) < 0)
-				return -1;
-			return 0;
-		}
-
-out:	kfree_skb(skb);
-	return -1;
-
-	}
-	return 0;
-}
-
-/*
- * Context: process
- */
-void oz_apps_init(void)
-{
-	int i;
-
-	for (i = 0; i < OZ_NB_APPS; i++) {
-		if (g_app_if[i].init)
-			g_app_if[i].init();
-	}
-}
-
-/*
- * Context: process
- */
-void oz_apps_term(void)
-{
-	int i;
-
-	/* Terminate all the apps. */
-	for (i = 0; i < OZ_NB_APPS; i++) {
-		if (g_app_if[i].term)
-			g_app_if[i].term();
-	}
-}
-
-/*
- * Context: softirq-serialized
- */
-void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
-{
-	if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
-		g_app_if[app_id].rx(pd, elt);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_indicate_farewells(struct oz_pd *pd)
-{
-	struct oz_farewell *f;
-	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];
-
-	while (1) {
-		spin_lock_bh(&g_polling_lock);
-		if (list_empty(&pd->farewell_list)) {
-			spin_unlock_bh(&g_polling_lock);
-			break;
-		}
-		f = list_first_entry(&pd->farewell_list,
-				struct oz_farewell, link);
-		list_del(&f->link);
-		spin_unlock_bh(&g_polling_lock);
-		if (ai->farewell)
-			ai->farewell(pd, f->ep_num, f->report, f->len);
-		kfree(f);
-	}
-}
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
deleted file mode 100644
index 212fab0..0000000
--- a/drivers/staging/ozwpan/ozpd.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZPD_H_
-#define _OZPD_H_
-
-#include <linux/interrupt.h>
-#include "ozeltbuf.h"
-
-/* PD state
- */
-#define OZ_PD_S_IDLE		0x1
-#define OZ_PD_S_CONNECTED	0x2
-#define OZ_PD_S_SLEEP		0x4
-#define OZ_PD_S_STOPPED		0x8
-
-/* Timer event types.
- */
-#define OZ_TIMER_TOUT		1
-#define OZ_TIMER_HEARTBEAT	2
-#define OZ_TIMER_STOP		3
-
-/*
- * External spinlock variable
- */
-extern spinlock_t g_polling_lock;
-
-/* Data structure that holds information on a frame for transmission. This is
- * built when the frame is first transmitted and is used to rebuild the frame
- * if a re-transmission is required.
- */
-struct oz_tx_frame {
-	struct list_head link;
-	struct list_head elt_list;
-	struct oz_hdr hdr;
-	struct sk_buff *skb;
-	int total_size;
-};
-
-struct oz_isoc_stream {
-	struct list_head link;
-	u8 ep_num;
-	u8 frame_num;
-	u8 nb_units;
-	int size;
-	struct sk_buff *skb;
-	struct oz_hdr *oz_hdr;
-};
-
-struct oz_farewell {
-	struct list_head link;
-	u8 ep_num;
-	u8 index;
-	u8 len;
-	u8 report[0];
-};
-
-/* Data structure that holds information on a specific peripheral device (PD).
- */
-struct oz_pd {
-	struct list_head link;
-	atomic_t	ref_count;
-	u8		mac_addr[ETH_ALEN];
-	unsigned	state;
-	unsigned	state_flags;
-	unsigned	send_flags;
-	u16		total_apps;
-	u16		paused_apps;
-	u8		session_id;
-	u8		param_rsp_status;
-	u8		pd_info;
-	u8		isoc_sent;
-	u32		last_rx_pkt_num;
-	u32		last_tx_pkt_num;
-	struct timespec last_rx_timestamp;
-	u32		trigger_pkt_num;
-	unsigned long	pulse_time;
-	unsigned long	pulse_period;
-	unsigned long	presleep;
-	unsigned long	keep_alive;
-	struct oz_elt_buf elt_buff;
-	void		*app_ctx[OZ_NB_APPS];
-	spinlock_t	app_lock[OZ_NB_APPS];
-	int		max_tx_size;
-	u8		mode;
-	u8		ms_per_isoc;
-	unsigned	isoc_latency;
-	unsigned	max_stream_buffering;
-	int		nb_queued_frames;
-	int		nb_queued_isoc_frames;
-	spinlock_t	tx_frame_lock;
-	struct list_head *last_sent_frame;
-	struct list_head tx_queue;
-	struct list_head farewell_list;
-	spinlock_t	stream_lock;
-	struct list_head stream_list;
-	struct net_device *net_dev;
-	struct hrtimer  heartbeat;
-	struct hrtimer  timeout;
-	u8      timeout_type;
-	struct tasklet_struct   heartbeat_tasklet;
-	struct tasklet_struct   timeout_tasklet;
-	struct work_struct workitem;
-};
-
-#define OZ_MAX_QUEUED_FRAMES	4
-
-struct oz_pd *oz_pd_alloc(const u8 *mac_addr);
-void oz_pd_destroy(struct oz_pd *pd);
-void oz_pd_get(struct oz_pd *pd);
-void oz_pd_put(struct oz_pd *pd);
-void oz_pd_set_state(struct oz_pd *pd, unsigned state);
-void oz_pd_indicate_farewells(struct oz_pd *pd);
-int oz_pd_sleep(struct oz_pd *pd);
-void oz_pd_stop(struct oz_pd *pd);
-void oz_pd_heartbeat(struct oz_pd *pd, u16 apps);
-int oz_services_start(struct oz_pd *pd, u16 apps, int resume);
-void oz_services_stop(struct oz_pd *pd, u16 apps, int pause);
-int oz_prepare_frame(struct oz_pd *pd, int empty);
-void oz_send_queued_frames(struct oz_pd *pd, int backlog);
-void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn);
-int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num);
-int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num);
-int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len);
-void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt);
-void oz_apps_init(void);
-void oz_apps_term(void);
-
-extern struct kmem_cache *oz_elt_info_cache;
-extern struct kmem_cache *oz_tx_frame_cache;
-
-#endif /* Sentry */
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
deleted file mode 100644
index 1ba24a2..0000000
--- a/drivers/staging/ozwpan/ozproto.c
+++ /dev/null
@@ -1,813 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/errno.h>
-#include <linux/ieee80211.h>
-#include <linux/slab.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozusbsvc.h"
-
-#include "ozappif.h"
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#include <net/psnap.h>
-
-#define OZ_CF_CONN_SUCCESS	1
-#define OZ_CF_CONN_FAILURE	2
-
-#define OZ_DO_STOP		1
-#define OZ_DO_SLEEP		2
-
-struct oz_binding {
-	struct packet_type ptype;
-	char name[OZ_MAX_BINDING_LEN];
-	struct list_head link;
-};
-
-/*
- * External variable
- */
-
-DEFINE_SPINLOCK(g_polling_lock);
-/*
- * File-scope static variables.
- */
-static LIST_HEAD(g_pd_list);
-static LIST_HEAD(g_binding);
-static DEFINE_SPINLOCK(g_binding_lock);
-static struct sk_buff_head g_rx_queue;
-static u8 g_session_id;
-static u16 g_apps = 0x1;
-static int g_processing_rx;
-
-struct kmem_cache *oz_elt_info_cache;
-struct kmem_cache *oz_tx_frame_cache;
-
-/*
- * Context: softirq-serialized
- */
-static u8 oz_get_new_session_id(u8 exclude)
-{
-	if (++g_session_id == 0)
-		g_session_id = 1;
-	if (g_session_id == exclude) {
-		if (++g_session_id == 0)
-			g_session_id = 1;
-	}
-	return g_session_id;
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
-{
-	struct sk_buff *skb;
-	struct net_device *dev = pd->net_dev;
-	struct oz_hdr *oz_hdr;
-	struct oz_elt *elt;
-	struct oz_elt_connect_rsp *body;
-
-	int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
-			sizeof(struct oz_elt_connect_rsp);
-	skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
-	if (skb == NULL)
-		return;
-	skb_reserve(skb, LL_RESERVED_SPACE(dev));
-	skb_reset_network_header(skb);
-	oz_hdr = (struct oz_hdr *)skb_put(skb, sz);
-	elt = (struct oz_elt *)(oz_hdr+1);
-	body = (struct oz_elt_connect_rsp *)(elt+1);
-	skb->dev = dev;
-	skb->protocol = htons(OZ_ETHERTYPE);
-	/* Fill in device header */
-	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
-			dev->dev_addr, skb->len) < 0) {
-		kfree_skb(skb);
-		return;
-	}
-	oz_hdr->control = OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT;
-	oz_hdr->last_pkt_num = 0;
-	put_unaligned(0, &oz_hdr->pkt_num);
-	elt->type = OZ_ELT_CONNECT_RSP;
-	elt->length = sizeof(struct oz_elt_connect_rsp);
-	memset(body, 0, sizeof(struct oz_elt_connect_rsp));
-	body->status = status;
-	if (status == 0) {
-		body->mode = pd->mode;
-		body->session_id = pd->session_id;
-		put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
-	}
-	oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d", status);
-	dev_queue_xmit(skb);
-}
-
-/*
- * Context: softirq-serialized
- */
-static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
-{
-	unsigned long keep_alive = kalive & OZ_KALIVE_VALUE_MASK;
-
-	switch (kalive & OZ_KALIVE_TYPE_MASK) {
-	case OZ_KALIVE_SPECIAL:
-		pd->keep_alive = keep_alive * 1000*60*60*24*20;
-		break;
-	case OZ_KALIVE_SECS:
-		pd->keep_alive = keep_alive*1000;
-		break;
-	case OZ_KALIVE_MINS:
-		pd->keep_alive = keep_alive*1000*60;
-		break;
-	case OZ_KALIVE_HOURS:
-		pd->keep_alive = keep_alive*1000*60*60;
-		break;
-	default:
-		pd->keep_alive = 0;
-	}
-	oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
-}
-
-/*
- * Context: softirq-serialized
- */
-static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
-{
-	if (presleep)
-		pd->presleep = presleep*100;
-	else
-		pd->presleep = OZ_PRESLEEP_TOUT;
-	if (start_timer) {
-		spin_unlock(&g_polling_lock);
-		oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
-		spin_lock(&g_polling_lock);
-	}
-	oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
-}
-
-/*
- * Context: softirq-serialized
- */
-static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
-			const u8 *pd_addr, struct net_device *net_dev)
-{
-	struct oz_pd *pd;
-	struct oz_elt_connect_req *body =
-			(struct oz_elt_connect_req *)(elt+1);
-	u8 rsp_status = OZ_STATUS_SUCCESS;
-	u8 stop_needed = 0;
-	u16 new_apps = g_apps;
-	struct net_device *old_net_dev = NULL;
-	struct oz_pd *free_pd = NULL;
-
-	if (cur_pd) {
-		pd = cur_pd;
-		spin_lock_bh(&g_polling_lock);
-	} else {
-		struct oz_pd *pd2 = NULL;
-		struct list_head *e;
-
-		pd = oz_pd_alloc(pd_addr);
-		if (pd == NULL)
-			return NULL;
-		getnstimeofday(&pd->last_rx_timestamp);
-		spin_lock_bh(&g_polling_lock);
-		list_for_each(e, &g_pd_list) {
-			pd2 = list_entry(e, struct oz_pd, link);
-			if (ether_addr_equal(pd2->mac_addr, pd_addr)) {
-				free_pd = pd;
-				pd = pd2;
-				break;
-			}
-		}
-		if (pd != pd2)
-			list_add_tail(&pd->link, &g_pd_list);
-	}
-	if (pd == NULL) {
-		spin_unlock_bh(&g_polling_lock);
-		return NULL;
-	}
-	if (pd->net_dev != net_dev) {
-		old_net_dev = pd->net_dev;
-		dev_hold(net_dev);
-		pd->net_dev = net_dev;
-	}
-	oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
-	pd->max_tx_size = OZ_MAX_TX_SIZE;
-	pd->mode = body->mode;
-	pd->pd_info = body->pd_info;
-	if (pd->mode & OZ_F_ISOC_NO_ELTS) {
-		pd->ms_per_isoc = body->ms_per_isoc;
-		if (!pd->ms_per_isoc)
-			pd->ms_per_isoc = 4;
-
-		switch (body->ms_isoc_latency & OZ_LATENCY_MASK) {
-		case OZ_ONE_MS_LATENCY:
-			pd->isoc_latency = (body->ms_isoc_latency &
-					~OZ_LATENCY_MASK) / pd->ms_per_isoc;
-			break;
-		case OZ_TEN_MS_LATENCY:
-			pd->isoc_latency = ((body->ms_isoc_latency &
-				~OZ_LATENCY_MASK) * 10) / pd->ms_per_isoc;
-			break;
-		default:
-			pd->isoc_latency = OZ_MAX_TX_QUEUE_ISOC;
-		}
-	}
-	if (body->max_len_div16)
-		pd->max_tx_size = ((u16)body->max_len_div16)<<4;
-	oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
-	       pd->max_tx_size, pd->ms_per_isoc);
-	pd->max_stream_buffering = 3*1024;
-	pd->pulse_period = OZ_QUANTUM;
-	pd_set_presleep(pd, body->presleep, 0);
-	pd_set_keepalive(pd, body->keep_alive);
-
-	new_apps &= le16_to_cpu(get_unaligned(&body->apps));
-	if ((new_apps & 0x1) && (body->session_id)) {
-		if (pd->session_id) {
-			if (pd->session_id != body->session_id) {
-				rsp_status = OZ_STATUS_SESSION_MISMATCH;
-				goto done;
-			}
-		} else {
-			new_apps &= ~0x1;  /* Resume not permitted */
-			pd->session_id =
-				oz_get_new_session_id(body->session_id);
-		}
-	} else {
-		if (pd->session_id && !body->session_id) {
-			rsp_status = OZ_STATUS_SESSION_TEARDOWN;
-			stop_needed = 1;
-		} else {
-			new_apps &= ~0x1;  /* Resume not permitted */
-			pd->session_id =
-				oz_get_new_session_id(body->session_id);
-		}
-	}
-done:
-	if (rsp_status == OZ_STATUS_SUCCESS) {
-		u16 start_apps = new_apps & ~pd->total_apps & ~0x1;
-		u16 stop_apps = pd->total_apps & ~new_apps & ~0x1;
-		u16 resume_apps = new_apps & pd->paused_apps  & ~0x1;
-
-		spin_unlock_bh(&g_polling_lock);
-		oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
-		oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
-		       new_apps, pd->total_apps, pd->paused_apps);
-		if (start_apps) {
-			if (oz_services_start(pd, start_apps, 0))
-				rsp_status = OZ_STATUS_TOO_MANY_PDS;
-		}
-		if (resume_apps)
-			if (oz_services_start(pd, resume_apps, 1))
-				rsp_status = OZ_STATUS_TOO_MANY_PDS;
-		if (stop_apps)
-			oz_services_stop(pd, stop_apps, 0);
-		oz_pd_request_heartbeat(pd);
-	} else {
-		spin_unlock_bh(&g_polling_lock);
-	}
-	oz_send_conn_rsp(pd, rsp_status);
-	if (rsp_status != OZ_STATUS_SUCCESS) {
-		if (stop_needed)
-			oz_pd_stop(pd);
-		oz_pd_put(pd);
-		pd = NULL;
-	}
-	if (old_net_dev)
-		dev_put(old_net_dev);
-	if (free_pd)
-		oz_pd_destroy(free_pd);
-	return pd;
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
-			const u8 *report, u8 len)
-{
-	struct oz_farewell *f;
-	struct oz_farewell *f2;
-	int found = 0;
-
-	f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
-	if (!f)
-		return;
-	f->ep_num = ep_num;
-	f->index = index;
-	f->len = len;
-	memcpy(f->report, report, len);
-	oz_dbg(ON, "RX: Adding farewell report\n");
-	spin_lock(&g_polling_lock);
-	list_for_each_entry(f2, &pd->farewell_list, link) {
-		if ((f2->ep_num == ep_num) && (f2->index == index)) {
-			found = 1;
-			list_del(&f2->link);
-			break;
-		}
-	}
-	list_add_tail(&f->link, &pd->farewell_list);
-	spin_unlock(&g_polling_lock);
-	if (found)
-		kfree(f2);
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_rx_frame(struct sk_buff *skb)
-{
-	u8 *mac_hdr;
-	u8 *src_addr;
-	struct oz_elt *elt;
-	int length;
-	struct oz_pd *pd = NULL;
-	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
-	struct timespec current_time;
-	int dup = 0;
-	u32 pkt_num;
-
-	oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
-	       oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
-	mac_hdr = skb_mac_header(skb);
-	src_addr = &mac_hdr[ETH_ALEN];
-	length = skb->len;
-
-	/* Check the version field */
-	if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
-		oz_dbg(ON, "Incorrect protocol version: %d\n",
-		       oz_get_prot_ver(oz_hdr->control));
-		goto done;
-	}
-
-	pkt_num = le32_to_cpu(get_unaligned(&oz_hdr->pkt_num));
-
-	pd = oz_pd_find(src_addr);
-	if (pd) {
-		if (!(pd->state & OZ_PD_S_CONNECTED))
-			oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
-		getnstimeofday(&current_time);
-		if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
-			(pd->presleep < MSEC_PER_SEC))  {
-			oz_timer_add(pd, OZ_TIMER_TOUT,	pd->presleep);
-			pd->last_rx_timestamp = current_time;
-		}
-		if (pkt_num != pd->last_rx_pkt_num) {
-			pd->last_rx_pkt_num = pkt_num;
-		} else {
-			dup = 1;
-			oz_dbg(ON, "Duplicate frame\n");
-		}
-	}
-
-	if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
-		oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
-		pd->last_sent_frame = &pd->tx_queue;
-		if (oz_hdr->control & OZ_F_ACK) {
-			/* Retire completed frames */
-			oz_retire_tx_frames(pd, oz_hdr->last_pkt_num);
-		}
-		if ((oz_hdr->control & OZ_F_ACK_REQUESTED) &&
-				(pd->state == OZ_PD_S_CONNECTED)) {
-			int backlog = pd->nb_queued_frames;
-
-			pd->trigger_pkt_num = pkt_num;
-			/* Send queued frames */
-			oz_send_queued_frames(pd, backlog);
-		}
-	}
-
-	length -= sizeof(struct oz_hdr);
-	elt = (struct oz_elt *)((u8 *)oz_hdr + sizeof(struct oz_hdr));
-
-	while (length >= sizeof(struct oz_elt)) {
-		length -= sizeof(struct oz_elt) + elt->length;
-		if (length < 0)
-			break;
-		switch (elt->type) {
-		case OZ_ELT_CONNECT_REQ:
-			oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
-			pd = oz_connect_req(pd, elt, src_addr, skb->dev);
-			break;
-		case OZ_ELT_DISCONNECT:
-			oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
-			if (pd)
-				oz_pd_sleep(pd);
-			break;
-		case OZ_ELT_UPDATE_PARAM_REQ: {
-				struct oz_elt_update_param *body =
-					(struct oz_elt_update_param *)(elt + 1);
-				oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
-				if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
-					spin_lock(&g_polling_lock);
-					pd_set_keepalive(pd, body->keepalive);
-					pd_set_presleep(pd, body->presleep, 1);
-					spin_unlock(&g_polling_lock);
-				}
-			}
-			break;
-		case OZ_ELT_FAREWELL_REQ: {
-				struct oz_elt_farewell *body =
-					(struct oz_elt_farewell *)(elt + 1);
-				oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
-				oz_add_farewell(pd, body->ep_num,
-					body->index, body->report,
-					elt->length + 1 - sizeof(*body));
-			}
-			break;
-		case OZ_ELT_APP_DATA:
-			if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
-				struct oz_app_hdr *app_hdr =
-					(struct oz_app_hdr *)(elt+1);
-				if (dup)
-					break;
-				oz_handle_app_elt(pd, app_hdr->app_id, elt);
-			}
-			break;
-		default:
-			oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
-		}
-		elt = oz_next_elt(elt);
-	}
-done:
-	if (pd)
-		oz_pd_put(pd);
-	consume_skb(skb);
-}
-
-/*
- * Context: process
- */
-void oz_protocol_term(void)
-{
-	struct oz_binding *b, *t;
-
-	/* Walk the list of bindings and remove each one.
-	 */
-	spin_lock_bh(&g_binding_lock);
-	list_for_each_entry_safe(b, t, &g_binding, link) {
-		list_del(&b->link);
-		spin_unlock_bh(&g_binding_lock);
-		dev_remove_pack(&b->ptype);
-		if (b->ptype.dev)
-			dev_put(b->ptype.dev);
-		kfree(b);
-		spin_lock_bh(&g_binding_lock);
-	}
-	spin_unlock_bh(&g_binding_lock);
-	/* Walk the list of PDs and stop each one. This causes the PD to be
-	 * removed from the list so we can just pull each one from the head
-	 * of the list.
-	 */
-	spin_lock_bh(&g_polling_lock);
-	while (!list_empty(&g_pd_list)) {
-		struct oz_pd *pd =
-			list_first_entry(&g_pd_list, struct oz_pd, link);
-		oz_pd_get(pd);
-		spin_unlock_bh(&g_polling_lock);
-		oz_pd_stop(pd);
-		oz_pd_put(pd);
-		spin_lock_bh(&g_polling_lock);
-	}
-	spin_unlock_bh(&g_polling_lock);
-	oz_dbg(ON, "Protocol stopped\n");
-
-	kmem_cache_destroy(oz_tx_frame_cache);
-	kmem_cache_destroy(oz_elt_info_cache);
-}
-
-/*
- * Context: softirq
- */
-void oz_pd_heartbeat_handler(unsigned long data)
-{
-	struct oz_pd *pd = (struct oz_pd *)data;
-	u16 apps = 0;
-
-	spin_lock_bh(&g_polling_lock);
-	if (pd->state & OZ_PD_S_CONNECTED)
-		apps = pd->total_apps;
-	spin_unlock_bh(&g_polling_lock);
-	if (apps)
-		oz_pd_heartbeat(pd, apps);
-	oz_pd_put(pd);
-}
-
-/*
- * Context: softirq
- */
-void oz_pd_timeout_handler(unsigned long data)
-{
-	int type;
-	struct oz_pd *pd = (struct oz_pd *)data;
-
-	spin_lock_bh(&g_polling_lock);
-	type = pd->timeout_type;
-	spin_unlock_bh(&g_polling_lock);
-	switch (type) {
-	case OZ_TIMER_TOUT:
-		oz_pd_sleep(pd);
-		break;
-	case OZ_TIMER_STOP:
-		oz_pd_stop(pd);
-		break;
-	}
-	oz_pd_put(pd);
-}
-
-/*
- * Context: Interrupt
- */
-enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
-{
-	struct oz_pd *pd;
-
-	pd = container_of(timer, struct oz_pd, heartbeat);
-	hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
-	MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
-	oz_pd_get(pd);
-	tasklet_schedule(&pd->heartbeat_tasklet);
-	return HRTIMER_RESTART;
-}
-
-/*
- * Context: Interrupt
- */
-enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
-{
-	struct oz_pd *pd;
-
-	pd = container_of(timer, struct oz_pd, timeout);
-	oz_pd_get(pd);
-	tasklet_schedule(&pd->timeout_tasklet);
-	return HRTIMER_NORESTART;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
-{
-	spin_lock_bh(&g_polling_lock);
-	switch (type) {
-	case OZ_TIMER_TOUT:
-	case OZ_TIMER_STOP:
-		if (hrtimer_active(&pd->timeout)) {
-			hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
-			MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
-							NSEC_PER_MSEC));
-			hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
-		} else {
-			hrtimer_start(&pd->timeout, ktime_set(due_time /
-			MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
-					NSEC_PER_MSEC), HRTIMER_MODE_REL);
-		}
-		pd->timeout_type = type;
-		break;
-	case OZ_TIMER_HEARTBEAT:
-		if (!hrtimer_active(&pd->heartbeat))
-			hrtimer_start(&pd->heartbeat, ktime_set(due_time /
-			MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
-					NSEC_PER_MSEC), HRTIMER_MODE_REL);
-		break;
-	}
-	spin_unlock_bh(&g_polling_lock);
-}
-
-/*
- * Context: softirq or process
- */
-void oz_pd_request_heartbeat(struct oz_pd *pd)
-{
-	oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
-					pd->pulse_period : OZ_QUANTUM);
-}
-
-/*
- * Context: softirq or process
- */
-struct oz_pd *oz_pd_find(const u8 *mac_addr)
-{
-	struct oz_pd *pd;
-
-	spin_lock_bh(&g_polling_lock);
-	list_for_each_entry(pd, &g_pd_list, link) {
-		if (ether_addr_equal(pd->mac_addr, mac_addr)) {
-			oz_pd_get(pd);
-			spin_unlock_bh(&g_polling_lock);
-			return pd;
-		}
-	}
-	spin_unlock_bh(&g_polling_lock);
-	return NULL;
-}
-
-/*
- * Context: process
- */
-void oz_app_enable(int app_id, int enable)
-{
-	if (app_id < OZ_NB_APPS) {
-		spin_lock_bh(&g_polling_lock);
-		if (enable)
-			g_apps |= (1<<app_id);
-		else
-			g_apps &= ~(1<<app_id);
-		spin_unlock_bh(&g_polling_lock);
-	}
-}
-
-/*
- * Context: softirq
- */
-static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
-		struct packet_type *pt, struct net_device *orig_dev)
-{
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		return 0;
-	spin_lock_bh(&g_rx_queue.lock);
-	if (g_processing_rx) {
-		/* We already hold the lock so use __ variant.
-		 */
-		__skb_queue_head(&g_rx_queue, skb);
-		spin_unlock_bh(&g_rx_queue.lock);
-	} else {
-		g_processing_rx = 1;
-		do {
-
-			spin_unlock_bh(&g_rx_queue.lock);
-			oz_rx_frame(skb);
-			spin_lock_bh(&g_rx_queue.lock);
-			if (skb_queue_empty(&g_rx_queue)) {
-				g_processing_rx = 0;
-				spin_unlock_bh(&g_rx_queue.lock);
-				break;
-			}
-			/* We already hold the lock so use __ variant.
-			 */
-			skb = __skb_dequeue(&g_rx_queue);
-		} while (1);
-	}
-	return 0;
-}
-
-/*
- * Context: process
- */
-void oz_binding_add(const char *net_dev)
-{
-	struct oz_binding *binding;
-
-	binding = kzalloc(sizeof(struct oz_binding), GFP_KERNEL);
-	if (!binding)
-		return;
-
-	binding->ptype.type = htons(OZ_ETHERTYPE);
-	binding->ptype.func = oz_pkt_recv;
-	if (net_dev && *net_dev) {
-		memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
-		oz_dbg(ON, "Adding binding: %s\n", net_dev);
-		binding->ptype.dev = dev_get_by_name(&init_net, net_dev);
-		if (binding->ptype.dev == NULL) {
-			oz_dbg(ON, "Netdev %s not found\n", net_dev);
-			kfree(binding);
-			return;
-		}
-	}
-	dev_add_pack(&binding->ptype);
-	spin_lock_bh(&g_binding_lock);
-	list_add_tail(&binding->link, &g_binding);
-	spin_unlock_bh(&g_binding_lock);
-}
-
-/*
- * Context: process
- */
-static void pd_stop_all_for_device(struct net_device *net_dev)
-{
-	LIST_HEAD(h);
-	struct oz_pd *pd;
-	struct oz_pd *n;
-
-	spin_lock_bh(&g_polling_lock);
-	list_for_each_entry_safe(pd, n, &g_pd_list, link) {
-		if (pd->net_dev == net_dev) {
-			list_move(&pd->link, &h);
-			oz_pd_get(pd);
-		}
-	}
-	spin_unlock_bh(&g_polling_lock);
-	while (!list_empty(&h)) {
-		pd = list_first_entry(&h, struct oz_pd, link);
-		oz_pd_stop(pd);
-		oz_pd_put(pd);
-	}
-}
-
-/*
- * Context: process
- */
-void oz_binding_remove(const char *net_dev)
-{
-	struct oz_binding *binding;
-	int found = 0;
-
-	oz_dbg(ON, "Removing binding: %s\n", net_dev);
-	spin_lock_bh(&g_binding_lock);
-	list_for_each_entry(binding, &g_binding, link) {
-		if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
-			oz_dbg(ON, "Binding '%s' found\n", net_dev);
-			found = 1;
-			break;
-		}
-	}
-	spin_unlock_bh(&g_binding_lock);
-	if (found) {
-		dev_remove_pack(&binding->ptype);
-		if (binding->ptype.dev) {
-			dev_put(binding->ptype.dev);
-			pd_stop_all_for_device(binding->ptype.dev);
-		}
-		list_del(&binding->link);
-		kfree(binding);
-	}
-}
-
-/*
- * Context: process
- */
-static char *oz_get_next_device_name(char *s, char *dname, int max_size)
-{
-	while (*s == ',')
-		s++;
-	while (*s && (*s != ',') && max_size > 1) {
-		*dname++ = *s++;
-		max_size--;
-	}
-	*dname = 0;
-	return s;
-}
-
-/*
- * Context: process
- */
-int oz_protocol_init(char *devs)
-{
-	oz_elt_info_cache = KMEM_CACHE(oz_elt_info, 0);
-	if (!oz_elt_info_cache)
-		return -ENOMEM;
-
-	oz_tx_frame_cache = KMEM_CACHE(oz_tx_frame, 0);
-	if (!oz_tx_frame_cache) {
-		kmem_cache_destroy(oz_elt_info_cache);
-		return -ENOMEM;
-	}
-
-	skb_queue_head_init(&g_rx_queue);
-	if (devs[0] == '*') {
-		oz_binding_add(NULL);
-	} else {
-		char d[32];
-
-		while (*devs) {
-			devs = oz_get_next_device_name(devs, d, sizeof(d));
-			if (d[0])
-				oz_binding_add(d);
-		}
-	}
-	return 0;
-}
-
-/*
- * Context: process
- */
-int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
-{
-	struct oz_pd *pd;
-	int count = 0;
-
-	spin_lock_bh(&g_polling_lock);
-	list_for_each_entry(pd, &g_pd_list, link) {
-		if (count >= max_count)
-			break;
-		ether_addr_copy((u8 *)&addr[count++], pd->mac_addr);
-	}
-	spin_unlock_bh(&g_polling_lock);
-	return count;
-}
-
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
deleted file mode 100644
index 30c2db9..0000000
--- a/drivers/staging/ozwpan/ozproto.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZPROTO_H
-#define _OZPROTO_H
-
-#include <asm/byteorder.h>
-#include "ozdbg.h"
-#include "ozappif.h"
-
-#define OZ_ALLOCATED_SPACE(__x)	(LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
-
-/* Quantum in MS */
-#define OZ_QUANTUM		8
-/* Default timeouts.
- */
-#define OZ_PRESLEEP_TOUT	11
-
-/* Maximum size of tx frames. */
-#define OZ_MAX_TX_SIZE		760
-
-/* Maximum number of uncompleted isoc frames that can be pending in network. */
-#define OZ_MAX_SUBMITTED_ISOC	16
-
-/* Maximum number of uncompleted isoc frames that can be pending in Tx Queue. */
-#define OZ_MAX_TX_QUEUE_ISOC	32
-
-/* Application handler functions.
- */
-struct oz_app_if {
-	int  (*init)(void);
-	void (*term)(void);
-	int  (*start)(struct oz_pd *pd, int resume);
-	void (*stop)(struct oz_pd *pd, int pause);
-	void (*rx)(struct oz_pd *pd, struct oz_elt *elt);
-	int  (*heartbeat)(struct oz_pd *pd);
-	void (*farewell)(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
-};
-
-int oz_protocol_init(char *devs);
-void oz_protocol_term(void);
-int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
-void oz_app_enable(int app_id, int enable);
-struct oz_pd *oz_pd_find(const u8 *mac_addr);
-void oz_binding_add(const char *net_dev);
-void oz_binding_remove(const char *net_dev);
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
-void oz_timer_delete(struct oz_pd *pd, int type);
-void oz_pd_request_heartbeat(struct oz_pd *pd);
-void oz_pd_heartbeat_handler(unsigned long data);
-void oz_pd_timeout_handler(unsigned long data);
-enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
-enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer);
-int oz_get_pd_status_list(char *pd_list, int max_count);
-int oz_get_binding_list(char *buf, int max_if);
-
-extern struct kmem_cache *oz_elt_info_cache;
-extern struct kmem_cache *oz_tx_frame_cache;
-
-#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/ozprotocol.h b/drivers/staging/ozwpan/ozprotocol.h
deleted file mode 100644
index 4642072..0000000
--- a/drivers/staging/ozwpan/ozprotocol.h
+++ /dev/null
@@ -1,375 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZPROTOCOL_H
-#define _OZPROTOCOL_H
-
-#define PACKED __packed
-
-#define OZ_ETHERTYPE 0x892e
-
-/* Status codes
- */
-#define OZ_STATUS_SUCCESS		0
-#define OZ_STATUS_INVALID_PARAM		1
-#define OZ_STATUS_TOO_MANY_PDS		2
-#define OZ_STATUS_NOT_ALLOWED		4
-#define OZ_STATUS_SESSION_MISMATCH	5
-#define OZ_STATUS_SESSION_TEARDOWN	6
-
-/* This is the generic element header.
-   Every element starts with this.
- */
-struct oz_elt {
-	u8 type;
-	u8 length;
-} PACKED;
-
-#define oz_next_elt(__elt)	\
-	(struct oz_elt *)((u8 *)((__elt) + 1) + (__elt)->length)
-
-/* Protocol element IDs.
- */
-#define OZ_ELT_CONNECT_REQ	0x06
-#define OZ_ELT_CONNECT_RSP	0x07
-#define OZ_ELT_DISCONNECT	0x08
-#define OZ_ELT_UPDATE_PARAM_REQ	0x11
-#define OZ_ELT_FAREWELL_REQ	0x12
-#define OZ_ELT_APP_DATA		0x31
-
-/* This is the Ozmo header which is the first Ozmo specific part
- * of a frame and comes after the MAC header.
- */
-struct oz_hdr {
-	u8	control;
-	u8	last_pkt_num;
-	u32	pkt_num;
-} PACKED;
-
-#define OZ_PROTOCOL_VERSION	0x1
-/* Bits in the control field. */
-#define OZ_VERSION_MASK		0xc
-#define OZ_VERSION_SHIFT	2
-#define OZ_F_ACK		0x10
-#define OZ_F_ISOC		0x20
-#define OZ_F_MORE_DATA		0x40
-#define OZ_F_ACK_REQUESTED	0x80
-
-#define oz_get_prot_ver(__x)	(((__x) & OZ_VERSION_MASK) >> OZ_VERSION_SHIFT)
-
-/* Used to select the bits of packet number to put in the last_pkt_num.
- */
-#define OZ_LAST_PN_MASK		0x00ff
-
-#define OZ_LAST_PN_HALF_CYCLE	127
-
-#define OZ_LATENCY_MASK		0xc0
-#define OZ_ONE_MS_LATENCY	0x40
-#define OZ_TEN_MS_LATENCY	0x80
-
-/* Connect request data structure.
- */
-struct oz_elt_connect_req {
-	u8	mode;
-	u8	resv1[16];
-	u8	pd_info;
-	u8	session_id;
-	u8	presleep;
-	u8	ms_isoc_latency;
-	u8	host_vendor;
-	u8	keep_alive;
-	u16	apps;
-	u8	max_len_div16;
-	u8	ms_per_isoc;
-	u8	resv3[2];
-} PACKED;
-
-/* mode field bits.
- */
-#define OZ_MODE_POLLED		0x0
-#define OZ_MODE_TRIGGERED	0x1
-#define OZ_MODE_MASK		0xf
-#define OZ_F_ISOC_NO_ELTS	0x40
-#define OZ_F_ISOC_ANYTIME	0x80
-#define OZ_NO_ELTS_ANYTIME	0xc0
-
-/* Keep alive field.
- */
-#define OZ_KALIVE_TYPE_MASK	0xc0
-#define OZ_KALIVE_VALUE_MASK	0x3f
-#define OZ_KALIVE_SPECIAL	0x00
-#define OZ_KALIVE_SECS		0x40
-#define OZ_KALIVE_MINS		0x80
-#define OZ_KALIVE_HOURS		0xc0
-
-/* Connect response data structure.
- */
-struct oz_elt_connect_rsp {
-	u8	mode;
-	u8	status;
-	u8	resv1[3];
-	u8	session_id;
-	u16	apps;
-	u32	resv2;
-} PACKED;
-
-struct oz_elt_farewell {
-	u8	ep_num;
-	u8	index;
-	u8	report[1];
-} PACKED;
-
-struct oz_elt_update_param {
-	u8	resv1[16];
-	u8	presleep;
-	u8	resv2;
-	u8	host_vendor;
-	u8	keepalive;
-} PACKED;
-
-/* Header common to all application elements.
- */
-struct oz_app_hdr {
-	u8	app_id;
-	u8	elt_seq_num;
-} PACKED;
-
-/* Values for app_id.
- */
-#define OZ_APPID_USB				0x1
-#define OZ_APPID_SERIAL				0x4
-#define OZ_APPID_MAX				OZ_APPID_SERIAL
-#define OZ_NB_APPS				(OZ_APPID_MAX+1)
-
-/* USB header common to all elements for the USB application.
- * This header extends the oz_app_hdr and comes directly after
- * the element header in a USB application.
- */
-struct oz_usb_hdr {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-} PACKED;
-
-
-
-/* USB requests element subtypes (type field of hs_usb_hdr).
- */
-#define OZ_GET_DESC_REQ			1
-#define OZ_GET_DESC_RSP			2
-#define OZ_SET_CONFIG_REQ		3
-#define OZ_SET_CONFIG_RSP		4
-#define OZ_SET_INTERFACE_REQ		5
-#define OZ_SET_INTERFACE_RSP		6
-#define OZ_VENDOR_CLASS_REQ		7
-#define OZ_VENDOR_CLASS_RSP		8
-#define OZ_GET_STATUS_REQ		9
-#define OZ_GET_STATUS_RSP		10
-#define OZ_CLEAR_FEATURE_REQ		11
-#define OZ_CLEAR_FEATURE_RSP		12
-#define OZ_SET_FEATURE_REQ		13
-#define OZ_SET_FEATURE_RSP		14
-#define OZ_GET_CONFIGURATION_REQ	15
-#define OZ_GET_CONFIGURATION_RSP	16
-#define OZ_GET_INTERFACE_REQ		17
-#define OZ_GET_INTERFACE_RSP		18
-#define OZ_SYNCH_FRAME_REQ		19
-#define OZ_SYNCH_FRAME_RSP		20
-#define OZ_USB_ENDPOINT_DATA		23
-
-#define OZ_REQD_D2H			0x80
-
-struct oz_get_desc_req {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u16	offset;
-	u16	size;
-	u8	req_type;
-	u8	desc_type;
-	__le16	w_index;
-	u8	index;
-} PACKED;
-
-/* Values for desc_type field.
-*/
-#define OZ_DESC_DEVICE			0x01
-#define OZ_DESC_CONFIG			0x02
-#define OZ_DESC_STRING			0x03
-
-/* Values for req_type field.
- */
-#define OZ_RECP_MASK			0x1F
-#define OZ_RECP_DEVICE			0x00
-#define OZ_RECP_INTERFACE		0x01
-#define OZ_RECP_ENDPOINT		0x02
-
-#define OZ_REQT_MASK			0x60
-#define OZ_REQT_STD			0x00
-#define OZ_REQT_CLASS			0x20
-#define OZ_REQT_VENDOR			0x40
-
-struct oz_get_desc_rsp {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	__le16	offset;
-	__le16	total_size;
-	u8	rcode;
-	u8	data[1];
-} PACKED;
-
-struct oz_feature_req {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	recipient;
-	u8	index;
-	u16	feature;
-} PACKED;
-
-struct oz_feature_rsp {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	rcode;
-} PACKED;
-
-struct oz_set_config_req {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	index;
-} PACKED;
-
-struct oz_set_config_rsp {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	rcode;
-} PACKED;
-
-struct oz_set_interface_req {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	index;
-	u8	alternative;
-} PACKED;
-
-struct oz_set_interface_rsp {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	rcode;
-} PACKED;
-
-struct oz_get_interface_req {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	index;
-} PACKED;
-
-struct oz_get_interface_rsp {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	rcode;
-	u8	alternative;
-} PACKED;
-
-struct oz_vendor_class_req {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	req_type;
-	u8	request;
-	u16	value;
-	u16	index;
-	u8	data[1];
-} PACKED;
-
-struct oz_vendor_class_rsp {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	req_id;
-	u8	rcode;
-	u8	data[1];
-} PACKED;
-
-struct oz_data {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	endpoint;
-	u8	format;
-} PACKED;
-
-struct oz_isoc_fixed {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	endpoint;
-	u8	format;
-	u8	unit_size;
-	u8	frame_number;
-	u8	data[1];
-} PACKED;
-
-struct oz_multiple_fixed {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	endpoint;
-	u8	format;
-	u8	unit_size;
-	u8	data[1];
-} PACKED;
-
-struct oz_fragmented {
-	u8	app_id;
-	u8	elt_seq_num;
-	u8	type;
-	u8	endpoint;
-	u8	format;
-	u16	total_size;
-	u16	offset;
-	u8	data[1];
-} PACKED;
-
-/* Note: the following does not get packaged in an element in the same way
- * that other data formats are packaged. Instead the data is put in a frame
- * directly after the oz_header and is the only permitted data in such a
- * frame. The length of the data is directly determined from the frame size.
- */
-struct oz_isoc_large {
-	u8	endpoint;
-	u8	format;
-	u8	ms_data;
-	u8	frame_number;
-} PACKED;
-
-#define OZ_DATA_F_TYPE_MASK		0xF
-#define OZ_DATA_F_MULTIPLE_FIXED	0x1
-#define OZ_DATA_F_MULTIPLE_VAR		0x2
-#define OZ_DATA_F_ISOC_FIXED		0x3
-#define OZ_DATA_F_ISOC_VAR		0x4
-#define OZ_DATA_F_FRAGMENTED		0x5
-#define OZ_DATA_F_ISOC_LARGE		0x7
-
-#endif /* _OZPROTOCOL_H */
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
deleted file mode 100644
index cf6278a..0000000
--- a/drivers/staging/ozwpan/ozurbparanoia.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include <linux/usb.h>
-#include "ozdbg.h"
-
-#ifdef WANT_URB_PARANOIA
-
-#include "ozurbparanoia.h"
-
-#define OZ_MAX_URBS	1000
-struct urb *g_urb_memory[OZ_MAX_URBS];
-int g_nb_urbs;
-DEFINE_SPINLOCK(g_urb_mem_lock);
-
-void oz_remember_urb(struct urb *urb)
-{
-	unsigned long irq_state;
-
-	spin_lock_irqsave(&g_urb_mem_lock, irq_state);
-	if (g_nb_urbs < OZ_MAX_URBS) {
-		g_urb_memory[g_nb_urbs++] = urb;
-		oz_dbg(ON, "urb up = %d %p\n", g_nb_urbs, urb);
-	} else {
-		oz_dbg(ON, "ERROR urb buffer full\n");
-	}
-	spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
-}
-
-/*
- */
-int oz_forget_urb(struct urb *urb)
-{
-	unsigned long irq_state;
-	int i;
-	int rc = -1;
-
-	spin_lock_irqsave(&g_urb_mem_lock, irq_state);
-	for (i = 0; i < g_nb_urbs; i++) {
-		if (g_urb_memory[i] == urb) {
-			rc = 0;
-			if (--g_nb_urbs > i)
-				memcpy(&g_urb_memory[i], &g_urb_memory[i+1],
-					(g_nb_urbs - i) * sizeof(struct urb *));
-			oz_dbg(ON, "urb down = %d %p\n", g_nb_urbs, urb);
-		}
-	}
-	spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
-	return rc;
-}
-#endif /* #ifdef WANT_URB_PARANOIA */
-
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
deleted file mode 100644
index 5080ea7..0000000
--- a/drivers/staging/ozwpan/ozurbparanoia.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _OZURBPARANOIA_H
-#define _OZURBPARANOIA_H
-/* -----------------------------------------------------------------------------
- * Released under the GNU General Public License Version 2 (GPLv2).
- * Copyright (c) 2011 Ozmo Inc
- * -----------------------------------------------------------------------------
- */
-
-#ifdef WANT_URB_PARANOIA
-void oz_remember_urb(struct urb *urb);
-int oz_forget_urb(struct urb *urb);
-#else
-static inline void oz_remember_urb(struct urb *urb) {}
-static inline int oz_forget_urb(struct urb *urb) { return 0; }
-#endif /* WANT_URB_PARANOIA */
-
-
-#endif /* _OZURBPARANOIA_H */
-
diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
deleted file mode 100644
index d2a6085..0000000
--- a/drivers/staging/ozwpan/ozusbif.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZUSBIF_H
-#define _OZUSBIF_H
-
-#include <linux/usb.h>
-
-/* Reference counting functions.
- */
-void oz_usb_get(void *hpd);
-void oz_usb_put(void *hpd);
-
-/* Stream functions.
- */
-int oz_usb_stream_create(void *hpd, u8 ep_num);
-int oz_usb_stream_delete(void *hpd, u8 ep_num);
-
-/* Request functions.
- */
-int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
-		const u8 *data, int data_len);
-int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
-	u8 index, __le16 windex, int offset, int len);
-int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb);
-void oz_usb_request_heartbeat(void *hpd);
-
-/* Confirmation functions.
- */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
-	const u8 *desc, u8 length, u16 offset, u16 total_size);
-void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
-	const u8 *data, int data_len);
-
-/* Indication functions.
- */
-void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len);
-
-int oz_hcd_heartbeat(void *hport);
-
-#endif /* _OZUSBIF_H */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
deleted file mode 100644
index bf15dc3..0000000
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- *
- * This file provides protocol independent part of the implementation of the USB
- * service for a PD.
- * The implementation of this service is split into two parts the first of which
- * is protocol independent and the second contains protocol specific details.
- * This split is to allow alternative protocols to be defined.
- * The implementation of this service uses ozhcd.c to implement a USB HCD.
- * -----------------------------------------------------------------------------
- */
-
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <asm/unaligned.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozusbif.h"
-#include "ozhcd.h"
-#include "ozusbsvc.h"
-
-/*
- * This is called once when the driver is loaded to initialise the USB service.
- * Context: process
- */
-int oz_usb_init(void)
-{
-	return oz_hcd_init();
-}
-
-/*
- * This is called once when the driver is unloaded to terminate the USB service.
- * Context: process
- */
-void oz_usb_term(void)
-{
-	oz_hcd_term();
-}
-
-/*
- * This is called when the USB service is started or resumed for a PD.
- * Context: softirq
- */
-int oz_usb_start(struct oz_pd *pd, int resume)
-{
-	int rc = 0;
-	struct oz_usb_ctx *usb_ctx;
-	struct oz_usb_ctx *old_ctx;
-
-	if (resume) {
-		oz_dbg(ON, "USB service resumed\n");
-		return 0;
-	}
-	oz_dbg(ON, "USB service started\n");
-	/* Create a USB context in case we need one. If we find the PD already
-	 * has a USB context then we will destroy it.
-	 */
-	usb_ctx = kzalloc(sizeof(struct oz_usb_ctx), GFP_ATOMIC);
-	if (usb_ctx == NULL)
-		return -ENOMEM;
-	atomic_set(&usb_ctx->ref_count, 1);
-	usb_ctx->pd = pd;
-	usb_ctx->stopped = 0;
-	/* Install the USB context if the PD doesn't already have one.
-	 * If it does already have one then destroy the one we have just
-	 * created.
-	 */
-	spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-	old_ctx = pd->app_ctx[OZ_APPID_USB];
-	if (old_ctx == NULL)
-		pd->app_ctx[OZ_APPID_USB] = usb_ctx;
-	oz_usb_get(pd->app_ctx[OZ_APPID_USB]);
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-	if (old_ctx) {
-		oz_dbg(ON, "Already have USB context\n");
-		kfree(usb_ctx);
-		usb_ctx = old_ctx;
-	} else if (usb_ctx) {
-		/* Take a reference to the PD. This will be released when
-		 * the USB context is destroyed.
-		 */
-		oz_pd_get(pd);
-	}
-	/* If we already had a USB context and had obtained a port from
-	 * the USB HCD then just reset the port. If we didn't have a port
-	 * then report the arrival to the USB HCD so we get one.
-	 */
-	if (usb_ctx->hport) {
-		oz_hcd_pd_reset(usb_ctx, usb_ctx->hport);
-	} else {
-		usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
-		if (usb_ctx->hport == NULL) {
-			oz_dbg(ON, "USB hub returned null port\n");
-			spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-			pd->app_ctx[OZ_APPID_USB] = NULL;
-			spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-			oz_usb_put(usb_ctx);
-			rc = -1;
-		}
-	}
-	oz_usb_put(usb_ctx);
-	return rc;
-}
-
-/*
- * This is called when the USB service is stopped or paused for a PD.
- * Context: softirq or process
- */
-void oz_usb_stop(struct oz_pd *pd, int pause)
-{
-	struct oz_usb_ctx *usb_ctx;
-
-	if (pause) {
-		oz_dbg(ON, "USB service paused\n");
-		return;
-	}
-	spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-	usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB];
-	pd->app_ctx[OZ_APPID_USB] = NULL;
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-	if (usb_ctx) {
-		struct timespec ts, now;
-
-		getnstimeofday(&ts);
-		oz_dbg(ON, "USB service stopping...\n");
-		usb_ctx->stopped = 1;
-		/* At this point the reference count on the usb context should
-		 * be 2 - one from when we created it and one from the hcd
-		 * which claims a reference. Since stopped = 1 no one else
-		 * should get in but someone may already be in. So wait
-		 * until they leave but timeout after 1 second.
-		 */
-		while ((atomic_read(&usb_ctx->ref_count) > 2)) {
-			getnstimeofday(&now);
-			/*Approx 1 Sec. this is not perfect calculation*/
-			if (now.tv_sec != ts.tv_sec)
-				break;
-		}
-		oz_dbg(ON, "USB service stopped\n");
-		oz_hcd_pd_departed(usb_ctx->hport);
-		/* Release the reference taken in oz_usb_start.
-		 */
-		oz_usb_put(usb_ctx);
-	}
-}
-
-/*
- * This increments the reference count of the context area for a specific PD.
- * This ensures this context area does not disappear while still in use.
- * Context: softirq
- */
-void oz_usb_get(void *hpd)
-{
-	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
-	atomic_inc(&usb_ctx->ref_count);
-}
-
-/*
- * This decrements the reference count of the context area for a specific PD
- * and destroys the context area if the reference count becomes zero.
- * Context: irq or process
- */
-void oz_usb_put(void *hpd)
-{
-	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
-	if (atomic_dec_and_test(&usb_ctx->ref_count)) {
-		oz_dbg(ON, "Dealloc USB context\n");
-		oz_pd_put(usb_ctx->pd);
-		kfree(usb_ctx);
-	}
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_heartbeat(struct oz_pd *pd)
-{
-	struct oz_usb_ctx *usb_ctx;
-	int rc = 0;
-
-	spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-	usb_ctx = (struct oz_usb_ctx *) pd->app_ctx[OZ_APPID_USB];
-	if (usb_ctx)
-		oz_usb_get(usb_ctx);
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-	if (usb_ctx == NULL)
-		return rc;
-	if (usb_ctx->stopped)
-		goto done;
-	if (usb_ctx->hport)
-		if (oz_hcd_heartbeat(usb_ctx->hport))
-			rc = 1;
-done:
-	oz_usb_put(usb_ctx);
-	return rc;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_stream_create(void *hpd, u8 ep_num)
-{
-	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-
-	oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
-	if (pd->mode & OZ_F_ISOC_NO_ELTS) {
-		oz_isoc_stream_create(pd, ep_num);
-	} else {
-		oz_pd_get(pd);
-		if (oz_elt_stream_create(&pd->elt_buff, ep_num,
-			4*pd->max_tx_size)) {
-			oz_pd_put(pd);
-			return -1;
-		}
-	}
-	return 0;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_stream_delete(void *hpd, u8 ep_num)
-{
-	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
-	if (usb_ctx) {
-		struct oz_pd *pd = usb_ctx->pd;
-
-		if (pd) {
-			oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
-			if (pd->mode & OZ_F_ISOC_NO_ELTS) {
-				oz_isoc_stream_delete(pd, ep_num);
-			} else {
-				if (oz_elt_stream_delete(&pd->elt_buff, ep_num))
-					return -1;
-				oz_pd_put(pd);
-			}
-		}
-	}
-	return 0;
-}
-
-/*
- * Context: softirq or process
- */
-void oz_usb_request_heartbeat(void *hpd)
-{
-	struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
-
-	if (usb_ctx && usb_ctx->pd)
-		oz_pd_request_heartbeat(usb_ctx->pd);
-}
diff --git a/drivers/staging/ozwpan/ozusbsvc.h b/drivers/staging/ozwpan/ozusbsvc.h
deleted file mode 100644
index 58e05a5..0000000
--- a/drivers/staging/ozwpan/ozusbsvc.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZUSBSVC_H
-#define _OZUSBSVC_H
-
-/*------------------------------------------------------------------------------
- * Per PD context info stored in application context area of PD.
- * This object is reference counted to ensure it doesn't disappear while
- * still in use.
- */
-struct oz_usb_ctx {
-	atomic_t ref_count;
-	u8 tx_seq_num;
-	u8 rx_seq_num;
-	struct oz_pd *pd;
-	void *hport;
-	int stopped;
-};
-
-int oz_usb_init(void);
-void oz_usb_term(void);
-int oz_usb_start(struct oz_pd *pd, int resume);
-void oz_usb_stop(struct oz_pd *pd, int pause);
-void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
-int oz_usb_heartbeat(struct oz_pd *pd);
-void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
-
-#endif /* _OZUSBSVC_H */
-
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
deleted file mode 100644
index 301fee8..0000000
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- *
- * This file implements the protocol specific parts of the USB service for a PD.
- * -----------------------------------------------------------------------------
- */
-#include <linux/module.h>
-#include <linux/timer.h>
-#include <linux/sched.h>
-#include <linux/netdevice.h>
-#include <linux/errno.h>
-#include <linux/input.h>
-#include <asm/unaligned.h>
-#include "ozdbg.h"
-#include "ozprotocol.h"
-#include "ozeltbuf.h"
-#include "ozpd.h"
-#include "ozproto.h"
-#include "ozusbif.h"
-#include "ozhcd.h"
-#include "ozusbsvc.h"
-
-#define MAX_ISOC_FIXED_DATA	(253-sizeof(struct oz_isoc_fixed))
-
-/*
- * Context: softirq
- */
-static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
-	struct oz_usb_ctx *usb_ctx, u8 strid, u8 isoc)
-{
-	int ret;
-	struct oz_elt *elt = (struct oz_elt *)ei->data;
-	struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
-
-	elt->type = OZ_ELT_APP_DATA;
-	ei->app_id = OZ_APPID_USB;
-	ei->length = elt->length + sizeof(struct oz_elt);
-	app_hdr->app_id = OZ_APPID_USB;
-	spin_lock_bh(&eb->lock);
-	if (isoc == 0) {
-		app_hdr->elt_seq_num = usb_ctx->tx_seq_num++;
-		if (usb_ctx->tx_seq_num == 0)
-			usb_ctx->tx_seq_num = 1;
-	}
-	ret = oz_queue_elt_info(eb, isoc, strid, ei);
-	if (ret)
-		oz_elt_info_free(eb, ei);
-	spin_unlock_bh(&eb->lock);
-	return ret;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
-	u8 index, __le16 windex, int offset, int len)
-{
-	struct oz_usb_ctx *usb_ctx = hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-	struct oz_elt *elt;
-	struct oz_get_desc_req *body;
-	struct oz_elt_buf *eb = &pd->elt_buff;
-	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
-
-	oz_dbg(ON, "    req_type = 0x%x\n", req_type);
-	oz_dbg(ON, "    desc_type = 0x%x\n", desc_type);
-	oz_dbg(ON, "    index = 0x%x\n", index);
-	oz_dbg(ON, "    windex = 0x%x\n", windex);
-	oz_dbg(ON, "    offset = 0x%x\n", offset);
-	oz_dbg(ON, "    len = 0x%x\n", len);
-	if (len > 200)
-		len = 200;
-	if (ei == NULL)
-		return -1;
-	elt = (struct oz_elt *)ei->data;
-	elt->length = sizeof(struct oz_get_desc_req);
-	body = (struct oz_get_desc_req *)(elt+1);
-	body->type = OZ_GET_DESC_REQ;
-	body->req_id = req_id;
-	put_unaligned(cpu_to_le16(offset), &body->offset);
-	put_unaligned(cpu_to_le16(len), &body->size);
-	body->req_type = req_type;
-	body->desc_type = desc_type;
-	body->w_index = windex;
-	body->index = index;
-	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
-{
-	struct oz_usb_ctx *usb_ctx = hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-	struct oz_elt *elt;
-	struct oz_elt_buf *eb = &pd->elt_buff;
-	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
-	struct oz_set_config_req *body;
-
-	if (ei == NULL)
-		return -1;
-	elt = (struct oz_elt *)ei->data;
-	elt->length = sizeof(struct oz_set_config_req);
-	body = (struct oz_set_config_req *)(elt+1);
-	body->type = OZ_SET_CONFIG_REQ;
-	body->req_id = req_id;
-	body->index = index;
-	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
-{
-	struct oz_usb_ctx *usb_ctx = hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-	struct oz_elt *elt;
-	struct oz_elt_buf *eb = &pd->elt_buff;
-	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
-	struct oz_set_interface_req *body;
-
-	if (ei == NULL)
-		return -1;
-	elt = (struct oz_elt *)ei->data;
-	elt->length = sizeof(struct oz_set_interface_req);
-	body = (struct oz_set_interface_req *)(elt+1);
-	body->type = OZ_SET_INTERFACE_REQ;
-	body->req_id = req_id;
-	body->index = index;
-	body->alternative = alt;
-	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
-			u8 recipient, u8 index, __le16 feature)
-{
-	struct oz_usb_ctx *usb_ctx = hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-	struct oz_elt *elt;
-	struct oz_elt_buf *eb = &pd->elt_buff;
-	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
-	struct oz_feature_req *body;
-
-	if (ei == NULL)
-		return -1;
-	elt = (struct oz_elt *)ei->data;
-	elt->length = sizeof(struct oz_feature_req);
-	body = (struct oz_feature_req *)(elt+1);
-	body->type = type;
-	body->req_id = req_id;
-	body->recipient = recipient;
-	body->index = index;
-	put_unaligned(feature, &body->feature);
-	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
-	u8 request, __le16 value, __le16 index, const u8 *data, int data_len)
-{
-	struct oz_usb_ctx *usb_ctx = hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-	struct oz_elt *elt;
-	struct oz_elt_buf *eb = &pd->elt_buff;
-	struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
-	struct oz_vendor_class_req *body;
-
-	if (ei == NULL)
-		return -1;
-	elt = (struct oz_elt *)ei->data;
-	elt->length = sizeof(struct oz_vendor_class_req) - 1 + data_len;
-	body = (struct oz_vendor_class_req *)(elt+1);
-	body->type = OZ_VENDOR_CLASS_REQ;
-	body->req_id = req_id;
-	body->req_type = req_type;
-	body->request = request;
-	put_unaligned(value, &body->value);
-	put_unaligned(index, &body->index);
-	if (data_len)
-		memcpy(body->data, data, data_len);
-	return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
-}
-
-/*
- * Context: tasklet
- */
-int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
-			const u8 *data, int data_len)
-{
-	unsigned wvalue = le16_to_cpu(setup->wValue);
-	unsigned windex = le16_to_cpu(setup->wIndex);
-	unsigned wlength = le16_to_cpu(setup->wLength);
-	int rc = 0;
-
-	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
-		switch (setup->bRequest) {
-		case USB_REQ_GET_DESCRIPTOR:
-			rc = oz_usb_get_desc_req(hpd, req_id,
-				setup->bRequestType, (u8)(wvalue>>8),
-				(u8)wvalue, setup->wIndex, 0, wlength);
-			break;
-		case USB_REQ_SET_CONFIGURATION:
-			rc = oz_usb_set_config_req(hpd, req_id, (u8)wvalue);
-			break;
-		case USB_REQ_SET_INTERFACE: {
-				u8 if_num = (u8)windex;
-				u8 alt = (u8)wvalue;
-
-				rc = oz_usb_set_interface_req(hpd, req_id,
-					if_num, alt);
-			}
-			break;
-		case USB_REQ_SET_FEATURE:
-			rc = oz_usb_set_clear_feature_req(hpd, req_id,
-				OZ_SET_FEATURE_REQ,
-				setup->bRequestType & 0xf, (u8)windex,
-				setup->wValue);
-			break;
-		case USB_REQ_CLEAR_FEATURE:
-			rc = oz_usb_set_clear_feature_req(hpd, req_id,
-				OZ_CLEAR_FEATURE_REQ,
-				setup->bRequestType & 0xf,
-				(u8)windex, setup->wValue);
-			break;
-		}
-	} else {
-		rc = oz_usb_vendor_class_req(hpd, req_id, setup->bRequestType,
-			setup->bRequest, setup->wValue, setup->wIndex,
-			data, data_len);
-	}
-	return rc;
-}
-
-/*
- * Context: softirq
- */
-int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
-{
-	struct oz_usb_ctx *usb_ctx = hpd;
-	struct oz_pd *pd = usb_ctx->pd;
-	struct oz_elt_buf *eb;
-	int i;
-	int hdr_size;
-	u8 *data;
-	struct usb_iso_packet_descriptor *desc;
-
-	if (pd->mode & OZ_F_ISOC_NO_ELTS) {
-		for (i = 0; i < urb->number_of_packets; i++) {
-			u8 *data;
-
-			desc = &urb->iso_frame_desc[i];
-			data = ((u8 *)urb->transfer_buffer)+desc->offset;
-			oz_send_isoc_unit(pd, ep_num, data, desc->length);
-		}
-		return 0;
-	}
-
-	hdr_size = sizeof(struct oz_isoc_fixed) - 1;
-	eb = &pd->elt_buff;
-	i = 0;
-	while (i < urb->number_of_packets) {
-		struct oz_elt_info *ei = oz_elt_info_alloc(eb);
-		struct oz_elt *elt;
-		struct oz_isoc_fixed *body;
-		int unit_count;
-		int unit_size;
-		int rem;
-
-		if (ei == NULL)
-			return -1;
-		rem = MAX_ISOC_FIXED_DATA;
-		elt = (struct oz_elt *)ei->data;
-		body = (struct oz_isoc_fixed *)(elt + 1);
-		body->type = OZ_USB_ENDPOINT_DATA;
-		body->endpoint = ep_num;
-		body->format = OZ_DATA_F_ISOC_FIXED;
-		unit_size = urb->iso_frame_desc[i].length;
-		body->unit_size = (u8)unit_size;
-		data = ((u8 *)(elt+1)) + hdr_size;
-		unit_count = 0;
-		while (i < urb->number_of_packets) {
-			desc = &urb->iso_frame_desc[i];
-			if ((unit_size == desc->length) &&
-				(desc->length <= rem)) {
-				memcpy(data, ((u8 *)urb->transfer_buffer) +
-					desc->offset, unit_size);
-				data += unit_size;
-				rem -= unit_size;
-				unit_count++;
-				desc->status = 0;
-				desc->actual_length = desc->length;
-				i++;
-			} else {
-				break;
-			}
-		}
-		elt->length = hdr_size + MAX_ISOC_FIXED_DATA - rem;
-		/* Store the number of units in body->frame_number for the
-		 * moment. This field will be correctly determined before
-		 * the element is sent. */
-		body->frame_number = (u8)unit_count;
-		oz_usb_submit_elt(eb, ei, usb_ctx, ep_num,
-			pd->mode & OZ_F_ISOC_ANYTIME);
-	}
-	return 0;
-}
-
-/*
- * Context: softirq-serialized
- */
-static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
-	struct oz_usb_hdr *usb_hdr, int len)
-{
-	struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
-
-	switch (data_hdr->format) {
-	case OZ_DATA_F_MULTIPLE_FIXED: {
-			struct oz_multiple_fixed *body =
-				(struct oz_multiple_fixed *)data_hdr;
-			u8 *data = body->data;
-			unsigned int n;
-			if (!body->unit_size ||
-				len < sizeof(struct oz_multiple_fixed) - 1)
-				break;
-			n = (len - (sizeof(struct oz_multiple_fixed) - 1))
-				/ body->unit_size;
-			while (n--) {
-				oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
-					data, body->unit_size);
-				data += body->unit_size;
-			}
-		}
-		break;
-	case OZ_DATA_F_ISOC_FIXED: {
-			struct oz_isoc_fixed *body =
-				(struct oz_isoc_fixed *)data_hdr;
-			int data_len;
-			int unit_size = body->unit_size;
-			u8 *data = body->data;
-			int count;
-			int i;
-
-			if (len < sizeof(struct oz_isoc_fixed) - 1)
-				break;
-			data_len = len - (sizeof(struct oz_isoc_fixed) - 1);
-
-			if (!unit_size)
-				break;
-			count = data_len/unit_size;
-			for (i = 0; i < count; i++) {
-				oz_hcd_data_ind(usb_ctx->hport,
-					body->endpoint, data, unit_size);
-				data += unit_size;
-			}
-		}
-		break;
-	}
-
-}
-
-/*
- * This is called when the PD has received a USB element. The type of element
- * is determined and is then passed to an appropriate handler function.
- * Context: softirq-serialized
- */
-void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
-{
-	struct oz_usb_hdr *usb_hdr = (struct oz_usb_hdr *)(elt + 1);
-	struct oz_usb_ctx *usb_ctx;
-
-	spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-	usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
-	if (usb_ctx)
-		oz_usb_get(usb_ctx);
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-	if (usb_ctx == NULL)
-		return; /* Context has gone so nothing to do. */
-	if (usb_ctx->stopped)
-		goto done;
-	/* If sequence number is non-zero then check it is not a duplicate.
-	 * Zero sequence numbers are always accepted.
-	 */
-	if (usb_hdr->elt_seq_num != 0) {
-		if (((usb_ctx->rx_seq_num - usb_hdr->elt_seq_num) & 0x80) == 0)
-			/* Reject duplicate element. */
-			goto done;
-	}
-	usb_ctx->rx_seq_num = usb_hdr->elt_seq_num;
-	switch (usb_hdr->type) {
-	case OZ_GET_DESC_RSP: {
-			struct oz_get_desc_rsp *body =
-				(struct oz_get_desc_rsp *)usb_hdr;
-			u16 offs, total_size;
-			u8 data_len;
-
-			if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
-				break;
-			data_len = elt->length -
-					(sizeof(struct oz_get_desc_rsp) - 1);
-			offs = le16_to_cpu(get_unaligned(&body->offset));
-			total_size =
-				le16_to_cpu(get_unaligned(&body->total_size));
-			oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
-			oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
-					body->rcode, body->data,
-					data_len, offs, total_size);
-		}
-		break;
-	case OZ_SET_CONFIG_RSP: {
-			struct oz_set_config_rsp *body =
-				(struct oz_set_config_rsp *)usb_hdr;
-			oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
-				body->rcode, NULL, 0);
-		}
-		break;
-	case OZ_SET_INTERFACE_RSP: {
-			struct oz_set_interface_rsp *body =
-				(struct oz_set_interface_rsp *)usb_hdr;
-			oz_hcd_control_cnf(usb_ctx->hport,
-				body->req_id, body->rcode, NULL, 0);
-		}
-		break;
-	case OZ_VENDOR_CLASS_RSP: {
-			struct oz_vendor_class_rsp *body =
-				(struct oz_vendor_class_rsp *)usb_hdr;
-
-			if (elt->length <
-			    sizeof(struct oz_vendor_class_rsp) - 1)
-				break;
-
-			oz_hcd_control_cnf(usb_ctx->hport, body->req_id,
-				body->rcode, body->data, elt->length-
-				sizeof(struct oz_vendor_class_rsp)+1);
-		}
-		break;
-	case OZ_USB_ENDPOINT_DATA:
-		oz_usb_handle_ep_data(usb_ctx, usb_hdr, elt->length);
-		break;
-	}
-done:
-	oz_usb_put(usb_ctx);
-}
-
-/*
- * Context: softirq, process
- */
-void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
-{
-	struct oz_usb_ctx *usb_ctx;
-
-	spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
-	usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB];
-	if (usb_ctx)
-		oz_usb_get(usb_ctx);
-	spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
-	if (usb_ctx == NULL)
-		return; /* Context has gone so nothing to do. */
-	if (!usb_ctx->stopped) {
-		oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num);
-		oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
-	}
-	oz_usb_put(usb_ctx);
-}
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index bda208d..3e9ee7e 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -781,14 +781,18 @@
 		schedule_timeout_interruptible(msecs_to_jiffies(ms));
 }
 
-/* send a serial byte to the LCD panel. The caller is responsible for locking
-   if needed. */
+/*
+ * send a serial byte to the LCD panel. The caller is responsible for locking
+ * if needed.
+ */
 static void lcd_send_serial(int byte)
 {
 	int bit;
 
-	/* the data bit is set on D0, and the clock on STROBE.
-	 * LCD reads D0 on STROBE's rising edge. */
+	/*
+	 * the data bit is set on D0, and the clock on STROBE.
+	 * LCD reads D0 on STROBE's rising edge.
+	 */
 	for (bit = 0; bit < 8; bit++) {
 		bits.cl = BIT_CLR;	/* CLK low */
 		panel_set_bits();
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 581af88..5c45f8a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -379,7 +379,8 @@
 			if (pmlmeext->active_keep_alive_check) {
 				int stainfo_offset;
 
-				stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+				stainfo_offset =
+					rtw_stainfo_offset(pstapriv, psta);
 				if (stainfo_offset_valid(stainfo_offset))
 					chk_alive_list[chk_alive_num++] = stainfo_offset;
 				continue;
@@ -1584,7 +1585,7 @@
 		}
 	}
 
-	if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT)) {
+	if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)) {
 		if (!psta->no_short_slot_time_set) {
 			psta->no_short_slot_time_set = 1;
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index bc3fe10..993c7db 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -219,6 +219,7 @@
 	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
 
 	int len = 0;
+
 	len += snprintf(page + len, count - len, "ht_option=%d\n", pmlmepriv->htpriv.ht_option);
 	*eof = 1;
 	return len;
@@ -588,6 +589,7 @@
 
 	if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
 		int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
+
 		is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
 		if (is_signal_dbg && num != 2)
 			return count;
@@ -917,7 +919,7 @@
 		/*  5G */
 		if (pmlmeext->channel_set[i].ChannelNum >= 36 &&
 		    pmlmeext->channel_set[i].ChannelNum < 140) {
-			 /*  Find primary channel */
+			/*  Find primary channel */
 			if (((pmlmeext->channel_set[i].ChannelNum - 36) % 8 == 0) &&
 			    (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
 				index_5G = i;
@@ -927,7 +929,7 @@
 
 		if (pmlmeext->channel_set[i].ChannelNum >= 149 &&
 		    pmlmeext->channel_set[i].ChannelNum < 165) {
-			 /*  find primary channel */
+			/*  find primary channel */
 			if (((pmlmeext->channel_set[i].ChannelNum - 149) % 8 == 0) &&
 			    (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
 				index_5G = i;
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index dbaba2c..7b99ea9 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -551,7 +551,7 @@
 				bContinual = false;
 			}
 		} else if (ReadState & PG_STATE_DATA) {
-		/*   Data section Read ------------- */
+			/*   Data section Read ------------- */
 			efuse_WordEnableDataRead(hworden, tmpdata, data);
 			efuse_addr = efuse_addr + (word_cnts*2)+1;
 			ReadState = PG_STATE_HEADER;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index 11b780d..c3c5828 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -19,6 +19,8 @@
  ******************************************************************************/
 #define _IEEE80211_C
 
+#include <linux/ieee80211.h>
+
 #include <drv_types.h>
 #include <osdep_intf.h>
 #include <ieee80211.h>
@@ -1042,7 +1044,7 @@
 			elems->timeout_int = pos;
 			elems->timeout_int_len = elen;
 			break;
-		case WLAN_EID_HT_CAP:
+		case WLAN_EID_HT_CAPABILITY:
 			elems->ht_capabilities = pos;
 			elems->ht_capabilities_len = elen;
 			break;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index 8c05cb0..22f5b45 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -183,7 +183,7 @@
 			if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
 				rtw_indicate_disconnect(padapter);
 
-			rtw_free_assoc_resources(padapter, 1);
+			rtw_free_assoc_resources(padapter);
 
 			if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
 				_clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
@@ -271,7 +271,7 @@
 					if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
 						rtw_indicate_disconnect(padapter);
 
-					rtw_free_assoc_resources(padapter, 1);
+					rtw_free_assoc_resources(padapter);
 
 					if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
 						_clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
@@ -293,7 +293,7 @@
 			if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
 				rtw_indicate_disconnect(padapter);
 
-			rtw_free_assoc_resources(padapter, 1);
+			rtw_free_assoc_resources(padapter);
 
 			if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
 				_clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
@@ -366,7 +366,7 @@
 
 		if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
 		    (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
-			rtw_free_assoc_resources(padapter, 1);
+			rtw_free_assoc_resources(padapter);
 
 		if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
 			if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
@@ -415,7 +415,7 @@
 
 		rtw_disassoc_cmd(padapter, 0, true);
 		rtw_indicate_disconnect(padapter);
-		rtw_free_assoc_resources(padapter, 1);
+		rtw_free_assoc_resources(padapter);
 		rtw_pwr_wakeup(padapter);
 	}
 
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index 0558451..2b917a1 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -19,6 +19,7 @@
  ******************************************************************************/
 #define _RTW_MLME_C_
 
+#include <linux/ieee80211.h>
 
 #include <osdep_service.h>
 #include <drv_types.h>
@@ -160,7 +161,7 @@
 	return pnetwork;
 }
 
-static void _rtw_free_network(struct	mlme_priv *pmlmepriv , struct wlan_network *pnetwork, u8 isfreeall)
+static void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork, u8 isfreeall)
 {
 	u32 curr_time, delta_time;
 	u32 lifetime = SCANQUEUE_LIFETIME;
@@ -352,8 +353,8 @@
 		((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
 		((s_cap & WLAN_CAPABILITY_IBSS) ==
 		(d_cap & WLAN_CAPABILITY_IBSS)) &&
-		((s_cap & WLAN_CAPABILITY_BSS) ==
-		(d_cap & WLAN_CAPABILITY_BSS)));
+		((s_cap & WLAN_CAPABILITY_ESS) ==
+		(d_cap & WLAN_CAPABILITY_ESS)));
 }
 
 struct	wlan_network	*rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
@@ -581,7 +582,7 @@
 }
 
 /* TODO: Perry: For Power Management */
-void rtw_atimdone_event_callback(struct adapter	*adapter , u8 *pbuf)
+void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf)
 {
 	RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
 	return;
@@ -614,7 +615,7 @@
 			spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
 			ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue,  pnetwork->MacAddress);
 			if (ibss_wlan) {
-				memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8);
+				memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8);
 				spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
 				goto exit;
 			}
@@ -692,8 +693,8 @@
 			pmlmepriv->to_join = false;
 			s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
 			if (_SUCCESS == s_ret) {
-			     mod_timer(&pmlmepriv->assoc_timer,
-				       jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
+				mod_timer(&pmlmepriv->assoc_timer,
+					jiffies + msecs_to_jiffies(MAX_JOIN_TIMEOUT));
 			} else if (s_ret == 2) { /* there is no need to wait for join */
 				_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
 				rtw_indicate_connect(adapter);
@@ -703,7 +704,7 @@
 					if (--pmlmepriv->to_roaming == 0 ||
 					    _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
 						pmlmepriv->to_roaming = 0;
-						rtw_free_assoc_resources(adapter, 1);
+						rtw_free_assoc_resources(adapter);
 						rtw_indicate_disconnect(adapter);
 					} else {
 						pmlmepriv->to_join = true;
@@ -757,7 +758,19 @@
 /*
 *rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
 */
-void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
+void rtw_free_assoc_resources(struct adapter *adapter)
+{
+	struct	mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+	spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+	rtw_free_assoc_resources_locked(adapter);
+	spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+}
+
+/*
+*rtw_free_assoc_resources_locked: the caller has to lock pmlmepriv->lock
+*/
+void rtw_free_assoc_resources_locked(struct adapter *adapter)
 {
 	struct wlan_network *pwlan = NULL;
 	struct	mlme_priv *pmlmepriv = &adapter->mlmepriv;
@@ -792,8 +805,6 @@
 		rtw_init_bcmc_stainfo(adapter);
 	}
 
-	if (lock_scanned_queue)
-		spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
 
 	pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
 	if (pwlan)
@@ -804,8 +815,6 @@
 	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1)))
 		rtw_free_network_nolock(pmlmepriv, pwlan);
 
-	if (lock_scanned_queue)
-		spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
 	pmlmepriv->key_mask = 0;
 }
 
@@ -1301,7 +1310,7 @@
 
 		rtw_free_uc_swdec_pending_queue(adapter);
 
-		rtw_free_assoc_resources(adapter, 1);
+		rtw_free_assoc_resources(adapter);
 		rtw_indicate_disconnect(adapter);
 		spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
 		/*  remove the network entry in scanned_queue */
@@ -1382,7 +1391,7 @@
 				DBG_88E("%s try another roaming\n", __func__);
 				do_join_r = rtw_do_join(adapter);
 				if (_SUCCESS != do_join_r) {
-					DBG_88E("%s roaming do_join return %d\n", __func__ , do_join_r);
+					DBG_88E("%s roaming do_join return %d\n", __func__, do_join_r);
 					continue;
 				}
 				break;
@@ -1556,7 +1565,7 @@
 
 		rtw_disassoc_cmd(adapter, 0, true);
 		rtw_indicate_disconnect(adapter);
-		rtw_free_assoc_resources(adapter, 0);
+		rtw_free_assoc_resources_locked(adapter);
 	}
 
 	rtw_hal_get_def_var(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &(supp_ant_div));
@@ -1997,7 +2006,7 @@
 		p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
 		if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
 			out_len = *pout_len;
-			rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2 , pout_len);
+			rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2, pout_len);
 		}
 	}
 	return phtpriv->ht_option;
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index a0b8f66..935b48e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -1096,7 +1096,7 @@
 
 		/*  Check if the AP's supported rates are also supported by STA. */
 		for (j = 0; j < sta_bssrate_len; j++) {
-			 /*  Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
+			/*  Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
 			if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
 					== (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
 				break;
@@ -2932,7 +2932,7 @@
 
 	if (seq == 2) {
 		if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
-			 /*  legendary shared system */
+			/*  legendary shared system */
 			p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
 				pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
 
@@ -3367,7 +3367,7 @@
 	spin_unlock_bh(&pstapriv->asoc_list_lock);
 
 	/*  now the station is qualified to join our BSS... */
-	if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
+	if ((pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
 		/* 1 bss_cap_update & sta_info_update */
 		bss_cap_update_on_sta_join(padapter, pstat);
 		sta_info_update(padapter, pstat);
@@ -4155,8 +4155,8 @@
 	u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 	u8 *pframe = precv_frame->rx_data;
 
-	  if (ptable->func) {
-	 /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
+	if (ptable->func) {
+		/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
 		if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
 		    memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
 			return;
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index ec0a8a4..9765946 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -70,7 +70,7 @@
 		}
 	}
 	/* s2-3. */
-	rtw_free_assoc_resources(padapter, 1);
+	rtw_free_assoc_resources(padapter);
 
 	/* s2-4. */
 	rtw_free_network_queue(padapter, true);
@@ -549,12 +549,6 @@
 		    (unsigned long)padapter);
 }
 
-inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
-{
-	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-	pwrpriv->ips_deny_time = jiffies + msecs_to_jiffies(ms);
-}
-
 /*
 * rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
 * @adapter: pointer to struct adapter structure
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 8501eb8..44eeb03 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -74,10 +74,8 @@
 
 	precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
 
-	if (precvpriv->pallocated_frame_buf == NULL) {
-		res = _FAIL;
-		goto exit;
-	}
+	if (!precvpriv->pallocated_frame_buf)
+		return _FAIL;
 
 	precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
 
@@ -89,7 +87,7 @@
 		list_add_tail(&(precvframe->list),
 				     &(precvpriv->free_recv_queue.queue));
 
-		res = rtw_os_recv_resource_alloc(padapter, precvframe);
+		rtw_os_recv_resource_alloc(precvframe);
 
 		precvframe->len = 0;
 
@@ -107,8 +105,6 @@
 	precvpriv->signal_stat_sampling_interval = 1000; /* ms */
 
 	rtw_set_signal_stat_timer(precvpriv);
-exit:
-
 
 	return res;
 }
@@ -117,7 +113,6 @@
 {
 	struct adapter	*padapter = precvpriv->adapter;
 
-
 	rtw_free_uc_swdec_pending_queue(padapter);
 
 	if (precvpriv->pallocated_frame_buf) {
@@ -153,7 +148,6 @@
 		}
 	}
 
-
 	return (struct recv_frame *)hdr;
 }
 
@@ -170,14 +164,6 @@
 	return precvframe;
 }
 
-void rtw_init_recvframe(struct recv_frame *precvframe, struct recv_priv *precvpriv)
-{
-	/* Perry: This can be removed */
-	INIT_LIST_HEAD(&precvframe->list);
-
-	precvframe->len = 0;
-}
-
 int rtw_free_recvframe(struct recv_frame *precvframe,
 		       struct __queue *pfree_recv_queue)
 {
@@ -208,7 +194,6 @@
 
       spin_unlock_bh(&pfree_recv_queue->lock);
 
-
 	return _SUCCESS;
 }
 
@@ -217,7 +202,6 @@
 	struct adapter *padapter = precvframe->adapter;
 	struct recv_priv *precvpriv = &padapter->recvpriv;
 
-
 	list_del_init(&(precvframe->list));
 	list_add_tail(&(precvframe->list), get_list_head(queue));
 
@@ -226,7 +210,6 @@
 			precvpriv->free_recvframe_cnt++;
 	}
 
-
 	return _SUCCESS;
 }
 
@@ -421,7 +404,6 @@
 
 exit:
 
-
 	return res;
 }
 
@@ -483,7 +465,6 @@
 		return_packet = NULL;
 	}
 
-
 	return return_packet;
 }
 
@@ -502,7 +483,6 @@
 	struct rx_pkt_attrib *pattrib;
 	__be16 be_tmp;
 
-
 	pstapriv = &adapter->stapriv;
 
 	auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
@@ -561,7 +541,6 @@
 		prtnframe = precv_frame;
 	}
 
-
 		return prtnframe;
 }
 
@@ -573,7 +552,6 @@
 	u16 seq_ctrl = ((precv_frame->attrib.seq_num&0xffff) << 4) |
 		(precv_frame->attrib.frag_num & 0xf);
 
-
 	if (tid > 15) {
 		RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
 
@@ -590,7 +568,6 @@
 
 	prxcache->tid_rxseq[tid] = seq_ctrl;
 
-
 	return _SUCCESS;
 }
 
@@ -727,7 +704,6 @@
 	u8 *sta_addr = NULL;
 	int bmcast = IS_MCAST(pattrib->dst);
 
-
 	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
 	    (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
 		/*  filter packets that SA is myself or multicast or broadcast */
@@ -815,7 +791,6 @@
 	u8 *myhwaddr = myid(&adapter->eeprompriv);
 	int bmcast = IS_MCAST(pattrib->dst);
 
-
 	if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
 	    (check_fwstate(pmlmepriv, _FW_LINKED) == true ||
 	    check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
@@ -907,7 +882,6 @@
 
 exit:
 
-
 	return ret;
 }
 
@@ -922,7 +896,6 @@
 	unsigned char *mybssid  = get_bssid(pmlmepriv);
 	int ret = _SUCCESS;
 
-
 	if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
 		/* For AP mode, RA = BSSID, TX = STA(SRC_ADDR), A3 = DST_ADDR */
 		if (memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
@@ -967,7 +940,6 @@
 
 exit:
 
-
 	return ret;
 }
 
@@ -1149,7 +1121,6 @@
 	struct security_priv	*psecuritypriv = &adapter->securitypriv;
 	int ret = _SUCCESS;
 
-
 	bretry = GetRetry(ptr);
 	pda = get_da(ptr);
 	psa = get_sa(ptr);
@@ -1253,7 +1224,6 @@
 
 exit:
 
-
 	return ret;
 }
 
@@ -1273,7 +1243,6 @@
 	u8  ver = (unsigned char)(*ptr)&0x3;
 	struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
 
-
 	if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
 		int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
 		if (ch_set_idx >= 0)
@@ -1362,7 +1331,6 @@
 
 exit:
 
-
 	return retval;
 }
 
@@ -1445,7 +1413,6 @@
 	struct recv_frame *prframe, *pnextrframe;
 	struct __queue *pfree_recv_queue;
 
-
 	curfragnum = 0;
 	pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
 
@@ -1510,7 +1477,6 @@
 
 	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performance defrag!!!!!\n"));
 
-
 	return prframe;
 }
 
@@ -1528,7 +1494,6 @@
 	struct recv_frame *prtnframe = NULL;
 	struct __queue *pfree_recv_queue, *pdefrag_q;
 
-
 	pstapriv = &padapter->stapriv;
 
 	pfhdr = precv_frame;
@@ -1612,7 +1577,6 @@
 		}
 	}
 
-
 	return prtnframe;
 }
 
@@ -2116,7 +2080,6 @@
 	struct recv_priv *precvpriv;
 	s32 ret = _SUCCESS;
 
-
 	padapter = precvframe->adapter;
 
 	precvpriv = &padapter->recvpriv;
@@ -2129,7 +2092,6 @@
 
 	precvpriv->rx_pkts++;
 
-
 	return ret;
 
 _recv_entry_drop:
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index d870a5c..22839d5 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -1330,7 +1330,7 @@
 		bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
 
 		for (j = 0; j < 16; j++)
-			 pframe[payload_index++] = chain_buffer[j];
+			pframe[payload_index++] = chain_buffer[j];
 	}
 
 	if (payload_remainder > 0) {    /* If there is a short final block, then pad it,*/
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 32300df..077b39a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -19,6 +19,8 @@
  ******************************************************************************/
 #define _RTW_WLAN_UTIL_C_
 
+#include <linux/ieee80211.h>
+
 #include <osdep_service.h>
 #include <drv_types.h>
 #include <wifi.h>
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index fda169d..5dc0b90 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -868,7 +868,7 @@
 			/* check if enable ampdu */
 			if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
 				if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
-				pattrib->ampdu_en = true;
+					pattrib->ampdu_en = true;
 			}
 
 			/* re-check if enable ampdu by BA_starting_seqctrl */
@@ -1026,22 +1026,22 @@
 		/* adding icv, if necessary... */
 		if (pattrib->iv_len) {
 			switch (pattrib->encrypt) {
-				case _WEP40_:
-				case _WEP104_:
-					WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
-					break;
-				case _TKIP_:
-					if (bmcst)
-						TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
-					else
-						TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
-					break;
-				case _AES_:
-					if (bmcst)
-						AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
-					else
-						AES_IV(pattrib->iv, psta->dot11txpn, 0);
-					break;
+			case _WEP40_:
+			case _WEP104_:
+				WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+				break;
+			case _TKIP_:
+				if (bmcst)
+					TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+				else
+					TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+				break;
+			case _AES_:
+				if (bmcst)
+					AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+				else
+					AES_IV(pattrib->iv, psta->dot11txpn, 0);
+				break;
 			}
 
 			memcpy(pframe, pattrib->iv, pattrib->iv_len);
@@ -1769,7 +1769,7 @@
 	int bmcst = IS_MCAST(pattrib->ra);
 
 	if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false)
-	    return ret;
+		return ret;
 
 	if (pattrib->psta)
 		psta = pattrib->psta;
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index 15a1765..2633a13 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -659,11 +659,11 @@
 {
 	struct odm_ra_info *pRaInfo = NULL;
 
+	if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+		return;
 	ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
 		     ("macid =%d RateID = 0x%x RateMask = 0x%x SGIEnable =%d\n",
 		     macid, RateID, RateMask, SGIEnable));
-	if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
-		return;
 
 	pRaInfo = &(dm_odm->RAInfo[macid]);
 	pRaInfo->RateID = RateID;
@@ -676,10 +676,10 @@
 {
 	struct odm_ra_info *pRaInfo = NULL;
 
-	ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
-		     (" macid =%d Rssi =%d\n", macid, Rssi));
 	if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
 		return;
+	ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+		     (" macid =%d Rssi =%d\n", macid, Rssi));
 
 	pRaInfo = &(dm_odm->RAInfo[macid]);
 	pRaInfo->RssiStaRA = Rssi;
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index 8eb2b39..9c7e626 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -24,9 +24,9 @@
 
 #define read_next_pair(array, v1, v2, i)		\
 	 do {						\
-		 i += 2;				\
-		 v1 = array[i];				\
-		 v2 = array[i+1];			\
+		i += 2;					\
+		v1 = array[i];				\
+		v2 = array[i+1];			\
 	 } while (0)
 
 
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 170e3de..38e9fdc 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -31,18 +31,7 @@
 	uint cnt = 0;
 	char buf[128];
 
-	if (IS_81XXC(chip_vers)) {
-		cnt += sprintf((buf+cnt), "Chip Version Info: %s_",
-			       IS_92C_SERIAL(chip_vers) ?
-			       "CHIP_8192C" : "CHIP_8188C");
-	} else if (IS_92D(chip_vers)) {
-		cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8192D_");
-	} else if (IS_8723_SERIES(chip_vers)) {
-		cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8723A_");
-	} else if (IS_8188E(chip_vers)) {
-		cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
-	}
-
+	cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
 	cnt += sprintf((buf+cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
 		       "Normal_Chip" : "Test_Chip");
 	cnt += sprintf((buf+cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
@@ -60,18 +49,8 @@
 	else
 		cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_",
 			       chip_vers.CUTVersion);
-
-	if (IS_1T1R(chip_vers))
-		cnt += sprintf((buf+cnt), "1T1R_");
-	else if (IS_1T2R(chip_vers))
-		cnt += sprintf((buf+cnt), "1T2R_");
-	else if (IS_2T2R(chip_vers))
-		cnt += sprintf((buf+cnt), "2T2R_");
-	else
-		cnt += sprintf((buf+cnt), "UNKNOWN_RFTYPE(%d)_",
-			       chip_vers.RFType);
-
-	cnt += sprintf((buf+cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+	cnt += sprintf((buf+cnt), "1T1R_");
+	cnt += sprintf((buf+cnt), "RomVer(0)\n");
 
 	pr_info("%s", buf);
 }
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 5edb5c4..85c17ef 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -131,14 +131,6 @@
 		adapt->HalFunc.GetHwRegHandler(adapt, variable, val);
 }
 
-u8 rtw_hal_set_def_var(struct adapter *adapt, enum hal_def_variable var,
-		      void *val)
-{
-	if (adapt->HalFunc.SetHalDefVarHandler)
-		return adapt->HalFunc.SetHalDefVarHandler(adapt, var, val);
-	return _FAIL;
-}
-
 u8 rtw_hal_get_def_var(struct adapter *adapt,
 		       enum hal_def_variable var, void *val)
 {
@@ -156,22 +148,6 @@
 						      val1, set);
 }
 
-void rtw_hal_enable_interrupt(struct adapter *adapt)
-{
-	if (adapt->HalFunc.enable_interrupt)
-		adapt->HalFunc.enable_interrupt(adapt);
-	else
-		DBG_88E("%s: HalFunc.enable_interrupt is NULL!\n", __func__);
-}
-
-void rtw_hal_disable_interrupt(struct adapter *adapt)
-{
-	if (adapt->HalFunc.disable_interrupt)
-		adapt->HalFunc.disable_interrupt(adapt);
-	else
-		DBG_88E("%s: HalFunc.disable_interrupt is NULL!\n", __func__);
-}
-
 u32 rtw_hal_inirp_init(struct adapter *adapt)
 {
 	u32 rst = _FAIL;
@@ -269,14 +245,6 @@
 	return data;
 }
 
-void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
-			 u32 regaddr, u32 bitmask, u32 data)
-{
-	if (adapt->HalFunc.write_rfreg)
-		adapt->HalFunc.write_rfreg(adapt, rfpath, regaddr,
-					      bitmask, data);
-}
-
 void rtw_hal_set_bwmode(struct adapter *adapt,
 			enum ht_channel_width bandwidth, u8 offset)
 {
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 28b5e7b..710fdc3 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -1170,13 +1170,10 @@
 	}
 
 	for (i = 0; i < sta_cnt; i++) {
-		if (PWDB_rssi[i] != (0)) {
-			if (pHalData->fw_ractrl) {
-				/*  Report every sta's RSSI to FW */
-			} else {
-				ODM_RA_SetRSSI_8188E(
-				&(pHalData->odmpriv), (PWDB_rssi[i]&0xFF), (u8)((PWDB_rssi[i]>>16) & 0xFF));
-			}
+		if (PWDB_rssi[i] != 0) {
+			ODM_RA_SetRSSI_8188E(&pHalData->odmpriv,
+					     PWDB_rssi[i] & 0xFF,
+					     (PWDB_rssi[i] >> 16) & 0xFF);
 		}
 	}
 
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 0970927..38845d1 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -102,7 +102,7 @@
 		}
 	}
 	rtl88eu_dm_txpower_track_adjust(&hal_data->odmpriv, 1, &direction,
-				        &pwrtrac_value);
+					&pwrtrac_value);
 
 	if (direction == 1) {
 		/*  Increase TX power */
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 455ecdc..954cade 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -295,7 +295,7 @@
 			break;
 		}
 
-		if (rtstatus != true)
+		if (!rtstatus)
 			return false;
 	}
 
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 86347f2..0a62bfa 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -127,27 +127,6 @@
 	return ret;
 }
 
-u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
-{
-	u8 buf[3];
-	u8 res = _SUCCESS;
-	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
-	if (haldata->fw_ractrl) {
-
-		memset(buf, 0, 3);
-		put_unaligned_le32(mask, buf);
-
-		FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
-	} else {
-		DBG_88E("==>%s fw dont support RA\n", __func__);
-		res = _FAIL;
-	}
-
-
-	return res;
-}
-
 /* bitmap[0:27] = tx_rate_bitmap */
 /* bitmap[28:31]= Rate Adaptive id */
 /* arg[0:4] = macid */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 7904d22..a6295ca 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -128,7 +128,7 @@
 	padapter->HalData = NULL;
 }
 
-static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
+static void ReadChipVersion8188E(struct adapter *padapter)
 {
 	u32				value32;
 	struct HAL_VERSION		ChipVersion;
@@ -137,41 +137,17 @@
 	pHalData = GET_HAL_DATA(padapter);
 
 	value32 = usb_read32(padapter, REG_SYS_CFG);
-	ChipVersion.ICType = CHIP_8188E;
 	ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
-
-	ChipVersion.RFType = RF_TYPE_1T1R;
 	ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
 	ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /*  IC version (CUT) */
 
-	/*  For regulator mode. by tynli. 2011.01.14 */
-	pHalData->RegulatorMode = ((value32 & TRP_BT_EN) ? RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
-
-	ChipVersion.ROMVer = 0;	/*  ROM code version. */
-
 	dump_chip_info(ChipVersion);
 
 	pHalData->VersionID = ChipVersion;
-
-	if (IS_1T2R(ChipVersion)) {
-		pHalData->rf_type = RF_1T2R;
-		pHalData->NumTotalRFPath = 2;
-	} else if (IS_2T2R(ChipVersion)) {
-		pHalData->rf_type = RF_2T2R;
-		pHalData->NumTotalRFPath = 2;
-	} else{
-		pHalData->rf_type = RF_1T1R;
-		pHalData->NumTotalRFPath = 1;
-	}
+	pHalData->rf_type = RF_1T1R;
+	pHalData->NumTotalRFPath = 1;
 
 	MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
-
-	return ChipVersion;
-}
-
-static void rtl8188e_read_chip_version(struct adapter *padapter)
-{
-	ReadChipVersion8188E(padapter);
 }
 
 static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
@@ -220,7 +196,7 @@
 
 	pHalFunc->dm_init = &rtl8188e_init_dm_priv;
 
-	pHalFunc->read_chip_version = &rtl8188e_read_chip_version;
+	pHalFunc->read_chip_version = &ReadChipVersion8188E;
 
 	pHalFunc->set_bwmode_handler = &phy_set_bw_mode;
 	pHalFunc->set_channel_handler = &phy_sw_chnl;
@@ -232,7 +208,6 @@
 	pHalFunc->AntDivBeforeLinkHandler = &AntDivBeforeLink8188E;
 	pHalFunc->AntDivCompareHandler = &AntDivCompare8188E;
 	pHalFunc->read_rfreg = &phy_query_rf_reg;
-	pHalFunc->write_rfreg = &phy_set_rf_reg;
 
 	pHalFunc->sreset_init_value = &sreset_init_value;
 	pHalFunc->sreset_get_wifi_status  = &sreset_get_wifi_status;
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 8726222..1ef878f 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -743,19 +743,16 @@
 	if (Adapter->registrypriv.mp_mode == 1) {
 		_InitRxSetting(Adapter);
 		Adapter->bFWReady = false;
-		haldata->fw_ractrl = false;
 	} else {
 		status = rtl88eu_download_fw(Adapter);
 
 		if (status) {
 			DBG_88E("%s: Download Firmware failed!!\n", __func__);
 			Adapter->bFWReady = false;
-			haldata->fw_ractrl = false;
 			return status;
 		} else {
 			RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
 			Adapter->bFWReady = true;
-			haldata->fw_ractrl = false;
 		}
 	}
 	rtl8188e_InitializeFirmwareVars(Adapter);
@@ -1703,7 +1700,7 @@
 
 			/*  Forece leave RF low power mode for 1T1R to prevent conficting setting in Fw power */
 			/*  saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
-			if ((psmode != PS_MODE_ACTIVE) && (!IS_92C_SERIAL(haldata->VersionID)))
+			if (psmode != PS_MODE_ACTIVE)
 				ODM_RF_Saving(podmpriv, true);
 			rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
 		}
@@ -1961,75 +1958,6 @@
 	return bResult;
 }
 
-/*  */
-/*	Description: */
-/*		Change default setting of specified variable. */
-/*  */
-static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
-{
-	struct hal_data_8188e	*haldata = GET_HAL_DATA(Adapter);
-	u8 bResult = _SUCCESS;
-
-	switch (eVariable) {
-	case HAL_DEF_DBG_DM_FUNC:
-		{
-			u8 dm_func = *((u8 *)pValue);
-			struct odm_dm_struct *podmpriv = &haldata->odmpriv;
-
-			if (dm_func == 0) { /* disable all dynamic func */
-				podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
-				DBG_88E("==> Disable all dynamic function...\n");
-			} else if (dm_func == 1) {/* disable DIG */
-				podmpriv->SupportAbility  &= (~DYNAMIC_BB_DIG);
-				DBG_88E("==> Disable DIG...\n");
-			} else if (dm_func == 2) {/* disable High power */
-				podmpriv->SupportAbility  &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
-			} else if (dm_func == 3) {/* disable tx power tracking */
-				podmpriv->SupportAbility  &= (~DYNAMIC_RF_CALIBRATION);
-				DBG_88E("==> Disable tx power tracking...\n");
-			} else if (dm_func == 5) {/* disable antenna diversity */
-				podmpriv->SupportAbility  &= (~DYNAMIC_BB_ANT_DIV);
-			} else if (dm_func == 6) {/* turn on all dynamic func */
-				if (!(podmpriv->SupportAbility  & DYNAMIC_BB_DIG)) {
-					struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
-					pDigTable->CurIGValue = usb_read8(Adapter, 0xc50);
-				}
-				podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
-				DBG_88E("==> Turn on all dynamic function...\n");
-			}
-		}
-		break;
-	case HAL_DEF_DBG_DUMP_RXPKT:
-		haldata->bDumpRxPkt = *((u8 *)pValue);
-		break;
-	case HAL_DEF_DBG_DUMP_TXPKT:
-		haldata->bDumpTxPkt = *((u8 *)pValue);
-		break;
-	case HW_DEF_FA_CNT_DUMP:
-		{
-			u8 bRSSIDump = *((u8 *)pValue);
-			struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
-			if (bRSSIDump)
-				dm_ocm->DebugComponents	=	ODM_COMP_DIG|ODM_COMP_FA_CNT;
-			else
-				dm_ocm->DebugComponents	= 0;
-		}
-		break;
-	case HW_DEF_ODM_DBG_FLAG:
-		{
-			u64	DebugComponents = *((u64 *)pValue);
-			struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
-			dm_ocm->DebugComponents = DebugComponents;
-		}
-		break;
-	default:
-		bResult = _FAIL;
-		break;
-	}
-
-	return bResult;
-}
-
 static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
 {
 	u8 init_rate = 0;
@@ -2085,28 +2013,9 @@
 
 	init_rate = get_highest_rate_idx(mask)&0x3f;
 
-	if (haldata->fw_ractrl) {
-		u8 arg;
+	ODM_RA_UpdateRateInfo_8188E(&haldata->odmpriv, mac_id,
+				    raid, mask, shortGIrate);
 
-		arg = mac_id & 0x1f;/* MACID */
-		arg |= BIT(7);
-		if (shortGIrate)
-			arg |= BIT(5);
-		mask |= ((raid << 28) & 0xf0000000);
-		DBG_88E("update raid entry, mask=0x%x, arg=0x%x\n", mask, arg);
-		psta->ra_mask = mask;
-		mask |= ((raid << 28) & 0xf0000000);
-
-		/* to do ,for 8188E-SMIC */
-		rtl8188e_set_raid_cmd(adapt, mask);
-	} else {
-		ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv),
-				mac_id,
-				raid,
-				mask,
-				shortGIrate
-				);
-	}
 	/* set ra_id */
 	psta->raid = raid;
 	psta->init_rate = init_rate;
@@ -2156,7 +2065,6 @@
 	pwrctrlpriv = &adapt->pwrctrlpriv;
 
 	/* init default value */
-	haldata->fw_ractrl = false;
 	if (!pwrctrlpriv->bkeepfwalive)
 		haldata->LastHMEBoxNum = 0;
 
@@ -2200,7 +2108,6 @@
 	halfunc->SetHwRegHandler = &SetHwReg8188EU;
 	halfunc->GetHwRegHandler = &GetHwReg8188EU;
 	halfunc->GetHalDefVarHandler = &GetHalDefVar8188EUsb;
-	halfunc->SetHalDefVarHandler = &SetHalDefVar8188EUsb;
 
 	halfunc->UpdateRAMaskHandler = &UpdateHalRAMask8188EUsb;
 	halfunc->SetBeaconRelatedRegistersHandler = &SetBeaconRelatedRegisters8188EUsb;
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index 97047cf..56b4ff0 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -20,20 +20,6 @@
 #ifndef __HAL_VERSION_DEF_H__
 #define __HAL_VERSION_DEF_H__
 
-enum HAL_IC_TYPE {
-	CHIP_8192S	=	0,
-	CHIP_8188C	=	1,
-	CHIP_8192C	=	2,
-	CHIP_8192D	=	3,
-	CHIP_8723A	=	4,
-	CHIP_8188E	=	5,
-	CHIP_8881A	=	6,
-	CHIP_8812A	=	7,
-	CHIP_8821A	=	8,
-	CHIP_8723B	=	9,
-	CHIP_8192E	=	10,
-};
-
 enum HAL_CHIP_TYPE {
 	TEST_CHIP	=	0,
 	NORMAL_CHIP	=	1,
@@ -55,48 +41,20 @@
 	CHIP_VENDOR_UMC		=	1,
 };
 
-enum HAL_RF_TYPE {
-	RF_TYPE_1T1R	=	0,
-	RF_TYPE_1T2R	=	1,
-	RF_TYPE_2T2R	=	2,
-	RF_TYPE_2T3R	=	3,
-	RF_TYPE_2T4R	=	4,
-	RF_TYPE_3T3R	=	5,
-	RF_TYPE_3T4R	=	6,
-	RF_TYPE_4T4R	=	7,
-};
-
 struct HAL_VERSION {
-	enum HAL_IC_TYPE	ICType;
 	enum HAL_CHIP_TYPE	ChipType;
 	enum HAL_CUT_VERSION	CUTVersion;
 	enum HAL_VENDOR		VendorType;
-	enum HAL_RF_TYPE	RFType;
-	u8			ROMVer;
 };
 
 /*  Get element */
-#define GET_CVID_IC_TYPE(version)	(((version).ICType))
 #define GET_CVID_CHIP_TYPE(version)	(((version).ChipType))
-#define GET_CVID_RF_TYPE(version)	(((version).RFType))
 #define GET_CVID_MANUFACTUER(version)	(((version).VendorType))
 #define GET_CVID_CUT_VERSION(version)	(((version).CUTVersion))
-#define GET_CVID_ROM_VERSION(version)	(((version).ROMVer) & ROM_VERSION_MASK)
 
 /* Common Macro. -- */
 /* HAL_VERSION VersionID */
 
-/*  HAL_IC_TYPE_E */
-#define IS_81XXC(version)				\
-	(((GET_CVID_IC_TYPE(version) == CHIP_8192C) ||	\
-	 (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
-#define IS_8723_SERIES(version)				\
-	((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
-#define IS_92D(version)					\
-	((GET_CVID_IC_TYPE(version) == CHIP_8192D) ? true : false)
-#define IS_8188E(version)				\
-	((GET_CVID_IC_TYPE(version) == CHIP_8188E) ? true : false)
-
 /* HAL_CHIP_TYPE_E */
 #define IS_TEST_CHIP(version)				\
 	((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
@@ -122,46 +80,4 @@
 #define IS_CHIP_VENDOR_UMC(version)			\
 	((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
 
-/* HAL_RF_TYPE_E */
-#define IS_1T1R(version)				\
-	((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
-#define IS_1T2R(version)				\
-	((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
-#define IS_2T2R(version)				\
-	((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
-
-/* Chip version Macro. -- */
-#define IS_81XXC_TEST_CHIP(version)			\
-	((IS_81XXC(version) && (!IS_NORMAL_CHIP(version))) ? true : false)
-
-#define IS_92C_SERIAL(version)				\
-	((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
-#define IS_81xxC_VENDOR_UMC_A_CUT(version)		\
-	(IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ?	\
-	(IS_A_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_B_CUT(version)		\
-	(IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ?	\
-	(IS_B_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_C_CUT(version)		\
-	(IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
-	 (IS_C_CUT(version) ? true : false) : false) : false)
-
-#define IS_NORMAL_CHIP92D(version)			\
-	((IS_92D(version)) ?				\
-	((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false) : false)
-
-#define IS_92D_SINGLEPHY(version)			\
-	((IS_92D(version)) ? (IS_2T2R(version) ? true : false) : false)
-#define IS_92D_C_CUT(version)				\
-	((IS_92D(version)) ? (IS_C_CUT(version) ? true : false) : false)
-#define IS_92D_D_CUT(version)				\
-	((IS_92D(version)) ? (IS_D_CUT(version) ? true : false) : false)
-#define IS_92D_E_CUT(version)				\
-	((IS_92D(version)) ? (IS_E_CUT(version) ? true : false) : false)
-
-#define IS_8723A_A_CUT(version)				\
-	((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
-#define IS_8723A_B_CUT(version)				\
-	((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
-
 #endif
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 3b476d8..e73c634 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -171,8 +171,6 @@
 
 	void	(*read_adapter_info)(struct adapter *padapter);
 
-	void	(*enable_interrupt)(struct adapter *padapter);
-	void	(*disable_interrupt)(struct adapter *padapter);
 	s32	(*interrupt_handler)(struct adapter *padapter);
 
 	void	(*set_bwmode_handler)(struct adapter *padapter,
@@ -190,9 +188,6 @@
 	u8	(*GetHalDefVarHandler)(struct adapter *padapter,
 				       enum hal_def_variable eVariable,
 				       void *pValue);
-	u8	(*SetHalDefVarHandler)(struct adapter *padapter,
-				       enum hal_def_variable eVariable,
-				       void *pValue);
 
 	void	(*SetHalODMVarHandler)(struct adapter *padapter,
 				       enum hal_odm_variable eVariable,
@@ -216,9 +211,6 @@
 	u32	(*read_rfreg)(struct adapter *padapter,
 			      enum rf_radio_path eRFPath, u32 RegAddr,
 			      u32 BitMask);
-	void	(*write_rfreg)(struct adapter *padapter,
-			       enum rf_radio_path eRFPath, u32 RegAddr,
-			       u32 BitMask, u32 Data);
 
 	void (*sreset_init_value)(struct adapter *padapter);
 	u8 (*sreset_get_wifi_status)(struct adapter *padapter);
@@ -267,8 +259,6 @@
 void rtw_hal_read_chip_info(struct adapter *padapter);
 void rtw_hal_read_chip_version(struct adapter *padapter);
 
-u8 rtw_hal_set_def_var(struct adapter *padapter,
-		       enum hal_def_variable eVariable, void *pValue);
 u8 rtw_hal_get_def_var(struct adapter *padapter,
 		       enum hal_def_variable eVariable, void *pValue);
 
@@ -276,9 +266,6 @@
 			 enum hal_odm_variable eVariable, void *pValue1,
 			 bool bSet);
 
-void rtw_hal_enable_interrupt(struct adapter *padapter);
-void rtw_hal_disable_interrupt(struct adapter *padapter);
-
 u32	rtw_hal_inirp_init(struct adapter *padapter);
 u32	rtw_hal_inirp_deinit(struct adapter *padapter);
 
@@ -300,9 +287,6 @@
 
 u32	rtw_hal_read_rfreg(struct adapter *padapter, enum rf_radio_path eRFPath,
 			   u32 RegAddr, u32 BitMask);
-void	rtw_hal_write_rfreg(struct adapter *padapter,
-			    enum rf_radio_path eRFPath, u32 RegAddr,
-			    u32 BitMask, u32 Data);
 
 void	rtw_hal_set_bwmode(struct adapter *padapter,
 			   enum ht_channel_width Bandwidth, u8 Offset);
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index b129ad1..6400f75 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -477,63 +477,9 @@
 #define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
 #define WLAN_GET_SEQ_SEQ(seq)  ((seq) & RTW_IEEE80211_SCTL_SEQ)
 
-/* Authentication algorithms */
-#define WLAN_AUTH_OPEN 0
-#define WLAN_AUTH_SHARED_KEY 1
-
-#define WLAN_AUTH_CHALLENGE_LEN 128
-
-#define WLAN_CAPABILITY_BSS (1<<0)
-#define WLAN_CAPABILITY_IBSS (1<<1)
-#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
-#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
-#define WLAN_CAPABILITY_PRIVACY (1<<4)
-#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
-#define WLAN_CAPABILITY_PBCC (1<<6)
-#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
-#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
-
 /* Non standard?  Not in <linux/ieee80211.h> */
 #define WLAN_REASON_EXPIRATION_CHK 65535
 
-/* Information Element IDs */
-#define WLAN_EID_SSID 0
-#define WLAN_EID_SUPP_RATES 1
-#define WLAN_EID_FH_PARAMS 2
-#define WLAN_EID_DS_PARAMS 3
-#define WLAN_EID_CF_PARAMS 4
-#define WLAN_EID_TIM 5
-#define WLAN_EID_IBSS_PARAMS 6
-#define WLAN_EID_CHALLENGE 16
-/* EIDs defined by IEEE 802.11h - START */
-#define WLAN_EID_PWR_CONSTRAINT 32
-#define WLAN_EID_PWR_CAPABILITY 33
-#define WLAN_EID_TPC_REQUEST 34
-#define WLAN_EID_TPC_REPORT 35
-#define WLAN_EID_SUPPORTED_CHANNELS 36
-#define WLAN_EID_CHANNEL_SWITCH 37
-#define WLAN_EID_MEASURE_REQUEST 38
-#define WLAN_EID_MEASURE_REPORT 39
-#define WLAN_EID_QUITE 40
-#define WLAN_EID_IBSS_DFS 41
-/* EIDs defined by IEEE 802.11h - END */
-#define WLAN_EID_ERP_INFO 42
-#define WLAN_EID_HT_CAP 45
-#define WLAN_EID_RSN 48
-#define WLAN_EID_EXT_SUPP_RATES 50
-#define WLAN_EID_MOBILITY_DOMAIN 54
-#define WLAN_EID_FAST_BSS_TRANSITION 55
-#define WLAN_EID_TIMEOUT_INTERVAL 56
-#define WLAN_EID_RIC_DATA 57
-#define WLAN_EID_HT_OPERATION 61
-#define WLAN_EID_SECONDARY_CHANNEL_OFFSET 62
-#define WLAN_EID_20_40_BSS_COEXISTENCE 72
-#define WLAN_EID_20_40_BSS_INTOLERANT 73
-#define WLAN_EID_OVERLAPPING_BSS_SCAN_PARAMS 74
-#define WLAN_EID_MMIE 76
-#define WLAN_EID_VENDOR_SPECIFIC 221
-#define WLAN_EID_GENERIC (WLAN_EID_VENDOR_SPECIFIC)
-
 #define IEEE80211_MGMT_HDR_LEN 24
 #define IEEE80211_DATA_HDR3_LEN 24
 #define IEEE80211_DATA_HDR4_LEN 30
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 00472e0..cf9ca68 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -123,7 +123,7 @@
 #define BIT35	0x0800000000
 #define BIT36	0x1000000000
 
-extern int RTW_STATUS_CODE(int error_code);
+int RTW_STATUS_CODE(int error_code);
 
 #define rtw_update_mem_stat(flag, sz) do {} while (0)
 u8 *_rtw_malloc(u32 sz);
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index 0809963..fdeb603 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -38,8 +38,7 @@
 int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
 void rtw_free_recv_priv(struct recv_priv *precvpriv);
 
-int rtw_os_recv_resource_alloc(struct adapter *adapt,
-			       struct recv_frame *recvfr);
+void rtw_os_recv_resource_alloc(struct recv_frame *recvfr);
 
 int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
 
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index 42b1f22..f813ce0 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -107,7 +107,6 @@
 /*  host message to firmware cmd */
 void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
 void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
-u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
 void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
 			   u8 rssi_level);
 
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 7d8e022..cbad364 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -188,15 +188,8 @@
 
 #define EFUSE_PROTECT_BYTES_BANK	16
 
-/*  For RTL8723 regulator mode. */
-enum rt_regulator_mode {
-	RT_SWITCHING_REGULATOR = 0,
-	RT_LDO_REGULATOR = 1,
-};
-
 struct hal_data_8188e {
 	struct HAL_VERSION	VersionID;
-	enum rt_regulator_mode RegulatorMode; /*  switching regulator or LDO */
 	u16	CustomerID;
 	u8 *pfirmware;
 	u32 fwsize;
@@ -301,7 +294,6 @@
 	/* for host message to fw */
 	u8	LastHMEBoxNum;
 
-	u8	fw_ractrl;
 	u8	RegTxPause;
 	/*  Beacon function related global variable. */
 	u32	RegBcnCtrlVal;
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 8c7e8a3..4c99257 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -535,7 +535,8 @@
 struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr);
 struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
 
-void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
+void rtw_free_assoc_resources(struct adapter *adapter);
+void rtw_free_assoc_resources_locked(struct adapter *adapter);
 void rtw_indicate_disconnect(struct adapter *adapter);
 void rtw_indicate_connect(struct adapter *adapter);
 void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 2417809..9093a5f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -457,9 +457,9 @@
 int init_mlme_ext_priv(struct adapter *adapter);
 int init_hw_mlme_ext(struct adapter *padapter);
 void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
-extern void init_mlme_ext_timer(struct adapter *padapter);
-extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
-extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
+void init_mlme_ext_timer(struct adapter *padapter);
+void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
+struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
 
 unsigned char networktype_to_raid(unsigned char network_type);
 u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
@@ -554,7 +554,7 @@
 			  int cam_idx);
 
 void beacon_timing_control(struct adapter *padapter);
-extern u8 set_tx_beacon_cmd(struct adapter *padapter);
+u8 set_tx_beacon_cmd(struct adapter *padapter);
 unsigned int setup_beacon_frame(struct adapter *padapter,
 				unsigned char *beacon_frame);
 void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index aa1fd87..a493d4c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -257,7 +257,6 @@
 void LPS_Enter(struct adapter *adapter);
 void LPS_Leave(struct adapter *adapter);
 
-void rtw_set_ips_deny(struct adapter *adapter, u32 ms);
 int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
 		    const char *caller);
 #define rtw_pwr_wakeup(adapter)						\
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index 9612490..d4e7832 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -113,7 +113,6 @@
 
 	u8	raid;
 	u8	init_rate;
-	u32	ra_mask;
 	u8	wireless_mode;	/*  NETWORK_TYPE */
 	struct stainfo_stats sta_stats;
 
@@ -351,19 +350,19 @@
 	return x;
 }
 
-extern u32	_rtw_init_sta_priv(struct sta_priv *pstapriv);
-extern u32	_rtw_free_sta_priv(struct sta_priv *pstapriv);
+u32 _rtw_init_sta_priv(struct sta_priv *pstapriv);
+u32 _rtw_free_sta_priv(struct sta_priv *pstapriv);
 
 #define stainfo_offset_valid(offset) (offset < NUM_STA && offset >= 0)
 int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
 struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int off);
 
-extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
-extern u32	rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
-extern void rtw_free_all_stainfo(struct adapter *adapt);
-extern struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
-extern u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
-extern struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
-extern u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+u32 rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
+void rtw_free_all_stainfo(struct adapter *adapt);
+struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
+struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
+u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
 
 #endif /* _STA_INFO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index a08a2e0..dba8af1 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -649,13 +649,6 @@
 #define IEEE80211_MAX_AMPDU_BUF 0x40
 
 
-/* Spatial Multiplexing Power Save Modes */
-#define WLAN_HT_CAP_SM_PS_STATIC	0
-#define WLAN_HT_CAP_SM_PS_DYNAMIC	1
-#define WLAN_HT_CAP_SM_PS_INVALID	2
-#define WLAN_HT_CAP_SM_PS_DISABLED	3
-
-
 #define OP_MODE_PURE                    0
 #define OP_MODE_MAY_BE_LEGACY_STAS      1
 #define OP_MODE_20MHZ_HT_STA_ASSOCED    2
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 38dba14..9695749 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -179,8 +179,8 @@
 
 	cap = le16_to_cpu(le_tmp);
 
-	if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
-		if (cap & WLAN_CAPABILITY_BSS)
+	if (!WLAN_CAPABILITY_IS_STA_BSS(cap)) {
+		if (cap & WLAN_CAPABILITY_ESS)
 			iwe.u.mode = IW_MODE_MASTER;
 		else
 			iwe.u.mode = IW_MODE_ADHOC;
@@ -1871,7 +1871,7 @@
 			rtw_disassoc_cmd(padapter, 500, false);
 			DBG_88E("%s...call rtw_indicate_disconnect\n ", __func__);
 			rtw_indicate_disconnect(padapter);
-			rtw_free_assoc_resources(padapter, 1);
+			rtw_free_assoc_resources(padapter);
 		}
 		ret = wpa_set_auth_algs(dev, (u32)param->value);
 		break;
@@ -2485,16 +2485,13 @@
 
 static int rtw_hostapd_sta_flush(struct net_device *dev)
 {
-	int ret = 0;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
 
 	DBG_88E("%s\n", __func__);
 
 	flush_all_cam_entry(padapter);	/* clear CAM */
 
-	ret = rtw_sta_flush(padapter);
-
-	return ret;
+	return rtw_sta_flush(padapter);
 }
 
 static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
@@ -2666,7 +2663,8 @@
 
 	psta = rtw_get_stainfo(pstapriv, param->sta_addr);
 	if (psta) {
-		if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_GENERIC)) {
+		if (psta->wpa_ie[0] == WLAN_EID_RSN ||
+		    psta->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) {
 			int wpa_ie_len;
 			int copy_len;
 
@@ -2809,7 +2807,6 @@
 
 static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
 {
-	int ret = 0;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
 
@@ -2820,13 +2817,11 @@
 	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
 	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
 		return -EINVAL;
-	ret = rtw_acl_remove_sta(padapter, param->sta_addr);
-	return ret;
+	return rtw_acl_remove_sta(padapter, param->sta_addr);
 }
 
 static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
 {
-	int ret = 0;
 	struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
 
@@ -2837,8 +2832,7 @@
 	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
 	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
 		return -EINVAL;
-	ret = rtw_acl_add_sta(padapter, param->sta_addr);
-	return ret;
+	return rtw_acl_add_sta(padapter, param->sta_addr);
 }
 
 static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index a14e79f..2361bce 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -1175,7 +1175,7 @@
 		/* s2-2.  indicate disconnect to os */
 		rtw_indicate_disconnect(padapter);
 		/* s2-3. */
-		rtw_free_assoc_resources(padapter, 1);
+		rtw_free_assoc_resources(padapter);
 		/* s2-4. */
 		rtw_free_network_queue(padapter, true);
 		/*  Close LED */
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index 0570132..3ebb8b2 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -17,8 +17,6 @@
  *
  *
  ******************************************************************************/
-#define _RECV_OSDEP_C_
-
 #include <osdep_service.h>
 #include <drv_types.h>
 
@@ -29,26 +27,22 @@
 #include <usb_ops_linux.h>
 
 /* alloc os related resource in struct recv_frame */
-int rtw_os_recv_resource_alloc(struct adapter *padapter,
-			       struct recv_frame *precvframe)
+void rtw_os_recv_resource_alloc(struct recv_frame *precvframe)
 {
 	precvframe->pkt_newalloc = NULL;
 	precvframe->pkt = NULL;
-	return _SUCCESS;
 }
 
 /* alloc os related resource in struct recv_buf */
 int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
 				  struct recv_buf *precvbuf)
 {
-	int res = _SUCCESS;
-
-	precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
-	if (precvbuf->purb == NULL)
-		res = _FAIL;
 	precvbuf->pskb = NULL;
 	precvbuf->reuse = false;
-	return res;
+	precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+	if (!precvbuf->purb)
+		return _FAIL;
+	return _SUCCESS;
 }
 
 void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index d0d4335..33bfe05 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -17,8 +17,8 @@
  *
  *
  ******************************************************************************/
-#define _HCI_INTF_C_
 
+#define pr_fmt(fmt) "R8188EU: " fmt
 #include <osdep_service.h>
 #include <drv_types.h>
 #include <recv_osdep.h>
@@ -55,7 +55,6 @@
 static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
 {
 	int	i;
-	int	status = _FAIL;
 	struct dvobj_priv *pdvobjpriv;
 	struct usb_host_config		*phost_conf;
 	struct usb_config_descriptor	*pconf_desc;
@@ -64,10 +63,9 @@
 	struct usb_endpoint_descriptor	*pendp_desc;
 	struct usb_device	*pusbd;
 
-
 	pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
 	if (pdvobjpriv == NULL)
-		goto exit;
+		return NULL;
 
 	pdvobjpriv->pusbintf = usb_intf;
 	pusbd = interface_to_usbdev(usb_intf);
@@ -115,20 +113,13 @@
 	mutex_init(&pdvobjpriv->usb_vendor_req_mutex);
 	pdvobjpriv->usb_vendor_req_buf = kzalloc(MAX_USB_IO_CTL_SIZE, GFP_KERNEL);
 
-	if (!pdvobjpriv->usb_vendor_req_buf)
-		goto free_dvobj;
-
-	usb_get_dev(pusbd);
-
-	status = _SUCCESS;
-
-free_dvobj:
-	if (status != _SUCCESS && pdvobjpriv) {
+	if (!pdvobjpriv->usb_vendor_req_buf) {
 		usb_set_intfdata(usb_intf, NULL);
 		kfree(pdvobjpriv);
-		pdvobjpriv = NULL;
+		return NULL;
 	}
-exit:
+	usb_get_dev(pusbd);
+
 	return pdvobjpriv;
 }
 
@@ -136,7 +127,6 @@
 {
 	struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
 
-
 	usb_set_intfdata(usb_intf, NULL);
 	if (dvobj) {
 		/* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
@@ -150,7 +140,7 @@
 				 * on sitesurvey for the first time when
 				 * device is up . Reset usb port for sitesurvey
 				 * fail issue. */
-				DBG_88E("usb attached..., try to reset usb device\n");
+				pr_debug("usb attached..., try to reset usb device\n");
 				usb_reset_device(interface_to_usbdev(usb_intf));
 			}
 		}
@@ -201,7 +191,7 @@
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
 
 	if (padapter->bup) {
-		DBG_88E("===> rtw_dev_unload\n");
+		pr_debug("===> rtw_dev_unload\n");
 		padapter->bDriverStopped = true;
 		if (padapter->xmitpriv.ack_tx)
 			rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
@@ -224,7 +214,7 @@
 			 ("r871x_dev_unload():padapter->bup == false\n"));
 	}
 
-	DBG_88E("<=== rtw_dev_unload\n");
+	pr_debug("<=== rtw_dev_unload\n");
 
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
 }
@@ -236,16 +226,13 @@
 	struct net_device *pnetdev = padapter->pnetdev;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
-	int ret = 0;
 	u32 start_time = jiffies;
 
-
-	DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+	pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
 
 	if ((!padapter->bup) || (padapter->bDriverStopped) ||
 	    (padapter->bSurpriseRemoved)) {
-		DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
+		pr_debug("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
 			padapter->bup, padapter->bDriverStopped,
 			padapter->bSurpriseRemoved);
 		goto exit;
@@ -267,7 +254,7 @@
 
 	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
 	    check_fwstate(pmlmepriv, _FW_LINKED)) {
-		DBG_88E("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
+		pr_debug("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
 			__func__, __LINE__,
 			pmlmepriv->cur_network.network.Ssid.Ssid,
 			pmlmepriv->cur_network.network.MacAddress,
@@ -279,7 +266,7 @@
 	/* s2-2.  indicate disconnect to os */
 	rtw_indicate_disconnect(padapter);
 	/* s2-3. */
-	rtw_free_assoc_resources(padapter, 1);
+	rtw_free_assoc_resources(padapter);
 	/* s2-4. */
 	rtw_free_network_queue(padapter, true);
 
@@ -293,10 +280,10 @@
 		rtw_indicate_disconnect(padapter);
 
 exit:
-	DBG_88E("<===  %s return %d.............. in %dms\n", __func__
-		, ret, rtw_get_passing_time_ms(start_time));
+	pr_debug("<===  %s .............. in %dms\n", __func__,
+		 rtw_get_passing_time_ms(start_time));
 
-	return ret;
+	return 0;
 }
 
 static int rtw_resume_process(struct adapter *padapter)
@@ -306,7 +293,7 @@
 	int ret = -1;
 	u32 start_time = jiffies;
 
-	DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+	pr_debug("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
 
 	if (padapter) {
 		pnetdev = padapter->pnetdev;
@@ -319,7 +306,7 @@
 	rtw_reset_drv_sw(padapter);
 	pwrpriv->bkeepfwalive = false;
 
-	DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
+	pr_debug("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
 	if (pm_netdev_open(pnetdev, true) != 0)
 		goto exit;
 
@@ -334,10 +321,9 @@
 exit:
 	if (pwrpriv)
 		pwrpriv->bInSuspend = false;
-	DBG_88E("<===  %s return %d.............. in %dms\n", __func__,
+	pr_debug("<===  %s return %d.............. in %dms\n", __func__,
 		ret, rtw_get_passing_time_ms(start_time));
 
-
 	return ret;
 }
 
@@ -407,8 +393,8 @@
 		dvobj->pusbdev->do_remote_wakeup = 1;
 		pusb_intf->needs_remote_wakeup = 1;
 		device_init_wakeup(&pusb_intf->dev, 1);
-		DBG_88E("\n  padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
-		DBG_88E("\n  padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
+		pr_debug("\n  padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
+		pr_debug("\n  padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
 			device_may_wakeup(&pusb_intf->dev));
 	}
 #endif
@@ -416,13 +402,13 @@
 	/* 2012-07-11 Move here to prevent the 8723AS-VAU BT auto
 	 * suspend influence */
 	if (usb_autopm_get_interface(pusb_intf) < 0)
-			DBG_88E("can't get autopm:\n");
+			pr_debug("can't get autopm:\n");
 
 	/*  alloc dev name after read efuse. */
 	rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
 	rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
 	memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
-	DBG_88E("MAC Address from pnetdev->dev_addr =  %pM\n",
+	pr_debug("MAC Address from pnetdev->dev_addr =  %pM\n",
 		pnetdev->dev_addr);
 
 	/* step 6. Tell the network stack we exist */
@@ -431,7 +417,7 @@
 		goto free_hal_data;
 	}
 
-	DBG_88E("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
+	pr_debug("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
 		, padapter->bDriverStopped
 		, padapter->bSurpriseRemoved
 		, padapter->bup
@@ -475,7 +461,7 @@
 	rtw_cancel_all_timer(if1);
 
 	rtw_dev_unload(if1);
-	DBG_88E("+r871xu_dev_remove, hw_init_completed=%d\n",
+	pr_debug("+r871xu_dev_remove, hw_init_completed=%d\n",
 		if1->hw_init_completed);
 	rtw_free_drv_sw(if1);
 	if (pnetdev)
@@ -485,7 +471,6 @@
 static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
 {
 	struct adapter *if1 = NULL;
-	int status = _FAIL;
 	struct dvobj_priv *dvobj;
 
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n"));
@@ -500,19 +485,18 @@
 
 	if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
 	if (if1 == NULL) {
-		DBG_88E("rtw_init_primarystruct adapter Failed!\n");
+		pr_debug("rtw_init_primarystruct adapter Failed!\n");
 		goto free_dvobj;
 	}
 
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n"));
 
-	status = _SUCCESS;
+	return 0;
 
 free_dvobj:
-	if (status != _SUCCESS)
-		usb_dvobj_deinit(pusb_intf);
+	usb_dvobj_deinit(pusb_intf);
 exit:
-	return status == _SUCCESS ? 0 : -ENODEV;
+	return -ENODEV;
 }
 
 /*
@@ -524,8 +508,7 @@
 	struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
 	struct adapter *padapter = dvobj->if1;
 
-
-	DBG_88E("+rtw_dev_remove\n");
+	pr_debug("+rtw_dev_remove\n");
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
 
 	if (!pusb_intf->unregistering)
@@ -541,7 +524,7 @@
 	usb_dvobj_deinit(pusb_intf);
 
 	RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
-	DBG_88E("-r871xu_dev_remove, done\n");
+	pr_debug("-r871xu_dev_remove, done\n");
 }
 
 static struct usb_driver rtl8188e_usb_drv = {
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index ef9da86..fcf9b3b 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -160,21 +160,6 @@
 	pDot11dInfo->State = DOT11D_STATE_LEARNED;
 }
 
-u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel)
-{
-	struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
-	u8 MaxTxPwrInDbm = 255;
-
-	if (MAX_CHANNEL_NUMBER < Channel) {
-		netdev_info(dev->dev, "DOT11D_GetMaxTxPwrInDbm(): Invalid Channel\n");
-		return MaxTxPwrInDbm;
-	}
-	if (pDot11dInfo->channel_map[Channel])
-		MaxTxPwrInDbm = pDot11dInfo->MaxTxPwrDbmList[Channel];
-
-	return MaxTxPwrInDbm;
-}
-
 void DOT11D_ScanComplete(struct rtllib_device *dev)
 {
 	struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
@@ -190,27 +175,3 @@
 		break;
 	}
 }
-
-int ToLegalChannel(struct rtllib_device *dev, u8 channel)
-{
-	struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(dev);
-	u8 default_chn = 0;
-	u32 i;
-
-	for (i = 1; i <= MAX_CHANNEL_NUMBER; i++) {
-		if (pDot11dInfo->channel_map[i] > 0) {
-			default_chn = i;
-			break;
-		}
-	}
-
-	if (MAX_CHANNEL_NUMBER < channel) {
-		netdev_err(dev->dev, "%s(): Invalid Channel\n", __func__);
-		return default_chn;
-	}
-
-	if (pDot11dInfo->channel_map[channel] > 0)
-		return channel;
-
-	return default_chn;
-}
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 69e0f8f..129ebed 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -79,7 +79,6 @@
 #define UPDATE_CIE_SRC(__pIeeeDev, __pTa)		\
 	cpMacAddr(GET_DOT11D_INFO(__pIeeeDev)->CountryIeSrcAddr, __pTa)
 
-#define CIE_WATCHDOG_TH 1
 #define GET_CIE_WATCHDOG(__pIeeeDev)				\
 	 (GET_DOT11D_INFO(__pIeeeDev)->CountryIeWatchdog)
 static inline void RESET_CIE_WATCHDOG(struct rtllib_device *__pIeeeDev)
@@ -88,16 +87,11 @@
 }
 #define UPDATE_CIE_WATCHDOG(__pIeeeDev) (++GET_CIE_WATCHDOG(__pIeeeDev))
 
-#define IS_DOT11D_STATE_DONE(__pIeeeDev)			\
-	(GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-
 void dot11d_init(struct rtllib_device *dev);
 void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
 void Dot11d_Reset(struct rtllib_device *dev);
 void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
 			    u16 CoutryIeLen, u8 *pCoutryIe);
-u8 DOT11D_GetMaxTxPwrInDbm(struct rtllib_device *dev, u8 Channel);
 void DOT11D_ScanComplete(struct rtllib_device *dev);
-int ToLegalChannel(struct rtllib_device *dev, u8 channel);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index d0b0830..dba4584 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
@@ -26,8 +26,6 @@
 #define		MAX_SILENT_RESET_RX_SLOT_NUM	10
 
 #define RX_MPDU_QUEUE				0
-#define RX_CMD_QUEUE				1
-
 
 enum rtl819x_loopback {
 	RTL819X_NO_LOOPBACK = 0,
@@ -36,11 +34,6 @@
 	RTL819X_CCK_LOOPBACK = 3,
 };
 
-
-#define RESET_DELAY_8185			20
-
-#define RT_IBSS_INT_MASKS (IMR_BcnInt | IMR_BcnInt | IMR_TBDOK | IMR_TBDER)
-
 #define DESC90_RATE1M				0x00
 #define DESC90_RATE2M				0x01
 #define DESC90_RATE5_5M				0x02
@@ -74,17 +67,6 @@
 #define SHORT_SLOT_TIME				9
 #define NON_SHORT_SLOT_TIME		20
 
-
-#define	MAX_LINES_HWCONFIG_TXT			1000
-#define MAX_BYTES_LINE_HWCONFIG_TXT		128
-
-#define SW_THREE_WIRE			0
-#define HW_THREE_WIRE			2
-
-#define BT_DEMO_BOARD			0
-#define BT_QA_BOARD				1
-#define BT_FPGA					2
-
 #define	RX_SMOOTH				20
 
 #define QSLT_BK					0x1
@@ -96,25 +78,14 @@
 #define	QSLT_MGNT				0x12
 #define	QSLT_CMD				0x13
 
-#define NUM_OF_FIRMWARE_QUEUE				10
-#define NUM_OF_PAGES_IN_FW					0x100
 #define NUM_OF_PAGE_IN_FW_QUEUE_BK		0x007
 #define NUM_OF_PAGE_IN_FW_QUEUE_BE		0x0aa
 #define NUM_OF_PAGE_IN_FW_QUEUE_VI		0x024
 #define NUM_OF_PAGE_IN_FW_QUEUE_VO		0x007
-#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA		0
-#define NUM_OF_PAGE_IN_FW_QUEUE_CMD		0x2
 #define NUM_OF_PAGE_IN_FW_QUEUE_MGNT		0x10
-#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH		0
 #define NUM_OF_PAGE_IN_FW_QUEUE_BCN		0x4
 #define NUM_OF_PAGE_IN_FW_QUEUE_PUB		0xd
 
-#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM	0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM	0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM	0x048
-#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM	0x026
-#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM	0x00
-
 #define APPLIED_RESERVED_QUEUE_IN_FW		0x80000000
 #define RSVD_FW_QUEUE_PAGE_BK_SHIFT		0x00
 #define RSVD_FW_QUEUE_PAGE_BE_SHIFT		0x08
@@ -197,23 +168,6 @@
 
 };
 
-
-#define TX_DESC_SIZE			32
-
-#define TX_DESC_CMD_SIZE	32
-
-
-#define TX_STATUS_DESC_SIZE	32
-
-#define TX_FWINFO_SIZE	8
-
-
-#define RX_DESC_SIZE	16
-
-#define RX_STATUS_DESC_SIZE	16
-
-#define RX_DRIVER_INFO_SIZE	8
-
 struct log_int_8190 {
 	u32	nIMR_COMDOK;
 	u32	nIMR_MGNTDOK;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index facc6f1..c8f25ad 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -22,55 +22,38 @@
 #include "r8192E_phy.h"
 #include "r8190P_rtl8256.h"
 
-void PHY_SetRF8256Bandwidth(struct net_device *dev,
-			    enum ht_channel_width Bandwidth)
+void rtl92e_set_bandwidth(struct net_device *dev,
+			  enum ht_channel_width Bandwidth)
 {
 	u8	eRFPath;
 	struct r8192_priv *priv = rtllib_priv(dev);
 
+	if (priv->card_8192_version != VERSION_8190_BD &&
+	    priv->card_8192_version != VERSION_8190_BE) {
+		netdev_warn(dev, "%s(): Unknown HW version.\n", __func__);
+		return;
+	}
+
 	for (eRFPath = 0; eRFPath < priv->NumTotalRFPath; eRFPath++) {
-		if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+		if (!rtl92e_is_legal_rf_path(dev, eRFPath))
 				continue;
 
 		switch (Bandwidth) {
 		case HT_CHANNEL_WIDTH_20:
-			if (priv->card_8192_version == VERSION_8190_BD ||
-			    priv->card_8192_version == VERSION_8190_BE) {
-				rtl8192_phy_SetRFReg(dev,
-						(enum rf90_radio_path)eRFPath,
-						0x0b, bMask12Bits, 0x100);
-				rtl8192_phy_SetRFReg(dev,
-						(enum rf90_radio_path)eRFPath,
-						0x2c, bMask12Bits, 0x3d7);
-				rtl8192_phy_SetRFReg(dev,
-						(enum rf90_radio_path)eRFPath,
-						0x0e, bMask12Bits, 0x021);
-
-			} else {
-				netdev_warn(dev, "%s(): Unknown HW version.\n",
-					    __func__);
-			}
-
+			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+					  0x0b, bMask12Bits, 0x100);
+			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+					  0x2c, bMask12Bits, 0x3d7);
+			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+					  0x0e, bMask12Bits, 0x021);
 			break;
 		case HT_CHANNEL_WIDTH_20_40:
-			if (priv->card_8192_version == VERSION_8190_BD ||
-			    priv->card_8192_version == VERSION_8190_BE) {
-				rtl8192_phy_SetRFReg(dev,
-						 (enum rf90_radio_path)eRFPath,
-						 0x0b, bMask12Bits, 0x300);
-				rtl8192_phy_SetRFReg(dev,
-						 (enum rf90_radio_path)eRFPath,
-						 0x2c, bMask12Bits, 0x3ff);
-				rtl8192_phy_SetRFReg(dev,
-						 (enum rf90_radio_path)eRFPath,
-						 0x0e, bMask12Bits, 0x0e1);
-
-			} else {
-				netdev_warn(dev, "%s(): Unknown HW version.\n",
-					    __func__);
-			}
-
-
+			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+					  0x0b, bMask12Bits, 0x300);
+			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+					  0x2c, bMask12Bits, 0x3ff);
+			rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath,
+					  0x0e, bMask12Bits, 0x0e1);
 			break;
 		default:
 			netdev_err(dev, "%s(): Unknown bandwidth: %#X\n",
@@ -81,15 +64,7 @@
 	}
 }
 
-bool PHY_RF8256_Config(struct net_device *dev)
-{
-	struct r8192_priv *priv = rtllib_priv(dev);
-
-	priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
-	return phy_RF8256_Config_ParaFile(dev);
-}
-
-bool phy_RF8256_Config_ParaFile(struct net_device *dev)
+bool rtl92e_config_rf(struct net_device *dev)
 {
 	u32	u4RegValue = 0;
 	u8	eRFPath;
@@ -102,9 +77,11 @@
 	u8	ConstRetryTimes = 5, RetryTimes = 5;
 	u8 ret = 0;
 
+	priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
+
 	for (eRFPath = (enum rf90_radio_path)RF90_PATH_A;
 	     eRFPath < priv->NumTotalRFPath; eRFPath++) {
-		if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+		if (!rtl92e_is_legal_rf_path(dev, eRFPath))
 				continue;
 
 		pPhyReg = &priv->PHYRegDef[eRFPath];
@@ -113,114 +90,63 @@
 		switch (eRFPath) {
 		case RF90_PATH_A:
 		case RF90_PATH_C:
-			u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs,
-							bRFSI_RFENV);
+			u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
+						       bRFSI_RFENV);
 			break;
 		case RF90_PATH_B:
 		case RF90_PATH_D:
-			u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs,
-							bRFSI_RFENV<<16);
+			u4RegValue = rtl92e_get_bb_reg(dev, pPhyReg->rfintfs,
+						       bRFSI_RFENV<<16);
 			break;
 		}
 
-		rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+		rtl92e_set_bb_reg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
 
-		rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+		rtl92e_set_bb_reg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
 
-		rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,
-				 b3WireAddressLength, 0x0);
-		rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,
-				 b3WireDataLength, 0x0);
+		rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,
+				  b3WireAddressLength, 0x0);
+		rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,
+				  b3WireDataLength, 0x0);
 
-		rtl8192_phy_SetRFReg(dev, (enum rf90_radio_path) eRFPath, 0x0,
-				     bMask12Bits, 0xbf);
+		rtl92e_set_rf_reg(dev, (enum rf90_radio_path)eRFPath, 0x0,
+				  bMask12Bits, 0xbf);
 
-		rtStatus = rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF,
-						(enum rf90_radio_path)eRFPath);
+		rtStatus = rtl92e_check_bb_and_rf(dev, HW90_BLOCK_RF,
+						  (enum rf90_radio_path)eRFPath);
 		if (!rtStatus) {
 			netdev_err(dev, "%s(): Failed to check RF Path %d.\n",
 				   __func__, eRFPath);
-			goto phy_RF8256_Config_ParaFile_Fail;
+			goto fail;
 		}
 
 		RetryTimes = ConstRetryTimes;
 		RF3_Final_Value = 0;
-		switch (eRFPath) {
-		case RF90_PATH_A:
-			while (RF3_Final_Value != RegValueToBeCheck &&
-			       RetryTimes != 0) {
-				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
+		while (RF3_Final_Value != RegValueToBeCheck &&
+		       RetryTimes != 0) {
+			ret = rtl92e_config_rf_path(dev,
 						(enum rf90_radio_path)eRFPath);
-				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
-						 (enum rf90_radio_path)eRFPath,
-						 RegOffSetToBeCheck,
-						 bMask12Bits);
-				RT_TRACE(COMP_RF,
-					 "RF %d %d register final value: %x\n",
-					 eRFPath, RegOffSetToBeCheck,
-					 RF3_Final_Value);
-				RetryTimes--;
-			}
-			break;
-		case RF90_PATH_B:
-			while (RF3_Final_Value != RegValueToBeCheck &&
-			       RetryTimes != 0) {
-				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
-						(enum rf90_radio_path)eRFPath);
-				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
-						 (enum rf90_radio_path)eRFPath,
-						 RegOffSetToBeCheck,
-						 bMask12Bits);
-				RT_TRACE(COMP_RF,
-					 "RF %d %d register final value: %x\n",
-					 eRFPath, RegOffSetToBeCheck,
-					 RF3_Final_Value);
-				RetryTimes--;
-			}
-			break;
-		case RF90_PATH_C:
-			while (RF3_Final_Value != RegValueToBeCheck &&
-			       RetryTimes != 0) {
-				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
-						(enum rf90_radio_path)eRFPath);
-				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
+			RF3_Final_Value = rtl92e_get_rf_reg(dev,
 						(enum rf90_radio_path)eRFPath,
 						RegOffSetToBeCheck,
 						bMask12Bits);
-				RT_TRACE(COMP_RF,
-					 "RF %d %d register final value: %x\n",
-					 eRFPath, RegOffSetToBeCheck,
-					 RF3_Final_Value);
-				RetryTimes--;
-			}
-			break;
-		case RF90_PATH_D:
-			while (RF3_Final_Value != RegValueToBeCheck &&
-			       RetryTimes != 0) {
-				ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,
-					       (enum rf90_radio_path)eRFPath);
-				RF3_Final_Value = rtl8192_phy_QueryRFReg(dev,
-					       (enum rf90_radio_path)eRFPath,
-					       RegOffSetToBeCheck, bMask12Bits);
-				RT_TRACE(COMP_RF,
-					 "RF %d %d register final value: %x\n",
-					 eRFPath, RegOffSetToBeCheck,
-					 RF3_Final_Value);
-				RetryTimes--;
-			}
-			break;
+			RT_TRACE(COMP_RF,
+				 "RF %d %d register final value: %x\n",
+				 eRFPath, RegOffSetToBeCheck,
+				 RF3_Final_Value);
+			RetryTimes--;
 		}
 
 		switch (eRFPath) {
 		case RF90_PATH_A:
 		case RF90_PATH_C:
-			rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV,
-					 u4RegValue);
+			rtl92e_set_bb_reg(dev, pPhyReg->rfintfs, bRFSI_RFENV,
+					  u4RegValue);
 			break;
 		case RF90_PATH_B:
 		case RF90_PATH_D:
-			rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16,
-					 u4RegValue);
+			rtl92e_set_bb_reg(dev, pPhyReg->rfintfs,
+					  bRFSI_RFENV<<16, u4RegValue);
 			break;
 		}
 
@@ -228,7 +154,7 @@
 			netdev_err(dev,
 				   "%s(): Failed to initialize RF Path %d.\n",
 				   __func__, eRFPath);
-			goto phy_RF8256_Config_ParaFile_Fail;
+			goto fail;
 		}
 
 	}
@@ -236,11 +162,11 @@
 	RT_TRACE(COMP_PHY, "PHY Initialization Success\n");
 	return true;
 
-phy_RF8256_Config_ParaFile_Fail:
+fail:
 	return false;
 }
 
-void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8	powerlevel)
+void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel)
 {
 	u32	TxAGC = 0;
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -254,11 +180,11 @@
 	}
 	if (TxAGC > 0x24)
 		TxAGC = 0x24;
-	rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
+	rtl92e_set_bb_reg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC);
 }
 
 
-void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel)
+void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u32 writeVal, powerBase0, powerBase1, writeVal_tmp;
@@ -300,7 +226,7 @@
 		else
 			writeVal = (byte3 << 24) | (byte2 << 16) |
 				   (byte1 << 8) | byte0;
-		rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
+		rtl92e_set_bb_reg(dev, RegOffset[index], 0x7f7f7f7f, writeVal);
 	}
 
 }
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 64e831d..3e4363f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
@@ -21,11 +21,10 @@
 #define RTL8225H
 
 #define RTL819X_TOTAL_RF_PATH 2
-extern void PHY_SetRF8256Bandwidth(struct net_device *dev,
-				   enum ht_channel_width Bandwidth);
-extern bool PHY_RF8256_Config(struct net_device *dev);
-extern bool phy_RF8256_Config_ParaFile(struct net_device *dev);
-extern void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8	powerlevel);
-extern void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
+void rtl92e_set_bandwidth(struct net_device *dev,
+			  enum ht_channel_width Bandwidth);
+bool rtl92e_config_rf(struct net_device *dev);
+void rtl92e_set_cck_tx_power(struct net_device *dev, u8	powerlevel);
+void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index ebd08a1..9ddabf5 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -21,11 +21,8 @@
 #include "r8192E_hw.h"
 #include "r8192E_cmdpkt.h"
 
-bool cmpk_message_handle_tx(
-	struct net_device *dev,
-	u8	*code_virtual_address,
-	u32	packettype,
-	u32	buffer_len)
+bool rtl92e_send_cmd_pkt(struct net_device *dev, u8 *code_virtual_address,
+			 u32 packettype, u32 buffer_len)
 {
 
 	bool				rt_status = true;
@@ -41,7 +38,7 @@
 	struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
 
 	RT_TRACE(COMP_CMDPKT, "%s(),buffer_len is %d\n", __func__, buffer_len);
-	firmware_init_param(dev);
+	rtl92e_init_fw_param(dev);
 	frag_threshold = pfirmware->cmdpacket_frag_thresold;
 
 	do {
@@ -84,7 +81,7 @@
 
 	} while (frag_offset < buffer_len);
 
-	write_nic_byte(dev, TPPoll, TPPoll_CQ);
+	rtl92e_writeb(dev, TPPoll, TPPoll_CQ);
 Failed:
 	return rt_status;
 }
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index f714d51..2a8b165c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
@@ -19,7 +19,6 @@
 #ifndef R819XUSB_CMDPKT_H
 #define R819XUSB_CMDPKT_H
 
-extern bool cmpk_message_handle_tx(struct net_device *dev,
-				   u8 *codevirtualaddress, u32 packettype,
-				   u32 buffer_len);
+bool rtl92e_send_cmd_pkt(struct net_device *dev, u8 *codevirtualaddress,
+			 u32 packettype, u32 buffer_len);
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index f6661bb..c28cabc 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -33,7 +33,7 @@
 static int WDCAPARA_ADD[] = {EDCAPARA_BE, EDCAPARA_BK, EDCAPARA_VI,
 			     EDCAPARA_VO};
 
-void rtl8192e_start_beacon(struct net_device *dev)
+void rtl92e_start_beacon(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 	struct rtllib_network *net = &priv->rtllib->current_network;
@@ -41,21 +41,20 @@
 	u16 BcnCW = 6;
 	u16 BcnIFS = 0xf;
 
-	DMESG("Enabling beacon TX");
-	rtl8192_irq_disable(dev);
+	rtl92e_irq_disable(dev);
 
-	write_nic_word(dev, ATIMWND, 2);
+	rtl92e_writew(dev, ATIMWND, 2);
 
-	write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
-	write_nic_word(dev, BCN_DRV_EARLY_INT, 10);
-	write_nic_word(dev, BCN_DMATIME, 256);
+	rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
+	rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
+	rtl92e_writew(dev, BCN_DMATIME, 256);
 
-	write_nic_byte(dev, BCN_ERR_THRESH, 100);
+	rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
 
 	BcnTimeCfg |= BcnCW<<BCN_TCFG_CW_SHIFT;
 	BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
-	write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
-	rtl8192_irq_enable(dev);
+	rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
+	rtl92e_irq_enable(dev);
 }
 
 static void rtl8192e_update_msr(struct net_device *dev)
@@ -64,7 +63,7 @@
 	u8 msr;
 	enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
 
-	msr  = read_nic_byte(dev, MSR);
+	msr  = rtl92e_readb(dev, MSR);
 	msr &= ~MSR_LINK_MASK;
 
 	switch (priv->rtllib->iw_mode) {
@@ -91,26 +90,26 @@
 		break;
 	}
 
-	write_nic_byte(dev, MSR, msr);
+	rtl92e_writeb(dev, MSR, msr);
 	if (priv->rtllib->LedControlHandler)
 		priv->rtllib->LedControlHandler(dev, LedAction);
 }
 
-void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val)
+void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	switch (variable) {
 	case HW_VAR_BSSID:
-		write_nic_dword(dev, BSSIDR, ((u32 *)(val))[0]);
-		write_nic_word(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+		rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
+		rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
 		break;
 
 	case HW_VAR_MEDIA_STATUS:
 	{
 		enum rt_op_mode OpMode = *((enum rt_op_mode *)(val));
 		enum led_ctl_mode LedAction = LED_CTL_NO_LINK;
-		u8		btMsr = read_nic_byte(dev, MSR);
+		u8 btMsr = rtl92e_readb(dev, MSR);
 
 		btMsr &= 0xfc;
 
@@ -134,7 +133,7 @@
 			break;
 		}
 
-		write_nic_byte(dev, MSR, btMsr);
+		rtl92e_writeb(dev, MSR, btMsr);
 
 	}
 	break;
@@ -144,7 +143,7 @@
 		u32	RegRCR, Type;
 
 		Type = ((u8 *)(val))[0];
-		RegRCR = read_nic_dword(dev, RCR);
+		RegRCR = rtl92e_readl(dev, RCR);
 		priv->ReceiveConfig = RegRCR;
 
 		if (Type == true)
@@ -152,7 +151,7 @@
 		else if (Type == false)
 			RegRCR &= (~RCR_CBSSID);
 
-		write_nic_dword(dev, RCR, RegRCR);
+		rtl92e_writel(dev, RCR, RegRCR);
 		priv->ReceiveConfig = RegRCR;
 
 	}
@@ -161,7 +160,7 @@
 	case HW_VAR_SLOT_TIME:
 
 		priv->slot_time = val[0];
-		write_nic_byte(dev, SLOT_TIME, val[0]);
+		rtl92e_writeb(dev, SLOT_TIME, val[0]);
 
 		break;
 
@@ -173,12 +172,12 @@
 		regTmp = priv->basic_rate;
 		if (priv->short_preamble)
 			regTmp |= BRSR_AckShortPmb;
-		write_nic_dword(dev, RRSR, regTmp);
+		rtl92e_writel(dev, RRSR, regTmp);
 		break;
 	}
 
 	case HW_VAR_CPU_RST:
-		write_nic_dword(dev, CPU_GEN, ((u32 *)(val))[0]);
+		rtl92e_writel(dev, CPU_GEN, ((u32 *)(val))[0]);
 		break;
 
 	case HW_VAR_AC_PARAM:
@@ -194,7 +193,7 @@
 		u1bAIFS = qop->aifs[pAcParam] *
 			  ((mode&(IEEE_G|IEEE_N_24G)) ? 9 : 20) + aSifsTime;
 
-		dm_init_edca_turbo(dev);
+		rtl92e_dm_init_edca_turbo(dev);
 
 		u4bAcParam = (le16_to_cpu(qop->tx_op_limit[pAcParam]) <<
 			      AC_PARAM_TXOP_LIMIT_OFFSET) |
@@ -208,19 +207,19 @@
 			 __func__, eACI, u4bAcParam);
 		switch (eACI) {
 		case AC1_BK:
-			write_nic_dword(dev, EDCAPARA_BK, u4bAcParam);
+			rtl92e_writel(dev, EDCAPARA_BK, u4bAcParam);
 			break;
 
 		case AC0_BE:
-			write_nic_dword(dev, EDCAPARA_BE, u4bAcParam);
+			rtl92e_writel(dev, EDCAPARA_BE, u4bAcParam);
 			break;
 
 		case AC2_VI:
-			write_nic_dword(dev, EDCAPARA_VI, u4bAcParam);
+			rtl92e_writel(dev, EDCAPARA_VI, u4bAcParam);
 			break;
 
 		case AC3_VO:
-			write_nic_dword(dev, EDCAPARA_VO, u4bAcParam);
+			rtl92e_writel(dev, EDCAPARA_VO, u4bAcParam);
 			break;
 
 		default:
@@ -242,7 +241,7 @@
 		union aci_aifsn *pAciAifsn = (union aci_aifsn *) &
 					      (qos_parameters->aifs[0]);
 		u8 acm = pAciAifsn->f.acm;
-		u8 AcmCtrl = read_nic_byte(dev, AcmHwCtrl);
+		u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
 
 		RT_TRACE(COMP_DBG, "===========>%s():HW_VAR_ACM_CTRL:%x\n",
 			 __func__, eACI);
@@ -290,20 +289,20 @@
 		RT_TRACE(COMP_QOS,
 			 "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
 			 AcmCtrl);
-		write_nic_byte(dev, AcmHwCtrl, AcmCtrl);
+		rtl92e_writeb(dev, AcmHwCtrl, AcmCtrl);
 		break;
 	}
 
 	case HW_VAR_SIFS:
-		write_nic_byte(dev, SIFS, val[0]);
-		write_nic_byte(dev, SIFS+1, val[0]);
+		rtl92e_writeb(dev, SIFS, val[0]);
+		rtl92e_writeb(dev, SIFS+1, val[0]);
 		break;
 
 	case HW_VAR_RF_TIMING:
 	{
 		u8 Rf_Timing = *((u8 *)val);
 
-		write_nic_byte(dev, rFPGA0_RFTiming1, Rf_Timing);
+		rtl92e_writeb(dev, rFPGA0_RFTiming1, Rf_Timing);
 		break;
 	}
 
@@ -324,7 +323,7 @@
 
 	RT_TRACE(COMP_INIT, "====> rtl8192_read_eeprom_info\n");
 
-	EEPROMId = eprom_read(dev, 0);
+	EEPROMId = rtl92e_eeprom_read(dev, 0);
 	if (EEPROMId != RTL8190_EEPROM_ID) {
 		netdev_err(dev, "%s(): Invalid EEPROM ID: %x\n", __func__,
 			   EEPROMId);
@@ -334,12 +333,14 @@
 	}
 
 	if (!priv->AutoloadFailFlag) {
-		priv->eeprom_vid = eprom_read(dev, EEPROM_VID >> 1);
-		priv->eeprom_did = eprom_read(dev, EEPROM_DID >> 1);
+		priv->eeprom_vid = rtl92e_eeprom_read(dev, EEPROM_VID >> 1);
+		priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);
 
-		usValue = eprom_read(dev, (u16)(EEPROM_Customer_ID>>1)) >> 8;
+		usValue = rtl92e_eeprom_read(dev,
+					     (u16)(EEPROM_Customer_ID>>1)) >> 8;
 		priv->eeprom_CustomerID = (u8)(usValue & 0xff);
-		usValue = eprom_read(dev, EEPROM_ICVersion_ChannelPlan>>1);
+		usValue = rtl92e_eeprom_read(dev,
+					     EEPROM_ICVersion_ChannelPlan>>1);
 		priv->eeprom_ChannelPlan = usValue&0xff;
 		IC_Version = (usValue & 0xff00)>>8;
 
@@ -377,7 +378,7 @@
 
 	if (!priv->AutoloadFailFlag) {
 		for (i = 0; i < 6; i += 2) {
-			usValue = eprom_read(dev,
+			usValue = rtl92e_eeprom_read(dev,
 				 (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
 			*(u16 *)(&dev->dev_addr[i]) = usValue;
 		}
@@ -397,8 +398,8 @@
 
 	if (priv->card_8192_version > VERSION_8190_BD) {
 		if (!priv->AutoloadFailFlag) {
-			tempval = (eprom_read(dev, (EEPROM_RFInd_PowerDiff >>
-					      1))) & 0xff;
+			tempval = (rtl92e_eeprom_read(dev,
+						      (EEPROM_RFInd_PowerDiff >> 1))) & 0xff;
 			priv->EEPROMLegacyHTTxPowerDiff = tempval & 0xf;
 
 			if (tempval&0x80)
@@ -412,7 +413,7 @@
 			priv->EEPROMLegacyHTTxPowerDiff);
 
 		if (!priv->AutoloadFailFlag)
-			priv->EEPROMThermalMeter = (u8)(((eprom_read(dev,
+			priv->EEPROMThermalMeter = (u8)(((rtl92e_eeprom_read(dev,
 						   (EEPROM_ThermalMeter>>1))) &
 						   0xff00)>>8);
 		else
@@ -423,7 +424,7 @@
 
 		if (priv->epromtype == EEPROM_93C46) {
 			if (!priv->AutoloadFailFlag) {
-				usValue = eprom_read(dev,
+				usValue = rtl92e_eeprom_read(dev,
 					  EEPROM_TxPwDiff_CrystalCap >> 1);
 				priv->EEPROMAntPwDiff = (usValue&0x0fff);
 				priv->EEPROMCrystalCap = (u8)((usValue & 0xf000)
@@ -441,7 +442,7 @@
 
 			for (i = 0; i < 14; i += 2) {
 				if (!priv->AutoloadFailFlag)
-					usValue = eprom_read(dev,
+					usValue = rtl92e_eeprom_read(dev,
 						  (u16)((EEPROM_TxPwIndex_CCK +
 						  i) >> 1));
 				else
@@ -457,7 +458,7 @@
 			}
 			for (i = 0; i < 14; i += 2) {
 				if (!priv->AutoloadFailFlag)
-					usValue = eprom_read(dev,
+					usValue = rtl92e_eeprom_read(dev,
 						(u16)((EEPROM_TxPwIndex_OFDM_24G
 						+ i) >> 1));
 				else
@@ -561,7 +562,7 @@
 		RT_TRACE(COMP_INIT, "\n2T4R config\n");
 	}
 
-	init_rate_adaptive(dev);
+	rtl92e_init_adaptive_rate(dev);
 
 	priv->rf_chip = RF_8256;
 
@@ -626,13 +627,13 @@
 	RT_TRACE(COMP_TRACE, "<==== ReadAdapterInfo\n");
 }
 
-void rtl8192_get_eeprom_size(struct net_device *dev)
+void rtl92e_get_eeprom_size(struct net_device *dev)
 {
 	u16 curCR;
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
-	curCR = read_nic_dword(dev, EPROM_CMD);
+	curCR = rtl92e_readl(dev, EPROM_CMD);
 	RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
 		 curCR);
 	priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -684,26 +685,26 @@
 		break;
 	}
 
-	write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+	rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
 	{
 		u32 ratr_value = 0;
 
 		ratr_value = regRATR;
 		if (priv->rf_type == RF_1T2R)
 			ratr_value &= ~(RATE_ALL_OFDM_2SS);
-		write_nic_dword(dev, RATR0, ratr_value);
-		write_nic_byte(dev, UFWP, 1);
+		rtl92e_writel(dev, RATR0, ratr_value);
+		rtl92e_writeb(dev, UFWP, 1);
 	}
-	regTmp = read_nic_byte(dev, 0x313);
+	regTmp = rtl92e_readb(dev, 0x313);
 	regRRSR = ((regTmp) << 24) | (regRRSR & 0x00ffffff);
-	write_nic_dword(dev, RRSR, regRRSR);
+	rtl92e_writel(dev, RRSR, regRRSR);
 
-	write_nic_word(dev, RETRY_LIMIT,
-			priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
-			priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
+	rtl92e_writew(dev, RETRY_LIMIT,
+		      priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
+		      priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
 }
 
-bool rtl8192_adapter_start(struct net_device *dev)
+bool rtl92e_start_adapter(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u32 ulRegRead;
@@ -719,10 +720,10 @@
 	priv->being_init_adapter = true;
 
 start:
-	rtl8192_pci_resetdescring(dev);
+	rtl92e_reset_desc_ring(dev);
 	priv->Rf_Mode = RF_OP_By_SW_3wire;
 	if (priv->ResetProgress == RESET_TYPE_NORESET) {
-		write_nic_byte(dev, ANAPAR, 0x37);
+		rtl92e_writeb(dev, ANAPAR, 0x37);
 		mdelay(500);
 	}
 	priv->pFirmware->firmware_status = FW_STATUS_0_INIT;
@@ -730,7 +731,7 @@
 	if (priv->RegRfOff)
 		priv->rtllib->eRFPowerState = eRfOff;
 
-	ulRegRead = read_nic_dword(dev, CPU_GEN);
+	ulRegRead = rtl92e_readl(dev, CPU_GEN);
 	if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
 		ulRegRead |= CPU_GEN_SYSTEM_RESET;
 	else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
@@ -739,19 +740,19 @@
 		netdev_err(dev, "%s(): undefined firmware state: %d.\n",
 			   __func__, priv->pFirmware->firmware_status);
 
-	write_nic_dword(dev, CPU_GEN, ulRegRead);
+	rtl92e_writel(dev, CPU_GEN, ulRegRead);
 
-	ICVersion = read_nic_byte(dev, IC_VERRSION);
+	ICVersion = rtl92e_readb(dev, IC_VERRSION);
 	if (ICVersion >= 0x4) {
-		SwitchingRegulatorOutput = read_nic_byte(dev, SWREGULATOR);
+		SwitchingRegulatorOutput = rtl92e_readb(dev, SWREGULATOR);
 		if (SwitchingRegulatorOutput  != 0xb8) {
-			write_nic_byte(dev, SWREGULATOR, 0xa8);
+			rtl92e_writeb(dev, SWREGULATOR, 0xa8);
 			mdelay(1);
-			write_nic_byte(dev, SWREGULATOR, 0xb8);
+			rtl92e_writeb(dev, SWREGULATOR, 0xb8);
 		}
 	}
 	RT_TRACE(COMP_INIT, "BB Config Start!\n");
-	rtStatus = rtl8192_BBConfig(dev);
+	rtStatus = rtl92e_config_bb(dev);
 	if (!rtStatus) {
 		netdev_warn(dev, "%s(): Failed to configure BB\n", __func__);
 		return rtStatus;
@@ -760,7 +761,7 @@
 
 	priv->LoopbackMode = RTL819X_NO_LOOPBACK;
 	if (priv->ResetProgress == RESET_TYPE_NORESET) {
-		ulRegRead = read_nic_dword(dev, CPU_GEN);
+		ulRegRead = rtl92e_readl(dev, CPU_GEN);
 		if (priv->LoopbackMode == RTL819X_NO_LOOPBACK)
 			ulRegRead = ((ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
 				     CPU_GEN_NO_LOOPBACK_SET);
@@ -770,73 +771,73 @@
 			netdev_err(dev, "%s: Invalid loopback mode setting.\n",
 				   __func__);
 
-		write_nic_dword(dev, CPU_GEN, ulRegRead);
+		rtl92e_writel(dev, CPU_GEN, ulRegRead);
 
 		udelay(500);
 	}
 	rtl8192_hwconfig(dev);
-	write_nic_byte(dev, CMDR, CR_RE | CR_TE);
+	rtl92e_writeb(dev, CMDR, CR_RE | CR_TE);
 
-	write_nic_byte(dev, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
-		       (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT)));
-	write_nic_dword(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
-	write_nic_word(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
-	write_nic_dword(dev, RCR, priv->ReceiveConfig);
+	rtl92e_writeb(dev, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
+				  (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT)));
+	rtl92e_writel(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
+	rtl92e_writew(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
+	rtl92e_writel(dev, RCR, priv->ReceiveConfig);
 
-	write_nic_dword(dev, RQPN1,  NUM_OF_PAGE_IN_FW_QUEUE_BK <<
-			RSVD_FW_QUEUE_PAGE_BK_SHIFT |
-			NUM_OF_PAGE_IN_FW_QUEUE_BE <<
-			RSVD_FW_QUEUE_PAGE_BE_SHIFT |
-			NUM_OF_PAGE_IN_FW_QUEUE_VI <<
-			RSVD_FW_QUEUE_PAGE_VI_SHIFT |
-			NUM_OF_PAGE_IN_FW_QUEUE_VO <<
-			RSVD_FW_QUEUE_PAGE_VO_SHIFT);
-	write_nic_dword(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT <<
-			RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
-	write_nic_dword(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
-			NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
-			RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
-			NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
-			RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
+	rtl92e_writel(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK <<
+		      RSVD_FW_QUEUE_PAGE_BK_SHIFT |
+		      NUM_OF_PAGE_IN_FW_QUEUE_BE <<
+		      RSVD_FW_QUEUE_PAGE_BE_SHIFT |
+		      NUM_OF_PAGE_IN_FW_QUEUE_VI <<
+		      RSVD_FW_QUEUE_PAGE_VI_SHIFT |
+		      NUM_OF_PAGE_IN_FW_QUEUE_VO <<
+		      RSVD_FW_QUEUE_PAGE_VO_SHIFT);
+	rtl92e_writel(dev, RQPN2, NUM_OF_PAGE_IN_FW_QUEUE_MGNT <<
+		      RSVD_FW_QUEUE_PAGE_MGNT_SHIFT);
+	rtl92e_writel(dev, RQPN3, APPLIED_RESERVED_QUEUE_IN_FW |
+		      NUM_OF_PAGE_IN_FW_QUEUE_BCN <<
+		      RSVD_FW_QUEUE_PAGE_BCN_SHIFT|
+		      NUM_OF_PAGE_IN_FW_QUEUE_PUB <<
+		      RSVD_FW_QUEUE_PAGE_PUB_SHIFT);
 
-	rtl8192_tx_enable(dev);
-	rtl8192_rx_enable(dev);
-	ulRegRead = (0xFFF00000 & read_nic_dword(dev, RRSR))  |
+	rtl92e_tx_enable(dev);
+	rtl92e_rx_enable(dev);
+	ulRegRead = (0xFFF00000 & rtl92e_readl(dev, RRSR))  |
 		     RATE_ALL_OFDM_AG | RATE_ALL_CCK;
-	write_nic_dword(dev, RRSR, ulRegRead);
-	write_nic_dword(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
+	rtl92e_writel(dev, RRSR, ulRegRead);
+	rtl92e_writel(dev, RATR0+4*7, (RATE_ALL_OFDM_AG | RATE_ALL_CCK));
 
-	write_nic_byte(dev, ACK_TIMEOUT, 0x30);
+	rtl92e_writeb(dev, ACK_TIMEOUT, 0x30);
 
 	if (priv->ResetProgress == RESET_TYPE_NORESET)
-		rtl8192_SetWirelessMode(dev, priv->rtllib->mode);
-	CamResetAllEntry(dev);
+		rtl92e_set_wireless_mode(dev, priv->rtllib->mode);
+	rtl92e_cam_reset(dev);
 	{
 		u8 SECR_value = 0x0;
 
 		SECR_value |= SCR_TxEncEnable;
 		SECR_value |= SCR_RxDecEnable;
 		SECR_value |= SCR_NoSKMC;
-		write_nic_byte(dev, SECR, SECR_value);
+		rtl92e_writeb(dev, SECR, SECR_value);
 	}
-	write_nic_word(dev, ATIMWND, 2);
-	write_nic_word(dev, BCN_INTERVAL, 100);
+	rtl92e_writew(dev, ATIMWND, 2);
+	rtl92e_writew(dev, BCN_INTERVAL, 100);
 	{
 		int i;
 
 		for (i = 0; i < QOS_QUEUE_NUM; i++)
-			write_nic_dword(dev, WDCAPARA_ADD[i], 0x005e4332);
+			rtl92e_writel(dev, WDCAPARA_ADD[i], 0x005e4332);
 	}
-	write_nic_byte(dev, 0xbe, 0xc0);
+	rtl92e_writeb(dev, 0xbe, 0xc0);
 
-	rtl8192_phy_configmac(dev);
+	rtl92e_config_mac(dev);
 
 	if (priv->card_8192_version > (u8) VERSION_8190_BD) {
-		rtl8192_phy_getTxPower(dev);
-		rtl8192_phy_setTxPower(dev, priv->chan);
+		rtl92e_get_tx_power(dev);
+		rtl92e_set_tx_power(dev, priv->chan);
 	}
 
-	tmpvalue = read_nic_byte(dev, IC_VERRSION);
+	tmpvalue = rtl92e_readb(dev, IC_VERRSION);
 	priv->IC_Cut = tmpvalue;
 	RT_TRACE(COMP_INIT, "priv->IC_Cut= 0x%x\n", priv->IC_Cut);
 	if (priv->IC_Cut >= IC_VersionCut_D) {
@@ -851,7 +852,7 @@
 	}
 
 	RT_TRACE(COMP_INIT, "Load Firmware!\n");
-	bfirmwareok = init_firmware(dev);
+	bfirmwareok = rtl92e_init_fw(dev);
 	if (!bfirmwareok) {
 		if (retry_times < 10) {
 			retry_times++;
@@ -864,37 +865,34 @@
 	RT_TRACE(COMP_INIT, "Load Firmware finished!\n");
 	if (priv->ResetProgress == RESET_TYPE_NORESET) {
 		RT_TRACE(COMP_INIT, "RF Config Started!\n");
-		rtStatus = rtl8192_phy_RFConfig(dev);
+		rtStatus = rtl92e_config_phy(dev);
 		if (!rtStatus) {
 			netdev_info(dev, "RF Config failed\n");
 			return rtStatus;
 		}
 		RT_TRACE(COMP_INIT, "RF Config Finished!\n");
 	}
-	rtl8192_phy_updateInitGain(dev);
 
-	rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
-	rtl8192_setBBreg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
+	rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn, 0x1);
+	rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bOFDMEn, 0x1);
 
-	write_nic_byte(dev, 0x87, 0x0);
+	rtl92e_writeb(dev, 0x87, 0x0);
 
 	if (priv->RegRfOff) {
 		RT_TRACE((COMP_INIT | COMP_RF | COMP_POWER),
 			  "%s(): Turn off RF for RegRfOff ----------\n",
 			  __func__);
-		MgntActSet_RF_State(dev, eRfOff, RF_CHANGE_BY_SW, true);
+		rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_SW);
 	} else if (priv->rtllib->RfOffReason > RF_CHANGE_BY_PS) {
 		RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
 			 "%s(): Turn off RF for RfOffReason(%d) ----------\n",
 			 __func__, priv->rtllib->RfOffReason);
-		MgntActSet_RF_State(dev, eRfOff, priv->rtllib->RfOffReason,
-				    true);
+		rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
 	} else if (priv->rtllib->RfOffReason >= RF_CHANGE_BY_IPS) {
 		RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER),
 			 "%s(): Turn off RF for RfOffReason(%d) ----------\n",
 			 __func__, priv->rtllib->RfOffReason);
-		MgntActSet_RF_State(dev, eRfOff, priv->rtllib->RfOffReason,
-				    true);
+		rtl92e_set_rf_state(dev, eRfOff, priv->rtllib->RfOffReason);
 	} else {
 		RT_TRACE((COMP_INIT|COMP_RF|COMP_POWER), "%s(): RF-ON\n",
 			  __func__);
@@ -908,13 +906,13 @@
 		priv->Rf_Mode = RF_OP_By_SW_3wire;
 
 	if (priv->ResetProgress == RESET_TYPE_NORESET) {
-		dm_initialize_txpower_tracking(dev);
+		rtl92e_dm_init_txpower_tracking(dev);
 
 		if (priv->IC_Cut >= IC_VersionCut_D) {
-			tmpRegA = rtl8192_QueryBBReg(dev,
-				  rOFDM0_XATxIQImbalance, bMaskDWord);
-			tmpRegC = rtl8192_QueryBBReg(dev,
-				  rOFDM0_XCTxIQImbalance, bMaskDWord);
+			tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
+						    bMaskDWord);
+			tmpRegC = rtl92e_get_bb_reg(dev, rOFDM0_XCTxIQImbalance,
+						    bMaskDWord);
 			for (i = 0; i < TxBBGainTableLength; i++) {
 				if (tmpRegA == dm_tx_bb_gain[i]) {
 					priv->rfa_txpowertrackingindex = (u8)i;
@@ -926,8 +924,8 @@
 				}
 			}
 
-			TempCCk = rtl8192_QueryBBReg(dev,
-				  rCCK0_TxFilter1, bMaskByte2);
+			TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1,
+						    bMaskByte2);
 
 			for (i = 0; i < CCKTxBBGainTableLength; i++) {
 				if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
@@ -954,7 +952,7 @@
 			priv->btxpower_tracking = false;
 		}
 	}
-	rtl8192_irq_enable(dev);
+	rtl92e_irq_enable(dev);
 end:
 	priv->being_init_adapter = false;
 	return rtStatus;
@@ -969,27 +967,27 @@
 	u16 rate_config = 0;
 
 	net = &priv->rtllib->current_network;
-	rtl8192_config_rate(dev, &rate_config);
+	rtl92e_config_rate(dev, &rate_config);
 	priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
 	 priv->basic_rate = rate_config &= 0x15f;
-	write_nic_dword(dev, BSSIDR, ((u32 *)net->bssid)[0]);
-	write_nic_word(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+	rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
+	rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
 
 	if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
-		write_nic_word(dev, ATIMWND, 2);
-		write_nic_word(dev, BCN_DMATIME, 256);
-		write_nic_word(dev, BCN_INTERVAL, net->beacon_interval);
-		write_nic_word(dev, BCN_DRV_EARLY_INT, 10);
-		write_nic_byte(dev, BCN_ERR_THRESH, 100);
+		rtl92e_writew(dev, ATIMWND, 2);
+		rtl92e_writew(dev, BCN_DMATIME, 256);
+		rtl92e_writew(dev, BCN_INTERVAL, net->beacon_interval);
+		rtl92e_writew(dev, BCN_DRV_EARLY_INT, 10);
+		rtl92e_writeb(dev, BCN_ERR_THRESH, 100);
 
 		BcnTimeCfg |= (BcnCW<<BCN_TCFG_CW_SHIFT);
 		BcnTimeCfg |= BcnIFS<<BCN_TCFG_IFS;
 
-		write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
+		rtl92e_writew(dev, BCN_TCFG, BcnTimeCfg);
 	}
 }
 
-void rtl8192_link_change(struct net_device *dev)
+void rtl92e_link_change(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
@@ -1002,16 +1000,16 @@
 		priv->ops->update_ratr_table(dev);
 		if ((KEY_TYPE_WEP40 == ieee->pairwise_key_type) ||
 		    (KEY_TYPE_WEP104 == ieee->pairwise_key_type))
-			EnableHWSecurityConfig8192(dev);
+			rtl92e_enable_hw_security_config(dev);
 	} else {
-		write_nic_byte(dev, 0x173, 0);
+		rtl92e_writeb(dev, 0x173, 0);
 	}
 	rtl8192e_update_msr(dev);
 
 	if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC) {
 		u32 reg = 0;
 
-		reg = read_nic_dword(dev, RCR);
+		reg = rtl92e_readl(dev, RCR);
 		if (priv->rtllib->state == RTLLIB_LINKED) {
 			if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn)
 				;
@@ -1020,12 +1018,12 @@
 		} else
 			priv->ReceiveConfig = reg &= ~RCR_CBSSID;
 
-		write_nic_dword(dev, RCR, reg);
+		rtl92e_writel(dev, RCR, reg);
 	}
 }
 
-void rtl8192_AllowAllDestAddr(struct net_device *dev,
-			      bool bAllowAllDA, bool WriteIntoReg)
+void rtl92e_set_monitor_mode(struct net_device *dev, bool bAllowAllDA,
+			     bool WriteIntoReg)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1035,7 +1033,7 @@
 		priv->ReceiveConfig &= ~RCR_AAP;
 
 	if (WriteIntoReg)
-		write_nic_dword(dev, RCR, priv->ReceiveConfig);
+		rtl92e_writel(dev, RCR, priv->ReceiveConfig);
 }
 
 static u8 MRateToHwRate8190Pci(u8 rate)
@@ -1177,8 +1175,20 @@
 	return QueueSelect;
 }
 
-void  rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
-			   struct cb_desc *cb_desc, struct sk_buff *skb)
+static u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
+{
+	u8   tmp_Short;
+
+	tmp_Short = (TxHT == 1) ? ((tcb_desc->bUseShortGI) ? 1 : 0) :
+			((tcb_desc->bUseShortPreamble) ? 1 : 0);
+	if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
+		tmp_Short = 0;
+
+	return tmp_Short;
+}
+
+void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
+			  struct cb_desc *cb_desc, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
@@ -1286,9 +1296,8 @@
 	pdesc->TxBuffAddr = mapping;
 }
 
-void  rtl8192_tx_fill_cmd_desc(struct net_device *dev,
-			       struct tx_desc_cmd *entry,
-			       struct cb_desc *cb_desc, struct sk_buff *skb)
+void  rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
+			      struct cb_desc *cb_desc, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
@@ -1506,8 +1515,9 @@
 	pstats->bPacketBeacon = precord_stats->bPacketBeacon = bPacketBeacon;
 	pstats->bToSelfBA = precord_stats->bToSelfBA = bToSelfBA;
 	if (check_reg824 == 0) {
-		reg824_bit9 = rtl8192_QueryBBReg(priv->rtllib->dev,
-			      rFPGA0_XA_HSSIParameter2, 0x200);
+		reg824_bit9 = rtl92e_get_bb_reg(priv->rtllib->dev,
+						rFPGA0_XA_HSSIParameter2,
+						0x200);
 		check_reg824 = 1;
 	}
 
@@ -1575,7 +1585,7 @@
 			}
 		}
 
-		pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
+		pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
 		pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
 		pstats->RecvSignalPower = rx_pwr_all;
 
@@ -1615,7 +1625,7 @@
 			rx_snrX /= 2;
 			priv->stats.rxSNRdB[i] = (long)rx_snrX;
 
-			RSSI = rtl819x_query_rxpwrpercentage(rx_pwr[i]);
+			RSSI = rtl92e_rx_db_to_percent(rx_pwr[i]);
 			if (priv->brfpath_rxenable[i])
 				total_rssi += RSSI;
 
@@ -1628,7 +1638,7 @@
 
 
 		rx_pwr_all = (((pofdm_buf->pwdb_all) >> 1) & 0x7f) - 106;
-		pwdb_all = rtl819x_query_rxpwrpercentage(rx_pwr_all);
+		pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
 
 		pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
 		pstats->RxPower = precord_stats->RxPower =	rx_pwr_all;
@@ -1645,7 +1655,7 @@
 
 			rx_evmX /= 2;
 
-			evm = rtl819x_evm_dbtopercentage(rx_evmX);
+			evm = rtl92e_evm_db_to_percent(rx_evmX);
 			if (bpacket_match_bssid) {
 				if (i == 0) {
 					pstats->SignalQuality = (u8)(evm &
@@ -1721,8 +1731,8 @@
 		slide_rssi_index = 0;
 
 	tmp_val = priv->stats.slide_rssi_total/slide_rssi_statistics;
-	priv->stats.signal_strength = rtl819x_translate_todbm(priv,
-				      (u8)tmp_val);
+	priv->stats.signal_strength = rtl92e_translate_to_dbm(priv,
+							      (u8)tmp_val);
 	curr_st->rssi = priv->stats.signal_strength;
 	if (!prev_st->bPacketMatchBSSID) {
 		if (!prev_st->bToSelfBA)
@@ -1732,13 +1742,10 @@
 	if (!bcheck)
 		return;
 
-	rtl819x_process_cck_rxpathsel(priv, prev_st);
-
 	priv->stats.num_process_phyinfo++;
 	if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
 		for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++) {
-			if (!rtl8192_phy_CheckIsLegalRFPath(priv->rtllib->dev,
-			    rfpath))
+			if (!rtl92e_is_legal_rf_path(priv->rtllib->dev, rfpath))
 				continue;
 			RT_TRACE(COMP_DBG,
 				 "Jacken -> pPreviousstats->RxMIMOSignalStrength[rfpath]  = %d\n",
@@ -1813,7 +1820,7 @@
 					(RX_SMOOTH-1)) +
 					(prev_st->RxPWDBAll)) / (RX_SMOOTH);
 		}
-		rtl819x_update_rxsignalstatistics8190pci(priv, prev_st);
+		rtl92e_update_rx_statistics(priv, prev_st);
 	}
 
 	if (prev_st->SignalQuality != 0) {
@@ -1900,7 +1907,7 @@
 	rtl8192_query_rxphystatus(priv, pstats, pdesc, pdrvinfo,
 				  &previous_stats, bpacket_match_bssid,
 				  bpacket_toself, bPacketBeacon, bToSelfBA);
-	rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
+	rtl92e_copy_mpdu_stats(pstats, &previous_stats);
 }
 
 static void rtl8192_UpdateReceivedRateHistogramStatistics(
@@ -2016,10 +2023,8 @@
 	priv->stats.received_rate_histogram[rcvType][rateIndex]++;
 }
 
-bool rtl8192_rx_query_status_desc(struct net_device *dev,
-				  struct rtllib_rx_stats *stats,
-				  struct rx_desc *pdesc,
-				  struct sk_buff *skb)
+bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
+			 struct rx_desc *pdesc, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rx_fwinfo *pDrvInfo = NULL;
@@ -2063,9 +2068,9 @@
 			    (pDrvInfo->FirstAGGR == 1);
 
 	stats->TimeStampLow = pDrvInfo->TSFL;
-	stats->TimeStampHigh = read_nic_dword(dev, TSFR+4);
+	stats->TimeStampHigh = rtl92e_readl(dev, TSFR+4);
 
-	rtl819x_UpdateRxPktTimeStamp(dev, stats);
+	rtl92e_update_rx_pkt_timestamp(dev, stats);
 
 	if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
 		stats->bShift = 1;
@@ -2089,7 +2094,7 @@
 	return true;
 }
 
-void rtl8192_halt_adapter(struct net_device *dev, bool reset)
+void rtl92e_stop_adapter(struct net_device *dev, bool reset)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	int i;
@@ -2102,7 +2107,7 @@
 
 	if (!priv->rtllib->bSupportRemoteWakeUp) {
 		u1bTmp = 0x0;
-		write_nic_byte(dev, CMDR, u1bTmp);
+		rtl92e_writeb(dev, CMDR, u1bTmp);
 	}
 
 	mdelay(20);
@@ -2113,18 +2118,18 @@
 		priv->bHwRfOffAction = 2;
 
 		if (!priv->rtllib->bSupportRemoteWakeUp) {
-			PHY_SetRtl8192eRfOff(dev);
-			ulRegRead = read_nic_dword(dev, CPU_GEN);
+			rtl92e_set_rf_off(dev);
+			ulRegRead = rtl92e_readl(dev, CPU_GEN);
 			ulRegRead |= CPU_GEN_SYSTEM_RESET;
-			write_nic_dword(dev, CPU_GEN, ulRegRead);
+			rtl92e_writel(dev, CPU_GEN, ulRegRead);
 		} else {
-			write_nic_dword(dev, WFCRC0, 0xffffffff);
-			write_nic_dword(dev, WFCRC1, 0xffffffff);
-			write_nic_dword(dev, WFCRC2, 0xffffffff);
+			rtl92e_writel(dev, WFCRC0, 0xffffffff);
+			rtl92e_writel(dev, WFCRC1, 0xffffffff);
+			rtl92e_writel(dev, WFCRC2, 0xffffffff);
 
 
-			write_nic_byte(dev, PMR, 0x5);
-			write_nic_byte(dev, MacBlkCtrl, 0xa);
+			rtl92e_writeb(dev, PMR, 0x5);
+			rtl92e_writeb(dev, MacBlkCtrl, 0xa);
 		}
 	}
 
@@ -2136,7 +2141,7 @@
 	skb_queue_purge(&priv->skb_queue);
 }
 
-void rtl8192_update_ratr_table(struct net_device *dev)
+void rtl92e_update_ratr_table(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
@@ -2145,7 +2150,7 @@
 	u16 rate_config = 0;
 	u8 rate_index = 0;
 
-	rtl8192_config_rate(dev, &rate_config);
+	rtl92e_config_rate(dev, &rate_config);
 	ratr_value = rate_config | *pMcsRate << 12;
 	switch (ieee->mode) {
 	case IEEE_A:
@@ -2179,12 +2184,12 @@
 	else if (!ieee->pHTInfo->bCurTxBW40MHz &&
 		  ieee->pHTInfo->bCurShortGI20MHz)
 		ratr_value |= 0x80000000;
-	write_nic_dword(dev, RATR0+rate_index*4, ratr_value);
-	write_nic_byte(dev, UFWP, 1);
+	rtl92e_writel(dev, RATR0+rate_index*4, ratr_value);
+	rtl92e_writeb(dev, UFWP, 1);
 }
 
 void
-rtl8192_InitializeVariables(struct net_device  *dev)
+rtl92e_init_variables(struct net_device  *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -2218,66 +2223,65 @@
 	priv->bfirst_after_down = false;
 }
 
-void rtl8192_EnableInterrupt(struct net_device *dev)
+void rtl92e_enable_irq(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
 	priv->irq_enabled = 1;
 
-	write_nic_dword(dev, INTA_MASK, priv->irq_mask[0]);
+	rtl92e_writel(dev, INTA_MASK, priv->irq_mask[0]);
 
 }
 
-void rtl8192_DisableInterrupt(struct net_device *dev)
+void rtl92e_disable_irq(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
-	write_nic_dword(dev, INTA_MASK, 0);
+	rtl92e_writel(dev, INTA_MASK, 0);
 
 	priv->irq_enabled = 0;
 }
 
-void rtl8192_ClearInterrupt(struct net_device *dev)
+void rtl92e_clear_irq(struct net_device *dev)
 {
 	u32 tmp = 0;
 
-	tmp = read_nic_dword(dev, ISR);
-	write_nic_dword(dev, ISR, tmp);
+	tmp = rtl92e_readl(dev, ISR);
+	rtl92e_writel(dev, ISR, tmp);
 }
 
 
-void rtl8192_enable_rx(struct net_device *dev)
+void rtl92e_enable_rx(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
-	write_nic_dword(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
+	rtl92e_writel(dev, RDQDA, priv->rx_ring_dma[RX_MPDU_QUEUE]);
 }
 
 static const u32 TX_DESC_BASE[] = {
 	BKQDA, BEQDA, VIQDA, VOQDA, HCCAQDA, CQDA, MQDA, HQDA, BQDA
 };
 
-void rtl8192_enable_tx(struct net_device *dev)
+void rtl92e_enable_tx(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 	u32 i;
 
 	for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
-		write_nic_dword(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
+		rtl92e_writel(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
 }
 
 
-void rtl8192_interrupt_recognized(struct net_device *dev, u32 *p_inta,
-				  u32 *p_intb)
+void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb)
 {
-	*p_inta = read_nic_dword(dev, ISR);
-	write_nic_dword(dev, ISR, *p_inta);
+	*p_inta = rtl92e_readl(dev, ISR);
+	rtl92e_writel(dev, ISR, *p_inta);
 }
 
-bool rtl8192_HalRxCheckStuck(struct net_device *dev)
+bool rtl92e_is_rx_stuck(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
-	u16		  RegRxCounter = read_nic_word(dev, 0x130);
+	u16		  RegRxCounter = rtl92e_readw(dev, 0x130);
 	bool		  bStuck = false;
 	static u8	  rx_chk_cnt;
 	u32		SlotIndex = 0, TotalRxStuckCount = 0;
@@ -2338,11 +2342,11 @@
 	return bStuck;
 }
 
-bool rtl8192_HalTxCheckStuck(struct net_device *dev)
+bool rtl92e_is_tx_stuck(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	bool	bStuck = false;
-	u16	RegTxCounter = read_nic_word(dev, 0x128);
+	u16	RegTxCounter = rtl92e_readw(dev, 0x128);
 
 	RT_TRACE(COMP_RESET, "%s():RegTxCounter is %d,TxCounter is %d\n",
 		 __func__, RegTxCounter, priv->TxCounter);
@@ -2355,7 +2359,7 @@
 	return bStuck;
 }
 
-bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev)
+bool rtl92e_get_nmode_support_by_sec(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
@@ -2369,34 +2373,10 @@
 	}
 }
 
-bool rtl8192_GetHalfNmodeSupportByAPs(struct net_device *dev)
+bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev)
 {
-	bool Reval;
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
 
-	if (ieee->bHalfWirelessN24GMode == true)
-		Reval = true;
-	else
-		Reval =  false;
-
-	return Reval;
-}
-
-u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc)
-{
-	u8   tmp_Short;
-
-	tmp_Short = (TxHT == 1) ? ((tcb_desc->bUseShortGI) ? 1 : 0) :
-			((tcb_desc->bUseShortPreamble) ? 1 : 0);
-	if (TxHT == 1 && TxRate != DESC90_RATEMCS15)
-		tmp_Short = 0;
-
-	return tmp_Short;
-}
-
-void ActUpdateChannelAccessSetting(struct net_device *dev,
-	enum wireless_mode WirelessMode,
-	struct channel_access_setting *ChnlAccessSetting)
-{
+	return ieee->bHalfWirelessN24GMode;
 }
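
The register-field helpers renamed throughout the hunks above (rtl92e_set_bb_reg()/rtl92e_get_bb_reg(), replacing rtl8192_setBBreg()/rtl8192_QueryBBReg()) follow the usual read-modify-write pattern: read the dword, derive the field's shift from its bit mask, merge the new value, and write the result back — the same logic that appears verbatim in the r8192E_phy.c hunks further down. Below is a minimal standalone sketch of that pattern, not the driver's code; the regs[] array stands in for MMIO and the field_set()/field_get() names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 32-bit register file (hypothetical, not MMIO). */
static uint32_t regs[256];

/* Position of the lowest set bit in the mask, i.e. the field's shift. */
static unsigned bit_shift(uint32_t mask)
{
	unsigned i;

	for (i = 0; i < 32; i++)
		if (mask & (1u << i))
			break;
	return i;
}

/* Read-modify-write of the field selected by mask, mirroring the
 * rtl92e_set_bb_reg() pattern; the caller keeps data within the field,
 * as the driver does. */
static void field_set(uint32_t addr, uint32_t mask, uint32_t data)
{
	uint32_t val = regs[addr];

	regs[addr] = (val & ~mask) | (data << bit_shift(mask));
}

/* Extract the field, mirroring rtl92e_get_bb_reg(). */
static uint32_t field_get(uint32_t addr, uint32_t mask)
{
	return (regs[addr] & mask) >> bit_shift(mask);
}

int main(void)
{
	field_set(0x10, 0x00000f00, 0x5);	/* write 5 into bits 11:8 */
	printf("field = 0x%x\n", field_get(0x10, 0x00000f00));
	return 0;
}
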
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index dbe0e1c..6bd6b3a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -27,36 +27,30 @@
 
 #include "r8190P_def.h"
 
-u8 rtl8192_QueryIsShort(u8 TxHT, u8 TxRate, struct cb_desc *tcb_desc);
-bool rtl8192_GetHalfNmodeSupportByAPs(struct net_device *dev);
-bool rtl8192_GetNmodeSupportBySecCfg(struct net_device *dev);
-bool rtl8192_HalTxCheckStuck(struct net_device *dev);
-bool rtl8192_HalRxCheckStuck(struct net_device *dev);
-void rtl8192_interrupt_recognized(struct net_device *dev, u32 *p_inta,
-				  u32 *p_intb);
-void rtl8192_enable_rx(struct net_device *dev);
-void rtl8192_enable_tx(struct net_device *dev);
-void rtl8192_EnableInterrupt(struct net_device *dev);
-void rtl8192_DisableInterrupt(struct net_device *dev);
-void rtl8192_ClearInterrupt(struct net_device *dev);
-void rtl8192_InitializeVariables(struct net_device  *dev);
-void rtl8192e_start_beacon(struct net_device *dev);
-void rtl8192e_SetHwReg(struct net_device *dev, u8 variable, u8 *val);
-void rtl8192_get_eeprom_size(struct net_device *dev);
-bool rtl8192_adapter_start(struct net_device *dev);
-void rtl8192_link_change(struct net_device *dev);
-void rtl8192_AllowAllDestAddr(struct net_device *dev, bool bAllowAllDA,
-			      bool WriteIntoReg);
-void  rtl8192_tx_fill_desc(struct net_device *dev, struct tx_desc *pdesc,
-			   struct cb_desc *cb_desc,
-			   struct sk_buff *skb);
-void  rtl8192_tx_fill_cmd_desc(struct net_device *dev,
-			       struct tx_desc_cmd *entry,
-			       struct cb_desc *cb_desc, struct sk_buff *skb);
-bool rtl8192_rx_query_status_desc(struct net_device *dev,
-				  struct rtllib_rx_stats *stats,
-				  struct rx_desc *pdesc,
-				  struct sk_buff *skb);
-void rtl8192_halt_adapter(struct net_device *dev, bool reset);
-void rtl8192_update_ratr_table(struct net_device *dev);
+bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev);
+bool rtl92e_get_nmode_support_by_sec(struct net_device *dev);
+bool rtl92e_is_tx_stuck(struct net_device *dev);
+bool rtl92e_is_rx_stuck(struct net_device *dev);
+void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb);
+void rtl92e_enable_rx(struct net_device *dev);
+void rtl92e_enable_tx(struct net_device *dev);
+void rtl92e_enable_irq(struct net_device *dev);
+void rtl92e_disable_irq(struct net_device *dev);
+void rtl92e_clear_irq(struct net_device *dev);
+void rtl92e_init_variables(struct net_device  *dev);
+void rtl92e_start_beacon(struct net_device *dev);
+void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val);
+void rtl92e_get_eeprom_size(struct net_device *dev);
+bool rtl92e_start_adapter(struct net_device *dev);
+void rtl92e_link_change(struct net_device *dev);
+void rtl92e_set_monitor_mode(struct net_device *dev, bool bAllowAllDA,
+			     bool WriteIntoReg);
+void  rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
+			  struct cb_desc *cb_desc, struct sk_buff *skb);
+void  rtl92e_fill_tx_cmd_desc(struct net_device *dev, struct tx_desc_cmd *entry,
+			      struct cb_desc *cb_desc, struct sk_buff *skb);
+bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
+			 struct rx_desc *pdesc, struct sk_buff *skb);
+void rtl92e_stop_adapter(struct net_device *dev, bool reset);
+void rtl92e_update_ratr_table(struct net_device *dev);
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 17d2a15..5c527c4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -23,7 +23,7 @@
 #include "r8192E_firmware.h"
 #include <linux/firmware.h>
 
-void firmware_init_param(struct net_device *dev)
+void rtl92e_init_fw_param(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rt_firmware *pfirmware = priv->pFirmware;
@@ -46,7 +46,7 @@
 	struct cb_desc *tcb_desc;
 	u8                  bLastIniPkt;
 
-	firmware_init_param(dev);
+	rtl92e_init_fw_param(dev);
 	frag_threshold = pfirmware->cmdpacket_frag_thresold;
 	do {
 		if ((buffer_len - frag_offset) > frag_threshold) {
@@ -96,7 +96,7 @@
 
 	} while (frag_offset < buffer_len);
 
-	write_nic_byte(dev, TPPoll, TPPoll_CQ);
+	rtl92e_writeb(dev, TPPoll, TPPoll_CQ);
 
 	return true;
 }
@@ -109,7 +109,7 @@
 
 	timeout = jiffies + msecs_to_jiffies(200);
 	while (time_before(jiffies, timeout)) {
-		CPU_status = read_nic_dword(dev, CPU_GEN);
+		CPU_status = rtl92e_readl(dev, CPU_GEN);
 		if (CPU_status & CPU_GEN_PUT_CODE_OK)
 			break;
 		mdelay(2);
@@ -122,14 +122,14 @@
 		RT_TRACE(COMP_FIRMWARE, "Download Firmware: Put code ok!\n");
 	}
 
-	CPU_status = read_nic_dword(dev, CPU_GEN);
-	write_nic_byte(dev, CPU_GEN,
-		       (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+	CPU_status = rtl92e_readl(dev, CPU_GEN);
+	rtl92e_writeb(dev, CPU_GEN,
+		      (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
 	mdelay(1);
 
 	timeout = jiffies + msecs_to_jiffies(200);
 	while (time_before(jiffies, timeout)) {
-		CPU_status = read_nic_dword(dev, CPU_GEN);
+		CPU_status = rtl92e_readl(dev, CPU_GEN);
 		if (CPU_status&CPU_GEN_BOOT_RDY)
 			break;
 		mdelay(2);
@@ -158,7 +158,7 @@
 
 	timeout = jiffies + msecs_to_jiffies(20);
 	while (time_before(jiffies, timeout)) {
-		CPU_status = read_nic_dword(dev, CPU_GEN);
+		CPU_status = rtl92e_readl(dev, CPU_GEN);
 		if (CPU_status&CPU_GEN_FIRM_RDY)
 			break;
 		mdelay(2);
@@ -223,7 +223,7 @@
 	return rt_status;
 }
 
-bool init_firmware(struct net_device *dev)
+bool rtl92e_init_fw(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	bool			rt_status = true;
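
The firmware-download hunks above keep the driver's bounded polling loops: compute a jiffies deadline with msecs_to_jiffies(200), then re-read CPU_GEN every couple of milliseconds until the expected status bit (PUT_CODE_OK, BOOT_RDY, FIRM_RDY) appears or the deadline passes. A user-space analogue of that pattern is sketched below with a monotonic clock; the boot_ready() check is hypothetical and merely stands in for the register read.

#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical status poll: pretend the "ready" bit appears on the 10th
 * read.  In the driver this would be a rtl92e_readl(dev, CPU_GEN) test. */
static bool boot_ready(void)
{
	static int reads;

	return ++reads >= 10;
}

/* Milliseconds elapsed since *start, using a monotonic clock. */
static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

int main(void)
{
	struct timespec start;
	const struct timespec poll_delay = { .tv_nsec = 2 * 1000 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (elapsed_ms(&start) < 200) {	/* 200 ms budget, as in the hunks */
		if (boot_ready()) {
			puts("firmware ready");
			return 0;
		}
		nanosleep(&poll_delay, NULL);	/* ~2 ms between reads */
	}
	puts("timed out");
	return 1;
}
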
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index d79e542..fa760f7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
@@ -19,8 +19,6 @@
 #ifndef __INC_FIRMWARE_H
 #define __INC_FIRMWARE_H
 
-#define RTL8190_CPU_START_OFFSET	0x80
-
 #define GET_COMMAND_PACKET_FRAG_THRESHOLD(v)	(4*(v/4) - 8)
 
 #define RTL8192E_BOOT_IMG_FW	"RTL8192E/boot.img"
@@ -61,7 +59,7 @@
 	u16		  firmware_buf_size[MAX_FW_INIT_STEP];
 };
 
-bool init_firmware(struct net_device *dev);
-extern void firmware_init_param(struct net_device *dev);
+bool rtl92e_init_fw(struct net_device *dev);
+void rtl92e_init_fw_param(struct net_device *dev);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 43c3fb8..c81832d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -119,10 +119,10 @@
 #define EPROM_CMD_NORMAL 0
 #define EPROM_CMD_LOAD 1
 #define EPROM_CMD_PROGRAM 2
-#define EPROM_CS_SHIFT 3
-#define EPROM_CK_SHIFT 2
-#define EPROM_W_SHIFT 1
-#define EPROM_R_SHIFT 0
+#define EPROM_CS_BIT 3
+#define EPROM_CK_BIT 2
+#define EPROM_W_BIT 1
+#define EPROM_R_BIT 0
 
 	AFR			 = 0x010,
 #define AFR_CardBEn		(1<<0)
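
The r8192E_hw.h hunk above only renames the EEPROM control bit positions from EPROM_*_SHIFT to EPROM_*_BIT; the values themselves are unchanged. As a small illustration of how such bit-position defines are typically combined into masks for a command register — the EPROM_CS/EPROM_CK/EPROM_W/EPROM_R mask names below are hypothetical, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

#define EPROM_CS_BIT 3
#define EPROM_CK_BIT 2
#define EPROM_W_BIT  1
#define EPROM_R_BIT  0

/* Hypothetical single-bit masks derived from the bit positions. */
#define EPROM_CS (1u << EPROM_CS_BIT)
#define EPROM_CK (1u << EPROM_CK_BIT)
#define EPROM_W  (1u << EPROM_W_BIT)
#define EPROM_R  (1u << EPROM_R_BIT)

int main(void)
{
	uint8_t cmd = 0;

	cmd |= EPROM_CS;		/* assert chip select */
	cmd |= EPROM_CK;		/* raise the serial clock */
	printf("cmd = 0x%02x\n", cmd);	/* prints cmd = 0x0c */
	return 0;
}
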
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index fba7654..3a15a0f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -64,7 +64,7 @@
 	return i;
 }
 
-u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath)
+u8 rtl92e_is_legal_rf_path(struct net_device *dev, u32 eRFPath)
 {
 	u8 ret = 1;
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -80,27 +80,27 @@
 	return ret;
 }
 
-void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
-		      u32 dwData)
+void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask,
+		       u32 dwData)
 {
 
 	u32 OriginalValue, BitShift, NewValue;
 
 	if (dwBitMask != bMaskDWord) {
-		OriginalValue = read_nic_dword(dev, dwRegAddr);
+		OriginalValue = rtl92e_readl(dev, dwRegAddr);
 		BitShift = rtl8192_CalculateBitShift(dwBitMask);
 		NewValue = (((OriginalValue) & (~dwBitMask)) |
 			    (dwData << BitShift));
-		write_nic_dword(dev, dwRegAddr, NewValue);
+		rtl92e_writel(dev, dwRegAddr, NewValue);
 	} else
-		write_nic_dword(dev, dwRegAddr, dwData);
+		rtl92e_writel(dev, dwRegAddr, dwData);
 }
 
-u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
+u32 rtl92e_get_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask)
 {
 	u32 Ret = 0, OriginalValue, BitShift;
 
-	OriginalValue = read_nic_dword(dev, dwRegAddr);
+	OriginalValue = rtl92e_readl(dev, dwRegAddr);
 	BitShift = rtl8192_CalculateBitShift(dwBitMask);
 	Ret = (OriginalValue & dwBitMask) >> BitShift;
 
@@ -117,19 +117,19 @@
 	Offset &= 0x3f;
 
 	if (priv->rf_chip == RF_8256) {
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
 		if (Offset >= 31) {
 			priv->RfReg0Value[eRFPath] |= 0x140;
-			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
-					 bMaskDWord,
-					 (priv->RfReg0Value[eRFPath]<<16));
+			rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+					  bMaskDWord,
+					  (priv->RfReg0Value[eRFPath]<<16));
 			NewOffset = Offset - 30;
 		} else if (Offset >= 16) {
 			priv->RfReg0Value[eRFPath] |= 0x100;
 			priv->RfReg0Value[eRFPath] &= (~0x40);
-			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
-					 bMaskDWord,
-					 (priv->RfReg0Value[eRFPath]<<16));
+			rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+					  bMaskDWord,
+					  (priv->RfReg0Value[eRFPath]<<16));
 
 			NewOffset = Offset - 15;
 		} else
@@ -139,23 +139,23 @@
 			 "check RF type here, need to be 8256\n");
 		NewOffset = Offset;
 	}
-	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
-			 NewOffset);
-	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,  bLSSIReadEdge, 0x0);
-	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2,  bLSSIReadEdge, 0x1);
+	rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress,
+			  NewOffset);
+	rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,  bLSSIReadEdge, 0x0);
+	rtl92e_set_bb_reg(dev, pPhyReg->rfHSSIPara2,  bLSSIReadEdge, 0x1);
 
 	mdelay(1);
 
-	ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack,
-				 bLSSIReadBackData);
+	ret = rtl92e_get_bb_reg(dev, pPhyReg->rfLSSIReadBack,
+				bLSSIReadBackData);
 
 	if (priv->rf_chip == RF_8256) {
 		priv->RfReg0Value[eRFPath] &= 0xebf;
 
-		rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
-				(priv->RfReg0Value[eRFPath] << 16));
+		rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
+				  (priv->RfReg0Value[eRFPath] << 16));
 
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
 	}
 
 
@@ -173,20 +173,20 @@
 
 	Offset &= 0x3f;
 	if (priv->rf_chip == RF_8256) {
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
 
 		if (Offset >= 31) {
 			priv->RfReg0Value[eRFPath] |= 0x140;
-			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
-					 bMaskDWord,
-					 (priv->RfReg0Value[eRFPath] << 16));
+			rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+					  bMaskDWord,
+					  (priv->RfReg0Value[eRFPath] << 16));
 			NewOffset = Offset - 30;
 		} else if (Offset >= 16) {
 			priv->RfReg0Value[eRFPath] |= 0x100;
 			priv->RfReg0Value[eRFPath] &= (~0x40);
-			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset,
-					 bMaskDWord,
-					 (priv->RfReg0Value[eRFPath] << 16));
+			rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+					  bMaskDWord,
+					  (priv->RfReg0Value[eRFPath] << 16));
 			NewOffset = Offset - 15;
 		} else
 			NewOffset = Offset;
@@ -198,7 +198,7 @@
 
 	DataAndAddr = (Data<<16) | (NewOffset&0x3f);
 
-	rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+	rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
 
 	if (Offset == 0x0)
 		priv->RfReg0Value[eRFPath] = Data;
@@ -206,23 +206,21 @@
 	if (priv->rf_chip == RF_8256) {
 		if (Offset != 0) {
 			priv->RfReg0Value[eRFPath] &= 0xebf;
-			rtl8192_setBBreg(
-				dev,
-				pPhyReg->rf3wireOffset,
-				bMaskDWord,
-				(priv->RfReg0Value[eRFPath] << 16));
+			rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
+					  bMaskDWord,
+					  (priv->RfReg0Value[eRFPath] << 16));
 		}
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
 	}
 }
 
-void rtl8192_phy_SetRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
-			  u32 RegAddr, u32 BitMask, u32 Data)
+void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+		       u32 RegAddr, u32 BitMask, u32 Data)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u32 Original_Value, BitShift, New_Value;
 
-	if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+	if (!rtl92e_is_legal_rf_path(dev, eRFPath))
 		return;
 	if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
 		return;
@@ -256,13 +254,13 @@
 	}
 }
 
-u32 rtl8192_phy_QueryRFReg(struct net_device *dev, enum rf90_radio_path eRFPath,
-			   u32 RegAddr, u32 BitMask)
+u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+		      u32 RegAddr, u32 BitMask)
 {
 	u32 Original_Value, Readback_Value, BitShift;
 	struct r8192_priv *priv = rtllib_priv(dev);
 
-	if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
+	if (!rtl92e_is_legal_rf_path(dev, eRFPath))
 		return 0;
 	if (priv->rtllib->eRFPowerState != eRfOn && !priv->being_init_adapter)
 		return	0;
@@ -289,20 +287,20 @@
 	Data |= ((Offset & 0xFF) << 12);
 	Data |= ((eRFPath & 0x3) << 20);
 	Data |= 0x80000000;
-	while (read_nic_dword(dev, QPNR)&0x80000000) {
+	while (rtl92e_readl(dev, QPNR) & 0x80000000) {
 		if (time++ < 100)
 			udelay(10);
 		else
 			break;
 	}
-	write_nic_dword(dev, QPNR, Data);
-	while (read_nic_dword(dev, QPNR) & 0x80000000) {
+	rtl92e_writel(dev, QPNR, Data);
+	while (rtl92e_readl(dev, QPNR) & 0x80000000) {
 		if (time++ < 100)
 			udelay(10);
 		else
 			return 0;
 	}
-	return read_nic_dword(dev, RF_DATA);
+	return rtl92e_readl(dev, RF_DATA);
 
 }
 
@@ -317,18 +315,18 @@
 	Data |= 0x400000;
 	Data |= 0x80000000;
 
-	while (read_nic_dword(dev, QPNR) & 0x80000000) {
+	while (rtl92e_readl(dev, QPNR) & 0x80000000) {
 		if (time++ < 100)
 			udelay(10);
 		else
 			break;
 	}
-	write_nic_dword(dev, QPNR, Data);
+	rtl92e_writel(dev, QPNR, Data);
 
 }
 
 
-void rtl8192_phy_configmac(struct net_device *dev)
+void rtl92e_config_mac(struct net_device *dev)
 {
 	u32 dwArrayLen = 0, i = 0;
 	u32 *pdwArray = NULL;
@@ -350,14 +348,14 @@
 			 pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
 		if (pdwArray[i] == 0x318)
 			pdwArray[i+2] = 0x00000800;
-		rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1],
-				 pdwArray[i+2]);
+		rtl92e_set_bb_reg(dev, pdwArray[i], pdwArray[i+1],
+				  pdwArray[i+2]);
 	}
 	return;
 
 }
 
-void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
+static void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType)
 {
 	int i;
 	u32 *Rtl819XPHY_REGArray_Table = NULL;
@@ -377,9 +375,9 @@
 
 	if (ConfigType == BaseBand_Config_PHY_REG) {
 		for (i = 0; i < PHY_REGArrayLen; i += 2) {
-			rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i],
-					 bMaskDWord,
-					 Rtl819XPHY_REGArray_Table[i+1]);
+			rtl92e_set_bb_reg(dev, Rtl819XPHY_REGArray_Table[i],
+					  bMaskDWord,
+					  Rtl819XPHY_REGArray_Table[i+1]);
 			RT_TRACE(COMP_DBG,
 				 "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x\n",
 				 i, Rtl819XPHY_REGArray_Table[i],
@@ -387,9 +385,9 @@
 		}
 	} else if (ConfigType == BaseBand_Config_AGC_TAB) {
 		for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
-			rtl8192_setBBreg(dev, Rtl819XAGCTAB_Array_Table[i],
-					 bMaskDWord,
-					 Rtl819XAGCTAB_Array_Table[i+1]);
+			rtl92e_set_bb_reg(dev, Rtl819XAGCTAB_Array_Table[i],
+					  bMaskDWord,
+					  Rtl819XAGCTAB_Array_Table[i+1]);
 			RT_TRACE(COMP_DBG,
 				 "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x\n",
 				 i, Rtl819XAGCTAB_Array_Table[i],
@@ -489,9 +487,8 @@
 
 }
 
-bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
-			      enum hw90_block CheckBlock,
-			      enum rf90_radio_path eRFPath)
+bool rtl92e_check_bb_and_rf(struct net_device *dev, enum hw90_block CheckBlock,
+			    enum rf90_radio_path eRFPath)
 {
 	bool ret = true;
 	u32 i, CheckTimes = 4, dwRegRead = 0;
@@ -515,20 +512,20 @@
 		switch (CheckBlock) {
 		case HW90_BLOCK_PHY0:
 		case HW90_BLOCK_PHY1:
-			write_nic_dword(dev, WriteAddr[CheckBlock],
-					WriteData[i]);
-			dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]);
+			rtl92e_writel(dev, WriteAddr[CheckBlock],
+				      WriteData[i]);
+			dwRegRead = rtl92e_readl(dev, WriteAddr[CheckBlock]);
 			break;
 
 		case HW90_BLOCK_RF:
 			WriteData[i] &= 0xfff;
-			rtl8192_phy_SetRFReg(dev, eRFPath,
-						 WriteAddr[HW90_BLOCK_RF],
-						 bMask12Bits, WriteData[i]);
+			rtl92e_set_rf_reg(dev, eRFPath,
+					  WriteAddr[HW90_BLOCK_RF],
+					  bMask12Bits, WriteData[i]);
 			mdelay(10);
-			dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath,
-						 WriteAddr[HW90_BLOCK_RF],
-						 bMaskDWord);
+			dwRegRead = rtl92e_get_rf_reg(dev, eRFPath,
+						      WriteAddr[HW90_BLOCK_RF],
+						      bMaskDWord);
 			mdelay(10);
 			break;
 
@@ -555,29 +552,29 @@
 	u8 bRegValue = 0, eCheckItem = 0;
 	u32 dwRegValue = 0;
 
-	bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET);
-	write_nic_byte(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT));
+	bRegValue = rtl92e_readb(dev, BB_GLOBAL_RESET);
+	rtl92e_writeb(dev, BB_GLOBAL_RESET, (bRegValue|BB_GLOBAL_RESET_BIT));
 
-	dwRegValue = read_nic_dword(dev, CPU_GEN);
-	write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
+	dwRegValue = rtl92e_readl(dev, CPU_GEN);
+	rtl92e_writel(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
 
 	for (eCheckItem = (enum hw90_block)HW90_BLOCK_PHY0;
 	     eCheckItem <= HW90_BLOCK_PHY1; eCheckItem++) {
-		rtStatus  = rtl8192_phy_checkBBAndRF(dev,
-					 (enum hw90_block)eCheckItem,
-					 (enum rf90_radio_path)0);
+		rtStatus  = rtl92e_check_bb_and_rf(dev,
+						   (enum hw90_block)eCheckItem,
+						   (enum rf90_radio_path)0);
 		if (!rtStatus) {
 			RT_TRACE((COMP_ERR | COMP_PHY),
-				 "PHY_RF8256_Config():Check PHY%d Fail!!\n",
+				 "rtl92e_config_rf():Check PHY%d Fail!!\n",
 				 eCheckItem-1);
 			return rtStatus;
 		}
 	}
-	rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
+	rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
 	rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG);
 
-	dwRegValue = read_nic_dword(dev, CPU_GEN);
-	write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
+	dwRegValue = rtl92e_readl(dev, CPU_GEN);
+	rtl92e_writel(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
 
 	rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB);
 
@@ -588,57 +585,57 @@
 				      priv->AntennaTxPwDiff[0]);
 		else
 			dwRegValue = 0x0;
-		rtl8192_setBBreg(dev, rFPGA0_TxGainStage,
-			(bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
+		rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
+				  (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
 
 
 		dwRegValue = priv->CrystalCap;
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
-				 dwRegValue);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
+				  dwRegValue);
 	}
 
 	return rtStatus;
 }
-bool rtl8192_BBConfig(struct net_device *dev)
+bool rtl92e_config_bb(struct net_device *dev)
 {
 	rtl8192_InitBBRFRegDef(dev);
 	return rtl8192_BB_Config_ParaFile(dev);
 }
 
-void rtl8192_phy_getTxPower(struct net_device *dev)
+void rtl92e_get_tx_power(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	priv->MCSTxPowerLevelOriginalOffset[0] =
-		read_nic_dword(dev, rTxAGC_Rate18_06);
+		rtl92e_readl(dev, rTxAGC_Rate18_06);
 	priv->MCSTxPowerLevelOriginalOffset[1] =
-		read_nic_dword(dev, rTxAGC_Rate54_24);
+		rtl92e_readl(dev, rTxAGC_Rate54_24);
 	priv->MCSTxPowerLevelOriginalOffset[2] =
-		read_nic_dword(dev, rTxAGC_Mcs03_Mcs00);
+		rtl92e_readl(dev, rTxAGC_Mcs03_Mcs00);
 	priv->MCSTxPowerLevelOriginalOffset[3] =
-		read_nic_dword(dev, rTxAGC_Mcs07_Mcs04);
+		rtl92e_readl(dev, rTxAGC_Mcs07_Mcs04);
 	priv->MCSTxPowerLevelOriginalOffset[4] =
-		read_nic_dword(dev, rTxAGC_Mcs11_Mcs08);
+		rtl92e_readl(dev, rTxAGC_Mcs11_Mcs08);
 	priv->MCSTxPowerLevelOriginalOffset[5] =
-		read_nic_dword(dev, rTxAGC_Mcs15_Mcs12);
+		rtl92e_readl(dev, rTxAGC_Mcs15_Mcs12);
 
-	priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1);
-	priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1);
-	priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1);
-	priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1);
+	priv->DefaultInitialGain[0] = rtl92e_readb(dev, rOFDM0_XAAGCCore1);
+	priv->DefaultInitialGain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
+	priv->DefaultInitialGain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
+	priv->DefaultInitialGain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
 	RT_TRACE(COMP_INIT,
 		 "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
 		 priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
 		 priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
 
-	priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3);
-	priv->framesyncC34 = read_nic_dword(dev, rOFDM0_RxDetector2);
+	priv->framesync = rtl92e_readb(dev, rOFDM0_RxDetector3);
+	priv->framesyncC34 = rtl92e_readl(dev, rOFDM0_RxDetector2);
 	RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x\n",
 		rOFDM0_RxDetector3, priv->framesync);
-	priv->SifsTime = read_nic_word(dev, SIFS);
+	priv->SifsTime = rtl92e_readw(dev, SIFS);
 }
 
-void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel)
+void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u8	powerlevel = 0, powerlevelOFDM24G = 0;
@@ -671,16 +668,17 @@
 				      priv->AntennaTxPwDiff[1]<<4 |
 				      priv->AntennaTxPwDiff[0]);
 
-			rtl8192_setBBreg(dev, rFPGA0_TxGainStage,
-			(bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue);
+			rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
+					  (bXBTxAGC|bXCTxAGC|bXDTxAGC),
+					  u4RegValue);
 		}
 	}
 	switch (priv->rf_chip) {
 	case RF_8225:
 		break;
 	case RF_8256:
-		PHY_SetRF8256CCKTxPower(dev, powerlevel);
-		PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
+		rtl92e_set_cck_tx_power(dev, powerlevel);
+		rtl92e_set_ofdm_tx_power(dev, powerlevelOFDM24G);
 		break;
 	case RF_8258:
 		break;
@@ -690,7 +688,7 @@
 	}
 }
 
-bool rtl8192_phy_RFConfig(struct net_device *dev)
+bool rtl92e_config_phy(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	bool rtStatus = true;
@@ -699,7 +697,7 @@
 	case RF_8225:
 		break;
 	case RF_8256:
-		rtStatus = PHY_RF8256_Config(dev);
+		rtStatus = rtl92e_config_rf(dev);
 		break;
 
 	case RF_8258:
@@ -714,12 +712,7 @@
 	return rtStatus;
 }
 
-void rtl8192_phy_updateInitGain(struct net_device *dev)
-{
-}
-
-u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
-				      enum rf90_radio_path eRFPath)
+u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
 {
 
 	int i;
@@ -731,10 +724,9 @@
 				msleep(100);
 				continue;
 			}
-			rtl8192_phy_SetRFReg(dev, eRFPath,
-					     Rtl819XRadioA_Array[i],
-					     bMask12Bits,
-					     Rtl819XRadioA_Array[i+1]);
+			rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioA_Array[i],
+					  bMask12Bits,
+					  Rtl819XRadioA_Array[i+1]);
 
 		}
 		break;
@@ -744,10 +736,9 @@
 				msleep(100);
 				continue;
 			}
-			rtl8192_phy_SetRFReg(dev, eRFPath,
-					     Rtl819XRadioB_Array[i],
-					     bMask12Bits,
-					     Rtl819XRadioB_Array[i+1]);
+			rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioB_Array[i],
+					  bMask12Bits,
+					  Rtl819XRadioB_Array[i+1]);
 
 		}
 		break;
@@ -757,10 +748,9 @@
 				msleep(100);
 				continue;
 			}
-			rtl8192_phy_SetRFReg(dev, eRFPath,
-					     Rtl819XRadioC_Array[i],
-					     bMask12Bits,
-					     Rtl819XRadioC_Array[i+1]);
+			rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioC_Array[i],
+					  bMask12Bits,
+					  Rtl819XRadioC_Array[i+1]);
 
 		}
 		break;
@@ -770,9 +760,9 @@
 					msleep(100);
 					continue;
 			}
-			rtl8192_phy_SetRFReg(dev, eRFPath,
-					 Rtl819XRadioD_Array[i], bMask12Bits,
-					 Rtl819XRadioD_Array[i+1]);
+			rtl92e_set_rf_reg(dev, eRFPath, Rtl819XRadioD_Array[i],
+					  bMask12Bits,
+					  Rtl819XRadioD_Array[i+1]);
 
 		}
 		break;
@@ -794,8 +784,8 @@
 		break;
 
 	case RF_8256:
-		PHY_SetRF8256CCKTxPower(dev, powerlevel);
-		PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G);
+		rtl92e_set_cck_tx_power(dev, powerlevel);
+		rtl92e_set_ofdm_tx_power(dev, powerlevelOFDM24G);
 		break;
 
 	case RF_8258:
@@ -941,21 +931,21 @@
 					rtl8192_SetTxPowerLevel(dev, channel);
 				break;
 			case CmdID_WritePortUlong:
-				write_nic_dword(dev, CurrentCmd->Para1,
-						CurrentCmd->Para2);
+				rtl92e_writel(dev, CurrentCmd->Para1,
+					      CurrentCmd->Para2);
 				break;
 			case CmdID_WritePortUshort:
-				write_nic_word(dev, CurrentCmd->Para1,
-					       (u16)CurrentCmd->Para2);
+				rtl92e_writew(dev, CurrentCmd->Para1,
+					      (u16)CurrentCmd->Para2);
 				break;
 			case CmdID_WritePortUchar:
-				write_nic_byte(dev, CurrentCmd->Para1,
-					       (u8)CurrentCmd->Para2);
+				rtl92e_writeb(dev, CurrentCmd->Para1,
+					      (u8)CurrentCmd->Para2);
 				break;
 			case CmdID_RF_WriteReg:
 				for (eRFPath = 0; eRFPath <
 				     priv->NumTotalRFPath; eRFPath++)
-					rtl8192_phy_SetRFReg(dev,
+					rtl92e_set_rf_reg(dev,
 						 (enum rf90_radio_path)eRFPath,
 						 CurrentCmd->Para1, bMask12Bits,
 						 CurrentCmd->Para2<<7);
@@ -986,7 +976,7 @@
 			break;
 	}
 }
-void rtl8192_SwChnl_WorkItem(struct net_device *dev)
+static void rtl8192_SwChnl_WorkItem(struct net_device *dev)
 {
 
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -1001,7 +991,7 @@
 	RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n");
 }
 
-u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel)
+u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1082,13 +1072,13 @@
 		if (priv->rtllib->current_network.channel == 14 &&
 		    !priv->bcck_in_ch14) {
 			priv->bcck_in_ch14 = true;
-			dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+			rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 		} else if (priv->rtllib->current_network.channel !=
 			   14 && priv->bcck_in_ch14) {
 			priv->bcck_in_ch14 = false;
-			dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+			rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 		} else {
-			dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+			rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 		}
 		break;
 
@@ -1110,13 +1100,13 @@
 		if (priv->rtllib->current_network.channel == 14 &&
 		    !priv->bcck_in_ch14) {
 			priv->bcck_in_ch14 = true;
-			dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+			rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 		} else if (priv->rtllib->current_network.channel != 14
 			   && priv->bcck_in_ch14) {
 			priv->bcck_in_ch14 = false;
-			dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+			rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 		} else {
-			dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+			rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 		}
 		break;
 	}
@@ -1150,7 +1140,7 @@
 			 priv->CCK_index);
 	break;
 	}
-	dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+	rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 }
 
 static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev)
@@ -1163,7 +1153,7 @@
 		CCK_Tx_Power_Track_BW_Switch_ThermalMeter(dev);
 }
 
-void rtl8192_SetBWModeWorkItem(struct net_device *dev)
+static void rtl8192_SetBWModeWorkItem(struct net_device *dev)
 {
 
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -1183,17 +1173,17 @@
 		netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
 		return;
 	}
-	regBwOpMode = read_nic_byte(dev, BW_OPMODE);
+	regBwOpMode = rtl92e_readb(dev, BW_OPMODE);
 
 	switch (priv->CurrentChannelBW) {
 	case HT_CHANNEL_WIDTH_20:
 		regBwOpMode |= BW_OPMODE_20MHZ;
-		write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+		rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
 		break;
 
 	case HT_CHANNEL_WIDTH_20_40:
 		regBwOpMode &= ~BW_OPMODE_20MHZ;
-		write_nic_byte(dev, BW_OPMODE, regBwOpMode);
+		rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
 		break;
 
 	default:
@@ -1204,38 +1194,38 @@
 
 	switch (priv->CurrentChannelBW) {
 	case HT_CHANNEL_WIDTH_20:
-		rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
-		rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
+		rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
+		rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
 
 		if (!priv->btxpower_tracking) {
-			write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000);
-			write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317);
-			write_nic_dword(dev, rCCK0_DebugPort, 0x00000204);
+			rtl92e_writel(dev, rCCK0_TxFilter1, 0x1a1b0000);
+			rtl92e_writel(dev, rCCK0_TxFilter2, 0x090e1317);
+			rtl92e_writel(dev, rCCK0_DebugPort, 0x00000204);
 		} else {
 			CCK_Tx_Power_Track_BW_Switch(dev);
 		}
 
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1);
 
 		break;
 	case HT_CHANNEL_WIDTH_20_40:
-		rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
-		rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
+		rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x1);
+		rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x1);
 
 		if (!priv->btxpower_tracking) {
-			write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000);
-			write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e);
-			write_nic_dword(dev, rCCK0_DebugPort, 0x00000409);
+			rtl92e_writel(dev, rCCK0_TxFilter1, 0x35360000);
+			rtl92e_writel(dev, rCCK0_TxFilter2, 0x121c252e);
+			rtl92e_writel(dev, rCCK0_DebugPort, 0x00000409);
 		} else {
 			CCK_Tx_Power_Track_BW_Switch(dev);
 		}
 
-		rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand,
-				 (priv->nCur40MhzPrimeSC>>1));
-		rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00,
-				 priv->nCur40MhzPrimeSC);
+		rtl92e_set_bb_reg(dev, rCCK0_System, bCCKSideBand,
+				  (priv->nCur40MhzPrimeSC>>1));
+		rtl92e_set_bb_reg(dev, rOFDM1_LSTF, 0xC00,
+				  priv->nCur40MhzPrimeSC);
 
-		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
+		rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
 		break;
 	default:
 		netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
@@ -1249,7 +1239,7 @@
 		break;
 
 	case RF_8256:
-		PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW);
+		rtl92e_set_bandwidth(dev, priv->CurrentChannelBW);
 		break;
 
 	case RF_8258:
@@ -1270,8 +1260,8 @@
 	RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()");
 }
 
-void rtl8192_SetBWMode(struct net_device *dev, enum ht_channel_width Bandwidth,
-		       enum ht_extchnl_offset Offset)
+void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width Bandwidth,
+			enum ht_extchnl_offset Offset)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1295,7 +1285,7 @@
 
 }
 
-void InitialGain819xPci(struct net_device *dev, u8 Operation)
+void rtl92e_init_gain(struct net_device *dev, u8 Operation)
 {
 #define SCAN_RX_INITIAL_GAIN	0x17
 #define POWER_DETECTION_TH	0x08
@@ -1312,21 +1302,21 @@
 			BitMask = bMaskByte0;
 			if (dm_digtable.dig_algorithm ==
 			    DIG_ALGO_BY_FALSE_ALARM)
-				rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+				rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
 			priv->initgain_backup.xaagccore1 =
-				 (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1,
-				 BitMask);
+				 (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1,
+						       BitMask);
 			priv->initgain_backup.xbagccore1 =
-				 (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1,
-				 BitMask);
+				 (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1,
+						       BitMask);
 			priv->initgain_backup.xcagccore1 =
-				 (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1,
-				 BitMask);
+				 (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1,
+						       BitMask);
 			priv->initgain_backup.xdagccore1 =
-				 (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1,
-				 BitMask);
+				 (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1,
+						       BitMask);
 			BitMask = bMaskByte2;
-			priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev,
+			priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev,
 						    rCCK0_CCA, BitMask);
 
 			RT_TRACE(COMP_SCAN,
@@ -1347,13 +1337,13 @@
 
 			RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x\n",
 				 initial_gain);
-			write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
-			write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
-			write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
-			write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+			rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
+			rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
+			rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
+			rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
 			RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x\n",
 				 POWER_DETECTION_TH);
-			write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH);
+			rtl92e_writeb(dev, 0xa0a, POWER_DETECTION_TH);
 			break;
 		case IG_Restore:
 			RT_TRACE(COMP_SCAN,
@@ -1361,18 +1351,18 @@
 			BitMask = 0x7f;
 			if (dm_digtable.dig_algorithm ==
 			    DIG_ALGO_BY_FALSE_ALARM)
-				rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+				rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
 
-			rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask,
+			rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, BitMask,
 					 (u32)priv->initgain_backup.xaagccore1);
-			rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask,
+			rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, BitMask,
 					 (u32)priv->initgain_backup.xbagccore1);
-			rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask,
+			rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, BitMask,
 					 (u32)priv->initgain_backup.xcagccore1);
-			rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask,
+			rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, BitMask,
 					 (u32)priv->initgain_backup.xdagccore1);
 			BitMask  = bMaskByte2;
-			rtl8192_setBBreg(dev, rCCK0_CCA, BitMask,
+			rtl92e_set_bb_reg(dev, rCCK0_CCA, BitMask,
 					 (u32)priv->initgain_backup.cca);
 
 			RT_TRACE(COMP_SCAN,
@@ -1391,12 +1381,12 @@
 				 "Scan BBInitialGainRestore 0xa0a is %x\n",
 				 priv->initgain_backup.cca);
 
-			rtl8192_phy_setTxPower(dev,
+			rtl92e_set_tx_power(dev,
 					 priv->rtllib->current_network.channel);
 
 			if (dm_digtable.dig_algorithm ==
 			    DIG_ALGO_BY_FALSE_ALARM)
-				rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+				rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
 			break;
 		default:
 			RT_TRACE(COMP_SCAN, "Unknown IG Operation.\n");
@@ -1405,17 +1395,17 @@
 	}
 }
 
-void PHY_SetRtl8192eRfOff(struct net_device *dev)
+void rtl92e_set_rf_off(struct net_device *dev)
 {
 
-	rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
-	rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
-	rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
-	rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
-	rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
-	rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
-	rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
-	write_nic_byte(dev, ANAPAR_FOR_8192PciE, 0x07);
+	rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE, BIT4, 0x0);
+	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x0);
+	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x18, 0x0);
+	rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable, 0xf, 0x0);
+	rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
+	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
+	rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
+	rtl92e_writeb(dev, ANAPAR_FOR_8192PciE, 0x07);
 
 }
 
@@ -1447,7 +1437,7 @@
 				do {
 					InitilizeCount--;
 					priv->RegRfOff = false;
-					rtstatus = NicIFEnableNIC(dev);
+					rtstatus = rtl92e_enable_nic(dev);
 				} while (!rtstatus && (InitilizeCount > 0));
 
 				if (!rtstatus) {
@@ -1461,24 +1451,24 @@
 				RT_CLEAR_PS_LEVEL(pPSC,
 						  RT_RF_OFF_LEVL_HALT_NIC);
 			} else {
-				write_nic_byte(dev, ANAPAR, 0x37);
+				rtl92e_writeb(dev, ANAPAR, 0x37);
 				mdelay(1);
-				rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
+				rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
 						 0x4, 0x1);
 				priv->bHwRfOffAction = 0;
 
-				rtl8192_setBBreg(dev, rFPGA0_XA_RFInterfaceOE,
-						 BIT4, 0x1);
-				rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4,
-						 0x300, 0x3);
-				rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
-						 0x18, 0x3);
-				rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable, 0x3,
-						 0x3);
-				rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable, 0x3,
-						 0x3);
-				rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1,
-						 0x60, 0x3);
+				rtl92e_set_bb_reg(dev, rFPGA0_XA_RFInterfaceOE,
+						  BIT4, 0x1);
+				rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4,
+						  0x300, 0x3);
+				rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
+						  0x18, 0x3);
+				rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
+						  0x3, 0x3);
+				rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
+						  0x3, 0x3);
+				rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1,
+						  0x60, 0x3);
 
 			}
 
@@ -1511,7 +1501,7 @@
 					break;
 				}
 			}
-			PHY_SetRtl8192eRfOff(dev);
+			rtl92e_set_rf_off(dev);
 			break;
 
 		case eRfOff:
@@ -1543,11 +1533,11 @@
 
 			if (pPSC->RegRfPsLevel & RT_RF_OFF_LEVL_HALT_NIC &&
 			    !RT_IN_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC)) {
-				NicIFDisableNIC(dev);
+				rtl92e_disable_nic(dev);
 				RT_SET_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
 			} else if (!(pPSC->RegRfPsLevel &
 				   RT_RF_OFF_LEVL_HALT_NIC)) {
-				PHY_SetRtl8192eRfOff(dev);
+				rtl92e_set_rf_off(dev);
 			}
 
 			break;
@@ -1586,32 +1576,34 @@
 	return bResult;
 }
 
-bool SetRFPowerState(struct net_device *dev,
-		     enum rt_rf_power_state eRFPowerState)
+bool rtl92e_set_rf_power_state(struct net_device *dev,
+			       enum rt_rf_power_state eRFPowerState)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	bool bResult = false;
 
-	RT_TRACE(COMP_PS, "---------> SetRFPowerState(): eRFPowerState(%d)\n",
+	RT_TRACE(COMP_PS,
+		 "---------> rtl92e_set_rf_power_state(): eRFPowerState(%d)\n",
 		 eRFPowerState);
 	if (eRFPowerState == priv->rtllib->eRFPowerState &&
 	    priv->bHwRfOffAction == 0) {
 		RT_TRACE(COMP_PS,
-			 "<--------- SetRFPowerState(): discard the request for eRFPowerState(%d) is the same.\n",
+			 "<--------- rtl92e_set_rf_power_state(): discard the request for eRFPowerState(%d) is the same.\n",
 			 eRFPowerState);
 		return bResult;
 	}
 
 	bResult = SetRFPowerState8190(dev, eRFPowerState);
 
-	RT_TRACE(COMP_PS, "<--------- SetRFPowerState(): bResult(%d)\n",
+	RT_TRACE(COMP_PS,
+		 "<--------- rtl92e_set_rf_power_state(): bResult(%d)\n",
 		 bResult);
 
 	return bResult;
 }
 
-void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation)
+void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 18bc582..96015d3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
@@ -67,48 +67,36 @@
 #define bMaskLWord                0x0000ffff
 #define bMaskDWord                0xffffffff
 
-extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev,
-					 u32 eRFPath);
-extern void rtl8192_setBBreg(struct net_device *dev, u32 dwRegAddr,
-			     u32 dwBitMask, u32 dwData);
-extern u32 rtl8192_QueryBBReg(struct net_device *dev, u32 dwRegAddr,
-			      u32 dwBitMask);
-extern void rtl8192_phy_SetRFReg(struct net_device *dev,
-				 enum rf90_radio_path eRFPath,
-				 u32 RegAddr, u32 BitMask, u32 Data);
-extern u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
-				  enum rf90_radio_path eRFPath,
-				  u32 RegAddr, u32 BitMask);
-extern void rtl8192_phy_configmac(struct net_device *dev);
-extern void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
-extern bool rtl8192_phy_checkBBAndRF(struct net_device *dev,
-				     enum hw90_block CheckBlock,
-				     enum rf90_radio_path eRFPath);
-extern bool rtl8192_BBConfig(struct net_device *dev);
-extern void rtl8192_phy_getTxPower(struct net_device *dev);
-extern void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
-extern bool rtl8192_phy_RFConfig(struct net_device *dev);
-extern void rtl8192_phy_updateInitGain(struct net_device *dev);
-extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
-					     enum rf90_radio_path eRFPath);
+u8 rtl92e_is_legal_rf_path(struct net_device *dev, u32 eRFPath);
+void rtl92e_set_bb_reg(struct net_device *dev, u32 dwRegAddr,
+		       u32 dwBitMask, u32 dwData);
+u32 rtl92e_get_bb_reg(struct net_device *dev, u32 dwRegAddr, u32 dwBitMask);
+void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+		       u32 RegAddr, u32 BitMask, u32 Data);
+u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
+		      u32 RegAddr, u32 BitMask);
+void rtl92e_config_mac(struct net_device *dev);
+bool rtl92e_check_bb_and_rf(struct net_device *dev,
+			    enum hw90_block CheckBlock,
+			    enum rf90_radio_path eRFPath);
+bool rtl92e_config_bb(struct net_device *dev);
+void rtl92e_get_tx_power(struct net_device *dev);
+void rtl92e_set_tx_power(struct net_device *dev, u8 channel);
+bool rtl92e_config_phy(struct net_device *dev);
+u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath);
 
-extern u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
-extern void rtl8192_SetBWMode(struct net_device *dev,
-			      enum ht_channel_width Bandwidth,
-			      enum ht_extchnl_offset Offset);
-extern void rtl8192_SwChnl_WorkItem(struct net_device *dev);
-extern void rtl8192_SetBWModeWorkItem(struct net_device *dev);
-extern void InitialGain819xPci(struct net_device *dev, u8 Operation);
+u8 rtl92e_set_channel(struct net_device *dev, u8 channel);
+void rtl92e_set_bw_mode(struct net_device *dev,
+			enum ht_channel_width Bandwidth,
+			enum ht_extchnl_offset Offset);
+void rtl92e_init_gain(struct net_device *dev, u8 Operation);
 
-extern	void PHY_SetRtl8192eRfOff(struct net_device *dev);
+void rtl92e_set_rf_off(struct net_device *dev);
 
-bool
-SetRFPowerState(
-	struct net_device *dev,
-	enum rt_rf_power_state eRFPowerState
-	);
-#define PHY_SetRFPowerState SetRFPowerState
+bool rtl92e_set_rf_power_state(struct net_device *dev,
+			       enum rt_rf_power_state eRFPowerState);
+#define PHY_SetRFPowerState rtl92e_set_rf_power_state
 
-extern void PHY_ScanOperationBackup8192(struct net_device *dev, u8 Operation);
+void rtl92e_scan_op_backup(struct net_device *dev, u8 Operation);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index f246222..29dd93a 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -28,15 +28,15 @@
 #include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
 #include "r8192E_cmdpkt.h"
 
-void CamResetAllEntry(struct net_device *dev)
+void rtl92e_cam_reset(struct net_device *dev)
 {
 	u32 ulcommand = 0;
 
 	ulcommand |= BIT31|BIT30;
-	write_nic_dword(dev, RWCAM, ulcommand);
+	rtl92e_writel(dev, RWCAM, ulcommand);
 }
 
-void EnableHWSecurityConfig8192(struct net_device *dev)
+void rtl92e_enable_hw_security_config(struct net_device *dev)
 {
 	u8 SECR_value = 0x0;
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -65,11 +65,12 @@
 	RT_TRACE(COMP_SEC, "%s:, hwsec:%d, pairwise_key:%d, SECR_value:%x\n",
 		 __func__, ieee->hwsec_active, ieee->pairwise_key_type,
 		 SECR_value);
-	write_nic_byte(dev, SECR,  SECR_value);
+	rtl92e_writeb(dev, SECR, SECR_value);
 }
 
-void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
-	       const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent, u8 is_mesh)
+void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+		      u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+		      u32 *KeyContent, u8 is_mesh)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
@@ -77,6 +78,10 @@
 	RT_TRACE(COMP_DBG,
 		 "===========>%s():EntryNo is %d,KeyIndex is %d,KeyType is %d,is_mesh is %d\n",
 		 __func__, EntryNo, KeyIndex, KeyType, is_mesh);
+
+	if (EntryNo >= TOTAL_CAM_ENTRY)
+		return;
+
 	if (!is_mesh) {
 		ieee->swcamtable[EntryNo].bused = true;
 		ieee->swcamtable[EntryNo].key_index = KeyIndex;
@@ -87,8 +92,9 @@
 	}
 }
 
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
-	    const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent)
+void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+		    u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+		    u32 *KeyContent)
 {
 	u32 TargetCommand = 0;
 	u32 TargetContent = 0;
@@ -106,16 +112,18 @@
 				return;
 			}
 			down(&priv->rtllib->ips_sem);
-			IPSLeave(dev);
+			rtl92e_ips_leave(dev);
 			up(&priv->rtllib->ips_sem);
 		}
 	}
 	priv->rtllib->is_set_key = true;
-	if (EntryNo >= TOTAL_CAM_ENTRY)
+	if (EntryNo >= TOTAL_CAM_ENTRY) {
 		netdev_info(dev, "%s(): Invalid CAM entry\n", __func__);
+		return;
+	}
 
 	RT_TRACE(COMP_SEC,
-		 "====>to setKey(), dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
+		 "====>to rtl92e_set_key(), dev:%p, EntryNo:%d, KeyIndex:%d,KeyType:%d, MacAddr %pM\n",
 		 dev, EntryNo, KeyIndex, KeyType, MacAddr);
 
 	if (DefaultKey)
@@ -133,20 +141,20 @@
 				(u32)(*(MacAddr+1)) << 24 |
 				(u32)usConfig;
 
-			write_nic_dword(dev, WCAMI, TargetContent);
-			write_nic_dword(dev, RWCAM, TargetCommand);
+			rtl92e_writel(dev, WCAMI, TargetContent);
+			rtl92e_writel(dev, RWCAM, TargetCommand);
 		} else if (i == 1) {
 			TargetContent = (u32)(*(MacAddr+2)) |
 				(u32)(*(MacAddr+3)) <<  8 |
 				(u32)(*(MacAddr+4)) << 16 |
 				(u32)(*(MacAddr+5)) << 24;
-			write_nic_dword(dev, WCAMI, TargetContent);
-			write_nic_dword(dev, RWCAM, TargetCommand);
+			rtl92e_writel(dev, WCAMI, TargetContent);
+			rtl92e_writel(dev, RWCAM, TargetCommand);
 		} else {
 			if (KeyContent != NULL) {
-				write_nic_dword(dev, WCAMI,
-						(u32)(*(KeyContent+i-2)));
-				write_nic_dword(dev, RWCAM, TargetCommand);
+				rtl92e_writel(dev, WCAMI,
+					      (u32)(*(KeyContent+i-2)));
+				rtl92e_writel(dev, RWCAM, TargetCommand);
 				udelay(100);
 			}
 		}
@@ -154,7 +162,7 @@
 	RT_TRACE(COMP_SEC, "=========>after set key, usconfig:%x\n", usConfig);
 }
 
-void CamRestoreAllEntry(struct net_device *dev)
+void rtl92e_cam_restore(struct net_device *dev)
 {
 	u8 EntryId = 0;
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -170,7 +178,7 @@
 		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 	};
 
-	RT_TRACE(COMP_SEC, "CamRestoreAllEntry:\n");
+	RT_TRACE(COMP_SEC, "rtl92e_cam_restore:\n");
 
 
 	if ((priv->rtllib->pairwise_key_type == KEY_TYPE_WEP40) ||
@@ -179,36 +187,41 @@
 		for (EntryId = 0; EntryId < 4; EntryId++) {
 			MacAddr = CAM_CONST_ADDR[EntryId];
 			if (priv->rtllib->swcamtable[EntryId].bused) {
-				setKey(dev, EntryId, EntryId,
-				       priv->rtllib->pairwise_key_type, MacAddr,
-				       0, (u32 *)(&priv->rtllib->swcamtable
-				      [EntryId].key_buf[0]));
+				rtl92e_set_key(dev, EntryId, EntryId,
+					       priv->rtllib->pairwise_key_type,
+					       MacAddr, 0,
+					       (u32 *)(&priv->rtllib->swcamtable
+						       [EntryId].key_buf[0]));
 			}
 		}
 
 	} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_TKIP) {
 		if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
-			setKey(dev, 4, 0, priv->rtllib->pairwise_key_type,
-			       (u8 *)dev->dev_addr, 0,
-			       (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
+			rtl92e_set_key(dev, 4, 0,
+				       priv->rtllib->pairwise_key_type,
+				       (u8 *)dev->dev_addr, 0,
+				       (u32 *)(&priv->rtllib->swcamtable[4].
+				       key_buf[0]));
 		} else {
-			setKey(dev, 4, 0, priv->rtllib->pairwise_key_type,
-			       MacAddr, 0,
-			       (u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
+			rtl92e_set_key(dev, 4, 0,
+				       priv->rtllib->pairwise_key_type,
+				       MacAddr, 0,
+				       (u32 *)(&priv->rtllib->swcamtable[4].
+				       key_buf[0]));
 		}
 
 	} else if (priv->rtllib->pairwise_key_type == KEY_TYPE_CCMP) {
 		if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
-			setKey(dev, 4, 0,
-			       priv->rtllib->pairwise_key_type,
-			       (u8 *)dev->dev_addr, 0,
-			       (u32 *)(&priv->rtllib->swcamtable[4].
-			       key_buf[0]));
+			rtl92e_set_key(dev, 4, 0,
+				       priv->rtllib->pairwise_key_type,
+				       (u8 *)dev->dev_addr, 0,
+				       (u32 *)(&priv->rtllib->swcamtable[4].
+				       key_buf[0]));
 		} else {
-			setKey(dev, 4, 0,
-			       priv->rtllib->pairwise_key_type, MacAddr,
-			       0, (u32 *)(&priv->rtllib->swcamtable[4].
-			       key_buf[0]));
+			rtl92e_set_key(dev, 4, 0,
+				       priv->rtllib->pairwise_key_type, MacAddr,
+				       0, (u32 *)(&priv->rtllib->swcamtable[4].
+				       key_buf[0]));
 			}
 	}
 
@@ -216,20 +229,18 @@
 		MacAddr = CAM_CONST_BROAD;
 		for (EntryId = 1; EntryId < 4; EntryId++) {
 			if (priv->rtllib->swcamtable[EntryId].bused) {
-				setKey(dev, EntryId, EntryId,
-					priv->rtllib->group_key_type,
-					MacAddr, 0,
-					(u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0])
-				     );
+				rtl92e_set_key(dev, EntryId, EntryId,
+					       priv->rtllib->group_key_type,
+					       MacAddr, 0,
+					       (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
 			}
 		}
 		if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
 			if (priv->rtllib->swcamtable[0].bused) {
-				setKey(dev, 0, 0,
-				       priv->rtllib->group_key_type,
-				       CAM_CONST_ADDR[0], 0,
-				       (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0])
-				     );
+				rtl92e_set_key(dev, 0, 0,
+					       priv->rtllib->group_key_type,
+					       CAM_CONST_ADDR[0], 0,
+					       (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
 			} else {
 				netdev_warn(dev,
 					    "%s(): ADHOC TKIP: missing key entry.\n",
@@ -241,19 +252,19 @@
 		MacAddr = CAM_CONST_BROAD;
 		for (EntryId = 1; EntryId < 4; EntryId++) {
 			if (priv->rtllib->swcamtable[EntryId].bused) {
-				setKey(dev, EntryId, EntryId,
-				       priv->rtllib->group_key_type,
-				       MacAddr, 0,
-				       (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
+				rtl92e_set_key(dev, EntryId, EntryId,
+					       priv->rtllib->group_key_type,
+					       MacAddr, 0,
+					       (u32 *)(&priv->rtllib->swcamtable[EntryId].key_buf[0]));
 			}
 		}
 
 		if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
 			if (priv->rtllib->swcamtable[0].bused) {
-				setKey(dev, 0, 0,
-					priv->rtllib->group_key_type,
-					CAM_CONST_ADDR[0], 0,
-					(u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
+				rtl92e_set_key(dev, 0, 0,
+					       priv->rtllib->group_key_type,
+					       CAM_CONST_ADDR[0], 0,
+					       (u32 *)(&priv->rtllib->swcamtable[0].key_buf[0]));
 			} else {
 				netdev_warn(dev,
 					    "%s(): ADHOC CCMP: missing key entry.\n",
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index f23ab46..9ef8b36 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -28,12 +28,14 @@
 #include <linux/types.h>
 struct net_device;
 
-void CamResetAllEntry(struct net_device *dev);
-void EnableHWSecurityConfig8192(struct net_device *dev);
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
-	    const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
-void set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
-	       const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent, u8 is_mesh);
-void CamRestoreAllEntry(struct net_device *dev);
+void rtl92e_cam_reset(struct net_device *dev);
+void rtl92e_enable_hw_security_config(struct net_device *dev);
+void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+		    u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+		    u32 *KeyContent);
+void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
+		      u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
+		      u32 *KeyContent, u8 is_mesh);
+void rtl92e_cam_restore(struct net_device *dev);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index c6cdb43..d6b46df 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -22,25 +22,6 @@
  * Contact Information:
  * wlanfae <wlanfae@realtek.com>
 ******************************************************************************/
-#undef RX_DONT_PASS_UL
-#undef DEBUG_EPROM
-#undef DEBUG_RX_VERBOSE
-#undef DUMMY_RX
-#undef DEBUG_ZERO_RX
-#undef DEBUG_RX_SKB
-#undef DEBUG_TX_FRAG
-#undef DEBUG_RX_FRAG
-#undef DEBUG_TX_FILLDESC
-#undef DEBUG_TX
-#undef DEBUG_IRQ
-#undef DEBUG_RX
-#undef DEBUG_RXALLOC
-#undef DEBUG_REGISTERS
-#undef DEBUG_RING
-#undef DEBUG_IRQ_TASKLET
-#undef DEBUG_TX_ALLOC
-#undef DEBUG_TX_DESC
-
 #include <linux/uaccess.h>
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
@@ -63,24 +44,24 @@
 
 static struct rtl819x_ops rtl819xp_ops = {
 	.nic_type			= NIC_8192E,
-	.get_eeprom_size		= rtl8192_get_eeprom_size,
-	.init_adapter_variable		= rtl8192_InitializeVariables,
-	.initialize_adapter		= rtl8192_adapter_start,
-	.link_change			= rtl8192_link_change,
-	.tx_fill_descriptor		= rtl8192_tx_fill_desc,
-	.tx_fill_cmd_descriptor		= rtl8192_tx_fill_cmd_desc,
-	.rx_query_status_descriptor	= rtl8192_rx_query_status_desc,
+	.get_eeprom_size		= rtl92e_get_eeprom_size,
+	.init_adapter_variable		= rtl92e_init_variables,
+	.initialize_adapter		= rtl92e_start_adapter,
+	.link_change			= rtl92e_link_change,
+	.tx_fill_descriptor		= rtl92e_fill_tx_desc,
+	.tx_fill_cmd_descriptor		= rtl92e_fill_tx_cmd_desc,
+	.rx_query_status_descriptor	= rtl92e_get_rx_stats,
 	.rx_command_packet_handler = NULL,
-	.stop_adapter			= rtl8192_halt_adapter,
-	.update_ratr_table		= rtl8192_update_ratr_table,
-	.irq_enable			= rtl8192_EnableInterrupt,
-	.irq_disable			= rtl8192_DisableInterrupt,
-	.irq_clear			= rtl8192_ClearInterrupt,
-	.rx_enable			= rtl8192_enable_rx,
-	.tx_enable			= rtl8192_enable_tx,
-	.interrupt_recognized		= rtl8192_interrupt_recognized,
-	.TxCheckStuckHandler		= rtl8192_HalTxCheckStuck,
-	.RxCheckStuckHandler		= rtl8192_HalRxCheckStuck,
+	.stop_adapter			= rtl92e_stop_adapter,
+	.update_ratr_table		= rtl92e_update_ratr_table,
+	.irq_enable			= rtl92e_enable_irq,
+	.irq_disable			= rtl92e_disable_irq,
+	.irq_clear			= rtl92e_clear_irq,
+	.rx_enable			= rtl92e_enable_rx,
+	.tx_enable			= rtl92e_enable_tx,
+	.interrupt_recognized		= rtl92e_ack_irq,
+	.TxCheckStuckHandler		= rtl92e_is_tx_stuck,
+	.RxCheckStuckHandler		= rtl92e_is_rx_stuck,
 };
 
 static struct pci_device_id rtl8192_pci_id_tbl[] = {
@@ -102,202 +83,61 @@
 	.id_table = rtl8192_pci_id_tbl,	/* PCI_ID table  */
 	.probe	= rtl8192_pci_probe,	/* probe fn      */
 	.remove	 = rtl8192_pci_disconnect,	/* remove fn */
-	.suspend = rtl8192E_suspend,	/* PM suspend fn */
-	.resume = rtl8192E_resume,                 /* PM resume fn  */
+	.suspend = rtl92e_suspend,	/* PM suspend fn */
+	.resume = rtl92e_resume,                 /* PM resume fn  */
 };
 
+static short rtl8192_is_tx_queue_empty(struct net_device *dev);
+static void rtl819x_watchdog_wqcallback(void *data);
+static void watch_dog_timer_callback(unsigned long data);
+static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+				   int rate);
+static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb);
+static short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
+static short rtl8192_pci_initdescring(struct net_device *dev);
+static void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
+static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
+static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
+static int _rtl8192_up(struct net_device *dev, bool is_silent_reset);
+static int rtl8192_up(struct net_device *dev);
+static int rtl8192_down(struct net_device *dev, bool shutdownrf);
+static void rtl8192_restart(void *data);
+
 /****************************************************************************
    -----------------------------IO STUFF-------------------------
 *****************************************************************************/
-static bool PlatformIOCheckPageLegalAndGetRegMask(u32 u4bPage, u8 *pu1bPageMask)
-{
-	bool		bReturn = false;
 
-	*pu1bPageMask = 0xfe;
-
-	switch (u4bPage) {
-	case 1: case 2: case 3: case 4:
-	case 8: case 9: case 10: case 12: case 13:
-		bReturn = true;
-		*pu1bPageMask = 0xf0;
-		break;
-
-	default:
-		bReturn = false;
-		break;
-	}
-
-	return bReturn;
-}
-
-void write_nic_io_byte(struct net_device *dev, int x, u8 y)
-{
-	u32 u4bPage = x >> 8;
-	u8 u1PageMask = 0;
-	bool	bIsLegalPage = false;
-
-	if (u4bPage == 0) {
-		outb(y&0xff, dev->base_addr + x);
-
-	} else {
-		bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
-			       &u1PageMask);
-		if (bIsLegalPage) {
-			u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
-			write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
-					  (u8)u4bPage));
-			write_nic_io_byte(dev, (x & 0xff), y);
-			write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-		}
-	}
-}
-
-void write_nic_io_word(struct net_device *dev, int x, u16 y)
-{
-	u32 u4bPage = x >> 8;
-	u8 u1PageMask = 0;
-	bool	bIsLegalPage = false;
-
-	if (u4bPage == 0) {
-		outw(y, dev->base_addr + x);
-	} else {
-		bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
-							 &u1PageMask);
-		if (bIsLegalPage) {
-			u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
-			write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
-					  (u8)u4bPage));
-			write_nic_io_word(dev, (x & 0xff), y);
-			write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-
-		}
-	}
-}
-
-void write_nic_io_dword(struct net_device *dev, int x, u32 y)
-{
-	u32 u4bPage = x >> 8;
-	u8 u1PageMask = 0;
-	bool	bIsLegalPage = false;
-
-	if (u4bPage == 0) {
-		outl(y, dev->base_addr + x);
-	} else {
-		bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
-						 &u1PageMask);
-		if (bIsLegalPage) {
-			u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
-			write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
-					  (u8)u4bPage));
-			write_nic_io_dword(dev, (x & 0xff), y);
-			write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-		}
-	}
-}
-
-u8 read_nic_io_byte(struct net_device *dev, int x)
-{
-	u32 u4bPage = x >> 8;
-	u8 u1PageMask = 0;
-	bool	bIsLegalPage = false;
-	u8	Data = 0;
-
-	if (u4bPage == 0)
-		return 0xff&inb(dev->base_addr + x);
-
-	bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
-							     &u1PageMask);
-	if (bIsLegalPage) {
-		u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
-		write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
-				  (u8)u4bPage));
-		Data = read_nic_io_byte(dev, (x & 0xff));
-		write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-	}
-
-	return Data;
-}
-
-u16 read_nic_io_word(struct net_device *dev, int x)
-{
-	u32 u4bPage = x >> 8;
-	u8 u1PageMask = 0;
-	bool	bIsLegalPage = false;
-	u16	Data = 0;
-
-	if (u4bPage == 0)
-		return inw(dev->base_addr + x);
-	bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
-							     &u1PageMask);
-	if (bIsLegalPage) {
-		u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
-		write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
-				  (u8)u4bPage));
-		Data = read_nic_io_word(dev, (x & 0xff));
-		write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-	}
-
-	return Data;
-}
-
-u32 read_nic_io_dword(struct net_device *dev, int x)
-{
-	u32 u4bPage = x >> 8;
-	u8 u1PageMask = 0;
-	bool	bIsLegalPage = false;
-	u32	Data = 0;
-
-	if (u4bPage == 0)
-		return inl(dev->base_addr + x);
-	bIsLegalPage = PlatformIOCheckPageLegalAndGetRegMask(u4bPage,
-		       &u1PageMask);
-	if (bIsLegalPage) {
-		u8 u1bPsr = read_nic_io_byte(dev, PSR);
-
-		write_nic_io_byte(dev, PSR, ((u1bPsr & u1PageMask) |
-				  (u8)u4bPage));
-		Data = read_nic_io_dword(dev, (x & 0xff));
-		write_nic_io_byte(dev, PSR, (u1bPsr & u1PageMask));
-	}
-
-	return Data;
-}
-
-u8 read_nic_byte(struct net_device *dev, int x)
+u8 rtl92e_readb(struct net_device *dev, int x)
 {
 	return 0xff & readb((u8 __iomem *)dev->mem_start + x);
 }
 
-u32 read_nic_dword(struct net_device *dev, int x)
+u32 rtl92e_readl(struct net_device *dev, int x)
 {
 	return readl((u8 __iomem *)dev->mem_start + x);
 }
 
-u16 read_nic_word(struct net_device *dev, int x)
+u16 rtl92e_readw(struct net_device *dev, int x)
 {
 	return readw((u8 __iomem *)dev->mem_start + x);
 }
 
-void write_nic_byte(struct net_device *dev, int x, u8 y)
+void rtl92e_writeb(struct net_device *dev, int x, u8 y)
 {
 	writeb(y, (u8 __iomem *)dev->mem_start + x);
 
 	udelay(20);
 }
 
-void write_nic_dword(struct net_device *dev, int x, u32 y)
+void rtl92e_writel(struct net_device *dev, int x, u32 y)
 {
 	writel(y, (u8 __iomem *)dev->mem_start + x);
 
 	udelay(20);
 }
 
-void write_nic_word(struct net_device *dev, int x, u16 y)
+void rtl92e_writew(struct net_device *dev, int x, u16 y)
 {
 	writew(y, (u8 __iomem *)dev->mem_start + x);
 
@@ -307,10 +147,9 @@
 /****************************************************************************
    -----------------------------GENERAL FUNCTION-------------------------
 *****************************************************************************/
-bool MgntActSet_RF_State(struct net_device *dev,
+bool rtl92e_set_rf_state(struct net_device *dev,
 			 enum rt_rf_power_state StateToSet,
-			 RT_RF_CHANGE_SOURCE ChangeSource,
-			 bool	ProtectOrNot)
+			 RT_RF_CHANGE_SOURCE ChangeSource)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
@@ -321,39 +160,34 @@
 	unsigned long flag;
 
 	RT_TRACE((COMP_PS | COMP_RF),
-		 "===>MgntActSet_RF_State(): StateToSet(%d)\n", StateToSet);
+		 "===>rtl92e_set_rf_state(): StateToSet(%d)\n", StateToSet);
 
-	ProtectOrNot = false;
+	while (true) {
+		spin_lock_irqsave(&priv->rf_ps_lock, flag);
+		if (priv->RFChangeInProgress) {
+			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+			RT_TRACE((COMP_PS | COMP_RF),
+				 "rtl92e_set_rf_state(): RF Change in progress! Wait to set..StateToSet(%d).\n",
+				 StateToSet);
 
-
-	if (!ProtectOrNot) {
-		while (true) {
-			spin_lock_irqsave(&priv->rf_ps_lock, flag);
-			if (priv->RFChangeInProgress) {
-				spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+			while (priv->RFChangeInProgress) {
+				RFWaitCounter++;
 				RT_TRACE((COMP_PS | COMP_RF),
-					 "MgntActSet_RF_State(): RF Change in progress! Wait to set..StateToSet(%d).\n",
-					 StateToSet);
+					 "rtl92e_set_rf_state(): Wait 1 ms (%d times)...\n",
+					 RFWaitCounter);
+				mdelay(1);
 
-				while (priv->RFChangeInProgress) {
-					RFWaitCounter++;
-					RT_TRACE((COMP_PS | COMP_RF),
-						 "MgntActSet_RF_State(): Wait 1 ms (%d times)...\n",
-						 RFWaitCounter);
-					mdelay(1);
-
-					if (RFWaitCounter > 100) {
-						netdev_warn(dev,
-							    "%s(): Timeout waiting for RF change.\n",
-							    __func__);
-						return false;
-					}
+				if (RFWaitCounter > 100) {
+					netdev_warn(dev,
+						    "%s(): Timeout waiting for RF change.\n",
+						    __func__);
+					return false;
 				}
-			} else {
-				priv->RFChangeInProgress = true;
-				spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
-				break;
 			}
+		} else {
+			priv->RFChangeInProgress = true;
+			spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
+			break;
 		}
 	}
 
@@ -376,7 +210,7 @@
 				bConnectBySSID = true;
 		} else {
 			RT_TRACE((COMP_PS | COMP_RF),
-				 "MgntActSet_RF_State - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
+				 "rtl92e_set_rf_state - eRfon reject pMgntInfo->RfOffReason= 0x%x, ChangeSource=0x%X\n",
 				  priv->rtllib->RfOffReason, ChangeSource);
 	}
 
@@ -413,7 +247,7 @@
 
 	if (bActionAllowed) {
 		RT_TRACE((COMP_PS | COMP_RF),
-			 "MgntActSet_RF_State(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
+			 "rtl92e_set_rf_state(): Action is allowed.... StateToSet(%d), RfOffReason(%#X)\n",
 			 StateToSet, priv->rtllib->RfOffReason);
 		PHY_SetRFPowerState(dev, StateToSet);
 		if (StateToSet == eRfOn) {
@@ -426,17 +260,15 @@
 		}
 	} else {
 		RT_TRACE((COMP_PS | COMP_RF),
-			 "MgntActSet_RF_State(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
+			 "rtl92e_set_rf_state(): Action is rejected.... StateToSet(%d), ChangeSource(%#X), RfOffReason(%#X)\n",
 			 StateToSet, ChangeSource, priv->rtllib->RfOffReason);
 	}
 
-	if (!ProtectOrNot) {
-		spin_lock_irqsave(&priv->rf_ps_lock, flag);
-		priv->RFChangeInProgress = false;
-		spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
-	}
+	spin_lock_irqsave(&priv->rf_ps_lock, flag);
+	priv->RFChangeInProgress = false;
+	spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
 
-	RT_TRACE((COMP_PS | COMP_RF), "<===MgntActSet_RF_State()\n");
+	RT_TRACE((COMP_PS | COMP_RF), "<===rtl92e_set_rf_state()\n");
 	return bActionAllowed;
 }
 
@@ -450,7 +282,7 @@
 	return 0;
 }
 
-void rtl8192_tx_timeout(struct net_device *dev)
+static void rtl8192_tx_timeout(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -458,7 +290,7 @@
 	netdev_info(dev, "TXTIMEOUT");
 }
 
-void rtl8192_irq_enable(struct net_device *dev)
+void rtl92e_irq_enable(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
@@ -467,7 +299,7 @@
 	priv->ops->irq_enable(dev);
 }
 
-void rtl8192_irq_disable(struct net_device *dev)
+void rtl92e_irq_disable(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
@@ -476,7 +308,7 @@
 	priv->irq_enabled = 0;
 }
 
-void rtl8192_set_chan(struct net_device *dev, short ch)
+static void rtl8192_set_chan(struct net_device *dev, short ch)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
@@ -490,7 +322,7 @@
 		priv->rf_set_chan(dev, priv->chan);
 }
 
-void rtl8192_update_cap(struct net_device *dev, u16 cap)
+static void rtl8192_update_cap(struct net_device *dev, u16 cap)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_network *net = &priv->rtllib->current_network;
@@ -683,7 +515,7 @@
 	RT_TRACE(COMP_QOS, "%s: network->flags = %d,%d\n", __func__,
 		 network->flags, priv->rtllib->current_network.qos_data.active);
 	if (set_qos_param == 1) {
-		dm_init_edca_turbo(priv->rtllib->dev);
+		rtl92e_dm_init_edca_turbo(priv->rtllib->dev);
 		queue_work_rsl(priv->priv_wq, &priv->qos_activate);
 	}
 	return 0;
@@ -733,7 +565,7 @@
 {
 }
 
-void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
+void rtl92e_config_rate(struct net_device *dev, u16 *rate_config)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_network *net;
@@ -864,7 +696,7 @@
 	return ret;
 }
 
-void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
+void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u8 bSupportMode = rtl8192_getSupportedWireleeMode(dev);
@@ -895,9 +727,6 @@
 
 	priv->rtllib->mode = wireless_mode;
 
-	ActUpdateChannelAccessSetting(dev, wireless_mode,
-				      &priv->ChannelAccessSetting);
-
 	if ((wireless_mode == WIRELESS_MODE_N_24G) ||
 	    (wireless_mode == WIRELESS_MODE_N_5G)) {
 		priv->rtllib->pHTInfo->bEnableHT = 1;
@@ -941,7 +770,7 @@
 	priv->bfirst_init = false;
 
 	if (priv->polling_timer_on == 0)
-		check_rfctrl_gpio_timer((unsigned long)dev);
+		rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
 
 	if (priv->rtllib->state != RTLLIB_LINKED)
 		rtllib_softmac_start_protocol(priv->rtllib, 0);
@@ -969,7 +798,7 @@
 		priv->rtllib->rtllib_ips_leave(dev);
 
 	if (priv->rtllib->state == RTLLIB_LINKED)
-		LeisurePSLeave(dev);
+		rtl92e_leisure_ps_leave(dev);
 
 	priv->bDriverIsGoingToUnload = true;
 	priv->up = 0;
@@ -982,9 +811,9 @@
 	priv->rtllib->wpa_ie_len = 0;
 	kfree(priv->rtllib->wpa_ie);
 	priv->rtllib->wpa_ie = NULL;
-	CamResetAllEntry(dev);
+	rtl92e_cam_reset(dev);
 	memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
-	rtl8192_irq_disable(dev);
+	rtl92e_irq_disable(dev);
 
 	del_timer_sync(&priv->watch_dog_timer);
 	rtl8192_cancel_deferred_work(priv);
@@ -1027,38 +856,36 @@
 	priv->rtllib->set_chan			= rtl8192_set_chan;
 	priv->rtllib->link_change		= priv->ops->link_change;
 	priv->rtllib->softmac_data_hard_start_xmit = rtl8192_hard_data_xmit;
-	priv->rtllib->data_hard_stop		= rtl8192_data_hard_stop;
-	priv->rtllib->data_hard_resume		= rtl8192_data_hard_resume;
 	priv->rtllib->check_nic_enough_desc	= rtl8192_check_nic_enough_desc;
 	priv->rtllib->handle_assoc_response	= rtl8192_handle_assoc_response;
 	priv->rtllib->handle_beacon		= rtl8192_handle_beacon;
-	priv->rtllib->SetWirelessMode		= rtl8192_SetWirelessMode;
-	priv->rtllib->LeisurePSLeave		= LeisurePSLeave;
-	priv->rtllib->SetBWModeHandler		= rtl8192_SetBWMode;
-	priv->rf_set_chan			= rtl8192_phy_SwChnl;
+	priv->rtllib->SetWirelessMode		= rtl92e_set_wireless_mode;
+	priv->rtllib->LeisurePSLeave		= rtl92e_leisure_ps_leave;
+	priv->rtllib->SetBWModeHandler		= rtl92e_set_bw_mode;
+	priv->rf_set_chan			= rtl92e_set_channel;
 
-	priv->rtllib->start_send_beacons = rtl8192e_start_beacon;
+	priv->rtllib->start_send_beacons = rtl92e_start_beacon;
 	priv->rtllib->stop_send_beacons = rtl8192_stop_beacon;
 
-	priv->rtllib->sta_wake_up = rtl8192_hw_wakeup;
-	priv->rtllib->enter_sleep_state = rtl8192_hw_to_sleep;
+	priv->rtllib->sta_wake_up = rtl92e_hw_wakeup;
+	priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
 	priv->rtllib->ps_is_queue_empty = rtl8192_is_tx_queue_empty;
 
-	priv->rtllib->GetNmodeSupportBySecCfg = rtl8192_GetNmodeSupportBySecCfg;
+	priv->rtllib->GetNmodeSupportBySecCfg = rtl92e_get_nmode_support_by_sec;
 	priv->rtllib->GetHalfNmodeSupportByAPsHandler =
-					 rtl8192_GetHalfNmodeSupportByAPs;
+						rtl92e_is_halfn_supported_by_ap;
 
-	priv->rtllib->SetHwRegHandler = rtl8192e_SetHwReg;
-	priv->rtllib->AllowAllDestAddrHandler = rtl8192_AllowAllDestAddr;
+	priv->rtllib->SetHwRegHandler = rtl92e_set_reg;
+	priv->rtllib->AllowAllDestAddrHandler = rtl92e_set_monitor_mode;
 	priv->rtllib->SetFwCmdHandler = NULL;
-	priv->rtllib->InitialGainHandler = InitialGain819xPci;
-	priv->rtllib->rtllib_ips_leave_wq = rtllib_ips_leave_wq;
-	priv->rtllib->rtllib_ips_leave = rtllib_ips_leave;
+	priv->rtllib->InitialGainHandler = rtl92e_init_gain;
+	priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
+	priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
 
 	priv->rtllib->LedControlHandler = NULL;
 	priv->rtllib->UpdateBeaconInterruptHandler = NULL;
 
-	priv->rtllib->ScanOperationBackupHandler = PHY_ScanOperationBackup8192;
+	priv->rtllib->ScanOperationBackupHandler = rtl92e_scan_op_backup;
 }
 
 static void rtl8192_init_priv_constant(struct net_device *dev)
@@ -1181,20 +1008,21 @@
 
 	priv->priv_wq = create_workqueue(DRV_NAME);
 	INIT_WORK_RSL(&priv->reset_wq, (void *)rtl8192_restart, dev);
-	INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)IPSLeave_wq, dev);
+	INIT_WORK_RSL(&priv->rtllib->ips_leave_wq, (void *)rtl92e_ips_leave_wq,
+		      dev);
 	INIT_DELAYED_WORK_RSL(&priv->watch_dog_wq,
 			      (void *)rtl819x_watchdog_wqcallback, dev);
 	INIT_DELAYED_WORK_RSL(&priv->txpower_tracking_wq,
-			      (void *)dm_txpower_trackingcallback, dev);
+			      (void *)rtl92e_dm_txpower_tracking_wq, dev);
 	INIT_DELAYED_WORK_RSL(&priv->rfpath_check_wq,
-			      (void *)dm_rf_pathcheck_workitemcallback, dev);
+			      (void *)rtl92e_dm_rf_pathcheck_wq, dev);
 	INIT_DELAYED_WORK_RSL(&priv->update_beacon_wq,
 			      (void *)rtl8192_update_beacon, dev);
 	INIT_WORK_RSL(&priv->qos_activate, (void *)rtl8192_qos_activate, dev);
 	INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_wakeup_wq,
-			      (void *) rtl8192_hw_wakeup_wq, dev);
+			      (void *) rtl92e_hw_wakeup_wq, dev);
 	INIT_DELAYED_WORK_RSL(&priv->rtllib->hw_sleep_wq,
-			      (void *) rtl8192_hw_sleep_wq, dev);
+			      (void *) rtl92e_hw_sleep_wq, dev);
 	tasklet_init(&priv->irq_rx_tasklet,
 		     (void(*)(unsigned long))rtl8192_irq_rx_tasklet,
 		     (unsigned long)priv);
@@ -1250,17 +1078,17 @@
 	priv->ops->init_adapter_variable(dev);
 	rtl8192_get_channel_map(dev);
 
-	init_hal_dm(dev);
+	rtl92e_dm_init(dev);
 
 	setup_timer(&priv->watch_dog_timer,
 		    watch_dog_timer_callback,
 		    (unsigned long) dev);
 
 	setup_timer(&priv->gpio_polling_timer,
-		    check_rfctrl_gpio_timer,
+		    rtl92e_check_rfctrl_gpio_timer,
 		    (unsigned long)dev);
 
-	rtl8192_irq_disable(dev);
+	rtl92e_irq_disable(dev);
 	if (request_irq(dev->irq, rtl8192_interrupt, IRQF_SHARED,
 	    dev->name, dev)) {
 		netdev_err(dev, "Error allocating IRQ %d", dev->irq);
@@ -1282,7 +1110,7 @@
 /***************************************************************************
 	-------------------------------WATCHDOG STUFF---------------------------
 ***************************************************************************/
-short rtl8192_is_tx_queue_empty(struct net_device *dev)
+static short rtl8192_is_tx_queue_empty(struct net_device *dev)
 {
 	int i = 0;
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -1439,7 +1267,7 @@
 		down(&priv->wx_sem);
 
 		if (priv->rtllib->state == RTLLIB_LINKED)
-			LeisurePSLeave(dev);
+			rtl92e_leisure_ps_leave(dev);
 
 		if (priv->up) {
 			netdev_info(dev, "%s():the driver is not up.\n",
@@ -1459,10 +1287,10 @@
 		if (!netif_queue_stopped(dev))
 			netif_stop_queue(dev);
 
-		rtl8192_irq_disable(dev);
+		rtl92e_irq_disable(dev);
 		del_timer_sync(&priv->watch_dog_timer);
 		rtl8192_cancel_deferred_work(priv);
-		deinit_hal_dm(dev);
+		rtl92e_dm_deinit(dev);
 		rtllib_stop_scan_syncro(ieee);
 
 		if (ieee->state == RTLLIB_LINKED) {
@@ -1479,7 +1307,7 @@
 			rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
 		}
 
-		dm_backup_dynamic_mechanism_state(dev);
+		rtl92e_dm_backup_state(dev);
 
 		up(&priv->wx_sem);
 		RT_TRACE(COMP_RESET,
@@ -1508,7 +1336,7 @@
 		priv->RFChangeInProgress = false;
 		spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
 
-		EnableHWSecurityConfig8192(dev);
+		rtl92e_enable_hw_security_config(dev);
 
 		if (ieee->state == RTLLIB_LINKED && ieee->iw_mode ==
 		    IW_MODE_INFRA) {
@@ -1527,15 +1355,13 @@
 
 			rtllib_start_send_beacons(ieee);
 
-			if (ieee->data_hard_resume)
-				ieee->data_hard_resume(ieee->dev);
 			netif_carrier_on(ieee->dev);
 		} else if (ieee->iw_mode == IW_MODE_MESH) {
 			rtl819x_silentreset_mesh_bk(dev, IsPortal);
 		}
 
-		CamRestoreAllEntry(dev);
-		dm_restore_dynamic_mechanism_state(dev);
+		rtl92e_cam_restore(dev);
+		rtl92e_dm_restore_state(dev);
 END:
 		priv->ResetProgress = RESET_TYPE_NORESET;
 		priv->reset_count++;
@@ -1543,7 +1369,7 @@
 		priv->bForcedSilentReset = false;
 		priv->bResetInProgress = false;
 
-		write_nic_byte(dev, UFWP, 1);
+		rtl92e_writeb(dev, UFWP, 1);
 		RT_TRACE(COMP_RESET, "Reset finished!! ====>[%d]\n",
 			 priv->reset_count);
 	}
@@ -1570,8 +1396,7 @@
 	}
 }
 
-
-void	rtl819x_watchdog_wqcallback(void *data)
+static void rtl819x_watchdog_wqcallback(void *data)
 {
 	struct r8192_priv *priv = container_of_dwork_rsl(data,
 				  struct r8192_priv, watch_dog_wq);
@@ -1597,7 +1422,7 @@
 		priv->rtllib->CntAfterLink = 0;
 	}
 
-	hal_dm_watchdog(dev);
+	rtl92e_dm_watchdog(dev);
 
 	if (rtllib_act_scanning(priv->rtllib, false) == false) {
 		if ((ieee->iw_mode == IW_MODE_INFRA) && (ieee->state ==
@@ -1608,8 +1433,8 @@
 			     IPS_CALLBACK_NONE) &&
 			     (!ieee->bNetPromiscuousMode)) {
 				RT_TRACE(COMP_PS,
-					 "====================>haha: IPSEnter()\n");
-				IPSEnter(dev);
+					 "====================>haha: rtl92e_ips_enter()\n");
+				rtl92e_ips_enter(dev);
 			}
 		}
 	}
@@ -1640,13 +1465,13 @@
 			bEnterPS = false;
 
 		if (bEnterPS)
-			LeisurePSEnter(dev);
+			rtl92e_leisure_ps_enter(dev);
 		else
-			LeisurePSLeave(dev);
+			rtl92e_leisure_ps_leave(dev);
 
 	} else {
 		RT_TRACE(COMP_LPS, "====>no link LPS leave\n");
-		LeisurePSLeave(dev);
+		rtl92e_leisure_ps_leave(dev);
 	}
 
 	ieee->LinkDetectInfo.NumRxOkInPeriod = 0;
@@ -1725,7 +1550,7 @@
 	RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
 }
 
-void watch_dog_timer_callback(unsigned long data)
+static void watch_dog_timer_callback(unsigned long data)
 {
 	struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
 
@@ -1737,14 +1562,14 @@
 /****************************************************************************
  ---------------------------- NIC TX/RX STUFF---------------------------
 *****************************************************************************/
-void rtl8192_rx_enable(struct net_device *dev)
+void rtl92e_rx_enable(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
 	priv->ops->rx_enable(dev);
 }
 
-void rtl8192_tx_enable(struct net_device *dev)
+void rtl92e_tx_enable(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
@@ -1802,16 +1627,7 @@
 	ring->desc = NULL;
 }
 
-void rtl8192_data_hard_stop(struct net_device *dev)
-{
-}
-
-
-void rtl8192_data_hard_resume(struct net_device *dev)
-{
-}
-
-void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
+static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
 			    int rate)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -1826,8 +1642,8 @@
 		return;
 	}
 
-	if (queue_index != TXCMD_QUEUE)
-		netdev_warn(dev, "%s(): queue index != TXCMD_QUEUE\n",
+	if (queue_index == TXCMD_QUEUE)
+		netdev_warn(dev, "%s(): queue index == TXCMD_QUEUE\n",
 			    __func__);
 
 	memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
@@ -1843,7 +1659,7 @@
 	}
 }
 
-int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 	int ret;
@@ -1902,7 +1718,7 @@
 		tasklet_schedule(&priv->irq_tx_tasklet);
 }
 
-void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
+static void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtl8192_tx_ring *ring;
@@ -1925,7 +1741,7 @@
 	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
 }
 
-short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
+static short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtl8192_tx_ring  *ring;
@@ -1997,7 +1813,7 @@
 	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
 	dev->trans_start = jiffies;
 
-	write_nic_word(dev, TPPoll, 0x01 << tcb_desc->queue_index);
+	rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
 	return 0;
 }
 
@@ -2077,8 +1893,7 @@
 	return 0;
 }
 
-
-short rtl8192_pci_initdescring(struct net_device *dev)
+static short rtl8192_pci_initdescring(struct net_device *dev)
 {
 	u32 ret;
 	int i;
@@ -2104,7 +1919,7 @@
 	return 1;
 }
 
-void rtl8192_pci_resetdescring(struct net_device *dev)
+void rtl92e_reset_desc_ring(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	int i, rx_queue_idx;
@@ -2144,8 +1959,8 @@
 	spin_unlock_irqrestore(&priv->irq_th_lock, flags);
 }
 
-void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
-				  struct rtllib_rx_stats *stats)
+void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
+				    struct rtllib_rx_stats *stats)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
@@ -2155,7 +1970,7 @@
 		priv->LastRxDescTSF = stats->mac_time;
 }
 
-long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index)
+long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
 {
 	long	signal_power;
 
@@ -2166,11 +1981,8 @@
 }
 
 
-void
-rtl819x_update_rxsignalstatistics8190pci(
-	struct r8192_priv *priv,
-	struct rtllib_rx_stats *pprevious_stats
-	)
+void rtl92e_update_rx_statistics(struct r8192_priv *priv,
+				 struct rtllib_rx_stats *pprevious_stats)
 {
 	int weighting = 0;
 
@@ -2189,13 +2001,7 @@
 					weighting) / 6;
 }
 
-void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
-				   struct rtllib_rx_stats *pprevious_stats)
-{
-}
-
-
-u8 rtl819x_query_rxpwrpercentage(char antpower)
+u8 rtl92e_rx_db_to_percent(char antpower)
 {
 	if ((antpower <= -100) || (antpower >= 20))
 		return	0;
@@ -2206,10 +2012,7 @@
 
 }	/* QueryRxPwrPercentage */
 
-u8
-rtl819x_evm_dbtopercentage(
-	char value
-	)
+u8 rtl92e_evm_db_to_percent(char value)
 {
 	char ret_val;
 
@@ -2226,11 +2029,8 @@
 	return ret_val;
 }
 
-void
-rtl8192_record_rxdesc_forlateruse(
-	struct rtllib_rx_stats *psrc_stats,
-	struct rtllib_rx_stats *ptarget_stats
-)
+void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
+			    struct rtllib_rx_stats *ptarget_stats)
 {
 	ptarget_stats->bIsAMPDU = psrc_stats->bIsAMPDU;
 	ptarget_stats->bFirstMPDU = psrc_stats->bFirstMPDU;
@@ -2344,11 +2144,6 @@
 
 }
 
-static void rtl8192_rx_cmd(struct net_device *dev)
-{
-}
-
-
 static void rtl8192_tx_resume(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
@@ -2366,26 +2161,23 @@
 	}
 }
 
-void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_tx_tasklet(struct r8192_priv *priv)
 {
 	rtl8192_tx_resume(priv->rtllib->dev);
 }
 
-void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
+static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
 {
 	rtl8192_rx_normal(priv->rtllib->dev);
 
-	if (MAX_RX_QUEUE > 1)
-		rtl8192_rx_cmd(priv->rtllib->dev);
-
-	write_nic_dword(priv->rtllib->dev, INTA_MASK,
-			read_nic_dword(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
+	rtl92e_writel(priv->rtllib->dev, INTA_MASK,
+		      rtl92e_readl(priv->rtllib->dev, INTA_MASK) | IMR_RDU);
 }
 
 /****************************************************************************
  ---------------------------- NIC START/CLOSE STUFF---------------------------
 *****************************************************************************/
-void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
+static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
 {
 	cancel_delayed_work(&priv->watch_dog_wq);
 	cancel_delayed_work(&priv->update_beacon_wq);
@@ -2394,14 +2186,13 @@
 	cancel_work_sync(&priv->qos_activate);
 }
 
-int _rtl8192_up(struct net_device *dev, bool is_silent_reset)
+static int _rtl8192_up(struct net_device *dev, bool is_silent_reset)
 {
 	if (_rtl8192_sta_up(dev, is_silent_reset) == -1)
 		return -1;
 	return 0;
 }
 
-
 static int rtl8192_open(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -2414,8 +2205,7 @@
 
 }
 
-
-int rtl8192_up(struct net_device *dev)
+static int rtl8192_up(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -2445,7 +2235,7 @@
 
 }
 
-int rtl8192_down(struct net_device *dev, bool shutdownrf)
+static int rtl8192_down(struct net_device *dev, bool shutdownrf)
 {
 	if (rtl8192_sta_down(dev, shutdownrf) == -1)
 		return -1;
@@ -2453,19 +2243,19 @@
 	return 0;
 }
 
-void rtl8192_commit(struct net_device *dev)
+void rtl92e_commit(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	if (priv->up == 0)
 		return;
 	rtllib_softmac_stop_protocol(priv->rtllib, 0, true);
-	rtl8192_irq_disable(dev);
+	rtl92e_irq_disable(dev);
 	priv->ops->stop_adapter(dev, true);
 	_rtl8192_up(dev, false);
 }
 
-void rtl8192_restart(void *data)
+static void rtl8192_restart(void *data)
 {
 	struct r8192_priv *priv = container_of_work_rsl(data, struct r8192_priv,
 				  reset_wq);
@@ -2473,7 +2263,7 @@
 
 	down(&priv->wx_sem);
 
-	rtl8192_commit(dev);
+	rtl92e_commit(dev);
 
 	up(&priv->wx_sem);
 }
@@ -2552,30 +2342,34 @@
 					if (is_zero_ether_addr(ieee->ap_mac_addr))
 						ieee->iw_mode = IW_MODE_ADHOC;
 					memcpy((u8 *)key, ipw->u.crypt.key, 16);
-					EnableHWSecurityConfig8192(dev);
-					set_swcam(dev, 4, ipw->u.crypt.idx,
-						  ieee->pairwise_key_type,
-						  (u8 *)ieee->ap_mac_addr,
-						  0, key, 0);
-					setKey(dev, 4, ipw->u.crypt.idx,
-					       ieee->pairwise_key_type,
-					       (u8 *)ieee->ap_mac_addr, 0, key);
-					if (ieee->iw_mode == IW_MODE_ADHOC) {
-						set_swcam(dev, ipw->u.crypt.idx,
-							ipw->u.crypt.idx,
-							ieee->pairwise_key_type,
-							(u8 *)ieee->ap_mac_addr,
-							0, key, 0);
-						setKey(dev, ipw->u.crypt.idx,
-						       ipw->u.crypt.idx,
+					rtl92e_enable_hw_security_config(dev);
+					rtl92e_set_swcam(dev, 4,
+							 ipw->u.crypt.idx,
+							 ieee->pairwise_key_type,
+							 (u8 *)ieee->ap_mac_addr,
+							 0, key, 0);
+					rtl92e_set_key(dev, 4, ipw->u.crypt.idx,
 						       ieee->pairwise_key_type,
 						       (u8 *)ieee->ap_mac_addr,
 						       0, key);
+					if (ieee->iw_mode == IW_MODE_ADHOC) {
+						rtl92e_set_swcam(dev,
+								 ipw->u.crypt.idx,
+								 ipw->u.crypt.idx,
+								 ieee->pairwise_key_type,
+								 (u8 *)ieee->ap_mac_addr,
+								 0, key, 0);
+						rtl92e_set_key(dev,
+							       ipw->u.crypt.idx,
+							       ipw->u.crypt.idx,
+							       ieee->pairwise_key_type,
+							       (u8 *)ieee->ap_mac_addr,
+							       0, key);
 					}
 				}
 				if ((ieee->pairwise_key_type == KEY_TYPE_CCMP)
 				     && ieee->pHTInfo->bCurrentHTSupport) {
-					write_nic_byte(dev, 0x173, 1);
+					rtl92e_writeb(dev, 0x173, 1);
 				}
 
 			} else {
@@ -2595,14 +2389,15 @@
 					ieee->group_key_type = KEY_TYPE_NA;
 
 				if (ieee->group_key_type) {
-					set_swcam(dev, ipw->u.crypt.idx,
-						  ipw->u.crypt.idx,
-						  ieee->group_key_type,
-						  broadcast_addr, 0, key, 0);
-					setKey(dev, ipw->u.crypt.idx,
-					       ipw->u.crypt.idx,
-					       ieee->group_key_type,
-					       broadcast_addr, 0, key);
+					rtl92e_set_swcam(dev, ipw->u.crypt.idx,
+							 ipw->u.crypt.idx,
+							 ieee->group_key_type,
+							 broadcast_addr, 0, key,
+							 0);
+					rtl92e_set_key(dev, ipw->u.crypt.idx,
+						       ipw->u.crypt.idx,
+						       ieee->group_key_type,
+						       broadcast_addr, 0, key);
 				}
 			}
 		}
@@ -2707,8 +2502,8 @@
 	if (inta & IMR_RDU) {
 		RT_TRACE(COMP_INTR, "rx descriptor unavailable!\n");
 		priv->stats.rxrdu++;
-		write_nic_dword(dev, INTA_MASK,
-				read_nic_dword(dev, INTA_MASK) & ~IMR_RDU);
+		rtl92e_writel(dev, INTA_MASK,
+			      rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
 		tasklet_schedule(&priv->irq_rx_tasklet);
 	}
 
@@ -2782,7 +2577,6 @@
 	struct rtl819x_ops *ops = (struct rtl819x_ops *)(id->driver_data);
 	unsigned long pmem_start, pmem_len, pmem_flags;
 	int err = -ENOMEM;
-	bool bdma64 = false;
 	u8 revision_id;
 
 	RT_TRACE(COMP_INIT, "Configuring chip resources");
@@ -2806,8 +2600,6 @@
 		goto err_pci_disable;
 
 	err = -ENODEV;
-	if (bdma64)
-		dev->features |= NETIF_F_HIGHDMA;
 
 	pci_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -2850,12 +2642,12 @@
 	pci_read_config_byte(pdev, 0x08, &revision_id);
 	/* If the revisionid is 0x10, the device uses rtl8192se. */
 	if (pdev->device == 0x8192 && revision_id == 0x10)
-		goto err_rel_mem;
+		goto err_unmap;
 
 	priv->ops = ops;
 
-	if (rtl8192_pci_findadapter(pdev, dev) == false)
-		goto err_rel_mem;
+	if (rtl92e_check_adapter(pdev, dev) == false)
+		goto err_unmap;
 
 	dev->irq = pdev->irq;
 	priv->irq = 0;
@@ -2888,7 +2680,7 @@
 	RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
 
 	if (priv->polling_timer_on == 0)
-		check_rfctrl_gpio_timer((unsigned long)dev);
+		rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
 
 	RT_TRACE(COMP_INIT, "Driver probe completed\n");
 	return 0;
@@ -2896,12 +2688,12 @@
 err_free_irq:
 	free_irq(dev->irq, dev);
 	priv->irq = 0;
+err_unmap:
+	iounmap((void __iomem *)ioaddr);
 err_rel_mem:
 	release_mem_region(pmem_start, pmem_len);
 err_rel_rtllib:
 	free_rtllib(dev);
-
-	DMESG("wlan driver load failed\n");
 err_pci_disable:
 	pci_disable_device(pdev);
 	return err;
@@ -2922,7 +2714,7 @@
 		cancel_delayed_work(&priv->gpio_change_rf_wq);
 		priv->polling_timer_on = 0;
 		rtl8192_down(dev, true);
-		deinit_hal_dm(dev);
+		rtl92e_dm_deinit(dev);
 		if (priv->pFirmware) {
 			vfree(priv->pFirmware);
 			priv->pFirmware = NULL;
@@ -2952,7 +2744,7 @@
 	RT_TRACE(COMP_DOWN, "wlan driver removed\n");
 }
 
-bool NicIFEnableNIC(struct net_device *dev)
+bool rtl92e_enable_nic(struct net_device *dev)
 {
 	bool init_status = true;
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -2977,12 +2769,12 @@
 	RT_CLEAR_PS_LEVEL(pPSC, RT_RF_OFF_LEVL_HALT_NIC);
 	priv->bfirst_init = false;
 
-	rtl8192_irq_enable(dev);
+	rtl92e_irq_enable(dev);
 	priv->bdisable_nic = false;
 	RT_TRACE(COMP_PS, "<===========%s()\n", __func__);
 	return init_status;
 }
-bool NicIFDisableNIC(struct net_device *dev)
+bool rtl92e_disable_nic(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u8 tmp_state = 0;
@@ -2993,7 +2785,7 @@
 	rtllib_softmac_stop_protocol(priv->rtllib, 0, false);
 	priv->rtllib->state = tmp_state;
 	rtl8192_cancel_deferred_work(priv);
-	rtl8192_irq_disable(dev);
+	rtl92e_irq_disable(dev);
 
 	priv->ops->stop_adapter(dev, false);
 	RT_TRACE(COMP_PS, "<=========%s()\n", __func__);
@@ -3007,7 +2799,6 @@
 	pr_info("Copyright (c) 2007-2008, Realsil Wlan Driver\n");
 
 	if (0 != pci_register_driver(&rtl8192_pci_driver)) {
-		DMESG("No device found");
 		/*pci_unregister_driver (&rtl8192_pci_driver);*/
 		return -ENODEV;
 	}
@@ -3021,7 +2812,7 @@
 	RT_TRACE(COMP_DOWN, "Exiting");
 }
 
-void check_rfctrl_gpio_timer(unsigned long data)
+void rtl92e_check_rfctrl_gpio_timer(unsigned long data)
 {
 	struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
 
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 776d950..cd948bb 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -578,84 +578,44 @@
 
 extern const struct ethtool_ops rtl819x_ethtool_ops;
 
-void rtl8192_tx_cmd(struct net_device *dev, struct sk_buff *skb);
-short rtl8192_tx(struct net_device *dev, struct sk_buff *skb);
-
-u8 read_nic_io_byte(struct net_device *dev, int x);
-u32 read_nic_io_dword(struct net_device *dev, int x);
-u16 read_nic_io_word(struct net_device *dev, int x);
-void write_nic_io_byte(struct net_device *dev, int x, u8 y);
-void write_nic_io_word(struct net_device *dev, int x, u16 y);
-void write_nic_io_dword(struct net_device *dev, int x, u32 y);
-
-u8 read_nic_byte(struct net_device *dev, int x);
-u32 read_nic_dword(struct net_device *dev, int x);
-u16 read_nic_word(struct net_device *dev, int x);
-void write_nic_byte(struct net_device *dev, int x, u8 y);
-void write_nic_word(struct net_device *dev, int x, u16 y);
-void write_nic_dword(struct net_device *dev, int x, u32 y);
+u8 rtl92e_readb(struct net_device *dev, int x);
+u32 rtl92e_readl(struct net_device *dev, int x);
+u16 rtl92e_readw(struct net_device *dev, int x);
+void rtl92e_writeb(struct net_device *dev, int x, u8 y);
+void rtl92e_writew(struct net_device *dev, int x, u16 y);
+void rtl92e_writel(struct net_device *dev, int x, u32 y);
 
 void force_pci_posting(struct net_device *dev);
 
-void rtl8192_rx_enable(struct net_device *);
-void rtl8192_tx_enable(struct net_device *);
+void rtl92e_rx_enable(struct net_device *);
+void rtl92e_tx_enable(struct net_device *);
 
-int rtl8192_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
-			    int rate);
-void rtl8192_data_hard_stop(struct net_device *dev);
-void rtl8192_data_hard_resume(struct net_device *dev);
-void rtl8192_restart(void *data);
-void rtl819x_watchdog_wqcallback(void *data);
-void rtl8192_hw_sleep_wq(void *data);
-void watch_dog_timer_callback(unsigned long data);
-void rtl8192_irq_rx_tasklet(struct r8192_priv *priv);
-void rtl8192_irq_tx_tasklet(struct r8192_priv *priv);
-int rtl8192_down(struct net_device *dev, bool shutdownrf);
-int rtl8192_up(struct net_device *dev);
-void rtl8192_commit(struct net_device *dev);
-void rtl8192_set_chan(struct net_device *dev, short ch);
+void rtl92e_hw_sleep_wq(void *data);
+void rtl92e_commit(struct net_device *dev);
 
-void check_rfctrl_gpio_timer(unsigned long data);
+void rtl92e_check_rfctrl_gpio_timer(unsigned long data);
 
-void rtl8192_hw_wakeup_wq(void *data);
-short rtl8192_pci_initdescring(struct net_device *dev);
+void rtl92e_hw_wakeup_wq(void *data);
 
-void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
+void rtl92e_reset_desc_ring(struct net_device *dev);
+void rtl92e_set_wireless_mode(struct net_device *dev, u8 wireless_mode);
+void rtl92e_irq_enable(struct net_device *dev);
+void rtl92e_config_rate(struct net_device *dev, u16 *rate_config);
+void rtl92e_irq_disable(struct net_device *dev);
 
-int _rtl8192_up(struct net_device *dev, bool is_silent_reset);
+void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
+				    struct rtllib_rx_stats *stats);
+long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index);
+void rtl92e_update_rx_statistics(struct r8192_priv *priv,
+				 struct rtllib_rx_stats *pprevious_stats);
+u8 rtl92e_evm_db_to_percent(char value);
+u8 rtl92e_rx_db_to_percent(char antpower);
+void rtl92e_copy_mpdu_stats(struct rtllib_rx_stats *psrc_stats,
+			    struct rtllib_rx_stats *ptarget_stats);
+bool rtl92e_enable_nic(struct net_device *dev);
+bool rtl92e_disable_nic(struct net_device *dev);
 
-short rtl8192_is_tx_queue_empty(struct net_device *dev);
-void rtl8192_irq_disable(struct net_device *dev);
-
-void rtl8192_tx_timeout(struct net_device *dev);
-void rtl8192_pci_resetdescring(struct net_device *dev);
-void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode);
-void rtl8192_irq_enable(struct net_device *dev);
-void rtl8192_config_rate(struct net_device *dev, u16 *rate_config);
-void rtl8192_update_cap(struct net_device *dev, u16 cap);
-void rtl8192_irq_disable(struct net_device *dev);
-
-void rtl819x_UpdateRxPktTimeStamp(struct net_device *dev,
-				  struct rtllib_rx_stats *stats);
-long rtl819x_translate_todbm(struct r8192_priv *priv, u8 signal_strength_index);
-void rtl819x_update_rxsignalstatistics8190pci(struct r8192_priv *priv,
-				      struct rtllib_rx_stats *pprevious_stats);
-u8 rtl819x_evm_dbtopercentage(char value);
-void rtl819x_process_cck_rxpathsel(struct r8192_priv *priv,
-				   struct rtllib_rx_stats *pprevious_stats);
-u8 rtl819x_query_rxpwrpercentage(char antpower);
-void rtl8192_record_rxdesc_forlateruse(struct rtllib_rx_stats *psrc_stats,
-				       struct rtllib_rx_stats *ptarget_stats);
-bool NicIFEnableNIC(struct net_device *dev);
-bool NicIFDisableNIC(struct net_device *dev);
-
-bool MgntActSet_RF_State(struct net_device *dev,
+bool rtl92e_set_rf_state(struct net_device *dev,
 			 enum rt_rf_power_state StateToSet,
-			 RT_RF_CHANGE_SOURCE ChangeSource,
-			 bool	ProtectOrNot);
-void ActUpdateChannelAccessSetting(struct net_device *dev,
-			   enum wireless_mode WirelessMode,
-			   struct channel_access_setting *ChnlAccessSetting);
-
+			 RT_RF_CHANGE_SOURCE ChangeSource);
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 3de7cc5..1a0c690 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -58,9 +58,6 @@
 	0x5e4332
 };
 
-#define RTK_UL_EDCA 0xa44f
-#define RTK_DL_EDCA 0x5e4322
-
 const u32 dm_tx_bb_gain[TxBBGainTableLength] = {
 	0x7f8001fe, /* 12 dB */
 	0x788001e2, /* 11 dB */
@@ -213,6 +210,9 @@
 
 static	void dm_check_txrateandretrycount(struct net_device *dev);
 static  void dm_check_ac_dc_power(struct net_device *dev);
+static void dm_check_fsync(struct net_device *dev);
+static void dm_CheckRfCtrlGPIO(void *data);
+static void dm_fsync_timer_callback(unsigned long data);
 
 /*---------------------Define local function prototype-----------------------*/
 
@@ -224,7 +224,7 @@
 static	void	dm_ctstoself(struct net_device *dev);
 /*---------------------------Define function prototype------------------------*/
 
-void init_hal_dm(struct net_device *dev)
+void rtl92e_dm_init(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -234,10 +234,10 @@
 
 	dm_init_dynamic_txpower(dev);
 
-	init_rate_adaptive(dev);
+	rtl92e_init_adaptive_rate(dev);
 
 	dm_dig_init(dev);
-	dm_init_edca_turbo(dev);
+	rtl92e_dm_init_edca_turbo(dev);
 	dm_init_bandwidth_autoswitch(dev);
 	dm_init_fsync(dev);
 	dm_init_rxpath_selection(dev);
@@ -249,14 +249,14 @@
 			      (void *)dm_CheckRfCtrlGPIO, dev);
 }
 
-void deinit_hal_dm(struct net_device *dev)
+void rtl92e_dm_deinit(struct net_device *dev)
 {
 
 	dm_deInit_fsync(dev);
 
 }
 
-void hal_dm_watchdog(struct net_device *dev)
+void rtl92e_dm_watchdog(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -307,7 +307,7 @@
 };
 
 
-void init_rate_adaptive(struct net_device *dev)
+void rtl92e_init_adaptive_rate(struct net_device *dev)
 {
 
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -444,7 +444,7 @@
 		if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
 			targetRATR &=  0xf00fffff;
 
-		currentRATR = read_nic_dword(dev, RATR0);
+		currentRATR = rtl92e_readl(dev, RATR0);
 		if (targetRATR !=  currentRATR) {
 			u32 ratr_value;
 
@@ -454,8 +454,8 @@
 				 currentRATR, targetRATR);
 			if (priv->rf_type == RF_1T2R)
 				ratr_value &= ~(RATE_ALL_OFDM_2SS);
-			write_nic_dword(dev, RATR0, ratr_value);
-			write_nic_byte(dev, UFWP, 1);
+			rtl92e_writel(dev, RATR0, ratr_value);
+			rtl92e_writeb(dev, UFWP, 1);
 
 			pra->last_ratr = targetRATR;
 		}
@@ -561,40 +561,40 @@
 			p->rfa_txpowertrackingindex--;
 			if (p->rfa_txpowertrackingindex_real > 4) {
 				p->rfa_txpowertrackingindex_real--;
-				rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
-						 bMaskDWord,
-						 dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+				rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+						  bMaskDWord,
+						  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
 			}
 
 			p->rfc_txpowertrackingindex--;
 			if (p->rfc_txpowertrackingindex_real > 4) {
 				p->rfc_txpowertrackingindex_real--;
-				rtl8192_setBBreg(dev,
-						 rOFDM0_XCTxIQImbalance,
-						 bMaskDWord,
-						 dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
+				rtl92e_set_bb_reg(dev,
+						  rOFDM0_XCTxIQImbalance,
+						  bMaskDWord,
+						  dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
 			}
 		} else {
-			rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
-					 bMaskDWord,
-					 dm_tx_bb_gain[4]);
-			rtl8192_setBBreg(dev,
-					 rOFDM0_XCTxIQImbalance,
-					 bMaskDWord, dm_tx_bb_gain[4]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[4]);
+			rtl92e_set_bb_reg(dev,
+					  rOFDM0_XCTxIQImbalance,
+					  bMaskDWord, dm_tx_bb_gain[4]);
 		}
 	} else {
 		if (p->rfa_txpowertrackingindex > 0) {
 			p->rfa_txpowertrackingindex--;
 			if (p->rfa_txpowertrackingindex_real > 4) {
 				p->rfa_txpowertrackingindex_real--;
-				rtl8192_setBBreg(dev,
-						 rOFDM0_XATxIQImbalance,
-						 bMaskDWord,
-						 dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+				rtl92e_set_bb_reg(dev,
+						  rOFDM0_XATxIQImbalance,
+						  bMaskDWord,
+						  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
 			}
 		} else {
-			rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
-					 bMaskDWord, dm_tx_bb_gain[4]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					  bMaskDWord, dm_tx_bb_gain[4]);
 		}
 	}
 }
@@ -608,36 +608,33 @@
 		    (p->rfc_txpowertrackingindex < TxBBGainTableLength - 1)) {
 			p->rfa_txpowertrackingindex++;
 			p->rfa_txpowertrackingindex_real++;
-			rtl8192_setBBreg(dev,
-				 rOFDM0_XATxIQImbalance,
-				 bMaskDWord,
-				 dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
 			p->rfc_txpowertrackingindex++;
 			p->rfc_txpowertrackingindex_real++;
-			rtl8192_setBBreg(dev,
-				 rOFDM0_XCTxIQImbalance,
-				 bMaskDWord,
-				 dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[p->rfc_txpowertrackingindex_real]);
 		} else {
-			rtl8192_setBBreg(dev,
-				 rOFDM0_XATxIQImbalance,
-				 bMaskDWord,
-				 dm_tx_bb_gain[TxBBGainTableLength - 1]);
-			rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance,
-					 bMaskDWord,
-					 dm_tx_bb_gain[TxBBGainTableLength - 1]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[TxBBGainTableLength - 1]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[TxBBGainTableLength - 1]);
 		}
 	} else {
 		if (p->rfa_txpowertrackingindex < (TxBBGainTableLength - 1)) {
 			p->rfa_txpowertrackingindex++;
 			p->rfa_txpowertrackingindex_real++;
-			rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
-					 bMaskDWord,
-					 dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[p->rfa_txpowertrackingindex_real]);
 		} else {
-			rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance,
-					 bMaskDWord,
-					 dm_tx_bb_gain[TxBBGainTableLength - 1]);
+			rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					  bMaskDWord,
+					  dm_tx_bb_gain[TxBBGainTableLength - 1]);
 		}
 	}
 }
@@ -656,8 +653,8 @@
 	u32	delta = 0;
 
 	RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
-	write_nic_byte(dev, Pw_Track_Flag, 0);
-	write_nic_byte(dev, FW_Busy_Flag, 0);
+	rtl92e_writeb(dev, Pw_Track_Flag, 0);
+	rtl92e_writeb(dev, FW_Busy_Flag, 0);
 	priv->rtllib->bdynamic_txpower_enable = false;
 	bHighpowerstate = priv->bDynamicTxHighPower;
 
@@ -674,12 +671,11 @@
 		tx_cmd.Op		= TXCMD_SET_TX_PWR_TRACKING;
 		tx_cmd.Length	= 4;
 		tx_cmd.Value		= Value;
-		cmpk_message_handle_tx(dev, (u8 *)&tx_cmd,
-				       DESC_PACKET_TYPE_INIT,
-				       sizeof(struct dcmd_txcmd));
+		rtl92e_send_cmd_pkt(dev, (u8 *)&tx_cmd, DESC_PACKET_TYPE_INIT,
+				    sizeof(struct dcmd_txcmd));
 		mdelay(1);
 		for (i = 0; i <= 30; i++) {
-			Pwr_Flag = read_nic_byte(dev, Pw_Track_Flag);
+			Pwr_Flag = rtl92e_readb(dev, Pw_Track_Flag);
 
 			if (Pwr_Flag == 0) {
 				mdelay(1);
@@ -687,35 +683,35 @@
 				if (priv->bResetInProgress) {
 					RT_TRACE(COMP_POWER_TRACKING,
 						 "we are in silent reset progress, so return\n");
-					write_nic_byte(dev, Pw_Track_Flag, 0);
-					write_nic_byte(dev, FW_Busy_Flag, 0);
+					rtl92e_writeb(dev, Pw_Track_Flag, 0);
+					rtl92e_writeb(dev, FW_Busy_Flag, 0);
 					return;
 				}
 				if (priv->rtllib->eRFPowerState != eRfOn) {
 					RT_TRACE(COMP_POWER_TRACKING,
 						 "we are in power save, so return\n");
-					write_nic_byte(dev, Pw_Track_Flag, 0);
-					write_nic_byte(dev, FW_Busy_Flag, 0);
+					rtl92e_writeb(dev, Pw_Track_Flag, 0);
+					rtl92e_writeb(dev, FW_Busy_Flag, 0);
 					return;
 				}
 
 				continue;
 			}
 
-			Avg_TSSI_Meas = read_nic_word(dev, Tssi_Mea_Value);
+			Avg_TSSI_Meas = rtl92e_readw(dev, Tssi_Mea_Value);
 
 			if (Avg_TSSI_Meas == 0) {
-				write_nic_byte(dev, Pw_Track_Flag, 0);
-				write_nic_byte(dev, FW_Busy_Flag, 0);
+				rtl92e_writeb(dev, Pw_Track_Flag, 0);
+				rtl92e_writeb(dev, FW_Busy_Flag, 0);
 				return;
 			}
 
 			for (k = 0; k < 5; k++) {
 				if (k != 4)
-					tmp_report[k] = read_nic_byte(dev,
+					tmp_report[k] = rtl92e_readb(dev,
 							 Tssi_Report_Value1+k);
 				else
-					tmp_report[k] = read_nic_byte(dev,
+					tmp_report[k] = rtl92e_readb(dev,
 							 Tssi_Report_Value2);
 
 				RT_TRACE(COMP_POWER_TRACKING,
@@ -729,7 +725,7 @@
 			}
 
 			if (viviflag) {
-				write_nic_byte(dev, Pw_Track_Flag, 0);
+				rtl92e_writeb(dev, Pw_Track_Flag, 0);
 				viviflag = false;
 				RT_TRACE(COMP_POWER_TRACKING,
 					 "we filted this data\n");
@@ -756,8 +752,8 @@
 
 			if (delta <= E_FOR_TX_POWER_TRACK) {
 				priv->rtllib->bdynamic_txpower_enable = true;
-				write_nic_byte(dev, Pw_Track_Flag, 0);
-				write_nic_byte(dev, FW_Busy_Flag, 0);
+				rtl92e_writeb(dev, Pw_Track_Flag, 0);
+				rtl92e_writeb(dev, FW_Busy_Flag, 0);
 				RT_TRACE(COMP_POWER_TRACKING,
 					 "tx power track is done\n");
 				RT_TRACE(COMP_POWER_TRACKING,
@@ -806,12 +802,12 @@
 				if (priv->rtllib->current_network.channel == 14 &&
 				    !priv->bcck_in_ch14) {
 					priv->bcck_in_ch14 = true;
-					dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+					rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 				} else if (priv->rtllib->current_network.channel != 14 && priv->bcck_in_ch14) {
 					priv->bcck_in_ch14 = false;
-					dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+					rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 				} else
-					dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+					rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 			}
 			RT_TRACE(COMP_POWER_TRACKING,
 				 "priv->rfa_txpowertrackingindex = %d\n",
@@ -829,23 +825,23 @@
 			if (priv->CCKPresentAttentuation_difference <= -12 ||
 			    priv->CCKPresentAttentuation_difference >= 24) {
 				priv->rtllib->bdynamic_txpower_enable = true;
-				write_nic_byte(dev, Pw_Track_Flag, 0);
-				write_nic_byte(dev, FW_Busy_Flag, 0);
+				rtl92e_writeb(dev, Pw_Track_Flag, 0);
+				rtl92e_writeb(dev, FW_Busy_Flag, 0);
 				RT_TRACE(COMP_POWER_TRACKING,
 					 "tx power track--->limited\n");
 				return;
 			}
 
-			write_nic_byte(dev, Pw_Track_Flag, 0);
+			rtl92e_writeb(dev, Pw_Track_Flag, 0);
 			Avg_TSSI_Meas_from_driver = 0;
 			for (k = 0; k < 5; k++)
 				tmp_report[k] = 0;
 			break;
 		}
-		write_nic_byte(dev, FW_Busy_Flag, 0);
+		rtl92e_writeb(dev, FW_Busy_Flag, 0);
 	}
 	priv->rtllib->bdynamic_txpower_enable = true;
-	write_nic_byte(dev, Pw_Track_Flag, 0);
+	rtl92e_writeb(dev, Pw_Track_Flag, 0);
 }
 
 static void dm_TXPowerTrackingCallback_ThermalMeter(struct net_device *dev)
@@ -857,8 +853,8 @@
 	int i = 0, CCKSwingNeedUpdate = 0;
 
 	if (!priv->btxpower_trackingInit) {
-		tmpRegA = rtl8192_QueryBBReg(dev, rOFDM0_XATxIQImbalance,
-					     bMaskDWord);
+		tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
+					    bMaskDWord);
 		for (i = 0; i < OFDM_Table_Length; i++) {
 			if (tmpRegA == OFDMSwingTable[i]) {
 				priv->OFDM_index[0] = (u8)i;
@@ -869,7 +865,7 @@
 			}
 		}
 
-		TempCCk = rtl8192_QueryBBReg(dev, rCCK0_TxFilter1, bMaskByte2);
+		TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
 		for (i = 0; i < CCK_Table_length; i++) {
 			if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
 				priv->CCK_index = (u8) i;
@@ -884,7 +880,7 @@
 		return;
 	}
 
-	tmpRegA = rtl8192_phy_QueryRFReg(dev, RF90_PATH_A, 0x12, 0x078);
+	tmpRegA = rtl92e_get_rf_reg(dev, RF90_PATH_A, 0x12, 0x078);
 	RT_TRACE(COMP_POWER_TRACKING, "Readback ThermalMeterA = %d\n", tmpRegA);
 	if (tmpRegA < 3 || tmpRegA > 13)
 		return;
@@ -939,11 +935,11 @@
 	}
 
 	if (CCKSwingNeedUpdate)
-		dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+		rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 	if (priv->OFDM_index[0] != tmpOFDMindex) {
 		priv->OFDM_index[0] = tmpOFDMindex;
-		rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
-				 OFDMSwingTable[priv->OFDM_index[0]]);
+		rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
+				  OFDMSwingTable[priv->OFDM_index[0]]);
 		RT_TRACE(COMP_POWER_TRACKING, "Update OFDMSwing[%d] = 0x%x\n",
 			 priv->OFDM_index[0],
 			 OFDMSwingTable[priv->OFDM_index[0]]);
@@ -951,7 +947,7 @@
 	priv->txpower_count = 0;
 }
 
-void	dm_txpower_trackingcallback(void *data)
+void rtl92e_dm_txpower_tracking_wq(void *data)
 {
 	struct r8192_priv *priv = container_of_dwork_rsl(data,
 				  struct r8192_priv, txpower_tracking_wq);
@@ -989,7 +985,7 @@
 		 priv->btxpower_tracking);
 }
 
-void dm_initialize_txpower_tracking(struct net_device *dev)
+void rtl92e_dm_init_txpower_tracking(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1005,7 +1001,7 @@
 	static u32 tx_power_track_counter;
 
 	RT_TRACE(COMP_POWER_TRACKING, "%s()\n", __func__);
-	if (read_nic_byte(dev, 0x11e) == 1)
+	if (rtl92e_readb(dev, 0x11e) == 1)
 		return;
 	if (!priv->btxpower_tracking)
 		return;
@@ -1039,10 +1035,10 @@
 
 	if (!TM_Trigger) {
 		{
-		rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
-		rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
-		rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
-		rtl8192_phy_SetRFReg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
+		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
+		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
+		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4d);
+		rtl92e_set_rf_reg(dev, RF90_PATH_A, 0x02, bMask12Bits, 0x4f);
 		}
 		TM_Trigger = 1;
 		return;
@@ -1074,30 +1070,30 @@
 		TempVal = (u32)(dm_cck_tx_bb_gain[attenuation][0] +
 			  (dm_cck_tx_bb_gain[attenuation][1] << 8));
 
-		rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
 		TempVal = (u32)((dm_cck_tx_bb_gain[attenuation][2]) +
 			  (dm_cck_tx_bb_gain[attenuation][3] << 8) +
 			  (dm_cck_tx_bb_gain[attenuation][4] << 16)+
 			  (dm_cck_tx_bb_gain[attenuation][5] << 24));
-		rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
 		TempVal = (u32)(dm_cck_tx_bb_gain[attenuation][6] +
 			  (dm_cck_tx_bb_gain[attenuation][7] << 8));
 
-		rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
 	} else {
 		TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][0]) +
 			  (dm_cck_tx_bb_gain_ch14[attenuation][1] << 8));
 
-		rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
 		TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][2]) +
 			  (dm_cck_tx_bb_gain_ch14[attenuation][3] << 8) +
 			  (dm_cck_tx_bb_gain_ch14[attenuation][4] << 16)+
 			  (dm_cck_tx_bb_gain_ch14[attenuation][5] << 24));
-		rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
 		TempVal = (u32)((dm_cck_tx_bb_gain_ch14[attenuation][6]) +
 			  (dm_cck_tx_bb_gain_ch14[attenuation][7] << 8));
 
-		rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
 	}
 }
 
@@ -1111,7 +1107,7 @@
 	if (!bInCH14) {
 		TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
 			  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1] << 8);
-		rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
 		RT_TRACE(COMP_POWER_TRACKING,
 			 "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter1,
 			 TempVal);
@@ -1119,14 +1115,14 @@
 			  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3] << 8) +
 			  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4] << 16)+
 			  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5] << 24);
-		rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
 		RT_TRACE(COMP_POWER_TRACKING,
 			 "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_TxFilter2,
 			 TempVal);
 		TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
 			  (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7] << 8);
 
-		rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
 		RT_TRACE(COMP_POWER_TRACKING,
 			 "CCK not chnl 14, reg 0x%x = 0x%x\n", rCCK0_DebugPort,
 			 TempVal);
@@ -1134,26 +1130,26 @@
 		TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
 			  (CCKSwingTable_Ch14[priv->CCK_index][1] << 8);
 
-		rtl8192_setBBreg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
 		RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
 			rCCK0_TxFilter1, TempVal);
 		TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
 			  (CCKSwingTable_Ch14[priv->CCK_index][3] << 8) +
 			  (CCKSwingTable_Ch14[priv->CCK_index][4] << 16)+
 			  (CCKSwingTable_Ch14[priv->CCK_index][5] << 24);
-		rtl8192_setBBreg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
 		RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
 			rCCK0_TxFilter2, TempVal);
 		TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
 			  (CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
 
-		rtl8192_setBBreg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
+		rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
 		RT_TRACE(COMP_POWER_TRACKING, "CCK chnl 14, reg 0x%x = 0x%x\n",
 			rCCK0_DebugPort, TempVal);
 	}
 }
 
-void dm_cck_txpower_adjust(struct net_device *dev, bool  binch14)
+void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1168,8 +1164,8 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_POWER_TRACKING, "Start Reset Recovery ==>\n");
-	rtl8192_setBBreg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
-			 dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
+	rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
+			  dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
 	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc80 is %08x\n",
 		 dm_tx_bb_gain[priv->rfa_txpowertrackingindex]);
 	RT_TRACE(COMP_POWER_TRACKING,
@@ -1181,10 +1177,10 @@
 	RT_TRACE(COMP_POWER_TRACKING,
 		 "Reset Recovery: CCK Attenuation is %d dB\n",
 		 priv->CCKPresentAttentuation);
-	dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
+	rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
 
-	rtl8192_setBBreg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
-			 dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
+	rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord,
+			  dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
 	RT_TRACE(COMP_POWER_TRACKING, "Reset Recovery: Fill in 0xc90 is %08x\n",
 		 dm_tx_bb_gain[priv->rfc_txpowertrackingindex]);
 	RT_TRACE(COMP_POWER_TRACKING,
@@ -1195,7 +1191,7 @@
 		 dm_tx_bb_gain_idx_to_amplify(priv->rfc_txpowertrackingindex));
 }
 
-void dm_restore_dynamic_mechanism_state(struct net_device *dev)
+void rtl92e_dm_restore_state(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	u32	reg_ratr = priv->rate_adaptive.last_ratr;
@@ -1203,7 +1199,7 @@
 
 	if (!priv->up) {
 		RT_TRACE(COMP_RATE,
-			 "<---- dm_restore_dynamic_mechanism_state(): driver is going to unload\n");
+			 "<---- rtl92e_dm_restore_state(): driver is going to unload\n");
 		return;
 	}
 
@@ -1215,8 +1211,8 @@
 	ratr_value = reg_ratr;
 	if (priv->rf_type == RF_1T2R)
 		ratr_value &= ~(RATE_ALL_OFDM_2SS);
-	write_nic_dword(dev, RATR0, ratr_value);
-	write_nic_byte(dev, UFWP, 1);
+	rtl92e_writel(dev, RATR0, ratr_value);
+	rtl92e_writeb(dev, UFWP, 1);
 	if (priv->btxpower_trackingInit && priv->btxpower_tracking)
 		dm_txpower_reset_recovery(dev);
 
@@ -1232,18 +1228,18 @@
 	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
 		return;
 
-	rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
-	rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, bit_mask,
-			 (u32)priv->initgain_backup.xaagccore1);
-	rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, bit_mask,
-			 (u32)priv->initgain_backup.xbagccore1);
-	rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, bit_mask,
-			 (u32)priv->initgain_backup.xcagccore1);
-	rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, bit_mask,
-			 (u32)priv->initgain_backup.xdagccore1);
+	rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
+	rtl92e_set_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask,
+			  (u32)priv->initgain_backup.xaagccore1);
+	rtl92e_set_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask,
+			  (u32)priv->initgain_backup.xbagccore1);
+	rtl92e_set_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask,
+			  (u32)priv->initgain_backup.xcagccore1);
+	rtl92e_set_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask,
+			  (u32)priv->initgain_backup.xdagccore1);
 	bit_mask  = bMaskByte2;
-	rtl8192_setBBreg(dev, rCCK0_CCA, bit_mask,
-			 (u32)priv->initgain_backup.cca);
+	rtl92e_set_bb_reg(dev, rCCK0_CCA, bit_mask,
+			  (u32)priv->initgain_backup.cca);
 
 	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xc50 is %x\n",
 		 priv->initgain_backup.xaagccore1);
@@ -1255,12 +1251,12 @@
 		 priv->initgain_backup.xdagccore1);
 	RT_TRACE(COMP_DIG, "dm_BBInitialGainRestore 0xa0a is %x\n",
 		 priv->initgain_backup.cca);
-	rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+	rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
 
 }
 
 
-void dm_backup_dynamic_mechanism_state(struct net_device *dev)
+void rtl92e_dm_backup_state(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1279,13 +1275,13 @@
 	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_RSSI)
 		return;
 
-	rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
-	priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, bit_mask);
-	priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, bit_mask);
-	priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, bit_mask);
-	priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, bit_mask);
+	rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
+	priv->initgain_backup.xaagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XAAGCCore1, bit_mask);
+	priv->initgain_backup.xbagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XBAGCCore1, bit_mask);
+	priv->initgain_backup.xcagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XCAGCCore1, bit_mask);
+	priv->initgain_backup.xdagccore1 = (u8)rtl92e_get_bb_reg(dev, rOFDM0_XDAGCCore1, bit_mask);
 	bit_mask  = bMaskByte2;
-	priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, bit_mask);
+	priv->initgain_backup.cca = (u8)rtl92e_get_bb_reg(dev, rCCK0_CCA, bit_mask);
 
 	RT_TRACE(COMP_DIG, "BBInitialGainBackup 0xc50 is %x\n",
 		 priv->initgain_backup.xaagccore1);
@@ -1376,7 +1372,7 @@
 		fw_dig = 0;
 	if (fw_dig <= 3) {
 		for (i = 0; i < 3; i++)
-			rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+			rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
 		fw_dig++;
 		dm_digtable.dig_state = DM_STA_DIG_OFF;
 	}
@@ -1410,7 +1406,7 @@
 	if (dm_digtable.dig_algorithm_switch) {
 		dm_digtable.dig_state = DM_STA_DIG_MAX;
 		for (i = 0; i < 3; i++)
-			rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+			rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
 		dm_digtable.dig_algorithm_switch = 0;
 	}
 
@@ -1429,19 +1425,19 @@
 		dm_digtable.dig_highpwr_state = DM_STA_DIG_MAX;
 		dm_digtable.dig_state = DM_STA_DIG_OFF;
 
-		rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8);
+		rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x8);
 
-		write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x17);
-		write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x17);
-		write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x17);
-		write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x17);
+		rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x17);
+		rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x17);
+		rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x17);
+		rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x17);
 
 		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-			write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
+			rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
 		else
-			write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
 
-		write_nic_byte(dev, 0xa0a, 0x08);
+		rtl92e_writeb(dev, 0xa0a, 0x08);
 
 		return;
 	}
@@ -1462,25 +1458,25 @@
 		dm_digtable.dig_state = DM_STA_DIG_ON;
 
 		if (reset_flag == 1) {
-			write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x2c);
-			write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x2c);
-			write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x2c);
-			write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x2c);
+			rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x2c);
+			rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x2c);
+			rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x2c);
+			rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x2c);
 		} else {
-			write_nic_byte(dev, rOFDM0_XAAGCCore1, 0x20);
-			write_nic_byte(dev, rOFDM0_XBAGCCore1, 0x20);
-			write_nic_byte(dev, rOFDM0_XCAGCCore1, 0x20);
-			write_nic_byte(dev, rOFDM0_XDAGCCore1, 0x20);
+			rtl92e_writeb(dev, rOFDM0_XAAGCCore1, 0x20);
+			rtl92e_writeb(dev, rOFDM0_XBAGCCore1, 0x20);
+			rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x20);
+			rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x20);
 		}
 
 		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-			write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+			rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
 		else
-			write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
 
-		write_nic_byte(dev, 0xa0a, 0xcd);
+		rtl92e_writeb(dev, 0xa0a, 0xcd);
 
-		rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1);
+		rtl92e_set_bb_reg(dev, UFWP, bMaskByte1, 0x1);
 	}
 	dm_ctrl_initgain_byrssi_highpwr(dev);
 }
@@ -1505,9 +1501,9 @@
 		dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
 
 		if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
+				rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
 		else
-			write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
+			rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
 	} else {
 		if (dm_digtable.dig_highpwr_state == DM_STA_DIG_OFF &&
 			(priv->reset_count == reset_cnt_highpwr))
@@ -1519,9 +1515,9 @@
 		    (priv->undecorated_smoothed_pwdb >=
 		    dm_digtable.rssi_high_thresh)) {
 			if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+				rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
 			else
-				write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
 		}
 	}
 	reset_cnt_highpwr = priv->reset_count;
@@ -1568,16 +1564,16 @@
 		reset_cnt = priv->reset_count;
 	}
 
-	if (dm_digtable.pre_ig_value != read_nic_byte(dev, rOFDM0_XAAGCCore1))
+	if (dm_digtable.pre_ig_value != rtl92e_readb(dev, rOFDM0_XAAGCCore1))
 		force_write = 1;
 
 	if ((dm_digtable.pre_ig_value != dm_digtable.cur_ig_value)
 	    || !initialized || force_write) {
 		initial_gain = (u8)dm_digtable.cur_ig_value;
-		write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain);
-		write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain);
-		write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain);
-		write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain);
+		rtl92e_writeb(dev, rOFDM0_XAAGCCore1, initial_gain);
+		rtl92e_writeb(dev, rOFDM0_XBAGCCore1, initial_gain);
+		rtl92e_writeb(dev, rOFDM0_XCAGCCore1, initial_gain);
+		rtl92e_writeb(dev, rOFDM0_XDAGCCore1, initial_gain);
 		dm_digtable.pre_ig_value = dm_digtable.cur_ig_value;
 		initialized = 1;
 		force_write = 0;
@@ -1630,20 +1626,20 @@
 	    (initialized <= 3) || force_write) {
 		if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
 			if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x00);
+				rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
 			else
-				write_nic_byte(dev, rOFDM0_RxDetector1, 0x42);
+				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
 		} else if (dm_digtable.curpd_thstate ==
 			   DIG_PD_AT_NORMAL_POWER) {
 			if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x20);
+				rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
 			else
-				write_nic_byte(dev, rOFDM0_RxDetector1, 0x44);
+				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
 		} else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
 			if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
-				write_nic_byte(dev, (rOFDM0_XATxAFE+3), 0x10);
+				rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
 			else
-				write_nic_byte(dev, rOFDM0_RxDetector1, 0x43);
+				rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
 		}
 		dm_digtable.prepd_thstate = dm_digtable.curpd_thstate;
 		if (initialized <= 3)
@@ -1687,16 +1683,16 @@
 	if ((dm_digtable.precs_ratio_state != dm_digtable.curcs_ratio_state) ||
 	    !initialized || force_write) {
 		if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_LOWER)
-			write_nic_byte(dev, 0xa0a, 0x08);
+			rtl92e_writeb(dev, 0xa0a, 0x08);
 		else if (dm_digtable.curcs_ratio_state == DIG_CS_RATIO_HIGHER)
-			write_nic_byte(dev, 0xa0a, 0xcd);
+			rtl92e_writeb(dev, 0xa0a, 0xcd);
 		dm_digtable.precs_ratio_state = dm_digtable.curcs_ratio_state;
 		initialized = 1;
 		force_write = 0;
 	}
 }
 
-void dm_init_edca_turbo(struct net_device *dev)
+void rtl92e_dm_init_edca_turbo(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -1745,21 +1741,19 @@
 			if (curTxOkCnt > 4*curRxOkCnt) {
 				if (priv->bis_cur_rdlstate ||
 				    !priv->bcurrent_turbo_EDCA) {
-					write_nic_dword(dev, EDCAPARA_BE,
-						 edca_setting_UL[pHTInfo->IOTPeer]);
+					rtl92e_writel(dev, EDCAPARA_BE,
+						      edca_setting_UL[pHTInfo->IOTPeer]);
 					priv->bis_cur_rdlstate = false;
 				}
 			} else {
 				if (!priv->bis_cur_rdlstate ||
 				    !priv->bcurrent_turbo_EDCA) {
 					if (priv->rtllib->mode == WIRELESS_MODE_G)
-						write_nic_dword(dev,
-								EDCAPARA_BE,
-							 edca_setting_DL_GMode[pHTInfo->IOTPeer]);
+						rtl92e_writel(dev, EDCAPARA_BE,
+							      edca_setting_DL_GMode[pHTInfo->IOTPeer]);
 					else
-						write_nic_dword(dev,
-								EDCAPARA_BE,
-							 edca_setting_DL[pHTInfo->IOTPeer]);
+						rtl92e_writel(dev, EDCAPARA_BE,
+							      edca_setting_DL[pHTInfo->IOTPeer]);
 					priv->bis_cur_rdlstate = true;
 				}
 			}
@@ -1769,20 +1763,18 @@
 				if (!priv->bis_cur_rdlstate ||
 				    !priv->bcurrent_turbo_EDCA) {
 					if (priv->rtllib->mode == WIRELESS_MODE_G)
-						write_nic_dword(dev,
-								EDCAPARA_BE,
-							 edca_setting_DL_GMode[pHTInfo->IOTPeer]);
+						rtl92e_writel(dev, EDCAPARA_BE,
+							      edca_setting_DL_GMode[pHTInfo->IOTPeer]);
 					else
-						write_nic_dword(dev,
-								EDCAPARA_BE,
-							 edca_setting_DL[pHTInfo->IOTPeer]);
+						rtl92e_writel(dev, EDCAPARA_BE,
+							      edca_setting_DL[pHTInfo->IOTPeer]);
 					priv->bis_cur_rdlstate = true;
 				}
 			} else {
 				if (priv->bis_cur_rdlstate ||
 				    !priv->bcurrent_turbo_EDCA) {
-					write_nic_dword(dev, EDCAPARA_BE,
-							edca_setting_UL[pHTInfo->IOTPeer]);
+					rtl92e_writel(dev, EDCAPARA_BE,
+						      edca_setting_UL[pHTInfo->IOTPeer]);
 					priv->bis_cur_rdlstate = false;
 				}
 
@@ -1854,7 +1846,7 @@
 {
 }
 
-void dm_CheckRfCtrlGPIO(void *data)
+static void dm_CheckRfCtrlGPIO(void *data)
 {
 	struct r8192_priv *priv = container_of_dwork_rsl(data,
 				  struct r8192_priv, gpio_change_rf_wq);
@@ -1877,7 +1869,7 @@
 		return;
 	}
 
-	tmp1byte = read_nic_byte(dev, GPI);
+	tmp1byte = rtl92e_readb(dev, GPI);
 
 	eRfPowerStateToSet = (tmp1byte&BIT1) ?  eRfOn : eRfOff;
 
@@ -1896,8 +1888,7 @@
 	if (bActuallySet) {
 		mdelay(1000);
 		priv->bHwRfOffAction = 1;
-		MgntActSet_RF_State(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW,
-				    true);
+		rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
 		if (priv->bHwRadioOff)
 			argv[1] = "RFOFF";
 		else
@@ -1909,7 +1900,7 @@
 	}
 }
 
-void	dm_rf_pathcheck_workitemcallback(void *data)
+void rtl92e_dm_rf_pathcheck_wq(void *data)
 {
 	struct r8192_priv *priv = container_of_dwork_rsl(data,
 				  struct r8192_priv,
@@ -1917,7 +1908,7 @@
 	struct net_device *dev = priv->rtllib->dev;
 	u8 rfpath = 0, i;
 
-	rfpath = read_nic_byte(dev, 0xc04);
+	rfpath = rtl92e_readb(dev, 0xc04);
 
 	for (i = 0; i < RF90_PATH_MAX; i++) {
 		if (rfpath & (0x01<<i))
@@ -1974,12 +1965,12 @@
 		return;
 
 	if (!cck_Rx_Path_initialized) {
-		DM_RxPathSelTable.cck_Rx_path = (read_nic_byte(dev, 0xa07)&0xf);
+		DM_RxPathSelTable.cck_Rx_path = (rtl92e_readb(dev, 0xa07)&0xf);
 		cck_Rx_Path_initialized = 1;
 	}
 
 	DM_RxPathSelTable.disabledRF = 0xf;
-	DM_RxPathSelTable.disabledRF &= ~(read_nic_byte(dev, 0xc04));
+	DM_RxPathSelTable.disabledRF &= ~(rtl92e_readb(dev, 0xc04));
 
 	if (priv->rtllib->mode == WIRELESS_MODE_B)
 		DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
@@ -2116,10 +2107,10 @@
 		     DM_RxPathSelTable.diff_TH) {
 			DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] =
 				 tmp_max_rssi+5;
-			rtl8192_setBBreg(dev, rOFDM0_TRxPathEnable,
-				 0x1<<min_rssi_index, 0x0);
-			rtl8192_setBBreg(dev, rOFDM1_TRxPathEnable,
-				 0x1<<min_rssi_index, 0x0);
+			rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
+					  0x1<<min_rssi_index, 0x0);
+			rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable,
+					  0x1<<min_rssi_index, 0x0);
 			disabled_rf_cnt++;
 		}
 		if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) {
@@ -2133,8 +2124,8 @@
 	if (update_cck_rx_path) {
 		DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2) |
 						(cck_optional_Rx);
-		rtl8192_setBBreg(dev, rCCK0_AFESetting, 0x0f000000,
-				 DM_RxPathSelTable.cck_Rx_path);
+		rtl92e_set_bb_reg(dev, rCCK0_AFESetting, 0x0f000000,
+				  DM_RxPathSelTable.cck_Rx_path);
 	}
 
 	if (DM_RxPathSelTable.disabledRF) {
@@ -2142,12 +2133,12 @@
 			if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) {
 				if (tmp_max_rssi >=
 				    DM_RxPathSelTable.rf_enable_rssi_th[i]) {
-					rtl8192_setBBreg(dev,
-						 rOFDM0_TRxPathEnable, 0x1 << i,
-						 0x1);
-					rtl8192_setBBreg(dev,
-						 rOFDM1_TRxPathEnable,
-						 0x1 << i, 0x1);
+					rtl92e_set_bb_reg(dev,
+							  rOFDM0_TRxPathEnable,
+							  0x1 << i, 0x1);
+					rtl92e_set_bb_reg(dev,
+							  rOFDM1_TRxPathEnable,
+							  0x1 << i, 0x1);
 					DM_RxPathSelTable.rf_enable_rssi_th[i]
 						 = 100;
 					disabled_rf_cnt--;
@@ -2191,7 +2182,7 @@
 	del_timer_sync(&priv->fsync_timer);
 }
 
-void dm_fsync_timer_callback(unsigned long data)
+static void dm_fsync_timer_callback(unsigned long data)
 {
 	struct net_device *dev = (struct net_device *)data;
 	struct r8192_priv *priv = rtllib_priv((struct net_device *)data);
@@ -2252,18 +2243,18 @@
 			bDoubleTimeInterval = true;
 			priv->bswitch_fsync = !priv->bswitch_fsync;
 			if (priv->bswitch_fsync) {
-				write_nic_byte(dev, 0xC36, 0x1c);
-				write_nic_byte(dev, 0xC3e, 0x90);
+				rtl92e_writeb(dev, 0xC36, 0x1c);
+				rtl92e_writeb(dev, 0xC3e, 0x90);
 			} else {
-				write_nic_byte(dev, 0xC36, 0x5c);
-				write_nic_byte(dev, 0xC3e, 0x96);
+				rtl92e_writeb(dev, 0xC36, 0x5c);
+				rtl92e_writeb(dev, 0xC3e, 0x96);
 			}
 		} else if (priv->undecorated_smoothed_pwdb <=
 			   priv->rtllib->fsync_rssi_threshold) {
 			if (priv->bswitch_fsync) {
 				priv->bswitch_fsync  = false;
-				write_nic_byte(dev, 0xC36, 0x5c);
-				write_nic_byte(dev, 0xC3e, 0x96);
+				rtl92e_writeb(dev, 0xC36, 0x5c);
+				rtl92e_writeb(dev, 0xC3e, 0x96);
 			}
 		}
 		if (bDoubleTimeInterval) {
@@ -2283,11 +2274,11 @@
 	} else {
 		if (priv->bswitch_fsync) {
 			priv->bswitch_fsync  = false;
-			write_nic_byte(dev, 0xC36, 0x5c);
-			write_nic_byte(dev, 0xC3e, 0x96);
+			rtl92e_writeb(dev, 0xC36, 0x5c);
+			rtl92e_writeb(dev, 0xC3e, 0x96);
 		}
 		priv->ContinueDiffCount = 0;
-		write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+		rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
 	}
 	RT_TRACE(COMP_HALDM, "ContinueDiffCount %d\n", priv->ContinueDiffCount);
 	RT_TRACE(COMP_HALDM,
@@ -2302,10 +2293,10 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_HALDM, "%s\n", __func__);
-	write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cf);
+	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
 	priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
 				      (u8 *)(&rf_timing));
-	write_nic_byte(dev, 0xc3b, 0x41);
+	rtl92e_writeb(dev, 0xc3b, 0x41);
 }
 
 static void dm_EndHWFsync(struct net_device *dev)
@@ -2314,10 +2305,10 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 
 	RT_TRACE(COMP_HALDM, "%s\n", __func__);
-	write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
 	priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
 				     (&rf_timing));
-	write_nic_byte(dev, 0xc3b, 0x49);
+	rtl92e_writeb(dev, 0xc3b, 0x49);
 }
 
 static void dm_EndSWFsync(struct net_device *dev)
@@ -2330,13 +2321,13 @@
 	if (priv->bswitch_fsync) {
 		priv->bswitch_fsync  = false;
 
-		write_nic_byte(dev, 0xC36, 0x5c);
+		rtl92e_writeb(dev, 0xC36, 0x5c);
 
-		write_nic_byte(dev, 0xC3e, 0x96);
+		rtl92e_writeb(dev, 0xC3e, 0x96);
 	}
 
 	priv->ContinueDiffCount = 0;
-	write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c52cd);
+	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
 }
 
 static void dm_StartSWFsync(struct net_device *dev)
@@ -2371,11 +2362,11 @@
 				    msecs_to_jiffies(priv->rtllib->fsync_time_interval);
 	add_timer(&priv->fsync_timer);
 
-	write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
+	rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cd);
 
 }
 
-void dm_check_fsync(struct net_device *dev)
+static void dm_check_fsync(struct net_device *dev)
 {
 #define	RegC38_Default			0
 #define	RegC38_NonFsync_Other_AP	1
@@ -2431,7 +2422,7 @@
 		}
 		if (priv->framesyncMonitor) {
 			if (reg_c38_State != RegC38_Fsync_AP_BCM) {
-				write_nic_byte(dev, rOFDM0_RxDetector3, 0x95);
+				rtl92e_writeb(dev, rOFDM0_RxDetector3, 0x95);
 
 				reg_c38_State = RegC38_Fsync_AP_BCM;
 			}
@@ -2457,7 +2448,7 @@
 				    RegC38_TH) {
 					if (reg_c38_State !=
 					    RegC38_NonFsync_Other_AP) {
-							write_nic_byte(dev,
+							rtl92e_writeb(dev,
 							    rOFDM0_RxDetector3,
 							    0x90);
 
@@ -2467,7 +2458,7 @@
 				} else if (priv->undecorated_smoothed_pwdb >=
 					   (RegC38_TH+5)) {
 					if (reg_c38_State) {
-						write_nic_byte(dev,
+						rtl92e_writeb(dev,
 							rOFDM0_RxDetector3,
 							priv->framesync);
 						reg_c38_State = RegC38_Default;
@@ -2475,8 +2466,8 @@
 				}
 			} else {
 				if (reg_c38_State) {
-					write_nic_byte(dev, rOFDM0_RxDetector3,
-						       priv->framesync);
+					rtl92e_writeb(dev, rOFDM0_RxDetector3,
+						      priv->framesync);
 					reg_c38_State = RegC38_Default;
 				}
 			}
@@ -2484,14 +2475,14 @@
 	}
 	if (priv->framesyncMonitor) {
 		if (priv->reset_count != reset_cnt) {
-			write_nic_byte(dev, rOFDM0_RxDetector3,
+			rtl92e_writeb(dev, rOFDM0_RxDetector3,
 				       priv->framesync);
 			reg_c38_State = RegC38_Default;
 			reset_cnt = priv->reset_count;
 		}
 	} else {
 		if (reg_c38_State) {
-			write_nic_byte(dev, rOFDM0_RxDetector3,
+			rtl92e_writeb(dev, rOFDM0_RxDetector3,
 				       priv->framesync);
 			reg_c38_State = RegC38_Default;
 		}
@@ -2556,8 +2547,7 @@
 		RT_TRACE(COMP_TXAGC, "SetTxPowerLevel8190()  channel = %d\n",
 			 priv->rtllib->current_network.channel);
 
-		rtl8192_phy_setTxPower(dev,
-				 priv->rtllib->current_network.channel);
+		rtl92e_set_tx_power(dev, priv->rtllib->current_network.channel);
 	}
 	priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
 	priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
@@ -2569,13 +2559,13 @@
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rtllib_device *ieee = priv->rtllib;
 
-	ieee->softmac_stats.CurrentShowTxate = read_nic_byte(dev,
+	ieee->softmac_stats.CurrentShowTxate = rtl92e_readb(dev,
 						 Current_Tx_Rate_Reg);
 
-	ieee->softmac_stats.last_packet_rate = read_nic_byte(dev,
+	ieee->softmac_stats.last_packet_rate = rtl92e_readb(dev,
 						 Initial_Tx_Rate_Reg);
 
-	ieee->softmac_stats.txretrycount = read_nic_dword(dev,
+	ieee->softmac_stats.txretrycount = rtl92e_readl(dev,
 						 Tx_Retry_Count_Reg);
 }
 
@@ -2583,5 +2573,5 @@
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
-	write_nic_byte(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
+	rtl92e_writeb(dev, DRIVER_RSSI, (u8)priv->undecorated_smoothed_pwdb);
 }
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index b037451..097f0dc 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -27,26 +27,17 @@
 #define		DM_DIG_THRESH_HIGH					40
 #define		DM_DIG_THRESH_LOW					35
 
-#define		DM_FALSEALARM_THRESH_LOW	40
-#define		DM_FALSEALARM_THRESH_HIGH	1000
-
 #define		DM_DIG_HIGH_PWR_THRESH_HIGH		75
 #define		DM_DIG_HIGH_PWR_THRESH_LOW		70
 
 #define		BW_AUTO_SWITCH_HIGH_LOW			25
 #define		BW_AUTO_SWITCH_LOW_HIGH			30
 
-#define		DM_check_fsync_time_interval				500
-
-
 #define		DM_DIG_BACKOFF				12
 #define		DM_DIG_MAX					0x36
 #define		DM_DIG_MIN					0x1c
 #define		DM_DIG_MIN_Netcore			0x12
 
-#define		DM_DIG_BACKOFF_MAX			12
-#define		DM_DIG_BACKOFF_MIN			-4
-
 #define		RxPathSelection_SS_TH_low		30
 #define		RxPathSelection_diff_TH			18
 
@@ -55,8 +46,6 @@
 #define		RateAdaptiveTH_Low_40M		10
 #define		VeryLowRSSI					15
 
-#define		CTSToSelfTHVal					35
-
 #define		WAIotTHVal						25
 
 #define		E_FOR_TX_POWER_TRACK	       300
@@ -70,14 +59,6 @@
 #define			Tx_Retry_Count_Reg	 0x1ac
 #define		RegC38_TH				 20
 
-#define		TX_POWER_NEAR_FIELD_THRESH_LVL2	74
-#define		TX_POWER_NEAR_FIELD_THRESH_LVL1	67
-
-#define		TxHighPwrLevel_Normal		0
-#define		TxHighPwrLevel_Level1		1
-#define		TxHighPwrLevel_Level2		2
-
-#define		DM_Type_ByFW			0
 #define		DM_Type_ByDriver		1
 
 /*--------------------------Define Parameters-------------------------------*/
@@ -207,23 +188,20 @@
 /*--------------------------Exported Function prototype---------------------*/
 /*--------------------------Exported Function prototype---------------------*/
 
-extern  void    init_hal_dm(struct net_device *dev);
-extern  void deinit_hal_dm(struct net_device *dev);
+void rtl92e_dm_init(struct net_device *dev);
+void rtl92e_dm_deinit(struct net_device *dev);
 
-extern void hal_dm_watchdog(struct net_device *dev);
+void rtl92e_dm_watchdog(struct net_device *dev);
 
 
-extern  void    init_rate_adaptive(struct net_device *dev);
-extern  void    dm_txpower_trackingcallback(void *data);
+void    rtl92e_init_adaptive_rate(struct net_device *dev);
+void    rtl92e_dm_txpower_tracking_wq(void *data);
 
-extern  void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
+void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
 
-extern  void    dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern  void    dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern  void    dm_init_edca_turbo(struct net_device *dev);
-extern  void    dm_rf_pathcheck_workitemcallback(void *data);
-extern  void dm_fsync_timer_callback(unsigned long data);
-extern  void dm_check_fsync(struct net_device *dev);
-extern  void dm_initialize_txpower_tracking(struct net_device *dev);
-extern  void    dm_CheckRfCtrlGPIO(void *data);
+void    rtl92e_dm_restore_state(struct net_device *dev);
+void    rtl92e_dm_backup_state(struct net_device *dev);
+void    rtl92e_dm_init_edca_turbo(struct net_device *dev);
+void    rtl92e_dm_rf_pathcheck_wq(void *data);
+void rtl92e_dm_init_txpower_tracking(struct net_device *dev);
 #endif	/*__R8192UDM_H__ */
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index a6778e0..039ccfd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
@@ -25,115 +25,75 @@
 #include "rtl_core.h"
 #include "rtl_eeprom.h"
 
-static void eprom_cs(struct net_device *dev, short bit)
+static void _rtl92e_gpio_write_bit(struct net_device *dev, int no, bool val)
 {
-	if (bit)
-		write_nic_byte(dev, EPROM_CMD,
-			       (1 << EPROM_CS_SHIFT) |
-			       read_nic_byte(dev, EPROM_CMD));
+	u8 reg = rtl92e_readb(dev, EPROM_CMD);
+
+	if (val)
+		reg |= 1 << no;
 	else
-		write_nic_byte(dev, EPROM_CMD, read_nic_byte(dev, EPROM_CMD)
-			       & ~(1<<EPROM_CS_SHIFT));
+		reg &= ~(1 << no);
 
+	rtl92e_writeb(dev, EPROM_CMD, reg);
 	udelay(EPROM_DELAY);
 }
 
-
-static void eprom_ck_cycle(struct net_device *dev)
+static bool _rtl92e_gpio_get_bit(struct net_device *dev, int no)
 {
-	write_nic_byte(dev, EPROM_CMD,
-		       (1<<EPROM_CK_SHIFT) | read_nic_byte(dev, EPROM_CMD));
-	udelay(EPROM_DELAY);
-	write_nic_byte(dev, EPROM_CMD,
-		       read_nic_byte(dev, EPROM_CMD) & ~(1<<EPROM_CK_SHIFT));
-	udelay(EPROM_DELAY);
+	u8 reg = rtl92e_readb(dev, EPROM_CMD);
+
+	return (reg >> no) & 0x1;
 }
 
-
-static void eprom_w(struct net_device *dev, short bit)
+static void _rtl92e_eeprom_ck_cycle(struct net_device *dev)
 {
-	if (bit)
-		write_nic_byte(dev, EPROM_CMD, (1<<EPROM_W_SHIFT) |
-			       read_nic_byte(dev, EPROM_CMD));
-	else
-		write_nic_byte(dev, EPROM_CMD, read_nic_byte(dev, EPROM_CMD)
-			       & ~(1<<EPROM_W_SHIFT));
-
-	udelay(EPROM_DELAY);
+	_rtl92e_gpio_write_bit(dev, EPROM_CK_BIT, 1);
+	_rtl92e_gpio_write_bit(dev, EPROM_CK_BIT, 0);
 }
 
-
-static short eprom_r(struct net_device *dev)
+static u16 _rtl92e_eeprom_xfer(struct net_device *dev, u16 data, int tx_len)
 {
-	short bit;
+	u16 ret = 0;
+	int rx_len = 16;
 
-	bit = (read_nic_byte(dev, EPROM_CMD) & (1<<EPROM_R_SHIFT));
-	udelay(EPROM_DELAY);
+	_rtl92e_gpio_write_bit(dev, EPROM_CS_BIT, 1);
+	_rtl92e_eeprom_ck_cycle(dev);
 
-	if (bit)
-		return 1;
-	return 0;
-}
-
-static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++) {
-		eprom_w(dev, b[i]);
-		eprom_ck_cycle(dev);
+	while (tx_len--) {
+		_rtl92e_gpio_write_bit(dev, EPROM_W_BIT,
+				       (data >> tx_len) & 0x1);
+		_rtl92e_eeprom_ck_cycle(dev);
 	}
+
+	_rtl92e_gpio_write_bit(dev, EPROM_W_BIT, 0);
+
+	while (rx_len--) {
+		_rtl92e_eeprom_ck_cycle(dev);
+		ret |= _rtl92e_gpio_get_bit(dev, EPROM_R_BIT) << rx_len;
+	}
+
+	_rtl92e_gpio_write_bit(dev, EPROM_CS_BIT, 0);
+	_rtl92e_eeprom_ck_cycle(dev);
+
+	return ret;
 }
 
-u32 eprom_read(struct net_device *dev, u32 addr)
+u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
-	short read_cmd[] = {1, 1, 0};
-	short addr_str[8];
-	int i;
-	int addr_len;
-	u32 ret;
+	u32 ret = 0;
 
-	ret = 0;
-	write_nic_byte(dev, EPROM_CMD,
-		       (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
+	rtl92e_writeb(dev, EPROM_CMD,
+		      (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
 	udelay(EPROM_DELAY);
 
-	if (priv->epromtype == EEPROM_93C56) {
-		addr_str[7] = addr & 1;
-		addr_str[6] = addr & (1<<1);
-		addr_str[5] = addr & (1<<2);
-		addr_str[4] = addr & (1<<3);
-		addr_str[3] = addr & (1<<4);
-		addr_str[2] = addr & (1<<5);
-		addr_str[1] = addr & (1<<6);
-		addr_str[0] = addr & (1<<7);
-		addr_len = 8;
-	} else {
-		addr_str[5] = addr & 1;
-		addr_str[4] = addr & (1<<1);
-		addr_str[3] = addr & (1<<2);
-		addr_str[2] = addr & (1<<3);
-		addr_str[1] = addr & (1<<4);
-		addr_str[0] = addr & (1<<5);
-		addr_len = 6;
-	}
-	eprom_cs(dev, 1);
-	eprom_ck_cycle(dev);
-	eprom_send_bits_string(dev, read_cmd, 3);
-	eprom_send_bits_string(dev, addr_str, addr_len);
+	/* EEPROM is configured as x16 */
+	if (priv->epromtype == EEPROM_93C56)
+		ret = _rtl92e_eeprom_xfer(dev, (addr & 0xFF) | (0x6 << 8), 11);
+	else
+		ret = _rtl92e_eeprom_xfer(dev, (addr & 0x3F) | (0x6 << 6), 9);
 
-	eprom_w(dev, 0);
-
-	for (i = 0; i < 16; i++) {
-		eprom_ck_cycle(dev);
-		ret |= (eprom_r(dev)<<(15-i));
-	}
-
-	eprom_cs(dev, 0);
-	eprom_ck_cycle(dev);
-
-	write_nic_byte(dev, EPROM_CMD,
-		       (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
+	rtl92e_writeb(dev, EPROM_CMD,
+		      (EPROM_CMD_NORMAL<<EPROM_CMD_OPERATING_MODE_SHIFT));
 	return ret;
 }
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index adea2b4..8d23aea 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
@@ -26,4 +26,4 @@
 
 #define EPROM_DELAY 10
 
-u32 eprom_read(struct net_device *dev, u32 addr);
+u32 rtl92e_eeprom_read(struct net_device *dev, u32 addr);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 6bbd1c6..9fcb099 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -45,7 +45,7 @@
 	pci_write_config_byte(pdev, 0x70f, tmp);
 }
 
-bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev)
+bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 	u16 VenderID;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index e8d5527..6246841 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -29,6 +29,6 @@
 #include <linux/pci.h>
 
 struct net_device;
-bool rtl8192_pci_findadapter(struct pci_dev *pdev, struct net_device *dev);
+bool rtl92e_check_adapter(struct pci_dev *pdev, struct net_device *dev);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index e4908672..b0268fd 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -23,7 +23,7 @@
 #include "rtl_pm.h"
 
 
-int rtl8192E_suspend(struct pci_dev *pdev, pm_message_t state)
+int rtl92e_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -45,16 +45,16 @@
 	netif_device_detach(dev);
 
 	if (!priv->rtllib->bSupportRemoteWakeUp) {
-		MgntActSet_RF_State(dev, eRfOff, RF_CHANGE_BY_INIT, true);
-		ulRegRead = read_nic_dword(dev, CPU_GEN);
+		rtl92e_set_rf_state(dev, eRfOff, RF_CHANGE_BY_INIT);
+		ulRegRead = rtl92e_readl(dev, CPU_GEN);
 		ulRegRead |= CPU_GEN_SYSTEM_RESET;
-		write_nic_dword(dev, CPU_GEN, ulRegRead);
+		rtl92e_writel(dev, CPU_GEN, ulRegRead);
 	} else {
-		write_nic_dword(dev, WFCRC0, 0xffffffff);
-		write_nic_dword(dev, WFCRC1, 0xffffffff);
-		write_nic_dword(dev, WFCRC2, 0xffffffff);
-		write_nic_byte(dev, PMR, 0x5);
-		write_nic_byte(dev, MacBlkCtrl, 0xa);
+		rtl92e_writel(dev, WFCRC0, 0xffffffff);
+		rtl92e_writel(dev, WFCRC1, 0xffffffff);
+		rtl92e_writel(dev, WFCRC2, 0xffffffff);
+		rtl92e_writeb(dev, PMR, 0x5);
+		rtl92e_writeb(dev, MacBlkCtrl, 0xa);
 	}
 out_pci_suspend:
 	netdev_info(dev, "WOL is %s\n", priv->rtllib->bSupportRemoteWakeUp ?
@@ -70,7 +70,7 @@
 	return 0;
 }
 
-int rtl8192E_resume(struct pci_dev *pdev)
+int rtl92e_resume(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct r8192_priv *priv = rtllib_priv(dev);
@@ -95,7 +95,7 @@
 	pci_enable_wake(pdev, PCI_D0, 0);
 
 	if (priv->polling_timer_on == 0)
-		check_rfctrl_gpio_timer((unsigned long)dev);
+		rtl92e_check_rfctrl_gpio_timer((unsigned long)dev);
 
 	if (!netif_running(dev)) {
 		netdev_info(dev,
@@ -108,7 +108,7 @@
 		dev->netdev_ops->ndo_open(dev);
 
 	if (!priv->rtllib->bSupportRemoteWakeUp)
-		MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_INIT, true);
+		rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_INIT);
 
 out:
 	RT_TRACE(COMP_POWER, "<================r8192E resume call.\n");
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 7bfe448..cdc45f7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -23,7 +23,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 
-int rtl8192E_suspend(struct pci_dev *dev, pm_message_t state);
-int rtl8192E_resume(struct pci_dev *dev);
+int rtl92e_suspend(struct pci_dev *dev, pm_message_t state);
+int rtl92e_resume(struct pci_dev *dev);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 404cb83..f09560d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -45,10 +45,10 @@
 	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
 	RT_TRACE(COMP_DBG, "%s()============>come to sleep down\n", __func__);
 
-	MgntActSet_RF_State(dev, eRfSleep, RF_CHANGE_BY_PS, false);
+	rtl92e_set_rf_state(dev, eRfSleep, RF_CHANGE_BY_PS);
 }
 
-void rtl8192_hw_sleep_wq(void *data)
+void rtl92e_hw_sleep_wq(void *data)
 {
 	struct rtllib_device *ieee = container_of_dwork_rsl(data,
 				     struct rtllib_device, hw_sleep_wq);
@@ -57,7 +57,7 @@
 	rtl8192_hw_sleep_down(dev);
 }
 
-void rtl8192_hw_wakeup(struct net_device *dev)
+void rtl92e_hw_wakeup(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	unsigned long flags = 0;
@@ -66,7 +66,7 @@
 	if (priv->RFChangeInProgress) {
 		spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
 		RT_TRACE(COMP_DBG,
-			 "rtl8192_hw_wakeup(): RF Change in progress!\n");
+			 "rtl92e_hw_wakeup(): RF Change in progress!\n");
 		queue_delayed_work_rsl(priv->rtllib->wq,
 				       &priv->rtllib->hw_wakeup_wq,
 				       msecs_to_jiffies(10));
@@ -74,21 +74,21 @@
 	}
 	spin_unlock_irqrestore(&priv->rf_ps_lock, flags);
 	RT_TRACE(COMP_PS, "%s()============>come to wake up\n", __func__);
-	MgntActSet_RF_State(dev, eRfOn, RF_CHANGE_BY_PS, false);
+	rtl92e_set_rf_state(dev, eRfOn, RF_CHANGE_BY_PS);
 }
 
-void rtl8192_hw_wakeup_wq(void *data)
+void rtl92e_hw_wakeup_wq(void *data)
 {
 	struct rtllib_device *ieee = container_of_dwork_rsl(data,
 				     struct rtllib_device, hw_wakeup_wq);
 	struct net_device *dev = ieee->dev;
 
-	rtl8192_hw_wakeup(dev);
+	rtl92e_hw_wakeup(dev);
 }
 
 #define MIN_SLEEP_TIME 50
 #define MAX_SLEEP_TIME 10000
-void rtl8192_hw_to_sleep(struct net_device *dev, u64 time)
+void rtl92e_enter_sleep(struct net_device *dev, u64 time)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 
@@ -133,14 +133,13 @@
 
 	RT_TRACE(COMP_PS, "InactivePsWorkItemCallback(): Set RF to %s.\n",
 		 pPSC->eInactivePowerState == eRfOff ? "OFF" : "ON");
-	MgntActSet_RF_State(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS,
-			    false);
+	rtl92e_set_rf_state(dev, pPSC->eInactivePowerState, RF_CHANGE_BY_IPS);
 
 	pPSC->bSwRfProcessing = false;
 	RT_TRACE(COMP_PS, "InactivePsWorkItemCallback() <---------\n");
 }
 
-void IPSEnter(struct net_device *dev)
+void rtl92e_ips_enter(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
@@ -152,7 +151,7 @@
 		if (rtState == eRfOn && !pPSC->bSwRfProcessing &&
 			(priv->rtllib->state != RTLLIB_LINKED) &&
 			(priv->rtllib->iw_mode != IW_MODE_MASTER)) {
-			RT_TRACE(COMP_PS, "IPSEnter(): Turn off RF.\n");
+			RT_TRACE(COMP_PS, "rtl92e_ips_enter(): Turn off RF.\n");
 			pPSC->eInactivePowerState = eRfOff;
 			priv->isRFOff = true;
 			priv->bInPowerSaveMode = true;
@@ -161,7 +160,7 @@
 	}
 }
 
-void IPSLeave(struct net_device *dev)
+void rtl92e_ips_leave(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
@@ -172,7 +171,7 @@
 		rtState = priv->rtllib->eRFPowerState;
 		if (rtState != eRfOn  && !pPSC->bSwRfProcessing &&
 		    priv->rtllib->RfOffReason <= RF_CHANGE_BY_IPS) {
-			RT_TRACE(COMP_PS, "IPSLeave(): Turn on RF.\n");
+			RT_TRACE(COMP_PS, "rtl92e_ips_leave(): Turn on RF.\n");
 			pPSC->eInactivePowerState = eRfOn;
 			priv->bInPowerSaveMode = false;
 			InactivePsWorkItemCallback(dev);
@@ -180,7 +179,7 @@
 	}
 }
 
-void IPSLeave_wq(void *data)
+void rtl92e_ips_leave_wq(void *data)
 {
 	struct rtllib_device *ieee = container_of_work_rsl(data,
 				     struct rtllib_device, ips_leave_wq);
@@ -188,11 +187,11 @@
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
 	down(&priv->rtllib->ips_sem);
-	IPSLeave(dev);
+	rtl92e_ips_leave(dev);
 	up(&priv->rtllib->ips_sem);
 }
 
-void rtllib_ips_leave_wq(struct net_device *dev)
+void rtl92e_rtllib_ips_leave_wq(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 	enum rt_rf_power_state rtState;
@@ -206,7 +205,7 @@
 					    __func__);
 				return;
 			}
-			netdev_info(dev, "=========>%s(): IPSLeave\n",
+			netdev_info(dev, "=========>%s(): rtl92e_ips_leave\n",
 				    __func__);
 			queue_work_rsl(priv->rtllib->wq,
 				       &priv->rtllib->ips_leave_wq);
@@ -214,12 +213,12 @@
 	}
 }
 
-void rtllib_ips_leave(struct net_device *dev)
+void rtl92e_rtllib_ips_leave(struct net_device *dev)
 {
 	struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
 
 	down(&priv->rtllib->ips_sem);
-	IPSLeave(dev);
+	rtl92e_ips_leave(dev);
 	up(&priv->rtllib->ips_sem);
 }
 
@@ -238,7 +237,7 @@
 	    rtPsMode == RTLLIB_PS_DISABLED) {
 		unsigned long flags;
 
-		rtl8192_hw_wakeup(dev);
+		rtl92e_hw_wakeup(dev);
 		priv->rtllib->sta_sleep = LPS_IS_WAKE;
 
 		spin_lock_irqsave(&(priv->rtllib->mgmt_tx_lock), flags);
@@ -251,13 +250,13 @@
 	return true;
 }
 
-void LeisurePSEnter(struct net_device *dev)
+void rtl92e_leisure_ps_enter(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
 					&(priv->rtllib->PowerSaveControl);
 
-	RT_TRACE(COMP_PS, "LeisurePSEnter()...\n");
+	RT_TRACE(COMP_PS, "rtl92e_leisure_ps_enter()...\n");
 	RT_TRACE(COMP_PS,
 		 "pPSC->bLeisurePs = %d, ieee->ps = %d,pPSC->LpsIdleCount is %d,RT_CHECK_FOR_HANG_PERIOD is %d\n",
 		 pPSC->bLeisurePs, priv->rtllib->ps, pPSC->LpsIdleCount,
@@ -275,7 +274,7 @@
 			if (priv->rtllib->ps == RTLLIB_PS_DISABLED) {
 
 				RT_TRACE(COMP_LPS,
-					 "LeisurePSEnter(): Enter 802.11 power save mode...\n");
+					 "rtl92e_leisure_ps_enter(): Enter 802.11 power save mode...\n");
 
 				if (!pPSC->bFwCtrlLPS) {
 					if (priv->rtllib->SetFwCmdHandler)
@@ -291,21 +290,21 @@
 	}
 }
 
-void LeisurePSLeave(struct net_device *dev)
+void rtl92e_leisure_ps_leave(struct net_device *dev)
 {
 	struct r8192_priv *priv = rtllib_priv(dev);
 	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
 					&(priv->rtllib->PowerSaveControl);
 
 
-	RT_TRACE(COMP_PS, "LeisurePSLeave()...\n");
+	RT_TRACE(COMP_PS, "rtl92e_leisure_ps_leave()...\n");
 	RT_TRACE(COMP_PS, "pPSC->bLeisurePs = %d, ieee->ps = %d\n",
 		pPSC->bLeisurePs, priv->rtllib->ps);
 
 	if (pPSC->bLeisurePs) {
 		if (priv->rtllib->ps != RTLLIB_PS_DISABLED) {
 			RT_TRACE(COMP_LPS,
-				 "LeisurePSLeave(): Busy Traffic , Leave 802.11 power save..\n");
+				 "rtl92e_leisure_ps_leave(): Busy Traffic , Leave 802.11 power save..\n");
 			MgntActSet_802_11_PowerSaveMode(dev,
 					 RTLLIB_PS_DISABLED);
 
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index 962f2e5..35fc9e2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -30,18 +30,17 @@
 struct net_device;
 
 #define RT_CHECK_FOR_HANG_PERIOD 2
-#define INIT_DEFAULT_CHAN	 1
 
-void rtl8192_hw_wakeup(struct net_device *dev);
-void rtl8192_hw_to_sleep(struct net_device *dev, u64 time);
-void rtllib_ips_leave_wq(struct net_device *dev);
-void rtllib_ips_leave(struct net_device *dev);
-void IPSLeave_wq(void *data);
+void rtl92e_hw_wakeup(struct net_device *dev);
+void rtl92e_enter_sleep(struct net_device *dev, u64 time);
+void rtl92e_rtllib_ips_leave_wq(struct net_device *dev);
+void rtl92e_rtllib_ips_leave(struct net_device *dev);
+void rtl92e_ips_leave_wq(void *data);
 
-void IPSEnter(struct net_device *dev);
-void IPSLeave(struct net_device *dev);
+void rtl92e_ips_enter(struct net_device *dev);
+void rtl92e_ips_leave(struct net_device *dev);
 
-void LeisurePSEnter(struct net_device *dev);
-void LeisurePSLeave(struct net_device *dev);
+void rtl92e_leisure_ps_enter(struct net_device *dev);
+void rtl92e_leisure_ps_leave(struct net_device *dev);
 
 #endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index f5e4961..7e3ca7e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -192,7 +192,7 @@
 		pPSC->bLeisurePs = true;
 	} else {
 		if (priv->rtllib->state == RTLLIB_LINKED)
-			LeisurePSLeave(dev);
+			rtl92e_leisure_ps_leave(dev);
 
 		priv->ps_force = true;
 		pPSC->bLeisurePs = false;
@@ -282,10 +282,11 @@
 					up(&priv->wx_sem);
 					return -1;
 				}
-				netdev_info(dev,  "=========>%s(): IPSLeave\n",
+				netdev_info(dev,
+					    "=========>%s(): rtl92e_ips_leave\n",
 					    __func__);
 				down(&priv->rtllib->ips_sem);
-				IPSLeave(dev);
+				rtl92e_ips_leave(dev);
 				up(&priv->rtllib->ips_sem);
 			}
 		}
@@ -442,10 +443,11 @@
 					up(&priv->wx_sem);
 					return -1;
 				}
-				RT_TRACE(COMP_PS, "=========>%s(): IPSLeave\n",
+				RT_TRACE(COMP_PS,
+					 "=========>%s(): rtl92e_ips_leave\n",
 					 __func__);
 				down(&priv->rtllib->ips_sem);
-				IPSLeave(dev);
+				rtl92e_ips_leave(dev);
 				up(&priv->rtllib->ips_sem);
 			}
 		}
@@ -700,7 +702,7 @@
 
 	priv->rtllib->wx_set_enc = 1;
 	down(&priv->rtllib->ips_sem);
-	IPSLeave(dev);
+	rtl92e_ips_leave(dev);
 	up(&priv->rtllib->ips_sem);
 	down(&priv->wx_sem);
 
@@ -711,7 +713,7 @@
 
 	if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
 		ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
-		CamResetAllEntry(dev);
+		rtl92e_cam_reset(dev);
 		memset(priv->rtllib->swcamtable, 0,
 		       sizeof(struct sw_cam_table) * 32);
 		goto end_hw_sec;
@@ -729,9 +731,6 @@
 			hwkey[i] |= (key[4 * i + 3] & mask) << 24;
 		}
 
-		#define CONF_WEP40  0x4
-		#define CONF_WEP104 0x14
-
 		switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
 		case 0:
 			key_idx = ieee->crypt_info.tx_keyidx;
@@ -753,16 +752,16 @@
 		}
 		if (wrqu->encoding.length == 0x5) {
 			ieee->pairwise_key_type = KEY_TYPE_WEP40;
-			EnableHWSecurityConfig8192(dev);
+			rtl92e_enable_hw_security_config(dev);
 		}
 
 		else if (wrqu->encoding.length == 0xd) {
 			ieee->pairwise_key_type = KEY_TYPE_WEP104;
-				EnableHWSecurityConfig8192(dev);
-			setKey(dev, key_idx, key_idx, KEY_TYPE_WEP104,
-			       zero_addr[key_idx], 0, hwkey);
-			set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
-				  zero_addr[key_idx], 0, hwkey, 0);
+				rtl92e_enable_hw_security_config(dev);
+			rtl92e_set_key(dev, key_idx, key_idx, KEY_TYPE_WEP104,
+				       zero_addr[key_idx], 0, hwkey);
+			rtl92e_set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
+					 zero_addr[key_idx], 0, hwkey, 0);
 		} else {
 			netdev_info(dev,
 				    "wrong type in WEP, not WEP40 and WEP104\n");
@@ -821,17 +820,13 @@
 	}
 	if (wrqu->retry.flags & IW_RETRY_MAX) {
 		priv->retry_rts = wrqu->retry.value;
-		DMESG("Setting retry for RTS/CTS data to %d",
-		      wrqu->retry.value);
 
 	} else {
 		priv->retry_data = wrqu->retry.value;
-		DMESG("Setting retry for non RTS/CTS data to %d",
-		      wrqu->retry.value);
 	}
 
 
-	rtl8192_commit(dev);
+	rtl92e_commit(dev);
 exit:
 	up(&priv->wx_sem);
 
@@ -917,7 +912,7 @@
 
 	priv->rtllib->wx_set_enc = 1;
 	down(&priv->rtllib->ips_sem);
-	IPSLeave(dev);
+	rtl92e_ips_leave(dev);
 	up(&priv->rtllib->ips_sem);
 
 	ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra);
@@ -933,7 +928,7 @@
 		     ext->alg == IW_ENCODE_ALG_NONE) {
 			ieee->pairwise_key_type = ieee->group_key_type
 						= KEY_TYPE_NA;
-			CamResetAllEntry(dev);
+			rtl92e_cam_reset(dev);
 			memset(priv->rtllib->swcamtable, 0,
 			       sizeof(struct sw_cam_table) * 32);
 			goto end_hw_sec;
@@ -950,28 +945,29 @@
 			if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
 				alg = KEY_TYPE_WEP104;
 			ieee->pairwise_key_type = alg;
-			EnableHWSecurityConfig8192(dev);
+			rtl92e_enable_hw_security_config(dev);
 		}
 		memcpy((u8 *)key, ext->key, 16);
 
 		if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
 			if (ext->key_len == 13)
 				ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
-			setKey(dev, idx, idx, alg, zero, 0, key);
-			set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
+			rtl92e_set_key(dev, idx, idx, alg, zero, 0, key);
+			rtl92e_set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
 		} else if (group) {
 			ieee->group_key_type = alg;
-			setKey(dev, idx, idx, alg, broadcast_addr, 0, key);
-			set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
-				  key, 0);
+			rtl92e_set_key(dev, idx, idx, alg, broadcast_addr, 0,
+				       key);
+			rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
+					 key, 0);
 		} else {
 			if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
 			     ieee->pHTInfo->bCurrentHTSupport)
-				write_nic_byte(dev, 0x173, 1);
-			setKey(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
-			       0, key);
-			set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
-				  0, key, 0);
+				rtl92e_writeb(dev, 0x173, 1);
+			rtl92e_set_key(dev, 4, idx, alg,
+				       (u8 *)ieee->ap_mac_addr, 0, key);
+			rtl92e_set_swcam(dev, 4, idx, alg,
+					 (u8 *)ieee->ap_mac_addr, 0, key, 0);
 		}
 
 
@@ -1119,41 +1115,41 @@
 }
 
 
-#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
+#define IW_IOCTL(x) ((x) - SIOCSIWCOMMIT)
 static iw_handler r8192_wx_handlers[] = {
-	IW_IOCTL(SIOCGIWNAME) = r8192_wx_get_name,
-	IW_IOCTL(SIOCSIWFREQ) = r8192_wx_set_freq,
-	IW_IOCTL(SIOCGIWFREQ) = r8192_wx_get_freq,
-	IW_IOCTL(SIOCSIWMODE) = r8192_wx_set_mode,
-	IW_IOCTL(SIOCGIWMODE) = r8192_wx_get_mode,
-	IW_IOCTL(SIOCSIWSENS) = r8192_wx_set_sens,
-	IW_IOCTL(SIOCGIWSENS) = r8192_wx_get_sens,
-	IW_IOCTL(SIOCGIWRANGE) = rtl8192_wx_get_range,
-	IW_IOCTL(SIOCSIWAP) = r8192_wx_set_wap,
-	IW_IOCTL(SIOCGIWAP) = r8192_wx_get_wap,
-	IW_IOCTL(SIOCSIWSCAN) = r8192_wx_set_scan,
-	IW_IOCTL(SIOCGIWSCAN) = r8192_wx_get_scan,
-	IW_IOCTL(SIOCSIWESSID) = r8192_wx_set_essid,
-	IW_IOCTL(SIOCGIWESSID) = r8192_wx_get_essid,
-	IW_IOCTL(SIOCSIWNICKN) = r8192_wx_set_nick,
-		IW_IOCTL(SIOCGIWNICKN) = r8192_wx_get_nick,
-	IW_IOCTL(SIOCSIWRATE) = r8192_wx_set_rate,
-	IW_IOCTL(SIOCGIWRATE) = r8192_wx_get_rate,
-	IW_IOCTL(SIOCSIWRTS) = r8192_wx_set_rts,
-	IW_IOCTL(SIOCGIWRTS) = r8192_wx_get_rts,
-	IW_IOCTL(SIOCSIWFRAG) = r8192_wx_set_frag,
-	IW_IOCTL(SIOCGIWFRAG) = r8192_wx_get_frag,
-	IW_IOCTL(SIOCSIWRETRY) = r8192_wx_set_retry,
-	IW_IOCTL(SIOCGIWRETRY) = r8192_wx_get_retry,
-	IW_IOCTL(SIOCSIWENCODE) = r8192_wx_set_enc,
-	IW_IOCTL(SIOCGIWENCODE) = r8192_wx_get_enc,
-	IW_IOCTL(SIOCSIWPOWER) = r8192_wx_set_power,
-	IW_IOCTL(SIOCGIWPOWER) = r8192_wx_get_power,
-	IW_IOCTL(SIOCSIWGENIE) = r8192_wx_set_gen_ie,
-	IW_IOCTL(SIOCGIWGENIE) = r8192_wx_get_gen_ie,
-	IW_IOCTL(SIOCSIWMLME) = r8192_wx_set_mlme,
-	IW_IOCTL(SIOCSIWAUTH) = r8192_wx_set_auth,
-	IW_IOCTL(SIOCSIWENCODEEXT) = r8192_wx_set_enc_ext,
+	[IW_IOCTL(SIOCGIWNAME)] = r8192_wx_get_name,
+	[IW_IOCTL(SIOCSIWFREQ)] = r8192_wx_set_freq,
+	[IW_IOCTL(SIOCGIWFREQ)] = r8192_wx_get_freq,
+	[IW_IOCTL(SIOCSIWMODE)] = r8192_wx_set_mode,
+	[IW_IOCTL(SIOCGIWMODE)] = r8192_wx_get_mode,
+	[IW_IOCTL(SIOCSIWSENS)] = r8192_wx_set_sens,
+	[IW_IOCTL(SIOCGIWSENS)] = r8192_wx_get_sens,
+	[IW_IOCTL(SIOCGIWRANGE)] = rtl8192_wx_get_range,
+	[IW_IOCTL(SIOCSIWAP)] = r8192_wx_set_wap,
+	[IW_IOCTL(SIOCGIWAP)] = r8192_wx_get_wap,
+	[IW_IOCTL(SIOCSIWSCAN)] = r8192_wx_set_scan,
+	[IW_IOCTL(SIOCGIWSCAN)] = r8192_wx_get_scan,
+	[IW_IOCTL(SIOCSIWESSID)] = r8192_wx_set_essid,
+	[IW_IOCTL(SIOCGIWESSID)] = r8192_wx_get_essid,
+	[IW_IOCTL(SIOCSIWNICKN)] = r8192_wx_set_nick,
+	[IW_IOCTL(SIOCGIWNICKN)] = r8192_wx_get_nick,
+	[IW_IOCTL(SIOCSIWRATE)] = r8192_wx_set_rate,
+	[IW_IOCTL(SIOCGIWRATE)] = r8192_wx_get_rate,
+	[IW_IOCTL(SIOCSIWRTS)] = r8192_wx_set_rts,
+	[IW_IOCTL(SIOCGIWRTS)] = r8192_wx_get_rts,
+	[IW_IOCTL(SIOCSIWFRAG)] = r8192_wx_set_frag,
+	[IW_IOCTL(SIOCGIWFRAG)] = r8192_wx_get_frag,
+	[IW_IOCTL(SIOCSIWRETRY)] = r8192_wx_set_retry,
+	[IW_IOCTL(SIOCGIWRETRY)] = r8192_wx_get_retry,
+	[IW_IOCTL(SIOCSIWENCODE)] = r8192_wx_set_enc,
+	[IW_IOCTL(SIOCGIWENCODE)] = r8192_wx_get_enc,
+	[IW_IOCTL(SIOCSIWPOWER)] = r8192_wx_set_power,
+	[IW_IOCTL(SIOCGIWPOWER)] = r8192_wx_get_power,
+	[IW_IOCTL(SIOCSIWGENIE)] = r8192_wx_set_gen_ie,
+	[IW_IOCTL(SIOCGIWGENIE)] = r8192_wx_get_gen_ie,
+	[IW_IOCTL(SIOCSIWMLME)] = r8192_wx_set_mlme,
+	[IW_IOCTL(SIOCSIWAUTH)] = r8192_wx_set_auth,
+	[IW_IOCTL(SIOCSIWENCODEEXT)] = r8192_wx_set_enc_ext,
 };
 
 /* the following rule need to be following,
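A note on the IW_IOCTL() change above: the old macro hid the array-designator brackets inside the macro body, so each table entry only made sense after expansion; the new form computes just the index and leaves the [] designator visible at the use site. A minimal standalone sketch of that C99 designated-initializer pattern, using hypothetical ioctl numbers and handlers rather than the driver's real ones:

	#include <stdio.h>

	typedef int (*handler_t)(void);

	#define SIOC_BASE	0x8B00			/* hypothetical first ioctl number */
	#define SIOC_GET_NAME	0x8B01
	#define SIOC_SET_FREQ	0x8B04

	#define IOCTL_IDX(x)	((x) - SIOC_BASE)	/* index only, like the new IW_IOCTL() */

	static int get_name(void) { return 1; }
	static int set_freq(void) { return 2; }

	static handler_t handlers[] = {
		[IOCTL_IDX(SIOC_GET_NAME)] = get_name,	/* designator written at the use site */
		[IOCTL_IDX(SIOC_SET_FREQ)] = set_freq,
	};

	int main(void)
	{
		/* slots without a designator stay NULL, so the sparse table is safe to probe */
		if (handlers[IOCTL_IDX(SIOC_SET_FREQ)])
			printf("%d\n", handlers[IOCTL_IDX(SIOC_SET_FREQ)]());
		return 0;
	}
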
diff --git a/drivers/staging/rtl8192e/rtl819x_BA.h b/drivers/staging/rtl8192e/rtl819x_BA.h
index 613e14c..8946664 100644
--- a/drivers/staging/rtl8192e/rtl819x_BA.h
+++ b/drivers/staging/rtl8192e/rtl819x_BA.h
@@ -19,11 +19,7 @@
 #ifndef _BATYPE_H_
 #define _BATYPE_H_
 
-#define		TOTAL_TXBA_NUM	16
-#define	TOTAL_RXBA_NUM	16
-
 #define	BA_SETUP_TIMEOUT	200
-#define	BA_INACT_TIMEOUT	60000
 
 #define	BA_POLICY_DELAYED		0
 #define	BA_POLICY_IMMEDIATE	1
@@ -32,7 +28,6 @@
 #define	ADDBA_STATUS_REFUSED		37
 #define	ADDBA_STATUS_INVALID_PARAM	38
 
-#define	DELBA_REASON_QSTA_LEAVING	36
 #define	DELBA_REASON_END_BA			37
 #define	DELBA_REASON_UNKNOWN_BA	38
 #define	DELBA_REASON_TIMEOUT			39
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 60f536c..78ede4a 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -428,7 +428,6 @@
 {
 	 struct rtllib_hdr_3addr *delba = NULL;
 	union delba_param_set *pDelBaParamSet = NULL;
-	u16 *pReasonCode = NULL;
 	u8 *dst = NULL;
 
 	if (skb->len < sizeof(struct rtllib_hdr_3addr) + 6) {
@@ -453,9 +452,7 @@
 #endif
 	delba = (struct rtllib_hdr_3addr *)skb->data;
 	dst = (u8 *)(&delba->addr2[0]);
-	delba += sizeof(struct rtllib_hdr_3addr);
-	pDelBaParamSet = (union delba_param_set *)(delba+2);
-	pReasonCode = (u16 *)(delba+4);
+	pDelBaParamSet = (union delba_param_set *)&delba->payload[2];
 
 	if (pDelBaParamSet->field.Initiator == 1) {
 		struct rx_ts_record *pRxTs;
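The rtl819x_BAProc.c hunk above also removes a classic pointer-arithmetic pitfall: the deleted code added sizeof(struct rtllib_hdr_3addr) to a struct pointer, which advances by that many elements (each sizeof() bytes wide), not by that many bytes; the replacement indexes the header's payload directly. A small self-contained sketch of the pitfall, with a hypothetical header layout rather than the real 802.11 one:

	#include <stdio.h>
	#include <stdint.h>

	struct hdr {
		uint8_t ctl[2];
		uint8_t addr[6];
		uint8_t payload[];	/* frame body starts here */
	};

	int main(void)
	{
		uint8_t buf[64] = { 0 };
		struct hdr *h = (struct hdr *)buf;
		uint8_t *wrong = (uint8_t *)(h + sizeof(*h));	/* sizeof(*h) elements, i.e. sizeof(*h) * sizeof(*h) bytes in */
		uint8_t *right = &h->payload[2];		/* byte-accurate: offset of payload plus 2 */

		printf("wrong offset = %td, right offset = %td\n",
		       wrong - buf, right - buf);
		return 0;
	}
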
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 0c263d9..51711dc 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -20,8 +20,6 @@
 #define _RTL819XU_HTTYPE_H_
 
 #define MIMO_PS_STATIC				0
-#define MIMO_PS_DYNAMIC			1
-#define MIMO_PS_NOLIMIT			3
 
 #define sHTCLng	4
 
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index b5c3647..555745b 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -117,7 +117,7 @@
 	pHTInfo->RxReorderPendingTime = 30;
 }
 
-u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
+static u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate)
 {
 	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
 
@@ -502,7 +502,8 @@
 	return mcsRate | 0x80;
 }
 
-u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS, u8 *pOperateMCS)
+static u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
+			  u8 *pOperateMCS)
 {
 
 	u8 i;
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 3aa35ce..fcc8fab 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -96,11 +96,6 @@
 	u16 Length;
 };
 
-enum ack_policy {
-	eAckPlc0_ACK		= 0x00,
-	eAckPlc1_NoACK		= 0x01,
-};
-
 #define AC0_BE	0
 #define AC1_BK	1
 #define AC2_VI	2
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index b8fed55..a93348c 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -19,8 +19,6 @@
 #ifndef _TSTYPE_H_
 #define _TSTYPE_H_
 #include "rtl819x_Qos.h"
-#define TS_SETUP_TIMEOUT	60
-#define TS_INACT_TIMEOUT	60
 #define TS_ADDBA_DELAY		60
 
 #define TOTAL_TS_NUM		16
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 05aea43..70879594 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -113,7 +113,7 @@
 
 static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
 {
-	memset(pTsCommonInfo->Addr, 0, 6);
+	eth_zero_addr(pTsCommonInfo->Addr);
 	memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
 	memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas)*TCLAS_NUM);
 	pTsCommonInfo->TClasProc = 0;
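The memset(pTsCommonInfo->Addr, 0, 6) conversion above uses the kernel's eth_zero_addr() helper from <linux/etherdevice.h>, which states the intent (clear one MAC address) and drops the bare magic number 6. A userspace sketch of roughly what the helper amounts to, with ETH_ALEN inlined so the example stands alone:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define ETH_ALEN 6

	static void eth_zero_addr(uint8_t *addr)
	{
		memset(addr, 0x00, ETH_ALEN);	/* clears exactly one MAC address */
	}

	int main(void)
	{
		uint8_t mac[ETH_ALEN] = { 0x00, 0x1c, 0x42, 0xaa, 0xbb, 0xcc };

		eth_zero_addr(mac);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		return 0;
	}
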
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index fd38c6d..563ac12 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -84,9 +84,6 @@
 #define iwe_stream_add_point_rsl(info, start, stop, iwe, p)	\
 	iwe_stream_add_point(info, start, stop, iwe, p)
 
-#define usb_alloc_urb_rsl(x, y) usb_alloc_urb(x, y)
-#define usb_submit_urb_rsl(x, y) usb_submit_urb(x, y)
-
 static inline void *netdev_priv_rsl(struct net_device *dev)
 {
 	return netdev_priv(dev);
@@ -110,27 +107,14 @@
 #define HIGH_QUEUE			     7
 #define BEACON_QUEUE			   8
 
-#define LOW_QUEUE			      BE_QUEUE
-#define NORMAL_QUEUE			   MGNT_QUEUE
-
 #ifndef IW_MODE_MESH
 #define IW_MODE_MESH			7
 #endif
-#define AMSDU_SUBHEADER_LEN 14
-#define SWRF_TIMEOUT				50
 
 #define IE_CISCO_FLAG_POSITION		0x08
 #define SUPPORT_CKIP_MIC			0x08
 #define SUPPORT_CKIP_PK			0x10
-#define	RT_RF_OFF_LEVL_ASPM			BIT0
-#define	RT_RF_OFF_LEVL_CLK_REQ		BIT1
-#define	RT_RF_OFF_LEVL_PCI_D3			BIT2
 #define	RT_RF_OFF_LEVL_HALT_NIC		BIT3
-#define	RT_RF_OFF_LEVL_FREE_FW		BIT4
-#define	RT_RF_OFF_LEVL_FW_32K		BIT5
-#define	RT_RF_PS_LEVEL_ALWAYS_ASPM	BIT6
-#define	RT_RF_LPS_DISALBE_2R			BIT30
-#define	RT_RF_LPS_LEVEL_ASPM			BIT31
 #define	RT_IN_PS_LEVEL(pPSC, _PS_FLAG)		\
 	((pPSC->CurPsLevel & _PS_FLAG) ? true : false)
 #define	RT_CLEAR_PS_LEVEL(pPSC, _PS_FLAG)	\
@@ -244,22 +228,6 @@
 #define MGN_MCS13	       0x8d
 #define MGN_MCS14	       0x8e
 #define MGN_MCS15	       0x8f
-#define	MGN_MCS0_SG			0x90
-#define	MGN_MCS1_SG			0x91
-#define	MGN_MCS2_SG			0x92
-#define	MGN_MCS3_SG			0x93
-#define	MGN_MCS4_SG			0x94
-#define	MGN_MCS5_SG			0x95
-#define	MGN_MCS6_SG			0x96
-#define	MGN_MCS7_SG			0x97
-#define	MGN_MCS8_SG			0x98
-#define	MGN_MCS9_SG			0x99
-#define	MGN_MCS10_SG		0x9a
-#define	MGN_MCS11_SG		0x9b
-#define	MGN_MCS12_SG		0x9c
-#define	MGN_MCS13_SG		0x9d
-#define	MGN_MCS14_SG		0x9e
-#define	MGN_MCS15_SG		0x9f
 
 enum hw_variables {
 	HW_VAR_ETHER_ADDR,
@@ -722,42 +690,13 @@
 	u8 dst_addr[ETH_ALEN];
 };
 
-struct rtllib_stats {
-	unsigned int tx_unicast_frames;
-	unsigned int tx_multicast_frames;
-	unsigned int tx_fragments;
-	unsigned int tx_unicast_octets;
-	unsigned int tx_multicast_octets;
-	unsigned int tx_deferred_transmissions;
-	unsigned int tx_single_retry_frames;
-	unsigned int tx_multiple_retry_frames;
-	unsigned int tx_retry_limit_exceeded;
-	unsigned int tx_discards;
-	unsigned int rx_unicast_frames;
-	unsigned int rx_multicast_frames;
-	unsigned int rx_fragments;
-	unsigned int rx_unicast_octets;
-	unsigned int rx_multicast_octets;
-	unsigned int rx_fcs_errors;
-	unsigned int rx_discards_no_buffer;
-	unsigned int tx_discards_wrong_sa;
-	unsigned int rx_discards_undecryptable;
-	unsigned int rx_message_in_msg_fragments;
-	unsigned int rx_message_in_bad_msg_fragments;
-};
-
 struct rtllib_device;
 
-#define SEC_KEY_1	 (1<<0)
-#define SEC_KEY_2	 (1<<1)
-#define SEC_KEY_3	 (1<<2)
-#define SEC_KEY_4	 (1<<3)
 #define SEC_ACTIVE_KEY    (1<<4)
 #define SEC_AUTH_MODE     (1<<5)
 #define SEC_UNICAST_GROUP (1<<6)
 #define SEC_LEVEL	 (1<<7)
 #define SEC_ENABLED       (1<<8)
-#define SEC_ENCRYPT       (1<<9)
 
 #define SEC_LEVEL_0      0 /* None */
 #define SEC_LEVEL_1      1 /* WEP 40 and 104 bit */
@@ -772,7 +711,6 @@
 
 #define WEP_KEY_LEN		13
 #define SCM_KEY_LEN		32
-#define SCM_TEMPORAL_KEY_LENGTH 16
 
 struct rtllib_security {
 	u16 active_key:2,
@@ -1187,8 +1125,6 @@
 #define WME_AC_BE   0x01
 #define WME_AC_VI   0x02
 #define WME_AC_VO   0x03
-#define WME_ACI_MASK 0x03
-#define WME_AIFSN_MASK 0x03
 #define WME_AC_PRAM_LEN 16
 
 #define MAX_RECEIVE_BUFFER_SIZE 9100
@@ -1204,12 +1140,6 @@
 #define ETHERNET_HEADER_SIZE    14      /* length of two Ethernet address
 					 * plus ether type*/
 
-struct	ether_header {
-	u8 ether_dhost[ETHER_ADDR_LEN];
-	u8 ether_shost[ETHER_ADDR_LEN];
-	u16 ether_type;
-} __packed;
-
 enum erp_t {
 	ERP_NonERPpresent	= 0x01,
 	ERP_UseProtection	= 0x02,
@@ -1591,7 +1521,6 @@
 
 	/* Bookkeeping structures */
 	struct net_device_stats stats;
-	struct rtllib_stats ieee_stats;
 	struct rtllib_softmac_stats softmac_stats;
 
 	/* Probe / Beacon management */
@@ -1673,7 +1602,6 @@
 	int short_slot;
 	int mode;       /* A, B, G */
 	int modulation; /* CCK, OFDM */
-	int freq_band;  /* 2.4Ghz, 5.2Ghz, Mixed */
 
 	/* used for forcing the ibss workqueue to terminate
 	 * without wait for the syncro scan to terminate
@@ -2056,244 +1984,199 @@
 
 
 /* rtllib.c */
-extern void free_rtllib(struct net_device *dev);
-extern struct net_device *alloc_rtllib(int sizeof_priv);
+void free_rtllib(struct net_device *dev);
+struct net_device *alloc_rtllib(int sizeof_priv);
 
 /* rtllib_tx.c */
 
-extern int rtllib_encrypt_fragment(
+int rtllib_encrypt_fragment(
 	struct rtllib_device *ieee,
 	struct sk_buff *frag,
 	int hdr_len);
 
-extern int rtllib_xmit(struct sk_buff *skb,  struct net_device *dev);
-extern int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev);
-extern void rtllib_txb_free(struct rtllib_txb *);
+int rtllib_xmit(struct sk_buff *skb,  struct net_device *dev);
+void rtllib_txb_free(struct rtllib_txb *);
 
 /* rtllib_rx.c */
-extern int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
-			struct rtllib_rx_stats *rx_stats);
-extern void rtllib_rx_mgt(struct rtllib_device *ieee,
-			     struct sk_buff *skb,
-			     struct rtllib_rx_stats *stats);
-extern void rtllib_rx_probe_rq(struct rtllib_device *ieee,
-			   struct sk_buff *skb);
-extern int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
+int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
+	      struct rtllib_rx_stats *rx_stats);
+void rtllib_rx_probe_rq(struct rtllib_device *ieee,
+			struct sk_buff *skb);
+int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
 
 /* rtllib_wx.c */
-extern int rtllib_wx_get_scan(struct rtllib_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *key);
-extern int rtllib_wx_set_encode(struct rtllib_device *ieee,
-				   struct iw_request_info *info,
-				   union iwreq_data *wrqu, char *key);
-extern int rtllib_wx_get_encode(struct rtllib_device *ieee,
-				   struct iw_request_info *info,
-				   union iwreq_data *wrqu, char *key);
-extern int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
-			    struct iw_request_info *info,
-			    union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_auth(struct rtllib_device *ieee,
-			       struct iw_request_info *info,
-			       struct iw_param *data, char *extra);
-extern int rtllib_wx_set_mlme(struct rtllib_device *ieee,
-			       struct iw_request_info *info,
-			       union iwreq_data *wrqu, char *extra);
-extern int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
+int rtllib_wx_get_scan(struct rtllib_device *ieee,
+		       struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *key);
+int rtllib_wx_set_encode(struct rtllib_device *ieee,
+			 struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *key);
+int rtllib_wx_get_encode(struct rtllib_device *ieee,
+			 struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *key);
+int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
+			     struct iw_request_info *info,
+			     union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_auth(struct rtllib_device *ieee,
+		       struct iw_request_info *info,
+		       struct iw_param *data, char *extra);
+int rtllib_wx_set_mlme(struct rtllib_device *ieee,
+		       struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
 
 /* rtllib_softmac.c */
-extern short rtllib_is_54g(struct rtllib_network *net);
-extern int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
-				   struct sk_buff *skb,
-				   struct rtllib_rx_stats *rx_stats, u16 type,
-				   u16 stype);
-extern void rtllib_softmac_new_net(struct rtllib_device *ieee,
-				   struct rtllib_network *net);
+int rtllib_rx_frame_softmac(struct rtllib_device *ieee, struct sk_buff *skb,
+			    struct rtllib_rx_stats *rx_stats, u16 type,
+			    u16 stype);
+void rtllib_softmac_new_net(struct rtllib_device *ieee,
+			    struct rtllib_network *net);
 
 void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
-extern void rtllib_softmac_xmit(struct rtllib_txb *txb,
-				struct rtllib_device *ieee);
+void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
 
-extern void rtllib_stop_send_beacons(struct rtllib_device *ieee);
-extern void notify_wx_assoc_event(struct rtllib_device *ieee);
-extern void rtllib_softmac_check_all_nets(struct rtllib_device *ieee);
-extern void rtllib_start_bss(struct rtllib_device *ieee);
-extern void rtllib_start_master_bss(struct rtllib_device *ieee);
-extern void rtllib_start_ibss(struct rtllib_device *ieee);
-extern void rtllib_softmac_init(struct rtllib_device *ieee);
-extern void rtllib_softmac_free(struct rtllib_device *ieee);
-extern void rtllib_associate_abort(struct rtllib_device *ieee);
-extern void rtllib_disassociate(struct rtllib_device *ieee);
-extern void rtllib_stop_scan(struct rtllib_device *ieee);
-extern bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
-extern void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
-extern void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
-extern void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee,
-					  short pwr);
-extern void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
-extern void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
-extern void rtllib_start_protocol(struct rtllib_device *ieee);
-extern void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown);
+void rtllib_stop_send_beacons(struct rtllib_device *ieee);
+void notify_wx_assoc_event(struct rtllib_device *ieee);
+void rtllib_start_ibss(struct rtllib_device *ieee);
+void rtllib_softmac_init(struct rtllib_device *ieee);
+void rtllib_softmac_free(struct rtllib_device *ieee);
+void rtllib_disassociate(struct rtllib_device *ieee);
+void rtllib_stop_scan(struct rtllib_device *ieee);
+bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
+void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
+void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr);
+void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
+void rtllib_start_protocol(struct rtllib_device *ieee);
+void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown);
 
-extern void rtllib_EnableNetMonitorMode(struct net_device *dev,
+void rtllib_EnableNetMonitorMode(struct net_device *dev, bool bInitState);
+void rtllib_DisableNetMonitorMode(struct net_device *dev, bool bInitState);
+void rtllib_EnableIntelPromiscuousMode(struct net_device *dev, bool bInitState);
+void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
 					bool bInitState);
-extern void rtllib_DisableNetMonitorMode(struct net_device *dev,
-					 bool bInitState);
-extern void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
-					      bool bInitState);
-extern void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
-					       bool bInitState);
-extern void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh);
+void rtllib_softmac_stop_protocol(struct rtllib_device *ieee,
+				  u8 mesh_flag, u8 shutdown);
+void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag);
 
-extern void rtllib_softmac_stop_protocol(struct rtllib_device *ieee,
-					 u8 mesh_flag, u8 shutdown);
-extern void rtllib_softmac_start_protocol(struct rtllib_device *ieee,
-					  u8 mesh_flag);
+void rtllib_reset_queue(struct rtllib_device *ieee);
+void rtllib_wake_all_queues(struct rtllib_device *ieee);
+void rtllib_stop_all_queues(struct rtllib_device *ieee);
+struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
+void rtllib_start_send_beacons(struct rtllib_device *ieee);
+void rtllib_stop_send_beacons(struct rtllib_device *ieee);
+int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee,
+				struct iw_point *p, u8 is_mesh);
 
-extern void rtllib_reset_queue(struct rtllib_device *ieee);
-extern void rtllib_wake_all_queues(struct rtllib_device *ieee);
-extern void rtllib_stop_all_queues(struct rtllib_device *ieee);
-extern struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee);
-extern void rtllib_start_send_beacons(struct rtllib_device *ieee);
-extern void rtllib_stop_send_beacons(struct rtllib_device *ieee);
-extern int rtllib_wpa_supplicant_ioctl(struct rtllib_device *ieee,
-				       struct iw_point *p, u8 is_mesh);
+void notify_wx_assoc_event(struct rtllib_device *ieee);
+void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
 
-extern void notify_wx_assoc_event(struct rtllib_device *ieee);
-extern void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success);
-
-extern void softmac_mgmt_xmit(struct sk_buff *skb,
-			      struct rtllib_device *ieee);
-extern u16 rtllib_query_seqnum(struct rtllib_device *ieee,
-			       struct sk_buff *skb, u8 *dst);
-extern u8 rtllib_ap_sec_type(struct rtllib_device *ieee);
+void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee);
+u8 rtllib_ap_sec_type(struct rtllib_device *ieee);
 
 /* rtllib_softmac_wx.c */
 
-extern int rtllib_wx_get_wap(struct rtllib_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *ext);
+int rtllib_wx_get_wap(struct rtllib_device *ieee, struct iw_request_info *info,
+		      union iwreq_data *wrqu, char *ext);
 
-extern int rtllib_wx_set_wap(struct rtllib_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *awrq,
-			     char *extra);
+int rtllib_wx_set_wap(struct rtllib_device *ieee, struct iw_request_info *info,
+		      union iwreq_data *awrq, char *extra);
 
-extern int rtllib_wx_get_essid(struct rtllib_device *ieee,
-			       struct iw_request_info *a,
-			       union iwreq_data *wrqu, char *b);
+int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
+			union iwreq_data *wrqu, char *b);
 
-extern int rtllib_wx_set_rate(struct rtllib_device *ieee,
-			      struct iw_request_info *info,
-			      union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_rate(struct rtllib_device *ieee, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_get_rate(struct rtllib_device *ieee,
-			      struct iw_request_info *info,
-			      union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_rate(struct rtllib_device *ieee, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_set_mode(struct rtllib_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *b);
+int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
+		       union iwreq_data *wrqu, char *b);
 
-extern int rtllib_wx_set_scan(struct rtllib_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *b);
+int rtllib_wx_set_scan(struct rtllib_device *ieee, struct iw_request_info *a,
+		       union iwreq_data *wrqu, char *b);
 
-extern int rtllib_wx_set_essid(struct rtllib_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_essid(struct rtllib_device *ieee, struct iw_request_info *a,
+			union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_get_mode(struct rtllib_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *b);
+int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
+		       union iwreq_data *wrqu, char *b);
 
-extern int rtllib_wx_set_freq(struct rtllib_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *b);
+int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+		       union iwreq_data *wrqu, char *b);
 
-extern int rtllib_wx_get_freq(struct rtllib_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *b);
-extern void rtllib_wx_sync_scan_wq(void *data);
+int rtllib_wx_get_freq(struct rtllib_device *ieee, struct iw_request_info *a,
+		       union iwreq_data *wrqu, char *b);
+void rtllib_wx_sync_scan_wq(void *data);
 
-extern int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
-			       struct iw_request_info *info,
-			       union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
+			struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_get_name(struct rtllib_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_set_power(struct rtllib_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_power(struct rtllib_device *ieee,
+			struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_get_power(struct rtllib_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_power(struct rtllib_device *ieee,
+			struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_set_rts(struct rtllib_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int rtllib_wx_set_rts(struct rtllib_device *ieee, struct iw_request_info *info,
+		      union iwreq_data *wrqu, char *extra);
 
-extern int rtllib_wx_get_rts(struct rtllib_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int rtllib_wx_get_rts(struct rtllib_device *ieee, struct iw_request_info *info,
+		      union iwreq_data *wrqu, char *extra);
 #define MAX_RECEIVE_BUFFER_SIZE 9100
 
 void HTSetConnectBwMode(struct rtllib_device *ieee,
 			enum ht_channel_width Bandwidth,
 			enum ht_extchnl_offset Offset);
-extern void HTUpdateDefaultSetting(struct rtllib_device *ieee);
-extern void HTConstructCapabilityElement(struct rtllib_device *ieee,
-					 u8 *posHTCap, u8 *len,
-					 u8 isEncrypt, bool bAssoc);
-extern void HTConstructInfoElement(struct rtllib_device *ieee,
-				   u8 *posHTInfo, u8 *len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
-				       u8 *posRT2RTAgg, u8 *len);
-extern void HTOnAssocRsp(struct rtllib_device *ieee);
-extern void HTInitializeHTInfo(struct rtllib_device *ieee);
-extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
-					  struct rtllib_network *pNetwork);
-extern void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
-					    struct rtllib_network *pNetwork);
-extern u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
-			      u8 *pMCSFilter);
+void HTUpdateDefaultSetting(struct rtllib_device *ieee);
+void HTConstructCapabilityElement(struct rtllib_device *ieee,
+				  u8 *posHTCap, u8 *len,
+				  u8 isEncrypt, bool bAssoc);
+void HTConstructInfoElement(struct rtllib_device *ieee,
+			    u8 *posHTInfo, u8 *len, u8 isEncrypt);
+void HTConstructRT2RTAggElement(struct rtllib_device *ieee,
+				u8 *posRT2RTAgg, u8 *len);
+void HTOnAssocRsp(struct rtllib_device *ieee);
+void HTInitializeHTInfo(struct rtllib_device *ieee);
+void HTInitializeBssDesc(struct bss_ht *pBssHT);
+void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
+				   struct rtllib_network *pNetwork);
+void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+				     struct rtllib_network *pNetwork);
+u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
+		       u8 *pMCSFilter);
 extern u8 MCS_FILTER_ALL[];
 extern u16 MCS_DATA_RATE[2][2][77];
-extern u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
-extern void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
-extern u16 HTMcsToDataRate(struct rtllib_device *ieee, u8 nMcsRate);
-extern u16  TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
-extern int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
-extern int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
-extern int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
-extern void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
-			u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA(struct rtllib_device *ieee,
-			struct ts_common_info *pTsCommonInfo,
-			enum tr_select TxRxSelect);
-extern void BaSetupTimeOut(unsigned long data);
-extern void TxBaInactTimeout(unsigned long data);
-extern void RxBaInactTimeout(unsigned long data);
-extern void ResetBaEntry(struct ba_record *pBA);
-extern bool GetTs(
-	struct rtllib_device *ieee,
-	struct ts_common_info **ppTS,
-	u8 *Addr,
-	u8 TID,
-	enum tr_select TxRxSelect,
-	bool bAddNewTs
-);
-extern void TSInitialize(struct rtllib_device *ieee);
-extern  void TsStartAddBaProcess(struct rtllib_device *ieee,
-				  struct tx_ts_record *pTxTS);
-extern void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
-extern void RemoveAllTS(struct rtllib_device *ieee);
-void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+u8 HTCCheck(struct rtllib_device *ieee, u8 *pFrame);
+void HTResetIOTSetting(struct rt_hi_throughput *pHTInfo);
+bool IsHTHalfNmodeAPs(struct rtllib_device *ieee);
+u16  TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
+int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
+void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
+		 u8 Policy, u8 bOverwritePending);
+void TsInitDelBA(struct rtllib_device *ieee,
+		 struct ts_common_info *pTsCommonInfo,
+		 enum tr_select TxRxSelect);
+void BaSetupTimeOut(unsigned long data);
+void TxBaInactTimeout(unsigned long data);
+void RxBaInactTimeout(unsigned long data);
+void ResetBaEntry(struct ba_record *pBA);
+bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr,
+	   u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
+void TSInitialize(struct rtllib_device *ieee);
+void TsStartAddBaProcess(struct rtllib_device *ieee,
+			 struct tx_ts_record *pTxTS);
+void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr);
+void RemoveAllTS(struct rtllib_device *ieee);
 
 extern const long rtllib_wlan_frequencies[];
 
@@ -2317,23 +2200,19 @@
 /* For the function is more related to hardware setting, it's better to use the
  * ieee handler to refer to it.
  */
-extern void rtllib_update_active_chan_map(struct rtllib_device *ieee);
-extern void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
-					struct rx_ts_record *pTS);
-extern int rtllib_parse_info_param(struct rtllib_device *ieee,
-		struct rtllib_info_element *info_element,
-		u16 length,
-		struct rtllib_network *network,
-		struct rtllib_rx_stats *stats);
+void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
+				 struct rx_ts_record *pTS);
+int rtllib_parse_info_param(struct rtllib_device *ieee,
+			    struct rtllib_info_element *info_element,
+			    u16 length,
+			    struct rtllib_network *network,
+			    struct rtllib_rx_stats *stats);
 
 void rtllib_indicate_packets(struct rtllib_device *ieee,
 			     struct rtllib_rxb **prxbIndicateArray, u8  index);
-extern u8 HTFilterMCSRate(struct rtllib_device *ieee, u8 *pSupportMCS,
-			  u8 *pOperateMCS);
-extern void HTUseDefaultSetting(struct rtllib_device *ieee);
+void HTUseDefaultSetting(struct rtllib_device *ieee);
 #define RT_ASOC_RETRY_LIMIT	5
 u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
-extern void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p);
 #define SEM_DOWN_IEEE_WX(psem) down(psem)
 #define SEM_UP_IEEE_WX(psem) up(psem)
 
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index 42e88d6..17c276d 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -30,8 +30,6 @@
 #define DRV_NAME "rtllib_92e"
 #endif
 
-#define DMESG(x, a...)
-
 extern u32 rt_global_debug_component;
 
 /* These are the defines for rt_global_debug_component */
@@ -40,10 +38,7 @@
 	COMP_DBG		= (1 << 1),
 	COMP_INIT		= (1 << 2),
 	COMP_RECV		= (1 << 3),
-	COMP_SEND		= (1 << 4),
-	COMP_CMD		= (1 << 5),
 	COMP_POWER		= (1 << 6),
-	COMP_EPROM		= (1 << 7),
 	COMP_SWBW		= (1 << 8),
 	COMP_SEC		= (1 << 9),
 	COMP_LPS		= (1 << 10),
@@ -58,15 +53,12 @@
 	COMP_CH			= (1 << 19),
 	COMP_RF			= (1 << 20),
 	COMP_FIRMWARE		= (1 << 21),
-	COMP_HT			= (1 << 22),
 	COMP_RESET		= (1 << 23),
 	COMP_CMDPKT		= (1 << 24),
 	COMP_SCAN		= (1 << 25),
 	COMP_PS			= (1 << 26),
 	COMP_DOWN		= (1 << 27),
 	COMP_INTR		= (1 << 28),
-	COMP_LED		= (1 << 29),
-	COMP_MLME		= (1 << 30),
 	COMP_ERR		= (1 << 31)
 };
 
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index da862c3d..09f0820 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -44,6 +44,9 @@
 #include "rtllib.h"
 #include "dot11d.h"
 
+static void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb,
+			  struct rtllib_rx_stats *stats);
+
 static inline void rtllib_monitor_rx(struct rtllib_device *ieee,
 				     struct sk_buff *skb,
 				     struct rtllib_rx_stats *rx_status,
@@ -317,7 +320,6 @@
 			netdev_dbg(ieee->dev,
 				   "Decryption failed ICV mismatch (key %d)\n",
 				   skb->data[hdrlen + 3] >> 6);
-		ieee->ieee_stats.rx_discards_undecryptable++;
 		return -1;
 	}
 
@@ -1077,7 +1079,6 @@
 			netdev_dbg(ieee->dev,
 				   "Decryption failed (not set) (SA= %pM)\n",
 				   hdr->addr2);
-			ieee->ieee_stats.rx_discards_undecryptable++;
 			return -1;
 		}
 	}
@@ -1743,37 +1744,61 @@
 	return rc;
 }
 
-#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x
-
 static const char *get_info_element_string(u16 id)
 {
 	switch (id) {
-	MFIE_STRING(SSID);
-	MFIE_STRING(RATES);
-	MFIE_STRING(FH_SET);
-	MFIE_STRING(DS_SET);
-	MFIE_STRING(CF_SET);
-	MFIE_STRING(TIM);
-	MFIE_STRING(IBSS_SET);
-	MFIE_STRING(COUNTRY);
-	MFIE_STRING(HOP_PARAMS);
-	MFIE_STRING(HOP_TABLE);
-	MFIE_STRING(REQUEST);
-	MFIE_STRING(CHALLENGE);
-	MFIE_STRING(POWER_CONSTRAINT);
-	MFIE_STRING(POWER_CAPABILITY);
-	MFIE_STRING(TPC_REQUEST);
-	MFIE_STRING(TPC_REPORT);
-	MFIE_STRING(SUPP_CHANNELS);
-	MFIE_STRING(CSA);
-	MFIE_STRING(MEASURE_REQUEST);
-	MFIE_STRING(MEASURE_REPORT);
-	MFIE_STRING(QUIET);
-	MFIE_STRING(IBSS_DFS);
-	MFIE_STRING(RSN);
-	MFIE_STRING(RATES_EX);
-	MFIE_STRING(GENERIC);
-	MFIE_STRING(QOS_PARAMETER);
+	case MFIE_TYPE_SSID:
+		return "SSID";
+	case MFIE_TYPE_RATES:
+		return "RATES";
+	case MFIE_TYPE_FH_SET:
+		return "FH_SET";
+	case MFIE_TYPE_DS_SET:
+		return "DS_SET";
+	case MFIE_TYPE_CF_SET:
+		return "CF_SET";
+	case MFIE_TYPE_TIM:
+		return "TIM";
+	case MFIE_TYPE_IBSS_SET:
+		return "IBSS_SET";
+	case MFIE_TYPE_COUNTRY:
+		return "COUNTRY";
+	case MFIE_TYPE_HOP_PARAMS:
+		return "HOP_PARAMS";
+	case MFIE_TYPE_HOP_TABLE:
+		return "HOP_TABLE";
+	case MFIE_TYPE_REQUEST:
+		return "REQUEST";
+	case MFIE_TYPE_CHALLENGE:
+		return "CHALLENGE";
+	case MFIE_TYPE_POWER_CONSTRAINT:
+		return "POWER_CONSTRAINT";
+	case MFIE_TYPE_POWER_CAPABILITY:
+		return "POWER_CAPABILITY";
+	case MFIE_TYPE_TPC_REQUEST:
+		return "TPC_REQUEST";
+	case MFIE_TYPE_TPC_REPORT:
+		return "TPC_REPORT";
+	case MFIE_TYPE_SUPP_CHANNELS:
+		return "SUPP_CHANNELS";
+	case MFIE_TYPE_CSA:
+		return "CSA";
+	case MFIE_TYPE_MEASURE_REQUEST:
+		return "MEASURE_REQUEST";
+	case MFIE_TYPE_MEASURE_REPORT:
+		return "MEASURE_REPORT";
+	case MFIE_TYPE_QUIET:
+		return "QUIET";
+	case MFIE_TYPE_IBSS_DFS:
+		return "IBSS_DFS";
+	case MFIE_TYPE_RSN:
+		return "RSN";
+	case MFIE_TYPE_RATES_EX:
+		return "RATES_EX";
+	case MFIE_TYPE_GENERIC:
+		return "GENERIC";
+	case MFIE_TYPE_QOS_PARAMETER:
+		return "QOS_PARAMETER";
 	default:
 		return "UNKNOWN";
 	}
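For readers unfamiliar with the MFIE_STRING() macro this hunk removes: it relied on preprocessor token pasting (##) and stringification (#), so each invocation expanded to a full "case MFIE_TYPE_x: return \"x\";" arm. A standalone sketch of that idiom with a hypothetical enum, shown only to clarify what the old table expanded to; the driver now writes the cases out explicitly:

	#include <stdio.h>

	enum elem { ELEM_SSID = 0, ELEM_RATES = 1 };

	/* paste the token onto a prefix for the case label, stringify it for the return */
	#define ELEM_STRING(x) case ELEM_ ##x: return #x

	static const char *elem_name(int id)
	{
		switch (id) {
		ELEM_STRING(SSID);
		ELEM_STRING(RATES);
		default:
			return "UNKNOWN";
		}
	}

	int main(void)
	{
		printf("%s %s\n", elem_name(ELEM_SSID), elem_name(ELEM_RATES));
		return 0;
	}
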
@@ -2717,9 +2742,9 @@
 	kfree(network);
 }
 
-void rtllib_rx_mgt(struct rtllib_device *ieee,
-		      struct sk_buff *skb,
-		      struct rtllib_rx_stats *stats)
+static void rtllib_rx_mgt(struct rtllib_device *ieee,
+			  struct sk_buff *skb,
+			  struct rtllib_rx_stats *stats)
 {
 	struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data;
 
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index d320c31..1503cbb 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -23,7 +23,10 @@
 #include <linux/ieee80211.h>
 #include "dot11d.h"
 
-short rtllib_is_54g(struct rtllib_network *net)
+static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl);
+
+
+static short rtllib_is_54g(struct rtllib_network *net)
 {
 	return (net->rates_ex_len > 0) || (net->rates_len > 4);
 }
@@ -107,7 +110,7 @@
 	*tag_p = tag;
 }
 
-void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
+static void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p)
 {
 	u8 *tag = *tag_p;
 
@@ -369,7 +372,7 @@
 	return skb;
 }
 
-struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
+static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee);
 
 static void rtllib_send_beacon(struct rtllib_device *ieee)
 {
@@ -483,7 +486,7 @@
 }
 
 
-void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
 {
 	if (ieee->active_scan && (ieee->softmac_features &
 	    IEEE_SOFTMAC_PROBERQ)) {
@@ -492,7 +495,7 @@
 	}
 }
 
-void rtllib_update_active_chan_map(struct rtllib_device *ieee)
+static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
 {
 	memcpy(ieee->active_channel_map, GET_DOT11D_INFO(ieee)->channel_map,
 	       MAX_CHANNEL_NUMBER+1);
@@ -501,7 +504,7 @@
 /* this performs syncro scan blocking the caller until all channels
  * in the allowed channel map has been checked.
  */
-void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
 {
 	union iwreq_data wrqu;
 	short ch = 0;
@@ -1401,7 +1404,7 @@
 	return skb;
 }
 
-void rtllib_associate_abort(struct rtllib_device *ieee)
+static void rtllib_associate_abort(struct rtllib_device *ieee)
 {
 	unsigned long flags;
 
@@ -1511,7 +1514,6 @@
 	}
 }
 
-#define CANCELLED  2
 static void rtllib_associate_complete_wq(void *data)
 {
 	struct rtllib_device *ieee = (struct rtllib_device *)
@@ -1753,7 +1755,7 @@
 	}
 }
 
-void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
+static void rtllib_softmac_check_all_nets(struct rtllib_device *ieee)
 {
 	unsigned long flags;
 	struct rtllib_network *target;
@@ -2109,7 +2111,7 @@
 
 }
 
-void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
+static void rtllib_sta_wakeup(struct rtllib_device *ieee, short nl)
 {
 	if (ieee->sta_sleep == LPS_IS_WAKE) {
 		if (nl) {
@@ -2545,7 +2547,7 @@
 }
 
 /* called in user context only */
-void rtllib_start_master_bss(struct rtllib_device *ieee)
+static void rtllib_start_master_bss(struct rtllib_device *ieee)
 {
 	ieee->assoc_id = 1;
 
@@ -2720,7 +2722,7 @@
 }
 
 /* this is called only in user context, with wx_sem held */
-void rtllib_start_bss(struct rtllib_device *ieee)
+static void rtllib_start_bss(struct rtllib_device *ieee)
 {
 	unsigned long flags;
 
@@ -2817,7 +2819,7 @@
 	up(&ieee->wx_sem);
 }
 
-struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
+static struct sk_buff *rtllib_get_beacon_(struct rtllib_device *ieee)
 {
 	const u8 broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
@@ -3084,7 +3086,7 @@
 	 */
 	netdev_info(ieee->dev, "%s WPA\n", value ? "enabling" : "disabling");
 	ieee->wpa_enabled = value;
-	memset(ieee->ap_mac_addr, 0, 6);
+	eth_zero_addr(ieee->ap_mac_addr);
 	return 0;
 }
 
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index e99ea5e..b992e46 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -151,7 +151,7 @@
 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
 
-inline int rtllib_put_snap(u8 *data, u16 h_proto)
+static int rtllib_put_snap(u8 *data, u16 h_proto)
 {
 	struct rtllib_snap_hdr *snap;
 	u8 *oui;
@@ -205,7 +205,6 @@
 	if (res < 0) {
 		netdev_info(ieee->dev, "%s: Encryption failed: len=%d.\n",
 			    ieee->dev->name, frag->len);
-		ieee->ieee_stats.tx_discards++;
 		return -1;
 	}
 
@@ -515,8 +514,8 @@
 	}
 }
 
-u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
-			u8 *dst)
+static u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
+			       u8 *dst)
 {
 	u16 seqnum = 0;
 
@@ -566,7 +565,7 @@
 		return ieee->rate & 0x7F;
 }
 
-int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
+static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
 {
 	struct rtllib_device *ieee = (struct rtllib_device *)
 				     netdev_priv_rsl(dev);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index 23af2aa..d481a26 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -2169,98 +2169,99 @@
 
 
 /* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev);
-extern struct net_device *alloc_ieee80211(int sizeof_priv);
+void free_ieee80211(struct net_device *dev);
+struct net_device *alloc_ieee80211(int sizeof_priv);
 
-extern int ieee80211_set_encryption(struct ieee80211_device *ieee);
+int ieee80211_set_encryption(struct ieee80211_device *ieee);
 
 /* ieee80211_tx.c */
 
-extern int ieee80211_encrypt_fragment(
-	struct ieee80211_device *ieee,
-	struct sk_buff *frag,
-	int hdr_len);
+int ieee80211_encrypt_fragment(struct ieee80211_device *ieee,
+			       struct sk_buff *frag, int hdr_len);
 
-extern int ieee80211_xmit(struct sk_buff *skb,
-			  struct net_device *dev);
-extern void ieee80211_txb_free(struct ieee80211_txb *);
+int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev);
+void ieee80211_txb_free(struct ieee80211_txb *);
 
 
 /* ieee80211_rx.c */
-extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
-			struct ieee80211_rx_stats *rx_stats);
-extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
-			     struct rtl_80211_hdr_4addr *header,
-			     struct ieee80211_rx_stats *stats);
+int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+		 struct ieee80211_rx_stats *rx_stats);
+void ieee80211_rx_mgt(struct ieee80211_device *ieee,
+		      struct rtl_80211_hdr_4addr *header,
+		      struct ieee80211_rx_stats *stats);
 
 /* ieee80211_wx.c */
-extern int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
-				   struct iw_request_info *info,
-				   union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
-				   struct iw_request_info *info,
-				   union iwreq_data *wrqu, char *key);
-extern int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
+int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *key);
+int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
 			    struct iw_request_info *info,
-			    union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
+			    union iwreq_data *wrqu, char *key);
+int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
 			    struct iw_request_info *info,
-			    union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
-			       struct iw_request_info *info,
-			       struct iw_param *data, char *extra);
-extern int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
-			       struct iw_request_info *info,
-			       union iwreq_data *wrqu, char *extra);
-extern int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
+			    union iwreq_data *wrqu, char *key);
+int ieee80211_wx_get_encode_ext(struct ieee80211_device *ieee,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_auth(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  struct iw_param *data, char *extra);
+int ieee80211_wx_set_mlme(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_gen_ie(struct ieee80211_device *ieee, u8 *ie, size_t len);
 
 /* ieee80211_softmac.c */
-extern short ieee80211_is_54g(const struct ieee80211_network *net);
-extern short ieee80211_is_shortslot(const struct ieee80211_network *net);
-extern int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
-			struct ieee80211_rx_stats *rx_stats, u16 type,
-			u16 stype);
-extern void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net);
+short ieee80211_is_54g(const struct ieee80211_network *net);
+short ieee80211_is_shortslot(const struct ieee80211_network *net);
+int ieee80211_rx_frame_softmac(struct ieee80211_device *ieee,
+			       struct sk_buff *skb,
+			       struct ieee80211_rx_stats *rx_stats,
+			       u16 type, u16 stype);
+void ieee80211_softmac_new_net(struct ieee80211_device *ieee,
+			       struct ieee80211_network *net);
 
 void SendDisassociation(struct ieee80211_device *ieee, u8 *asSta, u8 asRsn);
-extern void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee);
+void ieee80211_softmac_xmit(struct ieee80211_txb *txb,
+			    struct ieee80211_device *ieee);
 
-extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
-extern void ieee80211_start_bss(struct ieee80211_device *ieee);
-extern void ieee80211_start_master_bss(struct ieee80211_device *ieee);
-extern void ieee80211_start_ibss(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_init(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_free(struct ieee80211_device *ieee);
-extern void ieee80211_associate_abort(struct ieee80211_device *ieee);
-extern void ieee80211_disassociate(struct ieee80211_device *ieee);
-extern void ieee80211_stop_scan(struct ieee80211_device *ieee);
-extern void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
-extern void ieee80211_check_all_nets(struct ieee80211_device *ieee);
-extern void ieee80211_start_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_stop_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
-extern void ieee80211_reset_queue(struct ieee80211_device *ieee);
-extern void ieee80211_wake_queue(struct ieee80211_device *ieee);
-extern void ieee80211_stop_queue(struct ieee80211_device *ieee);
-extern struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
-extern void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
-extern void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
-extern int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_point *p);
-extern void notify_wx_assoc_event(struct ieee80211_device *ieee);
-extern void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
+void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
+void notify_wx_assoc_event(struct ieee80211_device *ieee);
+void ieee80211_softmac_check_all_nets(struct ieee80211_device *ieee);
+void ieee80211_start_bss(struct ieee80211_device *ieee);
+void ieee80211_start_master_bss(struct ieee80211_device *ieee);
+void ieee80211_start_ibss(struct ieee80211_device *ieee);
+void ieee80211_softmac_init(struct ieee80211_device *ieee);
+void ieee80211_softmac_free(struct ieee80211_device *ieee);
+void ieee80211_associate_abort(struct ieee80211_device *ieee);
+void ieee80211_disassociate(struct ieee80211_device *ieee);
+void ieee80211_stop_scan(struct ieee80211_device *ieee);
+void ieee80211_start_scan_syncro(struct ieee80211_device *ieee);
+void ieee80211_check_all_nets(struct ieee80211_device *ieee);
+void ieee80211_start_protocol(struct ieee80211_device *ieee);
+void ieee80211_stop_protocol(struct ieee80211_device *ieee);
+void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee);
+void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee);
+void ieee80211_reset_queue(struct ieee80211_device *ieee);
+void ieee80211_wake_queue(struct ieee80211_device *ieee);
+void ieee80211_stop_queue(struct ieee80211_device *ieee);
+struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee);
+void ieee80211_start_send_beacons(struct ieee80211_device *ieee);
+void ieee80211_stop_send_beacons(struct ieee80211_device *ieee);
+int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee,
+				   struct iw_point *p);
+void notify_wx_assoc_event(struct ieee80211_device *ieee);
+void ieee80211_ps_tx_ack(struct ieee80211_device *ieee, short success);
 
-extern void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
+void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee);
 
 /* ieee80211_crypt_ccmp&tkip&wep.c */
-extern void ieee80211_tkip_null(void);
-extern void ieee80211_wep_null(void);
-extern void ieee80211_ccmp_null(void);
+void ieee80211_tkip_null(void);
+void ieee80211_wep_null(void);
+void ieee80211_ccmp_null(void);
 
 int ieee80211_crypto_init(void);
 void ieee80211_crypto_deinit(void);
@@ -2273,116 +2274,128 @@
 
 /* ieee80211_softmac_wx.c */
 
-extern int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
-			    struct iw_request_info *info,
-			    union iwreq_data *wrqu, char *ext);
+int ieee80211_wx_get_wap(struct ieee80211_device *ieee,
+			 struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *ext);
 
-extern int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
+int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
 			 struct iw_request_info *info,
 			 union iwreq_data *awrq,
 			 char *extra);
 
-extern int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b);
+int ieee80211_wx_get_essid(struct ieee80211_device *ieee,
+			   struct iw_request_info *a,
+			   union iwreq_data *wrqu, char *b);
 
-extern int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_rate(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_rate(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
-			     union iwreq_data *wrqu, char *b);
+int ieee80211_wx_set_mode(struct ieee80211_device *ieee,
+			  struct iw_request_info *a,
+			  union iwreq_data *wrqu, char *b);
 
-extern int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a,
-			     union iwreq_data *wrqu, char *b);
+int ieee80211_wx_set_scan(struct ieee80211_device *ieee,
+			  struct iw_request_info *a,
+			  union iwreq_data *wrqu, char *b);
 
-extern int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
-			      struct iw_request_info *a,
-			      union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
+			   struct iw_request_info *a,
+			   union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a,
-			     union iwreq_data *wrqu, char *b);
+int ieee80211_wx_get_mode(struct ieee80211_device *ieee,
+			  struct iw_request_info *a,
+			  union iwreq_data *wrqu, char *b);
 
-extern int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
-			     union iwreq_data *wrqu, char *b);
+int ieee80211_wx_set_freq(struct ieee80211_device *ieee,
+			  struct iw_request_info *a,
+			  union iwreq_data *wrqu, char *b);
 
-extern int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a,
-			     union iwreq_data *wrqu, char *b);
+int ieee80211_wx_get_freq(struct ieee80211_device *ieee,
+			  struct iw_request_info *a,
+			  union iwreq_data *wrqu, char *b);
 
 /* ieee80211_module.c */
-extern int ieee80211_debug_init(void);
-extern void ieee80211_debug_exit(void);
+int ieee80211_debug_init(void);
+void ieee80211_debug_exit(void);
 
 //extern void ieee80211_wx_sync_scan_wq(struct ieee80211_device *ieee);
-extern void ieee80211_wx_sync_scan_wq(struct work_struct *work);
+void ieee80211_wx_sync_scan_wq(struct work_struct *work);
 
 
-extern int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
-			       struct iw_request_info *info,
+int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
+			   struct iw_request_info *info,
 			       union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_get_name(struct ieee80211_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_name(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_set_power(struct ieee80211_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_power(struct ieee80211_device *ieee,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_get_power(struct ieee80211_device *ieee,
-				 struct iw_request_info *info,
-				 union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_power(struct ieee80211_device *ieee,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_set_rts(struct ieee80211_device *ieee,
+			 struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *extra);
 
-extern int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
-			     struct iw_request_info *info,
-			     union iwreq_data *wrqu, char *extra);
+int ieee80211_wx_get_rts(struct ieee80211_device *ieee,
+			 struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *extra);
 //HT
 #define MAX_RECEIVE_BUFFER_SIZE 9100  //
-extern void HTDebugHTCapability(u8 *CapIE, u8 *TitleString );
-extern void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
+void HTDebugHTCapability(u8 *CapIE, u8 *TitleString);
+void HTDebugHTInfo(u8 *InfoIE, u8 *TitleString);
 
-void HTSetConnectBwMode(struct ieee80211_device *ieee, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET    Offset);
-extern void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
-extern void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap, u8 *len, u8 isEncrypt);
-extern void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo, u8 *len, u8 isEncrypt);
-extern void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg, u8 *len);
-extern void HTOnAssocRsp(struct ieee80211_device *ieee);
-extern void HTInitializeHTInfo(struct ieee80211_device *ieee);
-extern void HTInitializeBssDesc(PBSS_HT pBssHT);
-extern void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee, struct ieee80211_network *pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee,   struct ieee80211_network *pNetwork);
-extern u8 HTGetHighestMCSRate(struct ieee80211_device *ieee, u8 *pMCSRateSet, u8 *pMCSFilter);
+void HTSetConnectBwMode(struct ieee80211_device *ieee,
+			HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset);
+void HTUpdateDefaultSetting(struct ieee80211_device *ieee);
+void HTConstructCapabilityElement(struct ieee80211_device *ieee, u8 *posHTCap,
+				  u8 *len, u8 isEncrypt);
+void HTConstructInfoElement(struct ieee80211_device *ieee, u8 *posHTInfo,
+			    u8 *len, u8 isEncrypt);
+void HTConstructRT2RTAggElement(struct ieee80211_device *ieee, u8 *posRT2RTAgg,
+				u8 *len);
+void HTOnAssocRsp(struct ieee80211_device *ieee);
+void HTInitializeHTInfo(struct ieee80211_device *ieee);
+void HTInitializeBssDesc(PBSS_HT pBssHT);
+void HTResetSelfAndSavePeerSetting(struct ieee80211_device *ieee,
+				   struct ieee80211_network *pNetwork);
+void HTUpdateSelfAndPeerSetting(struct ieee80211_device *ieee,
+				struct ieee80211_network *pNetwork);
+u8 HTGetHighestMCSRate(struct ieee80211_device *ieee,
+		       u8 *pMCSRateSet, u8 *pMCSFilter);
 extern u8 MCS_FILTER_ALL[];
 extern u16 MCS_DATA_RATE[2][2][77] ;
-extern u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
+u8 HTCCheck(struct ieee80211_device *ieee, u8 *pFrame);
 //extern void HTSetConnectBwModeCallback(unsigned long data);
-extern void HTResetIOTSetting(PRT_HIGH_THROUGHPUT  pHTInfo);
-extern bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
-extern u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee,  u8      nMcsRate);
-extern u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
-extern u16  TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
+void HTResetIOTSetting(PRT_HIGH_THROUGHPUT pHTInfo);
+bool IsHTHalfNmodeAPs(struct ieee80211_device *ieee);
+u16 HTHalfMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+u16 HTMcsToDataRate(struct ieee80211_device *ieee, u8 nMcsRate);
+u16 TxCountToDataRate(struct ieee80211_device *ieee, u8 nDataRate);
 //function in BAPROC.c
-extern int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee,
-				 struct sk_buff *skb);
-extern int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee,
-				 struct sk_buff *skb);
-extern int ieee80211_rx_DELBA(struct ieee80211_device *ieee,struct sk_buff *skb);
-extern void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
-			u8 Policy, u8 bOverwritePending);
-extern void TsInitDelBA(struct ieee80211_device *ieee,
-			PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
-extern void BaSetupTimeOut(unsigned long data);
-extern void TxBaInactTimeout(unsigned long data);
-extern void RxBaInactTimeout(unsigned long data);
-extern void ResetBaEntry(PBA_RECORD pBA);
+int ieee80211_rx_ADDBAReq(struct ieee80211_device *ieee, struct sk_buff *skb);
+int ieee80211_rx_ADDBARsp(struct ieee80211_device *ieee, struct sk_buff *skb);
+int ieee80211_rx_DELBA(struct ieee80211_device *ieee, struct sk_buff *skb);
+void TsInitAddBA(struct ieee80211_device *ieee, PTX_TS_RECORD pTS,
+		 u8 Policy, u8 bOverwritePending);
+void TsInitDelBA(struct ieee80211_device *ieee,
+		 PTS_COMMON_INFO pTsCommonInfo, TR_SELECT TxRxSelect);
+void BaSetupTimeOut(unsigned long data);
+void TxBaInactTimeout(unsigned long data);
+void RxBaInactTimeout(unsigned long data);
+void ResetBaEntry(PBA_RECORD pBA);
 //function in TS.c
-extern bool GetTs(
+bool GetTs(
 	struct ieee80211_device		*ieee,
 	PTS_COMMON_INFO                 *ppTS,
 	u8                              *Addr,
@@ -2390,10 +2403,10 @@
 	TR_SELECT                       TxRxSelect,  //Rx:1, Tx:0
 	bool                            bAddNewTs
 	);
-extern void TSInitialize(struct ieee80211_device *ieee);
-extern  void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD   pTxTS);
-extern void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
-extern void RemoveAllTS(struct ieee80211_device *ieee);
+void TSInitialize(struct ieee80211_device *ieee);
+void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD   pTxTS);
+void RemovePeerTS(struct ieee80211_device *ieee, u8 *Addr);
+void RemoveAllTS(struct ieee80211_device *ieee);
 void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee);
 
 extern const long ieee80211_wlan_frequencies[];
@@ -2423,14 +2436,16 @@
 /* For the function is more related to hardware setting, it's better to use the
  * ieee handler to refer to it.
  */
-extern short check_nic_enough_desc(struct net_device *dev, int queue_index);
-extern int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int ieee80211_parse_info_param(struct ieee80211_device *ieee,
-		struct ieee80211_info_element *info_element,
-		u16 length,
-		struct ieee80211_network *network,
-		struct ieee80211_rx_stats *stats);
+short check_nic_enough_desc(struct net_device *dev, int queue_index);
+int ieee80211_data_xmit(struct sk_buff *skb, struct net_device *dev);
+int ieee80211_parse_info_param(struct ieee80211_device *ieee,
+			       struct ieee80211_info_element *info_element,
+			       u16 length,
+			       struct ieee80211_network *network,
+			       struct ieee80211_rx_stats *stats);
 
-void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray,u8  index);
+void ieee80211_indicate_packets(struct ieee80211_device *ieee,
+				struct ieee80211_rxb **prxbIndicateArray,
+				u8 index);
 #define RT_ASOC_RETRY_LIMIT	5
 #endif /* IEEE80211_H */
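
Note: the whole ieee80211.h block is one mechanical change repeated: the extern storage-class specifier is removed from function prototypes (it is implicit for function declarations in C, so the two forms are equivalent) and the argument lists are re-wrapped to the usual kernel alignment. Object declarations such as MCS_FILTER_ALL[] and MCS_DATA_RATE[][][] keep extern, because for data it is not redundant. A small sketch with a made-up prototype:

struct net_device;

/* These two declarations are identical to the compiler: 'extern' is the
 * default storage class for a function declaration at file scope. */
extern int example_get_scan(struct net_device *dev, char *buf, int len);
int example_get_scan(struct net_device *dev, char *buf, int len);

/* For objects 'extern' still matters: without it this would be a
 * (tentative) definition rather than a reference to one elsewhere. */
extern unsigned char example_filter[];
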
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index b374088..0aa9021 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1014,7 +1014,7 @@
 		goto rx_dropped;
 
 	// if QoS enabled, should check the sequence for each of the AC
-	if( (ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)){
+	if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) {
 		if (is_duplicate_packet(ieee, hdr))
 		goto rx_dropped;
 
@@ -1307,7 +1307,7 @@
 	}
 
 //added by amy for reorder
-	if(ieee->pHTInfo->bCurRxReorderEnable == false ||pTS == NULL){
+	if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL){
 //added by amy for reorder
 		for(i = 0; i<rxb->nr_subframes; i++) {
 			struct sk_buff *sub_skb = rxb->subframes[i];
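
Note: the ieee80211_rx.c changes (and many of the hunks that follow in this series) replace explicit comparisons against true/false with direct boolean tests, which checkpatch warns about as error-prone. The shape of the cleanup, as a tiny sketch:

#include <linux/types.h>

static void demo(bool reorder_enabled)
{
	/* Discouraged: comparing a bool against a boolean constant. */
	if (reorder_enabled == false)
		return;

	/* Preferred: test the value (or its negation) directly. */
	if (!reorder_enabled)
		return;
}
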
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1b11acb..39e9892 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1177,7 +1177,7 @@
 			tag = skb_put(skb, ht_cap_len);
 			*tag++ = MFIE_TYPE_HT_CAP;
 			*tag++ = ht_cap_len - 2;
-			memcpy(tag, ht_cap_buf,ht_cap_len -2);
+			memcpy(tag, ht_cap_buf, ht_cap_len - 2);
 			tag += ht_cap_len -2;
 		}
 	}
@@ -1214,7 +1214,7 @@
 			tag = skb_put(skb, realtek_ie_len);
 			*tag++ = MFIE_TYPE_GENERIC;
 			*tag++ = realtek_ie_len - 2;
-			memcpy(tag, realtek_ie_buf,realtek_ie_len -2 );
+			memcpy(tag, realtek_ie_buf, realtek_ie_len - 2);
 		}
 	}
 //	printk("<=====%s(), %p, %p\n", __func__, ieee->dev, ieee->dev->dev_addr);
@@ -1964,7 +1964,7 @@
 			}
 
 			if (ieee->current_network.mode == IEEE_N_24G &&
-					bHalfSupportNmode == true) {
+					bHalfSupportNmode) {
 				netdev_dbg(ieee->dev, "enter half N mode\n");
 				ieee->bHalfWirelessN24GMode = true;
 			} else
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 714fbca..3e502520 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -391,7 +391,7 @@
 			      union iwreq_data *wrqu, char *extra)
 {
 
-	int ret=0,len;
+	int ret = 0, len;
 	short proto_started;
 	unsigned long flags;
 
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 5353a45..fff8d58 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -336,12 +336,12 @@
 			printk("===>can't get TS\n");
 			return;
 		}
-		if (pTxTs->TxAdmittedBARecord.bValid == false)
+		if (!pTxTs->TxAdmittedBARecord.bValid)
 		{
 			TsStartAddBaProcess(ieee, pTxTs);
 			goto FORCED_AGG_SETTING;
 		}
-		else if (pTxTs->bUsingBa == false)
+		else if (!pTxTs->bUsingBa)
 		{
 			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
 				pTxTs->bUsingBa = true;
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
index 9ff8e05..3bde744 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_BAProc.c
@@ -364,8 +364,8 @@
 	printk("====================>rx ADDBAREQ from :%pM\n", dst);
 //some other capability is not ready now.
 	if ((ieee->current_network.qos_data.active == 0) ||
-		(ieee->pHTInfo->bCurrentHTSupport == false)) //||
-	//	(ieee->pStaQos->bEnableRxImmBA == false)	)
+		(!ieee->pHTInfo->bCurrentHTSupport)) //||
+	//	(!ieee->pStaQos->bEnableRxImmBA)	)
 	{
 		rc = ADDBA_STATUS_REFUSED;
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n", ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
@@ -462,8 +462,8 @@
 	// Check the capability
 	// Since we can always receive A-MPDU, we just check if it is under HT mode.
 	if (ieee->current_network.qos_data.active == 0  ||
-	    ieee->pHTInfo->bCurrentHTSupport == false ||
-	    ieee->pHTInfo->bCurrentAMPDUEnable == false) {
+	    !ieee->pHTInfo->bCurrentHTSupport ||
+	    !ieee->pHTInfo->bCurrentAMPDUEnable) {
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "reject to ADDBA_RSP as some capability is not ready(%d, %d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport, ieee->pHTInfo->bCurrentAMPDUEnable);
 		ReasonCode = DELBA_REASON_UNKNOWN_BA;
 		goto OnADDBARsp_Reject;
@@ -502,7 +502,7 @@
 		IEEE80211_DEBUG(IEEE80211_DL_BA, "OnADDBARsp(): Recv ADDBA Rsp. Drop because already admit it! \n");
 		return -1;
 	}
-	else if((pPendingBA->bValid == false) ||(*pDialogToken != pPendingBA->DialogToken))
+	else if((!pPendingBA->bValid) ||(*pDialogToken != pPendingBA->DialogToken))
 	{
 		IEEE80211_DEBUG(IEEE80211_DL_ERR,  "OnADDBARsp(): Recv ADDBA Rsp. BA invalid, DELBA! \n");
 		ReasonCode = DELBA_REASON_UNKNOWN_BA;
@@ -571,7 +571,6 @@
 {
 	 struct rtl_80211_hdr_3addr *delba = NULL;
 	PDELBA_PARAM_SET	pDelBaParamSet = NULL;
-	u16			*pReasonCode = NULL;
 	u8			*dst = NULL;
 
 	if (skb->len < sizeof(struct rtl_80211_hdr_3addr) + 6) {
@@ -583,7 +582,7 @@
 	}
 
 	if (ieee->current_network.qos_data.active == 0 ||
-		ieee->pHTInfo->bCurrentHTSupport == false )
+	    !ieee->pHTInfo->bCurrentHTSupport)
 	{
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "received DELBA while QOS or HT is not supported(%d, %d)\n",ieee->current_network.qos_data.active, ieee->pHTInfo->bCurrentHTSupport);
 		return -1;
@@ -592,9 +591,7 @@
 	IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA|IEEE80211_DL_BA, skb->data, skb->len);
 	delba = (struct rtl_80211_hdr_3addr *)skb->data;
 	dst = (u8 *)(&delba->addr2[0]);
-	delba += sizeof(struct rtl_80211_hdr_3addr);
-	pDelBaParamSet = (PDELBA_PARAM_SET)(delba+2);
-	pReasonCode = (u16 *)(delba+4);
+	pDelBaParamSet = (PDELBA_PARAM_SET)&delba->payload[2];
 
 	if(pDelBaParamSet->field.Initiator == 1)
 	{
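
Note: the OnDELBA hunk is more than style. The removed line `delba += sizeof(struct rtl_80211_hdr_3addr)` advances a struct pointer by sizeof(struct) *elements*, not bytes, so the old code pointed well past the DELBA parameter set (the never-used pReasonCode is dropped at the same time); indexing the header's payload member sidesteps the scaling mistake. A short illustration of the scaling rule, using a hypothetical 24-byte header:

#include <linux/types.h>

struct hdr {
	u8 bytes[24];	/* hypothetical 24-byte management header */
};

static void demo(struct hdr *h)
{
	/* Moves forward 24 * 24 = 576 bytes: pointer arithmetic is in elements. */
	struct hdr *wrong = h + sizeof(struct hdr);

	/* Moves forward exactly 24 bytes, just past the header. */
	u8 *right = (u8 *)h + sizeof(struct hdr);

	(void)wrong;
	(void)right;
}
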
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
index c2588f8..c27397b 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c
@@ -224,9 +224,9 @@
 	bool			retValue = false;
 	PRT_HIGH_THROUGHPUT	 pHTInfo = ieee->pHTInfo;
 
-	if(pHTInfo->bCurrentHTSupport == false )	// wireless is n mode
+	if(!pHTInfo->bCurrentHTSupport)		// wireless is n mode
 		retValue = false;
-	else if(pHTInfo->bRegBW40MHz == false)	// station supports 40 bw
+	else if(!pHTInfo->bRegBW40MHz)		// station supports 40 bw
 		retValue = false;
 	else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))	// station in half n mode
 		retValue = false;
@@ -243,7 +243,7 @@
 	bool			retValue = false;
 	PRT_HIGH_THROUGHPUT	 pHTInfo = ieee->pHTInfo;
 
-	if(pHTInfo->bCurrentHTSupport == false )	// wireless is n mode
+	if(!pHTInfo->bCurrentHTSupport)		// wireless is n mode
 		retValue = false;
 	else if(!ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))	// station in half n mode
 		retValue = false;
@@ -675,7 +675,7 @@
 	if ( (ieee->iw_mode == IW_MODE_ADHOC) || (ieee->iw_mode == IW_MODE_MASTER)) //ap mode is not currently supported
 	{
 		pHTInfoEle->ControlChl			= ieee->current_network.channel;
-		pHTInfoEle->ExtChlOffset			= ((pHT->bRegBW40MHz == false)?HT_EXTCHNL_OFFSET_NO_EXT:
+		pHTInfoEle->ExtChlOffset			= ((!pHT->bRegBW40MHz)?HT_EXTCHNL_OFFSET_NO_EXT:
 											(ieee->current_network.channel<=6)?
 												HT_EXTCHNL_OFFSET_UPPER:HT_EXTCHNL_OFFSET_LOWER);
 		pHTInfoEle->RecommemdedTxWidth	= pHT->bRegBW40MHz;
@@ -945,7 +945,7 @@
 	static u8				EWC11NHTCap[] = {0x00, 0x90, 0x4c, 0x33};		// For 11n EWC definition, 2007.07.17, by Emily
 	static u8				EWC11NHTInfo[] = {0x00, 0x90, 0x4c, 0x34};	// For 11n EWC definition, 2007.07.17, by Emily
 
-	if (pHTInfo->bCurrentHTSupport == false) {
+	if (!pHTInfo->bCurrentHTSupport) {
 		IEEE80211_DEBUG(IEEE80211_DL_ERR, "<=== HTOnAssocRsp(): HT_DISABLE\n");
 		return;
 	}
@@ -956,7 +956,7 @@
 //	HTDebugHTCapability(pHTInfo->PeerHTCapBuf,"HTOnAssocRsp_wq");
 //	HTDebugHTInfo(pHTInfo->PeerHTInfoBuf,"HTOnAssocRsp_wq");
 	//
-	if(!memcmp(pHTInfo->PeerHTCapBuf,EWC11NHTCap, sizeof(EWC11NHTCap)))
+	if (!memcmp(pHTInfo->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
 		pPeerHTCap = (PHT_CAPABILITY_ELE)(&pHTInfo->PeerHTCapBuf[4]);
 	else
 		pPeerHTCap = (PHT_CAPABILITY_ELE)(pHTInfo->PeerHTCapBuf);
@@ -976,7 +976,7 @@
 	//
 	HTSetConnectBwMode(ieee, (HT_CHANNEL_WIDTH)(pPeerHTCap->ChlWidth), (HT_EXTCHNL_OFFSET)(pPeerHTInfo->ExtChlOffset));
 
-//	if(pHTInfo->bCurBW40MHz == true)
+//	if (pHTInfo->bCurBW40MHz)
 		pHTInfo->bCurTxBW40MHz = ((pPeerHTInfo->RecommemdedTxWidth == 1)?true:false);
 
 	//
@@ -1341,7 +1341,7 @@
 	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
 //	u32 flags = 0;
 
-	if(pHTInfo->bRegBW40MHz == false)
+	if(!pHTInfo->bRegBW40MHz)
 		return;
 
 
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index ea92fde..f33c743 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -112,7 +112,7 @@
 
 static void ResetTsCommonInfo(PTS_COMMON_INFO pTsCommonInfo)
 {
-	memset(pTsCommonInfo->Addr, 0, 6);
+	eth_zero_addr(pTsCommonInfo->Addr);
 	memset(&pTsCommonInfo->TSpec, 0, sizeof(TSPEC_BODY));
 	memset(&pTsCommonInfo->TClass, 0, sizeof(QOS_TCLAS)*TCLAS_NUM);
 	pTsCommonInfo->TClasProc = 0;
@@ -584,7 +584,7 @@
 
 void TsStartAddBaProcess(struct ieee80211_device *ieee, PTX_TS_RECORD	pTxTS)
 {
-	if(pTxTS->bAddBaReqInProgress == false)
+	if(!pTxTS->bAddBaReqInProgress)
 	{
 		pTxTS->bAddBaReqInProgress = true;
 		if(pTxTS->bAddBaReqDelayed)
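
Note: ResetTsCommonInfo now uses eth_zero_addr() instead of an open-coded memset with a magic 6. The helper lives in <linux/etherdevice.h> and clears ETH_ALEN bytes, so the intent (an Ethernet/MAC address) is explicit. Roughly:

#include <linux/etherdevice.h>	/* eth_zero_addr(), ETH_ALEN */

static void reset_addr(u8 *addr)
{
	/* Equivalent to memset(addr, 0, ETH_ALEN), but self-documenting. */
	eth_zero_addr(addr);
}
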
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
index 6e5662f..1ba4f83 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.h
@@ -14,11 +14,10 @@
 #define RTL8225H
 
 #define RTL819X_TOTAL_RF_PATH 2 /* for 8192U */
-extern void PHY_SetRF8256Bandwidth(struct net_device *dev,
-				   HT_CHANNEL_WIDTH Bandwidth);
-extern void PHY_RF8256_Config(struct net_device *dev);
-extern void phy_RF8256_Config_ParaFile(struct net_device *dev);
-extern void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8	powerlevel);
-extern void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
+void PHY_SetRF8256Bandwidth(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth);
+void PHY_RF8256_Config(struct net_device *dev);
+void phy_RF8256_Config_ParaFile(struct net_device *dev);
+void PHY_SetRF8256CCKTxPower(struct net_device *dev, u8	powerlevel);
+void PHY_SetRF8256OFDMTxPower(struct net_device *dev, u8 powerlevel);
 
 #endif
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 6c2e438..785fd02 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1187,7 +1187,7 @@
 void write_phy_ofdm(struct net_device *dev, u8 adr, u32 data);
 void rtl8185_tx_antenna(struct net_device *dev, u8 ant);
 void rtl8192_set_rxconf(struct net_device *dev);
-extern void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
+void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
 
 void EnableHWSecurityConfig8192(struct net_device *dev);
 void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index b852396..6f6fe38 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2043,16 +2043,9 @@
 
 static bool GetHalfNmodeSupportByAPs819xUsb(struct net_device *dev)
 {
-	bool			Reval;
 	struct r8192_priv *priv = ieee80211_priv(dev);
-	struct ieee80211_device *ieee = priv->ieee80211;
 
-	if (ieee->bHalfWirelessN24GMode == true)
-		Reval = true;
-	else
-		Reval =  false;
-
-	return Reval;
+	return priv->ieee80211->bHalfWirelessN24GMode;
 }
 
 static void rtl8192_refresh_supportrate(struct r8192_priv *priv)
@@ -2762,7 +2755,7 @@
 	//
 #ifdef TO_DO_LIST
 	if (Adapter->ResetProgress == RESET_TYPE_NORESET) {
-		if (pMgntInfo->RegRfOff == true) { /* User disable RF via registry. */
+		if (pMgntInfo->RegRfOff) { /* User disable RF via registry. */
 			RT_TRACE((COMP_INIT|COMP_RF), DBG_LOUD, ("InitializeAdapter819xUsb(): Turn off RF for RegRfOff ----------\n"));
 			MgntActSet_RF_State(Adapter, eRfOff, RF_CHANGE_BY_SW);
 			// Those actions will be discard in MgntActSet_RF_State because of the same state
@@ -4406,7 +4399,8 @@
 	/* RTL8190 set this bit to indicate that Hw does not decrypt packet */
 	stats->Decrypted = !desc->SWDec;
 
-	if ((priv->ieee80211->pHTInfo->bCurrentHTSupport == true) && (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
+	if ((priv->ieee80211->pHTInfo->bCurrentHTSupport) &&
+	    (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP))
 		stats->bHwError = false;
 	else
 		stats->bHwError = stats->bCRC|stats->bICV;
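
Note: GetHalfNmodeSupportByAPs819xUsb collapses an if/else that copied a bool into a temporary into a single return of the flag itself; the remaining r8192U_core.c hunks are the same ==true/==false cleanup as earlier. The general shape, with a hypothetical flag holder standing in for the driver's private struct:

#include <linux/types.h>

struct flags {
	bool half_n_mode;
};

/* Before: a redundant comparison and a temporary for a value that is
 * already bool. */
static bool is_half_n_mode_old(struct flags *f)
{
	bool ret;

	if (f->half_n_mode == true)
		ret = true;
	else
		ret = false;
	return ret;
}

/* After: return the flag directly. */
static bool is_half_n_mode(struct flags *f)
{
	return f->half_n_mode;
}
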
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index 7ca5d8f..5277f2e 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -438,7 +438,7 @@
 
 	if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 || !priv->ieee80211->bandwidth_auto_switch.bautoswitch_enable)
 		return;
-	if (priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz == false) { /* If send packets in 40 Mhz in 20/40 */
+	if (!priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz) { /* If send packets in 40 Mhz in 20/40 */
 		if (priv->undecorated_smoothed_pwdb <= priv->ieee80211->bandwidth_auto_switch.threshold_40Mhzto20Mhz)
 			priv->ieee80211->bandwidth_auto_switch.bforced_tx20Mhz = true;
 	} else { /* in force send packets in 20 Mhz in 20/40 */
@@ -1731,7 +1731,7 @@
  *---------------------------------------------------------------------------*/
 static void dm_ctrl_initgain_byrssi(struct net_device *dev)
 {
-	if (dm_digtable.dig_enable_flag == false)
+	if (!dm_digtable.dig_enable_flag)
 		return;
 
 	if (dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM)
@@ -1750,7 +1750,7 @@
 	u8 i;
 	static u8	fw_dig;
 
-	if (dm_digtable.dig_enable_flag == false)
+	if (!dm_digtable.dig_enable_flag)
 		return;
 
 	/*DbgPrint("Dig by Sw Rssi\n");*/
@@ -1792,7 +1792,7 @@
 	static u32 reset_cnt;
 	u8 i;
 
-	if (dm_digtable.dig_enable_flag == false)
+	if (!dm_digtable.dig_enable_flag)
 		return;
 
 	if (dm_digtable.dig_algorithm_switch) {
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 6cd32eb4..2d0232f 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -212,24 +212,24 @@
 
 
 /*--------------------------Exported Function prototype---------------------*/
-extern void init_hal_dm(struct net_device *dev);
-extern void deinit_hal_dm(struct net_device *dev);
-extern void hal_dm_watchdog(struct net_device *dev);
-extern void init_rate_adaptive(struct net_device *dev);
-extern void dm_txpower_trackingcallback(struct work_struct *work);
-extern void dm_restore_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_backup_dynamic_mechanism_state(struct net_device *dev);
-extern void dm_change_dynamic_initgain_thresh(struct net_device *dev,
-					      u32 dm_type, u32 dm_value);
-extern void dm_force_tx_fw_info(struct net_device *dev,
-				u32 force_type, u32 force_value);
-extern void dm_init_edca_turbo(struct net_device *dev);
-extern void dm_rf_operation_test_callback(unsigned long data);
-extern void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-extern void dm_fsync_timer_callback(unsigned long data);
-extern void dm_cck_txpower_adjust(struct net_device *dev, bool  binch14);
-extern void dm_shadow_init(struct net_device *dev);
-extern void dm_initialize_txpower_tracking(struct net_device *dev);
+void init_hal_dm(struct net_device *dev);
+void deinit_hal_dm(struct net_device *dev);
+void hal_dm_watchdog(struct net_device *dev);
+void init_rate_adaptive(struct net_device *dev);
+void dm_txpower_trackingcallback(struct work_struct *work);
+void dm_restore_dynamic_mechanism_state(struct net_device *dev);
+void dm_backup_dynamic_mechanism_state(struct net_device *dev);
+void dm_change_dynamic_initgain_thresh(struct net_device *dev,
+				       u32 dm_type, u32 dm_value);
+void dm_force_tx_fw_info(struct net_device *dev,
+			 u32 force_type, u32 force_value);
+void dm_init_edca_turbo(struct net_device *dev);
+void dm_rf_operation_test_callback(unsigned long data);
+void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
+void dm_fsync_timer_callback(unsigned long data);
+void dm_cck_txpower_adjust(struct net_device *dev, bool  binch14);
+void dm_shadow_init(struct net_device *dev);
+void dm_initialize_txpower_tracking(struct net_device *dev);
 /*--------------------------Exported Function prototype---------------------*/
 
 
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index d6a2d97..fb5f808 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -19,6 +19,6 @@
 
 extern struct iw_handler_def r8192_wx_handlers_def;
 /* Enable  the rtl819x_core.c to share this function, david 2008.9.22 */
-extern struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
+struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev);
 
 #endif
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.h b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
index 52cd437..cc8029a 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.h
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.h
@@ -182,10 +182,10 @@
 	RT_STATUS_RESOURCE
 } rt_status, *prt_status;
 
-extern u32 cmpk_message_handle_rx(struct net_device *dev,
-		struct ieee80211_rx_stats *pstats);
-extern rt_status SendTxCommandPacket(struct net_device *dev,
-		void *pData, u32 DataLen);
+u32 cmpk_message_handle_rx(struct net_device *dev,
+			   struct ieee80211_rx_stats *pstats);
+rt_status SendTxCommandPacket(struct net_device *dev,
+			      void *pData, u32 DataLen);
 
 
 #endif
diff --git a/drivers/staging/rtl8192u/r819xU_firmware.c b/drivers/staging/rtl8192u/r819xU_firmware.c
index d27b1e2..08302df 100644
--- a/drivers/staging/rtl8192u/r819xU_firmware.c
+++ b/drivers/staging/rtl8192u/r819xU_firmware.c
@@ -66,7 +66,7 @@
 		skb  = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
 		if (!skb)
 			return false;
-		memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
+		memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
 		tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
 		tcb_desc->queue_index = TXCMD_QUEUE;
 		tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_INIT;
@@ -91,7 +91,7 @@
 		if (!priv->ieee80211->check_nic_enough_desc(dev, index) ||
 		       (!skb_queue_empty(&priv->ieee80211->skb_waitQ[index])) ||
 		       (priv->ieee80211->queue_stop)) {
-			RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
+			RT_TRACE(COMP_FIRMWARE, "=====================================================> tx full!\n");
 			skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
 		} else {
 			priv->ieee80211->softmac_hard_start_xmit(skb, dev);
@@ -144,7 +144,8 @@
 
 	/* Turn On CPU */
 	read_nic_dword(dev, CPU_GEN, &CPU_status);
-	write_nic_byte(dev, CPU_GEN, (u8)((CPU_status|CPU_GEN_PWR_STB_CPU)&0xff));
+	write_nic_byte(dev, CPU_GEN,
+		       (u8)((CPU_status | CPU_GEN_PWR_STB_CPU) & 0xff));
 	mdelay(1000);
 
 	/* Check whether CPU boot OK */
@@ -242,7 +243,7 @@
 		 * or read image file from array. Default load from IMG file
 		 */
 		if (rst_opt == OPT_SYSTEM_RESET) {
-			rc = request_firmware(&fw_entry, fw_name[init_step],&priv->udev->dev);
+			rc = request_firmware(&fw_entry, fw_name[init_step], &priv->udev->dev);
 			if (rc < 0) {
 				RT_TRACE(COMP_ERR, "request firmware fail!\n");
 				goto download_firmware_fail;
@@ -254,12 +255,12 @@
 			}
 
 			if (init_step != FW_INIT_STEP1_MAIN) {
-				memcpy(pfirmware->firmware_buf,fw_entry->data,fw_entry->size);
+				memcpy(pfirmware->firmware_buf, fw_entry->data, fw_entry->size);
 				mapped_file = pfirmware->firmware_buf;
 				file_length = fw_entry->size;
 			} else {
 				memset(pfirmware->firmware_buf, 0, 128);
-				memcpy(&pfirmware->firmware_buf[128],fw_entry->data,fw_entry->size);
+				memcpy(&pfirmware->firmware_buf[128], fw_entry->data, fw_entry->size);
 				mapped_file = pfirmware->firmware_buf;
 				file_length = fw_entry->size + 128;
 			}
@@ -319,7 +320,7 @@
 
 			rt_status = CPUcheck_firmware_ready(dev);
 			if (!rt_status) {
-				RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n",rt_status);
+				RT_TRACE(COMP_ERR, "CPUcheck_firmware_ready fail(%d)!\n", rt_status);
 				goto download_firmware_fail;
 			}
 
diff --git a/drivers/staging/rtl8192u/r819xU_phy.h b/drivers/staging/rtl8192u/r819xU_phy.h
index 66cbe3f..e672126 100644
--- a/drivers/staging/rtl8192u/r819xU_phy.h
+++ b/drivers/staging/rtl8192u/r819xU_phy.h
@@ -57,36 +57,35 @@
 #define bMaskLWord                0x0000ffff
 #define bMaskDWord                0xffffffff
 
-extern u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath);
-extern void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
-	u32 bitmask, u32 data);
-extern u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr,
-	u32 bitmask);
-extern void rtl8192_phy_SetRFReg(struct net_device *dev,
-	RF90_RADIO_PATH_E eRFPath, u32 reg_addr, u32 bitmask, u32 data);
-extern u32 rtl8192_phy_QueryRFReg(struct net_device *dev,
-	RF90_RADIO_PATH_E eRFPath, u32 reg_addr, u32 bitmask);
-extern void rtl8192_phy_configmac(struct net_device *dev);
-extern void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
-extern u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
-	HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
-extern void rtl8192_BBConfig(struct net_device *dev);
-extern void rtl8192_phy_getTxPower(struct net_device *dev);
-extern void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
-extern void rtl8192_phy_RFConfig(struct net_device *dev);
-extern void rtl8192_phy_updateInitGain(struct net_device *dev);
-extern u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
-	RF90_RADIO_PATH_E eRFPath);
+u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device *dev, u32 eRFPath);
+void rtl8192_setBBreg(struct net_device *dev, u32 reg_addr,
+		      u32 bitmask, u32 data);
+u32 rtl8192_QueryBBReg(struct net_device *dev, u32 reg_addr, u32 bitmask);
+void rtl8192_phy_SetRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+			  u32 reg_addr, u32 bitmask, u32 data);
+u32 rtl8192_phy_QueryRFReg(struct net_device *dev, RF90_RADIO_PATH_E eRFPath,
+			   u32 reg_addr, u32 bitmask);
+void rtl8192_phy_configmac(struct net_device *dev);
+void rtl8192_phyConfigBB(struct net_device *dev, u8 ConfigType);
+u8 rtl8192_phy_checkBBAndRF(struct net_device *dev,
+			    HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath);
+void rtl8192_BBConfig(struct net_device *dev);
+void rtl8192_phy_getTxPower(struct net_device *dev);
+void rtl8192_phy_setTxPower(struct net_device *dev, u8 channel);
+void rtl8192_phy_RFConfig(struct net_device *dev);
+void rtl8192_phy_updateInitGain(struct net_device *dev);
+u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device *dev,
+				      RF90_RADIO_PATH_E eRFPath);
 
-extern u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
-extern void rtl8192_SetBWMode(struct net_device *dev,
-	HT_CHANNEL_WIDTH bandwidth, HT_EXTCHNL_OFFSET offset);
-extern void rtl8192_SwChnl_WorkItem(struct net_device *dev);
+u8 rtl8192_phy_SwChnl(struct net_device *dev, u8 channel);
+void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH bandwidth,
+		       HT_EXTCHNL_OFFSET offset);
+void rtl8192_SwChnl_WorkItem(struct net_device *dev);
 void rtl8192_SetBWModeWorkItem(struct net_device *dev);
-extern bool rtl8192_SetRFPowerState(struct net_device *dev,
-	RT_RF_POWER_STATE eRFPowerState);
-extern void InitialGain819xUsb(struct net_device *dev, u8 Operation);
+bool rtl8192_SetRFPowerState(struct net_device *dev,
+			     RT_RF_POWER_STATE eRFPowerState);
+void InitialGain819xUsb(struct net_device *dev, u8 Operation);
 
-extern void InitialGainOperateWorkItemCallBack(struct work_struct *work);
+void InitialGainOperateWorkItemCallBack(struct work_struct *work);
 
 #endif
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index 5786808..c5527c1 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -134,22 +134,20 @@
 	return NULL;
 }
 
-static void set_supported_rate(u8 *SupportedRates, uint mode)
+static void set_supported_rate(u8 *rates, uint mode)
 {
-	memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+	memset(rates, 0, NDIS_802_11_LENGTH_RATES_EX);
 	switch (mode) {
 	case WIRELESS_11B:
-		memcpy(SupportedRates, WIFI_CCKRATES,
-			IEEE80211_CCK_RATE_LEN);
+		memcpy(rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
 		break;
 	case WIRELESS_11G:
 	case WIRELESS_11A:
-		memcpy(SupportedRates, WIFI_OFDMRATES,
-			IEEE80211_NUM_OFDM_RATESLEN);
+		memcpy(rates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
 		break;
 	case WIRELESS_11BG:
-		memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
-		memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
+		memcpy(rates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+		memcpy(rates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES,
 			IEEE80211_NUM_OFDM_RATESLEN);
 		break;
 	}
@@ -195,17 +193,16 @@
 	ie = r8712_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength,
 		    pdev_network->Ssid.Ssid, &sz);
 	/*supported rates*/
-	set_supported_rate(pdev_network->SupportedRates,
-			   pregistrypriv->wireless_mode);
-	rateLen = r8712_get_rateset_len(pdev_network->SupportedRates);
+	set_supported_rate(pdev_network->rates, pregistrypriv->wireless_mode);
+	rateLen = r8712_get_rateset_len(pdev_network->rates);
 	if (rateLen > 8) {
 		ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_, 8,
-			    pdev_network->SupportedRates, &sz);
+			    pdev_network->rates, &sz);
 		ie = r8712_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8),
-			    (pdev_network->SupportedRates + 8), &sz);
+			    (pdev_network->rates + 8), &sz);
 	} else
 		ie = r8712_set_ie(ie, _SUPPORTEDRATES_IE_,
-			    rateLen, pdev_network->SupportedRates, &sz);
+			    rateLen, pdev_network->rates, &sz);
 	/*DS parameter set*/
 	ie = r8712_set_ie(ie, _DSSET_IE_, 1,
 		    (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index fcb8c61..4fa2540 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -58,8 +58,8 @@
 
 	/*init recv_buf*/
 	_init_queue(&precvpriv->free_recv_buf_queue);
-	precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
-						 GFP_ATOMIC);
+	precvpriv->pallocated_recv_buf =
+		kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4, GFP_ATOMIC);
 	if (precvpriv->pallocated_recv_buf == NULL)
 		return _FAIL;
 	precvpriv->precv_buf = precvpriv->pallocated_recv_buf + 4 -
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index e35854d..ef71829 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -456,9 +456,7 @@
 	INIT_LIST_HEAD(&pcmd->list);
 	pcmd->cmdcode = _CreateBss_CMD_;
 	pcmd->parmbuf = (unsigned char *)pdev_network;
-	pcmd->cmdsz = r8712_get_ndis_wlan_bssid_ex_sz((
-			struct ndis_wlan_bssid_ex *)
-			pdev_network);
+	pcmd->cmdsz = r8712_get_wlan_bssid_ex_sz(pdev_network);
 	pcmd->rsp = NULL;
 	pcmd->rspsz = 0;
 	/* notes: translate IELength & Length after assign to cmdsz; */
@@ -471,8 +469,7 @@
 
 u8 r8712_joinbss_cmd(struct _adapter  *padapter, struct wlan_network *pnetwork)
 {
-	uint t_len = 0;
-	struct ndis_wlan_bssid_ex *psecnetwork;
+	struct wlan_bssid_ex *psecnetwork;
 	struct cmd_obj		*pcmd;
 	struct cmd_priv		*pcmdpriv = &padapter->cmdpriv;
 	struct mlme_priv	*pmlmepriv = &padapter->mlmepriv;
@@ -486,14 +483,6 @@
 	pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
 	if (pcmd == NULL)
 		return _FAIL;
-	t_len = sizeof(u32) + 6 * sizeof(unsigned char) + 2 +
-			sizeof(struct ndis_802_11_ssid) + sizeof(u32) +
-			sizeof(s32) +
-			sizeof(enum NDIS_802_11_NETWORK_TYPE) +
-			sizeof(struct NDIS_802_11_CONFIGURATION) +
-			sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) +
-			sizeof(NDIS_802_11_RATES_EX) +
-			sizeof(u32) + MAX_IE_SZ;
 
 	/* for hidden ap to set fw_state here */
 	if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) !=
@@ -511,12 +500,12 @@
 			break;
 		}
 	}
-	psecnetwork = (struct ndis_wlan_bssid_ex *)&psecuritypriv->sec_bss;
+	psecnetwork = &psecuritypriv->sec_bss;
 	if (psecnetwork == NULL) {
 		kfree(pcmd);
 		return _FAIL;
 	}
-	memcpy(psecnetwork, &pnetwork->network, t_len);
+	memcpy(psecnetwork, &pnetwork->network, sizeof(*psecnetwork));
 	psecuritypriv->authenticator_ie[0] = (unsigned char)
 					     psecnetwork->IELength;
 	if ((psecnetwork->IELength-12) < (256 - 1))
@@ -575,7 +564,7 @@
 		memcpy(&psecuritypriv->supplicant_ie[1], &psecnetwork->IEs[0],
 			255);
 	/* get cmdsz before endian conversion */
-	pcmd->cmdsz = r8712_get_ndis_wlan_bssid_ex_sz(psecnetwork);
+	pcmd->cmdsz = r8712_get_wlan_bssid_ex_sz(psecnetwork);
 #ifdef __BIG_ENDIAN
 	/* wlan_network endian conversion */
 	psecnetwork->Length = cpu_to_le32(psecnetwork->Length);
@@ -903,8 +892,7 @@
 	struct sta_info *psta = NULL;
 	struct wlan_network *pwlan = NULL;
 	struct	mlme_priv *pmlmepriv = &padapter->mlmepriv;
-	struct ndis_wlan_bssid_ex *pnetwork = (struct ndis_wlan_bssid_ex *)
-					      pcmd->parmbuf;
+	struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
 	struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
 
 	if (pcmd->res != H2C_SUCCESS)
@@ -958,11 +946,11 @@
 		} else
 			list_add_tail(&(pwlan->list),
 					 &pmlmepriv->scanned_queue.queue);
-		pnetwork->Length = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
+		pnetwork->Length = r8712_get_wlan_bssid_ex_sz(pnetwork);
 		memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
 		pwlan->fixed = true;
 		memcpy(&tgt_network->network, pnetwork,
-			(r8712_get_ndis_wlan_bssid_ex_sz(pnetwork)));
+			(r8712_get_wlan_bssid_ex_sz(pnetwork)));
 		if (pmlmepriv->fw_state & _FW_UNDER_LINKING)
 			pmlmepriv->fw_state ^= _FW_UNDER_LINKING;
 		/* we will set _FW_LINKED when there is one more sat to
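
Note: in r8712_joinbss_cmd the hand-computed t_len (a sum of member sizeofs that had to mirror the struct layout by hand) is deleted, and the copy into psecnetwork simply uses sizeof(*psecnetwork) now that both sides are struct wlan_bssid_ex. The pattern, with hypothetical types:

#include <linux/types.h>
#include <linux/string.h>

struct blob {
	u32 len;
	u8  addr[6];
	u32 ies_len;
	u8  ies[768];	/* hypothetical fixed-size trailer */
};

static void copy_blob(struct blob *dst, const struct blob *src)
{
	/*
	 * sizeof(*dst) tracks the structure definition automatically, so the
	 * length can never drift from the layout the way a hand-maintained
	 * sum of member sizes can.
	 */
	memcpy(dst, src, sizeof(*dst));
}
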
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index cb8225b..818cd88 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -123,15 +123,6 @@
 };
 
 /*
- * Caller Mode: Infra, Ad-Hoc
- * Notes: To join the specified bss
- * Command Event Mode
- */
-struct joinbss_parm {
-	struct ndis_wlan_bssid_ex network;
-};
-
-/*
  * Caller Mode: Infra, Ad-HoC(C)
  * Notes: To disconnect the current associated BSS
  * Command Mode
@@ -141,15 +132,6 @@
 };
 
 /*
- * Caller Mode: AP, Ad-HoC(M)
- * Notes: To create a BSS
- * Command Mode
- */
-struct createbss_parm {
-	struct ndis_wlan_bssid_ex network;
-};
-
-/*
  * Caller Mode: AP, Ad-HoC, Infra
  * Notes: To set the NIC mode of RTL8711
  * Command Mode
diff --git a/drivers/staging/rtl8712/rtl871x_event.h b/drivers/staging/rtl8712/rtl871x_event.h
index e03ee90..697c8d7 100644
--- a/drivers/staging/rtl8712/rtl871x_event.h
+++ b/drivers/staging/rtl8712/rtl871x_event.h
@@ -36,7 +36,7 @@
  * Used to report a bss has been scanned
 */
 struct survey_event	{
-	struct ndis_wlan_bssid_ex bss;
+	struct wlan_bssid_ex bss;
 };
 
 /*
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl.h b/drivers/staging/rtl8712/rtl871x_ioctl.h
index 8e6ef5d..c9218be 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl.h
+++ b/drivers/staging/rtl8712/rtl871x_ioctl.h
@@ -76,22 +76,18 @@
 
 extern struct iw_handler_def  r871x_handlers_def;
 
-extern	uint drv_query_info(
-	struct  net_device *MiniportAdapterContext,
-	uint Oid,
-	void *InformationBuffer,
-	u32 InformationBufferLength,
-	u32 *BytesWritten,
-	u32 *BytesNeeded
-);
+uint drv_query_info(struct net_device *MiniportAdapterContext,
+		    uint Oid,
+		    void *InformationBuffer,
+		    u32 InformationBufferLength,
+		    u32 *BytesWritten,
+		    u32 *BytesNeeded);
 
-extern	uint drv_set_info(
-	struct  net_device *MiniportAdapterContext,
-	uint Oid,
-	void *InformationBuffer,
-	u32 InformationBufferLength,
-	u32 *BytesRead,
-	u32 *BytesNeeded
-);
+uint drv_set_info(struct net_device *MiniportAdapterContext,
+		  uint Oid,
+		  void *InformationBuffer,
+		  u32 InformationBufferLength,
+		  u32 *BytesRead,
+		  u32 *BytesNeeded);
 
 #endif
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 3388f97..143be0f 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -203,14 +203,12 @@
 	}
 	/* Add the protocol name */
 	iwe.cmd = SIOCGIWNAME;
-	if ((r8712_is_cckratesonly_included((u8 *)&pnetwork->network.
-	     SupportedRates)) == true) {
+	if (r8712_is_cckratesonly_included(pnetwork->network.rates)) {
 		if (ht_cap == true)
 			snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
 		else
 			snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
-	} else if ((r8712_is_cckrates_included((u8 *)&pnetwork->network.
-		    SupportedRates)) == true) {
+	} else if (r8712_is_cckrates_included(pnetwork->network.rates)) {
 		if (ht_cap == true)
 			snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
 		else
@@ -270,9 +268,9 @@
 	iwe.u.bitrate.disabled = 0;
 	iwe.u.bitrate.value = 0;
 	i = 0;
-	while (pnetwork->network.SupportedRates[i] != 0) {
+	while (pnetwork->network.rates[i] != 0) {
 		/* Bit rate given in 500 kb/s units */
-		iwe.u.bitrate.value = (pnetwork->network.SupportedRates[i++] &
+		iwe.u.bitrate.value = (pnetwork->network.rates[i++] &
 				      0x7F) * 500000;
 		current_val = iwe_stream_add_value(info, start, current_val,
 			      stop, &iwe, IW_EV_PARAM_LEN);
@@ -634,8 +632,8 @@
 	char *p;
 	u8 ht_cap = false;
 	struct	mlme_priv	*pmlmepriv = &(padapter->mlmepriv);
-	struct ndis_wlan_bssid_ex  *pcur_bss = &pmlmepriv->cur_network.network;
-	NDIS_802_11_RATES_EX *prates = NULL;
+	struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+	u8 *prates;
 
 	if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) ==
 	    true) {
@@ -644,15 +642,15 @@
 				 &ht_ielen, pcur_bss->IELength - 12);
 		if (p && ht_ielen > 0)
 			ht_cap = true;
-		prates = &pcur_bss->SupportedRates;
-		if (r8712_is_cckratesonly_included((u8 *)prates) == true) {
+		prates = pcur_bss->rates;
+		if (r8712_is_cckratesonly_included(prates) == true) {
 			if (ht_cap == true)
 				snprintf(wrqu->name, IFNAMSIZ,
 					 "IEEE 802.11bn");
 			else
 				snprintf(wrqu->name, IFNAMSIZ,
 					 "IEEE 802.11b");
-		} else if ((r8712_is_cckrates_included((u8 *)prates)) == true) {
+		} else if ((r8712_is_cckrates_included(prates)) == true) {
 			if (ht_cap == true)
 				snprintf(wrqu->name, IFNAMSIZ,
 					 "IEEE 802.11bgn");
@@ -723,7 +721,7 @@
 {
 	struct _adapter *padapter = netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-	struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+	struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
 
 	if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
 		wrqu->freq.m = ieee80211_wlan_frequencies[
@@ -1111,7 +1109,7 @@
 {
 	struct _adapter *padapter = netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-	struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+	struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
 
 	wrqu->ap_addr.sa_family = ARPHRD_ETHER;
 	if (check_fwstate(pmlmepriv, _FW_LINKED | WIFI_ADHOC_MASTER_STATE |
@@ -1327,7 +1325,7 @@
 {
 	struct _adapter *padapter = netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-	struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+	struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
 	u32 len, ret = 0;
 
 	if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
@@ -1419,7 +1417,7 @@
 {
 	struct _adapter *padapter = netdev_priv(dev);
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-	struct ndis_wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+	struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
 	struct ieee80211_ht_cap *pht_capie;
 	unsigned char rf_type = padapter->registrypriv.rf_config;
 	int i;
@@ -1444,9 +1442,9 @@
 				    (IEEE80211_HT_CAP_SGI_20 |
 				    IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
 		}
-		while ((pcur_bss->SupportedRates[i] != 0) &&
-			(pcur_bss->SupportedRates[i] != 0xFF)) {
-			rate = pcur_bss->SupportedRates[i] & 0x7F;
+		while ((pcur_bss->rates[i] != 0) &&
+			(pcur_bss->rates[i] != 0xFF)) {
+			rate = pcur_bss->rates[i] & 0x7F;
 			if (rate > max_rate)
 				max_rate = rate;
 			wrqu->bitrate.fixed = 0;	/* no auto select */
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index c044b0e..fc5dbea 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -208,19 +208,9 @@
 	pibss[5] = (u8)((curtime>>16) & 0xff);
 }
 
-uint r8712_get_ndis_wlan_bssid_ex_sz(struct ndis_wlan_bssid_ex *bss)
+uint r8712_get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
 {
-	uint t_len;
-
-	t_len = sizeof(u32) + 6 * sizeof(unsigned long) + 2 +
-			sizeof(struct ndis_802_11_ssid) + sizeof(u32) +
-			sizeof(s32) +
-			sizeof(enum NDIS_802_11_NETWORK_TYPE) +
-			sizeof(struct NDIS_802_11_CONFIGURATION) +
-			sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) +
-			sizeof(NDIS_802_11_RATES_EX) +
-			sizeof(u32) + bss->IELength;
-	return t_len;
+	return sizeof(*bss) + bss->IELength - MAX_IE_SZ;
 }
 
 u8 *r8712_get_capability_from_ie(u8 *ie)
@@ -286,8 +276,8 @@
 
 }
 
-static int is_same_network(struct ndis_wlan_bssid_ex *src,
-			   struct ndis_wlan_bssid_ex *dst)
+static int is_same_network(struct wlan_bssid_ex *src,
+			   struct wlan_bssid_ex *dst)
 {
 	 u16 s_cap, d_cap;
 
@@ -332,8 +322,8 @@
 	return oldest;
 }
 
-static void update_network(struct ndis_wlan_bssid_ex *dst,
-			   struct ndis_wlan_bssid_ex *src,
+static void update_network(struct wlan_bssid_ex *dst,
+			   struct wlan_bssid_ex *src,
 			   struct _adapter *padapter)
 {
 	u32 last_evm = 0, tmpVal;
@@ -366,11 +356,11 @@
 		src->Rssi = padapter->recvpriv.signal;
 	} else
 		src->Rssi = (src->Rssi + dst->Rssi) / 2;
-	memcpy((u8 *)dst, (u8 *)src, r8712_get_ndis_wlan_bssid_ex_sz(src));
+	memcpy((u8 *)dst, (u8 *)src, r8712_get_wlan_bssid_ex_sz(src));
 }
 
 static void update_current_network(struct _adapter *adapter,
-				   struct ndis_wlan_bssid_ex *pnetwork)
+				   struct wlan_bssid_ex *pnetwork)
 {
 	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
 
@@ -388,7 +378,7 @@
 Caller must hold pmlmepriv->lock first.
 */
 static void update_scanned_network(struct _adapter *adapter,
-			    struct ndis_wlan_bssid_ex *target)
+			    struct wlan_bssid_ex *target)
 {
 	struct list_head *plist, *phead;
 
@@ -426,7 +416,7 @@
 			target->Rssi = (pnetwork->network.Rssi +
 					target->Rssi) / 2;
 			memcpy(&pnetwork->network, target,
-				r8712_get_ndis_wlan_bssid_ex_sz(target));
+				r8712_get_wlan_bssid_ex_sz(target));
 			pnetwork->last_scanned = jiffies;
 		} else {
 			/* Otherwise just pull from the free list */
@@ -434,7 +424,7 @@
 			pnetwork = alloc_network(pmlmepriv);
 			if (pnetwork == NULL)
 				return;
-			bssid_ex_sz = r8712_get_ndis_wlan_bssid_ex_sz(target);
+			bssid_ex_sz = r8712_get_wlan_bssid_ex_sz(target);
 			target->Length = bssid_ex_sz;
 			memcpy(&pnetwork->network, target, bssid_ex_sz);
 			list_add_tail(&pnetwork->list, &queue->queue);
@@ -451,7 +441,7 @@
 }
 
 static void rtl8711_add_network(struct _adapter *adapter,
-			 struct ndis_wlan_bssid_ex *pnetwork)
+			 struct wlan_bssid_ex *pnetwork)
 {
 	unsigned long irqL;
 	struct mlme_priv *pmlmepriv = &(((struct _adapter *)adapter)->mlmepriv);
@@ -507,10 +497,10 @@
 {
 	unsigned long flags;
 	u32 len;
-	struct ndis_wlan_bssid_ex *pnetwork;
+	struct wlan_bssid_ex *pnetwork;
 	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
 
-	pnetwork = (struct ndis_wlan_bssid_ex *)pbuf;
+	pnetwork = (struct wlan_bssid_ex *)pbuf;
 #ifdef __BIG_ENDIAN
 	/* endian_convert */
 	pnetwork->Length = le32_to_cpu(pnetwork->Length);
@@ -538,7 +528,7 @@
 		 le32_to_cpu(pnetwork->InfrastructureMode);
 	pnetwork->IELength = le32_to_cpu(pnetwork->IELength);
 #endif
-	len = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork);
+	len = r8712_get_wlan_bssid_ex_sz(pnetwork);
 	if (len > sizeof(struct wlan_bssid_ex))
 		return;
 	spin_lock_irqsave(&pmlmepriv->lock2, flags);
@@ -769,7 +759,7 @@
 	the_same_macaddr = !memcmp(pnetwork->network.MacAddress,
 				   cur_network->network.MacAddress, ETH_ALEN);
 	pnetwork->network.Length =
-		 r8712_get_ndis_wlan_bssid_ex_sz(&pnetwork->network);
+		 r8712_get_wlan_bssid_ex_sz(&pnetwork->network);
 	spin_lock_irqsave(&pmlmepriv->lock, irqL);
 	if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex))
 		goto ignore_joinbss_callback;
@@ -1001,7 +991,7 @@
 			pdev_network = &(adapter->registrypriv.dev_network);
 			pibss = adapter->registrypriv.dev_network.MacAddress;
 			memcpy(pdev_network, &tgt_network->network,
-				r8712_get_ndis_wlan_bssid_ex_sz(&tgt_network->
+				r8712_get_wlan_bssid_ex_sz(&tgt_network->
 							network));
 			memcpy(&pdev_network->Ssid,
 				&pmlmepriv->assoc_ssid,
@@ -1668,8 +1658,7 @@
 	 */
 	sz = r8712_generate_ie(pregistrypriv);
 	pdev_network->IELength = sz;
-	pdev_network->Length = r8712_get_ndis_wlan_bssid_ex_sz(
-			      (struct ndis_wlan_bssid_ex *)pdev_network);
+	pdev_network->Length = r8712_get_wlan_bssid_ex_sz(pdev_network);
 }
 
 /*the function is at passive_level*/
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.h b/drivers/staging/rtl8712/rtl871x_mlme.h
index 42bd0bf..08d6c98 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.h
+++ b/drivers/staging/rtl8712/rtl871x_mlme.h
@@ -202,7 +202,7 @@
 		   struct security_priv *psecuritypriv, sint keyid);
 sint r8712_set_auth(struct _adapter *adapter,
 		    struct security_priv *psecuritypriv);
-uint r8712_get_ndis_wlan_bssid_ex_sz(struct ndis_wlan_bssid_ex *bss);
+uint r8712_get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss);
 void r8712_generate_random_ibss(u8 *pibss);
 u8 *r8712_get_capability_from_ie(u8 *ie);
 struct wlan_network *r8712_get_oldest_wlan_network(
diff --git a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
index 0b54612..77f01bf 100644
--- a/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
+++ b/drivers/staging/rtl8712/rtl871x_mp_ioctl.c
@@ -160,13 +160,13 @@
 	struct mp_priv *pmppriv = &padapter->mppriv;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-	struct ndis_wlan_bssid_ex bssid;
+	struct wlan_bssid_ex bssid;
 	struct sta_info *psta;
 	unsigned long length;
 	unsigned long irqL;
 	int res = _SUCCESS;
 
-	/* 3 1. initialize a new struct ndis_wlan_bssid_ex */
+	/* 3 1. initialize a new struct wlan_bssid_ex */
 	memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN);
 	bssid.Ssid.SsidLength = 16;
 	memcpy(bssid.Ssid.Ssid, (unsigned char *)"mp_pseudo_adhoc",
@@ -174,7 +174,7 @@
 	bssid.InfrastructureMode = Ndis802_11IBSS;
 	bssid.NetworkTypeInUse = Ndis802_11DS;
 	bssid.IELength = 0;
-	length = r8712_get_ndis_wlan_bssid_ex_sz(&bssid);
+	length = r8712_get_wlan_bssid_ex_sz(&bssid);
 	if (length % 4) {
 		/*round up to multiple of 4 bytes.*/
 		bssid.Length = ((length >> 2) + 1) << 2;
diff --git a/drivers/staging/rtl8712/rtl871x_security.c b/drivers/staging/rtl8712/rtl871x_security.c
index bcd1a51..8627928 100644
--- a/drivers/staging/rtl8712/rtl871x_security.c
+++ b/drivers/staging/rtl8712/rtl871x_security.c
@@ -124,28 +124,25 @@
 
 static void crc32_init(void)
 {
+	sint i, j;
+	u32 c;
+	u8 *p = (u8 *)&c, *p1;
+	u8 k;
+
 	if (bcrc32initialized == 1)
 		return;
-	else {
-		sint i, j;
-		u32 c;
-		u8 *p = (u8 *)&c, *p1;
-		u8 k;
 
-		c = 0x12340000;
-		for (i = 0; i < 256; ++i) {
-			k = crc32_reverseBit((u8)i);
-			for (c = ((u32)k) << 24, j = 8; j > 0; --j)
-				c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY :
-				    (c << 1);
-			p1 = (u8 *)&crc32_table[i];
-			p1[0] = crc32_reverseBit(p[3]);
-			p1[1] = crc32_reverseBit(p[2]);
-			p1[2] = crc32_reverseBit(p[1]);
-			p1[3] = crc32_reverseBit(p[0]);
-		}
-		bcrc32initialized = 1;
+	for (i = 0; i < 256; ++i) {
+		k = crc32_reverseBit((u8)i);
+		for (c = ((u32)k) << 24, j = 8; j > 0; --j)
+			c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+		p1 = (u8 *)&crc32_table[i];
+		p1[0] = crc32_reverseBit(p[3]);
+		p1[1] = crc32_reverseBit(p[2]);
+		p1[2] = crc32_reverseBit(p[1]);
+		p1[3] = crc32_reverseBit(p[0]);
 	}
+	bcrc32initialized = 1;
 }
 
 static u32 getcrc32(u8 *buf, u32 len)
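crc32_init() above, now un-indented after the early return, fills a lookup table for the reflected CRC-32: each entry is computed MSB-first from the bit-reversed index and then bit-reversed again byte by byte. A standalone sketch of the same construction; CRC32_POLY is assumed to be the standard 0x04c11db7, and, like the driver code, the byte-pointer view of the result depends on host byte order.

#include <stdio.h>
#include <stdint.h>

#define CRC32_POLY 0x04c11db7u	/* assumed: standard CRC-32 polynomial */

static uint8_t reverse_bit(uint8_t b)
{
	b = (b & 0xF0) >> 4 | (b & 0x0F) << 4;
	b = (b & 0xCC) >> 2 | (b & 0x33) << 2;
	b = (b & 0xAA) >> 1 | (b & 0x55) << 1;
	return b;
}

static uint32_t crc32_table[256];

/* Same construction as crc32_init(): run the MSB-first CRC on the
 * bit-reversed index, then bit-reverse each byte of the result so the
 * table can be used with LSB-first (reflected) data. */
static void build_crc32_table(void)
{
	for (int i = 0; i < 256; i++) {
		uint32_t c = (uint32_t)reverse_bit((uint8_t)i) << 24;

		for (int j = 8; j > 0; j--)
			c = (c & 0x80000000u) ? (c << 1) ^ CRC32_POLY : (c << 1);

		/* byte-wise reversal through pointers, as in the driver;
		 * note this view depends on host endianness */
		uint8_t *p = (uint8_t *)&c, *p1 = (uint8_t *)&crc32_table[i];
		p1[0] = reverse_bit(p[3]);
		p1[1] = reverse_bit(p[2]);
		p1[2] = reverse_bit(p[1]);
		p1[3] = reverse_bit(p[0]);
	}
}

int main(void)
{
	build_crc32_table();
	printf("table[1] = 0x%08x\n", (unsigned)crc32_table[1]);
	return 0;
}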
diff --git a/drivers/staging/rtl8712/wlan_bssdef.h b/drivers/staging/rtl8712/wlan_bssdef.h
index 2ea8a3d..fda5707 100644
--- a/drivers/staging/rtl8712/wlan_bssdef.h
+++ b/drivers/staging/rtl8712/wlan_bssdef.h
@@ -32,11 +32,6 @@
 #define NDIS_802_11_LENGTH_RATES        8
 #define NDIS_802_11_LENGTH_RATES_EX     16
 
-/* Set of 8 data rates*/
-typedef unsigned char   NDIS_802_11_RATES[NDIS_802_11_LENGTH_RATES];
-/* Set of 16 data rates */
-typedef unsigned char   NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX];
-
 struct ndis_802_11_ssid {
 	u32 SsidLength;
 	u8  Ssid[32];
@@ -83,18 +78,7 @@
 	u16 Capabilities;
 };
 
-/*
- * Length is the 4 bytes multiples of the sume of
- * 6 * sizeof (unsigned char) + 2 + sizeof (ndis_802_11_ssid) + sizeof (u32)
- * + sizeof (s32) + sizeof (NDIS_802_11_NETWORK_TYPE)
- * + sizeof (struct NDIS_802_11_CONFIGURATION)
- * + sizeof (NDIS_802_11_RATES_EX) + IELength
-
- * Except the IELength, all other fields are fixed length. Therefore, we can
- * define a macro to present the partial sum.
- */
-
-struct ndis_wlan_bssid_ex {
+struct wlan_bssid_ex {
 	u32 Length;
 	unsigned char  MacAddress[6];
 	u8  Reserved[2];
@@ -104,7 +88,8 @@
 	enum NDIS_802_11_NETWORK_TYPE  NetworkTypeInUse;
 	struct NDIS_802_11_CONFIGURATION  Configuration;
 	enum NDIS_802_11_NETWORK_INFRASTRUCTURE  InfrastructureMode;
-	NDIS_802_11_RATES_EX  SupportedRates;
+	u8 rates[NDIS_802_11_LENGTH_RATES_EX];
+	/* number of content bytes in IEs, which varies */
 	u32 IELength;
 	/*(timestamp, beacon interval, and capability information) */
 	u8 IEs[MAX_IE_SZ];
@@ -213,7 +198,7 @@
 	unsigned int	last_scanned; /*timestamp for the network */
 	int	aid;		/*will only be valid when a BSS is joined. */
 	int	join_res;
-	struct ndis_wlan_bssid_ex network; /*must be the last item */
+	struct wlan_bssid_ex network; /*must be the last item */
 };
 
 enum VRTL_CARRIER_SENSE {
@@ -244,24 +229,5 @@
 #define NUM_PRE_AUTH_KEY 16
 #define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
 
-/*
- *	WPA2
- */
-struct wlan_bssid_ex {
-	u32 Length;
-	unsigned char  MacAddress[6];
-	u8  Reserved[2];
-	struct ndis_802_11_ssid  Ssid;
-	u32 Privacy;
-	s32 Rssi;
-	enum NDIS_802_11_NETWORK_TYPE  NetworkTypeInUse;
-	struct NDIS_802_11_CONFIGURATION  Configuration;
-	enum NDIS_802_11_NETWORK_INFRASTRUCTURE  InfrastructureMode;
-	NDIS_802_11_RATES_EX  SupportedRates;
-	u32 IELength;
-	u8  IEs[MAX_IE_SZ];	/* (timestamp, beacon interval, and capability
-				 * information) */
-};
-
 #endif /* #ifndef WLAN_BSSDEF_H_ */
 
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 274a4b6..ad0549c 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -1554,8 +1554,7 @@
 	ether_addr_copy(ptr + ETH_ALEN, pattrib->src);
 
 	if (!bsnaphdr) {
-		len = htons(len);
-		memcpy(ptr + 12, &len, 2);
+		put_unaligned_be16(len, ptr + 12);
 	}
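put_unaligned_be16() stores the 16-bit length field big-endian at an arbitrary byte offset, replacing the htons()-into-a-local plus memcpy() pair. The kernel helper boils down to two byte stores; an open-coded userspace equivalent:

#include <stdio.h>
#include <stdint.h>

/* Open-coded equivalent of the kernel's put_unaligned_be16(): write a
 * 16-bit value big-endian, byte by byte, so alignment never matters. */
static void put_be16(uint16_t val, uint8_t *p)
{
	p[0] = val >> 8;
	p[1] = val & 0xff;
}

int main(void)
{
	uint8_t hdr[14] = { 0 };

	put_be16(0x0123, hdr + 12);	/* length/type field at offset 12 */
	printf("%02x %02x\n", hdr[12], hdr[13]);
	return 0;
}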
 
 
diff --git a/drivers/staging/rtl8723au/core/rtw_security.c b/drivers/staging/rtl8723au/core/rtw_security.c
index af53c92..3d40bab 100644
--- a/drivers/staging/rtl8723au/core/rtw_security.c
+++ b/drivers/staging/rtl8723au/core/rtw_security.c
@@ -148,7 +148,7 @@
 		     struct xmit_frame *pxmitframe)
 {
 	/*  exclude ICV */
-	unsigned char crc[4];
+	__le32 crc;
 	struct arc4context mycontext;
 	int curfragnum, length, index;
 	u32 keylength;
@@ -186,18 +186,20 @@
 			length = pattrib->last_txcmdsz - pattrib->hdrlen -
 				pattrib->iv_len - pattrib->icv_len;
 
-			*((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+			crc = cpu_to_le32(getcrc32(payload, length));
 
 			arcfour_init(&mycontext, wepkey, 3 + keylength);
 			arcfour_encrypt(&mycontext, payload, payload, length);
-			arcfour_encrypt(&mycontext, payload + length, crc, 4);
+			arcfour_encrypt(&mycontext, payload + length,
+					(char *)&crc, 4);
 		} else {
 			length = pxmitpriv->frag_len - pattrib->hdrlen -
 				pattrib->iv_len - pattrib->icv_len;
-			*((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+			crc = cpu_to_le32(getcrc32(payload, length));
 			arcfour_init(&mycontext, wepkey, 3 + keylength);
 			arcfour_encrypt(&mycontext, payload, payload, length);
-			arcfour_encrypt(&mycontext, payload + length, crc, 4);
+			arcfour_encrypt(&mycontext, payload + length,
+					(char *)&crc, 4);
 
 			pframe += pxmitpriv->frag_len;
 			pframe = PTR_ALIGN(pframe, 4);
@@ -602,7 +604,7 @@
 	u32 pnh;
 	u8 rc4key[16];
 	u8 ttkey[16];
-	u8 crc[4];
+	__le32 crc;
 	u8 hw_hdr_offset = 0;
 	struct arc4context mycontext;
 	int curfragnum, length;
@@ -679,11 +681,12 @@
 				 "pattrib->iv_len =%x, pattrib->icv_len =%x\n",
 				 pattrib->iv_len,
 				 pattrib->icv_len);
-			*((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+			crc = cpu_to_le32(getcrc32(payload, length));
 
 			arcfour_init(&mycontext, rc4key, 16);
 			arcfour_encrypt(&mycontext, payload, payload, length);
-			arcfour_encrypt(&mycontext, payload + length, crc, 4);
+			arcfour_encrypt(&mycontext, payload + length,
+					(char *)&crc, 4);
 
 		} else {
 			length = (pxmitpriv->frag_len -
@@ -691,10 +694,11 @@
 				  pattrib->iv_len -
 				  pattrib->icv_len);
 
-			*((u32 *)crc) = cpu_to_le32(getcrc32(payload, length));
+			crc = cpu_to_le32(getcrc32(payload, length));
 			arcfour_init(&mycontext, rc4key, 16);
 			arcfour_encrypt(&mycontext, payload, payload, length);
-			arcfour_encrypt(&mycontext, payload + length, crc, 4);
+			arcfour_encrypt(&mycontext, payload + length,
+					(char *)&crc, 4);
 
 			pframe += pxmitpriv->frag_len;
 			pframe  = PTR_ALIGN(pframe, 4);
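The crc local becomes __le32 so sparse can check the cpu_to_le32() assignment; the byte array plus *(u32 *) cast did the same job without the annotation. Functionally the ICV is still the little-endian CRC-32 of the fragment, appended after the payload and covered by the same RC4 stream. A sketch of that layout, with the driver's getcrc32()/arcfour_encrypt() helpers represented only by placeholders:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Store a 32-bit ICV in little-endian byte order, as cpu_to_le32() plus
 * the memcpy/encrypt of &crc effectively does in the driver. */
static void store_le32(uint32_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t frame[64];
	size_t len = 16;			/* payload length */
	uint32_t icv = 0xdeadbeef;		/* stand-in for getcrc32(payload, len) */

	memset(frame, 0xaa, len);
	store_le32(icv, frame + len);		/* ICV follows the payload */
	/* arcfour_encrypt() would now cover frame[0..len+3] with one keystream */
	printf("last ICV byte: %02x\n", frame[len + 3]);
	return 0;
}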
diff --git a/drivers/staging/rtl8723au/hal/odm.c b/drivers/staging/rtl8723au/hal/odm.c
index f354f5e..6b9dbef 100644
--- a/drivers/staging/rtl8723au/hal/odm.c
+++ b/drivers/staging/rtl8723au/hal/odm.c
@@ -985,7 +985,7 @@
 			val32 = rtl8723au_read32(adapter, 0x874);
 			val32 |= pDM_PSTable->Reg874;
 			rtl8723au_write32(adapter, 0x874, val32);
-		
+
 			val32 = rtl8723au_read32(adapter, 0xc70);
 			val32 |= pDM_PSTable->RegC70;
 			rtl8723au_write32(adapter, 0xc70, val32);
diff --git a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
index 342dec3..a63c6cb 100644
--- a/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
+++ b/drivers/staging/rtl8723au/hal/odm_RegConfig8723A.c
@@ -21,7 +21,7 @@
 	struct dm_odm_t *pDM_Odm,
 	u32					Addr,
 	u32					Data,
-  enum RF_RADIO_PATH     RF_PATH,
+	enum RF_RADIO_PATH     RF_PATH,
 	u32				    RegAddr
 	)
 {
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index cb5076a..cf2388f 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1838,7 +1838,7 @@
 
 static void rtl8723a_cal_txdesc_chksum(struct tx_desc *ptxdesc)
 {
-	u16 *usPtr = (u16 *) ptxdesc;
+	__le16 *usPtr = (__le16 *)ptxdesc;
 	u32 count = 16;		/*  (32 bytes / 2 bytes per XOR) => 16 times */
 	u32 index;
 	u16 checksum = 0;
@@ -1847,7 +1847,7 @@
 	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
 
 	for (index = 0; index < count; index++)
-		checksum ^= le16_to_cpu(*(usPtr + index));
+		checksum ^= le16_to_cpu(usPtr[index]);
 
 	ptxdesc->txdw7 |= cpu_to_le32(checksum & 0x0000ffff);
 }
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index ee818b0..cdaa1ab 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -1121,11 +1121,10 @@
 
 #ifdef SUPPORT_MSXC
 		if ((buf[cur_addr_off + 8] == 0x10) ||
-			(buf[cur_addr_off + 8] == 0x13))
+			(buf[cur_addr_off + 8] == 0x13)) {
 #else
-		if (buf[cur_addr_off + 8] == 0x10)
+		if (buf[cur_addr_off + 8] == 0x10) {
 #endif
-		{
 			sys_info_addr = ((u32)buf[cur_addr_off + 0] << 24) |
 				((u32)buf[cur_addr_off + 1] << 16) |
 				((u32)buf[cur_addr_off + 2] << 8) |
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index a8d657b..d6c4982 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -246,11 +246,10 @@
 				}
 			}
 #ifdef SUPPORT_SD_LOCK
-			if (ptr[1] & 0x7D)
+			if (ptr[1] & 0x7D) {
 #else
-			if (ptr[1] & 0x7F)
+			if (ptr[1] & 0x7F) {
 #endif
-			{
 				dev_dbg(rtsx_dev(chip), "ptr[1]: 0x%02x\n",
 					ptr[1]);
 				rtsx_trace(chip);
@@ -3520,12 +3519,11 @@
 			if (chip->sd_io) {
 				rtsx_trace(chip);
 				return STATUS_FAIL;
-			} else {
-				retval = reset_mmc(chip);
-				if (retval != STATUS_SUCCESS) {
-					rtsx_trace(chip);
-					return STATUS_FAIL;
-				}
+			}
+			retval = reset_mmc(chip);
+			if (retval != STATUS_SUCCESS) {
+				rtsx_trace(chip);
+				return STATUS_FAIL;
 			}
 		}
 	}
@@ -4149,11 +4147,10 @@
 			}
 		}
 #ifdef SUPPORT_SD_LOCK
-		if (ptr[1] & 0x7D)
+		if (ptr[1] & 0x7D) {
 #else
-		if (ptr[1] & 0x7F)
+		if (ptr[1] & 0x7F) {
 #endif
-		{
 			rtsx_trace(chip);
 			return STATUS_FAIL;
 		}
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index a609f3e..8585970 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -2329,6 +2329,7 @@
 
 	if (!adapter->isp_initialized) {
 		unsigned long flags;
+
 		pshmem = (struct slic_shmem *)(unsigned long)
 			 adapter->phys_shmem;
 
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index f4975d2..5e6798e 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -268,7 +268,7 @@
 #endif
 
 
-	if (pInitParam->powerMode != 0 )
+	if (pInitParam->powerMode != 0)
 		pInitParam->powerMode = 0;
 	setPowerMode(pInitParam->powerMode);
 
@@ -464,17 +464,18 @@
 		RN = N * request;
 		quo = RN / input;
 		rem = RN % input;/* rem always small than 14318181 */
-		fl_quo = (rem * 10000 /input);
+		fl_quo = (rem * 10000 / input);
 
 		for (d = xcnt - 1; d >= 0; d--) {
 			X = xparm[d].value;
 			M = quo*X;
 			M += fl_quo * X / 10000;
 			/* round step */
-			M += (fl_quo*X % 10000)>5000?1:0;
+			M += (fl_quo*X % 10000) > 5000 ? 1 : 0;
 			if (M < 256 && M > 0) {
 				unsigned int diff;
-				tmpClock = pll->inputFreq *M / N / X;
+
+				tmpClock = pll->inputFreq * M / N / X;
 				diff = absDiff(tmpClock, request_orig);
 				if (diff < miniDiff) {
 					pll->M = M;
@@ -599,9 +600,9 @@
        On returning a 32 bit number, the value can be applied to any PLL in the calling function.
     */
 	ulPllReg =
-	FIELD_SET(  0, PANEL_PLL_CTRL, BYPASS, OFF)
-	| FIELD_SET(  0, PANEL_PLL_CTRL, POWER,  ON)
-	| FIELD_SET(  0, PANEL_PLL_CTRL, INPUT,  OSC)
+	FIELD_SET(0, PANEL_PLL_CTRL, BYPASS, OFF)
+	| FIELD_SET(0, PANEL_PLL_CTRL, POWER,  ON)
+	| FIELD_SET(0, PANEL_PLL_CTRL, INPUT,  OSC)
 #ifndef VALIDATION_CHIP
 	| FIELD_VALUE(0, PANEL_PLL_CTRL, POD,    pPLL->POD)
 #endif
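The calcPllValue() hunk above computes M = (N * request / input) * X in fixed point: fl_quo holds the fractional part of the quotient scaled by 10000, and the "round step" bumps M when the remaining fraction exceeds one half. A worked sketch of that rounding; the example numbers are illustrative, not taken from the driver.

#include <stdio.h>

/* Sketch of the fixed-point M computation from calcPllValue():
 * M = (N * request / input) * X, rounded to the nearest integer, using an
 * integer quotient plus a 1/10000-scaled fractional part. */
static unsigned int calc_m(unsigned long long input, unsigned long long request,
			   unsigned int N, unsigned int X)
{
	unsigned long long RN = N * request;
	unsigned long long quo = RN / input;
	unsigned long long fl_quo = (RN % input) * 10000 / input; /* fraction * 10000 */
	unsigned long long M = quo * X + fl_quo * X / 10000;

	M += (fl_quo * X % 10000) > 5000 ? 1 : 0;	/* round to nearest */
	return (unsigned int)M;
}

int main(void)
{
	/* example: 14318181 Hz reference, 65 MHz requested, N = 3, X = 1 */
	printf("M = %u\n", calc_m(14318181ULL, 65000000ULL, 3, 1));
	return 0;
}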
diff --git a/drivers/staging/sm750fb/ddk750_chip.h b/drivers/staging/sm750fb/ddk750_chip.h
index 4e030e8..6ff0436 100644
--- a/drivers/staging/sm750fb/ddk750_chip.h
+++ b/drivers/staging/sm750fb/ddk750_chip.h
@@ -8,8 +8,7 @@
 #include <linux/io.h>
 
 /* This is all the chips recognized by this library */
-typedef enum _logical_chip_type_t
-{
+typedef enum _logical_chip_type_t {
 	SM_UNKNOWN,
 	SM718,
 	SM750,
@@ -18,8 +17,7 @@
 logical_chip_type_t;
 
 
-typedef enum _clock_type_t
-{
+typedef enum _clock_type_t {
 	MXCLK_PLL,
 	PRIMARY_PLL,
 	SECONDARY_PLL,
@@ -28,8 +26,7 @@
 }
 clock_type_t;
 
-typedef struct _pll_value_t
-{
+typedef struct _pll_value_t {
 	clock_type_t clockType;
 	unsigned long inputFreq; /* Input clock frequency to the PLL */
 
@@ -42,8 +39,7 @@
 pll_value_t;
 
 /* input struct to initChipParam() function */
-typedef struct _initchip_param_t
-{
+typedef struct _initchip_param_t {
 	unsigned short powerMode;    /* Use power mode 0 or 1 */
 	unsigned short chipClock;    /**
 				      * Speed of main chip clock in MHz unit
diff --git a/drivers/staging/sm750fb/ddk750_display.c b/drivers/staging/sm750fb/ddk750_display.c
index a3e6720..8348113 100644
--- a/drivers/staging/sm750fb/ddk750_display.c
+++ b/drivers/staging/sm750fb/ddk750_display.c
@@ -15,16 +15,14 @@
 	cnt = 0;
 
 	/* Set the primary display control */
-	if (!ctrl)
-	{
+	if (!ctrl) {
 		ulDisplayCtrlReg = PEEK32(PANEL_DISPLAY_CTRL);
 		/* Turn on/off the Panel display control */
-		if (dispState)
-		{
+		if (dispState) {
 			/* Timing should be enabled first before enabling the plane
 			 * because changing at the same time does not guarantee that
 			 * the plane will also enabled or disabled.
-     	     */
+			 */
 			ulDisplayCtrlReg = FIELD_SET(ulDisplayCtrlReg,
 								PANEL_DISPLAY_CTRL, TIMING, ENABLE);
 			POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
@@ -45,16 +43,13 @@
 			 * until a few delay. Need to write
 			 * and read it a couple times
 			 */
-			do
-			{
+			do {
 				cnt++;
 				POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
-			} while((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) !=
+			} while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) !=
 					(ulDisplayCtrlReg & ~ulReservedBits));
 			printk("Set Panel Plane enbit:after tried %d times\n", cnt);
-		}
-		else
-		{
+		} else {
 			/* When turning off, there is no rule on the programming
 			 * sequence since whenever the clock is off, then it does not
 			 * matter whether the plane is enabled or disabled.
@@ -71,14 +66,11 @@
 			POKE32(PANEL_DISPLAY_CTRL, ulDisplayCtrlReg);
 		}
 
-	}
-	/* Set the secondary display control */
-	else
-	{
+	} else {
+		/* Set the secondary display control */
 		ulDisplayCtrlReg = PEEK32(CRT_DISPLAY_CTRL);
 
-		if (dispState)
-		{
+		if (dispState) {
 			/* Timing should be enabled first before enabling the plane because changing at the
 			   same time does not guarantee that the plane will also enabled or disabled.
 			   */
@@ -100,16 +92,13 @@
 				FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE) |
 				FIELD_SET(0, CRT_DISPLAY_CTRL, RESERVED_4_MASK, ENABLE);
 
-			do
-			{
+			do {
 				cnt++;
 				POKE32(CRT_DISPLAY_CTRL, ulDisplayCtrlReg);
-			} while((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) !=
+			} while ((PEEK32(CRT_DISPLAY_CTRL) & ~ulReservedBits) !=
 					(ulDisplayCtrlReg & ~ulReservedBits));
 				printk("Set Crt Plane enbit:after tried %d times\n", cnt);
-		}
-		else
-		{
+		} else {
 			/* When turning off, there is no rule on the programming
 			 * sequence since whenever the clock is off, then it does not
 			 * matter whether the plane is enabled or disabled.
@@ -132,71 +121,60 @@
 static void waitNextVerticalSync(int ctrl, int delay)
 {
 	unsigned int status;
-	if(!ctrl){
+
+	if (!ctrl) {
 		/* primary controller */
 
-        /* Do not wait when the Primary PLL is off or display control is already off.
-	           This will prevent the software to wait forever. */
+		/* Do not wait when the Primary PLL is off or display control is already off.
+		   This will prevent the software from waiting forever. */
 		if ((FIELD_GET(PEEK32(PANEL_PLL_CTRL), PANEL_PLL_CTRL, POWER) ==
 			 PANEL_PLL_CTRL_POWER_OFF) ||
 			(FIELD_GET(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, TIMING) ==
-			 PANEL_DISPLAY_CTRL_TIMING_DISABLE))
-		{
+			 PANEL_DISPLAY_CTRL_TIMING_DISABLE)) {
 			return;
 		}
 
-        while (delay-- > 0)
-        {
-            /* Wait for end of vsync. */
-            do
-            {
-                status = FIELD_GET(PEEK32(SYSTEM_CTRL),
-                                   SYSTEM_CTRL,
-                                   PANEL_VSYNC);
-            }
-            while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
+		while (delay-- > 0) {
+			/* Wait for end of vsync. */
+			do {
+				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
+						   SYSTEM_CTRL,
+						   PANEL_VSYNC);
+			} while (status == SYSTEM_CTRL_PANEL_VSYNC_ACTIVE);
 
-            /* Wait for start of vsync. */
-            do
-            {
-                status = FIELD_GET(PEEK32(SYSTEM_CTRL),
-                                   SYSTEM_CTRL,
-                                   PANEL_VSYNC);
-            }
-            while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE);
-        }
+			/* Wait for start of vsync. */
+			do {
+				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
+						   SYSTEM_CTRL,
+						   PANEL_VSYNC);
+			} while (status == SYSTEM_CTRL_PANEL_VSYNC_INACTIVE);
+		}
 
-	}else{
+	} else {
 
 		/* Do not wait when the Primary PLL is off or display control is already off.
 			   This will prevent the software to wait forever. */
 		if ((FIELD_GET(PEEK32(CRT_PLL_CTRL), CRT_PLL_CTRL, POWER) ==
 			 CRT_PLL_CTRL_POWER_OFF) ||
 			(FIELD_GET(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, TIMING) ==
-			 CRT_DISPLAY_CTRL_TIMING_DISABLE))
-		{
+			 CRT_DISPLAY_CTRL_TIMING_DISABLE)) {
 			return;
 		}
 
-		while (delay-- > 0)
-		{
+		while (delay-- > 0) {
 			/* Wait for end of vsync. */
-			do
-			{
+			do {
 				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
 								   SYSTEM_CTRL,
 								   CRT_VSYNC);
-			}
-			while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
+			} while (status == SYSTEM_CTRL_CRT_VSYNC_ACTIVE);
 
 			/* Wait for start of vsync. */
-			do
-			{
+			do {
 				status = FIELD_GET(PEEK32(SYSTEM_CTRL),
 								   SYSTEM_CTRL,
 								   CRT_VSYNC);
-			}
-			while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE);
+			} while (status == SYSTEM_CTRL_CRT_VSYNC_INACTIVE);
 		}
 	}
 }
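waitNextVerticalSync(), now using kernel-style do/while loops, first bails out when the PLL or timing is already off so it cannot spin forever, then for each requested frame waits for the current vsync pulse to end and the next one to begin. A sketch of that two-phase wait against a made-up status poll:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical poll helper standing in for
 * FIELD_GET(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, PANEL_VSYNC). */
static bool vsync_active(void)
{
	static int tick;

	return (++tick % 7) < 3;	/* fake waveform, just for the sketch */
}

/* Wait for 'frames' complete vsync periods: for each one, wait for the
 * current pulse to end, then for the next pulse to start. */
static void wait_vsync(int frames)
{
	while (frames-- > 0) {
		while (vsync_active())		/* wait for end of vsync */
			;
		while (!vsync_active())		/* wait for start of vsync */
			;
	}
}

int main(void)
{
	wait_vsync(2);
	puts("saw two vsync starts");
	return 0;
}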
@@ -233,14 +211,15 @@
 void ddk750_setLogicalDispOut(disp_output_t output)
 {
 	unsigned int reg;
-	if(output & PNL_2_USAGE){
+
+	if (output & PNL_2_USAGE) {
 		/* set panel path controller select */
 		reg = PEEK32(PANEL_DISPLAY_CTRL);
 		reg = FIELD_VALUE(reg, PANEL_DISPLAY_CTRL, SELECT, (output & PNL_2_MASK)>>PNL_2_OFFSET);
 		POKE32(PANEL_DISPLAY_CTRL, reg);
 	}
 
-	if(output & CRT_2_USAGE){
+	if (output & CRT_2_USAGE) {
 		/* set crt path controller select */
 		reg = PEEK32(CRT_DISPLAY_CTRL);
 		reg = FIELD_VALUE(reg, CRT_DISPLAY_CTRL, SELECT, (output & CRT_2_MASK)>>CRT_2_OFFSET);
@@ -250,58 +229,57 @@
 
 	}
 
-	if(output & PRI_TP_USAGE){
+	if (output & PRI_TP_USAGE) {
 		/* set primary timing and plane en_bit */
 		setDisplayControl(0, (output&PRI_TP_MASK)>>PRI_TP_OFFSET);
 	}
 
-	if(output & SEC_TP_USAGE){
+	if (output & SEC_TP_USAGE) {
 		/* set secondary timing and plane en_bit*/
 		setDisplayControl(1, (output&SEC_TP_MASK)>>SEC_TP_OFFSET);
 	}
 
-	if(output & PNL_SEQ_USAGE){
+	if (output & PNL_SEQ_USAGE) {
 		/* set  panel sequence */
 		swPanelPowerSequence((output&PNL_SEQ_MASK)>>PNL_SEQ_OFFSET, 4);
 	}
 
-	if(output & DAC_USAGE)
+	if (output & DAC_USAGE)
 		setDAC((output & DAC_MASK)>>DAC_OFFSET);
 
-	if(output & DPMS_USAGE)
+	if (output & DPMS_USAGE)
 		ddk750_setDPMS((output & DPMS_MASK) >> DPMS_OFFSET);
 }
 
 
 int ddk750_initDVIDisp(void)
 {
-    /* Initialize DVI. If the dviInit fail and the VendorID or the DeviceID are
-       not zeroed, then set the failure flag. If it is zeroe, it might mean
-       that the system is in Dual CRT Monitor configuration. */
+	/* Initialize DVI. If dviInit fails and the VendorID or the DeviceID are
+	   not zeroed, then set the failure flag. If it is zero, it might mean
+	   that the system is in Dual CRT Monitor configuration. */
 
-    /* De-skew enabled with default 111b value.
-       This will fix some artifacts problem in some mode on board 2.2.
-       Somehow this fix does not affect board 2.1.
-     */
-    if ((dviInit(1,  /* Select Rising Edge */
-                1,  /* Select 24-bit bus */
-                0,  /* Select Single Edge clock */
-                1,  /* Enable HSync as is */
-                1,  /* Enable VSync as is */
-                1,  /* Enable De-skew */
-                7,  /* Set the de-skew setting to maximum setup */
-                1,  /* Enable continuous Sync */
-                1,  /* Enable PLL Filter */
-                4   /* Use the recommended value for PLL Filter value */
-        ) != 0) && (dviGetVendorID() != 0x0000) && (dviGetDeviceID() != 0x0000))
-    {
-        return (-1);
-    }
+	/* De-skew enabled with default 111b value.
+	   This will fix some artifacts problem in some mode on board 2.2.
+	   Somehow this fix does not affect board 2.1.
+	 */
+	if ((dviInit(1,  /* Select Rising Edge */
+		     1,  /* Select 24-bit bus */
+		     0,  /* Select Single Edge clock */
+		     1,  /* Enable HSync as is */
+		     1,  /* Enable VSync as is */
+		     1,  /* Enable De-skew */
+		     7,  /* Set the de-skew setting to maximum setup */
+		     1,  /* Enable continuous Sync */
+		     1,  /* Enable PLL Filter */
+		     4   /* Use the recommended value for PLL Filter value */
+		     ) != 0) && (dviGetVendorID() != 0x0000) && (dviGetDeviceID() != 0x0000)) {
+		return (-1);
+	}
 
-    /* TODO: Initialize other display component */
+	/* TODO: Initialize other display component */
 
-    /* Success */
-    return 0;
+	/* Success */
+	return 0;
 
 }
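ddk750_setLogicalDispOut() above decodes a single packed word: each feature keeps its value in a MASK/OFFSET field in the low half and a matching USAGE bit in the high half that says whether the caller meant to set it. A sketch of encoding and decoding one such field; these names only mirror the PNL_2_* macros in shape and are not the driver's.

#include <stdio.h>

/* One packed field: a 2-bit value at PNL_OFFSET, plus a usage flag in the
 * upper 16 bits marking the field as "present" in an output word. */
#define PNL_OFFSET 0
#define PNL_MASK   (3u << PNL_OFFSET)
#define PNL_USAGE  (PNL_MASK << 16)
#define PNL_PRI    ((0u << PNL_OFFSET) | PNL_USAGE)
#define PNL_SEC    ((2u << PNL_OFFSET) | PNL_USAGE)

int main(void)
{
	unsigned int output = PNL_SEC;	/* caller composes the request */

	if (output & PNL_USAGE) {	/* field present: extract the value */
		unsigned int sel = (output & PNL_MASK) >> PNL_OFFSET;

		printf("panel path select = %u\n", sel);
	}
	return 0;
}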
 
diff --git a/drivers/staging/sm750fb/ddk750_display.h b/drivers/staging/sm750fb/ddk750_display.h
index ae0f84c..abccf84 100644
--- a/drivers/staging/sm750fb/ddk750_display.h
+++ b/drivers/staging/sm750fb/ddk750_display.h
@@ -8,7 +8,7 @@
 #define PNL_2_OFFSET 0
 #define PNL_2_MASK (3 << PNL_2_OFFSET)
 #define PNL_2_USAGE	(PNL_2_MASK << 16)
-#define PNL_2_PRI 	((0 << PNL_2_OFFSET)|PNL_2_USAGE)
+#define PNL_2_PRI	((0 << PNL_2_OFFSET)|PNL_2_USAGE)
 #define PNL_2_SEC	((2 << PNL_2_OFFSET)|PNL_2_USAGE)
 
 
@@ -46,7 +46,7 @@
 	0: both off
 */
 #define SEC_TP_OFFSET 5
-#define SEC_TP_MASK (1<< SEC_TP_OFFSET)
+#define SEC_TP_MASK (1 << SEC_TP_OFFSET)
 #define SEC_TP_USAGE (SEC_TP_MASK << 16)
 #define SEC_TP_ON  ((0x1 << SEC_TP_OFFSET)|SEC_TP_USAGE)
 #define SEC_TP_OFF ((0x0 << SEC_TP_OFFSET)|SEC_TP_USAGE)
@@ -67,7 +67,7 @@
 #define DAC_OFFSET 7
 #define DAC_MASK (1 << DAC_OFFSET)
 #define DAC_USAGE (DAC_MASK << 16)
-#define DAC_ON ((0x0<< DAC_OFFSET)|DAC_USAGE)
+#define DAC_ON ((0x0 << DAC_OFFSET)|DAC_USAGE)
 #define DAC_OFF ((0x1 << DAC_OFFSET)|DAC_USAGE)
 
 /* DPMS only affect D-SUB head
@@ -86,8 +86,7 @@
 	CRT means crt path DSUB
 */
 #if 0
-typedef enum _disp_output_t
-{
+typedef enum _disp_output_t {
 	NO_DISPLAY = DPMS_OFF,
 
 	LCD1_PRI = PNL_2_PRI|PRI_TP_ON|PNL_SEQ_ON|DPMS_OFF|DAC_ON,
@@ -129,7 +128,7 @@
 }
 disp_output_t;
 #else
-typedef enum _disp_output_t{
+typedef enum _disp_output_t {
 	do_LCD1_PRI = PNL_2_PRI|PRI_TP_ON|PNL_SEQ_ON|DAC_ON,
 	do_LCD1_SEC = PNL_2_SEC|SEC_TP_ON|PNL_SEQ_ON|DAC_ON,
 #if 0
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index b2bf7e6..a7a2351 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -1,4 +1,4 @@
-#define USE_DVICHIP 
+#define USE_DVICHIP
 #ifdef USE_DVICHIP
 #include "ddk750_help.h"
 #include "ddk750_reg.h"
@@ -9,47 +9,46 @@
 /* This global variable contains all the supported driver and its corresponding
    function API. Please set the function pointer to NULL whenever the function
    is not supported. */
-static dvi_ctrl_device_t g_dcftSupportedDviController[] =
-{
+static dvi_ctrl_device_t g_dcftSupportedDviController[] = {
 #ifdef DVI_CTRL_SII164
-    {
-        .pfnInit = sii164InitChip,
-        .pfnGetVendorId = sii164GetVendorID,
-        .pfnGetDeviceId = sii164GetDeviceID,
+	{
+		.pfnInit = sii164InitChip,
+		.pfnGetVendorId = sii164GetVendorID,
+		.pfnGetDeviceId = sii164GetDeviceID,
 #ifdef SII164_FULL_FUNCTIONS
-        .pfnResetChip = sii164ResetChip,
-        .pfnGetChipString = sii164GetChipString,
-        .pfnSetPower = sii164SetPower,
-        .pfnEnableHotPlugDetection = sii164EnableHotPlugDetection,
-        .pfnIsConnected = sii164IsConnected,
-        .pfnCheckInterrupt = sii164CheckInterrupt,
-        .pfnClearInterrupt = sii164ClearInterrupt,
+		.pfnResetChip = sii164ResetChip,
+		.pfnGetChipString = sii164GetChipString,
+		.pfnSetPower = sii164SetPower,
+		.pfnEnableHotPlugDetection = sii164EnableHotPlugDetection,
+		.pfnIsConnected = sii164IsConnected,
+		.pfnCheckInterrupt = sii164CheckInterrupt,
+		.pfnClearInterrupt = sii164ClearInterrupt,
 #endif
-    },
+	},
 #endif
 };
 
 
 int dviInit(
-    unsigned char edgeSelect,
-    unsigned char busSelect,
-    unsigned char dualEdgeClkSelect,
-    unsigned char hsyncEnable,
-    unsigned char vsyncEnable,
-    unsigned char deskewEnable,
-    unsigned char deskewSetting,
-    unsigned char continuousSyncEnable,
-    unsigned char pllFilterEnable,
-    unsigned char pllFilterValue
+	unsigned char edgeSelect,
+	unsigned char busSelect,
+	unsigned char dualEdgeClkSelect,
+	unsigned char hsyncEnable,
+	unsigned char vsyncEnable,
+	unsigned char deskewEnable,
+	unsigned char deskewSetting,
+	unsigned char continuousSyncEnable,
+	unsigned char pllFilterEnable,
+	unsigned char pllFilterValue
 			)
 {
 	dvi_ctrl_device_t *pCurrentDviCtrl;
+
 	pCurrentDviCtrl = g_dcftSupportedDviController;
-	if(pCurrentDviCtrl->pfnInit != NULL)
-	{
+	if (pCurrentDviCtrl->pfnInit != NULL) {
 		return pCurrentDviCtrl->pfnInit(edgeSelect, busSelect, dualEdgeClkSelect, hsyncEnable,
-                              vsyncEnable, deskewEnable, deskewSetting, continuousSyncEnable,
-                              pllFilterEnable, pllFilterValue);
+						vsyncEnable, deskewEnable, deskewSetting, continuousSyncEnable,
+						pllFilterEnable, pllFilterValue);
 	}
 	return -1; /* error */
 }
@@ -64,13 +63,13 @@
  */
 unsigned short dviGetVendorID(void)
 {
-    dvi_ctrl_device_t *pCurrentDviCtrl;
+	dvi_ctrl_device_t *pCurrentDviCtrl;
 
-    pCurrentDviCtrl = g_dcftSupportedDviController;
-    if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
-        return pCurrentDviCtrl->pfnGetVendorId();
+	pCurrentDviCtrl = g_dcftSupportedDviController;
+	if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
+		return pCurrentDviCtrl->pfnGetVendorId();
 
-    return 0x0000;
+	return 0x0000;
 }
 
 
@@ -83,13 +82,13 @@
  */
 unsigned short dviGetDeviceID(void)
 {
-    dvi_ctrl_device_t *pCurrentDviCtrl;
+	dvi_ctrl_device_t *pCurrentDviCtrl;
 
 	pCurrentDviCtrl = g_dcftSupportedDviController;
-    if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
-        return pCurrentDviCtrl->pfnGetDeviceId();
+	if (pCurrentDviCtrl != (dvi_ctrl_device_t *)0)
+		return pCurrentDviCtrl->pfnGetDeviceId();
 
-    return 0x0000;
+	return 0x0000;
 }
 
 #endif
diff --git a/drivers/staging/sm750fb/ddk750_dvi.h b/drivers/staging/sm750fb/ddk750_dvi.h
index 83bbd6d..e1d4c9a 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.h
+++ b/drivers/staging/sm750fb/ddk750_dvi.h
@@ -26,8 +26,7 @@
 typedef void (*PFN_DVICTRL_CLEARINTERRUPT)(void);
 
 /* Structure to hold all the function pointer to the DVI Controller. */
-typedef struct _dvi_ctrl_device_t
-{
+typedef struct _dvi_ctrl_device_t {
 	PFN_DVICTRL_INIT		pfnInit;
 	PFN_DVICTRL_RESETCHIP		pfnResetChip;
 	PFN_DVICTRL_GETCHIPSTRING	pfnGetChipString;
diff --git a/drivers/staging/sm750fb/ddk750_help.c b/drivers/staging/sm750fb/ddk750_help.c
index 1adcafc..9637dd3 100644
--- a/drivers/staging/sm750fb/ddk750_help.c
+++ b/drivers/staging/sm750fb/ddk750_help.c
@@ -1,8 +1,8 @@
 #include "ddk750_help.h"
 
-void __iomem *mmio750 = NULL;
-char revId750 = 0;
-unsigned short devId750 = 0;
+void __iomem *mmio750;
+char revId750;
+unsigned short devId750;
 
 /* after driver mapped io registers, use this function first */
 void ddk750_set_mmio(void __iomem *addr, unsigned short devId, char revId)
@@ -10,7 +10,7 @@
 	mmio750 = addr;
 	devId750 = devId;
 	revId750 = revId;
-	if(revId == 0xfe)
+	if (revId == 0xfe)
 		printk("found sm750le\n");
 }
 
diff --git a/drivers/staging/sm750fb/ddk750_help.h b/drivers/staging/sm750fb/ddk750_help.h
index 4285b05..3b06aed 100644
--- a/drivers/staging/sm750fb/ddk750_help.h
+++ b/drivers/staging/sm750fb/ddk750_help.h
@@ -12,8 +12,8 @@
 #if 0
 /* if 718 big endian turned on,be aware that don't use this driver for general use,only for ppc big-endian */
 #warning "big endian on target cpu and enable nature big endian support of 718 capability !"
-#define PEEK32(addr)  			__raw_readl(mmio750 + addr)
-#define POKE32(addr, data) 		__raw_writel(data, mmio750 + addr)
+#define PEEK32(addr)			__raw_readl(mmio750 + addr)
+#define POKE32(addr, data)		__raw_writel(data, mmio750 + addr)
 #else /* software control endianness */
 #define PEEK32(addr) readl(addr + mmio750)
 #define POKE32(addr, data) writel(data, addr + mmio750)
diff --git a/drivers/staging/sm750fb/ddk750_hwi2c.c b/drivers/staging/sm750fb/ddk750_hwi2c.c
index 7826376..5ddac43 100644
--- a/drivers/staging/sm750fb/ddk750_hwi2c.c
+++ b/drivers/staging/sm750fb/ddk750_hwi2c.c
@@ -10,70 +10,70 @@
 
 
 int hwI2CInit(
-    unsigned char busSpeedMode
+unsigned char busSpeedMode
 )
 {
-    unsigned int value;
+	unsigned int value;
 
-    /* Enable GPIO 30 & 31 as IIC clock & data */
+	/* Enable GPIO 30 & 31 as IIC clock & data */
 	value = PEEK32(GPIO_MUX);
 
-    value = FIELD_SET(value, GPIO_MUX, 30, I2C) |
-			FIELD_SET(0, GPIO_MUX, 31, I2C);
+	value = FIELD_SET(value, GPIO_MUX, 30, I2C) |
+			  FIELD_SET(0, GPIO_MUX, 31, I2C);
 	POKE32(GPIO_MUX, value);
 
-    /* Enable Hardware I2C power.
-       TODO: Check if we need to enable GPIO power?
-     */
-    enableI2C(1);
+	/* Enable Hardware I2C power.
+	 TODO: Check if we need to enable GPIO power?
+	 */
+	enableI2C(1);
 
-    /* Enable the I2C Controller and set the bus speed mode */
-    value = PEEK32(I2C_CTRL);
-    if (busSpeedMode == 0)
-        value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD);
-    else
-        value = FIELD_SET(value, I2C_CTRL, MODE, FAST);
-    value = FIELD_SET(value, I2C_CTRL, EN, ENABLE);
-    POKE32(I2C_CTRL, value);
+	/* Enable the I2C Controller and set the bus speed mode */
+	value = PEEK32(I2C_CTRL);
+	if (busSpeedMode == 0)
+		value = FIELD_SET(value, I2C_CTRL, MODE, STANDARD);
+	else
+		value = FIELD_SET(value, I2C_CTRL, MODE, FAST);
+	value = FIELD_SET(value, I2C_CTRL, EN, ENABLE);
+	POKE32(I2C_CTRL, value);
 
-    return 0;
+	return 0;
 }
 
 
 void hwI2CClose(void)
 {
-    unsigned int value;
+	unsigned int value;
 
-    /* Disable I2C controller */
-    value = PEEK32(I2C_CTRL);
-    value = FIELD_SET(value, I2C_CTRL, EN, DISABLE);
-    POKE32(I2C_CTRL, value);
+	/* Disable I2C controller */
+	value = PEEK32(I2C_CTRL);
+	value = FIELD_SET(value, I2C_CTRL, EN, DISABLE);
+	POKE32(I2C_CTRL, value);
 
-    /* Disable I2C Power */
-    enableI2C(0);
+	/* Disable I2C Power */
+	enableI2C(0);
 
-    /* Set GPIO 30 & 31 back as GPIO pins */
-    value = PEEK32(GPIO_MUX);
-    value = FIELD_SET(value, GPIO_MUX, 30, GPIO);
-    value = FIELD_SET(value, GPIO_MUX, 31, GPIO);
-    POKE32(GPIO_MUX, value);
+	/* Set GPIO 30 & 31 back as GPIO pins */
+	value = PEEK32(GPIO_MUX);
+	value = FIELD_SET(value, GPIO_MUX, 30, GPIO);
+	value = FIELD_SET(value, GPIO_MUX, 31, GPIO);
+	POKE32(GPIO_MUX, value);
 }
 
 
 static long hwI2CWaitTXDone(void)
 {
-    unsigned int timeout;
+	unsigned int timeout;
 
-    /* Wait until the transfer is completed. */
-    timeout = HWI2C_WAIT_TIMEOUT;
+	/* Wait until the transfer is completed. */
+	timeout = HWI2C_WAIT_TIMEOUT;
 	while ((FIELD_GET(PEEK32(I2C_STATUS), I2C_STATUS, TX) != I2C_STATUS_TX_COMPLETED) &&
-           (timeout != 0))
+	       (timeout != 0))
 		timeout--;
 
 	if (timeout == 0)
-	    return (-1);
+		return (-1);
 
-    return 0;
+	return 0;
 }
 
 
@@ -91,53 +91,52 @@
  *      Total number of bytes those are actually written.
  */
 static unsigned int hwI2CWriteData(
-    unsigned char deviceAddress,
-    unsigned int length,
-    unsigned char *pBuffer
+	unsigned char deviceAddress,
+	unsigned int length,
+	unsigned char *pBuffer
 )
 {
-    unsigned char count, i;
-    unsigned int totalBytes = 0;
+	unsigned char count, i;
+	unsigned int totalBytes = 0;
 
-    /* Set the Device Address */
-    POKE32(I2C_SLAVE_ADDRESS, deviceAddress & ~0x01);
+	/* Set the Device Address */
+	POKE32(I2C_SLAVE_ADDRESS, deviceAddress & ~0x01);
 
-    /* Write data.
-     * Note:
-     *      Only 16 byte can be accessed per i2c start instruction.
-     */
-    do
-    {
-        /* Reset I2C by writing 0 to I2C_RESET register to clear the previous status. */
-        POKE32(I2C_RESET, 0);
+	/* Write data.
+	 * Note:
+	 *      Only 16 bytes can be accessed per i2c start instruction.
+	 */
+	do {
+		/* Reset I2C by writing 0 to I2C_RESET register to clear the previous status. */
+		POKE32(I2C_RESET, 0);
 
-        /* Set the number of bytes to be written */
-        if (length < MAX_HWI2C_FIFO)
-            count = length - 1;
-        else
-            count = MAX_HWI2C_FIFO - 1;
-        POKE32(I2C_BYTE_COUNT, count);
+		/* Set the number of bytes to be written */
+		if (length < MAX_HWI2C_FIFO)
+			count = length - 1;
+		else
+			count = MAX_HWI2C_FIFO - 1;
+		POKE32(I2C_BYTE_COUNT, count);
 
-        /* Move the data to the I2C data register */
-	    for (i = 0; i <= count; i++)
-            POKE32(I2C_DATA0 + i, *pBuffer++);
+		/* Move the data to the I2C data register */
+		for (i = 0; i <= count; i++)
+			POKE32(I2C_DATA0 + i, *pBuffer++);
 
-        /* Start the I2C */
-        POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+		/* Start the I2C */
+		POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
 
-        /* Wait until the transfer is completed. */
-        if (hwI2CWaitTXDone() != 0)
-            break;
+		/* Wait until the transfer is completed. */
+		if (hwI2CWaitTXDone() != 0)
+			break;
 
+		/* Subtract length */
-        length -= (count + 1);
+		/* Substract length */
+		length -= (count + 1);
 
-        /* Total byte written */
-        totalBytes += (count + 1);
+		/* Total byte written */
+		totalBytes += (count + 1);
 
-    } while (length > 0);
+	} while (length > 0);
 
-    return totalBytes;
+	return totalBytes;
 }
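hwI2CWriteData() above shows the controller's constraint: at most MAX_HWI2C_FIFO (16) bytes per I2C start, so longer writes are split into FIFO-sized chunks, each programmed and kicked off separately and the whole transfer abandoned on a timeout. A sketch of the chunking loop with the register accesses stubbed out:

#include <stdio.h>

#define MAX_HWI2C_FIFO 16

/* Stub for one FIFO-sized hardware transfer; in the driver this is the
 * I2C_BYTE_COUNT/I2C_DATA0 programming, the START kick and the TX wait. */
static int xfer_chunk(const unsigned char *buf, unsigned int count)
{
	(void)buf;	/* real code copies buf[0..count-1] into the FIFO */
	printf("start: %u byte(s)\n", count);
	return 0;	/* 0 = transfer completed */
}

/* Split a long write into <=16-byte chunks, as hwI2CWriteData() does. */
static unsigned int i2c_write(const unsigned char *buf, unsigned int length)
{
	unsigned int total = 0;

	do {
		unsigned int count = length < MAX_HWI2C_FIFO ?
				     length : MAX_HWI2C_FIFO;

		if (xfer_chunk(buf, count) != 0)
			break;		/* bail out on timeout */
		buf += count;
		length -= count;
		total += count;
	} while (length > 0);

	return total;
}

int main(void)
{
	unsigned char data[40] = { 0 };

	printf("wrote %u bytes\n", i2c_write(data, sizeof(data)));
	return 0;
}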
 
 
@@ -158,53 +157,52 @@
  *      Total number of actual bytes read from the slave device
  */
 static unsigned int hwI2CReadData(
-    unsigned char deviceAddress,
-    unsigned int length,
-    unsigned char *pBuffer
+	unsigned char deviceAddress,
+	unsigned int length,
+	unsigned char *pBuffer
 )
 {
-    unsigned char count, i;
-    unsigned int totalBytes = 0;
+	unsigned char count, i;
+	unsigned int totalBytes = 0;
 
-    /* Set the Device Address */
-    POKE32(I2C_SLAVE_ADDRESS, deviceAddress | 0x01);
+	/* Set the Device Address */
+	POKE32(I2C_SLAVE_ADDRESS, deviceAddress | 0x01);
 
-    /* Read data and save them to the buffer.
-     * Note:
-     *      Only 16 byte can be accessed per i2c start instruction.
-     */
-    do
-    {
-        /* Reset I2C by writing 0 to I2C_RESET register to clear all the status. */
-        POKE32(I2C_RESET, 0);
+	/* Read data and save them to the buffer.
+	 * Note:
+	 *      Only 16 bytes can be accessed per i2c start instruction.
+	 */
+	do {
+		/* Reset I2C by writing 0 to I2C_RESET register to clear all the status. */
+		POKE32(I2C_RESET, 0);
 
-        /* Set the number of bytes to be read */
-        if (length <= MAX_HWI2C_FIFO)
-            count = length - 1;
-        else
-            count = MAX_HWI2C_FIFO - 1;
-        POKE32(I2C_BYTE_COUNT, count);
+		/* Set the number of bytes to be read */
+		if (length <= MAX_HWI2C_FIFO)
+			count = length - 1;
+		else
+			count = MAX_HWI2C_FIFO - 1;
+		POKE32(I2C_BYTE_COUNT, count);
 
-        /* Start the I2C */
-        POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
+		/* Start the I2C */
+		POKE32(I2C_CTRL, FIELD_SET(PEEK32(I2C_CTRL), I2C_CTRL, CTRL, START));
 
-        /* Wait until transaction done. */
-        if (hwI2CWaitTXDone() != 0)
-            break;
+		/* Wait until transaction done. */
+		if (hwI2CWaitTXDone() != 0)
+			break;
 
-        /* Save the data to the given buffer */
-        for (i = 0; i <= count; i++)
-		    *pBuffer++ = PEEK32(I2C_DATA0 + i);
+		/* Save the data to the given buffer */
+		for (i = 0; i <= count; i++)
+			*pBuffer++ = PEEK32(I2C_DATA0 + i);
 
+		/* Subtract length by 16 */
-        length -= (count + 1);
+		/* Substract length by 16 */
+		length -= (count + 1);
 
-        /* Number of bytes read. */
-        totalBytes += (count + 1);
+		/* Number of bytes read. */
+		totalBytes += (count + 1);
 
-    } while (length > 0);
+	} while (length > 0);
 
-    return totalBytes;
+	return totalBytes;
 }
 
 
@@ -222,16 +220,16 @@
  *      Register value
  */
 unsigned char hwI2CReadReg(
-    unsigned char deviceAddress,
-    unsigned char registerIndex
+	unsigned char deviceAddress,
+	unsigned char registerIndex
 )
 {
-    unsigned char value = (0xFF);
+	unsigned char value = (0xFF);
 
-    if (hwI2CWriteData(deviceAddress, 1, &registerIndex) == 1)
-        hwI2CReadData(deviceAddress, 1, &value);
+	if (hwI2CWriteData(deviceAddress, 1, &registerIndex) == 1)
+		hwI2CReadData(deviceAddress, 1, &value);
 
-    return value;
+	return value;
 }
 
 
@@ -252,19 +250,19 @@
  *         -1   - Fail
  */
 int hwI2CWriteReg(
-    unsigned char deviceAddress,
-    unsigned char registerIndex,
-    unsigned char data
+	unsigned char deviceAddress,
+	unsigned char registerIndex,
+	unsigned char data
 )
 {
-    unsigned char value[2];
+	unsigned char value[2];
 
-    value[0] = registerIndex;
-    value[1] = data;
-    if (hwI2CWriteData(deviceAddress, 2, value) == 2)
-        return 0;
+	value[0] = registerIndex;
+	value[1] = data;
+	if (hwI2CWriteData(deviceAddress, 2, value) == 2)
+		return 0;
 
-    return (-1);
+	return (-1);
 }
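The register helpers above layer on the data-path functions: a register read is "write the one-byte index, then read one byte back", defaulting to 0xFF when the index write fails, and a register write sends index and value as a two-byte payload. A sketch of that pattern with the transfer functions stubbed out:

#include <stdio.h>

/* Stubs for the chunked transfer helpers (hwI2CWriteData/hwI2CReadData);
 * the real ones talk to the SM750 I2C controller. */
static unsigned int i2c_write(unsigned char addr, unsigned int len,
			      const unsigned char *buf)
{
	(void)addr; (void)buf;		/* stub: pretend everything was sent */
	return len;
}

static unsigned int i2c_read(unsigned char addr, unsigned int len,
			     unsigned char *buf)
{
	(void)addr;
	buf[0] = 0x42;			/* stub: pretend the device answered */
	return len;
}

/* Register read = write the register index, then read one byte back. */
static unsigned char i2c_read_reg(unsigned char addr, unsigned char reg)
{
	unsigned char value = 0xFF;	/* default when the index write fails */

	if (i2c_write(addr, 1, &reg) == 1)
		i2c_read(addr, 1, &value);
	return value;
}

/* Register write = send index and data back to back. */
static int i2c_write_reg(unsigned char addr, unsigned char reg,
			 unsigned char data)
{
	unsigned char buf[2] = { reg, data };

	return i2c_write(addr, 2, buf) == 2 ? 0 : -1;
}

int main(void)
{
	printf("reg 0x10 = 0x%02x, write rc = %d\n",
	       i2c_read_reg(0x70, 0x10), i2c_write_reg(0x70, 0x10, 0x55));
	return 0;
}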
 
 
diff --git a/drivers/staging/sm750fb/ddk750_mode.c b/drivers/staging/sm750fb/ddk750_mode.c
index 74313ff..2399b17 100644
--- a/drivers/staging/sm750fb/ddk750_mode.c
+++ b/drivers/staging/sm750fb/ddk750_mode.c
@@ -20,54 +20,54 @@
 	x = pModeParam->horizontal_display_end;
 	y = pModeParam->vertical_display_end;
 
-    /* SM750LE has to set up the top-left and bottom-right
-       registers as well.
-       Note that normal SM750/SM718 only use those two register for
-       auto-centering mode.
-    */
-    POKE32(CRT_AUTO_CENTERING_TL,
-      FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0)
-    | FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0));
+	/* SM750LE has to set up the top-left and bottom-right
+	   registers as well.
+	   Note that normal SM750/SM718 only use those two register for
+	   auto-centering mode.
+	 */
+	POKE32(CRT_AUTO_CENTERING_TL,
+	FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, TOP, 0)
+	| FIELD_VALUE(0, CRT_AUTO_CENTERING_TL, LEFT, 0));
 
-    POKE32(CRT_AUTO_CENTERING_BR,
-      FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1)
-    | FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1));
+	POKE32(CRT_AUTO_CENTERING_BR,
+	FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, BOTTOM, y-1)
+	| FIELD_VALUE(0, CRT_AUTO_CENTERING_BR, RIGHT, x-1));
 
-    /* Assume common fields in dispControl have been properly set before
-       calling this function.
-       This function only sets the extra fields in dispControl.
-    */
+	/* Assume common fields in dispControl have been properly set before
+	   calling this function.
+	   This function only sets the extra fields in dispControl.
+	 */
 
 	/* Clear bit 29:27 of display control register */
-    dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK);
+	dispControl &= FIELD_CLEAR(CRT_DISPLAY_CTRL, CLK);
 
 	/* Set bit 29:27 of display control register for the right clock */
 	/* Note that SM750LE only need to supported 7 resoluitons. */
-	if ( x == 800 && y == 600 )
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41);
+	if (x == 800 && y == 600)
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL41);
 	else if (x == 1024 && y == 768)
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL65);
 	else if (x == 1152 && y == 864)
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
 	else if (x == 1280 && y == 768)
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL80);
 	else if (x == 1280 && y == 720)
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL74);
 	else if (x == 1280 && y == 960)
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
 	else if (x == 1280 && y == 1024)
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL108);
 	else /* default to VGA clock */
-    	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25);
+		dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLK, PLL25);
 
 	/* Set bit 25:24 of display controller */
-    dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT);
-    dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT);
+	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CRTSELECT, CRT);
+	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, RGBBIT, 24BIT);
 
-    /* Set bit 14 of display controller */
-    dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW);
+	/* Set bit 14 of display controller */
+	dispControl = FIELD_SET(dispControl, CRT_DISPLAY_CTRL, CLOCK_PHASE, ACTIVE_LOW);
 
-    POKE32(CRT_DISPLAY_CTRL, dispControl);
+	POKE32(CRT_DISPLAY_CTRL, dispControl);
 
 	return dispControl;
 }
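displayControlAdjust_SM750LE() maps the seven supported resolutions to fixed pixel-clock selections (800x600 -> PLL41 ... 1280x1024 -> PLL108, anything else -> the 25 MHz VGA clock). The reindented if/else chain could equally be a lookup table; an illustrative sketch, where the PLL numbers are simply the MHz values named in the driver's CRT_DISPLAY_CTRL_CLK_PLLxx constants:

#include <stdio.h>
#include <stddef.h>

struct res_clk {
	unsigned int x, y, pll_mhz;
};

/* Resolutions handled by the SM750LE path and their clock selections. */
static const struct res_clk sm750le_clks[] = {
	{  800,  600,  41 },
	{ 1024,  768,  65 },
	{ 1152,  864,  80 },
	{ 1280,  768,  80 },
	{ 1280,  720,  74 },
	{ 1280,  960, 108 },
	{ 1280, 1024, 108 },
};

static unsigned int pick_clock(unsigned int x, unsigned int y)
{
	for (size_t i = 0; i < sizeof(sm750le_clks) / sizeof(sm750le_clks[0]); i++)
		if (sm750le_clks[i].x == x && sm750le_clks[i].y == y)
			return sm750le_clks[i].pll_mhz;
	return 25;	/* default to the VGA clock */
}

int main(void)
{
	printf("1280x720 -> PLL%u\n", pick_clock(1280, 720));
	return 0;
}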
@@ -80,25 +80,25 @@
 	int ret = 0;
 	int cnt = 0;
 	unsigned int ulTmpValue, ulReg;
-	if(pll->clockType == SECONDARY_PLL)
-	{
+
+	if (pll->clockType == SECONDARY_PLL) {
 		/* programe secondary pixel clock */
 		POKE32(CRT_PLL_CTRL, formatPllReg(pll));
-        POKE32(CRT_HORIZONTAL_TOTAL,
-              FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
-            | FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+		POKE32(CRT_HORIZONTAL_TOTAL,
+		FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
+		| FIELD_VALUE(0, CRT_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
 
-        POKE32(CRT_HORIZONTAL_SYNC,
-              FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
-            | FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+		POKE32(CRT_HORIZONTAL_SYNC,
+		FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
+		| FIELD_VALUE(0, CRT_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
 
-        POKE32(CRT_VERTICAL_TOTAL,
-              FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
-            | FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+		POKE32(CRT_VERTICAL_TOTAL,
+		FIELD_VALUE(0, CRT_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
+		| FIELD_VALUE(0, CRT_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
 
-        POKE32(CRT_VERTICAL_SYNC,
-              FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
-            | FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+		POKE32(CRT_VERTICAL_SYNC,
+		FIELD_VALUE(0, CRT_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
+		| FIELD_VALUE(0, CRT_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
 
 
 		ulTmpValue = FIELD_VALUE(0, CRT_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
@@ -107,9 +107,9 @@
 					  FIELD_SET(0, CRT_DISPLAY_CTRL, PLANE, ENABLE);
 
 
-		if(getChipType() == SM750LE){
+		if (getChipType() == SM750LE) {
 			displayControlAdjust_SM750LE(pModeParam, ulTmpValue);
-		}else{
+		} else {
 			ulReg = PEEK32(CRT_DISPLAY_CTRL)
 					& FIELD_CLEAR(CRT_DISPLAY_CTRL, VSYNC_PHASE)
 					& FIELD_CLEAR(CRT_DISPLAY_CTRL, HSYNC_PHASE)
@@ -119,45 +119,44 @@
 			 POKE32(CRT_DISPLAY_CTRL, ulTmpValue|ulReg);
 		}
 
-	}
-	else if(pll->clockType == PRIMARY_PLL)
-	{
+	} else if (pll->clockType == PRIMARY_PLL) {
 		unsigned int ulReservedBits;
+
 		POKE32(PANEL_PLL_CTRL, formatPllReg(pll));
 
-        POKE32(PANEL_HORIZONTAL_TOTAL,
-              FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
-            | FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
+		POKE32(PANEL_HORIZONTAL_TOTAL,
+		FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, TOTAL, pModeParam->horizontal_total - 1)
+		| FIELD_VALUE(0, PANEL_HORIZONTAL_TOTAL, DISPLAY_END, pModeParam->horizontal_display_end - 1));
 
-        POKE32(PANEL_HORIZONTAL_SYNC,
-              FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
-            | FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
+		POKE32(PANEL_HORIZONTAL_SYNC,
+		FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, WIDTH, pModeParam->horizontal_sync_width)
+		| FIELD_VALUE(0, PANEL_HORIZONTAL_SYNC, START, pModeParam->horizontal_sync_start - 1));
 
-        POKE32(PANEL_VERTICAL_TOTAL,
-              FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
-            | FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
+		POKE32(PANEL_VERTICAL_TOTAL,
+		FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, TOTAL, pModeParam->vertical_total - 1)
+			| FIELD_VALUE(0, PANEL_VERTICAL_TOTAL, DISPLAY_END, pModeParam->vertical_display_end - 1));
 
-        POKE32(PANEL_VERTICAL_SYNC,
-              FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
-            | FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
+		POKE32(PANEL_VERTICAL_SYNC,
+		FIELD_VALUE(0, PANEL_VERTICAL_SYNC, HEIGHT, pModeParam->vertical_sync_height)
+		| FIELD_VALUE(0, PANEL_VERTICAL_SYNC, START, pModeParam->vertical_sync_start - 1));
 
 		ulTmpValue = FIELD_VALUE(0, PANEL_DISPLAY_CTRL, VSYNC_PHASE, pModeParam->vertical_sync_polarity)|
-					FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
-					FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)|
-					FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)|
-					FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE);
+			     FIELD_VALUE(0, PANEL_DISPLAY_CTRL, HSYNC_PHASE, pModeParam->horizontal_sync_polarity)|
+			     FIELD_VALUE(0, PANEL_DISPLAY_CTRL, CLOCK_PHASE, pModeParam->clock_phase_polarity)|
+			     FIELD_SET(0, PANEL_DISPLAY_CTRL, TIMING, ENABLE)|
+			     FIELD_SET(0, PANEL_DISPLAY_CTRL, PLANE, ENABLE);
 
-        ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
-                         FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
-                         FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)|
-                         FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW);
+		ulReservedBits = FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_1_MASK, ENABLE) |
+				 FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_2_MASK, ENABLE) |
+				 FIELD_SET(0, PANEL_DISPLAY_CTRL, RESERVED_3_MASK, ENABLE)|
+				 FIELD_SET(0, PANEL_DISPLAY_CTRL, VSYNC, ACTIVE_LOW);
 
-        ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits)
-              & FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE)
-              & FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE)
-              & FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE)
-              & FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING)
-              & FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE);
+		ulReg = (PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits)
+			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, CLOCK_PHASE)
+			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, VSYNC_PHASE)
+			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, HSYNC_PHASE)
+			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, TIMING)
+			& FIELD_CLEAR(PANEL_DISPLAY_CTRL, PLANE);
 
 
 		/* May a hardware bug or just my test chip (not confirmed).
@@ -170,16 +169,14 @@
 
 		POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
 #if 1
-		while((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg))
-		{
+		while ((PEEK32(PANEL_DISPLAY_CTRL) & ~ulReservedBits) != (ulTmpValue|ulReg)) {
 			cnt++;
-			if(cnt > 1000)
+			if (cnt > 1000)
 				break;
 			POKE32(PANEL_DISPLAY_CTRL, ulTmpValue|ulReg);
 		}
 #endif
-	}
-	else{
+	} else {
 		ret = -1;
 	}
 	return ret;
@@ -189,11 +186,12 @@
 {
 	pll_value_t pll;
 	unsigned int uiActualPixelClk;
+
 	pll.inputFreq = DEFAULT_INPUT_CLOCK;
 	pll.clockType = clock;
 
 	uiActualPixelClk = calcPllValue(parm->pixel_clock, &pll);
-	if(getChipType() == SM750LE){
+	if (getChipType() == SM750LE) {
 		/* set graphic mode via IO method */
 		outb_p(0x88, 0x3d4);
 		outb_p(0x06, 0x3d5);
diff --git a/drivers/staging/sm750fb/ddk750_mode.h b/drivers/staging/sm750fb/ddk750_mode.h
index 4e8fab3..e846dc2 100644
--- a/drivers/staging/sm750fb/ddk750_mode.h
+++ b/drivers/staging/sm750fb/ddk750_mode.h
@@ -3,37 +3,35 @@
 
 #include "ddk750_chip.h"
 
-typedef enum _spolarity_t
-{
-    POS = 0, /* positive */
-    NEG, /* negative */
+typedef enum _spolarity_t {
+	POS = 0, /* positive */
+	NEG, /* negative */
 }
 spolarity_t;
 
 
-typedef struct _mode_parameter_t
-{
-    /* Horizontal timing. */
-    unsigned long horizontal_total;
-    unsigned long horizontal_display_end;
-    unsigned long horizontal_sync_start;
-    unsigned long horizontal_sync_width;
-    spolarity_t horizontal_sync_polarity;
+typedef struct _mode_parameter_t {
+	/* Horizontal timing. */
+	unsigned long horizontal_total;
+	unsigned long horizontal_display_end;
+	unsigned long horizontal_sync_start;
+	unsigned long horizontal_sync_width;
+	spolarity_t horizontal_sync_polarity;
 
-    /* Vertical timing. */
-    unsigned long vertical_total;
-    unsigned long vertical_display_end;
-    unsigned long vertical_sync_start;
-    unsigned long vertical_sync_height;
-    spolarity_t vertical_sync_polarity;
+	/* Vertical timing. */
+	unsigned long vertical_total;
+	unsigned long vertical_display_end;
+	unsigned long vertical_sync_start;
+	unsigned long vertical_sync_height;
+	spolarity_t vertical_sync_polarity;
 
-    /* Refresh timing. */
-    unsigned long pixel_clock;
-    unsigned long horizontal_frequency;
-    unsigned long vertical_frequency;
+	/* Refresh timing. */
+	unsigned long pixel_clock;
+	unsigned long horizontal_frequency;
+	unsigned long vertical_frequency;
 
-    /* Clock Phase. This clock phase only applies to Panel. */
-    spolarity_t clock_phase_polarity;
+	/* Clock Phase. This clock phase only applies to Panel. */
+	spolarity_t clock_phase_polarity;
 }
 mode_parameter_t;
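
mode_parameter_t above carries one complete set of panel/CRT timings. For illustration only, this is how it could be filled with the standard VESA 800x600@60 timings (40 MHz pixel clock); the numbers are not taken from this patch:

/*
 * Illustration only: standard VESA 800x600@60 timings.
 */
static const mode_parameter_t mode_800x600_60 = {
	.horizontal_total         = 1056,
	.horizontal_display_end   = 800,
	.horizontal_sync_start    = 840,
	.horizontal_sync_width    = 128,
	.horizontal_sync_polarity = POS,

	.vertical_total           = 628,
	.vertical_display_end     = 600,
	.vertical_sync_start      = 601,
	.vertical_sync_height     = 4,
	.vertical_sync_polarity   = POS,

	.pixel_clock              = 40000000,	/* Hz */
	.horizontal_frequency     = 37879,	/* ~37.9 kHz */
	.vertical_frequency       = 60,		/* Hz */

	.clock_phase_polarity     = POS,
};
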
 
diff --git a/drivers/staging/sm750fb/ddk750_power.c b/drivers/staging/sm750fb/ddk750_power.c
index 1e5f398..e580dab 100644
--- a/drivers/staging/sm750fb/ddk750_power.c
+++ b/drivers/staging/sm750fb/ddk750_power.c
@@ -5,21 +5,23 @@
 void ddk750_setDPMS(DPMS_t state)
 {
 	unsigned int value;
-	if(getChipType() == SM750LE){
+
+	if (getChipType() == SM750LE) {
 		value = PEEK32(CRT_DISPLAY_CTRL);
-		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL, DPMS, state));
-	}else{
+		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(value, CRT_DISPLAY_CTRL,
+						     DPMS, state));
+	} else {
 		value = PEEK32(SYSTEM_CTRL);
-		value= FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state);
+		value = FIELD_VALUE(value, SYSTEM_CTRL, DPMS, state);
 		POKE32(SYSTEM_CTRL, value);
 	}
 }
 
 unsigned int getPowerMode(void)
 {
-	if(getChipType() == SM750LE)
+	if (getChipType() == SM750LE)
 		return 0;
-    return (FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE));
+	return FIELD_GET(PEEK32(POWER_MODE_CTRL), POWER_MODE_CTRL, MODE);
 }
 
 
@@ -29,76 +31,74 @@
  */
 void setPowerMode(unsigned int powerMode)
 {
-    unsigned int control_value = 0;
+	unsigned int control_value = 0;
 
-    control_value = PEEK32(POWER_MODE_CTRL);
+	control_value = PEEK32(POWER_MODE_CTRL);
 
-	if(getChipType() == SM750LE)
+	if (getChipType() == SM750LE)
 		return;
 
-    switch (powerMode)
-    {
-        case POWER_MODE_CTRL_MODE_MODE0:
-            control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, MODE0);
-            break;
+	switch (powerMode) {
+	case POWER_MODE_CTRL_MODE_MODE0:
+		control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
+					  MODE0);
+		break;
 
-        case POWER_MODE_CTRL_MODE_MODE1:
-            control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, MODE1);
-            break;
+	case POWER_MODE_CTRL_MODE_MODE1:
+		control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
+					  MODE1);
+		break;
 
-        case POWER_MODE_CTRL_MODE_SLEEP:
-            control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE, SLEEP);
-            break;
+	case POWER_MODE_CTRL_MODE_SLEEP:
+		control_value = FIELD_SET(control_value, POWER_MODE_CTRL, MODE,
+					  SLEEP);
+		break;
 
-        default:
-            break;
-    }
+	default:
+		break;
+	}
 
-    /* Set up other fields in Power Control Register */
-    if (powerMode == POWER_MODE_CTRL_MODE_SLEEP)
-    {
-        control_value =
+	/* Set up other fields in Power Control Register */
+	if (powerMode == POWER_MODE_CTRL_MODE_SLEEP) {
+		control_value =
 #ifdef VALIDATION_CHIP
-            FIELD_SET(  control_value, POWER_MODE_CTRL, 336CLK, OFF) |
+		FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, OFF) |
 #endif
-            FIELD_SET(  control_value, POWER_MODE_CTRL, OSC_INPUT,  OFF);
-    }
-    else
-    {
-        control_value =
+		FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT,  OFF);
+	} else {
+		control_value =
 #ifdef VALIDATION_CHIP
-            FIELD_SET(  control_value, POWER_MODE_CTRL, 336CLK, ON) |
+		FIELD_SET(control_value, POWER_MODE_CTRL, 336CLK, ON) |
 #endif
-            FIELD_SET(  control_value, POWER_MODE_CTRL, OSC_INPUT,  ON);
-    }
+		FIELD_SET(control_value, POWER_MODE_CTRL, OSC_INPUT,  ON);
+	}
 
-    /* Program new power mode. */
-    POKE32(POWER_MODE_CTRL, control_value);
+	/* Program new power mode. */
+	POKE32(POWER_MODE_CTRL, control_value);
 }
 
 void setCurrentGate(unsigned int gate)
 {
-    unsigned int gate_reg;
-    unsigned int mode;
+	unsigned int gate_reg;
+	unsigned int mode;
 
-    /* Get current power mode. */
-    mode = getPowerMode();
+	/* Get current power mode. */
+	mode = getPowerMode();
 
-    switch (mode)
-    {
-        case POWER_MODE_CTRL_MODE_MODE0:
-            gate_reg = MODE0_GATE;
-            break;
+	switch (mode) {
+	case POWER_MODE_CTRL_MODE_MODE0:
+		gate_reg = MODE0_GATE;
+		break;
 
-        case POWER_MODE_CTRL_MODE_MODE1:
-            gate_reg = MODE1_GATE;
-            break;
+	case POWER_MODE_CTRL_MODE_MODE1:
+		gate_reg = MODE1_GATE;
+		break;
 
-        default:
-            gate_reg = MODE0_GATE;
-            break;
-    }
-    POKE32(gate_reg, gate);
+	default:
+		gate_reg = MODE0_GATE;
+		break;
+	}
+	POKE32(gate_reg, gate);
 }
 
 
@@ -108,21 +108,18 @@
  */
 void enable2DEngine(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-    {
-        gate = FIELD_SET(gate, CURRENT_GATE, DE,  ON);
-        gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON);
-    }
-    else
-    {
-        gate = FIELD_SET(gate, CURRENT_GATE, DE,  OFF);
-        gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF);
-    }
+	gate = PEEK32(CURRENT_GATE);
+	if (enable) {
+		gate = FIELD_SET(gate, CURRENT_GATE, DE,  ON);
+		gate = FIELD_SET(gate, CURRENT_GATE, CSC, ON);
+	} else {
+		gate = FIELD_SET(gate, CURRENT_GATE, DE,  OFF);
+		gate = FIELD_SET(gate, CURRENT_GATE, CSC, OFF);
+	}
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
 
 
@@ -131,58 +128,56 @@
  */
 void enableZVPort(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    /* Enable ZV Port Gate */
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-    {
-        gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, ON);
+	/* Enable ZV Port Gate */
+	gate = PEEK32(CURRENT_GATE);
+	if (enable) {
+		gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, ON);
 #if 1
-        /* Using Software I2C */
-        gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
+		/* Using Software I2C */
+		gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
 #else
-        /* Using Hardware I2C */
-        gate = FIELD_SET(gate, CURRENT_GATE, I2C,    ON);
+		/* Using Hardware I2C */
+		gate = FIELD_SET(gate, CURRENT_GATE, I2C,    ON);
 #endif
-    }
-    else
-    {
-        /* Disable ZV Port Gate. There is no way to know whether the GPIO pins are being used
-           or not. Therefore, do not disable the GPIO gate. */
-        gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, OFF);
-    }
+	} else {
+		/* Disable ZV Port Gate. There is no way to know whether the
+		GPIO pins are being used or not. Therefore, do not disable the
+		GPIO gate. */
+		gate = FIELD_SET(gate, CURRENT_GATE, ZVPORT, OFF);
+	}
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
 
 
 void enableSSP(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    /* Enable SSP Gate */
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-        gate = FIELD_SET(gate, CURRENT_GATE, SSP, ON);
-    else
-        gate = FIELD_SET(gate, CURRENT_GATE, SSP, OFF);
+	/* Enable SSP Gate */
+	gate = PEEK32(CURRENT_GATE);
+	if (enable)
+		gate = FIELD_SET(gate, CURRENT_GATE, SSP, ON);
+	else
+		gate = FIELD_SET(gate, CURRENT_GATE, SSP, OFF);
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
 
 void enableDMA(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    /* Enable DMA Gate */
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-        gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON);
-    else
-        gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF);
+	/* Enable DMA Gate */
+	gate = PEEK32(CURRENT_GATE);
+	if (enable)
+		gate = FIELD_SET(gate, CURRENT_GATE, DMA, ON);
+	else
+		gate = FIELD_SET(gate, CURRENT_GATE, DMA, OFF);
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
 
 /*
@@ -190,16 +185,16 @@
  */
 void enableGPIO(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    /* Enable GPIO Gate */
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-        gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
-    else
-        gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF);
+	/* Enable GPIO Gate */
+	gate = PEEK32(CURRENT_GATE);
+	if (enable)
+		gate = FIELD_SET(gate, CURRENT_GATE, GPIO, ON);
+	else
+		gate = FIELD_SET(gate, CURRENT_GATE, GPIO, OFF);
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
 
 /*
@@ -207,16 +202,16 @@
  */
 void enablePWM(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    /* Enable PWM Gate */
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-        gate = FIELD_SET(gate, CURRENT_GATE, PWM, ON);
-    else
-        gate = FIELD_SET(gate, CURRENT_GATE, PWM, OFF);
+	/* Enable PWM Gate */
+	gate = PEEK32(CURRENT_GATE);
+	if (enable)
+		gate = FIELD_SET(gate, CURRENT_GATE, PWM, ON);
+	else
+		gate = FIELD_SET(gate, CURRENT_GATE, PWM, OFF);
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
 
 /*
@@ -224,16 +219,16 @@
  */
 void enableI2C(unsigned int enable)
 {
-    uint32_t gate;
+	uint32_t gate;
 
-    /* Enable I2C Gate */
-    gate = PEEK32(CURRENT_GATE);
-    if (enable)
-        gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
-    else
-        gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF);
+	/* Enable I2C Gate */
+	gate = PEEK32(CURRENT_GATE);
+	if (enable)
+		gate = FIELD_SET(gate, CURRENT_GATE, I2C, ON);
+	else
+		gate = FIELD_SET(gate, CURRENT_GATE, I2C, OFF);
 
-    setCurrentGate(gate);
+	setCurrentGate(gate);
 }
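
The enable2DEngine()/enableZVPort()/enableSSP()/enableDMA()/enableGPIO()/enablePWM()/enableI2C() helpers above all follow the same read-modify-write pattern on CURRENT_GATE. A sketch of how that could be factored out, reusing the driver's PEEK32() and FIELD_SET() macros; the SM750_SET_GATE() name is hypothetical and not part of this patch:

/*
 * Sketch only: one helper for the common gate toggle pattern.
 */
#define SM750_SET_GATE(field, on)					\
	do {								\
		uint32_t gate = PEEK32(CURRENT_GATE);			\
									\
		gate = (on) ?						\
			FIELD_SET(gate, CURRENT_GATE, field, ON) :	\
			FIELD_SET(gate, CURRENT_GATE, field, OFF);	\
		setCurrentGate(gate);					\
	} while (0)

/* enableDMA() would then reduce to: */
void enableDMA(unsigned int enable)
{
	SM750_SET_GATE(DMA, enable);
}
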
 
 
diff --git a/drivers/staging/sm750fb/ddk750_power.h b/drivers/staging/sm750fb/ddk750_power.h
index 4e00955..b7cf6b2 100644
--- a/drivers/staging/sm750fb/ddk750_power.h
+++ b/drivers/staging/sm750fb/ddk750_power.h
@@ -1,12 +1,11 @@
 #ifndef DDK750_POWER_H__
 #define DDK750_POWER_H__
 
-typedef enum _DPMS_t
-{
-    crtDPMS_ON = 0x0,
-    crtDPMS_STANDBY = 0x1,
-    crtDPMS_SUSPEND = 0x2,
-    crtDPMS_OFF = 0x3,
+typedef enum _DPMS_t {
+	crtDPMS_ON = 0x0,
+	crtDPMS_STANDBY = 0x1,
+	crtDPMS_SUSPEND = 0x2,
+	crtDPMS_OFF = 0x3,
 }
 DPMS_t;
 
diff --git a/drivers/staging/sm750fb/ddk750_reg.h b/drivers/staging/sm750fb/ddk750_reg.h
index 1a40dc2..2995625 100644
--- a/drivers/staging/sm750fb/ddk750_reg.h
+++ b/drivers/staging/sm750fb/ddk750_reg.h
@@ -1640,9 +1640,9 @@
 /* CRT Graphics Control */
 
 #define CRT_DISPLAY_CTRL                              0x080200
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK  			  31:27
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE  			  0
-#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE 			  0x1F
+#define CRT_DISPLAY_CTRL_RESERVED_1_MASK	      31:27
+#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_DISABLE      0
+#define CRT_DISPLAY_CTRL_RESERVED_1_MASK_ENABLE       0x1F
 
 /* SM750LE definition */
 #define CRT_DISPLAY_CTRL_DPMS                         31:30
@@ -1664,9 +1664,9 @@
 #define CRT_DISPLAY_CTRL_SHIFT_VGA_DAC_ENABLE         0
 
 
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK 			  25:24
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE 			  3
-#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE 			  0
+#define CRT_DISPLAY_CTRL_RESERVED_2_MASK	      25:24
+#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_ENABLE	      3
+#define CRT_DISPLAY_CTRL_RESERVED_2_MASK_DISABLE      0
 
 /* SM750LE definition */
 #define CRT_DISPLAY_CTRL_CRTSELECT                    25:25
@@ -1677,11 +1677,11 @@
 #define CRT_DISPLAY_CTRL_RGBBIT_12BIT                 1
 
 
-#define CRT_DISPLAY_CTRL_RESERVED_3_MASK 			  15:15
+#define CRT_DISPLAY_CTRL_RESERVED_3_MASK	      15:15
 #define CRT_DISPLAY_CTRL_RESERVED_3_MASK_DISABLE      0
 #define CRT_DISPLAY_CTRL_RESERVED_3_MASK_ENABLE       1
 
-#define CRT_DISPLAY_CTRL_RESERVED_4_MASK 			  9:9
+#define CRT_DISPLAY_CTRL_RESERVED_4_MASK	      9:9
 #define CRT_DISPLAY_CTRL_RESERVED_4_MASK_DISABLE      0
 #define CRT_DISPLAY_CTRL_RESERVED_4_MASK_ENABLE       1
 
@@ -1882,7 +1882,7 @@
 #endif
 
 /* sm750le new register to control panel output */
-#define DISPLAY_CONTROL_750LE 	0x80288
+#define DISPLAY_CONTROL_750LE			      0x80288
 /* Palette RAM */
 
 /* Panel Palette register starts at 0x080400 ~ 0x0807FC */
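
The register header above encodes each bit field with a "high:low" value (e.g. 31:27). A sketch of how such a range can be turned into endpoints and a mask using the ternary trick; the driver's real FIELD_* helpers are defined in another header and may differ in detail:

/*
 * Sketch only, valid for fields narrower than 32 bits.
 */
#define FIELD_HI(range)		(1 ? range)	/* value before the ':' */
#define FIELD_LO(range)		(0 ? range)	/* value after the ':'  */
#define FIELD_MASK_OF(range)	\
	(((1U << (FIELD_HI(range) - FIELD_LO(range) + 1)) - 1) << FIELD_LO(range))

/* FIELD_MASK_OF(CRT_DISPLAY_CTRL_RESERVED_1_MASK) == 0xF8000000 (bits 31:27) */
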
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index b6395b8..0bdf3db 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -36,12 +36,12 @@
  */
 unsigned short sii164GetVendorID(void)
 {
-    unsigned short vendorID;
+	unsigned short vendorID;
 
-    vendorID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
-                (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
+	vendorID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_HIGH) << 8) |
+		    (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_VENDOR_ID_LOW);
 
-    return vendorID;
+	return vendorID;
 }
 
 /*
@@ -53,12 +53,12 @@
  */
 unsigned short sii164GetDeviceID(void)
 {
-    unsigned short deviceID;
+	unsigned short deviceID;
 
-    deviceID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
-                (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
+	deviceID = ((unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_HIGH) << 8) |
+		    (unsigned short) i2cReadReg(SII164_I2C_ADDRESS, SII164_DEVICE_ID_LOW);
 
-    return deviceID;
+	return deviceID;
 }
 
 
@@ -113,132 +113,130 @@
  *     -1   - Fail.
  */
 long sii164InitChip(
-    unsigned char edgeSelect,
-    unsigned char busSelect,
-    unsigned char dualEdgeClkSelect,
-    unsigned char hsyncEnable,
-    unsigned char vsyncEnable,
-    unsigned char deskewEnable,
-    unsigned char deskewSetting,
-    unsigned char continuousSyncEnable,
-    unsigned char pllFilterEnable,
-    unsigned char pllFilterValue
+	unsigned char edgeSelect,
+	unsigned char busSelect,
+	unsigned char dualEdgeClkSelect,
+	unsigned char hsyncEnable,
+	unsigned char vsyncEnable,
+	unsigned char deskewEnable,
+	unsigned char deskewSetting,
+	unsigned char continuousSyncEnable,
+	unsigned char pllFilterEnable,
+	unsigned char pllFilterValue
 )
 {
 	unsigned char config;
 
-    /* Initialize the i2c bus */
+	/* Initialize the i2c bus */
 #ifdef USE_HW_I2C
-    /* Use fast mode. */
-    hwI2CInit(1);
+	/* Use fast mode. */
+	hwI2CInit(1);
 #else
-    swI2CInit(DEFAULT_I2C_SCL, DEFAULT_I2C_SDA);
+	swI2CInit(DEFAULT_I2C_SCL, DEFAULT_I2C_SDA);
 #endif
 
-    /* Check if SII164 Chip exists */
-    if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID))
-    {
-        /*
-         *  Initialize SII164 controller chip.
-         */
+	/* Check if SII164 Chip exists */
+	if ((sii164GetVendorID() == SII164_VENDOR_ID) && (sii164GetDeviceID() == SII164_DEVICE_ID)) {
+		/*
+		 *  Initialize SII164 controller chip.
+		 */
 
-        /* Select the edge */
-        if (edgeSelect == 0)
-            config = SII164_CONFIGURATION_LATCH_FALLING;
-        else
-            config = SII164_CONFIGURATION_LATCH_RISING;
+		/* Select the edge */
+		if (edgeSelect == 0)
+			config = SII164_CONFIGURATION_LATCH_FALLING;
+		else
+			config = SII164_CONFIGURATION_LATCH_RISING;
 
-        /* Select bus wide */
-        if (busSelect == 0)
-            config |= SII164_CONFIGURATION_BUS_12BITS;
-        else
-            config |= SII164_CONFIGURATION_BUS_24BITS;
+		/* Select bus wide */
+		if (busSelect == 0)
+			config |= SII164_CONFIGURATION_BUS_12BITS;
+		else
+			config |= SII164_CONFIGURATION_BUS_24BITS;
 
-        /* Select Dual/Single Edge Clock */
-        if (dualEdgeClkSelect == 0)
-            config |= SII164_CONFIGURATION_CLOCK_SINGLE;
-        else
-            config |= SII164_CONFIGURATION_CLOCK_DUAL;
+		/* Select Dual/Single Edge Clock */
+		if (dualEdgeClkSelect == 0)
+			config |= SII164_CONFIGURATION_CLOCK_SINGLE;
+		else
+			config |= SII164_CONFIGURATION_CLOCK_DUAL;
 
-        /* Select HSync Enable */
-        if (hsyncEnable == 0)
-            config |= SII164_CONFIGURATION_HSYNC_FORCE_LOW;
-        else
-            config |= SII164_CONFIGURATION_HSYNC_AS_IS;
+		/* Select HSync Enable */
+		if (hsyncEnable == 0)
+			config |= SII164_CONFIGURATION_HSYNC_FORCE_LOW;
+		else
+			config |= SII164_CONFIGURATION_HSYNC_AS_IS;
 
-        /* Select VSync Enable */
-        if (vsyncEnable == 0)
-            config |= SII164_CONFIGURATION_VSYNC_FORCE_LOW;
-        else
-            config |= SII164_CONFIGURATION_VSYNC_AS_IS;
+		/* Select VSync Enable */
+		if (vsyncEnable == 0)
+			config |= SII164_CONFIGURATION_VSYNC_FORCE_LOW;
+		else
+			config |= SII164_CONFIGURATION_VSYNC_AS_IS;
 
-        i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+		i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
 
-        /* De-skew enabled with default 111b value.
-           This will fix some artifacts problem in some mode on board 2.2.
-           Somehow this fix does not affect board 2.1.
-         */
-        if (deskewEnable == 0)
-            config = SII164_DESKEW_DISABLE;
-        else
-            config = SII164_DESKEW_ENABLE;
+		/* De-skew enabled with default 111b value.
+		   This will fix some artifact problems in some modes on board 2.2.
+		   Somehow this fix does not affect board 2.1.
+		 */
+		if (deskewEnable == 0)
+			config = SII164_DESKEW_DISABLE;
+		else
+			config = SII164_DESKEW_ENABLE;
 
-        switch (deskewSetting)
-        {
-            case 0:
-                config |= SII164_DESKEW_1_STEP;
-                break;
-            case 1:
-                config |= SII164_DESKEW_2_STEP;
-                break;
-            case 2:
-                config |= SII164_DESKEW_3_STEP;
-                break;
-            case 3:
-                config |= SII164_DESKEW_4_STEP;
-                break;
-            case 4:
-                config |= SII164_DESKEW_5_STEP;
-                break;
-            case 5:
-                config |= SII164_DESKEW_6_STEP;
-                break;
-            case 6:
-                config |= SII164_DESKEW_7_STEP;
-                break;
-            case 7:
-                config |= SII164_DESKEW_8_STEP;
-                break;
-        }
-        i2cWriteReg(SII164_I2C_ADDRESS, SII164_DESKEW, config);
+		switch (deskewSetting) {
+		case 0:
+			config |= SII164_DESKEW_1_STEP;
+			break;
+		case 1:
+			config |= SII164_DESKEW_2_STEP;
+			break;
+		case 2:
+			config |= SII164_DESKEW_3_STEP;
+			break;
+		case 3:
+			config |= SII164_DESKEW_4_STEP;
+			break;
+		case 4:
+			config |= SII164_DESKEW_5_STEP;
+			break;
+		case 5:
+			config |= SII164_DESKEW_6_STEP;
+			break;
+		case 6:
+			config |= SII164_DESKEW_7_STEP;
+			break;
+		case 7:
+			config |= SII164_DESKEW_8_STEP;
+			break;
+		}
+		i2cWriteReg(SII164_I2C_ADDRESS, SII164_DESKEW, config);
 
-        /* Enable/Disable Continuous Sync. */
-        if (continuousSyncEnable == 0)
-            config = SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE;
-        else
-            config = SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE;
+		/* Enable/Disable Continuous Sync. */
+		if (continuousSyncEnable == 0)
+			config = SII164_PLL_FILTER_SYNC_CONTINUOUS_DISABLE;
+		else
+			config = SII164_PLL_FILTER_SYNC_CONTINUOUS_ENABLE;
 
-        /* Enable/Disable PLL Filter */
-        if (pllFilterEnable == 0)
-            config |= SII164_PLL_FILTER_DISABLE;
-        else
-            config |= SII164_PLL_FILTER_ENABLE;
+		/* Enable/Disable PLL Filter */
+		if (pllFilterEnable == 0)
+			config |= SII164_PLL_FILTER_DISABLE;
+		else
+			config |= SII164_PLL_FILTER_ENABLE;
 
-        /* Set the PLL Filter value */
-        config |= ((pllFilterValue & 0x07) << 1);
+		/* Set the PLL Filter value */
+		config |= ((pllFilterValue & 0x07) << 1);
 
-        i2cWriteReg(SII164_I2C_ADDRESS, SII164_PLL, config);
+		i2cWriteReg(SII164_I2C_ADDRESS, SII164_PLL, config);
 
-        /* Recover from Power Down and enable output. */
-        config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
-        config |= SII164_CONFIGURATION_POWER_NORMAL;
-        i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+		/* Recover from Power Down and enable output. */
+		config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
+		config |= SII164_CONFIGURATION_POWER_NORMAL;
+		i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
 
-        return 0;
-    }
+		return 0;
+	}
 
-    /* Return -1 if initialization fails. */
-    return (-1);
+	/* Return -1 if initialization fails. */
+	return (-1);
 }
 
 
@@ -255,9 +253,9 @@
  */
 void sii164ResetChip(void)
 {
-    /* Power down */
-    sii164SetPower(0);
-    sii164SetPower(1);
+	/* Power down */
+	sii164SetPower(0);
+	sii164SetPower(1);
 }
 
 
@@ -268,7 +266,7 @@
  */
 char *sii164GetChipString(void)
 {
-    return gDviCtrlChipName;
+	return gDviCtrlChipName;
 }
 
 
@@ -280,26 +278,23 @@
  *      powerUp - Flag to set the power down or up
  */
 void sii164SetPower(
-    unsigned char powerUp
+	unsigned char powerUp
 )
 {
-    unsigned char config;
+	unsigned char config;
 
-    config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
-    if (powerUp == 1)
-    {
-        /* Power up the chip */
-        config &= ~SII164_CONFIGURATION_POWER_MASK;
-        config |= SII164_CONFIGURATION_POWER_NORMAL;
-        i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
-    }
-    else
-    {
-        /* Power down the chip */
-        config &= ~SII164_CONFIGURATION_POWER_MASK;
-        config |= SII164_CONFIGURATION_POWER_DOWN;
-        i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
-    }
+	config = i2cReadReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION);
+	if (powerUp == 1) {
+		/* Power up the chip */
+		config &= ~SII164_CONFIGURATION_POWER_MASK;
+		config |= SII164_CONFIGURATION_POWER_NORMAL;
+		i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+	} else {
+		/* Power down the chip */
+		config &= ~SII164_CONFIGURATION_POWER_MASK;
+		config |= SII164_CONFIGURATION_POWER_DOWN;
+		i2cWriteReg(SII164_I2C_ADDRESS, SII164_CONFIGURATION, config);
+	}
 }
 
 
@@ -308,31 +303,30 @@
  *      This function selects the mode of the hot plug detection.
  */
 static void sii164SelectHotPlugDetectionMode(
-    sii164_hot_plug_mode_t hotPlugMode
+	sii164_hot_plug_mode_t hotPlugMode
 )
 {
-    unsigned char detectReg;
+	unsigned char detectReg;
 
-    detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
-    switch (hotPlugMode)
-    {
-        case SII164_HOTPLUG_DISABLE:
-            detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH;
-            break;
-        case SII164_HOTPLUG_USE_MDI:
-            detectReg &= ~SII164_DETECT_INTERRUPT_MASK;
-            detectReg |= SII164_DETECT_INTERRUPT_BY_HTPLG_PIN;
-            detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_MDI;
-            break;
-        case SII164_HOTPLUG_USE_RSEN:
-            detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_RSEN;
-            break;
-        case SII164_HOTPLUG_USE_HTPLG:
-            detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HTPLG;
-            break;
-    }
+	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & ~SII164_DETECT_MONITOR_SENSE_OUTPUT_FLAG;
+	switch (hotPlugMode) {
+	case SII164_HOTPLUG_DISABLE:
+		detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HIGH;
+		break;
+	case SII164_HOTPLUG_USE_MDI:
+		detectReg &= ~SII164_DETECT_INTERRUPT_MASK;
+		detectReg |= SII164_DETECT_INTERRUPT_BY_HTPLG_PIN;
+		detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_MDI;
+		break;
+	case SII164_HOTPLUG_USE_RSEN:
+		detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_RSEN;
+		break;
+	case SII164_HOTPLUG_USE_HTPLG:
+		detectReg |= SII164_DETECT_MONITOR_SENSE_OUTPUT_HTPLG;
+		break;
+	}
 
-    i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg);
+	i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg);
 }
 
 /*
@@ -342,18 +336,19 @@
  *  enableHotPlug   - Enable (=1) / disable (=0) Hot Plug detection
  */
 void sii164EnableHotPlugDetection(
-    unsigned char enableHotPlug
+	unsigned char enableHotPlug
 )
 {
-    unsigned char detectReg;
-    detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
+	unsigned char detectReg;
 
-    /* Depending on each DVI controller, need to enable the hot plug based on each
-       individual chip design. */
-    if (enableHotPlug != 0)
-        sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
-    else
-        sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_DISABLE);
+	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
+
+	/* Depending on the DVI controller, hot plug has to be enabled according
+	   to the individual chip design. */
+	if (enableHotPlug != 0)
+		sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_USE_MDI);
+	else
+		sii164SelectHotPlugDetectionMode(SII164_HOTPLUG_DISABLE);
 }
 
 /*
@@ -366,13 +361,13 @@
  */
 unsigned char sii164IsConnected(void)
 {
-    unsigned char hotPlugValue;
+	unsigned char hotPlugValue;
 
-    hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_HOT_PLUG_STATUS_MASK;
-    if (hotPlugValue == SII164_DETECT_HOT_PLUG_STATUS_ON)
-        return 1;
-    else
-        return 0;
+	hotPlugValue = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_HOT_PLUG_STATUS_MASK;
+	if (hotPlugValue == SII164_DETECT_HOT_PLUG_STATUS_ON)
+		return 1;
+	else
+		return 0;
 }
 
 /*
@@ -385,13 +380,13 @@
  */
 unsigned char sii164CheckInterrupt(void)
 {
-    unsigned char detectReg;
+	unsigned char detectReg;
 
-    detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_MONITOR_STATE_MASK;
-    if (detectReg == SII164_DETECT_MONITOR_STATE_CHANGE)
-        return 1;
-    else
-        return 0;
+	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT) & SII164_DETECT_MONITOR_STATE_MASK;
+	if (detectReg == SII164_DETECT_MONITOR_STATE_CHANGE)
+		return 1;
+	else
+		return 0;
 }
 
 /*
@@ -400,11 +395,11 @@
  */
 void sii164ClearInterrupt(void)
 {
-    unsigned char detectReg;
+	unsigned char detectReg;
 
-    /* Clear the MDI interrupt */
-    detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
-    i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
+	/* Clear the MDI interrupt */
+	detectReg = i2cReadReg(SII164_I2C_ADDRESS, SII164_DETECT);
+	i2cWriteReg(SII164_I2C_ADDRESS, SII164_DETECT, detectReg | SII164_DETECT_MONITOR_STATE_CLEAR);
 }
 
 #endif
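
For illustration only, this is how a board-setup path could call the sii164InitChip() definition above; the argument values are examples and are not taken from this patch (24-bit bus, rising-edge latch, single-edge clock, syncs passed through, de-skew off, continuous sync and PLL filter on):

static long example_sii164_setup(void)
{
	return sii164InitChip(1,	/* edgeSelect: latch on rising edge */
			      1,	/* busSelect: 24-bit bus */
			      0,	/* dualEdgeClkSelect: single edge */
			      1,	/* hsyncEnable: pass HSYNC through */
			      1,	/* vsyncEnable: pass VSYNC through */
			      0,	/* deskewEnable: off */
			      0,	/* deskewSetting: ignored when off */
			      1,	/* continuousSyncEnable: on */
			      1,	/* pllFilterEnable: on */
			      4);	/* pllFilterValue: 0-7 */
}
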
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index 2b4c7d3..f2610c9 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -4,27 +4,26 @@
 #define USE_DVICHIP
 
 /* Hot Plug detection mode structure */
-typedef enum _sii164_hot_plug_mode_t
-{
-    SII164_HOTPLUG_DISABLE = 0,         /* Disable Hot Plug output bit (always high). */
-    SII164_HOTPLUG_USE_MDI,             /* Use Monitor Detect Interrupt bit. */
-    SII164_HOTPLUG_USE_RSEN,            /* Use Receiver Sense detect bit. */
-    SII164_HOTPLUG_USE_HTPLG            /* Use Hot Plug detect bit. */
+typedef enum _sii164_hot_plug_mode_t {
+	SII164_HOTPLUG_DISABLE = 0,         /* Disable Hot Plug output bit (always high). */
+	SII164_HOTPLUG_USE_MDI,             /* Use Monitor Detect Interrupt bit. */
+	SII164_HOTPLUG_USE_RSEN,            /* Use Receiver Sense detect bit. */
+	SII164_HOTPLUG_USE_HTPLG            /* Use Hot Plug detect bit. */
 } sii164_hot_plug_mode_t;
 
 
 /* Silicon Image SiI164 chip prototype */
 long sii164InitChip(
-    unsigned char edgeSelect,
-    unsigned char busSelect,
-    unsigned char dualEdgeClkSelect,
-    unsigned char hsyncEnable,
-    unsigned char vsyncEnable,
-    unsigned char deskewEnable,
-    unsigned char deskewSetting,
-    unsigned char continuousSyncEnable,
-    unsigned char pllFilterEnable,
-    unsigned char pllFilterValue
+	unsigned char edgeSelect,
+	unsigned char busSelect,
+	unsigned char dualEdgeClkSelect,
+	unsigned char hsyncEnable,
+	unsigned char vsyncEnable,
+	unsigned char deskewEnable,
+	unsigned char deskewSetting,
+	unsigned char continuousSyncEnable,
+	unsigned char pllFilterEnable,
+	unsigned char pllFilterValue
 );
 
 unsigned short sii164GetVendorID(void);
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 8e201f1..07f8afd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -24,8 +24,7 @@
 
 #include "modedb.h"
 
-int smi_indent = 0;
-
+int smi_indent;
 
 /*
  * #ifdef __BIG_ENDIAN
@@ -40,17 +39,15 @@
 typedef int (*PROC_SPEC_MAP)(struct lynx_share*, struct pci_dev*);
 typedef int (*PROC_SPEC_INITHW)(struct lynx_share*, struct pci_dev*);
 
-
 /* common var for all device */
 static int g_hwcursor = 1;
 static int g_noaccel;
 static int g_nomtrr;
 static const char *g_fbmode[] = {NULL, NULL};
 static const char *g_def_fbmode = "800x600-16@60";
-static char *g_settings = NULL;
+static char *g_settings;
 static int g_dualview;
-static char *g_option = NULL;
-
+static char *g_option;
 
 static const struct fb_videomode lynx750_ext[] = {
 	/*	1024x600-60 VESA	[1.71:1] */
@@ -115,8 +112,6 @@
 };
 
 
-
-
 /* no hardware cursor supported under version 2.6.10, kernel bug */
 static int lynxfb_ops_cursor(struct fb_info *info, struct fb_cursor *fbcursor)
 {
@@ -149,18 +144,17 @@
 		/* get the 16bit color of kernel means */
 		u16 fg, bg;
 
-		fg = ((info->cmap.red[fbcursor->image.fg_color] & 0xf800))|
-		      ((info->cmap.green[fbcursor->image.fg_color] & 0xfc00) >> 5)|
+		fg = ((info->cmap.red[fbcursor->image.fg_color] & 0xf800)) |
+		      ((info->cmap.green[fbcursor->image.fg_color] & 0xfc00) >> 5) |
 		      ((info->cmap.blue[fbcursor->image.fg_color] & 0xf800) >> 11);
 
-		bg = ((info->cmap.red[fbcursor->image.bg_color] & 0xf800))|
-		      ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5)|
+		bg = ((info->cmap.red[fbcursor->image.bg_color] & 0xf800)) |
+		      ((info->cmap.green[fbcursor->image.bg_color] & 0xfc00) >> 5) |
 		      ((info->cmap.blue[fbcursor->image.bg_color] & 0xf800) >> 11);
 
 		cursor->setColor(cursor, fg, bg);
 	}
 
-
 	if (fbcursor->set & (FB_CUR_SETSHAPE | FB_CUR_SETIMAGE)) {
 		cursor->setData(cursor,
 				fbcursor->rop,
@@ -188,14 +182,17 @@
 	par = info->par;
 	share = par->share;
 
-	/* each time 2d function begin to work,below three variable always need
-	 * be set, seems we can put them together in some place  */
+	/*
+	 * each time a 2D function begins to work, the three variables below
+	 * always need to be set; it seems they could be grouped in one place
+	 */
 	base = par->crtc.oScreen;
 	pitch = info->fix.line_length;
 	Bpp = info->var.bits_per_pixel >> 3;
 
-	color = (Bpp == 1)?region->color:((u32 *)info->pseudo_palette)[region->color];
-	rop = (region->rop != ROP_COPY) ? HW_ROP2_XOR:HW_ROP2_COPY;
+	color = (Bpp == 1) ? region->color :
+		((u32 *)info->pseudo_palette)[region->color];
+	rop = (region->rop != ROP_COPY) ? HW_ROP2_XOR : HW_ROP2_COPY;
 
 	/*
 	 * If not use spin_lock,system will die if user load driver
@@ -223,8 +220,10 @@
 	par = info->par;
 	share = par->share;
 
-	/* each time 2d function begin to work,below three variable always need
-	 * be set, seems we can put them together in some place  */
+	/*
+	 * each time a 2D function begins to work, the three variables below
+	 * always need to be set; it seems they could be grouped in one place
+	 */
 	base = par->crtc.oScreen;
 	pitch = info->fix.line_length;
 	Bpp = info->var.bits_per_pixel >> 3;
@@ -254,28 +253,29 @@
 
 	par = info->par;
 	share = par->share;
-	/* each time 2d function begin to work,below three variable always need
-	 * be set, seems we can put them together in some place  */
+	/*
+	 * each time a 2D function begins to work, the three variables below
+	 * always need to be set; it seems they could be grouped in one place
+	 */
 	base = par->crtc.oScreen;
 	pitch = info->fix.line_length;
 	Bpp = info->var.bits_per_pixel >> 3;
 
-	if (image->depth == 1) {
-		if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
-		    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
-			fgcol = ((u32 *)info->pseudo_palette)[image->fg_color];
-			bgcol = ((u32 *)info->pseudo_palette)[image->bg_color];
-		} else {
-			fgcol = image->fg_color;
-			bgcol = image->bg_color;
-		}
-		goto _do_work;
-	}
 	/* TODO: Implement hardware acceleration for image->depth > 1 */
-	cfb_imageblit(info, image);
-	return;
+	if (image->depth != 1) {
+		cfb_imageblit(info, image);
+		return;
+	}
 
-_do_work:
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		fgcol = ((u32 *)info->pseudo_palette)[image->fg_color];
+		bgcol = ((u32 *)info->pseudo_palette)[image->bg_color];
+	} else {
+		fgcol = image->fg_color;
+		bgcol = image->bg_color;
+	}
+
 	/*
 	 * If not use spin_lock, system will die if user load driver
 	 * and immediately unload driver frequently (dual)
@@ -284,7 +284,7 @@
 		spin_lock(&share->slock);
 
 	share->accel.de_imageblit(&share->accel,
-				  image->data, image->width>>3, 0,
+				  image->data, image->width >> 3, 0,
 				  base, pitch, Bpp,
 				  image->dx, image->dy,
 				  image->width, image->height,
@@ -298,18 +298,13 @@
 {
 	struct lynxfb_par *par;
 	struct lynxfb_crtc *crtc;
-	int ret;
-
 
 	if (!info)
 		return -EINVAL;
 
-	ret = 0;
 	par = info->par;
 	crtc = &par->crtc;
-	ret = crtc->proc_panDisplay(crtc, var, info);
-
-	return ret;
+	return crtc->proc_panDisplay(crtc, var, info);
 }
 
 static int lynxfb_ops_set_par(struct fb_info *info)
@@ -340,9 +335,10 @@
 	fix->line_length = line_length;
 	pr_info("fix->line_length = %d\n", fix->line_length);
 
-	/* var->red,green,blue,transp are need to be set by driver
+	/*
+	 * var->red,green,blue,transp need to be set by the driver
 	 * and these data should be set before setcolreg routine
-	 * */
+	 */
 
 	switch (var->bits_per_pixel) {
 	case 8:
@@ -466,7 +462,6 @@
 
 	int ret;
 
-
 	ret = 0;
 	share = pci_get_drvdata(pdev);
 
@@ -478,7 +473,6 @@
 		return ret;
 	}
 
-
 	if (pdev->dev.power.power_state.event != PM_EVENT_FREEZE) {
 		pci_restore_state(pdev);
 		ret = pci_enable_device(pdev);
@@ -493,7 +487,6 @@
 
 	hw_sm750_inithw(share, pdev);
 
-
 	info = share->fbinfo[0];
 
 	if (info) {
@@ -518,7 +511,6 @@
 		fb_set_suspend(info, 0);
 	}
 
-
 	console_unlock();
 	return ret;
 }
@@ -534,7 +526,6 @@
 	int ret;
 	resource_size_t request;
 
-
 	par = info->par;
 	crtc = &par->crtc;
 	output = &par->output;
@@ -546,7 +537,6 @@
 		 var->yres,
 		 var->bits_per_pixel);
 
-
 	switch (var->bits_per_pixel) {
 	case 8:
 	case 16:
@@ -617,7 +607,6 @@
 	return ret;
 }
 
-
 static int lynxfb_ops_setcolreg(unsigned regno,
 				unsigned red,
 				unsigned green,
@@ -652,7 +641,6 @@
 		goto exit;
 	}
 
-
 	if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 256) {
 		u32 val;
 
@@ -699,7 +687,8 @@
 	output = &par->output;
 	crtc = &par->crtc;
 
-	crtc->vidmem_size = (share->dual)?share->vidmem_size>>1:share->vidmem_size;
+	crtc->vidmem_size = (share->dual) ? share->vidmem_size >> 1 :
+			     share->vidmem_size;
 	/* setup crtc and output member */
 	spec_share->hwCursor = g_hwcursor;
 
@@ -716,10 +705,12 @@
 	output->proc_setMode = hw_sm750_output_setMode;
 	output->proc_checkMode = hw_sm750_output_checkMode;
 
-	output->proc_setBLANK = (share->revid == SM750LE_REVISION_ID)?hw_sm750le_setBLANK:hw_sm750_setBLANK;
+	output->proc_setBLANK = (share->revid == SM750LE_REVISION_ID) ?
+				 hw_sm750le_setBLANK : hw_sm750_setBLANK;
 	output->clear = hw_sm750_output_clear;
 	/* chip specific phase */
-	share->accel.de_wait = (share->revid == SM750LE_REVISION_ID)?hw_sm750le_deWait : hw_sm750_deWait;
+	share->accel.de_wait = (share->revid == SM750LE_REVISION_ID) ?
+				hw_sm750le_deWait : hw_sm750_deWait;
 	switch (spec_share->state.dataflow) {
 	case sm750_simul_pri:
 		output->paths = sm750_pnc;
@@ -782,7 +773,6 @@
 	.fb_cursor = lynxfb_ops_cursor,
 };
 
-
 static int lynxfb_set_fbinfo(struct fb_info *info, int index)
 {
 	int i;
@@ -803,7 +793,6 @@
 		"kernel HELPERS prepared vesa_modes",
 	};
 
-
 	static const char *fixId[2] = {
 		"sm750_fb1", "sm750_fb2",
 	};
@@ -824,15 +813,16 @@
 	sm750fb_set_drv(par);
 	lynxfb_ops.fb_pan_display = lynxfb_ops_pan_display;
 
-
-	/* set current cursor variable and proc pointer,
-	 * must be set after crtc member initialized */
+	/*
+	 * set the current cursor variables and proc pointers;
+	 * they must be set after the crtc members are initialized
+	 */
 	crtc->cursor.offset = crtc->oScreen + crtc->vidmem_size - 1024;
 	crtc->cursor.mmio = share->pvReg + 0x800f0 + (int)crtc->channel * 0x140;
 
 	pr_info("crtc->cursor.mmio = %p\n", crtc->cursor.mmio);
 	crtc->cursor.maxH = crtc->cursor.maxW = 64;
-	crtc->cursor.size = crtc->cursor.maxH*crtc->cursor.maxW*2/8;
+	crtc->cursor.size = crtc->cursor.maxH * crtc->cursor.maxW * 2 / 8;
 	crtc->cursor.disable = hw_cursor_disable;
 	crtc->cursor.enable = hw_cursor_enable;
 	crtc->cursor.setColor = hw_cursor_setColor;
@@ -841,7 +831,6 @@
 	crtc->cursor.setData = hw_cursor_setData;
 	crtc->cursor.vstart = share->pvMem + crtc->cursor.offset;
 
-
 	crtc->cursor.share = share;
 		memset_io(crtc->cursor.vstart, 0, crtc->cursor.size);
 	if (!g_hwcursor) {
@@ -849,7 +838,6 @@
 		crtc->cursor.disable(&crtc->cursor);
 	}
 
-
 	/* set info->fbops, must be set before fb_find_mode */
 	if (!share->accel_off) {
 		/* use 2d acceleration */
@@ -865,7 +853,6 @@
 			g_fbmode[index] = g_fbmode[0];
 	}
 
-
 	for (i = 0; i < 3; i++) {
 
 		ret = fb_find_mode(var, info, g_fbmode[index],
@@ -917,13 +904,13 @@
 
 	/* set info */
 	line_length = PADDING(crtc->line_pad,
-			      (var->xres_virtual * var->bits_per_pixel/8));
+			      (var->xres_virtual * var->bits_per_pixel / 8));
 
 	info->pseudo_palette = &par->pseudo_palette[0];
 	info->screen_base = crtc->vScreen;
 	pr_debug("screen_base vaddr = %p\n", info->screen_base);
 	info->screen_size = line_length * var->yres_virtual;
-	info->flags = FBINFO_FLAG_DEFAULT|0;
+	info->flags = FBINFO_FLAG_DEFAULT | 0;
 
 	/* set info->fix */
 	fix->type = FB_TYPE_PACKED_PIXELS;
@@ -935,15 +922,15 @@
 
 	strlcpy(fix->id, fixId[index], sizeof(fix->id));
 
-
 	fix->smem_start = crtc->oScreen + share->vidmem_start;
 	pr_info("fix->smem_start = %lx\n", fix->smem_start);
-	/* according to mmap experiment from user space application,
+	/*
+	 * according to mmap experiment from user space application,
 	 * fix->mmio_len should not larger than virtual size
 	 * (xres_virtual x yres_virtual x ByPP)
 	 * Below line maybe buggy when user mmap fb dev node and write
 	 * data into the bound over virtual size
-	 * */
+	 */
 	fix->smem_len = crtc->vidmem_size;
 	pr_info("fix->smem_len = %x\n", fix->smem_len);
 	info->screen_size = fix->smem_len;
@@ -967,7 +954,7 @@
 	var->accel_flags = 0;
 	var->vmode = FB_VMODE_NONINTERLACED;
 
-	pr_debug("#1 show info->cmap : \nstart=%d,len=%d,red=%p,green=%p,blue=%p,transp=%p\n",
+	pr_debug("#1 show info->cmap :\nstart=%d,len=%d,red=%p,green=%p,blue=%p,transp=%p\n",
 		 info->cmap.start, info->cmap.len,
 		 info->cmap.red, info->cmap.green, info->cmap.blue,
 		 info->cmap.transp);
@@ -998,7 +985,6 @@
 #endif
 	int swap;
 
-
 	spec_share = container_of(share, struct sm750_share, share);
 #ifdef CAP_EXPENSIION
 	exp_res = NULL;
@@ -1096,15 +1082,16 @@
 	size_t spec_offset = 0;
 	int fbidx;
 
-
 	/* enable device */
 	if (pci_enable_device(pdev)) {
 		pr_err("can not enable device.\n");
 		goto err_enable;
 	}
 
-	/* though offset of share in sm750_share is 0,
-	 * we use this marcro as the same */
+	/*
+	 * though the offset of share within sm750_share is 0,
+	 * we use this macro all the same
+	 */
 	spec_offset = offsetof(struct sm750_share, share);
 
 	spec_share = kzalloc(sizeof(*spec_share), GFP_KERNEL);
@@ -1128,10 +1115,12 @@
 	spin_lock_init(&share->slock);
 
 	if (!share->accel_off) {
-		/* hook deInit and 2d routines, notes that below hw_xxx
+		/*
+		 * hook deInit and 2d routines, notes that below hw_xxx
 		 * routine can work on most of lynx chips
 		 * if some chip need specific function,
-		 * please hook it in smXXX_set_drv routine */
+		 * please hook it in smXXX_set_drv routine
+		 */
 		share->accel.de_init = hw_de_init;
 		share->accel.de_fillrect = hw_fillrect;
 		share->accel.de_copyarea = hw_copyarea;
@@ -1268,7 +1257,6 @@
 	int len;
 	char *opt, *tmp;
 
-
 	if (!options || !*options) {
 		pr_warn("no options.\n");
 		return 0;
@@ -1283,14 +1271,15 @@
 
 	tmp = g_settings;
 
-	/*	Notes:
-		char * strsep(char **s,const char * ct);
-		@s: the string to be searched
-		@ct :the characters to search for
-
-		strsep() updates @options to pointer after the first found token
-		it also returns the pointer ahead the token.
-		*/
+	/*
+	 * Notes:
+	 * char * strsep(char **s,const char * ct);
+	 * @s: the string to be searched
+	 * @ct: the characters to search for
+	 *
+	 * strsep() updates @options to point past the first token found;
+	 * it also returns a pointer to that token.
+	 */
 	while ((opt = strsep(&options, ":")) != NULL) {
 		/* options that mean for any lynx chips are configured here */
 		if (!strncmp(opt, "noaccel", strlen("noaccel")))
@@ -1332,7 +1321,6 @@
 #endif
 };
 
-
 static int __init lynxfb_init(void)
 {
 	char *option;
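
The hardware-cursor path in lynxfb_ops_cursor() earlier in this file packs the 16-bit-per-channel cmap values into the RGB565 format expected by the cursor hardware. A self-contained sketch of that packing (the pack_rgb565() name is hypothetical):

/*
 * Sketch only: keep the top 5/6/5 bits of each 16-bit channel.
 */
static unsigned short pack_rgb565(unsigned short r16, unsigned short g16,
				  unsigned short b16)
{
	return (r16 & 0xf800) |			/* red   -> bits 15:11 */
	       ((g16 & 0xfc00) >> 5) |		/* green -> bits 10:5  */
	       ((b16 & 0xf800) >> 11);		/* blue  -> bits 4:0   */
}
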
diff --git a/drivers/staging/sm750fb/sm750.h b/drivers/staging/sm750fb/sm750.h
index cc80580..5bc4455 100644
--- a/drivers/staging/sm750fb/sm750.h
+++ b/drivers/staging/sm750fb/sm750.h
@@ -5,20 +5,20 @@
 
 #define FB_ACCEL_SMI 0xab
 /* please use revision id to distinguish sm750le and sm750*/
-#define SPC_SM750 	0
+#define SPC_SM750 0
 
 #define MB(x) ((x)<<20)
 #define MHZ(x) ((x) * 1000000)
 /* align should be 2,4,8,16 */
-#define PADDING(align, data) (((data)+(align)-1)&(~((align) -1)))
+#define PADDING(align, data) (((data)+(align)-1)&(~((align) - 1)))
 extern int smi_indent;
 
 
-struct lynx_accel{
+struct lynx_accel {
 	/* base virtual address of DPR registers */
-	volatile unsigned char __iomem * dprBase;
+	volatile unsigned char __iomem *dprBase;
 	/* base virtual address of de data port */
-	volatile unsigned char __iomem * dpPortBase;
+	volatile unsigned char __iomem *dpPortBase;
 
 	/* function pointers */
 	void (*de_init)(struct lynx_accel *);
@@ -38,10 +38,10 @@
 
 };
 
-/* 	lynx_share stands for a presentation of two frame buffer
-	that use one smi adaptor , it is similar to a basic class of C++
+/* lynx_share represents the two frame buffers
+   that share one smi adaptor; it is similar to a base class in C++
 */
-struct lynx_share{
+struct lynx_share {
 	/* common members */
 	u16 devid;
 	u8 revid;
@@ -53,7 +53,7 @@
 		int mtrr_off;
 		struct{
 			int vram;
-		}mtrr;
+		} mtrr;
 	/* all smi graphic adaptor got below attributes */
 	unsigned long vidmem_start;
 	unsigned long vidreg_start;
@@ -64,11 +64,11 @@
 	/* locks*/
 	spinlock_t slock;
 	/* function pointers */
-	void (*suspend)(struct lynx_share*);
-	void (*resume)(struct lynx_share*);
+	void (*suspend)(struct lynx_share *);
+	void (*resume)(struct lynx_share *);
 };
 
-struct lynx_cursor{
+struct lynx_cursor {
 	/* cursor width ,height and size */
 	int w;
 	int h;
@@ -80,7 +80,7 @@
 	char __iomem *vstart;
 	int offset;
 	/* mmio addr of hw cursor */
-	volatile char __iomem * mmio;
+	volatile char __iomem *mmio;
 	/* the lynx_share of this adaptor */
 	struct lynx_share *share;
 	/* proc_routines */
@@ -92,7 +92,7 @@
 	void (*setData)(struct lynx_cursor *, u16, const u8*, const u8*);
 };
 
-struct lynxfb_crtc{
+struct lynxfb_crtc {
 	unsigned char __iomem *vCursor; /* virtual address of cursor */
 	unsigned char __iomem *vScreen; /* virtual address of on_screen */
 	int oCursor; /* cursor address offset in vidmem */
@@ -108,14 +108,14 @@
 
 	void *priv;
 
-	int(*proc_setMode)(struct lynxfb_crtc*,
+	int (*proc_setMode)(struct lynxfb_crtc*,
 						struct fb_var_screeninfo*,
 						struct fb_fix_screeninfo*);
 
-	int(*proc_checkMode)(struct lynxfb_crtc*, struct fb_var_screeninfo*);
-	int(*proc_setColReg)(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
-	void (*clear)(struct lynxfb_crtc*);
-        /* pan display */
+	int (*proc_checkMode)(struct lynxfb_crtc*, struct fb_var_screeninfo*);
+	int (*proc_setColReg)(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
+	void (*clear)(struct lynxfb_crtc *);
+	/* pan display */
 	int (*proc_panDisplay)(struct lynxfb_crtc *,
 			       const struct fb_var_screeninfo *,
 			       const struct fb_info *);
@@ -123,33 +123,33 @@
 	struct lynx_cursor cursor;
 };
 
-struct lynxfb_output{
+struct lynxfb_output {
 	int dpms;
 	int paths;
-	/* 	which paths(s) this output stands for,for sm750:
-		paths=1:means output for panel paths
-		paths=2:means output for crt paths
-		paths=3:means output for both panel and crt paths
+	/* which path(s) this output stands for; for sm750:
+	   paths=1:means output for panel paths
+	   paths=2:means output for crt paths
+	   paths=3:means output for both panel and crt paths
 	*/
 
 	int *channel;
-	/* 	which channel these outputs linked with,for sm750:
-		*channel=0 means primary channel
-		*channel=1 means secondary channel
-		output->channel ==> &crtc->channel
+	/* which channel these outputs are linked with; for sm750:
+	   *channel=0 means primary channel
+	   *channel=1 means secondary channel
+	   output->channel ==> &crtc->channel
 	*/
 	void *priv;
 
-	int(*proc_setMode)(struct lynxfb_output*,
+	int (*proc_setMode)(struct lynxfb_output*,
 						struct fb_var_screeninfo*,
 						struct fb_fix_screeninfo*);
 
-	int(*proc_checkMode)(struct lynxfb_output*, struct fb_var_screeninfo*);
-	int(*proc_setBLANK)(struct lynxfb_output*, int);
-	void  (*clear)(struct lynxfb_output*);
+	int (*proc_checkMode)(struct lynxfb_output*, struct fb_var_screeninfo*);
+	int (*proc_setBLANK)(struct lynxfb_output*, int);
+	void  (*clear)(struct lynxfb_output *);
 };
 
-struct lynxfb_par{
+struct lynxfb_par {
 	/* either 0 or 1 for dual head adaptor,0 is the older one registered */
 	int index;
 	unsigned int pseudo_palette[256];
@@ -165,14 +165,14 @@
 
 
 #define PS_TO_HZ(ps)	\
-			({ 	\
+			({ \
 			unsigned long long hz = 1000*1000*1000*1000ULL;	\
 			do_div(hz, ps);	\
-			(unsigned long)hz;})
+			(unsigned long)hz; })
 
 static inline unsigned long ps_to_hz(unsigned int psvalue)
 {
-	unsigned long long numerator=1000*1000*1000*1000ULL;
+	unsigned long long numerator = 1000*1000*1000*1000ULL;
 	/* 10^12 / picosecond period gives frequency in Hz */
 	do_div(numerator, psvalue);
 	return (unsigned long)numerator;
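
For reference, ps_to_hz() above converts a pixel period in picoseconds to a frequency: a 25000 ps period gives 10^12 / 25000 = 40,000,000 Hz, i.e. 40 MHz. A user-space sketch of the same arithmetic (the kernel version uses do_div() for the 64-by-32 division):

#include <stdio.h>

/* Sketch only: same arithmetic as ps_to_hz(), without do_div(). */
static unsigned long ps_to_hz_example(unsigned int ps)
{
	unsigned long long hz = 1000ULL * 1000 * 1000 * 1000;	/* 10^12 */

	return (unsigned long)(hz / ps);
}

int main(void)
{
	printf("%lu\n", ps_to_hz_example(25000));	/* prints 40000000 (40 MHz) */
	return 0;
}
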
diff --git a/drivers/staging/sm750fb/sm750_accel.c b/drivers/staging/sm750fb/sm750_accel.c
index 6eee4cd..1dd06a2 100644
--- a/drivers/staging/sm750fb/sm750_accel.c
+++ b/drivers/staging/sm750fb/sm750_accel.c
@@ -37,7 +37,7 @@
 {
 	/* setup 2d engine registers */
 	u32 reg, clr;
-	
+
 	write_dpr(accel, DE_MASKS, 0xFFFFFFFF);
 
 	/* dpr1c */
@@ -82,7 +82,7 @@
 void hw_set2dformat(struct lynx_accel *accel, int fmt)
 {
 	u32 reg;
-	
+
 	/* fmt=0,1,2 for 8,16,32,bpp on sm718/750/502 */
 	reg = read_dpr(accel, DE_STRETCH_FORMAT);
 	reg = FIELD_VALUE(reg, DE_STRETCH_FORMAT, PIXEL_FORMAT, fmt);
@@ -96,11 +96,10 @@
 {
 	u32 deCtrl;
 
-	if(accel->de_wait() != 0)
-	{
+	if (accel->de_wait() != 0) {
 		/* long wait and still busy; the hardware seems
 		 * to have hit an error */
-		pr_debug("%s:De engine always bussy\n", __func__);
+		pr_debug("De engine always busy\n");
 		return -1;
 	}
 
@@ -151,112 +150,102 @@
 unsigned int height, /* width and height of rectangle in pixel value */
 unsigned int rop2)   /* ROP value */
 {
-    unsigned int nDirection, de_ctrl;
-    int opSign;
-    nDirection = LEFT_TO_RIGHT;
+	unsigned int nDirection, de_ctrl;
+	int opSign;
+
+	nDirection = LEFT_TO_RIGHT;
 	/* Direction of ROP2 operation: 1 = Left to Right, (-1) = Right to Left */
-    opSign = 1;
-    de_ctrl = 0;
+	opSign = 1;
+	de_ctrl = 0;
 
-    /* If source and destination are the same surface, need to check for overlay cases */
-    if (sBase == dBase && sPitch == dPitch)
-    {
-        /* Determine direction of operation */
-        if (sy < dy)
-        {
-            /* +----------+
-               |S         |
-               |   +----------+
-               |   |      |   |
-               |   |      |   |
-               +---|------+   |
-                   |         D|
-                   +----------+ */
+	/* If source and destination are the same surface, need to check for overlay cases */
+	if (sBase == dBase && sPitch == dPitch) {
+		/* Determine direction of operation */
+		if (sy < dy) {
+			/* +----------+
+			   |S         |
+			   |   +----------+
+			   |   |      |   |
+			   |   |      |   |
+			   +---|------+   |
+			   |         D|
+			   +----------+ */
 
-            nDirection = BOTTOM_TO_TOP;
-        }
-        else if (sy > dy)
-        {
-            /* +----------+
-               |D         |
-               |   +----------+
-               |   |      |   |
-               |   |      |   |
-               +---|------+   |
-                   |         S|
-                   +----------+ */
+			nDirection = BOTTOM_TO_TOP;
+		} else if (sy > dy) {
+			/* +----------+
+			   |D         |
+			   |   +----------+
+			   |   |      |   |
+			   |   |      |   |
+			   +---|------+   |
+			   |         S|
+			   +----------+ */
 
-            nDirection = TOP_TO_BOTTOM;
-        }
-        else
-        {
-            /* sy == dy */
+			nDirection = TOP_TO_BOTTOM;
+		} else {
+			/* sy == dy */
 
-            if (sx <= dx)
-            {
-                /* +------+---+------+
-                   |S     |   |     D|
-                   |      |   |      |
-                   |      |   |      |
-                   |      |   |      |
-                   +------+---+------+ */
+			if (sx <= dx) {
+				/* +------+---+------+
+				   |S     |   |     D|
+				   |      |   |      |
+				   |      |   |      |
+				   |      |   |      |
+				   +------+---+------+ */
 
-                nDirection = RIGHT_TO_LEFT;
-            }
-            else
-            {
-                /* sx > dx */
+				nDirection = RIGHT_TO_LEFT;
+			} else {
+			/* sx > dx */
 
-                /* +------+---+------+
-                   |D     |   |     S|
-                   |      |   |      |
-                   |      |   |      |
-                   |      |   |      |
-                   +------+---+------+ */
+				/* +------+---+------+
+				   |D     |   |     S|
+				   |      |   |      |
+				   |      |   |      |
+				   |      |   |      |
+				   +------+---+------+ */
 
-                nDirection = LEFT_TO_RIGHT;
-            }
-        }
-    }
+				nDirection = LEFT_TO_RIGHT;
+			}
+		}
+	}
 
-    if ((nDirection == BOTTOM_TO_TOP) || (nDirection == RIGHT_TO_LEFT))
-    {
-        sx += width - 1;
-        sy += height - 1;
-        dx += width - 1;
-        dy += height - 1;
-        opSign = (-1);
-    }
+	if ((nDirection == BOTTOM_TO_TOP) || (nDirection == RIGHT_TO_LEFT)) {
+		sx += width - 1;
+		sy += height - 1;
+		dx += width - 1;
+		dy += height - 1;
+		opSign = (-1);
+	}
 
-    /* Note:
-       DE_FOREGROUND are DE_BACKGROUND are don't care.
-       DE_COLOR_COMPARE and DE_COLOR_COMPARE_MAKS are set by set deSetTransparency().
-    */
+	/* Note:
+	   DE_FOREGROUND and DE_BACKGROUND are don't care.
+	   DE_COLOR_COMPARE and DE_COLOR_COMPARE_MASK are set by deSetTransparency().
+	 */
 
-    /* 2D Source Base.
-       It is an address offset (128 bit aligned) from the beginning of frame buffer.
-    */
-    write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
+	/* 2D Source Base.
+	 It is an address offset (128 bit aligned) from the beginning of frame buffer.
+	 */
+	write_dpr(accel, DE_WINDOW_SOURCE_BASE, sBase); /* dpr40 */
 
-    /* 2D Destination Base.
-       It is an address offset (128 bit aligned) from the beginning of frame buffer.
-    */
-    write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
+	/* 2D Destination Base.
+	 It is an address offset (128 bit aligned) from the beginning of frame buffer.
+	 */
+	write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase); /* dpr44 */
 
 #if 0
     /* Program pitch (distance between the 1st points of two adjacent lines).
        Note that input pitch is BYTE value, but the 2D Pitch register uses
        pixel values. Need Byte to pixel conversion.
     */
-	if(Bpp == 3){
+	if (Bpp == 3) {
 			sx *= 3;
 			dx *= 3;
 			width *= 3;
 		write_dpr(accel, DE_PITCH,
 				FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch) |
 				FIELD_VALUE(0, DE_PITCH, SOURCE,      sPitch)); /* dpr10 */
-	}
-	else
+	} else
 #endif
 	{
 		write_dpr(accel, DE_PITCH,
@@ -267,54 +256,53 @@
     /* Screen Window width in Pixels.
        2D engine uses this value to calculate the linear address in frame buffer for a given point.
     */
-    write_dpr(accel, DE_WINDOW_WIDTH,
-        FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) |
-        FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE,      (sPitch/Bpp))); /* dpr3c */
+	write_dpr(accel, DE_WINDOW_WIDTH,
+		  FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/Bpp)) |
+		  FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE,      (sPitch/Bpp))); /* dpr3c */
 
-	if (accel->de_wait() != 0){
+	if (accel->de_wait() != 0)
 		return -1;
+
+	{
+
+	write_dpr(accel, DE_SOURCE,
+		  FIELD_SET(0, DE_SOURCE, WRAP, DISABLE) |
+		  FIELD_VALUE(0, DE_SOURCE, X_K1, sx)   |
+		  FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */
+	write_dpr(accel, DE_DESTINATION,
+		  FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+		  FIELD_VALUE(0, DE_DESTINATION, X,    dx)  |
+		  FIELD_VALUE(0, DE_DESTINATION, Y,    dy)); /* dpr04 */
+	write_dpr(accel, DE_DIMENSION,
+		  FIELD_VALUE(0, DE_DIMENSION, X,    width) |
+		  FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+
+	de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
+		  FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
+		  FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
+		  ((nDirection == RIGHT_TO_LEFT) ?
+		  FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT)
+		  : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) |
+		  FIELD_SET(0, DE_CONTROL, STATUS, START);
+	write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
+
 	}
 
-    {
-
-        write_dpr(accel, DE_SOURCE,
-            FIELD_SET  (0, DE_SOURCE, WRAP, DISABLE) |
-            FIELD_VALUE(0, DE_SOURCE, X_K1, sx)   |
-            FIELD_VALUE(0, DE_SOURCE, Y_K2, sy)); /* dpr0 */
-        write_dpr(accel, DE_DESTINATION,
-            FIELD_SET  (0, DE_DESTINATION, WRAP, DISABLE) |
-            FIELD_VALUE(0, DE_DESTINATION, X,    dx)  |
-            FIELD_VALUE(0, DE_DESTINATION, Y,    dy)); /* dpr04 */
-        write_dpr(accel, DE_DIMENSION,
-            FIELD_VALUE(0, DE_DIMENSION, X,    width) |
-            FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
-
-        de_ctrl =
-            FIELD_VALUE(0, DE_CONTROL, ROP, rop2) |
-            FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2) |
-            FIELD_SET(0, DE_CONTROL, COMMAND, BITBLT) |
-            ((nDirection == RIGHT_TO_LEFT) ?
-            FIELD_SET(0, DE_CONTROL, DIRECTION, RIGHT_TO_LEFT)
-            : FIELD_SET(0, DE_CONTROL, DIRECTION, LEFT_TO_RIGHT)) |
-            FIELD_SET(0, DE_CONTROL, STATUS, START);
-		write_dpr(accel, DE_CONTROL, de_ctrl); /* dpr0c */
-    }
-
-    return 0;
+	return 0;
 }
 
 static unsigned int deGetTransparency(struct lynx_accel *accel)
 {
-    unsigned int de_ctrl;
+	unsigned int de_ctrl;
 
-    de_ctrl = read_dpr(accel, DE_CONTROL);
+	de_ctrl = read_dpr(accel, DE_CONTROL);
 
-    de_ctrl &=
-        FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
-        FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)|
-        FIELD_MASK(DE_CONTROL_TRANSPARENCY);
+	de_ctrl &=
+		   FIELD_MASK(DE_CONTROL_TRANSPARENCY_MATCH) |
+		   FIELD_MASK(DE_CONTROL_TRANSPARENCY_SELECT)|
+		   FIELD_MASK(DE_CONTROL_TRANSPARENCY);
 
-    return de_ctrl;
+	return de_ctrl;
 }
 
 int hw_imageblit(struct lynx_accel *accel,
@@ -332,38 +320,36 @@
 		 u32 bColor,   /* Background color (corresponding to a 0 in the monochrome data */
 		 u32 rop2)     /* ROP value */
 {
-    unsigned int ulBytesPerScan;
-    unsigned int ul4BytesPerScan;
-    unsigned int ulBytesRemain;
-    unsigned int de_ctrl = 0;
-    unsigned char ajRemain[4];
-    int i, j;
+	unsigned int ulBytesPerScan;
+	unsigned int ul4BytesPerScan;
+	unsigned int ulBytesRemain;
+	unsigned int de_ctrl = 0;
+	unsigned char ajRemain[4];
+	int i, j;
 
-    startBit &= 7; /* Just make sure the start bit is within legal range */
-    ulBytesPerScan = (width + startBit + 7) / 8;
-    ul4BytesPerScan = ulBytesPerScan & ~3;
-    ulBytesRemain = ulBytesPerScan & 3;
+	startBit &= 7; /* Just make sure the start bit is within legal range */
+	ulBytesPerScan = (width + startBit + 7) / 8;
+	ul4BytesPerScan = ulBytesPerScan & ~3;
+	ulBytesRemain = ulBytesPerScan & 3;
 
-	if(accel->de_wait() != 0)
-    {
-        return -1;
-    }
+	if (accel->de_wait() != 0)
+		return -1;
 
-    /* 2D Source Base.
-       Use 0 for HOST Blt.
-    */
-    write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
+	/* 2D Source Base.
+	 * Use 0 for HOST Blt.
+	 */
+	write_dpr(accel, DE_WINDOW_SOURCE_BASE, 0);
 
-    /* 2D Destination Base.
-       It is an address offset (128 bit aligned) from the beginning of frame buffer.
-    */
-    write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
+	/* 2D Destination Base.
+	 * It is an address offset (128 bit aligned) from the beginning of frame buffer.
+	 */
+	write_dpr(accel, DE_WINDOW_DESTINATION_BASE, dBase);
 #if 0
     /* Program pitch (distance between the 1st points of two adjacent lines).
        Note that input pitch is BYTE value, but the 2D Pitch register uses
        pixel values. Need Byte to pixel conversion.
     */
-	if(bytePerPixel == 3 ){
+	if (bytePerPixel == 3) {
 		dx *= 3;
 		width *= 3;
 		startBit *= 3;
@@ -371,8 +357,7 @@
 				FIELD_VALUE(0, DE_PITCH, DESTINATION, dPitch) |
 				FIELD_VALUE(0, DE_PITCH, SOURCE,      dPitch)); /* dpr10 */
 
-	}
-	else
+	} else
 #endif
 	{
 		write_dpr(accel, DE_PITCH,
@@ -380,30 +365,30 @@
 				FIELD_VALUE(0, DE_PITCH, SOURCE,      dPitch/bytePerPixel)); /* dpr10 */
 	}
 
-    /* Screen Window width in Pixels.
-       2D engine uses this value to calculate the linear address in frame buffer for a given point.
-    */
-    write_dpr(accel, DE_WINDOW_WIDTH,
-        FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) |
-        FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE,      (dPitch/bytePerPixel)));
+	/* Screen Window width in Pixels.
+	 * 2D engine uses this value to calculate the linear address in frame buffer for a given point.
+	 */
+	write_dpr(accel, DE_WINDOW_WIDTH,
+		  FIELD_VALUE(0, DE_WINDOW_WIDTH, DESTINATION, (dPitch/bytePerPixel)) |
+		  FIELD_VALUE(0, DE_WINDOW_WIDTH, SOURCE,      (dPitch/bytePerPixel)));
 
-    /* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
-             For mono bitmap, use startBit for X_K1. */
-    write_dpr(accel, DE_SOURCE,
-        FIELD_SET  (0, DE_SOURCE, WRAP, DISABLE)       |
-        FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */
+	/* Note: For 2D Source in Host Write, only X_K1_MONO field is needed, and Y_K2 field is not used.
+	 * For mono bitmap, use startBit for X_K1. */
+	write_dpr(accel, DE_SOURCE,
+		  FIELD_SET(0, DE_SOURCE, WRAP, DISABLE)       |
+		  FIELD_VALUE(0, DE_SOURCE, X_K1_MONO, startBit)); /* dpr00 */
 
-    write_dpr(accel, DE_DESTINATION,
-        FIELD_SET  (0, DE_DESTINATION, WRAP, DISABLE) |
-        FIELD_VALUE(0, DE_DESTINATION, X,    dx)    |
-        FIELD_VALUE(0, DE_DESTINATION, Y,    dy)); /* dpr04 */
+	write_dpr(accel, DE_DESTINATION,
+		  FIELD_SET(0, DE_DESTINATION, WRAP, DISABLE) |
+		  FIELD_VALUE(0, DE_DESTINATION, X,    dx)    |
+		  FIELD_VALUE(0, DE_DESTINATION, Y,    dy)); /* dpr04 */
 
-    write_dpr(accel, DE_DIMENSION,
-        FIELD_VALUE(0, DE_DIMENSION, X,    width) |
-        FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
+	write_dpr(accel, DE_DIMENSION,
+		  FIELD_VALUE(0, DE_DIMENSION, X,    width) |
+		  FIELD_VALUE(0, DE_DIMENSION, Y_ET, height)); /* dpr08 */
 
-    write_dpr(accel, DE_FOREGROUND, fColor);
-    write_dpr(accel, DE_BACKGROUND, bColor);
+	write_dpr(accel, DE_FOREGROUND, fColor);
+	write_dpr(accel, DE_BACKGROUND, bColor);
 
 	de_ctrl = FIELD_VALUE(0, DE_CONTROL, ROP, rop2)         |
 		FIELD_SET(0, DE_CONTROL, ROP_SELECT, ROP2)    |
@@ -413,24 +398,20 @@
 
 	write_dpr(accel, DE_CONTROL, de_ctrl | deGetTransparency(accel));
 
-    /* Write MONO data (line by line) to 2D Engine data port */
-    for (i=0; i<height; i++)
-    {
-        /* For each line, send the data in chunks of 4 bytes */
-        for (j=0; j<(ul4BytesPerScan/4); j++)
-        {
-            write_dpPort(accel, *(unsigned int *)(pSrcbuf + (j * 4)));
-        }
+	/* Write MONO data (line by line) to 2D Engine data port */
+	for (i = 0; i < height; i++) {
+		/* For each line, send the data in chunks of 4 bytes */
+		for (j = 0; j < (ul4BytesPerScan/4); j++)
+			write_dpPort(accel, *(unsigned int *)(pSrcbuf + (j * 4)));
 
-        if (ulBytesRemain)
-        {
-            memcpy(ajRemain, pSrcbuf+ul4BytesPerScan, ulBytesRemain);
-            write_dpPort(accel, *(unsigned int *)ajRemain);
-        }
+		if (ulBytesRemain) {
+			memcpy(ajRemain, pSrcbuf+ul4BytesPerScan, ulBytesRemain);
+			write_dpPort(accel, *(unsigned int *)ajRemain);
+		}
 
-        pSrcbuf += srcDelta;
-    }
+		pSrcbuf += srcDelta;
+	}
 
-    return 0;
+	return 0;
 }
 
diff --git a/drivers/staging/sm750fb/sm750_accel.h b/drivers/staging/sm750fb/sm750_accel.h
index d3d256c..f252e47 100644
--- a/drivers/staging/sm750fb/sm750_accel.h
+++ b/drivers/staging/sm750fb/sm750_accel.h
@@ -7,7 +7,7 @@
 /* notes: below address are the offset value from de_base_address (0x100000)*/
 
 /* for sm718/750/502 de_base is at mmreg_1mb*/
-#define DE_BASE_ADDR_TYPE1 	0x100000
+#define DE_BASE_ADDR_TYPE1 0x100000
 /* for sm712,de_base is at mmreg_32kb */
 #define DE_BASE_ADDR_TYPE2  0x8000
 /* for sm722,de_base is at mmreg_0 */
@@ -26,7 +26,7 @@
 #define DE_SOURCE_WRAP_ENABLE                           1
 #define DE_SOURCE_X_K1                                  29:16
 #define DE_SOURCE_Y_K2                                  15:0
-#define DE_SOURCE_X_K1_MONO 							20:16
+#define DE_SOURCE_X_K1_MONO				20:16
 
 #define DE_DESTINATION                                  0x4
 #define DE_DESTINATION_WRAP                             31:31
diff --git a/drivers/staging/sm750fb/sm750_cursor.c b/drivers/staging/sm750fb/sm750_cursor.c
index 405e24b..a94a4bb 100644
--- a/drivers/staging/sm750fb/sm750_cursor.c
+++ b/drivers/staging/sm750fb/sm750_cursor.c
@@ -61,6 +61,7 @@
 void hw_cursor_enable(struct lynx_cursor *cursor)
 {
 	u32 reg;
+
 	reg = FIELD_VALUE(0, HWC_ADDRESS, ADDRESS, cursor->offset)|
 			FIELD_SET(0, HWC_ADDRESS, EXT, LOCAL)|
 			FIELD_SET(0, HWC_ADDRESS, ENABLE, ENABLE);
@@ -81,6 +82,7 @@
 						int x, int y)
 {
 	u32 reg;
+
 	reg = FIELD_VALUE(0, HWC_LOCATION, Y, y)|
 			FIELD_VALUE(0, HWC_LOCATION, X, x);
 	POKE32(HWC_LOCATION, reg);
@@ -93,7 +95,7 @@
 }
 
 void hw_cursor_setData(struct lynx_cursor *cursor,
-			u16 rop, const u8* pcol, const u8* pmsk)
+			u16 rop, const u8 *pcol, const u8 *pmsk)
 {
 	int i, j, count, pitch, offset;
 	u8 color, mask, opr;
@@ -122,8 +124,7 @@
 		odd=0;
 */
 
-	for(i=0;i<count;i++)
-	{
+	for (i = 0; i < count; i++) {
 		color = *pcol++;
 		mask = *pmsk++;
 		data = 0;
@@ -132,26 +133,25 @@
 		 * but method 2 shows no lag
 		 * and method 1 seems a bit wrong*/
 #if 0
-		if(rop == ROP_XOR)
+		if (rop == ROP_XOR)
 			opr = mask ^ color;
 		else
 			opr = mask & color;
 
-		for(j=0;j<8;j++)
-		{
+		for (j = 0; j < 8; j++) {
 
-			if(opr & (0x80 >> j))
-			{	/* use fg color,id = 2 */
+			if (opr & (0x80 >> j)) {
+				/* use fg color,id = 2 */
 				data |= 2 << (j*2);
-			}else{
+			} else {
 				/* use bg color,id = 1 */
 				data |= 1 << (j*2);
 			}
 		}
 #else
-		for(j=0;j<8;j++){
-			if(mask & (0x80>>j)){
-				if(rop == ROP_XOR)
+		for (j = 0; j < 8; j++) {
+			if (mask & (0x80>>j)) {
+				if (rop == ROP_XOR)
 					opr = mask ^ color;
 				else
 					opr = mask & color;
@@ -165,15 +165,15 @@
 
 		/* assume pitch is 1,2,4,8,...*/
 #if 0
-		if(!((i+1)&(pitch-1)))   /* below line equal to is line */
+		if (!((i+1)&(pitch-1)))   /* below line equal to is line */
 #else
-		if((i+1) % pitch == 0)
+		if ((i+1) % pitch == 0)
 #endif
 		{
 			/* need a return */
 			pstart += offset;
 			pbuffer = pstart;
-		}else{
+		} else {
 			pbuffer += sizeof(u16);
 		}
 
@@ -184,7 +184,7 @@
 
 
 void hw_cursor_setData2(struct lynx_cursor *cursor,
-			u16 rop, const u8* pcol, const u8* pmsk)
+			u16 rop, const u8 *pcol, const u8 *pmsk)
 {
 	int i, j, count, pitch, offset;
 	u8 color, mask;
@@ -204,45 +204,42 @@
 	pstart = cursor->vstart;
 	pbuffer = pstart;
 
-	for(i=0;i<count;i++)
-	{
+	for (i = 0; i < count; i++) {
 		color = *pcol++;
 		mask = *pmsk++;
 		data = 0;
 
 		/* either method below works well, but method 2 shows no lag */
 #if 0
-		if(rop == ROP_XOR)
+		if (rop == ROP_XOR)
 			opr = mask ^ color;
 		else
 			opr = mask & color;
 
-		for(j=0;j<8;j++)
-		{
+		for (j = 0; j < 8; j++) {
 
-			if(opr & (0x80 >> j))
-			{	/* use fg color,id = 2 */
+			if (opr & (0x80 >> j)) {
+				/* use fg color,id = 2 */
 				data |= 2 << (j*2);
-			}else{
+			} else {
 				/* use bg color,id = 1 */
 				data |= 1 << (j*2);
 			}
 		}
 #else
-		for(j=0;j<8;j++){
-			if(mask & (1<<j))
+		for (j = 0; j < 8; j++) {
+			if (mask & (1<<j))
 				data |= ((color & (1<<j))?1:2)<<(j*2);
 		}
 #endif
 		iowrite16(data, pbuffer);
 
 		/* assume pitch is 1,2,4,8,...*/
-		if(!(i&(pitch-1)))
-		{
+		if (!(i&(pitch-1))) {
 			/* need a return */
 			pstart += offset;
 			pbuffer = pstart;
-		}else{
+		} else {
 			pbuffer += sizeof(u16);
 		}
 
diff --git a/drivers/staging/sm750fb/sm750_cursor.h b/drivers/staging/sm750fb/sm750_cursor.h
index e1716a6..6c4fc9b 100644
--- a/drivers/staging/sm750fb/sm750_cursor.h
+++ b/drivers/staging/sm750fb/sm750_cursor.h
@@ -11,7 +11,7 @@
 void hw_cursor_setColor(struct lynx_cursor *cursor,
 						u32 fg, u32 bg);
 void hw_cursor_setData(struct lynx_cursor *cursor,
-			u16 rop, const u8* data, const u8* mask);
+			u16 rop, const u8 *data, const u8 *mask);
 void hw_cursor_setData2(struct lynx_cursor *cursor,
-			u16 rop, const u8* data, const u8* mask);
+			u16 rop, const u8 *data, const u8 *mask);
 #endif
diff --git a/drivers/staging/sm750fb/sm750_help.h b/drivers/staging/sm750fb/sm750_help.h
index 05777f7..8dc6bd2 100644
--- a/drivers/staging/sm750fb/sm750_help.h
+++ b/drivers/staging/sm750fb/sm750_help.h
@@ -11,9 +11,9 @@
 #define GET_FIELD(d, f)     (((d) >> _LSB(f)) & RAW_MASK(f))
 #define TEST_FIELD(d, f, v) (GET_FIELD(d, f) == f ## _ ## v)
 #define SET_FIELD(d, f, v)  (((d) & ~GET_MASK(f)) | \
-                            (((f ## _ ## v) & RAW_MASK(f)) << _LSB(f)))
+			    (((f ## _ ## v) & RAW_MASK(f)) << _LSB(f)))
 #define SET_FIELDV(d, f, v) (((d) & ~GET_MASK(f)) | \
-                            (((v) & RAW_MASK(f)) << _LSB(f)))
+			    (((v) & RAW_MASK(f)) << _LSB(f)))
 
 /* Internal macros */
 #define _F_START(f)             (0 ? f)
@@ -26,24 +26,24 @@
 /* Global macros */
 #define FIELD_GET(x, reg, field) \
 ( \
-    _F_NORMALIZE((x), reg ## _ ## field) \
+	_F_NORMALIZE((x), reg ## _ ## field) \
 )
 
 #define FIELD_SET(x, reg, field, value) \
 ( \
-    (x & ~_F_MASK(reg ## _ ## field)) \
-    | _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
+	(x & ~_F_MASK(reg ## _ ## field)) \
+	| _F_DENORMALIZE(reg ## _ ## field ## _ ## value, reg ## _ ## field) \
 )
 
 #define FIELD_VALUE(x, reg, field, value) \
 ( \
-    (x & ~_F_MASK(reg ## _ ## field)) \
-    | _F_DENORMALIZE(value, reg ## _ ## field) \
+	(x & ~_F_MASK(reg ## _ ## field)) \
+	| _F_DENORMALIZE(value, reg ## _ ## field) \
 )
 
 #define FIELD_CLEAR(reg, field) \
 ( \
-    ~ _F_MASK(reg ## _ ## field) \
+	~ _F_MASK(reg ## _ ## field) \
 )
 
 /* Field Macros */
@@ -55,25 +55,25 @@
 #define FIELD_DENORMALIZE(field, value) (((value) << FIELD_START(field)) & FIELD_MASK(field))
 
 #define FIELD_INIT(reg, field, value)   FIELD_DENORMALIZE(reg ## _ ## field, \
-                                                          reg ## _ ## field ## _ ## value)
+							  reg ## _ ## field ## _ ## value)
 #define FIELD_INIT_VAL(reg, field, value) \
-                                        (FIELD_DENORMALIZE(reg ## _ ## field, value))
+	(FIELD_DENORMALIZE(reg ## _ ## field, value))
 #define FIELD_VAL_SET(x, r, f, v)       x = x & ~FIELD_MASK(r ## _ ## f) \
-                                              | FIELD_DENORMALIZE(r ## _ ## f, r ## _ ## f ## _ ## v)
+					| FIELD_DENORMALIZE(r ## _ ## f, r ## _ ## f ## _ ## v)
 
 #define RGB(r, g, b) \
 ( \
-    (unsigned long) (((r) << 16) | ((g) << 8) | (b)) \
+	(unsigned long) (((r) << 16) | ((g) << 8) | (b)) \
 )
 
 #define RGB16(r, g, b) \
 ( \
-    (unsigned short) ((((r) & 0xF8) << 8) | (((g) & 0xFC) << 3) | (((b) & 0xF8) >> 3)) \
+	(unsigned short) ((((r) & 0xF8) << 8) | (((g) & 0xFC) << 3) | (((b) & 0xF8) >> 3)) \
 )
 
 static inline unsigned int absDiff(unsigned int a, unsigned int b)
 {
-	if(a<b)
+	if (a < b)
 		return b-a;
 	else
 		return a-b;
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index 84381bc..7317ba9 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -24,11 +24,11 @@
 #include "ddk750.h"
 #include "sm750_accel.h"
 
-int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev)
+int hw_sm750_map(struct lynx_share *share, struct pci_dev *pdev)
 {
 	int ret;
 	struct sm750_share *spec_share;
-	
+
 
 	spec_share = container_of(share, struct sm750_share, share);
 	ret = 0;
@@ -43,24 +43,23 @@
 	 * in lynxfb_remove, or memory will not be mapped again
 	 * successfully
 	 * */
-
-	if((ret = pci_request_region(pdev, 1, "sm750fb")))
-	{
+	ret = pci_request_region(pdev, 1, "sm750fb");
+	if (ret) {
 		pr_err("Can not request PCI regions.\n");
 		goto exit;
 	}
 
 	/* now map mmio and vidmem*/
 	share->pvReg = ioremap_nocache(share->vidreg_start, share->vidreg_size);
-	if(!share->pvReg){
+	if (!share->pvReg) {
 		pr_err("mmio failed\n");
 		ret = -EFAULT;
 		goto exit;
-	}else{
+	} else {
 		pr_info("mmio virtual addr = %p\n", share->pvReg);
 	}
 
-	
+
 	share->accel.dprBase = share->pvReg + DE_BASE_ADDR_TYPE1;
 	share->accel.dpPortBase = share->pvReg + DE_PORT_ADDR_TYPE1;
 
@@ -78,8 +77,8 @@
 
 	/* reserve the vidmem space of smi adaptor */
 #if 0
-	if((ret = pci_request_region(pdev, 0, _moduleName_)))
-	{
+	ret = pci_request_region(pdev, 0, _moduleName_);
+	if (ret) {
 		pr_err("Can not request PCI regions.\n");
 		goto exit;
 	}
@@ -87,11 +86,11 @@
 
 	share->pvMem = ioremap_wc(share->vidmem_start, share->vidmem_size);
 
-	if(!share->pvMem){
+	if (!share->pvMem) {
 		pr_err("Map video memory failed\n");
 		ret = -EFAULT;
 		goto exit;
-	}else{
+	} else {
 		pr_info("video memory vaddr = %p\n", share->pvMem);
 	}
 exit:
@@ -104,22 +103,22 @@
 {
 	struct sm750_share *spec_share;
 	struct init_status *parm;
-	
+
 	spec_share = container_of(share, struct sm750_share, share);
 	parm = &spec_share->state.initParm;
-	if(parm->chip_clk == 0)
-		parm->chip_clk = (getChipType() == SM750LE)?
+	if (parm->chip_clk == 0)
+		parm->chip_clk = (getChipType() == SM750LE) ?
 						DEFAULT_SM750LE_CHIP_CLOCK :
 						DEFAULT_SM750_CHIP_CLOCK;
 
-	if(parm->mem_clk == 0)
+	if (parm->mem_clk == 0)
 		parm->mem_clk = parm->chip_clk;
-	if(parm->master_clk == 0)
+	if (parm->master_clk == 0)
 		parm->master_clk = parm->chip_clk/3;
 
 	ddk750_initHw((initchip_param_t *)&spec_share->state.initParm);
 	/* for sm718,open pci burst */
-	if(share->devid == 0x718){
+	if (share->devid == 0x718) {
 		POKE32(SYSTEM_CTRL,
 				FIELD_SET(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, PCI_BURST, ON));
 	}
@@ -130,10 +129,9 @@
 	ddk750_initDVIDisp();
 #endif
 
-	if(getChipType() != SM750LE)
-	{
+	if (getChipType() != SM750LE) {
 		/* does user need CRT ?*/
-		if(spec_share->state.nocrt){
+		if (spec_share->state.nocrt) {
 			POKE32(MISC_CTRL,
 					FIELD_SET(PEEK32(MISC_CTRL),
 					MISC_CTRL,
@@ -143,7 +141,7 @@
 					FIELD_SET(PEEK32(SYSTEM_CTRL),
 					SYSTEM_CTRL,
 					DPMS, VNHN));
-		}else{
+		} else {
 			POKE32(MISC_CTRL,
 					FIELD_SET(PEEK32(MISC_CTRL),
 					MISC_CTRL,
@@ -155,45 +153,43 @@
 					DPMS, VPHP));
 		}
 
-		switch (spec_share->state.pnltype){
-			case sm750_doubleTFT:
-			case sm750_24TFT:
-			case sm750_dualTFT:
-			POKE32(PANEL_DISPLAY_CTRL,
-				FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL),
-							PANEL_DISPLAY_CTRL,
-							TFT_DISP,
-							spec_share->state.pnltype));
-			break;
+		switch (spec_share->state.pnltype) {
+		case sm750_doubleTFT:
+		case sm750_24TFT:
+		case sm750_dualTFT:
+			POKE32(PANEL_DISPLAY_CTRL,
+				FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL),
+					    PANEL_DISPLAY_CTRL,
+					    TFT_DISP,
+					    spec_share->state.pnltype));
+			break;
 		}
-	}else{
+	} else {
 		/* for 750LE ,no DVI chip initilization makes Monitor no signal */
 		/* Set up GPIO for software I2C to program DVI chip in the
 		   Xilinx SP605 board, in order to have video signal.
 		 */
-        swI2CInit(0, 1);
+		swI2CInit(0, 1);
 
 
-        /* Customer may NOT use CH7301 DVI chip, which has to be
-           initialized differently.
-         */
-        if (swI2CReadReg(0xec, 0x4a) == 0x95)
-        {
-            /* The following register values for CH7301 are from
-               Chrontel app note and our experiment.
-             */
+		/* Customer may NOT use CH7301 DVI chip, which has to be
+		 * initialized differently.
+		 */
+		if (swI2CReadReg(0xec, 0x4a) == 0x95) {
+			/* The following register values for CH7301 are from
+			 * Chrontel app note and our experiment.
+			 */
 			pr_info("yes,CH7301 DVI chip found\n");
-            swI2CWriteReg(0xec, 0x1d, 0x16);
-            swI2CWriteReg(0xec, 0x21, 0x9);
-            swI2CWriteReg(0xec, 0x49, 0xC0);
+			swI2CWriteReg(0xec, 0x1d, 0x16);
+			swI2CWriteReg(0xec, 0x21, 0x9);
+			swI2CWriteReg(0xec, 0x49, 0xC0);
 			pr_info("okay,CH7301 DVI chip setup done\n");
-        }
+		}
 	}
 
 	/* init 2d engine */
-	if(!share->accel_off){
+	if (!share->accel_off)
 		hw_sm750_initAccel(share);
-	}
 
 	return 0;
 }
@@ -202,86 +198,87 @@
 resource_size_t hw_sm750_getVMSize(struct lynx_share *share)
 {
 	resource_size_t ret;
-	
+
 	ret = ddk750_getVMSize();
 	return ret;
 }
 
 
 
-int hw_sm750_output_checkMode(struct lynxfb_output* output, struct fb_var_screeninfo* var)
+int hw_sm750_output_checkMode(struct lynxfb_output *output, struct fb_var_screeninfo *var)
 {
-	
+
 	return 0;
 }
 
 
-int hw_sm750_output_setMode(struct lynxfb_output* output,
-									struct fb_var_screeninfo* var, struct fb_fix_screeninfo* fix)
+int hw_sm750_output_setMode(struct lynxfb_output *output,
+									struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix)
 {
 	int ret;
 	disp_output_t dispSet;
 	int channel;
-	
+
 	ret = 0;
 	dispSet = 0;
 	channel = *output->channel;
 
 
-	if(getChipType() != SM750LE){
-		if(channel == sm750_primary){
+	if (getChipType() != SM750LE) {
+		if (channel == sm750_primary) {
 			pr_info("primary channel\n");
-			if(output->paths & sm750_panel)
+			if (output->paths & sm750_panel)
 				dispSet |= do_LCD1_PRI;
-			if(output->paths & sm750_crt)
+			if (output->paths & sm750_crt)
 				dispSet |= do_CRT_PRI;
 
-		}else{
+		} else {
 			pr_info("secondary channel\n");
-			if(output->paths & sm750_panel)
+			if (output->paths & sm750_panel)
 				dispSet |= do_LCD1_SEC;
-			if(output->paths & sm750_crt)
+			if (output->paths & sm750_crt)
 				dispSet |= do_CRT_SEC;
 
 		}
 		ddk750_setLogicalDispOut(dispSet);
-	}else{
+	} else {
 		/* just open DISPLAY_CONTROL_750LE register bit 3:0*/
 		u32 reg;
+
 		reg = PEEK32(DISPLAY_CONTROL_750LE);
 		reg |= 0xf;
 		POKE32(DISPLAY_CONTROL_750LE, reg);
 	}
 
-	pr_info("ddk setlogicdispout done \n");
+	pr_info("ddk setlogicdispout done\n");
 	return ret;
 }
 
-void hw_sm750_output_clear(struct lynxfb_output* output)
+void hw_sm750_output_clear(struct lynxfb_output *output)
 {
-	
+
 	return;
 }
 
-int hw_sm750_crtc_checkMode(struct lynxfb_crtc* crtc, struct fb_var_screeninfo* var)
+int hw_sm750_crtc_checkMode(struct lynxfb_crtc *crtc, struct fb_var_screeninfo *var)
 {
 	struct lynx_share *share;
-	
+
 
 	share = container_of(crtc, struct lynxfb_par, crtc)->share;
 
-	switch (var->bits_per_pixel){
-		case 8:
-		case 16:
-			break;
-		case 32:
-			if (share->revid == SM750LE_REVISION_ID) {
-				pr_debug("750le do not support 32bpp\n");
-				return -EINVAL;
-			}
-			break;
-		default:
+	switch (var->bits_per_pixel) {
+	case 8:
+	case 16:
+		break;
+	case 32:
+		if (share->revid == SM750LE_REVISION_ID) {
+			pr_debug("750le do not support 32bpp\n");
 			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
 
 	}
 
@@ -292,9 +289,9 @@
 /*
 	set the controller's mode for @crtc charged with @var and @fix parameters
 */
-int hw_sm750_crtc_setMode(struct lynxfb_crtc* crtc,
-								struct fb_var_screeninfo* var,
-								struct fb_fix_screeninfo* fix)
+int hw_sm750_crtc_setMode(struct lynxfb_crtc *crtc,
+								struct fb_var_screeninfo *var,
+								struct fb_fix_screeninfo *fix)
 {
 	int ret, fmt;
 	u32 reg;
@@ -303,24 +300,24 @@
 	struct lynx_share *share;
 	struct lynxfb_par *par;
 
-	
+
 	ret = 0;
 	par = container_of(crtc, struct lynxfb_par, crtc);
 	share = par->share;
 #if 1
-	if(!share->accel_off){
+	if (!share->accel_off) {
 		/* set 2d engine pixel format according to mode bpp */
-		switch(var->bits_per_pixel){
-			case 8:
-				fmt = 0;
-				break;
-			case 16:
-				fmt = 1;
-				break;
-			case 32:
-			default:
-				fmt = 2;
-				break;
+		switch (var->bits_per_pixel) {
+		case 8:
+			fmt = 0;
+			break;
+		case 16:
+			fmt = 1;
+			break;
+		case 32:
+		default:
+			fmt = 2;
+			break;
 		}
 		hw_set2dformat(&share->accel, fmt);
 	}
@@ -330,7 +327,7 @@
 	modparm.pixel_clock = ps_to_hz(var->pixclock);
 	modparm.vertical_sync_polarity = (var->sync & FB_SYNC_HOR_HIGH_ACT) ? POS:NEG;
 	modparm.horizontal_sync_polarity = (var->sync & FB_SYNC_VERT_HIGH_ACT) ? POS:NEG;
-	modparm.clock_phase_polarity = (var->sync& FB_SYNC_COMP_HIGH_ACT) ? POS:NEG;
+	modparm.clock_phase_polarity = (var->sync & FB_SYNC_COMP_HIGH_ACT) ? POS:NEG;
 	modparm.horizontal_display_end = var->xres;
 	modparm.horizontal_sync_width = var->hsync_len;
 	modparm.horizontal_sync_start = var->xres + var->right_margin;
@@ -341,19 +338,19 @@
 	modparm.vertical_total = var->yres + var->upper_margin + var->lower_margin + var->vsync_len;
 
 	/* choose pll */
-	if(crtc->channel != sm750_secondary)
+	if (crtc->channel != sm750_secondary)
 		clock = PRIMARY_PLL;
 	else
 		clock = SECONDARY_PLL;
 
 	pr_debug("Request pixel clock = %lu\n", modparm.pixel_clock);
 	ret = ddk750_setModeTiming(&modparm, clock);
-	if(ret){
+	if (ret) {
 		pr_err("Set mode timing failed\n");
 		goto exit;
 	}
 
-	if(crtc->channel != sm750_secondary){
+	if (crtc->channel != sm750_secondary) {
 		/* set pitch, offset ,width,start address ,etc... */
 		POKE32(PANEL_FB_ADDRESS,
 			FIELD_SET(0, PANEL_FB_ADDRESS, STATUS, CURRENT)|
@@ -369,7 +366,7 @@
 			FIELD_VALUE(0, PANEL_FB_WIDTH, OFFSET, fix->line_length));
 
 		POKE32(PANEL_WINDOW_WIDTH,
-			FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres -1)|
+			FIELD_VALUE(0, PANEL_WINDOW_WIDTH, WIDTH, var->xres - 1)|
 			FIELD_VALUE(0, PANEL_WINDOW_WIDTH, X, var->xoffset));
 
 		POKE32(PANEL_WINDOW_HEIGHT,
@@ -389,7 +386,7 @@
 			PANEL_DISPLAY_CTRL, FORMAT,
 			(var->bits_per_pixel >> 4)
 			));
-	}else{
+	} else {
 		/* not implemented now */
 		POKE32(CRT_FB_ADDRESS, crtc->oScreen);
 		reg = var->xres * (var->bits_per_pixel >> 3);
@@ -412,138 +409,137 @@
 	return ret;
 }
 
-void hw_sm750_crtc_clear(struct lynxfb_crtc* crtc)
+void hw_sm750_crtc_clear(struct lynxfb_crtc *crtc)
 {
-	
+
 	return;
 }
 
-int hw_sm750_setColReg(struct lynxfb_crtc* crtc, ushort index,
+int hw_sm750_setColReg(struct lynxfb_crtc *crtc, ushort index,
 								ushort red, ushort green, ushort blue)
 {
-	static unsigned int add[]={PANEL_PALETTE_RAM, CRT_PALETTE_RAM};
+	static unsigned int add[] = {PANEL_PALETTE_RAM, CRT_PALETTE_RAM};
+
 	POKE32(add[crtc->channel] + index*4, (red<<16)|(green<<8)|blue);
 	return 0;
 }
 
-int hw_sm750le_setBLANK(struct lynxfb_output * output, int blank){
+int hw_sm750le_setBLANK(struct lynxfb_output *output, int blank)
+{
 	int dpms, crtdb;
-	
-	switch(blank)
-	{
+
+	switch (blank) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_UNBLANK:
+	case FB_BLANK_UNBLANK:
 #else
-		case VESA_NO_BLANKING:
+	case VESA_NO_BLANKING:
 #endif
-			dpms = CRT_DISPLAY_CTRL_DPMS_0;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
-			break;
+		dpms = CRT_DISPLAY_CTRL_DPMS_0;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+		break;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_NORMAL:
-			dpms = CRT_DISPLAY_CTRL_DPMS_0;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+	case FB_BLANK_NORMAL:
+		dpms = CRT_DISPLAY_CTRL_DPMS_0;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_VSYNC_SUSPEND:
 #else
-		case VESA_VSYNC_SUSPEND:
+	case VESA_VSYNC_SUSPEND:
 #endif
-			dpms = CRT_DISPLAY_CTRL_DPMS_2;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+		dpms = CRT_DISPLAY_CTRL_DPMS_2;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
 #else
-		case VESA_HSYNC_SUSPEND:
+	case VESA_HSYNC_SUSPEND:
 #endif
-			dpms = CRT_DISPLAY_CTRL_DPMS_1;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+		dpms = CRT_DISPLAY_CTRL_DPMS_1;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_POWERDOWN:
+	case FB_BLANK_POWERDOWN:
 #else
-		case VESA_POWERDOWN:
+	case VESA_POWERDOWN:
 #endif
-			dpms = CRT_DISPLAY_CTRL_DPMS_3;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
-		default:
-			return -EINVAL;
+		dpms = CRT_DISPLAY_CTRL_DPMS_3;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
+	default:
+		return -EINVAL;
 	}
 
-	if(output->paths & sm750_crt){
+	if (output->paths & sm750_crt) {
 		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, DPMS, dpms));
 		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
 	}
 	return 0;
 }
 
-int hw_sm750_setBLANK(struct lynxfb_output* output, int blank)
+int hw_sm750_setBLANK(struct lynxfb_output *output, int blank)
 {
 	unsigned int dpms, pps, crtdb;
-	
+
 	dpms = pps = crtdb = 0;
 
-	switch (blank)
-	{
+	switch (blank) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_UNBLANK:
+	case FB_BLANK_UNBLANK:
 #else
-		case VESA_NO_BLANKING:
+	case VESA_NO_BLANKING:
 #endif
-			pr_info("flag = FB_BLANK_UNBLANK \n");
-			dpms = SYSTEM_CTRL_DPMS_VPHP;
-			pps = PANEL_DISPLAY_CTRL_DATA_ENABLE;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
-			break;
+		pr_info("flag = FB_BLANK_UNBLANK\n");
+		dpms = SYSTEM_CTRL_DPMS_VPHP;
+		pps = PANEL_DISPLAY_CTRL_DATA_ENABLE;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_OFF;
+		break;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_NORMAL:
-			pr_info("flag = FB_BLANK_NORMAL \n");
-			dpms = SYSTEM_CTRL_DPMS_VPHP;
-			pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+	case FB_BLANK_NORMAL:
+		pr_info("flag = FB_BLANK_NORMAL\n");
+		dpms = SYSTEM_CTRL_DPMS_VPHP;
+		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_VSYNC_SUSPEND:
 #else
-		case VESA_VSYNC_SUSPEND:
+	case VESA_VSYNC_SUSPEND:
 #endif
-			dpms = SYSTEM_CTRL_DPMS_VNHP;
-			pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+		dpms = SYSTEM_CTRL_DPMS_VNHP;
+		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
 #else
-		case VESA_HSYNC_SUSPEND:
+	case VESA_HSYNC_SUSPEND:
 #endif
-			dpms = SYSTEM_CTRL_DPMS_VPHN;
-			pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+		dpms = SYSTEM_CTRL_DPMS_VPHN;
+		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-		case FB_BLANK_POWERDOWN:
+	case FB_BLANK_POWERDOWN:
 #else
-		case VESA_POWERDOWN:
+	case VESA_POWERDOWN:
 #endif
-			dpms = SYSTEM_CTRL_DPMS_VNHN;
-			pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
-			crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
-			break;
+		dpms = SYSTEM_CTRL_DPMS_VNHN;
+		pps = PANEL_DISPLAY_CTRL_DATA_DISABLE;
+		crtdb = CRT_DISPLAY_CTRL_BLANK_ON;
+		break;
 	}
 
-	if(output->paths & sm750_crt){
+	if (output->paths & sm750_crt) {
 
 		POKE32(SYSTEM_CTRL, FIELD_VALUE(PEEK32(SYSTEM_CTRL), SYSTEM_CTRL, DPMS, dpms));
 		POKE32(CRT_DISPLAY_CTRL, FIELD_VALUE(PEEK32(CRT_DISPLAY_CTRL), CRT_DISPLAY_CTRL, BLANK, crtdb));
 	}
 
-	if(output->paths & sm750_panel){
+	if (output->paths & sm750_panel)
 		POKE32(PANEL_DISPLAY_CTRL, FIELD_VALUE(PEEK32(PANEL_DISPLAY_CTRL), PANEL_DISPLAY_CTRL, DATA, pps));
-	}
 
 	return 0;
 }
@@ -552,9 +548,10 @@
 void hw_sm750_initAccel(struct lynx_share *share)
 {
 	u32 reg;
+
 	enable2DEngine(1);
 
-	if(getChipType() == SM750LE){
+	if (getChipType() == SM750LE) {
 		reg = PEEK32(DE_STATE1);
 		reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, ON);
 		POKE32(DE_STATE1, reg);
@@ -563,7 +560,7 @@
 		reg = FIELD_SET(reg, DE_STATE1, DE_ABORT, OFF);
 		POKE32(DE_STATE1, reg);
 
-	}else{
+	} else {
 		/* engine reset */
 		reg = PEEK32(SYSTEM_CTRL);
 	    reg = FIELD_SET(reg, SYSTEM_CTRL, DE_ABORT, ON);
@@ -580,13 +577,14 @@
 
 int hw_sm750le_deWait(void)
 {
-	int i=0x10000000;
-	while(i--){
+	int i = 0x10000000;
+
+	while (i--) {
 		unsigned int dwVal = PEEK32(DE_STATE2);
-		if((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) &&
+
+		if ((FIELD_GET(dwVal, DE_STATE2, DE_STATUS) == DE_STATE2_DE_STATUS_IDLE) &&
 			(FIELD_GET(dwVal, DE_STATE2, DE_FIFO)  == DE_STATE2_DE_FIFO_EMPTY) &&
-			(FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY))
-		{
+			(FIELD_GET(dwVal, DE_STATE2, DE_MEM_FIFO) == DE_STATE2_DE_MEM_FIFO_EMPTY)) {
 			return 0;
 		}
 	}
@@ -597,13 +595,14 @@
 
 int hw_sm750_deWait(void)
 {
-	int i=0x10000000;
-	while(i--){
+	int i = 0x10000000;
+
+	while (i--) {
 		unsigned int dwVal = PEEK32(SYSTEM_CTRL);
-		if((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) &&
+
+		if ((FIELD_GET(dwVal, SYSTEM_CTRL, DE_STATUS) == SYSTEM_CTRL_DE_STATUS_IDLE) &&
 			(FIELD_GET(dwVal, SYSTEM_CTRL, DE_FIFO)  == SYSTEM_CTRL_DE_FIFO_EMPTY) &&
-			(FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY))
-		{
+			(FIELD_GET(dwVal, SYSTEM_CTRL, DE_MEM_FIFO) == SYSTEM_CTRL_DE_MEM_FIFO_EMPTY)) {
 			return 0;
 		}
 	}
@@ -612,28 +611,27 @@
 }
 
 int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
-        const struct fb_var_screeninfo *var,
-        const struct fb_info *info)
+	const struct fb_var_screeninfo *var,
+	const struct fb_info *info)
 {
-    uint32_t total;
-    /* check params */
-    if ((var->xoffset + var->xres > var->xres_virtual) ||
-            (var->yoffset + var->yres > var->yres_virtual)) {
-        return -EINVAL;
-    }
+	uint32_t total;
+
+	/* check params */
+	if ((var->xoffset + var->xres > var->xres_virtual) ||
+	    (var->yoffset + var->yres > var->yres_virtual))
+		return -EINVAL;
 
-    total = var->yoffset * info->fix.line_length +
-        ((var->xoffset * var->bits_per_pixel) >> 3);
-    total += crtc->oScreen;
-    if (crtc->channel == sm750_primary) {
-        POKE32(PANEL_FB_ADDRESS,
-                FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS),
-                    PANEL_FB_ADDRESS, ADDRESS, total));
-    } else {
-        POKE32(CRT_FB_ADDRESS,
-                FIELD_VALUE(PEEK32(CRT_FB_ADDRESS),
-                    CRT_FB_ADDRESS, ADDRESS, total));
-    }
-    return 0;
+	total = var->yoffset * info->fix.line_length +
+		((var->xoffset * var->bits_per_pixel) >> 3);
+	total += crtc->oScreen;
+	if (crtc->channel == sm750_primary) {
+		POKE32(PANEL_FB_ADDRESS,
+			FIELD_VALUE(PEEK32(PANEL_FB_ADDRESS),
+				PANEL_FB_ADDRESS, ADDRESS, total));
+	} else {
+		POKE32(CRT_FB_ADDRESS,
+			FIELD_VALUE(PEEK32(CRT_FB_ADDRESS),
+				CRT_FB_ADDRESS, ADDRESS, total));
+	}
+	return 0;
 }
-
diff --git a/drivers/staging/sm750fb/sm750_hw.h b/drivers/staging/sm750fb/sm750_hw.h
index 93288b3..3781a1a 100644
--- a/drivers/staging/sm750fb/sm750_hw.h
+++ b/drivers/staging/sm750fb/sm750_hw.h
@@ -2,14 +2,14 @@
 #define LYNX_HW750_H__
 
 
-#define DEFAULT_SM750_CHIP_CLOCK 		290
-#define DEFAULT_SM750LE_CHIP_CLOCK  	333
+#define DEFAULT_SM750_CHIP_CLOCK	290
+#define DEFAULT_SM750LE_CHIP_CLOCK	333
 #ifndef SM750LE_REVISION_ID
 #define SM750LE_REVISION_ID (unsigned char)0xfe
 #endif
 
 
-enum sm750_pnltype{
+enum sm750_pnltype {
 
 	sm750_24TFT = 0,/* 24bit tft */
 
@@ -19,30 +19,30 @@
 };
 
 /* vga channel is not concerned  */
-enum sm750_dataflow{
+enum sm750_dataflow {
 	sm750_simul_pri,/* primary => all head */
 
 	sm750_simul_sec,/* secondary => all head */
 
-	sm750_dual_normal,/* 	primary => panel head and secondary => crt */
+	sm750_dual_normal,/* primary => panel head and secondary => crt */
 
-	sm750_dual_swap,/* 	primary => crt head and secondary => panel */
+	sm750_dual_swap,/* primary => crt head and secondary => panel */
 };
 
 
-enum sm750_channel{
+enum sm750_channel {
 	sm750_primary = 0,
 	/* enum value equal to the register filed data */
 	sm750_secondary = 1,
 };
 
-enum sm750_path{
+enum sm750_path {
 	sm750_panel = 1,
 	sm750_crt = 2,
 	sm750_pnc = 3,/* panel and crt */
 };
 
-struct init_status{
+struct init_status {
 	ushort powerMode;
 	/* below three clocks are in unit of MHZ*/
 	ushort chip_clk;
@@ -52,7 +52,7 @@
 	ushort resetMemory;
 };
 
-struct sm750_state{
+struct sm750_state {
 	struct init_status initParm;
 	enum sm750_pnltype pnltype;
 	enum sm750_dataflow dataflow;
@@ -61,24 +61,24 @@
 	int yLCD;
 };
 
-/* 	sm750_share stands for a presentation of two frame buffer
-	that use one sm750 adaptor, it is similar to the super class of lynx_share
-	in C++
-*/
+/* sm750_share represents the two frame buffers
+   that use one sm750 adaptor; it is similar to the super class of lynx_share
+   in C++
+ */
 
-struct sm750_share{
+struct sm750_share {
 	/* it's better to put lynx_share struct to the first place of sm750_share */
 	struct lynx_share share;
 	struct sm750_state state;
 	int hwCursor;
-	/* 	0: no hardware cursor
-		1: primary crtc hw cursor enabled,
-		2: secondary crtc hw cursor enabled
-		3: both ctrc hw cursor enabled
+	/* 0: no hardware cursor
+	   1: primary crtc hw cursor enabled,
+	   2: secondary crtc hw cursor enabled
+	   3: both crtc hw cursor enabled
 	*/
 };
 
-int hw_sm750_map(struct lynx_share* share, struct pci_dev* pdev);
+int hw_sm750_map(struct lynx_share *share, struct pci_dev *pdev);
 int hw_sm750_inithw(struct lynx_share*, struct pci_dev *);
 void hw_sm750_initAccel(struct lynx_share *);
 int hw_sm750_deWait(void);
@@ -92,10 +92,10 @@
 int hw_sm750_setColReg(struct lynxfb_crtc*, ushort, ushort, ushort, ushort);
 int hw_sm750_setBLANK(struct lynxfb_output*, int);
 int hw_sm750le_setBLANK(struct lynxfb_output*, int);
-void hw_sm750_crtc_clear(struct lynxfb_crtc*);
-void hw_sm750_output_clear(struct lynxfb_output*);
+void hw_sm750_crtc_clear(struct lynxfb_crtc *);
+void hw_sm750_output_clear(struct lynxfb_output *);
 int hw_sm750_pan_display(struct lynxfb_crtc *crtc,
-        const struct fb_var_screeninfo *var,
-        const struct fb_info *info);
+			 const struct fb_var_screeninfo *var,
+			 const struct fb_info *info);
 
 #endif
diff --git a/drivers/staging/sm7xxfb/Kconfig b/drivers/staging/sm7xxfb/Kconfig
deleted file mode 100644
index e2922ae..0000000
--- a/drivers/staging/sm7xxfb/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-config FB_SM7XX
-	tristate "Silicon Motion SM7XX framebuffer support"
-	depends on FB && PCI
-	select FB_CFB_FILLRECT
-	select FB_CFB_COPYAREA
-	select FB_CFB_IMAGEBLIT
-	help
-	  Frame buffer driver for the Silicon Motion SM710, SM712, SM721
-	  and SM722 chips.
-
-	  This driver is also available as a module. The module will be
-	  called sm7xxfb. If you want to compile it as a module, say M
-	  here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/staging/sm7xxfb/Makefile b/drivers/staging/sm7xxfb/Makefile
deleted file mode 100644
index 48f471cf..0000000
--- a/drivers/staging/sm7xxfb/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_FB_SM7XX) += sm7xxfb.o
diff --git a/drivers/staging/sm7xxfb/TODO b/drivers/staging/sm7xxfb/TODO
deleted file mode 100644
index 7cb0b24..0000000
--- a/drivers/staging/sm7xxfb/TODO
+++ /dev/null
@@ -1,12 +0,0 @@
-TODO:
-- Dual head support
-- 2D acceleration support
-- use kernel coding style
-- refine the code and remove unused code
-- move it to drivers/video/fbdev/sm7xxfb.c
-
-Please send any patches to
-	Greg Kroah-Hartman <greg@kroah.com>
-	Sudip Mukherjee <sudipm.mukherjee@gmail.com>
-	Teddy Wang <teddy.wang@siliconmotion.com>
-	Sudip Mukherjee <sudip@vectorindia.org>
diff --git a/drivers/staging/sm7xxfb/sm7xx.h b/drivers/staging/sm7xxfb/sm7xx.h
deleted file mode 100644
index 4bed094..0000000
--- a/drivers/staging/sm7xxfb/sm7xx.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Silicon Motion SM712 frame buffer device
- *
- * Copyright (C) 2006 Silicon Motion Technology Corp.
- * Authors:	Ge Wang, gewang@siliconmotion.com
- *		Boyod boyod.yang@siliconmotion.com.cn
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- *
- *  This file is subject to the terms and conditions of the GNU General Public
- *  License. See the file COPYING in the main directory of this archive for
- *  more details.
- */
-
-#define NR_PALETTE        256
-
-#define FB_ACCEL_SMI_LYNX 88
-
-#define SCREEN_X_RES      1024
-#define SCREEN_Y_RES      600
-#define SCREEN_BPP        16
-
-/*Assume SM712 graphics chip has 4MB VRAM */
-#define SM712_VIDEOMEMORYSIZE	  0x00400000
-/*Assume SM722 graphics chip has 8MB VRAM */
-#define SM722_VIDEOMEMORYSIZE	  0x00800000
-
-#define dac_reg	(0x3c8)
-#define dac_val	(0x3c9)
-
-extern void __iomem *smtc_regbaseaddress;
-#define smtc_mmiowb(dat, reg)	writeb(dat, smtc_regbaseaddress + reg)
-#define smtc_mmioww(dat, reg)	writew(dat, smtc_regbaseaddress + reg)
-#define smtc_mmiowl(dat, reg)	writel(dat, smtc_regbaseaddress + reg)
-
-#define smtc_mmiorb(reg)	readb(smtc_regbaseaddress + reg)
-#define smtc_mmiorw(reg)	readw(smtc_regbaseaddress + reg)
-#define smtc_mmiorl(reg)	readl(smtc_regbaseaddress + reg)
-
-#define SIZE_SR00_SR04      (0x04 - 0x00 + 1)
-#define SIZE_SR10_SR24      (0x24 - 0x10 + 1)
-#define SIZE_SR30_SR75      (0x75 - 0x30 + 1)
-#define SIZE_SR80_SR93      (0x93 - 0x80 + 1)
-#define SIZE_SRA0_SRAF      (0xAF - 0xA0 + 1)
-#define SIZE_GR00_GR08      (0x08 - 0x00 + 1)
-#define SIZE_AR00_AR14      (0x14 - 0x00 + 1)
-#define SIZE_CR00_CR18      (0x18 - 0x00 + 1)
-#define SIZE_CR30_CR4D      (0x4D - 0x30 + 1)
-#define SIZE_CR90_CRA7      (0xA7 - 0x90 + 1)
-#define SIZE_VPR		(0x6C + 1)
-#define SIZE_DPR		(0x44 + 1)
-
-static inline void smtc_crtcw(int reg, int val)
-{
-	smtc_mmiowb(reg, 0x3d4);
-	smtc_mmiowb(val, 0x3d5);
-}
-
-static inline unsigned int smtc_crtcr(int reg)
-{
-	smtc_mmiowb(reg, 0x3d4);
-	return smtc_mmiorb(0x3d5);
-}
-
-static inline void smtc_grphw(int reg, int val)
-{
-	smtc_mmiowb(reg, 0x3ce);
-	smtc_mmiowb(val, 0x3cf);
-}
-
-static inline unsigned int smtc_grphr(int reg)
-{
-	smtc_mmiowb(reg, 0x3ce);
-	return smtc_mmiorb(0x3cf);
-}
-
-static inline void smtc_attrw(int reg, int val)
-{
-	smtc_mmiorb(0x3da);
-	smtc_mmiowb(reg, 0x3c0);
-	smtc_mmiorb(0x3c1);
-	smtc_mmiowb(val, 0x3c0);
-}
-
-static inline void smtc_seqw(int reg, int val)
-{
-	smtc_mmiowb(reg, 0x3c4);
-	smtc_mmiowb(val, 0x3c5);
-}
-
-static inline unsigned int smtc_seqr(int reg)
-{
-	smtc_mmiowb(reg, 0x3c4);
-	return smtc_mmiorb(0x3c5);
-}
-
-/* The next structure holds all information relevant for a specific video mode.
- */
-
-struct modeinit {
-	int mmsizex;
-	int mmsizey;
-	int bpp;
-	int hz;
-	unsigned char init_misc;
-	unsigned char init_sr00_sr04[SIZE_SR00_SR04];
-	unsigned char init_sr10_sr24[SIZE_SR10_SR24];
-	unsigned char init_sr30_sr75[SIZE_SR30_SR75];
-	unsigned char init_sr80_sr93[SIZE_SR80_SR93];
-	unsigned char init_sra0_sraf[SIZE_SRA0_SRAF];
-	unsigned char init_gr00_gr08[SIZE_GR00_GR08];
-	unsigned char init_ar00_ar14[SIZE_AR00_AR14];
-	unsigned char init_cr00_cr18[SIZE_CR00_CR18];
-	unsigned char init_cr30_cr4d[SIZE_CR30_CR4D];
-	unsigned char init_cr90_cra7[SIZE_CR90_CRA7];
-};
diff --git a/drivers/staging/sm7xxfb/sm7xxfb.c b/drivers/staging/sm7xxfb/sm7xxfb.c
deleted file mode 100644
index 2ff4fe7..0000000
--- a/drivers/staging/sm7xxfb/sm7xxfb.c
+++ /dev/null
@@ -1,1686 +0,0 @@
-/*
- * Silicon Motion SM7XX frame buffer device
- *
- * Copyright (C) 2006 Silicon Motion Technology Corp.
- * Authors:  Ge Wang, gewang@siliconmotion.com
- *	     Boyod boyod.yang@siliconmotion.com.cn
- *
- * Copyright (C) 2009 Lemote, Inc.
- * Author:   Wu Zhangjin, wuzhangjin@gmail.com
- *
- * Copyright (C) 2011 Igalia, S.L.
- * Author:   Javier M. Mellid <jmunhoz@igalia.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
- */
-
-#include <linux/io.h>
-#include <linux/fb.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <linux/console.h>
-#include <linux/screen_info.h>
-
-#ifdef CONFIG_PM
-#include <linux/pm.h>
-#endif
-
-#include "sm7xx.h"
-
-/*
-* Private structure
-*/
-struct smtcfb_info {
-	struct pci_dev *pdev;
-	struct fb_info *fb;
-	u16 chip_id;
-	u8  chip_rev_id;
-
-	void __iomem *lfb;	/* linear frame buffer */
-	void __iomem *dp_regs;	/* drawing processor control regs */
-	void __iomem *vp_regs;	/* video processor control regs */
-	void __iomem *cp_regs;	/* capture processor control regs */
-	void __iomem *mmio;	/* memory map IO port */
-
-	u_int width;
-	u_int height;
-	u_int hz;
-
-	u32 colreg[17];
-};
-
-void __iomem *smtc_regbaseaddress;	/* Memory Map IO starting address */
-
-static struct fb_var_screeninfo smtcfb_var = {
-	.xres           = 1024,
-	.yres           = 600,
-	.xres_virtual   = 1024,
-	.yres_virtual   = 600,
-	.bits_per_pixel = 16,
-	.red            = {16, 8, 0},
-	.green          = {8, 8, 0},
-	.blue           = {0, 8, 0},
-	.activate       = FB_ACTIVATE_NOW,
-	.height         = -1,
-	.width          = -1,
-	.vmode          = FB_VMODE_NONINTERLACED,
-	.nonstd         = 0,
-	.accel_flags    = FB_ACCELF_TEXT,
-};
-
-static struct fb_fix_screeninfo smtcfb_fix = {
-	.id             = "smXXXfb",
-	.type           = FB_TYPE_PACKED_PIXELS,
-	.visual         = FB_VISUAL_TRUECOLOR,
-	.line_length    = 800 * 3,
-	.accel          = FB_ACCEL_SMI_LYNX,
-	.type_aux       = 0,
-	.xpanstep       = 0,
-	.ypanstep       = 0,
-	.ywrapstep      = 0,
-};
-
-struct vesa_mode {
-	char index[6];
-	u16  lfb_width;
-	u16  lfb_height;
-	u16  lfb_depth;
-};
-
-static const struct vesa_mode vesa_mode_table[] = {
-	{"0x301", 640,  480,  8},
-	{"0x303", 800,  600,  8},
-	{"0x305", 1024, 768,  8},
-	{"0x307", 1280, 1024, 8},
-
-	{"0x311", 640,  480,  16},
-	{"0x314", 800,  600,  16},
-	{"0x317", 1024, 768,  16},
-	{"0x31A", 1280, 1024, 16},
-
-	{"0x312", 640,  480,  24},
-	{"0x315", 800,  600,  24},
-	{"0x318", 1024, 768,  24},
-	{"0x31B", 1280, 1024, 24},
-};
-
-/**********************************************************************
-			 SM712 Mode table.
- **********************************************************************/
-static const struct modeinit vgamode[] = {
-	{
-		/*  mode#0: 640 x 480  16Bpp  60Hz */
-		640, 480, 16, 60,
-		/*  Init_MISC */
-		0xE3,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x00, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
-			0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
-			0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
-			0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
-			0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
-			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
-			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
-			0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
-			0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
-			0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
-			0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
-			0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
-		},
-	},
-	{
-		/*  mode#1: 640 x 480  24Bpp  60Hz */
-		640, 480, 24, 60,
-		/*  Init_MISC */
-		0xE3,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x00, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
-			0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
-			0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
-			0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
-			0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
-			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
-			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
-			0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
-			0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
-			0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
-			0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
-			0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
-		},
-	},
-	{
-		/*  mode#0: 640 x 480  32Bpp  60Hz */
-		640, 480, 32, 60,
-		/*  Init_MISC */
-		0xE3,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x00, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
-			0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
-			0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
-			0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
-			0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
-			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
-			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
-			0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
-			0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
-			0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
-			0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
-			0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
-		},
-	},
-
-	{	/*  mode#2: 800 x 600  16Bpp  60Hz */
-		800, 600, 16, 60,
-		/*  Init_MISC */
-		0x2B,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
-			0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
-			0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
-			0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
-			0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
-			0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
-			0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
-			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
-			0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
-			0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
-			0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
-			0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
-		},
-	},
-	{	/*  mode#3: 800 x 600  24Bpp  60Hz */
-		800, 600, 24, 60,
-		0x2B,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
-			0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
-			0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
-			0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
-			0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
-			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
-			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
-			0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
-			0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
-			0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
-			0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
-		},
-	},
-	{	/*  mode#7: 800 x 600  32Bpp  60Hz */
-		800, 600, 32, 60,
-		/*  Init_MISC */
-		0x2B,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
-			0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
-			0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
-			0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
-			0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
-			0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
-			0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
-			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
-			0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
-			0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
-			0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
-			0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
-		},
-	},
-	/* We use 1024x768 table to light 1024x600 panel for lemote */
-	{	/*  mode#4: 1024 x 600  16Bpp  60Hz  */
-		1024, 600, 16, 60,
-		/*  Init_MISC */
-		0xEB,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x00, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
-			0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x00, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
-			0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
-			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
-			0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
-			0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
-			0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
-			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
-			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
-			0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
-			0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
-			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
-		},
-	},
-	{	/*  mode#5: 1024 x 768  24Bpp  60Hz */
-		1024, 768, 24, 60,
-		/*  Init_MISC */
-		0xEB,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x30, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
-			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
-			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
-			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
-			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
-			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
-			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
-			0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
-			0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
-			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
-		},
-	},
-	{	/*  mode#4: 1024 x 768  32Bpp  60Hz */
-		1024, 768, 32, 60,
-		/*  Init_MISC */
-		0xEB,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x32, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
-			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
-			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
-			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
-			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
-			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
-			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
-			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
-			0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
-			0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
-			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
-		},
-	},
-	{	/*  mode#6: 320 x 240  16Bpp  60Hz */
-		320, 240, 16, 60,
-		/*  Init_MISC */
-		0xEB,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x32, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
-			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
-			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
-			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
-			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
-			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
-			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
-			0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
-			0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
-			0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
-			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
-		},
-	},
-
-	{	/*  mode#8: 320 x 240  32Bpp  60Hz */
-		320, 240, 32, 60,
-		/*  Init_MISC */
-		0xEB,
-		{	/*  Init_SR0_SR4 */
-			0x03, 0x01, 0x0F, 0x03, 0x0E,
-		},
-		{	/*  Init_SR10_SR24 */
-			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
-			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0xC4, 0x32, 0x02, 0x01, 0x01,
-		},
-		{	/*  Init_SR30_SR75 */
-			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
-			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
-			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
-			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
-			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
-			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
-			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
-			0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
-			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
-		},
-		{	/*  Init_SR80_SR93 */
-			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
-			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
-			0x00, 0x00, 0x00, 0x00,
-		},
-		{	/*  Init_SRA0_SRAF */
-			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
-			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
-		},
-		{	/*  Init_GR00_GR08 */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
-			0xFF,
-		},
-		{	/*  Init_AR00_AR14 */
-			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
-			0x41, 0x00, 0x0F, 0x00, 0x00,
-		},
-		{	/*  Init_CR00_CR18 */
-			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
-			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
-			0xFF,
-		},
-		{	/*  Init_CR30_CR4D */
-			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
-			0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
-			0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
-			0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
-		},
-		{	/*  Init_CR90_CRA7 */
-			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
-			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
-			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
-		},
-	},
-};
-
-static struct screen_info smtc_scr_info;
-
-static char *mode_option;
-
-/* process command line options, get vga parameter */
-static void __init sm7xx_vga_setup(char *options)
-{
-	int i;
-
-	if (!options || !*options)
-		return;
-
-	smtc_scr_info.lfb_width = 0;
-	smtc_scr_info.lfb_height = 0;
-	smtc_scr_info.lfb_depth = 0;
-
-	pr_debug("sm7xx_vga_setup = %s\n", options);
-
-	for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) {
-		if (strstr(options, vesa_mode_table[i].index)) {
-			smtc_scr_info.lfb_width  = vesa_mode_table[i].lfb_width;
-			smtc_scr_info.lfb_height =
-						vesa_mode_table[i].lfb_height;
-			smtc_scr_info.lfb_depth  = vesa_mode_table[i].lfb_depth;
-			return;
-		}
-	}
-}
-
-static void sm712_setpalette(int regno, unsigned red, unsigned green,
-			     unsigned blue, struct fb_info *info)
-{
-	/* set bit 5:4 = 01 (write LCD RAM only) */
-	smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
-
-	smtc_mmiowb(regno, dac_reg);
-	smtc_mmiowb(red >> 10, dac_val);
-	smtc_mmiowb(green >> 10, dac_val);
-	smtc_mmiowb(blue >> 10, dac_val);
-}
-
-/* chan_to_field
- *
- * convert a colour value into a field position
- *
- * from pxafb.c
- */
-
-static inline unsigned int chan_to_field(unsigned int chan,
-					 struct fb_bitfield *bf)
-{
-	chan &= 0xffff;
-	chan >>= 16 - bf->length;
-	return chan << bf->offset;
-}
-
-static int smtc_blank(int blank_mode, struct fb_info *info)
-{
-	/* clear DPMS setting */
-	switch (blank_mode) {
-	case FB_BLANK_UNBLANK:
-		/* Screen On: HSync: On, VSync : On */
-		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
-		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
-		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
-		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
-		smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
-		break;
-	case FB_BLANK_NORMAL:
-		/* Screen Off: HSync: On, VSync : On   Soft blank */
-		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
-		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
-		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
-		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
-		break;
-	case FB_BLANK_VSYNC_SUSPEND:
-		/* Screen On: HSync: On, VSync : Off */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
-		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
-		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
-		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
-		break;
-	case FB_BLANK_HSYNC_SUSPEND:
-		/* Screen On: HSync: Off, VSync : On */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
-		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
-		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
-		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
-		break;
-	case FB_BLANK_POWERDOWN:
-		/* Screen On: HSync: Off, VSync : Off */
-		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
-		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
-		smtc_seqw(0x6a, 0x0c);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
-		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
-		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
-		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
-		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
-		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
-			  unsigned blue, unsigned trans, struct fb_info *info)
-{
-	struct smtcfb_info *sfb;
-	u32 val;
-
-	sfb = info->par;
-
-	if (regno > 255)
-		return 1;
-
-	switch (sfb->fb->fix.visual) {
-	case FB_VISUAL_DIRECTCOLOR:
-	case FB_VISUAL_TRUECOLOR:
-		/*
-		 * 16/32 bit true-colour, use pseudo-palette for 16 base color
-		 */
-		if (regno >= 16)
-			break;
-		if (sfb->fb->var.bits_per_pixel == 16) {
-			u32 *pal = sfb->fb->pseudo_palette;
-
-			val = chan_to_field(red, &sfb->fb->var.red);
-			val |= chan_to_field(green, &sfb->fb->var.green);
-			val |= chan_to_field(blue, &sfb->fb->var.blue);
-#ifdef __BIG_ENDIAN
-			pal[regno] = ((red & 0xf800) >> 8) |
-				     ((green & 0xe000) >> 13) |
-				     ((green & 0x1c00) << 3) |
-				     ((blue & 0xf800) >> 3);
-#else
-			pal[regno] = val;
-#endif
-		} else {
-			u32 *pal = sfb->fb->pseudo_palette;
-
-			val = chan_to_field(red, &sfb->fb->var.red);
-			val |= chan_to_field(green, &sfb->fb->var.green);
-			val |= chan_to_field(blue, &sfb->fb->var.blue);
-#ifdef __BIG_ENDIAN
-			val = (val & 0xff00ff00 >> 8) |
-			      (val & 0x00ff00ff << 8);
-#endif
-			pal[regno] = val;
-		}
-		break;
-
-	case FB_VISUAL_PSEUDOCOLOR:
-		/* color depth 8 bit */
-		sm712_setpalette(regno, red, green, blue, info);
-		break;
-
-	default:
-		return 1;	/* unknown type */
-	}
-
-	return 0;
-}
-
-#ifdef __BIG_ENDIAN
-static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
-			   size_t count, loff_t *ppos)
-{
-	unsigned long p = *ppos;
-
-	u32 *buffer, *dst;
-	u32 __iomem *src;
-	int c, i, cnt = 0, err = 0;
-	unsigned long total_size;
-
-	if (!info || !info->screen_base)
-		return -ENODEV;
-
-	if (info->state != FBINFO_STATE_RUNNING)
-		return -EPERM;
-
-	total_size = info->screen_size;
-
-	if (total_size == 0)
-		total_size = info->fix.smem_len;
-
-	if (p >= total_size)
-		return 0;
-
-	if (count >= total_size)
-		count = total_size;
-
-	if (count + p > total_size)
-		count = total_size - p;
-
-	buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
-	if (!buffer)
-		return -ENOMEM;
-
-	src = (u32 __iomem *)(info->screen_base + p);
-
-	if (info->fbops->fb_sync)
-		info->fbops->fb_sync(info);
-
-	while (count) {
-		c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-		dst = buffer;
-		for (i = c >> 2; i--;) {
-			*dst = fb_readl(src++);
-			*dst = (*dst & 0xff00ff00 >> 8) |
-			       (*dst & 0x00ff00ff << 8);
-			dst++;
-		}
-		if (c & 3) {
-			u8 *dst8 = (u8 *)dst;
-			u8 __iomem *src8 = (u8 __iomem *)src;
-
-			for (i = c & 3; i--;) {
-				if (i & 1) {
-					*dst8++ = fb_readb(++src8);
-				} else {
-					*dst8++ = fb_readb(--src8);
-					src8 += 2;
-				}
-			}
-			src = (u32 __iomem *)src8;
-		}
-
-		if (copy_to_user(buf, buffer, c)) {
-			err = -EFAULT;
-			break;
-		}
-		*ppos += c;
-		buf += c;
-		cnt += c;
-		count -= c;
-	}
-
-	kfree(buffer);
-
-	return (err) ? err : cnt;
-}
-
-static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
-			    size_t count, loff_t *ppos)
-{
-	unsigned long p = *ppos;
-
-	u32 *buffer, *src;
-	u32 __iomem *dst;
-	int c, i, cnt = 0, err = 0;
-	unsigned long total_size;
-
-	if (!info || !info->screen_base)
-		return -ENODEV;
-
-	if (info->state != FBINFO_STATE_RUNNING)
-		return -EPERM;
-
-	total_size = info->screen_size;
-
-	if (total_size == 0)
-		total_size = info->fix.smem_len;
-
-	if (p > total_size)
-		return -EFBIG;
-
-	if (count > total_size) {
-		err = -EFBIG;
-		count = total_size;
-	}
-
-	if (count + p > total_size) {
-		if (!err)
-			err = -ENOSPC;
-
-		count = total_size - p;
-	}
-
-	buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
-	if (!buffer)
-		return -ENOMEM;
-
-	dst = (u32 __iomem *)(info->screen_base + p);
-
-	if (info->fbops->fb_sync)
-		info->fbops->fb_sync(info);
-
-	while (count) {
-		c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-		src = buffer;
-
-		if (copy_from_user(src, buf, c)) {
-			err = -EFAULT;
-			break;
-		}
-
-		for (i = c >> 2; i--;) {
-			fb_writel((*src & 0xff00ff00 >> 8) |
-				  (*src & 0x00ff00ff << 8), dst++);
-			src++;
-		}
-		if (c & 3) {
-			u8 *src8 = (u8 *)src;
-			u8 __iomem *dst8 = (u8 __iomem *)dst;
-
-			for (i = c & 3; i--;) {
-				if (i & 1) {
-					fb_writeb(*src8++, ++dst8);
-				} else {
-					fb_writeb(*src8++, --dst8);
-					dst8 += 2;
-				}
-			}
-			dst = (u32 __iomem *)dst8;
-		}
-
-		*ppos += c;
-		buf += c;
-		cnt += c;
-		count -= c;
-	}
-
-	kfree(buffer);
-
-	return (cnt) ? cnt : err;
-}
-#endif	/* ! __BIG_ENDIAN */
-
-static void sm7xx_set_timing(struct smtcfb_info *sfb)
-{
-	int i = 0, j = 0;
-	u32 m_nscreenstride;
-
-	dev_dbg(&sfb->pdev->dev,
-		"sfb->width=%d sfb->height=%d sfb->fb->var.bits_per_pixel=%d sfb->hz=%d\n",
-		sfb->width, sfb->height, sfb->fb->var.bits_per_pixel, sfb->hz);
-
-	for (j = 0; j < ARRAY_SIZE(vgamode); j++) {
-		if (vgamode[j].mmsizex != sfb->width ||
-		    vgamode[j].mmsizey != sfb->height ||
-		    vgamode[j].bpp != sfb->fb->var.bits_per_pixel ||
-		    vgamode[j].hz != sfb->hz)
-			continue;
-
-		dev_dbg(&sfb->pdev->dev,
-			"vgamode[j].mmsizex=%d vgamode[j].mmSizeY=%d vgamode[j].bpp=%d vgamode[j].hz=%d\n",
-			vgamode[j].mmsizex, vgamode[j].mmsizey,
-			vgamode[j].bpp, vgamode[j].hz);
-
-		dev_dbg(&sfb->pdev->dev, "vgamode index=%d\n", j);
-
-		smtc_mmiowb(0x0, 0x3c6);
-
-		smtc_seqw(0, 0x1);
-
-		smtc_mmiowb(vgamode[j].init_misc, 0x3c2);
-
-		/* init SEQ register SR00 - SR04 */
-		for (i = 0; i < SIZE_SR00_SR04; i++)
-			smtc_seqw(i, vgamode[j].init_sr00_sr04[i]);
-
-		/* init SEQ register SR10 - SR24 */
-		for (i = 0; i < SIZE_SR10_SR24; i++)
-			smtc_seqw(i + 0x10, vgamode[j].init_sr10_sr24[i]);
-
-		/* init SEQ register SR30 - SR75 */
-		for (i = 0; i < SIZE_SR30_SR75; i++)
-			if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
-			    (i + 0x30) != 0x6b)
-				smtc_seqw(i + 0x30,
-					  vgamode[j].init_sr30_sr75[i]);
-
-		/* init SEQ register SR80 - SR93 */
-		for (i = 0; i < SIZE_SR80_SR93; i++)
-			smtc_seqw(i + 0x80, vgamode[j].init_sr80_sr93[i]);
-
-		/* init SEQ register SRA0 - SRAF */
-		for (i = 0; i < SIZE_SRA0_SRAF; i++)
-			smtc_seqw(i + 0xa0, vgamode[j].init_sra0_sraf[i]);
-
-		/* init Graphic register GR00 - GR08 */
-		for (i = 0; i < SIZE_GR00_GR08; i++)
-			smtc_grphw(i, vgamode[j].init_gr00_gr08[i]);
-
-		/* init Attribute register AR00 - AR14 */
-		for (i = 0; i < SIZE_AR00_AR14; i++)
-			smtc_attrw(i, vgamode[j].init_ar00_ar14[i]);
-
-		/* init CRTC register CR00 - CR18 */
-		for (i = 0; i < SIZE_CR00_CR18; i++)
-			smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
-
-		/* init CRTC register CR30 - CR4D */
-		for (i = 0; i < SIZE_CR30_CR4D; i++)
-			smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
-
-		/* init CRTC register CR90 - CRA7 */
-		for (i = 0; i < SIZE_CR90_CRA7; i++)
-			smtc_crtcw(i + 0x90, vgamode[j].init_cr90_cra7[i]);
-	}
-	smtc_mmiowb(0x67, 0x3c2);
-
-	/* set VPR registers */
-	writel(0x0, sfb->vp_regs + 0x0C);
-	writel(0x0, sfb->vp_regs + 0x40);
-
-	/* set data width */
-	m_nscreenstride = (sfb->width * sfb->fb->var.bits_per_pixel) / 64;
-	switch (sfb->fb->var.bits_per_pixel) {
-	case 8:
-		writel(0x0, sfb->vp_regs + 0x0);
-		break;
-	case 16:
-		writel(0x00020000, sfb->vp_regs + 0x0);
-		break;
-	case 24:
-		writel(0x00040000, sfb->vp_regs + 0x0);
-		break;
-	case 32:
-		writel(0x00030000, sfb->vp_regs + 0x0);
-		break;
-	}
-	writel((u32)(((m_nscreenstride + 2) << 16) | m_nscreenstride),
-	       sfb->vp_regs + 0x10);
-}
-
-static void smtc_set_timing(struct smtcfb_info *sfb)
-{
-	switch (sfb->chip_id) {
-	case 0x710:
-	case 0x712:
-	case 0x720:
-		sm7xx_set_timing(sfb);
-		break;
-	}
-}
-
-static void smtcfb_setmode(struct smtcfb_info *sfb)
-{
-	switch (sfb->fb->var.bits_per_pixel) {
-	case 32:
-		sfb->fb->fix.visual       = FB_VISUAL_TRUECOLOR;
-		sfb->fb->fix.line_length  = sfb->fb->var.xres * 4;
-		sfb->fb->var.red.length   = 8;
-		sfb->fb->var.green.length = 8;
-		sfb->fb->var.blue.length  = 8;
-		sfb->fb->var.red.offset   = 16;
-		sfb->fb->var.green.offset = 8;
-		sfb->fb->var.blue.offset  = 0;
-		break;
-	case 24:
-		sfb->fb->fix.visual       = FB_VISUAL_TRUECOLOR;
-		sfb->fb->fix.line_length  = sfb->fb->var.xres * 3;
-		sfb->fb->var.red.length   = 8;
-		sfb->fb->var.green.length = 8;
-		sfb->fb->var.blue.length  = 8;
-		sfb->fb->var.red.offset   = 16;
-		sfb->fb->var.green.offset = 8;
-		sfb->fb->var.blue.offset  = 0;
-		break;
-	case 8:
-		sfb->fb->fix.visual       = FB_VISUAL_PSEUDOCOLOR;
-		sfb->fb->fix.line_length  = sfb->fb->var.xres;
-		sfb->fb->var.red.length   = 3;
-		sfb->fb->var.green.length = 3;
-		sfb->fb->var.blue.length  = 2;
-		sfb->fb->var.red.offset   = 5;
-		sfb->fb->var.green.offset = 2;
-		sfb->fb->var.blue.offset  = 0;
-		break;
-	case 16:
-	default:
-		sfb->fb->fix.visual       = FB_VISUAL_TRUECOLOR;
-		sfb->fb->fix.line_length  = sfb->fb->var.xres * 2;
-		sfb->fb->var.red.length   = 5;
-		sfb->fb->var.green.length = 6;
-		sfb->fb->var.blue.length  = 5;
-		sfb->fb->var.red.offset   = 11;
-		sfb->fb->var.green.offset = 5;
-		sfb->fb->var.blue.offset  = 0;
-		break;
-	}
-
-	sfb->width  = sfb->fb->var.xres;
-	sfb->height = sfb->fb->var.yres;
-	sfb->hz = 60;
-	smtc_set_timing(sfb);
-}
-
-static int smtc_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
-	/* sanity checks */
-	if (var->xres_virtual < var->xres)
-		var->xres_virtual = var->xres;
-
-	if (var->yres_virtual < var->yres)
-		var->yres_virtual = var->yres;
-
-	/* set valid default bpp */
-	if ((var->bits_per_pixel != 8)  && (var->bits_per_pixel != 16) &&
-	    (var->bits_per_pixel != 24) && (var->bits_per_pixel != 32))
-		var->bits_per_pixel = 16;
-
-	return 0;
-}
-
-static int smtc_set_par(struct fb_info *info)
-{
-	smtcfb_setmode(info->par);
-
-	return 0;
-}
-
-static struct fb_ops smtcfb_ops = {
-	.owner        = THIS_MODULE,
-	.fb_check_var = smtc_check_var,
-	.fb_set_par   = smtc_set_par,
-	.fb_setcolreg = smtc_setcolreg,
-	.fb_blank     = smtc_blank,
-	.fb_fillrect  = cfb_fillrect,
-	.fb_imageblit = cfb_imageblit,
-	.fb_copyarea  = cfb_copyarea,
-#ifdef __BIG_ENDIAN
-	.fb_read      = smtcfb_read,
-	.fb_write     = smtcfb_write,
-#endif
-};
-
-/*
- * Unmap in the memory mapped IO registers
- */
-
-static void smtc_unmap_mmio(struct smtcfb_info *sfb)
-{
-	if (sfb && smtc_regbaseaddress)
-		smtc_regbaseaddress = NULL;
-}
-
-/*
- * Map in the screen memory
- */
-
-static int smtc_map_smem(struct smtcfb_info *sfb,
-			 struct pci_dev *pdev, u_long smem_len)
-{
-	sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
-
-#ifdef __BIG_ENDIAN
-	if (sfb->fb->var.bits_per_pixel == 32)
-		sfb->fb->fix.smem_start += 0x800000;
-#endif
-
-	sfb->fb->fix.smem_len = smem_len;
-
-	sfb->fb->screen_base = sfb->lfb;
-
-	if (!sfb->fb->screen_base) {
-		dev_err(&pdev->dev,
-			"%s: unable to map screen memory\n", sfb->fb->fix.id);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-/*
- * Unmap in the screen memory
- *
- */
-static void smtc_unmap_smem(struct smtcfb_info *sfb)
-{
-	if (sfb && sfb->fb->screen_base) {
-		iounmap(sfb->fb->screen_base);
-		sfb->fb->screen_base = NULL;
-	}
-}
-
-/*
- * We need to wake up the device and make sure its in linear memory mode.
- */
-static inline void sm7xx_init_hw(void)
-{
-	outb_p(0x18, 0x3c4);
-	outb_p(0x11, 0x3c5);
-}
-
-static int smtcfb_pci_probe(struct pci_dev *pdev,
-			    const struct pci_device_id *ent)
-{
-	struct smtcfb_info *sfb;
-	struct fb_info *info;
-	u_long smem_size = 0x00800000;	/* default 8MB */
-	int err;
-	unsigned long mmio_base;
-
-	dev_info(&pdev->dev, "Silicon Motion display driver.\n");
-
-	err = pci_enable_device(pdev);	/* enable SMTC chip */
-	if (err)
-		return err;
-
-	err = pci_request_region(pdev, 0, "sm7xxfb");
-	if (err < 0) {
-		dev_err(&pdev->dev, "cannot reserve framebuffer region\n");
-		goto failed_regions;
-	}
-
-	sprintf(smtcfb_fix.id, "sm%Xfb", ent->device);
-
-	info = framebuffer_alloc(sizeof(*sfb), &pdev->dev);
-	if (!info) {
-		dev_err(&pdev->dev, "framebuffer_alloc failed\n");
-		err = -ENOMEM;
-		goto failed_free;
-	}
-
-	sfb = info->par;
-	sfb->fb = info;
-	sfb->chip_id = ent->device;
-	sfb->pdev = pdev;
-	info->flags = FBINFO_FLAG_DEFAULT;
-	info->fbops = &smtcfb_ops;
-	info->fix = smtcfb_fix;
-	info->var = smtcfb_var;
-	info->pseudo_palette = sfb->colreg;
-	info->par = sfb;
-
-	pci_set_drvdata(pdev, sfb);
-
-	sm7xx_init_hw();
-
-	/* get mode parameter from smtc_scr_info */
-	if (smtc_scr_info.lfb_width != 0) {
-		sfb->fb->var.xres = smtc_scr_info.lfb_width;
-		sfb->fb->var.yres = smtc_scr_info.lfb_height;
-		sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
-	} else {
-		/* default resolution 1024x600 16bit mode */
-		sfb->fb->var.xres = SCREEN_X_RES;
-		sfb->fb->var.yres = SCREEN_Y_RES;
-		sfb->fb->var.bits_per_pixel = SCREEN_BPP;
-	}
-
-#ifdef __BIG_ENDIAN
-	if (sfb->fb->var.bits_per_pixel == 24)
-		sfb->fb->var.bits_per_pixel = (smtc_scr_info.lfb_depth = 32);
-#endif
-	/* Map address and memory detection */
-	mmio_base = pci_resource_start(pdev, 0);
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
-
-	switch (sfb->chip_id) {
-	case 0x710:
-	case 0x712:
-		sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
-		sfb->fb->fix.mmio_len = 0x00400000;
-		smem_size = SM712_VIDEOMEMORYSIZE;
-#ifdef __BIG_ENDIAN
-		sfb->lfb = ioremap(mmio_base, 0x00c00000);
-#else
-		sfb->lfb = ioremap(mmio_base, 0x00800000);
-#endif
-		sfb->mmio = (smtc_regbaseaddress =
-		    sfb->lfb + 0x00700000);
-		sfb->dp_regs = sfb->lfb + 0x00408000;
-		sfb->vp_regs = sfb->lfb + 0x0040c000;
-#ifdef __BIG_ENDIAN
-		if (sfb->fb->var.bits_per_pixel == 32) {
-			sfb->lfb += 0x800000;
-			dev_info(&pdev->dev, "sfb->lfb=%p\n", sfb->lfb);
-		}
-#endif
-		if (!smtc_regbaseaddress) {
-			dev_err(&pdev->dev,
-				"%s: unable to map memory mapped IO!\n",
-				sfb->fb->fix.id);
-			err = -ENOMEM;
-			goto failed_fb;
-		}
-
-		/* set MCLK = 14.31818 * (0x16 / 0x2) */
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x62, 0x3e);
-		/* enable PCI burst */
-		smtc_seqw(0x17, 0x20);
-		/* enable word swap */
-#ifdef __BIG_ENDIAN
-		if (sfb->fb->var.bits_per_pixel == 32)
-			smtc_seqw(0x17, 0x30);
-#endif
-		break;
-	case 0x720:
-		sfb->fb->fix.mmio_start = mmio_base;
-		sfb->fb->fix.mmio_len = 0x00200000;
-		smem_size = SM722_VIDEOMEMORYSIZE;
-		sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
-		sfb->lfb = sfb->dp_regs + 0x00200000;
-		sfb->mmio = (smtc_regbaseaddress =
-		    sfb->dp_regs + 0x000c0000);
-		sfb->vp_regs = sfb->dp_regs + 0x800;
-
-		smtc_seqw(0x62, 0xff);
-		smtc_seqw(0x6a, 0x0d);
-		smtc_seqw(0x6b, 0x02);
-		break;
-	default:
-		dev_err(&pdev->dev,
-			"No valid Silicon Motion display chip was detected!\n");
-
-		goto failed_fb;
-	}
-
-	/* can support 32 bpp */
-	if (15 == sfb->fb->var.bits_per_pixel)
-		sfb->fb->var.bits_per_pixel = 16;
-
-	sfb->fb->var.xres_virtual = sfb->fb->var.xres;
-	sfb->fb->var.yres_virtual = sfb->fb->var.yres;
-	err = smtc_map_smem(sfb, pdev, smem_size);
-	if (err)
-		goto failed;
-
-	smtcfb_setmode(sfb);
-
-	err = register_framebuffer(info);
-	if (err < 0)
-		goto failed;
-
-	dev_info(&pdev->dev,
-		 "Silicon Motion SM%X Rev%X primary display mode %dx%d-%d Init Complete.\n",
-		 sfb->chip_id, sfb->chip_rev_id, sfb->fb->var.xres,
-		 sfb->fb->var.yres, sfb->fb->var.bits_per_pixel);
-
-	return 0;
-
-failed:
-	dev_err(&pdev->dev, "Silicon Motion, Inc. primary display init fail.\n");
-
-	smtc_unmap_smem(sfb);
-	smtc_unmap_mmio(sfb);
-failed_fb:
-	framebuffer_release(info);
-
-failed_free:
-	pci_release_region(pdev, 0);
-
-failed_regions:
-	pci_disable_device(pdev);
-
-	return err;
-}
-
-/*
- * 0x710 (LynxEM)
- * 0x712 (LynxEM+)
- * 0x720 (Lynx3DM, Lynx3DM+)
- */
-static const struct pci_device_id smtcfb_pci_table[] = {
-	{ PCI_DEVICE(0x126f, 0x710), },
-	{ PCI_DEVICE(0x126f, 0x712), },
-	{ PCI_DEVICE(0x126f, 0x720), },
-	{0,}
-};
-
-MODULE_DEVICE_TABLE(pci, smtcfb_pci_table);
-
-static void smtcfb_pci_remove(struct pci_dev *pdev)
-{
-	struct smtcfb_info *sfb;
-
-	sfb = pci_get_drvdata(pdev);
-	smtc_unmap_smem(sfb);
-	smtc_unmap_mmio(sfb);
-	unregister_framebuffer(sfb->fb);
-	framebuffer_release(sfb->fb);
-	pci_release_region(pdev, 0);
-	pci_disable_device(pdev);
-}
-
-#ifdef CONFIG_PM
-static int smtcfb_pci_suspend(struct device *device)
-{
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct smtcfb_info *sfb;
-
-	sfb = pci_get_drvdata(pdev);
-
-	/* set the hw in sleep mode use external clock and self memory refresh
-	 * so that we can turn off internal PLLs later on
-	 */
-	smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
-	smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
-
-	console_lock();
-	fb_set_suspend(sfb->fb, 1);
-	console_unlock();
-
-	/* additionally turn off all function blocks including internal PLLs */
-	smtc_seqw(0x21, 0xff);
-
-	return 0;
-}
-
-static int smtcfb_pci_resume(struct device *device)
-{
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct smtcfb_info *sfb;
-
-	sfb = pci_get_drvdata(pdev);
-
-	/* reinit hardware */
-	sm7xx_init_hw();
-	switch (sfb->chip_id) {
-	case 0x710:
-	case 0x712:
-		/* set MCLK = 14.31818 *  (0x16 / 0x2) */
-		smtc_seqw(0x6a, 0x16);
-		smtc_seqw(0x6b, 0x02);
-		smtc_seqw(0x62, 0x3e);
-		/* enable PCI burst */
-		smtc_seqw(0x17, 0x20);
-#ifdef __BIG_ENDIAN
-		if (sfb->fb->var.bits_per_pixel == 32)
-			smtc_seqw(0x17, 0x30);
-#endif
-		break;
-	case 0x720:
-		smtc_seqw(0x62, 0xff);
-		smtc_seqw(0x6a, 0x0d);
-		smtc_seqw(0x6b, 0x02);
-		break;
-	}
-
-	smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
-	smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
-
-	smtcfb_setmode(sfb);
-
-	console_lock();
-	fb_set_suspend(sfb->fb, 0);
-	console_unlock();
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume);
-#define SM7XX_PM_OPS (&sm7xx_pm_ops)
-
-#else  /* !CONFIG_PM */
-
-#define SM7XX_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-static struct pci_driver smtcfb_driver = {
-	.name = "smtcfb",
-	.id_table = smtcfb_pci_table,
-	.probe = smtcfb_pci_probe,
-	.remove = smtcfb_pci_remove,
-	.driver.pm  = SM7XX_PM_OPS,
-};
-
-static int __init sm712fb_init(void)
-{
-#ifndef MODULE
-	char *option = NULL;
-
-	if (fb_get_options("sm712fb", &option))
-		return -ENODEV;
-	if (option && *option)
-		mode_option = option;
-#endif
-	sm7xx_vga_setup(mode_option);
-
-	return pci_register_driver(&smtcfb_driver);
-}
-
-module_init(sm712fb_init);
-
-static void __exit sm712fb_exit(void)
-{
-	pci_unregister_driver(&smtcfb_driver);
-}
-
-module_exit(sm712fb_exit);
-
-MODULE_AUTHOR("Siliconmotion ");
-MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
-MODULE_LICENSE("GPL");
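
The 16 bpp truecolor path removed above builds its pseudo-palette with chan_to_field(), scaling each 16-bit channel down to the fb_bitfield lengths/offsets programmed in smtcfb_setmode() (5/6/5 at offsets 11/5/0). A minimal user-space sketch of that packing, assuming an RGB565 layout; the struct name and test values here are illustrative only and not kernel API:

#include <stdio.h>

struct bitfield {
	unsigned int offset;	/* bit position of the field */
	unsigned int length;	/* width of the field in bits */
};

/* Mirrors the deleted chan_to_field(): keep the top 'length' bits of a
 * 16-bit channel and shift them to the field's offset. */
static unsigned int chan_to_field(unsigned int chan, const struct bitfield *bf)
{
	chan &= 0xffff;
	chan >>= 16 - bf->length;
	return chan << bf->offset;
}

int main(void)
{
	/* RGB565 layout used by the driver's 16 bpp mode */
	const struct bitfield red = { 11, 5 }, green = { 5, 6 }, blue = { 0, 5 };
	unsigned int pixel;

	pixel = chan_to_field(0xffff, &red) |	/* full red   -> 0xf800 */
		chan_to_field(0x8000, &green) |	/* half green -> 0x0400 */
		chan_to_field(0x0000, &blue);	/* no blue    -> 0x0000 */

	printf("0x%04x\n", pixel);		/* prints 0xfc00 */
	return 0;
}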
diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c
index d45c8af..d4d4598 100644
--- a/drivers/staging/speakup/buffers.c
+++ b/drivers/staging/speakup/buffers.c
@@ -63,7 +63,8 @@
 {
 	if (!synth->alive) {
 		/* This makes sure that we won't stop TTYs if there is no synth
-		 * to restart them */
+		 * to restart them
+		 */
 		return;
 	}
 	if (synth_buffer_free() <= 100) {
diff --git a/drivers/staging/speakup/i18n.c b/drivers/staging/speakup/i18n.c
index 9ea16c5..f061747 100644
--- a/drivers/staging/speakup/i18n.c
+++ b/drivers/staging/speakup/i18n.c
@@ -1,5 +1,6 @@
 /* Internationalization implementation.  Includes definitions of English
- * string arrays, and the i18n pointer. */
+ * string arrays, and the i18n pointer.
+ */
 
 #include <linux/slab.h>		/* For kmalloc. */
 #include <linux/ctype.h>
diff --git a/drivers/staging/speakup/i18n.h b/drivers/staging/speakup/i18n.h
index 326d086..8fcce56 100644
--- a/drivers/staging/speakup/i18n.h
+++ b/drivers/staging/speakup/i18n.h
@@ -224,11 +224,11 @@
 	enum msg_index_t end;
 };
 
-extern char *spk_msg_get(enum msg_index_t index);
-extern ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length);
-extern struct msg_group_t *spk_find_msg_group(const char *group_name);
-extern void spk_reset_msg_group(struct msg_group_t *group);
-extern void spk_initialize_msgs(void);
-extern void spk_free_user_msgs(void);
+char *spk_msg_get(enum msg_index_t index);
+ssize_t spk_msg_set(enum msg_index_t index, char *text, size_t length);
+struct msg_group_t *spk_find_msg_group(const char *group_name);
+void spk_reset_msg_group(struct msg_group_t *group);
+void spk_initialize_msgs(void);
+void spk_free_user_msgs(void);
 
 #endif
diff --git a/drivers/staging/speakup/keyhelp.c b/drivers/staging/speakup/keyhelp.c
index 9475674..02d5c70 100644
--- a/drivers/staging/speakup/keyhelp.c
+++ b/drivers/staging/speakup/keyhelp.c
@@ -165,7 +165,7 @@
 			synth_printf("\n");
 			return 1;
 		}
-	cur_item = letter_offsets[ch-'a'];
+		cur_item = letter_offsets[ch-'a'];
 	} else if (type == KT_CUR) {
 		if (ch == 0
 		    && (MSG_FUNCNAMES_START + cur_item + 1) <=
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 0211df6..958add4 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -240,7 +240,8 @@
 	cp += sprintf(cp, "%d, %d, %d,\n", KEY_MAP_VER, num_keys, nstates);
 	cp1 += 2; /* now pointing at shift states */
 	/* dump num_keys+1 as first row is shift states + flags,
-	 * each subsequent row is key + states */
+	 * each subsequent row is key + states
+	 */
 	for (n = 0; n <= num_keys; n++) {
 		for (i = 0; i <= nstates; i++) {
 			ch = *cp1++;
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 6c4f9a1..63c59bc 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -128,7 +128,8 @@
 
 /* array of 256 char pointers (one for each character description)
  * initialized to default_chars and user selectable via
- * /proc/speakup/characters */
+ * /proc/speakup/characters
+ */
 char *spk_characters[256];
 
 char *spk_default_chars[256] = {
@@ -194,7 +195,8 @@
 
 /* array of 256 u_short (one for each character)
  * initialized to default_chartab and user selectable via
- * /sys/module/speakup/parameters/chartab */
+ * /sys/module/speakup/parameters/chartab
+ */
 u_short spk_chartab[256];
 
 static u_short default_chartab[256] = {
@@ -540,7 +542,8 @@
  * see if there is a word starting on the next position to the right
  * and return that word if it exists.  If it does not exist it will
  * move left to the beginning of any previous word on the line or the
- * beginning off the line whichever comes first.. */
+ * beginning off the line whichever comes first..
+ */
 
 static u_long get_word(struct vc_data *vc)
 {
@@ -1113,7 +1116,8 @@
 			 * suppress multiple to get rid of long pauses and
 			 * clear repeat count
 			 * so if someone has
-			 * repeats on you don't get nothing repeated count */
+			 * repeats on you don't get nothing repeated count
+			 */
 			if (ch != old_ch)
 				synth_printf("%c", ch);
 			else
@@ -1509,7 +1513,8 @@
 	if (spk_no_intr)
 		spk_do_flush();
 /* the key press flushes if !no_inter but we want to flush on cursor
- * moves regardless of no_inter state */
+ * moves regardless of no_inter state
+ */
 	is_cursor = value + 1;
 	old_cursor_pos = vc->vc_pos;
 	old_cursor_x = vc->vc_x;
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index a031570..98af3b1 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -114,7 +114,8 @@
 			obp = bp;
 		if (!((i + 2) % vc->vc_size_row)) {
 			/* strip trailing blanks from line and add newline,
-			   unless non-space at end of line. */
+			 * unless non-space at end of line.
+			 */
 			if (obp != bp) {
 				bp = obp;
 				*bp++ = '\r';
diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
index 1d9d51b..66ac999 100644
--- a/drivers/staging/speakup/serialio.c
+++ b/drivers/staging/speakup/serialio.c
@@ -51,7 +51,8 @@
 	}
 
 	/*	Disable UART interrupts, set DTR and RTS high
-	 *	and set speed. */
+	 *	and set speed.
+	 */
 	outb(cval | UART_LCR_DLAB, ser->port + UART_LCR);	/* set DLAB */
 	outb(quot & 0xff, ser->port + UART_DLL);	/* LS of divisor */
 	outb(quot >> 8, ser->port + UART_DLM);		/* MS of divisor */
@@ -145,7 +146,8 @@
 		synth->alive = 0;
 		/* No synth any more, so nobody will restart TTYs, and we thus
 		 * need to do it ourselves.  Now that there is no synth we can
-		 * let application flood anyway */
+		 * let application flood anyway
+		 */
 		speakup_start_ttys();
 		timeouts = 0;
 		return 0;
@@ -163,7 +165,8 @@
 		/* CTS */
 		if (--tmout == 0) {
 			/* pr_warn("%s: timed out (cts)\n",
-			 * synth->long_name); */
+			 * synth->long_name);
+			 */
 			timeouts++;
 			return 0;
 		}
@@ -217,4 +220,3 @@
 	speakup_info.port_tts = 0;
 }
 EXPORT_SYMBOL_GPL(spk_serial_release);
-
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index a7f4962..df74c91 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -42,46 +42,44 @@
 #define IS_CHAR(x, type) (spk_chartab[((u_char)x)]&type)
 #define IS_TYPE(x, type) ((spk_chartab[((u_char)x)]&type) == type)
 
-extern int speakup_thread(void *data);
-extern void spk_reset_default_chars(void);
-extern void spk_reset_default_chartab(void);
-extern void synth_start(void);
+int speakup_thread(void *data);
+void spk_reset_default_chars(void);
+void spk_reset_default_chartab(void);
+void synth_start(void);
 void synth_insert_next_index(int sent_num);
 void spk_reset_index_count(int sc);
 void spk_get_index_count(int *linecount, int *sentcount);
-extern int spk_set_key_info(const u_char *key_info, u_char *k_buffer);
-extern char *spk_strlwr(char *s);
-extern char *spk_s2uchar(char *start, char *dest);
-extern int speakup_kobj_init(void);
-extern void speakup_kobj_exit(void);
-extern int spk_chartab_get_value(char *keyword);
-extern void speakup_register_var(struct var_t *var);
-extern void speakup_unregister_var(enum var_id_t var_id);
-extern struct st_var_header *spk_get_var_header(enum var_id_t var_id);
-extern struct st_var_header *spk_var_header_by_name(const char *name);
-extern struct punc_var_t *spk_get_punc_var(enum var_id_t var_id);
-extern int spk_set_num_var(int val, struct st_var_header *var, int how);
-extern int spk_set_string_var(const char *page, struct st_var_header *var,
-				int len);
-extern int spk_set_mask_bits(const char *input, const int which, const int how);
+int spk_set_key_info(const u_char *key_info, u_char *k_buffer);
+char *spk_strlwr(char *s);
+char *spk_s2uchar(char *start, char *dest);
+int speakup_kobj_init(void);
+void speakup_kobj_exit(void);
+int spk_chartab_get_value(char *keyword);
+void speakup_register_var(struct var_t *var);
+void speakup_unregister_var(enum var_id_t var_id);
+struct st_var_header *spk_get_var_header(enum var_id_t var_id);
+struct st_var_header *spk_var_header_by_name(const char *name);
+struct punc_var_t *spk_get_punc_var(enum var_id_t var_id);
+int spk_set_num_var(int val, struct st_var_header *var, int how);
+int spk_set_string_var(const char *page, struct st_var_header *var, int len);
+int spk_set_mask_bits(const char *input, const int which, const int how);
 extern special_func spk_special_handler;
-extern int spk_handle_help(struct vc_data *vc, u_char type, u_char ch,
-				u_short key);
-extern int synth_init(char *name);
-extern void synth_release(void);
+int spk_handle_help(struct vc_data *vc, u_char type, u_char ch, u_short key);
+int synth_init(char *name);
+void synth_release(void);
 
-extern void spk_do_flush(void);
-extern void speakup_start_ttys(void);
-extern void synth_buffer_add(char ch);
-extern void synth_buffer_clear(void);
-extern void speakup_clear_selection(void);
-extern int speakup_set_selection(struct tty_struct *tty);
-extern int speakup_paste_selection(struct tty_struct *tty);
-extern void speakup_cancel_paste(void);
-extern void speakup_register_devsynth(void);
-extern void speakup_unregister_devsynth(void);
-extern void synth_write(const char *buf, size_t count);
-extern int synth_supports_indexing(void);
+void spk_do_flush(void);
+void speakup_start_ttys(void);
+void synth_buffer_add(char ch);
+void synth_buffer_clear(void);
+void speakup_clear_selection(void);
+int speakup_set_selection(struct tty_struct *tty);
+int speakup_paste_selection(struct tty_struct *tty);
+void speakup_cancel_paste(void);
+void speakup_register_devsynth(void);
+void speakup_unregister_devsynth(void);
+void synth_write(const char *buf, size_t count);
+int synth_supports_indexing(void);
 
 extern struct vc_data *spk_sel_cons;
 extern unsigned short spk_xs, spk_ys, spk_xe, spk_ye; /* our region points */
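
The hunk above drops the redundant 'extern' keyword from function prototypes while keeping it on the variable declarations below, where it is required. A two-line illustration of why the function case is a no-op; the header and identifier are hypothetical, for illustration only:

/* illustrative_decls.h -- both declarations are equivalent: function
 * declarations at file scope have external linkage by default. */
int synth_init_example(char *name);
extern int synth_init_example(char *name);

/* By contrast, dropping 'extern' from an object declaration such as
 * spk_sel_cons would turn it into a tentative definition, so the
 * variable lines above keep their 'extern'. */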
diff --git a/drivers/staging/speakup/speakup_acnt.h b/drivers/staging/speakup/speakup_acnt.h
index 6376fca..107ec11 100644
--- a/drivers/staging/speakup/speakup_acnt.h
+++ b/drivers/staging/speakup/speakup_acnt.h
@@ -6,10 +6,12 @@
 
 	/* Port Status Flags */
 #define SYNTH_READABLE	0x01	/* mask for bit which is nonzero if a
-				   byte can be read from the data port */
+				 * byte can be read from the data port
+				 */
 #define SYNTH_WRITABLE	0x02	/* mask for RDY bit, which when set to
-				   1, indicates the data port is ready
-				   to accept a byte of data. */
+				 * 1, indicates the data port is ready
+				 *  to accept a byte of data.
+				 */
 #define SYNTH_QUIET	'S' /* synth is not speaking */
 #define SYNTH_FULL	'F' /* synth is full. */
 #define SYNTH_ALMOST_EMPTY 'M' /* synth has less than 2 seconds of text left */
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index 437e13a..4893fef 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -88,8 +88,9 @@
 #define	CTRL_last_index		0x0b00	/*   get last index spoken */
 #define	CTRL_io_priority	0x0c00	/*   change i/o priority */
 #define	CTRL_free_mem		0x0d00	/*   get free paragraphs on module */
-#define	CTRL_get_lang		0x0e00	/*   return bit mask of loaded
-					 *   languages */
+#define	CTRL_get_lang		0x0e00	/* return bit mask of loaded
+						 * languages
+						 */
 #define	CMD_test			0x2000		/* self-test request */
 #define	TEST_mask		0x0F00	/* isolate test field */
 #define	TEST_null		0x0000	/* no test requested */
@@ -500,4 +501,3 @@
 MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
-
diff --git a/drivers/staging/speakup/speakup_dtlk.h b/drivers/staging/speakup/speakup_dtlk.h
index d951d18..46d885f 100644
--- a/drivers/staging/speakup/speakup_dtlk.h
+++ b/drivers/staging/speakup/speakup_dtlk.h
@@ -4,31 +4,37 @@
 #define SYNTH_CLEAR	0x18		/* stops speech */
 	/* TTS Port Status Flags */
 #define TTS_READABLE	0x80	/* mask for bit which is nonzero if a
-					 byte can be read from the TTS port */
+				 * byte can be read from the TTS port
+				 */
 #define TTS_SPEAKING	0x40	/* mask for SYNC bit, which is nonzero
-					 while DoubleTalk is producing
-					 output with TTS, PCM or CVSD
-					 synthesizers or tone generators
-					 (that is, all but LPC) */
+				 * while DoubleTalk is producing
+				 * output with TTS, PCM or CVSD
+				 * synthesizers or tone generators
+				 * (that is, all but LPC)
+				 */
 #define TTS_SPEAKING2	0x20	/* mask for SYNC2 bit,
-					 which falls to zero up to 0.4 sec
-					 before speech stops */
+				 * which falls to zero up to 0.4 sec
+				 * before speech stops
+				 */
 #define TTS_WRITABLE	0x10	/* mask for RDY bit, which when set to
-					 1, indicates the TTS port is ready
-					 to accept a byte of data.  The RDY
-					 bit goes zero 2-3 usec after
-					 writing, and goes 1 again 180-190
-					 usec later. */
+				 * 1, indicates the TTS port is ready
+				 * to accept a byte of data.  The RDY
+				 * bit goes zero 2-3 usec after
+				 * writing, and goes 1 again 180-190
+				 * usec later.
+				 */
 #define TTS_ALMOST_FULL	0x08	/* mask for AF bit: When set to 1,
-					 indicates that less than 300 bytes
-					 are available in the TTS input
-					 buffer. AF is always 0 in the PCM,
-					 TGN and CVSD modes. */
+					 * indicates that less than 300 bytes
+					 * are available in the TTS input
+					 * buffer. AF is always 0 in the PCM,
+					 * TGN and CVSD modes.
+					 */
 #define TTS_ALMOST_EMPTY 0x04	/* mask for AE bit: When set to 1,
-					 indicates that less than 300 bytes
-					 are remaining in DoubleTalk's input
-					 (TTS or PCM) buffer. AE is always 1
-					 in the TGN and CVSD modes. */
+				 * indicates that less than 300 bytes
+				 * are remaining in DoubleTalk's input
+				 * (TTS or PCM) buffer. AE is always 1
+				 * in the TGN and CVSD modes.
+				 */
 
 				/* data returned by Interrogate command */
 struct synth_settings {
@@ -45,10 +51,12 @@
 	u_char ext_dict_loaded; /* 1=exception dictionary loaded */
 	u_char ext_dict_status; /* 1=exception dictionary enabled */
 	u_char free_ram;	/* # pages (truncated) remaining for
-				 * text buffer */
+				 * text buffer
+				 */
 	u_char articulation;	/* nA; 0-9 */
 	u_char reverb;		/* nR; 0-9 */
 	u_char eob;		/* 7Fh value indicating end of
-				 * parameter block */
+				 * parameter block
+				 */
 	u_char has_indexing;	/* nonzero if indexing is implemented */
 };
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index fb31bb9..366358b 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -356,4 +356,3 @@
 MODULE_DESCRIPTION("Speakup userspace software synthesizer support");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
-
diff --git a/drivers/staging/speakup/thread.c b/drivers/staging/speakup/thread.c
index d95efb7..90c383e 100644
--- a/drivers/staging/speakup/thread.c
+++ b/drivers/staging/speakup/thread.c
@@ -48,7 +48,8 @@
 			kd_mksound(our_sound.freq, our_sound.jiffies);
 		if (synth && synth->catch_up && synth->alive) {
 			/* It is up to the callee to take the lock, so that it
-			 * can sleep whenever it likes */
+			 * can sleep whenever it likes
+			 */
 			synth->catch_up(synth);
 		}
 
diff --git a/drivers/staging/speakup/varhandlers.c b/drivers/staging/speakup/varhandlers.c
index 1b0d1c0..75bf40c 100644
--- a/drivers/staging/speakup/varhandlers.c
+++ b/drivers/staging/speakup/varhandlers.c
@@ -269,7 +269,8 @@
 /* spk_set_mask_bits sets or clears the punc/delim/repeat bits,
  * if input is null uses the defaults.
  * values for how: 0 clears bits of chars supplied,
- * 1 clears allk, 2 sets bits for chars */
+ * 1 clears allk, 2 sets bits for chars
+ */
 int spk_set_mask_bits(const char *input, const int which, const int how)
 {
 	u_char *cp;
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index 0f524bb..1f9ba8b 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1126,7 +1126,6 @@
 static struct i2c_driver synaptics_rmi4_driver = {
 	.driver = {
 		.name	=	DRIVER_NAME,
-		.owner	=	THIS_MODULE,
 		.pm	=	&synaptics_rmi4_dev_pm_ops,
 	},
 	.probe		=	synaptics_rmi4_probe,
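
Dropping .owner here relies on the i2c core filling it in: drivers registered through module_i2c_driver()/i2c_add_driver() pass THIS_MODULE down to i2c_register_driver(), which stores it in driver->driver.owner, so the explicit initializer is redundant. Roughly the relevant helper as it appears in include/linux/i2c.h of this era (paraphrased from memory; check the tree you build against):

/* from include/linux/i2c.h (paraphrased) */
#define i2c_add_driver(driver) \
	i2c_register_driver(THIS_MODULE, driver)

/* module_i2c_driver() expands to module_driver(..., i2c_add_driver,
 * i2c_del_driver), so THIS_MODULE reaches driver->driver.owner without
 * an explicit .owner assignment in the i2c_driver definition. */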
diff --git a/drivers/staging/unisys/Kconfig b/drivers/staging/unisys/Kconfig
index 778f9d0..624abe6 100644
--- a/drivers/staging/unisys/Kconfig
+++ b/drivers/staging/unisys/Kconfig
@@ -3,7 +3,7 @@
 #
 menuconfig UNISYSSPAR
 	bool "Unisys SPAR driver support"
-	depends on X86_64
+	depends on X86_64 && !UML
 	select PCI
 	select ACPI
 	---help---
diff --git a/drivers/staging/unisys/include/channel_guid.h b/drivers/staging/unisys/include/channel_guid.h
index 706363fc..17cb499 100644
--- a/drivers/staging/unisys/include/channel_guid.h
+++ b/drivers/staging/unisys/include/channel_guid.h
@@ -17,35 +17,31 @@
  * CHANNEL Guids
  */
 
-/* Used in IOChannel
- * {414815ed-c58c-11da-95a9-00e08161165f}
- */
+/* {414815ed-c58c-11da-95a9-00e08161165f} */
 #define SPAR_VHBA_CHANNEL_PROTOCOL_UUID \
 		UUID_LE(0x414815ed, 0xc58c, 0x11da, \
 				0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
 static const uuid_le spar_vhba_channel_protocol_uuid =
 	SPAR_VHBA_CHANNEL_PROTOCOL_UUID;
+#define SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR \
+	"414815ed-c58c-11da-95a9-00e08161165f"
 
-/* Used in IOChannel
- * {8cd5994d-c58e-11da-95a9-00e08161165f}
- */
+/* {8cd5994d-c58e-11da-95a9-00e08161165f} */
 #define SPAR_VNIC_CHANNEL_PROTOCOL_UUID \
 		UUID_LE(0x8cd5994d, 0xc58e, 0x11da, \
 				0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f)
 static const uuid_le spar_vnic_channel_protocol_uuid =
 	SPAR_VNIC_CHANNEL_PROTOCOL_UUID;
+#define SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR \
+	"8cd5994d-c58e-11da-95a9-00e08161165f"
 
-/* Used in IOChannel
- * {72120008-4AAB-11DC-8530-444553544200}
- */
+/* {72120008-4AAB-11DC-8530-444553544200} */
 #define SPAR_SIOVM_UUID \
 		UUID_LE(0x72120008, 0x4AAB, 0x11DC, \
 				0x85, 0x30, 0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
 static const uuid_le spar_siovm_uuid = SPAR_SIOVM_UUID;
 
-/* Used in visornoop/visornoop_main.c
- * {5b52c5ac-e5f5-4d42-8dff-429eaecd221f}
- */
+/* {5b52c5ac-e5f5-4d42-8dff-429eaecd221f} */
 #define SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID  \
 		UUID_LE(0x5b52c5ac, 0xe5f5, 0x4d42, \
 				0x8d, 0xff, 0x42, 0x9e, 0xae, 0xcd, 0x22, 0x1f)
@@ -53,9 +49,7 @@
 static const uuid_le spar_controldirector_channel_protocol_uuid =
 	SPAR_CONTROLDIRECTOR_CHANNEL_PROTOCOL_UUID;
 
-/* Used in visorchipset/visorchipset_main.c
- * {B4E79625-AEDE-4EAA-9E11-D3EDDCD4504C}
- */
+/* {b4e79625-aede-4eAA-9e11-D3eddcd4504c} */
 #define SPAR_DIAG_POOL_CHANNEL_PROTOCOL_UUID				\
 		UUID_LE(0xb4e79625, 0xaede, 0x4eaa, \
 				0x9e, 0x11, 0xd3, 0xed, 0xdc, 0xd4, 0x50, 0x4c)
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index e4a21e4..9235536 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -113,7 +113,8 @@
 	struct driver_attribute version_attr;
 };
 
-#define to_visor_driver(x) container_of(x, struct visor_driver, driver)
+#define to_visor_driver(x) ((x) ? \
+	(container_of(x, struct visor_driver, driver)) : (NULL))
 
 /** A device type for things "plugged" into the visorbus bus */
 
@@ -200,6 +201,8 @@
 			       void *msg);
 bool visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
 			       void *msg);
+bool visorchannel_signalempty(struct visorchannel *channel, u32 queue);
+
 int visorchannel_signalqueue_slots_avail(struct visorchannel *channel,
 					 u32 queue);
 int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
diff --git a/drivers/staging/unisys/visorbus/controlvmchannel.h b/drivers/staging/unisys/visorbus/controlvmchannel.h
index a50d9cf..ec25366 100644
--- a/drivers/staging/unisys/visorbus/controlvmchannel.h
+++ b/drivers/staging/unisys/visorbus/controlvmchannel.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
index f74f5d8..3c97ebac4 100644
--- a/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
+++ b/drivers/staging/unisys/visorbus/controlvmcompletionstatus.h
@@ -1,12 +1,11 @@
 /* controlvmcompletionstatus.c
  *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All Rights Reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
index 57dd93e..b08b6ec 100644
--- a/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
+++ b/drivers/staging/unisys/visorbus/iovmcall_gnuc.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/periodic_work.c b/drivers/staging/unisys/visorbus/periodic_work.c
index 5e56088..a3631c3 100644
--- a/drivers/staging/unisys/visorbus/periodic_work.c
+++ b/drivers/staging/unisys/visorbus/periodic_work.c
@@ -1,12 +1,11 @@
 /* periodic_work.c
  *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/vbuschannel.h b/drivers/staging/unisys/visorbus/vbuschannel.h
index 5ed83a3..80e6447 100644
--- a/drivers/staging/unisys/visorbus/vbuschannel.h
+++ b/drivers/staging/unisys/visorbus/vbuschannel.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
index 9b6d3e6..f59fd8a 100644
--- a/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
+++ b/drivers/staging/unisys/visorbus/vbusdeviceinfo.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 6db4719..2309f5f 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -1,12 +1,11 @@
 /* visorbus_main.c
  *
- * Copyright © 2010 - 2013 UNISYS CORPORATION
+ * Copyright © 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -70,6 +69,38 @@
 	NULL,
 };
 
+/*
+ * DEVICE type attributes
+ *
+ * The modalias file will contain the guid of the device.
+ */
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct visor_device *vdev;
+	uuid_le guid;
+
+	vdev = to_visor_device(dev);
+	guid = visorchannel_get_uuid(vdev->visorchannel);
+	return snprintf(buf, PAGE_SIZE, "visorbus:%pUl\n", &guid);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *visorbus_dev_attrs[] = {
+	&dev_attr_modalias.attr,
+	NULL,
+};
+
+/* sysfs example for bridge-only sysfs files using device_type's */
+static const struct attribute_group visorbus_dev_group = {
+	.attrs = visorbus_dev_attrs,
+};
+
+static const struct attribute_group *visorbus_dev_groups[] = {
+	&visorbus_dev_group,
+	NULL,
+};
+
 /** This describes the TYPE of bus.
  *  (Don't confuse this with an INSTANCE of the bus.)
  */
@@ -77,6 +108,7 @@
 	.name = "visorbus",
 	.match = visorbus_match,
 	.uevent = visorbus_uevent,
+	.dev_groups = visorbus_dev_groups,
 	.bus_groups = visorbus_bus_groups,
 };
 
@@ -129,7 +161,13 @@
 static int
 visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env)
 {
-	if (add_uevent_var(env, "VERSION=%s", VERSION))
+	struct visor_device *dev;
+	uuid_le guid;
+
+	dev = to_visor_device(xdev);
+	guid = visorchannel_get_uuid(dev->visorchannel);
+
+	if (add_uevent_var(env, "MODALIAS=visorbus:%pUl", &guid))
 		return -ENOMEM;
 	return 0;
 }
@@ -218,9 +256,9 @@
 struct devmajorminor_attribute {
 	struct attribute attr;
 	int slot;
-	 ssize_t (*show)(struct visor_device *, int slot, char *buf);
-	 ssize_t (*store)(struct visor_device *, int slot, const char *buf,
-			  size_t count);
+	ssize_t (*show)(struct visor_device *, int slot, char *buf);
+	ssize_t (*store)(struct visor_device *, int slot, const char *buf,
+			 size_t count);
 };
 
 static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
@@ -281,12 +319,11 @@
 		rc = -ENOMEM;
 		goto away;
 	}
-	myattr = kmalloc(sizeof(*myattr), GFP_KERNEL);
+	myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
 	if (!myattr) {
 		rc = -ENOMEM;
 		goto away;
 	}
-	memset(myattr, 0, sizeof(struct devmajorminor_attribute));
 	myattr->show = DEVMAJORMINOR_ATTR;
 	myattr->store = NULL;
 	myattr->slot = slot;
@@ -471,6 +508,7 @@
 		&dev_attr_typeguid.attr,
 		&dev_attr_zoneguid.attr,
 		&dev_attr_typename.attr,
+		NULL
 };
 
 static struct attribute_group channel_attr_grp = {
@@ -478,7 +516,7 @@
 		.attrs = channel_attrs,
 };
 
-static const struct attribute_group *visorbus_dev_groups[] = {
+static const struct attribute_group *visorbus_channel_groups[] = {
 		&channel_attr_grp,
 		NULL
 };
@@ -678,7 +716,7 @@
 static void
 dev_periodic_work(void *xdev)
 {
-	struct visor_device *dev = (struct visor_device *)xdev;
+	struct visor_device *dev = xdev;
 	struct visor_driver *drv = to_visor_driver(dev->device.driver);
 
 	down(&dev->visordriver_callback_lock);
@@ -937,7 +975,7 @@
 
 	sema_init(&dev->visordriver_callback_lock, 1);	/* unlocked */
 	dev->device.bus = &visorbus_type;
-	dev->device.groups = visorbus_dev_groups;
+	dev->device.groups = visorbus_channel_groups;
 	device_initialize(&dev->device);
 	dev->device.release = visorbus_release_device;
 	/* keep a reference just for us (now 2) */
@@ -1043,10 +1081,10 @@
 	int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
 
 	if (hdr_info->chp_info_offset == 0)
-			return -1;
+		return -1;
 
 	if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
-			return -1;
+		return -1;
 	return 0;
 }
 
@@ -1061,10 +1099,10 @@
 	int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
 
 	if (hdr_info->bus_info_offset == 0)
-			return -1;
+		return -1;
 
 	if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
-			return -1;
+		return -1;
 	return 0;
 }
 
@@ -1081,10 +1119,10 @@
 	    (hdr_info->device_info_struct_bytes * devix);
 
 	if (hdr_info->dev_info_offset == 0)
-			return -1;
+		return -1;
 
 	if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
-			return -1;
+		return -1;
 	return 0;
 }
 
@@ -1106,7 +1144,7 @@
 	struct spar_vbus_headerinfo *hdr_info;
 
 	if (!visordev->device.driver)
-			return;
+		return;
 
 	hdr_info = (struct spar_vbus_headerinfo *)visordev->vbus_hdr_info;
 	if (!hdr_info)
@@ -1319,11 +1357,11 @@
 pause_state_change_complete(struct visor_device *dev, int status)
 {
 	if (!dev->pausing)
-			return;
+		return;
 
 	dev->pausing = false;
 	if (!chipset_responders.device_pause) /* this can never happen! */
-			return;
+		return;
 
 	/* Notify the chipset driver that the pause is complete, which
 	* will presumably want to send some sort of response to the
@@ -1339,11 +1377,11 @@
 resume_state_change_complete(struct visor_device *dev, int status)
 {
 	if (!dev->resuming)
-			return;
+		return;
 
 	dev->resuming = false;
 	if (!chipset_responders.device_resume) /* this can never happen! */
-			return;
+		return;
 
 	/* Notify the chipset driver that the resume is complete,
 	 * which will presumably want to send some sort of response to
@@ -1367,14 +1405,14 @@
 	else
 		notify_func = chipset_responders.device_resume;
 	if (!notify_func)
-			goto away;
+		goto away;
 
 	drv = to_visor_driver(dev->device.driver);
 	if (!drv)
-			goto away;
+		goto away;
 
 	if (dev->pausing || dev->resuming)
-			goto away;
+		goto away;
 
 	/* Note that even though both drv->pause() and drv->resume
 	 * specify a callback function, it is NOT necessary for us to
@@ -1385,7 +1423,7 @@
 	 */
 	if (is_pause) {
 		if (!drv->pause)
-				goto away;
+			goto away;
 
 		dev->pausing = true;
 		x = drv->pause(dev, pause_state_change_complete);
@@ -1397,7 +1435,7 @@
 		 * would never even get here in that case. */
 		fix_vbus_dev_info(dev);
 		if (!drv->resume)
-				goto away;
+			goto away;
 
 		dev->resuming = true;
 		x = drv->resume(dev, resume_state_change_complete);
@@ -1413,7 +1451,7 @@
 away:
 	if (rc < 0) {
 		if (notify_func)
-				(*notify_func)(dev, rc);
+			(*notify_func)(dev, rc);
 	}
 }
 
@@ -1469,8 +1507,8 @@
 
 away:
 	if (rc)
-			POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
-					 POSTCODE_SEVERITY_ERR);
+		POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
+				 POSTCODE_SEVERITY_ERR);
 	return rc;
 }
 
@@ -1495,9 +1533,8 @@
 
 	list_for_each_safe(listentry, listtmp, &list_all_bus_instances) {
 		struct visor_device *dev = list_entry(listentry,
-							      struct
-							      visor_device,
-							      list_all);
+						      struct visor_device,
+						      list_all);
 		remove_bus_instance(dev);
 	}
 	remove_bus_type();
diff --git a/drivers/staging/unisys/visorbus/visorbus_private.h b/drivers/staging/unisys/visorbus/visorbus_private.h
index 2f12483..39edd20 100644
--- a/drivers/staging/unisys/visorbus/visorbus_private.h
+++ b/drivers/staging/unisys/visorbus/visorbus_private.h
@@ -1,12 +1,11 @@
 /* visorchipset.h
  *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index 20b6349..6da7e49 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -1,12 +1,11 @@
 /* visorchannel_funcs.c
  *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -259,7 +258,8 @@
 
 	if (offset < chdr_size) {
 		copy_size = min(chdr_size - offset, nbytes);
-		memcpy(&channel->chan_hdr + offset, local, copy_size);
+		memcpy(((char *)(&channel->chan_hdr)) + offset,
+		       local, copy_size);
 	}
 
 	memcpy_toio(channel->mapped + offset, local, nbytes);
@@ -416,11 +416,12 @@
 visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg)
 {
 	bool rc;
+	unsigned long flags;
 
 	if (channel->needs_lock) {
-		spin_lock(&channel->remove_lock);
+		spin_lock_irqsave(&channel->remove_lock, flags);
 		rc = signalremove_inner(channel, queue, msg);
-		spin_unlock(&channel->remove_lock);
+		spin_unlock_irqrestore(&channel->remove_lock, flags);
 	} else {
 		rc = signalremove_inner(channel, queue, msg);
 	}
@@ -429,6 +430,27 @@
 }
 EXPORT_SYMBOL_GPL(visorchannel_signalremove);
 
+bool
+visorchannel_signalempty(struct visorchannel *channel, u32 queue)
+{
+	unsigned long flags = 0;
+	struct signal_queue_header sig_hdr;
+	bool rc = false;
+
+	if (channel->needs_lock)
+		spin_lock_irqsave(&channel->remove_lock, flags);
+
+	if (!sig_read_header(channel, queue, &sig_hdr))
+		rc = true;
+	if (sig_hdr.head == sig_hdr.tail)
+		rc = true;
+	if (channel->needs_lock)
+		spin_unlock_irqrestore(&channel->remove_lock, flags);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(visorchannel_signalempty);
+
 static bool
 signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg)
 {
@@ -470,11 +492,12 @@
 visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg)
 {
 	bool rc;
+	unsigned long flags;
 
 	if (channel->needs_lock) {
-		spin_lock(&channel->insert_lock);
+		spin_lock_irqsave(&channel->insert_lock, flags);
 		rc = signalinsert_inner(channel, queue, msg);
-		spin_unlock(&channel->insert_lock);
+		spin_unlock_irqrestore(&channel->insert_lock, flags);
 	} else {
 		rc = signalinsert_inner(channel, queue, msg);
 	}
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index bb8087e..4b76cb4 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -1,12 +1,11 @@
 /* visorchipset_main.c
  *
- * Copyright (C) 2010 - 2013 UNISYS CORPORATION
+ * Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -1247,10 +1246,11 @@
 	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
 			 POSTCODE_SEVERITY_INFO);
 
-	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
-					   cmd->create_device.channel_bytes,
-					   GFP_KERNEL,
-					   cmd->create_device.data_type_uuid);
+	visorchannel =
+	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
+					     cmd->create_device.channel_bytes,
+					     GFP_KERNEL,
+					     cmd->create_device.data_type_uuid);
 
 	if (!visorchannel) {
 		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
@@ -2047,6 +2047,7 @@
 			 response);
 
 	kfree(dev_info->pending_msg_hdr);
+	dev_info->pending_msg_hdr = NULL;
 }
 
 static void
@@ -2381,6 +2382,9 @@
 		.remove = visorchipset_exit,
 		},
 };
+
+MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
+
 static __init uint32_t visorutil_spar_detect(void)
 {
 	unsigned int eax, ebx, ecx, edx;
diff --git a/drivers/staging/unisys/visorbus/vmcallinterface.h b/drivers/staging/unisys/visorbus/vmcallinterface.h
index 7a53df00..7abd27a 100644
--- a/drivers/staging/unisys/visorbus/vmcallinterface.h
+++ b/drivers/staging/unisys/visorbus/vmcallinterface.h
@@ -1,10 +1,9 @@
-/* Copyright (C) 2010 - 2013 UNISYS CORPORATION
+/* Copyright (C) 2010 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 7100744..8c9da7e 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1,10 +1,9 @@
 /* Copyright (c) 2012 - 2015 UNISYS CORPORATION
  * All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -20,15 +19,16 @@
  */
 
 #include <linux/debugfs.h>
-#include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <linux/kthread.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
 
 #include "visorbus.h"
 #include "iochannel.h"
 
-#define VISORNIC_INFINITE_RESPONSE_WAIT 0
+#define VISORNIC_INFINITE_RSP_WAIT 0
 #define VISORNICSOPENMAX 32
 #define MAXDEVICES     16384
 
@@ -61,7 +61,6 @@
 	.write = enable_ints_write,
 };
 
-static struct workqueue_struct *visornic_serverdown_workqueue;
 static struct workqueue_struct *visornic_timeout_reset_workqueue;
 
 /* GUIDS for director channel type supported by this driver.  */
@@ -72,6 +71,15 @@
 	{ SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
 	{ NULL_UUID_LE, NULL }
 };
+MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
+/*
+ * FIXME XXX: This next line of code must be fixed and removed before
+ * acceptance into the 'normal' part of the kernel.  It is only here as a place
+ * holder to get module autoloading functionality working for visorbus.  Code
+ * must be added to scripts/mod/file2alias.c, etc., to get this working
+ * properly.
+ */
+MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);
 
 /* This is used to tell the visor bus driver which types of visor devices
  * we support, and what functions to call when a visor device that we support
@@ -90,12 +98,6 @@
 	.channel_interrupt = NULL,
 };
 
-struct visor_thread_info {
-	struct task_struct *task;
-	struct completion has_stopped;
-	int id;
-};
-
 struct chanstat {
 	unsigned long got_rcv;
 	unsigned long got_enbdisack;
@@ -104,6 +106,7 @@
 	unsigned long sent_enbdis;
 	unsigned long sent_promisc;
 	unsigned long sent_post;
+	unsigned long sent_post_failed;
 	unsigned long sent_xmit;
 	unsigned long reject_count;
 	unsigned long extra_rcvbufs_sent;
@@ -111,7 +114,6 @@
 
 struct visornic_devdata {
 	int devnum;
-	int thread_wait_ms;
 	unsigned short enabled;		/* 0 disabled 1 enabled to receive */
 	unsigned short enab_dis_acked;	/* NET_RCV_ENABLE/DISABLE acked by
 					 * IOPART
@@ -119,7 +121,6 @@
 	struct visor_device *dev;
 	char name[99];
 	struct list_head list_all;   /* < link within list_all_devices list */
-	struct kref kref;
 	struct net_device *netdev;
 	struct net_device_stats net_stats;
 	atomic_t interrupt_rcvd;
@@ -137,20 +138,21 @@
 	atomic_t num_rcvbuf_in_iovm;
 	unsigned long alloc_failed_in_if_needed_cnt;
 	unsigned long alloc_failed_in_repost_rtn_cnt;
-	int max_outstanding_net_xmits;   /* absolute max number of outstanding
-					  * xmits - should never hit this
-					  */
-	int upper_threshold_net_xmits;   /* high water mark for calling
-					  * netif_stop_queue()
-					  */
-	int lower_threshold_net_xmits;	 /* high water mark for calling
-					  * netif_wake_queue()
-					  */
+	unsigned long max_outstanding_net_xmits; /* absolute max number of
+						  * outstanding xmits - should
+						  * never hit this
+						  */
+	unsigned long upper_threshold_net_xmits;  /* high water mark for
+						   * calling netif_stop_queue()
+						   */
+	unsigned long lower_threshold_net_xmits; /* high water mark for calling
+						  * netif_wake_queue()
+						  */
 	struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
 					  * xmit buffer list that have been
 					  * sent to the IOPART end
 					  */
-	struct work_struct serverdown_completion;
+	visorbus_state_complete_func server_down_complete_func;
 	struct work_struct timeout_reset;
 	struct uiscmdrsp *cmdrsp_rcv;	 /* cmdrsp_rcv is used for
 					  * posting/unposting rcv buffers
@@ -161,8 +163,8 @@
 					  */
 	bool server_down;		 /* IOPART is down */
 	bool server_change_state;	 /* Processing SERVER_CHANGESTATE msg */
+	bool going_away;		 /* device is being torn down */
 	struct dentry *eth_debugfs_dir;
-	struct visor_thread_info threadinfo;
 	u64 interrupts_rcvd;
 	u64 interrupts_notme;
 	u64 interrupts_disabled;
@@ -194,16 +196,19 @@
 
 	int queuefullmsg_logged;
 	struct chanstat chstat;
+	struct timer_list irq_poll_timer;
+	struct napi_struct napi;
+	struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
 };
 
-/* array of open devices maintained by open() and close() */
-static struct net_device *num_visornic_open[VISORNICSOPENMAX];
 
 /* List of all visornic_devdata structs,
  * linked via the list_all member
  */
 static LIST_HEAD(list_all_devices);
 static DEFINE_SPINLOCK(lock_all_devices);
+static int visornic_poll(struct napi_struct *napi, int budget);
+static void poll_for_irq(unsigned long v);
 
 /**
  *	visor_copy_fragsinfo_from_skb(
@@ -223,9 +228,25 @@
 			      struct phys_info frags[])
 {
 	unsigned int count = 0, ii, size, offset = 0, numfrags;
+	unsigned int total_count;
 
 	numfrags = skb_shinfo(skb)->nr_frags;
 
+	/*
+	 * Compute the number of fragments this skb has, and if it's more than
+	 * the frag array can hold, linearize the skb
+	 */
+	total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
+	if (firstfraglen % PI_PAGE_SIZE)
+		total_count++;
+
+	if (total_count > frags_max) {
+		if (skb_linearize(skb))
+			return -EINVAL;
+		numfrags = skb_shinfo(skb)->nr_frags;
+		firstfraglen = 0;
+	}
+
 	while (firstfraglen) {
 		if (count == frags_max)
 			return -EINVAL;
@@ -256,8 +277,16 @@
 					      page_offset,
 					      skb_shinfo(skb)->frags[ii].
 					      size, count, frags_max, frags);
-			if (!count)
-				return -EIO;
+			/*
+			 * add_physinfo_entries only returns
+			 * zero if the frags array is out of room
+			 * That should never happen because we
+			 * fail above, if count+numfrags > frags_max.
+			 * Given that there's no recovery mechanism from putting
+			 * half a packet in the I/O channel, panic here as this
+			 * should never happen
+			 */
+			BUG_ON(!count);
 		}
 	}
 	if (skb_shinfo(skb)->frag_list) {
@@ -279,222 +308,15 @@
 	return count;
 }
 
-/**
- *	visort_thread_start - starts thread for the device
- *	@thrinfo: The thread to start
- *	@threadfn: Function the thread starts
- *	@thrcontext: Context to pass to the thread, i.e. devdata
- *	@name:	string describing name of thread
- *
- *	Starts a thread for the device, currently only thread is
- *	process_incoming_rsps
- *	Returns 0 on success;
- */
-static int visor_thread_start(struct visor_thread_info *thrinfo,
-			      int (*threadfn)(void *),
-			      void *thrcontext, char *name)
-{
-	/* used to stop the thread */
-	init_completion(&thrinfo->has_stopped);
-	thrinfo->task = kthread_run(threadfn, thrcontext, name);
-	if (IS_ERR(thrinfo->task)) {
-		thrinfo->id = 0;
-		return -EINVAL;
-	}
-	thrinfo->id = thrinfo->task->pid;
-	return 0;
-}
-
-/**
- *	visor_thread_stop - stop a thread for the device
- *	@thrinfo: The thread to stop
- *
- *	Stop the thread and wait for completion for a minute
- *	Returns void.
- */
-static void visor_thread_stop(struct visor_thread_info *thrinfo)
-{
-	if (!thrinfo->id)
-		return;	/* thread not running */
-
-	kthread_stop(thrinfo->task);
-	/* give up if the thread has NOT died in 1 minute */
-	if (wait_for_completion_timeout(&thrinfo->has_stopped, 60 * HZ))
-		thrinfo->id = 0;
-}
-
-/* DebugFS code */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
-				 size_t len, loff_t *offset)
-{
-	int i;
-	ssize_t bytes_read = 0;
-	int str_pos = 0;
-	struct visornic_devdata *devdata;
-	char *vbuf;
-
-	if (len > MAX_BUF)
-		len = MAX_BUF;
-	vbuf = kzalloc(len, GFP_KERNEL);
-	if (!vbuf)
-		return -ENOMEM;
-
-	/* for each vnic channel
-	 * dump out channel specific data
-	 */
-	for (i = 0; i < VISORNICSOPENMAX; i++) {
-		if (!num_visornic_open[i])
-			continue;
-
-		devdata = netdev_priv(num_visornic_open[i]);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     "Vnic i = %d\n", i);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     "netdev = %s (0x%p), MAC Addr %pM\n",
-				     num_visornic_open[i]->name,
-				     num_visornic_open[i],
-				     num_visornic_open[i]->dev_addr);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     "VisorNic Dev Info = 0x%p\n", devdata);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " num_rcv_bufs = %d\n",
-				     devdata->num_rcv_bufs);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " max_oustanding_next_xmits = %d\n",
-				    devdata->max_outstanding_net_xmits);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " upper_threshold_net_xmits = %d\n",
-				     devdata->upper_threshold_net_xmits);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " lower_threshold_net_xmits = %d\n",
-				     devdata->lower_threshold_net_xmits);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " queuefullmsg_logged = %d\n",
-				     devdata->queuefullmsg_logged);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.got_rcv = %lu\n",
-				     devdata->chstat.got_rcv);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.got_enbdisack = %lu\n",
-				     devdata->chstat.got_enbdisack);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.got_xmit_done = %lu\n",
-				     devdata->chstat.got_xmit_done);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.xmit_fail = %lu\n",
-				     devdata->chstat.xmit_fail);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.sent_enbdis = %lu\n",
-				     devdata->chstat.sent_enbdis);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.sent_promisc = %lu\n",
-				     devdata->chstat.sent_promisc);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.sent_post = %lu\n",
-				     devdata->chstat.sent_post);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.sent_xmit = %lu\n",
-				     devdata->chstat.sent_xmit);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.reject_count = %lu\n",
-				     devdata->chstat.reject_count);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " chstat.extra_rcvbufs_sent = %lu\n",
-				     devdata->chstat.extra_rcvbufs_sent);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " n_rcv0 = %lu\n", devdata->n_rcv0);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " n_rcv1 = %lu\n", devdata->n_rcv1);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " n_rcv2 = %lu\n", devdata->n_rcv2);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " n_rcvx = %lu\n", devdata->n_rcvx);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " num_rcvbuf_in_iovm = %d\n",
-				     atomic_read(&devdata->num_rcvbuf_in_iovm));
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " alloc_failed_in_if_needed_cnt = %lu\n",
-				     devdata->alloc_failed_in_if_needed_cnt);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " alloc_failed_in_repost_rtn_cnt = %lu\n",
-				     devdata->alloc_failed_in_repost_rtn_cnt);
-		/* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-		 *		     " inner_loop_limit_reached_cnt = %lu\n",
-		 *		     devdata->inner_loop_limit_reached_cnt);
-		 */
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " found_repost_rcvbuf_cnt = %lu\n",
-				     devdata->found_repost_rcvbuf_cnt);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " repost_found_skb_cnt = %lu\n",
-				     devdata->repost_found_skb_cnt);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " n_repost_deficit = %lu\n",
-				     devdata->n_repost_deficit);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " bad_rcv_buf = %lu\n",
-				     devdata->bad_rcv_buf);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " n_rcv_packets_not_accepted = %lu\n",
-				     devdata->n_rcv_packets_not_accepted);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " interrupts_rcvd = %llu\n",
-				     devdata->interrupts_rcvd);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " interrupts_notme = %llu\n",
-				     devdata->interrupts_notme);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " interrupts_disabled = %llu\n",
-				     devdata->interrupts_disabled);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " busy_cnt = %llu\n",
-				     devdata->busy_cnt);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " flow_control_upper_hits = %llu\n",
-				     devdata->flow_control_upper_hits);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " flow_control_lower_hits = %llu\n",
-				     devdata->flow_control_lower_hits);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " thread_wait_ms = %d\n",
-				     devdata->thread_wait_ms);
-		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-				     " netif_queue = %s\n",
-				     netif_queue_stopped(devdata->netdev) ?
-				     "stopped" : "running");
-	}
-	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
-	kfree(vbuf);
-	return bytes_read;
-}
-
 static ssize_t enable_ints_write(struct file *file,
 				 const char __user *buffer,
 				 size_t count, loff_t *ppos)
 {
-	char buf[4];
-	int i, new_value;
-	struct visornic_devdata *devdata;
-
-	if (count >= ARRAY_SIZE(buf))
-		return -EINVAL;
-
-	buf[count] = '\0';
-	if (copy_from_user(buf, buffer, count))
-		return -EFAULT;
-
-	i = kstrtoint(buf, 10, &new_value);
-	if (i != 0)
-		return -EFAULT;
-
-	/* set all counts to new_value usually 0 */
-	for (i = 0; i < VISORNICSOPENMAX; i++) {
-		if (num_visornic_open[i]) {
-			devdata = netdev_priv(num_visornic_open[i]);
-			/* TODO update features bit in channel */
-		}
-	}
-
+	/*
+	 * Don't want to break ABI here by having a debugfs
+	 * file that no longer exists or is no longer writable, so
+	 * let's just make this a vestigial function
+	 */
 	return count;
 }
 
@@ -509,44 +331,29 @@
  *	Returns void.
  */
 static void
-visornic_serverdown_complete(struct work_struct *work)
+visornic_serverdown_complete(struct visornic_devdata *devdata)
 {
-	struct visornic_devdata *devdata;
 	struct net_device *netdev;
-	unsigned long flags;
-	int i = 0, count = 0;
 
-	devdata = container_of(work, struct visornic_devdata,
-			       serverdown_completion);
 	netdev = devdata->netdev;
 
-	/* Stop using datachan */
-	visor_thread_stop(&devdata->threadinfo);
+	/* Stop polling for interrupts */
+	del_timer_sync(&devdata->irq_poll_timer);
 
-	/* Inform Linux that the link is down */
-	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
+	rtnl_lock();
+	dev_close(netdev);
+	rtnl_unlock();
 
-	/* Free the skb for XMITs that haven't been serviced by the server
-	 * We shouldn't have to inform Linux about these IOs because they
-	 * are "lost in the ethernet"
-	 */
-	skb_queue_purge(&devdata->xmitbufhead);
-
-	spin_lock_irqsave(&devdata->priv_lock, flags);
-	/* free rcv buffers */
-	for (i = 0; i < devdata->num_rcv_bufs; i++) {
-		if (devdata->rcvbuf[i]) {
-			kfree_skb(devdata->rcvbuf[i]);
-			devdata->rcvbuf[i] = NULL;
-			count++;
-		}
-	}
 	atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
-	spin_unlock_irqrestore(&devdata->priv_lock, flags);
+	devdata->chstat.sent_xmit = 0;
+	devdata->chstat.got_xmit_done = 0;
+
+	if (devdata->server_down_complete_func)
+		(*devdata->server_down_complete_func)(devdata->dev, 0);
 
 	devdata->server_down = true;
 	devdata->server_change_state = false;
+	devdata->server_down_complete_func = NULL;
 }
 
 /**
@@ -558,15 +365,31 @@
  *	Returns 0 if we scheduled the work, -EINVAL on error.
  */
 static int
-visornic_serverdown(struct visornic_devdata *devdata)
+visornic_serverdown(struct visornic_devdata *devdata,
+		    visorbus_state_complete_func complete_func)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&devdata->priv_lock, flags);
 	if (!devdata->server_down && !devdata->server_change_state) {
+		if (devdata->going_away) {
+			spin_unlock_irqrestore(&devdata->priv_lock, flags);
+			dev_dbg(&devdata->dev->device,
+				"%s aborting because device removal pending\n",
+				__func__);
+			return -ENODEV;
+		}
 		devdata->server_change_state = true;
-		queue_work(visornic_serverdown_workqueue,
-			   &devdata->serverdown_completion);
+		devdata->server_down_complete_func = complete_func;
+		spin_unlock_irqrestore(&devdata->priv_lock, flags);
+		visornic_serverdown_complete(devdata);
 	} else if (devdata->server_change_state) {
+		dev_dbg(&devdata->dev->device, "%s changing state\n",
+			__func__);
+		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		return -EINVAL;
-	}
+	} else
+		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 	return 0;
 }
 
@@ -625,11 +448,14 @@
 	if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
 		cmdrsp->net.type = NET_RCV_POST;
 		cmdrsp->cmdtype = CMD_NET_TYPE;
-		visorchannel_signalinsert(devdata->dev->visorchannel,
+		if (visorchannel_signalinsert(devdata->dev->visorchannel,
 					  IOCHAN_TO_IOPART,
-					  cmdrsp);
-		atomic_inc(&devdata->num_rcvbuf_in_iovm);
-		devdata->chstat.sent_post++;
+					  cmdrsp)) {
+			atomic_inc(&devdata->num_rcvbuf_in_iovm);
+			devdata->chstat.sent_post++;
+		} else {
+			devdata->chstat.sent_post_failed++;
+		}
 	}
 }
 
@@ -651,10 +477,10 @@
 	devdata->cmdrsp_rcv->net.enbdis.context = netdev;
 	devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
 	devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
-	visorchannel_signalinsert(devdata->dev->visorchannel,
+	if (visorchannel_signalinsert(devdata->dev->visorchannel,
 				  IOCHAN_TO_IOPART,
-				  devdata->cmdrsp_rcv);
-	devdata->chstat.sent_enbdis++;
+				  devdata->cmdrsp_rcv))
+		devdata->chstat.sent_enbdis++;
 }
 
 /**
@@ -676,9 +502,6 @@
 	unsigned long flags;
 	int wait = 0;
 
-	/* stop the transmit queue so nothing more can be transmitted */
-	netif_stop_queue(netdev);
-
 	/* send a msg telling the other end we are stopping incoming pkts */
 	spin_lock_irqsave(&devdata->priv_lock, flags);
 	devdata->enabled = 0;
@@ -695,12 +518,14 @@
 	 * when it gets a disable.
 	 */
 	spin_lock_irqsave(&devdata->priv_lock, flags);
-	while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
+	while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
 	       (wait < timeout)) {
 		if (devdata->enab_dis_acked)
 			break;
 		if (devdata->server_down || devdata->server_change_state) {
 			spin_unlock_irqrestore(&devdata->priv_lock, flags);
+			dev_dbg(&netdev->dev, "%s server went away\n",
+				__func__);
 			return -EIO;
 		}
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -722,10 +547,16 @@
 				break;
 		}
 	}
-
 	/* we've set enabled to 0, so we can give up the lock. */
 	spin_unlock_irqrestore(&devdata->priv_lock, flags);
 
+	/* stop the transmit queue so nothing more can be transmitted */
+	netif_stop_queue(netdev);
+
+	napi_disable(&devdata->napi);
+
+	skb_queue_purge(&devdata->xmitbufhead);
+
 	/* Free rcv buffers - other end has automatically unposted them on
 	 * disable
 	 */
@@ -736,13 +567,6 @@
 		}
 	}
 
-	/* remove references from array */
-	for (i = 0; i < VISORNICSOPENMAX; i++)
-		if (num_visornic_open[i] == netdev) {
-			num_visornic_open[i] = NULL;
-			break;
-		}
-
 	return 0;
 }
 
@@ -814,11 +638,15 @@
 	 * gets a disable.
 	 */
 	i = init_rcv_bufs(netdev, devdata);
-	if (i < 0)
+	if (i < 0) {
+		dev_err(&netdev->dev,
+			"%s failed to init rcv bufs (%d)\n", __func__, i);
 		return i;
+	}
 
 	spin_lock_irqsave(&devdata->priv_lock, flags);
 	devdata->enabled = 1;
+	devdata->enab_dis_acked = 0;
 
 	/* now we're ready, let's send an ENB to uisnic but until we get
 	 * an ACK back from uisnic, we'll drop the packets
@@ -829,15 +657,18 @@
 	/* send enable and wait for ack -- don't hold lock when sending enable
 	 * because if the queue is full, insert might sleep.
 	 */
+	napi_enable(&devdata->napi);
 	send_enbdis(netdev, 1, devdata);
 
 	spin_lock_irqsave(&devdata->priv_lock, flags);
-	while ((timeout == VISORNIC_INFINITE_RESPONSE_WAIT) ||
+	while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
 	       (wait < timeout)) {
 		if (devdata->enab_dis_acked)
 			break;
 		if (devdata->server_down || devdata->server_change_state) {
 			spin_unlock_irqrestore(&devdata->priv_lock, flags);
+			dev_dbg(&netdev->dev, "%s server went away\n",
+				__func__);
 			return -EIO;
 		}
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -848,19 +679,13 @@
 
 	spin_unlock_irqrestore(&devdata->priv_lock, flags);
 
-	if (!devdata->enab_dis_acked)
+	if (!devdata->enab_dis_acked) {
+		dev_err(&netdev->dev, "%s missing ACK\n", __func__);
 		return -EIO;
-
-	/* find an open slot in the array to save off VisorNic references
-	 * for debug
-	 */
-	for (i = 0; i < VISORNICSOPENMAX; i++) {
-		if (!num_visornic_open[i]) {
-			num_visornic_open[i] = netdev;
-			break;
-		}
 	}
 
+	netif_start_queue(netdev);
+
 	return 0;
 }
 
@@ -882,20 +707,29 @@
 	devdata = container_of(work, struct visornic_devdata, timeout_reset);
 	netdev = devdata->netdev;
 
-	netif_stop_queue(netdev);
-	response = visornic_disable_with_timeout(netdev, 100);
+	rtnl_lock();
+	if (!netif_running(netdev)) {
+		rtnl_unlock();
+		return;
+	}
+
+	response = visornic_disable_with_timeout(netdev,
+						 VISORNIC_INFINITE_RSP_WAIT);
 	if (response)
 		goto call_serverdown;
 
-	response = visornic_enable_with_timeout(netdev, 100);
+	response = visornic_enable_with_timeout(netdev,
+						VISORNIC_INFINITE_RSP_WAIT);
 	if (response)
 		goto call_serverdown;
-	netif_wake_queue(netdev);
+
+	rtnl_unlock();
 
 	return;
 
 call_serverdown:
-	visornic_serverdown(devdata);
+	visornic_serverdown(devdata, NULL);
+	rtnl_unlock();
 }
 
 /**
@@ -908,12 +742,7 @@
 static int
 visornic_open(struct net_device *netdev)
 {
-	visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
-
-	/* start the interface's transmit queue, allowing it to accept
-	 * packets for transmission
-	 */
-	netif_start_queue(netdev);
+	visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
 
 	return 0;
 }
@@ -928,13 +757,59 @@
 static int
 visornic_close(struct net_device *netdev)
 {
-	netif_stop_queue(netdev);
-	visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RESPONSE_WAIT);
+	visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
 
 	return 0;
 }
 
 /**
+ *	devdata_xmits_outstanding - compute outstanding xmits
+ *	@devdata: visornic_devdata for device
+ *
+ *	Return value is the number of outstanding xmits.
+ */
+static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
+{
+	if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
+		return devdata->chstat.sent_xmit -
+			devdata->chstat.got_xmit_done;
+	else
+		return (ULONG_MAX - devdata->chstat.got_xmit_done
+			+ devdata->chstat.sent_xmit + 1);
+}
+
+/**
+ *	vnic_hit_high_watermark
+ *	@devdata: indicates visornic device we are checking
+ *	@high_watermark: max num of unacked xmits we will tolerate,
+ *                       before we will start throttling
+ *
+ *      Returns true iff the number of unacked xmits sent to
+ *      the IO partition is >= high_watermark.
+ */
+static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
+					   ulong high_watermark)
+{
+	return (devdata_xmits_outstanding(devdata) >= high_watermark);
+}
+
+/**
+ *	vnic_hit_low_watermark
+ *	@devdata: indicates visornic device we are checking
+ *	@low_watermark: we will wait until the num of unacked xmits
+ *                      drops to this value or lower before we start
+ *                      transmitting again
+ *
+ *      Returns true iff the number of unacked xmits sent to
+ *      the IO partition is <= low_watermark.
+ */
+static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
+					  ulong low_watermark)
+{
+	return (devdata_xmits_outstanding(devdata) <= low_watermark);
+}
+
+/**
  *	visornic_xmit - send a packet to the IO Partition
  *	@skb: Packet to be sent
  *	@netdev: net device the packet is being sent from
@@ -944,7 +819,7 @@
  *	function is protected from concurrent calls by a spinlock xmit_lock
  *	in the net_device struct, but as soon as the function returns it
  *	can be called again.
- *	Returns NETDEV_TX_OK for success, NETDEV_TX_BUSY for error.
+ *	Returns NETDEV_TX_OK.
  */
 static int
 visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -961,7 +836,10 @@
 	    devdata->server_change_state) {
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		devdata->busy_cnt++;
-		return NETDEV_TX_BUSY;
+		dev_dbg(&netdev->dev,
+			"%s busy - queue stopped\n", __func__);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	/* sk_buff struct is used to host network data throughout all the
@@ -979,7 +857,11 @@
 	if (firstfraglen < ETH_HEADER_SIZE) {
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		devdata->busy_cnt++;
-		return NETDEV_TX_BUSY;
+		dev_err(&netdev->dev,
+			"%s busy - first frag too small (%d)\n",
+			__func__, firstfraglen);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	if ((len < ETH_MIN_PACKET_SIZE) &&
@@ -1002,13 +884,8 @@
 	/* save the pointer to skb -- we'll need it for completion */
 	cmdrsp->net.buf = skb;
 
-	if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
-	     (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
-	     devdata->max_outstanding_net_xmits)) ||
-	     ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
-	     (ULONG_MAX - devdata->chstat.got_xmit_done +
-	      devdata->chstat.sent_xmit >=
-	      devdata->max_outstanding_net_xmits))) {
+	if (vnic_hit_high_watermark(devdata,
+				    devdata->max_outstanding_net_xmits)) {
 		/* too many NET_XMITs queued over to IOVM - need to wait
 		 */
 		devdata->chstat.reject_count++;
@@ -1018,7 +895,11 @@
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		devdata->busy_cnt++;
-		return NETDEV_TX_BUSY;
+		dev_dbg(&netdev->dev,
+			"%s busy - waiting for iovm to catch up\n",
+			__func__);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 	if (devdata->queuefullmsg_logged)
 		devdata->queuefullmsg_logged = 0;
@@ -1055,10 +936,13 @@
 		visor_copy_fragsinfo_from_skb(skb, firstfraglen,
 					      MAX_PHYS_INFO,
 					      cmdrsp->net.xmt.frags);
-	if (cmdrsp->net.xmt.num_frags == -1) {
+	if (cmdrsp->net.xmt.num_frags < 0) {
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		devdata->busy_cnt++;
-		return NETDEV_TX_BUSY;
+		dev_err(&netdev->dev,
+			"%s busy - copy frags failed\n", __func__);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	if (!visorchannel_signalinsert(devdata->dev->visorchannel,
@@ -1066,18 +950,15 @@
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		devdata->busy_cnt++;
-		return NETDEV_TX_BUSY;
+		dev_dbg(&netdev->dev,
+			"%s busy - signalinsert failed\n", __func__);
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 
 	/* Track the skbs that have been sent to the IOVM for XMIT */
 	skb_queue_head(&devdata->xmitbufhead, skb);
 
-	/* set the last transmission start time
-	 * linux doc says: Do not forget to update netdev->trans_start to
-	 * jiffies after each new tx packet is given to the hardware.
-	 */
-	netdev->trans_start = jiffies;
-
 	/* update xmt stats */
 	devdata->net_stats.tx_packets++;
 	devdata->net_stats.tx_bytes += skb->len;
@@ -1086,18 +967,16 @@
 	/* check to see if we have hit the high watermark for
 	 * netif_stop_queue()
 	 */
-	if (((devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done) &&
-	     (devdata->chstat.sent_xmit - devdata->chstat.got_xmit_done >=
-	      devdata->upper_threshold_net_xmits)) ||
-	    ((devdata->chstat.sent_xmit < devdata->chstat.got_xmit_done) &&
-	     (ULONG_MAX - devdata->chstat.got_xmit_done +
-	      devdata->chstat.sent_xmit >=
-	      devdata->upper_threshold_net_xmits))) {
+	if (vnic_hit_high_watermark(devdata,
+				    devdata->upper_threshold_net_xmits)) {
 		/* too many NET_XMITs queued over to IOVM - need to wait */
 		netif_stop_queue(netdev); /* calling stop queue - call
 					   * netif_wake_queue() after lower
 					   * threshold
 					   */
+		dev_dbg(&netdev->dev,
+			"%s busy - invoking iovm flow control\n",
+			__func__);
 		devdata->flow_control_upper_hits++;
 	}
 	spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1121,21 +1000,6 @@
 }
 
 /**
- *	visornic_ioctl - ioctl function for netdevice.
- *	@netdev: netdevice
- *	@ifr: ignored
- *	@cmd: ignored
- *
- *	Currently not supported.
- *	Returns EOPNOTSUPP
- */
-static int
-visornic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
-	return -EOPNOTSUPP;
-}
-
-/**
  *	visornic_change_mtu - changes mtu of device.
  *	@netdev: netdevice
  *	@new_mtu: value of new mtu
@@ -1201,15 +1065,24 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&devdata->priv_lock, flags);
+	if (devdata->going_away) {
+		spin_unlock_irqrestore(&devdata->priv_lock, flags);
+		dev_dbg(&devdata->dev->device,
+			"%s aborting because device removal pending\n",
+			__func__);
+		return;
+	}
+
 	/* Ensure that a ServerDown message hasn't been received */
 	if (!devdata->enabled ||
 	    (devdata->server_down && !devdata->server_change_state)) {
+		dev_dbg(&netdev->dev, "%s no processing\n",
+			__func__);
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		return;
 	}
-	spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
 	queue_work(visornic_timeout_reset_workqueue, &devdata->timeout_reset);
+	spin_unlock_irqrestore(&devdata->priv_lock, flags);
 }
 
 /**
@@ -1281,7 +1154,6 @@
 			devdata->bad_rcv_buf++;
 		}
 	}
-	atomic_dec(&devdata->usage);
 	return status;
 }
 
@@ -1293,18 +1165,16 @@
  *	it up the stack.
  *	Returns void
  */
-static void
+static int
 visornic_rx(struct uiscmdrsp *cmdrsp)
 {
 	struct visornic_devdata *devdata;
 	struct sk_buff *skb, *prev, *curr;
 	struct net_device *netdev;
-	int cc, currsize, off, status;
+	int cc, currsize, off;
 	struct ethhdr *eth;
 	unsigned long flags;
-#ifdef DEBUG
-	struct phys_info testfrags[MAX_PHYS_INFO];
-#endif
+	int rx_count = 0;
 
 	/* post new rcv buf to the other end using the cmdrsp we have at hand
 	 * post it without holding lock - but we'll use the signal lock to
@@ -1314,18 +1184,6 @@
 	skb = cmdrsp->net.buf;
 	netdev = skb->dev;
 
-	if (!netdev) {
-		/* We must have previously downed this network device and
-		 * this skb and device is no longer valid. This also means
-		 * the skb reference was removed from devdata->rcvbuf so no
-		 * need to search for it.
-		 * All we can do is free the skb and return.
-		 * Note: We crash if we try to log this here.
-		 */
-		kfree_skb(skb);
-		return;
-	}
-
 	devdata = netdev_priv(netdev);
 
 	spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -1335,10 +1193,6 @@
 	devdata->net_stats.rx_packets++;
 	devdata->net_stats.rx_bytes = skb->len;
 
-	atomic_inc(&devdata->usage);	/* don't want a close to happen before
-					 *  we're done here
-					 */
-
 	/* set length to how much was ACTUALLY received -
 	 * NOTE: rcv_done_len includes actual length of data rcvd
 	 * including ethhdr
@@ -1352,7 +1206,7 @@
 		 */
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
 		repost_return(cmdrsp, devdata, skb, netdev);
-		return;
+		return rx_count;
 	}
 
 	spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1371,7 +1225,7 @@
 			if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
 				dev_err(&devdata->netdev->dev,
 					"repost_return failed");
-			return;
+			return rx_count;
 		}
 		/* length rcvd is greater than firstfrag in this skb rcv buf  */
 		skb->tail += RCVPOST_BUF_SIZE;	/* amount in skb->data */
@@ -1386,7 +1240,7 @@
 			if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
 				dev_err(&devdata->netdev->dev,
 					"repost_return failed");
-			return;
+			return rx_count;
 		}
 		skb->tail += skb->len;
 		skb->data_len = 0;	/* nothing rcvd in frag_list */
@@ -1405,7 +1259,7 @@
 	if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
 		if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
 			dev_err(&devdata->netdev->dev, "repost_return failed");
-		return;
+		return rx_count;
 	}
 
 	if (cmdrsp->net.rcv.numrcvbufs > 1) {
@@ -1431,29 +1285,12 @@
 			curr->data_len = 0;
 			off += currsize;
 		}
-#ifdef DEBUG
 		/* assert skb->len == off */
 		if (skb->len != off) {
-			dev_err(&devdata->netdev->dev,
-				"%s something wrong; skb->len:%d != off:%d\n",
-				netdev->name, skb->len, off);
+			netdev_err(devdata->netdev,
+				   "something wrong; skb->len:%d != off:%d\n",
+				   skb->len, off);
 		}
-		/* test code */
-		cc = util_copy_fragsinfo_from_skb("rcvchaintest", skb,
-						  RCVPOST_BUF_SIZE,
-						  MAX_PHYS_INFO, testfrags);
-		if (cc != cmdrsp->net.rcv.numrcvbufs) {
-			dev_err(&devdata->netdev->dev,
-				"**** %s Something wrong; rcvd chain length %d different from one we calculated %d\n",
-				netdev->name, cmdrsp->net.rcv.numrcvbufs, cc);
-		}
-		for (i = 0; i < cc; i++) {
-			dev_inf(&devdata->netdev->dev,
-				"test:RCVPOST_BUF_SIZE:%d[%d] pfn:%llu off:0x%x len:%d\n",
-				RCVPOST_BUF_SIZE, i, testfrags[i].pi_pfn,
-				testfrags[i].pi_off, testfrags[i].pi_len);
-		}
-#endif
 	}
 
 	/* set up packet's protocol type using ethernet header - this
@@ -1505,10 +1342,11 @@
 		/* drop packet - don't forward it up to OS */
 		devdata->n_rcv_packets_not_accepted++;
 		repost_return(cmdrsp, devdata, skb, netdev);
-		return;
+		return rx_count;
 	} while (0);
 
-	status = netif_rx(skb);
+	rx_count++;
+	netif_receive_skb(skb);
 	/* netif_rx returns various values, but "in practice most drivers
 	 * ignore the return value
 	 */
@@ -1520,6 +1358,7 @@
 	 * new rcv buffer.
 	 */
 	repost_return(cmdrsp, devdata, skb, netdev);
+	return rx_count;
 }
 
 /**
@@ -1545,14 +1384,11 @@
 	spin_unlock(&dev_num_pool_lock);
 	if (devnum == MAXDEVICES)
 		devnum = -1;
-	if (devnum < 0) {
-		kfree(devdata);
+	if (devnum < 0)
 		return NULL;
-	}
 	devdata->devnum = devnum;
 	devdata->dev = dev;
 	strncpy(devdata->name, dev_name(&dev->device), sizeof(devdata->name));
-	kref_init(&devdata->kref);
 	spin_lock(&lock_all_devices);
 	list_add_tail(&devdata->list_all, &list_all_devices);
 	spin_unlock(&lock_all_devices);
@@ -1560,24 +1396,23 @@
 }
 
 /**
- *	devdata_release	- Frees up a devdata
- *	@mykref: kref to the devdata
+ *	devdata_release	- Frees up references in devdata
+ *	@devdata: struct to clean up
  *
- *	Frees up a devdata.
+ *	Frees up references in devdata.
  *	Returns void
  */
-static void devdata_release(struct kref *mykref)
+static void devdata_release(struct visornic_devdata *devdata)
 {
-	struct visornic_devdata *devdata =
-		container_of(mykref, struct visornic_devdata, kref);
-
 	spin_lock(&dev_num_pool_lock);
 	clear_bit(devdata->devnum, dev_num_pool);
 	spin_unlock(&dev_num_pool_lock);
 	spin_lock(&lock_all_devices);
 	list_del(&devdata->list_all);
 	spin_unlock(&lock_all_devices);
-	kfree(devdata);
+	kfree(devdata->rcvbuf);
+	kfree(devdata->cmdrsp_rcv);
+	kfree(devdata->xmit_cmdrsp);
 }
 
 static const struct net_device_ops visornic_dev_ops = {
@@ -1585,12 +1420,163 @@
 	.ndo_stop = visornic_close,
 	.ndo_start_xmit = visornic_xmit,
 	.ndo_get_stats = visornic_get_stats,
-	.ndo_do_ioctl = visornic_ioctl,
 	.ndo_change_mtu = visornic_change_mtu,
 	.ndo_tx_timeout = visornic_xmit_timeout,
 	.ndo_set_rx_mode = visornic_set_multi,
 };
 
+/* DebugFS code */
+static ssize_t info_debugfs_read(struct file *file, char __user *buf,
+				 size_t len, loff_t *offset)
+{
+	ssize_t bytes_read = 0;
+	int str_pos = 0;
+	struct visornic_devdata *devdata;
+	struct net_device *dev;
+	char *vbuf;
+
+	if (len > MAX_BUF)
+		len = MAX_BUF;
+	vbuf = kzalloc(len, GFP_KERNEL);
+	if (!vbuf)
+		return -ENOMEM;
+
+	/* for each vnic channel
+	 * dump out channel specific data
+	 */
+	rcu_read_lock();
+	for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
+		/*
+		 * Only consider netdevs that are visornic, and are open
+		 */
+		if ((dev->netdev_ops != &visornic_dev_ops) ||
+		    (!netif_queue_stopped(dev)))
+			continue;
+
+		devdata = netdev_priv(dev);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     "netdev = %s (0x%p), MAC Addr %pM\n",
+				     dev->name,
+				     dev,
+				     dev->dev_addr);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     "VisorNic Dev Info = 0x%p\n", devdata);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " num_rcv_bufs = %d\n",
+				     devdata->num_rcv_bufs);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " max_outstanding_net_xmits = %lu\n",
+				    devdata->max_outstanding_net_xmits);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " upper_threshold_net_xmits = %lu\n",
+				     devdata->upper_threshold_net_xmits);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " lower_threshold_net_xmits = %lu\n",
+				     devdata->lower_threshold_net_xmits);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " queuefullmsg_logged = %d\n",
+				     devdata->queuefullmsg_logged);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.got_rcv = %lu\n",
+				     devdata->chstat.got_rcv);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.got_enbdisack = %lu\n",
+				     devdata->chstat.got_enbdisack);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.got_xmit_done = %lu\n",
+				     devdata->chstat.got_xmit_done);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.xmit_fail = %lu\n",
+				     devdata->chstat.xmit_fail);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.sent_enbdis = %lu\n",
+				     devdata->chstat.sent_enbdis);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.sent_promisc = %lu\n",
+				     devdata->chstat.sent_promisc);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.sent_post = %lu\n",
+				     devdata->chstat.sent_post);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.sent_post_failed = %lu\n",
+				     devdata->chstat.sent_post_failed);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.sent_xmit = %lu\n",
+				     devdata->chstat.sent_xmit);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.reject_count = %lu\n",
+				     devdata->chstat.reject_count);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " chstat.extra_rcvbufs_sent = %lu\n",
+				     devdata->chstat.extra_rcvbufs_sent);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " n_rcv0 = %lu\n", devdata->n_rcv0);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " n_rcv1 = %lu\n", devdata->n_rcv1);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " n_rcv2 = %lu\n", devdata->n_rcv2);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " n_rcvx = %lu\n", devdata->n_rcvx);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " num_rcvbuf_in_iovm = %d\n",
+				     atomic_read(&devdata->num_rcvbuf_in_iovm));
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " alloc_failed_in_if_needed_cnt = %lu\n",
+				     devdata->alloc_failed_in_if_needed_cnt);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " alloc_failed_in_repost_rtn_cnt = %lu\n",
+				     devdata->alloc_failed_in_repost_rtn_cnt);
+		/* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+		 *		     " inner_loop_limit_reached_cnt = %lu\n",
+		 *		     devdata->inner_loop_limit_reached_cnt);
+		 */
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " found_repost_rcvbuf_cnt = %lu\n",
+				     devdata->found_repost_rcvbuf_cnt);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " repost_found_skb_cnt = %lu\n",
+				     devdata->repost_found_skb_cnt);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " n_repost_deficit = %lu\n",
+				     devdata->n_repost_deficit);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " bad_rcv_buf = %lu\n",
+				     devdata->bad_rcv_buf);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " n_rcv_packets_not_accepted = %lu\n",
+				     devdata->n_rcv_packets_not_accepted);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " interrupts_rcvd = %llu\n",
+				     devdata->interrupts_rcvd);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " interrupts_notme = %llu\n",
+				     devdata->interrupts_notme);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " interrupts_disabled = %llu\n",
+				     devdata->interrupts_disabled);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " busy_cnt = %llu\n",
+				     devdata->busy_cnt);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " flow_control_upper_hits = %llu\n",
+				     devdata->flow_control_upper_hits);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " flow_control_lower_hits = %llu\n",
+				     devdata->flow_control_lower_hits);
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " netif_queue = %s\n",
+				     netif_queue_stopped(devdata->netdev) ?
+				     "stopped" : "running");
+		str_pos += scnprintf(vbuf + str_pos, len - str_pos,
+				     " xmits_outstanding = %lu\n",
+				     devdata_xmits_outstanding(devdata));
+	}
+	rcu_read_unlock();
+	bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
+	kfree(vbuf);
+	return bytes_read;
+}
+
 /**
  *	send_rcv_posts_if_needed
  *	@devdata: visornic device
@@ -1644,15 +1630,15 @@
 *	Returns when the response queue is empty or when the thread stops.
  */
 static void
-drain_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
+service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
+		   int *rx_work_done)
 {
 	unsigned long flags;
 	struct net_device *netdev;
 
-	/* drain queue */
-	while (1) {
-		/* TODO: CLIENT ACQUIRE -- Don't really need this at the
-		 * moment */
+	/* TODO: CLIENT ACQUIRE -- Don't really need this at the
+	 * moment */
+	for (;;) {
 		if (!visorchannel_signalremove(devdata->dev->visorchannel,
 					       IOCHAN_FROM_IOPART,
 					       cmdrsp))
@@ -1662,7 +1648,7 @@
 		case NET_RCV:
 			devdata->chstat.got_rcv++;
 			/* process incoming packet */
-			visornic_rx(cmdrsp);
+			*rx_work_done += visornic_rx(cmdrsp);
 			break;
 		case NET_XMIT_DONE:
 			spin_lock_irqsave(&devdata->priv_lock, flags);
@@ -1678,16 +1664,8 @@
 				 * the lower watermark for
 				 * netif_wake_queue()
 				 */
-				if (((devdata->chstat.sent_xmit >=
-				    devdata->chstat.got_xmit_done) &&
-				    (devdata->chstat.sent_xmit -
-				    devdata->chstat.got_xmit_done <=
-				    devdata->lower_threshold_net_xmits)) ||
-				    ((devdata->chstat.sent_xmit <
-				    devdata->chstat.got_xmit_done) &&
-				    (ULONG_MAX - devdata->chstat.got_xmit_done
-				    + devdata->chstat.sent_xmit <=
-				    devdata->lower_threshold_net_xmits))) {
+				if (vnic_hit_low_watermark(devdata,
+					devdata->lower_threshold_net_xmits)) {
 					/* enough NET_XMITs completed
 					 * so can restart netif queue
 					 */
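The vnic_hit_low_watermark() helper introduced above replaces the open-coded test that was removed; its body is not part of this hunk, but judging from the deleted lines it amounts to a wrap-around-safe count of outstanding transmits, roughly (hypothetical reconstruction):

	static inline bool example_hit_low_watermark(struct visornic_devdata *devdata,
						     unsigned long low_watermark)
	{
		unsigned long sent = devdata->chstat.sent_xmit;
		unsigned long done = devdata->chstat.got_xmit_done;

		/* outstanding = sent - done, allowing for the counters having
		 * wrapped past ULONG_MAX
		 */
		if (sent >= done)
			return (sent - done) <= low_watermark;
		return (ULONG_MAX - done + sent) <= low_watermark;
	}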
@@ -1738,50 +1716,51 @@
 			break;
 		}
 		/* cmdrsp is now available for reuse  */
-
-		if (kthread_should_stop())
-			break;
 	}
 }
 
+static int visornic_poll(struct napi_struct *napi, int budget)
+{
+	struct visornic_devdata *devdata = container_of(napi,
+							struct visornic_devdata,
+							napi);
+	int rx_count = 0;
+
+	send_rcv_posts_if_needed(devdata);
+	service_resp_queue(devdata->cmdrsp, devdata, &rx_count);
+
+	/*
+	 * If there aren't any more packets to receive,
+	 * stop the poll
+	 */
+	if (rx_count < budget)
+		napi_complete(napi);
+
+	return rx_count;
+}
+
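The new visornic_poll() follows the standard NAPI contract: a poll callback consumes at most budget packets, returns the amount of work it did, and calls napi_complete() only when it finished early, so the core keeps polling otherwise. A generic sketch of that contract (the rx helper is hypothetical):

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* example_rx_one_packet() stands in for per-packet rx work */
		while (work_done < budget && example_rx_one_packet(napi))
			work_done++;

		/* leave polling mode only when the budget was not exhausted */
		if (work_done < budget)
			napi_complete(napi);

		return work_done;
	}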
 /**
- *	process_incoming_rsps	- Checks the status of the response queue.
+ *	poll_for_irq	- Checks the status of the response queue.
 *	@v: pointer to the visornic devdata, passed as an unsigned long
 *
 *	Timer callback that periodically checks the response queue and
 *	schedules the napi poll routine to drain it when work is pending,
 *	then rearms itself.
  */
-static int
-process_incoming_rsps(void *v)
+static void
+poll_for_irq(unsigned long v)
 {
-	struct visornic_devdata *devdata = v;
-	struct uiscmdrsp *cmdrsp = NULL;
-	const int SZ = SIZEOF_CMDRSP;
+	struct visornic_devdata *devdata = (struct visornic_devdata *)v;
 
-	cmdrsp = kmalloc(SZ, GFP_ATOMIC);
-	if (!cmdrsp)
-		complete_and_exit(&devdata->threadinfo.has_stopped, 0);
+	if (!visorchannel_signalempty(
+				   devdata->dev->visorchannel,
+				   IOCHAN_FROM_IOPART))
+		napi_schedule(&devdata->napi);
 
-	while (1) {
-		wait_event_interruptible_timeout(
-			devdata->rsp_queue, (atomic_read(
-					     &devdata->interrupt_rcvd) == 1),
-				msecs_to_jiffies(devdata->thread_wait_ms));
+	atomic_set(&devdata->interrupt_rcvd, 0);
 
-		/* periodically check to see if there are any rcf bufs which
-		 * need to get sent to the IOSP. This can only happen if
-		 * we run out of memory when trying to allocate skbs.
-		 */
-		atomic_set(&devdata->interrupt_rcvd, 0);
-		send_rcv_posts_if_needed(devdata);
-		drain_queue(cmdrsp, devdata);
-		if (kthread_should_stop())
-			break;
-	}
+	mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
 
-	kfree(cmdrsp);
-	complete_and_exit(&devdata->threadinfo.has_stopped, 0);
 }
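The kthread-plus-wait_event loop above is replaced by a self-rearming timer: the callback kicks NAPI when the channel has work and then rearms itself for the next period. A minimal sketch of that idiom with the pre-4.15 timer API (struct and names hypothetical; the conventional jiffies + delta expiry form is used here):

	static void example_poll_timer_fn(unsigned long data)
	{
		struct example_dev *dev = (struct example_dev *)data;

		/* hand the actual packet processing to the NAPI poll routine */
		napi_schedule(&dev->napi);

		/* rearm so the queue is checked again roughly every 2 ms */
		mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(2));
	}

	static void example_poll_timer_start(struct example_dev *dev)
	{
		setup_timer(&dev->poll_timer, example_poll_timer_fn,
			    (unsigned long)dev);
		mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(2));
	}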
 
 /**
@@ -1801,12 +1780,15 @@
 	u64 features;
 
 	netdev = alloc_etherdev(sizeof(struct visornic_devdata));
-	if (!netdev)
+	if (!netdev) {
+		dev_err(&dev->device,
+			"%s alloc_etherdev failed\n", __func__);
 		return -ENOMEM;
+	}
 
 	netdev->netdev_ops = &visornic_dev_ops;
 	netdev->watchdog_timeo = (5 * HZ);
-	netdev->dev.parent = &dev->device;
+	SET_NETDEV_DEV(netdev, &dev->device);
 
 	/* Get MAC address from channel and read it into the device. */
 	netdev->addr_len = ETH_ALEN;
@@ -1814,16 +1796,23 @@
 				  vnic.macaddr);
 	err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
 				    ETH_ALEN);
-	if (err < 0)
+	if (err < 0) {
+		dev_err(&dev->device,
+			"%s failed to get mac addr from chan (%d)\n",
+			__func__, err);
 		goto cleanup_netdev;
+	}
 
 	devdata = devdata_initialize(netdev_priv(netdev), dev);
 	if (!devdata) {
+		dev_err(&dev->device,
+			"%s devdata_initialize failed\n", __func__);
 		err = -ENOMEM;
 		goto cleanup_netdev;
 	}
 
 	devdata->netdev = netdev;
+	dev_set_drvdata(&dev->device, devdata);
 	init_waitqueue_head(&devdata->rsp_queue);
 	spin_lock_init(&devdata->priv_lock);
 	devdata->enabled = 0; /* not yet */
@@ -1834,10 +1823,14 @@
 				  vnic.num_rcv_bufs);
 	err = visorbus_read_channel(dev, channel_offset,
 				    &devdata->num_rcv_bufs, 4);
-	if (err)
+	if (err) {
+		dev_err(&dev->device,
+			"%s failed to get #rcv bufs from chan (%d)\n",
+			__func__, err);
 		goto cleanup_netdev;
+	}
 
-	devdata->rcvbuf = kmalloc(sizeof(struct sk_buff *) *
+	devdata->rcvbuf = kzalloc(sizeof(struct sk_buff *) *
 				  devdata->num_rcv_bufs, GFP_KERNEL);
 	if (!devdata->rcvbuf) {
 		err = -ENOMEM;
@@ -1846,12 +1839,15 @@
 
 	/* set the net_xmit outstanding threshold */
 	/* always leave two slots open but you should have 3 at a minimum */
+	/* note that max_outstanding_net_xmits must be > 0 */
 	devdata->max_outstanding_net_xmits =
-		max(3, ((devdata->num_rcv_bufs / 3) - 2));
+		max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
 	devdata->upper_threshold_net_xmits =
-		max(2, devdata->max_outstanding_net_xmits - 1);
+		max_t(unsigned long,
+		      2, (devdata->max_outstanding_net_xmits - 1));
 	devdata->lower_threshold_net_xmits =
-		max(1, devdata->max_outstanding_net_xmits / 2);
+		max_t(unsigned long,
+		      1, (devdata->max_outstanding_net_xmits / 2));
 
 	skb_queue_head_init(&devdata->xmitbufhead);
 
@@ -1866,8 +1862,6 @@
 		err = -ENOMEM;
 		goto cleanup_xmit_cmdrsp;
 	}
-	INIT_WORK(&devdata->serverdown_completion,
-		  visornic_serverdown_complete);
 	INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
 	devdata->server_down = false;
 	devdata->server_change_state = false;
@@ -1876,42 +1870,70 @@
 	channel_offset = offsetof(struct spar_io_channel_protocol,
 				  vnic.mtu);
 	err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
-	if (err)
+	if (err) {
+		dev_err(&dev->device,
+			"%s failed to get mtu from chan (%d)\n",
+			__func__, err);
 		goto cleanup_xmit_cmdrsp;
+	}
 
 	/* TODO: Setup Interrupt information */
 	/* Let's start our threads to get responses */
+	netif_napi_add(netdev, &devdata->napi, visornic_poll, 64);
+
+	setup_timer(&devdata->irq_poll_timer, poll_for_irq,
+		    (unsigned long)devdata);
+	/*
+	 * Note: This timer has to start running before the while
+	 * loop below because the napi routine is responsible for
+	 * setting enab_dis_acked
+	 */
+	mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
+
 	channel_offset = offsetof(struct spar_io_channel_protocol,
 				  channel_header.features);
 	err = visorbus_read_channel(dev, channel_offset, &features, 8);
-	if (err)
-		goto cleanup_xmit_cmdrsp;
+	if (err) {
+		dev_err(&dev->device,
+			"%s failed to get features from chan (%d)\n",
+			__func__, err);
+		goto cleanup_napi_add;
+	}
 
 	features |= ULTRA_IO_CHANNEL_IS_POLLING;
 	err = visorbus_write_channel(dev, channel_offset, &features, 8);
-	if (err)
-		goto cleanup_xmit_cmdrsp;
-
-	devdata->thread_wait_ms = 2;
-	visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
-			   devdata, "vnic_incoming");
+	if (err) {
+		dev_err(&dev->device,
+			"%s failed to set features in chan (%d)\n",
+			__func__, err);
+		goto cleanup_napi_add;
+	}
 
 	err = register_netdev(netdev);
-	if (err)
-		goto cleanup_thread_stop;
+	if (err) {
+		dev_err(&dev->device,
+			"%s register_netdev failed (%d)\n", __func__, err);
+		goto cleanup_napi_add;
+	}
 
 	/* create debug/sysfs directories */
 	devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
 						      visornic_debugfs_dir);
 	if (!devdata->eth_debugfs_dir) {
+		dev_err(&dev->device,
+			"%s debugfs_create_dir %s failed\n",
+			__func__, netdev->name);
 		err = -ENOMEM;
-		goto cleanup_thread_stop;
+		goto cleanup_xmit_cmdrsp;
 	}
 
+	dev_info(&dev->device, "%s success netdev=%s\n",
+		 __func__, netdev->name);
 	return 0;
 
-cleanup_thread_stop:
-	visor_thread_stop(&devdata->threadinfo);
+cleanup_napi_add:
+	del_timer_sync(&devdata->irq_poll_timer);
+	netif_napi_del(&devdata->napi);
 
 cleanup_xmit_cmdrsp:
 	kfree(devdata->xmit_cmdrsp);
@@ -1954,12 +1976,41 @@
 static void visornic_remove(struct visor_device *dev)
 {
 	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
+	struct net_device *netdev;
+	unsigned long flags;
 
-	if (!devdata)
+	if (!devdata) {
+		dev_err(&dev->device, "%s no devdata\n", __func__);
 		return;
+	}
+	spin_lock_irqsave(&devdata->priv_lock, flags);
+	if (devdata->going_away) {
+		spin_unlock_irqrestore(&devdata->priv_lock, flags);
+		dev_err(&dev->device, "%s already being removed\n", __func__);
+		return;
+	}
+	devdata->going_away = true;
+	spin_unlock_irqrestore(&devdata->priv_lock, flags);
+	netdev = devdata->netdev;
+	if (!netdev) {
+		dev_err(&dev->device, "%s no net device\n", __func__);
+		return;
+	}
+
+	/* going_away prevents new items being added to the workqueues */
+	flush_workqueue(visornic_timeout_reset_workqueue);
+
+	debugfs_remove_recursive(devdata->eth_debugfs_dir);
+
+	unregister_netdev(netdev);  /* this will call visornic_close() */
+
+	del_timer_sync(&devdata->irq_poll_timer);
+	netif_napi_del(&devdata->napi);
+
 	dev_set_drvdata(&dev->device, NULL);
 	host_side_disappeared(devdata);
-	kref_put(&devdata->kref, devdata_release);
+	devdata_release(devdata);
+	free_netdev(netdev);
 }
 
 /**
@@ -1980,8 +2031,7 @@
 {
 	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
 
-	visornic_serverdown(devdata);
-	complete_func(dev, 0);
+	visornic_serverdown(devdata, complete_func);
 	return 0;
 }
 
@@ -2003,37 +2053,40 @@
 	unsigned long flags;
 
 	devdata = dev_get_drvdata(&dev->device);
-	if (!devdata)
+	if (!devdata) {
+		dev_err(&dev->device, "%s no devdata\n", __func__);
 		return -EINVAL;
+	}
 
 	netdev = devdata->netdev;
 
-	if (devdata->server_down && !devdata->server_change_state) {
-		devdata->server_change_state = true;
-		/* Must transition channel to ATTACHED state BEFORE
-		 * we can start using the device again.
-		 * TODO: State transitions
-		 */
-		visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
-				   devdata, "vnic_incoming");
-		init_rcv_bufs(netdev, devdata);
-		spin_lock_irqsave(&devdata->priv_lock, flags);
-		devdata->enabled = 1;
-
-		/* Now we're ready, let's send an ENB to uisnic but until
-		 * we get an ACK back from uisnic, we'll drop the packets
-		 */
-		devdata->enab_dis_acked = 0;
+	spin_lock_irqsave(&devdata->priv_lock, flags);
+	if (devdata->server_change_state) {
 		spin_unlock_irqrestore(&devdata->priv_lock, flags);
-
-		/* send enable and wait for ack - don't hold lock when
-		 * sending enable because if the queue if sull, insert
-		 * might sleep.
-		 */
-		send_enbdis(netdev, 1, devdata);
-	} else if (devdata->server_change_state) {
-		return -EIO;
+		dev_err(&dev->device, "%s server already changing state\n",
+			__func__);
+		return -EINVAL;
 	}
+	if (!devdata->server_down) {
+		spin_unlock_irqrestore(&devdata->priv_lock, flags);
+		dev_err(&dev->device, "%s server not down\n", __func__);
+		complete_func(dev, 0);
+		return 0;
+	}
+	devdata->server_change_state = true;
+	spin_unlock_irqrestore(&devdata->priv_lock, flags);
+
+	/* Must transition channel to ATTACHED state BEFORE
+	 * we can start using the device again.
+	 * TODO: State transitions
+	 */
+	mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
+
+	init_rcv_bufs(netdev, devdata);
+
+	rtnl_lock();
+	dev_open(netdev);
+	rtnl_unlock();
 
 	complete_func(dev, 0);
 	return 0;
@@ -2051,18 +2104,6 @@
 	struct dentry *ret;
 	int err = -ENOMEM;
 
-	/* create workqueue for serverdown completion */
-	visornic_serverdown_workqueue =
-		create_singlethread_workqueue("visornic_serverdown");
-	if (!visornic_serverdown_workqueue)
-		return -ENOMEM;
-
-	/* create workqueue for tx timeout reset */
-	visornic_timeout_reset_workqueue =
-		create_singlethread_workqueue("visornic_timeout_reset");
-	if (!visornic_timeout_reset_workqueue)
-		return -ENOMEM;
-
 	visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
 	if (!visornic_debugfs_dir)
 		return err;
@@ -2076,12 +2117,6 @@
 	if (!ret)
 		goto cleanup_debugfs;
 
-	/* create workqueue for serverdown completion */
-	visornic_serverdown_workqueue =
-		create_singlethread_workqueue("visornic_serverdown");
-	if (!visornic_serverdown_workqueue)
-		goto cleanup_debugfs;
-
 	/* create workqueue for tx timeout reset */
 	visornic_timeout_reset_workqueue =
 		create_singlethread_workqueue("visornic_timeout_reset");
@@ -2097,8 +2132,6 @@
 	return 0;
 
 cleanup_workqueue:
-	flush_workqueue(visornic_serverdown_workqueue);
-	destroy_workqueue(visornic_serverdown_workqueue);
 	if (visornic_timeout_reset_workqueue) {
 		flush_workqueue(visornic_timeout_reset_workqueue);
 		destroy_workqueue(visornic_timeout_reset_workqueue);
@@ -2116,17 +2149,14 @@
  */
 static void visornic_cleanup(void)
 {
-	if (visornic_serverdown_workqueue) {
-		flush_workqueue(visornic_serverdown_workqueue);
-		destroy_workqueue(visornic_serverdown_workqueue);
-	}
+	visorbus_unregister_visor_driver(&visornic_driver);
+
 	if (visornic_timeout_reset_workqueue) {
 		flush_workqueue(visornic_timeout_reset_workqueue);
 		destroy_workqueue(visornic_timeout_reset_workqueue);
 	}
 	debugfs_remove_recursive(visornic_debugfs_dir);
 
-	visorbus_unregister_visor_driver(&visornic_driver);
 	kfree(dev_num_pool);
 	dev_num_pool = NULL;
 }
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index eabbcc7..35c6ce5 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -24,7 +24,6 @@
 
 #include "vme_pio2.h"
 
-
 static const char driver_name[] = "pio2";
 
 static int bus[PIO2_CARDS_MAX];
@@ -118,7 +117,6 @@
 	}
 }
 
-
 /*
  * We return whether this has been successful - this is used in the probe to
  * ensure we have a valid card.
@@ -158,7 +156,6 @@
 	.remove = pio2_remove,
 };
 
-
 static int __init pio2_init(void)
 {
 	if (bus_num == 0) {
@@ -178,7 +175,6 @@
 
 static int pio2_match(struct vme_dev *vdev)
 {
-
 	if (vdev->num >= bus_num) {
 		dev_err(&vdev->dev,
 			"The enumeration of the VMEbus to which the board is connected must be specified\n");
@@ -220,7 +216,7 @@
 	int vec;
 
 	card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL);
-	if (card == NULL) {
+	if (!card) {
 		retval = -ENOMEM;
 		goto err_struct;
 	}
@@ -234,7 +230,6 @@
 	card->vdev = vdev;
 
 	for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
-
 		if (isdigit(card->variant[i]) == 0) {
 			dev_err(&card->vdev->dev, "Variant invalid\n");
 			retval = -EINVAL;
@@ -264,29 +259,29 @@
 	for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
 		switch (card->variant[i]) {
 		case '0':
-			card->bank[i-1].config = NOFIT;
+			card->bank[i - 1].config = NOFIT;
 			break;
 		case '1':
 		case '2':
 		case '3':
 		case '4':
-			card->bank[i-1].config = INPUT;
+			card->bank[i - 1].config = INPUT;
 			break;
 		case '5':
-			card->bank[i-1].config = OUTPUT;
+			card->bank[i - 1].config = OUTPUT;
 			break;
 		case '6':
 		case '7':
 		case '8':
 		case '9':
-			card->bank[i-1].config = BOTH;
+			card->bank[i - 1].config = BOTH;
 			break;
 		}
 	}
 
 	/* Get a master window and position over regs */
 	card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
-	if (card->window == NULL) {
+	if (!card->window) {
 		dev_err(&card->vdev->dev,
 			"Unable to assign VME master resource\n");
 		retval = -EIO;
@@ -481,7 +476,6 @@
 	vme_unregister_driver(&pio2_driver);
 }
 
-
 /* These are required for each board */
 MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
 module_param_array(bus, int, &bus_num, S_IRUGO);
diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
index 9cca97a..8e61a3b 100644
--- a/drivers/staging/vme/devices/vme_user.c
+++ b/drivers/staging/vme/devices/vme_user.c
@@ -101,13 +101,13 @@
 	struct vme_resource *resource;	/* VME resource */
 	int mmap_count;		/* Number of current mmap's */
 };
+
 static struct image_desc image[VME_DEVS];
 
 static struct cdev *vme_user_cdev;		/* Character device */
 static struct class *vme_user_sysfs_class;	/* Sysfs class */
 static struct vme_dev *vme_user_bridge;		/* Pointer to user device */
 
-
 static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
 					MASTER_MINOR,	MASTER_MINOR,
 					SLAVE_MINOR,	SLAVE_MINOR,
@@ -120,125 +120,68 @@
 	atomic_t refcnt;
 };
 
-
-/*
- * We are going ot alloc a page during init per window for small transfers.
- * Small transfers will go VME -> buffer -> user space. Larger (more than a
- * page) transfers will lock the user space buffer into memory and then
- * transfer the data directly into the user space buffers.
- */
 static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
-	loff_t *ppos)
+				loff_t *ppos)
 {
-	ssize_t retval;
 	ssize_t copied = 0;
 
-	if (count <= image[minor].size_buf) {
-		/* We copy to kernel buffer */
-		copied = vme_master_read(image[minor].resource,
-			image[minor].kern_buf, count, *ppos);
-		if (copied < 0)
-			return (int)copied;
+	if (count > image[minor].size_buf)
+		count = image[minor].size_buf;
 
-		retval = __copy_to_user(buf, image[minor].kern_buf,
-			(unsigned long)copied);
-		if (retval != 0) {
-			copied = (copied - retval);
-			pr_info("User copy failed\n");
-			return -EINVAL;
-		}
+	copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
+				 count, *ppos);
+	if (copied < 0)
+		return (int)copied;
 
-	} else {
-		/* XXX Need to write this */
-		pr_info("Currently don't support large transfers\n");
-		/* Map in pages from userspace */
-
-		/* Call vme_master_read to do the transfer */
-		return -EINVAL;
-	}
+	if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
+		return -EFAULT;
 
 	return copied;
 }
 
-/*
- * We are going to alloc a page during init per window for small transfers.
- * Small transfers will go user space -> buffer -> VME. Larger (more than a
- * page) transfers will lock the user space buffer into memory and then
- * transfer the data directly from the user space buffers out to VME.
- */
 static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
-	size_t count, loff_t *ppos)
+				  size_t count, loff_t *ppos)
 {
-	ssize_t retval;
-	ssize_t copied = 0;
+	if (count > image[minor].size_buf)
+		count = image[minor].size_buf;
 
-	if (count <= image[minor].size_buf) {
-		retval = __copy_from_user(image[minor].kern_buf, buf,
-			(unsigned long)count);
-		if (retval != 0)
-			copied = (copied - retval);
-		else
-			copied = count;
+	if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
+		return -EFAULT;
 
-		copied = vme_master_write(image[minor].resource,
-			image[minor].kern_buf, copied, *ppos);
-	} else {
-		/* XXX Need to write this */
-		pr_info("Currently don't support large transfers\n");
-		/* Map in pages from userspace */
-
-		/* Call vme_master_write to do the transfer */
-		return -EINVAL;
-	}
-
-	return copied;
+	return vme_master_write(image[minor].resource, image[minor].kern_buf,
+				count, *ppos);
 }
 
 static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
-	size_t count, loff_t *ppos)
+			      size_t count, loff_t *ppos)
 {
 	void *image_ptr;
-	ssize_t retval;
 
 	image_ptr = image[minor].kern_buf + *ppos;
+	if (__copy_to_user(buf, image_ptr, (unsigned long)count))
+		return -EFAULT;
 
-	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
-	if (retval != 0) {
-		retval = (count - retval);
-		pr_warn("Partial copy to userspace\n");
-	} else
-		retval = count;
-
-	/* Return number of bytes successfully read */
-	return retval;
+	return count;
 }
 
 static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
-	size_t count, loff_t *ppos)
+				size_t count, loff_t *ppos)
 {
 	void *image_ptr;
-	size_t retval;
 
 	image_ptr = image[minor].kern_buf + *ppos;
+	if (__copy_from_user(image_ptr, buf, (unsigned long)count))
+		return -EFAULT;
 
-	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
-	if (retval != 0) {
-		retval = (count - retval);
-		pr_warn("Partial copy to userspace\n");
-	} else
-		retval = count;
-
-	/* Return number of bytes successfully read */
-	return retval;
+	return count;
 }
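The rewritten helpers above share the same error convention: oversized requests are clamped to the bounce-buffer size before copying, and a partially failed __copy_to_user()/__copy_from_user() becomes a hard -EFAULT instead of a short count. In generic form (hypothetical names, shown with the checked copy_to_user() variant):

	static ssize_t example_to_user(const void *kern_buf, size_t buf_size,
				       char __user *ubuf, size_t count)
	{
		if (count > buf_size)
			count = buf_size;	/* never read past the kernel buffer */

		/* copy_to_user() returns the number of bytes it could NOT copy */
		if (copy_to_user(ubuf, kern_buf, count))
			return -EFAULT;

		return count;			/* bytes actually transferred */
	}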
 
 static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
-			loff_t *ppos)
+			     loff_t *ppos)
 {
 	unsigned int minor = MINOR(file_inode(file)->i_rdev);
 	ssize_t retval;
 	size_t image_size;
-	size_t okcount;
 
 	if (minor == CONTROL_MINOR)
 		return 0;
@@ -256,16 +199,14 @@
 
 	/* Ensure not reading past end of the image */
 	if (*ppos + count > image_size)
-		okcount = image_size - *ppos;
-	else
-		okcount = count;
+		count = image_size - *ppos;
 
 	switch (type[minor]) {
 	case MASTER_MINOR:
-		retval = resource_to_user(minor, buf, okcount, ppos);
+		retval = resource_to_user(minor, buf, count, ppos);
 		break;
 	case SLAVE_MINOR:
-		retval = buffer_to_user(minor, buf, okcount, ppos);
+		retval = buffer_to_user(minor, buf, count, ppos);
 		break;
 	default:
 		retval = -EINVAL;
@@ -279,12 +220,11 @@
 }
 
 static ssize_t vme_user_write(struct file *file, const char __user *buf,
-			size_t count, loff_t *ppos)
+			      size_t count, loff_t *ppos)
 {
 	unsigned int minor = MINOR(file_inode(file)->i_rdev);
 	ssize_t retval;
 	size_t image_size;
-	size_t okcount;
 
 	if (minor == CONTROL_MINOR)
 		return 0;
@@ -301,16 +241,14 @@
 
 	/* Ensure not writing past end of the image */
 	if (*ppos + count > image_size)
-		okcount = image_size - *ppos;
-	else
-		okcount = count;
+		count = image_size - *ppos;
 
 	switch (type[minor]) {
 	case MASTER_MINOR:
-		retval = resource_from_user(minor, buf, okcount, ppos);
+		retval = resource_from_user(minor, buf, count, ppos);
 		break;
 	case SLAVE_MINOR:
-		retval = buffer_from_user(minor, buf, okcount, ppos);
+		retval = buffer_from_user(minor, buf, count, ppos);
 		break;
 	default:
 		retval = -EINVAL;
@@ -354,7 +292,7 @@
  * already been defined.
  */
 static int vme_user_ioctl(struct inode *inode, struct file *file,
-	unsigned int cmd, unsigned long arg)
+			  unsigned int cmd, unsigned long arg)
 {
 	struct vme_master master;
 	struct vme_slave slave;
@@ -390,12 +328,13 @@
 			 *	to userspace as they are
 			 */
 			retval = vme_master_get(image[minor].resource,
-				&master.enable, &master.vme_addr,
-				&master.size, &master.aspace,
-				&master.cycle, &master.dwidth);
+						&master.enable,
+						&master.vme_addr,
+						&master.size, &master.aspace,
+						&master.cycle, &master.dwidth);
 
 			copied = copy_to_user(argp, &master,
-				sizeof(struct vme_master));
+					      sizeof(struct vme_master));
 			if (copied != 0) {
 				pr_warn("Partial copy to userspace\n");
 				return -EFAULT;
@@ -435,12 +374,12 @@
 			 *	to userspace as they are
 			 */
 			retval = vme_slave_get(image[minor].resource,
-				&slave.enable, &slave.vme_addr,
-				&slave.size, &pci_addr, &slave.aspace,
-				&slave.cycle);
+					       &slave.enable, &slave.vme_addr,
+					       &slave.size, &pci_addr,
+					       &slave.aspace, &slave.cycle);
 
 			copied = copy_to_user(argp, &slave,
-				sizeof(struct vme_slave));
+					      sizeof(struct vme_slave));
 			if (copied != 0) {
 				pr_warn("Partial copy to userspace\n");
 				return -EFAULT;
@@ -526,8 +465,8 @@
 		return err;
 	}
 
-	vma_priv = kmalloc(sizeof(struct vme_user_vma_priv), GFP_KERNEL);
-	if (vma_priv == NULL) {
+	vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
+	if (!vma_priv) {
 		mutex_unlock(&image[minor].mutex);
 		return -ENOMEM;
 	}
@@ -588,7 +527,7 @@
 	char *name;
 
 	/* Save pointer to the bridge device */
-	if (vme_user_bridge != NULL) {
+	if (vme_user_bridge) {
 		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
 		err = -EINVAL;
 		goto err_dev;
@@ -606,7 +545,7 @@
 
 	/* Assign major and minor numbers for the driver */
 	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
-		driver_name);
+				     driver_name);
 	if (err) {
 		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
 			 VME_MAJOR);
@@ -622,10 +561,8 @@
 	vme_user_cdev->ops = &vme_user_fops;
 	vme_user_cdev->owner = THIS_MODULE;
 	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
-	if (err) {
-		dev_warn(&vdev->dev, "cdev_all failed\n");
+	if (err)
 		goto err_char;
-	}
 
 	/* Request slave resources and allocate buffers (128kB wide) */
 	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
@@ -636,7 +573,7 @@
 		 */
 		image[i].resource = vme_slave_request(vme_user_bridge,
 			VME_A24, VME_SCT);
-		if (image[i].resource == NULL) {
+		if (!image[i].resource) {
 			dev_warn(&vdev->dev,
 				 "Unable to allocate slave resource\n");
 			err = -ENOMEM;
@@ -645,7 +582,7 @@
 		image[i].size_buf = PCI_BUF_SIZE;
 		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
 			image[i].size_buf, &image[i].pci_buf);
-		if (image[i].kern_buf == NULL) {
+		if (!image[i].kern_buf) {
 			dev_warn(&vdev->dev,
 				 "Unable to allocate memory for buffer\n");
 			image[i].pci_buf = 0;
@@ -663,7 +600,7 @@
 		/* XXX Need to properly request attributes */
 		image[i].resource = vme_master_request(vme_user_bridge,
 			VME_A32, VME_SCT, VME_D32);
-		if (image[i].resource == NULL) {
+		if (!image[i].resource) {
 			dev_warn(&vdev->dev,
 				 "Unable to allocate master resource\n");
 			err = -ENOMEM;
@@ -671,7 +608,7 @@
 		}
 		image[i].size_buf = PCI_BUF_SIZE;
 		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
-		if (image[i].kern_buf == NULL) {
+		if (!image[i].kern_buf) {
 			err = -ENOMEM;
 			vme_master_free(image[i].resource);
 			goto err_master;
@@ -835,7 +772,6 @@
 	vme_unregister_driver(&vme_user_driver);
 }
 
-
 MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
 module_param_array(bus, int, &bus_num, 0);
 
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index b0ea38f..9e61f2d 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1728,10 +1728,8 @@
 	unsigned int uRateIdx = (unsigned int) wRate;
 	unsigned int uRate = 0;
 
-	if (uRateIdx > RATE_54M) {
-		ASSERT(0);
+	if (uRateIdx > RATE_54M)
 		return 0;
-	}
 
 	uRate = (unsigned int)awcFrameTime[uRateIdx];
 
@@ -1945,7 +1943,6 @@
 	VNSvInPortB(dwIoBase + MAC_REG_BBREGDATA, pbyData);
 
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x30);
 		pr_debug(" DBG_PORT80(0x30)\n");
 		return false;
 	}
@@ -1988,7 +1985,6 @@
 	}
 
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x31);
 		pr_debug(" DBG_PORT80(0x31)\n");
 		return false;
 	}
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index e00c060..c7b75df 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -514,7 +514,7 @@
 )
 {
 	unsigned int uu;
-	PSTxDesc    pCurrTD;
+	struct vnt_tx_desc *pCurrTD;
 
 	/* initialize TD index */
 	pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
@@ -525,12 +525,12 @@
 
 	for (uu = 0; uu < pDevice->sOpts.nTxDescs[0]; uu++) {
 		pCurrTD = &(pDevice->apTD0Rings[uu]);
-		pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
+		pCurrTD->td0.owner = OWNED_BY_HOST;
 		/* init all Tx Packet pointer to NULL */
 	}
 	for (uu = 0; uu < pDevice->sOpts.nTxDescs[1]; uu++) {
 		pCurrTD = &(pDevice->apTD1Rings[uu]);
-		pCurrTD->m_td0TD0.f1Owner = OWNED_BY_HOST;
+		pCurrTD->td0.owner = OWNED_BY_HOST;
 		/* init all Tx Packet pointer to NULL */
 	}
 
@@ -573,17 +573,17 @@
 	/* init state, all RD is chip's */
 	for (uu = 0; uu < pDevice->sOpts.nRxDescs0; uu++) {
 		pDesc = &(pDevice->aRD0Ring[uu]);
-		pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
+		pDesc->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
 		pDesc->m_rd0RD0.f1Owner = OWNED_BY_NIC;
-		pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
+		pDesc->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
 	}
 
 	/* init state, all RD is chip's */
 	for (uu = 0; uu < pDevice->sOpts.nRxDescs1; uu++) {
 		pDesc = &(pDevice->aRD1Ring[uu]);
-		pDesc->m_rd0RD0.wResCount = (unsigned short)(pDevice->rx_buf_sz);
+		pDesc->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
 		pDesc->m_rd0RD0.f1Owner = OWNED_BY_NIC;
-		pDesc->m_rd1RD1.wReqCount = (unsigned short)(pDevice->rx_buf_sz);
+		pDesc->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
 	}
 
 	/* set perPkt mode */
@@ -847,7 +847,6 @@
 	case CARD_LB_PHY:
 		break;
 	default:
-		ASSERT(false);
 		break;
 	}
 	/* set MAC loopback */
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 758eeb2..3c9007e 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -170,13 +170,12 @@
 typedef struct tagDEVICE_RD_INFO {
 	struct sk_buff *skb;
 	dma_addr_t  skb_dma;
-	dma_addr_t  curr_desc;
 } DEVICE_RD_INFO,   *PDEVICE_RD_INFO;
 
 #ifdef __BIG_ENDIAN
 
 typedef struct tagRDES0 {
-	volatile unsigned short wResCount;
+	volatile __le16 wResCount;
 	union {
 		volatile u16    f15Reserved;
 		struct {
@@ -191,7 +190,7 @@
 #else
 
 typedef struct tagRDES0 {
-	unsigned short wResCount;
+	__le16         wResCount;
 	unsigned short f15Reserved:15;
 	unsigned short f1Owner:1;
 } __attribute__ ((__packed__))
@@ -200,7 +199,7 @@
 #endif
 
 typedef struct tagRDES1 {
-	unsigned short wReqCount;
+	__le16	       wReqCount;
 	unsigned short wReserved;
 } __attribute__ ((__packed__))
 SRDES1;
@@ -209,93 +208,56 @@
 typedef struct tagSRxDesc {
 	volatile SRDES0 m_rd0RD0;
 	volatile SRDES1 m_rd1RD1;
-	volatile u32    buff_addr;
-	volatile u32    next_desc;
+	volatile __le32 buff_addr;
+	volatile __le32 next_desc;
 	struct tagSRxDesc *next __aligned(8);
 	volatile PDEVICE_RD_INFO pRDInfo __aligned(8);
 } __attribute__ ((__packed__))
 SRxDesc, *PSRxDesc;
 typedef const SRxDesc *PCSRxDesc;
 
+struct vnt_tdes0 {
+	volatile u8 tsr0;
+	volatile u8 tsr1;
 #ifdef __BIG_ENDIAN
-
-typedef struct tagTDES0 {
-	volatile    unsigned char byTSR0;
-	volatile    unsigned char byTSR1;
 	union {
-		volatile u16    f15Txtime;
+		volatile u16 f15_txtime;
 		struct {
-			volatile u8 f8Reserved1;
-			volatile u8 f1Owner:1;
-			volatile u8 f7Reserved:7;
-		} __attribute__ ((__packed__));
-	} __attribute__ ((__packed__));
-} __attribute__ ((__packed__))
-STDES0, PSTDES0;
-
+			volatile u8 f8_reserved;
+			volatile u8 owner:1;
+			volatile u8 f7_reserved:7;
+		} __packed;
+	} __packed;
 #else
-
-typedef struct tagTDES0 {
-	volatile    unsigned char byTSR0;
-	volatile    unsigned char byTSR1;
-	volatile    unsigned short f15Txtime:15;
-	volatile    unsigned short f1Owner:1;
-} __attribute__ ((__packed__))
-STDES0;
-
+	volatile u16 f15_txtime:15;
+	volatile u16 owner:1;
 #endif
+} __packed;
 
-typedef struct tagTDES1 {
-	volatile    unsigned short wReqCount;
-	volatile    unsigned char byTCR;
-	volatile    unsigned char byReserved;
-} __attribute__ ((__packed__))
-STDES1;
+struct vnt_tdes1 {
+	volatile __le16 req_count;
+	volatile u8 tcr;
+	volatile u8 reserved;
+} __packed;
 
-typedef struct tagDEVICE_TD_INFO {
+struct vnt_td_info {
 	void *mic_hdr;
 	struct sk_buff *skb;
 	unsigned char *buf;
-	dma_addr_t          skb_dma;
-	dma_addr_t          buf_dma;
-	dma_addr_t          curr_desc;
-	unsigned long dwReqCount;
-	unsigned long dwHeaderLength;
-	unsigned char byFlags;
-} DEVICE_TD_INFO,    *PDEVICE_TD_INFO;
+	dma_addr_t buf_dma;
+	u16 req_count;
+	u8 flags;
+};
 
 /* transmit descriptor */
-typedef struct tagSTxDesc {
-	volatile    STDES0  m_td0TD0;
-	volatile    STDES1  m_td1TD1;
-	volatile    u32    buff_addr;
-	volatile    u32    next_desc;
-	struct tagSTxDesc *next __aligned(8);
-	volatile    PDEVICE_TD_INFO pTDInfo __aligned(8);
-} __attribute__ ((__packed__))
-STxDesc, *PSTxDesc;
-typedef const STxDesc *PCSTxDesc;
-
-typedef struct tagSTxSyncDesc {
-	volatile    STDES0  m_td0TD0;
-	volatile    STDES1  m_td1TD1;
-	volatile    u32 buff_addr; /* pointer to logical buffer */
-	volatile    u32 next_desc; /* pointer to next logical descriptor */
-	volatile    unsigned short m_wFIFOCtl;
-	volatile    unsigned short m_wTimeStamp;
-	struct tagSTxSyncDesc *next __aligned(8);
-	volatile    PDEVICE_TD_INFO pTDInfo __aligned(8);
-} __attribute__ ((__packed__))
-STxSyncDesc, *PSTxSyncDesc;
-typedef const STxSyncDesc *PCSTxSyncDesc;
-
-/* RsvTime buffer header */
-typedef struct tagSRrvTime_atim {
-	unsigned short wCTSTxRrvTime_ba;
-	unsigned short wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_atim, *PSRrvTime_atim;
-typedef const SRrvTime_atim *PCSRrvTime_atim;
+struct vnt_tx_desc {
+	volatile struct vnt_tdes0 td0;
+	volatile struct vnt_tdes1 td1;
+	volatile __le32 buff_addr;
+	volatile __le32 next_desc;
+	struct vnt_tx_desc *next __aligned(8);
+	struct vnt_td_info *td_info __aligned(8);
+} __packed;
 
 /* Length, Service, and Signal fields of Phy for Tx */
 struct vnt_phy_field {
@@ -310,42 +272,4 @@
 	u32 field_write;
 };
 
-/* Tx FIFO header */
-typedef struct tagSTxBufHead {
-	u32 adwTxKey[4];
-	unsigned short wFIFOCtl;
-	unsigned short wTimeStamp;
-	unsigned short wFragCtl;
-	unsigned char byTxPower;
-	unsigned char wReserved;
-} __attribute__ ((__packed__))
-STxBufHead, *PSTxBufHead;
-typedef const STxBufHead *PCSTxBufHead;
-
-typedef struct tagSBEACONCtl {
-	u32 BufReady:1;
-	u32 TSF:15;
-	u32 BufLen:11;
-	u32 Reserved:5;
-} __attribute__ ((__packed__))
-SBEACONCtl;
-
-typedef struct tagSSecretKey {
-	u32 dwLowDword;
-	unsigned char byHighByte;
-} __attribute__ ((__packed__))
-SSecretKey;
-
-typedef struct tagSKeyEntry {
-	unsigned char abyAddrHi[2];
-	unsigned short wKCTL;
-	unsigned char abyAddrLo[4];
-	u32 dwKey0[4];
-	u32 dwKey1[4];
-	u32 dwKey2[4];
-	u32 dwKey3[4];
-	u32 dwKey4[4];
-} __attribute__ ((__packed__))
-SKeyEntry;
-
 #endif /* __DESC_H__ */
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 5cf1b33..c9fa6ef 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -252,11 +252,11 @@
 	int                         nTxQueues;
 	volatile int                iTDUsed[TYPE_MAXTD];
 
-	volatile PSTxDesc           apCurrTD[TYPE_MAXTD];
-	volatile PSTxDesc           apTailTD[TYPE_MAXTD];
+	struct vnt_tx_desc *apCurrTD[TYPE_MAXTD];
+	struct vnt_tx_desc *apTailTD[TYPE_MAXTD];
 
-	volatile PSTxDesc           apTD0Rings;
-	volatile PSTxDesc           apTD1Rings;
+	struct vnt_tx_desc *apTD0Rings;
+	struct vnt_tx_desc *apTD1Rings;
 
 	volatile PSRxDesc           aRD0Ring;
 	volatile PSRxDesc           aRD1Ring;
@@ -403,6 +403,7 @@
 	unsigned char abyEEPROM[EEP_MAX_CONTEXT_SIZE]; /* unsigned long alignment */
 
 	unsigned short wBeaconInterval;
+	u16 wake_up_count;
 
 	struct work_struct interrupt_work;
 
@@ -414,8 +415,8 @@
 	return kzalloc(sizeof(DEVICE_RD_INFO), GFP_ATOMIC);
 }
 
-static inline PDEVICE_TD_INFO alloc_td_info(void)
+static inline struct vnt_td_info *alloc_td_info(void)
 {
-	return kzalloc(sizeof(DEVICE_TD_INFO), GFP_ATOMIC);
+	return kzalloc(sizeof(struct vnt_td_info), GFP_ATOMIC);
 }
 #endif
diff --git a/drivers/staging/vt6655/device_cfg.h b/drivers/staging/vt6655/device_cfg.h
index a4a8a84..b4c9547 100644
--- a/drivers/staging/vt6655/device_cfg.h
+++ b/drivers/staging/vt6655/device_cfg.h
@@ -69,19 +69,4 @@
 	VT3253 = 1
 } CHIP_TYPE, *PCHIP_TYPE;
 
-#ifdef VIAWET_DEBUG
-#define ASSERT(x)							\
-do {									\
-	if (!(x)) {							\
-		pr_err("assertion %s failed: file %s line %d\n", \
-		       #x, __func__, __LINE__);				\
-		*(int *)0 = 0;						\
-	}								\
-} while (0)
-#define DBG_PORT80(value)                   outb(value, 0x80)
-#else
-#define ASSERT(x)
-#define DBG_PORT80(value)
-#endif
-
 #endif
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 69bdc8f..0d8f123 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -157,7 +157,7 @@
 static int  device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx);
 static bool device_alloc_rx_buf(struct vnt_private *pDevice, PSRxDesc pDesc);
 static void device_init_registers(struct vnt_private *pDevice);
-static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc);
+static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
 static void device_free_td0_ring(struct vnt_private *pDevice);
 static void device_free_td1_ring(struct vnt_private *pDevice);
 static void device_free_rd0_ring(struct vnt_private *pDevice);
@@ -522,8 +522,8 @@
 	vir_pool = dma_zalloc_coherent(&pDevice->pcid->dev,
 					 pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
 					 pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
-					 pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
-					 pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
+					 pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
+					 pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
 					 &pDevice->pool_dma, GFP_ATOMIC);
 	if (vir_pool == NULL) {
 		dev_err(&pDevice->pcid->dev, "allocate desc dma memory failed\n");
@@ -551,8 +551,8 @@
 		dma_free_coherent(&pDevice->pcid->dev,
 				    pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
 				    pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
-				    pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
-				    pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
+				    pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
+				    pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
 				    vir_pool, pDevice->pool_dma
 			);
 		return false;
@@ -562,7 +562,7 @@
 		pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);
 
 	pDevice->td1_pool_dma = pDevice->td0_pool_dma +
-		pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
+		pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
 
 	/* vir_pool: pvoid type */
 	pDevice->apTD0Rings = vir_pool
@@ -572,7 +572,7 @@
 	pDevice->apTD1Rings = vir_pool
 		+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc)
 		+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc)
-		+ pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
+		+ pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
 
 	pDevice->tx1_bufs = pDevice->tx0_bufs +
 		pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
@@ -597,8 +597,8 @@
 	dma_free_coherent(&pDevice->pcid->dev,
 			    pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
 			    pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
-			    pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
-			    pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc)
+			    pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
+			    pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc)
 			    ,
 			    pDevice->aRD0Ring, pDevice->pool_dma
 		);
@@ -623,12 +623,11 @@
 	for (i = 0; i < pDevice->sOpts.nRxDescs0; i ++, curr += sizeof(SRxDesc)) {
 		pDesc = &(pDevice->aRD0Ring[i]);
 		pDesc->pRDInfo = alloc_rd_info();
-		ASSERT(pDesc->pRDInfo);
+
 		if (!device_alloc_rx_buf(pDevice, pDesc))
 			dev_err(&pDevice->pcid->dev, "can not alloc rx bufs\n");
 
 		pDesc->next = &(pDevice->aRD0Ring[(i+1) % pDevice->sOpts.nRxDescs0]);
-		pDesc->pRDInfo->curr_desc = cpu_to_le32(curr);
 		pDesc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
 	}
 
@@ -647,12 +646,11 @@
 	for (i = 0; i < pDevice->sOpts.nRxDescs1; i ++, curr += sizeof(SRxDesc)) {
 		pDesc = &(pDevice->aRD1Ring[i]);
 		pDesc->pRDInfo = alloc_rd_info();
-		ASSERT(pDesc->pRDInfo);
+
 		if (!device_alloc_rx_buf(pDevice, pDesc))
 			dev_err(&pDevice->pcid->dev, "can not alloc rx bufs\n");
 
 		pDesc->next = &(pDevice->aRD1Ring[(i+1) % pDevice->sOpts.nRxDescs1]);
-		pDesc->pRDInfo->curr_desc = cpu_to_le32(curr);
 		pDesc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
 	}
 
@@ -699,20 +697,20 @@
 {
 	int i;
 	dma_addr_t  curr;
-	PSTxDesc        pDesc;
+	struct vnt_tx_desc *pDesc;
 
 	curr = pDevice->td0_pool_dma;
-	for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++, curr += sizeof(STxDesc)) {
+	for (i = 0; i < pDevice->sOpts.nTxDescs[0];
+	     i++, curr += sizeof(struct vnt_tx_desc)) {
 		pDesc = &(pDevice->apTD0Rings[i]);
-		pDesc->pTDInfo = alloc_td_info();
-		ASSERT(pDesc->pTDInfo);
+		pDesc->td_info = alloc_td_info();
+
 		if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
-			pDesc->pTDInfo->buf = pDevice->tx0_bufs + (i)*PKT_BUF_SZ;
-			pDesc->pTDInfo->buf_dma = pDevice->tx_bufs_dma0 + (i)*PKT_BUF_SZ;
+			pDesc->td_info->buf = pDevice->tx0_bufs + (i)*PKT_BUF_SZ;
+			pDesc->td_info->buf_dma = pDevice->tx_bufs_dma0 + (i)*PKT_BUF_SZ;
 		}
 		pDesc->next = &(pDevice->apTD0Rings[(i+1) % pDevice->sOpts.nTxDescs[0]]);
-		pDesc->pTDInfo->curr_desc = cpu_to_le32(curr);
-		pDesc->next_desc = cpu_to_le32(curr+sizeof(STxDesc));
+		pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
 	}
 
 	if (i > 0)
@@ -724,21 +722,21 @@
 {
 	int i;
 	dma_addr_t  curr;
-	PSTxDesc    pDesc;
+	struct vnt_tx_desc *pDesc;
 
 	/* Init the TD ring entries */
 	curr = pDevice->td1_pool_dma;
-	for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++, curr += sizeof(STxDesc)) {
+	for (i = 0; i < pDevice->sOpts.nTxDescs[1];
+	     i++, curr += sizeof(struct vnt_tx_desc)) {
 		pDesc = &(pDevice->apTD1Rings[i]);
-		pDesc->pTDInfo = alloc_td_info();
-		ASSERT(pDesc->pTDInfo);
+		pDesc->td_info = alloc_td_info();
+
 		if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
-			pDesc->pTDInfo->buf = pDevice->tx1_bufs + (i) * PKT_BUF_SZ;
-			pDesc->pTDInfo->buf_dma = pDevice->tx_bufs_dma1 + (i) * PKT_BUF_SZ;
+			pDesc->td_info->buf = pDevice->tx1_bufs + (i) * PKT_BUF_SZ;
+			pDesc->td_info->buf_dma = pDevice->tx_bufs_dma1 + (i) * PKT_BUF_SZ;
 		}
 		pDesc->next = &(pDevice->apTD1Rings[(i + 1) % pDevice->sOpts.nTxDescs[1]]);
-		pDesc->pTDInfo->curr_desc = cpu_to_le32(curr);
-		pDesc->next_desc = cpu_to_le32(curr+sizeof(STxDesc));
+		pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
 	}
 
 	if (i > 0)
@@ -751,17 +749,11 @@
 	int i;
 
 	for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++) {
-		PSTxDesc        pDesc = &(pDevice->apTD0Rings[i]);
-		PDEVICE_TD_INFO  pTDInfo = pDesc->pTDInfo;
+		struct vnt_tx_desc *pDesc = &pDevice->apTD0Rings[i];
+		struct vnt_td_info *pTDInfo = pDesc->td_info;
 
-		if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
-			dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma,
-					 pTDInfo->skb->len, DMA_TO_DEVICE);
-
-		if (pTDInfo->skb)
-			dev_kfree_skb(pTDInfo->skb);
-
-		kfree(pDesc->pTDInfo);
+		dev_kfree_skb(pTDInfo->skb);
+		kfree(pDesc->td_info);
 	}
 }
 
@@ -770,17 +762,11 @@
 	int i;
 
 	for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++) {
-		PSTxDesc        pDesc = &(pDevice->apTD1Rings[i]);
-		PDEVICE_TD_INFO  pTDInfo = pDesc->pTDInfo;
+		struct vnt_tx_desc *pDesc = &pDevice->apTD1Rings[i];
+		struct vnt_td_info *pTDInfo = pDesc->td_info;
 
-		if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
-			dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma,
-					 pTDInfo->skb->len, DMA_TO_DEVICE);
-
-		if (pTDInfo->skb)
-			dev_kfree_skb(pTDInfo->skb);
-
-		kfree(pDesc->pTDInfo);
+		dev_kfree_skb(pTDInfo->skb);
+		kfree(pDesc->td_info);
 	}
 }
 
@@ -822,7 +808,6 @@
 	pRDInfo->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
 	if (pRDInfo->skb == NULL)
 		return false;
-	ASSERT(pRDInfo->skb);
 
 	pRDInfo->skb_dma =
 		dma_map_single(&pDevice->pcid->dev,
@@ -856,7 +841,7 @@
 };
 
 static int vnt_int_report_rate(struct vnt_private *priv,
-			       PDEVICE_TD_INFO context, u8 tsr0, u8 tsr1)
+			       struct vnt_td_info *context, u8 tsr0, u8 tsr1)
 {
 	struct vnt_tx_fifo_head *fifo_head;
 	struct ieee80211_tx_info *info;
@@ -917,23 +902,23 @@
 
 static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
 {
-	PSTxDesc                 pTD;
+	struct vnt_tx_desc *pTD;
 	int                      works = 0;
 	unsigned char byTsr0;
 	unsigned char byTsr1;
 
 	for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] > 0; pTD = pTD->next) {
-		if (pTD->m_td0TD0.f1Owner == OWNED_BY_NIC)
+		if (pTD->td0.owner == OWNED_BY_NIC)
 			break;
 		if (works++ > 15)
 			break;
 
-		byTsr0 = pTD->m_td0TD0.byTSR0;
-		byTsr1 = pTD->m_td0TD0.byTSR1;
+		byTsr0 = pTD->td0.tsr0;
+		byTsr1 = pTD->td0.tsr1;
 
 		/* Only the status of first TD in the chain is correct */
-		if (pTD->m_td1TD1.byTCR & TCR_STP) {
-			if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
+		if (pTD->td1.tcr & TCR_STP) {
+			if ((pTD->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
 				if (!(byTsr1 & TSR1_TERR)) {
 					if (byTsr0 != 0) {
 						pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
@@ -947,13 +932,13 @@
 			}
 
 			if (byTsr1 & TSR1_TERR) {
-				if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
+				if ((pTD->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
 					pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
 						 (int)uIdx, byTsr1, byTsr0);
 				}
 			}
 
-			vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
+			vnt_int_report_rate(pDevice, pTD->td_info, byTsr0, byTsr1);
 
 			device_free_tx_buf(pDevice, pTD);
 			pDevice->iTDUsed[uIdx]--;
@@ -975,23 +960,17 @@
 	}
 }
 
-static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
+static void device_free_tx_buf(struct vnt_private *pDevice,
+			       struct vnt_tx_desc *pDesc)
 {
-	PDEVICE_TD_INFO  pTDInfo = pDesc->pTDInfo;
+	struct vnt_td_info *pTDInfo = pDesc->td_info;
 	struct sk_buff *skb = pTDInfo->skb;
 
-	/* pre-allocated buf_dma can't be unmapped. */
-	if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma)) {
-		dma_unmap_single(&pDevice->pcid->dev, pTDInfo->skb_dma,
-				 skb->len, DMA_TO_DEVICE);
-	}
-
 	if (skb)
 		ieee80211_tx_status_irqsafe(pDevice->hw, skb);
 
-	pTDInfo->skb_dma = 0;
 	pTDInfo->skb = NULL;
-	pTDInfo->byFlags = 0;
+	pTDInfo->flags = 0;
 }
 
 static void vnt_check_bb_vga(struct vnt_private *priv)
@@ -1180,7 +1159,7 @@
 static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	PSTxDesc head_td;
+	struct vnt_tx_desc *head_td;
 	u32 dma_idx;
 	unsigned long flags;
 
@@ -1198,12 +1177,12 @@
 
 	head_td = priv->apCurrTD[dma_idx];
 
-	head_td->m_td1TD1.byTCR = 0;
+	head_td->td1.tcr = 0;
 
-	head_td->pTDInfo->skb = skb;
+	head_td->td_info->skb = skb;
 
 	if (dma_idx == TYPE_AC0DMA)
-		head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
+		head_td->td_info->flags = TD_FLAGS_NETIF_SKB;
 
 	priv->apCurrTD[dma_idx] = head_td->next;
 
@@ -1211,26 +1190,22 @@
 
 	vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
 
-	if (MACbIsRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PS))
-		MACbPSWakeup(priv->PortOffset);
-
 	spin_lock_irqsave(&priv->lock, flags);
 
 	priv->bPWBitOn = false;
 
 	/* Set TSR1 & ReqCount in TxDescHead */
-	head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
-	head_td->m_td1TD1.wReqCount =
-			cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+	head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
+	head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);
 
-	head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
+	head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);
 
 	/* Poll Transmit the adapter */
 	wmb();
-	head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+	head_td->td0.owner = OWNED_BY_NIC;
 	wmb(); /* second memory barrier */
 
-	if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+	if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
 		MACvTransmitAC0(priv->PortOffset);
 	else
 		MACvTransmit0(priv->PortOffset);
@@ -1775,6 +1750,12 @@
 		return -ENODEV;
 	}
 
+	if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
+		dev_err(&pcid->dev, "Failed to set DMA 32-bit mask\n");
+		device_free_info(priv);
+		return -ENODEV;
+	}
+
 	INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
 
 	/* do reset */
@@ -1812,6 +1793,7 @@
 	ieee80211_hw_set(priv->hw, SIGNAL_DBM);
 	ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
 	ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(priv->hw, SUPPORTS_PS);
 
 	priv->hw->max_signal = 100;
 
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index b25ee96..e14eed1 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -144,7 +144,7 @@
 			 priv->rx_buf_sz, DMA_FROM_DEVICE);
 
 	frame_size = le16_to_cpu(curr_rd->m_rd1RD1.wReqCount)
-			- cpu_to_le16(curr_rd->m_rd0RD0.wResCount);
+			- le16_to_cpu(curr_rd->m_rd0RD0.wResCount);
 
 	if ((frame_size > 2364) || (frame_size < 33)) {
 		/* Frame Size error drop this packet.*/
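The dpc.c change above makes the byte-order handling consistent: both wReqCount and wResCount are now __le16 descriptor fields, so each operand goes through le16_to_cpu() before the subtraction. A small illustration of the intended arithmetic (hypothetical helper):

	static u16 example_frame_size(__le16 req_count, __le16 res_count)
	{
		/* both values live in a DMA descriptor in little-endian byte
		 * order; convert each to CPU order before doing math on them
		 */
		return le16_to_cpu(req_count) - le16_to_cpu(res_count);
	}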
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index aed530f..3dfd333 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -186,7 +186,6 @@
 {
 	unsigned char byOrgValue;
 
-	ASSERT(byLoopbackMode < 3);
 	byLoopbackMode <<= 6;
 	/* set TCR */
 	VNSvInPortB(dwIoBase + MAC_REG_TEST, &byOrgValue);
@@ -374,7 +373,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x10);
 		pr_debug(" DBG_PORT80(0x10)\n");
 		return false;
 	}
@@ -384,7 +382,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x11);
 		pr_debug(" DBG_PORT80(0x11)\n");
 		return false;
 	}
@@ -398,7 +395,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x12);
 		pr_debug(" DBG_PORT80(0x12)\n");
 		return false;
 	}
@@ -436,7 +432,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x20);
 		pr_debug(" DBG_PORT80(0x20)\n");
 		return false;
 	}
@@ -446,7 +441,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x21);
 		pr_debug(" DBG_PORT80(0x21)\n");
 		return false;
 	}
@@ -461,7 +455,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x24);
 		pr_debug(" DBG_PORT80(0x24)\n");
 		return false;
 	}
@@ -486,13 +479,11 @@
 	MACvRegBitsOff(dwIoBase, MAC_REG_TCR, TCR_AUTOBCNTX);
 
 	if (!MACbSafeRxOff(dwIoBase)) {
-		DBG_PORT80(0xA1);
 		pr_debug(" MACbSafeRxOff == false)\n");
 		MACbSafeSoftwareReset(dwIoBase);
 		return false;
 	}
 	if (!MACbSafeTxOff(dwIoBase)) {
-		DBG_PORT80(0xA2);
 		pr_debug(" MACbSafeTxOff == false)\n");
 		MACbSafeSoftwareReset(dwIoBase);
 		return false;
@@ -590,9 +581,6 @@
 			break;
 	}
 
-	if (ww == W_MAX_TIMEOUT)
-		DBG_PORT80(0x13);
-
 	VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR0, dwCurrDescAddr);
 	if (byOrgDMACtl & DMACTL_RUN)
 		VNSvOutPortB(dwIoBase + MAC_REG_RXDMACTL0, DMACTL_RUN);
@@ -627,8 +615,6 @@
 		if (!(byData & DMACTL_RUN))
 			break;
 	}
-	if (ww == W_MAX_TIMEOUT)
-		DBG_PORT80(0x14);
 
 	VNSvOutPortD(dwIoBase + MAC_REG_RXDMAPTR1, dwCurrDescAddr);
 	if (byOrgDMACtl & DMACTL_RUN)
@@ -666,8 +652,6 @@
 		if (!(byData & DMACTL_RUN))
 			break;
 	}
-	if (ww == W_MAX_TIMEOUT)
-		DBG_PORT80(0x25);
 
 	VNSvOutPortD(dwIoBase + MAC_REG_TXDMAPTR0, dwCurrDescAddr);
 	if (byOrgDMACtl & DMACTL_RUN)
@@ -706,7 +690,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x26);
 		pr_debug(" DBG_PORT80(0x26)\n");
 	}
 	VNSvOutPortD(dwIoBase + MAC_REG_AC0DMAPTR, dwCurrDescAddr);
@@ -807,7 +790,6 @@
 			break;
 	}
 	if (ww == W_MAX_TIMEOUT) {
-		DBG_PORT80(0x36);
 		pr_debug(" DBG_PORT80(0x33)\n");
 		return false;
 	}
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index be3c4e9..06e6b9d 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -157,10 +157,18 @@
 	struct ieee80211_conf *conf = &hw->conf;
 	bool bWakeUp = false;
 
-	if (conf->listen_interval == 1) {
-		/* Turn on wake up to listen next beacon */
-		MACvRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
-		bWakeUp = true;
+	if (conf->listen_interval > 1) {
+		if (!pDevice->wake_up_count)
+			pDevice->wake_up_count = conf->listen_interval;
+
+		--pDevice->wake_up_count;
+
+		if (pDevice->wake_up_count == 1) {
+			/* Turn on wake up to listen next beacon */
+			MACvRegBitsOn(pDevice->PortOffset,
+				      MAC_REG_PSCTL, PSCTL_LNBCN);
+			bWakeUp = true;
+		}
 	}
 
 	return bWakeUp;
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 7626f63..c537321 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -39,66 +39,66 @@
 #include "rf.h"
 #include "baseband.h"
 
-#define BY_AL2230_REG_LEN     23 //24bit
+#define BY_AL2230_REG_LEN     23 /* 24bit */
 #define CB_AL2230_INIT_SEQ    15
-#define SWITCH_CHANNEL_DELAY_AL2230 200 //us
+#define SWITCH_CHANNEL_DELAY_AL2230 200 /* us */
 #define AL2230_PWR_IDX_LEN    64
 
-#define BY_AL7230_REG_LEN     23 //24bit
+#define BY_AL7230_REG_LEN     23 /* 24bit */
 #define CB_AL7230_INIT_SEQ    16
-#define SWITCH_CHANNEL_DELAY_AL7230 200 //us
+#define SWITCH_CHANNEL_DELAY_AL7230 200 /* us */
 #define AL7230_PWR_IDX_LEN    64
 
 static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
-	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
-	0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, //
+	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x01A00200+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x00FFF300+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x0005A400+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x0F4DC500+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x0805B600+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x0146C700+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x00068800+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x0403B900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x00DBBA00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
+	0x00099B00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x0BDFFC00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x00000D00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x00580F00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
 };
 
 static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
-	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
-	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
-	0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
-	0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
-	0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
-	0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
-	0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
-	0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
-	0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
-	0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
-	0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
-	0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
-	0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
-	0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 14, Tf = 2412M
+	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+	0x03F79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+	0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+	0x03E79000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+	0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+	0x03F7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+	0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+	0x03E7A000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+	0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+	0x03F7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x03E7B000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x03F7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x03E7C000+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 14, Tf = 2484MHz */
 };
 
 static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 1, Tf = 2412MHz
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 2, Tf = 2417MHz
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 3, Tf = 2422MHz
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 4, Tf = 2427MHz
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 5, Tf = 2432MHz
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 6, Tf = 2437MHz
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 7, Tf = 2442MHz
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 8, Tf = 2447MHz
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 9, Tf = 2452MHz
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
-	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
-	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
-	0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 14, Tf = 2412M
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
+	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
+	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 4, Tf = 2427MHz */
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 5, Tf = 2432MHz */
+	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 6, Tf = 2437MHz */
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 7, Tf = 2442MHz */
+	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 8, Tf = 2447MHz */
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 9, Tf = 2452MHz */
+	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x0B333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x03333100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x06666100+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 14, Tf = 2484MHz */
 };
 
 static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
@@ -168,240 +168,240 @@
 	0x0407F900+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW
 };
 
-// 40MHz reference frequency
-// Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
+/* 40MHz reference frequency
+ * Need to pull PLLON(PE3) low when writing channel registers through 3-wire. */
 static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel1 // Need modify for 11a
-	0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 451FE2
-	0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 5FDFA3
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11b/g    // Need modify for 11a
-	// RoberYu:20050113, Rev0.47 Regsiter Setting Guide
-	0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 8D1B55
+	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
+	0x841FF200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
+	0x3FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 5FDFA3 */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11b/g    // Need modify for 11a */
+	/* RoberYu:20050113, Rev0.47 Register Setting Guide */
+	0x802B5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 8D1B55 */
 	0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 860207
+	0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 860207 */
 	0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: E0600A
-	0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10)
-	// RoberYu:20050113, Rev0.47 Regsiter Setting Guide
-	0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11a: 00143C
+	0xE0000A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: E0600A */
+	0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+	/* RoberYu:20050113, Rev0.47 Register Setting Guide */
+	0x000A3C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11a: 00143C */
 	0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // Need modify for 11a: 12BACF
+	0x1ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* Need modify for 11a: 12BACF */
 };
 
 static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Channel184 // Need modify for 11b/g
-	0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
-	0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
-	0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // 11a    // Need modify for 11b/g
-	0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g, RoberYu:20050113
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
+	0x451FE200+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x5FDFA300+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x67F78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* 11a    // Need modify for 11b/g */
+	0x853F5500+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g, RoberYu:20050113 */
 	0x56AF3600+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
+	0xCE020700+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
 	0x6EBC0800+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x221BB900+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
-	0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10)
-	0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // Need modify for 11b/g
+	0xE0600A00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
+	0x08031B00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* init 0x080B1B00 => 0x080F1B00 for 3 wire control TxGain(D10) */
+	0x00147C00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* Need modify for 11b/g */
 	0xFFFFFD00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
 	0x00000E00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW,
-	0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // Need modify for 11b/g
+	0x12BACF00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* Need modify for 11b/g */
 };
 
 static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  1, Tf = 2412MHz
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  2, Tf = 2417MHz
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  3, Tf = 2422MHz
-	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  4, Tf = 2427MHz
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  5, Tf = 2432MHz
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  6, Tf = 2437MHz
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  7, Tf = 2442MHz
-	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49
-	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49
-	0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49
-	0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz
+	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
+	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
+	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
+	0x00379000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
+	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
+	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
+	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
+	0x0037A000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037B000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz //RobertYu: 20050218, update for APNode 0.49 */
+	0x0037C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
 
-	// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22)
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
-	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
-	0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
-	0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)
+	/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22) */
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+	0x0FF52000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+	0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+	0x0FF53000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
 
-	// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-	// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56)
+	/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56) */
 
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   7, Tf = 5035MHz (23)
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   8, Tf = 5040MHz (24)
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   9, Tf = 5045MHz (25)
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  11, Tf = 5055MHz (26)
-	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  12, Tf = 5060MHz (27)
-	0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  16, Tf = 5080MHz (28)
-	0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  34, Tf = 5170MHz (29)
-	0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  36, Tf = 5180MHz (30)
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  40, Tf = 5200MHz (32)
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  42, Tf = 5210MHz (33)
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  44, Tf = 5220MHz (34)
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  46, Tf = 5230MHz (35)
-	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  48, Tf = 5240MHz (36)
-	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  52, Tf = 5260MHz (37)
-	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  56, Tf = 5280MHz (38)
-	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  60, Tf = 5300MHz (39)
-	0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  64, Tf = 5320MHz (40)
+	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
+	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
+	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
+	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
+	0x0FF54000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
+	0x0FF55000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
+	0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
+	0x0FF56000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
+	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) //RobertYu: 20050218, update for APNode 0.49 */
+	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
+	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
+	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
+	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
+	0x0FF57000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
+	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
+	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
+	0x0FF58000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
+	0x0FF59000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
 
-	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
-	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
-	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
-	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
-	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
-	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
-	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
-	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
-	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
-	0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
-	0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
-	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
-	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
-	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
-	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
-	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
+	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+	0x0FF5C000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+	0x0FF5D000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+	0x0FF5E000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+	0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+	0x0FF5F000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+	0x0FF60000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+	0x0FF61000+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
 };
 
 static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  1, Tf = 2412MHz
-	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  2, Tf = 2417MHz
-	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  3, Tf = 2422MHz
-	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  4, Tf = 2427MHz
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  5, Tf = 2432MHz
-	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  6, Tf = 2437MHz
-	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  7, Tf = 2442MHz
-	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  8, Tf = 2447MHz
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  9, Tf = 2452MHz
-	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
-	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
-	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
-	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
-	0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz
+	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
+	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
+	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
+	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
+	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
+	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
+	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
+	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz */
+	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz */
+	0x1B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x03333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x0B333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x13333100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x06666100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
 
-	// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22)
-	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
-	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
-	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)
+	/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22) */
+	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
 
-	// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-	// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56)
-	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   7, Tf = 5035MHz (23)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   8, Tf = 5040MHz (24)
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   9, Tf = 5045MHz (25)
-	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  11, Tf = 5055MHz (26)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  12, Tf = 5060MHz (27)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  16, Tf = 5080MHz (28)
-	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  34, Tf = 5170MHz (29)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  36, Tf = 5180MHz (30)
-	0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  38, Tf = 5190MHz (31)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  40, Tf = 5200MHz (32)
-	0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  42, Tf = 5210MHz (33)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  44, Tf = 5220MHz (34)
-	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  46, Tf = 5230MHz (35)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  48, Tf = 5240MHz (36)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  52, Tf = 5260MHz (37)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  56, Tf = 5280MHz (38)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  60, Tf = 5300MHz (39)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  64, Tf = 5320MHz (40)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
-	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
-	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
-	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
-	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
-	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
-	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
-	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
+	/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56) */
+	0x1D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
+	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
+	0x08000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
+	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
+	0x10000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
+	0x1AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
+	0x05555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+	0x0AAAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+	0x15555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+	0x00000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+	0x0D555100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+	0x18000100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+	0x02AAA100+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
 };
 
 static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  1, Tf = 2412MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  2, Tf = 2417MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  3, Tf = 2422MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  4, Tf = 2427MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  5, Tf = 2432MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  6, Tf = 2437MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  7, Tf = 2442MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  8, Tf = 2447MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  9, Tf = 2452MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 10, Tf = 2457MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 11, Tf = 2462MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 12, Tf = 2467MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 13, Tf = 2472MHz
-	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 14, Tf = 2484MHz
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  1, Tf = 2412MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  2, Tf = 2417MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  3, Tf = 2422MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  4, Tf = 2427MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  5, Tf = 2432MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  6, Tf = 2437MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  7, Tf = 2442MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  8, Tf = 2447MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  9, Tf = 2452MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 10, Tf = 2457MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 11, Tf = 2462MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 12, Tf = 2467MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 13, Tf = 2472MHz */
+	0x7FD78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 14, Tf = 2484MHz */
 
-	// 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 183, Tf = 4915MHz (15)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 184, Tf = 4920MHz (16)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 185, Tf = 4925MHz (17)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 187, Tf = 4935MHz (18)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 188, Tf = 4940MHz (19)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 189, Tf = 4945MHz (20)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 192, Tf = 4960MHz (21)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 196, Tf = 4980MHz (22)
+	/* 4.9G => Ch 183, 184, 185, 187, 188, 189, 192, 196  (Value:15 ~ 22) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 183, Tf = 4915MHz (15) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 184, Tf = 4920MHz (16) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 185, Tf = 4925MHz (17) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 187, Tf = 4935MHz (18) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 188, Tf = 4940MHz (19) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 189, Tf = 4945MHz (20) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 192, Tf = 4960MHz (21) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 196, Tf = 4980MHz (22) */
 
-	// 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
-	// 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   7, Tf = 5035MHz (23)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   8, Tf = 5040MHz (24)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =   9, Tf = 5045MHz (25)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  11, Tf = 5055MHz (26)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  12, Tf = 5060MHz (27)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  16, Tf = 5080MHz (28)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  34, Tf = 5170MHz (29)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  36, Tf = 5180MHz (30)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  38, Tf = 5190MHz (31)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  40, Tf = 5200MHz (32)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  42, Tf = 5210MHz (33)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  44, Tf = 5220MHz (34)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  46, Tf = 5230MHz (35)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  48, Tf = 5240MHz (36)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  52, Tf = 5260MHz (37)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  56, Tf = 5280MHz (38)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  60, Tf = 5300MHz (39)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel =  64, Tf = 5320MHz (40)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 100, Tf = 5500MHz (41)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 104, Tf = 5520MHz (42)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 108, Tf = 5540MHz (43)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 112, Tf = 5560MHz (44)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 116, Tf = 5580MHz (45)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 120, Tf = 5600MHz (46)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 124, Tf = 5620MHz (47)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 128, Tf = 5640MHz (48)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 132, Tf = 5660MHz (49)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 136, Tf = 5680MHz (50)
-	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 140, Tf = 5700MHz (51)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 149, Tf = 5745MHz (52)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 153, Tf = 5765MHz (53)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 157, Tf = 5785MHz (54)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, // channel = 161, Tf = 5805MHz (55)
-	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  // channel = 165, Tf = 5825MHz (56)
+	/* 5G => Ch 7, 8, 9, 11, 12, 16, 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64,
+	 * 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165  (Value 23 ~ 56) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   7, Tf = 5035MHz (23) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   8, Tf = 5040MHz (24) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =   9, Tf = 5045MHz (25) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  11, Tf = 5055MHz (26) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  12, Tf = 5060MHz (27) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  16, Tf = 5080MHz (28) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  34, Tf = 5170MHz (29) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  36, Tf = 5180MHz (30) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  38, Tf = 5190MHz (31) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  40, Tf = 5200MHz (32) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  42, Tf = 5210MHz (33) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  44, Tf = 5220MHz (34) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  46, Tf = 5230MHz (35) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  48, Tf = 5240MHz (36) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  52, Tf = 5260MHz (37) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  56, Tf = 5280MHz (38) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  60, Tf = 5300MHz (39) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel =  64, Tf = 5320MHz (40) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 100, Tf = 5500MHz (41) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 104, Tf = 5520MHz (42) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 108, Tf = 5540MHz (43) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 112, Tf = 5560MHz (44) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 116, Tf = 5580MHz (45) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 120, Tf = 5600MHz (46) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 124, Tf = 5620MHz (47) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 128, Tf = 5640MHz (48) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 132, Tf = 5660MHz (49) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 136, Tf = 5680MHz (50) */
+	0x67D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 140, Tf = 5700MHz (51) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 149, Tf = 5745MHz (52) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 153, Tf = 5765MHz (53) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 157, Tf = 5785MHz (54) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW, /* channel = 161, Tf = 5805MHz (55) */
+	0x77D78400+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW  /* channel = 165, Tf = 5825MHz (56) */
 };
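
The three AL7230 tables above are parallel: a channel switch picks the same index from each and writes the resulting words to the RF chip. The sketch below is a hypothetical helper modelled on s_bAL7230SelectChannel (touched later in this patch); as the comment before the init table notes, the real code also pulls PLLON (PE3) low around these writes.

static bool al7230_write_channel(struct vnt_private *priv, unsigned char ch)
{
	bool ret = true;

	if (ch == 0 || ch > CB_MAX_CHANNEL)
		return false;

	/* one word from each table programs a different AL7230 register */
	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[ch - 1]);
	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[ch - 1]);
	ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[ch - 1]);

	return ret;
}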
 
 /*
@@ -438,13 +438,13 @@
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
 	/* Calibration */
-	MACvTimer0MicroSDelay(dwIoBase, 150);//150us
+	MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
 	/* TXDCOC:active, RCK:disable */
 	bResult &= IFRFbWriteEmbedded(priv, (0x9ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
 	/* TXDCOC:disable, RCK:active */
 	bResult &= IFRFbWriteEmbedded(priv, (0x3ABA8F00+(BY_AL7230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
 	/* TXDCOC:disable, RCK:disable */
 	bResult &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ-1]);
 
@@ -457,7 +457,7 @@
 
 	/* PE1: TX_ON, PE2: RX_ON, PE3: PLLON */
 	/* 3-wire control for power saving mode */
-	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
+	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
 
 	return bResult;
 }
@@ -557,16 +557,16 @@
 
 	for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
 		bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
-	MACvTimer0MicroSDelay(dwIoBase, 30); //delay 30 us
+	MACvTimer0MicroSDelay(dwIoBase, 30); /* delay 30 us */
 
 	/* PLL On */
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
 
-	MACvTimer0MicroSDelay(dwIoBase, 150);//150us
+	MACvTimer0MicroSDelay(dwIoBase, 150);/* 150us */
 	bResult &= IFRFbWriteEmbedded(priv, (0x00d80f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
 	bResult &= IFRFbWriteEmbedded(priv, (0x00780f00+(BY_AL2230_REG_LEN<<3)+IFREGCTL_REGW));
-	MACvTimer0MicroSDelay(dwIoBase, 30);//30us
+	MACvTimer0MicroSDelay(dwIoBase, 30);/* 30us */
 	bResult &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[CB_AL2230_INIT_SEQ-1]);
 
 	MACvWordRegBitsOn(dwIoBase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3    |
@@ -575,7 +575,7 @@
 							 SOFTPWRCTL_TXPEINV));
 
 	/* 3-wire control for power saving mode */
-	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); //1100 0000
+	VNSvOutPortB(dwIoBase + MAC_REG_PSPWRSIG, (PSSIG_WPE3 | PSSIG_WPE2)); /* 1100 0000 */
 
 	return bResult;
 }
@@ -661,11 +661,11 @@
 	case RF_AL2230S:
 		bResult = RFbAL2230SelectChannel(priv, byChannel);
 		break;
-		//{{ RobertYu: 20050104
+		/*{{ RobertYu: 20050104 */
 	case RF_AIROHA7230:
 		bResult = s_bAL7230SelectChannel(priv, byChannel);
 		break;
-		//}} RobertYu
+		/*}} RobertYu */
 	case RF_NOTHING:
 		bResult = true;
 		break;
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 2ea21e2..b5fc3ee 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -33,18 +33,18 @@
 #include "device.h"
 
 /*---------------------  Export Definitions -------------------------*/
-//
-// Baseband RF pair definition in eeprom (Bits 6..0)
-//
+/*
+ * Baseband RF pair definition in eeprom (Bits 6..0)
+ */
 #define RF_RFMD2959             0x01
 #define RF_MAXIMAG              0x02
 #define RF_AIROHA               0x03
 
 #define RF_UW2451               0x05
 #define RF_MAXIMG               0x06
-#define RF_MAXIM2829            0x07 // RobertYu: 20041118
-#define RF_UW2452               0x08 // RobertYu: 20041210
-#define RF_AIROHA7230           0x0a // RobertYu: 20050104
+#define RF_MAXIM2829            0x07 /* RobertYu: 20041118 */
+#define RF_UW2452               0x08 /* RobertYu: 20041210 */
+#define RF_AIROHA7230           0x0a /* RobertYu: 20050104 */
 #define RF_UW2453               0x0b
 
 #define RF_VT3226               0x09
@@ -63,9 +63,9 @@
 #define ZONE_MKK                6
 #define ZONE_ISRAEL             7
 
-//[20050104] CB_MAXIM2829_CHANNEL_5G_HIGH, CB_UW2452_CHANNEL_5G_HIGH: 40==>41
-#define CB_MAXIM2829_CHANNEL_5G_HIGH    41 //Index41: channel = 100, Tf = 5500MHz, set the (A3:A0=0101) D6=1
-#define CB_UW2452_CHANNEL_5G_HIGH       41 //[20041210] Index41: channel = 100, Tf = 5500MHz, change VCO2->VCO3
+/* [20050104] CB_MAXIM2829_CHANNEL_5G_HIGH, CB_UW2452_CHANNEL_5G_HIGH: 40==>41 */
+#define CB_MAXIM2829_CHANNEL_5G_HIGH    41 /* Index41: channel = 100, Tf = 5500MHz, set the (A3:A0=0101) D6=1 */
+#define CB_UW2452_CHANNEL_5G_HIGH       41 /* [20041210] Index41: channel = 100, Tf = 5500MHz, change VCO2->VCO3 */
 
 /*---------------------  Export Classes  ----------------------------*/
 
@@ -93,8 +93,8 @@
 	long    *pldBm
 );
 
-//{{ RobertYu: 20050104
+/* {{ RobertYu: 20050104 */
 bool RFbAL7230SelectChannelPostProcess(struct vnt_private *, u16, u16);
-//}} RobertYu
+/* }} RobertYu */
 
-#endif // __RF_H__
+#endif /* __RF_H__ */
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 534338c..5875d65 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -130,7 +130,7 @@
 static unsigned int
 s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
 		  unsigned char *pbyTxBufferAddr,
-		  unsigned int uDMAIdx, PSTxDesc pHeadTD,
+		  unsigned int uDMAIdx, struct vnt_tx_desc *pHeadTD,
 		  unsigned int uNodeIndex);
 
 static
@@ -387,7 +387,6 @@
 		break;
 	}
 
-	ASSERT(false);
 	return 0;
 }
 
@@ -1028,10 +1027,10 @@
 static unsigned int
 s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
 		  unsigned char *pbyTxBufferAddr,
-		  unsigned int uDMAIdx, PSTxDesc pHeadTD,
+		  unsigned int uDMAIdx, struct vnt_tx_desc *pHeadTD,
 		  unsigned int is_pspoll)
 {
-	PDEVICE_TD_INFO td_info = pHeadTD->pTDInfo;
+	struct vnt_td_info *td_info = pHeadTD->td_info;
 	struct sk_buff *skb = td_info->skb;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1048,7 +1047,7 @@
 	unsigned int cbReqCount = 0;
 	bool bNeedACK = (bool)(fifo_ctl & FIFOCTL_NEEDACK);
 	bool bRTS = (bool)(fifo_ctl & FIFOCTL_RTS);
-	PSTxDesc       ptdCurr;
+	struct vnt_tx_desc *ptdCurr;
 	unsigned int cbHeaderLength = 0;
 	void *pvRrvTime;
 	struct vnt_mic_hdr *pMICHDR;
@@ -1089,7 +1088,7 @@
 
 
 	/* Set RrvTime/RTS/CTS Buffer */
-	wTxBufSize = sizeof(STxBufHead);
+	wTxBufSize = sizeof(struct vnt_tx_fifo_head);
 	if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {/* 802.11g packet */
 
 		if (byFBOption == AUTO_FB_NONE) {
@@ -1193,17 +1192,15 @@
 	hdr->duration_id = uDuration;
 
 	cbReqCount = cbHeaderLength + uPadding + skb->len;
-	pbyBuffer = (unsigned char *)pHeadTD->pTDInfo->buf;
+	pbyBuffer = (unsigned char *)pHeadTD->td_info->buf;
 	uLength = cbHeaderLength + uPadding;
 
 	/* Copy the Packet into a tx Buffer */
 	memcpy((pbyBuffer + uLength), skb->data, skb->len);
 
-	ptdCurr = (PSTxDesc)pHeadTD;
+	ptdCurr = pHeadTD;
 
-	ptdCurr->pTDInfo->dwReqCount = cbReqCount;
-	ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
-	ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
+	ptdCurr->td_info->req_count = (u16)cbReqCount;
 
 	return cbHeaderLength;
 }
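
This and the following rxtx.c hunks replace the Hungarian pointer typedefs (PSTxDesc, PDEVICE_TD_INFO) with explicit struct types, matching kernel coding style, which discourages typedefs that hide pointers. The shapes below are illustrative only and list just the members referenced in these hunks; the real definitions live elsewhere in the driver and carry more fields.

struct vnt_td_info {
	struct sk_buff *skb;	/* frame being transmitted */
	unsigned char *buf;	/* tx buffer the header and payload are copied into */
	u16 req_count;		/* byte count handed to the descriptor */
	/* ... */
};

struct vnt_tx_desc {
	struct vnt_td_info *td_info;
	/* ... */
};
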
@@ -1276,9 +1273,9 @@
 }
 
 int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
-			     PSTxDesc head_td, struct sk_buff *skb)
+			     struct vnt_tx_desc *head_td, struct sk_buff *skb)
 {
-	PDEVICE_TD_INFO td_info = head_td->pTDInfo;
+	struct vnt_td_info *td_info = head_td->td_info;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_tx_rate *tx_rate = &info->control.rates[0];
 	struct ieee80211_rate *rate;
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index b9bd163..1e30ecb 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -192,9 +192,9 @@
 } __packed;
 
 int vnt_generate_fifo_header(struct vnt_private *, u32,
-			     PSTxDesc head_td, struct sk_buff *);
+			     struct vnt_tx_desc *head_td, struct sk_buff *);
 int vnt_beacon_make(struct vnt_private *, struct ieee80211_vif *);
 int vnt_beacon_enable(struct vnt_private *, struct ieee80211_vif *,
 		      struct ieee80211_bss_conf *);
 
-#endif // __RXTX_H__
+#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6655/upc.h b/drivers/staging/vt6655/upc.h
index cc63dc8..85fe046 100644
--- a/drivers/staging/vt6655/upc.h
+++ b/drivers/staging/vt6655/upc.h
@@ -37,35 +37,23 @@
 /* For memory mapped IO */
 
 
-#define VNSvInPortB(dwIOAddress, pbyData)				\
-do {									\
-	*(pbyData) = ioread8(dwIOAddress);				\
-} while (0)
+#define VNSvInPortB(dwIOAddress, pbyData) \
+	(*(pbyData) = ioread8(dwIOAddress))
 
-#define VNSvInPortW(dwIOAddress, pwData)				\
-do {									\
-	*(pwData) = ioread16(dwIOAddress);				\
-} while (0)
+#define VNSvInPortW(dwIOAddress, pwData) \
+	(*(pwData) = ioread16(dwIOAddress))
 
-#define VNSvInPortD(dwIOAddress, pdwData)				\
-do {									\
-	*(pdwData) = ioread32(dwIOAddress);				\
-} while (0)
+#define VNSvInPortD(dwIOAddress, pdwData) \
+	(*(pdwData) = ioread32(dwIOAddress))
 
-#define VNSvOutPortB(dwIOAddress, byData)				\
-do {									\
-	iowrite8((u8)byData, dwIOAddress);				\
-} while (0)
+#define VNSvOutPortB(dwIOAddress, byData) \
+	iowrite8((u8)(byData), dwIOAddress)
 
-#define VNSvOutPortW(dwIOAddress, wData)				\
-do {									\
-	iowrite16((u16)wData, dwIOAddress);				\
-} while (0)
+#define VNSvOutPortW(dwIOAddress, wData) \
+	iowrite16((u16)(wData), dwIOAddress)
 
-#define VNSvOutPortD(dwIOAddress, dwData)				\
-do {									\
-	iowrite32((u32)dwData, dwIOAddress);				\
-} while (0)
+#define VNSvOutPortD(dwIOAddress, dwData) \
+	iowrite32((u32)(dwData), dwIOAddress)
 
 #define PCAvDelayByIO(uDelayUnit)				\
 do {								\
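
Each of the old do { } while (0) wrappers held a single expression, so they can safely become plain parenthesized expressions. The rewrite also parenthesizes the cast operand, which matters when the argument is a compound expression; the caller below is hypothetical and only illustrates that point.

/*
 * Hypothetical caller, writing the high byte of a 16-bit value:
 *
 *	VNSvOutPortB(iobase + 1, wValue >> 8);
 *
 * The old body would expand this to iowrite8((u8)wValue >> 8, ...): the
 * cast binds tighter than the shift, so the value is truncated to its
 * low byte first and the shifted result is always 0.  With the operand
 * parenthesized, (u8)(wValue >> 8), the high byte is written as intended.
 */
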
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 8116791..da075f4 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -45,8 +45,11 @@
 #include "usbpipe.h"
 
 static const u16 vnt_time_stampoff[2][MAX_RATE] = {
-	{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23},/* Long Preamble */
-	{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},/* Short Preamble */
+	/* Long Preamble */
+	{384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23},
+
+	/* Short Preamble */
+	{384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23},
 };
 
 static const u16 vnt_fb_opt0[2][5] = {
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index 062d9c5..51bbf46 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -37,17 +37,26 @@
 	bool "SDIO support"
 	depends on MMC
 	---help---
-	  This module adds support for the SDIO interface
-	  of adapters using WILC chipset. Select this if
-	  your platform is using the SDIO bus.
+	  This module adds support for the SDIO interface of adapters using
+	  the WILC1000 chipset. The Atmel WILC1000 SDIO is a full-speed
+	  interface that meets SDIO card specification version 2.0. It supports
+	  the 1-bit/4-bit SD transfer modes at clock rates of 0-50 MHz.
+	  The host can use this interface to read from and write to any register
+	  within the chip as well as to configure the WILC1000 for data DMA.
+	  To use this interface, pin 9 (SDIO_SPI_CFG) must be grounded. Select
+	  this if your platform is using the SDIO bus.
 
 	config WILC1000_SPI
 	depends on SPI
 	bool "SPI support"
 	---help---
-	  This module adds support for the SPI interface
-	  of adapters using WILC chipset. Select this if
-	  your platform is using the SPI bus.
+	  This module adds support for the SPI interface of adapters using
+	  the WILC1000 chipset. The Atmel WILC1000 has a Serial Peripheral
+	  Interface (SPI) that operates as an SPI slave. This SPI interface can
+	  be used for control and for serial I/O of 802.11 data. The SPI is a
+	  full-duplex slave synchronous serial interface that is available
+	  immediately following reset when pin 9 (SDIO_SPI_CFG) is tied to
+	  VDDIO. Select this if your platform is using the SPI bus.
 endchoice
 
 config WILC1000_HW_OOB_INTR
@@ -55,5 +64,8 @@
 	depends on WILC1000 && WILC1000_SDIO
 	default n
 	---help---
-	  If your platform don't recognize SDIO IRQ, connect chipset external IRQ pin
-	  and check this option. Or, Use this to get all interrupts including SDIO interrupts.
+	  This option enables out-of-band interrupt support for the WILC1000
+	  chipset. This OOB interrupt is intended to provide a faster interrupt
+	  mechanism for SDIO host controllers that don't support SDIO interrupts.
+	  Select this option if the SDIO host controller in your platform
+	  doesn't support SDIO time division interrupts.
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index a78c4d5..6be8a92 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -25,10 +25,10 @@
 ccflags-$(CONFIG_WILC1000_DYNAMICALLY_ALLOCATE_MEMROY) += -DWILC_NORMAL_ALLOC
 
 
-wilc1000-objs := wilc_wfi_netdevice.o wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
-			wilc_memory.o wilc_msgqueue.o wilc_sleep.o wilc_strutils.o \
-			wilc_timer.o coreconfigurator.o host_interface.o \
-			fifo_buffer.o wilc_sdio.o wilc_spi.o wilc_wlan_cfg.o wilc_debugfs.o
+wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
+			wilc_memory.o wilc_msgqueue.o \
+			coreconfigurator.o host_interface.o \
+			wilc_sdio.o wilc_spi.o wilc_wlan_cfg.o wilc_debugfs.o
 
 wilc1000-$(CONFIG_WILC1000_SDIO) += linux_wlan_sdio.o
 wilc1000-$(CONFIG_WILC1000_SPI) += linux_wlan_spi.o
diff --git a/drivers/staging/wilc1000/coreconfigsimulator.h b/drivers/staging/wilc1000/coreconfigsimulator.h
deleted file mode 100644
index 5e01f8e..0000000
--- a/drivers/staging/wilc1000/coreconfigsimulator.h
+++ /dev/null
@@ -1,17 +0,0 @@
-
-/*!
- *  @file	coreconfigsimulator.h
- *  @brief
- *  @author
- *  @sa		coreconfigsimulator.c
- *  @date	1 Mar 2012
- *  @version	1.0
- */
-
-#ifndef CORECONFIGSIMULATOR_H
-#define CORECONFIGSIMULATOR_H
-
-extern s32 CoreConfigSimulatorInit(void);
-extern s32 CoreConfigSimulatorDeInit(void);
-
-#endif
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c
index ed6ac45..16a0abc 100644
--- a/drivers/staging/wilc1000/coreconfigurator.c
+++ b/drivers/staging/wilc1000/coreconfigurator.c
@@ -167,7 +167,6 @@
 static struct semaphore SemHandleSendPkt;
 static struct semaphore SemHandlePktResp;
 
-static s8 *gps8ConfigPacket;
 
 static tstrConfigPktInfo gstrConfigPktInfo;
 
@@ -544,21 +543,21 @@
 /* header and updates the MAC Address in the allocated 'addr' variable.      */
 INLINE void get_address1(u8 *pu8msa, u8 *addr)
 {
-	WILC_memcpy(addr, pu8msa + 4, 6);
+	memcpy(addr, pu8msa + 4, 6);
 }
 
 /* This function extracts the MAC Address in 'address2' field of the MAC     */
 /* header and updates the MAC Address in the allocated 'addr' variable.      */
 INLINE void get_address2(u8 *pu8msa, u8 *addr)
 {
-	WILC_memcpy(addr, pu8msa + 10, 6);
+	memcpy(addr, pu8msa + 10, 6);
 }
 
 /* This function extracts the MAC Address in 'address3' field of the MAC     */
 /* header and updates the MAC Address in the allocated 'addr' variable.      */
 INLINE void get_address3(u8 *pu8msa, u8 *addr)
 {
-	WILC_memcpy(addr, pu8msa + 16, 6);
+	memcpy(addr, pu8msa + 16, 6);
 }
 
 /* This function extracts the BSSID from the incoming WLAN packet based on   */
@@ -605,7 +604,7 @@
 {
 	u16 cap_info = 0;
 	u16 index    = MAC_HDR_LEN;
-	tenuFrmSubtype st = BEACON;
+	tenuFrmSubtype st;
 
 	st = get_sub_type(data);
 
@@ -674,17 +673,8 @@
 	sema_init(&SemHandleSendPkt, 1);
 	sema_init(&SemHandlePktResp, 0);
 
-	gps8ConfigPacket = (s8 *)WILC_MALLOC(MAX_PACKET_BUFF_SIZE);
-	if (gps8ConfigPacket == NULL) {
-		PRINT_ER("failed in gps8ConfigPacket allocation\n");
-		s32Error = WILC_NO_MEM;
-		goto _fail_;
-	}
 
-	WILC_memset((void *)gps8ConfigPacket, 0, MAX_PACKET_BUFF_SIZE);
-
-	WILC_memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
-_fail_:
+	memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
 	return s32Error;
 }
 
@@ -706,11 +696,10 @@
 
 	/* Search for the TIM Element Field and return if the element is found */
 	while (u16index < (u16RxLen - FCS_LEN)) {
-		if (pu8msa[u16index] == ITIM) {
+		if (pu8msa[u16index] == ITIM)
 			return &pu8msa[u16index];
-		} else {
+		else
 			u16index += (IE_HDR_LEN + pu8msa[u16index + 1]);
-		}
 	}
 
 	return 0;
@@ -811,8 +800,11 @@
 		u32 u32Tsf_Lo;
 		u32 u32Tsf_Hi;
 
-		pstrNetworkInfo = (tstrNetworkInfo *)WILC_MALLOC(sizeof(tstrNetworkInfo));
-		WILC_memset((void *)(pstrNetworkInfo), 0, sizeof(tstrNetworkInfo));
+		pstrNetworkInfo = kmalloc(sizeof(tstrNetworkInfo), GFP_KERNEL);
+		if (!pstrNetworkInfo)
+			return -ENOMEM;
+
+		memset((void *)(pstrNetworkInfo), 0, sizeof(tstrNetworkInfo));
 
 		pstrNetworkInfo->s8rssi = pu8WidVal[0];
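
The coreconfigurator.c hunks here and below replace the driver's WILC_MALLOC/WILC_memset/WILC_memcpy/WILC_FREE wrappers with the standard kernel kmalloc/memset/memcpy/kfree and add the previously missing -ENOMEM handling. A possible follow-up cleanup (not part of this patch) is to fold each kmalloc-plus-memset pair into a single kzalloc call:

		pstrNetworkInfo = kzalloc(sizeof(tstrNetworkInfo), GFP_KERNEL);
		if (!pstrNetworkInfo)
			return -ENOMEM;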
 
@@ -855,17 +847,19 @@
 
 		/* Get DTIM Period */
 		pu8TimElm = get_tim_elm(pu8msa, (u16RxLen + FCS_LEN), u8index);
-		if (pu8TimElm != 0) {
+		if (pu8TimElm != 0)
 			pstrNetworkInfo->u8DtimPeriod = pu8TimElm[3];
-		}
 		pu8IEs = &pu8msa[MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN];
 		u16IEsLen = u16RxLen - (MAC_HDR_LEN + TIME_STAMP_LEN + BEACON_INTERVAL_LEN + CAP_INFO_LEN);
 
 		if (u16IEsLen > 0) {
-			pstrNetworkInfo->pu8IEs = (u8 *)WILC_MALLOC(u16IEsLen);
-			WILC_memset((void *)(pstrNetworkInfo->pu8IEs), 0, u16IEsLen);
+			pstrNetworkInfo->pu8IEs = kmalloc(u16IEsLen, GFP_KERNEL);
+			if (!pstrNetworkInfo->pu8IEs)
+				return -ENOMEM;
 
-			WILC_memcpy(pstrNetworkInfo->pu8IEs, pu8IEs, u16IEsLen);
+			memset((void *)(pstrNetworkInfo->pu8IEs), 0, u16IEsLen);
+
+			memcpy(pstrNetworkInfo->pu8IEs, pu8IEs, u16IEsLen);
 		}
 		pstrNetworkInfo->u16IEsLen = u16IEsLen;
 
@@ -893,13 +887,13 @@
 
 	if (pstrNetworkInfo != NULL) {
 		if (pstrNetworkInfo->pu8IEs != NULL) {
-			WILC_FREE(pstrNetworkInfo->pu8IEs);
+			kfree(pstrNetworkInfo->pu8IEs);
 			pstrNetworkInfo->pu8IEs = NULL;
 		} else {
 			s32Error = WILC_FAIL;
 		}
 
-		WILC_FREE(pstrNetworkInfo);
+		kfree(pstrNetworkInfo);
 		pstrNetworkInfo = NULL;
 
 	} else {
@@ -929,8 +923,11 @@
 	u8 *pu8IEs = 0;
 	u16 u16IEsLen = 0;
 
-	pstrConnectRespInfo = (tstrConnectRespInfo *)WILC_MALLOC(sizeof(tstrConnectRespInfo));
-	WILC_memset((void *)(pstrConnectRespInfo), 0, sizeof(tstrConnectRespInfo));
+	pstrConnectRespInfo = kmalloc(sizeof(tstrConnectRespInfo), GFP_KERNEL);
+	if (!pstrConnectRespInfo)
+		return -ENOMEM;
+
+	memset((void *)(pstrConnectRespInfo), 0, sizeof(tstrConnectRespInfo));
 
 	/* u16AssocRespLen = pu8Buffer[0]; */
 	u16AssocRespLen = (u16)u32BufferLen;
@@ -949,10 +946,13 @@
 		pu8IEs = &pu8Buffer[CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN];
 		u16IEsLen = u16AssocRespLen - (CAP_INFO_LEN + STATUS_CODE_LEN + AID_LEN);
 
-		pstrConnectRespInfo->pu8RespIEs = (u8 *)WILC_MALLOC(u16IEsLen);
-		WILC_memset((void *)(pstrConnectRespInfo->pu8RespIEs), 0, u16IEsLen);
+		pstrConnectRespInfo->pu8RespIEs = kmalloc(u16IEsLen, GFP_KERNEL);
+		if (!pstrConnectRespInfo->pu8RespIEs)
+			return -ENOMEM;
 
-		WILC_memcpy(pstrConnectRespInfo->pu8RespIEs, pu8IEs, u16IEsLen);
+		memset((void *)(pstrConnectRespInfo->pu8RespIEs), 0, u16IEsLen);
+
+		memcpy(pstrConnectRespInfo->pu8RespIEs, pu8IEs, u16IEsLen);
 		pstrConnectRespInfo->u16RespIEsLen = u16IEsLen;
 	}
 
@@ -978,13 +978,13 @@
 
 	if (pstrConnectRespInfo != NULL) {
 		if (pstrConnectRespInfo->pu8RespIEs != NULL) {
-			WILC_FREE(pstrConnectRespInfo->pu8RespIEs);
+			kfree(pstrConnectRespInfo->pu8RespIEs);
 			pstrConnectRespInfo->pu8RespIEs = NULL;
 		} else {
 			s32Error = WILC_FAIL;
 		}
 
-		WILC_FREE(pstrConnectRespInfo);
+		kfree(pstrConnectRespInfo);
 		pstrConnectRespInfo = NULL;
 
 	} else {
@@ -1018,13 +1018,12 @@
 		}
 	}
 
-	pstrSurveyResults = (wid_site_survey_reslts_s *)WILC_MALLOC(u32SurveyResultsCount * sizeof(wid_site_survey_reslts_s));
-	if (pstrSurveyResults == NULL) {
-		u32SurveyResultsCount = 0;
-		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
+	pstrSurveyResults = kmalloc_array(u32SurveyResultsCount,
+				sizeof(wid_site_survey_reslts_s), GFP_KERNEL);
+	if (!pstrSurveyResults)
+		return -ENOMEM;
 
-	WILC_memset((void *)(pstrSurveyResults), 0, u32SurveyResultsCount * sizeof(wid_site_survey_reslts_s));
+	memset((void *)(pstrSurveyResults), 0, u32SurveyResultsCount * sizeof(wid_site_survey_reslts_s));
 
 	u32SurveyResultsCount = 0;
 
@@ -1039,7 +1038,7 @@
 		pu8BufferPtr += 2;
 
 		for (j = 0; j < u32SurveyBytesLength; j += SURVEY_RESULT_LENGTH) {
-			WILC_memcpy(&pstrSurveyResults[u32SurveyResultsCount], pu8BufferPtr, SURVEY_RESULT_LENGTH);
+			memcpy(&pstrSurveyResults[u32SurveyResultsCount], pu8BufferPtr, SURVEY_RESULT_LENGTH);
 			pu8BufferPtr += SURVEY_RESULT_LENGTH;
 			u32SurveyResultsCount++;
 		}
@@ -1058,7 +1057,7 @@
 	s32 s32Error = WILC_SUCCESS;
 
 	if (pstrSurveyResults != NULL) {
-		WILC_FREE(pstrSurveyResults);
+		kfree(pstrSurveyResults);
 	}
 
 	return s32Error;
@@ -1334,7 +1333,6 @@
 
 	if (g_oper_mode == SET_CFG) {
 		/* Message Length */
-		/* u16MsgLen = WILC_strlen(pu8val); */
 		u16MsgLen = (u16)s32ValueSize;
 
 		/* Length */
@@ -1441,8 +1439,6 @@
 void ProcessBinWid(char *pcPacket, s32 *ps32PktLen,
 		   tstrWID *pstrWID, u8 *pu8val, s32 s32ValueSize)
 {
-	/* WILC_ERROR("processing Binary WIDs is not supported\n"); */
-
 	u16 u16MsgLen = 0;
 	u16 idx    = 0;
 	s32 s32PktLen = *ps32PktLen;
@@ -1528,11 +1524,10 @@
 	u8 cfg_str[256] = {0};
 	tenuWIDtype enuWIDtype = WID_UNDEF;
 
-	if (process_wid_num) {
+	if (process_wid_num)
 		enuWIDtype = get_wid_type(g_wid_num);
-	} else {
+	else
 		enuWIDtype = gastrWIDs[cnt].enuWIDtype;
-	}
 
 
 	switch (enuWIDtype) {
@@ -1566,18 +1561,10 @@
 	}
 
 	case WID_STR:
-		WILC_memcpy(cfg_str, resp + idx, cfg_len);
+		memcpy(cfg_str, resp + idx, cfg_len);
 		/* cfg_str[cfg_len] = '\0'; //mostafa: no need currently for NULL termination */
-		if (process_wid_num) {
-			/*fprintf(out_file,"0x%4.4x = %s\n",g_wid_num,
-			 *                              cfg_str);*/
-		} else {
-			/*fprintf(out_file,"%s = %s\n",gastrWIDs[cnt].cfg_switch,
-			 *                           cfg_str);*/
-		}
-
 		if (pstrWIDresult->s32ValueSize >= cfg_len) {
-			WILC_memcpy(pstrWIDresult->ps8WidVal, cfg_str, cfg_len); /* mostafa: no need currently for the extra NULL byte */
+			memcpy(pstrWIDresult->ps8WidVal, cfg_str, cfg_len); /* mostafa: no need currently for the extra NULL byte */
 			pstrWIDresult->s32ValueSize = cfg_len;
 		} else {
 			PRINT_ER("allocated WID buffer length is smaller than the received WID Length\n");
@@ -1589,15 +1576,8 @@
 	case WID_ADR:
 		create_mac_addr(cfg_str, resp + idx);
 
-		WILC_strncpy(pstrWIDresult->ps8WidVal, cfg_str, WILC_strlen(cfg_str));
-		pstrWIDresult->ps8WidVal[WILC_strlen(cfg_str)] = '\0';
-		if (process_wid_num) {
-			/*fprintf(out_file,"0x%4.4x = %s\n",g_wid_num,
-			 *                              cfg_str);*/
-		} else {
-			/*fprintf(out_file,"%s = %s\n",gastrWIDs[cnt].cfg_switch,
-			 *                           cfg_str);*/
-		}
+		strncpy(pstrWIDresult->ps8WidVal, cfg_str, strlen(cfg_str));
+		pstrWIDresult->ps8WidVal[strlen(cfg_str)] = '\0';
 		break;
 
 	case WID_IP:
@@ -1606,18 +1586,11 @@
 				MAKE_WORD16(resp[idx + 2], resp[idx + 3])
 				);
 		conv_int_to_ip(cfg_str, cfg_int);
-		if (process_wid_num) {
-			/*fprintf(out_file,"0x%4.4x = %s\n",g_wid_num,
-			 *                              cfg_str);*/
-		} else {
-			/*fprintf(out_file,"%s = %s\n",gastrWIDs[cnt].cfg_switch,
-			 *                           cfg_str);*/
-		}
 		break;
 
 	case WID_BIN_DATA:
 		if (pstrWIDresult->s32ValueSize >= cfg_len) {
-			WILC_memcpy(pstrWIDresult->ps8WidVal, resp + idx, cfg_len);
+			memcpy(pstrWIDresult->ps8WidVal, resp + idx, cfg_len);
 			pstrWIDresult->s32ValueSize = cfg_len;
 		} else {
 			PRINT_ER("Allocated WID buffer length is smaller than the received WID Length Err(%d)\n", retval);
@@ -1739,7 +1712,6 @@
 s32 ParseWriteResponse(u8 *pu8RespBuffer)
 {
 	s32 s32Error = WILC_FAIL;
-	u16 u16RespLen   = 0;
 	u16 u16WIDtype = (u16)WID_NIL;
 
 	/* Check whether the received frame is a valid response */
@@ -1748,9 +1720,6 @@
 		return WILC_FAIL;
 	}
 
-	/* Extract Response Length */
-	u16RespLen = MAKE_WORD16(pu8RespBuffer[2], pu8RespBuffer[3]);
-
 	u16WIDtype = MAKE_WORD16(pu8RespBuffer[4], pu8RespBuffer[5]);
 
 	/* Check for WID_STATUS ID and then check the length and status value */
@@ -1898,104 +1867,21 @@
 		*ps32BytesRead = gstrConfigPktInfo.s32BytesRead;
 	}
 
-	WILC_memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
+	memset((void *)(&gstrConfigPktInfo), 0, sizeof(tstrConfigPktInfo));
 
 	return s32Error;
 }
 
-/**
- *  @brief              sends certain Configuration Packet based on the input WIDs pstrWIDs
- *                      and retrieves the packet response pu8RxResp
- *  @details
- *  @param[in]  pstrWIDs WIDs to be sent in the configuration packet
- *  @param[in]  u32WIDsCount number of WIDs to be sent in the configuration packet
- *  @param[out]         pu8RxResp The received Packet Response
- *  @param[out]         ps32RxRespLen Length of the received Packet Response
- *  @return     Error code indicating success/failure
- *  @note
- *  @author	mabubakr
- *  @date		1 Mar 2012
- *  @version	1.0
- */
-#ifdef SIMULATION
-s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
-			  u32 u32WIDsCount, bool bRespRequired, u32 drvHandler)
-{
-	s32 s32Error = WILC_SUCCESS;
-	s32 err = WILC_SUCCESS;
-	s32 s32ConfigPacketLen = 0;
-	s32 s32RcvdRespLen = 0;
-
-	down(&SemHandleSendPkt);
-
-	/*set the packet mode*/
-	g_oper_mode = u8Mode;
-
-	WILC_memset((void *)gps8ConfigPacket, 0, MAX_PACKET_BUFF_SIZE);
-
-	if (CreateConfigPacket(gps8ConfigPacket, &s32ConfigPacketLen, pstrWIDs, u32WIDsCount) != WILC_SUCCESS) {
-		s32Error = WILC_FAIL;
-		goto End_ConfigPkt;
-	}
-	/*bug 3878*/
-	gstrConfigPktInfo.pcRespBuffer = gps8ConfigPacket;
-	gstrConfigPktInfo.s32MaxRespBuffLen = MAX_PACKET_BUFF_SIZE;
-	PRINT_INFO(CORECONFIG_DBG, "GLOBAL =bRespRequired =%d\n", bRespRequired);
-	gstrConfigPktInfo.bRespRequired = bRespRequired;
-
-	s32Error = SendRawPacket(gps8ConfigPacket, s32ConfigPacketLen);
-	if (s32Error != WILC_SUCCESS) {
-		goto End_ConfigPkt;
-	}
-
-	WILC_memset((void *)gps8ConfigPacket, 0, MAX_PACKET_BUFF_SIZE);
-
-	ConfigWaitResponse(gps8ConfigPacket, MAX_PACKET_BUFF_SIZE, &s32RcvdRespLen, bRespRequired);
-
-
-	if (bRespRequired)	{
-		/* If the operating Mode is GET, then we expect a response frame from */
-		/* the driver. Hence start listening to the port for response         */
-		if (g_oper_mode == GET_CFG) {
-			#if 1
-			err = ParseResponse(gps8ConfigPacket, pstrWIDs);
-			if (err != 0) {
-				s32Error = WILC_FAIL;
-				goto End_ConfigPkt;
-			} else {
-				s32Error = WILC_SUCCESS;
-			}
-			#endif
-		} else {
-			err = ParseWriteResponse(gps8ConfigPacket);
-			if (err != WRITE_RESP_SUCCESS) {
-				s32Error = WILC_FAIL;
-				goto End_ConfigPkt;
-			} else {
-				s32Error = WILC_SUCCESS;
-			}
-		}
-
-
-	}
-
-
-End_ConfigPkt:
-	up(&SemHandleSendPkt);
-
-	return s32Error;
-}
-#endif
 s32 ConfigProvideResponse(char *pcRespBuffer, s32 s32RespLen)
 {
 	s32 s32Error = WILC_SUCCESS;
 
 	if (gstrConfigPktInfo.bRespRequired) {
 		if (s32RespLen <= gstrConfigPktInfo.s32MaxRespBuffLen) {
-			WILC_memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, s32RespLen);
+			memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, s32RespLen);
 			gstrConfigPktInfo.s32BytesRead = s32RespLen;
 		} else {
-			WILC_memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, gstrConfigPktInfo.s32MaxRespBuffLen);
+			memcpy(gstrConfigPktInfo.pcRespBuffer, pcRespBuffer, gstrConfigPktInfo.s32MaxRespBuffLen);
 			gstrConfigPktInfo.s32BytesRead = gstrConfigPktInfo.s32MaxRespBuffLen;
 			PRINT_ER("BusProvideResponse() Response greater than the prepared Buffer Size\n");
 		}
@@ -2069,17 +1955,10 @@
 
 	PRINT_D(CORECONFIG_DBG, "CoreConfiguratorDeInit()\n");
 
-	if (gps8ConfigPacket != NULL) {
-
-		WILC_FREE(gps8ConfigPacket);
-		gps8ConfigPacket = NULL;
-	}
 
 	return s32Error;
 }
 
-
-#ifndef SIMULATION
 /*Using the global handle of the driver*/
 extern wilc_wlan_oup_t *gpstrWlanOps;
 /**
@@ -2129,7 +2008,6 @@
 		/**
 		 *      get the value
 		 **/
-		/* WILC_Sleep(1000); */
 		counter = 0;
 		for (counter = 0; counter < u32WIDsCount; counter++) {
 			pstrWIDs[counter].s32ValueSize = gpstrWlanOps->wlan_cfg_get_value(
@@ -2153,4 +2031,3 @@
 
 	return ret;
 }
-#endif
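
The coreconfigurator.c hunks above converge on one pattern: the driver's WILC_MALLOC()/WILC_memset()/WILC_memcpy()/WILC_FREE() wrappers are replaced by the plain kernel primitives, and the previously missing out-of-memory checks now return -ENOMEM. A minimal sketch of that pattern, with a hypothetical example_info structure standing in for tstrNetworkInfo/tstrConnectRespInfo (kzalloc() would fold the kmalloc()+memset() pair into a single call):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_info {			/* stand-in for tstrNetworkInfo etc. */
	u8 rssi;
	u16 ies_len;
	u8 *ies;
};

static int example_parse(const u8 *src_ies, u16 ies_len,
			 struct example_info **out)
{
	struct example_info *info;

	info = kmalloc(sizeof(*info), GFP_KERNEL);	/* was WILC_MALLOC() */
	if (!info)
		return -ENOMEM;				/* check the old code lacked */
	memset(info, 0, sizeof(*info));			/* was WILC_memset(); kzalloc() does both */

	if (ies_len) {
		info->ies = kmalloc(ies_len, GFP_KERNEL);
		if (!info->ies) {
			kfree(info);			/* was WILC_FREE() */
			return -ENOMEM;
		}
		memcpy(info->ies, src_ies, ies_len);	/* was WILC_memcpy() */
		info->ies_len = ies_len;
	}

	*out = info;
	return 0;
}
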
diff --git a/drivers/staging/wilc1000/coreconfigurator.h b/drivers/staging/wilc1000/coreconfigurator.h
index 9059c8d..3af1935 100644
--- a/drivers/staging/wilc1000/coreconfigurator.h
+++ b/drivers/staging/wilc1000/coreconfigurator.h
@@ -8,7 +8,6 @@
  *  @version	1.0
  */
 
-
 #ifndef CORECONFIGURATOR_H
 #define CORECONFIGURATOR_H
 
@@ -42,7 +41,6 @@
 #define AID_LEN                 2
 #define IE_HDR_LEN              2
 
-
 /* Operating Mode: SET */
 #define SET_CFG              0
 /* Operating Mode: GET */
@@ -59,15 +57,12 @@
 #define MAC_CONNECTED                1
 #define MAC_DISCONNECTED             0
 
-
-
 /*****************************************************************************/
 /* Function Macros                                                           */
 /*****************************************************************************/
 #define MAKE_WORD16(lsb, msb) ((((u16)(msb) << 8) & 0xFF00) | (lsb))
 #define MAKE_WORD32(lsw, msw) ((((u32)(msw) << 16) & 0xFFFF0000) | (lsw))
 
-
 /*****************************************************************************/
 /* Type Definitions                                                                                                                       */
 /*****************************************************************************/
@@ -140,7 +135,6 @@
 	u16 u16RespIEsLen;
 } tstrConnectRespInfo;
 
-
 typedef struct {
 	u8 au8bssid[6];
 	u8 *pu8ReqIEs;
@@ -150,8 +144,6 @@
 	u16 u16ConnectStatus;
 } tstrConnectInfo;
 
-
-
 typedef struct {
 	u16 u16reason;
 	u8 *ie;
@@ -171,26 +163,27 @@
 } wid_site_survey_reslts_s;
 #endif
 
-extern s32 CoreConfiguratorInit(void);
-extern s32 CoreConfiguratorDeInit(void);
+s32 CoreConfiguratorInit(void);
+s32 CoreConfiguratorDeInit(void);
 
-extern s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
-				 u32 u32WIDsCount, bool bRespRequired, u32 drvHandler);
-extern s32 ParseNetworkInfo(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
-extern s32 DeallocateNetworkInfo(tstrNetworkInfo *pstrNetworkInfo);
+s32 SendConfigPkt(u8 u8Mode, tstrWID *pstrWIDs,
+		  u32 u32WIDsCount, bool bRespRequired, u32 drvHandler);
+s32 ParseNetworkInfo(u8 *pu8MsgBuffer, tstrNetworkInfo **ppstrNetworkInfo);
+s32 DeallocateNetworkInfo(tstrNetworkInfo *pstrNetworkInfo);
 
-extern s32 ParseAssocRespInfo(u8 *pu8Buffer, u32 u32BufferLen,
-				      tstrConnectRespInfo **ppstrConnectRespInfo);
-extern s32 DeallocateAssocRespInfo(tstrConnectRespInfo *pstrConnectRespInfo);
+s32 ParseAssocRespInfo(u8 *pu8Buffer, u32 u32BufferLen,
+		       tstrConnectRespInfo **ppstrConnectRespInfo);
+s32 DeallocateAssocRespInfo(tstrConnectRespInfo *pstrConnectRespInfo);
 
 #ifndef CONNECT_DIRECT
-extern s32 ParseSurveyResults(u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
-				      wid_site_survey_reslts_s **ppstrSurveyResults, u32 *pu32SurveyResultsCount);
-extern s32 DeallocateSurveyResults(wid_site_survey_reslts_s *pstrSurveyResults);
+s32 ParseSurveyResults(u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
+		       wid_site_survey_reslts_s **ppstrSurveyResults,
+		       u32 *pu32SurveyResultsCount);
+s32 DeallocateSurveyResults(wid_site_survey_reslts_s *pstrSurveyResults);
 #endif
 
-extern s32 SendRawPacket(s8 *pspacket, s32 s32PacketLen);
-extern void NetworkInfoReceived(u8 *pu8Buffer, u32 u32Length);
+s32 SendRawPacket(s8 *pspacket, s32 s32PacketLen);
+void NetworkInfoReceived(u8 *pu8Buffer, u32 u32Length);
 void GnrlAsyncInfoReceived(u8 *pu8Buffer, u32 u32Length);
 void host_int_ScanCompleteReceived(u8 *pu8Buffer, u32 u32Length);
 
diff --git a/drivers/staging/wilc1000/fifo_buffer.c b/drivers/staging/wilc1000/fifo_buffer.c
deleted file mode 100644
index b6c07cf..0000000
--- a/drivers/staging/wilc1000/fifo_buffer.c
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
-#include "fifo_buffer.h"
-
-
-
-u32 FIFO_InitBuffer(tHANDLE *hBuffer, u32 u32BufferLength)
-{
-	u32 u32Error = 0;
-	tstrFifoHandler *pstrFifoHandler = WILC_MALLOC (sizeof (tstrFifoHandler));
-	if (pstrFifoHandler) {
-		WILC_memset (pstrFifoHandler, 0, sizeof (tstrFifoHandler));
-		pstrFifoHandler->pu8Buffer = WILC_MALLOC (u32BufferLength);
-		if (pstrFifoHandler->pu8Buffer)	{
-			pstrFifoHandler->u32BufferLength = u32BufferLength;
-			WILC_memset (pstrFifoHandler->pu8Buffer, 0, u32BufferLength);
-			/* create semaphore */
-			sema_init(&pstrFifoHandler->SemBuffer, 1);
-			*hBuffer = pstrFifoHandler;
-		} else {
-			*hBuffer = NULL;
-			u32Error = 1;
-		}
-	} else {
-		u32Error = 1;
-	}
-	return u32Error;
-}
-u32 FIFO_DeInit(tHANDLE hFifo)
-{
-	u32 u32Error = 0;
-	tstrFifoHandler *pstrFifoHandler = (tstrFifoHandler *) hFifo;
-	if (pstrFifoHandler) {
-		if (pstrFifoHandler->pu8Buffer)	{
-			WILC_FREE (pstrFifoHandler->pu8Buffer);
-		} else {
-			u32Error = 1;
-		}
-
-		WILC_FREE (pstrFifoHandler);
-	} else {
-		u32Error = 1;
-	}
-	return u32Error;
-}
-
-u32 FIFO_ReadBytes(tHANDLE hFifo, u8 *pu8Buffer, u32 u32BytesToRead, u32 *pu32BytesRead)
-{
-	u32 u32Error = 0;
-	tstrFifoHandler *pstrFifoHandler = (tstrFifoHandler *) hFifo;
-	if (pstrFifoHandler && pu32BytesRead) {
-		if (pstrFifoHandler->u32TotalBytes) {
-			down(&pstrFifoHandler->SemBuffer);
-
-			if (u32BytesToRead > pstrFifoHandler->u32TotalBytes) {
-				*pu32BytesRead = pstrFifoHandler->u32TotalBytes;
-			} else {
-				*pu32BytesRead = u32BytesToRead;
-			}
-			if ((pstrFifoHandler->u32ReadOffset + u32BytesToRead) <= pstrFifoHandler->u32BufferLength) {
-				WILC_memcpy(pu8Buffer, pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32ReadOffset,
-					    *pu32BytesRead);
-				/* update read offset and total bytes */
-				pstrFifoHandler->u32ReadOffset += u32BytesToRead;
-				pstrFifoHandler->u32TotalBytes -= u32BytesToRead;
-
-			} else {
-				u32 u32FirstPart =
-					pstrFifoHandler->u32BufferLength - pstrFifoHandler->u32ReadOffset;
-				WILC_memcpy(pu8Buffer, pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32ReadOffset,
-					    u32FirstPart);
-				WILC_memcpy(pu8Buffer + u32FirstPart, pstrFifoHandler->pu8Buffer,
-					    u32BytesToRead - u32FirstPart);
-				/* update read offset and total bytes */
-				pstrFifoHandler->u32ReadOffset = u32BytesToRead - u32FirstPart;
-				pstrFifoHandler->u32TotalBytes -= u32BytesToRead;
-			}
-			up(&pstrFifoHandler->SemBuffer);
-		} else {
-			u32Error = 1;
-		}
-	} else {
-		u32Error = 1;
-	}
-	return u32Error;
-}
-
-u32 FIFO_WriteBytes(tHANDLE hFifo, u8 *pu8Buffer, u32 u32BytesToWrite, bool bForceOverWrite)
-{
-	u32 u32Error = 0;
-	tstrFifoHandler *pstrFifoHandler = (tstrFifoHandler *) hFifo;
-	if (pstrFifoHandler) {
-		if (u32BytesToWrite < pstrFifoHandler->u32BufferLength)	{
-			if ((pstrFifoHandler->u32TotalBytes + u32BytesToWrite) <= pstrFifoHandler->u32BufferLength ||
-			    bForceOverWrite) {
-				down(&pstrFifoHandler->SemBuffer);
-				if ((pstrFifoHandler->u32WriteOffset + u32BytesToWrite) <= pstrFifoHandler->u32BufferLength) {
-					WILC_memcpy(pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32WriteOffset, pu8Buffer,
-						    u32BytesToWrite);
-					/* update read offset and total bytes */
-					pstrFifoHandler->u32WriteOffset += u32BytesToWrite;
-					pstrFifoHandler->u32TotalBytes  += u32BytesToWrite;
-
-				} else {
-					u32 u32FirstPart =
-						pstrFifoHandler->u32BufferLength - pstrFifoHandler->u32WriteOffset;
-					WILC_memcpy(pstrFifoHandler->pu8Buffer + pstrFifoHandler->u32WriteOffset, pu8Buffer,
-						    u32FirstPart);
-					WILC_memcpy(pstrFifoHandler->pu8Buffer, pu8Buffer + u32FirstPart,
-						    u32BytesToWrite - u32FirstPart);
-					/* update read offset and total bytes */
-					pstrFifoHandler->u32WriteOffset = u32BytesToWrite - u32FirstPart;
-					pstrFifoHandler->u32TotalBytes += u32BytesToWrite;
-				}
-				/* if data overwriten */
-				if (pstrFifoHandler->u32TotalBytes > pstrFifoHandler->u32BufferLength) {
-					/* adjust read offset to the oldest data available */
-					pstrFifoHandler->u32ReadOffset = pstrFifoHandler->u32WriteOffset;
-					/* data availabe is the buffer length */
-					pstrFifoHandler->u32TotalBytes = pstrFifoHandler->u32BufferLength;
-				}
-				up(&pstrFifoHandler->SemBuffer);
-			} else {
-				u32Error = 1;
-			}
-		} else {
-			u32Error = 1;
-		}
-	} else {
-		u32Error = 1;
-	}
-	return u32Error;
-}
diff --git a/drivers/staging/wilc1000/fifo_buffer.h b/drivers/staging/wilc1000/fifo_buffer.h
deleted file mode 100644
index 7b76998..0000000
--- a/drivers/staging/wilc1000/fifo_buffer.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-#include <linux/types.h>
-#include <linux/semaphore.h>
-#include "wilc_memory.h"
-#include "wilc_strutils.h"
-
-
-#define tHANDLE	void *
-
-typedef struct {
-	u8		*pu8Buffer;
-	u32 u32BufferLength;
-	u32 u32WriteOffset;
-	u32 u32ReadOffset;
-	u32 u32TotalBytes;
-	struct semaphore SemBuffer;
-} tstrFifoHandler;
-
-
-extern u32 FIFO_InitBuffer(tHANDLE *hBuffer,
-								   u32 u32BufferLength);
-extern u32 FIFO_DeInit(tHANDLE hFifo);
-extern u32 FIFO_ReadBytes(tHANDLE hFifo, u8 *pu8Buffer,
-				u32 u32BytesToRead, u32 *pu32BytesRead);
-extern u32 FIFO_WriteBytes(tHANDLE hFifo, u8 *pu8Buffer,
-				u32 u32BytesToWrite, bool bForceOverWrite);
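
The two files removed above implemented a hand-rolled byte ring buffer (FIFO_InitBuffer/FIFO_ReadBytes/FIFO_WriteBytes) on top of the same WILC_* wrappers; the kernel's generic kfifo provides equivalent functionality. A minimal sketch of the same operations on a kfifo, assuming a byte FIFO with locking left to the caller (the example_fifo_* names are illustrative only, not part of the driver):

#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_fifo {
	struct kfifo fifo;		/* struct kfifo is a byte FIFO by default */
};

static int example_fifo_init(struct example_fifo *ef, unsigned int size)
{
	/* size is rounded up to a power of two by kfifo_alloc() */
	return kfifo_alloc(&ef->fifo, size, GFP_KERNEL);
}

static unsigned int example_fifo_write(struct example_fifo *ef,
				       const u8 *buf, unsigned int len)
{
	return kfifo_in(&ef->fifo, buf, len);	/* returns bytes actually stored */
}

static unsigned int example_fifo_read(struct example_fifo *ef,
				      u8 *buf, unsigned int len)
{
	return kfifo_out(&ef->fifo, buf, len);	/* returns bytes actually read */
}

static void example_fifo_destroy(struct example_fifo *ef)
{
	kfifo_free(&ef->fifo);
}
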
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 6b10bbb..66fa677 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -6,10 +6,9 @@
 extern u8 connecting;
 
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-extern WILC_TimerHandle hDuringIpTimer;
+extern struct timer_list hDuringIpTimer;
 #endif
 
-extern bool bEnablePS;
 /*BugID_5137*/
 extern u8 g_wilc_initialized;
 /*****************************************************************************/
@@ -467,7 +466,7 @@
 typedef struct _tstrHostIFmsg {
 	u16 u16MsgId;                                           /*!< Message ID */
 	tuniHostIFmsgBody uniHostIFmsgBody;             /*!< Message body */
-	void *drvHandler;
+	tstrWILC_WFIDrv *drvHandler;
 } tstrHostIFmsg;
 
 #ifdef CONNECT_DIRECT
@@ -534,8 +533,8 @@
 /*****************************************************************************/
 
 
-tstrWILC_WFIDrv *terminated_handle = NULL;
-tstrWILC_WFIDrv *gWFiDrvHandle = NULL;
+tstrWILC_WFIDrv *terminated_handle;
+tstrWILC_WFIDrv *gWFiDrvHandle;
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 bool g_obtainingIP = false;
 #endif
@@ -547,7 +546,7 @@
 struct semaphore hSemDeinitDrvHandle;
 static struct semaphore hWaitResponse;
 struct semaphore hSemHostIntDeinit;
-WILC_TimerHandle g_hPeriodicRSSI;
+struct timer_list g_hPeriodicRSSI;
 
 
 
@@ -570,9 +569,7 @@
 static u32 gu32InactiveTime;
 static u8 gu8DelBcn;
 #endif
-#ifndef SIMULATION
 static u32 gu32WidConnRstHack;
-#endif
 
 /*BugID_5137*/
 u8 *gu8FlushedJoinReq;
@@ -604,7 +601,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_SetChannel(void *drvHandler, tstrHostIFSetChan *pstrHostIFSetChan)
+static s32 Handle_SetChannel(tstrWILC_WFIDrv *drvHandler, tstrHostIFSetChan *pstrHostIFSetChan)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -659,9 +656,8 @@
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
 
-	if ((pstrHostIfSetDrvHandler->u32Address) == (u32)NULL) {
+	if ((pstrHostIfSetDrvHandler->u32Address) == (u32)NULL)
 		up(&hSemDeinitDrvHandle);
-	}
 
 
 	if (s32Error) {
@@ -685,7 +681,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_SetOperationMode(void *drvHandler, tstrHostIfSetOperationMode *pstrHostIfSetOperationMode)
+static s32 Handle_SetOperationMode(tstrWILC_WFIDrv *drvHandler, tstrHostIfSetOperationMode *pstrHostIfSetOperationMode)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -700,14 +696,13 @@
 	strWID.s32ValueSize = sizeof(u32);
 
 	/*Sending Cfg*/
-	PRINT_INFO(HOSTINF_DBG, "pstrWFIDrv= %p \n", pstrWFIDrv);
+	PRINT_INFO(HOSTINF_DBG, "pstrWFIDrv= %p\n", pstrWFIDrv);
 
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
 
-	if ((pstrHostIfSetOperationMode->u32Mode) == (u32)NULL) {
+	if ((pstrHostIfSetOperationMode->u32Mode) == (u32)NULL)
 		up(&hSemDeinitDrvHandle);
-	}
 
 
 	if (s32Error) {
@@ -731,7 +726,7 @@
  *  @date
  *  @version	1.0
  */
-s32 Handle_set_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
+s32 Handle_set_IPAddress(tstrWILC_WFIDrv *drvHandler, u8 *pu8IPAddr, u8 idx)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -742,9 +737,9 @@
 	if (pu8IPAddr[0] < 192)
 		pu8IPAddr[0] = 0;
 
-	PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set  IP = %d.%d.%d.%d \n", idx, pu8IPAddr[0], pu8IPAddr[1], pu8IPAddr[2], pu8IPAddr[3]);
+	PRINT_INFO(HOSTINF_DBG, "Indx = %d, Handling set  IP = %pI4\n", idx, pu8IPAddr);
 
-	WILC_memcpy(gs8SetIP[idx], pu8IPAddr, IP_ALEN);
+	memcpy(gs8SetIP[idx], pu8IPAddr, IP_ALEN);
 
 	/*prepare configuration packet*/
 	strWID.u16WIDid = (u16)WID_IP_ADDRESS;
@@ -756,7 +751,7 @@
 
 
 
-	host_int_get_ipaddress((WILC_WFIDrvHandle)drvHandler, firmwareIPAddress, idx);
+	host_int_get_ipaddress(drvHandler, firmwareIPAddress, idx);
 
 	if (s32Error) {
 		PRINT_D(HOSTINF_DBG, "Failed to set IP address\n");
@@ -783,7 +778,7 @@
  *  @date
  *  @version	1.0
  */
-s32 Handle_get_IPAddress(void *drvHandler, u8 *pu8IPAddr, u8 idx)
+s32 Handle_get_IPAddress(tstrWILC_WFIDrv *drvHandler, u8 *pu8IPAddr, u8 idx)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -793,27 +788,27 @@
 	/*prepare configuration packet*/
 	strWID.u16WIDid = (u16)WID_IP_ADDRESS;
 	strWID.enuWIDtype = WID_STR;
-	strWID.ps8WidVal = (u8 *)WILC_MALLOC(IP_ALEN);
+	strWID.ps8WidVal = WILC_MALLOC(IP_ALEN);
 	strWID.s32ValueSize = IP_ALEN;
 
 	s32Error = SendConfigPkt(GET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
-	PRINT_INFO(HOSTINF_DBG, "%d.%d.%d.%d\n", (u8)(strWID.ps8WidVal[0]), (u8)(strWID.ps8WidVal[1]), (u8)(strWID.ps8WidVal[2]), (u8)(strWID.ps8WidVal[3]));
+	PRINT_INFO(HOSTINF_DBG, "%pI4\n", strWID.ps8WidVal);
 
-	WILC_memcpy(gs8GetIP[idx], strWID.ps8WidVal, IP_ALEN);
+	memcpy(gs8GetIP[idx], strWID.ps8WidVal, IP_ALEN);
 
 	/*get the value by searching the local copy*/
-	WILC_FREE(strWID.ps8WidVal);
+	kfree(strWID.ps8WidVal);
 
-	if (WILC_memcmp(gs8GetIP[idx], gs8SetIP[idx], IP_ALEN) != 0)
-		host_int_setup_ipaddress((WILC_WFIDrvHandle)pstrWFIDrv, gs8SetIP[idx], idx);
+	if (memcmp(gs8GetIP[idx], gs8SetIP[idx], IP_ALEN) != 0)
+		host_int_setup_ipaddress(pstrWFIDrv, gs8SetIP[idx], idx);
 
 	if (s32Error != WILC_SUCCESS) {
 		PRINT_ER("Failed to get IP address\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_STATE);
 	} else {
-		PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d \n", idx);
-		PRINT_INFO(HOSTINF_DBG, "%d.%d.%d.%d\n", gs8GetIP[idx][0], gs8GetIP[idx][1], gs8GetIP[idx][2], gs8GetIP[idx][3]);
+		PRINT_INFO(HOSTINF_DBG, "IP address retrieved:: u8IfIdx = %d\n", idx);
+		PRINT_INFO(HOSTINF_DBG, "%pI4\n", gs8GetIP[idx]);
 		PRINT_INFO(HOSTINF_DBG, "\n");
 	}
 
@@ -836,18 +831,19 @@
  *  @date		November 2013
  *  @version	7.0
  */
-static s32 Handle_SetMacAddress(void *drvHandler, tstrHostIfSetMacAddress *pstrHostIfSetMacAddress)
+static s32 Handle_SetMacAddress(tstrWILC_WFIDrv *drvHandler, tstrHostIfSetMacAddress *pstrHostIfSetMacAddress)
 {
 
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID	strWID;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
-	u8 *mac_buf = (u8 *)WILC_MALLOC(ETH_ALEN);
+	u8 *mac_buf = WILC_MALLOC(ETH_ALEN);
+
 	if (mac_buf == NULL) {
 		PRINT_ER("No buffer to send mac address\n");
 		return WILC_FAIL;
 	}
-	WILC_memcpy(mac_buf, pstrHostIfSetMacAddress->u8MacAddress, ETH_ALEN);
+	memcpy(mac_buf, pstrHostIfSetMacAddress->u8MacAddress, ETH_ALEN);
 
 	/*prepare configuration packet*/
 	strWID.u16WIDid = (u16)WID_MAC_ADDR;
@@ -866,7 +862,7 @@
 	{
 
 	}
-	WILC_FREE(mac_buf);
+	kfree(mac_buf);
 	return s32Error;
 }
 
@@ -881,7 +877,7 @@
  *  @date		JAN 2013
  *  @version	8.0
  */
-static s32 Handle_GetMacAddress(void *drvHandler, tstrHostIfGetMacAddress *pstrHostIfGetMacAddress)
+static s32 Handle_GetMacAddress(tstrWILC_WFIDrv *drvHandler, tstrHostIfGetMacAddress *pstrHostIfGetMacAddress)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -918,7 +914,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_CfgParam(void *drvHandler, tstrHostIFCfgParamAttr *strHostIFCfgParamAttr)
+static s32 Handle_CfgParam(tstrWILC_WFIDrv *drvHandler, tstrHostIFCfgParamAttr *strHostIFCfgParamAttr)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWIDList[32];
@@ -1204,10 +1200,9 @@
 	}
 	s32Error = SendConfigPkt(SET_CFG, strWIDList, u8WidCnt, false, (u32)pstrWFIDrv);
 
-	if (s32Error) {
+	if (s32Error)
 		PRINT_ER("Error in setting CFG params\n");
 
-	}
 	WILC_CATCH(s32Error)
 	{
 	}
@@ -1228,6 +1223,7 @@
 static s32 Handle_wait_msg_q_empty(void)
 {
 	s32 s32Error = WILC_SUCCESS;
+
 	g_wilc_initialized = 0;
 	up(&hWaitResponse);
 	return s32Error;
@@ -1242,7 +1238,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_Scan(void *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
+static s32 Handle_Scan(tstrWILC_WFIDrv *drvHandler, tstrHostIFscanAttr *pstrHostIFscanAttr)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWIDList[5];
@@ -1254,7 +1250,7 @@
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *) drvHandler;
 
 	PRINT_D(HOSTINF_DBG, "Setting SCAN params\n");
-	PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state \n", pstrWFIDrv->enuHostIFstate);
+	PRINT_D(HOSTINF_DBG, "Scanning: In [%d] state\n", pstrWFIDrv->enuHostIFstate);
 
 	pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult = pstrHostIFscanAttr->pfScanResult;
 	pstrWFIDrv->strWILC_UsrScanReq.u32UserScanPvoid = pstrHostIFscanAttr->pvUserArg;
@@ -1284,9 +1280,8 @@
 	strWIDList[u32WidsCount].u16WIDid = (u16)WID_SSID_PROBE_REQ;
 	strWIDList[u32WidsCount].enuWIDtype = WID_STR;
 
-	for (i = 0; i < pstrHostIFscanAttr->strHiddenNetwork.u8ssidnum; i++) {
+	for (i = 0; i < pstrHostIFscanAttr->strHiddenNetwork.u8ssidnum; i++)
 		valuesize += ((pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen) + 1);
-	}
 	pu8HdnNtwrksWidVal = WILC_MALLOC(valuesize + 1);
 	strWIDList[u32WidsCount].ps8WidVal = pu8HdnNtwrksWidVal;
 	if (strWIDList[u32WidsCount].ps8WidVal != NULL) {
@@ -1298,7 +1293,7 @@
 
 		for (i = 0; i < pstrHostIFscanAttr->strHiddenNetwork.u8ssidnum; i++) {
 			*pu8Buffer++ = pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen;
-			WILC_memcpy(pu8Buffer, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen);
+			memcpy(pu8Buffer, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen);
 			pu8Buffer += pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen;
 		}
 
@@ -1336,9 +1331,8 @@
 		int i;
 
 		for (i = 0; i < pstrHostIFscanAttr->u8ChnlListLen; i++)	{
-			if (pstrHostIFscanAttr->pu8ChnlFreqList[i] > 0) {
+			if (pstrHostIFscanAttr->pu8ChnlFreqList[i] > 0)
 				pstrHostIFscanAttr->pu8ChnlFreqList[i] = pstrHostIFscanAttr->pu8ChnlFreqList[i] - 1;
-			}
 		}
 	}
 
@@ -1356,11 +1350,10 @@
 	/*keep the state as is , no need to change it*/
 	/* gWFiDrvHandle->enuHostIFstate = HOST_IF_SCANNING; */
 
-	if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED) {
+	if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED)
 		gbScanWhileConnected = true;
-	} else if (pstrWFIDrv->enuHostIFstate == HOST_IF_IDLE)	  {
+	else if (pstrWFIDrv->enuHostIFstate == HOST_IF_IDLE)
 		gbScanWhileConnected = false;
-	}
 
 	s32Error = SendConfigPkt(SET_CFG, strWIDList, u32WidsCount, false, (u32)pstrWFIDrv);
 
@@ -1373,36 +1366,35 @@
 
 	WILC_CATCH(s32Error)
 	{
-		WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+		del_timer(&pstrWFIDrv->hScanTimer);
 		/*if there is an ongoing scan request*/
 		Handle_ScanDone(drvHandler, SCAN_EVENT_ABORTED);
 	}
 
 	/* Deallocate pstrHostIFscanAttr->u8ChnlListLen which was prevoisuly allocated by the sending thread */
 	if (pstrHostIFscanAttr->pu8ChnlFreqList != NULL) {
-		WILC_FREE(pstrHostIFscanAttr->pu8ChnlFreqList);
+		kfree(pstrHostIFscanAttr->pu8ChnlFreqList);
 		pstrHostIFscanAttr->pu8ChnlFreqList = NULL;
 	}
 
 	/* Deallocate pstrHostIFscanAttr->pu8IEs which was previously allocated by the sending thread */
 	if (pstrHostIFscanAttr->pu8IEs != NULL)	{
-		WILC_FREE(pstrHostIFscanAttr->pu8IEs);
+		kfree(pstrHostIFscanAttr->pu8IEs);
 		pstrHostIFscanAttr->pu8IEs = NULL;
 	}
 	if (pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo != NULL)	{
-		WILC_FREE(pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo);
+		kfree(pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo);
 		pstrHostIFscanAttr->strHiddenNetwork.pstrHiddenNetworkInfo = NULL;
 	}
 
 	/* Deallocate pstrHostIFscanAttr->u8ChnlListLen which was prevoisuly allocated by the sending thread */
 	if (pstrHostIFscanAttr->pu8ChnlFreqList != NULL) {
-		WILC_FREE(pstrHostIFscanAttr->pu8ChnlFreqList);
+		kfree(pstrHostIFscanAttr->pu8ChnlFreqList);
 		pstrHostIFscanAttr->pu8ChnlFreqList = NULL;
 	}
 
-	if (pu8HdnNtwrksWidVal != NULL)	{
-		WILC_FREE(pu8HdnNtwrksWidVal);
-	}
+	if (pu8HdnNtwrksWidVal != NULL)
+		kfree(pu8HdnNtwrksWidVal);
 
 	return s32Error;
 }
@@ -1416,7 +1408,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_ScanDone(void *drvHandler, tenuScanEvent enuEvent)
+static s32 Handle_ScanDone(tstrWILC_WFIDrv *drvHandler, tenuScanEvent enuEvent)
 {
 	s32 s32Error = WILC_SUCCESS;
 
@@ -1476,7 +1468,7 @@
  *  @version	1.0
  */
 u8 u8ConnectedSSID[6] = {0};
-static s32 Handle_Connect(void *drvHandler, tstrHostIFconnectAttr *pstrHostIFconnectAttr)
+static s32 Handle_Connect(tstrWILC_WFIDrv *drvHandler, tstrHostIFconnectAttr *pstrHostIFconnectAttr)
 {
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *) drvHandler;
 	s32 s32Error = WILC_SUCCESS;
@@ -1500,12 +1492,12 @@
 	PRINT_D(GENERIC_DBG, "Handling connect request\n");
 
 	#ifndef CONNECT_DIRECT
-	WILC_memset(gapu8RcvdSurveyResults[0], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
-	WILC_memset(gapu8RcvdSurveyResults[1], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
+	memset(gapu8RcvdSurveyResults[0], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
+	memset(gapu8RcvdSurveyResults[1], 0, MAX_SURVEY_RESULT_FRAG_SIZE);
 
 
 	PRINT_D(HOSTINF_DBG, "Getting site survey results\n");
-	s32Err = host_int_get_site_survey_results((WILC_WFIDrvHandle)pstrWFIDrv,
+	s32Err = host_int_get_site_survey_results(pstrWFIDrv,
 						  gapu8RcvdSurveyResults,
 						  MAX_SURVEY_RESULT_FRAG_SIZE);
 	if (s32Err) {
@@ -1521,19 +1513,19 @@
 		/* use the parsed info in pstrSurveyResults, then deallocate it */
 		PRINT_D(HOSTINF_DBG, "Copying site survey results in global structure, then deallocate\n");
 		for (i = 0; i < pstrWFIDrv->u32SurveyResultsCount; i++)	{
-			WILC_memcpy(&pstrWFIDrv->astrSurveyResults[i], &pstrSurveyResults[i],
+			memcpy(&pstrWFIDrv->astrSurveyResults[i], &pstrSurveyResults[i],
 				    sizeof(wid_site_survey_reslts_s));
 		}
 
 		DeallocateSurveyResults(pstrSurveyResults);
 	} else {
 		WILC_ERRORREPORT(s32Error, WILC_FAIL);
-		PRINT_ER("ParseSurveyResults() Error(%d) \n", s32Err);
+		PRINT_ER("ParseSurveyResults() Error(%d)\n", s32Err);
 	}
 
 
 	for (i = 0; i < pstrWFIDrv->u32SurveyResultsCount; i++)	{
-		if (WILC_memcmp(pstrWFIDrv->astrSurveyResults[i].SSID,
+		if (memcmp(pstrWFIDrv->astrSurveyResults[i].SSID,
 				pstrHostIFconnectAttr->pu8ssid,
 				pstrHostIFconnectAttr->ssidLen) == 0) {
 			PRINT_INFO(HOSTINF_DBG, "Network with required SSID is found %s\n", pstrHostIFconnectAttr->pu8ssid);
@@ -1546,7 +1538,7 @@
 				/* BSSID is also passed from the user, so decision of matching
 				 * should consider also this passed BSSID */
 
-				if (WILC_memcmp(pstrWFIDrv->astrSurveyResults[i].BSSID,
+				if (memcmp(pstrWFIDrv->astrSurveyResults[i].BSSID,
 						pstrHostIFconnectAttr->pu8bssid,
 						6) == 0) {
 					PRINT_INFO(HOSTINF_DBG, "BSSID is passed from the user and matched\n");
@@ -1559,29 +1551,29 @@
 	if (i < pstrWFIDrv->u32SurveyResultsCount) {
 		u8bssDscListIndex = i;
 
-		PRINT_INFO(HOSTINF_DBG, "Connecting to network of Bss Idx %d and SSID %s and channel %d \n",
+		PRINT_INFO(HOSTINF_DBG, "Connecting to network of Bss Idx%d and SSID %s and channel%d\n",
 			   u8bssDscListIndex, pstrWFIDrv->astrSurveyResults[u8bssDscListIndex].SSID,
 			   pstrWFIDrv->astrSurveyResults[u8bssDscListIndex].Channel);
 
 		PRINT_INFO(HOSTINF_DBG, "Saving connection parameters in global structure\n");
 
 		if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-			pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = (u8 *)WILC_MALLOC(6);
-			WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
+			pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = WILC_MALLOC(6);
+			memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
 		}
 
 		pstrWFIDrv->strWILC_UsrConnReq.ssidLen = pstrHostIFconnectAttr->ssidLen;
 		if (pstrHostIFconnectAttr->pu8ssid != NULL) {
-			pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
-			WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
+			pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
+			memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
 				    pstrHostIFconnectAttr->ssidLen);
 			pstrWFIDrv->strWILC_UsrConnReq.pu8ssid[pstrHostIFconnectAttr->ssidLen] = '\0';
 		}
 
 		pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = pstrHostIFconnectAttr->IEsLen;
 		if (pstrHostIFconnectAttr->pu8IEs != NULL) {
-			pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
-			WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
+			pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
+			memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
 				    pstrHostIFconnectAttr->IEsLen);
 		}
 
@@ -1630,14 +1622,12 @@
 		strWIDList[u32WidsCount].ps8WidVal = (s8 *)&u8bssDscListIndex;
 		u32WidsCount++;
 
-		#ifndef SIMULATION
 		/* A temporary workaround to avoid handling the misleading MAC_DISCONNECTED raised from the
 		 *   firmware at chip reset when processing the WIDs of the Connect Request.
 		 *   (This workaround should be removed in the future when the Chip reset of the Connect WIDs is disabled) */
 		/* ////////////////////// */
 		gu32WidConnRstHack = 0;
 		/* ////////////////////// */
-		#endif
 
 		s32Error = SendConfigPkt(SET_CFG, strWIDList, u32WidsCount, false, (u32)pstrWFIDrv);
 		if (s32Error) {
@@ -1656,7 +1646,7 @@
 
 	/* if we try to connect to an already connected AP then discard the request */
 
-	if (WILC_memcmp(pstrHostIFconnectAttr->pu8bssid, u8ConnectedSSID, ETH_ALEN) == 0) {
+	if (memcmp(pstrHostIFconnectAttr->pu8bssid, u8ConnectedSSID, ETH_ALEN) == 0) {
 
 		s32Error = WILC_SUCCESS;
 		PRINT_ER("Trying to connect to an already connected AP, Discard connect request\n");
@@ -1675,22 +1665,22 @@
 	#endif /*WILC_PARSE_SCAN_IN_HOST*/
 
 	if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-		pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = (u8 *)WILC_MALLOC(6);
-		WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
+		pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = WILC_MALLOC(6);
+		memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
 	}
 
 	pstrWFIDrv->strWILC_UsrConnReq.ssidLen = pstrHostIFconnectAttr->ssidLen;
 	if (pstrHostIFconnectAttr->pu8ssid != NULL) {
-		pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
-		WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
+		pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = WILC_MALLOC(pstrHostIFconnectAttr->ssidLen + 1);
+		memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid, pstrHostIFconnectAttr->pu8ssid,
 			    pstrHostIFconnectAttr->ssidLen);
 		pstrWFIDrv->strWILC_UsrConnReq.pu8ssid[pstrHostIFconnectAttr->ssidLen] = '\0';
 	}
 
 	pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = pstrHostIFconnectAttr->IEsLen;
 	if (pstrHostIFconnectAttr->pu8IEs != NULL) {
-		pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
-		WILC_memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
+		pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
+		memcpy(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs, pstrHostIFconnectAttr->pu8IEs,
 			    pstrHostIFconnectAttr->IEsLen);
 	}
 
@@ -1728,7 +1718,7 @@
 		u32WidsCount++;
 
 		/*BugID_5137*/
-		if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
+		if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
 
 			gu32FlushedInfoElemAsocSize = pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen;
 			gu8FlushedInfoElemAsoc =  WILC_MALLOC(gu32FlushedInfoElemAsocSize);
@@ -1743,7 +1733,7 @@
 	u32WidsCount++;
 
 	/*BugID_5137*/
-	if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
+	if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
 		gu8Flushed11iMode = pstrWFIDrv->strWILC_UsrConnReq.u8security;
 
 	PRINT_INFO(HOSTINF_DBG, "Encrypt Mode = %x\n", pstrWFIDrv->strWILC_UsrConnReq.u8security);
@@ -1756,7 +1746,7 @@
 	u32WidsCount++;
 
 	/*BugID_5137*/
-	if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
+	if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7))
 		gu8FlushedAuthType = (u8)pstrWFIDrv->strWILC_UsrConnReq.tenuAuth_type;
 
 	PRINT_INFO(HOSTINF_DBG, "Authentication Type = %x\n", pstrWFIDrv->strWILC_UsrConnReq.tenuAuth_type);
@@ -1778,14 +1768,13 @@
 	strWIDList[u32WidsCount].s32ValueSize = MAX_SSID_LEN + 7;
 	strWIDList[u32WidsCount].ps8WidVal = WILC_MALLOC(strWIDList[u32WidsCount].s32ValueSize);
 
-	if (strWIDList[u32WidsCount].ps8WidVal == NULL) {
+	if (strWIDList[u32WidsCount].ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWIDList[u32WidsCount].ps8WidVal;
 
 	if (pstrHostIFconnectAttr->pu8ssid != NULL) {
-		WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
+		memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
 		pu8CurrByte[pstrHostIFconnectAttr->ssidLen] = '\0';
 	}
 	pu8CurrByte += MAX_SSID_LEN;
@@ -1795,9 +1784,8 @@
 		PRINT_ER("Channel out of range\n");
 		*(pu8CurrByte++) = 0xFF;
 	}
-	if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-		WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
-	}
+	if (pstrHostIFconnectAttr->pu8bssid != NULL)
+		memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
 	pu8CurrByte += 6;
 
 	/* keep the buffer at the start of the allocated pointer to use it with the free*/
@@ -1813,19 +1801,18 @@
 	strWIDList[u32WidsCount].ps8WidVal = WILC_MALLOC(strWIDList[u32WidsCount].s32ValueSize);
 
 	/*BugID_5137*/
-	if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
+	if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
 		gu32FlushedJoinReqSize = strWIDList[u32WidsCount].s32ValueSize;
 		gu8FlushedJoinReq = WILC_MALLOC(gu32FlushedJoinReqSize);
 	}
-	if (strWIDList[u32WidsCount].ps8WidVal == NULL) {
+	if (strWIDList[u32WidsCount].ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWIDList[u32WidsCount].ps8WidVal;
 
 
 	if (pstrHostIFconnectAttr->pu8ssid != NULL) {
-		WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
+		memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8ssid, pstrHostIFconnectAttr->ssidLen);
 		pu8CurrByte[pstrHostIFconnectAttr->ssidLen] = '\0';
 	}
 	pu8CurrByte += MAX_SSID_LEN;
@@ -1845,15 +1832,13 @@
 	PRINT_D(HOSTINF_DBG, "* Cap Info %0x*\n", (*(pu8CurrByte - 2) | ((*(pu8CurrByte - 1)) << 8)));
 
 	/* sa*/
-	if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-		WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
-	}
+	if (pstrHostIFconnectAttr->pu8bssid != NULL)
+		memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
 	pu8CurrByte += 6;
 
 	/* bssid*/
-	if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-		WILC_memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
-	}
+	if (pstrHostIFconnectAttr->pu8bssid != NULL)
+		memcpy(pu8CurrByte, pstrHostIFconnectAttr->pu8bssid, 6);
 	pu8CurrByte += 6;
 
 	/* Beacon Period*/
@@ -1864,7 +1849,7 @@
 	*(pu8CurrByte++)  =  ptstrJoinBssParam->dtim_period;
 	PRINT_D(HOSTINF_DBG, "* DTIM Period %d*\n", (*(pu8CurrByte - 1)));
 	/* Supported rates*/
-	WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1);
+	memcpy(pu8CurrByte, ptstrJoinBssParam->supp_rates, MAX_RATES_SUPPORTED + 1);
 	pu8CurrByte += (MAX_RATES_SUPPORTED + 1);
 
 	/* wmm cap*/
@@ -1888,15 +1873,15 @@
 	*(pu8CurrByte++) =  ptstrJoinBssParam->mode_802_11i;
 	PRINT_D(HOSTINF_DBG, "* mode_802_11i %d*\n", (*(pu8CurrByte - 1)));
 	/* rsn pcip policy*/
-	WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy));
+	memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_pcip_policy, sizeof(ptstrJoinBssParam->rsn_pcip_policy));
 	pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_pcip_policy);
 
 	/* rsn auth policy*/
-	WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_auth_policy, sizeof(ptstrJoinBssParam->rsn_auth_policy));
+	memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_auth_policy, sizeof(ptstrJoinBssParam->rsn_auth_policy));
 	pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_auth_policy);
 
 	/* rsn auth policy*/
-	WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_cap, sizeof(ptstrJoinBssParam->rsn_cap));
+	memcpy(pu8CurrByte, ptstrJoinBssParam->rsn_cap, sizeof(ptstrJoinBssParam->rsn_cap));
 	pu8CurrByte += sizeof(ptstrJoinBssParam->rsn_cap);
 
 	/*BugID_5137*/
@@ -1921,15 +1906,15 @@
 
 		*(pu8CurrByte++) = ptstrJoinBssParam->u8Count;
 
-		WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->au8Duration, sizeof(ptstrJoinBssParam->au8Duration));
+		memcpy(pu8CurrByte, ptstrJoinBssParam->au8Duration, sizeof(ptstrJoinBssParam->au8Duration));
 
 		pu8CurrByte += sizeof(ptstrJoinBssParam->au8Duration);
 
-		WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->au8Interval, sizeof(ptstrJoinBssParam->au8Interval));
+		memcpy(pu8CurrByte, ptstrJoinBssParam->au8Interval, sizeof(ptstrJoinBssParam->au8Interval));
 
 		pu8CurrByte += sizeof(ptstrJoinBssParam->au8Interval);
 
-		WILC_memcpy(pu8CurrByte, ptstrJoinBssParam->au8StartTime, sizeof(ptstrJoinBssParam->au8StartTime));
+		memcpy(pu8CurrByte, ptstrJoinBssParam->au8StartTime, sizeof(ptstrJoinBssParam->au8StartTime));
 
 		pu8CurrByte += sizeof(ptstrJoinBssParam->au8StartTime);
 
@@ -1945,17 +1930,15 @@
 	#endif /* #ifdef WILC_PARSE_SCAN_IN_HOST*/
 	u32WidsCount++;
 
-	#ifndef SIMULATION
 	/* A temporary workaround to avoid handling the misleading MAC_DISCONNECTED raised from the
 	 *   firmware at chip reset when processing the WIDs of the Connect Request.
 	 *   (This workaround should be removed in the future when the Chip reset of the Connect WIDs is disabled) */
 	/* ////////////////////// */
 	gu32WidConnRstHack = 0;
 	/* ////////////////////// */
-	#endif
 
 	/*BugID_5137*/
-	if (WILC_memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
+	if (memcmp("DIRECT-", pstrHostIFconnectAttr->pu8ssid, 7)) {
 		memcpy(gu8FlushedJoinReq, pu8CurrByte, gu32FlushedJoinReqSize);
 		gu8FlushedJoinReqDrvHandler = (u32)pstrWFIDrv;
 	}
@@ -1963,7 +1946,7 @@
 	PRINT_D(GENERIC_DBG, "send HOST_IF_WAITING_CONN_RESP\n");
 
 	if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-		WILC_memcpy(u8ConnectedSSID, pstrHostIFconnectAttr->pu8bssid, ETH_ALEN);
+		memcpy(u8ConnectedSSID, pstrHostIFconnectAttr->pu8bssid, ETH_ALEN);
 
 		PRINT_D(GENERIC_DBG, "save Bssid = %x:%x:%x:%x:%x:%x\n", (pstrHostIFconnectAttr->pu8bssid[0]), (pstrHostIFconnectAttr->pu8bssid[1]), (pstrHostIFconnectAttr->pu8bssid[2]), (pstrHostIFconnectAttr->pu8bssid[3]), (pstrHostIFconnectAttr->pu8bssid[4]), (pstrHostIFconnectAttr->pu8bssid[5]));
 		PRINT_D(GENERIC_DBG, "save bssid = %x:%x:%x:%x:%x:%x\n", (u8ConnectedSSID[0]), (u8ConnectedSSID[1]), (u8ConnectedSSID[2]), (u8ConnectedSSID[3]), (u8ConnectedSSID[4]), (u8ConnectedSSID[5]));
@@ -1983,21 +1966,20 @@
 	{
 		tstrConnectInfo strConnectInfo;
 
-		WILC_TimerStop(&(pstrWFIDrv->hConnectTimer), NULL);
+		del_timer(&pstrWFIDrv->hConnectTimer);
 
 		PRINT_D(HOSTINF_DBG, "could not start connecting to the required network\n");
 
-		WILC_memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+		memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
 
 		if (pstrHostIFconnectAttr->pfConnectResult != NULL) {
-			if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-				WILC_memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
-			}
+			if (pstrHostIFconnectAttr->pu8bssid != NULL)
+				memcpy(strConnectInfo.au8bssid, pstrHostIFconnectAttr->pu8bssid, 6);
 
 			if (pstrHostIFconnectAttr->pu8IEs != NULL) {
 				strConnectInfo.ReqIEsLen = pstrHostIFconnectAttr->IEsLen;
-				strConnectInfo.pu8ReqIEs = (u8 *)WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
-				WILC_memcpy(strConnectInfo.pu8ReqIEs,
+				strConnectInfo.pu8ReqIEs = WILC_MALLOC(pstrHostIFconnectAttr->IEsLen);
+				memcpy(strConnectInfo.pu8ReqIEs,
 					    pstrHostIFconnectAttr->pu8IEs,
 					    pstrHostIFconnectAttr->IEsLen);
 			}
@@ -2011,37 +1993,36 @@
 			pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
 			/* Deallocation */
 			if (strConnectInfo.pu8ReqIEs != NULL) {
-				WILC_FREE(strConnectInfo.pu8ReqIEs);
+				kfree(strConnectInfo.pu8ReqIEs);
 				strConnectInfo.pu8ReqIEs = NULL;
 			}
 
 		} else {
-			PRINT_ER("Connect callback function pointer is NULL \n");
+			PRINT_ER("Connect callback function pointer is NULL\n");
 		}
 	}
 
 	PRINT_D(HOSTINF_DBG, "Deallocating connection parameters\n");
 	/* Deallocate pstrHostIFconnectAttr->pu8bssid which was prevoisuly allocated by the sending thread */
 	if (pstrHostIFconnectAttr->pu8bssid != NULL) {
-		WILC_FREE(pstrHostIFconnectAttr->pu8bssid);
+		kfree(pstrHostIFconnectAttr->pu8bssid);
 		pstrHostIFconnectAttr->pu8bssid = NULL;
 	}
 
 	/* Deallocate pstrHostIFconnectAttr->pu8ssid which was prevoisuly allocated by the sending thread */
 	if (pstrHostIFconnectAttr->pu8ssid != NULL) {
-		WILC_FREE(pstrHostIFconnectAttr->pu8ssid);
+		kfree(pstrHostIFconnectAttr->pu8ssid);
 		pstrHostIFconnectAttr->pu8ssid = NULL;
 	}
 
 	/* Deallocate pstrHostIFconnectAttr->pu8IEs which was prevoisuly allocated by the sending thread */
 	if (pstrHostIFconnectAttr->pu8IEs != NULL) {
-		WILC_FREE(pstrHostIFconnectAttr->pu8IEs);
+		kfree(pstrHostIFconnectAttr->pu8IEs);
 		pstrHostIFconnectAttr->pu8IEs = NULL;
 	}
 
-	if (pu8CurrByte != NULL) {
-		WILC_FREE(pu8CurrByte);
-	}
+	if (pu8CurrByte != NULL)
+		kfree(pu8CurrByte);
 	return s32Error;
 }
 
@@ -2056,7 +2037,7 @@
  *  @version		8.0
  */
 
-static s32 Handle_FlushConnect(void *drvHandler)
+static s32 Handle_FlushConnect(tstrWILC_WFIDrv *drvHandler)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWIDList[5];
@@ -2123,7 +2104,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_ConnectTimeout(void *drvHandler)
+static s32 Handle_ConnectTimeout(tstrWILC_WFIDrv *drvHandler)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrConnectInfo strConnectInfo;
@@ -2141,7 +2122,7 @@
 	gbScanWhileConnected = false;
 
 
-	WILC_memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+	memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
 
 
 	/* First, we will notify the upper layer with the Connection failure {through the Connect Callback function},
@@ -2149,14 +2130,14 @@
 	 *   WID_DISCONNECT} */
 	if (pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult != NULL)	{
 		if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
-			WILC_memcpy(strConnectInfo.au8bssid,
+			memcpy(strConnectInfo.au8bssid,
 				    pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, 6);
 		}
 
 		if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
 			strConnectInfo.ReqIEsLen = pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen;
-			strConnectInfo.pu8ReqIEs = (u8 *)WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
-			WILC_memcpy(strConnectInfo.pu8ReqIEs,
+			strConnectInfo.pu8ReqIEs = WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
+			memcpy(strConnectInfo.pu8ReqIEs,
 				    pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs,
 				    pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
 		}
@@ -2169,11 +2150,11 @@
 
 		/* Deallocation of strConnectInfo.pu8ReqIEs */
 		if (strConnectInfo.pu8ReqIEs != NULL) {
-			WILC_FREE(strConnectInfo.pu8ReqIEs);
+			kfree(strConnectInfo.pu8ReqIEs);
 			strConnectInfo.pu8ReqIEs = NULL;
 		}
 	} else {
-		PRINT_ER("Connect callback function pointer is NULL \n");
+		PRINT_ER("Connect callback function pointer is NULL\n");
 	}
 
 	/* Here we will notify our firmware also with the Connection failure {through sending to it Cfg packet carrying
@@ -2186,37 +2167,36 @@
 	PRINT_D(HOSTINF_DBG, "Sending disconnect request\n");
 
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, false, (u32)pstrWFIDrv);
-	if (s32Error) {
+	if (s32Error)
 		PRINT_ER("Failed to send dissconect config packet\n");
-	}
 
 	/* Deallocation of the Saved Connect Request in the global Handle */
 	pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
 	if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
-		WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+		kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
 		pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
 	}
 
 	if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
-		WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+		kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
 		pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
 	}
 
 	pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
 	if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
-		WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+		kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
 		pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
 	}
 
-	WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+	memset(u8ConnectedSSID, 0, ETH_ALEN);
 	/*BugID_5213*/
 	/*Freeing flushed join request params on connect timeout*/
 	if (gu8FlushedJoinReq != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
-		WILC_FREE(gu8FlushedJoinReq);
+		kfree(gu8FlushedJoinReq);
 		gu8FlushedJoinReq = NULL;
 	}
 	if (gu8FlushedInfoElemAsoc != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
-		WILC_FREE(gu8FlushedInfoElemAsoc);
+		kfree(gu8FlushedInfoElemAsoc);
 		gu8FlushedInfoElemAsoc = NULL;
 	}
 
@@ -2232,7 +2212,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_RcvdNtwrkInfo(void *drvHandler, tstrRcvdNetworkInfo *pstrRcvdNetworkInfo)
+static s32 Handle_RcvdNtwrkInfo(tstrWILC_WFIDrv *drvHandler, tstrRcvdNetworkInfo *pstrRcvdNetworkInfo)
 {
 	u32 i;
 	bool bNewNtwrkFound;
@@ -2264,7 +2244,7 @@
 
 			if ((pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].au8bssid != NULL) &&
 			    (pstrNetworkInfo->au8bssid != NULL)) {
-				if (WILC_memcmp(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].au8bssid,
+				if (memcmp(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].au8bssid,
 						pstrNetworkInfo->au8bssid, 6) == 0) {
 					if (pstrNetworkInfo->s8rssi <= pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[i].s8rssi) {
 						/*we have already found this network with better rssi, so keep the old cached one and don't
@@ -2294,7 +2274,7 @@
 
 				if ((pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount].au8bssid != NULL)
 				    && (pstrNetworkInfo->au8bssid != NULL)) {
-					WILC_memcpy(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount].au8bssid,
+					memcpy(pstrWFIDrv->strWILC_UsrScanReq.astrFoundNetworkInfo[pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount].au8bssid,
 						    pstrNetworkInfo->au8bssid, 6);
 
 					pstrWFIDrv->strWILC_UsrScanReq.u32RcvdChCount++;
@@ -2313,7 +2293,7 @@
 
 				}
 			} else {
-				PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. limit \n");
+				PRINT_WRN(HOSTINF_DBG, "Discovered networks exceeded max. limit\n");
 			}
 		} else {
 			pstrNetworkInfo->bNewNetwork = false;
@@ -2332,7 +2312,7 @@
 done:
 	/* Deallocate pstrRcvdNetworkInfo->pu8Buffer which was prevoisuly allocated by the sending thread */
 	if (pstrRcvdNetworkInfo->pu8Buffer != NULL) {
-		WILC_FREE(pstrRcvdNetworkInfo->pu8Buffer);
+		kfree(pstrRcvdNetworkInfo->pu8Buffer);
 		pstrRcvdNetworkInfo->pu8Buffer = NULL;
 	}
 
@@ -2354,7 +2334,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_RcvdGnrlAsyncInfo(void *drvHandler, tstrRcvdGnrlAsyncInfo *pstrRcvdGnrlAsyncInfo)
+static s32 Handle_RcvdGnrlAsyncInfo(tstrWILC_WFIDrv *drvHandler, tstrRcvdGnrlAsyncInfo *pstrRcvdGnrlAsyncInfo)
 {
 	/* TODO: mostafa: till now, this function just handles only the received mac status msg, */
 	/*				 which carries only 1 WID which have WID ID = WID_STATUS */
@@ -2371,9 +2351,9 @@
 	tstrDisconnectNotifInfo strDisconnectNotifInfo;
 	s32 s32Err = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *) drvHandler;
-	if (pstrWFIDrv == NULL)	{
+
+	if (pstrWFIDrv == NULL)
 		PRINT_ER("Driver handler is NULL\n");
-	}
 	PRINT_D(GENERIC_DBG, "Current State = %d,Received state = %d\n", pstrWFIDrv->enuHostIFstate,
 		pstrRcvdGnrlAsyncInfo->pu8Buffer[7]);
 
@@ -2417,12 +2397,12 @@
 
 			PRINT_D(HOSTINF_DBG, "Recieved MAC status = %d with Reason = %d , Code = %d\n", u8MacStatus, u8MacStatusReasonCode, u8MacStatusAdditionalInfo);
 
-			WILC_memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
+			memset(&strConnectInfo, 0, sizeof(tstrConnectInfo));
 
 			if (u8MacStatus == MAC_CONNECTED) {
-				WILC_memset(gapu8RcvdAssocResp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
+				memset(gapu8RcvdAssocResp, 0, MAX_ASSOC_RESP_FRAME_SIZE);
 
-				host_int_get_assoc_res_info((WILC_WFIDrvHandle)pstrWFIDrv,
+				host_int_get_assoc_res_info(pstrWFIDrv,
 							    gapu8RcvdAssocResp,
 							    MAX_ASSOC_RESP_FRAME_SIZE,
 							    &u32RcvdAssocRespInfoLen);
@@ -2435,7 +2415,7 @@
 					s32Err = ParseAssocRespInfo(gapu8RcvdAssocResp, u32RcvdAssocRespInfoLen,
 								    &pstrConnectRespInfo);
 					if (s32Err) {
-						PRINT_ER("ParseAssocRespInfo() returned error %d \n", s32Err);
+						PRINT_ER("ParseAssocRespInfo() returned error %d\n", s32Err);
 					} else {
 						/* use the necessary parsed Info from the Received Association Response */
 						strConnectInfo.u16ConnectStatus = pstrConnectRespInfo->u16ConnectStatus;
@@ -2446,8 +2426,8 @@
 								strConnectInfo.u16RespIEsLen = pstrConnectRespInfo->u16RespIEsLen;
 
 
-								strConnectInfo.pu8RespIEs = (u8 *)WILC_MALLOC(pstrConnectRespInfo->u16RespIEsLen);
-								WILC_memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs,
+								strConnectInfo.pu8RespIEs = WILC_MALLOC(pstrConnectRespInfo->u16RespIEsLen);
+								memcpy(strConnectInfo.pu8RespIEs, pstrConnectRespInfo->pu8RespIEs,
 									    pstrConnectRespInfo->u16RespIEsLen);
 							}
 						}
@@ -2466,23 +2446,23 @@
 			 *   So check first the matching between the received mac status and the received status code in Asoc Resp */
 			if ((u8MacStatus == MAC_CONNECTED) &&
 			    (strConnectInfo.u16ConnectStatus != SUCCESSFUL_STATUSCODE))	{
-				PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE \n");
-				WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+				PRINT_ER("Received MAC status is MAC_CONNECTED while the received status code in Asoc Resp is not SUCCESSFUL_STATUSCODE\n");
+				memset(u8ConnectedSSID, 0, ETH_ALEN);
 
 			} else if (u8MacStatus == MAC_DISCONNECTED)    {
 				PRINT_ER("Received MAC status is MAC_DISCONNECTED\n");
-				WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+				memset(u8ConnectedSSID, 0, ETH_ALEN);
 			}
 
 			/* TODO: mostafa: correct BSSID should be retrieved from actual BSSID received from AP */
 			/*               through a structure of type tstrConnectRespInfo */
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
 				PRINT_D(HOSTINF_DBG, "Retrieving actual BSSID from AP\n");
-				WILC_memcpy(strConnectInfo.au8bssid, pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, 6);
+				memcpy(strConnectInfo.au8bssid, pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, 6);
 
 				if ((u8MacStatus == MAC_CONNECTED) &&
 				    (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE))	{
-					WILC_memcpy(pstrWFIDrv->au8AssociatedBSSID,
+					memcpy(pstrWFIDrv->au8AssociatedBSSID,
 						    pstrWFIDrv->strWILC_UsrConnReq.pu8bssid, ETH_ALEN);
 				}
 			}
@@ -2490,14 +2470,14 @@
 
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
 				strConnectInfo.ReqIEsLen = pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen;
-				strConnectInfo.pu8ReqIEs = (u8 *)WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
-				WILC_memcpy(strConnectInfo.pu8ReqIEs,
+				strConnectInfo.pu8ReqIEs = WILC_MALLOC(pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
+				memcpy(strConnectInfo.pu8ReqIEs,
 					    pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs,
 					    pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen);
 			}
 
 
-			WILC_TimerStop(&(pstrWFIDrv->hConnectTimer), NULL);
+			del_timer(&pstrWFIDrv->hConnectTimer);
 			pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult(CONN_DISCONN_EVENT_CONN_RESP,
 									   &strConnectInfo,
 									   u8MacStatus,
@@ -2512,7 +2492,7 @@
 			    (strConnectInfo.u16ConnectStatus == SUCCESSFUL_STATUSCODE))	{
 				#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 
-				host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+				host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
 				#endif
 
 				PRINT_D(HOSTINF_DBG, "MAC status : CONNECTED and Connect Status : Successful\n");
@@ -2521,7 +2501,8 @@
 				#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 				PRINT_D(GENERIC_DBG, "Obtaining an IP, Disable Scan\n");
 				g_obtainingIP = true;
-				WILC_TimerStart(&hDuringIpTimer, 10000, NULL, NULL);
+				mod_timer(&hDuringIpTimer,
+					  jiffies + msecs_to_jiffies(10000));
 				#endif
 
 				#ifdef WILC_PARSE_SCAN_IN_HOST
@@ -2540,30 +2521,30 @@
 
 			/* Deallocation */
 			if (strConnectInfo.pu8RespIEs != NULL) {
-				WILC_FREE(strConnectInfo.pu8RespIEs);
+				kfree(strConnectInfo.pu8RespIEs);
 				strConnectInfo.pu8RespIEs = NULL;
 			}
 
 			if (strConnectInfo.pu8ReqIEs != NULL) {
-				WILC_FREE(strConnectInfo.pu8ReqIEs);
+				kfree(strConnectInfo.pu8ReqIEs);
 				strConnectInfo.pu8ReqIEs = NULL;
 			}
 
 
 			pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
-				WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+				kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
 				pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
 			}
 
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
-				WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+				kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
 				pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
 			}
 
 			pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
-				WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+				kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
 				pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
 			}
 
@@ -2572,11 +2553,11 @@
 			/* Disassociation or Deauthentication frame has been received */
 			PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW\n");
 
-			WILC_memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+			memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
 
 			if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
-				PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >> \n\n");
-				WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+				PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running OBSS Scan >>\n\n");
+				del_timer(&pstrWFIDrv->hScanTimer);
 				Handle_ScanDone((void *)pstrWFIDrv, SCAN_EVENT_ABORTED);
 			}
 
@@ -2588,7 +2569,7 @@
 				#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 
 				g_obtainingIP = false;
-				host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+				host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
 				#endif
 
 				pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult(CONN_DISCONN_EVENT_DISCONN_NOTIF,
@@ -2598,10 +2579,10 @@
 										   pstrWFIDrv->strWILC_UsrConnReq.u32UserConnectPvoid);
 
 			} else {
-				PRINT_ER("Connect result callback function is NULL \n");
+				PRINT_ER("Connect result callback function is NULL\n");
 			}
 
-			WILC_memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
+			memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
 
 
 			/* Deallocation */
@@ -2611,25 +2592,25 @@
 			/*
 			 * if(strDisconnectNotifInfo.ie != NULL)
 			 * {
-			 *      WILC_FREE(strDisconnectNotifInfo.ie);
+			 *      kfree(strDisconnectNotifInfo.ie);
 			 *      strDisconnectNotifInfo.ie = NULL;
 			 * }
 			 */
 
 			pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
-				WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+				kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
 				pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
 			}
 
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
-				WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+				kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
 				pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
 			}
 
 			pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
 			if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
-				WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+				kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
 				pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
 			}
 
@@ -2637,11 +2618,11 @@
 			/*Freeing flushed join request params on receiving*/
 			/*MAC_DISCONNECTED while connected*/
 			if (gu8FlushedJoinReq != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
-				WILC_FREE(gu8FlushedJoinReq);
+				kfree(gu8FlushedJoinReq);
 				gu8FlushedJoinReq = NULL;
 			}
 			if (gu8FlushedInfoElemAsoc != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
-				WILC_FREE(gu8FlushedInfoElemAsoc);
+				kfree(gu8FlushedInfoElemAsoc);
 				gu8FlushedInfoElemAsoc = NULL;
 			}
 
@@ -2651,13 +2632,12 @@
 		} else if ((u8MacStatus == MAC_DISCONNECTED) &&
 			   (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult != NULL)) {
 			PRINT_D(HOSTINF_DBG, "Received MAC_DISCONNECTED from the FW while scanning\n");
-			PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >> \n\n");
+			PRINT_D(HOSTINF_DBG, "\n\n<< Abort the running Scan >>\n\n");
 			/*Abort the running scan*/
-			WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
-			if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
-				Handle_ScanDone((void *)pstrWFIDrv, SCAN_EVENT_ABORTED);
+			del_timer(&pstrWFIDrv->hScanTimer);
+			if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult)
+				Handle_ScanDone(pstrWFIDrv, SCAN_EVENT_ABORTED);
 
-			}
 		}
 
 	}
@@ -2669,7 +2649,7 @@
 
 	/* Deallocate pstrRcvdGnrlAsyncInfo->pu8Buffer which was prevoisuly allocated by the sending thread */
 	if (pstrRcvdGnrlAsyncInfo->pu8Buffer != NULL) {
-		WILC_FREE(pstrRcvdGnrlAsyncInfo->pu8Buffer);
+		kfree(pstrRcvdGnrlAsyncInfo->pu8Buffer);
 		pstrRcvdGnrlAsyncInfo->pu8Buffer = NULL;
 	}
 
@@ -2685,7 +2665,7 @@
  *  @date
  *  @version	1.0
  */
-static int Handle_Key(void *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
+static int Handle_Key(tstrWILC_WFIDrv *drvHandler, tstrHostIFkeyAttr *pstrHostIFkeyAttr)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -2726,7 +2706,7 @@
 			strWIDList[2].s32ValueSize = sizeof(char);
 
 
-			pu8keybuf = (u8 *)WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
+			pu8keybuf = WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
 
 
 			if (pu8keybuf == NULL) {
@@ -2734,11 +2714,11 @@
 				return -1;
 			}
 
-			WILC_memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+			memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
 				    pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
 
 
-			WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
+			kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
 
 			strWIDList[3].u16WIDid = (u16)WID_WEP_KEY_VALUE;
 			strWIDList[3].enuWIDtype = WID_STR;
@@ -2747,7 +2727,7 @@
 
 
 			s32Error = SendConfigPkt(SET_CFG, strWIDList, 4, true, (u32)pstrWFIDrv);
-			WILC_FREE(pu8keybuf);
+			kfree(pu8keybuf);
 
 
 		}
@@ -2755,19 +2735,19 @@
 
 		if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY) {
 			PRINT_D(HOSTINF_DBG, "Handling WEP key\n");
-			pu8keybuf = (u8 *)WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen + 2);
+			pu8keybuf = WILC_MALLOC(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen + 2);
 			if (pu8keybuf == NULL) {
 				PRINT_ER("No buffer to send Key\n");
 				return -1;
 			}
 			pu8keybuf[0] = pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx;
 
-			WILC_memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen, 1);
+			memcpy(pu8keybuf + 1, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen, 1);
 
-			WILC_memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+			memcpy(pu8keybuf + 2, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
 				    pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen);
 
-			WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
+			kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey);
 
 			strWID.u16WIDid	= (u16)WID_ADD_WEP_KEY;
 			strWID.enuWIDtype	= WID_STR;
@@ -2775,7 +2755,7 @@
 			strWID.s32ValueSize = pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwepAttr.u8WepKeylen + 2;
 
 			s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
-			WILC_FREE(pu8keybuf);
+			kfree(pu8keybuf);
 		} else if (pstrHostIFkeyAttr->u8KeyAction & REMOVEKEY)	  {
 
 			PRINT_D(HOSTINF_DBG, "Removing key\n");
@@ -2803,14 +2783,14 @@
 	case WPARxGtk:
 			#ifdef WILC_AP_EXTERNAL_MLME
 		if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY_AP)	{
-			pu8keybuf = (u8 *)WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
+			pu8keybuf = WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
 			if (pu8keybuf == NULL) {
 				PRINT_ER("No buffer to send RxGTK Key\n");
 				ret = -1;
 				goto _WPARxGtk_end_case_;
 			}
 
-			WILC_memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
+			memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
 
 
 			/*|----------------------------------------------------------------------------|
@@ -2821,14 +2801,14 @@
 
 
 			if (pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq != NULL)
-				WILC_memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
+				memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
 
 
-			WILC_memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
+			memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
 
-			WILC_memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+			memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
 
-			WILC_memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+			memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
 				    pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
 			/* pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Ciphermode =  0X51; */
 			strWIDList[0].u16WIDid = (u16)WID_11I_MODE;
@@ -2843,7 +2823,7 @@
 
 			s32Error = SendConfigPkt(SET_CFG, strWIDList, 2, true, (u32)pstrWFIDrv);
 
-			WILC_FREE(pu8keybuf);
+			kfree(pu8keybuf);
 
 			/* ////////////////////////// */
 			up(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -2854,14 +2834,14 @@
 		if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY) {
 			PRINT_D(HOSTINF_DBG, "Handling group key(Rx) function\n");
 
-			pu8keybuf = (u8 *)WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
+			pu8keybuf = WILC_MALLOC(RX_MIC_KEY_MSG_LEN);
 			if (pu8keybuf == NULL) {
 				PRINT_ER("No buffer to send RxGTK Key\n");
 				ret = -1;
 				goto _WPARxGtk_end_case_;
 			}
 
-			WILC_memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
+			memset(pu8keybuf, 0, RX_MIC_KEY_MSG_LEN);
 
 
 			/*|----------------------------------------------------------------------------|
@@ -2869,18 +2849,17 @@
 			 * |------------|---------|-------|------------|---------------|----------------|
 			 |	6 bytes	 | 8 byte  |1 byte |  1 byte	|   16 bytes	|	  8 bytes	 |*/
 
-			if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED) {
-				WILC_memcpy(pu8keybuf, pstrWFIDrv->au8AssociatedBSSID, ETH_ALEN);
-			} else {
-				PRINT_ER("Couldn't handle WPARxGtk while enuHostIFstate is not HOST_IF_CONNECTED \n");
-			}
+			if (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTED)
+				memcpy(pu8keybuf, pstrWFIDrv->au8AssociatedBSSID, ETH_ALEN);
+			else
+				PRINT_ER("Couldn't handle WPARxGtk while enuHostIFstate is not HOST_IF_CONNECTED\n");
 
-			WILC_memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
+			memcpy(pu8keybuf + 6, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq, 8);
 
-			WILC_memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
+			memcpy(pu8keybuf + 14, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
 
-			WILC_memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
-			WILC_memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+			memcpy(pu8keybuf + 15, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+			memcpy(pu8keybuf + 16, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
 				    pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
 
 			strWID.u16WIDid	= (u16)WID_ADD_RX_GTK;
@@ -2890,15 +2869,15 @@
 
 			s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
-			WILC_FREE(pu8keybuf);
+			kfree(pu8keybuf);
 
 			/* ////////////////////////// */
 			up(&(pstrWFIDrv->hSemTestKeyBlock));
 			/* ///////////////////////// */
 		}
 _WPARxGtk_end_case_:
-		WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
-		WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq);
+		kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
+		kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq);
 		if (ret == -1)
 			return ret;
 
@@ -2909,7 +2888,7 @@
 		if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY_AP)	{
 
 
-			pu8keybuf = (u8 *)WILC_MALLOC(PTK_KEY_MSG_LEN + 1);
+			pu8keybuf = WILC_MALLOC(PTK_KEY_MSG_LEN + 1);
 
 
 
@@ -2926,12 +2905,12 @@
 			 |	6 bytes    |	1 byte    |   1byte	 |   16 bytes	 |	  8 bytes	  |	   8 bytes	  |
 			 |-----------------------------------------------------------------------------|*/
 
-			WILC_memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6);  /*1 bytes Key Length */
+			memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6);  /*1 bytes Key Length */
 
-			WILC_memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
-			WILC_memcpy(pu8keybuf + 7, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+			memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8keyidx, 1);
+			memcpy(pu8keybuf + 7, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
 			/*16 byte TK*/
-			WILC_memcpy(pu8keybuf + 8, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+			memcpy(pu8keybuf + 8, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
 				    pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
 
 
@@ -2946,7 +2925,7 @@
 			strWIDList[1].s32ValueSize = PTK_KEY_MSG_LEN + 1;
 
 			s32Error = SendConfigPkt(SET_CFG, strWIDList, 2, true, (u32)pstrWFIDrv);
-			WILC_FREE(pu8keybuf);
+			kfree(pu8keybuf);
 
 			/* ////////////////////////// */
 			up(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -2956,7 +2935,7 @@
 		if (pstrHostIFkeyAttr->u8KeyAction & ADDKEY) {
 
 
-			pu8keybuf = (u8 *)WILC_MALLOC(PTK_KEY_MSG_LEN);
+			pu8keybuf = WILC_MALLOC(PTK_KEY_MSG_LEN);
 
 
 
@@ -2973,11 +2952,11 @@
 			 |	6 bytes		 |	1byte	  |   16 bytes	 |	  8 bytes	  |	   8 bytes	  |
 			 |-----------------------------------------------------------------------------|*/
 
-			WILC_memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6);  /*1 bytes Key Length */
+			memcpy(pu8keybuf, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8macaddr, 6);  /*1 bytes Key Length */
 
-			WILC_memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
+			memcpy(pu8keybuf + 6, &pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen, 1);
 			/*16 byte TK*/
-			WILC_memcpy(pu8keybuf + 7, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+			memcpy(pu8keybuf + 7, pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
 				    pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.u8Keylen);
 
 
@@ -2987,7 +2966,7 @@
 			strWID.s32ValueSize = PTK_KEY_MSG_LEN;
 
 			s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
-			WILC_FREE(pu8keybuf);
+			kfree(pu8keybuf);
 
 			/* ////////////////////////// */
 			up(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -2995,7 +2974,7 @@
 		}
 
 _WPAPtk_end_case_:
-		WILC_FREE(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
+		kfree(pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFwpaAttr.pu8key);
 		if (ret == -1)
 			return ret;
 
@@ -3006,7 +2985,7 @@
 
 		PRINT_D(HOSTINF_DBG, "Handling PMKSA key\n");
 
-		pu8keybuf = (u8 *)WILC_MALLOC((pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.numpmkid * PMKSA_KEY_LEN) + 1);
+		pu8keybuf = WILC_MALLOC((pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.numpmkid * PMKSA_KEY_LEN) + 1);
 		if (pu8keybuf == NULL) {
 			PRINT_ER("No buffer to send PMKSA Key\n");
 			return -1;
@@ -3016,8 +2995,8 @@
 
 		for (i = 0; i < pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.numpmkid; i++) {
 
-			WILC_memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, ETH_ALEN);
-			WILC_memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + ETH_ALEN + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, PMKID_LEN);
+			memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, ETH_ALEN);
+			memcpy(pu8keybuf + ((PMKSA_KEY_LEN * i) + ETH_ALEN + 1), pstrHostIFkeyAttr->uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, PMKID_LEN);
 		}
 
 		strWID.u16WIDid	= (u16)WID_PMKID_INFO;
@@ -3027,7 +3006,7 @@
 
 		s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
-		WILC_FREE(pu8keybuf);
+		kfree(pu8keybuf);
 		break;
 	}
 
@@ -3048,7 +3027,7 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_Disconnect(void *drvHandler)
+static void Handle_Disconnect(tstrWILC_WFIDrv *drvHandler)
 {
 	tstrWID strWID;
 
@@ -3069,10 +3048,10 @@
 	#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 
 	g_obtainingIP = false;
-	host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+	host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
 	#endif
 
-	WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+	memset(u8ConnectedSSID, 0, ETH_ALEN);
 
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, false, (u32)pstrWFIDrv);
 
@@ -3082,14 +3061,14 @@
 	} else {
 		tstrDisconnectNotifInfo strDisconnectNotifInfo;
 
-		WILC_memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
+		memset(&strDisconnectNotifInfo, 0, sizeof(tstrDisconnectNotifInfo));
 
 		strDisconnectNotifInfo.u16reason = 0;
 		strDisconnectNotifInfo.ie = NULL;
 		strDisconnectNotifInfo.ie_len = 0;
 
 		if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
-			WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+			del_timer(&pstrWFIDrv->hScanTimer);
 			pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult(SCAN_EVENT_ABORTED, NULL,
 									pstrWFIDrv->strWILC_UsrScanReq.u32UserScanPvoid, NULL);
 
@@ -3102,48 +3081,48 @@
 			/*Stop connect timer, if connection in progress*/
 			if (pstrWFIDrv->enuHostIFstate == HOST_IF_WAITING_CONN_RESP) {
 				PRINT_D(HOSTINF_DBG, "Upper layer requested termination of connection\n");
-				WILC_TimerStop(&(pstrWFIDrv->hConnectTimer), NULL);
+				del_timer(&pstrWFIDrv->hConnectTimer);
 			}
 
 			pstrWFIDrv->strWILC_UsrConnReq.pfUserConnectResult(CONN_DISCONN_EVENT_DISCONN_NOTIF, NULL,
 									   0, &strDisconnectNotifInfo, pstrWFIDrv->strWILC_UsrConnReq.u32UserConnectPvoid);
 		} else {
-			PRINT_ER("strWILC_UsrConnReq.pfUserConnectResult = NULL \n");
+			PRINT_ER("strWILC_UsrConnReq.pfUserConnectResult = NULL\n");
 		}
 
 		gbScanWhileConnected = false;
 
 		pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
 
-		WILC_memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
+		memset(pstrWFIDrv->au8AssociatedBSSID, 0, ETH_ALEN);
 
 
 		/* Deallocation */
 		pstrWFIDrv->strWILC_UsrConnReq.ssidLen = 0;
 		if (pstrWFIDrv->strWILC_UsrConnReq.pu8ssid != NULL) {
-			WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
+			kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ssid);
 			pstrWFIDrv->strWILC_UsrConnReq.pu8ssid = NULL;
 		}
 
 		if (pstrWFIDrv->strWILC_UsrConnReq.pu8bssid != NULL) {
-			WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
+			kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8bssid);
 			pstrWFIDrv->strWILC_UsrConnReq.pu8bssid = NULL;
 		}
 
 		pstrWFIDrv->strWILC_UsrConnReq.ConnReqIEsLen = 0;
 		if (pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs != NULL) {
-			WILC_FREE(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
+			kfree(pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs);
 			pstrWFIDrv->strWILC_UsrConnReq.pu8ConnReqIEs = NULL;
 		}
 
 
 		/*BugID_5137*/
 		if (gu8FlushedJoinReq != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
-			WILC_FREE(gu8FlushedJoinReq);
+			kfree(gu8FlushedJoinReq);
 			gu8FlushedJoinReq = NULL;
 		}
 		if (gu8FlushedInfoElemAsoc != NULL && gu8FlushedJoinReqDrvHandler == (u32)drvHandler) {
-			WILC_FREE(gu8FlushedInfoElemAsoc);
+			kfree(gu8FlushedInfoElemAsoc);
 			gu8FlushedInfoElemAsoc = NULL;
 		}
 
@@ -3161,7 +3140,7 @@
 }
 
 
-void resolve_disconnect_aberration(void *drvHandler)
+void resolve_disconnect_aberration(tstrWILC_WFIDrv *drvHandler)
 {
 	tstrWILC_WFIDrv *pstrWFIDrv;
 
@@ -3170,10 +3149,10 @@
 		return;
 	if ((pstrWFIDrv->enuHostIFstate == HOST_IF_WAITING_CONN_RESP) || (pstrWFIDrv->enuHostIFstate == HOST_IF_CONNECTING)) {
 		PRINT_D(HOSTINF_DBG, "\n\n<< correcting Supplicant state machine >>\n\n");
-		host_int_disconnect((WILC_WFIDrvHandle)pstrWFIDrv, 1);
+		host_int_disconnect(pstrWFIDrv, 1);
 	}
 }
-static s32 Switch_Log_Terminal(void *drvHandler)
+static s32 Switch_Log_Terminal(tstrWILC_WFIDrv *drvHandler)
 {
 
 
@@ -3194,7 +3173,7 @@
 		PRINT_D(HOSTINF_DBG, "Failed to switch log terminal\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_STATE);
 	} else {
-		PRINT_INFO(HOSTINF_DBG, "MAC address set :: \n");
+		PRINT_INFO(HOSTINF_DBG, "MAC address set ::\n");
 
 
 	}
@@ -3217,13 +3196,14 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_GetChnl(void *drvHandler)
+static s32 Handle_GetChnl(tstrWILC_WFIDrv *drvHandler)
 {
 
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID	strWID;
 	/* tstrWILC_WFIDrv * pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv; */
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
 	strWID.u16WIDid = (u16)WID_CURRENT_CHANNEL;
 	strWID.enuWIDtype = WID_CHAR;
 	strWID.ps8WidVal = (s8 *)&gu8Chnl;
@@ -3261,7 +3241,7 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_GetRssi(void *drvHandler)
+static void Handle_GetRssi(tstrWILC_WFIDrv *drvHandler)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -3291,7 +3271,7 @@
 }
 
 
-static void Handle_GetLinkspeed(void *drvHandler)
+static void Handle_GetLinkspeed(tstrWILC_WFIDrv *drvHandler)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -3321,7 +3301,7 @@
 
 }
 
-s32 Handle_GetStatistics(void *drvHandler, tstrStatistics *pstrStatistics)
+s32 Handle_GetStatistics(tstrWILC_WFIDrv *drvHandler, tstrStatistics *pstrStatistics)
 {
 	tstrWID strWIDList[5];
 	uint32_t u32WidsCount = 0, s32Error = 0;
@@ -3382,7 +3362,7 @@
  *  @date
  *  @version	1.0
  */
-static s32 Handle_Get_InActiveTime(void *drvHandler, tstrHostIfStaInactiveT *strHostIfStaInactiveT)
+static s32 Handle_Get_InActiveTime(tstrWILC_WFIDrv *drvHandler, tstrHostIfStaInactiveT *strHostIfStaInactiveT)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -3394,11 +3374,11 @@
 	strWID.u16WIDid = (u16)WID_SET_STA_MAC_INACTIVE_TIME;
 	strWID.enuWIDtype = WID_STR;
 	strWID.s32ValueSize = ETH_ALEN;
-	strWID.ps8WidVal = (u8 *)WILC_MALLOC(strWID.s32ValueSize);
+	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
 
 
 	stamac = strWID.ps8WidVal;
-	WILC_memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
+	memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN);
 
 
 	PRINT_D(CFG80211_DBG, "SETING STA inactive time\n");
@@ -3451,21 +3431,21 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_AddBeacon(void *drvHandler, tstrHostIFSetBeacon *pstrSetBeaconParam)
+static void Handle_AddBeacon(tstrWILC_WFIDrv *drvHandler, tstrHostIFSetBeacon *pstrSetBeaconParam)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
 	u8 *pu8CurrByte;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
 	PRINT_D(HOSTINF_DBG, "Adding BEACON\n");
 
 	strWID.u16WIDid = (u16)WID_ADD_BEACON;
 	strWID.enuWIDtype = WID_BIN;
 	strWID.s32ValueSize = pstrSetBeaconParam->u32HeadLen + pstrSetBeaconParam->u32TailLen + 16;
 	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 	*pu8CurrByte++ = (pstrSetBeaconParam->u32Interval & 0xFF);
@@ -3508,9 +3488,9 @@
 	WILC_CATCH(s32Error)
 	{
 	}
-	WILC_FREE_IF_TRUE(strWID.ps8WidVal);
-	WILC_FREE_IF_TRUE(pstrSetBeaconParam->pu8Head);
-	WILC_FREE_IF_TRUE(pstrSetBeaconParam->pu8Tail);
+	kfree(strWID.ps8WidVal);
+	kfree(pstrSetBeaconParam->pu8Head);
+	kfree(pstrSetBeaconParam->pu8Tail);
 }
 
 
@@ -3523,20 +3503,20 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_DelBeacon(void *drvHandler, tstrHostIFDelBeacon *pstrDelBeacon)
+static void Handle_DelBeacon(tstrWILC_WFIDrv *drvHandler, tstrHostIFDelBeacon *pstrDelBeacon)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
 	u8 *pu8CurrByte;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
 	strWID.u16WIDid = (u16)WID_DEL_BEACON;
 	strWID.enuWIDtype = WID_CHAR;
 	strWID.s32ValueSize = sizeof(char);
 	strWID.ps8WidVal = &gu8DelBcn;
 
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 
@@ -3573,16 +3553,15 @@
 	pu8CurrByte = pu8Buffer;
 
 	PRINT_D(HOSTINF_DBG, "Packing STA params\n");
-	WILC_memcpy(pu8CurrByte, pstrStationParam->au8BSSID, ETH_ALEN);
+	memcpy(pu8CurrByte, pstrStationParam->au8BSSID, ETH_ALEN);
 	pu8CurrByte +=  ETH_ALEN;
 
 	*pu8CurrByte++ = pstrStationParam->u16AssocID & 0xFF;
 	*pu8CurrByte++ = (pstrStationParam->u16AssocID >> 8) & 0xFF;
 
 	*pu8CurrByte++ = pstrStationParam->u8NumRates;
-	if (pstrStationParam->u8NumRates > 0) {
-		WILC_memcpy(pu8CurrByte, pstrStationParam->pu8Rates, pstrStationParam->u8NumRates);
-	}
+	if (pstrStationParam->u8NumRates > 0)
+		memcpy(pu8CurrByte, pstrStationParam->pu8Rates, pstrStationParam->u8NumRates);
 	pu8CurrByte += pstrStationParam->u8NumRates;
 
 	*pu8CurrByte++ = pstrStationParam->bIsHTSupported;
@@ -3590,7 +3569,7 @@
 	*pu8CurrByte++ = (pstrStationParam->u16HTCapInfo >> 8) & 0xFF;
 
 	*pu8CurrByte++ = pstrStationParam->u8AmpduParams;
-	WILC_memcpy(pu8CurrByte, pstrStationParam->au8SuppMCsSet, WILC_SUPP_MCS_SET_SIZE);
+	memcpy(pu8CurrByte, pstrStationParam->au8SuppMCsSet, WILC_SUPP_MCS_SET_SIZE);
 	pu8CurrByte += WILC_SUPP_MCS_SET_SIZE;
 
 	*pu8CurrByte++ = pstrStationParam->u16HTExtParams & 0xFF;
@@ -3621,21 +3600,21 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_AddStation(void *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
+static void Handle_AddStation(tstrWILC_WFIDrv *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
 	u8 *pu8CurrByte;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
 	PRINT_D(HOSTINF_DBG, "Handling add station\n");
 	strWID.u16WIDid = (u16)WID_ADD_STA;
 	strWID.enuWIDtype = WID_BIN;
 	strWID.s32ValueSize = WILC_ADD_STA_LENGTH + pstrStationParam->u8NumRates;
 
 	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 	pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
@@ -3651,8 +3630,8 @@
 	WILC_CATCH(s32Error)
 	{
 	}
-	WILC_FREE_IF_TRUE(pstrStationParam->pu8Rates);
-	WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+	kfree(pstrStationParam->pu8Rates);
+	kfree(strWID.ps8WidVal);
 }
 
 /**
@@ -3664,24 +3643,25 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_DelAllSta(void *drvHandler, tstrHostIFDelAllSta *pstrDelAllStaParam)
+static void Handle_DelAllSta(tstrWILC_WFIDrv *drvHandler, tstrHostIFDelAllSta *pstrDelAllStaParam)
 {
 	s32 s32Error = WILC_SUCCESS;
+
 	tstrWID strWID;
 	u8 *pu8CurrByte;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
 	u8 i;
 	u8 au8Zero_Buff[6] = {0};
+
 	strWID.u16WIDid = (u16)WID_DEL_ALL_STA;
 	strWID.enuWIDtype = WID_STR;
 	strWID.s32ValueSize = (pstrDelAllStaParam->u8Num_AssocSta * ETH_ALEN) + 1;
 
-	PRINT_D(HOSTINF_DBG, "Handling delete station \n");
+	PRINT_D(HOSTINF_DBG, "Handling delete station\n");
 
 	strWID.ps8WidVal = WILC_MALLOC((pstrDelAllStaParam->u8Num_AssocSta * ETH_ALEN) + 1);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 
@@ -3689,7 +3669,7 @@
 
 	for (i = 0; i < MAX_NUM_STA; i++) {
 		if (memcmp(pstrDelAllStaParam->au8Sta_DelAllSta[i], au8Zero_Buff, ETH_ALEN))
-			WILC_memcpy(pu8CurrByte, pstrDelAllStaParam->au8Sta_DelAllSta[i], ETH_ALEN);
+			memcpy(pu8CurrByte, pstrDelAllStaParam->au8Sta_DelAllSta[i], ETH_ALEN);
 		else
 			continue;
 
@@ -3700,14 +3680,14 @@
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 	if (s32Error) {
 
-		PRINT_ER("Failed to send add station config packe\n");
+		PRINT_ER("Failed to send add station config packet\n");
 		WILC_ERRORREPORT(s32Error, WILC_FAIL);
 	}
 
 	WILC_CATCH(s32Error)
 	{
 	}
-	WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+	kfree(strWID.ps8WidVal);
 
 	up(&hWaitResponse);
 }
@@ -3722,7 +3702,7 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_DelStation(void *drvHandler, tstrHostIFDelSta *pstrDelStaParam)
+static void Handle_DelStation(tstrWILC_WFIDrv *drvHandler, tstrHostIFDelSta *pstrDelStaParam)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -3733,29 +3713,28 @@
 	strWID.enuWIDtype = WID_BIN;
 	strWID.s32ValueSize = ETH_ALEN;
 
-	PRINT_D(HOSTINF_DBG, "Handling delete station \n");
+	PRINT_D(HOSTINF_DBG, "Handling delete station\n");
 
 	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 
-	WILC_memcpy(pu8CurrByte, pstrDelStaParam->au8MacAddr, ETH_ALEN);
+	memcpy(pu8CurrByte, pstrDelStaParam->au8MacAddr, ETH_ALEN);
 
 	/*Sending Cfg*/
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, false, (u32)pstrWFIDrv);
 	if (s32Error) {
 
-		PRINT_ER("Failed to send add station config packe\n");
+		PRINT_ER("Failed to send add station config packet\n");
 		WILC_ERRORREPORT(s32Error, WILC_FAIL);
 	}
 
 	WILC_CATCH(s32Error)
 	{
 	}
-	WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+	kfree(strWID.ps8WidVal);
 }
 
 
@@ -3768,7 +3747,7 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_EditStation(void *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
+static void Handle_EditStation(tstrWILC_WFIDrv *drvHandler, tstrWILC_AddStaParam *pstrStationParam)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -3781,9 +3760,8 @@
 
 	PRINT_D(HOSTINF_DBG, "Handling edit station\n");
 	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 	pu8CurrByte += WILC_HostIf_PackStaParam(pu8CurrByte, pstrStationParam);
@@ -3799,8 +3777,8 @@
 	WILC_CATCH(s32Error)
 	{
 	}
-	WILC_FREE_IF_TRUE(pstrStationParam->pu8Rates);
-	WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+	kfree(pstrStationParam->pu8Rates);
+	kfree(strWID.ps8WidVal);
 }
 #endif /*WILC_AP_EXTERNAL_MLME*/
 
@@ -3814,7 +3792,7 @@
  *  @date
  *  @version	1.0
  */
-static int Handle_RemainOnChan(void *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
+static int Handle_RemainOnChan(tstrWILC_WFIDrv *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
 {
 	s32 s32Error = WILC_SUCCESS;
 	u8 u8remain_on_chan_flag;
@@ -3856,30 +3834,30 @@
 	strWID.u16WIDid	= (u16)WID_REMAIN_ON_CHAN;
 	strWID.enuWIDtype	= WID_STR;
 	strWID.s32ValueSize = 2;
-	strWID.ps8WidVal = (s8 *)WILC_MALLOC(strWID.s32ValueSize);
+	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
 
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	strWID.ps8WidVal[0] = u8remain_on_chan_flag;
 	strWID.ps8WidVal[1] = (s8)pstrHostIfRemainOnChan->u16Channel;
 
 	/*Sending Cfg*/
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
-	if (s32Error != WILC_SUCCESS) {
+	if (s32Error != WILC_SUCCESS)
 		PRINT_ER("Failed to set remain on channel\n");
-	}
 
 	WILC_CATCH(-1)
 	{
 		P2P_LISTEN_STATE = 1;
-		WILC_TimerStart(&(pstrWFIDrv->hRemainOnChannel), pstrHostIfRemainOnChan->u32duration, (void *)pstrWFIDrv, NULL);
+		pstrWFIDrv->hRemainOnChannel.data = (unsigned long)pstrWFIDrv;
+		mod_timer(&pstrWFIDrv->hRemainOnChannel,
+			  jiffies +
+			  msecs_to_jiffies(pstrHostIfRemainOnChan->u32duration));
 
 		/*Calling CFG ready_on_channel*/
-		if (pstrWFIDrv->strHostIfRemainOnChan.pRemainOnChanReady) {
+		if (pstrWFIDrv->strHostIfRemainOnChan.pRemainOnChanReady)
 			pstrWFIDrv->strHostIfRemainOnChan.pRemainOnChanReady(pstrWFIDrv->strHostIfRemainOnChan.pVoid);
-		}
 
 		if (pstrWFIDrv->u8RemainOnChan_pendingreq)
 			pstrWFIDrv->u8RemainOnChan_pendingreq = 0;
@@ -3896,7 +3874,7 @@
  *  @date
  *  @version	1.0
  */
-static int Handle_RegisterFrame(void *drvHandler, tstrHostIfRegisterFrame *pstrHostIfRegisterFrame)
+static int Handle_RegisterFrame(tstrWILC_WFIDrv *drvHandler, tstrHostIfRegisterFrame *pstrHostIfRegisterFrame)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -3909,15 +3887,14 @@
 	strWID.u16WIDid = (u16)WID_REGISTER_FRAME;
 	strWID.enuWIDtype = WID_STR;
 	strWID.ps8WidVal = WILC_MALLOC(sizeof(u16) + 2);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 
 	*pu8CurrByte++ = pstrHostIfRegisterFrame->bReg;
 	*pu8CurrByte++ = pstrHostIfRegisterFrame->u8Regid;
-	WILC_memcpy(pu8CurrByte, &(pstrHostIfRegisterFrame->u16FrameType), sizeof(u16));
+	memcpy(pu8CurrByte, &(pstrHostIfRegisterFrame->u16FrameType), sizeof(u16));
 
 
 	strWID.s32ValueSize = sizeof(u16) + 2;
@@ -3949,7 +3926,7 @@
  *  @version		1.0
  */
 #define FALSE_FRMWR_CHANNEL 100
-static u32 Handle_ListenStateExpired(void *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
+static u32 Handle_ListenStateExpired(tstrWILC_WFIDrv *drvHandler, tstrHostIfRemainOnChan *pstrHostIfRemainOnChan)
 {
 	u8 u8remain_on_chan_flag;
 	tstrWID strWID;
@@ -3968,9 +3945,8 @@
 		strWID.s32ValueSize = 2;
 		strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
 
-		if (strWID.ps8WidVal == NULL) {
+		if (strWID.ps8WidVal == NULL)
 			PRINT_ER("Failed to allocate memory\n");
-		}
 
 		strWID.ps8WidVal[0] = u8remain_on_chan_flag;
 		strWID.ps8WidVal[1] = FALSE_FRMWR_CHANNEL;
@@ -4006,25 +3982,24 @@
  *  @date
  *  @version		1.0
  */
-static void ListenTimerCB(void *pvArg)
+static void ListenTimerCB(unsigned long arg)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
-	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)pvArg;
+	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)arg;
 	/*Stopping remain-on-channel timer*/
-	WILC_TimerStop(&(pstrWFIDrv->hRemainOnChannel), NULL);
+	del_timer(&pstrWFIDrv->hRemainOnChannel);
 
 	/* prepare the Timer Callback message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_LISTEN_TIMER_FIRED;
 	strHostIFmsg.drvHandler = pstrWFIDrv;
 	strHostIFmsg.uniHostIFmsgBody.strHostIfRemainOnChan.u32ListenSessionID = pstrWFIDrv->strHostIfRemainOnChan.u32ListenSessionID;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -4042,19 +4017,19 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_PowerManagement(void *drvHandler, tstrHostIfPowerMgmtParam *strPowerMgmtParam)
+static void Handle_PowerManagement(tstrWILC_WFIDrv *drvHandler, tstrHostIfPowerMgmtParam *strPowerMgmtParam)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
 	s8 s8PowerMode;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
+
 	strWID.u16WIDid = (u16)WID_POWER_MANAGEMENT;
 
-	if (strPowerMgmtParam->bIsEnabled == true)	{
+	if (strPowerMgmtParam->bIsEnabled == true)
 		s8PowerMode = MIN_FAST_PS;
-	} else {
+	else
 		s8PowerMode = NO_POWERSAVE;
-	}
 	PRINT_D(HOSTINF_DBG, "Handling power mgmt to %d\n", s8PowerMode);
 	strWID.ps8WidVal = &s8PowerMode;
 	strWID.s32ValueSize = sizeof(char);
@@ -4083,7 +4058,7 @@
  *  @date
  *  @version	1.0
  */
-static void Handle_SetMulticastFilter(void *drvHandler, tstrHostIFSetMulti *strHostIfSetMulti)
+static void Handle_SetMulticastFilter(tstrWILC_WFIDrv *drvHandler, tstrHostIFSetMulti *strHostIfSetMulti)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -4095,9 +4070,8 @@
 	strWID.enuWIDtype = WID_BIN;
 	strWID.s32ValueSize = sizeof(tstrHostIFSetMulti) + ((strHostIfSetMulti->u32count) * ETH_ALEN);
 	strWID.ps8WidVal = WILC_MALLOC(strWID.s32ValueSize);
-	if (strWID.ps8WidVal == NULL) {
+	if (strWID.ps8WidVal == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
 
 	pu8CurrByte = strWID.ps8WidVal;
 	*pu8CurrByte++ = (strHostIfSetMulti->bIsEnabled & 0xFF);
@@ -4123,7 +4097,7 @@
 	WILC_CATCH(s32Error)
 	{
 	}
-	WILC_FREE_IF_TRUE(strWID.ps8WidVal);
+	kfree(strWID.ps8WidVal);
 
 }
 
@@ -4138,7 +4112,7 @@
  *  @date			Feb. 2014
  *  @version		9.0
  */
-static s32 Handle_AddBASession(void *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
+static s32 Handle_AddBASession(tstrWILC_WFIDrv *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -4146,7 +4120,7 @@
 	char *ptr = NULL;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
 
-	PRINT_D(HOSTINF_DBG, "Opening Block Ack session with\nBSSID = %.2x:%.2x:%.2x \nTID=%d \nBufferSize == %d \nSessionTimeOut = %d\n",
+	PRINT_D(HOSTINF_DBG, "Opening Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\nBufferSize == %d\nSessionTimeOut = %d\n",
 		strHostIfBASessionInfo->au8Bssid[0],
 		strHostIfBASessionInfo->au8Bssid[1],
 		strHostIfBASessionInfo->au8Bssid[2],
@@ -4156,14 +4130,14 @@
 
 	strWID.u16WIDid = (u16)WID_11E_P_ACTION_REQ;
 	strWID.enuWIDtype = WID_STR;
-	strWID.ps8WidVal = (u8 *)WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
+	strWID.ps8WidVal = WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
 	strWID.s32ValueSize = BLOCK_ACK_REQ_SIZE;
 	ptr = strWID.ps8WidVal;
 	/* *ptr++ = 0x14; */
 	*ptr++ = 0x14;
 	*ptr++ = 0x3;
 	*ptr++ = 0x0;
-	WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+	memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
 	ptr += ETH_ALEN;
 	*ptr++ = strHostIfBASessionInfo->u8Ted;
 	/* BA Policy*/
@@ -4195,7 +4169,7 @@
 	*ptr++ = 15;
 	*ptr++ = 7;
 	*ptr++ = 0x2;
-	WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+	memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
 	ptr += ETH_ALEN;
 	/* TID*/
 	*ptr++ = strHostIfBASessionInfo->u8Ted;
@@ -4209,7 +4183,7 @@
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
 	if (strWID.ps8WidVal != NULL)
-		WILC_FREE(strWID.ps8WidVal);
+		kfree(strWID.ps8WidVal);
 
 	return s32Error;
 
@@ -4226,14 +4200,14 @@
  *  @date			Feb. 2013
  *  @version		9.0
  */
-static s32 Handle_DelBASession(void *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
+static s32 Handle_DelBASession(tstrWILC_WFIDrv *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
 	char *ptr = NULL;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
 
-	PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x \nTID=%d\n",
+	PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n",
 		strHostIfBASessionInfo->au8Bssid[0],
 		strHostIfBASessionInfo->au8Bssid[1],
 		strHostIfBASessionInfo->au8Bssid[2],
@@ -4241,14 +4215,14 @@
 
 	strWID.u16WIDid = (u16)WID_11E_P_ACTION_REQ;
 	strWID.enuWIDtype = WID_STR;
-	strWID.ps8WidVal = (u8 *)WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
+	strWID.ps8WidVal = WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
 	strWID.s32ValueSize = BLOCK_ACK_REQ_SIZE;
 	ptr = strWID.ps8WidVal;
 	/* *ptr++ = 0x14; */
 	*ptr++ = 0x14;
 	*ptr++ = 0x3;
 	*ptr++ = 0x2;
-	WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+	memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
 	ptr += ETH_ALEN;
 	*ptr++ = strHostIfBASessionInfo->u8Ted;
 	/* BA direction = recipent*/
@@ -4269,7 +4243,7 @@
 	*ptr++ = 15;
 	*ptr++ = 7;
 	*ptr++ = 0x3;
-	WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+	memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
 	ptr += ETH_ALEN;
 	/* TID*/
 	*ptr++ = strHostIfBASessionInfo->u8Ted;
@@ -4277,7 +4251,7 @@
 	s32Error = SendConfigPkt(SET_CFG, &strWID, 1, true, (u32)pstrWFIDrv);
 
 	if (strWID.ps8WidVal != NULL)
-		WILC_FREE(strWID.ps8WidVal);
+		kfree(strWID.ps8WidVal);
 
 	/*BugID_5222*/
 	up(&hWaitResponse);
@@ -4296,14 +4270,14 @@
  *  @date			Feb. 2013
  *  @version		9.0
  */
-static s32 Handle_DelAllRxBASessions(void *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
+static s32 Handle_DelAllRxBASessions(tstrWILC_WFIDrv *drvHandler, tstrHostIfBASessionInfo *strHostIfBASessionInfo)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
 	char *ptr = NULL;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
 
-	PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x \nTID=%d\n",
+	PRINT_D(GENERIC_DBG, "Delete Block Ack session with\nBSSID = %.2x:%.2x:%.2x\nTID=%d\n",
 		strHostIfBASessionInfo->au8Bssid[0],
 		strHostIfBASessionInfo->au8Bssid[1],
 		strHostIfBASessionInfo->au8Bssid[2],
@@ -4311,13 +4285,13 @@
 
 	strWID.u16WIDid = (u16)WID_DEL_ALL_RX_BA;
 	strWID.enuWIDtype = WID_STR;
-	strWID.ps8WidVal = (u8 *)WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
+	strWID.ps8WidVal = WILC_MALLOC(BLOCK_ACK_REQ_SIZE);
 	strWID.s32ValueSize = BLOCK_ACK_REQ_SIZE;
 	ptr = strWID.ps8WidVal;
 	*ptr++ = 0x14;
 	*ptr++ = 0x3;
 	*ptr++ = 0x2;
-	WILC_memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
+	memcpy(ptr, strHostIfBASessionInfo->au8Bssid, ETH_ALEN);
 	ptr += ETH_ALEN;
 	*ptr++ = strHostIfBASessionInfo->u8Ted;
 	/* BA direction = recipent*/
@@ -4331,7 +4305,7 @@
 
 
 	if (strWID.ps8WidVal != NULL)
-		WILC_FREE(strWID.ps8WidVal);
+		kfree(strWID.ps8WidVal);
 
 	/*BugID_5222*/
 	up(&hWaitResponse);
@@ -4355,10 +4329,10 @@
 	tstrHostIFmsg strHostIFmsg;
 	tstrWILC_WFIDrv *pstrWFIDrv;
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	while (1) {
-		WILC_MsgQueueRecv(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), &u32Ret, NULL);
+		WILC_MsgQueueRecv(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), &u32Ret);
 		pstrWFIDrv = (tstrWILC_WFIDrv *)strHostIFmsg.drvHandler;
 		if (strHostIFmsg.u16MsgId == HOST_IF_MSG_EXIT) {
 			PRINT_D(GENERIC_DBG, "THREAD: Exiting HostIfThread\n");
@@ -4369,15 +4343,15 @@
 		/*Re-Queue HIF message*/
 		if ((!g_wilc_initialized)) {
 			PRINT_D(GENERIC_DBG, "--WAIT--");
-			WILC_Sleep(200);
-			WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+			usleep_range(200 * 1000, 200 * 1000);
+			WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 			continue;
 		}
 
 		if (strHostIFmsg.u16MsgId == HOST_IF_MSG_CONNECT && pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult != NULL) {
 			PRINT_D(HOSTINF_DBG, "Requeue connect request till scan done received\n");
-			WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-			WILC_Sleep(2);
+			WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+			usleep_range(2 * 1000, 2 * 1000);
 			continue;
 		}
 
@@ -4425,14 +4399,13 @@
 			break;
 
 		case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
-			WILC_TimerStop(&(pstrWFIDrv->hScanTimer), NULL);
+			del_timer(&pstrWFIDrv->hScanTimer);
 			PRINT_D(HOSTINF_DBG, "scan completed successfully\n");
 
 			/*BugID_5213*/
 			/*Allow chip sleep, only if both interfaces are not connected*/
-			if (!linux_wlan_get_num_conn_ifcs()) {
+			if (!linux_wlan_get_num_conn_ifcs())
 				chip_sleep_manually(INFINITE_SLEEP_TIME);
-			}
 
 			Handle_ScanDone(strHostIFmsg.drvHandler, SCAN_EVENT_DONE);
 
@@ -4492,7 +4465,7 @@
 			break;
 
 		case HOST_IF_MSG_CONNECT_TIMER_FIRED:
-			PRINT_D(HOSTINF_DBG, "Connect Timeout \n");
+			PRINT_D(HOSTINF_DBG, "Connect Timeout\n");
 			Handle_ConnectTimeout(strHostIFmsg.drvHandler);
 			break;
 
@@ -4563,7 +4536,7 @@
 			break;
 
 		default:
-			PRINT_ER("[Host Interface] undefined Received Msg ID  \n");
+			PRINT_ER("[Host Interface] undefined Received Msg ID\n");
 			break;
 		}
 	}
@@ -4573,30 +4546,32 @@
 	return 0;
 }
 
-static void TimerCB_Scan(void *pvArg)
+static void TimerCB_Scan(unsigned long arg)
 {
+	void *pvArg = (void *)arg;
 	tstrHostIFmsg strHostIFmsg;
 
 	/* prepare the Timer Callback message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.drvHandler = pvArg;
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SCAN_TIMER_FIRED;
 
 	/* send the message */
-	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 }
 
-static void TimerCB_Connect(void *pvArg)
+static void TimerCB_Connect(unsigned long arg)
 {
+	void *pvArg = (void *)arg;
 	tstrHostIFmsg strHostIFmsg;
 
 	/* prepare the Timer Callback message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.drvHandler = pvArg;
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_CONNECT_TIMER_FIRED;
 
 	/* send the message */
-	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 }
 
 
@@ -4613,7 +4588,7 @@
  *  @version		1.0
  */
 /* Check implementation in core adding 9 bytes to the input! */
-s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8StaAddress)
+s32 host_int_remove_key(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8StaAddress)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -4642,19 +4617,18 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8keyIdx)
+s32 host_int_remove_wep_key(tstrWILC_WFIDrv *hWFIDrv, u8 u8keyIdx)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/* prepare the Remove Wep Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4668,9 +4642,9 @@
 	uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx = u8keyIdx;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
-		PRINT_ER("Error in sending message queue : Request to remove WEP key \n");
+		PRINT_ER("Error in sending message queue : Request to remove WEP key\n");
 	down(&(pstrWFIDrv->hSemTestKeyBlock));
 
 	WILC_CATCH(s32Error)
@@ -4692,19 +4666,18 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index)
+s32 host_int_set_WEPDefaultKeyID(tstrWILC_WFIDrv *hWFIDrv, u8 u8Index)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/* prepare the Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4717,7 +4690,7 @@
 	uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx = u8Index;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER("Error in sending message queue : Default key index\n");
 	down(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -4749,20 +4722,19 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx)
+s32 host_int_add_wep_key_bss_sta(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx)
 {
 
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 
-	}
 
 	/* prepare the Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4772,9 +4744,9 @@
 
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
-	uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = (u8 *)WILC_MALLOC(u8WepKeylen);
+	uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = WILC_MALLOC(u8WepKeylen);
 
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
 		    pu8WepKey, u8WepKeylen);
 
 
@@ -4785,7 +4757,7 @@
 	uniHostIFkeyAttr.strHostIFwepAttr.u8Wepidx = u8Keyidx;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER("Error in sending message queue :WEP Key\n");
 	down(&(pstrWFIDrv->hSemTestKeyBlock));
@@ -4815,7 +4787,7 @@
  *  @date		28 FEB 2013
  *  @version		1.0
  */
-s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type)
+s32 host_int_add_wep_key_bss_ap(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -4823,13 +4795,12 @@
 	tstrHostIFmsg strHostIFmsg;
 	u8 i;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 
-	}
 
 	/* prepare the Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	if (INFO) {
 		for (i = 0; i < u8WepKeylen; i++)
@@ -4842,10 +4813,10 @@
 
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
-	uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = (u8 *)WILC_MALLOC((u8WepKeylen));
+	uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey = WILC_MALLOC((u8WepKeylen));
 
 
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwepAttr.pu8WepKey,
 		    pu8WepKey, (u8WepKeylen));
 
 
@@ -4861,7 +4832,7 @@
 	strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
 	uniHostIFkeyAttr.strHostIFwepAttr.tenuAuth_type = tenuAuth_type;
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 
 	if (s32Error)
 		PRINT_ER("Error in sending message queue :WEP Key\n");
@@ -4891,7 +4862,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
+s32 host_int_add_ptk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
 			     const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode, u8 u8Idx)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -4899,18 +4870,16 @@
 	tstrHostIFmsg strHostIFmsg;
 	u8 u8KeyLen = u8PtkKeylen;
 	u32 i;
-	if (pstrWFIDrv == NULL) {
+
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
-	if (pu8RxMic != NULL) {
+	if (pu8RxMic != NULL)
 		u8KeyLen += RX_MIC_KEY_LEN;
-	}
-	if (pu8TxMic != NULL) {
+	if (pu8TxMic != NULL)
 		u8KeyLen += TX_MIC_KEY_LEN;
-	}
 
 	/* prepare the Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
@@ -4927,15 +4896,15 @@
 
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
-	uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = (u8 *)WILC_MALLOC(u8PtkKeylen);
+	uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = WILC_MALLOC(u8PtkKeylen);
 
 
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
 		    pu8Ptk, u8PtkKeylen);
 
 	if (pu8RxMic != NULL) {
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
 			    pu8RxMic, RX_MIC_KEY_LEN);
 		if (INFO) {
 			for (i = 0; i < RX_MIC_KEY_LEN; i++)
@@ -4944,7 +4913,7 @@
 	}
 	if (pu8TxMic != NULL) {
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
 			    pu8TxMic, TX_MIC_KEY_LEN);
 		if (INFO) {
 			for (i = 0; i < TX_MIC_KEY_LEN; i++)
@@ -4962,14 +4931,13 @@
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 
 	if (s32Error)
 		PRINT_ER("Error in sending message queue:  PTK Key\n");
 
 	/* ////////////// */
 	down(&(pstrWFIDrv->hSemTestKeyBlock));
-	/* WILC_Sleep(100); */
 	/* /////// */
 
 	WILC_CATCH(s32Error)
@@ -4993,7 +4961,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
+s32 host_int_add_rx_gtk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
 				u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC,
 				const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode)
 {
@@ -5002,24 +4970,21 @@
 	tstrHostIFmsg strHostIFmsg;
 	u8 u8KeyLen = u8GtkKeylen;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 	/* prepare the Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
-	if (pu8RxMic != NULL) {
+	if (pu8RxMic != NULL)
 		u8KeyLen += RX_MIC_KEY_LEN;
-	}
-	if (pu8TxMic != NULL) {
+	if (pu8TxMic != NULL)
 		u8KeyLen += TX_MIC_KEY_LEN;
-	}
 	if (KeyRSC != NULL) {
 		strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
-		uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq = (u8 *)WILC_MALLOC(u32KeyRSClen);
+		uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq = WILC_MALLOC(u32KeyRSClen);
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8seq,
 			    KeyRSC, u32KeyRSClen);
 	}
 
@@ -5039,20 +5004,20 @@
 
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.
-	uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = (u8 *)WILC_MALLOC(u8KeyLen);
+	uniHostIFkeyAttr.strHostIFwpaAttr.pu8key = WILC_MALLOC(u8KeyLen);
 
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key,
 		    pu8RxGtk, u8GtkKeylen);
 
 	if (pu8RxMic != NULL) {
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 16,
 			    pu8RxMic, RX_MIC_KEY_LEN);
 
 	}
 	if (pu8TxMic != NULL) {
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFwpaAttr.pu8key + 24,
 			    pu8TxMic, TX_MIC_KEY_LEN);
 
 	}
@@ -5068,12 +5033,11 @@
 
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER("Error in sending message queue:  RX GTK\n");
 	/* ////////////// */
 	down(&(pstrWFIDrv->hSemTestKeyBlock));
-	/* WILC_Sleep(100); */
 	/* /////// */
 
 	WILC_CATCH(s32Error)
@@ -5103,7 +5067,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray)
+s32 host_int_set_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -5111,12 +5075,11 @@
 	u32 i;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/* prepare the Key Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_KEY;
 	strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.enuKeyType = PMKSA;
@@ -5125,15 +5088,15 @@
 
 	for (i = 0; i < pu8PmkidInfoArray->numpmkid; i++) {
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, &pu8PmkidInfoArray->pmkidlist[i].bssid,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].bssid, &pu8PmkidInfoArray->pmkidlist[i].bssid,
 			    ETH_ALEN);
 
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, &pu8PmkidInfoArray->pmkidlist[i].pmkid,
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFkeyAttr.uniHostIFkeyAttr.strHostIFpmkidAttr.pmkidlist[i].pmkid, &pu8PmkidInfoArray->pmkidlist[i].pmkid,
 			    PMKID_LEN);
 	}
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER(" Error in sending messagequeue: PMKID Info\n");
 
@@ -5166,7 +5129,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_pmkid_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PmkidInfoArray,
+s32 host_int_get_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PmkidInfoArray,
 				    u32 u32PmkidInfoLen)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -5195,14 +5158,12 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PassPhrase,
+s32 host_int_set_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PassPhrase,
 						 u8 u8Psklength)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
-	/* tstrWILC_WFIDrv * pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv; */
 
-	/* u8 u8Psklength = WILC_strlen(pu8PassPhrase); */
 	/*validating psk length*/
 	if ((u8Psklength > 7) && (u8Psklength < 65)) {
 		strWID.u16WIDid	= (u16)WID_11I_PSK;
@@ -5224,20 +5185,20 @@
  *  @date		19 April 2012
  *  @version		1.0
  */
-s32 host_int_get_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
+s32 host_int_get_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
 
 
 	/* prepare the Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_MAC_ADDRESS;
 	strHostIFmsg.uniHostIFmsgBody.strHostIfGetMacAddress.u8MacAddress = pu8MacAddress;
 	strHostIFmsg.drvHandler = hWFIDrv;
 	/* send the message */
-	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send get mac address\n");
 		return WILC_FAIL;
@@ -5258,7 +5219,7 @@
  *  @date		16 July 2012
  *  @version		1.0
  */
-s32 host_int_set_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress)
+s32 host_int_set_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
@@ -5266,12 +5227,12 @@
 	PRINT_D(GENERIC_DBG, "mac addr = %x:%x:%x\n", pu8MacAddress[0], pu8MacAddress[1], pu8MacAddress[2]);
 
 	/* prepare setting mac address message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_MAC_ADDRESS;
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfSetMacAddress.u8MacAddress, pu8MacAddress, ETH_ALEN);
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfSetMacAddress.u8MacAddress, pu8MacAddress, ETH_ALEN);
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send message queue: Set mac address\n");
 		WILC_ERRORREPORT(s32Error, s32Error);
@@ -5299,7 +5260,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv,
 						 u8 *pu8PassPhrase, u8 u8Psklength)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -5346,7 +5307,7 @@
  *  @version		1.0
  */
 #ifndef CONNECT_DIRECT
-s32 host_int_get_site_survey_results(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_site_survey_results(tstrWILC_WFIDrv *hWFIDrv,
 					     u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
 					     u32 u32MaxSiteSrvyFragLen)
 {
@@ -5396,7 +5357,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 scanSource)
+s32 host_int_set_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 scanSource)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -5426,7 +5387,7 @@
  *  @version		1.0
  */
 
-s32 host_int_get_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ScanSource)
+s32 host_int_get_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ScanSource)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -5451,7 +5412,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
+s32 host_int_set_join_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8bssid,
 				  const u8 *pu8ssid, size_t ssidLen,
 				  const u8 *pu8IEs, size_t IEsLen,
 				  tWILCpfConnectResult pfConnectResult, void *pvUserArg,
@@ -5464,9 +5425,8 @@
 	tstrHostIFmsg strHostIFmsg;
 	tenuScanConnTimer enuScanConnTimer;
 
-	if (pstrWFIDrv == NULL || pfConnectResult == NULL) {
+	if (pstrWFIDrv == NULL || pfConnectResult == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	if (hWFIDrv == NULL) {
 		PRINT_ER("Driver not initialized: gWFiDrvHandle = NULL\n");
@@ -5486,7 +5446,7 @@
  *      }
  */
 	/* prepare the Connect Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_CONNECT;
 
@@ -5499,39 +5459,41 @@
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	if (pu8bssid != NULL) {
-		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid = (u8 *)WILC_MALLOC(6); /* will be deallocated by the receiving thread */
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid,
+		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid = WILC_MALLOC(6); /* will be deallocated by the receiving thread */
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8bssid,
 			    pu8bssid, 6);
 	}
 
 	if (pu8ssid != NULL) {
 		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.ssidLen = ssidLen;
-		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid = (u8 *)WILC_MALLOC(ssidLen); /* will be deallocated by the receiving thread */
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid,
+		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid = WILC_MALLOC(ssidLen); /* will be deallocated by the receiving thread */
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8ssid,
 
 			    pu8ssid, ssidLen);
 	}
 
 	if (pu8IEs != NULL) {
 		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.IEsLen = IEsLen;
-		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs = (u8 *)WILC_MALLOC(IEsLen); /* will be deallocated by the receiving thread */
-		WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs,
+		strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs = WILC_MALLOC(IEsLen); /* will be deallocated by the receiving thread */
+		memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFconnectAttr.pu8IEs,
 			    pu8IEs, IEsLen);
 	}
-	if (pstrWFIDrv->enuHostIFstate < HOST_IF_CONNECTING) {
+	if (pstrWFIDrv->enuHostIFstate < HOST_IF_CONNECTING)
 		pstrWFIDrv->enuHostIFstate = HOST_IF_CONNECTING;
-	} else
+	else
 		PRINT_D(GENERIC_DBG, "Don't set state to 'connecting' as state is %d\n", pstrWFIDrv->enuHostIFstate);
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send message queue: Set join request\n");
 		WILC_ERRORREPORT(s32Error, WILC_FAIL);
 	}
 
 	enuScanConnTimer = CONNECT_TIMER;
-	WILC_TimerStart(&(pstrWFIDrv->hConnectTimer), HOST_IF_CONNECT_TIMEOUT, (void *) hWFIDrv, NULL);
+	pstrWFIDrv->hConnectTimer.data = (unsigned long)hWFIDrv;
+	mod_timer(&pstrWFIDrv->hConnectTimer,
+		  jiffies + msecs_to_jiffies(HOST_IF_CONNECT_TIMEOUT));
 
 	WILC_CATCH(s32Error)
 	{
@@ -5553,7 +5515,7 @@
  *  @version	8.0
  */
 
-s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv)
+s32 host_int_flush_join_req(tstrWILC_WFIDrv *hWFIDrv)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
@@ -5564,16 +5526,15 @@
 	}
 
 
-	if (hWFIDrv  == NULL) {
+	if (hWFIDrv  == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_FLUSH_CONNECT;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send message queue: Flush join request\n");
 		WILC_ERRORREPORT(s32Error, WILC_FAIL);
@@ -5597,14 +5558,14 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode)
+s32 host_int_disconnect(tstrWILC_WFIDrv *hWFIDrv, u16 u16ReasonCode)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 
@@ -5614,13 +5575,13 @@
 	}
 
 	/* prepare the Disconnect Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_DISCONNECT;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER("Failed to send message queue: disconnect\n");
 	/* ////////////// */
@@ -5646,7 +5607,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_disconnect_station(WILC_WFIDrvHandle hWFIDrv, u8 assoc_id)
+s32 host_int_disconnect_station(tstrWILC_WFIDrv *hWFIDrv, u8 assoc_id)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID strWID;
@@ -5686,7 +5647,7 @@
  *  @version		1.0
  */
 
-s32 host_int_get_assoc_req_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocReqInfo,
+s32 host_int_get_assoc_req_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocReqInfo,
 					u32 u32AssocReqInfoLen)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -5713,7 +5674,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
+s32 host_int_get_assoc_res_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocRespInfo,
 					u32 u32MaxAssocRespInfoLen, u32 *pu32RcvdAssocRespInfoLen)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -5721,7 +5682,7 @@
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 
@@ -5763,7 +5724,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_rx_power_level(WILC_WFIDrvHandle hWFIDrv, u8 *pu8RxPowerLevel,
+s32 host_int_get_rx_power_level(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8RxPowerLevel,
 					u32 u32RxPowerLevelLen)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -5794,26 +5755,24 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_mac_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 u8ChNum)
+s32 host_int_set_mac_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 u8ChNum)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/* prepare the set channel message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_CHANNEL;
 	strHostIFmsg.uniHostIFmsgBody.strHostIFSetChan.u8SetChan = u8ChNum;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -5831,12 +5790,11 @@
 
 	/* prepare the set driver handler message */
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_Q_IDLE;
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -5849,7 +5807,7 @@
 
 }
 
-s32 host_int_set_wfi_drv_handler(u32 u32address)
+s32 host_int_set_wfi_drv_handler(tstrWILC_WFIDrv *u32address)
 {
 	s32 s32Error = WILC_SUCCESS;
 
@@ -5858,15 +5816,14 @@
 
 	/* prepare the set driver handler message */
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_WFIDRV_HANDLER;
 	strHostIFmsg.uniHostIFmsgBody.strHostIfSetDrvHandler.u32Address = u32address;
 	/* strHostIFmsg.drvHandler=hWFIDrv; */
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -5877,7 +5834,7 @@
 
 
 
-s32 host_int_set_operation_mode(WILC_WFIDrvHandle hWFIDrv, u32 u32mode)
+s32 host_int_set_operation_mode(tstrWILC_WFIDrv *hWFIDrv, u32 u32mode)
 {
 	s32 s32Error = WILC_SUCCESS;
 
@@ -5886,15 +5843,14 @@
 
 	/* prepare the set driver handler message */
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_OPERATION_MODE;
 	strHostIFmsg.uniHostIFmsgBody.strHostIfSetOperationMode.u32Mode = u32mode;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -5918,25 +5874,25 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_host_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ChNo)
+s32 host_int_get_host_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ChNo)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 
 	/* prepare the Get Channel Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_CHNL;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER("Failed to send get host channel param's message queue ");
 	down(&(pstrWFIDrv->hSemGetCHNL));
@@ -5964,7 +5920,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_test_set_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 u32TestMemAddr)
+s32 host_int_test_set_int_wid(tstrWILC_WFIDrv *hWFIDrv, u32 u32TestMemAddr)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWID	strWID;
@@ -5972,7 +5928,7 @@
 
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 
@@ -6011,28 +5967,28 @@
  *  @date
  *  @version		1.0
  */
-s32 host_int_get_inactive_time(WILC_WFIDrvHandle hWFIDrv, const u8 *mac, u32 *pu32InactiveTime)
+s32 host_int_get_inactive_time(tstrWILC_WFIDrv *hWFIDrv, const u8 *mac, u32 *pu32InactiveTime)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfStaInactiveT.mac,
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIfStaInactiveT.mac,
 		    mac, ETH_ALEN);
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_INACTIVETIME;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error)
 		PRINT_ER("Failed to send get host channel param's message queue ");
 
@@ -6057,7 +6013,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_test_get_int_wid(WILC_WFIDrvHandle hWFIDrv, u32 *pu32TestMemAddr)
+s32 host_int_test_get_int_wid(tstrWILC_WFIDrv *hWFIDrv, u32 *pu32TestMemAddr)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -6066,7 +6022,7 @@
 
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 
@@ -6106,7 +6062,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_rssi(WILC_WFIDrvHandle hWFIDrv, s8 *ps8Rssi)
+s32 host_int_get_rssi(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8Rssi)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
@@ -6114,13 +6070,13 @@
 
 
 	/* prepare the Get RSSI Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_RSSI;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send get host channel param's message queue ");
 		return WILC_FAIL;
@@ -6141,7 +6097,7 @@
 	return s32Error;
 }
 
-s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd)
+s32 host_int_get_link_speed(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8lnkspd)
 {
 	tstrHostIFmsg strHostIFmsg;
 	s32 s32Error = WILC_SUCCESS;
@@ -6151,13 +6107,13 @@
 
 
 	/* prepare the Get LINKSPEED Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_LINKSPEED;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
 	/* send the message */
-	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send GET_LINKSPEED to message queue ");
 		return WILC_FAIL;
@@ -6178,20 +6134,20 @@
 	return s32Error;
 }
 
-s32 host_int_get_statistics(WILC_WFIDrvHandle hWFIDrv, tstrStatistics *pstrStatistics)
+s32 host_int_get_statistics(tstrWILC_WFIDrv *hWFIDrv, tstrStatistics *pstrStatistics)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
 
 
 	/* prepare the Get RSSI Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_STATISTICS;
 	strHostIFmsg.uniHostIFmsgBody.pUserData = (char *)pstrStatistics;
 	strHostIFmsg.drvHandler = hWFIDrv;
 	/* send the message */
-	s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Failed to send get host channel param's message queue ");
 		return WILC_FAIL;
@@ -6218,7 +6174,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
+s32 host_int_scan(tstrWILC_WFIDrv *hWFIDrv, u8 u8ScanSource,
 			  u8 u8ScanType, u8 *pu8ChnlFreqList,
 			  u8 u8ChnlListLen, const u8 *pu8IEs,
 			  size_t IEsLen, tWILCpfScanResult ScanResult,
@@ -6229,13 +6185,12 @@
 	tstrHostIFmsg strHostIFmsg;
 	tenuScanConnTimer enuScanConnTimer;
 
-	if (pstrWFIDrv == NULL || ScanResult == NULL)	{
+	if (pstrWFIDrv == NULL || ScanResult == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 
 	/* prepare the Scan Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SCAN;
 
@@ -6253,17 +6208,17 @@
 	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pvUserArg = pvUserArg;
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.u8ChnlListLen = u8ChnlListLen;
-	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList = (u8 *)WILC_MALLOC(u8ChnlListLen);        /* will be deallocated by the receiving thread */
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList,
+	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList = WILC_MALLOC(u8ChnlListLen);        /* will be deallocated by the receiving thread */
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8ChnlFreqList,
 		    pu8ChnlFreqList, u8ChnlListLen);
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.IEsLen = IEsLen;
-	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs = (u8 *)WILC_MALLOC(IEsLen);        /* will be deallocated by the receiving thread */
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs,
+	strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs = WILC_MALLOC(IEsLen);        /* will be deallocated by the receiving thread */
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strHostIFscanAttr.pu8IEs,
 		    pu8IEs, IEsLen);
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	if (s32Error) {
 		PRINT_ER("Error in sending message queue scanning parameters: Error(%d)\n", s32Error);
 		WILC_ERRORREPORT(s32Error, WILC_FAIL);
@@ -6271,8 +6226,9 @@
 
 	enuScanConnTimer = SCAN_TIMER;
 	PRINT_D(HOSTINF_DBG, ">> Starting the SCAN timer\n");
-	WILC_TimerStart(&(pstrWFIDrv->hScanTimer), HOST_IF_SCAN_TIMEOUT, (void *) hWFIDrv, NULL);
-
+	pstrWFIDrv->hScanTimer.data = (unsigned long)hWFIDrv;
+	mod_timer(&pstrWFIDrv->hScanTimer,
+		  jiffies + msecs_to_jiffies(HOST_IF_SCAN_TIMEOUT));
 
 	WILC_CATCH(s32Error)
 	{
@@ -6292,7 +6248,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal)
+s32 hif_set_cfg(tstrWILC_WFIDrv *hWFIDrv, tstrCfgParamVal *pstrCfgParamVal)
 {
 
 	s32 s32Error = WILC_SUCCESS;
@@ -6301,16 +6257,15 @@
 	tstrHostIFmsg strHostIFmsg;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 	/* prepare the WiphyParams Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_CFG_PARAMS;
 	strHostIFmsg.uniHostIFmsgBody.strHostIFCfgParamAttr.pstrCfgParamVal = *pstrCfgParamVal;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 
 	WILC_CATCH(s32Error)
 	{
@@ -6334,7 +6289,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 hif_get_cfg(WILC_WFIDrvHandle hWFIDrv, u16 u16WID, u16 *pu16WID_Value)
+s32 hif_get_cfg(tstrWILC_WFIDrv *hWFIDrv, u16 u16WID, u16 *pu16WID_Value)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -6342,7 +6297,7 @@
 	down(&(pstrWFIDrv->gtOsCfgValuesSem));
 
 	if (pstrWFIDrv == NULL) {
-		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL \n");
+		PRINT_ER("Driver not initialized: pstrWFIDrv = NULL\n");
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
 	}
 	PRINT_D(HOSTINF_DBG, "Getting configuration parameters\n");
@@ -6469,9 +6424,10 @@
  *  @version		1.0
  */
 
-void GetPeriodicRSSI(void *pvArg)
+static void GetPeriodicRSSI(unsigned long arg)
 {
-	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)pvArg;
+	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)arg;
+
 	if (pstrWFIDrv == NULL)	{
 		PRINT_ER("Driver handler is NULL\n");
 		return;
@@ -6482,19 +6438,20 @@
 		tstrHostIFmsg strHostIFmsg;
 
 		/* prepare the Get RSSI Message */
-		WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+		memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 		strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_RSSI;
 		strHostIFmsg.drvHandler = pstrWFIDrv;
 
 		/* send the message */
-		s32Error =	WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+		s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 		if (s32Error) {
 			PRINT_ER("Failed to send get host channel param's message queue ");
 			return;
 		}
 	}
-	WILC_TimerStart(&(g_hPeriodicRSSI), 5000, (void *)pstrWFIDrv, NULL);
+	g_hPeriodicRSSI.data = (unsigned long)pstrWFIDrv;
+	mod_timer(&g_hPeriodicRSSI, jiffies + msecs_to_jiffies(5000));
 }
 
 
@@ -6515,7 +6472,7 @@
 static u32 msgQ_created;
 static u32 clients_count;
 
-s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv)
+s32 host_int_init(tstrWILC_WFIDrv **phWFIDrv)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv;
@@ -6535,16 +6492,16 @@
 
 
 	/*Allocate host interface private structure*/
-	pstrWFIDrv  = (tstrWILC_WFIDrv *)WILC_MALLOC(sizeof(tstrWILC_WFIDrv));
+	pstrWFIDrv  = WILC_MALLOC(sizeof(tstrWILC_WFIDrv));
 	if (pstrWFIDrv == NULL) {
 		/* WILC_ERRORREPORT(s32Error,WILC_NO_MEM); */
 		s32Error = WILC_NO_MEM;
 		PRINT_ER("Failed to allocate memory\n");
 		goto _fail_timer_2;
 	}
-	WILC_memset(pstrWFIDrv, 0, sizeof(tstrWILC_WFIDrv));
+	memset(pstrWFIDrv, 0, sizeof(tstrWILC_WFIDrv));
 	/*return driver handle to user*/
-	*phWFIDrv = (WILC_WFIDrvHandle)pstrWFIDrv;
+	*phWFIDrv = pstrWFIDrv;
 	/*save into globl handle*/
 
 	#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
@@ -6575,9 +6532,7 @@
 	PRINT_D(HOSTINF_DBG, "INIT: CLIENT COUNT %d\n", clients_count);
 
 	if (clients_count == 0)	{
-
-		s32Error = WILC_MsgQueueCreate(&gMsgQHostIF, NULL);
-
+		s32Error = WILC_MsgQueueCreate(&gMsgQHostIF);
 
 		if (s32Error < 0) {
 			PRINT_ER("Failed to creat MQ\n");
@@ -6590,47 +6545,24 @@
 			s32Error = WILC_FAIL;
 			goto _fail_mq_;
 		}
-		s32Error = WILC_TimerCreate(&(g_hPeriodicRSSI), GetPeriodicRSSI, NULL);
-		if (s32Error < 0) {
-			PRINT_ER("Failed to creat Timer\n");
-			goto _fail_timer_1;
-		}
-		WILC_TimerStart(&(g_hPeriodicRSSI), 5000, (void *)pstrWFIDrv, NULL);
-
+		setup_timer(&g_hPeriodicRSSI, GetPeriodicRSSI,
+			    (unsigned long)pstrWFIDrv);
+		mod_timer(&g_hPeriodicRSSI, jiffies + msecs_to_jiffies(5000));
 	}
 
 
-	s32Error = WILC_TimerCreate(&(pstrWFIDrv->hScanTimer), TimerCB_Scan, NULL);
-	if (s32Error < 0) {
-		PRINT_ER("Failed to creat Timer\n");
-		goto _fail_thread_;
-	}
+	setup_timer(&pstrWFIDrv->hScanTimer, TimerCB_Scan, 0);
 
-	s32Error = WILC_TimerCreate(&(pstrWFIDrv->hConnectTimer), TimerCB_Connect, NULL);
-	if (s32Error < 0) {
-		PRINT_ER("Failed to creat Timer\n");
-		goto _fail_timer_1;
-	}
-
+	setup_timer(&pstrWFIDrv->hConnectTimer, TimerCB_Connect, 0);
 
 	#ifdef WILC_P2P
 	/*Remain on channel timer*/
-	s32Error = WILC_TimerCreate(&(pstrWFIDrv->hRemainOnChannel), ListenTimerCB, NULL);
-	if (s32Error < 0) {
-		PRINT_ER("Failed to creat Remain-on-channel Timer\n");
-		goto _fail_timer_3;
-	}
+	setup_timer(&pstrWFIDrv->hRemainOnChannel, ListenTimerCB, 0);
 	#endif
 
 	sema_init(&(pstrWFIDrv->gtOsCfgValuesSem), 1);
 	down(&(pstrWFIDrv->gtOsCfgValuesSem));
 
-
-
-#ifdef SIMULATION
-	TransportInit();
-#endif
-
 	pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
 	/* gWFiDrvHandle->bPendingConnRequest = false; */
 
@@ -6666,11 +6598,6 @@
 		goto _fail_mem_;
 	}
 
-#ifdef SIMULATION
-	/*Initialize Simulaor*/
-	CoreConfigSimulatorInit();
-#endif
-
 	u32Intialized = 1;
 	clients_count++; /* increase number of created entities */
 
@@ -6679,20 +6606,17 @@
 
 _fail_mem_:
 	if (pstrWFIDrv != NULL)
-		WILC_FREE(pstrWFIDrv);
+		kfree(pstrWFIDrv);
 #ifdef WILC_P2P
-_fail_timer_3:
-	WILC_TimerDestroy(&(pstrWFIDrv->hRemainOnChannel), NULL);
+	del_timer_sync(&pstrWFIDrv->hRemainOnChannel);
 #endif
 _fail_timer_2:
 	up(&(pstrWFIDrv->gtOsCfgValuesSem));
-	WILC_TimerDestroy(&(pstrWFIDrv->hConnectTimer), NULL);
-_fail_timer_1:
-	WILC_TimerDestroy(&(pstrWFIDrv->hScanTimer), NULL);
-_fail_thread_:
+	del_timer_sync(&pstrWFIDrv->hConnectTimer);
+	del_timer_sync(&pstrWFIDrv->hScanTimer);
 	kthread_stop(HostIFthreadHandler);
 _fail_mq_:
-	WILC_MsgQueueDestroy(&gMsgQHostIF, NULL);
+	WILC_MsgQueueDestroy(&gMsgQHostIF);
 _fail_:
 	return s32Error;
 
@@ -6708,7 +6632,7 @@
  *  @version		1.0
  */
 
-s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv)
+s32 host_int_deinit(tstrWILC_WFIDrv *hWFIDrv)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrHostIFmsg strHostIFmsg;
@@ -6737,28 +6661,28 @@
 	/*BugID_5348*/
 	/*Destroy all timers before acquiring hSemDeinitDrvHandle*/
 	/*to guarantee handling all messages befor proceeding*/
-	if (WILC_TimerDestroy(&(pstrWFIDrv->hScanTimer), NULL)) {
-		PRINT_D(HOSTINF_DBG, ">> Scan timer is active \n");
+	if (del_timer_sync(&pstrWFIDrv->hScanTimer)) {
+		PRINT_D(HOSTINF_DBG, ">> Scan timer is active\n");
 		/* msleep(HOST_IF_SCAN_TIMEOUT+1000); */
 	}
 
-	if (WILC_TimerDestroy(&(pstrWFIDrv->hConnectTimer), NULL)) {
-		PRINT_D(HOSTINF_DBG, ">> Connect timer is active \n");
+	if (del_timer_sync(&pstrWFIDrv->hConnectTimer)) {
+		PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
 		/* msleep(HOST_IF_CONNECT_TIMEOUT+1000); */
 	}
 
 
-	if (WILC_TimerDestroy(&(g_hPeriodicRSSI), NULL)) {
-		PRINT_D(HOSTINF_DBG, ">> Connect timer is active \n");
+	if (del_timer_sync(&g_hPeriodicRSSI)) {
+		PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
 		/* msleep(HOST_IF_CONNECT_TIMEOUT+1000); */
 	}
 
 	#ifdef WILC_P2P
 	/*Destroy Remain-onchannel Timer*/
-	WILC_TimerDestroy(&(pstrWFIDrv->hRemainOnChannel), NULL);
+	del_timer_sync(&pstrWFIDrv->hRemainOnChannel);
 	#endif
 
-	host_int_set_wfi_drv_handler((u32)NULL);
+	host_int_set_wfi_drv_handler(NULL);
 	down(&hSemDeinitDrvHandle);
 
 
@@ -6770,39 +6694,30 @@
 		pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult = NULL;
 	}
 	/*deinit configurator and simulator*/
-#ifdef SIMULATION
-	CoreConfigSimulatorDeInit();
-#endif
 	CoreConfiguratorDeInit();
-#ifdef SIMULATION
-	TransportDeInit();
-#endif
 
 	pstrWFIDrv->enuHostIFstate = HOST_IF_IDLE;
 
 	gbScanWhileConnected = false;
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	if (clients_count == 1)	{
-		if (WILC_TimerDestroy(&g_hPeriodicRSSI, NULL)) {
-			PRINT_D(HOSTINF_DBG, ">> Connect timer is active \n");
+		if (del_timer_sync(&g_hPeriodicRSSI)) {
+			PRINT_D(HOSTINF_DBG, ">> Connect timer is active\n");
 			/* msleep(HOST_IF_CONNECT_TIMEOUT+1000); */
 		}
 		strHostIFmsg.u16MsgId = HOST_IF_MSG_EXIT;
 		strHostIFmsg.drvHandler = hWFIDrv;
 
 
-		s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-		if (s32Error != WILC_SUCCESS) {
+		s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+		if (s32Error != WILC_SUCCESS)
 			PRINT_ER("Error in sending deinit's message queue message function: Error(%d)\n", s32Error);
-		}
 
 		down(&hSemHostIFthrdEnd);
 
-
-
-		WILC_MsgQueueDestroy(&gMsgQHostIF, NULL);
+		WILC_MsgQueueDestroy(&gMsgQHostIF);
 		msgQ_created = 0;
 	}
 
@@ -6812,7 +6727,7 @@
 	u32Intialized = 0;
 	/* gWFiDrvHandle = NULL; */
 	if (pstrWFIDrv != NULL) {
-		WILC_FREE(pstrWFIDrv);
+		kfree(pstrWFIDrv);
 		/* pstrWFIDrv=NULL; */
 
 	}
@@ -6854,24 +6769,20 @@
 	}
 
 	/* prepare the Asynchronous Network Info message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_RCVD_NTWRK_INFO;
 	strHostIFmsg.drvHandler = pstrWFIDrv;
 
 	strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.u32Length = u32Length;
-	strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer = (u8 *)WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer,
+	strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer = WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdNetworkInfo.pu8Buffer,
 		    pu8Buffer, u32Length);
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		PRINT_ER("Error in sending network info message queue message parameters: Error(%d)\n", s32Error);
-	}
-
-
-	return;
 }
 
 /**
@@ -6897,7 +6808,7 @@
 
 	drvHandler = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
 	pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
-	PRINT_D(HOSTINF_DBG, "General asynchronous info packet received \n");
+	PRINT_D(HOSTINF_DBG, "General asynchronous info packet received\n");
 
 
 	if (pstrWFIDrv == NULL || pstrWFIDrv == terminated_handle) {
@@ -6916,7 +6827,7 @@
 	}
 
 	/* prepare the General Asynchronous Info message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO;
@@ -6924,19 +6835,17 @@
 
 
 	strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.u32Length = u32Length;
-	strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer = (u8 *)WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
-	WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer,
+	strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer = WILC_MALLOC(u32Length); /* will be deallocated by the receiving thread */
+	memcpy(strHostIFmsg.uniHostIFmsgBody.strRcvdGnrlAsyncInfo.pu8Buffer,
 		    pu8Buffer, u32Length);
 
 	/* send the message */
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		PRINT_ER("Error in sending message queue asynchronous message info: Error(%d)\n", s32Error);
-	}
 
 	/*BugID_5348*/
 	up(&hSemHostIntDeinit);
-	return;
 }
 
 /**
@@ -6954,20 +6863,20 @@
 	tstrHostIFmsg strHostIFmsg;
 	u32 drvHandler;
 	tstrWILC_WFIDrv *pstrWFIDrv = NULL;
+
 	drvHandler = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
 	pstrWFIDrv = (tstrWILC_WFIDrv *)drvHandler;
 
 
 	PRINT_D(GENERIC_DBG, "Scan notification received %p\n", pstrWFIDrv);
 
-	if (pstrWFIDrv == NULL || pstrWFIDrv == terminated_handle) {
+	if (pstrWFIDrv == NULL || pstrWFIDrv == terminated_handle)
 		return;
-	}
 
 	/*if there is an ongoing scan request*/
 	if (pstrWFIDrv->strWILC_UsrScanReq.pfUserScanResult) {
 		/* prepare theScan Done message */
-		WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+		memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 		strHostIFmsg.u16MsgId = HOST_IF_MSG_RCVD_SCAN_COMPLETE;
 		strHostIFmsg.drvHandler = pstrWFIDrv;
@@ -6978,14 +6887,13 @@
 
 		/*strHostIFmsg.uniHostIFmsgBody.strScanComplete.u32Length = u32Length;
 		 * strHostIFmsg.uniHostIFmsgBody.strScanComplete.pu8Buffer  = (u8*)WILC_MALLOC(u32Length);
-		 * WILC_memcpy(strHostIFmsg.uniHostIFmsgBody.strScanComplete.pu8Buffer,
+		 * memcpy(strHostIFmsg.uniHostIFmsgBody.strScanComplete.pu8Buffer,
 		 *                        pu8Buffer, u32Length); */
 
 		/* send the message */
-		s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-		if (s32Error) {
+		s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+		if (s32Error)
 			PRINT_ER("Error in sending message queue scan complete parameters: Error(%d)\n", s32Error);
-		}
 	}
 
 
@@ -7008,18 +6916,17 @@
  *  @date
  *  @version		1.0
  */
-s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg)
+s32 host_int_remain_on_channel(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/* prepare the remainonchan Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_REMAIN_ON_CHAN;
@@ -7031,10 +6938,9 @@
 	strHostIFmsg.uniHostIFmsgBody.strHostIfRemainOnChan.u32ListenSessionID = u32SessionID;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7057,29 +6963,27 @@
  *  @date
  *  @version		1.0
  */
-s32 host_int_ListenStateExpired(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID)
+s32 host_int_ListenStateExpired(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/*Stopping remain-on-channel timer*/
-	WILC_TimerStop(&(pstrWFIDrv->hRemainOnChannel), NULL);
+	del_timer(&pstrWFIDrv->hRemainOnChannel);
 
 	/* prepare the timer fire Message */
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_LISTEN_TIMER_FIRED;
 	strHostIFmsg.drvHandler = hWFIDrv;
 	strHostIFmsg.uniHostIFmsgBody.strHostIfRemainOnChan.u32ListenSessionID = u32SessionID;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7095,17 +6999,16 @@
  *  @author
  *  @date
  *  @version		1.0*/
-s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bReg)
+s32 host_int_frame_register(tstrWILC_WFIDrv *hWFIDrv, u16 u16FrameType, bool bReg)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_REGISTER_FRAME;
@@ -7128,10 +7031,9 @@
 	strHostIFmsg.uniHostIFmsgBody.strHostIfRegisterFrame.bReg = bReg;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7155,7 +7057,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
+s32 host_int_add_beacon(tstrWILC_WFIDrv *hWFIDrv, u32 u32Interval,
 				u32 u32DTIMPeriod,
 				u32 u32HeadLen, u8 *pu8Head,
 				u32 u32TailLen, u8 *pu8Tail)
@@ -7165,11 +7067,10 @@
 	tstrHostIFmsg strHostIFmsg;
 	tstrHostIFSetBeacon *pstrSetBeaconParam = &strHostIFmsg.uniHostIFmsgBody.strHostIFSetBeacon;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	PRINT_D(HOSTINF_DBG, "Setting adding beacon message queue params\n");
 
@@ -7180,38 +7081,33 @@
 	pstrSetBeaconParam->u32Interval = u32Interval;
 	pstrSetBeaconParam->u32DTIMPeriod = u32DTIMPeriod;
 	pstrSetBeaconParam->u32HeadLen = u32HeadLen;
-	pstrSetBeaconParam->pu8Head = (u8 *)WILC_MALLOC(u32HeadLen);
-	if (pstrSetBeaconParam->pu8Head == NULL) {
+	pstrSetBeaconParam->pu8Head = WILC_MALLOC(u32HeadLen);
+	if (pstrSetBeaconParam->pu8Head == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-	}
-	WILC_memcpy(pstrSetBeaconParam->pu8Head, pu8Head, u32HeadLen);
+	memcpy(pstrSetBeaconParam->pu8Head, pu8Head, u32HeadLen);
 	pstrSetBeaconParam->u32TailLen = u32TailLen;
 
 	/* Bug 4599 : if tail length = 0 skip allocating & copying */
 	if (u32TailLen > 0) {
-		pstrSetBeaconParam->pu8Tail = (u8 *)WILC_MALLOC(u32TailLen);
-		if (pstrSetBeaconParam->pu8Tail == NULL) {
+		pstrSetBeaconParam->pu8Tail = WILC_MALLOC(u32TailLen);
+		if (pstrSetBeaconParam->pu8Tail == NULL)
 			WILC_ERRORREPORT(s32Error, WILC_NO_MEM);
-		}
-		WILC_memcpy(pstrSetBeaconParam->pu8Tail, pu8Tail, u32TailLen);
+		memcpy(pstrSetBeaconParam->pu8Tail, pu8Tail, u32TailLen);
 	} else {
 		pstrSetBeaconParam->pu8Tail = NULL;
 	}
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 
 	WILC_CATCH(s32Error)
 	{
-		if (pstrSetBeaconParam->pu8Head != NULL) {
-			WILC_FREE(pstrSetBeaconParam->pu8Head);
-		}
+		if (pstrSetBeaconParam->pu8Head != NULL)
+			kfree(pstrSetBeaconParam->pu8Head);
 
-		if (pstrSetBeaconParam->pu8Tail != NULL) {
-			WILC_FREE(pstrSetBeaconParam->pu8Tail);
-		}
+		if (pstrSetBeaconParam->pu8Tail != NULL)
+			kfree(pstrSetBeaconParam->pu8Tail);
 	}
 
 	return s32Error;
@@ -7228,22 +7124,21 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_del_beacon(WILC_WFIDrvHandle hWFIDrv)
+s32 host_int_del_beacon(tstrWILC_WFIDrv *hWFIDrv)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_DEL_BEACON;
 	strHostIFmsg.drvHandler = hWFIDrv;
 	PRINT_D(HOSTINF_DBG, "Setting deleting beacon message queue params\n");
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 	WILC_ERRORCHECK(s32Error);
 
 	WILC_CATCH(s32Error)
@@ -7262,7 +7157,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
+s32 host_int_add_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -7270,11 +7165,10 @@
 	tstrWILC_AddStaParam *pstrAddStationMsg = &strHostIFmsg.uniHostIFmsgBody.strAddStaParam;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	PRINT_D(HOSTINF_DBG, "Setting adding station message queue params\n");
 
@@ -7283,20 +7177,20 @@
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_ADD_STATION;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	WILC_memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
+	memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
 	if (pstrAddStationMsg->u8NumRates > 0) {
 		u8 *rates = WILC_MALLOC(pstrAddStationMsg->u8NumRates);
+
 		WILC_NULLCHECK(s32Error, rates);
 
-		WILC_memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
+		memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
 		pstrAddStationMsg->pu8Rates = rates;
 	}
 
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 
 	WILC_CATCH(s32Error)
 	{
@@ -7313,18 +7207,17 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr)
+s32 host_int_del_station(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8MacAddr)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 	tstrHostIFDelSta *pstrDelStationMsg = &strHostIFmsg.uniHostIFmsgBody.strDelStaParam;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	PRINT_D(HOSTINF_DBG, "Setting deleting station message queue params\n");
 
@@ -7336,14 +7229,13 @@
 
 	/*BugID_4795: Handling situation of deleting all stations*/
 	if (pu8MacAddr == NULL)
-		WILC_memset(pstrDelStationMsg->au8MacAddr, 255, ETH_ALEN);
+		memset(pstrDelStationMsg->au8MacAddr, 255, ETH_ALEN);
 	else
-		WILC_memcpy(pstrDelStationMsg->au8MacAddr, pu8MacAddr, ETH_ALEN);
+		memcpy(pstrDelStationMsg->au8MacAddr, pu8MacAddr, ETH_ALEN);
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 
 	WILC_CATCH(s32Error)
 	{
@@ -7359,7 +7251,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN])
+s32 host_int_del_allstation(tstrWILC_WFIDrv *hWFIDrv, u8 pu8MacAddr[][ETH_ALEN])
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -7370,11 +7262,10 @@
 	u8 u8AssocNumb = 0;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	PRINT_D(HOSTINF_DBG, "Setting deauthenticating station message queue params\n");
 
@@ -7385,7 +7276,7 @@
 	/* Handling situation of deauthenticing all associated stations*/
 	for (i = 0; i < MAX_NUM_STA; i++) {
 		if (memcmp(pu8MacAddr[i], au8Zero_Buff, ETH_ALEN)) {
-			WILC_memcpy(pstrDelAllStationMsg->au8Sta_DelAllSta[i], pu8MacAddr[i], ETH_ALEN);
+			memcpy(pstrDelAllStationMsg->au8Sta_DelAllSta[i], pu8MacAddr[i], ETH_ALEN);
 			PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", pstrDelAllStationMsg->au8Sta_DelAllSta[i][0], pstrDelAllStationMsg->au8Sta_DelAllSta[i][1], pstrDelAllStationMsg->au8Sta_DelAllSta[i][2], pstrDelAllStationMsg->au8Sta_DelAllSta[i][3], pstrDelAllStationMsg->au8Sta_DelAllSta[i][4],
 				pstrDelAllStationMsg->au8Sta_DelAllSta[i][5]);
 			u8AssocNumb++;
@@ -7397,13 +7288,12 @@
 	}
 
 	pstrDelAllStationMsg->u8Num_AssocSta = u8AssocNumb;
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
 
 
-	if (s32Error) {
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
 
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7423,38 +7313,37 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
+s32 host_int_edit_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 	tstrWILC_AddStaParam *pstrAddStationMsg = &strHostIFmsg.uniHostIFmsgBody.strAddStaParam;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	PRINT_D(HOSTINF_DBG, "Setting editing station message queue params\n");
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_EDIT_STATION;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	WILC_memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
+	memcpy(pstrAddStationMsg, pstrStaParams, sizeof(tstrWILC_AddStaParam));
 	if (pstrAddStationMsg->u8NumRates > 0) {
 		u8 *rates = WILC_MALLOC(pstrAddStationMsg->u8NumRates);
+
 		WILC_NULLCHECK(s32Error, rates);
-		WILC_memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
+		memcpy(rates, pstrStaParams->pu8Rates, pstrAddStationMsg->u8NumRates);
 		pstrAddStationMsg->pu8Rates = rates;
 	}
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 	}
@@ -7463,22 +7352,21 @@
 #endif /*WILC_AP_EXTERNAL_MLME*/
 uint32_t wilc_get_chipid(uint8_t);
 
-s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32Timeout)
+s32 host_int_set_power_mgmt(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32Timeout)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 	tstrHostIfPowerMgmtParam *pstrPowerMgmtParam = &strHostIFmsg.uniHostIFmsgBody.strPowerMgmtparam;
 
-	PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d << \n\n", bIsEnabled);
+	PRINT_INFO(HOSTINF_DBG, "\n\n>> Setting PS to %d <<\n\n", bIsEnabled);
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	PRINT_D(HOSTINF_DBG, "Setting Power management message queue params\n");
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	/* prepare the WiphyParams Message */
@@ -7489,17 +7377,16 @@
 	pstrPowerMgmtParam->u32Timeout = u32Timeout;
 
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 	}
 	return s32Error;
 }
 
-s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32count)
+s32 host_int_setup_multicast_filter(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32count)
 {
 	s32 s32Error = WILC_SUCCESS;
 
@@ -7508,13 +7395,12 @@
 	tstrHostIFSetMulti *pstrMulticastFilterParam = &strHostIFmsg.uniHostIFmsgBody.strHostIfSetMulti;
 
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
 	PRINT_D(HOSTINF_DBG, "Setting Multicast Filter params\n");
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 
 	/* prepare the WiphyParams Message */
@@ -7524,10 +7410,9 @@
 	pstrMulticastFilterParam->bIsEnabled = bIsEnabled;
 	pstrMulticastFilterParam->u32count = u32count;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 	}
@@ -7568,17 +7453,17 @@
 
 	pNewJoinBssParam = WILC_MALLOC(sizeof(tstrJoinBssParam));
 	if (pNewJoinBssParam != NULL) {
-		WILC_memset(pNewJoinBssParam, 0, sizeof(tstrJoinBssParam));
+		memset(pNewJoinBssParam, 0, sizeof(tstrJoinBssParam));
 		pNewJoinBssParam->dtim_period = ptstrNetworkInfo->u8DtimPeriod;
 		pNewJoinBssParam->beacon_period = ptstrNetworkInfo->u16BeaconPeriod;
 		pNewJoinBssParam->cap_info = ptstrNetworkInfo->u16CapInfo;
-		WILC_memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6);
+		memcpy(pNewJoinBssParam->au8bssid, ptstrNetworkInfo->au8bssid, 6);
 		/*for(i=0; i<6;i++)
 		 *      PRINT_D(HOSTINF_DBG,"%c",pNewJoinBssParam->au8bssid[i]);*/
-		WILC_memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1);
+		memcpy((u8 *)pNewJoinBssParam->ssid, ptstrNetworkInfo->au8ssid, ptstrNetworkInfo->u8SsidLen + 1);
 		pNewJoinBssParam->ssidLen = ptstrNetworkInfo->u8SsidLen;
-		WILC_memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3);
-		WILC_memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3);
+		memset(pNewJoinBssParam->rsn_pcip_policy, 0xFF, 3);
+		memset(pNewJoinBssParam->rsn_auth_policy, 0xFF, 3);
 		/*for(i=0; i<pNewJoinBssParam->ssidLen;i++)
 		 *      PRINT_D(HOSTINF_DBG,"%c",pNewJoinBssParam->ssid[i]);*/
 
@@ -7633,9 +7518,8 @@
 				pNewJoinBssParam->wmm_cap = true;
 
 				/* Check if Bit 7 is set indicating U-APSD capability */
-				if (pu8IEs[index + 8] & (1 << 7)) {
+				if (pu8IEs[index + 8] & (1 << 7))
 					pNewJoinBssParam->uapsd_cap = true;
-				}
 				index += pu8IEs[index + 1] + 2;
 				continue;
 			}
@@ -7645,6 +7529,7 @@
 				 (pu8IEs[index + 4] == 0x9a) && /* OUI */
 				 (pu8IEs[index + 5] == 0x09) && (pu8IEs[index + 6] == 0x0c)) { /* OUI Type     */
 				u16 u16P2P_count;
+
 				pNewJoinBssParam->tsf = ptstrNetworkInfo->u32Tsf;
 				pNewJoinBssParam->u8NoaEnbaled = 1;
 				pNewJoinBssParam->u8Index = pu8IEs[index + 9];
@@ -7656,20 +7541,20 @@
 				} else
 					pNewJoinBssParam->u8OppEnable = 0;
 				/* HOSTINF_DBG */
-				PRINT_D(GENERIC_DBG, "P2P Dump \n");
+				PRINT_D(GENERIC_DBG, "P2P Dump\n");
 				for (i = 0; i < pu8IEs[index + 7]; i++)
-					PRINT_D(GENERIC_DBG, " %x \n", pu8IEs[index + 9 + i]);
+					PRINT_D(GENERIC_DBG, " %x\n", pu8IEs[index + 9 + i]);
 
 				pNewJoinBssParam->u8Count = pu8IEs[index + 11];
 				u16P2P_count = index + 12;
 
-				WILC_memcpy(pNewJoinBssParam->au8Duration, pu8IEs + u16P2P_count, 4);
+				memcpy(pNewJoinBssParam->au8Duration, pu8IEs + u16P2P_count, 4);
 				u16P2P_count += 4;
 
-				WILC_memcpy(pNewJoinBssParam->au8Interval, pu8IEs + u16P2P_count, 4);
+				memcpy(pNewJoinBssParam->au8Interval, pu8IEs + u16P2P_count, 4);
 				u16P2P_count += 4;
 
-				WILC_memcpy(pNewJoinBssParam->au8StartTime, pu8IEs + u16P2P_count, 4);
+				memcpy(pNewJoinBssParam->au8StartTime, pu8IEs + u16P2P_count, 4);
 
 				index += pu8IEs[index + 1] + 2;
 				continue;
@@ -7698,7 +7583,7 @@
 				rsnIndex += 7; /* skipping id, length, version(2B) and first 3 bytes of gcipher */
 				pNewJoinBssParam->rsn_grp_policy = pu8IEs[rsnIndex];
 				rsnIndex++;
-				/* PRINT_D(HOSTINF_DBG,"Group Policy: %0x \n",pNewJoinBssParam->rsn_grp_policy); */
+				/* PRINT_D(HOSTINF_DBG,"Group Policy: %0x\n",pNewJoinBssParam->rsn_grp_policy); */
 				/* initialize policies with invalid values */
 
 				jumpOffset = pu8IEs[rsnIndex] * 4; /* total no.of bytes of pcipher field (count*4) */
@@ -7709,7 +7594,7 @@
 				pcipherCount = (pu8IEs[rsnIndex] > 3) ? 3 : pu8IEs[rsnIndex];
 				rsnIndex += 2; /* jump 2 bytes of pcipher count */
 
-				/* PRINT_D(HOSTINF_DBG,"\npcipher:%d \n",pcipherCount); */
+				/* PRINT_D(HOSTINF_DBG,"\npcipher:%d\n",pcipherCount); */
 				for (i = pcipherTotalCount, j = 0; i < pcipherCount + pcipherTotalCount && i < 3; i++, j++) {
 					/* each count corresponds to 4 bytes, only last byte is saved */
 					pNewJoinBssParam->rsn_pcip_policy[i] = pu8IEs[rsnIndex + ((j + 1) * 4) - 1];
@@ -7755,7 +7640,7 @@
 void host_int_freeJoinParams(void *pJoinParams)
 {
 	if ((tstrJoinBssParam *)pJoinParams != NULL)
-		WILC_FREE((tstrJoinBssParam *)pJoinParams);
+		kfree((tstrJoinBssParam *)pJoinParams);
 	else
 		PRINT_ER("Unable to FREE null pointer\n");
 }
@@ -7771,7 +7656,7 @@
  *  @date
  *  @version		1.0**/
 
-static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID, short int BufferSize,
+static int host_int_addBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID, short int BufferSize,
 				 short int SessionTimeout, void *drvHandler)
 {
 	s32 s32Error = WILC_SUCCESS;
@@ -7779,11 +7664,10 @@
 	tstrHostIFmsg strHostIFmsg;
 	tstrHostIfBASessionInfo *pBASessionInfo = &strHostIFmsg.uniHostIFmsgBody.strHostIfBASessionInfo;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_ADD_BA_SESSION;
@@ -7794,10 +7678,9 @@
 	pBASessionInfo->u16SessionTimeout = SessionTimeout;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7807,18 +7690,17 @@
 }
 
 
-s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID)
+s32 host_int_delBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 	tstrHostIfBASessionInfo *pBASessionInfo = &strHostIFmsg.uniHostIFmsgBody.strHostIfBASessionInfo;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_DEL_BA_SESSION;
@@ -7827,10 +7709,9 @@
 	pBASessionInfo->u8Ted = TID;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7842,18 +7723,17 @@
 	return s32Error;
 }
 
-s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID)
+s32 host_int_del_All_Rx_BASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 	tstrHostIfBASessionInfo *pBASessionInfo = &strHostIFmsg.uniHostIFmsgBody.strHostIfBASessionInfo;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_DEL_ALL_RX_BA_SESSIONS;
@@ -7862,10 +7742,9 @@
 	pBASessionInfo->u8Ted = TID;
 	strHostIFmsg.drvHandler = hWFIDrv;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7885,7 +7764,7 @@
  *  @author		Abdelrahman Sobhy
  *  @date
  *  @version		1.0*/
-s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
+s32 host_int_setup_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *u16ipadd, u8 idx)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
@@ -7894,11 +7773,10 @@
 	/* TODO: Enable This feature on softap firmware */
 	return 0;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_SET_IPADDRESS;
@@ -7907,10 +7785,9 @@
 	strHostIFmsg.drvHandler = hWFIDrv;
 	strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.idx = idx;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
@@ -7929,29 +7806,27 @@
  *  @author		Abdelrahman Sobhy
  *  @date
  *  @version		1.0*/
-s32 host_int_get_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *u16ipadd, u8 idx)
+s32 host_int_get_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *u16ipadd, u8 idx)
 {
 	s32 s32Error = WILC_SUCCESS;
 	tstrWILC_WFIDrv *pstrWFIDrv = (tstrWILC_WFIDrv *)hWFIDrv;
 	tstrHostIFmsg strHostIFmsg;
 
-	if (pstrWFIDrv == NULL) {
+	if (pstrWFIDrv == NULL)
 		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);
-	}
 
-	WILC_memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
+	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
 
 	/* prepare the WiphyParams Message */
 	strHostIFmsg.u16MsgId = HOST_IF_MSG_GET_IPADDRESS;
 
 	strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.au8IPAddr = u16ipadd;
-	strHostIFmsg.drvHandler=hWFIDrv;
-	strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.idx= idx;
+	strHostIFmsg.drvHandler = hWFIDrv;
+	strHostIFmsg.uniHostIFmsgBody.strHostIfSetIP.idx = idx;
 
-	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg), NULL);
-	if (s32Error) {
+	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
+	if (s32Error)
 		WILC_ERRORREPORT(s32Error, s32Error);
-	}
 	WILC_CATCH(s32Error)
 	{
 
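[Editorial aside, not part of the patch] The host_interface.c hunks above repeat one conversion pattern: the driver-private WILC_memset()/WILC_memcpy() wrappers give way to plain memset()/memcpy(), WILC_MsgQueueSend() drops its unused trailing argument, and single-statement if bodies lose their braces per kernel coding style. A minimal sketch of the shape the functions converge on, using only helpers visible in the diff (HOST_IF_MSG_EXAMPLE and host_int_example_request are made-up names for illustration):

/* Illustrative only -- hypothetical request reduced to the converted pattern */
static s32 host_int_example_request(tstrWILC_WFIDrv *hWFIDrv)
{
	s32 s32Error = WILC_SUCCESS;
	tstrHostIFmsg strHostIFmsg;

	if (hWFIDrv == NULL)
		WILC_ERRORREPORT(s32Error, WILC_INVALID_ARGUMENT);

	memset(&strHostIFmsg, 0, sizeof(tstrHostIFmsg));
	strHostIFmsg.u16MsgId = HOST_IF_MSG_EXAMPLE;	/* hypothetical message id */
	strHostIFmsg.drvHandler = hWFIDrv;

	/* three-argument form, as introduced by this patch */
	s32Error = WILC_MsgQueueSend(&gMsgQHostIF, &strHostIFmsg, sizeof(tstrHostIFmsg));
	if (s32Error)
		WILC_ERRORREPORT(s32Error, s32Error);
	WILC_CATCH(s32Error)
	{
	}
	return s32Error;
}
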
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 38db740..e66dee9 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -11,7 +11,6 @@
 #define HOST_INT_H
 
 #include "coreconfigurator.h"
-#include "coreconfigsimulator.h"
 /*****************************************************************************/
 /*								Macros                                       */
 /*****************************************************************************/
@@ -368,10 +367,10 @@
 	struct semaphore hSemGetCHNL;
 	struct semaphore hSemInactiveTime;
 /* timer handlers */
-	WILC_TimerHandle hScanTimer;
-	WILC_TimerHandle hConnectTimer;
+	struct timer_list hScanTimer;
+	struct timer_list hConnectTimer;
 	#ifdef WILC_P2P
-	WILC_TimerHandle hRemainOnChannel;
+	struct timer_list hRemainOnChannel;
 	#endif
 
 	bool IFC_UP;
@@ -433,7 +432,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_remove_key(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8StaAddress);
+s32 host_int_remove_key(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8StaAddress);
 /**
  *  @brief              removes WEP key
  *  @details    valid only in BSS STA mode if External Supplicant support is enabled.
@@ -448,7 +447,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_remove_wep_key(WILC_WFIDrvHandle hWFIDrv, u8 u8Index);
+s32 host_int_remove_wep_key(tstrWILC_WFIDrv *hWFIDrv, u8 u8Index);
 /**
  *  @brief              sets WEP deafault key
  *  @details    Sets the index of the WEP encryption key in use,
@@ -461,7 +460,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_WEPDefaultKeyID(WILC_WFIDrvHandle hWFIDrv, u8 u8Index);
+s32 host_int_set_WEPDefaultKeyID(tstrWILC_WFIDrv *hWFIDrv, u8 u8Index);
 
 /**
  *  @brief              sets WEP deafault key
@@ -482,7 +481,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_wep_key_bss_sta(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx);
+s32 host_int_add_wep_key_bss_sta(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx);
 /**
  *  @brief              host_int_add_wep_key_bss_ap
  *  @details    valid only in AP mode if External Supplicant support is enabled.
@@ -497,7 +496,7 @@
  *  @date		28 Feb 2013
  *  @version		1.0
  */
-s32 host_int_add_wep_key_bss_ap(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type);
+s32 host_int_add_wep_key_bss_ap(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8WepKey, u8 u8WepKeylen, u8 u8Keyidx, u8 u8mode, AUTHTYPE_T tenuAuth_type);
 
 /**
  *  @brief              adds ptk Key
@@ -515,7 +514,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_ptk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
+s32 host_int_add_ptk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8Ptk, u8 u8PtkKeylen,
 			     const u8 *mac_addr, const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode, u8 u8Idx);
 
 /**
@@ -530,7 +529,7 @@
  *  @date		15 April 2013
  *  @version		1.0
  */
-s32 host_int_get_inactive_time(WILC_WFIDrvHandle hWFIDrv, const u8 *mac, u32 *pu32InactiveTime);
+s32 host_int_get_inactive_time(tstrWILC_WFIDrv *hWFIDrv, const u8 *mac, u32 *pu32InactiveTime);
 
 /**
  *  @brief              adds Rx GTk Key
@@ -548,7 +547,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_rx_gtk(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
+s32 host_int_add_rx_gtk(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8RxGtk, u8 u8GtkKeylen,
 				u8 u8KeyIdx, u32 u32KeyRSClen, const u8 *KeyRSC,
 				const u8 *pu8RxMic, const u8 *pu8TxMic, u8 mode, u8 u8Ciphermode);
 
@@ -569,7 +568,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_add_tx_gtk(WILC_WFIDrvHandle hWFIDrv, u8 u8KeyLen, u8 *pu8TxGtk, u8 u8KeyIdx);
+s32 host_int_add_tx_gtk(tstrWILC_WFIDrv *hWFIDrv, u8 u8KeyLen, u8 *pu8TxGtk, u8 u8KeyIdx);
 
 /**
  *  @brief              caches the pmkid
@@ -592,7 +591,7 @@
  *  @version		1.0
  */
 
-s32 host_int_set_pmkid_info(WILC_WFIDrvHandle hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray);
+s32 host_int_set_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, tstrHostIFpmkidAttr *pu8PmkidInfoArray);
 /**
  *  @brief              gets the cached the pmkid info
  *  @details    valid only in BSS STA mode if External Supplicant
@@ -616,7 +615,7 @@
  *  @version		1.0
  */
 
-s32 host_int_get_pmkid_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PmkidInfoArray,
+s32 host_int_get_pmkid_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PmkidInfoArray,
 				    u32 u32PmkidInfoLen);
 
 /**
@@ -633,7 +632,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv, u8 *pu8PassPhrase,
+s32 host_int_set_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8PassPhrase,
 						 u8 u8Psklength);
 /**
  *  @brief              gets the pass phrase
@@ -649,7 +648,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_RSNAConfigPSKPassPhrase(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_RSNAConfigPSKPassPhrase(tstrWILC_WFIDrv *hWFIDrv,
 						 u8 *pu8PassPhrase, u8 u8Psklength);
 
 /**
@@ -663,7 +662,7 @@
  *  @date		19 April 2012
  *  @version		1.0
  */
-s32 host_int_get_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress);
+s32 host_int_get_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress);
 
 /**
  *  @brief              sets mac address
@@ -676,7 +675,7 @@
  *  @date		16 July 2012
  *  @version		1.0
  */
-s32 host_int_set_MacAddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8MacAddress);
+s32 host_int_set_MacAddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8MacAddress);
 
 /**
  *  @brief              wait until msg q is empty
@@ -721,7 +720,7 @@
  *  @version		1.0
  */
 #ifndef CONNECT_DIRECT
-s32 host_int_get_site_survey_results(WILC_WFIDrvHandle hWFIDrv,
+s32 host_int_get_site_survey_results(tstrWILC_WFIDrv *hWFIDrv,
 					     u8 ppu8RcvdSiteSurveyResults[][MAX_SURVEY_RESULT_FRAG_SIZE],
 					     u32 u32MaxSiteSrvyFragLen);
 #endif
@@ -742,7 +741,7 @@
  *  @version		1.0
  */
 
-s32 host_int_set_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 scanSource);
+s32 host_int_set_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 scanSource);
 /**
  *  @brief              gets scan source of the last scan
  *  @details
@@ -758,7 +757,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_start_scan_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ScanSource);
+s32 host_int_get_start_scan_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ScanSource);
 
 /**
  *  @brief              sets a join request
@@ -772,7 +771,7 @@
  *  @version		1.0
  */
 
-s32 host_int_set_join_req(WILC_WFIDrvHandle hWFIDrv, u8 *pu8bssid,
+s32 host_int_set_join_req(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8bssid,
 				  const u8 *pu8ssid, size_t ssidLen,
 				  const u8 *pu8IEs, size_t IEsLen,
 				  tWILCpfConnectResult pfConnectResult, void *pvUserArg,
@@ -792,7 +791,7 @@
  *  @version		8.0
  */
 
-s32 host_int_flush_join_req(WILC_WFIDrvHandle hWFIDrv);
+s32 host_int_flush_join_req(tstrWILC_WFIDrv *hWFIDrv);
 
 
 /**
@@ -806,7 +805,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_disconnect(WILC_WFIDrvHandle hWFIDrv, u16 u16ReasonCode);
+s32 host_int_disconnect(tstrWILC_WFIDrv *hWFIDrv, u16 u16ReasonCode);
 
 /**
  *  @brief              disconnects a sta
@@ -819,7 +818,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_disconnect_station(WILC_WFIDrvHandle hWFIDrv, u8 assoc_id);
+s32 host_int_disconnect_station(tstrWILC_WFIDrv *hWFIDrv, u8 assoc_id);
 /**
  *  @brief              gets a Association request info
  *  @details
@@ -846,7 +845,7 @@
  *  @version		1.0
  */
 
-s32 host_int_get_assoc_req_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocReqInfo,
+s32 host_int_get_assoc_req_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocReqInfo,
 					u32 u32AssocReqInfoLen);
 /**
  *  @brief              gets a Association Response info
@@ -860,7 +859,7 @@
  *  @version		1.0
  */
 
-s32 host_int_get_assoc_res_info(WILC_WFIDrvHandle hWFIDrv, u8 *pu8AssocRespInfo,
+s32 host_int_get_assoc_res_info(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8AssocRespInfo,
 					u32 u32MaxAssocRespInfoLen, u32 *pu32RcvdAssocRespInfoLen);
 /**
  *  @brief              gets a Association Response info
@@ -877,7 +876,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_rx_power_level(WILC_WFIDrvHandle hWFIDrv, u8 *pu8RxPowerLevel,
+s32 host_int_get_rx_power_level(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8RxPowerLevel,
 					u32 u32RxPowerLevelLen);
 
 /**
@@ -895,7 +894,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_set_mac_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 u8ChNum);
+s32 host_int_set_mac_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 u8ChNum);
 
 /**
  *  @brief              gets the current channel index
@@ -912,7 +911,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_host_chnl_num(WILC_WFIDrvHandle hWFIDrv, u8 *pu8ChNo);
+s32 host_int_get_host_chnl_num(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8ChNo);
 /**
  *  @brief              gets the sta rssi
  *  @details    gets the currently maintained RSSI value for the station.
@@ -926,8 +925,8 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_get_rssi(WILC_WFIDrvHandle hWFIDrv, s8 *ps8Rssi);
-s32 host_int_get_link_speed(WILC_WFIDrvHandle hWFIDrv, s8 *ps8lnkspd);
+s32 host_int_get_rssi(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8Rssi);
+s32 host_int_get_link_speed(tstrWILC_WFIDrv *hWFIDrv, s8 *ps8lnkspd);
 /**
  *  @brief              scans a set of channels
  *  @details
@@ -945,7 +944,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_scan(WILC_WFIDrvHandle hWFIDrv, u8 u8ScanSource,
+s32 host_int_scan(tstrWILC_WFIDrv *hWFIDrv, u8 u8ScanSource,
 			  u8 u8ScanType, u8 *pu8ChnlFreqList,
 			  u8 u8ChnlListLen, const u8 *pu8IEs,
 			  size_t IEsLen, tWILCpfScanResult ScanResult,
@@ -961,7 +960,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 hif_set_cfg(WILC_WFIDrvHandle hWFIDrv, tstrCfgParamVal *pstrCfgParamVal);
+s32 hif_set_cfg(tstrWILC_WFIDrv *hWFIDrv, tstrCfgParamVal *pstrCfgParamVal);
 
 /**
  *  @brief              gets configuration wids values
@@ -975,7 +974,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 hif_get_cfg(WILC_WFIDrvHandle hWFIDrv, u16 u16WID, u16 *pu16WID_Value);
+s32 hif_get_cfg(tstrWILC_WFIDrv *hWFIDrv, u16 u16WID, u16 *pu16WID_Value);
 /*****************************************************************************/
 /*							Notification Functions							 */
 /*****************************************************************************/
@@ -1022,7 +1021,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_init(WILC_WFIDrvHandle *phWFIDrv);
+s32 host_int_init(tstrWILC_WFIDrv **phWFIDrv);
 
 /**
  *  @brief              host interface initialization function
@@ -1033,7 +1032,7 @@
  *  @date		8 March 2012
  *  @version		1.0
  */
-s32 host_int_deinit(WILC_WFIDrvHandle hWFIDrv);
+s32 host_int_deinit(tstrWILC_WFIDrv *hWFIDrv);
 
 
 /*!
@@ -1058,7 +1057,7 @@
  *  @version		1.0 Description
  *
  */
-s32 host_int_add_beacon(WILC_WFIDrvHandle hWFIDrv, u32 u32Interval,
+s32 host_int_add_beacon(tstrWILC_WFIDrv *hWFIDrv, u32 u32Interval,
 				u32 u32DTIMPeriod,
 				u32 u32HeadLen, u8 *pu8Head,
 				u32 u32TailLen, u8 *pu8tail);
@@ -1076,7 +1075,7 @@
  *  @date		10 Julys 2012
  *  @version		1.0 Description
  */
-s32 host_int_del_beacon(WILC_WFIDrvHandle hWFIDrv);
+s32 host_int_del_beacon(tstrWILC_WFIDrv *hWFIDrv);
 
 /*!
  *  @fn		s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam strStaParams)
@@ -1091,7 +1090,7 @@
  *  @date		12 July 2012
  *  @version		1.0 Description
  */
-s32 host_int_add_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
+s32 host_int_add_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
 
 /*!
  *  @fn		s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, const u8* pu8MacAddr)
@@ -1106,7 +1105,7 @@
  *  @date		09 April 2014
  *  @version		1.0 Description
  */
-s32 host_int_del_allstation(WILC_WFIDrvHandle hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]);
+s32 host_int_del_allstation(tstrWILC_WFIDrv *hWFIDrv, u8 pu8MacAddr[][ETH_ALEN]);
 
 /*!
  *  @fn		s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, u8* pu8MacAddr)
@@ -1121,7 +1120,7 @@
  *  @date		15 July 2012
  *  @version		1.0 Description
  */
-s32 host_int_del_station(WILC_WFIDrvHandle hWFIDrv, const u8 *pu8MacAddr);
+s32 host_int_del_station(tstrWILC_WFIDrv *hWFIDrv, const u8 *pu8MacAddr);
 
 /*!
  *  @fn		s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam strStaParams)
@@ -1136,7 +1135,7 @@
  *  @date		15 July 2012
  *  @version		1.0 Description
  */
-s32 host_int_edit_station(WILC_WFIDrvHandle hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
+s32 host_int_edit_station(tstrWILC_WFIDrv *hWFIDrv, tstrWILC_AddStaParam *pstrStaParams);
 
 /*!
  *  @fn		s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32Timeout)
@@ -1153,7 +1152,7 @@
  *  @date		24 November 2012
  *  @version		1.0 Description
  */
-s32 host_int_set_power_mgmt(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32Timeout);
+s32 host_int_set_power_mgmt(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32Timeout);
 /*  @param[in,out]	hWFIDrv		handle to the wifi driver
  *  @param[in]	bIsEnabled	TRUE if enabled, FALSE otherwise
  *  @param[in]	u8count		count of mac address entries in the filter table
@@ -1165,7 +1164,7 @@
  *  @date		24 November 2012
  *  @version		1.0 Description
  */
-s32 host_int_setup_multicast_filter(WILC_WFIDrvHandle hWFIDrv, bool bIsEnabled, u32 u32count);
+s32 host_int_setup_multicast_filter(tstrWILC_WFIDrv *hWFIDrv, bool bIsEnabled, u32 u32count);
 /**
  *  @brief           host_int_setup_ipaddress
  *  @details       set IP address on firmware
@@ -1175,7 +1174,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_setup_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8IPAddr, u8 idx);
+s32 host_int_setup_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8IPAddr, u8 idx);
 
 
 /**
@@ -1187,7 +1186,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_delBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID);
+s32 host_int_delBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID);
 
 /**
  *  @brief           host_int_delBASession
@@ -1198,7 +1197,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_del_All_Rx_BASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID);
+s32 host_int_del_All_Rx_BASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID);
 
 
 /**
@@ -1210,7 +1209,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_get_ipaddress(WILC_WFIDrvHandle hWFIDrv, u8 *pu8IPAddr, u8 idx);
+s32 host_int_get_ipaddress(tstrWILC_WFIDrv *hWFIDrv, u8 *pu8IPAddr, u8 idx);
 
 #ifdef WILC_P2P
 /**
@@ -1222,7 +1221,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_remain_on_channel(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg);
+s32 host_int_remain_on_channel(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID, u32 u32duration, u16 chan, tWILCpfRemainOnChanExpired RemainOnChanExpired, tWILCpfRemainOnChanReady RemainOnChanReady, void *pvUserArg);
 
 /**
  *  @brief              host_int_ListenStateExpired
@@ -1238,7 +1237,7 @@
  *  @date
  *  @version		1.0
  */
-s32 host_int_ListenStateExpired(WILC_WFIDrvHandle hWFIDrv, u32 u32SessionID);
+s32 host_int_ListenStateExpired(tstrWILC_WFIDrv *hWFIDrv, u32 u32SessionID);
 
 /**
  *  @brief           host_int_frame_register
@@ -1249,7 +1248,7 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_frame_register(WILC_WFIDrvHandle hWFIDrv, u16 u16FrameType, bool bReg);
+s32 host_int_frame_register(tstrWILC_WFIDrv *hWFIDrv, u16 u16FrameType, bool bReg);
 #endif
 /**
  *  @brief           host_int_set_wfi_drv_handler
@@ -1260,18 +1259,18 @@
  *  @date
  *  @version	1.0
  */
-s32 host_int_set_wfi_drv_handler(u32 u32address);
-s32 host_int_set_operation_mode(WILC_WFIDrvHandle hWFIDrv, u32 u32mode);
+s32 host_int_set_wfi_drv_handler(tstrWILC_WFIDrv *u32address);
+s32 host_int_set_operation_mode(tstrWILC_WFIDrv *hWFIDrv, u32 u32mode);
 
-static s32 Handle_ScanDone(void *drvHandler, tenuScanEvent enuEvent);
+static s32 Handle_ScanDone(tstrWILC_WFIDrv *drvHandler, tenuScanEvent enuEvent);
 
-static int host_int_addBASession(WILC_WFIDrvHandle hWFIDrv, char *pBSSID, char TID, short int BufferSize,
+static int host_int_addBASession(tstrWILC_WFIDrv *hWFIDrv, char *pBSSID, char TID, short int BufferSize,
 				 short int SessionTimeout, void *drvHandler);
 
 
 void host_int_freeJoinParams(void *pJoinParams);
 
-s32 host_int_get_statistics(WILC_WFIDrvHandle hWFIDrv, tstrStatistics *pstrStatistics);
+s32 host_int_get_statistics(tstrWILC_WFIDrv *hWFIDrv, tstrStatistics *pstrStatistics);
 
 /*****************************************************************************/
 /*																			 */
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index f5296f5..b8d7d04 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -6,20 +6,15 @@
  *  @date	01 MAR 2012
  *  @version	1.0
  */
-
-#ifndef SIMULATION
 #include "wilc_wfi_cfgoperations.h"
 #include "linux_wlan_common.h"
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
-#endif
+
 #ifdef WILC_FULLY_HOSTING_AP
 #include "wilc_host_ap.h"
 #endif
 #ifdef WILC_AP_EXTERNAL_MLME
-#ifdef SIMULATION
-#include "wilc_wfi_cfgoperations.h"
-#endif
 
 struct wilc_wfi_radiotap_hdr {
 	struct ieee80211_radiotap_header hdr;
@@ -39,9 +34,7 @@
 
 static struct net_device *wilc_wfi_mon; /* global monitor netdev */
 
-#ifdef SIMULATION
-extern int WILC_WFI_Tx(struct sk_buff *skb, struct net_device *dev);
-#elif USE_WIRELESS
+#if USE_WIRELESS
 extern int  mac_xmit(struct sk_buff *skb, struct net_device *dev);
 #endif
 
@@ -237,14 +230,12 @@
 }
 static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len)
 {
-	linux_wlan_t *nic;
 	struct tx_complete_mon_data *mgmt_tx = NULL;
 
 	if (dev == NULL) {
 		PRINT_D(HOSTAPD_DBG, "ERROR: dev == NULL\n");
 		return WILC_FAIL;
 	}
-	nic = netdev_priv(dev);
 
 	netif_stop_queue(dev);
 	mgmt_tx = kmalloc(sizeof(struct tx_complete_mon_data), GFP_ATOMIC);
@@ -298,7 +289,6 @@
 static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
-	struct ieee80211_radiotap_header *rtap_hdr;
 	u32 rtap_len, i, ret = 0;
 	struct WILC_WFI_mon_priv  *mon_priv;
 
@@ -318,7 +308,6 @@
 		return WILC_FAIL;
 	}
 
-	rtap_hdr = (struct ieee80211_radiotap_header *)skb->data;
 
 	rtap_len = ieee80211_get_radiotap_len(skb->data);
 	if (skb->len < rtap_len) {
@@ -378,9 +367,7 @@
 	PRINT_INFO(HOSTAPD_DBG, "SKB netdevice name = %s\n", skb->dev->name);
 	PRINT_INFO(HOSTAPD_DBG, "MONITOR real dev name = %s\n", mon_priv->real_ndev->name);
 
-	#ifdef SIMULATION
-	ret = WILC_WFI_Tx(skb, mon_priv->real_ndev);
-	#elif USE_WIRELESS
+	#if USE_WIRELESS
 	/* Identify if Ethernet or MAC header (data or mgmt) */
 	memcpy(srcAdd, &skb->data[10], 6);
 	memcpy(bssid, &skb->data[16], 6);
@@ -493,9 +480,9 @@
 	/* dev->destructor = free_netdev; */
 	PRINT_INFO(CORECONFIG_DBG, "In Ethernet setup function\n");
 	ether_setup(dev);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 	dev->type = ARPHRD_IEEE80211_RADIOTAP;
-	memset(dev->dev_addr, 0, ETH_ALEN);
+	eth_zero_addr(dev->dev_addr);
 
 	#ifdef USE_WIRELESS
 	{
@@ -571,7 +558,7 @@
  *  @date	12 JUL 2012
  *  @version	1.0
  */
-int WILC_WFI_deinit_mon_interface()
+int WILC_WFI_deinit_mon_interface(void)
 {
 	bool rollback_lock = false;
 
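[Editorial aside, not part of the patch] The monitor-device setup hunk above swaps two open-coded idioms for their kernel equivalents: IFF_NO_QUEUE in priv_flags instead of forcing tx_queue_len to 0, and the etherdevice.h helper eth_zero_addr() instead of memset(dev->dev_addr, 0, ETH_ALEN). A minimal sketch of such a setup callback (illustrative; the real one also wires up netdev ops and wireless handlers):

/* Illustrative only -- example_mon_setup is a made-up name */
static void example_mon_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->priv_flags |= IFF_NO_QUEUE;	/* queueless virtual device */
	dev->type = ARPHRD_IEEE80211_RADIOTAP;
	eth_zero_addr(dev->dev_addr);		/* all-zero MAC for the monitor netdev */
}
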
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index b352c50..b3cc9f5 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1,4 +1,3 @@
-#ifndef SIMULATION
 #include "wilc_wfi_cfgoperations.h"
 #include "linux_wlan_common.h"
 #include "wilc_wlan_if.h"
@@ -72,7 +71,7 @@
 extern u8 gau8MulticastMacAddrList[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
 void wilc1000_wlan_deinit(linux_wlan_t *nic);
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-extern WILC_TimerHandle hDuringIpTimer;
+extern struct timer_list hDuringIpTimer;
 #endif
 
 static int linux_wlan_device_power(int on_off)
@@ -103,7 +102,6 @@
 	return 0;
 }
 
-
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr);
 
@@ -116,7 +114,6 @@
 		if (g_linux_wlan->oup.wlan_cleanup != NULL) \
 			g_linux_wlan->oup.wlan_cleanup(); }
 
-
 #ifndef STA_FIRMWARE
 #define STA_FIRMWARE	"wifi_firmware.bin"
 #endif
@@ -129,15 +126,12 @@
 #define P2P_CONCURRENCY_FIRMWARE	"wifi_firmware_p2p_concurrency.bin"
 #endif
 
-
-
 typedef struct android_wifi_priv_cmd {
 	char *buf;
 	int used_len;
 	int total_len;
 } android_wifi_priv_cmd;
 
-
 #define IRQ_WAIT	1
 #define IRQ_NO_WAIT	0
 /*
@@ -158,7 +152,6 @@
 extern void WILC_WFI_monitor_rx(uint8_t *buff, uint32_t size);
 extern void WILC_WFI_p2p_rx(struct net_device *dev, uint8_t *buff, uint32_t size);
 
-
 static void *internal_alloc(uint32_t size, uint32_t flag);
 static void linux_wlan_tx_complete(void *priv, int status);
 void frmw_to_linux(uint8_t *buff, uint32_t size, uint32_t pkt_offset);
@@ -170,8 +163,6 @@
 static int  mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd);
 static void wilc_set_multicast_list(struct net_device *dev);
 
-
-
 /*
  * for now - in frmw_to_linux there should be private data to be passed to it
  * and this data should be pointer to net device
@@ -200,22 +191,18 @@
 char DebugBuffer[DEGUG_BUFFER_LENGTH + 20] = {0};
 static char *ps8current = DebugBuffer;
 
-
-
 void printk_later(const char *format, ...)
 {
 	va_list args;
-	va_start (args, format);
-	ps8current += vsprintf (ps8current, format, args);
-	va_end (args);
-	if ((ps8current - DebugBuffer) > DEGUG_BUFFER_LENGTH) {
+	va_start(args, format);
+	ps8current += vsprintf(ps8current, format, args);
+	va_end(args);
+	if ((ps8current - DebugBuffer) > DEGUG_BUFFER_LENGTH)
 		ps8current = DebugBuffer;
-	}
 
 }
 
-
-void dump_logs()
+void dump_logs(void)
 {
 	if (DebugBuffer[0]) {
 		DebugBuffer[DEGUG_BUFFER_LENGTH] = 0;
@@ -229,7 +216,7 @@
 	}
 }
 
-void Reset_WatchDogdebugger()
+void Reset_WatchDogdebugger(void)
 {
 	WatchDogdebuggerCounter = 0;
 }
@@ -246,11 +233,8 @@
 		WatchDogdebuggerCounter = 0;
 	}
 }
-
-
 #endif
 
-
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 static int dev_state_ev_handler(struct notifier_block *this, unsigned long event, void *ptr)
 {
@@ -298,25 +282,22 @@
 
 		PRINT_INFO(GENERIC_DBG, "\n ============== IP Address Obtained ===============\n\n");
 
-
 		/*If we are in station mode or client mode*/
 		if (nic->iftype == STATION_MODE || nic->iftype == CLIENT_MODE) {
 			pstrWFIDrv->IFC_UP = 1;
 			g_obtainingIP = false;
-			WILC_TimerStop(&hDuringIpTimer, NULL);
+			del_timer(&hDuringIpTimer);
 			PRINT_D(GENERIC_DBG, "IP obtained , enable scan\n");
 		}
 
-
-
 		if (bEnablePS)
-			host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 1, 0);
+			host_int_set_power_mgmt(pstrWFIDrv, 1, 0);
 
 		PRINT_D(GENERIC_DBG, "[%s] Up IP\n", dev_iface->ifa_label);
 
 		pIP_Add_buff = (char *) (&(dev_iface->ifa_address));
-		PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d \n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
-		host_int_setup_ipaddress((WILC_WFIDrvHandle)pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
+		PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
+		host_int_setup_ipaddress(pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
 
 		break;
 
@@ -330,17 +311,16 @@
 		}
 
 		if (memcmp(dev_iface->ifa_label, wlan_dev_name, 5) == 0)
-			host_int_set_power_mgmt((WILC_WFIDrvHandle)pstrWFIDrv, 0, 0);
+			host_int_set_power_mgmt(pstrWFIDrv, 0, 0);
 
 		resolve_disconnect_aberration(pstrWFIDrv);
 
-
 		PRINT_D(GENERIC_DBG, "[%s] Down IP\n", dev_iface->ifa_label);
 
 		pIP_Add_buff = null_ip;
-		PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d \n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
+		PRINT_D(GENERIC_DBG, "IP add=%d:%d:%d:%d\n", pIP_Add_buff[0], pIP_Add_buff[1], pIP_Add_buff[2], pIP_Add_buff[3]);
 
-		host_int_setup_ipaddress((WILC_WFIDrvHandle)pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
+		host_int_setup_ipaddress(pstrWFIDrv, pIP_Add_buff, nic->u8IfIdx);
 
 		break;
 
@@ -387,8 +367,6 @@
 #if (defined WILC_SPI) || (defined WILC_SDIO_IRQ_GPIO)
 static irqreturn_t isr_uh_routine(int irq, void *user_data)
 {
-
-
 	int_rcvdU++;
 #if (RX_BH_TYPE != RX_BH_THREADED_IRQ)
 	linux_wlan_disable_irq(IRQ_NO_WAIT);
@@ -440,19 +418,14 @@
 	#else
 		return;
 	#endif
-
-
-
 	}
 
 	int_rcvdB++;
 	PRINT_D(INT_DBG, "Interrupt received BH\n");
-	if (g_linux_wlan->oup.wlan_handle_rx_isr != 0) {
+	if (g_linux_wlan->oup.wlan_handle_rx_isr != 0)
 		g_linux_wlan->oup.wlan_handle_rx_isr();
-	} else {
+	else
 		PRINT_ER("wlan_handle_rx_isr() hasn't been initialized\n");
-	}
-
 
 #if (RX_BH_TYPE == RX_BH_THREADED_IRQ)
 	return IRQ_HANDLED;
@@ -476,18 +449,16 @@
 		}
 		int_rcvdB++;
 		PRINT_D(INT_DBG, "Interrupt received BH\n");
-		if (g_linux_wlan->oup.wlan_handle_rx_isr != 0) {
+		if (g_linux_wlan->oup.wlan_handle_rx_isr != 0)
 			g_linux_wlan->oup.wlan_handle_rx_isr();
-		} else {
+		else
 			PRINT_ER("wlan_handle_rx_isr() hasn't been initialized\n");
-		}
 	}
 
 	return 0;
 }
 #endif
 
-
 #if (defined WILC_SPI) || (defined WILC_SDIO_IRQ_GPIO)
 static int init_irq(linux_wlan_t *p_nic)
 {
@@ -504,9 +475,9 @@
  *
  * ex) nic->dev_irq_num = gpio_to_irq(GPIO_NUM);
  */
-#elif defined (NM73131_0_BOARD)
+#elif defined(NM73131_0_BOARD)
 		nic->dev_irq_num = IRQ_WILC1000;
-#elif defined (PANDA_BOARD)
+#elif defined(PANDA_BOARD)
 		gpio_export(GPIO_NUM, 1);
 		nic->dev_irq_num = OMAP_GPIO_IRQ(GPIO_NUM);
 		irq_set_irq_type(nic->dev_irq_num, IRQ_TYPE_LEVEL_LOW);
@@ -518,7 +489,6 @@
 		PRINT_ER("could not obtain gpio for WILC_INTR\n");
 	}
 
-
 #if (RX_BH_TYPE == RX_BH_THREADED_IRQ)
 	if ((ret != -1) && (request_threaded_irq(nic->dev_irq_num, isr_uh_routine, isr_bh_routine,
 						  IRQF_TRIGGER_LOW | IRQF_ONESHOT,               /*Without IRQF_ONESHOT the uh will remain kicked in and dont gave a chance to bh*/
@@ -554,7 +524,6 @@
 #endif
 }
 
-
 /*
  *      OS functions
  */
@@ -601,7 +570,6 @@
 	}
 }
 
-
 static void *internal_alloc(uint32_t size, uint32_t flag)
 {
 	char *pntr = NULL;
@@ -610,7 +578,6 @@
 	return (void *)pntr;
 }
 
-
 static void linux_wlan_init_lock(char *lockName, void *plock, int count)
 {
 	sema_init((struct semaphore *)plock, count);
@@ -638,25 +605,22 @@
 {
 	int error = -1;
 	PRINT_D(LOCK_DBG, "Locking %p\n", vp);
-	if (vp != NULL)	{
+	if (vp != NULL)
 		error = down_timeout((struct semaphore *)vp, msecs_to_jiffies(timeout));
-	} else {
+	else
 		PRINT_ER("Failed, mutex is NULL\n");
-	}
 	return error;
 }
 
 void linux_wlan_unlock(void *vp)
 {
 	PRINT_D(LOCK_DBG, "Unlocking %p\n", vp);
-	if (vp != NULL)	{
+	if (vp != NULL)
 		up((struct semaphore *)vp);
-	} else {
+	else
 		PRINT_ER("Failed, mutex is NULL\n");
-	}
 }
 
-
 static void linux_wlan_init_mutex(char *lockName, void *plock, int count)
 {
 	mutex_init((struct mutex *)plock);
@@ -702,7 +666,6 @@
 	}
 }
 
-
 /*Added by Amr - BugID_4720*/
 static void linux_wlan_init_spin_lock(char *lockName, void *plock, int count)
 {
@@ -780,9 +743,8 @@
 		}
 	}
 	PRINT_INFO(INIT_DBG, "Invalide handle\n");
-	for (i = 0; i < 25; i++) {
+	for (i = 0; i < 25; i++)
 		PRINT_D(INIT_DBG, "%02x ", pMacHeader[i]);
-	}
 	Bssid  = pMacHeader + 18;
 	Bssid1 = pMacHeader + 12;
 	for (i = 0; i < g_linux_wlan->u8NoIfcs; i++) {
@@ -822,9 +784,8 @@
 	uint8_t ret_val = 0;
 
 	for (i = 0; i < g_linux_wlan->u8NoIfcs; i++) {
-		if (memcmp(g_linux_wlan->strInterfaceInfo[i].aBSSID, null_bssid, 6)) {
+		if (memcmp(g_linux_wlan->strInterfaceInfo[i].aBSSID, null_bssid, 6))
 			ret_val++;
-		}
 	}
 	return ret_val;
 }
@@ -868,7 +829,6 @@
 #define TX_BACKOFF_WEIGHT_MIN (0)
 #define TX_BACKOFF_WEIGHT_UNIT_MS (10)
 	int backoff_weight = TX_BACKOFF_WEIGHT_MIN;
-	signed long timeout;
 #endif
 
 	/* inform wilc1000_wlan_init that TXQ task is started. */
@@ -906,7 +866,6 @@
 			}
 
 			if (ret == WILC_TX_ERR_NO_BUF) { /* failed to allocate buffers in chip. */
-				timeout = msecs_to_jiffies(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight);
 				do {
 					/* Back off from sending packets for some time. */
 					/* schedule_timeout will allow RX task to run and free buffers.*/
@@ -915,15 +874,13 @@
 					msleep(TX_BACKOFF_WEIGHT_UNIT_MS << backoff_weight);
 				} while (/*timeout*/ 0);
 				backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
-				if (backoff_weight > TX_BACKOFF_WEIGHT_MAX) {
+				if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
 					backoff_weight = TX_BACKOFF_WEIGHT_MAX;
-				}
 			} else {
 				if (backoff_weight > TX_BACKOFF_WEIGHT_MIN) {
 					backoff_weight -= TX_BACKOFF_WEIGHT_DECR_STEP;
-					if (backoff_weight < TX_BACKOFF_WEIGHT_MIN) {
+					if (backoff_weight < TX_BACKOFF_WEIGHT_MIN)
 						backoff_weight = TX_BACKOFF_WEIGHT_MIN;
-					}
 				}
 			}
 			/*TODO: drop packets after a certain time/number of retry count. */
@@ -946,7 +903,6 @@
 	const struct firmware *wilc_firmware;
 	char *firmware;
 
-
 	if (nic->iftype == AP_MODE)
 		firmware = AP_FIRMWARE;
 	else if (nic->iftype == STATION_MODE)
@@ -958,8 +914,6 @@
 		firmware = P2P_CONCURRENCY_FIRMWARE;
 	}
 
-
-
 	if (nic == NULL) {
 		PRINT_ER("NIC is NULL\n");
 		goto _fail_;
@@ -970,7 +924,6 @@
 		goto _fail_;
 	}
 
-
 	/*	the firmare should be located in /lib/firmware in
 	 *      root file system with the name specified above */
 
@@ -1054,9 +1007,8 @@
 	 **/
 	PRINT_D(INIT_DBG, "Downloading Firmware ...\n");
 	ret = g_linux_wlan->oup.wlan_firmware_download(g_linux_wlan->wilc_firmware->data, g_linux_wlan->wilc_firmware->size);
-	if (ret < 0) {
+	if (ret < 0)
 		goto _FAIL_;
-	}
 
 	/* Freeing FW buffer */
 	PRINT_D(INIT_DBG, "Freeing FW buffer ...\n");
@@ -1064,13 +1016,12 @@
 	release_firmware(g_linux_wlan->wilc_firmware);
 	g_linux_wlan->wilc_firmware = NULL;
 
-	PRINT_D(INIT_DBG, "Download Succeeded \n");
+	PRINT_D(INIT_DBG, "Download Succeeded\n");
 
 _FAIL_:
 	return ret;
 }
 
-
 /* startup configuration - could be changed later using iconfig*/
 static int linux_wlan_init_test_config(struct net_device *dev, linux_wlan_t *p_nic)
 {
@@ -1096,7 +1047,6 @@
 	PRINT_D(INIT_DBG, "MAC address is : %02x-%02x-%02x-%02x-%02x-%02x\n", mac_add[0], mac_add[1], mac_add[2], mac_add[3], mac_add[4], mac_add[5]);
 	wilc_get_chipid(0);
 
-
 	if (g_linux_wlan->oup.wlan_cfg_set == NULL) {
 		PRINT_D(INIT_DBG, "Null p[ointer\n");
 		goto _fail_;
@@ -1116,7 +1066,6 @@
 	if (!g_linux_wlan->oup.wlan_cfg_set(0, WID_BSS_TYPE, c_val, 1, 0, 0))
 		goto _fail_;
 
-
 	/* c_val[0] = RATE_AUTO; / * bug 4275: Enable autorate and limit it to 24Mbps * / */
 	c_val[0] = RATE_AUTO;
 	if (!g_linux_wlan->oup.wlan_cfg_set(0, WID_CURRENT_TX_RATE, c_val, 1, 0, 0))
@@ -1351,7 +1300,6 @@
 	return -1;
 }
 
-
 /**************************/
 void wilc1000_wlan_deinit(linux_wlan_t *nic)
 {
@@ -1385,16 +1333,12 @@
 		  #endif
 		#endif
 
-
 		/* not sure if the following unlocks are needed or not*/
-		if (&g_linux_wlan->rxq_event != NULL) {
+		if (&g_linux_wlan->rxq_event != NULL)
 			linux_wlan_unlock(&g_linux_wlan->rxq_event);
-		}
 
-		if (&g_linux_wlan->txq_event != NULL) {
+		if (&g_linux_wlan->txq_event != NULL)
 			linux_wlan_unlock(&g_linux_wlan->txq_event);
-		}
-
 
 	#if (RX_BH_TYPE == RX_BH_WORK_QUEUE)
 		/*Removing the work struct from the linux kernel workqueue*/
@@ -1412,7 +1356,6 @@
 		PRINT_D(INIT_DBG, "Deinitializing IRQ\n");
 		deinit_irq(g_linux_wlan);
 
-
 		if (&g_linux_wlan->oup != NULL) {
 			if (g_linux_wlan->oup.wlan_stop != NULL)
 				g_linux_wlan->oup.wlan_stop();
@@ -1442,7 +1385,6 @@
 	} else {
 		PRINT_D(INIT_DBG, "wilc1000 is not initialized\n");
 	}
-	return;
 }
 
 int wlan_init_locks(linux_wlan_t *p_nic)
@@ -1536,7 +1478,7 @@
 
 	nwi->os_context.txq_wait_event = (void *)&g_linux_wlan->txq_event;
 
-#if defined (MEMORY_STATIC)
+#if defined(MEMORY_STATIC)
 	nwi->os_context.rx_buffer_size = LINUX_RX_SIZE;
 #endif
 	nwi->os_context.rxq_critical_section = (void *)&g_linux_wlan->rxq_cs;
@@ -1673,17 +1615,14 @@
 	if (&g_linux_wlan->rxq_event != NULL)
 		linux_wlan_unlock(&g_linux_wlan->rxq_event);
 
-
 	if (g_linux_wlan->rxq_thread != NULL) {
 		kthread_stop(g_linux_wlan->rxq_thread);
 		g_linux_wlan->rxq_thread = NULL;
 	}
 
-
 	if (&g_linux_wlan->txq_event != NULL)
 		linux_wlan_unlock(&g_linux_wlan->txq_event);
 
-
 	if (g_linux_wlan->txq_thread != NULL) {
 		kthread_stop(g_linux_wlan->txq_thread);
 		g_linux_wlan->txq_thread = NULL;
@@ -1747,14 +1686,12 @@
 		}
 	}
 
-	if (index == array_size) {
+	if (index == array_size)
 		PRINT_ER("random MAC\n");
-	}
 
 exit:
-	if (fp && !IS_ERR(fp)) {
+	if (fp && !IS_ERR(fp))
 		filp_close(fp, NULL);
-	}
 
 	set_fs(old_fs);
 
@@ -1786,9 +1723,8 @@
 
 		sdio_register_driver(&wilc_bus);
 
-		while (!probe) {
+		while (!probe)
 			msleep(100);
-		}
 		probe = 0;
 		g_linux_wlan->wilc_sdio_func = local_sdio_func;
 		linux_to_wlan(nwi, nic);
@@ -1820,9 +1756,8 @@
 	sdio_register_driver(&wilc_bus);
 
 	/* msleep(1000); */
-	while (!probe) {
+	while (!probe)
 		msleep(100);
-	}
 	probe = 0;
 	g_linux_wlan->wilc_sdio_func = local_sdio_func;
 	linux_to_wlan(&nwi, g_linux_wlan);
@@ -1834,7 +1769,7 @@
 	#endif
 
 	if (linux_wlan_get_firmware(nic)) {
-		PRINT_ER("Can't get firmware \n");
+		PRINT_ER("Can't get firmware\n");
 		ret = -1;
 		goto __fail__;
 	}
@@ -1847,9 +1782,8 @@
 	}
 	/* Start firmware*/
 	ret = linux_wlan_start_firmware(nic);
-	if (ret < 0) {
+	if (ret < 0)
 		PRINT_ER("Failed to start firmware\n");
-	}
 __fail__:
 	return ret;
 }
@@ -1871,9 +1805,8 @@
 
 #ifdef STATIC_MACADDRESS
 		wilc_mac_thread = kthread_run(linux_wlan_read_mac_addr, NULL, "wilc_mac_thread");
-		if (wilc_mac_thread < 0) {
+		if (wilc_mac_thread < 0)
 			PRINT_ER("couldn't create Mac addr thread\n");
-		}
 #endif
 
 		linux_to_wlan(&nwi, g_linux_wlan);
@@ -1889,7 +1822,6 @@
 		/*Save the oup structre into global pointer*/
 		gpstrWlanOps = &g_linux_wlan->oup;
 
-
 		ret = wlan_initialize_threads(nic);
 		if (ret < 0) {
 			PRINT_ER("Initializing Threads FAILED\n");
@@ -1922,12 +1854,11 @@
 #endif
 
 		if (linux_wlan_get_firmware(nic)) {
-			PRINT_ER("Can't get firmware \n");
+			PRINT_ER("Can't get firmware\n");
 			ret = -EIO;
 			goto _fail_irq_enable_;
 		}
 
-
 		/*Download firmware*/
 		ret = linux_wlan_firmware_download(g_linux_wlan);
 		if (ret < 0) {
@@ -1967,7 +1898,6 @@
 		g_linux_wlan->wilc1000_initialized = 1;
 		return 0; /*success*/
 
-
 _fail_fw_start_:
 		if (&g_linux_wlan->oup != NULL) {
 			if (g_linux_wlan->oup.wlan_stop != NULL)
@@ -1996,12 +1926,11 @@
 	return ret;
 }
 
-
 /*
  *      - this function will be called automatically by OS when module inserted.
  */
 
-#if !defined (NM73131_0_BOARD)
+#if !defined(NM73131_0_BOARD)
 int mac_init_fn(struct net_device *ndev)
 {
 
@@ -2028,12 +1957,11 @@
 }
 #endif
 
-
 void    WILC_WFI_frame_register(struct wiphy *wiphy, struct net_device *dev,
 				u16 frame_type, bool reg);
 
 /* This fn is called, when this device is setup using ifconfig */
-#if !defined (NM73131_0_BOARD)
+#if !defined(NM73131_0_BOARD)
 int mac_open(struct net_device *ndev)
 {
 	perInterface_wlan_t *nic;
@@ -2078,7 +2006,7 @@
 	for (i = 0; i < g_linux_wlan->u8NoIfcs; i++) {
 		if (ndev == g_linux_wlan->strInterfaceInfo[i].wilc_netdev) {
 			memcpy(g_linux_wlan->strInterfaceInfo[i].aSrcAddress, mac_add, ETH_ALEN);
-			g_linux_wlan->strInterfaceInfo[i].drvHandler = (u32)priv->hWILCWFIDrv;
+			g_linux_wlan->strInterfaceInfo[i].drvHandler = priv->hWILCWFIDrv;
 			break;
 		}
 	}
@@ -2092,7 +2020,6 @@
 		goto _err_;
 	}
 
-
 	WILC_WFI_frame_register(nic->wilc_netdev->ieee80211_ptr->wiphy, nic->wilc_netdev,
 				nic->g_struct_frame_reg[0].frame_type, nic->g_struct_frame_reg[0].reg);
 	WILC_WFI_frame_register(nic->wilc_netdev->ieee80211_ptr->wiphy, nic->wilc_netdev,
@@ -2131,7 +2058,6 @@
 {
 	perInterface_wlan_t *nic = netdev_priv(dev);
 
-
 	return &nic->netstats;
 }
 
@@ -2146,17 +2072,16 @@
 	priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
 	pstrWFIDrv = (tstrWILC_WFIDrv *)priv->hWILCWFIDrv;
 
-
 	if (!dev)
 		return;
 
-	PRINT_D(INIT_DBG, "Setting Multicast List with count = %d. \n", dev->mc.count);
+	PRINT_D(INIT_DBG, "Setting Multicast List with count = %d.\n", dev->mc.count);
 
 	if (dev->flags & IFF_PROMISC) {
 		/* Normally, we should configure the chip to retrive all packets
 		 * but we don't wanna support this right now */
 		/* TODO: add promiscuous mode support */
-		PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets \n");
+		PRINT_D(INIT_DBG, "Set promiscuous mode ON, retrive all packets\n");
 		return;
 	}
 
@@ -2165,27 +2090,27 @@
 	if ((dev->flags & IFF_ALLMULTI) || (dev->mc.count) > WILC_MULTICAST_TABLE_SIZE) {
 		PRINT_D(INIT_DBG, "Disable multicast filter, retrive all multicast packets\n");
 		/* get all multicast packets */
-		host_int_setup_multicast_filter((WILC_WFIDrvHandle)pstrWFIDrv, false, 0);
+		host_int_setup_multicast_filter(pstrWFIDrv, false, 0);
 		return;
 	}
 
 	/* No multicast?  Just get our own stuff */
 	if ((dev->mc.count) == 0) {
 		PRINT_D(INIT_DBG, "Enable multicast filter, retrive directed packets only.\n");
-		host_int_setup_multicast_filter((WILC_WFIDrvHandle)pstrWFIDrv, true, 0);
+		host_int_setup_multicast_filter(pstrWFIDrv, true, 0);
 		return;
 	}
 
 	/* Store all of the multicast addresses in the hardware filter */
 	netdev_for_each_mc_addr(ha, dev)
 	{
-		WILC_memcpy(gau8MulticastMacAddrList[i], ha->addr, ETH_ALEN);
+		memcpy(gau8MulticastMacAddrList[i], ha->addr, ETH_ALEN);
 		PRINT_D(INIT_DBG, "Entry[%d]: %x:%x:%x:%x:%x:%x\n", i,
 			gau8MulticastMacAddrList[i][0], gau8MulticastMacAddrList[i][1], gau8MulticastMacAddrList[i][2], gau8MulticastMacAddrList[i][3], gau8MulticastMacAddrList[i][4], gau8MulticastMacAddrList[i][5]);
 		i++;
 	}
 
-	host_int_setup_multicast_filter((WILC_WFIDrvHandle)pstrWFIDrv, true, (dev->mc.count));
+	host_int_setup_multicast_filter(pstrWFIDrv, true, (dev->mc.count));
 
 	return;
 
@@ -2195,11 +2120,10 @@
 {
 
 	struct tx_complete_data *pv_data = (struct tx_complete_data *)priv;
-	if (status == 1) {
+	if (status == 1)
 		PRINT_D(TX_DBG, "Packet sent successfully - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
-	} else {
+	else
 		PRINT_D(TX_DBG, "Couldn't send packet - Size = %d - Address = %p - SKB = %p\n", pv_data->size, pv_data->buff, pv_data->skb);
-	}
 	/* Free the SK Buffer, its work is done */
 	dev_kfree_skb(pv_data->skb);
 	linux_wlan_free(pv_data);
@@ -2215,7 +2139,7 @@
 	struct ethhdr *eth_h;
 	nic = netdev_priv(ndev);
 
-	PRINT_D(INT_DBG, "\n========\n IntUH: %d - IntBH: %d - IntCld: %d \n========\n", int_rcvdU, int_rcvdB, int_clrd);
+	PRINT_D(INT_DBG, "\n========\n IntUH: %d - IntBH: %d - IntCld: %d\n========\n", int_rcvdU, int_rcvdB, int_clrd);
 	PRINT_D(TX_DBG, "Sending packet just received from TCP/IP\n");
 
 	/* Stop the network interface queue */
@@ -2237,18 +2161,16 @@
 	tx_data->skb  = skb;
 
 	eth_h = (struct ethhdr *)(skb->data);
-	if (eth_h->h_proto == 0x8e88) {
+	if (eth_h->h_proto == 0x8e88)
 		PRINT_D(INIT_DBG, "EAPOL transmitted\n");
-	}
 
 	/*get source and dest ip addresses*/
 	ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
 
 	pu8UdpBuffer = (char *)ih + sizeof(struct iphdr);
-	if ((pu8UdpBuffer[1] == 68 && pu8UdpBuffer[3] == 67) || (pu8UdpBuffer[1] == 67 && pu8UdpBuffer[3] == 68)) {
+	if ((pu8UdpBuffer[1] == 68 && pu8UdpBuffer[3] == 67) || (pu8UdpBuffer[1] == 67 && pu8UdpBuffer[3] == 68))
 		PRINT_D(GENERIC_DBG, "DHCP Message transmitted, type:%x %x %x\n", pu8UdpBuffer[248], pu8UdpBuffer[249], pu8UdpBuffer[250]);
 
-	}
 	PRINT_D(TX_DBG, "Sending packet - Size = %d - Address = %p - SKB = %p\n", tx_data->size, tx_data->buff, tx_data->skb);
 
 	/* Send packet to MAC HW - for now the tx_complete function will be just status
@@ -2269,7 +2191,6 @@
 	QueueCount = WILC_Xmit_data((void *)tx_data, HOST_TO_WLAN);
 	#endif /* WILC_FULLY_HOSTING_AP */
 
-
 	if (QueueCount > FLOW_CONTROL_UPPER_THRESHOLD) {
 		netif_stop_queue(g_linux_wlan->strInterfaceInfo[0].wilc_netdev);
 		netif_stop_queue(g_linux_wlan->strInterfaceInfo[1].wilc_netdev);
@@ -2278,7 +2199,6 @@
 	return 0;
 }
 
-
 int mac_close(struct net_device *ndev)
 {
 	struct WILC_WFI_priv *priv;
@@ -2301,8 +2221,6 @@
 
 	pstrWFIDrv = (tstrWILC_WFIDrv *)priv->hWILCWFIDrv;
 
-
-
 	PRINT_D(GENERIC_DBG, "Mac close\n");
 
 	if (g_linux_wlan == NULL) {
@@ -2348,7 +2266,6 @@
 	return 0;
 }
 
-
 int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
 {
 
@@ -2359,8 +2276,6 @@
 	struct WILC_WFI_priv *priv;
 	s32 s32Error = WILC_SUCCESS;
 
-
-
 	/* struct iwreq *wrq = (struct iwreq *) req;	// tony moved to case SIOCSIWPRIV */
 	#ifdef USE_WIRELESS
 	nic = netdev_priv(ndev);
@@ -2422,9 +2337,7 @@
 
 done:
 
-	if (buff != NULL) {
-		kfree(buff);
-	}
+	kfree(buff);
 
 	return s32Error;
 }
@@ -2455,7 +2368,6 @@
 		frame_len = size;
 		buff_to_send = buff;
 
-
 		/* Need to send the packet up to the host, allocate a skb buffer */
 		skb = dev_alloc_skb(frame_len);
 		if (skb == NULL) {
@@ -2465,14 +2377,12 @@
 
 		skb_reserve(skb, (unsigned int)skb->data & 0x3);
 
-		if (g_linux_wlan == NULL || wilc_netdev == NULL) {
+		if (g_linux_wlan == NULL || wilc_netdev == NULL)
 			PRINT_ER("wilc_netdev in g_linux_wlan is NULL");
-		}
 		skb->dev = wilc_netdev;
 
-		if (skb->dev == NULL) {
+		if (skb->dev == NULL)
 			PRINT_ER("skb->dev is NULL\n");
-		}
 
 		/*
 		 * for(i=0;i<40;i++)
@@ -2502,9 +2412,8 @@
 		ih = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
 
 		pu8UdpBuffer = (char *)ih + sizeof(struct iphdr);
-		if (buff_to_send[35] == 67 && buff_to_send[37] == 68) {
+		if (buff_to_send[35] == 67 && buff_to_send[37] == 68)
 			PRINT_D(RX_DBG, "DHCP Message received\n");
-		}
 		if (buff_to_send[12] == 0x88 && buff_to_send[13] == 0x8e)
 			PRINT_D(GENERIC_DBG, "eapol received\n");
 			#endif
@@ -2516,9 +2425,8 @@
 		PRINT_D(RX_DBG, "netif_rx ret value is: %d\n", stats);
 	}
 		#ifndef TCP_ENHANCEMENTS
-	else {
+	else
 		PRINT_ER("Discard sending packet with len = %d\n", size);
-	}
 		#endif
 }
 
@@ -2541,9 +2449,8 @@
 	#ifdef WILC_P2P
 	nic = netdev_priv(g_linux_wlan->strInterfaceInfo[1].wilc_netdev); /* p2p0 */
 	if ((buff[0] == nic->g_struct_frame_reg[0].frame_type && nic->g_struct_frame_reg[0].reg) ||
-	    (buff[0] == nic->g_struct_frame_reg[1].frame_type && nic->g_struct_frame_reg[1].reg)) {
+	    (buff[0] == nic->g_struct_frame_reg[1].frame_type && nic->g_struct_frame_reg[1].reg))
 		WILC_WFI_p2p_rx(g_linux_wlan->strInterfaceInfo[1].wilc_netdev, buff, size);
-	}
 	#endif
 }
 
@@ -2557,7 +2464,7 @@
 	linux_wlan_init_lock("close_exit_sync", &close_exit_sync, 0);
 
 	/*create the common structure*/
-	g_linux_wlan = (linux_wlan_t *)WILC_MALLOC(sizeof(linux_wlan_t));
+	g_linux_wlan = WILC_MALLOC(sizeof(linux_wlan_t));
 	memset(g_linux_wlan, 0, sizeof(linux_wlan_t));
 
 	/*Reset interrupt count debug*/
@@ -2625,7 +2532,6 @@
 		}
 		#endif
 
-
 		if (register_netdev(ndev)) {
 			PRINT_ER("Device couldn't be registered - %s\n", ndev->name);
 			return -1; /* ERROR */
@@ -2638,7 +2544,7 @@
 
 	#ifndef WILC_SDIO
 	if (!linux_spi_init(&g_linux_wlan->wilc_spidev)) {
-		PRINT_ER("Can't initialize SPI \n");
+		PRINT_ER("Can't initialize SPI\n");
 		return -1; /* ERROR */
 	}
 	g_linux_wlan->wilc_spidev = wilc_spi_dev;
@@ -2649,13 +2555,10 @@
 	return 0;
 }
 
-
 /*The 1st function called after module inserted*/
 static int __init init_wilc_driver(void)
 {
-
-
-#if defined (WILC_DEBUGFS)
+#if defined(WILC_DEBUGFS)
 	if (wilc_debugfs_init() < 0) {
 		PRINT_D(GENERIC_DBG, "fail to create debugfs for wilc driver\n");
 		return -1;
@@ -2674,17 +2577,15 @@
 		int ret;
 
 		ret = sdio_register_driver(&wilc_bus);
-		if (ret < 0) {
+		if (ret < 0)
 			PRINT_D(INIT_DBG, "init_wilc_driver: Failed register sdio driver\n");
-		}
 
 		return ret;
 	}
 #else
 	PRINT_D(INIT_DBG, "Initializing netdev\n");
-	if (wilc_netdev_init()) {
+	if (wilc_netdev_init())
 		PRINT_ER("Couldn't initialize netdev\n");
-	}
 	return 0;
 #endif
 }
@@ -2702,18 +2603,15 @@
 		unregister_inetaddr_notifier(&g_dev_notifier);
 	#endif
 
-		for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
+		for (i = 0; i < NUM_CONCURRENT_IFC; i++)
 			nic[i] = netdev_priv(g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
-		}
 	}
 
-
 	if ((g_linux_wlan != NULL) && g_linux_wlan->wilc_firmware != NULL) {
 		release_firmware(g_linux_wlan->wilc_firmware);
 		g_linux_wlan->wilc_firmware = NULL;
 	}
 
-
 	if ((g_linux_wlan != NULL) && (((g_linux_wlan->strInterfaceInfo[0].wilc_netdev) != NULL)
 				       || ((g_linux_wlan->strInterfaceInfo[1].wilc_netdev) != NULL))) {
 		PRINT_D(INIT_DBG, "Waiting for mac_close ....\n");
@@ -2723,17 +2621,15 @@
 		else
 			PRINT_D(INIT_DBG, "mac_closed\n");
 
-
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
 			/* close all opened interfaces */
 			if (g_linux_wlan->strInterfaceInfo[i].wilc_netdev != NULL) {
-				if (nic[i]->mac_opened)	{
+				if (nic[i]->mac_opened)
 					mac_close(g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
-				}
 			}
 		}
 		for (i = 0; i < NUM_CONCURRENT_IFC; i++) {
-			PRINT_D(INIT_DBG, "Unregistering netdev %p \n", g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
+			PRINT_D(INIT_DBG, "Unregistering netdev %p\n", g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
 			unregister_netdev(g_linux_wlan->strInterfaceInfo[i].wilc_netdev);
 			#ifdef USE_WIRELESS
 			PRINT_D(INIT_DBG, "Freeing Wiphy...\n");
@@ -2744,7 +2640,6 @@
 		}
 	}
 
-
 #ifdef USE_WIRELESS
 #ifdef WILC_AP_EXTERNAL_MLME
 	/* Bug 4600 : WILC_WFI_deinit_mon_interface was already called at mac_close */
@@ -2764,12 +2659,12 @@
 
 		linux_wlan_deinit_lock(&close_exit_sync);
 		if (g_linux_wlan != NULL) {
-			WILC_FREE(g_linux_wlan);
+			kfree(g_linux_wlan);
 			g_linux_wlan = NULL;
 		}
 		printk("Module_exit Done.\n");
 
-#if defined (WILC_DEBUGFS)
+#if defined(WILC_DEBUGFS)
 		wilc_debugfs_remove();
 #endif
 
@@ -2780,4 +2675,3 @@
 module_exit(exit_wilc_driver);
 
 MODULE_LICENSE("GPL");
-#endif
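
The allocation hunk above still pairs WILC_MALLOC() with a separate memset(); once the wrapper goes away entirely, the usual kernel idiom is a single zeroing allocation plus an error check. A minimal sketch of what that call site could become (illustrative only, not part of this series):

	g_linux_wlan = kzalloc(sizeof(*g_linux_wlan), GFP_KERNEL);
	if (!g_linux_wlan)
		return -ENOMEM;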
diff --git a/drivers/staging/wilc1000/linux_wlan_common.h b/drivers/staging/wilc1000/linux_wlan_common.h
index 2476bfd..e6ebf3e 100644
--- a/drivers/staging/wilc1000/linux_wlan_common.h
+++ b/drivers/staging/wilc1000/linux_wlan_common.h
@@ -39,8 +39,8 @@
 #define FIRM_DBG                (1 << Firmware_debug)
 
 #if defined (WILC_DEBUGFS)
-extern int wilc_debugfs_init(void);
-extern void wilc_debugfs_remove(void);
+int wilc_debugfs_init(void);
+void wilc_debugfs_remove(void);
 
 extern atomic_t REGION;
 extern atomic_t DEBUG_LEVEL;
diff --git a/drivers/staging/wilc1000/linux_wlan_sdio.c b/drivers/staging/wilc1000/linux_wlan_sdio.c
index 858e3a1..37f31f4 100644
--- a/drivers/staging/wilc1000/linux_wlan_sdio.c
+++ b/drivers/staging/wilc1000/linux_wlan_sdio.c
@@ -31,7 +31,6 @@
 struct sdio_func *local_sdio_func;
 extern linux_wlan_t *g_linux_wlan;
 extern int wilc_netdev_init(void);
-extern int sdio_clear_int(void);
 extern void wilc_handle_isr(void);
 
 static unsigned int sdio_default_speed;
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index c328208..ae11186 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -48,38 +48,28 @@
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
 
-static ssize_t wilc_debug_level_write(struct file *filp, const char *buf, size_t count, loff_t *ppos)
+static ssize_t wilc_debug_level_write(struct file *filp, const char __user *buf,
+					size_t count, loff_t *ppos)
 {
-	char buffer[128] = {};
 	int flag = 0;
+	int ret;
 
-	if (count > sizeof(buffer))
-		return -EINVAL;
-
-	if (copy_from_user(buffer, buf, count)) {
-		return -EFAULT;
-	}
-
-	flag = buffer[0] - '0';
-
-	if (flag > 0) {
-		flag = DEBUG | ERR;
-	} else if (flag < 0) {
-		flag = 100;
-	}
+	ret = kstrtouint_from_user(buf, count, 16, &flag);
+	if (ret)
+		return ret;
 
 	if (flag > DBG_LEVEL_ALL) {
 		printk("%s, value (0x%08x) is out of range, stay previous flag (0x%08x)\n", __func__, flag, atomic_read(&DEBUG_LEVEL));
-		return -EFAULT;
+		return -EINVAL;
 	}
 
 	atomic_set(&DEBUG_LEVEL, (int)flag);
 
-	if (flag == 0) {
+	if (flag == 0)
 		printk("Debug-level disabled\n");
-	} else {
+	else
 		printk("Debug-level enabled\n");
-	}
+
 	return count;
 }
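
kstrtouint_from_user() replaces the hand-rolled copy_from_user() parsing above: it copies the user buffer and parses it in one call, returning 0 on success or a negative errno. A minimal debugfs write handler built around it might look like the sketch below (the function name is illustrative; DEBUG_LEVEL is the driver's existing atomic):

	static ssize_t dbg_level_write(struct file *filp, const char __user *buf,
				       size_t count, loff_t *ppos)
	{
		unsigned int level;
		int ret;

		/* copy from user space and parse (base 16, as above) in one step */
		ret = kstrtouint_from_user(buf, count, 16, &level);
		if (ret)
			return ret;

		atomic_set(&DEBUG_LEVEL, level);
		return count;
	}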
 
diff --git a/drivers/staging/wilc1000/wilc_exported_buf.c b/drivers/staging/wilc1000/wilc_exported_buf.c
index 5294578..c3f6a0a 100644
--- a/drivers/staging/wilc1000/wilc_exported_buf.c
+++ b/drivers/staging/wilc1000/wilc_exported_buf.c
@@ -8,9 +8,6 @@
 #define LINUX_TX_SIZE	(64 * 1024)
 #define WILC1000_FW_SIZE (4 * 1024)
 
-#define DECLARE_WILC_BUFFER(name)	\
-	void *exported_ ## name = NULL;
-
 #define MALLOC_WILC_BUFFER(name, size)	\
 	exported_ ## name = kmalloc(size, GFP_KERNEL);	  \
 	if (!exported_ ## name) {   \
@@ -24,9 +21,9 @@
 /*
  * Add necessary buffer pointers
  */
-DECLARE_WILC_BUFFER(g_tx_buf)
-DECLARE_WILC_BUFFER(g_rx_buf)
-DECLARE_WILC_BUFFER(g_fw_buf)
+void *exported_g_tx_buf;
+void *exported_g_rx_buf;
+void *exported_g_fw_buf;
 
 void *get_tx_buffer(void)
 {
@@ -65,12 +62,10 @@
 	FREE_WILC_BUFFER(g_tx_buf)
 	FREE_WILC_BUFFER(g_rx_buf)
 	FREE_WILC_BUFFER(g_fw_buf)
-
-	return;
 }
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Tony Cho");
 MODULE_DESCRIPTION("WILC1xxx Memory Manager");
 pure_initcall(wilc_module_init);
-module_exit(wilc_module_deinit);
\ No newline at end of file
+module_exit(wilc_module_deinit);
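
MALLOC_WILC_BUFFER() above is still a bare multi-statement macro; the usual kernel convention is to wrap such macros in do { } while (0) so they behave as a single statement after an un-braced if. A sketch of that shape, with an illustrative error print (the real macro's failure path is not reproduced here):

	#define MALLOC_WILC_BUFFER(name, size)				\
		do {							\
			exported_ ## name = kmalloc(size, GFP_KERNEL);	\
			if (!exported_ ## name)				\
				pr_err("%s: allocation failed\n", #name); \
		} while (0)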
diff --git a/drivers/staging/wilc1000/wilc_log.h b/drivers/staging/wilc1000/wilc_log.h
deleted file mode 100644
index 2269ebd..0000000
--- a/drivers/staging/wilc1000/wilc_log.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef __WILC_LOG_H__
-#define __WILC_LOG_H__
-
-/* Errors will always get printed */
-#define WILC_ERROR(...) do {  WILC_PRINTF("(ERR)(%s:%d) ", __WILC_FUNCTION__, __WILC_LINE__); \
-			      WILC_PRINTF(__VA_ARGS__); \
-				} while (0)
-
-/* Wraning only printed if verbosity is 1 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 0)
-#define WILC_WARN(...) do { WILC_PRINTF("(WRN)"); \
-			    WILC_PRINTF(__VA_ARGS__); \
-				} while (0)
-#else
-#define WILC_WARN(...) (0)
-#endif
-
-/* Info only printed if verbosity is 2 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 1)
-#define WILC_INFO(...) do {  WILC_PRINTF("(INF)"); \
-			     WILC_PRINTF(__VA_ARGS__); \
-				} while (0)
-#else
-#define WILC_INFO(...) (0)
-#endif
-
-/* Debug is only printed if verbosity is 3 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 2)
-#define WILC_DBG(...) do { WILC_PRINTF("(DBG)(%s:%d) ", __WILC_FUNCTION__, __WILC_LINE__); \
-			   WILC_PRINTF(__VA_ARGS__); \
-			} while (0)
-
-#else
-#define WILC_DBG(...) (0)
-#endif
-
-/* Function In/Out is only printed if verbosity is 4 or more */
-#if (WILC_LOG_VERBOSITY_LEVEL > 3)
-#define WILC_FN_IN do { WILC_PRINTF("(FIN) (%s:%d) \n", __WILC_FUNCTION__, __WILC_LINE__);  } while (0)
-#define WILC_FN_OUT(ret) do { WILC_PRINTF("(FOUT) (%s:%d) %d.\n", __WILC_FUNCTION__, __WILC_LINE__, (ret));  } while (0)
-#else
-#define WILC_FN_IN (0)
-#define WILC_FN_OUT(ret) (0)
-#endif
-
-
-#endif
\ No newline at end of file
diff --git a/drivers/staging/wilc1000/wilc_memory.c b/drivers/staging/wilc1000/wilc_memory.c
index c70707f..e90a957 100644
--- a/drivers/staging/wilc1000/wilc_memory.c
+++ b/drivers/staging/wilc1000/wilc_memory.c
@@ -9,50 +9,8 @@
 void *WILC_MemoryAlloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
 		       char *pcFileName, u32 u32LineNo)
 {
-	if (u32Size > 0) {
+	if (u32Size > 0)
 		return kmalloc(u32Size, GFP_ATOMIC);
-	} else {
+	else
 		return NULL;
-	}
-}
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void *WILC_MemoryCalloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
-			char *pcFileName, u32 u32LineNo)
-{
-	return kcalloc(u32Size, 1, GFP_KERNEL);
-}
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void *WILC_MemoryRealloc(void *pvOldBlock, u32 u32NewSize,
-			 tstrWILC_MemoryAttrs *strAttrs, char *pcFileName, u32 u32LineNo)
-{
-	if (u32NewSize == 0) {
-		kfree(pvOldBlock);
-		return NULL;
-	} else if (pvOldBlock == NULL)	 {
-		return kmalloc(u32NewSize, GFP_KERNEL);
-	} else {
-		return krealloc(pvOldBlock, u32NewSize, GFP_KERNEL);
-	}
-
-}
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void WILC_MemoryFree(const void *pvBlock, tstrWILC_MemoryAttrs *strAttrs,
-		     char *pcFileName, u32 u32LineNo)
-{
-	kfree(pvBlock);
 }
diff --git a/drivers/staging/wilc1000/wilc_memory.h b/drivers/staging/wilc1000/wilc_memory.h
index 372d705..f19cec1 100644
--- a/drivers/staging/wilc1000/wilc_memory.h
+++ b/drivers/staging/wilc1000/wilc_memory.h
@@ -38,8 +38,6 @@
  *  @sa		sttrWILC_MemoryAttrs
  *  @sa		WILC_MALLOC
  *  @sa		WILC_MALLOC_EX
- *  @sa		WILC_NEW
- *  @sa		WILC_NEW_EX
  *  @author	syounan
  *  @date	16 Aug 2010
  *  @version	1.0
@@ -48,140 +46,12 @@
 		       char *pcFileName, u32 u32LineNo);
 
 /*!
- *  @brief	Allocates a given size of bytes and zero filling it
- *  @param[in]	u32Size size of memory in bytes to be allocated
- *  @param[in]	strAttrs Optional attributes, NULL for default
- *              if not NULL, pAllocationPool should point to the pool to use for
- *              this allocation. if NULL memory will be allocated directly from
- *              the system
- *  @param[in]	pcFileName file name of the calling code for debugging
- *  @param[in]	u32LineNo line number of the calling code for debugging
- *  @return	The new allocated block, NULL if allocation fails
- *  @note	It is recommended to use of of the wrapper macros instead of
- *              calling this function directly
- *  @sa		sttrWILC_MemoryAttrs
- *  @sa		WILC_CALLOC
- *  @sa		WILC_CALLOC_EX
- *  @sa		WILC_NEW_0
- *  @sa		WILC_NEW_0_EX
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-void *WILC_MemoryCalloc(u32 u32Size, tstrWILC_MemoryAttrs *strAttrs,
-			char *pcFileName, u32 u32LineNo);
-
-/*!
- *  @brief	Reallocates a given block to a new size
- *  @param[in]	pvOldBlock the old memory block, if NULL then this function
- *              behaves as a new allocation function
- *  @param[in]	u32NewSize size of the new memory block in bytes, if zero then
- *              this function behaves as a free function
- *  @param[in]	strAttrs Optional attributes, NULL for default
- *              if pAllocationPool!=NULL and pvOldBlock==NULL, pAllocationPool
- *              should point to the pool to use for this allocation.
- *              if pAllocationPool==NULL and pvOldBlock==NULL memory will be
- *              allocated directly from	the system
- *              if and pvOldBlock!=NULL, pAllocationPool will not be inspected
- *              and reallocation is done from the same pool as the original block
- *  @param[in]	pcFileName file name of the calling code for debugging
- *  @param[in]	u32LineNo line number of the calling code for debugging
- *  @return	The new allocated block, possibly same as pvOldBlock
- *  @note	It is recommended to use of of the wrapper macros instead of
- *              calling this function directly
- *  @sa		sttrWILC_MemoryAttrs
- *  @sa		WILC_REALLOC
- *  @sa		WILC_REALLOC_EX
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-void *WILC_MemoryRealloc(void *pvOldBlock, u32 u32NewSize,
-			 tstrWILC_MemoryAttrs *strAttrs, char *pcFileName, u32 u32LineNo);
-
-/*!
- *  @brief	Frees given block
- *  @param[in]	pvBlock the memory block to be freed
- *  @param[in]	strAttrs Optional attributes, NULL for default
- *  @param[in]	pcFileName file name of the calling code for debugging
- *  @param[in]	u32LineNo line number of the calling code for debugging
- *  @note	It is recommended to use of of the wrapper macros instead of
- *              calling this function directly
- *  @sa		sttrWILC_MemoryAttrs
- *  @sa		WILC_FREE
- *  @sa		WILC_FREE_EX
- *  @sa		WILC_FREE_SET_NULL
- *  @sa		WILC_FREE_IF_TRUE
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-void WILC_MemoryFree(const void *pvBlock, tstrWILC_MemoryAttrs *strAttrs,
-			char *pcFileName, u32 u32LineNo);
-
-/*!
  * @brief	standrad malloc wrapper with custom attributes
  */
 	#define WILC_MALLOC_EX(__size__, __attrs__) \
 	(WILC_MemoryAlloc( \
 		 (__size__), __attrs__, NULL, 0))
 
-/*!
- * @brief	standrad calloc wrapper with custom attributes
- */
-	#define WILC_CALLOC_EX(__size__, __attrs__) \
-	(WILC_MemoryCalloc( \
-		 (__size__), __attrs__, NULL, 0))
-
-/*!
- * @brief	standrad realloc wrapper with custom attributes
- */
-	#define WILC_REALLOC_EX(__ptr__, __new_size__, __attrs__) \
-	(WILC_MemoryRealloc( \
-		 (__ptr__), (__new_size__), __attrs__, NULL, 0))
-/*!
- * @brief	standrad free wrapper with custom attributes
- */
-	#define WILC_FREE_EX(__ptr__, __attrs__) \
-	(WILC_MemoryFree( \
-		 (__ptr__), __attrs__, NULL, 0))
-
-/*!
- * @brief	Allocates a block (with custom attributes) of given type and number of
- * elements
- */
-#define WILC_NEW_EX(__struct_type__, __n_structs__, __attrs__) \
-	((__struct_type__ *)WILC_MALLOC_EX( \
-		 sizeof(__struct_type__) * (u32)(__n_structs__), __attrs__))
-
-/*!
- * @brief	Allocates a block (with custom attributes) of given type and number of
- * elements and Zero-fills it
- */
-#define WILC_NEW_0_EX(__struct_type__, __n_structs__, __attrs__) \
-	((__struct_type__ *)WILC_CALLOC_EX( \
-		 sizeof(__struct_type__) * (u32)(__n_structs__), __attrs__))
-
-/*!
- * @brief	Frees a block (with custom attributes), also setting the original pointer
- * to NULL
- */
-#define WILC_FREE_SET_NULL_EX(__ptr__, __attrs__) do { \
-		if (__ptr__ != NULL) { \
-			WILC_FREE_EX(__ptr__, __attrs__); \
-			__ptr__ = NULL; \
-		} \
-} while (0)
-
-/*!
- * @brief	Frees a block (with custom attributes) if the pointer expression evaluates
- * to true
- */
-#define WILC_FREE_IF_TRUE_EX(__ptr__, __attrs__) do { \
-		if (__ptr__ != NULL) { \
-			WILC_FREE_EX(__ptr__, __attrs__); \
-		} \
-} while (0)
 
 /*!
  * @brief	standrad malloc wrapper with default attributes
@@ -189,51 +59,8 @@
 #define WILC_MALLOC(__size__) \
 	WILC_MALLOC_EX(__size__, NULL)
 
-/*!
- * @brief	standrad calloc wrapper with default attributes
- */
-#define WILC_CALLOC(__size__) \
-	WILC_CALLOC_EX(__size__, NULL)
 
-/*!
- * @brief	standrad realloc wrapper with default attributes
- */
-#define WILC_REALLOC(__ptr__, __new_size__) \
-	WILC_REALLOC_EX(__ptr__, __new_size__, NULL)
 
-/*!
- * @brief	standrad free wrapper with default attributes
- */
-#define WILC_FREE(__ptr__) \
-	WILC_FREE_EX(__ptr__, NULL)
-
-/*!
- * @brief	Allocates a block (with default attributes) of given type and number of
- * elements
- */
-#define WILC_NEW(__struct_type__, __n_structs__) \
-	WILC_NEW_EX(__struct_type__, __n_structs__, NULL)
-
-/*!
- * @brief	Allocates a block (with default attributes) of given type and number of
- * elements and Zero-fills it
- */
-#define WILC_NEW_0(__struct_type__, __n_structs__) \
-	WILC_NEW_O_EX(__struct_type__, __n_structs__, NULL)
-
-/*!
- * @brief	Frees a block (with default attributes), also setting the original pointer
- * to NULL
- */
-#define WILC_FREE_SET_NULL(__ptr__) \
-	WILC_FREE_SET_NULL_EX(__ptr__, NULL)
-
-/*!
- * @brief	Frees a block (with default attributes) if the pointer expression evaluates
- * to true
- */
-#define WILC_FREE_IF_TRUE(__ptr__) \
-	WILC_FREE_IF_TRUE_EX(__ptr__, NULL)
 
 
 #endif
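
With the calloc/realloc/free wrappers gone, WILC_MALLOC() is now only a two-macro detour to WILC_MemoryAlloc(), i.e. kmalloc(size, GFP_ATOMIC). Remaining call sites can therefore be converted mechanically, keeping in mind that the wrapper always used GFP_ATOMIC, so a sleepable caller may prefer GFP_KERNEL. An illustrative conversion (buffer and length names assumed):

	/* was: buf = WILC_MALLOC(len); */
	buf = kmalloc(len, GFP_KERNEL);	/* GFP_ATOMIC if the caller cannot sleep */
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kfree(buf);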
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c
index 16bcef4..70e4fa6 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.c
+++ b/drivers/staging/wilc1000/wilc_msgqueue.c
@@ -8,8 +8,7 @@
  *  @note		copied from FLO glue implementatuion
  *  @version		1.0
  */
-WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle,
-			       tstrWILC_MsgQueueAttrs *pstrAttrs)
+WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle)
 {
 	spin_lock_init(&pHandle->strCriticalSection);
 	sema_init(&pHandle->hSem, 0);
@@ -25,8 +24,7 @@
  *  @note		copied from FLO glue implementatuion
  *  @version		1.0
  */
-WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle,
-				tstrWILC_MsgQueueAttrs *pstrAttrs)
+WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle)
 {
 
 	pHandle->bExiting = true;
@@ -39,7 +37,7 @@
 
 	while (pHandle->pstrMessageList != NULL) {
 		Message *pstrMessge = pHandle->pstrMessageList->pstrNext;
-		WILC_FREE(pHandle->pstrMessageList);
+		kfree(pHandle->pstrMessageList);
 		pHandle->pstrMessageList = pstrMessge;
 	}
 
@@ -53,8 +51,7 @@
  *  @version		1.0
  */
 WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
-			     const void *pvSendBuffer, u32 u32SendBufferSize,
-			     tstrWILC_MsgQueueAttrs *pstrAttrs)
+			     const void *pvSendBuffer, u32 u32SendBufferSize)
 {
 	WILC_ErrNo s32RetStatus = WILC_SUCCESS;
 	unsigned long flags;
@@ -71,13 +68,13 @@
 	spin_lock_irqsave(&pHandle->strCriticalSection, flags);
 
 	/* construct a new message */
-	pstrMessage = WILC_NEW(Message, 1);
+	pstrMessage = kmalloc(sizeof(Message), GFP_ATOMIC);
 	WILC_NULLCHECK(s32RetStatus, pstrMessage);
 	pstrMessage->u32Length = u32SendBufferSize;
 	pstrMessage->pstrNext = NULL;
 	pstrMessage->pvBuffer = WILC_MALLOC(u32SendBufferSize);
 	WILC_NULLCHECK(s32RetStatus, pstrMessage->pvBuffer);
-	WILC_memcpy(pstrMessage->pvBuffer, pvSendBuffer, u32SendBufferSize);
+	memcpy(pstrMessage->pvBuffer, pvSendBuffer, u32SendBufferSize);
 
 
 	/* add it to the message queue */
@@ -100,9 +97,9 @@
 		/* error occured, free any allocations */
 		if (pstrMessage != NULL) {
 			if (pstrMessage->pvBuffer != NULL) {
-				WILC_FREE(pstrMessage->pvBuffer);
+				kfree(pstrMessage->pvBuffer);
 			}
-			WILC_FREE(pstrMessage);
+			kfree(pstrMessage);
 		}
 	}
 
@@ -119,8 +116,7 @@
  */
 WILC_ErrNo WILC_MsgQueueRecv(WILC_MsgQueueHandle *pHandle,
 			     void *pvRecvBuffer, u32 u32RecvBufferSize,
-			     u32 *pu32ReceivedLength,
-			     tstrWILC_MsgQueueAttrs *pstrAttrs)
+			     u32 *pu32ReceivedLength)
 {
 
 	Message *pstrMessage;
@@ -170,13 +166,13 @@
 
 		/* consume the message */
 		pHandle->u32ReceiversCount--;
-		WILC_memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
+		memcpy(pvRecvBuffer, pstrMessage->pvBuffer, pstrMessage->u32Length);
 		*pu32ReceivedLength = pstrMessage->u32Length;
 
 		pHandle->pstrMessageList = pstrMessage->pstrNext;
 
-		WILC_FREE(pstrMessage->pvBuffer);
-		WILC_FREE(pstrMessage);
+		kfree(pstrMessage->pvBuffer);
+		kfree(pstrMessage);
 
 		spin_unlock_irqrestore(&pHandle->strCriticalSection, flags);
 
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
index 35b1001..ef1d2fa 100644
--- a/drivers/staging/wilc1000/wilc_msgqueue.h
+++ b/drivers/staging/wilc1000/wilc_msgqueue.h
@@ -13,20 +13,6 @@
 #include "wilc_platform.h"
 #include "wilc_errorsupport.h"
 #include "wilc_memory.h"
-#include "wilc_strutils.h"
-
-/*!
- *  @struct             tstrWILC_MsgQueueAttrs
- *  @brief		Message Queue API options
- *  @author		syounan
- *  @date		30 Aug 2010
- *  @version		1.0
- */
-typedef struct {
-	/* a dummy member to avoid compiler errors*/
-	u8 dummy;
-
-} tstrWILC_MsgQueueAttrs;
 
 /*!
  *  @brief		Creates a new Message queue
@@ -37,14 +23,11 @@
  *  @param[in,out]	pHandle handle to the message queue object
  *  @param[in]	pstrAttrs Optional attributes, NULL for default
  *  @return		Error code indicating sucess/failure
- *  @sa			tstrWILC_MsgQueueAttrs
  *  @author		syounan
  *  @date		30 Aug 2010
  *  @version		1.0
  */
-WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle,
-			       tstrWILC_MsgQueueAttrs *pstrAttrs);
-
+WILC_ErrNo WILC_MsgQueueCreate(WILC_MsgQueueHandle *pHandle);
 
 /*!
  *  @brief		Sends a message
@@ -57,15 +40,12 @@
  *  @param[in]	u32SendBufferSize the size of the data to send
  *  @param[in]	pstrAttrs Optional attributes, NULL for default
  *  @return		Error code indicating sucess/failure
- *  @sa			tstrWILC_MsgQueueAttrs
  *  @author		syounan
  *  @date		30 Aug 2010
  *  @version		1.0
  */
 WILC_ErrNo WILC_MsgQueueSend(WILC_MsgQueueHandle *pHandle,
-			     const void *pvSendBuffer, u32 u32SendBufferSize,
-			     tstrWILC_MsgQueueAttrs *pstrAttrs);
-
+			     const void *pvSendBuffer, u32 u32SendBufferSize);
 
 /*!
  *  @brief		Receives a message
@@ -79,30 +59,23 @@
  *  @param[out]	pu32ReceivedLength the length of received data
  *  @param[in]	pstrAttrs Optional attributes, NULL for default
  *  @return		Error code indicating sucess/failure
- *  @sa			tstrWILC_MsgQueueAttrs
  *  @author		syounan
  *  @date		30 Aug 2010
  *  @version		1.0
  */
 WILC_ErrNo WILC_MsgQueueRecv(WILC_MsgQueueHandle *pHandle,
 			     void *pvRecvBuffer, u32 u32RecvBufferSize,
-			     u32 *pu32ReceivedLength,
-			     tstrWILC_MsgQueueAttrs *pstrAttrs);
-
+			     u32 *pu32ReceivedLength);
 
 /*!
  *  @brief		Destroys an existing  Message queue
  *  @param[in]	pHandle handle to the message queue object
  *  @param[in]	pstrAttrs Optional attributes, NULL for default
  *  @return		Error code indicating sucess/failure
- *  @sa			tstrWILC_MsgQueueAttrs
  *  @author		syounan
  *  @date		30 Aug 2010
  *  @version		1.0
  */
-WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle,
-				tstrWILC_MsgQueueAttrs *pstrAttrs);
-
-
+WILC_ErrNo WILC_MsgQueueDestroy(WILC_MsgQueueHandle *pHandle);
 
 #endif
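
After dropping the unused attrs arguments, the whole queue API is the four calls declared above. A minimal usage sketch with the new signatures (handle and buffer names are illustrative; real callers should check each return value against WILC_SUCCESS):

	WILC_MsgQueueHandle q;
	char msg[16] = "ping", out[16];
	u32 len;

	WILC_MsgQueueCreate(&q);
	WILC_MsgQueueSend(&q, msg, sizeof(msg));
	WILC_MsgQueueRecv(&q, out, sizeof(out), &len);
	WILC_MsgQueueDestroy(&q);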
diff --git a/drivers/staging/wilc1000/wilc_osconfig.h b/drivers/staging/wilc1000/wilc_osconfig.h
deleted file mode 100644
index f9c2514..0000000
--- a/drivers/staging/wilc1000/wilc_osconfig.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Logs options */
-#define WILC_LOGS_NOTHING          0
-#define WILC_LOGS_WARN             1
-#define WILC_LOGS_WARN_INFO        2
-#define WILC_LOGS_WARN_INFO_DBG    3
-#define WILC_LOGS_WARN_INFO_DBG_FN 4
-#define WILC_LOGS_ALL              5
-
-#define WILC_LOG_VERBOSITY_LEVEL WILC_LOGS_ALL
diff --git a/drivers/staging/wilc1000/wilc_oswrapper.h b/drivers/staging/wilc1000/wilc_oswrapper.h
index e97aa96..cb48325 100644
--- a/drivers/staging/wilc1000/wilc_oswrapper.h
+++ b/drivers/staging/wilc1000/wilc_oswrapper.h
@@ -14,26 +14,14 @@
 #define WILC_OSW_INTERFACE_VER 2
 
 /* Os Configuration File */
-#include "wilc_osconfig.h"
 #include "wilc_platform.h"
 
-/* Logging Functions */
-#include "wilc_log.h"
-
 /* Error reporting and handling support */
 #include "wilc_errorsupport.h"
 
-/* Sleep support */
-#include "wilc_sleep.h"
-
-/* Timer support */
-#include "wilc_timer.h"
-
 /* Memory support */
 #include "wilc_memory.h"
 
-/* String Utilities */
-#include "wilc_strutils.h"
 
 /* Message Queue */
 #include "wilc_msgqueue.h"
diff --git a/drivers/staging/wilc1000/wilc_platform.h b/drivers/staging/wilc1000/wilc_platform.h
index d03532c..1e56973 100644
--- a/drivers/staging/wilc1000/wilc_platform.h
+++ b/drivers/staging/wilc1000/wilc_platform.h
@@ -1,5 +1,5 @@
-#ifndef __WILC_platfrom_H__
-#define __WILC_platfrom_H__
+#ifndef __WILC_platform_H__
+#define __WILC_platform_H__
 
 #include <linux/kthread.h>
 #include <linux/semaphore.h>
@@ -16,10 +16,6 @@
  *      OS specific types
  *******************************************************************/
 
-typedef struct timer_list WILC_TimerHandle;
-
-
-
 /* Message Queue type is a structure */
 typedef struct __Message_struct {
 	void *pvBuffer;
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index 897e47e..5a18148 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -10,17 +10,7 @@
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
 
-
-#ifdef WILC1000_SINGLE_TRANSFER
-#define WILC_SDIO_BLOCK_SIZE 256
-#else
- #if defined(PLAT_AML8726_M3) /* johnny */
-	#define WILC_SDIO_BLOCK_SIZE 512
-	#define MAX_SEG_SIZE (1 << 12) /* 4096 */
- #else
-	#define WILC_SDIO_BLOCK_SIZE 512
- #endif
-#endif
+#define WILC_SDIO_BLOCK_SIZE 512
 
 typedef struct {
 	void *os_context;
@@ -90,7 +80,6 @@
 {
 	sdio_cmd52_t cmd;
 
-
 	/**
 	 *      Review: BIG ENDIAN
 	 **/
@@ -108,6 +97,7 @@
 _fail_:
 	return 0;
 }
+
 static int sdio_set_func0_block_size(uint32_t block_size)
 {
 	sdio_cmd52_t cmd;
@@ -170,6 +160,7 @@
 #ifndef WILC_SDIO_IRQ_GPIO
 	/* uint32_t sts; */
 	sdio_cmd52_t cmd;
+
 	cmd.read_write = 0;
 	cmd.function = 1;
 	cmd.raw = 0;
@@ -181,6 +172,7 @@
 	return cmd.data;
 #else
 	uint32_t reg;
+
 	if (!sdio_read_reg(WILC_HOST_RX_CTRL_0, &reg)) {
 		g_sdio.dPrint(N_ERR, "[wilc spi]: Failed read reg (%08x)...\n", WILC_HOST_RX_CTRL_0);
 		return 0;
@@ -197,6 +189,7 @@
 {
 	uint32_t cnt = 0;
 	sdio_cmd52_t cmd;
+
 	cmd.read_write = 0;
 	cmd.function = 1;
 	cmd.raw = 0;
@@ -222,8 +215,6 @@
 	cnt |= (cmd.data << 16);
 
 	return cnt;
-
-
 }
 
 /********************************************
@@ -263,6 +254,7 @@
 
 	if ((addr >= 0xf0) && (addr <= 0xff)) {
 		sdio_cmd52_t cmd;
+
 		cmd.read_write = 1;
 		cmd.function = 0;
 		cmd.raw = 0;
@@ -325,16 +317,6 @@
 		cmd.function = 0;
 		cmd.address = 0x10f;
 	} else {
-#ifdef WILC1000_SINGLE_TRANSFER
-		/**
-		 *      has to be block aligned...
-		 **/
-		nleft = size % block_size;
-		if (nleft > 0) {
-			size += block_size;
-			size &= ~(block_size - 1);
-		}
-#else
 		/**
 		 *      has to be word aligned...
 		 **/
@@ -342,7 +324,6 @@
 			size += 4;
 			size &= ~0x3;
 		}
-#endif
 
 		/**
 		 *      func 1 access
@@ -355,89 +336,6 @@
 	nleft = size % block_size;
 
 	if (nblk > 0) {
-
-#if defined(PLAT_AML8726_M3_BACKUP) /* johnny */
-		int i;
-
-		for (i = 0; i < nblk; i++) {
-			cmd.block_mode = 0; /* 1; */
-			cmd.increment = 1;
-			cmd.count = block_size; /* nblk; */
-			cmd.buffer = buf;
-			cmd.block_size = block_size;
-			if (addr > 0) {
-				if (!sdio_set_func0_csa_address(addr))
-					goto _fail_;
-			}
-			if (!g_sdio.sdio_cmd53(&cmd)) {
-				g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block send...\n", addr);
-				goto _fail_;
-			}
-
-			if (addr > 0)
-				addr += block_size;     /* addr += nblk*block_size; */
-
-			buf += block_size;              /* buf += nblk*block_size; */
-		}
-
-#elif defined(PLAT_AML8726_M3) /* johnny */
-
-		int i;
-		int rest;
-		int seg_cnt;
-
-		seg_cnt = (nblk * block_size) / MAX_SEG_SIZE;
-		rest = (nblk * block_size) & (MAX_SEG_SIZE - 1);
-
-		for (i = 0; i < seg_cnt; i++) {
-			cmd.block_mode = 1;
-			cmd.increment = 1;
-			cmd.count = MAX_SEG_SIZE / block_size;
-			cmd.buffer = buf;
-			cmd.block_size = block_size;
-
-			if (addr > 0) {
-				if (!sdio_set_func0_csa_address(addr))
-					goto _fail_;
-			}
-			if (!g_sdio.sdio_cmd53(&cmd)) {
-				g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block send...\n", addr);
-				goto _fail_;
-			}
-
-			if (addr > 0)
-				addr += MAX_SEG_SIZE;
-
-			buf += MAX_SEG_SIZE;
-
-		}
-
-
-		if (rest > 0) {
-			cmd.block_mode = 1;
-			cmd.increment = 1;
-			cmd.count = rest / block_size;
-			cmd.buffer = buf;
-			cmd.block_size = block_size; /* johnny : prevent it from setting unexpected value */
-
-			if (addr > 0) {
-				if (!sdio_set_func0_csa_address(addr))
-					goto _fail_;
-			}
-			if (!g_sdio.sdio_cmd53(&cmd)) {
-				g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], bytes send...\n", addr);
-				goto _fail_;
-			}
-
-			if (addr > 0)
-				addr += rest;
-
-			buf += rest;
-
-		}
-
-#else
-
 		cmd.block_mode = 1;
 		cmd.increment = 1;
 		cmd.count = nblk;
@@ -454,11 +352,8 @@
 		if (addr > 0)
 			addr += nblk * block_size;
 		buf += nblk * block_size;
-
-#endif /* platform */
 	}
 
-
 	if (nleft > 0) {
 		cmd.block_mode = 0;
 		cmd.increment = 1;
@@ -488,6 +383,7 @@
 {
 	if ((addr >= 0xf0) && (addr <= 0xff)) {
 		sdio_cmd52_t cmd;
+
 		cmd.read_write = 0;
 		cmd.function = 0;
 		cmd.raw = 0;
@@ -552,16 +448,6 @@
 		cmd.function = 0;
 		cmd.address = 0x10f;
 	} else {
-#ifdef WILC1000_SINGLE_TRANSFER
-		/**
-		 *      has to be block aligned...
-		 **/
-		nleft = size % block_size;
-		if (nleft > 0) {
-			size += block_size;
-			size &= ~(block_size - 1);
-		}
-#else
 		/**
 		 *      has to be word aligned...
 		 **/
@@ -569,7 +455,6 @@
 			size += 4;
 			size &= ~0x3;
 		}
-#endif
 
 		/**
 		 *      func 1 access
@@ -582,89 +467,6 @@
 	nleft = size % block_size;
 
 	if (nblk > 0) {
-
-#if defined(PLAT_AML8726_M3_BACKUP) /* johnny */
-
-		int i;
-
-		for (i = 0; i < nblk; i++) {
-			cmd.block_mode = 0; /* 1; */
-			cmd.increment = 1;
-			cmd.count = block_size; /* nblk; */
-			cmd.buffer = buf;
-			cmd.block_size = block_size;
-			if (addr > 0) {
-				if (!sdio_set_func0_csa_address(addr))
-					goto _fail_;
-			}
-			if (!g_sdio.sdio_cmd53(&cmd)) {
-				g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block read...\n", addr);
-				goto _fail_;
-			}
-			if (addr > 0)
-				addr += block_size;             /* addr += nblk*block_size; */
-			buf += block_size;              /* buf += nblk*block_size; */
-		}
-
-#elif defined(PLAT_AML8726_M3) /* johnny */
-
-		int i;
-		int rest;
-		int seg_cnt;
-
-		seg_cnt = (nblk * block_size) / MAX_SEG_SIZE;
-		rest = (nblk * block_size) & (MAX_SEG_SIZE - 1);
-
-		for (i = 0; i < seg_cnt; i++) {
-			cmd.block_mode = 1;
-			cmd.increment = 1;
-			cmd.count = MAX_SEG_SIZE / block_size;
-			cmd.buffer = buf;
-			cmd.block_size = block_size;
-
-
-			if (addr > 0) {
-				if (!sdio_set_func0_csa_address(addr))
-					goto _fail_;
-			}
-			if (!g_sdio.sdio_cmd53(&cmd)) {
-				g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block read...\n", addr);
-				goto _fail_;
-			}
-
-			if (addr > 0)
-				addr += MAX_SEG_SIZE;
-
-			buf += MAX_SEG_SIZE;
-
-		}
-
-
-		if (rest > 0) {
-			cmd.block_mode = 1;
-			cmd.increment = 1;
-			cmd.count = rest / block_size;
-			cmd.buffer = buf;
-			cmd.block_size = block_size; /* johnny : prevent it from setting unexpected value */
-
-			if (addr > 0) {
-				if (!sdio_set_func0_csa_address(addr))
-					goto _fail_;
-			}
-			if (!g_sdio.sdio_cmd53(&cmd)) {
-				g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed cmd53 [%x], block read...\n", addr);
-				goto _fail_;
-			}
-
-			if (addr > 0)
-				addr += rest;
-
-			buf += rest;
-
-		}
-
-#else
-
 		cmd.block_mode = 1;
 		cmd.increment = 1;
 		cmd.count = nblk;
@@ -681,8 +483,6 @@
 		if (addr > 0)
 			addr += nblk * block_size;
 		buf += nblk * block_size;
-
-#endif /* platform */
 	}       /* if (nblk > 0) */
 
 	if (nleft > 0) {
@@ -784,6 +584,7 @@
 	sdio_cmd52_t cmd;
 	int loop;
 	uint32_t chipid;
+
 	memset(&g_sdio, 0, sizeof(wilc_sdio_t));
 
 	g_sdio.dPrint = func;
@@ -891,14 +692,12 @@
 		goto _fail_;
 	}
 	g_sdio.dPrint(N_ERR, "[wilc sdio]: chipid (%08x)\n", chipid);
-	if ((chipid & 0xfff) > 0x2a0) {
+	if ((chipid & 0xfff) > 0x2a0)
 		g_sdio.has_thrpt_enh3 = 1;
-	} else {
+	else
 		g_sdio.has_thrpt_enh3 = 0;
-	}
 	g_sdio.dPrint(N_ERR, "[wilc sdio]: has_thrpt_enh3 = %d...\n", g_sdio.has_thrpt_enh3);
 
-
 	return 1;
 
 _fail_:
@@ -925,23 +724,21 @@
 	/**
 	 *      Read DMA count in words
 	 **/
-	{
-		cmd.read_write = 0;
-		cmd.function = 0;
-		cmd.raw = 0;
-		cmd.address = 0xf2;
-		cmd.data = 0;
-		g_sdio.sdio_cmd52(&cmd);
-		tmp = cmd.data;
+	cmd.read_write = 0;
+	cmd.function = 0;
+	cmd.raw = 0;
+	cmd.address = 0xf2;
+	cmd.data = 0;
+	g_sdio.sdio_cmd52(&cmd);
+	tmp = cmd.data;
 
-		/* cmd.read_write = 0; */
-		/* cmd.function = 0; */
-		/* cmd.raw = 0; */
-		cmd.address = 0xf3;
-		cmd.data = 0;
-		g_sdio.sdio_cmd52(&cmd);
-		tmp |= (cmd.data << 8);
-	}
+	/* cmd.read_write = 0; */
+	/* cmd.function = 0; */
+	/* cmd.raw = 0; */
+	cmd.address = 0xf3;
+	cmd.data = 0;
+	g_sdio.sdio_cmd52(&cmd);
+	tmp |= (cmd.data << 8);
 
 	*size = tmp;
 	return 1;
@@ -966,26 +763,21 @@
 	cmd.data = 0;
 	g_sdio.sdio_cmd52(&cmd);
 
-	if (cmd.data & (1 << 0)) {
+	if (cmd.data & (1 << 0))
 		tmp |= INT_0;
-	}
-	if (cmd.data & (1 << 2)) {
+	if (cmd.data & (1 << 2))
 		tmp |= INT_1;
-	}
-	if (cmd.data & (1 << 3)) {
+	if (cmd.data & (1 << 3))
 		tmp |= INT_2;
-	}
-	if (cmd.data & (1 << 4)) {
+	if (cmd.data & (1 << 4))
 		tmp |= INT_3;
-	}
-	if (cmd.data & (1 << 5)) {
+	if (cmd.data & (1 << 5))
 		tmp |= INT_4;
-	}
-	if (cmd.data & (1 << 6)) {
+	if (cmd.data & (1 << 6))
 		tmp |= INT_5;
-	}
 	{
 		int i;
+
 		for (i = g_sdio.nint; i < MAX_NUM_INT; i++) {
 			if ((tmp >> (IRG_FLAGS_OFFSET + i)) & 0x1) {
 				g_sdio.dPrint(N_ERR, "[wilc sdio]: Unexpected interrupt (1) : tmp=%x, data=%x\n", tmp, cmd.data);
@@ -1024,6 +816,7 @@
 #ifdef WILC_SDIO_IRQ_GPIO
 		{
 			uint32_t flags;
+
 			flags = val & ((1 << MAX_NUN_INT_THRPT_ENH2) - 1);
 			reg = flags;
 		}
@@ -1041,6 +834,7 @@
 			reg |= (1 << 7);
 		if (reg) {
 			sdio_cmd52_t cmd;
+
 			cmd.read_write = 1;
 			cmd.function = 0;
 			cmd.raw = 0;
@@ -1060,6 +854,7 @@
 			/* see below. has_thrpt_enh2 uses register 0xf8 to clear interrupts. */
 			/* Cannot clear multiple interrupts. Must clear each interrupt individually */
 			uint32_t flags;
+
 			flags = val & ((1 << MAX_NUM_INT) - 1);
 			if (flags) {
 				int i;
@@ -1068,6 +863,7 @@
 				for (i = 0; i < g_sdio.nint; i++) {
 					if (flags & 1) {
 						sdio_cmd52_t cmd;
+
 						cmd.read_write = 1;
 						cmd.function = 0;
 						cmd.raw = 0;
@@ -1085,9 +881,8 @@
 						break;
 					flags >>= 1;
 				}
-				if (!ret) {
+				if (!ret)
 					goto _fail_;
-				}
 				for (i = g_sdio.nint; i < MAX_NUM_INT; i++) {
 					if (flags & 1)
 						g_sdio.dPrint(N_ERR, "[wilc sdio]: Unexpected interrupt cleared %d...\n", i);
@@ -1097,7 +892,6 @@
 		}
 #endif /* WILC_SDIO_IRQ_GPIO */
 
-
 		{
 			uint32_t vmm_ctl;
 
@@ -1138,7 +932,6 @@
 {
 	uint32_t reg;
 
-
 	if (nint > MAX_NUM_INT) {
 		g_sdio.dPrint(N_ERR, "[wilc sdio]: Too many interupts (%d)...\n", nint);
 		return 0;
@@ -1148,7 +941,6 @@
 		return 0;
 	}
 
-
 	g_sdio.nint = nint;
 
 	/**
@@ -1170,7 +962,6 @@
 		uint32_t reg;
 		int ret, i;
 
-
 		/**
 		 *      interrupt pin mux select
 		 **/
@@ -1195,9 +986,8 @@
 			return 0;
 		}
 
-		for (i = 0; (i < 5) && (nint > 0); i++, nint--) {
+		for (i = 0; (i < 5) && (nint > 0); i++, nint--)
 			reg |= (1 << (27 + i));
-		}
 		ret = sdio_write_reg(WILC_INTR_ENABLE, reg);
 		if (!ret) {
 			g_sdio.dPrint(N_ERR, "[wilc sdio]: Failed write reg (%08x)...\n", WILC_INTR_ENABLE);
@@ -1210,9 +1000,8 @@
 				return 0;
 			}
 
-			for (i = 0; (i < 3) && (nint > 0); i++, nint--) {
+			for (i = 0; (i < 3) && (nint > 0); i++, nint--)
 				reg |= (1 << i);
-			}
 
 			ret = sdio_read_reg(WILC_INTR2_ENABLE, &reg);
 			if (!ret) {
@@ -1225,7 +1014,6 @@
 	return 1;
 }
 
-
 /********************************************
  *
  *      Global sdio HIF function table
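
The surviving alignment logic above rounds a misaligned transfer size up to the next 4-byte boundary by hand (size += 4; size &= ~0x3). That is the same rounding the generic ALIGN() helper from <linux/kernel.h> performs, should a later cleanup want to use it:

	size = ALIGN(size, 4);	/* round up to a word boundary */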
diff --git a/drivers/staging/wilc1000/wilc_sleep.c b/drivers/staging/wilc1000/wilc_sleep.c
deleted file mode 100644
index adab3cac..0000000
--- a/drivers/staging/wilc1000/wilc_sleep.c
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#include "wilc_sleep.h"
-
-/*
- *  @author	mdaftedar
- *  @date	10 Aug 2010
- *  @version	1.0
- */
-void WILC_Sleep(u32 u32TimeMilliSec)
-{
-	if (u32TimeMilliSec <= 4000000)	{
-		u32 u32Temp = u32TimeMilliSec * 1000;
-		usleep_range(u32Temp, u32Temp);
-	} else {
-		msleep(u32TimeMilliSec);
-	}
-
-}
diff --git a/drivers/staging/wilc1000/wilc_sleep.h b/drivers/staging/wilc1000/wilc_sleep.h
deleted file mode 100644
index cf9047f..0000000
--- a/drivers/staging/wilc1000/wilc_sleep.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __WILC_SLEEP_H__
-#define __WILC_SLEEP_H__
-
-#include <linux/types.h>
-#include <linux/delay.h>
-
-/*!
- *  @brief	forces the current thread to sleep until the given time has elapsed
- *  @param[in]	u32TimeMilliSec Time to sleep in Milli seconds
- *  @sa		WILC_SleepMicrosec
- *  @author	syounan
- *  @date	10 Aug 2010
- *  @version	1.0
- *  @note	This function offers a relatively innacurate and low resolution
- *              sleep, for accurate high resolution sleep use u32TimeMicoSec
- */
-/* TODO: remove and open-code in callers */
-void WILC_Sleep(u32 u32TimeMilliSec);
-
-#endif
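
With WILC_Sleep() removed (the header's own TODO asked for it to be open-coded in callers), callers fall back to the standard delay helpers; per the kernel's timers-howto guidance, short sleeps should give usleep_range() a window rather than a single value, while longer ones can simply use msleep(). Illustrative open-coded forms for a caller that used to pass milliseconds:

	usleep_range(2000, 2500);	/* ~2 ms: short, scheduler-friendly sleep */
	msleep(100);			/* 100 ms: long sleep, msleep() is fine */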
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index abea5df..1bf7d31 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -374,11 +374,10 @@
 		return result;
 	}
 
-	if (!g_spi.crc_off) {
+	if (!g_spi.crc_off)
 		wb[len - 1] = (crc7(0x7f, (const uint8_t *)&wb[0], len - 1)) << 1;
-	} else {
+	else
 		len -= 1;
-	}
 
 #define NUM_SKIP_BYTES (1)
 #define NUM_RSP_BYTES (2)
@@ -522,11 +521,10 @@
 			if (sz > 0) {
 				int nbytes;
 
-				if (sz <= (DATA_PKT_SZ - ix)) {
+				if (sz <= (DATA_PKT_SZ - ix))
 					nbytes = sz;
-				} else {
+				else
 					nbytes = DATA_PKT_SZ - ix;
-				}
 
 				/**
 				 * Read bytes
@@ -557,11 +555,10 @@
 			while (sz > 0) {
 				int nbytes;
 
-				if (sz <= DATA_PKT_SZ) {
+				if (sz <= DATA_PKT_SZ)
 					nbytes = sz;
-				} else {
+				else
 					nbytes = DATA_PKT_SZ;
-				}
 
 				/**
 				 * read data response only on the next DMA cycles not
diff --git a/drivers/staging/wilc1000/wilc_strutils.c b/drivers/staging/wilc1000/wilc_strutils.c
deleted file mode 100644
index e014595..0000000
--- a/drivers/staging/wilc1000/wilc_strutils.c
+++ /dev/null
@@ -1,80 +0,0 @@
-
-#define _CRT_SECURE_NO_DEPRECATE
-
-#include "wilc_strutils.h"
-
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-s32 WILC_memcmp(const void *pvArg1, const void *pvArg2, u32 u32Count)
-{
-	return memcmp(pvArg1, pvArg2, u32Count);
-}
-
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void WILC_memcpy_INTERNAL(void *pvTarget, const void *pvSource, u32 u32Count)
-{
-	memcpy(pvTarget, pvSource, u32Count);
-}
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void *WILC_memset(void *pvTarget, u8 u8SetValue, u32 u32Count)
-{
-	return memset(pvTarget, u8SetValue, u32Count);
-}
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-char *WILC_strncpy(char *pcTarget, const char *pcSource,
-			u32 u32Count)
-{
-	return strncpy(pcTarget, pcSource, u32Count);
-}
-
-s32 WILC_strncmp(const char *pcStr1, const char *pcStr2,
-			 u32 u32Count)
-{
-	s32 s32Result;
-
-	if (pcStr1 == NULL && pcStr2 == NULL)	{
-		s32Result = 0;
-	} else if (pcStr1 == NULL)	   {
-		s32Result = -1;
-	} else if (pcStr2 == NULL)	   {
-		s32Result = 1;
-	} else {
-		s32Result = strncmp(pcStr1, pcStr2, u32Count);
-		if (s32Result < 0) {
-			s32Result = -1;
-		} else if (s32Result > 0)    {
-			s32Result = 1;
-		}
-	}
-
-	return s32Result;
-}
-
-/*!
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-u32 WILC_strlen(const char *pcStr)
-{
-	return (u32)strlen(pcStr);
-}
diff --git a/drivers/staging/wilc1000/wilc_strutils.h b/drivers/staging/wilc1000/wilc_strutils.h
deleted file mode 100644
index d144557..0000000
--- a/drivers/staging/wilc1000/wilc_strutils.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef __WILC_STRUTILS_H__
-#define __WILC_STRUTILS_H__
-
-/*!
- *  @file	wilc_strutils.h
- *  @brief	Basic string utilities
- *  @author	syounan
- *  @sa		wilc_oswrapper.h top level OS wrapper file
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include "wilc_errorsupport.h"
-
-/*!
- *  @brief	Compares two memory buffers
- *  @param[in]	pvArg1 pointer to the first memory location
- *  @param[in]	pvArg2 pointer to the second memory location
- *  @param[in]	u32Count the size of the memory buffers
- *  @return	0 if the 2 buffers are equal, 1 if pvArg1 is bigger than pvArg2,
- *              -1 if pvArg1 smaller than pvArg2
- *  @note	this function repeats the functionality of standard memcmp
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-s32 WILC_memcmp(const void *pvArg1, const void *pvArg2, u32 u32Count);
-
-/*!
- *  @brief	Internal implementation for memory copy
- *  @param[in]	pvTarget the target buffer to which the data is copied into
- *  @param[in]	pvSource pointer to the second memory location
- *  @param[in]	u32Count the size of the data to copy
- *  @note	this function should not be used directly, use WILC_memcpy instead
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void WILC_memcpy_INTERNAL(void *pvTarget, const void *pvSource, u32 u32Count);
-
-/*!
- *  @brief	Copies the contents of a memory buffer into another
- *  @param[in]	pvTarget the target buffer to which the data is copied into
- *  @param[in]	pvSource pointer to the second memory location
- *  @param[in]	u32Count the size of the data to copy
- *  @return	WILC_SUCCESS if copy is successfully handeled
- *              WILC_FAIL if copy failed
- *  @note	this function repeats the functionality of standard memcpy,
- *              however memcpy is undefined if the two buffers overlap but this
- *              implementation will check for overlap and report error
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-static WILC_ErrNo WILC_memcpy(void *pvTarget, const void *pvSource, u32 u32Count)
-{
-	if (
-		(((u8 *)pvTarget <= (u8 *)pvSource)
-		 && (((u8 *)pvTarget + u32Count) > (u8 *)pvSource))
-
-		|| (((u8 *)pvSource <= (u8 *)pvTarget)
-		    && (((u8 *)pvSource + u32Count) > (u8 *)pvTarget))
-		) {
-		/* ovelapped memory, return Error */
-		return WILC_FAIL;
-	} else {
-		WILC_memcpy_INTERNAL(pvTarget, pvSource, u32Count);
-		return WILC_SUCCESS;
-	}
-}
-
-/*!
- *  @brief	Sets the contents of a memory buffer with the given value
- *  @param[in]	pvTarget the target buffer which contsnts will be set
- *  @param[in]	u8SetValue the value to be used
- *  @param[in]	u32Count the size of the memory buffer
- *  @return	value of pvTarget
- *  @note	this function repeats the functionality of standard memset
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-void *WILC_memset(void *pvTarget, u8 u8SetValue, u32 u32Count);
-
-/*!
- *  @brief	copies the contents of source string into the target string
- *  @param[in]	pcTarget the target string buffer
- *  @param[in]	pcSource the source string the will be copied
- *  @param[in]	u32Count copying will proceed until a null character in pcSource
- *              is encountered or u32Count of bytes copied
- *  @return	value of pcTarget
- *  @note	this function repeats the functionality of standard strncpy
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-char *WILC_strncpy(char *pcTarget, const char *pcSource,
-			u32 u32Count);
-
-/*!
- *  @brief	Compares two strings up to u32Count characters
- *  @details	Compares 2 strings reporting which is bigger, NULL is considered
- *              the smallest string, then a zero length string then all other
- *              strings depending on thier ascii characters order with small case
- *              converted to uppder case
- *  @param[in]	pcStr1 the first string, NULL is valid and considered smaller
- *              than any other non-NULL string (incliding zero lenght strings)
- *  @param[in]	pcStr2 the second string, NULL is valid and considered smaller
- *              than any other non-NULL string (incliding zero lenght strings)
- *  @param[in]	u32Count copying will proceed until a null character in pcStr1 or
- *              pcStr2 is encountered or u32Count of bytes copied
- *  @return	0 if the 2 strings are equal, 1 if pcStr1 is bigger than pcStr2,
- *              -1 if pcStr1 smaller than pcStr2
- *  @author	aabozaeid
- *  @date	7 Dec 2010
- *  @version	1.0
- */
-s32 WILC_strncmp(const char *pcStr1, const char *pcStr2,
-			 u32 u32Count);
-
-/*!
- *  @brief	gets the length of a string
- *  @param[in]	pcStr the string
- *  @return	the length
- *  @note	this function repeats the functionality of standard strlen
- *  @author	syounan
- *  @date	18 Aug 2010
- *  @version	1.0
- */
-u32 WILC_strlen(const char *pcStr);
-
-#endif
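
The deleted WILC_memcpy() rejected overlapping buffers instead of copying; plain memcpy(), which replaces it in this series, assumes the regions do not overlap (the converted call sites copy into freshly allocated buffers, so that holds). Any future call site that genuinely can overlap should use memmove() instead:

	memmove(dst, src, len);	/* well-defined even when dst and src overlap */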
diff --git a/drivers/staging/wilc1000/wilc_timer.c b/drivers/staging/wilc1000/wilc_timer.c
deleted file mode 100644
index dc71157..0000000
--- a/drivers/staging/wilc1000/wilc_timer.c
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#include "wilc_timer.h"
-
-WILC_ErrNo WILC_TimerCreate(WILC_TimerHandle *pHandle,
-	tpfWILC_TimerFunction pfCallback, tstrWILC_TimerAttrs *pstrAttrs)
-{
-	WILC_ErrNo s32RetStatus = WILC_SUCCESS;
-	setup_timer(pHandle, (void(*)(unsigned long))pfCallback, 0);
-
-	return s32RetStatus;
-}
-
-WILC_ErrNo WILC_TimerDestroy(WILC_TimerHandle *pHandle,
-	tstrWILC_TimerAttrs *pstrAttrs)
-{
-	WILC_ErrNo s32RetStatus = WILC_FAIL;
-	if (pHandle != NULL) {
-		s32RetStatus = del_timer_sync(pHandle);
-		pHandle = NULL;
-	}
-
-	return s32RetStatus;
-}
-
-
-WILC_ErrNo WILC_TimerStart(WILC_TimerHandle *pHandle, u32 u32Timeout,
-	void *pvArg, tstrWILC_TimerAttrs *pstrAttrs)
-{
-	WILC_ErrNo s32RetStatus = WILC_FAIL;
-	if (pHandle != NULL) {
-		pHandle->data = (unsigned long)pvArg;
-		s32RetStatus = mod_timer(pHandle, (jiffies + msecs_to_jiffies(u32Timeout)));
-	}
-	return s32RetStatus;
-}
-
-WILC_ErrNo WILC_TimerStop(WILC_TimerHandle *pHandle,
-	tstrWILC_TimerAttrs *pstrAttrs)
-{
-	WILC_ErrNo s32RetStatus = WILC_FAIL;
-	if (pHandle != NULL)
-		s32RetStatus = del_timer(pHandle);
-
-	return s32RetStatus;
-}
diff --git a/drivers/staging/wilc1000/wilc_timer.h b/drivers/staging/wilc1000/wilc_timer.h
deleted file mode 100644
index 931269d..0000000
--- a/drivers/staging/wilc1000/wilc_timer.h
+++ /dev/null
@@ -1,129 +0,0 @@
-#ifndef __WILC_TIMER_H__
-#define __WILC_TIMER_H__
-
-/*!
- *  @file	wilc_timer.h
- *  @brief	Timer (One Shot and Periodic) OS wrapper functionality
- *  @author	syounan
- *  @sa		wilc_oswrapper.h top level OS wrapper file
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-
-#include "wilc_platform.h"
-#include "wilc_errorsupport.h"
-
-typedef void (*tpfWILC_TimerFunction)(void *);
-
-/*!
- *  @struct             tstrWILC_TimerAttrs
- *  @brief		Timer API options
- *  @author		syounan
- *  @date		16 Aug 2010
- *  @version		1.0
- */
-typedef struct {
-	/* a dummy member to avoid compiler errors*/
-	u8 dummy;
-} tstrWILC_TimerAttrs;
-
-/*!
- *  @brief	Creates a new timer
- *  @details	Timers are a useful utility to execute some callback function
- *              in the future.
- *              A timer object has 3 states : IDLE, PENDING and EXECUTING
- *              IDLE : initial timer state after creation, no execution for the
- *              callback function is planned
- *              PENDING : a request to execute the callback function is made
- *              using WILC_TimerStart.
- *              EXECUTING : the timer has expired and its callback is now
- *              executing, when execution is done the timer returns to PENDING
- *              if the feature CONFIG_WILC_TIMER_PERIODIC is enabled and
- *              the flag tstrWILC_TimerAttrs.bPeriodicTimer is set. otherwise the
- *              timer will return to IDLE
- *  @param[out]	pHandle handle to the newly created timer object
- *  @param[in]	pfEntry pointer to the callback function to be called when the
- *              timer expires
- *              the underlaying OS may put many restrictions on what can be
- *              called inside a timer's callback, as a general rule no blocking
- *              operations (IO or semaphore Acquision) should be perfomred
- *              It is recommended that the callback will be as short as possible
- *              and only flags other threads to do the actual work
- *              also it should be noted that the underlaying OS maynot give any
- *              guarentees on which contect this callback will execute in
- *  @param[in]	pstrAttrs Optional attributes, NULL for default
- *  @return	Error code indicating sucess/failure
- *  @sa		WILC_TimerAttrs
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-WILC_ErrNo WILC_TimerCreate(WILC_TimerHandle *pHandle,
-			    tpfWILC_TimerFunction pfCallback, tstrWILC_TimerAttrs *pstrAttrs);
-
-
-/*!
- *  @brief	Destroys a given timer
- *  @details	This will destroy a given timer freeing any resources used by it
- *              if the timer was PENDING Then must be cancelled as well(i.e.
- *              goes to	IDLE, same effect as calling WILC_TimerCancel first)
- *              if the timer was EXECUTING then the callback will be allowed to
- *              finish first then all resources are freed
- *  @param[in]	pHandle handle to the timer object
- *  @param[in]	pstrAttrs Optional attributes, NULL for default
- *  @return	Error code indicating sucess/failure
- *  @sa		WILC_TimerAttrs
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-WILC_ErrNo WILC_TimerDestroy(WILC_TimerHandle *pHandle,
-			     tstrWILC_TimerAttrs *pstrAttrs);
-
-/*!
- *  @brief	Starts a given timer
- *  @details	This function will move the timer to the PENDING state until the
- *              given time expires (in msec) then the callback function will be
- *              executed (timer in EXECUTING state) after execution is dene the
- *              timer either goes to IDLE (if bPeriodicTimer==false) or
- *              PENDING with same timeout value (if bPeriodicTimer==true)
- *  @param[in]	pHandle handle to the timer object
- *  @param[in]	u32Timeout timeout value in msec after witch the callback
- *              function will be executed. Timeout value of 0 is not allowed for
- *              periodic timers
- *  @param[in]	pstrAttrs Optional attributes, NULL for default,
- *              set bPeriodicTimer to run this timer as a periodic timer
- *  @return	Error code indicating sucess/failure
- *  @sa		WILC_TimerAttrs
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-WILC_ErrNo WILC_TimerStart(WILC_TimerHandle *pHandle, u32 u32Timeout, void *pvArg,
-			   tstrWILC_TimerAttrs *pstrAttrs);
-
-
-/*!
- *  @brief	Stops a given timer
- *  @details	This function will move the timer to the IDLE state cancelling
- *              any sheduled callback execution.
- *              if this function is called on a timer already in the IDLE state
- *              it will have no effect.
- *              if this function is called on a timer in EXECUTING state
- *              (callback has already started) it will wait until executing is
- *              done then move the timer to the IDLE state (which is trivial
- *              work if the timer is non periodic)
- *  @param[in]	pHandle handle to the timer object
- *  @param[in]	pstrAttrs Optional attributes, NULL for default,
- *  @return	Error code indicating sucess/failure
- *  @sa		WILC_TimerAttrs
- *  @author	syounan
- *  @date	16 Aug 2010
- *  @version	1.0
- */
-WILC_ErrNo WILC_TimerStop(WILC_TimerHandle *pHandle,
-			  tstrWILC_TimerAttrs *pstrAttrs);
-
-
-
-#endif
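
With the WILC_Timer* wrappers gone, users move to struct timer_list directly, as the cfg80211 changes below do for hAgingTimer. A rough sketch of the equivalent open-coded pattern under this kernel generation's timer API (callback, argument and timer names are illustrative):

	static struct timer_list aging_timer;

	static void aging_timer_cb(unsigned long data)
	{
		/* runs in softirq context: no sleeping here */
	}

	/* init: roughly WILC_TimerCreate() */
	setup_timer(&aging_timer, aging_timer_cb, 0);

	/* arm: roughly WILC_TimerStart(handle, ms, arg, NULL) */
	aging_timer.data = (unsigned long)arg;
	mod_timer(&aging_timer, jiffies + msecs_to_jiffies(AGING_TIME));

	/* teardown: roughly WILC_TimerDestroy() */
	del_timer_sync(&aging_timer);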
diff --git a/drivers/staging/wilc1000/wilc_type.h b/drivers/staging/wilc1000/wilc_type.h
deleted file mode 100644
index 5f36e7f..0000000
--- a/drivers/staging/wilc1000/wilc_type.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* ////////////////////////////////////////////////////////////////////////// */
-/*  */
-/* Copyright (c) Atmel Corporation.  All rights reserved. */
-/*  */
-/* Module Name:  wilc_type.h */
-/*  */
-/*  */
-/* //////////////////////////////////////////////////////////////////////////// */
-#ifndef WILC_TYPE_H
-#define WILC_TYPE_H
-
-/********************************************
- *
- *      Type Defines
- *
- ********************************************/
-#ifdef WIN32
-typedef char int8_t;
-typedef short int16_t;
-typedef long int32_t;
-typedef unsigned char uint8_t;
-typedef unsigned short uint16_t;
-typedef unsigned long uint32_t;
-#else
-#ifdef _linux_
-/*typedef unsigned char         uint8_t;
- * typedef unsigned short       uint16_t;
- * typedef unsigned long        uint32_t;*/
-#include <stdint.h>
-#else
-#include "wilc_oswrapper.h"
-#endif
-#endif
-#endif
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 92064db..a6edc97 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -22,7 +22,6 @@
 #define IS_MGMT_STATUS_SUCCES			0x040
 #define GET_PKT_OFFSET(a) (((a) >> 22) & 0x1ff)
 
-extern void linux_wlan_free(void *vp);
 extern int linux_wlan_get_firmware(perInterface_wlan_t *p_nic);
 extern void linux_wlan_unlock(void *vp);
 extern u16 Set_machw_change_vir_if(bool bValue);
@@ -33,9 +32,9 @@
 tstrNetworkInfo astrLastScannedNtwrksShadow[MAX_NUM_SCANNED_NETWORKS_SHADOW];
 u32 u32LastScannedNtwrksCountShadow;
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-WILC_TimerHandle hDuringIpTimer;
+struct timer_list hDuringIpTimer;
 #endif
-WILC_TimerHandle hAgingTimer;
+struct timer_list hAgingTimer;
 static u8 op_ifcs;
 extern u8 u8ConnectedSSID[6];
 
@@ -112,7 +111,7 @@
 u8 u8P2Plocalrandom = 0x01;
 u8 u8P2Precvrandom = 0x00;
 u8 u8P2P_vendorspec[] = {0xdd, 0x05, 0x00, 0x08, 0x40, 0x03};
-bool bWilc_ie = false;
+bool bWilc_ie;
 #endif
 
 static struct ieee80211_supported_band WILC_WFI_band_2ghz = {
@@ -135,25 +134,23 @@
 struct wilc_wfi_key g_key_ptk_params;
 struct wilc_wfi_wep_key g_key_wep_params;
 u8 g_flushing_in_progress;
-bool g_ptk_keys_saved = false;
-bool g_gtk_keys_saved = false;
-bool g_wep_keys_saved = false;
+bool g_ptk_keys_saved;
+bool g_gtk_keys_saved;
+bool g_wep_keys_saved;
 
 #define AGING_TIME	(9 * 1000)
 #define duringIP_TIME 15000
 
 void clear_shadow_scan(void *pUserVoid)
 {
-	struct WILC_WFI_priv *priv;
 	int i;
-	priv = (struct WILC_WFI_priv *)pUserVoid;
 	if (op_ifcs == 0) {
-		WILC_TimerDestroy(&hAgingTimer, NULL);
+		del_timer_sync(&hAgingTimer);
 		PRINT_INFO(CORECONFIG_DBG, "destroy aging timer\n");
 
 		for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
 			if (astrLastScannedNtwrksShadow[u32LastScannedNtwrksCountShadow].pu8IEs != NULL) {
-				WILC_FREE(astrLastScannedNtwrksShadow[i].pu8IEs);
+				kfree(astrLastScannedNtwrksShadow[i].pu8IEs);
 				astrLastScannedNtwrksShadow[u32LastScannedNtwrksCountShadow].pu8IEs = NULL;
 			}
 
@@ -204,7 +201,7 @@
 				channel = ieee80211_get_channel(wiphy, s32Freq);
 
 				rssi = get_rssi_avg(pstrNetworkInfo);
-				if (WILC_memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7) || bDirectScan)	{
+				if (memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7) || bDirectScan)	{
 					bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, pstrNetworkInfo->au8bssid, pstrNetworkInfo->u64Tsf, pstrNetworkInfo->u16CapInfo,
 								  pstrNetworkInfo->u16BeaconPeriod, (const u8 *)pstrNetworkInfo->pu8IEs,
 								  (size_t)pstrNetworkInfo->u16IEsLen, (((s32)rssi) * 100), GFP_KERNEL);
@@ -219,9 +216,7 @@
 
 void reset_shadow_found(void *pUserVoid)
 {
-	struct WILC_WFI_priv *priv;
 	int i;
-	priv = (struct WILC_WFI_priv *)pUserVoid;
 	for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
 		astrLastScannedNtwrksShadow[i].u8Found = 0;
 
@@ -230,28 +225,24 @@
 
 void update_scan_time(void *pUserVoid)
 {
-	struct WILC_WFI_priv *priv;
 	int i;
-	priv = (struct WILC_WFI_priv *)pUserVoid;
 	for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
 		astrLastScannedNtwrksShadow[i].u32TimeRcvdInScan = jiffies;
 	}
 }
 
-void remove_network_from_shadow(void *pUserVoid)
+static void remove_network_from_shadow(unsigned long arg)
 {
-	struct WILC_WFI_priv *priv;
 	unsigned long now = jiffies;
 	int i, j;
 
-	priv = (struct WILC_WFI_priv *)pUserVoid;
 
 	for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
 		if (time_after(now, astrLastScannedNtwrksShadow[i].u32TimeRcvdInScan + (unsigned long)(SCAN_RESULT_EXPIRE))) {
-			PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s \n", astrLastScannedNtwrksShadow[i].au8ssid);
+			PRINT_D(CFG80211_DBG, "Network expired in ScanShadow: %s\n", astrLastScannedNtwrksShadow[i].au8ssid);
 
 			if (astrLastScannedNtwrksShadow[i].pu8IEs != NULL) {
-				WILC_FREE(astrLastScannedNtwrksShadow[i].pu8IEs);
+				kfree(astrLastScannedNtwrksShadow[i].pu8IEs);
 				astrLastScannedNtwrksShadow[i].pu8IEs = NULL;
 			}
 
@@ -265,14 +256,16 @@
 	}
 
 	PRINT_D(CFG80211_DBG, "Number of cached networks: %d\n", u32LastScannedNtwrksCountShadow);
-	if (u32LastScannedNtwrksCountShadow != 0)
-		WILC_TimerStart(&(hAgingTimer), AGING_TIME, pUserVoid, NULL);
-	else
+	if (u32LastScannedNtwrksCountShadow != 0) {
+		hAgingTimer.data = arg;
+		mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
+	} else {
 		PRINT_D(CFG80211_DBG, "No need to restart Aging timer\n");
+	}
 }
 
 #ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-void clear_duringIP(void *pUserVoid)
+static void clear_duringIP(unsigned long arg)
 {
 	PRINT_D(GENERIC_DBG, "GO:IP Obtained , enable scan\n");
 	g_obtainingIP = false;
@@ -281,19 +274,18 @@
 
 int8_t is_network_in_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid)
 {
-	struct WILC_WFI_priv *priv;
 	int8_t state = -1;
 	int i;
 
-	priv = (struct WILC_WFI_priv *)pUserVoid;
 	if (u32LastScannedNtwrksCountShadow == 0) {
 		PRINT_D(CFG80211_DBG, "Starting Aging timer\n");
-		WILC_TimerStart(&(hAgingTimer), AGING_TIME, pUserVoid, NULL);
+		hAgingTimer.data = (unsigned long)pUserVoid;
+		mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME));
 		state = -1;
 	} else {
 		/* Linear search for now */
 		for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
-			if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
+			if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
 					pstrNetworkInfo->au8bssid, 6) == 0) {
 				state = i;
 				break;
@@ -305,11 +297,9 @@
 
 void add_network_to_shadow(tstrNetworkInfo *pstrNetworkInfo, void *pUserVoid, void *pJoinParams)
 {
-	struct WILC_WFI_priv *priv;
 	int8_t ap_found = is_network_in_shadow(pstrNetworkInfo, pUserVoid);
 	uint32_t ap_index = 0;
 	uint8_t rssi_index = 0;
-	priv = (struct WILC_WFI_priv *)pUserVoid;
 
 	if (u32LastScannedNtwrksCountShadow >= MAX_NUM_SCANNED_NETWORKS_SHADOW) {
 		PRINT_D(CFG80211_DBG, "Shadow network reached its maximum limit\n");
@@ -334,10 +324,10 @@
 	astrLastScannedNtwrksShadow[ap_index].u16CapInfo = pstrNetworkInfo->u16CapInfo;
 
 	astrLastScannedNtwrksShadow[ap_index].u8SsidLen = pstrNetworkInfo->u8SsidLen;
-	WILC_memcpy(astrLastScannedNtwrksShadow[ap_index].au8ssid,
+	memcpy(astrLastScannedNtwrksShadow[ap_index].au8ssid,
 		    pstrNetworkInfo->au8ssid, pstrNetworkInfo->u8SsidLen);
 
-	WILC_memcpy(astrLastScannedNtwrksShadow[ap_index].au8bssid,
+	memcpy(astrLastScannedNtwrksShadow[ap_index].au8bssid,
 		    pstrNetworkInfo->au8bssid, ETH_ALEN);
 
 	astrLastScannedNtwrksShadow[ap_index].u16BeaconPeriod = pstrNetworkInfo->u16BeaconPeriod;
@@ -347,10 +337,10 @@
 	astrLastScannedNtwrksShadow[ap_index].u16IEsLen = pstrNetworkInfo->u16IEsLen;
 	astrLastScannedNtwrksShadow[ap_index].u64Tsf = pstrNetworkInfo->u64Tsf;
 	if (ap_found != -1)
-		WILC_FREE(astrLastScannedNtwrksShadow[ap_index].pu8IEs);
+		kfree(astrLastScannedNtwrksShadow[ap_index].pu8IEs);
 	astrLastScannedNtwrksShadow[ap_index].pu8IEs =
-		(u8 *)WILC_MALLOC(pstrNetworkInfo->u16IEsLen);        /* will be deallocated by the WILC_WFI_CfgScan() function */
-	WILC_memcpy(astrLastScannedNtwrksShadow[ap_index].pu8IEs,
+		WILC_MALLOC(pstrNetworkInfo->u16IEsLen);        /* will be deallocated by the WILC_WFI_CfgScan() function */
+	memcpy(astrLastScannedNtwrksShadow[ap_index].pu8IEs,
 		    pstrNetworkInfo->pu8IEs, pstrNetworkInfo->u16IEsLen);
 
 	astrLastScannedNtwrksShadow[ap_index].u32TimeRcvdInScan = jiffies;
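
The shadow-table update above also drops the (u8 *) casts on WILC_MALLOC() and switches the memcpy/free wrappers to the standard kernel helpers. The underlying idiom is plain C: a void *-returning allocator needs no cast, and kfree(NULL) is a harmless no-op, so frees need no NULL guard. A small illustrative helper (not driver code), assuming an ordinary kmalloc() allocation:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Duplicate an IE blob; illustrative only. */
static u8 *dup_ies(const u8 *src, size_t len)
{
	u8 *ies = kmalloc(len, GFP_KERNEL);	/* no (u8 *) cast needed */

	if (!ies)
		return NULL;
	memcpy(ies, src, len);
	return ies;
}

static void drop_ies(u8 **ies)
{
	kfree(*ies);	/* safe even when *ies is NULL */
	*ies = NULL;
}
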
@@ -406,7 +396,7 @@
 				WILC_NULLCHECK(s32Error, channel);
 
 				PRINT_INFO(CFG80211_DBG, "Network Info:: CHANNEL Frequency: %d, RSSI: %d, CapabilityInfo: %d,"
-					   "BeaconPeriod: %d \n", channel->center_freq, (((s32)pstrNetworkInfo->s8rssi) * 100),
+					   "BeaconPeriod: %d\n", channel->center_freq, (((s32)pstrNetworkInfo->s8rssi) * 100),
 					   pstrNetworkInfo->u16CapInfo, pstrNetworkInfo->u16BeaconPeriod);
 
 				if (pstrNetworkInfo->bNewNetwork == true) {
@@ -426,7 +416,7 @@
 
 						/*P2P peers are sent to WPA supplicant and added to shadow table*/
 
-						if (!(WILC_memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7))) {
+						if (!(memcmp("DIRECT-", pstrNetworkInfo->au8ssid, 7))) {
 							bss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN,  pstrNetworkInfo->au8bssid, pstrNetworkInfo->u64Tsf, pstrNetworkInfo->u16CapInfo,
 										  pstrNetworkInfo->u16BeaconPeriod, (const u8 *)pstrNetworkInfo->pu8IEs,
 										  (size_t)pstrNetworkInfo->u16IEsLen, (((s32)pstrNetworkInfo->s8rssi) * 100), GFP_KERNEL);
@@ -441,8 +431,8 @@
 					u32 i;
 					/* So this network is discovered before, we'll just update its RSSI */
 					for (i = 0; i < priv->u32RcvdChCount; i++) {
-						if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid, pstrNetworkInfo->au8bssid, 6) == 0) {
-							PRINT_D(CFG80211_DBG, "Update RSSI of %s \n", astrLastScannedNtwrksShadow[i].au8ssid);
+						if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid, pstrNetworkInfo->au8bssid, 6) == 0) {
+							PRINT_D(CFG80211_DBG, "Update RSSI of %s\n", astrLastScannedNtwrksShadow[i].au8ssid);
 
 							astrLastScannedNtwrksShadow[i].s8rssi = pstrNetworkInfo->s8rssi;
 							astrLastScannedNtwrksShadow[i].u32TimeRcvdInScan = jiffies;
@@ -452,15 +442,14 @@
 				}
 			}
 		} else if (enuScanEvent == SCAN_EVENT_DONE)    {
-			PRINT_D(CFG80211_DBG, "Scan Done[%p] \n", priv->dev);
-			PRINT_D(CFG80211_DBG, "Refreshing Scan ... \n");
+			PRINT_D(CFG80211_DBG, "Scan Done[%p]\n", priv->dev);
+			PRINT_D(CFG80211_DBG, "Refreshing Scan ...\n");
 			refresh_scan(priv, 1, false);
 
-			if (priv->u32RcvdChCount > 0) {
-				PRINT_D(CFG80211_DBG, "%d Network(s) found \n", priv->u32RcvdChCount);
-			} else {
-				PRINT_D(CFG80211_DBG, "No networks found \n");
-			}
+			if (priv->u32RcvdChCount > 0)
+				PRINT_D(CFG80211_DBG, "%d Network(s) found\n", priv->u32RcvdChCount);
+			else
+				PRINT_D(CFG80211_DBG, "No networks found\n");
 
 			down(&(priv->hSemScanReq));
 
@@ -477,7 +466,7 @@
 		else if (enuScanEvent == SCAN_EVENT_ABORTED) {
 			down(&(priv->hSemScanReq));
 
-			PRINT_D(CFG80211_DBG, "Scan Aborted \n");
+			PRINT_D(CFG80211_DBG, "Scan Aborted\n");
 			if (priv->pstrScanReq != NULL) {
 
 				update_scan_time(priv);
@@ -515,7 +504,7 @@
 
 	for (i = 0; i < priv->pmkid_list.numpmkid; i++)	{
 
-		if (!WILC_memcmp(bssid, priv->pmkid_list.pmkidlist[i].bssid,
+		if (!memcmp(bssid, priv->pmkid_list.pmkidlist[i].bssid,
 				 ETH_ALEN)) {
 			PRINT_D(CFG80211_DBG, "PMKID successful comparison");
 
@@ -586,7 +575,7 @@
 			 *  = SUCCESSFUL_STATUSCODE, while mac status is MAC_DISCONNECTED (which means something wrong happened) */
 			u16ConnectStatus = WLAN_STATUS_UNSPECIFIED_FAILURE;
 			linux_wlan_set_bssid(priv->dev, NullBssid);
-			WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+			memset(u8ConnectedSSID, 0, ETH_ALEN);
 
 			/*BugID_5457*/
 			/*Invalidate u8WLANChannel value on wlan0 disconnect*/
@@ -595,7 +584,7 @@
 				u8WLANChannel = INVALID_CHANNEL;
 			#endif
 
-			PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d \n", u16ConnectStatus, u8MacStatus);
+			PRINT_ER("Unspecified failure: Connection status %d : MAC status = %d\n", u16ConnectStatus, u8MacStatus);
 		}
 
 		if (u16ConnectStatus == WLAN_STATUS_SUCCESS) {
@@ -604,14 +593,14 @@
 
 			PRINT_INFO(CFG80211_DBG, "Connection Successful:: BSSID: %x%x%x%x%x%x\n", pstrConnectInfo->au8bssid[0],
 				   pstrConnectInfo->au8bssid[1], pstrConnectInfo->au8bssid[2], pstrConnectInfo->au8bssid[3], pstrConnectInfo->au8bssid[4], pstrConnectInfo->au8bssid[5]);
-			WILC_memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN);
+			memcpy(priv->au8AssociatedBss, pstrConnectInfo->au8bssid, ETH_ALEN);
 
 			/* BugID_4209: if this network has expired in the scan results in the above nl80211 layer, refresh them here by calling
 			 *  cfg80211_inform_bss() with the last Scan results before calling cfg80211_connect_result() to avoid
 			 *  Linux kernel warning generated at the nl80211 layer */
 
 			for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
-				if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
+				if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
 						pstrConnectInfo->au8bssid, ETH_ALEN) == 0) {
 					unsigned long now = jiffies;
 
@@ -652,9 +641,9 @@
 		u8P2Plocalrandom = 0x01;
 		u8P2Precvrandom = 0x00;
 		bWilc_ie = false;
-		WILC_memset(priv->au8AssociatedBss, 0, ETH_ALEN);
+		memset(priv->au8AssociatedBss, 0, ETH_ALEN);
 		linux_wlan_set_bssid(priv->dev, NullBssid);
-		WILC_memset(u8ConnectedSSID, 0, ETH_ALEN);
+		memset(u8ConnectedSSID, 0, ETH_ALEN);
 
 		/*BugID_5457*/
 		/*Invalidate u8WLANChannel value on wlan0 disconnect*/
@@ -675,7 +664,8 @@
 			pstrDisconnectNotifInfo->u16reason = 1;
 		}
 		cfg80211_disconnected(dev, pstrDisconnectNotifInfo->u16reason, pstrDisconnectNotifInfo->ie,
-				      pstrDisconnectNotifInfo->ie_len, GFP_KERNEL);
+				      pstrDisconnectNotifInfo->ie_len, false,
+				      GFP_KERNEL);
 
 	}
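
The extra false argument reflects a cfg80211 interface change: cfg80211_disconnected() now carries a locally_generated flag that tells the stack whether the disconnect was initiated by the host rather than by the AP. The declaration being built against here is roughly:

void cfg80211_disconnected(struct net_device *dev, u16 reason,
			   const u8 *ie, size_t ie_len,
			   bool locally_generated, gfp_t gfp);

Passing false preserves the previous behaviour of reporting the loss as coming from the peer side.
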
 
@@ -747,7 +737,7 @@
 
 	priv->u32RcvdChCount = 0;
 
-	host_int_set_wfi_drv_handler((u32)priv->hWILCWFIDrv);
+	host_int_set_wfi_drv_handler(priv->hWILCWFIDrv);
 
 
 	reset_shadow_found(priv);
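
Dropping the (u32) cast when registering the driver handle matters mostly on 64-bit builds, where truncating a pointer to 32 bits discards the upper half of the address; presumably the host-interface prototype is widened to match elsewhere in this series. A sketch of the rule being applied, with hypothetical prototypes that are not the driver's:

/* Keep opaque handles as pointers end to end. */
void set_drv_handler(void *handle);		/* works on 32- and 64-bit */

/* Not: void set_drv_handler(u32 handle);	   truncates the pointer on 64-bit */
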
@@ -777,20 +767,20 @@
 
 				if (request->ssids[i].ssid != NULL && request->ssids[i].ssid_len != 0) {
 					strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid = WILC_MALLOC(request->ssids[i].ssid_len);
-					WILC_memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
+					memcpy(strHiddenNetwork.pstrHiddenNetworkInfo[i].pu8ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
 					strHiddenNetwork.pstrHiddenNetworkInfo[i].u8ssidlen = request->ssids[i].ssid_len;
 				} else {
-					PRINT_D(CFG80211_DBG, "Received one NULL SSID \n");
+					PRINT_D(CFG80211_DBG, "Received one NULL SSID\n");
 					strHiddenNetwork.u8ssidnum -= 1;
 				}
 			}
-			PRINT_D(CFG80211_DBG, "Trigger Scan Request \n");
+			PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
 			s32Error = host_int_scan(priv->hWILCWFIDrv, USER_SCAN, ACTIVE_SCAN,
 						 au8ScanChanList, request->n_channels,
 						 (const u8 *)request->ie, request->ie_len,
 						 CfgScanResult, (void *)priv, &strHiddenNetwork);
 		} else {
-			PRINT_D(CFG80211_DBG, "Trigger Scan Request \n");
+			PRINT_D(CFG80211_DBG, "Trigger Scan Request\n");
 			s32Error = host_int_scan(priv->hWILCWFIDrv, USER_SCAN, ACTIVE_SCAN,
 						 au8ScanChanList, request->n_channels,
 						 (const u8 *)request->ie, request->ie_len,
@@ -799,7 +789,7 @@
 
 	} else {
 		PRINT_ER("Requested num of scanned channels is greater than the max, supported"
-			 " channels \n");
+			 " channels\n");
 	}
 
 	if (s32Error != WILC_SUCCESS) {
@@ -842,21 +832,21 @@
 	priv = wiphy_priv(wiphy);
 	pstrWFIDrv = (tstrWILC_WFIDrv *)(priv->hWILCWFIDrv);
 
-	host_int_set_wfi_drv_handler((u32)priv->hWILCWFIDrv);
+	host_int_set_wfi_drv_handler(priv->hWILCWFIDrv);
 
 	PRINT_D(CFG80211_DBG, "Connecting to SSID [%s] on netdev [%p] host if [%p]\n", sme->ssid, dev, priv->hWILCWFIDrv);
 	#ifdef WILC_P2P
-	if (!(WILC_strncmp(sme->ssid, "DIRECT-", 7))) {
+	if (!(strncmp(sme->ssid, "DIRECT-", 7))) {
 		PRINT_D(CFG80211_DBG, "Connected to Direct network,OBSS disabled\n");
 		pstrWFIDrv->u8P2PConnect = 1;
 	} else
 		pstrWFIDrv->u8P2PConnect = 0;
 	#endif
-	PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d \n", sme->ssid, sme->auth_type);
+	PRINT_INFO(CFG80211_DBG, "Required SSID = %s\n , AuthType = %d\n", sme->ssid, sme->auth_type);
 
 	for (i = 0; i < u32LastScannedNtwrksCountShadow; i++) {
 		if ((sme->ssid_len == astrLastScannedNtwrksShadow[i].u8SsidLen) &&
-		    WILC_memcmp(astrLastScannedNtwrksShadow[i].au8ssid,
+		    memcmp(astrLastScannedNtwrksShadow[i].au8ssid,
 				sme->ssid,
 				sme->ssid_len) == 0) {
 			PRINT_INFO(CFG80211_DBG, "Network with required SSID is found %s\n", sme->ssid);
@@ -868,7 +858,7 @@
 			} else {
 				/* BSSID is also passed from the user, so decision of matching
 				 * should consider also this passed BSSID */
-				if (WILC_memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
+				if (memcmp(astrLastScannedNtwrksShadow[i].au8bssid,
 						sme->bssid,
 						ETH_ALEN) == 0)	{
 					PRINT_INFO(CFG80211_DBG, "BSSID is passed from the user and matched\n");
@@ -898,8 +888,8 @@
 	}
 
 	priv->WILC_WFI_wep_default = 0;
-	WILC_memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
-	WILC_memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
+	memset(priv->WILC_WFI_wep_key, 0, sizeof(priv->WILC_WFI_wep_key));
+	memset(priv->WILC_WFI_wep_key_len, 0, sizeof(priv->WILC_WFI_wep_key_len));
 
 	PRINT_INFO(CFG80211_DBG, "sme->crypto.wpa_versions=%x\n", sme->crypto.wpa_versions);
 	PRINT_INFO(CFG80211_DBG, "sme->crypto.cipher_group=%x\n", sme->crypto.cipher_group);
@@ -928,7 +918,7 @@
 			}
 			priv->WILC_WFI_wep_default = sme->key_idx;
 			priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
-			WILC_memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
+			memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
 
 			/*BugID_5137*/
 			g_key_wep_params.key_len = sme->key_len;
@@ -946,7 +936,7 @@
 
 			priv->WILC_WFI_wep_default = sme->key_idx;
 			priv->WILC_WFI_wep_key_len[sme->key_idx] = sme->key_len;
-			WILC_memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
+			memcpy(priv->WILC_WFI_wep_key[sme->key_idx], sme->key, sme->key_len);
 
 			/*BugID_5137*/
 			g_key_wep_params.key_len = sme->key_len;
@@ -1057,7 +1047,7 @@
 					 tenuAuth_type, pstrNetworkInfo->u8channel,
 					 pstrNetworkInfo->pJoinParams);
 	if (s32Error != WILC_SUCCESS) {
-		PRINT_ER("host_int_set_join_req(): Error(%d) \n", s32Error);
+		PRINT_ER("host_int_set_join_req(): Error(%d)\n", s32Error);
 		s32Error = -ENOENT;
 		goto done;
 	}
@@ -1162,7 +1152,7 @@
 
 			priv->WILC_WFI_wep_default = key_index;
 			priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
-			WILC_memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
+			memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
 
 			PRINT_D(CFG80211_DBG, "Adding AP WEP Default key Idx = %d\n", key_index);
 			PRINT_D(CFG80211_DBG, "Adding AP WEP Key len= %d\n", params->key_len);
@@ -1181,10 +1171,10 @@
 			break;
 		}
 				#endif
-		if (WILC_memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
+		if (memcmp(params->key, priv->WILC_WFI_wep_key[key_index], params->key_len)) {
 			priv->WILC_WFI_wep_default = key_index;
 			priv->WILC_WFI_wep_key_len[key_index] = params->key_len;
-			WILC_memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
+			memcpy(priv->WILC_WFI_wep_key[key_index], params->key, params->key_len);
 
 			PRINT_D(CFG80211_DBG, "Adding WEP Default key Idx = %d\n", key_index);
 			PRINT_D(CFG80211_DBG, "Adding WEP Key length = %d\n", params->key_len);
@@ -1203,21 +1193,20 @@
 		if (priv->wdev->iftype == NL80211_IFTYPE_AP || priv->wdev->iftype == NL80211_IFTYPE_P2P_GO) {
 
 			if (priv->wilc_gtk[key_index] == NULL) {
-				priv->wilc_gtk[key_index] = (struct wilc_wfi_key *)WILC_MALLOC(sizeof(struct wilc_wfi_key));
+				priv->wilc_gtk[key_index] = WILC_MALLOC(sizeof(struct wilc_wfi_key));
 				priv->wilc_gtk[key_index]->key = NULL;
 				priv->wilc_gtk[key_index]->seq = NULL;
 
 			}
 			if (priv->wilc_ptk[key_index] == NULL) {
-				priv->wilc_ptk[key_index] = (struct wilc_wfi_key *)WILC_MALLOC(sizeof(struct wilc_wfi_key));
+				priv->wilc_ptk[key_index] = WILC_MALLOC(sizeof(struct wilc_wfi_key));
 				priv->wilc_ptk[key_index]->key = NULL;
 				priv->wilc_ptk[key_index]->seq = NULL;
 			}
 
 
 
-			if (!pairwise)
-			{
+			if (!pairwise) {
 				if (params->cipher == WLAN_CIPHER_SUITE_TKIP)
 					u8gmode = ENCRYPT_ENABLED | WPA | TKIP;
 				else
@@ -1233,18 +1222,18 @@
 				}
 				/* if there has been previous allocation for the same index through its key, free that memory and allocate again*/
 				if (priv->wilc_gtk[key_index]->key)
-					WILC_FREE(priv->wilc_gtk[key_index]->key);
+					kfree(priv->wilc_gtk[key_index]->key);
 
-				priv->wilc_gtk[key_index]->key = (u8 *)WILC_MALLOC(params->key_len);
-				WILC_memcpy(priv->wilc_gtk[key_index]->key, params->key, params->key_len);
+				priv->wilc_gtk[key_index]->key = WILC_MALLOC(params->key_len);
+				memcpy(priv->wilc_gtk[key_index]->key, params->key, params->key_len);
 
 				/* if there has been previous allocation for the same index through its seq, free that memory and allocate again*/
 				if (priv->wilc_gtk[key_index]->seq)
-					WILC_FREE(priv->wilc_gtk[key_index]->seq);
+					kfree(priv->wilc_gtk[key_index]->seq);
 
 				if ((params->seq_len) > 0) {
-					priv->wilc_gtk[key_index]->seq = (u8 *)WILC_MALLOC(params->seq_len);
-					WILC_memcpy(priv->wilc_gtk[key_index]->seq, params->seq, params->seq_len);
+					priv->wilc_gtk[key_index]->seq = WILC_MALLOC(params->seq_len);
+					memcpy(priv->wilc_gtk[key_index]->seq, params->seq, params->seq_len);
 				}
 
 				priv->wilc_gtk[key_index]->cipher = params->cipher;
@@ -1279,15 +1268,15 @@
 				}
 
 				if (priv->wilc_ptk[key_index]->key)
-					WILC_FREE(priv->wilc_ptk[key_index]->key);
+					kfree(priv->wilc_ptk[key_index]->key);
 
-				priv->wilc_ptk[key_index]->key = (u8 *)WILC_MALLOC(params->key_len);
+				priv->wilc_ptk[key_index]->key = WILC_MALLOC(params->key_len);
 
 				if (priv->wilc_ptk[key_index]->seq)
-					WILC_FREE(priv->wilc_ptk[key_index]->seq);
+					kfree(priv->wilc_ptk[key_index]->seq);
 
 				if ((params->seq_len) > 0)
-					priv->wilc_ptk[key_index]->seq = (u8 *)WILC_MALLOC(params->seq_len);
+					priv->wilc_ptk[key_index]->seq = WILC_MALLOC(params->seq_len);
 
 				if (INFO) {
 					for (i = 0; i < params->key_len; i++)
@@ -1297,10 +1286,10 @@
 						PRINT_INFO(CFG80211_DBG, "Adding group seq value[%d] = %x\n", i, params->seq[i]);
 				}
 
-				WILC_memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
+				memcpy(priv->wilc_ptk[key_index]->key, params->key, params->key_len);
 
 				if ((params->seq_len) > 0)
-					WILC_memcpy(priv->wilc_ptk[key_index]->seq, params->seq, params->seq_len);
+					memcpy(priv->wilc_ptk[key_index]->seq, params->seq, params->seq_len);
 
 				priv->wilc_ptk[key_index]->cipher = params->cipher;
 				priv->wilc_ptk[key_index]->key_len = params->key_len;
@@ -1315,8 +1304,7 @@
 
 		{
 			u8mode = 0;
-			if (!pairwise)
-			{
+			if (!pairwise) {
 				if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) {
 					/* swap the tx mic by rx mic */
 					pu8RxMic = params->key + 24;
@@ -1437,7 +1425,7 @@
 
 		/*Delete saved WEP keys params, if any*/
 		if (g_key_wep_params.key != NULL) {
-			WILC_FREE(g_key_wep_params.key);
+			kfree(g_key_wep_params.key);
 			g_key_wep_params.key = NULL;
 		}
 
@@ -1448,16 +1436,16 @@
 
 			if (priv->wilc_gtk[key_index]->key != NULL) {
 
-				WILC_FREE(priv->wilc_gtk[key_index]->key);
+				kfree(priv->wilc_gtk[key_index]->key);
 				priv->wilc_gtk[key_index]->key = NULL;
 			}
 			if (priv->wilc_gtk[key_index]->seq) {
 
-				WILC_FREE(priv->wilc_gtk[key_index]->seq);
+				kfree(priv->wilc_gtk[key_index]->seq);
 				priv->wilc_gtk[key_index]->seq = NULL;
 			}
 
-			WILC_FREE(priv->wilc_gtk[key_index]);
+			kfree(priv->wilc_gtk[key_index]);
 			priv->wilc_gtk[key_index] = NULL;
 
 		}
@@ -1466,35 +1454,35 @@
 
 			if (priv->wilc_ptk[key_index]->key) {
 
-				WILC_FREE(priv->wilc_ptk[key_index]->key);
+				kfree(priv->wilc_ptk[key_index]->key);
 				priv->wilc_ptk[key_index]->key = NULL;
 			}
 			if (priv->wilc_ptk[key_index]->seq) {
 
-				WILC_FREE(priv->wilc_ptk[key_index]->seq);
+				kfree(priv->wilc_ptk[key_index]->seq);
 				priv->wilc_ptk[key_index]->seq = NULL;
 			}
-			WILC_FREE(priv->wilc_ptk[key_index]);
+			kfree(priv->wilc_ptk[key_index]);
 			priv->wilc_ptk[key_index] = NULL;
 		}
 	#endif
 
 		/*Delete saved PTK and GTK keys params, if any*/
 		if (g_key_ptk_params.key != NULL) {
-			WILC_FREE(g_key_ptk_params.key);
+			kfree(g_key_ptk_params.key);
 			g_key_ptk_params.key = NULL;
 		}
 		if (g_key_ptk_params.seq != NULL) {
-			WILC_FREE(g_key_ptk_params.seq);
+			kfree(g_key_ptk_params.seq);
 			g_key_ptk_params.seq = NULL;
 		}
 
 		if (g_key_gtk_params.key != NULL) {
-			WILC_FREE(g_key_gtk_params.key);
+			kfree(g_key_gtk_params.key);
 			g_key_gtk_params.key = NULL;
 		}
 		if (g_key_gtk_params.seq != NULL) {
-			WILC_FREE(g_key_gtk_params.seq);
+			kfree(g_key_gtk_params.seq);
 			g_key_gtk_params.seq = NULL;
 		}
 
@@ -1503,7 +1491,7 @@
 	}
 
 	if (key_index >= 0 && key_index <= 3) {
-		WILC_memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
+		memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
 		priv->WILC_WFI_wep_key_len[key_index] = 0;
 
 		PRINT_D(CFG80211_DBG, "Removing WEP key with index = %d\n", key_index);
@@ -1588,7 +1576,7 @@
 
 	priv = wiphy_priv(wiphy);
 
-	PRINT_D(CFG80211_DBG, "Setting default key with idx = %d \n", key_index);
+	PRINT_D(CFG80211_DBG, "Setting default key with idx = %d\n", key_index);
 
 	if (key_index != priv->WILC_WFI_wep_default) {
 
@@ -1633,7 +1621,6 @@
  *  @version	1.0
  */
 
-extern uint32_t Statisitcs_totalAcks, Statisitcs_DroppedAcks;
 static int WILC_WFI_get_station(struct wiphy *wiphy, struct net_device *dev,
 				const u8 *mac, struct station_info *sinfo)
 {
@@ -1689,7 +1676,7 @@
 		 * kernel version 3.0.0
 		 */
 		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL) |
-						BIT( NL80211_STA_INFO_RX_PACKETS) |
+						BIT(NL80211_STA_INFO_RX_PACKETS) |
 						BIT(NL80211_STA_INFO_TX_PACKETS) |
 						BIT(NL80211_STA_INFO_TX_FAILED) |
 						BIT(NL80211_STA_INFO_TX_BITRATE);
@@ -1701,11 +1688,10 @@
 		sinfo->txrate.legacy = strStatistics.u8LinkSpeed * 10;
 
 #ifdef TCP_ENHANCEMENTS
-		if ((strStatistics.u8LinkSpeed > TCP_ACK_FILTER_LINK_SPEED_THRESH) && (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED)) {
+		if ((strStatistics.u8LinkSpeed > TCP_ACK_FILTER_LINK_SPEED_THRESH) && (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED))
 			Enable_TCP_ACK_Filter(true);
-		} else if (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED)   {
+		else if (strStatistics.u8LinkSpeed != DEFAULT_LINK_SPEED)
 			Enable_TCP_ACK_Filter(false);
-		}
 #endif
 
 		PRINT_D(CORECONFIG_DBG, "*** stats[%d][%d][%d][%d][%d]\n", sinfo->signal, sinfo->rx_packets, sinfo->tx_packets,
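
The get_station path above relies on the cfg80211 convention that every value written into struct station_info must have its matching NL80211_STA_INFO_* bit set in sinfo->filled, since nl80211 only emits flagged fields. A minimal illustration (rssi_dbm and link_speed_mbps are placeholders, not driver variables):

sinfo->filled = BIT(NL80211_STA_INFO_SIGNAL) |
		BIT(NL80211_STA_INFO_TX_BITRATE);
sinfo->signal = rssi_dbm;			/* signal strength, in dBm */
sinfo->txrate.legacy = link_speed_mbps * 10;	/* legacy rate is in 100 kbit/s units */
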
@@ -1826,7 +1812,7 @@
 	priv = wiphy_priv(wiphy);
 
 	pstrCfgParamVal.u32SetCfgFlag = 0;
-	PRINT_D(CFG80211_DBG, "Setting Wiphy params \n");
+	PRINT_D(CFG80211_DBG, "Setting Wiphy params\n");
 
 	if (changed & WIPHY_PARAM_RETRY_SHORT) {
 		PRINT_D(CFG80211_DBG, "Setting WIPHY_PARAM_RETRY_SHORT %d\n",
@@ -1909,7 +1895,7 @@
 
 
 	for (i = 0; i < priv->pmkid_list.numpmkid; i++)	{
-		if (!WILC_memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
+		if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
 				 ETH_ALEN)) {
 			/*If bssid already exists and pmkid value needs to reset*/
 			flag = PMKID_FOUND;
@@ -1919,9 +1905,9 @@
 	}
 	if (i < WILC_MAX_NUM_PMKIDS) {
 		PRINT_D(CFG80211_DBG, "Setting PMKID in private structure\n");
-		WILC_memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
+		memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid,
 			    ETH_ALEN);
-		WILC_memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
+		memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid,
 			    PMKID_LEN);
 		if (!(flag == PMKID_FOUND))
 			priv->pmkid_list.numpmkid++;
@@ -1959,11 +1945,11 @@
 	PRINT_D(CFG80211_DBG, "Deleting PMKSA keys\n");
 
 	for (i = 0; i < priv->pmkid_list.numpmkid; i++)	{
-		if (!WILC_memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
+		if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid,
 				 ETH_ALEN)) {
 			/*If bssid is found, reset the values*/
 			PRINT_D(CFG80211_DBG, "Reseting PMKID values\n");
-			WILC_memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(tstrHostIFpmkid));
+			memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(tstrHostIFpmkid));
 			flag = PMKID_FOUND;
 			break;
 		}
@@ -1971,10 +1957,10 @@
 
 	if (i < priv->pmkid_list.numpmkid && priv->pmkid_list.numpmkid > 0) {
 		for (; i < (priv->pmkid_list.numpmkid - 1); i++) {
-			WILC_memcpy(priv->pmkid_list.pmkidlist[i].bssid,
+			memcpy(priv->pmkid_list.pmkidlist[i].bssid,
 				    priv->pmkid_list.pmkidlist[i + 1].bssid,
 				    ETH_ALEN);
-			WILC_memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
+			memcpy(priv->pmkid_list.pmkidlist[i].pmkid,
 				    priv->pmkid_list.pmkidlist[i].pmkid,
 				    PMKID_LEN);
 		}
@@ -2002,7 +1988,7 @@
 	PRINT_D(CFG80211_DBG,  "Flushing  PMKID key values\n");
 
 	/*Get cached PMKIDs and set all with zeros*/
-	WILC_memset(&priv->pmkid_list, 0, sizeof(tstrHostIFpmkidAttr));
+	memset(&priv->pmkid_list, 0, sizeof(tstrHostIFpmkidAttr));
 
 	return 0;
 }
@@ -2074,11 +2060,10 @@
 		}
 		#endif  /* USE_SUPPLICANT_GO_INTENT */
 
-		if (buf[index] ==  CHANLIST_ATTR_ID) {
+		if (buf[index] ==  CHANLIST_ATTR_ID)
 			channel_list_attr_index = index;
-		} else if (buf[index] ==  OPERCHAN_ATTR_ID)   {
+		else if (buf[index] ==  OPERCHAN_ATTR_ID)
 			op_channel_attr_index = index;
-		}
 		index += buf[index + 1] + 3; /* ID,Length byte */
 	}
 
@@ -2160,11 +2145,10 @@
 		}
 		#endif
 
-		if (buf[index] ==  CHANLIST_ATTR_ID) {
+		if (buf[index] ==  CHANLIST_ATTR_ID)
 			channel_list_attr_index = index;
-		} else if (buf[index] ==  OPERCHAN_ATTR_ID)   {
+		else if (buf[index] ==  OPERCHAN_ATTR_ID)
 			op_channel_attr_index = index;
-		}
 		index += buf[index + 1] + 3; /* ID,Length byte */
 	}
 
@@ -2218,7 +2202,7 @@
 	pstrWFIDrv = (tstrWILC_WFIDrv *)priv->hWILCWFIDrv;
 
 	/* Get WILC header */
-	WILC_memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
+	memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
 
 	/* The packet offset field contains info about what type of management frame */
 	/* we are dealing with and the ack status */
@@ -2270,11 +2254,11 @@
 				case PUBLIC_ACT_VENDORSPEC:
 					/*Now we have a public action vendor specific action frame, check if its a p2p public action frame
 					 * based on the standard its should have the p2p_oui attribute with the following values 50 6f 9A 09*/
-					if (!WILC_memcmp(u8P2P_oui, &buff[ACTION_SUBTYPE_ID + 1], 4)) {
+					if (!memcmp(u8P2P_oui, &buff[ACTION_SUBTYPE_ID + 1], 4)) {
 						if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP))	{
 							if (!bWilc_ie) {
 								for (i = P2P_PUB_ACTION_SUBTYPE; i < size; i++)	{
-									if (!WILC_memcmp(u8P2P_vendorspec, &buff[i], 6)) {
+									if (!memcmp(u8P2P_vendorspec, &buff[i], 6)) {
 										u8P2Precvrandom = buff[i + 6];
 										bWilc_ie = true;
 										PRINT_D(GENERIC_DBG, "WILC Vendor specific IE:%02x\n", u8P2Precvrandom);
@@ -2287,7 +2271,7 @@
 							if ((buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP
 							      || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buff[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)) {
 								for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < size; i++) {
-									if (buff[i] == P2PELEM_ATTR_ID && !(WILC_memcmp(u8P2P_oui, &buff[i + 2], 4))) {
+									if (buff[i] == P2PELEM_ATTR_ID && !(memcmp(u8P2P_oui, &buff[i + 2], 4))) {
 										WILC_WFI_CfgParseRxAction(&buff[i + 6], size - (i + 6));
 										break;
 									}
@@ -2351,7 +2335,7 @@
 	struct WILC_WFI_priv *priv;
 	priv = (struct WILC_WFI_priv *)pUserVoid;
 
-	PRINT_D(HOSTINF_DBG, "Remain on channel ready \n");
+	PRINT_D(HOSTINF_DBG, "Remain on channel ready\n");
 
 	priv->bInP2PlistenState = true;
 
@@ -2379,7 +2363,7 @@
 
 	/*BugID_5477*/
 	if (u32SessionID == priv->strRemainOnChanParams.u32ListenSessionID) {
-		PRINT_D(GENERIC_DBG, "Remain on channel expired \n");
+		PRINT_D(GENERIC_DBG, "Remain on channel expired\n");
 
 		priv->bInP2PlistenState = false;
 
@@ -2485,7 +2469,7 @@
  */
 void WILC_WFI_add_wilcvendorspec(u8 *buff)
 {
-	WILC_memcpy(buff, u8P2P_vendorspec, sizeof(u8P2P_vendorspec));
+	memcpy(buff, u8P2P_vendorspec, sizeof(u8P2P_vendorspec));
 }
 /**
  *  @brief      WILC_WFI_mgmt_tx_frame
@@ -2528,17 +2512,17 @@
 	if (ieee80211_is_mgmt(mgmt->frame_control)) {
 
 		/*mgmt frame allocation*/
-		mgmt_tx = (struct p2p_mgmt_data *)WILC_MALLOC(sizeof(struct p2p_mgmt_data));
+		mgmt_tx = WILC_MALLOC(sizeof(struct p2p_mgmt_data));
 		if (mgmt_tx == NULL) {
 			PRINT_ER("Failed to allocate memory for mgmt_tx structure\n");
 			return WILC_FAIL;
 		}
-		mgmt_tx->buff = (char *)WILC_MALLOC(buf_len);
+		mgmt_tx->buff = WILC_MALLOC(buf_len);
 		if (mgmt_tx->buff == NULL) {
 			PRINT_ER("Failed to allocate memory for mgmt_tx buff\n");
 			return WILC_FAIL;
 		}
-		WILC_memcpy(mgmt_tx->buff, buf, len);
+		memcpy(mgmt_tx->buff, buf, len);
 		mgmt_tx->size = len;
 
 
@@ -2583,7 +2567,7 @@
 				{
 					/*Now we have a public action vendor specific action frame, check if its a p2p public action frame
 					 * based on the standard its should have the p2p_oui attribute with the following values 50 6f 9A 09*/
-					if (!WILC_memcmp(u8P2P_oui, &buf[ACTION_SUBTYPE_ID + 1], 4)) {
+					if (!memcmp(u8P2P_oui, &buf[ACTION_SUBTYPE_ID + 1], 4)) {
 						/*For the connection of two WILC's connection generate a rand number to determine who will be a GO*/
 						if ((buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == GO_NEG_RSP)) {
 							if (u8P2Plocalrandom == 1 && u8P2Precvrandom < u8P2Plocalrandom) {
@@ -2600,7 +2584,7 @@
 
 								/*Search for the p2p information element; after the Public action subtype there's a byte for the dialog token, skip that*/
 								for (i = P2P_PUB_ACTION_SUBTYPE + 2; i < len; i++) {
-									if (buf[i] == P2PELEM_ATTR_ID && !(WILC_memcmp(u8P2P_oui, &buf[i + 2], 4))) {
+									if (buf[i] == P2PELEM_ATTR_ID && !(memcmp(u8P2P_oui, &buf[i + 2], 4))) {
 										if (buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_REQ || buf[P2P_PUB_ACTION_SUBTYPE] == P2P_INV_RSP)
 											WILC_WFI_CfgParseTxAction(&mgmt_tx->buff[i + 6], len - (i + 6), true, nic->iftype);
 
@@ -2862,7 +2846,7 @@
 
 	#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 	g_obtainingIP = false;
-	WILC_TimerStop(&hDuringIpTimer, NULL);
+	del_timer(&hDuringIpTimer);
 	PRINT_D(GENERIC_DBG, "Changing virtual interface, enable scan\n");
 	#endif
 	/*BugID_5137*/
@@ -2886,7 +2870,6 @@
 
 		/*Remove the entries of the previously connected clients*/
 		memset(priv->assoc_stainfo.au8Sta_AssociatedBss, 0, MAX_NUM_STA * ETH_ALEN);
-		#ifndef SIMULATION
 		#ifdef WILC_P2P
 		interface_type = nic->iftype;
 		nic->iftype = STATION_MODE;
@@ -2907,15 +2890,15 @@
 
 			/*Setting interface 1 drv handler and mac address in newly downloaded FW*/
 			host_int_set_wfi_drv_handler(g_linux_wlan->strInterfaceInfo[0].drvHandler);
-			host_int_set_MacAddress((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+			host_int_set_MacAddress(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 						g_linux_wlan->strInterfaceInfo[0].aSrcAddress);
 			host_int_set_operation_mode(priv->hWILCWFIDrv, STATION_MODE);
 
 			/*Add saved WEP keys, if any*/
 			if (g_wep_keys_saved) {
-				host_int_set_WEPDefaultKeyID((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+				host_int_set_WEPDefaultKeyID(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 							     g_key_wep_params.key_idx);
-				host_int_add_wep_key_bss_sta((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+				host_int_add_wep_key_bss_sta(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 							     g_key_wep_params.key,
 							     g_key_wep_params.key_len,
 							     g_key_wep_params.key_idx);
@@ -2964,7 +2947,6 @@
 			host_int_set_power_mgmt(priv->hWILCWFIDrv, 1, 0);
 		}
 		#endif
-		#endif
 		break;
 
 	case NL80211_IFTYPE_P2P_CLIENT:
@@ -2979,7 +2961,6 @@
 		priv->wdev->iftype = type;
 		nic->monitor_flag = 0;
 
-		#ifndef SIMULATION
 		#ifdef WILC_P2P
 
 		PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
@@ -2995,15 +2976,15 @@
 			g_wilc_initialized = 1;
 
 			host_int_set_wfi_drv_handler(g_linux_wlan->strInterfaceInfo[0].drvHandler);
-			host_int_set_MacAddress((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+			host_int_set_MacAddress(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 						g_linux_wlan->strInterfaceInfo[0].aSrcAddress);
 			host_int_set_operation_mode(priv->hWILCWFIDrv, STATION_MODE);
 
 			/*Add saved WEP keys, if any*/
 			if (g_wep_keys_saved) {
-				host_int_set_WEPDefaultKeyID((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+				host_int_set_WEPDefaultKeyID(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 							     g_key_wep_params.key_idx);
-				host_int_add_wep_key_bss_sta((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+				host_int_add_wep_key_bss_sta(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 							     g_key_wep_params.key,
 							     g_key_wep_params.key_len,
 							     g_key_wep_params.key_idx);
@@ -3053,7 +3034,6 @@
 			}
 		}
 		#endif
-		#endif
 		break;
 
 	case NL80211_IFTYPE_AP:
@@ -3064,7 +3044,6 @@
 		nic->iftype = AP_MODE;
 		PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
 
-		#ifndef SIMULATION
 		PRINT_D(HOSTAPD_DBG, "Downloading AP firmware\n");
 		linux_wlan_get_firmware(nic);
 		#ifdef WILC_P2P
@@ -3086,7 +3065,6 @@
 			}
 		}
 		#endif
-		#endif
 		break;
 
 	case NL80211_IFTYPE_P2P_GO:
@@ -3094,7 +3072,7 @@
 
 		#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 		g_obtainingIP = true;
-		WILC_TimerStart(&hDuringIpTimer, duringIP_TIME, NULL, NULL);
+		mod_timer(&hDuringIpTimer, jiffies + msecs_to_jiffies(duringIP_TIME));
 		#endif
 		host_int_set_power_mgmt(priv->hWILCWFIDrv, 0, 0);
 		/*BugID_5222*/
@@ -3110,7 +3088,6 @@
 
 		PRINT_D(CORECONFIG_DBG, "priv->hWILCWFIDrv[%p]\n", priv->hWILCWFIDrv);
 
-		#ifndef SIMULATION
 		#ifdef WILC_P2P
 		PRINT_D(HOSTAPD_DBG, "Downloading P2P_CONCURRENCY_FIRMWARE\n");
 
@@ -3127,15 +3104,15 @@
 
 		/*Setting interface 1 drv handler and mac address in newly downloaded FW*/
 		host_int_set_wfi_drv_handler(g_linux_wlan->strInterfaceInfo[0].drvHandler);
-		host_int_set_MacAddress((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+		host_int_set_MacAddress(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 					g_linux_wlan->strInterfaceInfo[0].aSrcAddress);
 		host_int_set_operation_mode(priv->hWILCWFIDrv, AP_MODE);
 
 		/*Add saved WEP keys, if any*/
 		if (g_wep_keys_saved) {
-			host_int_set_WEPDefaultKeyID((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+			host_int_set_WEPDefaultKeyID(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 						     g_key_wep_params.key_idx);
-			host_int_add_wep_key_bss_sta((WILC_WFIDrvHandle)(g_linux_wlan->strInterfaceInfo[0].drvHandler),
+			host_int_add_wep_key_bss_sta(g_linux_wlan->strInterfaceInfo[0].drvHandler,
 						     g_key_wep_params.key,
 						     g_key_wep_params.key_len,
 						     g_key_wep_params.key_idx);
@@ -3185,7 +3162,6 @@
 			}
 		}
 		#endif
-		#endif
 		break;
 
 	default:
@@ -3234,7 +3210,7 @@
 	priv = wiphy_priv(wiphy);
 	PRINT_D(HOSTAPD_DBG, "Starting ap\n");
 
-	PRINT_D(HOSTAPD_DBG, "Interval = %d \n DTIM period = %d\n Head length = %zu Tail length = %zu\n",
+	PRINT_D(HOSTAPD_DBG, "Interval = %d\n DTIM period = %d\n Head length = %zu Tail length = %zu\n",
 		settings->beacon_interval, settings->dtim_period, beacon->head_len, beacon->tail_len);
 
 	s32Error = WILC_WFI_CfgSetChannel(wiphy, &settings->chandef);
@@ -3353,7 +3329,7 @@
 {
 	s32 s32Error = WILC_SUCCESS;
 	struct WILC_WFI_priv *priv;
-	tstrWILC_AddStaParam strStaParams = {{0}};
+	tstrWILC_AddStaParam strStaParams = { {0} };
 	perInterface_wlan_t *nic;
 
 
@@ -3365,8 +3341,8 @@
 	if (nic->iftype == AP_MODE || nic->iftype == GO_MODE) {
 		#ifndef WILC_FULLY_HOSTING_AP
 
-		WILC_memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
-		WILC_memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
+		memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
+		memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
 		strStaParams.u16AssocID = params->aid;
 		strStaParams.u8NumRates = params->supported_rates_len;
 		strStaParams.pu8Rates = params->supported_rates;
@@ -3384,7 +3360,7 @@
 			strStaParams.bIsHTSupported = true;
 			strStaParams.u16HTCapInfo = params->ht_capa->cap_info;
 			strStaParams.u8AmpduParams = params->ht_capa->ampdu_params_info;
-			WILC_memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
+			memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
 			strStaParams.u16HTExtParams = params->ht_capa->extended_ht_cap_info;
 			strStaParams.u32TxBeamformingCap = params->ht_capa->tx_BF_cap_info;
 			strStaParams.u8ASELCap = params->ht_capa->antenna_selection_info;
@@ -3407,7 +3383,7 @@
 
 		#else
 		PRINT_D(CFG80211_DBG, "Adding station parameters %d\n", params->aid);
-		WILC_memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
+		memcpy(priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid], mac, ETH_ALEN);
 
 		PRINT_D(CFG80211_DBG, "BSSID = %x%x%x%x%x%x\n", priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][0], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][1], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][2], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][3], priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][4],
 			priv->assoc_stainfo.au8Sta_AssociatedBss[params->aid][5]);
@@ -3450,7 +3426,7 @@
 
 
 		if (mac == NULL) {
-			PRINT_D(HOSTAPD_DBG, "All associated stations \n");
+			PRINT_D(HOSTAPD_DBG, "All associated stations\n");
 			s32Error = host_int_del_allstation(priv->hWILCWFIDrv, priv->assoc_stainfo.au8Sta_AssociatedBss);
 		} else {
 			PRINT_D(HOSTAPD_DBG, "With mac address: %x%x%x%x%x%x\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
@@ -3484,7 +3460,7 @@
 {
 	s32 s32Error = WILC_SUCCESS;
 	struct WILC_WFI_priv *priv;
-	tstrWILC_AddStaParam strStaParams = {{0}};
+	tstrWILC_AddStaParam strStaParams = { {0} };
 	perInterface_wlan_t *nic;
 
 
@@ -3498,7 +3474,7 @@
 	if (nic->iftype == AP_MODE || nic->iftype == GO_MODE) {
 		#ifndef WILC_FULLY_HOSTING_AP
 
-		WILC_memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
+		memcpy(strStaParams.au8BSSID, mac, ETH_ALEN);
 		strStaParams.u16AssocID = params->aid;
 		strStaParams.u8NumRates = params->supported_rates_len;
 		strStaParams.pu8Rates = params->supported_rates;
@@ -3514,7 +3490,7 @@
 			strStaParams.bIsHTSupported = true;
 			strStaParams.u16HTCapInfo = params->ht_capa->cap_info;
 			strStaParams.u8AmpduParams = params->ht_capa->ampdu_params_info;
-			WILC_memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
+			memcpy(strStaParams.au8SuppMCsSet, &params->ht_capa->mcs, WILC_SUPP_MCS_SET_SIZE);
 			strStaParams.u16HTExtParams = params->ht_capa->extended_ht_cap_info;
 			strStaParams.u32TxBeamformingCap = params->ht_capa->tx_BF_cap_info;
 			strStaParams.u8ASELCap = params->ht_capa->antenna_selection_info;
@@ -3581,13 +3557,8 @@
 		new_ifc = WILC_WFI_init_mon_interface(name, nic->wilc_netdev);
 		if (new_ifc != NULL) {
 			PRINT_D(HOSTAPD_DBG, "Setting monitor flag in private structure\n");
-			#ifdef SIMULATION
-			priv = netdev_priv(priv->wdev->netdev);
-			priv->monitor_flag = 1;
-			#else
 			nic = netdev_priv(priv->wdev->netdev);
 			nic->monitor_flag = 1;
-			#endif
 		} else
 			PRINT_ER("Error in initializing monitor interface\n ");
 	}
@@ -3856,9 +3827,9 @@
 	PRINT_D(INIT_DBG, "Host[%p][%p]\n", net, net->ieee80211_ptr);
 	priv = wdev_priv(net->ieee80211_ptr);
 	if (op_ifcs == 0) {
-		s32Error = WILC_TimerCreate(&(hAgingTimer), remove_network_from_shadow, NULL);
+		setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
 		#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
-		s32Error = WILC_TimerCreate(&(hDuringIpTimer), clear_duringIP, NULL);
+		setup_timer(&hDuringIpTimer, clear_duringIP, 0);
 		#endif
 	}
 	op_ifcs++;
@@ -3908,7 +3879,7 @@
 	#ifdef DISABLE_PWRSAVE_AND_SCAN_DURING_IP
 	if (op_ifcs == 0) {
 		PRINT_D(CORECONFIG_DBG, "destroy during ip\n");
-		WILC_TimerDestroy(&hDuringIpTimer, NULL);
+		del_timer_sync(&hDuringIpTimer);
 	}
 	#endif
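
At wiphy teardown the during-IP timer is removed with del_timer_sync() rather than plain del_timer(); the _sync variant also waits for a handler that is already running on another CPU to finish, which is the safe choice right before the data the handler touches goes away (the caller must not hold a lock the handler itself takes). A short sketch, reusing the illustrative timer name from earlier:

static void example_teardown(void)
{
	del_timer_sync(&example_timer);	/* cancel, and wait out a running callback */
	/* ... only now free state the callback may reference ... */
}
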
 
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
index c25350c..97b663b 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.h
@@ -123,7 +123,7 @@
 #ifdef TCP_ENHANCEMENTS
 #define TCP_ACK_FILTER_LINK_SPEED_THRESH 54
 #define DEFAULT_LINK_SPEED 72
-extern void Enable_TCP_ACK_Filter(bool value);
+void Enable_TCP_ACK_Filter(bool value);
 #endif
 
 #endif
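
The header change above removes a redundant storage-class specifier: function declarations have external linkage by default, so 'extern' on a prototype adds nothing. The two forms are equivalent:

extern void Enable_TCP_ACK_Filter(bool value);	/* old: redundant keyword */
void Enable_TCP_ACK_Filter(bool value);		/* new: same meaning      */
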
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.c b/drivers/staging/wilc1000/wilc_wfi_netdevice.c
deleted file mode 100644
index ab66ce4..0000000
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.c
+++ /dev/null
@@ -1,951 +0,0 @@
-/*!
- *  @file	wilc_wfi_netdevice.c
- *  @brief	File Operations OS wrapper functionality
- *  @author	mdaftedar
- *  @sa		wilc_wfi_netdevice.h
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-
-#ifdef SIMULATION
-
-#include "wilc_wfi_cfgoperations.h"
-#include "host_interface.h"
-
-
-MODULE_AUTHOR("Mai Daftedar");
-MODULE_LICENSE("Dual BSD/GPL");
-
-
-struct net_device *WILC_WFI_devs[2];
-
-/*
- * Transmitter lockup simulation, normally disabled.
- */
-static int lockup;
-module_param(lockup, int, 0);
-
-static int timeout = WILC_WFI_TIMEOUT;
-module_param(timeout, int, 0);
-
-/*
- * Do we run in NAPI mode?
- */
-static int use_napi ;
-module_param(use_napi, int, 0);
-
-
-/*
- * A structure representing an in-flight packet.
- */
-struct WILC_WFI_packet {
-	struct WILC_WFI_packet *next;
-	struct net_device *dev;
-	int datalen;
-	u8 data[ETH_DATA_LEN];
-};
-
-
-
-int pool_size = 8;
-module_param(pool_size, int, 0);
-
-
-static void WILC_WFI_TxTimeout(struct net_device *dev);
-static void (*WILC_WFI_Interrupt)(int, void *, struct pt_regs *);
-
-/**
- *  @brief      WILC_WFI_SetupPool
- *  @details    Set up a device's packet pool.
- *  @param[in]  struct net_device *dev : Network Device Pointer
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_SetupPool(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	int i;
-	struct WILC_WFI_packet *pkt;
-
-	priv->ppool = NULL;
-	for (i = 0; i < pool_size; i++) {
-		pkt = kmalloc (sizeof (struct WILC_WFI_packet), GFP_KERNEL);
-		if (pkt == NULL) {
-			PRINT_D(RX_DBG, "Ran out of memory allocating packet pool\n");
-			return;
-		}
-		pkt->dev = dev;
-		pkt->next = priv->ppool;
-		priv->ppool = pkt;
-	}
-}
-
-/**
- *  @brief      WILC_WFI_TearDownPool
- *  @details    Internal cleanup function that's called after the network device
- *              driver is unregistered
- *  @param[in]  struct net_device *dev : Network Device Driver
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_TearDownPool(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	struct WILC_WFI_packet *pkt;
-
-	while ((pkt = priv->ppool)) {
-		priv->ppool = pkt->next;
-		kfree (pkt);
-		/* FIXME - in-flight packets ? */
-	}
-}
-
-/**
- *  @brief      WILC_WFI_GetTxBuffer
- *  @details    Buffer/pool management
- *  @param[in]  net_device *dev : Network Device Driver Structure
- *  @return     struct WILC_WFI_packet
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-struct WILC_WFI_packet *WILC_WFI_GetTxBuffer(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	unsigned long flags;
-	struct WILC_WFI_packet *pkt;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	pkt = priv->ppool;
-	priv->ppool = pkt->next;
-	if (priv->ppool == NULL) {
-		PRINT_INFO(RX_DBG, "Pool empty\n");
-		netif_stop_queue(dev);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return pkt;
-}
-/**
- *  @brief      WILC_WFI_ReleaseBuffer
- *  @details    Buffer/pool management
- *  @param[in]  WILC_WFI_packet *pkt : Structure holding in-flight packet
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_ReleaseBuffer(struct WILC_WFI_packet *pkt)
-{
-	unsigned long flags;
-	struct WILC_WFI_priv *priv = netdev_priv(pkt->dev);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	pkt->next = priv->ppool;
-	priv->ppool = pkt;
-	spin_unlock_irqrestore(&priv->lock, flags);
-	if (netif_queue_stopped(pkt->dev) && pkt->next == NULL)
-		netif_wake_queue(pkt->dev);
-}
-
-/**
- *  @brief      WILC_WFI_EnqueueBuf
- *  @details    Enqueuing packets in an RX buffer queue
- *  @param[in]  WILC_WFI_packet *pkt : Structure holding in-flight packet
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_EnqueueBuf(struct net_device *dev, struct WILC_WFI_packet *pkt)
-{
-	unsigned long flags;
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-
-	spin_lock_irqsave(&priv->lock, flags);
-	pkt->next = priv->rx_queue;   /* FIXME - misorders packets */
-	priv->rx_queue = pkt;
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/**
- *  @brief      WILC_WFI_DequeueBuf
- *  @details    Dequeuing packets from the RX buffer queue
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @return     WILC_WFI_packet *pkt : Structure holding in-flight pac
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-struct WILC_WFI_packet *WILC_WFI_DequeueBuf(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	struct WILC_WFI_packet *pkt;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	pkt = priv->rx_queue;
-	if (pkt != NULL)
-		priv->rx_queue = pkt->next;
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return pkt;
-}
-/**
- *  @brief      WILC_WFI_RxInts
- *  @details    Enable and disable receive interrupts.
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @param[in]	enable : Enable/Disable flag
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-static void WILC_WFI_RxInts(struct net_device *dev, int enable)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	priv->rx_int_enabled = enable;
-}
-
-/**
- *  @brief      WILC_WFI_Open
- *  @details    Open Network Device Driver, called when the network
- *              interface is opened. It starts the interface's transmit queue.
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @param[in]	enable : Enable/Disable flag
- *  @return     int : Returns 0 upon success.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_Open(struct net_device *dev)
-{
-	/* request_region(), request_irq(), ....  (like fops->open) */
-	/*
-	 * Assign the hardware address of the board: use "\0SNULx", where
-	 * x is 0 or 1. The first byte is '\0' to avoid being a multicast
-	 * address (the first byte of multicast addrs is odd).
-	 */
-	memcpy(dev->dev_addr, "\0WLAN0", ETH_ALEN);
-	if (dev == WILC_WFI_devs[1])
-		dev->dev_addr[ETH_ALEN - 1]++;  /* \0SNUL1 */
-
-	WILC_WFI_InitHostInt(dev);
-	netif_start_queue(dev);
-	return 0;
-}
-/**
- *  @brief      WILC_WFI_Release
- *  @details    Release Network Device Driver, called when the network
- *              interface is stopped or brought down. This function marks
- *              the network driver as not being able to transmit
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @return     int : Return 0 on Success.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_Release(struct net_device *dev)
-{
-	/* release ports, irq and such -- like fops->close */
-
-	netif_stop_queue(dev); /* can't transmit any more */
-
-	return 0;
-}
-/**
- *  @brief      WILC_WFI_Config
- *  @details    Configuration changes (passed on by ifconfig)
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @param[in]	struct ifmap *map : Contains the ioctl implementation for the
- *              network driver.
- *  @return     int : Return 0 on Success.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_Config(struct net_device *dev, struct ifmap *map)
-{
-	if (dev->flags & IFF_UP) /* can't act on a running interface */
-		return -EBUSY;
-
-	/* Don't allow changing the I/O address */
-	if (map->base_addr != dev->base_addr) {
-		PRINT_D(RX_DBG, KERN_WARNING "WILC_WFI: Can't change I/O address\n");
-		return -EOPNOTSUPP;
-	}
-
-	/* Allow changing the IRQ */
-	if (map->irq != dev->irq) {
-		dev->irq = map->irq;
-		/* request_irq() is delayed to open-time */
-	}
-
-	/* ignore other fields */
-	return 0;
-}
-/**
- *  @brief      WILC_WFI_Rx
- *  @details    Receive a packet: retrieve, encapsulate and pass over to upper
- *              levels
- *  @param[in]   net_device *dev : Network Device Driver Structure
- *  @param[in]	WILC_WFI_packet :
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_Rx(struct net_device *dev, struct WILC_WFI_packet *pkt)
-{
-	int i;
-	struct sk_buff *skb;
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	s8 rssi;
-	/*
-	 * The packet has been retrieved from the transmission
-	 * medium. Build an skb around it, so upper layers can handle it
-	 */
-
-
-	skb = dev_alloc_skb(pkt->datalen + 2);
-	if (!skb) {
-		if (printk_ratelimit())
-			PRINT_D(RX_DBG, "WILC_WFI rx: low on mem - packet dropped\n");
-		priv->stats.rx_dropped++;
-		goto out;
-	}
-	skb_reserve(skb, 2);  /* align IP on 16B boundary */
-	memcpy(skb_put(skb, pkt->datalen), pkt->data, pkt->datalen);
-
-	if (priv->monitor_flag) {
-		PRINT_INFO(RX_DBG, "In monitor device name %s\n", dev->name);
-		priv = wiphy_priv(priv->dev->ieee80211_ptr->wiphy);
-		PRINT_D(RX_DBG, "VALUE PASSED IN OF HRWD %p\n", priv->hWILCWFIDrv);
-		/* host_int_get_rssi(priv->hWILCWFIDrv, &(rssi)); */
-		if (INFO) {
-			for (i = 14; i < skb->len; i++)
-				PRINT_INFO(RX_DBG, "RXdata[%d] %02x\n", i, skb->data[i]);
-		}
-		WILC_WFI_monitor_rx(dev, skb);
-		return;
-	}
-out:
-	return;
-}
-
-/**
- *  @brief      WILC_WFI_Poll
- *  @details    The poll implementation
- *  @param[in]   struct napi_struct *napi :
- *  @param[in]	int budget :
- *  @return     int : Return 0 on Success.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-static int WILC_WFI_Poll(struct napi_struct *napi, int budget)
-{
-	int npackets = 0;
-	struct sk_buff *skb;
-	struct WILC_WFI_priv  *priv = container_of(napi, struct WILC_WFI_priv, napi);
-	struct net_device  *dev =  priv->dev;
-	struct WILC_WFI_packet *pkt;
-
-	while (npackets < budget && priv->rx_queue) {
-		pkt = WILC_WFI_DequeueBuf(dev);
-		skb = dev_alloc_skb(pkt->datalen + 2);
-		if (!skb) {
-			if (printk_ratelimit())
-				PRINT_D(RX_DBG, "WILC_WFI: packet dropped\n");
-			priv->stats.rx_dropped++;
-			WILC_WFI_ReleaseBuffer(pkt);
-			continue;
-		}
-		skb_reserve(skb, 2);  /* align IP on 16B boundary */
-		memcpy(skb_put(skb, pkt->datalen), pkt->data, pkt->datalen);
-		skb->dev = dev;
-		skb->protocol = eth_type_trans(skb, dev);
-		skb->ip_summed = CHECKSUM_UNNECESSARY;  /* don't check it */
-		netif_receive_skb(skb);
-		/* Maintain stats */
-		npackets++;
-		WILC_WFI_update_stats(priv->dev->ieee80211_ptr->wiphy, pkt->datalen, WILC_WFI_RX_PKT);
-		WILC_WFI_ReleaseBuffer(pkt);
-	}
-	/* If we processed all packets, we're done; tell the kernel and re-enable ints */
-	if (npackets < budget) {
-		napi_complete(napi);
-		WILC_WFI_RxInts(dev, 1);
-	}
-	return npackets;
-}
-
-/**
- *  @brief      WILC_WFI_Poll
- *  @details    The typical interrupt entry point
- *  @param[in]   struct napi_struct *napi :
- *  @param[in]	int budget :
- *  @return     int : Return 0 on Success.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-static void WILC_WFI_RegularInterrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	int statusword;
-	struct WILC_WFI_priv *priv;
-	struct WILC_WFI_packet *pkt = NULL;
-	/*
-	 * As usual, check the "device" pointer to be sure it is
-	 * really interrupting.
-	 * Then assign "struct device *dev"
-	 */
-	struct net_device *dev = (struct net_device *)dev_id;
-	/* ... and check with hw if it's really ours */
-
-	/* paranoid */
-	if (!dev)
-		return;
-
-	/* Lock the device */
-	priv = netdev_priv(dev);
-	spin_lock(&priv->lock);
-
-	/* retrieve statusword: real netdevices use I/O instructions */
-	statusword = priv->status;
-	priv->status = 0;
-	if (statusword & WILC_WFI_RX_INTR) {
-		/* send it to WILC_WFI_rx for handling */
-		pkt = priv->rx_queue;
-		if (pkt) {
-			priv->rx_queue = pkt->next;
-			WILC_WFI_Rx(dev, pkt);
-		}
-	}
-	if (statusword & WILC_WFI_TX_INTR) {
-		/* a transmission is over: free the skb */
-		WILC_WFI_update_stats(priv->dev->ieee80211_ptr->wiphy, priv->tx_packetlen, WILC_WFI_TX_PKT);
-		dev_kfree_skb(priv->skb);
-	}
-
-	/* Unlock the device and we are done */
-	spin_unlock(&priv->lock);
-	if (pkt)
-		WILC_WFI_ReleaseBuffer(pkt);  /* Do this outside the lock! */
-	return;
-}
-/**
- *  @brief      WILC_WFI_NapiInterrupt
- *  @details    A NAPI interrupt handler
- *  @param[in]   irq:
- *  @param[in]	dev_id:
- *  @param[in]	pt_regs:
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-static void WILC_WFI_NapiInterrupt(int irq, void *dev_id, struct pt_regs *regs)
-{
-	int statusword;
-	struct WILC_WFI_priv *priv;
-
-	/*
-	 * As usual, check the "device" pointer for shared handlers.
-	 * Then assign "struct device *dev"
-	 */
-	struct net_device *dev = (struct net_device *)dev_id;
-	/* ... and check with hw if it's really ours */
-
-	/* paranoid */
-	if (!dev)
-		return;
-
-	/* Lock the device */
-	priv = netdev_priv(dev);
-	spin_lock(&priv->lock);
-
-	/* retrieve statusword: real netdevices use I/O instructions */
-	statusword = priv->status;
-	priv->status = 0;
-	if (statusword & WILC_WFI_RX_INTR) {
-		WILC_WFI_RxInts(dev, 0);   /* Disable further interrupts */
-		napi_schedule(&priv->napi);
-	}
-	if (statusword & WILC_WFI_TX_INTR) {
-		/* a transmission is over: free the skb */
-
-		WILC_WFI_update_stats(priv->dev->ieee80211_ptr->wiphy, priv->tx_packetlen, WILC_WFI_TX_PKT);
-		dev_kfree_skb(priv->skb);
-	}
-
-	/* Unlock the device and we are done */
-	spin_unlock(&priv->lock);
-	return;
-}
-
-/**
- *  @brief      MI_WFI_HwTx
- *  @details    Transmit a packet (low level interface)
- *  @param[in]   buf:
- *  @param[in]	len:
- *  @param[in]	net_device *dev:
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_HwTx(char *buf, int len, struct net_device *dev)
-{
-	/*
-	 * This function deals with hw details. This interface loops
-	 * back the packet to the other WILC_WFI interface (if any).
-	 * In other words, this function implements the WILC_WFI behaviour,
-	 * while all other procedures are rather device-independent
-	 */
-	struct iphdr *ih;
-	struct net_device *dest;
-	struct WILC_WFI_priv *priv;
-	u32 *saddr, *daddr;
-	struct WILC_WFI_packet *tx_buffer;
-
-
-	/* I am paranoid. Ain't I? */
-	if (len < sizeof(struct ethhdr) + sizeof(struct iphdr)) {
-		PRINT_D(RX_DBG, "WILC_WFI: Hmm... packet too short (%i octets)\n",
-			len);
-		return;
-	}
-
-	if (0) {  /* enable this conditional to look at the data */
-		int i;
-		PRINT_D(RX_DBG, "len is %i", len);
-		for (i = 14; i < len; i++)
-			PRINT_D(RX_DBG, "TXdata[%d] %02x\n", i, buf[i] & 0xff);
-		/*   PRINT_D(RX_DBG, "\n"); */
-	}
-	/*
-	 * Ethhdr is 14 bytes, but the kernel arranges for iphdr
-	 * to be aligned (i.e., ethhdr is unaligned)
-	 */
-	ih = (struct iphdr *)(buf + sizeof(struct ethhdr));
-	saddr = &ih->saddr;
-	daddr = &ih->daddr;
-
-	((u8 *)saddr)[2] ^= 1;  /* change the third octet (class C) */
-	((u8 *)daddr)[2] ^= 1;
-
-	ih->check = 0;          /* and rebuild the checksum (ip needs it) */
-	ih->check = ip_fast_csum((unsigned char *)ih, ih->ihl);
-
-
-	if (dev == WILC_WFI_devs[0])
-		PRINT_D(RX_DBG, "%08x:%05i --> %08x:%05i\n",
-			ntohl(ih->saddr), ntohs(((struct tcphdr *)(ih + 1))->source),
-			ntohl(ih->daddr), ntohs(((struct tcphdr *)(ih + 1))->dest));
-	else
-		PRINT_D(RX_DBG, "%08x:%05i <-- %08x:%05i\n",
-			ntohl(ih->daddr), ntohs(((struct tcphdr *)(ih + 1))->dest),
-			ntohl(ih->saddr), ntohs(((struct tcphdr *)(ih + 1))->source));
-
-	/*
-	 * Ok, now the packet is ready for transmission: first simulate a
-	 * receive interrupt on the twin device, then  a
-	 * transmission-done on the transmitting device
-	 */
-	dest = WILC_WFI_devs[dev == WILC_WFI_devs[0] ? 1 : 0];
-	priv = netdev_priv(dest);
-
-	tx_buffer = WILC_WFI_GetTxBuffer(dev);
-	tx_buffer->datalen = len;
-	memcpy(tx_buffer->data, buf, len);
-	WILC_WFI_EnqueueBuf(dest, tx_buffer);
-	if (priv->rx_int_enabled) {
-		priv->status |= WILC_WFI_RX_INTR;
-		WILC_WFI_Interrupt(0, dest, NULL);
-	}
-
-	priv = netdev_priv(dev);
-	priv->tx_packetlen = len;
-	priv->tx_packetdata = buf;
-	priv->status |= WILC_WFI_TX_INTR;
-	if (lockup && ((priv->stats.tx_packets + 1) % lockup) == 0) {
-		/* Simulate a dropped transmit interrupt */
-		netif_stop_queue(dev);
-		PRINT_D(RX_DBG, "Simulate lockup at %ld, txp %ld\n", jiffies,
-			(unsigned long) priv->stats.tx_packets);
-	} else
-		WILC_WFI_Interrupt(0, dev, NULL);
-
-}
-
-/**
- *  @brief      WILC_WFI_Tx
- *  @details    Transmit a packet (called by the kernel)
- *  @param[in]   sk_buff *skb:
- *  @param[in]	net_device *dev:
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_Tx(struct sk_buff *skb, struct net_device *dev)
-{
-	int len;
-	char *data, shortpkt[ETH_ZLEN];
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-
-	/* priv = wiphy_priv(priv->dev->ieee80211_ptr->wiphy); */
-
-	/*  if(priv->monitor_flag) */
-	/*	 mac80211_hwsim_monitor_rx(skb); */
-
-
-	data = skb->data;
-	len = skb->len;
-
-	if (len < ETH_ZLEN) {
-		memset(shortpkt, 0, ETH_ZLEN);
-		memcpy(shortpkt, skb->data, skb->len);
-		len = ETH_ZLEN;
-		data = shortpkt;
-	}
-	dev->trans_start = jiffies;  /* save the timestamp */
-
-	/* Remember the skb, so we can free it at interrupt time */
-	priv->skb = skb;
-
-	/* actual deliver of data is device-specific, and not shown here */
-	WILC_WFI_HwTx(data, len, dev);
-
-	return 0;  /* Our simple device can not fail */
-}
-
-/**
- *  @brief      WILC_WFI_TxTimeout
- *  @details    Deal with a transmit timeout.
- *  @param[in]	net_device *dev:
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_TxTimeout(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-
-	PRINT_D(RX_DBG, "Transmit timeout at %ld, latency %ld\n", jiffies,
-		jiffies - dev->trans_start);
-	/* Simulate a transmission interrupt to get things moving */
-	priv->status = WILC_WFI_TX_INTR;
-	WILC_WFI_Interrupt(0, dev, NULL);
-	priv->stats.tx_errors++;
-	netif_wake_queue(dev);
-	return;
-}
-
-/**
- *  @brief      WILC_WFI_Ioctl
- *  @details    Ioctl commands
- *  @param[in]	net_device *dev:
- *  @param[in]	ifreq *rq
- *  @param[in]	cmd:
- *  @return     int : Return 0 on Success
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_Ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
-	PRINT_D(RX_DBG, "ioctl\n");
-	return 0;
-}
-
-/**
- *  @brief      WILC_WFI_Stat
- *  @details    Return statistics to the caller
- *  @param[in]	net_device *dev:
- *  @return     WILC_WFI_Stats : Return net_device_stats stucture with the
- *              network device driver private data contents.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-struct net_device_stats *WILC_WFI_Stats(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	return &priv->stats;
-}
-
-/**
- *  @brief      WILC_WFI_RebuildHeader
- *  @details    This function is called to fill up an eth header, since arp is not
- *              available on the interface
- *  @param[in]	sk_buff *skb:
- *  @return     int : Return 0 on Success
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_RebuildHeader(struct sk_buff *skb)
-{
-	struct ethhdr *eth = (struct ethhdr *) skb->data;
-	struct net_device *dev = skb->dev;
-
-	memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
-	memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
-	eth->h_dest[ETH_ALEN - 1]   ^= 0x01;  /* dest is us xor 1 */
-	return 0;
-}
-/**
- *  @brief      WILC_WFI_RebuildHeader
- *  @details    This function is called to fill up an eth header, since arp is not
- *              available on the interface
- *  @param[in]	sk_buff *skb:
- *  @param[in]	struct net_device *dev:
- *  @param[in]   unsigned short type:
- *  @param[in]   const void *saddr,
- *  @param[in]   const void *daddr:
- *  @param[in]          unsigned int len
- *  @return     int : Return 0 on Success
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_Header(struct sk_buff *skb, struct net_device *dev,
-		    unsigned short type, const void *daddr, const void *saddr,
-		    unsigned int len)
-{
-	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
-
-	eth->h_proto = htons(type);
-	memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
-	memcpy(eth->h_dest,   daddr ? daddr : dev->dev_addr, dev->addr_len);
-	eth->h_dest[ETH_ALEN - 1]   ^= 0x01;  /* dest is us xor 1 */
-	return dev->hard_header_len;
-}
-
-/**
- *  @brief      WILC_WFI_ChangeMtu
- *  @details    The "change_mtu" method is usually not needed.
- *              If you need it, it must be like this.
- *  @param[in]	net_device *dev : Network Device Driver Structure
- *  @param[in]	new_mtu :
- *  @return     int : Returns 0 on Success.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_ChangeMtu(struct net_device *dev, int new_mtu)
-{
-	unsigned long flags;
-	struct WILC_WFI_priv *priv = netdev_priv(dev);
-	spinlock_t *lock = &priv->lock;
-
-	/* check ranges */
-	if ((new_mtu < 68) || (new_mtu > 1500))
-		return -EINVAL;
-	/*
-	 * Do anything you need, and the accept the value
-	 */
-	spin_lock_irqsave(lock, flags);
-	dev->mtu = new_mtu;
-	spin_unlock_irqrestore(lock, flags);
-	return 0;  /* success */
-}
-
-static const struct header_ops WILC_WFI_header_ops = {
-	.create  = WILC_WFI_Header,
-	.rebuild = WILC_WFI_RebuildHeader,
-	.cache   = NULL,   /* disable caching */
-};
-
-
-static const struct net_device_ops WILC_WFI_netdev_ops = {
-	.ndo_open = WILC_WFI_Open,
-	.ndo_stop = WILC_WFI_Release,
-	.ndo_set_config = WILC_WFI_Config,
-	.ndo_start_xmit = WILC_WFI_Tx,
-	.ndo_do_ioctl = WILC_WFI_Ioctl,
-	.ndo_get_stats = WILC_WFI_Stats,
-	.ndo_change_mtu = WILC_WFI_ChangeMtu,
-	.ndo_tx_timeout = WILC_WFI_TxTimeout,
-};
-
-/**
- *  @brief      WILC_WFI_Init
- *  @details    The init function (sometimes called probe).
- *              It is invoked by register_netdev()
- *  @param[in]	net_device *dev:
- *  @return     NONE
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-void WILC_WFI_Init(struct net_device *dev)
-{
-	struct WILC_WFI_priv *priv;
-
-
-	/*
-	 * Then, assign other fields in dev, using ether_setup() and some
-	 * hand assignments
-	 */
-	ether_setup(dev);  /* assign some of the fields */
-	/* 1- Allocate space */
-
-	dev->netdev_ops      = &WILC_WFI_netdev_ops;
-	dev->header_ops      = &WILC_WFI_header_ops;
-	dev->watchdog_timeo = timeout;
-	/* keep the default flags, just add NOARP */
-	dev->flags           |= IFF_NOARP;
-	dev->features        |= NETIF_F_NO_CSUM;
-	/*
-	 * Then, initialize the priv field. This encloses the statistics
-	 * and a few private fields.
-	 */
-	priv = netdev_priv(dev);
-	memset(priv, 0, sizeof(struct WILC_WFI_priv));
-	priv->dev = dev;
-	netif_napi_add(dev, &priv->napi, WILC_WFI_Poll, 2);
-	/* The last parameter above is the NAPI "weight". */
-	spin_lock_init(&priv->lock);
-	WILC_WFI_RxInts(dev, 1);           /* enable receive interrupts */
-	WILC_WFI_SetupPool(dev);
-}
-
-/**
- *  @brief      WILC_WFI_Stat
- *  @details    Return statistics to the caller
- *  @param[in]	net_device *dev:
- *  @return     WILC_WFI_Stats : Return net_device_stats stucture with the
- *              network device driver private data contents.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-
-void WILC_WFI_Cleanup(void)
-{
-	int i;
-	struct WILC_WFI_priv *priv[2];
-
-	/*if(hwsim_mon!=NULL)
-	 * {
-	 *      PRINT_D(RX_DBG, "Freeing monitor interface\n");
-	 *      unregister_netdev(hwsim_mon);
-	 *      free_netdev(hwsim_mon);
-	 * }*/
-	for (i = 0; i < 2; i++) {
-		priv[i] = netdev_priv(WILC_WFI_devs[i]);
-
-		if (WILC_WFI_devs[i]) {
-			PRINT_D(RX_DBG, "Unregistering\n");
-			unregister_netdev(WILC_WFI_devs[i]);
-			WILC_WFI_TearDownPool(WILC_WFI_devs[i]);
-			free_netdev(WILC_WFI_devs[i]);
-			PRINT_D(RX_DBG, "[NETDEV]Stopping interface\n");
-			WILC_WFI_DeInitHostInt(WILC_WFI_devs[i]);
-			WILC_WFI_WiphyFree(WILC_WFI_devs[i]);
-		}
-
-	}
-	/* unregister_netdev(hwsim_mon); */
-	WILC_WFI_deinit_mon_interface();
-	return;
-}
-
-
-void StartConfigSim(void);
-
-
-
-
-
-
-
-/**
- *  @brief      WILC_WFI_Stat
- *  @details    Return statistics to the caller
- *  @param[in]	net_device *dev:
- *  @return     WILC_WFI_Stats : Return net_device_stats stucture with the
- *              network device driver private data contents.
- *  @author	mdaftedar
- *  @date	01 MAR 2012
- *  @version	1.0
- */
-int WILC_WFI_InitModule(void)
-{
-
-	int result, i, ret = -ENOMEM;
-	struct WILC_WFI_priv *priv[2], *netpriv;
-	struct wireless_dev *wdev;
-	WILC_WFI_Interrupt = use_napi ? WILC_WFI_NapiInterrupt : WILC_WFI_RegularInterrupt;
-	char buf[IFNAMSIZ];
-
-	for (i = 0; i < 2; i++)	{
-
-		/* Allocate the net devices */
-		WILC_WFI_devs[i] = alloc_netdev(sizeof(struct WILC_WFI_priv), "wlan%d",
-						WILC_WFI_Init);
-		if (WILC_WFI_devs[i] == NULL)
-			goto out;
-		/* priv[i] = netdev_priv(WILC_WFI_devs[i]); */
-
-		wdev = WILC_WFI_WiphyRegister(WILC_WFI_devs[i]);
-		WILC_WFI_devs[i]->ieee80211_ptr = wdev;
-		netpriv = netdev_priv(WILC_WFI_devs[i]);
-		netpriv->dev->ieee80211_ptr = wdev;
-		netpriv->dev->ml_priv = netpriv;
-		wdev->netdev = netpriv->dev;
-
-		/*Registering the net device*/
-		result = register_netdev(WILC_WFI_devs[i]);
-		if (result)
-			PRINT_D(RX_DBG, "WILC_WFI: error %i registering device \"%s\"\n",
-				result, WILC_WFI_devs[i]->name);
-		else
-			ret = 0;
-	}
-
-
-	/*init atmel driver */
-	priv[0] = netdev_priv(WILC_WFI_devs[0]);
-	priv[1] = netdev_priv(WILC_WFI_devs[1]);
-
-	if (priv[1]->dev->ieee80211_ptr->wiphy->interface_modes && BIT(NL80211_IFTYPE_MONITOR))	{
-		/* snprintf(buf, IFNAMSIZ, "mon.%s",  priv[1]->dev->name); */
-		/*	WILC_WFI_init_mon_interface(); */
-		/*	priv[1]->monitor_flag = 1; */
-
-	}
-	priv[0]->bCfgScanning = false;
-	priv[0]->u32RcvdChCount = 0;
-
-	WILC_memset(priv[0]->au8AssociatedBss, 0xFF, ETH_ALEN);
-
-
-	/* ret = host_int_init(&priv[0]->hWILCWFIDrv); */
-	/*copy handle to the other driver*/
-	/* priv[1]->hWILCWFIDrv = priv[0]->hWILCWFIDrv; */
-	if (ret) {
-		PRINT_ER("Error Init Driver\n");
-	}
-
-
-out:
-	if (ret)
-		WILC_WFI_Cleanup();
-	return ret;
-
-
-}
-
-
-module_init(WILC_WFI_InitModule);
-module_exit(WILC_WFI_Cleanup);
-
-#endif
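
The file removed above was built around the classic netdev private-data pattern (alloc_netdev() plus netdev_priv() plus a setup callback). For orientation, here is a minimal, hedged sketch of that pattern against the current 4.x API, which takes an extra name_assign_type argument; the example_* names and struct layout are purely illustrative, only the "wlan%d" template and the IFF_NOARP usage come from the deleted code.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

struct example_priv {
	spinlock_t lock;
	struct net_device *dev;
};

static void example_setup(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	ether_setup(dev);		/* fill in the Ethernet defaults */
	dev->flags |= IFF_NOARP;	/* as the removed code did */

	spin_lock_init(&priv->lock);
	priv->dev = dev;
}

static struct net_device *example_create(void)
{
	/* 4.x alloc_netdev() takes a name_assign_type argument */
	return alloc_netdev(sizeof(struct example_priv), "wlan%d",
			    NET_NAME_UNKNOWN, example_setup);
}
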
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index d413fa3..77f320d 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -77,12 +77,12 @@
 #define num_reg_frame 2
 /*
  * If you use RX_BH_WORK_QUEUE on LPC3131: You may lose the first interrupt on
- * LPC3131 which is important to get the MAC start status when you are blocked inside
- * linux_wlan_firmware_download() which blocks mac_open().
+ * LPC3131 which is important to get the MAC start status when you are blocked
+ * inside linux_wlan_firmware_download() which blocks mac_open().
  */
-#if defined (NM73131_0_BOARD)
+#if defined(NM73131_0_BOARD)
  #define RX_BH_TYPE  RX_BH_KTHREAD
-#elif defined (PANDA_BOARD)
+#elif defined(PANDA_BOARD)
  #define RX_BH_TYPE  RX_BH_THREADED_IRQ
 #else
  #define RX_BH_TYPE  RX_BH_KTHREAD
@@ -95,6 +95,7 @@
 	int seq_len;
 	u32 cipher;
 };
+
 struct wilc_wfi_wep_key {
 	u8 *key;
 	u8 key_len;
@@ -143,14 +144,15 @@
 	spinlock_t lock;
 	struct net_device *dev;
 	struct napi_struct napi;
-	WILC_WFIDrvHandle hWILCWFIDrv;
+	tstrWILC_WFIDrv *hWILCWFIDrv;
 	WILC_WFIDrvHandle hWILCWFIDrv_2;
 	tstrHostIFpmkidAttr pmkid_list;
 	struct WILC_WFI_stats netstats;
 	u8 WILC_WFI_wep_default;
 	u8 WILC_WFI_wep_key[4][WLAN_KEY_LEN_WEP104];
 	u8 WILC_WFI_wep_key_len[4];
-	struct net_device *real_ndev;   /* The real interface that the monitor is on */
+	/* The real interface that the monitor is on */
+	struct net_device *real_ndev;
 	struct wilc_wfi_key *wilc_gtk[MAX_NUM_STA];
 	struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
 	u8 wilc_groupkey;
@@ -174,7 +176,7 @@
 typedef struct {
 	uint8_t aSrcAddress[ETH_ALEN];
 	uint8_t aBSSID[ETH_ALEN];
-	uint32_t drvHandler;
+	tstrWILC_WFIDrv *drvHandler;
 	struct net_device *wilc_netdev;
 } tstrInterfaceInfo;
 typedef struct {
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 3af91f7..7c53a2b 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -16,7 +16,6 @@
  *      Global
  *
  ********************************************/
-extern unsigned int int_clrd;
 extern wilc_hif_func_t hif_sdio;
 extern wilc_hif_func_t hif_spi;
 extern wilc_cfg_func_t mac_cfg;
@@ -24,8 +23,6 @@
 extern u8 g_wilc_initialized; /* AMR : 0422 RK3026 Crash issue */
 #endif
 extern void WILC_WFI_mgmt_rx(uint8_t *buff, uint32_t size);
-extern void frmw_to_linux(uint8_t *buff, uint32_t size);
-int sdio_xfer_cnt(void);
 uint32_t wilc_get_chipid(uint8_t update);
 u16 Set_machw_change_vir_if(bool bValue);
 
@@ -118,18 +115,15 @@
 {
 	char buf[256];
 	va_list args;
-	int len;
 
 	if (flag & dbgflag) {
 		va_start(args, fmt);
-		len = vsprintf(buf, fmt, args);
+		vsprintf(buf, fmt, args);
 		va_end(args);
 
 		if (g_wlan.os_func.os_debug)
 			g_wlan.os_func.os_debug(buf);
 	}
-
-	return;
 }
 
 static CHIP_PS_STATE_T genuChipPSstate = CHIP_WAKEDUP;
@@ -292,13 +286,13 @@
 
 #ifdef	TCP_ACK_FILTER
 struct Ack_session_info;
-typedef struct Ack_session_info {
+struct Ack_session_info {
 	uint32_t Ack_seq_num;
 	uint32_t Bigger_Ack_num;
 	uint16_t src_port;
 	uint16_t dst_port;
 	uint16_t status;
-} Ack_session_info_t;
+};
 
 typedef struct {
 	uint32_t ack_num;
@@ -319,7 +313,7 @@
 
 #define MAX_TCP_SESSION		25
 #define MAX_PENDING_ACKS		256
-Ack_session_info_t Acks_keep_track_info[2 * MAX_TCP_SESSION];
+struct Ack_session_info Acks_keep_track_info[2 * MAX_TCP_SESSION];
 Pending_Acks_info_t Pending_Acks_info[MAX_PENDING_ACKS];
 
 uint32_t PendingAcks_arrBase;
@@ -451,7 +445,7 @@
 	for (i = PendingAcks_arrBase; i < (PendingAcks_arrBase + Pending_Acks); i++) {
 		if (Pending_Acks_info[i].ack_num < Acks_keep_track_info[Pending_Acks_info[i].Session_index].Bigger_Ack_num) {
 			struct txq_entry_t *tqe;
-			PRINT_D(TCP_ENH, "DROP ACK: %u \n", Pending_Acks_info[i].ack_num);
+			PRINT_D(TCP_ENH, "DROP ACK: %u\n", Pending_Acks_info[i].ack_num);
 			tqe = Pending_Acks_info[i].txqe;
 			if (tqe) {
 				wilc_wlan_txq_remove(tqe);
@@ -467,11 +461,10 @@
 	Pending_Acks = 0;
 	Opened_TCP_session = 0;
 
-	if (PendingAcks_arrBase == 0) {
+	if (PendingAcks_arrBase == 0)
 		PendingAcks_arrBase = MAX_TCP_SESSION;
-	} else {
+	else
 		PendingAcks_arrBase = 0;
-	}
 
 
 	p->os_func.os_spin_unlock(p->txq_spinlock, &p->txq_spinlock_flags);
@@ -735,7 +728,7 @@
 
 			do {
 				/* Wait for the chip to stabilize*/
-				WILC_Sleep(2);
+				usleep_range(2 * 1000, 2 * 1000);
 				/* Make sure chip is awake. This is an extra step that can be removed */
 				/* later to avoid the bus access overhead */
 				if ((wilc_get_chipid(true) == 0)) {
@@ -758,7 +751,7 @@
 			/* If still off, redo the wake up sequence */
 			while (((clk_status_reg & 0x1) == 0) && (((++trials) % 3) == 0)) {
 				/* Wait for the chip to stabilize*/
-				WILC_Sleep(2);
+				usleep_range(2 * 1000, 2 * 1000);
 
 				/* Make sure chip is awake. This is an extra step that can be removed */
 				/* later to avoid the bus access overhead */
@@ -996,7 +989,7 @@
 				/**
 				 *      wait for vmm table is ready
 				 **/
-				PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait... \n");
+				PRINT_WRN(GENERIC_DBG, "[wilc txq]: warn, vmm table not clear yet, wait...\n");
 				release_bus(RELEASE_ALLOW_SLEEP);
 				p->os_func.os_sleep(3); /* wait 3 ms */
 				acquire_bus(ACQUIRE_AND_WAKEUP);
@@ -1063,7 +1056,7 @@
 			}
 
 			if (entries == 0) {
-				PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]] \n", reg, i, vmm_table[i - 1]);
+				PRINT_WRN(GENERIC_DBG, "[wilc txq]: no more buffer in the chip (reg: %08x), retry later [[ %d, %x ]]\n", reg, i, vmm_table[i - 1]);
 
 				/* undo the transaction. */
 				ret = p->hif_func.hif_read_reg(WILC_HOST_TX_CTRL, &reg);
@@ -1114,11 +1107,10 @@
 				/*Bug3959: transmitting mgmt frames received from host*/
 				/*setting bit 30 in the host header to indicate mgmt frame*/
 #ifdef WILC_AP_EXTERNAL_MLME
-				if (tqe->type == WILC_MGMT_PKT)	{
+				if (tqe->type == WILC_MGMT_PKT)
 					header |= (1 << 30);
-				} else {
+				else
 					header &= ~(1 << 30);
-				}
 #endif
 
 #ifdef BIG_ENDIAN
@@ -1213,7 +1205,7 @@
 
 	do {
 		if (p->quit) {
-			PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function \n");
+			PRINT_D(RX_DBG, "exit 1st do-while due to Clean_UP function\n");
 			p->os_func.os_signal(p->cfg_wait);
 			break;
 		}
@@ -1330,8 +1322,7 @@
 	} while (1);
 
 	p->rxq_exit = 1;
-	PRINT_D(RX_DBG, "THREAD: Exiting RX thread \n");
-	return;
+	PRINT_D(RX_DBG, "THREAD: Exiting RX thread\n");
 }
 
 /********************************************
@@ -1414,7 +1405,7 @@
 		buffer = p->os_func.os_malloc(size);
 		if (buffer == NULL) {
 			wilc_debug(N_ERR, "[wilc isr]: fail alloc host memory...drop the packets (%d)\n", size);
-			WILC_Sleep(100);
+			usleep_range(100 * 1000, 100 * 1000);
 			goto _end_;
 		}
 #endif
@@ -1545,11 +1536,10 @@
 		acquire_bus(ACQUIRE_ONLY);
 		offset += 8;
 		while (((int)size) && (offset < buffer_size)) {
-			if (size <= blksz) {
+			if (size <= blksz)
 				size2 = size;
-			} else {
+			else
 				size2 = blksz;
-			}
 			/* Copy firmware into a DMA coherent buffer */
 			memcpy(dma_buffer, &buffer[offset], size2);
 			ret = p->hif_func.hif_block_tx(addr, dma_buffer, size2);
@@ -1782,7 +1772,7 @@
 /******************************************************************************/
 	reg = ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 8) | (1 << 9) | (1 << 26) | (1 << 29) | (1 << 30) | (1 << 31)); /**/
 	/**/
-	ret = p->hif_func.hif_write_reg(WILC_GLB_RESET_0, reg);                                 /**/
+	p->hif_func.hif_write_reg(WILC_GLB_RESET_0, reg);                                 /**/
 	reg = ~(1 << 10);                                                                                               /**/
 	/**/
 	ret = p->hif_func.hif_write_reg(WILC_GLB_RESET_0, reg);                                 /**/
@@ -2306,11 +2296,10 @@
 		PRINT_ER("Error while Reading reg WILC_CHANGING_VIR_IF\n");
 	}
 
-	if (bValue) {
+	if (bValue)
 		reg |= (BIT31);
-	} else {
+	else
 		reg &= ~(BIT31);
-	}
 
 	ret = (&g_wlan)->hif_func.hif_write_reg(WILC_CHANGING_VIR_IF, reg);
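
The WILC_Sleep() wrappers in the hunks above are replaced with usleep_range(). A minimal sketch of the call with illustrative values; giving the upper bound some slack over the lower bound lets the timer code coalesce wakeups instead of pinning an exact expiry.

#include <linux/delay.h>

static void example_settle_delay(void)
{
	/* sleep about 2 ms; any wakeup in [2000us, 2500us] is acceptable */
	usleep_range(2000, 2500);
}
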
 
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index 0ba7ec6..244f710 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -1,7 +1,7 @@
 #ifndef WILC_WLAN_H
 #define WILC_WLAN_H
 
-#include "wilc_type.h"
+#include "wilc_oswrapper.h"
 
 
 #define ISWILC1000(id)   (((id & 0xfffff000) == 0x100000) ? 1 : 0)
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index 3cffe55..e2842d3 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -363,8 +363,6 @@
 		size -= (2 + len);
 		info += (2 + len);
 	}
-
-	return;
 }
 
 static int wilc_wlan_parse_info_frame(uint8_t *info, int size)
@@ -513,7 +511,6 @@
 	int ret = 1;
 	uint8_t msg_type;
 	uint8_t msg_id;
-	uint16_t msg_len;
 	#ifdef WILC_FULLY_HOSTING_AP
 	u32 *ptru32Frame;
 	bool bStatus = frame[2];
@@ -528,11 +525,6 @@
 
 	msg_type = frame[0];
 	msg_id = frame[1];      /* seq no */
-#ifdef BIG_ENDIAN
-	msg_len = (frame[2] << 8) | frame[3];
-#else
-	msg_len = (frame[3] << 8) | frame[2];
-#endif
 	frame += 4;
 	size -= 4;
 
@@ -557,7 +549,7 @@
 
 	case 'L':
 #ifndef SWITCH_LOG_TERMINAL
-		PRINT_ER("Unexpected firmware log message received \n");
+		PRINT_ER("Unexpected firmware log message received\n");
 #else
 		PRINT_D(FIRM_DBG, "\nFIRMWARE LOGS :\n<<\n%s\n>>\n", frame);
 		break;
@@ -572,18 +564,18 @@
 #endif
 /*bug3819:*/
 	case 'S':
-		PRINT_INFO(RX_DBG, "Scan Notification Received \n");
+		PRINT_INFO(RX_DBG, "Scan Notification Received\n");
 		host_int_ScanCompleteReceived(frame - 4, size + 4);
 		break;
 
 #ifdef WILC_FULLY_HOSTING_AP
 	case 'T':
-		PRINT_INFO(RX_DBG, "TBTT Notification Received \n");
+		PRINT_INFO(RX_DBG, "TBTT Notification Received\n");
 		process_tbtt_isr();
 		break;
 
 	case 'A':
-		PRINT_INFO(RX_DBG, "HOSTAPD ACK Notification Received \n");
+		PRINT_INFO(RX_DBG, "HOSTAPD ACK Notification Received\n");
 		WILC_mgm_HOSTAPD_ACK(ptru32Frame, bStatus);
 		break;
 #endif
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 8ed51e3..5cf74e4 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -20,7 +20,7 @@
 /* #define USE_OLD_SPI_SW */
 
 
-#include "wilc_type.h"
+#include "wilc_oswrapper.h"
 #include "linux_wlan_common.h"
 
 
@@ -43,8 +43,8 @@
  ********************************************/
 
 #define HIF_SDIO           (0)
-#define HIF_SPI            (1 << 0)
-#define HIF_SDIO_GPIO_IRQ  (1 << 2)
+#define HIF_SPI            BIT(0)
+#define HIF_SDIO_GPIO_IRQ  BIT(2)
 
 
 /********************************************
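
HIF_SPI and HIF_SDIO_GPIO_IRQ now use the kernel's BIT() macro, which expands to (1UL << (nr)). A tiny stand-alone demo showing the values are unchanged:

#include <stdio.h>

#define BIT(nr) (1UL << (nr))	/* same definition as <linux/bitops.h> */

#define HIF_SDIO           (0)
#define HIF_SPI            BIT(0)
#define HIF_SDIO_GPIO_IRQ  BIT(2)

int main(void)
{
	/* prints HIF_SPI=0x1 HIF_SDIO_GPIO_IRQ=0x4, matching the old shifts */
	printf("HIF_SPI=0x%lx HIF_SDIO_GPIO_IRQ=0x%lx\n",
	       HIF_SPI, HIF_SDIO_GPIO_IRQ);
	return 0;
}
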
diff --git a/drivers/staging/xgifb/Makefile b/drivers/staging/xgifb/Makefile
index 55e5199..964a843 100644
--- a/drivers/staging/xgifb/Makefile
+++ b/drivers/staging/xgifb/Makefile
@@ -1,4 +1,4 @@
 obj-$(CONFIG_FB_XGI)  += xgifb.o
 
-xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o
+xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o
 
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 943d463..5a6251a4 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -18,8 +18,8 @@
 #define Index_CR_GPIO_Reg1 0x48
 #define Index_CR_GPIO_Reg3 0x4a
 
-#define GPIOG_EN    (1<<6)
-#define GPIOG_READ  (1<<1)
+#define GPIOG_EN    BIT(6)
+#define GPIOG_READ  BIT(1)
 
 static char *forcecrt2type;
 static char *mode;
@@ -29,7 +29,7 @@
 /* -------------------- Macro definitions ---------------------------- */
 
 #ifdef DEBUG
-static void dumpVGAReg(void)
+static void dumpVGAReg(struct xgifb_video_info *xgifb_info)
 {
 	u8 i, reg;
 
@@ -48,7 +48,7 @@
 	}
 }
 #else
-static inline void dumpVGAReg(void)
+static inline void dumpVGAReg(struct xgifb_video_info *xgifb_info)
 {
 }
 #endif
@@ -1073,7 +1073,7 @@
 	}
 	XGIfb_bpp_to_var(xgifb_info, var); /*update ARGB info*/
 
-	dumpVGAReg();
+	dumpVGAReg(xgifb_info);
 	return 0;
 }
 
@@ -2019,7 +2019,7 @@
 		goto error_mtrr;
 	}
 
-	dumpVGAReg();
+	dumpVGAReg(xgifb_info);
 
 	return 0;
 
diff --git a/drivers/staging/xgifb/vb_init.h b/drivers/staging/xgifb/vb_init.h
index 2457302..500cabe 100644
--- a/drivers/staging/xgifb/vb_init.h
+++ b/drivers/staging/xgifb/vb_init.h
@@ -1,6 +1,6 @@
 #ifndef _VBINIT_
 #define _VBINIT_
-extern unsigned char XGIInitNew(struct pci_dev *pdev);
-extern void XGIRegInit(struct vb_device_info *, unsigned long);
+unsigned char XGIInitNew(struct pci_dev *pdev);
+void XGIRegInit(struct vb_device_info *, unsigned long);
 #endif
 
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 5301bec..6f082a7a 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -1,23 +1,23 @@
 #ifndef _VBSETMODE_
 #define _VBSETMODE_
 
-extern void InitTo330Pointer(unsigned char, struct vb_device_info *);
-extern void XGI_UnLockCRT2(struct vb_device_info *);
-extern void XGI_LockCRT2(struct vb_device_info *);
-extern void XGI_DisplayOff(struct xgifb_video_info *,
-			   struct xgi_hw_device_info *,
-			   struct vb_device_info *);
-extern void XGI_GetVBType(struct vb_device_info *);
-extern void XGI_SenseCRT1(struct vb_device_info *);
-extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
-				   struct xgi_hw_device_info *HwDeviceExtension,
-				   unsigned short ModeNo);
+void InitTo330Pointer(unsigned char, struct vb_device_info *);
+void XGI_UnLockCRT2(struct vb_device_info *);
+void XGI_LockCRT2(struct vb_device_info *);
+void XGI_DisplayOff(struct xgifb_video_info *,
+		    struct xgi_hw_device_info *,
+		    struct vb_device_info *);
+void XGI_GetVBType(struct vb_device_info *);
+void XGI_SenseCRT1(struct vb_device_info *);
+unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+			    struct xgi_hw_device_info *HwDeviceExtension,
+			    unsigned short ModeNo);
 
-extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
-				      unsigned short *ModeIdIndex);
-extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
-					 unsigned short ModeNo,
-					 unsigned short ModeIdIndex,
-					 struct vb_device_info *);
+unsigned char XGI_SearchModeID(unsigned short ModeNo,
+			       unsigned short *ModeIdIndex);
+unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
+				  unsigned short ModeNo,
+				  unsigned short ModeIdIndex,
+				  struct vb_device_info *);
 
 #endif
diff --git a/drivers/staging/xgifb/vb_util.c b/drivers/staging/xgifb/vb_util.c
deleted file mode 100644
index be3437ca..0000000
--- a/drivers/staging/xgifb/vb_util.c
+++ /dev/null
@@ -1,42 +0,0 @@
-#include "vgatypes.h"
-#include "vb_util.h"
-
-void xgifb_reg_set(unsigned long port, u8 index, u8 data)
-{
-	outb(index, port);
-	outb(data, port + 1);
-}
-
-u8 xgifb_reg_get(unsigned long port, u8 index)
-{
-	outb(index, port);
-	return inb(port + 1);
-}
-
-void xgifb_reg_and_or(unsigned long port, u8 index,
-		unsigned data_and, unsigned data_or)
-{
-	u8 temp;
-
-	temp = xgifb_reg_get(port, index); /* XGINew_Part1Port index 02 */
-	temp = (temp & data_and) | data_or;
-	xgifb_reg_set(port, index, temp);
-}
-
-void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
-{
-	u8 temp;
-
-	temp = xgifb_reg_get(port, index); /* XGINew_Part1Port index 02 */
-	temp &= data_and;
-	xgifb_reg_set(port, index, temp);
-}
-
-void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
-{
-	u8 temp;
-
-	temp = xgifb_reg_get(port, index); /* XGINew_Part1Port index 02 */
-	temp |= data_or;
-	xgifb_reg_set(port, index, temp);
-}
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index 9161de1..7bd395f 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -1,9 +1,43 @@
 #ifndef _VBUTIL_
 #define _VBUTIL_
-extern void xgifb_reg_set(unsigned long, u8, u8);
-extern u8 xgifb_reg_get(unsigned long, u8);
-extern void xgifb_reg_or(unsigned long, u8, unsigned);
-extern void xgifb_reg_and(unsigned long, u8, unsigned);
-extern void xgifb_reg_and_or(unsigned long, u8, unsigned, unsigned);
+static inline void xgifb_reg_set(unsigned long port, u8 index, u8 data)
+{
+	outb(index, port);
+	outb(data, port + 1);
+}
+
+static inline u8 xgifb_reg_get(unsigned long port, u8 index)
+{
+	outb(index, port);
+	return inb(port + 1);
+}
+
+static inline void xgifb_reg_and_or(unsigned long port, u8 index,
+				    unsigned data_and, unsigned data_or)
+{
+	u8 temp;
+
+	temp = xgifb_reg_get(port, index);
+	temp = (temp & data_and) | data_or;
+	xgifb_reg_set(port, index, temp);
+}
+
+static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
+{
+	u8 temp;
+
+	temp = xgifb_reg_get(port, index);
+	temp &= data_and;
+	xgifb_reg_set(port, index, temp);
+}
+
+static inline void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
+{
+	u8 temp;
+
+	temp = xgifb_reg_get(port, index);
+	temp |= data_or;
+	xgifb_reg_set(port, index, temp);
+}
 #endif
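
vb_util.c goes away because the helpers now live in the header as static inline functions: every translation unit that includes vb_util.h gets its own copy, so there is no out-of-line object to link (hence the Makefile hunk above) and no multiple-definition errors. A stand-alone miniature of the same pattern, with invented names:

#include <stdio.h>

/* would normally sit in a header included by many .c files */
static inline unsigned int reg_and_or(unsigned int reg,
				      unsigned int mask_and,
				      unsigned int mask_or)
{
	/* read-modify-write on a value, like xgifb_reg_and_or() on a port */
	return (reg & mask_and) | mask_or;
}

int main(void)
{
	printf("0x%x\n", reg_and_or(0x55, 0x0f, 0x80));	/* prints 0x85 */
	return 0;
}
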
 
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 6d88d24..5a9982f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -306,20 +306,13 @@
 	kfree(ibr);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
+static void iblock_bio_done(struct bio *bio)
 {
 	struct se_cmd *cmd = bio->bi_private;
 	struct iblock_req *ibr = cmd->priv;
 
-	/*
-	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-	 */
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-		err = -EIO;
-
-	if (err != 0) {
-		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-			" err: %d\n", bio, err);
+	if (bio->bi_error) {
+		pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
 		/*
 		 * Bump the ib_bio_err_cnt and release bio.
 		 */
@@ -370,15 +363,15 @@
 	blk_finish_plug(&plug);
 }
 
-static void iblock_end_io_flush(struct bio *bio, int err)
+static void iblock_end_io_flush(struct bio *bio)
 {
 	struct se_cmd *cmd = bio->bi_private;
 
-	if (err)
-		pr_err("IBLOCK: cache flush failed: %d\n", err);
+	if (bio->bi_error)
+		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
 
 	if (cmd) {
-		if (err)
+		if (bio->bi_error)
 			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
 		else
 			target_complete_cmd(cmd, SAM_STAT_GOOD);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 08e9084..de18790 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -852,7 +852,7 @@
 	return bl;
 }
 
-static void pscsi_bi_endio(struct bio *bio, int error)
+static void pscsi_bi_endio(struct bio *bio)
 {
 	bio_put(bio);
 }
@@ -973,7 +973,7 @@
 	while (*hbio) {
 		bio = *hbio;
 		*hbio = (*hbio)->bi_next;
-		bio_endio(bio, 0);	/* XXX: should be error */
+		bio_endio(bio);
 	}
 	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
@@ -1061,7 +1061,7 @@
 	while (hbio) {
 		struct bio *bio = hbio;
 		hbio = hbio->bi_next;
-		bio_endio(bio, 0);	/* XXX: should be error */
+		bio_endio(bio);
 	}
 	ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 fail:
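
Both target backends above are converted to the 4.3 bio completion convention: ->bi_end_io() no longer takes an error argument and bio_endio() no longer forwards one; the status lives in bio->bi_error. A minimal hedged sketch of the pattern, where the example_* names and the completion plumbing are illustrative:

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/fs.h>
#include <linux/printk.h>

static void example_end_io(struct bio *bio)
{
	struct completion *done = bio->bi_private;

	if (bio->bi_error)	/* 0 on success, negative errno on failure */
		pr_err("example: bio %p failed: %d\n", bio, bio->bi_error);

	complete(done);
	bio_put(bio);
}

static void example_submit(struct bio *bio, struct completion *done)
{
	bio->bi_private = done;
	bio->bi_end_io = example_end_io;
	submit_bio(READ, bio);	/* submit_bio() still takes rw at this point */
}
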
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 384cf88..47a833f 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -138,16 +138,12 @@
 		sg_per_table = (total_sg_needed > max_sg_per_table) ?
 			max_sg_per_table : total_sg_needed;
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		/*
 		 * Reserve extra element for chain entry
 		 */
 		if (sg_per_table < total_sg_needed)
 			chain_entry = 1;
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
 				GFP_KERNEL);
 		if (!sg) {
@@ -158,15 +154,11 @@
 
 		sg_init_table(sg, sg_per_table + chain_entry);
 
-#ifdef CONFIG_ARCH_HAS_SG_CHAIN
-
 		if (i > 0) {
 			sg_chain(sg_table[i - 1].sg_table,
 				 max_sg_per_table + 1, sg);
 		}
 
-#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
-
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
 		sg_table[i].page_start_offset = page_offset;
@@ -430,42 +422,6 @@
 	prot_sg = &prot_table->sg_table[prot_page -
 					prot_table->page_start_offset];
 
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-
-	prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
-				   PAGE_SIZE);
-
-	/*
-	 * Allocate temporaly contiguous scatterlist entries if prot pages
-	 * straddles multiple scatterlist tables.
-	 */
-	if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
-		int i;
-
-		prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
-		if (!prot_sg)
-			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-
-		need_to_release = true;
-		sg_init_table(prot_sg, prot_npages);
-
-		for (i = 0; i < prot_npages; i++) {
-			if (prot_page + i > prot_table->page_end_offset) {
-				prot_table = rd_get_prot_table(dev,
-								prot_page + i);
-				if (!prot_table) {
-					kfree(prot_sg);
-					return rc;
-				}
-				sg_unmark_end(&prot_sg[i - 1]);
-			}
-			prot_sg[i] = prot_table->sg_table[prot_page + i -
-						prot_table->page_start_offset];
-		}
-	}
-
-#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
-
 	if (is_read)
 		rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
 				    prot_sg, prot_offset);
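
With the CONFIG_ARCH_HAS_SG_CHAIN fallbacks gone, the rd backend relies on scatterlist chaining unconditionally: the first table reserves one extra slot and sg_chain() turns that slot into a link to the next table. A hedged sketch with made-up sizes:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *example_chain_two_tables(gfp_t gfp)
{
	struct scatterlist *a, *b;

	a = kcalloc(4 + 1, sizeof(*a), gfp);	/* 4 data entries + 1 chain slot */
	b = kcalloc(4, sizeof(*b), gfp);
	if (!a || !b) {
		kfree(a);
		kfree(b);
		return NULL;
	}

	sg_init_table(a, 4 + 1);
	sg_init_table(b, 4);
	sg_chain(a, 4 + 1, b);	/* last slot of 'a' now points at 'b' */

	/* sg_next()/for_each_sg() now walk all 8 data entries transparently */
	return a;
}
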
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 5820e85..2ac0c70 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -340,7 +340,7 @@
 
 	/* check result for the last window */
 	msr_now = pkg_state_counter();
-	rdtscll(tsc_now);
+	tsc_now = rdtsc();
 
 	/* calculate pkg cstate vs tsc ratio */
 	if (!msr_last || !tsc_last)
@@ -482,7 +482,7 @@
 	u64 val64;
 
 	msr_now = pkg_state_counter();
-	rdtscll(tsc_now);
+	tsc_now = rdtsc();
 	jiffies_now = jiffies;
 
 	/* calculate pkg cstate vs tsc ratio */
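
The rdtscll() macro is replaced by rdtsc(), which simply returns the 64-bit time-stamp counter (x86-only, from <asm/msr.h>). An illustrative delta measurement:

#include <linux/types.h>
#include <asm/msr.h>

static u64 example_tsc_delta(void)
{
	u64 start, end;

	start = rdtsc();
	/* ... the interval being measured ... */
	end = rdtsc();

	return end - start;	/* elapsed TSC cycles */
}
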
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 7006860..2516769 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -258,8 +258,7 @@
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
 	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
-	req_power = devm_kcalloc(&tz->device, num_actors * 5,
-				 sizeof(*req_power), GFP_KERNEL);
+	req_power = kcalloc(num_actors * 5, sizeof(*req_power), GFP_KERNEL);
 	if (!req_power) {
 		ret = -ENOMEM;
 		goto unlock;
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 4190199..a75146f 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -240,9 +240,9 @@
 {
 	struct hvsi_control *header = (struct hvsi_control *)packet;
 
-	switch (header->verb) {
+	switch (be16_to_cpu(header->verb)) {
 		case VSV_MODEM_CTL_UPDATE:
-			if ((header->word & HVSI_TSCD) == 0) {
+			if ((be32_to_cpu(header->word) & HVSI_TSCD) == 0) {
 				/* CD went away; no more connection */
 				pr_debug("hvsi%i: CD dropped\n", hp->index);
 				hp->mctrl &= TIOCM_CD;
@@ -267,6 +267,7 @@
 static void hvsi_recv_response(struct hvsi_struct *hp, uint8_t *packet)
 {
 	struct hvsi_query_response *resp = (struct hvsi_query_response *)packet;
+	uint32_t mctrl_word;
 
 	switch (hp->state) {
 		case HVSI_WAIT_FOR_VER_RESPONSE:
@@ -274,9 +275,10 @@
 			break;
 		case HVSI_WAIT_FOR_MCTRL_RESPONSE:
 			hp->mctrl = 0;
-			if (resp->u.mctrl_word & HVSI_TSDTR)
+			mctrl_word = be32_to_cpu(resp->u.mctrl_word);
+			if (mctrl_word & HVSI_TSDTR)
 				hp->mctrl |= TIOCM_DTR;
-			if (resp->u.mctrl_word & HVSI_TSCD)
+			if (mctrl_word & HVSI_TSCD)
 				hp->mctrl |= TIOCM_CD;
 			__set_state(hp, HVSI_OPEN);
 			break;
@@ -295,10 +297,10 @@
 
 	packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query_response);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-	packet.verb = VSV_SEND_VERSION_NUMBER;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+	packet.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
 	packet.u.version = HVSI_VERSION;
-	packet.query_seqno = query_seqno+1;
+	packet.query_seqno = cpu_to_be16(query_seqno+1);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -319,7 +321,7 @@
 
 	switch (hp->state) {
 		case HVSI_WAIT_FOR_VER_QUERY:
-			hvsi_version_respond(hp, query->hdr.seqno);
+			hvsi_version_respond(hp, be16_to_cpu(query->hdr.seqno));
 			__set_state(hp, HVSI_OPEN);
 			break;
 		default:
@@ -555,8 +557,8 @@
 
 	packet.hdr.type = VS_QUERY_PACKET_HEADER;
 	packet.hdr.len = sizeof(struct hvsi_query);
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
-	packet.verb = verb;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
+	packet.verb = cpu_to_be16(verb);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -596,14 +598,14 @@
 	struct hvsi_control packet __ALIGNED__;
 	int wrote;
 
-	packet.hdr.type = VS_CONTROL_PACKET_HEADER,
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = sizeof(struct hvsi_control);
-	packet.verb = VSV_SET_MODEM_CTL;
-	packet.mask = HVSI_TSDTR;
+	packet.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+	packet.mask = cpu_to_be32(HVSI_TSDTR);
 
 	if (mctrl & TIOCM_DTR)
-		packet.word = HVSI_TSDTR;
+		packet.word = cpu_to_be32(HVSI_TSDTR);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -680,7 +682,7 @@
 	BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
 
 	packet.hdr.type = VS_DATA_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = count + sizeof(struct hvsi_header);
 	memcpy(&packet.data, buf, count);
 
@@ -697,9 +699,9 @@
 	struct hvsi_control packet __ALIGNED__;
 
 	packet.hdr.type = VS_CONTROL_PACKET_HEADER;
-	packet.hdr.seqno = atomic_inc_return(&hp->seqno);
+	packet.hdr.seqno = cpu_to_be16(atomic_inc_return(&hp->seqno));
 	packet.hdr.len = 6;
-	packet.verb = VSV_CLOSE_PROTOCOL;
+	packet.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
 
 	pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
 	dbg_dump_hex((uint8_t*)&packet, packet.hdr.len);
@@ -1180,7 +1182,7 @@
 	/* search device tree for vty nodes */
 	for_each_compatible_node(vty, "serial", "hvterm-protocol") {
 		struct hvsi_struct *hp;
-		const uint32_t *vtermno, *irq;
+		const __be32 *vtermno, *irq;
 
 		vtermno = of_get_property(vty, "reg", NULL);
 		irq = of_get_property(vty, "interrupts", NULL);
@@ -1202,11 +1204,11 @@
 		hp->index = hvsi_count;
 		hp->inbuf_end = hp->inbuf;
 		hp->state = HVSI_CLOSED;
-		hp->vtermno = *vtermno;
-		hp->virq = irq_create_mapping(NULL, irq[0]);
+		hp->vtermno = be32_to_cpup(vtermno);
+		hp->virq = irq_create_mapping(NULL, be32_to_cpup(irq));
 		if (hp->virq == 0) {
 			printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
-				__func__, irq[0]);
+			       __func__, be32_to_cpup(irq));
 			tty_port_destroy(&hp->port);
 			continue;
 		}
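
The hvsi changes annotate the on-wire packet fields as big-endian and convert at the boundary with cpu_to_be*()/be*_to_cpu(), which is also what sparse checks for. A hedged sketch; the struct below is invented, not the real hvsi layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_hdr {
	u8	type;
	u8	len;
	__be16	seqno;	/* big-endian on the wire */
	__be32	word;
};

static void example_fill(struct example_hdr *hdr, u16 seqno, u32 word)
{
	hdr->seqno = cpu_to_be16(seqno);	/* CPU order -> wire order */
	hdr->word  = cpu_to_be32(word);
}

static bool example_flag_set(const struct example_hdr *hdr, u32 flag)
{
	/* wire order -> CPU order before testing bits */
	return (be32_to_cpu(hdr->word) & flag) != 0;
}
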
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 358323c..a8c8cfd 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -879,6 +879,11 @@
 	.chars_in_buffer	= mips_ejtag_fdc_tty_chars_in_buffer,
 };
 
+int __weak get_c0_fdc_int(void)
+{
+	return -1;
+}
+
 static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
 {
 	int ret, nport;
@@ -967,9 +972,7 @@
 	wake_up_process(priv->thread);
 
 	/* Look for an FDC IRQ */
-	priv->irq = -1;
-	if (get_c0_fdc_int)
-		priv->irq = get_c0_fdc_int();
+	priv->irq = get_c0_fdc_int();
 
 	/* Try requesting the IRQ */
 	if (priv->irq >= 0) {
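
The new get_c0_fdc_int() fallback uses the __weak attribute: the weak definition is linked only when no platform supplies a strong one, which is why the probe path can drop its old address check. Sketch of the pattern, with the platform half shown as a comment and its IRQ name invented:

#include <linux/compiler.h>

/* generic fallback, used only if nothing else defines the symbol */
int __weak get_c0_fdc_int(void)
{
	return -1;	/* no FDC interrupt available */
}

/*
 * A platform file can provide the strong definition, e.g.:
 *
 *	int get_c0_fdc_int(void)
 *	{
 *		return MY_PLATFORM_FDC_IRQ;
 *	}
 */
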
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 382d3fc..c3fe026 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2712,7 +2712,7 @@
 	memcpy(skb_put(skb, size), in_buf, size);
 
 	skb->dev = net;
-	skb->protocol = __constant_htons(ETH_P_IP);
+	skb->protocol = htons(ETH_P_IP);
 
 	/* Ship it off to the kernel */
 	netif_rx(skb);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index ee8bfac..20932cc 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2147,6 +2147,8 @@
 
 static int job_control(struct tty_struct *tty, struct file *file)
 {
+	struct pid *pgrp;
+
 	/* Job control check -- must be done at start and after
 	   every sleep (POSIX.1 7.1.1.4). */
 	/* NOTE: not yet done after every sleep pending a thorough
@@ -2156,18 +2158,25 @@
 	    current->signal->tty != tty)
 		return 0;
 
+	rcu_read_lock();
+	pgrp = task_pgrp(current);
+
 	spin_lock_irq(&tty->ctrl_lock);
 	if (!tty->pgrp)
 		printk(KERN_ERR "n_tty_read: no tty->pgrp!\n");
-	else if (task_pgrp(current) != tty->pgrp) {
+	else if (pgrp != tty->pgrp) {
 		spin_unlock_irq(&tty->ctrl_lock);
-		if (is_ignored(SIGTTIN) || is_current_pgrp_orphaned())
+		if (is_ignored(SIGTTIN) || is_current_pgrp_orphaned()) {
+			rcu_read_unlock();
 			return -EIO;
-		kill_pgrp(task_pgrp(current), SIGTTIN, 1);
+		}
+		kill_pgrp(pgrp, SIGTTIN, 1);
+		rcu_read_unlock();
 		set_thread_flag(TIF_SIGPENDING);
 		return -ERESTARTSYS;
 	}
 	spin_unlock_irq(&tty->ctrl_lock);
+	rcu_read_unlock();
 	return 0;
 }
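
The job_control() change widens the RCU read-side section so that the struct pid obtained from task_pgrp() stays valid across the kill_pgrp() call. A hedged sketch of that locking pattern in isolation:

#include <linux/rcupdate.h>
#include <linux/sched.h>

static void example_signal_own_pgrp(int sig)
{
	struct pid *pgrp;

	rcu_read_lock();
	pgrp = task_pgrp(current);	/* only stable while the RCU read lock is held */
	kill_pgrp(pgrp, sig, 1);	/* priv = 1: signal sent by the kernel */
	rcu_read_unlock();
}
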
 
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 4d5e840..4d5937c 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -26,6 +26,12 @@
 #include <linux/mutex.h>
 #include <linux/poll.h>
 
+#undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...)	tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...)	do {} while (0)
+#endif
 
 #ifdef CONFIG_UNIX98_PTYS
 static struct tty_driver *ptm_driver;
@@ -779,6 +785,8 @@
 	if (retval)
 		goto err_release;
 
+	tty_debug_hangup(tty, "(tty count=%d)\n", tty->count);
+
 	tty_unlock(tty);
 	return 0;
 err_release:
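
tty_debug_hangup() follows the usual compiled-out debug macro idiom: both variants expand to a single statement, so call sites look identical whether or not TTY_DEBUG_HANGUP is defined. A small stand-alone illustration with invented names:

#include <stdio.h>

#define DEBUG_ENABLED 0

#if DEBUG_ENABLED
# define debug(fmt, ...)	do { printf(fmt, ##__VA_ARGS__); fflush(stdout); } while (0)
#else
# define debug(fmt, ...)	do { } while (0)
#endif

int main(void)
{
	int fail = 0;

	if (fail)
		debug("something failed\n");	/* a single statement either way */
	else
		printf("ok\n");
	return 0;
}
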
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index c43f74c..d54dcd8 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -42,9 +42,9 @@
 	size_t			rx_size;
 	size_t			tx_size;
 
-	unsigned char		tx_running:1;
-	unsigned char		tx_err: 1;
-	unsigned char		rx_running:1;
+	unsigned char		tx_running;
+	unsigned char		tx_err;
+	unsigned char		rx_running;
 };
 
 struct old_serial_port {
@@ -211,3 +211,14 @@
 	}
 	return 1;
 }
+
+static inline int serial_index(struct uart_port *port)
+{
+	return port->minor - 64;
+}
+
+#if 0
+#define DEBUG_INTR(fmt...)	printk(fmt)
+#else
+#define DEBUG_INTR(fmt...)	do { } while (0)
+#endif
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 37fff12..cfbb9d7 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -1,25 +1,23 @@
 /*
- *  Driver for 8250/16550-type serial ports
+ *  Universal/legacy driver for 8250/16550-type serial ports
  *
  *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
  *
  *  Copyright (C) 2001 Russell King.
  *
+ *  Supports: ISA-compatible 8250/16550 ports
+ *	      PNP 8250/16550 ports
+ *	      early_serial_setup() ports
+ *	      userspace-configurable "phantom" ports
+ *	      "serial8250" platform devices
+ *	      serial8250_register_8250_port() ports
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
- *
- * A note about mapbase / membase
- *
- *  mapbase is the physical address of the IO port.
- *  membase is an 'ioremapped' cookie.
  */
 
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/ioport.h>
@@ -58,33 +56,10 @@
 
 static struct uart_driver serial8250_reg;
 
-static int serial_index(struct uart_port *port)
-{
-	return port->minor - 64;
-}
-
 static unsigned int skip_txen_test; /* force skip of txen test at init time */
 
-/*
- * Debugging.
- */
-#if 0
-#define DEBUG_AUTOCONF(fmt...)	printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...)	do { } while (0)
-#endif
-
-#if 0
-#define DEBUG_INTR(fmt...)	printk(fmt)
-#else
-#define DEBUG_INTR(fmt...)	do { } while (0)
-#endif
-
 #define PASS_LIMIT	512
 
-#define BOTH_EMPTY 	(UART_LSR_TEMT | UART_LSR_THRE)
-
-
 #include <asm/serial.h>
 /*
  * SERIAL_PORT_DFNS tells us about built-in ports that have no
@@ -120,1563 +95,6 @@
 static DEFINE_MUTEX(hash_mutex);	/* Used to walk the hash */
 
 /*
- * Here we define the default xmit fifo size used for each type of UART.
- */
-static const struct serial8250_config uart_config[] = {
-	[PORT_UNKNOWN] = {
-		.name		= "unknown",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-	},
-	[PORT_8250] = {
-		.name		= "8250",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-	},
-	[PORT_16450] = {
-		.name		= "16450",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-	},
-	[PORT_16550] = {
-		.name		= "16550",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-	},
-	[PORT_16550A] = {
-		.name		= "16550A",
-		.fifo_size	= 16,
-		.tx_loadsz	= 16,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.rxtrig_bytes	= {1, 4, 8, 14},
-		.flags		= UART_CAP_FIFO,
-	},
-	[PORT_CIRRUS] = {
-		.name		= "Cirrus",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-	},
-	[PORT_16650] = {
-		.name		= "ST16650",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-	},
-	[PORT_16650V2] = {
-		.name		= "ST16650V2",
-		.fifo_size	= 32,
-		.tx_loadsz	= 16,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
-				  UART_FCR_T_TRIG_00,
-		.rxtrig_bytes	= {8, 16, 24, 28},
-		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-	},
-	[PORT_16750] = {
-		.name		= "TI16750",
-		.fifo_size	= 64,
-		.tx_loadsz	= 64,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
-				  UART_FCR7_64BYTE,
-		.rxtrig_bytes	= {1, 16, 32, 56},
-		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
-	},
-	[PORT_STARTECH] = {
-		.name		= "Startech",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1,
-	},
-	[PORT_16C950] = {
-		.name		= "16C950/954",
-		.fifo_size	= 128,
-		.tx_loadsz	= 128,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		/* UART_CAP_EFR breaks billionon CF bluetooth card. */
-		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
-	},
-	[PORT_16654] = {
-		.name		= "ST16654",
-		.fifo_size	= 64,
-		.tx_loadsz	= 32,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
-				  UART_FCR_T_TRIG_10,
-		.rxtrig_bytes	= {8, 16, 56, 60},
-		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-	},
-	[PORT_16850] = {
-		.name		= "XR16850",
-		.fifo_size	= 128,
-		.tx_loadsz	= 128,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
-	},
-	[PORT_RSA] = {
-		.name		= "RSA",
-		.fifo_size	= 2048,
-		.tx_loadsz	= 2048,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
-		.flags		= UART_CAP_FIFO,
-	},
-	[PORT_NS16550A] = {
-		.name		= "NS16550A",
-		.fifo_size	= 16,
-		.tx_loadsz	= 16,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_NATSEMI,
-	},
-	[PORT_XSCALE] = {
-		.name		= "XScale",
-		.fifo_size	= 32,
-		.tx_loadsz	= 32,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
-	},
-	[PORT_OCTEON] = {
-		.name		= "OCTEON",
-		.fifo_size	= 64,
-		.tx_loadsz	= 64,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO,
-	},
-	[PORT_AR7] = {
-		.name		= "AR7",
-		.fifo_size	= 16,
-		.tx_loadsz	= 16,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
-	},
-	[PORT_U6_16550A] = {
-		.name		= "U6_16550A",
-		.fifo_size	= 64,
-		.tx_loadsz	= 64,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
-	},
-	[PORT_TEGRA] = {
-		.name		= "Tegra",
-		.fifo_size	= 32,
-		.tx_loadsz	= 8,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
-				  UART_FCR_T_TRIG_01,
-		.rxtrig_bytes	= {1, 4, 8, 14},
-		.flags		= UART_CAP_FIFO | UART_CAP_RTOIE,
-	},
-	[PORT_XR17D15X] = {
-		.name		= "XR17D15X",
-		.fifo_size	= 64,
-		.tx_loadsz	= 64,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
-				  UART_CAP_SLEEP,
-	},
-	[PORT_XR17V35X] = {
-		.name		= "XR17V35X",
-		.fifo_size	= 256,
-		.tx_loadsz	= 256,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
-				  UART_FCR_T_TRIG_11,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
-				  UART_CAP_SLEEP,
-	},
-	[PORT_LPC3220] = {
-		.name		= "LPC3220",
-		.fifo_size	= 64,
-		.tx_loadsz	= 32,
-		.fcr		= UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
-				  UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
-		.flags		= UART_CAP_FIFO,
-	},
-	[PORT_BRCM_TRUMANAGE] = {
-		.name		= "TruManage",
-		.fifo_size	= 1,
-		.tx_loadsz	= 1024,
-		.flags		= UART_CAP_HFIFO,
-	},
-	[PORT_8250_CIR] = {
-		.name		= "CIR port"
-	},
-	[PORT_ALTR_16550_F32] = {
-		.name		= "Altera 16550 FIFO32",
-		.fifo_size	= 32,
-		.tx_loadsz	= 32,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
-	},
-	[PORT_ALTR_16550_F64] = {
-		.name		= "Altera 16550 FIFO64",
-		.fifo_size	= 64,
-		.tx_loadsz	= 64,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
-	},
-	[PORT_ALTR_16550_F128] = {
-		.name		= "Altera 16550 FIFO128",
-		.fifo_size	= 128,
-		.tx_loadsz	= 128,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
-	},
-/* tx_loadsz is set to 63-bytes instead of 64-bytes to implement
-workaround of errata A-008006 which states that tx_loadsz should  be
-configured less than Maximum supported fifo bytes */
-	[PORT_16550A_FSL64] = {
-		.name		= "16550A_FSL64",
-		.fifo_size	= 64,
-		.tx_loadsz	= 63,
-		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
-				  UART_FCR7_64BYTE,
-		.flags		= UART_CAP_FIFO,
-	},
-};
-
-/* Uart divisor latch read */
-static int default_serial_dl_read(struct uart_8250_port *up)
-{
-	return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
-}
-
-/* Uart divisor latch write */
-static void default_serial_dl_write(struct uart_8250_port *up, int value)
-{
-	serial_out(up, UART_DLL, value & 0xff);
-	serial_out(up, UART_DLM, value >> 8 & 0xff);
-}
-
-#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-
-/* Au1x00/RT288x UART hardware has a weird register layout */
-static const s8 au_io_in_map[8] = {
-	 0,	/* UART_RX  */
-	 2,	/* UART_IER */
-	 3,	/* UART_IIR */
-	 5,	/* UART_LCR */
-	 6,	/* UART_MCR */
-	 7,	/* UART_LSR */
-	 8,	/* UART_MSR */
-	-1,	/* UART_SCR (unmapped) */
-};
-
-static const s8 au_io_out_map[8] = {
-	 1,	/* UART_TX  */
-	 2,	/* UART_IER */
-	 4,	/* UART_FCR */
-	 5,	/* UART_LCR */
-	 6,	/* UART_MCR */
-	-1,	/* UART_LSR (unmapped) */
-	-1,	/* UART_MSR (unmapped) */
-	-1,	/* UART_SCR (unmapped) */
-};
-
-static unsigned int au_serial_in(struct uart_port *p, int offset)
-{
-	if (offset >= ARRAY_SIZE(au_io_in_map))
-		return UINT_MAX;
-	offset = au_io_in_map[offset];
-	if (offset < 0)
-		return UINT_MAX;
-	return __raw_readl(p->membase + (offset << p->regshift));
-}
-
-static void au_serial_out(struct uart_port *p, int offset, int value)
-{
-	if (offset >= ARRAY_SIZE(au_io_out_map))
-		return;
-	offset = au_io_out_map[offset];
-	if (offset < 0)
-		return;
-	__raw_writel(value, p->membase + (offset << p->regshift));
-}
-
-/* Au1x00 haven't got a standard divisor latch */
-static int au_serial_dl_read(struct uart_8250_port *up)
-{
-	return __raw_readl(up->port.membase + 0x28);
-}
-
-static void au_serial_dl_write(struct uart_8250_port *up, int value)
-{
-	__raw_writel(value, up->port.membase + 0x28);
-}
-
-#endif
-
-static unsigned int hub6_serial_in(struct uart_port *p, int offset)
-{
-	offset = offset << p->regshift;
-	outb(p->hub6 - 1 + offset, p->iobase);
-	return inb(p->iobase + 1);
-}
-
-static void hub6_serial_out(struct uart_port *p, int offset, int value)
-{
-	offset = offset << p->regshift;
-	outb(p->hub6 - 1 + offset, p->iobase);
-	outb(value, p->iobase + 1);
-}
-
-static unsigned int mem_serial_in(struct uart_port *p, int offset)
-{
-	offset = offset << p->regshift;
-	return readb(p->membase + offset);
-}
-
-static void mem_serial_out(struct uart_port *p, int offset, int value)
-{
-	offset = offset << p->regshift;
-	writeb(value, p->membase + offset);
-}
-
-static void mem32_serial_out(struct uart_port *p, int offset, int value)
-{
-	offset = offset << p->regshift;
-	writel(value, p->membase + offset);
-}
-
-static unsigned int mem32_serial_in(struct uart_port *p, int offset)
-{
-	offset = offset << p->regshift;
-	return readl(p->membase + offset);
-}
-
-static void mem32be_serial_out(struct uart_port *p, int offset, int value)
-{
-	offset = offset << p->regshift;
-	iowrite32be(value, p->membase + offset);
-}
-
-static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
-{
-	offset = offset << p->regshift;
-	return ioread32be(p->membase + offset);
-}
-
-static unsigned int io_serial_in(struct uart_port *p, int offset)
-{
-	offset = offset << p->regshift;
-	return inb(p->iobase + offset);
-}
-
-static void io_serial_out(struct uart_port *p, int offset, int value)
-{
-	offset = offset << p->regshift;
-	outb(value, p->iobase + offset);
-}
-
-static int serial8250_default_handle_irq(struct uart_port *port);
-static int exar_handle_irq(struct uart_port *port);
-
-static void set_io_from_upio(struct uart_port *p)
-{
-	struct uart_8250_port *up = up_to_u8250p(p);
-
-	up->dl_read = default_serial_dl_read;
-	up->dl_write = default_serial_dl_write;
-
-	switch (p->iotype) {
-	case UPIO_HUB6:
-		p->serial_in = hub6_serial_in;
-		p->serial_out = hub6_serial_out;
-		break;
-
-	case UPIO_MEM:
-		p->serial_in = mem_serial_in;
-		p->serial_out = mem_serial_out;
-		break;
-
-	case UPIO_MEM32:
-		p->serial_in = mem32_serial_in;
-		p->serial_out = mem32_serial_out;
-		break;
-
-	case UPIO_MEM32BE:
-		p->serial_in = mem32be_serial_in;
-		p->serial_out = mem32be_serial_out;
-		break;
-
-#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
-	case UPIO_AU:
-		p->serial_in = au_serial_in;
-		p->serial_out = au_serial_out;
-		up->dl_read = au_serial_dl_read;
-		up->dl_write = au_serial_dl_write;
-		break;
-#endif
-
-	default:
-		p->serial_in = io_serial_in;
-		p->serial_out = io_serial_out;
-		break;
-	}
-	/* Remember loaded iotype */
-	up->cur_iotype = p->iotype;
-	p->handle_irq = serial8250_default_handle_irq;
-}
-
-static void
-serial_port_out_sync(struct uart_port *p, int offset, int value)
-{
-	switch (p->iotype) {
-	case UPIO_MEM:
-	case UPIO_MEM32:
-	case UPIO_MEM32BE:
-	case UPIO_AU:
-		p->serial_out(p, offset, value);
-		p->serial_in(p, UART_LCR);	/* safe, no side-effects */
-		break;
-	default:
-		p->serial_out(p, offset, value);
-	}
-}
-
-/*
- * For the 16C950
- */
-static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
-{
-	serial_out(up, UART_SCR, offset);
-	serial_out(up, UART_ICR, value);
-}
-
-static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
-{
-	unsigned int value;
-
-	serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
-	serial_out(up, UART_SCR, offset);
-	value = serial_in(up, UART_ICR);
-	serial_icr_write(up, UART_ACR, up->acr);
-
-	return value;
-}
-
-/*
- * FIFO support.
- */
-static void serial8250_clear_fifos(struct uart_8250_port *p)
-{
-	if (p->capabilities & UART_CAP_FIFO) {
-		serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
-		serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
-			       UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-		serial_out(p, UART_FCR, 0);
-	}
-}
-
-void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
-{
-	serial8250_clear_fifos(p);
-	serial_out(p, UART_FCR, p->fcr);
-}
-EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
-
-void serial8250_rpm_get(struct uart_8250_port *p)
-{
-	if (!(p->capabilities & UART_CAP_RPM))
-		return;
-	pm_runtime_get_sync(p->port.dev);
-}
-EXPORT_SYMBOL_GPL(serial8250_rpm_get);
-
-void serial8250_rpm_put(struct uart_8250_port *p)
-{
-	if (!(p->capabilities & UART_CAP_RPM))
-		return;
-	pm_runtime_mark_last_busy(p->port.dev);
-	pm_runtime_put_autosuspend(p->port.dev);
-}
-EXPORT_SYMBOL_GPL(serial8250_rpm_put);
-
-/*
- * These two wrappers ensure that enable_runtime_pm_tx() can be called more than
- * once and disable_runtime_pm_tx() will still disable RPM because the fifo is
- * empty and the HW can idle again.
- */
-static void serial8250_rpm_get_tx(struct uart_8250_port *p)
-{
-	unsigned char rpm_active;
-
-	if (!(p->capabilities & UART_CAP_RPM))
-		return;
-
-	rpm_active = xchg(&p->rpm_tx_active, 1);
-	if (rpm_active)
-		return;
-	pm_runtime_get_sync(p->port.dev);
-}
-
-static void serial8250_rpm_put_tx(struct uart_8250_port *p)
-{
-	unsigned char rpm_active;
-
-	if (!(p->capabilities & UART_CAP_RPM))
-		return;
-
-	rpm_active = xchg(&p->rpm_tx_active, 0);
-	if (!rpm_active)
-		return;
-	pm_runtime_mark_last_busy(p->port.dev);
-	pm_runtime_put_autosuspend(p->port.dev);
-}
-
-/*
- * IER sleep support.  UARTs which have EFRs need the "extended
- * capability" bit enabled.  Note that on XR16C850s, we need to
- * reset LCR to write to IER.
- */
-static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
-{
-	unsigned char lcr = 0, efr = 0;
-	/*
-	 * Exar UARTs have a SLEEP register that enables or disables
-	 * each UART to enter sleep mode separately.  On the XR17V35x the
-	 * register is accessible to each UART at the UART_EXAR_SLEEP
-	 * offset but the UART channel may only write to the corresponding
-	 * bit.
-	 */
-	serial8250_rpm_get(p);
-	if ((p->port.type == PORT_XR17V35X) ||
-	   (p->port.type == PORT_XR17D15X)) {
-		serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
-		goto out;
-	}
-
-	if (p->capabilities & UART_CAP_SLEEP) {
-		if (p->capabilities & UART_CAP_EFR) {
-			lcr = serial_in(p, UART_LCR);
-			efr = serial_in(p, UART_EFR);
-			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
-			serial_out(p, UART_EFR, UART_EFR_ECB);
-			serial_out(p, UART_LCR, 0);
-		}
-		serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
-		if (p->capabilities & UART_CAP_EFR) {
-			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
-			serial_out(p, UART_EFR, efr);
-			serial_out(p, UART_LCR, lcr);
-		}
-	}
-out:
-	serial8250_rpm_put(p);
-}
-
-#ifdef CONFIG_SERIAL_8250_RSA
-/*
- * Attempts to turn on the RSA FIFO.  Returns zero on failure.
- * We set the port uart clock rate if we succeed.
- */
-static int __enable_rsa(struct uart_8250_port *up)
-{
-	unsigned char mode;
-	int result;
-
-	mode = serial_in(up, UART_RSA_MSR);
-	result = mode & UART_RSA_MSR_FIFO;
-
-	if (!result) {
-		serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
-		mode = serial_in(up, UART_RSA_MSR);
-		result = mode & UART_RSA_MSR_FIFO;
-	}
-
-	if (result)
-		up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
-
-	return result;
-}
-
-static void enable_rsa(struct uart_8250_port *up)
-{
-	if (up->port.type == PORT_RSA) {
-		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
-			spin_lock_irq(&up->port.lock);
-			__enable_rsa(up);
-			spin_unlock_irq(&up->port.lock);
-		}
-		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
-			serial_out(up, UART_RSA_FRR, 0);
-	}
-}
-
-/*
- * Attempts to turn off the RSA FIFO.  Returns zero on failure.
- * It is unknown why interrupts were disabled in here.  However,
- * the caller is expected to preserve this behaviour by grabbing
- * the spinlock before calling this function.
- */
-static void disable_rsa(struct uart_8250_port *up)
-{
-	unsigned char mode;
-	int result;
-
-	if (up->port.type == PORT_RSA &&
-	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
-		spin_lock_irq(&up->port.lock);
-
-		mode = serial_in(up, UART_RSA_MSR);
-		result = !(mode & UART_RSA_MSR_FIFO);
-
-		if (!result) {
-			serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
-			mode = serial_in(up, UART_RSA_MSR);
-			result = !(mode & UART_RSA_MSR_FIFO);
-		}
-
-		if (result)
-			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
-		spin_unlock_irq(&up->port.lock);
-	}
-}
-#endif /* CONFIG_SERIAL_8250_RSA */
-
-/*
- * This is a quickie test to see how big the FIFO is.
- * It doesn't work at all the time, more's the pity.
- */
-static int size_fifo(struct uart_8250_port *up)
-{
-	unsigned char old_fcr, old_mcr, old_lcr;
-	unsigned short old_dl;
-	int count;
-
-	old_lcr = serial_in(up, UART_LCR);
-	serial_out(up, UART_LCR, 0);
-	old_fcr = serial_in(up, UART_FCR);
-	old_mcr = serial_in(up, UART_MCR);
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-		    UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-	serial_out(up, UART_MCR, UART_MCR_LOOP);
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-	old_dl = serial_dl_read(up);
-	serial_dl_write(up, 0x0001);
-	serial_out(up, UART_LCR, 0x03);
-	for (count = 0; count < 256; count++)
-		serial_out(up, UART_TX, count);
-	mdelay(20);/* FIXME - schedule_timeout */
-	for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
-	     (count < 256); count++)
-		serial_in(up, UART_RX);
-	serial_out(up, UART_FCR, old_fcr);
-	serial_out(up, UART_MCR, old_mcr);
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-	serial_dl_write(up, old_dl);
-	serial_out(up, UART_LCR, old_lcr);
-
-	return count;
-}
-
-/*
- * Read UART ID using the divisor method - set DLL and DLM to zero
- * and the revision will be in DLL and device type in DLM.  We
- * preserve the device state across this.
- */
-static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
-{
-	unsigned char old_dll, old_dlm, old_lcr;
-	unsigned int id;
-
-	old_lcr = serial_in(p, UART_LCR);
-	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
-
-	old_dll = serial_in(p, UART_DLL);
-	old_dlm = serial_in(p, UART_DLM);
-
-	serial_out(p, UART_DLL, 0);
-	serial_out(p, UART_DLM, 0);
-
-	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
-	serial_out(p, UART_DLL, old_dll);
-	serial_out(p, UART_DLM, old_dlm);
-	serial_out(p, UART_LCR, old_lcr);
-
-	return id;
-}
-
-/*
- * This is a helper routine to autodetect StarTech/Exar/Oxsemi UART's.
- * When this function is called we know it is at least a StarTech
- * 16650 V2, but it might be one of several StarTech UARTs, or one of
- * its clones.  (We treat the broken original StarTech 16650 V1 as a
- * 16550, and why not?  Startech doesn't seem to even acknowledge its
- * existence.)
- *
- * What evil have men's minds wrought...
- */
-static void autoconfig_has_efr(struct uart_8250_port *up)
-{
-	unsigned int id1, id2, id3, rev;
-
-	/*
-	 * Everything with an EFR has SLEEP
-	 */
-	up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
-
-	/*
-	 * First we check to see if it's an Oxford Semiconductor UART.
-	 *
-	 * We have to do this here because some non-National
-	 * Semiconductor clone chips lock up if you try writing to the
-	 * LSR register (which serial_icr_read does)
-	 */
-
-	/*
-	 * Check for Oxford Semiconductor 16C950.
-	 *
-	 * EFR [4] must be set else this test fails.
-	 *
-	 * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
-	 * claims that it's needed for 952 dual UART's (which are not
-	 * recommended for new designs).
-	 */
-	up->acr = 0;
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-	serial_out(up, UART_EFR, UART_EFR_ECB);
-	serial_out(up, UART_LCR, 0x00);
-	id1 = serial_icr_read(up, UART_ID1);
-	id2 = serial_icr_read(up, UART_ID2);
-	id3 = serial_icr_read(up, UART_ID3);
-	rev = serial_icr_read(up, UART_REV);
-
-	DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
-
-	if (id1 == 0x16 && id2 == 0xC9 &&
-	    (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
-		up->port.type = PORT_16C950;
-
-		/*
-		 * Enable the workaround for the Oxford Semiconductor 952 rev B
-		 * chip, which seriously miscalculates baud rates when DLL
-		 * is 0.
-		 */
-		if (id3 == 0x52 && rev == 0x01)
-			up->bugs |= UART_BUG_QUOT;
-		return;
-	}
-
-	/*
-	 * We check for a XR16C850 by setting DLL and DLM to 0, and then
-	 * reading back DLL and DLM.  The chip type depends on the DLM
-	 * value read back:
-	 *  0x10 - XR16C850 and the DLL contains the chip revision.
-	 *  0x12 - XR16C2850.
-	 *  0x14 - XR16C854.
-	 */
-	id1 = autoconfig_read_divisor_id(up);
-	DEBUG_AUTOCONF("850id=%04x ", id1);
-
-	id2 = id1 >> 8;
-	if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
-		up->port.type = PORT_16850;
-		return;
-	}
-
-	/*
-	 * It wasn't an XR16C850.
-	 *
-	 * We distinguish between the '654 and the '650 by counting
-	 * how many bytes are in the FIFO.  I'm using this for now,
-	 * since that's the technique that was sent to me in the
-	 * serial driver update, but I'm not convinced this works.
-	 * I've had problems doing this in the past.  -TYT
-	 */
-	if (size_fifo(up) == 64)
-		up->port.type = PORT_16654;
-	else
-		up->port.type = PORT_16650V2;
-}
-
-/*
- * We detected a chip without a FIFO.  Only two fall into
- * this category - the original 8250 and the 16450.  The
- * 16450 has a scratch register (accessible with LCR=0)
- */
-static void autoconfig_8250(struct uart_8250_port *up)
-{
-	unsigned char scratch, status1, status2;
-
-	up->port.type = PORT_8250;
-
-	scratch = serial_in(up, UART_SCR);
-	serial_out(up, UART_SCR, 0xa5);
-	status1 = serial_in(up, UART_SCR);
-	serial_out(up, UART_SCR, 0x5a);
-	status2 = serial_in(up, UART_SCR);
-	serial_out(up, UART_SCR, scratch);
-
-	if (status1 == 0xa5 && status2 == 0x5a)
-		up->port.type = PORT_16450;
-}
-
-static int broken_efr(struct uart_8250_port *up)
-{
-	/*
-	 * Exar ST16C2550 "A2" devices incorrectly detect as
-	 * having an EFR, and report an ID of 0x0201.  See
-	 * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
-	 */
-	if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
-		return 1;
-
-	return 0;
-}
-
-/*
- * We know that the chip has FIFOs.  Does it have an EFR?  The
- * EFR is located in the same register position as the IIR and
- * we know the top two bits of the IIR are currently set.  The
- * EFR should contain zero.  Try to read the EFR.
- */
-static void autoconfig_16550a(struct uart_8250_port *up)
-{
-	unsigned char status1, status2;
-	unsigned int iersave;
-
-	up->port.type = PORT_16550A;
-	up->capabilities |= UART_CAP_FIFO;
-
-	/*
-	 * XR17V35x UARTs have an extra divisor register, DLD,
-	 * that gets enabled when DLAB is set, which will
-	 * cause the device to incorrectly match and assign
-	 * port type to PORT_16650.  The EFR for this UART is
-	 * found at offset 0x09. Instead check the Device ID (DVID)
-	 * register for a 2, 4 or 8 port UART.
-	 */
-	if (up->port.flags & UPF_EXAR_EFR) {
-		status1 = serial_in(up, UART_EXAR_DVID);
-		if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
-			DEBUG_AUTOCONF("Exar XR17V35x ");
-			up->port.type = PORT_XR17V35X;
-			up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
-						UART_CAP_SLEEP;
-
-			return;
-		}
-
-	}
-
-	/*
-	 * Check for presence of the EFR when DLAB is set.
-	 * Only ST16C650V1 UARTs pass this test.
-	 */
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-	if (serial_in(up, UART_EFR) == 0) {
-		serial_out(up, UART_EFR, 0xA8);
-		if (serial_in(up, UART_EFR) != 0) {
-			DEBUG_AUTOCONF("EFRv1 ");
-			up->port.type = PORT_16650;
-			up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
-		} else {
-			serial_out(up, UART_LCR, 0);
-			serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
-				   UART_FCR7_64BYTE);
-			status1 = serial_in(up, UART_IIR) >> 5;
-			serial_out(up, UART_FCR, 0);
-			serial_out(up, UART_LCR, 0);
-
-			if (status1 == 7)
-				up->port.type = PORT_16550A_FSL64;
-			else
-				DEBUG_AUTOCONF("Motorola 8xxx DUART ");
-		}
-		serial_out(up, UART_EFR, 0);
-		return;
-	}
-
-	/*
-	 * Maybe it requires 0xbf to be written to the LCR.
-	 * (other ST16C650V2 UARTs, TI16C752A, etc)
-	 */
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-	if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
-		DEBUG_AUTOCONF("EFRv2 ");
-		autoconfig_has_efr(up);
-		return;
-	}
-
-	/*
-	 * Check for a National Semiconductor SuperIO chip.
-	 * Attempt to switch to bank 2, read the value of the LOOP bit
-	 * from EXCR1. Switch back to bank 0, change it in MCR. Then
-	 * switch back to bank 2, read it from EXCR1 again and check
-	 * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
-	 */
-	serial_out(up, UART_LCR, 0);
-	status1 = serial_in(up, UART_MCR);
-	serial_out(up, UART_LCR, 0xE0);
-	status2 = serial_in(up, 0x02); /* EXCR1 */
-
-	if (!((status2 ^ status1) & UART_MCR_LOOP)) {
-		serial_out(up, UART_LCR, 0);
-		serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
-		serial_out(up, UART_LCR, 0xE0);
-		status2 = serial_in(up, 0x02); /* EXCR1 */
-		serial_out(up, UART_LCR, 0);
-		serial_out(up, UART_MCR, status1);
-
-		if ((status2 ^ status1) & UART_MCR_LOOP) {
-			unsigned short quot;
-
-			serial_out(up, UART_LCR, 0xE0);
-
-			quot = serial_dl_read(up);
-			quot <<= 3;
-
-			if (ns16550a_goto_highspeed(up))
-				serial_dl_write(up, quot);
-
-			serial_out(up, UART_LCR, 0);
-
-			up->port.uartclk = 921600*16;
-			up->port.type = PORT_NS16550A;
-			up->capabilities |= UART_NATSEMI;
-			return;
-		}
-	}
-
-	/*
-	 * No EFR.  Try to detect a TI16750, which only sets bit 5 of
-	 * the IIR when 64 byte FIFO mode is enabled while DLAB is set.
-	 * Try setting it with and without DLAB set.  Cheap clones
-	 * set bit 5 without DLAB set.
-	 */
-	serial_out(up, UART_LCR, 0);
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
-	status1 = serial_in(up, UART_IIR) >> 5;
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
-	status2 = serial_in(up, UART_IIR) >> 5;
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-	serial_out(up, UART_LCR, 0);
-
-	DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
-
-	if (status1 == 6 && status2 == 7) {
-		up->port.type = PORT_16750;
-		up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
-		return;
-	}
-
-	/*
-	 * Try writing and reading the UART_IER_UUE bit (b6).
-	 * If it works, this is probably one of the Xscale platform's
-	 * internal UARTs.
-	 * We're going to explicitly set the UUE bit to 0 before
-	 * trying to write and read a 1 just to make sure it's not
-	 * already a 1 and maybe locked there before we even start.
-	 */
-	iersave = serial_in(up, UART_IER);
-	serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
-	if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
-		/*
-		 * OK it's in a known zero state, try writing and reading
-		 * without disturbing the current state of the other bits.
-		 */
-		serial_out(up, UART_IER, iersave | UART_IER_UUE);
-		if (serial_in(up, UART_IER) & UART_IER_UUE) {
-			/*
-			 * It's an Xscale.
-			 * We'll leave the UART_IER_UUE bit set to 1 (enabled).
-			 */
-			DEBUG_AUTOCONF("Xscale ");
-			up->port.type = PORT_XSCALE;
-			up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
-			return;
-		}
-	} else {
-		/*
-		 * If we got here we couldn't force the IER_UUE bit to 0.
-		 * Log it and continue.
-		 */
-		DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
-	}
-	serial_out(up, UART_IER, iersave);
-
-	/*
-	 * Exar uarts have EFR in a weird location
-	 */
-	if (up->port.flags & UPF_EXAR_EFR) {
-		DEBUG_AUTOCONF("Exar XR17D15x ");
-		up->port.type = PORT_XR17D15X;
-		up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
-				    UART_CAP_SLEEP;
-
-		return;
-	}
-
-	/*
-	 * We distinguish between 16550A and U6 16550A by counting
-	 * how many bytes are in the FIFO.
-	 */
-	if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
-		up->port.type = PORT_U6_16550A;
-		up->capabilities |= UART_CAP_AFE;
-	}
-}
-
-/*
- * This routine is called by rs_init() to initialize a specific serial
- * port.  It determines what type of UART chip this serial port is
- * using: 8250, 16450, 16550, 16550A.  The important question is
- * whether or not this UART is a 16550A, since that determines
- * whether we can use its FIFO features.
- */
-static void autoconfig(struct uart_8250_port *up)
-{
-	unsigned char status1, scratch, scratch2, scratch3;
-	unsigned char save_lcr, save_mcr;
-	struct uart_port *port = &up->port;
-	unsigned long flags;
-	unsigned int old_capabilities;
-
-	if (!port->iobase && !port->mapbase && !port->membase)
-		return;
-
-	DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
-		       serial_index(port), port->iobase, port->membase);
-
-	/*
-	 * We really do need global IRQs disabled here - we're going to
-	 * be frobbing the chip's IRQ enable register to see if it exists.
-	 */
-	spin_lock_irqsave(&port->lock, flags);
-
-	up->capabilities = 0;
-	up->bugs = 0;
-
-	if (!(port->flags & UPF_BUGGY_UART)) {
-		/*
-		 * Do a simple existence test first; if we fail this,
-		 * there's no point trying anything else.
-		 *
-		 * 0x80 is used as a nonsense port to guard against
-		 * false positives due to ISA bus float.  The
-		 * assumption is that 0x80 is a non-existent port,
-		 * which should be safe since include/asm/io.h also
-		 * makes this assumption.
-		 *
-		 * Note: this is safe as long as MCR bit 4 is clear
-		 * and the device is in "PC" mode.
-		 */
-		scratch = serial_in(up, UART_IER);
-		serial_out(up, UART_IER, 0);
-#ifdef __i386__
-		outb(0xff, 0x080);
-#endif
-		/*
-		 * Mask out the IER[7:4] bits for the test, as some UARTs
-		 * (e.g. TL 16C754B) only allow them to be modified if an
-		 * EFR bit is set.
-		 */
-		scratch2 = serial_in(up, UART_IER) & 0x0f;
-		serial_out(up, UART_IER, 0x0F);
-#ifdef __i386__
-		outb(0, 0x080);
-#endif
-		scratch3 = serial_in(up, UART_IER) & 0x0f;
-		serial_out(up, UART_IER, scratch);
-		if (scratch2 != 0 || scratch3 != 0x0F) {
-			/*
-			 * We failed; there's nothing here
-			 */
-			spin_unlock_irqrestore(&port->lock, flags);
-			DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
-				       scratch2, scratch3);
-			goto out;
-		}
-	}
-
-	save_mcr = serial_in(up, UART_MCR);
-	save_lcr = serial_in(up, UART_LCR);
-
-	/*
-	 * Check to see if a UART is really there.  Certain broken
-	 * internal modems based on the Rockwell chipset fail this
-	 * test, because they apparently don't implement the loopback
-	 * test mode.  So this test is skipped on the COM 1 through
-	 * COM 4 ports.  This *should* be safe, since no board
-	 * manufacturer would be stupid enough to design a board
-	 * that conflicts with COM 1-4 --- we hope!
-	 */
-	if (!(port->flags & UPF_SKIP_TEST)) {
-		serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
-		status1 = serial_in(up, UART_MSR) & 0xF0;
-		serial_out(up, UART_MCR, save_mcr);
-		if (status1 != 0x90) {
-			spin_unlock_irqrestore(&port->lock, flags);
-			DEBUG_AUTOCONF("LOOP test failed (%02x) ",
-				       status1);
-			goto out;
-		}
-	}
-
-	/*
-	 * We're pretty sure there's a port here.  Let's find out what
-	 * type of port it is.  The IIR top two bits allow us to find
-	 * out if it's 8250 or 16450, 16550, 16550A or later.  This
-	 * determines what we test for next.
-	 *
-	 * We also initialise the EFR (if any) to zero for later.  The
-	 * EFR occupies the same register location as the FCR and IIR.
-	 */
-	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-	serial_out(up, UART_EFR, 0);
-	serial_out(up, UART_LCR, 0);
-
-	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
-	scratch = serial_in(up, UART_IIR) >> 6;
-
-	switch (scratch) {
-	case 0:
-		autoconfig_8250(up);
-		break;
-	case 1:
-		port->type = PORT_UNKNOWN;
-		break;
-	case 2:
-		port->type = PORT_16550;
-		break;
-	case 3:
-		autoconfig_16550a(up);
-		break;
-	}
-
-#ifdef CONFIG_SERIAL_8250_RSA
-	/*
-	 * Only probe for RSA ports if we got the region.
-	 */
-	if (port->type == PORT_16550A && up->probe & UART_PROBE_RSA &&
-	    __enable_rsa(up))
-		port->type = PORT_RSA;
-#endif
-
-	serial_out(up, UART_LCR, save_lcr);
-
-	port->fifosize = uart_config[up->port.type].fifo_size;
-	old_capabilities = up->capabilities;
-	up->capabilities = uart_config[port->type].flags;
-	up->tx_loadsz = uart_config[port->type].tx_loadsz;
-
-	if (port->type == PORT_UNKNOWN)
-		goto out_lock;
-
-	/*
-	 * Reset the UART.
-	 */
-#ifdef CONFIG_SERIAL_8250_RSA
-	if (port->type == PORT_RSA)
-		serial_out(up, UART_RSA_FRR, 0);
-#endif
-	serial_out(up, UART_MCR, save_mcr);
-	serial8250_clear_fifos(up);
-	serial_in(up, UART_RX);
-	if (up->capabilities & UART_CAP_UUE)
-		serial_out(up, UART_IER, UART_IER_UUE);
-	else
-		serial_out(up, UART_IER, 0);
-
-out_lock:
-	spin_unlock_irqrestore(&port->lock, flags);
-	if (up->capabilities != old_capabilities) {
-		printk(KERN_WARNING
-		       "ttyS%d: detected caps %08x should be %08x\n",
-		       serial_index(port), old_capabilities,
-		       up->capabilities);
-	}
-out:
-	DEBUG_AUTOCONF("iir=%d ", scratch);
-	DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name);
-}
-
-static void autoconfig_irq(struct uart_8250_port *up)
-{
-	struct uart_port *port = &up->port;
-	unsigned char save_mcr, save_ier;
-	unsigned char save_ICP = 0;
-	unsigned int ICP = 0;
-	unsigned long irqs;
-	int irq;
-
-	if (port->flags & UPF_FOURPORT) {
-		ICP = (port->iobase & 0xfe0) | 0x1f;
-		save_ICP = inb_p(ICP);
-		outb_p(0x80, ICP);
-		inb_p(ICP);
-	}
-
-	/* forget possible initially masked and pending IRQ */
-	probe_irq_off(probe_irq_on());
-	save_mcr = serial_in(up, UART_MCR);
-	save_ier = serial_in(up, UART_IER);
-	serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
-
-	irqs = probe_irq_on();
-	serial_out(up, UART_MCR, 0);
-	udelay(10);
-	if (port->flags & UPF_FOURPORT) {
-		serial_out(up, UART_MCR,
-			    UART_MCR_DTR | UART_MCR_RTS);
-	} else {
-		serial_out(up, UART_MCR,
-			    UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
-	}
-	serial_out(up, UART_IER, 0x0f);	/* enable all intrs */
-	serial_in(up, UART_LSR);
-	serial_in(up, UART_RX);
-	serial_in(up, UART_IIR);
-	serial_in(up, UART_MSR);
-	serial_out(up, UART_TX, 0xFF);
-	udelay(20);
-	irq = probe_irq_off(irqs);
-
-	serial_out(up, UART_MCR, save_mcr);
-	serial_out(up, UART_IER, save_ier);
-
-	if (port->flags & UPF_FOURPORT)
-		outb_p(save_ICP, ICP);
-
-	port->irq = (irq > 0) ? irq : 0;
-}
-
-static inline void __stop_tx(struct uart_8250_port *p)
-{
-	if (p->ier & UART_IER_THRI) {
-		p->ier &= ~UART_IER_THRI;
-		serial_out(p, UART_IER, p->ier);
-		serial8250_rpm_put_tx(p);
-	}
-}
-
-static void serial8250_stop_tx(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	serial8250_rpm_get(up);
-	__stop_tx(up);
-
-	/*
-	 * We really want to stop the transmitter from sending.
-	 */
-	if (port->type == PORT_16C950) {
-		up->acr |= UART_ACR_TXDIS;
-		serial_icr_write(up, UART_ACR, up->acr);
-	}
-	serial8250_rpm_put(up);
-}
-
-static void serial8250_start_tx(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	serial8250_rpm_get_tx(up);
-
-	if (up->dma && !up->dma->tx_dma(up))
-		return;
-
-	if (!(up->ier & UART_IER_THRI)) {
-		up->ier |= UART_IER_THRI;
-		serial_port_out(port, UART_IER, up->ier);
-
-		if (up->bugs & UART_BUG_TXEN) {
-			unsigned char lsr;
-			lsr = serial_in(up, UART_LSR);
-			up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
-			if (lsr & UART_LSR_THRE)
-				serial8250_tx_chars(up);
-		}
-	}
-
-	/*
-	 * Re-enable the transmitter if we disabled it.
-	 */
-	if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
-		up->acr &= ~UART_ACR_TXDIS;
-		serial_icr_write(up, UART_ACR, up->acr);
-	}
-}
-
-static void serial8250_throttle(struct uart_port *port)
-{
-	port->throttle(port);
-}
-
-static void serial8250_unthrottle(struct uart_port *port)
-{
-	port->unthrottle(port);
-}
-
-static void serial8250_stop_rx(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	serial8250_rpm_get(up);
-
-	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
-	up->port.read_status_mask &= ~UART_LSR_DR;
-	serial_port_out(port, UART_IER, up->ier);
-
-	serial8250_rpm_put(up);
-}
-
-static void serial8250_disable_ms(struct uart_port *port)
-{
-	struct uart_8250_port *up =
-		container_of(port, struct uart_8250_port, port);
-
-	/* no MSR capabilities */
-	if (up->bugs & UART_BUG_NOMSR)
-		return;
-
-	up->ier &= ~UART_IER_MSI;
-	serial_port_out(port, UART_IER, up->ier);
-}
-
-static void serial8250_enable_ms(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	/* no MSR capabilities */
-	if (up->bugs & UART_BUG_NOMSR)
-		return;
-
-	up->ier |= UART_IER_MSI;
-
-	serial8250_rpm_get(up);
-	serial_port_out(port, UART_IER, up->ier);
-	serial8250_rpm_put(up);
-}
-
-/*
- * serial8250_rx_chars: processes according to the passed in LSR
- * value, and returns the remaining LSR bits not handled
- * by this Rx routine.
- */
-unsigned char
-serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
-{
-	struct uart_port *port = &up->port;
-	unsigned char ch;
-	int max_count = 256;
-	char flag;
-
-	do {
-		if (likely(lsr & UART_LSR_DR))
-			ch = serial_in(up, UART_RX);
-		else
-			/*
-			 * Intel 82571 has a Serial Over LAN device that will
-			 * set UART_LSR_BI without setting UART_LSR_DR when
-			 * it receives a break. To avoid reading from the
-			 * receive buffer without UART_LSR_DR bit set, we
-			 * just force the read character to be 0
-			 */
-			ch = 0;
-
-		flag = TTY_NORMAL;
-		port->icount.rx++;
-
-		lsr |= up->lsr_saved_flags;
-		up->lsr_saved_flags = 0;
-
-		if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
-			if (lsr & UART_LSR_BI) {
-				lsr &= ~(UART_LSR_FE | UART_LSR_PE);
-				port->icount.brk++;
-				/*
-				 * We do the SysRQ and SAK checking
-				 * here because otherwise the break
-				 * may get masked by ignore_status_mask
-				 * or read_status_mask.
-				 */
-				if (uart_handle_break(port))
-					goto ignore_char;
-			} else if (lsr & UART_LSR_PE)
-				port->icount.parity++;
-			else if (lsr & UART_LSR_FE)
-				port->icount.frame++;
-			if (lsr & UART_LSR_OE)
-				port->icount.overrun++;
-
-			/*
-			 * Mask off conditions which should be ignored.
-			 */
-			lsr &= port->read_status_mask;
-
-			if (lsr & UART_LSR_BI) {
-				DEBUG_INTR("handling break....");
-				flag = TTY_BREAK;
-			} else if (lsr & UART_LSR_PE)
-				flag = TTY_PARITY;
-			else if (lsr & UART_LSR_FE)
-				flag = TTY_FRAME;
-		}
-		if (uart_handle_sysrq_char(port, ch))
-			goto ignore_char;
-
-		uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
-ignore_char:
-		lsr = serial_in(up, UART_LSR);
-	} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
-	spin_unlock(&port->lock);
-	tty_flip_buffer_push(&port->state->port);
-	spin_lock(&port->lock);
-	return lsr;
-}
-EXPORT_SYMBOL_GPL(serial8250_rx_chars);
-
-void serial8250_tx_chars(struct uart_8250_port *up)
-{
-	struct uart_port *port = &up->port;
-	struct circ_buf *xmit = &port->state->xmit;
-	int count;
-
-	if (port->x_char) {
-		serial_out(up, UART_TX, port->x_char);
-		port->icount.tx++;
-		port->x_char = 0;
-		return;
-	}
-	if (uart_tx_stopped(port)) {
-		serial8250_stop_tx(port);
-		return;
-	}
-	if (uart_circ_empty(xmit)) {
-		__stop_tx(up);
-		return;
-	}
-
-	count = up->tx_loadsz;
-	do {
-		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
-		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-		port->icount.tx++;
-		if (uart_circ_empty(xmit))
-			break;
-		if (up->capabilities & UART_CAP_HFIFO) {
-			if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
-			    BOTH_EMPTY)
-				break;
-		}
-	} while (--count > 0);
-
-	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
-		uart_write_wakeup(port);
-
-	DEBUG_INTR("THRE...");
-
-	/*
-	 * With RPM enabled, we have to wait until the FIFO is empty before the
-	 * HW can go idle. So we get here once again with an empty FIFO and
-	 * disable the interrupt and RPM in __stop_tx().
-	 */
-	if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM))
-		__stop_tx(up);
-}
-EXPORT_SYMBOL_GPL(serial8250_tx_chars);
-
-/* Caller holds uart port lock */
-unsigned int serial8250_modem_status(struct uart_8250_port *up)
-{
-	struct uart_port *port = &up->port;
-	unsigned int status = serial_in(up, UART_MSR);
-
-	status |= up->msr_saved_flags;
-	up->msr_saved_flags = 0;
-	if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
-	    port->state != NULL) {
-		if (status & UART_MSR_TERI)
-			port->icount.rng++;
-		if (status & UART_MSR_DDSR)
-			port->icount.dsr++;
-		if (status & UART_MSR_DDCD)
-			uart_handle_dcd_change(port, status & UART_MSR_DCD);
-		if (status & UART_MSR_DCTS)
-			uart_handle_cts_change(port, status & UART_MSR_CTS);
-
-		wake_up_interruptible(&port->state->port.delta_msr_wait);
-	}
-
-	return status;
-}
-EXPORT_SYMBOL_GPL(serial8250_modem_status);
-
-/*
- * This handles the interrupt from one port.
- */
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
-	unsigned char status;
-	unsigned long flags;
-	struct uart_8250_port *up = up_to_u8250p(port);
-	int dma_err = 0;
-
-	if (iir & UART_IIR_NO_INT)
-		return 0;
-
-	spin_lock_irqsave(&port->lock, flags);
-
-	status = serial_port_in(port, UART_LSR);
-
-	DEBUG_INTR("status = %x...", status);
-
-	if (status & (UART_LSR_DR | UART_LSR_BI)) {
-		if (up->dma)
-			dma_err = up->dma->rx_dma(up, iir);
-
-		if (!up->dma || dma_err)
-			status = serial8250_rx_chars(up, status);
-	}
-	serial8250_modem_status(up);
-	if ((!up->dma || (up->dma && up->dma->tx_err)) &&
-	    (status & UART_LSR_THRE))
-		serial8250_tx_chars(up);
-
-	spin_unlock_irqrestore(&port->lock, flags);
-	return 1;
-}
-EXPORT_SYMBOL_GPL(serial8250_handle_irq);
-
-static int serial8250_default_handle_irq(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned int iir;
-	int ret;
-
-	serial8250_rpm_get(up);
-
-	iir = serial_port_in(port, UART_IIR);
-	ret = serial8250_handle_irq(port, iir);
-
-	serial8250_rpm_put(up);
-	return ret;
-}
-
-/*
- * These Exar UARTs have an extra interrupt indicator that could
- * fire for a few unimplemented interrupts, one of which is a
- * wakeup event when coming out of sleep.  This is here just
- * to be on the safe side, so these interrupts don't go unhandled.
- */
-static int exar_handle_irq(struct uart_port *port)
-{
-	unsigned char int0, int1, int2, int3;
-	unsigned int iir = serial_port_in(port, UART_IIR);
-	int ret;
-
-	ret = serial8250_handle_irq(port, iir);
-
-	if ((port->type == PORT_XR17V35X) ||
-	   (port->type == PORT_XR17D15X)) {
-		int0 = serial_port_in(port, 0x80);
-		int1 = serial_port_in(port, 0x81);
-		int2 = serial_port_in(port, 0x82);
-		int3 = serial_port_in(port, 0x83);
-	}
-
-	return ret;
-}
-
-/*
  * This is the serial driver's interrupt routine.
  *
  * Arjan thinks the old way was overly complex, so it got simplified.
@@ -1941,876 +359,6 @@
 		serial_unlink_irq_chain(up);
 }
 
-static unsigned int serial8250_tx_empty(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned long flags;
-	unsigned int lsr;
-
-	serial8250_rpm_get(up);
-
-	spin_lock_irqsave(&port->lock, flags);
-	lsr = serial_port_in(port, UART_LSR);
-	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
-	spin_unlock_irqrestore(&port->lock, flags);
-
-	serial8250_rpm_put(up);
-
-	return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
-}
-
-static unsigned int serial8250_get_mctrl(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned int status;
-	unsigned int ret;
-
-	serial8250_rpm_get(up);
-	status = serial8250_modem_status(up);
-	serial8250_rpm_put(up);
-
-	ret = 0;
-	if (status & UART_MSR_DCD)
-		ret |= TIOCM_CAR;
-	if (status & UART_MSR_RI)
-		ret |= TIOCM_RNG;
-	if (status & UART_MSR_DSR)
-		ret |= TIOCM_DSR;
-	if (status & UART_MSR_CTS)
-		ret |= TIOCM_CTS;
-	return ret;
-}
-
-void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned char mcr = 0;
-
-	if (mctrl & TIOCM_RTS)
-		mcr |= UART_MCR_RTS;
-	if (mctrl & TIOCM_DTR)
-		mcr |= UART_MCR_DTR;
-	if (mctrl & TIOCM_OUT1)
-		mcr |= UART_MCR_OUT1;
-	if (mctrl & TIOCM_OUT2)
-		mcr |= UART_MCR_OUT2;
-	if (mctrl & TIOCM_LOOP)
-		mcr |= UART_MCR_LOOP;
-
-	mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
-
-	serial_port_out(port, UART_MCR, mcr);
-}
-EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
-
-static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
-	if (port->set_mctrl)
-		port->set_mctrl(port, mctrl);
-	else
-		serial8250_do_set_mctrl(port, mctrl);
-}
-
-static void serial8250_break_ctl(struct uart_port *port, int break_state)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned long flags;
-
-	serial8250_rpm_get(up);
-	spin_lock_irqsave(&port->lock, flags);
-	if (break_state == -1)
-		up->lcr |= UART_LCR_SBC;
-	else
-		up->lcr &= ~UART_LCR_SBC;
-	serial_port_out(port, UART_LCR, up->lcr);
-	spin_unlock_irqrestore(&port->lock, flags);
-	serial8250_rpm_put(up);
-}
-
-/*
- *	Wait for transmitter & holding register to empty
- */
-static void wait_for_xmitr(struct uart_8250_port *up, int bits)
-{
-	unsigned int status, tmout = 10000;
-
-	/* Wait up to 10ms for the character(s) to be sent. */
-	for (;;) {
-		status = serial_in(up, UART_LSR);
-
-		up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
-
-		if ((status & bits) == bits)
-			break;
-		if (--tmout == 0)
-			break;
-		udelay(1);
-	}
-
-	/* Wait up to 1s for flow control if necessary */
-	if (up->port.flags & UPF_CONS_FLOW) {
-		unsigned int tmout;
-		for (tmout = 1000000; tmout; tmout--) {
-			unsigned int msr = serial_in(up, UART_MSR);
-			up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
-			if (msr & UART_MSR_CTS)
-				break;
-			udelay(1);
-			touch_nmi_watchdog();
-		}
-	}
-}
-
-#ifdef CONFIG_CONSOLE_POLL
-/*
- * Console polling routines for writing and reading from the uart while
- * in an interrupt or debug context.
- */
-
-static int serial8250_get_poll_char(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned char lsr;
-	int status;
-
-	serial8250_rpm_get(up);
-
-	lsr = serial_port_in(port, UART_LSR);
-
-	if (!(lsr & UART_LSR_DR)) {
-		status = NO_POLL_CHAR;
-		goto out;
-	}
-
-	status = serial_port_in(port, UART_RX);
-out:
-	serial8250_rpm_put(up);
-	return status;
-}
-
-
-static void serial8250_put_poll_char(struct uart_port *port,
-			 unsigned char c)
-{
-	unsigned int ier;
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	serial8250_rpm_get(up);
-	/*
-	 *	First save the IER then disable the interrupts
-	 */
-	ier = serial_port_in(port, UART_IER);
-	if (up->capabilities & UART_CAP_UUE)
-		serial_port_out(port, UART_IER, UART_IER_UUE);
-	else
-		serial_port_out(port, UART_IER, 0);
-
-	wait_for_xmitr(up, BOTH_EMPTY);
-	/*
-	 *	Send the character out.
-	 */
-	serial_port_out(port, UART_TX, c);
-
-	/*
-	 *	Finally, wait for transmitter to become empty
-	 *	and restore the IER
-	 */
-	wait_for_xmitr(up, BOTH_EMPTY);
-	serial_port_out(port, UART_IER, ier);
-	serial8250_rpm_put(up);
-}
-
-#endif /* CONFIG_CONSOLE_POLL */
-
-int serial8250_do_startup(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned long flags;
-	unsigned char lsr, iir;
-	int retval;
-
-	if (port->type == PORT_8250_CIR)
-		return -ENODEV;
-
-	if (!port->fifosize)
-		port->fifosize = uart_config[port->type].fifo_size;
-	if (!up->tx_loadsz)
-		up->tx_loadsz = uart_config[port->type].tx_loadsz;
-	if (!up->capabilities)
-		up->capabilities = uart_config[port->type].flags;
-	up->mcr = 0;
-
-	if (port->iotype != up->cur_iotype)
-		set_io_from_upio(port);
-
-	serial8250_rpm_get(up);
-	if (port->type == PORT_16C950) {
-		/* Wake up and initialize UART */
-		up->acr = 0;
-		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-		serial_port_out(port, UART_EFR, UART_EFR_ECB);
-		serial_port_out(port, UART_IER, 0);
-		serial_port_out(port, UART_LCR, 0);
-		serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
-		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-		serial_port_out(port, UART_EFR, UART_EFR_ECB);
-		serial_port_out(port, UART_LCR, 0);
-	}
-
-#ifdef CONFIG_SERIAL_8250_RSA
-	/*
-	 * If this is an RSA port, see if we can kick it up to the
-	 * higher speed clock.
-	 */
-	enable_rsa(up);
-#endif
-	/*
-	 * Clear the FIFO buffers and disable them.
-	 * (they will be reenabled in set_termios())
-	 */
-	serial8250_clear_fifos(up);
-
-	/*
-	 * Clear the interrupt registers.
-	 */
-	serial_port_in(port, UART_LSR);
-	serial_port_in(port, UART_RX);
-	serial_port_in(port, UART_IIR);
-	serial_port_in(port, UART_MSR);
-
-	/*
-	 * At this point, there's no way the LSR could still be 0xff;
-	 * if it is, then bail out, because there's likely no UART
-	 * here.
-	 */
-	if (!(port->flags & UPF_BUGGY_UART) &&
-	    (serial_port_in(port, UART_LSR) == 0xff)) {
-		printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
-				   serial_index(port));
-		retval = -ENODEV;
-		goto out;
-	}
-
-	/*
-	 * For an XR16C850, we need to set the trigger levels
-	 */
-	if (port->type == PORT_16850) {
-		unsigned char fctr;
-
-		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-
-		fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
-		serial_port_out(port, UART_FCTR,
-				fctr | UART_FCTR_TRGD | UART_FCTR_RX);
-		serial_port_out(port, UART_TRG, UART_TRG_96);
-		serial_port_out(port, UART_FCTR,
-				fctr | UART_FCTR_TRGD | UART_FCTR_TX);
-		serial_port_out(port, UART_TRG, UART_TRG_96);
-
-		serial_port_out(port, UART_LCR, 0);
-	}
-
-	if (port->irq) {
-		unsigned char iir1;
-		/*
-		 * Test for UARTs that do not reassert THRE when the
-		 * transmitter is idle and the interrupt has already
-		 * been cleared.  Real 16550s should always reassert
-		 * this interrupt whenever the transmitter is idle and
-		 * the interrupt is enabled.  Delays are necessary to
-		 * allow register changes to become visible.
-		 */
-		spin_lock_irqsave(&port->lock, flags);
-		if (up->port.irqflags & IRQF_SHARED)
-			disable_irq_nosync(port->irq);
-
-		wait_for_xmitr(up, UART_LSR_THRE);
-		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
-		udelay(1); /* allow THRE to set */
-		iir1 = serial_port_in(port, UART_IIR);
-		serial_port_out(port, UART_IER, 0);
-		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
-		udelay(1); /* allow a working UART time to re-assert THRE */
-		iir = serial_port_in(port, UART_IIR);
-		serial_port_out(port, UART_IER, 0);
-
-		if (port->irqflags & IRQF_SHARED)
-			enable_irq(port->irq);
-		spin_unlock_irqrestore(&port->lock, flags);
-
-		/*
-		 * If the interrupt is not reasserted, or we otherwise
-		 * don't trust the iir, set up a timer to kick the UART
-		 * on a regular basis.
-		 */
-		if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
-		    up->port.flags & UPF_BUG_THRE) {
-			up->bugs |= UART_BUG_THRE;
-		}
-	}
-
-	retval = up->ops->setup_irq(up);
-	if (retval)
-		goto out;
-
-	/*
-	 * Now, initialize the UART
-	 */
-	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
-
-	spin_lock_irqsave(&port->lock, flags);
-	if (up->port.flags & UPF_FOURPORT) {
-		if (!up->port.irq)
-			up->port.mctrl |= TIOCM_OUT1;
-	} else
-		/*
-		 * Most PC uarts need OUT2 raised to enable interrupts.
-		 */
-		if (port->irq)
-			up->port.mctrl |= TIOCM_OUT2;
-
-	serial8250_set_mctrl(port, port->mctrl);
-
-	/* Serial over LAN (SoL) hack:
-	   Intel 8257x Gigabit Ethernet chips have a
-	   16550 emulation, to be used for Serial over LAN.
-	   Those chips take longer than a normal serial
-	   device to signal that transmit data has been
-	   queued. Because of that, the test below generally
-	   fails. One solution would be to delay reading the
-	   IIR, but this is not reliable, since the timeout
-	   is variable. So simply don't test whether we receive
-	   a TX irq. This way, we'll never enable UART_BUG_TXEN.
-	 */
-	if (up->port.flags & UPF_NO_TXEN_TEST)
-		goto dont_test_tx_en;
-
-	/*
-	 * Do a quick test to see if we receive an
-	 * interrupt when we enable the TX irq.
-	 */
-	serial_port_out(port, UART_IER, UART_IER_THRI);
-	lsr = serial_port_in(port, UART_LSR);
-	iir = serial_port_in(port, UART_IIR);
-	serial_port_out(port, UART_IER, 0);
-
-	if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
-		if (!(up->bugs & UART_BUG_TXEN)) {
-			up->bugs |= UART_BUG_TXEN;
-			pr_debug("ttyS%d - enabling bad tx status workarounds\n",
-				 serial_index(port));
-		}
-	} else {
-		up->bugs &= ~UART_BUG_TXEN;
-	}
-
-dont_test_tx_en:
-	spin_unlock_irqrestore(&port->lock, flags);
-
-	/*
-	 * Clear the interrupt registers again for luck, and clear the
-	 * saved flags to avoid getting false values from polling
-	 * routines or the previous session.
-	 */
-	serial_port_in(port, UART_LSR);
-	serial_port_in(port, UART_RX);
-	serial_port_in(port, UART_IIR);
-	serial_port_in(port, UART_MSR);
-	up->lsr_saved_flags = 0;
-	up->msr_saved_flags = 0;
-
-	/*
-	 * Request DMA channels for both RX and TX.
-	 */
-	if (up->dma) {
-		retval = serial8250_request_dma(up);
-		if (retval) {
-			pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
-					    serial_index(port));
-			up->dma = NULL;
-		}
-	}
-
-	/*
-	 * Finally, enable interrupts.  Note: Modem status interrupts
-	 * are set via set_termios(), which will be occurring imminently
-	 * anyway, so we don't enable them here.
-	 */
-	up->ier = UART_IER_RLSI | UART_IER_RDI;
-	serial_port_out(port, UART_IER, up->ier);
-
-	if (port->flags & UPF_FOURPORT) {
-		unsigned int icp;
-		/*
-		 * Enable interrupts on the AST Fourport board
-		 */
-		icp = (port->iobase & 0xfe0) | 0x01f;
-		outb_p(0x80, icp);
-		inb_p(icp);
-	}
-	retval = 0;
-out:
-	serial8250_rpm_put(up);
-	return retval;
-}
-EXPORT_SYMBOL_GPL(serial8250_do_startup);
-
-static int serial8250_startup(struct uart_port *port)
-{
-	if (port->startup)
-		return port->startup(port);
-	return serial8250_do_startup(port);
-}
-
-void serial8250_do_shutdown(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned long flags;
-
-	serial8250_rpm_get(up);
-	/*
-	 * Disable interrupts from this port
-	 */
-	up->ier = 0;
-	serial_port_out(port, UART_IER, 0);
-
-	if (up->dma)
-		serial8250_release_dma(up);
-
-	spin_lock_irqsave(&port->lock, flags);
-	if (port->flags & UPF_FOURPORT) {
-		/* reset interrupts on the AST Fourport board */
-		inb((port->iobase & 0xfe0) | 0x1f);
-		port->mctrl |= TIOCM_OUT1;
-	} else
-		port->mctrl &= ~TIOCM_OUT2;
-
-	serial8250_set_mctrl(port, port->mctrl);
-	spin_unlock_irqrestore(&port->lock, flags);
-
-	/*
-	 * Disable break condition and FIFOs
-	 */
-	serial_port_out(port, UART_LCR,
-			serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
-	serial8250_clear_fifos(up);
-
-#ifdef CONFIG_SERIAL_8250_RSA
-	/*
-	 * Reset the RSA board back to 115kbps compat mode.
-	 */
-	disable_rsa(up);
-#endif
-
-	/*
-	 * Read data port to reset things, and then unlink from
-	 * the IRQ chain.
-	 */
-	serial_port_in(port, UART_RX);
-	serial8250_rpm_put(up);
-
-	up->ops->release_irq(up);
-}
-EXPORT_SYMBOL_GPL(serial8250_do_shutdown);
-
-static void serial8250_shutdown(struct uart_port *port)
-{
-	if (port->shutdown)
-		port->shutdown(port);
-	else
-		serial8250_do_shutdown(port);
-}
-
-/*
- * XR17V35x UARTs have an extra fractional divisor register (DLD).
- * Calculate the divisor with the extra 4-bit fractional portion.
- */
-static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
-					 unsigned int baud,
-					 unsigned int *frac)
-{
-	struct uart_port *port = &up->port;
-	unsigned int quot_16;
-
-	quot_16 = DIV_ROUND_CLOSEST(port->uartclk, baud);
-	*frac = quot_16 & 0x0f;
-
-	return quot_16 >> 4;
-}
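For reference, a minimal standalone sketch of the 16.4 fixed-point split performed above; the 62.5 MHz uartclk here is purely an assumed example value, not a claim about any particular XR17V35x board:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned int uartclk = 62500000;	/* assumed example clock */
	unsigned int baud = 115200;
	unsigned int quot_16 = DIV_ROUND_CLOSEST(uartclk, baud);
	unsigned int quot = quot_16 >> 4;	/* integer part, programmed via DLL/DLM */
	unsigned int frac = quot_16 & 0x0f;	/* sixteenths, programmed via DLD */

	/* effective rate = uartclk / (16 * (quot + frac/16)) = uartclk / quot_16 */
	printf("quot=%u frac=%u actual baud=%u\n", quot, frac, uartclk / quot_16);
	return 0;
}

With the assumed numbers this yields quot=33, frac=15, i.e. roughly 115101 baud instead of the 115200 a pure integer divisor could not hit as closely.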
-
-static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
-					   unsigned int baud,
-					   unsigned int *frac)
-{
-	struct uart_port *port = &up->port;
-	unsigned int quot;
-
-	/*
-	 * Handle magic divisors for baud rates above baud_base on
-	 * SMSC SuperIO chips.
-	 */
-	if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
-	    baud == (port->uartclk/4))
-		quot = 0x8001;
-	else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
-		 baud == (port->uartclk/8))
-		quot = 0x8002;
-	else if (up->port.type == PORT_XR17V35X)
-		quot = xr17v35x_get_divisor(up, baud, frac);
-	else
-		quot = uart_get_divisor(port, baud);
-
-	/*
-	 * Oxford Semi 952 rev B workaround
-	 */
-	if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
-		quot++;
-
-	return quot;
-}
-
-static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
-					    tcflag_t c_cflag)
-{
-	unsigned char cval;
-
-	switch (c_cflag & CSIZE) {
-	case CS5:
-		cval = UART_LCR_WLEN5;
-		break;
-	case CS6:
-		cval = UART_LCR_WLEN6;
-		break;
-	case CS7:
-		cval = UART_LCR_WLEN7;
-		break;
-	default:
-	case CS8:
-		cval = UART_LCR_WLEN8;
-		break;
-	}
-
-	if (c_cflag & CSTOPB)
-		cval |= UART_LCR_STOP;
-	if (c_cflag & PARENB) {
-		cval |= UART_LCR_PARITY;
-		if (up->bugs & UART_BUG_PARITY)
-			up->fifo_bug = true;
-	}
-	if (!(c_cflag & PARODD))
-		cval |= UART_LCR_EPAR;
-#ifdef CMSPAR
-	if (c_cflag & CMSPAR)
-		cval |= UART_LCR_SPAR;
-#endif
-
-	return cval;
-}
-
-static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
-			    unsigned int quot, unsigned int quot_frac)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	/* Workaround to enable 115200 baud on OMAP1510 internal ports */
-	if (is_omap1510_8250(up)) {
-		if (baud == 115200) {
-			quot = 1;
-			serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
-		} else
-			serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
-	}
-
-	/*
-	 * For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2,
-	 * otherwise just set DLAB
-	 */
-	if (up->capabilities & UART_NATSEMI)
-		serial_port_out(port, UART_LCR, 0xe0);
-	else
-		serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
-
-	serial_dl_write(up, quot);
-
-	/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
-	if (up->port.type == PORT_XR17V35X)
-		serial_port_out(port, 0x2, quot_frac);
-}
-
-void
-serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
-		          struct ktermios *old)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	unsigned char cval;
-	unsigned long flags;
-	unsigned int baud, quot, frac = 0;
-
-	cval = serial8250_compute_lcr(up, termios->c_cflag);
-
-	/*
-	 * Ask the core to calculate the divisor for us.
-	 */
-	baud = uart_get_baud_rate(port, termios, old,
-				  port->uartclk / 16 / 0xffff,
-				  port->uartclk / 16);
-	quot = serial8250_get_divisor(up, baud, &frac);
-
-	/*
-	 * Ok, we're now changing the port state.  Do it with
-	 * interrupts disabled.
-	 */
-	serial8250_rpm_get(up);
-	spin_lock_irqsave(&port->lock, flags);
-
-	up->lcr = cval;					/* Save computed LCR */
-
-	if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
-		/* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
-		if ((baud < 2400 && !up->dma) || up->fifo_bug) {
-			up->fcr &= ~UART_FCR_TRIGGER_MASK;
-			up->fcr |= UART_FCR_TRIGGER_1;
-		}
-	}
-
-	/*
-	 * MCR-based auto flow control.  When AFE is enabled, RTS will be
-	 * deasserted when the receive FIFO contains more characters than
-	 * the trigger, or the MCR RTS bit is cleared.  In the case where
-	 * the remote UART is not using CTS auto flow control, we must
-	 * have sufficient FIFO entries for the latency of the remote
-	 * UART to respond.  IOW, at least 32 bytes of FIFO.
-	 */
-	if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
-		up->mcr &= ~UART_MCR_AFE;
-		if (termios->c_cflag & CRTSCTS)
-			up->mcr |= UART_MCR_AFE;
-	}
-
-	/*
-	 * Update the per-port timeout.
-	 */
-	uart_update_timeout(port, termios->c_cflag, baud);
-
-	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
-	if (termios->c_iflag & INPCK)
-		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
-		port->read_status_mask |= UART_LSR_BI;
-
-	/*
-	 * Characters to ignore
-	 */
-	port->ignore_status_mask = 0;
-	if (termios->c_iflag & IGNPAR)
-		port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
-	if (termios->c_iflag & IGNBRK) {
-		port->ignore_status_mask |= UART_LSR_BI;
-		/*
-		 * If we're ignoring parity and break indicators,
-		 * ignore overruns too (for real raw support).
-		 */
-		if (termios->c_iflag & IGNPAR)
-			port->ignore_status_mask |= UART_LSR_OE;
-	}
-
-	/*
-	 * ignore all characters if CREAD is not set
-	 */
-	if ((termios->c_cflag & CREAD) == 0)
-		port->ignore_status_mask |= UART_LSR_DR;
-
-	/*
-	 * CTS flow control flag and modem status interrupts
-	 */
-	up->ier &= ~UART_IER_MSI;
-	if (!(up->bugs & UART_BUG_NOMSR) &&
-			UART_ENABLE_MS(&up->port, termios->c_cflag))
-		up->ier |= UART_IER_MSI;
-	if (up->capabilities & UART_CAP_UUE)
-		up->ier |= UART_IER_UUE;
-	if (up->capabilities & UART_CAP_RTOIE)
-		up->ier |= UART_IER_RTOIE;
-
-	serial_port_out(port, UART_IER, up->ier);
-
-	if (up->capabilities & UART_CAP_EFR) {
-		unsigned char efr = 0;
-		/*
-		 * TI16C752/Startech hardware flow control.  FIXME:
-		 * - TI16C752 requires control thresholds to be set.
-		 * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
-		 */
-		if (termios->c_cflag & CRTSCTS)
-			efr |= UART_EFR_CTS;
-
-		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-		if (port->flags & UPF_EXAR_EFR)
-			serial_port_out(port, UART_XR_EFR, efr);
-		else
-			serial_port_out(port, UART_EFR, efr);
-	}
-
-	serial8250_set_divisor(port, baud, quot, frac);
-
-	/*
-	 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
-	 * is written without DLAB set, this mode will be disabled.
-	 */
-	if (port->type == PORT_16750)
-		serial_port_out(port, UART_FCR, up->fcr);
-
-	serial_port_out(port, UART_LCR, up->lcr);	/* reset DLAB */
-	if (port->type != PORT_16750) {
-		/* emulated UARTs (Lucent Venus 167x) need two steps */
-		if (up->fcr & UART_FCR_ENABLE_FIFO)
-			serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
-		serial_port_out(port, UART_FCR, up->fcr);	/* set fcr */
-	}
-	serial8250_set_mctrl(port, port->mctrl);
-	spin_unlock_irqrestore(&port->lock, flags);
-	serial8250_rpm_put(up);
-
-	/* Don't rewrite B0 */
-	if (tty_termios_baud_rate(termios))
-		tty_termios_encode_baud_rate(termios, baud, baud);
-}
-EXPORT_SYMBOL(serial8250_do_set_termios);
-
-static void
-serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
-		       struct ktermios *old)
-{
-	if (port->set_termios)
-		port->set_termios(port, termios, old);
-	else
-		serial8250_do_set_termios(port, termios, old);
-}
-
-static void
-serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
-{
-	if (termios->c_line == N_PPS) {
-		port->flags |= UPF_HARDPPS_CD;
-		spin_lock_irq(&port->lock);
-		serial8250_enable_ms(port);
-		spin_unlock_irq(&port->lock);
-	} else {
-		port->flags &= ~UPF_HARDPPS_CD;
-		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
-			spin_lock_irq(&port->lock);
-			serial8250_disable_ms(port);
-			spin_unlock_irq(&port->lock);
-		}
-	}
-}
-
-
-void serial8250_do_pm(struct uart_port *port, unsigned int state,
-		      unsigned int oldstate)
-{
-	struct uart_8250_port *p = up_to_u8250p(port);
-
-	serial8250_set_sleep(p, state != 0);
-}
-EXPORT_SYMBOL(serial8250_do_pm);
-
-static void
-serial8250_pm(struct uart_port *port, unsigned int state,
-	      unsigned int oldstate)
-{
-	if (port->pm)
-		port->pm(port, state, oldstate);
-	else
-		serial8250_do_pm(port, state, oldstate);
-}
-
-static unsigned int serial8250_port_size(struct uart_8250_port *pt)
-{
-	if (pt->port.mapsize)
-		return pt->port.mapsize;
-	if (pt->port.iotype == UPIO_AU) {
-		if (pt->port.type == PORT_RT2880)
-			return 0x100;
-		return 0x1000;
-	}
-	if (is_omap1_8250(pt))
-		return 0x16 << pt->port.regshift;
-
-	return 8 << pt->port.regshift;
-}
-
-/*
- * Resource handling.
- */
-static int serial8250_request_std_resource(struct uart_8250_port *up)
-{
-	unsigned int size = serial8250_port_size(up);
-	struct uart_port *port = &up->port;
-	int ret = 0;
-
-	switch (port->iotype) {
-	case UPIO_AU:
-	case UPIO_TSI:
-	case UPIO_MEM32:
-	case UPIO_MEM32BE:
-	case UPIO_MEM:
-		if (!port->mapbase)
-			break;
-
-		if (!request_mem_region(port->mapbase, size, "serial")) {
-			ret = -EBUSY;
-			break;
-		}
-
-		if (port->flags & UPF_IOREMAP) {
-			port->membase = ioremap_nocache(port->mapbase, size);
-			if (!port->membase) {
-				release_mem_region(port->mapbase, size);
-				ret = -ENOMEM;
-			}
-		}
-		break;
-
-	case UPIO_HUB6:
-	case UPIO_PORT:
-		if (!request_region(port->iobase, size, "serial"))
-			ret = -EBUSY;
-		break;
-	}
-	return ret;
-}
-
-static void serial8250_release_std_resource(struct uart_8250_port *up)
-{
-	unsigned int size = serial8250_port_size(up);
-	struct uart_port *port = &up->port;
-
-	switch (port->iotype) {
-	case UPIO_AU:
-	case UPIO_TSI:
-	case UPIO_MEM32:
-	case UPIO_MEM32BE:
-	case UPIO_MEM:
-		if (!port->mapbase)
-			break;
-
-		if (port->flags & UPF_IOREMAP) {
-			iounmap(port->membase);
-			port->membase = NULL;
-		}
-
-		release_mem_region(port->mapbase, size);
-		break;
-
-	case UPIO_HUB6:
-	case UPIO_PORT:
-		release_region(port->iobase, size);
-		break;
-	}
-}
-
 #ifdef CONFIG_SERIAL_8250_RSA
 static int serial8250_request_rsa_resource(struct uart_8250_port *up)
 {
@@ -2848,259 +396,6 @@
 }
 #endif
 
-static void serial8250_release_port(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	serial8250_release_std_resource(up);
-}
-
-static int serial8250_request_port(struct uart_port *port)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	int ret;
-
-	if (port->type == PORT_8250_CIR)
-		return -ENODEV;
-
-	ret = serial8250_request_std_resource(up);
-
-	return ret;
-}
-
-static int fcr_get_rxtrig_bytes(struct uart_8250_port *up)
-{
-	const struct serial8250_config *conf_type = &uart_config[up->port.type];
-	unsigned char bytes;
-
-	bytes = conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(up->fcr)];
-
-	return bytes ? bytes : -EOPNOTSUPP;
-}
-
-static int bytes_to_fcr_rxtrig(struct uart_8250_port *up, unsigned char bytes)
-{
-	const struct serial8250_config *conf_type = &uart_config[up->port.type];
-	int i;
-
-	if (!conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(UART_FCR_R_TRIG_00)])
-		return -EOPNOTSUPP;
-
-	for (i = 1; i < UART_FCR_R_TRIG_MAX_STATE; i++) {
-		if (bytes < conf_type->rxtrig_bytes[i])
-			/* Use the nearest lower value */
-			return (--i) << UART_FCR_R_TRIG_SHIFT;
-	}
-
-	return UART_FCR_R_TRIG_11;
-}
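The "nearest lower value" selection above can be exercised in isolation. The sketch below assumes a 16550A-style trigger table of 1/4/8/14 bytes purely for illustration; the real tables live in uart_config[]:

#include <stdio.h>

#define FCR_R_TRIG_SHIFT	6	/* FCR RX trigger field lives in bits 7:6 */
#define TRIG_STATES		4

/* Assumed example trigger table (1, 4, 8, 14 bytes). */
static const unsigned char rxtrig_bytes[TRIG_STATES] = { 1, 4, 8, 14 };

static int bytes_to_rxtrig(unsigned char bytes)
{
	int i;

	for (i = 1; i < TRIG_STATES; i++) {
		if (bytes < rxtrig_bytes[i])
			return (i - 1) << FCR_R_TRIG_SHIFT;	/* nearest lower value */
	}
	return (TRIG_STATES - 1) << FCR_R_TRIG_SHIFT;		/* highest setting */
}

int main(void)
{
	/* A request for 10 bytes rounds down to the 8-byte trigger (0x80). */
	printf("%#x\n", bytes_to_rxtrig(10));
	return 0;
}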
-
-static int do_get_rxtrig(struct tty_port *port)
-{
-	struct uart_state *state = container_of(port, struct uart_state, port);
-	struct uart_port *uport = state->uart_port;
-	struct uart_8250_port *up =
-		container_of(uport, struct uart_8250_port, port);
-
-	if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
-		return -EINVAL;
-
-	return fcr_get_rxtrig_bytes(up);
-}
-
-static int do_serial8250_get_rxtrig(struct tty_port *port)
-{
-	int rxtrig_bytes;
-
-	mutex_lock(&port->mutex);
-	rxtrig_bytes = do_get_rxtrig(port);
-	mutex_unlock(&port->mutex);
-
-	return rxtrig_bytes;
-}
-
-static ssize_t serial8250_get_attr_rx_trig_bytes(struct device *dev,
-	struct device_attribute *attr, char *buf)
-{
-	struct tty_port *port = dev_get_drvdata(dev);
-	int rxtrig_bytes;
-
-	rxtrig_bytes = do_serial8250_get_rxtrig(port);
-	if (rxtrig_bytes < 0)
-		return rxtrig_bytes;
-
-	return snprintf(buf, PAGE_SIZE, "%d\n", rxtrig_bytes);
-}
-
-static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
-{
-	struct uart_state *state = container_of(port, struct uart_state, port);
-	struct uart_port *uport = state->uart_port;
-	struct uart_8250_port *up =
-		container_of(uport, struct uart_8250_port, port);
-	int rxtrig;
-
-	if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
-	    up->fifo_bug)
-		return -EINVAL;
-
-	rxtrig = bytes_to_fcr_rxtrig(up, bytes);
-	if (rxtrig < 0)
-		return rxtrig;
-
-	serial8250_clear_fifos(up);
-	up->fcr &= ~UART_FCR_TRIGGER_MASK;
-	up->fcr |= (unsigned char)rxtrig;
-	serial_out(up, UART_FCR, up->fcr);
-	return 0;
-}
-
-static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes)
-{
-	int ret;
-
-	mutex_lock(&port->mutex);
-	ret = do_set_rxtrig(port, bytes);
-	mutex_unlock(&port->mutex);
-
-	return ret;
-}
-
-static ssize_t serial8250_set_attr_rx_trig_bytes(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct tty_port *port = dev_get_drvdata(dev);
-	unsigned char bytes;
-	int ret;
-
-	if (!count)
-		return -EINVAL;
-
-	ret = kstrtou8(buf, 10, &bytes);
-	if (ret < 0)
-		return ret;
-
-	ret = do_serial8250_set_rxtrig(port, bytes);
-	if (ret < 0)
-		return ret;
-
-	return count;
-}
-
-static DEVICE_ATTR(rx_trig_bytes, S_IRUSR | S_IWUSR | S_IRGRP,
-		   serial8250_get_attr_rx_trig_bytes,
-		   serial8250_set_attr_rx_trig_bytes);
-
-static struct attribute *serial8250_dev_attrs[] = {
-	&dev_attr_rx_trig_bytes.attr,
-	NULL,
-	};
-
-static struct attribute_group serial8250_dev_attr_group = {
-	.attrs = serial8250_dev_attrs,
-	};
-
-static void register_dev_spec_attr_grp(struct uart_8250_port *up)
-{
-	const struct serial8250_config *conf_type = &uart_config[up->port.type];
-
-	if (conf_type->rxtrig_bytes[0])
-		up->port.attr_group = &serial8250_dev_attr_group;
-}
-
-static void serial8250_config_port(struct uart_port *port, int flags)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-	int ret;
-
-	if (port->type == PORT_8250_CIR)
-		return;
-
-	/*
-	 * Find the region that we can probe for.  This in turn
-	 * tells us whether we can probe for the type of port.
-	 */
-	ret = serial8250_request_std_resource(up);
-	if (ret < 0)
-		return;
-
-	if (port->iotype != up->cur_iotype)
-		set_io_from_upio(port);
-
-	if (flags & UART_CONFIG_TYPE)
-		autoconfig(up);
-
-	/* if access method is AU, it is a 16550 with a quirk */
-	if (port->type == PORT_16550A && port->iotype == UPIO_AU)
-		up->bugs |= UART_BUG_NOMSR;
-
-	/* HW bugs may trigger IRQ while IIR == NO_INT */
-	if (port->type == PORT_TEGRA)
-		up->bugs |= UART_BUG_NOMSR;
-
-	if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
-		autoconfig_irq(up);
-
-	if (port->type == PORT_UNKNOWN)
-		serial8250_release_std_resource(up);
-
-	/* Fixme: probably not the best place for this */
-	if ((port->type == PORT_XR17V35X) ||
-	   (port->type == PORT_XR17D15X))
-		port->handle_irq = exar_handle_irq;
-
-	register_dev_spec_attr_grp(up);
-	up->fcr = uart_config[up->port.type].fcr;
-}
-
-static int
-serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
-	if (ser->irq >= nr_irqs || ser->irq < 0 ||
-	    ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
-	    ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
-	    ser->type == PORT_STARTECH)
-		return -EINVAL;
-	return 0;
-}
-
-static const char *
-serial8250_type(struct uart_port *port)
-{
-	int type = port->type;
-
-	if (type >= ARRAY_SIZE(uart_config))
-		type = 0;
-	return uart_config[type].name;
-}
-
-static const struct uart_ops serial8250_pops = {
-	.tx_empty	= serial8250_tx_empty,
-	.set_mctrl	= serial8250_set_mctrl,
-	.get_mctrl	= serial8250_get_mctrl,
-	.stop_tx	= serial8250_stop_tx,
-	.start_tx	= serial8250_start_tx,
-	.throttle	= serial8250_throttle,
-	.unthrottle	= serial8250_unthrottle,
-	.stop_rx	= serial8250_stop_rx,
-	.enable_ms	= serial8250_enable_ms,
-	.break_ctl	= serial8250_break_ctl,
-	.startup	= serial8250_startup,
-	.shutdown	= serial8250_shutdown,
-	.set_termios	= serial8250_set_termios,
-	.set_ldisc	= serial8250_set_ldisc,
-	.pm		= serial8250_pm,
-	.type		= serial8250_type,
-	.release_port	= serial8250_release_port,
-	.request_port	= serial8250_request_port,
-	.config_port	= serial8250_config_port,
-	.verify_port	= serial8250_verify_port,
-#ifdef CONFIG_CONSOLE_POLL
-	.poll_get_char = serial8250_get_poll_char,
-	.poll_put_char = serial8250_put_poll_char,
-#endif
-};
-
 static const struct uart_ops *base_ops;
 static struct uart_ops univ8250_port_ops;
 
@@ -3139,42 +434,6 @@
 }
 EXPORT_SYMBOL(serial8250_set_isa_configurator);
 
-static void serial8250_init_port(struct uart_8250_port *up)
-{
-	struct uart_port *port = &up->port;
-
-	spin_lock_init(&port->lock);
-	port->ops = &serial8250_pops;
-
-	up->cur_iotype = 0xFF;
-}
-
-static void serial8250_set_defaults(struct uart_8250_port *up)
-{
-	struct uart_port *port = &up->port;
-
-	if (up->port.flags & UPF_FIXED_TYPE) {
-		unsigned int type = up->port.type;
-
-		if (!up->port.fifosize)
-			up->port.fifosize = uart_config[type].fifo_size;
-		if (!up->tx_loadsz)
-			up->tx_loadsz = uart_config[type].tx_loadsz;
-		if (!up->capabilities)
-			up->capabilities = uart_config[type].flags;
-	}
-
-	set_io_from_upio(port);
-
-	/* default dma handlers */
-	if (up->dma) {
-		if (!up->dma->tx_dma)
-			up->dma->tx_dma = serial8250_tx_dma;
-		if (!up->dma->rx_dma)
-			up->dma->rx_dma = serial8250_rx_dma;
-	}
-}
-
 #ifdef CONFIG_SERIAL_8250_RSA
 
 static void univ8250_config_port(struct uart_port *port, int flags)
@@ -3324,94 +583,6 @@
 
 #ifdef CONFIG_SERIAL_8250_CONSOLE
 
-static void serial8250_console_putchar(struct uart_port *port, int ch)
-{
-	struct uart_8250_port *up = up_to_u8250p(port);
-
-	wait_for_xmitr(up, UART_LSR_THRE);
-	serial_port_out(port, UART_TX, ch);
-}
-
-/*
- *	Print a string to the serial port trying not to disturb
- *	any possible real use of the port...
- *
- *	The console_lock must be held when we get here.
- */
-static void serial8250_console_write(struct uart_8250_port *up, const char *s,
-				     unsigned int count)
-{
-	struct uart_port *port = &up->port;
-	unsigned long flags;
-	unsigned int ier;
-	int locked = 1;
-
-	touch_nmi_watchdog();
-
-	serial8250_rpm_get(up);
-
-	if (port->sysrq)
-		locked = 0;
-	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&port->lock, flags);
-	else
-		spin_lock_irqsave(&port->lock, flags);
-
-	/*
-	 *	First save the IER then disable the interrupts
-	 */
-	ier = serial_port_in(port, UART_IER);
-
-	if (up->capabilities & UART_CAP_UUE)
-		serial_port_out(port, UART_IER, UART_IER_UUE);
-	else
-		serial_port_out(port, UART_IER, 0);
-
-	/* check scratch reg to see if port powered off during system sleep */
-	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-		struct ktermios termios;
-		unsigned int baud, quot, frac = 0;
-
-		termios.c_cflag = port->cons->cflag;
-		if (port->state->port.tty && termios.c_cflag == 0)
-			termios.c_cflag = port->state->port.tty->termios.c_cflag;
-
-		baud = uart_get_baud_rate(port, &termios, NULL,
-					  port->uartclk / 16 / 0xffff,
-					  port->uartclk / 16);
-		quot = serial8250_get_divisor(up, baud, &frac);
-
-		serial8250_set_divisor(port, baud, quot, frac);
-		serial_port_out(port, UART_LCR, up->lcr);
-		serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
-
-		up->canary = 0;
-	}
-
-	uart_console_write(port, s, count, serial8250_console_putchar);
-
-	/*
-	 *	Finally, wait for transmitter to become empty
-	 *	and restore the IER
-	 */
-	wait_for_xmitr(up, BOTH_EMPTY);
-	serial_port_out(port, UART_IER, ier);
-
-	/*
-	 *	The receive handling will happen properly because the
-	 *	receive ready bit will still be set; it is not cleared
-	 *	on read.  However, modem control will not, we must
-	 *	call it if we have saved something in the saved flags
-	 *	while processing with interrupts off.
-	 */
-	if (up->msr_saved_flags)
-		serial8250_modem_status(up);
-
-	if (locked)
-		spin_unlock_irqrestore(&port->lock, flags);
-	serial8250_rpm_put(up);
-}
-
 static void univ8250_console_write(struct console *co, const char *s,
 				   unsigned int count)
 {
@@ -3420,39 +591,6 @@
 	serial8250_console_write(up, s, count);
 }
 
-static unsigned int probe_baud(struct uart_port *port)
-{
-	unsigned char lcr, dll, dlm;
-	unsigned int quot;
-
-	lcr = serial_port_in(port, UART_LCR);
-	serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
-	dll = serial_port_in(port, UART_DLL);
-	dlm = serial_port_in(port, UART_DLM);
-	serial_port_out(port, UART_LCR, lcr);
-
-	quot = (dlm << 8) | dll;
-	return (port->uartclk / 16) / quot;
-}
-
-static int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
-{
-	int baud = 9600;
-	int bits = 8;
-	int parity = 'n';
-	int flow = 'n';
-
-	if (!port->iobase && !port->membase)
-		return -ENODEV;
-
-	if (options)
-		uart_parse_options(options, &baud, &parity, &bits, &flow);
-	else if (probe)
-		baud = probe_baud(port);
-
-	return uart_set_options(port, port->cons, baud, parity, bits, flow);
-}
-
 static int univ8250_console_setup(struct console *co, char *options)
 {
 	struct uart_port *port;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index d48b506..06324f1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -56,7 +56,6 @@
 
 struct dw8250_data {
 	u8			usr_reg;
-	int			last_mcr;
 	int			line;
 	int			msr_mask_on;
 	int			msr_mask_off;
@@ -76,12 +75,6 @@
 {
 	struct dw8250_data *d = p->private_data;
 
-	/* If reading MSR, report CTS asserted when auto-CTS/RTS enabled */
-	if (offset == UART_MSR && d->last_mcr & UART_MCR_AFE) {
-		value |= UART_MSR_CTS;
-		value &= ~UART_MSR_DCTS;
-	}
-
 	/* Override any modem control signals if needed */
 	if (offset == UART_MSR) {
 		value |= d->msr_mask_on;
@@ -101,11 +94,6 @@
 
 static void dw8250_serial_out(struct uart_port *p, int offset, int value)
 {
-	struct dw8250_data *d = p->private_data;
-
-	if (offset == UART_MCR)
-		d->last_mcr = value;
-
 	writeb(value, p->membase + (offset << p->regshift));
 
 	/* Make sure LCR write wasn't ignored */
@@ -144,11 +132,6 @@
 
 static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
 {
-	struct dw8250_data *d = p->private_data;
-
-	if (offset == UART_MCR)
-		d->last_mcr = value;
-
 	value &= 0xff;
 	__raw_writeq(value, p->membase + (offset << p->regshift));
 	/* Read back to ensure register write ordering. */
@@ -175,11 +158,6 @@
 
 static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
 {
-	struct dw8250_data *d = p->private_data;
-
-	if (offset == UART_MCR)
-		d->last_mcr = value;
-
 	writel(value, p->membase + (offset << p->regshift));
 
 	/* Make sure LCR write wasn't ignored */
@@ -257,6 +235,11 @@
 
 	if (!ret)
 		p->uartclk = rate;
+
+	p->status &= ~UPSTAT_AUTOCTS;
+	if (termios->c_cflag & CRTSCTS)
+		p->status |= UPSTAT_AUTOCTS;
+
 out:
 	serial8250_do_set_termios(p, termios, old);
 }
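
Note on the hunk above: dw8250_set_termios() now advertises auto-CTS to the 8250 core whenever CRTSCTS is requested, so serial8250_do_set_termios() leaves CTS handling to the UART's automatic flow control. A minimal sketch of that pattern in a set_termios callback (example_set_termios is a placeholder name, not the driver's symbol; only the UPSTAT_AUTOCTS handling from the hunk is shown):

#include <linux/serial_core.h>
#include <linux/serial_8250.h>

/* Sketch: flag hardware-assisted CTS so the 8250 core skips software
 * CTS handling when the user asked for CRTSCTS.
 */
static void example_set_termios(struct uart_port *p, struct ktermios *termios,
				struct ktermios *old)
{
	p->status &= ~UPSTAT_AUTOCTS;
	if (termios->c_cflag & CRTSCTS)
		p->status |= UPSTAT_AUTOCTS;

	serial8250_do_set_termios(p, termios, old);
}
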
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 771dda2..faed05f 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -35,7 +35,7 @@
 #include <asm/io.h>
 #include <asm/serial.h>
 
-unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offset)
+static unsigned int __init serial8250_early_in(struct uart_port *port, int offset)
 {
 	switch (port->iotype) {
 	case UPIO_MEM:
@@ -51,7 +51,7 @@
 	}
 }
 
-void __weak __init serial8250_early_out(struct uart_port *port, int offset, int value)
+static void __init serial8250_early_out(struct uart_port *port, int offset, int value)
 {
 	switch (port->iotype) {
 	case UPIO_MEM:
diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
index 5815e81..8947439 100644
--- a/drivers/tty/serial/8250/8250_fintek.c
+++ b/drivers/tty/serial/8250/8250_fintek.c
@@ -17,18 +17,19 @@
 #include <linux/serial_core.h>
 #include  "8250.h"
 
-#define ADDR_PORT 0x4E
-#define DATA_PORT 0x4F
-#define ENTRY_KEY 0x77
+#define ADDR_PORT 0
+#define DATA_PORT 1
 #define EXIT_KEY 0xAA
 #define CHIP_ID1  0x20
-#define CHIP_ID1_VAL 0x02
 #define CHIP_ID2  0x21
-#define CHIP_ID2_VAL 0x16
+#define CHIP_ID_0 0x1602
+#define CHIP_ID_1 0x0501
 #define VENDOR_ID1 0x23
 #define VENDOR_ID1_VAL 0x19
 #define VENDOR_ID2 0x24
 #define VENDOR_ID2_VAL 0x34
+#define IO_ADDR1 0x61
+#define IO_ADDR2 0x60
 #define LDN 0x7
 
 #define RS485  0xF0
@@ -39,51 +40,49 @@
 
 #define DRIVER_NAME "8250_fintek"
 
-static int fintek_8250_enter_key(void){
+struct fintek_8250 {
+	u16 base_port;
+	u8 index;
+	u8 key;
+	long line;
+};
 
-	if (!request_muxed_region(ADDR_PORT, 2, DRIVER_NAME))
+static int fintek_8250_enter_key(u16 base_port, u8 key)
+{
+
+	if (!request_muxed_region(base_port, 2, DRIVER_NAME))
 		return -EBUSY;
 
-	outb(ENTRY_KEY, ADDR_PORT);
-	outb(ENTRY_KEY, ADDR_PORT);
+	outb(key, base_port + ADDR_PORT);
+	outb(key, base_port + ADDR_PORT);
 	return 0;
 }
 
-static void fintek_8250_exit_key(void){
-
-	outb(EXIT_KEY, ADDR_PORT);
-	release_region(ADDR_PORT, 2);
-}
-
-static int fintek_8250_get_index(resource_size_t base_addr)
-{
-	resource_size_t base[] = {0x3f8, 0x2f8, 0x3e8, 0x2e8};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(base); i++)
-		if (base_addr == base[i])
-			return i;
-
-	return -ENODEV;
-}
-
-static int fintek_8250_check_id(void)
+static void fintek_8250_exit_key(u16 base_port)
 {
 
-	outb(CHIP_ID1, ADDR_PORT);
-	if (inb(DATA_PORT) != CHIP_ID1_VAL)
+	outb(EXIT_KEY, base_port + ADDR_PORT);
+	release_region(base_port + ADDR_PORT, 2);
+}
+
+static int fintek_8250_check_id(u16 base_port)
+{
+	u16 chip;
+
+	outb(VENDOR_ID1, base_port + ADDR_PORT);
+	if (inb(base_port + DATA_PORT) != VENDOR_ID1_VAL)
 		return -ENODEV;
 
-	outb(CHIP_ID2, ADDR_PORT);
-	if (inb(DATA_PORT) != CHIP_ID2_VAL)
+	outb(VENDOR_ID2, base_port + ADDR_PORT);
+	if (inb(base_port + DATA_PORT) != VENDOR_ID2_VAL)
 		return -ENODEV;
 
-	outb(VENDOR_ID1, ADDR_PORT);
-	if (inb(DATA_PORT) != VENDOR_ID1_VAL)
-		return -ENODEV;
+	outb(CHIP_ID1, base_port + ADDR_PORT);
+	chip = inb(base_port + DATA_PORT);
+	outb(CHIP_ID2, base_port + ADDR_PORT);
+	chip |= inb(base_port + DATA_PORT) << 8;
 
-	outb(VENDOR_ID2, ADDR_PORT);
-	if (inb(DATA_PORT) != VENDOR_ID2_VAL)
+	if (chip != CHIP_ID_0 && chip != CHIP_ID_1)
 		return -ENODEV;
 
 	return 0;
@@ -93,9 +92,9 @@
 			      struct serial_rs485 *rs485)
 {
 	uint8_t config = 0;
-	int index = fintek_8250_get_index(port->iobase);
+	struct fintek_8250 *pdata = port->private_data;
 
-	if (index < 0)
+	if (!pdata)
 		return -EINVAL;
 
 	if (rs485->flags & SER_RS485_ENABLED)
@@ -125,44 +124,84 @@
 	if (rs485->flags & SER_RS485_RTS_ON_SEND)
 		config |= RTS_INVERT;
 
-	if (fintek_8250_enter_key())
+	if (fintek_8250_enter_key(pdata->base_port, pdata->key))
 		return -EBUSY;
 
-	outb(LDN, ADDR_PORT);
-	outb(index, DATA_PORT);
-	outb(RS485, ADDR_PORT);
-	outb(config, DATA_PORT);
-	fintek_8250_exit_key();
+	outb(LDN, pdata->base_port + ADDR_PORT);
+	outb(pdata->index, pdata->base_port + DATA_PORT);
+	outb(RS485, pdata->base_port + ADDR_PORT);
+	outb(config, pdata->base_port + DATA_PORT);
+	fintek_8250_exit_key(pdata->base_port);
 
 	port->rs485 = *rs485;
 
 	return 0;
 }
 
+static int fintek_8250_base_port(u16 io_address, u8 *key, u8 *index)
+{
+	static const u16 addr[] = {0x4e, 0x2e};
+	static const u8 keys[] = {0x77, 0xa0, 0x87, 0x67};
+	int i, j, k;
+
+	for (i = 0; i < ARRAY_SIZE(addr); i++) {
+		for (j = 0; j < ARRAY_SIZE(keys); j++) {
+
+			if (fintek_8250_enter_key(addr[i], keys[j]))
+				continue;
+			if (fintek_8250_check_id(addr[i])) {
+				fintek_8250_exit_key(addr[i]);
+				continue;
+			}
+
+			for (k = 0; k < 4; k++) {
+				u16 aux;
+
+				outb(LDN, addr[i] + ADDR_PORT);
+				outb(k, addr[i] + DATA_PORT);
+
+				outb(IO_ADDR1, addr[i] + ADDR_PORT);
+				aux = inb(addr[i] + DATA_PORT);
+				outb(IO_ADDR2, addr[i] + ADDR_PORT);
+				aux |= inb(addr[i] + DATA_PORT) << 8;
+				if (aux != io_address)
+					continue;
+
+				fintek_8250_exit_key(addr[i]);
+				*key = keys[j];
+				*index = k;
+				return addr[i];
+			}
+			fintek_8250_exit_key(addr[i]);
+		}
+	}
+
+	return -ENODEV;
+}
+
 static int
 fintek_8250_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
 {
-	int line;
 	struct uart_8250_port uart;
-	int ret;
+	struct fintek_8250 *pdata;
+	int base_port;
+	u8 key;
+	u8 index;
 
 	if (!pnp_port_valid(dev, 0))
 		return -ENODEV;
 
-	if (fintek_8250_get_index(pnp_port_start(dev, 0)) < 0)
+	base_port = fintek_8250_base_port(pnp_port_start(dev, 0), &key, &index);
+	if (base_port < 0)
 		return -ENODEV;
 
-	/* Enable configuration registers*/
-	if (fintek_8250_enter_key())
-		return -EBUSY;
-
-	/*Check ID*/
-	ret = fintek_8250_check_id();
-	fintek_8250_exit_key();
-	if (ret)
-		return ret;
-
 	memset(&uart, 0, sizeof(uart));
+
+	pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+	uart.port.private_data = pdata;
+
 	if (!pnp_irq_valid(dev, 0))
 		return -ENODEV;
 	uart.port.irq = pnp_irq(dev, 0);
@@ -176,40 +215,43 @@
 	uart.port.uartclk = 1843200;
 	uart.port.dev = &dev->dev;
 
-	line = serial8250_register_8250_port(&uart);
-	if (line < 0)
+	pdata->key = key;
+	pdata->base_port = base_port;
+	pdata->index = index;
+	pdata->line = serial8250_register_8250_port(&uart);
+	if (pdata->line < 0)
 		return -ENODEV;
 
-	pnp_set_drvdata(dev, (void *)((long)line + 1));
+	pnp_set_drvdata(dev, pdata);
 	return 0;
 }
 
 static void fintek_8250_remove(struct pnp_dev *dev)
 {
-	long line = (long)pnp_get_drvdata(dev);
+	struct fintek_8250 *pdata = pnp_get_drvdata(dev);
 
-	if (line)
-		serial8250_unregister_port(line - 1);
+	if (pdata)
+		serial8250_unregister_port(pdata->line);
 }
 
 #ifdef CONFIG_PM
 static int fintek_8250_suspend(struct pnp_dev *dev, pm_message_t state)
 {
-	long line = (long)pnp_get_drvdata(dev);
+	struct fintek_8250 *pdata = pnp_get_drvdata(dev);
 
-	if (!line)
+	if (!pdata)
 		return -ENODEV;
-	serial8250_suspend_port(line - 1);
+	serial8250_suspend_port(pdata->line);
 	return 0;
 }
 
 static int fintek_8250_resume(struct pnp_dev *dev)
 {
-	long line = (long)pnp_get_drvdata(dev);
+	struct fintek_8250 *pdata = pnp_get_drvdata(dev);
 
-	if (!line)
+	if (!pdata)
 		return -ENODEV;
-	serial8250_resume_port(line - 1);
+	serial8250_resume_port(pdata->line);
 	return 0;
 }
 #else
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 21bf81f..7c1e4be 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -252,7 +252,6 @@
 static struct platform_driver ingenic_uart_platform_driver = {
 	.driver = {
 		.name		= "ingenic-uart",
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match,
 	},
 	.probe			= ingenic_uart_probe,
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index d75a66c..826c5c4 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <linux/of_irq.h>
 #include <linux/delay.h>
@@ -32,6 +33,11 @@
 #define UART_ERRATA_i202_MDR1_ACCESS	(1 << 0)
 #define OMAP_UART_WER_HAS_TX_WAKEUP	(1 << 1)
 #define OMAP_DMA_TX_KICK		(1 << 2)
+/*
+ * See Advisory 21 in AM437x errata SPRZ408B, updated April 2015.
+ * The same erratum also applies to AM335x and DRA7x processors.
+ */
+#define UART_ERRATA_CLOCK_DISABLE	(1 << 3)
 
 #define OMAP_UART_FCR_RX_TRIG		6
 #define OMAP_UART_FCR_TX_TRIG		4
@@ -53,6 +59,12 @@
 #define OMAP_UART_MVR_MAJ_SHIFT		8
 #define OMAP_UART_MVR_MIN_MASK		0x3f
 
+/* SYSC register bitmasks */
+#define OMAP_UART_SYSC_SOFTRESET	(1 << 1)
+
+/* SYSS register bitmasks */
+#define OMAP_UART_SYSS_RESETDONE	(1 << 0)
+
 #define UART_TI752_TLR_TX	0
 #define UART_TI752_TLR_RX	4
 
@@ -100,6 +112,7 @@
 	struct work_struct qos_work;
 	struct uart_8250_dma omap8250_dma;
 	spinlock_t rx_dma_lock;
+	bool rx_dma_broken;
 };
 
 static u32 uart_read(struct uart_8250_port *up, u32 reg)
@@ -232,6 +245,15 @@
 	serial_out(up, UART_OMAP_SCR, priv->scr);
 }
 
+static void omap8250_update_mdr1(struct uart_8250_port *up,
+				 struct omap8250_priv *priv)
+{
+	if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS)
+		omap_8250_mdr1_errataset(up, priv);
+	else
+		serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+}
+
 static void omap8250_restore_regs(struct uart_8250_port *up)
 {
 	struct omap8250_priv *priv = up->port.private_data;
@@ -282,11 +304,9 @@
 	serial_out(up, UART_XOFF1, priv->xoff);
 
 	serial_out(up, UART_LCR, up->lcr);
-	/* need mode A for FCR */
-	if (priv->habit & UART_ERRATA_i202_MDR1_ACCESS)
-		omap_8250_mdr1_errataset(up, priv);
-	else
-		serial_out(up, UART_OMAP_MDR1, priv->mdr1);
+
+	omap8250_update_mdr1(up, priv);
+
 	up->port.ops->set_mctrl(&up->port, up->port.mctrl);
 }
 
@@ -428,12 +448,9 @@
 		priv->efr |= UART_EFR_CTS;
 	} else	if (up->port.flags & UPF_SOFT_FLOW) {
 		/*
-		 * IXON Flag:
-		 * Enable XON/XOFF flow control on input.
-		 * Receiver compares XON1, XOFF1.
+		 * OMAP rx s/w flow control is borked; the transmitter remains
+		 * stuck off even if rx flow control is subsequently disabled
 		 */
-		if (termios->c_iflag & IXON)
-			priv->efr |= OMAP_UART_SW_RX;
 
 		/*
 		 * IXOFF Flag:
@@ -444,15 +461,6 @@
 			up->port.status |= UPSTAT_AUTOXOFF;
 			priv->efr |= OMAP_UART_SW_TX;
 		}
-
-		/*
-		 * IXANY Flag:
-		 * Enable any character to restart output.
-		 * Operation resumes after receiving any
-		 * character after recognition of the XOFF character
-		 */
-		if (termios->c_iflag & IXANY)
-			up->mcr |= UART_MCR_XONANY;
 	}
 	omap8250_restore_regs(up);
 
@@ -530,14 +538,14 @@
 
 	switch (revision) {
 	case OMAP_UART_REV_46:
-		priv->habit = UART_ERRATA_i202_MDR1_ACCESS;
+		priv->habit |= UART_ERRATA_i202_MDR1_ACCESS;
 		break;
 	case OMAP_UART_REV_52:
-		priv->habit = UART_ERRATA_i202_MDR1_ACCESS |
+		priv->habit |= UART_ERRATA_i202_MDR1_ACCESS |
 				OMAP_UART_WER_HAS_TX_WAKEUP;
 		break;
 	case OMAP_UART_REV_63:
-		priv->habit = UART_ERRATA_i202_MDR1_ACCESS |
+		priv->habit |= UART_ERRATA_i202_MDR1_ACCESS |
 			OMAP_UART_WER_HAS_TX_WAKEUP;
 		break;
 	default:
@@ -754,6 +762,7 @@
 	struct omap8250_priv	*priv = p->port.private_data;
 	struct uart_8250_dma	*dma = p->dma;
 	unsigned long		flags;
+	int ret;
 
 	spin_lock_irqsave(&priv->rx_dma_lock, flags);
 
@@ -762,7 +771,9 @@
 		return;
 	}
 
-	dmaengine_pause(dma->rxchan);
+	ret = dmaengine_pause(dma->rxchan);
+	if (WARN_ON_ONCE(ret))
+		priv->rx_dma_broken = true;
 
 	spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
 
@@ -806,6 +817,9 @@
 		break;
 	}
 
+	if (priv->rx_dma_broken)
+		return -EINVAL;
+
 	spin_lock_irqsave(&priv->rx_dma_lock, flags);
 
 	if (dma->rx_running)
@@ -1054,6 +1068,20 @@
 	return 0;
 }
 
+static const u8 am3352_habit = OMAP_DMA_TX_KICK | UART_ERRATA_CLOCK_DISABLE;
+static const u8 am4372_habit = UART_ERRATA_CLOCK_DISABLE;
+
+static const struct of_device_id omap8250_dt_ids[] = {
+	{ .compatible = "ti,omap2-uart" },
+	{ .compatible = "ti,omap3-uart" },
+	{ .compatible = "ti,omap4-uart" },
+	{ .compatible = "ti,am3352-uart", .data = &am3352_habit, },
+	{ .compatible = "ti,am4372-uart", .data = &am4372_habit, },
+	{ .compatible = "ti,dra742-uart", .data = &am4372_habit, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
+
 static int omap8250_probe(struct platform_device *pdev)
 {
 	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1118,11 +1146,17 @@
 	up.port.unthrottle = omap_8250_unthrottle;
 
 	if (pdev->dev.of_node) {
+		const struct of_device_id *id;
+
 		ret = of_alias_get_id(pdev->dev.of_node, "serial");
 
 		of_property_read_u32(pdev->dev.of_node, "clock-frequency",
 				     &up.port.uartclk);
 		priv->wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
+
+		id = of_match_device(of_match_ptr(omap8250_dt_ids), &pdev->dev);
+		if (id && id->data)
+			priv->habit |= *(u8 *)id->data;
 	} else {
 		ret = pdev->id;
 	}
@@ -1180,6 +1214,11 @@
 
 			if (of_machine_is_compatible("ti,am33xx"))
 				priv->habit |= OMAP_DMA_TX_KICK;
+			/*
+			 * pause is currently not supported, at least on
+			 * omap-sdma and edma.
+			 */
+			priv->rx_dma_broken = true;
 		}
 	}
 #endif
@@ -1257,17 +1296,46 @@
 {
 	u32 val;
 
-	val = serial_in(up, UART_OMAP_MDR1);
+	val = serial_in(up, UART_OMAP_SCR);
 	/*
-	 * If we lose context, then MDR1 is set to its reset value which is
-	 * UART_OMAP_MDR1_DISABLE. After set_termios() we set it either to 13x
-	 * or 16x but never to disable again.
+	 * If we lose context, then SCR is set to its reset value of zero.
+	 * After set_termios() we set bit 3 of SCR (TX_EMPTY_CTL_IT) to 1,
+	 * among other bits, to never set the register back to zero again.
 	 */
-	if (val == UART_OMAP_MDR1_DISABLE)
+	if (!val)
 		return 1;
 	return 0;
 }
 
+/* TODO: in future, this should happen via API in drivers/reset/ */
+static int omap8250_soft_reset(struct device *dev)
+{
+	struct omap8250_priv *priv = dev_get_drvdata(dev);
+	struct uart_8250_port *up = serial8250_get_port(priv->line);
+	int timeout = 100;
+	int sysc;
+	int syss;
+
+	sysc = serial_in(up, UART_OMAP_SYSC);
+
+	/* softreset the UART */
+	sysc |= OMAP_UART_SYSC_SOFTRESET;
+	serial_out(up, UART_OMAP_SYSC, sysc);
+
+	/* By experiment, 1us is enough for the reset to complete on AM335x */
+	do {
+		udelay(1);
+		syss = serial_in(up, UART_OMAP_SYSS);
+	} while (--timeout && !(syss & OMAP_UART_SYSS_RESETDONE));
+
+	if (!timeout) {
+		dev_err(dev, "timed out waiting for reset done\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
 static int omap8250_runtime_suspend(struct device *dev)
 {
 	struct omap8250_priv *priv = dev_get_drvdata(dev);
@@ -1285,7 +1353,18 @@
 			return -EBUSY;
 	}
 
-	if (up->dma)
+	if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
+		int ret;
+
+		ret = omap8250_soft_reset(dev);
+		if (ret)
+			return ret;
+
+		/* Restore to UART mode after reset (for wakeup) */
+		omap8250_update_mdr1(up, priv);
+	}
+
+	if (up->dma && up->dma->rxchan)
 		omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
 
 	priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
@@ -1310,7 +1389,7 @@
 	if (loss_cntx)
 		omap8250_restore_regs(up);
 
-	if (up->dma)
+	if (up->dma && up->dma->rxchan)
 		omap_8250_rx_dma(up, 0);
 
 	priv->latency = priv->calc_latency;
@@ -1367,14 +1446,6 @@
 	.complete       = omap8250_complete,
 };
 
-static const struct of_device_id omap8250_dt_ids[] = {
-	{ .compatible = "ti,omap2-uart" },
-	{ .compatible = "ti,omap3-uart" },
-	{ .compatible = "ti,omap4-uart" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, omap8250_dt_ids);
-
 static struct platform_driver omap8250_platform_driver = {
 	.driver = {
 		.name		= "omap8250",
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e55f18b..68042dd 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1417,6 +1417,10 @@
 	reg |= BYT_PRV_CLK_EN | BYT_PRV_CLK_UPDATE;
 	writel(reg, p->membase + BYT_PRV_CLK);
 
+	p->status &= ~UPSTAT_AUTOCTS;
+	if (termios->c_cflag & CRTSCTS)
+		p->status |= UPSTAT_AUTOCTS;
+
 	serial8250_do_set_termios(p, termios, old);
 }
 
@@ -1685,11 +1689,65 @@
 	return ret;
 }
 
+/* RTS is controlled by MCR if this bit is 0 */
+#define FINTEK_RTS_CONTROL_BY_HW	BIT(4)
+/* only takes effect when FINTEK_RTS_CONTROL_BY_HW is set */
+#define FINTEK_RTS_INVERT		BIT(5)
+
+/* Do the proper H/W transceiver setup before switching to RS485 mode */
+static int pci_fintek_rs485_config(struct uart_port *port,
+			       struct serial_rs485 *rs485)
+{
+	u8 setting;
+	u8 *index = (u8 *) port->private_data;
+	struct pci_dev *pci_dev = container_of(port->dev, struct pci_dev,
+						dev);
+
+	pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
+
+	if (!rs485)
+		rs485 = &port->rs485;
+	else if (rs485->flags & SER_RS485_ENABLED)
+		memset(rs485->padding, 0, sizeof(rs485->padding));
+	else
+		memset(rs485, 0, sizeof(*rs485));
+
+	/* F81504/508/512 do not support RTS delay before or after send */
+	rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
+
+	if (rs485->flags & SER_RS485_ENABLED) {
+		/* Enable RTS H/W control mode */
+		setting |= FINTEK_RTS_CONTROL_BY_HW;
+
+		if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+			/* RTS driving high on TX */
+			setting &= ~FINTEK_RTS_INVERT;
+		} else {
+			/* RTS driving low on TX */
+			setting |= FINTEK_RTS_INVERT;
+		}
+
+		rs485->delay_rts_after_send = 0;
+		rs485->delay_rts_before_send = 0;
+	} else {
+		/* Disable RTS H/W control mode */
+		setting &= ~(FINTEK_RTS_CONTROL_BY_HW | FINTEK_RTS_INVERT);
+	}
+
+	pci_write_config_byte(pci_dev, 0x40 + 8 * *index + 7, setting);
+
+	if (rs485 != &port->rs485)
+		port->rs485 = *rs485;
+
+	return 0;
+}
+
 static int pci_fintek_setup(struct serial_private *priv,
 			    const struct pciserial_board *board,
 			    struct uart_8250_port *port, int idx)
 {
 	struct pci_dev *pdev = priv->dev;
+	u8 *data;
 	u8 config_base;
 	u16 iobase;
 
@@ -1702,6 +1760,15 @@
 
 	port->port.iotype = UPIO_PORT;
 	port->port.iobase = iobase;
+	port->port.rs485_config = pci_fintek_rs485_config;
+
+	data = devm_kzalloc(&pdev->dev, sizeof(u8), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* preserve index in PCI configuration space */
+	*data = idx;
+	port->port.private_data = data;
 
 	return 0;
 }
@@ -1712,6 +1779,8 @@
 	u32 max_port, i;
 	u32 bar_data[3];
 	u8 config_base;
+	struct serial_private *priv = pci_get_drvdata(dev);
+	struct uart_8250_port *port;
 
 	switch (dev->device) {
 	case 0x1104: /* 4 ports */
@@ -1752,6 +1821,19 @@
 				(u8)((iobase & 0xff00) >> 8));
 
 		pci_write_config_byte(dev, config_base + 0x06, dev->irq);
+
+		if (priv) {
+			/*
+			 * Re-apply RS232/RS485 mode when called from
+			 * pciserial_resume_ports().
+			 */
+			port = serial8250_get_port(priv->line[i]);
+			pci_fintek_rs485_config(&port->port, NULL);
+		} else {
+			/*
+			 * First init, without port data: force RS232 mode.
+			 */
+			pci_write_config_byte(dev, config_base + 0x07, 0x01);
+		}
 	}
 
 	return max_port;
@@ -2017,6 +2099,12 @@
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
 #define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
 
+#define PCI_VENDOR_ID_PERICOM			0x12D8
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7951	0x7951
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7952	0x7952
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7954	0x7954
+#define PCI_DEVICE_ID_PERICOM_PI7C9X7958	0x7958
+
 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
 #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588	0x1588
@@ -2331,27 +2419,12 @@
 	 * Pericom
 	 */
 	{
-		.vendor		= 0x12d8,
-		.device		= 0x7952,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.setup		= pci_pericom_setup,
+		.vendor         = PCI_VENDOR_ID_PERICOM,
+		.device         = PCI_ANY_ID,
+		.subvendor      = PCI_ANY_ID,
+		.subdevice      = PCI_ANY_ID,
+		.setup          = pci_pericom_setup,
 	},
-	{
-		.vendor		= 0x12d8,
-		.device		= 0x7954,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.setup		= pci_pericom_setup,
-	},
-	{
-		.vendor		= 0x12d8,
-		.device		= 0x7958,
-		.subvendor	= PCI_ANY_ID,
-		.subdevice	= PCI_ANY_ID,
-		.setup		= pci_pericom_setup,
-	},
-
 	/*
 	 * PLX
 	 */
@@ -3056,6 +3129,10 @@
 	pbn_fintek_8,
 	pbn_fintek_12,
 	pbn_wch384_4,
+	pbn_pericom_PI7C9X7951,
+	pbn_pericom_PI7C9X7952,
+	pbn_pericom_PI7C9X7954,
+	pbn_pericom_PI7C9X7958,
 };
 
 /*
@@ -3881,7 +3958,6 @@
 		.base_baud	= 115200,
 		.first_offset	= 0x40,
 	},
-
 	[pbn_wch384_4] = {
 		.flags		= FL_BASE0,
 		.num_ports	= 4,
@@ -3889,6 +3965,33 @@
 		.uart_offset    = 8,
 		.first_offset   = 0xC0,
 	},
+	/*
+	 * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
+	 */
+	[pbn_pericom_PI7C9X7951] = {
+		.flags          = FL_BASE0,
+		.num_ports      = 1,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
+	[pbn_pericom_PI7C9X7952] = {
+		.flags          = FL_BASE0,
+		.num_ports      = 2,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
+	[pbn_pericom_PI7C9X7954] = {
+		.flags          = FL_BASE0,
+		.num_ports      = 4,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
+	[pbn_pericom_PI7C9X7958] = {
+		.flags          = FL_BASE0,
+		.num_ports      = 8,
+		.base_baud      = 921600,
+		.uart_offset	= 0x8,
+	},
 };
 
 static const struct pci_device_id blacklist[] = {
@@ -5154,6 +5257,25 @@
 		0,
 		0, pbn_exar_XR17V8358 },
 	/*
+	 * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
+	 */
+	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0,
+		0, pbn_pericom_PI7C9X7951 },
+	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0,
+		0, pbn_pericom_PI7C9X7952 },
+	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0,
+		0, pbn_pericom_PI7C9X7954 },
+	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0,
+		0, pbn_pericom_PI7C9X7958 },
+	/*
 	 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
 	 */
 	{	PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
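
Also in the 8250_pci.c hunks above, the three per-device Pericom quirk entries are collapsed into a single entry keyed on the vendor ID alone, relying on PCI_ANY_ID wildcards. A rough sketch of the wildcard matching rule (hypothetical helper and struct for illustration, not the driver's actual quirk lookup):

#include <linux/pci.h>

/* Hypothetical illustration: an entry matches when each field is either
 * PCI_ANY_ID or equal to the corresponding ID, so one entry with
 * .device = PCI_ANY_ID covers PI7C9X7951/7952/7954/7958.
 */
struct example_quirk {
	u32 vendor, device, subvendor, subdevice;
};

static bool example_quirk_matches(const struct example_quirk *q,
				  const struct pci_dev *dev)
{
	return (q->vendor    == PCI_ANY_ID || q->vendor    == dev->vendor) &&
	       (q->device    == PCI_ANY_ID || q->device    == dev->device) &&
	       (q->subvendor == PCI_ANY_ID || q->subvendor == dev->subsystem_vendor) &&
	       (q->subdevice == PCI_ANY_ID || q->subdevice == dev->subsystem_device);
}
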
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 50a09cd..658b392 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -41,6 +41,12 @@
 	{	"AEI1240",		0	},
 	/* Rockwell 56K ACF II Fax+Data+Voice Modem */
 	{	"AKY1021",		0 /*SPCI_FL_NO_SHIRQ*/	},
+	/*
+	 * ALi Fast Infrared Controller
+	 * The native driver (ali-ircc) is broken, so at least
+	 * this way it can be used with irtty-sir.
+	 */
+	{	"ALI5123",		0	},
 	/* AZT3005 PnP SOUND DEVICE */
 	{	"AZT4001",		0	},
 	/* Best Data Products Inc. Smart One 336F PnP Modem */
@@ -364,6 +370,11 @@
 	/* Winbond CIR port, should not be probed. We should keep track
 	   of it to prevent the legacy serial driver from probing it */
 	{	"WEC1022",		CIR_PORT	},
+	/*
+	 * SMSC IrCC SIR/FIR port; likewise, it should not be probed by
+	 * the serial driver, so that its own driver can bind to it.
+	 */
+	{	"SMCF010",		CIR_PORT	},
 	{	"",			0	}
 };
 
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
new file mode 100644
index 0000000..54e6c8d
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -0,0 +1,2912 @@
+/*
+ *  Base port operations for 8250/16550-type serial ports
+ *
+ *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *  Split from 8250_core.c, Copyright (C) 2001 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * A note about mapbase / membase
+ *
+ *  mapbase is the physical address of the IO port.
+ *  membase is an 'ioremapped' cookie.
+ */
+
+#if defined(CONFIG_SERIAL_8250_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/ratelimit.h>
+#include <linux/tty_flip.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/nmi.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8250.h"
+
+/*
+ * Debugging.
+ */
+#if 0
+#define DEBUG_AUTOCONF(fmt...)	printk(fmt)
+#else
+#define DEBUG_AUTOCONF(fmt...)	do { } while (0)
+#endif
+
+#define BOTH_EMPTY 	(UART_LSR_TEMT | UART_LSR_THRE)
+
+/*
+ * Here we define the default xmit fifo size used for each type of UART.
+ */
+static const struct serial8250_config uart_config[] = {
+	[PORT_UNKNOWN] = {
+		.name		= "unknown",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+	},
+	[PORT_8250] = {
+		.name		= "8250",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+	},
+	[PORT_16450] = {
+		.name		= "16450",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+	},
+	[PORT_16550] = {
+		.name		= "16550",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+	},
+	[PORT_16550A] = {
+		.name		= "16550A",
+		.fifo_size	= 16,
+		.tx_loadsz	= 16,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.rxtrig_bytes	= {1, 4, 8, 14},
+		.flags		= UART_CAP_FIFO,
+	},
+	[PORT_CIRRUS] = {
+		.name		= "Cirrus",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+	},
+	[PORT_16650] = {
+		.name		= "ST16650",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+	},
+	[PORT_16650V2] = {
+		.name		= "ST16650V2",
+		.fifo_size	= 32,
+		.tx_loadsz	= 16,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+				  UART_FCR_T_TRIG_00,
+		.rxtrig_bytes	= {8, 16, 24, 28},
+		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+	},
+	[PORT_16750] = {
+		.name		= "TI16750",
+		.fifo_size	= 64,
+		.tx_loadsz	= 64,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+				  UART_FCR7_64BYTE,
+		.rxtrig_bytes	= {1, 16, 32, 56},
+		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP | UART_CAP_AFE,
+	},
+	[PORT_STARTECH] = {
+		.name		= "Startech",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1,
+	},
+	[PORT_16C950] = {
+		.name		= "16C950/954",
+		.fifo_size	= 128,
+		.tx_loadsz	= 128,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		/* UART_CAP_EFR breaks billionon CF bluetooth card. */
+		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
+	},
+	[PORT_16654] = {
+		.name		= "ST16654",
+		.fifo_size	= 64,
+		.tx_loadsz	= 32,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+				  UART_FCR_T_TRIG_10,
+		.rxtrig_bytes	= {8, 16, 56, 60},
+		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+	},
+	[PORT_16850] = {
+		.name		= "XR16850",
+		.fifo_size	= 128,
+		.tx_loadsz	= 128,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+	},
+	[PORT_RSA] = {
+		.name		= "RSA",
+		.fifo_size	= 2048,
+		.tx_loadsz	= 2048,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11,
+		.flags		= UART_CAP_FIFO,
+	},
+	[PORT_NS16550A] = {
+		.name		= "NS16550A",
+		.fifo_size	= 16,
+		.tx_loadsz	= 16,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_NATSEMI,
+	},
+	[PORT_XSCALE] = {
+		.name		= "XScale",
+		.fifo_size	= 32,
+		.tx_loadsz	= 32,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_UUE | UART_CAP_RTOIE,
+	},
+	[PORT_OCTEON] = {
+		.name		= "OCTEON",
+		.fifo_size	= 64,
+		.tx_loadsz	= 64,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO,
+	},
+	[PORT_AR7] = {
+		.name		= "AR7",
+		.fifo_size	= 16,
+		.tx_loadsz	= 16,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
+	},
+	[PORT_U6_16550A] = {
+		.name		= "U6_16550A",
+		.fifo_size	= 64,
+		.tx_loadsz	= 64,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
+	},
+	[PORT_TEGRA] = {
+		.name		= "Tegra",
+		.fifo_size	= 32,
+		.tx_loadsz	= 8,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01 |
+				  UART_FCR_T_TRIG_01,
+		.rxtrig_bytes	= {1, 4, 8, 14},
+		.flags		= UART_CAP_FIFO | UART_CAP_RTOIE,
+	},
+	[PORT_XR17D15X] = {
+		.name		= "XR17D15X",
+		.fifo_size	= 64,
+		.tx_loadsz	= 64,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
+				  UART_CAP_SLEEP,
+	},
+	[PORT_XR17V35X] = {
+		.name		= "XR17V35X",
+		.fifo_size	= 256,
+		.tx_loadsz	= 256,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_11 |
+				  UART_FCR_T_TRIG_11,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE | UART_CAP_EFR |
+				  UART_CAP_SLEEP,
+	},
+	[PORT_LPC3220] = {
+		.name		= "LPC3220",
+		.fifo_size	= 64,
+		.tx_loadsz	= 32,
+		.fcr		= UART_FCR_DMA_SELECT | UART_FCR_ENABLE_FIFO |
+				  UART_FCR_R_TRIG_00 | UART_FCR_T_TRIG_00,
+		.flags		= UART_CAP_FIFO,
+	},
+	[PORT_BRCM_TRUMANAGE] = {
+		.name		= "TruManage",
+		.fifo_size	= 1,
+		.tx_loadsz	= 1024,
+		.flags		= UART_CAP_HFIFO,
+	},
+	[PORT_8250_CIR] = {
+		.name		= "CIR port"
+	},
+	[PORT_ALTR_16550_F32] = {
+		.name		= "Altera 16550 FIFO32",
+		.fifo_size	= 32,
+		.tx_loadsz	= 32,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
+	},
+	[PORT_ALTR_16550_F64] = {
+		.name		= "Altera 16550 FIFO64",
+		.fifo_size	= 64,
+		.tx_loadsz	= 64,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
+	},
+	[PORT_ALTR_16550_F128] = {
+		.name		= "Altera 16550 FIFO128",
+		.fifo_size	= 128,
+		.tx_loadsz	= 128,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
+		.flags		= UART_CAP_FIFO | UART_CAP_AFE,
+	},
+	/*
+	 * tx_loadsz is set to 63 bytes instead of 64 to work around
+	 * erratum A-008006, which requires tx_loadsz to be configured
+	 * to less than the maximum supported FIFO size.
+	 */
+	[PORT_16550A_FSL64] = {
+		.name		= "16550A_FSL64",
+		.fifo_size	= 64,
+		.tx_loadsz	= 63,
+		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10 |
+				  UART_FCR7_64BYTE,
+		.flags		= UART_CAP_FIFO,
+	},
+};
+
+/* Uart divisor latch read */
+static int default_serial_dl_read(struct uart_8250_port *up)
+{
+	return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
+}
+
+/* Uart divisor latch write */
+static void default_serial_dl_write(struct uart_8250_port *up, int value)
+{
+	serial_out(up, UART_DLL, value & 0xff);
+	serial_out(up, UART_DLM, value >> 8 & 0xff);
+}
+
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
+
+/* Au1x00/RT288x UART hardware has a weird register layout */
+static const s8 au_io_in_map[8] = {
+	 0,	/* UART_RX  */
+	 2,	/* UART_IER */
+	 3,	/* UART_IIR */
+	 5,	/* UART_LCR */
+	 6,	/* UART_MCR */
+	 7,	/* UART_LSR */
+	 8,	/* UART_MSR */
+	-1,	/* UART_SCR (unmapped) */
+};
+
+static const s8 au_io_out_map[8] = {
+	 1,	/* UART_TX  */
+	 2,	/* UART_IER */
+	 4,	/* UART_FCR */
+	 5,	/* UART_LCR */
+	 6,	/* UART_MCR */
+	-1,	/* UART_LSR (unmapped) */
+	-1,	/* UART_MSR (unmapped) */
+	-1,	/* UART_SCR (unmapped) */
+};
+
+static unsigned int au_serial_in(struct uart_port *p, int offset)
+{
+	if (offset >= ARRAY_SIZE(au_io_in_map))
+		return UINT_MAX;
+	offset = au_io_in_map[offset];
+	if (offset < 0)
+		return UINT_MAX;
+	return __raw_readl(p->membase + (offset << p->regshift));
+}
+
+static void au_serial_out(struct uart_port *p, int offset, int value)
+{
+	if (offset >= ARRAY_SIZE(au_io_out_map))
+		return;
+	offset = au_io_out_map[offset];
+	if (offset < 0)
+		return;
+	__raw_writel(value, p->membase + (offset << p->regshift));
+}
+
+/* Au1x00 hasn't got a standard divisor latch */
+static int au_serial_dl_read(struct uart_8250_port *up)
+{
+	return __raw_readl(up->port.membase + 0x28);
+}
+
+static void au_serial_dl_write(struct uart_8250_port *up, int value)
+{
+	__raw_writel(value, up->port.membase + 0x28);
+}
+
+#endif
+
+static unsigned int hub6_serial_in(struct uart_port *p, int offset)
+{
+	offset = offset << p->regshift;
+	outb(p->hub6 - 1 + offset, p->iobase);
+	return inb(p->iobase + 1);
+}
+
+static void hub6_serial_out(struct uart_port *p, int offset, int value)
+{
+	offset = offset << p->regshift;
+	outb(p->hub6 - 1 + offset, p->iobase);
+	outb(value, p->iobase + 1);
+}
+
+static unsigned int mem_serial_in(struct uart_port *p, int offset)
+{
+	offset = offset << p->regshift;
+	return readb(p->membase + offset);
+}
+
+static void mem_serial_out(struct uart_port *p, int offset, int value)
+{
+	offset = offset << p->regshift;
+	writeb(value, p->membase + offset);
+}
+
+static void mem32_serial_out(struct uart_port *p, int offset, int value)
+{
+	offset = offset << p->regshift;
+	writel(value, p->membase + offset);
+}
+
+static unsigned int mem32_serial_in(struct uart_port *p, int offset)
+{
+	offset = offset << p->regshift;
+	return readl(p->membase + offset);
+}
+
+static void mem32be_serial_out(struct uart_port *p, int offset, int value)
+{
+	offset = offset << p->regshift;
+	iowrite32be(value, p->membase + offset);
+}
+
+static unsigned int mem32be_serial_in(struct uart_port *p, int offset)
+{
+	offset = offset << p->regshift;
+	return ioread32be(p->membase + offset);
+}
+
+static unsigned int io_serial_in(struct uart_port *p, int offset)
+{
+	offset = offset << p->regshift;
+	return inb(p->iobase + offset);
+}
+
+static void io_serial_out(struct uart_port *p, int offset, int value)
+{
+	offset = offset << p->regshift;
+	outb(value, p->iobase + offset);
+}
+
+static int serial8250_default_handle_irq(struct uart_port *port);
+static int exar_handle_irq(struct uart_port *port);
+
+static void set_io_from_upio(struct uart_port *p)
+{
+	struct uart_8250_port *up = up_to_u8250p(p);
+
+	up->dl_read = default_serial_dl_read;
+	up->dl_write = default_serial_dl_write;
+
+	switch (p->iotype) {
+	case UPIO_HUB6:
+		p->serial_in = hub6_serial_in;
+		p->serial_out = hub6_serial_out;
+		break;
+
+	case UPIO_MEM:
+		p->serial_in = mem_serial_in;
+		p->serial_out = mem_serial_out;
+		break;
+
+	case UPIO_MEM32:
+		p->serial_in = mem32_serial_in;
+		p->serial_out = mem32_serial_out;
+		break;
+
+	case UPIO_MEM32BE:
+		p->serial_in = mem32be_serial_in;
+		p->serial_out = mem32be_serial_out;
+		break;
+
+#if defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_SERIAL_8250_RT288X)
+	case UPIO_AU:
+		p->serial_in = au_serial_in;
+		p->serial_out = au_serial_out;
+		up->dl_read = au_serial_dl_read;
+		up->dl_write = au_serial_dl_write;
+		break;
+#endif
+
+	default:
+		p->serial_in = io_serial_in;
+		p->serial_out = io_serial_out;
+		break;
+	}
+	/* Remember loaded iotype */
+	up->cur_iotype = p->iotype;
+	p->handle_irq = serial8250_default_handle_irq;
+}
+
+static void
+serial_port_out_sync(struct uart_port *p, int offset, int value)
+{
+	switch (p->iotype) {
+	case UPIO_MEM:
+	case UPIO_MEM32:
+	case UPIO_MEM32BE:
+	case UPIO_AU:
+		p->serial_out(p, offset, value);
+		p->serial_in(p, UART_LCR);	/* safe, no side-effects */
+		break;
+	default:
+		p->serial_out(p, offset, value);
+	}
+}
+
+/*
+ * For the 16C950
+ */
+static void serial_icr_write(struct uart_8250_port *up, int offset, int value)
+{
+	serial_out(up, UART_SCR, offset);
+	serial_out(up, UART_ICR, value);
+}
+
+static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
+{
+	unsigned int value;
+
+	serial_icr_write(up, UART_ACR, up->acr | UART_ACR_ICRRD);
+	serial_out(up, UART_SCR, offset);
+	value = serial_in(up, UART_ICR);
+	serial_icr_write(up, UART_ACR, up->acr);
+
+	return value;
+}
+
+/*
+ * FIFO support.
+ */
+static void serial8250_clear_fifos(struct uart_8250_port *p)
+{
+	if (p->capabilities & UART_CAP_FIFO) {
+		serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+		serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+			       UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+		serial_out(p, UART_FCR, 0);
+	}
+}
+
+void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
+{
+	serial8250_clear_fifos(p);
+	serial_out(p, UART_FCR, p->fcr);
+}
+EXPORT_SYMBOL_GPL(serial8250_clear_and_reinit_fifos);
+
+void serial8250_rpm_get(struct uart_8250_port *p)
+{
+	if (!(p->capabilities & UART_CAP_RPM))
+		return;
+	pm_runtime_get_sync(p->port.dev);
+}
+EXPORT_SYMBOL_GPL(serial8250_rpm_get);
+
+void serial8250_rpm_put(struct uart_8250_port *p)
+{
+	if (!(p->capabilities & UART_CAP_RPM))
+		return;
+	pm_runtime_mark_last_busy(p->port.dev);
+	pm_runtime_put_autosuspend(p->port.dev);
+}
+EXPORT_SYMBOL_GPL(serial8250_rpm_put);
+
+/*
+ * These two wrappers ensure that serial8250_rpm_get_tx() can be called more
+ * than once and serial8250_rpm_put_tx() will still drop the single runtime PM
+ * reference once the FIFO is empty and the HW can idle again.
+ */
+static void serial8250_rpm_get_tx(struct uart_8250_port *p)
+{
+	unsigned char rpm_active;
+
+	if (!(p->capabilities & UART_CAP_RPM))
+		return;
+
+	rpm_active = xchg(&p->rpm_tx_active, 1);
+	if (rpm_active)
+		return;
+	pm_runtime_get_sync(p->port.dev);
+}
+
+static void serial8250_rpm_put_tx(struct uart_8250_port *p)
+{
+	unsigned char rpm_active;
+
+	if (!(p->capabilities & UART_CAP_RPM))
+		return;
+
+	rpm_active = xchg(&p->rpm_tx_active, 0);
+	if (!rpm_active)
+		return;
+	pm_runtime_mark_last_busy(p->port.dev);
+	pm_runtime_put_autosuspend(p->port.dev);
+}
+
+/*
+ * IER sleep support.  UARTs which have EFRs need the "extended
+ * capability" bit enabled.  Note that on XR16C850s, we need to
+ * reset LCR to write to IER.
+ */
+static void serial8250_set_sleep(struct uart_8250_port *p, int sleep)
+{
+	unsigned char lcr = 0, efr = 0;
+	/*
+	 * Exar UARTs have a SLEEP register that enables or disables
+	 * each UART to enter sleep mode separately.  On the XR17V35x the
+	 * register is accessible to each UART at the UART_EXAR_SLEEP
+	 * offset but the UART channel may only write to the corresponding
+	 * bit.
+	 */
+	serial8250_rpm_get(p);
+	if ((p->port.type == PORT_XR17V35X) ||
+	   (p->port.type == PORT_XR17D15X)) {
+		serial_out(p, UART_EXAR_SLEEP, sleep ? 0xff : 0);
+		goto out;
+	}
+
+	if (p->capabilities & UART_CAP_SLEEP) {
+		if (p->capabilities & UART_CAP_EFR) {
+			lcr = serial_in(p, UART_LCR);
+			efr = serial_in(p, UART_EFR);
+			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+			serial_out(p, UART_EFR, UART_EFR_ECB);
+			serial_out(p, UART_LCR, 0);
+		}
+		serial_out(p, UART_IER, sleep ? UART_IERX_SLEEP : 0);
+		if (p->capabilities & UART_CAP_EFR) {
+			serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
+			serial_out(p, UART_EFR, efr);
+			serial_out(p, UART_LCR, lcr);
+		}
+	}
+out:
+	serial8250_rpm_put(p);
+}
+
+#ifdef CONFIG_SERIAL_8250_RSA
+/*
+ * Attempts to turn on the RSA FIFO.  Returns zero on failure.
+ * We set the port uart clock rate if we succeed.
+ */
+static int __enable_rsa(struct uart_8250_port *up)
+{
+	unsigned char mode;
+	int result;
+
+	mode = serial_in(up, UART_RSA_MSR);
+	result = mode & UART_RSA_MSR_FIFO;
+
+	if (!result) {
+		serial_out(up, UART_RSA_MSR, mode | UART_RSA_MSR_FIFO);
+		mode = serial_in(up, UART_RSA_MSR);
+		result = mode & UART_RSA_MSR_FIFO;
+	}
+
+	if (result)
+		up->port.uartclk = SERIAL_RSA_BAUD_BASE * 16;
+
+	return result;
+}
+
+static void enable_rsa(struct uart_8250_port *up)
+{
+	if (up->port.type == PORT_RSA) {
+		if (up->port.uartclk != SERIAL_RSA_BAUD_BASE * 16) {
+			spin_lock_irq(&up->port.lock);
+			__enable_rsa(up);
+			spin_unlock_irq(&up->port.lock);
+		}
+		if (up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16)
+			serial_out(up, UART_RSA_FRR, 0);
+	}
+}
+
+/*
+ * Attempts to turn off the RSA FIFO and restore the standard baud base.
+ * It is unknown why interrupts were disabled in here.  However,
+ * the caller is expected to preserve this behaviour by grabbing
+ * the spinlock before calling this function.
+ */
+static void disable_rsa(struct uart_8250_port *up)
+{
+	unsigned char mode;
+	int result;
+
+	if (up->port.type == PORT_RSA &&
+	    up->port.uartclk == SERIAL_RSA_BAUD_BASE * 16) {
+		spin_lock_irq(&up->port.lock);
+
+		mode = serial_in(up, UART_RSA_MSR);
+		result = !(mode & UART_RSA_MSR_FIFO);
+
+		if (!result) {
+			serial_out(up, UART_RSA_MSR, mode & ~UART_RSA_MSR_FIFO);
+			mode = serial_in(up, UART_RSA_MSR);
+			result = !(mode & UART_RSA_MSR_FIFO);
+		}
+
+		if (result)
+			up->port.uartclk = SERIAL_RSA_BAUD_BASE_LO * 16;
+		spin_unlock_irq(&up->port.lock);
+	}
+}
+#endif /* CONFIG_SERIAL_8250_RSA */
+
+/*
+ * This is a quickie test to see how big the FIFO is.
+ * It doesn't work all of the time, more's the pity.
+ */
+static int size_fifo(struct uart_8250_port *up)
+{
+	unsigned char old_fcr, old_mcr, old_lcr;
+	unsigned short old_dl;
+	int count;
+
+	old_lcr = serial_in(up, UART_LCR);
+	serial_out(up, UART_LCR, 0);
+	old_fcr = serial_in(up, UART_FCR);
+	old_mcr = serial_in(up, UART_MCR);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+		    UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+	serial_out(up, UART_MCR, UART_MCR_LOOP);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+	old_dl = serial_dl_read(up);
+	serial_dl_write(up, 0x0001);
+	serial_out(up, UART_LCR, 0x03);
+	for (count = 0; count < 256; count++)
+		serial_out(up, UART_TX, count);
+	mdelay(20);/* FIXME - schedule_timeout */
+	for (count = 0; (serial_in(up, UART_LSR) & UART_LSR_DR) &&
+	     (count < 256); count++)
+		serial_in(up, UART_RX);
+	serial_out(up, UART_FCR, old_fcr);
+	serial_out(up, UART_MCR, old_mcr);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+	serial_dl_write(up, old_dl);
+	serial_out(up, UART_LCR, old_lcr);
+
+	return count;
+}
+
+/*
+ * Read UART ID using the divisor method - set DLL and DLM to zero
+ * and the revision will be in DLL and device type in DLM.  We
+ * preserve the device state across this.
+ */
+static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+{
+	unsigned char old_dll, old_dlm, old_lcr;
+	unsigned int id;
+
+	old_lcr = serial_in(p, UART_LCR);
+	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+
+	old_dll = serial_in(p, UART_DLL);
+	old_dlm = serial_in(p, UART_DLM);
+
+	serial_out(p, UART_DLL, 0);
+	serial_out(p, UART_DLM, 0);
+
+	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+
+	serial_out(p, UART_DLL, old_dll);
+	serial_out(p, UART_DLM, old_dlm);
+	serial_out(p, UART_LCR, old_lcr);
+
+	return id;
+}
+
+/*
+ * This is a helper routine to autodetect StarTech/Exar/Oxsemi UARTs.
+ * When this function is called we know it is at least a StarTech
+ * 16650 V2, but it might be one of several StarTech UARTs, or one of
+ * its clones.  (We treat the broken original StarTech 16650 V1 as a
+ * 16550, and why not?  StarTech doesn't seem to even acknowledge its
+ * existence.)
+ *
+ * What evil have men's minds wrought...
+ */
+static void autoconfig_has_efr(struct uart_8250_port *up)
+{
+	unsigned int id1, id2, id3, rev;
+
+	/*
+	 * Everything with an EFR has SLEEP
+	 */
+	up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+
+	/*
+	 * First we check to see if it's an Oxford Semiconductor UART.
+	 *
+	 * We have to do this here because some non-National
+	 * Semiconductor clone chips lock up if you try writing to the
+	 * LSR register (which serial_icr_read() does).
+	 */
+
+	/*
+	 * Check for Oxford Semiconductor 16C950.
+	 *
+	 * EFR [4] must be set else this test fails.
+	 *
+	 * This shouldn't be necessary, but Mike Hudson (Exoray@isys.ca)
+	 * claims that it's needed for 952 dual UARTs (which are not
+	 * recommended for new designs).
+	 */
+	up->acr = 0;
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+	serial_out(up, UART_EFR, UART_EFR_ECB);
+	serial_out(up, UART_LCR, 0x00);
+	id1 = serial_icr_read(up, UART_ID1);
+	id2 = serial_icr_read(up, UART_ID2);
+	id3 = serial_icr_read(up, UART_ID3);
+	rev = serial_icr_read(up, UART_REV);
+
+	DEBUG_AUTOCONF("950id=%02x:%02x:%02x:%02x ", id1, id2, id3, rev);
+
+	if (id1 == 0x16 && id2 == 0xC9 &&
+	    (id3 == 0x50 || id3 == 0x52 || id3 == 0x54)) {
+		up->port.type = PORT_16C950;
+
+		/*
+		 * Enable work around for the Oxford Semiconductor 952 rev B
+		 * chip which causes it to seriously miscalculate baud rates
+		 * when DLL is 0.
+		 */
+		if (id3 == 0x52 && rev == 0x01)
+			up->bugs |= UART_BUG_QUOT;
+		return;
+	}
+
+	/*
+	 * We check for a XR16C850 by setting DLL and DLM to 0, and then
+	 * reading back DLL and DLM.  The chip type depends on the DLM
+	 * value read back:
+	 *  0x10 - XR16C850 and the DLL contains the chip revision.
+	 *  0x12 - XR16C2850.
+	 *  0x14 - XR16C854.
+	 */
+	id1 = autoconfig_read_divisor_id(up);
+	DEBUG_AUTOCONF("850id=%04x ", id1);
+
+	id2 = id1 >> 8;
+	if (id2 == 0x10 || id2 == 0x12 || id2 == 0x14) {
+		up->port.type = PORT_16850;
+		return;
+	}
+
+	/*
+	 * It wasn't an XR16C850.
+	 *
+	 * We distinguish between the '654 and the '650 by counting
+	 * how many bytes are in the FIFO.  I'm using this for now,
+	 * since that's the technique that was sent to me in the
+	 * serial driver update, but I'm not convinced this works.
+	 * I've had problems doing this in the past.  -TYT
+	 */
+	if (size_fifo(up) == 64)
+		up->port.type = PORT_16654;
+	else
+		up->port.type = PORT_16650V2;
+}
+
+/*
+ * We detected a chip without a FIFO.  Only two fall into
+ * this category - the original 8250 and the 16450.  The
+ * 16450 has a scratch register (accessible with LCR=0)
+ */
+static void autoconfig_8250(struct uart_8250_port *up)
+{
+	unsigned char scratch, status1, status2;
+
+	up->port.type = PORT_8250;
+
+	scratch = serial_in(up, UART_SCR);
+	serial_out(up, UART_SCR, 0xa5);
+	status1 = serial_in(up, UART_SCR);
+	serial_out(up, UART_SCR, 0x5a);
+	status2 = serial_in(up, UART_SCR);
+	serial_out(up, UART_SCR, scratch);
+
+	if (status1 == 0xa5 && status2 == 0x5a)
+		up->port.type = PORT_16450;
+}
+
+static int broken_efr(struct uart_8250_port *up)
+{
+	/*
+	 * Exar ST16C2550 "A2" devices incorrectly detect as
+	 * having an EFR, and report an ID of 0x0201.  See
+	 * http://linux.derkeiler.com/Mailing-Lists/Kernel/2004-11/4812.html
+	 */
+	if (autoconfig_read_divisor_id(up) == 0x0201 && size_fifo(up) == 16)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * We know that the chip has FIFOs.  Does it have an EFR?  The
+ * EFR is located in the same register position as the IIR and
+ * we know the top two bits of the IIR are currently set.  The
+ * EFR should contain zero.  Try to read the EFR.
+ */
+static void autoconfig_16550a(struct uart_8250_port *up)
+{
+	unsigned char status1, status2;
+	unsigned int iersave;
+
+	up->port.type = PORT_16550A;
+	up->capabilities |= UART_CAP_FIFO;
+
+	/*
+	 * XR17V35x UARTs have an extra divisor register, DLD, that
+	 * gets enabled when DLAB is set, which would cause the device
+	 * to incorrectly match and be assigned port type PORT_16650.
+	 * The EFR for this UART is found at offset 0x09.  Instead,
+	 * check the Device ID (DVID) register for a 2, 4 or 8 port
+	 * UART.
+	 */
+	if (up->port.flags & UPF_EXAR_EFR) {
+		status1 = serial_in(up, UART_EXAR_DVID);
+		if (status1 == 0x82 || status1 == 0x84 || status1 == 0x88) {
+			DEBUG_AUTOCONF("Exar XR17V35x ");
+			up->port.type = PORT_XR17V35X;
+			up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
+						UART_CAP_SLEEP;
+
+			return;
+		}
+
+	}
+
+	/*
+	 * Check for presence of the EFR when DLAB is set.
+	 * Only ST16C650V1 UARTs pass this test.
+	 */
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+	if (serial_in(up, UART_EFR) == 0) {
+		serial_out(up, UART_EFR, 0xA8);
+		if (serial_in(up, UART_EFR) != 0) {
+			DEBUG_AUTOCONF("EFRv1 ");
+			up->port.type = PORT_16650;
+			up->capabilities |= UART_CAP_EFR | UART_CAP_SLEEP;
+		} else {
+			serial_out(up, UART_LCR, 0);
+			serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
+				   UART_FCR7_64BYTE);
+			status1 = serial_in(up, UART_IIR) >> 5;
+			serial_out(up, UART_FCR, 0);
+			serial_out(up, UART_LCR, 0);
+
+			if (status1 == 7)
+				up->port.type = PORT_16550A_FSL64;
+			else
+				DEBUG_AUTOCONF("Motorola 8xxx DUART ");
+		}
+		serial_out(up, UART_EFR, 0);
+		return;
+	}
+
+	/*
+	 * Maybe it requires 0xbf to be written to the LCR.
+	 * (other ST16C650V2 UARTs, TI16C752A, etc)
+	 */
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+	if (serial_in(up, UART_EFR) == 0 && !broken_efr(up)) {
+		DEBUG_AUTOCONF("EFRv2 ");
+		autoconfig_has_efr(up);
+		return;
+	}
+
+	/*
+	 * Check for a National Semiconductor SuperIO chip.
+	 * Attempt to switch to bank 2, read the value of the LOOP bit
+	 * from EXCR1. Switch back to bank 0, change it in MCR. Then
+	 * switch back to bank 2, read it from EXCR1 again and check
+	 * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
+	 */
+	serial_out(up, UART_LCR, 0);
+	status1 = serial_in(up, UART_MCR);
+	serial_out(up, UART_LCR, 0xE0);
+	status2 = serial_in(up, 0x02); /* EXCR1 */
+
+	if (!((status2 ^ status1) & UART_MCR_LOOP)) {
+		serial_out(up, UART_LCR, 0);
+		serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+		serial_out(up, UART_LCR, 0xE0);
+		status2 = serial_in(up, 0x02); /* EXCR1 */
+		serial_out(up, UART_LCR, 0);
+		serial_out(up, UART_MCR, status1);
+
+		if ((status2 ^ status1) & UART_MCR_LOOP) {
+			unsigned short quot;
+
+			serial_out(up, UART_LCR, 0xE0);
+
+			quot = serial_dl_read(up);
+			quot <<= 3;
+
+			if (ns16550a_goto_highspeed(up))
+				serial_dl_write(up, quot);
+
+			serial_out(up, UART_LCR, 0);
+
+			up->port.uartclk = 921600*16;
+			up->port.type = PORT_NS16550A;
+			up->capabilities |= UART_NATSEMI;
+			return;
+		}
+	}
+
+	/*
+	 * No EFR.  Try to detect a TI16750, which only sets bit 5 of
+	 * the IIR when 64 byte FIFO mode is enabled when DLAB is set.
+	 * Try setting it with and without DLAB set.  Cheap clones
+	 * set bit 5 without DLAB set.
+	 */
+	serial_out(up, UART_LCR, 0);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+	status1 = serial_in(up, UART_IIR) >> 5;
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
+	status2 = serial_in(up, UART_IIR) >> 5;
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+	serial_out(up, UART_LCR, 0);
+
+	DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
+
+	if (status1 == 6 && status2 == 7) {
+		up->port.type = PORT_16750;
+		up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
+		return;
+	}
+
+	/*
+	 * Try writing and reading the UART_IER_UUE bit (b6).
+	 * If it works, this is probably one of the Xscale platform's
+	 * internal UARTs.
+	 * We're going to explicitly set the UUE bit to 0 before
+	 * trying to write and read a 1 just to make sure it's not
+	 * already a 1 and maybe locked there before we even start.
+	 */
+	iersave = serial_in(up, UART_IER);
+	serial_out(up, UART_IER, iersave & ~UART_IER_UUE);
+	if (!(serial_in(up, UART_IER) & UART_IER_UUE)) {
+		/*
+		 * OK it's in a known zero state, try writing and reading
+		 * without disturbing the current state of the other bits.
+		 */
+		serial_out(up, UART_IER, iersave | UART_IER_UUE);
+		if (serial_in(up, UART_IER) & UART_IER_UUE) {
+			/*
+			 * It's an Xscale.
+			 * We'll leave the UART_IER_UUE bit set to 1 (enabled).
+			 */
+			DEBUG_AUTOCONF("Xscale ");
+			up->port.type = PORT_XSCALE;
+			up->capabilities |= UART_CAP_UUE | UART_CAP_RTOIE;
+			return;
+		}
+	} else {
+		/*
+		 * If we got here we couldn't force the IER_UUE bit to 0.
+		 * Log it and continue.
+		 */
+		DEBUG_AUTOCONF("Couldn't force IER_UUE to 0 ");
+	}
+	serial_out(up, UART_IER, iersave);
+
+	/*
+	 * Exar UARTs have EFR in a weird location
+	 */
+	if (up->port.flags & UPF_EXAR_EFR) {
+		DEBUG_AUTOCONF("Exar XR17D15x ");
+		up->port.type = PORT_XR17D15X;
+		up->capabilities |= UART_CAP_AFE | UART_CAP_EFR |
+				    UART_CAP_SLEEP;
+
+		return;
+	}
+
+	/*
+	 * We distinguish between 16550A and U6 16550A by counting
+	 * how many bytes are in the FIFO.
+	 */
+	if (up->port.type == PORT_16550A && size_fifo(up) == 64) {
+		up->port.type = PORT_U6_16550A;
+		up->capabilities |= UART_CAP_AFE;
+	}
+}
+
+/*
+ * This routine is called by rs_init() to initialize a specific serial
+ * port.  It determines what type of UART chip this serial port is
+ * using: 8250, 16450, 16550, 16550A.  The important question is
+ * whether or not this UART is a 16550A, since that determines
+ * whether we can use its FIFO features.
+ */
+static void autoconfig(struct uart_8250_port *up)
+{
+	unsigned char status1, scratch, scratch2, scratch3;
+	unsigned char save_lcr, save_mcr;
+	struct uart_port *port = &up->port;
+	unsigned long flags;
+	unsigned int old_capabilities;
+
+	if (!port->iobase && !port->mapbase && !port->membase)
+		return;
+
+	DEBUG_AUTOCONF("ttyS%d: autoconf (0x%04lx, 0x%p): ",
+		       serial_index(port), port->iobase, port->membase);
+
+	/*
+	 * We really do need global IRQs disabled here - we're going to
+	 * be frobbing the chip's IRQ enable register to see if it exists.
+	 */
+	spin_lock_irqsave(&port->lock, flags);
+
+	up->capabilities = 0;
+	up->bugs = 0;
+
+	if (!(port->flags & UPF_BUGGY_UART)) {
+		/*
+		 * Do a simple existence test first; if we fail this,
+		 * there's no point trying anything else.
+		 *
+		 * 0x80 is used as a nonsense port to guard against
+		 * false positives due to ISA bus float.  The
+		 * assumption is that 0x80 is a non-existent port;
+		 * which should be safe since include/asm/io.h also
+		 * makes this assumption.
+		 *
+		 * Note: this is safe as long as MCR bit 4 is clear
+		 * and the device is in "PC" mode.
+		 */
+		scratch = serial_in(up, UART_IER);
+		serial_out(up, UART_IER, 0);
+#ifdef __i386__
+		outb(0xff, 0x080);
+#endif
+		/*
+		 * Mask out IER[7:4] bits for test as some UARTs (e.g. TL
+		 * 16C754B) allow only to modify them if an EFR bit is set.
+		 */
+		scratch2 = serial_in(up, UART_IER) & 0x0f;
+		serial_out(up, UART_IER, 0x0F);
+#ifdef __i386__
+		outb(0, 0x080);
+#endif
+		scratch3 = serial_in(up, UART_IER) & 0x0f;
+		serial_out(up, UART_IER, scratch);
+		if (scratch2 != 0 || scratch3 != 0x0F) {
+			/*
+			 * We failed; there's nothing here
+			 */
+			spin_unlock_irqrestore(&port->lock, flags);
+			DEBUG_AUTOCONF("IER test failed (%02x, %02x) ",
+				       scratch2, scratch3);
+			goto out;
+		}
+	}
+
+	save_mcr = serial_in(up, UART_MCR);
+	save_lcr = serial_in(up, UART_LCR);
+
+	/*
+	 * Check to see if a UART is really there.  Certain broken
+	 * internal modems based on the Rockwell chipset fail this
+	 * test, because they apparently don't implement the loopback
+	 * test mode.  So this test is skipped on the COM 1 through
+	 * COM 4 ports.  This *should* be safe, since no board
+	 * manufacturer would be stupid enough to design a board
+	 * that conflicts with COM 1-4 --- we hope!
+	 */
+	if (!(port->flags & UPF_SKIP_TEST)) {
+		serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+		status1 = serial_in(up, UART_MSR) & 0xF0;
+		serial_out(up, UART_MCR, save_mcr);
+		if (status1 != 0x90) {
+			spin_unlock_irqrestore(&port->lock, flags);
+			DEBUG_AUTOCONF("LOOP test failed (%02x) ",
+				       status1);
+			goto out;
+		}
+	}
+
+	/*
+	 * We're pretty sure there's a port here.  Let's find out what
+	 * type of port it is.  The IIR top two bits allow us to find
+	 * out if it's 8250 or 16450, 16550, 16550A or later.  This
+	 * determines what we test for next.
+	 *
+	 * We also initialise the EFR (if any) to zero for later.  The
+	 * EFR occupies the same register location as the FCR and IIR.
+	 */
+	serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+	serial_out(up, UART_EFR, 0);
+	serial_out(up, UART_LCR, 0);
+
+	serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+	scratch = serial_in(up, UART_IIR) >> 6;
+
+	switch (scratch) {
+	case 0:
+		autoconfig_8250(up);
+		break;
+	case 1:
+		port->type = PORT_UNKNOWN;
+		break;
+	case 2:
+		port->type = PORT_16550;
+		break;
+	case 3:
+		autoconfig_16550a(up);
+		break;
+	}
+
+#ifdef CONFIG_SERIAL_8250_RSA
+	/*
+	 * Only probe for RSA ports if we got the region.
+	 */
+	if (port->type == PORT_16550A && up->probe & UART_PROBE_RSA &&
+	    __enable_rsa(up))
+		port->type = PORT_RSA;
+#endif
+
+	serial_out(up, UART_LCR, save_lcr);
+
+	port->fifosize = uart_config[up->port.type].fifo_size;
+	old_capabilities = up->capabilities;
+	up->capabilities = uart_config[port->type].flags;
+	up->tx_loadsz = uart_config[port->type].tx_loadsz;
+
+	if (port->type == PORT_UNKNOWN)
+		goto out_lock;
+
+	/*
+	 * Reset the UART.
+	 */
+#ifdef CONFIG_SERIAL_8250_RSA
+	if (port->type == PORT_RSA)
+		serial_out(up, UART_RSA_FRR, 0);
+#endif
+	serial_out(up, UART_MCR, save_mcr);
+	serial8250_clear_fifos(up);
+	serial_in(up, UART_RX);
+	if (up->capabilities & UART_CAP_UUE)
+		serial_out(up, UART_IER, UART_IER_UUE);
+	else
+		serial_out(up, UART_IER, 0);
+
+out_lock:
+	spin_unlock_irqrestore(&port->lock, flags);
+	if (up->capabilities != old_capabilities) {
+		printk(KERN_WARNING
+		       "ttyS%d: detected caps %08x should be %08x\n",
+		       serial_index(port), old_capabilities,
+		       up->capabilities);
+	}
+out:
+	DEBUG_AUTOCONF("iir=%d ", scratch);
+	DEBUG_AUTOCONF("type=%s\n", uart_config[port->type].name);
+}
+
+static void autoconfig_irq(struct uart_8250_port *up)
+{
+	struct uart_port *port = &up->port;
+	unsigned char save_mcr, save_ier;
+	unsigned char save_ICP = 0;
+	unsigned int ICP = 0;
+	unsigned long irqs;
+	int irq;
+
+	if (port->flags & UPF_FOURPORT) {
+		ICP = (port->iobase & 0xfe0) | 0x1f;
+		save_ICP = inb_p(ICP);
+		outb_p(0x80, ICP);
+		inb_p(ICP);
+	}
+
+	/* forget possible initially masked and pending IRQ */
+	probe_irq_off(probe_irq_on());
+	save_mcr = serial_in(up, UART_MCR);
+	save_ier = serial_in(up, UART_IER);
+	serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+
+	irqs = probe_irq_on();
+	serial_out(up, UART_MCR, 0);
+	udelay(10);
+	if (port->flags & UPF_FOURPORT) {
+		serial_out(up, UART_MCR,
+			    UART_MCR_DTR | UART_MCR_RTS);
+	} else {
+		serial_out(up, UART_MCR,
+			    UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+	}
+	serial_out(up, UART_IER, 0x0f);	/* enable all intrs */
+	serial_in(up, UART_LSR);
+	serial_in(up, UART_RX);
+	serial_in(up, UART_IIR);
+	serial_in(up, UART_MSR);
+	serial_out(up, UART_TX, 0xFF);
+	udelay(20);
+	irq = probe_irq_off(irqs);
+
+	serial_out(up, UART_MCR, save_mcr);
+	serial_out(up, UART_IER, save_ier);
+
+	if (port->flags & UPF_FOURPORT)
+		outb_p(save_ICP, ICP);
+
+	port->irq = (irq > 0) ? irq : 0;
+}
+
+static inline void __stop_tx(struct uart_8250_port *p)
+{
+	if (p->ier & UART_IER_THRI) {
+		p->ier &= ~UART_IER_THRI;
+		serial_out(p, UART_IER, p->ier);
+		serial8250_rpm_put_tx(p);
+	}
+}
+
+static void serial8250_stop_tx(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	serial8250_rpm_get(up);
+	__stop_tx(up);
+
+	/*
+	 * We really want to stop the transmitter from sending.
+	 */
+	if (port->type == PORT_16C950) {
+		up->acr |= UART_ACR_TXDIS;
+		serial_icr_write(up, UART_ACR, up->acr);
+	}
+	serial8250_rpm_put(up);
+}
+
+static void serial8250_start_tx(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	serial8250_rpm_get_tx(up);
+
+	if (up->dma && !up->dma->tx_dma(up))
+		return;
+
+	if (!(up->ier & UART_IER_THRI)) {
+		up->ier |= UART_IER_THRI;
+		serial_port_out(port, UART_IER, up->ier);
+
+		if (up->bugs & UART_BUG_TXEN) {
+			unsigned char lsr;
+			lsr = serial_in(up, UART_LSR);
+			up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+			if (lsr & UART_LSR_THRE)
+				serial8250_tx_chars(up);
+		}
+	}
+
+	/*
+	 * Re-enable the transmitter if we disabled it.
+	 */
+	if (port->type == PORT_16C950 && up->acr & UART_ACR_TXDIS) {
+		up->acr &= ~UART_ACR_TXDIS;
+		serial_icr_write(up, UART_ACR, up->acr);
+	}
+}
+
+static void serial8250_throttle(struct uart_port *port)
+{
+	port->throttle(port);
+}
+
+static void serial8250_unthrottle(struct uart_port *port)
+{
+	port->unthrottle(port);
+}
+
+static void serial8250_stop_rx(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	serial8250_rpm_get(up);
+
+	up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+	up->port.read_status_mask &= ~UART_LSR_DR;
+	serial_port_out(port, UART_IER, up->ier);
+
+	serial8250_rpm_put(up);
+}
+
+static void serial8250_disable_ms(struct uart_port *port)
+{
+	struct uart_8250_port *up =
+		container_of(port, struct uart_8250_port, port);
+
+	/* no MSR capabilities */
+	if (up->bugs & UART_BUG_NOMSR)
+		return;
+
+	up->ier &= ~UART_IER_MSI;
+	serial_port_out(port, UART_IER, up->ier);
+}
+
+static void serial8250_enable_ms(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	/* no MSR capabilities */
+	if (up->bugs & UART_BUG_NOMSR)
+		return;
+
+	up->ier |= UART_IER_MSI;
+
+	serial8250_rpm_get(up);
+	serial_port_out(port, UART_IER, up->ier);
+	serial8250_rpm_put(up);
+}
+
+/*
+ * serial8250_rx_chars: process characters according to the passed-in
+ * LSR value, and return the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+{
+	struct uart_port *port = &up->port;
+	unsigned char ch;
+	int max_count = 256;
+	char flag;
+
+	do {
+		if (likely(lsr & UART_LSR_DR))
+			ch = serial_in(up, UART_RX);
+		else
+			/*
+			 * Intel 82571 has a Serial Over LAN device that will
+			 * set UART_LSR_BI without setting UART_LSR_DR when
+			 * it receives a break. To avoid reading from the
+			 * receive buffer without UART_LSR_DR bit set, we
+			 * just force the read character to be 0
+			 */
+			ch = 0;
+
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		lsr |= up->lsr_saved_flags;
+		up->lsr_saved_flags = 0;
+
+		if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+			if (lsr & UART_LSR_BI) {
+				lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+				port->icount.brk++;
+				/*
+				 * We do the SysRQ and SAK checking
+				 * here because otherwise the break
+				 * may get masked by ignore_status_mask
+				 * or read_status_mask.
+				 */
+				if (uart_handle_break(port))
+					goto ignore_char;
+			} else if (lsr & UART_LSR_PE)
+				port->icount.parity++;
+			else if (lsr & UART_LSR_FE)
+				port->icount.frame++;
+			if (lsr & UART_LSR_OE)
+				port->icount.overrun++;
+
+			/*
+			 * Mask off conditions which should be ignored.
+			 */
+			lsr &= port->read_status_mask;
+
+			if (lsr & UART_LSR_BI) {
+				DEBUG_INTR("handling break....");
+				flag = TTY_BREAK;
+			} else if (lsr & UART_LSR_PE)
+				flag = TTY_PARITY;
+			else if (lsr & UART_LSR_FE)
+				flag = TTY_FRAME;
+		}
+		if (uart_handle_sysrq_char(port, ch))
+			goto ignore_char;
+
+		uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
+
+ignore_char:
+		lsr = serial_in(up, UART_LSR);
+	} while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
+	spin_unlock(&port->lock);
+	tty_flip_buffer_push(&port->state->port);
+	spin_lock(&port->lock);
+	return lsr;
+}
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
+
+void serial8250_tx_chars(struct uart_8250_port *up)
+{
+	struct uart_port *port = &up->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	int count;
+
+	if (port->x_char) {
+		serial_out(up, UART_TX, port->x_char);
+		port->icount.tx++;
+		port->x_char = 0;
+		return;
+	}
+	if (uart_tx_stopped(port)) {
+		serial8250_stop_tx(port);
+		return;
+	}
+	if (uart_circ_empty(xmit)) {
+		__stop_tx(up);
+		return;
+	}
+
+	count = up->tx_loadsz;
+	do {
+		serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+		port->icount.tx++;
+		if (uart_circ_empty(xmit))
+			break;
+		if (up->capabilities & UART_CAP_HFIFO) {
+			if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
+			    BOTH_EMPTY)
+				break;
+		}
+	} while (--count > 0);
+
+	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+		uart_write_wakeup(port);
+
+	DEBUG_INTR("THRE...");
+
+	/*
+	 * With RPM enabled, we have to wait until the FIFO is empty before the
+	 * HW can go idle. So we get here once again with empty FIFO and disable
+	 * the interrupt and RPM in __stop_tx().
+	 */
+	if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM))
+		__stop_tx(up);
+}
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
+
+/* Caller holds uart port lock */
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
+{
+	struct uart_port *port = &up->port;
+	unsigned int status = serial_in(up, UART_MSR);
+
+	status |= up->msr_saved_flags;
+	up->msr_saved_flags = 0;
+	if (status & UART_MSR_ANY_DELTA && up->ier & UART_IER_MSI &&
+	    port->state != NULL) {
+		if (status & UART_MSR_TERI)
+			port->icount.rng++;
+		if (status & UART_MSR_DDSR)
+			port->icount.dsr++;
+		if (status & UART_MSR_DDCD)
+			uart_handle_dcd_change(port, status & UART_MSR_DCD);
+		if (status & UART_MSR_DCTS)
+			uart_handle_cts_change(port, status & UART_MSR_CTS);
+
+		wake_up_interruptible(&port->state->port.delta_msr_wait);
+	}
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
+
+/*
+ * This handles the interrupt from one port.
+ */
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+{
+	unsigned char status;
+	unsigned long flags;
+	struct uart_8250_port *up = up_to_u8250p(port);
+	int dma_err = 0;
+
+	if (iir & UART_IIR_NO_INT)
+		return 0;
+
+	spin_lock_irqsave(&port->lock, flags);
+
+	status = serial_port_in(port, UART_LSR);
+
+	DEBUG_INTR("status = %x...", status);
+
+	if (status & (UART_LSR_DR | UART_LSR_BI)) {
+		if (up->dma)
+			dma_err = up->dma->rx_dma(up, iir);
+
+		if (!up->dma || dma_err)
+			status = serial8250_rx_chars(up, status);
+	}
+	serial8250_modem_status(up);
+	if ((!up->dma || (up->dma && up->dma->tx_err)) &&
+	    (status & UART_LSR_THRE))
+		serial8250_tx_chars(up);
+
+	spin_unlock_irqrestore(&port->lock, flags);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(serial8250_handle_irq);
+
+static int serial8250_default_handle_irq(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned int iir;
+	int ret;
+
+	serial8250_rpm_get(up);
+
+	iir = serial_port_in(port, UART_IIR);
+	ret = serial8250_handle_irq(port, iir);
+
+	serial8250_rpm_put(up);
+	return ret;
+}
+
+/*
+ * These Exar UARTs have an extra interrupt indicator that could
+ * fire for a few unimplemented interrupts, one of which is a
+ * wakeup event when coming out of sleep.  This handler is here
+ * just to be safe, so that these interrupts don't go unhandled.
+ */
+static int exar_handle_irq(struct uart_port *port)
+{
+	unsigned char int0, int1, int2, int3;
+	unsigned int iir = serial_port_in(port, UART_IIR);
+	int ret;
+
+	ret = serial8250_handle_irq(port, iir);
+
+	if ((port->type == PORT_XR17V35X) ||
+	   (port->type == PORT_XR17D15X)) {
+		int0 = serial_port_in(port, 0x80);
+		int1 = serial_port_in(port, 0x81);
+		int2 = serial_port_in(port, 0x82);
+		int3 = serial_port_in(port, 0x83);
+	}
+
+	return ret;
+}
+
+static unsigned int serial8250_tx_empty(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned long flags;
+	unsigned int lsr;
+
+	serial8250_rpm_get(up);
+
+	spin_lock_irqsave(&port->lock, flags);
+	lsr = serial_port_in(port, UART_LSR);
+	up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	serial8250_rpm_put(up);
+
+	return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int serial8250_get_mctrl(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned int status;
+	unsigned int ret;
+
+	serial8250_rpm_get(up);
+	status = serial8250_modem_status(up);
+	serial8250_rpm_put(up);
+
+	ret = 0;
+	if (status & UART_MSR_DCD)
+		ret |= TIOCM_CAR;
+	if (status & UART_MSR_RI)
+		ret |= TIOCM_RNG;
+	if (status & UART_MSR_DSR)
+		ret |= TIOCM_DSR;
+	if (status & UART_MSR_CTS)
+		ret |= TIOCM_CTS;
+	return ret;
+}
+
+void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned char mcr = 0;
+
+	if (mctrl & TIOCM_RTS)
+		mcr |= UART_MCR_RTS;
+	if (mctrl & TIOCM_DTR)
+		mcr |= UART_MCR_DTR;
+	if (mctrl & TIOCM_OUT1)
+		mcr |= UART_MCR_OUT1;
+	if (mctrl & TIOCM_OUT2)
+		mcr |= UART_MCR_OUT2;
+	if (mctrl & TIOCM_LOOP)
+		mcr |= UART_MCR_LOOP;
+
+	mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+
+	serial_port_out(port, UART_MCR, mcr);
+}
+EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
+
+static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+	if (port->set_mctrl)
+		port->set_mctrl(port, mctrl);
+	else
+		serial8250_do_set_mctrl(port, mctrl);
+}
+
+static void serial8250_break_ctl(struct uart_port *port, int break_state)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned long flags;
+
+	serial8250_rpm_get(up);
+	spin_lock_irqsave(&port->lock, flags);
+	if (break_state == -1)
+		up->lcr |= UART_LCR_SBC;
+	else
+		up->lcr &= ~UART_LCR_SBC;
+	serial_port_out(port, UART_LCR, up->lcr);
+	spin_unlock_irqrestore(&port->lock, flags);
+	serial8250_rpm_put(up);
+}
+
+/*
+ *	Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct uart_8250_port *up, int bits)
+{
+	unsigned int status, tmout = 10000;
+
+	/* Wait up to 10ms for the character(s) to be sent. */
+	for (;;) {
+		status = serial_in(up, UART_LSR);
+
+		up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+
+		if ((status & bits) == bits)
+			break;
+		if (--tmout == 0)
+			break;
+		udelay(1);
+	}
+
+	/* Wait up to 1s for flow control if necessary */
+	if (up->port.flags & UPF_CONS_FLOW) {
+		unsigned int tmout;
+		for (tmout = 1000000; tmout; tmout--) {
+			unsigned int msr = serial_in(up, UART_MSR);
+			up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
+			if (msr & UART_MSR_CTS)
+				break;
+			udelay(1);
+			touch_nmi_watchdog();
+		}
+	}
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context.
+ */
+
+static int serial8250_get_poll_char(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned char lsr;
+	int status;
+
+	serial8250_rpm_get(up);
+
+	lsr = serial_port_in(port, UART_LSR);
+
+	if (!(lsr & UART_LSR_DR)) {
+		status = NO_POLL_CHAR;
+		goto out;
+	}
+
+	status = serial_port_in(port, UART_RX);
+out:
+	serial8250_rpm_put(up);
+	return status;
+}
+
+
+static void serial8250_put_poll_char(struct uart_port *port,
+			 unsigned char c)
+{
+	unsigned int ier;
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	serial8250_rpm_get(up);
+	/*
+	 *	First save the IER then disable the interrupts
+	 */
+	ier = serial_port_in(port, UART_IER);
+	if (up->capabilities & UART_CAP_UUE)
+		serial_port_out(port, UART_IER, UART_IER_UUE);
+	else
+		serial_port_out(port, UART_IER, 0);
+
+	wait_for_xmitr(up, BOTH_EMPTY);
+	/*
+	 *	Send the character out.
+	 */
+	serial_port_out(port, UART_TX, c);
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and restore the IER
+	 */
+	wait_for_xmitr(up, BOTH_EMPTY);
+	serial_port_out(port, UART_IER, ier);
+	serial8250_rpm_put(up);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+int serial8250_do_startup(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned long flags;
+	unsigned char lsr, iir;
+	int retval;
+
+	if (port->type == PORT_8250_CIR)
+		return -ENODEV;
+
+	if (!port->fifosize)
+		port->fifosize = uart_config[port->type].fifo_size;
+	if (!up->tx_loadsz)
+		up->tx_loadsz = uart_config[port->type].tx_loadsz;
+	if (!up->capabilities)
+		up->capabilities = uart_config[port->type].flags;
+	up->mcr = 0;
+
+	if (port->iotype != up->cur_iotype)
+		set_io_from_upio(port);
+
+	serial8250_rpm_get(up);
+	if (port->type == PORT_16C950) {
+		/* Wake up and initialize UART */
+		up->acr = 0;
+		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+		serial_port_out(port, UART_EFR, UART_EFR_ECB);
+		serial_port_out(port, UART_IER, 0);
+		serial_port_out(port, UART_LCR, 0);
+		serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
+		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+		serial_port_out(port, UART_EFR, UART_EFR_ECB);
+		serial_port_out(port, UART_LCR, 0);
+	}
+
+#ifdef CONFIG_SERIAL_8250_RSA
+	/*
+	 * If this is an RSA port, see if we can kick it up to the
+	 * higher speed clock.
+	 */
+	enable_rsa(up);
+#endif
+
+	if (port->type == PORT_XR17V35X) {
+		/*
+		 * First enable access to IER [7:5], ISR [5:4], FCR [5:4],
+		 * MCR [7:5] and MSR [7:0]
+		 */
+		serial_port_out(port, UART_XR_EFR, UART_EFR_ECB);
+
+		/*
+		 * Make sure all interrupts are masked until initialization is
+		 * complete and the FIFOs are cleared
+		 */
+		serial_port_out(port, UART_IER, 0);
+	}
+
+	/*
+	 * Clear the FIFO buffers and disable them.
+	 * (they will be reenabled in set_termios())
+	 */
+	serial8250_clear_fifos(up);
+
+	/*
+	 * Clear the interrupt registers.
+	 */
+	serial_port_in(port, UART_LSR);
+	serial_port_in(port, UART_RX);
+	serial_port_in(port, UART_IIR);
+	serial_port_in(port, UART_MSR);
+
+	/*
+	 * At this point, there's no way the LSR could still be 0xff;
+	 * if it is, then bail out, because there's likely no UART
+	 * here.
+	 */
+	if (!(port->flags & UPF_BUGGY_UART) &&
+	    (serial_port_in(port, UART_LSR) == 0xff)) {
+		printk_ratelimited(KERN_INFO "ttyS%d: LSR safety check engaged!\n",
+				   serial_index(port));
+		retval = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * For an XR16C850, we need to set the trigger levels
+	 */
+	if (port->type == PORT_16850) {
+		unsigned char fctr;
+
+		serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+		fctr = serial_in(up, UART_FCTR) & ~(UART_FCTR_RX|UART_FCTR_TX);
+		serial_port_out(port, UART_FCTR,
+				fctr | UART_FCTR_TRGD | UART_FCTR_RX);
+		serial_port_out(port, UART_TRG, UART_TRG_96);
+		serial_port_out(port, UART_FCTR,
+				fctr | UART_FCTR_TRGD | UART_FCTR_TX);
+		serial_port_out(port, UART_TRG, UART_TRG_96);
+
+		serial_port_out(port, UART_LCR, 0);
+	}
+
+	if (port->irq) {
+		unsigned char iir1;
+		/*
+		 * Test for UARTs that do not reassert THRE when the
+		 * transmitter is idle and the interrupt has already
+		 * been cleared.  Real 16550s should always reassert
+		 * this interrupt whenever the transmitter is idle and
+		 * the interrupt is enabled.  Delays are necessary to
+		 * allow register changes to become visible.
+		 */
+		spin_lock_irqsave(&port->lock, flags);
+		if (up->port.irqflags & IRQF_SHARED)
+			disable_irq_nosync(port->irq);
+
+		wait_for_xmitr(up, UART_LSR_THRE);
+		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+		udelay(1); /* allow THRE to set */
+		iir1 = serial_port_in(port, UART_IIR);
+		serial_port_out(port, UART_IER, 0);
+		serial_port_out_sync(port, UART_IER, UART_IER_THRI);
+		udelay(1); /* allow a working UART time to re-assert THRE */
+		iir = serial_port_in(port, UART_IIR);
+		serial_port_out(port, UART_IER, 0);
+
+		if (port->irqflags & IRQF_SHARED)
+			enable_irq(port->irq);
+		spin_unlock_irqrestore(&port->lock, flags);
+
+		/*
+		 * If the interrupt is not reasserted, or we otherwise
+		 * don't trust the iir, setup a timer to kick the UART
+		 * on a regular basis.
+		 */
+		if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
+		    up->port.flags & UPF_BUG_THRE) {
+			up->bugs |= UART_BUG_THRE;
+		}
+	}
+
+	retval = up->ops->setup_irq(up);
+	if (retval)
+		goto out;
+
+	/*
+	 * Now, initialize the UART
+	 */
+	serial_port_out(port, UART_LCR, UART_LCR_WLEN8);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (up->port.flags & UPF_FOURPORT) {
+		if (!up->port.irq)
+			up->port.mctrl |= TIOCM_OUT1;
+	} else
+		/*
+		 * Most PC uarts need OUT2 raised to enable interrupts.
+		 */
+		if (port->irq)
+			up->port.mctrl |= TIOCM_OUT2;
+
+	serial8250_set_mctrl(port, port->mctrl);
+
+	/* Serial over LAN (SoL) hack:
+	   Intel 8257x Gigabit ethernet chips have a
+	   16550 emulation, to be used for Serial Over LAN.
+	   Those chips take longer than a normal serial
+	   device to signal that transmitted data has been
+	   queued. Due to that, the above test generally
+	   fails. One solution would be to delay the reading
+	   of iir. However, this is not reliable, since the
+	   timeout is variable. So, let's just not test whether
+	   we receive a TX irq. This way, we'll never enable
+	   UART_BUG_TXEN.
+	 */
+	if (up->port.flags & UPF_NO_TXEN_TEST)
+		goto dont_test_tx_en;
+
+	/*
+	 * Do a quick test to see if we receive an
+	 * interrupt when we enable the TX irq.
+	 */
+	serial_port_out(port, UART_IER, UART_IER_THRI);
+	lsr = serial_port_in(port, UART_LSR);
+	iir = serial_port_in(port, UART_IIR);
+	serial_port_out(port, UART_IER, 0);
+
+	if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
+		if (!(up->bugs & UART_BUG_TXEN)) {
+			up->bugs |= UART_BUG_TXEN;
+			pr_debug("ttyS%d - enabling bad tx status workarounds\n",
+				 serial_index(port));
+		}
+	} else {
+		up->bugs &= ~UART_BUG_TXEN;
+	}
+
+dont_test_tx_en:
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/*
+	 * Clear the interrupt registers again for luck, and clear the
+	 * saved flags to avoid getting false values from polling
+	 * routines or the previous session.
+	 */
+	serial_port_in(port, UART_LSR);
+	serial_port_in(port, UART_RX);
+	serial_port_in(port, UART_IIR);
+	serial_port_in(port, UART_MSR);
+	up->lsr_saved_flags = 0;
+	up->msr_saved_flags = 0;
+
+	/*
+	 * Request DMA channels for both RX and TX.
+	 */
+	if (up->dma) {
+		retval = serial8250_request_dma(up);
+		if (retval) {
+			pr_warn_ratelimited("ttyS%d - failed to request DMA\n",
+					    serial_index(port));
+			up->dma = NULL;
+		}
+	}
+
+	/*
+	 * Set the IER shadow for rx interrupts but defer actual interrupt
+	 * enable until after the FIFOs are enabled; otherwise, an already-
+	 * active sender can swamp the interrupt handler with "too much work".
+	 */
+	up->ier = UART_IER_RLSI | UART_IER_RDI;
+
+	if (port->flags & UPF_FOURPORT) {
+		unsigned int icp;
+		/*
+		 * Enable interrupts on the AST Fourport board
+		 */
+		icp = (port->iobase & 0xfe0) | 0x01f;
+		outb_p(0x80, icp);
+		inb_p(icp);
+	}
+	retval = 0;
+out:
+	serial8250_rpm_put(up);
+	return retval;
+}
+EXPORT_SYMBOL_GPL(serial8250_do_startup);
+
+static int serial8250_startup(struct uart_port *port)
+{
+	if (port->startup)
+		return port->startup(port);
+	return serial8250_do_startup(port);
+}
+
+void serial8250_do_shutdown(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned long flags;
+
+	serial8250_rpm_get(up);
+	/*
+	 * Disable interrupts from this port
+	 */
+	up->ier = 0;
+	serial_port_out(port, UART_IER, 0);
+
+	if (up->dma)
+		serial8250_release_dma(up);
+
+	spin_lock_irqsave(&port->lock, flags);
+	if (port->flags & UPF_FOURPORT) {
+		/* reset interrupts on the AST Fourport board */
+		inb((port->iobase & 0xfe0) | 0x1f);
+		port->mctrl |= TIOCM_OUT1;
+	} else
+		port->mctrl &= ~TIOCM_OUT2;
+
+	serial8250_set_mctrl(port, port->mctrl);
+	spin_unlock_irqrestore(&port->lock, flags);
+
+	/*
+	 * Disable break condition and FIFOs
+	 */
+	serial_port_out(port, UART_LCR,
+			serial_port_in(port, UART_LCR) & ~UART_LCR_SBC);
+	serial8250_clear_fifos(up);
+
+#ifdef CONFIG_SERIAL_8250_RSA
+	/*
+	 * Reset the RSA board back to 115kbps compat mode.
+	 */
+	disable_rsa(up);
+#endif
+
+	/*
+	 * Read data port to reset things, and then unlink from
+	 * the IRQ chain.
+	 */
+	serial_port_in(port, UART_RX);
+	serial8250_rpm_put(up);
+
+	up->ops->release_irq(up);
+}
+EXPORT_SYMBOL_GPL(serial8250_do_shutdown);
+
+static void serial8250_shutdown(struct uart_port *port)
+{
+	if (port->shutdown)
+		port->shutdown(port);
+	else
+		serial8250_do_shutdown(port);
+}
+
+/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD).
+ * Calculate the divisor with an extra 4-bit fractional portion.
+ */
+static unsigned int xr17v35x_get_divisor(struct uart_8250_port *up,
+					 unsigned int baud,
+					 unsigned int *frac)
+{
+	struct uart_port *port = &up->port;
+	unsigned int quot_16;
+
+	quot_16 = DIV_ROUND_CLOSEST(port->uartclk, baud);
+	*frac = quot_16 & 0x0f;
+
+	return quot_16 >> 4;
+}
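+
+/*
+ * Worked example for the fractional divisor above (illustrative only,
+ * assuming a hypothetical 62.5 MHz uartclk): for baud = 115200,
+ * quot_16 = DIV_ROUND_CLOSEST(62500000, 115200) = 543, so *frac =
+ * 543 & 0x0f = 15 and the returned divisor is 543 >> 4 = 33, giving
+ * an effective rate of 62500000 / 543, roughly 115101 baud.
+ */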
+
+static unsigned int serial8250_get_divisor(struct uart_8250_port *up,
+					   unsigned int baud,
+					   unsigned int *frac)
+{
+	struct uart_port *port = &up->port;
+	unsigned int quot;
+
+	/*
+	 * Handle magic divisors for baud rates above baud_base on
+	 * SMSC SuperIO chips.
+	 */
+	if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+	    baud == (port->uartclk/4))
+		quot = 0x8001;
+	else if ((port->flags & UPF_MAGIC_MULTIPLIER) &&
+		 baud == (port->uartclk/8))
+		quot = 0x8002;
+	else if (up->port.type == PORT_XR17V35X)
+		quot = xr17v35x_get_divisor(up, baud, frac);
+	else
+		quot = uart_get_divisor(port, baud);
+
+	/*
+	 * Oxford Semi 952 rev B workaround
+	 */
+	if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
+		quot++;
+
+	return quot;
+}
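+
+/*
+ * Illustrative example of the "magic multiplier" path above: with a
+ * typical 1.8432 MHz uartclk (baud_base 115200), a request for 460800
+ * baud (uartclk/4) selects the magic divisor 0x8001 and a request for
+ * 230400 baud (uartclk/8) selects 0x8002; all other rates fall
+ * through to the normal uart_get_divisor() calculation.
+ */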
+
+static unsigned char serial8250_compute_lcr(struct uart_8250_port *up,
+					    tcflag_t c_cflag)
+{
+	unsigned char cval;
+
+	switch (c_cflag & CSIZE) {
+	case CS5:
+		cval = UART_LCR_WLEN5;
+		break;
+	case CS6:
+		cval = UART_LCR_WLEN6;
+		break;
+	case CS7:
+		cval = UART_LCR_WLEN7;
+		break;
+	default:
+	case CS8:
+		cval = UART_LCR_WLEN8;
+		break;
+	}
+
+	if (c_cflag & CSTOPB)
+		cval |= UART_LCR_STOP;
+	if (c_cflag & PARENB) {
+		cval |= UART_LCR_PARITY;
+		if (up->bugs & UART_BUG_PARITY)
+			up->fifo_bug = true;
+	}
+	if (!(c_cflag & PARODD))
+		cval |= UART_LCR_EPAR;
+#ifdef CMSPAR
+	if (c_cflag & CMSPAR)
+		cval |= UART_LCR_SPAR;
+#endif
+
+	return cval;
+}
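+
+/*
+ * Example (illustrative): a plain 8N1 termios setting - CS8 with
+ * CSTOPB and PARENB both clear - reduces to cval == UART_LCR_WLEN8,
+ * the same LCR value written during serial8250_do_startup().
+ */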
+
+static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
+			    unsigned int quot, unsigned int quot_frac)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	/* Workaround to enable 115200 baud on OMAP1510 internal ports */
+	if (is_omap1510_8250(up)) {
+		if (baud == 115200) {
+			quot = 1;
+			serial_port_out(port, UART_OMAP_OSC_12M_SEL, 1);
+		} else
+			serial_port_out(port, UART_OMAP_OSC_12M_SEL, 0);
+	}
+
+	/*
+	 * For NatSemi, switch to bank 2 not bank 1, to avoid resetting EXCR2,
+	 * otherwise just set DLAB
+	 */
+	if (up->capabilities & UART_NATSEMI)
+		serial_port_out(port, UART_LCR, 0xe0);
+	else
+		serial_port_out(port, UART_LCR, up->lcr | UART_LCR_DLAB);
+
+	serial_dl_write(up, quot);
+
+	/* XR17V35x UARTs have an extra fractional divisor register (DLD) */
+	if (up->port.type == PORT_XR17V35X)
+		serial_port_out(port, 0x2, quot_frac);
+}
+
+void
+serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
+		          struct ktermios *old)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	unsigned char cval;
+	unsigned long flags;
+	unsigned int baud, quot, frac = 0;
+
+	cval = serial8250_compute_lcr(up, termios->c_cflag);
+
+	/*
+	 * Ask the core to calculate the divisor for us.
+	 */
+	baud = uart_get_baud_rate(port, termios, old,
+				  port->uartclk / 16 / 0xffff,
+				  port->uartclk / 16);
+	quot = serial8250_get_divisor(up, baud, &frac);
+
+	/*
+	 * Ok, we're now changing the port state.  Do it with
+	 * interrupts disabled.
+	 */
+	serial8250_rpm_get(up);
+	spin_lock_irqsave(&port->lock, flags);
+
+	up->lcr = cval;					/* Save computed LCR */
+
+	if (up->capabilities & UART_CAP_FIFO && port->fifosize > 1) {
+		/* NOTE: If fifo_bug is not set, a user can set RX_trigger. */
+		if ((baud < 2400 && !up->dma) || up->fifo_bug) {
+			up->fcr &= ~UART_FCR_TRIGGER_MASK;
+			up->fcr |= UART_FCR_TRIGGER_1;
+		}
+	}
+
+	/*
+	 * MCR-based auto flow control.  When AFE is enabled, RTS will be
+	 * deasserted when the receive FIFO contains more characters than
+	 * the trigger, or the MCR RTS bit is cleared.  In the case where
+	 * the remote UART is not using CTS auto flow control, we must
+	 * have sufficient FIFO entries for the latency of the remote
+	 * UART to respond.  IOW, at least 32 bytes of FIFO.
+	 */
+	if (up->capabilities & UART_CAP_AFE && port->fifosize >= 32) {
+		up->mcr &= ~UART_MCR_AFE;
+		if (termios->c_cflag & CRTSCTS)
+			up->mcr |= UART_MCR_AFE;
+	}
+
+	/*
+	 * Update the per-port timeout.
+	 */
+	uart_update_timeout(port, termios->c_cflag, baud);
+
+	port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+	if (termios->c_iflag & INPCK)
+		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+		port->read_status_mask |= UART_LSR_BI;
+
+	/*
+	 * Characters to ignore
+	 */
+	port->ignore_status_mask = 0;
+	if (termios->c_iflag & IGNPAR)
+		port->ignore_status_mask |= UART_LSR_PE | UART_LSR_FE;
+	if (termios->c_iflag & IGNBRK) {
+		port->ignore_status_mask |= UART_LSR_BI;
+		/*
+		 * If we're ignoring parity and break indicators,
+		 * ignore overruns too (for real raw support).
+		 */
+		if (termios->c_iflag & IGNPAR)
+			port->ignore_status_mask |= UART_LSR_OE;
+	}
+
+	/*
+	 * ignore all characters if CREAD is not set
+	 */
+	if ((termios->c_cflag & CREAD) == 0)
+		port->ignore_status_mask |= UART_LSR_DR;
+
+	/*
+	 * CTS flow control flag and modem status interrupts
+	 */
+	up->ier &= ~UART_IER_MSI;
+	if (!(up->bugs & UART_BUG_NOMSR) &&
+			UART_ENABLE_MS(&up->port, termios->c_cflag))
+		up->ier |= UART_IER_MSI;
+	if (up->capabilities & UART_CAP_UUE)
+		up->ier |= UART_IER_UUE;
+	if (up->capabilities & UART_CAP_RTOIE)
+		up->ier |= UART_IER_RTOIE;
+
+	serial_port_out(port, UART_IER, up->ier);
+
+	if (up->capabilities & UART_CAP_EFR) {
+		unsigned char efr = 0;
+		/*
+		 * TI16C752/Startech hardware flow control.  FIXME:
+		 * - TI16C752 requires control thresholds to be set.
+		 * - UART_MCR_RTS is ineffective if auto-RTS mode is enabled.
+		 */
+		if (termios->c_cflag & CRTSCTS)
+			efr |= UART_EFR_CTS;
+
+		serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
+		if (port->flags & UPF_EXAR_EFR)
+			serial_port_out(port, UART_XR_EFR, efr);
+		else
+			serial_port_out(port, UART_EFR, efr);
+	}
+
+	serial8250_set_divisor(port, baud, quot, frac);
+
+	/*
+	 * LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
+	 * is written without DLAB set, this mode will be disabled.
+	 */
+	if (port->type == PORT_16750)
+		serial_port_out(port, UART_FCR, up->fcr);
+
+	serial_port_out(port, UART_LCR, up->lcr);	/* reset DLAB */
+	if (port->type != PORT_16750) {
+		/* emulated UARTs (Lucent Venus 167x) need two steps */
+		if (up->fcr & UART_FCR_ENABLE_FIFO)
+			serial_port_out(port, UART_FCR, UART_FCR_ENABLE_FIFO);
+		serial_port_out(port, UART_FCR, up->fcr);	/* set fcr */
+	}
+	serial8250_set_mctrl(port, port->mctrl);
+	spin_unlock_irqrestore(&port->lock, flags);
+	serial8250_rpm_put(up);
+
+	/* Don't rewrite B0 */
+	if (tty_termios_baud_rate(termios))
+		tty_termios_encode_baud_rate(termios, baud, baud);
+}
+EXPORT_SYMBOL(serial8250_do_set_termios);
+
+static void
+serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
+		       struct ktermios *old)
+{
+	if (port->set_termios)
+		port->set_termios(port, termios, old);
+	else
+		serial8250_do_set_termios(port, termios, old);
+}
+
+static void
+serial8250_set_ldisc(struct uart_port *port, struct ktermios *termios)
+{
+	if (termios->c_line == N_PPS) {
+		port->flags |= UPF_HARDPPS_CD;
+		spin_lock_irq(&port->lock);
+		serial8250_enable_ms(port);
+		spin_unlock_irq(&port->lock);
+	} else {
+		port->flags &= ~UPF_HARDPPS_CD;
+		if (!UART_ENABLE_MS(port, termios->c_cflag)) {
+			spin_lock_irq(&port->lock);
+			serial8250_disable_ms(port);
+			spin_unlock_irq(&port->lock);
+		}
+	}
+}
+
+
+void serial8250_do_pm(struct uart_port *port, unsigned int state,
+		      unsigned int oldstate)
+{
+	struct uart_8250_port *p = up_to_u8250p(port);
+
+	serial8250_set_sleep(p, state != 0);
+}
+EXPORT_SYMBOL(serial8250_do_pm);
+
+static void
+serial8250_pm(struct uart_port *port, unsigned int state,
+	      unsigned int oldstate)
+{
+	if (port->pm)
+		port->pm(port, state, oldstate);
+	else
+		serial8250_do_pm(port, state, oldstate);
+}
+
+static unsigned int serial8250_port_size(struct uart_8250_port *pt)
+{
+	if (pt->port.mapsize)
+		return pt->port.mapsize;
+	if (pt->port.iotype == UPIO_AU) {
+		if (pt->port.type == PORT_RT2880)
+			return 0x100;
+		return 0x1000;
+	}
+	if (is_omap1_8250(pt))
+		return 0x16 << pt->port.regshift;
+
+	return 8 << pt->port.regshift;
+}
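+
+/*
+ * For illustration: a conventional port with regshift 0 falls through
+ * to the final case and needs 8 << 0 = 8 bytes of I/O or memory
+ * space, while a port with registers spaced on 32-bit boundaries
+ * (regshift 2) needs 8 << 2 = 32 bytes.
+ */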
+
+/*
+ * Resource handling.
+ */
+static int serial8250_request_std_resource(struct uart_8250_port *up)
+{
+	unsigned int size = serial8250_port_size(up);
+	struct uart_port *port = &up->port;
+	int ret = 0;
+
+	switch (port->iotype) {
+	case UPIO_AU:
+	case UPIO_TSI:
+	case UPIO_MEM32:
+	case UPIO_MEM32BE:
+	case UPIO_MEM:
+		if (!port->mapbase)
+			break;
+
+		if (!request_mem_region(port->mapbase, size, "serial")) {
+			ret = -EBUSY;
+			break;
+		}
+
+		if (port->flags & UPF_IOREMAP) {
+			port->membase = ioremap_nocache(port->mapbase, size);
+			if (!port->membase) {
+				release_mem_region(port->mapbase, size);
+				ret = -ENOMEM;
+			}
+		}
+		break;
+
+	case UPIO_HUB6:
+	case UPIO_PORT:
+		if (!request_region(port->iobase, size, "serial"))
+			ret = -EBUSY;
+		break;
+	}
+	return ret;
+}
+
+static void serial8250_release_std_resource(struct uart_8250_port *up)
+{
+	unsigned int size = serial8250_port_size(up);
+	struct uart_port *port = &up->port;
+
+	switch (port->iotype) {
+	case UPIO_AU:
+	case UPIO_TSI:
+	case UPIO_MEM32:
+	case UPIO_MEM32BE:
+	case UPIO_MEM:
+		if (!port->mapbase)
+			break;
+
+		if (port->flags & UPF_IOREMAP) {
+			iounmap(port->membase);
+			port->membase = NULL;
+		}
+
+		release_mem_region(port->mapbase, size);
+		break;
+
+	case UPIO_HUB6:
+	case UPIO_PORT:
+		release_region(port->iobase, size);
+		break;
+	}
+}
+
+static void serial8250_release_port(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	serial8250_release_std_resource(up);
+}
+
+static int serial8250_request_port(struct uart_port *port)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	int ret;
+
+	if (port->type == PORT_8250_CIR)
+		return -ENODEV;
+
+	ret = serial8250_request_std_resource(up);
+
+	return ret;
+}
+
+static int fcr_get_rxtrig_bytes(struct uart_8250_port *up)
+{
+	const struct serial8250_config *conf_type = &uart_config[up->port.type];
+	unsigned char bytes;
+
+	bytes = conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(up->fcr)];
+
+	return bytes ? bytes : -EOPNOTSUPP;
+}
+
+static int bytes_to_fcr_rxtrig(struct uart_8250_port *up, unsigned char bytes)
+{
+	const struct serial8250_config *conf_type = &uart_config[up->port.type];
+	int i;
+
+	if (!conf_type->rxtrig_bytes[UART_FCR_R_TRIG_BITS(UART_FCR_R_TRIG_00)])
+		return -EOPNOTSUPP;
+
+	for (i = 1; i < UART_FCR_R_TRIG_MAX_STATE; i++) {
+		if (bytes < conf_type->rxtrig_bytes[i])
+			/* Use the nearest lower value */
+			return (--i) << UART_FCR_R_TRIG_SHIFT;
+	}
+
+	return UART_FCR_R_TRIG_11;
+}
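+
+/*
+ * Illustrative example (assuming a standard 16550A whose uart_config
+ * entry lists RX trigger levels of 1, 4, 8 and 14 bytes): a request
+ * for 10 bytes has no exact match, so the loop above picks the
+ * nearest lower level, 8 bytes, and returns the matching
+ * UART_FCR_R_TRIG_* bits.
+ */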
+
+static int do_get_rxtrig(struct tty_port *port)
+{
+	struct uart_state *state = container_of(port, struct uart_state, port);
+	struct uart_port *uport = state->uart_port;
+	struct uart_8250_port *up =
+		container_of(uport, struct uart_8250_port, port);
+
+	if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1)
+		return -EINVAL;
+
+	return fcr_get_rxtrig_bytes(up);
+}
+
+static int do_serial8250_get_rxtrig(struct tty_port *port)
+{
+	int rxtrig_bytes;
+
+	mutex_lock(&port->mutex);
+	rxtrig_bytes = do_get_rxtrig(port);
+	mutex_unlock(&port->mutex);
+
+	return rxtrig_bytes;
+}
+
+static ssize_t serial8250_get_attr_rx_trig_bytes(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tty_port *port = dev_get_drvdata(dev);
+	int rxtrig_bytes;
+
+	rxtrig_bytes = do_serial8250_get_rxtrig(port);
+	if (rxtrig_bytes < 0)
+		return rxtrig_bytes;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", rxtrig_bytes);
+}
+
+static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
+{
+	struct uart_state *state = container_of(port, struct uart_state, port);
+	struct uart_port *uport = state->uart_port;
+	struct uart_8250_port *up =
+		container_of(uport, struct uart_8250_port, port);
+	int rxtrig;
+
+	if (!(up->capabilities & UART_CAP_FIFO) || uport->fifosize <= 1 ||
+	    up->fifo_bug)
+		return -EINVAL;
+
+	rxtrig = bytes_to_fcr_rxtrig(up, bytes);
+	if (rxtrig < 0)
+		return rxtrig;
+
+	serial8250_clear_fifos(up);
+	up->fcr &= ~UART_FCR_TRIGGER_MASK;
+	up->fcr |= (unsigned char)rxtrig;
+	serial_out(up, UART_FCR, up->fcr);
+	return 0;
+}
+
+static int do_serial8250_set_rxtrig(struct tty_port *port, unsigned char bytes)
+{
+	int ret;
+
+	mutex_lock(&port->mutex);
+	ret = do_set_rxtrig(port, bytes);
+	mutex_unlock(&port->mutex);
+
+	return ret;
+}
+
+static ssize_t serial8250_set_attr_rx_trig_bytes(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tty_port *port = dev_get_drvdata(dev);
+	unsigned char bytes;
+	int ret;
+
+	if (!count)
+		return -EINVAL;
+
+	ret = kstrtou8(buf, 10, &bytes);
+	if (ret < 0)
+		return ret;
+
+	ret = do_serial8250_set_rxtrig(port, bytes);
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static DEVICE_ATTR(rx_trig_bytes, S_IRUSR | S_IWUSR | S_IRGRP,
+		   serial8250_get_attr_rx_trig_bytes,
+		   serial8250_set_attr_rx_trig_bytes);
+
+static struct attribute *serial8250_dev_attrs[] = {
+	&dev_attr_rx_trig_bytes.attr,
+	NULL,
+	};
+
+static struct attribute_group serial8250_dev_attr_group = {
+	.attrs = serial8250_dev_attrs,
+	};
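+
+/*
+ * Note: for port types whose uart_config entry provides rxtrig_bytes,
+ * this group is hooked up in register_dev_spec_attr_grp() below, and
+ * the RX FIFO trigger level then becomes readable and writable via the
+ * rx_trig_bytes attribute on the port's tty device in sysfs.
+ */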
+
+static void register_dev_spec_attr_grp(struct uart_8250_port *up)
+{
+	const struct serial8250_config *conf_type = &uart_config[up->port.type];
+
+	if (conf_type->rxtrig_bytes[0])
+		up->port.attr_group = &serial8250_dev_attr_group;
+}
+
+static void serial8250_config_port(struct uart_port *port, int flags)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+	int ret;
+
+	if (port->type == PORT_8250_CIR)
+		return;
+
+	/*
+	 * Find the region that we can probe for.  This in turn
+	 * tells us whether we can probe for the type of port.
+	 */
+	ret = serial8250_request_std_resource(up);
+	if (ret < 0)
+		return;
+
+	if (port->iotype != up->cur_iotype)
+		set_io_from_upio(port);
+
+	if (flags & UART_CONFIG_TYPE)
+		autoconfig(up);
+
+	/* if access method is AU, it is a 16550 with a quirk */
+	if (port->type == PORT_16550A && port->iotype == UPIO_AU)
+		up->bugs |= UART_BUG_NOMSR;
+
+	/* HW bugs may trigger IRQ while IIR == NO_INT */
+	if (port->type == PORT_TEGRA)
+		up->bugs |= UART_BUG_NOMSR;
+
+	if (port->type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
+		autoconfig_irq(up);
+
+	if (port->type == PORT_UNKNOWN)
+		serial8250_release_std_resource(up);
+
+	/* Fixme: probably not the best place for this */
+	if ((port->type == PORT_XR17V35X) ||
+	   (port->type == PORT_XR17D15X))
+		port->handle_irq = exar_handle_irq;
+
+	register_dev_spec_attr_grp(up);
+	up->fcr = uart_config[up->port.type].fcr;
+}
+
+static int
+serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+	if (ser->irq >= nr_irqs || ser->irq < 0 ||
+	    ser->baud_base < 9600 || ser->type < PORT_UNKNOWN ||
+	    ser->type >= ARRAY_SIZE(uart_config) || ser->type == PORT_CIRRUS ||
+	    ser->type == PORT_STARTECH)
+		return -EINVAL;
+	return 0;
+}
+
+static const char *
+serial8250_type(struct uart_port *port)
+{
+	int type = port->type;
+
+	if (type >= ARRAY_SIZE(uart_config))
+		type = 0;
+	return uart_config[type].name;
+}
+
+static const struct uart_ops serial8250_pops = {
+	.tx_empty	= serial8250_tx_empty,
+	.set_mctrl	= serial8250_set_mctrl,
+	.get_mctrl	= serial8250_get_mctrl,
+	.stop_tx	= serial8250_stop_tx,
+	.start_tx	= serial8250_start_tx,
+	.throttle	= serial8250_throttle,
+	.unthrottle	= serial8250_unthrottle,
+	.stop_rx	= serial8250_stop_rx,
+	.enable_ms	= serial8250_enable_ms,
+	.break_ctl	= serial8250_break_ctl,
+	.startup	= serial8250_startup,
+	.shutdown	= serial8250_shutdown,
+	.set_termios	= serial8250_set_termios,
+	.set_ldisc	= serial8250_set_ldisc,
+	.pm		= serial8250_pm,
+	.type		= serial8250_type,
+	.release_port	= serial8250_release_port,
+	.request_port	= serial8250_request_port,
+	.config_port	= serial8250_config_port,
+	.verify_port	= serial8250_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_get_char = serial8250_get_poll_char,
+	.poll_put_char = serial8250_put_poll_char,
+#endif
+};
+
+void serial8250_init_port(struct uart_8250_port *up)
+{
+	struct uart_port *port = &up->port;
+
+	spin_lock_init(&port->lock);
+	port->ops = &serial8250_pops;
+
+	up->cur_iotype = 0xFF;
+}
+EXPORT_SYMBOL_GPL(serial8250_init_port);
+
+void serial8250_set_defaults(struct uart_8250_port *up)
+{
+	struct uart_port *port = &up->port;
+
+	if (up->port.flags & UPF_FIXED_TYPE) {
+		unsigned int type = up->port.type;
+
+		if (!up->port.fifosize)
+			up->port.fifosize = uart_config[type].fifo_size;
+		if (!up->tx_loadsz)
+			up->tx_loadsz = uart_config[type].tx_loadsz;
+		if (!up->capabilities)
+			up->capabilities = uart_config[type].flags;
+	}
+
+	set_io_from_upio(port);
+
+	/* default dma handlers */
+	if (up->dma) {
+		if (!up->dma->tx_dma)
+			up->dma->tx_dma = serial8250_tx_dma;
+		if (!up->dma->rx_dma)
+			up->dma->rx_dma = serial8250_rx_dma;
+	}
+}
+EXPORT_SYMBOL_GPL(serial8250_set_defaults);
+
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+
+static void serial8250_console_putchar(struct uart_port *port, int ch)
+{
+	struct uart_8250_port *up = up_to_u8250p(port);
+
+	wait_for_xmitr(up, UART_LSR_THRE);
+	serial_port_out(port, UART_TX, ch);
+}
+
+/*
+ *	Print a string to the serial port trying not to disturb
+ *	any possible real use of the port...
+ *
+ *	The console_lock must be held when we get here.
+ */
+void serial8250_console_write(struct uart_8250_port *up, const char *s,
+			      unsigned int count)
+{
+	struct uart_port *port = &up->port;
+	unsigned long flags;
+	unsigned int ier;
+	int locked = 1;
+
+	touch_nmi_watchdog();
+
+	serial8250_rpm_get(up);
+
+	if (port->sysrq)
+		locked = 0;
+	else if (oops_in_progress)
+		locked = spin_trylock_irqsave(&port->lock, flags);
+	else
+		spin_lock_irqsave(&port->lock, flags);
+
+	/*
+	 *	First save the IER then disable the interrupts
+	 */
+	ier = serial_port_in(port, UART_IER);
+
+	if (up->capabilities & UART_CAP_UUE)
+		serial_port_out(port, UART_IER, UART_IER_UUE);
+	else
+		serial_port_out(port, UART_IER, 0);
+
+	/* check scratch reg to see if port powered off during system sleep */
+	if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
+		struct ktermios termios;
+		unsigned int baud, quot, frac = 0;
+
+		termios.c_cflag = port->cons->cflag;
+		if (port->state->port.tty && termios.c_cflag == 0)
+			termios.c_cflag = port->state->port.tty->termios.c_cflag;
+
+		baud = uart_get_baud_rate(port, &termios, NULL,
+					  port->uartclk / 16 / 0xffff,
+					  port->uartclk / 16);
+		quot = serial8250_get_divisor(up, baud, &frac);
+
+		serial8250_set_divisor(port, baud, quot, frac);
+		serial_port_out(port, UART_LCR, up->lcr);
+		serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
+
+		up->canary = 0;
+	}
+
+	uart_console_write(port, s, count, serial8250_console_putchar);
+
+	/*
+	 *	Finally, wait for transmitter to become empty
+	 *	and restore the IER
+	 */
+	wait_for_xmitr(up, BOTH_EMPTY);
+	serial_port_out(port, UART_IER, ier);
+
+	/*
+	 *	The receive handling will happen properly because the
+	 *	receive ready bit will still be set; it is not cleared
+	 *	on read.  However, modem status handling will not, so we
+	 *	must call it ourselves if we have saved anything in the
+	 *	saved flags while processing with interrupts off.
+	 */
+	if (up->msr_saved_flags)
+		serial8250_modem_status(up);
+
+	if (locked)
+		spin_unlock_irqrestore(&port->lock, flags);
+	serial8250_rpm_put(up);
+}
+
+static unsigned int probe_baud(struct uart_port *port)
+{
+	unsigned char lcr, dll, dlm;
+	unsigned int quot;
+
+	lcr = serial_port_in(port, UART_LCR);
+	serial_port_out(port, UART_LCR, lcr | UART_LCR_DLAB);
+	dll = serial_port_in(port, UART_DLL);
+	dlm = serial_port_in(port, UART_DLM);
+	serial_port_out(port, UART_LCR, lcr);
+
+	quot = (dlm << 8) | dll;
+	return (port->uartclk / 16) / quot;
+}
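+
+/*
+ * Worked example (illustrative): on a console port clocked at the
+ * classic 1.8432 MHz, reading back dlm = 0 and dll = 12 gives
+ * quot = 12, so probe_baud() reports 1843200 / 16 / 12 = 9600 baud.
+ */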
+
+int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
+{
+	int baud = 9600;
+	int bits = 8;
+	int parity = 'n';
+	int flow = 'n';
+
+	if (!port->iobase && !port->membase)
+		return -ENODEV;
+
+	if (options)
+		uart_parse_options(options, &baud, &parity, &bits, &flow);
+	else if (probe)
+		baud = probe_baud(port);
+
+	return uart_set_options(port, port->cons, baud, parity, bits, flow);
+}
+
+#endif /* CONFIG_SERIAL_8250_CONSOLE */
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 7d79425..d11621e 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -218,6 +218,7 @@
 	ret = serial8250_register_8250_port(&up);
 	if (ret < 0) {
 		dev_err(dev, "failed to register 8250 port\n");
+		clk_disable_unprepare(priv->clk);
 		return ret;
 	}
 
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 7062959..39c6d22 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -2,10 +2,11 @@
 # Makefile for the 8250 serial device drivers.
 #
 
-obj-$(CONFIG_SERIAL_8250)		+= 8250.o
+obj-$(CONFIG_SERIAL_8250)		+= 8250.o 8250_base.o
 8250-y					:= 8250_core.o
 8250-$(CONFIG_SERIAL_8250_PNP)		+= 8250_pnp.o
-8250-$(CONFIG_SERIAL_8250_DMA)		+= 8250_dma.o
+8250_base-y				:= 8250_port.o
+8250_base-$(CONFIG_SERIAL_8250_DMA)	+= 8250_dma.o
 obj-$(CONFIG_SERIAL_8250_GSC)		+= 8250_gsc.o
 obj-$(CONFIG_SERIAL_8250_PCI)		+= 8250_pci.o
 obj-$(CONFIG_SERIAL_8250_HP300)		+= 8250_hp300.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 15b4079..ed299b9 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -47,12 +47,12 @@
 
 config SERIAL_AMBA_PL011
 	tristate "ARM AMBA PL011 serial port support"
-	depends on ARM_AMBA
+	depends on ARM_AMBA || SOC_ZX296702
 	select SERIAL_CORE
 	help
 	  This selects the ARM(R) AMBA(R) PrimeCell PL011 UART.  If you have
 	  an Integrator/PP2, Integrator/CP or Versatile platform, say Y or M
-	  here.
+	  here. Say Y or M if you have SOC_ZX296702.
 
 	  If unsure, say N.
 
@@ -594,7 +594,7 @@
 
 config SERIAL_UARTLITE
 	tristate "Xilinx uartlite serial port support"
-	depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE || ARCH_ZYNQ
+	depends on HAS_IOMEM
 	select SERIAL_CORE
 	help
 	  Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1067,6 +1067,7 @@
 	bool "ETRAX FS serial port support"
 	depends on ETRAX_ARCH_V32 && OF
 	select SERIAL_CORE
+	select SERIAL_MCTRL_GPIO if GPIOLIB
 
 config SERIAL_ETRAXFS_CONSOLE
 	bool "ETRAX FS serial console support"
@@ -1376,7 +1377,8 @@
 
 config SERIAL_IFX6X60
         tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
-	depends on GPIOLIB && SPI && HAS_DMA
+	depends on GPIOLIB || COMPILE_TEST
+	depends on SPI && HAS_DMA
 	help
 	  Support for the IFX6x60 modem devices on Intel MID platforms.
 
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index fd27e98..2af09ab 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -74,8 +74,13 @@
 /* There is by now at least one vendor with differing details, so handle it */
 struct vendor_data {
 	unsigned int		ifls;
+	unsigned int		fr_busy;
+	unsigned int		fr_dsr;
+	unsigned int		fr_cts;
+	unsigned int		fr_ri;
 	unsigned int		lcrh_tx;
 	unsigned int		lcrh_rx;
+	u16			*reg_lut;
 	bool			oversampling;
 	bool			dma_threshold;
 	bool			cts_event_workaround;
@@ -85,6 +90,48 @@
 	unsigned int (*get_fifosize)(struct amba_device *dev);
 };
 
+/* Max address offset of register in use is 0x48 */
+#define REG_NR		(0x48 >> 2)
+#define IDX(x)		(x >> 2)
+enum reg_idx {
+	REG_DR		= IDX(UART01x_DR),
+	REG_RSR		= IDX(UART01x_RSR),
+	REG_ST_DMAWM	= IDX(ST_UART011_DMAWM),
+	REG_FR		= IDX(UART01x_FR),
+	REG_ST_LCRH_RX  = IDX(ST_UART011_LCRH_RX),
+	REG_ILPR	= IDX(UART01x_ILPR),
+	REG_IBRD	= IDX(UART011_IBRD),
+	REG_FBRD	= IDX(UART011_FBRD),
+	REG_LCRH	= IDX(UART011_LCRH),
+	REG_CR		= IDX(UART011_CR),
+	REG_IFLS	= IDX(UART011_IFLS),
+	REG_IMSC	= IDX(UART011_IMSC),
+	REG_RIS		= IDX(UART011_RIS),
+	REG_MIS		= IDX(UART011_MIS),
+	REG_ICR		= IDX(UART011_ICR),
+	REG_DMACR	= IDX(UART011_DMACR),
+};
+
+static u16 arm_reg[] = {
+	[REG_DR]		= UART01x_DR,
+	[REG_RSR]		= UART01x_RSR,
+	[REG_ST_DMAWM]		= ~0,
+	[REG_FR]		= UART01x_FR,
+	[REG_ST_LCRH_RX]	= ~0,
+	[REG_ILPR]		= UART01x_ILPR,
+	[REG_IBRD]		= UART011_IBRD,
+	[REG_FBRD]		= UART011_FBRD,
+	[REG_LCRH]		= UART011_LCRH,
+	[REG_CR]		= UART011_CR,
+	[REG_IFLS]		= UART011_IFLS,
+	[REG_IMSC]		= UART011_IMSC,
+	[REG_RIS]		= UART011_RIS,
+	[REG_MIS]		= UART011_MIS,
+	[REG_ICR]		= UART011_ICR,
+	[REG_DMACR]		= UART011_DMACR,
+};
+
+#ifdef CONFIG_ARM_AMBA
 static unsigned int get_fifosize_arm(struct amba_device *dev)
 {
 	return amba_rev(dev) < 3 ? 16 : 32;
@@ -92,8 +139,13 @@
 
 static struct vendor_data vendor_arm = {
 	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
-	.lcrh_tx		= UART011_LCRH,
-	.lcrh_rx		= UART011_LCRH,
+	.fr_busy		= UART01x_FR_BUSY,
+	.fr_dsr			= UART01x_FR_DSR,
+	.fr_cts			= UART01x_FR_CTS,
+	.fr_ri			= UART011_FR_RI,
+	.lcrh_tx		= REG_LCRH,
+	.lcrh_rx		= REG_LCRH,
+	.reg_lut		= arm_reg,
 	.oversampling		= false,
 	.dma_threshold		= false,
 	.cts_event_workaround	= false,
@@ -101,8 +153,14 @@
 	.fixed_options		= false,
 	.get_fifosize		= get_fifosize_arm,
 };
+#endif
 
 static struct vendor_data vendor_sbsa = {
+	.fr_busy		= UART01x_FR_BUSY,
+	.fr_dsr			= UART01x_FR_DSR,
+	.fr_cts			= UART01x_FR_CTS,
+	.fr_ri			= UART011_FR_RI,
+	.reg_lut		= arm_reg,
 	.oversampling		= false,
 	.dma_threshold		= false,
 	.cts_event_workaround	= false,
@@ -110,6 +168,26 @@
 	.fixed_options		= true,
 };
 
+#ifdef CONFIG_ARM_AMBA
+static u16 st_reg[] = {
+	[REG_DR]		= UART01x_DR,
+	[REG_RSR]		= UART01x_RSR,
+	[REG_ST_DMAWM]		= ST_UART011_DMAWM,
+	[REG_FR]		= UART01x_FR,
+	[REG_ST_LCRH_RX]	= ST_UART011_LCRH_RX,
+	[REG_ILPR]		= UART01x_ILPR,
+	[REG_IBRD]		= UART011_IBRD,
+	[REG_FBRD]		= UART011_FBRD,
+	[REG_LCRH]		= UART011_LCRH,
+	[REG_CR]		= UART011_CR,
+	[REG_IFLS]		= UART011_IFLS,
+	[REG_IMSC]		= UART011_IMSC,
+	[REG_RIS]		= UART011_RIS,
+	[REG_MIS]		= UART011_MIS,
+	[REG_ICR]		= UART011_ICR,
+	[REG_DMACR]		= UART011_DMACR,
+};
+
 static unsigned int get_fifosize_st(struct amba_device *dev)
 {
 	return 64;
@@ -117,8 +195,13 @@
 
 static struct vendor_data vendor_st = {
 	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
-	.lcrh_tx		= ST_UART011_LCRH_TX,
-	.lcrh_rx		= ST_UART011_LCRH_RX,
+	.fr_busy		= UART01x_FR_BUSY,
+	.fr_dsr			= UART01x_FR_DSR,
+	.fr_cts			= UART01x_FR_CTS,
+	.fr_ri			= UART011_FR_RI,
+	.lcrh_tx		= REG_LCRH,
+	.lcrh_rx		= REG_ST_LCRH_RX,
+	.reg_lut		= st_reg,
 	.oversampling		= true,
 	.dma_threshold		= true,
 	.cts_event_workaround	= true,
@@ -126,6 +209,43 @@
 	.fixed_options		= false,
 	.get_fifosize		= get_fifosize_st,
 };
+#endif
+
+#ifdef CONFIG_SOC_ZX296702
+static u16 zte_reg[] = {
+	[REG_DR]		= ZX_UART01x_DR,
+	[REG_RSR]		= UART01x_RSR,
+	[REG_ST_DMAWM]		= ST_UART011_DMAWM,
+	[REG_FR]		= ZX_UART01x_FR,
+	[REG_ST_LCRH_RX]	= ST_UART011_LCRH_RX,
+	[REG_ILPR]		= UART01x_ILPR,
+	[REG_IBRD]		= UART011_IBRD,
+	[REG_FBRD]		= UART011_FBRD,
+	[REG_LCRH]		= ZX_UART011_LCRH_TX,
+	[REG_CR]		= ZX_UART011_CR,
+	[REG_IFLS]		= ZX_UART011_IFLS,
+	[REG_IMSC]		= ZX_UART011_IMSC,
+	[REG_RIS]		= ZX_UART011_RIS,
+	[REG_MIS]		= ZX_UART011_MIS,
+	[REG_ICR]		= ZX_UART011_ICR,
+	[REG_DMACR]		= ZX_UART011_DMACR,
+};
+
+static struct vendor_data vendor_zte = {
+	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
+	.fr_busy		= ZX_UART01x_FR_BUSY,
+	.fr_dsr			= ZX_UART01x_FR_DSR,
+	.fr_cts			= ZX_UART01x_FR_CTS,
+	.fr_ri			= ZX_UART011_FR_RI,
+	.lcrh_tx		= REG_LCRH,
+	.lcrh_rx		= REG_ST_LCRH_RX,
+	.reg_lut		= zte_reg,
+	.oversampling		= false,
+	.dma_threshold		= false,
+	.cts_event_workaround	= false,
+	.fixed_options		= false,
+};
+#endif
 
 /* Deals with DMA transactions */
 
@@ -164,10 +284,15 @@
 	struct uart_port	port;
 	struct clk		*clk;
 	const struct vendor_data *vendor;
+	u16			*reg_lut;
 	unsigned int		dmacr;		/* dma control reg */
 	unsigned int		im;		/* interrupt mask */
 	unsigned int		old_status;
 	unsigned int		fifosize;	/* vendor-specific */
+	unsigned int		fr_busy;        /* vendor-specific */
+	unsigned int		fr_dsr;		/* vendor-specific */
+	unsigned int		fr_cts;         /* vendor-specific */
+	unsigned int		fr_ri;		/* vendor-specific */
 	unsigned int		lcrh_tx;	/* vendor-specific */
 	unsigned int		lcrh_rx;	/* vendor-specific */
 	unsigned int		old_cr;		/* state during shutdown */
@@ -184,6 +309,29 @@
 #endif
 };
 
+static bool is_implemented(struct uart_amba_port *uap, unsigned int reg)
+{
+	return uap->reg_lut[reg] != (u16)~0;
+}
+
+static unsigned int pl011_readw(struct uart_amba_port *uap, int index)
+{
+	WARN_ON(index > REG_NR);
+	return readw_relaxed(uap->port.membase + uap->reg_lut[index]);
+}
+
+static void pl011_writew(struct uart_amba_port *uap, int val, int index)
+{
+	WARN_ON(index > REG_NR);
+	writew_relaxed(val, uap->port.membase + uap->reg_lut[index]);
+}
+
+static void pl011_writeb(struct uart_amba_port *uap, u8 val, int index)
+{
+	WARN_ON(index > REG_NR);
+	writeb_relaxed(val, uap->port.membase + uap->reg_lut[index]);
+}
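+
+/*
+ * Illustrative note: with these helpers every register access goes
+ * through the vendor lookup table, so pl011_readw(uap, REG_FR) reads
+ * membase + reg_lut[REG_FR], which is UART01x_FR on the ARM and ST
+ * parts but ZX_UART01x_FR on the ZTE ZX296702 variant, with no
+ * conditionals at the call site.
+ */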
+
 /*
  * Reads up to 256 characters from the FIFO or until it's empty and
  * inserts them into the TTY layer. Returns the number of characters
@@ -196,12 +344,12 @@
 	int fifotaken = 0;
 
 	while (max_count--) {
-		status = readw(uap->port.membase + UART01x_FR);
+		status = pl011_readw(uap, REG_FR);
 		if (status & UART01x_FR_RXFE)
 			break;
 
 		/* Take chars from the FIFO and update status */
-		ch = readw(uap->port.membase + UART01x_DR) |
+		ch = pl011_readw(uap, REG_DR) |
 			UART_DUMMY_DR_RX;
 		flag = TTY_NORMAL;
 		uap->port.icount.rx++;
@@ -284,7 +432,7 @@
 	struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
 	struct device *dev = uap->port.dev;
 	struct dma_slave_config tx_conf = {
-		.dst_addr = uap->port.mapbase + UART01x_DR,
+		.dst_addr = uap->port.mapbase + uap->reg_lut[REG_DR],
 		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 		.direction = DMA_MEM_TO_DEV,
 		.dst_maxburst = uap->fifosize >> 1,
@@ -339,7 +487,7 @@
 
 	if (chan) {
 		struct dma_slave_config rx_conf = {
-			.src_addr = uap->port.mapbase + UART01x_DR,
+			.src_addr = uap->port.mapbase + uap->reg_lut[REG_DR],
 			.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 			.direction = DMA_DEV_TO_MEM,
 			.src_maxburst = uap->fifosize >> 2,
@@ -438,7 +586,7 @@
 
 	dmacr = uap->dmacr;
 	uap->dmacr = dmacr & ~UART011_TXDMAE;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 
 	/*
 	 * If TX DMA was disabled, it means that we've stopped the DMA for
@@ -552,7 +700,7 @@
 	dma_dev->device_issue_pending(chan);
 
 	uap->dmacr |= UART011_TXDMAE;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 	uap->dmatx.queued = true;
 
 	/*
@@ -588,9 +736,9 @@
 	 */
 	if (uap->dmatx.queued) {
 		uap->dmacr |= UART011_TXDMAE;
-		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+		pl011_writew(uap, uap->dmacr, REG_DMACR);
 		uap->im &= ~UART011_TXIM;
-		writew(uap->im, uap->port.membase + UART011_IMSC);
+		pl011_writew(uap, uap->im, REG_IMSC);
 		return true;
 	}
 
@@ -600,7 +748,7 @@
 	 */
 	if (pl011_dma_tx_refill(uap) > 0) {
 		uap->im &= ~UART011_TXIM;
-		writew(uap->im, uap->port.membase + UART011_IMSC);
+		pl011_writew(uap, uap->im, REG_IMSC);
 		return true;
 	}
 	return false;
@@ -614,7 +762,7 @@
 {
 	if (uap->dmatx.queued) {
 		uap->dmacr &= ~UART011_TXDMAE;
-		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+		pl011_writew(uap, uap->dmacr, REG_DMACR);
 	}
 }
 
@@ -640,14 +788,12 @@
 		if (!uap->dmatx.queued) {
 			if (pl011_dma_tx_refill(uap) > 0) {
 				uap->im &= ~UART011_TXIM;
-				writew(uap->im, uap->port.membase +
-				       UART011_IMSC);
+				pl011_writew(uap, uap->im, REG_IMSC);
 			} else
 				ret = false;
 		} else if (!(uap->dmacr & UART011_TXDMAE)) {
 			uap->dmacr |= UART011_TXDMAE;
-			writew(uap->dmacr,
-				       uap->port.membase + UART011_DMACR);
+			pl011_writew(uap, uap->dmacr, REG_DMACR);
 		}
 		return ret;
 	}
@@ -658,9 +804,9 @@
 	 */
 	dmacr = uap->dmacr;
 	uap->dmacr &= ~UART011_TXDMAE;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 
-	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
+	if (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF) {
 		/*
 		 * No space in the FIFO, so enable the transmit interrupt
 		 * so we know when there is space.  Note that once we've
@@ -669,13 +815,13 @@
 		return false;
 	}
 
-	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
+	pl011_writew(uap, uap->port.x_char, REG_DR);
 	uap->port.icount.tx++;
 	uap->port.x_char = 0;
 
 	/* Success - restore the DMA state */
 	uap->dmacr = dmacr;
-	writew(dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, dmacr, REG_DMACR);
 
 	return true;
 }
@@ -703,7 +849,7 @@
 			     DMA_TO_DEVICE);
 		uap->dmatx.queued = false;
 		uap->dmacr &= ~UART011_TXDMAE;
-		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+		pl011_writew(uap, uap->dmacr, REG_DMACR);
 	}
 }
 
@@ -743,11 +889,11 @@
 	dma_async_issue_pending(rxchan);
 
 	uap->dmacr |= UART011_RXDMAE;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 	uap->dmarx.running = true;
 
 	uap->im &= ~UART011_RXIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 
 	return 0;
 }
@@ -805,8 +951,9 @@
 	 */
 	if (dma_count == pending && readfifo) {
 		/* Clear any error flags */
-		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
-		       uap->port.membase + UART011_ICR);
+		pl011_writew(uap,
+			     UART011_OEIS | UART011_BEIS | UART011_PEIS
+			     | UART011_FEIS, REG_ICR);
 
 		/*
 		 * If we read all the DMA'd characters, and we had an
@@ -854,7 +1001,7 @@
 
 	/* Disable RX DMA - incoming data will wait in the FIFO */
 	uap->dmacr &= ~UART011_RXDMAE;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 	uap->dmarx.running = false;
 
 	pending = sgbuf->sg.length - state.residue;
@@ -874,7 +1021,7 @@
 		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
 			"fall back to interrupt mode\n");
 		uap->im |= UART011_RXIM;
-		writew(uap->im, uap->port.membase + UART011_IMSC);
+		pl011_writew(uap, uap->im, REG_IMSC);
 	}
 }
 
@@ -922,7 +1069,7 @@
 		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
 			"fall back to interrupt mode\n");
 		uap->im |= UART011_RXIM;
-		writew(uap->im, uap->port.membase + UART011_IMSC);
+		pl011_writew(uap, uap->im, REG_IMSC);
 	}
 }
 
@@ -935,7 +1082,7 @@
 {
 	/* FIXME.  Just disable the DMA enable */
 	uap->dmacr &= ~UART011_RXDMAE;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 }
 
 /*
@@ -979,7 +1126,7 @@
 		spin_lock_irqsave(&uap->port.lock, flags);
 		pl011_dma_rx_stop(uap);
 		uap->im |= UART011_RXIM;
-		writew(uap->im, uap->port.membase + UART011_IMSC);
+		pl011_writew(uap, uap->im, REG_IMSC);
 		spin_unlock_irqrestore(&uap->port.lock, flags);
 
 		uap->dmarx.running = false;
@@ -1041,7 +1188,7 @@
 skip_rx:
 	/* Turn on DMA error (RX/TX will be enabled on demand) */
 	uap->dmacr |= UART011_DMAONERR;
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 
 	/*
 	 * ST Micro variants has some specific dma burst threshold
@@ -1049,8 +1196,9 @@
 	 * be issued above/below 16 bytes.
 	 */
 	if (uap->vendor->dma_threshold)
-		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
-			       uap->port.membase + ST_UART011_DMAWM);
+		pl011_writew(uap,
+			     ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
+			     REG_ST_DMAWM);
 
 	if (uap->using_rx_dma) {
 		if (pl011_dma_rx_trigger_dma(uap))
@@ -1075,12 +1223,12 @@
 		return;
 
 	/* Disable RX and TX DMA */
-	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
+	while (pl011_readw(uap, REG_FR) & uap->fr_busy)
 		barrier();
 
 	spin_lock_irq(&uap->port.lock);
 	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
-	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
+	pl011_writew(uap, uap->dmacr, REG_DMACR);
 	spin_unlock_irq(&uap->port.lock);
 
 	if (uap->using_tx_dma) {
@@ -1181,7 +1329,7 @@
 	    container_of(port, struct uart_amba_port, port);
 
 	uap->im &= ~UART011_TXIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 	pl011_dma_tx_stop(uap);
 }
 
@@ -1191,7 +1339,7 @@
 static void pl011_start_tx_pio(struct uart_amba_port *uap)
 {
 	uap->im |= UART011_TXIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 	pl011_tx_chars(uap, false);
 }
 
@@ -1211,7 +1359,7 @@
 
 	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
 		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 
 	pl011_dma_rx_stop(uap);
 }
@@ -1222,7 +1370,7 @@
 	    container_of(port, struct uart_amba_port, port);
 
 	uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 }
 
 static void pl011_rx_chars(struct uart_amba_port *uap)
@@ -1242,7 +1390,7 @@
 			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
 				"fall back to interrupt mode again\n");
 			uap->im |= UART011_RXIM;
-			writew(uap->im, uap->port.membase + UART011_IMSC);
+			pl011_writew(uap, uap->im, REG_IMSC);
 		} else {
 #ifdef CONFIG_DMA_ENGINE
 			/* Start Rx DMA poll */
@@ -1263,10 +1411,10 @@
 			  bool from_irq)
 {
 	if (unlikely(!from_irq) &&
-	    readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+	    pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
 		return false; /* unable to transmit character */
 
-	writew(c, uap->port.membase + UART01x_DR);
+	pl011_writew(uap, c, REG_DR);
 	uap->port.icount.tx++;
 
 	return true;
@@ -1313,7 +1461,7 @@
 {
 	unsigned int status, delta;
 
-	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+	status = pl011_readw(uap, REG_FR) & UART01x_FR_MODEM_ANY;
 
 	delta = status ^ uap->old_status;
 	uap->old_status = status;
@@ -1324,11 +1472,11 @@
 	if (delta & UART01x_FR_DCD)
 		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
 
-	if (delta & UART01x_FR_DSR)
+	if (delta & uap->fr_dsr)
 		uap->port.icount.dsr++;
 
-	if (delta & UART01x_FR_CTS)
-		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
+	if (delta & uap->fr_cts)
+		uart_handle_cts_change(&uap->port, status & uap->fr_cts);
 
 	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
 }
@@ -1341,15 +1489,15 @@
 		return;
 
 	/* workaround to make sure that all bits are unlocked.. */
-	writew(0x00, uap->port.membase + UART011_ICR);
+	pl011_writew(uap, 0x00, REG_ICR);
 
 	/*
 	 * WA: introduce 26ns(1 uart clk) delay before W1C;
 	 * single apb access will incur 2 pclk(133.12Mhz) delay,
 	 * so add 2 dummy reads
 	 */
-	dummy_read = readw(uap->port.membase + UART011_ICR);
-	dummy_read = readw(uap->port.membase + UART011_ICR);
+	dummy_read = pl011_readw(uap, REG_ICR);
+	dummy_read = pl011_readw(uap, REG_ICR);
 }
 
 static irqreturn_t pl011_int(int irq, void *dev_id)
@@ -1361,15 +1509,13 @@
 	int handled = 0;
 
 	spin_lock_irqsave(&uap->port.lock, flags);
-	imsc = readw(uap->port.membase + UART011_IMSC);
-	status = readw(uap->port.membase + UART011_RIS) & imsc;
+	imsc = pl011_readw(uap, REG_IMSC);
+	status = pl011_readw(uap, REG_RIS) & imsc;
 	if (status) {
 		do {
 			check_apply_cts_event_workaround(uap);
-
-			writew(status & ~(UART011_TXIS|UART011_RTIS|
-					  UART011_RXIS),
-			       uap->port.membase + UART011_ICR);
+			pl011_writew(uap, status & ~(UART011_TXIS|UART011_RTIS|
+				     UART011_RXIS), REG_ICR);
 
 			if (status & (UART011_RTIS|UART011_RXIS)) {
 				if (pl011_dma_rx_running(uap))
@@ -1386,7 +1532,7 @@
 			if (pass_counter-- == 0)
 				break;
 
-			status = readw(uap->port.membase + UART011_RIS) & imsc;
+			status = pl011_readw(uap, REG_RIS) & imsc;
 		} while (status != 0);
 		handled = 1;
 	}
@@ -1400,8 +1546,8 @@
 {
 	struct uart_amba_port *uap =
 	    container_of(port, struct uart_amba_port, port);
-	unsigned int status = readw(uap->port.membase + UART01x_FR);
-	return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
+	unsigned int status = pl011_readw(uap, REG_FR);
+	return status & (uap->fr_busy|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
 }
 
 static unsigned int pl011_get_mctrl(struct uart_port *port)
@@ -1409,16 +1555,16 @@
 	struct uart_amba_port *uap =
 	    container_of(port, struct uart_amba_port, port);
 	unsigned int result = 0;
-	unsigned int status = readw(uap->port.membase + UART01x_FR);
+	unsigned int status = pl011_readw(uap, REG_FR);
 
 #define TIOCMBIT(uartbit, tiocmbit)	\
 	if (status & uartbit)		\
 		result |= tiocmbit
 
 	TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
-	TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
-	TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
-	TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
+	TIOCMBIT(uap->fr_dsr, TIOCM_DSR);
+	TIOCMBIT(uap->fr_cts, TIOCM_CTS);
+	TIOCMBIT(uap->fr_ri, TIOCM_RNG);
 #undef TIOCMBIT
 	return result;
 }
@@ -1429,7 +1575,7 @@
 	    container_of(port, struct uart_amba_port, port);
 	unsigned int cr;
 
-	cr = readw(uap->port.membase + UART011_CR);
+	cr = pl011_readw(uap, REG_CR);
 
 #define	TIOCMBIT(tiocmbit, uartbit)		\
 	if (mctrl & tiocmbit)		\
@@ -1449,7 +1595,7 @@
 	}
 #undef TIOCMBIT
 
-	writew(cr, uap->port.membase + UART011_CR);
+	pl011_writew(uap, cr, REG_CR);
 }
 
 static void pl011_break_ctl(struct uart_port *port, int break_state)
@@ -1460,12 +1606,12 @@
 	unsigned int lcr_h;
 
 	spin_lock_irqsave(&uap->port.lock, flags);
-	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
+	lcr_h = pl011_readw(uap, uap->lcrh_tx);
 	if (break_state == -1)
 		lcr_h |= UART01x_LCRH_BRK;
 	else
 		lcr_h &= ~UART01x_LCRH_BRK;
-	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
+	pl011_writew(uap, lcr_h, uap->lcrh_tx);
 	spin_unlock_irqrestore(&uap->port.lock, flags);
 }
 
@@ -1475,9 +1621,8 @@
 {
 	struct uart_amba_port *uap =
 	    container_of(port, struct uart_amba_port, port);
-	unsigned char __iomem *regs = uap->port.membase;
 
-	writew(readw(regs + UART011_MIS), regs + UART011_ICR);
+	pl011_writew(uap, pl011_readw(uap, REG_MIS), REG_ICR);
 	/*
 	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
 	 * we simply mask it. start_tx() will unmask it.
@@ -1491,7 +1636,7 @@
 	 * (including tx queue), so we're also fine with start_tx()'s caller
 	 * side.
 	 */
-	writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
+	pl011_writew(uap, pl011_readw(uap, REG_IMSC) & ~UART011_TXIM, REG_IMSC);
 }
 
 static int pl011_get_poll_char(struct uart_port *port)
@@ -1506,11 +1651,11 @@
 	 */
 	pl011_quiesce_irqs(port);
 
-	status = readw(uap->port.membase + UART01x_FR);
+	status = pl011_readw(uap, REG_FR);
 	if (status & UART01x_FR_RXFE)
 		return NO_POLL_CHAR;
 
-	return readw(uap->port.membase + UART01x_DR);
+	return pl011_readw(uap, REG_DR);
 }
 
 static void pl011_put_poll_char(struct uart_port *port,
@@ -1519,10 +1664,10 @@
 	struct uart_amba_port *uap =
 	    container_of(port, struct uart_amba_port, port);
 
-	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+	while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
 		barrier();
 
-	writew(ch, uap->port.membase + UART01x_DR);
+	pl011_writew(uap, ch, REG_DR);
 }
 
 #endif /* CONFIG_CONSOLE_POLL */
@@ -1546,15 +1691,15 @@
 	uap->port.uartclk = clk_get_rate(uap->clk);
 
 	/* Clear pending error and receive interrupts */
-	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
-	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
+	pl011_writew(uap, UART011_OEIS | UART011_BEIS | UART011_PEIS |
+		     UART011_FEIS | UART011_RTIS | UART011_RXIS, REG_ICR);
 
 	/*
 	 * Save interrupts enable mask, and enable RX interrupts in case if
 	 * the interrupt is used for NMI entry.
 	 */
-	uap->im = readw(uap->port.membase + UART011_IMSC);
-	writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
+	uap->im = pl011_readw(uap, REG_IMSC);
+	pl011_writew(uap, UART011_RTIM | UART011_RXIM, REG_IMSC);
 
 	if (dev_get_platdata(uap->port.dev)) {
 		struct amba_pl011_data *plat;
@@ -1568,22 +1713,22 @@
 
 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
 {
-	writew(lcr_h, uap->port.membase + uap->lcrh_rx);
-	if (uap->lcrh_rx != uap->lcrh_tx) {
+	pl011_writew(uap, lcr_h, uap->lcrh_rx);
+	if (is_implemented(uap, REG_ST_LCRH_RX)) {
 		int i;
 		/*
 		 * Wait 10 PCLKs before writing LCRH_TX register,
 		 * to get this delay write read only register 10 times
 		 */
 		for (i = 0; i < 10; ++i)
-			writew(0xff, uap->port.membase + UART011_MIS);
-		writew(lcr_h, uap->port.membase + uap->lcrh_tx);
+			pl011_writew(uap, 0xff, REG_MIS);
+		pl011_writew(uap, lcr_h, uap->lcrh_tx);
 	}
 }
 
 static int pl011_allocate_irq(struct uart_amba_port *uap)
 {
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 
 	return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
 }
@@ -1598,12 +1743,11 @@
 	spin_lock_irq(&uap->port.lock);
 
 	/* Clear out any spuriously appearing RX interrupts */
-	writew(UART011_RTIS | UART011_RXIS,
-	       uap->port.membase + UART011_ICR);
+	pl011_writew(uap, UART011_RTIS | UART011_RXIS, REG_ICR);
 	uap->im = UART011_RTIM;
 	if (!pl011_dma_rx_running(uap))
 		uap->im |= UART011_RXIM;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
+	pl011_writew(uap, uap->im, REG_IMSC);
 	spin_unlock_irq(&uap->port.lock);
 }
 
@@ -1622,21 +1766,21 @@
 	if (retval)
 		goto clk_dis;
 
-	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
+	pl011_writew(uap, uap->vendor->ifls, REG_IFLS);
 
 	spin_lock_irq(&uap->port.lock);
 
 	/* restore RTS and DTR */
 	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
 	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
-	writew(cr, uap->port.membase + UART011_CR);
+	pl011_writew(uap, cr, REG_CR);
 
 	spin_unlock_irq(&uap->port.lock);
 
 	/*
 	 * initialise the old status of the modem signals
 	 */
-	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
+	uap->old_status = pl011_readw(uap, REG_FR) & UART01x_FR_MODEM_ANY;
 
 	/* Startup DMA */
 	pl011_dma_startup(uap);
@@ -1675,11 +1819,11 @@
 static void pl011_shutdown_channel(struct uart_amba_port *uap,
 					unsigned int lcrh)
 {
-      unsigned long val;
+	unsigned long val;
 
-      val = readw(uap->port.membase + lcrh);
-      val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
-      writew(val, uap->port.membase + lcrh);
+	val = pl011_readw(uap, lcrh);
+	val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+	pl011_writew(uap, val, lcrh);
 }
 
 /*
@@ -1693,18 +1837,18 @@
 
 	uap->autorts = false;
 	spin_lock_irq(&uap->port.lock);
-	cr = readw(uap->port.membase + UART011_CR);
+	cr = pl011_readw(uap, REG_CR);
 	uap->old_cr = cr;
 	cr &= UART011_CR_RTS | UART011_CR_DTR;
 	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
-	writew(cr, uap->port.membase + UART011_CR);
+	pl011_writew(uap, cr, REG_CR);
 	spin_unlock_irq(&uap->port.lock);
 
 	/*
 	 * disable break condition and fifos
 	 */
 	pl011_shutdown_channel(uap, uap->lcrh_rx);
-	if (uap->lcrh_rx != uap->lcrh_tx)
+	if (is_implemented(uap, REG_ST_LCRH_RX))
 		pl011_shutdown_channel(uap, uap->lcrh_tx);
 }
 
@@ -1714,8 +1858,8 @@
 
 	/* mask all interrupts and clear all pending ones */
 	uap->im = 0;
-	writew(uap->im, uap->port.membase + UART011_IMSC);
-	writew(0xffff, uap->port.membase + UART011_ICR);
+	pl011_writew(uap, uap->im, REG_IMSC);
+	pl011_writew(uap, 0xffff, REG_ICR);
 
 	spin_unlock_irq(&uap->port.lock);
 }
@@ -1867,8 +2011,8 @@
 		pl011_enable_ms(port);
 
 	/* first, disable everything */
-	old_cr = readw(port->membase + UART011_CR);
-	writew(0, port->membase + UART011_CR);
+	old_cr = pl011_readw(uap, REG_CR);
+	pl011_writew(uap, 0, REG_CR);
 
 	if (termios->c_cflag & CRTSCTS) {
 		if (old_cr & UART011_CR_RTS)
@@ -1901,17 +2045,17 @@
 			quot -= 2;
 	}
 	/* Set baud rate */
-	writew(quot & 0x3f, port->membase + UART011_FBRD);
-	writew(quot >> 6, port->membase + UART011_IBRD);
+	pl011_writew(uap, quot & 0x3f, REG_FBRD);
+	pl011_writew(uap, quot >> 6, REG_IBRD);
 
 	/*
 	 * ----------v----------v----------v----------v-----
 	 * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
-	 * UART011_FBRD & UART011_IBRD.
+	 * REG_FBRD & REG_IBRD.
 	 * ----------^----------^----------^----------^-----
 	 */
 	pl011_write_lcr_h(uap, lcr_h);
-	writew(old_cr, port->membase + UART011_CR);
+	pl011_writew(uap, old_cr, REG_CR);
 
 	spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -2052,9 +2196,9 @@
 	struct uart_amba_port *uap =
 	    container_of(port, struct uart_amba_port, port);
 
-	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+	while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
 		barrier();
-	writew(ch, uap->port.membase + UART01x_DR);
+	pl011_writew(uap, ch, REG_DR);
 }
 
 static void
@@ -2079,10 +2223,10 @@
 	 *	First save the CR then disable the interrupts
 	 */
 	if (!uap->vendor->always_enabled) {
-		old_cr = readw(uap->port.membase + UART011_CR);
+		old_cr = pl011_readw(uap, REG_CR);
 		new_cr = old_cr & ~UART011_CR_CTSEN;
 		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
-		writew(new_cr, uap->port.membase + UART011_CR);
+		pl011_writew(uap, new_cr, REG_CR);
 	}
 
 	uart_console_write(&uap->port, s, count, pl011_console_putchar);
@@ -2092,10 +2236,10 @@
 	 *	and restore the TCR
 	 */
 	do {
-		status = readw(uap->port.membase + UART01x_FR);
-	} while (status & UART01x_FR_BUSY);
+		status = pl011_readw(uap, REG_FR);
+	} while (status & uap->fr_busy);
 	if (!uap->vendor->always_enabled)
-		writew(old_cr, uap->port.membase + UART011_CR);
+		pl011_writew(uap, old_cr, REG_CR);
 
 	if (locked)
 		spin_unlock(&uap->port.lock);
@@ -2108,10 +2252,10 @@
 pl011_console_get_options(struct uart_amba_port *uap, int *baud,
 			     int *parity, int *bits)
 {
-	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
+	if (pl011_readw(uap, REG_CR) & UART01x_CR_UARTEN) {
 		unsigned int lcr_h, ibrd, fbrd;
 
-		lcr_h = readw(uap->port.membase + uap->lcrh_tx);
+		lcr_h = pl011_readw(uap, uap->lcrh_tx);
 
 		*parity = 'n';
 		if (lcr_h & UART01x_LCRH_PEN) {
@@ -2126,13 +2270,13 @@
 		else
 			*bits = 8;
 
-		ibrd = readw(uap->port.membase + UART011_IBRD);
-		fbrd = readw(uap->port.membase + UART011_FBRD);
+		ibrd = pl011_readw(uap, REG_IBRD);
+		fbrd = pl011_readw(uap, REG_FBRD);
 
 		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
 
 		if (uap->vendor->oversampling) {
-			if (readw(uap->port.membase + UART011_CR)
+			if (pl011_readw(uap, REG_CR)
 				  & ST_UART011_CR_OVSFACT)
 				*baud *= 2;
 		}
@@ -2204,10 +2348,13 @@
 
 static void pl011_putc(struct uart_port *port, int c)
 {
-	while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
+	struct uart_amba_port *uap =
+	    container_of(port, struct uart_amba_port, port);
+
+	while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
 		;
-	writeb(c, port->membase + UART01x_DR);
-	while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
+	pl011_writeb(uap, c, REG_DR);
+	while (pl011_readw(uap, REG_FR) & uap->fr_busy)
 		;
 }
 
@@ -2334,8 +2481,8 @@
 	int ret;
 
 	/* Ensure interrupts from this UART are masked and cleared */
-	writew(0, uap->port.membase + UART011_IMSC);
-	writew(0xffff, uap->port.membase + UART011_ICR);
+	pl011_writew(uap, 0, REG_IMSC);
+	pl011_writew(uap, 0xffff, REG_ICR);
 
 	if (!amba_reg.state) {
 		ret = uart_register_driver(&amba_reg);
@@ -2353,6 +2500,7 @@
 	return ret;
 }
 
+#ifdef CONFIG_ARM_AMBA
 static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 {
 	struct uart_amba_port *uap;
@@ -2373,8 +2521,13 @@
 		return PTR_ERR(uap->clk);
 
 	uap->vendor = vendor;
+	uap->reg_lut = vendor->reg_lut;
 	uap->lcrh_rx = vendor->lcrh_rx;
 	uap->lcrh_tx = vendor->lcrh_tx;
+	uap->fr_busy = vendor->fr_busy;
+	uap->fr_dsr = vendor->fr_dsr;
+	uap->fr_cts = vendor->fr_cts;
+	uap->fr_ri = vendor->fr_ri;
 	uap->fifosize = vendor->get_fifosize(dev);
 	uap->port.irq = dev->irq[0];
 	uap->port.ops = &amba_pl011_pops;
@@ -2398,6 +2551,67 @@
 	pl011_unregister_port(uap);
 	return 0;
 }
+#endif
+
+#ifdef CONFIG_SOC_ZX296702
+static int zx_uart_probe(struct platform_device *pdev)
+{
+	struct uart_amba_port *uap;
+	struct vendor_data *vendor = &vendor_zte;
+	struct resource *res;
+	int portnr, ret;
+
+	portnr = pl011_find_free_port();
+	if (portnr < 0)
+		return portnr;
+
+	uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
+			GFP_KERNEL);
+	if (!uap) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	uap->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(uap->clk)) {
+		ret = PTR_ERR(uap->clk);
+		goto out;
+	}
+
+	uap->vendor	= vendor;
+	uap->reg_lut	= vendor->reg_lut;
+	uap->lcrh_rx	= vendor->lcrh_rx;
+	uap->lcrh_tx	= vendor->lcrh_tx;
+	uap->fr_busy	= vendor->fr_busy;
+	uap->fr_dsr	= vendor->fr_dsr;
+	uap->fr_cts	= vendor->fr_cts;
+	uap->fr_ri	= vendor->fr_ri;
+	uap->fifosize	= 16;
+	uap->port.irq	= platform_get_irq(pdev, 0);
+	uap->port.ops	= &amba_pl011_pops;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	ret = pl011_setup_port(&pdev->dev, uap, res, portnr);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, uap);
+
+	return pl011_register_port(uap);
+out:
+	return ret;
+}
+
+static int zx_uart_remove(struct platform_device *pdev)
+{
+	struct uart_amba_port *uap = platform_get_drvdata(pdev);
+
+	uart_remove_one_port(&amba_reg, &uap->port);
+	pl011_unregister_port(uap);
+	return 0;
+}
+#endif
 
 #ifdef CONFIG_PM_SLEEP
 static int pl011_suspend(struct device *dev)
@@ -2454,6 +2668,11 @@
 		return -ENOMEM;
 
 	uap->vendor	= &vendor_sbsa;
+	uap->reg_lut	= vendor_sbsa.reg_lut;
+	uap->fr_busy	= vendor_sbsa.fr_busy;
+	uap->fr_dsr	= vendor_sbsa.fr_dsr;
+	uap->fr_cts	= vendor_sbsa.fr_cts;
+	uap->fr_ri	= vendor_sbsa.fr_ri;
 	uap->fifosize	= 32;
 	uap->port.irq	= platform_get_irq(pdev, 0);
 	uap->port.ops	= &sbsa_uart_pops;
@@ -2503,6 +2722,7 @@
 	},
 };
 
+#ifdef CONFIG_ARM_AMBA
 static struct amba_id pl011_ids[] = {
 	{
 		.id	= 0x00041011,
@@ -2528,20 +2748,57 @@
 	.probe		= pl011_probe,
 	.remove		= pl011_remove,
 };
+#endif
+
+#ifdef CONFIG_SOC_ZX296702
+static const struct of_device_id zx_uart_dt_ids[] = {
+	{ .compatible = "zte,zx296702-uart", },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, zx_uart_dt_ids);
+
+static struct platform_driver zx_uart_driver = {
+	.driver = {
+		.name	= "zx-uart",
+		.owner	= THIS_MODULE,
+		.pm	= &pl011_dev_pm_ops,
+		.of_match_table = zx_uart_dt_ids,
+	},
+	.probe		= zx_uart_probe,
+	.remove		= zx_uart_remove,
+};
+#endif
+
 
 static int __init pl011_init(void)
 {
+	int ret;
 	printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
 
 	if (platform_driver_register(&arm_sbsa_uart_platform_driver))
 		pr_warn("could not register SBSA UART platform driver\n");
-	return amba_driver_register(&pl011_driver);
+
+#ifdef CONFIG_SOC_ZX296702
+	ret = platform_driver_register(&zx_uart_driver);
+	if (ret)
+		pr_warn("could not register ZX UART platform driver\n");
+#endif
+
+#ifdef CONFIG_ARM_AMBA
+	ret = amba_driver_register(&pl011_driver);
+#endif
+	return ret;
 }
 
 static void __exit pl011_exit(void)
 {
 	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
+#ifdef CONFIG_SOC_ZX296702
+	platform_driver_unregister(&zx_uart_driver);
+#endif
+#ifdef CONFIG_ARM_AMBA
 	amba_driver_unregister(&pl011_driver);
+#endif
 }
 
 /*
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 2a8f5281..5ca5cf3 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -56,6 +56,15 @@
 /* Revisit: We should calculate this based on the actual port settings */
 #define PDC_RX_TIMEOUT		(3 * 10)		/* 3 bytes */
 
+/* The minimum number of data the FIFOs should be able to contain */
+#define ATMEL_MIN_FIFO_SIZE	8
+/*
+ * These two offsets are subtracted from the RX FIFO size to define the RTS
+ * high and low thresholds
+ */
+#define ATMEL_RTS_HIGH_OFFSET	16
+#define ATMEL_RTS_LOW_OFFSET	20
+
 #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
@@ -88,37 +97,6 @@
 
 #define ATMEL_ISR_PASS_LIMIT	256
 
-/* UART registers. CR is write-only, hence no GET macro */
-#define UART_PUT_CR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_CR)
-#define UART_GET_MR(port)	__raw_readl((port)->membase + ATMEL_US_MR)
-#define UART_PUT_MR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_MR)
-#define UART_PUT_IER(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_IER)
-#define UART_PUT_IDR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_IDR)
-#define UART_GET_IMR(port)	__raw_readl((port)->membase + ATMEL_US_IMR)
-#define UART_GET_CSR(port)	__raw_readl((port)->membase + ATMEL_US_CSR)
-#define UART_GET_CHAR(port)	__raw_readl((port)->membase + ATMEL_US_RHR)
-#define UART_PUT_CHAR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_THR)
-#define UART_GET_BRGR(port)	__raw_readl((port)->membase + ATMEL_US_BRGR)
-#define UART_PUT_BRGR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_BRGR)
-#define UART_PUT_RTOR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_RTOR)
-#define UART_PUT_TTGR(port, v)	__raw_writel(v, (port)->membase + ATMEL_US_TTGR)
-#define UART_GET_IP_NAME(port)	__raw_readl((port)->membase + ATMEL_US_NAME)
-#define UART_GET_IP_VERSION(port) __raw_readl((port)->membase + ATMEL_US_VERSION)
-
- /* PDC registers */
-#define UART_PUT_PTCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
-#define UART_GET_PTSR(port)	__raw_readl((port)->membase + ATMEL_PDC_PTSR)
-
-#define UART_PUT_RPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
-#define UART_GET_RPR(port)	__raw_readl((port)->membase + ATMEL_PDC_RPR)
-#define UART_PUT_RCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
-#define UART_PUT_RNPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
-#define UART_PUT_RNCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
-
-#define UART_PUT_TPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
-#define UART_PUT_TCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
-#define UART_GET_TCR(port)	__raw_readl((port)->membase + ATMEL_PDC_TCR)
-
 struct atmel_dma_buffer {
 	unsigned char	*buf;
 	dma_addr_t	dma_addr;
@@ -166,12 +144,16 @@
 	unsigned int		irq_status;
 	unsigned int		irq_status_prev;
 	unsigned int		status_change;
+	unsigned int		tx_len;
 
 	struct circ_buf		rx_ring;
 
 	struct mctrl_gpios	*gpios;
 	int			gpio_irq[UART_GPIO_MAX];
 	unsigned int		tx_done_mask;
+	u32			fifo_size;
+	u32			rts_high;
+	u32			rts_low;
 	bool			ms_irq_enabled;
 	bool			is_usart;	/* usart or uart */
 	struct timer_list	uart_timer;	/* uart timer */
@@ -212,6 +194,43 @@
 	return container_of(uart, struct atmel_uart_port, uart);
 }
 
+static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
+{
+	return __raw_readl(port->membase + reg);
+}
+
+static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
+{
+	__raw_writel(value, port->membase + reg);
+}
+
+#ifdef CONFIG_AVR32
+
+/* AVR32 cannot handle 8 or 16bit I/O accesses, only 32bit I/O accesses */
+static inline u8 atmel_uart_read_char(struct uart_port *port)
+{
+	return __raw_readl(port->membase + ATMEL_US_RHR);
+}
+
+static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
+{
+	__raw_writel(value, port->membase + ATMEL_US_THR);
+}
+
+#else
+
+static inline u8 atmel_uart_read_char(struct uart_port *port)
+{
+	return __raw_readb(port->membase + ATMEL_US_RHR);
+}
+
+static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
+{
+	__raw_writeb(value, port->membase + ATMEL_US_THR);
+}
+
+#endif
+
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -257,7 +276,7 @@
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 	unsigned int status, ret = 0;
 
-	status = UART_GET_CSR(port);
+	status = atmel_uart_readl(port, ATMEL_US_CSR);
 
 	mctrl_gpio_get(atmel_port->gpios, &ret);
 
@@ -304,9 +323,9 @@
 	unsigned int mode;
 
 	/* Disable interrupts */
-	UART_PUT_IDR(port, atmel_port->tx_done_mask);
+	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
-	mode = UART_GET_MR(port);
+	mode = atmel_uart_readl(port, ATMEL_US_MR);
 
 	/* Resetting serial mode to RS232 (0x0) */
 	mode &= ~ATMEL_US_USMODE;
@@ -316,7 +335,8 @@
 	if (rs485conf->flags & SER_RS485_ENABLED) {
 		dev_dbg(port->dev, "Setting UART to RS485\n");
 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
-		UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+		atmel_uart_writel(port, ATMEL_US_TTGR,
+				  rs485conf->delay_rts_after_send);
 		mode |= ATMEL_US_USMODE_RS485;
 	} else {
 		dev_dbg(port->dev, "Setting UART to RS232\n");
@@ -326,10 +346,10 @@
 		else
 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
 	}
-	UART_PUT_MR(port, mode);
+	atmel_uart_writel(port, ATMEL_US_MR, mode);
 
 	/* Enable interrupts */
-	UART_PUT_IER(port, atmel_port->tx_done_mask);
+	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
 
 	return 0;
 }
@@ -339,7 +359,9 @@
  */
 static u_int atmel_tx_empty(struct uart_port *port)
 {
-	return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
+	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
+		TIOCSER_TEMT :
+		0;
 }
 
 /*
@@ -348,13 +370,14 @@
 static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
 {
 	unsigned int control = 0;
-	unsigned int mode = UART_GET_MR(port);
+	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
 	unsigned int rts_paused, rts_ready;
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	/* override mode to RS485 if needed, otherwise keep the current mode */
 	if (port->rs485.flags & SER_RS485_ENABLED) {
-		UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+		atmel_uart_writel(port, ATMEL_US_TTGR,
+				  port->rs485.delay_rts_after_send);
 		mode &= ~ATMEL_US_USMODE;
 		mode |= ATMEL_US_USMODE_RS485;
 	}
@@ -384,7 +407,7 @@
 	else
 		control |= ATMEL_US_DTRDIS;
 
-	UART_PUT_CR(port, control);
+	atmel_uart_writel(port, ATMEL_US_CR, control);
 
 	mctrl_gpio_set(atmel_port->gpios, mctrl);
 
@@ -395,7 +418,7 @@
 	else
 		mode |= ATMEL_US_CHMODE_NORMAL;
 
-	UART_PUT_MR(port, mode);
+	atmel_uart_writel(port, ATMEL_US_MR, mode);
 }
 
 /*
@@ -406,7 +429,7 @@
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 	unsigned int ret = 0, status;
 
-	status = UART_GET_CSR(port);
+	status = atmel_uart_readl(port, ATMEL_US_CSR);
 
 	/*
 	 * The control signals are active low.
@@ -432,10 +455,10 @@
 
 	if (atmel_use_pdc_tx(port)) {
 		/* disable PDC transmit */
-		UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
 	}
 	/* Disable interrupts */
-	UART_PUT_IDR(port, atmel_port->tx_done_mask);
+	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
@@ -450,7 +473,7 @@
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	if (atmel_use_pdc_tx(port)) {
-		if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
+		if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
 			/* The transmitter is already running.  Yes, we
 			   really need this.*/
 			return;
@@ -460,10 +483,10 @@
 			atmel_stop_rx(port);
 
 		/* re-enable PDC transmit */
-		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
 	}
 	/* Enable interrupts */
-	UART_PUT_IER(port, atmel_port->tx_done_mask);
+	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
 }
 
 /*
@@ -471,17 +494,19 @@
  */
 static void atmel_start_rx(struct uart_port *port)
 {
-	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */
+	/* reset status and receiver */
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
 
-	UART_PUT_CR(port, ATMEL_US_RXEN);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
 
 	if (atmel_use_pdc_rx(port)) {
 		/* enable PDC controller */
-		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
-			port->read_status_mask);
-		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+		atmel_uart_writel(port, ATMEL_US_IER,
+				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+				  port->read_status_mask);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
 	} else {
-		UART_PUT_IER(port, ATMEL_US_RXRDY);
+		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
 	}
 }
 
@@ -490,15 +515,16 @@
  */
 static void atmel_stop_rx(struct uart_port *port)
 {
-	UART_PUT_CR(port, ATMEL_US_RXDIS);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
 
 	if (atmel_use_pdc_rx(port)) {
 		/* disable PDC receive */
-		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
-		UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
-			port->read_status_mask);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
+		atmel_uart_writel(port, ATMEL_US_IDR,
+				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+				  port->read_status_mask);
 	} else {
-		UART_PUT_IDR(port, ATMEL_US_RXRDY);
+		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
 	}
 }
 
@@ -538,7 +564,7 @@
 	else
 		ier |= ATMEL_US_DCDIC;
 
-	UART_PUT_IER(port, ier);
+	atmel_uart_writel(port, ATMEL_US_IER, ier);
 }
 
 /*
@@ -577,7 +603,7 @@
 	else
 		idr |= ATMEL_US_DCDIC;
 
-	UART_PUT_IDR(port, idr);
+	atmel_uart_writel(port, ATMEL_US_IDR, idr);
 }
 
 /*
@@ -586,9 +612,11 @@
 static void atmel_break_ctl(struct uart_port *port, int break_state)
 {
 	if (break_state != 0)
-		UART_PUT_CR(port, ATMEL_US_STTBRK);	/* start break */
+		/* start break */
+		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
 	else
-		UART_PUT_CR(port, ATMEL_US_STPBRK);	/* stop break */
+		/* stop break */
+		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
 }
 
 /*
@@ -622,7 +650,7 @@
 static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
 {
 	/* clear error */
-	UART_PUT_CR(port, ATMEL_US_RSTSTA);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
 
 	if (status & ATMEL_US_RXBRK) {
 		/* ignore side-effect */
@@ -645,9 +673,9 @@
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 	unsigned int status, ch;
 
-	status = UART_GET_CSR(port);
+	status = atmel_uart_readl(port, ATMEL_US_CSR);
 	while (status & ATMEL_US_RXRDY) {
-		ch = UART_GET_CHAR(port);
+		ch = atmel_uart_read_char(port);
 
 		/*
 		 * note that the error handling code is
@@ -658,12 +686,13 @@
 			     || atmel_port->break_active)) {
 
 			/* clear error */
-			UART_PUT_CR(port, ATMEL_US_RSTSTA);
+			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
 
 			if (status & ATMEL_US_RXBRK
 			    && !atmel_port->break_active) {
 				atmel_port->break_active = 1;
-				UART_PUT_IER(port, ATMEL_US_RXBRK);
+				atmel_uart_writel(port, ATMEL_US_IER,
+						  ATMEL_US_RXBRK);
 			} else {
 				/*
 				 * This is either the end-of-break
@@ -672,14 +701,15 @@
 				 * being set. In both cases, the next
 				 * RXBRK will indicate start-of-break.
 				 */
-				UART_PUT_IDR(port, ATMEL_US_RXBRK);
+				atmel_uart_writel(port, ATMEL_US_IDR,
+						  ATMEL_US_RXBRK);
 				status &= ~ATMEL_US_RXBRK;
 				atmel_port->break_active = 0;
 			}
 		}
 
 		atmel_buffer_rx_char(port, status, ch);
-		status = UART_GET_CSR(port);
+		status = atmel_uart_readl(port, ATMEL_US_CSR);
 	}
 
 	tasklet_schedule(&atmel_port->tasklet);
@@ -694,16 +724,18 @@
 	struct circ_buf *xmit = &port->state->xmit;
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
-	if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
-		UART_PUT_CHAR(port, port->x_char);
+	if (port->x_char &&
+	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
+		atmel_uart_write_char(port, port->x_char);
 		port->icount.tx++;
 		port->x_char = 0;
 	}
 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
 		return;
 
-	while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
-		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+	while (atmel_uart_readl(port, ATMEL_US_CSR) &
+	       atmel_port->tx_done_mask) {
+		atmel_uart_write_char(port, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		port->icount.tx++;
 		if (uart_circ_empty(xmit))
@@ -715,7 +747,8 @@
 
 	if (!uart_circ_empty(xmit))
 		/* Enable interrupts */
-		UART_PUT_IER(port, atmel_port->tx_done_mask);
+		atmel_uart_writel(port, ATMEL_US_IER,
+				  atmel_port->tx_done_mask);
 }
 
 static void atmel_complete_tx_dma(void *arg)
@@ -730,10 +763,10 @@
 
 	if (chan)
 		dmaengine_terminate_all(chan);
-	xmit->tail += sg_dma_len(&atmel_port->sg_tx);
+	xmit->tail += atmel_port->tx_len;
 	xmit->tail &= UART_XMIT_SIZE - 1;
 
-	port->icount.tx += sg_dma_len(&atmel_port->sg_tx);
+	port->icount.tx += atmel_port->tx_len;
 
 	spin_lock_irq(&atmel_port->lock_tx);
 	async_tx_ack(atmel_port->desc_tx);
@@ -781,7 +814,9 @@
 	struct circ_buf *xmit = &port->state->xmit;
 	struct dma_chan *chan = atmel_port->chan_tx;
 	struct dma_async_tx_descriptor *desc;
-	struct scatterlist *sg = &atmel_port->sg_tx;
+	struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx;
+	unsigned int tx_len, part1_len, part2_len, sg_len;
+	dma_addr_t phys_addr;
 
 	/* Make sure we have an idle channel */
 	if (atmel_port->desc_tx != NULL)
@@ -797,18 +832,46 @@
 		 * Take the port lock to get a
 		 * consistent xmit buffer state.
 		 */
-		sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-		sg_dma_address(sg) = (sg_dma_address(sg) &
-					~(UART_XMIT_SIZE - 1))
-					+ sg->offset;
-		sg_dma_len(sg) = CIRC_CNT_TO_END(xmit->head,
-						xmit->tail,
-						UART_XMIT_SIZE);
-		BUG_ON(!sg_dma_len(sg));
+		tx_len = CIRC_CNT_TO_END(xmit->head,
+					 xmit->tail,
+					 UART_XMIT_SIZE);
+
+		if (atmel_port->fifo_size) {
+			/* multi data mode */
+			part1_len = (tx_len & ~0x3); /* DWORD access */
+			part2_len = (tx_len & 0x3); /* BYTE access */
+		} else {
+			/* single data (legacy) mode */
+			part1_len = 0;
+			part2_len = tx_len; /* BYTE access only */
+		}
+
+		sg_init_table(sgl, 2);
+		sg_len = 0;
+		phys_addr = sg_dma_address(sg_tx) + xmit->tail;
+		if (part1_len) {
+			sg = &sgl[sg_len++];
+			sg_dma_address(sg) = phys_addr;
+			sg_dma_len(sg) = part1_len;
+
+			phys_addr += part1_len;
+		}
+
+		if (part2_len) {
+			sg = &sgl[sg_len++];
+			sg_dma_address(sg) = phys_addr;
+			sg_dma_len(sg) = part2_len;
+		}
+
+		/*
+		 * save tx_len so atmel_complete_tx_dma() will increase
+		 * xmit->tail correctly
+		 */
+		atmel_port->tx_len = tx_len;
 
 		desc = dmaengine_prep_slave_sg(chan,
-					       sg,
-					       1,
+					       sgl,
+					       sg_len,
 					       DMA_MEM_TO_DEV,
 					       DMA_PREP_INTERRUPT |
 					       DMA_CTRL_ACK);
@@ -817,7 +880,7 @@
 			return;
 		}
 
-		dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
+		dma_sync_sg_for_device(port->dev, sg_tx, 1, DMA_TO_DEVICE);
 
 		atmel_port->desc_tx = desc;
 		desc->callback = atmel_complete_tx_dma;
@@ -877,7 +940,9 @@
 	/* Configure the slave DMA */
 	memset(&config, 0, sizeof(config));
 	config.direction = DMA_MEM_TO_DEV;
-	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+	config.dst_addr_width = (atmel_port->fifo_size) ?
+				DMA_SLAVE_BUSWIDTH_4_BYTES :
+				DMA_SLAVE_BUSWIDTH_1_BYTE;
 	config.dst_addr = port->mapbase + ATMEL_US_THR;
 	config.dst_maxburst = 1;
 
@@ -935,14 +1000,14 @@
 
 
 	/* Reset the UART timeout early so that we don't miss one */
-	UART_PUT_CR(port, ATMEL_US_STTTO);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
 	dmastat = dmaengine_tx_status(chan,
 				atmel_port->cookie_rx,
 				&state);
 	/* Restart a new tasklet if DMA status is error */
 	if (dmastat == DMA_ERROR) {
 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
-		UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
 		tasklet_schedule(&atmel_port->tasklet);
 		return;
 	}
@@ -1008,7 +1073,7 @@
 	tty_flip_buffer_push(tport);
 	spin_lock(&port->lock);
 
-	UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
 }
 
 static int atmel_prepare_rx_dma(struct uart_port *port)
@@ -1118,8 +1183,8 @@
 		 * the moment.
 		 */
 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
-			UART_PUT_IDR(port, (ATMEL_US_ENDRX
-						| ATMEL_US_TIMEOUT));
+			atmel_uart_writel(port, ATMEL_US_IDR,
+					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
 			tasklet_schedule(&atmel_port->tasklet);
 		}
 
@@ -1130,7 +1195,8 @@
 
 	if (atmel_use_dma_rx(port)) {
 		if (pending & ATMEL_US_TIMEOUT) {
-			UART_PUT_IDR(port, ATMEL_US_TIMEOUT);
+			atmel_uart_writel(port, ATMEL_US_IDR,
+					  ATMEL_US_TIMEOUT);
 			tasklet_schedule(&atmel_port->tasklet);
 		}
 	}
@@ -1143,8 +1209,8 @@
 		 * End of break detected. If it came along with a
 		 * character, atmel_rx_chars will handle it.
 		 */
-		UART_PUT_CR(port, ATMEL_US_RSTSTA);
-		UART_PUT_IDR(port, ATMEL_US_RXBRK);
+		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
 		atmel_port->break_active = 0;
 	}
 }
@@ -1159,7 +1225,8 @@
 
 	if (pending & atmel_port->tx_done_mask) {
 		/* Either PDC or interrupt transmission */
-		UART_PUT_IDR(port, atmel_port->tx_done_mask);
+		atmel_uart_writel(port, ATMEL_US_IDR,
+				  atmel_port->tx_done_mask);
 		tasklet_schedule(&atmel_port->tasklet);
 	}
 }
@@ -1197,7 +1264,7 @@
 
 	do {
 		status = atmel_get_lines_status(port);
-		mask = UART_GET_IMR(port);
+		mask = atmel_uart_readl(port, ATMEL_US_IMR);
 		pending = status & mask;
 		if (!gpio_handled) {
 			/*
@@ -1223,7 +1290,7 @@
 		if (atmel_port->suspended) {
 			atmel_port->pending |= pending;
 			atmel_port->pending_status = status;
-			UART_PUT_IDR(port, mask);
+			atmel_uart_writel(port, ATMEL_US_IDR, mask);
 			pm_system_wakeup();
 			break;
 		}
@@ -1260,7 +1327,7 @@
 	int count;
 
 	/* nothing left to transmit? */
-	if (UART_GET_TCR(port))
+	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
 		return;
 
 	xmit->tail += pdc->ofs;
@@ -1272,7 +1339,7 @@
 	/* more to transmit - setup next transfer */
 
 	/* disable PDC transmit */
-	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
 
 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
 		dma_sync_single_for_device(port->dev,
@@ -1283,12 +1350,14 @@
 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
 		pdc->ofs = count;
 
-		UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
-		UART_PUT_TCR(port, count);
+		atmel_uart_writel(port, ATMEL_PDC_TPR,
+				  pdc->dma_addr + xmit->tail);
+		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
 		/* re-enable PDC transmit */
-		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
 		/* Enable interrupts */
-		UART_PUT_IER(port, atmel_port->tx_done_mask);
+		atmel_uart_writel(port, ATMEL_US_IER,
+				  atmel_port->tx_done_mask);
 	} else {
 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
@@ -1414,10 +1483,10 @@
 
 	do {
 		/* Reset the UART timeout early so that we don't miss one */
-		UART_PUT_CR(port, ATMEL_US_STTTO);
+		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
 
 		pdc = &atmel_port->pdc_rx[rx_idx];
-		head = UART_GET_RPR(port) - pdc->dma_addr;
+		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
 		tail = pdc->ofs;
 
 		/* If the PDC has switched buffers, RPR won't contain
@@ -1460,8 +1529,8 @@
 		 */
 		if (head >= pdc->dma_size) {
 			pdc->ofs = 0;
-			UART_PUT_RNPR(port, pdc->dma_addr);
-			UART_PUT_RNCR(port, pdc->dma_size);
+			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
+			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
 
 			rx_idx = !rx_idx;
 			atmel_port->pdc_rx_idx = rx_idx;
@@ -1476,7 +1545,8 @@
 	tty_flip_buffer_push(tport);
 	spin_lock(&port->lock);
 
-	UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+	atmel_uart_writel(port, ATMEL_US_IER,
+			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
 }
 
 static int atmel_prepare_rx_pdc(struct uart_port *port)
@@ -1509,11 +1579,12 @@
 
 	atmel_port->pdc_rx_idx = 0;
 
-	UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
-	UART_PUT_RCR(port, PDC_BUFFER_SIZE);
+	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
+	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
 
-	UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
-	UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
+	atmel_uart_writel(port, ATMEL_PDC_RNPR,
+			  atmel_port->pdc_rx[1].dma_addr);
+	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
 
 	return 0;
 }
@@ -1667,7 +1738,7 @@
 static void atmel_get_ip_name(struct uart_port *port)
 {
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-	int name = UART_GET_IP_NAME(port);
+	int name = atmel_uart_readl(port, ATMEL_US_NAME);
 	u32 version;
 	int usart, uart;
 	/* usart and uart ascii */
@@ -1684,7 +1755,7 @@
 		atmel_port->is_usart = false;
 	} else {
 		/* fallback for older SoCs: use version field */
-		version = UART_GET_IP_VERSION(port);
+		version = atmel_uart_readl(port, ATMEL_US_VERSION);
 		switch (version) {
 		case 0x302:
 		case 0x10213:
@@ -1756,7 +1827,7 @@
 	 * request_irq() is called we could get stuck trying to
 	 * handle an unexpected interrupt
 	 */
-	UART_PUT_IDR(port, -1);
+	atmel_uart_writel(port, ATMEL_US_IDR, -1);
 	atmel_port->ms_irq_enabled = false;
 
 	/*
@@ -1797,6 +1868,32 @@
 			atmel_set_ops(port);
 	}
 
+	/*
+	 * Enable FIFO when available
+	 */
+	if (atmel_port->fifo_size) {
+		unsigned int txrdym = ATMEL_US_ONE_DATA;
+		unsigned int rxrdym = ATMEL_US_ONE_DATA;
+		unsigned int fmr;
+
+		atmel_uart_writel(port, ATMEL_US_CR,
+				  ATMEL_US_FIFOEN |
+				  ATMEL_US_RXFCLR |
+				  ATMEL_US_TXFLCLR);
+
+		if (atmel_use_dma_tx(port))
+			txrdym = ATMEL_US_FOUR_DATA;
+
+		fmr = ATMEL_US_TXRDYM(txrdym) | ATMEL_US_RXRDYM(rxrdym);
+		if (atmel_port->rts_high &&
+		    atmel_port->rts_low)
+			fmr |=	ATMEL_US_FRTSC |
+				ATMEL_US_RXFTHRES(atmel_port->rts_high) |
+				ATMEL_US_RXFTHRES2(atmel_port->rts_low);
+
+		atmel_uart_writel(port, ATMEL_US_FMR, fmr);
+	}
+
 	/* Save current CSR for comparison in atmel_tasklet_func() */
 	atmel_port->irq_status_prev = atmel_get_lines_status(port);
 	atmel_port->irq_status = atmel_port->irq_status_prev;
@@ -1804,9 +1901,9 @@
 	/*
 	 * Finally, enable the serial port
 	 */
-	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
 	/* enable xmit & rcvr */
-	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
 
 	setup_timer(&atmel_port->uart_timer,
 			atmel_uart_timer_callback,
@@ -1819,13 +1916,14 @@
 					jiffies + uart_poll_timeout(port));
 		/* set USART timeout */
 		} else {
-			UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
-			UART_PUT_CR(port, ATMEL_US_STTTO);
+			atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
+			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
 
-			UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+			atmel_uart_writel(port, ATMEL_US_IER,
+					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
 		}
 		/* enable PDC controller */
-		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
 	} else if (atmel_use_dma_rx(port)) {
 		/* set UART timeout */
 		if (!atmel_port->is_usart) {
@@ -1833,14 +1931,15 @@
 					jiffies + uart_poll_timeout(port));
 		/* set USART timeout */
 		} else {
-			UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
-			UART_PUT_CR(port, ATMEL_US_STTTO);
+			atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
+			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
 
-			UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+			atmel_uart_writel(port, ATMEL_US_IER,
+					  ATMEL_US_TIMEOUT);
 		}
 	} else {
 		/* enable receive only */
-		UART_PUT_IER(port, ATMEL_US_RXRDY);
+		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
 	}
 
 	return 0;
@@ -1860,7 +1959,7 @@
 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
 	if (atmel_use_pdc_tx(port)) {
-		UART_PUT_TCR(port, 0);
+		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
 		atmel_port->pdc_tx.ofs = 0;
 	}
 }
@@ -1892,8 +1991,8 @@
 	atmel_stop_rx(port);
 	atmel_stop_tx(port);
 
-	UART_PUT_CR(port, ATMEL_US_RSTSTA);
-	UART_PUT_IDR(port, -1);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+	atmel_uart_writel(port, ATMEL_US_IDR, -1);
 
 
 	/*
@@ -1938,12 +2037,12 @@
 		clk_prepare_enable(atmel_port->clk);
 
 		/* re-enable interrupts if we disabled some on suspend */
-		UART_PUT_IER(port, atmel_port->backup_imr);
+		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
 		break;
 	case 3:
 		/* Back up the interrupt mask and disable all interrupts */
-		atmel_port->backup_imr = UART_GET_IMR(port);
-		UART_PUT_IDR(port, -1);
+		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
+		atmel_uart_writel(port, ATMEL_US_IDR, -1);
 
 		/*
 		 * Disable the peripheral clock for this serial port.
@@ -1966,7 +2065,7 @@
 	unsigned int old_mode, mode, imr, quot, baud;
 
 	/* save the current mode register */
-	mode = old_mode = UART_GET_MR(port);
+	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
 
 	/* reset the mode, clock divisor, parity, stop bits and data size */
 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
@@ -2025,7 +2124,7 @@
 
 	if (atmel_use_pdc_rx(port))
 		/* need to enable error interrupts */
-		UART_PUT_IER(port, port->read_status_mask);
+		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
 
 	/*
 	 * Characters to ignore
@@ -2052,15 +2151,16 @@
 	 * transmitter is empty if requested by the caller, so there's
 	 * no need to wait for it here.
 	 */
-	imr = UART_GET_IMR(port);
-	UART_PUT_IDR(port, -1);
+	imr = atmel_uart_readl(port, ATMEL_US_IMR);
+	atmel_uart_writel(port, ATMEL_US_IDR, -1);
 
 	/* disable receiver and transmitter */
-	UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
 
 	/* mode */
 	if (port->rs485.flags & SER_RS485_ENABLED) {
-		UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+		atmel_uart_writel(port, ATMEL_US_TTGR,
+				  port->rs485.delay_rts_after_send);
 		mode |= ATMEL_US_USMODE_RS485;
 	} else if (termios->c_cflag & CRTSCTS) {
 		/* RS232 with hardware handshake (RTS/CTS) */
@@ -2071,7 +2171,7 @@
 	}
 
 	/* set the mode, clock divisor, parity, stop bits and data size */
-	UART_PUT_MR(port, mode);
+	atmel_uart_writel(port, ATMEL_US_MR, mode);
 
 	/*
 	 * when switching the mode, set the RTS line state according to the
@@ -2088,16 +2188,16 @@
 			rts_state = ATMEL_US_RTSEN;
 		}
 
-		UART_PUT_CR(port, rts_state);
+		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
 	}
 
 	/* set the baud rate */
-	UART_PUT_BRGR(port, quot);
-	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
-	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+	atmel_uart_writel(port, ATMEL_US_BRGR, quot);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
 
 	/* restore interrupts */
-	UART_PUT_IER(port, imr);
+	atmel_uart_writel(port, ATMEL_US_IER, imr);
 
 	/* CTS flow-control and modem-status interrupts */
 	if (UART_ENABLE_MS(port, termios->c_cflag))
@@ -2208,18 +2308,18 @@
 #ifdef CONFIG_CONSOLE_POLL
 static int atmel_poll_get_char(struct uart_port *port)
 {
-	while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
+	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
 		cpu_relax();
 
-	return UART_GET_CHAR(port);
+	return atmel_uart_read_char(port);
 }
 
 static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
 {
-	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
 		cpu_relax();
 
-	UART_PUT_CHAR(port, ch);
+	atmel_uart_write_char(port, ch);
 }
 #endif
 
@@ -2324,9 +2424,9 @@
 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
 static void atmel_console_putchar(struct uart_port *port, int ch)
 {
-	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
 		cpu_relax();
-	UART_PUT_CHAR(port, ch);
+	atmel_uart_write_char(port, ch);
 }
 
 /*
@@ -2342,12 +2442,13 @@
 	/*
 	 * First, save IMR and then disable interrupts
 	 */
-	imr = UART_GET_IMR(port);
-	UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
+	imr = atmel_uart_readl(port, ATMEL_US_IMR);
+	atmel_uart_writel(port, ATMEL_US_IDR,
+			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
 
 	/* Store PDC transmit status and disable it */
-	pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
-	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
+	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
+	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
 
 	uart_console_write(port, s, count, atmel_console_putchar);
 
@@ -2356,15 +2457,15 @@
 	 * and restore IMR
 	 */
 	do {
-		status = UART_GET_CSR(port);
+		status = atmel_uart_readl(port, ATMEL_US_CSR);
 	} while (!(status & ATMEL_US_TXRDY));
 
 	/* Restore PDC transmit status */
 	if (pdc_tx)
-		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
 
 	/* set interrupts back the way they were */
-	UART_PUT_IER(port, imr);
+	atmel_uart_writel(port, ATMEL_US_IER, imr);
 }
 
 /*
@@ -2380,17 +2481,17 @@
 	 * If the baud rate generator isn't running, the port wasn't
 	 * initialized by the boot loader.
 	 */
-	quot = UART_GET_BRGR(port) & ATMEL_US_CD;
+	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
 	if (!quot)
 		return;
 
-	mr = UART_GET_MR(port) & ATMEL_US_CHRL;
+	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
 	if (mr == ATMEL_US_CHRL_8)
 		*bits = 8;
 	else
 		*bits = 7;
 
-	mr = UART_GET_MR(port) & ATMEL_US_PAR;
+	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
 	if (mr == ATMEL_US_PAR_EVEN)
 		*parity = 'e';
 	else if (mr == ATMEL_US_PAR_ODD)
@@ -2423,9 +2524,9 @@
 	if (ret)
 		return ret;
 
-	UART_PUT_IDR(port, -1);
-	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
-	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
+	atmel_uart_writel(port, ATMEL_US_IDR, -1);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -2532,7 +2633,8 @@
 
 	if (atmel_is_console_port(port) && console_suspend_enabled) {
 		/* Drain the TX shifter */
-		while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
+		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
+			 ATMEL_US_TXEMPTY))
 			cpu_relax();
 	}
 
@@ -2599,6 +2701,48 @@
 	return 0;
 }
 
+static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
+				     struct platform_device *pdev)
+{
+	port->fifo_size = 0;
+	port->rts_low = 0;
+	port->rts_high = 0;
+
+	if (of_property_read_u32(pdev->dev.of_node,
+				 "atmel,fifo-size",
+				 &port->fifo_size))
+		return;
+
+	if (!port->fifo_size)
+		return;
+
+	if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
+		port->fifo_size = 0;
+		dev_err(&pdev->dev, "Invalid FIFO size\n");
+		return;
+	}
+
+	/*
+	 * 0 <= rts_low <= rts_high <= fifo_size
+	 * Once their CTS line is asserted by the remote peer, some x86 UARTs
+	 * tend to flush their internal TX FIFO, commonly up to 16 data, before
+	 * they actually stop sending new data. So we try to set the RTS High
+	 * Threshold to a reasonably high value respecting this 16-data
+	 * empirical rule when possible.
+	 */
+	port->rts_high = max_t(int, port->fifo_size >> 1,
+			       port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
+	port->rts_low  = max_t(int, port->fifo_size >> 2,
+			       port->fifo_size - ATMEL_RTS_LOW_OFFSET);
+
+	dev_info(&pdev->dev, "Using FIFO (%u data)\n",
+		 port->fifo_size);
+	dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
+		port->rts_high);
+	dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
+		port->rts_low);
+}
+
 static int atmel_serial_probe(struct platform_device *pdev)
 {
 	struct atmel_uart_port *port;
@@ -2635,6 +2779,7 @@
 	port = &atmel_ports[ret];
 	port->backup_imr = 0;
 	port->uart.line = ret;
+	atmel_serial_probe_fifos(port, pdev);
 
 	spin_lock_init(&port->lock_suspended);
 
@@ -2684,8 +2829,9 @@
 	clk_prepare_enable(port->clk);
 
 	if (rs485_enabled) {
-		UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
-		UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
+		atmel_uart_writel(&port->uart, ATMEL_US_MR,
+				  ATMEL_US_USMODE_NORMAL);
+		atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
 	}
 
 	/*
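
The atmel_serial_probe_fifos() helper added above reads "atmel,fifo-size" from
the device tree and derives the RTS high/low thresholds from it. A stand-alone
sketch of that heuristic (the offset values below are illustrative, not the
driver's actual ATMEL_RTS_*_OFFSET defines):

#include <stdio.h>

/* Illustrative offsets; the driver carries its own ATMEL_RTS_*_OFFSET values. */
#define RTS_HIGH_OFFSET	16
#define RTS_LOW_OFFSET	20

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int fifo_size = 32;	/* as read from "atmel,fifo-size" */
	unsigned int rts_high, rts_low;

	/*
	 * Keep 0 <= rts_low <= rts_high <= fifo_size, while leaving roughly
	 * one 16-data flush worth of headroom below the high threshold
	 * whenever the FIFO is large enough.
	 */
	rts_high = max_int(fifo_size >> 1, (int)fifo_size - RTS_HIGH_OFFSET);
	rts_low  = max_int(fifo_size >> 2, (int)fifo_size - RTS_LOW_OFFSET);

	printf("rts_high=%u rts_low=%u\n", rts_high, rts_low);	/* 16 and 12 */
	return 0;
}
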
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index 679709f..6813e31 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -10,6 +10,8 @@
 #include <linux/of_address.h>
 #include <hwregs/ser_defs.h>
 
+#include "serial_mctrl_gpio.h"
+
 #define DRV_NAME "etraxfs-uart"
 #define UART_NR CONFIG_ETRAX_SERIAL_PORTS
 
@@ -28,10 +30,7 @@
 
 	void __iomem *regi_ser;
 
-	struct gpio_desc *dtr_pin;
-	struct gpio_desc *dsr_pin;
-	struct gpio_desc *ri_pin;
-	struct gpio_desc *cd_pin;
+	struct mctrl_gpios *gpios;
 
 	int write_ongoing;
 };
@@ -112,17 +111,10 @@
 	return 0;
 }
 
-static struct tty_driver *cris_console_device(struct console *co, int *index)
-{
-	struct uart_driver *p = co->data;
-	*index = co->index;
-	return p->tty_driver;
-}
-
 static struct console cris_console = {
 	.name = "ttyS",
 	.write = cris_console_write,
-	.device = cris_console_device,
+	.device = uart_console_device,
 	.setup = cris_console_setup,
 	.flags = CON_PRINTBUFFER,
 	.index = -1,
@@ -373,14 +365,6 @@
 	REG_WR(ser, regi_ser, rw_rec_ctrl, rec_ctrl);
 }
 
-static void etraxfs_uart_enable_ms(struct uart_port *port)
-{
-}
-
-static void check_modem_status(struct uart_cris_port *up)
-{
-}
-
 static unsigned int etraxfs_uart_tx_empty(struct uart_port *port)
 {
 	struct uart_cris_port *up = (struct uart_cris_port *)port;
@@ -404,21 +388,9 @@
 	ret = 0;
 	if (crisv32_serial_get_rts(up))
 		ret |= TIOCM_RTS;
-	/* DTR is active low */
-	if (up->dtr_pin && !gpiod_get_raw_value(up->dtr_pin))
-		ret |= TIOCM_DTR;
-	/* CD is active low */
-	if (up->cd_pin && !gpiod_get_raw_value(up->cd_pin))
-		ret |= TIOCM_CD;
-	/* RI is active low */
-	if (up->ri_pin && !gpiod_get_raw_value(up->ri_pin))
-		ret |= TIOCM_RI;
-	/* DSR is active low */
-	if (up->dsr_pin && !gpiod_get_raw_value(up->dsr_pin))
-		ret |= TIOCM_DSR;
 	if (crisv32_serial_get_cts(up))
 		ret |= TIOCM_CTS;
-	return ret;
+	return mctrl_gpio_get(up->gpios, &ret);
 }
 
 static void etraxfs_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -426,15 +398,7 @@
 	struct uart_cris_port *up = (struct uart_cris_port *)port;
 
 	crisv32_serial_set_rts(up, mctrl & TIOCM_RTS ? 1 : 0, 0);
-	/* DTR is active low */
-	if (up->dtr_pin)
-		gpiod_set_raw_value(up->dtr_pin, mctrl & TIOCM_DTR ? 0 : 1);
-	/* RI is active low */
-	if (up->ri_pin)
-		gpiod_set_raw_value(up->ri_pin, mctrl & TIOCM_RNG ? 0 : 1);
-	/* CD is active low */
-	if (up->cd_pin)
-		gpiod_set_raw_value(up->cd_pin, mctrl & TIOCM_CD ? 0 : 1);
+	mctrl_gpio_set(up->gpios, mctrl);
 }
 
 static void etraxfs_uart_break_ctl(struct uart_port *port, int break_state)
@@ -598,7 +562,6 @@
 			receive_chars_no_dma(up);
 			handled = 1;
 		}
-		check_modem_status(up);
 
 		if (masked_intr.tr_rdy) {
 			transmit_chars_no_dma(up);
@@ -862,7 +825,6 @@
 	.start_tx = etraxfs_uart_start_tx,
 	.send_xchar = etraxfs_uart_send_xchar,
 	.stop_rx = etraxfs_uart_stop_rx,
-	.enable_ms = etraxfs_uart_enable_ms,
 	.break_ctl = etraxfs_uart_break_ctl,
 	.startup = etraxfs_uart_startup,
 	.shutdown = etraxfs_uart_shutdown,
@@ -930,11 +892,12 @@
 
 	up->irq = irq_of_parse_and_map(np, 0);
 	up->regi_ser = of_iomap(np, 0);
-	up->dtr_pin = devm_gpiod_get_optional(&pdev->dev, "dtr");
-	up->dsr_pin = devm_gpiod_get_optional(&pdev->dev, "dsr");
-	up->ri_pin = devm_gpiod_get_optional(&pdev->dev, "ri");
-	up->cd_pin = devm_gpiod_get_optional(&pdev->dev, "cd");
 	up->port.dev = &pdev->dev;
+
+	up->gpios = mctrl_gpio_init(&pdev->dev, 0);
+	if (IS_ERR(up->gpios))
+		return PTR_ERR(up->gpios);
+
 	cris_serial_port_init(&up->port, dev_id);
 
 	etraxfs_uart_ports[dev_id] = up;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 54fdc78..fe3d41c 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -216,6 +216,8 @@
 	unsigned int		tx_bytes;
 	unsigned int		dma_tx_nents;
 	wait_queue_head_t	dma_wait;
+	unsigned int            saved_reg[10];
+	bool			context_saved;
 };
 
 struct imx_port_ucrs {
@@ -700,7 +702,8 @@
 		if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
 			goto out;
 
-		tty_insert_flip_char(port, rx, flg);
+		if (tty_insert_flip_char(port, rx, flg) == 0)
+			sport->port.icount.buf_overrun++;
 	}
 
 out:
@@ -766,7 +769,6 @@
 		writel(USR1_AWAKE, sport->port.membase + USR1);
 
 	if (sts2 & USR2_ORE) {
-		dev_err(sport->port.dev, "Rx FIFO overrun\n");
 		sport->port.icount.overrun++;
 		writel(USR2_ORE, sport->port.membase + USR2);
 	}
@@ -921,8 +923,13 @@
 	dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
 
 	if (count) {
-		if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ))
-			tty_insert_flip_string(port, sport->rx_buf, count);
+		if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
+			int bytes = tty_insert_flip_string(port, sport->rx_buf,
+					count);
+
+			if (bytes != count)
+				sport->port.icount.buf_overrun++;
+		}
 		tty_flip_buffer_push(port);
 
 		start_rx_dma(sport);
@@ -1624,12 +1631,12 @@
 	int locked = 1;
 	int retval;
 
-	retval = clk_enable(sport->clk_per);
+	retval = clk_prepare_enable(sport->clk_per);
 	if (retval)
 		return;
-	retval = clk_enable(sport->clk_ipg);
+	retval = clk_prepare_enable(sport->clk_ipg);
 	if (retval) {
-		clk_disable(sport->clk_per);
+		clk_disable_unprepare(sport->clk_per);
 		return;
 	}
 
@@ -1668,8 +1675,8 @@
 	if (locked)
 		spin_unlock_irqrestore(&sport->port.lock, flags);
 
-	clk_disable(sport->clk_ipg);
-	clk_disable(sport->clk_per);
+	clk_disable_unprepare(sport->clk_ipg);
+	clk_disable_unprepare(sport->clk_per);
 }
 
 /*
@@ -1770,15 +1777,7 @@
 
 	retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
 
-	clk_disable(sport->clk_ipg);
-	if (retval) {
-		clk_unprepare(sport->clk_ipg);
-		goto error_console;
-	}
-
-	retval = clk_prepare(sport->clk_per);
-	if (retval)
-		clk_disable_unprepare(sport->clk_ipg);
+	clk_disable_unprepare(sport->clk_ipg);
 
 error_console:
 	return retval;
@@ -1810,36 +1809,6 @@
 	.cons           = IMX_CONSOLE,
 };
 
-static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
-{
-	struct imx_port *sport = platform_get_drvdata(dev);
-	unsigned int val;
-
-	/* enable wakeup from i.MX UART */
-	val = readl(sport->port.membase + UCR3);
-	val |= UCR3_AWAKEN;
-	writel(val, sport->port.membase + UCR3);
-
-	uart_suspend_port(&imx_reg, &sport->port);
-
-	return 0;
-}
-
-static int serial_imx_resume(struct platform_device *dev)
-{
-	struct imx_port *sport = platform_get_drvdata(dev);
-	unsigned int val;
-
-	/* disable wakeup from i.MX UART */
-	val = readl(sport->port.membase + UCR3);
-	val &= ~UCR3_AWAKEN;
-	writel(val, sport->port.membase + UCR3);
-
-	uart_resume_port(&imx_reg, &sport->port);
-
-	return 0;
-}
-
 #ifdef CONFIG_OF
 /*
 * This function returns 1 iff pdev isn't a device instantiated by dt, 0 iff it
@@ -1901,7 +1870,7 @@
 {
 	struct imx_port *sport;
 	void __iomem *base;
-	int ret = 0;
+	int ret = 0, reg;
 	struct resource *res;
 	int txirq, rxirq, rtsirq;
 
@@ -1956,6 +1925,19 @@
 
 	sport->port.uartclk = clk_get_rate(sport->clk_per);
 
+	/* For register access, we only need to enable the ipg clock. */
+	ret = clk_prepare_enable(sport->clk_ipg);
+	if (ret)
+		return ret;
+
+	/* Disable interrupts before requesting them */
+	reg = readl_relaxed(sport->port.membase + UCR1);
+	reg &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN |
+		 UCR1_TXMPTYEN | UCR1_RTSDEN);
+	writel_relaxed(reg, sport->port.membase + UCR1);
+
+	clk_disable_unprepare(sport->clk_ipg);
+
 	/*
 	 * Allocate the IRQ(s); i.MX1 has three interrupts whereas later
 	 * chips only have one interrupt.
@@ -1991,16 +1973,135 @@
 	return uart_remove_one_port(&imx_reg, &sport->port);
 }
 
+static void serial_imx_restore_context(struct imx_port *sport)
+{
+	if (!sport->context_saved)
+		return;
+
+	writel(sport->saved_reg[4], sport->port.membase + UFCR);
+	writel(sport->saved_reg[5], sport->port.membase + UESC);
+	writel(sport->saved_reg[6], sport->port.membase + UTIM);
+	writel(sport->saved_reg[7], sport->port.membase + UBIR);
+	writel(sport->saved_reg[8], sport->port.membase + UBMR);
+	writel(sport->saved_reg[9], sport->port.membase + IMX21_UTS);
+	writel(sport->saved_reg[0], sport->port.membase + UCR1);
+	writel(sport->saved_reg[1] | UCR2_SRST, sport->port.membase + UCR2);
+	writel(sport->saved_reg[2], sport->port.membase + UCR3);
+	writel(sport->saved_reg[3], sport->port.membase + UCR4);
+	sport->context_saved = false;
+}
+
+static void serial_imx_save_context(struct imx_port *sport)
+{
+	/* Save necessary regs */
+	sport->saved_reg[0] = readl(sport->port.membase + UCR1);
+	sport->saved_reg[1] = readl(sport->port.membase + UCR2);
+	sport->saved_reg[2] = readl(sport->port.membase + UCR3);
+	sport->saved_reg[3] = readl(sport->port.membase + UCR4);
+	sport->saved_reg[4] = readl(sport->port.membase + UFCR);
+	sport->saved_reg[5] = readl(sport->port.membase + UESC);
+	sport->saved_reg[6] = readl(sport->port.membase + UTIM);
+	sport->saved_reg[7] = readl(sport->port.membase + UBIR);
+	sport->saved_reg[8] = readl(sport->port.membase + UBMR);
+	sport->saved_reg[9] = readl(sport->port.membase + IMX21_UTS);
+	sport->context_saved = true;
+}
+
+static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
+{
+	unsigned int val;
+
+	val = readl(sport->port.membase + UCR3);
+	if (on)
+		val |= UCR3_AWAKEN;
+	else
+		val &= ~UCR3_AWAKEN;
+	writel(val, sport->port.membase + UCR3);
+
+	val = readl(sport->port.membase + UCR1);
+	if (on)
+		val |= UCR1_RTSDEN;
+	else
+		val &= ~UCR1_RTSDEN;
+	writel(val, sport->port.membase + UCR1);
+}
+
+static int imx_serial_port_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct imx_port *sport = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = clk_enable(sport->clk_ipg);
+	if (ret)
+		return ret;
+
+	serial_imx_save_context(sport);
+
+	clk_disable(sport->clk_ipg);
+
+	return 0;
+}
+
+static int imx_serial_port_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct imx_port *sport = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = clk_enable(sport->clk_ipg);
+	if (ret)
+		return ret;
+
+	serial_imx_restore_context(sport);
+
+	clk_disable(sport->clk_ipg);
+
+	return 0;
+}
+
+static int imx_serial_port_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct imx_port *sport = platform_get_drvdata(pdev);
+
+	/* enable wakeup from i.MX UART */
+	serial_imx_enable_wakeup(sport, true);
+
+	uart_suspend_port(&imx_reg, &sport->port);
+
+	return 0;
+}
+
+static int imx_serial_port_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct imx_port *sport = platform_get_drvdata(pdev);
+
+	/* disable wakeup from i.MX UART */
+	serial_imx_enable_wakeup(sport, false);
+
+	uart_resume_port(&imx_reg, &sport->port);
+
+	return 0;
+}
+
+static const struct dev_pm_ops imx_serial_port_pm_ops = {
+	.suspend_noirq = imx_serial_port_suspend_noirq,
+	.resume_noirq = imx_serial_port_resume_noirq,
+	.suspend = imx_serial_port_suspend,
+	.resume = imx_serial_port_resume,
+};
+
 static struct platform_driver serial_imx_driver = {
 	.probe		= serial_imx_probe,
 	.remove		= serial_imx_remove,
 
-	.suspend	= serial_imx_suspend,
-	.resume		= serial_imx_resume,
 	.id_table	= imx_uart_devtype,
 	.driver		= {
 		.name	= "imx-uart",
 		.of_match_table = imx_uart_dt_ids,
+		.pm	= &imx_serial_port_pm_ops,
 	},
 };
 
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 4ccc039..b88832e 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -21,7 +21,6 @@
  */
 
 #include <linux/slab.h>
-#include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/console.h>
@@ -740,7 +739,6 @@
 	{ .compatible = DRVNAME },
 	{},
 };
-MODULE_DEVICE_TABLE(of, ltq_asc_match);
 
 static struct platform_driver lqasc_driver = {
 	.driver		= {
@@ -764,8 +762,4 @@
 
 	return ret;
 }
-
-module_init(init_lqasc);
-
-MODULE_DESCRIPTION("Lantiq serial port driver");
-MODULE_LICENSE("GPL");
+device_initcall(init_lqasc);
diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
index 35c5550..b90e7b3 100644
--- a/drivers/tty/serial/men_z135_uart.c
+++ b/drivers/tty/serial/men_z135_uart.c
@@ -392,7 +392,6 @@
 	struct men_z135_port *uart = (struct men_z135_port *)data;
 	struct uart_port *port = &uart->port;
 	bool handled = false;
-	unsigned long flags;
 	int irq_id;
 
 	uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
@@ -401,7 +400,7 @@
 	if (!irq_id)
 		goto out;
 
-	spin_lock_irqsave(&port->lock, flags);
+	spin_lock(&port->lock);
 	/* It's safe to write to IIR[7:6] RXC[9:8] */
 	iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
 
@@ -427,7 +426,7 @@
 		handled = true;
 	}
 
-	spin_unlock_irqrestore(&port->lock, flags);
+	spin_unlock(&port->lock);
 out:
 	return IRQ_RETVAL(handled);
 }
@@ -717,7 +716,7 @@
 
 	baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
 
-	spin_lock(&port->lock);
+	spin_lock_irq(&port->lock);
 	if (tty_termios_baud_rate(termios))
 		tty_termios_encode_baud_rate(termios, baud, baud);
 
@@ -725,7 +724,7 @@
 	iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
 
 	uart_update_timeout(port, termios->c_cflag, baud);
-	spin_unlock(&port->lock);
+	spin_unlock_irq(&port->lock);
 }
 
 static const char *men_z135_type(struct uart_port *port)
@@ -840,7 +839,6 @@
 	uart->port.membase = NULL;
 	uart->mdev = mdev;
 
-	spin_lock_init(&uart->port.lock);
 	spin_lock_init(&uart->lock);
 
 	err = uart_add_one_port(&men_z135_driver, &uart->port);
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 6fc07eb..41de374 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -239,8 +239,9 @@
 
 static int mpc52xx_psc_tx_empty(struct uart_port *port)
 {
-	return in_be16(&PSC(port)->mpc52xx_psc_status)
-	    & MPC52xx_PSC_SR_TXEMP;
+	u16 sts = in_be16(&PSC(port)->mpc52xx_psc_status);
+
+	return (sts & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
 }
 
 static void mpc52xx_psc_start_tx(struct uart_port *port)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 13cf773..7c7f308 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -100,6 +100,8 @@
 #define AUART_CTRL2_TXE				(1 << 8)
 #define AUART_CTRL2_UARTEN			(1 << 0)
 
+#define AUART_LINECTRL_BAUD_DIV_MAX		0x003fffc0
+#define AUART_LINECTRL_BAUD_DIV_MIN		0x000000ec
 #define AUART_LINECTRL_BAUD_DIVINT_SHIFT	16
 #define AUART_LINECTRL_BAUD_DIVINT_MASK		0xffff0000
 #define AUART_LINECTRL_BAUD_DIVINT(v)		(((v) & 0xffff) << 16)
@@ -659,7 +661,7 @@
 {
 	struct mxs_auart_port *s = to_auart_port(u);
 	u32 bm, ctrl, ctrl2, div;
-	unsigned int cflag, baud;
+	unsigned int cflag, baud, baud_min, baud_max;
 
 	cflag = termios->c_cflag;
 
@@ -752,7 +754,9 @@
 	}
 
 	/* set baud rate */
-	baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk);
+	baud_min = DIV_ROUND_UP(u->uartclk * 32, AUART_LINECTRL_BAUD_DIV_MAX);
+	baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
+	baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
 	div = u->uartclk * 32 / baud;
 	ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
 	ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6);
@@ -842,7 +846,7 @@
 	return IRQ_HANDLED;
 }
 
-static void mxs_auart_reset(struct uart_port *u)
+static void mxs_auart_reset_deassert(struct uart_port *u)
 {
 	int i;
 	unsigned int reg;
@@ -858,6 +862,30 @@
 	writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
 }
 
+static void mxs_auart_reset_assert(struct uart_port *u)
+{
+	int i;
+	u32 reg;
+
+	reg = readl(u->membase + AUART_CTRL0);
+	/* if already in reset state, keep it untouched */
+	if (reg & AUART_CTRL0_SFTRST)
+		return;
+
+	writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+	writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_SET);
+
+	for (i = 0; i < 1000; i++) {
+		reg = readl(u->membase + AUART_CTRL0);
+		/* reset is finished when the clock is gated */
+		if (reg & AUART_CTRL0_CLKGATE)
+			return;
+		udelay(10);
+	}
+
+	dev_err(u->dev, "Failed to reset the unit.");
+}
+
 static int mxs_auart_startup(struct uart_port *u)
 {
 	int ret;
@@ -867,7 +895,13 @@
 	if (ret)
 		return ret;
 
-	writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+	if (uart_console(u)) {
+		writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
+	} else {
+		/* reset the unit to a well known state */
+		mxs_auart_reset_assert(u);
+		mxs_auart_reset_deassert(u);
+	}
 
 	writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET);
 
@@ -899,12 +933,14 @@
 	if (auart_dma_enabled(s))
 		mxs_auart_dma_exit(s);
 
-	writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
-
-	writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
-			u->membase + AUART_INTR_CLR);
-
-	writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+	if (uart_console(u)) {
+		writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR);
+		writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
+				u->membase + AUART_INTR_CLR);
+		writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET);
+	} else {
+		mxs_auart_reset_assert(u);
+	}
 
 	clk_disable_unprepare(s->clk);
 }
@@ -1291,7 +1327,7 @@
 
 	auart_port[s->port.line] = s;
 
-	mxs_auart_reset(&s->port);
+	mxs_auart_reset_deassert(&s->port);
 
 	ret = uart_add_one_port(&auart_driver, &s->port);
 	if (ret)
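
The mxs-auart change above no longer passes an unbounded range to
uart_get_baud_rate(); it clamps the request so that the 32 * uartclk / baud
divisor fits the controller's DIVINT/DIVFRAC fields. A stand-alone sketch of
that arithmetic, assuming a 24 MHz UART clock:

#include <stdio.h>

#define BAUD_DIV_MAX	0x003fffc0	/* mirrors AUART_LINECTRL_BAUD_DIV_MAX */
#define BAUD_DIV_MIN	0x000000ec	/* mirrors AUART_LINECTRL_BAUD_DIV_MIN */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long uartclk = 24000000;	/* assumed UART input clock, Hz */
	unsigned long baud_min = DIV_ROUND_UP(uartclk * 32, BAUD_DIV_MAX);
	unsigned long baud_max = uartclk * 32 / BAUD_DIV_MIN;
	unsigned long baud = 115200;		/* a request inside that range */
	unsigned long div = uartclk * 32 / baud;

	/* the divisor splits into a 6-bit fractional and a 16-bit integer part */
	printf("baud range %lu..%lu, divfrac=%lu divint=%lu\n",
	       baud_min, baud_max, div & 0x3f, div >> 6);
	return 0;
}
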
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 67d0c21..856686d 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -53,7 +53,6 @@
 #include "samsung.h"
 
 #if	defined(CONFIG_SERIAL_SAMSUNG_DEBUG) &&	\
-	defined(CONFIG_DEBUG_LL) &&		\
 	!defined(MODULE)
 
 extern void printascii(const char *);
@@ -295,15 +294,6 @@
 	if (ourport->tx_mode != S3C24XX_TX_DMA)
 		enable_tx_dma(ourport);
 
-	while (xmit->tail & (dma_get_cache_alignment() - 1)) {
-		if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
-			return 0;
-		wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
-		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-		port->icount.tx++;
-		count--;
-	}
-
 	dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
 	dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
 
@@ -342,7 +332,9 @@
 		return;
 	}
 
-	if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize)
+	if (!ourport->dma || !ourport->dma->tx_chan ||
+	    count < ourport->min_dma_size ||
+	    xmit->tail & (dma_get_cache_alignment() - 1))
 		s3c24xx_serial_start_tx_pio(ourport);
 	else
 		s3c24xx_serial_start_tx_dma(ourport, count);
@@ -736,15 +728,20 @@
 	struct uart_port *port = &ourport->port;
 	struct circ_buf *xmit = &port->state->xmit;
 	unsigned long flags;
-	int count;
+	int count, dma_count = 0;
 
 	spin_lock_irqsave(&port->lock, flags);
 
 	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
 
-	if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) {
-		s3c24xx_serial_start_tx_dma(ourport, count);
-		goto out;
+	if (ourport->dma && ourport->dma->tx_chan &&
+	    count >= ourport->min_dma_size) {
+		int align = dma_get_cache_alignment() -
+			(xmit->tail & (dma_get_cache_alignment() - 1));
+		if (count-align >= ourport->min_dma_size) {
+			dma_count = count-align;
+			count = align;
+		}
 	}
 
 	if (port->x_char) {
@@ -765,14 +762,24 @@
 
 	/* try and drain the buffer... */
 
-	count = port->fifosize;
-	while (!uart_circ_empty(xmit) && count-- > 0) {
+	if (count > port->fifosize) {
+		count = port->fifosize;
+		dma_count = 0;
+	}
+
+	while (!uart_circ_empty(xmit) && count > 0) {
 		if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
 			break;
 
 		wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		port->icount.tx++;
+		count--;
+	}
+
+	if (!count && dma_count) {
+		s3c24xx_serial_start_tx_dma(ourport, dma_count);
+		goto out;
 	}
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
@@ -1838,6 +1845,13 @@
 	else if (ourport->info->fifosize)
 		ourport->port.fifosize = ourport->info->fifosize;
 
+	/*
+	 * DMA transfers must be aligned at least to cache line size,
+	 * so find minimal transfer size suitable for DMA mode
+	 */
+	ourport->min_dma_size = max_t(int, ourport->port.fifosize,
+				    dma_get_cache_alignment());
+
 	probe_index++;
 
 	dbg("%s: initialising port %p...\n", __func__, ourport);
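
With the samsung changes above, a transfer is handed to DMA only when at least
min_dma_size bytes are pending and the buffer tail is cache-line aligned; any
short unaligned prefix is drained by PIO first. A stand-alone sketch of that
split, assuming a 64-byte cache line:

#include <stdio.h>

#define CACHE_ALIGN	64	/* assumed dma_get_cache_alignment() result */

/*
 * Split 'count' pending bytes starting at circular-buffer offset 'tail' into
 * a short PIO prefix (to reach alignment) and an aligned DMA chunk.
 */
static void split_tx(unsigned int tail, int count, int min_dma_size,
		     int *pio_count, int *dma_count)
{
	int align = CACHE_ALIGN - (tail & (CACHE_ALIGN - 1));

	*pio_count = count;
	*dma_count = 0;
	if (count >= min_dma_size && count - align >= min_dma_size) {
		*pio_count = align;		/* written byte-by-byte */
		*dma_count = count - align;	/* handed to the DMA engine */
	}
}

int main(void)
{
	int pio, dma;

	split_tx(100, 500, 64, &pio, &dma);
	printf("pio=%d dma=%d\n", pio, dma);	/* pio=28 dma=472 */
	return 0;
}
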
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index d275032..fc5deaa 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -82,6 +82,7 @@
 	unsigned char			tx_claimed;
 	unsigned int			pm_level;
 	unsigned long			baudclk_rate;
+	unsigned int			min_dma_size;
 
 	unsigned int			rx_irq;
 	unsigned int			tx_irq;
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 5ccc698c..72ffd0d 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -11,6 +11,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
@@ -29,6 +31,7 @@
 #include <linux/uaccess.h>
 
 #define SC16IS7XX_NAME			"sc16is7xx"
+#define SC16IS7XX_MAX_DEVS		8
 
 /* SC16IS7XX register definitions */
 #define SC16IS7XX_RHR_REG		(0x00) /* RX FIFO */
@@ -312,14 +315,14 @@
 
 struct sc16is7xx_one {
 	struct uart_port		port;
+	u8				line;
 	struct kthread_work		tx_work;
 	struct kthread_work		reg_work;
 	struct sc16is7xx_one_config	config;
 };
 
 struct sc16is7xx_port {
-	struct uart_driver		uart;
-	struct sc16is7xx_devtype	*devtype;
+	const struct sc16is7xx_devtype	*devtype;
 	struct regmap			*regmap;
 	struct clk			*clk;
 #ifdef CONFIG_GPIOLIB
@@ -332,16 +335,31 @@
 	struct sc16is7xx_one		p[0];
 };
 
+static unsigned long sc16is7xx_lines;
+
+static struct uart_driver sc16is7xx_uart = {
+	.owner		= THIS_MODULE,
+	.dev_name	= "ttySC",
+	.nr		= SC16IS7XX_MAX_DEVS,
+};
+
 #define to_sc16is7xx_port(p,e)	((container_of((p), struct sc16is7xx_port, e)))
 #define to_sc16is7xx_one(p,e)	((container_of((p), struct sc16is7xx_one, e)))
 
+static int sc16is7xx_line(struct uart_port *port)
+{
+	struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
+
+	return one->line;
+}
+
 static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
 {
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
 	unsigned int val = 0;
+	const u8 line = sc16is7xx_line(port);
 
-	regmap_read(s->regmap,
-		    (reg << SC16IS7XX_REG_SHIFT) | port->line, &val);
+	regmap_read(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, &val);
 
 	return val;
 }
@@ -349,15 +367,16 @@
 static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
 {
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+	const u8 line = sc16is7xx_line(port);
 
-	regmap_write(s->regmap,
-		     (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
+	regmap_write(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line, val);
 }
 
 static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
 {
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-	u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+	const u8 line = sc16is7xx_line(port);
+	u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | line;
 
 	regcache_cache_bypass(s->regmap, true);
 	regmap_raw_read(s->regmap, addr, s->buf, rxlen);
@@ -367,7 +386,8 @@
 static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
 {
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
-	u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
+	const u8 line = sc16is7xx_line(port);
+	u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | line;
 
 	regcache_cache_bypass(s->regmap, true);
 	regmap_raw_write(s->regmap, addr, s->buf, to_send);
@@ -378,12 +398,24 @@
 				  u8 mask, u8 val)
 {
 	struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
+	const u8 line = sc16is7xx_line(port);
 
-	regmap_update_bits(s->regmap,
-			   (reg << SC16IS7XX_REG_SHIFT) | port->line,
+	regmap_update_bits(s->regmap, (reg << SC16IS7XX_REG_SHIFT) | line,
 			   mask, val);
 }
 
+static int sc16is7xx_alloc_line(void)
+{
+	int i;
+
+	BUILD_BUG_ON(SC16IS7XX_MAX_DEVS > BITS_PER_LONG);
+
+	for (i = 0; i < SC16IS7XX_MAX_DEVS; i++)
+		if (!test_and_set_bit(i, &sc16is7xx_lines))
+			break;
+
+	return i;
+}
 
 static void sc16is7xx_power(struct uart_port *port, int on)
 {
@@ -508,7 +540,7 @@
 
 	if (unlikely(rxlen >= sizeof(s->buf))) {
 		dev_warn_ratelimited(port->dev,
-				     "Port %i: Possible RX FIFO overrun: %d\n",
+				     "ttySC%i: Possible RX FIFO overrun: %d\n",
 				     port->line, rxlen);
 		port->icount.buf_overrun++;
 		/* Ensure sanity of RX level */
@@ -649,7 +681,7 @@
 			break;
 		default:
 			dev_err_ratelimited(port->dev,
-					    "Port %i: Unexpected interrupt: %x",
+					    "ttySC%i: Unexpected interrupt: %x",
 					    port->line, iir);
 			break;
 		}
@@ -661,7 +693,7 @@
 	struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
 	int i;
 
-	for (i = 0; i < s->uart.nr; ++i)
+	for (i = 0; i < s->devtype->nr_uart; ++i)
 		sc16is7xx_port_irq(s, i);
 }
 
@@ -1099,7 +1131,7 @@
 #endif
 
 static int sc16is7xx_probe(struct device *dev,
-			   struct sc16is7xx_devtype *devtype,
+			   const struct sc16is7xx_devtype *devtype,
 			   struct regmap *regmap, int irq, unsigned long flags)
 {
 	struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
@@ -1134,23 +1166,13 @@
 	s->devtype = devtype;
 	dev_set_drvdata(dev, s);
 
-	/* Register UART driver */
-	s->uart.owner		= THIS_MODULE;
-	s->uart.dev_name	= "ttySC";
-	s->uart.nr		= devtype->nr_uart;
-	ret = uart_register_driver(&s->uart);
-	if (ret) {
-		dev_err(dev, "Registering UART driver failed\n");
-		goto out_clk;
-	}
-
 	init_kthread_worker(&s->kworker);
 	init_kthread_work(&s->irq_work, sc16is7xx_ist);
 	s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
 				      "sc16is7xx");
 	if (IS_ERR(s->kworker_task)) {
 		ret = PTR_ERR(s->kworker_task);
-		goto out_uart;
+		goto out_clk;
 	}
 	sched_setscheduler(s->kworker_task, SCHED_FIFO, &sched_param);
 
@@ -1174,8 +1196,8 @@
 #endif
 
 	for (i = 0; i < devtype->nr_uart; ++i) {
+		s->p[i].line		= i;
 		/* Initialize port data */
-		s->p[i].port.line	= i;
 		s->p[i].port.dev	= dev;
 		s->p[i].port.irq	= irq;
 		s->p[i].port.type	= PORT_SC16IS7XX;
@@ -1185,6 +1207,12 @@
 		s->p[i].port.uartclk	= freq;
 		s->p[i].port.rs485_config = sc16is7xx_config_rs485;
 		s->p[i].port.ops	= &sc16is7xx_ops;
+		s->p[i].port.line	= sc16is7xx_alloc_line();
+		if (s->p[i].port.line >= SC16IS7XX_MAX_DEVS) {
+			ret = -ENOMEM;
+			goto out_ports;
+		}
+
 		/* Disable all interrupts */
 		sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
 		/* Disable TX/RX */
@@ -1195,7 +1223,7 @@
 		init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
 		init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
 		/* Register port */
-		uart_add_one_port(&s->uart, &s->p[i].port);
+		uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
 		/* Go to suspend mode */
 		sc16is7xx_power(&s->p[i].port, 0);
 	}
@@ -1206,8 +1234,11 @@
 	if (!ret)
 		return 0;
 
-	for (i = 0; i < s->uart.nr; i++)
-		uart_remove_one_port(&s->uart, &s->p[i].port);
+out_ports:
+	for (i--; i >= 0; i--) {
+		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+	}
 
 #ifdef CONFIG_GPIOLIB
 	if (devtype->nr_gpio)
@@ -1217,9 +1248,6 @@
 #endif
 	kthread_stop(s->kworker_task);
 
-out_uart:
-	uart_unregister_driver(&s->uart);
-
 out_clk:
 	if (!IS_ERR(s->clk))
 		clk_disable_unprepare(s->clk);
@@ -1237,15 +1265,15 @@
 		gpiochip_remove(&s->gpio);
 #endif
 
-	for (i = 0; i < s->uart.nr; i++) {
-		uart_remove_one_port(&s->uart, &s->p[i].port);
+	for (i = 0; i < s->devtype->nr_uart; i++) {
+		uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+		clear_bit(s->p[i].port.line, &sc16is7xx_lines);
 		sc16is7xx_power(&s->p[i].port, 0);
 	}
 
 	flush_kthread_worker(&s->kworker);
 	kthread_stop(s->kworker_task);
 
-	uart_unregister_driver(&s->uart);
 	if (!IS_ERR(s->clk))
 		clk_disable_unprepare(s->clk);
 
@@ -1275,7 +1303,7 @@
 #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
 static int sc16is7xx_spi_probe(struct spi_device *spi)
 {
-	struct sc16is7xx_devtype *devtype;
+	const struct sc16is7xx_devtype *devtype;
 	unsigned long flags = 0;
 	struct regmap *regmap;
 	int ret;
@@ -1344,7 +1372,7 @@
 static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
 			       const struct i2c_device_id *id)
 {
-	struct sc16is7xx_devtype *devtype;
+	const struct sc16is7xx_devtype *devtype;
 	unsigned long flags = 0;
 	struct regmap *regmap;
 
@@ -1385,7 +1413,6 @@
 static struct i2c_driver sc16is7xx_i2c_uart_driver = {
 	.driver = {
 		.name		= SC16IS7XX_NAME,
-		.owner		= THIS_MODULE,
 		.of_match_table	= of_match_ptr(sc16is7xx_dt_ids),
 	},
 	.probe		= sc16is7xx_i2c_probe,
@@ -1398,7 +1425,14 @@
 
 static int __init sc16is7xx_init(void)
 {
-	int ret = 0;
+	int ret;
+
+	ret = uart_register_driver(&sc16is7xx_uart);
+	if (ret) {
+		pr_err("Registering UART driver failed\n");
+		return ret;
+	}
+
 #ifdef CONFIG_SERIAL_SC16IS7XX_I2C
 	ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
 	if (ret < 0) {
@@ -1427,6 +1461,7 @@
 #ifdef CONFIG_SERIAL_SC16IS7XX_SPI
 	spi_unregister_driver(&sc16is7xx_spi_uart_driver);
 #endif
+	uart_unregister_driver(&sc16is7xx_uart);
 }
 module_exit(sc16is7xx_exit);
 
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f368520..603d2cc 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1377,7 +1377,6 @@
 	struct uart_state *state = tty->driver_data;
 	struct tty_port *port;
 	struct uart_port *uport;
-	unsigned long flags;
 
 	if (!state) {
 		struct uart_driver *drv = tty->driver->driver_state;
@@ -1403,10 +1402,9 @@
 	 * disable the receive line status interrupts.
 	 */
 	if (port->flags & ASYNC_INITIALIZED) {
-		unsigned long flags;
-		spin_lock_irqsave(&uport->lock, flags);
+		spin_lock_irq(&uport->lock);
 		uport->ops->stop_rx(uport);
-		spin_unlock_irqrestore(&uport->lock, flags);
+		spin_unlock_irq(&uport->lock);
 		/*
 		 * Before we drop DTR, make sure the UART transmitter
 		 * has completely drained; this is especially
@@ -1419,17 +1417,17 @@
 	uart_shutdown(tty, state);
 	tty_port_tty_set(port, NULL);
 
-	spin_lock_irqsave(&port->lock, flags);
+	spin_lock_irq(&port->lock);
 
 	if (port->blocked_open) {
-		spin_unlock_irqrestore(&port->lock, flags);
+		spin_unlock_irq(&port->lock);
 		if (port->close_delay)
 			msleep_interruptible(jiffies_to_msecs(port->close_delay));
-		spin_lock_irqsave(&port->lock, flags);
+		spin_lock_irq(&port->lock);
 	} else if (!uart_console(uport)) {
-		spin_unlock_irqrestore(&port->lock, flags);
+		spin_unlock_irq(&port->lock);
 		uart_change_pm(state, UART_PM_STATE_OFF);
-		spin_lock_irqsave(&port->lock, flags);
+		spin_lock_irq(&port->lock);
 	}
 
 	/*
@@ -1437,7 +1435,7 @@
 	 */
 	clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
 	clear_bit(ASYNCB_CLOSING, &port->flags);
-	spin_unlock_irqrestore(&port->lock, flags);
+	spin_unlock_irq(&port->lock);
 	wake_up_interruptible(&port->open_wait);
 	wake_up_interruptible(&port->close_wait);
 
@@ -1532,11 +1530,6 @@
 	mutex_unlock(&port->mutex);
 }
 
-static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
-{
-	return 0;
-}
-
 static void uart_port_shutdown(struct tty_port *port)
 {
 	struct uart_state *state = container_of(port, struct uart_state, port);
@@ -2379,8 +2372,6 @@
 };
 
 static const struct tty_port_operations uart_port_ops = {
-	.activate	= uart_port_activate,
-	.shutdown	= uart_port_shutdown,
 	.carrier_raised = uart_carrier_raised,
 	.dtr_rts	= uart_dtr_rts,
 };
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 653cdd5..c6657de 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -413,7 +413,6 @@
 			break;
 	}
 
-	sirfport->rx_io_count += rx_count;
 	port->icount.rx += rx_count;
 
 	return rx_count;
@@ -600,7 +599,6 @@
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
 	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-	sirfport->rx_io_count = 0;
 	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
 		~SIRFUART_IO_MODE);
@@ -632,31 +630,6 @@
 				sirfport->uart_reg->uart_type));
 }
 
-static void sirfsoc_uart_start_rx(struct uart_port *port)
-{
-	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
-	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
-	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
-
-	sirfport->rx_io_count = 0;
-	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
-	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
-	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
-	if (sirfport->rx_dma_chan)
-		sirfsoc_uart_start_next_rx_dma(port);
-	else {
-		if (!sirfport->is_atlas7)
-			wr_regl(port, ureg->sirfsoc_int_en_reg,
-				rd_regl(port, ureg->sirfsoc_int_en_reg) |
-				SIRFUART_RX_IO_INT_EN(uint_en,
-					sirfport->uart_reg->uart_type));
-		else
-			wr_regl(port, ureg->sirfsoc_int_en_reg,
-				SIRFUART_RX_IO_INT_EN(uint_en,
-					sirfport->uart_reg->uart_type));
-	}
-}
-
 static unsigned int
 sirfsoc_usp_calc_sample_div(unsigned long set_rate,
 		unsigned long ioclk_rate, unsigned long *sample_reg)
@@ -850,7 +823,6 @@
 	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
 	rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
 	txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
-	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
 	wr_regl(port, ureg->sirfsoc_tx_fifo_op,
 			(txfifo_op_reg & ~SIRFUART_FIFO_START));
 	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
@@ -886,9 +858,13 @@
 	else
 		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
 	if (sirfport->rx_dma_chan)
-		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+			~SIRFUART_IO_MODE);
 	else
-		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+			SIRFUART_IO_MODE);
 	sirfport->rx_period_time = 20000000;
 	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
 	if (set_baud < 1000000)
@@ -902,7 +878,6 @@
 	txfifo_op_reg |= SIRFUART_FIFO_START;
 	wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
 	uart_update_timeout(port, termios->c_cflag, set_baud);
-	sirfsoc_uart_start_rx(port);
 	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
 	spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -921,6 +896,7 @@
 {
 	struct sirfsoc_uart_port *sirfport	= to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
 	unsigned int index			= port->line;
 	int ret;
 	irq_modify_status(port->irq, IRQ_NOREQUEST, IRQ_NOAUTOEN);
@@ -958,9 +934,9 @@
 	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
 	if (sirfport->rx_dma_chan)
 		wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
-			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
-			SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
-			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+			SIRFUART_RX_FIFO_CHK_SC(port->line, 0x1) |
+			SIRFUART_RX_FIFO_CHK_LC(port->line, 0x2) |
+			SIRFUART_RX_FIFO_CHK_HC(port->line, 0x4));
 	if (sirfport->tx_dma_chan) {
 		sirfport->tx_dma_state = TX_DMA_IDLE;
 		wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
@@ -981,16 +957,41 @@
 			goto init_rx_err;
 		}
 	}
-	enable_irq(port->irq);
+	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART &&
+		sirfport->rx_dma_chan)
+		wr_regl(port, ureg->sirfsoc_swh_dma_io,
+			SIRFUART_CLEAR_RX_ADDR_EN);
+	if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
+			sirfport->rx_dma_chan)
+		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+			SIRFSOC_USP_FRADDR_CLR_EN);
 	if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
 		sirfport->is_hrt_enabled = true;
 		sirfport->rx_period_time = 20000000;
+		sirfport->rx_last_pos = -1;
+		sirfport->pio_fetch_cnt = 0;
 		sirfport->rx_dma_items.xmit.tail =
 			sirfport->rx_dma_items.xmit.head = 0;
 		hrtimer_start(&sirfport->hrt,
 			ns_to_ktime(sirfport->rx_period_time),
 			HRTIMER_MODE_REL);
 	}
+	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
+	if (sirfport->rx_dma_chan)
+		sirfsoc_uart_start_next_rx_dma(port);
+	else {
+		if (!sirfport->is_atlas7)
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				rd_regl(port, ureg->sirfsoc_int_en_reg) |
+				SIRFUART_RX_IO_INT_EN(uint_en,
+					sirfport->uart_reg->uart_type));
+		else
+			wr_regl(port, ureg->sirfsoc_int_en_reg,
+				SIRFUART_RX_IO_INT_EN(uint_en,
+					sirfport->uart_reg->uart_type));
+	}
+	enable_irq(port->irq);
 
 	return 0;
 init_rx_err:
@@ -1003,6 +1004,9 @@
 {
 	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
 	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+	struct circ_buf *xmit;
+
+	xmit = &sirfport->rx_dma_items.xmit;
 	if (!sirfport->is_atlas7)
 		wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
 	else
@@ -1019,8 +1023,10 @@
 	if (sirfport->tx_dma_chan)
 		sirfport->tx_dma_state = TX_DMA_IDLE;
 	if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
-		while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
-			SIRFUART_RX_FIFO_MASK) > 0)
+		while (((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+			SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt) &&
+			!CIRC_CNT(xmit->head, xmit->tail,
+			SIRFSOC_RX_DMA_BUF_SIZE))
 			;
 		sirfport->is_hrt_enabled = false;
 		hrtimer_cancel(&sirfport->hrt);
@@ -1169,6 +1175,8 @@
 	struct tty_struct *tty;
 	struct sirfsoc_register *ureg;
 	struct circ_buf *xmit;
+	struct sirfsoc_fifo_status *ufifo_st;
+	int max_pio_cnt;
 
 	sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
 	port = &sirfport->port;
@@ -1176,9 +1184,16 @@
 	tty = port->state->port.tty;
 	ureg = &sirfport->uart_reg->uart_reg;
 	xmit = &sirfport->rx_dma_items.xmit;
+	ufifo_st = &sirfport->uart_reg->fifo_status;
+
 	dmaengine_tx_status(sirfport->rx_dma_chan,
-		sirfport->rx_dma_items.cookie, &tx_state);
-	xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+			sirfport->rx_dma_items.cookie, &tx_state);
+	if (SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue !=
+		sirfport->rx_last_pos) {
+		xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+		sirfport->rx_last_pos = xmit->head;
+		sirfport->pio_fetch_cnt = 0;
+	}
 	count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
 			SIRFSOC_RX_DMA_BUF_SIZE);
 	while (count > 0) {
@@ -1200,23 +1215,38 @@
 	 */
 	if (!inserted && !count &&
 		((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
-		SIRFUART_RX_FIFO_MASK) > 0)) {
+		SIRFUART_RX_FIFO_MASK) > sirfport->pio_fetch_cnt)) {
+		dmaengine_pause(sirfport->rx_dma_chan);
 		/* switch to pio mode */
 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
 			SIRFUART_IO_MODE);
-		while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
-			SIRFUART_RX_FIFO_MASK) > 0) {
-			if (sirfsoc_uart_pio_rx_chars(port, 16) > 0)
-				tty_flip_buffer_push(tty->port);
+		/*
+		 * The UART controller's SWH_DMA_IO register has a
+		 * CLEAR_RX_ADDR_EN bit: when the mode changes from I/O to DMA,
+		 * it clears the two low bits of the read pointer;
+		 * USP has a similar FRADDR_CLR_EN bit in USP_RX_DMA_IO_CTRL.
+		 * We fetch data out of the RX FIFO into the DMA buffer in PIO
+		 * mode, but when switching back to DMA mode the fetched data
+		 * would be overwritten by DMA because of a strange hardware
+		 * behaviour: after the switch back to DMA mode, the RX FIFO
+		 * status still reports the number of entries fetched in PIO
+		 * mode, so record the fetched data count to avoid fetching
+		 * the same data twice.
+		 */
+		max_pio_cnt = 3;
+		while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+			ufifo_st->ff_empty(port)) && max_pio_cnt--) {
+			xmit->buf[xmit->head] =
+				rd_regl(port, ureg->sirfsoc_rx_fifo_data);
+			xmit->head = (xmit->head + 1) &
+					(SIRFSOC_RX_DMA_BUF_SIZE - 1);
+			sirfport->pio_fetch_cnt++;
 		}
-		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
-		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
-		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
 		/* switch back to dma mode */
 		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
 			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
 			~SIRFUART_IO_MODE);
+		dmaengine_resume(sirfport->rx_dma_chan);
 	}
 next_hrt:
 	hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
@@ -1239,7 +1269,7 @@
 	struct resource *res;
 	int ret;
 	struct dma_slave_config slv_cfg = {
-		.src_maxburst = 2,
+		.src_maxburst = 1,
 	};
 	struct dma_slave_config tx_slv_cfg = {
 		.dst_maxburst = 2,
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index eb162b0..c3a885b 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -296,6 +296,7 @@
 #define SIRFUART_DMA_MODE			0x0
 #define SIRFUART_RX_DMA_FLUSH			0x4
 
+#define SIRFUART_CLEAR_RX_ADDR_EN		0x2
 /* Baud Rate Calculation */
 #define SIRF_USP_MIN_SAMPLE_DIV			0x1
 #define SIRF_MIN_SAMPLE_DIV			0xf
@@ -325,6 +326,7 @@
 #define SIRFSOC_USP_ASYNC_DIV2_MASK		0x3f
 #define SIRFSOC_USP_ASYNC_DIV2_OFFSET		16
 #define SIRFSOC_USP_LOOP_BACK_CTRL		BIT(2)
+#define SIRFSOC_USP_FRADDR_CLR_EN		BIT(1)
 /* USP-UART Common */
 #define SIRFSOC_UART_RX_TIMEOUT(br, to)	(((br) * (((to) + 999) / 1000)) / 1000)
 #define SIRFUART_RECV_TIMEOUT_VALUE(x)	\
@@ -421,7 +423,6 @@
 	struct dma_chan			*tx_dma_chan;
 	dma_addr_t			tx_dma_addr;
 	struct dma_async_tx_descriptor	*tx_dma_desc;
-	unsigned int			rx_io_count;
 	unsigned long			transfer_size;
 	enum sirfsoc_tx_state		tx_dma_state;
 	unsigned int			cts_gpio;
@@ -431,6 +432,8 @@
 	struct hrtimer			hrt;
 	bool				is_hrt_enabled;
 	unsigned long			rx_period_time;
+	unsigned long			rx_last_pos;
+	unsigned long			pio_fetch_cnt;
 };
 
 /* Register Access Control */
diff --git a/drivers/tty/serial/sn_console.c b/drivers/tty/serial/sn_console.c
index 33e94e5..d4692d8 100644
--- a/drivers/tty/serial/sn_console.c
+++ b/drivers/tty/serial/sn_console.c
@@ -42,7 +42,7 @@
 #include <linux/tty_flip.h>
 #include <linux/serial.h>
 #include <linux/console.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/sysrq.h>
 #include <linux/circ_buf.h>
 #include <linux/serial_reg.h>
@@ -659,7 +659,7 @@
  * @port: Our sn_cons_port (which contains the uart port)
  *
  * So this is used by sn_sal_serial_console_init (early on, before we're
- * registered with serial core).  It's also used by sn_sal_module_init
+ * registered with serial core).  It's also used by sn_sal_init
  * right after we've registered with serial core.  The latter only happens
  * if we didn't already come through here via sn_sal_serial_console_init.
  *
@@ -709,7 +709,7 @@
  * sn_sal_switch_to_interrupts - Switch to interrupt driven mode
  * @port: Our sn_cons_port (which contains the uart port)
  *
- * In sn_sal_module_init, after we're registered with serial core and
+ * In sn_sal_init, after we're registered with serial core and
  * the port is added, this function is called to switch us to interrupt
  * mode.  We were previously in asynch/polling mode (using init_timer).
  *
@@ -773,7 +773,7 @@
 };
 
 /**
- * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
+ * sn_sal_init - When the kernel loads us, get us rolling w/ serial core
  *
  * Before this is called, we've been printing kernel messages in a special
  * early mode not making use of the serial core infrastructure.  When our
@@ -781,7 +781,7 @@
  * core and try to enable interrupt driven mode.
  *
  */
-static int __init sn_sal_module_init(void)
+static int __init sn_sal_init(void)
 {
 	int retval;
 
@@ -811,7 +811,7 @@
 
 	if (uart_register_driver(&sal_console_uart) < 0) {
 		printk
-		    ("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
+		    ("ERROR sn_sal_init failed uart_register_driver, line %d\n",
 		     __LINE__);
 		return -ENODEV;
 	}
@@ -832,33 +832,19 @@
 
 	/* when this driver is compiled in, the console initialization
 	 * will have already switched us into asynchronous operation
-	 * before we get here through the module initcalls */
+	 * before we get here through the initcalls */
 	if (!sal_console_port.sc_is_asynch) {
 		sn_sal_switch_to_asynch(&sal_console_port);
 	}
 
-	/* at this point (module_init) we can try to turn on interrupts */
+	/* at this point (device_init) we can try to turn on interrupts */
 	if (!IS_RUNNING_ON_SIMULATOR()) {
 		sn_sal_switch_to_interrupts(&sal_console_port);
 	}
 	sn_process_input = 1;
 	return 0;
 }
-
-/**
- * sn_sal_module_exit - When we're unloaded, remove the driver/port
- *
- */
-static void __exit sn_sal_module_exit(void)
-{
-	del_timer_sync(&sal_console_port.sc_timer);
-	uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
-	uart_unregister_driver(&sal_console_uart);
-	misc_deregister(&misc);
-}
-
-module_init(sn_sal_module_init);
-module_exit(sn_sal_module_exit);
+device_initcall(sn_sal_init);
 
 /**
  * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 4a6eab6..e3de9c6 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -6,7 +6,7 @@
  * Inspired by st-asc.c from STMicroelectronics (c)
  */
 
-#if defined(CONFIG_SERIAL_STM32_USART_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#if defined(CONFIG_SERIAL_STM32_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
 #define SUPPORT_SYSRQ
 #endif
 
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 6e4ac8d..127472b 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -10,7 +10,6 @@
  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/console.h>
 #include <linux/tty.h>
@@ -234,14 +233,10 @@
 {
 	return 0;
 }
+device_initcall(suncore_init);
 
-static void __exit suncore_exit(void)
-{
-}
-
-module_init(suncore_init);
-module_exit(suncore_exit);
-
+#if 0 /* ..def MODULE ; never supported as such */
 MODULE_AUTHOR("Eddie C. Dost, David S. Miller");
 MODULE_DESCRIPTION("Sun serial common layer");
 MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 5347544..0640318 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -3,7 +3,6 @@
  * Copyright (C) 2006, 2007 David S. Miller (davem@davemloft.net)
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/tty.h>
@@ -621,7 +620,6 @@
 	},
 	{},
 };
-MODULE_DEVICE_TABLE(of, hv_match);
 
 static struct platform_driver hv_driver = {
 	.driver = {
@@ -639,16 +637,11 @@
 
 	return platform_driver_register(&hv_driver);
 }
+device_initcall(sunhv_init);
 
-static void __exit sunhv_exit(void)
-{
-	platform_driver_unregister(&hv_driver);
-}
-
-module_init(sunhv_init);
-module_exit(sunhv_exit);
-
+#if 0 /* ...def MODULE ; never supported as such */
 MODULE_AUTHOR("David S. Miller");
 MODULE_DESCRIPTION("SUN4V Hypervisor console driver");
 MODULE_VERSION("2.0");
 MODULE_LICENSE("GPL");
+#endif
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 7d2532b..73190f5 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -950,7 +950,7 @@
 	if ((termios->c_cflag & CREAD) == 0)
 		port->read_status_mask &= ~BD_SC_EMPTY;
 
-	baud = uart_get_baud_rate(port, termios, old, 0, 115200);
+	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
 
 	/* Do we really need a spinlock here? */
 	spin_lock_irqsave(&port->lock, flags);
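
The ucc_uart change above caps the requested rate at port->uartclk / 16 instead
of a hard-coded 115200: a 16x-oversampling UART cannot signal faster than one
sixteenth of its input clock. A trivial check, assuming a hypothetical BRG
clock value:

#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 66000000;	/* assumed BRG clock in Hz */

	/* a 16x-oversampling UART tops out at clk / 16 */
	printf("max baud = %u\n", uartclk / 16);	/* 4125000 */
	return 0;
}
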
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 4cf263d..5a3fa89 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -291,12 +291,11 @@
 			n->flags = flags;
 			buf->tail = n;
 			b->commit = b->used;
-			/* paired w/ barrier in flush_to_ldisc(); ensures the
+			/* paired w/ acquire in flush_to_ldisc(); ensures the
 			 * latest commit value can be read before the head is
 			 * advanced to the next buffer
 			 */
-			smp_wmb();
-			b->next = n;
+			smp_store_release(&b->next, n);
 		} else if (change)
 			size = 0;
 		else
@@ -445,7 +444,6 @@
 		if (count)
 			disc->ops->receive_buf(tty, p, f, count);
 	}
-	head->read += count;
 	return count;
 }
 
@@ -488,12 +486,11 @@
 		if (atomic_read(&buf->priority))
 			break;
 
-		next = head->next;
-		/* paired w/ barrier in __tty_buffer_request_room();
+		/* paired w/ release in __tty_buffer_request_room();
 		 * ensures commit value read is not stale if the head
 		 * is advancing to the next buffer
 		 */
-		smp_rmb();
+		next = smp_load_acquire(&head->next);
 		count = head->commit - head->read;
 		if (!count) {
 			if (next == NULL) {
@@ -508,6 +505,7 @@
 		count = receive_buf(tty, head, count);
 		if (!count)
 			break;
+		head->read += count;
 	}
 
 	mutex_unlock(&buf->lock);
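
The tty_buffer.c hunks above replace the smp_wmb()/smp_rmb() pair with
smp_store_release()/smp_load_acquire(): the writer publishes the next-buffer
pointer only after the commit index, and a reader that observes the new pointer
is guaranteed to also see that commit. A minimal user-space analogue of the
pattern, using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

struct node {
	int commit;			/* payload published before the link */
	struct node *_Atomic next;	/* link published with release semantics */
};

static struct node a, b;		/* static: zero-initialized, next == NULL */

static void publish(struct node *cur, struct node *nxt, int commit)
{
	cur->commit = commit;
	/* release: the commit store above cannot be reordered past this */
	atomic_store_explicit(&cur->next, nxt, memory_order_release);
}

static int consume(struct node *cur)
{
	/* acquire: pairs with the release store in publish() */
	struct node *nxt = atomic_load_explicit(&cur->next, memory_order_acquire);

	if (nxt)
		return cur->commit;	/* guaranteed to be the published value */
	return -1;
}

int main(void)
{
	publish(&a, &b, 42);
	printf("commit=%d\n", consume(&a));	/* prints 42 */
	return 0;
}
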
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 57fc6ee..02785d8 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -106,6 +106,11 @@
 #include <linux/nsproxy.h>
 
 #undef TTY_DEBUG_HANGUP
+#ifdef TTY_DEBUG_HANGUP
+# define tty_debug_hangup(tty, f, args...)	tty_debug(tty, f, ##args)
+#else
+# define tty_debug_hangup(tty, f, args...)	do { } while (0)
+#endif
 
 #define TTY_PARANOIA_CHECK 1
 #define CHECK_TTY_COUNT 1
@@ -388,33 +393,40 @@
 int tty_check_change(struct tty_struct *tty)
 {
 	unsigned long flags;
+	struct pid *pgrp;
 	int ret = 0;
 
 	if (current->signal->tty != tty)
 		return 0;
 
+	rcu_read_lock();
+	pgrp = task_pgrp(current);
+
 	spin_lock_irqsave(&tty->ctrl_lock, flags);
 
 	if (!tty->pgrp) {
 		printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
 		goto out_unlock;
 	}
-	if (task_pgrp(current) == tty->pgrp)
+	if (pgrp == tty->pgrp)
 		goto out_unlock;
 	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
 	if (is_ignored(SIGTTOU))
-		goto out;
+		goto out_rcuunlock;
 	if (is_current_pgrp_orphaned()) {
 		ret = -EIO;
-		goto out;
+		goto out_rcuunlock;
 	}
-	kill_pgrp(task_pgrp(current), SIGTTOU, 1);
+	kill_pgrp(pgrp, SIGTTOU, 1);
+	rcu_read_unlock();
 	set_thread_flag(TIF_SIGPENDING);
 	ret = -ERESTARTSYS;
-out:
 	return ret;
 out_unlock:
 	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+out_rcuunlock:
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -524,7 +536,8 @@
 	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
 	tty->session = get_pid(task_session(current));
 	if (current->signal->tty) {
-		printk(KERN_DEBUG "tty not NULL!!\n");
+		tty_debug(tty, "current tty %s not NULL!!\n",
+			  current->signal->tty->name);
 		tty_kref_put(current->signal->tty);
 	}
 	put_pid(current->signal->tty_old_pgrp);
@@ -766,9 +779,7 @@
 
 void tty_hangup(struct tty_struct *tty)
 {
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s hangup...\n", tty_name(tty));
-#endif
+	tty_debug_hangup(tty, "\n");
 	schedule_work(&tty->hangup_work);
 }
 
@@ -785,9 +796,7 @@
 
 void tty_vhangup(struct tty_struct *tty)
 {
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty));
-#endif
+	tty_debug_hangup(tty, "\n");
 	__tty_hangup(tty, 0);
 }
 
@@ -824,9 +833,7 @@
 
 static void tty_vhangup_session(struct tty_struct *tty)
 {
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s vhangup session...\n", tty_name(tty));
-#endif
+	tty_debug_hangup(tty, "\n");
 	__tty_hangup(tty, 1);
 }
 
@@ -920,12 +927,8 @@
 		tty->pgrp = NULL;
 		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
 		tty_kref_put(tty);
-	} else {
-#ifdef TTY_DEBUG_HANGUP
-		printk(KERN_DEBUG "error attempted to write to tty [0x%p]"
-		       " = NULL", tty);
-#endif
-	}
+	} else
+		tty_debug_hangup(tty, "no current tty\n");
 
 	spin_unlock_irq(&current->sighand->siglock);
 	/* Now clear signal->tty under the lock */
@@ -1705,8 +1708,7 @@
 {
 #ifdef TTY_PARANOIA_CHECK
 	if (idx < 0 || idx >= tty->driver->num) {
-		printk(KERN_DEBUG "%s: bad idx when trying to free (%s)\n",
-				__func__, tty->name);
+		tty_debug(tty, "bad idx %d\n", idx);
 		return -1;
 	}
 
@@ -1715,20 +1717,20 @@
 		return 0;
 
 	if (tty != tty->driver->ttys[idx]) {
-		printk(KERN_DEBUG "%s: driver.table[%d] not tty for (%s)\n",
-				__func__, idx, tty->name);
+		tty_debug(tty, "bad driver table[%d] = %p\n",
+			  idx, tty->driver->ttys[idx]);
 		return -1;
 	}
 	if (tty->driver->other) {
 		struct tty_struct *o_tty = tty->link;
 
 		if (o_tty != tty->driver->other->ttys[idx]) {
-			printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
-					__func__, idx, tty->name);
+			tty_debug(tty, "bad other table[%d] = %p\n",
+				  idx, tty->driver->other->ttys[idx]);
 			return -1;
 		}
 		if (o_tty->link != tty) {
-			printk(KERN_DEBUG "%s: bad pty pointers\n", __func__);
+			tty_debug(tty, "bad link = %p\n", o_tty->link);
 			return -1;
 		}
 	}
@@ -1782,10 +1784,7 @@
 		return 0;
 	}
 
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
-			tty_name(tty), tty->count);
-#endif
+	tty_debug_hangup(tty, "(tty count=%d)...\n", tty->count);
 
 	if (tty->ops->close)
 		tty->ops->close(tty, filp);
@@ -1895,9 +1894,7 @@
 	if (!final)
 		return 0;
 
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s: %s: final close\n", __func__, tty_name(tty));
-#endif
+	tty_debug_hangup(tty, "final close\n");
 	/*
 	 * Ask the line discipline code to release its structures
 	 */
@@ -1906,10 +1903,7 @@
 	/* Wait for pending work before tty destruction commences */
 	tty_flush_works(tty);
 
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s: %s: freeing structure...\n", __func__,
-	       tty_name(tty));
-#endif
+	tty_debug_hangup(tty, "freeing structure...\n");
 	/*
 	 * The release_tty function takes care of the details of clearing
 	 * the slots and preserving the termios structure. The tty_unlock_pair
@@ -2098,9 +2092,9 @@
 	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
 	    tty->driver->subtype == PTY_TYPE_MASTER)
 		noctty = 1;
-#ifdef TTY_DEBUG_HANGUP
-	printk(KERN_DEBUG "%s: opening %s...\n", __func__, tty->name);
-#endif
+
+	tty_debug_hangup(tty, "(tty count=%d)\n", tty->count);
+
 	if (tty->ops->open)
 		retval = tty->ops->open(tty, filp);
 	else
@@ -2108,10 +2102,8 @@
 	filp->f_flags = saved_flags;
 
 	if (retval) {
-#ifdef TTY_DEBUG_HANGUP
-		printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
-				retval, tty->name);
-#endif
+		tty_debug_hangup(tty, "error %d, releasing...\n", retval);
+
 		tty_unlock(tty); /* need to call tty_release without BTM */
 		tty_release(inode, filp);
 		if (retval != -ERESTARTSYS)
@@ -3160,9 +3152,12 @@
 		unsigned int index, unsigned int count)
 {
 	/* init here, since reused cdevs cause crashes */
-	cdev_init(&driver->cdevs[index], &tty_fops);
-	driver->cdevs[index].owner = driver->owner;
-	return cdev_add(&driver->cdevs[index], dev, count);
+	driver->cdevs[index] = cdev_alloc();
+	if (!driver->cdevs[index])
+		return -ENOMEM;
+	cdev_init(driver->cdevs[index], &tty_fops);
+	driver->cdevs[index]->owner = driver->owner;
+	return cdev_add(driver->cdevs[index], dev, count);
 }
 
 /**
@@ -3268,8 +3263,10 @@
 
 error:
 	put_device(dev);
-	if (cdev)
-		cdev_del(&driver->cdevs[index]);
+	if (cdev) {
+		cdev_del(driver->cdevs[index]);
+		driver->cdevs[index] = NULL;
+	}
 	return ERR_PTR(retval);
 }
 EXPORT_SYMBOL_GPL(tty_register_device_attr);
@@ -3289,8 +3286,10 @@
 {
 	device_destroy(tty_class,
 		MKDEV(driver->major, driver->minor_start) + index);
-	if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC))
-		cdev_del(&driver->cdevs[index]);
+	if (!(driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)) {
+		cdev_del(driver->cdevs[index]);
+		driver->cdevs[index] = NULL;
+	}
 }
 EXPORT_SYMBOL(tty_unregister_device);
 
@@ -3355,6 +3354,7 @@
 	kfree(driver->ports);
 	kfree(driver->ttys);
 	kfree(driver->termios);
+	kfree(driver->cdevs);
 	kfree(driver);
 	return ERR_PTR(err);
 }
@@ -3383,7 +3383,7 @@
 		}
 		proc_tty_unregister_driver(driver);
 		if (driver->flags & TTY_DRIVER_DYNAMIC_ALLOC)
-			cdev_del(&driver->cdevs[0]);
+			cdev_del(driver->cdevs[0]);
 	}
 	kfree(driver->cdevs);
 	kfree(driver->ports);
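
The tty_cdev_add() hunk above moves from an embedded, reused struct cdev to a
dynamically allocated one, so the cdev can be dropped with cdev_del() and
reallocated on re-registration.  A minimal sketch of that dynamic-cdev pattern
as the hunks use it; "example_fops", "cdevp", "devt" and "count" are
illustrative names, not from the patch:

	/* registration, mirroring tty_cdev_add() */
	cdevp = cdev_alloc();
	if (!cdevp)
		return -ENOMEM;
	cdev_init(cdevp, &example_fops);
	cdevp->owner = THIS_MODULE;
	err = cdev_add(cdevp, devt, count);

	/* ...and teardown, mirroring tty_unregister_device() */
	cdev_del(cdevp);
	cdevp = NULL;
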
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 5232fb6..9c5aebf 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -26,6 +26,12 @@
 
 #undef TTY_DEBUG_WAIT_UNTIL_SENT
 
+#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
+# define tty_debug_wait_until_sent(tty, f, args...)    tty_debug(tty, f, ##args)
+#else
+# define tty_debug_wait_until_sent(tty, f, args...)    do {} while (0)
+#endif
+
 #undef	DEBUG
 
 /*
@@ -210,9 +216,8 @@
 
 void tty_wait_until_sent(struct tty_struct *tty, long timeout)
 {
-#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
-	printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty));
-#endif
+	tty_debug_wait_until_sent(tty, "\n");
+
 	if (!timeout)
 		timeout = MAX_SCHEDULE_TIMEOUT;
 
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index c07fb5d..71750cb 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -22,9 +22,7 @@
 #undef LDISC_DEBUG_HANGUP
 
 #ifdef LDISC_DEBUG_HANGUP
-#define tty_ldisc_debug(tty, f, args...) ({				  \
-	printk(KERN_DEBUG "%s: %s: " f, __func__, tty_name(tty), ##args); \
-})
+#define tty_ldisc_debug(tty, f, args...)	tty_debug(tty, f, ##args)
 #else
 #define tty_ldisc_debug(tty, f, args...)
 #endif
@@ -449,6 +447,8 @@
 		ret = ld->ops->open(tty);
 		if (ret)
 			clear_bit(TTY_LDISC_OPEN, &tty->flags);
+
+		tty_ldisc_debug(tty, "%p: opened\n", tty->ldisc);
 		return ret;
 	}
 	return 0;
@@ -469,6 +469,7 @@
 	clear_bit(TTY_LDISC_OPEN, &tty->flags);
 	if (ld->ops->close)
 		ld->ops->close(tty);
+	tty_ldisc_debug(tty, "%p: closed\n", tty->ldisc);
 }
 
 /**
@@ -662,7 +663,7 @@
 	int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
 	int err = 0;
 
-	tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
+	tty_ldisc_debug(tty, "%p: closing\n", tty->ldisc);
 
 	ld = tty_ldisc_ref(tty);
 	if (ld != NULL) {
@@ -712,7 +713,7 @@
 	if (reset)
 		tty_reset_termios(tty);
 
-	tty_ldisc_debug(tty, "re-opened ldisc: %p\n", tty->ldisc);
+	tty_ldisc_debug(tty, "%p: re-opened\n", tty->ldisc);
 }
 
 /**
@@ -776,8 +777,6 @@
 	 * it does not race with the set_ldisc code path.
 	 */
 
-	tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
-
 	tty_ldisc_lock_pair(tty, o_tty);
 	tty_ldisc_kill(tty);
 	if (o_tty)
@@ -787,7 +786,7 @@
 	/* And the memory resources remaining (buffers, termios) will be
 	   disposed of when the kref hits zero */
 
-	tty_ldisc_debug(tty, "ldisc closed\n");
+	tty_ldisc_debug(tty, "released\n");
 }
 
 /**
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 48fb1d9..52c98ce 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -127,7 +127,7 @@
 config UIO_PRUSS
 	tristate "Texas Instruments PRUSS driver"
 	select GENERIC_ALLOCATOR
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
 	help
 	  PRUSS driver for OMAPL138/DA850/AM18XX devices
 	  PRUSS driver requires user space components, examples and user space
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 3257d42..8196581 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -896,6 +896,7 @@
 static void __exit uio_exit(void)
 {
 	release_uio_class();
+	idr_destroy(&uio_idr);
 }
 
 module_init(uio_init)
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index b6cac91..2bcf80c 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -480,19 +480,7 @@
 	.probe = uio_fsl_elbc_gpcm_probe,
 	.remove = uio_fsl_elbc_gpcm_remove,
 };
-
-static int __init uio_fsl_elbc_gpcm_init(void)
-{
-	return platform_driver_register(&uio_fsl_elbc_gpcm_driver);
-}
-
-static void __exit uio_fsl_elbc_gpcm_exit(void)
-{
-	platform_driver_unregister(&uio_fsl_elbc_gpcm_driver);
-}
-
-module_init(uio_fsl_elbc_gpcm_init);
-module_exit(uio_fsl_elbc_gpcm_exit);
+module_platform_driver(uio_fsl_elbc_gpcm_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("John Ogness <john.ogness@linutronix.de>");
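
For reference, module_platform_driver() generates essentially the init/exit
boilerplate removed above; a simplified sketch of what the macro expands to
for this driver:

	static int __init uio_fsl_elbc_gpcm_driver_init(void)
	{
		return platform_driver_register(&uio_fsl_elbc_gpcm_driver);
	}
	module_init(uio_fsl_elbc_gpcm_driver_init);

	static void __exit uio_fsl_elbc_gpcm_driver_exit(void)
	{
		platform_driver_unregister(&uio_fsl_elbc_gpcm_driver);
	}
	module_exit(uio_fsl_elbc_gpcm_driver_exit);
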
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 813d4d3..1173f9c 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -270,6 +270,7 @@
 static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf)
 {
 	static char *str[] = { "no", "yes" };
+
 	if (unlikely(value >= ARRAY_SIZE(str)))
 		return snprintf(buf, PAGE_SIZE, "%u\n", value);
 	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -278,6 +279,7 @@
 static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf)
 {
 	static char *str[] = { NULL, "not connected", "connected", "lost" };
+
 	if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
 		return snprintf(buf, PAGE_SIZE, "%u\n", value);
 	return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -702,6 +704,7 @@
 	len = ret / 4;
 	for (offb = 0; offb < len; ) {
 		int l = le32_to_cpu(buf[offb++]);
+
 		if (l < 0 || l > stride || l > (len - offb) / 2) {
 			if (printk_ratelimit())
 				usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
@@ -732,6 +735,7 @@
 static int cxacru_card_status(struct cxacru_data *instance)
 {
 	int ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0);
+
 	if (ret < 0) {		/* firmware not loaded */
 		usb_dbg(instance->usbatm, "cxacru_adsl_start: CARD_GET_STATUS returned %d\n", ret);
 		return ret;
@@ -945,6 +949,7 @@
 	offb = offd = 0;
 	do {
 		int l = min_t(int, stride, size - offd);
+
 		buf[offb++] = fw;
 		buf[offb++] = l;
 		buf[offb++] = code1;
@@ -1091,8 +1096,8 @@
 {
 	const struct firmware *fw, *bp;
 	struct cxacru_data *instance = usbatm_instance->driver_data;
-
 	int ret = cxacru_find_firmware(instance, "fw", &fw);
+
 	if (ret) {
 		usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n");
 		return ret;
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index 3cb9bda..e462f55 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -25,6 +25,9 @@
 #define VERSION		      (0xF << 25)
 #define CIVERSION	      (0x7 << 29)
 
+/* SBUSCFG */
+#define AHBBRST_MASK		0x7
+
 /* HCCPARAMS */
 #define HCCPARAMS_LEN         BIT(17)
 
@@ -53,6 +56,15 @@
 #define DEVICEADDR_USBADRA    BIT(24)
 #define DEVICEADDR_USBADR     (0x7FUL << 25)
 
+/* TTCTRL */
+#define TTCTRL_TTHA_MASK	(0x7fUL << 24)
+/* Set non-zero value for internal TT Hub address representation */
+#define TTCTRL_TTHA		(0x7fUL << 24)
+
+/* BURSTSIZE */
+#define RX_BURST_MASK		0xff
+#define TX_BURST_MASK		0xff00
+
 /* PORTSC */
 #define PORTSC_CCS            BIT(0)
 #define PORTSC_CSC            BIT(1)
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 6d6200e..41d7cf6 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -50,6 +50,8 @@
 	OP_USBINTR,
 	OP_DEVICEADDR,
 	OP_ENDPTLISTADDR,
+	OP_TTCTRL,
+	OP_BURSTSIZE,
 	OP_PORTSC,
 	OP_DEVLC,
 	OP_OTGSC,
@@ -406,8 +408,11 @@
 static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
 {
 #ifdef CONFIG_USB_OTG_FSM
+	struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
+
 	return ci->is_otg && ci->roles[CI_ROLE_HOST] &&
-					ci->roles[CI_ROLE_GADGET];
+		ci->roles[CI_ROLE_GADGET] && (otg_caps->srp_support ||
+		otg_caps->hnp_support || otg_caps->adp_support);
 #else
 	return false;
 #endif
@@ -426,4 +431,6 @@
 int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
 				u32 value, unsigned int timeout_ms);
 
+void ci_platform_configure(struct ci_hdrc *ci);
+
 #endif	/* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 389f0e0..867e9f3 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -29,26 +29,31 @@
 };
 
 static const struct ci_hdrc_imx_platform_flag imx27_usb_data = {
+		CI_HDRC_DISABLE_STREAMING,
 };
 
 static const struct ci_hdrc_imx_platform_flag imx28_usb_data = {
 	.flags = CI_HDRC_IMX28_WRITE_FIX |
-		CI_HDRC_TURN_VBUS_EARLY_ON,
+		CI_HDRC_TURN_VBUS_EARLY_ON |
+		CI_HDRC_DISABLE_STREAMING,
 };
 
 static const struct ci_hdrc_imx_platform_flag imx6q_usb_data = {
 	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
-		CI_HDRC_TURN_VBUS_EARLY_ON,
+		CI_HDRC_TURN_VBUS_EARLY_ON |
+		CI_HDRC_DISABLE_STREAMING,
 };
 
 static const struct ci_hdrc_imx_platform_flag imx6sl_usb_data = {
 	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
-		CI_HDRC_TURN_VBUS_EARLY_ON,
+		CI_HDRC_TURN_VBUS_EARLY_ON |
+		CI_HDRC_DISABLE_HOST_STREAMING,
 };
 
 static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
 	.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
-		CI_HDRC_TURN_VBUS_EARLY_ON,
+		CI_HDRC_TURN_VBUS_EARLY_ON |
+		CI_HDRC_DISABLE_HOST_STREAMING,
 };
 
 static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
@@ -104,7 +109,7 @@
 	misc_pdev = of_find_device_by_node(args.np);
 	of_node_put(args.np);
 
-	if (!misc_pdev)
+	if (!misc_pdev || !platform_get_drvdata(misc_pdev))
 		return ERR_PTR(-EPROBE_DEFER);
 
 	data->dev = &misc_pdev->dev;
@@ -126,7 +131,7 @@
 	struct ci_hdrc_platform_data pdata = {
 		.name		= dev_name(&pdev->dev),
 		.capoffset	= DEF_CAPOFFSET,
-		.flags		= CI_HDRC_DISABLE_STREAMING,
+		.flags		= CI_HDRC_SET_NON_ZERO_TTHA,
 	};
 	int ret;
 	const struct of_device_id *of_id =
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 3ad48e1..3feebf7 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -64,6 +64,7 @@
 #include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/regulator/consumer.h>
+#include <linux/usb/ehci_def.h>
 
 #include "ci.h"
 #include "udc.h"
@@ -84,6 +85,8 @@
 	[OP_USBINTR]		= 0x08U,
 	[OP_DEVICEADDR]		= 0x14U,
 	[OP_ENDPTLISTADDR]	= 0x18U,
+	[OP_TTCTRL]		= 0x1CU,
+	[OP_BURSTSIZE]		= 0x20U,
 	[OP_PORTSC]		= 0x44U,
 	[OP_DEVLC]		= 0x84U,
 	[OP_OTGSC]		= 0x64U,
@@ -106,6 +109,8 @@
 	[OP_USBINTR]		= 0x08U,
 	[OP_DEVICEADDR]		= 0x14U,
 	[OP_ENDPTLISTADDR]	= 0x18U,
+	[OP_TTCTRL]		= 0x1CU,
+	[OP_BURSTSIZE]		= 0x20U,
 	[OP_PORTSC]		= 0x44U,
 	[OP_DEVLC]		= 0x84U,
 	[OP_OTGSC]		= 0xC4U,
@@ -118,7 +123,7 @@
 	[OP_ENDPTCTRL]		= 0xECU,
 };
 
-static int hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
+static void hw_alloc_regmap(struct ci_hdrc *ci, bool is_lpm)
 {
 	int i;
 
@@ -134,7 +139,6 @@
 			 ? ci_regs_lpm[OP_ENDPTCTRL]
 			 : ci_regs_nolpm[OP_ENDPTCTRL]);
 
-	return 0;
 }
 
 static enum ci_revision ci_get_revision(struct ci_hdrc *ci)
@@ -403,6 +407,55 @@
 	return ret;
 }
 
+
+/**
+ * ci_platform_configure: do controller configuration
+ * @ci: the controller
+ *
+ */
+void ci_platform_configure(struct ci_hdrc *ci)
+{
+	bool is_device_mode, is_host_mode;
+
+	is_device_mode = hw_read(ci, OP_USBMODE, USBMODE_CM) == USBMODE_CM_DC;
+	is_host_mode = hw_read(ci, OP_USBMODE, USBMODE_CM) == USBMODE_CM_HC;
+
+	if (is_device_mode &&
+		(ci->platdata->flags & CI_HDRC_DISABLE_DEVICE_STREAMING))
+		hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
+	if (is_host_mode &&
+		(ci->platdata->flags & CI_HDRC_DISABLE_HOST_STREAMING))
+		hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
+
+	if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
+		if (ci->hw_bank.lpm)
+			hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
+		else
+			hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
+	}
+
+	if (ci->platdata->flags & CI_HDRC_SET_NON_ZERO_TTHA)
+		hw_write(ci, OP_TTCTRL, TTCTRL_TTHA_MASK, TTCTRL_TTHA);
+
+	hw_write(ci, OP_USBCMD, 0xff0000, ci->platdata->itc_setting << 16);
+
+	if (ci->platdata->flags & CI_HDRC_OVERRIDE_AHB_BURST)
+		hw_write_id_reg(ci, ID_SBUSCFG, AHBBRST_MASK,
+			ci->platdata->ahb_burst_config);
+
+	/* override burst size, takes effect only when ahb_burst_config is 0 */
+	if (!hw_read_id_reg(ci, ID_SBUSCFG, AHBBRST_MASK)) {
+		if (ci->platdata->flags & CI_HDRC_OVERRIDE_TX_BURST)
+			hw_write(ci, OP_BURSTSIZE, TX_BURST_MASK,
+			ci->platdata->tx_burst_size << __ffs(TX_BURST_MASK));
+
+		if (ci->platdata->flags & CI_HDRC_OVERRIDE_RX_BURST)
+			hw_write(ci, OP_BURSTSIZE, RX_BURST_MASK,
+				ci->platdata->rx_burst_size);
+	}
+}
+
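
ci_platform_configure() above relies on the driver's hw_write(ci, reg, mask, data)
convention, which throughout chipidea appears to be a masked read-modify-write:
only the bits selected by mask are replaced.  A small illustrative helper (not
from the patch) showing how the ITC write composes:

	/* only the bits in @mask are replaced by the matching bits of @data */
	static inline u32 masked_rmw(u32 old, u32 mask, u32 data)
	{
		return (old & ~mask) | (data & mask);
	}

	/* e.g. itc_setting = 1 lands in USBCMD[23:16]:         */
	/* new_usbcmd = masked_rmw(usbcmd, 0xff0000, 1 << 16);  */
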
 /**
  * hw_controller_reset: do controller reset
  * @ci: the controller
@@ -447,16 +500,6 @@
 		ci->platdata->notify_event(ci,
 			CI_HDRC_CONTROLLER_RESET_EVENT);
 
-	if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
-		hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
-
-	if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED) {
-		if (ci->hw_bank.lpm)
-			hw_write(ci, OP_DEVLC, DEVLC_PFSC, DEVLC_PFSC);
-		else
-			hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
-	}
-
 	/* USBMODE should be configured step by step */
 	hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_IDLE);
 	hw_write(ci, OP_USBMODE, USBMODE_CM, USBMODE_CM_DC);
@@ -469,6 +512,8 @@
 		return -ENODEV;
 	}
 
+	ci_platform_configure(ci);
+
 	return 0;
 }
 
@@ -560,6 +605,8 @@
 static int ci_get_platdata(struct device *dev,
 		struct ci_hdrc_platform_data *platdata)
 {
+	int ret;
+
 	if (!platdata->phy_mode)
 		platdata->phy_mode = of_usb_get_phy_mode(dev->of_node);
 
@@ -588,9 +635,66 @@
 				of_usb_host_tpl_support(dev->of_node);
 	}
 
+	if (platdata->dr_mode == USB_DR_MODE_OTG) {
+		/* We can support HNP and SRP of OTG 2.0 */
+		platdata->ci_otg_caps.otg_rev = 0x0200;
+		platdata->ci_otg_caps.hnp_support = true;
+		platdata->ci_otg_caps.srp_support = true;
+
+		/* Update otg capabilities by DT properties */
+		ret = of_usb_update_otg_caps(dev->of_node,
+					&platdata->ci_otg_caps);
+		if (ret)
+			return ret;
+	}
+
 	if (of_usb_get_maximum_speed(dev->of_node) == USB_SPEED_FULL)
 		platdata->flags |= CI_HDRC_FORCE_FULLSPEED;
 
+	platdata->itc_setting = 1;
+	if (of_find_property(dev->of_node, "itc-setting", NULL)) {
+		ret = of_property_read_u32(dev->of_node, "itc-setting",
+			&platdata->itc_setting);
+		if (ret) {
+			dev_err(dev,
+				"failed to get itc-setting\n");
+			return ret;
+		}
+	}
+
+	if (of_find_property(dev->of_node, "ahb-burst-config", NULL)) {
+		ret = of_property_read_u32(dev->of_node, "ahb-burst-config",
+			&platdata->ahb_burst_config);
+		if (ret) {
+			dev_err(dev,
+				"failed to get ahb-burst-config\n");
+			return ret;
+		}
+		platdata->flags |= CI_HDRC_OVERRIDE_AHB_BURST;
+	}
+
+	if (of_find_property(dev->of_node, "tx-burst-size-dword", NULL)) {
+		ret = of_property_read_u32(dev->of_node, "tx-burst-size-dword",
+			&platdata->tx_burst_size);
+		if (ret) {
+			dev_err(dev,
+				"failed to get tx-burst-size-dword\n");
+			return ret;
+		}
+		platdata->flags |= CI_HDRC_OVERRIDE_TX_BURST;
+	}
+
+	if (of_find_property(dev->of_node, "rx-burst-size-dword", NULL)) {
+		ret = of_property_read_u32(dev->of_node, "rx-burst-size-dword",
+			&platdata->rx_burst_size);
+		if (ret) {
+			dev_err(dev,
+				"failed to get rx-burst-size-dword\n");
+			return ret;
+		}
+		platdata->flags |= CI_HDRC_OVERRIDE_RX_BURST;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 5b7061a..080b7be 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -10,6 +10,7 @@
 #include <linux/usb/phy.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/otg-fsm.h>
+#include <linux/usb/chipidea.h>
 
 #include "ci.h"
 #include "udc.h"
@@ -66,9 +67,11 @@
 	unsigned long flags;
 	unsigned mode;
 
+	pm_runtime_get_sync(ci->dev);
 	spin_lock_irqsave(&ci->lock, flags);
 	mode = hw_port_test_get(ci);
 	spin_unlock_irqrestore(&ci->lock, flags);
+	pm_runtime_put_sync(ci->dev);
 
 	seq_printf(s, "mode = %u\n", mode);
 
@@ -98,9 +101,11 @@
 	if (sscanf(buf, "%u", &mode) != 1)
 		return -EINVAL;
 
+	pm_runtime_get_sync(ci->dev);
 	spin_lock_irqsave(&ci->lock, flags);
 	ret = hw_port_test_set(ci, mode);
 	spin_unlock_irqrestore(&ci->lock, flags);
+	pm_runtime_put_sync(ci->dev);
 
 	return ret ? ret : count;
 }
@@ -316,8 +321,10 @@
 	if (role == CI_ROLE_END || role == ci->role)
 		return -EINVAL;
 
+	pm_runtime_get_sync(ci->dev);
 	ci_role_stop(ci);
 	ret = ci_role_start(ci, role);
+	pm_runtime_put_sync(ci->dev);
 
 	return ret ? ret : count;
 }
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 7161439..3d24304 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -37,15 +37,14 @@
 
 struct ehci_ci_priv {
 	struct regulator *reg_vbus;
-	struct ci_hdrc *ci;
 };
 
 static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	struct ehci_ci_priv *priv = (struct ehci_ci_priv *)ehci->priv;
-	struct ci_hdrc *ci = priv->ci;
 	struct device *dev = hcd->self.controller;
+	struct ci_hdrc *ci = dev_get_drvdata(dev);
 	int ret = 0;
 	int port = HCS_N_PORTS(ehci->hcs_params);
 
@@ -78,9 +77,25 @@
 	return 0;
 };
 
+static int ehci_ci_reset(struct usb_hcd *hcd)
+{
+	struct device *dev = hcd->self.controller;
+	struct ci_hdrc *ci = dev_get_drvdata(dev);
+	int ret;
+
+	ret = ehci_setup(hcd);
+	if (ret)
+		return ret;
+
+	ci_platform_configure(ci);
+
+	return ret;
+}
+
 static const struct ehci_driver_overrides ehci_ci_overrides = {
 	.extra_priv_size = sizeof(struct ehci_ci_priv),
 	.port_power	 = ehci_ci_portpower,
+	.reset		 = ehci_ci_reset,
 };
 
 static irqreturn_t host_irq(struct ci_hdrc *ci)
@@ -123,7 +138,6 @@
 
 	priv = (struct ehci_ci_priv *)ehci->priv;
 	priv->reg_vbus = NULL;
-	priv->ci = ci;
 
 	if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci)) {
 		if (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON) {
@@ -153,12 +167,6 @@
 		}
 	}
 
-	if (ci->platdata->flags & CI_HDRC_DISABLE_STREAMING)
-		hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
-
-	if (ci->platdata->flags & CI_HDRC_FORCE_FULLSPEED)
-		hw_write(ci, OP_PORTSC, PORTSC_PFSC, PORTSC_PFSC);
-
 	return ret;
 
 disable_reg:
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 19d655a..00ab59d 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -525,7 +525,6 @@
 		ci_role_start(ci, CI_ROLE_HOST);
 	} else {
 		ci_role_stop(ci);
-		hw_device_reset(ci);
 		ci_role_start(ci, CI_ROLE_GADGET);
 	}
 	return 0;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 764f668..a637da2 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -445,7 +445,7 @@
 		rest -= count;
 	}
 
-	if (hwreq->req.zero && hwreq->req.length
+	if (hwreq->req.zero && hwreq->req.length && hwep->dir == TX
 	    && (hwreq->req.length % hwep->ep.maxpacket == 0))
 		add_td_to_list(hwep, hwreq, 0);
 
@@ -1090,6 +1090,13 @@
 				if (ci_otg_is_fsm_mode(ci))
 					err = otg_a_alt_hnp_support(ci);
 				break;
+			case USB_DEVICE_A_HNP_SUPPORT:
+				if (ci_otg_is_fsm_mode(ci)) {
+					ci->gadget.a_hnp_support = 1;
+					err = isr_setup_status_phase(
+							ci);
+				}
+				break;
 			default:
 				goto delegate;
 			}
@@ -1624,6 +1631,20 @@
 
 			hwep->ep.name      = hwep->name;
 			hwep->ep.ops       = &usb_ep_ops;
+
+			if (i == 0) {
+				hwep->ep.caps.type_control = true;
+			} else {
+				hwep->ep.caps.type_iso = true;
+				hwep->ep.caps.type_bulk = true;
+				hwep->ep.caps.type_int = true;
+			}
+
+			if (j == TX)
+				hwep->ep.caps.dir_in = true;
+			else
+				hwep->ep.caps.dir_out = true;
+
 			/*
 			 * for ep0: maxP defined in desc, for other
 			 * eps, maxP is set by epautoconfig() called
@@ -1827,6 +1848,7 @@
 static int udc_start(struct ci_hdrc *ci)
 {
 	struct device *dev = ci->dev;
+	struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
 	int retval = 0;
 
 	spin_lock_init(&ci->lock);
@@ -1834,8 +1856,12 @@
 	ci->gadget.ops          = &usb_gadget_ops;
 	ci->gadget.speed        = USB_SPEED_UNKNOWN;
 	ci->gadget.max_speed    = USB_SPEED_HIGH;
-	ci->gadget.is_otg       = ci->is_otg ? 1 : 0;
 	ci->gadget.name         = ci->platdata->name;
+	ci->gadget.otg_caps	= otg_caps;
+
+	if (ci->is_otg && (otg_caps->hnp_support || otg_caps->srp_support ||
+						otg_caps->adp_support))
+		ci->gadget.is_otg = 1;
 
 	INIT_LIST_HEAD(&ci->gadget.ep_list);
 
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 3cefd49..5ddab30 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -54,6 +54,7 @@
 #define MX53_USB_PHYCTRL1_PLLDIV_MASK	0x3
 #define MX53_USB_PLL_DIV_24_MHZ		0x01
 
+#define MX6_BM_NON_BURST_SETTING	BIT(1)
 #define MX6_BM_OVER_CUR_DIS		BIT(7)
 #define MX6_BM_WAKEUP_ENABLE		BIT(10)
 #define MX6_BM_ID_WAKEUP		BIT(16)
@@ -255,14 +256,21 @@
 	if (data->index > 3)
 		return -EINVAL;
 
+	spin_lock_irqsave(&usbmisc->lock, flags);
+
 	if (data->disable_oc) {
-		spin_lock_irqsave(&usbmisc->lock, flags);
 		reg = readl(usbmisc->base + data->index * 4);
 		writel(reg | MX6_BM_OVER_CUR_DIS,
 			usbmisc->base + data->index * 4);
-		spin_unlock_irqrestore(&usbmisc->lock, flags);
 	}
 
+	/* SoC non-burst setting */
+	reg = readl(usbmisc->base + data->index * 4);
+	writel(reg | MX6_BM_NON_BURST_SETTING,
+			usbmisc->base + data->index * 4);
+
+	spin_unlock_irqrestore(&usbmisc->lock, flags);
+
 	usbmisc_imx6q_set_wakeup(data, false);
 
 	return 0;
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index f38e875..433bbc3 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -57,6 +57,7 @@
 #include <linux/mutex.h>
 #undef DEBUG
 #include <linux/usb.h>
+#include <linux/usb/ch9.h>
 #include <linux/ratelimit.h>
 
 /*
@@ -79,12 +80,20 @@
 #define IOCNR_SOFT_RESET		7
 /* Get device_id string: */
 #define LPIOC_GET_DEVICE_ID(len) _IOC(_IOC_READ, 'P', IOCNR_GET_DEVICE_ID, len)
-/* The following ioctls were added for http://hpoj.sourceforge.net: */
-/* Get two-int array:
- * [0]=current protocol (1=7/1/1, 2=7/1/2, 3=7/1/3),
- * [1]=supported protocol mask (mask&(1<<n)!=0 means 7/1/n supported): */
+/* The following ioctls were added for http://hpoj.sourceforge.net:
+ * Get two-int array:
+ * [0]=current protocol
+ *     (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
+ *         3=USB_CLASS_PRINTER/1/3),
+ * [1]=supported protocol mask (mask&(1<<n)!=0 means
+ *     USB_CLASS_PRINTER/1/n supported):
+ */
 #define LPIOC_GET_PROTOCOLS(len) _IOC(_IOC_READ, 'P', IOCNR_GET_PROTOCOLS, len)
-/* Set protocol (arg: 1=7/1/1, 2=7/1/2, 3=7/1/3): */
+/*
+ * Set protocol
+ *     (arg: 1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
+ *         3=USB_CLASS_PRINTER/1/3):
+ */
 #define LPIOC_SET_PROTOCOL _IOC(_IOC_WRITE, 'P', IOCNR_SET_PROTOCOL, 0)
 /* Set channel number (HP Vendor-specific command): */
 #define LPIOC_HP_SET_CHANNEL _IOC(_IOC_WRITE, 'P', IOCNR_HP_SET_CHANNEL, 0)
@@ -146,8 +155,10 @@
 	int			readcount;		/* Counter for reads */
 	int			ifnum;			/* Interface number */
 	struct usb_interface	*intf;			/* The interface */
-	/* Alternate-setting numbers and endpoints for each protocol
-	 * (7/1/{index=1,2,3}) that the device supports: */
+	/*
+	 * Alternate-setting numbers and endpoints for each protocol
+	 * (USB_CLASS_PRINTER/1/{index=1,2,3}) that the device supports:
+	 */
 	struct {
 		int				alt_setting;
 		struct usb_endpoint_descriptor	*epwrite;
@@ -1206,19 +1217,23 @@
  * but our requirements are too intricate for simple match to handle.
  *
  * The "proto_bias" option may be used to specify the preferred protocol
- * for all USB printers (1=7/1/1, 2=7/1/2, 3=7/1/3).  If the device
- * supports the preferred protocol, then we bind to it.
+ * for all USB printers (1=USB_CLASS_PRINTER/1/1, 2=USB_CLASS_PRINTER/1/2,
+ * 3=USB_CLASS_PRINTER/1/3).  If the device supports the preferred protocol,
+ * then we bind to it.
  *
- * The best interface for us is 7/1/2, because it is compatible
- * with a stream of characters. If we find it, we bind to it.
+ * The best interface for us is USB_CLASS_PRINTER/1/2, because it
+ * is compatible with a stream of characters. If we find it, we bind to it.
  *
  * Note that the people from hpoj.sourceforge.net need to be able to
- * bind to 7/1/3 (MLC/1284.4), so we provide them ioctls for this purpose.
+ * bind to USB_CLASS_PRINTER/1/3 (MLC/1284.4), so we provide them ioctls
+ * for this purpose.
  *
- * Failing 7/1/2, we look for 7/1/3, even though it's probably not
- * stream-compatible, because this matches the behaviour of the old code.
+ * Failing USB_CLASS_PRINTER/1/2, we look for USB_CLASS_PRINTER/1/3,
+ * even though it's probably not stream-compatible, because this matches
+ * the behaviour of the old code.
  *
- * If nothing else, we bind to 7/1/1 - the unidirectional interface.
+ * If nothing else, we bind to USB_CLASS_PRINTER/1/1
+ * - the unidirectional interface.
  */
 static int usblp_select_alts(struct usblp *usblp)
 {
@@ -1236,7 +1251,8 @@
 	for (i = 0; i < if_alt->num_altsetting; i++) {
 		ifd = &if_alt->altsetting[i];
 
-		if (ifd->desc.bInterfaceClass != 7 || ifd->desc.bInterfaceSubClass != 1)
+		if (ifd->desc.bInterfaceClass != USB_CLASS_PRINTER ||
+		    ifd->desc.bInterfaceSubClass != 1)
 			if (!(usblp->quirks & USBLP_QUIRK_BAD_CLASS))
 				continue;
 
@@ -1262,8 +1278,10 @@
 		if (!epwrite || (ifd->desc.bInterfaceProtocol > 1 && !epread))
 			continue;
 
-		/* Turn off reads for 7/1/1 (unidirectional) interfaces
-		 * and buggy bidirectional printers. */
+		/*
+		 * Turn off reads for USB_CLASS_PRINTER/1/1 (unidirectional)
+		 * interfaces and buggy bidirectional printers.
+		 */
 		if (ifd->desc.bInterfaceProtocol == 1) {
 			epread = NULL;
 		} else if (usblp->quirks & USBLP_QUIRK_BIDIR) {
@@ -1406,12 +1424,12 @@
 }
 
 static const struct usb_device_id usblp_ids[] = {
-	{ USB_DEVICE_INFO(7, 1, 1) },
-	{ USB_DEVICE_INFO(7, 1, 2) },
-	{ USB_DEVICE_INFO(7, 1, 3) },
-	{ USB_INTERFACE_INFO(7, 1, 1) },
-	{ USB_INTERFACE_INFO(7, 1, 2) },
-	{ USB_INTERFACE_INFO(7, 1, 3) },
+	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 1) },
+	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 2) },
+	{ USB_DEVICE_INFO(USB_CLASS_PRINTER, 1, 3) },
+	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 1) },
+	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 2) },
+	{ USB_INTERFACE_INFO(USB_CLASS_PRINTER, 1, 3) },
 	{ USB_DEVICE(0x04b8, 0x0202) },	/* Seiko Epson Receipt Printer M129C */
 	{ }						/* Terminating entry */
 };
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index b530fd4..9e39286 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -154,6 +154,62 @@
 	return false;
 }
 EXPORT_SYMBOL_GPL(of_usb_host_tpl_support);
+
+/**
+ * of_usb_update_otg_caps - update usb otg capabilities according to
+ * the properties passed in DT.
+ * @np: Pointer to the given device_node
+ * @otg_caps: Pointer to the target usb_otg_caps to be set
+ *
+ * The function updates the otg capabilities based on the passed DT properties.
+ */
+int of_usb_update_otg_caps(struct device_node *np,
+			struct usb_otg_caps *otg_caps)
+{
+	u32 otg_rev;
+
+	if (!otg_caps)
+		return -EINVAL;
+
+	if (!of_property_read_u32(np, "otg-rev", &otg_rev)) {
+		switch (otg_rev) {
+		case 0x0100:
+		case 0x0120:
+		case 0x0130:
+		case 0x0200:
+			/* Choose the lesser one if it's already been set */
+			if (otg_caps->otg_rev)
+				otg_caps->otg_rev = min_t(u16, otg_rev,
+							otg_caps->otg_rev);
+			else
+				otg_caps->otg_rev = otg_rev;
+			break;
+		default:
+			pr_err("%s: unsupported otg-rev: 0x%x\n",
+						np->full_name, otg_rev);
+			return -EINVAL;
+		}
+	} else {
+		/*
+		 * otg-rev is mandatory for otg properties; if it is not
+		 * passed, we set it to 0 and assume a legacy otg device.
+		 * Non-dt platforms can set it afterwards.
+		 */
+		otg_caps->otg_rev = 0;
+	}
+
+	if (of_find_property(np, "hnp-disable", NULL))
+		otg_caps->hnp_support = false;
+	if (of_find_property(np, "srp-disable", NULL))
+		otg_caps->srp_support = false;
+	if (of_find_property(np, "adp-disable", NULL) ||
+				(otg_caps->otg_rev < 0x0200))
+		otg_caps->adp_support = false;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
+
 #endif
 
 MODULE_LICENSE("GPL");
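
A hedged usage sketch for of_usb_update_otg_caps(): the caller primes
usb_otg_caps with what the controller supports and then lets the DT
properties only restrict it, as ci_get_platdata() does earlier in this diff
(variable names below are illustrative):

	struct usb_otg_caps caps = {
		.otg_rev	= 0x0200,	/* controller supports OTG 2.0 */
		.hnp_support	= true,
		.srp_support	= true,
		.adp_support	= false,
	};
	int err = of_usb_update_otg_caps(dev->of_node, &caps);
	/*
	 * "otg-rev", "hnp-disable", "srp-disable" and "adp-disable" in the
	 * node can lower these capabilities but never raise them.
	 */
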
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 986abde..38ae877c 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -103,7 +103,7 @@
 #define snoop(dev, format, arg...)				\
 	do {							\
 		if (usbfs_snoop)				\
-			dev_info(dev , format , ## arg);	\
+			dev_info(dev, format, ## arg);		\
 	} while (0)
 
 enum snoop_when {
@@ -1082,7 +1082,8 @@
 	ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
 	if (ret)
 		return ret;
-	if (!(tbuf = kmalloc(len1, GFP_KERNEL))) {
+	tbuf = kmalloc(len1, GFP_KERNEL);
+	if (!tbuf) {
 		ret = -ENOMEM;
 		goto done;
 	}
@@ -1224,7 +1225,8 @@
 
 	if (copy_from_user(&setintf, arg, sizeof(setintf)))
 		return -EFAULT;
-	if ((ret = checkintf(ps, setintf.interface)))
+	ret = checkintf(ps, setintf.interface);
+	if (ret)
 		return ret;
 
 	destroy_async_on_interface(ps, setintf.interface);
@@ -1319,7 +1321,7 @@
 	is_in = (uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0;
 
 	u = 0;
-	switch(uurb->type) {
+	switch (uurb->type) {
 	case USBDEVFS_URB_TYPE_CONTROL:
 		if (!usb_endpoint_xfer_control(&ep->desc))
 			return -EINVAL;
@@ -1393,7 +1395,8 @@
 		number_of_packets = uurb->number_of_packets;
 		isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
 				   number_of_packets;
-		if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
+		isopkt = kmalloc(isofrmlen, GFP_KERNEL);
+		if (!isopkt)
 			return -ENOMEM;
 		if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
 			ret = -EFAULT;
@@ -1904,7 +1907,8 @@
 
 	if (get_user(ifnum, (unsigned int __user *)arg))
 		return -EFAULT;
-	if ((ret = releaseintf(ps, ifnum)) < 0)
+	ret = releaseintf(ps, ifnum);
+	if (ret < 0)
 		return ret;
 	destroy_async_on_interface (ps, ifnum);
 	return 0;
@@ -1919,7 +1923,8 @@
 	struct usb_driver       *driver = NULL;
 
 	/* alloc buffer */
-	if ((size = _IOC_SIZE(ctl->ioctl_code)) > 0) {
+	size = _IOC_SIZE(ctl->ioctl_code);
+	if (size > 0) {
 		buf = kmalloc(size, GFP_KERNEL);
 		if (buf == NULL)
 			return -ENOMEM;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 818369a..6b5063e 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -160,6 +160,7 @@
 	spin_lock(&usb_driver->dynids.lock);
 	list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
 		struct usb_device_id *id = &dynid->id;
+
 		if ((id->idVendor == idVendor) &&
 		    (id->idProduct == idProduct)) {
 			list_del(&dynid->node);
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 39a2402..101983b 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -51,7 +51,7 @@
 {
 	struct ep_device *ep = to_ep_device(dev);
 	return sprintf(buf, "%04x\n",
-		        usb_endpoint_maxp(ep->desc) & 0x07ff);
+			usb_endpoint_maxp(ep->desc) & 0x07ff);
 }
 static DEVICE_ATTR_RO(wMaxPacketSize);
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index cbcd092..4d64e5c4 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2686,12 +2686,14 @@
 	 * bottom up so that hcds can customize the root hubs before hub_wq
 	 * starts talking to them.  (Note, bus id is assigned early too.)
 	 */
-	if ((retval = hcd_buffer_create(hcd)) != 0) {
+	retval = hcd_buffer_create(hcd);
+	if (retval != 0) {
 		dev_dbg(hcd->self.controller, "pool alloc failed\n");
 		goto err_create_buf;
 	}
 
-	if ((retval = usb_register_bus(&hcd->self)) < 0)
+	retval = usb_register_bus(&hcd->self);
+	if (retval < 0)
 		goto err_register_bus;
 
 	rhdev = usb_alloc_dev(NULL, &hcd->self, 0);
@@ -2737,9 +2739,13 @@
 	/* "reset" is misnamed; its role is now one-time init. the controller
 	 * should already have been reset (and boot firmware kicked off etc).
 	 */
-	if (hcd->driver->reset && (retval = hcd->driver->reset(hcd)) < 0) {
-		dev_err(hcd->self.controller, "can't setup: %d\n", retval);
-		goto err_hcd_driver_setup;
+	if (hcd->driver->reset) {
+		retval = hcd->driver->reset(hcd);
+		if (retval < 0) {
+			dev_err(hcd->self.controller, "can't setup: %d\n",
+					retval);
+			goto err_hcd_driver_setup;
+		}
 	}
 	hcd->rh_pollable = 1;
 
@@ -2769,7 +2775,8 @@
 	}
 
 	/* starting here, usbcore will pay attention to this root hub */
-	if ((retval = register_root_hub(hcd)) != 0)
+	retval = register_root_hub(hcd);
+	if (retval != 0)
 		goto err_register_root_hub;
 
 	retval = sysfs_create_group(&rhdev->dev.kobj, &usb_bus_attr_group);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 73dfa19..431839b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -50,8 +50,8 @@
 
 /* cycle leds on hubs that aren't blinking for attention */
 static bool blinkenlights = 0;
-module_param (blinkenlights, bool, S_IRUGO);
-MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
+module_param(blinkenlights, bool, S_IRUGO);
+MODULE_PARM_DESC(blinkenlights, "true to cycle leds on hubs");
 
 /*
  * Device SATA8000 FW1.0 from DATAST0R Technology Corp requires about
@@ -439,7 +439,7 @@
 
 #define	LED_CYCLE_PERIOD	((2*HZ)/3)
 
-static void led_work (struct work_struct *work)
+static void led_work(struct work_struct *work)
 {
 	struct usb_hub		*hub =
 		container_of(work, struct usb_hub, leds.work);
@@ -646,7 +646,7 @@
 
 	default:		/* presumably an error */
 		/* Cause a hub reset after 10 consecutive errors */
-		dev_dbg (hub->intfdev, "transfer --> %d\n", status);
+		dev_dbg(hub->intfdev, "transfer --> %d\n", status);
 		if ((++hub->nerrors < 10) || hub->error)
 			goto resubmit;
 		hub->error = status;
@@ -671,14 +671,14 @@
 	if (hub->quiescing)
 		return;
 
-	if ((status = usb_submit_urb (hub->urb, GFP_ATOMIC)) != 0
-			&& status != -ENODEV && status != -EPERM)
-		dev_err (hub->intfdev, "resubmit --> %d\n", status);
+	status = usb_submit_urb(hub->urb, GFP_ATOMIC);
+	if (status != 0 && status != -ENODEV && status != -EPERM)
+		dev_err(hub->intfdev, "resubmit --> %d\n", status);
 }
 
 /* USB 2.0 spec Section 11.24.2.3 */
 static inline int
-hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
+hub_clear_tt_buffer(struct usb_device *hdev, u16 devinfo, u16 tt)
 {
 	/* Need to clear both directions for control ep */
 	if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
@@ -706,7 +706,7 @@
 		container_of(work, struct usb_hub, tt.clear_work);
 	unsigned long		flags;
 
-	spin_lock_irqsave (&hub->tt.lock, flags);
+	spin_lock_irqsave(&hub->tt.lock, flags);
 	while (!list_empty(&hub->tt.clear_list)) {
 		struct list_head	*next;
 		struct usb_tt_clear	*clear;
@@ -715,14 +715,14 @@
 		int			status;
 
 		next = hub->tt.clear_list.next;
-		clear = list_entry (next, struct usb_tt_clear, clear_list);
-		list_del (&clear->clear_list);
+		clear = list_entry(next, struct usb_tt_clear, clear_list);
+		list_del(&clear->clear_list);
 
 		/* drop lock so HCD can concurrently report other TT errors */
-		spin_unlock_irqrestore (&hub->tt.lock, flags);
-		status = hub_clear_tt_buffer (hdev, clear->devinfo, clear->tt);
+		spin_unlock_irqrestore(&hub->tt.lock, flags);
+		status = hub_clear_tt_buffer(hdev, clear->devinfo, clear->tt);
 		if (status && status != -ENODEV)
-			dev_err (&hdev->dev,
+			dev_err(&hdev->dev,
 				"clear tt %d (%04x) error %d\n",
 				clear->tt, clear->devinfo, status);
 
@@ -734,7 +734,7 @@
 		kfree(clear);
 		spin_lock_irqsave(&hub->tt.lock, flags);
 	}
-	spin_unlock_irqrestore (&hub->tt.lock, flags);
+	spin_unlock_irqrestore(&hub->tt.lock, flags);
 }
 
 /**
@@ -797,7 +797,7 @@
 	 */
 	clear = kmalloc(sizeof *clear, GFP_ATOMIC);
 	if (clear == NULL) {
-		dev_err (&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
+		dev_err(&udev->dev, "can't save CLEAR_TT_BUFFER state\n");
 		/* FIXME recover somehow ... RESET_TT? */
 		return -ENOMEM;
 	}
@@ -806,10 +806,10 @@
 	clear->tt = tt->multi ? udev->ttport : 1;
 	clear->devinfo = usb_pipeendpoint (pipe);
 	clear->devinfo |= udev->devnum << 4;
-	clear->devinfo |= usb_pipecontrol (pipe)
+	clear->devinfo |= usb_pipecontrol(pipe)
 			? (USB_ENDPOINT_XFER_CONTROL << 11)
 			: (USB_ENDPOINT_XFER_BULK << 11);
-	if (usb_pipein (pipe))
+	if (usb_pipein(pipe))
 		clear->devinfo |= 1 << 15;
 
 	/* info for completion callback */
@@ -817,10 +817,10 @@
 	clear->ep = urb->ep;
 
 	/* tell keventd to clear state for this TT */
-	spin_lock_irqsave (&tt->lock, flags);
-	list_add_tail (&clear->clear_list, &tt->clear_list);
+	spin_lock_irqsave(&tt->lock, flags);
+	list_add_tail(&clear->clear_list, &tt->clear_list);
 	schedule_work(&tt->clear_work);
-	spin_unlock_irqrestore (&tt->lock, flags);
+	spin_unlock_irqrestore(&tt->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(usb_hub_clear_tt_buffer);
@@ -1442,8 +1442,8 @@
 		break;
 	}
 
-	spin_lock_init (&hub->tt.lock);
-	INIT_LIST_HEAD (&hub->tt.clear_list);
+	spin_lock_init(&hub->tt.lock);
+	INIT_LIST_HEAD(&hub->tt.clear_list);
 	INIT_WORK(&hub->tt.clear_work, hub_tt_work);
 	switch (hdev->descriptor.bDeviceProtocol) {
 	case USB_HUB_PR_FS:
@@ -1632,7 +1632,7 @@
 	return 0;
 
 fail:
-	dev_err (hub_dev, "config failed, %s (err %d)\n",
+	dev_err(hub_dev, "config failed, %s (err %d)\n",
 			message, ret);
 	/* hub_disconnect() frees urb and descriptor */
 	return ret;
@@ -1775,7 +1775,7 @@
 	if ((desc->desc.bInterfaceSubClass != 0) &&
 	    (desc->desc.bInterfaceSubClass != 1)) {
 descriptor_error:
-		dev_err (&intf->dev, "bad descriptor, ignoring hub\n");
+		dev_err(&intf->dev, "bad descriptor, ignoring hub\n");
 		return -EIO;
 	}
 
@@ -1790,11 +1790,11 @@
 		goto descriptor_error;
 
 	/* We found a hub */
-	dev_info (&intf->dev, "USB hub found\n");
+	dev_info(&intf->dev, "USB hub found\n");
 
 	hub = kzalloc(sizeof(*hub), GFP_KERNEL);
 	if (!hub) {
-		dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n");
+		dev_dbg(&intf->dev, "couldn't kmalloc hub struct\n");
 		return -ENOMEM;
 	}
 
@@ -1807,7 +1807,7 @@
 	usb_get_intf(intf);
 	usb_get_dev(hdev);
 
-	usb_set_intfdata (intf, hub);
+	usb_set_intfdata(intf, hub);
 	intf->needs_remote_wakeup = 1;
 	pm_suspend_ignore_children(&intf->dev, true);
 
@@ -1820,14 +1820,14 @@
 	if (hub_configure(hub, endpoint) >= 0)
 		return 0;
 
-	hub_disconnect (intf);
+	hub_disconnect(intf);
 	return -ENODEV;
 }
 
 static int
 hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
 {
-	struct usb_device *hdev = interface_to_usbdev (intf);
+	struct usb_device *hdev = interface_to_usbdev(intf);
 	struct usb_hub *hub = usb_hub_to_struct_hub(hdev);
 
 	/* assert ifno == 0 (part of hub spec) */
@@ -2143,7 +2143,7 @@
 	 * cleaning up all state associated with the current configuration
 	 * so that the hardware is now fully quiesced.
 	 */
-	dev_dbg (&udev->dev, "unregistering device\n");
+	dev_dbg(&udev->dev, "unregistering device\n");
 	usb_disable_device(udev, 0);
 	usb_hcd_synchronize_unlinks(udev);
 
@@ -2242,7 +2242,7 @@
 		struct usb_bus			*bus = udev->bus;
 
 		/* descriptor may appear anywhere in config */
-		if (__usb_get_extra_descriptor (udev->rawdescriptors[0],
+		if (__usb_get_extra_descriptor(udev->rawdescriptors[0],
 					le16_to_cpu(udev->config[0].desc.wTotalLength),
 					USB_DT_OTG, (void **) &desc) == 0) {
 			if (desc->bmAttributes & USB_OTG_HNP) {
@@ -3526,7 +3526,7 @@
 
 static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
 {
-	struct usb_hub		*hub = usb_get_intfdata (intf);
+	struct usb_hub		*hub = usb_get_intfdata(intf);
 	struct usb_device	*hdev = hub->hdev;
 	unsigned		port1;
 	int			status;
@@ -3950,6 +3950,8 @@
 	if (usb_disable_link_state(hcd, udev, USB3_LPM_U2))
 		goto enable_lpm;
 
+	udev->usb3_lpm_enabled = 0;
+
 	return 0;
 
 enable_lpm:
@@ -4007,6 +4009,8 @@
 
 	usb_enable_link_state(hcd, udev, USB3_LPM_U1);
 	usb_enable_link_state(hcd, udev, USB3_LPM_U2);
+
+	udev->usb3_lpm_enabled = 1;
 }
 EXPORT_SYMBOL_GPL(usb_enable_lpm);
 
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index a6315ab..a95b0c9 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -16,7 +16,7 @@
  * YOU _SHOULD_ CHANGE THIS LIST TO MATCH YOUR PRODUCT AND ITS TESTING!
  */
 
-static struct usb_device_id whitelist_table [] = {
+static struct usb_device_id whitelist_table[] = {
 
 /* hubs are optional in OTG, but very handy ... */
 { USB_DEVICE_INFO(USB_CLASS_HUB, 0, 0), },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d269738..cfc68c1 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -531,6 +531,25 @@
 }
 static DEVICE_ATTR_RW(usb2_lpm_besl);
 
+static ssize_t usb3_hardware_lpm_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct usb_device *udev = to_usb_device(dev);
+	const char *p;
+
+	usb_lock_device(udev);
+
+	if (udev->usb3_lpm_enabled)
+		p = "enabled";
+	else
+		p = "disabled";
+
+	usb_unlock_device(udev);
+
+	return sprintf(buf, "%s\n", p);
+}
+static DEVICE_ATTR_RO(usb3_hardware_lpm);
+
 static struct attribute *usb2_hardware_lpm_attr[] = {
 	&dev_attr_usb2_hardware_lpm.attr,
 	&dev_attr_usb2_lpm_l1_timeout.attr,
@@ -542,6 +561,15 @@
 	.attrs	= usb2_hardware_lpm_attr,
 };
 
+static struct attribute *usb3_hardware_lpm_attr[] = {
+	&dev_attr_usb3_hardware_lpm.attr,
+	NULL,
+};
+static struct attribute_group usb3_hardware_lpm_attr_group = {
+	.name	= power_group_name,
+	.attrs	= usb3_hardware_lpm_attr,
+};
+
 static struct attribute *power_attrs[] = {
 	&dev_attr_autosuspend.attr,
 	&dev_attr_level.attr,
@@ -564,6 +592,9 @@
 		if (udev->usb2_hw_lpm_capable == 1)
 			rc = sysfs_merge_group(&dev->kobj,
 					&usb2_hardware_lpm_attr_group);
+		if (udev->lpm_capable == 1)
+			rc = sysfs_merge_group(&dev->kobj,
+					&usb3_hardware_lpm_attr_group);
 	}
 
 	return rc;
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index c3cc1a7..b00fe95 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -3175,7 +3175,7 @@
 		hw->hs_phy_type);
 	dev_dbg(hsotg->dev, "  fs_phy_type=%d\n",
 		hw->fs_phy_type);
-	dev_dbg(hsotg->dev, "  utmi_phy_data_wdith=%d\n",
+	dev_dbg(hsotg->dev, "  utmi_phy_data_width=%d\n",
 		hw->utmi_phy_data_width);
 	dev_dbg(hsotg->dev, "  num_dev_ep=%d\n",
 		hw->num_dev_ep);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4d47b7c..3ee5b4c 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2880,7 +2880,7 @@
 		epctl = readl(hs->regs + epreg);
 
 		if (value) {
-			epctl |= DXEPCTL_STALL + DXEPCTL_SNAK;
+			epctl |= DXEPCTL_STALL | DXEPCTL_SNAK;
 			if (epctl & DXEPCTL_EPENA)
 				epctl |= DXEPCTL_EPDIS;
 		} else {
@@ -3289,6 +3289,19 @@
 	usb_ep_set_maxpacket_limit(&hs_ep->ep, epnum ? 1024 : EP0_MPS_LIMIT);
 	hs_ep->ep.ops = &s3c_hsotg_ep_ops;
 
+	if (epnum == 0) {
+		hs_ep->ep.caps.type_control = true;
+	} else {
+		hs_ep->ep.caps.type_iso = true;
+		hs_ep->ep.caps.type_bulk = true;
+		hs_ep->ep.caps.type_int = true;
+	}
+
+	if (dir_in)
+		hs_ep->ep.caps.dir_in = true;
+	else
+		hs_ep->ep.caps.dir_out = true;
+
 	/*
 	 * if we're using dma, we need to set the next-endpoint pointer
 	 * to be something valid.
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index dede32e..5a42c45 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -104,11 +104,4 @@
 	  Recent Qualcomm SoCs ship with one DesignWare Core USB3 IP inside,
 	  say 'Y' or 'M' if you have one such device.
 
-comment "Debugging features"
-
-config USB_DWC3_DEBUG
-	bool "Enable Debugging Messages"
-	help
-	  Say Y here to enable debugging messages on DWC3 Driver.
-
 endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index c7076e3..acc951d 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,8 +1,6 @@
 # define_trace.h needs to know how to find our header
 CFLAGS_trace.o				:= -I$(src)
 
-ccflags-$(CONFIG_USB_DWC3_DEBUG)	:= -DDEBUG
-
 obj-$(CONFIG_USB_DWC3)			+= dwc3.o
 
 dwc3-y					:= core.o debug.o trace.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ff5773c..064123e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,8 +455,6 @@
 			reg |= DWC3_GUSB2PHYCFG_ULPI_UTMI;
 			dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
 		} else {
-			dev_warn(dwc->dev, "HSPHY Interface not defined\n");
-
 			/* Relying on default value. */
 			if (!(reg & DWC3_GUSB2PHYCFG_ULPI_UTMI))
 				break;
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 7bd0a95..dd5cb55 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -145,7 +145,7 @@
 
 	exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
 	if (IS_ERR(exynos->susp_clk)) {
-		dev_dbg(dev, "no suspend clk specified\n");
+		dev_info(dev, "no suspend clk specified\n");
 		exynos->susp_clk = NULL;
 	}
 	clk_prepare_enable(exynos->susp_clk);
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index fe3b933..2be268d 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -115,7 +115,7 @@
 
 	error = clk_prepare_enable(kdwc->clk);
 	if (error < 0) {
-		dev_dbg(kdwc->dev, "unable to enable usb clock, err %d\n",
+		dev_err(kdwc->dev, "unable to enable usb clock, error %d\n",
 			error);
 		return error;
 	}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 6b486a3..a5a1b7c 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -128,8 +128,7 @@
 
 	u32			dma_status:1;
 
-	struct extcon_specific_cable_nb extcon_vbus_dev;
-	struct extcon_specific_cable_nb extcon_id_dev;
+	struct extcon_dev	*edev;
 	struct notifier_block	vbus_nb;
 	struct notifier_block	id_nb;
 
@@ -225,12 +224,10 @@
 
 	switch (status) {
 	case OMAP_DWC3_ID_GROUND:
-		dev_dbg(omap->dev, "ID GND\n");
-
 		if (omap->vbus_reg) {
 			ret = regulator_enable(omap->vbus_reg);
 			if (ret) {
-				dev_dbg(omap->dev, "regulator enable failed\n");
+				dev_err(omap->dev, "regulator enable failed\n");
 				return;
 			}
 		}
@@ -245,8 +242,6 @@
 		break;
 
 	case OMAP_DWC3_VBUS_VALID:
-		dev_dbg(omap->dev, "VBUS Connect\n");
-
 		val = dwc3_omap_read_utmi_ctrl(omap);
 		val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
 		val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
@@ -261,8 +256,6 @@
 			regulator_disable(omap->vbus_reg);
 
 	case OMAP_DWC3_VBUS_OFF:
-		dev_dbg(omap->dev, "VBUS Disconnect\n");
-
 		val = dwc3_omap_read_utmi_ctrl(omap);
 		val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
 				| USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
@@ -273,7 +266,7 @@
 		break;
 
 	default:
-		dev_dbg(omap->dev, "invalid state\n");
+		dev_WARN(omap->dev, "invalid state\n");
 	}
 }
 
@@ -284,37 +277,8 @@
 
 	reg = dwc3_omap_read_irqmisc_status(omap);
 
-	if (reg & USBOTGSS_IRQMISC_DMADISABLECLR) {
-		dev_dbg(omap->dev, "DMA Disable was Cleared\n");
+	if (reg & USBOTGSS_IRQMISC_DMADISABLECLR)
 		omap->dma_status = false;
-	}
-
-	if (reg & USBOTGSS_IRQMISC_OEVT)
-		dev_dbg(omap->dev, "OTG Event\n");
-
-	if (reg & USBOTGSS_IRQMISC_DRVVBUS_RISE)
-		dev_dbg(omap->dev, "DRVVBUS Rise\n");
-
-	if (reg & USBOTGSS_IRQMISC_CHRGVBUS_RISE)
-		dev_dbg(omap->dev, "CHRGVBUS Rise\n");
-
-	if (reg & USBOTGSS_IRQMISC_DISCHRGVBUS_RISE)
-		dev_dbg(omap->dev, "DISCHRGVBUS Rise\n");
-
-	if (reg & USBOTGSS_IRQMISC_IDPULLUP_RISE)
-		dev_dbg(omap->dev, "IDPULLUP Rise\n");
-
-	if (reg & USBOTGSS_IRQMISC_DRVVBUS_FALL)
-		dev_dbg(omap->dev, "DRVVBUS Fall\n");
-
-	if (reg & USBOTGSS_IRQMISC_CHRGVBUS_FALL)
-		dev_dbg(omap->dev, "CHRGVBUS Fall\n");
-
-	if (reg & USBOTGSS_IRQMISC_DISCHRGVBUS_FALL)
-		dev_dbg(omap->dev, "DISCHRGVBUS Fall\n");
-
-	if (reg & USBOTGSS_IRQMISC_IDPULLUP_FALL)
-		dev_dbg(omap->dev, "IDPULLUP Fall\n");
 
 	dwc3_omap_write_irqmisc_status(omap, reg);
 
@@ -434,7 +398,7 @@
 		reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
 		break;
 	default:
-		dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
+		dev_WARN(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
 	}
 
 	dwc3_omap_write_utmi_ctrl(omap, reg);
@@ -454,23 +418,23 @@
 		}
 
 		omap->vbus_nb.notifier_call = dwc3_omap_vbus_notifier;
-		ret = extcon_register_interest(&omap->extcon_vbus_dev,
-					       edev->name, "USB",
-					       &omap->vbus_nb);
+		ret = extcon_register_notifier(edev, EXTCON_USB,
+						&omap->vbus_nb);
 		if (ret < 0)
 			dev_vdbg(omap->dev, "failed to register notifier for USB\n");
 
 		omap->id_nb.notifier_call = dwc3_omap_id_notifier;
-		ret = extcon_register_interest(&omap->extcon_id_dev,
-					       edev->name, "USB-HOST",
-					       &omap->id_nb);
+		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+						&omap->id_nb);
 		if (ret < 0)
 			dev_vdbg(omap->dev, "failed to register notifier for USB-HOST\n");
 
-		if (extcon_get_cable_state(edev, "USB") == true)
+		if (extcon_get_cable_state_(edev, EXTCON_USB) == true)
 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
-		if (extcon_get_cable_state(edev, "USB-HOST") == true)
+		if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == true)
 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+
+		omap->edev = edev;
 	}
 
 	return 0;
@@ -565,11 +529,8 @@
 	return 0;
 
 err3:
-	if (omap->extcon_vbus_dev.edev)
-		extcon_unregister_interest(&omap->extcon_vbus_dev);
-	if (omap->extcon_id_dev.edev)
-		extcon_unregister_interest(&omap->extcon_id_dev);
-
+	extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
+	extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
 err2:
 	dwc3_omap_disable_irqs(omap);
 
@@ -586,10 +547,8 @@
 {
 	struct dwc3_omap	*omap = platform_get_drvdata(pdev);
 
-	if (omap->extcon_vbus_dev.edev)
-		extcon_unregister_interest(&omap->extcon_vbus_dev);
-	if (omap->extcon_id_dev.edev)
-		extcon_unregister_interest(&omap->extcon_id_dev);
+	extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
+	extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
 	dwc3_omap_disable_irqs(omap);
 	of_platform_depopulate(omap->dev);
 	pm_runtime_put_sync(&pdev->dev);
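
The extcon conversion above registers plain notifier blocks for EXTCON_USB and
EXTCON_USB_HOST.  The callbacks themselves (dwc3_omap_vbus_notifier and
dwc3_omap_id_notifier) sit outside this excerpt; the VBUS one presumably has
the usual notifier shape, roughly:

	static int dwc3_omap_vbus_notifier(struct notifier_block *nb,
					   unsigned long event, void *ptr)
	{
		struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, vbus_nb);

		dwc3_omap_set_mailbox(omap, event ? OMAP_DWC3_VBUS_VALID
						  : OMAP_DWC3_VBUS_OFF);
		return NOTIFY_DONE;
	}
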
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 27e4fc8..f626179 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -83,17 +83,23 @@
 		acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
 					  acpi_dwc3_byt_gpios);
 
-		/* These GPIOs will turn on the USB2 PHY */
-		gpio = gpiod_get(&pdev->dev, "cs");
-		if (!IS_ERR(gpio)) {
-			gpiod_direction_output(gpio, 0);
-			gpiod_set_value_cansleep(gpio, 1);
-			gpiod_put(gpio);
-		}
+		/*
+		 * These GPIOs will turn on the USB2 PHY. Note that we have to
+		 * put the gpio descriptors again here because the phy driver
+		 * might want to grab them, too.
+		 */
+		gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+		if (IS_ERR(gpio))
+			return PTR_ERR(gpio);
 
-		gpio = gpiod_get(&pdev->dev, "reset");
-		if (!IS_ERR(gpio)) {
-			gpiod_direction_output(gpio, 0);
+		gpiod_set_value_cansleep(gpio, 1);
+		gpiod_put(gpio);
+
+		gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+		if (IS_ERR(gpio))
+			return PTR_ERR(gpio);
+
+		if (gpio) {
 			gpiod_set_value_cansleep(gpio, 1);
 			gpiod_put(gpio);
 			usleep_range(10000, 11000);
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 8c2e8ee..0880260 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -48,13 +48,13 @@
 
 	qdwc->iface_clk = devm_clk_get(qdwc->dev, "iface");
 	if (IS_ERR(qdwc->iface_clk)) {
-		dev_dbg(qdwc->dev, "failed to get optional iface clock\n");
+		dev_info(qdwc->dev, "failed to get optional iface clock\n");
 		qdwc->iface_clk = NULL;
 	}
 
 	qdwc->sleep_clk = devm_clk_get(qdwc->dev, "sleep");
 	if (IS_ERR(qdwc->sleep_clk)) {
-		dev_dbg(qdwc->dev, "failed to get optional sleep clock\n");
+		dev_info(qdwc->dev, "failed to get optional sleep clock\n");
 		qdwc->sleep_clk = NULL;
 	}
 
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 4a1a543..de4d52f 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -135,8 +135,6 @@
 			| USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
 
 		val |= USB3_DEVICE_NOT_HOST;
-
-		dev_dbg(dwc3_data->dev, "Configuring as Device\n");
 		break;
 
 	case USB_DR_MODE_HOST:
@@ -154,8 +152,6 @@
 		 */
 
 		val |= USB3_DELAY_VBUSVALID;
-
-		dev_dbg(dwc3_data->dev, "Configuring as Host\n");
 		break;
 
 	default:
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 69e769c..5320e93 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -56,7 +56,7 @@
 }
 
 static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
-		u32 len, u32 type)
+		u32 len, u32 type, bool chain)
 {
 	struct dwc3_gadget_ep_cmd_params params;
 	struct dwc3_trb			*trb;
@@ -70,7 +70,10 @@
 		return 0;
 	}
 
-	trb = dwc->ep0_trb;
+	trb = &dwc->ep0_trb[dep->free_slot];
+
+	if (chain)
+		dep->free_slot++;
 
 	trb->bpl = lower_32_bits(buf_dma);
 	trb->bph = upper_32_bits(buf_dma);
@@ -78,10 +81,17 @@
 	trb->ctrl = type;
 
 	trb->ctrl |= (DWC3_TRB_CTRL_HWO
-			| DWC3_TRB_CTRL_LST
-			| DWC3_TRB_CTRL_IOC
 			| DWC3_TRB_CTRL_ISP_IMI);
 
+	if (chain)
+		trb->ctrl |= DWC3_TRB_CTRL_CHN;
+	else
+		trb->ctrl |= (DWC3_TRB_CTRL_IOC
+				| DWC3_TRB_CTRL_LST);
+
+	if (chain)
+		return 0;
+
 	memset(&params, 0, sizeof(params));
 	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
 	params.param1 = lower_32_bits(dwc->ep0_trb_addr);
@@ -302,7 +312,7 @@
 	int				ret;
 
 	ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
-			DWC3_TRBCTL_CONTROL_SETUP);
+			DWC3_TRBCTL_CONTROL_SETUP, false);
 	WARN_ON(ret < 0);
 }
 
@@ -783,7 +793,11 @@
 	struct usb_request	*ur;
 	struct dwc3_trb		*trb;
 	struct dwc3_ep		*ep0;
-	u32			transferred;
+	unsigned		transfer_size = 0;
+	unsigned		maxp;
+	unsigned		remaining_ur_length;
+	void			*buf;
+	u32			transferred = 0;
 	u32			status;
 	u32			length;
 	u8			epnum;
@@ -812,17 +826,37 @@
 	}
 
 	ur = &r->request;
+	buf = ur->buf;
+	remaining_ur_length = ur->length;
 
 	length = trb->size & DWC3_TRB_SIZE_MASK;
 
-	if (dwc->ep0_bounced) {
-		unsigned transfer_size = ur->length;
-		unsigned maxp = ep0->endpoint.maxpacket;
+	maxp = ep0->endpoint.maxpacket;
 
-		transfer_size += (maxp - (transfer_size % maxp));
-		transferred = min_t(u32, ur->length,
-				transfer_size - length);
-		memcpy(ur->buf, dwc->ep0_bounce, transferred);
+	if (dwc->ep0_bounced) {
+		/*
+		 * Handle the first TRB before handling the bounce buffer if
+		 * the request length is greater than the bounce buffer size
+		 */
+		if (ur->length > DWC3_EP0_BOUNCE_SIZE) {
+			transfer_size = ALIGN(ur->length - maxp, maxp);
+			transferred = transfer_size - length;
+			buf = (u8 *)buf + transferred;
+			ur->actual += transferred;
+			remaining_ur_length -= transferred;
+
+			trb++;
+			length = trb->size & DWC3_TRB_SIZE_MASK;
+
+			ep0->free_slot = 0;
+		}
+
+		transfer_size = roundup((ur->length - transfer_size),
+					maxp);
+
+		transferred = min_t(u32, remaining_ur_length,
+				    transfer_size - length);
+		memcpy(buf, dwc->ep0_bounce, transferred);
 	} else {
 		transferred = ur->length - length;
 	}
@@ -844,7 +878,7 @@
 
 			ret = dwc3_ep0_start_trans(dwc, epnum,
 					dwc->ctrl_req_addr, 0,
-					DWC3_TRBCTL_CONTROL_DATA);
+					DWC3_TRBCTL_CONTROL_DATA, false);
 			WARN_ON(ret < 0);
 		}
 	}
@@ -928,10 +962,10 @@
 	if (req->request.length == 0) {
 		ret = dwc3_ep0_start_trans(dwc, dep->number,
 				dwc->ctrl_req_addr, 0,
-				DWC3_TRBCTL_CONTROL_DATA);
+				DWC3_TRBCTL_CONTROL_DATA, false);
 	} else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket)
 			&& (dep->number == 0)) {
-		u32	transfer_size;
+		u32	transfer_size = 0;
 		u32	maxpacket;
 
 		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
@@ -941,21 +975,26 @@
 			return;
 		}
 
-		WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
-
 		maxpacket = dep->endpoint.maxpacket;
-		transfer_size = roundup(req->request.length, maxpacket);
+
+		if (req->request.length > DWC3_EP0_BOUNCE_SIZE) {
+			transfer_size = ALIGN(req->request.length - maxpacket,
+					      maxpacket);
+			ret = dwc3_ep0_start_trans(dwc, dep->number,
+						   req->request.dma,
+						   transfer_size,
+						   DWC3_TRBCTL_CONTROL_DATA,
+						   true);
+		}
+
+		transfer_size = roundup((req->request.length - transfer_size),
+					maxpacket);
 
 		dwc->ep0_bounced = true;
 
-		/*
-		 * REVISIT in case request length is bigger than
-		 * DWC3_EP0_BOUNCE_SIZE we will need two chained
-		 * TRBs to handle the transfer.
-		 */
 		ret = dwc3_ep0_start_trans(dwc, dep->number,
 				dwc->ep0_bounce_addr, transfer_size,
-				DWC3_TRBCTL_CONTROL_DATA);
+				DWC3_TRBCTL_CONTROL_DATA, false);
 	} else {
 		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
 				dep->number);
@@ -965,7 +1004,8 @@
 		}
 
 		ret = dwc3_ep0_start_trans(dwc, dep->number, req->request.dma,
-				req->request.length, DWC3_TRBCTL_CONTROL_DATA);
+				req->request.length, DWC3_TRBCTL_CONTROL_DATA,
+				false);
 	}
 
 	WARN_ON(ret < 0);
@@ -980,7 +1020,7 @@
 		: DWC3_TRBCTL_CONTROL_STATUS2;
 
 	return dwc3_ep0_start_trans(dwc, dep->number,
-			dwc->ctrl_req_addr, 0, type);
+			dwc->ctrl_req_addr, 0, type, false);
 }
 
 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 333a7c0..0c25704 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -547,6 +547,23 @@
 		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
 	}
 
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		strlcat(dep->name, "-control", sizeof(dep->name));
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		strlcat(dep->name, "-isoc", sizeof(dep->name));
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		strlcat(dep->name, "-bulk", sizeof(dep->name));
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		strlcat(dep->name, "-int", sizeof(dep->name));
+		break;
+	default:
+		dev_err(dwc->dev, "invalid endpoint transfer type\n");
+	}
+
 	return 0;
 }
 
@@ -586,6 +603,8 @@
 	struct dwc3		*dwc = dep->dwc;
 	u32			reg;
 
+	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
+
 	dwc3_remove_requests(dwc, dep);
 
 	/* make sure HW endpoint isn't stalled */
@@ -602,6 +621,10 @@
 	dep->type = 0;
 	dep->flags = 0;
 
+	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
+			dep->number >> 1,
+			(dep->number & 1) ? "in" : "out");
+
 	return 0;
 }
 
@@ -647,23 +670,6 @@
 		return 0;
 	}
 
-	switch (usb_endpoint_type(desc)) {
-	case USB_ENDPOINT_XFER_CONTROL:
-		strlcat(dep->name, "-control", sizeof(dep->name));
-		break;
-	case USB_ENDPOINT_XFER_ISOC:
-		strlcat(dep->name, "-isoc", sizeof(dep->name));
-		break;
-	case USB_ENDPOINT_XFER_BULK:
-		strlcat(dep->name, "-bulk", sizeof(dep->name));
-		break;
-	case USB_ENDPOINT_XFER_INT:
-		strlcat(dep->name, "-int", sizeof(dep->name));
-		break;
-	default:
-		dev_err(dwc->dev, "invalid endpoint transfer type\n");
-	}
-
 	spin_lock_irqsave(&dwc->lock, flags);
 	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -692,10 +698,6 @@
 		return 0;
 	}
 
-	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
-			dep->number >> 1,
-			(dep->number & 1) ? "in" : "out");
-
 	spin_lock_irqsave(&dwc->lock, flags);
 	ret = __dwc3_gadget_ep_disable(dep);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1713,6 +1715,17 @@
 				return ret;
 		}
 
+		if (epnum == 0 || epnum == 1) {
+			dep->endpoint.caps.type_control = true;
+		} else {
+			dep->endpoint.caps.type_iso = true;
+			dep->endpoint.caps.type_bulk = true;
+			dep->endpoint.caps.type_int = true;
+		}
+
+		dep->endpoint.caps.dir_in = !!direction;
+		dep->endpoint.caps.dir_out = !direction;
+
 		INIT_LIST_HEAD(&dep->request_list);
 		INIT_LIST_HEAD(&dep->req_queued);
 	}
@@ -2685,7 +2698,7 @@
 		goto err0;
 	}
 
-	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+	dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
 			&dwc->ep0_trb_addr, GFP_KERNEL);
 	if (!dwc->ep0_trb) {
 		dev_err(dwc->dev, "failed to allocate ep0 trb\n");
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 58b4657..b474499 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -19,6 +19,7 @@
 #include <linux/utsname.h>
 
 #include <linux/usb/composite.h>
+#include <linux/usb/otg.h>
 #include <asm/unaligned.h>
 
 #include "u_os_desc.h"
@@ -209,6 +210,12 @@
 	function->config = config;
 	list_add_tail(&function->list, &config->functions);
 
+	if (function->bind_deactivated) {
+		value = usb_function_deactivate(function);
+		if (value)
+			goto done;
+	}
+
 	/* REVISIT *require* function->bind? */
 	if (function->bind) {
 		value = function->bind(config, function);
@@ -279,7 +286,7 @@
 	spin_lock_irqsave(&cdev->lock, flags);
 
 	if (cdev->deactivations == 0)
-		status = usb_gadget_disconnect(cdev->gadget);
+		status = usb_gadget_deactivate(cdev->gadget);
 	if (status == 0)
 		cdev->deactivations++;
 
@@ -311,7 +318,7 @@
 	else {
 		cdev->deactivations--;
 		if (cdev->deactivations == 0)
-			status = usb_gadget_connect(cdev->gadget);
+			status = usb_gadget_activate(cdev->gadget);
 	}
 
 	spin_unlock_irqrestore(&cdev->lock, flags);
@@ -896,7 +903,7 @@
 
 /* We support strings in multiple languages ... string descriptor zero
  * says which languages are supported.  The typical case will be that
- * only one language (probably English) is used, with I18N handled on
+ * only one language (probably English) is used, with i18n handled on
  * the host side.
  */
 
@@ -949,7 +956,7 @@
 	struct usb_function		*f;
 	int				len;
 
-	/* Yes, not only is USB's I18N support probably more than most
+	/* Yes, not only is USB's i18n support probably more than most
 	 * folk will ever care about ... also, it's all supported here.
 	 * (Except for UTF8 support for Unicode's "Astral Planes".)
 	 */
@@ -1534,6 +1541,32 @@
 				value = min(w_length, (u16) value);
 			}
 			break;
+		case USB_DT_OTG:
+			if (gadget_is_otg(gadget)) {
+				struct usb_configuration *config;
+				int otg_desc_len = 0;
+
+				if (cdev->config)
+					config = cdev->config;
+				else
+					config = list_first_entry(
+							&cdev->configs,
+						struct usb_configuration, list);
+				if (!config)
+					goto done;
+
+				if (gadget->otg_caps &&
+					(gadget->otg_caps->otg_rev >= 0x0200))
+					otg_desc_len += sizeof(
+						struct usb_otg20_descriptor);
+				else
+					otg_desc_len += sizeof(
+						struct usb_otg_descriptor);
+
+				value = min_t(int, w_length, otg_desc_len);
+				memcpy(req->buf, config->descriptors[0], value);
+			}
+			break;
 		}
 		break;
 
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 34e12fc..0fafa7a 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -20,6 +20,7 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 #include <linux/usb/composite.h>
+#include <linux/usb/otg.h>
 
 /**
  * usb_descriptor_fillbuf - fill buffer with descriptors
@@ -195,3 +196,58 @@
 	usb_free_descriptors(f->ss_descriptors);
 }
 EXPORT_SYMBOL_GPL(usb_free_all_descriptors);
+
+struct usb_descriptor_header *usb_otg_descriptor_alloc(
+				struct usb_gadget *gadget)
+{
+	struct usb_descriptor_header *otg_desc;
+	unsigned length = 0;
+
+	if (gadget->otg_caps && (gadget->otg_caps->otg_rev >= 0x0200))
+		length = sizeof(struct usb_otg20_descriptor);
+	else
+		length = sizeof(struct usb_otg_descriptor);
+
+	otg_desc = kzalloc(length, GFP_KERNEL);
+	return otg_desc;
+}
+EXPORT_SYMBOL_GPL(usb_otg_descriptor_alloc);
+
+int usb_otg_descriptor_init(struct usb_gadget *gadget,
+		struct usb_descriptor_header *otg_desc)
+{
+	struct usb_otg_descriptor *otg1x_desc;
+	struct usb_otg20_descriptor *otg20_desc;
+	struct usb_otg_caps *otg_caps = gadget->otg_caps;
+	u8 otg_attributes = 0;
+
+	if (!otg_desc)
+		return -EINVAL;
+
+	if (otg_caps && otg_caps->otg_rev) {
+		if (otg_caps->hnp_support)
+			otg_attributes |= USB_OTG_HNP;
+		if (otg_caps->srp_support)
+			otg_attributes |= USB_OTG_SRP;
+		if (otg_caps->adp_support && (otg_caps->otg_rev >= 0x0200))
+			otg_attributes |= USB_OTG_ADP;
+	} else {
+		otg_attributes = USB_OTG_SRP | USB_OTG_HNP;
+	}
+
+	if (otg_caps && (otg_caps->otg_rev >= 0x0200)) {
+		otg20_desc = (struct usb_otg20_descriptor *)otg_desc;
+		otg20_desc->bLength = sizeof(struct usb_otg20_descriptor);
+		otg20_desc->bDescriptorType = USB_DT_OTG;
+		otg20_desc->bmAttributes = otg_attributes;
+		otg20_desc->bcdOTG = cpu_to_le16(otg_caps->otg_rev);
+	} else {
+		otg1x_desc = (struct usb_otg_descriptor *)otg_desc;
+		otg1x_desc->bLength = sizeof(struct usb_otg_descriptor);
+		otg1x_desc->bDescriptorType = USB_DT_OTG;
+		otg1x_desc->bmAttributes = otg_attributes;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(usb_otg_descriptor_init);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 289e201..294eb74 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -41,6 +41,8 @@
 #define MAX_NAME_LEN	40
 #define MAX_USB_STRING_LANGS 2
 
+static const struct usb_descriptor_header *otg_desc[2];
+
 struct gadget_info {
 	struct config_group group;
 	struct config_group functions_group;
@@ -55,9 +57,6 @@
 	struct list_head available_func;
 
 	const char *udc_name;
-#ifdef CONFIG_USB_OTG
-	struct usb_otg_descriptor otg;
-#endif
 	struct usb_composite_driver composite;
 	struct usb_composite_dev cdev;
 	bool use_os_desc;
@@ -1376,6 +1375,19 @@
 		memcpy(cdev->qw_sign, gi->qw_sign, OS_STRING_QW_SIGN_LEN);
 	}
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc) {
+			ret = -ENOMEM;
+			goto err_comp_cleanup;
+		}
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	/* Go through all configs, attach all functions */
 	list_for_each_entry(c, &gi->cdev.configs, list) {
 		struct config_usb_cfg *cfg;
@@ -1383,6 +1395,9 @@
 		struct usb_function *tmp;
 		struct gadget_config_name *cn;
 
+		if (gadget_is_otg(gadget))
+			c->descriptors = otg_desc;
+
 		cfg = container_of(c, struct config_usb_cfg, c);
 		if (!list_empty(&cfg->string_list)) {
 			i = 0;
@@ -1437,6 +1452,8 @@
 	cdev = get_gadget_data(gadget);
 	gi = container_of(cdev, struct gadget_info, cdev);
 
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 	purge_configs_funcs(gi);
 	composite_dev_cleanup(cdev);
 	usb_ep_autoconfig_reset(cdev->gadget);
@@ -1510,12 +1527,6 @@
 	if (!gi->composite.gadget_driver.function)
 		goto err;
 
-#ifdef CONFIG_USB_OTG
-	gi->otg.bLength = sizeof(struct usb_otg_descriptor);
-	gi->otg.bDescriptorType = USB_DT_OTG;
-	gi->otg.bmAttributes = USB_OTG_SRP | USB_OTG_HNP;
-#endif
-
 	config_group_init_type_name(&gi->group, name,
 				&gadget_root_type);
 	return &gi->group;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 919cdfd..978435a 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -20,186 +20,6 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 
-#include "gadget_chips.h"
-
-/*
- * This should work with endpoints from controller drivers sharing the
- * same endpoint naming convention.  By example:
- *
- *	- ep1, ep2, ... address is fixed, not direction or type
- *	- ep1in, ep2out, ... address and direction are fixed, not type
- *	- ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
- *	- ep1in-bulk, ep2out-iso, ... all three are fixed
- *	- ep-* ... no functionality restrictions
- *
- * Type suffixes are "-bulk", "-iso", or "-int".  Numbers are decimal.
- * Less common restrictions are implied by gadget_is_*().
- *
- * NOTE:  each endpoint is unidirectional, as specified by its USB
- * descriptor; and isn't specific to a configuration or altsetting.
- */
-static int
-ep_matches (
-	struct usb_gadget		*gadget,
-	struct usb_ep			*ep,
-	struct usb_endpoint_descriptor	*desc,
-	struct usb_ss_ep_comp_descriptor *ep_comp
-)
-{
-	u8		type;
-	const char	*tmp;
-	u16		max;
-
-	int		num_req_streams = 0;
-
-	/* endpoint already claimed? */
-	if (NULL != ep->driver_data)
-		return 0;
-
-	/* only support ep0 for portable CONTROL traffic */
-	type = usb_endpoint_type(desc);
-	if (USB_ENDPOINT_XFER_CONTROL == type)
-		return 0;
-
-	/* some other naming convention */
-	if ('e' != ep->name[0])
-		return 0;
-
-	/* type-restriction:  "-iso", "-bulk", or "-int".
-	 * direction-restriction:  "in", "out".
-	 */
-	if ('-' != ep->name[2]) {
-		tmp = strrchr (ep->name, '-');
-		if (tmp) {
-			switch (type) {
-			case USB_ENDPOINT_XFER_INT:
-				/* bulk endpoints handle interrupt transfers,
-				 * except the toggle-quirky iso-synch kind
-				 */
-				if ('s' == tmp[2])	// == "-iso"
-					return 0;
-				/* for now, avoid PXA "interrupt-in";
-				 * it's documented as never using DATA1.
-				 */
-				if (gadget_is_pxa (gadget)
-						&& 'i' == tmp [1])
-					return 0;
-				break;
-			case USB_ENDPOINT_XFER_BULK:
-				if ('b' != tmp[1])	// != "-bulk"
-					return 0;
-				break;
-			case USB_ENDPOINT_XFER_ISOC:
-				if ('s' != tmp[2])	// != "-iso"
-					return 0;
-			}
-		} else {
-			tmp = ep->name + strlen (ep->name);
-		}
-
-		/* direction-restriction:  "..in-..", "out-.." */
-		tmp--;
-		if (!isdigit (*tmp)) {
-			if (desc->bEndpointAddress & USB_DIR_IN) {
-				if ('n' != *tmp)
-					return 0;
-			} else {
-				if ('t' != *tmp)
-					return 0;
-			}
-		}
-	}
-
-	/*
-	 * Get the number of required streams from the EP companion
-	 * descriptor and see if the EP matches it
-	 */
-	if (usb_endpoint_xfer_bulk(desc)) {
-		if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
-			num_req_streams = ep_comp->bmAttributes & 0x1f;
-			if (num_req_streams > ep->max_streams)
-				return 0;
-		}
-
-	}
-
-	/*
-	 * If the protocol driver hasn't yet decided on wMaxPacketSize
-	 * and wants to know the maximum possible, provide the info.
-	 */
-	if (desc->wMaxPacketSize == 0)
-		desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit);
-
-	/* endpoint maxpacket size is an input parameter, except for bulk
-	 * where it's an output parameter representing the full speed limit.
-	 * the usb spec fixes high speed bulk maxpacket at 512 bytes.
-	 */
-	max = 0x7ff & usb_endpoint_maxp(desc);
-	switch (type) {
-	case USB_ENDPOINT_XFER_INT:
-		/* INT:  limit 64 bytes full speed, 1024 high/super speed */
-		if (!gadget_is_dualspeed(gadget) && max > 64)
-			return 0;
-		/* FALLTHROUGH */
-
-	case USB_ENDPOINT_XFER_ISOC:
-		/* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
-		if (ep->maxpacket_limit < max)
-			return 0;
-		if (!gadget_is_dualspeed(gadget) && max > 1023)
-			return 0;
-
-		/* BOTH:  "high bandwidth" works only at high speed */
-		if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
-			if (!gadget_is_dualspeed(gadget))
-				return 0;
-			/* configure your hardware with enough buffering!! */
-		}
-		break;
-	}
-
-	/* MATCH!! */
-
-	/* report address */
-	desc->bEndpointAddress &= USB_DIR_IN;
-	if (isdigit (ep->name [2])) {
-		u8	num = simple_strtoul (&ep->name [2], NULL, 10);
-		desc->bEndpointAddress |= num;
-	} else if (desc->bEndpointAddress & USB_DIR_IN) {
-		if (++gadget->in_epnum > 15)
-			return 0;
-		desc->bEndpointAddress = USB_DIR_IN | gadget->in_epnum;
-	} else {
-		if (++gadget->out_epnum > 15)
-			return 0;
-		desc->bEndpointAddress |= gadget->out_epnum;
-	}
-
-	/* report (variable) full speed bulk maxpacket */
-	if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
-		int size = ep->maxpacket_limit;
-
-		/* min() doesn't work on bitfields with gcc-3.5 */
-		if (size > 64)
-			size = 64;
-		desc->wMaxPacketSize = cpu_to_le16(size);
-	}
-	ep->address = desc->bEndpointAddress;
-	return 1;
-}
-
-static struct usb_ep *
-find_ep (struct usb_gadget *gadget, const char *name)
-{
-	struct usb_ep	*ep;
-
-	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
-		if (0 == strcmp (ep->name, name))
-			return ep;
-	}
-	return NULL;
-}
-
 /**
  * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
  * descriptor and ep companion descriptor
@@ -240,7 +60,7 @@
  * updated with the assigned number of streams if it is
  * different from the original value. To prevent the endpoint
  * from being returned by a later autoconfig call, claim it by
- * assigning ep->driver_data to some non-null value.
+ * assigning ep->claimed to true.
  *
  * On failure, this returns a null endpoint descriptor.
  */
@@ -255,74 +75,58 @@
 
 	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
 
-	/* First, apply chip-specific "best usage" knowledge.
-	 * This might make a good usb_gadget_ops hook ...
-	 */
-	if (gadget_is_net2280(gadget)) {
-		char name[8];
-
-		if (type == USB_ENDPOINT_XFER_INT) {
-			/* ep-e, ep-f are PIO with only 64 byte fifos */
-			ep = find_ep(gadget, "ep-e");
-			if (ep && ep_matches(gadget, ep, desc, ep_comp))
-				goto found_ep;
-			ep = find_ep(gadget, "ep-f");
-			if (ep && ep_matches(gadget, ep, desc, ep_comp))
-				goto found_ep;
-		}
-
-		/* USB3380: use same address for usb and hardware endpoints */
-		snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
-				usb_endpoint_dir_in(desc) ? "in" : "out");
-		ep = find_ep(gadget, name);
-		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+	if (gadget->ops->match_ep) {
+		ep = gadget->ops->match_ep(gadget, desc, ep_comp);
+		if (ep)
 			goto found_ep;
-	} else if (gadget_is_goku (gadget)) {
-		if (USB_ENDPOINT_XFER_INT == type) {
-			/* single buffering is enough */
-			ep = find_ep(gadget, "ep3-bulk");
-			if (ep && ep_matches(gadget, ep, desc, ep_comp))
-				goto found_ep;
-		} else if (USB_ENDPOINT_XFER_BULK == type
-				&& (USB_DIR_IN & desc->bEndpointAddress)) {
-			/* DMA may be available */
-			ep = find_ep(gadget, "ep2-bulk");
-			if (ep && ep_matches(gadget, ep, desc,
-					      ep_comp))
-				goto found_ep;
-		}
-
-#ifdef CONFIG_BLACKFIN
-	} else if (gadget_is_musbhdrc(gadget)) {
-		if ((USB_ENDPOINT_XFER_BULK == type) ||
-		    (USB_ENDPOINT_XFER_ISOC == type)) {
-			if (USB_DIR_IN & desc->bEndpointAddress)
-				ep = find_ep (gadget, "ep5in");
-			else
-				ep = find_ep (gadget, "ep6out");
-		} else if (USB_ENDPOINT_XFER_INT == type) {
-			if (USB_DIR_IN & desc->bEndpointAddress)
-				ep = find_ep(gadget, "ep1in");
-			else
-				ep = find_ep(gadget, "ep2out");
-		} else
-			ep = NULL;
-		if (ep && ep_matches(gadget, ep, desc, ep_comp))
-			goto found_ep;
-#endif
 	}
 
 	/* Second, look at endpoints until an unclaimed one looks usable */
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
-		if (ep_matches(gadget, ep, desc, ep_comp))
+		if (usb_gadget_ep_match_desc(gadget, ep, desc, ep_comp))
 			goto found_ep;
 	}
 
 	/* Fail */
 	return NULL;
 found_ep:
+
+	/*
+	 * If the protocol driver hasn't yet decided on wMaxPacketSize
+	 * and wants to know the maximum possible, provide the info.
+	 */
+	if (desc->wMaxPacketSize == 0)
+		desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket_limit);
+
+	/* report address */
+	desc->bEndpointAddress &= USB_DIR_IN;
+	if (isdigit(ep->name[2])) {
+		u8 num = simple_strtoul(&ep->name[2], NULL, 10);
+		desc->bEndpointAddress |= num;
+	} else if (desc->bEndpointAddress & USB_DIR_IN) {
+		if (++gadget->in_epnum > 15)
+			return NULL;
+		desc->bEndpointAddress = USB_DIR_IN | gadget->in_epnum;
+	} else {
+		if (++gadget->out_epnum > 15)
+			return NULL;
+		desc->bEndpointAddress |= gadget->out_epnum;
+	}
+
+	/* report (variable) full speed bulk maxpacket */
+	if ((type == USB_ENDPOINT_XFER_BULK) && !ep_comp) {
+		int size = ep->maxpacket_limit;
+
+		/* min() doesn't work on bitfields with gcc-3.5 */
+		if (size > 64)
+			size = 64;
+		desc->wMaxPacketSize = cpu_to_le16(size);
+	}
+
+	ep->address = desc->bEndpointAddress;
 	ep->desc = NULL;
 	ep->comp_desc = NULL;
+	ep->claimed = true;
 	return ep;
 }
 EXPORT_SYMBOL_GPL(usb_ep_autoconfig_ss);
@@ -354,7 +158,7 @@
  * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
  * is initialized as if the endpoint were used at full speed.  To prevent
  * the endpoint from being returned by a later autoconfig call, claim it
- * by assigning ep->driver_data to some non-null value.
+ * by assigning ep->claimed to true.
  *
  * On failure, this returns a null endpoint descriptor.
  */
@@ -373,7 +177,7 @@
  *
  * Use this for devices where one configuration may need to assign
  * endpoint resources very differently from the next one.  It clears
- * state such as ep->driver_data and the record of assigned endpoints
+ * state such as ep->claimed and the record of assigned endpoints
  * used by usb_ep_autoconfig().
  */
 void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
@@ -381,7 +185,7 @@
 	struct usb_ep	*ep;
 
 	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
-		ep->driver_data = NULL;
+		ep->claimed = false;
 	}
 	gadget->in_epnum = 0;
 	gadget->out_epnum = 0;
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index aad8165..be9df09 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -21,7 +21,6 @@
 #include <linux/err.h>
 
 #include "u_serial.h"
-#include "gadget_chips.h"
 
 
 /*
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index 798760f..7b7424f 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -585,8 +585,8 @@
 			/* Enable zlps by default for ECM conformance;
 			 * override for musb_hdrc (avoids txdma ovhead).
 			 */
-			ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
-				);
+			ecm->port.is_zlp_ok =
+				gadget_is_zlp_supported(cdev->gadget);
 			ecm->port.cdc_filter = DEFAULT_FILTER;
 			DBG(cdev, "activate ecm\n");
 			net = gether_connect(&ecm->port);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6e7be91..adc6d52 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2897,11 +2897,17 @@
 			 struct usb_function *f)
 {
 	struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
+	struct ffs_function *func = ffs_func_from_usb(f);
+	int ret;
 
 	if (IS_ERR(ffs_opts))
 		return PTR_ERR(ffs_opts);
 
-	return _ffs_func_bind(c, f);
+	ret = _ffs_func_bind(c, f);
+	if (ret && !--ffs_opts->refcnt)
+		functionfs_unbind(func->ffs);
+
+	return ret;
 }
 
 
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index 39f49f1..6e2fe63 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -28,11 +28,6 @@
  * This takes messages of various sizes written OUT to a device, and loops
  * them back so they can be read IN from it.  It has been used by certain
  * test applications.  It supports limited testing of data queueing logic.
- *
- *
- * This is currently packaged as a configuration driver, which can't be
- * combined with other functions to make composite devices.  However, it
- * can be combined with other independent configurations.
  */
 struct f_loopback {
 	struct usb_function	function;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index f936268..a6eb537 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -54,7 +54,7 @@
  * following fields:
  *
  *	nluns		Number of LUNs function have (anywhere from 1
- *				to FSG_MAX_LUNS which is 8).
+ *				to FSG_MAX_LUNS).
  *	luns		An array of LUN configuration values.  This
  *				should be filled for each LUN that
  *				function will include (ie. for "nluns"
@@ -214,12 +214,12 @@
 #include <linux/string.h>
 #include <linux/freezer.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 #include <linux/usb/composite.h>
 
-#include "gadget_chips.h"
 #include "configfs.h"
 
 
@@ -279,9 +279,8 @@
 	int			cmnd_size;
 	u8			cmnd[MAX_COMMAND_SIZE];
 
-	unsigned int		nluns;
 	unsigned int		lun;
-	struct fsg_lun		**luns;
+	struct fsg_lun		*luns[FSG_MAX_LUNS];
 	struct fsg_lun		*curlun;
 
 	unsigned int		bulk_out_maxpacket;
@@ -490,6 +489,16 @@
 	spin_unlock(&common->lock);
 }
 
+static int _fsg_common_get_max_lun(struct fsg_common *common)
+{
+	int i = ARRAY_SIZE(common->luns) - 1;
+
+	while (i >= 0 && !common->luns[i])
+		--i;
+
+	return i;
+}
+
 static int fsg_setup(struct usb_function *f,
 		     const struct usb_ctrlrequest *ctrl)
 {
@@ -533,7 +542,7 @@
 				w_length != 1)
 			return -EDOM;
 		VDBG(fsg, "get max LUN\n");
-		*(u8 *)req->buf = fsg->common->nluns - 1;
+		*(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
 
 		/* Respond with data/status */
 		req->length = min((u16)1, w_length);
@@ -2131,8 +2140,9 @@
 	}
 
 	/* Is the CBW meaningful? */
-	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
-			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+	if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
+	    cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
+	    cbw->Length > MAX_COMMAND_SIZE) {
 		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
 				"cmdlen %u\n",
 				cbw->Lun, cbw->Flags, cbw->Length);
@@ -2159,7 +2169,7 @@
 	if (common->data_size == 0)
 		common->data_dir = DATA_DIR_NONE;
 	common->lun = cbw->Lun;
-	if (common->lun < common->nluns)
+	if (common->lun < ARRAY_SIZE(common->luns))
 		common->curlun = common->luns[common->lun];
 	else
 		common->curlun = NULL;
@@ -2307,7 +2317,7 @@
 	}
 
 	common->running = 1;
-	for (i = 0; i < common->nluns; ++i)
+	for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
 		if (common->luns[i])
 			common->luns[i]->unit_attention_data =
 				SS_RESET_OCCURRED;
@@ -2409,7 +2419,7 @@
 	if (old_state == FSG_STATE_ABORT_BULK_OUT)
 		common->state = FSG_STATE_STATUS_PHASE;
 	else {
-		for (i = 0; i < common->nluns; ++i) {
+		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
 			curlun = common->luns[i];
 			if (!curlun)
 				continue;
@@ -2453,7 +2463,7 @@
 		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
 		 * CONFIG_CHANGE cases.
 		 */
-		/* for (i = 0; i < common->nluns; ++i) */
+		/* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */
 		/*	if (common->luns[i]) */
 		/*		common->luns[i]->unit_attention_data = */
 		/*			SS_RESET_OCCURRED;  */
@@ -2552,12 +2562,11 @@
 
 	if (!common->ops || !common->ops->thread_exits
 	 || common->ops->thread_exits(common) < 0) {
-		struct fsg_lun **curlun_it = common->luns;
-		unsigned i = common->nluns;
+		int i;
 
 		down_write(&common->filesem);
-		for (; i--; ++curlun_it) {
-			struct fsg_lun *curlun = *curlun_it;
+		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+			struct fsg_lun *curlun = common->luns[i];
 			if (!curlun || !fsg_lun_is_open(curlun))
 				continue;
 
@@ -2676,6 +2685,7 @@
 	init_completion(&common->thread_notifier);
 	init_waitqueue_head(&common->fsg_wait);
 	common->state = FSG_STATE_TERMINATED;
+	memset(common->luns, 0, sizeof(common->luns));
 
 	return common;
 }
@@ -2742,9 +2752,9 @@
 }
 EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
 
-void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs)
+void fsg_common_remove_lun(struct fsg_lun *lun)
 {
-	if (sysfs)
+	if (device_is_registered(&lun->dev))
 		device_unregister(&lun->dev);
 	fsg_lun_close(lun);
 	kfree(lun);
@@ -2757,48 +2767,16 @@
 
 	for (i = 0; i < n; ++i)
 		if (common->luns[i]) {
-			fsg_common_remove_lun(common->luns[i], common->sysfs);
+			fsg_common_remove_lun(common->luns[i]);
 			common->luns[i] = NULL;
 		}
 }
-EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
 
 void fsg_common_remove_luns(struct fsg_common *common)
 {
-	_fsg_common_remove_luns(common, common->nluns);
+	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
 }
-
-void fsg_common_free_luns(struct fsg_common *common)
-{
-	fsg_common_remove_luns(common);
-	kfree(common->luns);
-	common->luns = NULL;
-}
-EXPORT_SYMBOL_GPL(fsg_common_free_luns);
-
-int fsg_common_set_nluns(struct fsg_common *common, int nluns)
-{
-	struct fsg_lun **curlun;
-
-	/* Find out how many LUNs there should be */
-	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
-		pr_err("invalid number of LUNs: %u\n", nluns);
-		return -EINVAL;
-	}
-
-	curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
-	if (unlikely(!curlun))
-		return -ENOMEM;
-
-	if (common->luns)
-		fsg_common_free_luns(common);
-
-	common->luns = curlun;
-	common->nluns = nluns;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
+EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
 
 void fsg_common_set_ops(struct fsg_common *common,
 			const struct fsg_operations *ops)
@@ -2836,7 +2814,8 @@
 	 * halt bulk endpoints correctly.  If one of them is present,
 	 * disable stalls.
 	 */
-	common->can_stall = can_stall && !(gadget_is_at91(common->gadget));
+	common->can_stall = can_stall &&
+			gadget_is_stall_supported(common->gadget);
 
 	return 0;
 }
@@ -2880,7 +2859,7 @@
 	char *pathbuf, *p;
 	int rc = -ENOMEM;
 
-	if (!common->nluns || !common->luns)
+	if (id >= ARRAY_SIZE(common->luns))
 		return -ENODEV;
 
 	if (common->luns[id])
@@ -2949,7 +2928,7 @@
 	return 0;
 
 error_lun:
-	if (common->sysfs)
+	if (device_is_registered(&lun->dev))
 		device_unregister(&lun->dev);
 	fsg_lun_close(lun);
 	common->luns[id] = NULL;
@@ -2964,14 +2943,16 @@
 	char buf[8]; /* enough for 100000000 different numbers, decimal */
 	int i, rc;
 
-	for (i = 0; i < common->nluns; ++i) {
+	fsg_common_remove_luns(common);
+
+	for (i = 0; i < cfg->nluns; ++i) {
 		snprintf(buf, sizeof(buf), "lun%d", i);
 		rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
 		if (rc)
 			goto fail;
 	}
 
-	pr_info("Number of LUNs=%d\n", common->nluns);
+	pr_info("Number of LUNs=%d\n", cfg->nluns);
 
 	return 0;
 
@@ -3020,6 +3001,7 @@
 static void fsg_common_release(struct kref *ref)
 {
 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
+	int i;
 
 	/* If the thread isn't already dead, tell it to exit now */
 	if (common->state != FSG_STATE_TERMINATED) {
@@ -3027,22 +3009,14 @@
 		wait_for_completion(&common->thread_notifier);
 	}
 
-	if (likely(common->luns)) {
-		struct fsg_lun **lun_it = common->luns;
-		unsigned i = common->nluns;
-
-		/* In error recovery common->nluns may be zero. */
-		for (; i; --i, ++lun_it) {
-			struct fsg_lun *lun = *lun_it;
-			if (!lun)
-				continue;
-			fsg_lun_close(lun);
-			if (common->sysfs)
-				device_unregister(&lun->dev);
-			kfree(lun);
-		}
-
-		kfree(common->luns);
+	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+		struct fsg_lun *lun = common->luns[i];
+		if (!lun)
+			continue;
+		fsg_lun_close(lun);
+		if (device_is_registered(&lun->dev))
+			device_unregister(&lun->dev);
+		kfree(lun);
 	}
 
 	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
@@ -3056,6 +3030,7 @@
 static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct fsg_common	*common = fsg->common;
 	struct usb_gadget	*gadget = c->cdev->gadget;
 	int			i;
 	struct usb_ep		*ep;
@@ -3063,6 +3038,13 @@
 	int			ret;
 	struct fsg_opts		*opts;
 
+	/* Don't allow to bind if we don't have at least one LUN */
+	ret = _fsg_common_get_max_lun(common);
+	if (ret < 0) {
+		pr_err("There should be at least one LUN.\n");
+		return -EINVAL;
+	}
+
 	opts = fsg_opts_from_func_inst(f->fi);
 	if (!opts->no_configfs) {
 		ret = fsg_common_set_cdev(fsg->common, c->cdev,
@@ -3080,7 +3062,7 @@
 	/* New interface */
 	i = usb_interface_id(c, f);
 	if (i < 0)
-		return i;
+		goto fail;
 	fsg_intf_desc.bInterfaceNumber = i;
 	fsg->interface_number = i;
 
@@ -3123,7 +3105,14 @@
 
 autoconf_fail:
 	ERROR(fsg, "unable to autoconfigure all endpoints\n");
-	return -ENOTSUPP;
+	i = -ENOTSUPP;
+fail:
+	/* terminate the thread */
+	if (fsg->common->state != FSG_STATE_TERMINATED) {
+		raise_exception(fsg->common, FSG_STATE_EXIT);
+		wait_for_completion(&fsg->common->thread_notifier);
+	}
+	return i;
 }
 
 /****************************** ALLOCATE FUNCTION *************************/
@@ -3355,7 +3344,7 @@
 		unregister_gadget_item(gadget);
 	}
 
-	fsg_common_remove_lun(lun_opts->lun, fsg_opts->common->sysfs);
+	fsg_common_remove_lun(lun_opts->lun);
 	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
 	lun_opts->lun_id = 0;
 	mutex_unlock(&fsg_opts->lock);
@@ -3509,14 +3498,11 @@
 		rc = PTR_ERR(opts->common);
 		goto release_opts;
 	}
-	rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS);
-	if (rc)
-		goto release_opts;
 
 	rc = fsg_common_set_num_buffers(opts->common,
 					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
 	if (rc)
-		goto release_luns;
+		goto release_opts;
 
 	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
 
@@ -3524,6 +3510,9 @@
 	config.removable = true;
 	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
 			(const char **)&opts->func_inst.group.cg_item.ci_name);
+	if (rc)
+		goto release_buffers;
+
 	opts->lun0.lun = opts->common->luns[0];
 	opts->lun0.lun_id = 0;
 	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
@@ -3534,8 +3523,8 @@
 
 	return &opts->func_inst;
 
-release_luns:
-	kfree(opts->common->luns);
+release_buffers:
+	fsg_common_free_buffers(opts->common);
 release_opts:
 	kfree(opts);
 	return ERR_PTR(rc);
@@ -3561,23 +3550,12 @@
 	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
 	struct fsg_common *common = opts->common;
 	struct fsg_dev *fsg;
-	unsigned nluns, i;
 
 	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
 	if (unlikely(!fsg))
 		return ERR_PTR(-ENOMEM);
 
 	mutex_lock(&opts->lock);
-	if (!opts->refcnt) {
-		for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
-			if (common->luns[i])
-				nluns = i + 1;
-		if (!nluns)
-			pr_warn("No LUNS defined, continuing anyway\n");
-		else
-			common->nluns = nluns;
-		pr_info("Number of LUNs=%u\n", common->nluns);
-	}
 	opts->refcnt++;
 	mutex_unlock(&opts->lock);
 
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index b4866fc..445df67 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -137,14 +137,10 @@
 int fsg_common_set_cdev(struct fsg_common *common,
 			struct usb_composite_dev *cdev, bool can_stall);
 
-void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs);
+void fsg_common_remove_lun(struct fsg_lun *lun);
 
 void fsg_common_remove_luns(struct fsg_common *common);
 
-void fsg_common_free_luns(struct fsg_common *common);
-
-int fsg_common_set_nluns(struct fsg_common *common, int nluns);
-
 void fsg_common_set_ops(struct fsg_common *common,
 			const struct fsg_operations *ops);
 
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index ad50a67..a287a48 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -329,6 +329,10 @@
 	unsigned i;
 	int err;
 
+	/* For Control Device interface we do nothing */
+	if (intf == 0)
+		return 0;
+
 	err = f_midi_start_ep(midi, f, midi->in_ep);
 	if (err)
 		return err;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index bdcda9f..3f05c6bd 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -853,9 +853,8 @@
 			/* Enable zlps by default for NCM conformance;
 			 * override for musb_hdrc (avoids txdma ovhead)
 			 */
-			ncm->port.is_zlp_ok = !(
-				gadget_is_musbhdrc(cdev->gadget)
-				);
+			ncm->port.is_zlp_ok =
+				gadget_is_zlp_supported(cdev->gadget);
 			ncm->port.cdc_filter = DEFAULT_FILTER;
 			DBG(cdev, "activate ncm\n");
 			net = gether_connect(&ncm->port);
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index a1b79c5..5460426 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -20,7 +20,6 @@
 #include <linux/module.h>
 
 #include "u_serial.h"
-#include "gadget_chips.h"
 
 
 /*
@@ -37,7 +36,6 @@
 	u8				data_id;
 	u8				cur_alt;
 	u8				port_num;
-	u8				can_activate;
 };
 
 static inline struct f_obex *func_to_obex(struct usb_function *f)
@@ -268,9 +266,6 @@
 	struct usb_composite_dev *cdev = g->func.config->cdev;
 	int			status;
 
-	if (!obex->can_activate)
-		return;
-
 	status = usb_function_activate(&g->func);
 	if (status)
 		dev_dbg(&cdev->gadget->dev,
@@ -284,9 +279,6 @@
 	struct usb_composite_dev *cdev = g->func.config->cdev;
 	int			status;
 
-	if (!obex->can_activate)
-		return;
-
 	status = usb_function_deactivate(&g->func);
 	if (status)
 		dev_dbg(&cdev->gadget->dev,
@@ -304,7 +296,7 @@
 	 *
 	 * Altsettings are mandatory, however...
 	 */
-	if (!gadget_supports_altsettings(c->cdev->gadget))
+	if (!gadget_is_altset_supported(c->cdev->gadget))
 		return false;
 
 	/* everything else is *probably* fine ... */
@@ -378,17 +370,6 @@
 	if (status)
 		goto fail;
 
-	/* Avoid letting this gadget enumerate until the userspace
-	 * OBEX server is active.
-	 */
-	status = usb_function_deactivate(f);
-	if (status < 0)
-		WARNING(cdev, "obex ttyGS%d: can't prevent enumeration, %d\n",
-			obex->port_num, status);
-	else
-		obex->can_activate = true;
-
-
 	dev_dbg(&cdev->gadget->dev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n",
 		obex->port_num,
 		gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
@@ -529,6 +510,7 @@
 	obex->port.func.get_alt = obex_get_alt;
 	obex->port.func.disable = obex_disable;
 	obex->port.func.free_func = obex_free;
+	obex->port.func.bind_deactivated = true;
 
 	return &obex->port.func;
 }
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 357f63f..8e2b6be 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -804,6 +804,8 @@
 
 static void printer_reset_interface(struct printer_dev *dev)
 {
+	unsigned long	flags;
+
 	if (dev->interface < 0)
 		return;
 
@@ -815,9 +817,11 @@
 	if (dev->out_ep->desc)
 		usb_ep_disable(dev->out_ep);
 
+	spin_lock_irqsave(&dev->lock, flags);
 	dev->in_ep->desc = NULL;
 	dev->out_ep->desc = NULL;
 	dev->interface = -1;
+	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 /* Change our operational Interface. */
@@ -1131,13 +1135,10 @@
 static void printer_func_disable(struct usb_function *f)
 {
 	struct printer_dev *dev = func_to_printer(f);
-	unsigned long		flags;
 
 	DBG(dev, "%s\n", __func__);
 
-	spin_lock_irqsave(&dev->lock, flags);
 	printer_reset_interface(dev);
-	spin_unlock_irqrestore(&dev->lock, flags);
 }
 
 static inline struct f_printer_opts
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index 2e02dfa..1d162e2 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -16,7 +16,6 @@
 #include <linux/device.h>
 
 #include "u_serial.h"
-#include "gadget_chips.h"
 
 
 /*
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 3a5ae99..cbfaf86 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -20,7 +20,6 @@
 #include <linux/err.h>
 
 #include "g_zero.h"
-#include "gadget_chips.h"
 #include "u_f.h"
 
 /*
@@ -42,11 +41,6 @@
  * queues are relatively independent, will receive a range of packet sizes,
  * and can often be made to run out completely.  Those issues are important
  * when stress testing peripheral controller drivers.
- *
- *
- * This is currently packaged as a configuration driver, which can't be
- * combined with other functions to make composite devices.  However, it
- * can be combined with other independent configurations.
  */
 struct f_sourcesink {
 	struct usb_function	function;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 5318615..f8de7ea 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -975,6 +975,29 @@
 			"%s:%d Error!\n", __func__, __LINE__);
 }
 
+static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+	struct usb_endpoint_descriptor *ep_desc,
+	unsigned int factor, bool is_playback)
+{
+	int chmask, srate, ssize;
+	u16 max_packet_size;
+
+	if (is_playback) {
+		chmask = uac2_opts->p_chmask;
+		srate = uac2_opts->p_srate;
+		ssize = uac2_opts->p_ssize;
+	} else {
+		chmask = uac2_opts->c_chmask;
+		srate = uac2_opts->c_srate;
+		ssize = uac2_opts->c_ssize;
+	}
+
+	max_packet_size = num_channels(chmask) * ssize *
+		DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
+	ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
+				le16_to_cpu(ep_desc->wMaxPacketSize)));
+}
+
 static int
 afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 {
@@ -1070,10 +1093,14 @@
 	uac2->p_prm.uac2 = uac2;
 	uac2->c_prm.uac2 = uac2;
 
+	/* Calculate wMaxPacketSize according to audio bandwidth */
+	set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
+	set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
+	set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
+	set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+
 	hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
-	hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize;
 	hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
-	hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize;
 
 	ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
 	if (ret)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index cf0df8f..743be34 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -733,12 +733,6 @@
 	uvc->control_req->complete = uvc_function_ep0_complete;
 	uvc->control_req->context = uvc;
 
-	/* Avoid letting this gadget enumerate until the userspace server is
-	 * active.
-	 */
-	if ((ret = usb_function_deactivate(f)) < 0)
-		goto error;
-
 	if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) {
 		printk(KERN_INFO "v4l2_device_register failed\n");
 		goto error;
@@ -949,6 +943,7 @@
 	uvc->func.disable = uvc_function_disable;
 	uvc->func.setup = uvc_function_setup;
 	uvc->func.free_func = uvc_free;
+	uvc->func.bind_deactivated = true;
 
 	return &uvc->func;
 }
diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h
index 70c8914..c3544e6 100644
--- a/drivers/usb/gadget/function/storage_common.h
+++ b/drivers/usb/gadget/function/storage_common.h
@@ -123,7 +123,7 @@
 #define FSG_BUFLEN	((u32)16384)
 
 /* Maximal number of LUNs supported in mass storage function */
-#define FSG_MAX_LUNS	8
+#define FSG_MAX_LUNS	16
 
 enum fsg_buffer_state {
 	BUF_STATE_EMPTY = 0,
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 334b389..c77145b 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -20,8 +20,6 @@
 #include <linux/usb/cdc.h>
 #include <linux/netdevice.h>
 
-#include "gadget_chips.h"
-
 #define QMULT_DEFAULT 5
 
 /*
@@ -259,7 +257,7 @@
 /* Some controllers can't support CDC Ethernet (ECM) ... */
 static inline bool can_support_ecm(struct usb_gadget *gadget)
 {
-	if (!gadget_supports_altsettings(gadget))
+	if (!gadget_is_altset_supported(gadget))
 		return false;
 
 	/* Everything else is *presumably* fine ... but this is a bit
diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h
index fe386df..5c2ac8e 100644
--- a/drivers/usb/gadget/function/u_uac1.h
+++ b/drivers/usb/gadget/function/u_uac1.h
@@ -21,8 +21,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 
-#include "gadget_chips.h"
-
 #define FILE_PCM_PLAYBACK	"/dev/snd/pcmC0D0p"
 #define FILE_PCM_CAPTURE	"/dev/snd/pcmC0D0c"
 #define FILE_CONTROL		"/dev/snd/controlC0"
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index d5a7102..4d682ad 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -339,6 +339,7 @@
 config USB_G_NOKIA
 	tristate "Nokia composite gadget"
 	depends on PHONET
+	depends on BLOCK
 	select USB_LIBCOMPOSITE
 	select USB_U_SERIAL
 	select USB_U_ETHER
@@ -346,6 +347,7 @@
 	select USB_F_OBEX
 	select USB_F_PHONET
 	select USB_F_ECM
+	select USB_F_MASS_STORAGE
 	help
 	  The Nokia composite gadget provides support for acm, obex
 	  and phonet in only one composite gadget driver.
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 1194b09..4b158e2 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -58,21 +58,7 @@
 	/*.bNumConfigurations =	DYNAMIC*/
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/*
-	 * REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 /* string IDs are assigned dynamically */
 static struct usb_string strings_dev[] = {
@@ -200,10 +186,6 @@
 	if (status)
 		goto fail;
 
-	status = fsg_common_set_nluns(opts->common, config.nluns);
-	if (status)
-		goto fail_set_nluns;
-
 	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
 	if (status)
 		goto fail_set_cdev;
@@ -225,10 +207,21 @@
 	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc)
+			goto fail_string_ids;
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	/* register our configuration */
 	status = usb_add_config(cdev, &acm_ms_config_driver, acm_ms_do_config);
 	if (status < 0)
-		goto fail_string_ids;
+		goto fail_otg_desc;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -236,11 +229,12 @@
 	return 0;
 
 	/* error recovery */
+fail_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail_string_ids:
 	fsg_common_remove_luns(opts->common);
 fail_set_cdev:
-	fsg_common_free_luns(opts->common);
-fail_set_nluns:
 	fsg_common_free_buffers(opts->common);
 fail:
 	usb_put_function_instance(fi_msg);
@@ -255,6 +249,9 @@
 	usb_put_function_instance(fi_msg);
 	usb_put_function(f_acm);
 	usb_put_function_instance(f_acm_inst);
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/audio.c b/drivers/usb/gadget/legacy/audio.c
index f289caf..685cf3b 100644
--- a/drivers/usb/gadget/legacy/audio.c
+++ b/drivers/usb/gadget/legacy/audio.c
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/usb/composite.h>
 
-#include "gadget_chips.h"
 #define DRIVER_DESC		"Linux USB Audio Gadget"
 #define DRIVER_VERSION		"Feb 2, 2012"
 
@@ -124,7 +123,7 @@
 	.bLength =		sizeof device_desc,
 	.bDescriptorType =	USB_DT_DEVICE,
 
-	.bcdUSB =		__constant_cpu_to_le16(0x200),
+	.bcdUSB =		cpu_to_le16(0x200),
 
 #ifdef CONFIG_GADGET_UAC1
 	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
@@ -141,8 +140,8 @@
 	 * we support.  (As does bNumConfigurations.)  These values can
 	 * also be overridden by module parameters.
 	 */
-	.idVendor =		__constant_cpu_to_le16(AUDIO_VENDOR_NUM),
-	.idProduct =		__constant_cpu_to_le16(AUDIO_PRODUCT_NUM),
+	.idVendor =		cpu_to_le16(AUDIO_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(AUDIO_PRODUCT_NUM),
 	/* .bcdDevice = f(hardware) */
 	/* .iManufacturer = DYNAMIC */
 	/* .iProduct = DYNAMIC */
@@ -150,20 +149,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 /*-------------------------------------------------------------------------*/
 
@@ -259,14 +245,28 @@
 	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+		if (!usb_desc)
+			goto fail;
+		usb_otg_descriptor_init(cdev->gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	status = usb_add_config(cdev, &audio_config_driver, audio_do_config);
 	if (status < 0)
-		goto fail;
+		goto fail_otg_desc;
 	usb_composite_overwrite_options(cdev, &coverwrite);
 
 	INFO(cdev, "%s, version: %s\n", DRIVER_DESC, DRIVER_VERSION);
 	return 0;
 
+fail_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail:
 #ifndef CONFIG_GADGET_UAC1
 	usb_put_function_instance(fi_uac2);
@@ -289,6 +289,9 @@
 	if (!IS_ERR_OR_NULL(fi_uac2))
 		usb_put_function_instance(fi_uac2);
 #endif
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/cdc2.c b/drivers/usb/gadget/legacy/cdc2.c
index afd3e37..ecd8c8d 100644
--- a/drivers/usb/gadget/legacy/cdc2.c
+++ b/drivers/usb/gadget/legacy/cdc2.c
@@ -60,21 +60,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
-
+static const struct usb_descriptor_header *otg_desc[2];
 
 /* string IDs are assigned dynamically */
 static struct usb_string strings_dev[] = {
@@ -193,10 +179,21 @@
 	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc)
+			goto fail1;
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	/* register our configuration */
 	status = usb_add_config(cdev, &cdc_config_driver, cdc_do_config);
 	if (status < 0)
-		goto fail1;
+		goto fail2;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -204,6 +201,9 @@
 
 	return 0;
 
+fail2:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail1:
 	usb_put_function_instance(fi_serial);
 fail:
@@ -219,6 +219,9 @@
 		usb_put_function(f_ecm);
 	if (!IS_ERR_OR_NULL(fi_ecm))
 		usb_put_function_instance(fi_ecm);
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
index 204b10b..5231a32 100644
--- a/drivers/usb/gadget/legacy/dbgp.c
+++ b/drivers/usb/gadget/legacy/dbgp.c
@@ -35,10 +35,10 @@
 static struct usb_device_descriptor device_desc = {
 	.bLength = sizeof device_desc,
 	.bDescriptorType = USB_DT_DEVICE,
-	.bcdUSB = __constant_cpu_to_le16(0x0200),
+	.bcdUSB = cpu_to_le16(0x0200),
 	.bDeviceClass =	USB_CLASS_VENDOR_SPEC,
-	.idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_ID),
-	.idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_ID),
+	.idVendor = cpu_to_le16(DRIVER_VENDOR_ID),
+	.idProduct = cpu_to_le16(DRIVER_PRODUCT_ID),
 	.bNumConfigurations = 1,
 };
 
@@ -251,7 +251,7 @@
 
 	dbgp.i_ep->driver_data = gadget;
 	i_desc.wMaxPacketSize =
-		__constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+		cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
 
 	dbgp.o_ep = usb_ep_autoconfig(gadget, &o_desc);
 	if (!dbgp.o_ep) {
@@ -262,7 +262,7 @@
 
 	dbgp.o_ep->driver_data = gadget;
 	o_desc.wMaxPacketSize =
-		__constant_cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
+		cpu_to_le16(USB_DEBUG_MAX_PACKET_SIZE);
 
 	dbg_desc.bDebugInEndpoint = i_desc.bEndpointAddress;
 	dbg_desc.bDebugOutEndpoint = o_desc.bEndpointAddress;
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index a3323dc..31e9160 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -171,20 +171,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 static struct usb_string strings_dev[] = {
 	[USB_GADGET_MANUFACTURER_IDX].s = "",
@@ -416,17 +403,28 @@
 	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc)
+			goto fail1;
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	/* register our configuration(s); RNDIS first, if it's used */
 	if (has_rndis()) {
 		status = usb_add_config(cdev, &rndis_config_driver,
 				rndis_do_config);
 		if (status < 0)
-			goto fail1;
+			goto fail2;
 	}
 
 	status = usb_add_config(cdev, &eth_config_driver, eth_do_config);
 	if (status < 0)
-		goto fail1;
+		goto fail2;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&gadget->dev, "%s, version: " DRIVER_VERSION "\n",
@@ -434,6 +432,9 @@
 
 	return 0;
 
+fail2:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail1:
 	if (has_rndis())
 		usb_put_function_instance(fi_rndis);
@@ -463,6 +464,9 @@
 		usb_put_function(f_geth);
 		usb_put_function_instance(fi_geth);
 	}
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/g_ffs.c b/drivers/usb/gadget/legacy/g_ffs.c
index e821931..320a81b 100644
--- a/drivers/usb/gadget/legacy/g_ffs.c
+++ b/drivers/usb/gadget/legacy/g_ffs.c
@@ -88,21 +88,7 @@
 module_param_array_named(functions, func_names, charp, &func_num, 0);
 MODULE_PARM_DESC(functions, "USB Functions list");
 
-static const struct usb_descriptor_header *gfs_otg_desc[] = {
-	(const struct usb_descriptor_header *)
-	&(const struct usb_otg_descriptor) {
-		.bLength		= sizeof(struct usb_otg_descriptor),
-		.bDescriptorType	= USB_DT_OTG,
-
-		/*
-		 * REVISIT SRP-only hardware is possible, although
-		 * it would not be called "OTG" ...
-		 */
-		.bmAttributes		= USB_OTG_SRP | USB_OTG_HNP,
-	},
-
-	NULL
-};
+static const struct usb_descriptor_header *gfs_otg_desc[2];
 
 /* String IDs are assigned dynamically */
 static struct usb_string gfs_strings[] = {
@@ -412,6 +398,17 @@
 		goto error_rndis;
 	gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(cdev->gadget) && !gfs_otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+		if (!usb_desc)
+			goto error_rndis;
+		usb_otg_descriptor_init(cdev->gadget, usb_desc);
+		gfs_otg_desc[0] = usb_desc;
+		gfs_otg_desc[1] = NULL;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
 		struct gfs_configuration *c = gfs_configurations + i;
 		int sid = USB_GADGET_FIRST_AVAIL_IDX + i;
@@ -432,6 +429,8 @@
 
 /* TODO */
 error_unbind:
+	kfree(gfs_otg_desc[0]);
+	gfs_otg_desc[0] = NULL;
 error_rndis:
 #ifdef CONFIG_USB_FUNCTIONFS_RNDIS
 	usb_put_function_instance(fi_rndis);
@@ -473,6 +472,9 @@
 	for (i = 0; i < N_CONF * func_num; ++i)
 		usb_put_function(*(f_ffs[0] + i));
 
+	kfree(gfs_otg_desc[0]);
+	gfs_otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/gmidi.c b/drivers/usb/gadget/legacy/gmidi.c
index da19c48..8a18348 100644
--- a/drivers/usb/gadget/legacy/gmidi.c
+++ b/drivers/usb/gadget/legacy/gmidi.c
@@ -35,8 +35,6 @@
 #include <linux/usb/audio.h>
 #include <linux/usb/midi.h>
 
-#include "gadget_chips.h"
-
 #include "u_midi.h"
 
 /*-------------------------------------------------------------------------*/
@@ -88,10 +86,10 @@
 static struct usb_device_descriptor device_desc = {
 	.bLength =		USB_DT_DEVICE_SIZE,
 	.bDescriptorType =	USB_DT_DEVICE,
-	.bcdUSB =		__constant_cpu_to_le16(0x0200),
+	.bcdUSB =		cpu_to_le16(0x0200),
 	.bDeviceClass =		USB_CLASS_PER_INTERFACE,
-	.idVendor =		__constant_cpu_to_le16(DRIVER_VENDOR_NUM),
-	.idProduct =		__constant_cpu_to_le16(DRIVER_PRODUCT_NUM),
+	.idVendor =		cpu_to_le16(DRIVER_VENDOR_NUM),
+	.idProduct =		cpu_to_le16(DRIVER_PRODUCT_NUM),
 	/* .iManufacturer =	DYNAMIC */
 	/* .iProduct =		DYNAMIC */
 	.bNumConfigurations =	1,
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 2baa572..7e5d2c4 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -19,7 +19,6 @@
 #include <linux/usb/composite.h>
 #include <linux/usb/g_hid.h>
 
-#include "gadget_chips.h"
 #define DRIVER_DESC		"HID Gadget"
 #define DRIVER_VERSION		"2010/03/16"
 
@@ -68,21 +67,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
-
+static const struct usb_descriptor_header *otg_desc[2];
 
 /* string IDs are assigned dynamically */
 static struct usb_string strings_dev[] = {
@@ -186,16 +171,30 @@
 	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc)
+			goto put;
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	/* register our configuration */
 	status = usb_add_config(cdev, &config_driver, do_config);
 	if (status < 0)
-		goto put;
+		goto free_otg_desc;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
 
 	return 0;
 
+free_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 put:
 	list_for_each_entry(m, &hidg_func_list, node) {
 		if (m == n)
@@ -213,6 +212,10 @@
 		usb_put_function(n->f);
 		usb_put_function_instance(n->fi);
 	}
+
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index e7bfb08..bda3c51 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -64,21 +64,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/*
-	 * REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 static struct usb_string strings_dev[] = {
 	[USB_GADGET_MANUFACTURER_IDX].s = "",
@@ -191,10 +177,6 @@
 	if (status)
 		goto fail;
 
-	status = fsg_common_set_nluns(opts->common, config.nluns);
-	if (status)
-		goto fail_set_nluns;
-
 	fsg_common_set_ops(opts->common, &ops);
 
 	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
@@ -214,9 +196,20 @@
 		goto fail_string_ids;
 	msg_device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+		if (!usb_desc)
+			goto fail_string_ids;
+		usb_otg_descriptor_init(cdev->gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	status = usb_add_config(cdev, &msg_config_driver, msg_do_config);
 	if (status < 0)
-		goto fail_string_ids;
+		goto fail_otg_desc;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&cdev->gadget->dev,
@@ -224,11 +217,12 @@
 	set_bit(0, &msg_registered);
 	return 0;
 
+fail_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail_string_ids:
 	fsg_common_remove_luns(opts->common);
 fail_set_cdev:
-	fsg_common_free_luns(opts->common);
-fail_set_nluns:
 	fsg_common_free_buffers(opts->common);
 fail:
 	usb_put_function_instance(fi_msg);
@@ -243,6 +237,9 @@
 	if (!IS_ERR(fi_msg))
 		usb_put_function_instance(fi_msg);
 
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
index b21b51f..4fe794d 100644
--- a/drivers/usb/gadget/legacy/multi.c
+++ b/drivers/usb/gadget/legacy/multi.c
@@ -78,21 +78,7 @@
 	.idProduct =		cpu_to_le16(MULTI_PRODUCT_NUM),
 };
 
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &(struct usb_otg_descriptor){
-		.bLength =		sizeof(struct usb_otg_descriptor),
-		.bDescriptorType =	USB_DT_OTG,
-
-		/*
-		 * REVISIT SRP-only hardware is possible, although
-		 * it would not be called "OTG" ...
-		 */
-		.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-	},
-	NULL,
-};
-
+static const struct usb_descriptor_header *otg_desc[2];
 
 enum {
 	MULTI_STRING_RNDIS_CONFIG_IDX = USB_GADGET_FIRST_AVAIL_IDX,
@@ -407,10 +393,6 @@
 	if (status)
 		goto fail2;
 
-	status = fsg_common_set_nluns(fsg_opts->common, config.nluns);
-	if (status)
-		goto fail_set_nluns;
-
 	status = fsg_common_set_cdev(fsg_opts->common, cdev, config.can_stall);
 	if (status)
 		goto fail_set_cdev;
@@ -429,14 +411,25 @@
 		goto fail_string_ids;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc)
+			goto fail_string_ids;
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	/* register configurations */
 	status = rndis_config_register(cdev);
 	if (unlikely(status < 0))
-		goto fail_string_ids;
+		goto fail_otg_desc;
 
 	status = cdc_config_register(cdev);
 	if (unlikely(status < 0))
-		goto fail_string_ids;
+		goto fail_otg_desc;
 	usb_composite_overwrite_options(cdev, &coverwrite);
 
 	/* we're done */
@@ -445,11 +438,12 @@
 
 
 	/* error recovery */
+fail_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail_string_ids:
 	fsg_common_remove_luns(fsg_opts->common);
 fail_set_cdev:
-	fsg_common_free_luns(fsg_opts->common);
-fail_set_nluns:
 	fsg_common_free_buffers(fsg_opts->common);
 fail2:
 	usb_put_function_instance(fi_msg);
@@ -490,6 +484,9 @@
 	usb_put_function(f_ecm);
 	usb_put_function_instance(fi_ecm);
 #endif
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/ncm.c b/drivers/usb/gadget/legacy/ncm.c
index 6ce7421..2bae438 100644
--- a/drivers/usb/gadget/legacy/ncm.c
+++ b/drivers/usb/gadget/legacy/ncm.c
@@ -69,20 +69,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 /* string IDs are assigned dynamically */
 static struct usb_string strings_dev[] = {
@@ -171,16 +158,30 @@
 	device_desc.iManufacturer = strings_dev[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings_dev[USB_GADGET_PRODUCT_IDX].id;
 
+	if (gadget_is_otg(gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(gadget);
+		if (!usb_desc)
+			goto fail;
+		usb_otg_descriptor_init(gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
+	}
+
 	status = usb_add_config(cdev, &ncm_config_driver,
 				ncm_do_config);
 	if (status < 0)
-		goto fail;
+		goto fail1;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	dev_info(&gadget->dev, "%s\n", DRIVER_DESC);
 
 	return 0;
 
+fail1:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail:
 	usb_put_function_instance(f_ncm_inst);
 	return status;
@@ -192,6 +193,9 @@
 		usb_put_function(f_ncm);
 	if (!IS_ERR_OR_NULL(f_ncm_inst))
 		usb_put_function_instance(f_ncm_inst);
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/nokia.c b/drivers/usb/gadget/legacy/nokia.c
index 4bb498a..8b3f6fb 100644
--- a/drivers/usb/gadget/legacy/nokia.c
+++ b/drivers/usb/gadget/legacy/nokia.c
@@ -23,7 +23,7 @@
 #include "u_ether.h"
 #include "u_phonet.h"
 #include "u_ecm.h"
-#include "gadget_chips.h"
+#include "f_mass_storage.h"
 
 /* Defines */
 
@@ -34,6 +34,29 @@
 
 USB_ETHERNET_MODULE_PARAMETERS();
 
+static struct fsg_module_parameters fsg_mod_data = {
+	.stall = 0,
+	.luns = 2,
+	.removable_count = 2,
+	.removable = { 1, 1, },
+};
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for good buffering pipeline
+ */
+#define fsg_num_buffers	CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+FSG_MODULE_PARAMETERS(/* no prefix */, fsg_mod_data);
+
 #define NOKIA_VENDOR_ID			0x0421	/* Nokia */
 #define NOKIA_PRODUCT_ID		0x01c8	/* Nokia Gadget */
 
@@ -66,10 +89,10 @@
 static struct usb_device_descriptor device_desc = {
 	.bLength		= USB_DT_DEVICE_SIZE,
 	.bDescriptorType	= USB_DT_DEVICE,
-	.bcdUSB			= __constant_cpu_to_le16(0x0200),
+	.bcdUSB			= cpu_to_le16(0x0200),
 	.bDeviceClass		= USB_CLASS_COMM,
-	.idVendor		= __constant_cpu_to_le16(NOKIA_VENDOR_ID),
-	.idProduct		= __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
+	.idVendor		= cpu_to_le16(NOKIA_VENDOR_ID),
+	.idProduct		= cpu_to_le16(NOKIA_PRODUCT_ID),
 	.bcdDevice		= cpu_to_le16(NOKIA_VERSION_NUM),
 	/* .iManufacturer = DYNAMIC */
 	/* .iProduct = DYNAMIC */
@@ -94,6 +117,8 @@
 static struct usb_function *f_obex2_cfg2;
 static struct usb_function *f_phonet_cfg1;
 static struct usb_function *f_phonet_cfg2;
+static struct usb_function *f_msg_cfg1;
+static struct usb_function *f_msg_cfg2;
 
 
 static struct usb_configuration nokia_config_500ma_driver = {
@@ -117,6 +142,7 @@
 static struct usb_function_instance *fi_obex1;
 static struct usb_function_instance *fi_obex2;
 static struct usb_function_instance *fi_phonet;
+static struct usb_function_instance *fi_msg;
 
 static int nokia_bind_config(struct usb_configuration *c)
 {
@@ -125,6 +151,8 @@
 	struct usb_function *f_obex1 = NULL;
 	struct usb_function *f_ecm;
 	struct usb_function *f_obex2 = NULL;
+	struct usb_function *f_msg;
+	struct fsg_opts *fsg_opts;
 	int status = 0;
 	int obex1_stat = -1;
 	int obex2_stat = -1;
@@ -160,6 +188,12 @@
 		goto err_get_ecm;
 	}
 
+	f_msg = usb_get_function(fi_msg);
+	if (IS_ERR(f_msg)) {
+		status = PTR_ERR(f_msg);
+		goto err_get_msg;
+	}
+
 	if (!IS_ERR_OR_NULL(f_phonet)) {
 		phonet_stat = usb_add_function(c, f_phonet);
 		if (phonet_stat)
@@ -187,21 +221,36 @@
 		pr_debug("could not bind ecm config %d\n", status);
 		goto err_ecm;
 	}
+
+	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+
+	status = fsg_common_run_thread(fsg_opts->common);
+	if (status)
+		goto err_msg;
+
+	status = usb_add_function(c, f_msg);
+	if (status)
+		goto err_msg;
+
 	if (c == &nokia_config_500ma_driver) {
 		f_acm_cfg1 = f_acm;
 		f_ecm_cfg1 = f_ecm;
 		f_phonet_cfg1 = f_phonet;
 		f_obex1_cfg1 = f_obex1;
 		f_obex2_cfg1 = f_obex2;
+		f_msg_cfg1 = f_msg;
 	} else {
 		f_acm_cfg2 = f_acm;
 		f_ecm_cfg2 = f_ecm;
 		f_phonet_cfg2 = f_phonet;
 		f_obex1_cfg2 = f_obex1;
 		f_obex2_cfg2 = f_obex2;
+		f_msg_cfg2 = f_msg;
 	}
 
 	return status;
+err_msg:
+	usb_remove_function(c, f_ecm);
 err_ecm:
 	usb_remove_function(c, f_acm);
 err_conf:
@@ -211,6 +260,8 @@
 		usb_remove_function(c, f_obex1);
 	if (!phonet_stat)
 		usb_remove_function(c, f_phonet);
+	usb_put_function(f_msg);
+err_get_msg:
 	usb_put_function(f_ecm);
 err_get_ecm:
 	usb_put_function(f_acm);
@@ -227,6 +278,8 @@
 static int nokia_bind(struct usb_composite_dev *cdev)
 {
 	struct usb_gadget	*gadget = cdev->gadget;
+	struct fsg_opts		*fsg_opts;
+	struct fsg_config	fsg_config;
 	int			status;
 
 	status = usb_string_ids_tab(cdev, strings_dev);
@@ -238,7 +291,7 @@
 	nokia_config_500ma_driver.iConfiguration = status;
 	nokia_config_100ma_driver.iConfiguration = status;
 
-	if (!gadget_supports_altsettings(gadget)) {
+	if (!gadget_is_altset_supported(gadget)) {
 		status = -ENODEV;
 		goto err_usb;
 	}
@@ -267,11 +320,42 @@
 		goto err_acm_inst;
 	}
 
+	fi_msg = usb_get_function_instance("mass_storage");
+	if (IS_ERR(fi_msg)) {
+		status = PTR_ERR(fi_msg);
+		goto err_ecm_inst;
+	}
+
+	/* set up mass storage function */
+	fsg_config_from_params(&fsg_config, &fsg_mod_data, fsg_num_buffers);
+	fsg_config.vendor_name = "Nokia";
+	fsg_config.product_name = "N900";
+
+	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+	fsg_opts->no_configfs = true;
+
+	status = fsg_common_set_num_buffers(fsg_opts->common, fsg_num_buffers);
+	if (status)
+		goto err_msg_inst;
+
+	status = fsg_common_set_cdev(fsg_opts->common, cdev, fsg_config.can_stall);
+	if (status)
+		goto err_msg_buf;
+
+	fsg_common_set_sysfs(fsg_opts->common, true);
+
+	status = fsg_common_create_luns(fsg_opts->common, &fsg_config);
+	if (status)
+		goto err_msg_buf;
+
+	fsg_common_set_inquiry_string(fsg_opts->common, fsg_config.vendor_name,
+				      fsg_config.product_name);
+
 	/* finally register the configuration */
 	status = usb_add_config(cdev, &nokia_config_500ma_driver,
 			nokia_bind_config);
 	if (status < 0)
-		goto err_ecm_inst;
+		goto err_msg_luns;
 
 	status = usb_add_config(cdev, &nokia_config_100ma_driver,
 			nokia_bind_config);
@@ -292,6 +376,12 @@
 	if (!IS_ERR_OR_NULL(f_phonet_cfg1))
 		usb_put_function(f_phonet_cfg1);
 	usb_put_function(f_ecm_cfg1);
+err_msg_luns:
+	fsg_common_remove_luns(fsg_opts->common);
+err_msg_buf:
+	fsg_common_free_buffers(fsg_opts->common);
+err_msg_inst:
+	usb_put_function_instance(fi_msg);
 err_ecm_inst:
 	usb_put_function_instance(fi_ecm);
 err_acm_inst:
@@ -325,7 +415,10 @@
 	usb_put_function(f_acm_cfg2);
 	usb_put_function(f_ecm_cfg1);
 	usb_put_function(f_ecm_cfg2);
+	usb_put_function(f_msg_cfg1);
+	usb_put_function(f_msg_cfg2);
 
+	usb_put_function_instance(fi_msg);
 	usb_put_function_instance(fi_ecm);
 	if (!IS_ERR(fi_obex2))
 		usb_put_function_instance(fi_obex2);
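
nokia.c now carries a mass-storage function in both configurations. The one-time f_mass_storage setup lives in nokia_bind(), while each configuration bind starts the FSG thread and adds the function. Condensed, and with error handling omitted, the split looks roughly like this (all helpers are the ones used in the hunks above):

	/* nokia_bind(): one-time mass-storage setup */
	fi_msg = usb_get_function_instance("mass_storage");
	fsg_config_from_params(&fsg_config, &fsg_mod_data, fsg_num_buffers);
	fsg_config.vendor_name = "Nokia";
	fsg_config.product_name = "N900";
	fsg_opts = fsg_opts_from_func_inst(fi_msg);
	fsg_opts->no_configfs = true;
	fsg_common_set_num_buffers(fsg_opts->common, fsg_num_buffers);
	fsg_common_set_cdev(fsg_opts->common, cdev, fsg_config.can_stall);
	fsg_common_set_sysfs(fsg_opts->common, true);
	fsg_common_create_luns(fsg_opts->common, &fsg_config);

	/* nokia_bind_config(), run for both the 500 mA and 100 mA configs */
	f_msg = usb_get_function(fi_msg);
	fsg_common_run_thread(fsg_opts->common);
	usb_add_function(c, f_msg);
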
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index 1ce7df1..a22d30a 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -19,8 +19,6 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/g_printer.h>
 
-#include "gadget_chips.h"
-
 USB_GADGET_COMPOSITE_OPTIONS();
 
 #define DRIVER_DESC		"Printer Gadget"
@@ -82,16 +80,7 @@
 	.bNumConfigurations =	1
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =              sizeof otg_descriptor,
-	.bDescriptorType =      USB_DT_OTG,
-	.bmAttributes =         USB_OTG_SRP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 /*-------------------------------------------------------------------------*/
 
@@ -136,7 +125,6 @@
 	usb_gadget_set_selfpowered(gadget);
 
 	if (gadget_is_otg(gadget)) {
-		otg_descriptor.bmAttributes |= USB_OTG_HNP;
 		printer_cfg_driver.descriptors = otg_desc;
 		printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 	}
@@ -174,21 +162,39 @@
 	opts->q_len = QLEN;
 
 	ret = usb_string_ids_tab(cdev, strings);
-	if (ret < 0) {
-		usb_put_function_instance(fi_printer);
-		return ret;
-	}
+	if (ret < 0)
+		goto fail_put_func_inst;
+
 	device_desc.iManufacturer = strings[USB_GADGET_MANUFACTURER_IDX].id;
 	device_desc.iProduct = strings[USB_GADGET_PRODUCT_IDX].id;
 	device_desc.iSerialNumber = strings[USB_GADGET_SERIAL_IDX].id;
 
-	ret = usb_add_config(cdev, &printer_cfg_driver, printer_do_config);
-	if (ret) {
-		usb_put_function_instance(fi_printer);
-		return ret;
+	if (gadget_is_otg(cdev->gadget) && !otg_desc[0]) {
+		struct usb_descriptor_header *usb_desc;
+
+		usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+		if (!usb_desc) {
+			ret = -ENOMEM;
+			goto fail_put_func_inst;
+		}
+		usb_otg_descriptor_init(cdev->gadget, usb_desc);
+		otg_desc[0] = usb_desc;
+		otg_desc[1] = NULL;
 	}
+
+	ret = usb_add_config(cdev, &printer_cfg_driver, printer_do_config);
+	if (ret)
+		goto fail_free_otg_desc;
+
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	return ret;
+
+fail_free_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+fail_put_func_inst:
+	usb_put_function_instance(fi_printer);
+	return ret;
 }
 
 static int printer_unbind(struct usb_composite_dev *cdev)
@@ -196,6 +202,9 @@
 	usb_put_function(f_printer);
 	usb_put_function_instance(fi_printer);
 
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c
index 8b7528f..c5d42e0 100644
--- a/drivers/usb/gadget/legacy/serial.c
+++ b/drivers/usb/gadget/legacy/serial.c
@@ -17,7 +17,6 @@
 #include <linux/tty_flip.h>
 
 #include "u_serial.h"
-#include "gadget_chips.h"
 
 
 /* Defines */
@@ -79,20 +78,7 @@
 	.bNumConfigurations =	1,
 };
 
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
+static const struct usb_descriptor_header *otg_desc[2];
 
 /*-------------------------------------------------------------------------*/
 
@@ -191,6 +177,18 @@
 	serial_config_driver.iConfiguration = status;
 
 	if (gadget_is_otg(cdev->gadget)) {
+		if (!otg_desc[0]) {
+			struct usb_descriptor_header *usb_desc;
+
+			usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+			if (!usb_desc) {
+				status = -ENOMEM;
+				goto fail;
+			}
+			usb_otg_descriptor_init(cdev->gadget, usb_desc);
+			otg_desc[0] = usb_desc;
+			otg_desc[1] = NULL;
+		}
 		serial_config_driver.descriptors = otg_desc;
 		serial_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 	}
@@ -208,13 +206,15 @@
 				"gser");
 	}
 	if (status < 0)
-		goto fail;
+		goto fail1;
 
 	usb_composite_overwrite_options(cdev, &coverwrite);
 	INFO(cdev, "%s\n", GS_VERSION_NAME);
 
 	return 0;
-
+fail1:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 fail:
 	return status;
 }
@@ -227,6 +227,10 @@
 		usb_put_function(f_serial[i]);
 		usb_put_function_instance(fi_serial[i]);
 	}
+
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index c986e8a..37a4100 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -121,24 +121,7 @@
 	.bNumConfigurations =	2,
 };
 
-#ifdef CONFIG_USB_OTG
-static struct usb_otg_descriptor otg_descriptor = {
-	.bLength =		sizeof otg_descriptor,
-	.bDescriptorType =	USB_DT_OTG,
-
-	/* REVISIT SRP-only hardware is possible, although
-	 * it would not be called "OTG" ...
-	 */
-	.bmAttributes =		USB_OTG_SRP | USB_OTG_HNP,
-};
-
-static const struct usb_descriptor_header *otg_desc[] = {
-	(struct usb_descriptor_header *) &otg_descriptor,
-	NULL,
-};
-#else
-#define otg_desc	NULL
-#endif
+static const struct usb_descriptor_header *otg_desc[2];
 
 /* string IDs are assigned dynamically */
 /* default serial number takes at least two packets */
@@ -341,6 +324,18 @@
 
 	/* support OTG systems */
 	if (gadget_is_otg(cdev->gadget)) {
+		if (!otg_desc[0]) {
+			struct usb_descriptor_header *usb_desc;
+
+			usb_desc = usb_otg_descriptor_alloc(cdev->gadget);
+			if (!usb_desc) {
+				status = -ENOMEM;
+				goto err_conf_flb;
+			}
+			usb_otg_descriptor_init(cdev->gadget, usb_desc);
+			otg_desc[0] = usb_desc;
+			otg_desc[1] = NULL;
+		}
 		sourcesink_driver.descriptors = otg_desc;
 		sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
 		loopback_driver.descriptors = otg_desc;
@@ -359,12 +354,12 @@
 	}
 	status = usb_add_function(&sourcesink_driver, func_ss);
 	if (status)
-		goto err_conf_flb;
+		goto err_free_otg_desc;
 
 	usb_ep_autoconfig_reset(cdev->gadget);
 	status = usb_add_function(&loopback_driver, func_lb);
 	if (status)
-		goto err_conf_flb;
+		goto err_free_otg_desc;
 
 	usb_ep_autoconfig_reset(cdev->gadget);
 	usb_composite_overwrite_options(cdev, &coverwrite);
@@ -373,6 +368,9 @@
 
 	return 0;
 
+err_free_otg_desc:
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
 err_conf_flb:
 	usb_put_function(func_lb);
 	func_lb = NULL;
@@ -397,6 +395,9 @@
 	if (!IS_ERR_OR_NULL(func_lb))
 		usb_put_function(func_lb);
 	usb_put_function_instance(func_inst_lb);
+	kfree(otg_desc[0]);
+	otg_desc[0] = NULL;
+
 	return 0;
 }
 
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index de7e5e2..fdacddb 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -138,15 +138,82 @@
 
 /* endpoint names used for print */
 static const char ep0_string[] = "ep0in";
-static const char *const ep_string[] = {
-	ep0_string,
-	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
-	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
-	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
-	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
-	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
-	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
-	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
+static const struct {
+	const char *name;
+	const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+	{ \
+		.name = _name, \
+		.caps = _caps, \
+	}
+
+	EP_INFO(ep0_string,
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep1in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep2in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep3in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep4in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep5in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep6in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep7in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep8in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep9in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep10in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep11in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep12in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep13in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep14in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep15in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep0out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep1out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep2out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep3out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep4out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep5out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep6out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep7out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep8out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep9out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep10out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep11out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep12out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep13out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep14out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep15out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
 };
 
 /* DMA usage flag */
@@ -1517,7 +1584,8 @@
 	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
 		ep = &dev->ep[tmp];
 		ep->dev = dev;
-		ep->ep.name = ep_string[tmp];
+		ep->ep.name = ep_info[tmp].name;
+		ep->ep.caps = ep_info[tmp].caps;
 		ep->num = tmp;
 		/* txfifo size is calculated at enable time */
 		ep->txfifo = dev->txfifo;
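
This is the first of several UDC conversions from bare endpoint name strings to an EP_INFO() table carrying a struct usb_ep_caps, so the gadget core can match endpoints on declared type and direction instead of parsing names. A conceptual sketch of the kind of check those capability bits enable, assuming only the caps fields assigned in these patches and the standard ch9 descriptor helpers (the real check lives in usb_gadget_ep_match_desc()):

/* Simplified illustration of matching a descriptor against declared caps. */
static bool ep_caps_match(const struct usb_ep *ep,
			  const struct usb_endpoint_descriptor *desc)
{
	/* the direction must be one the endpoint supports */
	if (usb_endpoint_dir_in(desc) ? !ep->caps.dir_in : !ep->caps.dir_out)
		return false;

	/* the transfer type must be one the endpoint supports */
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		return ep->caps.type_control;
	case USB_ENDPOINT_XFER_ISOC:
		return ep->caps.type_iso;
	case USB_ENDPOINT_XFER_BULK:
		return ep->caps.type_bulk;
	default:
		return ep->caps.type_int;
	}
}
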
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index fc42264..d0d1894 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -59,15 +59,34 @@
 #define	DRIVER_VERSION	"3 May 2006"
 
 static const char driver_name [] = "at91_udc";
-static const char * const ep_names[] = {
-	"ep0",
-	"ep1",
-	"ep2",
-	"ep3-int",
-	"ep4",
-	"ep5",
+
+static const struct {
+	const char *name;
+	const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+	{ \
+		.name = _name, \
+		.caps = _caps, \
+	}
+
+	EP_INFO("ep0",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep1",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep2",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep3-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep4",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep5",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+
+#undef EP_INFO
 };
-#define ep0name		ep_names[0]
+
+#define ep0name		ep_info[0].name
 
 #define VBUS_POLL_TIMEOUT	msecs_to_jiffies(1000)
 
@@ -825,6 +844,7 @@
 
 	INIT_LIST_HEAD(&udc->gadget.ep_list);
 	INIT_LIST_HEAD(&udc->gadget.ep0->ep_list);
+	udc->gadget.quirk_stall_not_supp = 1;
 
 	for (i = 0; i < NUM_ENDPOINTS; i++) {
 		struct at91_ep *ep = &udc->ep[i];
@@ -1830,7 +1850,8 @@
 
 	for (i = 0; i < NUM_ENDPOINTS; i++) {
 		ep = &udc->ep[i];
-		ep->ep.name = ep_names[i];
+		ep->ep.name = ep_info[i].name;
+		ep->ep.caps = ep_info[i].caps;
 		ep->ep.ops = &at91_ep_ops;
 		ep->udc = udc;
 		ep->int_mask = BIT(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 4095cce..3dfada8 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -22,7 +22,6 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/atmel_usba_udc.h>
 #include <linux/delay.h>
-#include <linux/platform_data/atmel.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
@@ -1989,6 +1988,10 @@
 		ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
 
 		ret = of_property_read_string(pp, "name", &name);
+		if (ret) {
+			dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
+			goto err;
+		}
 		ep->ep.name = name;
 
 		ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
@@ -2063,6 +2066,17 @@
 		ep->can_dma = pdata->ep[i].can_dma;
 		ep->can_isoc = pdata->ep[i].can_isoc;
 
+		if (i == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = ep->can_isoc;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
+
 		if (i)
 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 	}
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 9db968b..8cbb003 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -44,9 +44,29 @@
 #define DRV_MODULE_NAME		"bcm63xx_udc"
 
 static const char bcm63xx_ep0name[] = "ep0";
-static const char *const bcm63xx_ep_name[] = {
-	bcm63xx_ep0name,
-	"ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
+
+static const struct {
+	const char *name;
+	const struct usb_ep_caps caps;
+} bcm63xx_ep_info[] = {
+#define EP_INFO(_name, _caps) \
+	{ \
+		.name = _name, \
+		.caps = _caps, \
+	}
+
+	EP_INFO(bcm63xx_ep0name,
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep1in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep2out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep3in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep4out-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
 };
 
 static bool use_fullspeed;
@@ -943,7 +963,8 @@
 	for (i = 0; i < BCM63XX_NUM_EP; i++) {
 		struct bcm63xx_ep *bep = &udc->bep[i];
 
-		bep->ep.name = bcm63xx_ep_name[i];
+		bep->ep.name = bcm63xx_ep_info[i].name;
+		bep->ep.caps = bcm63xx_ep_info[i].caps;
 		bep->ep_num = i;
 		bep->ep.ops = &bcm63xx_udc_ep_ops;
 		list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
index dc18a20..916d471 100644
--- a/drivers/usb/gadget/udc/bdc/bdc.h
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -290,7 +290,7 @@
 	__le32 offset[4];
 };
 
-/* bd_table: contigous bd's in a table */
+/* bd_table: contiguous bd's in a table */
 struct bd_table {
 	struct bdc_bd *start_bd;
 	/* dma address of start bd of table*/
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index 1efa612..d1b8153 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -1952,12 +1952,18 @@
 	ep->bdc = bdc;
 	ep->dir = dir;
 
+	if (dir)
+		ep->usb_ep.caps.dir_in = true;
+	else
+		ep->usb_ep.caps.dir_out = true;
+
 	/* ep->ep_num is the index inside bdc_ep */
 	if (epnum == 1) {
 		ep->ep_num = 1;
 		bdc->bdc_ep_array[ep->ep_num] = ep;
 		snprintf(ep->name, sizeof(ep->name), "ep%d", epnum - 1);
 		usb_ep_set_maxpacket_limit(&ep->usb_ep, EP0_MAX_PKT_SIZE);
+		ep->usb_ep.caps.type_control = true;
 		ep->comp_desc = NULL;
 		bdc->gadget.ep0 = &ep->usb_ep;
 	} else {
@@ -1971,6 +1977,9 @@
 			 dir & 1 ? "in" : "out");
 
 		usb_ep_set_maxpacket_limit(&ep->usb_ep, 1024);
+		ep->usb_ep.caps.type_iso = true;
+		ep->usb_ep.caps.type_bulk = true;
+		ep->usb_ep.caps.type_int = true;
 		ep->usb_ep.max_streams = 0;
 		list_add_tail(&ep->usb_ep.ep_list, &bdc->gadget.ep_list);
 	}
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 181112c..1379ad4 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -127,23 +127,87 @@
 
 static const char ep0name[] = "ep0";
 
-static const char *const ep_name[] = {
-	ep0name,				/* everyone has ep0 */
+static const struct {
+	const char *name;
+	const struct usb_ep_caps caps;
+} ep_info[] = {
+#define EP_INFO(_name, _caps) \
+	{ \
+		.name = _name, \
+		.caps = _caps, \
+	}
 
+	/* everyone has ep0 */
+	EP_INFO(ep0name,
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
 	/* act like a pxa250: fifteen fixed function endpoints */
-	"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
-	"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
-	"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
-		"ep15in-int",
-
+	EP_INFO("ep1in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep2out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep3in-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep4out-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep5in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep6in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep7out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep8in-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep9out-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep10in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep11in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep12out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep13in-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep14out-iso",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep15in-int",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
 	/* or like sa1100: two fixed function endpoints */
-	"ep1out-bulk", "ep2in-bulk",
-
+	EP_INFO("ep1out-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep2in-bulk",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
 	/* and now some generic EPs so we have enough in multi config */
-	"ep3out", "ep4in", "ep5out", "ep6out", "ep7in", "ep8out", "ep9in",
-	"ep10out", "ep11out", "ep12in", "ep13out", "ep14in", "ep15out",
+	EP_INFO("ep3out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep4in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep5out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep6out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep7in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep8out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep9in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep10out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep11out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep12in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep13out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep14in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep15out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+
+#undef EP_INFO
 };
-#define DUMMY_ENDPOINTS	ARRAY_SIZE(ep_name)
+
+#define DUMMY_ENDPOINTS	ARRAY_SIZE(ep_info)
 
 /*-------------------------------------------------------------------------*/
 
@@ -938,9 +1002,10 @@
 	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
 		struct dummy_ep	*ep = &dum->ep[i];
 
-		if (!ep_name[i])
+		if (!ep_info[i].name)
 			break;
-		ep->ep.name = ep_name[i];
+		ep->ep.name = ep_info[i].name;
+		ep->ep.caps = ep_info[i].caps;
 		ep->ep.ops = &dummy_ep_ops;
 		list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
 		ep->halted = ep->wedged = ep->already_seen =
@@ -1684,7 +1749,7 @@
 	}
 
 	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
-		if (!ep_name[i])
+		if (!ep_info[i].name)
 			break;
 		dum->ep[i].already_seen = 0;
 	}
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 1137e33..6ba122c 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -384,25 +384,15 @@
 		return;
 	}
 	if (ep->dir_in) { /* if IN */
-		if (req->req.length) {
-			fotg210_start_dma(ep, req);
-		} else {
-			pr_err("%s : req->req.length = 0x%x\n",
-			       __func__, req->req.length);
-		}
+		fotg210_start_dma(ep, req);
 		if ((req->req.length == req->req.actual) ||
 		    (req->req.actual < ep->ep.maxpacket))
 			fotg210_done(ep, req, 0);
 	} else { /* OUT */
-		if (!req->req.length) {
-			fotg210_done(ep, req, 0);
-		} else {
-			u32 value = ioread32(ep->fotg210->reg +
-						FOTG210_DMISGR0);
+		u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
 
-			value &= ~DMISGR0_MCX_OUT_INT;
-			iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
-		}
+		value &= ~DMISGR0_MCX_OUT_INT;
+		iowrite32(value, ep->fotg210->reg + FOTG210_DMISGR0);
 	}
 }
 
@@ -1153,6 +1143,17 @@
 		ep->ep.name = fotg210_ep_name[i];
 		ep->ep.ops = &fotg210_ep_ops;
 		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+
+		if (i == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
 	}
 	usb_ep_set_maxpacket_limit(&fotg210->ep[0]->ep, 0x40);
 	fotg210->gadget.ep0 = &fotg210->ep[0]->ep;
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index e0822f1..5fb6f8b 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2417,6 +2417,17 @@
 	strcpy(ep->name, ep_name[pipe_num]);
 	ep->ep.name = ep_name[pipe_num];
 
+	if (pipe_num == 0) {
+		ep->ep.caps.type_control = true;
+	} else {
+		ep->ep.caps.type_iso = true;
+		ep->ep.caps.type_bulk = true;
+		ep->ep.caps.type_int = true;
+	}
+
+	ep->ep.caps.dir_in = true;
+	ep->ep.caps.dir_out = true;
+
 	ep->ep.ops = &qe_ep_ops;
 	ep->stopped = 1;
 	usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index c60022b..aab5221 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2313,6 +2313,19 @@
 	ep->ep.ops = &fsl_ep_ops;
 	ep->stopped = 0;
 
+	if (index == 0) {
+		ep->ep.caps.type_control = true;
+	} else {
+		ep->ep.caps.type_iso = true;
+		ep->ep.caps.type_bulk = true;
+		ep->ep.caps.type_int = true;
+	}
+
+	if (index & 1)
+		ep->ep.caps.dir_in = true;
+	else
+		ep->ep.caps.dir_out = true;
+
 	/* for ep0: maxP defined in desc
 	 * for other eps, maxP is set by epautoconfig() called by gadget layer
 	 */
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 3970f45..948845c 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1450,6 +1450,17 @@
 		ep->ep.name = fusb300_ep_name[i];
 		ep->ep.ops = &fusb300_ep_ops;
 		usb_ep_set_maxpacket_limit(&ep->ep, HS_BULK_MAX_PACKET_SIZE);
+
+		if (i == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
 	}
 	usb_ep_set_maxpacket_limit(&fusb300->ep[0]->ep, HS_CTL_MAX_PACKET_SIZE);
 	fusb300->ep[0]->epnum = 0;
diff --git a/drivers/usb/gadget/udc/gadget_chips.h b/drivers/usb/gadget/udc/gadget_chips.h
deleted file mode 100644
index bcd04bc..0000000
--- a/drivers/usb/gadget/udc/gadget_chips.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * USB device controllers have lots of quirks.  Use these macros in
- * gadget drivers or other code that needs to deal with them, and which
- * autoconfigures instead of using early binding to the hardware.
- *
- * This SHOULD eventually work like the ARM mach_is_*() stuff, driven by
- * some config file that gets updated as new hardware is supported.
- * (And avoiding all runtime comparisons in typical one-choice configs!)
- *
- * NOTE:  some of these controller drivers may not be available yet.
- * Some are available on 2.4 kernels; several are available, but not
- * yet pushed in the 2.6 mainline tree.
- */
-
-#ifndef __GADGET_CHIPS_H
-#define __GADGET_CHIPS_H
-
-#include <linux/usb/gadget.h>
-
-/*
- * NOTICE: the entries below are alphabetical and should be kept
- * that way.
- *
- * Always be sure to add new entries to the correct position or
- * accept the bashing later.
- *
- * If you have forgotten the alphabetical order let VIM/EMACS
- * do that for you.
- */
-#define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
-#define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
-#define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
-#define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
-#define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
-#define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
-
-/**
- * gadget_supports_altsettings - return true if altsettings work
- * @gadget: the gadget in question
- */
-static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
-{
-	/* PXA 21x/25x/26x has no altsettings at all */
-	if (gadget_is_pxa(gadget))
-		return false;
-
-	/* PXA 27x and 3xx have *broken* altsetting support */
-	if (gadget_is_pxa27x(gadget))
-		return false;
-
-	/* Everything else is *presumably* fine ... */
-	return true;
-}
-
-#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 9e8d842..1fdfec1 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -990,6 +990,35 @@
 	return -EOPNOTSUPP;
 }
 
+static struct usb_ep *goku_match_ep(struct usb_gadget *g,
+		struct usb_endpoint_descriptor *desc,
+		struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+	struct goku_udc	*dev = to_goku_udc(g);
+	struct usb_ep *ep;
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_INT:
+		/* single buffering is enough */
+		ep = &dev->ep[3].ep;
+		if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+			return ep;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		if (usb_endpoint_dir_in(desc)) {
+			/* DMA may be available */
+			ep = &dev->ep[2].ep;
+			if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+				return ep;
+		}
+		break;
+	default:
+		/* nothing */ ;
+	}
+
+	return NULL;
+}
+
 static int goku_udc_start(struct usb_gadget *g,
 		struct usb_gadget_driver *driver);
 static int goku_udc_stop(struct usb_gadget *g);
@@ -998,6 +1027,7 @@
 	.get_frame	= goku_get_frame,
 	.udc_start	= goku_udc_start,
 	.udc_stop	= goku_udc_stop,
+	.match_ep	= goku_match_ep,
 	// no remote wakeup
 	// not selfpowered
 };
@@ -1257,6 +1287,14 @@
 		INIT_LIST_HEAD (&ep->queue);
 
 		ep_reset(NULL, ep);
+
+		if (i == 0)
+			ep->ep.caps.type_control = true;
+		else
+			ep->ep.caps.type_bulk = true;
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
 	}
 
 	dev->ep[0].reg_mode = NULL;
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index c886887..8aa2593 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2018,12 +2018,23 @@
 
 		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
 		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
+
+		ep->ep.caps.type_control = true;
 	} else {
 		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
+
+		ep->ep.caps.type_iso = true;
+		ep->ep.caps.type_bulk = true;
+		ep->ep.caps.type_int = true;
 	}
 	list_add_tail(&ep->ep_list, &dev->ep_list);
 
+	if (is_in)
+		ep->ep.caps.dir_in = true;
+	else
+		ep->ep.caps.dir_out = true;
+
 	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
 					 &ep->tailbuf_paddr, GFP_ATOMIC);
 	if (!ep->tailbuf)
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index 3b6a785..00b5006 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -2575,6 +2575,8 @@
 		.ep = {
 			.name	= "ep0",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 0,
@@ -2586,6 +2588,8 @@
 		.ep = {
 			.name	= "ep1-int",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 2,
@@ -2597,6 +2601,8 @@
 		.ep = {
 			.name	= "ep2-bulk",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 4,
@@ -2608,6 +2614,8 @@
 		.ep = {
 			.name	= "ep3-iso",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 1023,
 		.hwep_num_base	= 6,
@@ -2619,6 +2627,8 @@
 		.ep = {
 			.name	= "ep4-int",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 8,
@@ -2630,6 +2640,8 @@
 		.ep = {
 			.name	= "ep5-bulk",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 10,
@@ -2641,6 +2653,8 @@
 		.ep = {
 			.name	= "ep6-iso",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 1023,
 		.hwep_num_base	= 12,
@@ -2652,6 +2666,8 @@
 		.ep = {
 			.name	= "ep7-int",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 14,
@@ -2663,6 +2679,8 @@
 		.ep = {
 			.name	= "ep8-bulk",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 16,
@@ -2674,6 +2692,8 @@
 		.ep = {
 			.name	= "ep9-iso",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 1023,
 		.hwep_num_base	= 18,
@@ -2685,6 +2705,8 @@
 		.ep = {
 			.name	= "ep10-int",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 20,
@@ -2696,6 +2718,8 @@
 		.ep = {
 			.name	= "ep11-bulk",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 22,
@@ -2707,6 +2731,8 @@
 		.ep = {
 			.name	= "ep12-iso",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 1023,
 		.hwep_num_base	= 24,
@@ -2718,6 +2744,8 @@
 		.ep = {
 			.name	= "ep13-int",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 26,
@@ -2729,6 +2757,8 @@
 		.ep = {
 			.name	= "ep14-bulk",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 64,
 		.hwep_num_base	= 28,
@@ -2740,6 +2770,8 @@
 		.ep = {
 			.name	= "ep15-bulk",
 			.ops	= &lpc32xx_ep_ops,
+			.caps	= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+					USB_EP_CAPS_DIR_ALL),
 		},
 		.maxpacket	= 1023,
 		.hwep_num_base	= 30,
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index 309706f..b1cfa96 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1052,7 +1052,7 @@
 				tmp = m66592_read(m66592, M66592_INTSTS0) &
 								M66592_CTSQ;
 				udelay(1);
-			} while (tmp != M66592_CS_IDST || timeout-- > 0);
+			} while (tmp != M66592_CS_IDST && timeout-- > 0);
 
 			if (tmp == M66592_CS_IDST)
 				m66592_bset(m66592,
@@ -1644,6 +1644,17 @@
 		ep->ep.name = m66592_ep_name[i];
 		ep->ep.ops = &m66592_ep_ops;
 		usb_ep_set_maxpacket_limit(&ep->ep, 512);
+
+		if (i == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
 	}
 	usb_ep_set_maxpacket_limit(&m66592->ep[0].ep, 64);
 	m66592->ep[0].pipenum = 0;
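
The m66592 change above also corrects the busy-wait for the idle control state: with ||, the loop never decremented the timeout while the state was still pending and kept spinning after it was reached, so the condition becomes the usual poll-with-budget idiom:

	do {
		tmp = m66592_read(m66592, M66592_INTSTS0) & M66592_CTSQ;
		udelay(1);
	} while (tmp != M66592_CS_IDST && timeout-- > 0);
	/* exits as soon as the state is reached, or once the budget runs out */
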
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index ea35a24..4c48969 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1324,6 +1324,9 @@
 	ep->ep.ops = &mv_u3d_ep_ops;
 	ep->wedge = 0;
 	usb_ep_set_maxpacket_limit(&ep->ep, MV_U3D_EP0_MAX_PKT_SIZE);
+	ep->ep.caps.type_control = true;
+	ep->ep.caps.dir_in = true;
+	ep->ep.caps.dir_out = true;
 	ep->ep_num = 0;
 	ep->ep.desc = &mv_u3d_ep0_desc;
 	INIT_LIST_HEAD(&ep->queue);
@@ -1339,14 +1342,20 @@
 		if (i & 1) {
 			snprintf(name, sizeof(name), "ep%din", i >> 1);
 			ep->direction = MV_U3D_EP_DIR_IN;
+			ep->ep.caps.dir_in = true;
 		} else {
 			snprintf(name, sizeof(name), "ep%dout", i >> 1);
 			ep->direction = MV_U3D_EP_DIR_OUT;
+			ep->ep.caps.dir_out = true;
 		}
 		ep->u3d = u3d;
 		strncpy(ep->name, name, sizeof(ep->name));
 		ep->ep.name = ep->name;
 
+		ep->ep.caps.type_iso = true;
+		ep->ep.caps.type_bulk = true;
+		ep->ep.caps.type_int = true;
+
 		ep->ep.ops = &mv_u3d_ep_ops;
 		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
 		ep->ep_num = i / 2;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 5da37c9..339af51 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1257,6 +1257,9 @@
 	ep->wedge = 0;
 	ep->stopped = 0;
 	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
+	ep->ep.caps.type_control = true;
+	ep->ep.caps.dir_in = true;
+	ep->ep.caps.dir_out = true;
 	ep->ep_num = 0;
 	ep->ep.desc = &mv_ep0_desc;
 	INIT_LIST_HEAD(&ep->queue);
@@ -1269,14 +1272,20 @@
 		if (i % 2) {
 			snprintf(name, sizeof(name), "ep%din", i / 2);
 			ep->direction = EP_DIR_IN;
+			ep->ep.caps.dir_in = true;
 		} else {
 			snprintf(name, sizeof(name), "ep%dout", i / 2);
 			ep->direction = EP_DIR_OUT;
+			ep->ep.caps.dir_out = true;
 		}
 		ep->udc = udc;
 		strncpy(ep->name, name, sizeof(ep->name));
 		ep->ep.name = ep->name;
 
+		ep->ep.caps.type_iso = true;
+		ep->ep.caps.type_bulk = true;
+		ep->ep.caps.type_int = true;
+
 		ep->ep.ops = &mv_ep_ops;
 		ep->stopped = 0;
 		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 195baf3..18f5ebd 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1404,6 +1404,17 @@
 		else
 			ep->fifo_size = 64;
 		net2272_ep_reset(ep);
+
+		if (i == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
 	}
 	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
 
@@ -1826,9 +1837,9 @@
 				if (!e || u.r.wLength > 2)
 					goto do_stall;
 				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
-					status = __constant_cpu_to_le16(1);
+					status = cpu_to_le16(1);
 				else
-					status = __constant_cpu_to_le16(0);
+					status = cpu_to_le16(0);
 
 				/* don't bother with a request object! */
 				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 2bee912..cf0ed42 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -74,18 +74,57 @@
 
 static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
 static const char ep0name[] = "ep0";
-static const char *const ep_name[] = {
-	ep0name,
-	"ep-a", "ep-b", "ep-c", "ep-d",
-	"ep-e", "ep-f", "ep-g", "ep-h",
+
+#define EP_INFO(_name, _caps) \
+	{ \
+		.name = _name, \
+		.caps = _caps, \
+	}
+
+static const struct {
+	const char *name;
+	const struct usb_ep_caps caps;
+} ep_info_dft[] = { /* Default endpoint configuration */
+	EP_INFO(ep0name,
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-a",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-b",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-c",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-d",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-e",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-f",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-g",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep-h",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
+}, ep_info_adv[] = { /* Endpoints for usb3380 advance mode */
+	EP_INFO(ep0name,
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
+	EP_INFO("ep1in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep2out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep3in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep4out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep1out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep2in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
+	EP_INFO("ep3out",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
+	EP_INFO("ep4in",
+		USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
 };
 
-/* Endpoint names for usb3380 advance mode */
-static const char *const ep_name_adv[] = {
-	ep0name,
-	"ep1in", "ep2out", "ep3in", "ep4out",
-	"ep1out", "ep2in", "ep3out", "ep4in",
-};
+#undef EP_INFO
 
 /* mode 0 == ep-{a,b,c,d} 1K fifo each
  * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
@@ -1511,6 +1550,33 @@
 	return 0;
 }
 
+static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
+		struct usb_endpoint_descriptor *desc,
+		struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+	char name[8];
+	struct usb_ep *ep;
+
+	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
+		/* ep-e, ep-f are PIO with only 64 byte fifos */
+		ep = gadget_find_ep_by_name(_gadget, "ep-e");
+		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+			return ep;
+		ep = gadget_find_ep_by_name(_gadget, "ep-f");
+		if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+			return ep;
+	}
+
+	/* USB3380: use same address for usb and hardware endpoints */
+	snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
+			usb_endpoint_dir_in(desc) ? "in" : "out");
+	ep = gadget_find_ep_by_name(_gadget, name);
+	if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
+		return ep;
+
+	return NULL;
+}
+
 static int net2280_start(struct usb_gadget *_gadget,
 		struct usb_gadget_driver *driver);
 static int net2280_stop(struct usb_gadget *_gadget);
@@ -1522,6 +1588,7 @@
 	.pullup		= net2280_pullup,
 	.udc_start	= net2280_start,
 	.udc_stop	= net2280_stop,
+	.match_ep	= net2280_match_ep,
 };
 
 /*-------------------------------------------------------------------------*/
@@ -2055,7 +2122,8 @@
 	for (tmp = 0; tmp < 7; tmp++) {
 		struct net2280_ep	*ep = &dev->ep[tmp];
 
-		ep->ep.name = ep_name[tmp];
+		ep->ep.name = ep_info_dft[tmp].name;
+		ep->ep.caps = ep_info_dft[tmp].caps;
 		ep->dev = dev;
 		ep->num = tmp;
 
@@ -2095,7 +2163,10 @@
 	for (i = 0; i < dev->n_ep; i++) {
 		struct net2280_ep *ep = &dev->ep[i];
 
-		ep->ep.name = dev->enhanced_mode ? ep_name_adv[i] : ep_name[i];
+		ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
+						   ep_info_dft[i].name;
+		ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
+						   ep_info_dft[i].caps;
 		ep->dev = dev;
 		ep->num = i;
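
goku_udc and net2280 are the first users of the new match_ep gadget operation, which lets a controller steer a descriptor onto a specific hardware endpoint before the core falls back to its generic matching. The rough shape of such a callback, based on the two implementations above (the endpoint name here is illustrative):

static struct usb_ep *example_match_ep(struct usb_gadget *g,
		struct usb_endpoint_descriptor *desc,
		struct usb_ss_ep_comp_descriptor *ep_comp)
{
	struct usb_ep *ep;

	if (usb_endpoint_xfer_int(desc)) {
		/* e.g. steer interrupt endpoints onto a dedicated hw endpoint */
		ep = gadget_find_ep_by_name(g, "ep-e");
		if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
			return ep;
	}

	/* NULL tells the core to fall back to its generic matching */
	return NULL;
}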
 
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index e2fcdb8..9b7d394 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2579,6 +2579,28 @@
 	ep->double_buf = dbuf;
 	ep->udc = udc;
 
+	switch (type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		ep->ep.caps.type_control = true;
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		ep->ep.caps.type_iso = true;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		ep->ep.caps.type_bulk = true;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		ep->ep.caps.type_int = true;
+		break;
+	}
+
+	if (addr & USB_DIR_IN)
+		ep->ep.caps.dir_in = true;
+	else
+		ep->ep.caps.dir_out = true;
+
 	ep->ep.name = ep->name;
 	ep->ep.ops = &omap_ep_ops;
 	ep->maxpacket = maxp;
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 613547f..e5f4c52 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -620,9 +620,9 @@
 		dev->vbus_session = 1;
 	} else {
 		if (dev->driver && dev->driver->disconnect) {
-			spin_unlock(&dev->lock);
-			dev->driver->disconnect(&dev->gadget);
 			spin_lock(&dev->lock);
+			dev->driver->disconnect(&dev->gadget);
+			spin_unlock(&dev->lock);
 		}
 		pch_udc_set_disconnect(dev);
 		dev->vbus_session = 0;
@@ -1191,9 +1191,9 @@
 		pch_udc_reconnect(dev);
 	} else {
 		if (dev->driver && dev->driver->disconnect) {
-			spin_unlock(&dev->lock);
-			dev->driver->disconnect(&dev->gadget);
 			spin_lock(&dev->lock);
+			dev->driver->disconnect(&dev->gadget);
+			spin_unlock(&dev->lock);
 		}
 		pch_udc_set_disconnect(dev);
 	}
@@ -1488,11 +1488,11 @@
 		req->dma_mapped = 0;
 	}
 	ep->halted = 1;
-	spin_unlock(&dev->lock);
+	spin_lock(&dev->lock);
 	if (!ep->in)
 		pch_udc_ep_clear_rrdy(ep);
 	usb_gadget_giveback_request(&ep->ep, &req->req);
-	spin_lock(&dev->lock);
+	spin_unlock(&dev->lock);
 	ep->halted = halted;
 }
 
@@ -1793,7 +1793,7 @@
 	}
 	/* prevent from using desc. - set HOST BUSY */
 	dma_desc->status |= PCH_UDC_BS_HST_BSY;
-	dma_desc->dataptr = __constant_cpu_to_le32(DMA_ADDR_INVALID);
+	dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
 	req->td_data = dma_desc;
 	req->td_data_last = dma_desc;
 	req->chain_len = 1;
@@ -2414,7 +2414,7 @@
 			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
 		else /* OUT */
 			dev->gadget.ep0 = &ep->ep;
-		spin_unlock(&dev->lock);
+		spin_lock(&dev->lock);
 		/* If Mass storage Reset */
 		if ((dev->setup_data.bRequestType == 0x21) &&
 		    (dev->setup_data.bRequest == 0xFF))
@@ -2422,7 +2422,7 @@
 		/* call gadget with setup data received */
 		setup_supported = dev->driver->setup(&dev->gadget,
 						     &dev->setup_data);
-		spin_lock(&dev->lock);
+		spin_unlock(&dev->lock);
 
 		if (dev->setup_data.bRequestType & USB_DIR_IN) {
 			ep->td_data->status = (ep->td_data->status &
@@ -2594,9 +2594,9 @@
 		empty_req_queue(ep);
 	}
 	if (dev->driver) {
-		spin_unlock(&dev->lock);
-		usb_gadget_udc_reset(&dev->gadget, dev->driver);
 		spin_lock(&dev->lock);
+		usb_gadget_udc_reset(&dev->gadget, dev->driver);
+		spin_unlock(&dev->lock);
 	}
 }
 
@@ -2675,9 +2675,9 @@
 		dev->ep[i].halted = 0;
 	}
 	dev->stall = 0;
-	spin_unlock(&dev->lock);
-	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
 	spin_lock(&dev->lock);
+	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+	spin_unlock(&dev->lock);
 }
 
 /**
@@ -2712,9 +2712,9 @@
 	dev->stall = 0;
 
 	/* call gadget zero with setup data received */
-	spin_unlock(&dev->lock);
-	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
 	spin_lock(&dev->lock);
+	ret = dev->driver->setup(&dev->gadget, &dev->setup_data);
+	spin_unlock(&dev->lock);
 }
 
 /**
@@ -2747,18 +2747,18 @@
 	if (dev_intr & UDC_DEVINT_US) {
 		if (dev->driver
 			&& dev->driver->suspend) {
-			spin_unlock(&dev->lock);
-			dev->driver->suspend(&dev->gadget);
 			spin_lock(&dev->lock);
+			dev->driver->suspend(&dev->gadget);
+			spin_unlock(&dev->lock);
 		}
 
 		vbus = pch_vbus_gpio_get_value(dev);
 		if ((dev->vbus_session == 0)
 			&& (vbus != 1)) {
 			if (dev->driver && dev->driver->disconnect) {
-				spin_unlock(&dev->lock);
-				dev->driver->disconnect(&dev->gadget);
 				spin_lock(&dev->lock);
+				dev->driver->disconnect(&dev->gadget);
+				spin_unlock(&dev->lock);
 			}
 			pch_udc_reconnect(dev);
 		} else if ((dev->vbus_session == 0)
@@ -2895,11 +2895,21 @@
 		ep->in = ~i & 1;
 		ep->ep.name = ep_string[i];
 		ep->ep.ops = &pch_udc_ep_ops;
-		if (ep->in)
+		if (ep->in) {
 			ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
-		else
+			ep->ep.caps.dir_in = true;
+		} else {
 			ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
 					  UDC_EP_REG_SHIFT;
+			ep->ep.caps.dir_out = true;
+		}
+		if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
 		/* need to set ep->ep.maxpacket and set Default Configuration?*/
 		usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
 		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index f6cbe66..b82cb14 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1176,6 +1176,7 @@
 	INIT_LIST_HEAD (&dev->gadget.ep_list);
 	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
 	dev->ep0state = EP0_IDLE;
+	dev->gadget.quirk_altset_not_supp = 1;
 
 	/* basic endpoint records init */
 	for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
@@ -1821,6 +1822,8 @@
 			.name		= ep0name,
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= EP0_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+						USB_EP_CAPS_DIR_ALL),
 		},
 		.dev		= &memory,
 		.reg_udccs	= &UDCCS0,
@@ -1833,6 +1836,8 @@
 			.name		= "ep1in-bulk",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= BULK_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_IN),
 		},
 		.dev		= &memory,
 		.fifo_size	= BULK_FIFO_SIZE,
@@ -1846,6 +1851,8 @@
 			.name		= "ep2out-bulk",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= BULK_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_OUT),
 		},
 		.dev		= &memory,
 		.fifo_size	= BULK_FIFO_SIZE,
@@ -1861,6 +1868,8 @@
 			.name		= "ep3in-iso",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= ISO_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+						USB_EP_CAPS_DIR_IN),
 		},
 		.dev		= &memory,
 		.fifo_size	= ISO_FIFO_SIZE,
@@ -1874,6 +1883,8 @@
 			.name		= "ep4out-iso",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= ISO_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+						USB_EP_CAPS_DIR_OUT),
 		},
 		.dev		= &memory,
 		.fifo_size	= ISO_FIFO_SIZE,
@@ -1888,6 +1899,7 @@
 			.name		= "ep5in-int",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= INT_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(0, 0),
 		},
 		.dev		= &memory,
 		.fifo_size	= INT_FIFO_SIZE,
@@ -1903,6 +1915,8 @@
 			.name		= "ep6in-bulk",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= BULK_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_IN),
 		},
 		.dev		= &memory,
 		.fifo_size	= BULK_FIFO_SIZE,
@@ -1916,6 +1930,8 @@
 			.name		= "ep7out-bulk",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= BULK_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_OUT),
 		},
 		.dev		= &memory,
 		.fifo_size	= BULK_FIFO_SIZE,
@@ -1930,6 +1946,8 @@
 			.name		= "ep8in-iso",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= ISO_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+						USB_EP_CAPS_DIR_IN),
 		},
 		.dev		= &memory,
 		.fifo_size	= ISO_FIFO_SIZE,
@@ -1943,6 +1961,8 @@
 			.name		= "ep9out-iso",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= ISO_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+						USB_EP_CAPS_DIR_OUT),
 		},
 		.dev		= &memory,
 		.fifo_size	= ISO_FIFO_SIZE,
@@ -1957,6 +1977,7 @@
 			.name		= "ep10in-int",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= INT_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(0, 0),
 		},
 		.dev		= &memory,
 		.fifo_size	= INT_FIFO_SIZE,
@@ -1972,6 +1993,8 @@
 			.name		= "ep11in-bulk",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= BULK_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_IN),
 		},
 		.dev		= &memory,
 		.fifo_size	= BULK_FIFO_SIZE,
@@ -1985,6 +2008,8 @@
 			.name		= "ep12out-bulk",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= BULK_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_OUT),
 		},
 		.dev		= &memory,
 		.fifo_size	= BULK_FIFO_SIZE,
@@ -1999,6 +2024,8 @@
 			.name		= "ep13in-iso",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= ISO_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+						USB_EP_CAPS_DIR_IN),
 		},
 		.dev		= &memory,
 		.fifo_size	= ISO_FIFO_SIZE,
@@ -2012,6 +2039,8 @@
 			.name		= "ep14out-iso",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= ISO_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+						USB_EP_CAPS_DIR_OUT),
 		},
 		.dev		= &memory,
 		.fifo_size	= ISO_FIFO_SIZE,
@@ -2026,6 +2055,7 @@
 			.name		= "ep15in-int",
 			.ops		= &pxa25x_ep_ops,
 			.maxpacket	= INT_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(0, 0),
 		},
 		.dev		= &memory,
 		.fifo_size	= INT_FIFO_SIZE,
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index b51226a..670ac0b 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -1710,6 +1710,7 @@
 	INIT_LIST_HEAD(&dev->gadget.ep_list);
 	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
 	dev->udc_usb_ep[0].pxa_ep = &dev->pxa_ep[0];
+	dev->gadget.quirk_altset_not_supp = 1;
 	ep0_idle(dev);
 
 	/* PXA endpoints init */
@@ -2422,7 +2423,7 @@
 		}
 		udc->udc_command = mach->udc_command;
 	} else {
-		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL);
+		udc->gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_ASIS);
 	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.h b/drivers/usb/gadget/udc/pxa27x_udc.h
index 11e1423..cea2cb7 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.h
+++ b/drivers/usb/gadget/udc/pxa27x_udc.h
@@ -234,25 +234,35 @@
 /*
  * Endpoint definition helpers
  */
-#define USB_EP_DEF(addr, bname, dir, type, maxpkt) \
-{ .usb_ep = { .name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, }, \
+#define USB_EP_DEF(addr, bname, dir, type, maxpkt, ctype, cdir) \
+{ .usb_ep = {	.name = bname, .ops = &pxa_ep_ops, .maxpacket = maxpkt, \
+		.caps = USB_EP_CAPS(ctype, cdir), }, \
   .desc = {	.bEndpointAddress = addr | (dir ? USB_DIR_IN : 0), \
-		.bmAttributes = type, \
+		.bmAttributes = USB_ENDPOINT_XFER_ ## type, \
 		.wMaxPacketSize = maxpkt, }, \
   .dev = &memory \
 }
-#define USB_EP_BULK(addr, bname, dir) \
-  USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_BULK, BULK_FIFO_SIZE)
-#define USB_EP_ISO(addr, bname, dir) \
-  USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_ISOC, ISO_FIFO_SIZE)
-#define USB_EP_INT(addr, bname, dir) \
-  USB_EP_DEF(addr, bname, dir, USB_ENDPOINT_XFER_INT, INT_FIFO_SIZE)
-#define USB_EP_IN_BULK(n)	USB_EP_BULK(n, "ep" #n "in-bulk", 1)
-#define USB_EP_OUT_BULK(n)	USB_EP_BULK(n, "ep" #n "out-bulk", 0)
-#define USB_EP_IN_ISO(n)	USB_EP_ISO(n,  "ep" #n "in-iso", 1)
-#define USB_EP_OUT_ISO(n)	USB_EP_ISO(n,  "ep" #n "out-iso", 0)
-#define USB_EP_IN_INT(n)	USB_EP_INT(n,  "ep" #n "in-int", 1)
-#define USB_EP_CTRL		USB_EP_DEF(0,  "ep0", 0, 0, EP0_FIFO_SIZE)
+#define USB_EP_BULK(addr, bname, dir, cdir) \
+	USB_EP_DEF(addr, bname, dir, BULK, BULK_FIFO_SIZE, \
+		USB_EP_CAPS_TYPE_BULK, cdir)
+#define USB_EP_ISO(addr, bname, dir, cdir) \
+	USB_EP_DEF(addr, bname, dir, ISOC, ISO_FIFO_SIZE, \
+		USB_EP_CAPS_TYPE_ISO, cdir)
+#define USB_EP_INT(addr, bname, dir, cdir) \
+	USB_EP_DEF(addr, bname, dir, INT, INT_FIFO_SIZE, \
+		USB_EP_CAPS_TYPE_INT, cdir)
+#define USB_EP_IN_BULK(n)	USB_EP_BULK(n, "ep" #n "in-bulk", 1, \
+					USB_EP_CAPS_DIR_IN)
+#define USB_EP_OUT_BULK(n)	USB_EP_BULK(n, "ep" #n "out-bulk", 0, \
+					USB_EP_CAPS_DIR_OUT)
+#define USB_EP_IN_ISO(n)	USB_EP_ISO(n,  "ep" #n "in-iso", 1, \
+					USB_EP_CAPS_DIR_IN)
+#define USB_EP_OUT_ISO(n)	USB_EP_ISO(n,  "ep" #n "out-iso", 0, \
+					USB_EP_CAPS_DIR_OUT)
+#define USB_EP_IN_INT(n)	USB_EP_INT(n,  "ep" #n "in-int", 1, \
+					USB_EP_CAPS_DIR_IN)
+#define USB_EP_CTRL	USB_EP_DEF(0,  "ep0", 0, CONTROL, EP0_FIFO_SIZE, \
+				USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)
 
 #define PXA_EP_DEF(_idx, _addr, dir, _type, maxpkt, _config, iface, altset) \
 { \
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 0293f71..baa0609 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1935,6 +1935,16 @@
 		ep->ep.name = r8a66597_ep_name[i];
 		ep->ep.ops = &r8a66597_ep_ops;
 		usb_ep_set_maxpacket_limit(&ep->ep, 512);
+
+		if (i == 0) {
+			ep->ep.caps.type_control = true;
+		} else {
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
+		}
+		ep->ep.caps.dir_in = true;
+		ep->ep.caps.dir_out = true;
 	}
 	usb_ep_set_maxpacket_limit(&r8a66597->ep[0].ep, 64);
 	r8a66597->ep[0].pipenum = 0;
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
index 85a712a..e9def42 100644
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ b/drivers/usb/gadget/udc/s3c-hsudc.c
@@ -1005,6 +1005,21 @@
 	hsep->stopped = 0;
 	hsep->wedge = 0;
 
+	if (epnum == 0) {
+		hsep->ep.caps.type_control = true;
+		hsep->ep.caps.dir_in = true;
+		hsep->ep.caps.dir_out = true;
+	} else {
+		hsep->ep.caps.type_iso = true;
+		hsep->ep.caps.type_bulk = true;
+		hsep->ep.caps.type_int = true;
+	}
+
+	if (epnum & 1)
+		hsep->ep.caps.dir_in = true;
+	else
+		hsep->ep.caps.dir_out = true;
+
 	set_index(hsudc, epnum);
 	writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR);
 }
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
index 5d9aa81..eb3571e 100644
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ b/drivers/usb/gadget/udc/s3c2410_udc.c
@@ -1691,6 +1691,8 @@
 			.name		= ep0name,
 			.ops		= &s3c2410_ep_ops,
 			.maxpacket	= EP0_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+						USB_EP_CAPS_DIR_ALL),
 		},
 		.dev		= &memory,
 	},
@@ -1702,6 +1704,8 @@
 			.name		= "ep1-bulk",
 			.ops		= &s3c2410_ep_ops,
 			.maxpacket	= EP_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_ALL),
 		},
 		.dev		= &memory,
 		.fifo_size	= EP_FIFO_SIZE,
@@ -1714,6 +1718,8 @@
 			.name		= "ep2-bulk",
 			.ops		= &s3c2410_ep_ops,
 			.maxpacket	= EP_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_ALL),
 		},
 		.dev		= &memory,
 		.fifo_size	= EP_FIFO_SIZE,
@@ -1726,6 +1732,8 @@
 			.name		= "ep3-bulk",
 			.ops		= &s3c2410_ep_ops,
 			.maxpacket	= EP_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_ALL),
 		},
 		.dev		= &memory,
 		.fifo_size	= EP_FIFO_SIZE,
@@ -1738,6 +1746,8 @@
 			.name		= "ep4-bulk",
 			.ops		= &s3c2410_ep_ops,
 			.maxpacket	= EP_FIFO_SIZE,
+			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+						USB_EP_CAPS_DIR_ALL),
 		},
 		.dev		= &memory,
 		.fifo_size	= EP_FIFO_SIZE,
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index 89ed5e7..f660afb 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -131,6 +131,96 @@
 
 /* ------------------------------------------------------------------------- */
 
+/**
+ * gadget_find_ep_by_name - returns the ep whose name matches the string passed
+ *	in the second parameter, or NULL if no such endpoint is found
+ * @g: gadget whose endpoint list is searched
+ * @name: name of the endpoint being looked up
+ */
+struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
+{
+	struct usb_ep *ep;
+
+	gadget_for_each_ep(ep, g) {
+		if (!strcmp(ep->name, name))
+			return ep;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+		struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+		struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+	u8		type;
+	u16		max;
+	int		num_req_streams = 0;
+
+	/* endpoint already claimed? */
+	if (ep->claimed)
+		return 0;
+
+	type = usb_endpoint_type(desc);
+	max = 0x7ff & usb_endpoint_maxp(desc);
+
+	if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
+		return 0;
+	if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
+		return 0;
+
+	if (max > ep->maxpacket_limit)
+		return 0;
+
+	/* "high bandwidth" works only at high speed */
+	if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
+		return 0;
+
+	switch (type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		/* only support ep0 for portable CONTROL traffic */
+		return 0;
+	case USB_ENDPOINT_XFER_ISOC:
+		if (!ep->caps.type_iso)
+			return 0;
+		/* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
+		if (!gadget_is_dualspeed(gadget) && max > 1023)
+			return 0;
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		if (!ep->caps.type_bulk)
+			return 0;
+		if (ep_comp && gadget_is_superspeed(gadget)) {
+			/* Get the number of required streams from the
+			 * EP companion descriptor and see if the EP
+			 * matches it
+			 */
+			num_req_streams = ep_comp->bmAttributes & 0x1f;
+			if (num_req_streams > ep->max_streams)
+				return 0;
+		}
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		/* Bulk endpoints handle interrupt transfers,
+		 * except the toggle-quirky iso-synch kind
+		 */
+		if (!ep->caps.type_int && !ep->caps.type_bulk)
+			return 0;
+		/* INT:  limit 64 bytes full speed, 1024 high/super speed */
+		if (!gadget_is_dualspeed(gadget) && max > 64)
+			return 0;
+		break;
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
+
+/* ------------------------------------------------------------------------- */
+
 static void usb_gadget_state_work(struct work_struct *work)
 {
 	struct usb_gadget *gadget = work_to_gadget(work);
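
usb_gadget_ep_match_desc() above encodes the rules (direction, maxpacket, transfer type, stream count) that decide whether a hardware endpoint can back a given descriptor. A compact standalone model of the direction/type/maxpacket checks, for illustration only (simplified; not the kernel API):

/* Simplified, standalone model of descriptor-to-endpoint matching.
 * Illustrative only; the real checks live in usb_gadget_ep_match_desc(). */
#include <stdbool.h>
#include <stdio.h>

enum xfer { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

struct ep {
	bool type_iso, type_bulk, type_int, dir_in, dir_out, claimed;
	unsigned maxpacket_limit;
};

static bool ep_matches(const struct ep *ep, enum xfer type,
		       bool dir_in, unsigned maxp)
{
	if (ep->claimed)
		return false;
	if (dir_in ? !ep->dir_in : !ep->dir_out)
		return false;
	if (maxp > ep->maxpacket_limit)
		return false;
	switch (type) {
	case XFER_CONTROL:
		return false;		/* only ep0 carries control traffic */
	case XFER_ISOC:
		return ep->type_iso;
	case XFER_BULK:
		return ep->type_bulk;
	case XFER_INT:
		/* bulk-capable endpoints can also carry interrupt traffic */
		return ep->type_int || ep->type_bulk;
	}
	return false;
}

int main(void)
{
	struct ep e = { .type_bulk = true, .dir_in = true,
			.maxpacket_limit = 512 };
	printf("bulk IN 512: %d, int IN 64: %d, iso IN 1024: %d\n",
	       ep_matches(&e, XFER_BULK, true, 512),
	       ep_matches(&e, XFER_INT, true, 64),
	       ep_matches(&e, XFER_ISOC, true, 1024));
	return 0;
}
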
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 1f24274..1cbb0ac 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1317,12 +1317,21 @@
 			snprintf(ep->name, EPNAME_SIZE, "ep%d", ep_number);
 			ep->ep_usb.name = ep->name;
 			ep->ep_usb.ops = &xusb_ep_ops;
+
+			ep->ep_usb.caps.type_iso = true;
+			ep->ep_usb.caps.type_bulk = true;
+			ep->ep_usb.caps.type_int = true;
 		} else {
 			ep->ep_usb.name = ep0name;
 			usb_ep_set_maxpacket_limit(&ep->ep_usb, EP0_MAX_PACKET);
 			ep->ep_usb.ops = &xusb_ep0_ops;
+
+			ep->ep_usb.caps.type_control = true;
 		}
 
+		ep->ep_usb.caps.dir_in = true;
+		ep->ep_usb.caps.dir_out = true;
+
 		ep->udc = udc;
 		ep->epnumber = ep_number;
 		ep->desc = NULL;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8afc3c1..079991e 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -32,7 +32,14 @@
        default y
 
 config USB_XHCI_PLATFORM
-	tristate
+	tristate "Generic xHCI driver for a platform device"
+	---help---
+	  Adds an xHCI host driver for a generic platform device, which
+	  provides a memory space and an irq.
+	  It is also a prerequisite for platform specific drivers that
+	  implement some extra quirks.
+
+	  If unsure, say N.
 
 config USB_XHCI_MVEBU
 	tristate "xHCI support for Marvell Armada 375/38x"
@@ -441,10 +448,10 @@
 	  PXA27x/PXA3xx chips.
 
 config USB_OHCI_HCD_AT91
-        tristate "Support for Atmel on-chip OHCI USB controller"
-        depends on USB_OHCI_HCD && ARCH_AT91
-        default y
-        ---help---
+	tristate "Support for Atmel on-chip OHCI USB controller"
+	depends on USB_OHCI_HCD && ARCH_AT91 && OF
+	default y
+	---help---
           Enables support for the on-chip OHCI controller on
           Atmel chips.
 
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 526cfab..5398e3d 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -2,7 +2,8 @@
  * Broadcom specific Advanced Microcontroller Bus
  * Broadcom USB-core driver (BCMA bus glue)
  *
- * Copyright 2011-2012 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2011-2015 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright 2015 Felix Fietkau <nbd@openwrt.org>
  *
  * Based on ssb-ohci driver
  * Copyright 2007 Michael Buesch <m@bues.ch>
@@ -23,6 +24,8 @@
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
 #include <linux/usb/ehci_pdriver.h>
 #include <linux/usb/ohci_pdriver.h>
 
@@ -88,7 +91,7 @@
 }
 
 /* based on arch/mips/brcm-boards/bcm947xx/pcibios.c */
-static void bcma_hcd_init_chip(struct bcma_device *dev)
+static void bcma_hcd_init_chip_mips(struct bcma_device *dev)
 {
 	u32 tmp;
 
@@ -159,6 +162,87 @@
 	}
 }
 
+static void bcma_hcd_init_chip_arm_phy(struct bcma_device *dev)
+{
+	struct bcma_device *arm_core;
+	void __iomem *dmu;
+
+	arm_core = bcma_find_core(dev->bus, BCMA_CORE_ARMCA9);
+	if (!arm_core) {
+		dev_err(&dev->dev, "can not find ARM Cortex A9 ihost core\n");
+		return;
+	}
+
+	dmu = ioremap_nocache(arm_core->addr_s[0], 0x1000);
+	if (!dmu) {
+		dev_err(&dev->dev, "can not map ARM Cortex A9 ihost core\n");
+		return;
+	}
+
+	/* Unlock DMU PLL settings */
+	iowrite32(0x0000ea68, dmu + 0x180);
+
+	/* Write USB 2.0 PLL control setting */
+	iowrite32(0x00dd10c3, dmu + 0x164);
+
+	/* Lock DMU PLL settings */
+	iowrite32(0x00000000, dmu + 0x180);
+
+	iounmap(dmu);
+}
+
+static void bcma_hcd_init_chip_arm_hc(struct bcma_device *dev)
+{
+	u32 val;
+
+	/*
+	 * Delay after PHY initialized to ensure HC is ready to be configured
+	 */
+	usleep_range(1000, 2000);
+
+	/* Set packet buffer OUT threshold */
+	val = bcma_read32(dev, 0x94);
+	val &= 0xffff;
+	val |= 0x80 << 16;
+	bcma_write32(dev, 0x94, val);
+
+	/* Enable break memory transfer */
+	val = bcma_read32(dev, 0x9c);
+	val |= 1;
+	bcma_write32(dev, 0x9c, val);
+}
+
+static void bcma_hcd_init_chip_arm(struct bcma_device *dev)
+{
+	bcma_core_enable(dev, 0);
+
+	if (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4707 ||
+	    dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM53018) {
+		if (dev->bus->chipinfo.pkg == BCMA_PKG_ID_BCM4707 ||
+		    dev->bus->chipinfo.pkg == BCMA_PKG_ID_BCM4708)
+			bcma_hcd_init_chip_arm_phy(dev);
+
+		bcma_hcd_init_chip_arm_hc(dev);
+	}
+}
+
+static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val)
+{
+	int gpio;
+
+	gpio = of_get_named_gpio(dev->dev.of_node, "vcc-gpio", 0);
+	if (!gpio_is_valid(gpio))
+		return;
+
+	if (val) {
+		gpio_request(gpio, "bcma-hcd-gpio");
+		gpio_set_value(gpio, 1);
+	} else {
+		gpio_set_value(gpio, 0);
+		gpio_free(gpio);
+	}
+}
+
 static const struct usb_ehci_pdata ehci_pdata = {
 };
 
@@ -169,7 +253,7 @@
 {
 	struct platform_device *hci_dev;
 	struct resource hci_res[2];
-	int ret = -ENOMEM;
+	int ret;
 
 	memset(hci_res, 0, sizeof(hci_res));
 
@@ -183,7 +267,7 @@
 	hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
 					"ehci-platform" , 0);
 	if (!hci_dev)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	hci_dev->dev.parent = &dev->dev;
 	hci_dev->dev.dma_mask = &hci_dev->dev.coherent_dma_mask;
@@ -214,39 +298,45 @@
 static int bcma_hcd_probe(struct bcma_device *dev)
 {
 	int err;
-	u16 chipid_top;
 	u32 ohci_addr;
 	struct bcma_hcd_device *usb_dev;
 	struct bcma_chipinfo *chipinfo;
 
 	chipinfo = &dev->bus->chipinfo;
-	/* USBcores are only connected on embedded devices. */
-	chipid_top = (chipinfo->id & 0xFF00);
-	if (chipid_top != 0x4700 && chipid_top != 0x5300)
-		return -ENODEV;
 
 	/* TODO: Probably need checks here; is the core connected? */
 
 	if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
 		return -EOPNOTSUPP;
 
-	usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
+	usb_dev = devm_kzalloc(&dev->dev, sizeof(struct bcma_hcd_device),
+			       GFP_KERNEL);
 	if (!usb_dev)
 		return -ENOMEM;
 
-	bcma_hcd_init_chip(dev);
+	bcma_hci_platform_power_gpio(dev, true);
+
+	switch (dev->id.id) {
+	case BCMA_CORE_NS_USB20:
+		bcma_hcd_init_chip_arm(dev);
+		break;
+	case BCMA_CORE_USB20_HOST:
+		bcma_hcd_init_chip_mips(dev);
+		break;
+	default:
+		return -ENODEV;
+	}
 
 	/* In AI chips EHCI is addrspace 0, OHCI is 1 */
 	ohci_addr = dev->addr_s[0];
-	if ((chipinfo->id == 0x5357 || chipinfo->id == 0x4749)
+	if ((chipinfo->id == BCMA_CHIP_ID_BCM5357 ||
+	     chipinfo->id == BCMA_CHIP_ID_BCM4749)
 	    && chipinfo->rev == 0)
 		ohci_addr = 0x18009000;
 
 	usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
-	if (IS_ERR(usb_dev->ohci_dev)) {
-		err = PTR_ERR(usb_dev->ohci_dev);
-		goto err_free_usb_dev;
-	}
+	if (IS_ERR(usb_dev->ohci_dev))
+		return PTR_ERR(usb_dev->ohci_dev);
 
 	usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
 	if (IS_ERR(usb_dev->ehci_dev)) {
@@ -259,8 +349,6 @@
 
 err_unregister_ohci_dev:
 	platform_device_unregister(usb_dev->ohci_dev);
-err_free_usb_dev:
-	kfree(usb_dev);
 	return err;
 }
 
@@ -280,6 +368,7 @@
 
 static void bcma_hcd_shutdown(struct bcma_device *dev)
 {
+	bcma_hci_platform_power_gpio(dev, false);
 	bcma_core_disable(dev, 0);
 }
 
@@ -287,6 +376,7 @@
 
 static int bcma_hcd_suspend(struct bcma_device *dev)
 {
+	bcma_hci_platform_power_gpio(dev, false);
 	bcma_core_disable(dev, 0);
 
 	return 0;
@@ -294,6 +384,7 @@
 
 static int bcma_hcd_resume(struct bcma_device *dev)
 {
+	bcma_hci_platform_power_gpio(dev, true);
 	bcma_core_enable(dev, 0);
 
 	return 0;
@@ -306,6 +397,7 @@
 
 static const struct bcma_device_id bcma_hcd_table[] = {
 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS),
+	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_USB20, BCMA_ANY_REV, BCMA_ANY_CLASS),
 	{},
 };
 MODULE_DEVICE_TABLE(bcma, bcma_hcd_table);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 5352e74..3b6eb21 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -127,7 +127,18 @@
 
 	/* Enable USB controller, 83xx or 8536 */
 	if (pdata->have_sysif_regs && pdata->controller_ver < FSL_USB_VER_1_6)
-		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
+		clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+				CONTROL_REGISTER_W1C_MASK, 0x4);
+
+	/*
+	 * Enable UTMI phy and program PTS field in UTMI mode before asserting
+	 * controller reset for USB Controller version 2.5
+	 */
+	if (pdata->has_fsl_erratum_a007792) {
+		clrsetbits_be32(hcd->regs + FSL_SOC_USB_CTRL,
+				CONTROL_REGISTER_W1C_MASK, CTRL_UTMI_PHY_EN);
+		writel(PORT_PTS_UTMI, hcd->regs + FSL_SOC_USB_PORTSC1);
+	}
 
 	/* Don't need to set host mode here. It will be done by tdi_reset() */
 
@@ -191,9 +202,11 @@
 	case FSL_USB2_PHY_ULPI:
 		if (pdata->have_sysif_regs && pdata->controller_ver) {
 			/* controller version 1.6 or above */
-			clrbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
-			setbits32(non_ehci + FSL_SOC_USB_CTRL,
-				ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
+			clrbits32(non_ehci + FSL_SOC_USB_CTRL,
+				  CONTROL_REGISTER_W1C_MASK | UTMI_PHY_EN);
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+					CONTROL_REGISTER_W1C_MASK,
+					ULPI_PHY_CLK_SEL | USB_CTRL_USB_EN);
 		}
 		portsc |= PORT_PTS_ULPI;
 		break;
@@ -204,30 +217,33 @@
 		portsc |= PORT_PTS_PTW;
 		/* fall through */
 	case FSL_USB2_PHY_UTMI:
+	case FSL_USB2_PHY_UTMI_DUAL:
 		if (pdata->have_sysif_regs && pdata->controller_ver) {
 			/* controller version 1.6 or above */
-			setbits32(non_ehci + FSL_SOC_USB_CTRL, UTMI_PHY_EN);
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+					CONTROL_REGISTER_W1C_MASK, UTMI_PHY_EN);
 			mdelay(FSL_UTMI_PHY_DLY);  /* Delay for UTMI PHY CLK to
 						become stable - 10ms*/
 		}
 		/* enable UTMI PHY */
 		if (pdata->have_sysif_regs)
-			setbits32(non_ehci + FSL_SOC_USB_CTRL,
-				  CTRL_UTMI_PHY_EN);
+			clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+					CONTROL_REGISTER_W1C_MASK,
+					CTRL_UTMI_PHY_EN);
 		portsc |= PORT_PTS_UTMI;
 		break;
 	case FSL_USB2_PHY_NONE:
 		break;
 	}
 
-	if (pdata->have_sysif_regs &&
-	    pdata->controller_ver > FSL_USB_VER_1_6 &&
-	    (phy_mode == FSL_USB2_PHY_ULPI)) {
-		/* check PHY_CLK_VALID to get phy clk valid */
-		if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) &
-				PHY_CLK_VALID, FSL_USB_PHY_CLK_TIMEOUT, 0) ||
-				in_be32(non_ehci + FSL_SOC_USB_PRICTRL))) {
-			dev_warn(hcd->self.controller, "USB PHY clock invalid\n");
+	/*
+	 * check PHY_CLK_VALID to determine phy clock presence before writing
+	 * to portsc
+	 */
+	if (pdata->check_phy_clk_valid) {
+		if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) {
+			dev_warn(hcd->self.controller,
+				 "USB PHY clock invalid\n");
 			return -EINVAL;
 		}
 	}
@@ -235,7 +251,8 @@
 	ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
 
 	if (phy_mode != FSL_USB2_PHY_ULPI && pdata->have_sysif_regs)
-		setbits32(non_ehci + FSL_SOC_USB_CTRL, USB_CTRL_USB_EN);
+		clrsetbits_be32(non_ehci + FSL_SOC_USB_CTRL,
+				CONTROL_REGISTER_W1C_MASK, USB_CTRL_USB_EN);
 
 	return 0;
 }
@@ -261,6 +278,10 @@
 		out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
 	}
 
+	/* Deal with USB erratum A-005275 */
+	if (pdata->has_fsl_erratum_a005275 == 1)
+		ehci->has_fsl_hs_errata = 1;
+
 	if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
 			(pdata->operating_mode == FSL_USB2_DR_OTG))
 		if (ehci_fsl_setup_phy(hcd, pdata->phy_mode, 0))
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index dbd292e..1a8a60a 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -52,6 +52,7 @@
 #define SNOOP_SIZE_2GB		0x1e
 
 /* control Register Bit Masks */
+#define CONTROL_REGISTER_W1C_MASK       0x00020000  /* W1C: PHY_CLK_VALID */
 #define ULPI_INT_EN             (1<<0)
 #define WU_INT_EN               (1<<1)
 #define USB_CTRL_USB_EN         (1<<2)
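
CONTROL_REGISTER_W1C_MASK above exists because this control register contains a write-1-to-clear status bit (PHY_CLK_VALID, bit 17): a plain read-modify-write would read that bit back as 1 and clear it on the write, which is why the clrsetbits_be32() calls in ehci-fsl.c always mask it out. A standalone sketch of the effect (made-up values and helper; not kernel code):

/* Standalone illustration of masking a write-1-to-clear (W1C) bit
 * during read-modify-write; values and helper are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define PHY_CLK_VALID	(1u << 17)	/* W1C status bit, as in the mask above */
#define USB_CTRL_USB_EN	(1u << 2)

static uint32_t clrsetbits(uint32_t reg, uint32_t clr, uint32_t set)
{
	return (reg & ~clr) | set;
}

int main(void)
{
	uint32_t ctrl = PHY_CLK_VALID;		/* status bit currently set */

	/* naive RMW writes PHY_CLK_VALID back as 1, which would clear it */
	uint32_t naive = ctrl | USB_CTRL_USB_EN;
	/* masked RMW leaves the W1C bit untouched on the write */
	uint32_t masked = clrsetbits(ctrl, PHY_CLK_VALID, USB_CTRL_USB_EN);

	printf("naive write 0x%08x, masked write 0x%08x\n", naive, masked);
	return 0;
}
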
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 22abb68..086a711 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1221,6 +1221,13 @@
 				 */
 				ehci->reset_done [wIndex] = jiffies
 						+ msecs_to_jiffies (50);
+
+				/*
+				 * Force full-speed connect for FSL high-speed
+				 * erratum; disable HS Chirp by setting PFSC bit
+				 */
+				if (ehci_has_fsl_hs_errata(ehci))
+					temp |= (1 << PORTSC_FSL_PFSC);
 			}
 			ehci_writel(ehci, temp, status_reg);
 			break;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 2593def..5c3c085 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -45,6 +45,7 @@
 	struct reset_control *rst;
 	struct phy **phys;
 	int num_phys;
+	bool reset_on_resume;
 };
 
 static const char hcd_name[] = "ehci-platform";
@@ -56,7 +57,6 @@
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 	int retval;
 
-	hcd->has_tt = pdata->has_tt;
 	ehci->has_synopsys_hc_bug = pdata->has_synopsys_hc_bug;
 
 	if (pdata->pre_setup) {
@@ -193,11 +193,11 @@
 
 		if (of_property_read_bool(dev->dev.of_node,
 					  "needs-reset-on-resume"))
-			pdata->reset_on_resume = 1;
+			priv->reset_on_resume = true;
 
 		if (of_property_read_bool(dev->dev.of_node,
 					  "has-transaction-translator"))
-			pdata->has_tt = 1;
+			hcd->has_tt = 1;
 
 		priv->num_phys = of_count_phandle_with_args(dev->dev.of_node,
 				"phys", "#phy-cells");
@@ -247,6 +247,10 @@
 		ehci->big_endian_desc = 1;
 	if (pdata->big_endian_mmio)
 		ehci->big_endian_mmio = 1;
+	if (pdata->has_tt)
+		hcd->has_tt = 1;
+	if (pdata->reset_on_resume)
+		priv->reset_on_resume = true;
 
 #ifndef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
 	if (ehci->big_endian_mmio) {
@@ -359,6 +363,7 @@
 	struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
 	struct platform_device *pdev =
 		container_of(dev, struct platform_device, dev);
+	struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
 
 	if (pdata->power_on) {
 		int err = pdata->power_on(pdev);
@@ -366,7 +371,7 @@
 			return err;
 	}
 
-	ehci_resume(hcd, pdata->reset_on_resume);
+	ehci_resume(hcd, priv->reset_on_resume);
 	return 0;
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index 7e4bd39..b7c5cfa 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -54,7 +54,6 @@
 	struct platform_device *pdev = to_platform_device(hcd->self.controller);
 	struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
-	int retval;
 	u32 threshold;
 
 	/* Set EHCI packet buffer IN/OUT threshold to 128 bytes */
@@ -62,11 +61,7 @@
 	writel(threshold, hcd->regs + AHB2STBUS_INSREG01);
 
 	ehci->caps = hcd->regs + pdata->caps_offset;
-	retval = ehci_setup(hcd);
-	if (retval)
-		return retval;
-
-	return 0;
+	return ehci_setup(hcd);
 }
 
 static int st_ehci_platform_power_on(struct platform_device *dev)
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 5e44407..5216f2b 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -29,7 +29,7 @@
 	int			count = PAGE_SIZE;
 	char			*ptr = buf;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	nports = HCS_N_PORTS(ehci->hcs_params);
 
 	for (index = 0; index < nports; ++index) {
@@ -54,7 +54,7 @@
 	struct ehci_hcd		*ehci;
 	int			portnum, new_owner;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	new_owner = PORT_OWNER;		/* Owned by companion */
 	if (sscanf(buf, "%d", &portnum) != 1)
 		return -EINVAL;
@@ -85,7 +85,7 @@
 	struct ehci_hcd		*ehci;
 	int			n;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
 	return n;
 }
@@ -101,7 +101,7 @@
 	unsigned long		flags;
 	ssize_t			ret;
 
-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+	ehci = hcd_to_ehci(dev_get_drvdata(dev));
 	if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
 		return -EINVAL;
 
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index f700157..46f62e4 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -215,6 +215,7 @@
 	/* SILICON QUIRKS */
 	unsigned		no_selective_suspend:1;
 	unsigned		has_fsl_port_bug:1; /* FreeScale */
+	unsigned		has_fsl_hs_errata:1;	/* Freescale HS quirk */
 	unsigned		big_endian_mmio:1;
 	unsigned		big_endian_desc:1;
 	unsigned		big_endian_capbase:1;
@@ -686,6 +687,17 @@
 #define	ehci_has_fsl_portno_bug(e)		(0)
 #endif
 
+#define PORTSC_FSL_PFSC	24	/* Port Force Full-Speed Connect */
+
+#if defined(CONFIG_PPC_85xx)
+/* Some Freescale processors have an erratum (USB A-005275) in which
+ * incoming packets get corrupted in HS mode
+ */
+#define ehci_has_fsl_hs_errata(e)	((e)->has_fsl_hs_errata)
+#else
+#define ehci_has_fsl_hs_errata(e)	(0)
+#endif
+
 /*
  * While most USB host controllers implement their registers in
  * little-endian format, a minority (celleb companion chip) implement
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 5e0d600..534c4c5 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -69,6 +69,8 @@
 		return FSL_USB2_PHY_UTMI;
 	if (!strcasecmp(phy_type, "utmi_wide"))
 		return FSL_USB2_PHY_UTMI_WIDE;
+	if (!strcasecmp(phy_type, "utmi_dual"))
+		return FSL_USB2_PHY_UTMI_DUAL;
 	if (!strcasecmp(phy_type, "serial"))
 		return FSL_USB2_PHY_SERIAL;
 
@@ -119,9 +121,9 @@
 
 static const struct of_device_id fsl_usb2_mph_dr_of_match[];
 
-static int usb_get_ver_info(struct device_node *np)
+static enum fsl_usb2_controller_ver usb_get_ver_info(struct device_node *np)
 {
-	int ver = -1;
+	enum fsl_usb2_controller_ver ver = FSL_USB_VER_NONE;
 
 	/*
 	 * returns 1 for usb controller version 1.6
@@ -142,7 +144,7 @@
 		else /* for previous controller versions */
 			ver = FSL_USB_VER_OLD;
 
-		if (ver > -1)
+		if (ver > FSL_USB_VER_NONE)
 			return ver;
 	}
 
@@ -214,8 +216,27 @@
 	pdata->phy_mode = determine_usb_phy(prop);
 	pdata->controller_ver = usb_get_ver_info(np);
 
+	/* Enable erratum workarounds based on device tree properties */
+	if (of_get_property(np, "fsl,usb-erratum-a007792", NULL))
+		pdata->has_fsl_erratum_a007792 = 1;
+	else
+		pdata->has_fsl_erratum_a007792 = 0;
+	if (of_get_property(np, "fsl,usb-erratum-a005275", NULL))
+		pdata->has_fsl_erratum_a005275 = 1;
+	else
+		pdata->has_fsl_erratum_a005275 = 0;
+
+	/*
+	 * Determine whether phy_clk_valid needs to be checked
+	 * by reading property in device tree
+	 */
+	if (of_get_property(np, "phy-clk-valid", NULL))
+		pdata->check_phy_clk_valid = 1;
+	else
+		pdata->check_phy_clk_valid = 0;
+
 	if (pdata->have_sysif_regs) {
-		if (pdata->controller_ver < 0) {
+		if (pdata->controller_ver == FSL_USB_VER_NONE) {
 			dev_warn(&ofdev->dev, "Could not get controller version\n");
 			return -ENODEV;
 		}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 15df00c..342ffd1 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -36,6 +36,17 @@
 #define hcd_to_ohci_at91_priv(h) \
 	((struct ohci_at91_priv *)hcd_to_ohci(h)->priv)
 
+#define AT91_MAX_USBH_PORTS	3
+struct at91_usbh_data {
+	int vbus_pin[AT91_MAX_USBH_PORTS];	/* port power-control pin */
+	int overcurrent_pin[AT91_MAX_USBH_PORTS];
+	u8 ports;				/* number of ports on root hub */
+	u8 overcurrent_supported;
+	u8 vbus_pin_active_low[AT91_MAX_USBH_PORTS];
+	u8 overcurrent_status[AT91_MAX_USBH_PORTS];
+	u8 overcurrent_changed[AT91_MAX_USBH_PORTS];
+};
+
 struct ohci_at91_priv {
 	struct clk *iclk;
 	struct clk *fclk;
@@ -431,7 +442,6 @@
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id at91_ohci_dt_ids[] = {
 	{ .compatible = "atmel,at91rm9200-ohci" },
 	{ /* sentinel */ }
@@ -439,16 +449,17 @@
 
 MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
 
-static int ohci_at91_of_init(struct platform_device *pdev)
+/*-------------------------------------------------------------------------*/
+
+static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
-	int i, gpio, ret;
-	enum of_gpio_flags flags;
 	struct at91_usbh_data	*pdata;
-	u32 ports;
-
-	if (!np)
-		return 0;
+	int			i;
+	int			gpio;
+	int			ret;
+	enum of_gpio_flags	flags;
+	u32			ports;
 
 	/* Right now device-tree probed devices don't get dma_mask set.
 	 * Since shared usb code relies on it, set it here for now.
@@ -466,111 +477,83 @@
 		pdata->ports = ports;
 
 	at91_for_each_port(i) {
-		gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i, &flags);
+		/*
+		 * do not configure the PIO if it does not correspond to
+		 * a real USB port on the board
+		 */
+		if (i >= pdata->ports) {
+			pdata->vbus_pin[i] = -EINVAL;
+			continue;
+		}
+
+		gpio = of_get_named_gpio_flags(np, "atmel,vbus-gpio", i,
+					       &flags);
 		pdata->vbus_pin[i] = gpio;
 		if (!gpio_is_valid(gpio))
 			continue;
 		pdata->vbus_pin_active_low[i] = flags & OF_GPIO_ACTIVE_LOW;
+
+		ret = gpio_request(gpio, "ohci_vbus");
+		if (ret) {
+			dev_err(&pdev->dev,
+				"can't request vbus gpio %d\n", gpio);
+			continue;
+		}
+		ret = gpio_direction_output(gpio,
+					!pdata->vbus_pin_active_low[i]);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"can't put vbus gpio %d as output %d\n",
+				gpio, !pdata->vbus_pin_active_low[i]);
+			gpio_free(gpio);
+			continue;
+		}
+
+		ohci_at91_usb_set_power(pdata, i, 1);
 	}
 
-	at91_for_each_port(i)
+	at91_for_each_port(i) {
+		if (i >= pdata->ports) {
+			pdata->overcurrent_pin[i] = -EINVAL;
+			continue;
+		}
+
 		pdata->overcurrent_pin[i] =
 			of_get_named_gpio_flags(np, "atmel,oc-gpio", i, &flags);
 
-	pdev->dev.platform_data = pdata;
+		if (!gpio_is_valid(pdata->overcurrent_pin[i]))
+			continue;
+		gpio = pdata->overcurrent_pin[i];
 
-	return 0;
-}
-#else
-static int ohci_at91_of_init(struct platform_device *pdev)
-{
-	return 0;
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-
-static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
-{
-	struct at91_usbh_data	*pdata;
-	int			i;
-	int			gpio;
-	int			ret;
-
-	ret = ohci_at91_of_init(pdev);
-	if (ret)
-		return ret;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	if (pdata) {
-		at91_for_each_port(i) {
-			/*
-			 * do not configure PIO if not in relation with
-			 * real USB port on board
-			 */
-			if (i >= pdata->ports) {
-				pdata->vbus_pin[i] = -EINVAL;
-				pdata->overcurrent_pin[i] = -EINVAL;
-				break;
-			}
-
-			if (!gpio_is_valid(pdata->vbus_pin[i]))
-				continue;
-			gpio = pdata->vbus_pin[i];
-
-			ret = gpio_request(gpio, "ohci_vbus");
-			if (ret) {
-				dev_err(&pdev->dev,
-					"can't request vbus gpio %d\n", gpio);
-				continue;
-			}
-			ret = gpio_direction_output(gpio,
-						!pdata->vbus_pin_active_low[i]);
-			if (ret) {
-				dev_err(&pdev->dev,
-					"can't put vbus gpio %d as output %d\n",
-					gpio, !pdata->vbus_pin_active_low[i]);
-				gpio_free(gpio);
-				continue;
-			}
-
-			ohci_at91_usb_set_power(pdata, i, 1);
+		ret = gpio_request(gpio, "ohci_overcurrent");
+		if (ret) {
+			dev_err(&pdev->dev,
+				"can't request overcurrent gpio %d\n",
+				gpio);
+			continue;
 		}
 
-		at91_for_each_port(i) {
-			if (!gpio_is_valid(pdata->overcurrent_pin[i]))
-				continue;
-			gpio = pdata->overcurrent_pin[i];
+		ret = gpio_direction_input(gpio);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"can't configure overcurrent gpio %d as input\n",
+				gpio);
+			gpio_free(gpio);
+			continue;
+		}
 
-			ret = gpio_request(gpio, "ohci_overcurrent");
-			if (ret) {
-				dev_err(&pdev->dev,
-					"can't request overcurrent gpio %d\n",
-					gpio);
-				continue;
-			}
-
-			ret = gpio_direction_input(gpio);
-			if (ret) {
-				dev_err(&pdev->dev,
-					"can't configure overcurrent gpio %d as input\n",
-					gpio);
-				gpio_free(gpio);
-				continue;
-			}
-
-			ret = request_irq(gpio_to_irq(gpio),
-					  ohci_hcd_at91_overcurrent_irq,
-					  IRQF_SHARED, "ohci_overcurrent", pdev);
-			if (ret) {
-				gpio_free(gpio);
-				dev_err(&pdev->dev,
-					"can't get gpio IRQ for overcurrent\n");
-			}
+		ret = request_irq(gpio_to_irq(gpio),
+				  ohci_hcd_at91_overcurrent_irq,
+				  IRQF_SHARED, "ohci_overcurrent", pdev);
+		if (ret) {
+			gpio_free(gpio);
+			dev_err(&pdev->dev,
+				"can't get gpio IRQ for overcurrent\n");
 		}
 	}
 
+	pdev->dev.platform_data = pdata;
+
 	device_init_wakeup(&pdev->dev, 1);
 	return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev);
 }
@@ -673,7 +656,7 @@
 	.driver		= {
 		.name	= "at91_ohci",
 		.pm	= &ohci_hcd_at91_pm_ops,
-		.of_match_table	= of_match_ptr(at91_ohci_dt_ids),
+		.of_match_table	= at91_ohci_dt_ids,
 	},
 };
 
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 6352f54..fe3bd1c 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2670,7 +2670,6 @@
 static int oxu_reset(struct usb_hcd *hcd)
 {
 	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
-	int ret;
 
 	spin_lock_init(&oxu->mem_lock);
 	INIT_LIST_HEAD(&oxu->urb_list);
@@ -2696,11 +2695,7 @@
 	oxu->hcs_params = readl(&oxu->caps->hcs_params);
 	oxu->sbrn = 0x20;
 
-	ret = oxu_hcd_init(hcd);
-	if (ret)
-		return ret;
-
-	return 0;
+	return oxu_hcd_init(hcd);
 }
 
 static int oxu_run(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index d516877..a67bd50 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -1542,11 +1542,8 @@
 		(fit ^ FIT) | u132->hc_fminterval);
 	if (retval)
 		return retval;
-	retval = u132_write_pcimem(u132, periodicstart,
-		((9 * fi) / 10) & 0x3fff);
-	if (retval)
-		return retval;
-	return 0;
+	return u132_write_pcimem(u132, periodicstart,
+	       ((9 * fi) / 10) & 0x3fff);
 }
 
 static char *hcfs2string(int state)
@@ -2701,28 +2698,18 @@
 	if (wIndex == 0 || wIndex > u132->num_ports) {
 		return -EINVAL;
 	} else {
-		int retval;
 		int port_index = wIndex - 1;
 		struct u132_port *port = &u132->port[port_index];
 		port->Status &= ~(1 << wValue);
 		switch (wValue) {
 		case USB_PORT_FEAT_SUSPEND:
-			retval = u132_write_pcimem(u132,
-				roothub.portstatus[port_index], RH_PS_PSS);
-			if (retval)
-				return retval;
-			return 0;
+			return u132_write_pcimem(u132,
+			       roothub.portstatus[port_index], RH_PS_PSS);
 		case USB_PORT_FEAT_POWER:
-			retval = u132_write_pcimem(u132,
-				roothub.portstatus[port_index], RH_PS_PPS);
-			if (retval)
-				return retval;
-			return 0;
+			return u132_write_pcimem(u132,
+			       roothub.portstatus[port_index], RH_PS_PPS);
 		case USB_PORT_FEAT_RESET:
-			retval = u132_roothub_portreset(u132, port_index);
-			if (retval)
-				return retval;
-			return 0;
+			return u132_roothub_portreset(u132, port_index);
 		default:
 			return -EPIPE;
 		}
@@ -2737,7 +2724,6 @@
 	} else {
 		int port_index = wIndex - 1;
 		u32 temp;
-		int retval;
 		struct u132_port *port = &u132->port[port_index];
 		port->Status &= ~(1 << wValue);
 		switch (wValue) {
@@ -2773,11 +2759,8 @@
 		default:
 			return -EPIPE;
 		}
-		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
-			 temp);
-		if (retval)
-			return retval;
-		return 0;
+		return u132_write_pcimem(u132, roothub.portstatus[port_index],
+		       temp);
 	}
 }
 
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 745717e..2d16fae 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -99,6 +99,10 @@
 	xhci_dbg(xhci, "HCC PARAMS 0x%x:\n", (unsigned int) temp);
 	xhci_dbg(xhci, "  HC generates %s bit addresses\n",
 			HCC_64BIT_ADDR(temp) ? "64" : "32");
+	xhci_dbg(xhci, "  HC %s Contiguous Frame ID Capability\n",
+			HCC_CFC(temp) ? "has" : "hasn't");
+	xhci_dbg(xhci, "  HC %s generate Stopped - Short Packet event\n",
+			HCC_SPC(temp) ? "can" : "can't");
 	/* FIXME */
 	xhci_dbg(xhci, "  FIXME: more HCCPARAMS debugging\n");
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 32f4d56..a47a1e8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1812,7 +1812,9 @@
 	if (skip)
 		goto td_cleanup;
 
-	if (trb_comp_code == COMP_STOP_INVAL || trb_comp_code == COMP_STOP) {
+	if (trb_comp_code == COMP_STOP_INVAL ||
+			trb_comp_code == COMP_STOP ||
+			trb_comp_code == COMP_STOP_SHORT) {
 		/* The Endpoint Stop Command completion will take care of any
 		 * stopped TDs.  A stopped TD may be restarted, so don't update
 		 * the ring dequeue pointer or take this TD off any lists yet.
@@ -1919,8 +1921,22 @@
 		else
 			*status = 0;
 		break;
-	case COMP_STOP_INVAL:
+	case COMP_STOP_SHORT:
+		if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
+			xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+		else
+			td->urb->actual_length =
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+		return finish_td(xhci, td, event_trb, event, ep, status, false);
 	case COMP_STOP:
+		/* Did we stop at data stage? */
+		if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
+			td->urb->actual_length =
+				td->urb->transfer_buffer_length -
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+		/* fall through */
+	case COMP_STOP_INVAL:
 		return finish_td(xhci, td, event_trb, event, ep, status, false);
 	default:
 		if (!xhci_requires_manual_halt_cleanup(xhci,
@@ -2014,6 +2030,8 @@
 		}
 		if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
 			trb_comp_code = COMP_SHORT_TX;
+	/* fallthrough */
+	case COMP_STOP_SHORT:
 	case COMP_SHORT_TX:
 		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
 				-EREMOTEIO : 0;
@@ -2049,6 +2067,10 @@
 	if (trb_comp_code == COMP_SUCCESS || skip_td) {
 		frame->actual_length = frame->length;
 		td->urb->actual_length += frame->length;
+	} else if (trb_comp_code == COMP_STOP_SHORT) {
+		frame->actual_length =
+			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+		td->urb->actual_length += frame->actual_length;
 	} else {
 		for (cur_trb = ep_ring->dequeue,
 		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -2129,6 +2151,7 @@
 			*status = 0;
 		}
 		break;
+	case COMP_STOP_SHORT:
 	case COMP_SHORT_TX:
 		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
 			*status = -EREMOTEIO;
@@ -2145,8 +2168,20 @@
 				td->urb->ep->desc.bEndpointAddress,
 				td->urb->transfer_buffer_length,
 				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+	/* Stopped - short packet completion */
+	if (trb_comp_code == COMP_STOP_SHORT) {
+		td->urb->actual_length =
+			EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+		if (td->urb->transfer_buffer_length <
+				td->urb->actual_length) {
+			xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
+				EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+			td->urb->actual_length = 0;
+			 /* status will be set by usb core for canceled urbs */
+		}
 	/* Fast path - was this the last TRB in the TD for this URB? */
-	if (event_trb == td->last_trb) {
+	} else if (event_trb == td->last_trb) {
 		if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
 			td->urb->actual_length =
 				td->urb->transfer_buffer_length -
@@ -2300,6 +2335,9 @@
 	case COMP_STOP_INVAL:
 		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
 		break;
+	case COMP_STOP_SHORT:
+		xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
+		break;
 	case COMP_STALL:
 		xhci_dbg(xhci, "Stalled endpoint\n");
 		ep->ep_state |= EP_HALTED;
@@ -3041,9 +3079,11 @@
 	struct xhci_td *td;
 	struct scatterlist *sg;
 	int num_sgs;
-	int trb_buff_len, this_sg_len, running_total;
+	int trb_buff_len, this_sg_len, running_total, ret;
 	unsigned int total_packet_count;
+	bool zero_length_needed;
 	bool first_trb;
+	int last_trb_num;
 	u64 addr;
 	bool more_trbs_coming;
 
@@ -3059,13 +3099,27 @@
 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
 			usb_endpoint_maxp(&urb->ep->desc));
 
-	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
 			num_trbs, urb, 0, mem_flags);
-	if (trb_buff_len < 0)
-		return trb_buff_len;
+	if (ret < 0)
+		return ret;
 
 	urb_priv = urb->hcpriv;
+
+	/* Deal with URB_ZERO_PACKET - need one more td/trb */
+	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+		urb_priv->length == 2;
+	if (zero_length_needed) {
+		num_trbs++;
+		xhci_dbg(xhci, "Creating zero length td.\n");
+		ret = prepare_transfer(xhci, xhci->devs[slot_id],
+				ep_index, urb->stream_id,
+				1, urb, 1, mem_flags);
+		if (ret < 0)
+			return ret;
+	}
+
 	td = urb_priv->td[0];
 
 	/*
@@ -3095,6 +3149,7 @@
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
+	last_trb_num = zero_length_needed ? 2 : 1;
 	/* Queue the first TRB, even if it's zero-length */
 	do {
 		u32 field = 0;
@@ -3112,12 +3167,15 @@
 		/* Chain all the TRBs together; clear the chain bit in the last
 		 * TRB to indicate it's the last TRB in the chain.
 		 */
-		if (num_trbs > 1) {
+		if (num_trbs > last_trb_num) {
 			field |= TRB_CHAIN;
-		} else {
-			/* FIXME - add check for ZERO_PACKET flag before this */
+		} else if (num_trbs == last_trb_num) {
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
+		} else if (zero_length_needed && num_trbs == 1) {
+			trb_buff_len = 0;
+			urb_priv->td[1]->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
 		}
 
 		/* Only set interrupt on short packet for IN endpoints */
@@ -3179,7 +3237,7 @@
 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
 			trb_buff_len =
 				urb->transfer_buffer_length - running_total;
-	} while (running_total < urb->transfer_buffer_length);
+	} while (num_trbs > 0);
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3197,7 +3255,9 @@
 	int num_trbs;
 	struct xhci_generic_trb *start_trb;
 	bool first_trb;
+	int last_trb_num;
 	bool more_trbs_coming;
+	bool zero_length_needed;
 	int start_cycle;
 	u32 field, length_field;
 
@@ -3228,7 +3288,6 @@
 		num_trbs++;
 		running_total += TRB_MAX_BUFF_SIZE;
 	}
-	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
@@ -3237,6 +3296,20 @@
 		return ret;
 
 	urb_priv = urb->hcpriv;
+
+	/* Deal with URB_ZERO_PACKET - need one more td/trb */
+	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+		urb_priv->length == 2;
+	if (zero_length_needed) {
+		num_trbs++;
+		xhci_dbg(xhci, "Creating zero length td.\n");
+		ret = prepare_transfer(xhci, xhci->devs[slot_id],
+				ep_index, urb->stream_id,
+				1, urb, 1, mem_flags);
+		if (ret < 0)
+			return ret;
+	}
+
 	td = urb_priv->td[0];
 
 	/*
@@ -3258,7 +3331,7 @@
 		trb_buff_len = urb->transfer_buffer_length;
 
 	first_trb = true;
-
+	last_trb_num = zero_length_needed ? 2 : 1;
 	/* Queue the first TRB, even if it's zero-length */
 	do {
 		u32 remainder = 0;
@@ -3275,12 +3348,15 @@
 		/* Chain all the TRBs together; clear the chain bit in the last
 		 * TRB to indicate it's the last TRB in the chain.
 		 */
-		if (num_trbs > 1) {
+		if (num_trbs > last_trb_num) {
 			field |= TRB_CHAIN;
-		} else {
-			/* FIXME - add check for ZERO_PACKET flag before this */
+		} else if (num_trbs == last_trb_num) {
 			td->last_trb = ep_ring->enqueue;
 			field |= TRB_IOC;
+		} else if (zero_length_needed && num_trbs == 1) {
+			trb_buff_len = 0;
+			urb_priv->td[1]->last_trb = ep_ring->enqueue;
+			field |= TRB_IOC;
 		}
 
 		/* Only set interrupt on short packet for IN endpoints */
@@ -3318,7 +3394,7 @@
 		trb_buff_len = urb->transfer_buffer_length - running_total;
 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
 			trb_buff_len = TRB_MAX_BUFF_SIZE;
-	} while (running_total < urb->transfer_buffer_length);
+	} while (num_trbs > 0);
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3517,6 +3593,97 @@
 	}
 }
 
+/*
+ * Calculates the Frame ID field of the isochronous TRB, which identifies the
+ * target frame that the Interval associated with this Isochronous Transfer
+ * Descriptor will start on. Refer to section 4.11.2.5 of the xHCI 1.1 spec.
+ *
+ * Returns actual frame id on success, negative value on error.
+ */
+static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
+		struct urb *urb, int index)
+{
+	int start_frame, ist, ret = 0;
+	int start_frame_id, end_frame_id, current_frame_id;
+
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL)
+		start_frame = urb->start_frame + index * urb->interval;
+	else
+		start_frame = (urb->start_frame + index * urb->interval) >> 3;
+
+	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
+	 *
+	 * If bit [3] of IST is cleared to '0', software can add a TRB no
+	 * later than IST[2:0] Microframes before that TRB is scheduled to
+	 * be executed.
+	 * If bit [3] of IST is set to '1', software can add a TRB no later
+	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
+	 */
+	ist = HCS_IST(xhci->hcs_params2) & 0x7;
+	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+		ist <<= 3;
+
+	/* Software shall not schedule an Isoch TD with a Frame ID value that
+	 * is less than the Start Frame ID or greater than the End Frame ID,
+	 * where:
+	 *
+	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
+	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
+	 *
+	 * Both the End Frame ID and Start Frame ID values are calculated
+	 * in microframes. When software determines the valid Frame ID value,
+	 * the End Frame ID value should be rounded down to the nearest Frame
+	 * boundary, and the Start Frame ID value should be rounded up to the
+	 * nearest Frame boundary.
+	 */
+	current_frame_id = readl(&xhci->run_regs->microframe_index);
+	start_frame_id = roundup(current_frame_id + ist + 1, 8);
+	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
+
+	start_frame &= 0x7ff;
+	start_frame_id = (start_frame_id >> 3) & 0x7ff;
+	end_frame_id = (end_frame_id >> 3) & 0x7ff;
+
+	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
+		 __func__, index, readl(&xhci->run_regs->microframe_index),
+		 start_frame_id, end_frame_id, start_frame);
+
+	if (start_frame_id < end_frame_id) {
+		if (start_frame > end_frame_id ||
+				start_frame < start_frame_id)
+			ret = -EINVAL;
+	} else if (start_frame_id > end_frame_id) {
+		if ((start_frame > end_frame_id &&
+				start_frame < start_frame_id))
+			ret = -EINVAL;
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (index == 0) {
+		if (ret == -EINVAL || start_frame == start_frame_id) {
+			start_frame = start_frame_id + 1;
+			if (urb->dev->speed == USB_SPEED_LOW ||
+					urb->dev->speed == USB_SPEED_FULL)
+				urb->start_frame = start_frame;
+			else
+				urb->start_frame = start_frame << 3;
+			ret = 0;
+		}
+	}
+
+	if (ret) {
+		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
+				start_frame, current_frame_id, index,
+				start_frame_id, end_frame_id);
+		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
+		return ret;
+	}
+
+	return start_frame;
+}
+
 /* This is for isoc transfer */
 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
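
xhci_get_isoc_frame_id() above derives a valid Frame ID window from MFINDEX and the IST before picking a start frame. A standalone sketch of that arithmetic with made-up register values (illustrative only; not kernel code):

/* Standalone model of the Frame ID window computation above.
 * mfindex and ist values are made up for illustration. */
#include <stdio.h>

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))
#define ROUNDDOWN(x, a)	(((x) / (a)) * (a))

int main(void)
{
	unsigned int mfindex = 0x1234;	/* hypothetical MFINDEX (microframes) */
	unsigned int ist = 4;		/* hypothetical IST, in microframes */

	/* window bounds in microframes, then converted to 11-bit frame IDs */
	unsigned int start_id = ROUNDUP(mfindex + ist + 1, 8);
	unsigned int end_id = ROUNDDOWN(mfindex + 895 * 8, 8);

	start_id = (start_id >> 3) & 0x7ff;
	end_id = (end_id >> 3) & 0x7ff;

	printf("valid Frame ID window: [%u, %u] (mod 2048)\n",
	       start_id, end_id);
	return 0;
}
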
@@ -3533,7 +3700,9 @@
 	u64 start_addr, addr;
 	int i, j;
 	bool more_trbs_coming;
+	struct xhci_virt_ep *xep;
 
+	xep = &xhci->devs[slot_id]->eps[ep_index];
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
 	num_tds = urb->number_of_packets;
@@ -3581,6 +3750,7 @@
 
 		td = urb_priv->td[i];
 		for (j = 0; j < trbs_per_td; j++) {
+			int frame_id = 0;
 			u32 remainder = 0;
 			field = 0;
 
@@ -3589,8 +3759,20 @@
 					TRB_TLBPC(residue);
 				/* Queue the isoc TRB */
 				field |= TRB_TYPE(TRB_ISOC);
-				/* Assume URB_ISO_ASAP is set */
-				field |= TRB_SIA;
+
+				/* Calculate Frame ID and SIA fields */
+				if (!(urb->transfer_flags & URB_ISO_ASAP) &&
+						HCC_CFC(xhci->hcc_params)) {
+					frame_id = xhci_get_isoc_frame_id(xhci,
+									  urb,
+									  i);
+					if (frame_id >= 0)
+						field |= TRB_FRAME_ID(frame_id);
+					else
+						field |= TRB_SIA;
+				} else
+					field |= TRB_SIA;
+
 				if (i == 0) {
 					if (start_cycle == 0)
 						field |= 0x1;
@@ -3666,6 +3848,10 @@
 		}
 	}
 
+	/* store the next frame id */
+	if (HCC_CFC(xhci->hcc_params))
+		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
+
 	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
 		if (xhci->quirks & XHCI_AMD_PLL_FIX)
 			usb_amd_quirk_pll_disable();
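
The next_frame_id bookkeeping stored above is what lets a follow-up isoc URB continue exactly where the previous one ended when the controller supports CFC. A minimal sketch of that chaining, with made-up numbers (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		int next_frame_id = 0;
		int start_frame = 100;	/* hypothetical frame picked for the first URB */
		int num_tds = 8, interval = 1;
		int i;

		for (i = 0; i < 3; i++) {	/* three URBs queued back to back */
			printf("URB %d starts at frame %d\n", i, start_frame);
			next_frame_id = start_frame + num_tds * interval;
			start_frame = next_frame_id;	/* the next URB picks this up */
		}
		return 0;
	}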
@@ -3699,12 +3885,34 @@
 	return ret;
 }
 
+static int ep_ring_is_processing(struct xhci_hcd *xhci,
+		int slot_id, unsigned int ep_index)
+{
+	struct xhci_virt_device *xdev;
+	struct xhci_ring *ep_ring;
+	struct xhci_ep_ctx *ep_ctx;
+	struct xhci_virt_ep *xep;
+	dma_addr_t hw_deq;
+
+	xdev = xhci->devs[slot_id];
+	xep = &xhci->devs[slot_id]->eps[ep_index];
+	ep_ring = xep->ring;
+	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+
+	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING)
+		return 0;
+
+	hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
+	return (hw_deq !=
+		xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue));
+}
+
 /*
  * Check transfer ring to guarantee there is enough room for the urb.
  * Update ISO URB start_frame and interval.
- * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
- * update the urb->start_frame by now.
- * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
+ * Update interval as xhci_queue_intr_tx does. Use the xhci frame_index to
+ * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
+ * Contiguous Frame ID is not supported by the HC.
  */
 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 		struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3717,8 +3925,11 @@
 	int ep_interval;
 	int num_tds, num_trbs, i;
 	int ret;
+	struct xhci_virt_ep *xep;
+	int ist;
 
 	xdev = xhci->devs[slot_id];
+	xep = &xhci->devs[slot_id]->eps[ep_index];
 	ep_ring = xdev->eps[ep_index].ring;
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -3735,14 +3946,10 @@
 	if (ret)
 		return ret;
 
-	start_frame = readl(&xhci->run_regs->microframe_index);
-	start_frame &= 0x3fff;
-
-	urb->start_frame = start_frame;
-	if (urb->dev->speed == USB_SPEED_LOW ||
-			urb->dev->speed == USB_SPEED_FULL)
-		urb->start_frame >>= 3;
-
+	/*
+	 * Check interval value. This should be done before we start to
+	 * calculate the start frame value.
+	 */
 	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
 	ep_interval = urb->interval;
 	/* Convert to microframes */
@@ -3763,6 +3970,40 @@
 				urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
+
+	/* Calculate the start frame and put it in urb->start_frame. */
+	if (HCC_CFC(xhci->hcc_params) &&
+			ep_ring_is_processing(xhci, slot_id, ep_index)) {
+		urb->start_frame = xep->next_frame_id;
+		goto skip_start_over;
+	}
+
+	start_frame = readl(&xhci->run_regs->microframe_index);
+	start_frame &= 0x3fff;
+	/*
+	 * Round up to the next frame and account for the time before the TRB
+	 * really gets scheduled by hardware.
+	 */
+	ist = HCS_IST(xhci->hcs_params2) & 0x7;
+	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+		ist <<= 3;
+	start_frame += ist + XHCI_CFC_DELAY;
+	start_frame = roundup(start_frame, 8);
+
+	/*
+	 * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
+	 * is greater than 8 microframes.
+	 */
+	if (urb->dev->speed == USB_SPEED_LOW ||
+			urb->dev->speed == USB_SPEED_FULL) {
+		start_frame = roundup(start_frame, urb->interval << 3);
+		urb->start_frame = start_frame >> 3;
+	} else {
+		start_frame = roundup(start_frame, urb->interval);
+		urb->start_frame = start_frame;
+	}
+
+skip_start_over:
 	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
 
 	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
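
To see how the new start-frame selection behaves, here is a standalone sketch of the rounding above for a hypothetical full-speed endpoint (made-up MFINDEX/IST values; XHCI_CFC_DELAY mirrors the new define in xhci.h):

	#include <stdio.h>

	#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))
	#define XHCI_CFC_DELAY	10

	int main(void)
	{
		int mfindex = 1000;	/* hypothetical MFINDEX value, in microframes */
		int ist = 2;		/* hypothetical IST, already in microframes */
		int interval = 4;	/* urb->interval of a full-speed isoc ep, in frames */
		int start;

		/* Leave room for IST plus the CFC delay, then align to a frame. */
		start = ROUNDUP(mfindex + ist + XHCI_CFC_DELAY, 8);
		/* Full/low speed: the ESIT is interval frames = interval << 3 uframes. */
		start = ROUNDUP(start, interval << 3);

		printf("urb->start_frame = %d\n", start >> 3);	/* prints 128 here */
		return 0;
	}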
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 526ebc0..6b0f4a4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1340,6 +1340,11 @@
 
 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
 		size = urb->number_of_packets;
+	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
+	    urb->transfer_buffer_length > 0 &&
+	    urb->transfer_flags & URB_ZERO_PACKET &&
+	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
+		size = 2;
 	else
 		size = 1;
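
The hunk above reserves room for one extra TRB when a bulk OUT URB with URB_ZERO_PACKET ends exactly on a wMaxPacketSize boundary, since a zero-length packet must then be queued to terminate the transfer. A trivial sketch of the condition with made-up numbers:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int len = 1024;	/* hypothetical transfer_buffer_length */
		unsigned int maxp = 512;	/* hypothetical wMaxPacketSize */
		bool zero_flag = true;		/* URB_ZERO_PACKET requested by the driver */

		/* 1024 is an exact multiple of 512, so an extra zero-length TRB is needed. */
		if (zero_flag && len > 0 && !(len % maxp))
			printf("reserve 2 TRB slots\n");
		else
			printf("reserve 1 TRB slot\n");
		return 0;
	}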
 
@@ -3117,7 +3122,7 @@
 }
 
 /*
- * The USB device drivers use this function (though the HCD interface in USB
+ * The USB device drivers use this function (through the HCD interface in USB
  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
  * coordinate mass storage command queueing across multiple endpoints (basically
  * a stream ID == a task ID).
@@ -4678,7 +4683,6 @@
 {
 	struct xhci_hcd	*xhci;
 	u16 mel;
-	int ret;
 
 	xhci = hcd_to_xhci(hcd);
 	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
@@ -4686,10 +4690,7 @@
 		return 0;
 
 	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
-	ret = xhci_change_max_exit_latency(xhci, udev, mel);
-	if (ret)
-		return ret;
-	return 0;
+	return xhci_change_max_exit_latency(xhci, udev, mel);
 }
 #else /* CONFIG_PM */
 
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ed2ebf6..dbda41e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -119,6 +119,10 @@
 #define HCC_LTC(p)		((p) & (1 << 6))
 /* true: no secondary Stream ID Support */
 #define HCC_NSS(p)		((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p)		((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p)		((p) & (1 << 11))
 /* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
 #define HCC_MAX_PSA(p)		(1 << ((((p) >> 12) & 0xf) + 1))
 /* Extended Capabilities pointer from PCI base - section 5.3.6 */
@@ -891,6 +895,8 @@
 	/* Bandwidth checking storage */
 	struct xhci_bw_info	bw_info;
 	struct list_head	bw_endpoint_list;
+	/* Isoch Frame ID checking storage */
+	int			next_frame_id;
 };
 
 enum xhci_overhead_type {
@@ -1059,8 +1065,8 @@
 #define COMP_STOP	26
 /* Same as COMP_EP_STOPPED, but the transferred length in the event is invalid */
 #define COMP_STOP_INVAL	27
-/* Control Abort Error - Debug Capability - control pipe aborted */
-#define COMP_DBG_ABORT	28
+/* Same as COMP_EP_STOPPED, but a short packet detected */
+#define COMP_STOP_SHORT	28
 /* Max Exit Latency Too Large Error */
 #define COMP_MEL_ERR	29
 /* TRB type 30 reserved */
@@ -1165,6 +1171,7 @@
 
 /* Isochronous TRB specific fields */
 #define TRB_SIA			(1<<31)
+#define TRB_FRAME_ID(p)		(((p) & 0x7ff) << 20)
 
 struct xhci_generic_trb {
 	__le32 field[4];
@@ -1601,6 +1608,8 @@
 	int (*start)(struct usb_hcd *hcd);
 };
 
+#define	XHCI_CFC_DELAY		10
+
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
 static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
 {
diff --git a/drivers/usb/isp1760/isp1760-udc.c b/drivers/usb/isp1760/isp1760-udc.c
index 18ebf5b..1c3d0fd 100644
--- a/drivers/usb/isp1760/isp1760-udc.c
+++ b/drivers/usb/isp1760/isp1760-udc.c
@@ -1382,14 +1382,25 @@
 		 * This fits in the 8kB FIFO without double-buffering.
 		 */
 		if (ep_num == 0) {
-			ep->ep.maxpacket = 64;
+			usb_ep_set_maxpacket_limit(&ep->ep, 64);
+			ep->ep.caps.type_control = true;
+			ep->ep.caps.dir_in = true;
+			ep->ep.caps.dir_out = true;
 			ep->maxpacket = 64;
 			udc->gadget.ep0 = &ep->ep;
 		} else {
-			ep->ep.maxpacket = 512;
+			usb_ep_set_maxpacket_limit(&ep->ep, 512);
+			ep->ep.caps.type_iso = true;
+			ep->ep.caps.type_bulk = true;
+			ep->ep.caps.type_int = true;
 			ep->maxpacket = 0;
 			list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
 		}
+
+		if (is_in)
+			ep->ep.caps.dir_in = true;
+		else
+			ep->ep.caps.dir_out = true;
 	}
 }
 
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 8ab1f8f..52c27ca 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -2568,11 +2568,7 @@
 					    0x00);
 	if (UxxxStatus)
 		return UxxxStatus;
-	UxxxStatus = ftdi_elan_read_config(ftdi, activePCIfn | reg, 0,
-					   &pcidata);
-	if (UxxxStatus)
-		return UxxxStatus;
-	return 0;
+	return ftdi_elan_read_config(ftdi, activePCIfn | reg, 0, &pcidata);
 }
 
 static int ftdi_elan_found_controller(struct usb_ftdi *ftdi, int fn, int quirk)
@@ -2695,11 +2691,7 @@
 		}
 	}
 	if (ftdi->function > 0) {
-		UxxxStatus = ftdi_elan_setup_controller(ftdi,
-							ftdi->function - 1);
-		if (UxxxStatus)
-			return UxxxStatus;
-		return 0;
+		return ftdi_elan_setup_controller(ftdi, ftdi->function - 1);
 	} else if (controllers > 0) {
 		return -ENXIO;
 	} else if (unrecognized > 0) {
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 0bbafe7..9517812 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1925,10 +1925,11 @@
 	memset(urbs, 0, sizeof(urbs));
 	udev = testdev_to_usbdev(dev);
 	dev_info(&dev->intf->dev,
-		"... iso period %d %sframes, wMaxPacket %04x\n",
+		"iso period %d %sframes, wMaxPacket %d, transactions: %d\n",
 		1 << (desc->bInterval - 1),
 		(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
-		usb_endpoint_maxp(desc));
+		usb_endpoint_maxp(desc) & 0x7ff,
+		1 + (0x3 & (usb_endpoint_maxp(desc) >> 11)));
 
 	for (i = 0; i < param->sglen; i++) {
 		urbs[i] = iso_alloc_urb(udev, pipe, desc,
@@ -1942,7 +1943,7 @@
 	}
 	packets *= param->iterations;
 	dev_info(&dev->intf->dev,
-		"... total %lu msec (%lu packets)\n",
+		"total %lu msec (%lu packets)\n",
 		(packets * (1 << (desc->bInterval - 1)))
 			/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
 		packets);
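
For reference, the updated message splits wMaxPacketSize into the packet size (bits 10:0) and the per-microframe transaction count (bits 12:11 plus one) used by high-bandwidth high-speed isoc endpoints. A worked sketch with a made-up descriptor value:

	#include <stdio.h>

	int main(void)
	{
		unsigned int wmaxp = 0x1400;	/* hypothetical wMaxPacketSize value */

		unsigned int maxp = wmaxp & 0x7ff;		/* 0x400 = 1024 bytes */
		unsigned int trans = 1 + (0x3 & (wmaxp >> 11));	/* 2 + 1 = 3 per uframe */

		printf("wMaxPacket %u, transactions: %u\n", maxp, trans);
		return 0;
	}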
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 39db8b6..1f2037b 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -5,7 +5,7 @@
 
 # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
 config USB_MUSB_HDRC
-	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
+	tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, AW, ...)'
 	depends on (USB || USB_GADGET)
 	help
 	  Say Y here if your system has a dual role high speed USB
@@ -20,6 +20,8 @@
 	  Analog Devices parts using this IP include Blackfin BF54x,
 	  BF525 and BF527.
 
+	  Allwinner SoCs using this IP include A10, A13, A20, ...
+
 	  If you do not know what this is, please say N.
 
 	  To compile this driver as a module, choose M here; the
@@ -60,6 +62,15 @@
 
 comment "Platform Glue Layer"
 
+config USB_MUSB_SUNXI
+	tristate "Allwinner (sunxi)"
+	depends on ARCH_SUNXI
+	depends on NOP_USB_XCEIV
+	depends on PHY_SUN4I_USB
+	depends on EXTCON
+	depends on GENERIC_PHY
+	select SUNXI_SRAM
+
 config USB_MUSB_DAVINCI
 	tristate "DaVinci"
 	depends on ARCH_DAVINCI_DMx
@@ -113,19 +124,20 @@
 config USB_MUSB_AM335X_CHILD
 	tristate
 
-choice
-	prompt 'MUSB DMA mode'
-	default MUSB_PIO_ONLY if ARCH_MULTIPLATFORM || USB_MUSB_JZ4740
-	default USB_UX500_DMA if USB_MUSB_UX500
-	default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
-	default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
-	default USB_TUSB_OMAP_DMA if USB_MUSB_TUSB6010
-	default MUSB_PIO_ONLY if USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X \
-				|| USB_MUSB_DSPS
+comment "MUSB DMA mode"
+
+config MUSB_PIO_ONLY
+	bool 'Disable DMA (always use PIO)'
 	help
-	  Unfortunately, only one option can be enabled here. Ideally one
-	  should be able to build all these drivers into one kernel to
-	  allow using DMA on multiplatform kernels.
+	  All data is copied between memory and FIFO by the CPU.
+	  DMA controllers are ignored.
+
+	  Do not choose this unless DMA support for your SOC or board
+	  is unavailable (or unstable).  When DMA is enabled at compile time,
+	  you can still disable it at run time using the "use_dma=n" module
+	  parameter.
+
+if !MUSB_PIO_ONLY
 
 config USB_UX500_DMA
 	bool 'ST Ericsson Ux500'
@@ -157,17 +169,6 @@
 	help
 	  Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
 
-config MUSB_PIO_ONLY
-	bool 'Disable DMA (always use PIO)'
-	help
-	  All data is copied between memory and FIFO by the CPU.
-	  DMA controllers are ignored.
-
-	  Do not choose this unless DMA support for your SOC or board
-	  is unavailable (or unstable).  When DMA is enabled at compile time,
-	  you can still disable it at run time using the "use_dma=n" module
-	  parameter.
-
-endchoice
+endif # !MUSB_PIO_ONLY
 
 endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index ba49501..f95befe 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_USB_MUSB_BLACKFIN)			+= blackfin.o
 obj-$(CONFIG_USB_MUSB_UX500)			+= ux500.o
 obj-$(CONFIG_USB_MUSB_JZ4740)			+= jz4740.o
+obj-$(CONFIG_USB_MUSB_SUNXI)			+= sunxi.o
 
 
 obj-$(CONFIG_USB_MUSB_AM335X_CHILD)		+= musb_am335x.o
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 4d1b44c..d07cafb 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -614,7 +614,7 @@
 {
 	struct musb *musb = controller->musb;
 	struct device *dev = musb->controller;
-	struct device_node *np = dev->of_node;
+	struct device_node *np = dev->parent->of_node;
 	struct cppi41_dma_channel *cppi41_channel;
 	int count;
 	int i;
@@ -664,7 +664,7 @@
 		musb_dma->status = MUSB_DMA_STATUS_FREE;
 		musb_dma->max_len = SZ_4M;
 
-		dc = dma_request_slave_channel(dev, str);
+		dc = dma_request_slave_channel(dev->parent, str);
 		if (!dc) {
 			dev_err(dev, "Failed to request %s.\n", str);
 			ret = -EPROBE_DEFER;
@@ -695,7 +695,7 @@
 	struct cppi41_dma_controller *controller;
 	int ret = 0;
 
-	if (!musb->controller->of_node) {
+	if (!musb->controller->parent->of_node) {
 		dev_err(musb->controller, "Need DT for the DMA engine.\n");
 		return NULL;
 	}
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 1334a3d..a0cfead 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -482,11 +482,7 @@
 		dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
 	}
 
-	ret = dsps_musb_dbg_init(musb, glue);
-	if (ret)
-		return ret;
-
-	return 0;
+	return dsps_musb_dbg_init(musb, glue);
 }
 
 static int dsps_musb_exit(struct musb *musb)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 625d482f..67ad630 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -313,8 +313,7 @@
 
 		/* MUSB_TXCSR_P_ISO is still set correctly */
 
-#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
-		{
+		if (musb_dma_inventra(musb) || musb_dma_ux500(musb)) {
 			if (request_size < musb_ep->packet_sz)
 				musb_ep->dma->desired_mode = 0;
 			else
@@ -365,7 +364,6 @@
 			}
 		}
 
-#endif
 		if (is_cppi_enabled(musb)) {
 			/* program endpoint CSR first, then setup DMA */
 			csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
@@ -641,8 +639,10 @@
 			use_mode_1 = 0;
 
 		if (request->actual < request->length) {
-#ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_buffer_mapped(req)) {
+			if (!is_buffer_mapped(req))
+				goto buffer_aint_mapped;
+
+			if (musb_dma_inventra(musb)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -716,8 +716,8 @@
 				if (use_dma)
 					return;
 			}
-#elif defined(CONFIG_USB_UX500_DMA)
-			if ((is_buffer_mapped(req)) &&
+
+			if ((musb_dma_ux500(musb)) &&
 				(request->actual < request->length)) {
 
 				struct dma_controller *c;
@@ -765,7 +765,6 @@
 
 					return;
 			}
-#endif	/* Mentor's DMA */
 
 			len = request->length - request->actual;
 			dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
@@ -775,8 +774,7 @@
 
 			fifo_count = min_t(unsigned, len, fifo_count);
 
-#ifdef	CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap(musb) && is_buffer_mapped(req)) {
+			if (tusb_dma_omap(musb)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -790,23 +788,22 @@
 				if (ret)
 					return;
 			}
-#endif
+
 			/*
 			 * Unmap the dma buffer back to cpu if dma channel
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			 if (is_buffer_mapped(req)) {
-				unmap_dma_buffer(req, musb);
+			unmap_dma_buffer(req, musb);
 
-				/*
-				 * Clear DMAENAB and AUTOCLEAR for the
-				 * PIO mode transfer
-				 */
-				csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
-				musb_writew(epio, MUSB_RXCSR, csr);
-			}
+			/*
+			 * Clear DMAENAB and AUTOCLEAR for the
+			 * PIO mode transfer
+			 */
+			csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+			musb_writew(epio, MUSB_RXCSR, csr);
 
+buffer_aint_mapped:
 			musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
 					(request->buf + request->actual));
 			request->actual += fifo_count;
@@ -1684,6 +1681,40 @@
 	return 0;
 }
 
+#ifdef CONFIG_BLACKFIN
+static struct usb_ep *musb_match_ep(struct usb_gadget *g,
+		struct usb_endpoint_descriptor *desc,
+		struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+	struct usb_ep *ep = NULL;
+
+	switch (usb_endpoint_type(desc)) {
+	case USB_ENDPOINT_XFER_ISOC:
+	case USB_ENDPOINT_XFER_BULK:
+		if (usb_endpoint_dir_in(desc))
+			ep = gadget_find_ep_by_name(g, "ep5in");
+		else
+			ep = gadget_find_ep_by_name(g, "ep6out");
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		if (usb_endpoint_dir_in(desc))
+			ep = gadget_find_ep_by_name(g, "ep1in");
+		else
+			ep = gadget_find_ep_by_name(g, "ep2out");
+		break;
+	default:
+		break;
+	}
+
+	if (ep && usb_gadget_ep_match_desc(g, ep, desc, ep_comp))
+		return ep;
+
+	return NULL;
+}
+#else
+#define musb_match_ep NULL
+#endif
+
 static int musb_gadget_start(struct usb_gadget *g,
 		struct usb_gadget_driver *driver);
 static int musb_gadget_stop(struct usb_gadget *g);
@@ -1697,6 +1728,7 @@
 	.pullup			= musb_gadget_pullup,
 	.udc_start		= musb_gadget_start,
 	.udc_stop		= musb_gadget_stop,
+	.match_ep		= musb_match_ep,
 };
 
 /* ----------------------------------------------------------------------- */
@@ -1729,6 +1761,7 @@
 	INIT_LIST_HEAD(&ep->end_point.ep_list);
 	if (!epnum) {
 		usb_ep_set_maxpacket_limit(&ep->end_point, 64);
+		ep->end_point.caps.type_control = true;
 		ep->end_point.ops = &musb_g_ep0_ops;
 		musb->g.ep0 = &ep->end_point;
 	} else {
@@ -1736,9 +1769,20 @@
 			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_tx);
 		else
 			usb_ep_set_maxpacket_limit(&ep->end_point, hw_ep->max_packet_sz_rx);
+		ep->end_point.caps.type_iso = true;
+		ep->end_point.caps.type_bulk = true;
+		ep->end_point.caps.type_int = true;
 		ep->end_point.ops = &musb_ep_ops;
 		list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
 	}
+
+	if (!epnum || hw_ep->is_shared_fifo) {
+		ep->end_point.caps.dir_in = true;
+		ep->end_point.caps.dir_out = true;
+	} else if (is_in)
+		ep->end_point.caps.dir_in = true;
+	else
+		ep->end_point.caps.dir_out = true;
 }
 
 /*
@@ -2075,6 +2119,7 @@
 	musb->g.b_hnp_enable = 0;
 	musb->g.a_alt_hnp_support = 0;
 	musb->g.a_hnp_support = 0;
+	musb->g.quirk_zlp_not_supp = 1;
 
 	/* Normal reset, as B-Device;
 	 * or else after HNP, as A-Device
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
new file mode 100644
index 0000000..f9f6304
--- /dev/null
+++ b/drivers/usb/musb/sunxi.c
@@ -0,0 +1,756 @@
+/*
+ * Allwinner sun4i MUSB Glue Layer
+ *
+ * Copyright (C) 2015 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Based on code from
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy-sun4i-usb.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/usb/musb.h>
+#include <linux/usb/of.h>
+#include <linux/usb/usb_phy_generic.h>
+#include <linux/workqueue.h>
+#include "musb_core.h"
+
+/*
+ * Register offsets. Note that the sunxi musb has a different layout than
+ * most musb implementations; we translate the layout in musb_readb & friends.
+ */
+#define SUNXI_MUSB_POWER			0x0040
+#define SUNXI_MUSB_DEVCTL			0x0041
+#define SUNXI_MUSB_INDEX			0x0042
+#define SUNXI_MUSB_VEND0			0x0043
+#define SUNXI_MUSB_INTRTX			0x0044
+#define SUNXI_MUSB_INTRRX			0x0046
+#define SUNXI_MUSB_INTRTXE			0x0048
+#define SUNXI_MUSB_INTRRXE			0x004a
+#define SUNXI_MUSB_INTRUSB			0x004c
+#define SUNXI_MUSB_INTRUSBE			0x0050
+#define SUNXI_MUSB_FRAME			0x0054
+#define SUNXI_MUSB_TXFIFOSZ			0x0090
+#define SUNXI_MUSB_TXFIFOADD			0x0092
+#define SUNXI_MUSB_RXFIFOSZ			0x0094
+#define SUNXI_MUSB_RXFIFOADD			0x0096
+#define SUNXI_MUSB_FADDR			0x0098
+#define SUNXI_MUSB_TXFUNCADDR			0x0098
+#define SUNXI_MUSB_TXHUBADDR			0x009a
+#define SUNXI_MUSB_TXHUBPORT			0x009b
+#define SUNXI_MUSB_RXFUNCADDR			0x009c
+#define SUNXI_MUSB_RXHUBADDR			0x009e
+#define SUNXI_MUSB_RXHUBPORT			0x009f
+#define SUNXI_MUSB_CONFIGDATA			0x00c0
+
+/* VEND0 bits */
+#define SUNXI_MUSB_VEND0_PIO_MODE		0
+
+/* flags */
+#define SUNXI_MUSB_FL_ENABLED			0
+#define SUNXI_MUSB_FL_HOSTMODE			1
+#define SUNXI_MUSB_FL_HOSTMODE_PEND		2
+#define SUNXI_MUSB_FL_VBUS_ON			3
+#define SUNXI_MUSB_FL_PHY_ON			4
+#define SUNXI_MUSB_FL_HAS_SRAM			5
+#define SUNXI_MUSB_FL_HAS_RESET			6
+#define SUNXI_MUSB_FL_NO_CONFIGDATA		7
+
+/* Our read/write methods need a musb ref but do not get passed one :| */
+static struct musb *sunxi_musb;
+
+struct sunxi_glue {
+	struct device		*dev;
+	struct platform_device	*musb;
+	struct clk		*clk;
+	struct reset_control	*rst;
+	struct phy		*phy;
+	struct platform_device	*usb_phy;
+	struct usb_phy		*xceiv;
+	unsigned long		flags;
+	struct work_struct	work;
+	struct extcon_dev	*extcon;
+	struct notifier_block	host_nb;
+};
+
+/* phy_power_on / off may sleep, so we use a workqueue */
+static void sunxi_musb_work(struct work_struct *work)
+{
+	struct sunxi_glue *glue = container_of(work, struct sunxi_glue, work);
+	bool vbus_on, phy_on;
+
+	if (!test_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
+		return;
+
+	if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
+		struct musb *musb = platform_get_drvdata(glue->musb);
+		unsigned long flags;
+		u8 devctl;
+
+		spin_lock_irqsave(&musb->lock, flags);
+
+		devctl = readb(musb->mregs + SUNXI_MUSB_DEVCTL);
+		if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
+			set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+			musb->xceiv->otg->default_a = 1;
+			musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+			MUSB_HST_MODE(musb);
+			devctl |= MUSB_DEVCTL_SESSION;
+		} else {
+			clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+			musb->xceiv->otg->default_a = 0;
+			musb->xceiv->otg->state = OTG_STATE_B_IDLE;
+			MUSB_DEV_MODE(musb);
+			devctl &= ~MUSB_DEVCTL_SESSION;
+		}
+		writeb(devctl, musb->mregs + SUNXI_MUSB_DEVCTL);
+
+		spin_unlock_irqrestore(&musb->lock, flags);
+	}
+
+	vbus_on = test_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+	phy_on = test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+
+	if (phy_on != vbus_on) {
+		if (vbus_on) {
+			phy_power_on(glue->phy);
+			set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+		} else {
+			phy_power_off(glue->phy);
+			clear_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+		}
+	}
+}
+
+static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	if (is_on)
+		set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+	else
+		clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+
+	schedule_work(&glue->work);
+}
+
+static void sunxi_musb_pre_root_reset_end(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	sun4i_usb_phy_set_squelch_detect(glue->phy, false);
+}
+
+static void sunxi_musb_post_root_reset_end(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	sun4i_usb_phy_set_squelch_detect(glue->phy, true);
+}
+
+static irqreturn_t sunxi_musb_interrupt(int irq, void *__hci)
+{
+	struct musb *musb = __hci;
+	unsigned long flags;
+
+	spin_lock_irqsave(&musb->lock, flags);
+
+	musb->int_usb = readb(musb->mregs + SUNXI_MUSB_INTRUSB);
+	if (musb->int_usb)
+		writeb(musb->int_usb, musb->mregs + SUNXI_MUSB_INTRUSB);
+
+	/*
+	 * sunxi musb often signals babble on low / full speed device
+	 * disconnect, without ever raising MUSB_INTR_DISCONNECT. Since
+	 * babble normally never happens, treat it as a disconnect.
+	 */
+	if ((musb->int_usb & MUSB_INTR_BABBLE) && is_host_active(musb)) {
+		musb->int_usb &= ~MUSB_INTR_BABBLE;
+		musb->int_usb |= MUSB_INTR_DISCONNECT;
+	}
+
+	if ((musb->int_usb & MUSB_INTR_RESET) && !is_host_active(musb)) {
+		/* ep0 FADDR must be 0 when (re)entering peripheral mode */
+		musb_ep_select(musb->mregs, 0);
+		musb_writeb(musb->mregs, MUSB_FADDR, 0);
+	}
+
+	musb->int_tx = readw(musb->mregs + SUNXI_MUSB_INTRTX);
+	if (musb->int_tx)
+		writew(musb->int_tx, musb->mregs + SUNXI_MUSB_INTRTX);
+
+	musb->int_rx = readw(musb->mregs + SUNXI_MUSB_INTRRX);
+	if (musb->int_rx)
+		writew(musb->int_rx, musb->mregs + SUNXI_MUSB_INTRRX);
+
+	musb_interrupt(musb);
+
+	spin_unlock_irqrestore(&musb->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+static int sunxi_musb_host_notifier(struct notifier_block *nb,
+				    unsigned long event, void *ptr)
+{
+	struct sunxi_glue *glue = container_of(nb, struct sunxi_glue, host_nb);
+
+	if (event)
+		set_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
+	else
+		clear_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags);
+
+	set_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags);
+	schedule_work(&glue->work);
+
+	return NOTIFY_DONE;
+}
+
+static int sunxi_musb_init(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+	int ret;
+
+	sunxi_musb = musb;
+	musb->phy = glue->phy;
+	musb->xceiv = glue->xceiv;
+
+	if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) {
+		ret = sunxi_sram_claim(musb->controller->parent);
+		if (ret)
+			return ret;
+	}
+
+	ret = clk_prepare_enable(glue->clk);
+	if (ret)
+		goto error_sram_release;
+
+	if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
+		ret = reset_control_deassert(glue->rst);
+		if (ret)
+			goto error_clk_disable;
+	}
+
+	writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
+
+	/* Register notifier before calling phy_init() */
+	if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) {
+		ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
+					       &glue->host_nb);
+		if (ret)
+			goto error_reset_assert;
+	}
+
+	ret = phy_init(glue->phy);
+	if (ret)
+		goto error_unregister_notifier;
+
+	if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+		ret = phy_power_on(glue->phy);
+		if (ret)
+			goto error_phy_exit;
+		set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+		/* Stop musb work from turning vbus off again */
+		set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+	}
+
+	musb->isr = sunxi_musb_interrupt;
+
+	/* Stop the musb-core from doing runtime pm (not supported on sunxi) */
+	pm_runtime_get(musb->controller);
+
+	return 0;
+
+error_phy_exit:
+	phy_exit(glue->phy);
+error_unregister_notifier:
+	if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+		extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+					   &glue->host_nb);
+error_reset_assert:
+	if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
+		reset_control_assert(glue->rst);
+error_clk_disable:
+	clk_disable_unprepare(glue->clk);
+error_sram_release:
+	if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+		sunxi_sram_release(musb->controller->parent);
+	return ret;
+}
+
+static int sunxi_musb_exit(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	pm_runtime_put(musb->controller);
+
+	cancel_work_sync(&glue->work);
+	if (test_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags))
+		phy_power_off(glue->phy);
+
+	phy_exit(glue->phy);
+
+	if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+		extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+					   &glue->host_nb);
+
+	if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
+		reset_control_assert(glue->rst);
+
+	clk_disable_unprepare(glue->clk);
+	if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
+		sunxi_sram_release(musb->controller->parent);
+
+	return 0;
+}
+
+static void sunxi_musb_enable(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	/* musb_core does not call us in a balanced manner */
+	if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
+		return;
+
+	schedule_work(&glue->work);
+}
+
+static void sunxi_musb_disable(struct musb *musb)
+{
+	struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+
+	clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
+}
+
+/*
+ * sunxi musb register layout
+ * 0x00 - 0x17	fifo regs, 1 long per fifo
+ * 0x40 - 0x57	generic control regs (power - frame)
+ * 0x80 - 0x8f	ep control regs (addressed through hw_ep->regs, indexed)
+ * 0x90 - 0x97	fifo control regs (indexed)
+ * 0x98 - 0x9f	multipoint / busctl regs (indexed)
+ * 0xc0		configdata reg
+ */
+
+static u32 sunxi_musb_fifo_offset(u8 epnum)
+{
+	return (epnum * 4);
+}
+
+static u32 sunxi_musb_ep_offset(u8 epnum, u16 offset)
+{
+	WARN_ONCE(offset != 0,
+		  "sunxi_musb_ep_offset called with non 0 offset\n");
+
+	return 0x80; /* indexed, so ignore epnum */
+}
+
+static u32 sunxi_musb_busctl_offset(u8 epnum, u16 offset)
+{
+	return SUNXI_MUSB_TXFUNCADDR + offset;
+}
+
+static u8 sunxi_musb_readb(const void __iomem *addr, unsigned offset)
+{
+	struct sunxi_glue *glue;
+
+	if (addr == sunxi_musb->mregs) {
+		/* generic control or fifo control reg access */
+		switch (offset) {
+		case MUSB_FADDR:
+			return readb(addr + SUNXI_MUSB_FADDR);
+		case MUSB_POWER:
+			return readb(addr + SUNXI_MUSB_POWER);
+		case MUSB_INTRUSB:
+			return readb(addr + SUNXI_MUSB_INTRUSB);
+		case MUSB_INTRUSBE:
+			return readb(addr + SUNXI_MUSB_INTRUSBE);
+		case MUSB_INDEX:
+			return readb(addr + SUNXI_MUSB_INDEX);
+		case MUSB_TESTMODE:
+			return 0; /* No testmode on sunxi */
+		case MUSB_DEVCTL:
+			return readb(addr + SUNXI_MUSB_DEVCTL);
+		case MUSB_TXFIFOSZ:
+			return readb(addr + SUNXI_MUSB_TXFIFOSZ);
+		case MUSB_RXFIFOSZ:
+			return readb(addr + SUNXI_MUSB_RXFIFOSZ);
+		case MUSB_CONFIGDATA + 0x10: /* See musb_read_configdata() */
+			glue = dev_get_drvdata(sunxi_musb->controller->parent);
+			/* A33 saves a reg, and we get to hardcode this */
+			if (test_bit(SUNXI_MUSB_FL_NO_CONFIGDATA,
+				     &glue->flags))
+				return 0xde;
+
+			return readb(addr + SUNXI_MUSB_CONFIGDATA);
+		/* Offset for these is fixed by sunxi_musb_busctl_offset() */
+		case SUNXI_MUSB_TXFUNCADDR:
+		case SUNXI_MUSB_TXHUBADDR:
+		case SUNXI_MUSB_TXHUBPORT:
+		case SUNXI_MUSB_RXFUNCADDR:
+		case SUNXI_MUSB_RXHUBADDR:
+		case SUNXI_MUSB_RXHUBPORT:
+			/* multipoint / busctl reg access */
+			return readb(addr + offset);
+		default:
+			dev_err(sunxi_musb->controller->parent,
+				"Error unknown readb offset %u\n", offset);
+			return 0;
+		}
+	} else if (addr == (sunxi_musb->mregs + 0x80)) {
+		/* ep control reg access */
+		/* sunxi has a 2 byte hole before the txtype register */
+		if (offset >= MUSB_TXTYPE)
+			offset += 2;
+		return readb(addr + offset);
+	}
+
+	dev_err(sunxi_musb->controller->parent,
+		"Error unknown readb at 0x%x bytes offset\n",
+		(int)(addr - sunxi_musb->mregs));
+	return 0;
+}
+
+static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
+{
+	if (addr == sunxi_musb->mregs) {
+		/* generic control or fifo control reg access */
+		switch (offset) {
+		case MUSB_FADDR:
+			return writeb(data, addr + SUNXI_MUSB_FADDR);
+		case MUSB_POWER:
+			return writeb(data, addr + SUNXI_MUSB_POWER);
+		case MUSB_INTRUSB:
+			return writeb(data, addr + SUNXI_MUSB_INTRUSB);
+		case MUSB_INTRUSBE:
+			return writeb(data, addr + SUNXI_MUSB_INTRUSBE);
+		case MUSB_INDEX:
+			return writeb(data, addr + SUNXI_MUSB_INDEX);
+		case MUSB_TESTMODE:
+			if (data)
+				dev_warn(sunxi_musb->controller->parent,
+					"sunxi-musb does not have testmode\n");
+			return;
+		case MUSB_DEVCTL:
+			return writeb(data, addr + SUNXI_MUSB_DEVCTL);
+		case MUSB_TXFIFOSZ:
+			return writeb(data, addr + SUNXI_MUSB_TXFIFOSZ);
+		case MUSB_RXFIFOSZ:
+			return writeb(data, addr + SUNXI_MUSB_RXFIFOSZ);
+		/* Offset for these is fixed by sunxi_musb_busctl_offset() */
+		case SUNXI_MUSB_TXFUNCADDR:
+		case SUNXI_MUSB_TXHUBADDR:
+		case SUNXI_MUSB_TXHUBPORT:
+		case SUNXI_MUSB_RXFUNCADDR:
+		case SUNXI_MUSB_RXHUBADDR:
+		case SUNXI_MUSB_RXHUBPORT:
+			/* multipoint / busctl reg access */
+			return writeb(data, addr + offset);
+		default:
+			dev_err(sunxi_musb->controller->parent,
+				"Error unknown writeb offset %u\n", offset);
+			return;
+		}
+	} else if (addr == (sunxi_musb->mregs + 0x80)) {
+		/* ep control reg access */
+		if (offset >= MUSB_TXTYPE)
+			offset += 2;
+		return writeb(data, addr + offset);
+	}
+
+	dev_err(sunxi_musb->controller->parent,
+		"Error unknown writeb at 0x%x bytes offset\n",
+		(int)(addr - sunxi_musb->mregs));
+}
+
+static u16 sunxi_musb_readw(const void __iomem *addr, unsigned offset)
+{
+	if (addr == sunxi_musb->mregs) {
+		/* generic control or fifo control reg access */
+		switch (offset) {
+		case MUSB_INTRTX:
+			return readw(addr + SUNXI_MUSB_INTRTX);
+		case MUSB_INTRRX:
+			return readw(addr + SUNXI_MUSB_INTRRX);
+		case MUSB_INTRTXE:
+			return readw(addr + SUNXI_MUSB_INTRTXE);
+		case MUSB_INTRRXE:
+			return readw(addr + SUNXI_MUSB_INTRRXE);
+		case MUSB_FRAME:
+			return readw(addr + SUNXI_MUSB_FRAME);
+		case MUSB_TXFIFOADD:
+			return readw(addr + SUNXI_MUSB_TXFIFOADD);
+		case MUSB_RXFIFOADD:
+			return readw(addr + SUNXI_MUSB_RXFIFOADD);
+		case MUSB_HWVERS:
+			return 0; /* sunxi musb version is not known */
+		default:
+			dev_err(sunxi_musb->controller->parent,
+				"Error unknown readw offset %u\n", offset);
+			return 0;
+		}
+	} else if (addr == (sunxi_musb->mregs + 0x80)) {
+		/* ep control reg access */
+		return readw(addr + offset);
+	}
+
+	dev_err(sunxi_musb->controller->parent,
+		"Error unknown readw at 0x%x bytes offset\n",
+		(int)(addr - sunxi_musb->mregs));
+	return 0;
+}
+
+static void sunxi_musb_writew(void __iomem *addr, unsigned offset, u16 data)
+{
+	if (addr == sunxi_musb->mregs) {
+		/* generic control or fifo control reg access */
+		switch (offset) {
+		case MUSB_INTRTX:
+			return writew(data, addr + SUNXI_MUSB_INTRTX);
+		case MUSB_INTRRX:
+			return writew(data, addr + SUNXI_MUSB_INTRRX);
+		case MUSB_INTRTXE:
+			return writew(data, addr + SUNXI_MUSB_INTRTXE);
+		case MUSB_INTRRXE:
+			return writew(data, addr + SUNXI_MUSB_INTRRXE);
+		case MUSB_FRAME:
+			return writew(data, addr + SUNXI_MUSB_FRAME);
+		case MUSB_TXFIFOADD:
+			return writew(data, addr + SUNXI_MUSB_TXFIFOADD);
+		case MUSB_RXFIFOADD:
+			return writew(data, addr + SUNXI_MUSB_RXFIFOADD);
+		default:
+			dev_err(sunxi_musb->controller->parent,
+				"Error unknown writew offset %u\n", offset);
+			return;
+		}
+	} else if (addr == (sunxi_musb->mregs + 0x80)) {
+		/* ep control reg access */
+		return writew(data, addr + offset);
+	}
+
+	dev_err(sunxi_musb->controller->parent,
+		"Error unknown writew at 0x%x bytes offset\n",
+		(int)(addr - sunxi_musb->mregs));
+}
+
+static const struct musb_platform_ops sunxi_musb_ops = {
+	.quirks		= MUSB_INDEXED_EP,
+	.init		= sunxi_musb_init,
+	.exit		= sunxi_musb_exit,
+	.enable		= sunxi_musb_enable,
+	.disable	= sunxi_musb_disable,
+	.fifo_offset	= sunxi_musb_fifo_offset,
+	.ep_offset	= sunxi_musb_ep_offset,
+	.busctl_offset	= sunxi_musb_busctl_offset,
+	.readb		= sunxi_musb_readb,
+	.writeb		= sunxi_musb_writeb,
+	.readw		= sunxi_musb_readw,
+	.writew		= sunxi_musb_writew,
+	.set_vbus	= sunxi_musb_set_vbus,
+	.pre_root_reset_end = sunxi_musb_pre_root_reset_end,
+	.post_root_reset_end = sunxi_musb_post_root_reset_end,
+};
+
+/* Allwinner OTG supports up to 5 endpoints, plus ep0 (6 in total) */
+#define SUNXI_MUSB_MAX_EP_NUM	6
+#define SUNXI_MUSB_RAM_BITS	11
+
+static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
+	MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
+	MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
+	MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
+	MUSB_EP_FIFO_SINGLE(2, FIFO_RX, 512),
+	MUSB_EP_FIFO_SINGLE(3, FIFO_TX, 512),
+	MUSB_EP_FIFO_SINGLE(3, FIFO_RX, 512),
+	MUSB_EP_FIFO_SINGLE(4, FIFO_TX, 512),
+	MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
+	MUSB_EP_FIFO_SINGLE(5, FIFO_TX, 512),
+	MUSB_EP_FIFO_SINGLE(5, FIFO_RX, 512),
+};
+
+static struct musb_hdrc_config sunxi_musb_hdrc_config = {
+	.fifo_cfg       = sunxi_musb_mode_cfg,
+	.fifo_cfg_size  = ARRAY_SIZE(sunxi_musb_mode_cfg),
+	.multipoint	= true,
+	.dyn_fifo	= true,
+	.soft_con       = true,
+	.num_eps	= SUNXI_MUSB_MAX_EP_NUM,
+	.ram_bits	= SUNXI_MUSB_RAM_BITS,
+	.dma		= 0,
+};
+
+static int sunxi_musb_probe(struct platform_device *pdev)
+{
+	struct musb_hdrc_platform_data	pdata;
+	struct platform_device_info	pinfo;
+	struct sunxi_glue		*glue;
+	struct device_node		*np = pdev->dev.of_node;
+	int ret;
+
+	if (!np) {
+		dev_err(&pdev->dev, "Error no device tree node found\n");
+		return -EINVAL;
+	}
+
+	glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+	if (!glue)
+		return -ENOMEM;
+
+	memset(&pdata, 0, sizeof(pdata));
+	switch (of_usb_get_dr_mode(np)) {
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
+	case USB_DR_MODE_HOST:
+		pdata.mode = MUSB_PORT_MODE_HOST;
+		break;
+#endif
+#ifdef CONFIG_USB_MUSB_DUAL_ROLE
+	case USB_DR_MODE_OTG:
+		glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+		if (IS_ERR(glue->extcon)) {
+			if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			dev_err(&pdev->dev, "Invalid or missing extcon\n");
+			return PTR_ERR(glue->extcon);
+		}
+		pdata.mode = MUSB_PORT_MODE_DUAL_ROLE;
+		break;
+#endif
+	default:
+		dev_err(&pdev->dev, "Invalid or missing 'dr_mode' property\n");
+		return -EINVAL;
+	}
+	pdata.platform_ops	= &sunxi_musb_ops;
+	pdata.config		= &sunxi_musb_hdrc_config;
+
+	glue->dev = &pdev->dev;
+	INIT_WORK(&glue->work, sunxi_musb_work);
+	glue->host_nb.notifier_call = sunxi_musb_host_notifier;
+
+	if (of_device_is_compatible(np, "allwinner,sun4i-a10-musb"))
+		set_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags);
+
+	if (of_device_is_compatible(np, "allwinner,sun6i-a31-musb"))
+		set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+
+	if (of_device_is_compatible(np, "allwinner,sun8i-a33-musb")) {
+		set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+		set_bit(SUNXI_MUSB_FL_NO_CONFIGDATA, &glue->flags);
+	}
+
+	glue->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(glue->clk)) {
+		dev_err(&pdev->dev, "Error getting clock: %ld\n",
+			PTR_ERR(glue->clk));
+		return PTR_ERR(glue->clk);
+	}
+
+	if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags)) {
+		glue->rst = devm_reset_control_get(&pdev->dev, NULL);
+		if (IS_ERR(glue->rst)) {
+			if (PTR_ERR(glue->rst) == -EPROBE_DEFER)
+				return -EPROBE_DEFER;
+			dev_err(&pdev->dev, "Error getting reset %ld\n",
+				PTR_ERR(glue->rst));
+			return PTR_ERR(glue->rst);
+		}
+	}
+
+	glue->phy = devm_phy_get(&pdev->dev, "usb");
+	if (IS_ERR(glue->phy)) {
+		if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		dev_err(&pdev->dev, "Error getting phy %ld\n",
+			PTR_ERR(glue->phy));
+		return PTR_ERR(glue->phy);
+	}
+
+	glue->usb_phy = usb_phy_generic_register();
+	if (IS_ERR(glue->usb_phy)) {
+		dev_err(&pdev->dev, "Error registering usb-phy %ld\n",
+			PTR_ERR(glue->usb_phy));
+		return PTR_ERR(glue->usb_phy);
+	}
+
+	glue->xceiv = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
+	if (IS_ERR(glue->xceiv)) {
+		ret = PTR_ERR(glue->xceiv);
+		dev_err(&pdev->dev, "Error getting usb-phy %d\n", ret);
+		goto err_unregister_usb_phy;
+	}
+
+	platform_set_drvdata(pdev, glue);
+
+	memset(&pinfo, 0, sizeof(pinfo));
+	pinfo.name	 = "musb-hdrc";
+	pinfo.id	= PLATFORM_DEVID_AUTO;
+	pinfo.parent	= &pdev->dev;
+	pinfo.res	= pdev->resource;
+	pinfo.num_res	= pdev->num_resources;
+	pinfo.data	= &pdata;
+	pinfo.size_data = sizeof(pdata);
+
+	glue->musb = platform_device_register_full(&pinfo);
+	if (IS_ERR(glue->musb)) {
+		ret = PTR_ERR(glue->musb);
+		dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
+		goto err_unregister_usb_phy;
+	}
+
+	return 0;
+
+err_unregister_usb_phy:
+	usb_phy_generic_unregister(glue->usb_phy);
+	return ret;
+}
+
+static int sunxi_musb_remove(struct platform_device *pdev)
+{
+	struct sunxi_glue *glue = platform_get_drvdata(pdev);
+	struct platform_device *usb_phy = glue->usb_phy;
+
+	platform_device_unregister(glue->musb); /* Frees glue ! */
+	usb_phy_generic_unregister(usb_phy);
+
+	return 0;
+}
+
+static const struct of_device_id sunxi_musb_match[] = {
+	{ .compatible = "allwinner,sun4i-a10-musb", },
+	{ .compatible = "allwinner,sun6i-a31-musb", },
+	{ .compatible = "allwinner,sun8i-a33-musb", },
+	{}
+};
+
+static struct platform_driver sunxi_musb_driver = {
+	.probe = sunxi_musb_probe,
+	.remove = sunxi_musb_remove,
+	.driver = {
+		.name = "musb-sunxi",
+		.of_match_table = sunxi_musb_match,
+	},
+};
+module_platform_driver(sunxi_musb_driver);
+
+MODULE_DESCRIPTION("Allwinner sunxi MUSB Glue Layer");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 869c0cfcad..7d3beee 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -152,6 +152,20 @@
 	  This driver is not supported on boards like trout which
 	  has an external PHY.
 
+config USB_QCOM_8X16_PHY
+	tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on RESET_CONTROLLER
+	select USB_PHY
+	select USB_ULPI_VIEWPORT
+	help
+	  Enable this to support the USB transceiver on Qualcomm 8x16 chipsets.
+	  It handles PHY initialization, clock management, power management,
+	  and workarounds required after resetting the hardware.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called phy-qcom-8x16-usb.
+
 config USB_MV_OTG
 	tristate "Marvell USB OTG support"
 	depends on USB_EHCI_MV && USB_MV_UDC && PM
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index e36ab1d..19c0dcc 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -20,6 +20,7 @@
 obj-$(CONFIG_USB_GPIO_VBUS)		+= phy-gpio-vbus-usb.o
 obj-$(CONFIG_USB_ISP1301)		+= phy-isp1301.o
 obj-$(CONFIG_USB_MSM_OTG)		+= phy-msm-usb.o
+obj-$(CONFIG_USB_QCOM_8X16_PHY)	+= phy-qcom-8x16-usb.o
 obj-$(CONFIG_USB_MV_OTG)		+= phy-mv-usb.o
 obj-$(CONFIG_USB_MXS_PHY)		+= phy-mxs-usb.o
 obj-$(CONFIG_USB_RCAR_PHY)		+= phy-rcar-usb.o
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index deee68e..ec6ecd0 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -218,11 +218,13 @@
 			clk_rate = 0;
 
 		needs_vcc = of_property_read_bool(node, "vcc-supply");
-		nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset");
+		nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
+							   GPIOD_ASIS);
 		err = PTR_ERR_OR_ZERO(nop->gpiod_reset);
 		if (!err) {
 			nop->gpiod_vbus = devm_gpiod_get_optional(dev,
-							 "vbus-detect");
+							 "vbus-detect",
+							 GPIOD_ASIS);
 			err = PTR_ERR_OR_ZERO(nop->gpiod_vbus);
 		}
 	} else if (pdata) {
diff --git a/drivers/usb/phy/phy-keystone.c b/drivers/usb/phy/phy-keystone.c
index e0556f7..01d4e4c 100644
--- a/drivers/usb/phy/phy-keystone.c
+++ b/drivers/usb/phy/phy-keystone.c
@@ -96,11 +96,7 @@
 
 	platform_set_drvdata(pdev, k_phy);
 
-	ret = usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
-	if (ret)
-		return ret;
-
-	return 0;
+	return usb_add_phy_dev(&k_phy->usb_phy_gen.phy);
 }
 
 static int keystone_usbphy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 00c49bb..c58c3c0 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -18,6 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
@@ -32,6 +33,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/reboot.h>
 #include <linux/reset.h>
 
 #include <linux/usb.h>
@@ -1471,6 +1473,14 @@
 	else
 		clear_bit(B_SESS_VLD, &motg->inputs);
 
+	if (test_bit(B_SESS_VLD, &motg->inputs)) {
+		/* Switch D+/D- lines to Device connector */
+		gpiod_set_value_cansleep(motg->switch_gpio, 0);
+	} else {
+		/* Switch D+/D- lines to Hub */
+		gpiod_set_value_cansleep(motg->switch_gpio, 1);
+	}
+
 	schedule_work(&motg->sm_work);
 
 	return NOTIFY_DONE;
@@ -1546,6 +1556,11 @@
 
 	motg->manual_pullup = of_property_read_bool(node, "qcom,manual-pullup");
 
+	motg->switch_gpio = devm_gpiod_get_optional(&pdev->dev, "switch",
+						    GPIOD_OUT_LOW);
+	if (IS_ERR(motg->switch_gpio))
+		return PTR_ERR(motg->switch_gpio);
+
 	ext_id = ERR_PTR(-ENODEV);
 	ext_vbus = ERR_PTR(-ENODEV);
 	if (of_property_read_bool(node, "extcon")) {
@@ -1561,15 +1576,16 @@
 	}
 
 	if (!IS_ERR(ext_vbus)) {
+		motg->vbus.extcon = ext_vbus;
 		motg->vbus.nb.notifier_call = msm_otg_vbus_notifier;
-		ret = extcon_register_interest(&motg->vbus.conn, ext_vbus->name,
-					       "USB", &motg->vbus.nb);
+		ret = extcon_register_notifier(ext_vbus, EXTCON_USB,
+						&motg->vbus.nb);
 		if (ret < 0) {
 			dev_err(&pdev->dev, "register VBUS notifier failed\n");
 			return ret;
 		}
 
-		ret = extcon_get_cable_state(ext_vbus, "USB");
+		ret = extcon_get_cable_state_(ext_vbus, EXTCON_USB);
 		if (ret)
 			set_bit(B_SESS_VLD, &motg->inputs);
 		else
@@ -1577,15 +1593,16 @@
 	}
 
 	if (!IS_ERR(ext_id)) {
+		motg->id.extcon = ext_id;
 		motg->id.nb.notifier_call = msm_otg_id_notifier;
-		ret = extcon_register_interest(&motg->id.conn, ext_id->name,
-					       "USB-HOST", &motg->id.nb);
+		ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST,
+						&motg->id.nb);
 		if (ret < 0) {
 			dev_err(&pdev->dev, "register ID notifier failed\n");
 			return ret;
 		}
 
-		ret = extcon_get_cable_state(ext_id, "USB-HOST");
+		ret = extcon_get_cable_state_(ext_id, EXTCON_USB_HOST);
 		if (ret)
 			clear_bit(ID, &motg->inputs);
 		else
@@ -1615,6 +1632,19 @@
 	return 0;
 }
 
+static int msm_otg_reboot_notify(struct notifier_block *this,
+				 unsigned long code, void *unused)
+{
+	struct msm_otg *motg = container_of(this, struct msm_otg, reboot);
+
+	/*
+	 * Ensure that D+/D- lines are routed to uB connector, so
+	 * we could load bootloader/kernel at next reboot
+	 */
+	gpiod_set_value_cansleep(motg->switch_gpio, 0);
+	return NOTIFY_DONE;
+}
+
 static int msm_otg_probe(struct platform_device *pdev)
 {
 	struct regulator_bulk_data regs[3];
@@ -1779,6 +1809,17 @@
 			dev_dbg(&pdev->dev, "Can not create mode change file\n");
 	}
 
+	if (test_bit(B_SESS_VLD, &motg->inputs)) {
+		/* Switch D+/D- lines to Device connector */
+		gpiod_set_value_cansleep(motg->switch_gpio, 0);
+	} else {
+		/* Switch D+/D- lines to Hub */
+		gpiod_set_value_cansleep(motg->switch_gpio, 1);
+	}
+
+	motg->reboot.notifier_call = msm_otg_reboot_notify;
+	register_reboot_notifier(&motg->reboot);
+
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
@@ -1805,10 +1846,16 @@
 	if (phy->otg->host || phy->otg->gadget)
 		return -EBUSY;
 
-	if (motg->id.conn.edev)
-		extcon_unregister_interest(&motg->id.conn);
-	if (motg->vbus.conn.edev)
-		extcon_unregister_interest(&motg->vbus.conn);
+	unregister_reboot_notifier(&motg->reboot);
+
+	/*
+	 * Ensure that D+/D- lines are routed to uB connector, so
+	 * we could load bootloader/kernel at next reboot
+	 */
+	gpiod_set_value_cansleep(motg->switch_gpio, 0);
+
+	extcon_unregister_notifier(motg->id.extcon, EXTCON_USB_HOST, &motg->id.nb);
+	extcon_unregister_notifier(motg->vbus.extcon, EXTCON_USB, &motg->vbus.nb);
 
 	msm_otg_debugfs_cleanup();
 	cancel_delayed_work_sync(&motg->chg_work);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 3fcc048..4d863eb 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -506,11 +506,7 @@
 
 	device_set_wakeup_capable(&pdev->dev, true);
 
-	ret = usb_add_phy_dev(&mxs_phy->phy);
-	if (ret)
-		return ret;
-
-	return 0;
+	return usb_add_phy_dev(&mxs_phy->phy);
 }
 
 static int mxs_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c
index 56ee760..1270906 100644
--- a/drivers/usb/phy/phy-omap-otg.c
+++ b/drivers/usb/phy/phy-omap-otg.c
@@ -30,8 +30,7 @@
 	void __iomem			*base;
 	bool				id;
 	bool				vbus;
-	struct extcon_specific_cable_nb	vbus_dev;
-	struct extcon_specific_cable_nb	id_dev;
+	struct extcon_dev		*extcon;
 	struct notifier_block		vbus_nb;
 	struct notifier_block		id_nb;
 };
@@ -106,6 +105,7 @@
 	extcon = extcon_get_extcon_dev(config->extcon);
 	if (!extcon)
 		return -EPROBE_DEFER;
+	otg_dev->extcon = extcon;
 
 	otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL);
 	if (!otg_dev)
@@ -118,20 +118,19 @@
 	otg_dev->id_nb.notifier_call = omap_otg_id_notifier;
 	otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier;
 
-	ret = extcon_register_interest(&otg_dev->id_dev, config->extcon,
-				       "USB-HOST", &otg_dev->id_nb);
+	ret = extcon_register_notifier(extcon, EXTCON_USB_HOST, &otg_dev->id_nb);
 	if (ret)
 		return ret;
 
-	ret = extcon_register_interest(&otg_dev->vbus_dev, config->extcon,
-				       "USB", &otg_dev->vbus_nb);
+	ret = extcon_register_notifier(extcon, EXTCON_USB, &otg_dev->vbus_nb);
 	if (ret) {
-		extcon_unregister_interest(&otg_dev->id_dev);
+		extcon_unregister_notifier(extcon, EXTCON_USB_HOST,
+					&otg_dev->id_nb);
 		return ret;
 	}
 
-	otg_dev->id = extcon_get_cable_state(extcon, "USB-HOST");
-	otg_dev->vbus = extcon_get_cable_state(extcon, "USB");
+	otg_dev->id = extcon_get_cable_state_(extcon, EXTCON_USB_HOST);
+	otg_dev->vbus = extcon_get_cable_state_(extcon, EXTCON_USB);
 	omap_otg_set_mode(otg_dev);
 
 	rev = readl(otg_dev->base);
@@ -147,9 +146,10 @@
 static int omap_otg_remove(struct platform_device *pdev)
 {
 	struct otg_device *otg_dev = platform_get_drvdata(pdev);
+	struct extcon_dev *edev = otg_dev->extcon;
 
-	extcon_unregister_interest(&otg_dev->id_dev);
-	extcon_unregister_interest(&otg_dev->vbus_dev);
+	extcon_unregister_notifier(edev, EXTCON_USB_HOST, &otg_dev->id_nb);
+	extcon_unregister_notifier(edev, EXTCON_USB, &otg_dev->vbus_nb);
 
 	return 0;
 }
diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c
new file mode 100644
index 0000000..5d357a9
--- /dev/null
+++ b/drivers/usb/phy/phy-qcom-8x16-usb.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/ulpi.h>
+
+#define HSPHY_AHBBURST			0x0090
+#define HSPHY_AHBMODE			0x0098
+#define HSPHY_GENCONFIG			0x009c
+#define HSPHY_GENCONFIG_2		0x00a0
+
+#define HSPHY_USBCMD			0x0140
+#define HSPHY_ULPI_VIEWPORT		0x0170
+#define HSPHY_CTRL			0x0240
+
+#define HSPHY_TXFIFO_IDLE_FORCE_DIS	BIT(4)
+#define HSPHY_SESS_VLD_CTRL_EN		BIT(7)
+#define HSPHY_POR_ASSERT		BIT(0)
+#define HSPHY_RETEN			BIT(1)
+
+#define HSPHY_SESS_VLD_CTRL		BIT(25)
+
+#define ULPI_PWR_CLK_MNG_REG		0x88
+#define ULPI_PWR_OTG_COMP_DISABLE	BIT(0)
+
+#define ULPI_MISC_A			0x96
+#define ULPI_MISC_A_VBUSVLDEXTSEL	BIT(1)
+#define ULPI_MISC_A_VBUSVLDEXT		BIT(0)
+
+#define HSPHY_3P3_MIN			3050000 /* uV */
+#define HSPHY_3P3_MAX			3300000 /* uV */
+
+#define HSPHY_1P8_MIN			1800000 /* uV */
+#define HSPHY_1P8_MAX			1800000 /* uV */
+
+#define HSPHY_VDD_MIN			5
+#define HSPHY_VDD_MAX			7
+
+struct phy_8x16 {
+	struct usb_phy			phy;
+	void __iomem			*regs;
+	struct clk			*core_clk;
+	struct clk			*iface_clk;
+	struct regulator		*v3p3;
+	struct regulator		*v1p8;
+	struct regulator		*vdd;
+
+	struct reset_control		*phy_reset;
+
+	struct extcon_specific_cable_nb vbus_cable;
+	struct notifier_block		vbus_notify;
+
+	struct gpio_desc		*switch_gpio;
+	struct notifier_block		reboot_notify;
+};
+
+static int phy_8x16_regulators_enable(struct phy_8x16 *qphy)
+{
+	int ret;
+
+	ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX);
+	if (ret)
+		return ret;
+
+	ret = regulator_enable(qphy->vdd);
+	if (ret)
+		return ret;
+
+	ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX);
+	if (ret)
+		goto off_vdd;
+
+	ret = regulator_enable(qphy->v3p3);
+	if (ret)
+		goto off_vdd;
+
+	ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX);
+	if (ret)
+		goto off_3p3;
+
+	ret = regulator_enable(qphy->v1p8);
+	if (ret)
+		goto off_3p3;
+
+	return 0;
+
+off_3p3:
+	regulator_disable(qphy->v3p3);
+off_vdd:
+	regulator_disable(qphy->vdd);
+
+	return ret;
+}
+
+static void phy_8x16_regulators_disable(struct phy_8x16 *qphy)
+{
+	regulator_disable(qphy->v1p8);
+	regulator_disable(qphy->v3p3);
+	regulator_disable(qphy->vdd);
+}
+
+static int phy_8x16_notify_connect(struct usb_phy *phy,
+				   enum usb_device_speed speed)
+{
+	struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
+	u32 val;
+
+	val = ULPI_MISC_A_VBUSVLDEXTSEL | ULPI_MISC_A_VBUSVLDEXT;
+	usb_phy_io_write(&qphy->phy, val, ULPI_SET(ULPI_MISC_A));
+
+	val = readl(qphy->regs + HSPHY_USBCMD);
+	val |= HSPHY_SESS_VLD_CTRL;
+	writel(val, qphy->regs + HSPHY_USBCMD);
+
+	return 0;
+}
+
+static int phy_8x16_notify_disconnect(struct usb_phy *phy,
+				      enum usb_device_speed speed)
+{
+	struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
+	u32 val;
+
+	val = ULPI_MISC_A_VBUSVLDEXT | ULPI_MISC_A_VBUSVLDEXTSEL;
+	usb_phy_io_write(&qphy->phy, val, ULPI_CLR(ULPI_MISC_A));
+
+	val = readl(qphy->regs + HSPHY_USBCMD);
+	val &= ~HSPHY_SESS_VLD_CTRL;
+	writel(val, qphy->regs + HSPHY_USBCMD);
+
+	return 0;
+}
+
+static int phy_8x16_vbus_on(struct phy_8x16 *qphy)
+{
+	phy_8x16_notify_connect(&qphy->phy, USB_SPEED_UNKNOWN);
+
+	/* Switch D+/D- lines to Device connector */
+	gpiod_set_value_cansleep(qphy->switch_gpio, 0);
+
+	return 0;
+}
+
+static int phy_8x16_vbus_off(struct phy_8x16 *qphy)
+{
+	phy_8x16_notify_disconnect(&qphy->phy, USB_SPEED_UNKNOWN);
+
+	/* Switch D+/D- lines to USB HUB */
+	gpiod_set_value_cansleep(qphy->switch_gpio, 1);
+
+	return 0;
+}
+
+static int phy_8x16_vbus_notify(struct notifier_block *nb, unsigned long event,
+				void *ptr)
+{
+	struct phy_8x16 *qphy = container_of(nb, struct phy_8x16, vbus_notify);
+
+	if (event)
+		phy_8x16_vbus_on(qphy);
+	else
+		phy_8x16_vbus_off(qphy);
+
+	return NOTIFY_DONE;
+}
+
+static int phy_8x16_init(struct usb_phy *phy)
+{
+	struct phy_8x16 *qphy = container_of(phy, struct phy_8x16, phy);
+	u32 val, init[] = {0x44, 0x6B, 0x24, 0x13};
+	u32 addr = ULPI_EXT_VENDOR_SPECIFIC;
+	int idx, state;
+
+	for (idx = 0; idx < ARRAY_SIZE(init); idx++)
+		usb_phy_io_write(phy, init[idx], addr + idx);
+
+	reset_control_reset(qphy->phy_reset);
+
+	/* Assert USB HSPHY_POR */
+	val = readl(qphy->regs + HSPHY_CTRL);
+	val |= HSPHY_POR_ASSERT;
+	writel(val, qphy->regs + HSPHY_CTRL);
+
+	/*
+	 * Wait for a minimum of 10 microseconds, as suggested in the HPG.
+	 * Use a slightly larger value since the exact value didn't
+	 * work 100% of the time.
+	 */
+	usleep_range(12, 15);
+
+	/* Deassert USB HSPHY_POR */
+	val = readl(qphy->regs + HSPHY_CTRL);
+	val &= ~HSPHY_POR_ASSERT;
+	writel(val, qphy->regs + HSPHY_CTRL);
+
+	usleep_range(10, 15);
+
+	writel(0x00, qphy->regs + HSPHY_AHBBURST);
+	writel(0x08, qphy->regs + HSPHY_AHBMODE);
+
+	/* workaround for rx buffer collision issue */
+	val = readl(qphy->regs + HSPHY_GENCONFIG);
+	val &= ~HSPHY_TXFIFO_IDLE_FORCE_DIS;
+	writel(val, qphy->regs + HSPHY_GENCONFIG);
+
+	val = readl(qphy->regs + HSPHY_GENCONFIG_2);
+	val |= HSPHY_SESS_VLD_CTRL_EN;
+	writel(val, qphy->regs + HSPHY_GENCONFIG_2);
+
+	val = ULPI_PWR_OTG_COMP_DISABLE;
+	usb_phy_io_write(phy, val, ULPI_SET(ULPI_PWR_CLK_MNG_REG));
+
+	state = extcon_get_cable_state(qphy->vbus_cable.edev, "USB");
+	if (state)
+		phy_8x16_vbus_on(qphy);
+	else
+		phy_8x16_vbus_off(qphy);
+
+	val = usb_phy_io_read(&qphy->phy, ULPI_FUNC_CTRL);
+	val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+	val |= ULPI_FUNC_CTRL_OPMODE_NORMAL;
+	usb_phy_io_write(&qphy->phy, val, ULPI_FUNC_CTRL);
+
+	return 0;
+}
+
+static void phy_8x16_shutdown(struct usb_phy *phy)
+{
+	u32 val;
+
+	/* Put the controller in non-driving mode */
+	val = usb_phy_io_read(phy, ULPI_FUNC_CTRL);
+	val &= ~ULPI_FUNC_CTRL_OPMODE_MASK;
+	val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING;
+	usb_phy_io_write(phy, val, ULPI_FUNC_CTRL);
+}
+
+static int phy_8x16_read_devicetree(struct phy_8x16 *qphy)
+{
+	struct regulator_bulk_data regs[3];
+	struct device *dev = qphy->phy.dev;
+	int ret;
+
+	qphy->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(qphy->core_clk))
+		return PTR_ERR(qphy->core_clk);
+
+	qphy->iface_clk = devm_clk_get(dev, "iface");
+	if (IS_ERR(qphy->iface_clk))
+		return PTR_ERR(qphy->iface_clk);
+
+	regs[0].supply = "v3p3";
+	regs[1].supply = "v1p8";
+	regs[2].supply = "vddcx";
+
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs);
+	if (ret)
+		return ret;
+
+	qphy->v3p3 = regs[0].consumer;
+	qphy->v1p8 = regs[1].consumer;
+	qphy->vdd  = regs[2].consumer;
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	qphy->switch_gpio = devm_gpiod_get_optional(dev, "switch",
+						   GPIOD_OUT_LOW);
+	if (IS_ERR(qphy->switch_gpio))
+		return PTR_ERR(qphy->switch_gpio);
+
+	return 0;
+}
+
+static int phy_8x16_reboot_notify(struct notifier_block *this,
+				  unsigned long code, void *unused)
+{
+	struct phy_8x16 *qphy;
+
+	qphy = container_of(this, struct phy_8x16, reboot_notify);
+
+	/*
+	 * Ensure that D+/D- lines are routed to the uB connector, so
+	 * that the bootloader/kernel can be loaded at the next reboot.
+	 */
+	gpiod_set_value_cansleep(qphy->switch_gpio, 0);
+	return NOTIFY_DONE;
+}
+
+static int phy_8x16_probe(struct platform_device *pdev)
+{
+	struct extcon_dev *edev;
+	struct phy_8x16 *qphy;
+	struct resource *res;
+	struct usb_phy *phy;
+	int ret;
+
+	qphy = devm_kzalloc(&pdev->dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, qphy);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -EINVAL;
+
+	qphy->regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!qphy->regs)
+		return -ENOMEM;
+
+	phy			= &qphy->phy;
+	phy->dev		= &pdev->dev;
+	phy->label		= dev_name(&pdev->dev);
+	phy->init		= phy_8x16_init;
+	phy->shutdown		= phy_8x16_shutdown;
+	phy->notify_connect	= phy_8x16_notify_connect;
+	phy->notify_disconnect	= phy_8x16_notify_disconnect;
+	phy->io_priv		= qphy->regs + HSPHY_ULPI_VIEWPORT;
+	phy->io_ops		= &ulpi_viewport_access_ops;
+	phy->type		= USB_PHY_TYPE_USB2;
+
+	ret = phy_8x16_read_devicetree(qphy);
+	if (ret < 0)
+		return ret;
+
+	edev = extcon_get_edev_by_phandle(phy->dev, 0);
+	if (IS_ERR(edev))
+		return PTR_ERR(edev);
+
+	ret = clk_set_rate(qphy->core_clk, INT_MAX);
+	if (ret < 0)
+		dev_dbg(phy->dev, "Can't boost core clock\n");
+
+	ret = clk_prepare_enable(qphy->core_clk);
+	if (ret < 0)
+		return ret;
+
+	ret = clk_prepare_enable(qphy->iface_clk);
+	if (ret < 0)
+		goto off_core;
+
+	ret = phy_8x16_regulators_enable(qphy);
+	if (ret)
+		goto off_clks;
+
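+	/* VBUS events on the "USB" extcon cable drive phy_8x16_vbus_notify(). */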
+	qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify;
+	ret = extcon_register_interest(&qphy->vbus_cable, edev->name,
+				       "USB", &qphy->vbus_notify);
+	if (ret < 0)
+		goto off_power;
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		goto off_extcon;
+
+	qphy->reboot_notify.notifier_call = phy_8x16_reboot_notify;
+	register_reboot_notifier(&qphy->reboot_notify);
+
+	return 0;
+
+off_extcon:
+	extcon_unregister_interest(&qphy->vbus_cable);
+off_power:
+	phy_8x16_regulators_disable(qphy);
+off_clks:
+	clk_disable_unprepare(qphy->iface_clk);
+off_core:
+	clk_disable_unprepare(qphy->core_clk);
+	return ret;
+}
+
+static int phy_8x16_remove(struct platform_device *pdev)
+{
+	struct phy_8x16 *qphy = platform_get_drvdata(pdev);
+
+	unregister_reboot_notifier(&qphy->reboot_notify);
+	extcon_unregister_interest(&qphy->vbus_cable);
+
+	/*
+	 * Ensure that D+/D- lines are routed to the uB connector, so
+	 * that the bootloader/kernel can be loaded at the next reboot.
+	 */
+	gpiod_set_value_cansleep(qphy->switch_gpio, 0);
+
+	usb_remove_phy(&qphy->phy);
+
+	clk_disable_unprepare(qphy->iface_clk);
+	clk_disable_unprepare(qphy->core_clk);
+	phy_8x16_regulators_disable(qphy);
+	return 0;
+}
+
+static const struct of_device_id phy_8x16_dt_match[] = {
+	{ .compatible = "qcom,usb-8x16-phy" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, phy_8x16_dt_match);
+
+static struct platform_driver phy_8x16_driver = {
+	.probe	= phy_8x16_probe,
+	.remove = phy_8x16_remove,
+	.driver = {
+		.name = "phy-qcom-8x16-usb",
+		.of_match_table = phy_8x16_dt_match,
+	},
+};
+module_platform_driver(phy_8x16_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm APQ8016/MSM8916 chipsets USB transceiver driver");
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index b40d6a8..ab5d364 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -57,7 +57,7 @@
 	struct clk		*ick;
 	int			irq;
 	int			tahvo_mode;
-	struct extcon_dev	extcon;
+	struct extcon_dev	*extcon;
 };
 
 static const unsigned int tahvo_cable[] = {
@@ -121,7 +121,7 @@
 	prev_state = tu->vbus_state;
 	tu->vbus_state = reg & TAHVO_STAT_VBUS;
 	if (prev_state != tu->vbus_state) {
-		extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
+		extcon_set_cable_state_(tu->extcon, EXTCON_USB, tu->vbus_state);
 		sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state");
 	}
 }
@@ -130,7 +130,7 @@
 {
 	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
 
-	extcon_set_cable_state(&tu->extcon, "USB-HOST", true);
+	extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, true);
 
 	/* Power up the transceiver in USB host mode */
 	retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND |
@@ -149,7 +149,7 @@
 {
 	struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent);
 
-	extcon_set_cable_state(&tu->extcon, "USB-HOST", false);
+	extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, false);
 
 	/* Power up transceiver and set it in USB peripheral mode */
 	retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT |
@@ -365,11 +365,13 @@
 	 */
 	tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;
 
-	tu->extcon.name = DRIVER_NAME;
-	tu->extcon.supported_cable = tahvo_cable;
-	tu->extcon.dev.parent = &pdev->dev;
+	tu->extcon = devm_extcon_dev_allocate(&pdev->dev, tahvo_cable);
+	if (IS_ERR(tu->extcon)) {
+		dev_err(&pdev->dev, "failed to allocate memory for extcon\n");
+		return -ENOMEM;
+	}
 
-	ret = extcon_dev_register(&tu->extcon);
+	ret = devm_extcon_dev_register(&pdev->dev, tu->extcon);
 	if (ret) {
 		dev_err(&pdev->dev, "could not register extcon device: %d\n",
 			ret);
@@ -377,9 +379,9 @@
 	}
 
 	/* Set the initial cable state. */
-	extcon_set_cable_state(&tu->extcon, "USB-HOST",
+	extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST,
 			       tu->tahvo_mode == TAHVO_MODE_HOST);
-	extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);
+	extcon_set_cable_state_(tu->extcon, EXTCON_USB, tu->vbus_state);
 
 	/* Create OTG interface */
 	tahvo_usb_power_off(tu);
@@ -396,7 +398,7 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
 			ret);
-		goto err_extcon_unreg;
+		goto err_disable_clk;
 	}
 
 	dev_set_drvdata(&pdev->dev, tu);
@@ -424,8 +426,6 @@
 	free_irq(tu->irq, tu);
 err_remove_phy:
 	usb_remove_phy(&tu->phy);
-err_extcon_unreg:
-	extcon_dev_unregister(&tu->extcon);
 err_disable_clk:
 	if (!IS_ERR(tu->ick))
 		clk_disable(tu->ick);
@@ -440,7 +440,6 @@
 	sysfs_remove_group(&pdev->dev.kobj, &tahvo_attr_group);
 	free_irq(tu->irq, tu);
 	usb_remove_phy(&tu->phy);
-	extcon_dev_unregister(&tu->extcon);
 	if (!IS_ERR(tu->ick))
 		clk_disable(tu->ick);
 
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index e8bf408..7b98e1d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -388,7 +388,7 @@
 
 	if (enable && !mod) {
 		if (priv->edev) {
-			cable = extcon_get_cable_state(priv->edev, "USB-HOST");
+			cable = extcon_get_cable_state_(priv->edev, EXTCON_USB_HOST);
 			if ((cable > 0 && id != USBHS_HOST) ||
 			    (!cable && id != USBHS_GADGET)) {
 				dev_info(&pdev->dev,
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index dc2aa32..de4f97d 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -21,6 +21,7 @@
 #include <linux/platform_device.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
 #include "common.h"
 
 /*
@@ -50,6 +51,8 @@
 	int			 uep_size;
 
 	struct usb_gadget_driver	*driver;
+	struct usb_phy		*transceiver;
+	bool			 vbus_active;
 
 	u32	status;
 #define USBHSG_STATUS_STARTED		(1 << 0)
@@ -873,6 +876,27 @@
 }
 
 /*
+ * VBUS provided by the PHY
+ */
+static int usbhsm_phy_get_vbus(struct platform_device *pdev)
+{
+	struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
+	struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+	return  gpriv->vbus_active;
+}
+
+static void usbhs_mod_phy_mode(struct usbhs_priv *priv)
+{
+	struct usbhs_mod_info *info = &priv->mod_info;
+
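+	/*
+	 * With a transceiver, VBUS state comes from usbhsg_vbus_session()
+	 * rather than from the VBUS interrupt.
+	 */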
+	info->irq_vbus		= NULL;
+	priv->pfunc.get_vbus	= usbhsm_phy_get_vbus;
+
+	usbhs_irq_callback_update(priv, NULL);
+}
+
+/*
  *
  *		linux usb function
  *
@@ -882,12 +906,28 @@
 {
 	struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+	struct device *dev = usbhs_priv_to_dev(priv);
+	int ret;
 
 	if (!driver		||
 	    !driver->setup	||
 	    driver->max_speed < USB_SPEED_FULL)
 		return -EINVAL;
 
+	/* connect to bus through transceiver */
+	if (!IS_ERR_OR_NULL(gpriv->transceiver)) {
+		ret = otg_set_peripheral(gpriv->transceiver->otg,
+					&gpriv->gadget);
+		if (ret) {
+			dev_err(dev, "%s: can't bind to transceiver\n",
+				gpriv->gadget.name);
+			return ret;
+		}
+
+		/* get vbus using phy versions */
+		usbhs_mod_phy_mode(priv);
+	}
+
 	/* first hook up the driver ... */
 	gpriv->driver = driver;
 
@@ -900,6 +940,10 @@
 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
 	usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
+
+	if (!IS_ERR_OR_NULL(gpriv->transceiver))
+		otg_set_peripheral(gpriv->transceiver->otg, NULL);
+
 	gpriv->driver = NULL;
 
 	return 0;
@@ -947,12 +991,26 @@
 	return 0;
 }
 
+static int usbhsg_vbus_session(struct usb_gadget *gadget, int is_active)
+{
+	struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
+	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+	struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+
+	gpriv->vbus_active = !!is_active;
+
+	renesas_usbhs_call_notify_hotplug(pdev);
+
+	return 0;
+}
+
 static const struct usb_gadget_ops usbhsg_gadget_ops = {
 	.get_frame		= usbhsg_get_frame,
 	.set_selfpowered	= usbhsg_set_selfpowered,
 	.udc_start		= usbhsg_gadget_start,
 	.udc_stop		= usbhsg_gadget_stop,
 	.pullup			= usbhsg_pullup,
+	.vbus_session		= usbhsg_vbus_session,
 };
 
 static int usbhsg_start(struct usbhs_priv *priv)
@@ -994,6 +1052,10 @@
 		goto usbhs_mod_gadget_probe_err_gpriv;
 	}
 
+	gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
+	dev_info(dev, "%stransceiver found\n",
+		 gpriv->transceiver ? "" : "no ");
+
 	/*
 	 * CAUTION
 	 *
@@ -1041,12 +1103,18 @@
 		if (usbhsg_is_dcp(uep)) {
 			gpriv->gadget.ep0 = &uep->ep;
 			usb_ep_set_maxpacket_limit(&uep->ep, 64);
+			uep->ep.caps.type_control = true;
 		}
 		/* init normal pipe */
 		else {
 			usb_ep_set_maxpacket_limit(&uep->ep, 512);
+			uep->ep.caps.type_iso = true;
+			uep->ep.caps.type_bulk = true;
+			uep->ep.caps.type_int = true;
 			list_add_tail(&uep->ep.ep_list, &gpriv->gadget.ep_list);
 		}
+		uep->ep.caps.dir_in = true;
+		uep->ep.caps.dir_out = true;
 	}
 
 	ret = usb_add_gadget_udc(dev, &gpriv->gadget);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4c8b3b8..a5a0376 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -605,6 +605,10 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
+	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
 	/*
 	 * ELV devices:
 	 */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 792e054..67c6d44 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -568,6 +568,14 @@
  */
 #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
 
+/*
+ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
+ */
+#define FTDI_CUSTOMWARE_MINIPLEX_PID	0xfd48	/* MiniPlex first generation NMEA Multiplexer */
+#define FTDI_CUSTOMWARE_MINIPLEX2_PID	0xfd49	/* MiniPlex-USB and MiniPlex-2 series */
+#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID	0xfd4a	/* MiniPlex-2Wi */
+#define FTDI_CUSTOMWARE_MINIPLEX3_PID	0xfd4b	/* MiniPlex-3 series */
+
 
 /********************************/
 /** third-party VID/PID combos **/
@@ -1365,7 +1373,7 @@
 #define FTDI_CTI_NANO_PID	0xF60B
 
 /*
- * ZeitControl cardsystems GmbH rfid-readers http://zeitconrol.de
+ * ZeitControl cardsystems GmbH rfid-readers http://zeitcontrol.de
  */
 /* TagTracer MIFARE*/
 #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID	0xF7C0
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index ddbb8fe..0ac1b10 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -71,6 +71,25 @@
 	__u8	hardware_type;		/* Type of hardware */
 } __attribute__((packed));
 
+/*
+ * Edgeport firmware header
+ *
+ * "build_number" has been set to 0 in all three of the images I have
+ * seen, and Digi Tech Support suggests that it is safe to ignore it.
+ *
+ * "length" is the number of bytes of actual data following the header.
+ *
+ * "checksum" is the low order byte resulting from adding the values of
+ * all the data bytes.
+ */
+struct edgeport_fw_hdr {
+	u8 major_version;
+	u8 minor_version;
+	__le16 build_number;
+	__le16 length;
+	u8 checksum;
+} __packed;
+
 struct edgeport_port {
 	__u16 uart_base;
 	__u16 dma_address;
@@ -101,6 +120,9 @@
 	struct mutex es_lock;
 	int num_ports_open;
 	struct usb_serial *serial;
+	struct delayed_work heartbeat_work;
+	int fw_version;
+	bool use_heartbeat;
 };
 
 
@@ -187,10 +209,6 @@
 
 MODULE_DEVICE_TABLE(usb, id_table_combined);
 
-static unsigned char OperationalMajorVersion;
-static unsigned char OperationalMinorVersion;
-static unsigned short OperationalBuildNumber;
-
 static int closing_wait = EDGE_CLOSING_WAIT;
 static bool ignore_cpu_rev;
 static int default_uart_mode;		/* RS232 */
@@ -209,6 +227,26 @@
 static int edge_create_sysfs_attrs(struct usb_serial_port *port);
 static int edge_remove_sysfs_attrs(struct usb_serial_port *port);
 
+/*
+ * Some release of Edgeport firmware "down3.bin" after version 4.80
+ * introduced code to automatically disconnect idle devices on some
+ * Edgeport models after periods of inactivity, typically ~60 seconds.
+ * This occurs without regard to whether ports on the device are open
+ * or not.  Digi International Tech Support suggested:
+ *
+ * 1.  Adding driver "heartbeat" code to reset the firmware timer by
+ *     requesting a descriptor record every 15 seconds, which should be
+ *     effective with newer firmware versions that require it, and benign
+ *     with older versions that do not. In practice 40 seconds seems often
+ *     enough.
+ * 2.  The heartbeat code is currently required only on Edgeport/416 models.
+ */
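+/* Version 4.80, packed as (major << 8) + minor, same format as fw_version. */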
+#define FW_HEARTBEAT_VERSION_CUTOFF ((4 << 8) + 80)
+#define FW_HEARTBEAT_SECS 40
+
+/* Timeouts in msecs: firmware downloads take longer */
+#define TI_VSEND_TIMEOUT_DEFAULT 1000
+#define TI_VSEND_TIMEOUT_FW_DOWNLOAD 10000
 
 static int ti_vread_sync(struct usb_device *dev, __u8 request,
 				__u16 value, __u16 index, u8 *data, int size)
@@ -228,14 +266,14 @@
 	return 0;
 }
 
-static int ti_vsend_sync(struct usb_device *dev, __u8 request,
-				__u16 value, __u16 index, u8 *data, int size)
+static int ti_vsend_sync(struct usb_device *dev, u8 request, u16 value,
+		u16 index, u8 *data, int size, int timeout)
 {
 	int status;
 
 	status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
 			(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
-			value, index, data, size, 1000);
+			value, index, data, size, timeout);
 	if (status < 0)
 		return status;
 	if (status != size) {
@@ -250,7 +288,8 @@
 				__u8 moduleid, __u16 value, u8 *data,
 				int size)
 {
-	return ti_vsend_sync(dev, command, value, moduleid, data, size);
+	return ti_vsend_sync(dev, command, value, moduleid, data, size,
+			TI_VSEND_TIMEOUT_DEFAULT);
 }
 
 /* clear tx/rx buffers and fifo in TI UMP */
@@ -378,9 +417,9 @@
 	}
 
 	for (i = 0; i < length; ++i) {
-		status = ti_vsend_sync(serial->serial->dev,
-				UMPC_MEMORY_WRITE, buffer[i],
-				(__u16)(i + start_address), NULL, 0);
+		status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
+				buffer[i], (u16)(i + start_address), NULL,
+				0, TI_VSEND_TIMEOUT_DEFAULT);
 		if (status)
 			return status;
 	}
@@ -421,10 +460,9 @@
 	 *       regardless of host byte order.
 	 */
 	be_start_address = swab16((u16)start_address);
-	status = ti_vsend_sync(serial->serial->dev,
-				UMPC_MEMORY_WRITE, (__u16)address_type,
-				be_start_address,
-				buffer,	write_length);
+	status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
+				(u16)address_type, be_start_address,
+				buffer,	write_length, TI_VSEND_TIMEOUT_DEFAULT);
 	if (status) {
 		dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
 		return status;
@@ -454,9 +492,8 @@
 		 */
 		be_start_address = swab16((u16)start_address);
 		status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
-				(__u16)address_type,
-				be_start_address,
-				buffer, write_length);
+				(u16)address_type, be_start_address, buffer,
+				write_length, TI_VSEND_TIMEOUT_DEFAULT);
 		if (status) {
 			dev_err(dev, "%s - ERROR %d\n", __func__, status);
 			return status;
@@ -748,18 +785,17 @@
 }
 
 /* Build firmware header used for firmware update */
-static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
+static int build_i2c_fw_hdr(u8 *header, struct device *dev,
+		const struct firmware *fw)
 {
 	__u8 *buffer;
 	int buffer_size;
 	int i;
-	int err;
 	__u8 cs = 0;
 	struct ti_i2c_desc *i2c_header;
 	struct ti_i2c_image_header *img_header;
 	struct ti_i2c_firmware_rec *firmware_rec;
-	const struct firmware *fw;
-	const char *fw_name = "edgeport/down3.bin";
+	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
 
 	/* In order to update the I2C firmware we must change the type 2 record
 	 * to type 0xF2.  This will force the UMP to come up in Boot Mode.
@@ -782,24 +818,11 @@
 	// Set entire image of 0xffs
 	memset(buffer, 0xff, buffer_size);
 
-	err = request_firmware(&fw, fw_name, dev);
-	if (err) {
-		dev_err(dev, "Failed to load image \"%s\" err %d\n",
-			fw_name, err);
-		kfree(buffer);
-		return err;
-	}
-
-	/* Save Download Version Number */
-	OperationalMajorVersion = fw->data[0];
-	OperationalMinorVersion = fw->data[1];
-	OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8);
-
 	/* Copy version number into firmware record */
 	firmware_rec = (struct ti_i2c_firmware_rec *)buffer;
 
-	firmware_rec->Ver_Major	= OperationalMajorVersion;
-	firmware_rec->Ver_Minor	= OperationalMinorVersion;
+	firmware_rec->Ver_Major	= fw_hdr->major_version;
+	firmware_rec->Ver_Minor	= fw_hdr->minor_version;
 
 	/* Pointer to fw_down memory image */
 	img_header = (struct ti_i2c_image_header *)&fw->data[4];
@@ -808,8 +831,6 @@
 		&fw->data[4 + sizeof(struct ti_i2c_image_header)],
 		le16_to_cpu(img_header->Length));
 
-	release_firmware(fw);
-
 	for (i=0; i < buffer_size; i++) {
 		cs = (__u8)(cs + buffer[i]);
 	}
@@ -823,8 +844,8 @@
 	i2c_header->Type	= I2C_DESC_TYPE_FIRMWARE_BLANK;
 	i2c_header->Size	= cpu_to_le16(buffer_size);
 	i2c_header->CheckSum	= cs;
-	firmware_rec->Ver_Major	= OperationalMajorVersion;
-	firmware_rec->Ver_Minor	= OperationalMinorVersion;
+	firmware_rec->Ver_Major	= fw_hdr->major_version;
+	firmware_rec->Ver_Minor	= fw_hdr->minor_version;
 
 	return 0;
 }
@@ -925,13 +946,49 @@
 	return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
 }
 
+static int check_fw_sanity(struct edgeport_serial *serial,
+		const struct firmware *fw)
+{
+	u16 length_total;
+	u8 checksum = 0;
+	int pos;
+	struct device *dev = &serial->serial->interface->dev;
+	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
+
+	if (fw->size < sizeof(struct edgeport_fw_hdr)) {
+		dev_err(dev, "incomplete fw header\n");
+		return -EINVAL;
+	}
+
+	length_total = le16_to_cpu(fw_hdr->length) +
+			sizeof(struct edgeport_fw_hdr);
+
+	if (fw->size != length_total) {
+		dev_err(dev, "bad fw size (expected: %u, got: %zu)\n",
+				length_total, fw->size);
+		return -EINVAL;
+	}
+
+	for (pos = sizeof(struct edgeport_fw_hdr); pos < fw->size; ++pos)
+		checksum += fw->data[pos];
+
+	if (checksum != fw_hdr->checksum) {
+		dev_err(dev, "bad fw checksum (expected: 0x%x, got: 0x%x)\n",
+				fw_hdr->checksum, checksum);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * DownloadTIFirmware - Download run-time operating firmware to the TI5052
  *
  * This routine downloads the main operating code into the TI5052, using the
  * boot code already burned into E2PROM or ROM.
  */
-static int download_fw(struct edgeport_serial *serial)
+static int download_fw(struct edgeport_serial *serial,
+		const struct firmware *fw)
 {
 	struct device *dev = &serial->serial->dev->dev;
 	int status = 0;
@@ -940,6 +997,14 @@
 	struct usb_interface_descriptor *interface;
 	int download_cur_ver;
 	int download_new_ver;
+	struct edgeport_fw_hdr *fw_hdr = (struct edgeport_fw_hdr *)fw->data;
+
+	if (check_fw_sanity(serial, fw))
+		return -EINVAL;
+
+	/* If on-board version is newer, "fw_version" will be updated below. */
+	serial->fw_version = (fw_hdr->major_version << 8) +
+			fw_hdr->minor_version;
 
 	/* This routine is entered by both the BOOT mode and the Download mode
 	 * We can determine which code is running by the reading the config
@@ -1047,14 +1112,13 @@
 			   version in I2c */
 			download_cur_ver = (firmware_version->Ver_Major << 8) +
 					   (firmware_version->Ver_Minor);
-			download_new_ver = (OperationalMajorVersion << 8) +
-					   (OperationalMinorVersion);
+			download_new_ver = (fw_hdr->major_version << 8) +
+					   (fw_hdr->minor_version);
 
 			dev_dbg(dev, "%s - >> FW Versions Device %d.%d  Driver %d.%d\n",
 				__func__, firmware_version->Ver_Major,
 				firmware_version->Ver_Minor,
-				OperationalMajorVersion,
-				OperationalMinorVersion);
+				fw_hdr->major_version, fw_hdr->minor_version);
 
 			/* Check if we have an old version in the I2C and
 			   update if necessary */
@@ -1063,8 +1127,8 @@
 					__func__,
 					firmware_version->Ver_Major,
 					firmware_version->Ver_Minor,
-					OperationalMajorVersion,
-					OperationalMinorVersion);
+					fw_hdr->major_version,
+					fw_hdr->minor_version);
 
 				record = kmalloc(1, GFP_KERNEL);
 				if (!record) {
@@ -1129,7 +1193,8 @@
 				/* Reset UMP -- Back to BOOT MODE */
 				status = ti_vsend_sync(serial->serial->dev,
 						UMPC_HARDWARE_RESET,
-						0, 0, NULL, 0);
+						0, 0, NULL, 0,
+						TI_VSEND_TIMEOUT_DEFAULT);
 
 				dev_dbg(dev, "%s - HARDWARE RESET return %d\n", __func__, status);
 
@@ -1139,6 +1204,9 @@
 				kfree(rom_desc);
 				kfree(ti_manuf_desc);
 				return -ENODEV;
+			} else {
+				/* Same or newer fw version is already loaded */
+				serial->fw_version = download_cur_ver;
 			}
 			kfree(firmware_version);
 		}
@@ -1177,7 +1245,7 @@
 			 * UMP Ram to I2C and the firmware will update the
 			 * record type from 0xf2 to 0x02.
 			 */
-			status = build_i2c_fw_hdr(header, dev);
+			status = build_i2c_fw_hdr(header, dev, fw);
 			if (status) {
 				kfree(vheader);
 				kfree(header);
@@ -1229,7 +1297,9 @@
 
 			/* Tell firmware to copy download image into I2C */
 			status = ti_vsend_sync(serial->serial->dev,
-					UMPC_COPY_DNLD_TO_I2C, 0, 0, NULL, 0);
+					UMPC_COPY_DNLD_TO_I2C,
+					0, 0, NULL, 0,
+					TI_VSEND_TIMEOUT_FW_DOWNLOAD);
 
 		  	dev_dbg(dev, "%s - Update complete 0x%x\n", __func__, status);
 			if (status) {
@@ -1278,9 +1348,6 @@
 		__u8 cs = 0;
 		__u8 *buffer;
 		int buffer_size;
-		int err;
-		const struct firmware *fw;
-		const char *fw_name = "edgeport/down3.bin";
 
 		/* Validate Hardware version number
 		 * Read Manufacturing Descriptor from TI Based Edgeport
@@ -1328,16 +1395,7 @@
 
 		/* Initialize the buffer to 0xff (pad the buffer) */
 		memset(buffer, 0xff, buffer_size);
-
-		err = request_firmware(&fw, fw_name, dev);
-		if (err) {
-			dev_err(dev, "Failed to load image \"%s\" err %d\n",
-				fw_name, err);
-			kfree(buffer);
-			return err;
-		}
 		memcpy(buffer, &fw->data[4], fw->size - 4);
-		release_firmware(fw);
 
 		for (i = sizeof(struct ti_i2c_image_header);
 				i < buffer_size; i++) {
@@ -1352,7 +1410,9 @@
 		header->CheckSum = cs;
 
 		/* Download the operational code  */
-		dev_dbg(dev, "%s - Downloading operational code image (TI UMP)\n", __func__);
+		dev_dbg(dev, "%s - Downloading operational code image version %d.%d (TI UMP)\n",
+				__func__,
+				fw_hdr->major_version, fw_hdr->minor_version);
 		status = download_code(serial, buffer, buffer_size);
 
 		kfree(buffer);
@@ -2373,10 +2433,44 @@
 			__func__, status);
 }
 
+static void edge_heartbeat_schedule(struct edgeport_serial *edge_serial)
+{
+	if (!edge_serial->use_heartbeat)
+		return;
+
+	schedule_delayed_work(&edge_serial->heartbeat_work,
+			FW_HEARTBEAT_SECS * HZ);
+}
+
+static void edge_heartbeat_work(struct work_struct *work)
+{
+	struct edgeport_serial *serial;
+	struct ti_i2c_desc *rom_desc;
+
+	serial = container_of(work, struct edgeport_serial,
+			heartbeat_work.work);
+
+	rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
+
+	/* Descriptor address request is enough to reset the firmware timer */
+	if (!rom_desc || !get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
+			rom_desc)) {
+		dev_err(&serial->serial->interface->dev,
+				"%s - Incomplete heartbeat\n", __func__);
+	}
+	kfree(rom_desc);
+
+	edge_heartbeat_schedule(serial);
+}
+
 static int edge_startup(struct usb_serial *serial)
 {
 	struct edgeport_serial *edge_serial;
 	int status;
+	const struct firmware *fw;
+	const char *fw_name = "edgeport/down3.bin";
+	struct device *dev = &serial->interface->dev;
+	u16 product_id;
 
 	/* create our private serial structure */
 	edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
@@ -2387,12 +2481,35 @@
 	edge_serial->serial = serial;
 	usb_set_serial_data(serial, edge_serial);
 
-	status = download_fw(edge_serial);
+	status = request_firmware(&fw, fw_name, dev);
+	if (status) {
+		dev_err(dev, "Failed to load image \"%s\" err %d\n",
+				fw_name, status);
+		kfree(edge_serial);
+		return status;
+	}
+
+	status = download_fw(edge_serial, fw);
+	release_firmware(fw);
 	if (status) {
 		kfree(edge_serial);
 		return status;
 	}
 
+	product_id = le16_to_cpu(
+			edge_serial->serial->dev->descriptor.idProduct);
+
+	/* Currently only the EP/416 models require heartbeat support */
+	if (edge_serial->fw_version > FW_HEARTBEAT_VERSION_CUTOFF) {
+		if (product_id == ION_DEVICE_ID_TI_EDGEPORT_416 ||
+			product_id == ION_DEVICE_ID_TI_EDGEPORT_416B) {
+			edge_serial->use_heartbeat = true;
+		}
+	}
+
+	INIT_DELAYED_WORK(&edge_serial->heartbeat_work, edge_heartbeat_work);
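+	/* No work is queued unless use_heartbeat was set above. */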
+	edge_heartbeat_schedule(edge_serial);
+
 	return 0;
 }
 
@@ -2402,7 +2519,10 @@
 
 static void edge_release(struct usb_serial *serial)
 {
-	kfree(usb_get_serial_data(serial));
+	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+	cancel_delayed_work_sync(&edge_serial->heartbeat_work);
+	kfree(edge_serial);
 }
 
 static int edge_port_probe(struct usb_serial_port *port)
@@ -2506,6 +2626,25 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+static int edge_suspend(struct usb_serial *serial, pm_message_t message)
+{
+	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+	cancel_delayed_work_sync(&edge_serial->heartbeat_work);
+
+	return 0;
+}
+
+static int edge_resume(struct usb_serial *serial)
+{
+	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+
+	edge_heartbeat_schedule(edge_serial);
+
+	return 0;
+}
+#endif
 
 static struct usb_serial_driver edgeport_1port_device = {
 	.driver = {
@@ -2538,6 +2677,10 @@
 	.read_int_callback	= edge_interrupt_callback,
 	.read_bulk_callback	= edge_bulk_in_callback,
 	.write_bulk_callback	= edge_bulk_out_callback,
+#ifdef CONFIG_PM
+	.suspend		= edge_suspend,
+	.resume			= edge_resume,
+#endif
 };
 
 static struct usb_serial_driver edgeport_2port_device = {
@@ -2571,6 +2714,10 @@
 	.read_int_callback	= edge_interrupt_callback,
 	.read_bulk_callback	= edge_bulk_in_callback,
 	.write_bulk_callback	= edge_bulk_out_callback,
+#ifdef CONFIG_PM
+	.suspend		= edge_suspend,
+	.resume			= edge_resume,
+#endif
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 460a406..31a8b47 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1137,13 +1137,9 @@
 		return err;
 
 	/* Set interface (RS-232) */
-	err = mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE,
-				    MX_INT_RS232,
-				    port->port_number);
-	if (err)
-		return err;
-
-	return 0;
+	return mxuport_send_ctrl_urb(serial, RQ_VENDOR_SET_INTERFACE,
+				     MX_INT_RS232,
+				     port->port_number);
 }
 
 static int mxuport_alloc_write_urb(struct usb_serial *serial,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 876423b..6d1941a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1941,7 +1941,7 @@
 	} else if (status == -ENOENT || status == -ESHUTDOWN) {
 		dev_dbg(dev, "%s: urb stopped: %d\n", __func__, status);
 	} else
-		dev_err(dev, "%s: error %d\n", __func__, status);
+		dev_dbg(dev, "%s: error %d\n", __func__, status);
 
 	/* Resubmit urb so we continue receiving IRQ data */
 	if (status != -ESHUTDOWN && status != -ENOENT) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index f5257af..ae682e4 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -362,21 +362,38 @@
 static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4],
 								speed_t baud)
 {
-	unsigned int tmp;
+	unsigned int baseline, mantissa, exponent;
 
 	/*
 	 * Apparently the formula is:
-	 * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
+	 *   baudrate = 12M * 32 / (mantissa * 4^exponent)
+	 * where
+	 *   mantissa = buf[8:0]
+	 *   exponent = buf[11:9]
 	 */
-	tmp = 12000000 * 32 / baud;
+	baseline = 12000000 * 32;
+	mantissa = baseline / baud;
+	if (mantissa == 0)
+		mantissa = 1;	/* Avoid dividing by zero if baud > 32*12M. */
+	exponent = 0;
+	while (mantissa >= 512) {
+		if (exponent < 7) {
+			mantissa >>= 2;	/* divide by 4 */
+			exponent++;
+		} else {
+			/* Exponent is maxed. Trim mantissa and leave. */
+			mantissa = 511;
+			break;
+		}
+	}
+
 	buf[3] = 0x80;
 	buf[2] = 0;
-	buf[1] = (tmp >= 256);
-	while (tmp >= 256) {
-		tmp >>= 2;
-		buf[1] <<= 1;
-	}
-	buf[0] = tmp;
+	buf[1] = exponent << 1 | mantissa >> 8;
+	buf[0] = mantissa & 0xff;
+
+	/* Calculate and return the exact baud rate. */
+	baud = (baseline / mantissa) >> (exponent << 1);
 
 	return baud;
 }
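For reference, a minimal stand-alone C sketch of the divisor encoding above (same constants as the patch; illustrative userspace code, not part of the driver). A request for 9600 baud reduces to mantissa 156 and exponent 4, i.e. buf = 9c 08 00 80, and the returned rate is 9615 baud (about 0.2% high):

	#include <stdio.h>

	/* Mirror of pl2303_encode_baud_rate_divisor() for illustration. */
	static unsigned int encode_divisor(unsigned int baud, unsigned char buf[4])
	{
		unsigned int baseline = 12000000 * 32;
		unsigned int mantissa = baseline / baud;
		unsigned int exponent = 0;

		if (mantissa == 0)
			mantissa = 1;
		while (mantissa >= 512) {
			if (exponent < 7) {
				mantissa >>= 2;	/* divide by 4 */
				exponent++;
			} else {
				mantissa = 511;
				break;
			}
		}
		buf[3] = 0x80;
		buf[2] = 0;
		buf[1] = exponent << 1 | mantissa >> 8;
		buf[0] = mantissa & 0xff;

		return (baseline / mantissa) >> (exponent << 1);
	}

	int main(void)
	{
		unsigned char buf[4];
		unsigned int actual = encode_divisor(9600, buf);

		/* Prints: buf 9c 08 00 80, actual 9615 */
		printf("buf %02x %02x %02x %02x, actual %u\n",
		       buf[0], buf[1], buf[2], buf[3], actual);
		return 0;
	}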
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index d156545..ebcec8c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,7 @@
 	{USB_DEVICE(0x0AF0, 0x8120)},	/* Option GTM681W */
 
 	/* non-Gobi Sierra Wireless devices */
+	{DEVICE_SWI(0x03f0, 0x4e1d)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{DEVICE_SWI(0x0f3d, 0x68a2)},	/* Sierra Wireless MC7700 */
 	{DEVICE_SWI(0x114f, 0x68a2)},	/* Sierra Wireless MC7750 */
 	{DEVICE_SWI(0x1199, 0x68a2)},	/* Sierra Wireless MC7710 */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 8fceec7..37f3ad1 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -60,17 +60,15 @@
 
 	usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data);
 
+	/*
+	 * Data from the device comes with a 1 byte header:
+	 *
+	 * <size of data> <data>...
+	 */
 	if (urb->actual_length > 1) {
-		data_length = urb->actual_length - 1;
-
-		/*
-		 * Data from the device comes with a 1 byte header:
-		 *
-		 * <size of data>data...
-		 * 	This is real data to be sent to the tty layer
-		 * we pretty much just ignore the size and send everything
-		 * else to the tty layer.
-		 */
+		data_length = data[0];
+		if (data_length > (urb->actual_length - 1))
+			data_length = urb->actual_length - 1;
 		tty_insert_flip_string(&port->port, &data[1], data_length);
 		tty_flip_buffer_push(&port->port);
 	} else {
@@ -94,7 +92,7 @@
 
 static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct symbol_private *priv = usb_get_serial_data(port->serial);
+	struct symbol_private *priv = usb_get_serial_port_data(port);
 	unsigned long flags;
 	int result = 0;
 
@@ -120,7 +118,7 @@
 static void symbol_throttle(struct tty_struct *tty)
 {
 	struct usb_serial_port *port = tty->driver_data;
-	struct symbol_private *priv = usb_get_serial_data(port->serial);
+	struct symbol_private *priv = usb_get_serial_port_data(port);
 
 	spin_lock_irq(&priv->lock);
 	priv->throttled = true;
@@ -130,7 +128,7 @@
 static void symbol_unthrottle(struct tty_struct *tty)
 {
 	struct usb_serial_port *port = tty->driver_data;
-	struct symbol_private *priv = usb_get_serial_data(port->serial);
+	struct symbol_private *priv = usb_get_serial_port_data(port);
 	int result;
 	bool was_throttled;
 
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 2f805cb..825305c 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -282,7 +282,7 @@
 	/* Resubmit urb so we continue receiving */
 	err = usb_submit_urb(urb, GFP_ATOMIC);
 	if (err) {
-		if (err != -EPERM) {
+		if (err != -EPERM && err != -ENODEV) {
 			dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
 				__func__, err);
 			/* busy also in error unless we are killed */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 540add2..5e67f63 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1111,7 +1111,7 @@
 	 * command phase and the data phase.  Some devices need a little
 	 * more than that, probably because of clock rate inaccuracies. */
 	if (unlikely(us->fflags & US_FL_GO_SLOW))
-		udelay(125);
+		usleep_range(125, 150);
 
 	if (transfer_length) {
 		unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ? 
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dfcc02c..f114a9d 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1573,9 +1573,9 @@
 	return misc_register(&vhost_scsi_misc);
 }
 
-static int vhost_scsi_deregister(void)
+static void vhost_scsi_deregister(void)
 {
-	return misc_deregister(&vhost_scsi_misc);
+	misc_deregister(&vhost_scsi_misc);
 }
 
 static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0505b79..5ffa4b4 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -299,6 +299,13 @@
 	  If you have an Sharp SL-6000 Zaurus say Y to enable a driver
 	  for its backlight
 
+config BACKLIGHT_PM8941_WLED
+	tristate "Qualcomm PM8941 WLED Driver"
+	select REGMAP
+	help
+	  If you have the Qualcomm PM8941, say Y to enable a driver for the
+	  WLED block.
+
 config BACKLIGHT_SAHARA
 	tristate "Tabletkiosk Sahara Touch-iT Backlight Driver"
 	depends on X86
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index d67073f..16ec534 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_BACKLIGHT_OT200)		+= ot200_bl.o
 obj-$(CONFIG_BACKLIGHT_PANDORA)		+= pandora_bl.o
 obj-$(CONFIG_BACKLIGHT_PCF50633)	+= pcf50633-backlight.o
+obj-$(CONFIG_BACKLIGHT_PM8941_WLED)	+= pm8941-wled.o
 obj-$(CONFIG_BACKLIGHT_PWM)		+= pwm_bl.o
 obj-$(CONFIG_BACKLIGHT_SAHARA)		+= kb3886_bl.o
 obj-$(CONFIG_BACKLIGHT_SKY81452)	+= sky81452-backlight.o
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 88116b4..f88df9e 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -73,6 +73,7 @@
 	struct device *dev;
 	struct lp855x_platform_data *pdata;
 	struct pwm_device *pwm;
+	struct regulator *supply;	/* regulator for VDD input */
 };
 
 static int lp855x_write_byte(struct lp855x *lp, u8 reg, u8 data)
@@ -378,13 +379,6 @@
 		pdata->rom_data = &rom[0];
 	}
 
-	pdata->supply = devm_regulator_get(dev, "power");
-	if (IS_ERR(pdata->supply)) {
-		if (PTR_ERR(pdata->supply) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-		pdata->supply = NULL;
-	}
-
 	lp->pdata = pdata;
 
 	return 0;
@@ -425,8 +419,15 @@
 	else
 		lp->mode = REGISTER_BASED;
 
-	if (lp->pdata->supply) {
-		ret = regulator_enable(lp->pdata->supply);
+	lp->supply = devm_regulator_get(lp->dev, "power");
+	if (IS_ERR(lp->supply)) {
+		if (PTR_ERR(lp->supply) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+		lp->supply = NULL;
+	}
+
+	if (lp->supply) {
+		ret = regulator_enable(lp->supply);
 		if (ret < 0) {
 			dev_err(&cl->dev, "failed to enable supply: %d\n", ret);
 			return ret;
@@ -464,8 +465,8 @@
 
 	lp->bl->props.brightness = 0;
 	backlight_update_status(lp->bl);
-	if (lp->pdata->supply)
-		regulator_disable(lp->pdata->supply);
+	if (lp->supply)
+		regulator_disable(lp->supply);
 	sysfs_remove_group(&lp->dev->kobj, &lp855x_attr_group);
 
 	return 0;
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index e418d5b..5d583d7 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -221,8 +221,7 @@
 {
 	struct backlight_device *bl_dev = bl->bl_dev;
 
-	if (bl_dev)
-		backlight_device_unregister(bl_dev);
+	backlight_device_unregister(bl_dev);
 }
 
 static ssize_t lp8788_get_bl_ctl_mode(struct device *dev,
diff --git a/drivers/video/backlight/pm8941-wled.c b/drivers/video/backlight/pm8941-wled.c
new file mode 100644
index 0000000..c704c32
--- /dev/null
+++ b/drivers/video/backlight/pm8941-wled.c
@@ -0,0 +1,427 @@
+/* Copyright (c) 2015, Sony Mobile Communications, AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define PM8941_WLED_REG_VAL_BASE		0x40
+#define  PM8941_WLED_REG_VAL_MAX		0xFFF
+
+#define PM8941_WLED_REG_MOD_EN			0x46
+#define  PM8941_WLED_REG_MOD_EN_BIT		BIT(7)
+#define  PM8941_WLED_REG_MOD_EN_MASK		BIT(7)
+
+#define PM8941_WLED_REG_SYNC			0x47
+#define  PM8941_WLED_REG_SYNC_MASK		0x07
+#define  PM8941_WLED_REG_SYNC_LED1		BIT(0)
+#define  PM8941_WLED_REG_SYNC_LED2		BIT(1)
+#define  PM8941_WLED_REG_SYNC_LED3		BIT(2)
+#define  PM8941_WLED_REG_SYNC_ALL		0x07
+#define  PM8941_WLED_REG_SYNC_CLEAR		0x00
+
+#define PM8941_WLED_REG_FREQ			0x4c
+#define  PM8941_WLED_REG_FREQ_MASK		0x0f
+
+#define PM8941_WLED_REG_OVP			0x4d
+#define  PM8941_WLED_REG_OVP_MASK		0x03
+
+#define PM8941_WLED_REG_BOOST			0x4e
+#define  PM8941_WLED_REG_BOOST_MASK		0x07
+
+#define PM8941_WLED_REG_SINK			0x4f
+#define  PM8941_WLED_REG_SINK_MASK		0xe0
+#define  PM8941_WLED_REG_SINK_SHFT		0x05
+
+/* Per-'string' registers below */
+#define PM8941_WLED_REG_STR_OFFSET		0x10
+
+#define PM8941_WLED_REG_STR_MOD_EN_BASE		0x60
+#define  PM8941_WLED_REG_STR_MOD_MASK		BIT(7)
+#define  PM8941_WLED_REG_STR_MOD_EN		BIT(7)
+
+#define PM8941_WLED_REG_STR_SCALE_BASE		0x62
+#define  PM8941_WLED_REG_STR_SCALE_MASK		0x1f
+
+#define PM8941_WLED_REG_STR_MOD_SRC_BASE	0x63
+#define  PM8941_WLED_REG_STR_MOD_SRC_MASK	0x01
+#define  PM8941_WLED_REG_STR_MOD_SRC_INT	0x00
+#define  PM8941_WLED_REG_STR_MOD_SRC_EXT	0x01
+
+#define PM8941_WLED_REG_STR_CABC_BASE		0x66
+#define  PM8941_WLED_REG_STR_CABC_MASK		BIT(7)
+#define  PM8941_WLED_REG_STR_CABC_EN		BIT(7)
+
+struct pm8941_wled_config {
+	u32 i_boost_limit;
+	u32 ovp;
+	u32 switch_freq;
+	u32 num_strings;
+	u32 i_limit;
+	bool cs_out_en;
+	bool ext_gen;
+	bool cabc_en;
+};
+
+struct pm8941_wled {
+	const char *name;
+	struct regmap *regmap;
+	u16 addr;
+
+	struct pm8941_wled_config cfg;
+};
+
+static int pm8941_wled_update_status(struct backlight_device *bl)
+{
+	struct pm8941_wled *wled = bl_get_data(bl);
+	u16 val = bl->props.brightness;
+	u8 ctrl = 0;
+	int rc;
+	int i;
+
+	if (bl->props.power != FB_BLANK_UNBLANK ||
+	    bl->props.fb_blank != FB_BLANK_UNBLANK ||
+	    bl->props.state & BL_CORE_FBBLANK)
+		val = 0;
+
+	if (val != 0)
+		ctrl = PM8941_WLED_REG_MOD_EN_BIT;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_MOD_EN,
+			PM8941_WLED_REG_MOD_EN_MASK, ctrl);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < wled->cfg.num_strings; ++i) {
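+		/* 12-bit brightness: low byte then upper nibble, per string. */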
+		u8 v[2] = { val & 0xff, (val >> 8) & 0xf };
+
+		rc = regmap_bulk_write(wled->regmap,
+				wled->addr + PM8941_WLED_REG_VAL_BASE + 2 * i,
+				v, 2);
+		if (rc)
+			return rc;
+	}
+
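+	/* Pulse the SYNC bits so the new per-string values take effect. */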
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_SYNC,
+			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_ALL);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_SYNC,
+			PM8941_WLED_REG_SYNC_MASK, PM8941_WLED_REG_SYNC_CLEAR);
+	return rc;
+}
+
+static int pm8941_wled_setup(struct pm8941_wled *wled)
+{
+	int rc;
+	int i;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_OVP,
+			PM8941_WLED_REG_OVP_MASK, wled->cfg.ovp);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_BOOST,
+			PM8941_WLED_REG_BOOST_MASK, wled->cfg.i_boost_limit);
+	if (rc)
+		return rc;
+
+	rc = regmap_update_bits(wled->regmap,
+			wled->addr + PM8941_WLED_REG_FREQ,
+			PM8941_WLED_REG_FREQ_MASK, wled->cfg.switch_freq);
+	if (rc)
+		return rc;
+
+	if (wled->cfg.cs_out_en) {
+		u8 all = (BIT(wled->cfg.num_strings) - 1)
+				<< PM8941_WLED_REG_SINK_SHFT;
+
+		rc = regmap_update_bits(wled->regmap,
+				wled->addr + PM8941_WLED_REG_SINK,
+				PM8941_WLED_REG_SINK_MASK, all);
+		if (rc)
+			return rc;
+	}
+
+	for (i = 0; i < wled->cfg.num_strings; ++i) {
+		u16 addr = wled->addr + PM8941_WLED_REG_STR_OFFSET * i;
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_MOD_EN_BASE,
+				PM8941_WLED_REG_STR_MOD_MASK,
+				PM8941_WLED_REG_STR_MOD_EN);
+		if (rc)
+			return rc;
+
+		if (wled->cfg.ext_gen) {
+			rc = regmap_update_bits(wled->regmap,
+					addr + PM8941_WLED_REG_STR_MOD_SRC_BASE,
+					PM8941_WLED_REG_STR_MOD_SRC_MASK,
+					PM8941_WLED_REG_STR_MOD_SRC_EXT);
+			if (rc)
+				return rc;
+		}
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_SCALE_BASE,
+				PM8941_WLED_REG_STR_SCALE_MASK,
+				wled->cfg.i_limit);
+		if (rc)
+			return rc;
+
+		rc = regmap_update_bits(wled->regmap,
+				addr + PM8941_WLED_REG_STR_CABC_BASE,
+				PM8941_WLED_REG_STR_CABC_MASK,
+				wled->cfg.cabc_en ?
+					PM8941_WLED_REG_STR_CABC_EN : 0);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+static const struct pm8941_wled_config pm8941_wled_config_defaults = {
+	.i_boost_limit = 3,
+	.i_limit = 20,
+	.ovp = 2,
+	.switch_freq = 5,
+	.num_strings = 0,
+	.cs_out_en = false,
+	.ext_gen = false,
+	.cabc_en = false,
+};
+
+struct pm8941_wled_var_cfg {
+	const u32 *values;
+	u32 (*fn)(u32);
+	int size;
+};
+
+static const u32 pm8941_wled_i_boost_limit_values[] = {
+	105, 385, 525, 805, 980, 1260, 1400, 1680,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_boost_limit_cfg = {
+	.values = pm8941_wled_i_boost_limit_values,
+	.size = ARRAY_SIZE(pm8941_wled_i_boost_limit_values),
+};
+
+static const u32 pm8941_wled_ovp_values[] = {
+	35, 32, 29, 27,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_ovp_cfg = {
+	.values = pm8941_wled_ovp_values,
+	.size = ARRAY_SIZE(pm8941_wled_ovp_values),
+};
+
+static u32 pm8941_wled_num_strings_values_fn(u32 idx)
+{
+	return idx + 1;
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_num_strings_cfg = {
+	.fn = pm8941_wled_num_strings_values_fn,
+	.size = 3,
+};
+
+static u32 pm8941_wled_switch_freq_values_fn(u32 idx)
+{
+	return 19200 / (2 * (1 + idx));
+}
+
+static const struct pm8941_wled_var_cfg pm8941_wled_switch_freq_cfg = {
+	.fn = pm8941_wled_switch_freq_values_fn,
+	.size = 16,
+};
+
+static const struct pm8941_wled_var_cfg pm8941_wled_i_limit_cfg = {
+	.size = 26,
+};
+
+static u32 pm8941_wled_values(const struct pm8941_wled_var_cfg *cfg, u32 idx)
+{
+	if (idx >= cfg->size)
+		return UINT_MAX;
+	if (cfg->fn)
+		return cfg->fn(idx);
+	if (cfg->values)
+		return cfg->values[idx];
+	return idx;
+}
+
+static int pm8941_wled_configure(struct pm8941_wled *wled, struct device *dev)
+{
+	struct pm8941_wled_config *cfg = &wled->cfg;
+	u32 val;
+	int rc;
+	u32 c;
+	int i;
+	int j;
+
+	const struct {
+		const char *name;
+		u32 *val_ptr;
+		const struct pm8941_wled_var_cfg *cfg;
+	} u32_opts[] = {
+		{
+			"qcom,current-boost-limit",
+			&cfg->i_boost_limit,
+			.cfg = &pm8941_wled_i_boost_limit_cfg,
+		},
+		{
+			"qcom,current-limit",
+			&cfg->i_limit,
+			.cfg = &pm8941_wled_i_limit_cfg,
+		},
+		{
+			"qcom,ovp",
+			&cfg->ovp,
+			.cfg = &pm8941_wled_ovp_cfg,
+		},
+		{
+			"qcom,switching-freq",
+			&cfg->switch_freq,
+			.cfg = &pm8941_wled_switch_freq_cfg,
+		},
+		{
+			"qcom,num-strings",
+			&cfg->num_strings,
+			.cfg = &pm8941_wled_num_strings_cfg,
+		},
+	};
+	const struct {
+		const char *name;
+		bool *val_ptr;
+	} bool_opts[] = {
+		{ "qcom,cs-out", &cfg->cs_out_en, },
+		{ "qcom,ext-gen", &cfg->ext_gen, },
+		{ "qcom,cabc", &cfg->cabc_en, },
+	};
+
+	rc = of_property_read_u32(dev->of_node, "reg", &val);
+	if (rc || val > 0xffff) {
+		dev_err(dev, "invalid IO resources\n");
+		return rc ? rc : -EINVAL;
+	}
+	wled->addr = val;
+
+	rc = of_property_read_string(dev->of_node, "label", &wled->name);
+	if (rc)
+		wled->name = dev->of_node->name;
+
+	*cfg = pm8941_wled_config_defaults;
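+	/*
+	 * Each optional DT property is given in natural units; the loop below
+	 * converts it to the index of the matching entry in its value table,
+	 * which is the encoding the corresponding register field expects.
+	 */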
+	for (i = 0; i < ARRAY_SIZE(u32_opts); ++i) {
+		rc = of_property_read_u32(dev->of_node, u32_opts[i].name, &val);
+		if (rc == -EINVAL) {
+			continue;
+		} else if (rc) {
+			dev_err(dev, "error reading '%s'\n", u32_opts[i].name);
+			return rc;
+		}
+
+		c = UINT_MAX;
+		for (j = 0; c != val; j++) {
+			c = pm8941_wled_values(u32_opts[i].cfg, j);
+			if (c == UINT_MAX) {
+				dev_err(dev, "invalid value for '%s'\n",
+					u32_opts[i].name);
+				return -EINVAL;
+			}
+
+			if (c == val)
+				break;
+		}
+
+		dev_dbg(dev, "'%s' = %u\n", u32_opts[i].name, c);
+		*u32_opts[i].val_ptr = j;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(bool_opts); ++i) {
+		if (of_property_read_bool(dev->of_node, bool_opts[i].name))
+			*bool_opts[i].val_ptr = true;
+	}
+
+	cfg->num_strings = cfg->num_strings + 1;
+
+	return 0;
+}
+
+static const struct backlight_ops pm8941_wled_ops = {
+	.update_status = pm8941_wled_update_status,
+};
+
+static int pm8941_wled_probe(struct platform_device *pdev)
+{
+	struct backlight_properties props;
+	struct backlight_device *bl;
+	struct pm8941_wled *wled;
+	struct regmap *regmap;
+	int rc;
+
+	regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!regmap) {
+		dev_err(&pdev->dev, "Unable to get regmap\n");
+		return -EINVAL;
+	}
+
+	wled = devm_kzalloc(&pdev->dev, sizeof(*wled), GFP_KERNEL);
+	if (!wled)
+		return -ENOMEM;
+
+	wled->regmap = regmap;
+
+	rc = pm8941_wled_configure(wled, &pdev->dev);
+	if (rc)
+		return rc;
+
+	rc = pm8941_wled_setup(wled);
+	if (rc)
+		return rc;
+
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.type = BACKLIGHT_RAW;
+	props.max_brightness = PM8941_WLED_REG_VAL_MAX;
+	bl = devm_backlight_device_register(&pdev->dev, wled->name,
+					    &pdev->dev, wled,
+					    &pm8941_wled_ops, &props);
+	if (IS_ERR(bl))
+		return PTR_ERR(bl);
+
+	return 0;
+}
+
+static const struct of_device_id pm8941_wled_match_table[] = {
+	{ .compatible = "qcom,pm8941-wled" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, pm8941_wled_match_table);
+
+static struct platform_driver pm8941_wled_driver = {
+	.probe = pm8941_wled_probe,
+	.driver	= {
+		.name = "pm8941-wled",
+		.of_match_table	= pm8941_wled_match_table,
+	},
+};
+
+module_platform_driver(pm8941_wled_driver);
+
+MODULE_DESCRIPTION("pm8941 wled driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index 052fa1b..d414c7a 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -65,7 +65,7 @@
 
 	if (brightness > 0) {
 		ret = regmap_write(regmap, SKY81452_REG0, brightness - 1);
-		if (IS_ERR_VALUE(ret))
+		if (ret < 0)
 			return ret;
 
 		return regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
@@ -87,12 +87,12 @@
 	int ret;
 
 	ret = kstrtoul(buf, 16, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	ret = regmap_update_bits(regmap, SKY81452_REG1, SKY81452_EN,
 					value << CTZ(SKY81452_EN));
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	return count;
@@ -108,7 +108,7 @@
 
 	reg = !strcmp(attr->attr.name, "open") ? SKY81452_REG5 : SKY81452_REG4;
 	ret = regmap_read(regmap, reg, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	if (value & SKY81452_SHRT) {
@@ -136,7 +136,7 @@
 	int ret;
 
 	ret = regmap_read(regmap, SKY81452_REG4, &value);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		return ret;
 
 	*buf = 0;
@@ -196,7 +196,7 @@
 	pdata->gpio_enable = of_get_gpio(np, 0);
 
 	ret = of_property_count_u32_elems(np, "led-sources");
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		pdata->enable = SKY81452_EN >> CTZ(SKY81452_EN);
 	} else {
 		num_entry = ret;
@@ -205,7 +205,7 @@
 
 		ret = of_property_read_u32_array(np, "led-sources", sources,
 					num_entry);
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(dev, "led-sources node is invalid.\n");
 			return ERR_PTR(-EINVAL);
 		}
@@ -218,12 +218,12 @@
 	ret = of_property_read_u32(np,
 			"skyworks,short-detection-threshold-volt",
 			&pdata->short_detection_threshold);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pdata->short_detection_threshold = 7;
 
 	ret = of_property_read_u32(np, "skyworks,current-limit-mA",
 			&pdata->boost_current_limit);
-	if (IS_ERR_VALUE(ret))
+	if (ret < 0)
 		pdata->boost_current_limit = 2750;
 
 	of_node_put(np);
@@ -278,14 +278,14 @@
 	if (gpio_is_valid(pdata->gpio_enable)) {
 		ret = devm_gpio_request_one(dev, pdata->gpio_enable,
 					GPIOF_OUT_INIT_HIGH, "sky81452-en");
-		if (IS_ERR_VALUE(ret)) {
+		if (ret < 0) {
 			dev_err(dev, "failed to request GPIO. err=%d\n", ret);
 			return ret;
 		}
 	}
 
 	ret = sky81452_bl_init_device(regmap, pdata);
-	if (IS_ERR_VALUE(ret)) {
+	if (ret < 0) {
 		dev_err(dev, "failed to initialize. err=%d\n", ret);
 		return ret;
 	}
@@ -302,8 +302,8 @@
 
 	platform_set_drvdata(pdev, bd);
 
-	ret  = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
-	if (IS_ERR_VALUE(ret)) {
+	ret = sysfs_create_group(&bd->dev.kobj, &sky81452_bl_attr_group);
+	if (ret < 0) {
 		dev_err(dev, "failed to create attribute. err=%d\n", ret);
 		return ret;
 	}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 3ad6765..83742d8 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -158,6 +158,7 @@
 	{ "tosa-bl", 0 },
 	{ },
 };
+MODULE_DEVICE_TABLE(i2c, tosa_bl_id);
 
 static struct i2c_driver tosa_bl_driver = {
 	.driver = {
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index f888561..811acfc 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2475,3 +2475,17 @@
 	help
 	  This driver implements support for the Solomon SSD1307
 	  OLED controller over I2C.
+
+config FB_SM712
+	tristate "Silicon Motion SM712 framebuffer support"
+	depends on FB && PCI
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	help
+	  Frame buffer driver for the Silicon Motion SM710, SM712, SM721
+	  and SM722 chips.
+
+	  This driver is also available as a module. The module will be
+	  called sm712fb. If you want to compile it as a module, say M
+	  here and read <file:Documentation/kbuild/modules.txt>.
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index cecea50..50ed1b4 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -131,6 +131,7 @@
 obj-$(CONFIG_FB_PUV3_UNIGFX)      += fb-puv3.o
 obj-$(CONFIG_FB_HYPERV)		  += hyperv_fb.o
 obj-$(CONFIG_FB_OPENCORES)	  += ocfb.o
+obj-$(CONFIG_FB_SM712)		  += sm712fb.o
 
 # Platform or fallback drivers go here
 obj-$(CONFIG_FB_UVESA)            += uvesafb.o
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index b305a1e..6a317de 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -26,13 +26,9 @@
 #include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
 struct arkfb_info {
 	int mclk_freq;
-	int mtrr_reg;
+	int wc_cookie;
 
 	struct dac_info *dac;
 	struct vgastate state;
@@ -102,10 +98,6 @@
 
 static char *mode_option = "640x480-8@60";
 
-#ifdef CONFIG_MTRR
-static int mtrr = 1;
-#endif
-
 MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
@@ -115,11 +107,6 @@
 module_param_named(mode, mode_option, charp, 0444);
 MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
 
-#ifdef CONFIG_MTRR
-module_param(mtrr, int, 0444);
-MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
-
 static int threshold = 4;
 
 module_param(threshold, int, 0644);
@@ -1002,7 +989,7 @@
 	info->fix.smem_len = pci_resource_len(dev, 0);
 
 	/* Map physical IO memory address into kernel space */
-	info->screen_base = pci_iomap(dev, 0, 0);
+	info->screen_base = pci_iomap_wc(dev, 0, 0);
 	if (! info->screen_base) {
 		rc = -ENOMEM;
 		dev_err(info->device, "iomap for framebuffer failed\n");
@@ -1057,14 +1044,8 @@
 
 	/* Record a reference to the driver data */
 	pci_set_drvdata(dev, info);
-
-#ifdef CONFIG_MTRR
-	if (mtrr) {
-		par->mtrr_reg = -1;
-		par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
-	}
-#endif
-
+	par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+					  info->fix.smem_len);
 	return 0;
 
 	/* Error handling */
@@ -1092,14 +1073,7 @@
 
 	if (info) {
 		struct arkfb_info *par = info->par;
-
-#ifdef CONFIG_MTRR
-		if (par->mtrr_reg >= 0) {
-			mtrr_del(par->mtrr_reg, 0, 0);
-			par->mtrr_reg = -1;
-		}
-#endif
-
+		arch_phys_wc_del(par->wc_cookie);
 		dac_release(par->dac);
 		unregister_framebuffer(info);
 		fb_dealloc_cmap(&info->cmap);
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 1f39a62..63c4842 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -182,10 +182,7 @@
 	unsigned long irq_flags;
 	unsigned int irq;
 	spinlock_t int_lock;
-#ifdef CONFIG_MTRR
-	int mtrr_aper;
-	int mtrr_reg;
-#endif
+	int wc_cookie;
 	u32 mem_cntl;
 	struct crtc saved_crtc;
 	union aty_pll saved_pll;
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 8789e48..f34ed47f 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -98,9 +98,6 @@
 #ifdef CONFIG_PMAC_BACKLIGHT
 #include <asm/backlight.h>
 #endif
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 
 /*
  * Debug flags.
@@ -303,9 +300,7 @@
 };
 
 static bool noaccel;
-#ifdef CONFIG_MTRR
 static bool nomtrr;
-#endif
 static int vram;
 static int pll;
 static int mclk;
@@ -427,6 +422,20 @@
 #endif /* CONFIG_FB_ATY_CT */
 };
 
+/*
+ * Last page of 8 MB (4 MB on ISA) aperture is MMIO,
+ * unless the auxiliary register aperture is used.
+ */
+static void aty_fudge_framebuffer_len(struct fb_info *info)
+{
+	struct atyfb_par *par = (struct atyfb_par *) info->par;
+
+	if (!par->aux_start &&
+	    (info->fix.smem_len == 0x800000 ||
+	     (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
+		info->fix.smem_len -= GUI_RESERVE;
+}
+
 static int correct_chipset(struct atyfb_par *par)
 {
 	u8 rev;
@@ -2603,14 +2612,7 @@
 	if (par->pll_ops->resume_pll)
 		par->pll_ops->resume_pll(info, &par->pll);
 
-	/*
-	 * Last page of 8 MB (4 MB on ISA) aperture is MMIO,
-	 * unless the auxiliary register aperture is used.
-	 */
-	if (!par->aux_start &&
-	    (info->fix.smem_len == 0x800000 ||
-	     (par->bus_type == ISA && info->fix.smem_len == 0x400000)))
-		info->fix.smem_len -= GUI_RESERVE;
+	aty_fudge_framebuffer_len(info);
 
 	/*
 	 * Disable register access through the linear aperture
@@ -2621,25 +2623,13 @@
 		aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) |
 			    BUS_APER_REG_DIS, par);
 
-#ifdef CONFIG_MTRR
-	par->mtrr_aper = -1;
-	par->mtrr_reg = -1;
-	if (!nomtrr) {
-		/* Cover the whole resource. */
-		par->mtrr_aper = mtrr_add(par->res_start, par->res_size,
-					  MTRR_TYPE_WRCOMB, 1);
-		if (par->mtrr_aper >= 0 && !par->aux_start) {
-			/* Make a hole for mmio. */
-			par->mtrr_reg = mtrr_add(par->res_start + 0x800000 -
-						 GUI_RESERVE, GUI_RESERVE,
-						 MTRR_TYPE_UNCACHABLE, 1);
-			if (par->mtrr_reg < 0) {
-				mtrr_del(par->mtrr_aper, 0, 0);
-				par->mtrr_aper = -1;
-			}
-		}
-	}
-#endif
+	if (!nomtrr)
+		/*
+		 * Only the ioremap_wc()'d area will get WC here
+		 * since ioremap_uc() was used on the entire PCI BAR.
+		 */
+		par->wc_cookie = arch_phys_wc_add(par->res_start,
+						  par->res_size);
 
 	info->fbops = &atyfb_ops;
 	info->pseudo_palette = par->pseudo_palette;
@@ -2767,17 +2757,8 @@
 	/* restore video mode */
 	aty_set_crtc(par, &par->saved_crtc);
 	par->pll_ops->set_pll(info, &par->saved_pll);
+	arch_phys_wc_del(par->wc_cookie);
 
-#ifdef CONFIG_MTRR
-	if (par->mtrr_reg >= 0) {
-		mtrr_del(par->mtrr_reg, 0, 0);
-		par->mtrr_reg = -1;
-	}
-	if (par->mtrr_aper >= 0) {
-		mtrr_del(par->mtrr_aper, 0, 0);
-		par->mtrr_aper = -1;
-	}
-#endif
 	return ret;
 }
 
@@ -3459,7 +3440,11 @@
 	}
 
 	info->fix.mmio_start = raddr;
-	par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+	/*
+	 * By using strong UC we force the MTRR to never have an
+	 * effect on the MMIO region on both non-PAT and PAT systems.
+	 */
+	par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
 	if (par->ati_regbase == NULL)
 		return -ENOMEM;
 
@@ -3482,7 +3467,24 @@
 
 	/* Map in frame buffer */
 	info->fix.smem_start = addr;
-	info->screen_base = ioremap(addr, 0x800000);
+
+	/*
+	 * The framebuffer is not always 8 MiB, that's just the size of the
+	 * PCI BAR. We temporarily abuse smem_len here to store the size
+	 * of the BAR. aty_init() will later correct it to match the actual
+	 * framebuffer size.
+	 *
+	 * On devices that don't have the auxiliary register aperture, the
+	 * registers are housed at the top end of the framebuffer PCI BAR.
+	 * aty_fudge_framebuffer_len() is used to reduce smem_len to not
+	 * overlap with the registers.
+	 */
+	info->fix.smem_len = 0x800000;
+
+	aty_fudge_framebuffer_len(info);
+
+	info->screen_base = ioremap_wc(info->fix.smem_start,
+				       info->fix.smem_len);
 	if (info->screen_base == NULL) {
 		ret = -ENOMEM;
 		goto atyfb_setup_generic_fail;
@@ -3554,6 +3556,7 @@
 		return -ENOMEM;
 	}
 	par = info->par;
+	par->bus_type = PCI;
 	info->fix = atyfb_fix;
 	info->device = &pdev->dev;
 	par->pci_id = pdev->device;
@@ -3655,7 +3658,8 @@
 		 * Map the video memory (physical address given)
 		 * to somewhere in the kernel address space.
 		 */
-		info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]);
+		info->screen_base = ioremap_wc(phys_vmembase[m64_num],
+					       phys_size[m64_num]);
 		info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */
 		par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) +
 						0xFC00ul;
@@ -3721,17 +3725,8 @@
 	if (M64_HAS(MOBIL_BUS))
 		aty_bl_exit(info->bl_dev);
 #endif
+	arch_phys_wc_del(par->wc_cookie);
 
-#ifdef CONFIG_MTRR
-	if (par->mtrr_reg >= 0) {
-		mtrr_del(par->mtrr_reg, 0, 0);
-		par->mtrr_reg = -1;
-	}
-	if (par->mtrr_aper >= 0) {
-		mtrr_del(par->mtrr_aper, 0, 0);
-		par->mtrr_aper = -1;
-	}
-#endif
 #ifndef __sparc__
 	if (par->ati_regbase)
 		iounmap(par->ati_regbase);
@@ -3847,10 +3842,8 @@
 	while ((this_opt = strsep(&options, ",")) != NULL) {
 		if (!strncmp(this_opt, "noaccel", 7)) {
 			noaccel = 1;
-#ifdef CONFIG_MTRR
 		} else if (!strncmp(this_opt, "nomtrr", 6)) {
 			nomtrr = 1;
-#endif
 		} else if (!strncmp(this_opt, "vram:", 5))
 			vram = simple_strtoul(this_opt + 5, NULL, 0);
 		else if (!strncmp(this_opt, "pll:", 4))
@@ -4020,7 +4013,5 @@
 MODULE_PARM_DESC(comp_sync, "Set composite sync signal to low (0) or high (1)");
 module_param(mode, charp, 0);
 MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
-#ifdef CONFIG_MTRR
 module_param(nomtrr, bool, 0);
 MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
-#endif
diff --git a/drivers/video/fbdev/ep93xx-fb.c b/drivers/video/fbdev/ep93xx-fb.c
index 7ec251c..5b10810 100644
--- a/drivers/video/fbdev/ep93xx-fb.c
+++ b/drivers/video/fbdev/ep93xx-fb.c
@@ -419,36 +419,15 @@
 	.fb_mmap	= ep93xxfb_mmap,
 };
 
-static int ep93xxfb_calc_fbsize(struct ep93xxfb_mach_info *mach_info)
-{
-	int i, fb_size = 0;
-
-	if (mach_info->num_modes == EP93XXFB_USE_MODEDB) {
-		fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES *
-			mach_info->bpp / 8;
-	} else {
-		for (i = 0; i < mach_info->num_modes; i++) {
-			const struct fb_videomode *mode;
-			int size;
-
-			mode = &mach_info->modes[i];
-			size = mode->xres * mode->yres * mach_info->bpp / 8;
-			if (size > fb_size)
-				fb_size = size;
-		}
-	}
-
-	return fb_size;
-}
-
 static int ep93xxfb_alloc_videomem(struct fb_info *info)
 {
-	struct ep93xx_fbi *fbi = info->par;
 	char __iomem *virt_addr;
 	dma_addr_t phys_addr;
 	unsigned int fb_size;
 
-	fb_size = ep93xxfb_calc_fbsize(fbi->mach_info);
+	/* Maximum 16bpp -> used memory is maximum x*y*2 bytes */
+	fb_size = EP93XXFB_MAX_XRES * EP93XXFB_MAX_YRES * 2;
+
 	virt_addr = dma_alloc_writecombine(info->dev, fb_size,
 					   &phys_addr, GFP_KERNEL);
 	if (!virt_addr)
@@ -550,8 +529,7 @@
 
 	fb_get_options("ep93xx-fb", &video_mode);
 	err = fb_find_mode(&info->var, info, video_mode,
-			   fbi->mach_info->modes, fbi->mach_info->num_modes,
-			   fbi->mach_info->default_mode, fbi->mach_info->bpp);
+			   NULL, 0, NULL, 16);
 	if (err == 0) {
 		dev_err(info->dev, "No suitable video mode found\n");
 		err = -EINVAL;
diff --git a/drivers/video/fbdev/gxt4500.c b/drivers/video/fbdev/gxt4500.c
index 135d78a..f19133a 100644
--- a/drivers/video/fbdev/gxt4500.c
+++ b/drivers/video/fbdev/gxt4500.c
@@ -662,7 +662,7 @@
 
 	info->fix.smem_start = fb_phys;
 	info->fix.smem_len = pci_resource_len(pdev, 1);
-	info->screen_base = pci_ioremap_bar(pdev, 1);
+	info->screen_base = pci_ioremap_wc_bar(pdev, 1);
 	if (!info->screen_base) {
 		dev_err(&pdev->dev, "gxt4500: cannot map framebuffer\n");
 		goto err_unmap_regs;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 807ee22..e2451bd 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -213,7 +213,7 @@
 
 struct hvfb_par {
 	struct fb_info *info;
-	struct resource mem;
+	struct resource *mem;
 	bool fb_ready; /* fb device is ready */
 	struct completion wait;
 	u32 synthvid_version;
@@ -677,26 +677,18 @@
 
 
 /* Get framebuffer memory from Hyper-V video pci space */
-static int hvfb_getmem(struct fb_info *info)
+static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
 {
 	struct hvfb_par *par = info->par;
 	struct pci_dev *pdev  = NULL;
 	void __iomem *fb_virt;
 	int gen2vm = efi_enabled(EFI_BOOT);
+	resource_size_t pot_start, pot_end;
 	int ret;
 
-	par->mem.name = KBUILD_MODNAME;
-	par->mem.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	if (gen2vm) {
-		ret = allocate_resource(&hyperv_mmio, &par->mem,
-					screen_fb_size,
-					0, -1,
-					screen_fb_size,
-					NULL, NULL);
-		if (ret != 0) {
-			pr_err("Unable to allocate framebuffer memory\n");
-			return -ENODEV;
-		}
+		pot_start = 0;
+		pot_end = -1;
 	} else {
 		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
 			      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
@@ -709,16 +701,18 @@
 		    pci_resource_len(pdev, 0) < screen_fb_size)
 			goto err1;
 
-		par->mem.end = pci_resource_end(pdev, 0);
-		par->mem.start = par->mem.end - screen_fb_size + 1;
-		ret = request_resource(&pdev->resource[0], &par->mem);
-		if (ret != 0) {
-			pr_err("Unable to request framebuffer memory\n");
-			goto err1;
-		}
+		pot_end = pci_resource_end(pdev, 0);
+		pot_start = pot_end - screen_fb_size + 1;
 	}
 
-	fb_virt = ioremap(par->mem.start, screen_fb_size);
+	ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
+				  screen_fb_size, 0x100000, true);
+	if (ret != 0) {
+		pr_err("Unable to allocate framebuffer memory\n");
+		goto err1;
+	}
+
+	fb_virt = ioremap(par->mem->start, screen_fb_size);
 	if (!fb_virt)
 		goto err2;
 
@@ -736,7 +730,7 @@
 		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
 	}
 
-	info->fix.smem_start = par->mem.start;
+	info->fix.smem_start = par->mem->start;
 	info->fix.smem_len = screen_fb_size;
 	info->screen_base = fb_virt;
 	info->screen_size = screen_fb_size;
@@ -749,7 +743,8 @@
 err3:
 	iounmap(fb_virt);
 err2:
-	release_resource(&par->mem);
+	release_mem_region(par->mem->start, screen_fb_size);
+	par->mem = NULL;
 err1:
 	if (!gen2vm)
 		pci_dev_put(pdev);
@@ -763,7 +758,8 @@
 	struct hvfb_par *par = info->par;
 
 	iounmap(info->screen_base);
-	release_resource(&par->mem);
+	release_mem_region(par->mem->start, screen_fb_size);
+	par->mem = NULL;
 }
 
 
@@ -794,7 +790,7 @@
 		goto error1;
 	}
 
-	ret = hvfb_getmem(info);
+	ret = hvfb_getmem(hdev, info);
 	if (ret) {
 		pr_err("No memory for framebuffer\n");
 		goto error2;
diff --git a/drivers/video/fbdev/i740fb.c b/drivers/video/fbdev/i740fb.c
index a2b4204..452e116 100644
--- a/drivers/video/fbdev/i740fb.c
+++ b/drivers/video/fbdev/i740fb.c
@@ -27,24 +27,15 @@
 #include <linux/console.h>
 #include <video/vga.h>
 
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
 #include "i740_reg.h"
 
 static char *mode_option;
-
-#ifdef CONFIG_MTRR
 static int mtrr = 1;
-#endif
 
 struct i740fb_par {
 	unsigned char __iomem *regs;
 	bool has_sgram;
-#ifdef CONFIG_MTRR
-	int mtrr_reg;
-#endif
+	int wc_cookie;
 	bool ddc_registered;
 	struct i2c_adapter ddc_adapter;
 	struct i2c_algo_bit_data ddc_algo;
@@ -1040,7 +1031,7 @@
 		goto err_request_regions;
 	}
 
-	info->screen_base = pci_ioremap_bar(dev, 0);
+	info->screen_base = pci_ioremap_wc_bar(dev, 0);
 	if (!info->screen_base) {
 		dev_err(info->device, "error remapping base\n");
 		ret = -ENOMEM;
@@ -1144,13 +1135,9 @@
 
 	fb_info(info, "%s frame buffer device\n", info->fix.id);
 	pci_set_drvdata(dev, info);
-#ifdef CONFIG_MTRR
-	if (mtrr) {
-		par->mtrr_reg = -1;
-		par->mtrr_reg = mtrr_add(info->fix.smem_start,
-				info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
-	}
-#endif
+	if (mtrr)
+		par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+						  info->fix.smem_len);
 	return 0;
 
 err_reg_framebuffer:
@@ -1177,13 +1164,7 @@
 
 	if (info) {
 		struct i740fb_par *par = info->par;
-
-#ifdef CONFIG_MTRR
-		if (par->mtrr_reg >= 0) {
-			mtrr_del(par->mtrr_reg, 0, 0);
-			par->mtrr_reg = -1;
-		}
-#endif
+		arch_phys_wc_del(par->wc_cookie);
 		unregister_framebuffer(info);
 		fb_dealloc_cmap(&info->cmap);
 		if (par->ddc_registered)
@@ -1287,10 +1268,8 @@
 	while ((opt = strsep(&options, ",")) != NULL) {
 		if (!*opt)
 			continue;
-#ifdef CONFIG_MTRR
 		else if (!strncmp(opt, "mtrr:", 5))
 			mtrr = simple_strtoul(opt + 5, NULL, 0);
-#endif
 		else
 			mode_option = opt;
 	}
@@ -1327,7 +1306,5 @@
 module_param(mode_option, charp, 0444);
 MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
 
-#ifdef CONFIG_MTRR
 module_param(mtrr, int, 0444);
 MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
diff --git a/drivers/video/fbdev/kyro/fbdev.c b/drivers/video/fbdev/kyro/fbdev.c
index 65041e1..5bb0153 100644
--- a/drivers/video/fbdev/kyro/fbdev.c
+++ b/drivers/video/fbdev/kyro/fbdev.c
@@ -22,9 +22,6 @@
 #include <linux/pci.h>
 #include <asm/io.h>
 #include <linux/uaccess.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
 
 #include <video/kyro.h>
 
@@ -84,9 +81,7 @@
 static char *mode_option = NULL;
 static int nopan = 0;
 static int nowrap = 1;
-#ifdef CONFIG_MTRR
 static int nomtrr = 0;
-#endif
 
 /* PCI driver prototypes */
 static int kyrofb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -570,10 +565,8 @@
 			nopan = 1;
 		} else if (strcmp(this_opt, "nowrap") == 0) {
 			nowrap = 1;
-#ifdef CONFIG_MTRR
 		} else if (strcmp(this_opt, "nomtrr") == 0) {
 			nomtrr = 1;
-#endif
 		} else {
 			mode_option = this_opt;
 		}
@@ -691,17 +684,16 @@
 
 	currentpar->regbase = deviceInfo.pSTGReg =
 		ioremap_nocache(kyro_fix.mmio_start, kyro_fix.mmio_len);
+	if (!currentpar->regbase)
+		goto out_free_fb;
 
-	info->screen_base = ioremap_nocache(kyro_fix.smem_start,
-					    kyro_fix.smem_len);
+	info->screen_base = pci_ioremap_wc_bar(pdev, 0);
+	if (!info->screen_base)
+		goto out_unmap_regs;
 
-#ifdef CONFIG_MTRR
 	if (!nomtrr)
-		currentpar->mtrr_handle =
-			mtrr_add(kyro_fix.smem_start,
-				 kyro_fix.smem_len,
-				 MTRR_TYPE_WRCOMB, 1);
-#endif
+		currentpar->wc_cookie = arch_phys_wc_add(kyro_fix.smem_start,
+							 kyro_fix.smem_len);
 
 	kyro_fix.ypanstep	= nopan ? 0 : 1;
 	kyro_fix.ywrapstep	= nowrap ? 0 : 1;
@@ -745,8 +737,10 @@
 	return 0;
 
 out_unmap:
-	iounmap(currentpar->regbase);
 	iounmap(info->screen_base);
+out_unmap_regs:
+	iounmap(currentpar->regbase);
+out_free_fb:
 	framebuffer_release(info);
 
 	return -EINVAL;
@@ -770,12 +764,7 @@
 	iounmap(info->screen_base);
 	iounmap(par->regbase);
 
-#ifdef CONFIG_MTRR
-	if (par->mtrr_handle)
-		mtrr_del(par->mtrr_handle,
-			 info->fix.smem_start,
-			 info->fix.smem_len);
-#endif
+	arch_phys_wc_del(par->wc_cookie);
 
 	unregister_framebuffer(info);
 	framebuffer_release(info);
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index 7245611..94813af 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1668,7 +1668,6 @@
 
 	switch (val) {
 	case CPUFREQ_ADJUST:
-	case CPUFREQ_INCOMPATIBLE:
 		pr_debug("min dma period: %d ps, "
 			"new clock %d kHz\n", pxafb_display_dma_period(var),
 			policy->max);
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index f0ae61a..13b1090 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -28,13 +28,9 @@
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
 struct s3fb_info {
 	int chip, rev, mclk_freq;
-	int mtrr_reg;
+	int wc_cookie;
 	struct vgastate state;
 	struct mutex open_lock;
 	unsigned int ref_count;
@@ -154,11 +150,7 @@
 
 
 static char *mode_option;
-
-#ifdef CONFIG_MTRR
 static int mtrr = 1;
-#endif
-
 static int fasttext = 1;
 
 
@@ -170,11 +162,8 @@
 MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
 module_param_named(mode, mode_option, charp, 0444);
 MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
-
-#ifdef CONFIG_MTRR
 module_param(mtrr, int, 0444);
 MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
 
 module_param(fasttext, int, 0644);
 MODULE_PARM_DESC(fasttext, "Enable S3 fast text mode (1=enable, 0=disable, default=1)");
@@ -1168,7 +1157,7 @@
 	info->fix.smem_len = pci_resource_len(dev, 0);
 
 	/* Map physical IO memory address into kernel space */
-	info->screen_base = pci_iomap(dev, 0, 0);
+	info->screen_base = pci_iomap_wc(dev, 0, 0);
 	if (! info->screen_base) {
 		rc = -ENOMEM;
 		dev_err(info->device, "iomap for framebuffer failed\n");
@@ -1365,12 +1354,9 @@
 	/* Record a reference to the driver data */
 	pci_set_drvdata(dev, info);
 
-#ifdef CONFIG_MTRR
-	if (mtrr) {
-		par->mtrr_reg = -1;
-		par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
-	}
-#endif
+	if (mtrr)
+		par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+						  info->fix.smem_len);
 
 	return 0;
 
@@ -1405,14 +1391,7 @@
 
 	if (info) {
 		par = info->par;
-
-#ifdef CONFIG_MTRR
-		if (par->mtrr_reg >= 0) {
-			mtrr_del(par->mtrr_reg, 0, 0);
-			par->mtrr_reg = -1;
-		}
-#endif
-
+		arch_phys_wc_del(par->wc_cookie);
 		unregister_framebuffer(info);
 		fb_dealloc_cmap(&info->cmap);
 
@@ -1551,10 +1530,8 @@
 
 		if (!*opt)
 			continue;
-#ifdef CONFIG_MTRR
 		else if (!strncmp(opt, "mtrr:", 5))
 			mtrr = simple_strtoul(opt + 5, NULL, 0);
-#endif
 		else if (!strncmp(opt, "fasttext:", 9))
 			fasttext = simple_strtoul(opt + 9, NULL, 0);
 		else
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 89dd7e0..dcf774c 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1042,7 +1042,6 @@
 
 	switch (val) {
 	case CPUFREQ_ADJUST:
-	case CPUFREQ_INCOMPATIBLE:
 		dev_dbg(fbi->dev, "min dma period: %d ps, "
 			"new clock %d kHz\n", sa1100fb_min_dma_period(fbi),
 			policy->max);
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 1085c04..52c5c7e 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/platform_data/simplefb.h>
 #include <linux/platform_device.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of_platform.h>
 
diff --git a/drivers/video/fbdev/sm712.h b/drivers/video/fbdev/sm712.h
new file mode 100644
index 0000000..aad1cc4
--- /dev/null
+++ b/drivers/video/fbdev/sm712.h
@@ -0,0 +1,116 @@
+/*
+ * Silicon Motion SM712 frame buffer device
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Authors:	Ge Wang, gewang@siliconmotion.com
+ *		Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author: Wu Zhangjin, wuzhangjin@gmail.com
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive for
+ *  more details.
+ */
+
+#define FB_ACCEL_SMI_LYNX 88
+
+#define SCREEN_X_RES      1024
+#define SCREEN_Y_RES      600
+#define SCREEN_BPP        16
+
+/* Assume SM712 graphics chip has 4MB VRAM */
+#define SM712_VIDEOMEMORYSIZE	  0x00400000
+/* Assume SM722 graphics chip has 8MB VRAM */
+#define SM722_VIDEOMEMORYSIZE	  0x00800000
+
+#define dac_reg	(0x3c8)
+#define dac_val	(0x3c9)
+
+extern void __iomem *smtc_regbaseaddress;
+#define smtc_mmiowb(dat, reg)	writeb(dat, smtc_regbaseaddress + reg)
+
+#define smtc_mmiorb(reg)	readb(smtc_regbaseaddress + reg)
+
+#define SIZE_SR00_SR04      (0x04 - 0x00 + 1)
+#define SIZE_SR10_SR24      (0x24 - 0x10 + 1)
+#define SIZE_SR30_SR75      (0x75 - 0x30 + 1)
+#define SIZE_SR80_SR93      (0x93 - 0x80 + 1)
+#define SIZE_SRA0_SRAF      (0xAF - 0xA0 + 1)
+#define SIZE_GR00_GR08      (0x08 - 0x00 + 1)
+#define SIZE_AR00_AR14      (0x14 - 0x00 + 1)
+#define SIZE_CR00_CR18      (0x18 - 0x00 + 1)
+#define SIZE_CR30_CR4D      (0x4D - 0x30 + 1)
+#define SIZE_CR90_CRA7      (0xA7 - 0x90 + 1)
+
+static inline void smtc_crtcw(int reg, int val)
+{
+	smtc_mmiowb(reg, 0x3d4);
+	smtc_mmiowb(val, 0x3d5);
+}
+
+static inline void smtc_grphw(int reg, int val)
+{
+	smtc_mmiowb(reg, 0x3ce);
+	smtc_mmiowb(val, 0x3cf);
+}
+
+static inline void smtc_attrw(int reg, int val)
+{
+	smtc_mmiorb(0x3da);
+	smtc_mmiowb(reg, 0x3c0);
+	smtc_mmiorb(0x3c1);
+	smtc_mmiowb(val, 0x3c0);
+}
+
+static inline void smtc_seqw(int reg, int val)
+{
+	smtc_mmiowb(reg, 0x3c4);
+	smtc_mmiowb(val, 0x3c5);
+}
+
+static inline unsigned int smtc_seqr(int reg)
+{
+	smtc_mmiowb(reg, 0x3c4);
+	return smtc_mmiorb(0x3c5);
+}
+
+/* The next structure holds all information relevant for a specific video mode.
+ */
+
+struct modeinit {
+	int mmsizex;
+	int mmsizey;
+	int bpp;
+	int hz;
+	unsigned char init_misc;
+	unsigned char init_sr00_sr04[SIZE_SR00_SR04];
+	unsigned char init_sr10_sr24[SIZE_SR10_SR24];
+	unsigned char init_sr30_sr75[SIZE_SR30_SR75];
+	unsigned char init_sr80_sr93[SIZE_SR80_SR93];
+	unsigned char init_sra0_sraf[SIZE_SRA0_SRAF];
+	unsigned char init_gr00_gr08[SIZE_GR00_GR08];
+	unsigned char init_ar00_ar14[SIZE_AR00_AR14];
+	unsigned char init_cr00_cr18[SIZE_CR00_CR18];
+	unsigned char init_cr30_cr4d[SIZE_CR30_CR4D];
+	unsigned char init_cr90_cra7[SIZE_CR90_CRA7];
+};
+
+#ifdef __BIG_ENDIAN
+#define pal_rgb(r, g, b, val)	(((r & 0xf800) >> 8) | \
+				((g & 0xe000) >> 13) | \
+				((g & 0x1c00) << 3) | \
+				((b & 0xf800) >> 3))
+#define big_addr		0x800000
+#define mmio_addr		0x00800000
+#define seqw17()		smtc_seqw(0x17, 0x30)
+#define big_pixel_depth(p, d)	{if (p == 24) {p = 32; d = 32; } }
+#define big_swap(p)		(((p & 0xff00ff00) >> 8) | ((p & 0x00ff00ff) << 8))
+#else
+#define pal_rgb(r, g, b, val)	val
+#define big_addr		0
+#define mmio_addr		0x00c00000
+#define seqw17()		do { } while (0)
+#define big_pixel_depth(p, d)	do { } while (0)
+#define big_swap(p)		p
+#endif
diff --git a/drivers/video/fbdev/sm712fb.c b/drivers/video/fbdev/sm712fb.c
new file mode 100644
index 0000000..629bfa2
--- /dev/null
+++ b/drivers/video/fbdev/sm712fb.c
@@ -0,0 +1,1653 @@
+/*
+ * Silicon Motion SM7XX frame buffer device
+ *
+ * Copyright (C) 2006 Silicon Motion Technology Corp.
+ * Authors:  Ge Wang, gewang@siliconmotion.com
+ *	     Boyod boyod.yang@siliconmotion.com.cn
+ *
+ * Copyright (C) 2009 Lemote, Inc.
+ * Author:   Wu Zhangjin, wuzhangjin@gmail.com
+ *
+ * Copyright (C) 2011 Igalia, S.L.
+ * Author:   Javier M. Mellid <jmunhoz@igalia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Framebuffer driver for Silicon Motion SM710, SM712, SM721 and SM722 chips
+ */
+
+#include <linux/io.h>
+#include <linux/fb.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+
+#ifdef CONFIG_PM
+#include <linux/pm.h>
+#endif
+
+#include "sm712.h"
+
+/*
+ * Private structure
+ */
+struct smtcfb_info {
+	struct pci_dev *pdev;
+	struct fb_info *fb;
+	u16 chip_id;
+	u8  chip_rev_id;
+
+	void __iomem *lfb;	/* linear frame buffer */
+	void __iomem *dp_regs;	/* drawing processor control regs */
+	void __iomem *vp_regs;	/* video processor control regs */
+	void __iomem *cp_regs;	/* capture processor control regs */
+	void __iomem *mmio;	/* memory map IO port */
+
+	u_int width;
+	u_int height;
+	u_int hz;
+
+	u32 colreg[17];
+};
+
+void __iomem *smtc_regbaseaddress;	/* Memory Map IO starting address */
+
+static struct fb_var_screeninfo smtcfb_var = {
+	.xres           = 1024,
+	.yres           = 600,
+	.xres_virtual   = 1024,
+	.yres_virtual   = 600,
+	.bits_per_pixel = 16,
+	.red            = {16, 8, 0},
+	.green          = {8, 8, 0},
+	.blue           = {0, 8, 0},
+	.activate       = FB_ACTIVATE_NOW,
+	.height         = -1,
+	.width          = -1,
+	.vmode          = FB_VMODE_NONINTERLACED,
+	.nonstd         = 0,
+	.accel_flags    = FB_ACCELF_TEXT,
+};
+
+static struct fb_fix_screeninfo smtcfb_fix = {
+	.id             = "smXXXfb",
+	.type           = FB_TYPE_PACKED_PIXELS,
+	.visual         = FB_VISUAL_TRUECOLOR,
+	.line_length    = 800 * 3,
+	.accel          = FB_ACCEL_SMI_LYNX,
+	.type_aux       = 0,
+	.xpanstep       = 0,
+	.ypanstep       = 0,
+	.ywrapstep      = 0,
+};
+
+struct vesa_mode {
+	char index[6];
+	u16  lfb_width;
+	u16  lfb_height;
+	u16  lfb_depth;
+};
+
+static const struct vesa_mode vesa_mode_table[] = {
+	{"0x301", 640,  480,  8},
+	{"0x303", 800,  600,  8},
+	{"0x305", 1024, 768,  8},
+	{"0x307", 1280, 1024, 8},
+
+	{"0x311", 640,  480,  16},
+	{"0x314", 800,  600,  16},
+	{"0x317", 1024, 768,  16},
+	{"0x31A", 1280, 1024, 16},
+
+	{"0x312", 640,  480,  24},
+	{"0x315", 800,  600,  24},
+	{"0x318", 1024, 768,  24},
+	{"0x31B", 1280, 1024, 24},
+};
+
+/**********************************************************************
+			 SM712 Mode table.
+ **********************************************************************/
+static const struct modeinit vgamode[] = {
+	{
+		/*  mode#0: 640 x 480  16Bpp  60Hz */
+		640, 480, 16, 60,
+		/*  Init_MISC */
+		0xE3,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x00, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+			0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+			0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+			0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+			0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+			0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+			0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+			0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+			0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+			0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+		},
+	},
+	{
+		/*  mode#1: 640 x 480  24Bpp  60Hz */
+		640, 480, 24, 60,
+		/*  Init_MISC */
+		0xE3,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x00, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+			0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+			0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+			0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+			0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+			0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+			0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+			0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+			0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+			0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+		},
+	},
+	{
+		/*  mode#0: 640 x 480  32Bpp  60Hz */
+		640, 480, 32, 60,
+		/*  Init_MISC */
+		0xE3,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x00, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xFF, 0xBE, 0xEF, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x32, 0x03, 0xA0, 0x09, 0xC0, 0x32, 0x32, 0x32,
+			0x32, 0x32, 0x32, 0x32, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x32, 0x32, 0x32,
+			0x04, 0x24, 0x63, 0x4F, 0x52, 0x0B, 0xDF, 0xEA,
+			0x04, 0x50, 0x19, 0x32, 0x32, 0x00, 0x00, 0x32,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x07, 0x82, 0x07, 0x04,
+			0x00, 0x45, 0x30, 0x30, 0x40, 0x30,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x32,
+			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x32, 0x32,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xFF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0x5F, 0x4F, 0x4F, 0x00, 0x53, 0x1F, 0x0B, 0x3E,
+			0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xEA, 0x0C, 0xDF, 0x50, 0x40, 0xDF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x03, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xFF, 0xFD,
+			0x5F, 0x4F, 0x00, 0x54, 0x00, 0x0B, 0xDF, 0x00,
+			0xEA, 0x0C, 0x2E, 0x00, 0x4F, 0xDF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x56, 0xDD, 0x5E, 0xEA, 0x87, 0x44, 0x8F, 0x55,
+			0x0A, 0x8F, 0x55, 0x0A, 0x00, 0x00, 0x18, 0x00,
+			0x11, 0x10, 0x0B, 0x0A, 0x0A, 0x0A, 0x0A, 0x00,
+		},
+	},
+
+	{	/*  mode#2: 800 x 600  16Bpp  60Hz */
+		800, 600, 16, 60,
+		/*  Init_MISC */
+		0x2B,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
+			0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
+			0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+			0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
+			0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+			0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
+			0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+			0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+			0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+			0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+			0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+		},
+	},
+	{	/*  mode#3: 800 x 600  24Bpp  60Hz */
+		800, 600, 24, 60,
+		0x2B,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x36, 0x03, 0x20, 0x09, 0xC0, 0x36, 0x36, 0x36,
+			0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x36, 0x36, 0x36,
+			0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+			0x04, 0x55, 0x59, 0x36, 0x36, 0x00, 0x00, 0x36,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+			0x02, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x36,
+			0xF7, 0x00, 0x00, 0x00, 0xEF, 0xFF, 0x36, 0x36,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+			0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+			0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+			0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+			0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+		},
+	},
+	{	/*  mode#7: 800 x 600  32Bpp  60Hz */
+		800, 600, 32, 60,
+		/*  Init_MISC */
+		0x2B,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xFF, 0xBE, 0xEE, 0xFF, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x34, 0x03, 0x20, 0x09, 0xC0, 0x24, 0x24, 0x24,
+			0x24, 0x24, 0x24, 0x24, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x38, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x24, 0x24, 0x24,
+			0x04, 0x48, 0x83, 0x63, 0x68, 0x72, 0x57, 0x58,
+			0x04, 0x55, 0x59, 0x24, 0x24, 0x00, 0x00, 0x24,
+			0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x1C, 0x85, 0x35, 0x13,
+			0x02, 0x45, 0x30, 0x35, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0x00, 0x00, 0x00, 0x6F, 0x7F, 0x7F, 0xFF, 0x24,
+			0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x24, 0x24,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFF, 0xBF, 0xFF, 0xFF, 0xED, 0xED, 0xED,
+			0x7B, 0xFF, 0xFF, 0xFF, 0xBF, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0x7F, 0x63, 0x63, 0x00, 0x68, 0x18, 0x72, 0xF0,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x58, 0x0C, 0x57, 0x64, 0x40, 0x57, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x03, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xE7, 0xBF, 0xFD,
+			0x7F, 0x63, 0x00, 0x69, 0x18, 0x72, 0x57, 0x00,
+			0x58, 0x0C, 0xE0, 0x20, 0x63, 0x57,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x56, 0x4B, 0x5E, 0x55, 0x86, 0x9D, 0x8E, 0xAA,
+			0xDB, 0x2A, 0xDF, 0x33, 0x00, 0x00, 0x18, 0x00,
+			0x20, 0x1F, 0x1A, 0x19, 0x0F, 0x0F, 0x0F, 0x00,
+		},
+	},
+	/* We use 1024x768 table to light 1024x600 panel for lemote */
+	{	/*  mode#4: 1024 x 600  16Bpp  60Hz  */
+		1024, 600, 16, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x00, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xC8, 0x40, 0x14, 0x60, 0x00, 0x0A, 0x17, 0x20,
+			0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x00, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x22, 0x03, 0x24, 0x09, 0xC0, 0x22, 0x22, 0x22,
+			0x22, 0x22, 0x22, 0x22, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x22, 0x22, 0x22,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x00, 0x60, 0x59, 0x22, 0x22, 0x00, 0x00, 0x22,
+			0x01, 0x80, 0x7A, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x16, 0x02, 0x0D, 0x82, 0x09, 0x02,
+			0x04, 0x45, 0x3F, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0xA3, 0x7F, 0x00, 0x82, 0x0b, 0x6f, 0x57, 0x00,
+			0x5c, 0x0f, 0xE0, 0xe0, 0x7F, 0x57,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
+	{	/*  mode#5: 1024 x 768  24Bpp  60Hz */
+		1024, 768, 24, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x30, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+			0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
+	{	/*  mode#4: 1024 x 768  32Bpp  60Hz */
+		1024, 768, 32, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x32, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x3B, 0x0D, 0x09, 0x02,
+			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x00, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0xA3, 0x7F, 0x00, 0x86, 0x15, 0x24, 0xFF, 0x00,
+			0x01, 0x07, 0xE5, 0x20, 0x7F, 0xFF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
+	{	/*  mode#6: 320 x 240  16Bpp  60Hz */
+		320, 240, 16, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x32, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
+			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
+			0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
+
+	{	/*  mode#8: 320 x 240  32Bpp  60Hz */
+		320, 240, 32, 60,
+		/*  Init_MISC */
+		0xEB,
+		{	/*  Init_SR0_SR4 */
+			0x03, 0x01, 0x0F, 0x03, 0x0E,
+		},
+		{	/*  Init_SR10_SR24 */
+			0xF3, 0xB6, 0xC0, 0xDD, 0x00, 0x0E, 0x17, 0x2C,
+			0x99, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0xC4, 0x32, 0x02, 0x01, 0x01,
+		},
+		{	/*  Init_SR30_SR75 */
+			0x38, 0x03, 0x20, 0x09, 0xC0, 0x3A, 0x3A, 0x3A,
+			0x3A, 0x3A, 0x3A, 0x3A, 0x00, 0x00, 0x03, 0xFF,
+			0x00, 0xFC, 0x00, 0x00, 0x20, 0x18, 0x00, 0xFC,
+			0x20, 0x0C, 0x44, 0x20, 0x00, 0x00, 0x00, 0x3A,
+			0x06, 0x68, 0xA7, 0x7F, 0x83, 0x24, 0xFF, 0x03,
+			0x00, 0x60, 0x59, 0x3A, 0x3A, 0x00, 0x00, 0x3A,
+			0x01, 0x80, 0x7E, 0x1A, 0x1A, 0x00, 0x00, 0x00,
+			0x50, 0x03, 0x74, 0x14, 0x08, 0x43, 0x08, 0x43,
+			0x04, 0x45, 0x30, 0x30, 0x40, 0x20,
+		},
+		{	/*  Init_SR80_SR93 */
+			0xFF, 0x07, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x3A,
+			0xF7, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0x3A, 0x3A,
+			0x00, 0x00, 0x00, 0x00,
+		},
+		{	/*  Init_SRA0_SRAF */
+			0x00, 0xFB, 0x9F, 0x01, 0x00, 0xED, 0xED, 0xED,
+			0x7B, 0xFB, 0xFF, 0xFF, 0x97, 0xEF, 0xBF, 0xDF,
+		},
+		{	/*  Init_GR00_GR08 */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0F,
+			0xFF,
+		},
+		{	/*  Init_AR00_AR14 */
+			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+			0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+			0x41, 0x00, 0x0F, 0x00, 0x00,
+		},
+		{	/*  Init_CR00_CR18 */
+			0xA3, 0x7F, 0x7F, 0x00, 0x85, 0x16, 0x24, 0xF5,
+			0x00, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x03, 0x09, 0xFF, 0x80, 0x40, 0xFF, 0x00, 0xE3,
+			0xFF,
+		},
+		{	/*  Init_CR30_CR4D */
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x20,
+			0x00, 0x00, 0x30, 0x40, 0x00, 0xFF, 0xBF, 0xFF,
+			0x2E, 0x27, 0x00, 0x2b, 0x0c, 0x0F, 0xEF, 0x00,
+			0xFe, 0x0f, 0x01, 0xC0, 0x27, 0xEF,
+		},
+		{	/*  Init_CR90_CRA7 */
+			0x55, 0xD9, 0x5D, 0xE1, 0x86, 0x1B, 0x8E, 0x26,
+			0xDA, 0x8D, 0xDE, 0x94, 0x00, 0x00, 0x18, 0x00,
+			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x15, 0x03,
+		},
+	},
+};
+
+static struct screen_info smtc_scr_info;
+
+static char *mode_option;
+
+/* process command line options, get vga parameter */
+static void __init sm7xx_vga_setup(char *options)
+{
+	int i;
+
+	if (!options || !*options)
+		return;
+
+	smtc_scr_info.lfb_width = 0;
+	smtc_scr_info.lfb_height = 0;
+	smtc_scr_info.lfb_depth = 0;
+
+	pr_debug("sm7xx_vga_setup = %s\n", options);
+
+	for (i = 0; i < ARRAY_SIZE(vesa_mode_table); i++) {
+		if (strstr(options, vesa_mode_table[i].index)) {
+			smtc_scr_info.lfb_width  = vesa_mode_table[i].lfb_width;
+			smtc_scr_info.lfb_height =
+						vesa_mode_table[i].lfb_height;
+			smtc_scr_info.lfb_depth  = vesa_mode_table[i].lfb_depth;
+			return;
+		}
+	}
+}
+
+static void sm712_setpalette(int regno, unsigned red, unsigned green,
+			     unsigned blue, struct fb_info *info)
+{
+	/* set bit 5:4 = 01 (write LCD RAM only) */
+	smtc_seqw(0x66, (smtc_seqr(0x66) & 0xC3) | 0x10);
+
+	smtc_mmiowb(regno, dac_reg);
+	smtc_mmiowb(red >> 10, dac_val);
+	smtc_mmiowb(green >> 10, dac_val);
+	smtc_mmiowb(blue >> 10, dac_val);
+}
+
+/* chan_to_field
+ *
+ * convert a colour value into a field position
+ *
+ * from pxafb.c
+ */
+
+static inline unsigned int chan_to_field(unsigned int chan,
+					 struct fb_bitfield *bf)
+{
+	chan &= 0xffff;
+	chan >>= 16 - bf->length;
+	return chan << bf->offset;
+}
+
+static int smtc_blank(int blank_mode, struct fb_info *info)
+{
+	/* clear DPMS setting */
+	switch (blank_mode) {
+	case FB_BLANK_UNBLANK:
+		/* Screen On: HSync: On, VSync : On */
+		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+		smtc_seqw(0x6a, 0x16);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x21, (smtc_seqr(0x21) & 0x77));
+		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+		smtc_seqw(0x31, (smtc_seqr(0x31) | 0x03));
+		break;
+	case FB_BLANK_NORMAL:
+		/* Screen Off: HSync: On, VSync : On   Soft blank */
+		smtc_seqw(0x01, (smtc_seqr(0x01) & (~0x20)));
+		smtc_seqw(0x6a, 0x16);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x22, (smtc_seqr(0x22) & (~0x30)));
+		smtc_seqw(0x23, (smtc_seqr(0x23) & (~0xc0)));
+		smtc_seqw(0x24, (smtc_seqr(0x24) | 0x01));
+		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		break;
+	case FB_BLANK_VSYNC_SUSPEND:
+		/* Screen On: HSync: On, VSync : Off */
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x20));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0x20));
+		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		break;
+	case FB_BLANK_HSYNC_SUSPEND:
+		/* Screen On: HSync: Off, VSync : On */
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x10));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		break;
+	case FB_BLANK_POWERDOWN:
+		/* Screen On: HSync: Off, VSync : Off */
+		smtc_seqw(0x01, (smtc_seqr(0x01) | 0x20));
+		smtc_seqw(0x20, (smtc_seqr(0x20) & (~0xB0)));
+		smtc_seqw(0x6a, 0x0c);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x21, (smtc_seqr(0x21) | 0x88));
+		smtc_seqw(0x22, ((smtc_seqr(0x22) & (~0x30)) | 0x30));
+		smtc_seqw(0x23, ((smtc_seqr(0x23) & (~0xc0)) | 0xD8));
+		smtc_seqw(0x24, (smtc_seqr(0x24) & (~0x01)));
+		smtc_seqw(0x31, ((smtc_seqr(0x31) & (~0x07)) | 0x00));
+		smtc_seqw(0x34, (smtc_seqr(0x34) | 0x80));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smtc_setcolreg(unsigned regno, unsigned red, unsigned green,
+			  unsigned blue, unsigned trans, struct fb_info *info)
+{
+	struct smtcfb_info *sfb;
+	u32 val;
+
+	sfb = info->par;
+
+	if (regno > 255)
+		return 1;
+
+	switch (sfb->fb->fix.visual) {
+	case FB_VISUAL_DIRECTCOLOR:
+	case FB_VISUAL_TRUECOLOR:
+		/*
+		 * 16/32 bit true-colour, use pseudo-palette for 16 base color
+		 */
+		if (regno >= 16)
+			break;
+		if (sfb->fb->var.bits_per_pixel == 16) {
+			u32 *pal = sfb->fb->pseudo_palette;
+
+			val = chan_to_field(red, &sfb->fb->var.red);
+			val |= chan_to_field(green, &sfb->fb->var.green);
+			val |= chan_to_field(blue, &sfb->fb->var.blue);
+			pal[regno] = pal_rgb(red, green, blue, val);
+		} else {
+			u32 *pal = sfb->fb->pseudo_palette;
+
+			val = chan_to_field(red, &sfb->fb->var.red);
+			val |= chan_to_field(green, &sfb->fb->var.green);
+			val |= chan_to_field(blue, &sfb->fb->var.blue);
+			pal[regno] = big_swap(val);
+		}
+		break;
+
+	case FB_VISUAL_PSEUDOCOLOR:
+		/* color depth 8 bit */
+		sm712_setpalette(regno, red, green, blue, info);
+		break;
+
+	default:
+		return 1;	/* unknown type */
+	}
+
+	return 0;
+}
+
+static ssize_t smtcfb_read(struct fb_info *info, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	unsigned long p = *ppos;
+
+	u32 *buffer, *dst;
+	u32 __iomem *src;
+	int c, i, cnt = 0, err = 0;
+	unsigned long total_size;
+
+	if (!info || !info->screen_base)
+		return -ENODEV;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return -EPERM;
+
+	total_size = info->screen_size;
+
+	if (total_size == 0)
+		total_size = info->fix.smem_len;
+
+	if (p >= total_size)
+		return 0;
+
+	if (count >= total_size)
+		count = total_size;
+
+	if (count + p > total_size)
+		count = total_size - p;
+
+	buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	src = (u32 __iomem *)(info->screen_base + p);
+
+	if (info->fbops->fb_sync)
+		info->fbops->fb_sync(info);
+
+	while (count) {
+		c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+		dst = buffer;
+		for (i = c >> 2; i--;) {
+			*dst = fb_readl(src++);
+			*dst = big_swap(*dst);
+			dst++;
+		}
+		if (c & 3) {
+			u8 *dst8 = (u8 *)dst;
+			u8 __iomem *src8 = (u8 __iomem *)src;
+
+			for (i = c & 3; i--;) {
+				if (i & 1) {
+					*dst8++ = fb_readb(++src8);
+				} else {
+					*dst8++ = fb_readb(--src8);
+					src8 += 2;
+				}
+			}
+			src = (u32 __iomem *)src8;
+		}
+
+		if (copy_to_user(buf, buffer, c)) {
+			err = -EFAULT;
+			break;
+		}
+		*ppos += c;
+		buf += c;
+		cnt += c;
+		count -= c;
+	}
+
+	kfree(buffer);
+
+	return (err) ? err : cnt;
+}
+
+static ssize_t smtcfb_write(struct fb_info *info, const char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	unsigned long p = *ppos;
+
+	u32 *buffer, *src;
+	u32 __iomem *dst;
+	int c, i, cnt = 0, err = 0;
+	unsigned long total_size;
+
+	if (!info || !info->screen_base)
+		return -ENODEV;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return -EPERM;
+
+	total_size = info->screen_size;
+
+	if (total_size == 0)
+		total_size = info->fix.smem_len;
+
+	if (p > total_size)
+		return -EFBIG;
+
+	if (count > total_size) {
+		err = -EFBIG;
+		count = total_size;
+	}
+
+	if (count + p > total_size) {
+		if (!err)
+			err = -ENOSPC;
+
+		count = total_size - p;
+	}
+
+	buffer = kmalloc((count > PAGE_SIZE) ? PAGE_SIZE : count, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	dst = (u32 __iomem *)(info->screen_base + p);
+
+	if (info->fbops->fb_sync)
+		info->fbops->fb_sync(info);
+
+	while (count) {
+		c = (count > PAGE_SIZE) ? PAGE_SIZE : count;
+		src = buffer;
+
+		if (copy_from_user(src, buf, c)) {
+			err = -EFAULT;
+			break;
+		}
+
+		for (i = c >> 2; i--;) {
+			fb_writel(big_swap(*src), dst++);
+			src++;
+		}
+		if (c & 3) {
+			u8 *src8 = (u8 *)src;
+			u8 __iomem *dst8 = (u8 __iomem *)dst;
+
+			for (i = c & 3; i--;) {
+				if (i & 1) {
+					fb_writeb(*src8++, ++dst8);
+				} else {
+					fb_writeb(*src8++, --dst8);
+					dst8 += 2;
+				}
+			}
+			dst = (u32 __iomem *)dst8;
+		}
+
+		*ppos += c;
+		buf += c;
+		cnt += c;
+		count -= c;
+	}
+
+	kfree(buffer);
+
+	return (cnt) ? cnt : err;
+}
+
+static void sm7xx_set_timing(struct smtcfb_info *sfb)
+{
+	int i = 0, j = 0;
+	u32 m_nscreenstride;
+
+	dev_dbg(&sfb->pdev->dev,
+		"sfb->width=%d sfb->height=%d sfb->fb->var.bits_per_pixel=%d sfb->hz=%d\n",
+		sfb->width, sfb->height, sfb->fb->var.bits_per_pixel, sfb->hz);
+
+	for (j = 0; j < ARRAY_SIZE(vgamode); j++) {
+		if (vgamode[j].mmsizex != sfb->width ||
+		    vgamode[j].mmsizey != sfb->height ||
+		    vgamode[j].bpp != sfb->fb->var.bits_per_pixel ||
+		    vgamode[j].hz != sfb->hz)
+			continue;
+
+		dev_dbg(&sfb->pdev->dev,
+			"vgamode[j].mmsizex=%d vgamode[j].mmSizeY=%d vgamode[j].bpp=%d vgamode[j].hz=%d\n",
+			vgamode[j].mmsizex, vgamode[j].mmsizey,
+			vgamode[j].bpp, vgamode[j].hz);
+
+		dev_dbg(&sfb->pdev->dev, "vgamode index=%d\n", j);
+
+		smtc_mmiowb(0x0, 0x3c6);
+
+		smtc_seqw(0, 0x1);
+
+		smtc_mmiowb(vgamode[j].init_misc, 0x3c2);
+
+		/* init SEQ register SR00 - SR04 */
+		for (i = 0; i < SIZE_SR00_SR04; i++)
+			smtc_seqw(i, vgamode[j].init_sr00_sr04[i]);
+
+		/* init SEQ register SR10 - SR24 */
+		for (i = 0; i < SIZE_SR10_SR24; i++)
+			smtc_seqw(i + 0x10, vgamode[j].init_sr10_sr24[i]);
+
+		/* init SEQ register SR30 - SR75 */
+		for (i = 0; i < SIZE_SR30_SR75; i++)
+			if ((i + 0x30) != 0x62 && (i + 0x30) != 0x6a &&
+			    (i + 0x30) != 0x6b)
+				smtc_seqw(i + 0x30,
+					  vgamode[j].init_sr30_sr75[i]);
+
+		/* init SEQ register SR80 - SR93 */
+		for (i = 0; i < SIZE_SR80_SR93; i++)
+			smtc_seqw(i + 0x80, vgamode[j].init_sr80_sr93[i]);
+
+		/* init SEQ register SRA0 - SRAF */
+		for (i = 0; i < SIZE_SRA0_SRAF; i++)
+			smtc_seqw(i + 0xa0, vgamode[j].init_sra0_sraf[i]);
+
+		/* init Graphic register GR00 - GR08 */
+		for (i = 0; i < SIZE_GR00_GR08; i++)
+			smtc_grphw(i, vgamode[j].init_gr00_gr08[i]);
+
+		/* init Attribute register AR00 - AR14 */
+		for (i = 0; i < SIZE_AR00_AR14; i++)
+			smtc_attrw(i, vgamode[j].init_ar00_ar14[i]);
+
+		/* init CRTC register CR00 - CR18 */
+		for (i = 0; i < SIZE_CR00_CR18; i++)
+			smtc_crtcw(i, vgamode[j].init_cr00_cr18[i]);
+
+		/* init CRTC register CR30 - CR4D */
+		for (i = 0; i < SIZE_CR30_CR4D; i++)
+			smtc_crtcw(i + 0x30, vgamode[j].init_cr30_cr4d[i]);
+
+		/* init CRTC register CR90 - CRA7 */
+		for (i = 0; i < SIZE_CR90_CRA7; i++)
+			smtc_crtcw(i + 0x90, vgamode[j].init_cr90_cra7[i]);
+	}
+	smtc_mmiowb(0x67, 0x3c2);
+
+	/* set VPR registers */
+	writel(0x0, sfb->vp_regs + 0x0C);
+	writel(0x0, sfb->vp_regs + 0x40);
+
+	/* set data width */
+	m_nscreenstride = (sfb->width * sfb->fb->var.bits_per_pixel) / 64;
+	switch (sfb->fb->var.bits_per_pixel) {
+	case 8:
+		writel(0x0, sfb->vp_regs + 0x0);
+		break;
+	case 16:
+		writel(0x00020000, sfb->vp_regs + 0x0);
+		break;
+	case 24:
+		writel(0x00040000, sfb->vp_regs + 0x0);
+		break;
+	case 32:
+		writel(0x00030000, sfb->vp_regs + 0x0);
+		break;
+	}
+	writel((u32)(((m_nscreenstride + 2) << 16) | m_nscreenstride),
+	       sfb->vp_regs + 0x10);
+}
+
+static void smtc_set_timing(struct smtcfb_info *sfb)
+{
+	switch (sfb->chip_id) {
+	case 0x710:
+	case 0x712:
+	case 0x720:
+		sm7xx_set_timing(sfb);
+		break;
+	}
+}
+
+static void smtcfb_setmode(struct smtcfb_info *sfb)
+{
+	switch (sfb->fb->var.bits_per_pixel) {
+	case 32:
+		sfb->fb->fix.visual       = FB_VISUAL_TRUECOLOR;
+		sfb->fb->fix.line_length  = sfb->fb->var.xres * 4;
+		sfb->fb->var.red.length   = 8;
+		sfb->fb->var.green.length = 8;
+		sfb->fb->var.blue.length  = 8;
+		sfb->fb->var.red.offset   = 16;
+		sfb->fb->var.green.offset = 8;
+		sfb->fb->var.blue.offset  = 0;
+		break;
+	case 24:
+		sfb->fb->fix.visual       = FB_VISUAL_TRUECOLOR;
+		sfb->fb->fix.line_length  = sfb->fb->var.xres * 3;
+		sfb->fb->var.red.length   = 8;
+		sfb->fb->var.green.length = 8;
+		sfb->fb->var.blue.length  = 8;
+		sfb->fb->var.red.offset   = 16;
+		sfb->fb->var.green.offset = 8;
+		sfb->fb->var.blue.offset  = 0;
+		break;
+	case 8:
+		sfb->fb->fix.visual       = FB_VISUAL_PSEUDOCOLOR;
+		sfb->fb->fix.line_length  = sfb->fb->var.xres;
+		sfb->fb->var.red.length   = 3;
+		sfb->fb->var.green.length = 3;
+		sfb->fb->var.blue.length  = 2;
+		sfb->fb->var.red.offset   = 5;
+		sfb->fb->var.green.offset = 2;
+		sfb->fb->var.blue.offset  = 0;
+		break;
+	case 16:
+	default:
+		sfb->fb->fix.visual       = FB_VISUAL_TRUECOLOR;
+		sfb->fb->fix.line_length  = sfb->fb->var.xres * 2;
+		sfb->fb->var.red.length   = 5;
+		sfb->fb->var.green.length = 6;
+		sfb->fb->var.blue.length  = 5;
+		sfb->fb->var.red.offset   = 11;
+		sfb->fb->var.green.offset = 5;
+		sfb->fb->var.blue.offset  = 0;
+		break;
+	}
+
+	sfb->width  = sfb->fb->var.xres;
+	sfb->height = sfb->fb->var.yres;
+	sfb->hz = 60;
+	smtc_set_timing(sfb);
+}
+
+static int smtc_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	/* sanity checks */
+	if (var->xres_virtual < var->xres)
+		var->xres_virtual = var->xres;
+
+	if (var->yres_virtual < var->yres)
+		var->yres_virtual = var->yres;
+
+	/* set valid default bpp */
+	if ((var->bits_per_pixel != 8)  && (var->bits_per_pixel != 16) &&
+	    (var->bits_per_pixel != 24) && (var->bits_per_pixel != 32))
+		var->bits_per_pixel = 16;
+
+	return 0;
+}
+
+static int smtc_set_par(struct fb_info *info)
+{
+	smtcfb_setmode(info->par);
+
+	return 0;
+}
+
+static struct fb_ops smtcfb_ops = {
+	.owner        = THIS_MODULE,
+	.fb_check_var = smtc_check_var,
+	.fb_set_par   = smtc_set_par,
+	.fb_setcolreg = smtc_setcolreg,
+	.fb_blank     = smtc_blank,
+	.fb_fillrect  = cfb_fillrect,
+	.fb_imageblit = cfb_imageblit,
+	.fb_copyarea  = cfb_copyarea,
+	.fb_read      = smtcfb_read,
+	.fb_write     = smtcfb_write,
+};
+
+/*
+ * Unmap the memory mapped IO registers
+ */
+
+static void smtc_unmap_mmio(struct smtcfb_info *sfb)
+{
+	if (sfb && smtc_regbaseaddress)
+		smtc_regbaseaddress = NULL;
+}
+
+/*
+ * Map in the screen memory
+ */
+
+static int smtc_map_smem(struct smtcfb_info *sfb,
+			 struct pci_dev *pdev, u_long smem_len)
+{
+	sfb->fb->fix.smem_start = pci_resource_start(pdev, 0);
+
+	if (sfb->fb->var.bits_per_pixel == 32)
+		sfb->fb->fix.smem_start += big_addr;
+
+	sfb->fb->fix.smem_len = smem_len;
+
+	sfb->fb->screen_base = sfb->lfb;
+
+	if (!sfb->fb->screen_base) {
+		dev_err(&pdev->dev,
+			"%s: unable to map screen memory\n", sfb->fb->fix.id);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * Unmap the screen memory
+ */
+static void smtc_unmap_smem(struct smtcfb_info *sfb)
+{
+	if (sfb && sfb->fb->screen_base) {
+		iounmap(sfb->fb->screen_base);
+		sfb->fb->screen_base = NULL;
+	}
+}
+
+/*
+ * We need to wake up the device and make sure it's in linear memory mode.
+ */
+static inline void sm7xx_init_hw(void)
+{
+	outb_p(0x18, 0x3c4);
+	outb_p(0x11, 0x3c5);
+}
+
+static int smtcfb_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct smtcfb_info *sfb;
+	struct fb_info *info;
+	u_long smem_size = 0x00800000;	/* default 8MB */
+	int err;
+	unsigned long mmio_base;
+
+	dev_info(&pdev->dev, "Silicon Motion display driver.\n");
+
+	err = pci_enable_device(pdev);	/* enable SMTC chip */
+	if (err)
+		return err;
+
+	err = pci_request_region(pdev, 0, "sm7xxfb");
+	if (err < 0) {
+		dev_err(&pdev->dev, "cannot reserve framebuffer region\n");
+		goto failed_regions;
+	}
+
+	sprintf(smtcfb_fix.id, "sm%Xfb", ent->device);
+
+	info = framebuffer_alloc(sizeof(*sfb), &pdev->dev);
+	if (!info) {
+		dev_err(&pdev->dev, "framebuffer_alloc failed\n");
+		err = -ENOMEM;
+		goto failed_free;
+	}
+
+	sfb = info->par;
+	sfb->fb = info;
+	sfb->chip_id = ent->device;
+	sfb->pdev = pdev;
+	info->flags = FBINFO_FLAG_DEFAULT;
+	info->fbops = &smtcfb_ops;
+	info->fix = smtcfb_fix;
+	info->var = smtcfb_var;
+	info->pseudo_palette = sfb->colreg;
+	info->par = sfb;
+
+	pci_set_drvdata(pdev, sfb);
+
+	sm7xx_init_hw();
+
+	/* get mode parameter from smtc_scr_info */
+	if (smtc_scr_info.lfb_width != 0) {
+		sfb->fb->var.xres = smtc_scr_info.lfb_width;
+		sfb->fb->var.yres = smtc_scr_info.lfb_height;
+		sfb->fb->var.bits_per_pixel = smtc_scr_info.lfb_depth;
+	} else {
+		/* default resolution 1024x600 16bit mode */
+		sfb->fb->var.xres = SCREEN_X_RES;
+		sfb->fb->var.yres = SCREEN_Y_RES;
+		sfb->fb->var.bits_per_pixel = SCREEN_BPP;
+	}
+
+	big_pixel_depth(sfb->fb->var.bits_per_pixel, smtc_scr_info.lfb_depth);
+	/* Map address and memory detection */
+	mmio_base = pci_resource_start(pdev, 0);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &sfb->chip_rev_id);
+
+	switch (sfb->chip_id) {
+	case 0x710:
+	case 0x712:
+		sfb->fb->fix.mmio_start = mmio_base + 0x00400000;
+		sfb->fb->fix.mmio_len = 0x00400000;
+		smem_size = SM712_VIDEOMEMORYSIZE;
+		sfb->lfb = ioremap(mmio_base, mmio_addr);
+		if (!sfb->lfb) {
+			dev_err(&pdev->dev,
+				"%s: unable to map memory mapped IO!\n",
+				sfb->fb->fix.id);
+			err = -ENOMEM;
+			goto failed_fb;
+		}
+
+		sfb->mmio = (smtc_regbaseaddress =
+		    sfb->lfb + 0x00700000);
+		sfb->dp_regs = sfb->lfb + 0x00408000;
+		sfb->vp_regs = sfb->lfb + 0x0040c000;
+		if (sfb->fb->var.bits_per_pixel == 32) {
+			sfb->lfb += big_addr;
+			dev_info(&pdev->dev, "sfb->lfb=%p\n", sfb->lfb);
+		}
+
+		/* set MCLK = 14.31818 * (0x16 / 0x2) */
+		smtc_seqw(0x6a, 0x16);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x62, 0x3e);
+		/* enable PCI burst */
+		smtc_seqw(0x17, 0x20);
+		/* enable word swap */
+		if (sfb->fb->var.bits_per_pixel == 32)
+			seqw17();
+		break;
+	case 0x720:
+		sfb->fb->fix.mmio_start = mmio_base;
+		sfb->fb->fix.mmio_len = 0x00200000;
+		smem_size = SM722_VIDEOMEMORYSIZE;
+		sfb->dp_regs = ioremap(mmio_base, 0x00a00000);
+		sfb->lfb = sfb->dp_regs + 0x00200000;
+		sfb->mmio = (smtc_regbaseaddress =
+		    sfb->dp_regs + 0x000c0000);
+		sfb->vp_regs = sfb->dp_regs + 0x800;
+
+		smtc_seqw(0x62, 0xff);
+		smtc_seqw(0x6a, 0x0d);
+		smtc_seqw(0x6b, 0x02);
+		break;
+	default:
+		dev_err(&pdev->dev,
+			"No valid Silicon Motion display chip was detected!\n");
+
+		err = -ENODEV;
+		goto failed_fb;
+	}
+
+	/* can support 32 bpp */
+	if (15 == sfb->fb->var.bits_per_pixel)
+		sfb->fb->var.bits_per_pixel = 16;
+
+	sfb->fb->var.xres_virtual = sfb->fb->var.xres;
+	sfb->fb->var.yres_virtual = sfb->fb->var.yres;
+	err = smtc_map_smem(sfb, pdev, smem_size);
+	if (err)
+		goto failed;
+
+	smtcfb_setmode(sfb);
+
+	err = register_framebuffer(info);
+	if (err < 0)
+		goto failed;
+
+	dev_info(&pdev->dev,
+		 "Silicon Motion SM%X Rev%X primary display mode %dx%d-%d Init Complete.\n",
+		 sfb->chip_id, sfb->chip_rev_id, sfb->fb->var.xres,
+		 sfb->fb->var.yres, sfb->fb->var.bits_per_pixel);
+
+	return 0;
+
+failed:
+	dev_err(&pdev->dev, "Silicon Motion, Inc. primary display init fail.\n");
+
+	smtc_unmap_smem(sfb);
+	smtc_unmap_mmio(sfb);
+failed_fb:
+	framebuffer_release(info);
+
+failed_free:
+	pci_release_region(pdev, 0);
+
+failed_regions:
+	pci_disable_device(pdev);
+
+	return err;
+}
+
+/*
+ * 0x710 (LynxEM)
+ * 0x712 (LynxEM+)
+ * 0x720 (Lynx3DM, Lynx3DM+)
+ */
+static const struct pci_device_id smtcfb_pci_table[] = {
+	{ PCI_DEVICE(0x126f, 0x710), },
+	{ PCI_DEVICE(0x126f, 0x712), },
+	{ PCI_DEVICE(0x126f, 0x720), },
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, smtcfb_pci_table);
+
+static void smtcfb_pci_remove(struct pci_dev *pdev)
+{
+	struct smtcfb_info *sfb;
+
+	sfb = pci_get_drvdata(pdev);
+	smtc_unmap_smem(sfb);
+	smtc_unmap_mmio(sfb);
+	unregister_framebuffer(sfb->fb);
+	framebuffer_release(sfb->fb);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int smtcfb_pci_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct smtcfb_info *sfb;
+
+	sfb = pci_get_drvdata(pdev);
+
+	/* set the hw in sleep mode, use external clock and self memory refresh
+	 * so that we can turn off internal PLLs later on
+	 */
+	smtc_seqw(0x20, (smtc_seqr(0x20) | 0xc0));
+	smtc_seqw(0x69, (smtc_seqr(0x69) & 0xf7));
+
+	console_lock();
+	fb_set_suspend(sfb->fb, 1);
+	console_unlock();
+
+	/* additionally turn off all function blocks including internal PLLs */
+	smtc_seqw(0x21, 0xff);
+
+	return 0;
+}
+
+static int smtcfb_pci_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct smtcfb_info *sfb;
+
+	sfb = pci_get_drvdata(pdev);
+
+	/* reinit hardware */
+	sm7xx_init_hw();
+	switch (sfb->chip_id) {
+	case 0x710:
+	case 0x712:
+		/* set MCLK = 14.31818 * (0x16 / 0x2) */
+		smtc_seqw(0x6a, 0x16);
+		smtc_seqw(0x6b, 0x02);
+		smtc_seqw(0x62, 0x3e);
+		/* enable PCI burst */
+		smtc_seqw(0x17, 0x20);
+		if (sfb->fb->var.bits_per_pixel == 32)
+			seqw17();
+		break;
+	case 0x720:
+		smtc_seqw(0x62, 0xff);
+		smtc_seqw(0x6a, 0x0d);
+		smtc_seqw(0x6b, 0x02);
+		break;
+	}
+
+	smtc_seqw(0x34, (smtc_seqr(0x34) | 0xc0));
+	smtc_seqw(0x33, ((smtc_seqr(0x33) | 0x08) & 0xfb));
+
+	smtcfb_setmode(sfb);
+
+	console_lock();
+	fb_set_suspend(sfb->fb, 0);
+	console_unlock();
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(sm7xx_pm_ops, smtcfb_pci_suspend, smtcfb_pci_resume);
+#define SM7XX_PM_OPS (&sm7xx_pm_ops)
+
+#else  /* !CONFIG_PM */
+
+#define SM7XX_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+static struct pci_driver smtcfb_driver = {
+	.name = "smtcfb",
+	.id_table = smtcfb_pci_table,
+	.probe = smtcfb_pci_probe,
+	.remove = smtcfb_pci_remove,
+	.driver.pm  = SM7XX_PM_OPS,
+};
+
+static int __init sm712fb_init(void)
+{
+	char *option = NULL;
+
+	if (fb_get_options("sm712fb", &option))
+		return -ENODEV;
+	if (option && *option)
+		mode_option = option;
+	sm7xx_vga_setup(mode_option);
+
+	return pci_register_driver(&smtcfb_driver);
+}
+
+module_init(sm712fb_init);
+
+static void __exit sm712fb_exit(void)
+{
+	pci_unregister_driver(&smtcfb_driver);
+}
+
+module_exit(sm712fb_exit);
+
+MODULE_AUTHOR("Siliconmotion ");
+MODULE_DESCRIPTION("Framebuffer driver for SMI Graphic Cards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index 8bac309..dd0f18e 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -26,13 +26,9 @@
 #include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
 struct vt8623fb_info {
 	char __iomem *mmio_base;
-	int mtrr_reg;
+	int wc_cookie;
 	struct vgastate state;
 	struct mutex open_lock;
 	unsigned int ref_count;
@@ -99,10 +95,7 @@
 /* Module parameters */
 
 static char *mode_option = "640x480-8@60";
-
-#ifdef CONFIG_MTRR
 static int mtrr = 1;
-#endif
 
 MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
 MODULE_LICENSE("GPL");
@@ -112,11 +105,8 @@
 MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
 module_param_named(mode, mode_option, charp, 0);
 MODULE_PARM_DESC(mode, "Default video mode e.g. '648x480-8@60' (deprecated)");
-
-#ifdef CONFIG_MTRR
 module_param(mtrr, int, 0444);
 MODULE_PARM_DESC(mtrr, "Enable write-combining with MTRR (1=enable, 0=disable, default=1)");
-#endif
 
 
 /* ------------------------------------------------------------------------- */
@@ -710,7 +700,7 @@
 	info->fix.mmio_len = pci_resource_len(dev, 1);
 
 	/* Map physical IO memory address into kernel space */
-	info->screen_base = pci_iomap(dev, 0, 0);
+	info->screen_base = pci_iomap_wc(dev, 0, 0);
 	if (! info->screen_base) {
 		rc = -ENOMEM;
 		dev_err(info->device, "iomap for framebuffer failed\n");
@@ -781,12 +771,9 @@
 	/* Record a reference to the driver data */
 	pci_set_drvdata(dev, info);
 
-#ifdef CONFIG_MTRR
-	if (mtrr) {
-		par->mtrr_reg = -1;
-		par->mtrr_reg = mtrr_add(info->fix.smem_start, info->fix.smem_len, MTRR_TYPE_WRCOMB, 1);
-	}
-#endif
+	if (mtrr)
+		par->wc_cookie = arch_phys_wc_add(info->fix.smem_start,
+						  info->fix.smem_len);
 
 	return 0;
 
@@ -816,13 +803,7 @@
 	if (info) {
 		struct vt8623fb_info *par = info->par;
 
-#ifdef CONFIG_MTRR
-		if (par->mtrr_reg >= 0) {
-			mtrr_del(par->mtrr_reg, 0, 0);
-			par->mtrr_reg = -1;
-		}
-#endif
-
+		arch_phys_wc_del(par->wc_cookie);
 		unregister_framebuffer(info);
 		fb_dealloc_cmap(&info->cmap);
 
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index a674409..b05e8fe 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -97,7 +97,6 @@
 
 static struct i2c_driver ds2482_driver = {
 	.driver = {
-		.owner	= THIS_MODULE,
 		.name	= "ds2482",
 	},
 	.probe		= ds2482_probe,
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index d8667b0..3749db8 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -40,7 +40,7 @@
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
+MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire protocol) over VGA DDC(matrox gpio).");
 
 static struct pci_device_id matrox_w1_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
@@ -232,16 +232,4 @@
 	}
 	kfree(dev);
 }
-
-static int __init matrox_w1_init(void)
-{
-	return pci_register_driver(&matrox_w1_pci_driver);
-}
-
-static void __exit matrox_w1_fini(void)
-{
-	pci_unregister_driver(&matrox_w1_pci_driver);
-}
-
-module_init(matrox_w1_init);
-module_exit(matrox_w1_fini);
+module_pci_driver(matrox_w1_pci_driver);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 241fafd..55c4b5b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -797,7 +797,8 @@
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
 	select WATCHDOG_CORE
-	select LPC_ICH
+	select LPC_ICH if !EXPERT
+	select I2C_I801 if !EXPERT
 	---help---
 	  Hardware driver for the intel TCO timer based watchdog devices.
 	  These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index 41cecb5..9ba1153 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -269,9 +269,8 @@
 	if (res)
 		dev_warn(dev, "failed to unregister restart handler\n");
 
-	res = misc_deregister(&at91wdt_miscdev);
-	if (!res)
-		at91wdt_miscdev.parent = NULL;
+	misc_deregister(&at91wdt_miscdev);
+	at91wdt_miscdev.parent = NULL;
 
 	return res;
 }
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 3c3fd41..0acc6c5 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,8 +66,7 @@
 #include <linux/spinlock.h>		/* For spin_lock/spin_unlock/... */
 #include <linux/uaccess.h>		/* For copy_to_user/put_user/... */
 #include <linux/io.h>			/* For inb/outb/... */
-#include <linux/mfd/core.h>
-#include <linux/mfd/lpc_ich.h>
+#include <linux/platform_data/itco_wdt.h>
 
 #include "iTCO_vendor.h"
 
@@ -146,59 +145,67 @@
 	return iTCO_wdt_private.iTCO_version == 3 ? ticks : (ticks * 6) / 10;
 }
 
+static inline u32 no_reboot_bit(void)
+{
+	u32 enable_bit;
+
+	switch (iTCO_wdt_private.iTCO_version) {
+	case 3:
+		enable_bit = 0x00000010;
+		break;
+	case 2:
+		enable_bit = 0x00000020;
+		break;
+	case 4:
+	case 1:
+	default:
+		enable_bit = 0x00000002;
+		break;
+	}
+
+	return enable_bit;
+}
+
 static void iTCO_wdt_set_NO_REBOOT_bit(void)
 {
 	u32 val32;
 
 	/* Set the NO_REBOOT bit: this disables reboots */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	if (iTCO_wdt_private.iTCO_version >= 2) {
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 |= 0x00000010;
-		writel(val32, iTCO_wdt_private.gcs_pmc);
-	} else if (iTCO_wdt_private.iTCO_version == 2) {
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 |= 0x00000020;
+		val32 |= no_reboot_bit();
 		writel(val32, iTCO_wdt_private.gcs_pmc);
 	} else if (iTCO_wdt_private.iTCO_version == 1) {
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		val32 |= 0x00000002;
+		val32 |= no_reboot_bit();
 		pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
 	}
 }
 
 static int iTCO_wdt_unset_NO_REBOOT_bit(void)
 {
-	int ret = 0;
-	u32 val32;
+	u32 enable_bit = no_reboot_bit();
+	u32 val32 = 0;
 
 	/* Unset the NO_REBOOT bit: this enables reboots */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	if (iTCO_wdt_private.iTCO_version >= 2) {
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 &= 0xffffffef;
+		val32 &= ~enable_bit;
 		writel(val32, iTCO_wdt_private.gcs_pmc);
 
 		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		if (val32 & 0x00000010)
-			ret = -EIO;
-	} else if (iTCO_wdt_private.iTCO_version == 2) {
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		val32 &= 0xffffffdf;
-		writel(val32, iTCO_wdt_private.gcs_pmc);
-
-		val32 = readl(iTCO_wdt_private.gcs_pmc);
-		if (val32 & 0x00000020)
-			ret = -EIO;
 	} else if (iTCO_wdt_private.iTCO_version == 1) {
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		val32 &= 0xfffffffd;
+		val32 &= ~enable_bit;
 		pci_write_config_dword(iTCO_wdt_private.pdev, 0xd4, val32);
 
 		pci_read_config_dword(iTCO_wdt_private.pdev, 0xd4, &val32);
-		if (val32 & 0x00000002)
-			ret = -EIO;
 	}
 
-	return ret; /* returns: 0 = OK, -EIO = Error */
+	if (val32 & enable_bit)
+		return -EIO;
+
+	return 0;
 }
 
 static int iTCO_wdt_start(struct watchdog_device *wd_dev)
@@ -418,9 +425,9 @@
 {
 	int ret = -ENODEV;
 	unsigned long val32;
-	struct lpc_ich_info *ich_info = dev_get_platdata(&dev->dev);
+	struct itco_wdt_platform_data *pdata = dev_get_platdata(&dev->dev);
 
-	if (!ich_info)
+	if (!pdata)
 		goto out;
 
 	spin_lock_init(&iTCO_wdt_private.io_lock);
@@ -435,7 +442,7 @@
 	if (!iTCO_wdt_private.smi_res)
 		goto out;
 
-	iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
+	iTCO_wdt_private.iTCO_version = pdata->version;
 	iTCO_wdt_private.dev = dev;
 	iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
 
@@ -501,15 +508,24 @@
 	}
 
 	pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
-		ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
+		pdata->name, pdata->version, (u64)TCOBASE);
 
 	/* Clear out the (probably old) status */
-	if (iTCO_wdt_private.iTCO_version == 3) {
+	switch (iTCO_wdt_private.iTCO_version) {
+	case 4:
+		outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
+		outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
+		break;
+	case 3:
 		outl(0x20008, TCO1_STS);
-	} else {
+		break;
+	case 2:
+	case 1:
+	default:
 		outw(0x0008, TCO1_STS);	/* Clear the Time Out Status bit */
 		outw(0x0002, TCO2_STS);	/* Clear SECOND_TO_STS bit */
 		outw(0x0004, TCO2_STS);	/* Clear BOOT_STS bit */
+		break;
 	}
 
 	iTCO_wdt_watchdog_dev.bootstatus = 0;
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index b7ea39b..1e41818 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -254,13 +254,10 @@
 
 static int ks8695wdt_remove(struct platform_device *pdev)
 {
-	int res;
+	misc_deregister(&ks8695wdt_miscdev);
+	ks8695wdt_miscdev.parent = NULL;
 
-	res = misc_deregister(&ks8695wdt_miscdev);
-	if (!res)
-		ks8695wdt_miscdev.parent = NULL;
-
-	return res;
+	return 0;
 }
 
 static void ks8695wdt_shutdown(struct platform_device *pdev)
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 119beb7..4b54193 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -428,7 +428,8 @@
 
 static int ts72xx_wdt_remove(struct platform_device *pdev)
 {
-	return misc_deregister(&ts72xx_wdt_miscdev);
+	misc_deregister(&ts72xx_wdt_miscdev);
+	return 0;
 }
 
 static struct platform_driver ts72xx_wdt_driver = {
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 96093ae..ed8bf10 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -336,7 +336,7 @@
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
 #endif
 	xen_evtchn_port_bind_to_cpu(info, cpu);
 
@@ -373,7 +373,7 @@
 	struct irq_info *info;
 #ifdef CONFIG_SMP
 	/* By default all event channels notify CPU#0. */
-	cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(0));
+	cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
 #endif
 
 	info = kzalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index a1800c1..08cb419 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -31,7 +31,7 @@
 asmlinkage __visible void xen_maybe_preempt_hcall(void)
 {
 	if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
-		     && should_resched())) {
+		     && need_resched())) {
 		/*
 		 * Clear flag as we may be rescheduled on a different
 		 * cpu.
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 59fc190..70fa438 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -560,11 +560,9 @@
 
 	return 0;
 err_unregister:
-	for_each_possible_cpu(i) {
-		struct acpi_processor_performance *perf;
-		perf = per_cpu_ptr(acpi_perf_data, i);
-		acpi_processor_unregister_performance(perf, i);
-	}
+	for_each_possible_cpu(i)
+		acpi_processor_unregister_performance(i);
+
 err_out:
 	/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
 	free_acpi_perf_data();
@@ -579,11 +577,9 @@
 	kfree(acpi_ids_done);
 	kfree(acpi_id_present);
 	kfree(acpi_id_cst_present);
-	for_each_possible_cpu(i) {
-		struct acpi_processor_performance *perf;
-		perf = per_cpu_ptr(acpi_perf_data, i);
-		acpi_processor_unregister_performance(perf, i);
-	}
+	for_each_possible_cpu(i)
+		acpi_processor_unregister_performance(i);
+
 	free_acpi_perf_data();
 }
 
diff --git a/fs/Kconfig b/fs/Kconfig
index 011f433..da3f32f 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -11,18 +11,15 @@
 if BLOCK
 
 source "fs/ext2/Kconfig"
-source "fs/ext3/Kconfig"
 source "fs/ext4/Kconfig"
-source "fs/jbd/Kconfig"
 source "fs/jbd2/Kconfig"
 
 config FS_MBCACHE
 # Meta block cache for Extended Attributes (ext2/ext3/ext4)
 	tristate
 	default y if EXT2_FS=y && EXT2_FS_XATTR
-	default y if EXT3_FS=y && EXT3_FS_XATTR
 	default y if EXT4_FS=y
-	default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS
+	default m if EXT2_FS_XATTR || EXT4_FS
 
 source "fs/reiserfs/Kconfig"
 source "fs/jfs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index cb20e4b..09e051f 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -62,12 +62,10 @@
 # Do not add any filesystems before this line
 obj-$(CONFIG_FSCACHE)		+= fscache/
 obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
-obj-$(CONFIG_EXT3_FS)		+= ext3/ # Before ext2 so root fs can be ext3
 obj-$(CONFIG_EXT2_FS)		+= ext2/
 # We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
 # unless explicitly requested by rootfstype
 obj-$(CONFIG_EXT4_FS)		+= ext4/
-obj-$(CONFIG_JBD)		+= jbd/
 obj-$(CONFIG_JBD2)		+= jbd2/
 obj-$(CONFIG_CRAMFS)		+= cramfs/
 obj-$(CONFIG_SQUASHFS)		+= squashfs/
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index ce7dec8..541fbfa 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -343,7 +343,7 @@
 		struct btrfsic_state *state,
 		struct btrfsic_block *const block,
 		struct btrfs_super_block *const super_hdr);
-static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
+static void btrfsic_bio_end_io(struct bio *bp);
 static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
 static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
 					      const struct btrfsic_block *block,
@@ -2207,7 +2207,7 @@
 	goto again;
 }
 
-static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
+static void btrfsic_bio_end_io(struct bio *bp)
 {
 	struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
 	int iodone_w_error;
@@ -2215,7 +2215,7 @@
 	/* mutex is not held! This is not save if IO is not yet completed
 	 * on umount */
 	iodone_w_error = 0;
-	if (bio_error_status)
+	if (bp->bi_error)
 		iodone_w_error = 1;
 
 	BUG_ON(NULL == block);
@@ -2230,7 +2230,7 @@
 		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
 			printk(KERN_INFO
 			       "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
-			       bio_error_status,
+			       bp->bi_error,
 			       btrfsic_get_block_type(dev_state->state, block),
 			       block->logical_bytenr, dev_state->name,
 			       block->dev_bytenr, block->mirror_num);
@@ -2252,7 +2252,7 @@
 		block = next_block;
 	} while (NULL != block);
 
-	bp->bi_end_io(bp, bio_error_status);
+	bp->bi_end_io(bp);
 }
 
 static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ce62324..57ee8ca 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -97,10 +97,7 @@
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
 					u64 first_byte, gfp_t gfp_flags)
 {
-	int nr_vecs;
-
-	nr_vecs = bio_get_nr_vecs(bdev);
-	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
+	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
 }
 
 static int check_compressed_csum(struct inode *inode,
@@ -152,7 +149,7 @@
  * The compressed pages are freed here, and it must be run
  * in process context
  */
-static void end_compressed_bio_read(struct bio *bio, int err)
+static void end_compressed_bio_read(struct bio *bio)
 {
 	struct compressed_bio *cb = bio->bi_private;
 	struct inode *inode;
@@ -160,7 +157,7 @@
 	unsigned long index;
 	int ret;
 
-	if (err)
+	if (bio->bi_error)
 		cb->errors = 1;
 
 	/* if there are more bios still pending for this compressed
@@ -210,7 +207,7 @@
 		bio_for_each_segment_all(bvec, cb->orig_bio, i)
 			SetPageChecked(bvec->bv_page);
 
-		bio_endio(cb->orig_bio, 0);
+		bio_endio(cb->orig_bio);
 	}
 
 	/* finally free the cb struct */
@@ -266,7 +263,7 @@
  * This also calls the writeback end hooks for the file pages so that
  * metadata and checksums can be updated in the file.
  */
-static void end_compressed_bio_write(struct bio *bio, int err)
+static void end_compressed_bio_write(struct bio *bio)
 {
 	struct extent_io_tree *tree;
 	struct compressed_bio *cb = bio->bi_private;
@@ -274,7 +271,7 @@
 	struct page *page;
 	unsigned long index;
 
-	if (err)
+	if (bio->bi_error)
 		cb->errors = 1;
 
 	/* if there are more bios still pending for this compressed
@@ -293,7 +290,7 @@
 					 cb->start,
 					 cb->start + cb->len - 1,
 					 NULL,
-					 err ? 0 : 1);
+					 bio->bi_error ? 0 : 1);
 	cb->compressed_pages[0]->mapping = NULL;
 
 	end_compressed_writeback(inode, cb);
@@ -697,8 +694,10 @@
 
 			ret = btrfs_map_bio(root, READ, comp_bio,
 					    mirror_num, 0);
-			if (ret)
-				bio_endio(comp_bio, ret);
+			if (ret) {
+				comp_bio->bi_error = ret;
+				bio_endio(comp_bio);
+			}
 
 			bio_put(comp_bio);
 
@@ -724,8 +723,10 @@
 	}
 
 	ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
-	if (ret)
-		bio_endio(comp_bio, ret);
+	if (ret) {
+		comp_bio->bi_error = ret;
+		bio_endio(comp_bio);
+	}
 
 	bio_put(comp_bio);
 	return 0;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f556c37..5e307bd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -703,7 +703,7 @@
 	return -EIO;	/* we fixed nothing */
 }
 
-static void end_workqueue_bio(struct bio *bio, int err)
+static void end_workqueue_bio(struct bio *bio)
 {
 	struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
 	struct btrfs_fs_info *fs_info;
@@ -711,7 +711,7 @@
 	btrfs_work_func_t func;
 
 	fs_info = end_io_wq->info;
-	end_io_wq->error = err;
+	end_io_wq->error = bio->bi_error;
 
 	if (bio->bi_rw & REQ_WRITE) {
 		if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@@ -808,7 +808,8 @@
 
 	/* If an error occured we just want to clean up the bio and move on */
 	if (async->error) {
-		bio_endio(async->bio, async->error);
+		async->bio->bi_error = async->error;
+		bio_endio(async->bio);
 		return;
 	}
 
@@ -908,8 +909,10 @@
 	 * submission context.  Just jump into btrfs_map_bio
 	 */
 	ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
-	if (ret)
-		bio_endio(bio, ret);
+	if (ret) {
+		bio->bi_error = ret;
+		bio_endio(bio);
+	}
 	return ret;
 }
 
@@ -960,10 +963,13 @@
 					  __btree_submit_bio_done);
 	}
 
-	if (ret) {
+	if (ret)
+		goto out_w_error;
+	return 0;
+
 out_w_error:
-		bio_endio(bio, ret);
-	}
+	bio->bi_error = ret;
+	bio_endio(bio);
 	return ret;
 }
 
@@ -1735,16 +1741,15 @@
 {
 	struct bio *bio;
 	struct btrfs_end_io_wq *end_io_wq;
-	int error;
 
 	end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
 	bio = end_io_wq->bio;
 
-	error = end_io_wq->error;
+	bio->bi_error = end_io_wq->error;
 	bio->bi_private = end_io_wq->private;
 	bio->bi_end_io = end_io_wq->end_io;
 	kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
-	bio_endio(bio, error);
+	bio_endio(bio);
 }
 
 static int cleaner_kthread(void *arg)
@@ -3324,10 +3329,8 @@
  * endio for the write_dev_flush, this will wake anyone waiting
  * for the barrier when it is done
  */
-static void btrfs_end_empty_barrier(struct bio *bio, int err)
+static void btrfs_end_empty_barrier(struct bio *bio)
 {
-	if (err)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
 	if (bio->bi_private)
 		complete(bio->bi_private);
 	bio_put(bio);
@@ -3355,8 +3358,8 @@
 
 		wait_for_completion(&device->flush_wait);
 
-		if (!bio_flagged(bio, BIO_UPTODATE)) {
-			ret = -EIO;
+		if (bio->bi_error) {
+			ret = bio->bi_error;
 			btrfs_dev_stat_inc_and_print(device,
 				BTRFS_DEV_STAT_FLUSH_ERRS);
 		}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 02d0581..68b12bb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2486,7 +2486,7 @@
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-static void end_bio_extent_writepage(struct bio *bio, int err)
+static void end_bio_extent_writepage(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	u64 start;
@@ -2516,7 +2516,7 @@
 		start = page_offset(page);
 		end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-		if (end_extent_writepage(page, err, start, end))
+		if (end_extent_writepage(page, bio->bi_error, start, end))
 			continue;
 
 		end_page_writeback(page);
@@ -2548,10 +2548,10 @@
  * Scheduling is not allowed, so the extent state tree is expected
  * to have one and only one object corresponding to this IO.
  */
-static void end_bio_extent_readpage(struct bio *bio, int err)
+static void end_bio_extent_readpage(struct bio *bio)
 {
 	struct bio_vec *bvec;
-	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = !bio->bi_error;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
 	struct extent_io_tree *tree;
 	u64 offset = 0;
@@ -2564,16 +2564,13 @@
 	int ret;
 	int i;
 
-	if (err)
-		uptodate = 0;
-
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 		struct inode *inode = page->mapping->host;
 
 		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
-			 "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err,
-			 io_bio->mirror_num);
+			 "mirror=%u\n", (u64)bio->bi_iter.bi_sector,
+			 bio->bi_error, io_bio->mirror_num);
 		tree = &BTRFS_I(inode)->io_tree;
 
 		/* We always issue full-page reads, but if some block
@@ -2614,8 +2611,7 @@
 
 		if (tree->ops && tree->ops->readpage_io_failed_hook) {
 			ret = tree->ops->readpage_io_failed_hook(page, mirror);
-			if (!ret && !err &&
-			    test_bit(BIO_UPTODATE, &bio->bi_flags))
+			if (!ret && !bio->bi_error)
 				uptodate = 1;
 		} else {
 			/*
@@ -2631,10 +2627,7 @@
 			ret = bio_readpage_error(bio, offset, page, start, end,
 						 mirror);
 			if (ret == 0) {
-				uptodate =
-					test_bit(BIO_UPTODATE, &bio->bi_flags);
-				if (err)
-					uptodate = 0;
+				uptodate = !bio->bi_error;
 				offset += len;
 				continue;
 			}
@@ -2684,7 +2677,7 @@
 		endio_readpage_release_extent(tree, extent_start, extent_len,
 					      uptodate);
 	if (io_bio->end_io)
-		io_bio->end_io(io_bio, err);
+		io_bio->end_io(io_bio, bio->bi_error);
 	bio_put(bio);
 }
 
@@ -2802,9 +2795,7 @@
 {
 	int ret = 0;
 	struct bio *bio;
-	int nr;
 	int contig = 0;
-	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
 	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
 
@@ -2829,12 +2820,9 @@
 			return 0;
 		}
 	}
-	if (this_compressed)
-		nr = BIO_MAX_PAGES;
-	else
-		nr = bio_get_nr_vecs(bdev);
 
-	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+	bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES,
+			GFP_NOFS | __GFP_HIGH);
 	if (!bio)
 		return -ENOMEM;
 
@@ -3696,7 +3684,7 @@
 	}
 }
 
-static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
+static void end_bio_extent_buffer_writepage(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	struct extent_buffer *eb;
@@ -3709,7 +3697,8 @@
 		BUG_ON(!eb);
 		done = atomic_dec_and_test(&eb->io_pages);
 
-		if (err || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
+		if (bio->bi_error ||
+		    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
 			ClearPageUptodate(page);
 			set_btree_ioerr(page);
 		}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e33dff3..f924d9a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1845,8 +1845,10 @@
 	int ret;
 
 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
-	if (ret)
-		bio_endio(bio, ret);
+	if (ret) {
+		bio->bi_error = ret;
+		bio_endio(bio);
+	}
 	return ret;
 }
 
@@ -1906,8 +1908,10 @@
 	ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
 
 out:
-	if (ret < 0)
-		bio_endio(bio, ret);
+	if (ret < 0) {
+		bio->bi_error = ret;
+		bio_endio(bio);
+	}
 	return ret;
 }
 
@@ -7688,13 +7692,13 @@
 	int uptodate;
 };
 
-static void btrfs_retry_endio_nocsum(struct bio *bio, int err)
+static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
 	struct bio_vec *bvec;
 	int i;
 
-	if (err)
+	if (bio->bi_error)
 		goto end;
 
 	done->uptodate = 1;
@@ -7743,7 +7747,7 @@
 	return 0;
 }
 
-static void btrfs_retry_endio(struct bio *bio, int err)
+static void btrfs_retry_endio(struct bio *bio)
 {
 	struct btrfs_retry_complete *done = bio->bi_private;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
@@ -7752,7 +7756,7 @@
 	int ret;
 	int i;
 
-	if (err)
+	if (bio->bi_error)
 		goto end;
 
 	uptodate = 1;
@@ -7835,12 +7839,13 @@
 	}
 }
 
-static void btrfs_endio_direct_read(struct bio *bio, int err)
+static void btrfs_endio_direct_read(struct bio *bio)
 {
 	struct btrfs_dio_private *dip = bio->bi_private;
 	struct inode *inode = dip->inode;
 	struct bio *dio_bio;
 	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+	int err = bio->bi_error;
 
 	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
 		err = btrfs_subio_endio_read(inode, io_bio, err);
@@ -7851,17 +7856,14 @@
 
 	kfree(dip);
 
-	/* If we had a csum failure make sure to clear the uptodate flag */
-	if (err)
-		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
-	dio_end_io(dio_bio, err);
+	dio_end_io(dio_bio, bio->bi_error);
 
 	if (io_bio->end_io)
 		io_bio->end_io(io_bio, err);
 	bio_put(bio);
 }
 
-static void btrfs_endio_direct_write(struct bio *bio, int err)
+static void btrfs_endio_direct_write(struct bio *bio)
 {
 	struct btrfs_dio_private *dip = bio->bi_private;
 	struct inode *inode = dip->inode;
@@ -7875,7 +7877,8 @@
 again:
 	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
 						   &ordered_offset,
-						   ordered_bytes, !err);
+						   ordered_bytes,
+						   !bio->bi_error);
 	if (!ret)
 		goto out_test;
 
@@ -7898,10 +7901,7 @@
 
 	kfree(dip);
 
-	/* If we had an error make sure to clear the uptodate flag */
-	if (err)
-		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
-	dio_end_io(dio_bio, err);
+	dio_end_io(dio_bio, bio->bi_error);
 	bio_put(bio);
 }
 
@@ -7916,9 +7916,10 @@
 	return 0;
 }
 
-static void btrfs_end_dio_bio(struct bio *bio, int err)
+static void btrfs_end_dio_bio(struct bio *bio)
 {
 	struct btrfs_dio_private *dip = bio->bi_private;
+	int err = bio->bi_error;
 
 	if (err)
 		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
@@ -7947,8 +7948,8 @@
 	if (dip->errors) {
 		bio_io_error(dip->orig_bio);
 	} else {
-		set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
-		bio_endio(dip->orig_bio, 0);
+		dip->dio_bio->bi_error = 0;
+		bio_endio(dip->orig_bio);
 	}
 out:
 	bio_put(bio);
@@ -7957,8 +7958,7 @@
 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
 				       u64 first_sector, gfp_t gfp_flags)
 {
-	int nr_vecs = bio_get_nr_vecs(bdev);
-	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
+	return btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
 }
 
 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
@@ -8219,7 +8219,8 @@
 	 * callbacks - they require an allocated dip and a clone of dio_bio.
 	 */
 	if (io_bio && dip) {
-		bio_endio(io_bio, ret);
+		io_bio->bi_error = -EIO;
+		bio_endio(io_bio);
 		/*
 		 * The end io callbacks free our dip, do the final put on io_bio
 		 * and all the cleanup and final put for dio_bio (through
@@ -8246,7 +8247,7 @@
 			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
 			      file_offset + dio_bio->bi_iter.bi_size - 1);
 		}
-		clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
+		dio_bio->bi_error = -EIO;
 		/*
 		 * Releases and cleans up our dio_bio, no need to bio_put()
 		 * nor bio_endio()/bio_io_error() against dio_bio.
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8a82029..d904ee1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -376,7 +376,7 @@
 		qgroup = find_qgroup_rb(fs_info, found_key.offset);
 		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
 		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
-			btrfs_err(fs_info, "inconsitent qgroup config");
+			btrfs_err(fs_info, "inconsistent qgroup config");
 			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
 		}
 		if (!qgroup) {
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index fa72068..0a02e24 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -851,7 +851,7 @@
  * this frees the rbio and runs through all the bios in the
  * bio_list and calls end_io on them
  */
-static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 {
 	struct bio *cur = bio_list_get(&rbio->bio_list);
 	struct bio *next;
@@ -864,9 +864,8 @@
 	while (cur) {
 		next = cur->bi_next;
 		cur->bi_next = NULL;
-		if (uptodate)
-			set_bit(BIO_UPTODATE, &cur->bi_flags);
-		bio_endio(cur, err);
+		cur->bi_error = err;
+		bio_endio(cur);
 		cur = next;
 	}
 }
@@ -875,9 +874,10 @@
  * end io function used by finish_rmw.  When we finally
  * get here, we've written a full stripe
  */
-static void raid_write_end_io(struct bio *bio, int err)
+static void raid_write_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
+	int err = bio->bi_error;
 
 	if (err)
 		fail_bio_stripe(rbio, bio);
@@ -893,7 +893,7 @@
 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
 		err = -EIO;
 
-	rbio_orig_end_io(rbio, err, 0);
+	rbio_orig_end_io(rbio, err);
 	return;
 }
 
@@ -1071,7 +1071,7 @@
 		 * devices or if they are not contiguous
 		 */
 		if (last_end == disk_start && stripe->dev->bdev &&
-		    test_bit(BIO_UPTODATE, &last->bi_flags) &&
+		    !last->bi_error &&
 		    last->bi_bdev == stripe->dev->bdev) {
 			ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
 			if (ret == PAGE_CACHE_SIZE)
@@ -1087,7 +1087,6 @@
 	bio->bi_iter.bi_size = 0;
 	bio->bi_bdev = stripe->dev->bdev;
 	bio->bi_iter.bi_sector = disk_start >> 9;
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
 
 	bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
 	bio_list_add(bio_list, bio);
@@ -1312,13 +1311,12 @@
 
 		bio->bi_private = rbio;
 		bio->bi_end_io = raid_write_end_io;
-		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
 		submit_bio(WRITE, bio);
 	}
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO, 0);
+	rbio_orig_end_io(rbio, -EIO);
 }
 
 /*
@@ -1441,11 +1439,11 @@
  * This will usually kick off finish_rmw once all the bios are read in, but it
  * may trigger parity reconstruction if we had any errors along the way
  */
-static void raid_rmw_end_io(struct bio *bio, int err)
+static void raid_rmw_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
 
-	if (err)
+	if (bio->bi_error)
 		fail_bio_stripe(rbio, bio);
 	else
 		set_bio_pages_uptodate(bio);
@@ -1455,7 +1453,6 @@
 	if (!atomic_dec_and_test(&rbio->stripes_pending))
 		return;
 
-	err = 0;
 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
 		goto cleanup;
 
@@ -1469,7 +1466,7 @@
 
 cleanup:
 
-	rbio_orig_end_io(rbio, -EIO, 0);
+	rbio_orig_end_io(rbio, -EIO);
 }
 
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
@@ -1572,14 +1569,13 @@
 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
 				    BTRFS_WQ_ENDIO_RAID56);
 
-		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
 		submit_bio(READ, bio);
 	}
 	/* the actual write will happen once the reads are done */
 	return 0;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO, 0);
+	rbio_orig_end_io(rbio, -EIO);
 	return -EIO;
 
 finish:
@@ -1964,7 +1960,7 @@
 		else
 			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
 
-		rbio_orig_end_io(rbio, err, err == 0);
+		rbio_orig_end_io(rbio, err);
 	} else if (err == 0) {
 		rbio->faila = -1;
 		rbio->failb = -1;
@@ -1976,7 +1972,7 @@
 		else
 			BUG();
 	} else {
-		rbio_orig_end_io(rbio, err, 0);
+		rbio_orig_end_io(rbio, err);
 	}
 }
 
@@ -1984,7 +1980,7 @@
  * This is called only for stripes we've read from disk to
  * reconstruct the parity.
  */
-static void raid_recover_end_io(struct bio *bio, int err)
+static void raid_recover_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
 
@@ -1992,7 +1988,7 @@
 	 * we only read stripe pages off the disk, set them
 	 * up to date if there were no errors
 	 */
-	if (err)
+	if (bio->bi_error)
 		fail_bio_stripe(rbio, bio);
 	else
 		set_bio_pages_uptodate(bio);
@@ -2002,7 +1998,7 @@
 		return;
 
 	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
-		rbio_orig_end_io(rbio, -EIO, 0);
+		rbio_orig_end_io(rbio, -EIO);
 	else
 		__raid_recover_end_io(rbio);
 }
@@ -2094,7 +2090,6 @@
 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
 				    BTRFS_WQ_ENDIO_RAID56);
 
-		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
 		submit_bio(READ, bio);
 	}
 out:
@@ -2102,7 +2097,7 @@
 
 cleanup:
 	if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
-		rbio_orig_end_io(rbio, -EIO, 0);
+		rbio_orig_end_io(rbio, -EIO);
 	return -EIO;
 }
 
@@ -2277,11 +2272,12 @@
  * end io function used by finish_rmw.  When we finally
  * get here, we've written a full stripe
  */
-static void raid_write_parity_end_io(struct bio *bio, int err)
+static void raid_write_parity_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
+	int err = bio->bi_error;
 
-	if (err)
+	if (bio->bi_error)
 		fail_bio_stripe(rbio, bio);
 
 	bio_put(bio);
@@ -2294,7 +2290,7 @@
 	if (atomic_read(&rbio->error))
 		err = -EIO;
 
-	rbio_orig_end_io(rbio, err, 0);
+	rbio_orig_end_io(rbio, err);
 }
 
 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
@@ -2437,7 +2433,7 @@
 	nr_data = bio_list_size(&bio_list);
 	if (!nr_data) {
 		/* Every parity is right */
-		rbio_orig_end_io(rbio, 0, 0);
+		rbio_orig_end_io(rbio, 0);
 		return;
 	}
 
@@ -2450,13 +2446,12 @@
 
 		bio->bi_private = rbio;
 		bio->bi_end_io = raid_write_parity_end_io;
-		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
 		submit_bio(WRITE, bio);
 	}
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO, 0);
+	rbio_orig_end_io(rbio, -EIO);
 }
 
 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
@@ -2524,7 +2519,7 @@
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO, 0);
+	rbio_orig_end_io(rbio, -EIO);
 }
 
 /*
@@ -2535,11 +2530,11 @@
  * This will usually kick off finish_rmw once all the bios are read in, but it
  * may trigger parity reconstruction if we had any errors along the way
  */
-static void raid56_parity_scrub_end_io(struct bio *bio, int err)
+static void raid56_parity_scrub_end_io(struct bio *bio)
 {
 	struct btrfs_raid_bio *rbio = bio->bi_private;
 
-	if (err)
+	if (bio->bi_error)
 		fail_bio_stripe(rbio, bio);
 	else
 		set_bio_pages_uptodate(bio);
@@ -2632,14 +2627,13 @@
 		btrfs_bio_wq_end_io(rbio->fs_info, bio,
 				    BTRFS_WQ_ENDIO_RAID56);
 
-		BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
 		submit_bio(READ, bio);
 	}
 	/* the actual write will happen once the reads are done */
 	return;
 
 cleanup:
-	rbio_orig_end_io(rbio, -EIO, 0);
+	rbio_orig_end_io(rbio, -EIO);
 	return;
 
 finish:
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 94db0fa..9c146d8 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -278,7 +278,7 @@
 		       u64 physical, struct btrfs_device *dev, u64 flags,
 		       u64 gen, int mirror_num, u8 *csum, int force,
 		       u64 physical_for_dev_replace);
-static void scrub_bio_end_io(struct bio *bio, int err);
+static void scrub_bio_end_io(struct bio *bio);
 static void scrub_bio_end_io_worker(struct btrfs_work *work);
 static void scrub_block_complete(struct scrub_block *sblock);
 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
@@ -295,7 +295,7 @@
 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 				    struct scrub_page *spage);
 static void scrub_wr_submit(struct scrub_ctx *sctx);
-static void scrub_wr_bio_end_io(struct bio *bio, int err);
+static void scrub_wr_bio_end_io(struct bio *bio);
 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
 static int write_page_nocow(struct scrub_ctx *sctx,
 			    u64 physical_for_dev_replace, struct page *page);
@@ -454,27 +454,14 @@
 	struct scrub_ctx *sctx;
 	int		i;
 	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
-	int pages_per_rd_bio;
 	int ret;
 
-	/*
-	 * the setting of pages_per_rd_bio is correct for scrub but might
-	 * be wrong for the dev_replace code where we might read from
-	 * different devices in the initial huge bios. However, that
-	 * code is able to correctly handle the case when adding a page
-	 * to a bio fails.
-	 */
-	if (dev->bdev)
-		pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
-					 bio_get_nr_vecs(dev->bdev));
-	else
-		pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
 	sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
 	if (!sctx)
 		goto nomem;
 	atomic_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
-	sctx->pages_per_rd_bio = pages_per_rd_bio;
+	sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
 	sctx->curr = -1;
 	sctx->dev_root = dev->dev_root;
 	for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
@@ -1429,11 +1416,11 @@
 	int error;
 };
 
-static void scrub_bio_wait_endio(struct bio *bio, int error)
+static void scrub_bio_wait_endio(struct bio *bio)
 {
 	struct scrub_bio_ret *ret = bio->bi_private;
 
-	ret->error = error;
+	ret->error = bio->bi_error;
 	complete(&ret->event);
 }
 
@@ -1790,12 +1777,12 @@
 	btrfsic_submit_bio(WRITE, sbio->bio);
 }
 
-static void scrub_wr_bio_end_io(struct bio *bio, int err)
+static void scrub_wr_bio_end_io(struct bio *bio)
 {
 	struct scrub_bio *sbio = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
 
-	sbio->err = err;
+	sbio->err = bio->bi_error;
 	sbio->bio = bio;
 
 	btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
@@ -2098,7 +2085,7 @@
 		 */
 		printk_ratelimited(KERN_WARNING
 			"BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
-		bio_endio(sbio->bio, -EIO);
+		bio_io_error(sbio->bio);
 	} else {
 		btrfsic_submit_bio(READ, sbio->bio);
 	}
@@ -2260,12 +2247,12 @@
 	return 0;
 }
 
-static void scrub_bio_end_io(struct bio *bio, int err)
+static void scrub_bio_end_io(struct bio *bio)
 {
 	struct scrub_bio *sbio = bio->bi_private;
 	struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
 
-	sbio->err = err;
+	sbio->err = bio->bi_error;
 	sbio->bio = bio;
 
 	btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
@@ -2672,11 +2659,11 @@
 	scrub_pending_bio_dec(sctx);
 }
 
-static void scrub_parity_bio_endio(struct bio *bio, int error)
+static void scrub_parity_bio_endio(struct bio *bio)
 {
 	struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
 
-	if (error)
+	if (bio->bi_error)
 		bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
 			  sparity->nsectors);
 
@@ -3896,8 +3883,7 @@
 		return 0;
 
 	WARN_ON(!dev->bdev);
-	wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
-					 bio_get_nr_vecs(dev->bdev));
+	wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
 	wr_ctx->tgtdev = dev;
 	atomic_set(&wr_ctx->flush_all_writes, 0);
 	return 0;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index cd7ef34..6bad633 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2163,8 +2163,7 @@
 
 static void btrfs_interface_exit(void)
 {
-	if (misc_deregister(&btrfs_misc) < 0)
-		printk(KERN_INFO "BTRFS: misc_deregister failed for control device\n");
+	misc_deregister(&btrfs_misc);
 }
 
 static void btrfs_print_info(void)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index fbe7c10..762476f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5741,23 +5741,23 @@
 	return 0;
 }
 
-static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
+static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
 {
 	bio->bi_private = bbio->private;
 	bio->bi_end_io = bbio->end_io;
-	bio_endio(bio, err);
+	bio_endio(bio);
 
 	btrfs_put_bbio(bbio);
 }
 
-static void btrfs_end_bio(struct bio *bio, int err)
+static void btrfs_end_bio(struct bio *bio)
 {
 	struct btrfs_bio *bbio = bio->bi_private;
 	int is_orig_bio = 0;
 
-	if (err) {
+	if (bio->bi_error) {
 		atomic_inc(&bbio->error);
-		if (err == -EIO || err == -EREMOTEIO) {
+		if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
 			unsigned int stripe_index =
 				btrfs_io_bio(bio)->stripe_index;
 			struct btrfs_device *dev;
@@ -5795,17 +5795,16 @@
 		 * beyond the tolerance of the btrfs bio
 		 */
 		if (atomic_read(&bbio->error) > bbio->max_errors) {
-			err = -EIO;
+			bio->bi_error = -EIO;
 		} else {
 			/*
 			 * this bio is actually up to date, we didn't
 			 * go over the max number of errors
 			 */
-			set_bit(BIO_UPTODATE, &bio->bi_flags);
-			err = 0;
+			bio->bi_error = 0;
 		}
 
-		btrfs_end_bbio(bbio, bio, err);
+		btrfs_end_bbio(bbio, bio);
 	} else if (!is_orig_bio) {
 		bio_put(bio);
 	}
@@ -5826,7 +5825,7 @@
 	struct btrfs_pending_bios *pending_bios;
 
 	if (device->missing || !device->bdev) {
-		bio_endio(bio, -EIO);
+		bio_io_error(bio);
 		return;
 	}
 
@@ -5871,34 +5870,6 @@
 				 &device->work);
 }
 
-static int bio_size_ok(struct block_device *bdev, struct bio *bio,
-		       sector_t sector)
-{
-	struct bio_vec *prev;
-	struct request_queue *q = bdev_get_queue(bdev);
-	unsigned int max_sectors = queue_max_sectors(q);
-	struct bvec_merge_data bvm = {
-		.bi_bdev = bdev,
-		.bi_sector = sector,
-		.bi_rw = bio->bi_rw,
-	};
-
-	if (WARN_ON(bio->bi_vcnt == 0))
-		return 1;
-
-	prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-	if (bio_sectors(bio) > max_sectors)
-		return 0;
-
-	if (!q->merge_bvec_fn)
-		return 1;
-
-	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
-	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
-		return 0;
-	return 1;
-}
-
 static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 			      struct bio *bio, u64 physical, int dev_nr,
 			      int rw, int async)
@@ -5932,38 +5903,6 @@
 		btrfsic_submit_bio(rw, bio);
 }
 
-static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
-			      struct bio *first_bio, struct btrfs_device *dev,
-			      int dev_nr, int rw, int async)
-{
-	struct bio_vec *bvec = first_bio->bi_io_vec;
-	struct bio *bio;
-	int nr_vecs = bio_get_nr_vecs(dev->bdev);
-	u64 physical = bbio->stripes[dev_nr].physical;
-
-again:
-	bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS);
-	if (!bio)
-		return -ENOMEM;
-
-	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
-		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-				 bvec->bv_offset) < bvec->bv_len) {
-			u64 len = bio->bi_iter.bi_size;
-
-			atomic_inc(&bbio->stripes_pending);
-			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
-					  rw, async);
-			physical += len;
-			goto again;
-		}
-		bvec++;
-	}
-
-	submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async);
-	return 0;
-}
-
 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 {
 	atomic_inc(&bbio->error);
@@ -5973,8 +5912,8 @@
 
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
 		bio->bi_iter.bi_sector = logical >> 9;
-
-		btrfs_end_bbio(bbio, bio, -EIO);
+		bio->bi_error = -EIO;
+		btrfs_end_bbio(bbio, bio);
 	}
 }
 
@@ -6036,18 +5975,6 @@
 			continue;
 		}
 
-		/*
-		 * Check and see if we're ok with this bio based on it's size
-		 * and offset with the given device.
-		 */
-		if (!bio_size_ok(dev->bdev, first_bio,
-				 bbio->stripes[dev_nr].physical >> 9)) {
-			ret = breakup_stripe_bio(root, bbio, first_bio, dev,
-						 dev_nr, rw, async_submit);
-			BUG_ON(ret);
-			continue;
-		}
-
 		if (dev_nr < total_devs - 1) {
 			bio = btrfs_bio_clone(first_bio, GFP_NOFS);
 			BUG_ON(!bio); /* -ENOMEM */
diff --git a/fs/buffer.c b/fs/buffer.c
index 1cf7a53..82283ab 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2957,14 +2957,14 @@
 }
 EXPORT_SYMBOL(generic_block_bmap);
 
-static void end_bio_bh_io_sync(struct bio *bio, int err)
+static void end_bio_bh_io_sync(struct bio *bio)
 {
 	struct buffer_head *bh = bio->bi_private;
 
-	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
+	if (unlikely(bio_flagged(bio, BIO_QUIET)))
 		set_bit(BH_Quiet, &bh->b_state);
 
-	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
+	bh->b_end_io(bh, !bio->bi_error);
 	bio_put(bio);
 }
 
@@ -3046,12 +3046,9 @@
 
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
-	bio->bi_io_vec[0].bv_page = bh->b_page;
-	bio->bi_io_vec[0].bv_len = bh->b_size;
-	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
-	bio->bi_vcnt = 1;
-	bio->bi_iter.bi_size = bh->b_size;
+	bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+	BUG_ON(bio->bi_iter.bi_size != bh->b_size);
 
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
diff --git a/fs/char_dev.c b/fs/char_dev.c
index ea06a3d..24b1425 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -274,7 +274,7 @@
 }
 
 /**
- * unregister_chrdev_region() - return a range of device numbers
+ * unregister_chrdev_region() - unregister a range of device numbers
  * @from: the first in the range of numbers to unregister
  * @count: the number of device numbers to unregister
  *
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 745d234..1125629 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -285,7 +285,7 @@
 /*
  * Asynchronous IO callback. 
  */
-static void dio_bio_end_aio(struct bio *bio, int error)
+static void dio_bio_end_aio(struct bio *bio)
 {
 	struct dio *dio = bio->bi_private;
 	unsigned long remaining;
@@ -318,7 +318,7 @@
  * During I/O bi_private points at the dio.  After I/O, bi_private is used to
  * implement a singly-linked list of completed BIOs, at dio->bio_list.
  */
-static void dio_bio_end_io(struct bio *bio, int error)
+static void dio_bio_end_io(struct bio *bio)
 {
 	struct dio *dio = bio->bi_private;
 	unsigned long flags;
@@ -345,9 +345,9 @@
 	struct dio *dio = bio->bi_private;
 
 	if (dio->is_async)
-		dio_bio_end_aio(bio, error);
+		dio_bio_end_aio(bio);
 	else
-		dio_bio_end_io(bio, error);
+		dio_bio_end_io(bio);
 }
 EXPORT_SYMBOL_GPL(dio_end_io);
 
@@ -457,15 +457,16 @@
  */
 static int dio_bio_complete(struct dio *dio, struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec;
 	unsigned i;
+	int err;
 
-	if (!uptodate)
+	if (bio->bi_error)
 		dio->io_error = -EIO;
 
 	if (dio->is_async && dio->rw == READ) {
 		bio_check_pages_dirty(bio);	/* transfers ownership */
+		err = bio->bi_error;
 	} else {
 		bio_for_each_segment_all(bvec, bio, i) {
 			struct page *page = bvec->bv_page;
@@ -474,9 +475,10 @@
 				set_page_dirty_lock(page);
 			page_cache_release(page);
 		}
+		err = bio->bi_error;
 		bio_put(bio);
 	}
-	return uptodate ? 0 : -EIO;
+	return err;
 }
 
 /*
@@ -653,7 +655,7 @@
 	if (ret)
 		goto out;
 	sector = start_sector << (sdio->blkbits - 9);
-	nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev));
+	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
 	BUG_ON(nr_pages <= 0);
 	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
 	sdio->boundary = 0;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 754fd6c..87e9d79 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -120,12 +120,11 @@
 	struct cbuf cb;
 	int retries;
 #define MAX_CONNECT_RETRIES 3
-	int sctp_assoc;
 	struct hlist_node list;
 	struct connection *othercon;
 	struct work_struct rwork; /* Receive workqueue */
 	struct work_struct swork; /* Send workqueue */
-	bool try_new_addr;
+	void (*orig_error_report)(struct sock *sk);
 };
 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
 
@@ -252,26 +251,6 @@
 	return con;
 }
 
-/* This is a bit drastic, but only called when things go wrong */
-static struct connection *assoc2con(int assoc_id)
-{
-	int i;
-	struct connection *con;
-
-	mutex_lock(&connections_lock);
-
-	for (i = 0 ; i < CONN_HASH_SIZE; i++) {
-		hlist_for_each_entry(con, &connection_hash[i], list) {
-			if (con->sctp_assoc == assoc_id) {
-				mutex_unlock(&connections_lock);
-				return con;
-			}
-		}
-	}
-	mutex_unlock(&connections_lock);
-	return NULL;
-}
-
 static struct dlm_node_addr *find_node_addr(int nodeid)
 {
 	struct dlm_node_addr *na;
@@ -322,14 +301,14 @@
 	spin_lock(&dlm_node_addrs_spin);
 	na = find_node_addr(nodeid);
 	if (na && na->addr_count) {
+		memcpy(&sas, na->addr[na->curr_addr_index],
+		       sizeof(struct sockaddr_storage));
+
 		if (try_new_addr) {
 			na->curr_addr_index++;
 			if (na->curr_addr_index == na->addr_count)
 				na->curr_addr_index = 0;
 		}
-
-		memcpy(&sas, na->addr[na->curr_addr_index ],
-			sizeof(struct sockaddr_storage));
 	}
 	spin_unlock(&dlm_node_addrs_spin);
 
@@ -459,18 +438,23 @@
 
 static void lowcomms_state_change(struct sock *sk)
 {
-	if (sk->sk_state == TCP_ESTABLISHED)
+	/* SCTP layer is not calling sk_data_ready when the connection
+	 * is done, so we catch the signal through here. Also, it
+	 * doesn't switch socket state when entering shutdown, so we
+	 * skip the write in that case.
+	 */
+	if (sk->sk_shutdown) {
+		if (sk->sk_shutdown == RCV_SHUTDOWN)
+			lowcomms_data_ready(sk);
+	} else if (sk->sk_state == TCP_ESTABLISHED) {
 		lowcomms_write_space(sk);
+	}
 }
 
 int dlm_lowcomms_connect_node(int nodeid)
 {
 	struct connection *con;
 
-	/* with sctp there's no connecting without sending */
-	if (dlm_config.ci_protocol != 0)
-		return 0;
-
 	if (nodeid == dlm_our_nodeid())
 		return 0;
 
@@ -481,6 +465,43 @@
 	return 0;
 }
 
+static void lowcomms_error_report(struct sock *sk)
+{
+	struct connection *con = sock2con(sk);
+	struct sockaddr_storage saddr;
+
+	if (nodeid_to_addr(con->nodeid, &saddr, NULL, false)) {
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d, port %d, "
+				   "sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, dlm_config.ci_tcp_port,
+				   sk->sk_err, sk->sk_err_soft);
+		return;
+	} else if (saddr.ss_family == AF_INET) {
+		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;
+
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d at %pI4, port %d, "
+				   "sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, &sin4->sin_addr.s_addr,
+				   dlm_config.ci_tcp_port, sk->sk_err,
+				   sk->sk_err_soft);
+	} else {
+		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;
+
+		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
+				   "sending to node %d at %u.%u.%u.%u, "
+				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
+				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
+				   sin6->sin6_addr.s6_addr32[1],
+				   sin6->sin6_addr.s6_addr32[2],
+				   sin6->sin6_addr.s6_addr32[3],
+				   dlm_config.ci_tcp_port, sk->sk_err,
+				   sk->sk_err_soft);
+	}
+	con->orig_error_report(sk);
+}
+
 /* Make a socket active */
 static void add_sock(struct socket *sock, struct connection *con)
 {
@@ -492,6 +513,8 @@
 	con->sock->sk->sk_state_change = lowcomms_state_change;
 	con->sock->sk->sk_user_data = con;
 	con->sock->sk->sk_allocation = GFP_NOFS;
+	con->orig_error_report = con->sock->sk->sk_error_report;
+	con->sock->sk->sk_error_report = lowcomms_error_report;
 }
 
 /* Add the port number to an IPv6 or 4 sockaddr and return the address
@@ -514,17 +537,24 @@
 }
 
 /* Close a remote connection and tidy up */
-static void close_connection(struct connection *con, bool and_other)
+static void close_connection(struct connection *con, bool and_other,
+			     bool tx, bool rx)
 {
-	mutex_lock(&con->sock_mutex);
+	clear_bit(CF_CONNECT_PENDING, &con->flags);
+	clear_bit(CF_WRITE_PENDING, &con->flags);
+	if (tx && cancel_work_sync(&con->swork))
+		log_print("canceled swork for node %d", con->nodeid);
+	if (rx && cancel_work_sync(&con->rwork))
+		log_print("canceled rwork for node %d", con->nodeid);
 
+	mutex_lock(&con->sock_mutex);
 	if (con->sock) {
 		sock_release(con->sock);
 		con->sock = NULL;
 	}
 	if (con->othercon && and_other) {
 		/* Will only re-enter once. */
-		close_connection(con->othercon, false);
+		close_connection(con->othercon, false, true, true);
 	}
 	if (con->rx_page) {
 		__free_page(con->rx_page);
@@ -535,254 +565,6 @@
 	mutex_unlock(&con->sock_mutex);
 }
 
-/* We only send shutdown messages to nodes that are not part of the cluster */
-static void sctp_send_shutdown(sctp_assoc_t associd)
-{
-	static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
-	struct msghdr outmessage;
-	struct cmsghdr *cmsg;
-	struct sctp_sndrcvinfo *sinfo;
-	int ret;
-	struct connection *con;
-
-	con = nodeid2con(0,0);
-	BUG_ON(con == NULL);
-
-	outmessage.msg_name = NULL;
-	outmessage.msg_namelen = 0;
-	outmessage.msg_control = outcmsg;
-	outmessage.msg_controllen = sizeof(outcmsg);
-	outmessage.msg_flags = MSG_EOR;
-
-	cmsg = CMSG_FIRSTHDR(&outmessage);
-	cmsg->cmsg_level = IPPROTO_SCTP;
-	cmsg->cmsg_type = SCTP_SNDRCV;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo = CMSG_DATA(cmsg);
-	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-
-	sinfo->sinfo_flags |= MSG_EOF;
-	sinfo->sinfo_assoc_id = associd;
-
-	ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0);
-
-	if (ret != 0)
-		log_print("send EOF to node failed: %d", ret);
-}
-
-static void sctp_init_failed_foreach(struct connection *con)
-{
-
-	/*
-	 * Don't try to recover base con and handle race where the
-	 * other node's assoc init creates a assoc and we get that
-	 * notification, then we get a notification that our attempt
-	 * failed due. This happens when we are still trying the primary
-	 * address, but the other node has already tried secondary addrs
-	 * and found one that worked.
-	 */
-	if (!con->nodeid || con->sctp_assoc)
-		return;
-
-	log_print("Retrying SCTP association init for node %d\n", con->nodeid);
-
-	con->try_new_addr = true;
-	con->sctp_assoc = 0;
-	if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) {
-		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
-			queue_work(send_workqueue, &con->swork);
-	}
-}
-
-/* INIT failed but we don't know which node...
-   restart INIT on all pending nodes */
-static void sctp_init_failed(void)
-{
-	mutex_lock(&connections_lock);
-
-	foreach_conn(sctp_init_failed_foreach);
-
-	mutex_unlock(&connections_lock);
-}
-
-static void retry_failed_sctp_send(struct connection *recv_con,
-				   struct sctp_send_failed *sn_send_failed,
-				   char *buf)
-{
-	int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed);
-	struct dlm_mhandle *mh;
-	struct connection *con;
-	char *retry_buf;
-	int nodeid = sn_send_failed->ssf_info.sinfo_ppid;
-
-	log_print("Retry sending %d bytes to node id %d", len, nodeid);
-	
-	if (!nodeid) {
-		log_print("Shouldn't resend data via listening connection.");
-		return;
-	}
-
-	con = nodeid2con(nodeid, 0);
-	if (!con) {
-		log_print("Could not look up con for nodeid %d\n",
-			  nodeid);
-		return;
-	}
-
-	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf);
-	if (!mh) {
-		log_print("Could not allocate buf for retry.");
-		return;
-	}
-	memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len);
-	dlm_lowcomms_commit_buffer(mh);
-
-	/*
-	 * If we got a assoc changed event before the send failed event then
-	 * we only need to retry the send.
-	 */
-	if (con->sctp_assoc) {
-		if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
-			queue_work(send_workqueue, &con->swork);
-	} else
-		sctp_init_failed_foreach(con);
-}
-
-/* Something happened to an association */
-static void process_sctp_notification(struct connection *con,
-				      struct msghdr *msg, char *buf)
-{
-	union sctp_notification *sn = (union sctp_notification *)buf;
-	struct linger linger;
-
-	switch (sn->sn_header.sn_type) {
-	case SCTP_SEND_FAILED:
-		retry_failed_sctp_send(con, &sn->sn_send_failed, buf);
-		break;
-	case SCTP_ASSOC_CHANGE:
-		switch (sn->sn_assoc_change.sac_state) {
-		case SCTP_COMM_UP:
-		case SCTP_RESTART:
-		{
-			/* Check that the new node is in the lockspace */
-			struct sctp_prim prim;
-			int nodeid;
-			int prim_len, ret;
-			int addr_len;
-			struct connection *new_con;
-
-			/*
-			 * We get this before any data for an association.
-			 * We verify that the node is in the cluster and
-			 * then peel off a socket for it.
-			 */
-			if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
-				log_print("COMM_UP for invalid assoc ID %d",
-					 (int)sn->sn_assoc_change.sac_assoc_id);
-				sctp_init_failed();
-				return;
-			}
-			memset(&prim, 0, sizeof(struct sctp_prim));
-			prim_len = sizeof(struct sctp_prim);
-			prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;
-
-			ret = kernel_getsockopt(con->sock,
-						IPPROTO_SCTP,
-						SCTP_PRIMARY_ADDR,
-						(char*)&prim,
-						&prim_len);
-			if (ret < 0) {
-				log_print("getsockopt/sctp_primary_addr on "
-					  "new assoc %d failed : %d",
-					  (int)sn->sn_assoc_change.sac_assoc_id,
-					  ret);
-
-				/* Retry INIT later */
-				new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
-				if (new_con)
-					clear_bit(CF_CONNECT_PENDING, &con->flags);
-				return;
-			}
-			make_sockaddr(&prim.ssp_addr, 0, &addr_len);
-			if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
-				unsigned char *b=(unsigned char *)&prim.ssp_addr;
-				log_print("reject connect from unknown addr");
-				print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, 
-						     b, sizeof(struct sockaddr_storage));
-				sctp_send_shutdown(prim.ssp_assoc_id);
-				return;
-			}
-
-			new_con = nodeid2con(nodeid, GFP_NOFS);
-			if (!new_con)
-				return;
-
-			/* Peel off a new sock */
-			lock_sock(con->sock->sk);
-			ret = sctp_do_peeloff(con->sock->sk,
-				sn->sn_assoc_change.sac_assoc_id,
-				&new_con->sock);
-			release_sock(con->sock->sk);
-			if (ret < 0) {
-				log_print("Can't peel off a socket for "
-					  "connection %d to node %d: err=%d",
-					  (int)sn->sn_assoc_change.sac_assoc_id,
-					  nodeid, ret);
-				return;
-			}
-			add_sock(new_con->sock, new_con);
-
-			linger.l_onoff = 1;
-			linger.l_linger = 0;
-			ret = kernel_setsockopt(new_con->sock, SOL_SOCKET, SO_LINGER,
-						(char *)&linger, sizeof(linger));
-			if (ret < 0)
-				log_print("set socket option SO_LINGER failed");
-
-			log_print("connecting to %d sctp association %d",
-				 nodeid, (int)sn->sn_assoc_change.sac_assoc_id);
-
-			new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id;
-			new_con->try_new_addr = false;
-			/* Send any pending writes */
-			clear_bit(CF_CONNECT_PENDING, &new_con->flags);
-			clear_bit(CF_INIT_PENDING, &new_con->flags);
-			if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) {
-				queue_work(send_workqueue, &new_con->swork);
-			}
-			if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags))
-				queue_work(recv_workqueue, &new_con->rwork);
-		}
-		break;
-
-		case SCTP_COMM_LOST:
-		case SCTP_SHUTDOWN_COMP:
-		{
-			con = assoc2con(sn->sn_assoc_change.sac_assoc_id);
-			if (con) {
-				con->sctp_assoc = 0;
-			}
-		}
-		break;
-
-		case SCTP_CANT_STR_ASSOC:
-		{
-			/* Will retry init when we get the send failed notification */
-			log_print("Can't start SCTP association - retrying");
-		}
-		break;
-
-		default:
-			log_print("unexpected SCTP assoc change id=%d state=%d",
-				  (int)sn->sn_assoc_change.sac_assoc_id,
-				  sn->sn_assoc_change.sac_state);
-		}
-	default:
-		; /* fall through */
-	}
-}
-
 /* Data received from remote end */
 static int receive_from_sock(struct connection *con)
 {
@@ -793,7 +575,6 @@
 	int r;
 	int call_again_soon = 0;
 	int nvec;
-	char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
 
 	mutex_lock(&con->sock_mutex);
 
@@ -801,6 +582,10 @@
 		ret = -EAGAIN;
 		goto out_close;
 	}
+	if (con->nodeid == 0) {
+		ret = -EINVAL;
+		goto out_close;
+	}
 
 	if (con->rx_page == NULL) {
 		/*
@@ -813,11 +598,6 @@
 		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
 	}
 
-	/* Only SCTP needs these really */
-	memset(&incmsg, 0, sizeof(incmsg));
-	msg.msg_control = incmsg;
-	msg.msg_controllen = sizeof(incmsg);
-
 	/*
 	 * iov[0] is the bit of the circular buffer between the current end
 	 * point (cb.base + cb.len) and the end of the buffer.
@@ -843,31 +623,18 @@
 			       MSG_DONTWAIT | MSG_NOSIGNAL);
 	if (ret <= 0)
 		goto out_close;
-
-	/* Process SCTP notifications */
-	if (msg.msg_flags & MSG_NOTIFICATION) {
-		msg.msg_control = incmsg;
-		msg.msg_controllen = sizeof(incmsg);
-
-		process_sctp_notification(con, &msg,
-				page_address(con->rx_page) + con->cb.base);
-		mutex_unlock(&con->sock_mutex);
-		return 0;
-	}
-	BUG_ON(con->nodeid == 0);
-
-	if (ret == len)
+	else if (ret == len)
 		call_again_soon = 1;
+
 	cbuf_add(&con->cb, ret);
 	ret = dlm_process_incoming_buffer(con->nodeid,
 					  page_address(con->rx_page),
 					  con->cb.base, con->cb.len,
 					  PAGE_CACHE_SIZE);
 	if (ret == -EBADMSG) {
-		log_print("lowcomms: addr=%p, base=%u, len=%u, "
-			  "iov_len=%u, iov_base[0]=%p, read=%d",
-			  page_address(con->rx_page), con->cb.base, con->cb.len,
-			  len, iov[0].iov_base, r);
+		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
+			  page_address(con->rx_page), con->cb.base,
+			  con->cb.len, r);
 	}
 	if (ret < 0)
 		goto out_close;
@@ -892,7 +659,7 @@
 out_close:
 	mutex_unlock(&con->sock_mutex);
 	if (ret != -EAGAIN) {
-		close_connection(con, false);
+		close_connection(con, false, true, false);
 		/* Reconnect when there is something to send */
 	}
 	/* Don't return success if we really got EOF */
@@ -1033,6 +800,120 @@
 	return result;
 }
 
+static int sctp_accept_from_sock(struct connection *con)
+{
+	/* Check that the new node is in the lockspace */
+	struct sctp_prim prim;
+	int nodeid;
+	int prim_len, ret;
+	int addr_len;
+	struct connection *newcon;
+	struct connection *addcon;
+	struct socket *newsock;
+
+	mutex_lock(&connections_lock);
+	if (!dlm_allow_conn) {
+		mutex_unlock(&connections_lock);
+		return -1;
+	}
+	mutex_unlock(&connections_lock);
+
+	mutex_lock_nested(&con->sock_mutex, 0);
+
+	ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
+	if (ret < 0)
+		goto accept_err;
+
+	memset(&prim, 0, sizeof(struct sctp_prim));
+	prim_len = sizeof(struct sctp_prim);
+
+	ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
+				(char *)&prim, &prim_len);
+	if (ret < 0) {
+		log_print("getsockopt/sctp_primary_addr failed: %d", ret);
+		goto accept_err;
+	}
+
+	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
+	if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
+		unsigned char *b = (unsigned char *)&prim.ssp_addr;
+
+		log_print("reject connect from unknown addr");
+		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
+				     b, sizeof(struct sockaddr_storage));
+		goto accept_err;
+	}
+
+	newcon = nodeid2con(nodeid, GFP_NOFS);
+	if (!newcon) {
+		ret = -ENOMEM;
+		goto accept_err;
+	}
+
+	mutex_lock_nested(&newcon->sock_mutex, 1);
+
+	if (newcon->sock) {
+		struct connection *othercon = newcon->othercon;
+
+		if (!othercon) {
+			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
+			if (!othercon) {
+				log_print("failed to allocate incoming socket");
+				mutex_unlock(&newcon->sock_mutex);
+				ret = -ENOMEM;
+				goto accept_err;
+			}
+			othercon->nodeid = nodeid;
+			othercon->rx_action = receive_from_sock;
+			mutex_init(&othercon->sock_mutex);
+			INIT_WORK(&othercon->swork, process_send_sockets);
+			INIT_WORK(&othercon->rwork, process_recv_sockets);
+			set_bit(CF_IS_OTHERCON, &othercon->flags);
+		}
+		if (!othercon->sock) {
+			newcon->othercon = othercon;
+			othercon->sock = newsock;
+			newsock->sk->sk_user_data = othercon;
+			add_sock(newsock, othercon);
+			addcon = othercon;
+		} else {
+			printk("Extra connection from node %d attempted\n", nodeid);
+			ret = -EAGAIN;
+			mutex_unlock(&newcon->sock_mutex);
+			goto accept_err;
+		}
+	} else {
+		newsock->sk->sk_user_data = newcon;
+		newcon->rx_action = receive_from_sock;
+		add_sock(newsock, newcon);
+		addcon = newcon;
+	}
+
+	log_print("connected to %d", nodeid);
+
+	mutex_unlock(&newcon->sock_mutex);
+
+	/*
+	 * Add it to the active queue in case we got data
+	 * between processing the accept adding the socket
+	 * to the read_sockets list
+	 */
+	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
+		queue_work(recv_workqueue, &addcon->rwork);
+	mutex_unlock(&con->sock_mutex);
+
+	return 0;
+
+accept_err:
+	mutex_unlock(&con->sock_mutex);
+	if (newsock)
+		sock_release(newsock);
+	if (ret != -EAGAIN)
+		log_print("error accepting connection from node: %d", ret);
+
+	return ret;
+}
+
 static void free_entry(struct writequeue_entry *e)
 {
 	__free_page(e->page);
@@ -1057,97 +938,129 @@
 	}
 }
 
+/*
+ * sctp_bind_addrs - bind a SCTP socket to all our addresses
+ */
+static int sctp_bind_addrs(struct connection *con, uint16_t port)
+{
+	struct sockaddr_storage localaddr;
+	int i, addr_len, result = 0;
+
+	for (i = 0; i < dlm_local_count; i++) {
+		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
+		make_sockaddr(&localaddr, port, &addr_len);
+
+		if (!i)
+			result = kernel_bind(con->sock,
+					     (struct sockaddr *)&localaddr,
+					     addr_len);
+		else
+			result = kernel_setsockopt(con->sock, SOL_SCTP,
+						   SCTP_SOCKOPT_BINDX_ADD,
+						   (char *)&localaddr, addr_len);
+
+		if (result < 0) {
+			log_print("Can't bind to %d addr number %d, %d.\n",
+				  port, i + 1, result);
+			break;
+		}
+	}
+	return result;
+}
+
 /* Initiate an SCTP association.
    This is a special case of send_to_sock() in that we don't yet have a
    peeled-off socket for this association, so we use the listening socket
    and add the primary IP address of the remote node.
  */
-static void sctp_init_assoc(struct connection *con)
+static void sctp_connect_to_sock(struct connection *con)
 {
-	struct sockaddr_storage rem_addr;
-	char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
-	struct msghdr outmessage;
-	struct cmsghdr *cmsg;
-	struct sctp_sndrcvinfo *sinfo;
-	struct connection *base_con;
-	struct writequeue_entry *e;
-	int len, offset;
-	int ret;
-	int addrlen;
-	struct kvec iov[1];
+	struct sockaddr_storage daddr;
+	int one = 1;
+	int result;
+	int addr_len;
+	struct socket *sock;
+
+	if (con->nodeid == 0) {
+		log_print("attempt to connect sock 0 foiled");
+		return;
+	}
 
 	mutex_lock(&con->sock_mutex);
-	if (test_and_set_bit(CF_INIT_PENDING, &con->flags))
-		goto unlock;
 
-	if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr,
-			   con->try_new_addr)) {
+	/* Some odd races can cause double-connects, ignore them */
+	if (con->retries++ > MAX_CONNECT_RETRIES)
+		goto out;
+
+	if (con->sock) {
+		log_print("node %d already connected.", con->nodeid);
+		goto out;
+	}
+
+	memset(&daddr, 0, sizeof(daddr));
+	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
+	if (result < 0) {
 		log_print("no address for nodeid %d", con->nodeid);
-		goto unlock;
-	}
-	base_con = nodeid2con(0, 0);
-	BUG_ON(base_con == NULL);
-
-	make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen);
-
-	outmessage.msg_name = &rem_addr;
-	outmessage.msg_namelen = addrlen;
-	outmessage.msg_control = outcmsg;
-	outmessage.msg_controllen = sizeof(outcmsg);
-	outmessage.msg_flags = MSG_EOR;
-
-	spin_lock(&con->writequeue_lock);
-
-	if (list_empty(&con->writequeue)) {
-		spin_unlock(&con->writequeue_lock);
-		log_print("writequeue empty for nodeid %d", con->nodeid);
-		goto unlock;
+		goto out;
 	}
 
-	e = list_first_entry(&con->writequeue, struct writequeue_entry, list);
-	len = e->len;
-	offset = e->offset;
+	/* Create a socket to communicate with */
+	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
+				  SOCK_STREAM, IPPROTO_SCTP, &sock);
+	if (result < 0)
+		goto socket_err;
 
-	/* Send the first block off the write queue */
-	iov[0].iov_base = page_address(e->page)+offset;
-	iov[0].iov_len = len;
-	spin_unlock(&con->writequeue_lock);
+	sock->sk->sk_user_data = con;
+	con->rx_action = receive_from_sock;
+	con->connect_action = sctp_connect_to_sock;
+	add_sock(sock, con);
 
-	if (rem_addr.ss_family == AF_INET) {
-		struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr;
-		log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr);
-	} else {
-		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr;
-		log_print("Trying to connect to %pI6", &sin6->sin6_addr);
-	}
+	/* Bind to all addresses. */
+	if (sctp_bind_addrs(con, 0))
+		goto bind_err;
 
-	cmsg = CMSG_FIRSTHDR(&outmessage);
-	cmsg->cmsg_level = IPPROTO_SCTP;
-	cmsg->cmsg_type = SCTP_SNDRCV;
-	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
-	sinfo = CMSG_DATA(cmsg);
-	memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
-	sinfo->sinfo_ppid = cpu_to_le32(con->nodeid);
-	outmessage.msg_controllen = cmsg->cmsg_len;
-	sinfo->sinfo_flags |= SCTP_ADDR_OVER;
+	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);
 
-	ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len);
-	if (ret < 0) {
-		log_print("Send first packet to node %d failed: %d",
-			  con->nodeid, ret);
+	log_print("connecting to %d", con->nodeid);
 
-		/* Try again later */
+	/* Turn off Nagle's algorithm */
+	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
+			  sizeof(one));
+
+	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
+				   O_NONBLOCK);
+	if (result == -EINPROGRESS)
+		result = 0;
+	if (result == 0)
+		goto out;
+
+
+bind_err:
+	con->sock = NULL;
+	sock_release(sock);
+
+socket_err:
+	/*
+	 * Some errors are fatal and this list might need adjusting. For other
+	 * errors we try again until the max number of retries is reached.
+	 */
+	if (result != -EHOSTUNREACH &&
+	    result != -ENETUNREACH &&
+	    result != -ENETDOWN &&
+	    result != -EINVAL &&
+	    result != -EPROTONOSUPPORT) {
+		log_print("connect %d try %d error %d", con->nodeid,
+			  con->retries, result);
+		mutex_unlock(&con->sock_mutex);
+		msleep(1000);
 		clear_bit(CF_CONNECT_PENDING, &con->flags);
-		clear_bit(CF_INIT_PENDING, &con->flags);
-	}
-	else {
-		spin_lock(&con->writequeue_lock);
-		writequeue_entry_complete(e, ret);
-		spin_unlock(&con->writequeue_lock);
+		lowcomms_connect_sock(con);
+		return;
 	}
 
-unlock:
+out:
 	mutex_unlock(&con->sock_mutex);
+	set_bit(CF_WRITE_PENDING, &con->flags);
 }
 
 /* Connect a new socket to its peer */
@@ -1236,11 +1149,13 @@
 			  con->retries, result);
 		mutex_unlock(&con->sock_mutex);
 		msleep(1000);
+		clear_bit(CF_CONNECT_PENDING, &con->flags);
 		lowcomms_connect_sock(con);
 		return;
 	}
 out:
 	mutex_unlock(&con->sock_mutex);
+	set_bit(CF_WRITE_PENDING, &con->flags);
 	return;
 }
 
@@ -1325,37 +1240,11 @@
 	}
 }
 
-/* Bind to an IP address. SCTP allows multiple address so it can do
-   multi-homing */
-static int add_sctp_bind_addr(struct connection *sctp_con,
-			      struct sockaddr_storage *addr,
-			      int addr_len, int num)
-{
-	int result = 0;
-
-	if (num == 1)
-		result = kernel_bind(sctp_con->sock,
-				     (struct sockaddr *) addr,
-				     addr_len);
-	else
-		result = kernel_setsockopt(sctp_con->sock, SOL_SCTP,
-					   SCTP_SOCKOPT_BINDX_ADD,
-					   (char *)addr, addr_len);
-
-	if (result < 0)
-		log_print("Can't bind to port %d addr number %d",
-			  dlm_config.ci_tcp_port, num);
-
-	return result;
-}
-
 /* Initialise SCTP socket and bind to all interfaces */
 static int sctp_listen_for_all(void)
 {
 	struct socket *sock = NULL;
-	struct sockaddr_storage localaddr;
-	struct sctp_event_subscribe subscribe;
-	int result = -EINVAL, num = 1, i, addr_len;
+	int result = -EINVAL;
 	struct connection *con = nodeid2con(0, GFP_NOFS);
 	int bufsize = NEEDED_RMEM;
 	int one = 1;
@@ -1366,33 +1255,17 @@
 	log_print("Using SCTP for communications");
 
 	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
-				  SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
+				  SOCK_STREAM, IPPROTO_SCTP, &sock);
 	if (result < 0) {
 		log_print("Can't create comms socket, check SCTP is loaded");
 		goto out;
 	}
 
-	/* Listen for events */
-	memset(&subscribe, 0, sizeof(subscribe));
-	subscribe.sctp_data_io_event = 1;
-	subscribe.sctp_association_event = 1;
-	subscribe.sctp_send_failure_event = 1;
-	subscribe.sctp_shutdown_event = 1;
-	subscribe.sctp_partial_delivery_event = 1;
-
 	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
 				 (char *)&bufsize, sizeof(bufsize));
 	if (result)
 		log_print("Error increasing buffer space on socket %d", result);
 
-	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
-				   (char *)&subscribe, sizeof(subscribe));
-	if (result < 0) {
-		log_print("Failed to set SCTP_EVENTS on socket: result=%d",
-			  result);
-		goto create_delsock;
-	}
-
 	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
 				   sizeof(one));
 	if (result < 0)
@@ -1402,19 +1275,12 @@
 	sock->sk->sk_user_data = con;
 	con->sock = sock;
 	con->sock->sk->sk_data_ready = lowcomms_data_ready;
-	con->rx_action = receive_from_sock;
-	con->connect_action = sctp_init_assoc;
+	con->rx_action = sctp_accept_from_sock;
+	con->connect_action = sctp_connect_to_sock;
 
-	/* Bind to all interfaces. */
-	for (i = 0; i < dlm_local_count; i++) {
-		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
-		make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len);
-
-		result = add_sctp_bind_addr(con, &localaddr, addr_len, num);
-		if (result)
-			goto create_delsock;
-		++num;
-	}
+	/* Bind to all addresses. */
+	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
+		goto create_delsock;
 
 	result = sock->ops->listen(sock, 5);
 	if (result < 0) {
@@ -1612,14 +1478,13 @@
 
 send_error:
 	mutex_unlock(&con->sock_mutex);
-	close_connection(con, false);
+	close_connection(con, false, false, true);
 	lowcomms_connect_sock(con);
 	return;
 
 out_connect:
 	mutex_unlock(&con->sock_mutex);
-	if (!test_bit(CF_INIT_PENDING, &con->flags))
-		lowcomms_connect_sock(con);
+	lowcomms_connect_sock(con);
 }
 
 static void clean_one_writequeue(struct connection *con)
@@ -1644,15 +1509,9 @@
 	log_print("closing connection to node %d", nodeid);
 	con = nodeid2con(nodeid, 0);
 	if (con) {
-		clear_bit(CF_CONNECT_PENDING, &con->flags);
-		clear_bit(CF_WRITE_PENDING, &con->flags);
 		set_bit(CF_CLOSE, &con->flags);
-		if (cancel_work_sync(&con->swork))
-			log_print("canceled swork for node %d", nodeid);
-		if (cancel_work_sync(&con->rwork))
-			log_print("canceled rwork for node %d", nodeid);
+		close_connection(con, true, true, true);
 		clean_one_writequeue(con);
-		close_connection(con, true);
 	}
 
 	spin_lock(&dlm_node_addrs_spin);
@@ -1685,10 +1544,8 @@
 {
 	struct connection *con = container_of(work, struct connection, swork);
 
-	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags))
 		con->connect_action(con);
-		set_bit(CF_WRITE_PENDING, &con->flags);
-	}
 	if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags))
 		send_to_sock(con);
 }
@@ -1735,7 +1592,7 @@
 
 static void free_conn(struct connection *con)
 {
-	close_connection(con, true);
+	close_connection(con, true, true, true);
 	if (con->othercon)
 		kmem_cache_free(con_cache, con->othercon);
 	hlist_del(&con->list);
@@ -1806,7 +1663,7 @@
 	dlm_allow_conn = 0;
 	con = nodeid2con(0,0);
 	if (con) {
-		close_connection(con, false);
+		close_connection(con, false, true, true);
 		kmem_cache_free(con_cache, con);
 	}
 fail_destroy:
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index e0ab3a9..5532f09 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -509,7 +509,6 @@
 
 void dlm_plock_exit(void)
 {
-	if (misc_deregister(&plock_dev_misc) < 0)
-		log_print("dlm_plock_exit: misc_deregister failed");
+	misc_deregister(&plock_dev_misc);
 }
 
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index fb85f32..173b387 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -362,18 +362,15 @@
 
 int dlm_device_deregister(struct dlm_ls *ls)
 {
-	int error;
-
 	/* The device is not registered.  This happens when the lockspace
 	   was never used from userspace, or when device_create_lockspace()
 	   calls dlm_release_lockspace() after the register fails. */
 	if (!ls->ls_device.name)
 		return 0;
 
-	error = misc_deregister(&ls->ls_device);
-	if (!error)
-		kfree(ls->ls_device.name);
-	return error;
+	misc_deregister(&ls->ls_device);
+	kfree(ls->ls_device.name);
+	return 0;
 }
 
 static int device_user_purge(struct dlm_user_proc *proc,
@@ -785,6 +782,7 @@
 	DECLARE_WAITQUEUE(wait, current);
 	struct dlm_callback cb;
 	int rv, resid, copy_lvb = 0;
+	int old_mode, new_mode;
 
 	if (count == sizeof(struct dlm_device_version)) {
 		rv = copy_version_to_user(buf, count);
@@ -841,6 +839,9 @@
 
 	lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
 
+	/* rem_lkb_callback sets a new lkb_last_cast */
+	old_mode = lkb->lkb_last_cast.mode;
+
 	rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
 	if (rv < 0) {
 		/* this shouldn't happen; lkb should have been removed from
@@ -864,9 +865,6 @@
 	}
 
 	if (cb.flags & DLM_CB_CAST) {
-		int old_mode, new_mode;
-
-		old_mode = lkb->lkb_last_cast.mode;
 		new_mode = cb.mode;
 
 		if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index cf20852..caba848 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -299,7 +299,7 @@
 			rc = ecryptfs_read_lower_page_segment(
 				page, index, 0, PAGE_CACHE_SIZE, mapping->host);
 			if (rc) {
-				printk(KERN_ERR "%s: Error attemping to read "
+				printk(KERN_ERR "%s: Error attempting to read "
 				       "lower page segment; rc = [%d]\n",
 				       __func__, rc);
 				ClearPageUptodate(page);
diff --git a/fs/exec.c b/fs/exec.c
index 1977c2a..b06623a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -98,6 +98,12 @@
 	module_put(fmt->module);
 }
 
+bool path_noexec(const struct path *path)
+{
+	return (path->mnt->mnt_flags & MNT_NOEXEC) ||
+	       (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
+}
+
 #ifdef CONFIG_USELIB
 /*
  * Note that a shared library must be both readable and executable due to
@@ -132,7 +138,7 @@
 		goto exit;
 
 	error = -EACCES;
-	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+	if (path_noexec(&file->f_path))
 		goto exit;
 
 	fsnotify_open(file);
@@ -777,7 +783,7 @@
 	if (!S_ISREG(file_inode(file)->i_mode))
 		goto exit;
 
-	if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+	if (path_noexec(&file->f_path))
 		goto exit;
 
 	err = deny_write_access(file);
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 5c04a0d..efe5fb2 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -577,7 +577,10 @@
 		goto fail;
 	}
 
-	dquot_initialize(inode);
+	err = dquot_initialize(inode);
+	if (err)
+		goto fail_drop;
+
 	err = dquot_alloc_inode(inode);
 	if (err)
 		goto fail_drop;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 5c09776..a3a404c 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1552,8 +1552,11 @@
 	if (error)
 		return error;
 
-	if (is_quota_modification(inode, iattr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, iattr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 		error = dquot_transfer(inode, iattr);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 13ec54a..b4841e3 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -96,8 +96,11 @@
 static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
 {
 	struct inode *inode;
+	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode = ext2_new_inode(dir, mode, &dentry->d_name);
 	if (IS_ERR(inode))
@@ -143,7 +146,9 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode = ext2_new_inode (dir, mode, &dentry->d_name);
 	err = PTR_ERR(inode);
@@ -169,7 +174,9 @@
 	if (l > sb->s_blocksize)
 		goto out;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto out;
 
 	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO, &dentry->d_name);
 	err = PTR_ERR(inode);
@@ -212,7 +219,9 @@
 	struct inode *inode = d_inode(old_dentry);
 	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode->i_ctime = CURRENT_TIME_SEC;
 	inode_inc_link_count(inode);
@@ -233,7 +242,9 @@
 	struct inode * inode;
 	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	inode_inc_link_count(dir);
 
@@ -279,13 +290,17 @@
 	struct inode * inode = d_inode(dentry);
 	struct ext2_dir_entry_2 * de;
 	struct page * page;
-	int err = -ENOENT;
+	int err;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto out;
 
 	de = ext2_find_entry (dir, &dentry->d_name, &page);
-	if (!de)
+	if (!de) {
+		err = -ENOENT;
 		goto out;
+	}
 
 	err = ext2_delete_entry (de, page);
 	if (err)
@@ -323,14 +338,21 @@
 	struct ext2_dir_entry_2 * dir_de = NULL;
 	struct page * old_page;
 	struct ext2_dir_entry_2 * old_de;
-	int err = -ENOENT;
+	int err;
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	err = dquot_initialize(old_dir);
+	if (err)
+		goto out;
+
+	err = dquot_initialize(new_dir);
+	if (err)
+		goto out;
 
 	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
-	if (!old_de)
+	if (!old_de) {
+		err = -ENOENT;
 		goto out;
+	}
 
 	if (S_ISDIR(old_inode->i_mode)) {
 		err = -EIO;
diff --git a/fs/ext3/Kconfig b/fs/ext3/Kconfig
deleted file mode 100644
index e8c6ba0..0000000
--- a/fs/ext3/Kconfig
+++ /dev/null
@@ -1,89 +0,0 @@
-config EXT3_FS
-	tristate "Ext3 journalling file system support"
-	select JBD
-	help
-	  This is the journalling version of the Second extended file system
-	  (often called ext3), the de facto standard Linux file system
-	  (method to organize files on a storage device) for hard disks.
-
-	  The journalling code included in this driver means you do not have
-	  to run e2fsck (file system checker) on your file systems after a
-	  crash.  The journal keeps track of any changes that were being made
-	  at the time the system crashed, and can ensure that your file system
-	  is consistent without the need for a lengthy check.
-
-	  Other than adding the journal to the file system, the on-disk format
-	  of ext3 is identical to ext2.  It is possible to freely switch
-	  between using the ext3 driver and the ext2 driver, as long as the
-	  file system has been cleanly unmounted, or e2fsck is run on the file
-	  system.
-
-	  To add a journal on an existing ext2 file system or change the
-	  behavior of ext3 file systems, you can use the tune2fs utility ("man
-	  tune2fs").  To modify attributes of files and directories on ext3
-	  file systems, use chattr ("man chattr").  You need to be using
-	  e2fsprogs version 1.20 or later in order to create ext3 journals
-	  (available at <http://sourceforge.net/projects/e2fsprogs/>).
-
-	  To compile this file system support as a module, choose M here: the
-	  module will be called ext3.
-
-config EXT3_DEFAULTS_TO_ORDERED
-	bool "Default to 'data=ordered' in ext3"
-	depends on EXT3_FS
-	default y
-	help
-	  The journal mode options for ext3 have different tradeoffs
-	  between when data is guaranteed to be on disk and
-	  performance.	The use of "data=writeback" can cause
-	  unwritten data to appear in files after an system crash or
-	  power failure, which can be a security issue.	 However,
-	  "data=ordered" mode can also result in major performance
-	  problems, including seconds-long delays before an fsync()
-	  call returns.	 For details, see:
-
-	  http://ext4.wiki.kernel.org/index.php/Ext3_data_mode_tradeoffs
-
-	  If you have been historically happy with ext3's performance,
-	  data=ordered mode will be a safe choice and you should
-	  answer 'y' here.  If you understand the reliability and data
-	  privacy issues of data=writeback and are willing to make
-	  that trade off, answer 'n'.
-
-config EXT3_FS_XATTR
-	bool "Ext3 extended attributes"
-	depends on EXT3_FS
-	default y
-	help
-	  Extended attributes are name:value pairs associated with inodes by
-	  the kernel or by users (see the attr(5) manual page, or visit
-	  <http://acl.bestbits.at/> for details).
-
-	  If unsure, say N.
-
-	  You need this for POSIX ACL support on ext3.
-
-config EXT3_FS_POSIX_ACL
-	bool "Ext3 POSIX Access Control Lists"
-	depends on EXT3_FS_XATTR
-	select FS_POSIX_ACL
-	help
-	  Posix Access Control Lists (ACLs) support permissions for users and
-	  groups beyond the owner/group/world scheme.
-
-	  To learn more about Access Control Lists, visit the Posix ACLs for
-	  Linux website <http://acl.bestbits.at/>.
-
-	  If you don't know what Access Control Lists are, say N
-
-config EXT3_FS_SECURITY
-	bool "Ext3 Security Labels"
-	depends on EXT3_FS_XATTR
-	help
-	  Security labels support alternative access control models
-	  implemented by security modules like SELinux.  This option
-	  enables an extended attribute handler for file security
-	  labels in the ext3 filesystem.
-
-	  If you are not using a security module that requires using
-	  extended attributes for file security labels, say N.
diff --git a/fs/ext3/Makefile b/fs/ext3/Makefile
deleted file mode 100644
index e77766a..0000000
--- a/fs/ext3/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux ext3-filesystem routines.
-#
-
-obj-$(CONFIG_EXT3_FS) += ext3.o
-
-ext3-y	:= balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o \
-	   ioctl.o namei.o super.o symlink.o hash.o resize.o ext3_jbd.o
-
-ext3-$(CONFIG_EXT3_FS_XATTR)	 += xattr.o xattr_user.o xattr_trusted.o
-ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
-ext3-$(CONFIG_EXT3_FS_SECURITY)	 += xattr_security.o
diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c
deleted file mode 100644
index 8bbaf5b..0000000
--- a/fs/ext3/acl.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * linux/fs/ext3/acl.c
- *
- * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * Convert from filesystem to in-memory representation.
- */
-static struct posix_acl *
-ext3_acl_from_disk(const void *value, size_t size)
-{
-	const char *end = (char *)value + size;
-	int n, count;
-	struct posix_acl *acl;
-
-	if (!value)
-		return NULL;
-	if (size < sizeof(ext3_acl_header))
-		 return ERR_PTR(-EINVAL);
-	if (((ext3_acl_header *)value)->a_version !=
-	    cpu_to_le32(EXT3_ACL_VERSION))
-		return ERR_PTR(-EINVAL);
-	value = (char *)value + sizeof(ext3_acl_header);
-	count = ext3_acl_count(size);
-	if (count < 0)
-		return ERR_PTR(-EINVAL);
-	if (count == 0)
-		return NULL;
-	acl = posix_acl_alloc(count, GFP_NOFS);
-	if (!acl)
-		return ERR_PTR(-ENOMEM);
-	for (n=0; n < count; n++) {
-		ext3_acl_entry *entry =
-			(ext3_acl_entry *)value;
-		if ((char *)value + sizeof(ext3_acl_entry_short) > end)
-			goto fail;
-		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
-		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
-		switch(acl->a_entries[n].e_tag) {
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				value = (char *)value +
-					sizeof(ext3_acl_entry_short);
-				break;
-
-			case ACL_USER:
-				value = (char *)value + sizeof(ext3_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_uid =
-					make_kuid(&init_user_ns,
-						  le32_to_cpu(entry->e_id));
-				break;
-			case ACL_GROUP:
-				value = (char *)value + sizeof(ext3_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_gid =
-					make_kgid(&init_user_ns,
-						  le32_to_cpu(entry->e_id));
-				break;
-
-			default:
-				goto fail;
-		}
-	}
-	if (value != end)
-		goto fail;
-	return acl;
-
-fail:
-	posix_acl_release(acl);
-	return ERR_PTR(-EINVAL);
-}
-
-/*
- * Convert from in-memory to filesystem representation.
- */
-static void *
-ext3_acl_to_disk(const struct posix_acl *acl, size_t *size)
-{
-	ext3_acl_header *ext_acl;
-	char *e;
-	size_t n;
-
-	*size = ext3_acl_size(acl->a_count);
-	ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count *
-			sizeof(ext3_acl_entry), GFP_NOFS);
-	if (!ext_acl)
-		return ERR_PTR(-ENOMEM);
-	ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION);
-	e = (char *)ext_acl + sizeof(ext3_acl_header);
-	for (n=0; n < acl->a_count; n++) {
-		const struct posix_acl_entry *acl_e = &acl->a_entries[n];
-		ext3_acl_entry *entry = (ext3_acl_entry *)e;
-		entry->e_tag  = cpu_to_le16(acl_e->e_tag);
-		entry->e_perm = cpu_to_le16(acl_e->e_perm);
-		switch(acl_e->e_tag) {
-			case ACL_USER:
-				entry->e_id = cpu_to_le32(
-					from_kuid(&init_user_ns, acl_e->e_uid));
-				e += sizeof(ext3_acl_entry);
-				break;
-			case ACL_GROUP:
-				entry->e_id = cpu_to_le32(
-					from_kgid(&init_user_ns, acl_e->e_gid));
-				e += sizeof(ext3_acl_entry);
-				break;
-
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				e += sizeof(ext3_acl_entry_short);
-				break;
-
-			default:
-				goto fail;
-		}
-	}
-	return (char *)ext_acl;
-
-fail:
-	kfree(ext_acl);
-	return ERR_PTR(-EINVAL);
-}
-
-/*
- * Inode operation get_posix_acl().
- *
- * inode->i_mutex: don't care
- */
-struct posix_acl *
-ext3_get_acl(struct inode *inode, int type)
-{
-	int name_index;
-	char *value = NULL;
-	struct posix_acl *acl;
-	int retval;
-
-	switch (type) {
-	case ACL_TYPE_ACCESS:
-		name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
-		break;
-	case ACL_TYPE_DEFAULT:
-		name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
-		break;
-	default:
-		BUG();
-	}
-
-	retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
-	if (retval > 0) {
-		value = kmalloc(retval, GFP_NOFS);
-		if (!value)
-			return ERR_PTR(-ENOMEM);
-		retval = ext3_xattr_get(inode, name_index, "", value, retval);
-	}
-	if (retval > 0)
-		acl = ext3_acl_from_disk(value, retval);
-	else if (retval == -ENODATA || retval == -ENOSYS)
-		acl = NULL;
-	else
-		acl = ERR_PTR(retval);
-	kfree(value);
-
-	if (!IS_ERR(acl))
-		set_cached_acl(inode, type, acl);
-
-	return acl;
-}
-
-/*
- * Set the access or default ACL of an inode.
- *
- * inode->i_mutex: down unless called from ext3_new_inode
- */
-static int
-__ext3_set_acl(handle_t *handle, struct inode *inode, int type,
-	     struct posix_acl *acl)
-{
-	int name_index;
-	void *value = NULL;
-	size_t size = 0;
-	int error;
-
-	switch(type) {
-		case ACL_TYPE_ACCESS:
-			name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS;
-			if (acl) {
-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
-				if (error < 0)
-					return error;
-				else {
-					inode->i_ctime = CURRENT_TIME_SEC;
-					ext3_mark_inode_dirty(handle, inode);
-					if (error == 0)
-						acl = NULL;
-				}
-			}
-			break;
-
-		case ACL_TYPE_DEFAULT:
-			name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT;
-			if (!S_ISDIR(inode->i_mode))
-				return acl ? -EACCES : 0;
-			break;
-
-		default:
-			return -EINVAL;
-	}
-	if (acl) {
-		value = ext3_acl_to_disk(acl, &size);
-		if (IS_ERR(value))
-			return (int)PTR_ERR(value);
-	}
-
-	error = ext3_xattr_set_handle(handle, inode, name_index, "",
-				      value, size, 0);
-
-	kfree(value);
-
-	if (!error)
-		set_cached_acl(inode, type, acl);
-
-	return error;
-}
-
-int
-ext3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
-{
-	handle_t *handle;
-	int error, retries = 0;
-
-retry:
-	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	error = __ext3_set_acl(handle, inode, type, acl);
-	ext3_journal_stop(handle);
-	if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-	return error;
-}
-
-/*
- * Initialize the ACLs of a new inode. Called from ext3_new_inode.
- *
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
- */
-int
-ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
-{
-	struct posix_acl *default_acl, *acl;
-	int error;
-
-	error = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
-	if (error)
-		return error;
-
-	if (default_acl) {
-		error = __ext3_set_acl(handle, inode, ACL_TYPE_DEFAULT,
-				       default_acl);
-		posix_acl_release(default_acl);
-	}
-	if (acl) {
-		if (!error)
-			error = __ext3_set_acl(handle, inode, ACL_TYPE_ACCESS,
-					       acl);
-		posix_acl_release(acl);
-	}
-	return error;
-}
diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h
deleted file mode 100644
index ea1c69e..0000000
--- a/fs/ext3/acl.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-  File: fs/ext3/acl.h
-
-  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-
-#include <linux/posix_acl_xattr.h>
-
-#define EXT3_ACL_VERSION	0x0001
-
-typedef struct {
-	__le16		e_tag;
-	__le16		e_perm;
-	__le32		e_id;
-} ext3_acl_entry;
-
-typedef struct {
-	__le16		e_tag;
-	__le16		e_perm;
-} ext3_acl_entry_short;
-
-typedef struct {
-	__le32		a_version;
-} ext3_acl_header;
-
-static inline size_t ext3_acl_size(int count)
-{
-	if (count <= 4) {
-		return sizeof(ext3_acl_header) +
-		       count * sizeof(ext3_acl_entry_short);
-	} else {
-		return sizeof(ext3_acl_header) +
-		       4 * sizeof(ext3_acl_entry_short) +
-		       (count - 4) * sizeof(ext3_acl_entry);
-	}
-}
-
-static inline int ext3_acl_count(size_t size)
-{
-	ssize_t s;
-	size -= sizeof(ext3_acl_header);
-	s = size - 4 * sizeof(ext3_acl_entry_short);
-	if (s < 0) {
-		if (size % sizeof(ext3_acl_entry_short))
-			return -1;
-		return size / sizeof(ext3_acl_entry_short);
-	} else {
-		if (s % sizeof(ext3_acl_entry))
-			return -1;
-		return s / sizeof(ext3_acl_entry) + 4;
-	}
-}
-
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-
-/* acl.c */
-extern struct posix_acl *ext3_get_acl(struct inode *inode, int type);
-extern int ext3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
-
-#else  /* CONFIG_EXT3_FS_POSIX_ACL */
-#include <linux/sched.h>
-#define ext3_get_acl NULL
-#define ext3_set_acl NULL
-
-static inline int
-ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
-{
-	return 0;
-}
-#endif  /* CONFIG_EXT3_FS_POSIX_ACL */
-
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
deleted file mode 100644
index 158b5d4..0000000
--- a/fs/ext3/balloc.c
+++ /dev/null
@@ -1,2158 +0,0 @@
-/*
- *  linux/fs/ext3/balloc.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/quotaops.h>
-#include <linux/blkdev.h>
-#include "ext3.h"
-
-/*
- * balloc.c contains the blocks allocation and deallocation routines
- */
-
-/*
- * The free blocks are managed by bitmaps.  A file system contains several
- * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
- * block for inodes, N blocks for the inode table and data blocks.
- *
- * The file system contains group descriptors which are located after the
- * super block.  Each descriptor contains the number of the bitmap block and
- * the free blocks count in the block.  The descriptors are loaded in memory
- * when a file system is mounted (see ext3_fill_super).
- */
-
-
-#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
-
-/*
- * Calculate the block group number and offset, given a block number
- */
-static void ext3_get_group_no_and_offset(struct super_block *sb,
-	ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
-	if (offsetp)
-		*offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
-	if (blockgrpp)
-		*blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
-}
-
-/**
- * ext3_get_group_desc() -- load group descriptor from disk
- * @sb:			super block
- * @block_group:	given block group
- * @bh:			pointer to the buffer head to store the block
- *			group descriptor
- */
-struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-					     unsigned int block_group,
-					     struct buffer_head ** bh)
-{
-	unsigned long group_desc;
-	unsigned long offset;
-	struct ext3_group_desc * desc;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (block_group >= sbi->s_groups_count) {
-		ext3_error (sb, "ext3_get_group_desc",
-			    "block_group >= groups_count - "
-			    "block_group = %d, groups_count = %lu",
-			    block_group, sbi->s_groups_count);
-
-		return NULL;
-	}
-	smp_rmb();
-
-	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
-	offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
-	if (!sbi->s_group_desc[group_desc]) {
-		ext3_error (sb, "ext3_get_group_desc",
-			    "Group descriptor not loaded - "
-			    "block_group = %d, group_desc = %lu, desc = %lu",
-			     block_group, group_desc, offset);
-		return NULL;
-	}
-
-	desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
-	if (bh)
-		*bh = sbi->s_group_desc[group_desc];
-	return desc + offset;
-}
-
-static int ext3_valid_block_bitmap(struct super_block *sb,
-					struct ext3_group_desc *desc,
-					unsigned int block_group,
-					struct buffer_head *bh)
-{
-	ext3_grpblk_t offset;
-	ext3_grpblk_t next_zero_bit;
-	ext3_fsblk_t bitmap_blk;
-	ext3_fsblk_t group_first_block;
-
-	group_first_block = ext3_group_first_block_no(sb, block_group);
-
-	/* check whether block bitmap block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-	offset = bitmap_blk - group_first_block;
-	if (!ext3_test_bit(offset, bh->b_data))
-		/* bad block bitmap */
-		goto err_out;
-
-	/* check whether the inode bitmap block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
-	offset = bitmap_blk - group_first_block;
-	if (!ext3_test_bit(offset, bh->b_data))
-		/* bad block bitmap */
-		goto err_out;
-
-	/* check whether the inode table block number is set */
-	bitmap_blk = le32_to_cpu(desc->bg_inode_table);
-	offset = bitmap_blk - group_first_block;
-	next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
-				offset + EXT3_SB(sb)->s_itb_per_group,
-				offset);
-	if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
-		/* good bitmap for inode tables */
-		return 1;
-
-err_out:
-	ext3_error(sb, __func__,
-			"Invalid block bitmap - "
-			"block_group = %d, block = %lu",
-			block_group, bitmap_blk);
-	return 0;
-}
-
-/**
- * read_block_bitmap()
- * @sb:			super block
- * @block_group:	given block group
- *
- * Read the bitmap for a given block_group,and validate the
- * bits for block/inode/inode tables are set in the bitmaps
- *
- * Return buffer_head on success or NULL in case of failure.
- */
-static struct buffer_head *
-read_block_bitmap(struct super_block *sb, unsigned int block_group)
-{
-	struct ext3_group_desc * desc;
-	struct buffer_head * bh = NULL;
-	ext3_fsblk_t bitmap_blk;
-
-	desc = ext3_get_group_desc(sb, block_group, NULL);
-	if (!desc)
-		return NULL;
-	trace_ext3_read_block_bitmap(sb, block_group);
-	bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
-	bh = sb_getblk(sb, bitmap_blk);
-	if (unlikely(!bh)) {
-		ext3_error(sb, __func__,
-			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_block_bitmap));
-		return NULL;
-	}
-	if (likely(bh_uptodate_or_lock(bh)))
-		return bh;
-
-	if (bh_submit_read(bh) < 0) {
-		brelse(bh);
-		ext3_error(sb, __func__,
-			    "Cannot read block bitmap - "
-			    "block_group = %d, block_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_block_bitmap));
-		return NULL;
-	}
-	ext3_valid_block_bitmap(sb, desc, block_group, bh);
-	/*
-	 * file system mounted not to panic on error, continue with corrupt
-	 * bitmap
-	 */
-	return bh;
-}
-/*
- * The reservation window structure operations
- * --------------------------------------------
- * Operations include:
- * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
- *
- * We use a red-black tree to represent per-filesystem reservation
- * windows.
- *
- */
-
-/**
- * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
- * @rb_root:		root of per-filesystem reservation rb tree
- * @verbose:		verbose mode
- * @fn:			function which wishes to dump the reservation map
- *
- * If verbose is turned on, it will print the whole block reservation
- * windows(start, end).	Otherwise, it will only print out the "bad" windows,
- * those windows that overlap with their immediate neighbors.
- */
-#if 1
-static void __rsv_window_dump(struct rb_root *root, int verbose,
-			      const char *fn)
-{
-	struct rb_node *n;
-	struct ext3_reserve_window_node *rsv, *prev;
-	int bad;
-
-restart:
-	n = rb_first(root);
-	bad = 0;
-	prev = NULL;
-
-	printk("Block Allocation Reservation Windows Map (%s):\n", fn);
-	while (n) {
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-		if (verbose)
-			printk("reservation window 0x%p "
-			       "start:  %lu, end:  %lu\n",
-			       rsv, rsv->rsv_start, rsv->rsv_end);
-		if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
-			printk("Bad reservation %p (start >= end)\n",
-			       rsv);
-			bad = 1;
-		}
-		if (prev && prev->rsv_end >= rsv->rsv_start) {
-			printk("Bad reservation %p (prev->end >= start)\n",
-			       rsv);
-			bad = 1;
-		}
-		if (bad) {
-			if (!verbose) {
-				printk("Restarting reservation walk in verbose mode\n");
-				verbose = 1;
-				goto restart;
-			}
-		}
-		n = rb_next(n);
-		prev = rsv;
-	}
-	printk("Window map complete.\n");
-	BUG_ON(bad);
-}
-#define rsv_window_dump(root, verbose) \
-	__rsv_window_dump((root), (verbose), __func__)
-#else
-#define rsv_window_dump(root, verbose) do {} while (0)
-#endif
-
-/**
- * goal_in_my_reservation()
- * @rsv:		inode's reservation window
- * @grp_goal:		given goal block relative to the allocation block group
- * @group:		the current allocation block group
- * @sb:			filesystem super block
- *
- * Test if the given goal block (group relative) is within the file's
- * own block reservation window range.
- *
- * If the reservation window is outside the goal allocation group, return 0;
- * grp_goal (given goal block) could be -1, which means no specific
- * goal block. In this case, always return 1.
- * If the goal block is within the reservation window, return 1;
- * otherwise, return 0;
- */
-static int
-goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
-			unsigned int group, struct super_block * sb)
-{
-	ext3_fsblk_t group_first_block, group_last_block;
-
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	if ((rsv->_rsv_start > group_last_block) ||
-	    (rsv->_rsv_end < group_first_block))
-		return 0;
-	if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
-		|| (grp_goal + group_first_block > rsv->_rsv_end)))
-		return 0;
-	return 1;
-}
-
-/**
- * search_reserve_window()
- * @rb_root:		root of reservation tree
- * @goal:		target allocation block
- *
- * Find the reserved window which includes the goal, or the previous one
- * if the goal is not in any window.
- * Returns NULL if there are no windows or if all windows start after the goal.
- */
-static struct ext3_reserve_window_node *
-search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
-{
-	struct rb_node *n = root->rb_node;
-	struct ext3_reserve_window_node *rsv;
-
-	if (!n)
-		return NULL;
-
-	do {
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-
-		if (goal < rsv->rsv_start)
-			n = n->rb_left;
-		else if (goal > rsv->rsv_end)
-			n = n->rb_right;
-		else
-			return rsv;
-	} while (n);
-	/*
-	 * We've fallen off the end of the tree: the goal wasn't inside
-	 * any particular node.  OK, the previous node must be to one
-	 * side of the interval containing the goal.  If it's the RHS,
-	 * we need to back up one.
-	 */
-	if (rsv->rsv_start > goal) {
-		n = rb_prev(&rsv->rsv_node);
-		rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
-	}
-	return rsv;
-}
-
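For reference, the lookup above returns the reservation window containing the goal block, or, failing that, the closest window that starts before it. Below is a minimal userspace sketch of the same rule over a sorted array of non-overlapping windows; the struct and function names are made up for illustration, and the real code walks the rb-tree instead.

#include <stdio.h>

struct window { unsigned long start, end; };   /* inclusive, non-overlapping, sorted by start */

/*
 * Return the window containing 'goal', or the last window that starts
 * before it; return NULL if every window starts after the goal.
 */
static const struct window *find_window(const struct window *w, int n,
					unsigned long goal)
{
	const struct window *best = NULL;
	int lo = 0, hi = n - 1;

	while (lo <= hi) {
		int mid = (lo + hi) / 2;

		if (goal < w[mid].start) {
			hi = mid - 1;           /* goal is left of this window */
		} else if (goal > w[mid].end) {
			best = &w[mid];         /* candidate predecessor */
			lo = mid + 1;
		} else {
			return &w[mid];         /* goal falls inside this window */
		}
	}
	return best;
}

int main(void)
{
	struct window w[] = { { 10, 19 }, { 40, 49 }, { 80, 99 } };
	const struct window *r = find_window(w, 3, 50);

	if (r)
		printf("window [%lu, %lu]\n", r->start, r->end);   /* prints [40, 49] */
	return 0;
}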
-/**
- * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
- * @sb:			super block
- * @rsv:		reservation window to add
- *
- * Must be called with rsv_lock held.
- */
-void ext3_rsv_window_add(struct super_block *sb,
-		    struct ext3_reserve_window_node *rsv)
-{
-	struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
-	struct rb_node *node = &rsv->rsv_node;
-	ext3_fsblk_t start = rsv->rsv_start;
-
-	struct rb_node ** p = &root->rb_node;
-	struct rb_node * parent = NULL;
-	struct ext3_reserve_window_node *this;
-
-	trace_ext3_rsv_window_add(sb, rsv);
-	while (*p)
-	{
-		parent = *p;
-		this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
-
-		if (start < this->rsv_start)
-			p = &(*p)->rb_left;
-		else if (start > this->rsv_end)
-			p = &(*p)->rb_right;
-		else {
-			rsv_window_dump(root, 1);
-			BUG();
-		}
-	}
-
-	rb_link_node(node, parent, p);
-	rb_insert_color(node, root);
-}
-
-/**
- * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
- * @sb:			super block
- * @rsv:		reservation window to remove
- *
- * Mark the block reservation window as not allocated, and unlink it
- * from the filesystem reservation window rb tree. Must be called with
- * rsv_lock held.
- */
-static void rsv_window_remove(struct super_block *sb,
-			      struct ext3_reserve_window_node *rsv)
-{
-	rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	rsv->rsv_alloc_hit = 0;
-	rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
-}
-
-/*
- * rsv_is_empty() -- Check if the reservation window is unallocated (empty).
- * @rsv:		given reservation window to check
- *
- * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
- */
-static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
-{
-	/* a valid reservation end block could not be 0 */
-	return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-}
-
-/**
- * ext3_init_block_alloc_info()
- * @inode:		file inode structure
- *
- * Allocate and initialize the reservation window structure, and
- * finally link the window to the ext3 inode structure.
- *
- * The reservation window structure is only dynamically allocated
- * and linked to the ext3 inode the first time the open file
- * needs a new block. So, before every ext3_new_block(s) call, for
- * regular files, we should check whether the reservation window
- * structure exists or not. If it does not, this function is called.
- * Failure to do so will result in block reservation being turned off for
- * that open file.
- *
- * This function is called from ext3_get_blocks_handle(). It is also called
- * when setting the reservation window size through ioctl before the file
- * is opened for write (needs block allocation).
- *
- * Needs truncate_mutex protection prior to calling this function.
- */
-void ext3_init_block_alloc_info(struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i;
-	struct super_block *sb = inode->i_sb;
-
-	block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
-	if (block_i) {
-		struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
-
-		rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-		rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-
-		/*
-		 * if filesystem is mounted with NORESERVATION, the goal
-		 * reservation window size is set to zero to indicate
-		 * block reservation is off
-		 */
-		if (!test_opt(sb, RESERVATION))
-			rsv->rsv_goal_size = 0;
-		else
-			rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
-		rsv->rsv_alloc_hit = 0;
-		block_i->last_alloc_logical_block = 0;
-		block_i->last_alloc_physical_block = 0;
-	}
-	ei->i_block_alloc_info = block_i;
-}
-
-/**
- * ext3_discard_reservation()
- * @inode:		inode
- *
- * Discard (free) the block reservation window on last file close, on
- * truncate, or at the last iput().
- *
- * It is called in three cases:
- *	ext3_release_file(): the last writer closes the file
- *	ext3_clear_inode(): the last iput(), when nobody links to this file.
- *	ext3_truncate(): when the block indirect map is about to change.
- *
- */
-void ext3_discard_reservation(struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
-	struct ext3_reserve_window_node *rsv;
-	spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
-
-	if (!block_i)
-		return;
-
-	rsv = &block_i->rsv_window_node;
-	if (!rsv_is_empty(&rsv->rsv_window)) {
-		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&rsv->rsv_window)) {
-			trace_ext3_discard_reservation(inode, rsv);
-			rsv_window_remove(inode->i_sb, rsv);
-		}
-		spin_unlock(rsv_lock);
-	}
-}
-
-/**
- * ext3_free_blocks_sb() -- Free given blocks and update quota
- * @handle:			handle to this transaction
- * @sb:				super block
- * @block:			start physical block to free
- * @count:			number of blocks to free
- * @pdquot_freed_blocks:	pointer to quota
- */
-void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
-			 ext3_fsblk_t block, unsigned long count,
-			 unsigned long *pdquot_freed_blocks)
-{
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *gd_bh;
-	unsigned long block_group;
-	ext3_grpblk_t bit;
-	unsigned long i;
-	unsigned long overflow;
-	struct ext3_group_desc * desc;
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi;
-	int err = 0, ret;
-	ext3_grpblk_t group_freed;
-
-	*pdquot_freed_blocks = 0;
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	if (block < le32_to_cpu(es->s_first_data_block) ||
-	    block + count < block ||
-	    block + count > le32_to_cpu(es->s_blocks_count)) {
-		ext3_error (sb, "ext3_free_blocks",
-			    "Freeing blocks not in datazone - "
-			    "block = "E3FSBLK", count = %lu", block, count);
-		goto error_return;
-	}
-
-	ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
-
-do_more:
-	overflow = 0;
-	block_group = (block - le32_to_cpu(es->s_first_data_block)) /
-		      EXT3_BLOCKS_PER_GROUP(sb);
-	bit = (block - le32_to_cpu(es->s_first_data_block)) %
-		      EXT3_BLOCKS_PER_GROUP(sb);
-	/*
-	 * Check to see if we are freeing blocks across a group
-	 * boundary.
-	 */
-	if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
-		overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
-		count -= overflow;
-	}
-	brelse(bitmap_bh);
-	bitmap_bh = read_block_bitmap(sb, block_group);
-	if (!bitmap_bh)
-		goto error_return;
-	desc = ext3_get_group_desc (sb, block_group, &gd_bh);
-	if (!desc)
-		goto error_return;
-
-	if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
-	    in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
-	    in_range (block, le32_to_cpu(desc->bg_inode_table),
-		      sbi->s_itb_per_group) ||
-	    in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
-		      sbi->s_itb_per_group)) {
-		ext3_error (sb, "ext3_free_blocks",
-			    "Freeing blocks in system zones - "
-			    "Block = "E3FSBLK", count = %lu",
-			    block, count);
-		goto error_return;
-	}
-
-	/*
-	 * We are about to start releasing blocks in the bitmap,
-	 * so we need undo access.
-	 */
-	/* @@@ check errors */
-	BUFFER_TRACE(bitmap_bh, "getting undo access");
-	err = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (err)
-		goto error_return;
-
-	/*
-	 * We are about to modify some metadata.  Call the journal APIs
-	 * to unshare ->b_data if a currently-committing transaction is
-	 * using it
-	 */
-	BUFFER_TRACE(gd_bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, gd_bh);
-	if (err)
-		goto error_return;
-
-	jbd_lock_bh_state(bitmap_bh);
-
-	for (i = 0, group_freed = 0; i < count; i++) {
-		/*
-		 * An HJ special.  This is expensive...
-		 */
-#ifdef CONFIG_JBD_DEBUG
-		jbd_unlock_bh_state(bitmap_bh);
-		{
-			struct buffer_head *debug_bh;
-			debug_bh = sb_find_get_block(sb, block + i);
-			if (debug_bh) {
-				BUFFER_TRACE(debug_bh, "Deleted!");
-				if (!bh2jh(bitmap_bh)->b_committed_data)
-					BUFFER_TRACE(debug_bh,
-						"No committed data in bitmap");
-				BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
-				__brelse(debug_bh);
-			}
-		}
-		jbd_lock_bh_state(bitmap_bh);
-#endif
-		if (need_resched()) {
-			jbd_unlock_bh_state(bitmap_bh);
-			cond_resched();
-			jbd_lock_bh_state(bitmap_bh);
-		}
-		/* @@@ This prevents newly-allocated data from being
-		 * freed and then reallocated within the same
-		 * transaction.
-		 *
-		 * Ideally we would want to allow that to happen, but to
-		 * do so requires making journal_forget() capable of
-		 * revoking the queued write of a data block, which
-		 * implies blocking on the journal lock.  *forget()
-		 * cannot block due to truncate races.
-		 *
-		 * Eventually we can fix this by making journal_forget()
-		 * return a status indicating whether or not it was able
-		 * to revoke the buffer.  On successful revoke, it is
-		 * safe not to set the allocation bit in the committed
-		 * bitmap, because we know that there is no outstanding
-		 * activity on the buffer any more and so it is safe to
-		 * reallocate it.
-		 */
-		BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
-		J_ASSERT_BH(bitmap_bh,
-				bh2jh(bitmap_bh)->b_committed_data != NULL);
-		ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
-				bh2jh(bitmap_bh)->b_committed_data);
-
-		/*
-		 * We clear the bit in the bitmap after setting the committed
-		 * data bit, because this is the reverse order to that which
-		 * the allocator uses.
-		 */
-		BUFFER_TRACE(bitmap_bh, "clear bit");
-		if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-						bit + i, bitmap_bh->b_data)) {
-			jbd_unlock_bh_state(bitmap_bh);
-			ext3_error(sb, __func__,
-				"bit already cleared for block "E3FSBLK,
-				 block + i);
-			jbd_lock_bh_state(bitmap_bh);
-			BUFFER_TRACE(bitmap_bh, "bit already cleared");
-		} else {
-			group_freed++;
-		}
-	}
-	jbd_unlock_bh_state(bitmap_bh);
-
-	spin_lock(sb_bgl_lock(sbi, block_group));
-	le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
-	spin_unlock(sb_bgl_lock(sbi, block_group));
-	percpu_counter_add(&sbi->s_freeblocks_counter, count);
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-
-	/* And the group descriptor block */
-	BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-	ret = ext3_journal_dirty_metadata(handle, gd_bh);
-	if (!err) err = ret;
-	*pdquot_freed_blocks += group_freed;
-
-	if (overflow && !err) {
-		block += count;
-		count = overflow;
-		goto do_more;
-	}
-
-error_return:
-	brelse(bitmap_bh);
-	ext3_std_error(sb, err);
-	return;
-}
-
-/**
- * ext3_free_blocks() -- Free given blocks and update quota
- * @handle:		handle for this transaction
- * @inode:		inode
- * @block:		start physical block to free
- * @count:		number of blocks to free
- */
-void ext3_free_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t block, unsigned long count)
-{
-	struct super_block *sb = inode->i_sb;
-	unsigned long dquot_freed_blocks;
-
-	trace_ext3_free_blocks(inode, block, count);
-	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
-	if (dquot_freed_blocks)
-		dquot_free_block(inode, dquot_freed_blocks);
-	return;
-}
-
-/**
- * ext3_test_allocatable()
- * @nr:			group-relative block number to test
- * @bh:			buffer head containing the bitmap of the given block group
- *
- * For ext3 allocations, we must not reuse any blocks which are
- * allocated in the bitmap buffer's "last committed data" copy.  This
- * prevents deletes from freeing up the page for reuse until we have
- * committed the delete transaction.
- *
- * If we didn't do this, then deleting something and reallocating it as
- * data would allow the old block to be overwritten before the
- * transaction committed (because we force data to disk before commit).
- * This would lead to corruption if we crashed between overwriting the
- * data and committing the delete.
- *
- * @@@ We may want to make this allocation behaviour conditional on
- * data-writes at some point, and disable it for metadata allocations or
- * sync-data inodes.
- */
-static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
-{
-	int ret;
-	struct journal_head *jh = bh2jh(bh);
-
-	if (ext3_test_bit(nr, bh->b_data))
-		return 0;
-
-	jbd_lock_bh_state(bh);
-	if (!jh->b_committed_data)
-		ret = 1;
-	else
-		ret = !ext3_test_bit(nr, jh->b_committed_data);
-	jbd_unlock_bh_state(bh);
-	return ret;
-}
-
-/**
- * bitmap_search_next_usable_block()
- * @start:		the starting block (group relative) of the search
- * @bh:			bufferhead contains the block group bitmap
- * @maxblocks:		the ending block (group relative) of the reservation
- *
- * The bitmap search --- search forward alternately through the actual
- * bitmap on disk and the last-committed copy in journal, until we find a
- * bit free in both bitmaps.
- */
-static ext3_grpblk_t
-bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-					ext3_grpblk_t maxblocks)
-{
-	ext3_grpblk_t next;
-	struct journal_head *jh = bh2jh(bh);
-
-	while (start < maxblocks) {
-		next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
-		if (next >= maxblocks)
-			return -1;
-		if (ext3_test_allocatable(next, bh))
-			return next;
-		jbd_lock_bh_state(bh);
-		if (jh->b_committed_data)
-			start = ext3_find_next_zero_bit(jh->b_committed_data,
-							maxblocks, next);
-		jbd_unlock_bh_state(bh);
-	}
-	return -1;
-}
-
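The rule encoded by ext3_test_allocatable() and the search above is that a block is usable only if its bit is clear in both the live bitmap and the last-committed copy. The following is a simplified userspace sketch with byte-array bitmaps and a plain linear scan; it omits the locking and the alternating find_next_zero_bit() optimisation of the real code.

#include <stdio.h>

/* test bit 'nr' in a little-endian style byte-array bitmap */
static int test_bit(const unsigned char *map, int nr)
{
	return (map[nr >> 3] >> (nr & 7)) & 1;
}

/* allocatable = clear in the live bitmap AND clear in the committed copy */
static int is_allocatable(const unsigned char *live,
			  const unsigned char *committed, int nr)
{
	if (test_bit(live, nr))
		return 0;
	if (committed && test_bit(committed, nr))
		return 0;
	return 1;
}

/* scan forward for the first block free in both bitmaps, or -1 if none */
static int next_usable(const unsigned char *live,
		       const unsigned char *committed, int start, int max)
{
	int i;

	for (i = start; i < max; i++)
		if (is_allocatable(live, committed, i))
			return i;
	return -1;
}

int main(void)
{
	unsigned char live[2]      = { 0x0f, 0x00 };  /* blocks 0-3 in use */
	unsigned char committed[2] = { 0x30, 0x00 };  /* blocks 4-5 freed, not yet committed */

	printf("first usable block: %d\n", next_usable(live, committed, 0, 16));  /* prints 6 */
	return 0;
}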
-/**
- * find_next_usable_block()
- * @start:		the starting block (group relative) to find next
- *			allocatable block in bitmap.
- * @bh:			bufferhead contains the block group bitmap
- * @maxblocks:		the ending block (group relative) for the search
- *
- * Find an allocatable block in a bitmap.  We honor both the bitmap and
- * its last-committed copy (if that exists), and perform the "most
- * appropriate allocation" algorithm of looking for a free block near
- * the initial goal; then for a free byte somewhere in the bitmap; then
- * for any free bit in the bitmap.
- */
-static ext3_grpblk_t
-find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
-			ext3_grpblk_t maxblocks)
-{
-	ext3_grpblk_t here, next;
-	char *p, *r;
-
-	if (start > 0) {
-		/*
-		 * The goal was occupied; search forward for a free
-		 * block within the next XX blocks.
-		 *
-		 * end_goal is more or less random, but it has to be
-		 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
-		 * next 64-bit boundary is simple..
-		 */
-		ext3_grpblk_t end_goal = (start + 63) & ~63;
-		if (end_goal > maxblocks)
-			end_goal = maxblocks;
-		here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
-		if (here < end_goal && ext3_test_allocatable(here, bh))
-			return here;
-		ext3_debug("Bit not found near goal\n");
-	}
-
-	here = start;
-	if (here < 0)
-		here = 0;
-
-	p = bh->b_data + (here >> 3);
-	r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
-	next = (r - bh->b_data) << 3;
-
-	if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
-		return next;
-
-	/*
-	 * The bitmap search --- search forward alternately through the actual
-	 * bitmap and the last-committed copy until we find a bit free in
-	 * both
-	 */
-	here = bitmap_search_next_usable_block(here, bh, maxblocks);
-	return here;
-}
-
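The three-stage heuristic above (a free bit near the goal up to the next 64-bit boundary, then a fully free byte, then any free bit) can be approximated in plain C. This is an illustrative sketch that uses memchr() in place of the kernel's memscan() and omits the committed-copy check for brevity.

#include <stdio.h>
#include <string.h>

/* find the first zero bit in [start, max), or max if none (byte-array bitmap) */
static int find_zero_bit(const unsigned char *map, int max, int start)
{
	int i;

	for (i = start; i < max; i++)
		if (!((map[i >> 3] >> (i & 7)) & 1))
			return i;
	return max;
}

static int find_next_free(const unsigned char *map, int start, int maxblocks)
{
	int end_goal, here;
	const unsigned char *p, *r;

	/* 1) look for a free bit between the goal and the next 64-bit boundary */
	if (start > 0) {
		end_goal = (start + 63) & ~63;
		if (end_goal > maxblocks)
			end_goal = maxblocks;
		here = find_zero_bit(map, end_goal, start);
		if (here < end_goal)
			return here;
	}

	/* 2) look for a completely free byte (8 free blocks in a row) */
	p = map + (start >> 3);
	r = memchr(p, 0, ((maxblocks + 7) >> 3) - (start >> 3));
	if (r) {
		here = (int)((r - map) << 3);
		if (here < maxblocks && here >= start)
			return here;
	}

	/* 3) fall back to any free bit from the start position onwards */
	here = find_zero_bit(map, maxblocks, start);
	return here < maxblocks ? here : -1;
}

int main(void)
{
	unsigned char map[8] = { 0xff, 0xff, 0x01, 0x00, 0xff, 0xff, 0xff, 0xff };

	printf("free block: %d\n", find_next_free(map, 3, 64));  /* prints 17 */
	return 0;
}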
-/**
- * claim_block()
- * @lock:		the spin lock for this block group
- * @block:		the free block (group relative) to allocate
- * @bh:			the buffer_head contains the block group bitmap
- *
- * We think we can allocate this block in this bitmap.  Try to set the bit.
- * If that succeeds then check that nobody has allocated and then freed the
- * block since we saw that it was not marked in b_committed_data.  If it _was_
- * allocated and freed then clear the bit in the bitmap again and return
- * zero (failure).
- */
-static inline int
-claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
-{
-	struct journal_head *jh = bh2jh(bh);
-	int ret;
-
-	if (ext3_set_bit_atomic(lock, block, bh->b_data))
-		return 0;
-	jbd_lock_bh_state(bh);
-	if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
-		ext3_clear_bit_atomic(lock, block, bh->b_data);
-		ret = 0;
-	} else {
-		ret = 1;
-	}
-	jbd_unlock_bh_state(bh);
-	return ret;
-}
-
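The claim step is a test-and-set on the live bitmap followed by a recheck against the committed copy, undoing the set if the block turns out to have been freed but not yet committed. Here is a single-threaded userspace sketch; the kernel version uses atomic bitops under the per-group spinlock and the buffer state lock.

#include <stdio.h>

static int test_and_set_bit(unsigned char *map, int nr)
{
	int old = (map[nr >> 3] >> (nr & 7)) & 1;

	map[nr >> 3] |= 1u << (nr & 7);
	return old;
}

static void clear_bit(unsigned char *map, int nr)
{
	map[nr >> 3] &= ~(1u << (nr & 7));
}

static int test_bit(const unsigned char *map, int nr)
{
	return (map[nr >> 3] >> (nr & 7)) & 1;
}

/* returns 1 if the block was claimed, 0 if it is (or became) unusable */
static int claim(unsigned char *live, const unsigned char *committed, int nr)
{
	if (test_and_set_bit(live, nr))
		return 0;                  /* already allocated in the live bitmap */
	if (committed && test_bit(committed, nr)) {
		clear_bit(live, nr);       /* freed but not yet committed: undo */
		return 0;
	}
	return 1;
}

int main(void)
{
	unsigned char live[1] = { 0x00 }, committed[1] = { 0x02 };

	printf("claim block 0: %d\n", claim(live, committed, 0));  /* 1 */
	printf("claim block 1: %d\n", claim(live, committed, 1));  /* 0: set in committed copy */
	return 0;
}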
-/**
- * ext3_try_to_allocate()
- * @sb:			superblock
- * @handle:		handle to this transaction
- * @group:		given allocation block group
- * @bitmap_bh:		bufferhead holds the block bitmap
- * @grp_goal:		given target block within the group
- * @count:		target number of blocks to allocate
- * @my_rsv:		reservation window
- *
- * Attempt to allocate blocks within a given range. Set the range of the
- * allocation first, then find the first free bit(s) in the bitmap (within the
- * range), and finally allocate the blocks by claiming the found free bits.
- *
- * To set the range of this allocation:
- *	if there is a reservation window, only try to allocate block(s) from the
- *	file's own reservation window;
- *	otherwise, the allocation range starts at the given goal block and ends
- *	at the block group's last block.
- *
- * If we failed to allocate the desired block then we may end up crossing to a
- * new bitmap.  In that case we must release write access to the old one via
- * ext3_journal_release_buffer(), else we'll run out of credits.
- */
-static ext3_grpblk_t
-ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-			struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
-			unsigned long *count, struct ext3_reserve_window *my_rsv)
-{
-	ext3_fsblk_t group_first_block;
-	ext3_grpblk_t start, end;
-	unsigned long num = 0;
-
-	/* we do allocation within the reservation window if we have a window */
-	if (my_rsv) {
-		group_first_block = ext3_group_first_block_no(sb, group);
-		if (my_rsv->_rsv_start >= group_first_block)
-			start = my_rsv->_rsv_start - group_first_block;
-		else
-			/* reservation window cross group boundary */
-			start = 0;
-		end = my_rsv->_rsv_end - group_first_block + 1;
-		if (end > EXT3_BLOCKS_PER_GROUP(sb))
-			/* reservation window crosses group boundary */
-			end = EXT3_BLOCKS_PER_GROUP(sb);
-		if ((start <= grp_goal) && (grp_goal < end))
-			start = grp_goal;
-		else
-			grp_goal = -1;
-	} else {
-		if (grp_goal > 0)
-			start = grp_goal;
-		else
-			start = 0;
-		end = EXT3_BLOCKS_PER_GROUP(sb);
-	}
-
-	BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
-
-repeat:
-	if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
-		grp_goal = find_next_usable_block(start, bitmap_bh, end);
-		if (grp_goal < 0)
-			goto fail_access;
-		if (!my_rsv) {
-			int i;
-
-			for (i = 0; i < 7 && grp_goal > start &&
-					ext3_test_allocatable(grp_goal - 1,
-								bitmap_bh);
-					i++, grp_goal--)
-				;
-		}
-	}
-	start = grp_goal;
-
-	if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
-		grp_goal, bitmap_bh)) {
-		/*
-		 * The block was allocated by another thread, or it was
-		 * allocated and then freed by another thread
-		 */
-		start++;
-		grp_goal++;
-		if (start >= end)
-			goto fail_access;
-		goto repeat;
-	}
-	num++;
-	grp_goal++;
-	while (num < *count && grp_goal < end
-		&& ext3_test_allocatable(grp_goal, bitmap_bh)
-		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group),
-				grp_goal, bitmap_bh)) {
-		num++;
-		grp_goal++;
-	}
-	*count = num;
-	return grp_goal - num;
-fail_access:
-	*count = num;
-	return -1;
-}
-
-/**
- *	find_next_reservable_window():
- *		find a reservable space within the given range.
- *		It does not allocate the reservation window for now:
- *		alloc_new_reservation() will do the work later.
- *
- *	@search_head: the head of the searching list;
- *		This is not necessarily the list head of the whole filesystem
- *
- *		We have both head and start_block to assist the search
- *		for the reservable space. The list starts from head,
- *		but we will shift to the place where start_block is,
- *		then start from there, when looking for a reservable space.
- *
- *	@my_rsv: the reservation window
- *
- *	@sb: the super block
- *
- *	@start_block: the first block we consider to start
- *			the real search from
- *
- *	@last_block:
- *		the maximum block number that our goal reservable space
- *		could start from. This is normally the last block in this
- *		group. The search ends when it finds that the start of the
- *		next possible reservable space is beyond this boundary.
- *		This handles reservation windows that cross a group
- *		boundary.
- *
- *	Basically we search the given range (start_block, last_block),
- *	rather than the whole reservation tree, to find a free region
- *	that is of the requested size and has not been reserved.
- *
- */
-static int find_next_reservable_window(
-				struct ext3_reserve_window_node *search_head,
-				struct ext3_reserve_window_node *my_rsv,
-				struct super_block * sb,
-				ext3_fsblk_t start_block,
-				ext3_fsblk_t last_block)
-{
-	struct rb_node *next;
-	struct ext3_reserve_window_node *rsv, *prev;
-	ext3_fsblk_t cur;
-	int size = my_rsv->rsv_goal_size;
-
-	/* TODO: make the start of the reservation window byte-aligned */
-	/* cur = *start_block & ~7;*/
-	cur = start_block;
-	rsv = search_head;
-	if (!rsv)
-		return -1;
-
-	while (1) {
-		if (cur <= rsv->rsv_end)
-			cur = rsv->rsv_end + 1;
-
-		/* TODO?
-		 * in the case where we could not find a reservable space
-		 * of the expected size, during the re-search we could
-		 * remember the largest reservable space we did find
-		 * and return that one.
-		 *
-		 * For now it will fail if we could not find a reservable
-		 * space of the expected size (or more)...
-		 */
-		if (cur > last_block)
-			return -1;		/* fail */
-
-		prev = rsv;
-		next = rb_next(&rsv->rsv_node);
-		rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
-
-		/*
-		 * Reached the last reservation, we can just append to the
-		 * previous one.
-		 */
-		if (!next)
-			break;
-
-		if (cur + size <= rsv->rsv_start) {
-			/*
-			 * Found a reservable space big enough.  We could
-			 * have a reservation across the group boundary here
-			 */
-			break;
-		}
-	}
-	/*
-	 * We come here either:
-	 * when we reach the end of the whole list and there is empty
-	 * reservable space after the last entry in the list, in which case
-	 * we append to the end of the list;
-	 *
-	 * or when we find a reservable space in the middle of the list,
-	 * in which case we return the reservation window that we can
-	 * append to. Either way we succeed.
-	 */
-
-	if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
-		rsv_window_remove(sb, my_rsv);
-
-	/*
-	 * Let's book the whole available window for now.  We will check the
-	 * disk bitmap later and then, if there are free blocks then we adjust
-	 * the window size if it's larger than requested.
-	 * Otherwise, we will remove this node from the tree the next time
-	 * find_next_reservable_window() is called.
-	 */
-	my_rsv->rsv_start = cur;
-	my_rsv->rsv_end = cur + size - 1;
-	my_rsv->rsv_alloc_hit = 0;
-
-	if (prev != my_rsv)
-		ext3_rsv_window_add(sb, my_rsv);
-
-	return 0;
-}
-
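At its core the search above walks the windows in start order looking for a gap of at least rsv_goal_size blocks, no earlier than start_block and no later than last_block. Below is a simplified userspace sketch over a sorted array; the real code walks the rb-tree and re-links my_rsv into it, which the sketch does not attempt.

#include <stdio.h>

struct window { unsigned long start, end; };   /* inclusive, disjoint, sorted by start */

/*
 * Find the first position 'cur' >= start_block such that [cur, cur+size-1]
 * does not overlap any existing window and cur <= last_block.
 * Returns 0 and stores the position in *out on success, -1 on failure.
 */
static int find_gap(const struct window *w, int n, unsigned long size,
		    unsigned long start_block, unsigned long last_block,
		    unsigned long *out)
{
	unsigned long cur = start_block;
	int i;

	for (i = 0; i < n; i++) {
		if (cur > last_block)
			return -1;
		if (cur + size <= w[i].start)
			break;                       /* gap before window i */
		if (cur <= w[i].end)
			cur = w[i].end + 1;          /* skip past window i */
	}
	if (cur > last_block)
		return -1;
	*out = cur;
	return 0;
}

int main(void)
{
	struct window w[] = { { 0, 7 }, { 10, 19 }, { 24, 31 } };
	unsigned long pos;

	if (!find_gap(w, 3, 4, 0, 63, &pos))
		printf("reservable space starts at %lu\n", pos);   /* prints 20 */
	return 0;
}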
-/**
- *	alloc_new_reservation()--allocate a new reservation window
- *
- *		To make a new reservation, we search part of the filesystem
- *		reservation list (the part inside the group). We try to
- *		allocate a new reservation window near the allocation goal,
- *		or at the beginning of the group if there is no goal.
- *
- *		We first find a reservable space after the goal, then from
- *		there we check the bitmap for the first free block after
- *		it. If there is no free block until the end of the group, the
- *		whole group is full and we fail. Otherwise, we check whether
- *		the free block is inside the expected reservable space; if so,
- *		we succeed.
- *		If the first free block is outside the reservable space, we
- *		start from that free block, search for the next available
- *		space, and go on.
- *
- *	On success, a new reservation is found and inserted into the list.
- *	It contains at least one free block, and it does not overlap with other
- *	reservation windows.
- *
- *	On failure, no reservation window could be found in this group.
- *
- *	@my_rsv: the reservation window
- *
- *	@grp_goal: The goal (group-relative).  It is where the search for a
- *		free reservable space should start from.
- *		If we have a grp_goal (grp_goal >= 0), then start from there;
- *		with no grp_goal (grp_goal = -1), we start from the first block
- *		of the group.
- *
- *	@sb: the super block
- *	@group: the group we are trying to allocate in
- *	@bitmap_bh: the block group block bitmap
- *
- */
-static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
-		ext3_grpblk_t grp_goal, struct super_block *sb,
-		unsigned int group, struct buffer_head *bitmap_bh)
-{
-	struct ext3_reserve_window_node *search_head;
-	ext3_fsblk_t group_first_block, group_end_block, start_block;
-	ext3_grpblk_t first_free_block;
-	struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
-	unsigned long size;
-	int ret;
-	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
-
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	if (grp_goal < 0)
-		start_block = group_first_block;
-	else
-		start_block = grp_goal + group_first_block;
-
-	trace_ext3_alloc_new_reservation(sb, start_block);
-	size = my_rsv->rsv_goal_size;
-
-	if (!rsv_is_empty(&my_rsv->rsv_window)) {
-		/*
-		 * if the old reservation crosses the group boundary
-		 * and the goal is inside the old reservation window,
-		 * we will come here when we just failed to allocate from
-		 * the first part of the window. We still have another part
-		 * that belongs to the next group. In this case, there is no
-		 * point in discarding our window and trying to allocate a new
-		 * one in this group (which would fail). We should keep the
-		 * reservation window and simply move on.
-		 *
-		 * Maybe we could shift the start block of the reservation
-		 * window to the first block of next group.
-		 */
-
-		if ((my_rsv->rsv_start <= group_end_block) &&
-				(my_rsv->rsv_end > group_end_block) &&
-				(start_block >= my_rsv->rsv_start))
-			return -1;
-
-		if ((my_rsv->rsv_alloc_hit >
-		     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
-			/*
-			 * if the previous allocation hit ratio is
-			 * greater than 1/2, then we double the size of
-			 * the reservation window the next time,
-			 * otherwise we keep the same size window
-			 */
-			size = size * 2;
-			if (size > EXT3_MAX_RESERVE_BLOCKS)
-				size = EXT3_MAX_RESERVE_BLOCKS;
-			my_rsv->rsv_goal_size= size;
-		}
-	}
-
-	spin_lock(rsv_lock);
-	/*
-	 * shift the search start to the window near the goal block
-	 */
-	search_head = search_reserve_window(fs_rsv_root, start_block);
-
-	/*
-	 * find_next_reservable_window() simply finds a reservable window
-	 * inside the given range(start_block, group_end_block).
-	 *
-	 * need to check the bitmap after we find a reservable window.
-	 * need to check the bitmap after we found a reservable window.
-	 */
-retry:
-	ret = find_next_reservable_window(search_head, my_rsv, sb,
-						start_block, group_end_block);
-
-	if (ret == -1) {
-		if (!rsv_is_empty(&my_rsv->rsv_window))
-			rsv_window_remove(sb, my_rsv);
-		spin_unlock(rsv_lock);
-		return -1;
-	}
-
-	/*
-	 * On success, find_next_reservable_window() returns the
-	 * reservation window where there is a reservable space after it.
-	 * Before we reserve this reservable space, we need
-	 * to make sure there is at least one free block inside this region.
-	 *
-	 * We search for the first free bit alternately in the block bitmap
-	 * and in the copy of the last committed bitmap, until we find an
-	 * allocatable block. The search starts from the start block of the
-	 * reservable space we just found.
-	 */
-	spin_unlock(rsv_lock);
-	first_free_block = bitmap_search_next_usable_block(
-			my_rsv->rsv_start - group_first_block,
-			bitmap_bh, group_end_block - group_first_block + 1);
-
-	if (first_free_block < 0) {
-		/*
-		 * no free blocks left in the bitmap, so there is no point
-		 * in reserving the space. Return failure.
-		 */
-		spin_lock(rsv_lock);
-		if (!rsv_is_empty(&my_rsv->rsv_window))
-			rsv_window_remove(sb, my_rsv);
-		spin_unlock(rsv_lock);
-		return -1;		/* failed */
-	}
-
-	start_block = first_free_block + group_first_block;
-	/*
-	 * check if the first free block is within the
-	 * free space we just reserved
-	 */
-	if (start_block >= my_rsv->rsv_start &&
-	    start_block <= my_rsv->rsv_end) {
-		trace_ext3_reserved(sb, start_block, my_rsv);
-		return 0;		/* success */
-	}
-	/*
-	 * if the first free bit we found is out of the reservable space
-	 * continue search for next reservable space,
-	 * start from where the free block is,
-	 * we also shift the list head to where we stopped last time
-	 */
-	search_head = my_rsv;
-	spin_lock(rsv_lock);
-	goto retry;
-}
-
-/**
- * try_to_extend_reservation()
- * @my_rsv:		given reservation window
- * @sb:			super block
- * @size:		the delta to extend
- *
- * Attempt to expand the reservation window so that it is large enough to
- * hold the required number of free blocks.
- *
- * Since ext3_try_to_allocate() will always allocate blocks within
- * the reservation window range, if the window size is too small,
- * a multiple-block allocation has to stop at the end of the reservation
- * window. To make this more efficient, given the total number of
- * blocks needed and the current size of the window, we try to
- * expand the reservation window size if necessary on a best-effort
- * basis before ext3_new_blocks() tries to allocate blocks.
- */
-static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
-			struct super_block *sb, int size)
-{
-	struct ext3_reserve_window_node *next_rsv;
-	struct rb_node *next;
-	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
-
-	if (!spin_trylock(rsv_lock))
-		return;
-
-	next = rb_next(&my_rsv->rsv_node);
-
-	if (!next)
-		my_rsv->rsv_end += size;
-	else {
-		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
-
-		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
-			my_rsv->rsv_end += size;
-		else
-			my_rsv->rsv_end = next_rsv->rsv_start - 1;
-	}
-	spin_unlock(rsv_lock);
-}
-
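The extension rule above is simply: grow the window end by the requested delta, but never into the next window. A tiny illustrative sketch, without the trylock, assuming disjoint windows:

#include <stdio.h>

struct window { unsigned long start, end; };   /* disjoint, inclusive */

/* grow w->end by 'delta' blocks, but stop just before 'next' if it exists */
static void extend_window(struct window *w, const struct window *next,
			  unsigned long delta)
{
	if (!next)
		w->end += delta;
	else if (next->start - w->end - 1 >= delta)
		w->end += delta;
	else
		w->end = next->start - 1;
}

int main(void)
{
	struct window mine = { 100, 107 }, next = { 112, 119 };

	extend_window(&mine, &next, 8);
	printf("window now [%lu, %lu]\n", mine.start, mine.end);   /* prints [100, 111] */
	return 0;
}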
-/**
- * ext3_try_to_allocate_with_rsv()
- * @sb:			superblock
- * @handle:		handle to this transaction
- * @group:		given allocation block group
- * @bitmap_bh:		bufferhead holds the block bitmap
- * @grp_goal:		given target block within the group
- * @my_rsv:		reservation window
- * @count:		target number of blocks to allocate
- * @errp:		pointer to store the error code
- *
- * This is the main function used to allocate a new block and its reservation
- * window.
- *
- * Each time a new block allocation is needed, first try to allocate from
- * the inode's own reservation.  If it does not have a reservation window,
- * then instead of looking for a free bit in the bitmap first and then looking
- * up the reservation list to see if it is inside somebody else's reservation
- * window, we try to allocate a reservation window for it starting from the
- * goal, and then do the block allocation within that reservation window.
- *
- * This will avoid keeping on searching the reservation list again and
- * again when somebody is looking for a free block (without
- * reservation), and there are lots of free blocks, but they are all
- * being reserved.
- *
- * We use a red-black tree for the per-filesystem reservation list.
- *
- */
-static ext3_grpblk_t
-ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
-			unsigned int group, struct buffer_head *bitmap_bh,
-			ext3_grpblk_t grp_goal,
-			struct ext3_reserve_window_node * my_rsv,
-			unsigned long *count, int *errp)
-{
-	ext3_fsblk_t group_first_block, group_last_block;
-	ext3_grpblk_t ret = 0;
-	int fatal;
-	unsigned long num = *count;
-
-	*errp = 0;
-
-	/*
-	 * Make sure we use undo access for the bitmap, because it is critical
-	 * that we do the frozen_data COW on bitmap buffers in all cases even
-	 * if the buffer is in BJ_Forget state in the committing transaction.
-	 */
-	BUFFER_TRACE(bitmap_bh, "get undo access for new block");
-	fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (fatal) {
-		*errp = fatal;
-		return -1;
-	}
-
-	/*
-	 * we don't deal with reservation when
-	 * filesystem is mounted without reservation
-	 * or the file is not a regular file
-	 * or last attempt to allocate a block with reservation turned on failed
-	 */
-	if (my_rsv == NULL ) {
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
-						grp_goal, count, NULL);
-		goto out;
-	}
-	/*
-	 * grp_goal is a group-relative block number (if there is a goal):
-	 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
-	 * group_first_block is a filesystem-wide block number; it is the
-	 * block number of the first block in this group
-	 */
-	group_first_block = ext3_group_first_block_no(sb, group);
-	group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-	/*
-	 * Basically we will allocate a new block from inode's reservation
-	 * window.
-	 *
-	 * We need to allocate a new reservation window, if:
-	 * a) inode does not have a reservation window; or
-	 * b) last attempt to allocate a block from existing reservation
-	 *    failed; or
-	 * c) we come here with a goal and with a reservation window
-	 *
-	 * We do not need to allocate a new reservation window if we come here
-	 * at the beginning with a goal and the goal is inside the window, or
-	 * if we don't have a goal but already have a reservation window.
-	 * In those cases we can allocate from the reservation window directly.
-	 */
-	while (1) {
-		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
-			!goal_in_my_reservation(&my_rsv->rsv_window,
-						grp_goal, group, sb)) {
-			if (my_rsv->rsv_goal_size < *count)
-				my_rsv->rsv_goal_size = *count;
-			ret = alloc_new_reservation(my_rsv, grp_goal, sb,
-							group, bitmap_bh);
-			if (ret < 0)
-				break;			/* failed */
-
-			if (!goal_in_my_reservation(&my_rsv->rsv_window,
-							grp_goal, group, sb))
-				grp_goal = -1;
-		} else if (grp_goal >= 0) {
-			int curr = my_rsv->rsv_end -
-					(grp_goal + group_first_block) + 1;
-
-			if (curr < *count)
-				try_to_extend_reservation(my_rsv, sb,
-							*count - curr);
-		}
-
-		if ((my_rsv->rsv_start > group_last_block) ||
-				(my_rsv->rsv_end < group_first_block)) {
-			rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
-			BUG();
-		}
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
-					   grp_goal, &num, &my_rsv->rsv_window);
-		if (ret >= 0) {
-			my_rsv->rsv_alloc_hit += num;
-			*count = num;
-			break;				/* succeed */
-		}
-		num = *count;
-	}
-out:
-	if (ret >= 0) {
-		BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
-					"bitmap block");
-		fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
-		if (fatal) {
-			*errp = fatal;
-			return -1;
-		}
-		return ret;
-	}
-
-	BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
-	ext3_journal_release_buffer(handle, bitmap_bh);
-	return ret;
-}
-
-/**
- * ext3_has_free_blocks()
- * @sbi:		in-core super block structure.
- *
- * Check if filesystem has at least 1 free block available for allocation.
- */
-static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
-{
-	ext3_fsblk_t free_blocks, root_blocks;
-
-	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-	root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
-	if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
-		!use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
-		(gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
-		 !in_group_p (sbi->s_resgid))) {
-		return 0;
-	}
-	return 1;
-}
-
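The arithmetic behind this check keeps the last s_r_blocks_count blocks for privileged users: an ordinary allocation is refused once only the reserved blocks (or fewer) remain free. A userspace sketch of just that arithmetic, leaving out the capability, resuid and resgid exemptions:

#include <stdio.h>

/*
 * Return 1 if a non-privileged, non-reservation allocation may proceed,
 * i.e. there is at least one free block above the root-reserved pool.
 */
static int has_free_blocks(unsigned long long free_blocks,
			   unsigned long long root_blocks)
{
	return free_blocks >= root_blocks + 1;
}

int main(void)
{
	/* e.g. 50 blocks of a small filesystem reserved for root */
	printf("%d\n", has_free_blocks(51, 50));   /* 1: one block of headroom left */
	printf("%d\n", has_free_blocks(50, 50));   /* 0: only the reserved pool remains */
	return 0;
}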
-/**
- * ext3_should_retry_alloc()
- * @sb:			super block
- * @retries:		number of attempts that have been made
- *
- * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
- * it is profitable to retry the operation, this function will wait
- * for the current or committing transaction to complete, and then
- * return TRUE.
- *
- * If the total number of retries exceeds three, return FALSE.
- */
-int ext3_should_retry_alloc(struct super_block *sb, int *retries)
-{
-	if (!ext3_has_free_blocks(EXT3_SB(sb), 0) || (*retries)++ > 3)
-		return 0;
-
-	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
-
-	return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
-}
-
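A typical caller wraps its allocation in a retry loop: on ENOSPC it asks ext3_should_retry_alloc() whether waiting for a transaction commit might free space, and gives up after a few rounds. The following is a hypothetical userspace skeleton of that loop; do_allocation() and should_retry_alloc() are stand-ins made up for illustration, not the kernel functions, and the free-blocks check is omitted.

#include <stdio.h>
#include <errno.h>

/* stand-in for the real allocation: pretend space frees up on the third try */
static int do_allocation(int attempt)
{
	return attempt < 2 ? -ENOSPC : 0;
}

/* stand-in mirroring the ext3 rule: give up after three retries */
static int should_retry_alloc(int *retries)
{
	return (*retries)++ <= 3;
}

int main(void)
{
	int retries = 0, attempt = 0, err;

	do {
		err = do_allocation(attempt++);
	} while (err == -ENOSPC && should_retry_alloc(&retries));

	printf("result: %d after %d attempts\n", err, attempt);
	return 0;
}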
-/**
- * ext3_new_blocks() -- core block(s) allocation function
- * @handle:		handle to this transaction
- * @inode:		file inode
- * @goal:		given target block(filesystem wide)
- * @count:		target number of blocks to allocate
- * @errp:		error code
- *
- * ext3_new_blocks uses a goal block to assist allocation.  It first tries to
- * allocate block(s) from the block group that contains the goal block. If that
- * fails, it will try to allocate block(s) from other block groups without
- * any specific goal block.
- *
- */
-ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, unsigned long *count, int *errp)
-{
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *gdp_bh;
-	int group_no;
-	int goal_group;
-	ext3_grpblk_t grp_target_blk;	/* blockgroup relative goal block */
-	ext3_grpblk_t grp_alloc_blk;	/* blockgroup-relative allocated block*/
-	ext3_fsblk_t ret_block;		/* filesystem-wide allocated block */
-	int bgi;			/* blockgroup iteration index */
-	int fatal = 0, err;
-	int performed_allocation = 0;
-	ext3_grpblk_t free_blocks;	/* number of free blocks in a group */
-	struct super_block *sb;
-	struct ext3_group_desc *gdp;
-	struct ext3_super_block *es;
-	struct ext3_sb_info *sbi;
-	struct ext3_reserve_window_node *my_rsv = NULL;
-	struct ext3_block_alloc_info *block_i;
-	unsigned short windowsz = 0;
-#ifdef EXT3FS_DEBUG
-	static int goal_hits, goal_attempts;
-#endif
-	unsigned long ngroups;
-	unsigned long num = *count;
-
-	*errp = -ENOSPC;
-	sb = inode->i_sb;
-
-	/*
-	 * Check quota for allocation of this block.
-	 */
-	err = dquot_alloc_block(inode, num);
-	if (err) {
-		*errp = err;
-		return 0;
-	}
-
-	trace_ext3_request_blocks(inode, goal, num);
-
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	ext3_debug("goal=%lu.\n", goal);
-	/*
-	 * Allocate a block from reservation only when
-	 * filesystem is mounted with reservation (default, -o reservation), and
-	 * it's a regular file, and
-	 * the desired window size is greater than 0 (One could use ioctl
-	 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
-	 * reservation on that particular file)
-	 */
-	block_i = EXT3_I(inode)->i_block_alloc_info;
-	if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
-		my_rsv = &block_i->rsv_window_node;
-
-	if (!ext3_has_free_blocks(sbi, IS_NOQUOTA(inode))) {
-		*errp = -ENOSPC;
-		goto out;
-	}
-
-	/*
-	 * First, test whether the goal block is free.
-	 */
-	if (goal < le32_to_cpu(es->s_first_data_block) ||
-	    goal >= le32_to_cpu(es->s_blocks_count))
-		goal = le32_to_cpu(es->s_first_data_block);
-	group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
-			EXT3_BLOCKS_PER_GROUP(sb);
-	goal_group = group_no;
-retry_alloc:
-	gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-	if (!gdp)
-		goto io_error;
-
-	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-	/*
-	 * if there are not enough free blocks to make a new reservation,
-	 * turn off reservation for this allocation
-	 */
-	if (my_rsv && (free_blocks < windowsz)
-		&& (free_blocks > 0)
-		&& (rsv_is_empty(&my_rsv->rsv_window)))
-		my_rsv = NULL;
-
-	if (free_blocks > 0) {
-		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
-				EXT3_BLOCKS_PER_GROUP(sb));
-		bitmap_bh = read_block_bitmap(sb, group_no);
-		if (!bitmap_bh)
-			goto io_error;
-		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
-					group_no, bitmap_bh, grp_target_blk,
-					my_rsv,	&num, &fatal);
-		if (fatal)
-			goto out;
-		if (grp_alloc_blk >= 0)
-			goto allocated;
-	}
-
-	ngroups = EXT3_SB(sb)->s_groups_count;
-	smp_rmb();
-
-	/*
-	 * Now search the rest of the groups.  We assume that
-	 * group_no and gdp correctly point to the last group visited.
-	 */
-	for (bgi = 0; bgi < ngroups; bgi++) {
-		group_no++;
-		if (group_no >= ngroups)
-			group_no = 0;
-		gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
-		if (!gdp)
-			goto io_error;
-		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-		/*
-		 * skip this group (and avoid loading bitmap) if there
-		 * are no free blocks
-		 */
-		if (!free_blocks)
-			continue;
-		/*
-		 * skip this group if the number of
-		 * free blocks is less than half of the reservation
-		 * window size.
-		 */
-		if (my_rsv && (free_blocks <= (windowsz/2)))
-			continue;
-
-		brelse(bitmap_bh);
-		bitmap_bh = read_block_bitmap(sb, group_no);
-		if (!bitmap_bh)
-			goto io_error;
-		/*
-		 * try to allocate block(s) from this group, without a goal(-1).
-		 */
-		grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
-					group_no, bitmap_bh, -1, my_rsv,
-					&num, &fatal);
-		if (fatal)
-			goto out;
-		if (grp_alloc_blk >= 0)
-			goto allocated;
-	}
-	/*
-	 * We may end up with a bogus ENOSPC error because the
-	 * filesystem is "full" of reservations, while there may
-	 * in fact be free blocks available on disk.
-	 * In this case, we just forget about the reservations and
-	 * do the block allocation as if there were no reservations.
-	 */
-	if (my_rsv) {
-		my_rsv = NULL;
-		windowsz = 0;
-		group_no = goal_group;
-		goto retry_alloc;
-	}
-	/* No space left on the device */
-	*errp = -ENOSPC;
-	goto out;
-
-allocated:
-
-	ext3_debug("using block group %d(%d)\n",
-			group_no, gdp->bg_free_blocks_count);
-
-	BUFFER_TRACE(gdp_bh, "get_write_access");
-	fatal = ext3_journal_get_write_access(handle, gdp_bh);
-	if (fatal)
-		goto out;
-
-	ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
-
-	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
-	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
-	    in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
-		      EXT3_SB(sb)->s_itb_per_group) ||
-	    in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
-		      EXT3_SB(sb)->s_itb_per_group)) {
-		ext3_error(sb, "ext3_new_block",
-			    "Allocating block in system zone - "
-			    "blocks from "E3FSBLK", length %lu",
-			     ret_block, num);
-		/*
-		 * claim_block() marked the blocks we allocated as in use. So we
-		 * may want to selectively mark some of the blocks as free.
-		 */
-		goto retry_alloc;
-	}
-
-	performed_allocation = 1;
-
-#ifdef CONFIG_JBD_DEBUG
-	{
-		struct buffer_head *debug_bh;
-
-		/* Record bitmap buffer state in the newly allocated block */
-		debug_bh = sb_find_get_block(sb, ret_block);
-		if (debug_bh) {
-			BUFFER_TRACE(debug_bh, "state when allocated");
-			BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
-			brelse(debug_bh);
-		}
-	}
-	jbd_lock_bh_state(bitmap_bh);
-	spin_lock(sb_bgl_lock(sbi, group_no));
-	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
-		int i;
-
-		for (i = 0; i < num; i++) {
-			if (ext3_test_bit(grp_alloc_blk+i,
-					bh2jh(bitmap_bh)->b_committed_data)) {
-				printk("%s: block was unexpectedly set in "
-					"b_committed_data\n", __func__);
-			}
-		}
-	}
-	ext3_debug("found bit %d\n", grp_alloc_blk);
-	spin_unlock(sb_bgl_lock(sbi, group_no));
-	jbd_unlock_bh_state(bitmap_bh);
-#endif
-
-	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
-		ext3_error(sb, "ext3_new_block",
-			    "block("E3FSBLK") >= blocks count(%d) - "
-			    "block_group = %d, es == %p ", ret_block,
-			le32_to_cpu(es->s_blocks_count), group_no, es);
-		goto out;
-	}
-
-	/*
-	 * It is up to the caller to add the new buffer to a journal
-	 * list of some description.  We don't know in advance whether
-	 * the caller wants to use it as metadata or data.
-	 */
-	ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
-			ret_block, goal_hits, goal_attempts);
-
-	spin_lock(sb_bgl_lock(sbi, group_no));
-	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
-	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
-
-	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
-	fatal = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (fatal)
-		goto out;
-
-	*errp = 0;
-	brelse(bitmap_bh);
-
-	if (num < *count) {
-		dquot_free_block(inode, *count-num);
-		*count = num;
-	}
-
-	trace_ext3_allocate_blocks(inode, goal, num,
-				   (unsigned long long)ret_block);
-
-	return ret_block;
-
-io_error:
-	*errp = -EIO;
-out:
-	if (fatal) {
-		*errp = fatal;
-		ext3_std_error(sb, fatal);
-	}
-	/*
-	 * Undo the block allocation
-	 */
-	if (!performed_allocation)
-		dquot_free_block(inode, *count);
-	brelse(bitmap_bh);
-	return 0;
-}
-
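The goal-directed part of ext3_new_blocks() is mostly arithmetic: a filesystem-wide goal block maps to a (group, offset) pair, and if that group cannot satisfy the request the remaining groups are scanned round-robin. A userspace sketch of that mapping and scan order, with made-up geometry; the constants are illustrative, not on-disk values.

#include <stdio.h>

#define FIRST_DATA_BLOCK 1UL
#define BLOCKS_PER_GROUP 8192UL
#define NGROUPS 16UL

static void goal_to_group(unsigned long goal,
			  unsigned long *group, unsigned long *offset)
{
	if (goal < FIRST_DATA_BLOCK ||
	    goal >= FIRST_DATA_BLOCK + NGROUPS * BLOCKS_PER_GROUP)
		goal = FIRST_DATA_BLOCK;          /* fall back to the first data block */
	*group  = (goal - FIRST_DATA_BLOCK) / BLOCKS_PER_GROUP;
	*offset = (goal - FIRST_DATA_BLOCK) % BLOCKS_PER_GROUP;
}

int main(void)
{
	unsigned long group, offset, g, i;

	goal_to_group(20000, &group, &offset);
	printf("goal 20000 -> group %lu, offset %lu\n", group, offset);

	/* round-robin order of the remaining groups if the goal group is full */
	printf("scan order:");
	for (i = 1, g = group; i < NGROUPS; i++) {
		g = (g + 1) % NGROUPS;
		printf(" %lu", g);
	}
	printf("\n");
	return 0;
}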
-ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int *errp)
-{
-	unsigned long count = 1;
-
-	return ext3_new_blocks(handle, inode, goal, &count, errp);
-}
-
-/**
- * ext3_count_free_blocks() -- count filesystem free blocks
- * @sb:		superblock
- *
- * Adds up the number of free blocks from each block group.
- */
-ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
-{
-	ext3_fsblk_t desc_count;
-	struct ext3_group_desc *gdp;
-	int i;
-	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
-#ifdef EXT3FS_DEBUG
-	struct ext3_super_block *es;
-	ext3_fsblk_t bitmap_count;
-	unsigned long x;
-	struct buffer_head *bitmap_bh = NULL;
-
-	es = EXT3_SB(sb)->s_es;
-	desc_count = 0;
-	bitmap_count = 0;
-	gdp = NULL;
-
-	smp_rmb();
-	for (i = 0; i < ngroups; i++) {
-		gdp = ext3_get_group_desc(sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
-		brelse(bitmap_bh);
-		bitmap_bh = read_block_bitmap(sb, i);
-		if (bitmap_bh == NULL)
-			continue;
-
-		x = ext3_count_free(bitmap_bh, sb->s_blocksize);
-		printk("group %d: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_blocks_count), x);
-		bitmap_count += x;
-	}
-	brelse(bitmap_bh);
-	printk("ext3_count_free_blocks: stored = "E3FSBLK
-		", computed = "E3FSBLK", "E3FSBLK"\n",
-	       (ext3_fsblk_t)le32_to_cpu(es->s_free_blocks_count),
-		desc_count, bitmap_count);
-	return bitmap_count;
-#else
-	desc_count = 0;
-	smp_rmb();
-	for (i = 0; i < ngroups; i++) {
-		gdp = ext3_get_group_desc(sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
-	}
-
-	return desc_count;
-#endif
-}
-
-static inline int test_root(int a, int b)
-{
-	int num = b;
-
-	while (a > num)
-		num *= b;
-	return num == a;
-}
-
-static int ext3_group_sparse(int group)
-{
-	if (group <= 1)
-		return 1;
-	if (!(group & 1))
-		return 0;
-	return (test_root(group, 7) || test_root(group, 5) ||
-		test_root(group, 3));
-}
-
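With the sparse_super feature, superblock and descriptor backups are kept only in groups 0 and 1 and in groups whose number is a power of 3, 5 or 7, which is what test_root() and ext3_group_sparse() encode. A standalone sketch that reuses the same logic to list those groups for a small filesystem:

#include <stdio.h>

static int test_root(int a, int b)
{
	int num = b;

	while (a > num)
		num *= b;
	return num == a;
}

static int group_sparse(int group)
{
	if (group <= 1)
		return 1;
	if (!(group & 1))
		return 0;               /* even groups (other than 0) never carry a backup */
	return test_root(group, 7) || test_root(group, 5) || test_root(group, 3);
}

int main(void)
{
	int g;

	printf("groups with a superblock backup:");
	for (g = 0; g < 100; g++)
		if (group_sparse(g))
			printf(" %d", g);
	printf("\n");   /* 0 1 3 5 7 9 25 27 49 81 */
	return 0;
}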
-/**
- *	ext3_bg_has_super - number of blocks used by the superblock in group
- *	@sb: superblock for filesystem
- *	@group: group number to check
- *
- *	Return the number of blocks used by the superblock (primary or backup)
- *	in this group.  Currently this will be only 0 or 1.
- */
-int ext3_bg_has_super(struct super_block *sb, int group)
-{
-	if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
-				EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
-			!ext3_group_sparse(group))
-		return 0;
-	return 1;
-}
-
-static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
-{
-	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
-	unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
-	unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;
-
-	if (group == first || group == first + 1 || group == last)
-		return 1;
-	return 0;
-}
-
-static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
-{
-	return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
-}
-
-/**
- *	ext3_bg_num_gdb - number of blocks used by the group table in group
- *	@sb: superblock for filesystem
- *	@group: group number to check
- *
- *	Return the number of blocks used by the group descriptor table
- *	(primary or backup) in this group.  In the future there may be a
- *	different number of descriptor blocks in each group.
- */
-unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
-{
-	unsigned long first_meta_bg =
-			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
-	unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) ||
-			metagroup < first_meta_bg)
-		return ext3_bg_num_gdb_nometa(sb,group);
-
-	return ext3_bg_num_gdb_meta(sb,group);
-
-}
-
-/**
- * ext3_trim_all_free -- function to trim all free space in alloc. group
- * @sb:			super block for file system
- * @group:		allocation group to trim
- * @start:		first group block to examine
- * @max:		last group block to examine
- * @gdp:		allocation group description structure
- * @minblocks:		minimum extent block count
- *
- * ext3_trim_all_free walks through group's block bitmap searching for free
- * blocks. When a free block is found, it tries to allocate this block and
- * the consecutive free blocks to get the biggest free extent possible, until
- * it reaches any used block. It then issues a TRIM command on this extent and
- * frees the extent in the block bitmap. This is done until the whole group is
- * scanned.
- */
-static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
-					unsigned int group,
-					ext3_grpblk_t start, ext3_grpblk_t max,
-					ext3_grpblk_t minblocks)
-{
-	handle_t *handle;
-	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
-	ext3_fsblk_t discard_block;
-	struct ext3_sb_info *sbi;
-	struct buffer_head *gdp_bh, *bitmap_bh = NULL;
-	struct ext3_group_desc *gdp;
-	int err = 0, ret = 0;
-
-	/*
-	 * We will update one block bitmap, and one group descriptor
-	 */
-	handle = ext3_journal_start_sb(sb, 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	bitmap_bh = read_block_bitmap(sb, group);
-	if (!bitmap_bh) {
-		err = -EIO;
-		goto err_out;
-	}
-
-	BUFFER_TRACE(bitmap_bh, "getting undo access");
-	err = ext3_journal_get_undo_access(handle, bitmap_bh);
-	if (err)
-		goto err_out;
-
-	gdp = ext3_get_group_desc(sb, group, &gdp_bh);
-	if (!gdp) {
-		err = -EIO;
-		goto err_out;
-	}
-
-	BUFFER_TRACE(gdp_bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, gdp_bh);
-	if (err)
-		goto err_out;
-
-	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
-	sbi = EXT3_SB(sb);
-
-	 /* Walk through the whole group */
-	while (start <= max) {
-		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
-		if (start < 0)
-			break;
-		next = start;
-
-		/*
-		 * Allocate contiguous free extents by setting bits in the
-		 * block bitmap
-		 */
-		while (next <= max
-			&& claim_block(sb_bgl_lock(sbi, group),
-					next, bitmap_bh)) {
-			next++;
-		}
-
-		 /* We did not claim any blocks */
-		if (next == start)
-			continue;
-
-		discard_block = (ext3_fsblk_t)start +
-				ext3_group_first_block_no(sb, group);
-
-		/* Update counters */
-		spin_lock(sb_bgl_lock(sbi, group));
-		le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
-		spin_unlock(sb_bgl_lock(sbi, group));
-		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
-
-		free_blocks -= next - start;
-		/* Do not issue a TRIM on extents smaller than minblocks */
-		if ((next - start) < minblocks)
-			goto free_extent;
-
-		trace_ext3_discard_blocks(sb, discard_block, next - start);
-		 /* Send the TRIM command down to the device */
-		err = sb_issue_discard(sb, discard_block, next - start,
-				       GFP_NOFS, 0);
-		count += (next - start);
-free_extent:
-		freed = 0;
-
-		/*
-		 * Clear bits in the bitmap
-		 */
-		for (bit = start; bit < next; bit++) {
-			BUFFER_TRACE(bitmap_bh, "clear bit");
-			if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
-						bit, bitmap_bh->b_data)) {
-				ext3_error(sb, __func__,
-					"bit already cleared for block "E3FSBLK,
-					 (unsigned long)bit);
-				BUFFER_TRACE(bitmap_bh, "bit already cleared");
-			} else {
-				freed++;
-			}
-		}
-
-		/* Update counters */
-		spin_lock(sb_bgl_lock(sbi, group));
-		le16_add_cpu(&gdp->bg_free_blocks_count, freed);
-		spin_unlock(sb_bgl_lock(sbi, group));
-		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
-
-		start = next;
-		if (err < 0) {
-			if (err != -EOPNOTSUPP)
-				ext3_warning(sb, __func__, "Discard command "
-					     "returned error %d\n", err);
-			break;
-		}
-
-		if (fatal_signal_pending(current)) {
-			err = -ERESTARTSYS;
-			break;
-		}
-
-		cond_resched();
-
-		/* No more suitable extents */
-		if (free_blocks < minblocks)
-			break;
-	}
-
-	/* We dirtied the bitmap block */
-	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-	ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
-	if (!err)
-		err = ret;
-
-	/* And the group descriptor block */
-	BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
-	ret = ext3_journal_dirty_metadata(handle, gdp_bh);
-	if (!err)
-		err = ret;
-
-	ext3_debug("trimmed %d blocks in the group %d\n",
-		count, group);
-
-err_out:
-	if (err)
-		count = err;
-	ext3_journal_stop(handle);
-	brelse(bitmap_bh);
-
-	return count;
-}
-
-/**
- * ext3_trim_fs() -- trim ioctl handler function
- * @sb:			superblock for filesystem
- * @range:		fstrim_range structure: first byte to trim, number of
- *			bytes to trim from start, and minimum extent length,
- *			all in bytes
- *
- * ext3_trim_fs goes through all allocation groups containing bytes from
- * start to start+len. For each such group, the ext3_trim_all_free function
- * is invoked to trim all free space.
- */
-int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
-{
-	ext3_grpblk_t last_block, first_block;
-	unsigned long group, first_group, last_group;
-	struct ext3_group_desc *gdp;
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	uint64_t start, minlen, end, trimmed = 0;
-	ext3_fsblk_t first_data_blk =
-			le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
-	int ret = 0;
-
-	start = range->start >> sb->s_blocksize_bits;
-	end = start + (range->len >> sb->s_blocksize_bits) - 1;
-	minlen = range->minlen >> sb->s_blocksize_bits;
-
-	if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
-	    start >= max_blks ||
-	    range->len < sb->s_blocksize)
-		return -EINVAL;
-	if (end >= max_blks)
-		end = max_blks - 1;
-	if (end <= first_data_blk)
-		goto out;
-	if (start < first_data_blk)
-		start = first_data_blk;
-
-	smp_rmb();
-
-	/* Determine first and last group to examine based on start and len */
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
-				     &first_group, &first_block);
-	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
-				     &last_group, &last_block);
-
-	/* end now represents the last block to discard in this group */
-	end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
-
-	for (group = first_group; group <= last_group; group++) {
-		gdp = ext3_get_group_desc(sb, group, NULL);
-		if (!gdp)
-			break;
-
-		/*
-		 * For all the groups except the last one, last block will
-		 * always be EXT3_BLOCKS_PER_GROUP(sb)-1, so we only need to
- * change it for the last group; note that last_block is
-		 * already computed earlier by ext3_get_group_no_and_offset()
-		 */
-		if (group == last_group)
-			end = last_block;
-
-		if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
-			ret = ext3_trim_all_free(sb, group, first_block,
-						 end, minlen);
-			if (ret < 0)
-				break;
-			trimmed += ret;
-		}
-
-		/*
-		 * For every group except the first one, we are sure
-		 * that the first block to discard will be block #0.
-		 */
-		first_block = 0;
-	}
-
-	if (ret > 0)
-		ret = 0;
-
-out:
-	range->len = trimmed * sb->s_blocksize;
-	return ret;
-}
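As a side note on the conversion at the top of ext3_trim_fs(): the ioctl hands in byte values, and the function shifts them down by s_blocksize_bits to get block numbers. A minimal userspace sketch of that arithmetic, assuming a 4 KiB block size and a hypothetical trim_range struct standing in for struct fstrim_range:

#include <stdio.h>
#include <stdint.h>

/* hypothetical stand-in for struct fstrim_range (all values in bytes) */
struct trim_range { uint64_t start, len, minlen; };

int main(void)
{
	const unsigned blocksize_bits = 12;                  /* assumed 4 KiB blocks */
	struct trim_range r = { 1048576, 8388608, 65536 };   /* 1 MiB, 8 MiB, 64 KiB */

	uint64_t first = r.start >> blocksize_bits;              /* 256 */
	uint64_t last  = first + (r.len >> blocksize_bits) - 1;  /* 2303 */
	uint64_t min   = r.minlen >> blocksize_bits;             /* 16 */

	printf("blocks %llu..%llu, minimum extent %llu blocks\n",
	       (unsigned long long)first, (unsigned long long)last,
	       (unsigned long long)min);
	return 0;
}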
diff --git a/fs/ext3/bitmap.c b/fs/ext3/bitmap.c
deleted file mode 100644
index ef9c643..0000000
--- a/fs/ext3/bitmap.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  linux/fs/ext3/bitmap.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- */
-
-#include "ext3.h"
-
-#ifdef EXT3FS_DEBUG
-
-unsigned long ext3_count_free (struct buffer_head * map, unsigned int numchars)
-{
-	return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars);
-}
-
-#endif  /*  EXT3FS_DEBUG  */
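A quick check of the counting trick above: memweight() returns the number of set bits in the buffer, so the free count is simply total bits minus set bits. The sketch below reproduces that in userspace, with a GCC __builtin_popcount() loop standing in for the kernel helper:

#include <stdio.h>
#include <stddef.h>

/* count zero bits in a 'numchars'-byte bitmap, like ext3_count_free() */
static unsigned long count_free(const unsigned char *map, size_t numchars)
{
	unsigned long set = 0;

	for (size_t i = 0; i < numchars; i++)
		set += __builtin_popcount(map[i]);
	return numchars * 8 - set;                  /* BITS_PER_BYTE == 8 */
}

int main(void)
{
	unsigned char bitmap[2] = { 0xb0, 0x01 };   /* 3 + 1 bits set */

	printf("%lu free bits\n", count_free(bitmap, sizeof(bitmap)));  /* 12 */
	return 0;
}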
-
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
deleted file mode 100644
index 17742ee..0000000
--- a/fs/ext3/dir.c
+++ /dev/null
@@ -1,537 +0,0 @@
-/*
- *  linux/fs/ext3/dir.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/dir.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 directory handling functions
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *
- * Hash Tree Directory indexing (c) 2001  Daniel Phillips
- *
- */
-
-#include <linux/compat.h>
-#include "ext3.h"
-
-static unsigned char ext3_filetype_table[] = {
-	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
-};
-
-static int ext3_dx_readdir(struct file *, struct dir_context *);
-
-static unsigned char get_dtype(struct super_block *sb, int filetype)
-{
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE) ||
-	    (filetype >= EXT3_FT_MAX))
-		return DT_UNKNOWN;
-
-	return (ext3_filetype_table[filetype]);
-}
-
-/**
- * Check if the given dir-inode refers to an htree-indexed directory
- * (or a directory which could potentially get converted to use htree
- * indexing).
- *
- * Return 1 if it is a dx dir, 0 if not
- */
-static int is_dx_dir(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-
-	if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-		     EXT3_FEATURE_COMPAT_DIR_INDEX) &&
-	    ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
-	     ((inode->i_size >> sb->s_blocksize_bits) == 1)))
-		return 1;
-
-	return 0;
-}
-
-int ext3_check_dir_entry (const char * function, struct inode * dir,
-			  struct ext3_dir_entry_2 * de,
-			  struct buffer_head * bh,
-			  unsigned long offset)
-{
-	const char * error_msg = NULL;
-	const int rlen = ext3_rec_len_from_disk(de->rec_len);
-
-	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
-		error_msg = "rec_len is smaller than minimal";
-	else if (unlikely(rlen % 4 != 0))
-		error_msg = "rec_len % 4 != 0";
-	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
-		error_msg = "rec_len is too small for name_len";
-	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
-		error_msg = "directory entry across blocks";
-	else if (unlikely(le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
-		error_msg = "inode out of bounds";
-
-	if (unlikely(error_msg != NULL))
-		ext3_error (dir->i_sb, function,
-			"bad entry in directory #%lu: %s - "
-			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
-			dir->i_ino, error_msg, offset,
-			(unsigned long) le32_to_cpu(de->inode),
-			rlen, de->name_len);
-
-	return error_msg == NULL ? 1 : 0;
-}
-
-static int ext3_readdir(struct file *file, struct dir_context *ctx)
-{
-	unsigned long offset;
-	int i;
-	struct ext3_dir_entry_2 *de;
-	int err;
-	struct inode *inode = file_inode(file);
-	struct super_block *sb = inode->i_sb;
-	int dir_has_error = 0;
-
-	if (is_dx_dir(inode)) {
-		err = ext3_dx_readdir(file, ctx);
-		if (err != ERR_BAD_DX_DIR)
-			return err;
-		/*
-		 * We don't set the inode dirty flag since it's not
-		 * critical that it get flushed back to the disk.
-		 */
-		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
-	}
-	offset = ctx->pos & (sb->s_blocksize - 1);
-
-	while (ctx->pos < inode->i_size) {
-		unsigned long blk = ctx->pos >> EXT3_BLOCK_SIZE_BITS(sb);
-		struct buffer_head map_bh;
-		struct buffer_head *bh = NULL;
-
-		map_bh.b_state = 0;
-		err = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0);
-		if (err > 0) {
-			pgoff_t index = map_bh.b_blocknr >>
-					(PAGE_CACHE_SHIFT - inode->i_blkbits);
-			if (!ra_has_index(&file->f_ra, index))
-				page_cache_sync_readahead(
-					sb->s_bdev->bd_inode->i_mapping,
-					&file->f_ra, file,
-					index, 1);
-			file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
-			bh = ext3_bread(NULL, inode, blk, 0, &err);
-		}
-
-		/*
-		 * We ignore I/O errors on directories so users have a chance
-		 * of recovering data when there's a bad sector
-		 */
-		if (!bh) {
-			if (!dir_has_error) {
-				ext3_error(sb, __func__, "directory #%lu "
-					"contains a hole at offset %lld",
-					inode->i_ino, ctx->pos);
-				dir_has_error = 1;
-			}
-			/* corrupt size?  Maybe no more blocks to read */
-			if (ctx->pos > inode->i_blocks << 9)
-				break;
-			ctx->pos += sb->s_blocksize - offset;
-			continue;
-		}
-
-		/* If the dir block has changed since the last call to
-		 * readdir(2), then we might be pointing to an invalid
-		 * dirent right now.  Scan from the start of the block
-		 * to make sure. */
-		if (offset && file->f_version != inode->i_version) {
-			for (i = 0; i < sb->s_blocksize && i < offset; ) {
-				de = (struct ext3_dir_entry_2 *)
-					(bh->b_data + i);
-				/* It's too expensive to do a full
-				 * dirent test each time round this
-				 * loop, but we do have to test at
-				 * least that it is non-zero.  A
-				 * failure will be detected in the
-				 * dirent test below. */
-				if (ext3_rec_len_from_disk(de->rec_len) <
-						EXT3_DIR_REC_LEN(1))
-					break;
-				i += ext3_rec_len_from_disk(de->rec_len);
-			}
-			offset = i;
-			ctx->pos = (ctx->pos & ~(sb->s_blocksize - 1))
-				| offset;
-			file->f_version = inode->i_version;
-		}
-
-		while (ctx->pos < inode->i_size
-		       && offset < sb->s_blocksize) {
-			de = (struct ext3_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext3_check_dir_entry ("ext3_readdir", inode, de,
-						   bh, offset)) {
-				/* On error, skip to the
-                                   next block. */
-				ctx->pos = (ctx->pos |
-						(sb->s_blocksize - 1)) + 1;
-				break;
-			}
-			offset += ext3_rec_len_from_disk(de->rec_len);
-			if (le32_to_cpu(de->inode)) {
-				if (!dir_emit(ctx, de->name, de->name_len,
-					      le32_to_cpu(de->inode),
-					      get_dtype(sb, de->file_type))) {
-					brelse(bh);
-					return 0;
-				}
-			}
-			ctx->pos += ext3_rec_len_from_disk(de->rec_len);
-		}
-		offset = 0;
-		brelse (bh);
-		if (ctx->pos < inode->i_size)
-			if (!dir_relax(inode))
-				return 0;
-	}
-	return 0;
-}
-
-static inline int is_32bit_api(void)
-{
-#ifdef CONFIG_COMPAT
-	return is_compat_task();
-#else
-	return (BITS_PER_LONG == 32);
-#endif
-}
-
-/*
- * These functions convert from the major/minor hash to an f_pos
- * value for dx directories
- *
- * Upper layer (for example NFS) should specify FMODE_32BITHASH or
- * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
- * directly on both 32-bit and 64-bit nodes; in that case, neither
- * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
- */
-static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return major >> 1;
-	else
-		return ((__u64)(major >> 1) << 32) | (__u64)minor;
-}
-
-static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return (pos << 1) & 0xffffffff;
-	else
-		return ((pos >> 32) << 1) & 0xffffffff;
-}
-
-static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return 0;
-	else
-		return pos & 0xffffffff;
-}
-
-/*
- * Return 32- or 64-bit end-of-file for dx directories
- */
-static inline loff_t ext3_get_htree_eof(struct file *filp)
-{
-	if ((filp->f_mode & FMODE_32BITHASH) ||
-	    (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
-		return EXT3_HTREE_EOF_32BIT;
-	else
-		return EXT3_HTREE_EOF_64BIT;
-}
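The three helpers above are just packing and unpacking bit fields: in the 64-bit case f_pos carries the major hash (with its low bit dropped) in the upper 32 bits and the minor hash in the lower 32. A small round-trip sketch of that arithmetic, with arbitrary example values and the file-mode checks left out:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t major = 0x8abcdef0, minor = 0x12345678;

	/* 64-bit case of hash2pos(): pos = ((major >> 1) << 32) | minor */
	uint64_t pos = ((uint64_t)(major >> 1) << 32) | minor;

	/* pos2maj_hash() / pos2min_hash() recover the two pieces */
	uint32_t maj_back = (uint32_t)(((pos >> 32) << 1) & 0xffffffff);
	uint32_t min_back = (uint32_t)(pos & 0xffffffff);

	printf("major %#x -> %#x, minor %#x -> %#x\n",
	       major, maj_back, minor, min_back);
	return 0;
}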
-
-
-/*
- * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
- * non-htree and htree directories, where the "offset" is in terms
- * of the filename hash value instead of the byte offset.
- *
- * Because we may return a 64-bit hash that is well beyond s_maxbytes,
- * we need to pass the max hash as the maximum allowable offset in
- * the htree directory case.
- *
- * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
- *       will be invalid once the directory has been converted into a dx directory
- */
-static loff_t ext3_dir_llseek(struct file *file, loff_t offset, int whence)
-{
-	struct inode *inode = file->f_mapping->host;
-	int dx_dir = is_dx_dir(inode);
-	loff_t htree_max = ext3_get_htree_eof(file);
-
-	if (likely(dx_dir))
-		return generic_file_llseek_size(file, offset, whence,
-					        htree_max, htree_max);
-	else
-		return generic_file_llseek(file, offset, whence);
-}
-
-/*
- * This structure holds the nodes of the red-black tree used to store
- * the directory entry in hash order.
- */
-struct fname {
-	__u32		hash;
-	__u32		minor_hash;
-	struct rb_node	rb_hash;
-	struct fname	*next;
-	__u32		inode;
-	__u8		name_len;
-	__u8		file_type;
-	char		name[0];
-};
-
-/*
- * This function implements a non-recursive way of freeing all of the
- * nodes in the red-black tree.
- */
-static void free_rb_tree_fname(struct rb_root *root)
-{
-	struct fname *fname, *next;
-
-	rbtree_postorder_for_each_entry_safe(fname, next, root, rb_hash)
-		do {
-			struct fname *old = fname;
-			fname = fname->next;
-			kfree(old);
-		} while (fname);
-
-	*root = RB_ROOT;
-}
-
-static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
-							   loff_t pos)
-{
-	struct dir_private_info *p;
-
-	p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
-	if (!p)
-		return NULL;
-	p->curr_hash = pos2maj_hash(filp, pos);
-	p->curr_minor_hash = pos2min_hash(filp, pos);
-	return p;
-}
-
-void ext3_htree_free_dir_info(struct dir_private_info *p)
-{
-	free_rb_tree_fname(&p->root);
-	kfree(p);
-}
-
-/*
- * Given a directory entry, enter it into the fname rb tree.
- */
-int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-			     __u32 minor_hash,
-			     struct ext3_dir_entry_2 *dirent)
-{
-	struct rb_node **p, *parent = NULL;
-	struct fname * fname, *new_fn;
-	struct dir_private_info *info;
-	int len;
-
-	info = (struct dir_private_info *) dir_file->private_data;
-	p = &info->root.rb_node;
-
-	/* Create and allocate the fname structure */
-	len = sizeof(struct fname) + dirent->name_len + 1;
-	new_fn = kzalloc(len, GFP_KERNEL);
-	if (!new_fn)
-		return -ENOMEM;
-	new_fn->hash = hash;
-	new_fn->minor_hash = minor_hash;
-	new_fn->inode = le32_to_cpu(dirent->inode);
-	new_fn->name_len = dirent->name_len;
-	new_fn->file_type = dirent->file_type;
-	memcpy(new_fn->name, dirent->name, dirent->name_len);
-	new_fn->name[dirent->name_len] = 0;
-
-	while (*p) {
-		parent = *p;
-		fname = rb_entry(parent, struct fname, rb_hash);
-
-		/*
-		 * If the hash and minor hash match up, then we put
-		 * them on a linked list.  This rarely happens...
-		 */
-		if ((new_fn->hash == fname->hash) &&
-		    (new_fn->minor_hash == fname->minor_hash)) {
-			new_fn->next = fname->next;
-			fname->next = new_fn;
-			return 0;
-		}
-
-		if (new_fn->hash < fname->hash)
-			p = &(*p)->rb_left;
-		else if (new_fn->hash > fname->hash)
-			p = &(*p)->rb_right;
-		else if (new_fn->minor_hash < fname->minor_hash)
-			p = &(*p)->rb_left;
-		else /* if (new_fn->minor_hash > fname->minor_hash) */
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&new_fn->rb_hash, parent, p);
-	rb_insert_color(&new_fn->rb_hash, &info->root);
-	return 0;
-}
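The insertion loop above is effectively a two-key comparison on (hash, minor_hash), with exact duplicates pushed onto the per-node ->next list instead of the tree. A compact sketch of just that comparison logic, with illustrative values:

#include <stdio.h>
#include <stdint.h>

/* <0: descend left, >0: descend right, 0: hash collision -> chain on ->next */
static int dx_cmp(uint32_t h1, uint32_t m1, uint32_t h2, uint32_t m2)
{
	if (h1 != h2)
		return h1 < h2 ? -1 : 1;
	if (m1 != m2)
		return m1 < m2 ? -1 : 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       dx_cmp(1, 5, 2, 0),    /* -1: smaller major hash */
	       dx_cmp(2, 7, 2, 3),    /*  1: same major, larger minor */
	       dx_cmp(2, 3, 2, 3));   /*  0: full collision, goes on the list */
	return 0;
}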
-
-
-
-/*
- * This is a helper function for ext3_dx_readdir.  It calls filldir
- * for all entries on the fname linked list.  (Normally there is only
- * one entry on the linked list, unless there are 62 bit hash collisions.)
- */
-static bool call_filldir(struct file *file, struct dir_context *ctx,
-			struct fname *fname)
-{
-	struct dir_private_info *info = file->private_data;
-	struct inode *inode = file_inode(file);
-	struct super_block *sb = inode->i_sb;
-
-	if (!fname) {
-		printk("call_filldir: called with null fname?!?\n");
-		return true;
-	}
-	ctx->pos = hash2pos(file, fname->hash, fname->minor_hash);
-	while (fname) {
-		if (!dir_emit(ctx, fname->name, fname->name_len,
-				fname->inode,
-				get_dtype(sb, fname->file_type))) {
-			info->extra_fname = fname;
-			return false;
-		}
-		fname = fname->next;
-	}
-	return true;
-}
-
-static int ext3_dx_readdir(struct file *file, struct dir_context *ctx)
-{
-	struct dir_private_info *info = file->private_data;
-	struct inode *inode = file_inode(file);
-	struct fname *fname;
-	int	ret;
-
-	if (!info) {
-		info = ext3_htree_create_dir_info(file, ctx->pos);
-		if (!info)
-			return -ENOMEM;
-		file->private_data = info;
-	}
-
-	if (ctx->pos == ext3_get_htree_eof(file))
-		return 0;	/* EOF */
-
-	/* Someone has messed with f_pos; reset the world */
-	if (info->last_pos != ctx->pos) {
-		free_rb_tree_fname(&info->root);
-		info->curr_node = NULL;
-		info->extra_fname = NULL;
-		info->curr_hash = pos2maj_hash(file, ctx->pos);
-		info->curr_minor_hash = pos2min_hash(file, ctx->pos);
-	}
-
-	/*
-	 * If there are any leftover names on the hash collision
-	 * chain, return them first.
-	 */
-	if (info->extra_fname) {
-		if (!call_filldir(file, ctx, info->extra_fname))
-			goto finished;
-		info->extra_fname = NULL;
-		goto next_node;
-	} else if (!info->curr_node)
-		info->curr_node = rb_first(&info->root);
-
-	while (1) {
-		/*
-		 * Fill the rbtree if we have no more entries,
-		 * or the inode has changed since we last read in the
-		 * cached entries.
-		 */
-		if ((!info->curr_node) ||
-		    (file->f_version != inode->i_version)) {
-			info->curr_node = NULL;
-			free_rb_tree_fname(&info->root);
-			file->f_version = inode->i_version;
-			ret = ext3_htree_fill_tree(file, info->curr_hash,
-						   info->curr_minor_hash,
-						   &info->next_hash);
-			if (ret < 0)
-				return ret;
-			if (ret == 0) {
-				ctx->pos = ext3_get_htree_eof(file);
-				break;
-			}
-			info->curr_node = rb_first(&info->root);
-		}
-
-		fname = rb_entry(info->curr_node, struct fname, rb_hash);
-		info->curr_hash = fname->hash;
-		info->curr_minor_hash = fname->minor_hash;
-		if (!call_filldir(file, ctx, fname))
-			break;
-	next_node:
-		info->curr_node = rb_next(info->curr_node);
-		if (info->curr_node) {
-			fname = rb_entry(info->curr_node, struct fname,
-					 rb_hash);
-			info->curr_hash = fname->hash;
-			info->curr_minor_hash = fname->minor_hash;
-		} else {
-			if (info->next_hash == ~0) {
-				ctx->pos = ext3_get_htree_eof(file);
-				break;
-			}
-			info->curr_hash = info->next_hash;
-			info->curr_minor_hash = 0;
-		}
-	}
-finished:
-	info->last_pos = ctx->pos;
-	return 0;
-}
-
-static int ext3_release_dir (struct inode * inode, struct file * filp)
-{
-	if (filp->private_data)
-		ext3_htree_free_dir_info(filp->private_data);
-
-	return 0;
-}
-
-const struct file_operations ext3_dir_operations = {
-	.llseek		= ext3_dir_llseek,
-	.read		= generic_read_dir,
-	.iterate	= ext3_readdir,
-	.unlocked_ioctl = ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.fsync		= ext3_sync_file,
-	.release	= ext3_release_dir,
-};
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
deleted file mode 100644
index f483a80..0000000
--- a/fs/ext3/ext3.h
+++ /dev/null
@@ -1,1332 +0,0 @@
-/*
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/include/linux/minix_fs.h
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/magic.h>
-#include <linux/bug.h>
-#include <linux/blockgroup_lock.h>
-
-/*
- * The second extended filesystem constants/structures
- */
-
-/*
- * Define EXT3FS_DEBUG to produce debug messages
- */
-#undef EXT3FS_DEBUG
-
-/*
- * Define EXT3_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT3_DEFAULT_RESERVE_BLOCKS     8
-/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT3_MAX_RESERVE_BLOCKS         1027
-#define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
-
-/*
- * Debug code
- */
-#ifdef EXT3FS_DEBUG
-#define ext3_debug(f, a...)						\
-	do {								\
-		printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:",	\
-			__FILE__, __LINE__, __func__);		\
-		printk (KERN_DEBUG f, ## a);				\
-	} while (0)
-#else
-#define ext3_debug(f, a...)	do {} while (0)
-#endif
-
-/*
- * Special inodes numbers
- */
-#define	EXT3_BAD_INO		 1	/* Bad blocks inode */
-#define EXT3_ROOT_INO		 2	/* Root inode */
-#define EXT3_BOOT_LOADER_INO	 5	/* Boot loader inode */
-#define EXT3_UNDEL_DIR_INO	 6	/* Undelete directory inode */
-#define EXT3_RESIZE_INO		 7	/* Reserved group descriptors inode */
-#define EXT3_JOURNAL_INO	 8	/* Journal inode */
-
-/* First non-reserved inode for old ext3 filesystems */
-#define EXT3_GOOD_OLD_FIRST_INO	11
-
-/*
- * Maximal count of links to a file
- */
-#define EXT3_LINK_MAX		32000
-
-/*
- * Macro-instructions used to manage several block sizes
- */
-#define EXT3_MIN_BLOCK_SIZE		1024
-#define	EXT3_MAX_BLOCK_SIZE		65536
-#define EXT3_MIN_BLOCK_LOG_SIZE		10
-#define EXT3_BLOCK_SIZE(s)		((s)->s_blocksize)
-#define	EXT3_ADDR_PER_BLOCK(s)		(EXT3_BLOCK_SIZE(s) / sizeof (__u32))
-#define EXT3_BLOCK_SIZE_BITS(s)	((s)->s_blocksize_bits)
-#define	EXT3_ADDR_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_addr_per_block_bits)
-#define EXT3_INODE_SIZE(s)		(EXT3_SB(s)->s_inode_size)
-#define EXT3_FIRST_INO(s)		(EXT3_SB(s)->s_first_ino)
-
-/*
- * Macro-instructions used to manage fragments
- */
-#define EXT3_MIN_FRAG_SIZE		1024
-#define	EXT3_MAX_FRAG_SIZE		4096
-#define EXT3_MIN_FRAG_LOG_SIZE		  10
-#define EXT3_FRAG_SIZE(s)		(EXT3_SB(s)->s_frag_size)
-#define EXT3_FRAGS_PER_BLOCK(s)		(EXT3_SB(s)->s_frags_per_block)
-
-/*
- * Structure of a blocks group descriptor
- */
-struct ext3_group_desc
-{
-	__le32	bg_block_bitmap;		/* Blocks bitmap block */
-	__le32	bg_inode_bitmap;		/* Inodes bitmap block */
-	__le32	bg_inode_table;		/* Inodes table block */
-	__le16	bg_free_blocks_count;	/* Free blocks count */
-	__le16	bg_free_inodes_count;	/* Free inodes count */
-	__le16	bg_used_dirs_count;	/* Directories count */
-	__u16	bg_pad;
-	__le32	bg_reserved[3];
-};
-
-/*
- * Macro-instructions used to manage group descriptors
- */
-#define EXT3_BLOCKS_PER_GROUP(s)	(EXT3_SB(s)->s_blocks_per_group)
-#define EXT3_DESC_PER_BLOCK(s)		(EXT3_SB(s)->s_desc_per_block)
-#define EXT3_INODES_PER_GROUP(s)	(EXT3_SB(s)->s_inodes_per_group)
-#define EXT3_DESC_PER_BLOCK_BITS(s)	(EXT3_SB(s)->s_desc_per_block_bits)
-
-/*
- * Constants relative to the data blocks
- */
-#define	EXT3_NDIR_BLOCKS		12
-#define	EXT3_IND_BLOCK			EXT3_NDIR_BLOCKS
-#define	EXT3_DIND_BLOCK			(EXT3_IND_BLOCK + 1)
-#define	EXT3_TIND_BLOCK			(EXT3_DIND_BLOCK + 1)
-#define	EXT3_N_BLOCKS			(EXT3_TIND_BLOCK + 1)
-
-/*
- * Inode flags
- */
-#define	EXT3_SECRM_FL			0x00000001 /* Secure deletion */
-#define	EXT3_UNRM_FL			0x00000002 /* Undelete */
-#define	EXT3_COMPR_FL			0x00000004 /* Compress file */
-#define EXT3_SYNC_FL			0x00000008 /* Synchronous updates */
-#define EXT3_IMMUTABLE_FL		0x00000010 /* Immutable file */
-#define EXT3_APPEND_FL			0x00000020 /* writes to file may only append */
-#define EXT3_NODUMP_FL			0x00000040 /* do not dump file */
-#define EXT3_NOATIME_FL			0x00000080 /* do not update atime */
-/* Reserved for compression usage... */
-#define EXT3_DIRTY_FL			0x00000100
-#define EXT3_COMPRBLK_FL		0x00000200 /* One or more compressed clusters */
-#define EXT3_NOCOMPR_FL			0x00000400 /* Don't compress */
-#define EXT3_ECOMPR_FL			0x00000800 /* Compression error */
-/* End compression flags --- maybe not all used */
-#define EXT3_INDEX_FL			0x00001000 /* hash-indexed directory */
-#define EXT3_IMAGIC_FL			0x00002000 /* AFS directory */
-#define EXT3_JOURNAL_DATA_FL		0x00004000 /* file data should be journaled */
-#define EXT3_NOTAIL_FL			0x00008000 /* file tail should not be merged */
-#define EXT3_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
-#define EXT3_TOPDIR_FL			0x00020000 /* Top of directory hierarchies*/
-#define EXT3_RESERVED_FL		0x80000000 /* reserved for ext3 lib */
-
-#define EXT3_FL_USER_VISIBLE		0x0003DFFF /* User visible flags */
-#define EXT3_FL_USER_MODIFIABLE		0x000380FF /* User modifiable flags */
-
-/* Flags that should be inherited by new inodes from their parent. */
-#define EXT3_FL_INHERITED (EXT3_SECRM_FL | EXT3_UNRM_FL | EXT3_COMPR_FL |\
-			   EXT3_SYNC_FL | EXT3_NODUMP_FL |\
-			   EXT3_NOATIME_FL | EXT3_COMPRBLK_FL |\
-			   EXT3_NOCOMPR_FL | EXT3_JOURNAL_DATA_FL |\
-			   EXT3_NOTAIL_FL | EXT3_DIRSYNC_FL)
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define EXT3_REG_FLMASK (~(EXT3_DIRSYNC_FL | EXT3_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define EXT3_OTHER_FLMASK (EXT3_NODUMP_FL | EXT3_NOATIME_FL)
-
-/* Mask out flags that are inappropriate for the given type of inode. */
-static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
-{
-	if (S_ISDIR(mode))
-		return flags;
-	else if (S_ISREG(mode))
-		return flags & EXT3_REG_FLMASK;
-	else
-		return flags & EXT3_OTHER_FLMASK;
-}
-
-/* Used to pass group descriptor data when online resize is done */
-struct ext3_new_group_input {
-	__u32 group;            /* Group number for this data */
-	__u32 block_bitmap;     /* Absolute block number of block bitmap */
-	__u32 inode_bitmap;     /* Absolute block number of inode bitmap */
-	__u32 inode_table;      /* Absolute block number of inode table start */
-	__u32 blocks_count;     /* Total number of blocks in this group */
-	__u16 reserved_blocks;  /* Number of reserved blocks in this group */
-	__u16 unused;
-};
-
-/* The struct ext3_new_group_input in kernel space, with free_blocks_count */
-struct ext3_new_group_data {
-	__u32 group;
-	__u32 block_bitmap;
-	__u32 inode_bitmap;
-	__u32 inode_table;
-	__u32 blocks_count;
-	__u16 reserved_blocks;
-	__u16 unused;
-	__u32 free_blocks_count;
-};
-
-
-/*
- * ioctl commands
- */
-#define	EXT3_IOC_GETFLAGS		FS_IOC_GETFLAGS
-#define	EXT3_IOC_SETFLAGS		FS_IOC_SETFLAGS
-#define	EXT3_IOC_GETVERSION		_IOR('f', 3, long)
-#define	EXT3_IOC_SETVERSION		_IOW('f', 4, long)
-#define EXT3_IOC_GROUP_EXTEND		_IOW('f', 7, unsigned long)
-#define EXT3_IOC_GROUP_ADD		_IOW('f', 8,struct ext3_new_group_input)
-#define	EXT3_IOC_GETVERSION_OLD		FS_IOC_GETVERSION
-#define	EXT3_IOC_SETVERSION_OLD		FS_IOC_SETVERSION
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC_WAIT_FOR_READONLY	_IOR('f', 99, long)
-#endif
-#define EXT3_IOC_GETRSVSZ		_IOR('f', 5, long)
-#define EXT3_IOC_SETRSVSZ		_IOW('f', 6, long)
-
-/*
- * ioctl commands in 32 bit emulation
- */
-#define EXT3_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
-#define EXT3_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
-#define EXT3_IOC32_GETVERSION		_IOR('f', 3, int)
-#define EXT3_IOC32_SETVERSION		_IOW('f', 4, int)
-#define EXT3_IOC32_GETRSVSZ		_IOR('f', 5, int)
-#define EXT3_IOC32_SETRSVSZ		_IOW('f', 6, int)
-#define EXT3_IOC32_GROUP_EXTEND		_IOW('f', 7, unsigned int)
-#ifdef CONFIG_JBD_DEBUG
-#define EXT3_IOC32_WAIT_FOR_READONLY	_IOR('f', 99, int)
-#endif
-#define EXT3_IOC32_GETVERSION_OLD	FS_IOC32_GETVERSION
-#define EXT3_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
-
-/* Number of supported quota types */
-#define EXT3_MAXQUOTAS 2
-
-/*
- *  Mount options
- */
-struct ext3_mount_options {
-	unsigned long s_mount_opt;
-	kuid_t s_resuid;
-	kgid_t s_resgid;
-	unsigned long s_commit_interval;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[EXT3_MAXQUOTAS];
-#endif
-};
-
-/*
- * Structure of an inode on the disk
- */
-struct ext3_inode {
-	__le16	i_mode;		/* File mode */
-	__le16	i_uid;		/* Low 16 bits of Owner Uid */
-	__le32	i_size;		/* Size in bytes */
-	__le32	i_atime;	/* Access time */
-	__le32	i_ctime;	/* Creation time */
-	__le32	i_mtime;	/* Modification time */
-	__le32	i_dtime;	/* Deletion Time */
-	__le16	i_gid;		/* Low 16 bits of Group Id */
-	__le16	i_links_count;	/* Links count */
-	__le32	i_blocks;	/* Blocks count */
-	__le32	i_flags;	/* File flags */
-	union {
-		struct {
-			__u32  l_i_reserved1;
-		} linux1;
-		struct {
-			__u32  h_i_translator;
-		} hurd1;
-		struct {
-			__u32  m_i_reserved1;
-		} masix1;
-	} osd1;				/* OS dependent 1 */
-	__le32	i_block[EXT3_N_BLOCKS];/* Pointers to blocks */
-	__le32	i_generation;	/* File version (for NFS) */
-	__le32	i_file_acl;	/* File ACL */
-	__le32	i_dir_acl;	/* Directory ACL */
-	__le32	i_faddr;	/* Fragment address */
-	union {
-		struct {
-			__u8	l_i_frag;	/* Fragment number */
-			__u8	l_i_fsize;	/* Fragment size */
-			__u16	i_pad1;
-			__le16	l_i_uid_high;	/* these 2 fields    */
-			__le16	l_i_gid_high;	/* were reserved2[0] */
-			__u32	l_i_reserved2;
-		} linux2;
-		struct {
-			__u8	h_i_frag;	/* Fragment number */
-			__u8	h_i_fsize;	/* Fragment size */
-			__u16	h_i_mode_high;
-			__u16	h_i_uid_high;
-			__u16	h_i_gid_high;
-			__u32	h_i_author;
-		} hurd2;
-		struct {
-			__u8	m_i_frag;	/* Fragment number */
-			__u8	m_i_fsize;	/* Fragment size */
-			__u16	m_pad1;
-			__u32	m_i_reserved2[2];
-		} masix2;
-	} osd2;				/* OS dependent 2 */
-	__le16	i_extra_isize;
-	__le16	i_pad1;
-};
-
-#define i_size_high	i_dir_acl
-
-#define i_reserved1	osd1.linux1.l_i_reserved1
-#define i_frag		osd2.linux2.l_i_frag
-#define i_fsize		osd2.linux2.l_i_fsize
-#define i_uid_low	i_uid
-#define i_gid_low	i_gid
-#define i_uid_high	osd2.linux2.l_i_uid_high
-#define i_gid_high	osd2.linux2.l_i_gid_high
-#define i_reserved2	osd2.linux2.l_i_reserved2
-
-/*
- * File system states
- */
-#define	EXT3_VALID_FS			0x0001	/* Unmounted cleanly */
-#define	EXT3_ERROR_FS			0x0002	/* Errors detected */
-#define	EXT3_ORPHAN_FS			0x0004	/* Orphans being recovered */
-
-/*
- * Misc. filesystem flags
- */
-#define EXT2_FLAGS_SIGNED_HASH		0x0001  /* Signed dirhash in use */
-#define EXT2_FLAGS_UNSIGNED_HASH	0x0002  /* Unsigned dirhash in use */
-#define EXT2_FLAGS_TEST_FILESYS		0x0004	/* to test development code */
-
-/*
- * Mount flags
- */
-#define EXT3_MOUNT_CHECK		0x00001	/* Do mount-time checks */
-/* EXT3_MOUNT_OLDALLOC was there */
-#define EXT3_MOUNT_GRPID		0x00004	/* Create files with directory's group */
-#define EXT3_MOUNT_DEBUG		0x00008	/* Some debugging messages */
-#define EXT3_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
-#define EXT3_MOUNT_ERRORS_RO		0x00020	/* Remount fs ro on errors */
-#define EXT3_MOUNT_ERRORS_PANIC		0x00040	/* Panic on errors */
-#define EXT3_MOUNT_MINIX_DF		0x00080	/* Mimics the Minix statfs */
-#define EXT3_MOUNT_NOLOAD		0x00100	/* Don't use existing journal*/
-#define EXT3_MOUNT_ABORT		0x00200	/* Fatal error detected */
-#define EXT3_MOUNT_DATA_FLAGS		0x00C00	/* Mode for data writes: */
-#define EXT3_MOUNT_JOURNAL_DATA		0x00400	/* Write data to journal */
-#define EXT3_MOUNT_ORDERED_DATA		0x00800	/* Flush data before commit */
-#define EXT3_MOUNT_WRITEBACK_DATA	0x00C00	/* No data ordering */
-#define EXT3_MOUNT_UPDATE_JOURNAL	0x01000	/* Update the journal format */
-#define EXT3_MOUNT_NO_UID32		0x02000  /* Disable 32-bit UIDs */
-#define EXT3_MOUNT_XATTR_USER		0x04000	/* Extended user attributes */
-#define EXT3_MOUNT_POSIX_ACL		0x08000	/* POSIX Access Control Lists */
-#define EXT3_MOUNT_RESERVATION		0x10000	/* Preallocation */
-#define EXT3_MOUNT_BARRIER		0x20000 /* Use block barriers */
-#define EXT3_MOUNT_QUOTA		0x80000 /* Some quota option set */
-#define EXT3_MOUNT_USRQUOTA		0x100000 /* "old" user quota */
-#define EXT3_MOUNT_GRPQUOTA		0x200000 /* "old" group quota */
-#define EXT3_MOUNT_DATA_ERR_ABORT	0x400000 /* Abort on file data write
-						  * error in ordered mode */
-
-/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
-#ifndef _LINUX_EXT2_FS_H
-#define clear_opt(o, opt)		o &= ~EXT3_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT3_MOUNT_##opt
-#define test_opt(sb, opt)		(EXT3_SB(sb)->s_mount_opt & \
-					 EXT3_MOUNT_##opt)
-#else
-#define EXT2_MOUNT_NOLOAD		EXT3_MOUNT_NOLOAD
-#define EXT2_MOUNT_ABORT		EXT3_MOUNT_ABORT
-#define EXT2_MOUNT_DATA_FLAGS		EXT3_MOUNT_DATA_FLAGS
-#endif
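These helpers are plain bit operations on s_mount_opt. The standalone sketch below shows the usage pattern; it redefines local copies of the macros over a plain variable, since the kernel versions reach into EXT3_SB(sb):

#include <stdio.h>

#define EXT3_MOUNT_XATTR_USER	0x04000
#define EXT3_MOUNT_POSIX_ACL	0x08000

/* local copies of the helpers, operating on a plain variable for illustration */
#define set_opt(o, opt)		((o) |= EXT3_MOUNT_##opt)
#define clear_opt(o, opt)	((o) &= ~EXT3_MOUNT_##opt)
#define test_opt(o, opt)	((o) & EXT3_MOUNT_##opt)

int main(void)
{
	unsigned long opts = 0;

	set_opt(opts, XATTR_USER);
	set_opt(opts, POSIX_ACL);
	clear_opt(opts, POSIX_ACL);

	printf("xattr=%d acl=%d\n",
	       !!test_opt(opts, XATTR_USER), !!test_opt(opts, POSIX_ACL));  /* 1 0 */
	return 0;
}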
-
-#define ext3_set_bit			__set_bit_le
-#define ext3_set_bit_atomic		ext2_set_bit_atomic
-#define ext3_clear_bit			__clear_bit_le
-#define ext3_clear_bit_atomic		ext2_clear_bit_atomic
-#define ext3_test_bit			test_bit_le
-#define ext3_find_next_zero_bit		find_next_zero_bit_le
-
-/*
- * Maximal mount counts between two filesystem checks
- */
-#define EXT3_DFL_MAX_MNT_COUNT		20	/* Allow 20 mounts */
-#define EXT3_DFL_CHECKINTERVAL		0	/* Don't use interval check */
-
-/*
- * Behaviour when detecting errors
- */
-#define EXT3_ERRORS_CONTINUE		1	/* Continue execution */
-#define EXT3_ERRORS_RO			2	/* Remount fs read-only */
-#define EXT3_ERRORS_PANIC		3	/* Panic */
-#define EXT3_ERRORS_DEFAULT		EXT3_ERRORS_CONTINUE
-
-/*
- * Structure of the super block
- */
-struct ext3_super_block {
-/*00*/	__le32	s_inodes_count;		/* Inodes count */
-	__le32	s_blocks_count;		/* Blocks count */
-	__le32	s_r_blocks_count;	/* Reserved blocks count */
-	__le32	s_free_blocks_count;	/* Free blocks count */
-/*10*/	__le32	s_free_inodes_count;	/* Free inodes count */
-	__le32	s_first_data_block;	/* First Data Block */
-	__le32	s_log_block_size;	/* Block size */
-	__le32	s_log_frag_size;	/* Fragment size */
-/*20*/	__le32	s_blocks_per_group;	/* # Blocks per group */
-	__le32	s_frags_per_group;	/* # Fragments per group */
-	__le32	s_inodes_per_group;	/* # Inodes per group */
-	__le32	s_mtime;		/* Mount time */
-/*30*/	__le32	s_wtime;		/* Write time */
-	__le16	s_mnt_count;		/* Mount count */
-	__le16	s_max_mnt_count;	/* Maximal mount count */
-	__le16	s_magic;		/* Magic signature */
-	__le16	s_state;		/* File system state */
-	__le16	s_errors;		/* Behaviour when detecting errors */
-	__le16	s_minor_rev_level;	/* minor revision level */
-/*40*/	__le32	s_lastcheck;		/* time of last check */
-	__le32	s_checkinterval;	/* max. time between checks */
-	__le32	s_creator_os;		/* OS */
-	__le32	s_rev_level;		/* Revision level */
-/*50*/	__le16	s_def_resuid;		/* Default uid for reserved blocks */
-	__le16	s_def_resgid;		/* Default gid for reserved blocks */
-	/*
-	 * These fields are for EXT3_DYNAMIC_REV superblocks only.
-	 *
-	 * Note: the difference between the compatible feature set and
-	 * the incompatible feature set is that if there is a bit set
-	 * in the incompatible feature set that the kernel doesn't
-	 * know about, it should refuse to mount the filesystem.
-	 *
-	 * e2fsck's requirements are more strict; if it doesn't know
-	 * about a feature in either the compatible or incompatible
-	 * feature set, it must abort and not try to meddle with
-	 * things it doesn't understand...
-	 */
-	__le32	s_first_ino;		/* First non-reserved inode */
-	__le16   s_inode_size;		/* size of inode structure */
-	__le16	s_block_group_nr;	/* block group # of this superblock */
-	__le32	s_feature_compat;	/* compatible feature set */
-/*60*/	__le32	s_feature_incompat;	/* incompatible feature set */
-	__le32	s_feature_ro_compat;	/* readonly-compatible feature set */
-/*68*/	__u8	s_uuid[16];		/* 128-bit uuid for volume */
-/*78*/	char	s_volume_name[16];	/* volume name */
-/*88*/	char	s_last_mounted[64];	/* directory where last mounted */
-/*C8*/	__le32	s_algorithm_usage_bitmap; /* For compression */
-	/*
-	 * Performance hints.  Directory preallocation should only
-	 * happen if the EXT3_FEATURE_COMPAT_DIR_PREALLOC flag is on.
-	 */
-	__u8	s_prealloc_blocks;	/* Nr of blocks to try to preallocate*/
-	__u8	s_prealloc_dir_blocks;	/* Nr to preallocate for dirs */
-	__le16	s_reserved_gdt_blocks;	/* Per group desc for online growth */
-	/*
-	 * Journaling support valid if EXT3_FEATURE_COMPAT_HAS_JOURNAL set.
-	 */
-/*D0*/	__u8	s_journal_uuid[16];	/* uuid of journal superblock */
-/*E0*/	__le32	s_journal_inum;		/* inode number of journal file */
-	__le32	s_journal_dev;		/* device number of journal file */
-	__le32	s_last_orphan;		/* start of list of inodes to delete */
-	__le32	s_hash_seed[4];		/* HTREE hash seed */
-	__u8	s_def_hash_version;	/* Default hash version to use */
-	__u8	s_reserved_char_pad;
-	__u16	s_reserved_word_pad;
-	__le32	s_default_mount_opts;
-	__le32	s_first_meta_bg;	/* First metablock block group */
-	__le32	s_mkfs_time;		/* When the filesystem was created */
-	__le32	s_jnl_blocks[17];	/* Backup of the journal inode */
-	/* 64bit support valid if EXT4_FEATURE_COMPAT_64BIT */
-/*150*/	__le32	s_blocks_count_hi;	/* Blocks count */
-	__le32	s_r_blocks_count_hi;	/* Reserved blocks count */
-	__le32	s_free_blocks_count_hi;	/* Free blocks count */
-	__le16	s_min_extra_isize;	/* All inodes have at least # bytes */
-	__le16	s_want_extra_isize; 	/* New inodes should reserve # bytes */
-	__le32	s_flags;		/* Miscellaneous flags */
-	__le16  s_raid_stride;		/* RAID stride */
-	__le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
-	__le64  s_mmp_block;            /* Block for multi-mount protection */
-	__le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
-	__u8	s_log_groups_per_flex;  /* FLEX_BG group size */
-	__u8	s_reserved_char_pad2;
-	__le16  s_reserved_pad;
-	__u32   s_reserved[162];        /* Padding to the end of the block */
-};
-
-/* data type for block offset of block group */
-typedef int ext3_grpblk_t;
-
-/* data type for filesystem-wide blocks number */
-typedef unsigned long ext3_fsblk_t;
-
-#define E3FSBLK "%lu"
-
-struct ext3_reserve_window {
-	ext3_fsblk_t	_rsv_start;	/* First byte reserved */
-	ext3_fsblk_t	_rsv_end;	/* Last byte reserved or 0 */
-};
-
-struct ext3_reserve_window_node {
-	struct rb_node		rsv_node;
-	__u32			rsv_goal_size;
-	__u32			rsv_alloc_hit;
-	struct ext3_reserve_window	rsv_window;
-};
-
-struct ext3_block_alloc_info {
-	/* information about reservation window */
-	struct ext3_reserve_window_node	rsv_window_node;
-	/*
-	 * was i_next_alloc_block in ext3_inode_info
-	 * is the logical (file-relative) number of the
-	 * most-recently-allocated block in this file.
-	 * We use this for detecting linearly ascending allocation requests.
-	 */
-	__u32                   last_alloc_logical_block;
-	/*
-	 * Was i_next_alloc_goal in ext3_inode_info
-	 * is the *physical* companion to i_next_alloc_block.
-	 * it is the physical block number of the block which was most recently
-	 * allocated to this file.  This gives us the goal (target) for the next
-	 * allocation when we detect linearly ascending requests.
-	 */
-	ext3_fsblk_t		last_alloc_physical_block;
-};
-
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
-/*
- * third extended file system inode data in memory
- */
-struct ext3_inode_info {
-	__le32	i_data[15];	/* unconverted */
-	__u32	i_flags;
-#ifdef EXT3_FRAGMENTS
-	__u32	i_faddr;
-	__u8	i_frag_no;
-	__u8	i_frag_size;
-#endif
-	ext3_fsblk_t	i_file_acl;
-	__u32	i_dir_acl;
-	__u32	i_dtime;
-
-	/*
-	 * i_block_group is the number of the block group which contains
-	 * this file's inode.  Constant across the lifetime of the inode,
-	 * it is used for making block allocation decisions - we try to
-	 * place a file's data blocks near its inode block, and new inodes
-	 * near to their parent directory's inode.
-	 */
-	__u32	i_block_group;
-	unsigned long	i_state_flags;	/* Dynamic state flags for ext3 */
-
-	/* block reservation info */
-	struct ext3_block_alloc_info *i_block_alloc_info;
-
-	__u32	i_dir_start_lookup;
-#ifdef CONFIG_EXT3_FS_XATTR
-	/*
-	 * Extended attributes can be read independently of the main file
-	 * data. Taking i_mutex even when reading would cause contention
-	 * between readers of EAs and writers of regular file data, so
-	 * instead we synchronize on xattr_sem when reading or changing
-	 * EAs.
-	 */
-	struct rw_semaphore xattr_sem;
-#endif
-
-	struct list_head i_orphan;	/* unlinked but open inodes */
-
-	/*
-	 * i_disksize keeps track of what the inode size is ON DISK, not
-	 * in memory.  During truncate, i_size is set to the new size by
-	 * the VFS prior to calling ext3_truncate(), but the filesystem won't
-	 * set i_disksize to 0 until the truncate is actually under way.
-	 *
-	 * The intent is that i_disksize always represents the blocks which
-	 * are used by this file.  This allows recovery to restart truncate
-	 * on orphans if we crash during truncate.  We actually write i_disksize
-	 * into the on-disk inode when writing inodes out, instead of i_size.
-	 *
-	 * The only time when i_disksize and i_size may be different is when
-	 * a truncate is in progress.  The only things which change i_disksize
-	 * are ext3_get_block (growth) and ext3_truncate (shrinkth).
-	 */
-	loff_t	i_disksize;
-
-	/* on-disk additional length */
-	__u16 i_extra_isize;
-
-	/*
-	 * truncate_mutex is for serialising ext3_truncate() against
-	 * ext3_getblock().  In the 2.4 ext2 design, great chunks of inode's
-	 * data tree are chopped off during truncate. We can't do that in
-	 * ext3 because whenever we perform intermediate commits during
-	 * truncate, the inode and all the metadata blocks *must* be in a
-	 * consistent state which allows truncation of the orphans to restart
-	 * during recovery.  Hence we must fix the get_block-vs-truncate race
-	 * by other means, so we have truncate_mutex.
-	 */
-	struct mutex truncate_mutex;
-
-	/*
-	 * Transactions that contain inode's metadata needed to complete
-	 * fsync and fdatasync, respectively.
-	 */
-	atomic_t i_sync_tid;
-	atomic_t i_datasync_tid;
-
-#ifdef CONFIG_QUOTA
-	struct dquot *i_dquot[MAXQUOTAS];
-#endif
-
-	struct inode vfs_inode;
-};
-
-/*
- * third extended-fs super-block data in memory
- */
-struct ext3_sb_info {
-	unsigned long s_frag_size;	/* Size of a fragment in bytes */
-	unsigned long s_frags_per_block;/* Number of fragments per block */
-	unsigned long s_inodes_per_block;/* Number of inodes per block */
-	unsigned long s_frags_per_group;/* Number of fragments in a group */
-	unsigned long s_blocks_per_group;/* Number of blocks in a group */
-	unsigned long s_inodes_per_group;/* Number of inodes in a group */
-	unsigned long s_itb_per_group;	/* Number of inode table blocks per group */
-	unsigned long s_gdb_count;	/* Number of group descriptor blocks */
-	unsigned long s_desc_per_block;	/* Number of group descriptors per block */
-	unsigned long s_groups_count;	/* Number of groups in the fs */
-	unsigned long s_overhead_last;  /* Last calculated overhead */
-	unsigned long s_blocks_last;    /* Last seen block count */
-	struct buffer_head * s_sbh;	/* Buffer containing the super block */
-	struct ext3_super_block * s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head ** s_group_desc;
-	unsigned long  s_mount_opt;
-	ext3_fsblk_t s_sb_block;
-	kuid_t s_resuid;
-	kgid_t s_resgid;
-	unsigned short s_mount_state;
-	unsigned short s_pad;
-	int s_addr_per_block_bits;
-	int s_desc_per_block_bits;
-	int s_inode_size;
-	int s_first_ino;
-	spinlock_t s_next_gen_lock;
-	u32 s_next_generation;
-	u32 s_hash_seed[4];
-	int s_def_hash_version;
-	int s_hash_unsigned;	/* 3 if hash should be signed, 0 if not */
-	struct percpu_counter s_freeblocks_counter;
-	struct percpu_counter s_freeinodes_counter;
-	struct percpu_counter s_dirs_counter;
-	struct blockgroup_lock *s_blockgroup_lock;
-
-	/* root of the per fs reservation window tree */
-	spinlock_t s_rsv_window_lock;
-	struct rb_root s_rsv_window_root;
-	struct ext3_reserve_window_node s_rsv_window_head;
-
-	/* Journaling */
-	struct inode * s_journal_inode;
-	struct journal_s * s_journal;
-	struct list_head s_orphan;
-	struct mutex s_orphan_lock;
-	struct mutex s_resize_lock;
-	unsigned long s_commit_interval;
-	struct block_device *journal_bdev;
-#ifdef CONFIG_QUOTA
-	char *s_qf_names[EXT3_MAXQUOTAS];	/* Names of quota files with journalled quota */
-	int s_jquota_fmt;			/* Format of quota to use */
-#endif
-};
-
-static inline spinlock_t *
-sb_bgl_lock(struct ext3_sb_info *sbi, unsigned int block_group)
-{
-	return bgl_lock_ptr(sbi->s_blockgroup_lock, block_group);
-}
-
-static inline struct ext3_sb_info * EXT3_SB(struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-static inline struct ext3_inode_info *EXT3_I(struct inode *inode)
-{
-	return container_of(inode, struct ext3_inode_info, vfs_inode);
-}
-
-static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
-{
-	return ino == EXT3_ROOT_INO ||
-		ino == EXT3_JOURNAL_INO ||
-		ino == EXT3_RESIZE_INO ||
-		(ino >= EXT3_FIRST_INO(sb) &&
-		 ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
-}
-
-/*
- * Inode dynamic state flags
- */
-enum {
-	EXT3_STATE_JDATA,		/* journaled data exists */
-	EXT3_STATE_NEW,			/* inode is newly created */
-	EXT3_STATE_XATTR,		/* has in-inode xattrs */
-	EXT3_STATE_FLUSH_ON_CLOSE,	/* flush dirty pages on close */
-};
-
-static inline int ext3_test_inode_state(struct inode *inode, int bit)
-{
-	return test_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_set_inode_state(struct inode *inode, int bit)
-{
-	set_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-static inline void ext3_clear_inode_state(struct inode *inode, int bit)
-{
-	clear_bit(bit, &EXT3_I(inode)->i_state_flags);
-}
-
-#define NEXT_ORPHAN(inode) EXT3_I(inode)->i_dtime
-
-/*
- * Codes for operating systems
- */
-#define EXT3_OS_LINUX		0
-#define EXT3_OS_HURD		1
-#define EXT3_OS_MASIX		2
-#define EXT3_OS_FREEBSD		3
-#define EXT3_OS_LITES		4
-
-/*
- * Revision levels
- */
-#define EXT3_GOOD_OLD_REV	0	/* The good old (original) format */
-#define EXT3_DYNAMIC_REV	1	/* V2 format w/ dynamic inode sizes */
-
-#define EXT3_CURRENT_REV	EXT3_GOOD_OLD_REV
-#define EXT3_MAX_SUPP_REV	EXT3_DYNAMIC_REV
-
-#define EXT3_GOOD_OLD_INODE_SIZE 128
-
-/*
- * Feature set definitions
- */
-
-#define EXT3_HAS_COMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_RO_COMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_ro_compat & cpu_to_le32(mask) )
-#define EXT3_HAS_INCOMPAT_FEATURE(sb,mask)			\
-	( EXT3_SB(sb)->s_es->s_feature_incompat & cpu_to_le32(mask) )
-#define EXT3_SET_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_compat |= cpu_to_le32(mask)
-#define EXT3_SET_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_ro_compat |= cpu_to_le32(mask)
-#define EXT3_SET_INCOMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_incompat |= cpu_to_le32(mask)
-#define EXT3_CLEAR_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_RO_COMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_ro_compat &= ~cpu_to_le32(mask)
-#define EXT3_CLEAR_INCOMPAT_FEATURE(sb,mask)			\
-	EXT3_SB(sb)->s_es->s_feature_incompat &= ~cpu_to_le32(mask)
-
-#define EXT3_FEATURE_COMPAT_DIR_PREALLOC	0x0001
-#define EXT3_FEATURE_COMPAT_IMAGIC_INODES	0x0002
-#define EXT3_FEATURE_COMPAT_HAS_JOURNAL		0x0004
-#define EXT3_FEATURE_COMPAT_EXT_ATTR		0x0008
-#define EXT3_FEATURE_COMPAT_RESIZE_INODE	0x0010
-#define EXT3_FEATURE_COMPAT_DIR_INDEX		0x0020
-
-#define EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER	0x0001
-#define EXT3_FEATURE_RO_COMPAT_LARGE_FILE	0x0002
-#define EXT3_FEATURE_RO_COMPAT_BTREE_DIR	0x0004
-
-#define EXT3_FEATURE_INCOMPAT_COMPRESSION	0x0001
-#define EXT3_FEATURE_INCOMPAT_FILETYPE		0x0002
-#define EXT3_FEATURE_INCOMPAT_RECOVER		0x0004 /* Needs recovery */
-#define EXT3_FEATURE_INCOMPAT_JOURNAL_DEV	0x0008 /* Journal device */
-#define EXT3_FEATURE_INCOMPAT_META_BG		0x0010
-
-#define EXT3_FEATURE_COMPAT_SUPP	EXT2_FEATURE_COMPAT_EXT_ATTR
-#define EXT3_FEATURE_INCOMPAT_SUPP	(EXT3_FEATURE_INCOMPAT_FILETYPE| \
-					 EXT3_FEATURE_INCOMPAT_RECOVER| \
-					 EXT3_FEATURE_INCOMPAT_META_BG)
-#define EXT3_FEATURE_RO_COMPAT_SUPP	(EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER| \
-					 EXT3_FEATURE_RO_COMPAT_LARGE_FILE| \
-					 EXT3_FEATURE_RO_COMPAT_BTREE_DIR)
-
-/*
- * Default values for user and/or group using reserved blocks
- */
-#define	EXT3_DEF_RESUID		0
-#define	EXT3_DEF_RESGID		0
-
-/*
- * Default mount options
- */
-#define EXT3_DEFM_DEBUG		0x0001
-#define EXT3_DEFM_BSDGROUPS	0x0002
-#define EXT3_DEFM_XATTR_USER	0x0004
-#define EXT3_DEFM_ACL		0x0008
-#define EXT3_DEFM_UID16		0x0010
-#define EXT3_DEFM_JMODE		0x0060
-#define EXT3_DEFM_JMODE_DATA	0x0020
-#define EXT3_DEFM_JMODE_ORDERED	0x0040
-#define EXT3_DEFM_JMODE_WBACK	0x0060
-
-/*
- * Structure of a directory entry
- */
-#define EXT3_NAME_LEN 255
-
-struct ext3_dir_entry {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__le16	name_len;		/* Name length */
-	char	name[EXT3_NAME_LEN];	/* File name */
-};
-
-/*
- * The new version of the directory entry.  Since EXT3 structures are
- * stored in Intel (little-endian) byte order, and the name_len field could never be
- * bigger than 255 chars, it's safe to reclaim the extra byte for the
- * file_type field.
- */
-struct ext3_dir_entry_2 {
-	__le32	inode;			/* Inode number */
-	__le16	rec_len;		/* Directory entry length */
-	__u8	name_len;		/* Name length */
-	__u8	file_type;
-	char	name[EXT3_NAME_LEN];	/* File name */
-};
-
-/*
- * Ext3 directory file types.  Only the low 3 bits are used.  The
- * other bits are reserved for now.
- */
-#define EXT3_FT_UNKNOWN		0
-#define EXT3_FT_REG_FILE	1
-#define EXT3_FT_DIR		2
-#define EXT3_FT_CHRDEV		3
-#define EXT3_FT_BLKDEV		4
-#define EXT3_FT_FIFO		5
-#define EXT3_FT_SOCK		6
-#define EXT3_FT_SYMLINK		7
-
-#define EXT3_FT_MAX		8
-
-/*
- * EXT3_DIR_PAD defines the directory entries boundaries
- *
- * NOTE: It must be a multiple of 4
- */
-#define EXT3_DIR_PAD			4
-#define EXT3_DIR_ROUND			(EXT3_DIR_PAD - 1)
-#define EXT3_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT3_DIR_ROUND) & \
-					 ~EXT3_DIR_ROUND)
-#define EXT3_MAX_REC_LEN		((1<<16)-1)
-
-/*
- * Tests against MAX_REC_LEN etc were put in place for 64k block
- * sizes; if that is not possible on this arch, we can skip
- * those tests and speed things up.
- */
-static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
-{
-	unsigned len = le16_to_cpu(dlen);
-
-#if (PAGE_CACHE_SIZE >= 65536)
-	if (len == EXT3_MAX_REC_LEN)
-		return 1 << 16;
-#endif
-	return len;
-}
-
-static inline __le16 ext3_rec_len_to_disk(unsigned len)
-{
-#if (PAGE_CACHE_SIZE >= 65536)
-	if (len == (1 << 16))
-		return cpu_to_le16(EXT3_MAX_REC_LEN);
-	else if (len > (1 << 16))
-		BUG();
-#endif
-	return cpu_to_le16(len);
-}
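Two details above are easy to verify by hand: EXT3_DIR_REC_LEN() rounds the 8-byte entry header plus the name up to a multiple of 4, and a record spanning a whole 64 KiB block cannot be stored in the 16-bit rec_len field, so it is encoded as the EXT3_MAX_REC_LEN sentinel. A small sketch of the rounding, using local copies of the macros:

#include <stdio.h>

#define DIR_PAD			4
#define DIR_ROUND		(DIR_PAD - 1)
/* same rounding as EXT3_DIR_REC_LEN(): 8-byte header + name, padded to 4 */
#define DIR_REC_LEN(name_len)	(((name_len) + 8 + DIR_ROUND) & ~DIR_ROUND)

int main(void)
{
	printf("%d %d %d\n",
	       DIR_REC_LEN(1),     /* 12 */
	       DIR_REC_LEN(5),     /* 16 */
	       DIR_REC_LEN(255));  /* 264 */
	/*
	 * A 65536-byte record does not fit in __le16, so it is written to
	 * disk as 0xffff and mapped back to 1 << 16 by
	 * ext3_rec_len_from_disk() on filesystems with 64 KiB blocks.
	 */
	return 0;
}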
-
-/*
- * Hash Tree Directory indexing
- * (c) Daniel Phillips, 2001
- */
-
-#define is_dx(dir) (EXT3_HAS_COMPAT_FEATURE(dir->i_sb, \
-				      EXT3_FEATURE_COMPAT_DIR_INDEX) && \
-		      (EXT3_I(dir)->i_flags & EXT3_INDEX_FL))
-#define EXT3_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT3_LINK_MAX)
-#define EXT3_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
-
-/* Legal values for the dx_root hash_version field: */
-
-#define DX_HASH_LEGACY		0
-#define DX_HASH_HALF_MD4	1
-#define DX_HASH_TEA		2
-#define DX_HASH_LEGACY_UNSIGNED	3
-#define DX_HASH_HALF_MD4_UNSIGNED	4
-#define DX_HASH_TEA_UNSIGNED		5
-
-/* hash info structure used by the directory hash */
-struct dx_hash_info
-{
-	u32		hash;
-	u32		minor_hash;
-	int		hash_version;
-	u32		*seed;
-};
-
-
-/* 32 and 64 bit signed EOF for dx directories */
-#define EXT3_HTREE_EOF_32BIT   ((1UL  << (32 - 1)) - 1)
-#define EXT3_HTREE_EOF_64BIT   ((1ULL << (64 - 1)) - 1)
-
-
-/*
- * Control parameters used by ext3_htree_next_block
- */
-#define HASH_NB_ALWAYS		1
-
-
-/*
- * Describe an inode's exact location on disk and in memory
- */
-struct ext3_iloc
-{
-	struct buffer_head *bh;
-	unsigned long offset;
-	unsigned long block_group;
-};
-
-static inline struct ext3_inode *ext3_raw_inode(struct ext3_iloc *iloc)
-{
-	return (struct ext3_inode *) (iloc->bh->b_data + iloc->offset);
-}
-
-/*
- * This structure is stuffed into the struct file's private_data field
- * for directories.  It is where we put information so that we can do
- * readdir operations in hash tree order.
- */
-struct dir_private_info {
-	struct rb_root	root;
-	struct rb_node	*curr_node;
-	struct fname	*extra_fname;
-	loff_t		last_pos;
-	__u32		curr_hash;
-	__u32		curr_minor_hash;
-	__u32		next_hash;
-};
-
-/* calculate the first block number of the group */
-static inline ext3_fsblk_t
-ext3_group_first_block_no(struct super_block *sb, unsigned long group_no)
-{
-	return group_no * (ext3_fsblk_t)EXT3_BLOCKS_PER_GROUP(sb) +
-		le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
-}
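The helper above is a single multiply-and-add. A worked example, assuming the common 4 KiB geometry of 32768 blocks per group with the first data block at 0 (on 1 KiB-block filesystems the first data block is 1 instead):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t blocks_per_group = 32768;   /* assumed: 8 * 4096 with 4 KiB blocks */
	const uint64_t first_data_block = 0;       /* 1 on 1 KiB-block filesystems */
	unsigned long group_no = 3;

	/* group 3 starts at 3 * 32768 + 0 = 98304 */
	printf("%llu\n",
	       (unsigned long long)(group_no * blocks_per_group + first_data_block));
	return 0;
}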
-
-/*
- * Special error return code only used by dx_probe() and its callers.
- */
-#define ERR_BAD_DX_DIR	-75000
-
-/*
- * Function prototypes
- */
-
-/*
- * Ok, these declarations are also in <linux/kernel.h> but none of the
- * ext3 source programs needs to include it so they are duplicated here.
- */
-# define NORET_TYPE    /**/
-# define ATTRIB_NORET  __attribute__((noreturn))
-# define NORET_AND     noreturn,
-
-/* balloc.c */
-extern int ext3_bg_has_super(struct super_block *sb, int group);
-extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
-extern ext3_fsblk_t ext3_new_block (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int *errp);
-extern ext3_fsblk_t ext3_new_blocks (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, unsigned long *count, int *errp);
-extern void ext3_free_blocks (handle_t *handle, struct inode *inode,
-			ext3_fsblk_t block, unsigned long count);
-extern void ext3_free_blocks_sb (handle_t *handle, struct super_block *sb,
-				 ext3_fsblk_t block, unsigned long count,
-				unsigned long *pdquot_freed_blocks);
-extern ext3_fsblk_t ext3_count_free_blocks (struct super_block *);
-extern void ext3_check_blocks_bitmap (struct super_block *);
-extern struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
-						    unsigned int block_group,
-						    struct buffer_head ** bh);
-extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
-extern void ext3_init_block_alloc_info(struct inode *);
-extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
-extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
-
-/* dir.c */
-extern int ext3_check_dir_entry(const char *, struct inode *,
-				struct ext3_dir_entry_2 *,
-				struct buffer_head *, unsigned long);
-extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash,
-				    __u32 minor_hash,
-				    struct ext3_dir_entry_2 *dirent);
-extern void ext3_htree_free_dir_info(struct dir_private_info *p);
-
-/* fsync.c */
-extern int ext3_sync_file(struct file *, loff_t, loff_t, int);
-
-/* hash.c */
-extern int ext3fs_dirhash(const char *name, int len, struct
-			  dx_hash_info *hinfo);
-
-/* ialloc.c */
-extern struct inode * ext3_new_inode (handle_t *, struct inode *,
-				      const struct qstr *, umode_t);
-extern void ext3_free_inode (handle_t *, struct inode *);
-extern struct inode * ext3_orphan_get (struct super_block *, unsigned long);
-extern unsigned long ext3_count_free_inodes (struct super_block *);
-extern unsigned long ext3_count_dirs (struct super_block *);
-extern void ext3_check_inodes_bitmap (struct super_block *);
-extern unsigned long ext3_count_free (struct buffer_head *, unsigned);
-
-
-/* inode.c */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-		struct buffer_head *bh, ext3_fsblk_t blocknr);
-struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
-	int create);
-
-extern struct inode *ext3_iget(struct super_block *, unsigned long);
-extern int  ext3_write_inode (struct inode *, struct writeback_control *);
-extern int  ext3_setattr (struct dentry *, struct iattr *);
-extern void ext3_evict_inode (struct inode *);
-extern int  ext3_sync_inode (handle_t *, struct inode *);
-extern void ext3_discard_reservation (struct inode *);
-extern void ext3_dirty_inode(struct inode *, int);
-extern int ext3_change_inode_journal_flag(struct inode *, int);
-extern int ext3_get_inode_loc(struct inode *, struct ext3_iloc *);
-extern int ext3_can_truncate(struct inode *inode);
-extern void ext3_truncate(struct inode *inode);
-extern void ext3_set_inode_flags(struct inode *);
-extern void ext3_get_inode_flags(struct ext3_inode_info *);
-extern void ext3_set_aops(struct inode *inode);
-extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		       u64 start, u64 len);
-
-/* ioctl.c */
-extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
-extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* namei.c */
-extern int ext3_orphan_add(handle_t *, struct inode *);
-extern int ext3_orphan_del(handle_t *, struct inode *);
-extern int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-				__u32 start_minor_hash, __u32 *next_hash);
-
-/* resize.c */
-extern int ext3_group_add(struct super_block *sb,
-				struct ext3_new_group_data *input);
-extern int ext3_group_extend(struct super_block *sb,
-				struct ext3_super_block *es,
-				ext3_fsblk_t n_blocks_count);
-
-/* super.c */
-extern __printf(3, 4)
-void ext3_error(struct super_block *, const char *, const char *, ...);
-extern void __ext3_std_error (struct super_block *, const char *, int);
-extern __printf(3, 4)
-void ext3_abort(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_warning(struct super_block *, const char *, const char *, ...);
-extern __printf(3, 4)
-void ext3_msg(struct super_block *, const char *, const char *, ...);
-extern void ext3_update_dynamic_rev (struct super_block *sb);
-
-#define ext3_std_error(sb, errno)				\
-do {								\
-	if ((errno))						\
-		__ext3_std_error((sb), __func__, (errno));	\
-} while (0)
-
-/*
- * Inodes and files operations
- */
-
-/* dir.c */
-extern const struct file_operations ext3_dir_operations;
-
-/* file.c */
-extern const struct inode_operations ext3_file_inode_operations;
-extern const struct file_operations ext3_file_operations;
-
-/* namei.c */
-extern const struct inode_operations ext3_dir_inode_operations;
-extern const struct inode_operations ext3_special_inode_operations;
-
-/* symlink.c */
-extern const struct inode_operations ext3_symlink_inode_operations;
-extern const struct inode_operations ext3_fast_symlink_inode_operations;
-
-#define EXT3_JOURNAL(inode)	(EXT3_SB((inode)->i_sb)->s_journal)
-
-/* Define the number of blocks we need to account to a transaction to
- * modify one block of data.
- *
- * We may have to touch one inode, one bitmap buffer, up to three
- * indirection blocks, the group and superblock summaries, and the data
- * block to complete the transaction.  */
-
-#define EXT3_SINGLEDATA_TRANS_BLOCKS	8U
-
-/* Extended attribute operations touch at most two data buffers,
- * two bitmap buffers, and two group summaries, in addition to the inode
- * and the superblock, which are already accounted for. */
-
-#define EXT3_XATTR_TRANS_BLOCKS		6U
-
-/* Define the minimum size for a transaction which modifies data.  This
- * needs to take into account the fact that we may end up modifying two
- * quota files too (one for the group, one for the user quota).  The
- * superblock only gets updated once, of course, so don't bother
- * counting that again for the quota updates. */
-
-#define EXT3_DATA_TRANS_BLOCKS(sb)	(EXT3_SINGLEDATA_TRANS_BLOCKS + \
-					 EXT3_XATTR_TRANS_BLOCKS - 2 + \
-					 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
-
-/* Delete operations potentially hit one directory's namespace plus an
- * entire inode, plus arbitrary amounts of bitmap/indirection data.  Be
- * generous.  We can grow the delete transaction later if necessary. */
-
-#define EXT3_DELETE_TRANS_BLOCKS(sb)   (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
-
-/* Define an arbitrary limit for the amount of data we will anticipate
- * writing to any given transaction.  For unbounded transactions such as
- * write(2) and truncate(2) we can write more than this, but we always
- * start off at the maximum transaction size and grow the transaction
- * optimistically as we go. */
-
-#define EXT3_MAX_TRANS_DATA		64U
-
-/* We break up a large truncate or write transaction once the handle's
- * buffer credits get this low; we then need either to extend the
- * transaction or to start a new one.  Reserve enough space here for
- * inode, bitmap, superblock, group and indirection updates for at least
- * one block, plus two quota updates.  Quota allocations are not
- * needed. */
-
-#define EXT3_RESERVE_TRANS_BLOCKS	12U
-
-#define EXT3_INDEX_EXTRA_TRANS_BLOCKS	8
-
-#ifdef CONFIG_QUOTA
-/* Number of blocks needed for a quota update - we know that the structure was
- * allocated, so we need to update only inode+data */
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) (test_opt(sb, QUOTA) ? 2 : 0)
-/* Number of blocks needed for a quota insert/delete - we do some block writes,
- * but inode, sb and group updates are done only once */
-#define EXT3_QUOTA_INIT_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_INIT_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_INIT_REWRITE) : 0)
-#define EXT3_QUOTA_DEL_BLOCKS(sb) (test_opt(sb, QUOTA) ? (DQUOT_DEL_ALLOC*\
-		(EXT3_SINGLEDATA_TRANS_BLOCKS-3)+3+DQUOT_DEL_REWRITE) : 0)
-#else
-#define EXT3_QUOTA_TRANS_BLOCKS(sb) 0
-#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
-#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
-#endif
-#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
-#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (EXT3_MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
-
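(A quick sanity check of the credit accounting above, assuming a kernel built without CONFIG_QUOTA so that every quota term collapses to 0: EXT3_DATA_TRANS_BLOCKS(sb) = 8 + 6 - 2 + 0 = 12 credits for a single-block data modification, and EXT3_DELETE_TRANS_BLOCKS(sb) = 0 + 64 = 64 credits to start a delete.)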
-int
-ext3_mark_iloc_dirty(handle_t *handle,
-		     struct inode *inode,
-		     struct ext3_iloc *iloc);
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			struct ext3_iloc *iloc);
-
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode);
-
-/*
- * Wrapper functions with which ext3 calls into JBD.  The intent here is
- * to allow these to be turned into appropriate stubs so ext3 can control
- * ext2 filesystems, so ext2+ext3 systems need only one fs.  This work hasn't
- * been done yet.
- */
-
-static inline void ext3_journal_release_buffer(handle_t *handle,
-						struct buffer_head *bh)
-{
-	journal_release_buffer(handle, bh);
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err);
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh);
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh);
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh);
-
-#define ext3_journal_get_undo_access(handle, bh) \
-	__ext3_journal_get_undo_access(__func__, (handle), (bh))
-#define ext3_journal_get_write_access(handle, bh) \
-	__ext3_journal_get_write_access(__func__, (handle), (bh))
-#define ext3_journal_revoke(handle, blocknr, bh) \
-	__ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
-#define ext3_journal_get_create_access(handle, bh) \
-	__ext3_journal_get_create_access(__func__, (handle), (bh))
-#define ext3_journal_dirty_metadata(handle, bh) \
-	__ext3_journal_dirty_metadata(__func__, (handle), (bh))
-#define ext3_journal_forget(handle, bh) \
-	__ext3_journal_forget(__func__, (handle), (bh))
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
-
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks);
-int __ext3_journal_stop(const char *where, handle_t *handle);
-
-static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
-{
-	return ext3_journal_start_sb(inode->i_sb, nblocks);
-}
-
-#define ext3_journal_stop(handle) \
-	__ext3_journal_stop(__func__, (handle))
-
-static inline handle_t *ext3_journal_current_handle(void)
-{
-	return journal_current_handle();
-}
-
-static inline int ext3_journal_extend(handle_t *handle, int nblocks)
-{
-	return journal_extend(handle, nblocks);
-}
-
-static inline int ext3_journal_restart(handle_t *handle, int nblocks)
-{
-	return journal_restart(handle, nblocks);
-}
-
-static inline int ext3_journal_blocks_per_page(struct inode *inode)
-{
-	return journal_blocks_per_page(inode);
-}
-
-static inline int ext3_journal_force_commit(journal_t *journal)
-{
-	return journal_force_commit(journal);
-}
-
-/* super.c */
-int ext3_force_commit(struct super_block *sb);
-
-static inline int ext3_should_journal_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 1;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA)
-		return 1;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_order_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA)
-		return 1;
-	return 0;
-}
-
-static inline int ext3_should_writeback_data(struct inode *inode)
-{
-	if (!S_ISREG(inode->i_mode))
-		return 0;
-	if (EXT3_I(inode)->i_flags & EXT3_JOURNAL_DATA_FL)
-		return 0;
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)
-		return 1;
-	return 0;
-}
-
-#include <trace/events/ext3.h>
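(To illustrate how the three data-mode helpers above interact: for a regular file without the per-inode EXT3_JOURNAL_DATA_FL flag on a filesystem mounted with data=ordered, ext3_should_journal_data() returns 0, ext3_should_order_data() returns 1 and ext3_should_writeback_data() returns 0; exactly one of the three helpers returns true for any given inode.)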
diff --git a/fs/ext3/ext3_jbd.c b/fs/ext3/ext3_jbd.c
deleted file mode 100644
index 785a326..0000000
--- a/fs/ext3/ext3_jbd.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Interface between ext3 and JBD
- */
-
-#include "ext3.h"
-
-int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_undo_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_get_write_access(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_get_write_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_forget(const char *where, handle_t *handle,
-				struct buffer_head *bh)
-{
-	int err = journal_forget(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_revoke(const char *where, handle_t *handle,
-				unsigned long blocknr, struct buffer_head *bh)
-{
-	int err = journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_get_create_access(const char *where,
-				handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_get_create_access(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
-
-int __ext3_journal_dirty_metadata(const char *where,
-				handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(where, __func__, bh, handle,err);
-	return err;
-}
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
deleted file mode 100644
index 3b8f650..0000000
--- a/fs/ext3/file.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *  linux/fs/ext3/file.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/file.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 fs regular file handling primitives
- *
- *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *	(jj@sunsite.ms.mff.cuni.cz)
- */
-
-#include <linux/quotaops.h>
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * Called when an inode is released. Note that this is different
- * from ext3_file_open: open gets called at every open, but release
- * gets called only when /all/ the files are closed.
- */
-static int ext3_release_file (struct inode * inode, struct file * filp)
-{
-	if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
-		filemap_flush(inode->i_mapping);
-		ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
-	}
-	/* if we are the last writer on the inode, drop the block reservation */
-	if ((filp->f_mode & FMODE_WRITE) &&
-			(atomic_read(&inode->i_writecount) == 1))
-	{
-		mutex_lock(&EXT3_I(inode)->truncate_mutex);
-		ext3_discard_reservation(inode);
-		mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-	}
-	if (is_dx(inode) && filp->private_data)
-		ext3_htree_free_dir_info(filp->private_data);
-
-	return 0;
-}
-
-const struct file_operations ext3_file_operations = {
-	.llseek		= generic_file_llseek,
-	.read_iter	= generic_file_read_iter,
-	.write_iter	= generic_file_write_iter,
-	.unlocked_ioctl	= ext3_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl	= ext3_compat_ioctl,
-#endif
-	.mmap		= generic_file_mmap,
-	.open		= dquot_file_open,
-	.release	= ext3_release_file,
-	.fsync		= ext3_sync_file,
-	.splice_read	= generic_file_splice_read,
-	.splice_write	= iter_file_splice_write,
-};
-
-const struct inode_operations ext3_file_inode_operations = {
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-	.fiemap		= ext3_fiemap,
-};
-
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
deleted file mode 100644
index 1cb9c7e..0000000
--- a/fs/ext3/fsync.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *  linux/fs/ext3/fsync.c
- *
- *  Copyright (C) 1993  Stephen Tweedie (sct@redhat.com)
- *  from
- *  Copyright (C) 1992  Remy Card (card@masi.ibp.fr)
- *                      Laboratoire MASI - Institut Blaise Pascal
- *                      Universite Pierre et Marie Curie (Paris VI)
- *  from
- *  linux/fs/minix/truncate.c   Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3fs fsync primitive
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *
- *  Removed unnecessary code duplication for little endian machines
- *  and excessive __inline__s.
- *        Andi Kleen, 1997
- *
- * Major simplifications and cleanup - we only need to do the metadata, because
- * we can depend on generic_block_fdatasync() to sync the data blocks.
- */
-
-#include <linux/blkdev.h>
-#include <linux/writeback.h>
-#include "ext3.h"
-
-/*
- * akpm: A new design for ext3_sync_file().
- *
- * This is only called from sys_fsync(), sys_fdatasync() and sys_msync().
- * There cannot be a transaction open by this task.
- * Another task could have dirtied this inode.  Its data can be in any
- * state in the journalling system.
- *
- * What we do is just kick off a commit and wait on it.  This will snapshot the
- * inode to disk.
- */
-
-int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
-{
-	struct inode *inode = file->f_mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-	int ret, needs_barrier = 0;
-	tid_t commit_tid;
-
-	trace_ext3_sync_file_enter(file, datasync);
-
-	if (inode->i_sb->s_flags & MS_RDONLY) {
-		/* Make sure that we read updated state */
-		smp_rmb();
-		if (EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS)
-			return -EROFS;
-		return 0;
-	}
-	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
-	if (ret)
-		goto out;
-
-	J_ASSERT(ext3_journal_current_handle() == NULL);
-
-	/*
-	 * data=writeback,ordered:
-	 *  The caller's filemap_fdatawrite()/wait will sync the data.
-	 *  Metadata is in the journal, we wait for a proper transaction
-	 *  to commit here.
-	 *
-	 * data=journal:
-	 *  filemap_fdatawrite won't do anything (the buffers are clean).
-	 *  ext3_force_commit will write the file data into the journal and
-	 *  will wait on that.
-	 *  filemap_fdatawait() will encounter a ton of newly-dirtied pages
-	 *  (they were dirtied by commit).  But that's OK - the blocks are
-	 *  safe in-journal, which is all fsync() needs to ensure.
-	 */
-	if (ext3_should_journal_data(inode)) {
-		ret = ext3_force_commit(inode->i_sb);
-		goto out;
-	}
-
-	if (datasync)
-		commit_tid = atomic_read(&ei->i_datasync_tid);
-	else
-		commit_tid = atomic_read(&ei->i_sync_tid);
-
-	if (test_opt(inode->i_sb, BARRIER) &&
-	    !journal_trans_will_send_data_barrier(journal, commit_tid))
-		needs_barrier = 1;
-	log_start_commit(journal, commit_tid);
-	ret = log_wait_commit(journal, commit_tid);
-
-	/*
-	 * In case we didn't commit a transaction, we have to flush
-	 * disk caches manually so that data really is on persistent
-	 * storage
-	 */
-	if (needs_barrier) {
-		int err;
-
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
-		if (!ret)
-			ret = err;
-	}
-out:
-	trace_ext3_sync_file_exit(inode, ret);
-	return ret;
-}
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
deleted file mode 100644
index ede315c..0000000
--- a/fs/ext3/hash.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *  linux/fs/ext3/hash.c
- *
- * Copyright (C) 2002 by Theodore Ts'o
- *
- * This file is released under the GPL v2.
- *
- * This file may be redistributed under the terms of the GNU Public
- * License.
- */
-
-#include "ext3.h"
-#include <linux/cryptohash.h>
-
-#define DELTA 0x9E3779B9
-
-static void TEA_transform(__u32 buf[4], __u32 const in[])
-{
-	__u32	sum = 0;
-	__u32	b0 = buf[0], b1 = buf[1];
-	__u32	a = in[0], b = in[1], c = in[2], d = in[3];
-	int	n = 16;
-
-	do {
-		sum += DELTA;
-		b0 += ((b1 << 4)+a) ^ (b1+sum) ^ ((b1 >> 5)+b);
-		b1 += ((b0 << 4)+c) ^ (b0+sum) ^ ((b0 >> 5)+d);
-	} while(--n);
-
-	buf[0] += b0;
-	buf[1] += b1;
-}
-
-
-/* The old legacy hash */
-static __u32 dx_hack_hash_unsigned(const char *name, int len)
-{
-	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	const unsigned char *ucp = (const unsigned char *) name;
-
-	while (len--) {
-		hash = hash1 + (hash0 ^ (((int) *ucp++) * 7152373));
-
-		if (hash & 0x80000000)
-			hash -= 0x7fffffff;
-		hash1 = hash0;
-		hash0 = hash;
-	}
-	return hash0 << 1;
-}
-
-static __u32 dx_hack_hash_signed(const char *name, int len)
-{
-	__u32 hash, hash0 = 0x12a3fe2d, hash1 = 0x37abe8f9;
-	const signed char *scp = (const signed char *) name;
-
-	while (len--) {
-		hash = hash1 + (hash0 ^ (((int) *scp++) * 7152373));
-
-		if (hash & 0x80000000)
-			hash -= 0x7fffffff;
-		hash1 = hash0;
-		hash0 = hash;
-	}
-	return hash0 << 1;
-}
-
-static void str2hashbuf_signed(const char *msg, int len, __u32 *buf, int num)
-{
-	__u32	pad, val;
-	int	i;
-	const signed char *scp = (const signed char *) msg;
-
-	pad = (__u32)len | ((__u32)len << 8);
-	pad |= pad << 16;
-
-	val = pad;
-	if (len > num*4)
-		len = num * 4;
-	for (i = 0; i < len; i++) {
-		if ((i % 4) == 0)
-			val = pad;
-		val = ((int) scp[i]) + (val << 8);
-		if ((i % 4) == 3) {
-			*buf++ = val;
-			val = pad;
-			num--;
-		}
-	}
-	if (--num >= 0)
-		*buf++ = val;
-	while (--num >= 0)
-		*buf++ = pad;
-}
-
-static void str2hashbuf_unsigned(const char *msg, int len, __u32 *buf, int num)
-{
-	__u32	pad, val;
-	int	i;
-	const unsigned char *ucp = (const unsigned char *) msg;
-
-	pad = (__u32)len | ((__u32)len << 8);
-	pad |= pad << 16;
-
-	val = pad;
-	if (len > num*4)
-		len = num * 4;
-	for (i=0; i < len; i++) {
-		if ((i % 4) == 0)
-			val = pad;
-		val = ((int) ucp[i]) + (val << 8);
-		if ((i % 4) == 3) {
-			*buf++ = val;
-			val = pad;
-			num--;
-		}
-	}
-	if (--num >= 0)
-		*buf++ = val;
-	while (--num >= 0)
-		*buf++ = pad;
-}
-
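(Worked example of the padding above: for a 5-character name, pad = 0x05 | (0x05 << 8) = 0x0505, and pad |= pad << 16 yields 0x05050505, which is what fills any unused words of the hash buffer.)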
-/*
- * Returns the hash of a filename.  If len is 0 and name is NULL, then
- * this function can be used to test whether or not a hash version is
- * supported.
- *
- * The seed is a 4-longword (32-bit) "secret" which can be used to
- * uniquify a hash.  If the seed is all zeros, then some default seed
- * may be used.
- *
- * A particular hash version specifies whether or not the seed is
- * represented, and whether or not the returned hash is 32 bits or 64
- * bits.  32 bit hashes will return 0 for the minor hash.
- */
-int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
-{
-	__u32	hash;
-	__u32	minor_hash = 0;
-	const char	*p;
-	int		i;
-	__u32		in[8], buf[4];
-	void		(*str2hashbuf)(const char *, int, __u32 *, int) =
-				str2hashbuf_signed;
-
-	/* Initialize the default seed for the hash checksum functions */
-	buf[0] = 0x67452301;
-	buf[1] = 0xefcdab89;
-	buf[2] = 0x98badcfe;
-	buf[3] = 0x10325476;
-
-	/* Check to see if the seed is all zeros */
-	if (hinfo->seed) {
-		for (i=0; i < 4; i++) {
-			if (hinfo->seed[i])
-				break;
-		}
-		if (i < 4)
-			memcpy(buf, hinfo->seed, sizeof(buf));
-	}
-
-	switch (hinfo->hash_version) {
-	case DX_HASH_LEGACY_UNSIGNED:
-		hash = dx_hack_hash_unsigned(name, len);
-		break;
-	case DX_HASH_LEGACY:
-		hash = dx_hack_hash_signed(name, len);
-		break;
-	case DX_HASH_HALF_MD4_UNSIGNED:
-		str2hashbuf = str2hashbuf_unsigned;
-	case DX_HASH_HALF_MD4:
-		p = name;
-		while (len > 0) {
-			(*str2hashbuf)(p, len, in, 8);
-			half_md4_transform(buf, in);
-			len -= 32;
-			p += 32;
-		}
-		minor_hash = buf[2];
-		hash = buf[1];
-		break;
-	case DX_HASH_TEA_UNSIGNED:
-		str2hashbuf = str2hashbuf_unsigned;
-	case DX_HASH_TEA:
-		p = name;
-		while (len > 0) {
-			(*str2hashbuf)(p, len, in, 4);
-			TEA_transform(buf, in);
-			len -= 16;
-			p += 16;
-		}
-		hash = buf[0];
-		minor_hash = buf[1];
-		break;
-	default:
-		hinfo->hash = 0;
-		return -1;
-	}
-	hash = hash & ~1;
-	if (hash == (EXT3_HTREE_EOF_32BIT << 1))
-		hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
-	hinfo->hash = hash;
-	hinfo->minor_hash = minor_hash;
-	return 0;
-}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
deleted file mode 100644
index 3ad242e..0000000
--- a/fs/ext3/ialloc.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- *  linux/fs/ext3/ialloc.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  BSD ufs-inspired inode and directory allocation by
- *  Stephen Tweedie (sct@redhat.com), 1993
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/quotaops.h>
-#include <linux/random.h>
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * ialloc.c contains the inodes allocation and deallocation routines
- */
-
-/*
- * The free inodes are managed by bitmaps.  A file system contains several
- * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
- * block for inodes, N blocks for the inode table and data blocks.
- *
- * The file system contains group descriptors which are located after the
- * super block.  Each descriptor contains the number of the bitmap block and
- * the free blocks count in the block.
- */
-
-
-/*
- * Read the inode allocation bitmap for a given block_group, reading
- * into the specified slot in the superblock's bitmap cache.
- *
- * Return buffer_head of bitmap on success or NULL.
- */
-static struct buffer_head *
-read_inode_bitmap(struct super_block * sb, unsigned long block_group)
-{
-	struct ext3_group_desc *desc;
-	struct buffer_head *bh = NULL;
-
-	desc = ext3_get_group_desc(sb, block_group, NULL);
-	if (!desc)
-		goto error_out;
-
-	bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
-	if (!bh)
-		ext3_error(sb, "read_inode_bitmap",
-			    "Cannot read inode bitmap - "
-			    "block_group = %lu, inode_bitmap = %u",
-			    block_group, le32_to_cpu(desc->bg_inode_bitmap));
-error_out:
-	return bh;
-}
-
-/*
- * NOTE! When we get the inode, we're the only people
- * that have access to it, and as such there are no
- * race conditions we have to worry about. The inode
- * is not on the hash-lists, and it cannot be reached
- * through the filesystem because the directory entry
- * has been deleted earlier.
- *
- * HOWEVER: we must make sure that we get no aliases,
- * which means that we have to call "clear_inode()"
- * _before_ we mark the inode not in use in the inode
- * bitmaps. Otherwise a newly created file might use
- * the same inode number (not actually the same pointer
- * though), and then we'd have two inodes sharing the
- * same inode number and space on the harddisk.
- */
-void ext3_free_inode (handle_t *handle, struct inode * inode)
-{
-	struct super_block * sb = inode->i_sb;
-	int is_directory;
-	unsigned long ino;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
-	unsigned long block_group;
-	unsigned long bit;
-	struct ext3_group_desc * gdp;
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi;
-	int fatal = 0, err;
-
-	if (atomic_read(&inode->i_count) > 1) {
-		printk ("ext3_free_inode: inode has count=%d\n",
-					atomic_read(&inode->i_count));
-		return;
-	}
-	if (inode->i_nlink) {
-		printk ("ext3_free_inode: inode has nlink=%d\n",
-			inode->i_nlink);
-		return;
-	}
-	if (!sb) {
-		printk("ext3_free_inode: inode on nonexistent device\n");
-		return;
-	}
-	sbi = EXT3_SB(sb);
-
-	ino = inode->i_ino;
-	ext3_debug ("freeing inode %lu\n", ino);
-	trace_ext3_free_inode(inode);
-
-	is_directory = S_ISDIR(inode->i_mode);
-
-	es = EXT3_SB(sb)->s_es;
-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext3_error (sb, "ext3_free_inode",
-			    "reserved or nonexistent inode %lu", ino);
-		goto error_return;
-	}
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh)
-		goto error_return;
-
-	BUFFER_TRACE(bitmap_bh, "get_write_access");
-	fatal = ext3_journal_get_write_access(handle, bitmap_bh);
-	if (fatal)
-		goto error_return;
-
-	/* Ok, now we can actually update the inode bitmaps.. */
-	if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-					bit, bitmap_bh->b_data))
-		ext3_error (sb, "ext3_free_inode",
-			      "bit already cleared for inode %lu", ino);
-	else {
-		gdp = ext3_get_group_desc (sb, block_group, &bh2);
-
-		BUFFER_TRACE(bh2, "get_write_access");
-		fatal = ext3_journal_get_write_access(handle, bh2);
-		if (fatal) goto error_return;
-
-		if (gdp) {
-			spin_lock(sb_bgl_lock(sbi, block_group));
-			le16_add_cpu(&gdp->bg_free_inodes_count, 1);
-			if (is_directory)
-				le16_add_cpu(&gdp->bg_used_dirs_count, -1);
-			spin_unlock(sb_bgl_lock(sbi, block_group));
-			percpu_counter_inc(&sbi->s_freeinodes_counter);
-			if (is_directory)
-				percpu_counter_dec(&sbi->s_dirs_counter);
-
-		}
-		BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, bh2);
-		if (!fatal) fatal = err;
-	}
-	BUFFER_TRACE(bitmap_bh, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bitmap_bh);
-	if (!fatal)
-		fatal = err;
-
-error_return:
-	brelse(bitmap_bh);
-	ext3_std_error(sb, fatal);
-}
-
-/*
- * Orlov's allocator for directories.
- *
- * We always try to spread first-level directories.
- *
- * If there are block groups whose free-inode and free-block counts are
- * not worse than average, we return the one with the smallest directory
- * count.  Otherwise we simply return a random group.
- *
- * The remaining rules are as follows:
- *
- * It's OK to put a directory into a group unless
- * it has too many directories already (max_dirs) or
- * it has too few free inodes left (min_inodes) or
- * it has too few free blocks left (min_blocks).
- * The parent's group is preferred; if it doesn't satisfy these
- * conditions we search cyclically through the rest. If none
- * of the groups look good we just look for a group with more
- * free inodes than average (starting at parent's group).
- *
- * Debt is incremented each time we allocate a directory and decremented
- * when we allocate an inode, within 0--255.
- */
-
-static int find_group_orlov(struct super_block *sb, struct inode *parent)
-{
-	int parent_group = EXT3_I(parent)->i_block_group;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int ngroups = sbi->s_groups_count;
-	int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
-	unsigned int freei, avefreei;
-	ext3_fsblk_t freeb, avefreeb;
-	unsigned int ndirs;
-	int max_dirs, min_inodes;
-	ext3_grpblk_t min_blocks;
-	int group = -1, i;
-	struct ext3_group_desc *desc;
-
-	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
-	avefreei = freei / ngroups;
-	freeb = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
-	avefreeb = freeb / ngroups;
-	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
-
-	if ((parent == d_inode(sb->s_root)) ||
-	    (EXT3_I(parent)->i_flags & EXT3_TOPDIR_FL)) {
-		int best_ndir = inodes_per_group;
-		int best_group = -1;
-
-		group = prandom_u32();
-		parent_group = (unsigned)group % ngroups;
-		for (i = 0; i < ngroups; i++) {
-			group = (parent_group + i) % ngroups;
-			desc = ext3_get_group_desc (sb, group, NULL);
-			if (!desc || !desc->bg_free_inodes_count)
-				continue;
-			if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir)
-				continue;
-			if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei)
-				continue;
-			if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb)
-				continue;
-			best_group = group;
-			best_ndir = le16_to_cpu(desc->bg_used_dirs_count);
-		}
-		if (best_group >= 0)
-			return best_group;
-		goto fallback;
-	}
-
-	max_dirs = ndirs / ngroups + inodes_per_group / 16;
-	min_inodes = avefreei - inodes_per_group / 4;
-	min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
-
-	for (i = 0; i < ngroups; i++) {
-		group = (parent_group + i) % ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
-			continue;
-		if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs)
-			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes)
-			continue;
-		if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks)
-			continue;
-		return group;
-	}
-
-fallback:
-	for (i = 0; i < ngroups; i++) {
-		group = (parent_group + i) % ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (!desc || !desc->bg_free_inodes_count)
-			continue;
-		if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei)
-			return group;
-	}
-
-	if (avefreei) {
-		/*
-		 * The free-inodes counter is approximate, and for really small
-		 * filesystems the above test can fail to find any blockgroups
-		 */
-		avefreei = 0;
-		goto fallback;
-	}
-
-	return -1;
-}
-
-static int find_group_other(struct super_block *sb, struct inode *parent)
-{
-	int parent_group = EXT3_I(parent)->i_block_group;
-	int ngroups = EXT3_SB(sb)->s_groups_count;
-	struct ext3_group_desc *desc;
-	int group, i;
-
-	/*
-	 * Try to place the inode in its parent directory
-	 */
-	group = parent_group;
-	desc = ext3_get_group_desc (sb, group, NULL);
-	if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-			le16_to_cpu(desc->bg_free_blocks_count))
-		return group;
-
-	/*
-	 * We're going to place this inode in a different blockgroup from its
-	 * parent.  We want to cause files in a common directory to all land in
-	 * the same blockgroup.  But we want files which are in a different
-	 * directory which shares a blockgroup with our parent to land in a
-	 * different blockgroup.
-	 *
-	 * So add our directory's i_ino into the starting point for the hash.
-	 */
-	group = (group + parent->i_ino) % ngroups;
-
-	/*
-	 * Use a quadratic hash to find a group with a free inode and some free
-	 * blocks.
-	 */
-	for (i = 1; i < ngroups; i <<= 1) {
-		group += i;
-		if (group >= ngroups)
-			group -= ngroups;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count) &&
-				le16_to_cpu(desc->bg_free_blocks_count))
-			return group;
-	}
-
-	/*
-	 * That failed: try linear search for a free inode, even if that group
-	 * has no free blocks.
-	 */
-	group = parent_group;
-	for (i = 0; i < ngroups; i++) {
-		if (++group >= ngroups)
-			group = 0;
-		desc = ext3_get_group_desc (sb, group, NULL);
-		if (desc && le16_to_cpu(desc->bg_free_inodes_count))
-			return group;
-	}
-
-	return -1;
-}
-
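(Note on the quadratic hash above: starting from the hashed group, it probes groups at cumulative offsets of 1, 3, 7, 15, ... (that is, 2^k - 1), wrapping modulo ngroups, before falling back to the purely linear scan.)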
-/*
- * There are two policies for allocating an inode.  If the new inode is
- * a directory, then a forward search is made for a block group with both
- * free space and a low directory-to-inode ratio; if that fails, then of
- * the groups with above-average free space, that group with the fewest
- * directories already is chosen.
- *
- * For other inodes, search forward from the parent directory's block
- * group to find a free inode.
- */
-struct inode *ext3_new_inode(handle_t *handle, struct inode * dir,
-			     const struct qstr *qstr, umode_t mode)
-{
-	struct super_block *sb;
-	struct buffer_head *bitmap_bh = NULL;
-	struct buffer_head *bh2;
-	int group;
-	unsigned long ino = 0;
-	struct inode * inode;
-	struct ext3_group_desc * gdp = NULL;
-	struct ext3_super_block * es;
-	struct ext3_inode_info *ei;
-	struct ext3_sb_info *sbi;
-	int err = 0;
-	struct inode *ret;
-	int i;
-
-	/* Cannot create files in a deleted directory */
-	if (!dir || !dir->i_nlink)
-		return ERR_PTR(-EPERM);
-
-	sb = dir->i_sb;
-	trace_ext3_request_inode(dir, mode);
-	inode = new_inode(sb);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	ei = EXT3_I(inode);
-
-	sbi = EXT3_SB(sb);
-	es = sbi->s_es;
-	if (S_ISDIR(mode))
-		group = find_group_orlov(sb, dir);
-	else
-		group = find_group_other(sb, dir);
-
-	err = -ENOSPC;
-	if (group == -1)
-		goto out;
-
-	for (i = 0; i < sbi->s_groups_count; i++) {
-		err = -EIO;
-
-		gdp = ext3_get_group_desc(sb, group, &bh2);
-		if (!gdp)
-			goto fail;
-
-		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, group);
-		if (!bitmap_bh)
-			goto fail;
-
-		ino = 0;
-
-repeat_in_this_group:
-		ino = ext3_find_next_zero_bit((unsigned long *)
-				bitmap_bh->b_data, EXT3_INODES_PER_GROUP(sb), ino);
-		if (ino < EXT3_INODES_PER_GROUP(sb)) {
-
-			BUFFER_TRACE(bitmap_bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, bitmap_bh);
-			if (err)
-				goto fail;
-
-			if (!ext3_set_bit_atomic(sb_bgl_lock(sbi, group),
-						ino, bitmap_bh->b_data)) {
-				/* we won it */
-				BUFFER_TRACE(bitmap_bh,
-					"call ext3_journal_dirty_metadata");
-				err = ext3_journal_dirty_metadata(handle,
-								bitmap_bh);
-				if (err)
-					goto fail;
-				goto got;
-			}
-			/* we lost it */
-			journal_release_buffer(handle, bitmap_bh);
-
-			if (++ino < EXT3_INODES_PER_GROUP(sb))
-				goto repeat_in_this_group;
-		}
-
-		/*
-		 * This case is possible in a concurrent environment.  It is very
-		 * rare.  We cannot repeat the find_group_xxx() call because
-		 * that will simply return the same blockgroup, because the
-		 * group descriptor metadata has not yet been updated.
-		 * So we just go onto the next blockgroup.
-		 */
-		if (++group == sbi->s_groups_count)
-			group = 0;
-	}
-	err = -ENOSPC;
-	goto out;
-
-got:
-	ino += group * EXT3_INODES_PER_GROUP(sb) + 1;
-	if (ino < EXT3_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
-		ext3_error (sb, "ext3_new_inode",
-			    "reserved inode or inode > inodes count - "
-			    "block_group = %d, inode=%lu", group, ino);
-		err = -EIO;
-		goto fail;
-	}
-
-	BUFFER_TRACE(bh2, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh2);
-	if (err) goto fail;
-	spin_lock(sb_bgl_lock(sbi, group));
-	le16_add_cpu(&gdp->bg_free_inodes_count, -1);
-	if (S_ISDIR(mode)) {
-		le16_add_cpu(&gdp->bg_used_dirs_count, 1);
-	}
-	spin_unlock(sb_bgl_lock(sbi, group));
-	BUFFER_TRACE(bh2, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bh2);
-	if (err) goto fail;
-
-	percpu_counter_dec(&sbi->s_freeinodes_counter);
-	if (S_ISDIR(mode))
-		percpu_counter_inc(&sbi->s_dirs_counter);
-
-
-	if (test_opt(sb, GRPID)) {
-		inode->i_mode = mode;
-		inode->i_uid = current_fsuid();
-		inode->i_gid = dir->i_gid;
-	} else
-		inode_init_owner(inode, dir, mode);
-
-	inode->i_ino = ino;
-	/* This is the optimal IO size (for stat), not the fs block size */
-	inode->i_blocks = 0;
-	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-
-	memset(ei->i_data, 0, sizeof(ei->i_data));
-	ei->i_dir_start_lookup = 0;
-	ei->i_disksize = 0;
-
-	ei->i_flags =
-		ext3_mask_flags(mode, EXT3_I(dir)->i_flags & EXT3_FL_INHERITED);
-#ifdef EXT3_FRAGMENTS
-	ei->i_faddr = 0;
-	ei->i_frag_no = 0;
-	ei->i_frag_size = 0;
-#endif
-	ei->i_file_acl = 0;
-	ei->i_dir_acl = 0;
-	ei->i_dtime = 0;
-	ei->i_block_alloc_info = NULL;
-	ei->i_block_group = group;
-
-	ext3_set_inode_flags(inode);
-	if (IS_DIRSYNC(inode))
-		handle->h_sync = 1;
-	if (insert_inode_locked(inode) < 0) {
-		/*
-		 * Likely a bitmap corruption causing inode to be allocated
-		 * twice.
-		 */
-		err = -EIO;
-		goto fail;
-	}
-	spin_lock(&sbi->s_next_gen_lock);
-	inode->i_generation = sbi->s_next_generation++;
-	spin_unlock(&sbi->s_next_gen_lock);
-
-	ei->i_state_flags = 0;
-	ext3_set_inode_state(inode, EXT3_STATE_NEW);
-
-	/* See comment in ext3_iget for explanation */
-	if (ino >= EXT3_FIRST_INO(sb) + 1 &&
-	    EXT3_INODE_SIZE(sb) > EXT3_GOOD_OLD_INODE_SIZE) {
-		ei->i_extra_isize =
-			sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE;
-	} else {
-		ei->i_extra_isize = 0;
-	}
-
-	ret = inode;
-	dquot_initialize(inode);
-	err = dquot_alloc_inode(inode);
-	if (err)
-		goto fail_drop;
-
-	err = ext3_init_acl(handle, inode, dir);
-	if (err)
-		goto fail_free_drop;
-
-	err = ext3_init_security(handle, inode, dir, qstr);
-	if (err)
-		goto fail_free_drop;
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	if (err) {
-		ext3_std_error(sb, err);
-		goto fail_free_drop;
-	}
-
-	ext3_debug("allocating inode %lu\n", inode->i_ino);
-	trace_ext3_allocate_inode(inode, dir, mode);
-	goto really_out;
-fail:
-	ext3_std_error(sb, err);
-out:
-	iput(inode);
-	ret = ERR_PTR(err);
-really_out:
-	brelse(bitmap_bh);
-	return ret;
-
-fail_free_drop:
-	dquot_free_inode(inode);
-
-fail_drop:
-	dquot_drop(inode);
-	inode->i_flags |= S_NOQUOTA;
-	clear_nlink(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-	brelse(bitmap_bh);
-	return ERR_PTR(err);
-}
-
-/* Verify that we are loading a valid orphan from disk */
-struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
-{
-	unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
-	unsigned long block_group;
-	int bit;
-	struct buffer_head *bitmap_bh;
-	struct inode *inode = NULL;
-	long err = -EIO;
-
-	/* Error cases - e2fsck has already cleaned up for us */
-	if (ino > max_ino) {
-		ext3_warning(sb, __func__,
-			     "bad orphan ino %lu!  e2fsck was run?", ino);
-		goto error;
-	}
-
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
-	if (!bitmap_bh) {
-		ext3_warning(sb, __func__,
-			     "inode bitmap error for orphan %lu", ino);
-		goto error;
-	}
-
-	/* Having the inode bit set should be a 100% indicator that this
-	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
-	 * inodes that were being truncated, so we can't check i_nlink==0.
-	 */
-	if (!ext3_test_bit(bit, bitmap_bh->b_data))
-		goto bad_orphan;
-
-	inode = ext3_iget(sb, ino);
-	if (IS_ERR(inode))
-		goto iget_failed;
-
-	/*
-	 * If the orphan has i_nlink > 0 then it should be able to be
-	 * truncated; otherwise it won't be removed from the orphan list
-	 * during processing and an infinite loop will result.
-	 */
-	if (inode->i_nlink && !ext3_can_truncate(inode))
-		goto bad_orphan;
-
-	if (NEXT_ORPHAN(inode) > max_ino)
-		goto bad_orphan;
-	brelse(bitmap_bh);
-	return inode;
-
-iget_failed:
-	err = PTR_ERR(inode);
-	inode = NULL;
-bad_orphan:
-	ext3_warning(sb, __func__,
-		     "bad orphan inode %lu!  e2fsck was run?", ino);
-	printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
-	       bit, (unsigned long long)bitmap_bh->b_blocknr,
-	       ext3_test_bit(bit, bitmap_bh->b_data));
-	printk(KERN_NOTICE "inode=%p\n", inode);
-	if (inode) {
-		printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
-		       is_bad_inode(inode));
-		printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
-		       NEXT_ORPHAN(inode));
-		printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
-		printk(KERN_NOTICE "i_nlink=%u\n", inode->i_nlink);
-		/* Avoid freeing blocks if we got a bad deleted inode */
-		if (inode->i_nlink == 0)
-			inode->i_blocks = 0;
-		iput(inode);
-	}
-	brelse(bitmap_bh);
-error:
-	return ERR_PTR(err);
-}
-
-unsigned long ext3_count_free_inodes (struct super_block * sb)
-{
-	unsigned long desc_count;
-	struct ext3_group_desc *gdp;
-	int i;
-#ifdef EXT3FS_DEBUG
-	struct ext3_super_block *es;
-	unsigned long bitmap_count, x;
-	struct buffer_head *bitmap_bh = NULL;
-
-	es = EXT3_SB(sb)->s_es;
-	desc_count = 0;
-	bitmap_count = 0;
-	gdp = NULL;
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, i);
-		if (!bitmap_bh)
-			continue;
-
-		x = ext3_count_free(bitmap_bh, EXT3_INODES_PER_GROUP(sb) / 8);
-		printk("group %d: stored = %d, counted = %lu\n",
-			i, le16_to_cpu(gdp->bg_free_inodes_count), x);
-		bitmap_count += x;
-	}
-	brelse(bitmap_bh);
-	printk("ext3_count_free_inodes: stored = %u, computed = %lu, %lu\n",
-		le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
-	return desc_count;
-#else
-	desc_count = 0;
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
-		cond_resched();
-	}
-	return desc_count;
-#endif
-}
-
-/* Called at mount-time, super-block is locked */
-unsigned long ext3_count_dirs (struct super_block * sb)
-{
-	unsigned long count = 0;
-	int i;
-
-	for (i = 0; i < EXT3_SB(sb)->s_groups_count; i++) {
-		struct ext3_group_desc *gdp = ext3_get_group_desc (sb, i, NULL);
-		if (!gdp)
-			continue;
-		count += le16_to_cpu(gdp->bg_used_dirs_count);
-	}
-	return count;
-}
-
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
deleted file mode 100644
index 6c7e546..0000000
--- a/fs/ext3/inode.c
+++ /dev/null
@@ -1,3574 +0,0 @@
-/*
- *  linux/fs/ext3/inode.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/inode.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Goal-directed block allocation by Stephen Tweedie
- *	(sct@redhat.com), 1993, 1998
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *	(jj@sunsite.ms.mff.cuni.cz)
- *
- *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
- */
-
-#include <linux/highuid.h>
-#include <linux/quotaops.h>
-#include <linux/writeback.h>
-#include <linux/mpage.h>
-#include <linux/namei.h>
-#include <linux/uio.h>
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-
-static int ext3_writepage_trans_blocks(struct inode *inode);
-static int ext3_block_truncate_page(struct inode *inode, loff_t from);
-
-/*
- * Test whether an inode is a fast symlink.
- */
-static int ext3_inode_is_fast_symlink(struct inode *inode)
-{
-	int ea_blocks = EXT3_I(inode)->i_file_acl ?
-		(inode->i_sb->s_blocksize >> 9) : 0;
-
-	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
-}
-
-/*
- * The ext3 forget function must perform a revoke if we are freeing data
- * which has been journaled.  Metadata (eg. indirect blocks) must be
- * revoked in all cases.
- *
- * "bh" may be NULL: a metadata block may have been freed from memory
- * but there may still be a record of it in the journal, and that record
- * still needs to be revoked.
- */
-int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
-			struct buffer_head *bh, ext3_fsblk_t blocknr)
-{
-	int err;
-
-	might_sleep();
-
-	trace_ext3_forget(inode, is_metadata, blocknr);
-	BUFFER_TRACE(bh, "enter");
-
-	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
-		  "data mode %lx\n",
-		  bh, is_metadata, inode->i_mode,
-		  test_opt(inode->i_sb, DATA_FLAGS));
-
-	/* Never use the revoke function if we are doing full data
-	 * journaling: there is no need to, and a V1 superblock won't
-	 * support it.  Otherwise, only skip the revoke on un-journaled
-	 * data blocks. */
-
-	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
-	    (!is_metadata && !ext3_should_journal_data(inode))) {
-		if (bh) {
-			BUFFER_TRACE(bh, "call journal_forget");
-			return ext3_journal_forget(handle, bh);
-		}
-		return 0;
-	}
-
-	/*
-	 * data!=journal && (is_metadata || should_journal_data(inode))
-	 */
-	BUFFER_TRACE(bh, "call ext3_journal_revoke");
-	err = ext3_journal_revoke(handle, blocknr, bh);
-	if (err)
-		ext3_abort(inode->i_sb, __func__,
-			   "error %d when attempting revoke", err);
-	BUFFER_TRACE(bh, "exit");
-	return err;
-}
-
-/*
- * Work out how many blocks we need to proceed with the next chunk of a
- * truncate transaction.
- */
-static unsigned long blocks_for_truncate(struct inode *inode)
-{
-	unsigned long needed;
-
-	needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
-
-	/* Give ourselves just enough room to cope with inodes in which
-	 * i_blocks is corrupt: we've seen disk corruptions in the past
-	 * which resulted in random data in an inode which looked enough
-	 * like a regular file for ext3 to try to delete it.  Things
-	 * will go a bit crazy if that happens, but at least we should
-	 * try not to panic the whole kernel. */
-	if (needed < 2)
-		needed = 2;
-
-	/* But we need to bound the transaction so we don't overflow the
-	 * journal. */
-	if (needed > EXT3_MAX_TRANS_DATA)
-		needed = EXT3_MAX_TRANS_DATA;
-
-	return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
-}
-
-/*
- * Truncate transactions can be complex and absolutely huge.  So we need to
- * be able to restart the transaction at a convenient checkpoint to make
- * sure we don't overflow the journal.
- *
- * start_transaction gets us a new handle for a truncate transaction,
- * and extend_transaction tries to extend the existing one a bit.  If
- * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct
- */
-static handle_t *start_transaction(struct inode *inode)
-{
-	handle_t *result;
-
-	result = ext3_journal_start(inode, blocks_for_truncate(inode));
-	if (!IS_ERR(result))
-		return result;
-
-	ext3_std_error(inode->i_sb, PTR_ERR(result));
-	return result;
-}
-
-/*
- * Try to extend this transaction for the purposes of truncation.
- *
- * Returns 0 if we managed to create more room.  If we can't create more
- * room, the transaction must be restarted and we return 1.
- */
-static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
-{
-	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
-		return 0;
-	if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
-		return 0;
-	return 1;
-}
-
-/*
- * Restart the transaction associated with *handle.  This does a commit,
- * so before we call here everything must be consistently dirtied against
- * this transaction.
- */
-static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
-{
-	int ret;
-
-	jbd_debug(2, "restarting handle %p\n", handle);
-	/*
-	 * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
-	 * At this moment, get_block can be called only for blocks inside
-	 * i_size since page cache has been already dropped and writes are
-	 * blocked by i_mutex. So we can safely drop the truncate_mutex.
-	 */
-	mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-	ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
-	mutex_lock(&EXT3_I(inode)->truncate_mutex);
-	return ret;
-}
-
-/*
- * Called at inode eviction from icache
- */
-void ext3_evict_inode (struct inode *inode)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_block_alloc_info *rsv;
-	handle_t *handle;
-	int want_delete = 0;
-
-	trace_ext3_evict_inode(inode);
-	if (!inode->i_nlink && !is_bad_inode(inode)) {
-		dquot_initialize(inode);
-		want_delete = 1;
-	}
-
-	/*
-	 * When journalling data dirty buffers are tracked only in the journal.
-	 * So although mm thinks everything is clean and ready for reaping, the
-	 * inode might still have some pages to write in the running
-	 * transaction or waiting to be checkpointed. Thus calling
-	 * journal_invalidatepage() (via truncate_inode_pages()) to discard
-	 * these buffers can cause data loss. Also even if we did not discard
-	 * these buffers, we would have no way to find them after the inode
-	 * is reaped, and thus the user could see stale data if they try to read
-	 * them before the transaction is checkpointed. So be careful and
-	 * force everything to disk here... We use ei->i_datasync_tid to
-	 * store the newest transaction containing inode's data.
-	 *
-	 * Note that directories do not have this problem because they don't
-	 * use page cache.
-	 *
-	 * The s_journal check handles the case when ext3_get_journal() fails
-	 * and puts the journal inode.
-	 */
-	if (inode->i_nlink && ext3_should_journal_data(inode) &&
-	    EXT3_SB(inode->i_sb)->s_journal &&
-	    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
-	    inode->i_ino != EXT3_JOURNAL_INO) {
-		tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
-		journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;
-
-		log_start_commit(journal, commit_tid);
-		log_wait_commit(journal, commit_tid);
-		filemap_write_and_wait(&inode->i_data);
-	}
-	truncate_inode_pages_final(&inode->i_data);
-
-	ext3_discard_reservation(inode);
-	rsv = ei->i_block_alloc_info;
-	ei->i_block_alloc_info = NULL;
-	if (unlikely(rsv))
-		kfree(rsv);
-
-	if (!want_delete)
-		goto no_delete;
-
-	handle = start_transaction(inode);
-	if (IS_ERR(handle)) {
-		/*
-		 * If we're going to skip the normal cleanup, we still need to
-		 * make sure that the in-core orphan linked list is properly
-		 * cleaned up.
-		 */
-		ext3_orphan_del(NULL, inode);
-		goto no_delete;
-	}
-
-	if (IS_SYNC(inode))
-		handle->h_sync = 1;
-	inode->i_size = 0;
-	if (inode->i_blocks)
-		ext3_truncate(inode);
-	/*
-	 * Kill off the orphan record created when the inode lost the last
-	 * link.  Note that ext3_orphan_del() has to be able to cope with the
-	 * deletion of a non-existent orphan - ext3_truncate() could
-	 * have removed the record.
-	 */
-	ext3_orphan_del(handle, inode);
-	ei->i_dtime = get_seconds();
-
-	/*
-	 * One subtle ordering requirement: if anything has gone wrong
-	 * (transaction abort, IO errors, whatever), then we can still
-	 * do these next steps (the fs will already have been marked as
-	 * having errors), but we can't free the inode if the mark_dirty
-	 * fails.
-	 */
-	if (ext3_mark_inode_dirty(handle, inode)) {
-		/* If that failed, just dquot_drop() and be done with that */
-		dquot_drop(inode);
-		clear_inode(inode);
-	} else {
-		ext3_xattr_delete_inode(handle, inode);
-		dquot_free_inode(inode);
-		dquot_drop(inode);
-		clear_inode(inode);
-		ext3_free_inode(handle, inode);
-	}
-	ext3_journal_stop(handle);
-	return;
-no_delete:
-	clear_inode(inode);
-	dquot_drop(inode);
-}
-
-typedef struct {
-	__le32	*p;
-	__le32	key;
-	struct buffer_head *bh;
-} Indirect;
-
-static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
-{
-	p->key = *(p->p = v);
-	p->bh = bh;
-}
-
-static int verify_chain(Indirect *from, Indirect *to)
-{
-	while (from <= to && from->key == *from->p)
-		from++;
-	return (from > to);
-}
-
-/**
- *	ext3_block_to_path - parse the block number into array of offsets
- *	@inode: inode in question (we are only interested in its superblock)
- *	@i_block: block number to be parsed
- *	@offsets: array to store the offsets in
- *      @boundary: set this non-zero if the referred-to block is likely to be
- *             followed (on disk) by an indirect block.
- *
- *	To store the locations of file's data ext3 uses a data structure common
- *	for UNIX filesystems - tree of pointers anchored in the inode, with
- *	data blocks at leaves and indirect blocks in intermediate nodes.
- *	This function translates the block number into a path in that tree -
- *	the return value is the path length and @offsets[n] is the offset of
- *	the pointer to the (n+1)th node in the nth one. If @block is out of range
- *	(negative or too large), a warning is printed and zero is returned.
- *
- *	Note: function doesn't find node addresses, so no IO is needed. All
- *	we need to know is the capacity of indirect blocks (taken from the
- *	inode->i_sb).
- */
-
-/*
- * Portability note: the last comparison (check that we fit into triple
- * indirect block) is spelled differently, because otherwise on an
- * architecture with 32-bit longs and 8Kb pages we might get into trouble
- * if our filesystem had 8Kb blocks. We might use long long, but that would
- * kill us on x86. Oh, well, at least the sign propagation does not matter -
- * i_block would have to be negative in the very beginning, so we would not
- * get there at all.
- */
-
-static int ext3_block_to_path(struct inode *inode,
-			long i_block, int offsets[4], int *boundary)
-{
-	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
-	const long direct_blocks = EXT3_NDIR_BLOCKS,
-		indirect_blocks = ptrs,
-		double_blocks = (1 << (ptrs_bits * 2));
-	int n = 0;
-	int final = 0;
-
-	if (i_block < 0) {
-		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
-	} else if (i_block < direct_blocks) {
-		offsets[n++] = i_block;
-		final = direct_blocks;
-	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
-		offsets[n++] = EXT3_IND_BLOCK;
-		offsets[n++] = i_block;
-		final = ptrs;
-	} else if ((i_block -= indirect_blocks) < double_blocks) {
-		offsets[n++] = EXT3_DIND_BLOCK;
-		offsets[n++] = i_block >> ptrs_bits;
-		offsets[n++] = i_block & (ptrs - 1);
-		final = ptrs;
-	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
-		offsets[n++] = EXT3_TIND_BLOCK;
-		offsets[n++] = i_block >> (ptrs_bits * 2);
-		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
-		offsets[n++] = i_block & (ptrs - 1);
-		final = ptrs;
-	} else {
-		ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
-	}
-	if (boundary)
-		*boundary = final - 1 - (i_block & (ptrs - 1));
-	return n;
-}
-
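(Worked example, assuming the usual 12 direct pointers and a 4 KiB block size, i.e. 1024 addresses per indirect block: logical block 5 maps to the one-element path {5}; block 12 maps to {EXT3_IND_BLOCK, 0}; block 1100 maps to {EXT3_DIND_BLOCK, 0, 64}, since 1100 - 12 - 1024 = 64.)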
-/**
- *	ext3_get_branch - read the chain of indirect blocks leading to data
- *	@inode: inode in question
- *	@depth: depth of the chain (1 - direct pointer, etc.)
- *	@offsets: offsets of pointers in inode/indirect blocks
- *	@chain: place to store the result
- *	@err: here we store the error value
- *
- *	Function fills the array of triples <key, p, bh> and returns %NULL
- *	if everything went OK or the pointer to the last filled triple
- *	(incomplete one) otherwise. Upon the return chain[i].key contains
- *	the number of (i+1)-th block in the chain (as it is stored in memory,
- *	i.e. little-endian 32-bit), chain[i].p contains the address of that
- *	number (it points into struct inode for i==0 and into the bh->b_data
- *	for i>0) and chain[i].bh points to the buffer_head of i-th indirect
- *	block for i>0 and NULL for i==0. In other words, it holds the block
- *	numbers of the chain, addresses they were taken from (and where we can
- *	verify that chain did not change) and buffer_heads hosting these
- *	numbers.
- *
- *	Function stops when it stumbles upon zero pointer (absent block)
- *		(pointer to last triple returned, *@err == 0)
- *	or when it gets an IO error reading an indirect block
- *		(ditto, *@err == -EIO)
- *	or when it notices that chain had been changed while it was reading
- *		(ditto, *@err == -EAGAIN)
- *	or when it reads all @depth-1 indirect blocks successfully and finds
- *	the whole chain, all the way to the data (returns %NULL, *err == 0).
- */
-static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
-				 Indirect chain[4], int *err)
-{
-	struct super_block *sb = inode->i_sb;
-	Indirect *p = chain;
-	struct buffer_head *bh;
-
-	*err = 0;
-	/* i_data is not going away, no lock needed */
-	add_chain (chain, NULL, EXT3_I(inode)->i_data + *offsets);
-	if (!p->key)
-		goto no_block;
-	while (--depth) {
-		bh = sb_bread(sb, le32_to_cpu(p->key));
-		if (!bh)
-			goto failure;
-		/* Reader: pointers */
-		if (!verify_chain(chain, p))
-			goto changed;
-		add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
-		/* Reader: end */
-		if (!p->key)
-			goto no_block;
-	}
-	return NULL;
-
-changed:
-	brelse(bh);
-	*err = -EAGAIN;
-	goto no_block;
-failure:
-	*err = -EIO;
-no_block:
-	return p;
-}
-
-/**
- *	ext3_find_near - find a place for allocation with sufficient locality
- *	@inode: owner
- *	@ind: descriptor of indirect block.
- *
- *	This function returns the preferred place for block allocation.
- *	It is used when heuristic for sequential allocation fails.
- *	Rules are:
- *	  + if there is a block to the left of our position - allocate near it.
- *	  + if pointer will live in indirect block - allocate near that block.
- *	  + if pointer will live in inode - allocate in the same
- *	    cylinder group.
- *
- * In the latter case we colour the starting block by the caller's PID to
- * prevent it from clashing with concurrent allocations for a different inode
- * in the same block group.   The PID is used here so that functionally related
- * files will be close-by on-disk.
- *
- *	Caller must make sure that @ind is valid and will stay that way.
- */
-static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
-{
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	__le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
-	__le32 *p;
-	ext3_fsblk_t bg_start;
-	ext3_grpblk_t colour;
-
-	/* Try to find previous block */
-	for (p = ind->p - 1; p >= start; p--) {
-		if (*p)
-			return le32_to_cpu(*p);
-	}
-
-	/* No such thing, so let's try location of indirect block */
-	if (ind->bh)
-		return ind->bh->b_blocknr;
-
-	/*
-	 * Is it going to be referred to from the inode itself? OK, just put it
-	 * into the same cylinder group then.
-	 */
-	bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
-	colour = (current->pid % 16) *
-			(EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
-	return bg_start + colour;
-}
-
-/**
- *	ext3_find_goal - find a preferred place for allocation.
- *	@inode: owner
- *	@block:  block we want
- *	@partial: pointer to the last triple within a chain
- *
- *	Normally this function finds the preferred place for block allocation
- *	and returns it.
- */
-
-static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
-				   Indirect *partial)
-{
-	struct ext3_block_alloc_info *block_i;
-
-	block_i =  EXT3_I(inode)->i_block_alloc_info;
-
-	/*
-	 * try the heuristic for sequential allocation,
-	 * failing that at least try to get decent locality.
-	 */
-	if (block_i && (block == block_i->last_alloc_logical_block + 1)
-		&& (block_i->last_alloc_physical_block != 0)) {
-		return block_i->last_alloc_physical_block + 1;
-	}
-
-	return ext3_find_near(inode, partial);
-}
-
-/**
- *	ext3_blks_to_allocate - Look up the block map and count the number
- *	of direct blocks that need to be allocated for the given branch.
- *
- *	@branch: chain of indirect blocks
- *	@k: number of blocks needed for indirect blocks
- *	@blks: number of data blocks to be mapped.
- *	@blocks_to_boundary:  the offset in the indirect block
- *
- *	return the total number of blocks to be allocated, including the
- *	direct and indirect blocks.
- */
-static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
-		int blocks_to_boundary)
-{
-	unsigned long count = 0;
-
-	/*
-	 * Simple case: if the [t,d]indirect block(s) have not been allocated
-	 * yet, then clearly the blocks on that path have not been allocated
-	 * either.
-	 */
-	if (k > 0) {
-		/* right now we don't handle cross boundary allocation */
-		if (blks < blocks_to_boundary + 1)
-			count += blks;
-		else
-			count += blocks_to_boundary + 1;
-		return count;
-	}
-
-	count++;
-	while (count < blks && count <= blocks_to_boundary &&
-		le32_to_cpu(*(branch[0].p + count)) == 0) {
-		count++;
-	}
-	return count;
-}
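
For example (illustrative numbers only): with k == 0, blks == 8 and blocks_to_boundary == 5, count starts at 1 and grows while the following pointers in branch[0] are still zero, so at most min(8, 5 + 1) == 6 blocks are requested; with k > 0 the function returns min(blks, blocks_to_boundary + 1) directly, since an allocation never crosses an indirect-block boundary.
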
-
-/**
- *	ext3_alloc_blocks - multiple allocate blocks needed for a branch
- *	@handle: handle for this transaction
- *	@inode: owner
- *	@goal: preferred place for allocation
- *	@indirect_blks: the number of blocks that need to be allocated for
- *			indirect blocks
- *	@blks:	number of blocks to allocate for direct blocks
- *	@new_blocks: on return it will store the new block numbers for
- *	the indirect blocks (if needed) and the first direct block
- *	@err: here we store the error value
- *
- *	return the number of direct blocks allocated
- */
-static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
-			ext3_fsblk_t goal, int indirect_blks, int blks,
-			ext3_fsblk_t new_blocks[4], int *err)
-{
-	int target, i;
-	unsigned long count = 0;
-	int index = 0;
-	ext3_fsblk_t current_block = 0;
-	int ret = 0;
-
-	/*
-	 * Here we try to allocate the requested multiple blocks at once,
-	 * on a best-effort basis.
-	 * To build a branch, we should allocate blocks for
-	 * the indirect blocks (if not allocated yet), and at least
-	 * the first direct block of this branch.  That's the
-	 * minimum number of blocks we need to allocate (required).
-	 */
-	target = blks + indirect_blks;
-
-	while (1) {
-		count = target;
-		/* allocating blocks for indirect blocks and direct blocks */
-		current_block = ext3_new_blocks(handle,inode,goal,&count,err);
-		if (*err)
-			goto failed_out;
-
-		target -= count;
-		/* allocate blocks for indirect blocks */
-		while (index < indirect_blks && count) {
-			new_blocks[index++] = current_block++;
-			count--;
-		}
-
-		if (count > 0)
-			break;
-	}
-
-	/* save the new block number for the first direct block */
-	new_blocks[index] = current_block;
-
-	/* total number of blocks allocated for direct blocks */
-	ret = count;
-	*err = 0;
-	return ret;
-failed_out:
-	for (i = 0; i <index; i++)
-		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-	return ret;
-}
-
-/**
- *	ext3_alloc_branch - allocate and set up a chain of blocks.
- *	@handle: handle for this transaction
- *	@inode: owner
- *	@indirect_blks: number of allocated indirect blocks
- *	@blks: number of allocated direct blocks
- *	@goal: preferred place for allocation
- *	@offsets: offsets (in the blocks) to store the pointers to next.
- *	@branch: place to store the chain in.
- *
- *	This function allocates blocks, zeroes out all but the last one,
- *	links them into chain and (if we are synchronous) writes them to disk.
- *	In other words, it prepares a branch that can be spliced onto the
- *	inode. It stores the information about that chain in the branch[], in
- *	the same format as ext3_get_branch() would do. We are calling it after
- *	we had read the existing part of chain and partial points to the last
- *	triple of that (one with zero ->key). Upon exit we have the same
- *	picture as after a successful ext3_get_block(), except that in one
- *	place the chain is disconnected - *branch->p is still zero (we did not
- *	set the last link), but branch->key contains the number that should
- *	be placed into *branch->p to fill that gap.
- *
- *	If allocation fails we free all blocks we've allocated (and forget
- *	their buffer_heads) and return the error value from the failed
- *	ext3_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
- *	as described above and return 0.
- */
-static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			int indirect_blks, int *blks, ext3_fsblk_t goal,
-			int *offsets, Indirect *branch)
-{
-	int blocksize = inode->i_sb->s_blocksize;
-	int i, n = 0;
-	int err = 0;
-	struct buffer_head *bh;
-	int num;
-	ext3_fsblk_t new_blocks[4];
-	ext3_fsblk_t current_block;
-
-	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
-				*blks, new_blocks, &err);
-	if (err)
-		return err;
-
-	branch[0].key = cpu_to_le32(new_blocks[0]);
-	/*
-	 * metadata blocks and data blocks are allocated.
-	 */
-	for (n = 1; n <= indirect_blks;  n++) {
-		/*
-		 * Get the buffer_head for the parent block, zero it out
-		 * and set the pointer to the new one, then send the
-		 * parent to disk.
-		 */
-		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
-		if (unlikely(!bh)) {
-			err = -ENOMEM;
-			goto failed;
-		}
-		branch[n].bh = bh;
-		lock_buffer(bh);
-		BUFFER_TRACE(bh, "call get_create_access");
-		err = ext3_journal_get_create_access(handle, bh);
-		if (err) {
-			unlock_buffer(bh);
-			brelse(bh);
-			goto failed;
-		}
-
-		memset(bh->b_data, 0, blocksize);
-		branch[n].p = (__le32 *) bh->b_data + offsets[n];
-		branch[n].key = cpu_to_le32(new_blocks[n]);
-		*branch[n].p = branch[n].key;
-		if ( n == indirect_blks) {
-			current_block = new_blocks[n];
-			/*
-			 * End of chain, update the last new metablock of
-			 * the chain to point to the newly allocated
-			 * data block numbers
-			 */
-			for (i=1; i < num; i++)
-				*(branch[n].p + i) = cpu_to_le32(++current_block);
-		}
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-
-		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, bh);
-		if (err)
-			goto failed;
-	}
-	*blks = num;
-	return err;
-failed:
-	/* Allocation failed, free what we already allocated */
-	for (i = 1; i <= n ; i++) {
-		BUFFER_TRACE(branch[i].bh, "call journal_forget");
-		ext3_journal_forget(handle, branch[i].bh);
-	}
-	for (i = 0; i < indirect_blks; i++)
-		ext3_free_blocks(handle, inode, new_blocks[i], 1);
-
-	ext3_free_blocks(handle, inode, new_blocks[i], num);
-
-	return err;
-}
-
-/**
- * ext3_splice_branch - splice the allocated branch onto inode.
- * @handle: handle for this transaction
- * @inode: owner
- * @block: (logical) number of block we are adding
- * @where: location of missing link
- * @num:   number of indirect blocks we are adding
- * @blks:  number of direct blocks we are adding
- *
- * This function fills the missing link and does all housekeeping needed in
- * inode (->i_blocks, etc.). In case of success we end up with the full
- * chain to new block and return 0.
- */
-static int ext3_splice_branch(handle_t *handle, struct inode *inode,
-			long block, Indirect *where, int num, int blks)
-{
-	int i;
-	int err = 0;
-	struct ext3_block_alloc_info *block_i;
-	ext3_fsblk_t current_block;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct timespec now;
-
-	block_i = ei->i_block_alloc_info;
-	/*
-	 * If we're splicing into a [td]indirect block (as opposed to the
-	 * inode) then we need to get write access to the [td]indirect block
-	 * before the splice.
-	 */
-	if (where->bh) {
-		BUFFER_TRACE(where->bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, where->bh);
-		if (err)
-			goto err_out;
-	}
-	/* That's it */
-
-	*where->p = where->key;
-
-	/*
-	 * Update the host buffer_head or inode to point to the just-allocated
-	 * direct blocks
-	 */
-	if (num == 0 && blks > 1) {
-		current_block = le32_to_cpu(where->key) + 1;
-		for (i = 1; i < blks; i++)
-			*(where->p + i ) = cpu_to_le32(current_block++);
-	}
-
-	/*
-	 * update the most recently allocated logical & physical block
-	 * in i_block_alloc_info, to assist in finding the proper goal block
-	 * for the next allocation
-	 */
-	if (block_i) {
-		block_i->last_alloc_logical_block = block + blks - 1;
-		block_i->last_alloc_physical_block =
-				le32_to_cpu(where[num].key) + blks - 1;
-	}
-
-	/* We are done with atomic stuff, now do the rest of housekeeping */
-	now = CURRENT_TIME_SEC;
-	if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
-		inode->i_ctime = now;
-		ext3_mark_inode_dirty(handle, inode);
-	}
-	/* ext3_mark_inode_dirty already updated i_sync_tid */
-	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-
-	/* had we spliced it onto indirect block? */
-	if (where->bh) {
-		/*
-		 * If we spliced it onto an indirect block, we haven't
-		 * altered the inode.  Note however that if it is being spliced
-		 * onto an indirect block at the very end of the file (the
-		 * file is growing) then we *will* alter the inode to reflect
-		 * the new i_size.  But that is not done here - it is done in
-		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
-		 */
-		jbd_debug(5, "splicing indirect only\n");
-		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
-		err = ext3_journal_dirty_metadata(handle, where->bh);
-		if (err)
-			goto err_out;
-	} else {
-		/*
-		 * OK, we spliced it into the inode itself on a direct block.
-		 * Inode was dirtied above.
-		 */
-		jbd_debug(5, "splicing direct\n");
-	}
-	return err;
-
-err_out:
-	for (i = 1; i <= num; i++) {
-		BUFFER_TRACE(where[i].bh, "call journal_forget");
-		ext3_journal_forget(handle, where[i].bh);
-		ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
-	}
-	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
-
-	return err;
-}
-
-/*
- * Allocation strategy is simple: if we have to allocate something, we will
- * have to go the whole way to leaf. So let's do it before attaching anything
- * to tree, set linkage between the newborn blocks, write them if sync is
- * required, recheck the path, free and repeat if check fails, otherwise
- * set the last missing link (that will protect us from any truncate-generated
- * removals - all blocks on the path are immune now) and possibly force the
- * write on the parent block.
- * That has a nice additional property: no special recovery from the failed
- * allocations is needed - we simply release blocks and do not touch anything
- * reachable from inode.
- *
- * `handle' can be NULL if create == 0.
- *
- * The BKL may not be held on entry here.  Be sure to take it early.
- * return > 0, # of blocks mapped or allocated.
- * return = 0, if plain lookup failed.
- * return < 0, error case.
- */
-int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
-		sector_t iblock, unsigned long maxblocks,
-		struct buffer_head *bh_result,
-		int create)
-{
-	int err = -EIO;
-	int offsets[4];
-	Indirect chain[4];
-	Indirect *partial;
-	ext3_fsblk_t goal;
-	int indirect_blks;
-	int blocks_to_boundary = 0;
-	int depth;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	int count = 0;
-	ext3_fsblk_t first_block = 0;
-
-
-	trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
-	J_ASSERT(handle != NULL || create == 0);
-	depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
-
-	if (depth == 0)
-		goto out;
-
-	partial = ext3_get_branch(inode, depth, offsets, chain, &err);
-
-	/* Simplest case - block found, no allocation needed */
-	if (!partial) {
-		first_block = le32_to_cpu(chain[depth - 1].key);
-		clear_buffer_new(bh_result);
-		count++;
-		/*map more blocks*/
-		while (count < maxblocks && count <= blocks_to_boundary) {
-			ext3_fsblk_t blk;
-
-			if (!verify_chain(chain, chain + depth - 1)) {
-				/*
-				 * Indirect block might be removed by
-				 * truncate while we were reading it.
-				 * Handling of that case: forget what we've
-				 * got now. Flag the err as EAGAIN, so it
-				 * will reread.
-				 */
-				err = -EAGAIN;
-				count = 0;
-				break;
-			}
-			blk = le32_to_cpu(*(chain[depth-1].p + count));
-
-			if (blk == first_block + count)
-				count++;
-			else
-				break;
-		}
-		if (err != -EAGAIN)
-			goto got_it;
-	}
-
-	/* Next simple case - plain lookup or failed read of indirect block */
-	if (!create || err == -EIO)
-		goto cleanup;
-
-	/*
-	 * Block out ext3_truncate while we alter the tree
-	 */
-	mutex_lock(&ei->truncate_mutex);
-
-	/*
-	 * If the indirect block is missing while we are reading
-	 * the chain (ext3_get_branch() returned the -EAGAIN err), or
-	 * if the chain has been changed after we grabbed the semaphore
-	 * (either because another process truncated this branch, or
-	 * another get_block allocated this branch), re-grab the chain to see
-	 * if the requested block has been allocated or not.
-	 *
-	 * Since we already block the truncate/other get_block
-	 * at this point, we will have the current copy of the chain when we
-	 * splice the branch into the tree.
-	 */
-	if (err == -EAGAIN || !verify_chain(chain, partial)) {
-		while (partial > chain) {
-			brelse(partial->bh);
-			partial--;
-		}
-		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
-		if (!partial) {
-			count++;
-			mutex_unlock(&ei->truncate_mutex);
-			if (err)
-				goto cleanup;
-			clear_buffer_new(bh_result);
-			goto got_it;
-		}
-	}
-
-	/*
-	 * Okay, we need to do block allocation.  Lazily initialize the block
-	 * allocation info here if necessary
-	*/
-	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
-		ext3_init_block_alloc_info(inode);
-
-	goal = ext3_find_goal(inode, iblock, partial);
-
-	/* the number of blocks need to allocate for [d,t]indirect blocks */
-	indirect_blks = (chain + depth) - partial - 1;
-
-	/*
-	 * Next look up the indirect map to count the total number of
-	 * direct blocks to allocate for this branch.
-	 */
-	count = ext3_blks_to_allocate(partial, indirect_blks,
-					maxblocks, blocks_to_boundary);
-	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
-				offsets + (partial - chain), partial);
-
-	/*
-	 * The ext3_splice_branch call will free and forget any buffers
-	 * on the new chain if there is a failure, but that risks using
-	 * up transaction credits, especially for bitmaps where the
-	 * credits cannot be returned.  Can we handle this somehow?  We
-	 * may need to return -EAGAIN upwards in the worst case.  --sct
-	 */
-	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock,
-					partial, indirect_blks, count);
-	mutex_unlock(&ei->truncate_mutex);
-	if (err)
-		goto cleanup;
-
-	set_buffer_new(bh_result);
-got_it:
-	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-	if (count > blocks_to_boundary)
-		set_buffer_boundary(bh_result);
-	err = count;
-	/* Clean up and exit */
-	partial = chain + depth - 1;	/* the whole chain */
-cleanup:
-	while (partial > chain) {
-		BUFFER_TRACE(partial->bh, "call brelse");
-		brelse(partial->bh);
-		partial--;
-	}
-	BUFFER_TRACE(bh_result, "returned");
-out:
-	trace_ext3_get_blocks_exit(inode, iblock,
-				   depth ? le32_to_cpu(chain[depth-1].key) : 0,
-				   count, err);
-	return err;
-}
-
-/* Maximum number of blocks we map for direct IO at once. */
-#define DIO_MAX_BLOCKS 4096
-/*
- * Number of credits we need for writing DIO_MAX_BLOCKS:
- * We need sb + group descriptor + bitmap + inode -> 4
- * For B blocks with A block pointers per block we need:
- * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
- * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
- */
-#define DIO_CREDITS 25
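
Working through the comment above as a sanity check (not part of the patch): with B = 4096 and A = 256 the indirect-block terms are 1 + (4096/256/256 + 2) + (4096/256 + 2) = 1 + 2 + 18 = 21 (integer division), and adding the 4 credits for sb + group descriptor + bitmap + inode gives the 25 used for DIO_CREDITS.
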
-
-static int ext3_get_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	int ret = 0, started = 0;
-	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-
-	if (create && !handle) {	/* Direct IO write... */
-		if (max_blocks > DIO_MAX_BLOCKS)
-			max_blocks = DIO_MAX_BLOCKS;
-		handle = ext3_journal_start(inode, DIO_CREDITS +
-				EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto out;
-		}
-		started = 1;
-	}
-
-	ret = ext3_get_blocks_handle(handle, inode, iblock,
-					max_blocks, bh_result, create);
-	if (ret > 0) {
-		bh_result->b_size = (ret << inode->i_blkbits);
-		ret = 0;
-	}
-	if (started)
-		ext3_journal_stop(handle);
-out:
-	return ret;
-}
-
-int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
-		u64 start, u64 len)
-{
-	return generic_block_fiemap(inode, fieinfo, start, len,
-				    ext3_get_block);
-}
-
-/*
- * `handle' can be NULL if create is zero
- */
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
-				long block, int create, int *errp)
-{
-	struct buffer_head dummy;
-	int fatal = 0, err;
-
-	J_ASSERT(handle != NULL || create == 0);
-
-	dummy.b_state = 0;
-	dummy.b_blocknr = -1000;
-	buffer_trace_init(&dummy.b_history);
-	err = ext3_get_blocks_handle(handle, inode, block, 1,
-					&dummy, create);
-	/*
-	 * ext3_get_blocks_handle() returns number of blocks
-	 * mapped. 0 in case of a HOLE.
-	 */
-	if (err > 0) {
-		WARN_ON(err > 1);
-		err = 0;
-	}
-	*errp = err;
-	if (!err && buffer_mapped(&dummy)) {
-		struct buffer_head *bh;
-		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
-		if (unlikely(!bh)) {
-			*errp = -ENOMEM;
-			goto err;
-		}
-		if (buffer_new(&dummy)) {
-			J_ASSERT(create != 0);
-			J_ASSERT(handle != NULL);
-
-			/*
-			 * Now that we do not always journal data, we should
-			 * keep in mind whether this should always journal the
-			 * new buffer as metadata.  For now, regular file
-			 * writes use ext3_get_block instead, so it's not a
-			 * problem.
-			 */
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			fatal = ext3_journal_get_create_access(handle, bh);
-			if (!fatal && !buffer_uptodate(bh)) {
-				memset(bh->b_data,0,inode->i_sb->s_blocksize);
-				set_buffer_uptodate(bh);
-			}
-			unlock_buffer(bh);
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (!fatal)
-				fatal = err;
-		} else {
-			BUFFER_TRACE(bh, "not a new buffer");
-		}
-		if (fatal) {
-			*errp = fatal;
-			brelse(bh);
-			bh = NULL;
-		}
-		return bh;
-	}
-err:
-	return NULL;
-}
-
-struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
-			       int block, int create, int *err)
-{
-	struct buffer_head * bh;
-
-	bh = ext3_getblk(handle, inode, block, create, err);
-	if (!bh)
-		return bh;
-	if (bh_uptodate_or_lock(bh))
-		return bh;
-	get_bh(bh);
-	bh->b_end_io = end_buffer_read_sync;
-	submit_bh(READ | REQ_META | REQ_PRIO, bh);
-	wait_on_buffer(bh);
-	if (buffer_uptodate(bh))
-		return bh;
-	put_bh(bh);
-	*err = -EIO;
-	return NULL;
-}
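
A minimal usage sketch for the helper above (illustrative fragment only, not part of the patch; it assumes a valid ext3 inode in kernel context). As the comment above ext3_getblk() notes, the handle may be NULL when create == 0:

	struct buffer_head *bh;
	int err;

	/* Read logical block 0 without allocating: create == 0, handle == NULL. */
	bh = ext3_bread(NULL, inode, 0, 0, &err);
	if (bh) {
		/* ... inspect bh->b_data ... */
		brelse(bh);	/* drop the reference ext3_bread() returned */
	}
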
-
-static int walk_page_buffers(	handle_t *handle,
-				struct buffer_head *head,
-				unsigned from,
-				unsigned to,
-				int *partial,
-				int (*fn)(	handle_t *handle,
-						struct buffer_head *bh))
-{
-	struct buffer_head *bh;
-	unsigned block_start, block_end;
-	unsigned blocksize = head->b_size;
-	int err, ret = 0;
-	struct buffer_head *next;
-
-	for (	bh = head, block_start = 0;
-		ret == 0 && (bh != head || !block_start);
-		block_start = block_end, bh = next)
-	{
-		next = bh->b_this_page;
-		block_end = block_start + blocksize;
-		if (block_end <= from || block_start >= to) {
-			if (partial && !buffer_uptodate(bh))
-				*partial = 1;
-			continue;
-		}
-		err = (*fn)(handle, bh);
-		if (!ret)
-			ret = err;
-	}
-	return ret;
-}
-
-/*
- * To preserve ordering, it is essential that the hole instantiation and
- * the data write be encapsulated in a single transaction.  We cannot
- * close off a transaction and start a new one between the ext3_get_block()
- * and the commit_write().  So doing the journal_start at the start of
- * prepare_write() is the right place.
- *
- * Also, this function can nest inside ext3_writepage() ->
- * block_write_full_page(). In that case, we *know* that ext3_writepage()
- * has generated enough buffer credits to do the whole page.  So we won't
- * block on the journal in that case, which is good, because the caller may
- * be PF_MEMALLOC.
- *
- * By accident, ext3 can be reentered when a transaction is open via
- * quota file writes.  If we were to commit the transaction while thus
- * reentered, there can be a deadlock - we would be holding a quota
- * lock, and the commit would never complete if another thread had a
- * transaction open and was blocking on the quota lock - a ranking
- * violation.
- *
- * So what we do is to rely on the fact that journal_stop/journal_start
- * will _not_ run commit under these circumstances because handle->h_ref
- * is elevated.  We'll still have enough credits for the tiny quotafile
- * write.
- */
-static int do_journal_get_write_access(handle_t *handle,
-					struct buffer_head *bh)
-{
-	int dirty = buffer_dirty(bh);
-	int ret;
-
-	if (!buffer_mapped(bh) || buffer_freed(bh))
-		return 0;
-	/*
-	 * __block_prepare_write() could have dirtied some buffers. Clean
-	 * the dirty bit as jbd2_journal_get_write_access() could complain
-	 * otherwise about fs integrity issues. Setting of the dirty bit
-	 * by __block_prepare_write() isn't a real problem here as we clear
-	 * the bit before releasing a page lock and thus writeback cannot
-	 * ever write the buffer.
-	 */
-	if (dirty)
-		clear_buffer_dirty(bh);
-	ret = ext3_journal_get_write_access(handle, bh);
-	if (!ret && dirty)
-		ret = ext3_journal_dirty_metadata(handle, bh);
-	return ret;
-}
-
-/*
- * Truncate blocks that were not used by write. We have to truncate the
- * pagecache as well so that corresponding buffers get properly unmapped.
- */
-static void ext3_truncate_failed_write(struct inode *inode)
-{
-	truncate_inode_pages(inode->i_mapping, inode->i_size);
-	ext3_truncate(inode);
-}
-
-/*
- * Truncate blocks that were not used by direct IO write. We have to zero out
- * the last file block as well because direct IO might have written to it.
- */
-static void ext3_truncate_failed_direct_write(struct inode *inode)
-{
-	ext3_block_truncate_page(inode, inode->i_size);
-	ext3_truncate(inode);
-}
-
-static int ext3_write_begin(struct file *file, struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned flags,
-				struct page **pagep, void **fsdata)
-{
-	struct inode *inode = mapping->host;
-	int ret;
-	handle_t *handle;
-	int retries = 0;
-	struct page *page;
-	pgoff_t index;
-	unsigned from, to;
-	/* Reserve one block more for addition to orphan list in case
-	 * we allocate blocks but write fails for some reason */
-	int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
-
-	trace_ext3_write_begin(inode, pos, len, flags);
-
-	index = pos >> PAGE_CACHE_SHIFT;
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
-retry:
-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (!page)
-		return -ENOMEM;
-	*pagep = page;
-
-	handle = ext3_journal_start(inode, needed_blocks);
-	if (IS_ERR(handle)) {
-		unlock_page(page);
-		page_cache_release(page);
-		ret = PTR_ERR(handle);
-		goto out;
-	}
-	ret = __block_write_begin(page, pos, len, ext3_get_block);
-	if (ret)
-		goto write_begin_failed;
-
-	if (ext3_should_journal_data(inode)) {
-		ret = walk_page_buffers(handle, page_buffers(page),
-				from, to, NULL, do_journal_get_write_access);
-	}
-write_begin_failed:
-	if (ret) {
-		/*
-		 * block_write_begin may have instantiated a few blocks
-		 * outside i_size.  Trim these off again. Don't need
-		 * i_size_read because we hold i_mutex.
-		 *
-		 * Add inode to orphan list in case we crash before truncate
-		 * finishes. Do this only if ext3_can_truncate() agrees so
-		 * that orphan processing code is happy.
-		 */
-		if (pos + len > inode->i_size && ext3_can_truncate(inode))
-			ext3_orphan_add(handle, inode);
-		ext3_journal_stop(handle);
-		unlock_page(page);
-		page_cache_release(page);
-		if (pos + len > inode->i_size)
-			ext3_truncate_failed_write(inode);
-	}
-	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-out:
-	return ret;
-}
-
-
-int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
-{
-	int err = journal_dirty_data(handle, bh);
-	if (err)
-		ext3_journal_abort_handle(__func__, __func__,
-						bh, handle, err);
-	return err;
-}
-
-/* For ordered writepage and write_end functions */
-static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
-{
-	/*
-	 * Write could have mapped the buffer but it didn't copy the data in
-	 * yet. So avoid filing such a buffer into a transaction.
-	 */
-	if (buffer_mapped(bh) && buffer_uptodate(bh))
-		return ext3_journal_dirty_data(handle, bh);
-	return 0;
-}
-
-/* For write_end() in data=journal mode */
-static int write_end_fn(handle_t *handle, struct buffer_head *bh)
-{
-	if (!buffer_mapped(bh) || buffer_freed(bh))
-		return 0;
-	set_buffer_uptodate(bh);
-	return ext3_journal_dirty_metadata(handle, bh);
-}
-
-/*
- * This is nasty and subtle: ext3_write_begin() could have allocated blocks
- * for the whole page but later we failed to copy the data in. Update inode
- * size according to what we managed to copy. The rest is going to be
- * truncated in write_end function.
- */
-static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
-{
-	/* What matters to us is i_disksize. We don't write i_size anywhere */
-	if (pos + copied > inode->i_size)
-		i_size_write(inode, pos + copied);
-	if (pos + copied > EXT3_I(inode)->i_disksize) {
-		EXT3_I(inode)->i_disksize = pos + copied;
-		mark_inode_dirty(inode);
-	}
-}
-
-/*
- * We need to pick up the new inode size which generic_commit_write gave us.
- * `file' can be NULL - eg, when called from page_symlink().
- *
- * ext3 never places buffers on inode->i_mapping->private_list.  Metadata
- * buffers are managed internally.
- */
-static int ext3_ordered_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = file->f_mapping->host;
-	unsigned from, to;
-	int ret = 0, ret2;
-
-	trace_ext3_ordered_write_end(inode, pos, len, copied);
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + copied;
-	ret = walk_page_buffers(handle, page_buffers(page),
-		from, to, NULL, journal_dirty_data_fn);
-
-	if (ret == 0)
-		update_file_sizes(inode, pos, copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ret2 = ext3_journal_stop(handle);
-	if (!ret)
-		ret = ret2;
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-static int ext3_writeback_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = file->f_mapping->host;
-	int ret;
-
-	trace_ext3_writeback_write_end(inode, pos, len, copied);
-	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
-	update_file_sizes(inode, pos, copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ret = ext3_journal_stop(handle);
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-static int ext3_journalled_write_end(struct file *file,
-				struct address_space *mapping,
-				loff_t pos, unsigned len, unsigned copied,
-				struct page *page, void *fsdata)
-{
-	handle_t *handle = ext3_journal_current_handle();
-	struct inode *inode = mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	int ret = 0, ret2;
-	int partial = 0;
-	unsigned from, to;
-
-	trace_ext3_journalled_write_end(inode, pos, len, copied);
-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
-	if (copied < len) {
-		if (!PageUptodate(page))
-			copied = 0;
-		page_zero_new_buffers(page, from + copied, to);
-		to = from + copied;
-	}
-
-	ret = walk_page_buffers(handle, page_buffers(page), from,
-				to, &partial, write_end_fn);
-	if (!partial)
-		SetPageUptodate(page);
-
-	if (pos + copied > inode->i_size)
-		i_size_write(inode, pos + copied);
-	/*
-	 * There may be allocated blocks outside of i_size because
-	 * we failed to copy some data. Prepare for truncate.
-	 */
-	if (pos + len > inode->i_size && ext3_can_truncate(inode))
-		ext3_orphan_add(handle, inode);
-	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
-	atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-	if (inode->i_size > ei->i_disksize) {
-		ei->i_disksize = inode->i_size;
-		ret2 = ext3_mark_inode_dirty(handle, inode);
-		if (!ret)
-			ret = ret2;
-	}
-
-	ret2 = ext3_journal_stop(handle);
-	if (!ret)
-		ret = ret2;
-	unlock_page(page);
-	page_cache_release(page);
-
-	if (pos + len > inode->i_size)
-		ext3_truncate_failed_write(inode);
-	return ret ? ret : copied;
-}
-
-/*
- * bmap() is special.  It gets used by applications such as lilo and by
- * the swapper to find the on-disk block of a specific piece of data.
- *
- * Naturally, this is dangerous if the block concerned is still in the
- * journal.  If somebody makes a swapfile on an ext3 data-journaling
- * filesystem and enables swap, then they may get a nasty shock when the
- * data getting swapped to that swapfile suddenly gets overwritten by
- * the original zeros written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache.
- *
- * So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache.
- */
-static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
-{
-	struct inode *inode = mapping->host;
-	journal_t *journal;
-	int err;
-
-	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
-		/*
-		 * This is a REALLY heavyweight approach, but the use of
-		 * bmap on dirty files is expected to be extremely rare:
-		 * only if we run lilo or swapon on a freshly made file
-		 * do we expect this to happen.
-		 *
-		 * (bmap requires CAP_SYS_RAWIO so this does not
-		 * represent an unprivileged user DOS attack --- we'd be
-		 * in trouble if mortal users could trigger this path at
-		 * will.)
-		 *
-		 * NB. EXT3_STATE_JDATA is not set on files other than
-		 * regular files.  If somebody wants to bmap a directory
-		 * or symlink and gets confused because the buffer
-		 * hasn't yet been flushed to disk, they deserve
-		 * everything they get.
-		 */
-
-		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
-		journal = EXT3_JOURNAL(inode);
-		journal_lock_updates(journal);
-		err = journal_flush(journal);
-		journal_unlock_updates(journal);
-
-		if (err)
-			return 0;
-	}
-
-	return generic_block_bmap(mapping,block,ext3_get_block);
-}
-
-static int bget_one(handle_t *handle, struct buffer_head *bh)
-{
-	get_bh(bh);
-	return 0;
-}
-
-static int bput_one(handle_t *handle, struct buffer_head *bh)
-{
-	put_bh(bh);
-	return 0;
-}
-
-static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
-{
-	return !buffer_mapped(bh);
-}
-
-/*
- * Note that whenever we need to map blocks we start a transaction even if
- * we're not journalling data.  This is to preserve ordering: any hole
- * instantiation within __block_write_full_page -> ext3_get_block() should be
- * journalled along with the data so we don't crash and then get metadata which
- * refers to old data.
- *
- * In all journalling modes block_write_full_page() will start the I/O.
- *
- * We don't honour synchronous mounts for writepage().  That would be
- * disastrous.  Any write() or metadata operation will sync the fs for
- * us.
- */
-static int ext3_ordered_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	struct buffer_head *page_bufs;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered to avoid dereferencing inode->i_sb in non-error case to
-	 * avoid slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	/*
-	 * We give up here if we're reentered, because it might be for a
-	 * different filesystem.
-	 */
-	if (ext3_journal_current_handle())
-		goto out_fail;
-
-	trace_ext3_ordered_writepage(page);
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, inode->i_sb->s_blocksize,
-				(1 << BH_Dirty)|(1 << BH_Uptodate));
-		page_bufs = page_buffers(page);
-	} else {
-		page_bufs = page_buffers(page);
-		if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
-				       NULL, buffer_unmapped)) {
-			/* Provide NULL get_block() to catch bugs if buffers
-			 * weren't really mapped */
-			return block_write_full_page(page, NULL, wbc);
-		}
-	}
-	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_fail;
-	}
-
-	walk_page_buffers(handle, page_bufs, 0,
-			PAGE_CACHE_SIZE, NULL, bget_one);
-
-	ret = block_write_full_page(page, ext3_get_block, wbc);
-
-	/*
-	 * The page can become unlocked at any point now, and
-	 * truncate can then come in and change things.  So we
-	 * can't touch *page from now on.  But *page_bufs is
-	 * safe due to elevated refcount.
-	 */
-
-	/*
-	 * And attach them to the current transaction.  But only if
-	 * block_write_full_page() succeeded.  Otherwise they are unmapped,
-	 * and generally junk.
-	 */
-	if (ret == 0)
-		ret = walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE,
-					NULL, journal_dirty_data_fn);
-	walk_page_buffers(handle, page_bufs, 0,
-			PAGE_CACHE_SIZE, NULL, bput_one);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return ret;
-}
-
-static int ext3_writeback_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered to avoid dereferencing inode->i_sb in non-error case to
-	 * avoid slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	if (ext3_journal_current_handle())
-		goto out_fail;
-
-	trace_ext3_writeback_writepage(page);
-	if (page_has_buffers(page)) {
-		if (!walk_page_buffers(NULL, page_buffers(page), 0,
-				      PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
-			/* Provide NULL get_block() to catch bugs if buffers
-			 * weren't really mapped */
-			return block_write_full_page(page, NULL, wbc);
-		}
-	}
-
-	handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-		goto out_fail;
-	}
-
-	ret = block_write_full_page(page, ext3_get_block, wbc);
-
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-
-out_fail:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
-	return ret;
-}
-
-static int ext3_journalled_writepage(struct page *page,
-				struct writeback_control *wbc)
-{
-	struct inode *inode = page->mapping->host;
-	handle_t *handle = NULL;
-	int ret = 0;
-	int err;
-
-	J_ASSERT(PageLocked(page));
-	/*
-	 * We don't want to warn for emergency remount. The condition is
-	 * ordered to avoid dereferencing inode->i_sb in non-error case to
-	 * avoid slow-downs.
-	 */
-	WARN_ON_ONCE(IS_RDONLY(inode) &&
-		     !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS));
-
-	trace_ext3_journalled_writepage(page);
-	if (!page_has_buffers(page) || PageChecked(page)) {
-		if (ext3_journal_current_handle())
-			goto no_write;
-
-		handle = ext3_journal_start(inode,
-					    ext3_writepage_trans_blocks(inode));
-		if (IS_ERR(handle)) {
-			ret = PTR_ERR(handle);
-			goto no_write;
-		}
-		/*
-		 * It's mmapped pagecache.  Add buffers and journal it.  There
-		 * doesn't seem much point in redirtying the page here.
-		 */
-		ClearPageChecked(page);
-		ret = __block_write_begin(page, 0, PAGE_CACHE_SIZE,
-					  ext3_get_block);
-		if (ret != 0) {
-			ext3_journal_stop(handle);
-			goto out_unlock;
-		}
-		ret = walk_page_buffers(handle, page_buffers(page), 0,
-			PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
-
-		err = walk_page_buffers(handle, page_buffers(page), 0,
-				PAGE_CACHE_SIZE, NULL, write_end_fn);
-		if (ret == 0)
-			ret = err;
-		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
-		atomic_set(&EXT3_I(inode)->i_datasync_tid,
-			   handle->h_transaction->t_tid);
-		unlock_page(page);
-		err = ext3_journal_stop(handle);
-		if (!ret)
-			ret = err;
-	} else {
-		/*
-		 * It is a page full of checkpoint-mode buffers. Go and write
-		 * them. They should already have been mapped when they went
-		 * to the journal, so provide a NULL get_block function to catch
-		 * errors.
-		 */
-		ret = block_write_full_page(page, NULL, wbc);
-	}
-out:
-	return ret;
-
-no_write:
-	redirty_page_for_writepage(wbc, page);
-out_unlock:
-	unlock_page(page);
-	goto out;
-}
-
-static int ext3_readpage(struct file *file, struct page *page)
-{
-	trace_ext3_readpage(page);
-	return mpage_readpage(page, ext3_get_block);
-}
-
-static int
-ext3_readpages(struct file *file, struct address_space *mapping,
-		struct list_head *pages, unsigned nr_pages)
-{
-	return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
-}
-
-static void ext3_invalidatepage(struct page *page, unsigned int offset,
-				unsigned int length)
-{
-	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
-
-	trace_ext3_invalidatepage(page, offset, length);
-
-	/*
-	 * If it's a full truncate we just forget about the pending dirtying
-	 */
-	if (offset == 0 && length == PAGE_CACHE_SIZE)
-		ClearPageChecked(page);
-
-	journal_invalidatepage(journal, page, offset, length);
-}
-
-static int ext3_releasepage(struct page *page, gfp_t wait)
-{
-	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
-
-	trace_ext3_releasepage(page);
-	WARN_ON(PageChecked(page));
-	if (!page_has_buffers(page))
-		return 0;
-	return journal_try_to_free_buffers(journal, page, wait);
-}
-
-/*
- * If the O_DIRECT write will extend the file then add this inode to the
- * orphan list.  So recovery will truncate it back to the original size
- * if the machine crashes during the write.
- *
- * If the O_DIRECT write is instantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file. But current
- * VFS code falls back into buffered path in that case so we are safe.
- */
-static ssize_t ext3_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
-			      loff_t offset)
-{
-	struct file *file = iocb->ki_filp;
-	struct inode *inode = file->f_mapping->host;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	handle_t *handle;
-	ssize_t ret;
-	int orphan = 0;
-	size_t count = iov_iter_count(iter);
-	int retries = 0;
-
-	trace_ext3_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
-
-	if (iov_iter_rw(iter) == WRITE) {
-		loff_t final_size = offset + count;
-
-		if (final_size > inode->i_size) {
-			/* Credits for sb + inode write */
-			handle = ext3_journal_start(inode, 2);
-			if (IS_ERR(handle)) {
-				ret = PTR_ERR(handle);
-				goto out;
-			}
-			ret = ext3_orphan_add(handle, inode);
-			if (ret) {
-				ext3_journal_stop(handle);
-				goto out;
-			}
-			orphan = 1;
-			ei->i_disksize = inode->i_size;
-			ext3_journal_stop(handle);
-		}
-	}
-
-retry:
-	ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
-	/*
-	 * In case of error, an extending write may have instantiated a few
-	 * blocks outside i_size. Trim these off again.
-	 */
-	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
-		loff_t isize = i_size_read(inode);
-		loff_t end = offset + count;
-
-		if (end > isize)
-			ext3_truncate_failed_direct_write(inode);
-	}
-	if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
-		goto retry;
-
-	if (orphan) {
-		int err;
-
-		/* Credits for sb + inode write */
-		handle = ext3_journal_start(inode, 2);
-		if (IS_ERR(handle)) {
-			/* This is really bad luck. We've written the data
-			 * but cannot extend i_size. Truncate allocated blocks
-			 * and pretend the write failed... */
-			ext3_truncate_failed_direct_write(inode);
-			ret = PTR_ERR(handle);
-			if (inode->i_nlink)
-				ext3_orphan_del(NULL, inode);
-			goto out;
-		}
-		if (inode->i_nlink)
-			ext3_orphan_del(handle, inode);
-		if (ret > 0) {
-			loff_t end = offset + ret;
-			if (end > inode->i_size) {
-				ei->i_disksize = end;
-				i_size_write(inode, end);
-				/*
-				 * We're going to return a positive `ret'
-				 * here due to non-zero-length I/O, so there's
-				 * no way of reporting error returns from
-				 * ext3_mark_inode_dirty() to userspace.  So
-				 * ignore it.
-				 */
-				ext3_mark_inode_dirty(handle, inode);
-			}
-		}
-		err = ext3_journal_stop(handle);
-		if (ret == 0)
-			ret = err;
-	}
-out:
-	trace_ext3_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
-	return ret;
-}
-
-/*
- * Pages can be marked dirty completely asynchronously from ext3's journalling
- * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
- * much here because ->set_page_dirty is called under VFS locks.  The page is
- * not necessarily locked.
- *
- * We cannot just dirty the page and leave attached buffers clean, because the
- * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
- * or jbddirty because all the journalling code will explode.
- *
- * So what we do is to mark the page "pending dirty" and next time writepage
- * is called, propagate that into the buffers appropriately.
- */
-static int ext3_journalled_set_page_dirty(struct page *page)
-{
-	SetPageChecked(page);
-	return __set_page_dirty_nobuffers(page);
-}
-
-static const struct address_space_operations ext3_ordered_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_ordered_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_ordered_write_end,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.direct_IO		= ext3_direct_IO,
-	.migratepage		= buffer_migrate_page,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.is_dirty_writeback	= buffer_check_dirty_writeback,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-static const struct address_space_operations ext3_writeback_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_writeback_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_writeback_write_end,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.direct_IO		= ext3_direct_IO,
-	.migratepage		= buffer_migrate_page,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-static const struct address_space_operations ext3_journalled_aops = {
-	.readpage		= ext3_readpage,
-	.readpages		= ext3_readpages,
-	.writepage		= ext3_journalled_writepage,
-	.write_begin		= ext3_write_begin,
-	.write_end		= ext3_journalled_write_end,
-	.set_page_dirty		= ext3_journalled_set_page_dirty,
-	.bmap			= ext3_bmap,
-	.invalidatepage		= ext3_invalidatepage,
-	.releasepage		= ext3_releasepage,
-	.is_partially_uptodate  = block_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-};
-
-void ext3_set_aops(struct inode *inode)
-{
-	if (ext3_should_order_data(inode))
-		inode->i_mapping->a_ops = &ext3_ordered_aops;
-	else if (ext3_should_writeback_data(inode))
-		inode->i_mapping->a_ops = &ext3_writeback_aops;
-	else
-		inode->i_mapping->a_ops = &ext3_journalled_aops;
-}
-
-/*
- * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
- * up to the end of the block which corresponds to `from'.
- * This is required during truncate. We need to physically zero the tail end
- * of that block so it doesn't yield old data if the file is later grown.
- */
-static int ext3_block_truncate_page(struct inode *inode, loff_t from)
-{
-	ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
-	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
-	unsigned blocksize, iblock, length, pos;
-	struct page *page;
-	handle_t *handle = NULL;
-	struct buffer_head *bh;
-	int err = 0;
-
-	/* Truncated on block boundary - nothing to do */
-	blocksize = inode->i_sb->s_blocksize;
-	if ((from & (blocksize - 1)) == 0)
-		return 0;
-
-	page = grab_cache_page(inode->i_mapping, index);
-	if (!page)
-		return -ENOMEM;
-	length = blocksize - (offset & (blocksize - 1));
-	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-
-	if (!page_has_buffers(page))
-		create_empty_buffers(page, blocksize, 0);
-
-	/* Find the buffer that contains "offset" */
-	bh = page_buffers(page);
-	pos = blocksize;
-	while (offset >= pos) {
-		bh = bh->b_this_page;
-		iblock++;
-		pos += blocksize;
-	}
-
-	err = 0;
-	if (buffer_freed(bh)) {
-		BUFFER_TRACE(bh, "freed: skip");
-		goto unlock;
-	}
-
-	if (!buffer_mapped(bh)) {
-		BUFFER_TRACE(bh, "unmapped");
-		ext3_get_block(inode, iblock, bh, 0);
-		/* unmapped? It's a hole - nothing to do */
-		if (!buffer_mapped(bh)) {
-			BUFFER_TRACE(bh, "still unmapped");
-			goto unlock;
-		}
-	}
-
-	/* Ok, it's mapped. Make sure it's up-to-date */
-	if (PageUptodate(page))
-		set_buffer_uptodate(bh);
-
-	if (!bh_uptodate_or_lock(bh)) {
-		err = bh_submit_read(bh);
-		/* Uhhuh. Read error. Complain and punt. */
-		if (err)
-			goto unlock;
-	}
-
-	/* data=writeback mode doesn't need transaction to zero-out data */
-	if (!ext3_should_writeback_data(inode)) {
-		/* We journal at most one block */
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			clear_highpage(page);
-			flush_dcache_page(page);
-			err = PTR_ERR(handle);
-			goto unlock;
-		}
-	}
-
-	if (ext3_should_journal_data(inode)) {
-		BUFFER_TRACE(bh, "get write access");
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err)
-			goto stop;
-	}
-
-	zero_user(page, offset, length);
-	BUFFER_TRACE(bh, "zeroed end of block");
-
-	err = 0;
-	if (ext3_should_journal_data(inode)) {
-		err = ext3_journal_dirty_metadata(handle, bh);
-	} else {
-		if (ext3_should_order_data(inode))
-			err = ext3_journal_dirty_data(handle, bh);
-		mark_buffer_dirty(bh);
-	}
-stop:
-	if (handle)
-		ext3_journal_stop(handle);
-
-unlock:
-	unlock_page(page);
-	page_cache_release(page);
-	return err;
-}
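
A concrete example of the zeroing above (illustrative numbers only): with a 1KB block size on a 4KB page and a truncate to from == 5000, the block containing the new EOF covers file bytes 4096..5119; offset == 5000 & (PAGE_CACHE_SIZE - 1) == 904, length == 1024 - (5000 & 1023) == 120, and zero_user() clears file bytes 5000..5119 so that growing the file later cannot expose stale data from the old tail of that block.
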
-
-/*
- * Probably it should be a library function... search for first non-zero word
- * or memcmp with zero_page, whatever is better for particular architecture.
- * Linus?
- */
-static inline int all_zeroes(__le32 *p, __le32 *q)
-{
-	while (p < q)
-		if (*p++)
-			return 0;
-	return 1;
-}
-
-/**
- *	ext3_find_shared - find the indirect blocks for partial truncation.
- *	@inode:	  inode in question
- *	@depth:	  depth of the affected branch
- *	@offsets: offsets of pointers in that branch (see ext3_block_to_path)
- *	@chain:	  place to store the pointers to partial indirect blocks
- *	@top:	  place to store the (detached) top of the branch
- *
- *	This is a helper function used by ext3_truncate().
- *
- *	When we do truncate() we may have to clean the ends of several
- *	indirect blocks but leave the blocks themselves alive. Block is
- *	partially truncated if some data below the new i_size is referred
- *	from it (and it is on the path to the first completely truncated
- *	data block, indeed).  We have to free the top of that path along
- *	with everything to the right of the path. Since no allocation
- *	past the truncation point is possible until ext3_truncate()
- *	finishes, we may safely do the latter, but top of branch may
- *	require special attention - pageout below the truncation point
- *	might try to populate it.
- *
- *	We atomically detach the top of branch from the tree, store the
- *	block number of its root in *@top, pointers to buffer_heads of
- *	partially truncated blocks - in @chain[].bh and pointers to
- *	their last elements that should not be removed - in
- *	@chain[].p. Return value is the pointer to last filled element
- *	of @chain.
- *
- *	The work left to the caller is to do the actual freeing of subtrees:
- *		a) free the subtree starting from *@top
- *		b) free the subtrees whose roots are stored in
- *			(@chain[i].p+1 .. end of @chain[i].bh->b_data)
- *		c) free the subtrees growing from the inode past the @chain[0].
- *			(no partially truncated stuff there).  */
-
-static Indirect *ext3_find_shared(struct inode *inode, int depth,
-			int offsets[4], Indirect chain[4], __le32 *top)
-{
-	Indirect *partial, *p;
-	int k, err;
-
-	*top = 0;
-	/* Make k index the deepest non-null offset + 1 */
-	for (k = depth; k > 1 && !offsets[k-1]; k--)
-		;
-	partial = ext3_get_branch(inode, k, offsets, chain, &err);
-	/* Writer: pointers */
-	if (!partial)
-		partial = chain + k-1;
-	/*
-	 * If the branch acquired continuation since we've looked at it -
-	 * fine, it should all survive and (new) top doesn't belong to us.
-	 */
-	if (!partial->key && *partial->p)
-		/* Writer: end */
-		goto no_top;
-	for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
-		;
-	/*
-	 * OK, we've found the last block that must survive. The rest of our
-	 * branch should be detached before unlocking. However, if that rest
-	 * of branch is all ours and does not grow immediately from the inode
-	 * it's easier to cheat and just decrement partial->p.
-	 */
-	if (p == chain + k - 1 && p > chain) {
-		p->p--;
-	} else {
-		*top = *p->p;
-		/* Nope, don't do this in ext3.  Must leave the tree intact */
-#if 0
-		*p->p = 0;
-#endif
-	}
-	/* Writer: end */
-
-	while(partial > p) {
-		brelse(partial->bh);
-		partial--;
-	}
-no_top:
-	return partial;
-}
-
-/*
- * Zero a number of block pointers in either an inode or an indirect block.
- * If we restart the transaction we must again get write access to the
- * indirect block for further modification.
- *
- * We release `count' blocks on disk, but (last - first) may be greater
- * than `count' because there can be holes in there.
- */
-static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
-		struct buffer_head *bh, ext3_fsblk_t block_to_free,
-		unsigned long count, __le32 *first, __le32 *last)
-{
-	__le32 *p;
-	if (try_to_extend_transaction(handle, inode)) {
-		if (bh) {
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			if (ext3_journal_dirty_metadata(handle, bh))
-				return;
-		}
-		ext3_mark_inode_dirty(handle, inode);
-		truncate_restart_transaction(handle, inode);
-		if (bh) {
-			BUFFER_TRACE(bh, "retaking write access");
-			if (ext3_journal_get_write_access(handle, bh))
-				return;
-		}
-	}
-
-	/*
-	 * Any buffers which are on the journal will be in memory. We find
-	 * them on the hash table so journal_revoke() will run journal_forget()
-	 * on them.  We've already detached each block from the file, so
-	 * bforget() in journal_forget() should be safe.
-	 *
-	 * AKPM: turn on bforget in journal_forget()!!!
-	 */
-	for (p = first; p < last; p++) {
-		u32 nr = le32_to_cpu(*p);
-		if (nr) {
-			struct buffer_head *bh;
-
-			*p = 0;
-			bh = sb_find_get_block(inode->i_sb, nr);
-			ext3_forget(handle, 0, inode, bh, nr);
-		}
-	}
-
-	ext3_free_blocks(handle, inode, block_to_free, count);
-}
-
-/**
- * ext3_free_data - free a list of data blocks
- * @handle:	handle for this transaction
- * @inode:	inode we are dealing with
- * @this_bh:	indirect buffer_head which contains *@first and *@last
- * @first:	array of block numbers
- * @last:	points immediately past the end of array
- *
- * We are freeing all blocks referred from that array (numbers are stored as
- * little-endian 32-bit) and updating @inode->i_blocks appropriately.
- *
- * We accumulate contiguous runs of blocks to free.  Conveniently, if these
- * blocks are contiguous then releasing them at one time will only affect one
- * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
- * actually use a lot of journal space.
- *
- * @this_bh will be %NULL if @first and @last point into the inode's direct
- * block pointers.
- */
-static void ext3_free_data(handle_t *handle, struct inode *inode,
-			   struct buffer_head *this_bh,
-			   __le32 *first, __le32 *last)
-{
-	ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
-	unsigned long count = 0;	    /* Number of blocks in the run */
-	__le32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
-					       corresponding to
-					       block_to_free */
-	ext3_fsblk_t nr;		    /* Current block # */
-	__le32 *p;			    /* Pointer into inode/ind
-					       for current block */
-	int err;
-
-	if (this_bh) {				/* For indirect block */
-		BUFFER_TRACE(this_bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, this_bh);
-		/* Important: if we can't update the indirect pointers
-		 * to the blocks, we can't free them. */
-		if (err)
-			return;
-	}
-
-	for (p = first; p < last; p++) {
-		nr = le32_to_cpu(*p);
-		if (nr) {
-			/* accumulate blocks to free if they're contiguous */
-			if (count == 0) {
-				block_to_free = nr;
-				block_to_free_p = p;
-				count = 1;
-			} else if (nr == block_to_free + count) {
-				count++;
-			} else {
-				ext3_clear_blocks(handle, inode, this_bh,
-						  block_to_free,
-						  count, block_to_free_p, p);
-				block_to_free = nr;
-				block_to_free_p = p;
-				count = 1;
-			}
-		}
-	}
-
-	if (count > 0)
-		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
-				  count, block_to_free_p, p);
-
-	if (this_bh) {
-		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
-
-		/*
-		 * The buffer head should have an attached journal head at this
-		 * point. However, if the data is corrupted and an indirect
-		 * block pointed to itself, it would have been detached when
-		 * the block was cleared. Check for this instead of OOPSing.
-		 */
-		if (bh2jh(this_bh))
-			ext3_journal_dirty_metadata(handle, this_bh);
-		else
-			ext3_error(inode->i_sb, "ext3_free_data",
-				   "circular indirect block detected, "
-				   "inode=%lu, block=%llu",
-				   inode->i_ino,
-				   (unsigned long long)this_bh->b_blocknr);
-	}
-}
-
-/**
- *	ext3_free_branches - free an array of branches
- *	@handle: JBD handle for this transaction
- *	@inode:	inode we are dealing with
- *	@parent_bh: the buffer_head which contains *@first and *@last
- *	@first:	array of block numbers
- *	@last:	pointer immediately past the end of array
- *	@depth:	depth of the branches to free
- *
- *	We are freeing all blocks referred from these branches (numbers are
- *	stored as little-endian 32-bit) and updating @inode->i_blocks
- *	appropriately.
- */
-static void ext3_free_branches(handle_t *handle, struct inode *inode,
-			       struct buffer_head *parent_bh,
-			       __le32 *first, __le32 *last, int depth)
-{
-	ext3_fsblk_t nr;
-	__le32 *p;
-
-	if (is_handle_aborted(handle))
-		return;
-
-	if (depth--) {
-		struct buffer_head *bh;
-		int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-		p = last;
-		while (--p >= first) {
-			nr = le32_to_cpu(*p);
-			if (!nr)
-				continue;		/* A hole */
-
-			/* Go read the buffer for the next level down */
-			bh = sb_bread(inode->i_sb, nr);
-
-			/*
-			 * A read failure? Report error and clear slot
-			 * (should be rare).
-			 */
-			if (!bh) {
-				ext3_error(inode->i_sb, "ext3_free_branches",
-					   "Read failure, inode=%lu, block="E3FSBLK,
-					   inode->i_ino, nr);
-				continue;
-			}
-
-			/* This zaps the entire block.  Bottom up. */
-			BUFFER_TRACE(bh, "free child branches");
-			ext3_free_branches(handle, inode, bh,
-					   (__le32*)bh->b_data,
-					   (__le32*)bh->b_data + addr_per_block,
-					   depth);
-
-			/*
-			 * Everything below this pointer has been
-			 * released.  Now let this top-of-subtree go.
-			 *
-			 * We want the freeing of this indirect block to be
-			 * atomic in the journal with the updating of the
-			 * bitmap block which owns it.  So make some room in
-			 * the journal.
-			 *
-			 * We zero the parent pointer *after* freeing its
-			 * pointee in the bitmaps, so if extend_transaction()
-			 * for some reason fails to put the bitmap changes and
-			 * the release into the same transaction, recovery
-			 * will merely complain about releasing a free block,
-			 * rather than leaking blocks.
-			 */
-			if (is_handle_aborted(handle))
-				return;
-			if (try_to_extend_transaction(handle, inode)) {
-				ext3_mark_inode_dirty(handle, inode);
-				truncate_restart_transaction(handle, inode);
-			}
-
-			/*
-			 * We've probably journalled the indirect block several
-			 * times during the truncate.  But it's no longer
-			 * needed and we now drop it from the transaction via
-			 * journal_revoke().
-			 *
-			 * That's easy if it's exclusively part of this
-			 * transaction.  But if it's part of the committing
-			 * transaction then journal_forget() will simply
-			 * brelse() it.  That means that if the underlying
-			 * block is reallocated in ext3_get_block(),
-			 * unmap_underlying_metadata() will find this block
-			 * and will try to get rid of it.  damn, damn. Thus
-			 * we don't allow a block to be reallocated until
-			 * a transaction freeing it has fully committed.
-			 *
-			 * We also have to make sure journal replay after a
-			 * crash does not overwrite non-journaled data blocks
-			 * with old metadata when the block got reallocated for
-			 * data.  Thus we have to store a revoke record for a
-			 * block in the same transaction in which we free the
-			 * block.
-			 */
-			ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-
-			ext3_free_blocks(handle, inode, nr, 1);
-
-			if (parent_bh) {
-				/*
-				 * The block which we have just freed is
-				 * pointed to by an indirect block: journal it
-				 */
-				BUFFER_TRACE(parent_bh, "get_write_access");
-				if (!ext3_journal_get_write_access(handle,
-								   parent_bh)){
-					*p = 0;
-					BUFFER_TRACE(parent_bh,
-					"call ext3_journal_dirty_metadata");
-					ext3_journal_dirty_metadata(handle,
-								    parent_bh);
-				}
-			}
-		}
-	} else {
-		/* We have reached the bottom of the tree. */
-		BUFFER_TRACE(parent_bh, "free data blocks");
-		ext3_free_data(handle, inode, parent_bh, first, last);
-	}
-}
-
-int ext3_can_truncate(struct inode *inode)
-{
-	if (S_ISREG(inode->i_mode))
-		return 1;
-	if (S_ISDIR(inode->i_mode))
-		return 1;
-	if (S_ISLNK(inode->i_mode))
-		return !ext3_inode_is_fast_symlink(inode);
-	return 0;
-}
-
-/*
- * ext3_truncate()
- *
- * We block out ext3_get_block() block instantiations across the entire
- * transaction, and VFS/VM ensures that ext3_truncate() cannot run
- * simultaneously on behalf of the same inode.
- *
- * As we work through the truncate and commit bits of it to the journal there
- * is one core, guiding principle: the file's tree must always be consistent on
- * disk.  We must be able to restart the truncate after a crash.
- *
- * The file's tree may be transiently inconsistent in memory (although it
- * probably isn't), but whenever we close off and commit a journal transaction,
- * the contents of (the filesystem + the journal) must be consistent and
- * restartable.  It's pretty simple, really: bottom up, right to left (although
- * left-to-right works OK too).
- *
- * Note that at recovery time, journal replay occurs *before* the restart of
- * truncate against the orphan inode list.
- *
- * The committed inode has the new, desired i_size (which is the same as
- * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
- * that this inode's truncate did not complete and it will again call
- * ext3_truncate() to have another go.  So there will be instantiated blocks
- * to the right of the truncation point in a crashed ext3 filesystem.  But
- * that's fine - as long as they are linked from the inode, the post-crash
- * ext3_truncate() run will find them and release them.
- */
-void ext3_truncate(struct inode *inode)
-{
-	handle_t *handle;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	__le32 *i_data = ei->i_data;
-	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
-	int offsets[4];
-	Indirect chain[4];
-	Indirect *partial;
-	__le32 nr = 0;
-	int n;
-	long last_block;
-	unsigned blocksize = inode->i_sb->s_blocksize;
-
-	trace_ext3_truncate_enter(inode);
-
-	if (!ext3_can_truncate(inode))
-		goto out_notrans;
-
-	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
-		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
-
-	handle = start_transaction(inode);
-	if (IS_ERR(handle))
-		goto out_notrans;
-
-	last_block = (inode->i_size + blocksize-1)
-					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);
-	n = ext3_block_to_path(inode, last_block, offsets, NULL);
-	if (n == 0)
-		goto out_stop;	/* error */
-
-	/*
-	 * OK.  This truncate is going to happen.  We add the inode to the
-	 * orphan list, so that if this truncate spans multiple transactions,
-	 * and we crash, we will resume the truncate when the filesystem
-	 * recovers.  It also marks the inode dirty, to catch the new size.
-	 *
-	 * Implication: the file must always be in a sane, consistent
-	 * truncatable state while each transaction commits.
-	 */
-	if (ext3_orphan_add(handle, inode))
-		goto out_stop;
-
-	/*
-	 * The orphan list entry will now protect us from any crash which
-	 * occurs before the truncate completes, so it is now safe to propagate
-	 * the new, shorter inode size (held for now in i_size) into the
-	 * on-disk inode. We do this via i_disksize, which is the value which
-	 * ext3 *really* writes onto the disk inode.
-	 */
-	ei->i_disksize = inode->i_size;
-
-	/*
-	 * From here we block out all ext3_get_block() callers who want to
-	 * modify the block allocation tree.
-	 */
-	mutex_lock(&ei->truncate_mutex);
-
-	if (n == 1) {		/* direct blocks */
-		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
-			       i_data + EXT3_NDIR_BLOCKS);
-		goto do_indirects;
-	}
-
-	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
-	/* Kill the top of shared branch (not detached) */
-	if (nr) {
-		if (partial == chain) {
-			/* Shared branch grows from the inode */
-			ext3_free_branches(handle, inode, NULL,
-					   &nr, &nr+1, (chain+n-1) - partial);
-			*partial->p = 0;
-			/*
-			 * We mark the inode dirty prior to restart,
-			 * and prior to stop.  No need for it here.
-			 */
-		} else {
-			/* Shared branch grows from an indirect block */
-			ext3_free_branches(handle, inode, partial->bh,
-					partial->p,
-					partial->p+1, (chain+n-1) - partial);
-		}
-	}
-	/* Clear the ends of indirect blocks on the shared branch */
-	while (partial > chain) {
-		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
-				   (__le32*)partial->bh->b_data+addr_per_block,
-				   (chain+n-1) - partial);
-		BUFFER_TRACE(partial->bh, "call brelse");
-		brelse (partial->bh);
-		partial--;
-	}
-do_indirects:
-	/* Kill the remaining (whole) subtrees */
-	switch (offsets[0]) {
-	default:
-		nr = i_data[EXT3_IND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
-			i_data[EXT3_IND_BLOCK] = 0;
-		}
-	case EXT3_IND_BLOCK:
-		nr = i_data[EXT3_DIND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
-			i_data[EXT3_DIND_BLOCK] = 0;
-		}
-	case EXT3_DIND_BLOCK:
-		nr = i_data[EXT3_TIND_BLOCK];
-		if (nr) {
-			ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
-			i_data[EXT3_TIND_BLOCK] = 0;
-		}
-	case EXT3_TIND_BLOCK:
-		;
-	}
-
-	ext3_discard_reservation(inode);
-
-	mutex_unlock(&ei->truncate_mutex);
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
-
-	/*
-	 * In a multi-transaction truncate, we only make the final transaction
-	 * synchronous
-	 */
-	if (IS_SYNC(inode))
-		handle->h_sync = 1;
-out_stop:
-	/*
-	 * If this was a simple ftruncate(), and the file will remain alive
-	 * then we need to clear up the orphan record which we created above.
-	 * However, if this was a real unlink then we were called by
-	 * ext3_evict_inode(), and we allow that function to clean up the
-	 * orphan info for us.
-	 */
-	if (inode->i_nlink)
-		ext3_orphan_del(handle, inode);
-
-	ext3_journal_stop(handle);
-	trace_ext3_truncate_exit(inode);
-	return;
-out_notrans:
-	/*
-	 * Delete the inode from the orphan list so that it doesn't stay there
-	 * forever and trigger an assertion on umount.
-	 */
-	if (inode->i_nlink)
-		ext3_orphan_del(NULL, inode);
-	trace_ext3_truncate_exit(inode);
-}
-
-static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
-		unsigned long ino, struct ext3_iloc *iloc)
-{
-	unsigned long block_group;
-	unsigned long offset;
-	ext3_fsblk_t block;
-	struct ext3_group_desc *gdp;
-
-	if (!ext3_valid_inum(sb, ino)) {
-		/*
-		 * This error is already checked for in namei.c unless we are
-		 * looking at an NFS filehandle, in which case no error
-		 * report is needed
-		 */
-		return 0;
-	}
-
-	block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-	gdp = ext3_get_group_desc(sb, block_group, NULL);
-	if (!gdp)
-		return 0;
-	/*
-	 * Figure out the offset within the block group inode table
-	 */
-	offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
-		EXT3_INODE_SIZE(sb);
-	block = le32_to_cpu(gdp->bg_inode_table) +
-		(offset >> EXT3_BLOCK_SIZE_BITS(sb));
-
-	iloc->block_group = block_group;
-	iloc->offset = offset & (EXT3_BLOCK_SIZE(sb) - 1);
-	return block;
-}
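The arithmetic above is easy to check by hand. As an illustrative sketch only (the geometry is made up rather than read from a real superblock): with 8192 inodes per group, 128-byte on-disk inodes and 1KiB blocks, inode 8200 lands in group 1, at byte offset 896 of the first block of that group's inode table:

#include <stdio.h>

int main(void)
{
	/* hypothetical geometry, not taken from a real filesystem */
	unsigned long ino = 8200;
	unsigned long inodes_per_group = 8192;
	unsigned long inode_size = 128;
	unsigned long block_size_bits = 10;	/* 1KiB blocks */

	unsigned long group     = (ino - 1) / inodes_per_group;
	unsigned long offset    = ((ino - 1) % inodes_per_group) * inode_size;
	unsigned long rel_block = offset >> block_size_bits;
	unsigned long in_block  = offset & ((1UL << block_size_bits) - 1);

	/* prints: group 1, inode-table block +0, byte offset 896 */
	printf("group %lu, inode-table block +%lu, byte offset %lu\n",
	       group, rel_block, in_block);
	return 0;
}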
-
-/*
- * ext3_get_inode_loc returns with an extra refcount against the inode's
- * underlying buffer_head on success. If 'in_mem' is true, we have all
- * data in memory that is needed to recreate the on-disk version of this
- * inode.
- */
-static int __ext3_get_inode_loc(struct inode *inode,
-				struct ext3_iloc *iloc, int in_mem)
-{
-	ext3_fsblk_t block;
-	struct buffer_head *bh;
-
-	block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
-	if (!block)
-		return -EIO;
-
-	bh = sb_getblk(inode->i_sb, block);
-	if (unlikely(!bh)) {
-		ext3_error (inode->i_sb, "ext3_get_inode_loc",
-				"unable to read inode block - "
-				"inode=%lu, block="E3FSBLK,
-				 inode->i_ino, block);
-		return -ENOMEM;
-	}
-	if (!buffer_uptodate(bh)) {
-		lock_buffer(bh);
-
-		/*
-		 * If the buffer has the write error flag, we have failed
-		 * to write out another inode in the same block.  In this
-		 * case, we don't have to read the block because we may
-		 * read the old inode data successfully.
-		 */
-		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
-			set_buffer_uptodate(bh);
-
-		if (buffer_uptodate(bh)) {
-			/* someone brought it uptodate while we waited */
-			unlock_buffer(bh);
-			goto has_buffer;
-		}
-
-		/*
-		 * If we have all information of the inode in memory and this
-		 * is the only valid inode in the block, we need not read the
-		 * block.
-		 */
-		if (in_mem) {
-			struct buffer_head *bitmap_bh;
-			struct ext3_group_desc *desc;
-			int inodes_per_buffer;
-			int inode_offset, i;
-			int block_group;
-			int start;
-
-			block_group = (inode->i_ino - 1) /
-					EXT3_INODES_PER_GROUP(inode->i_sb);
-			inodes_per_buffer = bh->b_size /
-				EXT3_INODE_SIZE(inode->i_sb);
-			inode_offset = ((inode->i_ino - 1) %
-					EXT3_INODES_PER_GROUP(inode->i_sb));
-			start = inode_offset & ~(inodes_per_buffer - 1);
-
-			/* Is the inode bitmap in cache? */
-			desc = ext3_get_group_desc(inode->i_sb,
-						block_group, NULL);
-			if (!desc)
-				goto make_io;
-
-			bitmap_bh = sb_getblk(inode->i_sb,
-					le32_to_cpu(desc->bg_inode_bitmap));
-			if (unlikely(!bitmap_bh))
-				goto make_io;
-
-			/*
-			 * If the inode bitmap isn't in cache then the
-			 * optimisation may end up performing two reads instead
-			 * of one, so skip it.
-			 */
-			if (!buffer_uptodate(bitmap_bh)) {
-				brelse(bitmap_bh);
-				goto make_io;
-			}
-			for (i = start; i < start + inodes_per_buffer; i++) {
-				if (i == inode_offset)
-					continue;
-				if (ext3_test_bit(i, bitmap_bh->b_data))
-					break;
-			}
-			brelse(bitmap_bh);
-			if (i == start + inodes_per_buffer) {
-				/* all other inodes are free, so skip I/O */
-				memset(bh->b_data, 0, bh->b_size);
-				set_buffer_uptodate(bh);
-				unlock_buffer(bh);
-				goto has_buffer;
-			}
-		}
-
-make_io:
-		/*
-		 * There are other valid inodes in the buffer, this inode
-		 * has in-inode xattrs, or we don't have this inode in memory.
-		 * Read the block from disk.
-		 */
-		trace_ext3_load_inode(inode);
-		get_bh(bh);
-		bh->b_end_io = end_buffer_read_sync;
-		submit_bh(READ | REQ_META | REQ_PRIO, bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			ext3_error(inode->i_sb, "ext3_get_inode_loc",
-					"unable to read inode block - "
-					"inode=%lu, block="E3FSBLK,
-					inode->i_ino, block);
-			brelse(bh);
-			return -EIO;
-		}
-	}
-has_buffer:
-	iloc->bh = bh;
-	return 0;
-}
-
-int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
-{
-	/* We have all inode data except xattrs in memory here. */
-	return __ext3_get_inode_loc(inode, iloc,
-		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
-}
-
-void ext3_set_inode_flags(struct inode *inode)
-{
-	unsigned int flags = EXT3_I(inode)->i_flags;
-
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
-	if (flags & EXT3_SYNC_FL)
-		inode->i_flags |= S_SYNC;
-	if (flags & EXT3_APPEND_FL)
-		inode->i_flags |= S_APPEND;
-	if (flags & EXT3_IMMUTABLE_FL)
-		inode->i_flags |= S_IMMUTABLE;
-	if (flags & EXT3_NOATIME_FL)
-		inode->i_flags |= S_NOATIME;
-	if (flags & EXT3_DIRSYNC_FL)
-		inode->i_flags |= S_DIRSYNC;
-}
-
-/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
-void ext3_get_inode_flags(struct ext3_inode_info *ei)
-{
-	unsigned int flags = ei->vfs_inode.i_flags;
-
-	ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
-			EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
-	if (flags & S_SYNC)
-		ei->i_flags |= EXT3_SYNC_FL;
-	if (flags & S_APPEND)
-		ei->i_flags |= EXT3_APPEND_FL;
-	if (flags & S_IMMUTABLE)
-		ei->i_flags |= EXT3_IMMUTABLE_FL;
-	if (flags & S_NOATIME)
-		ei->i_flags |= EXT3_NOATIME_FL;
-	if (flags & S_DIRSYNC)
-		ei->i_flags |= EXT3_DIRSYNC_FL;
-}
-
-struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
-{
-	struct ext3_iloc iloc;
-	struct ext3_inode *raw_inode;
-	struct ext3_inode_info *ei;
-	struct buffer_head *bh;
-	struct inode *inode;
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-	transaction_t *transaction;
-	long ret;
-	int block;
-	uid_t i_uid;
-	gid_t i_gid;
-
-	inode = iget_locked(sb, ino);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
-	if (!(inode->i_state & I_NEW))
-		return inode;
-
-	ei = EXT3_I(inode);
-	ei->i_block_alloc_info = NULL;
-
-	ret = __ext3_get_inode_loc(inode, &iloc, 0);
-	if (ret < 0)
-		goto bad_inode;
-	bh = iloc.bh;
-	raw_inode = ext3_raw_inode(&iloc);
-	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
-	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
-	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
-	if(!(test_opt (inode->i_sb, NO_UID32))) {
-		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
-		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
-	}
-	i_uid_write(inode, i_uid);
-	i_gid_write(inode, i_gid);
-	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
-	inode->i_size = le32_to_cpu(raw_inode->i_size);
-	inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
-	inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
-	inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
-	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
-
-	ei->i_state_flags = 0;
-	ei->i_dir_start_lookup = 0;
-	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
-	/* We now have enough fields to check if the inode was active or not.
-	 * This is needed because nfsd might try to access dead inodes;
-	 * the test is the same one that e2fsck uses.
-	 * NeilBrown 1999oct15
-	 */
-	if (inode->i_nlink == 0) {
-		if (inode->i_mode == 0 ||
-		    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
-			/* this inode is deleted */
-			brelse (bh);
-			ret = -ESTALE;
-			goto bad_inode;
-		}
-		/* The only unlinked inodes we let through here have
-		 * valid i_mode and are being read by the orphan
-		 * recovery code: that's fine, we're about to complete
-		 * the process of deleting those. */
-	}
-	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
-	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
-#ifdef EXT3_FRAGMENTS
-	ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
-	ei->i_frag_no = raw_inode->i_frag;
-	ei->i_frag_size = raw_inode->i_fsize;
-#endif
-	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
-	if (!S_ISREG(inode->i_mode)) {
-		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
-	} else {
-		inode->i_size |=
-			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
-	}
-	ei->i_disksize = inode->i_size;
-	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
-	ei->i_block_group = iloc.block_group;
-	/*
-	 * NOTE! The in-memory inode i_data array is in little-endian order
-	 * even on big-endian machines: we do NOT byteswap the block numbers!
-	 */
-	for (block = 0; block < EXT3_N_BLOCKS; block++)
-		ei->i_data[block] = raw_inode->i_block[block];
-	INIT_LIST_HEAD(&ei->i_orphan);
-
-	/*
-	 * Set transaction id's of transactions that have to be committed
-	 * to finish f[data]sync. We set them to the currently running transaction
-	 * as we cannot be sure that the inode or some of its metadata isn't
-	 * part of the transaction - the inode could have been reclaimed and
-	 * now it is reread from disk.
-	 */
-	if (journal) {
-		tid_t tid;
-
-		spin_lock(&journal->j_state_lock);
-		if (journal->j_running_transaction)
-			transaction = journal->j_running_transaction;
-		else
-			transaction = journal->j_committing_transaction;
-		if (transaction)
-			tid = transaction->t_tid;
-		else
-			tid = journal->j_commit_sequence;
-		spin_unlock(&journal->j_state_lock);
-		atomic_set(&ei->i_sync_tid, tid);
-		atomic_set(&ei->i_datasync_tid, tid);
-	}
-
-	if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
-	    EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
-		/*
-		 * When mke2fs creates big inodes it does not zero out
-		 * the unused bytes above EXT3_GOOD_OLD_INODE_SIZE,
-		 * so ignore those first few inodes.
-		 */
-		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
-		if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-		    EXT3_INODE_SIZE(inode->i_sb)) {
-			brelse (bh);
-			ret = -EIO;
-			goto bad_inode;
-		}
-		if (ei->i_extra_isize == 0) {
-			/* The extra space is currently unused. Use it. */
-			ei->i_extra_isize = sizeof(struct ext3_inode) -
-					    EXT3_GOOD_OLD_INODE_SIZE;
-		} else {
-			__le32 *magic = (void *)raw_inode +
-					EXT3_GOOD_OLD_INODE_SIZE +
-					ei->i_extra_isize;
-			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
-				 ext3_set_inode_state(inode, EXT3_STATE_XATTR);
-		}
-	} else
-		ei->i_extra_isize = 0;
-
-	if (S_ISREG(inode->i_mode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-	} else if (S_ISDIR(inode->i_mode)) {
-		inode->i_op = &ext3_dir_inode_operations;
-		inode->i_fop = &ext3_dir_operations;
-	} else if (S_ISLNK(inode->i_mode)) {
-		if (ext3_inode_is_fast_symlink(inode)) {
-			inode->i_op = &ext3_fast_symlink_inode_operations;
-			nd_terminate_link(ei->i_data, inode->i_size,
-				sizeof(ei->i_data) - 1);
-			inode->i_link = (char *)ei->i_data;
-		} else {
-			inode->i_op = &ext3_symlink_inode_operations;
-			ext3_set_aops(inode);
-		}
-	} else {
-		inode->i_op = &ext3_special_inode_operations;
-		if (raw_inode->i_block[0])
-			init_special_inode(inode, inode->i_mode,
-			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
-		else
-			init_special_inode(inode, inode->i_mode,
-			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
-	}
-	brelse (iloc.bh);
-	ext3_set_inode_flags(inode);
-	unlock_new_inode(inode);
-	return inode;
-
-bad_inode:
-	iget_failed(inode);
-	return ERR_PTR(ret);
-}
-
-/*
- * Post the struct inode info into an on-disk inode location in the
- * buffer-cache.  This gobbles the caller's reference to the
- * buffer_head in the inode location struct.
- *
- * The caller must have write access to iloc->bh.
- */
-static int ext3_do_update_inode(handle_t *handle,
-				struct inode *inode,
-				struct ext3_iloc *iloc)
-{
-	struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct buffer_head *bh = iloc->bh;
-	int err = 0, rc, block;
-	int need_datasync = 0;
-	__le32 disksize;
-	uid_t i_uid;
-	gid_t i_gid;
-
-again:
-	/* we can't allow multiple procs in here at once; it's a bit racy */
-	lock_buffer(bh);
-
-	/* For fields not tracked in the in-memory inode,
-	 * initialise them to zero for new inodes. */
-	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
-		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-
-	ext3_get_inode_flags(ei);
-	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-	i_uid = i_uid_read(inode);
-	i_gid = i_gid_read(inode);
-	if(!(test_opt(inode->i_sb, NO_UID32))) {
-		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
-		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
-/*
- * Fix up interoperability with old kernels. Otherwise, old inodes get
- * re-used with the upper 16 bits of the uid/gid intact
- */
-		if(!ei->i_dtime) {
-			raw_inode->i_uid_high =
-				cpu_to_le16(high_16_bits(i_uid));
-			raw_inode->i_gid_high =
-				cpu_to_le16(high_16_bits(i_gid));
-		} else {
-			raw_inode->i_uid_high = 0;
-			raw_inode->i_gid_high = 0;
-		}
-	} else {
-		raw_inode->i_uid_low =
-			cpu_to_le16(fs_high2lowuid(i_uid));
-		raw_inode->i_gid_low =
-			cpu_to_le16(fs_high2lowgid(i_gid));
-		raw_inode->i_uid_high = 0;
-		raw_inode->i_gid_high = 0;
-	}
-	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
-	disksize = cpu_to_le32(ei->i_disksize);
-	if (disksize != raw_inode->i_size) {
-		need_datasync = 1;
-		raw_inode->i_size = disksize;
-	}
-	raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
-	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
-	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
-	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
-	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
-#ifdef EXT3_FRAGMENTS
-	raw_inode->i_faddr = cpu_to_le32(ei->i_faddr);
-	raw_inode->i_frag = ei->i_frag_no;
-	raw_inode->i_fsize = ei->i_frag_size;
-#endif
-	raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl);
-	if (!S_ISREG(inode->i_mode)) {
-		raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl);
-	} else {
-		disksize = cpu_to_le32(ei->i_disksize >> 32);
-		if (disksize != raw_inode->i_size_high) {
-			raw_inode->i_size_high = disksize;
-			need_datasync = 1;
-		}
-		if (ei->i_disksize > 0x7fffffffULL) {
-			struct super_block *sb = inode->i_sb;
-			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
-			    EXT3_SB(sb)->s_es->s_rev_level ==
-					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
-			       /* If this is the first large file
-				* created, add a flag to the superblock.
-				*/
-				unlock_buffer(bh);
-				err = ext3_journal_get_write_access(handle,
-						EXT3_SB(sb)->s_sbh);
-				if (err)
-					goto out_brelse;
-
-				ext3_update_dynamic_rev(sb);
-				EXT3_SET_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
-				handle->h_sync = 1;
-				err = ext3_journal_dirty_metadata(handle,
-						EXT3_SB(sb)->s_sbh);
-				/* get our lock and start over */
-				goto again;
-			}
-		}
-	}
-	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
-	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-		if (old_valid_dev(inode->i_rdev)) {
-			raw_inode->i_block[0] =
-				cpu_to_le32(old_encode_dev(inode->i_rdev));
-			raw_inode->i_block[1] = 0;
-		} else {
-			raw_inode->i_block[0] = 0;
-			raw_inode->i_block[1] =
-				cpu_to_le32(new_encode_dev(inode->i_rdev));
-			raw_inode->i_block[2] = 0;
-		}
-	} else for (block = 0; block < EXT3_N_BLOCKS; block++)
-		raw_inode->i_block[block] = ei->i_data[block];
-
-	if (ei->i_extra_isize)
-		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
-
-	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-	unlock_buffer(bh);
-	rc = ext3_journal_dirty_metadata(handle, bh);
-	if (!err)
-		err = rc;
-	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
-
-	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
-	if (need_datasync)
-		atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
-out_brelse:
-	brelse (bh);
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
-/*
- * ext3_write_inode()
- *
- * We are called from a few places:
- *
- * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
- *   Here, there will be no transaction running. We wait for any running
- *   transaction to commit.
- *
- * - Within flush work (for sys_sync(), kupdate and such).
- *   We wait on commit, if told to.
- *
- * - Within iput_final() -> write_inode_now()
- *   We wait on commit, if told to.
- *
- * In all cases it is actually safe for us to return without doing anything,
- * because the inode has been copied into a raw inode buffer in
- * ext3_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
- * writeback.
- *
- * Note that we are absolutely dependent upon all inode dirtiers doing the
- * right thing: they *must* call mark_inode_dirty() after dirtying info in
- * which we are interested.
- *
- * It would be a bug for them to not do this.  The code:
- *
- *	mark_inode_dirty(inode)
- *	stuff();
- *	inode->i_size = expr;
- *
- * is in error because write_inode() could occur while `stuff()' is running,
- * and the new i_size will be lost.  Plus the inode will no longer be on the
- * superblock's dirty inode list.
- */
-int ext3_write_inode(struct inode *inode, struct writeback_control *wbc)
-{
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
-		return 0;
-
-	if (ext3_journal_current_handle()) {
-		jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
-		dump_stack();
-		return -EIO;
-	}
-
-	/*
-	 * No need to force transaction in WB_SYNC_NONE mode. Also
-	 * ext3_sync_fs() will force the commit after everything is
-	 * written.
-	 */
-	if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
-		return 0;
-
-	return ext3_force_commit(inode->i_sb);
-}
-
-/*
- * ext3_setattr()
- *
- * Called from notify_change.
- *
- * We want to trap VFS attempts to truncate the file as soon as
- * possible.  In particular, we want to make sure that when the VFS
- * shrinks i_size, we put the inode on the orphan list and modify
- * i_disksize immediately, so that during the subsequent flushing of
- * dirty pages and freeing of disk blocks, we can guarantee that any
- * commit will leave the blocks being flushed in an unused state on
- * disk.  (On recovery, the inode will get truncated and the blocks will
- * be freed, so we have a strong guarantee that no future commit will
- * leave these blocks visible to the user.)
- *
- * Called with inode->sem down.
- */
-int ext3_setattr(struct dentry *dentry, struct iattr *attr)
-{
-	struct inode *inode = d_inode(dentry);
-	int error, rc = 0;
-	const unsigned int ia_valid = attr->ia_valid;
-
-	error = inode_change_ok(inode, attr);
-	if (error)
-		return error;
-
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
-	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
-	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
-		handle_t *handle;
-
-		/* (user+group)*(old+new) structure, inode write (sb,
-		 * inode block, ? - but truncate inode update has it) */
-		handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
-					EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-		error = dquot_transfer(inode, attr);
-		if (error) {
-			ext3_journal_stop(handle);
-			return error;
-		}
-		/* Update corresponding info in inode so that everything is in
-		 * one transaction */
-		if (attr->ia_valid & ATTR_UID)
-			inode->i_uid = attr->ia_uid;
-		if (attr->ia_valid & ATTR_GID)
-			inode->i_gid = attr->ia_gid;
-		error = ext3_mark_inode_dirty(handle, inode);
-		ext3_journal_stop(handle);
-	}
-
-	if (attr->ia_valid & ATTR_SIZE)
-		inode_dio_wait(inode);
-
-	if (S_ISREG(inode->i_mode) &&
-	    attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
-		handle_t *handle;
-
-		handle = ext3_journal_start(inode, 3);
-		if (IS_ERR(handle)) {
-			error = PTR_ERR(handle);
-			goto err_out;
-		}
-
-		error = ext3_orphan_add(handle, inode);
-		if (error) {
-			ext3_journal_stop(handle);
-			goto err_out;
-		}
-		EXT3_I(inode)->i_disksize = attr->ia_size;
-		error = ext3_mark_inode_dirty(handle, inode);
-		ext3_journal_stop(handle);
-		if (error) {
-			/* Some hard fs error must have happened. Bail out. */
-			ext3_orphan_del(NULL, inode);
-			goto err_out;
-		}
-		rc = ext3_block_truncate_page(inode, attr->ia_size);
-		if (rc) {
-			/* Cleanup orphan list and exit */
-			handle = ext3_journal_start(inode, 3);
-			if (IS_ERR(handle)) {
-				ext3_orphan_del(NULL, inode);
-				goto err_out;
-			}
-			ext3_orphan_del(handle, inode);
-			ext3_journal_stop(handle);
-			goto err_out;
-		}
-	}
-
-	if ((attr->ia_valid & ATTR_SIZE) &&
-	    attr->ia_size != i_size_read(inode)) {
-		truncate_setsize(inode, attr->ia_size);
-		ext3_truncate(inode);
-	}
-
-	setattr_copy(inode, attr);
-	mark_inode_dirty(inode);
-
-	if (ia_valid & ATTR_MODE)
-		rc = posix_acl_chmod(inode, inode->i_mode);
-
-err_out:
-	ext3_std_error(inode->i_sb, error);
-	if (!error)
-		error = rc;
-	return error;
-}
-
-
-/*
- * How many blocks doth make a writepage()?
- *
- * With N blocks per page, it may be:
- * N data blocks
- * 2 indirect blocks
- * 2 dindirect
- * 1 tindirect
- * N+5 bitmap blocks (from the above)
- * N+5 group descriptor summary blocks
- * 1 inode block
- * 1 superblock.
- * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
- *
- * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
- *
- * With ordered or writeback data it's the same, less the N data blocks.
- *
- * If the inode's direct blocks can hold an integral number of pages then a
- * page cannot straddle two indirect blocks, and we can only touch one indirect
- * and dindirect block, and the "5" above becomes "3".
- *
- * This still overestimates under most circumstances.  If we were to pass the
- * start and end offsets in here as well we could do block_to_path() on each
- * block and work out the exact number of indirects which are touched.  Pah.
- */
-
-static int ext3_writepage_trans_blocks(struct inode *inode)
-{
-	int bpp = ext3_journal_blocks_per_page(inode);
-	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
-	int ret;
-
-	if (ext3_should_journal_data(inode))
-		ret = 3 * (bpp + indirects) + 2;
-	else
-		ret = 2 * (bpp + indirects) + indirects + 2;
-
-#ifdef CONFIG_QUOTA
-	/* We know the structure was already allocated during dquot_initialize, so
-	 * we will be updating only the data blocks + inodes */
-	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
-#endif
-
-	return ret;
-}
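As a rough worked example of the estimate above (illustrative only; the block size is assumed rather than queried from a superblock): with 1KiB blocks a 4KiB page spans bpp = 4 blocks, EXT3_NDIR_BLOCKS (12) divides evenly by 4 so "indirects" is 3, which gives 3 * (4 + 3) + 2 = 23 credits with data journalling and 2 * (4 + 3) + 3 + 2 = 19 in ordered/writeback mode, before any quota credits are added:

#include <stdio.h>

int main(void)
{
	int bpp = 4;			/* 4KiB page / 1KiB blocks (assumed) */
	int ndir_blocks = 12;		/* EXT3_NDIR_BLOCKS */
	int indirects = (ndir_blocks % bpp) ? 5 : 3;

	int journalled = 3 * (bpp + indirects) + 2;			/* 23 */
	int ordered    = 2 * (bpp + indirects) + indirects + 2;	/* 19 */

	printf("data=journal: %d credits, data=ordered/writeback: %d credits\n",
	       journalled, ordered);
	return 0;
}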
-
-/*
- * The caller must have previously called ext3_reserve_inode_write().
- * Given this, we know that the caller already has write access to iloc->bh.
- */
-int ext3_mark_iloc_dirty(handle_t *handle,
-		struct inode *inode, struct ext3_iloc *iloc)
-{
-	int err = 0;
-
-	/* the do_update_inode consumes one bh->b_count */
-	get_bh(iloc->bh);
-
-	/* ext3_do_update_inode() does journal_dirty_metadata */
-	err = ext3_do_update_inode(handle, inode, iloc);
-	put_bh(iloc->bh);
-	return err;
-}
-
-/*
- * On success, we end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later.
- */
-
-int
-ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
-			 struct ext3_iloc *iloc)
-{
-	int err = 0;
-	if (handle) {
-		err = ext3_get_inode_loc(inode, iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc->bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, iloc->bh);
-			if (err) {
-				brelse(iloc->bh);
-				iloc->bh = NULL;
-			}
-		}
-	}
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
-/*
- * What we do here is to mark the in-core inode as clean with respect to inode
- * dirtiness (it may still be data-dirty).
- * This means that the in-core inode may be reaped by prune_icache
- * without having to perform any I/O.  This is a very good thing,
- * because *any* task may call prune_icache - even ones which
- * have a transaction open against a different journal.
- *
- * Is this cheating?  Not really.  Sure, we haven't written the
- * inode out, but prune_icache isn't a user-visible syncing function.
- * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
- * we start and wait on commits.
- */
-int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
-{
-	struct ext3_iloc iloc;
-	int err;
-
-	might_sleep();
-	trace_ext3_mark_inode_dirty(inode, _RET_IP_);
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (!err)
-		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	return err;
-}
-
-/*
- * ext3_dirty_inode() is called from __mark_inode_dirty()
- *
- * We're really interested in the case where a file is being extended.
- * i_size has been changed by generic_commit_write() and we thus need
- * to include the updated inode in the current transaction.
- *
- * Also, dquot_alloc_space() will always dirty the inode when blocks
- * are allocated to the file.
- *
- * If the inode is marked synchronous, we don't honour that here - doing
- * so would cause a commit on atime updates, which we don't bother doing.
- * We handle synchronous inodes at the highest possible level.
- */
-void ext3_dirty_inode(struct inode *inode, int flags)
-{
-	handle_t *current_handle = ext3_journal_current_handle();
-	handle_t *handle;
-
-	handle = ext3_journal_start(inode, 2);
-	if (IS_ERR(handle))
-		goto out;
-	if (current_handle &&
-		current_handle->h_transaction != handle->h_transaction) {
-		/* This task has a transaction open against a different fs */
-		printk(KERN_EMERG "%s: transactions do not match!\n",
-		       __func__);
-	} else {
-		jbd_debug(5, "marking dirty.  outer handle=%p\n",
-				current_handle);
-		ext3_mark_inode_dirty(handle, inode);
-	}
-	ext3_journal_stop(handle);
-out:
-	return;
-}
-
-#if 0
-/*
- * Bind an inode's backing buffer_head into this transaction, to prevent
- * it from being flushed to disk early.  Unlike
- * ext3_reserve_inode_write, this leaves behind no bh reference and
- * returns no iloc structure, so the caller needs to repeat the iloc
- * lookup to mark the inode dirty later.
- */
-static int ext3_pin_inode(handle_t *handle, struct inode *inode)
-{
-	struct ext3_iloc iloc;
-
-	int err = 0;
-	if (handle) {
-		err = ext3_get_inode_loc(inode, &iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc.bh, "get_write_access");
-			err = journal_get_write_access(handle, iloc.bh);
-			if (!err)
-				err = ext3_journal_dirty_metadata(handle,
-								  iloc.bh);
-			brelse(iloc.bh);
-		}
-	}
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-#endif
-
-int ext3_change_inode_journal_flag(struct inode *inode, int val)
-{
-	journal_t *journal;
-	handle_t *handle;
-	int err;
-
-	/*
-	 * We have to be very careful here: changing a data block's
-	 * journaling status dynamically is dangerous.  If we write a
-	 * data block to the journal, change the status and then delete
-	 * that block, we risk forgetting to revoke the old log record
-	 * from the journal and so a subsequent replay can corrupt data.
-	 * So, first we make sure that the journal is empty and that
-	 * nobody is changing anything.
-	 */
-
-	journal = EXT3_JOURNAL(inode);
-	if (is_journal_aborted(journal))
-		return -EROFS;
-
-	journal_lock_updates(journal);
-	journal_flush(journal);
-
-	/*
-	 * OK, there are no updates running now, and all cached data is
-	 * synced to disk.  We are now in a completely consistent state
-	 * which doesn't have anything in the journal, and we know that
-	 * no filesystem updates are running, so it is safe to modify
-	 * the inode's in-core data-journaling state flag now.
-	 */
-
-	if (val)
-		EXT3_I(inode)->i_flags |= EXT3_JOURNAL_DATA_FL;
-	else
-		EXT3_I(inode)->i_flags &= ~EXT3_JOURNAL_DATA_FL;
-	ext3_set_aops(inode);
-
-	journal_unlock_updates(journal);
-
-	/* Finally we can mark the inode as dirty. */
-
-	handle = ext3_journal_start(inode, 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	handle->h_sync = 1;
-	ext3_journal_stop(handle);
-	ext3_std_error(inode->i_sb, err);
-
-	return err;
-}
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
deleted file mode 100644
index 4d96e9a..0000000
--- a/fs/ext3/ioctl.c
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * linux/fs/ext3/ioctl.c
- *
- * Copyright (C) 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- */
-
-#include <linux/mount.h>
-#include <linux/compat.h>
-#include <asm/uaccess.h>
-#include "ext3.h"
-
-long ext3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-	struct inode *inode = file_inode(filp);
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	unsigned int flags;
-	unsigned short rsv_window_size;
-
-	ext3_debug ("cmd = %u, arg = %lu\n", cmd, arg);
-
-	switch (cmd) {
-	case EXT3_IOC_GETFLAGS:
-		ext3_get_inode_flags(ei);
-		flags = ei->i_flags & EXT3_FL_USER_VISIBLE;
-		return put_user(flags, (int __user *) arg);
-	case EXT3_IOC_SETFLAGS: {
-		handle_t *handle = NULL;
-		int err;
-		struct ext3_iloc iloc;
-		unsigned int oldflags;
-		unsigned int jflag;
-
-		if (!inode_owner_or_capable(inode))
-			return -EACCES;
-
-		if (get_user(flags, (int __user *) arg))
-			return -EFAULT;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		flags = ext3_mask_flags(inode->i_mode, flags);
-
-		mutex_lock(&inode->i_mutex);
-
-		/* Is it a quota file? Do not allow the user to mess with it */
-		err = -EPERM;
-		if (IS_NOQUOTA(inode))
-			goto flags_out;
-
-		oldflags = ei->i_flags;
-
-		/* The JOURNAL_DATA flag is modifiable only by root */
-		jflag = flags & EXT3_JOURNAL_DATA_FL;
-
-		/*
-		 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
-		 * the relevant capability.
-		 *
-		 * This test looks nicer. Thanks to Pauline Middelink
-		 */
-		if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
-			if (!capable(CAP_LINUX_IMMUTABLE))
-				goto flags_out;
-		}
-
-		/*
-		 * The JOURNAL_DATA flag can only be changed by
-		 * the relevant capability.
-		 */
-		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL)) {
-			if (!capable(CAP_SYS_RESOURCE))
-				goto flags_out;
-		}
-
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto flags_out;
-		}
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-		err = ext3_reserve_inode_write(handle, inode, &iloc);
-		if (err)
-			goto flags_err;
-
-		flags = flags & EXT3_FL_USER_MODIFIABLE;
-		flags |= oldflags & ~EXT3_FL_USER_MODIFIABLE;
-		ei->i_flags = flags;
-
-		ext3_set_inode_flags(inode);
-		inode->i_ctime = CURRENT_TIME_SEC;
-
-		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-flags_err:
-		ext3_journal_stop(handle);
-		if (err)
-			goto flags_out;
-
-		if ((jflag ^ oldflags) & (EXT3_JOURNAL_DATA_FL))
-			err = ext3_change_inode_journal_flag(inode, jflag);
-flags_out:
-		mutex_unlock(&inode->i_mutex);
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GETVERSION:
-	case EXT3_IOC_GETVERSION_OLD:
-		return put_user(inode->i_generation, (int __user *) arg);
-	case EXT3_IOC_SETVERSION:
-	case EXT3_IOC_SETVERSION_OLD: {
-		handle_t *handle;
-		struct ext3_iloc iloc;
-		__u32 generation;
-		int err;
-
-		if (!inode_owner_or_capable(inode))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-		if (get_user(generation, (int __user *) arg)) {
-			err = -EFAULT;
-			goto setversion_out;
-		}
-
-		mutex_lock(&inode->i_mutex);
-		handle = ext3_journal_start(inode, 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto unlock_out;
-		}
-		err = ext3_reserve_inode_write(handle, inode, &iloc);
-		if (err == 0) {
-			inode->i_ctime = CURRENT_TIME_SEC;
-			inode->i_generation = generation;
-			err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-		}
-		ext3_journal_stop(handle);
-
-unlock_out:
-		mutex_unlock(&inode->i_mutex);
-setversion_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GETRSVSZ:
-		if (test_opt(inode->i_sb, RESERVATION)
-			&& S_ISREG(inode->i_mode)
-			&& ei->i_block_alloc_info) {
-			rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
-			return put_user(rsv_window_size, (int __user *)arg);
-		}
-		return -ENOTTY;
-	case EXT3_IOC_SETRSVSZ: {
-		int err;
-
-		if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
-			return -ENOTTY;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (!inode_owner_or_capable(inode)) {
-			err = -EACCES;
-			goto setrsvsz_out;
-		}
-
-		if (get_user(rsv_window_size, (int __user *)arg)) {
-			err = -EFAULT;
-			goto setrsvsz_out;
-		}
-
-		if (rsv_window_size > EXT3_MAX_RESERVE_BLOCKS)
-			rsv_window_size = EXT3_MAX_RESERVE_BLOCKS;
-
-		/*
-		 * need to allocate a reservation structure for this inode
-		 * before setting the window size
-		 */
-		mutex_lock(&ei->truncate_mutex);
-		if (!ei->i_block_alloc_info)
-			ext3_init_block_alloc_info(inode);
-
-		if (ei->i_block_alloc_info){
-			struct ext3_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
-			rsv->rsv_goal_size = rsv_window_size;
-		}
-		mutex_unlock(&ei->truncate_mutex);
-setrsvsz_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GROUP_EXTEND: {
-		ext3_fsblk_t n_blocks_count;
-		struct super_block *sb = inode->i_sb;
-		int err, err2;
-
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (get_user(n_blocks_count, (__u32 __user *)arg)) {
-			err = -EFAULT;
-			goto group_extend_out;
-		}
-		err = ext3_group_extend(sb, EXT3_SB(sb)->s_es, n_blocks_count);
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err2 = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err == 0)
-			err = err2;
-group_extend_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case EXT3_IOC_GROUP_ADD: {
-		struct ext3_new_group_data input;
-		struct super_block *sb = inode->i_sb;
-		int err, err2;
-
-		if (!capable(CAP_SYS_RESOURCE))
-			return -EPERM;
-
-		err = mnt_want_write_file(filp);
-		if (err)
-			return err;
-
-		if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
-				sizeof(input))) {
-			err = -EFAULT;
-			goto group_add_out;
-		}
-
-		err = ext3_group_add(sb, &input);
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err2 = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err == 0)
-			err = err2;
-group_add_out:
-		mnt_drop_write_file(filp);
-		return err;
-	}
-	case FITRIM: {
-
-		struct super_block *sb = inode->i_sb;
-		struct fstrim_range range;
-		int ret = 0;
-
-		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-
-		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
-				   sizeof(range)))
-			return -EFAULT;
-
-		ret = ext3_trim_fs(sb, &range);
-		if (ret < 0)
-			return ret;
-
-		if (copy_to_user((struct fstrim_range __user *)arg, &range,
-				 sizeof(range)))
-			return -EFAULT;
-
-		return 0;
-	}
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-#ifdef CONFIG_COMPAT
-long ext3_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	/* These are just misnamed; they actually get/put an int from/to user space */
-	switch (cmd) {
-	case EXT3_IOC32_GETFLAGS:
-		cmd = EXT3_IOC_GETFLAGS;
-		break;
-	case EXT3_IOC32_SETFLAGS:
-		cmd = EXT3_IOC_SETFLAGS;
-		break;
-	case EXT3_IOC32_GETVERSION:
-		cmd = EXT3_IOC_GETVERSION;
-		break;
-	case EXT3_IOC32_SETVERSION:
-		cmd = EXT3_IOC_SETVERSION;
-		break;
-	case EXT3_IOC32_GROUP_EXTEND:
-		cmd = EXT3_IOC_GROUP_EXTEND;
-		break;
-	case EXT3_IOC32_GETVERSION_OLD:
-		cmd = EXT3_IOC_GETVERSION_OLD;
-		break;
-	case EXT3_IOC32_SETVERSION_OLD:
-		cmd = EXT3_IOC_SETVERSION_OLD;
-		break;
-#ifdef CONFIG_JBD_DEBUG
-	case EXT3_IOC32_WAIT_FOR_READONLY:
-		cmd = EXT3_IOC_WAIT_FOR_READONLY;
-		break;
-#endif
-	case EXT3_IOC32_GETRSVSZ:
-		cmd = EXT3_IOC_GETRSVSZ;
-		break;
-	case EXT3_IOC32_SETRSVSZ:
-		cmd = EXT3_IOC_SETRSVSZ;
-		break;
-	case EXT3_IOC_GROUP_ADD:
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-	return ext3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
-}
-#endif
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
deleted file mode 100644
index c9e767c..0000000
--- a/fs/ext3/namei.c
+++ /dev/null
@@ -1,2586 +0,0 @@
-/*
- *  linux/fs/ext3/namei.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/namei.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- *  Directory entry file type support and forward compatibility hooks
- *	for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
- *  Hash Tree Directory indexing (c)
- *	Daniel Phillips, 2001
- *  Hash Tree Directory indexing porting
- *	Christopher Li, 2002
- *  Hash Tree Directory indexing cleanup
- *	Theodore Ts'o, 2002
- */
-
-#include <linux/quotaops.h>
-#include "ext3.h"
-#include "namei.h"
-#include "xattr.h"
-#include "acl.h"
-
-/*
- * define how far ahead to read directories while searching them.
- */
-#define NAMEI_RA_CHUNKS  2
-#define NAMEI_RA_BLOCKS  4
-#define NAMEI_RA_SIZE        (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
-
-static struct buffer_head *ext3_append(handle_t *handle,
-					struct inode *inode,
-					u32 *block, int *err)
-{
-	struct buffer_head *bh;
-
-	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;
-
-	if ((bh = ext3_dir_bread(handle, inode, *block, 1, err))) {
-		inode->i_size += inode->i_sb->s_blocksize;
-		EXT3_I(inode)->i_disksize = inode->i_size;
-		*err = ext3_journal_get_write_access(handle, bh);
-		if (*err) {
-			brelse(bh);
-			bh = NULL;
-		}
-	}
-	return bh;
-}
-
-#ifndef assert
-#define assert(test) J_ASSERT(test)
-#endif
-
-#ifdef DX_DEBUG
-#define dxtrace(command) command
-#else
-#define dxtrace(command)
-#endif
-
-struct fake_dirent
-{
-	__le32 inode;
-	__le16 rec_len;
-	u8 name_len;
-	u8 file_type;
-};
-
-struct dx_countlimit
-{
-	__le16 limit;
-	__le16 count;
-};
-
-struct dx_entry
-{
-	__le32 hash;
-	__le32 block;
-};
-
-/*
- * dx_root_info is laid out so that if it should somehow get overlaid by a
- * dirent the two low bits of the hash version will be zero.  Therefore, the
- * hash version mod 4 should never be 0.  Sincerely, the paranoia department.
- */
-
-struct dx_root
-{
-	struct fake_dirent dot;
-	char dot_name[4];
-	struct fake_dirent dotdot;
-	char dotdot_name[4];
-	struct dx_root_info
-	{
-		__le32 reserved_zero;
-		u8 hash_version;
-		u8 info_length; /* 8 */
-		u8 indirect_levels;
-		u8 unused_flags;
-	}
-	info;
-	struct dx_entry	entries[0];
-};
-
-struct dx_node
-{
-	struct fake_dirent fake;
-	struct dx_entry	entries[0];
-};
-
-
-struct dx_frame
-{
-	struct buffer_head *bh;
-	struct dx_entry *entries;
-	struct dx_entry *at;
-};
-
-struct dx_map_entry
-{
-	u32 hash;
-	u16 offs;
-	u16 size;
-};
-
-static inline unsigned dx_get_block (struct dx_entry *entry);
-static void dx_set_block (struct dx_entry *entry, unsigned value);
-static inline unsigned dx_get_hash (struct dx_entry *entry);
-static void dx_set_hash (struct dx_entry *entry, unsigned value);
-static unsigned dx_get_count (struct dx_entry *entries);
-static unsigned dx_get_limit (struct dx_entry *entries);
-static void dx_set_count (struct dx_entry *entries, unsigned value);
-static void dx_set_limit (struct dx_entry *entries, unsigned value);
-static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
-static unsigned dx_node_limit (struct inode *dir);
-static struct dx_frame *dx_probe(struct qstr *entry,
-				 struct inode *dir,
-				 struct dx_hash_info *hinfo,
-				 struct dx_frame *frame,
-				 int *err);
-static void dx_release (struct dx_frame *frames);
-static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
-			struct dx_hash_info *hinfo, struct dx_map_entry map[]);
-static void dx_sort_map(struct dx_map_entry *map, unsigned count);
-static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
-		struct dx_map_entry *offsets, int count);
-static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize);
-static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
-static int ext3_htree_next_block(struct inode *dir, __u32 hash,
-				 struct dx_frame *frame,
-				 struct dx_frame *frames,
-				 __u32 *start_hash);
-static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
-			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
-			int *err);
-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode);
-
-/*
- * p is at least 6 bytes before the end of page
- */
-static inline struct ext3_dir_entry_2 *
-ext3_next_entry(struct ext3_dir_entry_2 *p)
-{
-	return (struct ext3_dir_entry_2 *)((char *)p +
-		ext3_rec_len_from_disk(p->rec_len));
-}
-
-/*
- * Future: use high four bits of block for coalesce-on-delete flags
- * Mask them off for now.
- */
-
-static inline unsigned dx_get_block (struct dx_entry *entry)
-{
-	return le32_to_cpu(entry->block) & 0x00ffffff;
-}
-
-static inline void dx_set_block (struct dx_entry *entry, unsigned value)
-{
-	entry->block = cpu_to_le32(value);
-}
-
-static inline unsigned dx_get_hash (struct dx_entry *entry)
-{
-	return le32_to_cpu(entry->hash);
-}
-
-static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
-{
-	entry->hash = cpu_to_le32(value);
-}
-
-static inline unsigned dx_get_count (struct dx_entry *entries)
-{
-	return le16_to_cpu(((struct dx_countlimit *) entries)->count);
-}
-
-static inline unsigned dx_get_limit (struct dx_entry *entries)
-{
-	return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
-}
-
-static inline void dx_set_count (struct dx_entry *entries, unsigned value)
-{
-	((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
-}
-
-static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
-{
-	((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
-}
-
-static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
-{
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
-		EXT3_DIR_REC_LEN(2) - infosize;
-	return entry_space / sizeof(struct dx_entry);
-}
-
-static inline unsigned dx_node_limit (struct inode *dir)
-{
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
-	return entry_space / sizeof(struct dx_entry);
-}
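For a feel of the resulting fan-out, here is a sketch under assumed constants (EXT3_DIR_REC_LEN(1) and EXT3_DIR_REC_LEN(2) both rounding up to 12 bytes, EXT3_DIR_REC_LEN(0) to 8, info_length of 8 as noted in dx_root_info, and an 8-byte dx_entry): a 4KiB block would hold 508 entries in the root and 511 in an interior node:

#include <stdio.h>

int main(void)
{
	unsigned blocksize = 4096;	/* assumed block size */
	unsigned dx_entry_size = 8;	/* two __le32 fields */

	unsigned root_limit = (blocksize - 12 - 12 - 8) / dx_entry_size;	/* 508 */
	unsigned node_limit = (blocksize - 8) / dx_entry_size;			/* 511 */

	printf("root fan-out %u, node fan-out %u\n", root_limit, node_limit);
	return 0;
}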
-
-/*
- * Debug
- */
-#ifdef DX_DEBUG
-static void dx_show_index (char * label, struct dx_entry *entries)
-{
-        int i, n = dx_get_count (entries);
-        printk("%s index ", label);
-        for (i = 0; i < n; i++)
-        {
-                printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
-        }
-        printk("\n");
-}
-
-struct stats
-{
-	unsigned names;
-	unsigned space;
-	unsigned bcount;
-};
-
-static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
-				 int size, int show_names)
-{
-	unsigned names = 0, space = 0;
-	char *base = (char *) de;
-	struct dx_hash_info h = *hinfo;
-
-	printk("names: ");
-	while ((char *) de < base + size)
-	{
-		if (de->inode)
-		{
-			if (show_names)
-			{
-				int len = de->name_len;
-				char *name = de->name;
-				while (len--) printk("%c", *name++);
-				ext3fs_dirhash(de->name, de->name_len, &h);
-				printk(":%x.%u ", h.hash,
-				       (unsigned) ((char *) de - base));
-			}
-			space += EXT3_DIR_REC_LEN(de->name_len);
-			names++;
-		}
-		de = ext3_next_entry(de);
-	}
-	printk("(%i)\n", names);
-	return (struct stats) { names, space, 1 };
-}
-
-struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
-			     struct dx_entry *entries, int levels)
-{
-	unsigned blocksize = dir->i_sb->s_blocksize;
-	unsigned count = dx_get_count (entries), names = 0, space = 0, i;
-	unsigned bcount = 0;
-	struct buffer_head *bh;
-	int err;
-	printk("%i indexed blocks...\n", count);
-	for (i = 0; i < count; i++, entries++)
-	{
-		u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
-		u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
-		struct stats stats;
-		printk("%s%3u:%03u hash %8x/%8x ",levels?"":"   ", i, block, hash, range);
-		if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
-		stats = levels?
-		   dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
-		   dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
-		names += stats.names;
-		space += stats.space;
-		bcount += stats.bcount;
-		brelse (bh);
-	}
-	if (bcount)
-		printk("%snames %u, fullness %u (%u%%)\n", levels?"":"   ",
-			names, space/bcount,(space/bcount)*100/blocksize);
-	return (struct stats) { names, space, bcount};
-}
-#endif /* DX_DEBUG */
-
-/*
- * Probe for a directory leaf block to search.
- *
- * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
- * error in the directory index, and the caller should fall back to
- * searching the directory normally.  The callers of dx_probe **MUST**
- * check for this error code, and make sure it never gets reflected
- * back to userspace.
- */
-static struct dx_frame *
-dx_probe(struct qstr *entry, struct inode *dir,
-	 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
-{
-	unsigned count, indirect;
-	struct dx_entry *at, *entries, *p, *q, *m;
-	struct dx_root *root;
-	struct buffer_head *bh;
-	struct dx_frame *frame = frame_in;
-	u32 hash;
-
-	frame->bh = NULL;
-	if (!(bh = ext3_dir_bread(NULL, dir, 0, 0, err))) {
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-	root = (struct dx_root *) bh->b_data;
-	if (root->info.hash_version != DX_HASH_TEA &&
-	    root->info.hash_version != DX_HASH_HALF_MD4 &&
-	    root->info.hash_version != DX_HASH_LEGACY) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unrecognised inode hash code %d",
-			     root->info.hash_version);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-	hinfo->hash_version = root->info.hash_version;
-	if (hinfo->hash_version <= DX_HASH_TEA)
-		hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-	if (entry)
-		ext3fs_dirhash(entry->name, entry->len, hinfo);
-	hash = hinfo->hash;
-
-	if (root->info.unused_flags & 1) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unimplemented inode hash flags: %#06x",
-			     root->info.unused_flags);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	if ((indirect = root->info.indirect_levels) > 1) {
-		ext3_warning(dir->i_sb, __func__,
-			     "Unimplemented inode hash depth: %#06x",
-			     root->info.indirect_levels);
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	entries = (struct dx_entry *) (((char *)&root->info) +
-				       root->info.info_length);
-
-	if (dx_get_limit(entries) != dx_root_limit(dir,
-						   root->info.info_length)) {
-		ext3_warning(dir->i_sb, __func__,
-			     "dx entry: limit != root limit");
-		brelse(bh);
-		*err = ERR_BAD_DX_DIR;
-		goto fail;
-	}
-
-	dxtrace (printk("Look up %x", hash));
-	while (1)
-	{
-		count = dx_get_count(entries);
-		if (!count || count > dx_get_limit(entries)) {
-			ext3_warning(dir->i_sb, __func__,
-				     "dx entry: no count or count > limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-
-		p = entries + 1;
-		q = entries + count - 1;
-		while (p <= q)
-		{
-			m = p + (q - p)/2;
-			dxtrace(printk("."));
-			if (dx_get_hash(m) > hash)
-				q = m - 1;
-			else
-				p = m + 1;
-		}
-
-		if (0) // linear search cross check
-		{
-			unsigned n = count - 1;
-			at = entries;
-			while (n--)
-			{
-				dxtrace(printk(","));
-				if (dx_get_hash(++at) > hash)
-				{
-					at--;
-					break;
-				}
-			}
-			assert (at == p - 1);
-		}
-
-		at = p - 1;
-		dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
-		frame->bh = bh;
-		frame->entries = entries;
-		frame->at = at;
-		if (!indirect--) return frame;
-		if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(at), 0, err))) {
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-		at = entries = ((struct dx_node *) bh->b_data)->entries;
-		if (dx_get_limit(entries) != dx_node_limit (dir)) {
-			ext3_warning(dir->i_sb, __func__,
-				     "dx entry: limit != node limit");
-			brelse(bh);
-			*err = ERR_BAD_DX_DIR;
-			goto fail2;
-		}
-		frame++;
-		frame->bh = NULL;
-	}
-fail2:
-	while (frame >= frame_in) {
-		brelse(frame->bh);
-		frame--;
-	}
-fail:
-	if (*err == ERR_BAD_DX_DIR)
-		ext3_warning(dir->i_sb, __func__,
-			     "Corrupt dir inode %ld, running e2fsck is "
-			     "recommended.", dir->i_ino);
-	return NULL;
-}
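/*
 * Editorial sketch, not part of the original ext3 source: the interior-node
 * binary search that dx_probe() performs at each level, pulled out as a
 * standalone helper (it assumes the struct dx_entry accessors defined
 * earlier in this file).  Entry 0 of a node has no hash of its own and
 * covers everything below the hash of entry 1, so the search starts at
 * entries + 1 and finishes with p - 1 pointing at the entry whose block
 * may contain the target hash.
 */
static struct dx_entry *dx_node_search(struct dx_entry *entries,
				       unsigned count, u32 hash)
{
	struct dx_entry *p = entries + 1;
	struct dx_entry *q = entries + count - 1;
	struct dx_entry *m;

	while (p <= q) {
		m = p + (q - p) / 2;		/* midpoint, no overflow */
		if (dx_get_hash(m) > hash)
			q = m - 1;
		else
			p = m + 1;
	}
	return p - 1;		/* largest entry whose hash is <= target */
}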
-
-static void dx_release (struct dx_frame *frames)
-{
-	if (frames[0].bh == NULL)
-		return;
-
-	if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
-		brelse(frames[1].bh);
-	brelse(frames[0].bh);
-}
-
-/*
- * This function increments the frame pointer to search the next leaf
- * block, and reads in the necessary intervening nodes if the search
- * is necessary.  Whether or not the search is necessary is
- * controlled by the hash parameter.  If the hash value is even, then
- * the search is only continued if the next block starts with that
- * hash value.  This is used if we are searching for a specific file.
- *
- * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
- *
- * This function returns 1 if the caller should continue to search,
- * or 0 if it should not.  If there is an error reading one of the
- * index blocks, it will return a negative error code.
- *
- * If start_hash is non-null, it will be filled in with the starting
- * hash of the next page.
- */
-static int ext3_htree_next_block(struct inode *dir, __u32 hash,
-				 struct dx_frame *frame,
-				 struct dx_frame *frames,
-				 __u32 *start_hash)
-{
-	struct dx_frame *p;
-	struct buffer_head *bh;
-	int err, num_frames = 0;
-	__u32 bhash;
-
-	p = frame;
-	/*
-	 * Find the next leaf page by incrementing the frame pointer.
-	 * If we run out of entries in the interior node, loop around and
-	 * increment pointer in the parent node.  When we break out of
-	 * this loop, num_frames indicates the number of interior
-	 * nodes that need to be read.
-	 */
-	while (1) {
-		if (++(p->at) < p->entries + dx_get_count(p->entries))
-			break;
-		if (p == frames)
-			return 0;
-		num_frames++;
-		p--;
-	}
-
-	/*
-	 * If the hash is 1, then continue only if the next page has a
-	 * continuation hash of any value.  This is used for readdir
-	 * handling.  Otherwise, check to see if the hash matches the
-	 * desired continuation hash.  If it doesn't, return, since
-	 * there's no point in reading the successive index pages.
-	 */
-	bhash = dx_get_hash(p->at);
-	if (start_hash)
-		*start_hash = bhash;
-	if ((hash & 1) == 0) {
-		if ((bhash & ~1) != hash)
-			return 0;
-	}
-	/*
-	 * If the hash is HASH_NB_ALWAYS, we always go to the next
-	 * block so no check is necessary
-	 */
-	while (num_frames--) {
-		if (!(bh = ext3_dir_bread(NULL, dir, dx_get_block(p->at),
-					  0, &err)))
-			return err; /* Failure */
-		p++;
-		brelse (p->bh);
-		p->bh = bh;
-		p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
-	}
-	return 1;
-}
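/*
 * Editorial sketch, not part of the original ext3 source: the continuation
 * test used above, isolated for clarity.  Hashes handed to
 * ext3_htree_next_block() are either even (a lookup for one specific name)
 * or odd (HASH_NB_ALWAYS-style "read every leaf", as used by readdir).  An
 * index entry whose hash has bit 0 set marks a continuation block that
 * still holds entries for the preceding major hash, so for an even search
 * hash we only keep reading while the next block's hash, with bit 0
 * stripped, equals the hash we are looking for.
 */
static int htree_next_block_wanted(__u32 search_hash, __u32 next_block_hash)
{
	if (search_hash & 1)		/* readdir-style: always continue */
		return 1;
	return (next_block_hash & ~(__u32)1) == search_hash;
}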
-
-
-/*
- * This function fills a red-black tree with information from a
- * directory block.  It returns the number of directory entries loaded
- * into the tree.  If there is an error it is returned in err.
- */
-static int htree_dirblock_to_tree(struct file *dir_file,
-				  struct inode *dir, int block,
-				  struct dx_hash_info *hinfo,
-				  __u32 start_hash, __u32 start_minor_hash)
-{
-	struct buffer_head *bh;
-	struct ext3_dir_entry_2 *de, *top;
-	int err = 0, count = 0;
-
-	dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
-
-	if (!(bh = ext3_dir_bread(NULL, dir, block, 0, &err)))
-		return err;
-
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	top = (struct ext3_dir_entry_2 *) ((char *) de +
-					   dir->i_sb->s_blocksize -
-					   EXT3_DIR_REC_LEN(0));
-	for (; de < top; de = ext3_next_entry(de)) {
-		if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
-					(block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
-			/* silently ignore the rest of the block */
-			break;
-		}
-		ext3fs_dirhash(de->name, de->name_len, hinfo);
-		if ((hinfo->hash < start_hash) ||
-		    ((hinfo->hash == start_hash) &&
-		     (hinfo->minor_hash < start_minor_hash)))
-			continue;
-		if (de->inode == 0)
-			continue;
-		if ((err = ext3_htree_store_dirent(dir_file,
-				   hinfo->hash, hinfo->minor_hash, de)) != 0) {
-			brelse(bh);
-			return err;
-		}
-		count++;
-	}
-	brelse(bh);
-	return count;
-}
-
-
-/*
- * This function fills a red-black tree with information from a
- * directory.  We start scanning the directory in hash order, starting
- * at start_hash and start_minor_hash.
- *
- * This function returns the number of entries inserted into the tree,
- * or a negative error code.
- */
-int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
-			 __u32 start_minor_hash, __u32 *next_hash)
-{
-	struct dx_hash_info hinfo;
-	struct ext3_dir_entry_2 *de;
-	struct dx_frame frames[2], *frame;
-	struct inode *dir;
-	int block, err;
-	int count = 0;
-	int ret;
-	__u32 hashval;
-
-	dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
-		       start_minor_hash));
-	dir = file_inode(dir_file);
-	if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
-		hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
-		if (hinfo.hash_version <= DX_HASH_TEA)
-			hinfo.hash_version +=
-				EXT3_SB(dir->i_sb)->s_hash_unsigned;
-		hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-		count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
-					       start_hash, start_minor_hash);
-		*next_hash = ~0;
-		return count;
-	}
-	hinfo.hash = start_hash;
-	hinfo.minor_hash = 0;
-	frame = dx_probe(NULL, file_inode(dir_file), &hinfo, frames, &err);
-	if (!frame)
-		return err;
-
-	/* Add '.' and '..' from the htree header */
-	if (!start_hash && !start_minor_hash) {
-		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
-		if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
-			goto errout;
-		count++;
-	}
-	if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
-		de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
-		de = ext3_next_entry(de);
-		if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0)
-			goto errout;
-		count++;
-	}
-
-	while (1) {
-		block = dx_get_block(frame->at);
-		ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
-					     start_hash, start_minor_hash);
-		if (ret < 0) {
-			err = ret;
-			goto errout;
-		}
-		count += ret;
-		hashval = ~0;
-		ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
-					    frame, frames, &hashval);
-		*next_hash = hashval;
-		if (ret < 0) {
-			err = ret;
-			goto errout;
-		}
-		/*
-		 * Stop if:  (a) there are no more entries, or
-		 * (b) we have inserted at least one entry and the
-		 * next hash value is not a continuation
-		 */
-		if ((ret == 0) ||
-		    (count && ((hashval & 1) == 0)))
-			break;
-	}
-	dx_release(frames);
-	dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
-		       count, *next_hash));
-	return count;
-errout:
-	dx_release(frames);
-	return (err);
-}
-
-
-/*
- * Directory block splitting, compacting
- */
-
-/*
- * Create map of hash values, offsets, and sizes, stored at end of block.
- * Returns number of entries mapped.
- */
-static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
-		struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
-{
-	int count = 0;
-	char *base = (char *) de;
-	struct dx_hash_info h = *hinfo;
-
-	while ((char *) de < base + blocksize)
-	{
-		if (de->name_len && de->inode) {
-			ext3fs_dirhash(de->name, de->name_len, &h);
-			map_tail--;
-			map_tail->hash = h.hash;
-			map_tail->offs = (u16) ((char *) de - base);
-			map_tail->size = le16_to_cpu(de->rec_len);
-			count++;
-			cond_resched();
-		}
-		/* XXX: do we need to check rec_len == 0 case? -Chris */
-		de = ext3_next_entry(de);
-	}
-	return count;
-}
-
-/* Sort map by hash value */
-static void dx_sort_map (struct dx_map_entry *map, unsigned count)
-{
-        struct dx_map_entry *p, *q, *top = map + count - 1;
-        int more;
-        /* Combsort until bubble sort doesn't suck */
-        while (count > 2)
-	{
-                count = count*10/13;
-                if (count - 9 < 2) /* 9, 10 -> 11 */
-                        count = 11;
-                for (p = top, q = p - count; q >= map; p--, q--)
-                        if (p->hash < q->hash)
-                                swap(*p, *q);
-        }
-        /* Garden variety bubble sort */
-        do {
-                more = 0;
-                q = top;
-                while (q-- > map)
-		{
-                        if (q[1].hash >= q[0].hash)
-				continue;
-                        swap(*(q+1), *q);
-                        more = 1;
-		}
-	} while(more);
-}
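/*
 * Editorial sketch, not part of the original ext3 source: the same
 * comb-sort-then-bubble strategy as dx_sort_map(), demonstrated on a plain
 * array of hash values as a small standalone program.  The gap shrinks by
 * the classic 10/13 factor, gaps of 9 and 10 are bumped to 11, and a final
 * bubble pass mops up the nearly sorted array.
 */
#include <stdio.h>

static void u32_swap(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

static void comb_sort_hashes(unsigned int *hash, unsigned int count)
{
	unsigned int gap = count, i;
	int more;

	while (gap > 2) {
		gap = gap * 10 / 13;
		if (gap == 9 || gap == 10)	/* 9, 10 -> 11 */
			gap = 11;
		for (i = 0; i + gap < count; i++)
			if (hash[i] > hash[i + gap])
				u32_swap(&hash[i], &hash[i + gap]);
	}
	do {					/* garden variety bubble sort */
		more = 0;
		for (i = 1; i < count; i++)
			if (hash[i - 1] > hash[i]) {
				u32_swap(&hash[i - 1], &hash[i]);
				more = 1;
			}
	} while (more);
}

int main(void)
{
	unsigned int h[] = { 0xdeadbeef, 0x1234, 0xcafe, 0x42, 0xffff0000, 0x42 };
	unsigned int i, n = sizeof(h) / sizeof(h[0]);

	comb_sort_hashes(h, n);
	for (i = 0; i < n; i++)
		printf("%#x\n", h[i]);
	return 0;
}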
-
-static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
-{
-	struct dx_entry *entries = frame->entries;
-	struct dx_entry *old = frame->at, *new = old + 1;
-	int count = dx_get_count(entries);
-
-	assert(count < dx_get_limit(entries));
-	assert(old < entries + count);
-	memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
-	dx_set_hash(new, hash);
-	dx_set_block(new, block);
-	dx_set_count(entries, count + 1);
-}
-
-static void ext3_update_dx_flag(struct inode *inode)
-{
-	if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
-				     EXT3_FEATURE_COMPAT_DIR_INDEX))
-		EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
-}
-
-/*
- * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
- *
- * `len <= EXT3_NAME_LEN' is guaranteed by caller.
- * `de != NULL' is guaranteed by caller.
- */
-static inline int ext3_match (int len, const char * const name,
-			      struct ext3_dir_entry_2 * de)
-{
-	if (len != de->name_len)
-		return 0;
-	if (!de->inode)
-		return 0;
-	return !memcmp(name, de->name, len);
-}
-
-/*
- * Returns 0 if not found, -1 on failure, and 1 on success
- */
-static inline int search_dirblock(struct buffer_head * bh,
-				  struct inode *dir,
-				  struct qstr *child,
-				  unsigned long offset,
-				  struct ext3_dir_entry_2 ** res_dir)
-{
-	struct ext3_dir_entry_2 * de;
-	char * dlimit;
-	int de_len;
-	const char *name = child->name;
-	int namelen = child->len;
-
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	dlimit = bh->b_data + dir->i_sb->s_blocksize;
-	while ((char *) de < dlimit) {
-		/* this code is executed quadratically often */
-		/* do minimal checking `by hand' */
-
-		if ((char *) de + namelen <= dlimit &&
-		    ext3_match (namelen, name, de)) {
-			/* found a match - just to be sure, do a full check */
-			if (!ext3_check_dir_entry("ext3_find_entry",
-						  dir, de, bh, offset))
-				return -1;
-			*res_dir = de;
-			return 1;
-		}
-		/* prevent looping on a bad block */
-		de_len = ext3_rec_len_from_disk(de->rec_len);
-		if (de_len <= 0)
-			return -1;
-		offset += de_len;
-		de = (struct ext3_dir_entry_2 *) ((char *) de + de_len);
-	}
-	return 0;
-}
-
-
-/*
- *	ext3_find_entry()
- *
- * finds an entry in the specified directory with the wanted name. It
- * returns the cache buffer in which the entry was found, and the entry
- * itself (as a parameter - res_dir). It does NOT read the inode of the
- * entry - you'll have to do that yourself if you want to.
- *
- * The returned buffer_head has ->b_count elevated.  The caller is expected
- * to brelse() it when appropriate.
- */
-static struct buffer_head *ext3_find_entry(struct inode *dir,
-					struct qstr *entry,
-					struct ext3_dir_entry_2 **res_dir)
-{
-	struct super_block * sb;
-	struct buffer_head * bh_use[NAMEI_RA_SIZE];
-	struct buffer_head * bh, *ret = NULL;
-	unsigned long start, block, b;
-	const u8 *name = entry->name;
-	int ra_max = 0;		/* Number of bh's in the readahead
-				   buffer, bh_use[] */
-	int ra_ptr = 0;		/* Current index into readahead
-				   buffer */
-	int num = 0;
-	int nblocks, i, err;
-	int namelen;
-
-	*res_dir = NULL;
-	sb = dir->i_sb;
-	namelen = entry->len;
-	if (namelen > EXT3_NAME_LEN)
-		return NULL;
-	if ((namelen <= 2) && (name[0] == '.') &&
-	    (name[1] == '.' || name[1] == 0)) {
-		/*
-		 * "." or ".." will only be in the first block
-		 * NFS may look up ".."; "." should be handled by the VFS
-		 */
-		block = start = 0;
-		nblocks = 1;
-		goto restart;
-	}
-	if (is_dx(dir)) {
-		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
-		/*
-		 * On success, or if the error was file not found,
-		 * return.  Otherwise, fall back to doing a search the
-		 * old fashioned way.
-		 */
-		if (bh || (err != ERR_BAD_DX_DIR))
-			return bh;
-		dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
-	}
-	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
-	start = EXT3_I(dir)->i_dir_start_lookup;
-	if (start >= nblocks)
-		start = 0;
-	block = start;
-restart:
-	do {
-		/*
-		 * We deal with the read-ahead logic here.
-		 */
-		if (ra_ptr >= ra_max) {
-			/* Refill the readahead buffer */
-			ra_ptr = 0;
-			b = block;
-			for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
-				/*
-				 * Terminate if we reach the end of the
-				 * directory and must wrap, or if our
-				 * search has finished at this block.
-				 */
-				if (b >= nblocks || (num && block == start)) {
-					bh_use[ra_max] = NULL;
-					break;
-				}
-				num++;
-				bh = ext3_getblk(NULL, dir, b++, 0, &err);
-				bh_use[ra_max] = bh;
-				if (bh && !bh_uptodate_or_lock(bh)) {
-					get_bh(bh);
-					bh->b_end_io = end_buffer_read_sync;
-					submit_bh(READ | REQ_META | REQ_PRIO,
-						  bh);
-				}
-			}
-		}
-		if ((bh = bh_use[ra_ptr++]) == NULL)
-			goto next;
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			/* read error, skip block & hope for the best */
-			ext3_error(sb, __func__, "reading directory #%lu "
-				   "offset %lu", dir->i_ino, block);
-			brelse(bh);
-			goto next;
-		}
-		i = search_dirblock(bh, dir, entry,
-			    block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
-		if (i == 1) {
-			EXT3_I(dir)->i_dir_start_lookup = block;
-			ret = bh;
-			goto cleanup_and_exit;
-		} else {
-			brelse(bh);
-			if (i < 0)
-				goto cleanup_and_exit;
-		}
-	next:
-		if (++block >= nblocks)
-			block = 0;
-	} while (block != start);
-
-	/*
-	 * If the directory has grown while we were searching, then
-	 * search the last part of the directory before giving up.
-	 */
-	block = nblocks;
-	nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
-	if (block < nblocks) {
-		start = 0;
-		goto restart;
-	}
-
-cleanup_and_exit:
-	/* Clean up the read-ahead blocks */
-	for (; ra_ptr < ra_max; ra_ptr++)
-		brelse (bh_use[ra_ptr]);
-	return ret;
-}
-
-static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
-			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
-			int *err)
-{
-	struct super_block *sb = dir->i_sb;
-	struct dx_hash_info	hinfo;
-	struct dx_frame frames[2], *frame;
-	struct buffer_head *bh;
-	unsigned long block;
-	int retval;
-
-	if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
-		return NULL;
-	do {
-		block = dx_get_block(frame->at);
-		if (!(bh = ext3_dir_bread (NULL, dir, block, 0, err)))
-			goto errout;
-
-		retval = search_dirblock(bh, dir, entry,
-					 block << EXT3_BLOCK_SIZE_BITS(sb),
-					 res_dir);
-		if (retval == 1) {
-			dx_release(frames);
-			return bh;
-		}
-		brelse(bh);
-		if (retval == -1) {
-			*err = ERR_BAD_DX_DIR;
-			goto errout;
-		}
-
-		/* Check to see if we should continue to search */
-		retval = ext3_htree_next_block(dir, hinfo.hash, frame,
-					       frames, NULL);
-		if (retval < 0) {
-			ext3_warning(sb, __func__,
-			     "error reading index page in directory #%lu",
-			     dir->i_ino);
-			*err = retval;
-			goto errout;
-		}
-	} while (retval == 1);
-
-	*err = -ENOENT;
-errout:
-	dxtrace(printk("%s not found\n", entry->name));
-	dx_release (frames);
-	return NULL;
-}
-
-static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
-{
-	struct inode * inode;
-	struct ext3_dir_entry_2 * de;
-	struct buffer_head * bh;
-
-	if (dentry->d_name.len > EXT3_NAME_LEN)
-		return ERR_PTR(-ENAMETOOLONG);
-
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	inode = NULL;
-	if (bh) {
-		unsigned long ino = le32_to_cpu(de->inode);
-		brelse (bh);
-		if (!ext3_valid_inum(dir->i_sb, ino)) {
-			ext3_error(dir->i_sb, "ext3_lookup",
-				   "bad inode number: %lu", ino);
-			return ERR_PTR(-EIO);
-		}
-		inode = ext3_iget(dir->i_sb, ino);
-		if (inode == ERR_PTR(-ESTALE)) {
-			ext3_error(dir->i_sb, __func__,
-					"deleted inode referenced: %lu",
-					ino);
-			return ERR_PTR(-EIO);
-		}
-	}
-	return d_splice_alias(inode, dentry);
-}
-
-
-struct dentry *ext3_get_parent(struct dentry *child)
-{
-	unsigned long ino;
-	struct qstr dotdot = QSTR_INIT("..", 2);
-	struct ext3_dir_entry_2 * de;
-	struct buffer_head *bh;
-
-	bh = ext3_find_entry(d_inode(child), &dotdot, &de);
-	if (!bh)
-		return ERR_PTR(-ENOENT);
-	ino = le32_to_cpu(de->inode);
-	brelse(bh);
-
-	if (!ext3_valid_inum(d_inode(child)->i_sb, ino)) {
-		ext3_error(d_inode(child)->i_sb, "ext3_get_parent",
-			   "bad inode number: %lu", ino);
-		return ERR_PTR(-EIO);
-	}
-
-	return d_obtain_alias(ext3_iget(d_inode(child)->i_sb, ino));
-}
-
-#define S_SHIFT 12
-static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
-	[S_IFREG >> S_SHIFT]	= EXT3_FT_REG_FILE,
-	[S_IFDIR >> S_SHIFT]	= EXT3_FT_DIR,
-	[S_IFCHR >> S_SHIFT]	= EXT3_FT_CHRDEV,
-	[S_IFBLK >> S_SHIFT]	= EXT3_FT_BLKDEV,
-	[S_IFIFO >> S_SHIFT]	= EXT3_FT_FIFO,
-	[S_IFSOCK >> S_SHIFT]	= EXT3_FT_SOCK,
-	[S_IFLNK >> S_SHIFT]	= EXT3_FT_SYMLINK,
-};
-
-static inline void ext3_set_de_type(struct super_block *sb,
-				struct ext3_dir_entry_2 *de,
-				umode_t mode) {
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
-		de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
-}
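/*
 * Editorial sketch, not part of the original ext3 source: how the table
 * above is indexed.  Shifting the S_IFMT format bits of i_mode right by
 * S_SHIFT (12) turns the file-type nibble into a small array index, e.g.
 * S_IFDIR >> 12 == 4 selects EXT3_FT_DIR and S_IFREG >> 12 == 8 selects
 * EXT3_FT_REG_FILE.  A tiny standalone check:
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	printf("S_IFREG -> index %u\n", (unsigned)((S_IFREG & S_IFMT) >> 12)); /* 8  */
	printf("S_IFDIR -> index %u\n", (unsigned)((S_IFDIR & S_IFMT) >> 12)); /* 4  */
	printf("S_IFLNK -> index %u\n", (unsigned)((S_IFLNK & S_IFMT) >> 12)); /* 10 */
	return 0;
}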
-
-/*
- * Move count entries from end of map between two memory locations.
- * Returns pointer to last entry moved.
- */
-static struct ext3_dir_entry_2 *
-dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
-{
-	unsigned rec_len = 0;
-
-	while (count--) {
-		struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
-		rec_len = EXT3_DIR_REC_LEN(de->name_len);
-		memcpy (to, de, rec_len);
-		((struct ext3_dir_entry_2 *) to)->rec_len =
-				ext3_rec_len_to_disk(rec_len);
-		de->inode = 0;
-		map++;
-		to += rec_len;
-	}
-	return (struct ext3_dir_entry_2 *) (to - rec_len);
-}
-
-/*
- * Compact each dir entry in the range to the minimal rec_len.
- * Returns pointer to last entry in range.
- */
-static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize)
-{
-	struct ext3_dir_entry_2 *next, *to, *prev;
-	struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *)base;
-	unsigned rec_len = 0;
-
-	prev = to = de;
-	while ((char *)de < base + blocksize) {
-		next = ext3_next_entry(de);
-		if (de->inode && de->name_len) {
-			rec_len = EXT3_DIR_REC_LEN(de->name_len);
-			if (de > to)
-				memmove(to, de, rec_len);
-			to->rec_len = ext3_rec_len_to_disk(rec_len);
-			prev = to;
-			to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
-		}
-		de = next;
-	}
-	return prev;
-}
-
-/*
- * Split a full leaf block to make room for a new dir entry.
- * Allocate a new block, and move entries so that they are approx. equally full.
- * Returns pointer to de in block into which the new entry will be inserted.
- */
-static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-			struct buffer_head **bh,struct dx_frame *frame,
-			struct dx_hash_info *hinfo, int *error)
-{
-	unsigned blocksize = dir->i_sb->s_blocksize;
-	unsigned count, continued;
-	struct buffer_head *bh2;
-	u32 newblock;
-	u32 hash2;
-	struct dx_map_entry *map;
-	char *data1 = (*bh)->b_data, *data2;
-	unsigned split, move, size;
-	struct ext3_dir_entry_2 *de = NULL, *de2;
-	int	err = 0, i;
-
-	bh2 = ext3_append (handle, dir, &newblock, &err);
-	if (!(bh2)) {
-		brelse(*bh);
-		*bh = NULL;
-		goto errout;
-	}
-
-	BUFFER_TRACE(*bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, *bh);
-	if (err)
-		goto journal_error;
-
-	BUFFER_TRACE(frame->bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, frame->bh);
-	if (err)
-		goto journal_error;
-
-	data2 = bh2->b_data;
-
-	/* create map in the end of data2 block */
-	map = (struct dx_map_entry *) (data2 + blocksize);
-	count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
-			     blocksize, hinfo, map);
-	map -= count;
-	dx_sort_map (map, count);
-	/* Split the existing block in the middle, size-wise */
-	size = 0;
-	move = 0;
-	for (i = count-1; i >= 0; i--) {
-		/* is more than half of this entry in 2nd half of the block? */
-		if (size + map[i].size/2 > blocksize/2)
-			break;
-		size += map[i].size;
-		move++;
-	}
-	/* map index at which we will split */
-	split = count - move;
-	hash2 = map[split].hash;
-	continued = hash2 == map[split - 1].hash;
-	dxtrace(printk("Split block %i at %x, %i/%i\n",
-		dx_get_block(frame->at), hash2, split, count-split));
-
-	/* Fancy dance to stay within two buffers */
-	de2 = dx_move_dirents(data1, data2, map + split, count - split);
-	de = dx_pack_dirents(data1,blocksize);
-	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
-	de2->rec_len = ext3_rec_len_to_disk(data2 + blocksize - (char *) de2);
-	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
-	dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
-
-	/* Which block gets the new entry? */
-	if (hinfo->hash >= hash2)
-	{
-		swap(*bh, bh2);
-		de = de2;
-	}
-	dx_insert_block (frame, hash2 + continued, newblock);
-	err = ext3_journal_dirty_metadata (handle, bh2);
-	if (err)
-		goto journal_error;
-	err = ext3_journal_dirty_metadata (handle, frame->bh);
-	if (err)
-		goto journal_error;
-	brelse (bh2);
-	dxtrace(dx_show_index ("frame", frame->entries));
-	return de;
-
-journal_error:
-	brelse(*bh);
-	brelse(bh2);
-	*bh = NULL;
-	ext3_std_error(dir->i_sb, err);
-errout:
-	*error = err;
-	return NULL;
-}
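/*
 * Editorial sketch, not part of the original ext3 source: the split-point
 * choice made above, as a standalone helper (it assumes the struct
 * dx_map_entry type defined earlier in this file).  Walking the hash-sorted
 * map from the highest hash downwards, entries are earmarked for the new
 * second block until that half would exceed half a block; the returned map
 * index is where the block is cut, so the two halves end up roughly equal
 * in bytes rather than in entry count.
 */
static unsigned dx_pick_split(const struct dx_map_entry *map, unsigned count,
			      unsigned blocksize)
{
	unsigned size = 0, move = 0;
	int i;

	for (i = count - 1; i >= 0; i--) {
		/* is more than half of this entry in the 2nd half of the block? */
		if (size + map[i].size / 2 > blocksize / 2)
			break;
		size += map[i].size;
		move++;
	}
	return count - move;
}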
-
-
-/*
- * Add a new entry into a directory (leaf) block.  If de is non-NULL,
- * it points to a directory entry which is guaranteed to be large
- * enough for the new directory entry.  If de is NULL, then
- * add_dirent_to_buf will attempt to search the directory block for
- * space.  It will return -ENOSPC if no space is available, -EIO if the
- * block contains a corrupted entry, and -EEXIST if the directory entry
- * already exists.
- *
- * NOTE!  bh is NOT released in the case where ENOSPC is returned.  In
- * all other cases bh is released.
- */
-static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode, struct ext3_dir_entry_2 *de,
-			     struct buffer_head * bh)
-{
-	struct inode	*dir = d_inode(dentry->d_parent);
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	unsigned long	offset = 0;
-	unsigned short	reclen;
-	int		nlen, rlen, err;
-	char		*top;
-
-	reclen = EXT3_DIR_REC_LEN(namelen);
-	if (!de) {
-		de = (struct ext3_dir_entry_2 *)bh->b_data;
-		top = bh->b_data + dir->i_sb->s_blocksize - reclen;
-		while ((char *) de <= top) {
-			if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
-						  bh, offset)) {
-				brelse (bh);
-				return -EIO;
-			}
-			if (ext3_match (namelen, name, de)) {
-				brelse (bh);
-				return -EEXIST;
-			}
-			nlen = EXT3_DIR_REC_LEN(de->name_len);
-			rlen = ext3_rec_len_from_disk(de->rec_len);
-			if ((de->inode? rlen - nlen: rlen) >= reclen)
-				break;
-			de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
-			offset += rlen;
-		}
-		if ((char *) de > top)
-			return -ENOSPC;
-	}
-	BUFFER_TRACE(bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh);
-	if (err) {
-		ext3_std_error(dir->i_sb, err);
-		brelse(bh);
-		return err;
-	}
-
-	/* By now the buffer is marked for journaling */
-	nlen = EXT3_DIR_REC_LEN(de->name_len);
-	rlen = ext3_rec_len_from_disk(de->rec_len);
-	if (de->inode) {
-		struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
-		de1->rec_len = ext3_rec_len_to_disk(rlen - nlen);
-		de->rec_len = ext3_rec_len_to_disk(nlen);
-		de = de1;
-	}
-	de->file_type = EXT3_FT_UNKNOWN;
-	if (inode) {
-		de->inode = cpu_to_le32(inode->i_ino);
-		ext3_set_de_type(dir->i_sb, de, inode->i_mode);
-	} else
-		de->inode = 0;
-	de->name_len = namelen;
-	memcpy (de->name, name, namelen);
-	/*
-	 * XXX shouldn't update any times until successful
-	 * completion of syscall, but too many callers depend
-	 * on this.
-	 *
-	 * XXX similarly, too many callers depend on
-	 * ext3_new_inode() setting the times, but error
-	 * recovery deletes the inode, so the worst that can
-	 * happen is that the times are slightly out of date
-	 * and/or different from the directory change time.
-	 */
-	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(dir);
-	dir->i_version++;
-	ext3_mark_inode_dirty(handle, dir);
-	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, bh);
-	if (err)
-		ext3_std_error(dir->i_sb, err);
-	brelse(bh);
-	return 0;
-}
-
-/*
- * This converts a one block unindexed directory to a 3 block indexed
- * directory, and adds the dentry to the indexed directory.
- */
-static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
-			    struct inode *inode, struct buffer_head *bh)
-{
-	struct inode	*dir = d_inode(dentry->d_parent);
-	const char	*name = dentry->d_name.name;
-	int		namelen = dentry->d_name.len;
-	struct buffer_head *bh2;
-	struct dx_root	*root;
-	struct dx_frame	frames[2], *frame;
-	struct dx_entry *entries;
-	struct ext3_dir_entry_2	*de, *de2;
-	char		*data1, *top;
-	unsigned	len;
-	int		retval;
-	unsigned	blocksize;
-	struct dx_hash_info hinfo;
-	u32		block;
-	struct fake_dirent *fde;
-
-	blocksize =  dir->i_sb->s_blocksize;
-	dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
-	retval = ext3_journal_get_write_access(handle, bh);
-	if (retval) {
-		ext3_std_error(dir->i_sb, retval);
-		brelse(bh);
-		return retval;
-	}
-	root = (struct dx_root *) bh->b_data;
-
-	/* The 0th block becomes the root, move the dirents out */
-	fde = &root->dotdot;
-	de = (struct ext3_dir_entry_2 *)((char *)fde +
-			ext3_rec_len_from_disk(fde->rec_len));
-	if ((char *) de >= (((char *) root) + blocksize)) {
-		ext3_error(dir->i_sb, __func__,
-			   "invalid rec_len for '..' in inode %lu",
-			   dir->i_ino);
-		brelse(bh);
-		return -EIO;
-	}
-	len = ((char *) root) + blocksize - (char *) de;
-
-	bh2 = ext3_append (handle, dir, &block, &retval);
-	if (!(bh2)) {
-		brelse(bh);
-		return retval;
-	}
-	EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
-	data1 = bh2->b_data;
-
-	memcpy (data1, de, len);
-	de = (struct ext3_dir_entry_2 *) data1;
-	top = data1 + len;
-	while ((char *)(de2 = ext3_next_entry(de)) < top)
-		de = de2;
-	de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
-	/* Initialize the root; the dot dirents already exist */
-	de = (struct ext3_dir_entry_2 *) (&root->dotdot);
-	de->rec_len = ext3_rec_len_to_disk(blocksize - EXT3_DIR_REC_LEN(2));
-	memset (&root->info, 0, sizeof(root->info));
-	root->info.info_length = sizeof(root->info);
-	root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
-	entries = root->entries;
-	dx_set_block (entries, 1);
-	dx_set_count (entries, 1);
-	dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
-
-	/* Initialize as for dx_probe */
-	hinfo.hash_version = root->info.hash_version;
-	if (hinfo.hash_version <= DX_HASH_TEA)
-		hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
-	hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
-	ext3fs_dirhash(name, namelen, &hinfo);
-	frame = frames;
-	frame->entries = entries;
-	frame->at = entries;
-	frame->bh = bh;
-	bh = bh2;
-	/*
-	 * Mark buffers dirty here so that if do_split() fails we write a
-	 * consistent set of buffers to disk.
-	 */
-	ext3_journal_dirty_metadata(handle, frame->bh);
-	ext3_journal_dirty_metadata(handle, bh);
-	de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-	if (!de) {
-		ext3_mark_inode_dirty(handle, dir);
-		dx_release(frames);
-		return retval;
-	}
-	dx_release(frames);
-
-	return add_dirent_to_buf(handle, dentry, inode, de, bh);
-}
-
-/*
- *	ext3_add_entry()
- *
- * adds a file entry to the specified directory, using the same
- * semantics as ext3_find_entry().  It returns 0 on success, or a
- * negative error code if it failed.
- *
- * NOTE!! The inode part of 'de' is left at 0 - which means you
- * may not sleep between calling this and putting something into
- * the entry, as someone else might have used it while you slept.
- */
-static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
-	struct inode *inode)
-{
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 *de;
-	struct super_block * sb;
-	int	retval;
-	int	dx_fallback=0;
-	unsigned blocksize;
-	u32 block, blocks;
-
-	sb = dir->i_sb;
-	blocksize = sb->s_blocksize;
-	if (!dentry->d_name.len)
-		return -EINVAL;
-	if (is_dx(dir)) {
-		retval = ext3_dx_add_entry(handle, dentry, inode);
-		if (!retval || (retval != ERR_BAD_DX_DIR))
-			return retval;
-		EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
-		dx_fallback++;
-		ext3_mark_inode_dirty(handle, dir);
-	}
-	blocks = dir->i_size >> sb->s_blocksize_bits;
-	for (block = 0; block < blocks; block++) {
-		if (!(bh = ext3_dir_bread(handle, dir, block, 0, &retval)))
-			return retval;
-
-		retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-		if (retval != -ENOSPC)
-			return retval;
-
-		if (blocks == 1 && !dx_fallback &&
-		    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
-			return make_indexed_dir(handle, dentry, inode, bh);
-		brelse(bh);
-	}
-	bh = ext3_append(handle, dir, &block, &retval);
-	if (!bh)
-		return retval;
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	de->inode = 0;
-	de->rec_len = ext3_rec_len_to_disk(blocksize);
-	return add_dirent_to_buf(handle, dentry, inode, de, bh);
-}
-
-/*
- * Returns 0 for success, or a negative error value
- */
-static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
-			     struct inode *inode)
-{
-	struct dx_frame frames[2], *frame;
-	struct dx_entry *entries, *at;
-	struct dx_hash_info hinfo;
-	struct buffer_head * bh;
-	struct inode *dir = d_inode(dentry->d_parent);
-	struct super_block * sb = dir->i_sb;
-	struct ext3_dir_entry_2 *de;
-	int err;
-
-	frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
-	if (!frame)
-		return err;
-	entries = frame->entries;
-	at = frame->at;
-
-	if (!(bh = ext3_dir_bread(handle, dir, dx_get_block(frame->at), 0, &err)))
-		goto cleanup;
-
-	BUFFER_TRACE(bh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, bh);
-	if (err)
-		goto journal_error;
-
-	err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
-	if (err != -ENOSPC) {
-		bh = NULL;
-		goto cleanup;
-	}
-
-	/* Block full, should compress but for now just split */
-	dxtrace(printk("using %u of %u node entries\n",
-		       dx_get_count(entries), dx_get_limit(entries)));
-	/* Need to split index? */
-	if (dx_get_count(entries) == dx_get_limit(entries)) {
-		u32 newblock;
-		unsigned icount = dx_get_count(entries);
-		int levels = frame - frames;
-		struct dx_entry *entries2;
-		struct dx_node *node2;
-		struct buffer_head *bh2;
-
-		if (levels && (dx_get_count(frames->entries) ==
-			       dx_get_limit(frames->entries))) {
-			ext3_warning(sb, __func__,
-				     "Directory index full!");
-			err = -ENOSPC;
-			goto cleanup;
-		}
-		bh2 = ext3_append (handle, dir, &newblock, &err);
-		if (!(bh2))
-			goto cleanup;
-		node2 = (struct dx_node *)(bh2->b_data);
-		entries2 = node2->entries;
-		memset(&node2->fake, 0, sizeof(struct fake_dirent));
-		node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
-		BUFFER_TRACE(frame->bh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, frame->bh);
-		if (err)
-			goto journal_error;
-		if (levels) {
-			unsigned icount1 = icount/2, icount2 = icount - icount1;
-			unsigned hash2 = dx_get_hash(entries + icount1);
-			dxtrace(printk("Split index %i/%i\n", icount1, icount2));
-
-			BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
-			err = ext3_journal_get_write_access(handle,
-							     frames[0].bh);
-			if (err)
-				goto journal_error;
-
-			memcpy ((char *) entries2, (char *) (entries + icount1),
-				icount2 * sizeof(struct dx_entry));
-			dx_set_count (entries, icount1);
-			dx_set_count (entries2, icount2);
-			dx_set_limit (entries2, dx_node_limit(dir));
-
-			/* Which index block gets the new entry? */
-			if (at - entries >= icount1) {
-				frame->at = at = at - entries - icount1 + entries2;
-				frame->entries = entries = entries2;
-				swap(frame->bh, bh2);
-			}
-			dx_insert_block (frames + 0, hash2, newblock);
-			dxtrace(dx_show_index ("node", frames[1].entries));
-			dxtrace(dx_show_index ("node",
-			       ((struct dx_node *) bh2->b_data)->entries));
-			err = ext3_journal_dirty_metadata(handle, bh2);
-			if (err)
-				goto journal_error;
-			brelse (bh2);
-		} else {
-			dxtrace(printk("Creating second level index...\n"));
-			memcpy((char *) entries2, (char *) entries,
-			       icount * sizeof(struct dx_entry));
-			dx_set_limit(entries2, dx_node_limit(dir));
-
-			/* Set up root */
-			dx_set_count(entries, 1);
-			dx_set_block(entries + 0, newblock);
-			((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
-
-			/* Add new access path frame */
-			frame = frames + 1;
-			frame->at = at = at - entries + entries2;
-			frame->entries = entries = entries2;
-			frame->bh = bh2;
-			err = ext3_journal_get_write_access(handle,
-							     frame->bh);
-			if (err)
-				goto journal_error;
-		}
-		err = ext3_journal_dirty_metadata(handle, frames[0].bh);
-		if (err)
-			goto journal_error;
-	}
-	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
-	if (!de)
-		goto cleanup;
-	err = add_dirent_to_buf(handle, dentry, inode, de, bh);
-	bh = NULL;
-	goto cleanup;
-
-journal_error:
-	ext3_std_error(dir->i_sb, err);
-cleanup:
-	if (bh)
-		brelse(bh);
-	dx_release(frames);
-	return err;
-}
-
-/*
- * ext3_delete_entry deletes a directory entry by merging it with the
- * previous entry
- */
-static int ext3_delete_entry (handle_t *handle,
-			      struct inode * dir,
-			      struct ext3_dir_entry_2 * de_del,
-			      struct buffer_head * bh)
-{
-	struct ext3_dir_entry_2 * de, * pde;
-	int i;
-
-	i = 0;
-	pde = NULL;
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	while (i < bh->b_size) {
-		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
-			return -EIO;
-		if (de == de_del)  {
-			int err;
-
-			BUFFER_TRACE(bh, "get_write_access");
-			err = ext3_journal_get_write_access(handle, bh);
-			if (err)
-				goto journal_error;
-
-			if (pde)
-				pde->rec_len = ext3_rec_len_to_disk(
-					ext3_rec_len_from_disk(pde->rec_len) +
-					ext3_rec_len_from_disk(de->rec_len));
-			else
-				de->inode = 0;
-			dir->i_version++;
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err) {
-journal_error:
-				ext3_std_error(dir->i_sb, err);
-				return err;
-			}
-			return 0;
-		}
-		i += ext3_rec_len_from_disk(de->rec_len);
-		pde = de;
-		de = ext3_next_entry(de);
-	}
-	return -ENOENT;
-}
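/*
 * Editorial sketch, not part of the original ext3 source: the core of the
 * "delete by merging" trick above (it assumes the ext3_dir_entry_2 type and
 * the rec_len helpers from this file).  Directory records are never shifted
 * on deletion; the previous record's rec_len simply grows to cover the
 * victim, and a record at the very start of a block is instead killed by
 * zeroing its inode field.
 */
static void dirent_merge_into_prev(struct ext3_dir_entry_2 *prev,
				   struct ext3_dir_entry_2 *victim)
{
	prev->rec_len = ext3_rec_len_to_disk(
				ext3_rec_len_from_disk(prev->rec_len) +
				ext3_rec_len_from_disk(victim->rec_len));
}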
-
-static int ext3_add_nondir(handle_t *handle,
-		struct dentry *dentry, struct inode *inode)
-{
-	int err = ext3_add_entry(handle, dentry, inode);
-	if (!err) {
-		ext3_mark_inode_dirty(handle, inode);
-		unlock_new_inode(inode);
-		d_instantiate(dentry, inode);
-		return 0;
-	}
-	drop_nlink(inode);
-	unlock_new_inode(inode);
-	iput(inode);
-	return err;
-}
-
-/*
- * By the time this is called, we already have created
- * the directory cache entry for the new file, but it
- * is so far negative - it has no inode.
- *
- * If the create succeeds, we fill in the inode information
- * with d_instantiate().
- */
-static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode,
-		bool excl)
-{
-	handle_t *handle;
-	struct inode * inode;
-	int err, retries = 0;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-		err = ext3_add_nondir(handle, dentry, inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-static int ext3_mknod (struct inode * dir, struct dentry *dentry,
-			umode_t mode, dev_t rdev)
-{
-	handle_t *handle;
-	struct inode *inode;
-	int err, retries = 0;
-
-	if (!new_valid_dev(rdev))
-		return -EINVAL;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT3_FS_XATTR
-		inode->i_op = &ext3_special_inode_operations;
-#endif
-		err = ext3_add_nondir(handle, dentry, inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-static int ext3_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
-{
-	handle_t *handle;
-	struct inode *inode;
-	int err, retries = 0;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
-			  4 + EXT3_XATTR_TRANS_BLOCKS);
-
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	inode = ext3_new_inode (handle, dir, NULL, mode);
-	err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext3_file_inode_operations;
-		inode->i_fop = &ext3_file_operations;
-		ext3_set_aops(inode);
-		d_tmpfile(dentry, inode);
-		err = ext3_orphan_add(handle, inode);
-		if (err)
-			goto err_unlock_inode;
-		mark_inode_dirty(inode);
-		unlock_new_inode(inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-err_unlock_inode:
-	ext3_journal_stop(handle);
-	unlock_new_inode(inode);
-	return err;
-}
-
-static int ext3_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
-{
-	handle_t *handle;
-	struct inode * inode;
-	struct buffer_head * dir_block = NULL;
-	struct ext3_dir_entry_2 * de;
-	int err, retries = 0;
-
-	if (dir->i_nlink >= EXT3_LINK_MAX)
-		return -EMLINK;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-					EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFDIR | mode);
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
-
-	inode->i_op = &ext3_dir_inode_operations;
-	inode->i_fop = &ext3_dir_operations;
-	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
-	if (!(dir_block = ext3_dir_bread(handle, inode, 0, 1, &err)))
-		goto out_clear_inode;
-
-	BUFFER_TRACE(dir_block, "get_write_access");
-	err = ext3_journal_get_write_access(handle, dir_block);
-	if (err)
-		goto out_clear_inode;
-
-	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
-	de->inode = cpu_to_le32(inode->i_ino);
-	de->name_len = 1;
-	de->rec_len = ext3_rec_len_to_disk(EXT3_DIR_REC_LEN(de->name_len));
-	strcpy (de->name, ".");
-	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
-	de = ext3_next_entry(de);
-	de->inode = cpu_to_le32(dir->i_ino);
-	de->rec_len = ext3_rec_len_to_disk(inode->i_sb->s_blocksize -
-					EXT3_DIR_REC_LEN(1));
-	de->name_len = 2;
-	strcpy (de->name, "..");
-	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
-	set_nlink(inode, 2);
-	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
-	err = ext3_journal_dirty_metadata(handle, dir_block);
-	if (err)
-		goto out_clear_inode;
-
-	err = ext3_mark_inode_dirty(handle, inode);
-	if (!err)
-		err = ext3_add_entry (handle, dentry, inode);
-
-	if (err) {
-out_clear_inode:
-		clear_nlink(inode);
-		unlock_new_inode(inode);
-		ext3_mark_inode_dirty(handle, inode);
-		iput (inode);
-		goto out_stop;
-	}
-	inc_nlink(dir);
-	ext3_update_dx_flag(dir);
-	err = ext3_mark_inode_dirty(handle, dir);
-	if (err)
-		goto out_clear_inode;
-
-	unlock_new_inode(inode);
-	d_instantiate(dentry, inode);
-out_stop:
-	brelse(dir_block);
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-/*
- * routine to check that the specified directory is empty (for rmdir)
- */
-static int empty_dir (struct inode * inode)
-{
-	unsigned long offset;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de, * de1;
-	struct super_block * sb;
-	int err = 0;
-
-	sb = inode->i_sb;
-	if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
-	    !(bh = ext3_dir_bread(NULL, inode, 0, 0, &err))) {
-		if (err)
-			ext3_error(inode->i_sb, __func__,
-				   "error %d reading directory #%lu offset 0",
-				   err, inode->i_ino);
-		else
-			ext3_warning(inode->i_sb, __func__,
-				     "bad directory (dir #%lu) - no data block",
-				     inode->i_ino);
-		return 1;
-	}
-	de = (struct ext3_dir_entry_2 *) bh->b_data;
-	de1 = ext3_next_entry(de);
-	if (le32_to_cpu(de->inode) != inode->i_ino ||
-			!le32_to_cpu(de1->inode) ||
-			strcmp (".", de->name) ||
-			strcmp ("..", de1->name)) {
-		ext3_warning (inode->i_sb, "empty_dir",
-			      "bad directory (dir #%lu) - no `.' or `..'",
-			      inode->i_ino);
-		brelse (bh);
-		return 1;
-	}
-	offset = ext3_rec_len_from_disk(de->rec_len) +
-			ext3_rec_len_from_disk(de1->rec_len);
-	de = ext3_next_entry(de1);
-	while (offset < inode->i_size ) {
-		if (!bh ||
-			(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
-			err = 0;
-			brelse (bh);
-			if (!(bh = ext3_dir_bread (NULL, inode,
-				offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err))) {
-				if (err)
-					ext3_error(sb, __func__,
-						   "error %d reading directory"
-						   " #%lu offset %lu",
-						   err, inode->i_ino, offset);
-				offset += sb->s_blocksize;
-				continue;
-			}
-			de = (struct ext3_dir_entry_2 *) bh->b_data;
-		}
-		if (!ext3_check_dir_entry("empty_dir", inode, de, bh, offset)) {
-			de = (struct ext3_dir_entry_2 *)(bh->b_data +
-							 sb->s_blocksize);
-			offset = (offset | (sb->s_blocksize - 1)) + 1;
-			continue;
-		}
-		if (le32_to_cpu(de->inode)) {
-			brelse (bh);
-			return 0;
-		}
-		offset += ext3_rec_len_from_disk(de->rec_len);
-		de = ext3_next_entry(de);
-	}
-	brelse (bh);
-	return 1;
-}
-
-/* ext3_orphan_add() links an unlinked or truncated inode into a list of
- * such inodes, starting at the superblock, in case we crash before the
- * file is closed/deleted, or in case the inode truncate spans multiple
- * transactions and the last transaction is not recovered after a crash.
- *
- * At filesystem recovery time, we walk this list deleting unlinked
- * inodes and truncating linked inodes in ext3_orphan_cleanup().
- */
-int ext3_orphan_add(handle_t *handle, struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ext3_iloc iloc;
-	int err = 0, rc;
-
-	mutex_lock(&EXT3_SB(sb)->s_orphan_lock);
-	if (!list_empty(&EXT3_I(inode)->i_orphan))
-		goto out_unlock;
-
-	/* Orphan handling is only valid for files with data blocks
-	 * being truncated, or files being unlinked. */
-
-	/* @@@ FIXME: Observation from aviro:
-	 * I think I can trigger J_ASSERT in ext3_orphan_add().  We block
-	 * here (on s_orphan_lock), so race with ext3_link() which might bump
-	 * ->i_nlink. For, say it, character device. Not a regular file,
-	 * not a directory, not a symlink and ->i_nlink > 0.
-	 *
-	 * tytso, 4/25/2009: I'm not sure how that could happen;
-	 * shouldn't the fs core protect us from these sort of
-	 * unlink()/link() races?
-	 */
-	J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-		S_ISLNK(inode->i_mode)) || inode->i_nlink == 0);
-
-	BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "get_write_access");
-	err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh);
-	if (err)
-		goto out_unlock;
-
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (err)
-		goto out_unlock;
-
-	/* Insert this inode at the head of the on-disk orphan list... */
-	NEXT_ORPHAN(inode) = le32_to_cpu(EXT3_SB(sb)->s_es->s_last_orphan);
-	EXT3_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	rc = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	if (!err)
-		err = rc;
-
-	/* Only add to the head of the in-memory list if all the
-	 * previous operations succeeded.  If the orphan_add is going to
-	 * fail (possibly taking the journal offline), we can't risk
-	 * leaving the inode on the orphan list: stray orphan-list
-	 * entries can cause panics at unmount time.
-	 *
-	 * This is safe: on error we're going to ignore the orphan list
-	 * anyway on the next recovery. */
-	if (!err)
-		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-
-	jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
-	jbd_debug(4, "orphan inode %lu will point to %d\n",
-			inode->i_ino, NEXT_ORPHAN(inode));
-out_unlock:
-	mutex_unlock(&EXT3_SB(sb)->s_orphan_lock);
-	ext3_std_error(inode->i_sb, err);
-	return err;
-}
-
-/*
- * ext3_orphan_del() removes an unlinked or truncated inode from the list
- * of such inodes stored on disk, because it is finally being cleaned up.
- */
-int ext3_orphan_del(handle_t *handle, struct inode *inode)
-{
-	struct list_head *prev;
-	struct ext3_inode_info *ei = EXT3_I(inode);
-	struct ext3_sb_info *sbi;
-	unsigned long ino_next;
-	struct ext3_iloc iloc;
-	int err = 0;
-
-	mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
-	if (list_empty(&ei->i_orphan))
-		goto out;
-
-	ino_next = NEXT_ORPHAN(inode);
-	prev = ei->i_orphan.prev;
-	sbi = EXT3_SB(inode->i_sb);
-
-	jbd_debug(4, "remove inode %lu from orphan list\n", inode->i_ino);
-
-	list_del_init(&ei->i_orphan);
-
-	/* If we're on an error path, we may not have a valid
-	 * transaction handle with which to update the orphan list on
-	 * disk, but we still need to remove the inode from the linked
-	 * list in memory. */
-	if (!handle)
-		goto out;
-
-	err = ext3_reserve_inode_write(handle, inode, &iloc);
-	if (err)
-		goto out_err;
-
-	if (prev == &sbi->s_orphan) {
-		jbd_debug(4, "superblock will point to %lu\n", ino_next);
-		BUFFER_TRACE(sbi->s_sbh, "get_write_access");
-		err = ext3_journal_get_write_access(handle, sbi->s_sbh);
-		if (err)
-			goto out_brelse;
-		sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
-		err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
-	} else {
-		struct ext3_iloc iloc2;
-		struct inode *i_prev =
-			&list_entry(prev, struct ext3_inode_info, i_orphan)->vfs_inode;
-
-		jbd_debug(4, "orphan inode %lu will point to %lu\n",
-			  i_prev->i_ino, ino_next);
-		err = ext3_reserve_inode_write(handle, i_prev, &iloc2);
-		if (err)
-			goto out_brelse;
-		NEXT_ORPHAN(i_prev) = ino_next;
-		err = ext3_mark_iloc_dirty(handle, i_prev, &iloc2);
-	}
-	if (err)
-		goto out_brelse;
-	NEXT_ORPHAN(inode) = 0;
-	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-
-out_err:
-	ext3_std_error(inode->i_sb, err);
-out:
-	mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock);
-	return err;
-
-out_brelse:
-	brelse(iloc.bh);
-	goto out_err;
-}
-
-static int ext3_rmdir (struct inode * dir, struct dentry *dentry)
-{
-	int retval;
-	struct inode * inode;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de;
-	handle_t *handle;
-
-	/* Initialize quotas before so that eventual writes go in
-	 * separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
-
-	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	retval = -ENOENT;
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	if (!bh)
-		goto end_rmdir;
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = d_inode(dentry);
-
-	retval = -EIO;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_rmdir;
-
-	retval = -ENOTEMPTY;
-	if (!empty_dir (inode))
-		goto end_rmdir;
-
-	retval = ext3_delete_entry(handle, dir, de, bh);
-	if (retval)
-		goto end_rmdir;
-	if (inode->i_nlink != 2)
-		ext3_warning (inode->i_sb, "ext3_rmdir",
-			      "empty directory has nlink!=2 (%d)",
-			      inode->i_nlink);
-	inode->i_version++;
-	clear_nlink(inode);
-	/* There's no need to set i_disksize: the fact that i_nlink is
-	 * zero will ensure that the right thing happens during any
-	 * recovery. */
-	inode->i_size = 0;
-	ext3_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, inode);
-	drop_nlink(dir);
-	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
-
-end_rmdir:
-	ext3_journal_stop(handle);
-	brelse (bh);
-	return retval;
-}
-
-static int ext3_unlink(struct inode * dir, struct dentry *dentry)
-{
-	int retval;
-	struct inode * inode;
-	struct buffer_head * bh;
-	struct ext3_dir_entry_2 * de;
-	handle_t *handle;
-
-	trace_ext3_unlink_enter(dir, dentry);
-	/* Initialize quotas before so that eventual writes go
-	 * in separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
-
-	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	retval = -ENOENT;
-	bh = ext3_find_entry(dir, &dentry->d_name, &de);
-	if (!bh)
-		goto end_unlink;
-
-	inode = d_inode(dentry);
-
-	retval = -EIO;
-	if (le32_to_cpu(de->inode) != inode->i_ino)
-		goto end_unlink;
-
-	if (!inode->i_nlink) {
-		ext3_warning (inode->i_sb, "ext3_unlink",
-			      "Deleting nonexistent file (%lu), %d",
-			      inode->i_ino, inode->i_nlink);
-		set_nlink(inode, 1);
-	}
-	retval = ext3_delete_entry(handle, dir, de, bh);
-	if (retval)
-		goto end_unlink;
-	dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
-	drop_nlink(inode);
-	if (!inode->i_nlink)
-		ext3_orphan_add(handle, inode);
-	inode->i_ctime = dir->i_ctime;
-	ext3_mark_inode_dirty(handle, inode);
-	retval = 0;
-
-end_unlink:
-	ext3_journal_stop(handle);
-	brelse (bh);
-	trace_ext3_unlink_exit(dentry, retval);
-	return retval;
-}
-
-static int ext3_symlink (struct inode * dir,
-		struct dentry *dentry, const char * symname)
-{
-	handle_t *handle;
-	struct inode * inode;
-	int l, err, retries = 0;
-	int credits;
-
-	l = strlen(symname)+1;
-	if (l > dir->i_sb->s_blocksize)
-		return -ENAMETOOLONG;
-
-	dquot_initialize(dir);
-
-	if (l > EXT3_N_BLOCKS * 4) {
-		/*
-		 * For non-fast symlinks, we just allocate inode and put it on
-		 * orphan list in the first transaction => we need bitmap,
-		 * group descriptor, sb, inode block, quota blocks, and
-		 * possibly selinux xattr blocks.
-		 */
-		credits = 4 + EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
-			  EXT3_XATTR_TRANS_BLOCKS;
-	} else {
-		/*
-		 * Fast symlink. We have to add entry to directory
-		 * (EXT3_DATA_TRANS_BLOCKS + EXT3_INDEX_EXTRA_TRANS_BLOCKS),
-		 * allocate new inode (bitmap, group descriptor, inode block,
-		 * quota blocks, sb is already counted in previous macros).
-		 */
-		credits = EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-			  EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
-			  EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
-	}
-retry:
-	handle = ext3_journal_start(dir, credits);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFLNK|S_IRWXUGO);
-	err = PTR_ERR(inode);
-	if (IS_ERR(inode))
-		goto out_stop;
-
-	if (l > EXT3_N_BLOCKS * 4) {
-		inode->i_op = &ext3_symlink_inode_operations;
-		ext3_set_aops(inode);
-		/*
-		 * We cannot call page_symlink() with transaction started
-		 * because it calls into ext3_write_begin() which acquires page
-		 * lock which ranks below transaction start (and it can also
-		 * wait for journal commit if we are running out of space). So
-		 * we have to stop transaction now and restart it when symlink
-		 * contents are written.
-		 *
-		 * To keep fs consistent in case of crash, we have to put inode
-		 * to orphan list in the mean time.
-		 */
-		drop_nlink(inode);
-		err = ext3_orphan_add(handle, inode);
-		ext3_journal_stop(handle);
-		if (err)
-			goto err_drop_inode;
-		err = __page_symlink(inode, symname, l, 1);
-		if (err)
-			goto err_drop_inode;
-		/*
-		 * Now inode is being linked into dir (EXT3_DATA_TRANS_BLOCKS
-		 * + EXT3_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
-		 */
-		handle = ext3_journal_start(dir,
-				EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-				EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
-		if (IS_ERR(handle)) {
-			err = PTR_ERR(handle);
-			goto err_drop_inode;
-		}
-		set_nlink(inode, 1);
-		err = ext3_orphan_del(handle, inode);
-		if (err) {
-			ext3_journal_stop(handle);
-			drop_nlink(inode);
-			goto err_drop_inode;
-		}
-	} else {
-		inode->i_op = &ext3_fast_symlink_inode_operations;
-		inode->i_link = (char*)&EXT3_I(inode)->i_data;
-		memcpy(inode->i_link, symname, l);
-		inode->i_size = l-1;
-	}
-	EXT3_I(inode)->i_disksize = inode->i_size;
-	err = ext3_add_nondir(handle, dentry, inode);
-out_stop:
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-err_drop_inode:
-	unlock_new_inode(inode);
-	iput(inode);
-	return err;
-}
-
-static int ext3_link (struct dentry * old_dentry,
-		struct inode * dir, struct dentry *dentry)
-{
-	handle_t *handle;
-	struct inode *inode = d_inode(old_dentry);
-	int err, retries = 0;
-
-	if (inode->i_nlink >= EXT3_LINK_MAX)
-		return -EMLINK;
-
-	dquot_initialize(dir);
-
-retry:
-	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(dir))
-		handle->h_sync = 1;
-
-	inode->i_ctime = CURRENT_TIME_SEC;
-	inc_nlink(inode);
-	ihold(inode);
-
-	err = ext3_add_entry(handle, dentry, inode);
-	if (!err) {
-		ext3_mark_inode_dirty(handle, inode);
-		/* this can happen only for tmpfile being
-		 * linked the first time
-		 */
-		if (inode->i_nlink == 1)
-			ext3_orphan_del(handle, inode);
-		d_instantiate(dentry, inode);
-	} else {
-		drop_nlink(inode);
-		iput(inode);
-	}
-	ext3_journal_stop(handle);
-	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
-		goto retry;
-	return err;
-}
-
-#define PARENT_INO(buffer) \
-	(ext3_next_entry((struct ext3_dir_entry_2 *)(buffer))->inode)
-
-/*
- * Anybody can rename anything with this: the permission checks are left to the
- * higher-level routines.
- */
-static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
-			   struct inode * new_dir,struct dentry *new_dentry)
-{
-	handle_t *handle;
-	struct inode * old_inode, * new_inode;
-	struct buffer_head * old_bh, * new_bh, * dir_bh;
-	struct ext3_dir_entry_2 * old_de, * new_de;
-	int retval, flush_file = 0;
-
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
-
-	old_bh = new_bh = dir_bh = NULL;
-
-	/* Initialize quotas before so that eventual writes go
-	 * in separate transaction */
-	if (d_really_is_positive(new_dentry))
-		dquot_initialize(d_inode(new_dentry));
-	handle = ext3_journal_start(old_dir, 2 *
-					EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
-					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
-		handle->h_sync = 1;
-
-	old_bh = ext3_find_entry(old_dir, &old_dentry->d_name, &old_de);
-	/*
-	 *  The check of the inode number is _not_ due to possible IO errors.
-	 *  We might rmdir the source, keep it as pwd of some process
-	 *  and merrily kill the link to whatever was created under the
-	 *  same name. Goodbye sticky bit ;-<
-	 */
-	old_inode = d_inode(old_dentry);
-	retval = -ENOENT;
-	if (!old_bh || le32_to_cpu(old_de->inode) != old_inode->i_ino)
-		goto end_rename;
-
-	new_inode = d_inode(new_dentry);
-	new_bh = ext3_find_entry(new_dir, &new_dentry->d_name, &new_de);
-	if (new_bh) {
-		if (!new_inode) {
-			brelse (new_bh);
-			new_bh = NULL;
-		}
-	}
-	if (S_ISDIR(old_inode->i_mode)) {
-		if (new_inode) {
-			retval = -ENOTEMPTY;
-			if (!empty_dir (new_inode))
-				goto end_rename;
-		}
-		retval = -EIO;
-		dir_bh = ext3_dir_bread(handle, old_inode, 0, 0, &retval);
-		if (!dir_bh)
-			goto end_rename;
-		if (le32_to_cpu(PARENT_INO(dir_bh->b_data)) != old_dir->i_ino)
-			goto end_rename;
-		retval = -EMLINK;
-		if (!new_inode && new_dir!=old_dir &&
-				new_dir->i_nlink >= EXT3_LINK_MAX)
-			goto end_rename;
-	}
-	if (!new_bh) {
-		retval = ext3_add_entry (handle, new_dentry, old_inode);
-		if (retval)
-			goto end_rename;
-	} else {
-		BUFFER_TRACE(new_bh, "get write access");
-		retval = ext3_journal_get_write_access(handle, new_bh);
-		if (retval)
-			goto journal_error;
-		new_de->inode = cpu_to_le32(old_inode->i_ino);
-		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
-					      EXT3_FEATURE_INCOMPAT_FILETYPE))
-			new_de->file_type = old_de->file_type;
-		new_dir->i_version++;
-		new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
-		ext3_mark_inode_dirty(handle, new_dir);
-		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
-		retval = ext3_journal_dirty_metadata(handle, new_bh);
-		if (retval)
-			goto journal_error;
-		brelse(new_bh);
-		new_bh = NULL;
-	}
-
-	/*
-	 * Like most other Unix systems, set the ctime for inodes on a
-	 * rename.
-	 */
-	old_inode->i_ctime = CURRENT_TIME_SEC;
-	ext3_mark_inode_dirty(handle, old_inode);
-
-	/*
-	 * ok, that's it
-	 */
-	if (le32_to_cpu(old_de->inode) != old_inode->i_ino ||
-	    old_de->name_len != old_dentry->d_name.len ||
-	    strncmp(old_de->name, old_dentry->d_name.name, old_de->name_len) ||
-	    (retval = ext3_delete_entry(handle, old_dir,
-					old_de, old_bh)) == -ENOENT) {
-		/* old_de could have moved from under us during htree split, so
-		 * make sure that we are deleting the right entry.  We might
-		 * also be pointing to a stale entry in the unused part of
-		 * old_bh so just checking inum and the name isn't enough. */
-		struct buffer_head *old_bh2;
-		struct ext3_dir_entry_2 *old_de2;
-
-		old_bh2 = ext3_find_entry(old_dir, &old_dentry->d_name,
-					  &old_de2);
-		if (old_bh2) {
-			retval = ext3_delete_entry(handle, old_dir,
-						   old_de2, old_bh2);
-			brelse(old_bh2);
-		}
-	}
-	if (retval) {
-		ext3_warning(old_dir->i_sb, "ext3_rename",
-				"Deleting old file (%lu), %d, error=%d",
-				old_dir->i_ino, old_dir->i_nlink, retval);
-	}
-
-	if (new_inode) {
-		drop_nlink(new_inode);
-		new_inode->i_ctime = CURRENT_TIME_SEC;
-	}
-	old_dir->i_ctime = old_dir->i_mtime = CURRENT_TIME_SEC;
-	ext3_update_dx_flag(old_dir);
-	if (dir_bh) {
-		BUFFER_TRACE(dir_bh, "get_write_access");
-		retval = ext3_journal_get_write_access(handle, dir_bh);
-		if (retval)
-			goto journal_error;
-		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
-		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
-		retval = ext3_journal_dirty_metadata(handle, dir_bh);
-		if (retval) {
-journal_error:
-			ext3_std_error(new_dir->i_sb, retval);
-			goto end_rename;
-		}
-		drop_nlink(old_dir);
-		if (new_inode) {
-			drop_nlink(new_inode);
-		} else {
-			inc_nlink(new_dir);
-			ext3_update_dx_flag(new_dir);
-			ext3_mark_inode_dirty(handle, new_dir);
-		}
-	}
-	ext3_mark_inode_dirty(handle, old_dir);
-	if (new_inode) {
-		ext3_mark_inode_dirty(handle, new_inode);
-		if (!new_inode->i_nlink)
-			ext3_orphan_add(handle, new_inode);
-		if (ext3_should_writeback_data(new_inode))
-			flush_file = 1;
-	}
-	retval = 0;
-
-end_rename:
-	brelse (dir_bh);
-	brelse (old_bh);
-	brelse (new_bh);
-	ext3_journal_stop(handle);
-	if (retval == 0 && flush_file)
-		filemap_flush(old_inode->i_mapping);
-	return retval;
-}
-
-/*
- * directories can handle most operations...
- */
-const struct inode_operations ext3_dir_inode_operations = {
-	.create		= ext3_create,
-	.lookup		= ext3_lookup,
-	.link		= ext3_link,
-	.unlink		= ext3_unlink,
-	.symlink	= ext3_symlink,
-	.mkdir		= ext3_mkdir,
-	.rmdir		= ext3_rmdir,
-	.mknod		= ext3_mknod,
-	.tmpfile	= ext3_tmpfile,
-	.rename		= ext3_rename,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-};
-
-const struct inode_operations ext3_special_inode_operations = {
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-	.get_acl	= ext3_get_acl,
-	.set_acl	= ext3_set_acl,
-};
diff --git a/fs/ext3/namei.h b/fs/ext3/namei.h
deleted file mode 100644
index 46304d8..0000000
--- a/fs/ext3/namei.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*  linux/fs/ext3/namei.h
- *
- * Copyright (C) 2005 Simtec Electronics
- *	Ben Dooks <ben@simtec.co.uk>
- *
-*/
-
-extern struct dentry *ext3_get_parent(struct dentry *child);
-
-static inline struct buffer_head *ext3_dir_bread(handle_t *handle,
-						 struct inode *inode,
-						 int block, int create,
-						 int *err)
-{
-	struct buffer_head *bh;
-
-	bh = ext3_bread(handle, inode, block, create, err);
-
-	if (!bh && !(*err)) {
-		*err = -EIO;
-		ext3_error(inode->i_sb, __func__,
-			   "Directory hole detected on inode %lu\n",
-			   inode->i_ino);
-		return NULL;
-	}
-	return bh;
-}
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
deleted file mode 100644
index 2710565..0000000
--- a/fs/ext3/resize.c
+++ /dev/null
@@ -1,1117 +0,0 @@
-/*
- *  linux/fs/ext3/resize.c
- *
- * Support for resizing an ext3 filesystem while it is mounted.
- *
- * Copyright (C) 2001, 2002 Andreas Dilger <adilger@clusterfs.com>
- *
- * This could probably be made into a module, because it is not often in use.
- */
-
-
-#define EXT3FS_DEBUG
-
-#include "ext3.h"
-
-
-#define outside(b, first, last)	((b) < (first) || (b) >= (last))
-#define inside(b, first, last)	((b) >= (first) && (b) < (last))
-
-static int verify_group_input(struct super_block *sb,
-			      struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	ext3_fsblk_t start = le32_to_cpu(es->s_blocks_count);
-	ext3_fsblk_t end = start + input->blocks_count;
-	unsigned group = input->group;
-	ext3_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
-	unsigned overhead = ext3_bg_has_super(sb, group) ?
-		(1 + ext3_bg_num_gdb(sb, group) +
-		 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
-	ext3_fsblk_t metaend = start + overhead;
-	struct buffer_head *bh = NULL;
-	ext3_grpblk_t free_blocks_count;
-	int err = -EINVAL;
-
-	input->free_blocks_count = free_blocks_count =
-		input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: adding %s group %u: %u blocks "
-		       "(%d free, %u reserved)\n",
-		       ext3_bg_has_super(sb, input->group) ? "normal" :
-		       "no-super", input->group, input->blocks_count,
-		       free_blocks_count, input->reserved_blocks);
-
-	if (group != sbi->s_groups_count)
-		ext3_warning(sb, __func__,
-			     "Cannot add at group %u (only %lu groups)",
-			     input->group, sbi->s_groups_count);
-	else if ((start - le32_to_cpu(es->s_first_data_block)) %
-		 EXT3_BLOCKS_PER_GROUP(sb))
-		ext3_warning(sb, __func__, "Last group not full");
-	else if (input->reserved_blocks > input->blocks_count / 5)
-		ext3_warning(sb, __func__, "Reserved blocks too high (%u)",
-			     input->reserved_blocks);
-	else if (free_blocks_count < 0)
-		ext3_warning(sb, __func__, "Bad blocks count %u",
-			     input->blocks_count);
-	else if (!(bh = sb_bread(sb, end - 1)))
-		ext3_warning(sb, __func__,
-			     "Cannot read last block ("E3FSBLK")",
-			     end - 1);
-	else if (outside(input->block_bitmap, start, end))
-		ext3_warning(sb, __func__,
-			     "Block bitmap not in group (block %u)",
-			     input->block_bitmap);
-	else if (outside(input->inode_bitmap, start, end))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap not in group (block %u)",
-			     input->inode_bitmap);
-	else if (outside(input->inode_table, start, end) ||
-	         outside(itend - 1, start, end))
-		ext3_warning(sb, __func__,
-			     "Inode table not in group (blocks %u-"E3FSBLK")",
-			     input->inode_table, itend - 1);
-	else if (input->inode_bitmap == input->block_bitmap)
-		ext3_warning(sb, __func__,
-			     "Block bitmap same as inode bitmap (%u)",
-			     input->block_bitmap);
-	else if (inside(input->block_bitmap, input->inode_table, itend))
-		ext3_warning(sb, __func__,
-			     "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
-			     input->block_bitmap, input->inode_table, itend-1);
-	else if (inside(input->inode_bitmap, input->inode_table, itend))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
-			     input->inode_bitmap, input->inode_table, itend-1);
-	else if (inside(input->block_bitmap, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Block bitmap (%u) in GDT table"
-			     " ("E3FSBLK"-"E3FSBLK")",
-			     input->block_bitmap, start, metaend - 1);
-	else if (inside(input->inode_bitmap, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Inode bitmap (%u) in GDT table"
-			     " ("E3FSBLK"-"E3FSBLK")",
-			     input->inode_bitmap, start, metaend - 1);
-	else if (inside(input->inode_table, start, metaend) ||
-	         inside(itend - 1, start, metaend))
-		ext3_warning(sb, __func__,
-			     "Inode table (%u-"E3FSBLK") overlaps"
-			     "GDT table ("E3FSBLK"-"E3FSBLK")",
-			     input->inode_table, itend - 1, start, metaend - 1);
-	else
-		err = 0;
-	brelse(bh);
-
-	return err;
-}
-
-static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
-				  ext3_fsblk_t blk)
-{
-	struct buffer_head *bh;
-	int err;
-
-	bh = sb_getblk(sb, blk);
-	if (unlikely(!bh))
-		return ERR_PTR(-ENOMEM);
-	if ((err = ext3_journal_get_write_access(handle, bh))) {
-		brelse(bh);
-		bh = ERR_PTR(err);
-	} else {
-		lock_buffer(bh);
-		memset(bh->b_data, 0, sb->s_blocksize);
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-	}
-
-	return bh;
-}
-
-/*
- * To avoid calling the atomic setbit hundreds or thousands of times, we only
- * need to use it within a single byte (to ensure we get endianness right).
- * We can use memset for the rest of the bitmap as there are no other users.
- */
-static void mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
-{
-	int i;
-
-	if (start_bit >= end_bit)
-		return;
-
-	ext3_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
-	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
-		ext3_set_bit(i, bitmap);
-	if (i < end_bit)
-		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
-}
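As a worked example of the comment above (hypothetical values): mark_bitmap_end(5, 32, bitmap) sets bits 5, 6 and 7 one at a time with ext3_set_bit(), so the partial leading byte is filled with the correct endianness, and a single memset() of bytes 1 through 3 to 0xff then covers bits 8-31 in one go.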
-
-/*
- * If we have fewer than thresh credits, extend by EXT3_MAX_TRANS_DATA.
- * If that fails, restart the transaction & regain write access for the
- * buffer head which is used for block_bitmap modifications.
- */
-static int extend_or_restart_transaction(handle_t *handle, int thresh,
-					 struct buffer_head *bh)
-{
-	int err;
-
-	if (handle->h_buffer_credits >= thresh)
-		return 0;
-
-	err = ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA);
-	if (err < 0)
-		return err;
-	if (err) {
-		err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA);
-		if (err)
-			return err;
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-/*
- * Set up the block and inode bitmaps, and the inode table for the new group.
- * This doesn't need to be part of the main transaction, since we are only
- * changing blocks outside the actual filesystem.  We still do journaling to
- * ensure the recovery is correct in case of a failure just after resize.
- * If any part of this fails, we simply abort the resize.
- */
-static int setup_new_group_blocks(struct super_block *sb,
-				  struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	ext3_fsblk_t start = ext3_group_first_block_no(sb, input->group);
-	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
-		le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0;
-	unsigned long gdblocks = ext3_bg_num_gdb(sb, input->group);
-	struct buffer_head *bh;
-	handle_t *handle;
-	ext3_fsblk_t block;
-	ext3_grpblk_t bit;
-	int i;
-	int err = 0, err2;
-
-	/* This transaction may be extended/restarted along the way */
-	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
-
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
-	if (IS_ERR(bh = bclean(handle, sb, input->block_bitmap))) {
-		err = PTR_ERR(bh);
-		goto exit_journal;
-	}
-
-	if (ext3_bg_has_super(sb, input->group)) {
-		ext3_debug("mark backup superblock %#04lx (+0)\n", start);
-		ext3_set_bit(0, bh->b_data);
-	}
-
-	/* Copy all of the GDT blocks into the backup in this group */
-	for (i = 0, bit = 1, block = start + 1;
-	     i < gdblocks; i++, block++, bit++) {
-		struct buffer_head *gdb;
-
-		ext3_debug("update backup group %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		gdb = sb_getblk(sb, block);
-		if (unlikely(!gdb)) {
-			err = -ENOMEM;
-			goto exit_bh;
-		}
-		if ((err = ext3_journal_get_write_access(handle, gdb))) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		lock_buffer(gdb);
-		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
-		set_buffer_uptodate(gdb);
-		unlock_buffer(gdb);
-		err = ext3_journal_dirty_metadata(handle, gdb);
-		if (err) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		ext3_set_bit(bit, bh->b_data);
-		brelse(gdb);
-	}
-
-	/* Zero out all of the reserved backup group descriptor table blocks */
-	for (i = 0, bit = gdblocks + 1, block = start + bit;
-	     i < reserved_gdb; i++, block++, bit++) {
-		struct buffer_head *gdb;
-
-		ext3_debug("clear reserved block %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		if (IS_ERR(gdb = bclean(handle, sb, block))) {
-			err = PTR_ERR(gdb);
-			goto exit_bh;
-		}
-		err = ext3_journal_dirty_metadata(handle, gdb);
-		if (err) {
-			brelse(gdb);
-			goto exit_bh;
-		}
-		ext3_set_bit(bit, bh->b_data);
-		brelse(gdb);
-	}
-	ext3_debug("mark block bitmap %#04x (+%ld)\n", input->block_bitmap,
-		   input->block_bitmap - start);
-	ext3_set_bit(input->block_bitmap - start, bh->b_data);
-	ext3_debug("mark inode bitmap %#04x (+%ld)\n", input->inode_bitmap,
-		   input->inode_bitmap - start);
-	ext3_set_bit(input->inode_bitmap - start, bh->b_data);
-
-	/* Zero out all of the inode table blocks */
-	for (i = 0, block = input->inode_table, bit = block - start;
-	     i < sbi->s_itb_per_group; i++, bit++, block++) {
-		struct buffer_head *it;
-
-		ext3_debug("clear inode block %#04lx (+%d)\n", block, bit);
-
-		err = extend_or_restart_transaction(handle, 1, bh);
-		if (err)
-			goto exit_bh;
-
-		if (IS_ERR(it = bclean(handle, sb, block))) {
-			err = PTR_ERR(it);
-			goto exit_bh;
-		}
-		err = ext3_journal_dirty_metadata(handle, it);
-		if (err) {
-			brelse(it);
-			goto exit_bh;
-		}
-		brelse(it);
-		ext3_set_bit(bit, bh->b_data);
-	}
-
-	err = extend_or_restart_transaction(handle, 2, bh);
-	if (err)
-		goto exit_bh;
-
-	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	err = ext3_journal_dirty_metadata(handle, bh);
-	if (err)
-		goto exit_bh;
-	brelse(bh);
-
-	/* Mark unused entries in inode bitmap used */
-	ext3_debug("clear inode bitmap %#04x (+%ld)\n",
-		   input->inode_bitmap, input->inode_bitmap - start);
-	if (IS_ERR(bh = bclean(handle, sb, input->inode_bitmap))) {
-		err = PTR_ERR(bh);
-		goto exit_journal;
-	}
-
-	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
-			bh->b_data);
-	err = ext3_journal_dirty_metadata(handle, bh);
-exit_bh:
-	brelse(bh);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-
-	return err;
-}
-
-/*
- * Iterate through the groups which hold BACKUP superblock/GDT copies in an
- * ext3 filesystem.  The counters should be initialized to 1, 5, and 7 before
- * calling this for the first time.  In a sparse filesystem it will be the
- * sequence of powers of 3, 5, and 7: 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
- * For a non-sparse filesystem it will be every group: 1, 2, 3, 4, ...
- */
-static unsigned ext3_list_backups(struct super_block *sb, unsigned *three,
-				  unsigned *five, unsigned *seven)
-{
-	unsigned *min = three;
-	int mult = 3;
-	unsigned ret;
-
-	if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ret = *min;
-		*min += 1;
-		return ret;
-	}
-
-	if (*five < *min) {
-		min = five;
-		mult = 5;
-	}
-	if (*seven < *min) {
-		min = seven;
-		mult = 7;
-	}
-
-	ret = *min;
-	*min *= mult;
-
-	return ret;
-}
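A minimal sketch of how a caller is expected to drive this iterator, matching the comment above; sb and ngroups are assumed to be in scope, and process_backup_group() is a purely hypothetical helper:

	unsigned three = 1, five = 5, seven = 7;	/* required starting values */
	unsigned grp;

	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < ngroups) {
		/* with SPARSE_SUPER this visits groups 1, 3, 5, 7, 9, 25, 27, ... */
		process_backup_group(grp);		/* hypothetical */
	}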
-
-/*
- * Check that all of the backup GDT blocks are held in the primary GDT block.
- * It is assumed that they are stored in group order.  Returns the number of
- * groups in current filesystem that have BACKUPS, or -ve error code.
- */
-static int verify_reserved_gdb(struct super_block *sb,
-			       struct buffer_head *primary)
-{
-	const ext3_fsblk_t blk = primary->b_blocknr;
-	const unsigned long end = EXT3_SB(sb)->s_groups_count;
-	unsigned three = 1;
-	unsigned five = 5;
-	unsigned seven = 7;
-	unsigned grp;
-	__le32 *p = (__le32 *)primary->b_data;
-	int gdbackups = 0;
-
-	while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
-		if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
-			ext3_warning(sb, __func__,
-				     "reserved GDT "E3FSBLK
-				     " missing grp %d ("E3FSBLK")",
-				     blk, grp,
-				     grp * EXT3_BLOCKS_PER_GROUP(sb) + blk);
-			return -EINVAL;
-		}
-		if (++gdbackups > EXT3_ADDR_PER_BLOCK(sb))
-			return -EFBIG;
-	}
-
-	return gdbackups;
-}
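To make the check above concrete (hypothetical numbers): with the first data block at 0 and 32768 blocks per group, a reserved GDT block whose primary copy lives at block 20 should record its group-3 backup at 3 * 32768 + 20 = 98324; verify_reserved_gdb() walks every backup group returned by ext3_list_backups() and confirms exactly that, bailing out with -EINVAL on the first mismatch.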
-
-/*
- * Called when we need to bring a reserved group descriptor table block into
- * use from the resize inode.  The primary copy of the new GDT block currently
- * is an indirect block (under the double indirect block in the resize inode).
- * The new backup GDT blocks will be stored as leaf blocks in this indirect
- * block, in group order.  Even though we know all the block numbers we need,
- * we check to ensure that the resize inode has actually reserved these blocks.
- *
- * Don't need to update the block bitmaps because the blocks are still in use.
- *
- * We get all of the error cases out of the way, so that we are sure to not
- * fail once we start modifying the data on disk, because JBD has no rollback.
- */
-static int add_new_gdb(handle_t *handle, struct inode *inode,
-		       struct ext3_new_group_data *input,
-		       struct buffer_head **primary)
-{
-	struct super_block *sb = inode->i_sb;
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-	unsigned long gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
-	ext3_fsblk_t gdblock = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
-	struct buffer_head **o_group_desc, **n_group_desc;
-	struct buffer_head *dind;
-	int gdbackups;
-	struct ext3_iloc iloc;
-	__le32 *data;
-	int err;
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG
-		       "EXT3-fs: ext3_add_new_gdb: adding group block %lu\n",
-		       gdb_num);
-
-	/*
-	 * If we are not using the primary superblock/GDT copy don't resize,
-	 * because the user tools have no way of handling this.  Probably a
-	 * bad time to do it anyway.
-	 */
-	if (EXT3_SB(sb)->s_sbh->b_blocknr !=
-	    le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
-		ext3_warning(sb, __func__,
-			"won't resize using backup superblock at %llu",
-			(unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
-		return -EPERM;
-	}
-
-	*primary = sb_bread(sb, gdblock);
-	if (!*primary)
-		return -EIO;
-
-	if ((gdbackups = verify_reserved_gdb(sb, *primary)) < 0) {
-		err = gdbackups;
-		goto exit_bh;
-	}
-
-	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_bh;
-	}
-
-	data = (__le32 *)dind->b_data;
-	if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
-		ext3_warning(sb, __func__,
-			     "new group %u GDT block "E3FSBLK" not reserved",
-			     input->group, gdblock);
-		err = -EINVAL;
-		goto exit_dind;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh)))
-		goto exit_dind;
-
-	if ((err = ext3_journal_get_write_access(handle, *primary)))
-		goto exit_sbh;
-
-	if ((err = ext3_journal_get_write_access(handle, dind)))
-		goto exit_primary;
-
-	/* ext3_reserve_inode_write() gets a reference on the iloc */
-	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
-		goto exit_dindj;
-
-	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-			GFP_NOFS);
-	if (!n_group_desc) {
-		err = -ENOMEM;
-		ext3_warning (sb, __func__,
-			      "not enough memory for %lu groups", gdb_num + 1);
-		goto exit_inode;
-	}
-
-	/*
-	 * Finally, we have all of the possible failures behind us...
-	 *
-	 * Remove new GDT block from inode double-indirect block and clear out
-	 * the new GDT block for use (which also "frees" the backup GDT blocks
-	 * from the reserved inode).  We don't need to change the bitmaps for
-	 * these blocks, because they are marked as in-use from being in the
-	 * reserved inode, and will become GDT blocks (primary and backup).
-	 */
-	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
-	err = ext3_journal_dirty_metadata(handle, dind);
-	if (err)
-		goto exit_group_desc;
-	brelse(dind);
-	dind = NULL;
-	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
-	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
-	if (err)
-		goto exit_group_desc;
-	memset((*primary)->b_data, 0, sb->s_blocksize);
-	err = ext3_journal_dirty_metadata(handle, *primary);
-	if (err)
-		goto exit_group_desc;
-
-	o_group_desc = EXT3_SB(sb)->s_group_desc;
-	memcpy(n_group_desc, o_group_desc,
-	       EXT3_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
-	n_group_desc[gdb_num] = *primary;
-	EXT3_SB(sb)->s_group_desc = n_group_desc;
-	EXT3_SB(sb)->s_gdb_count++;
-	kfree(o_group_desc);
-
-	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	if (err)
-		goto exit_inode;
-
-	return 0;
-
-exit_group_desc:
-	kfree(n_group_desc);
-exit_inode:
-	//ext3_journal_release_buffer(handle, iloc.bh);
-	brelse(iloc.bh);
-exit_dindj:
-	//ext3_journal_release_buffer(handle, dind);
-exit_primary:
-	//ext3_journal_release_buffer(handle, *primary);
-exit_sbh:
-	//ext3_journal_release_buffer(handle, *primary);
-exit_dind:
-	brelse(dind);
-exit_bh:
-	brelse(*primary);
-
-	ext3_debug("leaving with error %d\n", err);
-	return err;
-}
-
-/*
- * Called when we are adding a new group which has a backup copy of each of
- * the GDT blocks (i.e. sparse group) and there are reserved GDT blocks.
- * We need to add these reserved backup GDT blocks to the resize inode, so
- * that they are kept for future resizing and not allocated to files.
- *
- * Each reserved backup GDT block will go into a different indirect block.
- * The indirect blocks are actually the primary reserved GDT blocks,
- * so we know in advance what their block numbers are.  We only get the
- * double-indirect block to verify it is pointing to the primary reserved
- * GDT blocks so we don't overwrite a data block by accident.  The reserved
- * backup GDT blocks are stored in their reserved primary GDT block.
- */
-static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
-			      struct ext3_new_group_data *input)
-{
-	struct super_block *sb = inode->i_sb;
-	int reserved_gdb =le16_to_cpu(EXT3_SB(sb)->s_es->s_reserved_gdt_blocks);
-	struct buffer_head **primary;
-	struct buffer_head *dind;
-	struct ext3_iloc iloc;
-	ext3_fsblk_t blk;
-	__le32 *data, *end;
-	int gdbackups = 0;
-	int res, i;
-	int err;
-
-	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
-	if (!primary)
-		return -ENOMEM;
-
-	data = EXT3_I(inode)->i_data + EXT3_DIND_BLOCK;
-	dind = sb_bread(sb, le32_to_cpu(*data));
-	if (!dind) {
-		err = -EIO;
-		goto exit_free;
-	}
-
-	blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
-	data = (__le32 *)dind->b_data + (EXT3_SB(sb)->s_gdb_count %
-					 EXT3_ADDR_PER_BLOCK(sb));
-	end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
-
-	/* Get each reserved primary GDT block and verify it holds backups */
-	for (res = 0; res < reserved_gdb; res++, blk++) {
-		if (le32_to_cpu(*data) != blk) {
-			ext3_warning(sb, __func__,
-				     "reserved block "E3FSBLK
-				     " not at offset %ld",
-				     blk,
-				     (long)(data - (__le32 *)dind->b_data));
-			err = -EINVAL;
-			goto exit_bh;
-		}
-		primary[res] = sb_bread(sb, blk);
-		if (!primary[res]) {
-			err = -EIO;
-			goto exit_bh;
-		}
-		if ((gdbackups = verify_reserved_gdb(sb, primary[res])) < 0) {
-			brelse(primary[res]);
-			err = gdbackups;
-			goto exit_bh;
-		}
-		if (++data >= end)
-			data = (__le32 *)dind->b_data;
-	}
-
-	for (i = 0; i < reserved_gdb; i++) {
-		if ((err = ext3_journal_get_write_access(handle, primary[i]))) {
-			/*
-			int j;
-			for (j = 0; j < i; j++)
-				ext3_journal_release_buffer(handle, primary[j]);
-			 */
-			goto exit_bh;
-		}
-	}
-
-	if ((err = ext3_reserve_inode_write(handle, inode, &iloc)))
-		goto exit_bh;
-
-	/*
-	 * Finally we can add each of the reserved backup GDT blocks from
-	 * the new group to its reserved primary GDT block.
-	 */
-	blk = input->group * EXT3_BLOCKS_PER_GROUP(sb);
-	for (i = 0; i < reserved_gdb; i++) {
-		int err2;
-		data = (__le32 *)primary[i]->b_data;
-		/* printk("reserving backup %lu[%u] = %lu\n",
-		       primary[i]->b_blocknr, gdbackups,
-		       blk + primary[i]->b_blocknr); */
-		data[gdbackups] = cpu_to_le32(blk + primary[i]->b_blocknr);
-		err2 = ext3_journal_dirty_metadata(handle, primary[i]);
-		if (!err)
-			err = err2;
-	}
-	inode->i_blocks += reserved_gdb * sb->s_blocksize >> 9;
-	ext3_mark_iloc_dirty(handle, inode, &iloc);
-
-exit_bh:
-	while (--res >= 0)
-		brelse(primary[res]);
-	brelse(dind);
-
-exit_free:
-	kfree(primary);
-
-	return err;
-}
-
-/*
- * Update the backup copies of the ext3 metadata.  These don't need to be part
- * of the main resize transaction, because e2fsck will re-write them if there
- * is a problem (basically only OOM will cause a problem).  However, we
- * _should_ update the backups if possible, in case the primary gets trashed
- * for some reason and we need to run e2fsck from a backup superblock.  The
- * important part is that the new block and inode counts are in the backup
- * superblocks, and the location of the new group metadata in the GDT backups.
- *
- * We do not need to take the s_resize_lock for this, because these
- * blocks are not otherwise touched by the filesystem code when it is
- * mounted.  We don't need to worry about last changing from
- * sbi->s_groups_count, because the worst that can happen is that we
- * do not copy the full number of backups at this time.  The resize
- * which changed s_groups_count will back up again.
- */
-static void update_backups(struct super_block *sb,
-			   int blk_off, char *data, int size)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	const unsigned long last = sbi->s_groups_count;
-	const int bpg = EXT3_BLOCKS_PER_GROUP(sb);
-	unsigned three = 1;
-	unsigned five = 5;
-	unsigned seven = 7;
-	unsigned group;
-	int rest = sb->s_blocksize - size;
-	handle_t *handle;
-	int err = 0, err2;
-
-	handle = ext3_journal_start_sb(sb, EXT3_MAX_TRANS_DATA);
-	if (IS_ERR(handle)) {
-		group = 1;
-		err = PTR_ERR(handle);
-		goto exit_err;
-	}
-
-	while ((group = ext3_list_backups(sb, &three, &five, &seven)) < last) {
-		struct buffer_head *bh;
-
-		/* Out of journal space, and can't get more - abort - so sad */
-		if (handle->h_buffer_credits == 0 &&
-		    ext3_journal_extend(handle, EXT3_MAX_TRANS_DATA) &&
-		    (err = ext3_journal_restart(handle, EXT3_MAX_TRANS_DATA)))
-			break;
-
-		bh = sb_getblk(sb, group * bpg + blk_off);
-		if (unlikely(!bh)) {
-			err = -ENOMEM;
-			break;
-		}
-		ext3_debug("update metadata backup %#04lx\n",
-			  (unsigned long)bh->b_blocknr);
-		if ((err = ext3_journal_get_write_access(handle, bh))) {
-			brelse(bh);
-			break;
-		}
-		lock_buffer(bh);
-		memcpy(bh->b_data, data, size);
-		if (rest)
-			memset(bh->b_data + size, 0, rest);
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		err = ext3_journal_dirty_metadata(handle, bh);
-		brelse(bh);
-		if (err)
-			break;
-	}
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-
-	/*
-	 * Ugh! Need to have e2fsck write the backup copies.  It is too
-	 * late to revert the resize, we shouldn't fail just because of
-	 * the backup copies (they are only needed in case of corruption).
-	 *
-	 * However, if we got here we have a journal problem too, so we
-	 * can't really start a transaction to mark the superblock.
-	 * Chicken out and just set the flag in the hope it will be written
-	 * to disk, and if not - we will simply wait until next fsck.
-	 */
-exit_err:
-	if (err) {
-		ext3_warning(sb, __func__,
-			     "can't update backup for group %d (err %d), "
-			     "forcing fsck on next reboot", group, err);
-		sbi->s_mount_state &= ~EXT3_VALID_FS;
-		sbi->s_es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
-		mark_buffer_dirty(sbi->s_sbh);
-	}
-}
-
-/* Add group descriptor data to an existing or new group descriptor block.
- * Ensure we handle all possible error conditions _before_ we start modifying
- * the filesystem, because we cannot abort the transaction and not have it
- * write the data to disk.
- *
- * If we are on a GDT block boundary, we need to get the reserved GDT block.
- * Otherwise, we may need to add backup GDT blocks for a sparse group.
- *
- * We only need to hold the superblock lock while we are actually adding
- * in the new group's counts to the superblock.  Prior to that we have
- * not really "added" the group at all.  We re-check that we are still
- * adding in the last group in case things have changed since verifying.
- */
-int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	int reserved_gdb = ext3_bg_has_super(sb, input->group) ?
-		le16_to_cpu(es->s_reserved_gdt_blocks) : 0;
-	struct buffer_head *primary = NULL;
-	struct ext3_group_desc *gdp;
-	struct inode *inode = NULL;
-	handle_t *handle;
-	int gdb_off, gdb_num;
-	int err, err2;
-
-	gdb_num = input->group / EXT3_DESC_PER_BLOCK(sb);
-	gdb_off = input->group % EXT3_DESC_PER_BLOCK(sb);
-
-	if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ext3_warning(sb, __func__,
-			     "Can't resize non-sparse filesystem further");
-		return -EPERM;
-	}
-
-	if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
-	    le32_to_cpu(es->s_blocks_count)) {
-		ext3_warning(sb, __func__, "blocks_count overflow\n");
-		return -EINVAL;
-	}
-
-	if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
-	    le32_to_cpu(es->s_inodes_count)) {
-		ext3_warning(sb, __func__, "inodes_count overflow\n");
-		return -EINVAL;
-	}
-
-	if (reserved_gdb || gdb_off == 0) {
-		if (!EXT3_HAS_COMPAT_FEATURE(sb,
-					     EXT3_FEATURE_COMPAT_RESIZE_INODE)
-		    || !le16_to_cpu(es->s_reserved_gdt_blocks)) {
-			ext3_warning(sb, __func__,
-				     "No reserved GDT blocks, can't resize");
-			return -EPERM;
-		}
-		inode = ext3_iget(sb, EXT3_RESIZE_INO);
-		if (IS_ERR(inode)) {
-			ext3_warning(sb, __func__,
-				     "Error opening resize inode");
-			return PTR_ERR(inode);
-		}
-	}
-
-	if ((err = verify_group_input(sb, input)))
-		goto exit_put;
-
-	if ((err = setup_new_group_blocks(sb, input)))
-		goto exit_put;
-
-	/*
-	 * We will always be modifying at least the superblock and a GDT
-	 * block.  If we are adding a group past the last current GDT block,
-	 * we will also modify the inode and the dindirect block.  If we
-	 * are adding a group with superblock/GDT backups, we will also
-	 * modify each of the reserved GDT dindirect blocks.
-	 */
-	handle = ext3_journal_start_sb(sb,
-				       ext3_bg_has_super(sb, input->group) ?
-				       3 + reserved_gdb : 4);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		goto exit_put;
-	}
-
-	mutex_lock(&sbi->s_resize_lock);
-	if (input->group != sbi->s_groups_count) {
-		ext3_warning(sb, __func__,
-			     "multiple resizers run on filesystem!");
-		err = -EBUSY;
-		goto exit_journal;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle, sbi->s_sbh)))
-		goto exit_journal;
-
-	/*
-	 * We will only either add reserved group blocks to a backup group
-	 * or remove reserved blocks for the first group in a new group block.
-	 * Doing both would mean more complex code, and sane people don't
-	 * use non-sparse filesystems anymore.  This is already checked above.
-	 */
-	if (gdb_off) {
-		primary = sbi->s_group_desc[gdb_num];
-		if ((err = ext3_journal_get_write_access(handle, primary)))
-			goto exit_journal;
-
-		if (reserved_gdb && ext3_bg_num_gdb(sb, input->group) &&
-		    (err = reserve_backup_gdb(handle, inode, input)))
-			goto exit_journal;
-	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
-		goto exit_journal;
-
-	/*
-	 * OK, now we've set up the new group.  Time to make it active.
-	 *
-	 * We do not lock all allocations via s_resize_lock
-	 * so we have to be safe wrt. concurrent accesses the group
-	 * data.  So we need to be careful to set all of the relevant
-	 * group descriptor data etc. *before* we enable the group.
-	 *
-	 * The key field here is sbi->s_groups_count: as long as
-	 * that retains its old value, nobody is going to access the new
-	 * group.
-	 *
-	 * So first we update all the descriptor metadata for the new
-	 * group; then we update the total disk blocks count; then we
-	 * update the groups count to enable the group; then finally we
-	 * update the free space counts so that the system can start
-	 * using the new disk blocks.
-	 */
-
-	/* Update group descriptor block for new group */
-	gdp = (struct ext3_group_desc *)primary->b_data + gdb_off;
-
-	gdp->bg_block_bitmap = cpu_to_le32(input->block_bitmap);
-	gdp->bg_inode_bitmap = cpu_to_le32(input->inode_bitmap);
-	gdp->bg_inode_table = cpu_to_le32(input->inode_table);
-	gdp->bg_free_blocks_count = cpu_to_le16(input->free_blocks_count);
-	gdp->bg_free_inodes_count = cpu_to_le16(EXT3_INODES_PER_GROUP(sb));
-
-	/*
-	 * Make the new blocks and inodes valid next.  We do this before
-	 * increasing the group count so that once the group is enabled,
-	 * all of its blocks and inodes are already valid.
-	 *
-	 * We always allocate group-by-group, then block-by-block or
-	 * inode-by-inode within a group, so enabling these
-	 * blocks/inodes before the group is live won't actually let us
-	 * allocate the new space yet.
-	 */
-	le32_add_cpu(&es->s_blocks_count, input->blocks_count);
-	le32_add_cpu(&es->s_inodes_count, EXT3_INODES_PER_GROUP(sb));
-
-	/*
-	 * We need to protect s_groups_count against other CPUs seeing
-	 * inconsistent state in the superblock.
-	 *
-	 * The precise rules we use are:
-	 *
-	 * * Writers of s_groups_count *must* hold s_resize_lock
-	 * AND
-	 * * Writers must perform a smp_wmb() after updating all dependent
-	 *   data and before modifying the groups count
-	 *
-	 * * Readers must hold s_resize_lock over the access
-	 * OR
-	 * * Readers must perform an smp_rmb() after reading the groups count
-	 *   and before reading any dependent data.
-	 *
-	 * NB. These rules can be relaxed when checking the group count
-	 * while freeing data, as we can only allocate from a block
-	 * group after serialising against the group count, and we can
-	 * only then free after serialising in turn against that
-	 * allocation.
-	 */
-	smp_wmb();
-
-	/* Update the global fs size fields */
-	sbi->s_groups_count++;
-
-	err = ext3_journal_dirty_metadata(handle, primary);
-	if (err)
-		goto exit_journal;
-
-	/* Update the reserved block counts only once the new group is
-	 * active. */
-	le32_add_cpu(&es->s_r_blocks_count, input->reserved_blocks);
-
-	/* Update the free space counts */
-	percpu_counter_add(&sbi->s_freeblocks_counter,
-			   input->free_blocks_count);
-	percpu_counter_add(&sbi->s_freeinodes_counter,
-			   EXT3_INODES_PER_GROUP(sb));
-
-	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
-
-exit_journal:
-	mutex_unlock(&sbi->s_resize_lock);
-	if ((err2 = ext3_journal_stop(handle)) && !err)
-		err = err2;
-	if (!err) {
-		update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
-			       sizeof(struct ext3_super_block));
-		update_backups(sb, primary->b_blocknr, primary->b_data,
-			       primary->b_size);
-	}
-exit_put:
-	iput(inode);
-	return err;
-} /* ext3_group_add */
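The locking comment inside ext3_group_add() pairs the smp_wmb() before the s_groups_count update with an smp_rmb() on the reader side. A minimal sketch of a lock-free reader obeying those rules (illustrative only, not a copy of the allocator code):

	/* Sample the group count first... */
	unsigned long ngroups = EXT3_SB(sb)->s_groups_count;

	/* ...then order all later reads of per-group data after it. */
	smp_rmb();

	/* Groups below ngroups now have fully initialised descriptors. */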
-
-/* Extend the filesystem to the new number of blocks specified.  This entry
- * point is only used to extend the current filesystem to the end of the last
- * existing group.  It can be accessed via ioctl, or by "remount,resize=<size>"
- * for emergencies (because it has no dependencies on reserved blocks).
- *
- * If we _really_ wanted, we could use default values to call ext3_group_add()
- * allow the "remount" trick to work for arbitrary resizing, assuming enough
- * GDT blocks are reserved to grow to the desired size.
- */
-int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
-		      ext3_fsblk_t n_blocks_count)
-{
-	ext3_fsblk_t o_blocks_count;
-	ext3_grpblk_t last;
-	ext3_grpblk_t add;
-	struct buffer_head * bh;
-	handle_t *handle;
-	int err;
-	unsigned long freed_blocks;
-
-	/* We don't need to worry about locking wrt other resizers just
-	 * yet: we're going to revalidate es->s_blocks_count after
-	 * taking the s_resize_lock below. */
-	o_blocks_count = le32_to_cpu(es->s_blocks_count);
-
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: extending last group from "E3FSBLK
-		       " up to "E3FSBLK" blocks\n",
-		       o_blocks_count, n_blocks_count);
-
-	if (n_blocks_count == 0 || n_blocks_count == o_blocks_count)
-		return 0;
-
-	if (n_blocks_count > (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) {
-		printk(KERN_ERR "EXT3-fs: filesystem on %s:"
-			" too large to resize to "E3FSBLK" blocks safely\n",
-			sb->s_id, n_blocks_count);
-		if (sizeof(sector_t) < 8)
-			ext3_warning(sb, __func__,
-			"CONFIG_LBDAF not enabled\n");
-		return -EINVAL;
-	}
-
-	if (n_blocks_count < o_blocks_count) {
-		ext3_warning(sb, __func__,
-			     "can't shrink FS - resize aborted");
-		return -EBUSY;
-	}
-
-	/* Handle the remaining blocks in the last group only. */
-	last = (o_blocks_count - le32_to_cpu(es->s_first_data_block)) %
-		EXT3_BLOCKS_PER_GROUP(sb);
-
-	if (last == 0) {
-		ext3_warning(sb, __func__,
-			     "need to use ext2online to resize further");
-		return -EPERM;
-	}
-
-	add = EXT3_BLOCKS_PER_GROUP(sb) - last;
-
-	if (o_blocks_count + add < o_blocks_count) {
-		ext3_warning(sb, __func__, "blocks_count overflow");
-		return -EINVAL;
-	}
-
-	if (o_blocks_count + add > n_blocks_count)
-		add = n_blocks_count - o_blocks_count;
-
-	if (o_blocks_count + add < n_blocks_count)
-		ext3_warning(sb, __func__,
-			     "will only finish group ("E3FSBLK
-			     " blocks, %u new)",
-			     o_blocks_count + add, add);
-
-	/* See if the device is actually as big as what was requested */
-	bh = sb_bread(sb, o_blocks_count + add -1);
-	if (!bh) {
-		ext3_warning(sb, __func__,
-			     "can't read last block, resize aborted");
-		return -ENOSPC;
-	}
-	brelse(bh);
-
-	/* We will update the superblock, one block bitmap, and
-	 * one group descriptor via ext3_free_blocks().
-	 */
-	handle = ext3_journal_start_sb(sb, 3);
-	if (IS_ERR(handle)) {
-		err = PTR_ERR(handle);
-		ext3_warning(sb, __func__, "error %d on journal start",err);
-		goto exit_put;
-	}
-
-	mutex_lock(&EXT3_SB(sb)->s_resize_lock);
-	if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
-		ext3_warning(sb, __func__,
-			     "multiple resizers run on filesystem!");
-		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-		ext3_journal_stop(handle);
-		err = -EBUSY;
-		goto exit_put;
-	}
-
-	if ((err = ext3_journal_get_write_access(handle,
-						 EXT3_SB(sb)->s_sbh))) {
-		ext3_warning(sb, __func__,
-			     "error %d on journal write access", err);
-		mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-		ext3_journal_stop(handle);
-		goto exit_put;
-	}
-	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
-	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
-	if (err) {
-		ext3_warning(sb, __func__,
-			     "error %d on journal dirty metadata", err);
-		ext3_journal_stop(handle);
-		goto exit_put;
-	}
-	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
-		   o_blocks_count, o_blocks_count + add);
-	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
-	ext3_debug("freed blocks "E3FSBLK" through "E3FSBLK"\n",
-		   o_blocks_count, o_blocks_count + add);
-	if ((err = ext3_journal_stop(handle)))
-		goto exit_put;
-	if (test_opt(sb, DEBUG))
-		printk(KERN_DEBUG "EXT3-fs: extended group to %u blocks\n",
-		       le32_to_cpu(es->s_blocks_count));
-	update_backups(sb, EXT3_SB(sb)->s_sbh->b_blocknr, (char *)es,
-		       sizeof(struct ext3_super_block));
-exit_put:
-	return err;
-} /* ext3_group_extend */
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
deleted file mode 100644
index 5ed0044..0000000
--- a/fs/ext3/super.c
+++ /dev/null
@@ -1,3165 +0,0 @@
-/*
- *  linux/fs/ext3/super.c
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/inode.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  Big-endian to little-endian byte-swapping/bitmaps by
- *        David S. Miller (davem@caip.rutgers.edu), 1995
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/parser.h>
-#include <linux/exportfs.h>
-#include <linux/statfs.h>
-#include <linux/random.h>
-#include <linux/mount.h>
-#include <linux/quotaops.h>
-#include <linux/seq_file.h>
-#include <linux/log2.h>
-#include <linux/cleancache.h>
-#include <linux/namei.h>
-
-#include <asm/uaccess.h>
-
-#define CREATE_TRACE_POINTS
-
-#include "ext3.h"
-#include "xattr.h"
-#include "acl.h"
-#include "namei.h"
-
-#ifdef CONFIG_EXT3_DEFAULTS_TO_ORDERED
-  #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_ORDERED_DATA
-#else
-  #define EXT3_MOUNT_DEFAULT_DATA_MODE EXT3_MOUNT_WRITEBACK_DATA
-#endif
-
-static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
-			     unsigned long journal_devnum);
-static int ext3_create_journal(struct super_block *, struct ext3_super_block *,
-			       unsigned int);
-static int ext3_commit_super(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       int sync);
-static void ext3_mark_recovery_complete(struct super_block * sb,
-					struct ext3_super_block * es);
-static void ext3_clear_journal_err(struct super_block * sb,
-				   struct ext3_super_block * es);
-static int ext3_sync_fs(struct super_block *sb, int wait);
-static const char *ext3_decode_error(struct super_block * sb, int errno,
-				     char nbuf[16]);
-static int ext3_remount (struct super_block * sb, int * flags, char * data);
-static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf);
-static int ext3_unfreeze(struct super_block *sb);
-static int ext3_freeze(struct super_block *sb);
-
-/*
- * Wrappers for journal_start/end.
- */
-handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
-{
-	journal_t *journal;
-
-	if (sb->s_flags & MS_RDONLY)
-		return ERR_PTR(-EROFS);
-
-	/* Special case here: if the journal has aborted behind our
-	 * backs (eg. EIO in the commit thread), then we still need to
-	 * take the FS itself readonly cleanly. */
-	journal = EXT3_SB(sb)->s_journal;
-	if (is_journal_aborted(journal)) {
-		ext3_abort(sb, __func__,
-			   "Detected aborted journal");
-		return ERR_PTR(-EROFS);
-	}
-
-	return journal_start(journal, nblocks);
-}
-
-int __ext3_journal_stop(const char *where, handle_t *handle)
-{
-	struct super_block *sb;
-	int err;
-	int rc;
-
-	sb = handle->h_transaction->t_journal->j_private;
-	err = handle->h_err;
-	rc = journal_stop(handle);
-
-	if (!err)
-		err = rc;
-	if (err)
-		__ext3_std_error(sb, where, err);
-	return err;
-}
-
-void ext3_journal_abort_handle(const char *caller, const char *err_fn,
-		struct buffer_head *bh, handle_t *handle, int err)
-{
-	char nbuf[16];
-	const char *errstr = ext3_decode_error(NULL, err, nbuf);
-
-	if (bh)
-		BUFFER_TRACE(bh, "abort");
-
-	if (!handle->h_err)
-		handle->h_err = err;
-
-	if (is_handle_aborted(handle))
-		return;
-
-	printk(KERN_ERR "EXT3-fs: %s: aborting transaction: %s in %s\n",
-		caller, errstr, err_fn);
-
-	journal_abort_handle(handle);
-}
-
-void ext3_msg(struct super_block *sb, const char *prefix,
-		const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
-
-	va_end(args);
-}
-
-/* Deal with the reporting of failure conditions on a filesystem such as
- * inconsistencies detected or read IO failures.
- *
- * On ext2, we can store the error state of the filesystem in the
- * superblock.  That is not possible on ext3, because we may have other
- * write ordering constraints on the superblock which prevent us from
- * writing it out straight away; and given that the journal is about to
- * be aborted, we can't rely on the current, or future, transactions to
- * write out the superblock safely.
- *
- * We'll just use the journal_abort() error code to record an error in
- * the journal instead.  On recovery, the journal will complain about
- * that error until we've noted it down and cleared it.
- */
-
-static void ext3_handle_error(struct super_block *sb)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-	es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-
-	if (sb->s_flags & MS_RDONLY)
-		return;
-
-	if (!test_opt (sb, ERRORS_CONT)) {
-		journal_t *journal = EXT3_SB(sb)->s_journal;
-
-		set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
-		if (journal)
-			journal_abort(journal, -EIO);
-	}
-	if (test_opt (sb, ERRORS_RO)) {
-		ext3_msg(sb, KERN_CRIT,
-			"error: remounting filesystem read-only");
-		/*
-		 * Make sure updated value of ->s_mount_state will be visible
-		 * before ->s_flags update.
-		 */
-		smp_wmb();
-		sb->s_flags |= MS_RDONLY;
-	}
-	ext3_commit_super(sb, es, 1);
-	if (test_opt(sb, ERRORS_PANIC))
-		panic("EXT3-fs (%s): panic forced after error\n",
-			sb->s_id);
-}
-
-void ext3_error(struct super_block *sb, const char *function,
-		const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-
-	ext3_handle_error(sb);
-}
-
-static const char *ext3_decode_error(struct super_block * sb, int errno,
-				     char nbuf[16])
-{
-	char *errstr = NULL;
-
-	switch (errno) {
-	case -EIO:
-		errstr = "IO failure";
-		break;
-	case -ENOMEM:
-		errstr = "Out of memory";
-		break;
-	case -EROFS:
-		if (!sb || EXT3_SB(sb)->s_journal->j_flags & JFS_ABORT)
-			errstr = "Journal has aborted";
-		else
-			errstr = "Readonly filesystem";
-		break;
-	default:
-		/* If the caller passed in an extra buffer for unknown
-		 * errors, textualise them now.  Else we just return
-		 * NULL. */
-		if (nbuf) {
-			/* Check for truncated error codes... */
-			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
-				errstr = nbuf;
-		}
-		break;
-	}
-
-	return errstr;
-}
-
-/* __ext3_std_error decodes expected errors from journaling functions
- * automatically and invokes the appropriate error response.  */
-
-void __ext3_std_error (struct super_block * sb, const char * function,
-		       int errno)
-{
-	char nbuf[16];
-	const char *errstr;
-
-	/* Special case: if the error is EROFS, and we're not already
-	 * inside a transaction, then there's really no point in logging
-	 * an error. */
-	if (errno == -EROFS && journal_current_handle() == NULL &&
-	    (sb->s_flags & MS_RDONLY))
-		return;
-
-	errstr = ext3_decode_error(sb, errno, nbuf);
-	ext3_msg(sb, KERN_CRIT, "error in %s: %s", function, errstr);
-
-	ext3_handle_error(sb);
-}
-
-/*
- * ext3_abort is a much stronger failure handler than ext3_error.  The
- * abort function may be used to deal with unrecoverable failures such
- * as journal IO errors or ENOMEM at a critical moment in log management.
- *
- * We unconditionally force the filesystem into an ABORT|READONLY state,
- * unless the error response on the fs has been set to panic in which
- * case we take the easy way out and panic immediately.
- */
-
-void ext3_abort(struct super_block *sb, const char *function,
-		 const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-
-	if (test_opt(sb, ERRORS_PANIC))
-		panic("EXT3-fs: panic from previous error\n");
-
-	if (sb->s_flags & MS_RDONLY)
-		return;
-
-	ext3_msg(sb, KERN_CRIT,
-		"error: remounting filesystem read-only");
-	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-	set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
-	/*
-	 * Make sure updated value of ->s_mount_state will be visible
-	 * before ->s_flags update.
-	 */
-	smp_wmb();
-	sb->s_flags |= MS_RDONLY;
-
-	if (EXT3_SB(sb)->s_journal)
-		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
-}
-
-void ext3_warning(struct super_block *sb, const char *function,
-		  const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	va_start(args, fmt);
-
-	vaf.fmt = fmt;
-	vaf.va = &args;
-
-	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
-	       sb->s_id, function, &vaf);
-
-	va_end(args);
-}
-
-void ext3_update_dynamic_rev(struct super_block *sb)
-{
-	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
-
-	if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
-		return;
-
-	ext3_msg(sb, KERN_WARNING,
-		"warning: updating to rev %d because of "
-		"new feature flag, running e2fsck is recommended",
-		EXT3_DYNAMIC_REV);
-
-	es->s_first_ino = cpu_to_le32(EXT3_GOOD_OLD_FIRST_INO);
-	es->s_inode_size = cpu_to_le16(EXT3_GOOD_OLD_INODE_SIZE);
-	es->s_rev_level = cpu_to_le32(EXT3_DYNAMIC_REV);
-	/* leave es->s_feature_*compat flags alone */
-	/* es->s_uuid will be set by e2fsck if empty */
-
-	/*
-	 * The rest of the superblock fields should be zero, and if not it
-	 * means they are likely already in use, so leave them alone.  We
-	 * can leave it up to e2fsck to clean up any inconsistencies there.
-	 */
-}
-
-/*
- * Open the external journal device
- */
-static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
-{
-	struct block_device *bdev;
-	char b[BDEVNAME_SIZE];
-
-	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
-	if (IS_ERR(bdev))
-		goto fail;
-	return bdev;
-
-fail:
-	ext3_msg(sb, KERN_ERR, "error: failed to open journal device %s: %ld",
-		__bdevname(dev, b), PTR_ERR(bdev));
-
-	return NULL;
-}
-
-/*
- * Release the journal device
- */
-static void ext3_blkdev_put(struct block_device *bdev)
-{
-	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-}
-
-static void ext3_blkdev_remove(struct ext3_sb_info *sbi)
-{
-	struct block_device *bdev;
-	bdev = sbi->journal_bdev;
-	if (bdev) {
-		ext3_blkdev_put(bdev);
-		sbi->journal_bdev = NULL;
-	}
-}
-
-static inline struct inode *orphan_list_entry(struct list_head *l)
-{
-	return &list_entry(l, struct ext3_inode_info, i_orphan)->vfs_inode;
-}
-
-static void dump_orphan_list(struct super_block *sb, struct ext3_sb_info *sbi)
-{
-	struct list_head *l;
-
-	ext3_msg(sb, KERN_ERR, "error: sb orphan head is %d",
-	       le32_to_cpu(sbi->s_es->s_last_orphan));
-
-	ext3_msg(sb, KERN_ERR, "sb_info orphan list:");
-	list_for_each(l, &sbi->s_orphan) {
-		struct inode *inode = orphan_list_entry(l);
-		ext3_msg(sb, KERN_ERR, "  "
-		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
-		       inode->i_sb->s_id, inode->i_ino, inode,
-		       inode->i_mode, inode->i_nlink,
-		       NEXT_ORPHAN(inode));
-	}
-}
-
-static void ext3_put_super (struct super_block * sb)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	int i, err;
-
-	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-	ext3_xattr_put_super(sb);
-	err = journal_destroy(sbi->s_journal);
-	sbi->s_journal = NULL;
-	if (err < 0)
-		ext3_abort(sb, __func__, "Couldn't clean up the journal");
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		es->s_state = cpu_to_le16(sbi->s_mount_state);
-		BUFFER_TRACE(sbi->s_sbh, "marking dirty");
-		mark_buffer_dirty(sbi->s_sbh);
-		ext3_commit_super(sb, es, 1);
-	}
-
-	for (i = 0; i < sbi->s_gdb_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	brelse(sbi->s_sbh);
-#ifdef CONFIG_QUOTA
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
-#endif
-
-	/* Debugging code just in case the in-memory inode orphan list
-	 * isn't empty.  The on-disk one can be non-empty if we've
-	 * detected an error and taken the fs readonly, but the
-	 * in-memory list had better be clean by this point. */
-	if (!list_empty(&sbi->s_orphan))
-		dump_orphan_list(sb, sbi);
-	J_ASSERT(list_empty(&sbi->s_orphan));
-
-	invalidate_bdev(sb->s_bdev);
-	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
-		/*
-		 * Invalidate the journal device's buffers.  We don't want them
-		 * floating about in memory - the physical journal device may
-		 * be hotswapped, and it breaks the `ro-after' testing code.
-		 */
-		sync_blockdev(sbi->journal_bdev);
-		invalidate_bdev(sbi->journal_bdev);
-		ext3_blkdev_remove(sbi);
-	}
-	sb->s_fs_info = NULL;
-	kfree(sbi->s_blockgroup_lock);
-	mutex_destroy(&sbi->s_orphan_lock);
-	mutex_destroy(&sbi->s_resize_lock);
-	kfree(sbi);
-}
-
-static struct kmem_cache *ext3_inode_cachep;
-
-/*
- * Called inside transaction, so use GFP_NOFS
- */
-static struct inode *ext3_alloc_inode(struct super_block *sb)
-{
-	struct ext3_inode_info *ei;
-
-	ei = kmem_cache_alloc(ext3_inode_cachep, GFP_NOFS);
-	if (!ei)
-		return NULL;
-	ei->i_block_alloc_info = NULL;
-	ei->vfs_inode.i_version = 1;
-	atomic_set(&ei->i_datasync_tid, 0);
-	atomic_set(&ei->i_sync_tid, 0);
-#ifdef CONFIG_QUOTA
-	memset(&ei->i_dquot, 0, sizeof(ei->i_dquot));
-#endif
-
-	return &ei->vfs_inode;
-}
-
-static int ext3_drop_inode(struct inode *inode)
-{
-	int drop = generic_drop_inode(inode);
-
-	trace_ext3_drop_inode(inode, drop);
-	return drop;
-}
-
-static void ext3_i_callback(struct rcu_head *head)
-{
-	struct inode *inode = container_of(head, struct inode, i_rcu);
-	kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
-}
-
-static void ext3_destroy_inode(struct inode *inode)
-{
-	if (!list_empty(&(EXT3_I(inode)->i_orphan))) {
-		printk("EXT3 Inode %p: orphan list check failed!\n",
-			EXT3_I(inode));
-		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
-				EXT3_I(inode), sizeof(struct ext3_inode_info),
-				false);
-		dump_stack();
-	}
-	call_rcu(&inode->i_rcu, ext3_i_callback);
-}
-
-static void init_once(void *foo)
-{
-	struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;
-
-	INIT_LIST_HEAD(&ei->i_orphan);
-#ifdef CONFIG_EXT3_FS_XATTR
-	init_rwsem(&ei->xattr_sem);
-#endif
-	mutex_init(&ei->truncate_mutex);
-	inode_init_once(&ei->vfs_inode);
-}
-
-static int __init init_inodecache(void)
-{
-	ext3_inode_cachep = kmem_cache_create("ext3_inode_cache",
-					     sizeof(struct ext3_inode_info),
-					     0, (SLAB_RECLAIM_ACCOUNT|
-						SLAB_MEM_SPREAD),
-					     init_once);
-	if (ext3_inode_cachep == NULL)
-		return -ENOMEM;
-	return 0;
-}
-
-static void destroy_inodecache(void)
-{
-	/*
-	 * Make sure all delayed rcu free inodes are flushed before we
-	 * destroy cache.
-	 */
-	rcu_barrier();
-	kmem_cache_destroy(ext3_inode_cachep);
-}
-
-static inline void ext3_show_quota_options(struct seq_file *seq, struct super_block *sb)
-{
-#if defined(CONFIG_QUOTA)
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sbi->s_jquota_fmt) {
-		char *fmtname = "";
-
-		switch (sbi->s_jquota_fmt) {
-		case QFMT_VFS_OLD:
-			fmtname = "vfsold";
-			break;
-		case QFMT_VFS_V0:
-			fmtname = "vfsv0";
-			break;
-		case QFMT_VFS_V1:
-			fmtname = "vfsv1";
-			break;
-		}
-		seq_printf(seq, ",jqfmt=%s", fmtname);
-	}
-
-	if (sbi->s_qf_names[USRQUOTA])
-		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
-
-	if (sbi->s_qf_names[GRPQUOTA])
-		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
-
-	if (test_opt(sb, USRQUOTA))
-		seq_puts(seq, ",usrquota");
-
-	if (test_opt(sb, GRPQUOTA))
-		seq_puts(seq, ",grpquota");
-#endif
-}
-
-static char *data_mode_string(unsigned long mode)
-{
-	switch (mode) {
-	case EXT3_MOUNT_JOURNAL_DATA:
-		return "journal";
-	case EXT3_MOUNT_ORDERED_DATA:
-		return "ordered";
-	case EXT3_MOUNT_WRITEBACK_DATA:
-		return "writeback";
-	}
-	return "unknown";
-}
-
-/*
- * Show an option if
- *  - it's set to a non-default value OR
- *  - if the per-sb default is different from the global default
- */
-static int ext3_show_options(struct seq_file *seq, struct dentry *root)
-{
-	struct super_block *sb = root->d_sb;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	unsigned long def_mount_opts;
-
-	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-
-	if (sbi->s_sb_block != 1)
-		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
-	if (test_opt(sb, MINIX_DF))
-		seq_puts(seq, ",minixdf");
-	if (test_opt(sb, GRPID))
-		seq_puts(seq, ",grpid");
-	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT3_DEFM_BSDGROUPS))
-		seq_puts(seq, ",nogrpid");
-	if (!uid_eq(sbi->s_resuid, make_kuid(&init_user_ns, EXT3_DEF_RESUID)) ||
-	    le16_to_cpu(es->s_def_resuid) != EXT3_DEF_RESUID) {
-		seq_printf(seq, ",resuid=%u",
-				from_kuid_munged(&init_user_ns, sbi->s_resuid));
-	}
-	if (!gid_eq(sbi->s_resgid, make_kgid(&init_user_ns, EXT3_DEF_RESGID)) ||
-	    le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
-		seq_printf(seq, ",resgid=%u",
-				from_kgid_munged(&init_user_ns, sbi->s_resgid));
-	}
-	if (test_opt(sb, ERRORS_RO)) {
-		int def_errors = le16_to_cpu(es->s_errors);
-
-		if (def_errors == EXT3_ERRORS_PANIC ||
-		    def_errors == EXT3_ERRORS_CONTINUE) {
-			seq_puts(seq, ",errors=remount-ro");
-		}
-	}
-	if (test_opt(sb, ERRORS_CONT))
-		seq_puts(seq, ",errors=continue");
-	if (test_opt(sb, ERRORS_PANIC))
-		seq_puts(seq, ",errors=panic");
-	if (test_opt(sb, NO_UID32))
-		seq_puts(seq, ",nouid32");
-	if (test_opt(sb, DEBUG))
-		seq_puts(seq, ",debug");
-#ifdef CONFIG_EXT3_FS_XATTR
-	if (test_opt(sb, XATTR_USER))
-		seq_puts(seq, ",user_xattr");
-	if (!test_opt(sb, XATTR_USER) &&
-	    (def_mount_opts & EXT3_DEFM_XATTR_USER)) {
-		seq_puts(seq, ",nouser_xattr");
-	}
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	if (test_opt(sb, POSIX_ACL))
-		seq_puts(seq, ",acl");
-	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT3_DEFM_ACL))
-		seq_puts(seq, ",noacl");
-#endif
-	if (!test_opt(sb, RESERVATION))
-		seq_puts(seq, ",noreservation");
-	if (sbi->s_commit_interval) {
-		seq_printf(seq, ",commit=%u",
-			   (unsigned) (sbi->s_commit_interval / HZ));
-	}
-
-	/*
-	 * Always display barrier state so it's clear what the status is.
-	 */
-	seq_puts(seq, ",barrier=");
-	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
-	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
-	if (test_opt(sb, DATA_ERR_ABORT))
-		seq_puts(seq, ",data_err=abort");
-
-	if (test_opt(sb, NOLOAD))
-		seq_puts(seq, ",norecovery");
-
-	ext3_show_quota_options(seq, sb);
-
-	return 0;
-}
-
-
-static struct inode *ext3_nfs_get_inode(struct super_block *sb,
-		u64 ino, u32 generation)
-{
-	struct inode *inode;
-
-	if (ino < EXT3_FIRST_INO(sb) && ino != EXT3_ROOT_INO)
-		return ERR_PTR(-ESTALE);
-	if (ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count))
-		return ERR_PTR(-ESTALE);
-
-	/* iget isn't really right if the inode is currently unallocated!!
-	 *
-	 * ext3_read_inode will return a bad_inode if the inode had been
-	 * deleted, so we should be safe.
-	 *
-	 * Currently we don't know the generation for parent directory, so
-	 * a generation of 0 means "accept any"
-	 */
-	inode = ext3_iget(sb, ino);
-	if (IS_ERR(inode))
-		return ERR_CAST(inode);
-	if (generation && inode->i_generation != generation) {
-		iput(inode);
-		return ERR_PTR(-ESTALE);
-	}
-
-	return inode;
-}
-
-static struct dentry *ext3_fh_to_dentry(struct super_block *sb, struct fid *fid,
-		int fh_len, int fh_type)
-{
-	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
-				    ext3_nfs_get_inode);
-}
-
-static struct dentry *ext3_fh_to_parent(struct super_block *sb, struct fid *fid,
-		int fh_len, int fh_type)
-{
-	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
-				    ext3_nfs_get_inode);
-}
-
-/*
- * Try to release metadata pages (indirect blocks, directories) which are
- * mapped via the block device.  Since these pages could have journal heads
- * which would prevent try_to_free_buffers() from freeing them, we must use
- * jbd layer's try_to_free_buffers() function to release them.
- */
-static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
-				 gfp_t wait)
-{
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-
-	WARN_ON(PageChecked(page));
-	if (!page_has_buffers(page))
-		return 0;
-	if (journal)
-		return journal_try_to_free_buffers(journal, page, 
-						   wait & ~__GFP_WAIT);
-	return try_to_free_buffers(page);
-}
-
-#ifdef CONFIG_QUOTA
-#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
-#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-
-static int ext3_write_dquot(struct dquot *dquot);
-static int ext3_acquire_dquot(struct dquot *dquot);
-static int ext3_release_dquot(struct dquot *dquot);
-static int ext3_mark_dquot_dirty(struct dquot *dquot);
-static int ext3_write_info(struct super_block *sb, int type);
-static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 struct path *path);
-static int ext3_quota_on_mount(struct super_block *sb, int type);
-static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
-			       size_t len, loff_t off);
-static ssize_t ext3_quota_write(struct super_block *sb, int type,
-				const char *data, size_t len, loff_t off);
-static struct dquot **ext3_get_dquots(struct inode *inode)
-{
-	return EXT3_I(inode)->i_dquot;
-}
-
-static const struct dquot_operations ext3_quota_operations = {
-	.write_dquot	= ext3_write_dquot,
-	.acquire_dquot	= ext3_acquire_dquot,
-	.release_dquot	= ext3_release_dquot,
-	.mark_dirty	= ext3_mark_dquot_dirty,
-	.write_info	= ext3_write_info,
-	.alloc_dquot	= dquot_alloc,
-	.destroy_dquot	= dquot_destroy,
-};
-
-static const struct quotactl_ops ext3_qctl_operations = {
-	.quota_on	= ext3_quota_on,
-	.quota_off	= dquot_quota_off,
-	.quota_sync	= dquot_quota_sync,
-	.get_state	= dquot_get_state,
-	.set_info	= dquot_set_dqinfo,
-	.get_dqblk	= dquot_get_dqblk,
-	.set_dqblk	= dquot_set_dqblk
-};
-#endif
-
-static const struct super_operations ext3_sops = {
-	.alloc_inode	= ext3_alloc_inode,
-	.destroy_inode	= ext3_destroy_inode,
-	.write_inode	= ext3_write_inode,
-	.dirty_inode	= ext3_dirty_inode,
-	.drop_inode	= ext3_drop_inode,
-	.evict_inode	= ext3_evict_inode,
-	.put_super	= ext3_put_super,
-	.sync_fs	= ext3_sync_fs,
-	.freeze_fs	= ext3_freeze,
-	.unfreeze_fs	= ext3_unfreeze,
-	.statfs		= ext3_statfs,
-	.remount_fs	= ext3_remount,
-	.show_options	= ext3_show_options,
-#ifdef CONFIG_QUOTA
-	.quota_read	= ext3_quota_read,
-	.quota_write	= ext3_quota_write,
-	.get_dquots	= ext3_get_dquots,
-#endif
-	.bdev_try_to_free_page = bdev_try_to_free_page,
-};
-
-static const struct export_operations ext3_export_ops = {
-	.fh_to_dentry = ext3_fh_to_dentry,
-	.fh_to_parent = ext3_fh_to_parent,
-	.get_parent = ext3_get_parent,
-};
-
-enum {
-	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
-	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
-	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
-	Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
-	Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
-	Opt_journal_path,
-	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
-	Opt_data_err_abort, Opt_data_err_ignore,
-	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
-	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
-	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
-	Opt_resize, Opt_usrquota, Opt_grpquota
-};
-
-static const match_table_t tokens = {
-	{Opt_bsd_df, "bsddf"},
-	{Opt_minix_df, "minixdf"},
-	{Opt_grpid, "grpid"},
-	{Opt_grpid, "bsdgroups"},
-	{Opt_nogrpid, "nogrpid"},
-	{Opt_nogrpid, "sysvgroups"},
-	{Opt_resgid, "resgid=%u"},
-	{Opt_resuid, "resuid=%u"},
-	{Opt_sb, "sb=%u"},
-	{Opt_err_cont, "errors=continue"},
-	{Opt_err_panic, "errors=panic"},
-	{Opt_err_ro, "errors=remount-ro"},
-	{Opt_nouid32, "nouid32"},
-	{Opt_nocheck, "nocheck"},
-	{Opt_nocheck, "check=none"},
-	{Opt_debug, "debug"},
-	{Opt_oldalloc, "oldalloc"},
-	{Opt_orlov, "orlov"},
-	{Opt_user_xattr, "user_xattr"},
-	{Opt_nouser_xattr, "nouser_xattr"},
-	{Opt_acl, "acl"},
-	{Opt_noacl, "noacl"},
-	{Opt_reservation, "reservation"},
-	{Opt_noreservation, "noreservation"},
-	{Opt_noload, "noload"},
-	{Opt_noload, "norecovery"},
-	{Opt_nobh, "nobh"},
-	{Opt_bh, "bh"},
-	{Opt_commit, "commit=%u"},
-	{Opt_journal_update, "journal=update"},
-	{Opt_journal_inum, "journal=%u"},
-	{Opt_journal_dev, "journal_dev=%u"},
-	{Opt_journal_path, "journal_path=%s"},
-	{Opt_abort, "abort"},
-	{Opt_data_journal, "data=journal"},
-	{Opt_data_ordered, "data=ordered"},
-	{Opt_data_writeback, "data=writeback"},
-	{Opt_data_err_abort, "data_err=abort"},
-	{Opt_data_err_ignore, "data_err=ignore"},
-	{Opt_offusrjquota, "usrjquota="},
-	{Opt_usrjquota, "usrjquota=%s"},
-	{Opt_offgrpjquota, "grpjquota="},
-	{Opt_grpjquota, "grpjquota=%s"},
-	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
-	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
-	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
-	{Opt_grpquota, "grpquota"},
-	{Opt_noquota, "noquota"},
-	{Opt_quota, "quota"},
-	{Opt_usrquota, "usrquota"},
-	{Opt_barrier, "barrier=%u"},
-	{Opt_barrier, "barrier"},
-	{Opt_nobarrier, "nobarrier"},
-	{Opt_resize, "resize"},
-	{Opt_err, NULL},
-};
-
-static ext3_fsblk_t get_sb_block(void **data, struct super_block *sb)
-{
-	ext3_fsblk_t	sb_block;
-	char		*options = (char *) *data;
-
-	if (!options || strncmp(options, "sb=", 3) != 0)
-		return 1;	/* Default location */
-	options += 3;
-	/*todo: use simple_strtoll with >32bit ext3 */
-	sb_block = simple_strtoul(options, &options, 0);
-	if (*options && *options != ',') {
-		ext3_msg(sb, KERN_ERR, "error: invalid sb specification: %s",
-		       (char *) *data);
-		return 1;
-	}
-	if (*options == ',')
-		options++;
-	*data = (void *) options;
-	return sb_block;
-}
-
-#ifdef CONFIG_QUOTA
-static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	char *qname;
-
-	if (sb_any_quota_loaded(sb) &&
-		!sbi->s_qf_names[qtype]) {
-		ext3_msg(sb, KERN_ERR,
-			"Cannot change journaled "
-			"quota options when quota turned on");
-		return 0;
-	}
-	qname = match_strdup(args);
-	if (!qname) {
-		ext3_msg(sb, KERN_ERR,
-			"Not enough memory for storing quotafile name");
-		return 0;
-	}
-	if (sbi->s_qf_names[qtype]) {
-		int same = !strcmp(sbi->s_qf_names[qtype], qname);
-
-		kfree(qname);
-		if (!same) {
-			ext3_msg(sb, KERN_ERR,
-				 "%s quota file already specified",
-				 QTYPE2NAME(qtype));
-		}
-		return same;
-	}
-	if (strchr(qname, '/')) {
-		ext3_msg(sb, KERN_ERR,
-			"quotafile must be on filesystem root");
-		kfree(qname);
-		return 0;
-	}
-	sbi->s_qf_names[qtype] = qname;
-	set_opt(sbi->s_mount_opt, QUOTA);
-	return 1;
-}
-
-static int clear_qf_name(struct super_block *sb, int qtype)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sb_any_quota_loaded(sb) &&
-		sbi->s_qf_names[qtype]) {
-		ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
-			" when quota turned on");
-		return 0;
-	}
-	if (sbi->s_qf_names[qtype]) {
-		kfree(sbi->s_qf_names[qtype]);
-		sbi->s_qf_names[qtype] = NULL;
-	}
-	return 1;
-}
-#endif
-
-static int parse_options (char *options, struct super_block *sb,
-			  unsigned int *inum, unsigned long *journal_devnum,
-			  ext3_fsblk_t *n_blocks_count, int is_remount)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	char * p;
-	substring_t args[MAX_OPT_ARGS];
-	int data_opt = 0;
-	int option;
-	kuid_t uid;
-	kgid_t gid;
-	char *journal_path;
-	struct inode *journal_inode;
-	struct path path;
-	int error;
-
-#ifdef CONFIG_QUOTA
-	int qfmt;
-#endif
-
-	if (!options)
-		return 1;
-
-	while ((p = strsep (&options, ",")) != NULL) {
-		int token;
-		if (!*p)
-			continue;
-		/*
-		 * Initialize args struct so we know whether arg was
-		 * found; some options take optional arguments.
-		 */
-		args[0].to = args[0].from = NULL;
-		token = match_token(p, tokens, args);
-		switch (token) {
-		case Opt_bsd_df:
-			clear_opt (sbi->s_mount_opt, MINIX_DF);
-			break;
-		case Opt_minix_df:
-			set_opt (sbi->s_mount_opt, MINIX_DF);
-			break;
-		case Opt_grpid:
-			set_opt (sbi->s_mount_opt, GRPID);
-			break;
-		case Opt_nogrpid:
-			clear_opt (sbi->s_mount_opt, GRPID);
-			break;
-		case Opt_resuid:
-			if (match_int(&args[0], &option))
-				return 0;
-			uid = make_kuid(current_user_ns(), option);
-			if (!uid_valid(uid)) {
-				ext3_msg(sb, KERN_ERR, "Invalid uid value %d", option);
-				return 0;
-
-			}
-			sbi->s_resuid = uid;
-			break;
-		case Opt_resgid:
-			if (match_int(&args[0], &option))
-				return 0;
-			gid = make_kgid(current_user_ns(), option);
-			if (!gid_valid(gid)) {
-				ext3_msg(sb, KERN_ERR, "Invalid gid value %d", option);
-				return 0;
-			}
-			sbi->s_resgid = gid;
-			break;
-		case Opt_sb:
-			/* handled by get_sb_block() instead of here */
-			/* *sb_block = match_int(&args[0]); */
-			break;
-		case Opt_err_panic:
-			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt (sbi->s_mount_opt, ERRORS_RO);
-			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			break;
-		case Opt_err_ro:
-			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt (sbi->s_mount_opt, ERRORS_RO);
-			break;
-		case Opt_err_cont:
-			clear_opt (sbi->s_mount_opt, ERRORS_RO);
-			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt (sbi->s_mount_opt, ERRORS_CONT);
-			break;
-		case Opt_nouid32:
-			set_opt (sbi->s_mount_opt, NO_UID32);
-			break;
-		case Opt_nocheck:
-			clear_opt (sbi->s_mount_opt, CHECK);
-			break;
-		case Opt_debug:
-			set_opt (sbi->s_mount_opt, DEBUG);
-			break;
-		case Opt_oldalloc:
-			ext3_msg(sb, KERN_WARNING,
-				"Ignoring deprecated oldalloc option");
-			break;
-		case Opt_orlov:
-			ext3_msg(sb, KERN_WARNING,
-				"Ignoring deprecated orlov option");
-			break;
-#ifdef CONFIG_EXT3_FS_XATTR
-		case Opt_user_xattr:
-			set_opt (sbi->s_mount_opt, XATTR_USER);
-			break;
-		case Opt_nouser_xattr:
-			clear_opt (sbi->s_mount_opt, XATTR_USER);
-			break;
-#else
-		case Opt_user_xattr:
-		case Opt_nouser_xattr:
-			ext3_msg(sb, KERN_INFO,
-				"(no)user_xattr options not supported");
-			break;
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-		case Opt_acl:
-			set_opt(sbi->s_mount_opt, POSIX_ACL);
-			break;
-		case Opt_noacl:
-			clear_opt(sbi->s_mount_opt, POSIX_ACL);
-			break;
-#else
-		case Opt_acl:
-		case Opt_noacl:
-			ext3_msg(sb, KERN_INFO,
-				"(no)acl options not supported");
-			break;
-#endif
-		case Opt_reservation:
-			set_opt(sbi->s_mount_opt, RESERVATION);
-			break;
-		case Opt_noreservation:
-			clear_opt(sbi->s_mount_opt, RESERVATION);
-			break;
-		case Opt_journal_update:
-			/* @@@ FIXME */
-			/* Eventually we will want to be able to create
-			   a journal file here.  For now, only allow the
-			   user to specify an existing inode to be the
-			   journal file. */
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-					"journal on remount");
-				return 0;
-			}
-			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
-			break;
-		case Opt_journal_inum:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*inum = option;
-			break;
-		case Opt_journal_dev:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option))
-				return 0;
-			*journal_devnum = option;
-			break;
-		case Opt_journal_path:
-			if (is_remount) {
-				ext3_msg(sb, KERN_ERR, "error: cannot specify "
-				       "journal on remount");
-				return 0;
-			}
-
-			journal_path = match_strdup(&args[0]);
-			if (!journal_path) {
-				ext3_msg(sb, KERN_ERR, "error: could not dup "
-					"journal device string");
-				return 0;
-			}
-
-			error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
-			if (error) {
-				ext3_msg(sb, KERN_ERR, "error: could not find "
-					"journal device path: error %d", error);
-				kfree(journal_path);
-				return 0;
-			}
-
-			journal_inode = d_inode(path.dentry);
-			if (!S_ISBLK(journal_inode->i_mode)) {
-				ext3_msg(sb, KERN_ERR, "error: journal path %s "
-					"is not a block device", journal_path);
-				path_put(&path);
-				kfree(journal_path);
-				return 0;
-			}
-
-			*journal_devnum = new_encode_dev(journal_inode->i_rdev);
-			path_put(&path);
-			kfree(journal_path);
-			break;
-		case Opt_noload:
-			set_opt (sbi->s_mount_opt, NOLOAD);
-			break;
-		case Opt_commit:
-			if (match_int(&args[0], &option))
-				return 0;
-			if (option < 0)
-				return 0;
-			if (option == 0)
-				option = JBD_DEFAULT_MAX_COMMIT_AGE;
-			sbi->s_commit_interval = HZ * option;
-			break;
-		case Opt_data_journal:
-			data_opt = EXT3_MOUNT_JOURNAL_DATA;
-			goto datacheck;
-		case Opt_data_ordered:
-			data_opt = EXT3_MOUNT_ORDERED_DATA;
-			goto datacheck;
-		case Opt_data_writeback:
-			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
-		datacheck:
-			if (is_remount) {
-				if (test_opt(sb, DATA_FLAGS) == data_opt)
-					break;
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot change "
-					"data mode on remount. The filesystem "
-					"is mounted in data=%s mode and you "
-					"try to remount it in data=%s mode.",
-					data_mode_string(test_opt(sb,
-							DATA_FLAGS)),
-					data_mode_string(data_opt));
-				return 0;
-			} else {
-				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
-				sbi->s_mount_opt |= data_opt;
-			}
-			break;
-		case Opt_data_err_abort:
-			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
-			break;
-		case Opt_data_err_ignore:
-			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
-			break;
-#ifdef CONFIG_QUOTA
-		case Opt_usrjquota:
-			if (!set_qf_name(sb, USRQUOTA, &args[0]))
-				return 0;
-			break;
-		case Opt_grpjquota:
-			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
-				return 0;
-			break;
-		case Opt_offusrjquota:
-			if (!clear_qf_name(sb, USRQUOTA))
-				return 0;
-			break;
-		case Opt_offgrpjquota:
-			if (!clear_qf_name(sb, GRPQUOTA))
-				return 0;
-			break;
-		case Opt_jqfmt_vfsold:
-			qfmt = QFMT_VFS_OLD;
-			goto set_qf_format;
-		case Opt_jqfmt_vfsv0:
-			qfmt = QFMT_VFS_V0;
-			goto set_qf_format;
-		case Opt_jqfmt_vfsv1:
-			qfmt = QFMT_VFS_V1;
-set_qf_format:
-			if (sb_any_quota_loaded(sb) &&
-			    sbi->s_jquota_fmt != qfmt) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-					"journaled quota options when "
-					"quota turned on.");
-				return 0;
-			}
-			sbi->s_jquota_fmt = qfmt;
-			break;
-		case Opt_quota:
-		case Opt_usrquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, USRQUOTA);
-			break;
-		case Opt_grpquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, GRPQUOTA);
-			break;
-		case Opt_noquota:
-			if (sb_any_quota_loaded(sb)) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-					"quota options when quota turned on.");
-				return 0;
-			}
-			clear_opt(sbi->s_mount_opt, QUOTA);
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
-			break;
-#else
-		case Opt_quota:
-		case Opt_usrquota:
-		case Opt_grpquota:
-			ext3_msg(sb, KERN_ERR,
-				"error: quota options not supported.");
-			break;
-		case Opt_usrjquota:
-		case Opt_grpjquota:
-		case Opt_offusrjquota:
-		case Opt_offgrpjquota:
-		case Opt_jqfmt_vfsold:
-		case Opt_jqfmt_vfsv0:
-		case Opt_jqfmt_vfsv1:
-			ext3_msg(sb, KERN_ERR,
-				"error: journaled quota options not "
-				"supported.");
-			break;
-		case Opt_noquota:
-			break;
-#endif
-		case Opt_abort:
-			set_opt(sbi->s_mount_opt, ABORT);
-			break;
-		case Opt_nobarrier:
-			clear_opt(sbi->s_mount_opt, BARRIER);
-			break;
-		case Opt_barrier:
-			if (args[0].from) {
-				if (match_int(&args[0], &option))
-					return 0;
-			} else
-				option = 1;	/* No argument, default to 1 */
-			if (option)
-				set_opt(sbi->s_mount_opt, BARRIER);
-			else
-				clear_opt(sbi->s_mount_opt, BARRIER);
-			break;
-		case Opt_ignore:
-			break;
-		case Opt_resize:
-			if (!is_remount) {
-				ext3_msg(sb, KERN_ERR,
-					"error: resize option only available "
-					"for remount");
-				return 0;
-			}
-			if (match_int(&args[0], &option) != 0)
-				return 0;
-			*n_blocks_count = option;
-			break;
-		case Opt_nobh:
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring deprecated nobh option");
-			break;
-		case Opt_bh:
-			ext3_msg(sb, KERN_WARNING,
-				"warning: ignoring deprecated bh option");
-			break;
-		default:
-			ext3_msg(sb, KERN_ERR,
-				"error: unrecognized mount option \"%s\" "
-				"or missing value", p);
-			return 0;
-		}
-	}
-#ifdef CONFIG_QUOTA
-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
-		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
-
-		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
-			ext3_msg(sb, KERN_ERR, "error: old and new quota "
-					"format mixing.");
-			return 0;
-		}
-
-		if (!sbi->s_jquota_fmt) {
-			ext3_msg(sb, KERN_ERR, "error: journaled quota format "
-					"not specified.");
-			return 0;
-		}
-	}
-#endif
-	return 1;
-}
-
-static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
-			    int read_only)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int res = 0;
-
-	if (le32_to_cpu(es->s_rev_level) > EXT3_MAX_SUPP_REV) {
-		ext3_msg(sb, KERN_ERR,
-			"error: revision level too high, "
-			"forcing read-only mode");
-		res = MS_RDONLY;
-	}
-	if (read_only)
-		return res;
-	if (!(sbi->s_mount_state & EXT3_VALID_FS))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: mounting unchecked fs, "
-			"running e2fsck is recommended");
-	else if ((sbi->s_mount_state & EXT3_ERROR_FS))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: mounting fs with errors, "
-			"running e2fsck is recommended");
-	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
-		 le16_to_cpu(es->s_mnt_count) >=
-			le16_to_cpu(es->s_max_mnt_count))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: maximal mount count reached, "
-			"running e2fsck is recommended");
-	else if (le32_to_cpu(es->s_checkinterval) &&
-		(le32_to_cpu(es->s_lastcheck) +
-			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: checktime reached, "
-			"running e2fsck is recommended");
-#if 0
-		/* @@@ We _will_ want to clear the valid bit if we find
-                   inconsistencies, to force a fsck at reboot.  But for
-                   a plain journaled filesystem we can keep it set as
-                   valid forever! :) */
-	es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
-#endif
-	if (!le16_to_cpu(es->s_max_mnt_count))
-		es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
-	le16_add_cpu(&es->s_mnt_count, 1);
-	es->s_mtime = cpu_to_le32(get_seconds());
-	ext3_update_dynamic_rev(sb);
-	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-
-	ext3_commit_super(sb, es, 1);
-	if (test_opt(sb, DEBUG))
-		ext3_msg(sb, KERN_INFO, "[bs=%lu, gc=%lu, "
-				"bpg=%lu, ipg=%lu, mo=%04lx]",
-			sb->s_blocksize,
-			sbi->s_groups_count,
-			EXT3_BLOCKS_PER_GROUP(sb),
-			EXT3_INODES_PER_GROUP(sb),
-			sbi->s_mount_opt);
-
-	if (EXT3_SB(sb)->s_journal->j_inode == NULL) {
-		char b[BDEVNAME_SIZE];
-		ext3_msg(sb, KERN_INFO, "using external journal on %s",
-			bdevname(EXT3_SB(sb)->s_journal->j_dev, b));
-	} else {
-		ext3_msg(sb, KERN_INFO, "using internal journal");
-	}
-	cleancache_init_fs(sb);
-	return res;
-}
-
-/* Called at mount-time, super-block is locked */
-static int ext3_check_descriptors(struct super_block *sb)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	int i;
-
-	ext3_debug ("Checking group descriptors");
-
-	for (i = 0; i < sbi->s_groups_count; i++) {
-		struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
-		ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
-		ext3_fsblk_t last_block;
-
-		if (i == sbi->s_groups_count - 1)
-			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
-		else
-			last_block = first_block +
-				(EXT3_BLOCKS_PER_GROUP(sb) - 1);
-
-		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
-		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Block bitmap for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_block_bitmap));
-			return 0;
-		}
-		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Inode bitmap for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_inode_bitmap));
-			return 0;
-		}
-		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
-		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
-		    last_block)
-		{
-			ext3_error (sb, "ext3_check_descriptors",
-				    "Inode table for group %d"
-				    " not in group (block %lu)!",
-				    i, (unsigned long)
-					le32_to_cpu(gdp->bg_inode_table));
-			return 0;
-		}
-	}
-
-	sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
-	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext3_count_free_inodes(sb));
-	return 1;
-}
-
-
-/* ext3_orphan_cleanup() walks a singly-linked list of inodes (starting at
- * the superblock) which were deleted from all directories, but held open by
- * a process at the time of a crash.  We walk the list and try to delete these
- * inodes at recovery time (only with a read-write filesystem).
- *
- * In order to keep the orphan inode chain consistent during traversal (in
- * case of crash during recovery), we link each inode into the superblock
- * orphan list_head and handle it the same way as an inode deletion during
- * normal operation (which journals the operations for us).
- *
- * We only do an iget() and an iput() on each inode, which is very safe if we
- * accidentally point at an in-use or already deleted inode.  The worst that
- * can happen in this case is that we get a "bit already cleared" message from
- * ext3_free_inode().  The only reason we would point at a wrong inode is if
- * e2fsck was run on this filesystem, and it must have already done the orphan
- * inode cleanup for us, so we can safely abort without any further action.
- */
-static void ext3_orphan_cleanup (struct super_block * sb,
-				 struct ext3_super_block * es)
-{
-	unsigned int s_flags = sb->s_flags;
-	int nr_orphans = 0, nr_truncates = 0;
-#ifdef CONFIG_QUOTA
-	int i;
-#endif
-	if (!es->s_last_orphan) {
-		jbd_debug(4, "no orphan inodes to clean up\n");
-		return;
-	}
-
-	if (bdev_read_only(sb->s_bdev)) {
-		ext3_msg(sb, KERN_ERR, "error: write access "
-			"unavailable, skipping orphan cleanup.");
-		return;
-	}
-
-	/* Check if feature set allows readwrite operations */
-	if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
-		ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
-			 "unknown ROCOMPAT features");
-		return;
-	}
-
-	if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
-		/* don't clear list on RO mount w/ errors */
-		if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
-			jbd_debug(1, "Errors on filesystem, "
-				  "clearing orphan list.\n");
-			es->s_last_orphan = 0;
-		}
-		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
-		return;
-	}
-
-	if (s_flags & MS_RDONLY) {
-		ext3_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
-		sb->s_flags &= ~MS_RDONLY;
-	}
-#ifdef CONFIG_QUOTA
-	/* Needed for iput() to work correctly and not trash data */
-	sb->s_flags |= MS_ACTIVE;
-	/* Turn on quotas so that they are updated correctly */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		if (EXT3_SB(sb)->s_qf_names[i]) {
-			int ret = ext3_quota_on_mount(sb, i);
-			if (ret < 0)
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot turn on journaled "
-					"quota: %d", ret);
-		}
-	}
-#endif
-
-	while (es->s_last_orphan) {
-		struct inode *inode;
-
-		inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
-		if (IS_ERR(inode)) {
-			es->s_last_orphan = 0;
-			break;
-		}
-
-		list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-		dquot_initialize(inode);
-		if (inode->i_nlink) {
-			printk(KERN_DEBUG
-				"%s: truncating inode %lu to %Ld bytes\n",
-				__func__, inode->i_ino, inode->i_size);
-			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
-				  inode->i_ino, inode->i_size);
-			ext3_truncate(inode);
-			nr_truncates++;
-		} else {
-			printk(KERN_DEBUG
-				"%s: deleting unreferenced inode %lu\n",
-				__func__, inode->i_ino);
-			jbd_debug(2, "deleting unreferenced inode %lu\n",
-				  inode->i_ino);
-			nr_orphans++;
-		}
-		iput(inode);  /* The delete magic happens here! */
-	}
-
-#define PLURAL(x) (x), ((x)==1) ? "" : "s"
-
-	if (nr_orphans)
-		ext3_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
-		       PLURAL(nr_orphans));
-	if (nr_truncates)
-		ext3_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
-		       PLURAL(nr_truncates));
-#ifdef CONFIG_QUOTA
-	/* Turn quotas off */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		if (sb_dqopt(sb)->files[i])
-			dquot_quota_off(sb, i);
-	}
-#endif
-	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
-}
-
-/*
- * Maximal file size.  There is a direct, and {,double-,triple-}indirect
- * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
- * We need to be 1 filesystem block less than the 2^32 sector limit.
- */
-static loff_t ext3_max_size(int bits)
-{
-	loff_t res = EXT3_NDIR_BLOCKS;
-	int meta_blocks;
-	loff_t upper_limit;
-
-	/* This is calculated to be the largest file size for a
-	 * dense file such that the total number of
-	 * sectors in the file, including data and all indirect blocks,
-	 * does not exceed 2^32 - 1, with the __u32 i_blocks field
-	 * representing the total number of 512-byte blocks of the file.
-	 */
-	upper_limit = (1LL << 32) - 1;
-
-	/* total blocks in file system block size */
-	upper_limit >>= (bits - 9);
-
-
-	/* indirect blocks */
-	meta_blocks = 1;
-	/* double indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2));
-	/* triple indirect blocks */
-	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
-	upper_limit -= meta_blocks;
-	upper_limit <<= bits;
-
-	res += 1LL << (bits-2);
-	res += 1LL << (2*(bits-2));
-	res += 1LL << (3*(bits-2));
-	res <<= bits;
-	if (res > upper_limit)
-		res = upper_limit;
-
-	if (res > MAX_LFS_FILESIZE)
-		res = MAX_LFS_FILESIZE;
-
-	return res;
-}
-
-static ext3_fsblk_t descriptor_loc(struct super_block *sb,
-				    ext3_fsblk_t logic_sb_block,
-				    int nr)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	unsigned long bg, first_meta_bg;
-	int has_super = 0;
-
-	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
-	    nr < first_meta_bg)
-		return (logic_sb_block + nr + 1);
-	bg = sbi->s_desc_per_block * nr;
-	if (ext3_bg_has_super(sb, bg))
-		has_super = 1;
-	return (has_super + ext3_group_first_block_no(sb, bg));
-}
-
-
-static int ext3_fill_super (struct super_block *sb, void *data, int silent)
-{
-	struct buffer_head * bh;
-	struct ext3_super_block *es = NULL;
-	struct ext3_sb_info *sbi;
-	ext3_fsblk_t block;
-	ext3_fsblk_t sb_block = get_sb_block(&data, sb);
-	ext3_fsblk_t logic_sb_block;
-	unsigned long offset = 0;
-	unsigned int journal_inum = 0;
-	unsigned long journal_devnum = 0;
-	unsigned long def_mount_opts;
-	struct inode *root;
-	int blocksize;
-	int hblock;
-	int db_count;
-	int i;
-	int needs_recovery;
-	int ret = -EINVAL;
-	__le32 features;
-	int err;
-
-	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-	if (!sbi)
-		return -ENOMEM;
-
-	sbi->s_blockgroup_lock =
-		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-	if (!sbi->s_blockgroup_lock) {
-		kfree(sbi);
-		return -ENOMEM;
-	}
-	sb->s_fs_info = sbi;
-	sbi->s_sb_block = sb_block;
-
-	blocksize = sb_min_blocksize(sb, EXT3_MIN_BLOCK_SIZE);
-	if (!blocksize) {
-		ext3_msg(sb, KERN_ERR, "error: unable to set blocksize");
-		goto out_fail;
-	}
-
-	/*
-	 * The ext3 superblock will not be buffer aligned for other than 1kB
-	 * block sizes.  We need to calculate the offset from buffer start.
-	 */
-	if (blocksize != EXT3_MIN_BLOCK_SIZE) {
-		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
-		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-	} else {
-		logic_sb_block = sb_block;
-	}
-
-	if (!(bh = sb_bread(sb, logic_sb_block))) {
-		ext3_msg(sb, KERN_ERR, "error: unable to read superblock");
-		goto out_fail;
-	}
-	/*
-	 * Note: s_es must be initialized as soon as possible because
-	 *       some ext3 macro-instructions depend on its value
-	 */
-	es = (struct ext3_super_block *) (bh->b_data + offset);
-	sbi->s_es = es;
-	sb->s_magic = le16_to_cpu(es->s_magic);
-	if (sb->s_magic != EXT3_SUPER_MAGIC)
-		goto cantfind_ext3;
-
-	/* Set defaults before we parse the mount options */
-	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-	if (def_mount_opts & EXT3_DEFM_DEBUG)
-		set_opt(sbi->s_mount_opt, DEBUG);
-	if (def_mount_opts & EXT3_DEFM_BSDGROUPS)
-		set_opt(sbi->s_mount_opt, GRPID);
-	if (def_mount_opts & EXT3_DEFM_UID16)
-		set_opt(sbi->s_mount_opt, NO_UID32);
-#ifdef CONFIG_EXT3_FS_XATTR
-	if (def_mount_opts & EXT3_DEFM_XATTR_USER)
-		set_opt(sbi->s_mount_opt, XATTR_USER);
-#endif
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	if (def_mount_opts & EXT3_DEFM_ACL)
-		set_opt(sbi->s_mount_opt, POSIX_ACL);
-#endif
-	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
-		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
-	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
-		set_opt(sbi->s_mount_opt, ORDERED_DATA);
-	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
-
-	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
-		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
-	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
-		set_opt(sbi->s_mount_opt, ERRORS_CONT);
-	else
-		set_opt(sbi->s_mount_opt, ERRORS_RO);
-
-	sbi->s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
-	sbi->s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
-
-	/* enable barriers by default */
-	set_opt(sbi->s_mount_opt, BARRIER);
-	set_opt(sbi->s_mount_opt, RESERVATION);
-
-	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
-			    NULL, 0))
-		goto failed_mount;
-
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
-
-	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
-	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
-	     EXT3_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
-	     EXT3_HAS_INCOMPAT_FEATURE(sb, ~0U)))
-		ext3_msg(sb, KERN_WARNING,
-			"warning: feature flags set on rev 0 fs, "
-			"running e2fsck is recommended");
-	/*
-	 * Check feature flags regardless of the revision level, since we
-	 * previously didn't change the revision level when setting the flags,
-	 * so there is a chance incompat flags are set on a rev 0 filesystem.
-	 */
-	features = EXT3_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP);
-	if (features) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount because of unsupported "
-			"optional features (%x)", le32_to_cpu(features));
-		goto failed_mount;
-	}
-	features = EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP);
-	if (!(sb->s_flags & MS_RDONLY) && features) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount RDWR because of unsupported "
-			"optional features (%x)", le32_to_cpu(features));
-		goto failed_mount;
-	}
-	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
-
-	if (blocksize < EXT3_MIN_BLOCK_SIZE ||
-	    blocksize > EXT3_MAX_BLOCK_SIZE) {
-		ext3_msg(sb, KERN_ERR,
-			"error: couldn't mount because of unsupported "
-			"filesystem blocksize %d", blocksize);
-		goto failed_mount;
-	}
-
-	hblock = bdev_logical_block_size(sb->s_bdev);
-	if (sb->s_blocksize != blocksize) {
-		/*
-		 * Make sure the blocksize for the filesystem is at least
-		 * as large as the hardware sector size for the device.
-		 */
-		if (blocksize < hblock) {
-			ext3_msg(sb, KERN_ERR,
-				"error: fsblocksize %d too small for "
-				"hardware sectorsize %d", blocksize, hblock);
-			goto failed_mount;
-		}
-
-		brelse (bh);
-		if (!sb_set_blocksize(sb, blocksize)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: bad blocksize %d", blocksize);
-			goto out_fail;
-		}
-		logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
-		offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
-		bh = sb_bread(sb, logic_sb_block);
-		if (!bh) {
-			ext3_msg(sb, KERN_ERR,
-			       "error: can't read superblock on 2nd try");
-			goto failed_mount;
-		}
-		es = (struct ext3_super_block *)(bh->b_data + offset);
-		sbi->s_es = es;
-		if (es->s_magic != cpu_to_le16(EXT3_SUPER_MAGIC)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: magic mismatch");
-			goto failed_mount;
-		}
-	}
-
-	sb->s_maxbytes = ext3_max_size(sb->s_blocksize_bits);
-
-	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV) {
-		sbi->s_inode_size = EXT3_GOOD_OLD_INODE_SIZE;
-		sbi->s_first_ino = EXT3_GOOD_OLD_FIRST_INO;
-	} else {
-		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
-		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
-		if ((sbi->s_inode_size < EXT3_GOOD_OLD_INODE_SIZE) ||
-		    (!is_power_of_2(sbi->s_inode_size)) ||
-		    (sbi->s_inode_size > blocksize)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: unsupported inode size: %d",
-				sbi->s_inode_size);
-			goto failed_mount;
-		}
-	}
-	sbi->s_frag_size = EXT3_MIN_FRAG_SIZE <<
-				   le32_to_cpu(es->s_log_frag_size);
-	if (blocksize != sbi->s_frag_size) {
-		ext3_msg(sb, KERN_ERR,
-		       "error: fragsize %lu != blocksize %u (unsupported)",
-		       sbi->s_frag_size, blocksize);
-		goto failed_mount;
-	}
-	sbi->s_frags_per_block = 1;
-	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
-	sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
-	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-	if (EXT3_INODE_SIZE(sb) == 0 || EXT3_INODES_PER_GROUP(sb) == 0)
-		goto cantfind_ext3;
-	sbi->s_inodes_per_block = blocksize / EXT3_INODE_SIZE(sb);
-	if (sbi->s_inodes_per_block == 0)
-		goto cantfind_ext3;
-	sbi->s_itb_per_group = sbi->s_inodes_per_group /
-					sbi->s_inodes_per_block;
-	sbi->s_desc_per_block = blocksize / sizeof(struct ext3_group_desc);
-	sbi->s_sbh = bh;
-	sbi->s_mount_state = le16_to_cpu(es->s_state);
-	sbi->s_addr_per_block_bits = ilog2(EXT3_ADDR_PER_BLOCK(sb));
-	sbi->s_desc_per_block_bits = ilog2(EXT3_DESC_PER_BLOCK(sb));
-	for (i = 0; i < 4; i++)
-		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
-	sbi->s_def_hash_version = es->s_def_hash_version;
-	i = le32_to_cpu(es->s_flags);
-	if (i & EXT2_FLAGS_UNSIGNED_HASH)
-		sbi->s_hash_unsigned = 3;
-	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
-#ifdef __CHAR_UNSIGNED__
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
-		sbi->s_hash_unsigned = 3;
-#else
-		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
-#endif
-	}
-
-	if (sbi->s_blocks_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #blocks per group too big: %lu",
-			sbi->s_blocks_per_group);
-		goto failed_mount;
-	}
-	if (sbi->s_frags_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #fragments per group too big: %lu",
-			sbi->s_frags_per_group);
-		goto failed_mount;
-	}
-	if (sbi->s_inodes_per_group > blocksize * 8) {
-		ext3_msg(sb, KERN_ERR,
-			"error: #inodes per group too big: %lu",
-			sbi->s_inodes_per_group);
-		goto failed_mount;
-	}
-
-	err = generic_check_addressable(sb->s_blocksize_bits,
-					le32_to_cpu(es->s_blocks_count));
-	if (err) {
-		ext3_msg(sb, KERN_ERR,
-			"error: filesystem is too large to mount safely");
-		if (sizeof(sector_t) < 8)
-			ext3_msg(sb, KERN_ERR,
-				"error: CONFIG_LBDAF not enabled");
-		ret = err;
-		goto failed_mount;
-	}
-
-	if (EXT3_BLOCKS_PER_GROUP(sb) == 0)
-		goto cantfind_ext3;
-	sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
-			       le32_to_cpu(es->s_first_data_block) - 1)
-				       / EXT3_BLOCKS_PER_GROUP(sb)) + 1;
-	db_count = DIV_ROUND_UP(sbi->s_groups_count, EXT3_DESC_PER_BLOCK(sb));
-	sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
-				    GFP_KERNEL);
-	if (sbi->s_group_desc == NULL) {
-		ext3_msg(sb, KERN_ERR,
-			"error: not enough memory");
-		ret = -ENOMEM;
-		goto failed_mount;
-	}
-
-	bgl_lock_init(sbi->s_blockgroup_lock);
-
-	for (i = 0; i < db_count; i++) {
-		block = descriptor_loc(sb, logic_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread(sb, block);
-		if (!sbi->s_group_desc[i]) {
-			ext3_msg(sb, KERN_ERR,
-				"error: can't read group descriptor %d", i);
-			db_count = i;
-			goto failed_mount2;
-		}
-	}
-	if (!ext3_check_descriptors (sb)) {
-		ext3_msg(sb, KERN_ERR,
-			"error: group descriptors corrupted");
-		goto failed_mount2;
-	}
-	sbi->s_gdb_count = db_count;
-	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
-	spin_lock_init(&sbi->s_next_gen_lock);
-
-	/* per-filesystem reservation list head & lock */
-	spin_lock_init(&sbi->s_rsv_window_lock);
-	sbi->s_rsv_window_root = RB_ROOT;
-	/* Add a single, static dummy reservation to the start of the
-	 * reservation window list --- it gives us a placeholder for
-	 * append-at-start-of-list which makes the allocation logic
-	 * _much_ simpler. */
-	sbi->s_rsv_window_head.rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	sbi->s_rsv_window_head.rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
-	sbi->s_rsv_window_head.rsv_alloc_hit = 0;
-	sbi->s_rsv_window_head.rsv_goal_size = 0;
-	ext3_rsv_window_add(sb, &sbi->s_rsv_window_head);
-
-	/*
-	 * set up enough so that it can read an inode
-	 */
-	sb->s_op = &ext3_sops;
-	sb->s_export_op = &ext3_export_ops;
-	sb->s_xattr = ext3_xattr_handlers;
-#ifdef CONFIG_QUOTA
-	sb->s_qcop = &ext3_qctl_operations;
-	sb->dq_op = &ext3_quota_operations;
-	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
-#endif
-	memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
-	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
-	mutex_init(&sbi->s_orphan_lock);
-	mutex_init(&sbi->s_resize_lock);
-
-	sb->s_root = NULL;
-
-	needs_recovery = (es->s_last_orphan != 0 ||
-			  EXT3_HAS_INCOMPAT_FEATURE(sb,
-				    EXT3_FEATURE_INCOMPAT_RECOVER));
-
-	/*
-	 * The first inode we look at is the journal inode.  Don't try
-	 * root first: it may be modified in the journal!
-	 */
-	if (!test_opt(sb, NOLOAD) &&
-	    EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL)) {
-		if (ext3_load_journal(sb, es, journal_devnum))
-			goto failed_mount2;
-	} else if (journal_inum) {
-		if (ext3_create_journal(sb, es, journal_inum))
-			goto failed_mount2;
-	} else {
-		if (!silent)
-			ext3_msg(sb, KERN_ERR,
-				"error: no journal found. "
-				"mounting ext3 over ext2?");
-		goto failed_mount2;
-	}
-	err = percpu_counter_init(&sbi->s_freeblocks_counter,
-			ext3_count_free_blocks(sb), GFP_KERNEL);
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext3_count_free_inodes(sb), GFP_KERNEL);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext3_count_dirs(sb), GFP_KERNEL);
-	}
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error: insufficient memory");
-		ret = err;
-		goto failed_mount3;
-	}
-
-	/* We have now updated the journal if required, so we can
-	 * validate the data journaling mode. */
-	switch (test_opt(sb, DATA_FLAGS)) {
-	case 0:
-		/* No mode set, assume a default based on the journal
-                   capabilities: ORDERED_DATA if the journal can
-                   cope, else JOURNAL_DATA */
-		if (journal_check_available_features
-		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE))
-			set_opt(sbi->s_mount_opt, DEFAULT_DATA_MODE);
-		else
-			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
-		break;
-
-	case EXT3_MOUNT_ORDERED_DATA:
-	case EXT3_MOUNT_WRITEBACK_DATA:
-		if (!journal_check_available_features
-		    (sbi->s_journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)) {
-			ext3_msg(sb, KERN_ERR,
-				"error: journal does not support "
-				"requested data journaling mode");
-			goto failed_mount3;
-		}
-	default:
-		break;
-	}
-
-	/*
-	 * The journal_load will have done any necessary log recovery,
-	 * so we can safely mount the rest of the filesystem now.
-	 */
-
-	root = ext3_iget(sb, EXT3_ROOT_INO);
-	if (IS_ERR(root)) {
-		ext3_msg(sb, KERN_ERR, "error: get root inode failed");
-		ret = PTR_ERR(root);
-		goto failed_mount3;
-	}
-	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
-		iput(root);
-		ext3_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
-		goto failed_mount3;
-	}
-	sb->s_root = d_make_root(root);
-	if (!sb->s_root) {
-		ext3_msg(sb, KERN_ERR, "error: get root dentry failed");
-		ret = -ENOMEM;
-		goto failed_mount3;
-	}
-
-	if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY))
-		sb->s_flags |= MS_RDONLY;
-
-	EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
-	ext3_orphan_cleanup(sb, es);
-	EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS;
-	if (needs_recovery) {
-		ext3_mark_recovery_complete(sb, es);
-		ext3_msg(sb, KERN_INFO, "recovery complete");
-	}
-	ext3_msg(sb, KERN_INFO, "mounted filesystem with %s data mode",
-		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ? "journal":
-		test_opt(sb,DATA_FLAGS) == EXT3_MOUNT_ORDERED_DATA ? "ordered":
-		"writeback");
-
-	return 0;
-
-cantfind_ext3:
-	if (!silent)
-		ext3_msg(sb, KERN_INFO,
-			"error: can't find ext3 filesystem on dev %s.",
-		       sb->s_id);
-	goto failed_mount;
-
-failed_mount3:
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	journal_destroy(sbi->s_journal);
-failed_mount2:
-	for (i = 0; i < db_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kfree(sbi->s_group_desc);
-failed_mount:
-#ifdef CONFIG_QUOTA
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(sbi->s_qf_names[i]);
-#endif
-	ext3_blkdev_remove(sbi);
-	brelse(bh);
-out_fail:
-	sb->s_fs_info = NULL;
-	kfree(sbi->s_blockgroup_lock);
-	kfree(sbi);
-	return ret;
-}
-
-/*
- * Setup any per-fs journal parameters now.  We'll do this both on
- * initial mount, once the journal has been initialised but before we've
- * done any recovery; and again on any subsequent remount.
- */
-static void ext3_init_journal_params(struct super_block *sb, journal_t *journal)
-{
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-
-	if (sbi->s_commit_interval)
-		journal->j_commit_interval = sbi->s_commit_interval;
-	/* We could also set up an ext3-specific default for the commit
-	 * interval here, but for now we'll just fall back to the jbd
-	 * default. */
-
-	spin_lock(&journal->j_state_lock);
-	if (test_opt(sb, BARRIER))
-		journal->j_flags |= JFS_BARRIER;
-	else
-		journal->j_flags &= ~JFS_BARRIER;
-	if (test_opt(sb, DATA_ERR_ABORT))
-		journal->j_flags |= JFS_ABORT_ON_SYNCDATA_ERR;
-	else
-		journal->j_flags &= ~JFS_ABORT_ON_SYNCDATA_ERR;
-	spin_unlock(&journal->j_state_lock);
-}
-
-static journal_t *ext3_get_journal(struct super_block *sb,
-				   unsigned int journal_inum)
-{
-	struct inode *journal_inode;
-	journal_t *journal;
-
-	/* First, test for the existence of a valid inode on disk.  Bad
-	 * things happen if we iget() an unused inode, as the subsequent
-	 * iput() will try to delete it. */
-
-	journal_inode = ext3_iget(sb, journal_inum);
-	if (IS_ERR(journal_inode)) {
-		ext3_msg(sb, KERN_ERR, "error: no journal found");
-		return NULL;
-	}
-	if (!journal_inode->i_nlink) {
-		make_bad_inode(journal_inode);
-		iput(journal_inode);
-		ext3_msg(sb, KERN_ERR, "error: journal inode is deleted");
-		return NULL;
-	}
-
-	jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
-		  journal_inode, journal_inode->i_size);
-	if (!S_ISREG(journal_inode->i_mode)) {
-		ext3_msg(sb, KERN_ERR, "error: invalid journal inode");
-		iput(journal_inode);
-		return NULL;
-	}
-
-	journal = journal_init_inode(journal_inode);
-	if (!journal) {
-		ext3_msg(sb, KERN_ERR, "error: could not load journal inode");
-		iput(journal_inode);
-		return NULL;
-	}
-	journal->j_private = sb;
-	ext3_init_journal_params(sb, journal);
-	return journal;
-}
-
-static journal_t *ext3_get_dev_journal(struct super_block *sb,
-				       dev_t j_dev)
-{
-	struct buffer_head * bh;
-	journal_t *journal;
-	ext3_fsblk_t start;
-	ext3_fsblk_t len;
-	int hblock, blocksize;
-	ext3_fsblk_t sb_block;
-	unsigned long offset;
-	struct ext3_super_block * es;
-	struct block_device *bdev;
-
-	bdev = ext3_blkdev_get(j_dev, sb);
-	if (bdev == NULL)
-		return NULL;
-
-	blocksize = sb->s_blocksize;
-	hblock = bdev_logical_block_size(bdev);
-	if (blocksize < hblock) {
-		ext3_msg(sb, KERN_ERR,
-			"error: blocksize too small for journal device");
-		goto out_bdev;
-	}
-
-	sb_block = EXT3_MIN_BLOCK_SIZE / blocksize;
-	offset = EXT3_MIN_BLOCK_SIZE % blocksize;
-	set_blocksize(bdev, blocksize);
-	if (!(bh = __bread(bdev, sb_block, blocksize))) {
-		ext3_msg(sb, KERN_ERR, "error: couldn't read superblock of "
-			"external journal");
-		goto out_bdev;
-	}
-
-	es = (struct ext3_super_block *) (bh->b_data + offset);
-	if ((le16_to_cpu(es->s_magic) != EXT3_SUPER_MAGIC) ||
-	    !(le32_to_cpu(es->s_feature_incompat) &
-	      EXT3_FEATURE_INCOMPAT_JOURNAL_DEV)) {
-		ext3_msg(sb, KERN_ERR, "error: external journal has "
-			"bad superblock");
-		brelse(bh);
-		goto out_bdev;
-	}
-
-	if (memcmp(EXT3_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
-		ext3_msg(sb, KERN_ERR, "error: journal UUID does not match");
-		brelse(bh);
-		goto out_bdev;
-	}
-
-	len = le32_to_cpu(es->s_blocks_count);
-	start = sb_block + 1;
-	brelse(bh);	/* we're done with the superblock */
-
-	journal = journal_init_dev(bdev, sb->s_bdev,
-					start, len, blocksize);
-	if (!journal) {
-		ext3_msg(sb, KERN_ERR,
-			"error: failed to create device journal");
-		goto out_bdev;
-	}
-	journal->j_private = sb;
-	if (!bh_uptodate_or_lock(journal->j_sb_buffer)) {
-		if (bh_submit_read(journal->j_sb_buffer)) {
-			ext3_msg(sb, KERN_ERR, "I/O error on journal device");
-			goto out_journal;
-		}
-	}
-	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
-		ext3_msg(sb, KERN_ERR,
-			"error: external journal has more than one "
-			"user (unsupported) - %d",
-			be32_to_cpu(journal->j_superblock->s_nr_users));
-		goto out_journal;
-	}
-	EXT3_SB(sb)->journal_bdev = bdev;
-	ext3_init_journal_params(sb, journal);
-	return journal;
-out_journal:
-	journal_destroy(journal);
-out_bdev:
-	ext3_blkdev_put(bdev);
-	return NULL;
-}
-
-static int ext3_load_journal(struct super_block *sb,
-			     struct ext3_super_block *es,
-			     unsigned long journal_devnum)
-{
-	journal_t *journal;
-	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
-	dev_t journal_dev;
-	int err = 0;
-	int really_read_only;
-
-	if (journal_devnum &&
-	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-		ext3_msg(sb, KERN_INFO, "external journal device major/minor "
-			"numbers have changed");
-		journal_dev = new_decode_dev(journal_devnum);
-	} else
-		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
-
-	really_read_only = bdev_read_only(sb->s_bdev);
-
-	/*
-	 * Are we loading a blank journal or performing recovery after a
-	 * crash?  For recovery, we need to check in advance whether we
-	 * can get read-write access to the device.
-	 */
-
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER)) {
-		if (sb->s_flags & MS_RDONLY) {
-			ext3_msg(sb, KERN_INFO,
-				"recovery required on readonly filesystem");
-			if (really_read_only) {
-				ext3_msg(sb, KERN_ERR, "error: write access "
-					"unavailable, cannot proceed");
-				return -EROFS;
-			}
-			ext3_msg(sb, KERN_INFO,
-				"write access will be enabled during recovery");
-		}
-	}
-
-	if (journal_inum && journal_dev) {
-		ext3_msg(sb, KERN_ERR, "error: filesystem has both journal "
-		       "and inode journals");
-		return -EINVAL;
-	}
-
-	if (journal_inum) {
-		if (!(journal = ext3_get_journal(sb, journal_inum)))
-			return -EINVAL;
-	} else {
-		if (!(journal = ext3_get_dev_journal(sb, journal_dev)))
-			return -EINVAL;
-	}
-
-	if (!(journal->j_flags & JFS_BARRIER))
-		printk(KERN_INFO "EXT3-fs: barriers not enabled\n");
-
-	if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
-		err = journal_update_format(journal);
-		if (err)  {
-			ext3_msg(sb, KERN_ERR, "error updating journal");
-			journal_destroy(journal);
-			return err;
-		}
-	}
-
-	if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER))
-		err = journal_wipe(journal, !really_read_only);
-	if (!err)
-		err = journal_load(journal);
-
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error loading journal");
-		journal_destroy(journal);
-		return err;
-	}
-
-	EXT3_SB(sb)->s_journal = journal;
-	ext3_clear_journal_err(sb, es);
-
-	if (!really_read_only && journal_devnum &&
-	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
-		es->s_journal_dev = cpu_to_le32(journal_devnum);
-
-		/* Make sure we flush the recovery flag to disk. */
-		ext3_commit_super(sb, es, 1);
-	}
-
-	return 0;
-}
-
-static int ext3_create_journal(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       unsigned int journal_inum)
-{
-	journal_t *journal;
-	int err;
-
-	if (sb->s_flags & MS_RDONLY) {
-		ext3_msg(sb, KERN_ERR,
-			"error: readonly filesystem when trying to "
-			"create journal");
-		return -EROFS;
-	}
-
-	journal = ext3_get_journal(sb, journal_inum);
-	if (!journal)
-		return -EINVAL;
-
-	ext3_msg(sb, KERN_INFO, "creating new journal on inode %u",
-	       journal_inum);
-
-	err = journal_create(journal);
-	if (err) {
-		ext3_msg(sb, KERN_ERR, "error creating journal");
-		journal_destroy(journal);
-		return -EIO;
-	}
-
-	EXT3_SB(sb)->s_journal = journal;
-
-	ext3_update_dynamic_rev(sb);
-	EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-	EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL);
-
-	es->s_journal_inum = cpu_to_le32(journal_inum);
-
-	/* Make sure we flush the recovery flag to disk. */
-	ext3_commit_super(sb, es, 1);
-
-	return 0;
-}
-
-static int ext3_commit_super(struct super_block *sb,
-			       struct ext3_super_block *es,
-			       int sync)
-{
-	struct buffer_head *sbh = EXT3_SB(sb)->s_sbh;
-	int error = 0;
-
-	if (!sbh)
-		return error;
-
-	if (buffer_write_io_error(sbh)) {
-		/*
-		 * Oh, dear.  A previous attempt to write the
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		ext3_msg(sb, KERN_ERR, "previous I/O error to "
-		       "superblock detected");
-		clear_buffer_write_io_error(sbh);
-		set_buffer_uptodate(sbh);
-	}
-	/*
-	 * If the file system is mounted read-only, don't update the
-	 * superblock write time.  This avoids updating the superblock
-	 * write time when we are mounting the root file system
-	 * read/only but we need to replay the journal; at that point,
-	 * for people who are east of GMT and who make their clock
-	 * tick in localtime for Windows bug-for-bug compatibility,
-	 * the clock is set in the future, and this will cause e2fsck
-	 * to complain and force a full file system check.
-	 */
-	if (!(sb->s_flags & MS_RDONLY))
-		es->s_wtime = cpu_to_le32(get_seconds());
-	es->s_free_blocks_count = cpu_to_le32(ext3_count_free_blocks(sb));
-	es->s_free_inodes_count = cpu_to_le32(ext3_count_free_inodes(sb));
-	BUFFER_TRACE(sbh, "marking dirty");
-	mark_buffer_dirty(sbh);
-	if (sync) {
-		error = sync_dirty_buffer(sbh);
-		if (buffer_write_io_error(sbh)) {
-			ext3_msg(sb, KERN_ERR, "I/O error while writing "
-			       "superblock");
-			clear_buffer_write_io_error(sbh);
-			set_buffer_uptodate(sbh);
-		}
-	}
-	return error;
-}
-
-
-/*
- * Have we just finished recovery?  If so, and if we are mounting (or
- * remounting) the filesystem readonly, then we will end up with a
- * consistent fs on disk.  Record that fact.
- */
-static void ext3_mark_recovery_complete(struct super_block * sb,
-					struct ext3_super_block * es)
-{
-	journal_t *journal = EXT3_SB(sb)->s_journal;
-
-	journal_lock_updates(journal);
-	if (journal_flush(journal) < 0)
-		goto out;
-
-	if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) &&
-	    sb->s_flags & MS_RDONLY) {
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, es, 1);
-	}
-
-out:
-	journal_unlock_updates(journal);
-}
-
-/*
- * If we are mounting (or read-write remounting) a filesystem whose journal
- * has recorded an error from a previous lifetime, move that error to the
- * main filesystem now.
- */
-static void ext3_clear_journal_err(struct super_block *sb,
-				   struct ext3_super_block *es)
-{
-	journal_t *journal;
-	int j_errno;
-	const char *errstr;
-
-	journal = EXT3_SB(sb)->s_journal;
-
-	/*
-	 * Now check for any error status which may have been recorded in the
-	 * journal by a prior ext3_error() or ext3_abort()
-	 */
-
-	j_errno = journal_errno(journal);
-	if (j_errno) {
-		char nbuf[16];
-
-		errstr = ext3_decode_error(sb, j_errno, nbuf);
-		ext3_warning(sb, __func__, "Filesystem error recorded "
-			     "from previous mount: %s", errstr);
-		ext3_warning(sb, __func__, "Marking fs in need of "
-			     "filesystem check.");
-
-		EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
-		es->s_state |= cpu_to_le16(EXT3_ERROR_FS);
-		ext3_commit_super (sb, es, 1);
-
-		journal_clear_err(journal);
-	}
-}
-
-/*
- * Force the running and committing transactions to commit,
- * and wait on the commit.
- */
-int ext3_force_commit(struct super_block *sb)
-{
-	journal_t *journal;
-	int ret;
-
-	if (sb->s_flags & MS_RDONLY)
-		return 0;
-
-	journal = EXT3_SB(sb)->s_journal;
-	ret = ext3_journal_force_commit(journal);
-	return ret;
-}
-
-static int ext3_sync_fs(struct super_block *sb, int wait)
-{
-	tid_t target;
-
-	trace_ext3_sync_fs(sb, wait);
-	/*
-	 * Writeback quota in non-journalled quota case - journalled quota has
-	 * no dirty dquots
-	 */
-	dquot_writeback_dquots(sb, -1);
-	if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
-		if (wait)
-			log_wait_commit(EXT3_SB(sb)->s_journal, target);
-	}
-	return 0;
-}
-
-/*
- * LVM calls this function before a (read-only) snapshot is created.  This
- * gives us a chance to flush the journal completely and mark the fs clean.
- */
-static int ext3_freeze(struct super_block *sb)
-{
-	int error = 0;
-	journal_t *journal;
-
-	if (!(sb->s_flags & MS_RDONLY)) {
-		journal = EXT3_SB(sb)->s_journal;
-
-		/* Now we set up the journal barrier. */
-		journal_lock_updates(journal);
-
-		/*
-		 * We don't want to clear needs_recovery flag when we failed
-		 * to flush the journal.
-		 */
-		error = journal_flush(journal);
-		if (error < 0)
-			goto out;
-
-		/* Journal blocked and flushed, clear needs_recovery flag. */
-		EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		error = ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
-		if (error)
-			goto out;
-	}
-	return 0;
-
-out:
-	journal_unlock_updates(journal);
-	return error;
-}
-
-/*
- * Called by LVM after the snapshot is done.  We need to reset the RECOVER
- * flag here, even though the filesystem is not technically dirty yet.
- */
-static int ext3_unfreeze(struct super_block *sb)
-{
-	if (!(sb->s_flags & MS_RDONLY)) {
-		/* Reset the needs_recovery flag before the fs is unlocked. */
-		EXT3_SET_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER);
-		ext3_commit_super(sb, EXT3_SB(sb)->s_es, 1);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-	}
-	return 0;
-}
-
-static int ext3_remount (struct super_block * sb, int * flags, char * data)
-{
-	struct ext3_super_block * es;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	ext3_fsblk_t n_blocks_count = 0;
-	unsigned long old_sb_flags;
-	struct ext3_mount_options old_opts;
-	int enable_quota = 0;
-	int err;
-#ifdef CONFIG_QUOTA
-	int i;
-#endif
-
-	sync_filesystem(sb);
-
-	/* Store the original options */
-	old_sb_flags = sb->s_flags;
-	old_opts.s_mount_opt = sbi->s_mount_opt;
-	old_opts.s_resuid = sbi->s_resuid;
-	old_opts.s_resgid = sbi->s_resgid;
-	old_opts.s_commit_interval = sbi->s_commit_interval;
-#ifdef CONFIG_QUOTA
-	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		if (sbi->s_qf_names[i]) {
-			old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
-							 GFP_KERNEL);
-			if (!old_opts.s_qf_names[i]) {
-				int j;
-
-				for (j = 0; j < i; j++)
-					kfree(old_opts.s_qf_names[j]);
-				return -ENOMEM;
-			}
-		} else
-			old_opts.s_qf_names[i] = NULL;
-#endif
-
-	/*
-	 * Allow the "check" option to be passed as a remount option.
-	 */
-	if (!parse_options(data, sb, NULL, NULL, &n_blocks_count, 1)) {
-		err = -EINVAL;
-		goto restore_opts;
-	}
-
-	if (test_opt(sb, ABORT))
-		ext3_abort(sb, __func__, "Abort forced by user");
-
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
-
-	es = sbi->s_es;
-
-	ext3_init_journal_params(sb, sbi->s_journal);
-
-	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
-		n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
-		if (test_opt(sb, ABORT)) {
-			err = -EROFS;
-			goto restore_opts;
-		}
-
-		if (*flags & MS_RDONLY) {
-			err = dquot_suspend(sb, -1);
-			if (err < 0)
-				goto restore_opts;
-
-			/*
-			 * First of all, the unconditional stuff we have to do
-			 * to disable replay of the journal when we next remount
-			 */
-			sb->s_flags |= MS_RDONLY;
-
-			/*
-			 * OK, test if we are remounting a valid rw partition
-			 * readonly, and if so set the rdonly flag and then
-			 * mark the partition as valid again.
-			 */
-			if (!(es->s_state & cpu_to_le16(EXT3_VALID_FS)) &&
-			    (sbi->s_mount_state & EXT3_VALID_FS))
-				es->s_state = cpu_to_le16(sbi->s_mount_state);
-
-			ext3_mark_recovery_complete(sb, es);
-		} else {
-			__le32 ret;
-			if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb,
-					~EXT3_FEATURE_RO_COMPAT_SUPP))) {
-				ext3_msg(sb, KERN_WARNING,
-					"warning: couldn't remount RDWR "
-					"because of unsupported optional "
-					"features (%x)", le32_to_cpu(ret));
-				err = -EROFS;
-				goto restore_opts;
-			}
-
-			/*
-			 * If we have an unprocessed orphan list hanging
-			 * around from a previously readonly bdev mount,
-			 * require a full umount & mount for now.
-			 */
-			if (es->s_last_orphan) {
-				ext3_msg(sb, KERN_WARNING, "warning: couldn't "
-				       "remount RDWR because of unprocessed "
-				       "orphan inode list.  Please "
-				       "umount & mount instead.");
-				err = -EINVAL;
-				goto restore_opts;
-			}
-
-			/*
-			 * Mounting a RDONLY partition read-write, so reread
-			 * and store the current valid flag.  (It may have
-			 * been changed by e2fsck since we originally mounted
-			 * the partition.)
-			 */
-			ext3_clear_journal_err(sb, es);
-			sbi->s_mount_state = le16_to_cpu(es->s_state);
-			if ((err = ext3_group_extend(sb, es, n_blocks_count)))
-				goto restore_opts;
-			if (!ext3_setup_super (sb, es, 0))
-				sb->s_flags &= ~MS_RDONLY;
-			enable_quota = 1;
-		}
-	}
-#ifdef CONFIG_QUOTA
-	/* Release old quota file names */
-	for (i = 0; i < EXT3_MAXQUOTAS; i++)
-		kfree(old_opts.s_qf_names[i]);
-#endif
-	if (enable_quota)
-		dquot_resume(sb, -1);
-	return 0;
-restore_opts:
-	sb->s_flags = old_sb_flags;
-	sbi->s_mount_opt = old_opts.s_mount_opt;
-	sbi->s_resuid = old_opts.s_resuid;
-	sbi->s_resgid = old_opts.s_resgid;
-	sbi->s_commit_interval = old_opts.s_commit_interval;
-#ifdef CONFIG_QUOTA
-	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
-	for (i = 0; i < EXT3_MAXQUOTAS; i++) {
-		kfree(sbi->s_qf_names[i]);
-		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
-	}
-#endif
-	return err;
-}
-
-static int ext3_statfs (struct dentry * dentry, struct kstatfs * buf)
-{
-	struct super_block *sb = dentry->d_sb;
-	struct ext3_sb_info *sbi = EXT3_SB(sb);
-	struct ext3_super_block *es = sbi->s_es;
-	u64 fsid;
-
-	if (test_opt(sb, MINIX_DF)) {
-		sbi->s_overhead_last = 0;
-	} else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
-		unsigned long ngroups = sbi->s_groups_count, i;
-		ext3_fsblk_t overhead = 0;
-		smp_rmb();
-
-		/*
-		 * Compute the overhead (FS structures).  This is constant
-		 * for a given filesystem unless the number of block groups
-		 * changes so we cache the previous value until it does.
-		 */
-
-		/*
-		 * All of the blocks before first_data_block are
-		 * overhead
-		 */
-		overhead = le32_to_cpu(es->s_first_data_block);
-
-		/*
-		 * Add the overhead attributed to the superblock and
-		 * block group descriptors.  If the sparse superblocks
-		 * feature is turned on, then not all groups have this.
-		 */
-		for (i = 0; i < ngroups; i++) {
-			overhead += ext3_bg_has_super(sb, i) +
-				ext3_bg_num_gdb(sb, i);
-			cond_resched();
-		}
-
-		/*
-		 * Every block group has an inode bitmap, a block
-		 * bitmap, and an inode table.
-		 */
-		overhead += ngroups * (2 + sbi->s_itb_per_group);
-
-		/* Add the internal journal blocks as well */
-		if (sbi->s_journal && !sbi->journal_bdev)
-			overhead += sbi->s_journal->j_maxlen;
-
-		sbi->s_overhead_last = overhead;
-		smp_wmb();
-		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
-	}
-
-	buf->f_type = EXT3_SUPER_MAGIC;
-	buf->f_bsize = sb->s_blocksize;
-	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
-	buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
-	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
-	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
-		buf->f_bavail = 0;
-	buf->f_files = le32_to_cpu(es->s_inodes_count);
-	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
-	buf->f_namelen = EXT3_NAME_LEN;
-	fsid = le64_to_cpup((void *)es->s_uuid) ^
-	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
-	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
-	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
-	return 0;
-}
-
-/* Helper function for writing quotas on sync - we need to start transaction before quota file
- * is locked for write. Otherwise there are possible deadlocks:
- * Process 1                         Process 2
- * ext3_create()                     quota_sync()
- *   journal_start()                   write_dquot()
- *   dquot_initialize()                       down(dqio_mutex)
- *     down(dqio_mutex)                    journal_start()
- *
- */
-
-#ifdef CONFIG_QUOTA
-
-static inline struct inode *dquot_to_inode(struct dquot *dquot)
-{
-	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_id.type];
-}
-
-static int ext3_write_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-	struct inode *inode;
-
-	inode = dquot_to_inode(dquot);
-	handle = ext3_journal_start(inode,
-					EXT3_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_commit(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_acquire_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-
-	handle = ext3_journal_start(dquot_to_inode(dquot),
-					EXT3_QUOTA_INIT_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_acquire(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_release_dquot(struct dquot *dquot)
-{
-	int ret, err;
-	handle_t *handle;
-
-	handle = ext3_journal_start(dquot_to_inode(dquot),
-					EXT3_QUOTA_DEL_BLOCKS(dquot->dq_sb));
-	if (IS_ERR(handle)) {
-		/* Release dquot anyway to avoid endless cycle in dqput() */
-		dquot_release(dquot);
-		return PTR_ERR(handle);
-	}
-	ret = dquot_release(dquot);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-static int ext3_mark_dquot_dirty(struct dquot *dquot)
-{
-	/* Are we journaling quotas? */
-	if (EXT3_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
-	    EXT3_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
-		dquot_mark_dquot_dirty(dquot);
-		return ext3_write_dquot(dquot);
-	} else {
-		return dquot_mark_dquot_dirty(dquot);
-	}
-}
-
-static int ext3_write_info(struct super_block *sb, int type)
-{
-	int ret, err;
-	handle_t *handle;
-
-	/* Data block + inode block */
-	handle = ext3_journal_start(d_inode(sb->s_root), 2);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
-	ret = dquot_commit_info(sb, type);
-	err = ext3_journal_stop(handle);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-/*
- * Turn on quotas during mount time - we need to find
- * the quota file and such...
- */
-static int ext3_quota_on_mount(struct super_block *sb, int type)
-{
-	return dquot_quota_on_mount(sb, EXT3_SB(sb)->s_qf_names[type],
-					EXT3_SB(sb)->s_jquota_fmt, type);
-}
-
-/*
- * Standard function to be called on quota_on
- */
-static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 struct path *path)
-{
-	int err;
-
-	if (!test_opt(sb, QUOTA))
-		return -EINVAL;
-
-	/* Quotafile not on the same filesystem? */
-	if (path->dentry->d_sb != sb)
-		return -EXDEV;
-	/* Journaling quota? */
-	if (EXT3_SB(sb)->s_qf_names[type]) {
-		/* Quotafile not on fs root? */
-		if (path->dentry->d_parent != sb->s_root)
-			ext3_msg(sb, KERN_WARNING,
-				"warning: Quota file not on filesystem root. "
-				"Journaled quota will not work.");
-	}
-
-	/*
-	 * When we journal data on quota file, we have to flush journal to see
-	 * all updates to the file when we bypass pagecache...
-	 */
-	if (ext3_should_journal_data(d_inode(path->dentry))) {
-		/*
-		 * We don't need to lock updates but journal_flush() could
-		 * otherwise be livelocked...
-		 */
-		journal_lock_updates(EXT3_SB(sb)->s_journal);
-		err = journal_flush(EXT3_SB(sb)->s_journal);
-		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err)
-			return err;
-	}
-
-	return dquot_quota_on(sb, type, format_id, path);
-}
-
-/* Read data from quotafile - avoid pagecache and such because we cannot afford
- * acquiring the locks... As quota files are never truncated and quota code
- * itself serializes the operations (and no one else should touch the files)
- * we don't have to be afraid of races */
-static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
-			       size_t len, loff_t off)
-{
-	struct inode *inode = sb_dqopt(sb)->files[type];
-	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
-	int tocopy;
-	size_t toread;
-	struct buffer_head *bh;
-	loff_t i_size = i_size_read(inode);
-
-	if (off > i_size)
-		return 0;
-	if (off+len > i_size)
-		len = i_size-off;
-	toread = len;
-	while (toread > 0) {
-		tocopy = sb->s_blocksize - offset < toread ?
-				sb->s_blocksize - offset : toread;
-		bh = ext3_bread(NULL, inode, blk, 0, &err);
-		if (err)
-			return err;
-		if (!bh)	/* A hole? */
-			memset(data, 0, tocopy);
-		else
-			memcpy(data, bh->b_data+offset, tocopy);
-		brelse(bh);
-		offset = 0;
-		toread -= tocopy;
-		data += tocopy;
-		blk++;
-	}
-	return len;
-}
-
-/* Write to quotafile (we know the transaction is already started and has
- * enough credits) */
-static ssize_t ext3_quota_write(struct super_block *sb, int type,
-				const char *data, size_t len, loff_t off)
-{
-	struct inode *inode = sb_dqopt(sb)->files[type];
-	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
-	int err = 0;
-	int offset = off & (sb->s_blocksize - 1);
-	int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
-	struct buffer_head *bh;
-	handle_t *handle = journal_current_handle();
-
-	if (!handle) {
-		ext3_msg(sb, KERN_WARNING,
-			"warning: quota write (off=%llu, len=%llu)"
-			" cancelled because transaction is not started.",
-			(unsigned long long)off, (unsigned long long)len);
-		return -EIO;
-	}
-
-	/*
-	 * Since we account for only one data block in the transaction credits,
-	 * it is impossible to cross a block boundary.
-	 */
-	if (sb->s_blocksize - offset < len) {
-		ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
-			" cancelled because not block aligned",
-			(unsigned long long)off, (unsigned long long)len);
-		return -EIO;
-	}
-	bh = ext3_bread(handle, inode, blk, 1, &err);
-	if (!bh)
-		goto out;
-	if (journal_quota) {
-		err = ext3_journal_get_write_access(handle, bh);
-		if (err) {
-			brelse(bh);
-			goto out;
-		}
-	}
-	lock_buffer(bh);
-	memcpy(bh->b_data+offset, data, len);
-	flush_dcache_page(bh->b_page);
-	unlock_buffer(bh);
-	if (journal_quota)
-		err = ext3_journal_dirty_metadata(handle, bh);
-	else {
-		/* Always do at least ordered writes for quotas */
-		err = ext3_journal_dirty_data(handle, bh);
-		mark_buffer_dirty(bh);
-	}
-	brelse(bh);
-out:
-	if (err)
-		return err;
-	if (inode->i_size < off + len) {
-		i_size_write(inode, off + len);
-		EXT3_I(inode)->i_disksize = inode->i_size;
-	}
-	inode->i_version++;
-	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-	ext3_mark_inode_dirty(handle, inode);
-	return len;
-}
-
-#endif
-
-static struct dentry *ext3_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data)
-{
-	return mount_bdev(fs_type, flags, dev_name, data, ext3_fill_super);
-}
-
-static struct file_system_type ext3_fs_type = {
-	.owner		= THIS_MODULE,
-	.name		= "ext3",
-	.mount		= ext3_mount,
-	.kill_sb	= kill_block_super,
-	.fs_flags	= FS_REQUIRES_DEV,
-};
-MODULE_ALIAS_FS("ext3");
-
-static int __init init_ext3_fs(void)
-{
-	int err = init_ext3_xattr();
-	if (err)
-		return err;
-	err = init_inodecache();
-	if (err)
-		goto out1;
-	err = register_filesystem(&ext3_fs_type);
-	if (err)
-		goto out;
-	return 0;
-out:
-	destroy_inodecache();
-out1:
-	exit_ext3_xattr();
-	return err;
-}
-
-static void __exit exit_ext3_fs(void)
-{
-	unregister_filesystem(&ext3_fs_type);
-	destroy_inodecache();
-	exit_ext3_xattr();
-}
-
-MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
-MODULE_DESCRIPTION("Second Extended Filesystem with journaling extensions");
-MODULE_LICENSE("GPL");
-module_init(init_ext3_fs)
-module_exit(exit_ext3_fs)
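
The quota-write comment near the end of the removed super.c above describes a lock ordering rule: the journal handle (journal_start()) must always be taken before the quota file's dqio_mutex, because ext3_create() and quota_sync() would otherwise acquire the two locks in opposite orders and deadlock. A minimal userspace sketch of that rule, with plain pthread mutexes as hypothetical stand-ins for the journal handle and dqio_mutex:

#include <pthread.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins: journal_lock models journal_start()/journal_stop(),
 * dqio_lock models dqio_mutex.  Both call paths below take journal_lock
 * first, so the cross-order deadlock described in the super.c comment
 * cannot occur.
 */
static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dqio_lock = PTHREAD_MUTEX_INITIALIZER;

/* models quota_sync() -> write_dquot() */
static void write_dquot_path(void)
{
	pthread_mutex_lock(&journal_lock);	/* "start the transaction" first */
	pthread_mutex_lock(&dqio_lock);		/* then lock the quota file */
	puts("dquot committed");
	pthread_mutex_unlock(&dqio_lock);
	pthread_mutex_unlock(&journal_lock);
}

/* models ext3_create() -> dquot_initialize() */
static void create_path(void)
{
	pthread_mutex_lock(&journal_lock);
	pthread_mutex_lock(&dqio_lock);
	puts("inode quota initialized");
	pthread_mutex_unlock(&dqio_lock);
	pthread_mutex_unlock(&journal_lock);
}

int main(void)
{
	write_dquot_path();
	create_path();
	return 0;
}
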
diff --git a/fs/ext3/symlink.c b/fs/ext3/symlink.c
deleted file mode 100644
index c08c590..0000000
--- a/fs/ext3/symlink.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- *  linux/fs/ext3/symlink.c
- *
- * Only fast symlinks left here - the rest is done by generic code. AV, 1999
- *
- * Copyright (C) 1992, 1993, 1994, 1995
- * Remy Card (card@masi.ibp.fr)
- * Laboratoire MASI - Institut Blaise Pascal
- * Universite Pierre et Marie Curie (Paris VI)
- *
- *  from
- *
- *  linux/fs/minix/symlink.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- *  ext3 symlink handling code
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-const struct inode_operations ext3_symlink_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= page_follow_link_light,
-	.put_link	= page_put_link,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-};
-
-const struct inode_operations ext3_fast_symlink_inode_operations = {
-	.readlink	= generic_readlink,
-	.follow_link	= simple_follow_link,
-	.setattr	= ext3_setattr,
-#ifdef CONFIG_EXT3_FS_XATTR
-	.setxattr	= generic_setxattr,
-	.getxattr	= generic_getxattr,
-	.listxattr	= ext3_listxattr,
-	.removexattr	= generic_removexattr,
-#endif
-};
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
deleted file mode 100644
index 7cf3650..0000000
--- a/fs/ext3/xattr.c
+++ /dev/null
@@ -1,1330 +0,0 @@
-/*
- * linux/fs/ext3/xattr.c
- *
- * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
- *
- * Fix by Harrison Xing <harrison@mountainviewdata.com>.
- * Ext3 code with a lot of help from Eric Jarman <ejarman@acm.org>.
- * Extended attributes for symlinks and special files added per
- *  suggestion of Luka Renko <luka.renko@hermes.si>.
- * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
- *  Red Hat Inc.
- * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
- *  and Andreas Gruenbacher <agruen@suse.de>.
- */
-
-/*
- * Extended attributes are stored directly in inodes (on file systems with
- * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
- * field contains the block number if an inode uses an additional block. All
- * attributes must fit in the inode and one additional block. Blocks that
- * contain the identical set of attributes may be shared among several inodes.
- * Identical blocks are detected by keeping a cache of blocks that have
- * recently been accessed.
- *
- * The attributes in inodes and on blocks have a different header; the entries
- * are stored in the same format:
- *
- *   +------------------+
- *   | header           |
- *   | entry 1          | |
- *   | entry 2          | | growing downwards
- *   | entry 3          | v
- *   | four null bytes  |
- *   | . . .            |
- *   | value 1          | ^
- *   | value 3          | | growing upwards
- *   | value 2          | |
- *   +------------------+
- *
- * The header is followed by multiple entry descriptors. In disk blocks, the
- * entry descriptors are kept sorted. In inodes, they are unsorted. The
- * attribute values are aligned to the end of the block in no specific order.
- *
- * Locking strategy
- * ----------------
- * EXT3_I(inode)->i_file_acl is protected by EXT3_I(inode)->xattr_sem.
- * EA blocks are only changed if they are exclusive to an inode, so
- * holding xattr_sem also means that nothing but the EA block's reference
- * count can change. Multiple writers to the same block are synchronized
- * by the buffer lock.
- */
-
-#include "ext3.h"
-#include <linux/mbcache.h>
-#include <linux/quotaops.h>
-#include "xattr.h"
-#include "acl.h"
-
-#define BHDR(bh) ((struct ext3_xattr_header *)((bh)->b_data))
-#define ENTRY(ptr) ((struct ext3_xattr_entry *)(ptr))
-#define BFIRST(bh) ENTRY(BHDR(bh)+1)
-#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
-
-#define IHDR(inode, raw_inode) \
-	((struct ext3_xattr_ibody_header *) \
-		((void *)raw_inode + \
-		 EXT3_GOOD_OLD_INODE_SIZE + \
-		 EXT3_I(inode)->i_extra_isize))
-#define IFIRST(hdr) ((struct ext3_xattr_entry *)((hdr)+1))
-
-#ifdef EXT3_XATTR_DEBUG
-# define ea_idebug(inode, f...) do { \
-		printk(KERN_DEBUG "inode %s:%lu: ", \
-			inode->i_sb->s_id, inode->i_ino); \
-		printk(f); \
-		printk("\n"); \
-	} while (0)
-# define ea_bdebug(bh, f...) do { \
-		char b[BDEVNAME_SIZE]; \
-		printk(KERN_DEBUG "block %s:%lu: ", \
-			bdevname(bh->b_bdev, b), \
-			(unsigned long) bh->b_blocknr); \
-		printk(f); \
-		printk("\n"); \
-	} while (0)
-#else
-# define ea_idebug(f...)
-# define ea_bdebug(f...)
-#endif
-
-static void ext3_xattr_cache_insert(struct buffer_head *);
-static struct buffer_head *ext3_xattr_cache_find(struct inode *,
-						 struct ext3_xattr_header *,
-						 struct mb_cache_entry **);
-static void ext3_xattr_rehash(struct ext3_xattr_header *,
-			      struct ext3_xattr_entry *);
-static int ext3_xattr_list(struct dentry *dentry, char *buffer,
-			   size_t buffer_size);
-
-static struct mb_cache *ext3_xattr_cache;
-
-static const struct xattr_handler *ext3_xattr_handler_map[] = {
-	[EXT3_XATTR_INDEX_USER]		     = &ext3_xattr_user_handler,
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	[EXT3_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
-	[EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
-#endif
-	[EXT3_XATTR_INDEX_TRUSTED]	     = &ext3_xattr_trusted_handler,
-#ifdef CONFIG_EXT3_FS_SECURITY
-	[EXT3_XATTR_INDEX_SECURITY]	     = &ext3_xattr_security_handler,
-#endif
-};
-
-const struct xattr_handler *ext3_xattr_handlers[] = {
-	&ext3_xattr_user_handler,
-	&ext3_xattr_trusted_handler,
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-	&posix_acl_access_xattr_handler,
-	&posix_acl_default_xattr_handler,
-#endif
-#ifdef CONFIG_EXT3_FS_SECURITY
-	&ext3_xattr_security_handler,
-#endif
-	NULL
-};
-
-static inline const struct xattr_handler *
-ext3_xattr_handler(int name_index)
-{
-	const struct xattr_handler *handler = NULL;
-
-	if (name_index > 0 && name_index < ARRAY_SIZE(ext3_xattr_handler_map))
-		handler = ext3_xattr_handler_map[name_index];
-	return handler;
-}
-
-/*
- * Inode operation listxattr()
- *
- * d_inode(dentry)->i_mutex: don't care
- */
-ssize_t
-ext3_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
-	return ext3_xattr_list(dentry, buffer, size);
-}
-
-static int
-ext3_xattr_check_names(struct ext3_xattr_entry *entry, void *end)
-{
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext3_xattr_entry *next = EXT3_XATTR_NEXT(entry);
-		if ((void *)next >= end)
-			return -EIO;
-		entry = next;
-	}
-	return 0;
-}
-
-static inline int
-ext3_xattr_check_block(struct buffer_head *bh)
-{
-	int error;
-
-	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-	    BHDR(bh)->h_blocks != cpu_to_le32(1))
-		return -EIO;
-	error = ext3_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
-	return error;
-}
-
-static inline int
-ext3_xattr_check_entry(struct ext3_xattr_entry *entry, size_t size)
-{
-	size_t value_size = le32_to_cpu(entry->e_value_size);
-
-	if (entry->e_value_block != 0 || value_size > size ||
-	    le16_to_cpu(entry->e_value_offs) + value_size > size)
-		return -EIO;
-	return 0;
-}
-
-static int
-ext3_xattr_find_entry(struct ext3_xattr_entry **pentry, int name_index,
-		      const char *name, size_t size, int sorted)
-{
-	struct ext3_xattr_entry *entry;
-	size_t name_len;
-	int cmp = 1;
-
-	if (name == NULL)
-		return -EINVAL;
-	name_len = strlen(name);
-	entry = *pentry;
-	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
-		cmp = name_index - entry->e_name_index;
-		if (!cmp)
-			cmp = name_len - entry->e_name_len;
-		if (!cmp)
-			cmp = memcmp(name, entry->e_name, name_len);
-		if (cmp <= 0 && (sorted || cmp == 0))
-			break;
-	}
-	*pentry = entry;
-	if (!cmp && ext3_xattr_check_entry(entry, size))
-			return -EIO;
-	return cmp ? -ENODATA : 0;
-}
-
-static int
-ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
-		     void *buffer, size_t buffer_size)
-{
-	struct buffer_head *bh = NULL;
-	struct ext3_xattr_entry *entry;
-	size_t size;
-	int error;
-
-	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
-		  name_index, name, buffer, (long)buffer_size);
-
-	error = -ENODATA;
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	if (!bh)
-		goto cleanup;
-	ea_bdebug(bh, "b_count=%d, refcount=%d",
-		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext3_xattr_check_block(bh)) {
-bad_block:	ext3_error(inode->i_sb, __func__,
-			   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-			   EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		goto cleanup;
-	}
-	ext3_xattr_cache_insert(bh);
-	entry = BFIRST(bh);
-	error = ext3_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
-	if (error == -EIO)
-		goto bad_block;
-	if (error)
-		goto cleanup;
-	size = le32_to_cpu(entry->e_value_size);
-	if (buffer) {
-		error = -ERANGE;
-		if (size > buffer_size)
-			goto cleanup;
-		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
-		       size);
-	}
-	error = size;
-
-cleanup:
-	brelse(bh);
-	return error;
-}
-
-static int
-ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
-		     void *buffer, size_t buffer_size)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_xattr_entry *entry;
-	struct ext3_inode *raw_inode;
-	struct ext3_iloc iloc;
-	size_t size;
-	void *end;
-	int error;
-
-	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
-		return -ENODATA;
-	error = ext3_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext3_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
-	entry = IFIRST(header);
-	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	error = ext3_xattr_check_names(entry, end);
-	if (error)
-		goto cleanup;
-	error = ext3_xattr_find_entry(&entry, name_index, name,
-				      end - (void *)entry, 0);
-	if (error)
-		goto cleanup;
-	size = le32_to_cpu(entry->e_value_size);
-	if (buffer) {
-		error = -ERANGE;
-		if (size > buffer_size)
-			goto cleanup;
-		memcpy(buffer, (void *)IFIRST(header) +
-		       le16_to_cpu(entry->e_value_offs), size);
-	}
-	error = size;
-
-cleanup:
-	brelse(iloc.bh);
-	return error;
-}
-
-/*
- * ext3_xattr_get()
- *
- * Copy an extended attribute into the buffer
- * provided, or compute the buffer size required.
- * Buffer is NULL to compute the size of the buffer required.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-int
-ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-	       void *buffer, size_t buffer_size)
-{
-	int error;
-
-	down_read(&EXT3_I(inode)->xattr_sem);
-	error = ext3_xattr_ibody_get(inode, name_index, name, buffer,
-				     buffer_size);
-	if (error == -ENODATA)
-		error = ext3_xattr_block_get(inode, name_index, name, buffer,
-					     buffer_size);
-	up_read(&EXT3_I(inode)->xattr_sem);
-	return error;
-}
-
-static int
-ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
-			char *buffer, size_t buffer_size)
-{
-	size_t rest = buffer_size;
-
-	for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
-		const struct xattr_handler *handler =
-			ext3_xattr_handler(entry->e_name_index);
-
-		if (handler) {
-			size_t size = handler->list(dentry, buffer, rest,
-						    entry->e_name,
-						    entry->e_name_len,
-						    handler->flags);
-			if (buffer) {
-				if (size > rest)
-					return -ERANGE;
-				buffer += size;
-			}
-			rest -= size;
-		}
-	}
-	return buffer_size - rest;
-}
-
-static int
-ext3_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct buffer_head *bh = NULL;
-	int error;
-
-	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
-		  buffer, (long)buffer_size);
-
-	error = 0;
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	ea_idebug(inode, "reading block %u", EXT3_I(inode)->i_file_acl);
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	error = -EIO;
-	if (!bh)
-		goto cleanup;
-	ea_bdebug(bh, "b_count=%d, refcount=%d",
-		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
-	if (ext3_xattr_check_block(bh)) {
-		ext3_error(inode->i_sb, __func__,
-			   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-			   EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		goto cleanup;
-	}
-	ext3_xattr_cache_insert(bh);
-	error = ext3_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
-cleanup:
-	brelse(bh);
-
-	return error;
-}
-
-static int
-ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	struct inode *inode = d_inode(dentry);
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_inode *raw_inode;
-	struct ext3_iloc iloc;
-	void *end;
-	int error;
-
-	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
-		return 0;
-	error = ext3_get_inode_loc(inode, &iloc);
-	if (error)
-		return error;
-	raw_inode = ext3_raw_inode(&iloc);
-	header = IHDR(inode, raw_inode);
-	end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	error = ext3_xattr_check_names(IFIRST(header), end);
-	if (error)
-		goto cleanup;
-	error = ext3_xattr_list_entries(dentry, IFIRST(header),
-					buffer, buffer_size);
-
-cleanup:
-	brelse(iloc.bh);
-	return error;
-}
-
-/*
- * ext3_xattr_list()
- *
- * Copy a list of attribute names into the buffer
- * provided, or compute the buffer size required.
- * Buffer is NULL to compute the size of the buffer required.
- *
- * Returns a negative error number on failure, or the number of bytes
- * used / required on success.
- */
-static int
-ext3_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
-{
-	int i_error, b_error;
-
-	down_read(&EXT3_I(d_inode(dentry))->xattr_sem);
-	i_error = ext3_xattr_ibody_list(dentry, buffer, buffer_size);
-	if (i_error < 0) {
-		b_error = 0;
-	} else {
-		if (buffer) {
-			buffer += i_error;
-			buffer_size -= i_error;
-		}
-		b_error = ext3_xattr_block_list(dentry, buffer, buffer_size);
-		if (b_error < 0)
-			i_error = 0;
-	}
-	up_read(&EXT3_I(d_inode(dentry))->xattr_sem);
-	return i_error + b_error;
-}
-
-/*
- * If the EXT3_FEATURE_COMPAT_EXT_ATTR feature of this file system is
- * not set, set it.
- */
-static void ext3_xattr_update_super_block(handle_t *handle,
-					  struct super_block *sb)
-{
-	if (EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR))
-		return;
-
-	if (ext3_journal_get_write_access(handle, EXT3_SB(sb)->s_sbh) == 0) {
-		EXT3_SET_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_EXT_ATTR);
-		ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
-	}
-}
-
-/*
- * Release the xattr block BH: If the reference count is > 1, decrement
- * it; otherwise free the block.
- */
-static void
-ext3_xattr_release_block(handle_t *handle, struct inode *inode,
-			 struct buffer_head *bh)
-{
-	struct mb_cache_entry *ce = NULL;
-	int error = 0;
-
-	ce = mb_cache_entry_get(ext3_xattr_cache, bh->b_bdev, bh->b_blocknr);
-	error = ext3_journal_get_write_access(handle, bh);
-	if (error)
-		 goto out;
-
-	lock_buffer(bh);
-
-	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
-		ea_bdebug(bh, "refcount now=0; freeing");
-		if (ce)
-			mb_cache_entry_free(ce);
-		ext3_free_blocks(handle, inode, bh->b_blocknr, 1);
-		get_bh(bh);
-		ext3_forget(handle, 1, inode, bh, bh->b_blocknr);
-	} else {
-		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-		error = ext3_journal_dirty_metadata(handle, bh);
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-		dquot_free_block(inode, 1);
-		ea_bdebug(bh, "refcount now=%d; releasing",
-			  le32_to_cpu(BHDR(bh)->h_refcount));
-		if (ce)
-			mb_cache_entry_release(ce);
-	}
-	unlock_buffer(bh);
-out:
-	ext3_std_error(inode->i_sb, error);
-	return;
-}
-
-struct ext3_xattr_info {
-	int name_index;
-	const char *name;
-	const void *value;
-	size_t value_len;
-};
-
-struct ext3_xattr_search {
-	struct ext3_xattr_entry *first;
-	void *base;
-	void *end;
-	struct ext3_xattr_entry *here;
-	int not_found;
-};
-
-static int
-ext3_xattr_set_entry(struct ext3_xattr_info *i, struct ext3_xattr_search *s)
-{
-	struct ext3_xattr_entry *last;
-	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
-
-	/* Compute min_offs and last. */
-	last = s->first;
-	for (; !IS_LAST_ENTRY(last); last = EXT3_XATTR_NEXT(last)) {
-		if (!last->e_value_block && last->e_value_size) {
-			size_t offs = le16_to_cpu(last->e_value_offs);
-			if (offs < min_offs)
-				min_offs = offs;
-		}
-	}
-	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
-	if (!s->not_found) {
-		if (!s->here->e_value_block && s->here->e_value_size) {
-			size_t size = le32_to_cpu(s->here->e_value_size);
-			free += EXT3_XATTR_SIZE(size);
-		}
-		free += EXT3_XATTR_LEN(name_len);
-	}
-	if (i->value) {
-		if (free < EXT3_XATTR_LEN(name_len) +
-			   EXT3_XATTR_SIZE(i->value_len))
-			return -ENOSPC;
-	}
-
-	if (i->value && s->not_found) {
-		/* Insert the new name. */
-		size_t size = EXT3_XATTR_LEN(name_len);
-		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
-		memmove((void *)s->here + size, s->here, rest);
-		memset(s->here, 0, size);
-		s->here->e_name_index = i->name_index;
-		s->here->e_name_len = name_len;
-		memcpy(s->here->e_name, i->name, name_len);
-	} else {
-		if (!s->here->e_value_block && s->here->e_value_size) {
-			void *first_val = s->base + min_offs;
-			size_t offs = le16_to_cpu(s->here->e_value_offs);
-			void *val = s->base + offs;
-			size_t size = EXT3_XATTR_SIZE(
-				le32_to_cpu(s->here->e_value_size));
-
-			if (i->value && size == EXT3_XATTR_SIZE(i->value_len)) {
-				/* The old and the new value have the same
-				   size. Just replace. */
-				s->here->e_value_size =
-					cpu_to_le32(i->value_len);
-				memset(val + size - EXT3_XATTR_PAD, 0,
-				       EXT3_XATTR_PAD); /* Clear pad bytes. */
-				memcpy(val, i->value, i->value_len);
-				return 0;
-			}
-
-			/* Remove the old value. */
-			memmove(first_val + size, first_val, val - first_val);
-			memset(first_val, 0, size);
-			s->here->e_value_size = 0;
-			s->here->e_value_offs = 0;
-			min_offs += size;
-
-			/* Adjust all value offsets. */
-			last = s->first;
-			while (!IS_LAST_ENTRY(last)) {
-				size_t o = le16_to_cpu(last->e_value_offs);
-				if (!last->e_value_block &&
-				    last->e_value_size && o < offs)
-					last->e_value_offs =
-						cpu_to_le16(o + size);
-				last = EXT3_XATTR_NEXT(last);
-			}
-		}
-		if (!i->value) {
-			/* Remove the old name. */
-			size_t size = EXT3_XATTR_LEN(name_len);
-			last = ENTRY((void *)last - size);
-			memmove(s->here, (void *)s->here + size,
-				(void *)last - (void *)s->here + sizeof(__u32));
-			memset(last, 0, size);
-		}
-	}
-
-	if (i->value) {
-		/* Insert the new value. */
-		s->here->e_value_size = cpu_to_le32(i->value_len);
-		if (i->value_len) {
-			size_t size = EXT3_XATTR_SIZE(i->value_len);
-			void *val = s->base + min_offs - size;
-			s->here->e_value_offs = cpu_to_le16(min_offs - size);
-			memset(val + size - EXT3_XATTR_PAD, 0,
-			       EXT3_XATTR_PAD); /* Clear the pad bytes. */
-			memcpy(val, i->value, i->value_len);
-		}
-	}
-	return 0;
-}
-
-struct ext3_xattr_block_find {
-	struct ext3_xattr_search s;
-	struct buffer_head *bh;
-};
-
-static int
-ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
-		      struct ext3_xattr_block_find *bs)
-{
-	struct super_block *sb = inode->i_sb;
-	int error;
-
-	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
-		  i->name_index, i->name, i->value, (long)i->value_len);
-
-	if (EXT3_I(inode)->i_file_acl) {
-		/* The inode already has an extended attribute block. */
-		bs->bh = sb_bread(sb, EXT3_I(inode)->i_file_acl);
-		error = -EIO;
-		if (!bs->bh)
-			goto cleanup;
-		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
-			atomic_read(&(bs->bh->b_count)),
-			le32_to_cpu(BHDR(bs->bh)->h_refcount));
-		if (ext3_xattr_check_block(bs->bh)) {
-			ext3_error(sb, __func__,
-				"inode %lu: bad block "E3FSBLK, inode->i_ino,
-				EXT3_I(inode)->i_file_acl);
-			error = -EIO;
-			goto cleanup;
-		}
-		/* Find the named attribute. */
-		bs->s.base = BHDR(bs->bh);
-		bs->s.first = BFIRST(bs->bh);
-		bs->s.end = bs->bh->b_data + bs->bh->b_size;
-		bs->s.here = bs->s.first;
-		error = ext3_xattr_find_entry(&bs->s.here, i->name_index,
-					      i->name, bs->bh->b_size, 1);
-		if (error && error != -ENODATA)
-			goto cleanup;
-		bs->s.not_found = error;
-	}
-	error = 0;
-
-cleanup:
-	return error;
-}
-
-static int
-ext3_xattr_block_set(handle_t *handle, struct inode *inode,
-		     struct ext3_xattr_info *i,
-		     struct ext3_xattr_block_find *bs)
-{
-	struct super_block *sb = inode->i_sb;
-	struct buffer_head *new_bh = NULL;
-	struct ext3_xattr_search *s = &bs->s;
-	struct mb_cache_entry *ce = NULL;
-	int error = 0;
-
-#define header(x) ((struct ext3_xattr_header *)(x))
-
-	if (i->value && i->value_len > sb->s_blocksize)
-		return -ENOSPC;
-	if (s->base) {
-		ce = mb_cache_entry_get(ext3_xattr_cache, bs->bh->b_bdev,
-					bs->bh->b_blocknr);
-		error = ext3_journal_get_write_access(handle, bs->bh);
-		if (error)
-			goto cleanup;
-		lock_buffer(bs->bh);
-
-		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
-			if (ce) {
-				mb_cache_entry_free(ce);
-				ce = NULL;
-			}
-			ea_bdebug(bs->bh, "modifying in-place");
-			error = ext3_xattr_set_entry(i, s);
-			if (!error) {
-				if (!IS_LAST_ENTRY(s->first))
-					ext3_xattr_rehash(header(s->base),
-							  s->here);
-				ext3_xattr_cache_insert(bs->bh);
-			}
-			unlock_buffer(bs->bh);
-			if (error == -EIO)
-				goto bad_block;
-			if (!error)
-				error = ext3_journal_dirty_metadata(handle,
-								    bs->bh);
-			if (error)
-				goto cleanup;
-			goto inserted;
-		} else {
-			int offset = (char *)s->here - bs->bh->b_data;
-
-			unlock_buffer(bs->bh);
-			journal_release_buffer(handle, bs->bh);
-
-			if (ce) {
-				mb_cache_entry_release(ce);
-				ce = NULL;
-			}
-			ea_bdebug(bs->bh, "cloning");
-			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
-			error = -ENOMEM;
-			if (s->base == NULL)
-				goto cleanup;
-			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
-			s->first = ENTRY(header(s->base)+1);
-			header(s->base)->h_refcount = cpu_to_le32(1);
-			s->here = ENTRY(s->base + offset);
-			s->end = s->base + bs->bh->b_size;
-		}
-	} else {
-		/* Allocate a buffer where we construct the new block. */
-		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
-		/* assert(header == s->base) */
-		error = -ENOMEM;
-		if (s->base == NULL)
-			goto cleanup;
-		header(s->base)->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		header(s->base)->h_blocks = cpu_to_le32(1);
-		header(s->base)->h_refcount = cpu_to_le32(1);
-		s->first = ENTRY(header(s->base)+1);
-		s->here = ENTRY(header(s->base)+1);
-		s->end = s->base + sb->s_blocksize;
-	}
-
-	error = ext3_xattr_set_entry(i, s);
-	if (error == -EIO)
-		goto bad_block;
-	if (error)
-		goto cleanup;
-	if (!IS_LAST_ENTRY(s->first))
-		ext3_xattr_rehash(header(s->base), s->here);
-
-inserted:
-	if (!IS_LAST_ENTRY(s->first)) {
-		new_bh = ext3_xattr_cache_find(inode, header(s->base), &ce);
-		if (new_bh) {
-			/* We found an identical block in the cache. */
-			if (new_bh == bs->bh)
-				ea_bdebug(new_bh, "keeping");
-			else {
-				/* The old block is released after updating
-				   the inode. */
-				error = dquot_alloc_block(inode, 1);
-				if (error)
-					goto cleanup;
-				error = ext3_journal_get_write_access(handle,
-								      new_bh);
-				if (error)
-					goto cleanup_dquot;
-				lock_buffer(new_bh);
-				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
-				ea_bdebug(new_bh, "reusing; refcount now=%d",
-					le32_to_cpu(BHDR(new_bh)->h_refcount));
-				unlock_buffer(new_bh);
-				error = ext3_journal_dirty_metadata(handle,
-								    new_bh);
-				if (error)
-					goto cleanup_dquot;
-			}
-			mb_cache_entry_release(ce);
-			ce = NULL;
-		} else if (bs->bh && s->base == bs->bh->b_data) {
-			/* We were modifying this block in-place. */
-			ea_bdebug(bs->bh, "keeping this block");
-			new_bh = bs->bh;
-			get_bh(new_bh);
-		} else {
-			/* We need to allocate a new block */
-			ext3_fsblk_t goal = ext3_group_first_block_no(sb,
-						EXT3_I(inode)->i_block_group);
-			ext3_fsblk_t block;
-
-			/*
-			 * Protect us against concurrent allocations to the
-			 * same inode from ext3_..._writepage(). Reservation
-			 * code does not expect racing allocations.
-			 */
-			mutex_lock(&EXT3_I(inode)->truncate_mutex);
-			block = ext3_new_block(handle, inode, goal, &error);
-			mutex_unlock(&EXT3_I(inode)->truncate_mutex);
-			if (error)
-				goto cleanup;
-			ea_idebug(inode, "creating block %d", block);
-
-			new_bh = sb_getblk(sb, block);
-			if (unlikely(!new_bh)) {
-getblk_failed:
-				ext3_free_blocks(handle, inode, block, 1);
-				error = -ENOMEM;
-				goto cleanup;
-			}
-			lock_buffer(new_bh);
-			error = ext3_journal_get_create_access(handle, new_bh);
-			if (error) {
-				unlock_buffer(new_bh);
-				goto getblk_failed;
-			}
-			memcpy(new_bh->b_data, s->base, new_bh->b_size);
-			set_buffer_uptodate(new_bh);
-			unlock_buffer(new_bh);
-			ext3_xattr_cache_insert(new_bh);
-			error = ext3_journal_dirty_metadata(handle, new_bh);
-			if (error)
-				goto cleanup;
-		}
-	}
-
-	/* Update the inode. */
-	EXT3_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
-
-	/* Drop the previous xattr block. */
-	if (bs->bh && bs->bh != new_bh)
-		ext3_xattr_release_block(handle, inode, bs->bh);
-	error = 0;
-
-cleanup:
-	if (ce)
-		mb_cache_entry_release(ce);
-	brelse(new_bh);
-	if (!(bs->bh && s->base == bs->bh->b_data))
-		kfree(s->base);
-
-	return error;
-
-cleanup_dquot:
-	dquot_free_block(inode, 1);
-	goto cleanup;
-
-bad_block:
-	ext3_error(inode->i_sb, __func__,
-		   "inode %lu: bad block "E3FSBLK, inode->i_ino,
-		   EXT3_I(inode)->i_file_acl);
-	goto cleanup;
-
-#undef header
-}
-
-struct ext3_xattr_ibody_find {
-	struct ext3_xattr_search s;
-	struct ext3_iloc iloc;
-};
-
-static int
-ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
-		      struct ext3_xattr_ibody_find *is)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_inode *raw_inode;
-	int error;
-
-	if (EXT3_I(inode)->i_extra_isize == 0)
-		return 0;
-	raw_inode = ext3_raw_inode(&is->iloc);
-	header = IHDR(inode, raw_inode);
-	is->s.base = is->s.first = IFIRST(header);
-	is->s.here = is->s.first;
-	is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) {
-		error = ext3_xattr_check_names(IFIRST(header), is->s.end);
-		if (error)
-			return error;
-		/* Find the named attribute. */
-		error = ext3_xattr_find_entry(&is->s.here, i->name_index,
-					      i->name, is->s.end -
-					      (void *)is->s.base, 0);
-		if (error && error != -ENODATA)
-			return error;
-		is->s.not_found = error;
-	}
-	return 0;
-}
-
-static int
-ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
-		     struct ext3_xattr_info *i,
-		     struct ext3_xattr_ibody_find *is)
-{
-	struct ext3_xattr_ibody_header *header;
-	struct ext3_xattr_search *s = &is->s;
-	int error;
-
-	if (EXT3_I(inode)->i_extra_isize == 0)
-		return -ENOSPC;
-	error = ext3_xattr_set_entry(i, s);
-	if (error)
-		return error;
-	header = IHDR(inode, ext3_raw_inode(&is->iloc));
-	if (!IS_LAST_ENTRY(s->first)) {
-		header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		ext3_set_inode_state(inode, EXT3_STATE_XATTR);
-	} else {
-		header->h_magic = cpu_to_le32(0);
-		ext3_clear_inode_state(inode, EXT3_STATE_XATTR);
-	}
-	return 0;
-}
-
-/*
- * ext3_xattr_set_handle()
- *
- * Create, replace or remove an extended attribute for this inode.  Value
- * is NULL to remove an existing extended attribute, and non-NULL to
- * either replace an existing extended attribute, or create a new extended
- * attribute. The flags XATTR_REPLACE and XATTR_CREATE
- * specify that an extended attribute must exist and must not exist
- * previous to the call, respectively.
- *
- * Returns 0, or a negative error number on failure.
- */
-int
-ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-		      const char *name, const void *value, size_t value_len,
-		      int flags)
-{
-	struct ext3_xattr_info i = {
-		.name_index = name_index,
-		.name = name,
-		.value = value,
-		.value_len = value_len,
-
-	};
-	struct ext3_xattr_ibody_find is = {
-		.s = { .not_found = -ENODATA, },
-	};
-	struct ext3_xattr_block_find bs = {
-		.s = { .not_found = -ENODATA, },
-	};
-	int error;
-
-	if (!name)
-		return -EINVAL;
-	if (strlen(name) > 255)
-		return -ERANGE;
-	down_write(&EXT3_I(inode)->xattr_sem);
-	error = ext3_get_inode_loc(inode, &is.iloc);
-	if (error)
-		goto cleanup;
-
-	error = ext3_journal_get_write_access(handle, is.iloc.bh);
-	if (error)
-		goto cleanup;
-
-	if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) {
-		struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
-		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-		ext3_clear_inode_state(inode, EXT3_STATE_NEW);
-	}
-
-	error = ext3_xattr_ibody_find(inode, &i, &is);
-	if (error)
-		goto cleanup;
-	if (is.s.not_found)
-		error = ext3_xattr_block_find(inode, &i, &bs);
-	if (error)
-		goto cleanup;
-	if (is.s.not_found && bs.s.not_found) {
-		error = -ENODATA;
-		if (flags & XATTR_REPLACE)
-			goto cleanup;
-		error = 0;
-		if (!value)
-			goto cleanup;
-	} else {
-		error = -EEXIST;
-		if (flags & XATTR_CREATE)
-			goto cleanup;
-	}
-	if (!value) {
-		if (!is.s.not_found)
-			error = ext3_xattr_ibody_set(handle, inode, &i, &is);
-		else if (!bs.s.not_found)
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-	} else {
-		error = ext3_xattr_ibody_set(handle, inode, &i, &is);
-		if (!error && !bs.s.not_found) {
-			i.value = NULL;
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-		} else if (error == -ENOSPC) {
-			if (EXT3_I(inode)->i_file_acl && !bs.s.base) {
-				error = ext3_xattr_block_find(inode, &i, &bs);
-				if (error)
-					goto cleanup;
-			}
-			error = ext3_xattr_block_set(handle, inode, &i, &bs);
-			if (error)
-				goto cleanup;
-			if (!is.s.not_found) {
-				i.value = NULL;
-				error = ext3_xattr_ibody_set(handle, inode, &i,
-							     &is);
-			}
-		}
-	}
-	if (!error) {
-		ext3_xattr_update_super_block(handle, inode->i_sb);
-		inode->i_ctime = CURRENT_TIME_SEC;
-		error = ext3_mark_iloc_dirty(handle, inode, &is.iloc);
-		/*
-		 * The bh is consumed by ext3_mark_iloc_dirty, even with
-		 * error != 0.
-		 */
-		is.iloc.bh = NULL;
-		if (IS_SYNC(inode))
-			handle->h_sync = 1;
-	}
-
-cleanup:
-	brelse(is.iloc.bh);
-	brelse(bs.bh);
-	up_write(&EXT3_I(inode)->xattr_sem);
-	return error;
-}
-
-/*
- * ext3_xattr_set()
- *
- * Like ext3_xattr_set_handle, but start from an inode. This extended
- * attribute modification is a filesystem transaction by itself.
- *
- * Returns 0, or a negative error number on failure.
- */
-int
-ext3_xattr_set(struct inode *inode, int name_index, const char *name,
-	       const void *value, size_t value_len, int flags)
-{
-	handle_t *handle;
-	int error, retries = 0;
-
-retry:
-	handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle)) {
-		error = PTR_ERR(handle);
-	} else {
-		int error2;
-
-		error = ext3_xattr_set_handle(handle, inode, name_index, name,
-					      value, value_len, flags);
-		error2 = ext3_journal_stop(handle);
-		if (error == -ENOSPC &&
-		    ext3_should_retry_alloc(inode->i_sb, &retries))
-			goto retry;
-		if (error == 0)
-			error = error2;
-	}
-
-	return error;
-}
-
-/*
- * ext3_xattr_delete_inode()
- *
- * Free extended attribute resources associated with this inode. This
- * is called immediately before an inode is freed. We have exclusive
- * access to the inode.
- */
-void
-ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-	struct buffer_head *bh = NULL;
-
-	if (!EXT3_I(inode)->i_file_acl)
-		goto cleanup;
-	bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
-	if (!bh) {
-		ext3_error(inode->i_sb, __func__,
-			"inode %lu: block "E3FSBLK" read error", inode->i_ino,
-			EXT3_I(inode)->i_file_acl);
-		goto cleanup;
-	}
-	if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
-	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-		ext3_error(inode->i_sb, __func__,
-			"inode %lu: bad block "E3FSBLK, inode->i_ino,
-			EXT3_I(inode)->i_file_acl);
-		goto cleanup;
-	}
-	ext3_xattr_release_block(handle, inode, bh);
-	EXT3_I(inode)->i_file_acl = 0;
-
-cleanup:
-	brelse(bh);
-}
-
-/*
- * ext3_xattr_put_super()
- *
- * This is called when a file system is unmounted.
- */
-void
-ext3_xattr_put_super(struct super_block *sb)
-{
-	mb_cache_shrink(sb->s_bdev);
-}
-
-/*
- * ext3_xattr_cache_insert()
- *
- * Create a new entry in the extended attribute cache, and insert
- * it unless such an entry is already in the cache.
- *
- * Returns 0, or a negative error number on failure.
- */
-static void
-ext3_xattr_cache_insert(struct buffer_head *bh)
-{
-	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-	struct mb_cache_entry *ce;
-	int error;
-
-	ce = mb_cache_entry_alloc(ext3_xattr_cache, GFP_NOFS);
-	if (!ce) {
-		ea_bdebug(bh, "out of memory");
-		return;
-	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
-	if (error) {
-		mb_cache_entry_free(ce);
-		if (error == -EBUSY) {
-			ea_bdebug(bh, "already in cache");
-			error = 0;
-		}
-	} else {
-		ea_bdebug(bh, "inserting [%x]", (int)hash);
-		mb_cache_entry_release(ce);
-	}
-}
-
-/*
- * ext3_xattr_cmp()
- *
- * Compare two extended attribute blocks for equality.
- *
- * Returns 0 if the blocks are equal, 1 if they differ, and
- * a negative error number on errors.
- */
-static int
-ext3_xattr_cmp(struct ext3_xattr_header *header1,
-	       struct ext3_xattr_header *header2)
-{
-	struct ext3_xattr_entry *entry1, *entry2;
-
-	entry1 = ENTRY(header1+1);
-	entry2 = ENTRY(header2+1);
-	while (!IS_LAST_ENTRY(entry1)) {
-		if (IS_LAST_ENTRY(entry2))
-			return 1;
-		if (entry1->e_hash != entry2->e_hash ||
-		    entry1->e_name_index != entry2->e_name_index ||
-		    entry1->e_name_len != entry2->e_name_len ||
-		    entry1->e_value_size != entry2->e_value_size ||
-		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
-			return 1;
-		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
-			return -EIO;
-		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
-			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
-			   le32_to_cpu(entry1->e_value_size)))
-			return 1;
-
-		entry1 = EXT3_XATTR_NEXT(entry1);
-		entry2 = EXT3_XATTR_NEXT(entry2);
-	}
-	if (!IS_LAST_ENTRY(entry2))
-		return 1;
-	return 0;
-}
-
-/*
- * ext3_xattr_cache_find()
- *
- * Find an identical extended attribute block.
- *
- * Returns a pointer to the block found, or NULL if such a block was
- * not found or an error occurred.
- */
-static struct buffer_head *
-ext3_xattr_cache_find(struct inode *inode, struct ext3_xattr_header *header,
-		      struct mb_cache_entry **pce)
-{
-	__u32 hash = le32_to_cpu(header->h_hash);
-	struct mb_cache_entry *ce;
-
-	if (!header->h_hash)
-		return NULL;  /* never share */
-	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
-	ce = mb_cache_entry_find_first(ext3_xattr_cache, inode->i_sb->s_bdev,
-				       hash);
-	while (ce) {
-		struct buffer_head *bh;
-
-		if (IS_ERR(ce)) {
-			if (PTR_ERR(ce) == -EAGAIN)
-				goto again;
-			break;
-		}
-		bh = sb_bread(inode->i_sb, ce->e_block);
-		if (!bh) {
-			ext3_error(inode->i_sb, __func__,
-				"inode %lu: block %lu read error",
-				inode->i_ino, (unsigned long) ce->e_block);
-		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
-				EXT3_XATTR_REFCOUNT_MAX) {
-			ea_idebug(inode, "block %lu refcount %d>=%d",
-				  (unsigned long) ce->e_block,
-				  le32_to_cpu(BHDR(bh)->h_refcount),
-					  EXT3_XATTR_REFCOUNT_MAX);
-		} else if (ext3_xattr_cmp(header, BHDR(bh)) == 0) {
-			*pce = ce;
-			return bh;
-		}
-		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
-	}
-	return NULL;
-}
-
-#define NAME_HASH_SHIFT 5
-#define VALUE_HASH_SHIFT 16
-
-/*
- * ext3_xattr_hash_entry()
- *
- * Compute the hash of an extended attribute.
- */
-static inline void ext3_xattr_hash_entry(struct ext3_xattr_header *header,
-					 struct ext3_xattr_entry *entry)
-{
-	__u32 hash = 0;
-	char *name = entry->e_name;
-	int n;
-
-	for (n=0; n < entry->e_name_len; n++) {
-		hash = (hash << NAME_HASH_SHIFT) ^
-		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
-		       *name++;
-	}
-
-	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
-		__le32 *value = (__le32 *)((char *)header +
-			le16_to_cpu(entry->e_value_offs));
-		for (n = (le32_to_cpu(entry->e_value_size) +
-		     EXT3_XATTR_ROUND) >> EXT3_XATTR_PAD_BITS; n; n--) {
-			hash = (hash << VALUE_HASH_SHIFT) ^
-			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
-			       le32_to_cpu(*value++);
-		}
-	}
-	entry->e_hash = cpu_to_le32(hash);
-}
-
-#undef NAME_HASH_SHIFT
-#undef VALUE_HASH_SHIFT
-
-#define BLOCK_HASH_SHIFT 16
-
-/*
- * ext3_xattr_rehash()
- *
- * Re-compute the extended attribute hash value after an entry has changed.
- */
-static void ext3_xattr_rehash(struct ext3_xattr_header *header,
-			      struct ext3_xattr_entry *entry)
-{
-	struct ext3_xattr_entry *here;
-	__u32 hash = 0;
-
-	ext3_xattr_hash_entry(header, entry);
-	here = ENTRY(header+1);
-	while (!IS_LAST_ENTRY(here)) {
-		if (!here->e_hash) {
-			/* Block is not shared if an entry's hash value == 0 */
-			hash = 0;
-			break;
-		}
-		hash = (hash << BLOCK_HASH_SHIFT) ^
-		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
-		       le32_to_cpu(here->e_hash);
-		here = EXT3_XATTR_NEXT(here);
-	}
-	header->h_hash = cpu_to_le32(hash);
-}
-
-#undef BLOCK_HASH_SHIFT
-
-int __init
-init_ext3_xattr(void)
-{
-	ext3_xattr_cache = mb_cache_create("ext3_xattr", 6);
-	if (!ext3_xattr_cache)
-		return -ENOMEM;
-	return 0;
-}
-
-void
-exit_ext3_xattr(void)
-{
-	if (ext3_xattr_cache)
-		mb_cache_destroy(ext3_xattr_cache);
-	ext3_xattr_cache = NULL;
-}
diff --git a/fs/ext3/xattr.h b/fs/ext3/xattr.h
deleted file mode 100644
index 32e93eb..0000000
--- a/fs/ext3/xattr.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
-  File: fs/ext3/xattr.h
-
-  On-disk format of extended attributes for the ext3 filesystem.
-
-  (C) 2001 Andreas Gruenbacher, <a.gruenbacher@computer.org>
-*/
-
-#include <linux/xattr.h>
-
-/* Magic value in attribute blocks */
-#define EXT3_XATTR_MAGIC		0xEA020000
-
-/* Maximum number of references to one attribute block */
-#define EXT3_XATTR_REFCOUNT_MAX		1024
-
-/* Name indexes */
-#define EXT3_XATTR_INDEX_USER			1
-#define EXT3_XATTR_INDEX_POSIX_ACL_ACCESS	2
-#define EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT	3
-#define EXT3_XATTR_INDEX_TRUSTED		4
-#define	EXT3_XATTR_INDEX_LUSTRE			5
-#define EXT3_XATTR_INDEX_SECURITY	        6
-
-struct ext3_xattr_header {
-	__le32	h_magic;	/* magic number for identification */
-	__le32	h_refcount;	/* reference count */
-	__le32	h_blocks;	/* number of disk blocks used */
-	__le32	h_hash;		/* hash value of all attributes */
-	__u32	h_reserved[4];	/* zero right now */
-};
-
-struct ext3_xattr_ibody_header {
-	__le32	h_magic;	/* magic number for identification */
-};
-
-struct ext3_xattr_entry {
-	__u8	e_name_len;	/* length of name */
-	__u8	e_name_index;	/* attribute name index */
-	__le16	e_value_offs;	/* offset in disk block of value */
-	__le32	e_value_block;	/* disk block attribute is stored on (n/i) */
-	__le32	e_value_size;	/* size of attribute value */
-	__le32	e_hash;		/* hash value of name and value */
-	char	e_name[0];	/* attribute name */
-};
-
-#define EXT3_XATTR_PAD_BITS		2
-#define EXT3_XATTR_PAD		(1<<EXT3_XATTR_PAD_BITS)
-#define EXT3_XATTR_ROUND		(EXT3_XATTR_PAD-1)
-#define EXT3_XATTR_LEN(name_len) \
-	(((name_len) + EXT3_XATTR_ROUND + \
-	sizeof(struct ext3_xattr_entry)) & ~EXT3_XATTR_ROUND)
-#define EXT3_XATTR_NEXT(entry) \
-	( (struct ext3_xattr_entry *)( \
-	  (char *)(entry) + EXT3_XATTR_LEN((entry)->e_name_len)) )
-#define EXT3_XATTR_SIZE(size) \
-	(((size) + EXT3_XATTR_ROUND) & ~EXT3_XATTR_ROUND)
-
-# ifdef CONFIG_EXT3_FS_XATTR
-
-extern const struct xattr_handler ext3_xattr_user_handler;
-extern const struct xattr_handler ext3_xattr_trusted_handler;
-extern const struct xattr_handler ext3_xattr_security_handler;
-
-extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
-
-extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
-extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
-
-extern void ext3_xattr_delete_inode(handle_t *, struct inode *);
-extern void ext3_xattr_put_super(struct super_block *);
-
-extern int init_ext3_xattr(void);
-extern void exit_ext3_xattr(void);
-
-extern const struct xattr_handler *ext3_xattr_handlers[];
-
-# else  /* CONFIG_EXT3_FS_XATTR */
-
-static inline int
-ext3_xattr_get(struct inode *inode, int name_index, const char *name,
-	       void *buffer, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int
-ext3_xattr_set(struct inode *inode, int name_index, const char *name,
-	       const void *value, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline int
-ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
-	       const char *name, const void *value, size_t size, int flags)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void
-ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
-{
-}
-
-static inline void
-ext3_xattr_put_super(struct super_block *sb)
-{
-}
-
-static inline int
-init_ext3_xattr(void)
-{
-	return 0;
-}
-
-static inline void
-exit_ext3_xattr(void)
-{
-}
-
-#define ext3_xattr_handlers	NULL
-
-# endif  /* CONFIG_EXT3_FS_XATTR */
-
-#ifdef CONFIG_EXT3_FS_SECURITY
-extern int ext3_init_security(handle_t *handle, struct inode *inode,
-			      struct inode *dir, const struct qstr *qstr);
-#else
-static inline int ext3_init_security(handle_t *handle, struct inode *inode,
-				     struct inode *dir, const struct qstr *qstr)
-{
-	return 0;
-}
-#endif
diff --git a/fs/ext3/xattr_security.c b/fs/ext3/xattr_security.c
deleted file mode 100644
index c9506d5..0000000
--- a/fs/ext3/xattr_security.c
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * linux/fs/ext3/xattr_security.c
- * Handler for storing security labels as extended attributes.
- */
-
-#include <linux/security.h>
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
-			 const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_SECURITY_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_SECURITY_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_security_get(struct dentry *dentry, const char *name,
-		void *buffer, size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_security_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_SECURITY,
-			      name, value, size, flags);
-}
-
-static int ext3_initxattrs(struct inode *inode,
-			   const struct xattr *xattr_array,
-			   void *fs_info)
-{
-	const struct xattr *xattr;
-	handle_t *handle = fs_info;
-	int err = 0;
-
-	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-		err = ext3_xattr_set_handle(handle, inode,
-					    EXT3_XATTR_INDEX_SECURITY,
-					    xattr->name, xattr->value,
-					    xattr->value_len, 0);
-		if (err < 0)
-			break;
-	}
-	return err;
-}
-
-int
-ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
-		   const struct qstr *qstr)
-{
-	return security_inode_init_security(inode, dir, qstr,
-					    &ext3_initxattrs, handle);
-}
-
-const struct xattr_handler ext3_xattr_security_handler = {
-	.prefix	= XATTR_SECURITY_PREFIX,
-	.list	= ext3_xattr_security_list,
-	.get	= ext3_xattr_security_get,
-	.set	= ext3_xattr_security_set,
-};
diff --git a/fs/ext3/xattr_trusted.c b/fs/ext3/xattr_trusted.c
deleted file mode 100644
index 206cc66..0000000
--- a/fs/ext3/xattr_trusted.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * linux/fs/ext3/xattr_trusted.c
- * Handler for trusted extended attributes.
- *
- * Copyright (C) 2003 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_trusted_list(struct dentry *dentry, char *list, size_t list_size,
-		const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_TRUSTED_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return 0;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_TRUSTED_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_trusted_get(struct dentry *dentry, const char *name,
-		       void *buffer, size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_trusted_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_TRUSTED, name,
-			      value, size, flags);
-}
-
-const struct xattr_handler ext3_xattr_trusted_handler = {
-	.prefix	= XATTR_TRUSTED_PREFIX,
-	.list	= ext3_xattr_trusted_list,
-	.get	= ext3_xattr_trusted_get,
-	.set	= ext3_xattr_trusted_set,
-};
diff --git a/fs/ext3/xattr_user.c b/fs/ext3/xattr_user.c
deleted file mode 100644
index 021508a..0000000
--- a/fs/ext3/xattr_user.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * linux/fs/ext3/xattr_user.c
- * Handler for extended user attributes.
- *
- * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
- */
-
-#include "ext3.h"
-#include "xattr.h"
-
-static size_t
-ext3_xattr_user_list(struct dentry *dentry, char *list, size_t list_size,
-		const char *name, size_t name_len, int type)
-{
-	const size_t prefix_len = XATTR_USER_PREFIX_LEN;
-	const size_t total_len = prefix_len + name_len + 1;
-
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return 0;
-
-	if (list && total_len <= list_size) {
-		memcpy(list, XATTR_USER_PREFIX, prefix_len);
-		memcpy(list+prefix_len, name, name_len);
-		list[prefix_len + name_len] = '\0';
-	}
-	return total_len;
-}
-
-static int
-ext3_xattr_user_get(struct dentry *dentry, const char *name, void *buffer,
-		size_t size, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return -EOPNOTSUPP;
-	return ext3_xattr_get(d_inode(dentry), EXT3_XATTR_INDEX_USER,
-			      name, buffer, size);
-}
-
-static int
-ext3_xattr_user_set(struct dentry *dentry, const char *name,
-		const void *value, size_t size, int flags, int type)
-{
-	if (strcmp(name, "") == 0)
-		return -EINVAL;
-	if (!test_opt(dentry->d_sb, XATTR_USER))
-		return -EOPNOTSUPP;
-	return ext3_xattr_set(d_inode(dentry), EXT3_XATTR_INDEX_USER,
-			      name, value, size, flags);
-}
-
-const struct xattr_handler ext3_xattr_user_handler = {
-	.prefix	= XATTR_USER_PREFIX,
-	.list	= ext3_xattr_user_list,
-	.get	= ext3_xattr_user_get,
-	.set	= ext3_xattr_user_set,
-};
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index bf8bc8a..47728da 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -1,5 +1,38 @@
+# Ext3 configs are here for backward compatibility with old configs which may
+# have EXT3_FS set but not EXT4_FS set and thus would result in non-bootable
+# kernels after the removal of the ext3 driver.
+config EXT3_FS
+	tristate "The Extended 3 (ext3) filesystem"
+	# These must match EXT4_FS selects...
+	select EXT4_FS
+	select JBD2
+	select CRC16
+	select CRYPTO
+	select CRYPTO_CRC32C
+	help
+	  This config option is here only for backward compatibility. The
+	  ext3 filesystem is now handled by the ext4 driver.
+
+config EXT3_FS_POSIX_ACL
+	bool "Ext3 POSIX Access Control Lists"
+	depends on EXT3_FS
+	select EXT4_FS_POSIX_ACL
+	select FS_POSIX_ACL
+	help
+	  This config option is here only for backward compatibility. The
+	  ext3 filesystem is now handled by the ext4 driver.
+
+config EXT3_FS_SECURITY
+	bool "Ext3 Security Labels"
+	depends on EXT3_FS
+	select EXT4_FS_SECURITY
+	help
+	  This config option is here only for backward compatibility. The
+	  ext3 filesystem is now handled by the ext4 driver.
+
 config EXT4_FS
 	tristate "The Extended 4 (ext4) filesystem"
+	# Please update EXT3_FS selects when changing these
 	select JBD2
 	select CRC16
 	select CRYPTO
@@ -16,26 +49,27 @@
 	  up fsck time.  For more information, please see the web pages at
 	  http://ext4.wiki.kernel.org.
 
-	  The ext4 filesystem will support mounting an ext3
-	  filesystem; while there will be some performance gains from
-	  the delayed allocation and inode table readahead, the best
-	  performance gains will require enabling ext4 features in the
-	  filesystem, or formatting a new filesystem as an ext4
-	  filesystem initially.
+	  The ext4 filesystem supports mounting an ext3 filesystem; while there
+	  are some performance gains from the delayed allocation and inode
+	  table readahead, the best performance gains require enabling ext4
+	  features in the filesystem using tune2fs, or formatting a new
+	  filesystem as an ext4 filesystem initially. Without explicit enabling
+	  of ext4 features, the on-disk filesystem format stays fully backward
+	  compatible.
 
 	  To compile this file system support as a module, choose M here. The
 	  module will be called ext4.
 
 	  If unsure, say N.
 
-config EXT4_USE_FOR_EXT23
+config EXT4_USE_FOR_EXT2
 	bool "Use ext4 for ext2/ext3 file systems"
 	depends on EXT4_FS
-	depends on EXT3_FS=n || EXT2_FS=n
+	depends on EXT2_FS=n
 	default y
 	help
-	  Allow the ext4 file system driver code to be used for ext2 or
-	  ext3 file system mounts.  This allows users to reduce their
+	  Allow the ext4 file system driver code to be used for ext2
+	  file system mounts.  This allows users to reduce their
 	  compiled kernel size by using one file system driver for
 	  ext2, ext3, and ext4 file systems.
 
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index 7dc4eb5..847f919 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -19,7 +19,6 @@
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
-#include <linux/key.h>
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/random.h>
@@ -329,6 +328,10 @@
 			return oname->len;
 		}
 	}
+	if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) {
+		EXT4_ERROR_INODE(inode, "encrypted inode too small");
+		return -EUCLEAN;
+	}
 	if (EXT4_I(inode)->i_crypt_info)
 		return ext4_fname_decrypt(inode, iname, oname);
 
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index 442d24e..1d510c1 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -30,7 +30,7 @@
 
 /**
  * ext4_derive_key_aes() - Derive a key using AES-128-ECB
- * @deriving_key: Encryption key used for derivatio.
+ * @deriving_key: Encryption key used for derivation.
  * @source_key:   Source key to which to apply derivation.
  * @derived_key:  Derived key.
  *
@@ -220,6 +220,8 @@
 	BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE);
 	res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
 				  raw_key);
+	if (res)
+		goto out;
 got_key:
 	ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
 	if (!ctfm || IS_ERR(ctfm)) {
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
index 02c4e5d..a640ec2 100644
--- a/fs/ext4/crypto_policy.c
+++ b/fs/ext4/crypto_policy.c
@@ -12,6 +12,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 
+#include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
 
@@ -49,7 +50,8 @@
 	struct inode *inode, const struct ext4_encryption_policy *policy)
 {
 	struct ext4_encryption_context ctx;
-	int res = 0;
+	handle_t *handle;
+	int res, res2;
 
 	res = ext4_convert_inline_data(inode);
 	if (res)
@@ -78,11 +80,22 @@
 	BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
 	get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
 
+	handle = ext4_journal_start(inode, EXT4_HT_MISC,
+				    ext4_jbd2_credits_xattr(inode));
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
 	res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
 			     EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
 			     sizeof(ctx), 0);
-	if (!res)
+	if (!res) {
 		ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+		res = ext4_mark_inode_dirty(handle, inode);
+		if (res)
+			EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
+	}
+	res2 = ext4_journal_stop(handle);
+	if (!res)
+		res = res2;
 	return res;
 }
 
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index f5e9f04..32071f5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -187,7 +187,7 @@
 } ext4_io_end_t;
 
 struct ext4_io_submit {
-	int			io_op;
+	struct writeback_control *io_wbc;
 	struct bio		*io_bio;
 	ext4_io_end_t		*io_end;
 	sector_t		io_next_block;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 173c1ae..619bfc1 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -721,7 +721,7 @@
 	struct ext4_group_desc *gdp = NULL;
 	struct ext4_inode_info *ei;
 	struct ext4_sb_info *sbi;
-	int ret2, err = 0;
+	int ret2, err;
 	struct inode *ret;
 	ext4_group_t i;
 	ext4_group_t flex_group;
@@ -769,7 +769,9 @@
 		inode->i_gid = dir->i_gid;
 	} else
 		inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
+	err = dquot_initialize(inode);
+	if (err)
+		goto out;
 
 	if (!goal)
 		goal = sbi->s_inode_goal;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cecf9aa..29f1af7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4661,8 +4661,11 @@
 	if (error)
 		return error;
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
 		handle_t *handle;
@@ -4725,6 +4728,14 @@
 				error = ext4_orphan_add(handle, inode);
 				orphan = 1;
 			}
+			/*
+			 * Update c/mtime on truncate up; ext4_truncate() will
+			 * update c/mtime in the shrink case below.
+			 */
+			if (!shrink) {
+				inode->i_mtime = ext4_current_time(inode);
+				inode->i_ctime = inode->i_mtime;
+			}
 			down_write(&EXT4_I(inode)->i_data_sem);
 			EXT4_I(inode)->i_disksize = attr->ia_size;
 			rc = ext4_mark_inode_dirty(handle, inode);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 8313ca3..6eb1a61 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -69,6 +69,7 @@
 			  ext4_fsblk_t mmp_block)
 {
 	struct mmp_struct *mmp;
+	int ret;
 
 	if (*bh)
 		clear_buffer_uptodate(*bh);
@@ -76,33 +77,36 @@
 	/* This would be sb_bread(sb, mmp_block), except we need to be sure
 	 * that the MD RAID device cache has been bypassed, and that the read
 	 * is not blocked in the elevator. */
-	if (!*bh)
+	if (!*bh) {
 		*bh = sb_getblk(sb, mmp_block);
-	if (!*bh)
-		return -ENOMEM;
-	if (*bh) {
-		get_bh(*bh);
-		lock_buffer(*bh);
-		(*bh)->b_end_io = end_buffer_read_sync;
-		submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
-		wait_on_buffer(*bh);
-		if (!buffer_uptodate(*bh)) {
-			brelse(*bh);
-			*bh = NULL;
+		if (!*bh) {
+			ret = -ENOMEM;
+			goto warn_exit;
 		}
 	}
-	if (unlikely(!*bh)) {
-		ext4_warning(sb, "Error while reading MMP block %llu",
-			     mmp_block);
-		return -EIO;
+
+	get_bh(*bh);
+	lock_buffer(*bh);
+	(*bh)->b_end_io = end_buffer_read_sync;
+	submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
+	wait_on_buffer(*bh);
+	if (!buffer_uptodate(*bh)) {
+		brelse(*bh);
+		*bh = NULL;
+		ret = -EIO;
+		goto warn_exit;
 	}
 
 	mmp = (struct mmp_struct *)((*bh)->b_data);
-	if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
-	    !ext4_mmp_csum_verify(sb, mmp))
-		return -EINVAL;
+	if (le32_to_cpu(mmp->mmp_magic) == EXT4_MMP_MAGIC &&
+	    ext4_mmp_csum_verify(sb, mmp))
+		return 0;
+	ret = -EINVAL;
 
-	return 0;
+warn_exit:
+	ext4_warning(sb, "Error %d while reading MMP block %llu",
+		     ret, mmp_block);
+	return ret;
 }
 
 /*
@@ -111,7 +115,7 @@
 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
 		    const char *function, unsigned int line, const char *msg)
 {
-	__ext4_warning(sb, function, line, msg);
+	__ext4_warning(sb, function, line, "%s", msg);
 	__ext4_warning(sb, function, line,
 		       "MMP failure info: last update time: %llu, last update "
 		       "node: %s, last update device: %s\n",
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 011dcfb..9f61e76 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2436,7 +2436,9 @@
 	struct inode *inode;
 	int err, credits, retries = 0;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2470,7 +2472,9 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2499,7 +2503,9 @@
 	struct inode *inode;
 	int err, retries = 0;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 retry:
 	inode = ext4_new_inode_start_handle(dir, mode,
@@ -2612,7 +2618,9 @@
 	if (EXT4_DIR_LINK_MAX(dir))
 		return -EMLINK;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 	credits = (EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 		   EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3);
@@ -2910,8 +2918,12 @@
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(d_inode(dentry));
+	if (retval)
+		return retval;
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -2980,8 +2992,12 @@
 	trace_ext4_unlink_enter(dir, dentry);
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	dquot_initialize(dir);
-	dquot_initialize(d_inode(dentry));
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(d_inode(dentry));
+	if (retval)
+		return retval;
 
 	retval = -ENOENT;
 	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
@@ -3066,7 +3082,9 @@
 		goto err_free_sd;
 	}
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		goto err_free_sd;
 
 	if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
 		/*
@@ -3197,7 +3215,9 @@
 	if (ext4_encrypted_inode(dir) &&
 	    !ext4_is_child_context_consistent_with_parent(dir, inode))
 		return -EPERM;
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err)
+		return err;
 
 retry:
 	handle = ext4_journal_start(dir, EXT4_HT_DIR,
@@ -3476,13 +3496,20 @@
 	int credits;
 	u8 old_file_type;
 
-	dquot_initialize(old.dir);
-	dquot_initialize(new.dir);
+	retval = dquot_initialize(old.dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new.dir);
+	if (retval)
+		return retval;
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	if (new.inode)
-		dquot_initialize(new.inode);
+	if (new.inode) {
+		retval = dquot_initialize(new.inode);
+		if (retval)
+			return retval;
+	}
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
 	if (IS_ERR(old.bh))
@@ -3678,8 +3705,12 @@
 							   new.inode)))
 		return -EPERM;
 
-	dquot_initialize(old.dir);
-	dquot_initialize(new.dir);
+	retval = dquot_initialize(old.dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new.dir);
+	if (retval)
+		return retval;
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
 				 &old.de, &old.inlined);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 5602450..84ba4d2 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -61,7 +61,6 @@
 static void ext4_finish_bio(struct bio *bio)
 {
 	int i;
-	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec;
 
 	bio_for_each_segment_all(bvec, bio, i) {
@@ -88,7 +87,7 @@
 		}
 #endif
 
-		if (error) {
+		if (bio->bi_error) {
 			SetPageError(page);
 			set_bit(AS_EIO, &page->mapping->flags);
 		}
@@ -107,7 +106,7 @@
 				continue;
 			}
 			clear_buffer_async_write(bh);
-			if (error)
+			if (bio->bi_error)
 				buffer_io_error(bh);
 		} while ((bh = bh->b_this_page) != head);
 		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
@@ -310,27 +309,25 @@
 }
 
 /* BIO completion function for page writeback */
-static void ext4_end_bio(struct bio *bio, int error)
+static void ext4_end_bio(struct bio *bio)
 {
 	ext4_io_end_t *io_end = bio->bi_private;
 	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	BUG_ON(!io_end);
 	bio->bi_end_io = NULL;
-	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = 0;
 
-	if (error) {
+	if (bio->bi_error) {
 		struct inode *inode = io_end->inode;
 
 		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
 			     "(offset %llu size %ld starting block %llu)",
-			     error, inode->i_ino,
+			     bio->bi_error, inode->i_ino,
 			     (unsigned long long) io_end->offset,
 			     (long) io_end->size,
 			     (unsigned long long)
 			     bi_sector >> (inode->i_blkbits - 9));
-		mapping_set_error(inode->i_mapping, error);
+		mapping_set_error(inode->i_mapping, bio->bi_error);
 	}
 
 	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
@@ -357,8 +354,10 @@
 	struct bio *bio = io->io_bio;
 
 	if (bio) {
+		int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
+			    WRITE_SYNC : WRITE;
 		bio_get(io->io_bio);
-		submit_bio(io->io_op, io->io_bio);
+		submit_bio(io_op, io->io_bio);
 		bio_put(io->io_bio);
 	}
 	io->io_bio = NULL;
@@ -367,7 +366,7 @@
 void ext4_io_submit_init(struct ext4_io_submit *io,
 			 struct writeback_control *wbc)
 {
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
+	io->io_wbc = wbc;
 	io->io_bio = NULL;
 	io->io_end = NULL;
 }
@@ -375,12 +374,12 @@
 static int io_submit_init_bio(struct ext4_io_submit *io,
 			      struct buffer_head *bh)
 {
-	int nvecs = bio_get_nr_vecs(bh->b_bdev);
 	struct bio *bio;
 
-	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
+	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 	if (!bio)
 		return -ENOMEM;
+	wbc_init_bio(io->io_wbc, bio);
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_end_io = ext4_end_bio;
@@ -409,6 +408,7 @@
 	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
 	if (ret != bh->b_size)
 		goto submit_and_retry;
+	wbc_account_io(io->io_wbc, page, bh->b_size);
 	io->io_next_block++;
 	return 0;
 }
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index ec3ef93..e26803f 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -98,7 +98,7 @@
  * status of that page is hard.  See end_buffer_async_read() for the details.
  * There is no point in duplicating all that complexity.
  */
-static void mpage_end_io(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio)
 {
 	struct bio_vec *bv;
 	int i;
@@ -106,7 +106,7 @@
 	if (ext4_bio_encrypted(bio)) {
 		struct ext4_crypto_ctx *ctx = bio->bi_private;
 
-		if (err) {
+		if (bio->bi_error) {
 			ext4_release_crypto_ctx(ctx);
 		} else {
 			INIT_WORK(&ctx->r.work, completion_pages);
@@ -118,7 +118,7 @@
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 
-		if (!err) {
+		if (!bio->bi_error) {
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
@@ -284,7 +284,7 @@
 					goto set_error_page;
 			}
 			bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+				min_t(int, nr_pages, BIO_MAX_PAGES));
 			if (!bio) {
 				if (ctx)
 					ext4_release_crypto_ctx(ctx);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 58987b5..ee38782 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -60,6 +60,7 @@
 static struct mutex ext4_li_mtx;
 static struct ext4_features *ext4_feat;
 static int ext4_mballoc_ready;
+static struct ratelimit_state ext4_mount_msg_ratelimit;
 
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
 			     unsigned long journal_devnum);
@@ -84,7 +85,7 @@
 static void ext4_clear_request_list(void);
 static int ext4_reserve_clusters(struct ext4_sb_info *, ext4_fsblk_t);
 
-#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
 static struct file_system_type ext2_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext2",
@@ -100,7 +101,6 @@
 #endif
 
 
-#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ext3",
@@ -111,9 +111,6 @@
 MODULE_ALIAS_FS("ext3");
 MODULE_ALIAS("ext3");
 #define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
-#else
-#define IS_EXT3_SB(sb) (0)
-#endif
 
 static int ext4_verify_csum_type(struct super_block *sb,
 				 struct ext4_super_block *es)
@@ -325,6 +322,22 @@
 	ext4_commit_super(sb, 1);
 }
 
+/*
+ * The del_gendisk() function uninitializes the disk-specific data
+ * structures, including the bdi structure, without telling anyone
+ * else.  Once this happens, any attempt to call mark_buffer_dirty()
+ * (for example, by ext4_commit_super) will cause a kernel OOPS.
+ * This is a kludge to prevent these oopses until we can put in a proper
+ * hook in del_gendisk() to inform the VFS and file system layers.
+ */
+static int block_device_ejected(struct super_block *sb)
+{
+	struct inode *bd_inode = sb->s_bdev->bd_inode;
+	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
+
+	return bdi->dev == NULL;
+}
+
 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 {
 	struct super_block		*sb = journal->j_private;
@@ -1394,9 +1407,9 @@
 	{Opt_stripe, 0, MOPT_GTE0},
 	{Opt_resuid, 0, MOPT_GTE0},
 	{Opt_resgid, 0, MOPT_GTE0},
-	{Opt_journal_dev, 0, MOPT_GTE0},
-	{Opt_journal_path, 0, MOPT_STRING},
-	{Opt_journal_ioprio, 0, MOPT_GTE0},
+	{Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0},
+	{Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING},
+	{Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0},
 	{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
 	{Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA,
@@ -3643,6 +3656,8 @@
 		}
 		if (test_opt(sb, DELALLOC))
 			clear_opt(sb, DELALLOC);
+	} else {
+		sb->s_iflags |= SB_I_CGROUPWB;
 	}
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@@ -4275,9 +4290,10 @@
 				 "the device does not support discard");
 	}
 
-	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
-		 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
+	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
+		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
+			 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
 	if (es->s_error_count)
 		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
@@ -4617,7 +4633,7 @@
 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
 	int error = 0;
 
-	if (!sbh)
+	if (!sbh || block_device_ejected(sb))
 		return error;
 	if (buffer_write_io_error(sbh)) {
 		/*
@@ -4665,7 +4681,8 @@
 	ext4_superblock_csum_set(sb);
 	mark_buffer_dirty(sbh);
 	if (sync) {
-		error = sync_dirty_buffer(sbh);
+		error = __sync_dirty_buffer(sbh,
+			test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
 		if (error)
 			return error;
 
@@ -4833,10 +4850,11 @@
 		error = jbd2_journal_flush(journal);
 		if (error < 0)
 			goto out;
+
+		/* Journal blocked and flushed, clear needs_recovery flag. */
+		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	}
 
-	/* Journal blocked and flushed, clear needs_recovery flag. */
-	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
 	error = ext4_commit_super(sb, 1);
 out:
 	if (journal)
@@ -4854,8 +4872,11 @@
 	if (sb->s_flags & MS_RDONLY)
 		return 0;
 
-	/* Reset the needs_recovery flag before the fs is unlocked. */
-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	if (EXT4_SB(sb)->s_journal) {
+		/* Reset the needs_recovery flag before the fs is unlocked. */
+		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+	}
+
 	ext4_commit_super(sb, 1);
 	return 0;
 }
@@ -5500,7 +5521,7 @@
 	return mount_bdev(fs_type, flags, dev_name, data, ext4_fill_super);
 }
 
-#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT2)
 static inline void register_as_ext2(void)
 {
 	int err = register_filesystem(&ext2_fs_type);
@@ -5530,7 +5551,6 @@
 static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
 #endif
 
-#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static inline void register_as_ext3(void)
 {
 	int err = register_filesystem(&ext3_fs_type);
@@ -5556,11 +5576,6 @@
 		return 0;
 	return 1;
 }
-#else
-static inline void register_as_ext3(void) { }
-static inline void unregister_as_ext3(void) { }
-static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; }
-#endif
 
 static struct file_system_type ext4_fs_type = {
 	.owner		= THIS_MODULE,
@@ -5610,6 +5625,7 @@
 {
 	int i, err;
 
+	ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64);
 	ext4_li_info = NULL;
 	mutex_init(&ext4_li_mtx);
 
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index c629762..b0a9dc9 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -45,7 +45,7 @@
 	default y
 	help
 	  Posix Access Control Lists (ACLs) support permissions for users and
-	  gourps beyond the owner/group/world scheme.
+	  groups beyond the owner/group/world scheme.
 
 	  To learn more about Access Control Lists, visit the POSIX ACLs for
 	  Linux website <http://acl.bestbits.at/>.
diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile
index 396be1a..08e101e 100644
--- a/fs/f2fs/Makefile
+++ b/fs/f2fs/Makefile
@@ -2,6 +2,7 @@
 
 f2fs-y		:= dir.o file.o inode.o namei.o hash.o super.o inline.o
 f2fs-y		+= checkpoint.o gc.o data.o node.o segment.o recovery.o
+f2fs-y		+= shrinker.o extent_cache.o
 f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
 f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b70bbe1..c5a38e3 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -69,14 +69,24 @@
 
 	fio.page = page;
 
-	if (f2fs_submit_page_bio(&fio))
+	if (f2fs_submit_page_bio(&fio)) {
+		f2fs_put_page(page, 1);
 		goto repeat;
+	}
 
 	lock_page(page);
 	if (unlikely(page->mapping != mapping)) {
 		f2fs_put_page(page, 1);
 		goto repeat;
 	}
+
+	/*
+	 * if there is any IO error when accessing the device, make our
+	 * filesystem readonly and make sure we do not write a checkpoint
+	 * with a non-uptodate meta page.
+	 */
+	if (unlikely(!PageUptodate(page)))
+		f2fs_stop_checkpoint(sbi);
 out:
 	return page;
 }
@@ -326,26 +336,18 @@
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
 	struct inode_management *im = &sbi->im[type];
-	struct ino_entry *e;
+	struct ino_entry *e, *tmp;
+
+	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
 retry:
-	if (radix_tree_preload(GFP_NOFS)) {
-		cond_resched();
-		goto retry;
-	}
+	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
 
 	spin_lock(&im->ino_lock);
-
 	e = radix_tree_lookup(&im->ino_root, ino);
 	if (!e) {
-		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
-		if (!e) {
-			spin_unlock(&im->ino_lock);
-			radix_tree_preload_end();
-			goto retry;
-		}
+		e = tmp;
 		if (radix_tree_insert(&im->ino_root, ino, e)) {
 			spin_unlock(&im->ino_lock);
-			kmem_cache_free(ino_entry_slab, e);
 			radix_tree_preload_end();
 			goto retry;
 		}
@@ -358,6 +360,9 @@
 	}
 	spin_unlock(&im->ino_lock);
 	radix_tree_preload_end();
+
+	if (e != tmp)
+		kmem_cache_free(ino_entry_slab, tmp);
 }
 
 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -458,24 +463,34 @@
 	__remove_ino_entry(sbi, ino, ORPHAN_INO);
 }
 
-static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
 {
-	struct inode *inode = f2fs_iget(sbi->sb, ino);
-	f2fs_bug_on(sbi, IS_ERR(inode));
+	struct inode *inode;
+
+	inode = f2fs_iget(sbi->sb, ino);
+	if (IS_ERR(inode)) {
+		/*
+		 * there should be a bug that we can't find the entry
+		 * to orphan inode.
+		 */
+		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
+		return PTR_ERR(inode);
+	}
+
 	clear_nlink(inode);
 
 	/* truncate all the data during iput */
 	iput(inode);
+	return 0;
 }
 
-void recover_orphan_inodes(struct f2fs_sb_info *sbi)
+int recover_orphan_inodes(struct f2fs_sb_info *sbi)
 {
 	block_t start_blk, orphan_blocks, i, j;
+	int err;
 
 	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
-		return;
-
-	set_sbi_flag(sbi, SBI_POR_DOING);
+		return 0;
 
 	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
 	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
@@ -489,14 +504,17 @@
 		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
 		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
 			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
-			recover_orphan_inode(sbi, ino);
+			err = recover_orphan_inode(sbi, ino);
+			if (err) {
+				f2fs_put_page(page, 1);
+				return err;
+			}
 		}
 		f2fs_put_page(page, 1);
 	}
 	/* clear Orphan Flag */
 	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-	clear_sbi_flag(sbi, SBI_POR_DOING);
-	return;
+	return 0;
 }
 
 static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
@@ -504,7 +522,7 @@
 	struct list_head *head;
 	struct f2fs_orphan_block *orphan_blk = NULL;
 	unsigned int nentries = 0;
-	unsigned short index;
+	unsigned short index = 1;
 	unsigned short orphan_blocks;
 	struct page *page = NULL;
 	struct ino_entry *orphan = NULL;
@@ -512,11 +530,6 @@
 
 	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);
 
-	for (index = 0; index < orphan_blocks; index++)
-		grab_meta_page(sbi, start_blk + index);
-
-	index = 1;
-
 	/*
 	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
 	 * orphan inode operations are covered under f2fs_lock_op().
@@ -527,12 +540,10 @@
 	/* loop for each orphan inode entry and write them in Jornal block */
 	list_for_each_entry(orphan, head, list) {
 		if (!page) {
-			page = find_get_page(META_MAPPING(sbi), start_blk++);
-			f2fs_bug_on(sbi, !page);
+			page = grab_meta_page(sbi, start_blk++);
 			orphan_blk =
 				(struct f2fs_orphan_block *)page_address(page);
 			memset(orphan_blk, 0, sizeof(*orphan_blk));
-			f2fs_put_page(page, 0);
 		}
 
 		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
@@ -704,7 +715,8 @@
 	struct inode_entry *new;
 	int ret = 0;
 
-	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+			!S_ISLNK(inode->i_mode))
 		return;
 
 	if (!S_ISDIR(inode->i_mode)) {
@@ -892,12 +904,15 @@
 	__u32 crc32 = 0;
 	int i;
 	int cp_payload_blks = __cp_payload(sbi);
+	block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
+	bool invalidate = false;
 
 	/*
 	 * This avoids to conduct wrong roll-forward operations and uses
 	 * metapages, so should be called prior to sync_meta_pages below.
 	 */
-	discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
+	if (discard_next_dnode(sbi, discard_blk))
+		invalidate = true;
 
 	/* Flush all the NAT/SIT pages */
 	while (get_pages(sbi, F2FS_DIRTY_META)) {
@@ -1026,6 +1041,14 @@
 	/* wait for previous submitted meta pages writeback */
 	wait_on_all_pages_writeback(sbi);
 
+	/*
+	 * invalidate the meta page which is used temporarily for zeroing out
+	 * the block at the end of the warm node chain.
+	 */
+	if (invalidate)
+		invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
+								discard_blk);
+
 	release_dirty_inode(sbi);
 
 	if (unlikely(f2fs_cp_error(sbi)))
diff --git a/fs/f2fs/crypto_key.c b/fs/f2fs/crypto_key.c
index 95b8f93..9f77de2 100644
--- a/fs/f2fs/crypto_key.c
+++ b/fs/f2fs/crypto_key.c
@@ -92,8 +92,7 @@
 	if (!ci)
 		return;
 
-	if (ci->ci_keyring_key)
-		key_put(ci->ci_keyring_key);
+	key_put(ci->ci_keyring_key);
 	crypto_free_ablkcipher(ci->ci_ctfm);
 	kmem_cache_free(f2fs_crypt_info_cachep, ci);
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f71e19a..a82abe9 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -14,6 +14,7 @@
 #include <linux/mpage.h>
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
+#include <linux/pagevec.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/prefetch.h>
@@ -26,16 +27,13 @@
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
-static struct kmem_cache *extent_tree_slab;
-static struct kmem_cache *extent_node_slab;
-
-static void f2fs_read_end_io(struct bio *bio, int err)
+static void f2fs_read_end_io(struct bio *bio)
 {
 	struct bio_vec *bvec;
 	int i;
 
 	if (f2fs_bio_encrypted(bio)) {
-		if (err) {
+		if (bio->bi_error) {
 			f2fs_release_crypto_ctx(bio->bi_private);
 		} else {
 			f2fs_end_io_crypto_work(bio->bi_private, bio);
@@ -46,7 +44,7 @@
 	bio_for_each_segment_all(bvec, bio, i) {
 		struct page *page = bvec->bv_page;
 
-		if (!err) {
+		if (!bio->bi_error) {
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
@@ -57,7 +55,7 @@
 	bio_put(bio);
 }
 
-static void f2fs_write_end_io(struct bio *bio, int err)
+static void f2fs_write_end_io(struct bio *bio)
 {
 	struct f2fs_sb_info *sbi = bio->bi_private;
 	struct bio_vec *bvec;
@@ -68,7 +66,7 @@
 
 		f2fs_restore_and_release_control_page(&page);
 
-		if (unlikely(err)) {
+		if (unlikely(bio->bi_error)) {
 			set_page_dirty(page);
 			set_bit(AS_EIO, &page->mapping->flags);
 			f2fs_stop_checkpoint(sbi);
@@ -92,8 +90,7 @@
 {
 	struct bio *bio;
 
-	/* No failure on bio allocation */
-	bio = bio_alloc(GFP_NOIO, npages);
+	bio = f2fs_bio_alloc(npages);
 
 	bio->bi_bdev = sbi->sb->s_bdev;
 	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
@@ -158,7 +155,6 @@
 
 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
 		bio_put(bio);
-		f2fs_put_page(page, 1);
 		return -EFAULT;
 	}
 
@@ -266,645 +262,17 @@
 	return err;
 }
 
-static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
+int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
 {
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	pgoff_t start_fofs, end_fofs;
-	block_t start_blkaddr;
-
-	read_lock(&fi->ext_lock);
-	if (fi->ext.len == 0) {
-		read_unlock(&fi->ext_lock);
-		return false;
-	}
-
-	stat_inc_total_hit(inode->i_sb);
-
-	start_fofs = fi->ext.fofs;
-	end_fofs = fi->ext.fofs + fi->ext.len - 1;
-	start_blkaddr = fi->ext.blk;
-
-	if (pgofs >= start_fofs && pgofs <= end_fofs) {
-		*ei = fi->ext;
-		stat_inc_read_hit(inode->i_sb);
-		read_unlock(&fi->ext_lock);
-		return true;
-	}
-	read_unlock(&fi->ext_lock);
-	return false;
-}
-
-static bool update_extent_info(struct inode *inode, pgoff_t fofs,
-								block_t blkaddr)
-{
-	struct f2fs_inode_info *fi = F2FS_I(inode);
-	pgoff_t start_fofs, end_fofs;
-	block_t start_blkaddr, end_blkaddr;
-	int need_update = true;
-
-	write_lock(&fi->ext_lock);
-
-	start_fofs = fi->ext.fofs;
-	end_fofs = fi->ext.fofs + fi->ext.len - 1;
-	start_blkaddr = fi->ext.blk;
-	end_blkaddr = fi->ext.blk + fi->ext.len - 1;
-
-	/* Drop and initialize the matched extent */
-	if (fi->ext.len == 1 && fofs == start_fofs)
-		fi->ext.len = 0;
-
-	/* Initial extent */
-	if (fi->ext.len == 0) {
-		if (blkaddr != NULL_ADDR) {
-			fi->ext.fofs = fofs;
-			fi->ext.blk = blkaddr;
-			fi->ext.len = 1;
-		}
-		goto end_update;
-	}
-
-	/* Front merge */
-	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
-		fi->ext.fofs--;
-		fi->ext.blk--;
-		fi->ext.len++;
-		goto end_update;
-	}
-
-	/* Back merge */
-	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
-		fi->ext.len++;
-		goto end_update;
-	}
-
-	/* Split the existing extent */
-	if (fi->ext.len > 1 &&
-		fofs >= start_fofs && fofs <= end_fofs) {
-		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
-			fi->ext.len = fofs - start_fofs;
-		} else {
-			fi->ext.fofs = fofs + 1;
-			fi->ext.blk = start_blkaddr + fofs - start_fofs + 1;
-			fi->ext.len -= fofs - start_fofs + 1;
-		}
-	} else {
-		need_update = false;
-	}
-
-	/* Finally, if the extent is very fragmented, let's drop the cache. */
-	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
-		fi->ext.len = 0;
-		set_inode_flag(fi, FI_NO_EXTENT);
-		need_update = true;
-	}
-end_update:
-	write_unlock(&fi->ext_lock);
-	return need_update;
-}
-
-static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_info *ei,
-				struct rb_node *parent, struct rb_node **p)
-{
-	struct extent_node *en;
-
-	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
-	if (!en)
-		return NULL;
-
-	en->ei = *ei;
-	INIT_LIST_HEAD(&en->list);
-
-	rb_link_node(&en->rb_node, parent, p);
-	rb_insert_color(&en->rb_node, &et->root);
-	et->count++;
-	atomic_inc(&sbi->total_ext_node);
-	return en;
-}
-
-static void __detach_extent_node(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	rb_erase(&en->rb_node, &et->root);
-	et->count--;
-	atomic_dec(&sbi->total_ext_node);
-
-	if (et->cached_en == en)
-		et->cached_en = NULL;
-}
-
-static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
-							nid_t ino)
-{
-	struct extent_tree *et;
-
-	down_read(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-	if (!et) {
-		up_read(&sbi->extent_tree_lock);
-		return NULL;
-	}
-	atomic_inc(&et->refcount);
-	up_read(&sbi->extent_tree_lock);
-
-	return et;
-}
-
-static struct extent_tree *__grab_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	nid_t ino = inode->i_ino;
-
-	down_write(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
-	if (!et) {
-		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
-		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
-		memset(et, 0, sizeof(struct extent_tree));
-		et->ino = ino;
-		et->root = RB_ROOT;
-		et->cached_en = NULL;
-		rwlock_init(&et->lock);
-		atomic_set(&et->refcount, 0);
-		et->count = 0;
-		sbi->total_ext_tree++;
-	}
-	atomic_inc(&et->refcount);
-	up_write(&sbi->extent_tree_lock);
-
-	return et;
-}
-
-static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
-							unsigned int fofs)
-{
-	struct rb_node *node = et->root.rb_node;
-	struct extent_node *en;
-
-	if (et->cached_en) {
-		struct extent_info *cei = &et->cached_en->ei;
-
-		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
-			return et->cached_en;
-	}
-
-	while (node) {
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (fofs < en->ei.fofs) {
-			node = node->rb_left;
-		} else if (fofs >= en->ei.fofs + en->ei.len) {
-			node = node->rb_right;
-		} else {
-			et->cached_en = en;
-			return en;
-		}
-	}
-	return NULL;
-}
-
-static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	struct extent_node *prev;
-	struct rb_node *node;
-
-	node = rb_prev(&en->rb_node);
-	if (!node)
-		return NULL;
-
-	prev = rb_entry(node, struct extent_node, rb_node);
-	if (__is_back_mergeable(&en->ei, &prev->ei)) {
-		en->ei.fofs = prev->ei.fofs;
-		en->ei.blk = prev->ei.blk;
-		en->ei.len += prev->ei.len;
-		__detach_extent_node(sbi, et, prev);
-		return prev;
-	}
-	return NULL;
-}
-
-static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_node *en)
-{
-	struct extent_node *next;
-	struct rb_node *node;
-
-	node = rb_next(&en->rb_node);
-	if (!node)
-		return NULL;
-
-	next = rb_entry(node, struct extent_node, rb_node);
-	if (__is_front_mergeable(&en->ei, &next->ei)) {
-		en->ei.len += next->ei.len;
-		__detach_extent_node(sbi, et, next);
-		return next;
-	}
-	return NULL;
-}
-
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
-				struct extent_tree *et, struct extent_info *ei,
-				struct extent_node **den)
-{
-	struct rb_node **p = &et->root.rb_node;
-	struct rb_node *parent = NULL;
-	struct extent_node *en;
-
-	while (*p) {
-		parent = *p;
-		en = rb_entry(parent, struct extent_node, rb_node);
-
-		if (ei->fofs < en->ei.fofs) {
-			if (__is_front_mergeable(ei, &en->ei)) {
-				f2fs_bug_on(sbi, !den);
-				en->ei.fofs = ei->fofs;
-				en->ei.blk = ei->blk;
-				en->ei.len += ei->len;
-				*den = __try_back_merge(sbi, et, en);
-				return en;
-			}
-			p = &(*p)->rb_left;
-		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
-			if (__is_back_mergeable(ei, &en->ei)) {
-				f2fs_bug_on(sbi, !den);
-				en->ei.len += ei->len;
-				*den = __try_front_merge(sbi, et, en);
-				return en;
-			}
-			p = &(*p)->rb_right;
-		} else {
-			f2fs_bug_on(sbi, 1);
-		}
-	}
-
-	return __attach_extent_node(sbi, et, ei, parent, p);
-}
-
-static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
-					struct extent_tree *et, bool free_all)
-{
-	struct rb_node *node, *next;
-	struct extent_node *en;
-	unsigned int count = et->count;
-
-	node = rb_first(&et->root);
-	while (node) {
-		next = rb_next(node);
-		en = rb_entry(node, struct extent_node, rb_node);
-
-		if (free_all) {
-			spin_lock(&sbi->extent_lock);
-			if (!list_empty(&en->list))
-				list_del_init(&en->list);
-			spin_unlock(&sbi->extent_lock);
-		}
-
-		if (free_all || list_empty(&en->list)) {
-			__detach_extent_node(sbi, et, en);
-			kmem_cache_free(extent_node_slab, en);
-		}
-		node = next;
-	}
-
-	return count - et->count;
-}
-
-static void f2fs_init_extent_tree(struct inode *inode,
-						struct f2fs_extent *i_ext)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en;
 	struct extent_info ei;
+	struct inode *inode = dn->inode;
 
-	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
-		return;
-
-	et = __grab_extent_tree(inode);
-
-	write_lock(&et->lock);
-	if (et->count)
-		goto out;
-
-	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
-		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
-
-	en = __insert_extent_tree(sbi, et, &ei, NULL);
-	if (en) {
-		et->cached_en = en;
-
-		spin_lock(&sbi->extent_lock);
-		list_add_tail(&en->list, &sbi->extent_list);
-		spin_unlock(&sbi->extent_lock);
-	}
-out:
-	write_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-}
-
-static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en;
-
-	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
-
-	et = __find_extent_tree(sbi, inode->i_ino);
-	if (!et)
-		return false;
-
-	read_lock(&et->lock);
-	en = __lookup_extent_tree(et, pgofs);
-	if (en) {
-		*ei = en->ei;
-		spin_lock(&sbi->extent_lock);
-		if (!list_empty(&en->list))
-			list_move_tail(&en->list, &sbi->extent_list);
-		spin_unlock(&sbi->extent_lock);
-		stat_inc_read_hit(sbi->sb);
-	}
-	stat_inc_total_hit(sbi->sb);
-	read_unlock(&et->lock);
-
-	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);
-
-	atomic_dec(&et->refcount);
-	return en ? true : false;
-}
-
-static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
-							block_t blkaddr)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
-	struct extent_node *den = NULL;
-	struct extent_info ei, dei;
-	unsigned int endofs;
-
-	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);
-
-	et = __grab_extent_tree(inode);
-
-	write_lock(&et->lock);
-
-	/* 1. lookup and remove existing extent info in cache */
-	en = __lookup_extent_tree(et, fofs);
-	if (!en)
-		goto update_extent;
-
-	dei = en->ei;
-	__detach_extent_node(sbi, et, en);
-
-	/* 2. if extent can be split more, split and insert the left part */
-	if (dei.len > 1) {
-		/*  insert left part of split extent into cache */
-		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
-			set_extent_info(&ei, dei.fofs, dei.blk,
-							fofs - dei.fofs);
-			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
-		}
-
-		/* insert right part of split extent into cache */
-		endofs = dei.fofs + dei.len - 1;
-		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
-			set_extent_info(&ei, fofs + 1,
-				fofs - dei.fofs + dei.blk, endofs - fofs);
-			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
-		}
+	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+		dn->data_blkaddr = ei.blk + index - ei.fofs;
+		return 0;
 	}
 
-update_extent:
-	/* 3. update extent in extent cache */
-	if (blkaddr) {
-		set_extent_info(&ei, fofs, blkaddr, 1);
-		en3 = __insert_extent_tree(sbi, et, &ei, &den);
-	}
-
-	/* 4. update in global extent list */
-	spin_lock(&sbi->extent_lock);
-	if (en && !list_empty(&en->list))
-		list_del(&en->list);
-	/*
-	 * en1 and en2 split from en, they will become more and more smaller
-	 * fragments after splitting several times. So if the length is smaller
-	 * than F2FS_MIN_EXTENT_LEN, we will not add them into extent tree.
-	 */
-	if (en1)
-		list_add_tail(&en1->list, &sbi->extent_list);
-	if (en2)
-		list_add_tail(&en2->list, &sbi->extent_list);
-	if (en3) {
-		if (list_empty(&en3->list))
-			list_add_tail(&en3->list, &sbi->extent_list);
-		else
-			list_move_tail(&en3->list, &sbi->extent_list);
-	}
-	if (den && !list_empty(&den->list))
-		list_del(&den->list);
-	spin_unlock(&sbi->extent_lock);
-
-	/* 5. release extent node */
-	if (en)
-		kmem_cache_free(extent_node_slab, en);
-	if (den)
-		kmem_cache_free(extent_node_slab, den);
-
-	write_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-}
-
-void f2fs_preserve_extent_tree(struct inode *inode)
-{
-	struct extent_tree *et;
-	struct extent_info *ext = &F2FS_I(inode)->ext;
-	bool sync = false;
-
-	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		return;
-
-	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
-	if (!et) {
-		if (ext->len) {
-			ext->len = 0;
-			update_inode_page(inode);
-		}
-		return;
-	}
-
-	read_lock(&et->lock);
-	if (et->count) {
-		struct extent_node *en;
-
-		if (et->cached_en) {
-			en = et->cached_en;
-		} else {
-			struct rb_node *node = rb_first(&et->root);
-
-			if (!node)
-				node = rb_last(&et->root);
-			en = rb_entry(node, struct extent_node, rb_node);
-		}
-
-		if (__is_extent_same(ext, &en->ei))
-			goto out;
-
-		*ext = en->ei;
-		sync = true;
-	} else if (ext->len) {
-		ext->len = 0;
-		sync = true;
-	}
-out:
-	read_unlock(&et->lock);
-	atomic_dec(&et->refcount);
-
-	if (sync)
-		update_inode_page(inode);
-}
-
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
-{
-	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
-	struct extent_node *en, *tmp;
-	unsigned long ino = F2FS_ROOT_INO(sbi);
-	struct radix_tree_iter iter;
-	void **slot;
-	unsigned int found;
-	unsigned int node_cnt = 0, tree_cnt = 0;
-
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	if (available_free_memory(sbi, EXTENT_CACHE))
-		return;
-
-	spin_lock(&sbi->extent_lock);
-	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
-		if (!nr_shrink--)
-			break;
-		list_del_init(&en->list);
-	}
-	spin_unlock(&sbi->extent_lock);
-
-	down_read(&sbi->extent_tree_lock);
-	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
-				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
-		unsigned i;
-
-		ino = treevec[found - 1]->ino + 1;
-		for (i = 0; i < found; i++) {
-			struct extent_tree *et = treevec[i];
-
-			atomic_inc(&et->refcount);
-			write_lock(&et->lock);
-			node_cnt += __free_extent_tree(sbi, et, false);
-			write_unlock(&et->lock);
-			atomic_dec(&et->refcount);
-		}
-	}
-	up_read(&sbi->extent_tree_lock);
-
-	down_write(&sbi->extent_tree_lock);
-	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
-							F2FS_ROOT_INO(sbi)) {
-		struct extent_tree *et = (struct extent_tree *)*slot;
-
-		if (!atomic_read(&et->refcount) && !et->count) {
-			radix_tree_delete(&sbi->extent_tree_root, et->ino);
-			kmem_cache_free(extent_tree_slab, et);
-			sbi->total_ext_tree--;
-			tree_cnt++;
-		}
-	}
-	up_write(&sbi->extent_tree_lock);
-
-	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
-}
-
-void f2fs_destroy_extent_tree(struct inode *inode)
-{
-	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct extent_tree *et;
-	unsigned int node_cnt = 0;
-
-	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	et = __find_extent_tree(sbi, inode->i_ino);
-	if (!et)
-		goto out;
-
-	/* free all extent info belong to this extent tree */
-	write_lock(&et->lock);
-	node_cnt = __free_extent_tree(sbi, et, true);
-	write_unlock(&et->lock);
-
-	atomic_dec(&et->refcount);
-
-	/* try to find and delete extent tree entry in radix tree */
-	down_write(&sbi->extent_tree_lock);
-	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
-	if (!et) {
-		up_write(&sbi->extent_tree_lock);
-		goto out;
-	}
-	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
-	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
-	kmem_cache_free(extent_tree_slab, et);
-	sbi->total_ext_tree--;
-	up_write(&sbi->extent_tree_lock);
-out:
-	trace_f2fs_destroy_extent_tree(inode, node_cnt);
-	return;
-}
-
-void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
-{
-	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		f2fs_init_extent_tree(inode, i_ext);
-
-	write_lock(&F2FS_I(inode)->ext_lock);
-	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
-	write_unlock(&F2FS_I(inode)->ext_lock);
-}
-
-static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
-							struct extent_info *ei)
-{
-	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
-		return false;
-
-	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
-		return f2fs_lookup_extent_tree(inode, pgofs, ei);
-
-	return lookup_extent_info(inode, pgofs, ei);
-}
-
-void f2fs_update_extent_cache(struct dnode_of_data *dn)
-{
-	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
-	pgoff_t fofs;
-
-	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
-
-	if (is_inode_flag_set(fi, FI_NO_EXTENT))
-		return;
-
-	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-							dn->ofs_in_node;
-
-	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
-		return f2fs_update_extent_tree(dn->inode, fofs,
-							dn->data_blkaddr);
-
-	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
-		sync_inode_page(dn);
+	return f2fs_reserve_block(dn, index);
 }
 
 struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
@@ -935,15 +303,13 @@
 
 	set_new_dnode(&dn, inode, NULL, NULL, 0);
 	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-	if (err) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto put_err;
 	f2fs_put_dnode(&dn);
 
 	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
-		f2fs_put_page(page, 1);
-		return ERR_PTR(-ENOENT);
+		err = -ENOENT;
+		goto put_err;
 	}
 got_it:
 	if (PageUptodate(page)) {
@@ -968,8 +334,12 @@
 	fio.page = page;
 	err = f2fs_submit_page_bio(&fio);
 	if (err)
-		return ERR_PTR(err);
+		goto put_err;
 	return page;
+
+put_err:
+	f2fs_put_page(page, 1);
+	return ERR_PTR(err);
 }
 
 struct page *find_data_page(struct inode *inode, pgoff_t index)
@@ -1030,7 +400,8 @@
  *
  * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
- * Note that, ipage is set only by make_empty_dir.
+ * Note that ipage is set only by make_empty_dir, and if any error occurs,
+ * ipage should be released by this function.
  */
 struct page *get_new_data_page(struct inode *inode,
 		struct page *ipage, pgoff_t index, bool new_i_size)
@@ -1041,8 +412,14 @@
 	int err;
 repeat:
 	page = grab_cache_page(mapping, index);
-	if (!page)
+	if (!page) {
+		/*
+		 * before exiting, we should make sure ipage will be released
+		 * if any error occurs.
+		 */
+		f2fs_put_page(ipage, 1);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	set_new_dnode(&dn, inode, ipage, NULL, 0);
 	err = f2fs_reserve_block(&dn, index);
@@ -1107,8 +484,6 @@
 
 	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
 								&sum, seg);
-
-	/* direct IO doesn't use extent cache to maximize the performance */
 	set_data_blkaddr(dn);
 
 	/* update i_size */
@@ -1117,6 +492,9 @@
 	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
 		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
 
+	/* direct IO doesn't use extent cache to maximize the performance */
+	f2fs_drop_largest_extent(dn->inode, fofs);
+
 	return 0;
 }
 
@@ -1183,7 +561,7 @@
  *     c. give the block addresses to blockdev
  */
 static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
-			int create, bool fiemap)
+						int create, int flag)
 {
 	unsigned int maxblocks = map->m_len;
 	struct dnode_of_data dn;
@@ -1217,8 +595,19 @@
 			err = 0;
 		goto unlock_out;
 	}
-	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
-		goto put_out;
+	if (dn.data_blkaddr == NEW_ADDR) {
+		if (flag == F2FS_GET_BLOCK_BMAP) {
+			err = -ENOENT;
+			goto put_out;
+		} else if (flag == F2FS_GET_BLOCK_READ ||
+				flag == F2FS_GET_BLOCK_DIO) {
+			goto put_out;
+		}
+		/*
+		 * if we are in the fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
+		 * mark it as a mapped, unwritten block.
+		 */
+	}
 
 	if (dn.data_blkaddr != NULL_ADDR) {
 		map->m_flags = F2FS_MAP_MAPPED;
@@ -1233,6 +622,8 @@
 		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
 		map->m_pblk = dn.data_blkaddr;
 	} else {
+		if (flag == F2FS_GET_BLOCK_BMAP)
+			err = -ENOENT;
 		goto put_out;
 	}
 
@@ -1255,7 +646,9 @@
 				err = 0;
 			goto unlock_out;
 		}
-		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
+
+		if (dn.data_blkaddr == NEW_ADDR &&
+				flag != F2FS_GET_BLOCK_FIEMAP)
 			goto put_out;
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -1297,7 +690,7 @@
 }
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh, int create, bool fiemap)
+			struct buffer_head *bh, int create, int flag)
 {
 	struct f2fs_map_blocks map;
 	int ret;
@@ -1305,7 +698,7 @@
 	map.m_lblk = iblock;
 	map.m_len = bh->b_size >> inode->i_blkbits;
 
-	ret = f2fs_map_blocks(inode, &map, create, fiemap);
+	ret = f2fs_map_blocks(inode, &map, create, flag);
 	if (!ret) {
 		map_bh(bh, inode->i_sb, map.m_pblk);
 		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
@@ -1315,15 +708,23 @@
 }
 
 static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
+			struct buffer_head *bh_result, int create, int flag)
 {
-	return __get_data_block(inode, iblock, bh_result, create, false);
+	return __get_data_block(inode, iblock, bh_result, create, flag);
 }
 
-static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+static int get_data_block_dio(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh_result, int create)
 {
-	return __get_data_block(inode, iblock, bh_result, create, true);
+	return __get_data_block(inode, iblock, bh_result, create,
+						F2FS_GET_BLOCK_DIO);
+}
+
+static int get_data_block_bmap(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create,
+						F2FS_GET_BLOCK_BMAP);
 }
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1367,7 +768,8 @@
 	memset(&map_bh, 0, sizeof(struct buffer_head));
 	map_bh.b_size = len;
 
-	ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0);
+	ret = get_data_block(inode, start_blk, &map_bh, 0,
+					F2FS_GET_BLOCK_FIEMAP);
 	if (ret)
 		goto out;
 
@@ -1552,7 +954,7 @@
 			}
 
 			bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+				min_t(int, nr_pages, BIO_MAX_PAGES));
 			if (!bio) {
 				if (ctx)
 					f2fs_release_crypto_ctx(ctx);
@@ -1770,6 +1172,137 @@
 	return ret;
 }
 
+/*
+ * This function was copied from write_cache_pages in mm/page-writeback.c.
+ * The major change is that it writes cold data pages in a separate pass
+ * from warm/hot data pages.
+ */
+static int f2fs_write_cache_pages(struct address_space *mapping,
+			struct writeback_control *wbc, writepage_t writepage,
+			void *data)
+{
+	int ret = 0;
+	int done = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t uninitialized_var(writeback_index);
+	pgoff_t index;
+	pgoff_t end;		/* Inclusive */
+	pgoff_t done_index;
+	int cycled;
+	int range_whole = 0;
+	int tag;
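+	/* two passes: cold data pages and warm/hot pages are written separately */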
+	int step = 0;
+
+	pagevec_init(&pvec, 0);
+next:
+	if (wbc->range_cyclic) {
+		writeback_index = mapping->writeback_index; /* prev offset */
+		index = writeback_index;
+		if (index == 0)
+			cycled = 1;
+		else
+			cycled = 0;
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
+		cycled = 1; /* ignore range_cyclic tests */
+	}
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag = PAGECACHE_TAG_TOWRITE;
+	else
+		tag = PAGECACHE_TAG_DIRTY;
+retry:
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
+		tag_pages_for_writeback(mapping, index, end);
+	done_index = index;
+	while (!done && (index <= end)) {
+		int i;
+
+		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
+			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
+		if (nr_pages == 0)
+			break;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			if (page->index > end) {
+				done = 1;
+				break;
+			}
+
+			done_index = page->index;
+
+			lock_page(page);
+
+			if (unlikely(page->mapping != mapping)) {
+continue_unlock:
+				unlock_page(page);
+				continue;
+			}
+
+			if (!PageDirty(page)) {
+				/* someone wrote it for us */
+				goto continue_unlock;
+			}
+
+			if (step == is_cold_data(page))
+				goto continue_unlock;
+
+			if (PageWriteback(page)) {
+				if (wbc->sync_mode != WB_SYNC_NONE)
+					f2fs_wait_on_page_writeback(page, DATA);
+				else
+					goto continue_unlock;
+			}
+
+			BUG_ON(PageWriteback(page));
+			if (!clear_page_dirty_for_io(page))
+				goto continue_unlock;
+
+			ret = (*writepage)(page, wbc, data);
+			if (unlikely(ret)) {
+				if (ret == AOP_WRITEPAGE_ACTIVATE) {
+					unlock_page(page);
+					ret = 0;
+				} else {
+					done_index = page->index + 1;
+					done = 1;
+					break;
+				}
+			}
+
+			if (--wbc->nr_to_write <= 0 &&
+			    wbc->sync_mode == WB_SYNC_NONE) {
+				done = 1;
+				break;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	if (step < 1) {
+		step++;
+		goto next;
+	}
+
+	if (!cycled && !done) {
+		cycled = 1;
+		index = 0;
+		end = writeback_index - 1;
+		goto retry;
+	}
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = done_index;
+
+	return ret;
+}
+
 static int f2fs_write_data_pages(struct address_space *mapping,
 			    struct writeback_control *wbc)
 {
@@ -1785,6 +1318,10 @@
 	if (!mapping->a_ops->writepage)
 		return 0;
 
+	/* skip writing if there is no dirty page in this inode */
+	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
+		return 0;
+
 	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
 			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
 			available_free_memory(sbi, DIRTY_DENTS))
@@ -1800,12 +1337,11 @@
 		mutex_lock(&sbi->writepages);
 		locked = true;
 	}
-	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+	f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	if (locked)
 		mutex_unlock(&sbi->writepages);
 
-	f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
 	remove_dirty_dir_inode(inode);
 
 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
@@ -1832,7 +1368,8 @@
 {
 	struct inode *inode = mapping->host;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	struct page *page, *ipage;
+	struct page *page = NULL;
+	struct page *ipage;
 	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
 	struct dnode_of_data dn;
 	int err = 0;
@@ -1882,25 +1419,28 @@
 		if (err)
 			goto put_fail;
 	}
-	err = f2fs_reserve_block(&dn, index);
+
+	err = f2fs_get_block(&dn, index);
 	if (err)
 		goto put_fail;
 put_next:
 	f2fs_put_dnode(&dn);
 	f2fs_unlock_op(sbi);
 
-	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
-		return 0;
-
 	f2fs_wait_on_page_writeback(page, DATA);
 
+	if (len == PAGE_CACHE_SIZE)
+		goto out_update;
+	if (PageUptodate(page))
+		goto out_clear;
+
 	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
 		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
 		unsigned end = start + len;
 
 		/* Reading beyond i_size is simple: memset to zero */
 		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
-		goto out;
+		goto out_update;
 	}
 
 	if (dn.data_blkaddr == NEW_ADDR) {
@@ -1920,7 +1460,6 @@
 
 		lock_page(page);
 		if (unlikely(!PageUptodate(page))) {
-			f2fs_put_page(page, 1);
 			err = -EIO;
 			goto fail;
 		}
@@ -1932,14 +1471,13 @@
 		/* avoid symlink page */
 		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
 			err = f2fs_decrypt_one(inode, page);
-			if (err) {
-				f2fs_put_page(page, 1);
+			if (err)
 				goto fail;
-			}
 		}
 	}
-out:
+out_update:
 	SetPageUptodate(page);
+out_clear:
 	clear_cold_data(page);
 	return 0;
 
@@ -1947,8 +1485,8 @@
 	f2fs_put_dnode(&dn);
 unlock_fail:
 	f2fs_unlock_op(sbi);
-	f2fs_put_page(page, 1);
 fail:
+	f2fs_put_page(page, 1);
 	f2fs_write_failed(mapping, pos + len);
 	return err;
 }
@@ -1979,9 +1517,6 @@
 {
 	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
 
-	if (iov_iter_rw(iter) == READ)
-		return 0;
-
 	if (offset & blocksize_mask)
 		return -EINVAL;
 
@@ -2010,15 +1545,16 @@
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
 		return 0;
 
-	if (check_direct_IO(inode, iter, offset))
-		return 0;
+	err = check_direct_IO(inode, iter, offset);
+	if (err)
+		return err;
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
 	if (iov_iter_rw(iter) == WRITE)
 		__allocate_data_blocks(inode, offset, count);
 
-	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
+	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
 	if (err < 0 && iov_iter_rw(iter) == WRITE)
 		f2fs_write_failed(mapping, offset + count);
 
@@ -2045,6 +1581,11 @@
 		else
 			inode_dec_dirty_pages(inode);
 	}
+
+	/* This is an atomically written page; keep it Private */
+	if (IS_ATOMIC_WRITTEN_PAGE(page))
+		return;
+
 	ClearPagePrivate(page);
 }
 
@@ -2054,6 +1595,10 @@
 	if (PageDirty(page))
 		return 0;
 
+	/* This is an atomically written page; keep it Private */
+	if (IS_ATOMIC_WRITTEN_PAGE(page))
+		return 0;
+
 	ClearPagePrivate(page);
 	return 1;
 }
@@ -2068,8 +1613,15 @@
 	SetPageUptodate(page);
 
 	if (f2fs_is_atomic_file(inode)) {
-		register_inmem_page(inode, page);
-		return 1;
+		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
+			register_inmem_page(inode, page);
+			return 1;
+		}
+		/*
+		 * This page has already been registered, so we just
+		 * return here.
+		 */
+		return 0;
 	}
 
 	if (!PageDirty(page)) {
@@ -2090,38 +1642,7 @@
 		if (err)
 			return err;
 	}
-	return generic_block_bmap(mapping, block, get_data_block);
-}
-
-void init_extent_cache_info(struct f2fs_sb_info *sbi)
-{
-	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
-	init_rwsem(&sbi->extent_tree_lock);
-	INIT_LIST_HEAD(&sbi->extent_list);
-	spin_lock_init(&sbi->extent_lock);
-	sbi->total_ext_tree = 0;
-	atomic_set(&sbi->total_ext_node, 0);
-}
-
-int __init create_extent_cache(void)
-{
-	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
-			sizeof(struct extent_tree));
-	if (!extent_tree_slab)
-		return -ENOMEM;
-	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
-			sizeof(struct extent_node));
-	if (!extent_node_slab) {
-		kmem_cache_destroy(extent_tree_slab);
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void destroy_extent_cache(void)
-{
-	kmem_cache_destroy(extent_node_slab);
-	kmem_cache_destroy(extent_tree_slab);
+	return generic_block_bmap(mapping, block, get_data_block_bmap);
 }
 
 const struct address_space_operations f2fs_dblock_aops = {
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 75176e0..d013d84 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -33,8 +33,11 @@
 	int i;
 
 	/* validation check of the segment numbers */
-	si->hit_ext = sbi->read_hit_ext;
-	si->total_ext = sbi->total_hit_ext;
+	si->hit_largest = atomic_read(&sbi->read_hit_largest);
+	si->hit_cached = atomic_read(&sbi->read_hit_cached);
+	si->hit_rbtree = atomic_read(&sbi->read_hit_rbtree);
+	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
+	si->total_ext = atomic_read(&sbi->total_hit_ext);
 	si->ext_tree = sbi->total_ext_tree;
 	si->ext_node = atomic_read(&sbi->total_ext_node);
 	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -49,6 +52,7 @@
 	si->valid_count = valid_user_blocks(sbi);
 	si->valid_node_count = valid_node_count(sbi);
 	si->valid_inode_count = valid_inode_count(sbi);
+	si->inline_xattr = atomic_read(&sbi->inline_xattr);
 	si->inline_inode = atomic_read(&sbi->inline_inode);
 	si->inline_dir = atomic_read(&sbi->inline_dir);
 	si->utilization = utilization(sbi);
@@ -226,6 +230,8 @@
 		seq_printf(s, "Other: %u)\n  - Data: %u\n",
 			   si->valid_node_count - si->valid_inode_count,
 			   si->valid_count - si->valid_node_count);
+		seq_printf(s, "  - Inline_xattr Inode: %u\n",
+			   si->inline_xattr);
 		seq_printf(s, "  - Inline_data Inode: %u\n",
 			   si->inline_inode);
 		seq_printf(s, "  - Inline_dentry Inode: %u\n",
@@ -276,10 +282,16 @@
 				si->bg_data_blks);
 		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
 				si->bg_node_blks);
-		seq_printf(s, "\nExtent Hit Ratio: %d / %d\n",
-			   si->hit_ext, si->total_ext);
-		seq_printf(s, "\nExtent Tree Count: %d\n", si->ext_tree);
-		seq_printf(s, "\nExtent Node Count: %d\n", si->ext_node);
+		seq_puts(s, "\nExtent Cache:\n");
+		seq_printf(s, "  - Hit Count: L1-1:%d L1-2:%d L2:%d\n",
+				si->hit_largest, si->hit_cached,
+				si->hit_rbtree);
+		seq_printf(s, "  - Hit Ratio: %d%% (%d / %d)\n",
+				!si->total_ext ? 0 :
+				(si->hit_total * 100) / si->total_ext,
+				si->hit_total, si->total_ext);
+		seq_printf(s, "  - Inner Struct Count: tree: %d, node: %d\n",
+				si->ext_tree, si->ext_node);
 		seq_puts(s, "\nBalancing F2FS Async:\n");
 		seq_printf(s, "  - inmem: %4d, wb: %4d\n",
 			   si->inmem_pages, si->wb_pages);
@@ -366,6 +378,12 @@
 	si->sbi = sbi;
 	sbi->stat_info = si;
 
+	atomic_set(&sbi->total_hit_ext, 0);
+	atomic_set(&sbi->read_hit_rbtree, 0);
+	atomic_set(&sbi->read_hit_largest, 0);
+	atomic_set(&sbi->read_hit_cached, 0);
+
+	atomic_set(&sbi->inline_xattr, 0);
 	atomic_set(&sbi->inline_inode, 0);
 	atomic_set(&sbi->inline_dir, 0);
 	atomic_set(&sbi->inplace_count, 0);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index a34ebd8..8f15fc1 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -718,8 +718,8 @@
 	if (inode)
 		f2fs_drop_nlink(dir, inode, NULL);
 
-	if (bit_pos == NR_DENTRY_IN_BLOCK) {
-		truncate_hole(dir, page->index, page->index + 1);
+	if (bit_pos == NR_DENTRY_IN_BLOCK &&
+			!truncate_hole(dir, page->index, page->index + 1)) {
 		clear_page_dirty_for_io(page);
 		ClearPagePrivate(page);
 		ClearPageUptodate(page);
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
new file mode 100644
index 0000000..997ac86
--- /dev/null
+++ b/fs/f2fs/extent_cache.c
@@ -0,0 +1,791 @@
+/*
+ * f2fs extent cache support
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Samsung Electronics
+ * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
+ *          Chao Yu <chao2.yu@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+#include "node.h"
+#include <trace/events/f2fs.h>
+
+static struct kmem_cache *extent_tree_slab;
+static struct kmem_cache *extent_node_slab;
+
+static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct rb_node *parent, struct rb_node **p)
+{
+	struct extent_node *en;
+
+	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
+	if (!en)
+		return NULL;
+
+	en->ei = *ei;
+	INIT_LIST_HEAD(&en->list);
+
+	rb_link_node(&en->rb_node, parent, p);
+	rb_insert_color(&en->rb_node, &et->root);
+	et->count++;
+	atomic_inc(&sbi->total_ext_node);
+	return en;
+}
+
+static void __detach_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_node *en)
+{
+	rb_erase(&en->rb_node, &et->root);
+	et->count--;
+	atomic_dec(&sbi->total_ext_node);
+
+	if (et->cached_en == en)
+		et->cached_en = NULL;
+}
+
+static struct extent_tree *__grab_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et;
+	nid_t ino = inode->i_ino;
+
+	down_write(&sbi->extent_tree_lock);
+	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
+	if (!et) {
+		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
+		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
+		memset(et, 0, sizeof(struct extent_tree));
+		et->ino = ino;
+		et->root = RB_ROOT;
+		et->cached_en = NULL;
+		rwlock_init(&et->lock);
+		atomic_set(&et->refcount, 0);
+		et->count = 0;
+		sbi->total_ext_tree++;
+	}
+	atomic_inc(&et->refcount);
+	up_write(&sbi->extent_tree_lock);
+
+	/* never freed until evict_inode */
+	F2FS_I(inode)->extent_tree = et;
+
+	return et;
+}
+
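+/* check the cached node first, then binary-search the rb-tree */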
+static struct extent_node *__lookup_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, unsigned int fofs)
+{
+	struct rb_node *node = et->root.rb_node;
+	struct extent_node *en = et->cached_en;
+
+	if (en) {
+		struct extent_info *cei = &en->ei;
+
+		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) {
+			stat_inc_cached_node_hit(sbi);
+			return en;
+		}
+	}
+
+	while (node) {
+		en = rb_entry(node, struct extent_node, rb_node);
+
+		if (fofs < en->ei.fofs) {
+			node = node->rb_left;
+		} else if (fofs >= en->ei.fofs + en->ei.len) {
+			node = node->rb_right;
+		} else {
+			stat_inc_rbtree_node_hit(sbi);
+			return en;
+		}
+	}
+	return NULL;
+}
+
+static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei)
+{
+	struct rb_node **p = &et->root.rb_node;
+	struct extent_node *en;
+
+	en = __attach_extent_node(sbi, et, ei, NULL, p);
+	if (!en)
+		return NULL;
+
+	et->largest = en->ei;
+	et->cached_en = en;
+	return en;
+}
+
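+/*
+ * Free the nodes of this extent tree; unless @free_all is set, only nodes
+ * that are no longer on the global LRU list are freed. Returns the number
+ * of nodes freed.
+ */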
+static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
+					struct extent_tree *et, bool free_all)
+{
+	struct rb_node *node, *next;
+	struct extent_node *en;
+	unsigned int count = et->count;
+
+	node = rb_first(&et->root);
+	while (node) {
+		next = rb_next(node);
+		en = rb_entry(node, struct extent_node, rb_node);
+
+		if (free_all) {
+			spin_lock(&sbi->extent_lock);
+			if (!list_empty(&en->list))
+				list_del_init(&en->list);
+			spin_unlock(&sbi->extent_lock);
+		}
+
+		if (free_all || list_empty(&en->list)) {
+			__detach_extent_node(sbi, et, en);
+			kmem_cache_free(extent_node_slab, en);
+		}
+		node = next;
+	}
+
+	return count - et->count;
+}
+
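+/* invalidate the cached largest extent if it covers @fofs */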
+static void __drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+	struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
+
+	if (largest->fofs <= fofs && largest->fofs + largest->len > fofs)
+		largest->len = 0;
+}
+
+void f2fs_drop_largest_extent(struct inode *inode, pgoff_t fofs)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
+	__drop_largest_extent(inode, fofs);
+}
+
+void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et;
+	struct extent_node *en;
+	struct extent_info ei;
+
+	if (!f2fs_may_extent_tree(inode))
+		return;
+
+	et = __grab_extent_tree(inode);
+
+	if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
+		return;
+
+	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
+		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));
+
+	write_lock(&et->lock);
+	if (et->count)
+		goto out;
+
+	en = __init_extent_tree(sbi, et, &ei);
+	if (en) {
+		spin_lock(&sbi->extent_lock);
+		list_add_tail(&en->list, &sbi->extent_list);
+		spin_unlock(&sbi->extent_lock);
+	}
+out:
+	write_unlock(&et->lock);
+}
+
+static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
+							struct extent_info *ei)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en;
+	bool ret = false;
+
+	f2fs_bug_on(sbi, !et);
+
+	trace_f2fs_lookup_extent_tree_start(inode, pgofs);
+
+	read_lock(&et->lock);
+
+	if (et->largest.fofs <= pgofs &&
+			et->largest.fofs + et->largest.len > pgofs) {
+		*ei = et->largest;
+		ret = true;
+		stat_inc_largest_node_hit(sbi);
+		goto out;
+	}
+
+	en = __lookup_extent_tree(sbi, et, pgofs);
+	if (en) {
+		*ei = en->ei;
+		spin_lock(&sbi->extent_lock);
+		if (!list_empty(&en->list))
+			list_move_tail(&en->list, &sbi->extent_list);
+		et->cached_en = en;
+		spin_unlock(&sbi->extent_lock);
+		ret = true;
+	}
+out:
+	stat_inc_total_hit(sbi);
+	read_unlock(&et->lock);
+
+	trace_f2fs_lookup_extent_tree_end(inode, pgofs, ei);
+	return ret;
+}
+
+/*
+ * Look up the extent covering @fofs: if found, return it;
+ * if not, return NULL and set
+ * @prev_ex: extent before fofs
+ * @next_ex: extent after fofs
+ * @insert_p: insert point for a new extent at fofs
+ * in order to simplify the insertion afterwards.
+ * The tree must stay unchanged between lookup and insertion.
+ */
+static struct extent_node *__lookup_extent_tree_ret(struct extent_tree *et,
+				unsigned int fofs,
+				struct extent_node **prev_ex,
+				struct extent_node **next_ex,
+				struct rb_node ***insert_p,
+				struct rb_node **insert_parent)
+{
+	struct rb_node **pnode = &et->root.rb_node;
+	struct rb_node *parent = NULL, *tmp_node;
+	struct extent_node *en = et->cached_en;
+
+	*insert_p = NULL;
+	*insert_parent = NULL;
+	*prev_ex = NULL;
+	*next_ex = NULL;
+
+	if (RB_EMPTY_ROOT(&et->root))
+		return NULL;
+
+	if (en) {
+		struct extent_info *cei = &en->ei;
+
+		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
+			goto lookup_neighbors;
+	}
+
+	while (*pnode) {
+		parent = *pnode;
+		en = rb_entry(*pnode, struct extent_node, rb_node);
+
+		if (fofs < en->ei.fofs)
+			pnode = &(*pnode)->rb_left;
+		else if (fofs >= en->ei.fofs + en->ei.len)
+			pnode = &(*pnode)->rb_right;
+		else
+			goto lookup_neighbors;
+	}
+
+	*insert_p = pnode;
+	*insert_parent = parent;
+
+	en = rb_entry(parent, struct extent_node, rb_node);
+	tmp_node = parent;
+	if (parent && fofs > en->ei.fofs)
+		tmp_node = rb_next(parent);
+	*next_ex = tmp_node ?
+		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+
+	tmp_node = parent;
+	if (parent && fofs < en->ei.fofs)
+		tmp_node = rb_prev(parent);
+	*prev_ex = tmp_node ?
+		rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	return NULL;
+
+lookup_neighbors:
+	if (fofs == en->ei.fofs) {
+		/* lookup prev node for merging backward later */
+		tmp_node = rb_prev(&en->rb_node);
+		*prev_ex = tmp_node ?
+			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	}
+	if (fofs == en->ei.fofs + en->ei.len - 1) {
+		/* lookup next node for merging frontward later */
+		tmp_node = rb_next(&en->rb_node);
+		*next_ex = tmp_node ?
+			rb_entry(tmp_node, struct extent_node, rb_node) : NULL;
+	}
+	return en;
+}
+
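+/*
+ * Try to merge the new extent @ei with its neighbours; when both neighbours
+ * merge, the detached back node is returned through @den so the caller can
+ * free it.
+ */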
+static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct extent_node **den,
+				struct extent_node *prev_ex,
+				struct extent_node *next_ex)
+{
+	struct extent_node *en = NULL;
+
+	if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
+		prev_ex->ei.len += ei->len;
+		ei = &prev_ex->ei;
+		en = prev_ex;
+	}
+
+	if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
+		if (en) {
+			__detach_extent_node(sbi, et, prev_ex);
+			*den = prev_ex;
+		}
+		next_ex->ei.fofs = ei->fofs;
+		next_ex->ei.blk = ei->blk;
+		next_ex->ei.len += ei->len;
+		en = next_ex;
+	}
+
+	if (en) {
+		if (en->ei.len > et->largest.len)
+			et->largest = en->ei;
+		et->cached_en = en;
+	}
+	return en;
+}
+
+static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+				struct extent_tree *et, struct extent_info *ei,
+				struct rb_node **insert_p,
+				struct rb_node *insert_parent)
+{
+	struct rb_node **p = &et->root.rb_node;
+	struct rb_node *parent = NULL;
+	struct extent_node *en = NULL;
+
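+	/* reuse the insertion point found by a previous lookup, if available */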
+	if (insert_p && insert_parent) {
+		parent = insert_parent;
+		p = insert_p;
+		goto do_insert;
+	}
+
+	while (*p) {
+		parent = *p;
+		en = rb_entry(parent, struct extent_node, rb_node);
+
+		if (ei->fofs < en->ei.fofs)
+			p = &(*p)->rb_left;
+		else if (ei->fofs >= en->ei.fofs + en->ei.len)
+			p = &(*p)->rb_right;
+		else
+			f2fs_bug_on(sbi, 1);
+	}
+do_insert:
+	en = __attach_extent_node(sbi, et, ei, parent, p);
+	if (!en)
+		return NULL;
+
+	if (en->ei.len > et->largest.len)
+		et->largest = en->ei;
+	et->cached_en = en;
+	return en;
+}
+
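+/*
+ * Invalidate all extent nodes overlapping [fofs, fofs + len - 1] and, if
+ * @blkaddr is valid, insert the new extent. Returns whether the cached
+ * largest extent changed, so callers know to sync the inode page.
+ */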
+unsigned int f2fs_update_extent_tree_range(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
+	struct extent_node *prev_en = NULL, *next_en = NULL;
+	struct extent_info ei, dei, prev;
+	struct rb_node **insert_p = NULL, *insert_parent = NULL;
+	unsigned int end = fofs + len;
+	unsigned int pos = (unsigned int)fofs;
+
+	if (!et)
+		return false;
+
+	write_lock(&et->lock);
+
+	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+		write_unlock(&et->lock);
+		return false;
+	}
+
+	prev = et->largest;
+	dei.len = 0;
+
+	/* we do not guarantee that the largest extent is cached all the time */
+	__drop_largest_extent(inode, fofs);
+
+	/* 1. lookup first extent node in range [fofs, fofs + len - 1] */
+	en = __lookup_extent_tree_ret(et, fofs, &prev_en, &next_en,
+					&insert_p, &insert_parent);
+	if (!en) {
+		if (next_en) {
+			en = next_en;
+			f2fs_bug_on(sbi, en->ei.fofs <= pos);
+			pos = en->ei.fofs;
+		} else {
+			/*
+			 * skip searching in the tree since there is no
+			 * larger extent node in the cache.
+			 */
+			goto update_extent;
+		}
+	}
+
+	/* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
+	while (en) {
+		struct rb_node *node;
+
+		if (pos >= end)
+			break;
+
+		dei = en->ei;
+		en1 = en2 = NULL;
+
+		node = rb_next(&en->rb_node);
+
+		/*
+		 * 2.1 There are four cases when we invalidate a blkaddr in an
+		 * extent node (V: valid address, X: will be invalidated):
+		 */
+		/* case#1, invalidate right part of extent node |VVVVVXXXXX| */
+		if (pos > dei.fofs && end >= dei.fofs + dei.len) {
+			en->ei.len = pos - dei.fofs;
+
+			if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
+				__detach_extent_node(sbi, et, en);
+				insert_p = NULL;
+				insert_parent = NULL;
+				goto update;
+			}
+
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest = en->ei;
+			goto next;
+		}
+
+		/* case#2, invalidate left part of extent node |XXXXXVVVVV| */
+		if (pos <= dei.fofs && end < dei.fofs + dei.len) {
+			en->ei.fofs = end;
+			en->ei.blk += end - dei.fofs;
+			en->ei.len -= end - dei.fofs;
+
+			if (en->ei.len < F2FS_MIN_EXTENT_LEN) {
+				__detach_extent_node(sbi, et, en);
+				insert_p = NULL;
+				insert_parent = NULL;
+				goto update;
+			}
+
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest = en->ei;
+			goto next;
+		}
+
+		__detach_extent_node(sbi, et, en);
+
+		/*
+		 * if we remove a node from the rb-tree, our cached parent node
+		 * pointer may point to the wrong place, so discard it.
+		 */
+		insert_p = NULL;
+		insert_parent = NULL;
+
+		/* case#3, invalidate entire extent node |XXXXXXXXXX| */
+		if (pos <= dei.fofs && end >= dei.fofs + dei.len) {
+			if (__is_extent_same(&dei, &et->largest))
+				et->largest.len = 0;
+			goto update;
+		}
+
+		/*
+		 * case#4, invalidate data in the middle of extent node
+		 * |VVVXXXXVVV|
+		 */
+		if (dei.len > F2FS_MIN_EXTENT_LEN) {
+			unsigned int endofs;
+
+			/*  insert left part of split extent into cache */
+			if (pos - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
+				set_extent_info(&ei, dei.fofs, dei.blk,
+							pos - dei.fofs);
+				en1 = __insert_extent_tree(sbi, et, &ei,
+								NULL, NULL);
+			}
+
+			/* insert right part of split extent into cache */
+			endofs = dei.fofs + dei.len;
+			if (endofs - end >= F2FS_MIN_EXTENT_LEN) {
+				set_extent_info(&ei, end,
+						end - dei.fofs + dei.blk,
+						endofs - end);
+				en2 = __insert_extent_tree(sbi, et, &ei,
+								NULL, NULL);
+			}
+		}
+update:
+		/* 2.2 update in global extent list */
+		spin_lock(&sbi->extent_lock);
+		if (en && !list_empty(&en->list))
+			list_del(&en->list);
+		if (en1)
+			list_add_tail(&en1->list, &sbi->extent_list);
+		if (en2)
+			list_add_tail(&en2->list, &sbi->extent_list);
+		spin_unlock(&sbi->extent_lock);
+
+		/* 2.3 release extent node */
+		if (en)
+			kmem_cache_free(extent_node_slab, en);
+next:
+		en = node ? rb_entry(node, struct extent_node, rb_node) : NULL;
+		next_en = en;
+		if (en)
+			pos = en->ei.fofs;
+	}
+
+update_extent:
+	/* 3. update extent in extent cache */
+	if (blkaddr) {
+		struct extent_node *den = NULL;
+
+		set_extent_info(&ei, fofs, blkaddr, len);
+		en3 = __try_merge_extent_node(sbi, et, &ei, &den,
+							prev_en, next_en);
+		if (!en3)
+			en3 = __insert_extent_tree(sbi, et, &ei,
+						insert_p, insert_parent);
+
+		/* give up the extent cache if splits and small updates happen */
+		if (dei.len >= 1 &&
+				prev.len < F2FS_MIN_EXTENT_LEN &&
+				et->largest.len < F2FS_MIN_EXTENT_LEN) {
+			et->largest.len = 0;
+			set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
+		}
+
+		spin_lock(&sbi->extent_lock);
+		if (en3) {
+			if (list_empty(&en3->list))
+				list_add_tail(&en3->list, &sbi->extent_list);
+			else
+				list_move_tail(&en3->list, &sbi->extent_list);
+		}
+		if (den && !list_empty(&den->list))
+			list_del(&den->list);
+		spin_unlock(&sbi->extent_lock);
+
+		if (den)
+			kmem_cache_free(extent_node_slab, den);
+	}
+
+	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+		__free_extent_tree(sbi, et, true);
+
+	write_unlock(&et->lock);
+
+	return !__is_extent_same(&prev, &et->largest);
+}
+
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
+	struct extent_node *en, *tmp;
+	unsigned long ino = F2FS_ROOT_INO(sbi);
+	struct radix_tree_root *root = &sbi->extent_tree_root;
+	unsigned int found;
+	unsigned int node_cnt = 0, tree_cnt = 0;
+	int remained;
+
+	if (!test_opt(sbi, EXTENT_CACHE))
+		return 0;
+
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	/* 1. remove unreferenced extent trees */
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			if (!atomic_read(&et->refcount)) {
+				write_lock(&et->lock);
+				node_cnt += __free_extent_tree(sbi, et, true);
+				write_unlock(&et->lock);
+
+				radix_tree_delete(root, et->ino);
+				kmem_cache_free(extent_tree_slab, et);
+				sbi->total_ext_tree--;
+				tree_cnt++;
+
+				if (node_cnt + tree_cnt >= nr_shrink)
+					goto unlock_out;
+			}
+		}
+	}
+	up_write(&sbi->extent_tree_lock);
+
+	/* 2. remove LRU extent entries */
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
+	remained = nr_shrink - (node_cnt + tree_cnt);
+
+	spin_lock(&sbi->extent_lock);
+	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
+		if (!remained--)
+			break;
+		list_del_init(&en->list);
+	}
+	spin_unlock(&sbi->extent_lock);
+
+	while ((found = radix_tree_gang_lookup(root,
+				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
+		unsigned i;
+
+		ino = treevec[found - 1]->ino + 1;
+		for (i = 0; i < found; i++) {
+			struct extent_tree *et = treevec[i];
+
+			write_lock(&et->lock);
+			node_cnt += __free_extent_tree(sbi, et, false);
+			write_unlock(&et->lock);
+
+			if (node_cnt + tree_cnt >= nr_shrink)
+				break;
+		}
+	}
+unlock_out:
+	up_write(&sbi->extent_tree_lock);
+out:
+	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+
+	return node_cnt + tree_cnt;
+}
+
+unsigned int f2fs_destroy_extent_node(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	unsigned int node_cnt = 0;
+
+	if (!et)
+		return 0;
+
+	write_lock(&et->lock);
+	node_cnt = __free_extent_tree(sbi, et, true);
+	write_unlock(&et->lock);
+
+	return node_cnt;
+}
+
+void f2fs_destroy_extent_tree(struct inode *inode)
+{
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	struct extent_tree *et = F2FS_I(inode)->extent_tree;
+	unsigned int node_cnt = 0;
+
+	if (!et)
+		return;
+
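+	/* keep the tree if the inode is still linked and has cached extents */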
+	if (inode->i_nlink && !is_bad_inode(inode) && et->count) {
+		atomic_dec(&et->refcount);
+		return;
+	}
+
+	/* free all extent info belonging to this extent tree */
+	node_cnt = f2fs_destroy_extent_node(inode);
+
+	/* delete extent tree entry in radix tree */
+	down_write(&sbi->extent_tree_lock);
+	atomic_dec(&et->refcount);
+	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
+	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
+	kmem_cache_free(extent_tree_slab, et);
+	sbi->total_ext_tree--;
+	up_write(&sbi->extent_tree_lock);
+
+	F2FS_I(inode)->extent_tree = NULL;
+
+	trace_f2fs_destroy_extent_tree(inode, node_cnt);
+}
+
+bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
+					struct extent_info *ei)
+{
+	if (!f2fs_may_extent_tree(inode))
+		return false;
+
+	return f2fs_lookup_extent_tree(inode, pgofs, ei);
+}
+
+void f2fs_update_extent_cache(struct dnode_of_data *dn)
+{
+	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
+	pgoff_t fofs;
+
+	if (!f2fs_may_extent_tree(dn->inode))
+		return;
+
+	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
+
+	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+							dn->ofs_in_node;
+
+	if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
+		sync_inode_page(dn);
+}
+
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+				pgoff_t fofs, block_t blkaddr, unsigned int len)
+{
+	if (!f2fs_may_extent_tree(dn->inode))
+		return;
+
+	if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
+		sync_inode_page(dn);
+}
+
+void init_extent_cache_info(struct f2fs_sb_info *sbi)
+{
+	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
+	init_rwsem(&sbi->extent_tree_lock);
+	INIT_LIST_HEAD(&sbi->extent_list);
+	spin_lock_init(&sbi->extent_lock);
+	sbi->total_ext_tree = 0;
+	atomic_set(&sbi->total_ext_node, 0);
+}
+
+int __init create_extent_cache(void)
+{
+	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
+			sizeof(struct extent_tree));
+	if (!extent_tree_slab)
+		return -ENOMEM;
+	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
+			sizeof(struct extent_node));
+	if (!extent_node_slab) {
+		kmem_cache_destroy(extent_tree_slab);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+void destroy_extent_cache(void)
+{
+	kmem_cache_destroy(extent_node_slab);
+	kmem_cache_destroy(extent_tree_slab);
+}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a8327ed..f1a90ff 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -19,6 +19,7 @@
 #include <linux/magic.h>
 #include <linux/kobject.h>
 #include <linux/sched.h>
+#include <linux/bio.h>
 
 #ifdef CONFIG_F2FS_CHECK_FS
 #define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
@@ -228,6 +229,7 @@
 #define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
 #define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
 #define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
+#define F2FS_IOC_GARBAGE_COLLECT	_IO(F2FS_IOCTL_MAGIC, 6)
 
 #define F2FS_IOC_SET_ENCRYPTION_POLICY					\
 		_IOR('f', 19, struct f2fs_encryption_policy)
@@ -320,7 +322,7 @@
 					 */
 };
 
-#define F2FS_LINK_MAX		32000	/* maximum link count per file */
+#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
 
 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
 
@@ -349,6 +351,7 @@
 	nid_t ino;			/* inode number */
 	struct rb_root root;		/* root of extent info rb-tree */
 	struct extent_node *cached_en;	/* recently accessed extent node */
+	struct extent_info largest;	/* largest extent info */
 	rwlock_t lock;			/* protect extent info rb-tree */
 	atomic_t refcount;		/* reference count of rb-tree */
 	unsigned int count;		/* # of extent node in rb-tree*/
@@ -372,6 +375,12 @@
 	unsigned int m_flags;
 };
 
+/* for flag in get_data_block */
+#define F2FS_GET_BLOCK_READ		0
+#define F2FS_GET_BLOCK_DIO		1
+#define F2FS_GET_BLOCK_FIEMAP		2
+#define F2FS_GET_BLOCK_BMAP		3
+
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
  */
@@ -420,14 +429,13 @@
 	unsigned int clevel;		/* maximum level of given file name */
 	nid_t i_xattr_nid;		/* node id that contains xattrs */
 	unsigned long long xattr_ver;	/* cp version of xattr modification */
-	struct extent_info ext;		/* in-memory extent cache entry */
-	rwlock_t ext_lock;		/* rwlock for single extent cache */
 	struct inode_entry *dirty_dir;	/* the pointer of dirty dir */
 
-	struct radix_tree_root inmem_root;	/* radix tree for inmem pages */
 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
 	struct mutex inmem_lock;	/* lock for inmemory pages */
 
+	struct extent_tree *extent_tree;	/* cached extent_tree entry */
+
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
 	/* Encryption params */
 	struct f2fs_crypt_info *i_crypt_info;
@@ -779,7 +787,11 @@
 	unsigned int segment_count[2];		/* # of allocated segments */
 	unsigned int block_count[2];		/* # of allocated blocks */
 	atomic_t inplace_count;		/* # of inplace update */
-	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
+	atomic_t total_hit_ext;			/* # of lookup extent cache */
+	atomic_t read_hit_rbtree;		/* # of hit rbtree extent node */
+	atomic_t read_hit_largest;		/* # of hit largest extent node */
+	atomic_t read_hit_cached;		/* # of hit cached extent node */
+	atomic_t inline_xattr;			/* # of inline_xattr inodes */
 	atomic_t inline_inode;			/* # of inline_data inodes */
 	atomic_t inline_dir;			/* # of inline_dentry inodes */
 	int bg_gc;				/* background gc calls */
@@ -791,6 +803,11 @@
 	/* For sysfs suppport */
 	struct kobject s_kobj;
 	struct completion s_kobj_unregister;
+
+	/* For shrinker support */
+	struct list_head s_list;
+	struct mutex umount_mutex;
+	unsigned int shrinker_run_no;
 };
 
 /*
@@ -1039,7 +1056,8 @@
 
 static inline void inode_dec_dirty_pages(struct inode *inode)
 {
-	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
+	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
+			!S_ISLNK(inode->i_mode))
 		return;
 
 	atomic_dec(&F2FS_I(inode)->dirty_pages);
@@ -1234,16 +1252,24 @@
 						gfp_t flags)
 {
 	void *entry;
-retry:
-	entry = kmem_cache_alloc(cachep, flags);
-	if (!entry) {
-		cond_resched();
-		goto retry;
-	}
 
+	entry = kmem_cache_alloc(cachep, flags);
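+	/* on failure, retry with __GFP_NOFAIL so this allocation cannot fail */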
+	if (!entry)
+		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
 	return entry;
 }
 
+static inline struct bio *f2fs_bio_alloc(int npages)
+{
+	struct bio *bio;
+
+	/* No failure on bio allocation */
+	bio = bio_alloc(GFP_NOIO, npages);
+	if (!bio)
+		bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
+	return bio;
+}
+
 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
 				unsigned long index, void *item)
 {
@@ -1342,6 +1368,7 @@
 	FI_INC_LINK,		/* need to increment i_nlink */
 	FI_ACL_MODE,		/* indicate acl mode */
 	FI_NO_ALLOC,		/* should not allocate any blocks */
+	FI_FREE_NID,		/* free allocated nid */
 	FI_UPDATE_DIR,		/* should update inode block for consistency */
 	FI_DELAY_IPUT,		/* used for the recovery */
 	FI_NO_EXTENT,		/* not to use the extent cache */
@@ -1541,6 +1568,17 @@
 	return false;
 }
 
+static inline bool f2fs_may_extent_tree(struct inode *inode)
+{
+	mode_t mode = inode->i_mode;
+
+	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
+			is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+		return false;
+
+	return S_ISREG(mode);
+}
+
 #define get_inode_mode(i) \
 	((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -1557,7 +1595,7 @@
 int f2fs_sync_file(struct file *, loff_t, loff_t, int);
 void truncate_data_blocks(struct dnode_of_data *);
 int truncate_blocks(struct inode *, u64, bool);
-void f2fs_truncate(struct inode *);
+int f2fs_truncate(struct inode *, bool);
 int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int f2fs_setattr(struct dentry *, struct iattr *);
 int truncate_hole(struct inode *, pgoff_t, pgoff_t);
@@ -1649,7 +1687,7 @@
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int truncate_xattr_node(struct inode *, struct page *);
 int wait_on_node_pages_writeback(struct f2fs_sb_info *, nid_t);
-void remove_inode_page(struct inode *);
+int remove_inode_page(struct inode *);
 struct page *new_inode_page(struct inode *);
 struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
 void ra_node_page(struct f2fs_sb_info *, nid_t);
@@ -1660,6 +1698,7 @@
 bool alloc_nid(struct f2fs_sb_info *, nid_t *);
 void alloc_nid_done(struct f2fs_sb_info *, nid_t);
 void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
+int try_to_free_nids(struct f2fs_sb_info *, int);
 void recover_inline_xattr(struct inode *, struct page *);
 void recover_xattr_data(struct inode *, struct page *, block_t);
 int recover_inode_page(struct f2fs_sb_info *, struct page *);
@@ -1675,7 +1714,7 @@
  * segment.c
  */
 void register_inmem_page(struct inode *, struct page *);
-void commit_inmem_pages(struct inode *, bool);
+int commit_inmem_pages(struct inode *, bool);
 void f2fs_balance_fs(struct f2fs_sb_info *);
 void f2fs_balance_fs_bg(struct f2fs_sb_info *);
 int f2fs_issue_flush(struct f2fs_sb_info *);
@@ -1685,7 +1724,7 @@
 void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
 void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
 void release_discard_addrs(struct f2fs_sb_info *);
-void discard_next_dnode(struct f2fs_sb_info *, block_t);
+bool discard_next_dnode(struct f2fs_sb_info *, block_t);
 int npages_for_summary_flush(struct f2fs_sb_info *, bool);
 void allocate_new_segments(struct f2fs_sb_info *);
 int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -1727,7 +1766,7 @@
 void release_orphan_inode(struct f2fs_sb_info *);
 void add_orphan_inode(struct f2fs_sb_info *, nid_t);
 void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
-void recover_orphan_inodes(struct f2fs_sb_info *);
+int recover_orphan_inodes(struct f2fs_sb_info *);
 int get_valid_checkpoint(struct f2fs_sb_info *);
 void update_dirty_page(struct inode *, struct page *);
 void add_dirty_dir_inode(struct inode *);
@@ -1746,21 +1785,14 @@
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
+int f2fs_get_block(struct dnode_of_data *, pgoff_t);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
-void f2fs_destroy_extent_tree(struct inode *);
-void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
-void f2fs_update_extent_cache(struct dnode_of_data *);
-void f2fs_preserve_extent_tree(struct inode *);
 struct page *get_read_data_page(struct inode *, pgoff_t, int);
 struct page *find_data_page(struct inode *, pgoff_t);
 struct page *get_lock_data_page(struct inode *, pgoff_t);
 struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
 int do_write_data_page(struct f2fs_io_info *);
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
-void init_extent_cache_info(struct f2fs_sb_info *);
-int __init create_extent_cache(void);
-void destroy_extent_cache(void);
 void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
 int f2fs_release_page(struct page *, gfp_t);
 
@@ -1788,11 +1820,13 @@
 	struct f2fs_sb_info *sbi;
 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
 	int main_area_segs, main_area_sections, main_area_zones;
-	int hit_ext, total_ext, ext_tree, ext_node;
+	int hit_largest, hit_cached, hit_rbtree, hit_total, total_ext;
+	int ext_tree, ext_node;
 	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
 	int nats, dirty_nats, sits, dirty_sits, fnids;
 	int total_count, utilization;
-	int bg_gc, inline_inode, inline_dir, inmem_pages, wb_pages;
+	int bg_gc, inmem_pages, wb_pages;
+	int inline_xattr, inline_inode, inline_dir;
 	unsigned int valid_count, valid_node_count, valid_inode_count;
 	unsigned int bimodal, avg_vblocks;
 	int util_free, util_valid, util_invalid;
@@ -1823,8 +1857,20 @@
 #define stat_inc_bggc_count(sbi)	((sbi)->bg_gc++)
 #define stat_inc_dirty_dir(sbi)		((sbi)->n_dirty_dirs++)
 #define stat_dec_dirty_dir(sbi)		((sbi)->n_dirty_dirs--)
-#define stat_inc_total_hit(sb)		((F2FS_SB(sb))->total_hit_ext++)
-#define stat_inc_read_hit(sb)		((F2FS_SB(sb))->read_hit_ext++)
+#define stat_inc_total_hit(sbi)		(atomic_inc(&(sbi)->total_hit_ext))
+#define stat_inc_rbtree_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_largest_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_largest))
+#define stat_inc_cached_node_hit(sbi)	(atomic_inc(&(sbi)->read_hit_cached))
+#define stat_inc_inline_xattr(inode)					\
+	do {								\
+		if (f2fs_has_inline_xattr(inode))			\
+			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
+	} while (0)
+#define stat_dec_inline_xattr(inode)					\
+	do {								\
+		if (f2fs_has_inline_xattr(inode))			\
+			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
+	} while (0)
 #define stat_inc_inline_inode(inode)					\
 	do {								\
 		if (f2fs_has_inline_data(inode))			\
@@ -1894,7 +1940,11 @@
 #define stat_inc_dirty_dir(sbi)
 #define stat_dec_dirty_dir(sbi)
 #define stat_inc_total_hit(sb)
-#define stat_inc_read_hit(sb)
+#define stat_inc_rbtree_node_hit(sb)
+#define stat_inc_largest_node_hit(sbi)
+#define stat_inc_cached_node_hit(sbi)
+#define stat_inc_inline_xattr(inode)
+#define stat_dec_inline_xattr(inode)
 #define stat_inc_inline_inode(inode)
 #define stat_dec_inline_inode(inode)
 #define stat_inc_inline_dir(inode)
@@ -1950,6 +2000,30 @@
 						struct f2fs_str *);
 
 /*
+ * shrinker.c
+ */
+unsigned long f2fs_shrink_count(struct shrinker *, struct shrink_control *);
+unsigned long f2fs_shrink_scan(struct shrinker *, struct shrink_control *);
+void f2fs_join_shrinker(struct f2fs_sb_info *);
+void f2fs_leave_shrinker(struct f2fs_sb_info *);
+
+/*
+ * extent_cache.c
+ */
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+void f2fs_drop_largest_extent(struct inode *, pgoff_t);
+void f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+unsigned int f2fs_destroy_extent_node(struct inode *);
+void f2fs_destroy_extent_tree(struct inode *);
+bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
+void f2fs_update_extent_cache(struct dnode_of_data *);
+void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
+						pgoff_t, block_t, unsigned int);
+void init_extent_cache_info(struct f2fs_sb_info *);
+int __init create_extent_cache(void);
+void destroy_extent_cache(void);
+
+/*
  * crypto support
  */
 static inline int f2fs_encrypted_inode(struct inode *inode)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index b0f38c3..8120f86 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -27,6 +27,7 @@
 #include "segment.h"
 #include "xattr.h"
 #include "acl.h"
+#include "gc.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
 
@@ -85,6 +86,8 @@
 mapped:
 	/* fill the page */
 	f2fs_wait_on_page_writeback(page, DATA);
+	/* if a GCed page is attached, don't write it to a cold segment */
+	clear_cold_data(page);
 out:
 	sb_end_pagefault(inode->i_sb);
 	return block_page_mkwrite_return(err);
@@ -203,8 +206,8 @@
 	}
 
 	/* if the inode is dirty, let's recover all the time */
-	if (!datasync && is_inode_flag_set(fi, FI_DIRTY_INODE)) {
-		update_inode_page(inode);
+	if (!datasync) {
+		f2fs_write_inode(inode, NULL);
 		goto go_write;
 	}
 
@@ -442,9 +445,9 @@
 
 int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
 {
-	int nr_free = 0, ofs = dn->ofs_in_node;
 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
 	struct f2fs_node *raw_node;
+	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
 	__le32 *addr;
 
 	raw_node = F2FS_NODE(dn->node_page);
@@ -457,14 +460,22 @@
 
 		dn->data_blkaddr = NULL_ADDR;
 		set_data_blkaddr(dn);
-		f2fs_update_extent_cache(dn);
 		invalidate_blocks(sbi, blkaddr);
 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
 			clear_inode_flag(F2FS_I(dn->inode),
 						FI_FIRST_BLOCK_WRITTEN);
 		nr_free++;
 	}
+
 	if (nr_free) {
+		pgoff_t fofs;
+		/*
+		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
+		 * we will invalidate all blkaddrs in the whole range.
+		 */
+		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
+						F2FS_I(dn->inode)) + ofs;
+		f2fs_update_extent_cache_range(dn, fofs, 0, len);
 		dec_valid_block_count(sbi, dn->inode, nr_free);
 		set_page_dirty(dn->node_page);
 		sync_inode_page(dn);
@@ -576,24 +587,30 @@
 	return err;
 }
 
-void f2fs_truncate(struct inode *inode)
+int f2fs_truncate(struct inode *inode, bool lock)
 {
+	int err;
+
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 				S_ISLNK(inode->i_mode)))
-		return;
+		return 0;
 
 	trace_f2fs_truncate(inode);
 
 	/* we should check inline_data size */
 	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
-		if (f2fs_convert_inline_inode(inode))
-			return;
+		err = f2fs_convert_inline_inode(inode);
+		if (err)
+			return err;
 	}
 
-	if (!truncate_blocks(inode, i_size_read(inode), true)) {
-		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		mark_inode_dirty(inode);
-	}
+	err = truncate_blocks(inode, i_size_read(inode), lock);
+	if (err)
+		return err;
+
+	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+	mark_inode_dirty(inode);
+	return 0;
 }
 
 int f2fs_getattr(struct vfsmount *mnt,
@@ -653,7 +670,9 @@
 
 		if (attr->ia_size <= i_size_read(inode)) {
 			truncate_setsize(inode, attr->ia_size);
-			f2fs_truncate(inode);
+			err = f2fs_truncate(inode, true);
+			if (err)
+				return err;
 			f2fs_balance_fs(F2FS_I_SB(inode));
 		} else {
 			/*
@@ -692,14 +711,14 @@
 	.fiemap		= f2fs_fiemap,
 };
 
-static void fill_zero(struct inode *inode, pgoff_t index,
+static int fill_zero(struct inode *inode, pgoff_t index,
 					loff_t start, loff_t len)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct page *page;
 
 	if (!len)
-		return;
+		return 0;
 
 	f2fs_balance_fs(sbi);
 
@@ -707,12 +726,14 @@
 	page = get_new_data_page(inode, NULL, index, false);
 	f2fs_unlock_op(sbi);
 
-	if (!IS_ERR(page)) {
-		f2fs_wait_on_page_writeback(page, DATA);
-		zero_user(page, start, len);
-		set_page_dirty(page);
-		f2fs_put_page(page, 1);
-	}
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+
+	f2fs_wait_on_page_writeback(page, DATA);
+	zero_user(page, start, len);
+	set_page_dirty(page);
+	f2fs_put_page(page, 1);
+	return 0;
 }
 
 int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
@@ -760,14 +781,22 @@
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
 	if (pg_start == pg_end) {
-		fill_zero(inode, pg_start, off_start,
+		ret = fill_zero(inode, pg_start, off_start,
 						off_end - off_start);
+		if (ret)
+			return ret;
 	} else {
-		if (off_start)
-			fill_zero(inode, pg_start++, off_start,
-					PAGE_CACHE_SIZE - off_start);
-		if (off_end)
-			fill_zero(inode, pg_end, 0, off_end);
+		if (off_start) {
+			ret = fill_zero(inode, pg_start++, off_start,
+						PAGE_CACHE_SIZE - off_start);
+			if (ret)
+				return ret;
+		}
+		if (off_end) {
+			ret = fill_zero(inode, pg_end, 0, off_end);
+			if (ret)
+				return ret;
+		}
 
 		if (pg_start < pg_end) {
 			struct address_space *mapping = inode->i_mapping;
@@ -797,11 +826,11 @@
 	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
 	int ret = 0;
 
-	f2fs_lock_op(sbi);
-
 	for (; end < nrpages; start++, end++) {
 		block_t new_addr, old_addr;
 
+		f2fs_lock_op(sbi);
+
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
 		if (ret && ret != -ENOENT) {
@@ -817,13 +846,16 @@
 		if (new_addr == NULL_ADDR) {
 			set_new_dnode(&dn, inode, NULL, NULL, 0);
 			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
-			if (ret && ret != -ENOENT)
+			if (ret && ret != -ENOENT) {
 				goto out;
-			else if (ret == -ENOENT)
+			} else if (ret == -ENOENT) {
+				f2fs_unlock_op(sbi);
 				continue;
+			}
 
 			if (dn.data_blkaddr == NULL_ADDR) {
 				f2fs_put_dnode(&dn);
+				f2fs_unlock_op(sbi);
 				continue;
 			} else {
 				truncate_data_blocks_range(&dn, 1);
@@ -862,8 +894,9 @@
 
 			f2fs_put_dnode(&dn);
 		}
+		f2fs_unlock_op(sbi);
 	}
-	ret = 0;
+	return 0;
 out:
 	f2fs_unlock_op(sbi);
 	return ret;
@@ -885,6 +918,14 @@
 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
 		return -EINVAL;
 
+	f2fs_balance_fs(F2FS_I_SB(inode));
+
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+
 	pg_start = offset >> PAGE_CACHE_SHIFT;
 	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
 
@@ -946,14 +987,21 @@
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
 	if (pg_start == pg_end) {
-		fill_zero(inode, pg_start, off_start, off_end - off_start);
+		ret = fill_zero(inode, pg_start, off_start,
+						off_end - off_start);
+		if (ret)
+			return ret;
+
 		if (offset + len > new_size)
 			new_size = offset + len;
 		new_size = max_t(loff_t, new_size, offset + len);
 	} else {
 		if (off_start) {
-			fill_zero(inode, pg_start++, off_start,
-					PAGE_CACHE_SIZE - off_start);
+			ret = fill_zero(inode, pg_start++, off_start,
+						PAGE_CACHE_SIZE - off_start);
+			if (ret)
+				return ret;
+
 			new_size = max_t(loff_t, new_size,
 						pg_start << PAGE_CACHE_SHIFT);
 		}
@@ -995,7 +1043,10 @@
 		}
 
 		if (off_end) {
-			fill_zero(inode, pg_end, 0, off_end);
+			ret = fill_zero(inode, pg_end, 0, off_end);
+			if (ret)
+				goto out;
+
 			new_size = max_t(loff_t, new_size, offset + len);
 		}
 	}
@@ -1033,6 +1084,12 @@
 
 	f2fs_balance_fs(sbi);
 
+	if (f2fs_has_inline_data(inode)) {
+		ret = f2fs_convert_inline_inode(inode);
+		if (ret)
+			return ret;
+	}
+
 	ret = truncate_blocks(inode, i_size_read(inode), true);
 	if (ret)
 		return ret;
@@ -1302,6 +1359,7 @@
 static int f2fs_ioc_start_atomic_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
@@ -1311,9 +1369,12 @@
 	if (f2fs_is_atomic_file(inode))
 		return 0;
 
-	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+	ret = f2fs_convert_inline_inode(inode);
+	if (ret)
+		return ret;
 
-	return f2fs_convert_inline_inode(inode);
+	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+	return 0;
 }
 
 static int f2fs_ioc_commit_atomic_write(struct file *filp)
@@ -1333,10 +1394,13 @@
 
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
-		commit_inmem_pages(inode, false);
+		ret = commit_inmem_pages(inode, false);
+		if (ret)
+			goto err_out;
 	}
 
-	ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
+	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
+err_out:
 	mnt_drop_write_file(filp);
 	return ret;
 }
@@ -1344,6 +1408,7 @@
 static int f2fs_ioc_start_volatile_write(struct file *filp)
 {
 	struct inode *inode = file_inode(filp);
+	int ret;
 
 	if (!inode_owner_or_capable(inode))
 		return -EACCES;
@@ -1351,9 +1416,12 @@
 	if (f2fs_is_volatile_file(inode))
 		return 0;
 
-	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+	ret = f2fs_convert_inline_inode(inode);
+	if (ret)
+		return ret;
 
-	return f2fs_convert_inline_inode(inode);
+	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+	return 0;
 }
 
 static int f2fs_ioc_release_volatile_write(struct file *filp)
@@ -1389,7 +1457,7 @@
 
 	if (f2fs_is_atomic_file(inode)) {
 		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
-		commit_inmem_pages(inode, false);
+		commit_inmem_pages(inode, true);
 	}
 
 	if (f2fs_is_volatile_file(inode))
@@ -1544,6 +1612,35 @@
 	return 0;
 }
 
+static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
+{
+	struct inode *inode = file_inode(filp);
+	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	__u32 i, count;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (get_user(count, (__u32 __user *)arg))
+		return -EFAULT;
+
+	if (!count || count > F2FS_BATCH_GC_MAX_NUM)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		if (!mutex_trylock(&sbi->gc_mutex))
+			break;
+
+		if (f2fs_gc(sbi))
+			break;
+	}
+
+	if (put_user(i, (__u32 __user *)arg))
+		return -EFAULT;
+
+	return 0;
+}
+
 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
@@ -1573,6 +1670,8 @@
 		return f2fs_ioc_get_encryption_policy(filp, arg);
 	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
+	case F2FS_IOC_GARBAGE_COLLECT:
+		return f2fs_ioc_gc(filp, arg);
 	default:
 		return -ENOTTY;
 	}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 22fb5ef..782b8e7 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -391,23 +391,27 @@
  * On validity, copy that node with cold status, otherwise (invalid node)
  * ignore that.
  */
-static void gc_node_segment(struct f2fs_sb_info *sbi,
+static int gc_node_segment(struct f2fs_sb_info *sbi,
 		struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
 	bool initial = true;
 	struct f2fs_summary *entry;
+	block_t start_addr;
 	int off;
 
+	start_addr = START_BLOCK(sbi, segno);
+
 next_step:
 	entry = sum;
 
 	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
 		nid_t nid = le32_to_cpu(entry->nid);
 		struct page *node_page;
+		struct node_info ni;
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return;
+			return 0;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -426,6 +430,12 @@
 			continue;
 		}
 
+		get_node_info(sbi, nid, &ni);
+		if (ni.blk_addr != start_addr + off) {
+			f2fs_put_page(node_page, 1);
+			continue;
+		}
+
 		/* set page dirty and write it */
 		if (gc_type == FG_GC) {
 			f2fs_wait_on_page_writeback(node_page, NODE);
@@ -451,13 +461,11 @@
 		};
 		sync_node_pages(sbi, 0, &wbc);
 
-		/*
-		 * In the case of FG_GC, it'd be better to reclaim this victim
-		 * completely.
-		 */
-		if (get_valid_blocks(sbi, segno, 1) != 0)
-			goto next_step;
+		/* return 1 only if FG_GC successfully reclaimed one */
+		if (get_valid_blocks(sbi, segno, 1) == 0)
+			return 1;
 	}
+	return 0;
 }
 
 /*
@@ -487,7 +495,7 @@
 	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
 }
 
-static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
 {
 	struct page *node_page;
@@ -500,13 +508,13 @@
 
 	node_page = get_node_page(sbi, nid);
 	if (IS_ERR(node_page))
-		return 0;
+		return false;
 
 	get_node_info(sbi, nid, dni);
 
 	if (sum->version != dni->version) {
 		f2fs_put_page(node_page, 1);
-		return 0;
+		return false;
 	}
 
 	*nofs = ofs_of_node(node_page);
@@ -514,8 +522,8 @@
 	f2fs_put_page(node_page, 1);
 
 	if (source_blkaddr != blkaddr)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 static void move_encrypted_block(struct inode *inode, block_t bidx)
@@ -552,7 +560,10 @@
 	fio.page = page;
 	fio.blk_addr = dn.data_blkaddr;
 
-	fio.encrypted_page = grab_cache_page(META_MAPPING(fio.sbi), fio.blk_addr);
+	fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
+					fio.blk_addr,
+					FGP_LOCK|FGP_CREAT,
+					GFP_NOFS);
 	if (!fio.encrypted_page)
 		goto put_out;
 
@@ -636,7 +647,7 @@
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
 	struct super_block *sb = sbi->sb;
@@ -659,7 +670,7 @@
 
 		/* stop BG_GC if there is not enough free sections. */
 		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-			return;
+			return 0;
 
 		if (check_valid_map(sbi, segno, off) == 0)
 			continue;
@@ -670,7 +681,7 @@
 		}
 
 		/* Get an inode by ino with checking validity */
-		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
+		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
 			continue;
 
 		if (phase == 1) {
@@ -724,15 +735,11 @@
 	if (gc_type == FG_GC) {
 		f2fs_submit_merged_bio(sbi, DATA, WRITE);
 
-		/*
-		 * In the case of FG_GC, it'd be better to reclaim this victim
-		 * completely.
-		 */
-		if (get_valid_blocks(sbi, segno, 1) != 0) {
-			phase = 2;
-			goto next_step;
-		}
+		/* return 1 only if FG_GC successfully reclaimed one */
+		if (get_valid_blocks(sbi, segno, 1) == 0)
+			return 1;
 	}
+	return 0;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -748,12 +755,13 @@
 	return ret;
 }
 
-static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
 				struct gc_inode_list *gc_list, int gc_type)
 {
 	struct page *sum_page;
 	struct f2fs_summary_block *sum;
 	struct blk_plug plug;
+	int nfree = 0;
 
 	/* read segment summary of victim */
 	sum_page = get_sum_page(sbi, segno);
@@ -773,10 +781,11 @@
 
 	switch (GET_SUM_TYPE((&sum->footer))) {
 	case SUM_TYPE_NODE:
-		gc_node_segment(sbi, sum->entries, segno, gc_type);
+		nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
 		break;
 	case SUM_TYPE_DATA:
-		gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
+		nfree = gc_data_segment(sbi, sum->entries, gc_list,
+							segno, gc_type);
 		break;
 	}
 	blk_finish_plug(&plug);
@@ -785,11 +794,13 @@
 	stat_inc_call_count(sbi->stat_info);
 
 	f2fs_put_page(sum_page, 0);
+	return nfree;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi)
 {
-	unsigned int segno, i;
+	unsigned int segno = NULL_SEGNO;
+	unsigned int i;
 	int gc_type = BG_GC;
 	int nfree = 0;
 	int ret = -1;
@@ -808,10 +819,11 @@
 
 	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
 		gc_type = FG_GC;
-		write_checkpoint(sbi, &cpc);
+		if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
+			write_checkpoint(sbi, &cpc);
 	}
 
-	if (!__get_victim(sbi, &segno, gc_type))
+	if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
 		goto stop;
 	ret = 0;
 
@@ -821,13 +833,10 @@
 								META_SSA);
 
 	for (i = 0; i < sbi->segs_per_sec; i++)
-		do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
+		nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);
 
-	if (gc_type == FG_GC) {
+	if (gc_type == FG_GC)
 		sbi->cur_victim_sec = NULL_SEGNO;
-		nfree++;
-		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
-	}
 
 	if (has_not_enough_free_secs(sbi, nfree))
 		goto gc_more;
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index b4a65be..c5a055b 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -19,6 +19,12 @@
 #define LIMIT_INVALID_BLOCK	40 /* percentage over total user space */
 #define LIMIT_FREE_BLOCK	40 /* percentage over invalid + free space */
 
+/*
+ * with this macro, we can limit the maximum number of garbage collection
+ * passes performed when the user triggers batch-mode gc via ioctl.
+ */
+#define F2FS_BATCH_GC_MAX_NUM		16
+
 /* Search max. number of dirty segments to select a victim segment */
 #define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
 
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index a13ffcc..3d143be 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -360,6 +360,10 @@
 	return 0;
 }
 
+/*
+ * NOTE: ipage is grabbed by the caller, but if any error occurs, we should
+ * release ipage in this function.
+ */
 static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
 				struct f2fs_inline_dentry *inline_dentry)
 {
@@ -369,8 +373,10 @@
 	int err;
 
 	page = grab_cache_page(dir->i_mapping, 0);
-	if (!page)
+	if (!page) {
+		f2fs_put_page(ipage, 1);
 		return -ENOMEM;
+	}
 
 	set_new_dnode(&dn, dir, ipage, NULL, 0);
 	err = f2fs_reserve_block(&dn, 0);
@@ -378,13 +384,21 @@
 		goto out;
 
 	f2fs_wait_on_page_writeback(page, DATA);
-	zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
 
 	dentry_blk = kmap_atomic(page);
 
 	/* copy data from inline dentry block to new dentry block */
 	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
 					INLINE_DENTRY_BITMAP_SIZE);
+	memset(dentry_blk->dentry_bitmap + INLINE_DENTRY_BITMAP_SIZE, 0,
+			SIZE_OF_DENTRY_BITMAP - INLINE_DENTRY_BITMAP_SIZE);
+	/*
+	 * we do not need to zero out the remainder of the dentry and filename
+	 * fields, since the bitmap marks their usage status; we can also skip
+	 * copying/zeroing the reserved space of the dentry block, because it
+	 * hasn't been used so far.
+	 */
 	memcpy(dentry_blk->dentry, inline_dentry->dentry,
 			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
 	memcpy(dentry_blk->filename, inline_dentry->filename,
@@ -434,8 +448,9 @@
 						slots, NR_INLINE_DENTRY);
 	if (bit_pos >= NR_INLINE_DENTRY) {
 		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
-		if (!err)
-			err = -EAGAIN;
+		if (err)
+			return err;
+		err = -EAGAIN;
 		goto out;
 	}
 
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 2550868..35aae65 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -12,7 +12,6 @@
 #include <linux/f2fs_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
-#include <linux/bitops.h>
 
 #include "f2fs.h"
 #include "node.h"
@@ -34,8 +33,8 @@
 		new_fl |= S_NOATIME;
 	if (flags & FS_DIRSYNC_FL)
 		new_fl |= S_DIRSYNC;
-	set_mask_bits(&inode->i_flags,
-			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
+	inode_set_flags(inode, new_fl,
+			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
 }
 
 static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -139,7 +138,7 @@
 	fi->i_pino = le32_to_cpu(ri->i_pino);
 	fi->i_dir_level = ri->i_dir_level;
 
-	f2fs_init_extent_cache(inode, &ri->i_ext);
+	f2fs_init_extent_tree(inode, &ri->i_ext);
 
 	get_inline_info(fi, ri);
 
@@ -155,6 +154,7 @@
 
 	f2fs_put_page(node_page, 1);
 
+	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 
@@ -237,10 +237,11 @@
 	ri->i_size = cpu_to_le64(i_size_read(inode));
 	ri->i_blocks = cpu_to_le64(inode->i_blocks);
 
-	read_lock(&F2FS_I(inode)->ext_lock);
-	set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
-	read_unlock(&F2FS_I(inode)->ext_lock);
-
+	if (F2FS_I(inode)->extent_tree)
+		set_raw_extent(&F2FS_I(inode)->extent_tree->largest,
+							&ri->i_ext);
+	else
+		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
 	set_raw_inline(F2FS_I(inode), ri);
 
 	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
@@ -314,7 +315,9 @@
 void f2fs_evict_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
+	struct f2fs_inode_info *fi = F2FS_I(inode);
+	nid_t xnid = fi->i_xattr_nid;
+	int err = 0;
 
 	/* some remaining atomic pages should be discarded */
 	if (f2fs_is_atomic_file(inode))
@@ -330,41 +333,62 @@
 	f2fs_bug_on(sbi, get_dirty_pages(inode));
 	remove_dirty_dir_inode(inode);
 
+	f2fs_destroy_extent_tree(inode);
+
 	if (inode->i_nlink || is_bad_inode(inode))
 		goto no_delete;
 
 	sb_start_intwrite(inode->i_sb);
-	set_inode_flag(F2FS_I(inode), FI_NO_ALLOC);
+	set_inode_flag(fi, FI_NO_ALLOC);
 	i_size_write(inode, 0);
 
 	if (F2FS_HAS_BLOCKS(inode))
-		f2fs_truncate(inode);
+		err = f2fs_truncate(inode, true);
 
-	f2fs_lock_op(sbi);
-	remove_inode_page(inode);
-	f2fs_unlock_op(sbi);
+	if (!err) {
+		f2fs_lock_op(sbi);
+		err = remove_inode_page(inode);
+		f2fs_unlock_op(sbi);
+	}
 
 	sb_end_intwrite(inode->i_sb);
 no_delete:
+	stat_dec_inline_xattr(inode);
 	stat_dec_inline_dir(inode);
 	stat_dec_inline_inode(inode);
 
-	/* update extent info in inode */
-	if (inode->i_nlink)
-		f2fs_preserve_extent_tree(inode);
-	f2fs_destroy_extent_tree(inode);
-
 	invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
 	if (xnid)
 		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
-	if (is_inode_flag_set(F2FS_I(inode), FI_APPEND_WRITE))
+	if (is_inode_flag_set(fi, FI_APPEND_WRITE))
 		add_dirty_inode(sbi, inode->i_ino, APPEND_INO);
-	if (is_inode_flag_set(F2FS_I(inode), FI_UPDATE_WRITE))
+	if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
 		add_dirty_inode(sbi, inode->i_ino, UPDATE_INO);
+	if (is_inode_flag_set(fi, FI_FREE_NID)) {
+		if (err && err != -ENOENT)
+			alloc_nid_done(sbi, inode->i_ino);
+		else
+			alloc_nid_failed(sbi, inode->i_ino);
+		clear_inode_flag(fi, FI_FREE_NID);
+	}
+
+	if (err && err != -ENOENT) {
+		if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) {
+			/*
+			 * we get here because we failed to release the inode's
+			 * resources earlier, so remind the user to run fsck
+			 * to fix it.
+			 */
+			set_sbi_flag(sbi, SBI_NEED_FSCK);
+			f2fs_msg(sbi->sb, KERN_WARNING,
+				"inode (ino:%lu) resource leak, run fsck "
+				"to fix this issue!", inode->i_ino);
+		}
+	}
 out_clear:
 #ifdef CONFIG_F2FS_FS_ENCRYPTION
-	if (F2FS_I(inode)->i_crypt_info)
-		f2fs_free_encryption_info(inode, F2FS_I(inode)->i_crypt_info);
+	if (fi->i_crypt_info)
+		f2fs_free_encryption_info(inode, fi->i_crypt_info);
 #endif
 	clear_inode(inode);
 }
@@ -373,6 +397,7 @@
 void handle_failed_inode(struct inode *inode)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+	int err = 0;
 
 	clear_nlink(inode);
 	make_bad_inode(inode);
@@ -380,13 +405,29 @@
 
 	i_size_write(inode, 0);
 	if (F2FS_HAS_BLOCKS(inode))
-		f2fs_truncate(inode);
+		err = f2fs_truncate(inode, false);
 
-	remove_inode_page(inode);
+	if (!err)
+		err = remove_inode_page(inode);
 
-	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
-	clear_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
-	alloc_nid_failed(sbi, inode->i_ino);
+	/*
+	 * if we skipped truncate_node in remove_inode_page because we failed
+	 * earlier, we need another way to release the resources of this inode
+	 * (e.g. valid block count, node block or nid). Here we choose to add
+	 * this inode to the orphan list, so that iput can release it in the
+	 * orphan recovery flow.
+	 *
+	 * Note: we should add the inode to the orphan list before
+	 * f2fs_unlock_op(), so we do not lose this orphan if a checkpoint is
+	 * followed by a sudden power-off.
+	 */
+	if (err && err != -ENOENT) {
+		err = acquire_orphan_inode(sbi);
+		if (!err)
+			add_orphan_inode(sbi, inode->i_ino);
+	}
+
+	set_inode_flag(F2FS_I(inode), FI_FREE_NID);
 	f2fs_unlock_op(sbi);
 
 	/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index fdbae21..a680bf3 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -53,7 +53,7 @@
 	if (err) {
 		err = -EINVAL;
 		nid_free = true;
-		goto out;
+		goto fail;
 	}
 
 	/* If the directory encrypted, then we should encrypt the inode. */
@@ -65,6 +65,9 @@
 	if (f2fs_may_inline_dentry(inode))
 		set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
 
+	f2fs_init_extent_tree(inode, NULL);
+
+	stat_inc_inline_xattr(inode);
 	stat_inc_inline_inode(inode);
 	stat_inc_inline_dir(inode);
 
@@ -72,15 +75,12 @@
 	mark_inode_dirty(inode);
 	return inode;
 
-out:
-	clear_nlink(inode);
-	unlock_new_inode(inode);
 fail:
 	trace_f2fs_new_inode(inode, err);
 	make_bad_inode(inode);
-	iput(inode);
 	if (nid_free)
-		alloc_nid_failed(sbi, ino);
+		set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+	iput(inode);
 	return ERR_PTR(err);
 }
 
@@ -89,7 +89,14 @@
 	size_t slen = strlen(s);
 	size_t sublen = strlen(sub);
 
-	if (sublen > slen)
+	/*
+	 * the filename of a multimedia file should have the format:
+	 * "filename + '.' + extension".
+	 */
+	if (slen < sublen + 2)
+		return 0;
+
+	if (s[slen - sublen - 1] != '.')
 		return 0;
 
 	return !strncasecmp(s + slen - sublen, sub, sublen);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7dd63b7..27d1a74 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -159,7 +159,7 @@
 
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
-		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
+		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
 
 		INIT_LIST_HEAD(&head->entry_list);
 		INIT_LIST_HEAD(&head->set_list);
@@ -246,7 +246,7 @@
 {
 	struct nat_entry *new;
 
-	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
 	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
@@ -306,6 +306,10 @@
 	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
 		unsigned char version = nat_get_version(e);
 		nat_set_version(e, inc_node_version(version));
+
+		/* in order to reuse the nid */
+		if (nm_i->next_scan_nid > ni->nid)
+			nm_i->next_scan_nid = ni->nid;
 	}
 
 	/* change address */
@@ -328,11 +332,11 @@
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	int nr = nr_shrink;
 
-	if (available_free_memory(sbi, NAT_ENTRIES))
+	if (!down_write_trylock(&nm_i->nat_tree_lock))
 		return 0;
 
-	down_write(&nm_i->nat_tree_lock);
 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 		struct nat_entry *ne;
 		ne = list_first_entry(&nm_i->nat_entries,
@@ -341,7 +345,7 @@
 		nr_shrink--;
 	}
 	up_write(&nm_i->nat_tree_lock);
-	return nr_shrink;
+	return nr - nr_shrink;
 }
 
 /*
@@ -898,17 +902,20 @@
  * Caller should grab and release a rwsem by calling f2fs_lock_op() and
  * f2fs_unlock_op().
  */
-void remove_inode_page(struct inode *inode)
+int remove_inode_page(struct inode *inode)
 {
 	struct dnode_of_data dn;
+	int err;
 
 	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
-	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
-		return;
+	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
+	if (err)
+		return err;
 
-	if (truncate_xattr_node(inode, dn.inode_page)) {
+	err = truncate_xattr_node(inode, dn.inode_page);
+	if (err) {
 		f2fs_put_dnode(&dn);
-		return;
+		return err;
 	}
 
 	/* remove potential inline_data blocks */
@@ -922,6 +929,7 @@
 
 	/* will put inode & node pages */
 	truncate_node(&dn);
+	return 0;
 }
 
 struct page *new_inode_page(struct inode *inode)
@@ -991,8 +999,7 @@
 /*
  * Caller should do after getting the following values.
  * 0: f2fs_put_page(page, 0)
- * LOCKED_PAGE: f2fs_put_page(page, 1)
- * error: nothing
+ * LOCKED_PAGE or error: f2fs_put_page(page, 1)
  */
 static int read_node_page(struct page *page, int rw)
 {
@@ -1010,7 +1017,6 @@
 
 	if (unlikely(ni.blk_addr == NULL_ADDR)) {
 		ClearPageUptodate(page);
-		f2fs_put_page(page, 1);
 		return -ENOENT;
 	}
 
@@ -1041,10 +1047,7 @@
 		return;
 
 	err = read_node_page(apage, READA);
-	if (err == 0)
-		f2fs_put_page(apage, 0);
-	else if (err == LOCKED_PAGE)
-		f2fs_put_page(apage, 1);
+	f2fs_put_page(apage, err ? 1 : 0);
 }
 
 struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1057,10 +1060,12 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err < 0)
+	if (err < 0) {
+		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
-	else if (err != LOCKED_PAGE)
+	} else if (err != LOCKED_PAGE) {
 		lock_page(page);
+	}
 
 	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
 		ClearPageUptodate(page);
@@ -1096,10 +1101,12 @@
 		return ERR_PTR(-ENOMEM);
 
 	err = read_node_page(page, READ_SYNC);
-	if (err < 0)
+	if (err < 0) {
+		f2fs_put_page(page, 1);
 		return ERR_PTR(err);
-	else if (err == LOCKED_PAGE)
+	} else if (err == LOCKED_PAGE) {
 		goto page_hit;
+	}
 
 	blk_start_plug(&plug);
 
@@ -1533,7 +1540,7 @@
 		if (unlikely(nid >= nm_i->max_nid))
 			nid = 0;
 
-		if (i++ == FREE_NID_PAGES)
+		if (++i >= FREE_NID_PAGES)
 			break;
 	}
 
@@ -1570,6 +1577,8 @@
 
 	/* We should not use stale free nids created by build_free_nids */
 	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
+		struct node_info ni;
+
 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
 		list_for_each_entry(i, &nm_i->free_nid_list, list)
 			if (i->state == NID_NEW)
@@ -1580,6 +1589,13 @@
 		i->state = NID_ALLOC;
 		nm_i->fcnt--;
 		spin_unlock(&nm_i->free_nid_list_lock);
+
+		/* check whether the nid has already been allocated */
+		get_node_info(sbi, *nid, &ni);
+		if (ni.blk_addr != NULL_ADDR) {
+			alloc_nid_done(sbi, *nid);
+			goto retry;
+		}
 		return true;
 	}
 	spin_unlock(&nm_i->free_nid_list_lock);
@@ -1636,6 +1652,32 @@
 		kmem_cache_free(free_nid_slab, i);
 }
 
+int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
+{
+	struct f2fs_nm_info *nm_i = NM_I(sbi);
+	struct free_nid *i, *next;
+	int nr = nr_shrink;
+
+	if (!mutex_trylock(&nm_i->build_lock))
+		return 0;
+
+	spin_lock(&nm_i->free_nid_list_lock);
+	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+			break;
+		if (i->state == NID_ALLOC)
+			continue;
+		__del_from_free_nid_list(nm_i, i);
+		kmem_cache_free(free_nid_slab, i);
+		nm_i->fcnt--;
+		nr_shrink--;
+	}
+	spin_unlock(&nm_i->free_nid_list_lock);
+	mutex_unlock(&nm_i->build_lock);
+
+	return nr - nr_shrink;
+}
+
 void recover_inline_xattr(struct inode *inode, struct page *page)
 {
 	void *src_addr, *dst_addr;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 24a8c1d..faec2ca 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -399,14 +399,35 @@
 	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
 	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
 
-	for (; start < end; start++) {
+	for (; start < end; start++, dn.ofs_in_node++) {
 		block_t src, dest;
 
 		src = datablock_addr(dn.node_page, dn.ofs_in_node);
 		dest = datablock_addr(page, dn.ofs_in_node);
 
-		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR &&
-			is_valid_blkaddr(sbi, dest, META_POR)) {
+		/* skip recovering if dest is the same as src */
+		if (src == dest)
+			continue;
+
+		/* dest is invalid, just invalidate src block */
+		if (dest == NULL_ADDR) {
+			truncate_data_blocks_range(&dn, 1);
+			continue;
+		}
+
+		/*
+		 * dest is a reserved block, invalidate the src block
+		 * and then reserve one new block in the dnode page.
+		 */
+		if (dest == NEW_ADDR) {
+			truncate_data_blocks_range(&dn, 1);
+			err = reserve_new_block(&dn);
+			f2fs_bug_on(sbi, err);
+			continue;
+		}
+
+		/* dest is a valid block, try to recover from src to dest */
+		if (is_valid_blkaddr(sbi, dest, META_POR)) {
 
 			if (src == NULL_ADDR) {
 				err = reserve_new_block(&dn);
@@ -424,7 +445,6 @@
 							ni.version, false);
 			recovered++;
 		}
-		dn.ofs_in_node++;
 	}
 
 	if (IS_INODE(dn.node_page))
@@ -525,14 +545,12 @@
 
 	INIT_LIST_HEAD(&inode_list);
 
-	/* step #1: find fsynced inode numbers */
-	set_sbi_flag(sbi, SBI_POR_DOING);
-
 	/* prevent checkpoint */
 	mutex_lock(&sbi->cp_mutex);
 
 	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
 
+	/* step #1: find fsynced inode numbers */
 	err = find_fsync_dnodes(sbi, &inode_list);
 	if (err)
 		goto out;
@@ -561,11 +579,20 @@
 
 	clear_sbi_flag(sbi, SBI_POR_DOING);
 	if (err) {
-		discard_next_dnode(sbi, blkaddr);
+		bool invalidate = false;
+
+		if (discard_next_dnode(sbi, blkaddr))
+			invalidate = true;
 
 		/* Flush all the NAT/SIT pages */
 		while (get_pages(sbi, F2FS_DIRTY_META))
 			sync_meta_pages(sbi, META, LONG_MAX);
+
+		/* invalidate temporary meta page */
+		if (invalidate)
+			invalidate_mapping_pages(META_MAPPING(sbi),
+							blkaddr, blkaddr);
+
 		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
 		mutex_unlock(&sbi->cp_mutex);
 	} else if (need_writecp) {
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 61b97f9..78e6d06 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -197,28 +197,20 @@
 {
 	struct f2fs_inode_info *fi = F2FS_I(inode);
 	struct inmem_pages *new;
-	int err;
 
-	SetPagePrivate(page);
 	f2fs_trace_pid(page);
 
+	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
+	SetPagePrivate(page);
+
 	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
 	/* add atomic page indices to the list */
 	new->page = page;
 	INIT_LIST_HEAD(&new->list);
-retry:
+
 	/* increase reference count with clean state */
 	mutex_lock(&fi->inmem_lock);
-	err = radix_tree_insert(&fi->inmem_root, page->index, new);
-	if (err == -EEXIST) {
-		mutex_unlock(&fi->inmem_lock);
-		kmem_cache_free(inmem_entry_slab, new);
-		return;
-	} else if (err) {
-		mutex_unlock(&fi->inmem_lock);
-		goto retry;
-	}
 	get_page(page);
 	list_add_tail(&new->list, &fi->inmem_pages);
 	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -227,7 +219,7 @@
 	trace_f2fs_register_inmem_page(page, INMEM);
 }
 
-void commit_inmem_pages(struct inode *inode, bool abort)
+int commit_inmem_pages(struct inode *inode, bool abort)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	struct f2fs_inode_info *fi = F2FS_I(inode);
@@ -239,6 +231,7 @@
 		.rw = WRITE_SYNC | REQ_PRIO,
 		.encrypted_page = NULL,
 	};
+	int err = 0;
 
 	/*
 	 * The abort is true only when f2fs_evict_inode is called.
@@ -254,8 +247,8 @@
 
 	mutex_lock(&fi->inmem_lock);
 	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
+		lock_page(cur->page);
 		if (!abort) {
-			lock_page(cur->page);
 			if (cur->page->mapping == inode->i_mapping) {
 				set_page_dirty(cur->page);
 				f2fs_wait_on_page_writeback(cur->page, DATA);
@@ -263,15 +256,20 @@
 					inode_dec_dirty_pages(inode);
 				trace_f2fs_commit_inmem_page(cur->page, INMEM);
 				fio.page = cur->page;
-				do_write_data_page(&fio);
+				err = do_write_data_page(&fio);
 				submit_bio = true;
+				if (err) {
+					unlock_page(cur->page);
+					break;
+				}
 			}
-			f2fs_put_page(cur->page, 1);
 		} else {
 			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
-			put_page(cur->page);
 		}
-		radix_tree_delete(&fi->inmem_root, cur->page->index);
+		set_page_private(cur->page, 0);
+		ClearPagePrivate(cur->page);
+		f2fs_put_page(cur->page, 1);
+
 		list_del(&cur->list);
 		kmem_cache_free(inmem_entry_slab, cur);
 		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
@@ -283,6 +281,7 @@
 		if (submit_bio)
 			f2fs_submit_merged_bio(sbi, DATA, WRITE);
 	}
+	return err;
 }
 
 /*
@@ -304,10 +303,18 @@
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
 	/* try to shrink extent cache when there is not enough memory */
-	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+	if (!available_free_memory(sbi, EXTENT_CACHE))
+		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
-	/* check the # of cached NAT entries and prefree segments */
-	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
+	/* check the # of cached NAT entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES))
+		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
+
+	if (!available_free_memory(sbi, FREE_NIDS))
+		try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+
+	/* checkpoint is the only way to shrink partial cached entries */
+	if (!available_free_memory(sbi, NAT_ENTRIES) ||
 			excess_prefree_segs(sbi) ||
 			!available_free_memory(sbi, INO_ENTRIES))
 		f2fs_sync_fs(sbi->sb, true);
@@ -323,10 +330,12 @@
 		return 0;
 
 	if (!llist_empty(&fcc->issue_list)) {
-		struct bio *bio = bio_alloc(GFP_NOIO, 0);
+		struct bio *bio;
 		struct flush_cmd *cmd, *next;
 		int ret;
 
+		bio = f2fs_bio_alloc(0);
+
 		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
 		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
@@ -358,8 +367,15 @@
 	if (test_opt(sbi, NOBARRIER))
 		return 0;
 
-	if (!test_opt(sbi, FLUSH_MERGE))
-		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);
+	if (!test_opt(sbi, FLUSH_MERGE)) {
+		struct bio *bio = f2fs_bio_alloc(0);
+		int ret;
+
+		bio->bi_bdev = sbi->sb->s_bdev;
+		ret = submit_bio_wait(WRITE_FLUSH, bio);
+		bio_put(bio);
+		return ret;
+	}
 
 	init_completion(&cmd.wait);
 
@@ -503,7 +519,7 @@
 	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
 }
 
-void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
+bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
 {
 	int err = -ENOTSUPP;
 
@@ -513,13 +529,16 @@
 		unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
 
 		if (f2fs_test_bit(offset, se->discard_map))
-			return;
+			return false;
 
 		err = f2fs_issue_discard(sbi, blkaddr, 1);
 	}
 
-	if (err)
+	if (err) {
 		update_meta_page(sbi, NULL, blkaddr);
+		return true;
+	}
+	return false;
 }
 
 static void __add_discard_entry(struct f2fs_sb_info *sbi,
@@ -1218,7 +1237,8 @@
 	mutex_lock(&sit_i->sentry_lock);
 
 	/* direct_io'ed data is aligned to the segment for better performance */
-	if (direct_io && curseg->next_blkoff)
+	if (direct_io && curseg->next_blkoff &&
+				!has_not_enough_free_secs(sbi, 0))
 		__allocate_new_segments(sbi, type);
 
 	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
@@ -1733,7 +1753,7 @@
 static struct sit_entry_set *grab_sit_entry_set(void)
 {
 	struct sit_entry_set *ses =
-			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
+			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
 
 	ses->entry_cnt = 0;
 	INIT_LIST_HEAD(&ses->set_list);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 79e7b87..b6e4ed1 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -177,6 +177,15 @@
 	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
 };
 
+/*
+ * this value is set in the page as private data to indicate that the
+ * page has been atomically written and is on the inmem_pages list.
+ */
+#define ATOMIC_WRITTEN_PAGE		0x0000ffff
+
+#define IS_ATOMIC_WRITTEN_PAGE(page)			\
+		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
+
 struct inmem_pages {
 	struct list_head list;
 	struct page *page;
@@ -555,16 +564,15 @@
 	return curseg->next_blkoff;
 }
 
-#ifdef CONFIG_F2FS_CHECK_FS
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
-	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
+	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
 }
 
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
-	BUG_ON(blk_addr < SEG0_BLKADDR(sbi));
-	BUG_ON(blk_addr >= MAX_BLKADDR(sbi));
+	f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
+					|| blk_addr >= MAX_BLKADDR(sbi));
 }
 
 /*
@@ -573,16 +581,11 @@
 static inline void check_block_count(struct f2fs_sb_info *sbi,
 		int segno, struct f2fs_sit_entry *raw_sit)
 {
+#ifdef CONFIG_F2FS_CHECK_FS
 	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
 	int valid_blocks = 0;
 	int cur_pos = 0, next_pos;
 
-	/* check segment usage */
-	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);
-
-	/* check boundary of a given segment number */
-	BUG_ON(segno > TOTAL_SEGS(sbi) - 1);
-
 	/* check bitmap with valid block count */
 	do {
 		if (is_valid) {
@@ -598,35 +601,11 @@
 		is_valid = !is_valid;
 	} while (cur_pos < sbi->blocks_per_seg);
 	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
-}
-#else
-static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
-{
-	if (segno > TOTAL_SEGS(sbi) - 1)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
-
-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
-{
-	if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
-
-/*
- * Summary block is always treated as an invalid block
- */
-static inline void check_block_count(struct f2fs_sb_info *sbi,
-		int segno, struct f2fs_sit_entry *raw_sit)
-{
-	/* check segment usage */
-	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-
-	/* check boundary of a given segment number */
-	if (segno > TOTAL_SEGS(sbi) - 1)
-		set_sbi_flag(sbi, SBI_NEED_FSCK);
-}
 #endif
+	/* check segment usage, and check boundary of a given segment number */
+	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+					|| segno > TOTAL_SEGS(sbi) - 1);
+}
 
 static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
 						unsigned int start)
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
new file mode 100644
index 0000000..da0d8e0
--- /dev/null
+++ b/fs/f2fs/shrinker.c
@@ -0,0 +1,139 @@
+/*
+ * f2fs shrinker support
+ *   the basic infra was copied from fs/ubifs/shrinker.c
+ *
+ * Copyright (c) 2015 Motorola Mobility
+ * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/fs.h>
+#include <linux/f2fs_fs.h>
+
+#include "f2fs.h"
+
+static LIST_HEAD(f2fs_list);
+static DEFINE_SPINLOCK(f2fs_list_lock);
+static unsigned int shrinker_run_no;
+
+static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
+{
+	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
+}
+
+static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
+{
+	if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
+		return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+	return 0;
+}
+
+static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
+{
+	return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+}
+
+unsigned long f2fs_shrink_count(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned long count = 0;
+
+	spin_lock(&f2fs_list_lock);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		/* count extent cache entries */
+		count += __count_extent_cache(sbi);
+
+		/* count clean nat cache entries */
+		count += __count_nat_entries(sbi);
+
+		/* count free nids cache entries */
+		count += __count_free_nids(sbi);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		mutex_unlock(&sbi->umount_mutex);
+	}
+	spin_unlock(&f2fs_list_lock);
+	return count;
+}
+
+unsigned long f2fs_shrink_scan(struct shrinker *shrink,
+				struct shrink_control *sc)
+{
+	unsigned long nr = sc->nr_to_scan;
+	struct f2fs_sb_info *sbi;
+	struct list_head *p;
+	unsigned int run_no;
+	unsigned long freed = 0;
+
+	spin_lock(&f2fs_list_lock);
+	do {
+		run_no = ++shrinker_run_no;
+	} while (run_no == 0);
+	p = f2fs_list.next;
+	while (p != &f2fs_list) {
+		sbi = list_entry(p, struct f2fs_sb_info, s_list);
+
+		if (sbi->shrinker_run_no == run_no)
+			break;
+
+		/* stop f2fs_put_super */
+		if (!mutex_trylock(&sbi->umount_mutex)) {
+			p = p->next;
+			continue;
+		}
+		spin_unlock(&f2fs_list_lock);
+
+		sbi->shrinker_run_no = run_no;
+
+		/* shrink extent cache entries */
+		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
+
+		/* shrink clean nat cache entries */
+		if (freed < nr)
+			freed += try_to_free_nats(sbi, nr - freed);
+
+		/* shrink free nids cache entries */
+		if (freed < nr)
+			freed += try_to_free_nids(sbi, nr - freed);
+
+		spin_lock(&f2fs_list_lock);
+		p = p->next;
+		list_move_tail(&sbi->s_list, &f2fs_list);
+		mutex_unlock(&sbi->umount_mutex);
+		if (freed >= nr)
+			break;
+	}
+	spin_unlock(&f2fs_list_lock);
+	return freed;
+}
+
+void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
+{
+	spin_lock(&f2fs_list_lock);
+	list_add_tail(&sbi->s_list, &f2fs_list);
+	spin_unlock(&f2fs_list_lock);
+}
+
+void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
+{
+	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));
+
+	spin_lock(&f2fs_list_lock);
+	list_del(&sbi->s_list);
+	spin_unlock(&f2fs_list_lock);
+}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a06b0b4..f794781 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -39,6 +39,13 @@
 static struct kmem_cache *f2fs_inode_cachep;
 static struct kset *f2fs_kset;
 
+/* f2fs-wide shrinker description */
+static struct shrinker f2fs_shrinker_info = {
+	.scan_objects = f2fs_shrink_scan,
+	.count_objects = f2fs_shrink_count,
+	.seeks = DEFAULT_SEEKS,
+};
+
 enum {
 	Opt_gc_background,
 	Opt_disable_roll_forward,
@@ -58,6 +65,7 @@
 	Opt_nobarrier,
 	Opt_fastboot,
 	Opt_extent_cache,
+	Opt_noextent_cache,
 	Opt_noinline_data,
 	Opt_err,
 };
@@ -81,6 +89,7 @@
 	{Opt_nobarrier, "nobarrier"},
 	{Opt_fastboot, "fastboot"},
 	{Opt_extent_cache, "extent_cache"},
+	{Opt_noextent_cache, "noextent_cache"},
 	{Opt_noinline_data, "noinline_data"},
 	{Opt_err, NULL},
 };
@@ -382,6 +391,9 @@
 		case Opt_extent_cache:
 			set_opt(sbi, EXTENT_CACHE);
 			break;
+		case Opt_noextent_cache:
+			clear_opt(sbi, EXTENT_CACHE);
+			break;
 		case Opt_noinline_data:
 			clear_opt(sbi, INLINE_DATA);
 			break;
@@ -410,9 +422,7 @@
 	atomic_set(&fi->dirty_pages, 0);
 	fi->i_current_depth = 1;
 	fi->i_advise = 0;
-	rwlock_init(&fi->ext_lock);
 	init_rwsem(&fi->i_sem);
-	INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS);
 	INIT_LIST_HEAD(&fi->inmem_pages);
 	mutex_init(&fi->inmem_lock);
 
@@ -441,17 +451,22 @@
 	 */
 	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
 		if (!inode->i_nlink && !is_bad_inode(inode)) {
+			/* avoid a simultaneous evict_inode call */
+			atomic_inc(&inode->i_count);
 			spin_unlock(&inode->i_lock);
 
 			/* some remaining atomic pages should be discarded */
 			if (f2fs_is_atomic_file(inode))
 				commit_inmem_pages(inode, true);
 
+			/* fi->extent_tree should remain for writepage */
+			f2fs_destroy_extent_node(inode);
+
 			sb_start_intwrite(inode->i_sb);
 			i_size_write(inode, 0);
 
 			if (F2FS_HAS_BLOCKS(inode))
-				f2fs_truncate(inode);
+				f2fs_truncate(inode, true);
 
 			sb_end_intwrite(inode->i_sb);
 
@@ -461,6 +476,7 @@
 					F2FS_I(inode)->i_crypt_info);
 #endif
 			spin_lock(&inode->i_lock);
+			atomic_dec(&inode->i_count);
 		}
 		return 0;
 	}
@@ -498,9 +514,11 @@
 	}
 	kobject_del(&sbi->s_kobj);
 
-	f2fs_destroy_stats(sbi);
 	stop_gc_thread(sbi);
 
+	/* prevent remaining shrinker jobs */
+	mutex_lock(&sbi->umount_mutex);
+
 	/*
 	 * We don't need to do checkpoint when superblock is clean.
 	 * But, the previous checkpoint was not done by umount, it needs to do
@@ -514,6 +532,9 @@
 		write_checkpoint(sbi, &cpc);
 	}
 
+	/* write_checkpoint can update stat information */
+	f2fs_destroy_stats(sbi);
+
 	/*
 	 * normally superblock is clean, so we need to release this.
 	 * In addition, EIO will skip do checkpoint, we need this as well.
@@ -521,6 +542,9 @@
 	release_dirty_inode(sbi);
 	release_discard_addrs(sbi);
 
+	f2fs_leave_shrinker(sbi);
+	mutex_unlock(&sbi->umount_mutex);
+
 	iput(sbi->node_inode);
 	iput(sbi->meta_inode);
 
@@ -647,6 +671,8 @@
 		seq_puts(seq, ",fastboot");
 	if (test_opt(sbi, EXTENT_CACHE))
 		seq_puts(seq, ",extent_cache");
+	else
+		seq_puts(seq, ",noextent_cache");
 	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
 
 	return 0;
@@ -667,7 +693,7 @@
 		struct seg_entry *se = get_seg_entry(sbi, i);
 
 		if ((i % 10) == 0)
-			seq_printf(seq, "%-5d", i);
+			seq_printf(seq, "%-10d", i);
 		seq_printf(seq, "%d|%-3u", se->type,
 					get_valid_blocks(sbi, i, 1));
 		if ((i % 10) == 9 || i == (total_segs - 1))
@@ -699,6 +725,7 @@
 
 	set_opt(sbi, BG_GC);
 	set_opt(sbi, INLINE_DATA);
+	set_opt(sbi, EXTENT_CACHE);
 
 #ifdef CONFIG_F2FS_FS_XATTR
 	set_opt(sbi, XATTR_USER);
@@ -970,6 +997,9 @@
 
 	sbi->dir_level = DEF_DIR_LEVEL;
 	clear_sbi_flag(sbi, SBI_NEED_FSCK);
+
+	INIT_LIST_HEAD(&sbi->s_list);
+	mutex_init(&sbi->umount_mutex);
 }
 
 /*
@@ -1135,7 +1165,9 @@
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
-	clear_sbi_flag(sbi, SBI_POR_DOING);
+
+	/* disallow all the data/node/meta page writes */
+	set_sbi_flag(sbi, SBI_POR_DOING);
 	spin_lock_init(&sbi->stat_lock);
 
 	init_rwsem(&sbi->read_io.io_rwsem);
@@ -1212,8 +1244,12 @@
 		goto free_nm;
 	}
 
+	f2fs_join_shrinker(sbi);
+
 	/* if there are any orphan nodes, free them */
-	recover_orphan_inodes(sbi);
+	err = recover_orphan_inodes(sbi);
+	if (err)
+		goto free_node_inode;
 
 	/* read root inode and dentry */
 	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
@@ -1275,6 +1311,8 @@
 			goto free_kobj;
 		}
 	}
+	/* recover_fsync_data() cleared this already */
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 
 	/*
 	 * If filesystem is not mounted as read-only then
@@ -1308,7 +1346,10 @@
 	dput(sb->s_root);
 	sb->s_root = NULL;
 free_node_inode:
+	mutex_lock(&sbi->umount_mutex);
+	f2fs_leave_shrinker(sbi);
 	iput(sbi->node_inode);
+	mutex_unlock(&sbi->umount_mutex);
 free_nm:
 	destroy_node_manager(sbi);
 free_sm:
@@ -1404,13 +1445,20 @@
 	err = f2fs_init_crypto();
 	if (err)
 		goto free_kset;
-	err = register_filesystem(&f2fs_fs_type);
+
+	err = register_shrinker(&f2fs_shrinker_info);
 	if (err)
 		goto free_crypto;
+
+	err = register_filesystem(&f2fs_fs_type);
+	if (err)
+		goto free_shrinker;
 	f2fs_create_root_stats();
 	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
 	return 0;
 
+free_shrinker:
+	unregister_shrinker(&f2fs_shrinker_info);
 free_crypto:
 	f2fs_exit_crypto();
 free_kset:
@@ -1433,6 +1481,7 @@
 {
 	remove_proc_entry("fs/f2fs", NULL);
 	f2fs_destroy_root_stats();
+	unregister_shrinker(&f2fs_shrinker_info);
 	unregister_filesystem(&f2fs_fs_type);
 	f2fs_exit_crypto();
 	destroy_extent_cache();
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 07449b98..4de2286 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -499,9 +499,12 @@
 
 	len = strlen(name);
 
-	if (len > F2FS_NAME_LEN || size > MAX_VALUE_LEN(inode))
+	if (len > F2FS_NAME_LEN)
 		return -ERANGE;
 
+	if (size > MAX_VALUE_LEN(inode))
+		return -E2BIG;
+
 	base_addr = read_all_xattrs(inode, ipage);
 	if (!base_addr)
 		goto exit;
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 484b32d..1cff72d 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -192,7 +192,7 @@
  *   by @dp in @dip.
  *
  * Returns:
- *   A NULL-pointer on success, else an negative error code encoded
+ *   A NULL-pointer on success, else a negative error code encoded
  *   in the return pointer.
  */
 static struct dentry *
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 518c629..5fa588e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -844,14 +844,15 @@
 	struct wb_iter iter;
 
 	might_sleep();
-
-	if (!bdi_has_dirty_io(bdi))
-		return;
 restart:
 	rcu_read_lock();
 	bdi_for_each_wb(wb, bdi, &iter, next_blkcg_id) {
-		if (!wb_has_dirty_io(wb) ||
-		    (skip_if_busy && writeback_in_progress(wb)))
+		/* SYNC_ALL writes out I_DIRTY_TIME too */
+		if (!wb_has_dirty_io(wb) &&
+		    (base_work->sync_mode == WB_SYNC_NONE ||
+		     list_empty(&wb->b_dirty_time)))
+			continue;
+		if (skip_if_busy && writeback_in_progress(wb))
 			continue;
 
 		base_work->nr_pages = wb_split_bdi_pages(wb, nr_pages);
@@ -899,8 +900,7 @@
 {
 	might_sleep();
 
-	if (bdi_has_dirty_io(bdi) &&
-	    (!skip_if_busy || !writeback_in_progress(&bdi->wb))) {
+	if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
 		base_work->auto_free = 0;
 		base_work->single_wait = 0;
 		base_work->single_done = 0;
@@ -2275,8 +2275,12 @@
 	};
 	struct backing_dev_info *bdi = sb->s_bdi;
 
-	/* Nothing to do? */
-	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
+	/*
+	 * Can't skip on !bdi_has_dirty() because we should wait for !dirty
+	 * inodes under writeback and I_DIRTY_TIME inodes ignored by
+	 * bdi_has_dirty() need to be written out too.
+	 */
+	if (bdi == &noop_backing_dev_info)
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 2c1ae86..92324ac 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -202,22 +202,22 @@
  *
  */
 
-static void gfs2_end_log_write(struct bio *bio, int error)
+static void gfs2_end_log_write(struct bio *bio)
 {
 	struct gfs2_sbd *sdp = bio->bi_private;
 	struct bio_vec *bvec;
 	struct page *page;
 	int i;
 
-	if (error) {
-		sdp->sd_log_error = error;
-		fs_err(sdp, "Error %d writing to log\n", error);
+	if (bio->bi_error) {
+		sdp->sd_log_error = bio->bi_error;
+		fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
 	}
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		page = bvec->bv_page;
 		if (page_has_buffers(page))
-			gfs2_end_log_write_bh(sdp, bvec, error);
+			gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
 		else
 			mempool_free(page, gfs2_page_pool);
 	}
@@ -261,18 +261,11 @@
 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
 {
 	struct super_block *sb = sdp->sd_vfs;
-	unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev);
 	struct bio *bio;
 
 	BUG_ON(sdp->sd_log_bio);
 
-	while (1) {
-		bio = bio_alloc(GFP_NOIO, nrvecs);
-		if (likely(bio))
-			break;
-		nrvecs = max(nrvecs/2, 1U);
-	}
-
+	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio->bi_end_io = gfs2_end_log_write;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1e3a93f..02586e7 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -171,14 +171,14 @@
 	return -EINVAL;
 }
 
-static void end_bio_io_page(struct bio *bio, int error)
+static void end_bio_io_page(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
-	if (!error)
+	if (!bio->bi_error)
 		SetPageUptodate(page);
 	else
-		pr_warn("error %d reading superblock\n", error);
+		pr_warn("error %d reading superblock\n", bio->bi_error);
 	unlock_page(page);
 }
 
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 8057fe4..f626114 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -10,6 +10,30 @@
 #include <linux/blkdev.h>
 #include "hpfs_fn.h"
 
+secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
+{
+	unsigned i;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
+		if (sbi->hotfix_from[i] == sec) {
+			return sbi->hotfix_to[i];
+		}
+	}
+	return sec;
+}
+
+unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
+{
+	unsigned i;
+	struct hpfs_sb_info *sbi = hpfs_sb(s);
+	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
+		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
+			n = sbi->hotfix_from[i] - sec;
+		}
+	}
+	return n;
+}
+
 void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
 {
 	struct buffer_head *bh;
@@ -18,6 +42,9 @@
 	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
 		return;
 
+	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
+		return;
+
 	bh = sb_find_get_block(s, secno);
 	if (bh) {
 		if (buffer_uptodate(bh)) {
@@ -51,7 +78,7 @@
 
 	cond_resched();
 
-	*bhp = bh = sb_bread(s, secno);
+	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
 	if (bh != NULL)
 		return bh->b_data;
 	else {
@@ -71,7 +98,7 @@
 
 	cond_resched();
 
-	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
+	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
 		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
 		set_buffer_uptodate(bh);
 		return bh->b_data;
@@ -99,10 +126,10 @@
 
 	hpfs_prefetch_sectors(s, secno, 4 + ahead);
 
-	if (!(qbh->bh[0] = sb_bread(s, secno + 0))) goto bail0;
-	if (!(qbh->bh[1] = sb_bread(s, secno + 1))) goto bail1;
-	if (!(qbh->bh[2] = sb_bread(s, secno + 2))) goto bail2;
-	if (!(qbh->bh[3] = sb_bread(s, secno + 3))) goto bail3;
+	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
+	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
+	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
+	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;
 
 	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
 	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 7ca28d6..d3bcdd9 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -83,6 +83,11 @@
 	if (s) {
 		if (bh_result->b_size >> 9 < n_secs)
 			n_secs = bh_result->b_size >> 9;
+		n_secs = hpfs_search_hotfix_map_for_range(inode->i_sb, s, n_secs);
+		if (unlikely(!n_secs)) {
+			s = hpfs_search_hotfix_map(inode->i_sb, s);
+			n_secs = 1;
+		}
 		map_bh(bh_result, inode->i_sb, s);
 		bh_result->b_size = n_secs << 9;
 		goto ret_0;
@@ -101,7 +106,7 @@
 	inode->i_blocks++;
 	hpfs_i(inode)->mmu_private += 512;
 	set_buffer_new(bh_result);
-	map_bh(bh_result, inode->i_sb, s);
+	map_bh(bh_result, inode->i_sb, hpfs_search_hotfix_map(inode->i_sb, s));
 	ret_0:
 	r = 0;
 	ret_r:
@@ -181,7 +186,7 @@
 
 static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block)
 {
-	return generic_block_bmap(mapping,block,hpfs_get_block);
+	return generic_block_bmap(mapping, block, hpfs_get_block);
 }
 
 const struct address_space_operations hpfs_aops = {
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c4867b5..975654a 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -88,6 +88,10 @@
 	unsigned sb_max_fwd_alloc;	/* max forward allocation */
 	int sb_timeshift;
 	struct rcu_head rcu;
+
+	unsigned n_hotfixes;
+	secno hotfix_from[256];
+	secno hotfix_to[256];
 };
 
 /* Four 512-byte buffers and the 2k block obtained by concatenating them */
@@ -217,6 +221,8 @@
 
 /* buffer.c */
 
+secno hpfs_search_hotfix_map(struct super_block *s, secno sec);
+unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n);
 void hpfs_prefetch_sectors(struct super_block *, unsigned, int);
 void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
 void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
@@ -285,6 +291,7 @@
 void hpfs_prefetch_bitmap(struct super_block *, unsigned);
 unsigned char *hpfs_load_code_page(struct super_block *, secno);
 __le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
+void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock);
 struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
 struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
 struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index 442770e..a69bbc1 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -130,6 +130,32 @@
 	return b;
 }
 
+void hpfs_load_hotfix_map(struct super_block *s, struct hpfs_spare_block *spareblock)
+{
+	struct quad_buffer_head qbh;
+	u32 *directory;
+	u32 n_hotfixes, n_used_hotfixes;
+	unsigned i;
+
+	n_hotfixes = le32_to_cpu(spareblock->n_spares);
+	n_used_hotfixes = le32_to_cpu(spareblock->n_spares_used);
+
+	if (n_hotfixes > 256 || n_used_hotfixes > n_hotfixes) {
+		hpfs_error(s, "invalid number of hotfixes: %u, used: %u", n_hotfixes, n_used_hotfixes);
+		return;
+	}
+	if (!(directory = hpfs_map_4sectors(s, le32_to_cpu(spareblock->hotfix_map), &qbh, 0))) {
+		hpfs_error(s, "can't load hotfix map");
+		return;
+	}
+	for (i = 0; i < n_used_hotfixes; i++) {
+		hpfs_sb(s)->hotfix_from[i] = le32_to_cpu(directory[i]);
+		hpfs_sb(s)->hotfix_to[i] = le32_to_cpu(directory[n_hotfixes + i]);
+	}
+	hpfs_sb(s)->n_hotfixes = n_used_hotfixes;
+	hpfs_brelse4(&qbh);
+}
+
 /*
  * Load fnode to memory
  */
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index a0872f2..9e92c9c 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -8,6 +8,17 @@
 #include <linux/sched.h>
 #include "hpfs_fn.h"
 
+static void hpfs_update_directory_times(struct inode *dir)
+{
+	time_t t = get_seconds();
+	if (t == dir->i_mtime.tv_sec &&
+	    t == dir->i_ctime.tv_sec)
+		return;
+	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
+	dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
+	hpfs_write_inode_nolock(dir);
+}
+
 static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	const unsigned char *name = dentry->d_name.name;
@@ -99,6 +110,7 @@
 		result->i_mode = mode | S_IFDIR;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -187,6 +199,7 @@
 		result->i_mode = mode | S_IFREG;
 		hpfs_write_inode_nolock(result);
 	}
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -262,6 +275,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	brelse(bh);
 	hpfs_unlock(dir->i_sb);
@@ -340,6 +354,7 @@
 	insert_inode_hash(result);
 
 	hpfs_write_inode_nolock(result);
+	hpfs_update_directory_times(dir);
 	d_instantiate(dentry, result);
 	hpfs_unlock(dir->i_sb);
 	return 0;
@@ -423,6 +438,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -477,6 +494,8 @@
 out1:
 	hpfs_brelse4(&qbh);
 out:
+	if (!err)
+		hpfs_update_directory_times(dir);
 	hpfs_unlock(dir->i_sb);
 	return err;
 }
@@ -595,7 +614,7 @@
 		goto end1;
 	}
 
-	end:
+end:
 	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
 	if (S_ISDIR(i->i_mode)) {
 		inc_nlink(new_dir);
@@ -610,6 +629,10 @@
 		brelse(bh);
 	}
 end1:
+	if (!err) {
+		hpfs_update_directory_times(old_dir);
+		hpfs_update_directory_times(new_dir);
+	}
 	hpfs_unlock(i->i_sb);
 	return err;
 }
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 68a9bed..a561591 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -628,6 +628,9 @@
 		goto bail4;
 	}
 
+	if (spareblock->n_spares_used)
+		hpfs_load_hotfix_map(s, spareblock);
+
 	/* Load bitmap directory */
 	if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
 		goto bail4;
@@ -647,18 +650,6 @@
 		mark_buffer_dirty(bh2);
 	}
 
-	if (spareblock->hotfixes_used || spareblock->n_spares_used) {
-		if (errs >= 2) {
-			pr_err("Hotfixes not supported here, try chkdsk\n");
-			mark_dirty(s, 0);
-			goto bail4;
-		}
-		hpfs_error(s, "hotfixes not supported here, try chkdsk");
-		if (errs == 0)
-			pr_err("Proceeding, but your filesystem will be probably corrupted by this driver...\n");
-		else
-			pr_err("This driver may read bad files or crash when operating on disk with hotfixes.\n");
-	}
 	if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
 		if (errs >= 2) {
 			pr_err("Spare dnodes used, try chkdsk\n");
diff --git a/fs/jbd/Kconfig b/fs/jbd/Kconfig
deleted file mode 100644
index 4e28bee..0000000
--- a/fs/jbd/Kconfig
+++ /dev/null
@@ -1,30 +0,0 @@
-config JBD
-	tristate
-	help
-	  This is a generic journalling layer for block devices.  It is
-	  currently used by the ext3 file system, but it could also be
-	  used to add journal support to other file systems or block
-	  devices such as RAID or LVM.
-
-	  If you are using the ext3 file system, you need to say Y here.
-	  If you are not using ext3 then you will probably want to say N.
-
-	  To compile this device as a module, choose M here: the module will be
-	  called jbd.  If you are compiling ext3 into the kernel, you
-	  cannot compile this code as a module.
-
-config JBD_DEBUG
-	bool "JBD (ext3) debugging support"
-	depends on JBD && DEBUG_FS
-	help
-	  If you are using the ext3 journaled file system (or potentially any
-	  other file system/device using JBD), this option allows you to
-	  enable debugging output while the system is running, in order to
-	  help track down any problems you are having.  By default the
-	  debugging output will be turned off.
-
-	  If you select Y here, then you will be able to turn on debugging
-	  with "echo N > /sys/kernel/debug/jbd/jbd-debug", where N is a
-	  number between 1 and 5, the higher the number, the more debugging
-	  output is generated.  To turn debugging off again, do
-	  "echo 0 > /sys/kernel/debug/jbd/jbd-debug".
diff --git a/fs/jbd/Makefile b/fs/jbd/Makefile
deleted file mode 100644
index 54aca48..0000000
--- a/fs/jbd/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux journaling routines.
-#
-
-obj-$(CONFIG_JBD) += jbd.o
-
-jbd-objs := transaction.o commit.o recovery.o checkpoint.o revoke.o journal.o
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
deleted file mode 100644
index 08c0304..0000000
--- a/fs/jbd/checkpoint.c
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * linux/fs/jbd/checkpoint.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1999 Red Hat Software --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Checkpoint routines for the generic filesystem journaling code.
- * Part of the ext2fs journaling system.
- *
- * Checkpointing is the process of ensuring that a section of the log is
- * committed fully to disk, so that that portion of the log can be
- * reused.
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <trace/events/jbd.h>
-
-/*
- * Unlink a buffer from a transaction checkpoint list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink_first(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	jh->b_cpnext->b_cpprev = jh->b_cpprev;
-	jh->b_cpprev->b_cpnext = jh->b_cpnext;
-	if (transaction->t_checkpoint_list == jh) {
-		transaction->t_checkpoint_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_list == jh)
-			transaction->t_checkpoint_list = NULL;
-	}
-}
-
-/*
- * Unlink a buffer from a transaction checkpoint(io) list.
- *
- * Called with j_list_lock held.
- */
-static inline void __buffer_unlink(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-	if (transaction->t_checkpoint_io_list == jh) {
-		transaction->t_checkpoint_io_list = jh->b_cpnext;
-		if (transaction->t_checkpoint_io_list == jh)
-			transaction->t_checkpoint_io_list = NULL;
-	}
-}
-
-/*
- * Move a buffer from the checkpoint list to the checkpoint io list
- *
- * Called with j_list_lock held
- */
-static inline void __buffer_relink_io(struct journal_head *jh)
-{
-	transaction_t *transaction = jh->b_cp_transaction;
-
-	__buffer_unlink_first(jh);
-
-	if (!transaction->t_checkpoint_io_list) {
-		jh->b_cpnext = jh->b_cpprev = jh;
-	} else {
-		jh->b_cpnext = transaction->t_checkpoint_io_list;
-		jh->b_cpprev = transaction->t_checkpoint_io_list->b_cpprev;
-		jh->b_cpprev->b_cpnext = jh;
-		jh->b_cpnext->b_cpprev = jh;
-	}
-	transaction->t_checkpoint_io_list = jh;
-}
-
-/*
- * Try to release a checkpointed buffer from its transaction.
- * Returns 1 if we released it and 2 if we also released the
- * whole transaction.
- *
- * Requires j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
- */
-static int __try_to_free_cp_buf(struct journal_head *jh)
-{
-	int ret = 0;
-	struct buffer_head *bh = jh2bh(jh);
-
-	if (jh->b_jlist == BJ_None && !buffer_locked(bh) &&
-	    !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
-		/*
-		 * Get our reference so that bh cannot be freed before
-		 * we unlock it
-		 */
-		get_bh(bh);
-		JBUFFER_TRACE(jh, "remove from checkpoint list");
-		ret = __journal_remove_checkpoint(jh) + 1;
-		jbd_unlock_bh_state(bh);
-		BUFFER_TRACE(bh, "release");
-		__brelse(bh);
-	} else {
-		jbd_unlock_bh_state(bh);
-	}
-	return ret;
-}
-
-/*
- * __log_wait_for_space: wait until there is space in the journal.
- *
- * Called under j_state_lock *only*.  It will be unlocked if we have to wait
- * for a checkpoint to free up some space in the log.
- */
-void __log_wait_for_space(journal_t *journal)
-{
-	int nblocks, space_left;
-	assert_spin_locked(&journal->j_state_lock);
-
-	nblocks = jbd_space_needed(journal);
-	while (__log_space_left(journal) < nblocks) {
-		if (journal->j_flags & JFS_ABORT)
-			return;
-		spin_unlock(&journal->j_state_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
-
-		/*
-		 * Test again, another process may have checkpointed while we
-		 * were waiting for the checkpoint lock. If there are no
-		 * transactions ready to be checkpointed, try to recover
-		 * journal space by calling cleanup_journal_tail(), and if
-		 * that doesn't work, by waiting for the currently committing
-		 * transaction to complete.  If there is absolutely no way
-		 * to make progress, this is either a BUG or corrupted
-		 * filesystem, so abort the journal and leave a stack
-		 * trace for forensic evidence.
-		 */
-		spin_lock(&journal->j_state_lock);
-		spin_lock(&journal->j_list_lock);
-		nblocks = jbd_space_needed(journal);
-		space_left = __log_space_left(journal);
-		if (space_left < nblocks) {
-			int chkpt = journal->j_checkpoint_transactions != NULL;
-			tid_t tid = 0;
-
-			if (journal->j_committing_transaction)
-				tid = journal->j_committing_transaction->t_tid;
-			spin_unlock(&journal->j_list_lock);
-			spin_unlock(&journal->j_state_lock);
-			if (chkpt) {
-				log_do_checkpoint(journal);
-			} else if (cleanup_journal_tail(journal) == 0) {
-				/* We were able to recover space; yay! */
-				;
-			} else if (tid) {
-				log_wait_commit(journal, tid);
-			} else {
-				printk(KERN_ERR "%s: needed %d blocks and "
-				       "only had %d space available\n",
-				       __func__, nblocks, space_left);
-				printk(KERN_ERR "%s: no way to get more "
-				       "journal space\n", __func__);
-				WARN_ON(1);
-				journal_abort(journal, 0);
-			}
-			spin_lock(&journal->j_state_lock);
-		} else {
-			spin_unlock(&journal->j_list_lock);
-		}
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-}
-
-/*
- * We were unable to perform jbd_trylock_bh_state() inside j_list_lock.
- * The caller must restart a list walk.  Wait for someone else to run
- * jbd_unlock_bh_state().
- */
-static void jbd_sync_bh(journal_t *journal, struct buffer_head *bh)
-	__releases(journal->j_list_lock)
-{
-	get_bh(bh);
-	spin_unlock(&journal->j_list_lock);
-	jbd_lock_bh_state(bh);
-	jbd_unlock_bh_state(bh);
-	put_bh(bh);
-}
-
-/*
- * Clean up transaction's list of buffers submitted for io.
- * We wait for any pending IO to complete and remove any clean
- * buffers. Note that we take the buffers in the opposite ordering
- * from the one in which they were submitted for IO.
- *
- * Return 0 on success, and return <0 if some buffers have failed
- * to be written out.
- *
- * Called with j_list_lock held.
- */
-static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	tid_t this_tid;
-	int released = 0;
-	int ret = 0;
-
-	this_tid = transaction->t_tid;
-restart:
-	/* Did somebody clean up the transaction in the meanwhile? */
-	if (journal->j_checkpoint_transactions != transaction ||
-			transaction->t_tid != this_tid)
-		return ret;
-	while (!released && transaction->t_checkpoint_io_list) {
-		jh = transaction->t_checkpoint_io_list;
-		bh = jh2bh(jh);
-		if (!jbd_trylock_bh_state(bh)) {
-			jbd_sync_bh(journal, bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			jbd_unlock_bh_state(bh);
-			wait_on_buffer(bh);
-			/* the journal_head may have gone by now */
-			BUFFER_TRACE(bh, "brelse");
-			__brelse(bh);
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-
-		/*
-		 * Now in whatever state the buffer currently is, we know that
-		 * it has been written out and so we can drop it from the list
-		 */
-		released = __journal_remove_checkpoint(jh);
-		jbd_unlock_bh_state(bh);
-		__brelse(bh);
-	}
-
-	return ret;
-}
-
-#define NR_BATCH	64
-
-static void
-__flush_batch(journal_t *journal, struct buffer_head **bhs, int *batch_count)
-{
-	int i;
-	struct blk_plug plug;
-
-	blk_start_plug(&plug);
-	for (i = 0; i < *batch_count; i++)
-		write_dirty_buffer(bhs[i], WRITE_SYNC);
-	blk_finish_plug(&plug);
-
-	for (i = 0; i < *batch_count; i++) {
-		struct buffer_head *bh = bhs[i];
-		clear_buffer_jwrite(bh);
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-	}
-	*batch_count = 0;
-}
-
-/*
- * Try to flush one buffer from the checkpoint list to disk.
- *
- * Return 1 if something happened which requires us to abort the current
- * scan of the checkpoint list.  Return <0 if the buffer has failed to
- * be written out.
- *
- * Called with j_list_lock held and drops it if 1 is returned
- * Called under jbd_lock_bh_state(jh2bh(jh)), and drops it
- */
-static int __process_buffer(journal_t *journal, struct journal_head *jh,
-			struct buffer_head **bhs, int *batch_count)
-{
-	struct buffer_head *bh = jh2bh(jh);
-	int ret = 0;
-
-	if (buffer_locked(bh)) {
-		get_bh(bh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		wait_on_buffer(bh);
-		/* the journal_head may have gone by now */
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-		ret = 1;
-	} else if (jh->b_transaction != NULL) {
-		transaction_t *t = jh->b_transaction;
-		tid_t tid = t->t_tid;
-
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		log_start_commit(journal, tid);
-		log_wait_commit(journal, tid);
-		ret = 1;
-	} else if (!buffer_dirty(bh)) {
-		ret = 1;
-		if (unlikely(buffer_write_io_error(bh)))
-			ret = -EIO;
-		get_bh(bh);
-		J_ASSERT_JH(jh, !buffer_jbddirty(bh));
-		BUFFER_TRACE(bh, "remove from checkpoint");
-		__journal_remove_checkpoint(jh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		__brelse(bh);
-	} else {
-		/*
-		 * Important: we are about to write the buffer, and
-		 * possibly block, while still holding the journal lock.
-		 * We cannot afford to let the transaction logic start
-		 * messing around with this buffer before we write it to
-		 * disk, as that would break recoverability.
-		 */
-		BUFFER_TRACE(bh, "queue");
-		get_bh(bh);
-		J_ASSERT_BH(bh, !buffer_jwrite(bh));
-		set_buffer_jwrite(bh);
-		bhs[*batch_count] = bh;
-		__buffer_relink_io(jh);
-		jbd_unlock_bh_state(bh);
-		(*batch_count)++;
-		if (*batch_count == NR_BATCH) {
-			spin_unlock(&journal->j_list_lock);
-			__flush_batch(journal, bhs, batch_count);
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-/*
- * Perform an actual checkpoint. We take the first transaction on the
- * list of transactions to be checkpointed and send all its buffers
- * to disk. We submit larger chunks of data at once.
- *
- * The journal should be locked before calling this function.
- * Called with j_checkpoint_mutex held.
- */
-int log_do_checkpoint(journal_t *journal)
-{
-	transaction_t *transaction;
-	tid_t this_tid;
-	int result;
-
-	jbd_debug(1, "Start checkpoint\n");
-
-	/*
-	 * First thing: if there are any transactions in the log which
-	 * don't need checkpointing, just eliminate them from the
-	 * journal straight away.
-	 */
-	result = cleanup_journal_tail(journal);
-	trace_jbd_checkpoint(journal, result);
-	jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
-	if (result <= 0)
-		return result;
-
-	/*
-	 * OK, we need to start writing disk blocks.  Take one transaction
-	 * and write it.
-	 */
-	result = 0;
-	spin_lock(&journal->j_list_lock);
-	if (!journal->j_checkpoint_transactions)
-		goto out;
-	transaction = journal->j_checkpoint_transactions;
-	this_tid = transaction->t_tid;
-restart:
-	/*
-	 * If someone cleaned up this transaction while we slept, we're
-	 * done (maybe it's a new transaction, but it fell at the same
-	 * address).
-	 */
-	if (journal->j_checkpoint_transactions == transaction &&
-			transaction->t_tid == this_tid) {
-		int batch_count = 0;
-		struct buffer_head *bhs[NR_BATCH];
-		struct journal_head *jh;
-		int retry = 0, err;
-
-		while (!retry && transaction->t_checkpoint_list) {
-			struct buffer_head *bh;
-
-			jh = transaction->t_checkpoint_list;
-			bh = jh2bh(jh);
-			if (!jbd_trylock_bh_state(bh)) {
-				jbd_sync_bh(journal, bh);
-				retry = 1;
-				break;
-			}
-			retry = __process_buffer(journal, jh, bhs, &batch_count);
-			if (retry < 0 && !result)
-				result = retry;
-			if (!retry && (need_resched() ||
-				spin_needbreak(&journal->j_list_lock))) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-				break;
-			}
-		}
-
-		if (batch_count) {
-			if (!retry) {
-				spin_unlock(&journal->j_list_lock);
-				retry = 1;
-			}
-			__flush_batch(journal, bhs, &batch_count);
-		}
-
-		if (retry) {
-			spin_lock(&journal->j_list_lock);
-			goto restart;
-		}
-		/*
-		 * Now we have cleaned up the first transaction's checkpoint
-		 * list. Let's clean up the second one
-		 */
-		err = __wait_cp_io(journal, transaction);
-		if (!result)
-			result = err;
-	}
-out:
-	spin_unlock(&journal->j_list_lock);
-	if (result < 0)
-		journal_abort(journal, result);
-	else
-		result = cleanup_journal_tail(journal);
-
-	return (result < 0) ? result : 0;
-}
-
-/*
- * Check the list of checkpoint transactions for the journal to see if
- * we have already got rid of any since the last update of the log tail
- * in the journal superblock.  If so, we can instantly roll the
- * superblock forward to remove those transactions from the log.
- *
- * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
- *
- * This is the only part of the journaling code which really needs to be
- * aware of transaction aborts.  Checkpointing involves writing to the
- * main filesystem area rather than to the journal, so it can proceed
- * even in abort state, but we must not update the super block if
- * checkpointing may have failed.  Otherwise, we would lose some metadata
- * buffers which should be written-back to the filesystem.
- */
-
-int cleanup_journal_tail(journal_t *journal)
-{
-	transaction_t * transaction;
-	tid_t		first_tid;
-	unsigned int	blocknr, freed;
-
-	if (is_journal_aborted(journal))
-		return 1;
-
-	/*
-	 * OK, work out the oldest transaction remaining in the log, and
-	 * the log block it starts at.
-	 *
-	 * If the log is now empty, we need to work out which is the
-	 * next transaction ID we will write, and where it will
-	 * start.
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
-	transaction = journal->j_checkpoint_transactions;
-	if (transaction) {
-		first_tid = transaction->t_tid;
-		blocknr = transaction->t_log_start;
-	} else if ((transaction = journal->j_committing_transaction) != NULL) {
-		first_tid = transaction->t_tid;
-		blocknr = transaction->t_log_start;
-	} else if ((transaction = journal->j_running_transaction) != NULL) {
-		first_tid = transaction->t_tid;
-		blocknr = journal->j_head;
-	} else {
-		first_tid = journal->j_transaction_sequence;
-		blocknr = journal->j_head;
-	}
-	spin_unlock(&journal->j_list_lock);
-	J_ASSERT(blocknr != 0);
-
-	/* If the oldest pinned transaction is at the tail of the log
-           already then there's not much we can do right now. */
-	if (journal->j_tail_sequence == first_tid) {
-		spin_unlock(&journal->j_state_lock);
-		return 1;
-	}
-	spin_unlock(&journal->j_state_lock);
-
-	/*
-	 * We need to make sure that any blocks that were recently written out
-	 * --- perhaps by log_do_checkpoint() --- are flushed out before we
-	 * drop the transactions from the journal. Similarly we need to be sure
-	 * superblock makes it to disk before next transaction starts reusing
-	 * freed space (otherwise we could replay some blocks of the new
-	 * transaction thinking they belong to the old one). So we use
-	 * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
-	 * with an appropriately sized journal, but we need this to guarantee
-	 * correctness.  Fortunately cleanup_journal_tail() doesn't get called
-	 * all that often.
-	 */
-	journal_update_sb_log_tail(journal, first_tid, blocknr,
-				   WRITE_FLUSH_FUA);
-
-	spin_lock(&journal->j_state_lock);
-	/* OK, update the superblock to recover the freed space.
-	 * Physical blocks come first: have we wrapped beyond the end of
-	 * the log?  */
-	freed = blocknr - journal->j_tail;
-	if (blocknr < journal->j_tail)
-		freed = freed + journal->j_last - journal->j_first;
-
-	trace_jbd_cleanup_journal_tail(journal, first_tid, blocknr, freed);
-	jbd_debug(1,
-		  "Cleaning journal tail from %d to %d (offset %u), "
-		  "freeing %u\n",
-		  journal->j_tail_sequence, first_tid, blocknr, freed);
-
-	journal->j_free += freed;
-	journal->j_tail_sequence = first_tid;
-	journal->j_tail = blocknr;
-	spin_unlock(&journal->j_state_lock);
-	return 0;
-}
-
-
-/* Checkpoint list management */
-
-/*
- * journal_clean_one_cp_list
- *
- * Find all the written-back checkpoint buffers in the given list and release
- * them.
- *
- * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
- */
-
-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
-{
-	struct journal_head *last_jh;
-	struct journal_head *next_jh = jh;
-	int ret, freed = 0;
-
-	*released = 0;
-	if (!jh)
-		return 0;
-
-	last_jh = jh->b_cpprev;
-	do {
-		jh = next_jh;
-		next_jh = jh->b_cpnext;
-		/* Use trylock because of the ranking */
-		if (jbd_trylock_bh_state(jh2bh(jh))) {
-			ret = __try_to_free_cp_buf(jh);
-			if (ret) {
-				freed++;
-				if (ret == 2) {
-					*released = 1;
-					return freed;
-				}
-			}
-		}
-		/*
-		 * This function only frees up some memory
-		 * if possible so we don't have an obligation
-		 * to finish processing. Bail out if preemption
-		 * requested:
-		 */
-		if (need_resched())
-			return freed;
-	} while (jh != last_jh);
-
-	return freed;
-}
-
-/*
- * journal_clean_checkpoint_list
- *
- * Find all the written-back checkpoint buffers in the journal and release them.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- * Returns number of buffers reaped (for debug)
- */
-
-int __journal_clean_checkpoint_list(journal_t *journal)
-{
-	transaction_t *transaction, *last_transaction, *next_transaction;
-	int ret = 0;
-	int released;
-
-	transaction = journal->j_checkpoint_transactions;
-	if (!transaction)
-		goto out;
-
-	last_transaction = transaction->t_cpprev;
-	next_transaction = transaction;
-	do {
-		transaction = next_transaction;
-		next_transaction = transaction->t_cpnext;
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_list, &released);
-		/*
-		 * This function only frees up some memory if possible so we
-		 * don't have an obligation to finish processing. Bail out if
-		 * preemption requested:
-		 */
-		if (need_resched())
-			goto out;
-		if (released)
-			continue;
-		/*
-		 * It is essential that we are as careful as in the case of
-		 * t_checkpoint_list with removing the buffer from the list as
-		 * we can possibly see not yet submitted buffers on io_list
-		 */
-		ret += journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list, &released);
-		if (need_resched())
-			goto out;
-	} while (transaction != last_transaction);
-out:
-	return ret;
-}
-
-/*
- * journal_remove_checkpoint: called after a buffer has been committed
- * to disk (either by being write-back flushed to disk, or being
- * committed to the log).
- *
- * We cannot safely clean a transaction out of the log until all of the
- * buffer updates committed in that transaction have safely been stored
- * elsewhere on disk.  To achieve this, all of the buffers in a
- * transaction need to be maintained on the transaction's checkpoint
- * lists until they have been rewritten, at which point this function is
- * called to remove the buffer from the existing transaction's
- * checkpoint lists.
- *
- * The function returns 1 if it frees the transaction, 0 otherwise.
- * The function can free jh and bh.
- *
- * This function is called with j_list_lock held.
- * This function is called with jbd_lock_bh_state(jh2bh(jh))
- */
-
-int __journal_remove_checkpoint(struct journal_head *jh)
-{
-	transaction_t *transaction;
-	journal_t *journal;
-	int ret = 0;
-
-	JBUFFER_TRACE(jh, "entry");
-
-	if ((transaction = jh->b_cp_transaction) == NULL) {
-		JBUFFER_TRACE(jh, "not on transaction");
-		goto out;
-	}
-	journal = transaction->t_journal;
-
-	JBUFFER_TRACE(jh, "removing from transaction");
-	__buffer_unlink(jh);
-	jh->b_cp_transaction = NULL;
-	journal_put_journal_head(jh);
-
-	if (transaction->t_checkpoint_list != NULL ||
-	    transaction->t_checkpoint_io_list != NULL)
-		goto out;
-
-	/*
-	 * There is one special case to worry about: if we have just pulled the
-	 * buffer off a running or committing transaction's checkpoint list,
-	 * then even if the checkpoint list is empty, the transaction obviously
-	 * cannot be dropped!
-	 *
-	 * The locking here around t_state is a bit sleazy.
-	 * See the comment at the end of journal_commit_transaction().
-	 */
-	if (transaction->t_state != T_FINISHED)
-		goto out;
-
-	/* OK, that was the last buffer for the transaction: we can now
-	   safely remove this transaction from the log */
-
-	__journal_drop_transaction(journal, transaction);
-
-	/* Just in case anybody was waiting for more transactions to be
-           checkpointed... */
-	wake_up(&journal->j_wait_logspace);
-	ret = 1;
-out:
-	return ret;
-}
-
-/*
- * journal_insert_checkpoint: put a committed buffer onto a checkpoint
- * list so that we know when it is safe to clean the transaction out of
- * the log.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- */
-void __journal_insert_checkpoint(struct journal_head *jh,
-			       transaction_t *transaction)
-{
-	JBUFFER_TRACE(jh, "entry");
-	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
-	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
-
-	/* Get reference for checkpointing transaction */
-	journal_grab_journal_head(jh2bh(jh));
-	jh->b_cp_transaction = transaction;
-
-	if (!transaction->t_checkpoint_list) {
-		jh->b_cpnext = jh->b_cpprev = jh;
-	} else {
-		jh->b_cpnext = transaction->t_checkpoint_list;
-		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
-		jh->b_cpprev->b_cpnext = jh;
-		jh->b_cpnext->b_cpprev = jh;
-	}
-	transaction->t_checkpoint_list = jh;
-}
-
-/*
- * We've finished with this transaction structure: adios...
- *
- * The transaction must have no links except for the checkpoint by this
- * point.
- *
- * Called with the journal locked.
- * Called with j_list_lock held.
- */
-
-void __journal_drop_transaction(journal_t *journal, transaction_t *transaction)
-{
-	assert_spin_locked(&journal->j_list_lock);
-	if (transaction->t_cpnext) {
-		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
-		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
-		if (journal->j_checkpoint_transactions == transaction)
-			journal->j_checkpoint_transactions =
-				transaction->t_cpnext;
-		if (journal->j_checkpoint_transactions == transaction)
-			journal->j_checkpoint_transactions = NULL;
-	}
-
-	J_ASSERT(transaction->t_state == T_FINISHED);
-	J_ASSERT(transaction->t_buffers == NULL);
-	J_ASSERT(transaction->t_sync_datalist == NULL);
-	J_ASSERT(transaction->t_forget == NULL);
-	J_ASSERT(transaction->t_iobuf_list == NULL);
-	J_ASSERT(transaction->t_shadow_list == NULL);
-	J_ASSERT(transaction->t_log_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_list == NULL);
-	J_ASSERT(transaction->t_checkpoint_io_list == NULL);
-	J_ASSERT(transaction->t_updates == 0);
-	J_ASSERT(journal->j_committing_transaction != transaction);
-	J_ASSERT(journal->j_running_transaction != transaction);
-
-	trace_jbd_drop_transaction(journal, transaction);
-	jbd_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
-	kfree(transaction);
-}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
deleted file mode 100644
index bb217dc..0000000
--- a/fs/jbd/commit.c
+++ /dev/null
@@ -1,1021 +0,0 @@
-/*
- * linux/fs/jbd/commit.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal commit routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <trace/events/jbd.h>
-
-/*
- * Default IO end handler for temporary BJ_IO buffer_heads.
- */
-static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
-{
-	BUFFER_TRACE(bh, "");
-	if (uptodate)
-		set_buffer_uptodate(bh);
-	else
-		clear_buffer_uptodate(bh);
-	unlock_buffer(bh);
-}
-
-/*
- * When an ext3-ordered file is truncated, it is possible that many pages are
- * not successfully freed, because they are attached to a committing transaction.
- * After the transaction commits, these pages are left on the LRU, with no
- * ->mapping, and with attached buffers.  These pages are trivially reclaimable
- * by the VM, but their apparent absence upsets the VM accounting, and it makes
- * the numbers in /proc/meminfo look odd.
- *
- * So here, we have a buffer which has just come off the forget list.  Look to
- * see if we can strip all buffers from the backing page.
- *
- * Called under journal->j_list_lock.  The caller provided us with a ref
- * against the buffer, and we drop that here.
- */
-static void release_buffer_page(struct buffer_head *bh)
-{
-	struct page *page;
-
-	if (buffer_dirty(bh))
-		goto nope;
-	if (atomic_read(&bh->b_count) != 1)
-		goto nope;
-	page = bh->b_page;
-	if (!page)
-		goto nope;
-	if (page->mapping)
-		goto nope;
-
-	/* OK, it's a truncated page */
-	if (!trylock_page(page))
-		goto nope;
-
-	page_cache_get(page);
-	__brelse(bh);
-	try_to_free_buffers(page);
-	unlock_page(page);
-	page_cache_release(page);
-	return;
-
-nope:
-	__brelse(bh);
-}
-
-/*
- * Decrement reference counter for data buffer. If it has been marked
- * 'BH_Freed', release it and the page to which it belongs if possible.
- */
-static void release_data_buffer(struct buffer_head *bh)
-{
-	if (buffer_freed(bh)) {
-		WARN_ON_ONCE(buffer_dirty(bh));
-		clear_buffer_freed(bh);
-		clear_buffer_mapped(bh);
-		clear_buffer_new(bh);
-		clear_buffer_req(bh);
-		bh->b_bdev = NULL;
-		release_buffer_page(bh);
-	} else
-		put_bh(bh);
-}
-
-/*
- * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
- * held.  For ranking reasons we must trylock.  If we lose, schedule away and
- * return 0.  j_list_lock is dropped in this case.
- */
-static int inverted_lock(journal_t *journal, struct buffer_head *bh)
-{
-	if (!jbd_trylock_bh_state(bh)) {
-		spin_unlock(&journal->j_list_lock);
-		schedule();
-		return 0;
-	}
-	return 1;
-}
-
-/* Done it all: now write the commit record.  We should have
- * cleaned up our previous buffers by now, so if we are in abort
- * mode we can now just skip the rest of the journal write
- * entirely.
- *
- * Returns 1 if the journal needs to be aborted or 0 on success
- */
-static int journal_write_commit_record(journal_t *journal,
-					transaction_t *commit_transaction)
-{
-	struct journal_head *descriptor;
-	struct buffer_head *bh;
-	journal_header_t *header;
-	int ret;
-
-	if (is_journal_aborted(journal))
-		return 0;
-
-	descriptor = journal_get_descriptor_buffer(journal);
-	if (!descriptor)
-		return 1;
-
-	bh = jh2bh(descriptor);
-
-	header = (journal_header_t *)(bh->b_data);
-	header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
-	header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK);
-	header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
-
-	JBUFFER_TRACE(descriptor, "write commit block");
-	set_buffer_dirty(bh);
-
-	if (journal->j_flags & JFS_BARRIER)
-		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_FLUSH_FUA);
-	else
-		ret = sync_dirty_buffer(bh);
-
-	put_bh(bh);		/* One for getblk() */
-	journal_put_journal_head(descriptor);
-
-	return (ret == -EIO);
-}
-
-static void journal_do_submit_data(struct buffer_head **wbuf, int bufs,
-				   int write_op)
-{
-	int i;
-
-	for (i = 0; i < bufs; i++) {
-		wbuf[i]->b_end_io = end_buffer_write_sync;
-		/*
-		 * Here we write back pagecache data that may be mmaped. Since
-		 * we cannot afford to clean the page and set PageWriteback
-		 * here due to lock ordering (page lock ranks above transaction
-		 * start), the data can change while IO is in flight. Tell the
-		 * block layer it should bounce the bio pages if stable data
-		 * during write is required.
-		 *
-		 * We use up our safety reference in submit_bh().
-		 */
-		_submit_bh(write_op, wbuf[i], 1 << BIO_SNAP_STABLE);
-	}
-}
-
-/*
- *  Submit all the data buffers to disk
- */
-static int journal_submit_data_buffers(journal_t *journal,
-				       transaction_t *commit_transaction,
-				       int write_op)
-{
-	struct journal_head *jh;
-	struct buffer_head *bh;
-	int locked;
-	int bufs = 0;
-	struct buffer_head **wbuf = journal->j_wbuf;
-	int err = 0;
-
-	/*
-	 * Whenever we unlock the journal and sleep, things can get added
-	 * onto ->t_sync_datalist, so we have to keep looping back to
-	 * write_out_data until we *know* that the list is empty.
-	 *
-	 * Cleanup any flushed data buffers from the data list.  Even in
-	 * abort mode, we want to flush this out as soon as possible.
-	 */
-write_out_data:
-	cond_resched();
-	spin_lock(&journal->j_list_lock);
-
-	while (commit_transaction->t_sync_datalist) {
-		jh = commit_transaction->t_sync_datalist;
-		bh = jh2bh(jh);
-		locked = 0;
-
-		/* Get reference just to make sure buffer does not disappear
-		 * when we are forced to drop various locks */
-		get_bh(bh);
-		/* If the buffer is dirty, we need to submit IO and hence
-		 * we need the buffer lock. We try to lock the buffer without
-		 * blocking. If we fail, we need to drop j_list_lock and do
-		 * blocking lock_buffer().
-		 */
-		if (buffer_dirty(bh)) {
-			if (!trylock_buffer(bh)) {
-				BUFFER_TRACE(bh, "needs blocking lock");
-				spin_unlock(&journal->j_list_lock);
-				trace_jbd_do_submit_data(journal,
-						     commit_transaction);
-				/* Write out all data to prevent deadlocks */
-				journal_do_submit_data(wbuf, bufs, write_op);
-				bufs = 0;
-				lock_buffer(bh);
-				spin_lock(&journal->j_list_lock);
-			}
-			locked = 1;
-		}
-		/* We have to get bh_state lock. Again out of order, sigh. */
-		if (!inverted_lock(journal, bh)) {
-			jbd_lock_bh_state(bh);
-			spin_lock(&journal->j_list_lock);
-		}
-		/* Someone already cleaned up the buffer? */
-		if (!buffer_jbd(bh) || bh2jh(bh) != jh
-			|| jh->b_transaction != commit_transaction
-			|| jh->b_jlist != BJ_SyncData) {
-			jbd_unlock_bh_state(bh);
-			if (locked)
-				unlock_buffer(bh);
-			BUFFER_TRACE(bh, "already cleaned up");
-			release_data_buffer(bh);
-			continue;
-		}
-		if (locked && test_clear_buffer_dirty(bh)) {
-			BUFFER_TRACE(bh, "needs writeout, adding to array");
-			wbuf[bufs++] = bh;
-			__journal_file_buffer(jh, commit_transaction,
-						BJ_Locked);
-			jbd_unlock_bh_state(bh);
-			if (bufs == journal->j_wbufsize) {
-				spin_unlock(&journal->j_list_lock);
-				trace_jbd_do_submit_data(journal,
-						     commit_transaction);
-				journal_do_submit_data(wbuf, bufs, write_op);
-				bufs = 0;
-				goto write_out_data;
-			}
-		} else if (!locked && buffer_locked(bh)) {
-			__journal_file_buffer(jh, commit_transaction,
-						BJ_Locked);
-			jbd_unlock_bh_state(bh);
-			put_bh(bh);
-		} else {
-			BUFFER_TRACE(bh, "writeout complete: unfile");
-			if (unlikely(!buffer_uptodate(bh)))
-				err = -EIO;
-			__journal_unfile_buffer(jh);
-			jbd_unlock_bh_state(bh);
-			if (locked)
-				unlock_buffer(bh);
-			release_data_buffer(bh);
-		}
-
-		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
-			spin_unlock(&journal->j_list_lock);
-			goto write_out_data;
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-	trace_jbd_do_submit_data(journal, commit_transaction);
-	journal_do_submit_data(wbuf, bufs, write_op);
-
-	return err;
-}
-
-/*
- * journal_commit_transaction
- *
- * The primary function for committing a transaction to the log.  This
- * function is called by the journal thread to begin a complete commit.
- */
-void journal_commit_transaction(journal_t *journal)
-{
-	transaction_t *commit_transaction;
-	struct journal_head *jh, *new_jh, *descriptor;
-	struct buffer_head **wbuf = journal->j_wbuf;
-	int bufs;
-	int flags;
-	int err;
-	unsigned int blocknr;
-	ktime_t start_time;
-	u64 commit_time;
-	char *tagp = NULL;
-	journal_header_t *header;
-	journal_block_tag_t *tag = NULL;
-	int space_left = 0;
-	int first_tag = 0;
-	int tag_flag;
-	int i;
-	struct blk_plug plug;
-	int write_op = WRITE;
-
-	/*
-	 * First job: lock down the current transaction and wait for
-	 * all outstanding updates to complete.
-	 */
-
-	/* Do we need to erase the effects of a prior journal_flush? */
-	if (journal->j_flags & JFS_FLUSHED) {
-		jbd_debug(3, "super block updated\n");
-		mutex_lock(&journal->j_checkpoint_mutex);
-		/*
-		 * We hold j_checkpoint_mutex so tail cannot change under us.
-		 * We don't need any special data guarantees for writing sb
-		 * since journal is empty and it is ok for write to be
-		 * flushed only with transaction commit.
-		 */
-		journal_update_sb_log_tail(journal, journal->j_tail_sequence,
-					   journal->j_tail, WRITE_SYNC);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	} else {
-		jbd_debug(3, "superblock not updated\n");
-	}
-
-	J_ASSERT(journal->j_running_transaction != NULL);
-	J_ASSERT(journal->j_committing_transaction == NULL);
-
-	commit_transaction = journal->j_running_transaction;
-
-	trace_jbd_start_commit(journal, commit_transaction);
-	jbd_debug(1, "JBD: starting commit of transaction %d\n",
-			commit_transaction->t_tid);
-
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(commit_transaction->t_state == T_RUNNING);
-	commit_transaction->t_state = T_LOCKED;
-
-	trace_jbd_commit_locking(journal, commit_transaction);
-	spin_lock(&commit_transaction->t_handle_lock);
-	while (commit_transaction->t_updates) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_updates, &wait,
-					TASK_UNINTERRUPTIBLE);
-		if (commit_transaction->t_updates) {
-			spin_unlock(&commit_transaction->t_handle_lock);
-			spin_unlock(&journal->j_state_lock);
-			schedule();
-			spin_lock(&journal->j_state_lock);
-			spin_lock(&commit_transaction->t_handle_lock);
-		}
-		finish_wait(&journal->j_wait_updates, &wait);
-	}
-	spin_unlock(&commit_transaction->t_handle_lock);
-
-	J_ASSERT (commit_transaction->t_outstanding_credits <=
-			journal->j_max_transaction_buffers);
-
-	/*
-	 * First thing we are allowed to do is to discard any remaining
-	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
-	 * that there are no such buffers: if a large filesystem
-	 * operation like a truncate needs to split itself over multiple
-	 * transactions, then it may try to do a journal_restart() while
-	 * there are still BJ_Reserved buffers outstanding.  These must
-	 * be released cleanly from the current transaction.
-	 *
-	 * In this case, the filesystem must still reserve write access
-	 * again before modifying the buffer in the new transaction, but
-	 * we do not require it to remember exactly which old buffers it
-	 * has reserved.  This is consistent with the existing behaviour
-	 * that multiple journal_get_write_access() calls to the same
-	 * buffer are perfectly permissible.
-	 */
-	while (commit_transaction->t_reserved_list) {
-		jh = commit_transaction->t_reserved_list;
-		JBUFFER_TRACE(jh, "reserved, unused: refile");
-		/*
-		 * A journal_get_undo_access()+journal_release_buffer() may
-		 * leave undo-committed data.
-		 */
-		if (jh->b_committed_data) {
-			struct buffer_head *bh = jh2bh(jh);
-
-			jbd_lock_bh_state(bh);
-			jbd_free(jh->b_committed_data, bh->b_size);
-			jh->b_committed_data = NULL;
-			jbd_unlock_bh_state(bh);
-		}
-		journal_refile_buffer(journal, jh);
-	}
-
-	/*
-	 * Now try to drop any written-back buffers from the journal's
-	 * checkpoint lists.  We do this *before* commit because it potentially
-	 * frees some memory
-	 */
-	spin_lock(&journal->j_list_lock);
-	__journal_clean_checkpoint_list(journal);
-	spin_unlock(&journal->j_list_lock);
-
-	jbd_debug (3, "JBD: commit phase 1\n");
-
-	/*
-	 * Clear revoked flag to reflect there are no revoked buffers
-	 * in the next transaction which is going to be started.
-	 */
-	journal_clear_buffer_revoked_flags(journal);
-
-	/*
-	 * Switch to a new revoke table.
-	 */
-	journal_switch_revoke_table(journal);
-
-	trace_jbd_commit_flushing(journal, commit_transaction);
-	commit_transaction->t_state = T_FLUSH;
-	journal->j_committing_transaction = commit_transaction;
-	journal->j_running_transaction = NULL;
-	start_time = ktime_get();
-	commit_transaction->t_log_start = journal->j_head;
-	wake_up(&journal->j_wait_transaction_locked);
-	spin_unlock(&journal->j_state_lock);
-
-	jbd_debug (3, "JBD: commit phase 2\n");
-
-	if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
-		write_op = WRITE_SYNC;
-
-	/*
-	 * Now start flushing things to disk, in the order they appear
-	 * on the transaction lists.  Data blocks go first.
-	 */
-	blk_start_plug(&plug);
-	err = journal_submit_data_buffers(journal, commit_transaction,
-					  write_op);
-	blk_finish_plug(&plug);
-
-	/*
-	 * Wait for all previously submitted IO to complete.
-	 */
-	spin_lock(&journal->j_list_lock);
-	while (commit_transaction->t_locked_list) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_locked_list->b_tprev;
-		bh = jh2bh(jh);
-		get_bh(bh);
-		if (buffer_locked(bh)) {
-			spin_unlock(&journal->j_list_lock);
-			wait_on_buffer(bh);
-			spin_lock(&journal->j_list_lock);
-		}
-		if (unlikely(!buffer_uptodate(bh))) {
-			if (!trylock_page(bh->b_page)) {
-				spin_unlock(&journal->j_list_lock);
-				lock_page(bh->b_page);
-				spin_lock(&journal->j_list_lock);
-			}
-			if (bh->b_page->mapping)
-				set_bit(AS_EIO, &bh->b_page->mapping->flags);
-
-			unlock_page(bh->b_page);
-			SetPageError(bh->b_page);
-			err = -EIO;
-		}
-		if (!inverted_lock(journal, bh)) {
-			put_bh(bh);
-			spin_lock(&journal->j_list_lock);
-			continue;
-		}
-		if (buffer_jbd(bh) && bh2jh(bh) == jh &&
-		    jh->b_transaction == commit_transaction &&
-		    jh->b_jlist == BJ_Locked)
-			__journal_unfile_buffer(jh);
-		jbd_unlock_bh_state(bh);
-		release_data_buffer(bh);
-		cond_resched_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	if (err) {
-		char b[BDEVNAME_SIZE];
-
-		printk(KERN_WARNING
-			"JBD: Detected IO errors while flushing file data "
-			"on %s\n", bdevname(journal->j_fs_dev, b));
-		if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR)
-			journal_abort(journal, err);
-		err = 0;
-	}
-
-	blk_start_plug(&plug);
-
-	journal_write_revoke_records(journal, commit_transaction, write_op);
-
-	/*
-	 * If we found any dirty or locked buffers, then we should have
-	 * looped back up to the write_out_data label.  If there weren't
-	 * any then journal_clean_data_list should have wiped the list
-	 * clean by now, so check that it is in fact empty.
-	 */
-	J_ASSERT (commit_transaction->t_sync_datalist == NULL);
-
-	jbd_debug (3, "JBD: commit phase 3\n");
-
-	/*
-	 * Way to go: we have now written out all of the data for a
-	 * transaction!  Now comes the tricky part: we need to write out
-	 * metadata.  Loop over the transaction's entire buffer list:
-	 */
-	spin_lock(&journal->j_state_lock);
-	commit_transaction->t_state = T_COMMIT;
-	spin_unlock(&journal->j_state_lock);
-
-	trace_jbd_commit_logging(journal, commit_transaction);
-	J_ASSERT(commit_transaction->t_nr_buffers <=
-		 commit_transaction->t_outstanding_credits);
-
-	descriptor = NULL;
-	bufs = 0;
-	while (commit_transaction->t_buffers) {
-
-		/* Find the next buffer to be journaled... */
-
-		jh = commit_transaction->t_buffers;
-
-		/* If we're in abort mode, we just un-journal the buffer and
-		   release it. */
-
-		if (is_journal_aborted(journal)) {
-			clear_buffer_jbddirty(jh2bh(jh));
-			JBUFFER_TRACE(jh, "journal is aborting: refile");
-			journal_refile_buffer(journal, jh);
-			/* If that was the last one, we need to clean up
-			 * any descriptor buffers which may have been
-			 * already allocated, even if we are now
-			 * aborting. */
-			if (!commit_transaction->t_buffers)
-				goto start_journal_io;
-			continue;
-		}
-
-		/* Make sure we have a descriptor block in which to
-		   record the metadata buffer. */
-
-		if (!descriptor) {
-			struct buffer_head *bh;
-
-			J_ASSERT (bufs == 0);
-
-			jbd_debug(4, "JBD: get descriptor\n");
-
-			descriptor = journal_get_descriptor_buffer(journal);
-			if (!descriptor) {
-				journal_abort(journal, -EIO);
-				continue;
-			}
-
-			bh = jh2bh(descriptor);
-			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
-				(unsigned long long)bh->b_blocknr, bh->b_data);
-			header = (journal_header_t *)&bh->b_data[0];
-			header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-			header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK);
-			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);
-
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			space_left = bh->b_size - sizeof(journal_header_t);
-			first_tag = 1;
-			set_buffer_jwrite(bh);
-			set_buffer_dirty(bh);
-			wbuf[bufs++] = bh;
-
-			/* Record it so that we can wait for IO
-                           completion later */
-			BUFFER_TRACE(bh, "ph3: file as descriptor");
-			journal_file_buffer(descriptor, commit_transaction,
-					BJ_LogCtl);
-		}
-
-		/* Where is the buffer to be written? */
-
-		err = journal_next_log_block(journal, &blocknr);
-		/* If the block mapping failed, just abandon the buffer
-		   and repeat this loop: we'll fall into the
-		   refile-on-abort condition above. */
-		if (err) {
-			journal_abort(journal, err);
-			continue;
-		}
-
-		/*
-		 * start_this_handle() uses t_outstanding_credits to determine
-		 * the free space in the log, but this counter is changed
-		 * by journal_next_log_block() also.
-		 */
-		commit_transaction->t_outstanding_credits--;
-
-		/* Bump b_count to prevent truncate from stumbling over
-                   the shadowed buffer!  @@@ This can go if we ever get
-                   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
-		get_bh(jh2bh(jh));
-
-		/* Make a temporary IO buffer with which to write it out
-                   (this will requeue both the metadata buffer and the
-                   temporary IO buffer). new_bh goes on BJ_IO */
-
-		set_buffer_jwrite(jh2bh(jh));
-		/*
-		 * akpm: journal_write_metadata_buffer() sets
-		 * new_bh->b_transaction to commit_transaction.
-		 * We need to clean this up before we release new_bh
-		 * (which is of type BJ_IO)
-		 */
-		JBUFFER_TRACE(jh, "ph3: write metadata");
-		flags = journal_write_metadata_buffer(commit_transaction,
-						      jh, &new_jh, blocknr);
-		set_buffer_jwrite(jh2bh(new_jh));
-		wbuf[bufs++] = jh2bh(new_jh);
-
-		/* Record the new block's tag in the current descriptor
-                   buffer */
-
-		tag_flag = 0;
-		if (flags & 1)
-			tag_flag |= JFS_FLAG_ESCAPE;
-		if (!first_tag)
-			tag_flag |= JFS_FLAG_SAME_UUID;
-
-		tag = (journal_block_tag_t *) tagp;
-		tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr);
-		tag->t_flags = cpu_to_be32(tag_flag);
-		tagp += sizeof(journal_block_tag_t);
-		space_left -= sizeof(journal_block_tag_t);
-
-		if (first_tag) {
-			memcpy (tagp, journal->j_uuid, 16);
-			tagp += 16;
-			space_left -= 16;
-			first_tag = 0;
-		}
-
-		/* If there's no more to do, or if the descriptor is full,
-		   let the IO rip! */
-
-		if (bufs == journal->j_wbufsize ||
-		    commit_transaction->t_buffers == NULL ||
-		    space_left < sizeof(journal_block_tag_t) + 16) {
-
-			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);
-
-			/* Write an end-of-descriptor marker before
-                           submitting the IOs.  "tag" still points to
-                           the last tag we set up. */
-
-			tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG);
-
-start_journal_io:
-			for (i = 0; i < bufs; i++) {
-				struct buffer_head *bh = wbuf[i];
-				lock_buffer(bh);
-				clear_buffer_dirty(bh);
-				set_buffer_uptodate(bh);
-				bh->b_end_io = journal_end_buffer_io_sync;
-				/*
-				 * In data=journal mode, here we can end up
-				 * writing pagecache data that might be
-				 * mmapped. Since we can't afford to clean the
-				 * page and set PageWriteback (see the comment
-				 * near the other use of _submit_bh()), the
-				 * data can change while the write is in
-				 * flight.  Tell the block layer to bounce the
-				 * bio pages if stable pages are required.
-				 */
-				_submit_bh(write_op, bh, 1 << BIO_SNAP_STABLE);
-			}
-			cond_resched();
-
-			/* Force a new descriptor to be generated next
-                           time round the loop. */
-			descriptor = NULL;
-			bufs = 0;
-		}
-	}
-
-	blk_finish_plug(&plug);
-
-	/* Lo and behold: we have just managed to send a transaction to
-           the log.  Before we can commit it, wait for the IO so far to
-           complete.  Control buffers being written are on the
-           transaction's t_log_list queue, and metadata buffers are on
-           the t_iobuf_list queue.
-
-	   Wait for the buffers in reverse order.  That way we are
-	   less likely to be woken up until all IOs have completed, and
-	   so we incur less scheduling load.
-	*/
-
-	jbd_debug(3, "JBD: commit phase 4\n");
-
-	/*
-	 * akpm: these are BJ_IO, and j_list_lock is not needed.
-	 * See __journal_try_to_free_buffer.
-	 */
-wait_for_iobuf:
-	while (commit_transaction->t_iobuf_list != NULL) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_iobuf_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_iobuf;
-		}
-		if (cond_resched())
-			goto wait_for_iobuf;
-
-		if (unlikely(!buffer_uptodate(bh)))
-			err = -EIO;
-
-		clear_buffer_jwrite(bh);
-
-		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
-		journal_unfile_buffer(journal, jh);
-
-		/*
-		 * ->t_iobuf_list should contain only dummy buffer_heads
-		 * which were created by journal_write_metadata_buffer().
-		 */
-		BUFFER_TRACE(bh, "dumping temporary bh");
-		journal_put_journal_head(jh);
-		__brelse(bh);
-		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
-		free_buffer_head(bh);
-
-		/* We also have to unlock and free the corresponding
-                   shadowed buffer */
-		jh = commit_transaction->t_shadow_list->b_tprev;
-		bh = jh2bh(jh);
-		clear_buffer_jwrite(bh);
-		J_ASSERT_BH(bh, buffer_jbddirty(bh));
-
-		/* The metadata is now released for reuse, but we need
-                   to remember it against this transaction so that when
-                   we finally commit, we can do any checkpointing
-                   required. */
-		JBUFFER_TRACE(jh, "file as BJ_Forget");
-		journal_file_buffer(jh, commit_transaction, BJ_Forget);
-		/*
-		 * Wake up any transactions which were waiting for this
-		 * IO to complete. The barrier must be here so that changes
-		 * by journal_file_buffer() take effect before wake_up_bit()
-		 * does the waitqueue check.
-		 */
-		smp_mb();
-		wake_up_bit(&bh->b_state, BH_Unshadow);
-		JBUFFER_TRACE(jh, "brelse shadowed buffer");
-		__brelse(bh);
-	}
-
-	J_ASSERT (commit_transaction->t_shadow_list == NULL);
-
-	jbd_debug(3, "JBD: commit phase 5\n");
-
-	/* Here we wait for the revoke record and descriptor record buffers */
- wait_for_ctlbuf:
-	while (commit_transaction->t_log_list != NULL) {
-		struct buffer_head *bh;
-
-		jh = commit_transaction->t_log_list->b_tprev;
-		bh = jh2bh(jh);
-		if (buffer_locked(bh)) {
-			wait_on_buffer(bh);
-			goto wait_for_ctlbuf;
-		}
-		if (cond_resched())
-			goto wait_for_ctlbuf;
-
-		if (unlikely(!buffer_uptodate(bh)))
-			err = -EIO;
-
-		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
-		clear_buffer_jwrite(bh);
-		journal_unfile_buffer(journal, jh);
-		journal_put_journal_head(jh);
-		__brelse(bh);		/* One for getblk */
-		/* AKPM: bforget here */
-	}
-
-	if (err)
-		journal_abort(journal, err);
-
-	jbd_debug(3, "JBD: commit phase 6\n");
-
-	/* All metadata is written, now write commit record and do cleanup */
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(commit_transaction->t_state == T_COMMIT);
-	commit_transaction->t_state = T_COMMIT_RECORD;
-	spin_unlock(&journal->j_state_lock);
-
-	if (journal_write_commit_record(journal, commit_transaction))
-		err = -EIO;
-
-	if (err)
-		journal_abort(journal, err);
-
-	/* End of a transaction!  Finally, we can do checkpoint
-           processing: any buffers committed as a result of this
-           transaction can be removed from any checkpoint list it was on
-           before. */
-
-	jbd_debug(3, "JBD: commit phase 7\n");
-
-	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
-	J_ASSERT(commit_transaction->t_buffers == NULL);
-	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
-	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
-	J_ASSERT(commit_transaction->t_shadow_list == NULL);
-	J_ASSERT(commit_transaction->t_log_list == NULL);
-
-restart_loop:
-	/*
-	 * As there are other places (journal_unmap_buffer()) adding buffers
-	 * to this list we have to be careful and hold the j_list_lock.
-	 */
-	spin_lock(&journal->j_list_lock);
-	while (commit_transaction->t_forget) {
-		transaction_t *cp_transaction;
-		struct buffer_head *bh;
-		int try_to_free = 0;
-
-		jh = commit_transaction->t_forget;
-		spin_unlock(&journal->j_list_lock);
-		bh = jh2bh(jh);
-		/*
-		 * Get a reference so that bh cannot be freed before we are
-		 * done with it.
-		 */
-		get_bh(bh);
-		jbd_lock_bh_state(bh);
-		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
-			jh->b_transaction == journal->j_running_transaction);
-
-		/*
-		 * If there is undo-protected committed data against
-		 * this buffer, then we can remove it now.  If it is a
-		 * buffer needing such protection, the old frozen_data
-		 * field now points to a committed version of the
-		 * buffer, so rotate that field to the new committed
-		 * data.
-		 *
-		 * Otherwise, we can just throw away the frozen data now.
-		 */
-		if (jh->b_committed_data) {
-			jbd_free(jh->b_committed_data, bh->b_size);
-			jh->b_committed_data = NULL;
-			if (jh->b_frozen_data) {
-				jh->b_committed_data = jh->b_frozen_data;
-				jh->b_frozen_data = NULL;
-			}
-		} else if (jh->b_frozen_data) {
-			jbd_free(jh->b_frozen_data, bh->b_size);
-			jh->b_frozen_data = NULL;
-		}
-
-		spin_lock(&journal->j_list_lock);
-		cp_transaction = jh->b_cp_transaction;
-		if (cp_transaction) {
-			JBUFFER_TRACE(jh, "remove from old cp transaction");
-			__journal_remove_checkpoint(jh);
-		}
-
-		/* Only re-checkpoint the buffer_head if it is marked
-		 * dirty.  If the buffer was added to the BJ_Forget list
-		 * by journal_forget, it may no longer be dirty and
-		 * there's no point in keeping a checkpoint record for
-		 * it. */
-
-		/*
-		 * A buffer which has been freed while still being journaled by
-		 * a previous transaction.
-		 */
-		if (buffer_freed(bh)) {
-			/*
-			 * If the running transaction is the one containing
-			 * "add to orphan" operation (b_next_transaction !=
-			 * NULL), we have to wait for that transaction to
-			 * commit before we can really get rid of the buffer.
-			 * So just clear b_modified to not confuse transaction
-			 * credit accounting and refile the buffer to
-			 * BJ_Forget of the running transaction. If the just
-			 * committed transaction contains "add to orphan"
-			 * operation, we can completely invalidate the buffer
-			 * now. We are rather thorough in that since the
-			 * buffer may still be accessible when blocksize <
-			 * pagesize and it is attached to the last partial
-			 * page.
-			 */
-			jh->b_modified = 0;
-			if (!jh->b_next_transaction) {
-				clear_buffer_freed(bh);
-				clear_buffer_jbddirty(bh);
-				clear_buffer_mapped(bh);
-				clear_buffer_new(bh);
-				clear_buffer_req(bh);
-				bh->b_bdev = NULL;
-			}
-		}
-
-		if (buffer_jbddirty(bh)) {
-			JBUFFER_TRACE(jh, "add to new checkpointing trans");
-			__journal_insert_checkpoint(jh, commit_transaction);
-			if (is_journal_aborted(journal))
-				clear_buffer_jbddirty(bh);
-		} else {
-			J_ASSERT_BH(bh, !buffer_dirty(bh));
-			/*
-			 * The buffer on BJ_Forget list and not jbddirty means
-			 * it has been freed by this transaction and hence it
-			 * could not have been reallocated until this
-			 * transaction has committed. *BUT* it could be
-			 * reallocated once we have written all the data to
-			 * disk and before we process the buffer on BJ_Forget
-			 * list.
-			 */
-			if (!jh->b_next_transaction)
-				try_to_free = 1;
-		}
-		JBUFFER_TRACE(jh, "refile or unfile freed buffer");
-		__journal_refile_buffer(jh);
-		jbd_unlock_bh_state(bh);
-		if (try_to_free)
-			release_buffer_page(bh);
-		else
-			__brelse(bh);
-		cond_resched_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-	/*
-	 * This is a bit sleazy.  We use j_list_lock to protect transition
-	 * of a transaction into T_FINISHED state and calling
-	 * __journal_drop_transaction(). Otherwise we could race with
-	 * other checkpointing code processing the transaction...
-	 */
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&journal->j_list_lock);
-	/*
-	 * Now recheck if some buffers did not get attached to the transaction
-	 * while the lock was dropped...
-	 */
-	if (commit_transaction->t_forget) {
-		spin_unlock(&journal->j_list_lock);
-		spin_unlock(&journal->j_state_lock);
-		goto restart_loop;
-	}
-
-	/* Done with this transaction! */
-
-	jbd_debug(3, "JBD: commit phase 8\n");
-
-	J_ASSERT(commit_transaction->t_state == T_COMMIT_RECORD);
-
-	commit_transaction->t_state = T_FINISHED;
-	J_ASSERT(commit_transaction == journal->j_committing_transaction);
-	journal->j_commit_sequence = commit_transaction->t_tid;
-	journal->j_committing_transaction = NULL;
-	commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
-
-	/*
-	 * weight the commit time higher than the average time so we don't
-	 * react too strongly to vast changes in commit time
-	 */
-	if (likely(journal->j_average_commit_time))
-		journal->j_average_commit_time = (commit_time*3 +
-				journal->j_average_commit_time) / 4;
-	else
-		journal->j_average_commit_time = commit_time;
-
-	spin_unlock(&journal->j_state_lock);
-
-	if (commit_transaction->t_checkpoint_list == NULL &&
-	    commit_transaction->t_checkpoint_io_list == NULL) {
-		__journal_drop_transaction(journal, commit_transaction);
-	} else {
-		if (journal->j_checkpoint_transactions == NULL) {
-			journal->j_checkpoint_transactions = commit_transaction;
-			commit_transaction->t_cpnext = commit_transaction;
-			commit_transaction->t_cpprev = commit_transaction;
-		} else {
-			commit_transaction->t_cpnext =
-				journal->j_checkpoint_transactions;
-			commit_transaction->t_cpprev =
-				commit_transaction->t_cpnext->t_cpprev;
-			commit_transaction->t_cpnext->t_cpprev =
-				commit_transaction;
-			commit_transaction->t_cpprev->t_cpnext =
-				commit_transaction;
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	trace_jbd_end_commit(journal, commit_transaction);
-	jbd_debug(1, "JBD: commit %d complete, head %d\n",
-		  journal->j_commit_sequence, journal->j_tail_sequence);
-
-	wake_up(&journal->j_wait_done_commit);
-}
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
deleted file mode 100644
index c46a79a..0000000
--- a/fs/jbd/journal.c
+++ /dev/null
@@ -1,2145 +0,0 @@
-/*
- * linux/fs/jbd/journal.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Generic filesystem journal-writing code; part of the ext2fs
- * journaling system.
- *
- * This file manages journals: areas of disk reserved for logging
- * transactional updates.  This includes the kernel journaling thread
- * which is responsible for scheduling updates to the log.
- *
- * We do not actually manage the physical storage of the journal in this
- * file: that is left to a per-journal policy function, which allows us
- * to store the journal within a filesystem-specified area for ext2
- * journaling (ext2 can use a reserved inode for storing the log).
- */
-
-#include <linux/module.h>
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/freezer.h>
-#include <linux/pagemap.h>
-#include <linux/kthread.h>
-#include <linux/poison.h>
-#include <linux/proc_fs.h>
-#include <linux/debugfs.h>
-#include <linux/ratelimit.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/jbd.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-
-EXPORT_SYMBOL(journal_start);
-EXPORT_SYMBOL(journal_restart);
-EXPORT_SYMBOL(journal_extend);
-EXPORT_SYMBOL(journal_stop);
-EXPORT_SYMBOL(journal_lock_updates);
-EXPORT_SYMBOL(journal_unlock_updates);
-EXPORT_SYMBOL(journal_get_write_access);
-EXPORT_SYMBOL(journal_get_create_access);
-EXPORT_SYMBOL(journal_get_undo_access);
-EXPORT_SYMBOL(journal_dirty_data);
-EXPORT_SYMBOL(journal_dirty_metadata);
-EXPORT_SYMBOL(journal_release_buffer);
-EXPORT_SYMBOL(journal_forget);
-#if 0
-EXPORT_SYMBOL(journal_sync_buffer);
-#endif
-EXPORT_SYMBOL(journal_flush);
-EXPORT_SYMBOL(journal_revoke);
-
-EXPORT_SYMBOL(journal_init_dev);
-EXPORT_SYMBOL(journal_init_inode);
-EXPORT_SYMBOL(journal_update_format);
-EXPORT_SYMBOL(journal_check_used_features);
-EXPORT_SYMBOL(journal_check_available_features);
-EXPORT_SYMBOL(journal_set_features);
-EXPORT_SYMBOL(journal_create);
-EXPORT_SYMBOL(journal_load);
-EXPORT_SYMBOL(journal_destroy);
-EXPORT_SYMBOL(journal_abort);
-EXPORT_SYMBOL(journal_errno);
-EXPORT_SYMBOL(journal_ack_err);
-EXPORT_SYMBOL(journal_clear_err);
-EXPORT_SYMBOL(log_wait_commit);
-EXPORT_SYMBOL(log_start_commit);
-EXPORT_SYMBOL(journal_start_commit);
-EXPORT_SYMBOL(journal_force_commit_nested);
-EXPORT_SYMBOL(journal_wipe);
-EXPORT_SYMBOL(journal_blocks_per_page);
-EXPORT_SYMBOL(journal_invalidatepage);
-EXPORT_SYMBOL(journal_try_to_free_buffers);
-EXPORT_SYMBOL(journal_force_commit);
-
-static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
-static void __journal_abort_soft (journal_t *journal, int errno);
-static const char *journal_dev_name(journal_t *journal, char *buffer);
-
-#ifdef CONFIG_JBD_DEBUG
-void __jbd_debug(int level, const char *file, const char *func,
-		 unsigned int line, const char *fmt, ...)
-{
-	struct va_format vaf;
-	va_list args;
-
-	if (level > journal_enable_debug)
-		return;
-	va_start(args, fmt);
-	vaf.fmt = fmt;
-	vaf.va = &args;
-	printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
-	va_end(args);
-}
-EXPORT_SYMBOL(__jbd_debug);
-#endif
-
-/*
- * Helper function used to manage commit timeouts
- */
-
-static void commit_timeout(unsigned long __data)
-{
-	struct task_struct * p = (struct task_struct *) __data;
-
-	wake_up_process(p);
-}
-
-/*
- * kjournald: The main thread function used to manage a logging device
- * journal.
- *
- * This kernel thread is responsible for two things:
- *
- * 1) COMMIT:  Every so often we need to commit the current state of the
- *    filesystem to disk.  The journal thread is responsible for writing
- *    all of the metadata buffers to disk.
- *
- * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
- *    of the data in that part of the log has been rewritten elsewhere on
- *    the disk.  Flushing these old buffers to reclaim space in the log is
- *    known as checkpointing, and this thread is responsible for that job.
- */
-
-static int kjournald(void *arg)
-{
-	journal_t *journal = arg;
-	transaction_t *transaction;
-
-	/*
-	 * Set up an interval timer which can be used to trigger a commit wakeup
-	 * after the commit interval expires
-	 */
-	setup_timer(&journal->j_commit_timer, commit_timeout,
-			(unsigned long)current);
-
-	set_freezable();
-
-	/* Record that the journal thread is running */
-	journal->j_task = current;
-	wake_up(&journal->j_wait_done_commit);
-
-	printk(KERN_INFO "kjournald starting.  Commit interval %ld seconds\n",
-			journal->j_commit_interval / HZ);
-
-	/*
-	 * And now, wait forever for commit wakeup events.
-	 */
-	spin_lock(&journal->j_state_lock);
-
-loop:
-	if (journal->j_flags & JFS_UNMOUNT)
-		goto end_loop;
-
-	jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
-		journal->j_commit_sequence, journal->j_commit_request);
-
-	if (journal->j_commit_sequence != journal->j_commit_request) {
-		jbd_debug(1, "OK, requests differ\n");
-		spin_unlock(&journal->j_state_lock);
-		del_timer_sync(&journal->j_commit_timer);
-		journal_commit_transaction(journal);
-		spin_lock(&journal->j_state_lock);
-		goto loop;
-	}
-
-	wake_up(&journal->j_wait_done_commit);
-	if (freezing(current)) {
-		/*
-		 * The simpler the better. Flushing journal isn't a
-		 * good idea, because that depends on threads that may
-		 * be already stopped.
-		 */
-		jbd_debug(1, "Now suspending kjournald\n");
-		spin_unlock(&journal->j_state_lock);
-		try_to_freeze();
-		spin_lock(&journal->j_state_lock);
-	} else {
-		/*
-		 * We assume on resume that commits are already there,
-		 * so we don't sleep
-		 */
-		DEFINE_WAIT(wait);
-		int should_sleep = 1;
-
-		prepare_to_wait(&journal->j_wait_commit, &wait,
-				TASK_INTERRUPTIBLE);
-		if (journal->j_commit_sequence != journal->j_commit_request)
-			should_sleep = 0;
-		transaction = journal->j_running_transaction;
-		if (transaction && time_after_eq(jiffies,
-						transaction->t_expires))
-			should_sleep = 0;
-		if (journal->j_flags & JFS_UNMOUNT)
-			should_sleep = 0;
-		if (should_sleep) {
-			spin_unlock(&journal->j_state_lock);
-			schedule();
-			spin_lock(&journal->j_state_lock);
-		}
-		finish_wait(&journal->j_wait_commit, &wait);
-	}
-
-	jbd_debug(1, "kjournald wakes\n");
-
-	/*
-	 * Were we woken up by a commit wakeup event?
-	 */
-	transaction = journal->j_running_transaction;
-	if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
-		journal->j_commit_request = transaction->t_tid;
-		jbd_debug(1, "woke because of timeout\n");
-	}
-	goto loop;
-
-end_loop:
-	spin_unlock(&journal->j_state_lock);
-	del_timer_sync(&journal->j_commit_timer);
-	journal->j_task = NULL;
-	wake_up(&journal->j_wait_done_commit);
-	jbd_debug(1, "Journal thread exiting.\n");
-	return 0;
-}
-
-static int journal_start_thread(journal_t *journal)
-{
-	struct task_struct *t;
-
-	t = kthread_run(kjournald, journal, "kjournald");
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-
-	wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
-	return 0;
-}
-
-static void journal_kill_thread(journal_t *journal)
-{
-	spin_lock(&journal->j_state_lock);
-	journal->j_flags |= JFS_UNMOUNT;
-
-	while (journal->j_task) {
-		wake_up(&journal->j_wait_commit);
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit,
-				journal->j_task == NULL);
-		spin_lock(&journal->j_state_lock);
-	}
-	spin_unlock(&journal->j_state_lock);
-}
-
-/*
- * journal_write_metadata_buffer: write a metadata buffer to the journal.
- *
- * Writes a metadata buffer to a given disk block.  The actual IO is not
- * performed but a new buffer_head is constructed which labels the data
- * to be written with the correct destination disk block.
- *
- * Any magic-number escaping which needs to be done will cause a
- * copy-out here.  If the buffer happens to start with the
- * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
- * magic number is only written to the log for descriptor blocks.  In
- * this case, we copy the data and replace the first word with 0, and we
- * return a result code which indicates that this buffer needs to be
- * marked as an escaped buffer in the corresponding log descriptor
- * block.  The missing word can then be restored when the block is read
- * during recovery.
- *
- * If the source buffer has already been modified by a new transaction
- * since we took the last commit snapshot, we use the frozen copy of
- * that data for IO.  If we end up using the existing buffer_head's data
- * for the write, then we *have* to lock the buffer to prevent anyone
- * else from using and possibly modifying it while the IO is in
- * progress.
- *
- * The function returns a pointer to the buffer_heads to be used for IO.
- *
- * We assume that the journal has already been locked in this function.
- *
- * Return value:
- *  <0: Error
- * >=0: Finished OK
- *
- * On success:
- * Bit 0 set == escape performed on the data
- * Bit 1 set == buffer copy-out performed (kfree the data after IO)
- */
-
-int journal_write_metadata_buffer(transaction_t *transaction,
-				  struct journal_head  *jh_in,
-				  struct journal_head **jh_out,
-				  unsigned int blocknr)
-{
-	int need_copy_out = 0;
-	int done_copy_out = 0;
-	int do_escape = 0;
-	char *mapped_data;
-	struct buffer_head *new_bh;
-	struct journal_head *new_jh;
-	struct page *new_page;
-	unsigned int new_offset;
-	struct buffer_head *bh_in = jh2bh(jh_in);
-	journal_t *journal = transaction->t_journal;
-
-	/*
-	 * The buffer really shouldn't be locked: only the current committing
-	 * transaction is allowed to write it, so nobody else is allowed
-	 * to do any IO.
-	 *
-	 * akpm: except if we're journalling data, and write() output is
-	 * also part of a shared mapping, and another thread has
-	 * decided to launch a writepage() against this buffer.
-	 */
-	J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
-
-	new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
-	/* keep subsequent assertions sane */
-	atomic_set(&new_bh->b_count, 1);
-	new_jh = journal_add_journal_head(new_bh);	/* This sleeps */
-
-	/*
-	 * If a new transaction has already done a buffer copy-out, then
-	 * we use that version of the data for the commit.
-	 */
-	jbd_lock_bh_state(bh_in);
-repeat:
-	if (jh_in->b_frozen_data) {
-		done_copy_out = 1;
-		new_page = virt_to_page(jh_in->b_frozen_data);
-		new_offset = offset_in_page(jh_in->b_frozen_data);
-	} else {
-		new_page = jh2bh(jh_in)->b_page;
-		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-	}
-
-	mapped_data = kmap_atomic(new_page);
-	/*
-	 * Check for escaping
-	 */
-	if (*((__be32 *)(mapped_data + new_offset)) ==
-				cpu_to_be32(JFS_MAGIC_NUMBER)) {
-		need_copy_out = 1;
-		do_escape = 1;
-	}
-	kunmap_atomic(mapped_data);
-
-	/*
-	 * Do we need to do a data copy?
-	 */
-	if (need_copy_out && !done_copy_out) {
-		char *tmp;
-
-		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
-		jbd_lock_bh_state(bh_in);
-		if (jh_in->b_frozen_data) {
-			jbd_free(tmp, bh_in->b_size);
-			goto repeat;
-		}
-
-		jh_in->b_frozen_data = tmp;
-		mapped_data = kmap_atomic(new_page);
-		memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-		kunmap_atomic(mapped_data);
-
-		new_page = virt_to_page(tmp);
-		new_offset = offset_in_page(tmp);
-		done_copy_out = 1;
-	}
-
-	/*
-	 * Did we need to escape the block?  Now that we've done all the
-	 * copying, we can finally do so.
-	 */
-	if (do_escape) {
-		mapped_data = kmap_atomic(new_page);
-		*((unsigned int *)(mapped_data + new_offset)) = 0;
-		kunmap_atomic(mapped_data);
-	}
-
-	set_bh_page(new_bh, new_page, new_offset);
-	new_jh->b_transaction = NULL;
-	new_bh->b_size = jh2bh(jh_in)->b_size;
-	new_bh->b_bdev = transaction->t_journal->j_dev;
-	new_bh->b_blocknr = blocknr;
-	set_buffer_mapped(new_bh);
-	set_buffer_dirty(new_bh);
-
-	*jh_out = new_jh;
-
-	/*
-	 * The to-be-written buffer needs to get moved to the io queue,
-	 * and the original buffer whose contents we are shadowing or
-	 * copying is moved to the transaction's shadow queue.
-	 */
-	JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-	spin_lock(&journal->j_list_lock);
-	__journal_file_buffer(jh_in, transaction, BJ_Shadow);
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh_in);
-
-	JBUFFER_TRACE(new_jh, "file as BJ_IO");
-	journal_file_buffer(new_jh, transaction, BJ_IO);
-
-	return do_escape | (done_copy_out << 1);
-}
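-
-/*
- * Illustrative sketch (assumed, abbreviated caller): the commit code
- * consumes the return value above by testing the two result bits,
- * roughly along these lines:
- *
- *	flags = journal_write_metadata_buffer(transaction, jh,
- *					      &new_jh, blocknr);
- *	if (flags & 1)
- *		tag_flag |= JFS_FLAG_ESCAPE;	(restore magic on replay)
- *
- * Bit 1 (buffer copy-out) tells the caller that b_frozen_data holds the
- * copy used for IO and must be freed once the write completes.
- */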
-
-/*
- * Allocation code for the journal file.  Manage the space left in the
- * journal, so that we can begin checkpointing when appropriate.
- */
-
-/*
- * __log_space_left: Return the number of free blocks left in the journal.
- *
- * Called with the journal already locked.
- *
- * Called under j_state_lock
- */
-
-int __log_space_left(journal_t *journal)
-{
-	int left = journal->j_free;
-
-	assert_spin_locked(&journal->j_state_lock);
-
-	/*
-	 * Be pessimistic here about the number of those free blocks which
-	 * might be required for log descriptor control blocks.
-	 */
-
-#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */
-
-	left -= MIN_LOG_RESERVED_BLOCKS;
-
-	if (left <= 0)
-		return 0;
-	left -= (left >> 3);
-	return left;
-}
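-
-/*
- * Rough worked example of the reservation above: with j_free = 1024
- * blocks we first drop the 32 blocks set aside for descriptor/control
- * blocks (left = 992) and then a further eighth of what remains
- * (992 >> 3 = 124), so __log_space_left() reports 868 usable blocks.
- */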
-
-/*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
- */
-int __log_start_commit(journal_t *journal, tid_t target)
-{
-	/*
-	 * The only transaction we can possibly wait upon is the
-	 * currently running transaction (if it exists).  Otherwise,
-	 * the target tid must be an old one.
-	 */
-	if (journal->j_commit_request != target &&
-	    journal->j_running_transaction &&
-	    journal->j_running_transaction->t_tid == target) {
-		/*
-		 * We want a new commit: OK, mark the request and wakeup the
-		 * commit thread.  We do _not_ do the commit ourselves.
-		 */
-
-		journal->j_commit_request = target;
-		jbd_debug(1, "JBD: requesting commit %d/%d\n",
-			  journal->j_commit_request,
-			  journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
-		return 1;
-	} else if (!tid_geq(journal->j_commit_request, target))
-		/* This should never happen, but if it does, preserve
-		   the evidence before kjournald goes into a loop and
-		   increments j_commit_sequence beyond all recognition. */
-		WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
-		    journal->j_commit_request, journal->j_commit_sequence,
-		    target, journal->j_running_transaction ?
-		    journal->j_running_transaction->t_tid : 0);
-	return 0;
-}
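-
-/*
- * A minimal sketch of the tid comparison helpers used above (assuming the
- * usual wraparound-safe definitions in the jbd header):
- *
- *	static inline int tid_gt(tid_t x, tid_t y)
- *	{
- *		return (int)(x - y) > 0;
- *	}
- *
- * so a tid that has just wrapped past zero still compares as newer than
- * one sitting near the top of the tid space.
- */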
-
-int log_start_commit(journal_t *journal, tid_t tid)
-{
-	int ret;
-
-	spin_lock(&journal->j_state_lock);
-	ret = __log_start_commit(journal, tid);
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-
-/*
- * Force and wait upon a commit if the calling process is not within a
- * transaction.  This is used for forcing out undo-protected data which contains
- * bitmaps, when the fs is running out of space.
- *
- * We can only force the running transaction if we don't have an active handle;
- * otherwise, we will deadlock.
- *
- * Returns true if a transaction was started.
- */
-int journal_force_commit_nested(journal_t *journal)
-{
-	transaction_t *transaction = NULL;
-	tid_t tid;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_running_transaction && !current->journal_info) {
-		transaction = journal->j_running_transaction;
-		__log_start_commit(journal, transaction->t_tid);
-	} else if (journal->j_committing_transaction)
-		transaction = journal->j_committing_transaction;
-
-	if (!transaction) {
-		spin_unlock(&journal->j_state_lock);
-		return 0;	/* Nothing to retry */
-	}
-
-	tid = transaction->t_tid;
-	spin_unlock(&journal->j_state_lock);
-	log_wait_commit(journal, tid);
-	return 1;
-}
-
-/*
- * Start a commit of the current running transaction (if any).  Returns true
- * if a transaction is going to be committed (or is currently already
- * committing), and fills its tid in at *ptid
- */
-int journal_start_commit(journal_t *journal, tid_t *ptid)
-{
-	int ret = 0;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_running_transaction) {
-		tid_t tid = journal->j_running_transaction->t_tid;
-
-		__log_start_commit(journal, tid);
-		/* There's a running transaction and we've just made sure
-		 * its commit has been scheduled. */
-		if (ptid)
-			*ptid = tid;
-		ret = 1;
-	} else if (journal->j_committing_transaction) {
-		/*
-		 * If commit has been started, then we have to wait for
-		 * completion of that transaction.
-		 */
-		if (ptid)
-			*ptid = journal->j_committing_transaction->t_tid;
-		ret = 1;
-	}
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-
-/*
- * Wait for a specified commit to complete.
- * The caller may not hold the journal lock.
- */
-int log_wait_commit(journal_t *journal, tid_t tid)
-{
-	int err = 0;
-
-#ifdef CONFIG_JBD_DEBUG
-	spin_lock(&journal->j_state_lock);
-	if (!tid_geq(journal->j_commit_request, tid)) {
-		printk(KERN_ERR
-		       "%s: error: j_commit_request=%d, tid=%d\n",
-		       __func__, journal->j_commit_request, tid);
-	}
-	spin_unlock(&journal->j_state_lock);
-#endif
-	spin_lock(&journal->j_state_lock);
-	/*
-	 * Not running or committing trans? Must be already committed. This
-	 * saves us from waiting for a *long* time when tid overflows.
-	 */
-	if (!((journal->j_running_transaction &&
-	       journal->j_running_transaction->t_tid == tid) ||
-	      (journal->j_committing_transaction &&
-	       journal->j_committing_transaction->t_tid == tid)))
-		goto out_unlock;
-
-	if (!tid_geq(journal->j_commit_waited, tid))
-		journal->j_commit_waited = tid;
-	while (tid_gt(tid, journal->j_commit_sequence)) {
-		jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
-				  tid, journal->j_commit_sequence);
-		wake_up(&journal->j_wait_commit);
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_done_commit,
-				!tid_gt(tid, journal->j_commit_sequence));
-		spin_lock(&journal->j_state_lock);
-	}
-out_unlock:
-	spin_unlock(&journal->j_state_lock);
-
-	if (unlikely(is_journal_aborted(journal)))
-		err = -EIO;
-	return err;
-}
-
-/*
- * Return 1 if a given transaction has not yet sent the barrier request
- * connected with a transaction commit. If 0 is returned, the transaction
- * may or may not have sent the barrier. Used to avoid sending the barrier
- * twice in common cases.
- */
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
-{
-	int ret = 0;
-	transaction_t *commit_trans;
-
-	if (!(journal->j_flags & JFS_BARRIER))
-		return 0;
-	spin_lock(&journal->j_state_lock);
-	/* Transaction already committed? */
-	if (tid_geq(journal->j_commit_sequence, tid))
-		goto out;
-	/*
-	 * Transaction is being committed and we already proceeded to
-	 * writing commit record?
-	 */
-	commit_trans = journal->j_committing_transaction;
-	if (commit_trans && commit_trans->t_tid == tid &&
-	    commit_trans->t_state >= T_COMMIT_RECORD)
-		goto out;
-	ret = 1;
-out:
-	spin_unlock(&journal->j_state_lock);
-	return ret;
-}
-EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
-
-/*
- * Log buffer allocation routines:
- */
-
-int journal_next_log_block(journal_t *journal, unsigned int *retp)
-{
-	unsigned int blocknr;
-
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(journal->j_free > 1);
-
-	blocknr = journal->j_head;
-	journal->j_head++;
-	journal->j_free--;
-	if (journal->j_head == journal->j_last)
-		journal->j_head = journal->j_first;
-	spin_unlock(&journal->j_state_lock);
-	return journal_bmap(journal, blocknr, retp);
-}
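-
-/*
- * Worked example of the circular allocation above: with j_first = 1,
- * j_last = 1024 and j_head = 1023, the call hands out logical block 1023,
- * decrements j_free, and wraps j_head back to block 1; reclaiming space
- * from the tail is left to checkpointing.
- */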
-
-/*
- * Conversion of logical to physical block numbers for the journal
- *
- * On external journals the journal blocks are identity-mapped, so
- * this is a no-op.  If needed, we can use j_blk_offset - everything is
- * ready.
- */
-int journal_bmap(journal_t *journal, unsigned int blocknr,
-		 unsigned int *retp)
-{
-	int err = 0;
-	unsigned int ret;
-
-	if (journal->j_inode) {
-		ret = bmap(journal->j_inode, blocknr);
-		if (ret)
-			*retp = ret;
-		else {
-			char b[BDEVNAME_SIZE];
-
-			printk(KERN_ALERT "%s: journal block not found "
-					"at offset %u on %s\n",
-				__func__,
-				blocknr,
-				bdevname(journal->j_dev, b));
-			err = -EIO;
-			__journal_abort_soft(journal, err);
-		}
-	} else {
-		*retp = blocknr; /* +journal->j_blk_offset */
-	}
-	return err;
-}
-
-/*
- * We play buffer_head aliasing tricks to write data/metadata blocks to
- * the journal without copying their contents, but for journal
- * descriptor blocks we do need to generate bona fide buffers.
- *
- * After the caller of journal_get_descriptor_buffer() has finished modifying
- * the buffer's contents they really should run flush_dcache_page(bh->b_page).
- * But we don't bother doing that, so there will be coherency problems with
- * mmaps of blockdevs which hold live JBD-controlled filesystems.
- */
-struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
-{
-	struct buffer_head *bh;
-	unsigned int blocknr;
-	int err;
-
-	err = journal_next_log_block(journal, &blocknr);
-
-	if (err)
-		return NULL;
-
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh)
-		return NULL;
-	lock_buffer(bh);
-	memset(bh->b_data, 0, journal->j_blocksize);
-	set_buffer_uptodate(bh);
-	unlock_buffer(bh);
-	BUFFER_TRACE(bh, "return this buffer");
-	return journal_add_journal_head(bh);
-}
-
-/*
- * Management for journal control blocks: functions to create and
- * destroy journal_t structures, and to initialise and read existing
- * journal blocks from disk.  */
-
-/* First: create and set up a journal_t object in memory.  We initialise
- * very few fields yet: that has to wait until we have created the
- * journal structures from scratch, or loaded them from disk. */
-
-static journal_t * journal_init_common (void)
-{
-	journal_t *journal;
-	int err;
-
-	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
-	if (!journal)
-		goto fail;
-
-	init_waitqueue_head(&journal->j_wait_transaction_locked);
-	init_waitqueue_head(&journal->j_wait_logspace);
-	init_waitqueue_head(&journal->j_wait_done_commit);
-	init_waitqueue_head(&journal->j_wait_checkpoint);
-	init_waitqueue_head(&journal->j_wait_commit);
-	init_waitqueue_head(&journal->j_wait_updates);
-	mutex_init(&journal->j_checkpoint_mutex);
-	spin_lock_init(&journal->j_revoke_lock);
-	spin_lock_init(&journal->j_list_lock);
-	spin_lock_init(&journal->j_state_lock);
-
-	journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
-
-	/* The journal is marked for error until we succeed with recovery! */
-	journal->j_flags = JFS_ABORT;
-
-	/* Set up a default-sized revoke table for the new mount. */
-	err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
-	if (err) {
-		kfree(journal);
-		goto fail;
-	}
-	return journal;
-fail:
-	return NULL;
-}
-
-/* journal_init_dev and journal_init_inode:
- *
- * Create a journal structure assigning some fixed set of disk blocks to
- * the journal.  We don't actually touch those disk blocks yet, but we
- * need to set up all of the mapping information to tell the journaling
- * system where the journal blocks are.
- *
- */
-
-/**
- *  journal_t * journal_init_dev() - creates and initialises a journal structure
- *  @bdev: Block device on which to create the journal
- *  @fs_dev: Device which holds the journalled filesystem for this journal.
- *  @start: Block nr at which the journal starts.
- *  @len:  Length of the journal in blocks.
- *  @blocksize: blocksize of journalling device
- *
- *  Returns: a newly created journal_t *
- *
- *  journal_init_dev creates a journal which maps a fixed contiguous
- *  range of blocks on an arbitrary block device.
- *
- */
-journal_t * journal_init_dev(struct block_device *bdev,
-			struct block_device *fs_dev,
-			int start, int len, int blocksize)
-{
-	journal_t *journal = journal_init_common();
-	struct buffer_head *bh;
-	int n;
-
-	if (!journal)
-		return NULL;
-
-	/* journal descriptor can store up to n blocks -bzzz */
-	journal->j_blocksize = blocksize;
-	n = journal->j_blocksize / sizeof(journal_block_tag_t);
-	journal->j_wbufsize = n;
-	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-			__func__);
-		goto out_err;
-	}
-	journal->j_dev = bdev;
-	journal->j_fs_dev = fs_dev;
-	journal->j_blk_offset = start;
-	journal->j_maxlen = len;
-
-	bh = __getblk(journal->j_dev, start, journal->j_blocksize);
-	if (!bh) {
-		printk(KERN_ERR
-		       "%s: Cannot get buffer for journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-	journal->j_sb_buffer = bh;
-	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
-	return journal;
-out_err:
-	kfree(journal->j_wbuf);
-	kfree(journal);
-	return NULL;
-}
-
-/**
- *  journal_t * journal_init_inode () - creates a journal which maps to an inode.
- *  @inode: An inode to create the journal in
- *
- * journal_init_inode creates a journal which maps an on-disk inode as
- * the journal.  The inode must exist already, must support bmap() and
- * must have all data blocks preallocated.
- */
-journal_t * journal_init_inode (struct inode *inode)
-{
-	struct buffer_head *bh;
-	journal_t *journal = journal_init_common();
-	int err;
-	int n;
-	unsigned int blocknr;
-
-	if (!journal)
-		return NULL;
-
-	journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
-	journal->j_inode = inode;
-	jbd_debug(1,
-		  "journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
-		  journal, inode->i_sb->s_id, inode->i_ino,
-		  (long long) inode->i_size,
-		  inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);
-
-	journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
-	journal->j_blocksize = inode->i_sb->s_blocksize;
-
-	/* journal descriptor can store up to n blocks -bzzz */
-	n = journal->j_blocksize / sizeof(journal_block_tag_t);
-	journal->j_wbufsize = n;
-	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
-	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
-			__func__);
-		goto out_err;
-	}
-
-	err = journal_bmap(journal, 0, &blocknr);
-	/* If that failed, give up */
-	if (err) {
-		printk(KERN_ERR "%s: Cannot locate journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-
-	bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh) {
-		printk(KERN_ERR
-		       "%s: Cannot get buffer for journal superblock\n",
-		       __func__);
-		goto out_err;
-	}
-	journal->j_sb_buffer = bh;
-	journal->j_superblock = (journal_superblock_t *)bh->b_data;
-
-	return journal;
-out_err:
-	kfree(journal->j_wbuf);
-	kfree(journal);
-	return NULL;
-}
-
-/*
- * If the journal init or create aborts, we need to mark the journal
- * superblock as being NULL to prevent the journal destroy from writing
- * back a bogus superblock.
- */
-static void journal_fail_superblock (journal_t *journal)
-{
-	struct buffer_head *bh = journal->j_sb_buffer;
-	brelse(bh);
-	journal->j_sb_buffer = NULL;
-}
-
-/*
- * Given a journal_t structure, initialise the various fields for
- * startup of a new journaling session.  We use this both when creating
- * a journal, and after recovering an old journal to reset it for
- * subsequent use.
- */
-
-static int journal_reset(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-	unsigned int first, last;
-
-	first = be32_to_cpu(sb->s_first);
-	last = be32_to_cpu(sb->s_maxlen);
-	if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
-		printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n",
-		       first, last);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	journal->j_first = first;
-	journal->j_last = last;
-
-	journal->j_head = first;
-	journal->j_tail = first;
-	journal->j_free = last - first;
-
-	journal->j_tail_sequence = journal->j_transaction_sequence;
-	journal->j_commit_sequence = journal->j_transaction_sequence - 1;
-	journal->j_commit_request = journal->j_commit_sequence;
-
-	journal->j_max_transaction_buffers = journal->j_maxlen / 4;
-
-	/*
-	 * As a special case, if the on-disk copy is already marked as needing
-	 * no recovery (s_start == 0), then we can safely defer the superblock
-	 * update until the next commit by setting JFS_FLUSHED.  This avoids
-	 * attempting a write to a potential-readonly device.
-	 */
-	if (sb->s_start == 0) {
-		jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
-			"(start %u, seq %d, errno %d)\n",
-			journal->j_tail, journal->j_tail_sequence,
-			journal->j_errno);
-		journal->j_flags |= JFS_FLUSHED;
-	} else {
-		/* Lock here to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
-		/*
-		 * Update log tail information. We use WRITE_FUA since new
-		 * transaction will start reusing journal space and so we
-		 * must make sure information about current log tail is on
-		 * disk before that.
-		 */
-		journal_update_sb_log_tail(journal,
-					   journal->j_tail_sequence,
-					   journal->j_tail,
-					   WRITE_FUA);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-	return journal_start_thread(journal);
-}
-
-/**
- * int journal_create() - Initialise the new journal file
- * @journal: Journal to create. This structure must have been initialised
- *
- * Given a journal_t structure which tells us which disk blocks we can
- * use, create a new journal superblock and initialise all of the
- * journal fields from scratch.
- **/
-int journal_create(journal_t *journal)
-{
-	unsigned int blocknr;
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int i, err;
-
-	if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
-		printk (KERN_ERR "Journal length (%d blocks) too short.\n",
-			journal->j_maxlen);
-		journal_fail_superblock(journal);
-		return -EINVAL;
-	}
-
-	if (journal->j_inode == NULL) {
-		/*
-		 * We don't know what block to start at!
-		 */
-		printk(KERN_EMERG
-		       "%s: creation of journal on external device!\n",
-		       __func__);
-		BUG();
-	}
-
-	/* Zero out the entire journal on disk.  We cannot afford to
-	   have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
-	jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
-	for (i = 0; i < journal->j_maxlen; i++) {
-		err = journal_bmap(journal, i, &blocknr);
-		if (err)
-			return err;
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		if (unlikely(!bh))
-			return -ENOMEM;
-		lock_buffer(bh);
-		memset (bh->b_data, 0, journal->j_blocksize);
-		BUFFER_TRACE(bh, "marking dirty");
-		mark_buffer_dirty(bh);
-		BUFFER_TRACE(bh, "marking uptodate");
-		set_buffer_uptodate(bh);
-		unlock_buffer(bh);
-		__brelse(bh);
-	}
-
-	sync_blockdev(journal->j_dev);
-	jbd_debug(1, "JBD: journal cleared.\n");
-
-	/* OK, fill in the initial static fields in the new superblock */
-	sb = journal->j_superblock;
-
-	sb->s_header.h_magic	 = cpu_to_be32(JFS_MAGIC_NUMBER);
-	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
-
-	sb->s_blocksize	= cpu_to_be32(journal->j_blocksize);
-	sb->s_maxlen	= cpu_to_be32(journal->j_maxlen);
-	sb->s_first	= cpu_to_be32(1);
-
-	journal->j_transaction_sequence = 1;
-
-	journal->j_flags &= ~JFS_ABORT;
-	journal->j_format_version = 2;
-
-	return journal_reset(journal);
-}
-
-static void journal_write_superblock(journal_t *journal, int write_op)
-{
-	struct buffer_head *bh = journal->j_sb_buffer;
-	int ret;
-
-	trace_journal_write_superblock(journal, write_op);
-	if (!(journal->j_flags & JFS_BARRIER))
-		write_op &= ~(REQ_FUA | REQ_FLUSH);
-	lock_buffer(bh);
-	if (buffer_write_io_error(bh)) {
-		char b[BDEVNAME_SIZE];
-		/*
-		 * Oh, dear.  A previous attempt to write the journal
-		 * superblock failed.  This could happen because the
-		 * USB device was yanked out.  Or it could happen to
-		 * be a transient write error and maybe the block will
-		 * be remapped.  Nothing we can do but to retry the
-		 * write and hope for the best.
-		 */
-		printk(KERN_ERR "JBD: previous I/O error detected "
-		       "for journal superblock update for %s.\n",
-		       journal_dev_name(journal, b));
-		clear_buffer_write_io_error(bh);
-		set_buffer_uptodate(bh);
-	}
-
-	get_bh(bh);
-	bh->b_end_io = end_buffer_write_sync;
-	ret = submit_bh(write_op, bh);
-	wait_on_buffer(bh);
-	if (buffer_write_io_error(bh)) {
-		clear_buffer_write_io_error(bh);
-		set_buffer_uptodate(bh);
-		ret = -EIO;
-	}
-	if (ret) {
-		char b[BDEVNAME_SIZE];
-		printk(KERN_ERR "JBD: Error %d detected "
-		       "when updating journal superblock for %s.\n",
-		       ret, journal_dev_name(journal, b));
-	}
-}
-
-/**
- * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
- * @journal: The journal to update.
- * @tail_tid: TID of the new transaction at the tail of the log
- * @tail_block: The first block of the transaction at the tail of the log
- * @write_op: With which operation should we write the journal sb
- *
- * Update a journal's superblock information about log tail and write it to
- * disk, waiting for the IO to complete.
- */
-void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
-				unsigned int tail_block, int write_op)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
-	jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
-		  tail_block, tail_tid);
-
-	sb->s_sequence = cpu_to_be32(tail_tid);
-	sb->s_start    = cpu_to_be32(tail_block);
-
-	journal_write_superblock(journal, write_op);
-
-	/* Log is no longer empty */
-	spin_lock(&journal->j_state_lock);
-	WARN_ON(!sb->s_sequence);
-	journal->j_flags &= ~JFS_FLUSHED;
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * mark_journal_empty() - Mark on disk journal as empty.
- * @journal: The journal to update.
- *
- * Update a journal's dynamic superblock fields to show that journal is empty.
- * Write updated superblock to disk waiting for IO to complete.
- */
-static void mark_journal_empty(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
-	spin_lock(&journal->j_state_lock);
-	/* Is it already empty? */
-	if (sb->s_start == 0) {
-		spin_unlock(&journal->j_state_lock);
-		return;
-	}
-	jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
-        	  journal->j_tail_sequence);
-
-	sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
-	sb->s_start    = cpu_to_be32(0);
-	spin_unlock(&journal->j_state_lock);
-
-	journal_write_superblock(journal, WRITE_FUA);
-
-	spin_lock(&journal->j_state_lock);
-	/* Log is empty */
-	journal->j_flags |= JFS_FLUSHED;
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * journal_update_sb_errno() - Update error in the journal.
- * @journal: The journal to update.
- *
- * Update a journal's errno.  Write updated superblock to disk waiting for IO
- * to complete.
- */
-static void journal_update_sb_errno(journal_t *journal)
-{
-	journal_superblock_t *sb = journal->j_superblock;
-
-	spin_lock(&journal->j_state_lock);
-	jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
-        	  journal->j_errno);
-	sb->s_errno = cpu_to_be32(journal->j_errno);
-	spin_unlock(&journal->j_state_lock);
-
-	journal_write_superblock(journal, WRITE_SYNC);
-}
-
-/*
- * Read the superblock for a given journal, performing initial
- * validation of the format.
- */
-
-static int journal_get_superblock(journal_t *journal)
-{
-	struct buffer_head *bh;
-	journal_superblock_t *sb;
-	int err = -EIO;
-
-	bh = journal->j_sb_buffer;
-
-	J_ASSERT(bh != NULL);
-	if (!buffer_uptodate(bh)) {
-		ll_rw_block(READ, 1, &bh);
-		wait_on_buffer(bh);
-		if (!buffer_uptodate(bh)) {
-			printk (KERN_ERR
-				"JBD: IO error reading journal superblock\n");
-			goto out;
-		}
-	}
-
-	sb = journal->j_superblock;
-
-	err = -EINVAL;
-
-	if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
-	    sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
-		printk(KERN_WARNING "JBD: no valid journal superblock found\n");
-		goto out;
-	}
-
-	switch(be32_to_cpu(sb->s_header.h_blocktype)) {
-	case JFS_SUPERBLOCK_V1:
-		journal->j_format_version = 1;
-		break;
-	case JFS_SUPERBLOCK_V2:
-		journal->j_format_version = 2;
-		break;
-	default:
-		printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
-		goto out;
-	}
-
-	if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
-		journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
-	else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
-		printk (KERN_WARNING "JBD: journal file too short\n");
-		goto out;
-	}
-
-	if (be32_to_cpu(sb->s_first) == 0 ||
-	    be32_to_cpu(sb->s_first) >= journal->j_maxlen) {
-		printk(KERN_WARNING
-			"JBD: Invalid start block of journal: %u\n",
-			be32_to_cpu(sb->s_first));
-		goto out;
-	}
-
-	return 0;
-
-out:
-	journal_fail_superblock(journal);
-	return err;
-}
-
-/*
- * Load the on-disk journal superblock and read the key fields into the
- * journal_t.
- */
-
-static int load_superblock(journal_t *journal)
-{
-	int err;
-	journal_superblock_t *sb;
-
-	err = journal_get_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-
-	journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
-	journal->j_tail = be32_to_cpu(sb->s_start);
-	journal->j_first = be32_to_cpu(sb->s_first);
-	journal->j_last = be32_to_cpu(sb->s_maxlen);
-	journal->j_errno = be32_to_cpu(sb->s_errno);
-
-	return 0;
-}
-
-
-/**
- * int journal_load() - Read journal from disk.
- * @journal: Journal to act on.
- *
- * Given a journal_t structure which tells us which disk blocks contain
- * a journal, read the journal from disk to initialise the in-memory
- * structures.
- */
-int journal_load(journal_t *journal)
-{
-	int err;
-	journal_superblock_t *sb;
-
-	err = load_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-	/* If this is a V2 superblock, then we have to check the
-	 * features flags on it. */
-
-	if (journal->j_format_version >= 2) {
-		if ((sb->s_feature_ro_compat &
-		     ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
-		    (sb->s_feature_incompat &
-		     ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES))) {
-			printk (KERN_WARNING
-				"JBD: Unrecognised features on journal\n");
-			return -EINVAL;
-		}
-	}
-
-	/* Let the recovery code check whether it needs to recover any
-	 * data from the journal. */
-	if (journal_recover(journal))
-		goto recovery_error;
-
-	/* OK, we've finished with the dynamic journal bits:
-	 * reinitialise the dynamic contents of the superblock in memory
-	 * and reset them on disk. */
-	if (journal_reset(journal))
-		goto recovery_error;
-
-	journal->j_flags &= ~JFS_ABORT;
-	journal->j_flags |= JFS_LOADED;
-	return 0;
-
-recovery_error:
-	printk (KERN_WARNING "JBD: recovery failed\n");
-	return -EIO;
-}
-
-/**
- * void journal_destroy() - Release a journal_t structure.
- * @journal: Journal to act on.
- *
- * Release a journal_t structure once it is no longer in use by the
- * journaled object.
- * Return <0 if we couldn't clean up the journal.
- */
-int journal_destroy(journal_t *journal)
-{
-	int err = 0;
-
-	/* Wait for the commit thread to wake up and die. */
-	journal_kill_thread(journal);
-
-	/* Force a final log commit */
-	if (journal->j_running_transaction)
-		journal_commit_transaction(journal);
-
-	/* Force any old transactions to disk */
-
-	/* We cannot race with anybody but must keep assertions happy */
-	mutex_lock(&journal->j_checkpoint_mutex);
-	/* Totally anal locking here... */
-	spin_lock(&journal->j_list_lock);
-	while (journal->j_checkpoint_transactions != NULL) {
-		spin_unlock(&journal->j_list_lock);
-		log_do_checkpoint(journal);
-		spin_lock(&journal->j_list_lock);
-	}
-
-	J_ASSERT(journal->j_running_transaction == NULL);
-	J_ASSERT(journal->j_committing_transaction == NULL);
-	J_ASSERT(journal->j_checkpoint_transactions == NULL);
-	spin_unlock(&journal->j_list_lock);
-
-	if (journal->j_sb_buffer) {
-		if (!is_journal_aborted(journal)) {
-			journal->j_tail_sequence =
-				++journal->j_transaction_sequence;
-			mark_journal_empty(journal);
-		} else
-			err = -EIO;
-		brelse(journal->j_sb_buffer);
-	}
-	mutex_unlock(&journal->j_checkpoint_mutex);
-
-	iput(journal->j_inode);
-	if (journal->j_revoke)
-		journal_destroy_revoke(journal);
-	kfree(journal->j_wbuf);
-	kfree(journal);
-
-	return err;
-}
-
-
-/**
- * int journal_check_used_features () - Check if features specified are used.
- * @journal: Journal to check.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Check whether the journal uses all of a given set of
- * features.  Return true (non-zero) if it does.
- **/
-
-int journal_check_used_features (journal_t *journal, unsigned long compat,
-				 unsigned long ro, unsigned long incompat)
-{
-	journal_superblock_t *sb;
-
-	if (!compat && !ro && !incompat)
-		return 1;
-	if (journal->j_format_version == 1)
-		return 0;
-
-	sb = journal->j_superblock;
-
-	if (((be32_to_cpu(sb->s_feature_compat) & compat) == compat) &&
-	    ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&
-	    ((be32_to_cpu(sb->s_feature_incompat) & incompat) == incompat))
-		return 1;
-
-	return 0;
-}
-
-/**
- * int journal_check_available_features() - Check feature set in journalling layer
- * @journal: Journal to check.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Check whether the journaling code supports the use of
- * all of a given set of features on this journal.  Return true
- * (non-zero) if it can. */
-
-int journal_check_available_features (journal_t *journal, unsigned long compat,
-				      unsigned long ro, unsigned long incompat)
-{
-	if (!compat && !ro && !incompat)
-		return 1;
-
-	/* We can support any known requested features iff the
-	 * superblock is in version 2.  Otherwise we fail to support any
-	 * extended sb features. */
-
-	if (journal->j_format_version != 2)
-		return 0;
-
-	if ((compat   & JFS_KNOWN_COMPAT_FEATURES) == compat &&
-	    (ro       & JFS_KNOWN_ROCOMPAT_FEATURES) == ro &&
-	    (incompat & JFS_KNOWN_INCOMPAT_FEATURES) == incompat)
-		return 1;
-
-	return 0;
-}
-
-/**
- * int journal_set_features () - Mark a given journal feature in the superblock
- * @journal: Journal to act on.
- * @compat: bitmask of compatible features
- * @ro: bitmask of features that force read-only mount
- * @incompat: bitmask of incompatible features
- *
- * Mark a given journal feature as present on the
- * superblock.  Returns true if the requested features could be set.
- *
- */
-
-int journal_set_features (journal_t *journal, unsigned long compat,
-			  unsigned long ro, unsigned long incompat)
-{
-	journal_superblock_t *sb;
-
-	if (journal_check_used_features(journal, compat, ro, incompat))
-		return 1;
-
-	if (!journal_check_available_features(journal, compat, ro, incompat))
-		return 0;
-
-	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
-		  compat, ro, incompat);
-
-	sb = journal->j_superblock;
-
-	sb->s_feature_compat    |= cpu_to_be32(compat);
-	sb->s_feature_ro_compat |= cpu_to_be32(ro);
-	sb->s_feature_incompat  |= cpu_to_be32(incompat);
-
-	return 1;
-}
-
-
-/**
- * int journal_update_format () - Update on-disk journal structure.
- * @journal: Journal to act on.
- *
- * Given an initialised but unloaded journal struct, poke about in the
- * on-disk structure to update it to the most recent supported version.
- */
-int journal_update_format (journal_t *journal)
-{
-	journal_superblock_t *sb;
-	int err;
-
-	err = journal_get_superblock(journal);
-	if (err)
-		return err;
-
-	sb = journal->j_superblock;
-
-	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
-	case JFS_SUPERBLOCK_V2:
-		return 0;
-	case JFS_SUPERBLOCK_V1:
-		return journal_convert_superblock_v1(journal, sb);
-	default:
-		break;
-	}
-	return -EINVAL;
-}
-
-static int journal_convert_superblock_v1(journal_t *journal,
-					 journal_superblock_t *sb)
-{
-	int offset, blocksize;
-	struct buffer_head *bh;
-
-	printk(KERN_WARNING
-		"JBD: Converting superblock from version 1 to 2.\n");
-
-	/* Pre-initialise new fields to zero */
-	offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb);
-	blocksize = be32_to_cpu(sb->s_blocksize);
-	memset(&sb->s_feature_compat, 0, blocksize-offset);
-
-	sb->s_nr_users = cpu_to_be32(1);
-	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
-	journal->j_format_version = 2;
-
-	bh = journal->j_sb_buffer;
-	BUFFER_TRACE(bh, "marking dirty");
-	mark_buffer_dirty(bh);
-	sync_dirty_buffer(bh);
-	return 0;
-}
-
-
-/**
- * int journal_flush () - Flush journal
- * @journal: Journal to act on.
- *
- * Flush all data for a given journal to disk and empty the journal.
- * Filesystems can use this when remounting readonly to ensure that
- * recovery does not need to happen on remount.
- */
-
-int journal_flush(journal_t *journal)
-{
-	int err = 0;
-	transaction_t *transaction = NULL;
-
-	spin_lock(&journal->j_state_lock);
-
-	/* Force everything buffered to the log... */
-	if (journal->j_running_transaction) {
-		transaction = journal->j_running_transaction;
-		__log_start_commit(journal, transaction->t_tid);
-	} else if (journal->j_committing_transaction)
-		transaction = journal->j_committing_transaction;
-
-	/* Wait for the log commit to complete... */
-	if (transaction) {
-		tid_t tid = transaction->t_tid;
-
-		spin_unlock(&journal->j_state_lock);
-		log_wait_commit(journal, tid);
-	} else {
-		spin_unlock(&journal->j_state_lock);
-	}
-
-	/* ...and flush everything in the log out to disk. */
-	spin_lock(&journal->j_list_lock);
-	while (!err && journal->j_checkpoint_transactions != NULL) {
-		spin_unlock(&journal->j_list_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
-		err = log_do_checkpoint(journal);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-		spin_lock(&journal->j_list_lock);
-	}
-	spin_unlock(&journal->j_list_lock);
-
-	if (is_journal_aborted(journal))
-		return -EIO;
-
-	mutex_lock(&journal->j_checkpoint_mutex);
-	cleanup_journal_tail(journal);
-
-	/* Finally, mark the journal as really needing no recovery.
-	 * This sets s_start==0 in the underlying superblock, which is
-	 * the magic code for a fully-recovered superblock.  Any future
-	 * commits of data to the journal will restore the current
-	 * s_start value. */
-	mark_journal_empty(journal);
-	mutex_unlock(&journal->j_checkpoint_mutex);
-	spin_lock(&journal->j_state_lock);
-	J_ASSERT(!journal->j_running_transaction);
-	J_ASSERT(!journal->j_committing_transaction);
-	J_ASSERT(!journal->j_checkpoint_transactions);
-	J_ASSERT(journal->j_head == journal->j_tail);
-	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
-	spin_unlock(&journal->j_state_lock);
-	return 0;
-}
-
-/**
- * int journal_wipe() - Wipe journal contents
- * @journal: Journal to act on.
- * @write: flag (see below)
- *
- * Wipe out all of the contents of a journal, safely.  This will produce
- * a warning if the journal contains any valid recovery information.
- * Must be called between journal_init_*() and journal_load().
- *
- * If 'write' is non-zero, then we wipe out the journal on disk; otherwise
- * we merely suppress recovery.
- */
-
-int journal_wipe(journal_t *journal, int write)
-{
-	int err = 0;
-
-	J_ASSERT (!(journal->j_flags & JFS_LOADED));
-
-	err = load_superblock(journal);
-	if (err)
-		return err;
-
-	if (!journal->j_tail)
-		goto no_recovery;
-
-	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
-		write ? "Clearing" : "Ignoring");
-
-	err = journal_skip_recovery(journal);
-	if (write) {
-		/* Lock to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
-		mark_journal_empty(journal);
-		mutex_unlock(&journal->j_checkpoint_mutex);
-	}
-
- no_recovery:
-	return err;
-}
-
-/*
- * journal_dev_name: format a character string to describe on what
- * device this journal is present.
- */
-
-static const char *journal_dev_name(journal_t *journal, char *buffer)
-{
-	struct block_device *bdev;
-
-	if (journal->j_inode)
-		bdev = journal->j_inode->i_sb->s_bdev;
-	else
-		bdev = journal->j_dev;
-
-	return bdevname(bdev, buffer);
-}
-
-/*
- * Journal abort has very specific semantics, which we describe
- * in journal_abort() below.
- *
- * Two internal functions, which provide abort to the jbd layer
- * itself, are here.
- */
-
-/*
- * Quick version for internal journal use (doesn't lock the journal).
- * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
- * and don't attempt to make any other journal updates.
- */
-static void __journal_abort_hard(journal_t *journal)
-{
-	transaction_t *transaction;
-	char b[BDEVNAME_SIZE];
-
-	if (journal->j_flags & JFS_ABORT)
-		return;
-
-	printk(KERN_ERR "Aborting journal on device %s.\n",
-		journal_dev_name(journal, b));
-
-	spin_lock(&journal->j_state_lock);
-	journal->j_flags |= JFS_ABORT;
-	transaction = journal->j_running_transaction;
-	if (transaction)
-		__log_start_commit(journal, transaction->t_tid);
-	spin_unlock(&journal->j_state_lock);
-}
-
-/* Soft abort: record the abort error status in the journal superblock,
- * but don't do any other IO. */
-static void __journal_abort_soft (journal_t *journal, int errno)
-{
-	if (journal->j_flags & JFS_ABORT)
-		return;
-
-	if (!journal->j_errno)
-		journal->j_errno = errno;
-
-	__journal_abort_hard(journal);
-
-	if (errno)
-		journal_update_sb_errno(journal);
-}
-
-/**
- * void journal_abort () - Shutdown the journal immediately.
- * @journal: the journal to shutdown.
- * @errno:   an error number to record in the journal indicating
- *           the reason for the shutdown.
- *
- * Perform a complete, immediate shutdown of the ENTIRE
- * journal (not of a single transaction).  This operation cannot be
- * undone without closing and reopening the journal.
- *
- * The journal_abort function is intended to support higher level error
- * recovery mechanisms such as the ext2/ext3 remount-readonly error
- * mode.
- *
- * Journal abort has very specific semantics.  Any existing dirty,
- * unjournaled buffers in the main filesystem will still be written to
- * disk by bdflush, but the journaling mechanism will be suspended
- * immediately and no further transaction commits will be honoured.
- *
- * Any dirty, journaled buffers will be written back to disk without
- * hitting the journal.  Atomicity cannot be guaranteed on an aborted
- * filesystem, but we _do_ attempt to leave as much data as possible
- * behind for fsck to use for cleanup.
- *
- * Any attempt to get a new transaction handle on a journal which is in
- * ABORT state will just result in an -EROFS error return.  A
- * journal_stop on an existing handle will return -EIO if we have
- * entered abort state during the update.
- *
- * Recursive transactions are not disturbed by journal abort until the
- * final journal_stop, which will receive the -EIO error.
- *
- * Finally, the journal_abort call allows the caller to supply an errno
- * which will be recorded (if possible) in the journal superblock.  This
- * allows a client to record failure conditions in the middle of a
- * transaction without having to complete the transaction to record the
- * failure to disk.  ext3_error, for example, now uses this
- * functionality.
- *
- * Errors which originate from within the journaling layer will NOT
- * supply an errno; a null errno implies that absolutely no further
- * writes are done to the journal (unless there are any already in
- * progress).
- *
- */
-
-void journal_abort(journal_t *journal, int errno)
-{
-	__journal_abort_soft(journal, errno);
-}
-
-/**
- * int journal_errno () - returns the journal's error state.
- * @journal: journal to examine.
- *
- * This is the errno number set with journal_abort(), the last
- * time the journal was mounted - if the journal was stopped
- * without calling abort this will be 0.
- *
- * If the journal has been aborted during this mount, -EROFS will
- * be returned.
- */
-int journal_errno(journal_t *journal)
-{
-	int err;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_flags & JFS_ABORT)
-		err = -EROFS;
-	else
-		err = journal->j_errno;
-	spin_unlock(&journal->j_state_lock);
-	return err;
-}
-
-/**
- * int journal_clear_err () - clears the journal's error state
- * @journal: journal to act on.
- *
- * An error must be cleared or Acked to take a FS out of readonly
- * mode.
- */
-int journal_clear_err(journal_t *journal)
-{
-	int err = 0;
-
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_flags & JFS_ABORT)
-		err = -EROFS;
-	else
-		journal->j_errno = 0;
-	spin_unlock(&journal->j_state_lock);
-	return err;
-}
-
-/**
- * void journal_ack_err() - Ack journal err.
- * @journal: journal to act on.
- *
- * An error must be cleared or Acked to take a FS out of readonly
- * mode.
- */
-void journal_ack_err(journal_t *journal)
-{
-	spin_lock(&journal->j_state_lock);
-	if (journal->j_errno)
-		journal->j_flags |= JFS_ACK_ERR;
-	spin_unlock(&journal->j_state_lock);
-}
-
-int journal_blocks_per_page(struct inode *inode)
-{
-	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
-}
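-
-/*
- * Rough worked example: with 4KB pages (PAGE_CACHE_SHIFT == 12) and a 1KB
- * filesystem block size (s_blocksize_bits == 10) the function above
- * returns 1 << 2 = 4 buffers per page.
- */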
-
-/*
- * Journal_head storage management
- */
-static struct kmem_cache *journal_head_cache;
-#ifdef CONFIG_JBD_DEBUG
-static atomic_t nr_journal_heads = ATOMIC_INIT(0);
-#endif
-
-static int journal_init_journal_head_cache(void)
-{
-	int retval;
-
-	J_ASSERT(journal_head_cache == NULL);
-	journal_head_cache = kmem_cache_create("journal_head",
-				sizeof(struct journal_head),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
-	retval = 0;
-	if (!journal_head_cache) {
-		retval = -ENOMEM;
-		printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
-	}
-	return retval;
-}
-
-static void journal_destroy_journal_head_cache(void)
-{
-	if (journal_head_cache) {
-		kmem_cache_destroy(journal_head_cache);
-		journal_head_cache = NULL;
-	}
-}
-
-/*
- * journal_head splicing and dicing
- */
-static struct journal_head *journal_alloc_journal_head(void)
-{
-	struct journal_head *ret;
-
-#ifdef CONFIG_JBD_DEBUG
-	atomic_inc(&nr_journal_heads);
-#endif
-	ret = kmem_cache_zalloc(journal_head_cache, GFP_NOFS);
-	if (ret == NULL) {
-		jbd_debug(1, "out of memory for journal_head\n");
-		printk_ratelimited(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-				   __func__);
-
-		while (ret == NULL) {
-			yield();
-			ret = kmem_cache_zalloc(journal_head_cache, GFP_NOFS);
-		}
-	}
-	return ret;
-}
-
-static void journal_free_journal_head(struct journal_head *jh)
-{
-#ifdef CONFIG_JBD_DEBUG
-	atomic_dec(&nr_journal_heads);
-	memset(jh, JBD_POISON_FREE, sizeof(*jh));
-#endif
-	kmem_cache_free(journal_head_cache, jh);
-}
-
-/*
- * A journal_head is attached to a buffer_head whenever JBD has an
- * interest in the buffer.
- *
- * Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
- * is set.  This bit is tested in core kernel code where we need to take
- * JBD-specific actions.  Testing the zeroness of ->b_private is not reliable
- * there.
- *
- * When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
- *
- * When a buffer has its BH_JBD bit set it is immune from being released by
- * core kernel code, mainly via ->b_count.
- *
- * A journal_head is detached from its buffer_head when the journal_head's
- * b_jcount reaches zero. Running transaction (b_transaction) and checkpoint
- * transaction (b_cp_transaction) hold their references to b_jcount.
- *
- * Various places in the kernel want to attach a journal_head to a buffer_head
- * _before_ attaching the journal_head to a transaction.  To protect the
- * journal_head in this situation, journal_add_journal_head elevates the
- * journal_head's b_jcount refcount by one.  The caller must call
- * journal_put_journal_head() to undo this.
- *
- * So the typical usage would be:
- *
- *	(Attach a journal_head if needed.  Increments b_jcount)
- *	struct journal_head *jh = journal_add_journal_head(bh);
- *	...
- *      (Get another reference for transaction)
- *      journal_grab_journal_head(bh);
- *      jh->b_transaction = xxx;
- *      (Put original reference)
- *      journal_put_journal_head(jh);
- */
-
-/*
- * Give a buffer_head a journal_head.
- *
- * May sleep.
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh;
-	struct journal_head *new_jh = NULL;
-
-repeat:
-	if (!buffer_jbd(bh))
-		new_jh = journal_alloc_journal_head();
-
-	jbd_lock_bh_journal_head(bh);
-	if (buffer_jbd(bh)) {
-		jh = bh2jh(bh);
-	} else {
-		J_ASSERT_BH(bh,
-			(atomic_read(&bh->b_count) > 0) ||
-			(bh->b_page && bh->b_page->mapping));
-
-		if (!new_jh) {
-			jbd_unlock_bh_journal_head(bh);
-			goto repeat;
-		}
-
-		jh = new_jh;
-		new_jh = NULL;		/* We consumed it */
-		set_buffer_jbd(bh);
-		bh->b_private = jh;
-		jh->b_bh = bh;
-		get_bh(bh);
-		BUFFER_TRACE(bh, "added journal_head");
-	}
-	jh->b_jcount++;
-	jbd_unlock_bh_journal_head(bh);
-	if (new_jh)
-		journal_free_journal_head(new_jh);
-	return bh->b_private;
-}
-
-/*
- * Grab a ref against this buffer_head's journal_head.  If it ended up not
- * having a journal_head, return NULL.
- */
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh = NULL;
-
-	jbd_lock_bh_journal_head(bh);
-	if (buffer_jbd(bh)) {
-		jh = bh2jh(bh);
-		jh->b_jcount++;
-	}
-	jbd_unlock_bh_journal_head(bh);
-	return jh;
-}
-
-static void __journal_remove_journal_head(struct buffer_head *bh)
-{
-	struct journal_head *jh = bh2jh(bh);
-
-	J_ASSERT_JH(jh, jh->b_jcount >= 0);
-	J_ASSERT_JH(jh, jh->b_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
-	J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
-	J_ASSERT_BH(bh, buffer_jbd(bh));
-	J_ASSERT_BH(bh, jh2bh(jh) == bh);
-	BUFFER_TRACE(bh, "remove journal_head");
-	if (jh->b_frozen_data) {
-		printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__);
-		jbd_free(jh->b_frozen_data, bh->b_size);
-	}
-	if (jh->b_committed_data) {
-		printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__);
-		jbd_free(jh->b_committed_data, bh->b_size);
-	}
-	bh->b_private = NULL;
-	jh->b_bh = NULL;	/* debug, really */
-	clear_buffer_jbd(bh);
-	journal_free_journal_head(jh);
-}
-
-/*
- * Drop a reference on the passed journal_head.  If it fell to zero then
- * release the journal_head from the buffer_head.
- */
-void journal_put_journal_head(struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	jbd_lock_bh_journal_head(bh);
-	J_ASSERT_JH(jh, jh->b_jcount > 0);
-	--jh->b_jcount;
-	if (!jh->b_jcount) {
-		__journal_remove_journal_head(bh);
-		jbd_unlock_bh_journal_head(bh);
-		__brelse(bh);
-	} else
-		jbd_unlock_bh_journal_head(bh);
-}
-
-/*
- * debugfs tunables
- */
-#ifdef CONFIG_JBD_DEBUG
-
-u8 journal_enable_debug __read_mostly;
-EXPORT_SYMBOL(journal_enable_debug);
-
-static struct dentry *jbd_debugfs_dir;
-static struct dentry *jbd_debug;
-
-static void __init jbd_create_debugfs_entry(void)
-{
-	jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
-	if (jbd_debugfs_dir)
-		jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
-					       jbd_debugfs_dir,
-					       &journal_enable_debug);
-}
-
-static void __exit jbd_remove_debugfs_entry(void)
-{
-	debugfs_remove(jbd_debug);
-	debugfs_remove(jbd_debugfs_dir);
-}
-
-#else
-
-static inline void jbd_create_debugfs_entry(void)
-{
-}
-
-static inline void jbd_remove_debugfs_entry(void)
-{
-}
-
-#endif
-
-struct kmem_cache *jbd_handle_cache;
-
-static int __init journal_init_handle_cache(void)
-{
-	jbd_handle_cache = kmem_cache_create("journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
-	if (jbd_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void journal_destroy_handle_cache(void)
-{
-	if (jbd_handle_cache)
-		kmem_cache_destroy(jbd_handle_cache);
-}
-
-/*
- * Module startup and shutdown
- */
-
-static int __init journal_init_caches(void)
-{
-	int ret;
-
-	ret = journal_init_revoke_caches();
-	if (ret == 0)
-		ret = journal_init_journal_head_cache();
-	if (ret == 0)
-		ret = journal_init_handle_cache();
-	return ret;
-}
-
-static void journal_destroy_caches(void)
-{
-	journal_destroy_revoke_caches();
-	journal_destroy_journal_head_cache();
-	journal_destroy_handle_cache();
-}
-
-static int __init journal_init(void)
-{
-	int ret;
-
-	BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
-
-	ret = journal_init_caches();
-	if (ret != 0)
-		journal_destroy_caches();
-	jbd_create_debugfs_entry();
-	return ret;
-}
-
-static void __exit journal_exit(void)
-{
-#ifdef CONFIG_JBD_DEBUG
-	int n = atomic_read(&nr_journal_heads);
-	if (n)
-		printk(KERN_ERR "JBD: leaked %d journal_heads!\n", n);
-#endif
-	jbd_remove_debugfs_entry();
-	journal_destroy_caches();
-}
-
-MODULE_LICENSE("GPL");
-module_init(journal_init);
-module_exit(journal_exit);
-
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
deleted file mode 100644
index a748fe2..0000000
--- a/fs/jbd/recovery.c
+++ /dev/null
@@ -1,594 +0,0 @@
-/*
- * linux/fs/jbd/recovery.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
- *
- * Copyright 1999-2000 Red Hat Software --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal recovery routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- */
-
-#ifndef __KERNEL__
-#include "jfs_user.h"
-#else
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/blkdev.h>
-#endif
-
-/*
- * Maintain information about the progress of the recovery job, so that
- * the different passes can carry information between them.
- */
-struct recovery_info
-{
-	tid_t		start_transaction;
-	tid_t		end_transaction;
-
-	int		nr_replays;
-	int		nr_revokes;
-	int		nr_revoke_hits;
-};
-
-enum passtype {PASS_SCAN, PASS_REVOKE, PASS_REPLAY};
-static int do_one_pass(journal_t *journal,
-				struct recovery_info *info, enum passtype pass);
-static int scan_revoke_records(journal_t *, struct buffer_head *,
-				tid_t, struct recovery_info *);
-
-#ifdef __KERNEL__
-
-/* Release readahead buffers after use */
-static void journal_brelse_array(struct buffer_head *b[], int n)
-{
-	while (--n >= 0)
-		brelse (b[n]);
-}
-
-
-/*
- * When reading from the journal, we are going through the block device
- * layer directly and so there is no readahead being done for us.  We
- * need to implement any readahead ourselves if we want it to happen at
- * all.  Recovery is basically one long sequential read, so make sure we
- * do the IO in reasonably large chunks.
- *
- * This is not so critical that we need to be enormously clever about
- * the readahead size, though.  128K is a purely arbitrary, good-enough
- * fixed value.
- */
-
-#define MAXBUF 8
-static int do_readahead(journal_t *journal, unsigned int start)
-{
-	int err;
-	unsigned int max, nbufs, next;
-	unsigned int blocknr;
-	struct buffer_head *bh;
-
-	struct buffer_head * bufs[MAXBUF];
-
-	/* Do up to 128K of readahead */
-	max = start + (128 * 1024 / journal->j_blocksize);
-	if (max > journal->j_maxlen)
-		max = journal->j_maxlen;
-
-	/* Do the readahead itself.  We'll submit MAXBUF buffer_heads at
-	 * a time to the block device IO layer. */
-
-	nbufs = 0;
-
-	for (next = start; next < max; next++) {
-		err = journal_bmap(journal, next, &blocknr);
-
-		if (err) {
-			printk (KERN_ERR "JBD: bad block at offset %u\n",
-				next);
-			goto failed;
-		}
-
-		bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-		if (!bh) {
-			err = -ENOMEM;
-			goto failed;
-		}
-
-		if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
-			bufs[nbufs++] = bh;
-			if (nbufs == MAXBUF) {
-				ll_rw_block(READ, nbufs, bufs);
-				journal_brelse_array(bufs, nbufs);
-				nbufs = 0;
-			}
-		} else
-			brelse(bh);
-	}
-
-	if (nbufs)
-		ll_rw_block(READ, nbufs, bufs);
-	err = 0;
-
-failed:
-	if (nbufs)
-		journal_brelse_array(bufs, nbufs);
-	return err;
-}
-
-#endif /* __KERNEL__ */
-
-
-/*
- * Read a block from the journal
- */
-
-static int jread(struct buffer_head **bhp, journal_t *journal,
-		 unsigned int offset)
-{
-	int err;
-	unsigned int blocknr;
-	struct buffer_head *bh;
-
-	*bhp = NULL;
-
-	if (offset >= journal->j_maxlen) {
-		printk(KERN_ERR "JBD: corrupted journal superblock\n");
-		return -EIO;
-	}
-
-	err = journal_bmap(journal, offset, &blocknr);
-
-	if (err) {
-		printk (KERN_ERR "JBD: bad block at offset %u\n",
-			offset);
-		return err;
-	}
-
-	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
-	if (!bh)
-		return -ENOMEM;
-
-	if (!buffer_uptodate(bh)) {
-		/* If this is a brand new buffer, start readahead.
-                   Otherwise, we assume we are already reading it.  */
-		if (!buffer_req(bh))
-			do_readahead(journal, offset);
-		wait_on_buffer(bh);
-	}
-
-	if (!buffer_uptodate(bh)) {
-		printk (KERN_ERR "JBD: Failed to read block at offset %u\n",
-			offset);
-		brelse(bh);
-		return -EIO;
-	}
-
-	*bhp = bh;
-	return 0;
-}
-
-
-/*
- * Count the number of in-use tags in a journal descriptor block.
- */
-
-static int count_tags(struct buffer_head *bh, int size)
-{
-	char *			tagp;
-	journal_block_tag_t *	tag;
-	int			nr = 0;
-
-	tagp = &bh->b_data[sizeof(journal_header_t)];
-
-	while ((tagp - bh->b_data + sizeof(journal_block_tag_t)) <= size) {
-		tag = (journal_block_tag_t *) tagp;
-
-		nr++;
-		tagp += sizeof(journal_block_tag_t);
-		if (!(tag->t_flags & cpu_to_be32(JFS_FLAG_SAME_UUID)))
-			tagp += 16;
-
-		if (tag->t_flags & cpu_to_be32(JFS_FLAG_LAST_TAG))
-			break;
-	}
-
-	return nr;
-}
-
-
-/* Make sure we wrap around the log correctly! */
-#define wrap(journal, var)						\
-do {									\
-	if (var >= (journal)->j_last)					\
-		var -= ((journal)->j_last - (journal)->j_first);	\
-} while (0)
-
-/**
- * journal_recover - recovers an on-disk journal
- * @journal: the journal to recover
- *
- * The primary function for recovering the log contents when mounting a
- * journaled device.
- *
- * Recovery is done in three passes.  In the first pass, we look for the
- * end of the log.  In the second, we assemble the list of revoke
- * blocks.  In the third and final pass, we replay any un-revoked blocks
- * in the log.
- */
-int journal_recover(journal_t *journal)
-{
-	int			err, err2;
-	journal_superblock_t *	sb;
-
-	struct recovery_info	info;
-
-	memset(&info, 0, sizeof(info));
-	sb = journal->j_superblock;
-
-	/*
-	 * The journal superblock's s_start field (the current log head)
-	 * is always zero if, and only if, the journal was cleanly
-	 * unmounted.
-	 */
-
-	if (!sb->s_start) {
-		jbd_debug(1, "No recovery required, last transaction %d\n",
-			  be32_to_cpu(sb->s_sequence));
-		journal->j_transaction_sequence = be32_to_cpu(sb->s_sequence) + 1;
-		return 0;
-	}
-
-	err = do_one_pass(journal, &info, PASS_SCAN);
-	if (!err)
-		err = do_one_pass(journal, &info, PASS_REVOKE);
-	if (!err)
-		err = do_one_pass(journal, &info, PASS_REPLAY);
-
-	jbd_debug(1, "JBD: recovery, exit status %d, "
-		  "recovered transactions %u to %u\n",
-		  err, info.start_transaction, info.end_transaction);
-	jbd_debug(1, "JBD: Replayed %d and revoked %d/%d blocks\n",
-		  info.nr_replays, info.nr_revoke_hits, info.nr_revokes);
-
-	/* Restart the log at the next transaction ID, thus invalidating
-	 * any existing commit records in the log. */
-	journal->j_transaction_sequence = ++info.end_transaction;
-
-	journal_clear_revoke(journal);
-	err2 = sync_blockdev(journal->j_fs_dev);
-	if (!err)
-		err = err2;
-	/* Flush disk caches to get replayed data on the permanent storage */
-	if (journal->j_flags & JFS_BARRIER) {
-		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
-		if (!err)
-			err = err2;
-	}
-
-	return err;
-}
-
-/**
- * journal_skip_recovery - Start journal and wipe existing records
- * @journal: journal to startup
- *
- * Locate any valid recovery information from the journal and set up the
- * journal structures in memory to ignore it (presumably because the
- * caller has evidence that it is out of date).
- * This function doesn't appear to be exported.
- *
- * We perform one pass over the journal to allow us to tell the user how
- * much recovery information is being erased, and to let us initialise
- * the journal transaction sequence numbers to the next unused ID.
- */
-int journal_skip_recovery(journal_t *journal)
-{
-	int			err;
-	struct recovery_info	info;
-
-	memset (&info, 0, sizeof(info));
-
-	err = do_one_pass(journal, &info, PASS_SCAN);
-
-	if (err) {
-		printk(KERN_ERR "JBD: error %d scanning journal\n", err);
-		++journal->j_transaction_sequence;
-	} else {
-#ifdef CONFIG_JBD_DEBUG
-		int dropped = info.end_transaction -
-			      be32_to_cpu(journal->j_superblock->s_sequence);
-		jbd_debug(1,
-			  "JBD: ignoring %d transaction%s from the journal.\n",
-			  dropped, (dropped == 1) ? "" : "s");
-#endif
-		journal->j_transaction_sequence = ++info.end_transaction;
-	}
-
-	journal->j_tail = 0;
-	return err;
-}
-
-static int do_one_pass(journal_t *journal,
-			struct recovery_info *info, enum passtype pass)
-{
-	unsigned int		first_commit_ID, next_commit_ID;
-	unsigned int		next_log_block;
-	int			err, success = 0;
-	journal_superblock_t *	sb;
-	journal_header_t *	tmp;
-	struct buffer_head *	bh;
-	unsigned int		sequence;
-	int			blocktype;
-
-	/*
-	 * First thing is to establish what we expect to find in the log
-	 * (in terms of transaction IDs), and where (in terms of log
-	 * block offsets): query the superblock.
-	 */
-
-	sb = journal->j_superblock;
-	next_commit_ID = be32_to_cpu(sb->s_sequence);
-	next_log_block = be32_to_cpu(sb->s_start);
-
-	first_commit_ID = next_commit_ID;
-	if (pass == PASS_SCAN)
-		info->start_transaction = first_commit_ID;
-
-	jbd_debug(1, "Starting recovery pass %d\n", pass);
-
-	/*
-	 * Now we walk through the log, transaction by transaction,
-	 * making sure that each transaction has a commit block in the
-	 * expected place.  Each complete transaction gets replayed back
-	 * into the main filesystem.
-	 */
-
-	while (1) {
-		int			flags;
-		char *			tagp;
-		journal_block_tag_t *	tag;
-		struct buffer_head *	obh;
-		struct buffer_head *	nbh;
-
-		cond_resched();
-
-		/* If we already know where to stop the log traversal,
-		 * check right now that we haven't gone past the end of
-		 * the log. */
-
-		if (pass != PASS_SCAN)
-			if (tid_geq(next_commit_ID, info->end_transaction))
-				break;
-
-		jbd_debug(2, "Scanning for sequence ID %u at %u/%u\n",
-			  next_commit_ID, next_log_block, journal->j_last);
-
-		/* Skip over each chunk of the transaction looking for
-		 * either the next descriptor block or the final commit
-		 * record. */
-
-		jbd_debug(3, "JBD: checking block %u\n", next_log_block);
-		err = jread(&bh, journal, next_log_block);
-		if (err)
-			goto failed;
-
-		next_log_block++;
-		wrap(journal, next_log_block);
-
-		/* What kind of buffer is it?
-		 *
-		 * If it is a descriptor block, check that it has the
-		 * expected sequence number.  Otherwise, we're all done
-		 * here. */
-
-		tmp = (journal_header_t *)bh->b_data;
-
-		if (tmp->h_magic != cpu_to_be32(JFS_MAGIC_NUMBER)) {
-			brelse(bh);
-			break;
-		}
-
-		blocktype = be32_to_cpu(tmp->h_blocktype);
-		sequence = be32_to_cpu(tmp->h_sequence);
-		jbd_debug(3, "Found magic %d, sequence %d\n",
-			  blocktype, sequence);
-
-		if (sequence != next_commit_ID) {
-			brelse(bh);
-			break;
-		}
-
-		/* OK, we have a valid descriptor block which matches
-		 * all of the sequence number checks.  What are we going
-		 * to do with it?  That depends on the pass... */
-
-		switch(blocktype) {
-		case JFS_DESCRIPTOR_BLOCK:
-			/* If it is a valid descriptor block, replay it
-			 * in pass REPLAY; otherwise, just skip over the
-			 * blocks it describes. */
-			if (pass != PASS_REPLAY) {
-				next_log_block +=
-					count_tags(bh, journal->j_blocksize);
-				wrap(journal, next_log_block);
-				brelse(bh);
-				continue;
-			}
-
-			/* A descriptor block: we can now write all of
-			 * the data blocks.  Yay, useful work is finally
-			 * getting done here! */
-
-			tagp = &bh->b_data[sizeof(journal_header_t)];
-			while ((tagp - bh->b_data +sizeof(journal_block_tag_t))
-			       <= journal->j_blocksize) {
-				unsigned int io_block;
-
-				tag = (journal_block_tag_t *) tagp;
-				flags = be32_to_cpu(tag->t_flags);
-
-				io_block = next_log_block++;
-				wrap(journal, next_log_block);
-				err = jread(&obh, journal, io_block);
-				if (err) {
-					/* Recover what we can, but
-					 * report failure at the end. */
-					success = err;
-					printk (KERN_ERR
-						"JBD: IO error %d recovering "
-						"block %u in log\n",
-						err, io_block);
-				} else {
-					unsigned int blocknr;
-
-					J_ASSERT(obh != NULL);
-					blocknr = be32_to_cpu(tag->t_blocknr);
-
-					/* If the block has been
-					 * revoked, then we're all done
-					 * here. */
-					if (journal_test_revoke
-					    (journal, blocknr,
-					     next_commit_ID)) {
-						brelse(obh);
-						++info->nr_revoke_hits;
-						goto skip_write;
-					}
-
-					/* Find a buffer for the new
-					 * data being restored */
-					nbh = __getblk(journal->j_fs_dev,
-							blocknr,
-							journal->j_blocksize);
-					if (nbh == NULL) {
-						printk(KERN_ERR
-						       "JBD: Out of memory "
-						       "during recovery.\n");
-						err = -ENOMEM;
-						brelse(bh);
-						brelse(obh);
-						goto failed;
-					}
-
-					lock_buffer(nbh);
-					memcpy(nbh->b_data, obh->b_data,
-							journal->j_blocksize);
-					if (flags & JFS_FLAG_ESCAPE) {
-						*((__be32 *)nbh->b_data) =
-						cpu_to_be32(JFS_MAGIC_NUMBER);
-					}
-
-					BUFFER_TRACE(nbh, "marking dirty");
-					set_buffer_uptodate(nbh);
-					mark_buffer_dirty(nbh);
-					BUFFER_TRACE(nbh, "marking uptodate");
-					++info->nr_replays;
-					/* ll_rw_block(WRITE, 1, &nbh); */
-					unlock_buffer(nbh);
-					brelse(obh);
-					brelse(nbh);
-				}
-
-			skip_write:
-				tagp += sizeof(journal_block_tag_t);
-				if (!(flags & JFS_FLAG_SAME_UUID))
-					tagp += 16;
-
-				if (flags & JFS_FLAG_LAST_TAG)
-					break;
-			}
-
-			brelse(bh);
-			continue;
-
-		case JFS_COMMIT_BLOCK:
-			/* Found an expected commit block: not much to
-			 * do other than move on to the next sequence
-			 * number. */
-			brelse(bh);
-			next_commit_ID++;
-			continue;
-
-		case JFS_REVOKE_BLOCK:
-			/* If we aren't in the REVOKE pass, then we can
-			 * just skip over this block. */
-			if (pass != PASS_REVOKE) {
-				brelse(bh);
-				continue;
-			}
-
-			err = scan_revoke_records(journal, bh,
-						  next_commit_ID, info);
-			brelse(bh);
-			if (err)
-				goto failed;
-			continue;
-
-		default:
-			jbd_debug(3, "Unrecognised magic %d, end of scan.\n",
-				  blocktype);
-			brelse(bh);
-			goto done;
-		}
-	}
-
- done:
-	/*
-	 * We broke out of the log scan loop: either we came to the
-	 * known end of the log or we found an unexpected block in the
-	 * log.  If the latter happened, then we know that the "current"
-	 * transaction marks the end of the valid log.
-	 */
-
-	if (pass == PASS_SCAN)
-		info->end_transaction = next_commit_ID;
-	else {
-		/* It's really bad news if different passes end up at
-		 * different places (but possible due to IO errors). */
-		if (info->end_transaction != next_commit_ID) {
-			printk (KERN_ERR "JBD: recovery pass %d ended at "
-				"transaction %u, expected %u\n",
-				pass, next_commit_ID, info->end_transaction);
-			if (!success)
-				success = -EIO;
-		}
-	}
-
-	return success;
-
- failed:
-	return err;
-}
-
-
-/* Scan a revoke record, marking all blocks mentioned as revoked. */
-
-static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
-			       tid_t sequence, struct recovery_info *info)
-{
-	journal_revoke_header_t *header;
-	int offset, max;
-
-	header = (journal_revoke_header_t *) bh->b_data;
-	offset = sizeof(journal_revoke_header_t);
-	max = be32_to_cpu(header->r_count);
-
-	while (offset < max) {
-		unsigned int blocknr;
-		int err;
-
-		blocknr = be32_to_cpu(* ((__be32 *) (bh->b_data+offset)));
-		offset += 4;
-		err = journal_set_revoke(journal, blocknr, sequence);
-		if (err)
-			return err;
-		++info->nr_revokes;
-	}
-	return 0;
-}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
deleted file mode 100644
index dcead63..0000000
--- a/fs/jbd/revoke.c
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * linux/fs/jbd/revoke.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
- *
- * Copyright 2000 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Journal revoke routines for the generic filesystem journaling code;
- * part of the ext2fs journaling system.
- *
- * Revoke is the mechanism used to prevent old log records for deleted
- * metadata from being replayed on top of newer data using the same
- * blocks.  The revoke mechanism is used in two separate places:
- *
- * + Commit: during commit we write the entire list of the current
- *   transaction's revoked blocks to the journal
- *
- * + Recovery: during recovery we record the transaction ID of all
- *   revoked blocks.  If there are multiple revoke records in the log
- *   for a single block, only the last one counts, and if there is a log
- *   entry for a block beyond the last revoke, then that log entry still
- *   gets replayed.
- *
- * We can get interactions between revokes and new log data within a
- * single transaction:
- *
- * Block is revoked and then journaled:
- *   The desired end result is the journaling of the new block, so we
- *   cancel the revoke before the transaction commits.
- *
- * Block is journaled and then revoked:
- *   The revoke must take precedence over the write of the block, so we
- *   need either to cancel the journal entry or to write the revoke
- *   later in the log than the log block.  In this case, we choose the
- *   latter: journaling a block cancels any revoke record for that block
- *   in the current transaction, so any revoke for that block in the
- *   transaction must have happened after the block was journaled and so
- *   the revoke must take precedence.
- *
- * Block is revoked and then written as data:
- *   The data write is allowed to succeed, but the revoke is _not_
- *   cancelled.  We still need to prevent old log records from
- *   overwriting the new data.  We don't even need to clear the revoke
- *   bit here.
- *
- * We cache revoke status of a buffer in the current transaction in b_states
- * bits.  As the name says, revokevalid flag indicates that the cached revoke
- * status of a buffer is valid and we can rely on the cached status.
- *
- * Revoke information on buffers is a tri-state value:
- *
- * RevokeValid clear:	no cached revoke status, need to look it up
- * RevokeValid set, Revoked clear:
- *			buffer has not been revoked, and cancel_revoke
- *			need do nothing.
- * RevokeValid set, Revoked set:
- *			buffer has been revoked.
- *
- * Locking rules:
- * We keep two hash tables of revoke records. One hashtable belongs to the
- * running transaction (is pointed to by journal->j_revoke), the other one
- * belongs to the committing transaction. Accesses to the second hash table
- * happen only from kjournald and no other thread touches this table.  Also
- * journal_switch_revoke_table() which switches which hashtable belongs to the
- * running and which to the committing transaction is called only from
- * kjournald. Therefore we need no locks when accessing the hashtable belonging
- * to the committing transaction.
- *
- * All users operating on the hash table belonging to the running transaction
- * have a handle to the transaction. Therefore they are safe from kjournald
- * switching hash tables under them. For operations on the lists of entries in
- * the hash table j_revoke_lock is used.
- *
- * Finally, the replay code also uses the hash tables but at this moment no one else
- * can touch them (filesystem isn't mounted yet) and hence no locking is
- * needed.
- */
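As an editorial aid, a minimal sketch of the tri-state check described above, written against the buffer-state helpers this file already uses (buffer_revokevalid, buffer_revoked); the example_ name is hypothetical and the snippet is not part of the removed file.

#include <linux/jbd.h>

/* Illustrative sketch only: returns non-zero when cancel_revoke actually
 * has work to do for this buffer. */
static int example_need_revoke_cancel(struct buffer_head *bh)
{
	if (!buffer_revokevalid(bh))
		return 1;		/* no cached status: search the hash table */
	return buffer_revoked(bh);	/* cached status can be trusted */
}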
-
-#ifndef __KERNEL__
-#include "jfs_user.h"
-#else
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/bio.h>
-#endif
-#include <linux/log2.h>
-#include <linux/hash.h>
-
-static struct kmem_cache *revoke_record_cache;
-static struct kmem_cache *revoke_table_cache;
-
-/* Each revoke record represents one single revoked block.  During
-   journal replay, this involves recording the transaction ID of the
-   last transaction to revoke this block. */
-
-struct jbd_revoke_record_s
-{
-	struct list_head  hash;
-	tid_t		  sequence;	/* Used for recovery only */
-	unsigned int	  blocknr;
-};
-
-
-/* The revoke table is just a simple hash table of revoke records. */
-struct jbd_revoke_table_s
-{
-	/* It is conceivable that we might want a larger hash table
-	 * for recovery.  Must be a power of two. */
-	int		  hash_size;
-	int		  hash_shift;
-	struct list_head *hash_table;
-};
-
-
-#ifdef __KERNEL__
-static void write_one_revoke_record(journal_t *, transaction_t *,
-				    struct journal_head **, int *,
-				    struct jbd_revoke_record_s *, int);
-static void flush_descriptor(journal_t *, struct journal_head *, int, int);
-#endif
-
-/* Utility functions to maintain the revoke table */
-
-static inline int hash(journal_t *journal, unsigned int block)
-{
-	struct jbd_revoke_table_s *table = journal->j_revoke;
-
-	return hash_32(block, table->hash_shift);
-}
-
-static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
-			      tid_t seq)
-{
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-
-repeat:
-	record = kmem_cache_alloc(revoke_record_cache, GFP_NOFS);
-	if (!record)
-		goto oom;
-
-	record->sequence = seq;
-	record->blocknr = blocknr;
-	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
-	spin_lock(&journal->j_revoke_lock);
-	list_add(&record->hash, hash_list);
-	spin_unlock(&journal->j_revoke_lock);
-	return 0;
-
-oom:
-	if (!journal_oom_retry)
-		return -ENOMEM;
-	jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
-	yield();
-	goto repeat;
-}
-
-/* Find a revoke record in the journal's hash table. */
-
-static struct jbd_revoke_record_s *find_revoke_record(journal_t *journal,
-						      unsigned int blocknr)
-{
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-
-	hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
-
-	spin_lock(&journal->j_revoke_lock);
-	record = (struct jbd_revoke_record_s *) hash_list->next;
-	while (&(record->hash) != hash_list) {
-		if (record->blocknr == blocknr) {
-			spin_unlock(&journal->j_revoke_lock);
-			return record;
-		}
-		record = (struct jbd_revoke_record_s *) record->hash.next;
-	}
-	spin_unlock(&journal->j_revoke_lock);
-	return NULL;
-}
-
-void journal_destroy_revoke_caches(void)
-{
-	if (revoke_record_cache) {
-		kmem_cache_destroy(revoke_record_cache);
-		revoke_record_cache = NULL;
-	}
-	if (revoke_table_cache) {
-		kmem_cache_destroy(revoke_table_cache);
-		revoke_table_cache = NULL;
-	}
-}
-
-int __init journal_init_revoke_caches(void)
-{
-	J_ASSERT(!revoke_record_cache);
-	J_ASSERT(!revoke_table_cache);
-
-	revoke_record_cache = kmem_cache_create("revoke_record",
-					   sizeof(struct jbd_revoke_record_s),
-					   0,
-					   SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
-					   NULL);
-	if (!revoke_record_cache)
-		goto record_cache_failure;
-
-	revoke_table_cache = kmem_cache_create("revoke_table",
-					   sizeof(struct jbd_revoke_table_s),
-					   0, SLAB_TEMPORARY, NULL);
-	if (!revoke_table_cache)
-		goto table_cache_failure;
-
-	return 0;
-
-table_cache_failure:
-	journal_destroy_revoke_caches();
-record_cache_failure:
-	return -ENOMEM;
-}
-
-static struct jbd_revoke_table_s *journal_init_revoke_table(int hash_size)
-{
-	int i;
-	struct jbd_revoke_table_s *table;
-
-	table = kmem_cache_alloc(revoke_table_cache, GFP_KERNEL);
-	if (!table)
-		goto out;
-
-	table->hash_size = hash_size;
-	table->hash_shift = ilog2(hash_size);
-	table->hash_table =
-		kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-	if (!table->hash_table) {
-		kmem_cache_free(revoke_table_cache, table);
-		table = NULL;
-		goto out;
-	}
-
-	for (i = 0; i < hash_size; i++)
-		INIT_LIST_HEAD(&table->hash_table[i]);
-
-out:
-	return table;
-}
-
-static void journal_destroy_revoke_table(struct jbd_revoke_table_s *table)
-{
-	int i;
-	struct list_head *hash_list;
-
-	for (i = 0; i < table->hash_size; i++) {
-		hash_list = &table->hash_table[i];
-		J_ASSERT(list_empty(hash_list));
-	}
-
-	kfree(table->hash_table);
-	kmem_cache_free(revoke_table_cache, table);
-}
-
-/* Initialise the revoke table for a given journal to a given size. */
-int journal_init_revoke(journal_t *journal, int hash_size)
-{
-	J_ASSERT(journal->j_revoke_table[0] == NULL);
-	J_ASSERT(is_power_of_2(hash_size));
-
-	journal->j_revoke_table[0] = journal_init_revoke_table(hash_size);
-	if (!journal->j_revoke_table[0])
-		goto fail0;
-
-	journal->j_revoke_table[1] = journal_init_revoke_table(hash_size);
-	if (!journal->j_revoke_table[1])
-		goto fail1;
-
-	journal->j_revoke = journal->j_revoke_table[1];
-
-	spin_lock_init(&journal->j_revoke_lock);
-
-	return 0;
-
-fail1:
-	journal_destroy_revoke_table(journal->j_revoke_table[0]);
-fail0:
-	return -ENOMEM;
-}
-
-/* Destroy a journal's revoke table.  The table must already be empty! */
-void journal_destroy_revoke(journal_t *journal)
-{
-	journal->j_revoke = NULL;
-	if (journal->j_revoke_table[0])
-		journal_destroy_revoke_table(journal->j_revoke_table[0]);
-	if (journal->j_revoke_table[1])
-		journal_destroy_revoke_table(journal->j_revoke_table[1]);
-}
-
-
-#ifdef __KERNEL__
-
-/*
- * journal_revoke: revoke a given buffer_head from the journal.  This
- * prevents the block from being replayed during recovery if we take a
- * crash after this current transaction commits.  Any subsequent
- * metadata writes of the buffer in this transaction cancel the
- * revoke.
- *
- * Note that this call may block --- it is up to the caller to make
- * sure that there are no further calls to journal_write_metadata
- * before the revoke is complete.  In ext3, this implies calling the
- * revoke before clearing the block bitmap when we are deleting
- * metadata.
- *
- * Revoke performs a journal_forget on any buffer_head passed in as a
- * parameter, but does _not_ forget the buffer_head if the bh was only
- * found implicitly.
- *
- * bh_in may not be a journalled buffer - it may have come off
- * the hash tables without an attached journal_head.
- *
- * If bh_in is non-zero, journal_revoke() will decrement its b_count
- * by one.
- */
-
-int journal_revoke(handle_t *handle, unsigned int blocknr,
-		   struct buffer_head *bh_in)
-{
-	struct buffer_head *bh = NULL;
-	journal_t *journal;
-	struct block_device *bdev;
-	int err;
-
-	might_sleep();
-	if (bh_in)
-		BUFFER_TRACE(bh_in, "enter");
-
-	journal = handle->h_transaction->t_journal;
-	if (!journal_set_features(journal, 0, 0, JFS_FEATURE_INCOMPAT_REVOKE)){
-		J_ASSERT (!"Cannot set revoke feature!");
-		return -EINVAL;
-	}
-
-	bdev = journal->j_fs_dev;
-	bh = bh_in;
-
-	if (!bh) {
-		bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
-		if (bh)
-			BUFFER_TRACE(bh, "found on hash");
-	}
-#ifdef JBD_EXPENSIVE_CHECKING
-	else {
-		struct buffer_head *bh2;
-
-		/* If there is a different buffer_head lying around in
-		 * memory anywhere... */
-		bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
-		if (bh2) {
-			/* ... and it has RevokeValid status... */
-			if (bh2 != bh && buffer_revokevalid(bh2))
-				/* ...then it better be revoked too,
-				 * since it's illegal to create a revoke
-				 * record against a buffer_head which is
-				 * not marked revoked --- that would
-				 * risk missing a subsequent revoke
-				 * cancel. */
-				J_ASSERT_BH(bh2, buffer_revoked(bh2));
-			put_bh(bh2);
-		}
-	}
-#endif
-
-	/* We really ought not ever to revoke twice in a row without
-           first having the revoke cancelled: it's illegal to free a
-           block twice without allocating it in between! */
-	if (bh) {
-		if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
-				 "inconsistent data on disk")) {
-			if (!bh_in)
-				brelse(bh);
-			return -EIO;
-		}
-		set_buffer_revoked(bh);
-		set_buffer_revokevalid(bh);
-		if (bh_in) {
-			BUFFER_TRACE(bh_in, "call journal_forget");
-			journal_forget(handle, bh_in);
-		} else {
-			BUFFER_TRACE(bh, "call brelse");
-			__brelse(bh);
-		}
-	}
-
-	jbd_debug(2, "insert revoke for block %u, bh_in=%p\n", blocknr, bh_in);
-	err = insert_revoke_hash(journal, blocknr,
-				handle->h_transaction->t_tid);
-	BUFFER_TRACE(bh_in, "exit");
-	return err;
-}
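For context, a hedged sketch of how a client filesystem such as ext3 would call this entry point when freeing a metadata block; the example_ helper is hypothetical and not part of the removed file.

#include <linux/jbd.h>

/* Illustrative sketch only: revoke a metadata block that is being freed,
 * so a stale copy in the log is not replayed over reused data after a
 * crash.  Error handling by the caller is elided. */
static int example_forget_metadata(handle_t *handle, struct buffer_head *bh)
{
	return journal_revoke(handle, bh->b_blocknr, bh);
}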
-
-/*
- * Cancel an outstanding revoke.  For use only internally by the
- * journaling code (called from journal_get_write_access).
- *
- * We trust buffer_revoked() on the buffer if the buffer is already
- * being journaled: if there is no revoke pending on the buffer, then we
- * don't do anything here.
- *
- * This would break if it were possible for a buffer to be revoked and
- * discarded, and then reallocated within the same transaction.  In such
- * a case we would have lost the revoked bit, but when we arrived here
- * the second time we would still have a pending revoke to cancel.  So,
- * do not trust the Revoked bit on buffers unless RevokeValid is also
- * set.
- */
-int journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
-{
-	struct jbd_revoke_record_s *record;
-	journal_t *journal = handle->h_transaction->t_journal;
-	int need_cancel;
-	int did_revoke = 0;	/* akpm: debug */
-	struct buffer_head *bh = jh2bh(jh);
-
-	jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
-
-	/* Is the existing Revoke bit valid?  If so, we trust it, and
-	 * only perform the full cancel if the revoke bit is set.  If
-	 * not, we can't trust the revoke bit, and we need to do the
-	 * full search for a revoke record. */
-	if (test_set_buffer_revokevalid(bh)) {
-		need_cancel = test_clear_buffer_revoked(bh);
-	} else {
-		need_cancel = 1;
-		clear_buffer_revoked(bh);
-	}
-
-	if (need_cancel) {
-		record = find_revoke_record(journal, bh->b_blocknr);
-		if (record) {
-			jbd_debug(4, "cancelled existing revoke on "
-				  "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
-			spin_lock(&journal->j_revoke_lock);
-			list_del(&record->hash);
-			spin_unlock(&journal->j_revoke_lock);
-			kmem_cache_free(revoke_record_cache, record);
-			did_revoke = 1;
-		}
-	}
-
-#ifdef JBD_EXPENSIVE_CHECKING
-	/* There better not be one left behind by now! */
-	record = find_revoke_record(journal, bh->b_blocknr);
-	J_ASSERT_JH(jh, record == NULL);
-#endif
-
-	/* Finally, have we just cleared revoke on an unhashed
-	 * buffer_head?  If so, we'd better make sure we clear the
-	 * revoked status on any hashed alias too, otherwise the revoke
-	 * state machine will get very upset later on. */
-	if (need_cancel) {
-		struct buffer_head *bh2;
-		bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
-		if (bh2) {
-			if (bh2 != bh)
-				clear_buffer_revoked(bh2);
-			__brelse(bh2);
-		}
-	}
-	return did_revoke;
-}
-
-/*
- * journal_clear_buffer_revoked_flags clears the revoked flag of buffers in
- * the revoke table to reflect that there are no revoked buffers in the next
- * transaction which is going to be started.
- */
-void journal_clear_buffer_revoked_flags(journal_t *journal)
-{
-	struct jbd_revoke_table_s *revoke = journal->j_revoke;
-	int i = 0;
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		struct list_head *hash_list;
-		struct list_head *list_entry;
-		hash_list = &revoke->hash_table[i];
-
-		list_for_each(list_entry, hash_list) {
-			struct jbd_revoke_record_s *record;
-			struct buffer_head *bh;
-			record = (struct jbd_revoke_record_s *)list_entry;
-			bh = __find_get_block(journal->j_fs_dev,
-					      record->blocknr,
-					      journal->j_blocksize);
-			if (bh) {
-				clear_buffer_revoked(bh);
-				__brelse(bh);
-			}
-		}
-	}
-}
-
-/* journal_switch_revoke_table: select j_revoke for the next transaction;
- * we do not want to suspend any processing until all revokes are
- * written -bzzz
- */
-void journal_switch_revoke_table(journal_t *journal)
-{
-	int i;
-
-	if (journal->j_revoke == journal->j_revoke_table[0])
-		journal->j_revoke = journal->j_revoke_table[1];
-	else
-		journal->j_revoke = journal->j_revoke_table[0];
-
-	for (i = 0; i < journal->j_revoke->hash_size; i++)
-		INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
-}
-
-/*
- * Write revoke records to the journal for all entries in the current
- * revoke hash, deleting the entries as we go.
- */
-void journal_write_revoke_records(journal_t *journal,
-				  transaction_t *transaction, int write_op)
-{
-	struct journal_head *descriptor;
-	struct jbd_revoke_record_s *record;
-	struct jbd_revoke_table_s *revoke;
-	struct list_head *hash_list;
-	int i, offset, count;
-
-	descriptor = NULL;
-	offset = 0;
-	count = 0;
-
-	/* select revoke table for committing transaction */
-	revoke = journal->j_revoke == journal->j_revoke_table[0] ?
-		journal->j_revoke_table[1] : journal->j_revoke_table[0];
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		hash_list = &revoke->hash_table[i];
-
-		while (!list_empty(hash_list)) {
-			record = (struct jbd_revoke_record_s *)
-				hash_list->next;
-			write_one_revoke_record(journal, transaction,
-						&descriptor, &offset,
-						record, write_op);
-			count++;
-			list_del(&record->hash);
-			kmem_cache_free(revoke_record_cache, record);
-		}
-	}
-	if (descriptor)
-		flush_descriptor(journal, descriptor, offset, write_op);
-	jbd_debug(1, "Wrote %d revoke records\n", count);
-}
-
-/*
- * Write out one revoke record.  We need to create a new descriptor
- * block if the old one is full or if we have not already created one.
- */
-
-static void write_one_revoke_record(journal_t *journal,
-				    transaction_t *transaction,
-				    struct journal_head **descriptorp,
-				    int *offsetp,
-				    struct jbd_revoke_record_s *record,
-				    int write_op)
-{
-	struct journal_head *descriptor;
-	int offset;
-	journal_header_t *header;
-
-	/* If we are already aborting, this all becomes a noop.  We
-           still need to go round the loop in
-           journal_write_revoke_records in order to free all of the
-           revoke records: only the IO to the journal is omitted. */
-	if (is_journal_aborted(journal))
-		return;
-
-	descriptor = *descriptorp;
-	offset = *offsetp;
-
-	/* Make sure we have a descriptor with space left for the record */
-	if (descriptor) {
-		if (offset == journal->j_blocksize) {
-			flush_descriptor(journal, descriptor, offset, write_op);
-			descriptor = NULL;
-		}
-	}
-
-	if (!descriptor) {
-		descriptor = journal_get_descriptor_buffer(journal);
-		if (!descriptor)
-			return;
-		header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
-		header->h_magic     = cpu_to_be32(JFS_MAGIC_NUMBER);
-		header->h_blocktype = cpu_to_be32(JFS_REVOKE_BLOCK);
-		header->h_sequence  = cpu_to_be32(transaction->t_tid);
-
-		/* Record it so that we can wait for IO completion later */
-		JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
-		journal_file_buffer(descriptor, transaction, BJ_LogCtl);
-
-		offset = sizeof(journal_revoke_header_t);
-		*descriptorp = descriptor;
-	}
-
-	* ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
-		cpu_to_be32(record->blocknr);
-	offset += 4;
-	*offsetp = offset;
-}
-
-/*
- * Flush a revoke descriptor out to the journal.  If we are aborting,
- * this is a noop; otherwise we are generating a buffer which needs to
- * be waited for during commit, so it has to go onto the appropriate
- * journal buffer list.
- */
-
-static void flush_descriptor(journal_t *journal,
-			     struct journal_head *descriptor,
-			     int offset, int write_op)
-{
-	journal_revoke_header_t *header;
-	struct buffer_head *bh = jh2bh(descriptor);
-
-	if (is_journal_aborted(journal)) {
-		put_bh(bh);
-		return;
-	}
-
-	header = (journal_revoke_header_t *) jh2bh(descriptor)->b_data;
-	header->r_count = cpu_to_be32(offset);
-	set_buffer_jwrite(bh);
-	BUFFER_TRACE(bh, "write");
-	set_buffer_dirty(bh);
-	write_dirty_buffer(bh, write_op);
-}
-#endif
-
-/*
- * Revoke support for recovery.
- *
- * Recovery needs to be able to:
- *
- *  record all revoke records, including the tid of the latest instance
- *  of each revoke in the journal
- *
- *  check whether a given block in a given transaction should be replayed
- *  (ie. has not been revoked by a revoke record in that or a subsequent
- *  transaction)
- *
- *  empty the revoke table after recovery.
- */
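As an editorial aid, a short sketch of the order in which the recovery code above exercises these entry points; the example_ function and its arguments are placeholders, not part of the removed file.

#include <linux/jbd.h>

/* Illustrative sketch only: 'blocknr' and 'tid' stand for values parsed
 * out of the log during the recovery passes. */
static void example_recovery_revoke_flow(journal_t *journal,
					 unsigned int blocknr, tid_t tid)
{
	/* PASS_REVOKE: remember the newest revoke seen for this block */
	journal_set_revoke(journal, blocknr, tid);

	/* PASS_REPLAY: skip the block if it was revoked at or after 'tid' */
	if (journal_test_revoke(journal, blocknr, tid))
		return;

	/* ... replay the block; journal_clear_revoke() runs once all
	 * passes have finished ... */
}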
-
-/*
- * First, setting revoke records.  We create a new revoke record for
- * every block ever revoked in the log as we scan it for recovery, and
- * we update the existing records if we find multiple revokes for a
- * single block.
- */
-
-int journal_set_revoke(journal_t *journal,
-		       unsigned int blocknr,
-		       tid_t sequence)
-{
-	struct jbd_revoke_record_s *record;
-
-	record = find_revoke_record(journal, blocknr);
-	if (record) {
-		/* If we have multiple occurrences, only record the
-		 * latest sequence number in the hashed record */
-		if (tid_gt(sequence, record->sequence))
-			record->sequence = sequence;
-		return 0;
-	}
-	return insert_revoke_hash(journal, blocknr, sequence);
-}
-
-/*
- * Test revoke records.  For a given block referenced in the log, has
- * that block been revoked?  A revoke record with a given transaction
- * sequence number revokes all blocks in that transaction and earlier
- * ones, but later transactions still need to be replayed.
- */
-
-int journal_test_revoke(journal_t *journal,
-			unsigned int blocknr,
-			tid_t sequence)
-{
-	struct jbd_revoke_record_s *record;
-
-	record = find_revoke_record(journal, blocknr);
-	if (!record)
-		return 0;
-	if (tid_gt(sequence, record->sequence))
-		return 0;
-	return 1;
-}
-
-/*
- * Finally, once recovery is over, we need to clear the revoke table so
- * that it can be reused by the running filesystem.
- */
-
-void journal_clear_revoke(journal_t *journal)
-{
-	int i;
-	struct list_head *hash_list;
-	struct jbd_revoke_record_s *record;
-	struct jbd_revoke_table_s *revoke;
-
-	revoke = journal->j_revoke;
-
-	for (i = 0; i < revoke->hash_size; i++) {
-		hash_list = &revoke->hash_table[i];
-		while (!list_empty(hash_list)) {
-			record = (struct jbd_revoke_record_s*) hash_list->next;
-			list_del(&record->hash);
-			kmem_cache_free(revoke_record_cache, record);
-		}
-	}
-}
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
deleted file mode 100644
index 1695ba8..0000000
--- a/fs/jbd/transaction.c
+++ /dev/null
@@ -1,2237 +0,0 @@
-/*
- * linux/fs/jbd/transaction.c
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
- *
- * Copyright 1998 Red Hat corp --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Generic filesystem transaction handling code; part of the ext2fs
- * journaling system.
- *
- * This file manages transactions (compound commits managed by the
- * journaling code) and handles (individual atomic operations by the
- * filesystem).
- */
-
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/jbd.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/hrtimer.h>
-
-static void __journal_temp_unlink_buffer(struct journal_head *jh);
-
-/*
- * get_transaction: obtain a new transaction_t object.
- *
- * Initialise a newly allocated transaction.  Create it in
- * RUNNING state and add it to the current journal (which should not
- * have an existing running transaction: we only make a new transaction
- * once we have started to commit the old one).
- *
- * Preconditions:
- *	The journal MUST be locked.  We don't perform atomic mallocs on the
- *	new transaction	and we can't block without protecting against other
- *	processes trying to touch the journal while it is in transition.
- *
- * Called under j_state_lock
- */
-
-static transaction_t *
-get_transaction(journal_t *journal, transaction_t *transaction)
-{
-	transaction->t_journal = journal;
-	transaction->t_state = T_RUNNING;
-	transaction->t_start_time = ktime_get();
-	transaction->t_tid = journal->j_transaction_sequence++;
-	transaction->t_expires = jiffies + journal->j_commit_interval;
-	spin_lock_init(&transaction->t_handle_lock);
-
-	/* Set up the commit timer for the new transaction. */
-	journal->j_commit_timer.expires =
-				round_jiffies_up(transaction->t_expires);
-	add_timer(&journal->j_commit_timer);
-
-	J_ASSERT(journal->j_running_transaction == NULL);
-	journal->j_running_transaction = transaction;
-
-	return transaction;
-}
-
-/*
- * Handle management.
- *
- * A handle_t is an object which represents a single atomic update to a
- * filesystem, and which tracks all of the modifications which form part
- * of that one update.
- */
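As an editorial aid, a minimal sketch of the handle lifecycle as a client filesystem would drive it; the example_ function and the single-credit reservation are placeholders, not part of the removed file.

#include <linux/err.h>
#include <linux/jbd.h>

/* Illustrative sketch only: one atomic update bracketed by a handle. */
static int example_update(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle = journal_start(journal, 1);	/* reserve one buffer credit */
	int err;

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = journal_get_write_access(handle, bh);
	if (!err) {
		/* ... modify bh->b_data ... */
		err = journal_dirty_metadata(handle, bh);
	}
	journal_stop(handle);
	return err;
}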
-
-/*
- * start_this_handle: Given a handle, deal with any locking or stalling
- * needed to make sure that there is enough journal space for the handle
- * to begin.  Attach the handle to a transaction and set up the
- * transaction's buffer credits.
- */
-
-static int start_this_handle(journal_t *journal, handle_t *handle)
-{
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	transaction_t *new_transaction = NULL;
-	int ret = 0;
-
-	if (nblocks > journal->j_max_transaction_buffers) {
-		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
-		       current->comm, nblocks,
-		       journal->j_max_transaction_buffers);
-		ret = -ENOSPC;
-		goto out;
-	}
-
-alloc_transaction:
-	if (!journal->j_running_transaction) {
-		new_transaction = kzalloc(sizeof(*new_transaction),
-						GFP_NOFS|__GFP_NOFAIL);
-		if (!new_transaction) {
-			ret = -ENOMEM;
-			goto out;
-		}
-	}
-
-	jbd_debug(3, "New handle %p going live.\n", handle);
-
-repeat:
-
-	/*
-	 * We need to hold j_state_lock until t_updates has been incremented,
-	 * for proper journal barrier handling
-	 */
-	spin_lock(&journal->j_state_lock);
-repeat_locked:
-	if (is_journal_aborted(journal) ||
-	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
-		spin_unlock(&journal->j_state_lock);
-		ret = -EROFS;
-		goto out;
-	}
-
-	/* Wait on the journal's transaction barrier if necessary */
-	if (journal->j_barrier_count) {
-		spin_unlock(&journal->j_state_lock);
-		wait_event(journal->j_wait_transaction_locked,
-				journal->j_barrier_count == 0);
-		goto repeat;
-	}
-
-	if (!journal->j_running_transaction) {
-		if (!new_transaction) {
-			spin_unlock(&journal->j_state_lock);
-			goto alloc_transaction;
-		}
-		get_transaction(journal, new_transaction);
-		new_transaction = NULL;
-	}
-
-	transaction = journal->j_running_transaction;
-
-	/*
-	 * If the current transaction is locked down for commit, wait for the
-	 * lock to be released.
-	 */
-	if (transaction->t_state == T_LOCKED) {
-		DEFINE_WAIT(wait);
-
-		prepare_to_wait(&journal->j_wait_transaction_locked,
-					&wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * If there is not enough space left in the log to write all potential
-	 * buffers requested by this operation, we need to stall pending a log
-	 * checkpoint to free some more log space.
-	 */
-	spin_lock(&transaction->t_handle_lock);
-	needed = transaction->t_outstanding_credits + nblocks;
-
-	if (needed > journal->j_max_transaction_buffers) {
-		/*
-		 * If the current transaction is already too large, then start
-		 * to commit it: we can then go back and attach this handle to
-		 * a new transaction.
-		 */
-		DEFINE_WAIT(wait);
-
-		jbd_debug(2, "Handle %p starting new commit...\n", handle);
-		spin_unlock(&transaction->t_handle_lock);
-		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
-				TASK_UNINTERRUPTIBLE);
-		__log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_transaction_locked, &wait);
-		goto repeat;
-	}
-
-	/*
-	 * The commit code assumes that it can get enough log space
-	 * without forcing a checkpoint.  This is *critical* for
-	 * correctness: a checkpoint of a buffer which is also
-	 * associated with a committing transaction creates a deadlock,
-	 * so commit simply cannot force through checkpoints.
-	 *
-	 * We must therefore ensure the necessary space in the journal
-	 * *before* starting to dirty potentially checkpointed buffers
-	 * in the new transaction.
-	 *
-	 * The worst part is, any transaction currently committing can
-	 * reduce the free space arbitrarily.  Be careful to account for
-	 * those buffers when checkpointing.
-	 */
-
-	/*
-	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
-	 * a _lot_ of headroom: 1/4 of the journal plus the size of
-	 * the committing transaction.  Really, we only need to give it
-	 * committing_transaction->t_outstanding_credits plus "enough" for
-	 * the log control blocks.
-	 * Also, this test is inconsistent with the matching one in
-	 * journal_extend().
-	 */
-	if (__log_space_left(journal) < jbd_space_needed(journal)) {
-		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
-		spin_unlock(&transaction->t_handle_lock);
-		__log_wait_for_space(journal);
-		goto repeat_locked;
-	}
-
-	/* OK, account for the buffers that this operation expects to
-	 * use and add the handle to the running transaction. */
-
-	handle->h_transaction = transaction;
-	transaction->t_outstanding_credits += nblocks;
-	transaction->t_updates++;
-	transaction->t_handle_count++;
-	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
-		  handle, nblocks, transaction->t_outstanding_credits,
-		  __log_space_left(journal));
-	spin_unlock(&transaction->t_handle_lock);
-	spin_unlock(&journal->j_state_lock);
-
-	lock_map_acquire(&handle->h_lockdep_map);
-out:
-	if (unlikely(new_transaction))		/* It's usually NULL */
-		kfree(new_transaction);
-	return ret;
-}
-
-static struct lock_class_key jbd_handle_key;
-
-/* Allocate a new handle.  This should probably be in a slab... */
-static handle_t *new_handle(int nblocks)
-{
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
-	if (!handle)
-		return NULL;
-	handle->h_buffer_credits = nblocks;
-	handle->h_ref = 1;
-
-	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);
-
-	return handle;
-}
-
-/**
- * handle_t *journal_start() - Obtain a new handle.
- * @journal: Journal to start transaction on.
- * @nblocks: number of block buffers we might modify
- *
- * We make sure that the transaction can guarantee at least nblocks of
- * modified buffers in the log.  We block until the log can guarantee
- * that much space.
- *
- * This function is visible to journal users (like ext3fs), so is not
- * called with the journal already locked.
- *
- * Return a pointer to a newly allocated handle, or an ERR_PTR() value
- * on failure.
- */
-handle_t *journal_start(journal_t *journal, int nblocks)
-{
-	handle_t *handle = journal_current_handle();
-	int err;
-
-	if (!journal)
-		return ERR_PTR(-EROFS);
-
-	if (handle) {
-		J_ASSERT(handle->h_transaction->t_journal == journal);
-		handle->h_ref++;
-		return handle;
-	}
-
-	handle = new_handle(nblocks);
-	if (!handle)
-		return ERR_PTR(-ENOMEM);
-
-	current->journal_info = handle;
-
-	err = start_this_handle(journal, handle);
-	if (err < 0) {
-		jbd_free_handle(handle);
-		current->journal_info = NULL;
-		handle = ERR_PTR(err);
-	}
-	return handle;
-}
-
-/**
- * int journal_extend() - extend buffer credits.
- * @handle:  handle to 'extend'
- * @nblocks: nr blocks to try to extend by.
- *
- * Some transactions, such as large extends and truncates, can be done
- * atomically all at once or in several stages.  The operation requests
- * a credit for a number of buffer modifications in advance, but can
- * extend its credit if it needs more.
- *
- * journal_extend tries to give the running handle more buffer credits.
- * It does not guarantee that the allocation will succeed - this is best-effort only.
- * The calling process MUST be able to deal cleanly with a failure to
- * extend here.
- *
- * Return 0 on success, non-zero on failure.
- *
- * return code < 0 implies an error
- * return code > 0 implies normal transaction-full status.
- */
-int journal_extend(handle_t *handle, int nblocks)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int result;
-	int wanted;
-
-	result = -EIO;
-	if (is_handle_aborted(handle))
-		goto out;
-
-	result = 1;
-
-	spin_lock(&journal->j_state_lock);
-
-	/* Don't extend a locked-down transaction! */
-	if (handle->h_transaction->t_state != T_RUNNING) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "transaction not running\n", handle, nblocks);
-		goto error_out;
-	}
-
-	spin_lock(&transaction->t_handle_lock);
-	wanted = transaction->t_outstanding_credits + nblocks;
-
-	if (wanted > journal->j_max_transaction_buffers) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "transaction too large\n", handle, nblocks);
-		goto unlock;
-	}
-
-	if (wanted > __log_space_left(journal)) {
-		jbd_debug(3, "denied handle %p %d blocks: "
-			  "insufficient log space\n", handle, nblocks);
-		goto unlock;
-	}
-
-	handle->h_buffer_credits += nblocks;
-	transaction->t_outstanding_credits += nblocks;
-	result = 0;
-
-	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
-unlock:
-	spin_unlock(&transaction->t_handle_lock);
-error_out:
-	spin_unlock(&journal->j_state_lock);
-out:
-	return result;
-}
-
-
-/**
- * int journal_restart() - restart a handle.
- * @handle:  handle to restart
- * @nblocks: nr credits requested
- *
- * Restart a handle for a multi-transaction filesystem
- * operation.
- *
- * If the journal_extend() call above fails to grant new buffer credits
- * to a running handle, a call to journal_restart will commit the
- * handle's transaction so far and reattach the handle to a new
- * transaction capable of guaranteeing the requested number of
- * credits.
- */
-
-int journal_restart(handle_t *handle, int nblocks)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int ret;
-
-	/* If we've had an abort of any type, don't even think about
-	 * actually doing the restart! */
-	if (is_handle_aborted(handle))
-		return 0;
-
-	/*
-	 * First unlink the handle from its current transaction, and start the
-	 * commit on that.
-	 */
-	J_ASSERT(transaction->t_updates > 0);
-	J_ASSERT(journal_current_handle() == handle);
-
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&transaction->t_handle_lock);
-	transaction->t_outstanding_credits -= handle->h_buffer_credits;
-	transaction->t_updates--;
-
-	if (!transaction->t_updates)
-		wake_up(&journal->j_wait_updates);
-	spin_unlock(&transaction->t_handle_lock);
-
-	jbd_debug(2, "restarting handle %p\n", handle);
-	__log_start_commit(journal, transaction->t_tid);
-	spin_unlock(&journal->j_state_lock);
-
-	lock_map_release(&handle->h_lockdep_map);
-	handle->h_buffer_credits = nblocks;
-	ret = start_this_handle(journal, handle);
-	return ret;
-}
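As an editorial aid, a small sketch of the extend-or-restart pattern the two comments above describe; the example_ helper and its credit count are placeholders, not part of the removed file.

#include <linux/jbd.h>

/* Illustrative sketch only: ask for more credits, fall back to a restart
 * if the running transaction cannot grant them. */
static int example_more_credits(handle_t *handle, int nblocks)
{
	int err = journal_extend(handle, nblocks);

	if (err > 0)	/* running transaction is full: commit it and reattach */
		err = journal_restart(handle, nblocks);
	return err;	/* 0 on success, negative on error */
}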
-
-
-/**
- * void journal_lock_updates () - establish a transaction barrier.
- * @journal:  Journal to establish a barrier on.
- *
- * This locks out any further updates from being started, and blocks until all
- * existing updates have completed, returning only once the journal is in a
- * quiescent state with no updates running.
- *
- * We do not use a simple mutex for synchronization as there are syscalls which
- * want to return with the filesystem locked and that trips up lockdep. Also,
- * hibernation needs to lock the filesystem but a locked mutex then blocks
- * hibernation. Since locking the filesystem is a rare operation, we use a
- * simple counter and a waitqueue for locking.
- */
-void journal_lock_updates(journal_t *journal)
-{
-	DEFINE_WAIT(wait);
-
-wait:
-	/* Wait for previous locked operation to finish */
-	wait_event(journal->j_wait_transaction_locked,
-		   journal->j_barrier_count == 0);
-
-	spin_lock(&journal->j_state_lock);
-	/*
-	 * Check reliably under the lock whether we are the ones winning the race
-	 * and locking the journal
-	 */
-	if (journal->j_barrier_count > 0) {
-		spin_unlock(&journal->j_state_lock);
-		goto wait;
-	}
-	++journal->j_barrier_count;
-
-	/* Wait until there are no running updates */
-	while (1) {
-		transaction_t *transaction = journal->j_running_transaction;
-
-		if (!transaction)
-			break;
-
-		spin_lock(&transaction->t_handle_lock);
-		if (!transaction->t_updates) {
-			spin_unlock(&transaction->t_handle_lock);
-			break;
-		}
-		prepare_to_wait(&journal->j_wait_updates, &wait,
-				TASK_UNINTERRUPTIBLE);
-		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
-		schedule();
-		finish_wait(&journal->j_wait_updates, &wait);
-		spin_lock(&journal->j_state_lock);
-	}
-	spin_unlock(&journal->j_state_lock);
-}
-
-/**
- * void journal_unlock_updates (journal_t* journal) - release barrier
- * @journal:  Journal to release the barrier on.
- *
- * Release a transaction barrier obtained with journal_lock_updates().
- */
-void journal_unlock_updates (journal_t *journal)
-{
-	J_ASSERT(journal->j_barrier_count != 0);
-
-	spin_lock(&journal->j_state_lock);
-	--journal->j_barrier_count;
-	spin_unlock(&journal->j_state_lock);
-	wake_up(&journal->j_wait_transaction_locked);
-}
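
As a hedged usage sketch, the barrier pair brackets any operation that must not race with running handles; the work done inside the barrier below is only a placeholder comment:

#include <linux/jbd.h>

static void example_quiesce_journal(journal_t *journal)
{
	journal_lock_updates(journal);		/* blocks until t_updates == 0 */

	/*
	 * No new handles can start and none are running here: a safe
	 * point to e.g. flush the journal or remount read-only.
	 */

	journal_unlock_updates(journal);	/* wakes j_wait_transaction_locked */
}
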
-
-static void warn_dirty_buffer(struct buffer_head *bh)
-{
-	char b[BDEVNAME_SIZE];
-
-	printk(KERN_WARNING
-	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
-	       "There's a risk of filesystem corruption in case of system "
-	       "crash.\n",
-	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
-}
-
-/*
- * If the buffer is already part of the current transaction, then there
- * is nothing we need to do.  If it is already part of a prior
- * transaction which we are still committing to disk, then we need to
- * make sure that we do not overwrite the old copy: we do copy-out to
- * preserve the copy going to disk.  We also account the buffer against
- * the handle's metadata buffer credits (unless the buffer is already
- * part of the transaction, that is).
- *
- */
-static int
-do_get_write_access(handle_t *handle, struct journal_head *jh,
-			int force_copy)
-{
-	struct buffer_head *bh;
-	transaction_t *transaction;
-	journal_t *journal;
-	int error;
-	char *frozen_buffer = NULL;
-	int need_copy = 0;
-
-	if (is_handle_aborted(handle))
-		return -EROFS;
-
-	transaction = handle->h_transaction;
-	journal = transaction->t_journal;
-
-	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
-
-	JBUFFER_TRACE(jh, "entry");
-repeat:
-	bh = jh2bh(jh);
-
-	/* @@@ Need to check for errors here at some point. */
-
-	lock_buffer(bh);
-	jbd_lock_bh_state(bh);
-
-	/* We now hold the buffer lock so it is safe to query the buffer
-	 * state.  Is the buffer dirty?
-	 *
-	 * If so, there are two possibilities.  The buffer may be
-	 * non-journaled, and undergoing a quite legitimate writeback.
-	 * Otherwise, it is journaled, and we don't expect dirty buffers
-	 * in that state (the buffers should be marked JBD_Dirty
-	 * instead.)  So either the IO is being done under our own
-	 * control and this is a bug, or it's a third party IO such as
-	 * dump(8) (which may leave the buffer scheduled for read ---
-	 * ie. locked but not dirty) or tune2fs (which may actually have
-	 * the buffer dirtied, ugh.)  */
-
-	if (buffer_dirty(bh)) {
-		/*
-		 * First question: is this buffer already part of the current
-		 * transaction or the existing committing transaction?
-		 */
-		if (jh->b_transaction) {
-			J_ASSERT_JH(jh,
-				jh->b_transaction == transaction ||
-				jh->b_transaction ==
-					journal->j_committing_transaction);
-			if (jh->b_next_transaction)
-				J_ASSERT_JH(jh, jh->b_next_transaction ==
-							transaction);
-			warn_dirty_buffer(bh);
-		}
-		/*
-		 * In any case we need to clean the dirty flag and we must
-		 * do it under the buffer lock to be sure we don't race
-		 * with running write-out.
-		 */
-		JBUFFER_TRACE(jh, "Journalling dirty buffer");
-		clear_buffer_dirty(bh);
-		set_buffer_jbddirty(bh);
-	}
-
-	unlock_buffer(bh);
-
-	error = -EROFS;
-	if (is_handle_aborted(handle)) {
-		jbd_unlock_bh_state(bh);
-		goto out;
-	}
-	error = 0;
-
-	/*
-	 * The buffer is already part of this transaction if b_transaction or
-	 * b_next_transaction points to it
-	 */
-	if (jh->b_transaction == transaction ||
-	    jh->b_next_transaction == transaction)
-		goto done;
-
-	/*
-	 * this is the first time this transaction is touching this buffer,
-	 * reset the modified flag
-	 */
-	jh->b_modified = 0;
-
-	/*
-	 * If there is already a copy-out version of this buffer, then we don't
-	 * need to make another one
-	 */
-	if (jh->b_frozen_data) {
-		JBUFFER_TRACE(jh, "has frozen data");
-		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-		jh->b_next_transaction = transaction;
-		goto done;
-	}
-
-	/* Is there data here we need to preserve? */
-
-	if (jh->b_transaction && jh->b_transaction != transaction) {
-		JBUFFER_TRACE(jh, "owned by older transaction");
-		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-
-		/* There is one case we have to be very careful about.
-		 * If the committing transaction is currently writing
-		 * this buffer out to disk and has NOT made a copy-out,
-		 * then we cannot modify the buffer contents at all
-		 * right now.  The essence of copy-out is that it is the
-		 * extra copy, not the primary copy, which gets
-		 * journaled.  If the primary copy is already going to
-		 * disk then we cannot do copy-out here. */
-
-		if (jh->b_jlist == BJ_Shadow) {
-			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
-			wait_queue_head_t *wqh;
-
-			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
-
-			JBUFFER_TRACE(jh, "on shadow: sleep");
-			jbd_unlock_bh_state(bh);
-			/* commit wakes up all shadow buffers after IO */
-			for ( ; ; ) {
-				prepare_to_wait(wqh, &wait.wait,
-						TASK_UNINTERRUPTIBLE);
-				if (jh->b_jlist != BJ_Shadow)
-					break;
-				schedule();
-			}
-			finish_wait(wqh, &wait.wait);
-			goto repeat;
-		}
-
-		/* Only do the copy if the currently-owning transaction
-		 * still needs it.  If it is on the Forget list, the
-		 * committing transaction is past that stage.  The
-		 * buffer had better remain locked during the kmalloc,
-		 * but that should be true --- we hold the journal lock
-		 * still and the buffer is already on the BUF_JOURNAL
-		 * list so won't be flushed.
-		 *
-		 * Subtle point, though: if this is a get_undo_access,
-		 * then we will be relying on the frozen_data to contain
-		 * the new value of the committed_data record after the
-		 * transaction, so we HAVE to force the frozen_data copy
-		 * in that case. */
-
-		if (jh->b_jlist != BJ_Forget || force_copy) {
-			JBUFFER_TRACE(jh, "generate frozen data");
-			if (!frozen_buffer) {
-				JBUFFER_TRACE(jh, "allocate memory for buffer");
-				jbd_unlock_bh_state(bh);
-				frozen_buffer =
-					jbd_alloc(jh2bh(jh)->b_size,
-							 GFP_NOFS);
-				if (!frozen_buffer) {
-					printk(KERN_ERR
-					       "%s: OOM for frozen_buffer\n",
-					       __func__);
-					JBUFFER_TRACE(jh, "oom!");
-					error = -ENOMEM;
-					jbd_lock_bh_state(bh);
-					goto done;
-				}
-				goto repeat;
-			}
-			jh->b_frozen_data = frozen_buffer;
-			frozen_buffer = NULL;
-			need_copy = 1;
-		}
-		jh->b_next_transaction = transaction;
-	}
-
-
-	/*
-	 * Finally, if the buffer is not journaled right now, we need to make
-	 * sure it doesn't get written to disk before the caller actually
-	 * commits the new data
-	 */
-	if (!jh->b_transaction) {
-		JBUFFER_TRACE(jh, "no transaction");
-		J_ASSERT_JH(jh, !jh->b_next_transaction);
-		JBUFFER_TRACE(jh, "file as BJ_Reserved");
-		spin_lock(&journal->j_list_lock);
-		__journal_file_buffer(jh, transaction, BJ_Reserved);
-		spin_unlock(&journal->j_list_lock);
-	}
-
-done:
-	if (need_copy) {
-		struct page *page;
-		int offset;
-		char *source;
-
-		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
-			    "Possible IO failure.\n");
-		page = jh2bh(jh)->b_page;
-		offset = offset_in_page(jh2bh(jh)->b_data);
-		source = kmap_atomic(page);
-		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-		kunmap_atomic(source);
-	}
-	jbd_unlock_bh_state(bh);
-
-	/*
-	 * If we are about to journal a buffer, then any revoke pending on it is
-	 * no longer valid
-	 */
-	journal_cancel_revoke(handle, jh);
-
-out:
-	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd_free(frozen_buffer, bh->b_size);
-
-	JBUFFER_TRACE(jh, "exit");
-	return error;
-}
-
-/**
- * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
- * @handle: transaction to add buffer modifications to
- * @bh:     bh to be used for metadata writes
- *
- * Returns an error code or 0 on success.
- *
- * In full data journalling mode the buffer may be of type BJ_AsyncData,
- * because we're write()ing a buffer which is also part of a shared mapping.
- */
-
-int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
-{
-	struct journal_head *jh = journal_add_journal_head(bh);
-	int rc;
-
-	/* We do not want to get caught playing with fields which the
-	 * log thread also manipulates.  Make sure that the buffer
-	 * completes any outstanding IO before proceeding. */
-	rc = do_get_write_access(handle, jh, 0);
-	journal_put_journal_head(jh);
-	return rc;
-}
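
For context, a typical metadata update pairs journal_get_write_access() with journal_dirty_metadata() inside a handle. The sketch below is illustrative only: the block number and the byte being modified are placeholders, and journal_stop()'s return value is ignored for brevity.

#include <linux/jbd.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_update_metadata(journal_t *journal, struct super_block *sb,
				   sector_t blocknr)
{
	handle_t *handle;
	struct buffer_head *bh;
	int err;

	handle = journal_start(journal, 1);	/* one buffer credit */
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	bh = sb_bread(sb, blocknr);
	if (!bh) {
		err = -EIO;
		goto out_stop;
	}

	err = journal_get_write_access(handle, bh);	/* before modifying */
	if (err)
		goto out_brelse;

	bh->b_data[0] = 0;				/* placeholder change */

	err = journal_dirty_metadata(handle, bh);	/* file as BJ_Metadata */
out_brelse:
	brelse(bh);
out_stop:
	journal_stop(handle);
	return err;
}
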
-
-
-/*
- * When the user wants to journal a newly created buffer_head
- * (ie. getblk() returned a new buffer and we are going to populate it
- * manually rather than reading off disk), then we need to keep the
- * buffer_head locked until it has been completely filled with new
- * data.  In this case, we should be able to make the assertion that
- * the bh is not already part of an existing transaction.
- *
- * The buffer should already be locked by the caller by this point.
- * There is no lock ranking violation: it was a newly created,
- * unlocked buffer beforehand. */
-
-/**
- * int journal_get_create_access () - notify intent to use newly created bh
- * @handle: transaction to add the new buffer to
- * @bh: new buffer.
- *
- * Call this if you create a new bh.
- */
-int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh = journal_add_journal_head(bh);
-	int err;
-
-	jbd_debug(5, "journal_head %p\n", jh);
-	err = -EROFS;
-	if (is_handle_aborted(handle))
-		goto out;
-	err = 0;
-
-	JBUFFER_TRACE(jh, "entry");
-	/*
-	 * The buffer may already belong to this transaction due to pre-zeroing
-	 * in the filesystem's new_block code.  It may also be on the previous,
-	 * committing transaction's lists, but it HAS to be in Forget state in
-	 * that case: the transaction must have deleted the buffer for it to be
-	 * reused here.
-	 */
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
-		jh->b_transaction == NULL ||
-		(jh->b_transaction == journal->j_committing_transaction &&
-			  jh->b_jlist == BJ_Forget)));
-
-	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
-
-	if (jh->b_transaction == NULL) {
-		/*
-		 * Previous journal_forget() could have left the buffer
-		 * with jbddirty bit set because it was being committed. When
-		 * the commit finished, we've filed the buffer for
-		 * checkpointing and marked it dirty. Now we are reallocating
-		 * the buffer so the transaction freeing it must have
-		 * committed and so it's safe to clear the dirty bit.
-		 */
-		clear_buffer_dirty(jh2bh(jh));
-
-		/* first access by this transaction */
-		jh->b_modified = 0;
-
-		JBUFFER_TRACE(jh, "file as BJ_Reserved");
-		__journal_file_buffer(jh, transaction, BJ_Reserved);
-	} else if (jh->b_transaction == journal->j_committing_transaction) {
-		/* first access by this transaction */
-		jh->b_modified = 0;
-
-		JBUFFER_TRACE(jh, "set next transaction");
-		jh->b_next_transaction = transaction;
-	}
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-
-	/*
-	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
-	 * blocks which contain freed but then revoked metadata.  We need
-	 * to cancel the revoke in case we end up freeing it yet again
- * and then reallocating it as data - this would cause a second revoke,
-	 * which hits an assertion error.
-	 */
-	JBUFFER_TRACE(jh, "cancelling revoke");
-	journal_cancel_revoke(handle, jh);
-out:
-	journal_put_journal_head(jh);
-	return err;
-}
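
A hedged sketch of the intended call sequence for a freshly allocated metadata block, along the lines of ext3's getblk path; the block number is assumed to come from the filesystem's allocator and is not part of this patch:

#include <linux/jbd.h>
#include <linux/buffer_head.h>
#include <linux/string.h>

static int example_init_new_block(handle_t *handle, struct super_block *sb,
				  sector_t blocknr)
{
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sb, blocknr);	/* no read: we overwrite everything */
	if (!bh)
		return -ENOMEM;

	lock_buffer(bh);		/* keep it locked while we fill it */
	err = journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = journal_dirty_metadata(handle, bh);
out:
	brelse(bh);
	return err;
}
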
-
-/**
- * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
- * @handle: transaction
- * @bh: buffer to undo
- *
- * Sometimes there is a need to distinguish between metadata which has
- * been committed to disk and that which has not.  The ext3fs code uses
- * this for freeing and allocating space, we have to make sure that we
- * do not reuse freed space until the deallocation has been committed,
- * since if we overwrote that space we would make the delete
- * un-rewindable in case of a crash.
- *
- * To deal with that, journal_get_undo_access requests write access to a
- * buffer for parts of non-rewindable operations such as delete
- * operations on the bitmaps.  The journaling code must keep a copy of
- * the buffer's contents prior to the undo_access call until such time
- * as we know that the buffer has definitely been committed to disk.
- *
- * We never need to know which transaction the committed data is part
- * of, buffers touched here are guaranteed to be dirtied later and so
- * will be committed to a new transaction in due course, at which point
- * we can discard the old committed data pointer.
- *
- * Returns error number or 0 on success.
- */
-int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
-{
-	int err;
-	struct journal_head *jh = journal_add_journal_head(bh);
-	char *committed_data = NULL;
-
-	JBUFFER_TRACE(jh, "entry");
-
-	/*
-	 * Do this first --- it can drop the journal lock, so we want to
-	 * make sure that obtaining the committed_data is done
-	 * atomically wrt. completion of any outstanding commits.
-	 */
-	err = do_get_write_access(handle, jh, 1);
-	if (err)
-		goto out;
-
-repeat:
-	if (!jh->b_committed_data) {
-		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
-		if (!committed_data) {
-			printk(KERN_ERR "%s: No memory for committed data\n",
-				__func__);
-			err = -ENOMEM;
-			goto out;
-		}
-	}
-
-	jbd_lock_bh_state(bh);
-	if (!jh->b_committed_data) {
-		/* Copy out the current buffer contents into the
-		 * preserved, committed copy. */
-		JBUFFER_TRACE(jh, "generate b_committed data");
-		if (!committed_data) {
-			jbd_unlock_bh_state(bh);
-			goto repeat;
-		}
-
-		jh->b_committed_data = committed_data;
-		committed_data = NULL;
-		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
-	}
-	jbd_unlock_bh_state(bh);
-out:
-	journal_put_journal_head(jh);
-	if (unlikely(committed_data))
-		jbd_free(committed_data, bh->b_size);
-	return err;
-}
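
The classic user is block deallocation in an allocation bitmap. A simplified, illustrative sketch follows; a real allocator would use the locked/atomic bit-clearing variant and would also consult b_committed_data before reusing blocks:

#include <linux/jbd.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

static int example_free_bitmap_bit(handle_t *handle,
				   struct buffer_head *bitmap_bh, int bit)
{
	int err;

	/* Keeps a committed_data copy so the bit cannot be reused early. */
	err = journal_get_undo_access(handle, bitmap_bh);
	if (err)
		return err;

	clear_bit(bit, (unsigned long *)bitmap_bh->b_data);

	return journal_dirty_metadata(handle, bitmap_bh);
}
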
-
-/**
- * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
- * @handle: transaction
- * @bh: bufferhead to mark
- *
- * Description:
- * Mark a buffer as containing dirty data which needs to be flushed before
- * we can commit the current transaction.
- *
- * The buffer is placed on the transaction's data list and is marked as
- * belonging to the transaction.
- *
- * Returns error number or 0 on success.
- *
- * journal_dirty_data() can be called via page_launder->ext3_writepage
- * by kswapd.
- */
-int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
-{
-	journal_t *journal = handle->h_transaction->t_journal;
-	int need_brelse = 0;
-	struct journal_head *jh;
-	int ret = 0;
-
-	if (is_handle_aborted(handle))
-		return ret;
-
-	jh = journal_add_journal_head(bh);
-	JBUFFER_TRACE(jh, "entry");
-
-	/*
-	 * The buffer could *already* be dirty.  Writeout can start
-	 * at any time.
-	 */
-	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);
-
-	/*
-	 * What if the buffer is already part of a running transaction?
-	 *
-	 * There are two cases:
-	 * 1) It is part of the current running transaction.  Refile it,
-	 *    just in case we have allocated it as metadata, deallocated
-	 *    it, then reallocated it as data.
-	 * 2) It is part of the previous, still-committing transaction.
-	 *    If all we want to do is to guarantee that the buffer will be
-	 *    written to disk before this new transaction commits, then
-	 *    being sure that the *previous* transaction has this same
-	 *    property is sufficient for us!  Just leave it on its old
-	 *    transaction.
-	 *
-	 * In case (2), the buffer must not already exist as metadata
-	 * --- that would violate write ordering (a transaction is free
-	 * to write its data at any point, even before the previous
-	 * committing transaction has committed).  The caller must
-	 * never, ever allow this to happen: there's nothing we can do
-	 * about it in this layer.
-	 */
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	/* Now that we have bh_state locked, are we really still mapped? */
-	if (!buffer_mapped(bh)) {
-		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
-		goto no_journal;
-	}
-
-	if (jh->b_transaction) {
-		JBUFFER_TRACE(jh, "has transaction");
-		if (jh->b_transaction != handle->h_transaction) {
-			JBUFFER_TRACE(jh, "belongs to older transaction");
-			J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-
-			/* @@@ IS THIS TRUE  ? */
-			/*
-			 * Not any more.  Scenario: someone does a write()
-			 * in data=journal mode.  The buffer's transaction has
-			 * moved into commit.  Then someone does another
-			 * write() to the file.  We do the frozen data copyout
-			 * and set b_next_transaction to point to j_running_t.
-			 * And while we're in that state, someone does a
-			 * writepage() in an attempt to pageout the same area
-			 * of the file via a shared mapping.  At present that
-			 * calls journal_dirty_data(), and we get right here.
-			 * It may be too late to journal the data.  Simply
-			 * falling through to the next test will suffice: the
-			 * data will be dirty and will be checkpointed.  The
-			 * ordering comments in the next comment block still
-			 * apply.
-			 */
-			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
-
-			/*
-			 * If we're journalling data, and this buffer was
-			 * subject to a write(), it could be metadata, forget
-			 * or shadow against the committing transaction.  Now,
-			 * someone has dirtied the same darn page via a mapping
-			 * and it is being writepage()'d.
-			 * We *could* just steal the page from commit, with some
-			 * fancy locking there.  Instead, we just skip it -
-			 * don't tie the page's buffers to the new transaction
-			 * at all.
-			 * Implication: if we crash before the writepage() data
-			 * is written into the filesystem, recovery will replay
-			 * the write() data.
-			 */
-			if (jh->b_jlist != BJ_None &&
-					jh->b_jlist != BJ_SyncData &&
-					jh->b_jlist != BJ_Locked) {
-				JBUFFER_TRACE(jh, "Not stealing");
-				goto no_journal;
-			}
-
-			/*
-			 * This buffer may be undergoing writeout in commit.  We
-			 * can't return from here and let the caller dirty it
-			 * again because that can cause the write-out loop in
-			 * commit to never terminate.
-			 */
-			if (buffer_dirty(bh)) {
-				get_bh(bh);
-				spin_unlock(&journal->j_list_lock);
-				jbd_unlock_bh_state(bh);
-				need_brelse = 1;
-				sync_dirty_buffer(bh);
-				jbd_lock_bh_state(bh);
-				spin_lock(&journal->j_list_lock);
-				/* Since we dropped the lock... */
-				if (!buffer_mapped(bh)) {
-					JBUFFER_TRACE(jh, "buffer got unmapped");
-					goto no_journal;
-				}
-				/* The buffer may become locked again at any
-				   time if it is redirtied */
-			}
-
-			/*
-			 * We cannot remove the buffer with io error from the
-			 * committing transaction, because otherwise it would
-			 * miss the error and the commit would not abort.
-			 */
-			if (unlikely(!buffer_uptodate(bh))) {
-				ret = -EIO;
-				goto no_journal;
-			}
-			/* We might have slept so buffer could be refiled now */
-			if (jh->b_transaction != NULL &&
-			    jh->b_transaction != handle->h_transaction) {
-				JBUFFER_TRACE(jh, "unfile from commit");
-				__journal_temp_unlink_buffer(jh);
-				/* It still points to the committing
-				 * transaction; move it to this one so
-				 * that the refile assert checks are
-				 * happy. */
-				jh->b_transaction = handle->h_transaction;
-			}
-			/* The buffer will be refiled below */
-
-		}
-		/*
-		 * Special case --- the buffer might actually have been
-		 * allocated and then immediately deallocated in the previous,
-		 * committing transaction, so might still be left on that
-		 * transaction's metadata lists.
-		 */
-		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
-			JBUFFER_TRACE(jh, "not on correct data list: unfile");
-			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
-			JBUFFER_TRACE(jh, "file as data");
-			__journal_file_buffer(jh, handle->h_transaction,
-						BJ_SyncData);
-		}
-	} else {
-		JBUFFER_TRACE(jh, "not on a transaction");
-		__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
-	}
-no_journal:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	if (need_brelse) {
-		BUFFER_TRACE(bh, "brelse");
-		__brelse(bh);
-	}
-	JBUFFER_TRACE(jh, "exit");
-	journal_put_journal_head(jh);
-	return ret;
-}
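
A hedged sketch of how ordered-mode code wires a page's data buffers to the running transaction; the page is assumed to be locked and to already have buffers attached:

#include <linux/jbd.h>
#include <linux/buffer_head.h>

static int example_order_page_data(handle_t *handle, struct page *page)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int err = 0;

	do {
		/* Files the buffer on BJ_SyncData unless commit owns it. */
		err = journal_dirty_data(handle, bh);
		if (err)
			break;
		bh = bh->b_this_page;
	} while (bh != head);

	return err;
}
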
-
-/**
- * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
- * @handle: transaction to add buffer to.
- * @bh: buffer to mark
- *
- * Mark dirty metadata which needs to be journaled as part of the current
- * transaction.
- *
- * The buffer is placed on the transaction's metadata list and is marked
- * as belonging to the transaction.
- *
- * Returns error number or 0 on success.
- *
- * Special care needs to be taken if the buffer already belongs to the
- * current committing transaction (in which case we should have frozen
- * data present for that commit).  In that case, we don't relink the
- * buffer: that only gets done when the old transaction finally
- * completes its commit.
- */
-int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh = bh2jh(bh);
-
-	jbd_debug(5, "journal_head %p\n", jh);
-	JBUFFER_TRACE(jh, "entry");
-	if (is_handle_aborted(handle))
-		goto out;
-
-	jbd_lock_bh_state(bh);
-
-	if (jh->b_modified == 0) {
-		/*
-		 * This buffer has been modified and is becoming part
-		 * of the transaction. This needs to be done
-		 * once per transaction -bzzz
-		 */
-		jh->b_modified = 1;
-		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
-		handle->h_buffer_credits--;
-	}
-
-	/*
-	 * fastpath, to avoid expensive locking.  If this buffer is already
-	 * on the running transaction's metadata list there is nothing to do.
-	 * Nobody can take it off again because there is a handle open.
-	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
-	 * result in this test being false, so we go in and take the locks.
-	 */
-	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
-		JBUFFER_TRACE(jh, "fastpath");
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_running_transaction);
-		goto out_unlock_bh;
-	}
-
-	set_buffer_jbddirty(bh);
-
-	/*
-	 * Metadata already on the current transaction list doesn't
-	 * need to be filed.  Metadata on another transaction's list must
-	 * be committing, and will be refiled once the commit completes:
-	 * leave it alone for now.
-	 */
-	if (jh->b_transaction != transaction) {
-		JBUFFER_TRACE(jh, "already on other transaction");
-		J_ASSERT_JH(jh, jh->b_transaction ==
-					journal->j_committing_transaction);
-		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
-		/* And this case is illegal: we can't reuse another
-		 * transaction's data buffer, ever. */
-		goto out_unlock_bh;
-	}
-
-	/* That test should have eliminated the following case: */
-	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
-
-	JBUFFER_TRACE(jh, "file as BJ_Metadata");
-	spin_lock(&journal->j_list_lock);
-	__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
-	spin_unlock(&journal->j_list_lock);
-out_unlock_bh:
-	jbd_unlock_bh_state(bh);
-out:
-	JBUFFER_TRACE(jh, "exit");
-	return 0;
-}
-
-/*
- * journal_release_buffer: undo a get_write_access without any buffer
- * updates, if the update decided in the end that it didn't need access.
- *
- */
-void
-journal_release_buffer(handle_t *handle, struct buffer_head *bh)
-{
-	BUFFER_TRACE(bh, "entry");
-}
-
-/**
- * void journal_forget() - bforget() for potentially-journaled buffers.
- * @handle: transaction handle
- * @bh:     bh to 'forget'
- *
- * We can only do the bforget if there are no commits pending against the
- * buffer.  If the buffer is dirty in the current running transaction we
- * can safely unlink it.
- *
- * bh may not be a journalled buffer at all - it may be a non-JBD
- * buffer which came off the hashtable.  Check for this.
- *
- * Decrements bh->b_count by one.
- *
- * Allow this call even if the handle has aborted --- it may be part of
- * the caller's cleanup after an abort.
- */
-int journal_forget (handle_t *handle, struct buffer_head *bh)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	struct journal_head *jh;
-	int drop_reserve = 0;
-	int err = 0;
-	int was_modified = 0;
-
-	BUFFER_TRACE(bh, "entry");
-
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	if (!buffer_jbd(bh))
-		goto not_jbd;
-	jh = bh2jh(bh);
-
-	/* Critical error: attempting to delete a bitmap buffer, maybe?
-	 * Don't do any jbd operations, and return an error. */
-	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
-			 "inconsistent data on disk")) {
-		err = -EIO;
-		goto not_jbd;
-	}
-
-	/* keep track of whether or not this transaction modified us */
-	was_modified = jh->b_modified;
-
-	/*
-	 * The buffer's going from the transaction, we must drop
-	 * all references -bzzz
-	 */
-	jh->b_modified = 0;
-
-	if (jh->b_transaction == handle->h_transaction) {
-		J_ASSERT_JH(jh, !jh->b_frozen_data);
-
-		/* If we are forgetting a buffer which is already part
-		 * of this transaction, then we can just drop it from
-		 * the transaction immediately. */
-		clear_buffer_dirty(bh);
-		clear_buffer_jbddirty(bh);
-
-		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
-
-		/*
-		 * we only want to drop a reference if this transaction
-		 * modified the buffer
-		 */
-		if (was_modified)
-			drop_reserve = 1;
-
-		/*
-		 * We are no longer going to journal this buffer.
-		 * However, the commit of this transaction is still
-		 * important to the buffer: the delete that we are now
-		 * processing might obsolete an old log entry, so by
-		 * committing, we can satisfy the buffer's checkpoint.
-		 *
-		 * So, if we have a checkpoint on the buffer, we should
-		 * now refile the buffer on our BJ_Forget list so that
-		 * we know to remove the checkpoint after we commit.
-		 */
-
-		if (jh->b_cp_transaction) {
-			__journal_temp_unlink_buffer(jh);
-			__journal_file_buffer(jh, transaction, BJ_Forget);
-		} else {
-			__journal_unfile_buffer(jh);
-			if (!buffer_jbd(bh)) {
-				spin_unlock(&journal->j_list_lock);
-				jbd_unlock_bh_state(bh);
-				__bforget(bh);
-				goto drop;
-			}
-		}
-	} else if (jh->b_transaction) {
-		J_ASSERT_JH(jh, (jh->b_transaction ==
-				 journal->j_committing_transaction));
-		/* However, if the buffer is still owned by a prior
-		 * (committing) transaction, we can't drop it yet... */
-		JBUFFER_TRACE(jh, "belongs to older transaction");
-		/* ... but we CAN drop it from the new transaction if we
-		 * have also modified it since the original commit. */
-
-		if (jh->b_next_transaction) {
-			J_ASSERT(jh->b_next_transaction == transaction);
-			jh->b_next_transaction = NULL;
-
-			/*
-			 * only drop a reference if this transaction modified
-			 * the buffer
-			 */
-			if (was_modified)
-				drop_reserve = 1;
-		}
-	}
-
-not_jbd:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	__brelse(bh);
-drop:
-	if (drop_reserve) {
-		/* no need to reserve log space for this block -bzzz */
-		handle->h_buffer_credits++;
-	}
-	return err;
-}
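
Illustrative usage when a filesystem frees a metadata block inside a handle; the hand-off back to the block allocator is only indicated by a comment and is not part of this patch:

#include <linux/jbd.h>
#include <linux/buffer_head.h>

static int example_free_metadata_block(handle_t *handle,
				       struct buffer_head *bh)
{
	int err;

	/* Consumes one reference to bh, like bforget() would. */
	err = journal_forget(handle, bh);

	/* ...the block would now be returned to the allocator... */
	return err;
}
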
-
-/**
- * int journal_stop() - complete a transaction
- * @handle: transaction to complete.
- *
- * All done for a particular handle.
- *
- * There is not much action needed here.  We just return any remaining
- * buffer credits to the transaction and remove the handle.  The only
- * complication is that we need to start a commit operation if the
- * filesystem is marked for synchronous update.
- *
- * journal_stop itself will not usually return an error, but it may
- * do so in unusual circumstances.  In particular, expect it to
- * return -EIO if a journal_abort has been executed since the
- * transaction began.
- */
-int journal_stop(handle_t *handle)
-{
-	transaction_t *transaction = handle->h_transaction;
-	journal_t *journal = transaction->t_journal;
-	int err;
-	pid_t pid;
-
-	J_ASSERT(journal_current_handle() == handle);
-
-	if (is_handle_aborted(handle))
-		err = -EIO;
-	else {
-		J_ASSERT(transaction->t_updates > 0);
-		err = 0;
-	}
-
-	if (--handle->h_ref > 0) {
-		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
-			  handle->h_ref);
-		return err;
-	}
-
-	jbd_debug(4, "Handle %p going down\n", handle);
-
-	/*
-	 * Implement synchronous transaction batching.  If the handle
-	 * was synchronous, don't force a commit immediately.  Let's
-	 * yield and let another thread piggyback onto this transaction.
-	 * Keep doing that while new threads continue to arrive.
-	 * It doesn't cost much - we're about to run a commit and sleep
-	 * on IO anyway.  Speeds up many-threaded, many-dir operations
-	 * by 30x or more...
-	 *
-	 * We try and optimize the sleep time against what the underlying disk
-	 * can do, instead of having a static sleep time.  This is useful for
-	 * the case where our storage is so fast that it is more optimal to go
-	 * ahead and force a flush and wait for the transaction to be committed
-	 * than it is to wait for an arbitrary amount of time for new writers to
-	 * join the transaction.  We achieve this by measuring how long it takes
-	 * to commit a transaction, and compare it with how long this
-	 * transaction has been running, and if run time < commit time then we
-	 * sleep for the delta and commit.  This greatly helps super fast disks
-	 * that would see slowdowns as more threads started doing fsyncs.
-	 *
-	 * But don't do this if this process was the most recent one to
-	 * perform a synchronous write.  We do this to detect the case where a
-	 * single process is doing a stream of sync writes.  No point in waiting
-	 * for joiners in that case.
-	 */
-	pid = current->pid;
-	if (handle->h_sync && journal->j_last_sync_writer != pid) {
-		u64 commit_time, trans_time;
-
-		journal->j_last_sync_writer = pid;
-
-		spin_lock(&journal->j_state_lock);
-		commit_time = journal->j_average_commit_time;
-		spin_unlock(&journal->j_state_lock);
-
-		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
-						   transaction->t_start_time));
-
-		commit_time = min_t(u64, commit_time,
-				    1000*jiffies_to_usecs(1));
-
-		if (trans_time < commit_time) {
-			ktime_t expires = ktime_add_ns(ktime_get(),
-						       commit_time);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
-		}
-	}
-
-	current->journal_info = NULL;
-	spin_lock(&journal->j_state_lock);
-	spin_lock(&transaction->t_handle_lock);
-	transaction->t_outstanding_credits -= handle->h_buffer_credits;
-	transaction->t_updates--;
-	if (!transaction->t_updates) {
-		wake_up(&journal->j_wait_updates);
-		if (journal->j_barrier_count)
-			wake_up(&journal->j_wait_transaction_locked);
-	}
-
-	/*
-	 * If the handle is marked SYNC, we need to set another commit
-	 * going!  We also want to force a commit if the current
-	 * transaction is occupying too much of the log, or if the
-	 * transaction is too old now.
-	 */
-	if (handle->h_sync ||
-			transaction->t_outstanding_credits >
-				journal->j_max_transaction_buffers ||
-			time_after_eq(jiffies, transaction->t_expires)) {
-		/* Do this even for aborted journals: an abort still
-		 * completes the commit thread, it just doesn't write
-		 * anything to disk. */
-		tid_t tid = transaction->t_tid;
-
-		spin_unlock(&transaction->t_handle_lock);
-		jbd_debug(2, "transaction too old, requesting commit for "
-					"handle %p\n", handle);
-		/* This is non-blocking */
-		__log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
-
-		/*
-		 * Special case: JFS_SYNC synchronous updates require us
-		 * to wait for the commit to complete.
-		 */
-		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
-			err = log_wait_commit(journal, tid);
-	} else {
-		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
-	}
-
-	lock_map_release(&handle->h_lockdep_map);
-
-	jbd_free_handle(handle);
-	return err;
-}
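
For reference, a caller that needs synchronous semantics simply marks the handle before stopping it, which makes journal_stop() start a commit and wait for it; the IS_SYNC() test below is illustrative:

#include <linux/jbd.h>
#include <linux/fs.h>

static int example_stop_handle(handle_t *handle, struct inode *inode)
{
	if (IS_SYNC(inode))
		handle->h_sync = 1;	/* commit (and wait) on journal_stop */

	return journal_stop(handle);
}
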
-
-/**
- * int journal_force_commit() - force any uncommitted transactions
- * @journal: journal to force
- *
- * For synchronous operations: force any uncommitted transactions
- * to disk.  May seem kludgy, but it reuses all the handle batching
- * code in a very simple manner.
- */
-int journal_force_commit(journal_t *journal)
-{
-	handle_t *handle;
-	int ret;
-
-	handle = journal_start(journal, 1);
-	if (IS_ERR(handle)) {
-		ret = PTR_ERR(handle);
-	} else {
-		handle->h_sync = 1;
-		ret = journal_stop(handle);
-	}
-	return ret;
-}
-
-/*
- *
- * List management code snippets: various functions for manipulating the
- * transaction buffer lists.
- *
- */
-
-/*
- * Append a buffer to a transaction list, given the transaction's list head
- * pointer.
- *
- * j_list_lock is held.
- *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
- */
-
-static inline void
-__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
-{
-	if (!*list) {
-		jh->b_tnext = jh->b_tprev = jh;
-		*list = jh;
-	} else {
-		/* Insert at the tail of the list to preserve order */
-		struct journal_head *first = *list, *last = first->b_tprev;
-		jh->b_tprev = last;
-		jh->b_tnext = first;
-		last->b_tnext = first->b_tprev = jh;
-	}
-}
-
-/*
- * Remove a buffer from a transaction list, given the transaction's list
- * head pointer.
- *
- * Called with j_list_lock held, and the journal may not be locked.
- *
- * jbd_lock_bh_state(jh2bh(jh)) is held.
- */
-
-static inline void
-__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
-{
-	if (*list == jh) {
-		*list = jh->b_tnext;
-		if (*list == jh)
-			*list = NULL;
-	}
-	jh->b_tprev->b_tnext = jh->b_tnext;
-	jh->b_tnext->b_tprev = jh->b_tprev;
-}
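
To make the list discipline concrete, here is a simplified, self-contained model of the same head-pointer circular list; the struct and function names are invented for illustration. Insertion always goes at the tail, so traversal from the head preserves filing order.

struct example_node {
	struct example_node *tnext, *tprev;
};

/* Same shape as __blist_add_buffer(): *list points at the head. */
static void example_blist_add(struct example_node **list,
			      struct example_node *n)
{
	if (!*list) {
		n->tnext = n->tprev = n;
		*list = n;
	} else {
		struct example_node *first = *list, *last = first->tprev;

		n->tprev = last;
		n->tnext = first;
		last->tnext = first->tprev = n;
	}
}

/* After adding A, B, C in that order: head is A, traversal A -> B -> C -> A. */
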
-
-/*
- * Remove a buffer from the appropriate transaction list.
- *
- * Note that this function can *change* the value of
- * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
- * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
- * is holding onto a copy of one of these pointers, it could go bad.
- * Generally the caller needs to re-read the pointer from the transaction_t.
- *
- * Called under j_list_lock.  The journal may not be locked.
- */
-static void __journal_temp_unlink_buffer(struct journal_head *jh)
-{
-	struct journal_head **list = NULL;
-	transaction_t *transaction;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	transaction = jh->b_transaction;
-	if (transaction)
-		assert_spin_locked(&transaction->t_journal->j_list_lock);
-
-	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
-	if (jh->b_jlist != BJ_None)
-		J_ASSERT_JH(jh, transaction != NULL);
-
-	switch (jh->b_jlist) {
-	case BJ_None:
-		return;
-	case BJ_SyncData:
-		list = &transaction->t_sync_datalist;
-		break;
-	case BJ_Metadata:
-		transaction->t_nr_buffers--;
-		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
-		list = &transaction->t_buffers;
-		break;
-	case BJ_Forget:
-		list = &transaction->t_forget;
-		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
-	case BJ_Shadow:
-		list = &transaction->t_shadow_list;
-		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
-	case BJ_Reserved:
-		list = &transaction->t_reserved_list;
-		break;
-	case BJ_Locked:
-		list = &transaction->t_locked_list;
-		break;
-	}
-
-	__blist_del_buffer(list, jh);
-	jh->b_jlist = BJ_None;
-	if (test_clear_buffer_jbddirty(bh))
-		mark_buffer_dirty(bh);	/* Expose it to the VM */
-}
-
-/*
- * Remove buffer from all transactions.
- *
- * Called with bh_state lock and j_list_lock
- *
- * jh and bh may be already freed when this function returns.
- */
-void __journal_unfile_buffer(struct journal_head *jh)
-{
-	__journal_temp_unlink_buffer(jh);
-	jh->b_transaction = NULL;
-	journal_put_journal_head(jh);
-}
-
-void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	/* Get reference so that buffer cannot be freed before we unlock it */
-	get_bh(bh);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	__journal_unfile_buffer(jh);
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	__brelse(bh);
-}
-
-/*
- * Called from journal_try_to_free_buffers().
- *
- * Called under jbd_lock_bh_state(bh)
- */
-static void
-__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
-{
-	struct journal_head *jh;
-
-	jh = bh2jh(bh);
-
-	if (buffer_locked(bh) || buffer_dirty(bh))
-		goto out;
-
-	if (jh->b_next_transaction != NULL)
-		goto out;
-
-	spin_lock(&journal->j_list_lock);
-	if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
-		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
-			/* A written-back ordered data buffer */
-			JBUFFER_TRACE(jh, "release data");
-			__journal_unfile_buffer(jh);
-		}
-	} else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
-		/* written-back checkpointed metadata buffer */
-		if (jh->b_jlist == BJ_None) {
-			JBUFFER_TRACE(jh, "remove from checkpoint list");
-			__journal_remove_checkpoint(jh);
-		}
-	}
-	spin_unlock(&journal->j_list_lock);
-out:
-	return;
-}
-
-/**
- * int journal_try_to_free_buffers() - try to free page buffers.
- * @journal: journal for operation
- * @page: to try and free
- * @gfp_mask: we use the mask to detect how hard we should try to release
- * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to
- * release the buffers.
- *
- *
- * For all the buffers on this page,
- * if they are fully written out ordered data, move them onto BUF_CLEAN
- * so try_to_free_buffers() can reap them.
- *
- * This function returns non-zero if we wish try_to_free_buffers()
- * to be called. We do this if the page is releasable by try_to_free_buffers().
- * We also do it if the page has locked or dirty buffers and the caller wants
- * us to perform sync or async writeout.
- *
- * This complicates JBD locking somewhat.  We aren't protected by the
- * BKL here.  We wish to remove the buffer from its committing or
- * running transaction's ->t_datalist via __journal_unfile_buffer.
- *
- * This may *change* the value of transaction_t->t_datalist, so anyone
- * who looks at t_datalist needs to lock against this function.
- *
- * Even worse, someone may be doing a journal_dirty_data on this
- * buffer.  So we need to lock against that.  journal_dirty_data()
- * will come out of the lock with the buffer dirty, which makes it
- * ineligible for release here.
- *
- * Who else is affected by this?  hmm...  Really the only contender
- * is do_get_write_access() - it could be looking at the buffer while
- * journal_try_to_free_buffer() is changing its state.  But that
- * cannot happen because we never reallocate freed data as metadata
- * while the data is part of a transaction.  Yes?
- *
- * Return 0 on failure, 1 on success
- */
-int journal_try_to_free_buffers(journal_t *journal,
-				struct page *page, gfp_t gfp_mask)
-{
-	struct buffer_head *head;
-	struct buffer_head *bh;
-	int ret = 0;
-
-	J_ASSERT(PageLocked(page));
-
-	head = page_buffers(page);
-	bh = head;
-	do {
-		struct journal_head *jh;
-
-		/*
-		 * We take our own ref against the journal_head here to avoid
-		 * having to add tons of locking around each instance of
-		 * journal_put_journal_head().
-		 */
-		jh = journal_grab_journal_head(bh);
-		if (!jh)
-			continue;
-
-		jbd_lock_bh_state(bh);
-		__journal_try_to_free_buffer(journal, bh);
-		journal_put_journal_head(jh);
-		jbd_unlock_bh_state(bh);
-		if (buffer_jbd(bh))
-			goto busy;
-	} while ((bh = bh->b_this_page) != head);
-
-	ret = try_to_free_buffers(page);
-
-busy:
-	return ret;
-}
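
A hedged sketch of the typical caller, a filesystem's ->releasepage() implementation; the journal pointer is assumed to come from the filesystem's own superblock info and may be NULL before the journal is set up:

#include <linux/jbd.h>
#include <linux/buffer_head.h>

static int example_releasepage(journal_t *journal, struct page *page,
			       gfp_t gfp_mask)
{
	if (!journal)
		return try_to_free_buffers(page);

	/* Non-zero only if every buffer could be detached from JBD. */
	return journal_try_to_free_buffers(journal, page, gfp_mask);
}
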
-
-/*
- * This buffer is no longer needed.  If it is on an older transaction's
- * checkpoint list we need to record it on this transaction's forget list
- * to pin this buffer (and hence its checkpointing transaction) down until
- * this transaction commits.  If the buffer isn't on a checkpoint list, we
- * release it.
- * Returns non-zero if JBD no longer has an interest in the buffer.
- *
- * Called under j_list_lock.
- *
- * Called under jbd_lock_bh_state(bh).
- */
-static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
-{
-	int may_free = 1;
-	struct buffer_head *bh = jh2bh(jh);
-
-	if (jh->b_cp_transaction) {
-		JBUFFER_TRACE(jh, "on running+cp transaction");
-		__journal_temp_unlink_buffer(jh);
-		/*
-		 * We don't want to write the buffer anymore, clear the
-		 * bit so that we don't confuse checks in
-		 * __journal_file_buffer
-		 */
-		clear_buffer_dirty(bh);
-		__journal_file_buffer(jh, transaction, BJ_Forget);
-		may_free = 0;
-	} else {
-		JBUFFER_TRACE(jh, "on running transaction");
-		__journal_unfile_buffer(jh);
-	}
-	return may_free;
-}
-
-/*
- * journal_invalidatepage
- *
- * This code is tricky.  It has a number of cases to deal with.
- *
- * There are two invariants which this code relies on:
- *
- * i_size must be updated on disk before we start calling invalidatepage on the
- * data.
- *
- *  This is done in ext3 by defining an ext3_setattr method which
- *  updates i_size before truncate gets going.  By maintaining this
- *  invariant, we can be sure that it is safe to throw away any buffers
- *  attached to the current transaction: once the transaction commits,
- *  we know that the data will not be needed.
- *
- *  Note however that we can *not* throw away data belonging to the
- *  previous, committing transaction!
- *
- * Any disk blocks which *are* part of the previous, committing
- * transaction (and which therefore cannot be discarded immediately) are
- * not going to be reused in the new running transaction
- *
- *  The bitmap committed_data images guarantee this: any block which is
- *  allocated in one transaction and removed in the next will be marked
- *  as in-use in the committed_data bitmap, so cannot be reused until
- *  the next transaction to delete the block commits.  This means that
- *  leaving committing buffers dirty is quite safe: the disk blocks
- *  cannot be reallocated to a different file and so buffer aliasing is
- *  not possible.
- *
- *
- * The above applies mainly to ordered data mode.  In writeback mode we
- * don't make guarantees about the order in which data hits disk --- in
- * particular we don't guarantee that new dirty data is flushed before
- * transaction commit --- so it is always safe just to discard data
- * immediately in that mode.  --sct
- */
-
-/*
- * The journal_unmap_buffer helper function returns zero if the buffer
- * concerned remains pinned as an anonymous buffer belonging to an older
- * transaction.
- *
- * We're outside-transaction here.  Either or both of j_running_transaction
- * and j_committing_transaction may be NULL.
- */
-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
-				int partial_page)
-{
-	transaction_t *transaction;
-	struct journal_head *jh;
-	int may_free = 1;
-
-	BUFFER_TRACE(bh, "entry");
-
-retry:
-	/*
-	 * It is safe to proceed here without the j_list_lock because the
-	 * buffers cannot be stolen by try_to_free_buffers as long as we are
-	 * holding the page lock. --sct
-	 */
-
-	if (!buffer_jbd(bh))
-		goto zap_buffer_unlocked;
-
-	spin_lock(&journal->j_state_lock);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-
-	jh = journal_grab_journal_head(bh);
-	if (!jh)
-		goto zap_buffer_no_jh;
-
-	/*
-	 * We cannot remove the buffer from checkpoint lists until the
-	 * transaction adding inode to orphan list (let's call it T)
-	 * is committed.  Otherwise if the transaction changing the
-	 * buffer would be cleaned from the journal before T is
-	 * committed, a crash will cause that the correct contents of
-	 * the buffer will be lost.  On the other hand we have to
-	 * clear the buffer dirty bit at latest at the moment when the
-	 * transaction marking the buffer as freed in the filesystem
-	 * structures is committed because from that moment on the
-	 * block can be reallocated and used by a different page.
-	 * Since the block hasn't been freed yet but the inode has
-	 * already been added to orphan list, it is safe for us to add
-	 * the buffer to BJ_Forget list of the newest transaction.
-	 *
-	 * Also we have to clear buffer_mapped flag of a truncated buffer
-	 * because the buffer_head may be attached to the page straddling
-	 * i_size (can happen only when blocksize < pagesize) and thus the
-	 * buffer_head can be reused when the file is extended again. So we end
-	 * up keeping around invalidated buffers attached to transactions'
-	 * BJ_Forget list just to stop checkpointing code from cleaning up
-	 * the transaction this buffer was modified in.
-	 */
-	transaction = jh->b_transaction;
-	if (transaction == NULL) {
-		/* First case: not on any transaction.  If it
-		 * has no checkpoint link, then we can zap it:
-		 * it's a writeback-mode buffer so we don't care
-		 * if it hits disk safely. */
-		if (!jh->b_cp_transaction) {
-			JBUFFER_TRACE(jh, "not on any transaction: zap");
-			goto zap_buffer;
-		}
-
-		if (!buffer_dirty(bh)) {
-			/* bdflush has written it.  We can drop it now */
-			goto zap_buffer;
-		}
-
-		/* OK, it must be in the journal but still not
-		 * written fully to disk: it's metadata or
-		 * journaled data... */
-
-		if (journal->j_running_transaction) {
-			/* ... and once the current transaction has
-			 * committed, the buffer won't be needed any
-			 * longer. */
-			JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
-			may_free = __dispose_buffer(jh,
-					journal->j_running_transaction);
-			goto zap_buffer;
-		} else {
-			/* There is no currently-running transaction. So the
-			 * orphan record which we wrote for this file must have
-			 * passed into commit.  We must attach this buffer to
-			 * the committing transaction, if it exists. */
-			if (journal->j_committing_transaction) {
-				JBUFFER_TRACE(jh, "give to committing trans");
-				may_free = __dispose_buffer(jh,
-					journal->j_committing_transaction);
-				goto zap_buffer;
-			} else {
-				/* The orphan record's transaction has
-				 * committed.  We can cleanse this buffer */
-				clear_buffer_jbddirty(bh);
-				goto zap_buffer;
-			}
-		}
-	} else if (transaction == journal->j_committing_transaction) {
-		JBUFFER_TRACE(jh, "on committing transaction");
-		if (jh->b_jlist == BJ_Locked) {
-			/*
-			 * The buffer is on the committing transaction's locked
-			 * list.  We have the buffer locked, so I/O has
-			 * completed.  So we can nail the buffer now.
-			 */
-			may_free = __dispose_buffer(jh, transaction);
-			goto zap_buffer;
-		}
-		/*
-		 * The buffer is committing, we simply cannot touch
-		 * it. If the page is straddling i_size we have to wait
-		 * for commit and try again.
-		 */
-		if (partial_page) {
-			tid_t tid = journal->j_committing_transaction->t_tid;
-
-			journal_put_journal_head(jh);
-			spin_unlock(&journal->j_list_lock);
-			jbd_unlock_bh_state(bh);
-			spin_unlock(&journal->j_state_lock);
-			unlock_buffer(bh);
-			log_wait_commit(journal, tid);
-			lock_buffer(bh);
-			goto retry;
-		}
-		/*
-		 * OK, buffer won't be reachable after truncate. We just set
-		 * j_next_transaction to the running transaction (if there is
-		 * one) and mark buffer as freed so that commit code knows it
-		 * should clear dirty bits when it is done with the buffer.
-		 */
-		set_buffer_freed(bh);
-		if (journal->j_running_transaction && buffer_jbddirty(bh))
-			jh->b_next_transaction = journal->j_running_transaction;
-		journal_put_journal_head(jh);
-		spin_unlock(&journal->j_list_lock);
-		jbd_unlock_bh_state(bh);
-		spin_unlock(&journal->j_state_lock);
-		return 0;
-	} else {
-		/* Good, the buffer belongs to the running transaction.
-		 * We are writing our own transaction's data, not any
-		 * previous one's, so it is safe to throw it away
-		 * (remember that we expect the filesystem to have set
-		 * i_size already for this truncate so recovery will not
-		 * expose the disk blocks we are discarding here.) */
-		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
-		JBUFFER_TRACE(jh, "on running transaction");
-		may_free = __dispose_buffer(jh, transaction);
-	}
-
-zap_buffer:
-	/*
-	 * This is tricky. Although the buffer is truncated, it may be reused
-	 * if blocksize < pagesize and it is attached to the page straddling
-	 * EOF. Since the buffer might have been added to BJ_Forget list of the
-	 * running transaction, journal_get_write_access() won't clear
-	 * b_modified and credit accounting gets confused. So clear b_modified
-	 * here. */
-	jh->b_modified = 0;
-	journal_put_journal_head(jh);
-zap_buffer_no_jh:
-	spin_unlock(&journal->j_list_lock);
-	jbd_unlock_bh_state(bh);
-	spin_unlock(&journal->j_state_lock);
-zap_buffer_unlocked:
-	clear_buffer_dirty(bh);
-	J_ASSERT_BH(bh, !buffer_jbddirty(bh));
-	clear_buffer_mapped(bh);
-	clear_buffer_req(bh);
-	clear_buffer_new(bh);
-	bh->b_bdev = NULL;
-	return may_free;
-}
-
-/**
- * void journal_invalidatepage() - invalidate a journal page
- * @journal: journal to use for flush
- * @page:    page to flush
- * @offset:  offset of the range to invalidate
- * @length:  length of the range to invalidate
- *
- * Reap page buffers containing data in specified range in page.
- */
-void journal_invalidatepage(journal_t *journal,
-		      struct page *page,
-		      unsigned int offset,
-		      unsigned int length)
-{
-	struct buffer_head *head, *bh, *next;
-	unsigned int stop = offset + length;
-	unsigned int curr_off = 0;
-	int partial_page = (offset || length < PAGE_CACHE_SIZE);
-	int may_free = 1;
-
-	if (!PageLocked(page))
-		BUG();
-	if (!page_has_buffers(page))
-		return;
-
-	BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
-
-	/* We will potentially be playing with lists other than just the
-	 * data lists (especially for journaled data mode), so be
-	 * cautious in our locking. */
-
-	head = bh = page_buffers(page);
-	do {
-		unsigned int next_off = curr_off + bh->b_size;
-		next = bh->b_this_page;
-
-		if (next_off > stop)
-			return;
-
-		if (offset <= curr_off) {
-			/* This block is wholly outside the truncation point */
-			lock_buffer(bh);
-			may_free &= journal_unmap_buffer(journal, bh,
-							 partial_page);
-			unlock_buffer(bh);
-		}
-		curr_off = next_off;
-		bh = next;
-
-	} while (bh != head);
-
-	if (!partial_page) {
-		if (may_free && try_to_free_buffers(page))
-			J_ASSERT(!page_has_buffers(page));
-	}
-}
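
A hedged sketch of the truncate-side caller: 'partial' is the in-page offset of the new EOF (zero when the whole page goes away), and the page lock plus the on-disk i_size update are assumed to have been handled already:

#include <linux/jbd.h>
#include <linux/pagemap.h>

static void example_truncate_page(journal_t *journal, struct page *page,
				  unsigned int partial)
{
	/*
	 * Buffers wholly beyond the new EOF are zapped; with a non-zero
	 * 'partial' the partial_page path may wait for a commit first.
	 */
	journal_invalidatepage(journal, page, partial,
			       PAGE_CACHE_SIZE - partial);
}
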
-
-/*
- * File a buffer on the given transaction list.
- */
-void __journal_file_buffer(struct journal_head *jh,
-			transaction_t *transaction, int jlist)
-{
-	struct journal_head **list = NULL;
-	int was_dirty = 0;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	assert_spin_locked(&transaction->t_journal->j_list_lock);
-
-	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
-	J_ASSERT_JH(jh, jh->b_transaction == transaction ||
-				jh->b_transaction == NULL);
-
-	if (jh->b_transaction && jh->b_jlist == jlist)
-		return;
-
-	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
-	    jlist == BJ_Shadow || jlist == BJ_Forget) {
-		/*
-		 * For metadata buffers, we track dirty bit in buffer_jbddirty
-		 * instead of buffer_dirty. We should not see a dirty bit set
-		 * here because we clear it in do_get_write_access but e.g.
-		 * tune2fs can modify the sb and set the dirty bit at any time
-		 * so we try to gracefully handle that.
-		 */
-		if (buffer_dirty(bh))
-			warn_dirty_buffer(bh);
-		if (test_clear_buffer_dirty(bh) ||
-		    test_clear_buffer_jbddirty(bh))
-			was_dirty = 1;
-	}
-
-	if (jh->b_transaction)
-		__journal_temp_unlink_buffer(jh);
-	else
-		journal_grab_journal_head(bh);
-	jh->b_transaction = transaction;
-
-	switch (jlist) {
-	case BJ_None:
-		J_ASSERT_JH(jh, !jh->b_committed_data);
-		J_ASSERT_JH(jh, !jh->b_frozen_data);
-		return;
-	case BJ_SyncData:
-		list = &transaction->t_sync_datalist;
-		break;
-	case BJ_Metadata:
-		transaction->t_nr_buffers++;
-		list = &transaction->t_buffers;
-		break;
-	case BJ_Forget:
-		list = &transaction->t_forget;
-		break;
-	case BJ_IO:
-		list = &transaction->t_iobuf_list;
-		break;
-	case BJ_Shadow:
-		list = &transaction->t_shadow_list;
-		break;
-	case BJ_LogCtl:
-		list = &transaction->t_log_list;
-		break;
-	case BJ_Reserved:
-		list = &transaction->t_reserved_list;
-		break;
-	case BJ_Locked:
-		list =  &transaction->t_locked_list;
-		break;
-	}
-
-	__blist_add_buffer(list, jh);
-	jh->b_jlist = jlist;
-
-	if (was_dirty)
-		set_buffer_jbddirty(bh);
-}
-
-void journal_file_buffer(struct journal_head *jh,
-				transaction_t *transaction, int jlist)
-{
-	jbd_lock_bh_state(jh2bh(jh));
-	spin_lock(&transaction->t_journal->j_list_lock);
-	__journal_file_buffer(jh, transaction, jlist);
-	spin_unlock(&transaction->t_journal->j_list_lock);
-	jbd_unlock_bh_state(jh2bh(jh));
-}
-
-/*
- * Remove a buffer from its current buffer list in preparation for
- * dropping it from its current transaction entirely.  If the buffer has
- * already started to be used by a subsequent transaction, refile the
- * buffer on that transaction's metadata list.
- *
- * Called under j_list_lock
- * Called under jbd_lock_bh_state(jh2bh(jh))
- *
- * jh and bh may already be freed when this function returns
- */
-void __journal_refile_buffer(struct journal_head *jh)
-{
-	int was_dirty, jlist;
-	struct buffer_head *bh = jh2bh(jh);
-
-	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
-	if (jh->b_transaction)
-		assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
-
-	/* If the buffer is now unused, just drop it. */
-	if (jh->b_next_transaction == NULL) {
-		__journal_unfile_buffer(jh);
-		return;
-	}
-
-	/*
-	 * It has been modified by a later transaction: add it to the new
-	 * transaction's metadata list.
-	 */
-
-	was_dirty = test_clear_buffer_jbddirty(bh);
-	__journal_temp_unlink_buffer(jh);
-	/*
-	 * We set b_transaction here because b_next_transaction will inherit
-	 * our jh reference and thus __journal_file_buffer() must not take a
-	 * new one.
-	 */
-	jh->b_transaction = jh->b_next_transaction;
-	jh->b_next_transaction = NULL;
-	if (buffer_freed(bh))
-		jlist = BJ_Forget;
-	else if (jh->b_modified)
-		jlist = BJ_Metadata;
-	else
-		jlist = BJ_Reserved;
-	__journal_file_buffer(jh, jh->b_transaction, jlist);
-	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
-
-	if (was_dirty)
-		set_buffer_jbddirty(bh);
-}
-
-/*
- * __journal_refile_buffer() with necessary locking added. We take our bh
- * reference so that we can safely unlock bh.
- *
- * The jh and bh may be freed by this call.
- */
-void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
-{
-	struct buffer_head *bh = jh2bh(jh);
-
-	/* Get reference so that buffer cannot be freed before we unlock it */
-	get_bh(bh);
-	jbd_lock_bh_state(bh);
-	spin_lock(&journal->j_list_lock);
-	__journal_refile_buffer(jh);
-	jbd_unlock_bh_state(bh);
-	spin_unlock(&journal->j_list_lock);
-	__brelse(bh);
-}
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 4227dc4..8c44654 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -417,12 +417,12 @@
  * journal_clean_one_cp_list
  *
  * Find all the written-back checkpoint buffers in the given list and
- * release them.
+ * release them. If 'destroy' is set, clean all buffers unconditionally.
  *
  * Called with j_list_lock held.
  * Returns 1 if we freed the transaction, 0 otherwise.
  */
-static int journal_clean_one_cp_list(struct journal_head *jh)
+static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
 {
 	struct journal_head *last_jh;
 	struct journal_head *next_jh = jh;
@@ -436,7 +436,10 @@
 	do {
 		jh = next_jh;
 		next_jh = jh->b_cpnext;
-		ret = __try_to_free_cp_buf(jh);
+		if (!destroy)
+			ret = __try_to_free_cp_buf(jh);
+		else
+			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
 		if (!ret)
 			return freed;
 		if (ret == 2)
@@ -459,10 +462,11 @@
  * journal_clean_checkpoint_list
  *
  * Find all the written-back checkpoint buffers in the journal and release them.
+ * If 'destroy' is set, release all buffers unconditionally.
  *
  * Called with j_list_lock held.
  */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
 {
 	transaction_t *transaction, *last_transaction, *next_transaction;
 	int ret;
@@ -476,7 +480,8 @@
 	do {
 		transaction = next_transaction;
 		next_transaction = transaction->t_cpnext;
-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
+		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
+						destroy);
 		/*
 		 * This function only frees up some memory if possible so we
 		 * dont have an obligation to finish processing. Bail out if
@@ -492,7 +497,7 @@
 		 * we can possibly see not yet submitted buffers on io_list
 		 */
 		ret = journal_clean_one_cp_list(transaction->
-				t_checkpoint_io_list);
+				t_checkpoint_io_list, destroy);
 		if (need_resched())
 			return;
 		/*
@@ -506,6 +511,28 @@
 }
 
 /*
+ * Remove buffers from all checkpoint lists as journal is aborted and we just
+ * need to free memory
+ */
+void jbd2_journal_destroy_checkpoint(journal_t *journal)
+{
+	/*
+	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
+	 * early due to a need of rescheduling.
+	 */
+	while (1) {
+		spin_lock(&journal->j_list_lock);
+		if (!journal->j_checkpoint_transactions) {
+			spin_unlock(&journal->j_list_lock);
+			break;
+		}
+		__jbd2_journal_clean_checkpoint_list(journal, true);
+		spin_unlock(&journal->j_list_lock);
+		cond_resched();
+	}
+}
+
+/*
  * journal_remove_checkpoint: called after a buffer has been committed
  * to disk (either by being write-back flushed to disk, or being
  * committed to the log).
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index b73e021..362e5f6 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -510,7 +510,7 @@
 	 * frees some memory
 	 */
 	spin_lock(&journal->j_list_lock);
-	__jbd2_journal_clean_checkpoint_list(journal);
+	__jbd2_journal_clean_checkpoint_list(journal, false);
 	spin_unlock(&journal->j_list_lock);
 
 	jbd_debug(3, "JBD2: commit phase 1\n");
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 4ff3fad..8270fe9 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1456,7 +1456,7 @@
 	sb->s_errno    = cpu_to_be32(journal->j_errno);
 	read_unlock(&journal->j_state_lock);
 
-	jbd2_write_superblock(journal, WRITE_SYNC);
+	jbd2_write_superblock(journal, WRITE_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
@@ -1693,8 +1693,17 @@
 	while (journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
 		mutex_lock(&journal->j_checkpoint_mutex);
-		jbd2_log_do_checkpoint(journal);
+		err = jbd2_log_do_checkpoint(journal);
 		mutex_unlock(&journal->j_checkpoint_mutex);
+		/*
+		 * If checkpointing failed, just free the buffers to avoid
+		 * looping forever
+		 */
+		if (err) {
+			jbd2_journal_destroy_checkpoint(journal);
+			spin_lock(&journal->j_list_lock);
+			break;
+		}
 		spin_lock(&journal->j_list_lock);
 	}
 
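
Editor's note on the two jbd2/journal.c hunks above: the on-disk error record is now written with WRITE_FUA instead of WRITE_SYNC so it is pushed through any volatile write cache, and jbd2_journal_destroy() falls back to jbd2_journal_destroy_checkpoint() when checkpointing fails instead of looping forever. A minimal sketch of the FUA point follows; write_block_durably() is an illustrative helper, not a jbd2 function, and 'bh' is assumed to be a mapped, up-to-date buffer owned by the caller.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Sketch only: WRITE_SYNC makes the request synchronous, WRITE_FUA also
 * forces it onto stable media, so the recorded errno survives a power cut. */
static void write_block_durably(struct buffer_head *bh)
{
        lock_buffer(bh);
        get_bh(bh);                       /* reference held across the I/O */
        bh->b_end_io = end_buffer_write_sync;
        submit_bh(WRITE_FUA, bh);         /* data is on media at completion */
        wait_on_buffer(bh);
}
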
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index f3d0617..6b8338e 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -204,6 +204,20 @@
 		 * attach this handle to a new transaction.
 		 */
 		atomic_sub(total, &t->t_outstanding_credits);
+
+		/*
+		 * Is the number of reserved credits in the current transaction too
+		 * big to fit this handle? Wait until reserved credits are freed.
+		 */
+		if (atomic_read(&journal->j_reserved_credits) + total >
+		    journal->j_max_transaction_buffers) {
+			read_unlock(&journal->j_state_lock);
+			wait_event(journal->j_wait_reserved,
+				   atomic_read(&journal->j_reserved_credits) + total <=
+				   journal->j_max_transaction_buffers);
+			return 1;
+		}
+
 		wait_transaction_locked(journal);
 		return 1;
 	}
@@ -262,20 +276,24 @@
 	int		rsv_blocks = 0;
 	unsigned long ts = jiffies;
 
-	/*
-	 * 1/2 of transaction can be reserved so we can practically handle
-	 * only 1/2 of maximum transaction size per operation
-	 */
-	if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) {
-		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
-		       current->comm, blocks,
-		       journal->j_max_transaction_buffers / 2);
-		return -ENOSPC;
-	}
-
 	if (handle->h_rsv_handle)
 		rsv_blocks = handle->h_rsv_handle->h_buffer_credits;
 
+	/*
+	 * Limit the number of reserved credits to 1/2 of maximum transaction
+	 * size and limit the number of total credits to not exceed maximum
+	 * transaction size per operation.
+	 */
+	if ((rsv_blocks > journal->j_max_transaction_buffers / 2) ||
+	    (rsv_blocks + blocks > journal->j_max_transaction_buffers)) {
+		printk(KERN_ERR "JBD2: %s wants too many credits "
+		       "credits:%d rsv_credits:%d max:%d\n",
+		       current->comm, blocks, rsv_blocks,
+		       journal->j_max_transaction_buffers);
+		WARN_ON(1);
+		return -ENOSPC;
+	}
+
 alloc_transaction:
 	if (!journal->j_running_transaction) {
 		/*
@@ -1280,8 +1298,6 @@
 	triggers->t_abort(triggers, jh2bh(jh));
 }
 
-
-
 /**
  * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
  * @handle: transaction to add buffer to.
@@ -1314,12 +1330,41 @@
 
 	if (is_handle_aborted(handle))
 		return -EROFS;
-	journal = transaction->t_journal;
-	jh = jbd2_journal_grab_journal_head(bh);
-	if (!jh) {
+	if (!buffer_jbd(bh)) {
 		ret = -EUCLEAN;
 		goto out;
 	}
+	/*
+	 * We don't grab jh reference here since the buffer must be part
+	 * of the running transaction.
+	 */
+	jh = bh2jh(bh);
+	/*
+	 * This and the following assertions are unreliable since we may see jh
+	 * in inconsistent state unless we grab bh_state lock. But this is
+	 * crucial to catch bugs so let's do a reliable check until the
+	 * lockless handling is fully proven.
+	 */
+	if (jh->b_transaction != transaction &&
+	    jh->b_next_transaction != transaction) {
+		jbd_lock_bh_state(bh);
+		J_ASSERT_JH(jh, jh->b_transaction == transaction ||
+				jh->b_next_transaction == transaction);
+		jbd_unlock_bh_state(bh);
+	}
+	if (jh->b_modified == 1) {
+		/* If it's in our transaction it must be in BJ_Metadata list. */
+		if (jh->b_transaction == transaction &&
+		    jh->b_jlist != BJ_Metadata) {
+			jbd_lock_bh_state(bh);
+			J_ASSERT_JH(jh, jh->b_transaction != transaction ||
+					jh->b_jlist == BJ_Metadata);
+			jbd_unlock_bh_state(bh);
+		}
+		goto out;
+	}
+
+	journal = transaction->t_journal;
 	jbd_debug(5, "journal_head %p\n", jh);
 	JBUFFER_TRACE(jh, "entry");
 
@@ -1410,7 +1455,6 @@
 	spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
 	jbd_unlock_bh_state(bh);
-	jbd2_journal_put_journal_head(jh);
 out:
 	JBUFFER_TRACE(jh, "exit");
 	return ret;
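
Editor's note on the first jbd2/transaction.c hunks above: the old "at most half a transaction per handle" rule is replaced by two limits, reserved credits may take at most half of a transaction and the combined request must still fit into one transaction, and a handle that cannot proceed because too many credits are already reserved now sleeps on j_wait_reserved. A compact restatement of the new limit check as a hypothetical standalone helper (not a jbd2 symbol):

#include <stdbool.h>

/* Hypothetical helper restating the check added to start_this_handle(). */
static bool jbd2_request_fits(int blocks, int rsv_blocks,
                              int max_transaction_buffers)
{
        if (rsv_blocks > max_transaction_buffers / 2)
                return false;             /* reserved part alone is too large */
        return rsv_blocks + blocks <= max_transaction_buffers;
}
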
diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index b9dc23c..0e026a7 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -107,8 +107,11 @@
 	if (rc)
 		return rc;
 
-	if (is_quota_modification(inode, iattr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, iattr)) {
+		rc = dquot_initialize(inode);
+		if (rc)
+			return rc;
+	}
 	if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) ||
 	    (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) {
 		rc = dquot_transfer(inode, iattr);
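
Editor's note: this is the first of many call sites below (jfs, ocfs2, reiserfs) converted for the quota change further down in fs/quota/dquot.c, where dquot_initialize() gains an int return value. Callers must now check and propagate the error instead of ignoring it. A sketch of the resulting pattern; example_setattr_quota_step() is illustrative, not a jfs function.

#include <linux/fs.h>
#include <linux/quotaops.h>

static int example_setattr_quota_step(struct inode *inode, struct iattr *iattr)
{
        int rc = 0;

        if (is_quota_modification(inode, iattr))
                rc = dquot_initialize(inode);   /* may fail, e.g. -EIO/-ENOMEM */
        return rc;
}
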
diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c
index 6b0f816..cf7936f 100644
--- a/fs/jfs/jfs_inode.c
+++ b/fs/jfs/jfs_inode.c
@@ -109,7 +109,9 @@
 	/*
 	 * Allocate inode to quota.
 	 */
-	dquot_initialize(inode);
+	rc = dquot_initialize(inode);
+	if (rc)
+		goto fail_drop;
 	rc = dquot_alloc_inode(inode);
 	if (rc)
 		goto fail_drop;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index bc462dc..a69bdf2 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1999,19 +1999,16 @@
 
 	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
-	bio->bi_io_vec[0].bv_page = bp->l_page;
-	bio->bi_io_vec[0].bv_len = LOGPSIZE;
-	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
-	bio->bi_vcnt = 1;
-	bio->bi_iter.bi_size = LOGPSIZE;
+	bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
+	BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
 	/*check if journaling to disk has been disabled*/
 	if (log->no_integrity) {
 		bio->bi_iter.bi_size = 0;
-		lbmIODone(bio, 0);
+		lbmIODone(bio);
 	} else {
 		submit_bio(READ_SYNC, bio);
 	}
@@ -2145,12 +2142,9 @@
 	bio = bio_alloc(GFP_NOFS, 1);
 	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
-	bio->bi_io_vec[0].bv_page = bp->l_page;
-	bio->bi_io_vec[0].bv_len = LOGPSIZE;
-	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
-	bio->bi_vcnt = 1;
-	bio->bi_iter.bi_size = LOGPSIZE;
+	bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
+	BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
@@ -2158,7 +2152,7 @@
 	/* check if journaling to disk has been disabled */
 	if (log->no_integrity) {
 		bio->bi_iter.bi_size = 0;
-		lbmIODone(bio, 0);
+		lbmIODone(bio);
 	} else {
 		submit_bio(WRITE_SYNC, bio);
 		INCREMENT(lmStat.submitted);
@@ -2196,7 +2190,7 @@
  *
  * executed at INTIODONE level
  */
-static void lbmIODone(struct bio *bio, int error)
+static void lbmIODone(struct bio *bio)
 {
 	struct lbuf *bp = bio->bi_private;
 	struct lbuf *nextbp, *tail;
@@ -2212,7 +2206,7 @@
 
 	bp->l_flag |= lbmDONE;
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+	if (bio->bi_error) {
 		bp->l_flag |= lbmERROR;
 
 		jfs_err("lbmIODone: I/O error in JFS log");
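
Editor's note on the jfs log hunks above: they fold in two block-layer API updates from this merge, bio segments are filled with bio_add_page() instead of poking bi_io_vec by hand, and ->bi_end_io loses its error argument because the result now lives in bio->bi_error. A minimal sketch of the new shape; my_end_io(), submit_one_page() and their parameters are placeholders, not jfs symbols.

#include <linux/bio.h>
#include <linux/blkdev.h>

static void my_end_io(struct bio *bio)
{
        if (bio->bi_error)                /* 0 on success, negative errno on error */
                pr_err("example: I/O error %d\n", bio->bi_error);
        bio_put(bio);
}

/* Submit one page at 'sector' of 'bdev'; sketch only. */
static void submit_one_page(struct block_device *bdev, struct page *page,
                            sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOFS, 1);

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);   /* fills bi_io_vec/bi_vcnt/bi_size */
        bio->bi_end_io = my_end_io;
        submit_bio(WRITE, bio);
}
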
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 16a0922..a3eb316 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -276,11 +276,11 @@
 	unlock_page(page);
 }
 
-static void metapage_read_end_io(struct bio *bio, int err)
+static void metapage_read_end_io(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+	if (bio->bi_error) {
 		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
 		SetPageError(page);
 	}
@@ -331,13 +331,13 @@
 	end_page_writeback(page);
 }
 
-static void metapage_write_end_io(struct bio *bio, int err)
+static void metapage_write_end_io(struct bio *bio)
 {
 	struct page *page = bio->bi_private;
 
 	BUG_ON(!PagePrivate(page));
 
-	if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+	if (bio->bi_error) {
 		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
 		SetPageError(page);
 	}
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index a5ac97b..35976bd 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -86,7 +86,9 @@
 
 	jfs_info("jfs_create: dip:0x%p name:%pd", dip, dentry);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	/*
 	 * search parent directory for entry/freespace
@@ -218,7 +220,9 @@
 
 	jfs_info("jfs_mkdir: dip:0x%p name:%pd", dip, dentry);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	/*
 	 * search parent directory for entry/freespace
@@ -355,8 +359,12 @@
 	jfs_info("jfs_rmdir: dip:0x%p name:%pd", dip, dentry);
 
 	/* Init inode for quota operations. */
-	dquot_initialize(dip);
-	dquot_initialize(ip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out;
+	rc = dquot_initialize(ip);
+	if (rc)
+		goto out;
 
 	/* directory must be empty to be removed */
 	if (!dtEmpty(ip)) {
@@ -483,8 +491,12 @@
 	jfs_info("jfs_unlink: dip:0x%p name:%pd", dip, dentry);
 
 	/* Init inode for quota operations. */
-	dquot_initialize(dip);
-	dquot_initialize(ip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out;
+	rc = dquot_initialize(ip);
+	if (rc)
+		goto out;
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
@@ -799,7 +811,9 @@
 
 	jfs_info("jfs_link: %pd %pd", old_dentry, dentry);
 
-	dquot_initialize(dir);
+	rc = dquot_initialize(dir);
+	if (rc)
+		goto out;
 
 	tid = txBegin(ip->i_sb, 0);
 
@@ -810,7 +824,7 @@
 	 * scan parent directory for entry/freespace
 	 */
 	if ((rc = get_UCSname(&dname, dentry)))
-		goto out;
+		goto out_tx;
 
 	if ((rc = dtSearch(dir, &dname, &ino, &btstack, JFS_CREATE)))
 		goto free_dname;
@@ -842,12 +856,13 @@
       free_dname:
 	free_UCSname(&dname);
 
-      out:
+      out_tx:
 	txEnd(tid);
 
 	mutex_unlock(&JFS_IP(ip)->commit_mutex);
 	mutex_unlock(&JFS_IP(dir)->commit_mutex);
 
+      out:
 	jfs_info("jfs_link: rc:%d", rc);
 	return rc;
 }
@@ -891,7 +906,9 @@
 
 	jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name);
 
-	dquot_initialize(dip);
+	rc = dquot_initialize(dip);
+	if (rc)
+		goto out1;
 
 	ssize = strlen(name) + 1;
 
@@ -1082,8 +1099,12 @@
 
 	jfs_info("jfs_rename: %pd %pd", old_dentry, new_dentry);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	rc = dquot_initialize(old_dir);
+	if (rc)
+		goto out1;
+	rc = dquot_initialize(new_dir);
+	if (rc)
+		goto out1;
 
 	old_ip = d_inode(old_dentry);
 	new_ip = d_inode(new_dentry);
@@ -1130,7 +1151,9 @@
 	} else if (new_ip) {
 		IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL);
 		/* Init inode for quota operations. */
-		dquot_initialize(new_ip);
+		rc = dquot_initialize(new_ip);
+		if (rc)
+			goto out_unlock;
 	}
 
 	/*
@@ -1318,6 +1341,7 @@
 
 		clear_cflag(COMMIT_Stale, old_dir);
 	}
+      out_unlock:
 	if (new_ip && !S_ISDIR(new_ip->i_mode))
 		IWRITE_UNLOCK(new_ip);
       out3:
@@ -1353,7 +1377,9 @@
 
 	jfs_info("jfs_mknod: %pd", dentry);
 
-	dquot_initialize(dir);
+	rc = dquot_initialize(dir);
+	if (rc)
+		goto out;
 
 	if ((rc = get_UCSname(&dname, dentry)))
 		goto out;
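
Editor's note: besides the dquot_initialize() conversions, the jfs_link() and jfs_rename() hunks above also reorder their error labels so a failure before txBegin() (or before the new_ip lock) no longer runs the matching txEnd()/IWRITE_UNLOCK(); each label now undoes only what was already set up. A tiny standalone illustration of that ordering, with placeholder names only:

/* All names below are placeholders; this only illustrates label ordering. */
static int prepare(void)   { return 0; }   /* step with no side effects */
static void begin_tx(void) { }
static int do_work(void)   { return 0; }
static void end_tx(void)   { }

static int example_op(void)
{
        int rc;

        rc = prepare();
        if (rc)
                goto out;       /* nothing begun yet, so do not run end_tx() */

        begin_tx();
        rc = do_work();
        end_tx();               /* always pairs with begin_tx() */
out:
        return rc;
}
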
diff --git a/fs/libfs.c b/fs/libfs.c
index 102edfd..c7cbfb0 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1185,7 +1185,7 @@
 	inode->i_uid = GLOBAL_ROOT_UID;
 	inode->i_gid = GLOBAL_ROOT_GID;
 	inode->i_rdev = 0;
-	inode->i_size = 2;
+	inode->i_size = 0;
 	inode->i_blkbits = PAGE_SHIFT;
 	inode->i_blocks = 0;
 
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 76279e1..a7fdbd8 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -53,16 +53,14 @@
 
 static DECLARE_WAIT_QUEUE_HEAD(wq);
 
-static void writeseg_end_io(struct bio *bio, int err)
+static void writeseg_end_io(struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec;
 	int i;
 	struct super_block *sb = bio->bi_private;
 	struct logfs_super *super = logfs_super(sb);
 
-	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
-	BUG_ON(err);
+	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */
 
 	bio_for_each_segment_all(bvec, bio, i) {
 		end_page_writeback(bvec->bv_page);
@@ -83,7 +81,7 @@
 	unsigned int max_pages;
 	int i;
 
-	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+	max_pages = min(nr_pages, BIO_MAX_PAGES);
 
 	bio = bio_alloc(GFP_NOFS, max_pages);
 	BUG_ON(!bio);
@@ -153,14 +151,12 @@
 }
 
 
-static void erase_end_io(struct bio *bio, int err) 
+static void erase_end_io(struct bio *bio)
 { 
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 
 	struct super_block *sb = bio->bi_private; 
 	struct logfs_super *super = logfs_super(sb); 
 
-	BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */ 
-	BUG_ON(err); 
+	BUG_ON(bio->bi_error); /* FIXME: Retry io or write elsewhere */ 
 	BUG_ON(bio->bi_vcnt == 0); 
 	bio_put(bio); 
 	if (atomic_dec_and_test(&super->s_pending_writes))
@@ -175,7 +171,7 @@
 	unsigned int max_pages;
 	int i;
 
-	max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev));
+	max_pages = min(nr_pages, BIO_MAX_PAGES);
 
 	bio = bio_alloc(GFP_NOFS, max_pages);
 	BUG_ON(!bio);
diff --git a/fs/mpage.c b/fs/mpage.c
index ca0244b..778a4dd 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -42,14 +42,14 @@
  * status of that page is hard.  See end_buffer_async_read() for the details.
  * There is no point in duplicating all that complexity.
  */
-static void mpage_end_io(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio)
 {
 	struct bio_vec *bv;
 	int i;
 
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
-		page_endio(page, bio_data_dir(bio), err);
+		page_endio(page, bio_data_dir(bio), bio->bi_error);
 	}
 
 	bio_put(bio);
@@ -277,7 +277,7 @@
 				goto out;
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-			  	min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
+				min_t(int, nr_pages, BIO_MAX_PAGES),
 				GFP_KERNEL);
 		if (bio == NULL)
 			goto confused;
@@ -602,7 +602,7 @@
 			}
 		}
 		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
-				bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
+				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
 		if (bio == NULL)
 			goto confused;
 
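
Editor's note: bio_get_nr_vecs() is removed in this merge because the block layer can now split oversized bios itself, so callers such as mpage, logfs, nilfs2 and xfs simply cap allocations at BIO_MAX_PAGES. A one-line sketch of the replacement; alloc_capped_bio() is not a kernel helper.

#include <linux/bio.h>

/* Sketch: size a bio against the fixed limit and let the block layer split. */
static struct bio *alloc_capped_bio(unsigned int nr_pages)
{
        return bio_alloc(GFP_NOFS, min_t(unsigned int, nr_pages, BIO_MAX_PAGES));
}
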
diff --git a/fs/namespace.c b/fs/namespace.c
index 2b8aa15..0570729 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3218,6 +3218,8 @@
 	down_read(&namespace_sem);
 	list_for_each_entry(mnt, &ns->list, mnt_list) {
 		struct mount *child;
+		int mnt_flags;
+
 		if (mnt->mnt.mnt_sb->s_type != type)
 			continue;
 
@@ -3227,17 +3229,30 @@
 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
 			continue;
 
+		/* Read the mount flags and filter out flags that
+		 * may safely be ignored.
+		 */
+		mnt_flags = mnt->mnt.mnt_flags;
+		if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
+			mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
+
 		/* Verify the mount flags are equal to or more permissive
 		 * than the proposed new mount.
 		 */
-		if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
+		if ((mnt_flags & MNT_LOCK_READONLY) &&
 		    !(new_flags & MNT_READONLY))
 			continue;
-		if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
+		if ((mnt_flags & MNT_LOCK_NODEV) &&
 		    !(new_flags & MNT_NODEV))
 			continue;
-		if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
-		    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
+		if ((mnt_flags & MNT_LOCK_NOSUID) &&
+		    !(new_flags & MNT_NOSUID))
+			continue;
+		if ((mnt_flags & MNT_LOCK_NOEXEC) &&
+		    !(new_flags & MNT_NOEXEC))
+			continue;
+		if ((mnt_flags & MNT_LOCK_ATIME) &&
+		    ((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
 			continue;
 
 		/* This mount is not fully visible if there are any
@@ -3247,16 +3262,18 @@
 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
 			struct inode *inode = child->mnt_mountpoint->d_inode;
 			/* Only worry about locked mounts */
-			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
+			if (!(mnt_flags & MNT_LOCKED))
 				continue;
 			/* Is the directory permanetly empty? */
 			if (!is_empty_dir_inode(inode))
 				goto next;
 		}
 		/* Preserve the locked attributes */
-		*new_mnt_flags |= mnt->mnt.mnt_flags & (MNT_LOCK_READONLY | \
-							MNT_LOCK_NODEV    | \
-							MNT_LOCK_ATIME);
+		*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
+					       MNT_LOCK_NODEV    | \
+					       MNT_LOCK_NOSUID   | \
+					       MNT_LOCK_NOEXEC   | \
+					       MNT_LOCK_ATIME);
 		visible = true;
 		goto found;
 	next:	;
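
Editor's note: together with the proc and sysfs hunks further down that set SB_I_NOEXEC, the namespace.c change above makes the "is this filesystem already fully visible?" check honour MNT_LOCK_NOSUID and MNT_LOCK_NOEXEC, while ignoring those locks on superblocks that can never serve executables anyway. A sketch of the masking step as a hypothetical helper over plain flag values (not the kernel's exact code):

#include <linux/fs.h>
#include <linux/mount.h>

/* Hypothetical helper: drop locks that are moot on a no-exec superblock. */
static int effective_lock_flags(int mnt_flags, unsigned long sb_iflags)
{
        if (sb_iflags & SB_I_NOEXEC)
                mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
        return mnt_flags;
}
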
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index d2554fe..9cd4eb3 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -116,7 +116,7 @@
 
 static struct bio *
 bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
-		void (*end_io)(struct bio *, int err), struct parallel_io *par)
+		bio_end_io_t end_io, struct parallel_io *par)
 {
 	struct bio *bio;
 
@@ -139,8 +139,7 @@
 static struct bio *
 do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
 		struct page *page, struct pnfs_block_dev_map *map,
-		struct pnfs_block_extent *be,
-		void (*end_io)(struct bio *, int err),
+		struct pnfs_block_extent *be, bio_end_io_t end_io,
 		struct parallel_io *par, unsigned int offset, int *len)
 {
 	struct pnfs_block_dev *dev =
@@ -183,11 +182,11 @@
 	return bio;
 }
 
-static void bl_end_io_read(struct bio *bio, int err)
+static void bl_end_io_read(struct bio *bio)
 {
 	struct parallel_io *par = bio->bi_private;
 
-	if (err) {
+	if (bio->bi_error) {
 		struct nfs_pgio_header *header = par->data;
 
 		if (!header->pnfs_error)
@@ -316,13 +315,12 @@
 	return PNFS_ATTEMPTED;
 }
 
-static void bl_end_io_write(struct bio *bio, int err)
+static void bl_end_io_write(struct bio *bio)
 {
 	struct parallel_io *par = bio->bi_private;
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct nfs_pgio_header *header = par->data;
 
-	if (!uptodate) {
+	if (bio->bi_error) {
 		if (!header->pnfs_error)
 			header->pnfs_error = -EIO;
 		pnfs_set_lo_fail(header->lseg);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 42468e5..f63620c 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -338,12 +338,11 @@
 /*
  * BIO operations
  */
-static void nilfs_end_bio_write(struct bio *bio, int err)
+static void nilfs_end_bio_write(struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
-	if (!uptodate)
+	if (bio->bi_error)
 		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);
@@ -415,7 +414,7 @@
 {
 	wi->bio = NULL;
 	wi->rest_blocks = segbuf->sb_sum.nblocks;
-	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
+	wi->max_pages = BIO_MAX_PAGES;
 	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
 	wi->start = wi->end = 0;
 	wi->blocknr = segbuf->sb_pseg_start;
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 99521e7..e4905fb 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -4,6 +4,7 @@
 #include <linux/proc_ns.h>
 #include <linux/magic.h>
 #include <linux/ktime.h>
+#include <linux/seq_file.h>
 
 static struct vfsmount *nsfs_mnt;
 
@@ -136,9 +137,18 @@
 	return ERR_PTR(-EINVAL);
 }
 
+static int nsfs_show_path(struct seq_file *seq, struct dentry *dentry)
+{
+	struct inode *inode = d_inode(dentry);
+	const struct proc_ns_operations *ns_ops = dentry->d_fsdata;
+
+	return seq_printf(seq, "%s:[%lu]", ns_ops->name, inode->i_ino);
+}
+
 static const struct super_operations nsfs_ops = {
 	.statfs = simple_statfs,
 	.evict_inode = nsfs_evict,
+	.show_path = nsfs_show_path,
 };
 static struct dentry *nsfs_mount(struct file_system_type *fs_type,
 			int flags, const char *dev_name, void *data)
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 9e1e112..c1128bc 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -543,7 +543,7 @@
 			return -EROFS;
 		}
 		if (!ntfs_stamp_usnjrnl(vol)) {
-			ntfs_error(sb, "Failed to stamp transation log "
+			ntfs_error(sb, "Failed to stamp transaction log "
 					"($UsnJrnl)%s", es);
 			NVolSetErrors(vol);
 			return -EROFS;
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 16eff45..140de3c 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -372,14 +372,13 @@
 	wait_for_completion(&wc->wc_io_complete);
 }
 
-static void o2hb_bio_end_io(struct bio *bio,
-			   int error)
+static void o2hb_bio_end_io(struct bio *bio)
 {
 	struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
 
-	if (error) {
-		mlog(ML_ERROR, "IO Error %d\n", error);
-		wc->wc_error = error;
+	if (bio->bi_error) {
+		mlog(ML_ERROR, "IO Error %d\n", bio->bi_error);
+		wc->wc_error = bio->bi_error;
 	}
 
 	o2hb_bio_wait_dec(wc, 1);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 719f7f4..7210583 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -105,8 +105,11 @@
 			      file->f_path.dentry->d_name.len,
 			      file->f_path.dentry->d_name.name, mode);
 
-	if (file->f_mode & FMODE_WRITE)
-		dquot_initialize(inode);
+	if (file->f_mode & FMODE_WRITE) {
+		status = dquot_initialize(inode);
+		if (status)
+			goto leave;
+	}
 
 	spin_lock(&oi->ip_lock);
 
@@ -1155,8 +1158,11 @@
 	if (status)
 		return status;
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		status = dquot_initialize(inode);
+		if (status)
+			return status;
+	}
 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
 	if (size_change) {
 		status = ocfs2_rw_lock(inode, 1);
@@ -1209,8 +1215,8 @@
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
 			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
-			if (!transfer_to[USRQUOTA]) {
-				status = -ESRCH;
+			if (IS_ERR(transfer_to[USRQUOTA])) {
+				status = PTR_ERR(transfer_to[USRQUOTA]);
 				goto bail_unlock;
 			}
 		}
@@ -1218,8 +1224,8 @@
 		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
 		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
 			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
-			if (!transfer_to[GRPQUOTA]) {
-				status = -ESRCH;
+			if (IS_ERR(transfer_to[GRPQUOTA])) {
+				status = PTR_ERR(transfer_to[GRPQUOTA]);
 				goto bail_unlock;
 			}
 		}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 6e6abb9..948681e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -200,11 +200,12 @@
 static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
 {
 	struct inode *inode;
+	int status;
 
 	inode = new_inode(dir->i_sb);
 	if (!inode) {
 		mlog(ML_ERROR, "new_inode failed!\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* populate as many fields early on as possible - many of
@@ -213,7 +214,10 @@
 	if (S_ISDIR(mode))
 		set_nlink(inode, 2);
 	inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
+	status = dquot_initialize(inode);
+	if (status)
+		return ERR_PTR(status);
+
 	return inode;
 }
 
@@ -264,7 +268,11 @@
 			  (unsigned long long)OCFS2_I(dir)->ip_blkno,
 			  (unsigned long)dev, mode);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		return status;
+	}
 
 	/* get our super block */
 	osb = OCFS2_SB(dir->i_sb);
@@ -311,8 +319,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, mode);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto leave;
 	}
@@ -708,7 +717,11 @@
 	if (S_ISDIR(inode->i_mode))
 		return -EPERM;
 
-	dquot_initialize(dir);
+	err = dquot_initialize(dir);
+	if (err) {
+		mlog_errno(err);
+		return err;
+	}
 
 	err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
 			&parent_fe_bh, dir, 0);
@@ -896,7 +909,11 @@
 			   (unsigned long long)OCFS2_I(dir)->ip_blkno,
 			   (unsigned long long)OCFS2_I(inode)->ip_blkno);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		return status;
+	}
 
 	BUG_ON(d_inode(dentry->d_parent) != dir);
 
@@ -1230,8 +1247,16 @@
 			   old_dentry->d_name.len, old_dentry->d_name.name,
 			   new_dentry->d_name.len, new_dentry->d_name.name);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	status = dquot_initialize(old_dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
+	status = dquot_initialize(new_dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
 
 	osb = OCFS2_SB(old_dir->i_sb);
 
@@ -1786,7 +1811,11 @@
 	trace_ocfs2_symlink_begin(dir, dentry, symname,
 				  dentry->d_name.len, dentry->d_name.name);
 
-	dquot_initialize(dir);
+	status = dquot_initialize(dir);
+	if (status) {
+		mlog_errno(status);
+		goto bail;
+	}
 
 	sb = dir->i_sb;
 	osb = OCFS2_SB(sb);
@@ -1831,8 +1860,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, S_IFLNK | S_IRWXUGO);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto bail;
 	}
@@ -2485,8 +2515,9 @@
 	}
 
 	inode = ocfs2_get_init_inode(dir, mode);
-	if (!inode) {
-		status = -ENOMEM;
+	if (IS_ERR(inode)) {
+		status = PTR_ERR(inode);
+		inode = NULL;
 		mlog_errno(status);
 		goto leave;
 	}
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index db64ce2..540ab5b 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -168,7 +168,7 @@
 /* Refcount tree support */
 #define OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE	0x1000
 
-/* Discontigous block groups */
+/* Discontiguous block groups */
 #define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG	0x2000
 
 /*
@@ -939,7 +939,7 @@
 			/*
 			 * Block groups may be discontiguous when
 			 * OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG is set.
-			 * The extents of a discontigous block group are
+			 * The extents of a discontiguous block group are
 			 * stored in bg_list.  It is a flat list.
 			 * l_tree_depth must always be zero.  A
 			 * discontiguous group is signified by a non-zero
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 3d0b63d..bb07004 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -499,8 +499,8 @@
 			dquot = dqget(sb,
 				      make_kqid(&init_user_ns, type,
 						le64_to_cpu(dqblk->dqb_id)));
-			if (!dquot) {
-				status = -EIO;
+			if (IS_ERR(dquot)) {
+				status = PTR_ERR(dquot);
 				mlog(ML_ERROR, "Failed to get quota structure "
 				     "for id %u, type %d. Cannot finish quota "
 				     "file recovery.\n",
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b69dd14..7dc818b 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4419,8 +4419,9 @@
 	}
 
 	mutex_lock(&inode->i_mutex);
-	dquot_initialize(dir);
-	error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
+	error = dquot_initialize(dir);
+	if (!error)
+		error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
 	mutex_unlock(&inode->i_mutex);
 	if (!error)
 		fsnotify_create(dir, new_dentry);
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 2768eb1..ced70c8 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -655,14 +655,7 @@
 
 static void ocfs2_control_exit(void)
 {
-	int rc;
-
-	rc = misc_deregister(&ocfs2_control_device);
-	if (rc)
-		printk(KERN_ERR
-		       "ocfs2: Unable to deregister ocfs2_control device "
-		       "(errno %d)\n",
-		       -rc);
+	misc_deregister(&ocfs2_control_device);
 }
 
 static void fsdlm_lock_ast_wrapper(void *astarg)
diff --git a/fs/open.c b/fs/open.c
index e33dab2..b6f1e96 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -377,7 +377,7 @@
 		 * with the "noexec" flag.
 		 */
 		res = -EACCES;
-		if (path.mnt->mnt_flags & MNT_NOEXEC)
+		if (path_noexec(&path))
 			goto out_path_release;
 	}
 
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 68feb0f..361ab4e 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -134,6 +134,8 @@
 		}
 
 		sb->s_flags |= MS_ACTIVE;
+		/* User space would break if executables appear on proc */
+		sb->s_iflags |= SB_I_NOEXEC;
 	}
 
 	return dget(sb->s_root);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 20d1f74..fed66e2c9 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -247,7 +247,7 @@
 EXPORT_SYMBOL(dqstats);
 
 static qsize_t inode_get_rsv_space(struct inode *inode);
-static void __dquot_initialize(struct inode *inode, int type);
+static int __dquot_initialize(struct inode *inode, int type);
 
 static inline unsigned int
 hashfn(const struct super_block *sb, struct kqid qid)
@@ -832,16 +832,17 @@
 struct dquot *dqget(struct super_block *sb, struct kqid qid)
 {
 	unsigned int hashent = hashfn(sb, qid);
-	struct dquot *dquot = NULL, *empty = NULL;
+	struct dquot *dquot, *empty = NULL;
 
         if (!sb_has_quota_active(sb, qid.type))
-		return NULL;
+		return ERR_PTR(-ESRCH);
 we_slept:
 	spin_lock(&dq_list_lock);
 	spin_lock(&dq_state_lock);
 	if (!sb_has_quota_active(sb, qid.type)) {
 		spin_unlock(&dq_state_lock);
 		spin_unlock(&dq_list_lock);
+		dquot = ERR_PTR(-ESRCH);
 		goto out;
 	}
 	spin_unlock(&dq_state_lock);
@@ -876,11 +877,15 @@
 	 * already finished or it will be canceled due to dq_count > 1 test */
 	wait_on_dquot(dquot);
 	/* Read the dquot / allocate space in quota file */
-	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
-	    sb->dq_op->acquire_dquot(dquot) < 0) {
-		dqput(dquot);
-		dquot = NULL;
-		goto out;
+	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+		int err;
+
+		err = sb->dq_op->acquire_dquot(dquot);
+		if (err < 0) {
+			dqput(dquot);
+			dquot = ERR_PTR(err);
+			goto out;
+		}
 	}
 #ifdef CONFIG_QUOTA_DEBUG
 	BUG_ON(!dquot->dq_sb);	/* Has somebody invalidated entry under us? */
@@ -1390,15 +1395,16 @@
  * It is better to call this function outside of any transaction as it
  * might need a lot of space in journal for dquot structure allocation.
  */
-static void __dquot_initialize(struct inode *inode, int type)
+static int __dquot_initialize(struct inode *inode, int type)
 {
 	int cnt, init_needed = 0;
 	struct dquot **dquots, *got[MAXQUOTAS];
 	struct super_block *sb = inode->i_sb;
 	qsize_t rsv;
+	int ret = 0;
 
 	if (!dquot_active(inode))
-		return;
+		return 0;
 
 	dquots = i_dquot(inode);
 
@@ -1407,6 +1413,7 @@
 		struct kqid qid;
 		kprojid_t projid;
 		int rc;
+		struct dquot *dquot;
 
 		got[cnt] = NULL;
 		if (type != -1 && cnt != type)
@@ -1438,16 +1445,25 @@
 			qid = make_kqid_projid(projid);
 			break;
 		}
-		got[cnt] = dqget(sb, qid);
+		dquot = dqget(sb, qid);
+		if (IS_ERR(dquot)) {
+			/* We raced with somebody turning quotas off... */
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		got[cnt] = dquot;
 	}
 
 	/* All required i_dquot has been initialized */
 	if (!init_needed)
-		return;
+		return 0;
 
 	spin_lock(&dq_data_lock);
 	if (IS_NOQUOTA(inode))
-		goto out_err;
+		goto out_lock;
 	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
 		if (type != -1 && cnt != type)
 			continue;
@@ -1469,15 +1485,18 @@
 				dquot_resv_space(dquots[cnt], rsv);
 		}
 	}
-out_err:
+out_lock:
 	spin_unlock(&dq_data_lock);
+out_put:
 	/* Drop unused references */
 	dqput_all(got);
+
+	return ret;
 }
 
-void dquot_initialize(struct inode *inode)
+int dquot_initialize(struct inode *inode)
 {
-	__dquot_initialize(inode, -1);
+	return __dquot_initialize(inode, -1);
 }
 EXPORT_SYMBOL(dquot_initialize);
 
@@ -1961,18 +1980,37 @@
 int dquot_transfer(struct inode *inode, struct iattr *iattr)
 {
 	struct dquot *transfer_to[MAXQUOTAS] = {};
+	struct dquot *dquot;
 	struct super_block *sb = inode->i_sb;
 	int ret;
 
 	if (!dquot_active(inode))
 		return 0;
 
-	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid))
-		transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(iattr->ia_uid));
-	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))
-		transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(iattr->ia_gid));
-
+	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
+		dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
+		if (IS_ERR(dquot)) {
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		transfer_to[USRQUOTA] = dquot;
+	}
+	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
+		dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
+		if (IS_ERR(dquot)) {
+			if (PTR_ERR(dquot) != -ESRCH) {
+				ret = PTR_ERR(dquot);
+				goto out_put;
+			}
+			dquot = NULL;
+		}
+		transfer_to[GRPQUOTA] = dquot;
+	}
 	ret = __dquot_transfer(inode, transfer_to);
+out_put:
 	dqput_all(transfer_to);
 	return ret;
 }
@@ -2518,8 +2556,8 @@
 	struct dquot *dquot;
 
 	dquot = dqget(sb, qid);
-	if (!dquot)
-		return -ESRCH;
+	if (IS_ERR(dquot))
+		return PTR_ERR(dquot);
 	do_get_dqblk(dquot, di);
 	dqput(dquot);
 
@@ -2631,8 +2669,8 @@
 	int rc;
 
 	dquot = dqget(sb, qid);
-	if (!dquot) {
-		rc = -ESRCH;
+	if (IS_ERR(dquot)) {
+		rc = PTR_ERR(dquot);
 		goto out;
 	}
 	rc = do_set_dqblk(dquot, di);
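
Editor's note on the dquot.c hunks above: dqget() no longer returns NULL on failure but an ERR_PTR(), with -ESRCH meaning "quotas not active" and real errors coming from ->acquire_dquot(). Callers such as __dquot_initialize() and dquot_transfer() therefore distinguish "no quota" from hard failures. A sketch of that pattern; get_dquot_or_null() is illustrative, not part of the quota API.

#include <linux/err.h>
#include <linux/quota.h>
#include <linux/quotaops.h>

static struct dquot *get_dquot_or_null(struct super_block *sb,
                                       struct kqid qid, int *err)
{
        struct dquot *dquot = dqget(sb, qid);

        *err = 0;
        if (IS_ERR(dquot)) {
                if (PTR_ERR(dquot) != -ESRCH)   /* real error: report it */
                        *err = PTR_ERR(dquot);
                return NULL;                    /* -ESRCH: quotas are off */
        }
        return dquot;
}
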
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 86ded73..3746367 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -141,9 +141,9 @@
 	if (tstate->flags & QCI_ROOT_SQUASH)
 		uinfo.dqi_flags |= DQF_ROOT_SQUASH;
 	uinfo.dqi_valid = IIF_ALL;
-	if (!ret && copy_to_user(addr, &uinfo, sizeof(uinfo)))
+	if (copy_to_user(addr, &uinfo, sizeof(uinfo)))
 		return -EFAULT;
-	return ret;
+	return 0;
 }
 
 static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index f6f2fba..3d8e7e6 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3319,8 +3319,11 @@
 	/* must be turned off for recursive notify_change calls */
 	ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-	if (is_quota_modification(inode, attr))
-		dquot_initialize(inode);
+	if (is_quota_modification(inode, attr)) {
+		error = dquot_initialize(inode);
+		if (error)
+			return error;
+	}
 	reiserfs_write_lock(inode->i_sb);
 	if (attr->ia_valid & ATTR_SIZE) {
 		/*
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index b55a074..5f1c9c2 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -613,8 +613,7 @@
 	 * we have to set uid and gid here
 	 */
 	inode_init_owner(inode, dir, mode);
-	dquot_initialize(inode);
-	return 0;
+	return dquot_initialize(inode);
 }
 
 static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
@@ -633,12 +632,18 @@
 	struct reiserfs_transaction_handle th;
 	struct reiserfs_security_handle security;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -710,12 +715,18 @@
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -787,7 +798,9 @@
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
 		 REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 #ifdef DISPLACE_NEW_PACKING_LOCALITIES
 	/*
@@ -800,7 +813,11 @@
 	if (!(inode = new_inode(dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, dir, mode);
+	retval = new_inode_init(inode, dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	jbegin_count += reiserfs_cache_default_acl(dir);
 	retval = reiserfs_security_init(dir, inode, &dentry->d_name, &security);
@@ -899,7 +916,9 @@
 	    JOURNAL_PER_BALANCE_CNT * 2 + 2 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	reiserfs_write_lock(dir->i_sb);
 	retval = journal_begin(&th, dir->i_sb, jbegin_count);
@@ -985,7 +1004,9 @@
 	int jbegin_count;
 	unsigned long savelink;
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	inode = d_inode(dentry);
 
@@ -1095,12 +1116,18 @@
 	    2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) +
 		 REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb));
 
-	dquot_initialize(parent_dir);
+	retval = dquot_initialize(parent_dir);
+	if (retval)
+		return retval;
 
 	if (!(inode = new_inode(parent_dir->i_sb))) {
 		return -ENOMEM;
 	}
-	new_inode_init(inode, parent_dir, mode);
+	retval = new_inode_init(inode, parent_dir, mode);
+	if (retval) {
+		drop_new_inode(inode);
+		return retval;
+	}
 
 	retval = reiserfs_security_init(parent_dir, inode, &dentry->d_name,
 					&security);
@@ -1184,7 +1211,9 @@
 	    JOURNAL_PER_BALANCE_CNT * 3 +
 	    2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
 
-	dquot_initialize(dir);
+	retval = dquot_initialize(dir);
+	if (retval)
+		return retval;
 
 	reiserfs_write_lock(dir->i_sb);
 	if (inode->i_nlink >= REISERFS_LINK_MAX) {
@@ -1308,8 +1337,12 @@
 	    JOURNAL_PER_BALANCE_CNT * 3 + 5 +
 	    4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb);
 
-	dquot_initialize(old_dir);
-	dquot_initialize(new_dir);
+	retval = dquot_initialize(old_dir);
+	if (retval)
+		return retval;
+	retval = dquot_initialize(new_dir);
+	if (retval)
+		return retval;
 
 	old_inode = d_inode(old_dentry);
 	new_dentry_inode = d_inode(new_dentry);
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 1c6ac6f..f3db820 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -40,6 +40,10 @@
 				SYSFS_MAGIC, &new_sb, ns);
 	if (IS_ERR(root) || !new_sb)
 		kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
+	else if (new_sb)
+		/* Userspace would break if executables appear on sysfs */
+		root->d_sb->s_iflags |= SB_I_NOEXEC;
+
 	return root;
 }
 
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b96f190..81155b9 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -2070,6 +2070,7 @@
 	struct udf_options uopt;
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
+	bool lvid_open = false;
 
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = INVALID_UID;
@@ -2216,8 +2217,10 @@
 			 le16_to_cpu(ts.year), ts.month, ts.day,
 			 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
 	}
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!(sb->s_flags & MS_RDONLY)) {
 		udf_open_lvid(sb);
+		lvid_open = true;
+	}
 
 	/* Assign the root inode */
 	/* assign inodes by physical block number */
@@ -2248,7 +2251,7 @@
 	if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
 		unload_nls(sbi->s_nls_map);
 #endif
-	if (!(sb->s_flags & MS_RDONLY))
+	if (lvid_open)
 		udf_close_lvid(sb);
 	brelse(sbi->s_lvid_bh);
 	udf_sb_free_partitions(sb);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3859f5e..c77499b 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -351,12 +351,11 @@
  */
 STATIC void
 xfs_end_bio(
-	struct bio		*bio,
-	int			error)
+	struct bio		*bio)
 {
 	xfs_ioend_t		*ioend = bio->bi_private;
 
-	ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
+	ioend->io_error = bio->bi_error;
 
 	/* Toss bio and pass work off to an xfsdatad thread */
 	bio->bi_private = NULL;
@@ -382,8 +381,7 @@
 xfs_alloc_ioend_bio(
 	struct buffer_head	*bh)
 {
-	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
-	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
+	struct bio		*bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 
 	ASSERT(bio->bi_private == NULL);
 	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a4b7d92..01bd678 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1096,8 +1096,7 @@
 
 STATIC void
 xfs_buf_bio_end_io(
-	struct bio		*bio,
-	int			error)
+	struct bio		*bio)
 {
 	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
 
@@ -1105,10 +1104,10 @@
 	 * don't overwrite existing errors - otherwise we can lose errors on
 	 * buffers that require multiple bios to complete.
 	 */
-	if (error) {
+	if (bio->bi_error) {
 		spin_lock(&bp->b_lock);
 		if (!bp->b_io_error)
-			bp->b_io_error = error;
+			bp->b_io_error = bio->bi_error;
 		spin_unlock(&bp->b_lock);
 	}
 
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index 6b040f4..fcf9080 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -147,6 +147,7 @@
  *        (Intended for BIOS use only)
  */
 #define ACPI_PLD_REV1_BUFFER_SIZE               16	/* For Revision 1 of the buffer (From ACPI spec) */
+#define ACPI_PLD_REV2_BUFFER_SIZE               20	/* For Revision 2 of the buffer (From ACPI spec) */
 #define ACPI_PLD_BUFFER_SIZE                    20	/* For Revision 2 of the buffer (From ACPI spec) */
 
 /* First 32-bit dword, bits 0:32 */
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 03aacfb..e11611c 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -136,10 +136,6 @@
 
 #define ACPI_ROOT_TABLE_SIZE_INCREMENT  4
 
-/* Maximum number of While() loop iterations before forced abort */
-
-#define ACPI_MAX_LOOP_ITERATIONS        0xFFFF
-
 /* Maximum sleep allowed via Sleep() operator */
 
 #define ACPI_MAX_SLEEP                  2000	/* 2000 millisec == two seconds */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 11c3a01..9f20eb4a 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -192,8 +192,9 @@
 #define AE_AML_BAD_RESOURCE_LENGTH      EXCEP_AML (0x001F)
 #define AE_AML_ILLEGAL_ADDRESS          EXCEP_AML (0x0020)
 #define AE_AML_INFINITE_LOOP            EXCEP_AML (0x0021)
+#define AE_AML_UNINITIALIZED_NODE       EXCEP_AML (0x0022)
 
-#define AE_CODE_AML_MAX                 0x0021
+#define AE_CODE_AML_MAX                 0x0022
 
 /*
  * Internal exceptions used for control
@@ -355,7 +356,9 @@
 	EXCEP_TXT("AE_AML_ILLEGAL_ADDRESS",
 		  "A memory, I/O, or PCI configuration address is invalid"),
 	EXCEP_TXT("AE_AML_INFINITE_LOOP",
-		  "An apparent infinite AML While loop, method was aborted")
+		  "An apparent infinite AML While loop, method was aborted"),
+	EXCEP_TXT("AE_AML_UNINITIALIZED_NODE",
+		  "A namespace node is uninitialized or unresolved")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_ctrl[] = {
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index f56de8c..908d4f9 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -88,7 +88,8 @@
 #define ACPI_LV_DEBUG_OBJECT        0x00000002
 #define ACPI_LV_INFO                0x00000004
 #define ACPI_LV_REPAIR              0x00000008
-#define ACPI_LV_ALL_EXCEPTIONS      0x0000000F
+#define ACPI_LV_TRACE_POINT         0x00000010
+#define ACPI_LV_ALL_EXCEPTIONS      0x0000001F
 
 /* Trace verbosity level 1 [Standard Trace Level] */
 
@@ -147,6 +148,7 @@
 #define ACPI_DB_DEBUG_OBJECT        ACPI_DEBUG_LEVEL (ACPI_LV_DEBUG_OBJECT)
 #define ACPI_DB_INFO                ACPI_DEBUG_LEVEL (ACPI_LV_INFO)
 #define ACPI_DB_REPAIR              ACPI_DEBUG_LEVEL (ACPI_LV_REPAIR)
+#define ACPI_DB_TRACE_POINT         ACPI_DEBUG_LEVEL (ACPI_LV_TRACE_POINT)
 #define ACPI_DB_ALL_EXCEPTIONS      ACPI_DEBUG_LEVEL (ACPI_LV_ALL_EXCEPTIONS)
 
 /* Trace level -- also used in the global "DebugLevel" */
@@ -182,6 +184,20 @@
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
+/*
+ * Global trace flags
+ */
+#define ACPI_TRACE_ENABLED          ((u32) 4)
+#define ACPI_TRACE_ONESHOT          ((u32) 2)
+#define ACPI_TRACE_OPCODE           ((u32) 1)
+
+/* Defaults for trace debugging level/layer */
+
+#define ACPI_TRACE_LEVEL_ALL        ACPI_LV_ALL
+#define ACPI_TRACE_LAYER_ALL        0x000001FF
+#define ACPI_TRACE_LEVEL_DEFAULT    ACPI_LV_TRACE_POINT
+#define ACPI_TRACE_LAYER_DEFAULT    ACPI_EXECUTER
+
 #if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
 /*
  * The module name is used primarily for error and debug messages.
@@ -432,6 +448,8 @@
 #define ACPI_DUMP_PATHNAME(a, b, c, d)  acpi_ns_dump_pathname(a, b, c, d)
 #define ACPI_DUMP_BUFFER(a, b)          acpi_ut_debug_dump_buffer((u8 *) a, b, DB_BYTE_DISPLAY, _COMPONENT)
 
+#define ACPI_TRACE_POINT(a, b, c, d)    acpi_trace_point (a, b, c, d)
+
 #else				/* ACPI_DEBUG_OUTPUT */
 /*
  * This is the non-debug case -- make everything go away,
@@ -453,6 +471,7 @@
 #define ACPI_DUMP_PATHNAME(a, b, c, d)
 #define ACPI_DUMP_BUFFER(a, b)
 #define ACPI_IS_DEBUG_ENABLED(level, component) 0
+#define ACPI_TRACE_POINT(a, b, c, d)
 
 /* Return macros must have a return statement at the minimum */
 
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 83061ca..5ba8fb6 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index ea6428b..29c6912 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -16,10 +16,6 @@
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  *  General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d02df0a..a54ad1c 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -430,4 +430,10 @@
 acpi_status acpi_os_set_file_offset(ACPI_FILE file, long offset, u8 from);
 #endif
 
+#ifndef ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_trace_point
+void
+acpi_os_trace_point(acpi_trace_event_type type,
+		    u8 begin, u8 *aml, char *pathname);
+#endif
+
 #endif				/* __ACPIOSXF_H__ */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e8ec18a..c33eeab 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20150619
+#define ACPI_CA_VERSION                 0x20150818
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -251,7 +251,9 @@
  * traced each time it is executed.
  */
 ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_flags, 0);
-ACPI_INIT_GLOBAL(acpi_name, acpi_gbl_trace_method_name, 0);
+ACPI_INIT_GLOBAL(const char *, acpi_gbl_trace_method_name, NULL);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_level, ACPI_TRACE_LEVEL_DEFAULT);
+ACPI_INIT_GLOBAL(u32, acpi_gbl_trace_dbg_layer, ACPI_TRACE_LAYER_DEFAULT);
 
 /*
  * Runtime configuration of debug output control masks. We want the debug
@@ -504,7 +506,7 @@
 					   acpi_object_handler handler,
 					   void **data))
 ACPI_EXTERNAL_RETURN_STATUS(acpi_status
-			     acpi_debug_trace(char *name, u32 debug_level,
+			     acpi_debug_trace(const char *name, u32 debug_level,
 					      u32 debug_layer, u32 flags))
 
 /*
@@ -907,9 +909,17 @@
 						     const char *module_name,
 						     u32 component_id,
 						     const char *format, ...))
+
+ACPI_DBG_DEPENDENT_RETURN_VOID(void
+			       acpi_trace_point(acpi_trace_event_type type,
+						u8 begin,
+						u8 *aml, char *pathname))
 ACPI_APP_DEPENDENT_RETURN_VOID(ACPI_PRINTF_LIKE(1)
 				void ACPI_INTERNAL_VAR_XFACE
 				acpi_log_error(const char *format, ...))
+ acpi_status acpi_initialize_debugger(void);
+
+void acpi_terminate_debugger(void);
 
 /*
  * Divergences
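
Editor's note: the acpixf.h hunk above widens acpi_gbl_trace_method_name from a fixed 4-character acpi_name to a full pathname string and adds trace level/layer defaults, which is what the ACPICA AML tracepoint rework (ACPI_LV_TRACE_POINT, acpi_trace_point()) plugs into. A hedged example of arming a one-shot method trace; the method path is made up and the sysfs/module-parameter plumbing is outside this hunk.

#include <linux/acpi.h>

/* Sketch: arm one-shot AML tracing for a (made-up) control method. */
static void arm_method_trace(void)
{
        acpi_status status;

        status = acpi_debug_trace("\\_SB.PCI0.LPCB._INI",
                                  ACPI_TRACE_LEVEL_ALL, ACPI_TRACE_LAYER_ALL,
                                  ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT);
        if (ACPI_FAILURE(status))
                pr_warn("acpi_debug_trace() failed\n");
}
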
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index a948fc5..6e28f54 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -1186,20 +1186,29 @@
  * December 19, 2014
  *
  * NOTE: There are two versions of the table with the same signature --
- * the client version and the server version.
+ * the client version and the server version. The common platform_class
+ * field is used to differentiate the two types of tables.
  *
  ******************************************************************************/
 
-struct acpi_table_tcpa_client {
+struct acpi_table_tcpa_hdr {
 	struct acpi_table_header header;	/* Common ACPI table header */
 	u16 platform_class;
+};
+
+/*
+ * Values for platform_class above.
+ * This is how the client and server subtables are differentiated
+ */
+#define ACPI_TCPA_CLIENT_TABLE          0
+#define ACPI_TCPA_SERVER_TABLE          1
+
+struct acpi_table_tcpa_client {
 	u32 minimum_log_length;	/* Minimum length for the event log area */
 	u64 log_address;	/* Address of the event log area */
 };
 
 struct acpi_table_tcpa_server {
-	struct acpi_table_header header;	/* Common ACPI table header */
-	u16 platform_class;
 	u16 reserved;
 	u64 minimum_log_length;	/* Minimum length for the event log area */
 	u64 log_address;	/* Address of the event log area */
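
Editor's note: the actbl2.h change splits the TCPA definition so the shared header (with platform_class) is separate from the client and server payloads; consumers are expected to read the header first and then pick a layout. A rough sketch of that dispatch, assuming CONFIG_ACPI; describe_tcpa() is illustrative and the pointer arithmetic to reach the payload is left to the caller.

#include <linux/acpi.h>

static void describe_tcpa(struct acpi_table_tcpa_hdr *tcpa)
{
        switch (tcpa->platform_class) {
        case ACPI_TCPA_CLIENT_TABLE:
                /* payload after the header is struct acpi_table_tcpa_client */
                break;
        case ACPI_TCPA_SERVER_TABLE:
                /* payload after the header is struct acpi_table_tcpa_server */
                break;
        }
}
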
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index c2a41d2..f914958 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -662,6 +662,7 @@
 #define ACPI_TYPE_DEBUG_OBJECT          0x10
 
 #define ACPI_TYPE_EXTERNAL_MAX          0x10
+#define ACPI_NUM_TYPES                  (ACPI_TYPE_EXTERNAL_MAX + 1)
 
 /*
  * These are object types that do not map directly to the ACPI
@@ -683,6 +684,7 @@
 #define ACPI_TYPE_LOCAL_SCOPE           0x1B	/* 1 Name, multiple object_list Nodes */
 
 #define ACPI_TYPE_NS_NODE_MAX           0x1B	/* Last typecode used within a NS Node */
+#define ACPI_TOTAL_TYPES                (ACPI_TYPE_NS_NODE_MAX + 1)
 
 /*
  * These are special object types that never appear in
@@ -985,7 +987,8 @@
  */
 #define ACPI_FULL_PATHNAME              0
 #define ACPI_SINGLE_NAME                1
-#define ACPI_NAME_TYPE_MAX              1
+#define ACPI_FULL_PATHNAME_NO_TRAILING  2
+#define ACPI_NAME_TYPE_MAX              2
 
 /*
  * Predefined Namespace items
@@ -1246,6 +1249,14 @@
 #endif
 };
 
+/* Definitions of trace event types */
+
+typedef enum {
+	ACPI_TRACE_AML_METHOD,
+	ACPI_TRACE_AML_OPCODE,
+	ACPI_TRACE_AML_REGION
+} acpi_trace_event_type;
+
 /* Definitions of _OSI support */
 
 #define ACPI_VENDOR_STRINGS                 0x01
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 3cedd43..ec00e2b 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -70,13 +70,14 @@
 
 #ifdef ACPI_ASL_COMPILER
 #define ACPI_APPLICATION
-#define ACPI_DISASSEMBLER
 #define ACPI_DEBUG_OUTPUT
 #define ACPI_CONSTANT_EVAL_ONLY
 #define ACPI_LARGE_NAMESPACE_NODE
 #define ACPI_DATA_TABLE_DISASSEMBLY
 #define ACPI_SINGLE_THREADED
 #define ACPI_32BIT_PHYSICAL_ADDRESS
+
+#define ACPI_DISASSEMBLER 1
 #endif
 
 /* acpi_exec configuration. Multithreaded with full AML debugger */
@@ -89,8 +90,8 @@
 #endif
 
 /*
- * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example configuration.
- * All single threaded.
+ * acpi_bin/acpi_dump/acpi_help/acpi_names/acpi_src/acpi_xtract/Example
+ * configuration. All single threaded.
  */
 #if (defined ACPI_BIN_APP)      || \
 	(defined ACPI_DUMP_APP)     || \
@@ -123,7 +124,7 @@
 #define ACPI_USE_NATIVE_RSDP_POINTER
 #endif
 
-/* acpi_dump configuration. Native mapping used if provied by OSPMs */
+/* acpi_dump configuration. Native mapping used if provided by the host */
 
 #ifdef ACPI_DUMP_APP
 #define ACPI_USE_NATIVE_MEMORY_MAPPING
@@ -151,12 +152,12 @@
 #define ACPI_USE_LOCAL_CACHE
 #endif
 
-/* Common debug support */
+/* Common debug/disassembler support */
 
 #ifdef ACPI_FULL_DEBUG
-#define ACPI_DEBUGGER
 #define ACPI_DEBUG_OUTPUT
-#define ACPI_DISASSEMBLER
+#define ACPI_DEBUGGER 1
+#define ACPI_DISASSEMBLER 1
 #endif
 
 
@@ -323,8 +324,8 @@
  * ACPI_USE_STANDARD_HEADERS - Define this if linking to a C library and
  *      the standard header files may be used.
  *
- * The ACPICA subsystem only uses low level C library functions that do not call
- * operating system services and may therefore be inlined in the code.
+ * The ACPICA subsystem only uses low level C library functions that do not
+ * call operating system services and may therefore be inlined in the code.
  *
  * It may be necessary to tailor these include files to the target
  * generation environment.
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 0a7dc8e..2f296cb 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -56,6 +56,9 @@
 #if defined(_LINUX) || defined(__linux__)
 #include <acpi/platform/aclinuxex.h>
 
+#elif defined(WIN32)
+#include "acwinex.h"
+
 #elif defined(_AED_EFI)
 #include "acefiex.h"
 
diff --git a/include/acpi/platform/acmsvcex.h b/include/acpi/platform/acmsvcex.h
new file mode 100644
index 0000000..b647974
--- /dev/null
+++ b/include/acpi/platform/acmsvcex.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Name: acmsvcex.h - Extra VC specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACMSVCEX_H__
+#define __ACMSVCEX_H__
+
+/* Debug support. */
+
+#ifdef _DEBUG
+#define _CRTDBG_MAP_ALLOC	/* Enables specific file/lineno for leaks */
+#include <crtdbg.h>
+#endif
+
+#endif				/* __ACMSVCEX_H__ */
diff --git a/include/acpi/platform/acwinex.h b/include/acpi/platform/acwinex.h
new file mode 100644
index 0000000..6ed1d71
--- /dev/null
+++ b/include/acpi/platform/acwinex.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Name: acwinex.h - Extra OS specific defines, etc.
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2015, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACWINEX_H__
+#define __ACWINEX_H__
+
+/* Windows uses VC */
+
+#endif				/* __ACWINEX_H__ */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 4188a4d..ff5f135 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -228,10 +228,7 @@
 
 extern int acpi_processor_register_performance(struct acpi_processor_performance
 					       *performance, unsigned int cpu);
-extern void acpi_processor_unregister_performance(struct
-						  acpi_processor_performance
-						  *performance,
-						  unsigned int cpu);
+extern void acpi_processor_unregister_performance(unsigned int cpu);
 
 /* note: this locks both the calling module and the processor module
          if a _PPC object exists, rmmod is disallowed then */
@@ -318,6 +315,7 @@
 void acpi_processor_set_pdc(acpi_handle handle);
 
 /* in processor_throttling.c */
+#ifdef CONFIG_ACPI_CPU_FREQ_PSS
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr,
@@ -330,14 +328,59 @@
 			unsigned long action);
 extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
+#else
+static inline int acpi_processor_tstate_has_changed(struct acpi_processor *pr)
+{
+	return 0;
+}
+
+static inline int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_set_throttling(struct acpi_processor *pr,
+					 int state, bool force)
+{
+	return -ENODEV;
+}
+
+static inline void acpi_processor_reevaluate_tstate(struct acpi_processor *pr,
+			unsigned long action) {}
+
+static inline void acpi_processor_throttling_init(void) {}
+#endif	/* CONFIG_ACPI_CPU_FREQ_PSS */
+
 /* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
 int acpi_processor_power_init(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-extern struct cpuidle_driver acpi_idle_driver;
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
 
-#ifdef CONFIG_PM_SLEEP
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_ACPI_PROCESSOR_IDLE)
 void acpi_processor_syscore_init(void);
 void acpi_processor_syscore_exit(void);
 #else
@@ -348,7 +391,7 @@
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
-#ifdef CONFIG_CPU_FREQ
+#if defined(CONFIG_ACPI_CPU_FREQ_PSS) && defined(CONFIG_CPU_FREQ)
 void acpi_thermal_cpufreq_init(void);
 void acpi_thermal_cpufreq_exit(void);
 #else
@@ -360,6 +403,6 @@
 {
 	return;
 }
-#endif
+#endif	/* CONFIG_ACPI_CPU_FREQ_PSS */
 
 #endif
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index b7babf0..a94cbeb 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -23,236 +23,159 @@
 typedef atomic64_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
+#define ATOMIC_LONG_PFX(x)	atomic64 ## x
 
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
-	atomic64_t *v = (atomic64_t *)l;
-
-	return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic64_xchg((atomic64_t *)(v), (new)))
-
-#else  /*  BITS_PER_LONG == 64  */
+#else
 
 typedef atomic_t atomic_long_t;
 
 #define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
+#define ATOMIC_LONG_PFX(x)	atomic ## x
 
-	return (long)atomic_read(v);
+#endif
+
+#define ATOMIC_LONG_READ_OP(mo)						\
+static inline long atomic_long_read##mo(atomic_long_t *l)		\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_read##mo)(v);			\
 }
+ATOMIC_LONG_READ_OP()
+ATOMIC_LONG_READ_OP(_acquire)
 
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
-	atomic_t *v = (atomic_t *)l;
+#undef ATOMIC_LONG_READ_OP
 
-	atomic_set(v, i);
+#define ATOMIC_LONG_SET_OP(mo)						\
+static inline void atomic_long_set##mo(atomic_long_t *l, long i)	\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	ATOMIC_LONG_PFX(_set##mo)(v, i);				\
 }
+ATOMIC_LONG_SET_OP()
+ATOMIC_LONG_SET_OP(_release)
+
+#undef ATOMIC_LONG_SET_OP
+
+#define ATOMIC_LONG_ADD_SUB_OP(op, mo)					\
+static inline long							\
+atomic_long_##op##_return##mo(long i, atomic_long_t *l)			\
+{									\
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
+									\
+	return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v);		\
+}
+ATOMIC_LONG_ADD_SUB_OP(add,)
+ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(add, _release)
+ATOMIC_LONG_ADD_SUB_OP(sub,)
+ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
+ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
+ATOMIC_LONG_ADD_SUB_OP(sub, _release)
+
+#undef ATOMIC_LONG_ADD_SUB_OP
+
+#define atomic_long_cmpxchg_relaxed(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_acquire(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg_release(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
+					   (old), (new)))
+#define atomic_long_cmpxchg(l, old, new) \
+	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
+
+#define atomic_long_xchg_relaxed(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_acquire(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg_release(v, new) \
+	(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
+#define atomic_long_xchg(v, new) \
+	(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
 
 static inline void atomic_long_inc(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_inc(v);
+	ATOMIC_LONG_PFX(_inc)(v);
 }
 
 static inline void atomic_long_dec(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_dec(v);
+	ATOMIC_LONG_PFX(_dec)(v);
 }
 
 static inline void atomic_long_add(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_add(i, v);
+	ATOMIC_LONG_PFX(_add)(i, v);
 }
 
 static inline void atomic_long_sub(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	atomic_sub(i, v);
+	ATOMIC_LONG_PFX(_sub)(i, v);
 }
 
 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_sub_and_test(i, v);
+	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
 }
 
 static inline int atomic_long_dec_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_dec_and_test(v);
+	return ATOMIC_LONG_PFX(_dec_and_test)(v);
 }
 
 static inline int atomic_long_inc_and_test(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_inc_and_test(v);
+	return ATOMIC_LONG_PFX(_inc_and_test)(v);
 }
 
 static inline int atomic_long_add_negative(long i, atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return atomic_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
-	atomic_t *v = (atomic_t *)l;
-
-	return (long)atomic_sub_return(i, v);
+	return ATOMIC_LONG_PFX(_add_negative)(i, v);
 }
 
 static inline long atomic_long_inc_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_inc_return(v);
+	return (long)ATOMIC_LONG_PFX(_inc_return)(v);
 }
 
 static inline long atomic_long_dec_return(atomic_long_t *l)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_dec_return(v);
+	return (long)ATOMIC_LONG_PFX(_dec_return)(v);
 }
 
 static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 {
-	atomic_t *v = (atomic_t *)l;
+	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
 
-	return (long)atomic_add_unless(v, a, u);
+	return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
 }
 
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
-	(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
-	(atomic_xchg((atomic_t *)(v), (new)))
-
-#endif  /*  BITS_PER_LONG == 64  */
+#define atomic_long_inc_not_zero(l) \
+	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
 
 #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
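For reference, with BITS_PER_LONG == 64 the single instantiation
ATOMIC_LONG_READ_OP(_acquire) above expands to roughly the following
(illustration only, not part of the patch):

static inline long atomic_long_read_acquire(atomic_long_t *l)
{
	atomic64_t *v = (atomic64_t *)l;

	/* ATOMIC_LONG_PFX(_read_acquire) pastes into atomic64_read_acquire */
	return (long)atomic64_read_acquire(v);
}

The same body previously had to be written out twice, once per word size;
the prefix macro now generates both flavours from one definition.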
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 1973ad2..d4d7e33 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -98,15 +98,16 @@
 ATOMIC_OP_RETURN(sub, -)
 #endif
 
-#ifndef atomic_clear_mask
+#ifndef atomic_and
 ATOMIC_OP(and, &)
-#define atomic_clear_mask(i, v) atomic_and(~(i), (v))
 #endif
 
-#ifndef atomic_set_mask
-#define CONFIG_ARCH_HAS_ATOMIC_OR
+#ifndef atomic_or
 ATOMIC_OP(or, |)
-#define atomic_set_mask(i, v)	atomic_or((i), (v))
+#endif
+
+#ifndef atomic_xor
+ATOMIC_OP(xor, ^)
 #endif
 
 #undef ATOMIC_OP_RETURN
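With atomic_clear_mask()/atomic_set_mask() gone, callers open-code the
equivalent with the bitwise ops kept above; a minimal sketch of the
replacements (hypothetical helper names):

static inline void example_clear_bits(int mask, atomic_t *v)
{
	atomic_and(~mask, v);	/* was atomic_clear_mask(mask, v) */
}

static inline void example_set_bits(int mask, atomic_t *v)
{
	atomic_or(mask, v);	/* was atomic_set_mask(mask, v) */
}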
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 30ad9c8..d48e78c 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -32,6 +32,10 @@
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
 
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 55e3abc..b42afad 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -108,12 +108,12 @@
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
-	ACCESS_ONCE(*p) = (v);						\
+	WRITE_ONCE(*p, v);						\
 } while (0)
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1 = ACCESS_ONCE(*p);				\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
 	smp_mb();							\
 	___p1;								\
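The WRITE_ONCE()/READ_ONCE() switch does not change the acquire/release
semantics; a minimal message-passing sketch of how the pair is meant to be
used (hypothetical example, not from the patch):

struct message {
	int payload;
	int ready;
};

static struct message msg;

static void producer(int data)
{
	msg.payload = data;
	/* order the payload store before publishing the flag */
	smp_store_release(&msg.ready, 1);
}

static int consumer(void)
{
	/* pairs with the release above: payload is visible once ready is */
	if (smp_load_acquire(&msg.ready))
		return msg.payload;
	return -1;
}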
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index f56094c..eed3bbe 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -736,6 +736,35 @@
 }
 #endif
 
+/**
+ * DOC: ioremap() and ioremap_*() variants
+ *
+ * If you have an IOMMU, your architecture is expected to implement both
+ * ioremap() and iounmap(); otherwise the asm-generic helpers below provide a
+ * direct mapping.
+ *
+ * There are also ioremap_*() call variants. If you have no IOMMU, they all
+ * naturally default to the direct mapping and you can override these
+ * defaults. If you have an IOMMU, you are highly encouraged to provide your
+ * own ioremap_*() variant implementations, as there is currently no safe
+ * architecture-agnostic default. To avoid possible improper behaviour, the
+ * default asm-generic ioremap_*() variants all return NULL when an IOMMU is
+ * available. If you have defined your own ioremap_*() variant, you must then
+ * declare it as defined to itself so that the default NULL return is not
+ * used.
+ */
+
+#ifdef CONFIG_MMU
+
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+	return NULL;
+}
+#endif
+
+#else /* !CONFIG_MMU */
+
 /*
  * Change "struct page" to physical address.
  *
@@ -743,7 +772,6 @@
  * you'll need to provide your own definitions.
  */
 
-#ifndef CONFIG_MMU
 #ifndef ioremap
 #define ioremap ioremap
 static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
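A sketch of the override pattern the DOC comment above describes: an
architecture with a real uncached mapping defines the symbol to itself in its
own asm/io.h, before including asm-generic/io.h, so the NULL stub is not
picked up. arch_ioremap_uncached() is a stand-in name, not an existing
helper:

#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	/* arch-specific uncached mapping; the callee name is a placeholder */
	return arch_ioremap_uncached(offset, size);
}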
diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h
index 7389c87..b1e17fc 100644
--- a/include/asm-generic/pci_iomap.h
+++ b/include/asm-generic/pci_iomap.h
@@ -15,9 +15,13 @@
 #ifdef CONFIG_PCI
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max);
 extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
 				     unsigned long offset,
 				     unsigned long maxlen);
+extern void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+					unsigned long offset,
+					unsigned long maxlen);
 /* Create a virtual mapping cookie for a port on a given PCI device.
  * Do not call this directly, it exists to make it easier for architectures
  * to override */
@@ -34,12 +38,22 @@
 	return NULL;
 }
 
+static inline void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long max)
+{
+	return NULL;
+}
 static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar,
 					    unsigned long offset,
 					    unsigned long maxlen)
 {
 	return NULL;
 }
+static inline void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
+					       unsigned long offset,
+					       unsigned long maxlen)
+{
+	return NULL;
+}
 #endif
 
 #endif /* __ASM_GENERIC_IO_H */
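Typical use of the new write-combined variant, e.g. for a framebuffer BAR.
A hedged sketch: pdev is assumed to be an already-enabled PCI device and
error handling is left to the caller:

static void __iomem *example_map_fb(struct pci_dev *pdev)
{
	/* maxlen == 0 maps the whole BAR; unmap later with pci_iounmap() */
	return pci_iomap_wc(pdev, 1, 0);
}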
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d0a7a47..0bec580 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
 {
-	return unlikely(!preempt_count() && tif_need_resched());
+	return unlikely(preempt_count() == preempt_offset &&
+			tif_need_resched());
 }
 
 #ifdef CONFIG_PREEMPT
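The new preempt_offset argument lets a caller that legitimately holds
preemption disabled (for example via one spinlock) ask whether anything else
prevents rescheduling. A sketch of the intended use, modelled on
cond_resched_lock(); this caller is an assumption, not shown in the hunk:

static int example_cond_resched_lock(spinlock_t *lock)
{
	/* "one lock held" is the expected preempt count here, not zero */
	if (should_resched(PREEMPT_LOCK_OFFSET)) {
		spin_unlock(lock);
		cond_resched();
		spin_lock(lock);
		return 1;
	}
	return 0;
}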
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 6383d54..54a8e65 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -36,39 +36,39 @@
 /*
  * External function declarations
  */
-extern void queue_read_lock_slowpath(struct qrwlock *lock);
-extern void queue_write_lock_slowpath(struct qrwlock *lock);
+extern void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts);
+extern void queued_write_lock_slowpath(struct qrwlock *lock);
 
 /**
- * queue_read_can_lock- would read_trylock() succeed?
+ * queued_read_can_lock- would read_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_read_can_lock(struct qrwlock *lock)
+static inline int queued_read_can_lock(struct qrwlock *lock)
 {
 	return !(atomic_read(&lock->cnts) & _QW_WMASK);
 }
 
 /**
- * queue_write_can_lock- would write_trylock() succeed?
+ * queued_write_can_lock- would write_trylock() succeed?
  * @lock: Pointer to queue rwlock structure
  */
-static inline int queue_write_can_lock(struct qrwlock *lock)
+static inline int queued_write_can_lock(struct qrwlock *lock)
 {
 	return !atomic_read(&lock->cnts);
 }
 
 /**
- * queue_read_trylock - try to acquire read lock of a queue rwlock
+ * queued_read_trylock - try to acquire read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_read_trylock(struct qrwlock *lock)
+static inline int queued_read_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
 	cnts = atomic_read(&lock->cnts);
 	if (likely(!(cnts & _QW_WMASK))) {
-		cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+		cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 		if (likely(!(cnts & _QW_WMASK)))
 			return 1;
 		atomic_sub(_QR_BIAS, &lock->cnts);
@@ -77,11 +77,11 @@
 }
 
 /**
- * queue_write_trylock - try to acquire write lock of a queue rwlock
+ * queued_write_trylock - try to acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  * Return: 1 if lock acquired, 0 if failed
  */
-static inline int queue_write_trylock(struct qrwlock *lock)
+static inline int queued_write_trylock(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -89,78 +89,70 @@
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg(&lock->cnts,
-				     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_cmpxchg_acquire(&lock->cnts,
+					     cnts, cnts | _QW_LOCKED) == cnts);
 }
 /**
- * queue_read_lock - acquire read lock of a queue rwlock
+ * queued_read_lock - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
  */
-static inline void queue_read_lock(struct qrwlock *lock)
+static inline void queued_read_lock(struct qrwlock *lock)
 {
 	u32 cnts;
 
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 	if (likely(!(cnts & _QW_WMASK)))
 		return;
 
 	/* The slowpath will decrement the reader count, if necessary. */
-	queue_read_lock_slowpath(lock);
+	queued_read_lock_slowpath(lock, cnts);
 }
 
 /**
- * queue_write_lock - acquire write lock of a queue rwlock
+ * queued_write_lock - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_lock(struct qrwlock *lock)
+static inline void queued_write_lock(struct qrwlock *lock)
 {
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 		return;
 
-	queue_write_lock_slowpath(lock);
+	queued_write_lock_slowpath(lock);
 }
 
 /**
- * queue_read_unlock - release read lock of a queue rwlock
+ * queued_read_unlock - release read lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_read_unlock(struct qrwlock *lock)
+static inline void queued_read_unlock(struct qrwlock *lock)
 {
 	/*
 	 * Atomically decrement the reader count
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QR_BIAS, &lock->cnts);
+	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
-#ifndef queue_write_unlock
 /**
- * queue_write_unlock - release write lock of a queue rwlock
+ * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-static inline void queue_write_unlock(struct qrwlock *lock)
+static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	/*
-	 * If the writer field is atomic, it can be cleared directly.
-	 * Otherwise, an atomic subtraction will be used to clear it.
-	 */
-	smp_mb__before_atomic();
-	atomic_sub(_QW_LOCKED, &lock->cnts);
+	smp_store_release((u8 *)&lock->cnts, 0);
 }
-#endif
 
 /*
  * Remapping rwlock architecture specific functions to the corresponding
  * queue rwlock functions.
  */
-#define arch_read_can_lock(l)	queue_read_can_lock(l)
-#define arch_write_can_lock(l)	queue_write_can_lock(l)
-#define arch_read_lock(l)	queue_read_lock(l)
-#define arch_write_lock(l)	queue_write_lock(l)
-#define arch_read_trylock(l)	queue_read_trylock(l)
-#define arch_write_trylock(l)	queue_write_trylock(l)
-#define arch_read_unlock(l)	queue_read_unlock(l)
-#define arch_write_unlock(l)	queue_write_unlock(l)
+#define arch_read_can_lock(l)	queued_read_can_lock(l)
+#define arch_write_can_lock(l)	queued_write_can_lock(l)
+#define arch_read_lock(l)	queued_read_lock(l)
+#define arch_write_lock(l)	queued_write_lock(l)
+#define arch_read_trylock(l)	queued_read_trylock(l)
+#define arch_write_trylock(l)	queued_write_trylock(l)
+#define arch_read_unlock(l)	queued_read_unlock(l)
+#define arch_write_unlock(l)	queued_write_unlock(l)
 
 #endif /* __ASM_GENERIC_QRWLOCK_H */
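The queued_*() renames stay hidden behind the arch_* macros above, so
ordinary rwlock_t users are unaffected; a minimal illustration of the
consumer-side API (hypothetical example):

static DEFINE_RWLOCK(example_lock);
static int example_value;

static int example_reader(void)
{
	int v;

	read_lock(&example_lock);	/* ends up in queued_read_lock() */
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

static void example_writer(int v)
{
	write_lock(&example_lock);	/* ends up in queued_write_lock() */
	example_value = v;
	write_unlock(&example_lock);
}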
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 7169ad0..077cae1 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -1,7 +1,7 @@
 /*
  * AEAD: Authenticated Encryption with Associated Data
  * 
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -45,16 +45,40 @@
  * a breach in the integrity of the message. In essence, that -EBADMSG error
  * code is the key bonus an AEAD cipher has over "standard" block chaining
  * modes.
+ *
+ * Memory Structure:
+ *
+ * To support the needs of the most prominent user of AEAD ciphers, namely
+ * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
+ * to.
+ *
+ * The scatter list pointing to the input data must contain:
+ *
+ * * for RFC4106 ciphers, the concatenation of
+ * associated authentication data || IV || plaintext or ciphertext. Note, the
+ * same IV (buffer) is also set with the aead_request_set_crypt call. Note,
+ * the API call of aead_request_set_ad must provide the length of the AAD and
+ * the IV. The API call of aead_request_set_crypt only points to the size of
+ * the input plaintext or ciphertext.
+ *
+ * * for "normal" AEAD ciphers, the concatenation of
+ * associated authentication data || plaintext or ciphertext.
+ *
+ * It is important to note that if multiple scatter gather list entries form
+ * the input data mentioned above, the first entry must not point to a NULL
+ * buffer. If there is any potential where the AAD buffer can be NULL, the
+ * calling code must contain a precaution to ensure that this does not result
+ * in the first scatter gather list entry pointing to a NULL buffer.
  */
 
+struct crypto_aead;
+
 /**
  *	struct aead_request - AEAD request
  *	@base: Common attributes for async crypto requests
- *	@old: Boolean whether the old or new AEAD API is used
  *	@assoclen: Length in bytes of associated data for authentication
  *	@cryptlen: Length of data to be encrypted or decrypted
  *	@iv: Initialisation vector
- *	@assoc: Associated data
  *	@src: Source data
  *	@dst: Destination data
  *	@__ctx: Start of private context data
@@ -62,14 +86,11 @@
 struct aead_request {
 	struct crypto_async_request base;
 
-	bool old;
-
 	unsigned int assoclen;
 	unsigned int cryptlen;
 
 	u8 *iv;
 
-	struct scatterlist *assoc;
 	struct scatterlist *src;
 	struct scatterlist *dst;
 
@@ -77,19 +98,6 @@
 };
 
 /**
- *	struct aead_givcrypt_request - AEAD request with IV generation
- *	@seq: Sequence number for IV generation
- *	@giv: Space for generated IV
- *	@areq: The AEAD request itself
- */
-struct aead_givcrypt_request {
-	u64 seq;
-	u8 *giv;
-
-	struct aead_request areq;
-};
-
-/**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
  *		 transformation. A transformation may support smaller tag sizes.
@@ -141,16 +149,6 @@
 };
 
 struct crypto_aead {
-	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
-	              unsigned int keylen);
-	int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
-	int (*encrypt)(struct aead_request *req);
-	int (*decrypt)(struct aead_request *req);
-	int (*givencrypt)(struct aead_givcrypt_request *req);
-	int (*givdecrypt)(struct aead_givcrypt_request *req);
-
-	struct crypto_aead *child;
-
 	unsigned int authsize;
 	unsigned int reqsize;
 
@@ -192,16 +190,6 @@
 	crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
 }
 
-static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
-{
-	return tfm;
-}
-
-static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
-{
-	return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
 static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
 {
 	return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -210,8 +198,7 @@
 
 static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
 {
-	return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
-					    alg->ivsize;
+	return alg->ivsize;
 }
 
 /**
@@ -337,7 +324,7 @@
  */
 static inline int crypto_aead_encrypt(struct aead_request *req)
 {
-	return crypto_aead_reqtfm(req)->encrypt(req);
+	return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
 }
 
 /**
@@ -364,10 +351,12 @@
  */
 static inline int crypto_aead_decrypt(struct aead_request *req)
 {
-	if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	if (req->cryptlen < crypto_aead_authsize(aead))
 		return -EINVAL;
 
-	return crypto_aead_reqtfm(req)->decrypt(req);
+	return crypto_aead_alg(aead)->decrypt(req);
 }
 
 /**
@@ -387,7 +376,10 @@
  *
  * Return: number of bytes
  */
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
+{
+	return tfm->reqsize;
+}
 
 /**
  * aead_request_set_tfm() - update cipher handle reference in request
@@ -400,7 +392,7 @@
 static inline void aead_request_set_tfm(struct aead_request *req,
 					struct crypto_aead *tfm)
 {
-	req->base.tfm = crypto_aead_tfm(tfm->child);
+	req->base.tfm = crypto_aead_tfm(tfm);
 }
 
 /**
@@ -526,23 +518,6 @@
 }
 
 /**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * Obsolete, do not use.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
-					  struct scatterlist *assoc,
-					  unsigned int assoclen)
-{
-	req->assoc = assoc;
-	req->assoclen = assoclen;
-	req->old = true;
-}
-
-/**
  * aead_request_set_ad - set associated data information
  * @req: request handle
  * @assoclen: number of bytes in associated data
@@ -554,77 +529,6 @@
 				       unsigned int assoclen)
 {
 	req->assoclen = assoclen;
-	req->old = false;
-}
-
-static inline struct crypto_aead *aead_givcrypt_reqtfm(
-	struct aead_givcrypt_request *req)
-{
-	return crypto_aead_reqtfm(&req->areq);
-}
-
-static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
-{
-	return aead_givcrypt_reqtfm(req)->givencrypt(req);
-};
-
-static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
-{
-	return aead_givcrypt_reqtfm(req)->givdecrypt(req);
-};
-
-static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
-					 struct crypto_aead *tfm)
-{
-	req->areq.base.tfm = crypto_aead_tfm(tfm);
-}
-
-static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
-	struct crypto_aead *tfm, gfp_t gfp)
-{
-	struct aead_givcrypt_request *req;
-
-	req = kmalloc(sizeof(struct aead_givcrypt_request) +
-		      crypto_aead_reqsize(tfm), gfp);
-
-	if (likely(req))
-		aead_givcrypt_set_tfm(req, tfm);
-
-	return req;
-}
-
-static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
-{
-	kfree(req);
-}
-
-static inline void aead_givcrypt_set_callback(
-	struct aead_givcrypt_request *req, u32 flags,
-	crypto_completion_t compl, void *data)
-{
-	aead_request_set_callback(&req->areq, flags, compl, data);
-}
-
-static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
-					   struct scatterlist *src,
-					   struct scatterlist *dst,
-					   unsigned int nbytes, void *iv)
-{
-	aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
-}
-
-static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
-					   struct scatterlist *assoc,
-					   unsigned int assoclen)
-{
-	aead_request_set_assoc(&req->areq, assoc, assoclen);
-}
-
-static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
-					 u8 *giv, u64 seq)
-{
-	req->giv = giv;
-	req->seq = seq;
 }
 
 #endif	/* _CRYPTO_AEAD_H */
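A hedged sketch of the single-scatterlist convention described in the new
Memory Structure comment: for a "normal" AEAD such as gcm(aes), one buffer
carries AAD || plaintext in place, aead_request_set_ad() gives the AAD
length and aead_request_set_crypt() only the plaintext length. Synchronous
completion is assumed and most error handling is trimmed; a real caller must
also handle -EINPROGRESS and a failed request allocation:

static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int ptextlen, const u8 *key, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_aead_setkey(tfm, key, 16);	/* AES-128 */
	crypto_aead_setauthsize(tfm, 16);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	/* buf must have room for AAD + plaintext + 16 byte tag */
	sg_init_one(&sg, buf, assoclen + ptextlen + 16);

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptextlen, iv);

	err = crypto_aead_encrypt(req);

	aead_request_free(req);
	crypto_free_aead(tfm);
	return err;
}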
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index d4ebf6e..c9fe145 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,6 +18,7 @@
 #include <linux/skbuff.h>
 
 struct crypto_aead;
+struct crypto_instance;
 struct module;
 struct rtattr;
 struct seq_file;
@@ -30,6 +31,7 @@
 	void (*show)(struct seq_file *m, struct crypto_alg *alg);
 	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
 	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+	void (*free)(struct crypto_instance *inst);
 
 	unsigned int type;
 	unsigned int maskclear;
@@ -180,7 +182,6 @@
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
 			   struct crypto_async_request *request);
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
 
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
new file mode 100644
index 0000000..274bbae
--- /dev/null
+++ b/include/crypto/chacha20.h
@@ -0,0 +1,25 @@
+/*
+ * Common values for the ChaCha20 algorithm
+ */
+
+#ifndef _CRYPTO_CHACHA20_H
+#define _CRYPTO_CHACHA20_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define CHACHA20_IV_SIZE	16
+#define CHACHA20_KEY_SIZE	32
+#define CHACHA20_BLOCK_SIZE	64
+
+struct chacha20_ctx {
+	u32 key[8];
+};
+
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+			   unsigned int keysize);
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+			  struct scatterlist *src, unsigned int nbytes);
+
+#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 57c8a6e..8e920b4 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -63,6 +63,11 @@
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+#define AHASH_REQUEST_ON_STACK(name, ahash) \
+	char __##name##_desc[sizeof(struct ahash_request) + \
+		crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
+	struct ahash_request *name = (void *)__##name##_desc
+
 /**
  * struct ahash_alg - asynchronous message digest definition
  * @init: Initialize the transformation context. Intended only to initialize the
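A hedged sketch of AHASH_REQUEST_ON_STACK(): since the request lives on the
stack it only makes sense for synchronous (shash-backed) ahash transforms
with a small reqsize. Error handling and the -EINPROGRESS case are omitted:

static int example_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
			  unsigned int len, u8 *out)
{
	AHASH_REQUEST_ON_STACK(req, tfm);

	/* the macro only reserves space; the tfm still has to be attached */
	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, len);

	return crypto_ahash_digest(req);
}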
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 4b25471..5554cdd 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -1,7 +1,7 @@
 /*
  * AEAD: Authenticated Encryption with Associated Data
  * 
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,7 @@
 struct rtattr;
 
 struct aead_instance {
+	void (*free)(struct aead_instance *inst);
 	union {
 		struct {
 			char head[offsetof(struct aead_alg, base)];
@@ -34,20 +35,15 @@
 	struct crypto_spawn base;
 };
 
-extern const struct crypto_type crypto_aead_type;
-extern const struct crypto_type crypto_nivaead_type;
+struct aead_queue {
+	struct crypto_queue base;
+};
 
 static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
 {
 	return crypto_tfm_ctx(&tfm->base);
 }
 
-static inline struct crypto_instance *crypto_aead_alg_instance(
-	struct crypto_aead *aead)
-{
-	return crypto_tfm_alg_instance(&aead->base);
-}
-
 static inline struct crypto_instance *aead_crypto_instance(
 	struct aead_instance *inst)
 {
@@ -61,7 +57,7 @@
 
 static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
 {
-	return aead_instance(crypto_aead_alg_instance(aead));
+	return aead_instance(crypto_tfm_alg_instance(&aead->base));
 }
 
 static inline void *aead_instance_ctx(struct aead_instance *inst)
@@ -90,8 +86,6 @@
 	crypto_set_spawn(&spawn->base, inst);
 }
 
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
-
 int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
 		     u32 type, u32 mask);
 
@@ -100,12 +94,6 @@
 	crypto_drop_spawn(&spawn->base);
 }
 
-static inline struct crypto_alg *crypto_aead_spawn_alg(
-	struct crypto_aead_spawn *spawn)
-{
-	return spawn->base.alg;
-}
-
 static inline struct aead_alg *crypto_spawn_aead_alg(
 	struct crypto_aead_spawn *spawn)
 {
@@ -118,38 +106,15 @@
 	return crypto_spawn_tfm2(&spawn->base);
 }
 
-struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
-				       struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
-int aead_geniv_init(struct crypto_tfm *tfm);
-void aead_geniv_exit(struct crypto_tfm *tfm);
-
-static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
-{
-	return geniv->child;
-}
-
-static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
-{
-	return aead_request_ctx(&req->areq);
-}
-
-static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
-					  int err)
-{
-	aead_request_complete(&req->areq, err);
-}
-
 static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
 					   unsigned int reqsize)
 {
-	crypto_aead_crt(aead)->reqsize = reqsize;
+	aead->reqsize = reqsize;
 }
 
 static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
 {
-	return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
-					    alg->maxauthsize;
+	return alg->maxauthsize;
 }
 
 static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
@@ -157,6 +122,37 @@
 	return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
 }
 
+static inline void aead_init_queue(struct aead_queue *queue,
+				   unsigned int max_qlen)
+{
+	crypto_init_queue(&queue->base, max_qlen);
+}
+
+static inline int aead_enqueue_request(struct aead_queue *queue,
+				       struct aead_request *request)
+{
+	return crypto_enqueue_request(&queue->base, &request->base);
+}
+
+static inline struct aead_request *aead_dequeue_request(
+	struct aead_queue *queue)
+{
+	struct crypto_async_request *req;
+
+	req = crypto_dequeue_request(&queue->base);
+
+	return req ? container_of(req, struct aead_request, base) : NULL;
+}
+
+static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
+{
+	struct crypto_async_request *req;
+
+	req = crypto_get_backlog(&queue->base);
+
+	return req ? container_of(req, struct aead_request, base) : NULL;
+}
+
 int crypto_register_aead(struct aead_alg *alg);
 void crypto_unregister_aead(struct aead_alg *alg);
 int crypto_register_aeads(struct aead_alg *algs, int count);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 9ca9b87..5933363 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,10 +15,19 @@
 
 #include <crypto/internal/aead.h>
 #include <linux/spinlock.h>
+#include <linux/types.h>
 
 struct aead_geniv_ctx {
 	spinlock_t lock;
 	struct crypto_aead *child;
+	struct crypto_blkcipher *null;
+	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
 };
 
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+				       struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
 #endif	/* _CRYPTO_INTERNAL_GENIV_H */
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index b3a46c5..2cf7a61 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -107,5 +107,20 @@
 	return req->base.flags;
 }
 
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+	return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+	return req->base.flags;
+}
+
 #endif	/* _CRYPTO_INTERNAL_SKCIPHER_H */
 
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
new file mode 100644
index 0000000..894df59
--- /dev/null
+++ b/include/crypto/poly1305.h
@@ -0,0 +1,41 @@
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_POLY1305_H
+#define _CRYPTO_POLY1305_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLY1305_BLOCK_SIZE	16
+#define POLY1305_KEY_SIZE	32
+#define POLY1305_DIGEST_SIZE	16
+
+struct poly1305_desc_ctx {
+	/* key */
+	u32 r[5];
+	/* finalize key */
+	u32 s[4];
+	/* accumulator */
+	u32 h[5];
+	/* partial buffer */
+	u8 buf[POLY1305_BLOCK_SIZE];
+	/* bytes used in partial buffer */
+	unsigned int buflen;
+	/* r key has been set */
+	bool rset;
+	/* s key has been set */
+	bool sset;
+};
+
+int crypto_poly1305_init(struct shash_desc *desc);
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
+			   const u8 *key, unsigned int keylen);
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+					const u8 *src, unsigned int srclen);
+int crypto_poly1305_update(struct shash_desc *desc,
+			   const u8 *src, unsigned int srclen);
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+
+#endif
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 96670e7..35f99b6 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,14 +25,6 @@
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
-					struct scatterlist *sg2)
-{
-	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
-	sg1[num - 1].page_link &= ~0x02;
-	sg1[num - 1].page_link |= 0x01;
-}
-
 static inline void scatterwalk_crypto_chain(struct scatterlist *head,
 					    struct scatterlist *sg,
 					    int chain, int num)
@@ -43,7 +35,7 @@
 	}
 
 	if (sg)
-		scatterwalk_sg_chain(head, num, sg);
+		sg_chain(head, num, sg);
 	else
 		sg_mark_end(head);
 }
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 07d245f..d8dd41f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -1,7 +1,7 @@
 /*
  * Symmetric key ciphers.
  * 
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -18,6 +18,28 @@
 #include <linux/slab.h>
 
 /**
+ *	struct skcipher_request - Symmetric key cipher request
+ *	@cryptlen: Number of bytes to encrypt or decrypt
+ *	@iv: Initialisation Vector
+ *	@src: Source SG list
+ *	@dst: Destination SG list
+ *	@base: Underlying async request request
+ *	@__ctx: Start of private context data
+ */
+struct skcipher_request {
+	unsigned int cryptlen;
+
+	u8 *iv;
+
+	struct scatterlist *src;
+	struct scatterlist *dst;
+
+	struct crypto_async_request base;
+
+	void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
  *	struct skcipher_givcrypt_request - Crypto request with IV generation
  *	@seq: Sequence number for IV generation
  *	@giv: Space for generated IV
@@ -30,6 +52,23 @@
 	struct ablkcipher_request creq;
 };
 
+struct crypto_skcipher {
+	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+	              unsigned int keylen);
+	int (*encrypt)(struct skcipher_request *req);
+	int (*decrypt)(struct skcipher_request *req);
+
+	unsigned int ivsize;
+	unsigned int reqsize;
+
+	struct crypto_tfm base;
+};
+
+#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+	char __##name##_desc[sizeof(struct skcipher_request) + \
+		crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+	struct skcipher_request *name = (void *)__##name##_desc
+
 static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
 	struct skcipher_givcrypt_request *req)
 {
@@ -106,5 +145,355 @@
 	req->seq = seq;
 }
 
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * Symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately before the completion of the operation.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if multiple operations were invoked in
+ * parallel. This state information is unused by the kernel crypto API.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+	struct crypto_tfm *tfm)
+{
+	return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+					      u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+	struct crypto_skcipher *tfm)
+{
+	return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+	crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+					u32 mask)
+{
+	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+			      crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+	return tfm->ivsize;
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+	return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+					       u32 flags)
+{
+	crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+						 u32 flags)
+{
+	crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers support
+ * several key lengths, such as AES-128 vs AES-192 vs. AES-256. When providing
+ * a 16 byte key for an AES cipher handle, AES-128 is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+					 const u8 *key, unsigned int keylen)
+{
+	return tfm->setkey(tfm, key, keylen);
+}
+
+/**
+ * crypto_skcipher_reqtfm() - obtain cipher handle from request
+ * @req: skcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_skcipher handle when furnishing an skcipher_request
+ * data structure.
+ *
+ * Return: crypto_skcipher handle
+ */
+static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
+	struct skcipher_request *req)
+{
+	return __crypto_skcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_skcipher_encrypt() - encrypt plaintext
+ * @req: reference to the skcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	return tfm->encrypt(req);
+}
+
+/**
+ * crypto_skcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the skcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+	return tfm->decrypt(req);
+}
+
+/**
+ * DOC: Symmetric Key Cipher Request Handle
+ *
+ * The skcipher_request data structure contains all pointers to data
+ * required for the symmetric key cipher operation. This includes the cipher
+ * handle (which can be used by multiple skcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the skcipher_request_* API calls in a similar way as the
+ * skcipher handle is to the crypto_skcipher_* API calls.
+ */
+
+/**
+ * crypto_skcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
+{
+	return tfm->reqsize;
+}
+
+/**
+ * skcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing skcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void skcipher_request_set_tfm(struct skcipher_request *req,
+					    struct crypto_skcipher *tfm)
+{
+	req->base.tfm = crypto_skcipher_tfm(tfm);
+}
+
+static inline struct skcipher_request *skcipher_request_cast(
+	struct crypto_async_request *req)
+{
+	return container_of(req, struct skcipher_request, base);
+}
+
+/**
+ * skcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the skcipher
+ * encrypt and decrypt API calls. During the allocation, the provided skcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
+static inline struct skcipher_request *skcipher_request_alloc(
+	struct crypto_skcipher *tfm, gfp_t gfp)
+{
+	struct skcipher_request *req;
+
+	req = kmalloc(sizeof(struct skcipher_request) +
+		      crypto_skcipher_reqsize(tfm), gfp);
+
+	if (likely(req))
+		skcipher_request_set_tfm(req, tfm);
+
+	return req;
+}
+
+/**
+ * skcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void skcipher_request_free(struct skcipher_request *req)
+{
+	kzfree(req);
+}
+
+/**
+ * skcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the skcipher_request handle and
+ * must comply with the following template:
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void skcipher_request_set_callback(struct skcipher_request *req,
+						 u32 flags,
+						 crypto_completion_t compl,
+						 void *data)
+{
+	req->base.complete = compl;
+	req->base.data = data;
+	req->base.flags = flags;
+}
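+
+/*
+ * Illustrative sketch only (not part of the API): a callback complying with
+ * the template documented above. The names my_op and my_skcipher_complete
+ * are assumptions of this example. A request that was backlogged first
+ * reports -EINPROGRESS once it reaches the active queue; this sketch simply
+ * ignores that intermediate notification.
+ *
+ *	struct my_op {
+ *		struct completion done;
+ *		int err;
+ *	};
+ *
+ *	static void my_skcipher_complete(struct crypto_async_request *req,
+ *					 int error)
+ *	{
+ *		struct my_op *op = req->data;
+ *
+ *		if (error == -EINPROGRESS)
+ *			return;
+ *		op->err = error;
+ *		complete(&op->done);
+ *	}
+ */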
+
+/**
+ * skcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter/gather list
+ * @dst: destination scatter/gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_skcipher_ivsize
+ *
+ * This function allows setting the source and destination data
+ * scatter/gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ */
+static inline void skcipher_request_set_crypt(
+	struct skcipher_request *req,
+	struct scatterlist *src, struct scatterlist *dst,
+	unsigned int cryptlen, void *iv)
+{
+	req->src = src;
+	req->dst = dst;
+	req->cryptlen = cryptlen;
+	req->iv = iv;
+}
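+
+/*
+ * Illustrative sketch only: combining the helpers above. It assumes the
+ * caller holds a crypto_skcipher handle "tfm", scatterlists "src" and "dst"
+ * covering "nbytes" of data, an IV buffer "iv" of crypto_skcipher_ivsize(tfm)
+ * bytes, and the hypothetical my_skcipher_complete()/struct my_op from the
+ * sketch above. Submitting the request to the encrypt or decrypt API and
+ * waiting on op.done are outside the scope of these helpers.
+ *
+ *	struct skcipher_request *req;
+ *
+ *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *
+ *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+ *					   CRYPTO_TFM_REQ_MAY_SLEEP,
+ *				      my_skcipher_complete, &op);
+ *	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
+ *
+ *	(submit the request, wait for op.done, check op.err)
+ *
+ *	skcipher_request_free(req);
+ */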
+
 #endif	/* _CRYPTO_SKCIPHER_H */
 
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index c9a8b64..b2d56dd 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -34,6 +34,17 @@
 		void (*codec_wake_override)(struct device *, bool enable);
 		int (*get_cdclk_freq)(struct device *);
 	} *ops;
+
+	const struct i915_audio_component_audio_ops {
+		void *audio_ptr;
+		/**
+		 * Called from the i915 driver to notify the HDA driver that
+		 * pin sense and/or ELD information has changed.
+		 * @audio_ptr:		HDA driver object
+		 * @port:		Which port has changed (PORTA / PORTB / PORTC etc.)
+		 */
+		void (*pin_eld_notify)(void *audio_ptr, int port);
+	} *audio_ops;
 };
 
 #endif /* _I915_COMPONENT_H_ */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index aab088d..63d01c1 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -31,6 +31,7 @@
 #define CLK_FOUT_VPLL			4
 #define CLK_FOUT_UPLL			5
 #define CLK_FOUT_MPLL			6
+#define CLK_ARM_CLK			7
 
 /* Muxes */
 #define CLK_MOUT_MPLL_USER_L		16
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 4273891..8183d1c 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -21,6 +21,7 @@
 #define CLK_FOUT_CPLL		6
 #define CLK_FOUT_EPLL		7
 #define CLK_FOUT_VPLL		8
+#define CLK_ARM_CLK		9
 
 /* gate for special clocks (sclk) */
 #define CLK_SCLK_CAM_BAYER	128
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 8780868..8de173f 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -251,6 +251,9 @@
 #define IMX6QDL_CLK_VIDEO_27M			238
 #define IMX6QDL_CLK_MIPI_CORE_CFG		239
 #define IMX6QDL_CLK_MIPI_IPG			240
-#define IMX6QDL_CLK_END				241
+#define IMX6QDL_CLK_CAAM_MEM			241
+#define IMX6QDL_CLK_CAAM_ACLK			242
+#define IMX6QDL_CLK_CAAM_IPG			243
+#define IMX6QDL_CLK_END				244
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
new file mode 100644
index 0000000..c343894
--- /dev/null
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX6UL_H
+#define __DT_BINDINGS_CLOCK_IMX6UL_H
+
+#define IMX6UL_CLK_DUMMY		0
+#define IMX6UL_CLK_CKIL			1
+#define IMX6UL_CLK_CKIH			2
+#define IMX6UL_CLK_OSC			3
+#define IMX6UL_PLL1_BYPASS_SRC		4
+#define IMX6UL_PLL2_BYPASS_SRC		5
+#define IMX6UL_PLL3_BYPASS_SRC		6
+#define IMX6UL_PLL4_BYPASS_SRC		7
+#define IMX6UL_PLL5_BYPASS_SRC		8
+#define IMX6UL_PLL6_BYPASS_SRC		9
+#define IMX6UL_PLL7_BYPASS_SRC		10
+#define IMX6UL_CLK_PLL1 		11
+#define IMX6UL_CLK_PLL2 		12
+#define IMX6UL_CLK_PLL3 		13
+#define IMX6UL_CLK_PLL4 		14
+#define IMX6UL_CLK_PLL5 		15
+#define IMX6UL_CLK_PLL6 		16
+#define IMX6UL_CLK_PLL7 		17
+#define IMX6UL_PLL1_BYPASS		18
+#define IMX6UL_PLL2_BYPASS		19
+#define IMX6UL_PLL3_BYPASS		20
+#define IMX6UL_PLL4_BYPASS		21
+#define IMX6UL_PLL5_BYPASS		22
+#define IMX6UL_PLL6_BYPASS		23
+#define IMX6UL_PLL7_BYPASS		24
+#define IMX6UL_CLK_PLL1_SYS		25
+#define IMX6UL_CLK_PLL2_BUS		26
+#define IMX6UL_CLK_PLL3_USB_OTG 	27
+#define IMX6UL_CLK_PLL4_AUDIO		28
+#define IMX6UL_CLK_PLL5_VIDEO		29
+#define IMX6UL_CLK_PLL6_ENET		30
+#define IMX6UL_CLK_PLL7_USB_HOST	31
+#define IMX6UL_CLK_USBPHY1		32
+#define IMX6UL_CLK_USBPHY2		33
+#define IMX6UL_CLK_USBPHY1_GATE		34
+#define IMX6UL_CLK_USBPHY2_GATE		35
+#define IMX6UL_CLK_PLL2_PFD0		36
+#define IMX6UL_CLK_PLL2_PFD1		37
+#define IMX6UL_CLK_PLL2_PFD2		38
+#define IMX6UL_CLK_PLL2_PFD3		39
+#define IMX6UL_CLK_PLL3_PFD0		40
+#define IMX6UL_CLK_PLL3_PFD1		41
+#define IMX6UL_CLK_PLL3_PFD2		42
+#define IMX6UL_CLK_PLL3_PFD3		43
+#define IMX6UL_CLK_ENET_REF		44
+#define IMX6UL_CLK_ENET2_REF		45
+#define IMX6UL_CLK_ENET2_REF_125M	46
+#define IMX6UL_CLK_ENET_PTP_REF		47
+#define IMX6UL_CLK_ENET_PTP		48
+#define IMX6UL_CLK_PLL4_POST_DIV	49
+#define IMX6UL_CLK_PLL4_AUDIO_DIV	50
+#define IMX6UL_CLK_PLL5_POST_DIV	51
+#define IMX6UL_CLK_PLL5_VIDEO_DIV	52
+#define IMX6UL_CLK_PLL2_198M		53
+#define IMX6UL_CLK_PLL3_80M		54
+#define IMX6UL_CLK_PLL3_60M		55
+#define IMX6UL_CLK_STEP 		56
+#define IMX6UL_CLK_PLL1_SW		57
+#define IMX6UL_CLK_AXI_ALT_SEL		58
+#define IMX6UL_CLK_AXI_SEL		59
+#define IMX6UL_CLK_PERIPH_PRE		60
+#define IMX6UL_CLK_PERIPH2_PRE		61
+#define IMX6UL_CLK_PERIPH_CLK2_SEL	62
+#define IMX6UL_CLK_PERIPH2_CLK2_SEL	63
+#define IMX6UL_CLK_USDHC1_SEL		64
+#define IMX6UL_CLK_USDHC2_SEL		65
+#define IMX6UL_CLK_BCH_SEL		66
+#define IMX6UL_CLK_GPMI_SEL		67
+#define IMX6UL_CLK_EIM_SLOW_SEL 	68
+#define IMX6UL_CLK_SPDIF_SEL		69
+#define IMX6UL_CLK_SAI1_SEL		70
+#define IMX6UL_CLK_SAI2_SEL		71
+#define IMX6UL_CLK_SAI3_SEL		72
+#define IMX6UL_CLK_LCDIF_PRE_SEL	73
+#define IMX6UL_CLK_SIM_PRE_SEL		74
+#define IMX6UL_CLK_LDB_DI0_SEL		75
+#define IMX6UL_CLK_LDB_DI1_SEL		76
+#define IMX6UL_CLK_ENFC_SEL		77
+#define IMX6UL_CLK_CAN_SEL		78
+#define IMX6UL_CLK_ECSPI_SEL		79
+#define IMX6UL_CLK_UART_SEL		80
+#define IMX6UL_CLK_QSPI1_SEL		81
+#define IMX6UL_CLK_PERCLK_SEL		82
+#define IMX6UL_CLK_LCDIF_SEL		83
+#define IMX6UL_CLK_SIM_SEL		84
+#define IMX6UL_CLK_PERIPH		85
+#define IMX6UL_CLK_PERIPH2		86
+#define IMX6UL_CLK_LDB_DI0_DIV_3_5	87
+#define IMX6UL_CLK_LDB_DI0_DIV_7	88
+#define IMX6UL_CLK_LDB_DI1_DIV_3_5	89
+#define IMX6UL_CLK_LDB_DI1_DIV_7	90
+#define IMX6UL_CLK_LDB_DI0_DIV_SEL	91
+#define IMX6UL_CLK_LDB_DI1_DIV_SEL	92
+#define IMX6UL_CLK_ARM			93
+#define IMX6UL_CLK_PERIPH_CLK2		94
+#define IMX6UL_CLK_PERIPH2_CLK2 	95
+#define IMX6UL_CLK_AHB			96
+#define IMX6UL_CLK_MMDC_PODF 		97
+#define IMX6UL_CLK_AXI_PODF		98
+#define IMX6UL_CLK_PERCLK		99
+#define IMX6UL_CLK_IPG			100
+#define IMX6UL_CLK_USDHC1_PODF		101
+#define IMX6UL_CLK_USDHC2_PODF		102
+#define IMX6UL_CLK_BCH_PODF		103
+#define IMX6UL_CLK_GPMI_PODF		104
+#define IMX6UL_CLK_EIM_SLOW_PODF	105
+#define IMX6UL_CLK_SPDIF_PRED		106
+#define IMX6UL_CLK_SPDIF_PODF		107
+#define IMX6UL_CLK_SAI1_PRED		108
+#define IMX6UL_CLK_SAI1_PODF		109
+#define IMX6UL_CLK_SAI2_PRED		110
+#define IMX6UL_CLK_SAI2_PODF		111
+#define IMX6UL_CLK_SAI3_PRED		112
+#define IMX6UL_CLK_SAI3_PODF		113
+#define IMX6UL_CLK_LCDIF_PRED		114
+#define IMX6UL_CLK_LCDIF_PODF		115
+#define IMX6UL_CLK_SIM_PODF		116
+#define IMX6UL_CLK_QSPI1_PDOF		117
+#define IMX6UL_CLK_ENFC_PRED		118
+#define IMX6UL_CLK_ENFC_PODF		119
+#define IMX6UL_CLK_CAN_PODF		120
+#define IMX6UL_CLK_ECSPI_PODF		121
+#define IMX6UL_CLK_UART_PODF		122
+#define IMX6UL_CLK_ADC1 		123
+#define IMX6UL_CLK_ADC2 		124
+#define IMX6UL_CLK_AIPSTZ1		125
+#define IMX6UL_CLK_AIPSTZ2		126
+#define IMX6UL_CLK_AIPSTZ3		127
+#define IMX6UL_CLK_APBHDMA		128
+#define IMX6UL_CLK_ASRC_IPG		129
+#define IMX6UL_CLK_ASRC_MEM		130
+#define IMX6UL_CLK_GPMI_BCH_APB  	131
+#define IMX6UL_CLK_GPMI_BCH 		132
+#define IMX6UL_CLK_GPMI_IO		133
+#define IMX6UL_CLK_GPMI_APB		134
+#define IMX6UL_CLK_CAAM_MEM		135
+#define IMX6UL_CLK_CAAM_ACLK		136
+#define IMX6UL_CLK_CAAM_IPG		137
+#define IMX6UL_CLK_CSI			138
+#define IMX6UL_CLK_ECSPI1		139
+#define IMX6UL_CLK_ECSPI2		140
+#define IMX6UL_CLK_ECSPI3		141
+#define IMX6UL_CLK_ECSPI4		142
+#define IMX6UL_CLK_EIM			143
+#define IMX6UL_CLK_ENET  		144
+#define IMX6UL_CLK_ENET_AHB		145
+#define IMX6UL_CLK_EPIT1		146
+#define IMX6UL_CLK_EPIT2		147
+#define IMX6UL_CLK_CAN1_IPG		148
+#define IMX6UL_CLK_CAN1_SERIAL		149
+#define IMX6UL_CLK_CAN2_IPG		150
+#define IMX6UL_CLK_CAN2_SERIAL		151
+#define IMX6UL_CLK_GPT1_BUS		152
+#define IMX6UL_CLK_GPT1_SERIAL		153
+#define IMX6UL_CLK_GPT2_BUS		154
+#define IMX6UL_CLK_GPT2_SERIAL		155
+#define IMX6UL_CLK_I2C1 		156
+#define IMX6UL_CLK_I2C2 		157
+#define IMX6UL_CLK_I2C3 		158
+#define IMX6UL_CLK_I2C4 		159
+#define IMX6UL_CLK_IOMUXC 		160
+#define IMX6UL_CLK_LCDIF_APB 		161
+#define IMX6UL_CLK_LCDIF_PIX 		162
+#define IMX6UL_CLK_MMDC_P0_FAST 	163
+#define IMX6UL_CLK_MMDC_P0_IPG  	164
+#define IMX6UL_CLK_OCOTP 		165
+#define IMX6UL_CLK_OCRAM 		166
+#define IMX6UL_CLK_PWM1 		167
+#define IMX6UL_CLK_PWM2 		168
+#define IMX6UL_CLK_PWM3 		169
+#define IMX6UL_CLK_PWM4 		170
+#define IMX6UL_CLK_PWM5 		171
+#define IMX6UL_CLK_PWM6 		172
+#define IMX6UL_CLK_PWM7 		173
+#define IMX6UL_CLK_PWM8 		174
+#define IMX6UL_CLK_PXP  		175
+#define IMX6UL_CLK_QSPI 		176
+#define IMX6UL_CLK_ROM  		177
+#define IMX6UL_CLK_SAI1 		178
+#define IMX6UL_CLK_SAI1_IPG 		179
+#define IMX6UL_CLK_SAI2 		180
+#define IMX6UL_CLK_SAI2_IPG 		181
+#define IMX6UL_CLK_SAI3 		182
+#define IMX6UL_CLK_SAI3_IPG 		183
+#define IMX6UL_CLK_SDMA 		184
+#define IMX6UL_CLK_SIM  		185
+#define IMX6UL_CLK_SIM_S 		186
+#define IMX6UL_CLK_SPBA 		187
+#define IMX6UL_CLK_SPDIF 		188
+#define IMX6UL_CLK_UART1_IPG 		189
+#define IMX6UL_CLK_UART1_SERIAL 	190
+#define IMX6UL_CLK_UART2_IPG 		191
+#define IMX6UL_CLK_UART2_SERIAL 	192
+#define IMX6UL_CLK_UART3_IPG 		193
+#define IMX6UL_CLK_UART3_SERIAL 	194
+#define IMX6UL_CLK_UART4_IPG 		195
+#define IMX6UL_CLK_UART4_SERIAL 	196
+#define IMX6UL_CLK_UART5_IPG 		197
+#define IMX6UL_CLK_UART5_SERIAL 	198
+#define IMX6UL_CLK_UART6_IPG 		199
+#define IMX6UL_CLK_UART6_SERIAL 	200
+#define IMX6UL_CLK_UART7_IPG 		201
+#define IMX6UL_CLK_UART7_SERIAL 	202
+#define IMX6UL_CLK_UART8_IPG 		203
+#define IMX6UL_CLK_UART8_SERIAL 	204
+#define IMX6UL_CLK_USBOH3 		205
+#define IMX6UL_CLK_USDHC1 		206
+#define IMX6UL_CLK_USDHC2 		207
+#define IMX6UL_CLK_WDOG1 		208
+#define IMX6UL_CLK_WDOG2 		209
+#define IMX6UL_CLK_WDOG3 		210
+#define IMX6UL_CLK_LDB_DI0		211
+#define IMX6UL_CLK_AXI  		212
+#define IMX6UL_CLK_SPDIF_GCLK		213
+#define IMX6UL_CLK_GPT_3M		214
+#define IMX6UL_CLK_SIM2			215
+#define IMX6UL_CLK_SIM1			216
+#define IMX6UL_CLK_IPP_DI0		217
+#define IMX6UL_CLK_IPP_DI1		218
+#define IMX6UL_CA7_SECONDARY_SEL	219
+#define IMX6UL_CLK_PER_BCH		220
+#define IMX6UL_CLK_CSI_SEL		221
+#define IMX6UL_CLK_CSI_PODF		222
+#define IMX6UL_CLK_PLL3_120M		223
+
+#define IMX6UL_CLK_END			224
+
+#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index ff7ca35..7b1ad89 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -108,6 +108,7 @@
 #define R8A7790_CLK_VIN2		9
 #define R8A7790_CLK_VIN1		10
 #define R8A7790_CLK_VIN0		11
+#define R8A7790_CLK_ETHERAVB		12
 #define R8A7790_CLK_ETHER		13
 #define R8A7790_CLK_SATA1		14
 #define R8A7790_CLK_SATA0		15
@@ -143,6 +144,8 @@
 #define R8A7790_CLK_SCU_ALL		17
 #define R8A7790_CLK_SCU_DVC1		18
 #define R8A7790_CLK_SCU_DVC0		19
+#define R8A7790_CLK_SCU_CTU1_MIX1	20
+#define R8A7790_CLK_SCU_CTU0_MIX0	21
 #define R8A7790_CLK_SCU_SRC9		22
 #define R8A7790_CLK_SCU_SRC8		23
 #define R8A7790_CLK_SCU_SRC7		24
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index 4022683..dd09b73 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -141,6 +141,8 @@
 #define R8A7791_CLK_SCU_ALL		17
 #define R8A7791_CLK_SCU_DVC1		18
 #define R8A7791_CLK_SCU_DVC0		19
+#define R8A7791_CLK_SCU_CTU1_MIX1	20
+#define R8A7791_CLK_SCU_CTU0_MIX0	21
 #define R8A7791_CLK_SCU_SRC9		22
 #define R8A7791_CLK_SCU_SRC8		23
 #define R8A7791_CLK_SCU_SRC7		24
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
new file mode 100644
index 0000000..1579e07
--- /dev/null
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -0,0 +1,164 @@
+/*
+ * r8a7793 clock definition
+ *
+ * Copyright (C) 2014  Renesas Electronics Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_R8A7793_H__
+#define __DT_BINDINGS_CLOCK_R8A7793_H__
+
+/* CPG */
+#define R8A7793_CLK_MAIN		0
+#define R8A7793_CLK_PLL0		1
+#define R8A7793_CLK_PLL1		2
+#define R8A7793_CLK_PLL3		3
+#define R8A7793_CLK_LB			4
+#define R8A7793_CLK_QSPI		5
+#define R8A7793_CLK_SDH			6
+#define R8A7793_CLK_SD0			7
+#define R8A7793_CLK_Z			8
+#define R8A7793_CLK_RCAN		9
+#define R8A7793_CLK_ADSP		10
+
+/* MSTP0 */
+#define R8A7793_CLK_MSIOF0		0
+
+/* MSTP1 */
+#define R8A7793_CLK_VCP0		1
+#define R8A7793_CLK_VPC0		3
+#define R8A7793_CLK_SSP1		9
+#define R8A7793_CLK_TMU1		11
+#define R8A7793_CLK_3DG			12
+#define R8A7793_CLK_2DDMAC		15
+#define R8A7793_CLK_FDP1_1		18
+#define R8A7793_CLK_FDP1_0		19
+#define R8A7793_CLK_TMU3		21
+#define R8A7793_CLK_TMU2		22
+#define R8A7793_CLK_CMT0		24
+#define R8A7793_CLK_TMU0		25
+#define R8A7793_CLK_VSP1_DU1		27
+#define R8A7793_CLK_VSP1_DU0		28
+#define R8A7793_CLK_VSP1_S		31
+
+/* MSTP2 */
+#define R8A7793_CLK_SCIFA2		2
+#define R8A7793_CLK_SCIFA1		3
+#define R8A7793_CLK_SCIFA0		4
+#define R8A7793_CLK_MSIOF2		5
+#define R8A7793_CLK_SCIFB0		6
+#define R8A7793_CLK_SCIFB1		7
+#define R8A7793_CLK_MSIOF1		8
+#define R8A7793_CLK_SCIFB2		16
+#define R8A7793_CLK_SYS_DMAC1		18
+#define R8A7793_CLK_SYS_DMAC0		19
+
+/* MSTP3 */
+#define R8A7793_CLK_TPU0		4
+#define R8A7793_CLK_SDHI2		11
+#define R8A7793_CLK_SDHI1		12
+#define R8A7793_CLK_SDHI0		14
+#define R8A7793_CLK_MMCIF0		15
+#define R8A7793_CLK_IIC0		18
+#define R8A7793_CLK_PCIEC		19
+#define R8A7793_CLK_IIC1		23
+#define R8A7793_CLK_SSUSB		28
+#define R8A7793_CLK_CMT1		29
+#define R8A7793_CLK_USBDMAC0		30
+#define R8A7793_CLK_USBDMAC1		31
+
+/* MSTP4 */
+#define R8A7793_CLK_IRQC		7
+
+/* MSTP5 */
+#define R8A7793_CLK_AUDIO_DMAC1         1
+#define R8A7793_CLK_AUDIO_DMAC0         2
+#define R8A7793_CLK_ADSP_MOD		6
+#define R8A7793_CLK_THERMAL		22
+#define R8A7793_CLK_PWM			23
+
+/* MSTP7 */
+#define R8A7793_CLK_EHCI		3
+#define R8A7793_CLK_HSUSB		4
+#define R8A7793_CLK_HSCIF2		13
+#define R8A7793_CLK_SCIF5		14
+#define R8A7793_CLK_SCIF4		15
+#define R8A7793_CLK_HSCIF1		16
+#define R8A7793_CLK_HSCIF0		17
+#define R8A7793_CLK_SCIF3		18
+#define R8A7793_CLK_SCIF2		19
+#define R8A7793_CLK_SCIF1		20
+#define R8A7793_CLK_SCIF0		21
+#define R8A7793_CLK_DU1			23
+#define R8A7793_CLK_DU0			24
+#define R8A7793_CLK_LVDS0		26
+
+/* MSTP8 */
+#define R8A7793_CLK_IPMMU_SGX		0
+#define R8A7793_CLK_VIN2		9
+#define R8A7793_CLK_VIN1		10
+#define R8A7793_CLK_VIN0		11
+#define R8A7793_CLK_ETHER		13
+#define R8A7793_CLK_SATA1		14
+#define R8A7793_CLK_SATA0		15
+
+/* MSTP9 */
+#define R8A7793_CLK_GPIO7		4
+#define R8A7793_CLK_GPIO6		5
+#define R8A7793_CLK_GPIO5		7
+#define R8A7793_CLK_GPIO4		8
+#define R8A7793_CLK_GPIO3		9
+#define R8A7793_CLK_GPIO2		10
+#define R8A7793_CLK_GPIO1		11
+#define R8A7793_CLK_GPIO0		12
+#define R8A7793_CLK_RCAN1		15
+#define R8A7793_CLK_RCAN0		16
+#define R8A7793_CLK_QSPI_MOD		17
+#define R8A7793_CLK_I2C5		25
+#define R8A7793_CLK_IICDVFS		26
+#define R8A7793_CLK_I2C4		27
+#define R8A7793_CLK_I2C3		28
+#define R8A7793_CLK_I2C2		29
+#define R8A7793_CLK_I2C1		30
+#define R8A7793_CLK_I2C0		31
+
+/* MSTP10 */
+#define R8A7793_CLK_SSI_ALL		5
+#define R8A7793_CLK_SSI9		6
+#define R8A7793_CLK_SSI8		7
+#define R8A7793_CLK_SSI7		8
+#define R8A7793_CLK_SSI6		9
+#define R8A7793_CLK_SSI5		10
+#define R8A7793_CLK_SSI4		11
+#define R8A7793_CLK_SSI3		12
+#define R8A7793_CLK_SSI2		13
+#define R8A7793_CLK_SSI1		14
+#define R8A7793_CLK_SSI0		15
+#define R8A7793_CLK_SCU_ALL		17
+#define R8A7793_CLK_SCU_DVC1		18
+#define R8A7793_CLK_SCU_DVC0		19
+#define R8A7793_CLK_SCU_SRC9		22
+#define R8A7793_CLK_SCU_SRC8		23
+#define R8A7793_CLK_SCU_SRC7		24
+#define R8A7793_CLK_SCU_SRC6		25
+#define R8A7793_CLK_SCU_SRC5		26
+#define R8A7793_CLK_SCU_SRC4		27
+#define R8A7793_CLK_SCU_SRC3		28
+#define R8A7793_CLK_SCU_SRC2		29
+#define R8A7793_CLK_SCU_SRC1		30
+#define R8A7793_CLK_SCU_SRC0		31
+
+/* MSTP11 */
+#define R8A7793_CLK_SCIFA3		6
+#define R8A7793_CLK_SCIFA4		7
+#define R8A7793_CLK_SCIFA5		8
+
+#endif /* __DT_BINDINGS_CLOCK_R8A7793_H__ */
diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h
index bc1ed1d..d3a9824 100644
--- a/include/dt-bindings/clock/rk3066a-cru.h
+++ b/include/dt-bindings/clock/rk3066a-cru.h
@@ -13,6 +13,9 @@
  * GNU General Public License for more details.
  */
 
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3066A_H
+
 #include <dt-bindings/clock/rk3188-cru-common.h>
 
 /* soft-reset indices */
@@ -33,3 +36,5 @@
 #define SRST_HDMI		96
 #define SRST_HDMI_APB		97
 #define SRST_CIF1		111
+
+#endif
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
index 6a37050..8df77a7 100644
--- a/include/dt-bindings/clock/rk3188-cru-common.h
+++ b/include/dt-bindings/clock/rk3188-cru-common.h
@@ -13,6 +13,9 @@
  * GNU General Public License for more details.
  */
 
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_COMMON_H
+
 /* core clocks from */
 #define PLL_APLL		1
 #define PLL_DPLL		2
@@ -248,3 +251,5 @@
 #define SRST_PTM1_ATB		141
 #define SRST_CTM		142
 #define SRST_TS			143
+
+#endif
diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h
index 9fac8ed..9f2e631 100644
--- a/include/dt-bindings/clock/rk3188-cru.h
+++ b/include/dt-bindings/clock/rk3188-cru.h
@@ -13,6 +13,9 @@
  * GNU General Public License for more details.
  */
 
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3188_H
+
 #include <dt-bindings/clock/rk3188-cru-common.h>
 
 /* soft-reset indices */
@@ -49,3 +52,5 @@
 #define SRST_GPU_BRIDGE		121
 #define SRST_CTI3		123
 #define SRST_CTI3_APB		124
+
+#endif
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
index dea4197..c719aac 100644
--- a/include/dt-bindings/clock/rk3288-cru.h
+++ b/include/dt-bindings/clock/rk3288-cru.h
@@ -13,6 +13,9 @@
  * GNU General Public License for more details.
  */
 
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3288_H
+
 /* core clocks */
 #define PLL_APLL		1
 #define PLL_DPLL		2
@@ -376,3 +379,5 @@
 #define SRST_TSP_CLKIN0		189
 #define SRST_TSP_CLKIN1		190
 #define SRST_TSP_27M		191
+
+#endif
diff --git a/include/dt-bindings/clock/rk3368-cru.h b/include/dt-bindings/clock/rk3368-cru.h
new file mode 100644
index 0000000..9c5dd9b
--- /dev/null
+++ b/include/dt-bindings/clock/rk3368-cru.h
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_RK3368_H
+
+/* core clocks */
+#define PLL_APLLB		1
+#define PLL_APLLL		2
+#define PLL_DPLL		3
+#define PLL_CPLL		4
+#define PLL_GPLL		5
+#define PLL_NPLL		6
+#define ARMCLKB			7
+#define ARMCLKL			8
+
+/* sclk gates (special clocks) */
+#define SCLK_GPU_CORE		64
+#define SCLK_SPI0		65
+#define SCLK_SPI1		66
+#define SCLK_SPI2		67
+#define SCLK_SDMMC		68
+#define SCLK_SDIO0		69
+#define SCLK_EMMC		71
+#define SCLK_TSADC		72
+#define SCLK_SARADC		73
+#define SCLK_NANDC0		75
+#define SCLK_UART0		77
+#define SCLK_UART1		78
+#define SCLK_UART2		79
+#define SCLK_UART3		80
+#define SCLK_UART4		81
+#define SCLK_I2S_8CH		82
+#define SCLK_SPDIF_8CH		83
+#define SCLK_I2S_2CH		84
+#define SCLK_TIMER0		85
+#define SCLK_TIMER1		86
+#define SCLK_TIMER2		87
+#define SCLK_TIMER3		88
+#define SCLK_TIMER4		89
+#define SCLK_TIMER5		90
+#define SCLK_TIMER6		91
+#define SCLK_OTGPHY0		93
+#define SCLK_OTG_ADP		96
+#define SCLK_HSICPHY480M	97
+#define SCLK_HSICPHY12M		98
+#define SCLK_MACREF		99
+#define SCLK_VOP0_PWM		100
+#define SCLK_MAC_RX		102
+#define SCLK_MAC_TX		103
+#define SCLK_EDP_24M		104
+#define SCLK_EDP		105
+#define SCLK_RGA		106
+#define SCLK_ISP		107
+#define SCLK_HDCP		108
+#define SCLK_HDMI_HDCP		109
+#define SCLK_HDMI_CEC		110
+#define SCLK_HEVC_CABAC		111
+#define SCLK_HEVC_CORE		112
+#define SCLK_I2S_8CH_OUT	113
+#define SCLK_SDMMC_DRV		114
+#define SCLK_SDIO0_DRV		115
+#define SCLK_EMMC_DRV		117
+#define SCLK_SDMMC_SAMPLE	118
+#define SCLK_SDIO0_SAMPLE	119
+#define SCLK_EMMC_SAMPLE	121
+#define SCLK_USBPHY480M		122
+#define SCLK_PVTM_CORE		123
+#define SCLK_PVTM_GPU		124
+#define SCLK_PVTM_PMU		125
+#define SCLK_SFC		126
+#define SCLK_MAC		127
+#define SCLK_MACREF_OUT		128
+
+#define DCLK_VOP		190
+#define MCLK_CRYPTO		191
+
+/* aclk gates */
+#define ACLK_GPU_MEM		192
+#define ACLK_GPU_CFG		193
+#define ACLK_DMAC_BUS		194
+#define ACLK_DMAC_PERI		195
+#define ACLK_PERI_MMU		196
+#define ACLK_GMAC		197
+#define ACLK_VOP		198
+#define ACLK_VOP_IEP		199
+#define ACLK_RGA		200
+#define ACLK_HDCP		201
+#define ACLK_IEP		202
+#define ACLK_VIO0_NOC		203
+#define ACLK_VIP		204
+#define ACLK_ISP		205
+#define ACLK_VIO1_NOC		206
+#define ACLK_VIDEO		208
+#define ACLK_BUS		209
+#define ACLK_PERI		210
+
+/* pclk gates */
+#define PCLK_GPIO0		320
+#define PCLK_GPIO1		321
+#define PCLK_GPIO2		322
+#define PCLK_GPIO3		323
+#define PCLK_PMUGRF		324
+#define PCLK_MAILBOX		325
+#define PCLK_GRF		329
+#define PCLK_SGRF		330
+#define PCLK_PMU		331
+#define PCLK_I2C0		332
+#define PCLK_I2C1		333
+#define PCLK_I2C2		334
+#define PCLK_I2C3		335
+#define PCLK_I2C4		336
+#define PCLK_I2C5		337
+#define PCLK_SPI0		338
+#define PCLK_SPI1		339
+#define PCLK_SPI2		340
+#define PCLK_UART0		341
+#define PCLK_UART1		342
+#define PCLK_UART2		343
+#define PCLK_UART3		344
+#define PCLK_UART4		345
+#define PCLK_TSADC		346
+#define PCLK_SARADC		347
+#define PCLK_SIM		348
+#define PCLK_GMAC		349
+#define PCLK_PWM0		350
+#define PCLK_PWM1		351
+#define PCLK_TIMER0		353
+#define PCLK_TIMER1		354
+#define PCLK_EDP_CTRL		355
+#define PCLK_MIPI_DSI0		356
+#define PCLK_MIPI_CSI		358
+#define PCLK_HDCP		359
+#define PCLK_HDMI_CTRL		360
+#define PCLK_VIO_H2P		361
+#define PCLK_BUS		362
+#define PCLK_PERI		363
+#define PCLK_DDRUPCTL		364
+#define PCLK_DDRPHY		365
+#define PCLK_ISP		366
+#define PCLK_VIP		367
+#define PCLK_WDT		368
+
+/* hclk gates */
+#define HCLK_SFC		448
+#define HCLK_OTG0		449
+#define HCLK_HOST0		450
+#define HCLK_HOST1		451
+#define HCLK_HSIC		452
+#define HCLK_NANDC0		453
+#define HCLK_TSP		455
+#define HCLK_SDMMC		456
+#define HCLK_SDIO0		457
+#define HCLK_EMMC		459
+#define HCLK_HSADC		460
+#define HCLK_CRYPTO		461
+#define HCLK_I2S_2CH		462
+#define HCLK_I2S_8CH		463
+#define HCLK_SPDIF		464
+#define HCLK_VOP		465
+#define HCLK_ROM		467
+#define HCLK_IEP		468
+#define HCLK_ISP		469
+#define HCLK_RGA		470
+#define HCLK_VIO_AHB_ARBI	471
+#define HCLK_VIO_NOC		472
+#define HCLK_VIP		473
+#define HCLK_VIO_H2P		474
+#define HCLK_VIO_HDCPMMU	475
+#define HCLK_VIDEO		476
+#define HCLK_BUS		477
+#define HCLK_PERI		478
+
+#define CLK_NR_CLKS		(HCLK_PERI + 1)
+
+/* soft-reset indices */
+#define SRST_CORE_B0		0
+#define SRST_CORE_B1		1
+#define SRST_CORE_B2		2
+#define SRST_CORE_B3		3
+#define SRST_CORE_B0_PO		4
+#define SRST_CORE_B1_PO		5
+#define SRST_CORE_B2_PO		6
+#define SRST_CORE_B3_PO		7
+#define SRST_L2_B		8
+#define SRST_ADB_B		9
+#define SRST_PD_CORE_B_NIU	10
+#define SRST_PDBUS_STRSYS	11
+#define SRST_SOCDBG_B		14
+#define SRST_CORE_B_DBG		15
+
+#define SRST_DMAC1		18
+#define SRST_INTMEM		19
+#define SRST_ROM		20
+#define SRST_SPDIF8CH		21
+#define SRST_I2S8CH		23
+#define SRST_MAILBOX		24
+#define SRST_I2S2CH		25
+#define SRST_EFUSE_256		26
+#define SRST_MCU_SYS		28
+#define SRST_MCU_PO		29
+#define SRST_MCU_NOC		30
+#define SRST_EFUSE		31
+
+#define SRST_GPIO0		32
+#define SRST_GPIO1		33
+#define SRST_GPIO2		34
+#define SRST_GPIO3		35
+#define SRST_GPIO4		36
+#define SRST_PMUGRF		41
+#define SRST_I2C0		42
+#define SRST_I2C1		43
+#define SRST_I2C2		44
+#define SRST_I2C3		45
+#define SRST_I2C4		46
+#define SRST_I2C5		47
+
+#define SRST_DWPWM		48
+#define SRST_MMC_PERI		49
+#define SRST_PERIPH_MMU		50
+#define SRST_GRF		55
+#define SRST_PMU		56
+#define SRST_PERIPH_AXI		57
+#define SRST_PERIPH_AHB		58
+#define SRST_PERIPH_APB		59
+#define SRST_PERIPH_NIU		60
+#define SRST_PDPERI_AHB_ARBI	61
+#define SRST_EMEM		62
+#define SRST_USB_PERI		63
+
+#define SRST_DMAC2		64
+#define SRST_MAC		66
+#define SRST_GPS		67
+#define SRST_RKPWM		69
+#define SRST_USBHOST0		72
+#define SRST_HSIC		73
+#define SRST_HSIC_AUX		74
+#define SRST_HSIC_PHY		75
+#define SRST_HSADC		76
+#define SRST_NANDC0		77
+#define SRST_SFC		79
+
+#define SRST_SPI0		83
+#define SRST_SPI1		84
+#define SRST_SPI2		85
+#define SRST_SARADC		87
+#define SRST_PDALIVE_NIU	88
+#define SRST_PDPMU_INTMEM	89
+#define SRST_PDPMU_NIU		90
+#define SRST_SGRF		91
+
+#define SRST_VIO_ARBI		96
+#define SRST_RGA_NIU		97
+#define SRST_VIO0_NIU_AXI	98
+#define SRST_VIO_NIU_AHB	99
+#define SRST_LCDC0_AXI		100
+#define SRST_LCDC0_AHB		101
+#define SRST_LCDC0_DCLK		102
+#define SRST_VIP		104
+#define SRST_RGA_CORE		105
+#define SRST_IEP_AXI		106
+#define SRST_IEP_AHB		107
+#define SRST_RGA_AXI		108
+#define SRST_RGA_AHB		109
+#define SRST_ISP		110
+#define SRST_EDP_24M		111
+
+#define SRST_VIDEO_AXI		112
+#define SRST_VIDEO_AHB		113
+#define SRST_MIPIDPHYTX		114
+#define SRST_MIPIDSI0		115
+#define SRST_MIPIDPHYRX		116
+#define SRST_MIPICSI		117
+#define SRST_GPU		120
+#define SRST_HDMI		121
+#define SRST_EDP		122
+#define SRST_PMU_PVTM		123
+#define SRST_CORE_PVTM		124
+#define SRST_GPU_PVTM		125
+#define SRST_GPU_SYS		126
+#define SRST_GPU_MEM_NIU	127
+
+#define SRST_MMC0		128
+#define SRST_SDIO0		129
+#define SRST_EMMC		131
+#define SRST_USBOTG_AHB		132
+#define SRST_USBOTG_PHY		133
+#define SRST_USBOTG_CON		134
+#define SRST_USBHOST0_AHB	135
+#define SRST_USBHOST0_PHY	136
+#define SRST_USBHOST0_CON	137
+#define SRST_USBOTG_UTMI	138
+#define SRST_USBHOST1_UTMI	139
+#define SRST_USB_ADP		141
+
+#define SRST_CORESIGHT		144
+#define SRST_PD_CORE_AHB_NOC	145
+#define SRST_PD_CORE_APB_NOC	146
+#define SRST_GIC		148
+#define SRST_LCDC_PWM0		149
+#define SRST_RGA_H2P_BRG	153
+#define SRST_VIDEO		154
+#define SRST_GPU_CFG_NIU	157
+#define SRST_TSADC		159
+
+#define SRST_DDRPHY0		160
+#define SRST_DDRPHY0_APB	161
+#define SRST_DDRCTRL0		162
+#define SRST_DDRCTRL0_APB	163
+#define SRST_VIDEO_NIU		165
+#define SRST_VIDEO_NIU_AHB	167
+#define SRST_DDRMSCH0		170
+#define SRST_PDBUS_AHB		173
+#define SRST_CRYPTO		174
+
+#define SRST_UART0		179
+#define SRST_UART1		180
+#define SRST_UART2		181
+#define SRST_UART3		182
+#define SRST_UART4		183
+#define SRST_SIMC		186
+#define SRST_TSP		188
+#define SRST_TSP_CLKIN0		189
+
+#define SRST_CORE_L0		192
+#define SRST_CORE_L1		193
+#define SRST_CORE_L2		194
+#define SRST_CORE_L3		195
+#define SRST_CORE_L0_PO		196
+#define SRST_CORE_L1_PO		197
+#define SRST_CORE_L2_PO		198
+#define SRST_CORE_L3_PO		199
+#define SRST_L2_L		200
+#define SRST_ADB_L		201
+#define SRST_PD_CORE_L_NIU	202
+#define SRST_CCI_SYS		203
+#define SRST_CCI_DDR		204
+#define SRST_CCI		205
+#define SRST_SOCDBG_L		206
+#define SRST_CORE_L_DBG		207
+
+#define SRST_CORE_B0_NC		208
+#define SRST_CORE_B0_PO_NC	209
+#define SRST_L2_B_NC		210
+#define SRST_ADB_B_NC		211
+#define SRST_PD_CORE_B_NIU_NC	212
+#define SRST_PDBUS_STRSYS_NC	213
+#define SRST_CORE_L0_NC		214
+#define SRST_CORE_L0_PO_NC	215
+#define SRST_L2_L_NC		216
+#define SRST_ADB_L_NC		217
+#define SRST_PD_CORE_L_NIU_NC	218
+#define SRST_CCI_SYS_NC		219
+#define SRST_CCI_DDR_NC		220
+#define SRST_CCI_NC		221
+#define SRST_TRACE_NC		222
+
+#define SRST_TIMER00		224
+#define SRST_TIMER01		225
+#define SRST_TIMER02		226
+#define SRST_TIMER03		227
+#define SRST_TIMER04		228
+#define SRST_TIMER05		229
+#define SRST_TIMER10		230
+#define SRST_TIMER11		231
+#define SRST_TIMER12		232
+#define SRST_TIMER13		233
+#define SRST_TIMER14		234
+#define SRST_TIMER15		235
+#define SRST_TIMER0_APB		236
+#define SRST_TIMER1_APB		237
+
+#endif
diff --git a/include/dt-bindings/clock/zx296702-clock.h b/include/dt-bindings/clock/zx296702-clock.h
index e683dbb..26ee564 100644
--- a/include/dt-bindings/clock/zx296702-clock.h
+++ b/include/dt-bindings/clock/zx296702-clock.h
@@ -153,7 +153,16 @@
 #define ZX296702_I2S0_WCLK			9
 #define ZX296702_I2S0_PCLK			10
 #define ZX296702_I2S0_DIV			11
-#define ZX296702_LSP0CLK_END			12
+#define ZX296702_I2S1_WCLK_MUX			12
+#define ZX296702_I2S1_WCLK			13
+#define ZX296702_I2S1_PCLK			14
+#define ZX296702_I2S1_DIV			15
+#define ZX296702_I2S2_WCLK_MUX			16
+#define ZX296702_I2S2_WCLK			17
+#define ZX296702_I2S2_PCLK			18
+#define ZX296702_I2S2_DIV			19
+#define ZX296702_GPIO_CLK			20
+#define ZX296702_LSP0CLK_END			21
 
 #define ZX296702_UART0_WCLK_MUX			0
 #define ZX296702_UART0_WCLK			1
@@ -165,6 +174,10 @@
 #define ZX296702_SDMMC0_WCLK_DIV		7
 #define ZX296702_SDMMC0_WCLK			8
 #define ZX296702_SDMMC0_PCLK			9
-#define ZX296702_LSP1CLK_END			10
+#define ZX296702_SPDIF1_WCLK_MUX		10
+#define ZX296702_SPDIF1_WCLK			11
+#define ZX296702_SPDIF1_PCLK			12
+#define ZX296702_SPDIF1_DIV			13
+#define ZX296702_LSP1CLK_END			14
 
 #endif /* __DT_BINDINGS_CLOCK_ZX296702_H */
diff --git a/include/dt-bindings/dma/axi-dmac.h b/include/dt-bindings/dma/axi-dmac.h
new file mode 100644
index 0000000..ad9e6ec
--- /dev/null
+++ b/include/dt-bindings/dma/axi-dmac.h
@@ -0,0 +1,48 @@
+/*
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DT_BINDINGS_DMA_AXI_DMAC_H__
+#define __DT_BINDINGS_DMA_AXI_DMAC_H__
+
+#define AXI_DMAC_BUS_TYPE_AXI_MM		0
+#define AXI_DMAC_BUS_TYPE_AXI_STREAM	1
+#define AXI_DMAC_BUS_TYPE_FIFO			2
+
+#endif
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h
deleted file mode 100644
index df017fd..0000000
--- a/include/dt-bindings/dma/jz4780-dma.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__
-#define __DT_BINDINGS_DMA_JZ4780_DMA_H__
-
-/*
- * Request type numbers for the JZ4780 DMA controller (written to the DRTn
- * register for the channel).
- */
-#define JZ4780_DMA_I2S1_TX	0x4
-#define JZ4780_DMA_I2S1_RX	0x5
-#define JZ4780_DMA_I2S0_TX	0x6
-#define JZ4780_DMA_I2S0_RX	0x7
-#define JZ4780_DMA_AUTO		0x8
-#define JZ4780_DMA_SADC_RX	0x9
-#define JZ4780_DMA_UART4_TX	0xc
-#define JZ4780_DMA_UART4_RX	0xd
-#define JZ4780_DMA_UART3_TX	0xe
-#define JZ4780_DMA_UART3_RX	0xf
-#define JZ4780_DMA_UART2_TX	0x10
-#define JZ4780_DMA_UART2_RX	0x11
-#define JZ4780_DMA_UART1_TX	0x12
-#define JZ4780_DMA_UART1_RX	0x13
-#define JZ4780_DMA_UART0_TX	0x14
-#define JZ4780_DMA_UART0_RX	0x15
-#define JZ4780_DMA_SSI0_TX	0x16
-#define JZ4780_DMA_SSI0_RX	0x17
-#define JZ4780_DMA_SSI1_TX	0x18
-#define JZ4780_DMA_SSI1_RX	0x19
-#define JZ4780_DMA_MSC0_TX	0x1a
-#define JZ4780_DMA_MSC0_RX	0x1b
-#define JZ4780_DMA_MSC1_TX	0x1c
-#define JZ4780_DMA_MSC1_RX	0x1d
-#define JZ4780_DMA_MSC2_TX	0x1e
-#define JZ4780_DMA_MSC2_RX	0x1f
-#define JZ4780_DMA_PCM0_TX	0x20
-#define JZ4780_DMA_PCM0_RX	0x21
-#define JZ4780_DMA_SMB0_TX	0x24
-#define JZ4780_DMA_SMB0_RX	0x25
-#define JZ4780_DMA_SMB1_TX	0x26
-#define JZ4780_DMA_SMB1_RX	0x27
-#define JZ4780_DMA_SMB2_TX	0x28
-#define JZ4780_DMA_SMB2_RX	0x29
-#define JZ4780_DMA_SMB3_TX	0x2a
-#define JZ4780_DMA_SMB3_RX	0x2b
-#define JZ4780_DMA_SMB4_TX	0x2c
-#define JZ4780_DMA_SMB4_RX	0x2d
-#define JZ4780_DMA_DES_TX	0x2e
-#define JZ4780_DMA_DES_RX	0x2f
-
-#endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */
diff --git a/include/dt-bindings/leds/leds-ns2.h b/include/dt-bindings/leds/leds-ns2.h
new file mode 100644
index 0000000..491c5f9
--- /dev/null
+++ b/include/dt-bindings/leds/leds-ns2.h
@@ -0,0 +1,8 @@
+#ifndef _DT_BINDINGS_LEDS_NS2_H
+#define _DT_BINDINGS_LEDS_NS2_H
+
+#define NS_V2_LED_OFF	0
+#define NS_V2_LED_ON	1
+#define NS_V2_LED_SATA	2
+
+#endif
diff --git a/include/dt-bindings/memory/tegra210-mc.h b/include/dt-bindings/memory/tegra210-mc.h
new file mode 100644
index 0000000..d1731bc
--- /dev/null
+++ b/include/dt-bindings/memory/tegra210-mc.h
@@ -0,0 +1,36 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA210_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA210_MC_H
+
+#define TEGRA_SWGROUP_PTC	0
+#define TEGRA_SWGROUP_DC	1
+#define TEGRA_SWGROUP_DCB	2
+#define TEGRA_SWGROUP_AFI	3
+#define TEGRA_SWGROUP_AVPC	4
+#define TEGRA_SWGROUP_HDA	5
+#define TEGRA_SWGROUP_HC	6
+#define TEGRA_SWGROUP_NVENC	7
+#define TEGRA_SWGROUP_PPCS	8
+#define TEGRA_SWGROUP_SATA	9
+#define TEGRA_SWGROUP_MPCORE	10
+#define TEGRA_SWGROUP_ISP2	11
+#define TEGRA_SWGROUP_XUSB_HOST	12
+#define TEGRA_SWGROUP_XUSB_DEV	13
+#define TEGRA_SWGROUP_ISP2B	14
+#define TEGRA_SWGROUP_TSEC	15
+#define TEGRA_SWGROUP_A9AVP	16
+#define TEGRA_SWGROUP_GPU	17
+#define TEGRA_SWGROUP_SDMMC1A	18
+#define TEGRA_SWGROUP_SDMMC2A	19
+#define TEGRA_SWGROUP_SDMMC3A	20
+#define TEGRA_SWGROUP_SDMMC4A	21
+#define TEGRA_SWGROUP_VIC	22
+#define TEGRA_SWGROUP_VI	23
+#define TEGRA_SWGROUP_NVDEC	24
+#define TEGRA_SWGROUP_APE	25
+#define TEGRA_SWGROUP_NVJPG	26
+#define TEGRA_SWGROUP_SE	27
+#define TEGRA_SWGROUP_AXIAP	28
+#define TEGRA_SWGROUP_ETR	29
+#define TEGRA_SWGROUP_TSECB	30
+
+#endif
diff --git a/include/dt-bindings/mfd/st-lpc.h b/include/dt-bindings/mfd/st-lpc.h
index e3e6c75..d05894a 100644
--- a/include/dt-bindings/mfd/st-lpc.h
+++ b/include/dt-bindings/mfd/st-lpc.h
@@ -11,5 +11,6 @@
 
 #define ST_LPC_MODE_RTC		0
 #define ST_LPC_MODE_WDT		1
+#define ST_LPC_MODE_CLKSRC	2
 
 #endif /* __DT_BINDINGS_ST_LPC_H__ */
diff --git a/include/dt-bindings/pinctrl/am43xx.h b/include/dt-bindings/pinctrl/am43xx.h
index b00bbc9..774dc1e 100644
--- a/include/dt-bindings/pinctrl/am43xx.h
+++ b/include/dt-bindings/pinctrl/am43xx.h
@@ -14,6 +14,7 @@
 #define MUX_MODE6	6
 #define MUX_MODE7	7
 #define MUX_MODE8	8
+#define MUX_MODE9	9
 
 #define PULL_DISABLE		(1 << 16)
 #define PULL_UP			(1 << 17)
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 7448edf..4379e29 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -30,6 +30,26 @@
 #define MUX_MODE14	0xe
 #define MUX_MODE15	0xf
 
+/* Certain pins need virtual mode, but note: they may glitch */
+#define MUX_VIRTUAL_MODE0	(MODE_SELECT | (0x0 << 4))
+#define MUX_VIRTUAL_MODE1	(MODE_SELECT | (0x1 << 4))
+#define MUX_VIRTUAL_MODE2	(MODE_SELECT | (0x2 << 4))
+#define MUX_VIRTUAL_MODE3	(MODE_SELECT | (0x3 << 4))
+#define MUX_VIRTUAL_MODE4	(MODE_SELECT | (0x4 << 4))
+#define MUX_VIRTUAL_MODE5	(MODE_SELECT | (0x5 << 4))
+#define MUX_VIRTUAL_MODE6	(MODE_SELECT | (0x6 << 4))
+#define MUX_VIRTUAL_MODE7	(MODE_SELECT | (0x7 << 4))
+#define MUX_VIRTUAL_MODE8	(MODE_SELECT | (0x8 << 4))
+#define MUX_VIRTUAL_MODE9	(MODE_SELECT | (0x9 << 4))
+#define MUX_VIRTUAL_MODE10	(MODE_SELECT | (0xa << 4))
+#define MUX_VIRTUAL_MODE11	(MODE_SELECT | (0xb << 4))
+#define MUX_VIRTUAL_MODE12	(MODE_SELECT | (0xc << 4))
+#define MUX_VIRTUAL_MODE13	(MODE_SELECT | (0xd << 4))
+#define MUX_VIRTUAL_MODE14	(MODE_SELECT | (0xe << 4))
+#define MUX_VIRTUAL_MODE15	(MODE_SELECT | (0xf << 4))
+
+#define MODE_SELECT		(1 << 8)
+
 #define PULL_ENA		(0 << 16)
 #define PULL_DIS		(1 << 16)
 #define PULL_UP			(1 << 17)
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index c102054..a15c170 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -7,6 +7,47 @@
 #define _DT_BINDINGS_PINCTRL_QCOM_PMIC_MPP_H
 
 /* power-source */
+
+/* Digital Input/Output: level [PM8058] */
+#define PM8058_MPP_VPH			0
+#define PM8058_MPP_S3			1
+#define PM8058_MPP_L2			2
+#define PM8058_MPP_L3			3
+
+/* Digital Input/Output: level [PM8901] */
+#define PM8901_MPP_MSMIO		0
+#define PM8901_MPP_DIG			1
+#define PM8901_MPP_L5			2
+#define PM8901_MPP_S4			3
+#define PM8901_MPP_VPH			4
+
+/* Digital Input/Output: level [PM8921] */
+#define PM8921_MPP_S4			1
+#define PM8921_MPP_L15			3
+#define PM8921_MPP_L17			4
+#define PM8921_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8821] */
+#define PM8821_MPP_1P8			0
+#define PM8821_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8018] */
+#define PM8018_MPP_L4			0
+#define PM8018_MPP_L14			1
+#define PM8018_MPP_S3			2
+#define PM8018_MPP_L6			3
+#define PM8018_MPP_L2			4
+#define PM8018_MPP_L5			5
+#define PM8018_MPP_VPH			7
+
+/* Digital Input/Output: level [PM8038] */
+#define PM8038_MPP_L20			0
+#define PM8038_MPP_L11			1
+#define PM8038_MPP_L5			2
+#define PM8038_MPP_L15			3
+#define PM8038_MPP_L17			4
+#define PM8038_MPP_VPH			7
+
 #define PM8841_MPP_VPH			0
 #define PM8841_MPP_S3			2
 
@@ -37,6 +78,16 @@
 #define PMIC_MPP_AMUX_ROUTE_ABUS3	6
 #define PMIC_MPP_AMUX_ROUTE_ABUS4	7
 
+/* Analog Output: level */
+#define PMIC_MPP_AOUT_LVL_1V25		0
+#define PMIC_MPP_AOUT_LVL_1V25_2	1
+#define PMIC_MPP_AOUT_LVL_0V625		2
+#define PMIC_MPP_AOUT_LVL_0V3125	3
+#define PMIC_MPP_AOUT_LVL_MPP		4
+#define PMIC_MPP_AOUT_LVL_ABUS1		5
+#define PMIC_MPP_AOUT_LVL_ABUS2		6
+#define PMIC_MPP_AOUT_LVL_ABUS3		7
+
 /* To be used with "function" */
 #define PMIC_MPP_FUNC_NORMAL		"normal"
 #define PMIC_MPP_FUNC_PAIRED		"paired"
diff --git a/include/dt-bindings/power/mt8173-power.h b/include/dt-bindings/power/mt8173-power.h
new file mode 100644
index 0000000..b34cee9
--- /dev/null
+++ b/include/dt-bindings/power/mt8173-power.h
@@ -0,0 +1,15 @@
+#ifndef _DT_BINDINGS_POWER_MT8173_POWER_H
+#define _DT_BINDINGS_POWER_MT8173_POWER_H
+
+#define MT8173_POWER_DOMAIN_VDEC	0
+#define MT8173_POWER_DOMAIN_VENC	1
+#define MT8173_POWER_DOMAIN_ISP		2
+#define MT8173_POWER_DOMAIN_MM		3
+#define MT8173_POWER_DOMAIN_VENC_LT	4
+#define MT8173_POWER_DOMAIN_AUDIO	5
+#define MT8173_POWER_DOMAIN_USB		6
+#define MT8173_POWER_DOMAIN_MFG_ASYNC	7
+#define MT8173_POWER_DOMAIN_MFG_2D	8
+#define MT8173_POWER_DOMAIN_MFG		9
+
+#endif /* _DT_BINDINGS_POWER_MT8173_POWER_H */
diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10.h b/include/dt-bindings/reset/altr,rst-mgr-a10.h
new file mode 100644
index 0000000..acb0bbf
--- /dev/null
+++ b/include/dt-bindings/reset/altr,rst-mgr-a10.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014, Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H
+#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10_H
+
+/* MPUMODRST */
+#define CPU0_RESET		0
+#define CPU1_RESET		1
+#define WDS_RESET		2
+#define SCUPER_RESET		3
+
+/* PER0MODRST */
+#define EMAC0_RESET		32
+#define EMAC1_RESET		33
+#define EMAC2_RESET		34
+#define USB0_RESET		35
+#define USB1_RESET		36
+#define NAND_RESET		37
+#define QSPI_RESET		38
+#define SDMMC_RESET		39
+#define EMAC0_OCP_RESET		40
+#define EMAC1_OCP_RESET		41
+#define EMAC2_OCP_RESET		42
+#define USB0_OCP_RESET		43
+#define USB1_OCP_RESET		44
+#define NAND_OCP_RESET		45
+#define QSPI_OCP_RESET		46
+#define SDMMC_OCP_RESET		47
+#define DMA_RESET		48
+#define SPIM0_RESET		49
+#define SPIM1_RESET		50
+#define SPIS0_RESET		51
+#define SPIS1_RESET		52
+#define DMA_OCP_RESET		53
+#define EMAC_PTP_RESET		54
+/* 55 is empty */
+#define DMAIF0_RESET		56
+#define DMAIF1_RESET		57
+#define DMAIF2_RESET		58
+#define DMAIF3_RESET		59
+#define DMAIF4_RESET		60
+#define DMAIF5_RESET		61
+#define DMAIF6_RESET		62
+#define DMAIF7_RESET		63
+
+/* PER1MODRST */
+#define L4WD0_RESET		64
+#define L4WD1_RESET		65
+#define L4SYSTIMER0_RESET	66
+#define L4SYSTIMER1_RESET	67
+#define SPTIMER0_RESET		68
+#define SPTIMER1_RESET		69
+/* 70-71 are reserved */
+#define I2C0_RESET		72
+#define I2C1_RESET		73
+#define I2C2_RESET		74
+#define I2C3_RESET		75
+#define I2C4_RESET		76
+/* 77-79 are reserved */
+#define UART0_RESET		80
+#define UART1_RESET		81
+/* 82-87 are reserved */
+#define GPIO0_RESET		88
+#define GPIO1_RESET		89
+#define GPIO2_RESET		90
+
+/* BRGMODRST */
+#define HPS2FPGA_RESET		96
+#define LWHPS2FPGA_RESET	97
+#define FPGA2HPS_RESET		98
+#define F2SSDRAM0_RESET		99
+#define F2SSDRAM1_RESET		100
+#define F2SSDRAM2_RESET		101
+#define DDRSCH_RESET		102
+
+/* SYSMODRST */
+#define ROM_RESET		128
+#define OCRAM_RESET		129
+/* 130 is reserved */
+#define FPGAMGR_RESET		131
+#define S2F_RESET		132
+#define SYSDBG_RESET		133
+#define OCRAM_OCP_RESET		134
+
+/* COLDMODRST */
+#define CLKMGRCOLD_RESET	160
+/* 161-162 are reserved */
+#define S2FCOLD_RESET		163
+#define TIMESTAMPCOLD_RESET	164
+#define TAPCOLD_RESET		165
+#define HMCCOLD_RESET		166
+#define IOMGRCOLD_RESET		167
+
+/* NRSTMODRST */
+#define NRSTPINOE_RESET		192
+
+/* DBGMODRST */
+#define DBG_RESET		224
+#endif
diff --git a/include/dt-bindings/reset-controller/stih407-resets.h b/include/dt-bindings/reset/stih407-resets.h
similarity index 100%
rename from include/dt-bindings/reset-controller/stih407-resets.h
rename to include/dt-bindings/reset/stih407-resets.h
diff --git a/include/dt-bindings/reset-controller/stih415-resets.h b/include/dt-bindings/reset/stih415-resets.h
similarity index 100%
rename from include/dt-bindings/reset-controller/stih415-resets.h
rename to include/dt-bindings/reset/stih415-resets.h
diff --git a/include/dt-bindings/reset-controller/stih416-resets.h b/include/dt-bindings/reset/stih416-resets.h
similarity index 100%
rename from include/dt-bindings/reset-controller/stih416-resets.h
rename to include/dt-bindings/reset/stih416-resets.h
diff --git a/include/dt-bindings/reset/tegra124-car.h b/include/dt-bindings/reset/tegra124-car.h
new file mode 100644
index 0000000..070e4f6
--- /dev/null
+++ b/include/dt-bindings/reset/tegra124-car.h
@@ -0,0 +1,12 @@
+/*
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
+ */
+
+#ifndef _DT_BINDINGS_RESET_TEGRA124_CAR_H
+#define _DT_BINDINGS_RESET_TEGRA124_CAR_H
+
+#define TEGRA124_RESET(x)		(6 * 32 + (x))
+#define TEGRA124_RST_DFLL_DVCO		TEGRA124_RESET(0)
+
+#endif	/* _DT_BINDINGS_RESET_TEGRA124_CAR_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d2445fa..7235c48 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,10 +15,6 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
@@ -221,7 +217,7 @@
 
 int acpi_pci_irq_enable (struct pci_dev *dev);
 void acpi_penalize_isa_irq(int irq, int active);
-
+void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
 void acpi_pci_irq_disable (struct pci_dev *dev);
 
 extern int ec_read(u8 addr, u8 *val);
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 0ddb5c0..6a0a89e 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -33,12 +33,14 @@
 #define UART01x_DR		0x00	/* Data read or written from the interface. */
 #define UART01x_RSR		0x04	/* Receive status register (Read). */
 #define UART01x_ECR		0x04	/* Error clear register (Write). */
+#define ZX_UART01x_DR		0x04	/* Data read or written from the interface. */
 #define UART010_LCRH		0x08	/* Line control register, high byte. */
 #define ST_UART011_DMAWM	0x08    /* DMA watermark configure register. */
 #define UART010_LCRM		0x0C	/* Line control register, middle byte. */
 #define ST_UART011_TIMEOUT	0x0C    /* Timeout period register. */
 #define UART010_LCRL		0x10	/* Line control register, low byte. */
 #define UART010_CR		0x14	/* Control register. */
+#define ZX_UART01x_FR		0x14	/* Flag register (Read only). */
 #define UART01x_FR		0x18	/* Flag register (Read only). */
 #define UART010_IIR		0x1C	/* Interrupt identification register (Read). */
 #define UART010_ICR		0x1C	/* Interrupt clear register (Write). */
@@ -49,13 +51,21 @@
 #define UART011_LCRH		0x2c	/* Line control register. */
 #define ST_UART011_LCRH_TX	0x2c    /* Tx Line control register. */
 #define UART011_CR		0x30	/* Control register. */
+#define ZX_UART011_LCRH_TX	0x30    /* Tx Line control register. */
 #define UART011_IFLS		0x34	/* Interrupt fifo level select. */
+#define ZX_UART011_CR		0x34	/* Control register. */
+#define ZX_UART011_IFLS		0x38	/* Interrupt fifo level select. */
 #define UART011_IMSC		0x38	/* Interrupt mask. */
 #define UART011_RIS		0x3c	/* Raw interrupt status. */
 #define UART011_MIS		0x40	/* Masked interrupt status. */
+#define ZX_UART011_IMSC		0x40	/* Interrupt mask. */
 #define UART011_ICR		0x44	/* Interrupt clear register. */
+#define ZX_UART011_RIS		0x44	/* Raw interrupt status. */
 #define UART011_DMACR		0x48	/* DMA control register. */
+#define ZX_UART011_MIS		0x48	/* Masked interrupt status. */
+#define ZX_UART011_ICR		0x4c	/* Interrupt clear register. */
 #define ST_UART011_XFCR		0x50	/* XON/XOFF control register. */
+#define ZX_UART011_DMACR	0x50	/* DMA control register. */
 #define ST_UART011_XON1		0x54	/* XON1 register. */
 #define ST_UART011_XON2		0x58	/* XON2 register. */
 #define ST_UART011_XOFF1	0x5C	/* XON1 register. */
@@ -75,15 +85,19 @@
 #define UART01x_RSR_PE 		0x02
 #define UART01x_RSR_FE 		0x01
 
+#define ZX_UART01x_FR_BUSY	0x300
 #define UART011_FR_RI		0x100
 #define UART011_FR_TXFE		0x080
 #define UART011_FR_RXFF		0x040
 #define UART01x_FR_TXFF		0x020
 #define UART01x_FR_RXFE		0x010
 #define UART01x_FR_BUSY		0x008
+#define ZX_UART01x_FR_DSR       0x008
 #define UART01x_FR_DCD 		0x004
 #define UART01x_FR_DSR 		0x002
+#define ZX_UART01x_FR_CTS	0x002
 #define UART01x_FR_CTS 		0x001
+#define ZX_UART011_FR_RI	0x001
 #define UART01x_FR_TMSK		(UART01x_FR_TXFF + UART01x_FR_BUSY)
 
 #define UART011_CR_CTSEN	0x8000	/* CTS hardware flow control */
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index 00beddf..ee696d7 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -16,115 +16,151 @@
 #ifndef ATMEL_SERIAL_H
 #define ATMEL_SERIAL_H
 
-#define ATMEL_US_CR		0x00			/* Control Register */
-#define		ATMEL_US_RSTRX		(1 <<  2)		/* Reset Receiver */
-#define		ATMEL_US_RSTTX		(1 <<  3)		/* Reset Transmitter */
-#define		ATMEL_US_RXEN		(1 <<  4)		/* Receiver Enable */
-#define		ATMEL_US_RXDIS		(1 <<  5)		/* Receiver Disable */
-#define		ATMEL_US_TXEN		(1 <<  6)		/* Transmitter Enable */
-#define		ATMEL_US_TXDIS		(1 <<  7)		/* Transmitter Disable */
-#define		ATMEL_US_RSTSTA		(1 <<  8)		/* Reset Status Bits */
-#define		ATMEL_US_STTBRK		(1 <<  9)		/* Start Break */
-#define		ATMEL_US_STPBRK		(1 << 10)		/* Stop Break */
-#define		ATMEL_US_STTTO		(1 << 11)		/* Start Time-out */
-#define		ATMEL_US_SENDA		(1 << 12)		/* Send Address */
-#define		ATMEL_US_RSTIT		(1 << 13)		/* Reset Iterations */
-#define		ATMEL_US_RSTNACK	(1 << 14)		/* Reset Non Acknowledge */
-#define		ATMEL_US_RETTO		(1 << 15)		/* Rearm Time-out */
-#define		ATMEL_US_DTREN		(1 << 16)		/* Data Terminal Ready Enable [AT91RM9200 only] */
-#define		ATMEL_US_DTRDIS		(1 << 17)		/* Data Terminal Ready Disable [AT91RM9200 only] */
-#define		ATMEL_US_RTSEN		(1 << 18)		/* Request To Send Enable */
-#define		ATMEL_US_RTSDIS		(1 << 19)		/* Request To Send Disable */
+#define ATMEL_US_CR		0x00	/* Control Register */
+#define	ATMEL_US_RSTRX		BIT(2)	/* Reset Receiver */
+#define	ATMEL_US_RSTTX		BIT(3)	/* Reset Transmitter */
+#define	ATMEL_US_RXEN		BIT(4)	/* Receiver Enable */
+#define	ATMEL_US_RXDIS		BIT(5)	/* Receiver Disable */
+#define	ATMEL_US_TXEN		BIT(6)	/* Transmitter Enable */
+#define	ATMEL_US_TXDIS		BIT(7)	/* Transmitter Disable */
+#define	ATMEL_US_RSTSTA		BIT(8)	/* Reset Status Bits */
+#define	ATMEL_US_STTBRK		BIT(9)	/* Start Break */
+#define	ATMEL_US_STPBRK		BIT(10)	/* Stop Break */
+#define	ATMEL_US_STTTO		BIT(11)	/* Start Time-out */
+#define	ATMEL_US_SENDA		BIT(12)	/* Send Address */
+#define	ATMEL_US_RSTIT		BIT(13)	/* Reset Iterations */
+#define	ATMEL_US_RSTNACK	BIT(14)	/* Reset Non Acknowledge */
+#define	ATMEL_US_RETTO		BIT(15)	/* Rearm Time-out */
+#define	ATMEL_US_DTREN		BIT(16)	/* Data Terminal Ready Enable */
+#define	ATMEL_US_DTRDIS		BIT(17)	/* Data Terminal Ready Disable */
+#define	ATMEL_US_RTSEN		BIT(18)	/* Request To Send Enable */
+#define	ATMEL_US_RTSDIS		BIT(19)	/* Request To Send Disable */
+#define	ATMEL_US_TXFCLR		BIT(24)	/* Transmit FIFO Clear */
+#define	ATMEL_US_RXFCLR		BIT(25)	/* Receive FIFO Clear */
+#define	ATMEL_US_TXFLCLR	BIT(26)	/* Transmit FIFO Lock Clear */
+#define	ATMEL_US_FIFOEN		BIT(30)	/* FIFO enable */
+#define	ATMEL_US_FIFODIS	BIT(31)	/* FIFO disable */
 
-#define ATMEL_US_MR		0x04			/* Mode Register */
-#define		ATMEL_US_USMODE		(0xf <<  0)		/* Mode of the USART */
-#define			ATMEL_US_USMODE_NORMAL		0
-#define			ATMEL_US_USMODE_RS485		1
-#define			ATMEL_US_USMODE_HWHS		2
-#define			ATMEL_US_USMODE_MODEM		3
-#define			ATMEL_US_USMODE_ISO7816_T0	4
-#define			ATMEL_US_USMODE_ISO7816_T1	6
-#define			ATMEL_US_USMODE_IRDA		8
-#define		ATMEL_US_USCLKS		(3   <<  4)		/* Clock Selection */
-#define			ATMEL_US_USCLKS_MCK		(0 <<  4)
-#define			ATMEL_US_USCLKS_MCK_DIV8	(1 <<  4)
-#define			ATMEL_US_USCLKS_SCK		(3 <<  4)
-#define		ATMEL_US_CHRL		(3   <<  6)		/* Character Length */
-#define			ATMEL_US_CHRL_5			(0 <<  6)
-#define			ATMEL_US_CHRL_6			(1 <<  6)
-#define			ATMEL_US_CHRL_7			(2 <<  6)
-#define			ATMEL_US_CHRL_8			(3 <<  6)
-#define		ATMEL_US_SYNC		(1 <<  8)		/* Synchronous Mode Select */
-#define		ATMEL_US_PAR		(7 <<  9)		/* Parity Type */
-#define			ATMEL_US_PAR_EVEN		(0 <<  9)
-#define			ATMEL_US_PAR_ODD		(1 <<  9)
-#define			ATMEL_US_PAR_SPACE		(2 <<  9)
-#define			ATMEL_US_PAR_MARK		(3 <<  9)
-#define			ATMEL_US_PAR_NONE		(4 <<  9)
-#define			ATMEL_US_PAR_MULTI_DROP		(6 <<  9)
-#define		ATMEL_US_NBSTOP		(3 << 12)		/* Number of Stop Bits */
-#define			ATMEL_US_NBSTOP_1		(0 << 12)
-#define			ATMEL_US_NBSTOP_1_5		(1 << 12)
-#define			ATMEL_US_NBSTOP_2		(2 << 12)
-#define		ATMEL_US_CHMODE		(3 << 14)		/* Channel Mode */
-#define			ATMEL_US_CHMODE_NORMAL		(0 << 14)
-#define			ATMEL_US_CHMODE_ECHO		(1 << 14)
-#define			ATMEL_US_CHMODE_LOC_LOOP	(2 << 14)
-#define			ATMEL_US_CHMODE_REM_LOOP	(3 << 14)
-#define		ATMEL_US_MSBF		(1 << 16)		/* Bit Order */
-#define		ATMEL_US_MODE9		(1 << 17)		/* 9-bit Character Length */
-#define		ATMEL_US_CLKO		(1 << 18)		/* Clock Output Select */
-#define		ATMEL_US_OVER		(1 << 19)		/* Oversampling Mode */
-#define		ATMEL_US_INACK		(1 << 20)		/* Inhibit Non Acknowledge */
-#define		ATMEL_US_DSNACK		(1 << 21)		/* Disable Successive NACK */
-#define		ATMEL_US_MAX_ITER	(7 << 24)		/* Max Iterations */
-#define		ATMEL_US_FILTER		(1 << 28)		/* Infrared Receive Line Filter */
+#define ATMEL_US_MR		0x04	/* Mode Register */
+#define	ATMEL_US_USMODE		GENMASK(3, 0)	/* Mode of the USART */
+#define		ATMEL_US_USMODE_NORMAL		0
+#define		ATMEL_US_USMODE_RS485		1
+#define		ATMEL_US_USMODE_HWHS		2
+#define		ATMEL_US_USMODE_MODEM		3
+#define		ATMEL_US_USMODE_ISO7816_T0	4
+#define		ATMEL_US_USMODE_ISO7816_T1	6
+#define		ATMEL_US_USMODE_IRDA		8
+#define	ATMEL_US_USCLKS		GENMASK(5, 4)	/* Clock Selection */
+#define		ATMEL_US_USCLKS_MCK		(0 <<  4)
+#define		ATMEL_US_USCLKS_MCK_DIV8	(1 <<  4)
+#define		ATMEL_US_USCLKS_SCK		(3 <<  4)
+#define	ATMEL_US_CHRL		GENMASK(7, 6)	/* Character Length */
+#define		ATMEL_US_CHRL_5			(0 <<  6)
+#define		ATMEL_US_CHRL_6			(1 <<  6)
+#define		ATMEL_US_CHRL_7			(2 <<  6)
+#define		ATMEL_US_CHRL_8			(3 <<  6)
+#define	ATMEL_US_SYNC		BIT(8)		/* Synchronous Mode Select */
+#define	ATMEL_US_PAR		GENMASK(11, 9)	/* Parity Type */
+#define		ATMEL_US_PAR_EVEN		(0 <<  9)
+#define		ATMEL_US_PAR_ODD		(1 <<  9)
+#define		ATMEL_US_PAR_SPACE		(2 <<  9)
+#define		ATMEL_US_PAR_MARK		(3 <<  9)
+#define		ATMEL_US_PAR_NONE		(4 <<  9)
+#define		ATMEL_US_PAR_MULTI_DROP		(6 <<  9)
+#define	ATMEL_US_NBSTOP		GENMASK(13, 12)	/* Number of Stop Bits */
+#define		ATMEL_US_NBSTOP_1		(0 << 12)
+#define		ATMEL_US_NBSTOP_1_5		(1 << 12)
+#define		ATMEL_US_NBSTOP_2		(2 << 12)
+#define	ATMEL_US_CHMODE		GENMASK(15, 14)	/* Channel Mode */
+#define		ATMEL_US_CHMODE_NORMAL		(0 << 14)
+#define		ATMEL_US_CHMODE_ECHO		(1 << 14)
+#define		ATMEL_US_CHMODE_LOC_LOOP	(2 << 14)
+#define		ATMEL_US_CHMODE_REM_LOOP	(3 << 14)
+#define	ATMEL_US_MSBF		BIT(16)	/* Bit Order */
+#define	ATMEL_US_MODE9		BIT(17)	/* 9-bit Character Length */
+#define	ATMEL_US_CLKO		BIT(18)	/* Clock Output Select */
+#define	ATMEL_US_OVER		BIT(19)	/* Oversampling Mode */
+#define	ATMEL_US_INACK		BIT(20)	/* Inhibit Non Acknowledge */
+#define	ATMEL_US_DSNACK		BIT(21)	/* Disable Successive NACK */
+#define	ATMEL_US_MAX_ITER	GENMASK(26, 24)	/* Max Iterations */
+#define	ATMEL_US_FILTER		BIT(28)	/* Infrared Receive Line Filter */
 
-#define ATMEL_US_IER		0x08			/* Interrupt Enable Register */
-#define		ATMEL_US_RXRDY		(1 <<  0)		/* Receiver Ready */
-#define		ATMEL_US_TXRDY		(1 <<  1)		/* Transmitter Ready */
-#define		ATMEL_US_RXBRK		(1 <<  2)		/* Break Received / End of Break */
-#define		ATMEL_US_ENDRX		(1 <<  3)		/* End of Receiver Transfer */
-#define		ATMEL_US_ENDTX		(1 <<  4)		/* End of Transmitter Transfer */
-#define		ATMEL_US_OVRE		(1 <<  5)		/* Overrun Error */
-#define		ATMEL_US_FRAME		(1 <<  6)		/* Framing Error */
-#define		ATMEL_US_PARE		(1 <<  7)		/* Parity Error */
-#define		ATMEL_US_TIMEOUT	(1 <<  8)		/* Receiver Time-out */
-#define		ATMEL_US_TXEMPTY	(1 <<  9)		/* Transmitter Empty */
-#define		ATMEL_US_ITERATION	(1 << 10)		/* Max number of Repetitions Reached */
-#define		ATMEL_US_TXBUFE		(1 << 11)		/* Transmission Buffer Empty */
-#define		ATMEL_US_RXBUFF		(1 << 12)		/* Reception Buffer Full */
-#define		ATMEL_US_NACK		(1 << 13)		/* Non Acknowledge */
-#define		ATMEL_US_RIIC		(1 << 16)		/* Ring Indicator Input Change [AT91RM9200 only] */
-#define		ATMEL_US_DSRIC		(1 << 17)		/* Data Set Ready Input Change [AT91RM9200 only] */
-#define		ATMEL_US_DCDIC		(1 << 18)		/* Data Carrier Detect Input Change [AT91RM9200 only] */
-#define		ATMEL_US_CTSIC		(1 << 19)		/* Clear to Send Input Change */
-#define		ATMEL_US_RI		(1 << 20)		/* RI */
-#define		ATMEL_US_DSR		(1 << 21)		/* DSR */
-#define		ATMEL_US_DCD		(1 << 22)		/* DCD */
-#define		ATMEL_US_CTS		(1 << 23)		/* CTS */
+#define ATMEL_US_IER		0x08	/* Interrupt Enable Register */
+#define	ATMEL_US_RXRDY		BIT(0)	/* Receiver Ready */
+#define	ATMEL_US_TXRDY		BIT(1)	/* Transmitter Ready */
+#define	ATMEL_US_RXBRK		BIT(2)	/* Break Received / End of Break */
+#define	ATMEL_US_ENDRX		BIT(3)	/* End of Receiver Transfer */
+#define	ATMEL_US_ENDTX		BIT(4)	/* End of Transmitter Transfer */
+#define	ATMEL_US_OVRE		BIT(5)	/* Overrun Error */
+#define	ATMEL_US_FRAME		BIT(6)	/* Framing Error */
+#define	ATMEL_US_PARE		BIT(7)	/* Parity Error */
+#define	ATMEL_US_TIMEOUT	BIT(8)	/* Receiver Time-out */
+#define	ATMEL_US_TXEMPTY	BIT(9)	/* Transmitter Empty */
+#define	ATMEL_US_ITERATION	BIT(10)	/* Max number of Repetitions Reached */
+#define	ATMEL_US_TXBUFE		BIT(11)	/* Transmission Buffer Empty */
+#define	ATMEL_US_RXBUFF		BIT(12)	/* Reception Buffer Full */
+#define	ATMEL_US_NACK		BIT(13)	/* Non Acknowledge */
+#define	ATMEL_US_RIIC		BIT(16)	/* Ring Indicator Input Change */
+#define	ATMEL_US_DSRIC		BIT(17)	/* Data Set Ready Input Change */
+#define	ATMEL_US_DCDIC		BIT(18)	/* Data Carrier Detect Input Change */
+#define	ATMEL_US_CTSIC		BIT(19)	/* Clear to Send Input Change */
+#define	ATMEL_US_RI		BIT(20)	/* RI */
+#define	ATMEL_US_DSR		BIT(21)	/* DSR */
+#define	ATMEL_US_DCD		BIT(22)	/* DCD */
+#define	ATMEL_US_CTS		BIT(23)	/* CTS */
 
-#define ATMEL_US_IDR		0x0c			/* Interrupt Disable Register */
-#define ATMEL_US_IMR		0x10			/* Interrupt Mask Register */
-#define ATMEL_US_CSR		0x14			/* Channel Status Register */
-#define ATMEL_US_RHR		0x18			/* Receiver Holding Register */
-#define ATMEL_US_THR		0x1c			/* Transmitter Holding Register */
-#define		ATMEL_US_SYNH		(1 << 15)		/* Transmit/Receive Sync [AT91SAM9261 only] */
+#define ATMEL_US_IDR		0x0c	/* Interrupt Disable Register */
+#define ATMEL_US_IMR		0x10	/* Interrupt Mask Register */
+#define ATMEL_US_CSR		0x14	/* Channel Status Register */
+#define ATMEL_US_RHR		0x18	/* Receiver Holding Register */
+#define ATMEL_US_THR		0x1c	/* Transmitter Holding Register */
+#define	ATMEL_US_SYNH		BIT(15)	/* Transmit/Receive Sync */
 
-#define ATMEL_US_BRGR		0x20			/* Baud Rate Generator Register */
-#define		ATMEL_US_CD		(0xffff << 0)		/* Clock Divider */
+#define ATMEL_US_BRGR		0x20	/* Baud Rate Generator Register */
+#define	ATMEL_US_CD		GENMASK(15, 0)	/* Clock Divider */
 
-#define ATMEL_US_RTOR		0x24			/* Receiver Time-out Register */
-#define		ATMEL_US_TO		(0xffff << 0)		/* Time-out Value */
+#define ATMEL_US_RTOR		0x24	/* Receiver Time-out Register */
+#define	ATMEL_US_TO		GENMASK(15, 0)	/* Time-out Value */
 
-#define ATMEL_US_TTGR		0x28			/* Transmitter Timeguard Register */
-#define		ATMEL_US_TG		(0xff << 0)		/* Timeguard Value */
+#define ATMEL_US_TTGR		0x28	/* Transmitter Timeguard Register */
+#define	ATMEL_US_TG		GENMASK(7, 0)	/* Timeguard Value */
 
-#define ATMEL_US_FIDI		0x40			/* FI DI Ratio Register */
-#define ATMEL_US_NER		0x44			/* Number of Errors Register */
-#define ATMEL_US_IF		0x4c			/* IrDA Filter Register */
+#define ATMEL_US_FIDI		0x40	/* FI DI Ratio Register */
+#define ATMEL_US_NER		0x44	/* Number of Errors Register */
+#define ATMEL_US_IF		0x4c	/* IrDA Filter Register */
 
-#define ATMEL_US_NAME		0xf0			/* Ip Name */
-#define ATMEL_US_VERSION	0xfc			/* Ip Version */
+#define ATMEL_US_CMPR		0x90	/* Comparison Register */
+#define ATMEL_US_FMR		0xa0	/* FIFO Mode Register */
+#define	ATMEL_US_TXRDYM(data)	(((data) & 0x3) << 0)	/* TX Ready Mode */
+#define	ATMEL_US_RXRDYM(data)	(((data) & 0x3) << 4)	/* RX Ready Mode */
+#define		ATMEL_US_ONE_DATA	0x0
+#define		ATMEL_US_TWO_DATA	0x1
+#define		ATMEL_US_FOUR_DATA	0x2
+#define	ATMEL_US_FRTSC		BIT(7)	/* FIFO RTS pin Control */
+#define	ATMEL_US_TXFTHRES(thr)	(((thr) & 0x3f) << 8)	/* TX FIFO Threshold */
+#define	ATMEL_US_RXFTHRES(thr)	(((thr) & 0x3f) << 16)	/* RX FIFO Threshold */
+#define	ATMEL_US_RXFTHRES2(thr)	(((thr) & 0x3f) << 24)	/* RX FIFO Threshold2 */
+
+#define ATMEL_US_FLR		0xa4	/* FIFO Level Register */
+#define	ATMEL_US_TXFL(reg)	(((reg) >> 0) & 0x3f)	/* TX FIFO Level */
+#define	ATMEL_US_RXFL(reg)	(((reg) >> 16) & 0x3f)	/* RX FIFO Level */
+
+#define ATMEL_US_FIER		0xa8	/* FIFO Interrupt Enable Register */
+#define ATMEL_US_FIDR		0xac	/* FIFO Interrupt Disable Register */
+#define ATMEL_US_FIMR		0xb0	/* FIFO Interrupt Mask Register */
+#define ATMEL_US_FESR		0xb4	/* FIFO Event Status Register */
+#define	ATMEL_US_TXFEF		BIT(0)	/* Transmit FIFO Empty Flag */
+#define	ATMEL_US_TXFFF		BIT(1)	/* Transmit FIFO Full Flag */
+#define	ATMEL_US_TXFTHF		BIT(2)	/* Transmit FIFO Threshold Flag */
+#define	ATMEL_US_RXFEF		BIT(3)	/* Receive FIFO Empty Flag */
+#define	ATMEL_US_RXFFF		BIT(4)	/* Receive FIFO Full Flag */
+#define	ATMEL_US_RXFTHF		BIT(5)	/* Receive FIFO Threshold Flag */
+#define	ATMEL_US_TXFPTEF	BIT(6)	/* Transmit FIFO Pointer Error Flag */
+#define	ATMEL_US_RXFPTEF	BIT(7)	/* Receive FIFO Pointer Error Flag */
+#define	ATMEL_US_TXFLOCK	BIT(8)	/* Transmit FIFO Lock (FESR only) */
+#define	ATMEL_US_RXFTHF2	BIT(9)	/* Receive FIFO Threshold Flag 2 */
+
+#define ATMEL_US_NAME		0xf0	/* Ip Name */
+#define ATMEL_US_VERSION	0xfc	/* Ip Version */
 
 #endif
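
The new ATMEL_US_FMR/FLR/FESR defines above expose the FIFO found on recent SoCs, while the mode-register fields remain plain shift/mask values, so a driver builds ATMEL_US_MR by OR-ing one value per field and clears the field mask before changing it. A minimal sketch of that pattern, assuming <linux/io.h> and the ATMEL_US_* defines above are in scope; example_set_8n1() and the raw readl()/writel() accesses are illustrative, not taken from the real atmel_serial driver:

static void example_set_8n1(void __iomem *base)
{
	u32 mr;

	/* 8 data bits, no parity, 1 stop bit, normal channel mode, MCK clock */
	mr = ATMEL_US_USMODE_NORMAL | ATMEL_US_USCLKS_MCK |
	     ATMEL_US_CHRL_8 | ATMEL_US_PAR_NONE |
	     ATMEL_US_NBSTOP_1 | ATMEL_US_CHMODE_NORMAL;
	writel(mr, base + ATMEL_US_MR);

	/* Changing a single field later: clear its mask, then OR in the new value. */
	mr = readl(base + ATMEL_US_MR);
	mr &= ~ATMEL_US_PAR;
	mr |= ATMEL_US_PAR_EVEN;
	writel(mr, base + ATMEL_US_MR);
}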
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 5b08a85..00a5763 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,329 @@
 #ifndef _LINUX_ATOMIC_H
 #define _LINUX_ATOMIC_H
 #include <asm/atomic.h>
+#include <asm/barrier.h>
+
+/*
+ * Relaxed variants of xchg, cmpxchg and some atomic operations.
+ *
+ * We support four variants:
+ *
+ * - Fully ordered: The default implementation, no suffix required.
+ * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
+ * - Release: Provides RELEASE semantics, _release suffix.
+ * - Relaxed: No ordering guarantees, _relaxed suffix.
+ *
+ * For compound atomics performing both a load and a store, ACQUIRE
+ * semantics apply only to the load and RELEASE semantics only to the
+ * store portion of the operation. Note that a failed cmpxchg_acquire
+ * does -not- imply any memory ordering constraints.
+ *
+ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
+ */
+
+#ifndef atomic_read_acquire
+#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic_set_release
+#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/*
+ * The idea here is to build acquire/release variants by adding explicit
+ * barriers on top of the relaxed variant. In the case where the relaxed
+ * variant is already fully ordered, no additional barriers are needed.
+ */
+#define __atomic_op_acquire(op, args...)				\
+({									\
+	typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);		\
+	smp_mb__after_atomic();						\
+	__ret;								\
+})
+
+#define __atomic_op_release(op, args...)				\
+({									\
+	smp_mb__before_atomic();					\
+	op##_relaxed(args);						\
+})
+
+#define __atomic_op_fence(op, args...)					\
+({									\
+	typeof(op##_relaxed(args)) __ret;				\
+	smp_mb__before_atomic();					\
+	__ret = op##_relaxed(args);					\
+	smp_mb__after_atomic();						\
+	__ret;								\
+})
+
+/* atomic_add_return_relaxed */
+#ifndef atomic_add_return_relaxed
+#define  atomic_add_return_relaxed	atomic_add_return
+#define  atomic_add_return_acquire	atomic_add_return
+#define  atomic_add_return_release	atomic_add_return
+
+#else /* atomic_add_return_relaxed */
+
+#ifndef atomic_add_return_acquire
+#define  atomic_add_return_acquire(...)					\
+	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return_release
+#define  atomic_add_return_release(...)					\
+	__atomic_op_release(atomic_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_add_return
+#define  atomic_add_return(...)						\
+	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic_add_return_relaxed */
+
+/* atomic_sub_return_relaxed */
+#ifndef atomic_sub_return_relaxed
+#define  atomic_sub_return_relaxed	atomic_sub_return
+#define  atomic_sub_return_acquire	atomic_sub_return
+#define  atomic_sub_return_release	atomic_sub_return
+
+#else /* atomic_sub_return_relaxed */
+
+#ifndef atomic_sub_return_acquire
+#define  atomic_sub_return_acquire(...)					\
+	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return_release
+#define  atomic_sub_return_release(...)					\
+	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic_sub_return
+#define  atomic_sub_return(...)						\
+	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic_sub_return_relaxed */
+
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define  atomic_xchg_relaxed		atomic_xchg
+#define  atomic_xchg_acquire		atomic_xchg
+#define  atomic_xchg_release		atomic_xchg
+
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define  atomic_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg_release
+#define  atomic_xchg_release(...)					\
+	__atomic_op_release(atomic_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_xchg
+#define  atomic_xchg(...)						\
+	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic_xchg_relaxed */
+
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
+#define  atomic_cmpxchg_acquire		atomic_cmpxchg
+#define  atomic_cmpxchg_release		atomic_cmpxchg
+
+#else /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic_cmpxchg_acquire
+#define  atomic_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg_release
+#define  atomic_cmpxchg_release(...)					\
+	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic_cmpxchg
+#define  atomic_cmpxchg(...)						\
+	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic_cmpxchg_relaxed */
+
+#ifndef atomic64_read_acquire
+#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
+#endif
+
+#ifndef atomic64_set_release
+#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
+#endif
+
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define  atomic64_add_return_relaxed	atomic64_add_return
+#define  atomic64_add_return_acquire	atomic64_add_return
+#define  atomic64_add_return_release	atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define  atomic64_add_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define  atomic64_add_return_release(...)				\
+	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define  atomic64_add_return(...)					\
+	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define  atomic64_sub_return_relaxed	atomic64_sub_return
+#define  atomic64_sub_return_acquire	atomic64_sub_return
+#define  atomic64_sub_return_release	atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define  atomic64_sub_return_acquire(...)				\
+	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return_release
+#define  atomic64_sub_return_release(...)				\
+	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define  atomic64_sub_return(...)					\
+	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define  atomic64_xchg_relaxed		atomic64_xchg
+#define  atomic64_xchg_acquire		atomic64_xchg
+#define  atomic64_xchg_release		atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define  atomic64_xchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define  atomic64_xchg_release(...)					\
+	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define  atomic64_xchg(...)						\
+	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
+#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
+#define  atomic64_cmpxchg_release	atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define  atomic64_cmpxchg_acquire(...)					\
+	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define  atomic64_cmpxchg_release(...)					\
+	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define  atomic64_cmpxchg(...)						\
+	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
+/* cmpxchg_relaxed */
+#ifndef cmpxchg_relaxed
+#define  cmpxchg_relaxed		cmpxchg
+#define  cmpxchg_acquire		cmpxchg
+#define  cmpxchg_release		cmpxchg
+
+#else /* cmpxchg_relaxed */
+
+#ifndef cmpxchg_acquire
+#define  cmpxchg_acquire(...)						\
+	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg_release
+#define  cmpxchg_release(...)						\
+	__atomic_op_release(cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg
+#define  cmpxchg(...)							\
+	__atomic_op_fence(cmpxchg, __VA_ARGS__)
+#endif
+#endif /* cmpxchg_relaxed */
+
+/* cmpxchg64_relaxed */
+#ifndef cmpxchg64_relaxed
+#define  cmpxchg64_relaxed		cmpxchg64
+#define  cmpxchg64_acquire		cmpxchg64
+#define  cmpxchg64_release		cmpxchg64
+
+#else /* cmpxchg64_relaxed */
+
+#ifndef cmpxchg64_acquire
+#define  cmpxchg64_acquire(...)						\
+	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64_release
+#define  cmpxchg64_release(...)						\
+	__atomic_op_release(cmpxchg64, __VA_ARGS__)
+#endif
+
+#ifndef cmpxchg64
+#define  cmpxchg64(...)							\
+	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
+#endif
+#endif /* cmpxchg64_relaxed */
+
+/* xchg_relaxed */
+#ifndef xchg_relaxed
+#define  xchg_relaxed			xchg
+#define  xchg_acquire			xchg
+#define  xchg_release			xchg
+
+#else /* xchg_relaxed */
+
+#ifndef xchg_acquire
+#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg_release
+#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
+#endif
+
+#ifndef xchg
+#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
+#endif
+#endif /* xchg_relaxed */
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -28,6 +351,23 @@
 #define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
 #endif
 
+#ifndef atomic_andnot
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+	atomic_and(~i, v);
+}
+#endif
+
+static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_andnot(mask, v);
+}
+
+static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+{
+	atomic_or(mask, v);
+}
+
 /**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
@@ -111,21 +451,16 @@
 }
 #endif
 
-#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
-static inline void atomic_or(int i, atomic_t *v)
-{
-	int old;
-	int new;
-
-	do {
-		old = atomic_read(v);
-		new = old | i;
-	} while (atomic_cmpxchg(v, old, new) != old);
-}
-#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
-
 #include <asm-generic/atomic-long.h>
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #endif
+
+#ifndef atomic64_andnot
+static inline void atomic64_andnot(long long i, atomic64_t *v)
+{
+	atomic64_and(~i, v);
+}
+#endif
+
 #endif /* _LINUX_ATOMIC_H */
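
The wrappers above let callers pick the weakest ordering that is still correct: architectures that provide _relaxed variants get acquire/release built from smp_mb__before_atomic()/smp_mb__after_atomic(), and everyone else silently falls back to the fully ordered op. A minimal usage sketch, assuming a hypothetical producer/consumer flag (the names are illustrative, not from the patch):

static atomic_t ready = ATOMIC_INIT(0);
static int payload;

static void example_producer(void)
{
	payload = 42;			/* plain store */
	atomic_set_release(&ready, 1);	/* payload store cannot pass the flag store */
}

static int example_consumer(void)
{
	if (!atomic_read_acquire(&ready))	/* payload load cannot pass the flag load */
		return -EAGAIN;
	return payload;
}

Counters that nothing orders against can use the relaxed return variants directly, e.g. atomic_add_return_relaxed(len, &stats).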
diff --git a/include/linux/average.h b/include/linux/average.h
index c6028fd..d04aa58 100644
--- a/include/linux/average.h
+++ b/include/linux/average.h
@@ -3,28 +3,43 @@
 
 /* Exponentially weighted moving average (EWMA) */
 
-/* For more documentation see lib/average.c */
-
-struct ewma {
-	unsigned long internal;
-	unsigned long factor;
-	unsigned long weight;
-};
-
-extern void ewma_init(struct ewma *avg, unsigned long factor,
-		      unsigned long weight);
-
-extern struct ewma *ewma_add(struct ewma *avg, unsigned long val);
-
-/**
- * ewma_read() - Get average value
- * @avg: Average structure
- *
- * Returns the average value held in @avg.
- */
-static inline unsigned long ewma_read(const struct ewma *avg)
-{
-	return avg->internal >> avg->factor;
-}
+#define DECLARE_EWMA(name, _factor, _weight)				\
+	struct ewma_##name {						\
+		unsigned long internal;					\
+	};								\
+	static inline void ewma_##name##_init(struct ewma_##name *e)	\
+	{								\
+		BUILD_BUG_ON(!__builtin_constant_p(_factor));		\
+		BUILD_BUG_ON(!__builtin_constant_p(_weight));		\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_factor);			\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight);			\
+		e->internal = 0;					\
+	}								\
+	static inline unsigned long					\
+	ewma_##name##_read(struct ewma_##name *e)			\
+	{								\
+		BUILD_BUG_ON(!__builtin_constant_p(_factor));		\
+		BUILD_BUG_ON(!__builtin_constant_p(_weight));		\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_factor);			\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight);			\
+		return e->internal >> ilog2(_factor);			\
+	}								\
+	static inline void ewma_##name##_add(struct ewma_##name *e,	\
+					     unsigned long val)		\
+	{								\
+		unsigned long internal = ACCESS_ONCE(e->internal);	\
+		unsigned long weight = ilog2(_weight);			\
+		unsigned long factor = ilog2(_factor);			\
+									\
+		BUILD_BUG_ON(!__builtin_constant_p(_factor));		\
+		BUILD_BUG_ON(!__builtin_constant_p(_weight));		\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_factor);			\
+		BUILD_BUG_ON_NOT_POWER_OF_2(_weight);			\
+									\
+		ACCESS_ONCE(e->internal) = internal ?			\
+			(((internal << weight) - internal) +		\
+				(val << factor)) >> weight :		\
+			(val << factor);				\
+	}
 
 #endif /* _LINUX_AVERAGE_H */
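
DECLARE_EWMA() now generates a named type plus init/add/read helpers and folds factor and weight into the code as compile-time constants, so the per-user struct shrinks to a single unsigned long. A minimal usage sketch; the "signal" name and the 8/16 parameters are illustrative (both must be powers of two):

/* Generates struct ewma_signal and ewma_signal_init/add/read(). */
DECLARE_EWMA(signal, 8, 16)

static struct ewma_signal avg;

static void example(void)
{
	ewma_signal_init(&avg);
	ewma_signal_add(&avg, 60);
	ewma_signal_add(&avg, 70);
	pr_info("average signal: %lu\n", ewma_signal_read(&avg));
}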
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 14eea94..ed3768f 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -75,5 +75,6 @@
 #define BGPIOF_UNREADABLE_REG_DIR	BIT(2) /* reg_dir is unreadable */
 #define BGPIOF_BIG_ENDIAN_BYTE_ORDER	BIT(3)
 #define BGPIOF_READ_OUTPUT_REG_SET     BIT(4) /* reg_set stores output value */
+#define BGPIOF_NO_OUTPUT		BIT(5) /* only input */
 
 #endif /* __BASIC_MMIO_GPIO_H */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 6cceedf..cf03843 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -640,7 +640,6 @@
 	spinlock_t gpio_lock;
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 	struct gpio_chip gpio;
-	struct irq_domain *irq_domain;
 #endif
 };
 
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5e963a6..b9b6e04 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -187,17 +187,6 @@
 	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
 /*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
-{
-	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
-}
-
-#define bio_io_error(bio) bio_endio((bio), -EIO)
-
-/*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
@@ -306,6 +295,21 @@
 	atomic_set(&bio->__bi_cnt, count);
 }
 
+static inline bool bio_flagged(struct bio *bio, unsigned int bit)
+{
+	return (bio->bi_flags & (1U << bit)) != 0;
+}
+
+static inline void bio_set_flag(struct bio *bio, unsigned int bit)
+{
+	bio->bi_flags |= (1U << bit);
+}
+
+static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
+{
+	bio->bi_flags &= ~(1U << bit);
+}
+
 enum bip_flags {
 	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
 	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
@@ -426,7 +430,14 @@
 
 }
 
-extern void bio_endio(struct bio *, int);
+extern void bio_endio(struct bio *);
+
+static inline void bio_io_error(struct bio *bio)
+{
+	bio->bi_error = -EIO;
+	bio_endio(bio);
+}
+
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -440,7 +451,6 @@
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
-extern int bio_get_nr_vecs(struct block_device *);
 struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,
 				    const struct iov_iter *, gfp_t);
@@ -717,7 +727,7 @@
 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
 extern bool bio_integrity_enabled(struct bio *bio);
 extern int bio_integrity_prep(struct bio *);
-extern void bio_integrity_endio(struct bio *, int);
+extern void bio_integrity_endio(struct bio *);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
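
With the error code moved into bio->bi_error, ->bi_end_io callbacks take only the bio, and submitters record the error before calling bio_endio(); bio_io_error() remains the -EIO shorthand, and bio_flagged()/bio_set_flag()/bio_clear_flag() replace open-coded bit twiddling on bi_flags. A minimal sketch under those assumptions; the example_* functions are illustrative:

/* A ->bi_end_io callback under the new single-argument signature. */
static void example_end_io(struct bio *bio)
{
	if (bio->bi_error && !bio_flagged(bio, BIO_QUIET))
		pr_debug("example: I/O failed: %d\n", bio->bi_error);

	bio_put(bio);
}

/* Failing a bio on the submission side. */
static void example_fail_bio(struct bio *bio, int err)
{
	if (err == -EIO) {
		bio_io_error(bio);	/* sets bi_error = -EIO and completes */
		return;
	}
	bio->bi_error = err;
	bio_endio(bio);
}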
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index ea17cca..9653fdb 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -295,7 +295,7 @@
 	return find_first_zero_bit(src, nbits) == nbits;
 }
 
-static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
+static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
 {
 	if (small_const_nbits(nbits))
 		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 297f5bd..e635533 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -57,7 +57,7 @@
 	     (bit) < (size);					\
 	     (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 
-static __inline__ int get_bitmask_order(unsigned int count)
+static inline int get_bitmask_order(unsigned int count)
 {
 	int order;
 
@@ -65,7 +65,7 @@
 	return order;	/* We could be slightly more clever with -1 here... */
 }
 
-static __inline__ int get_count_order(unsigned int count)
+static inline int get_count_order(unsigned int count)
 {
 	int order;
 
@@ -75,7 +75,7 @@
 	return order;
 }
 
-static inline unsigned long hweight_long(unsigned long w)
+static __always_inline unsigned long hweight_long(unsigned long w)
 {
 	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
 }
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 1b62d76..a4cd164 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -374,7 +374,7 @@
 	 * root_rl in such cases.
 	 */
 	blkg = blkg_lookup_create(blkcg, q);
-	if (unlikely(IS_ERR(blkg)))
+	if (IS_ERR(blkg))
 		goto root_rl;
 
 	blkg_get(blkg);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7303b34..e813013 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -14,7 +14,7 @@
 struct block_device;
 struct io_context;
 struct cgroup_subsys_state;
-typedef void (bio_end_io_t) (struct bio *, int);
+typedef void (bio_end_io_t) (struct bio *);
 typedef void (bio_destructor_t) (struct bio *);
 
 /*
@@ -46,7 +46,8 @@
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
-	unsigned long		bi_flags;	/* status, command, etc */
+	unsigned int		bi_flags;	/* status, command, etc */
+	int			bi_error;
 	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
 						 * top bits priority
 						 */
@@ -111,16 +112,14 @@
 /*
  * bio flags
  */
-#define BIO_UPTODATE	0	/* ok after I/O completion */
 #define BIO_SEG_VALID	1	/* bi_phys_segments valid */
 #define BIO_CLONED	2	/* doesn't own data */
 #define BIO_BOUNCED	3	/* bio is a bounce bio */
 #define BIO_USER_MAPPED 4	/* contains user pages */
 #define BIO_NULL_MAPPED 5	/* contains invalid user pages */
 #define BIO_QUIET	6	/* Make BIO Quiet */
-#define BIO_SNAP_STABLE	7	/* bio data must be snapshotted during write */
-#define BIO_CHAIN	8	/* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED	9	/* bio has elevated ->bi_cnt */
+#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
+#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
 
 /*
  * Flags starting here get preserved by bio_reset() - this includes
@@ -129,14 +128,12 @@
 #define BIO_RESET_BITS	13
 #define BIO_OWNS_VEC	13	/* bio_free() should free bvec */
 
-#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
-
 /*
  * top 4 bits of bio flags indicate the pool this bio came from
  */
 #define BIO_POOL_BITS		(4)
 #define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
-#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_OFFSET		(32 - BIO_POOL_BITS)
 #define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
 #define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d4068c1..a622f27 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -213,14 +213,6 @@
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
-struct bvec_merge_data {
-	struct block_device *bi_bdev;
-	sector_t bi_sector;
-	unsigned bi_size;
-	unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
-			     struct bio_vec *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -258,6 +250,7 @@
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
+	unsigned long		virt_boundary_mask;
 
 	unsigned int		max_hw_sectors;
 	unsigned int		chunk_sectors;
@@ -268,6 +261,7 @@
 	unsigned int		io_min;
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
+	unsigned int		max_hw_discard_sectors;
 	unsigned int		max_write_same_sectors;
 	unsigned int		discard_granularity;
 	unsigned int		discard_alignment;
@@ -305,7 +299,6 @@
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unprep_rq_fn		*unprep_rq_fn;
-	merge_bvec_fn		*merge_bvec_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
 	dma_drain_needed_fn	*dma_drain_needed;
@@ -462,6 +455,7 @@
 
 	struct blk_mq_tag_set	*tag_set;
 	struct list_head	tag_set_list;
+	struct bio_set		*bio_split;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -486,7 +480,6 @@
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
-#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -782,6 +775,8 @@
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
+extern void blk_queue_split(struct request_queue *, struct bio **,
+			    struct bio_set *);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
@@ -986,9 +981,9 @@
 			       void *buf, unsigned int size);
 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
@@ -1138,6 +1133,7 @@
 enum blk_default_limits {
 	BLK_MAX_SEGMENTS	= 128,
 	BLK_SAFE_MAX_SECTORS	= 255,
+	BLK_DEF_MAX_SECTORS	= 2560,
 	BLK_MAX_SEGMENT_SIZE	= 65536,
 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
 };
@@ -1154,6 +1150,11 @@
 	return q->limits.seg_boundary_mask;
 }
 
+static inline unsigned long queue_virt_boundary(struct request_queue *q)
+{
+	return q->limits.virt_boundary_mask;
+}
+
 static inline unsigned int queue_max_sectors(struct request_queue *q)
 {
 	return q->limits.max_sectors;
@@ -1354,6 +1355,19 @@
 	page_cache_release(p.v);
 }
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+				struct bio_vec *bprv, unsigned int offset)
+{
+	if (!queue_virt_boundary(q))
+		return false;
+	return offset ||
+		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
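
The SG-gap check is now driven by a per-queue virt_boundary_mask instead of the removed QUEUE_FLAG_SG_GAPS and merge_bvec_fn: a driver that cannot cross a virtual boundary states the mask once at queue setup, and the block core applies bvec_gap_to_prev() when merging. A minimal sketch, assuming a hypothetical driver whose segments must stay within 4 KiB pages (the example_* names are illustrative):

static void example_init_queue(struct request_queue *q)
{
	/* Segments must not straddle a PAGE_SIZE virtual boundary. */
	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
}

/* What the core effectively asks before appending @next after @prev. */
static bool example_can_append(struct request_queue *q,
			       struct bio_vec *prev, struct bio_vec *next)
{
	return !bvec_gap_to_prev(q, prev, next->bv_offset);
}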
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 4383476..f57d7fe 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -10,6 +10,7 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
+#include <linux/perf_event.h>
 
 struct bpf_map;
 
@@ -24,6 +25,10 @@
 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
 	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
 	int (*map_delete_elem)(struct bpf_map *map, void *key);
+
+	/* funcs called by prog_array and perf_event_array map */
+	void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
+	void (*map_fd_put_ptr) (void *ptr);
 };
 
 struct bpf_map {
@@ -142,13 +147,13 @@
 	bool owner_jited;
 	union {
 		char value[0] __aligned(8);
-		struct bpf_prog *prog[0] __aligned(8);
+		void *ptrs[0] __aligned(8);
 	};
 };
 #define MAX_TAIL_CALL_CNT 32
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
-void bpf_prog_array_map_clear(struct bpf_map *map);
+void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
@@ -185,6 +190,7 @@
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
 extern const struct bpf_func_proto bpf_map_delete_elem_proto;
 
+extern const struct bpf_func_proto bpf_perf_event_read_proto;
 extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
 extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 extern const struct bpf_func_proto bpf_tail_call_proto;
@@ -192,5 +198,7 @@
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
 #endif /* _LINUX_BPF_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 93755a6..4d8fcf2 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -34,12 +34,17 @@
 
 /* define the enumeration of all cgroup subsystems */
 #define SUBSYS(_x) _x ## _cgrp_id,
+#define SUBSYS_TAG(_t) CGROUP_ ## _t, \
+	__unused_tag_ ## _t = CGROUP_ ## _t - 1,
 enum cgroup_subsys_id {
 #include <linux/cgroup_subsys.h>
 	CGROUP_SUBSYS_COUNT,
 };
+#undef SUBSYS_TAG
 #undef SUBSYS
 
+#define CGROUP_CANFORK_COUNT (CGROUP_CANFORK_END - CGROUP_CANFORK_START)
+
 /* bits in struct cgroup_subsys_state flags field */
 enum {
 	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
@@ -318,7 +323,7 @@
 	 * end of cftype array.
 	 */
 	char name[MAX_CFTYPE_NAME];
-	int private;
+	unsigned long private;
 	/*
 	 * If not 0, file mode is set to this value, otherwise it will
 	 * be figured out automatically
@@ -406,7 +411,9 @@
 			      struct cgroup_taskset *tset);
 	void (*attach)(struct cgroup_subsys_state *css,
 		       struct cgroup_taskset *tset);
-	void (*fork)(struct task_struct *task);
+	int (*can_fork)(struct task_struct *task, void **priv_p);
+	void (*cancel_fork)(struct task_struct *task, void *priv);
+	void (*fork)(struct task_struct *task, void *priv);
 	void (*exit)(struct cgroup_subsys_state *css,
 		     struct cgroup_subsys_state *old_css,
 		     struct task_struct *task);
@@ -434,6 +441,9 @@
 	int id;
 	const char *name;
 
+	/* optional, initialized automatically during boot if not set */
+	const char *legacy_name;
+
 	/* link to parent, protected by cgroup_lock() */
 	struct cgroup_root *root;
 
@@ -491,6 +501,7 @@
 
 #else	/* CONFIG_CGROUPS */
 
+#define CGROUP_CANFORK_COUNT 0
 #define CGROUP_SUBSYS_COUNT 0
 
 static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk) {}
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index a593e29..eb7ca55 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -22,6 +22,15 @@
 
 #ifdef CONFIG_CGROUPS
 
+/*
+ * All weight knobs on the default hierarchy should use the following min,
+ * default and max values.  The default value is the logarithmic center of
+ * MIN and MAX and allows 100x to be expressed in both directions.
+ */
+#define CGROUP_WEIGHT_MIN		1
+#define CGROUP_WEIGHT_DFL		100
+#define CGROUP_WEIGHT_MAX		10000
+
 /* a css_task_iter should be treated as an opaque object */
 struct css_task_iter {
 	struct cgroup_subsys		*ss;
@@ -62,7 +71,12 @@
 		     struct pid *pid, struct task_struct *tsk);
 
 void cgroup_fork(struct task_struct *p);
-void cgroup_post_fork(struct task_struct *p);
+extern int cgroup_can_fork(struct task_struct *p,
+			   void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_cancel_fork(struct task_struct *p,
+			       void *ss_priv[CGROUP_CANFORK_COUNT]);
+extern void cgroup_post_fork(struct task_struct *p,
+			     void *old_ss_priv[CGROUP_CANFORK_COUNT]);
 void cgroup_exit(struct task_struct *p);
 
 int cgroup_init_early(void);
@@ -524,7 +538,13 @@
 				    struct dentry *dentry) { return -EINVAL; }
 
 static inline void cgroup_fork(struct task_struct *p) {}
-static inline void cgroup_post_fork(struct task_struct *p) {}
+static inline int cgroup_can_fork(struct task_struct *p,
+				  void *ss_priv[CGROUP_CANFORK_COUNT])
+{ return 0; }
+static inline void cgroup_cancel_fork(struct task_struct *p,
+				      void *ss_priv[CGROUP_CANFORK_COUNT]) {}
+static inline void cgroup_post_fork(struct task_struct *p,
+				    void *ss_priv[CGROUP_CANFORK_COUNT]) {}
 static inline void cgroup_exit(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
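
The can_fork/cancel_fork pair lets a controller (pids, below) refuse a fork and unwind any charge it took if a later step of process creation fails, with post_fork committing the result; copy_process() is expected to call the hooks in that order. A minimal sketch of the calling sequence under that assumption; the example_* names are illustrative:

static int example_do_more_setup(struct task_struct *child)
{
	return 0;	/* stand-in for later copy_process() work that may fail */
}

static int example_copy_process(struct task_struct *child)
{
	void *ss_priv[CGROUP_CANFORK_COUNT];
	int ret;

	cgroup_fork(child);

	ret = cgroup_can_fork(child, ss_priv);	/* e.g. a pids limit may refuse */
	if (ret)
		return ret;

	ret = example_do_more_setup(child);
	if (ret) {
		cgroup_cancel_fork(child, ss_priv);	/* undo charges from can_fork() */
		return ret;
	}

	cgroup_post_fork(child, ss_priv);	/* commit: controllers see the new task */
	return 0;
}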
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e4a96fb..1f36945 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -3,6 +3,17 @@
  *
  * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
  */
+
+/*
+ * This file *must* be included with SUBSYS() defined.
+ * SUBSYS_TAG() is a noop if undefined.
+ */
+
+#ifndef SUBSYS_TAG
+#define __TMP_SUBSYS_TAG
+#define SUBSYS_TAG(_x)
+#endif
+
 #if IS_ENABLED(CONFIG_CPUSETS)
 SUBSYS(cpuset)
 #endif
@@ -48,11 +59,28 @@
 #endif
 
 /*
+ * Subsystems that implement the can_fork() family of callbacks.
+ */
+SUBSYS_TAG(CANFORK_START)
+
+#if IS_ENABLED(CONFIG_CGROUP_PIDS)
+SUBSYS(pids)
+#endif
+
+SUBSYS_TAG(CANFORK_END)
+
+/*
  * The following subsystems are not supported on the default hierarchy.
  */
 #if IS_ENABLED(CONFIG_CGROUP_DEBUG)
 SUBSYS(debug)
 #endif
+
+#ifdef __TMP_SUBSYS_TAG
+#undef __TMP_SUBSYS_TAG
+#undef SUBSYS_TAG
+#endif
+
 /*
  * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
  */
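
SUBSYS_TAG() lets the list above bracket the controllers that implement can_fork() without the tags consuming subsystem ids: each tag is immediately followed by an __unused_tag_ enumerator that steps the counter back by one. A sketch of what the enum in cgroup-defs.h expands to in the simplified case where only cpuset and the pids controller are enabled (real ids depend on the config):

enum cgroup_subsys_id {
	cpuset_cgrp_id,				/* 0 */
	CGROUP_CANFORK_START,			/* 1 */
	__unused_tag_CANFORK_START = 0,		/* step back so the next id reuses 1 */
	pids_cgrp_id,				/* 1 */
	CGROUP_CANFORK_END,			/* 2 */
	__unused_tag_CANFORK_END = 1,
	CGROUP_SUBSYS_COUNT,			/* 2 */
};

CGROUP_CANFORK_COUNT then evaluates to END - START = 1, matching the single can_fork controller.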
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 78842f4..3ecc07d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -11,7 +11,6 @@
 #ifndef __LINUX_CLK_PROVIDER_H
 #define __LINUX_CLK_PROVIDER_H
 
-#include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/of.h>
 
@@ -33,11 +32,34 @@
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
 #define CLK_RECALC_NEW_RATES	BIT(9) /* recalc rates after notifications */
 
+struct clk;
 struct clk_hw;
 struct clk_core;
 struct dentry;
 
 /**
+ * struct clk_rate_request - Structure encoding the clk constraints that
+ * a clock user might require.
+ *
+ * @rate:		Requested clock rate. This field will be adjusted by
+ *			clock drivers according to hardware capabilities.
+ * @min_rate:		Minimum rate imposed by clk users.
+ * @max_rate:		Maximum rate imposed by clk users.
+ * @best_parent_rate:	The best parent rate a parent can provide to fulfill the
+ *			requested constraints.
+ * @best_parent_hw:	The most appropriate parent clock that fulfills the
+ *			requested constraints.
+ *
+ */
+struct clk_rate_request {
+	unsigned long rate;
+	unsigned long min_rate;
+	unsigned long max_rate;
+	unsigned long best_parent_rate;
+	struct clk_hw *best_parent_hw;
+};
+
+/**
  * struct clk_ops -  Callback operations for hardware clocks; these are to
  * be provided by the clock implementation, and will be called by drivers
  * through the clk_* api.
@@ -176,12 +198,8 @@
 					unsigned long parent_rate);
 	long		(*round_rate)(struct clk_hw *hw, unsigned long rate,
 					unsigned long *parent_rate);
-	long		(*determine_rate)(struct clk_hw *hw,
-					  unsigned long rate,
-					  unsigned long min_rate,
-					  unsigned long max_rate,
-					  unsigned long *best_parent_rate,
-					  struct clk_hw **best_parent_hw);
+	int		(*determine_rate)(struct clk_hw *hw,
+					  struct clk_rate_request *req);
 	int		(*set_parent)(struct clk_hw *hw, u8 index);
 	u8		(*get_parent)(struct clk_hw *hw);
 	int		(*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -343,6 +361,9 @@
  *	to the closest integer instead of the up one.
  * CLK_DIVIDER_READ_ONLY - The divider settings are preconfigured and should
  *	not be changed by the clock framework.
+ * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
+ *	except when the value read from the register is zero, the divisor is
+ *	2^width of the field.
  */
 struct clk_divider {
 	struct clk_hw	hw;
@@ -360,6 +381,7 @@
 #define CLK_DIVIDER_HIWORD_MASK		BIT(3)
 #define CLK_DIVIDER_ROUND_CLOSEST	BIT(4)
 #define CLK_DIVIDER_READ_ONLY		BIT(5)
+#define CLK_DIVIDER_MAX_AT_ZERO		BIT(6)
 
 extern const struct clk_ops clk_divider_ops;
 
@@ -550,6 +572,23 @@
 void of_gpio_clk_gate_setup(struct device_node *node);
 
 /**
+ * struct clk_gpio_mux - gpio controlled clock multiplexer
+ *
+ * @hw:		see struct clk_gpio
+ * @gpiod:	gpio descriptor to select the parent of this clock multiplexer
+ *
+ * Clock with a gpio control for selecting the parent clock.
+ * Implements .get_parent, .set_parent and .determine_rate
+ */
+
+extern const struct clk_ops clk_gpio_mux_ops;
+struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
+		const char * const *parent_names, u8 num_parents, unsigned gpio,
+		bool active_low, unsigned long flags);
+
+void of_gpio_mux_clk_setup(struct device_node *node);
+
+/**
  * clk_register - allocate a new clock, register it and return an opaque cookie
  * @dev: device that is registering this clock
  * @hw: link to hardware-specific clock data
@@ -568,31 +607,27 @@
 
 /* helper functions */
 const char *__clk_get_name(struct clk *clk);
+const char *clk_hw_get_name(const struct clk_hw *hw);
 struct clk_hw *__clk_get_hw(struct clk *clk);
-u8 __clk_get_num_parents(struct clk *clk);
-struct clk *__clk_get_parent(struct clk *clk);
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
+unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
+struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
+					  unsigned int index);
 unsigned int __clk_get_enable_count(struct clk *clk);
-unsigned long __clk_get_rate(struct clk *clk);
+unsigned long clk_hw_get_rate(const struct clk_hw *hw);
 unsigned long __clk_get_flags(struct clk *clk);
-bool __clk_is_prepared(struct clk *clk);
+unsigned long clk_hw_get_flags(const struct clk_hw *hw);
+bool clk_hw_is_prepared(const struct clk_hw *hw);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-			      unsigned long min_rate,
-			      unsigned long max_rate,
-			      unsigned long *best_parent_rate,
-			      struct clk_hw **best_parent_p);
-unsigned long __clk_determine_rate(struct clk_hw *core,
-				   unsigned long rate,
-				   unsigned long min_rate,
-				   unsigned long max_rate);
-long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
-			      unsigned long min_rate,
-			      unsigned long max_rate,
-			      unsigned long *best_parent_rate,
-			      struct clk_hw **best_parent_p);
+int __clk_mux_determine_rate(struct clk_hw *hw,
+			     struct clk_rate_request *req);
+int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
+int __clk_mux_determine_rate_closest(struct clk_hw *hw,
+				     struct clk_rate_request *req);
 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
+void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
+			   unsigned long max_rate);
 
 static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
 {
@@ -603,7 +638,7 @@
 /*
  * FIXME clock api without lock protection
  */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
+unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
 
 struct of_device_id;
 
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index f3050e1..e0c3623 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -7,6 +7,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/types.h>
+
 struct device_node;
 
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
diff --git a/include/linux/clk/shmobile.h b/include/linux/clk/shmobile.h
index 63a8159..cb19cc1 100644
--- a/include/linux/clk/shmobile.h
+++ b/include/linux/clk/shmobile.h
@@ -16,8 +16,20 @@
 
 #include <linux/types.h>
 
+struct device;
+struct device_node;
+struct generic_pm_domain;
+
 void r8a7778_clocks_init(u32 mode);
 void r8a7779_clocks_init(u32 mode);
 void rcar_gen2_clocks_init(u32 mode);
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+void cpg_mstp_add_clk_domain(struct device_node *np);
+int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
+void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
+#else
+static inline void cpg_mstp_add_clk_domain(struct device_node *np) {}
+#endif
+
 #endif
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index 19c4208..57bf7aa 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -17,7 +17,8 @@
 #ifndef __LINUX_CLK_TEGRA_H_
 #define __LINUX_CLK_TEGRA_H_
 
-#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/bug.h>
 
 /*
  * Tegra CPU clock and reset control ops
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 79b76e1..223be69 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -188,33 +188,6 @@
 /* DPLL Type and DCO Selection Flags */
 #define DPLL_J_TYPE		0x1
 
-/* Composite clock component types */
-enum {
-	CLK_COMPONENT_TYPE_GATE = 0,
-	CLK_COMPONENT_TYPE_DIVIDER,
-	CLK_COMPONENT_TYPE_MUX,
-	CLK_COMPONENT_TYPE_MAX,
-};
-
-/**
- * struct ti_dt_clk - OMAP DT clock alias declarations
- * @lk: clock lookup definition
- * @node_name: clock DT node to map to
- */
-struct ti_dt_clk {
-	struct clk_lookup		lk;
-	char				*node_name;
-};
-
-#define DT_CLK(dev, con, name)		\
-	{				\
-		.lk = {			\
-			.dev_id = dev,	\
-			.con_id = con,	\
-		},			\
-		.node_name = name,	\
-	}
-
 /* Static memmap indices */
 enum {
 	TI_CLKM_CM = 0,
@@ -225,8 +198,6 @@
 	CLK_MAX_MEMMAPS
 };
 
-typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
-
 /**
  * struct clk_omap_reg - OMAP register declaration
  * @offset: offset from the master IP module base address
@@ -238,98 +209,62 @@
 };
 
 /**
- * struct ti_clk_ll_ops - low-level register access ops for a clock
+ * struct ti_clk_ll_ops - low-level ops for clocks
  * @clk_readl: pointer to register read function
  * @clk_writel: pointer to register write function
+ * @clkdm_clk_enable: pointer to clockdomain enable function
+ * @clkdm_clk_disable: pointer to clockdomain disable function
+ * @cm_wait_module_ready: pointer to CM module wait ready function
+ * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
  *
- * Low-level register access ops are generally used by the basic clock types
- * (clk-gate, clk-mux, clk-divider etc.) to provide support for various
- * low-level hardware interfaces (direct MMIO, regmap etc.), but can also be
- * used by other hardware-specific clock drivers if needed.
+ * Low-level ops are generally used by the basic clock types (clk-gate,
+ * clk-mux, clk-divider etc.) to provide support for various low-level
+ * hardware interfaces (direct MMIO, regmap etc.), and are initialized
+ * by board code. Low-level ops also contain some other platform-specific
+ * operations not provided directly by clock drivers.
  */
 struct ti_clk_ll_ops {
 	u32	(*clk_readl)(void __iomem *reg);
 	void	(*clk_writel)(u32 val, void __iomem *reg);
+	int	(*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
+	int	(*clkdm_clk_disable)(struct clockdomain *clkdm,
+				     struct clk *clk);
+	int	(*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
+					u8 idlest_shift);
+	int	(*cm_split_idlest_reg)(void __iomem *idlest_reg, s16 *prcm_inst,
+				       u8 *idlest_reg_id);
 };
 
-extern struct ti_clk_ll_ops *ti_clk_ll_ops;
-
-extern const struct clk_ops ti_clk_divider_ops;
-extern const struct clk_ops ti_clk_mux_ops;
-
 #define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
 
-void omap2_init_clk_hw_omap_clocks(struct clk *clk);
-int omap3_noncore_dpll_enable(struct clk_hw *hw);
-void omap3_noncore_dpll_disable(struct clk_hw *hw);
-int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
-int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
-				unsigned long parent_rate);
-int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
-					   unsigned long rate,
-					   unsigned long parent_rate,
-					   u8 index);
-long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
-				       unsigned long rate,
-				       unsigned long min_rate,
-				       unsigned long max_rate,
-				       unsigned long *best_parent_rate,
-				       struct clk_hw **best_parent_clk);
-unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
-					 unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
-				    unsigned long target_rate,
-				    unsigned long *parent_rate);
-long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
-					unsigned long rate,
-					unsigned long min_rate,
-					unsigned long max_rate,
-					unsigned long *best_parent_rate,
-					struct clk_hw **best_parent_clk);
-u8 omap2_init_dpll_parent(struct clk_hw *hw);
-unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
-			   unsigned long *parent_rate);
 void omap2_init_clk_clkdm(struct clk_hw *clk);
-unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
-				    unsigned long parent_rate);
-int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
-					unsigned long parent_rate);
-long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
-		unsigned long *prate);
-int omap2_clkops_enable_clkdm(struct clk_hw *hw);
-void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 int omap2_clk_disable_autoidle_all(void);
-void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
-int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
-			 unsigned long parent_rate);
-int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
-				    unsigned long parent_rate, u8 index);
-int omap2_dflt_clk_enable(struct clk_hw *hw);
-void omap2_dflt_clk_disable(struct clk_hw *hw);
-int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
-void omap3_clk_lock_dpll5(void);
+int omap2_clk_enable_autoidle_all(void);
+int omap2_clk_allow_idle(struct clk *clk);
+int omap2_clk_deny_idle(struct clk *clk);
 unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
 				    unsigned long parent_rate);
 int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
 			     unsigned long parent_rate);
 void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
 void omap2xxx_clkt_vps_init(void);
+unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);
 
-void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
-void ti_dt_clocks_register(struct ti_dt_clk *oclks);
-void ti_dt_clk_init_provider(struct device_node *np, int index);
 void ti_dt_clk_init_retry_clks(void);
 void ti_dt_clockdomains_setup(void);
-int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
-		      ti_of_clk_init_cb_t func);
-int of_ti_clk_autoidle_setup(struct device_node *node);
-int ti_clk_add_component(struct device_node *node, struct clk_hw *hw, int type);
+int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);
+
+struct regmap;
+
+int omap2_clk_provider_init(struct device_node *parent, int index,
+			    struct regmap *syscon, void __iomem *mem);
+void omap2_clk_legacy_provider_init(int index, void __iomem *mem);
 
 int omap3430_dt_clk_init(void);
 int omap3630_dt_clk_init(void);
 int am35xx_dt_clk_init(void);
-int ti81xx_dt_clk_init(void);
+int dm814x_dt_clk_init(void);
+int dm816x_dt_clk_init(void);
 int omap4xxx_dt_clk_init(void);
 int omap5xxx_dt_clk_init(void);
 int dra7xx_dt_clk_init(void);
@@ -338,27 +273,24 @@
 int omap2420_dt_clk_init(void);
 int omap2430_dt_clk_init(void);
 
-#ifdef CONFIG_OF
-void of_ti_clk_allow_autoidle_all(void);
-void of_ti_clk_deny_autoidle_all(void);
-#else
-static inline void of_ti_clk_allow_autoidle_all(void) { }
-static inline void of_ti_clk_deny_autoidle_all(void) { }
-#endif
+struct ti_clk_features {
+	u32 flags;
+	long fint_min;
+	long fint_max;
+	long fint_band1_max;
+	long fint_band2_min;
+	u8 dpll_bypass_vals;
+	u8 cm_idlest_val;
+};
+
+#define TI_CLK_DPLL_HAS_FREQSEL			BIT(0)
+#define TI_CLK_DPLL4_DENY_REPROGRAM		BIT(1)
+#define TI_CLK_DISABLE_CLKDM_CONTROL		BIT(2)
+
+void ti_clk_setup_features(struct ti_clk_features *features);
+const struct ti_clk_features *ti_clk_get_features(void);
 
 extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3_dpll;
-extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx;
-extern const struct clk_hw_omap_ops clkhwops_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait;
-extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_wait;
-extern const struct clk_hw_omap_ops clkhwops_iclk;
-extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
-extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
 
 #ifdef CONFIG_ATAGS
 int omap3430_clk_legacy_init(void);
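
ti_clk_ll_ops is no longer an exported pointer; platform code hands it to the clock driver through ti_clk_setup_ll_ops(), and it now also carries the clockdomain and CM callbacks. A minimal registration sketch, assuming trivial MMIO accessors (the example_* names are illustrative and the clkdm/CM hooks are omitted):

static u32 example_clk_readl(void __iomem *reg)
{
	return readl_relaxed(reg);
}

static void example_clk_writel(u32 val, void __iomem *reg)
{
	writel_relaxed(val, reg);
}

static struct ti_clk_ll_ops example_ll_ops = {
	.clk_readl	= example_clk_readl,
	.clk_writel	= example_clk_writel,
	/* .clkdm_clk_enable/.cm_wait_module_ready etc. left unset in this sketch */
};

static void __init example_clk_init(void)
{
	ti_clk_setup_ll_ops(&example_ll_ops);
}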
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 597a1e8..31ce435 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -234,13 +234,10 @@
 static inline void tick_setup_hrtimer_broadcast(void) { }
 # endif
 
-extern int clockevents_notify(unsigned long reason, void *arg);
-
 #else /* !CONFIG_GENERIC_CLOCKEVENTS: */
 
 static inline void clockevents_suspend(void) { }
 static inline void clockevents_resume(void) { }
-static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 static inline int tick_check_broadcast_expired(void) { return 0; }
 static inline void tick_setup_hrtimer_broadcast(void) { }
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index e08a6ae..c836eb2 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -252,7 +252,12 @@
 	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+({							\
+	union { typeof(x) __val; char __c[1]; } __u =	\
+		{ .__val = (__force typeof(x)) (val) }; \
+	__write_once_size(&(x), __u.__c, sizeof(x));	\
+	__u.__val;					\
+})
 
 /**
  * READ_ONCE_CTRL - Read a value heading a control dependency
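
The reworked WRITE_ONCE() adds a (__force typeof(x)) cast so sparse-annotated values (bitwise types such as __le32, for instance) can be stored without warnings, while still guaranteeing a single, untorn access for machine-word sizes. A minimal sketch of the READ_ONCE()/WRITE_ONCE() pairing on a lockless flag (names are illustrative):

static int example_flag;

static void example_writer(void)
{
	WRITE_ONCE(example_flag, 1);	/* one store; cannot be elided, duplicated or torn */
}

static int example_reader(void)
{
	return READ_ONCE(example_flag);	/* one load; not cached across iterations */
}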
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index b96bd29..008fc67 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -49,13 +49,28 @@
 	}
 }
 
+
+/**
+ * ct_state() - return the current context tracking state if known
+ *
+ * Returns the current cpu's context tracking state if context tracking
+ * is enabled.  If context tracking is disabled, returns
+ * CONTEXT_DISABLED.  This should be used primarily for debugging.
+ */
+static inline enum ctx_state ct_state(void)
+{
+	return context_tracking_is_enabled() ?
+		this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
+}
 #else
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
+static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
 #endif /* !CONFIG_CONTEXT_TRACKING */
 
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
 
 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
 extern void context_tracking_init(void);
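
ct_state() and CT_WARN_ON() give entry-path code a cheap assertion about which context-tracking state it believes it is in; with context tracking compiled out or disabled, ct_state() returns CONTEXT_DISABLED and the check costs essentially nothing. A minimal sketch of the intended use on a kernel-entry path (the function name is illustrative):

static void example_enter_from_user_mode(void)
{
	/* We should still be accounted as running in user context here. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);

	user_exit();	/* switch context tracking to CONTEXT_KERNEL */
}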
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 678ecdf..ee956c5 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -14,6 +14,7 @@
 	bool active;
 	int recursion;
 	enum ctx_state {
+		CONTEXT_DISABLED = -1,	/* returned by ct_state() if unknown */
 		CONTEXT_KERNEL = 0,
 		CONTEXT_USER,
 		CONTEXT_GUEST,
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 3486b90..c69e1b9 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -14,6 +14,7 @@
 #define _LINUX_CORESIGHT_H
 
 #include <linux/device.h>
+#include <linux/sched.h>
 
 /* Peripheral id registers (0xFD0-0xFEC) */
 #define CORESIGHT_PERIPHIDR4	0xfd0
@@ -248,4 +249,24 @@
 	struct device *dev, struct device_node *node) { return NULL; }
 #endif
 
+#ifdef CONFIG_PID_NS
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid)
+{
+	struct task_struct *task = NULL;
+	unsigned long pid = 0;
+
+	rcu_read_lock();
+	task = find_task_by_vpid(vpid);
+	if (task)
+		pid = task_pid_nr(task);
+	rcu_read_unlock();
+
+	return pid;
+}
+#else
+static inline unsigned long
+coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
+#endif
+
 #endif
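A sketch of how a coresight driver might translate a namespace-local PID supplied by user space into a root-namespace PID; the calling convention is hypothetical:

    static unsigned long example_translate(unsigned long vpid_from_user)
    {
            /* falls back to the identity mapping when CONFIG_PID_NS=n */
            return coresight_vpid_to_pid(vpid_from_user);
    }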
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index c4d4eb8..986c06c 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -11,6 +11,7 @@
 
 #ifdef CONFIG_GENERIC_CPU_AUTOPROBE
 
+#include <linux/init.h>
 #include <linux/mod_devicetable.h>
 #include <asm/cpufeature.h>
 
@@ -43,16 +44,16 @@
  * For a list of legal values for 'feature', please consult the file
  * 'asm/cpufeature.h' of your favorite architecture.
  */
-#define module_cpu_feature_match(x, __init)			\
+#define module_cpu_feature_match(x, __initfunc)			\
 static struct cpu_feature const cpu_feature_match_ ## x[] =	\
 	{ { .feature = cpu_feature(x) }, { } };			\
 MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x);		\
 								\
-static int cpu_feature_match_ ## x ## _init(void)		\
+static int __init cpu_feature_match_ ## x ## _init(void)	\
 {								\
 	if (!cpu_have_feature(cpu_feature(x)))			\
 		return -ENODEV;					\
-	return __init();					\
+	return __initfunc();					\
 }								\
 module_init(cpu_feature_match_ ## x ## _init)
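With the parameter renamed to __initfunc, the generated init function can itself be annotated __init. A usage sketch for a hypothetical module keyed on an arch-provided AES CPU feature:

    static int __init example_aes_init(void)
    {
            /* register the accelerated implementation here */
            return 0;
    }
    module_cpu_feature_match(AES, example_aes_init);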
 
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index bde1e56..430efcb 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -51,11 +51,9 @@
 	unsigned int		transition_latency;
 };
 
-struct cpufreq_real_policy {
+struct cpufreq_user_policy {
 	unsigned int		min;    /* in kHz */
 	unsigned int		max;    /* in kHz */
-	unsigned int		policy; /* see above */
-	struct cpufreq_governor	*governor; /* see below */
 };
 
 struct cpufreq_policy {
@@ -88,7 +86,7 @@
 	struct work_struct	update; /* if update_policy() needs to be
 					 * called, but you're in IRQ context */
 
-	struct cpufreq_real_policy	user_policy;
+	struct cpufreq_user_policy user_policy;
 	struct cpufreq_frequency_table	*freq_table;
 
 	struct list_head        policy_list;
@@ -369,11 +367,10 @@
 
 /* Policy Notifiers  */
 #define CPUFREQ_ADJUST			(0)
-#define CPUFREQ_INCOMPATIBLE		(1)
-#define CPUFREQ_NOTIFY			(2)
-#define CPUFREQ_START			(3)
-#define CPUFREQ_CREATE_POLICY		(4)
-#define CPUFREQ_REMOVE_POLICY		(5)
+#define CPUFREQ_NOTIFY			(1)
+#define CPUFREQ_START			(2)
+#define CPUFREQ_CREATE_POLICY		(3)
+#define CPUFREQ_REMOVE_POLICY		(4)
 
 #ifdef CONFIG_CPU_FREQ
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
@@ -578,6 +575,8 @@
 int cpufreq_boost_trigger_state(int state);
 int cpufreq_boost_supported(void);
 int cpufreq_boost_enabled(void);
+int cpufreq_enable_boost_support(void);
+bool policy_has_boost_freq(struct cpufreq_policy *policy);
 #else
 static inline int cpufreq_boost_trigger_state(int state)
 {
@@ -591,12 +590,23 @@
 {
 	return 0;
 }
+
+static inline int cpufreq_enable_boost_support(void)
+{
+	return -EINVAL;
+}
+
+static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
+{
+	return false;
+}
 #endif
 /* the following function is for cpufreq core use only */
 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs;
 extern struct freq_attr *cpufreq_generic_attr[];
 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
 				      struct cpufreq_frequency_table *table);
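A sketch of how a cpufreq driver's ->init() callback might use the two new boost helpers; example_freq_table is a placeholder, not part of this patch:

    static int example_cpufreq_init(struct cpufreq_policy *policy)
    {
            int ret;

            ret = cpufreq_table_validate_and_show(policy, example_freq_table);
            if (ret)
                    return ret;

            /* expose boost support only if the table carries boost entries */
            if (policy_has_boost_freq(policy))
                    ret = cpufreq_enable_boost_support();

            return ret;
    }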
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index d075d34..786ad32 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -84,7 +84,6 @@
 	struct list_head 	device_list;
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
-	int			safe_state_index;
 	cpumask_t		coupled_cpus;
 	struct cpuidle_coupled	*coupled;
 #endif
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81ef938..e71cb70 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -102,12 +102,6 @@
 #define CRYPTO_ALG_INTERNAL		0x00002000
 
 /*
- * Temporary flag used to prevent legacy AEAD implementations from
- * being used by user-space.
- */
-#define CRYPTO_ALG_AEAD_NEW		0x00004000
-
-/*
  * Transform masks and values (for crt_flags).
  */
 #define CRYPTO_TFM_REQ_MASK		0x000fff00
@@ -142,13 +136,10 @@
 struct scatterlist;
 struct crypto_ablkcipher;
 struct crypto_async_request;
-struct crypto_aead;
 struct crypto_blkcipher;
 struct crypto_hash;
 struct crypto_tfm;
 struct crypto_type;
-struct aead_request;
-struct aead_givcrypt_request;
 struct skcipher_givcrypt_request;
 
 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -275,47 +266,6 @@
 };
 
 /**
- * struct old_aead_alg - AEAD cipher definition
- * @maxauthsize: Set the maximum authentication tag size supported by the
- *		 transformation. A transformation may support smaller tag sizes.
- *		 As the authentication tag is a message digest to ensure the
- *		 integrity of the encrypted data, a consumer typically wants the
- *		 largest authentication tag possible as defined by this
- *		 variable.
- * @setauthsize: Set authentication size for the AEAD transformation. This
- *		 function is used to specify the consumer requested size of the
- * 		 authentication tag to be either generated by the transformation
- *		 during encryption or the size of the authentication tag to be
- *		 supplied during the decryption operation. This function is also
- *		 responsible for checking the authentication tag size for
- *		 validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @givencrypt: see struct ablkcipher_alg
- * @givdecrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct old_aead_alg {
-	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
-	              unsigned int keylen);
-	int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
-	int (*encrypt)(struct aead_request *req);
-	int (*decrypt)(struct aead_request *req);
-	int (*givencrypt)(struct aead_givcrypt_request *req);
-	int (*givdecrypt)(struct aead_givcrypt_request *req);
-
-	const char *geniv;
-
-	unsigned int ivsize;
-	unsigned int maxauthsize;
-};
-
-/**
  * struct blkcipher_alg - synchronous block cipher definition
  * @min_keysize: see struct ablkcipher_alg
  * @max_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@
 
 
 #define cra_ablkcipher	cra_u.ablkcipher
-#define cra_aead	cra_u.aead
 #define cra_blkcipher	cra_u.blkcipher
 #define cra_cipher	cra_u.cipher
 #define cra_compress	cra_u.compress
@@ -460,7 +409,7 @@
  *	      struct crypto_type, which implements callbacks common for all
  *	      transformation types. There are multiple options:
  *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
- *	      &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ *	      &crypto_ahash_type, &crypto_rng_type.
  *	      This field might be empty. In that case, there are no common
  *	      callbacks. This is the case for: cipher, compress, shash.
  * @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@
 
 	union {
 		struct ablkcipher_alg ablkcipher;
-		struct old_aead_alg aead;
 		struct blkcipher_alg blkcipher;
 		struct cipher_alg cipher;
 		struct compress_alg compress;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 51cc1de..76d23fa 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -82,9 +82,6 @@
 typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
 			    unsigned long arg);
 
-typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
-			    struct bio_vec *biovec, int max_size);
-
 /*
  * These iteration functions are typically used to check (and combine)
  * properties of underlying devices.
@@ -160,7 +157,6 @@
 	dm_status_fn status;
 	dm_message_fn message;
 	dm_ioctl_fn ioctl;
-	dm_merge_fn merge;
 	dm_busy_fn busy;
 	dm_iterate_devices_fn iterate_devices;
 	dm_io_hints_fn io_hints;
diff --git a/include/linux/device.h b/include/linux/device.h
index a2b4ea7..5d7bc63 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -341,7 +341,7 @@
 	struct bus_type *subsys;
 	struct list_head node;
 	int (*add_dev)(struct device *dev, struct subsys_interface *sif);
-	int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
+	void (*remove_dev)(struct device *dev, struct subsys_interface *sif);
 };
 
 int subsys_interface_register(struct subsys_interface *sif);
@@ -714,6 +714,8 @@
  * 		along with subsystem-level and driver-level callbacks.
  * @pins:	For device pin management.
  *		See Documentation/pinctrl.txt for details.
+ * @msi_list:	Hosts MSI descriptors
+ * @msi_domain: The generic MSI domain this device is using.
  * @numa_node:	NUMA node this device is close to.
  * @dma_mask:	Dma mask (if dma'ble device).
  * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
@@ -774,9 +776,15 @@
 	struct dev_pm_info	power;
 	struct dev_pm_domain	*pm_domain;
 
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	struct irq_domain	*msi_domain;
+#endif
 #ifdef CONFIG_PINCTRL
 	struct dev_pin_info	*pins;
 #endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+	struct list_head	msi_list;
+#endif
 
 #ifdef CONFIG_NUMA
 	int		numa_node;	/* NUMA node this device is close to */
@@ -861,6 +869,22 @@
 }
 #endif
 
+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	return dev->msi_domain;
+#else
+	return NULL;
+#endif
+}
+
+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
+{
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+	dev->msi_domain = d;
+#endif
+}
+
 static inline void *dev_get_drvdata(const struct device *dev)
 {
 	return dev->driver_data;
@@ -959,6 +983,8 @@
 extern void device_del(struct device *dev);
 extern int device_for_each_child(struct device *dev, void *data,
 		     int (*fn)(struct device *dev, void *data));
+extern int device_for_each_child_reverse(struct device *dev, void *data,
+		     int (*fn)(struct device *dev, void *data));
 extern struct device *device_find_child(struct device *dev, void *data,
 				int (*match)(struct device *dev, void *data));
 extern int device_rename(struct device *dev, const char *new_name);
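A sketch of how bus or platform code might hand an MSI irqdomain to a child device with the new accessors; both compile down to a NULL read / no-op when CONFIG_GENERIC_MSI_IRQ_DOMAIN is not set:

    static void example_assign_msi_domain(struct device *child,
                                          struct irq_domain *d)
    {
            if (!dev_get_msi_domain(child))
                    dev_set_msi_domain(child, d);
    }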
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index e2f5eb4..7ea9184 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -66,6 +66,7 @@
 	DMA_XOR_VAL,
 	DMA_PQ_VAL,
 	DMA_MEMSET,
+	DMA_MEMSET_SG,
 	DMA_INTERRUPT,
 	DMA_SG,
 	DMA_PRIVATE,
@@ -183,6 +184,8 @@
  *  operation it continues the calculation with new sources
  * @DMA_PREP_FENCE - tell the driver that subsequent operations depend
  *  on the result of this operation
+ * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
+ *  cleared or freed
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -191,6 +194,7 @@
 	DMA_PREP_PQ_DISABLE_Q = (1 << 3),
 	DMA_PREP_CONTINUE = (1 << 4),
 	DMA_PREP_FENCE = (1 << 5),
+	DMA_CTRL_REUSE = (1 << 6),
 };
 
 /**
@@ -400,6 +404,8 @@
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
+ * @descriptor_reuse: if a descriptor can be reused by client and
+ * resubmitted multiple times
  */
 struct dma_slave_caps {
 	u32 src_addr_widths;
@@ -408,6 +414,7 @@
 	bool cmd_pause;
 	bool cmd_terminate;
 	enum dma_residue_granularity residue_granularity;
+	bool descriptor_reuse;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -467,6 +474,7 @@
 	dma_addr_t phys;
 	struct dma_chan *chan;
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
+	int (*desc_free)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
 	struct dmaengine_unmap_data *unmap;
@@ -585,6 +593,20 @@
 };
 
 /**
+ * enum dmaengine_alignment - defines alignment of the DMA async tx
+ * buffers
+ */
+enum dmaengine_alignment {
+	DMAENGINE_ALIGN_1_BYTE = 0,
+	DMAENGINE_ALIGN_2_BYTES = 1,
+	DMAENGINE_ALIGN_4_BYTES = 2,
+	DMAENGINE_ALIGN_8_BYTES = 3,
+	DMAENGINE_ALIGN_16_BYTES = 4,
+	DMAENGINE_ALIGN_32_BYTES = 5,
+	DMAENGINE_ALIGN_64_BYTES = 6,
+};
+
+/**
  * struct dma_device - info on the entity supplying DMA services
  * @chancnt: how many DMA channels are supported
  * @privatecnt: how many DMA channels are requested by dma_request_channel
@@ -616,6 +638,7 @@
  * @device_prep_dma_pq: prepares a pq operation
  * @device_prep_dma_pq_val: prepares a pqzero_sum operation
  * @device_prep_dma_memset: prepares a memset operation
+ * @device_prep_dma_memset_sg: prepares a memset operation over a scatter list
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
@@ -645,10 +668,10 @@
 	dma_cap_mask_t  cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
-	u8 copy_align;
-	u8 xor_align;
-	u8 pq_align;
-	u8 fill_align;
+	enum dmaengine_alignment copy_align;
+	enum dmaengine_alignment xor_align;
+	enum dmaengine_alignment pq_align;
+	enum dmaengine_alignment fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
@@ -682,6 +705,9 @@
 	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
 		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_memset_sg)(
+		struct dma_chan *chan, struct scatterlist *sg,
+		unsigned int nents, int value, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
 		struct dma_chan *chan, unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
@@ -833,7 +859,8 @@
 	return desc->tx_submit(desc);
 }
 
-static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+static inline bool dmaengine_check_align(enum dmaengine_alignment align,
+					 size_t off1, size_t off2, size_t len)
 {
 	size_t mask;
 
@@ -1155,6 +1182,39 @@
 }
 #endif
 
+static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
+{
+	struct dma_slave_caps caps;
+
+	dma_get_slave_caps(tx->chan, &caps);
+
+	if (caps.descriptor_reuse) {
+		tx->flags |= DMA_CTRL_REUSE;
+		return 0;
+	} else {
+		return -EPERM;
+	}
+}
+
+static inline void dmaengine_desc_clear_reuse(struct dma_async_tx_descriptor *tx)
+{
+	tx->flags &= ~DMA_CTRL_REUSE;
+}
+
+static inline bool dmaengine_desc_test_reuse(struct dma_async_tx_descriptor *tx)
+{
+	return (tx->flags & DMA_CTRL_REUSE) == DMA_CTRL_REUSE;
+}
+
+static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
+{
+	/* this is supported for reusable desc, so check that */
+	if (dmaengine_desc_test_reuse(desc))
+		return desc->desc_free(desc);
+	else
+		return -EPERM;
+}
+
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
@@ -1169,7 +1229,7 @@
 static inline struct dma_chan
 *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
 				  dma_filter_fn fn, void *fn_param,
-				  struct device *dev, char *name)
+				  struct device *dev, const char *name)
 {
 	struct dma_chan *chan;
 
@@ -1177,6 +1237,9 @@
 	if (chan)
 		return chan;
 
+	if (!fn || !fn_param)
+		return NULL;
+
 	return __dma_request_channel(mask, fn, fn_param);
 }
 #endif /* DMAENGINE_H */
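A sketch of the new descriptor-reuse flow on a slave channel; the channel, buffer and length are placeholders, and dmaengine_desc_free() returns -EPERM unless the reuse flag was set first:

    static void example_reuse(struct dma_chan *chan, dma_addr_t buf, size_t len)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                               DMA_PREP_INTERRUPT);
            if (!desc || dmaengine_desc_set_reuse(desc))
                    return;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);

            /* ... the same descriptor may be resubmitted while reuse is set ... */

            /* when finally done, release it explicitly */
            dmaengine_desc_free(desc);
    }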
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9012f87..eb049c6 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -76,7 +76,7 @@
 
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 	return (((*(const u32 *)addr) ^ (*(const u32 *)b)) |
-		((a[2] ^ b[2]) & m)) == 0;
+		(__force int)((a[2] ^ b[2]) & m)) == 0;
 #else
 	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
 #endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b16d929..c0f8c4f 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -27,8 +27,6 @@
 #define __LINUX_EXTCON_H__
 
 #include <linux/device.h>
-#include <linux/notifier.h>
-#include <linux/sysfs.h>
 
 /*
  * Define the unique id of supported external connectors
@@ -77,8 +75,6 @@
  *			be attached simultaneously. {0x7, 0} is equivalent to
  *			{0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
  *			can be no simultaneous connections.
- * @print_state:	An optional callback to override the method to print the
- *			status of the extcon device.
  * @dev:		Device of this extcon.
  * @state:		Attach/detach state of this extcon. Do not provide at
  *			register-time.
@@ -102,9 +98,6 @@
 	const unsigned int *supported_cable;
 	const u32 *mutually_exclusive;
 
-	/* Optional callbacks to override class functions */
-	ssize_t	(*print_state)(struct extcon_dev *edev, char *buf);
-
 	/* Internal data. Please do not set. */
 	struct device dev;
 	struct raw_notifier_head *nh;
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 920408a..25c6324 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -417,15 +417,25 @@
 
 #define GET_DENTRY_SLOTS(x)	((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
 
-/* the number of dentry in a block */
-#define NR_DENTRY_IN_BLOCK	214
-
 /* MAX level for dir lookup */
 #define MAX_DIR_HASH_DEPTH	63
 
 /* MAX buckets in one level of dir */
 #define MAX_DIR_BUCKETS		(1 << ((MAX_DIR_HASH_DEPTH / 2) - 1))
 
+/*
+ * space utilization of regular dentry and inline dentry
+ *		regular dentry			inline dentry
+ * bitmap	1 * 27 = 27			1 * 23 = 23
+ * reserved	1 * 3 = 3			1 * 7 = 7
+ * dentry	11 * 214 = 2354			11 * 182 = 2002
+ * filename	8 * 214 = 1712			8 * 182 = 1456
+ * total	4096				3488
+ *
+ * Note: there is more reserved space in inline dentry than in regular
+ * dentry, when converting inline dentry we should handle this carefully.
+ */
+#define NR_DENTRY_IN_BLOCK	214	/* the number of dentry in a block */
 #define SIZE_OF_DIR_ENTRY	11	/* by byte */
 #define SIZE_OF_DENTRY_BITMAP	((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
 					BITS_PER_BYTE)
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index fbb8874..674e3e2 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -86,8 +86,8 @@
 
 static inline struct file *fcheck_files(struct files_struct *files, unsigned int fd)
 {
-	rcu_lockdep_assert(rcu_read_lock_held() ||
-			   lockdep_is_held(&files->file_lock),
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+			   !lockdep_is_held(&files->file_lock),
 			   "suspicious rcu_dereference_check() usage");
 	return __fcheck_files(files, fd);
 }
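The assertion moves from rcu_lockdep_assert() (warn unless held) to RCU_LOCKDEP_WARN() (warn when the condition is true), so the condition is inverted but the semantics are unchanged. A sketch of a lookup that satisfies it; fd is a placeholder:

    struct file *file;

    rcu_read_lock();
    file = fcheck_files(current->files, fd);
    /* 'file' may only be used inside this RCU read-side section unless a
     * proper reference is taken first
     */
    rcu_read_unlock();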
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 17724f6..fa2cab9 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/printk.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@@ -354,6 +355,16 @@
 		   offsetof(struct bpf_prog, insns[proglen]));
 }
 
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+	/* When classic BPF programs have been loaded and the arch
+	 * does not have a classic BPF JIT (anymore), they have been
+	 * converted via bpf_migrate_filter() to eBPF and thus always
+	 * have an unspec program type.
+	 */
+	return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
 
 #ifdef CONFIG_BPF_JIT
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 				u32 pass, void *image)
 {
-	pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
-	       flen, proglen, pass, image);
+	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+	       proglen, pass, image, current->comm, task_pid_nr(current));
+
 	if (image)
 		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
 			       16, 1, image, proglen, false);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 84b783f..fbd780c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1260,6 +1260,7 @@
 
 /* sb->s_iflags */
 #define SB_I_CGROUPWB	0x00000001	/* cgroup-aware writeback enabled */
+#define SB_I_NOEXEC	0x00000002	/* Ignore executables on this fs */
 
 /* Possible states of 'frozen' field */
 enum {
@@ -3041,4 +3042,6 @@
 	return !IS_DEADDIR(inode);
 }
 
+extern bool path_noexec(const struct path *path);
+
 #endif /* _LINUX_FS_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 2a2f56b..f291291 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -20,11 +20,6 @@
 #define FSL_UTMI_PHY_DLY	10	/*As per P1010RM, delay for UTMI
 				PHY CLK to become stable - 10ms*/
 #define FSL_USB_PHY_CLK_TIMEOUT	10000	/* uSec */
-#define FSL_USB_VER_OLD		0
-#define FSL_USB_VER_1_6		1
-#define FSL_USB_VER_2_2		2
-#define FSL_USB_VER_2_4		3
-#define FSL_USB_VER_2_5		4
 
 #include <linux/types.h>
 
@@ -52,6 +47,15 @@
  *
  */
 
+enum fsl_usb2_controller_ver {
+	FSL_USB_VER_NONE = -1,
+	FSL_USB_VER_OLD = 0,
+	FSL_USB_VER_1_6 = 1,
+	FSL_USB_VER_2_2 = 2,
+	FSL_USB_VER_2_4 = 3,
+	FSL_USB_VER_2_5 = 4,
+};
+
 enum fsl_usb2_operating_modes {
 	FSL_USB2_MPH_HOST,
 	FSL_USB2_DR_HOST,
@@ -65,6 +69,7 @@
 	FSL_USB2_PHY_UTMI,
 	FSL_USB2_PHY_UTMI_WIDE,
 	FSL_USB2_PHY_SERIAL,
+	FSL_USB2_PHY_UTMI_DUAL,
 };
 
 struct clk;
@@ -72,7 +77,7 @@
 
 struct fsl_usb2_platform_data {
 	/* board specific information */
-	int				controller_ver;
+	enum fsl_usb2_controller_ver	controller_ver;
 	enum fsl_usb2_operating_modes	operating_mode;
 	enum fsl_usb2_phy_modes		phy_mode;
 	unsigned int			port_enables;
@@ -93,6 +98,9 @@
 
 	unsigned	suspended:1;
 	unsigned	already_suspended:1;
+	unsigned        has_fsl_erratum_a007792:1;
+	unsigned        has_fsl_erratum_a005275:1;
+	unsigned        check_phy_clk_valid:1;
 
 	/* register save area for suspend/resume */
 	u32		pm_command;
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index bf0321e..0023088 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -841,9 +841,59 @@
 
 	u32 nand_stat;
 	wait_queue_head_t nand_wait;
+	bool little_endian;
 };
 
 extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev;
 
+static inline u32 ifc_in32(void __iomem *addr)
+{
+	u32 val;
+
+	if (fsl_ifc_ctrl_dev->little_endian)
+		val = ioread32(addr);
+	else
+		val = ioread32be(addr);
+
+	return val;
+}
+
+static inline u16 ifc_in16(void __iomem *addr)
+{
+	u16 val;
+
+	if (fsl_ifc_ctrl_dev->little_endian)
+		val = ioread16(addr);
+	else
+		val = ioread16be(addr);
+
+	return val;
+}
+
+static inline u8 ifc_in8(void __iomem *addr)
+{
+	return ioread8(addr);
+}
+
+static inline void ifc_out32(u32 val, void __iomem *addr)
+{
+	if (fsl_ifc_ctrl_dev->little_endian)
+		iowrite32(val, addr);
+	else
+		iowrite32be(val, addr);
+}
+
+static inline void ifc_out16(u16 val, void __iomem *addr)
+{
+	if (fsl_ifc_ctrl_dev->little_endian)
+		iowrite16(val, addr);
+	else
+		iowrite16be(val, addr);
+}
+
+static inline void ifc_out8(u8 val, void __iomem *addr)
+{
+	iowrite8(val, addr);
+}
 
 #endif /* __ASM_FSL_IFC_H */
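A short sketch of the new accessors; addr is assumed to point at a 32-bit IFC register mapping, and the helpers select little- or big-endian MMIO at run time from fsl_ifc_ctrl_dev->little_endian:

    u32 v = ifc_in32(addr);
    ifc_out32(v | BIT(0), addr);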
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index ec274e0..2adbfa6 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -13,6 +13,7 @@
 #include <linux/kdev_t.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
+#include <linux/percpu-refcount.h>
 
 #ifdef CONFIG_BLOCK
 
@@ -124,7 +125,7 @@
 #else
 	struct disk_stats dkstats;
 #endif
-	atomic_t ref;
+	struct percpu_ref ref;
 	struct rcu_head rcu_head;
 };
 
@@ -611,7 +612,7 @@
 						     sector_t len, int flags,
 						     struct partition_meta_info
 						       *info);
-extern void __delete_partition(struct hd_struct *);
+extern void __delete_partition(struct percpu_ref *);
 extern void delete_partition(struct gendisk *, int);
 extern void printk_all_partitions(void);
 
@@ -640,27 +641,39 @@
 			       const char *buf, size_t count);
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline void hd_ref_init(struct hd_struct *part)
+static inline int hd_ref_init(struct hd_struct *part)
 {
-	atomic_set(&part->ref, 1);
-	smp_mb();
+	if (percpu_ref_init(&part->ref, __delete_partition, 0,
+				GFP_KERNEL))
+		return -ENOMEM;
+	return 0;
 }
 
 static inline void hd_struct_get(struct hd_struct *part)
 {
-	atomic_inc(&part->ref);
-	smp_mb__after_atomic();
+	percpu_ref_get(&part->ref);
 }
 
 static inline int hd_struct_try_get(struct hd_struct *part)
 {
-	return atomic_inc_not_zero(&part->ref);
+	return percpu_ref_tryget_live(&part->ref);
 }
 
 static inline void hd_struct_put(struct hd_struct *part)
 {
-	if (atomic_dec_and_test(&part->ref))
-		__delete_partition(part);
+	percpu_ref_put(&part->ref);
+}
+
+static inline void hd_struct_kill(struct hd_struct *part)
+{
+	percpu_ref_kill(&part->ref);
+}
+
+static inline void hd_free_part(struct hd_struct *part)
+{
+	free_part_stats(part);
+	free_part_info(part);
+	percpu_ref_exit(&part->ref);
 }
 
 /*
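The hd_struct reference count moves from an atomic_t to a percpu_ref, which changes the lifecycle slightly. A sketch of the sequence a caller is now expected to follow (error handling and the 'out' label are placeholders):

    if (hd_ref_init(part))              /* percpu_ref_init() can fail with -ENOMEM */
            goto out;

    hd_struct_get(part);                /* take an additional reference */
    hd_struct_put(part);                /* drop it again */

    hd_struct_kill(part);               /* at delete time: switch the ref to atomic
                                         * mode, no new hd_struct_try_get() succeeds
                                         */
    /* once the last reference drops, __delete_partition() runs and the final
     * teardown calls hd_free_part() to release stats, info and the percpu_ref
     */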
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index adac255..14cac67 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -47,17 +47,17 @@
 int gpiod_count(struct device *dev, const char *con_id);
 
 /* Acquire and dispose GPIOs */
-struct gpio_desc *__must_check __gpiod_get(struct device *dev,
+struct gpio_desc *__must_check gpiod_get(struct device *dev,
 					 const char *con_id,
 					 enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
 					       const char *con_id,
 					       unsigned int idx,
 					       enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 						  const char *con_id,
 						  enum gpiod_flags flags);
-struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
 							const char *con_id,
 							unsigned int index,
 							enum gpiod_flags flags);
@@ -70,18 +70,18 @@
 void gpiod_put(struct gpio_desc *desc);
 void gpiod_put_array(struct gpio_descs *descs);
 
-struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
 					      const char *con_id,
 					      enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
 						    const char *con_id,
 						    unsigned int idx,
 						    enum gpiod_flags flags);
-struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
 						       const char *con_id,
 						       enum gpiod_flags flags);
 struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
 			      unsigned int index, enum gpiod_flags flags);
 struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
 						     const char *con_id,
@@ -146,31 +146,31 @@
 	return 0;
 }
 
-static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
-						const char *con_id,
-						enum gpiod_flags flags)
+static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
+						       const char *con_id,
+						       enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 static inline struct gpio_desc *__must_check
-__gpiod_get_index(struct device *dev,
-		  const char *con_id,
-		  unsigned int idx,
-		  enum gpiod_flags flags)
+gpiod_get_index(struct device *dev,
+		const char *con_id,
+		unsigned int idx,
+		enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__gpiod_get_optional(struct device *dev, const char *con_id,
-		     enum gpiod_flags flags)
+gpiod_get_optional(struct device *dev, const char *con_id,
+		   enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__gpiod_get_index_optional(struct device *dev, const char *con_id,
-			   unsigned int index, enum gpiod_flags flags)
+gpiod_get_index_optional(struct device *dev, const char *con_id,
+			 unsigned int index, enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
@@ -206,7 +206,7 @@
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get(struct device *dev,
+devm_gpiod_get(struct device *dev,
 		 const char *con_id,
 		 enum gpiod_flags flags)
 {
@@ -214,7 +214,7 @@
 }
 static inline
 struct gpio_desc *__must_check
-__devm_gpiod_get_index(struct device *dev,
+devm_gpiod_get_index(struct device *dev,
 		       const char *con_id,
 		       unsigned int idx,
 		       enum gpiod_flags flags)
@@ -223,14 +223,14 @@
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_optional(struct device *dev, const char *con_id,
 			  enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
 }
 
 static inline struct gpio_desc *__must_check
-__devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
+devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
 				unsigned int index, enum gpiod_flags flags)
 {
 	return ERR_PTR(-ENOSYS);
@@ -424,42 +424,6 @@
 
 #endif /* CONFIG_GPIOLIB */
 
-/*
- * Vararg-hacks! This is done to transition the kernel to always pass
- * the options flags argument to the below functions. During a transition
- * phase these vararg macros make both old-and-newstyle code compile,
- * but when all calls to the elder API are removed, these should go away
- * and the __gpiod_get() etc functions above be renamed just gpiod_get()
- * etc.
- */
-#define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags)
-#define gpiod_get(varargs...) __gpiod_get(varargs, GPIOD_ASIS)
-#define __gpiod_get_index(dev, con_id, index, flags, ...)		\
-	__gpiod_get_index(dev, con_id, index, flags)
-#define gpiod_get_index(varargs...) __gpiod_get_index(varargs, GPIOD_ASIS)
-#define __gpiod_get_optional(dev, con_id, flags, ...)			\
-	__gpiod_get_optional(dev, con_id, flags)
-#define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __gpiod_get_index_optional(dev, con_id, index, flags, ...)	\
-	__gpiod_get_index_optional(dev, con_id, index, flags)
-#define gpiod_get_index_optional(varargs...)				\
-	__gpiod_get_index_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get(dev, con_id, flags, ...)			\
-	__devm_gpiod_get(dev, con_id, flags)
-#define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index(dev, con_id, index, flags, ...)		\
-	__devm_gpiod_get_index(dev, con_id, index, flags)
-#define devm_gpiod_get_index(varargs...)				\
-	__devm_gpiod_get_index(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_optional(dev, con_id, flags, ...)		\
-	__devm_gpiod_get_optional(dev, con_id, flags)
-#define devm_gpiod_get_optional(varargs...)				\
-	__devm_gpiod_get_optional(varargs, GPIOD_ASIS)
-#define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...)	\
-	__devm_gpiod_get_index_optional(dev, con_id, index, flags)
-#define devm_gpiod_get_index_optional(varargs...)			\
-	__devm_gpiod_get_index_optional(varargs, GPIOD_ASIS)
-
 #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
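With the vararg compatibility macros removed, the flags argument is now mandatory on every gpiod_get() variant. A consumer sketch; the device pointer and the "reset" con_id are placeholders:

    static int example_probe(struct device *dev)
    {
            struct gpio_desc *reset;

            reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            gpiod_set_value(reset, 1);
            return 0;
    }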
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c8393cd..1aed31c 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/lockdep.h>
 #include <linux/pinctrl/pinctrl.h>
 
 struct device;
@@ -64,6 +65,17 @@
  *	registers.
  * @irq_not_threaded: flag must be set if @can_sleep is set but the
  *	IRQs don't need to be threaded
+ * @irqchip: GPIO IRQ chip impl, provided by GPIO driver
+ * @irqdomain: Interrupt translation domain; responsible for mapping
+ *	between GPIO hwirq number and linux irq number
+ * @irq_base: first linux IRQ number assigned to GPIO IRQ chip (deprecated)
+ * @irq_handler: the irq handler to use (often a predefined irq core function)
+ *	for GPIO IRQs, provided by GPIO driver
+ * @irq_default_type: default IRQ triggering type applied during GPIO driver
+ *	initialization, provided by GPIO driver
+ * @irq_parent: GPIO IRQ chip parent/bank linux irq number,
+ *	provided by GPIO driver
+ * @lock_key: per GPIO IRQ chip lockdep class
  *
  * A gpio_chip can help platforms abstract various sources of GPIOs so
  * they can all be accessed through a common programing interface.
@@ -126,6 +138,7 @@
 	irq_flow_handler_t	irq_handler;
 	unsigned int		irq_default_type;
 	int			irq_parent;
+	struct lock_class_key	*lock_key;
 #endif
 
 #if defined(CONFIG_OF_GPIO)
@@ -171,11 +184,25 @@
 		int parent_irq,
 		irq_flow_handler_t parent_handler);
 
-int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-		struct irq_chip *irqchip,
-		unsigned int first_irq,
-		irq_flow_handler_t handler,
-		unsigned int type);
+int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+			  struct irq_chip *irqchip,
+			  unsigned int first_irq,
+			  irq_flow_handler_t handler,
+			  unsigned int type,
+			  struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+#define gpiochip_irqchip_add(...)				\
+(								\
+	({							\
+		static struct lock_class_key _key;		\
+		_gpiochip_irqchip_add(__VA_ARGS__, &_key);	\
+	})							\
+)
+#else
+#define gpiochip_irqchip_add(...)				\
+	_gpiochip_irqchip_add(__VA_ARGS__, NULL)
+#endif
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 30d3a1f..54733d5 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -977,6 +977,11 @@
 					 const char *mod_name);
 void vmbus_driver_unregister(struct hv_driver *hv_driver);
 
+int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
+			resource_size_t min, resource_size_t max,
+			resource_size_t size, resource_size_t align,
+			bool fb_overlap_ok);
+
 /**
  * VMBUS_DEVICE - macro used to describe a specific hyperv vmbus device
  *
@@ -1233,8 +1238,6 @@
 
 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
 
-extern struct resource hyperv_mmio;
-
 /*
  * Negotiated version with the Host.
  */
diff --git a/include/linux/i2c/atmel_mxt_ts.h b/include/linux/i2c/atmel_mxt_ts.h
deleted file mode 100644
index 02bf6ea..0000000
--- a/include/linux/i2c/atmel_mxt_ts.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Atmel maXTouch Touchscreen driver
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef __LINUX_ATMEL_MXT_TS_H
-#define __LINUX_ATMEL_MXT_TS_H
-
-#include <linux/types.h>
-
-/* The platform data for the Atmel maXTouch touchscreen driver */
-struct mxt_platform_data {
-	unsigned long irqflags;
-	u8 t19_num_keys;
-	const unsigned int *t19_keymap;
-};
-
-#endif /* __LINUX_ATMEL_MXT_TS_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b9c7897..cfa906f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2074,8 +2074,8 @@
 #define WLAN_EXT_CAPA5_TDLS_PROHIBITED	BIT(6)
 #define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED	BIT(7)
 
+#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED	BIT(5)
 #define WLAN_EXT_CAPA8_OPMODE_NOTIF	BIT(6)
-#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED	BIT(7)
 
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE	0x2
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 193ad48..9084292 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -37,6 +37,7 @@
 	return (struct igmpv3_query *)skb_transport_header(skb);
 }
 
+extern int sysctl_igmp_llm_reports;
 extern int sysctl_igmp_max_memberships;
 extern int sysctl_igmp_max_msf;
 extern int sysctl_igmp_qrv;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 2c476ac..3c17cd7 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -166,6 +166,7 @@
 /**
  * struct st_sensor_settings - ST specific sensor settings
  * @wai: Contents of WhoAmI register.
+ * @wai_addr: The address of WhoAmI register.
  * @sensors_supported: List of supported sensors by struct itself.
  * @ch: IIO channels for the sensor.
  * @odr: Output data rate register and ODR list available.
@@ -179,6 +180,7 @@
  */
 struct st_sensor_settings {
 	u8 wai;
+	u8 wai_addr;
 	char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
 	struct iio_chan_spec *ch;
 	int num_ch;
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 26fb8f6..fad5867 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -100,7 +100,7 @@
 
 /**
  * iio_channel_cb_get_channels() - get access to the underlying channels.
- * @cb_buff:		The callback buffer from whom we want the channel
+ * @cb_buffer:		The callback buffer from whom we want the channel
  *			information.
  *
  * This function allows one to obtain information about the channels.
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index f791482..7bb7f67 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -645,6 +645,15 @@
 #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
 
 /**
+ * IIO_RAD_TO_DEGREE() - Convert rad to degree
+ * @rad: A value in rad
+ *
+ * Returns the given value converted from rad to degree
+ */
+#define IIO_RAD_TO_DEGREE(rad) \
+	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
+
+/**
  * IIO_G_TO_M_S_2() - Convert g to meter / second**2
  * @g: A value in g
  *
@@ -652,4 +661,12 @@
  */
 #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
 
+/**
+ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
+ * @ms2: A value in meter / second**2
+ *
+ * Returns the given value converted from meter / second**2 to g
+ */
+#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
+
 #endif /* _INDUSTRIAL_IO_H_ */
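The new macros are the inverses of IIO_DEGREE_TO_RAD() and IIO_G_TO_M_S_2(), using the same scaled-integer constants and rounding to nearest. Two worked examples with the values in 1e-5 units:

    /* pi rad (314159 in 1e-5 units) converts to 180 degrees (18000000) */
    u64 deg = IIO_RAD_TO_DEGREE(314159ULL);     /* == 18000000 */

    /* 9.80665 m/s^2 (980665 in 1e-5 units) converts to exactly 1 g (100000) */
    u64 g = IIO_M_S_2_TO_G(980665ULL);          /* == 100000 */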
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index 8a1d186..9cd8f74 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -18,7 +18,8 @@
  * struct iio_dev_attr - iio specific device attribute
  * @dev_attr:	underlying device attribute
  * @address:	associated register address
- * @l:		list head for maintaining list of dynamically created attrs.
+ * @l:		list head for maintaining list of dynamically created attrs
+ * @c:		specification for the underlying channel
  */
 struct iio_dev_attr {
 	struct device_attribute dev_attr;
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index fa76c79..1c9e028 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -18,6 +18,9 @@
 	bool enabled;
 };
 
+struct iio_dev;
+struct iio_trigger;
+
 /**
  * struct iio_trigger_ops - operations structure for an iio_trigger.
  * @owner:		used to monitor usage count of the trigger.
diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h
index c378ebe..f72f70d 100644
--- a/include/linux/iio/triggered_buffer.h
+++ b/include/linux/iio/triggered_buffer.h
@@ -7,8 +7,8 @@
 struct iio_buffer_setup_ops;
 
 int iio_triggered_buffer_setup(struct iio_dev *indio_dev,
-	irqreturn_t (*pollfunc_bh)(int irq, void *p),
-	irqreturn_t (*pollfunc_th)(int irq, void *p),
+	irqreturn_t (*h)(int irq, void *p),
+	irqreturn_t (*thread)(int irq, void *p),
 	const struct iio_buffer_setup_ops *setup_ops);
 void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev);
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e8493fe..d0b380e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -32,6 +32,14 @@
 #define INIT_CPUSET_SEQ(tsk)
 #endif
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#define INIT_PREV_CPUTIME(x)	.prev_cputime = {			\
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(x.prev_cputime.lock),		\
+},
+#else
+#define INIT_PREV_CPUTIME(x)
+#endif
+
 #define INIT_SIGNALS(sig) {						\
 	.nr_threads	= 1,						\
 	.thread_head	= LIST_HEAD_INIT(init_task.thread_node),	\
@@ -46,6 +54,7 @@
 		.cputime_atomic	= INIT_CPUTIME_ATOMIC,			\
 		.running	= 0,					\
 	},								\
+	INIT_PREV_CPUTIME(sig)						\
 	.cred_guard_mutex =						\
 		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
 }
@@ -246,6 +255,7 @@
 	INIT_TASK_RCU_TASKS(tsk)					\
 	INIT_CPUSET_SEQ(tsk)						\
 	INIT_RT_MUTEXES(tsk)						\
+	INIT_PREV_CPUTIME(tsk)						\
 	INIT_VTIME(tsk)							\
 	INIT_NUMA_BALANCING(tsk)					\
 	INIT_KASAN(tsk)							\
diff --git a/include/linux/input/pixcir_ts.h b/include/linux/input/pixcir_ts.h
deleted file mode 100644
index 7bae83b..0000000
--- a/include/linux/input/pixcir_ts.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef	_PIXCIR_I2C_TS_H
-#define	_PIXCIR_I2C_TS_H
-
-/*
- * Register map
- */
-#define PIXCIR_REG_POWER_MODE	51
-#define PIXCIR_REG_INT_MODE	52
-
-/*
- * Power modes:
- * active: max scan speed
- * idle: lower scan speed with automatic transition to active on touch
- * halt: datasheet says sleep but this is more like halt as the chip
- *       clocks are cut and it can only be brought out of this mode
- *	 using the RESET pin.
- */
-enum pixcir_power_mode {
-	PIXCIR_POWER_ACTIVE,
-	PIXCIR_POWER_IDLE,
-	PIXCIR_POWER_HALT,
-};
-
-#define PIXCIR_POWER_MODE_MASK	0x03
-#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * diff coordinates: interrupt is asserted when coordinates change
- * level on touch: interrupt level asserted during touch
- * pulse on touch: interrupt pulse asserted druing touch
- *
- */
-enum pixcir_int_mode {
-	PIXCIR_INT_PERIODICAL,
-	PIXCIR_INT_DIFF_COORD,
-	PIXCIR_INT_LEVEL_TOUCH,
-	PIXCIR_INT_PULSE_TOUCH,
-};
-
-#define PIXCIR_INT_MODE_MASK	0x03
-#define PIXCIR_INT_ENABLE	(1UL << 3)
-#define PIXCIR_INT_POL_HIGH	(1UL << 2)
-
-/**
- * struct pixcir_irc_chip_data - chip related data
- * @max_fingers:	Max number of fingers reported simultaneously by h/w
- * @has_hw_ids:		Hardware supports finger tracking IDs
- *
- */
-struct pixcir_i2c_chip_data {
-	u8 max_fingers;
-	bool has_hw_ids;
-};
-
-struct pixcir_ts_platform_data {
-	int x_max;
-	int y_max;
-	int gpio_attb;		/* GPIO connected to ATTB line */
-	struct pixcir_i2c_chip_data chip;
-};
-
-#endif
diff --git a/include/linux/input/touchscreen.h b/include/linux/input/touchscreen.h
index eecc9ea..c91e137 100644
--- a/include/linux/input/touchscreen.h
+++ b/include/linux/input/touchscreen.h
@@ -9,15 +9,8 @@
 #ifndef _TOUCHSCREEN_H
 #define _TOUCHSCREEN_H
 
-#include <linux/input.h>
+struct input_dev;
 
-#ifdef CONFIG_OF
-void touchscreen_parse_of_params(struct input_dev *dev, bool multitouch);
-#else
-static inline void touchscreen_parse_of_params(struct input_dev *dev,
-					       bool multitouch)
-{
-}
-#endif
+void touchscreen_parse_properties(struct input_dev *dev, bool multitouch);
 
 #endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 82806c6..f1f32af 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -29,7 +29,9 @@
 	__s32		max_desync_factor;
 	__s32		max_addresses;
 	__s32		accept_ra_defrtr;
+	__s32		accept_ra_min_hop_limit;
 	__s32		accept_ra_pinfo;
+	__s32		ignore_routes_with_linkdown;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	__s32		accept_ra_rtr_pref;
 	__s32		rtr_probe_interval;
@@ -57,6 +59,7 @@
 		bool initialized;
 		struct in6_addr secret;
 	} stable_secret;
+	__s32		use_oif_addrs_only;
 	void		*sysctl;
 };
 
@@ -94,7 +97,6 @@
 struct inet6_skb_parm {
 	int			iif;
 	__be16			ra;
-	__u16			hop;
 	__u16			dst0;
 	__u16			srcrt;
 	__u16			dst1;
@@ -111,6 +113,7 @@
 #define IP6SKB_REROUTED		4
 #define IP6SKB_ROUTERALERT	8
 #define IP6SKB_FRAGMENTED      16
+#define IP6SKB_HOPBYHOP        32
 };
 
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 51744bc..6f8b340 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -324,8 +324,10 @@
  * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips
  * @irq_cpu_online:	configure an interrupt source for a secondary CPU
  * @irq_cpu_offline:	un-configure an interrupt source for a secondary CPU
- * @irq_suspend:	function called from core code on suspend once per chip
- * @irq_resume:		function called from core code on resume once per chip
+ * @irq_suspend:	function called from core code on suspend once per
+ *			chip, when one or more interrupts are installed
+ * @irq_resume:		function called from core code on resume once per chip,
+ *			when one or more interrupts are installed
  * @irq_pm_shutdown:	function called from core code on shutdown once per chip
  * @irq_calc_mask:	Optional function to set irq_data.mask for special cases
  * @irq_print_chip:	optional to print special chip info in show_interrupts
@@ -488,8 +490,7 @@
 #endif
 
 /* Handling of unhandled and spurious interrupts: */
-extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
-			   irqreturn_t action_ret);
+extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);
 
 
 /* Enable/disable irq debugging output: */
@@ -640,7 +641,7 @@
 	return d ? d->msi_desc : NULL;
 }
 
-static inline struct msi_desc *irq_data_get_msi(struct irq_data *d)
+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d)
 {
 	return d->msi_desc;
 }
@@ -762,6 +763,12 @@
  * @reg_base:		Register base address (virtual)
  * @reg_readl:		Alternate I/O accessor (defaults to readl if NULL)
  * @reg_writel:		Alternate I/O accessor (defaults to writel if NULL)
+ * @suspend:		Function called from core code on suspend once per
+ *			chip; can be useful instead of irq_chip::suspend to
+ *			handle chip details even when no interrupts are in use
+ * @resume:		Function called from core code on resume once per chip;
+ *			can be useful instead of irq_chip::resume to handle
+ *			chip details even when no interrupts are in use
  * @irq_base:		Interrupt base nr for this chip
  * @irq_cnt:		Number of interrupts handled by this chip
  * @mask_cache:		Cached mask register shared between all chip types
@@ -788,6 +795,8 @@
 	void __iomem		*reg_base;
 	u32			(*reg_readl)(void __iomem *addr);
 	void			(*reg_writel)(u32 val, void __iomem *addr);
+	void			(*suspend)(struct irq_chip_generic *gc);
+	void			(*resume)(struct irq_chip_generic *gc);
 	unsigned int		irq_base;
 	unsigned int		irq_cnt;
 	u32			mask_cache;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index ffbc034..bf982e0 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -360,6 +360,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/stringify.h>
+#include <asm/msi.h>
 
 /*
  * We need a value to serve as a irq-type for LPIs. Choose one that will
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9de976b..65da435 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,11 +95,10 @@
 
 struct device_node;
 
-void gic_set_irqchip_flags(unsigned long flags);
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
 		    u32 offset, struct device_node *);
 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
-void gic_cpu_if_down(void);
+int gic_cpu_if_down(unsigned int gic_nr);
 
 static inline void gic_init(unsigned int nr, int start,
 			    void __iomem *dist , void __iomem *cpu)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 9b1ad37..4e68616 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -41,12 +41,20 @@
 
 /* Shared Global Counter */
 #define GIC_SH_COUNTER_31_00_OFS	0x0010
+/* 64-bit counter register for CM3 */
+#define GIC_SH_COUNTER_OFS		GIC_SH_COUNTER_31_00_OFS
 #define GIC_SH_COUNTER_63_32_OFS	0x0014
 #define GIC_SH_REVISIONID_OFS		0x0020
 
 /* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr)		(((intr) / 32) * 4)
-#define GIC_INTR_BIT(intr)		((intr) % 32)
+#define GIC_INTR_OFS(intr) ({				\
+	unsigned bits = mips_cm_is64 ? 64 : 32;		\
+	unsigned reg_idx = (intr) / bits;		\
+	unsigned reg_width = bits / 8;			\
+							\
+	reg_idx * reg_width;				\
+})
+#define GIC_INTR_BIT(intr)		((intr) % (mips_cm_is64 ? 64 : 32))
 
 /* Polarity : Reset Value is always 0 */
 #define GIC_SH_SET_POLARITY_OFS		0x0100
@@ -98,6 +106,8 @@
 #define GIC_VPE_WD_COUNT0_OFS		0x0094
 #define GIC_VPE_WD_INITIAL0_OFS		0x0098
 #define GIC_VPE_COMPARE_LO_OFS		0x00a0
+/* 64-bit Compare register on CM3 */
+#define GIC_VPE_COMPARE_OFS		GIC_VPE_COMPARE_LO_OFS
 #define GIC_VPE_COMPARE_HI_OFS		0x00a4
 
 #define GIC_VPE_EIC_SHADOW_SET_BASE_OFS	0x0100
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index fcea4e4..5acfa26 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -166,10 +166,14 @@
 #endif
 
 /* Test to see if a driver has successfully requested an irq */
+static inline int irq_desc_has_action(struct irq_desc *desc)
+{
+	return desc->action != NULL;
+}
+
 static inline int irq_has_action(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->action != NULL;
+	return irq_desc_has_action(irq_to_desc(irq));
 }
 
 /* caller has locked the irq_desc and both params are valid */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 744ac0e..d3ca792 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -45,6 +45,20 @@
 /* Number of irqs reserved for a legacy isa controller */
 #define NUM_ISA_INTERRUPTS	16
 
+/*
+ * Should several domains have the same device node, but serve
+ * different purposes (for example one domain is for PCI/MSI, and the
+ * other for wired IRQs), they can be distinguished using a
+ * bus-specific token. Most domains are expected to only carry
+ * DOMAIN_BUS_ANY.
+ */
+enum irq_domain_bus_token {
+	DOMAIN_BUS_ANY		= 0,
+	DOMAIN_BUS_PCI_MSI,
+	DOMAIN_BUS_PLATFORM_MSI,
+	DOMAIN_BUS_NEXUS,
+};
+
 /**
  * struct irq_domain_ops - Methods for irq_domain objects
  * @match: Match an interrupt controller device node to a host, returns
@@ -61,7 +75,8 @@
  * to setup the irq_desc when returning from map().
  */
 struct irq_domain_ops {
-	int (*match)(struct irq_domain *d, struct device_node *node);
+	int (*match)(struct irq_domain *d, struct device_node *node,
+		     enum irq_domain_bus_token bus_token);
 	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
 	void (*unmap)(struct irq_domain *d, unsigned int virq);
 	int (*xlate)(struct irq_domain *d, struct device_node *node,
@@ -116,6 +131,7 @@
 
 	/* Optional data */
 	struct device_node *of_node;
+	enum irq_domain_bus_token bus_token;
 	struct irq_domain_chip_generic *gc;
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 	struct irq_domain *parent;
@@ -161,9 +177,15 @@
 					 irq_hw_number_t first_hwirq,
 					 const struct irq_domain_ops *ops,
 					 void *host_data);
-extern struct irq_domain *irq_find_host(struct device_node *node);
+extern struct irq_domain *irq_find_matching_host(struct device_node *node,
+						 enum irq_domain_bus_token bus_token);
 extern void irq_set_default_host(struct irq_domain *host);
 
+static inline struct irq_domain *irq_find_host(struct device_node *node)
+{
+	return irq_find_matching_host(node, DOMAIN_BUS_ANY);
+}
+
 /**
  * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
  * @of_node: pointer to interrupt controller's device tree node.
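A sketch of how a driver might pick a bus-specific domain with the new lookup and fall back to the wildcard token; np stands for the controller's device_node:

    struct irq_domain *d;

    d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
    if (!d)
            d = irq_find_host(np);      /* equivalent to DOMAIN_BUS_ANY */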
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
deleted file mode 100644
index d326152..0000000
--- a/include/linux/jbd.h
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * linux/include/linux/jbd.h
- *
- * Written by Stephen C. Tweedie <sct@redhat.com>
- *
- * Copyright 1998-2000 Red Hat, Inc --- All Rights Reserved
- *
- * This file is part of the Linux kernel and is made available under
- * the terms of the GNU General Public License, version 2, or at your
- * option, any later version, incorporated herein by reference.
- *
- * Definitions for transaction data structures for the buffer cache
- * filesystem journaling support.
- */
-
-#ifndef _LINUX_JBD_H
-#define _LINUX_JBD_H
-
-/* Allow this file to be included directly into e2fsprogs */
-#ifndef __KERNEL__
-#include "jfs_compat.h"
-#define JFS_DEBUG
-#define jfs_debug jbd_debug
-#else
-
-#include <linux/types.h>
-#include <linux/buffer_head.h>
-#include <linux/journal-head.h>
-#include <linux/stddef.h>
-#include <linux/mutex.h>
-#include <linux/timer.h>
-#include <linux/lockdep.h>
-#include <linux/slab.h>
-
-#define journal_oom_retry 1
-
-/*
- * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
- * certain classes of error which can occur due to failed IOs.  Under
- * normal use we want ext3 to continue after such errors, because
- * hardware _can_ fail, but for debugging purposes when running tests on
- * known-good hardware we may want to trap these errors.
- */
-#undef JBD_PARANOID_IOFAIL
-
-/*
- * The default maximum commit age, in seconds.
- */
-#define JBD_DEFAULT_MAX_COMMIT_AGE 5
-
-#ifdef CONFIG_JBD_DEBUG
-/*
- * Define JBD_EXPENSIVE_CHECKING to enable more expensive internal
- * consistency checks.  By default we don't do this unless
- * CONFIG_JBD_DEBUG is on.
- */
-#define JBD_EXPENSIVE_CHECKING
-extern u8 journal_enable_debug;
-
-void __jbd_debug(int level, const char *file, const char *func,
-		 unsigned int line, const char *fmt, ...);
-
-#define jbd_debug(n, fmt, a...) \
-	__jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
-#else
-#define jbd_debug(n, fmt, a...)    /**/
-#endif
-
-static inline void *jbd_alloc(size_t size, gfp_t flags)
-{
-	return (void *)__get_free_pages(flags, get_order(size));
-}
-
-static inline void jbd_free(void *ptr, size_t size)
-{
-	free_pages((unsigned long)ptr, get_order(size));
-}
-
-#define JFS_MIN_JOURNAL_BLOCKS 1024
-
-
-/**
- * typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
- *
- * All filesystem modifications made by the process go
- * through this handle.  Recursive operations (such as quota operations)
- * are gathered into a single update.
- *
- * The buffer credits field is used to account for journaled buffers
- * being modified by the running process.  To ensure that there is
- * enough log space for all outstanding operations, we need to limit the
- * number of outstanding buffers possible at any time.  When the
- * operation completes, any buffer credits not used are credited back to
- * the transaction, so that at all times we know how many buffers the
- * outstanding updates on a transaction might possibly touch.
- *
- * This is an opaque datatype.
- **/
-typedef struct handle_s		handle_t;	/* Atomic operation type */
-
-
-/**
- * typedef journal_t - The journal_t maintains all of the journaling state information for a single filesystem.
- *
- * journal_t is linked to from the fs superblock structure.
- *
- * We use the journal_t to keep track of all outstanding transaction
- * activity on the filesystem, and to manage the state of the log
- * writing process.
- *
- * This is an opaque datatype.
- **/
-typedef struct journal_s	journal_t;	/* Journal control structure */
-#endif
-
-/*
- * Internal structures used by the logging mechanism:
- */
-
-#define JFS_MAGIC_NUMBER 0xc03b3998U /* The first 4 bytes of /dev/random! */
-
-/*
- * On-disk structures
- */
-
-/*
- * Descriptor block types:
- */
-
-#define JFS_DESCRIPTOR_BLOCK	1
-#define JFS_COMMIT_BLOCK	2
-#define JFS_SUPERBLOCK_V1	3
-#define JFS_SUPERBLOCK_V2	4
-#define JFS_REVOKE_BLOCK	5
-
-/*
- * Standard header for all descriptor blocks:
- */
-typedef struct journal_header_s
-{
-	__be32		h_magic;
-	__be32		h_blocktype;
-	__be32		h_sequence;
-} journal_header_t;
-
-
-/*
- * The block tag: used to describe a single buffer in the journal
- */
-typedef struct journal_block_tag_s
-{
-	__be32		t_blocknr;	/* The on-disk block number */
-	__be32		t_flags;	/* See below */
-} journal_block_tag_t;
-
-/*
- * The revoke descriptor: used on disk to describe a series of blocks to
- * be revoked from the log
- */
-typedef struct journal_revoke_header_s
-{
-	journal_header_t r_header;
-	__be32		 r_count;	/* Count of bytes used in the block */
-} journal_revoke_header_t;
-
-
-/* Definitions for the journal tag flags word: */
-#define JFS_FLAG_ESCAPE		1	/* on-disk block is escaped */
-#define JFS_FLAG_SAME_UUID	2	/* block has same uuid as previous */
-#define JFS_FLAG_DELETED	4	/* block deleted by this transaction */
-#define JFS_FLAG_LAST_TAG	8	/* last tag in this descriptor block */
-
-
-/*
- * The journal superblock.  All fields are in big-endian byte order.
- */
-typedef struct journal_superblock_s
-{
-/* 0x0000 */
-	journal_header_t s_header;
-
-/* 0x000C */
-	/* Static information describing the journal */
-	__be32	s_blocksize;		/* journal device blocksize */
-	__be32	s_maxlen;		/* total blocks in journal file */
-	__be32	s_first;		/* first block of log information */
-
-/* 0x0018 */
-	/* Dynamic information describing the current state of the log */
-	__be32	s_sequence;		/* first commit ID expected in log */
-	__be32	s_start;		/* blocknr of start of log */
-
-/* 0x0020 */
-	/* Error value, as set by journal_abort(). */
-	__be32	s_errno;
-
-/* 0x0024 */
-	/* Remaining fields are only valid in a version-2 superblock */
-	__be32	s_feature_compat;	/* compatible feature set */
-	__be32	s_feature_incompat;	/* incompatible feature set */
-	__be32	s_feature_ro_compat;	/* readonly-compatible feature set */
-/* 0x0030 */
-	__u8	s_uuid[16];		/* 128-bit uuid for journal */
-
-/* 0x0040 */
-	__be32	s_nr_users;		/* Nr of filesystems sharing log */
-
-	__be32	s_dynsuper;		/* Blocknr of dynamic superblock copy*/
-
-/* 0x0048 */
-	__be32	s_max_transaction;	/* Limit of journal blocks per trans.*/
-	__be32	s_max_trans_data;	/* Limit of data blocks per trans. */
-
-/* 0x0050 */
-	__u32	s_padding[44];
-
-/* 0x0100 */
-	__u8	s_users[16*48];		/* ids of all fs'es sharing the log */
-/* 0x0400 */
-} journal_superblock_t;
-
-#define JFS_HAS_COMPAT_FEATURE(j,mask)					\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_compat & cpu_to_be32((mask))))
-#define JFS_HAS_RO_COMPAT_FEATURE(j,mask)				\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_ro_compat & cpu_to_be32((mask))))
-#define JFS_HAS_INCOMPAT_FEATURE(j,mask)				\
-	((j)->j_format_version >= 2 &&					\
-	 ((j)->j_superblock->s_feature_incompat & cpu_to_be32((mask))))
-
-#define JFS_FEATURE_INCOMPAT_REVOKE	0x00000001
-
-/* Features known to this kernel version: */
-#define JFS_KNOWN_COMPAT_FEATURES	0
-#define JFS_KNOWN_ROCOMPAT_FEATURES	0
-#define JFS_KNOWN_INCOMPAT_FEATURES	JFS_FEATURE_INCOMPAT_REVOKE
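As an illustrative sketch of how these feature-test macros are used (modelled on the checks fs/jbd performs when loading a journal; the function name here is hypothetical):

/* Refuse to proceed if the on-disk log uses features this kernel does not know. */
static int example_check_features(journal_t *journal)
{
	if (JFS_HAS_INCOMPAT_FEATURE(journal, ~JFS_KNOWN_INCOMPAT_FEATURES) ||
	    JFS_HAS_RO_COMPAT_FEATURE(journal, ~JFS_KNOWN_ROCOMPAT_FEATURES)) {
		printk(KERN_ERR "journal has unrecognised features\n");
		return -EINVAL;
	}
	return 0;
}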
-
-#ifdef __KERNEL__
-
-#include <linux/fs.h>
-#include <linux/sched.h>
-
-enum jbd_state_bits {
-	BH_JBD			/* Has an attached ext3 journal_head */
-	  = BH_PrivateStart,
-	BH_JWrite,		/* Being written to log (@@@ DEBUGGING) */
-	BH_Freed,		/* Has been freed (truncated) */
-	BH_Revoked,		/* Has been revoked from the log */
-	BH_RevokeValid,		/* Revoked flag is valid */
-	BH_JBDDirty,		/* Is dirty but journaled */
-	BH_State,		/* Pins most journal_head state */
-	BH_JournalHead,		/* Pins bh->b_private and jh->b_bh */
-	BH_Unshadow,		/* Dummy bit, for BJ_Shadow wakeup filtering */
-	BH_JBDPrivateStart,	/* First bit available for private use by FS */
-};
-
-BUFFER_FNS(JBD, jbd)
-BUFFER_FNS(JWrite, jwrite)
-BUFFER_FNS(JBDDirty, jbddirty)
-TAS_BUFFER_FNS(JBDDirty, jbddirty)
-BUFFER_FNS(Revoked, revoked)
-TAS_BUFFER_FNS(Revoked, revoked)
-BUFFER_FNS(RevokeValid, revokevalid)
-TAS_BUFFER_FNS(RevokeValid, revokevalid)
-BUFFER_FNS(Freed, freed)
-
-#include <linux/jbd_common.h>
-
-#define J_ASSERT(assert)	BUG_ON(!(assert))
-
-#define J_ASSERT_BH(bh, expr)	J_ASSERT(expr)
-#define J_ASSERT_JH(jh, expr)	J_ASSERT(expr)
-
-#if defined(JBD_PARANOID_IOFAIL)
-#define J_EXPECT(expr, why...)		J_ASSERT(expr)
-#define J_EXPECT_BH(bh, expr, why...)	J_ASSERT_BH(bh, expr)
-#define J_EXPECT_JH(jh, expr, why...)	J_ASSERT_JH(jh, expr)
-#else
-#define __journal_expect(expr, why...)					     \
-	({								     \
-		int val = (expr);					     \
-		if (!val) {						     \
-			printk(KERN_ERR					     \
-				"EXT3-fs unexpected failure: %s;\n",# expr); \
-			printk(KERN_ERR why "\n");			     \
-		}							     \
-		val;							     \
-	})
-#define J_EXPECT(expr, why...)		__journal_expect(expr, ## why)
-#define J_EXPECT_BH(bh, expr, why...)	__journal_expect(expr, ## why)
-#define J_EXPECT_JH(jh, expr, why...)	__journal_expect(expr, ## why)
-#endif
-
-struct jbd_revoke_table_s;
-
-/**
- * struct handle_s - this is the concrete type associated with handle_t.
- * @h_transaction: Which compound transaction is this update a part of?
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
- * @h_ref: Reference count on this handle
- * @h_err: Field for caller's use to track errors through large fs operations
- * @h_sync: flag for sync-on-close
- * @h_jdata: flag to force data journaling
- * @h_aborted: flag indicating fatal error on handle
- * @h_lockdep_map: lockdep info for debugging lock problems
- */
-struct handle_s
-{
-	/* Which compound transaction is this update a part of? */
-	transaction_t		*h_transaction;
-
-	/* Number of remaining buffers we are allowed to dirty: */
-	int			h_buffer_credits;
-
-	/* Reference count on this handle */
-	int			h_ref;
-
-	/* Field for caller's use to track errors through large fs */
-	/* operations */
-	int			h_err;
-
-	/* Flags [no locking] */
-	unsigned int	h_sync:		1;	/* sync-on-close */
-	unsigned int	h_jdata:	1;	/* force data journaling */
-	unsigned int	h_aborted:	1;	/* fatal error on handle */
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map	h_lockdep_map;
-#endif
-};
-
-
-/* The transaction_t type is the guts of the journaling mechanism.  It
- * tracks a compound transaction through its various states:
- *
- * RUNNING:	accepting new updates
- * LOCKED:	Updates still running but we don't accept new ones
- * RUNDOWN:	Updates are tidying up but have finished requesting
- *		new buffers to modify (state not used for now)
- * FLUSH:       All updates complete, but we are still writing to disk
- * COMMIT:      All data on disk, writing commit record
- * FINISHED:	We still have to keep the transaction for checkpointing.
- *
- * The transaction keeps track of all of the buffers modified by a
- * running transaction, and all of the buffers committed but not yet
- * flushed to home for finished transactions.
- */
-
-/*
- * Lock ranking:
- *
- *    j_list_lock
- *      ->jbd_lock_bh_journal_head()	(This is "innermost")
- *
- *    j_state_lock
- *    ->jbd_lock_bh_state()
- *
- *    jbd_lock_bh_state()
- *    ->j_list_lock
- *
- *    j_state_lock
- *    ->t_handle_lock
- *
- *    j_state_lock
- *    ->j_list_lock			(journal_unmap_buffer)
- *
- */
-
-struct transaction_s
-{
-	/* Pointer to the journal for this transaction. [no locking] */
-	journal_t		*t_journal;
-
-	/* Sequence number for this transaction [no locking] */
-	tid_t			t_tid;
-
-	/*
-	 * Transaction's current state
-	 * [no locking - only kjournald alters this]
-	 * [j_list_lock] guards transition of a transaction into T_FINISHED
-	 * state and subsequent call of __journal_drop_transaction()
-	 * FIXME: needs barriers
-	 * KLUDGE: [use j_state_lock]
-	 */
-	enum {
-		T_RUNNING,
-		T_LOCKED,
-		T_FLUSH,
-		T_COMMIT,
-		T_COMMIT_RECORD,
-		T_FINISHED
-	}			t_state;
-
-	/*
-	 * Where in the log does this transaction's commit start? [no locking]
-	 */
-	unsigned int		t_log_start;
-
-	/* Number of buffers on the t_buffers list [j_list_lock] */
-	int			t_nr_buffers;
-
-	/*
-	 * Doubly-linked circular list of all buffers reserved but not yet
-	 * modified by this transaction [j_list_lock]
-	 */
-	struct journal_head	*t_reserved_list;
-
-	/*
-	 * Doubly-linked circular list of all buffers under writeout during
-	 * commit [j_list_lock]
-	 */
-	struct journal_head	*t_locked_list;
-
-	/*
-	 * Doubly-linked circular list of all metadata buffers owned by this
-	 * transaction [j_list_lock]
-	 */
-	struct journal_head	*t_buffers;
-
-	/*
-	 * Doubly-linked circular list of all data buffers still to be
-	 * flushed before this transaction can be committed [j_list_lock]
-	 */
-	struct journal_head	*t_sync_datalist;
-
-	/*
-	 * Doubly-linked circular list of all forget buffers (superseded
-	 * buffers which we can un-checkpoint once this transaction commits)
-	 * [j_list_lock]
-	 */
-	struct journal_head	*t_forget;
-
-	/*
-	 * Doubly-linked circular list of all buffers still to be flushed before
-	 * this transaction can be checkpointed. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_list;
-
-	/*
-	 * Doubly-linked circular list of all buffers submitted for IO while
-	 * checkpointing. [j_list_lock]
-	 */
-	struct journal_head	*t_checkpoint_io_list;
-
-	/*
-	 * Doubly-linked circular list of temporary buffers currently undergoing
-	 * IO in the log [j_list_lock]
-	 */
-	struct journal_head	*t_iobuf_list;
-
-	/*
-	 * Doubly-linked circular list of metadata buffers being shadowed by log
-	 * IO.  The IO buffers on the iobuf list and the shadow buffers on this
-	 * list match each other one for one at all times. [j_list_lock]
-	 */
-	struct journal_head	*t_shadow_list;
-
-	/*
-	 * Doubly-linked circular list of control buffers being written to the
-	 * log. [j_list_lock]
-	 */
-	struct journal_head	*t_log_list;
-
-	/*
-	 * Protects info related to handles
-	 */
-	spinlock_t		t_handle_lock;
-
-	/*
-	 * Number of outstanding updates running on this transaction
-	 * [t_handle_lock]
-	 */
-	int			t_updates;
-
-	/*
-	 * Number of buffers reserved for use by all handles in this
-	 * transaction but not yet modified. [t_handle_lock]
-	 */
-	int			t_outstanding_credits;
-
-	/*
-	 * Forward and backward links for the circular list of all transactions
-	 * awaiting checkpoint. [j_list_lock]
-	 */
-	transaction_t		*t_cpnext, *t_cpprev;
-
-	/*
-	 * When will the transaction expire (become due for commit), in jiffies?
-	 * [no locking]
-	 */
-	unsigned long		t_expires;
-
-	/*
-	 * When this transaction started, in nanoseconds [no locking]
-	 */
-	ktime_t			t_start_time;
-
-	/*
-	 * How many handles used this transaction? [t_handle_lock]
-	 */
-	int t_handle_count;
-};
-
-/**
- * struct journal_s - this is the concrete type associated with journal_t.
- * @j_flags:  General journaling state flags
- * @j_errno:  Is there an outstanding uncleared error on the journal (from a
- *     prior abort)?
- * @j_sb_buffer: First part of superblock buffer
- * @j_superblock: Second part of superblock buffer
- * @j_format_version: Version of the superblock format
- * @j_state_lock: Protect the various scalars in the journal
- * @j_barrier_count:  Number of processes waiting to create a barrier lock
- * @j_running_transaction: The current running transaction..
- * @j_committing_transaction: the transaction we are pushing to disk
- * @j_checkpoint_transactions: a linked circular list of all transactions
- *  waiting for checkpointing
- * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction
- *  to start committing, or for a barrier lock to be released
- * @j_wait_logspace: Wait queue for waiting for checkpointing to complete
- * @j_wait_done_commit: Wait queue for waiting for commit to complete
- * @j_wait_checkpoint:  Wait queue to trigger checkpointing
- * @j_wait_commit: Wait queue to trigger commit
- * @j_wait_updates: Wait queue to wait for updates to complete
- * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints
- * @j_head: Journal head - identifies the first unused block in the journal
- * @j_tail: Journal tail - identifies the oldest still-used block in the
- *  journal.
- * @j_free: Journal free - how many free blocks are there in the journal?
- * @j_first: The block number of the first usable block
- * @j_last: The block number one beyond the last usable block
- * @j_dev: Device where we store the journal
- * @j_blocksize: blocksize for the location where we store the journal.
- * @j_blk_offset: starting block offset into the device where we store the
- *     journal
- * @j_fs_dev: Device which holds the client fs.  For internal journal this will
- *     be equal to j_dev
- * @j_maxlen: Total maximum capacity of the journal region on disk.
- * @j_list_lock: Protects the buffer lists and internal buffer state.
- * @j_inode: Optional inode where we store the journal.  If present, all journal
- *     block numbers are mapped into this inode via bmap().
- * @j_tail_sequence:  Sequence number of the oldest transaction in the log
- * @j_transaction_sequence: Sequence number of the next transaction to grant
- * @j_commit_sequence: Sequence number of the most recently committed
- *  transaction
- * @j_commit_request: Sequence number of the most recent transaction wanting
- *     commit
- * @j_commit_waited: Sequence number of the most recent transaction someone
- *     is waiting for to commit.
- * @j_uuid: Uuid of client object.
- * @j_task: Pointer to the current commit thread for this journal
- * @j_max_transaction_buffers:  Maximum number of metadata buffers to allow in a
- *     single compound commit transaction
- * @j_commit_interval: What is the maximum transaction lifetime before we begin
- *  a commit?
- * @j_commit_timer:  The timer used to wakeup the commit thread
- * @j_revoke_lock: Protect the revoke table
- * @j_revoke: The revoke table - maintains the list of revoked blocks in the
- *     current transaction.
- * @j_revoke_table: alternate revoke tables for j_revoke
- * @j_wbuf: array of buffer_heads for journal_commit_transaction
- * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the
- *	number that will fit in j_blocksize
- * @j_last_sync_writer: most recent pid which did a synchronous write
- * @j_average_commit_time: the average amount of time in nanoseconds it
- *	takes to commit a transaction to the disk.
- * @j_private: An opaque pointer to fs-private information.
- */
-
-struct journal_s
-{
-	/* General journaling state flags [j_state_lock] */
-	unsigned long		j_flags;
-
-	/*
-	 * Is there an outstanding uncleared error on the journal (from a prior
-	 * abort)? [j_state_lock]
-	 */
-	int			j_errno;
-
-	/* The superblock buffer */
-	struct buffer_head	*j_sb_buffer;
-	journal_superblock_t	*j_superblock;
-
-	/* Version of the superblock format */
-	int			j_format_version;
-
-	/*
-	 * Protect the various scalars in the journal
-	 */
-	spinlock_t		j_state_lock;
-
-	/*
-	 * Number of processes waiting to create a barrier lock [j_state_lock]
-	 */
-	int			j_barrier_count;
-
-	/*
-	 * Transactions: The current running transaction...
-	 * [j_state_lock] [caller holding open handle]
-	 */
-	transaction_t		*j_running_transaction;
-
-	/*
-	 * the transaction we are pushing to disk
-	 * [j_state_lock] [caller holding open handle]
-	 */
-	transaction_t		*j_committing_transaction;
-
-	/*
-	 * ... and a linked circular list of all transactions waiting for
-	 * checkpointing. [j_list_lock]
-	 */
-	transaction_t		*j_checkpoint_transactions;
-
-	/*
-	 * Wait queue for waiting for a locked transaction to start committing,
-	 * or for a barrier lock to be released
-	 */
-	wait_queue_head_t	j_wait_transaction_locked;
-
-	/* Wait queue for waiting for checkpointing to complete */
-	wait_queue_head_t	j_wait_logspace;
-
-	/* Wait queue for waiting for commit to complete */
-	wait_queue_head_t	j_wait_done_commit;
-
-	/* Wait queue to trigger checkpointing */
-	wait_queue_head_t	j_wait_checkpoint;
-
-	/* Wait queue to trigger commit */
-	wait_queue_head_t	j_wait_commit;
-
-	/* Wait queue to wait for updates to complete */
-	wait_queue_head_t	j_wait_updates;
-
-	/* Semaphore for locking against concurrent checkpoints */
-	struct mutex		j_checkpoint_mutex;
-
-	/*
-	 * Journal head: identifies the first unused block in the journal.
-	 * [j_state_lock]
-	 */
-	unsigned int		j_head;
-
-	/*
-	 * Journal tail: identifies the oldest still-used block in the journal.
-	 * [j_state_lock]
-	 */
-	unsigned int		j_tail;
-
-	/*
-	 * Journal free: how many free blocks are there in the journal?
-	 * [j_state_lock]
-	 */
-	unsigned int		j_free;
-
-	/*
-	 * Journal start and end: the block numbers of the first usable block
-	 * and one beyond the last usable block in the journal. [j_state_lock]
-	 */
-	unsigned int		j_first;
-	unsigned int		j_last;
-
-	/*
-	 * Device, blocksize and starting block offset for the location where we
-	 * store the journal.
-	 */
-	struct block_device	*j_dev;
-	int			j_blocksize;
-	unsigned int		j_blk_offset;
-
-	/*
-	 * Device which holds the client fs.  For internal journal this will be
-	 * equal to j_dev.
-	 */
-	struct block_device	*j_fs_dev;
-
-	/* Total maximum capacity of the journal region on disk. */
-	unsigned int		j_maxlen;
-
-	/*
-	 * Protects the buffer lists and internal buffer state.
-	 */
-	spinlock_t		j_list_lock;
-
-	/* Optional inode where we store the journal.  If present, all */
-	/* journal block numbers are mapped into this inode via */
-	/* bmap(). */
-	struct inode		*j_inode;
-
-	/*
-	 * Sequence number of the oldest transaction in the log [j_state_lock]
-	 */
-	tid_t			j_tail_sequence;
-
-	/*
-	 * Sequence number of the next transaction to grant [j_state_lock]
-	 */
-	tid_t			j_transaction_sequence;
-
-	/*
-	 * Sequence number of the most recently committed transaction
-	 * [j_state_lock].
-	 */
-	tid_t			j_commit_sequence;
-
-	/*
-	 * Sequence number of the most recent transaction wanting commit
-	 * [j_state_lock]
-	 */
-	tid_t			j_commit_request;
-
-	/*
-	 * Sequence number of the most recent transaction someone is waiting
-	 * for to commit.
-	 * [j_state_lock]
-	 */
-	tid_t                   j_commit_waited;
-
-	/*
-	 * Journal uuid: identifies the object (filesystem, LVM volume etc)
-	 * backed by this journal.  This will eventually be replaced by an array
-	 * of uuids, allowing us to index multiple devices within a single
-	 * journal and to perform atomic updates across them.
-	 */
-	__u8			j_uuid[16];
-
-	/* Pointer to the current commit thread for this journal */
-	struct task_struct	*j_task;
-
-	/*
-	 * Maximum number of metadata buffers to allow in a single compound
-	 * commit transaction
-	 */
-	int			j_max_transaction_buffers;
-
-	/*
-	 * What is the maximum transaction lifetime before we begin a commit?
-	 */
-	unsigned long		j_commit_interval;
-
-	/* The timer used to wakeup the commit thread: */
-	struct timer_list	j_commit_timer;
-
-	/*
-	 * The revoke table: maintains the list of revoked blocks in the
-	 * current transaction.  [j_revoke_lock]
-	 */
-	spinlock_t		j_revoke_lock;
-	struct jbd_revoke_table_s *j_revoke;
-	struct jbd_revoke_table_s *j_revoke_table[2];
-
-	/*
-	 * array of bhs for journal_commit_transaction
-	 */
-	struct buffer_head	**j_wbuf;
-	int			j_wbufsize;
-
-	/*
-	 * this is the pid of the last person to run a synchronous operation
-	 * through the journal.
-	 */
-	pid_t			j_last_sync_writer;
-
-	/*
-	 * the average amount of time in nanoseconds it takes to commit a
-	 * transaction to the disk.  [j_state_lock]
-	 */
-	u64			j_average_commit_time;
-
-	/*
-	 * An opaque pointer to fs-private information.  ext3 puts its
-	 * superblock pointer here
-	 */
-	void *j_private;
-};
-
-/*
- * Journal flag definitions
- */
-#define JFS_UNMOUNT	0x001	/* Journal thread is being destroyed */
-#define JFS_ABORT	0x002	/* Journaling has been aborted for errors. */
-#define JFS_ACK_ERR	0x004	/* The errno in the sb has been acked */
-#define JFS_FLUSHED	0x008	/* The journal superblock has been flushed */
-#define JFS_LOADED	0x010	/* The journal superblock has been loaded */
-#define JFS_BARRIER	0x020	/* Use IDE barriers */
-#define JFS_ABORT_ON_SYNCDATA_ERR	0x040  /* Abort the journal on file
-						* data write error in ordered
-						* mode */
-
-/*
- * Function declarations for the journaling transaction and buffer
- * management
- */
-
-/* Filing buffers */
-extern void journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __journal_unfile_buffer(struct journal_head *);
-extern void __journal_refile_buffer(struct journal_head *);
-extern void journal_refile_buffer(journal_t *, struct journal_head *);
-extern void __journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
-extern void journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
-
-/* Log buffer allocation */
-extern struct journal_head * journal_get_descriptor_buffer(journal_t *);
-int journal_next_log_block(journal_t *, unsigned int *);
-
-/* Commit management */
-extern void journal_commit_transaction(journal_t *);
-
-/* Checkpoint list management */
-int __journal_clean_checkpoint_list(journal_t *journal);
-int __journal_remove_checkpoint(struct journal_head *);
-void __journal_insert_checkpoint(struct journal_head *, transaction_t *);
-
-/* Buffer IO */
-extern int
-journal_write_metadata_buffer(transaction_t	  *transaction,
-			      struct journal_head  *jh_in,
-			      struct journal_head **jh_out,
-			      unsigned int blocknr);
-
-/* Transaction locking */
-extern void		__wait_on_journal (journal_t *);
-
-/*
- * Journal locking.
- *
- * We need to lock the journal during transaction state changes so that nobody
- * ever tries to take a handle on the running transaction while we are in the
- * middle of moving it to the commit phase.  j_state_lock does this.
- *
- * Note that the locking is completely interrupt unsafe.  We never touch
- * journal structures from interrupts.
- */
-
-static inline handle_t *journal_current_handle(void)
-{
-	return current->journal_info;
-}
-
-/* The journaling code user interface:
- *
- * Create and destroy handles
- * Register buffer modifications against the current transaction.
- */
-
-extern handle_t *journal_start(journal_t *, int nblocks);
-extern int	 journal_restart (handle_t *, int nblocks);
-extern int	 journal_extend (handle_t *, int nblocks);
-extern int	 journal_get_write_access(handle_t *, struct buffer_head *);
-extern int	 journal_get_create_access (handle_t *, struct buffer_head *);
-extern int	 journal_get_undo_access(handle_t *, struct buffer_head *);
-extern int	 journal_dirty_data (handle_t *, struct buffer_head *);
-extern int	 journal_dirty_metadata (handle_t *, struct buffer_head *);
-extern void	 journal_release_buffer (handle_t *, struct buffer_head *);
-extern int	 journal_forget (handle_t *, struct buffer_head *);
-extern void	 journal_sync_buffer (struct buffer_head *);
-extern void	 journal_invalidatepage(journal_t *,
-				struct page *, unsigned int, unsigned int);
-extern int	 journal_try_to_free_buffers(journal_t *, struct page *, gfp_t);
-extern int	 journal_stop(handle_t *);
-extern int	 journal_flush (journal_t *);
-extern void	 journal_lock_updates (journal_t *);
-extern void	 journal_unlock_updates (journal_t *);
-
-extern journal_t * journal_init_dev(struct block_device *bdev,
-				struct block_device *fs_dev,
-				int start, int len, int bsize);
-extern journal_t * journal_init_inode (struct inode *);
-extern int	   journal_update_format (journal_t *);
-extern int	   journal_check_used_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_check_available_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_set_features
-		   (journal_t *, unsigned long, unsigned long, unsigned long);
-extern int	   journal_create     (journal_t *);
-extern int	   journal_load       (journal_t *journal);
-extern int	   journal_destroy    (journal_t *);
-extern int	   journal_recover    (journal_t *journal);
-extern int	   journal_wipe       (journal_t *, int);
-extern int	   journal_skip_recovery	(journal_t *);
-extern void	   journal_update_sb_log_tail	(journal_t *, tid_t, unsigned int,
-						 int);
-extern void	   journal_abort      (journal_t *, int);
-extern int	   journal_errno      (journal_t *);
-extern void	   journal_ack_err    (journal_t *);
-extern int	   journal_clear_err  (journal_t *);
-extern int	   journal_bmap(journal_t *, unsigned int, unsigned int *);
-extern int	   journal_force_commit(journal_t *);
-
-/*
- * journal_head management
- */
-struct journal_head *journal_add_journal_head(struct buffer_head *bh);
-struct journal_head *journal_grab_journal_head(struct buffer_head *bh);
-void journal_put_journal_head(struct journal_head *jh);
-
-/*
- * handle management
- */
-extern struct kmem_cache *jbd_handle_cache;
-
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
-{
-	return kmem_cache_zalloc(jbd_handle_cache, gfp_flags);
-}
-
-static inline void jbd_free_handle(handle_t *handle)
-{
-	kmem_cache_free(jbd_handle_cache, handle);
-}
-
-/* Primary revoke support */
-#define JOURNAL_REVOKE_DEFAULT_HASH 256
-extern int	   journal_init_revoke(journal_t *, int);
-extern void	   journal_destroy_revoke_caches(void);
-extern int	   journal_init_revoke_caches(void);
-
-extern void	   journal_destroy_revoke(journal_t *);
-extern int	   journal_revoke (handle_t *,
-				unsigned int, struct buffer_head *);
-extern int	   journal_cancel_revoke(handle_t *, struct journal_head *);
-extern void	   journal_write_revoke_records(journal_t *,
-						transaction_t *, int);
-
-/* Recovery revoke support */
-extern int	journal_set_revoke(journal_t *, unsigned int, tid_t);
-extern int	journal_test_revoke(journal_t *, unsigned int, tid_t);
-extern void	journal_clear_revoke(journal_t *);
-extern void	journal_switch_revoke_table(journal_t *journal);
-extern void	journal_clear_buffer_revoked_flags(journal_t *journal);
-
-/*
- * The log thread user interface:
- *
- * Request space in the current transaction, and force transaction commit
- * transitions on demand.
- */
-
-int __log_space_left(journal_t *); /* Called with journal locked */
-int log_start_commit(journal_t *journal, tid_t tid);
-int __log_start_commit(journal_t *journal, tid_t tid);
-int journal_start_commit(journal_t *journal, tid_t *tid);
-int journal_force_commit_nested(journal_t *journal);
-int log_wait_commit(journal_t *journal, tid_t tid);
-int log_do_checkpoint(journal_t *journal);
-int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
-
-void __log_wait_for_space(journal_t *journal);
-extern void	__journal_drop_transaction(journal_t *, transaction_t *);
-extern int	cleanup_journal_tail(journal_t *);
-
-/*
- * is_journal_aborted
- *
- * Simple test wrapper function to test the JFS_ABORT state flag.  This
- * bit, when set, indicates that we have had a fatal error somewhere,
- * either inside the journaling layer or indicated to us by the client
- * (e.g. ext3), and that we should not commit any further
- * transactions.
- */
-
-static inline int is_journal_aborted(journal_t *journal)
-{
-	return journal->j_flags & JFS_ABORT;
-}
-
-static inline int is_handle_aborted(handle_t *handle)
-{
-	if (handle->h_aborted)
-		return 1;
-	return is_journal_aborted(handle->h_transaction->t_journal);
-}
-
-static inline void journal_abort_handle(handle_t *handle)
-{
-	handle->h_aborted = 1;
-}
-
-#endif /* __KERNEL__   */
-
-/* Comparison functions for transaction IDs: perform comparisons using
- * modulo arithmetic so that they work over sequence number wraps. */
-
-static inline int tid_gt(tid_t x, tid_t y)
-{
-	int difference = (x - y);
-	return (difference > 0);
-}
-
-static inline int tid_geq(tid_t x, tid_t y)
-{
-	int difference = (x - y);
-	return (difference >= 0);
-}
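A brief worked example of the wrap-safe comparison above, assuming 32-bit tids:

/*
 *   tid_gt(2, 0xfffffffe):  (int)(2 - 0xfffffffe) == +4  -> true
 *   tid_gt(0xfffffffe, 2):  (int)(0xfffffffe - 2) == -4  -> false
 *
 * so a tid that has wrapped past zero still orders after the older tids,
 * as long as the two values are within 2^31 of each other.
 */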
-
-extern int journal_blocks_per_page(struct inode *inode);
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started.  Must be called under j_state_lock.
- */
-static inline int jbd_space_needed(journal_t *journal)
-{
-	int nblocks = journal->j_max_transaction_buffers;
-	if (journal->j_committing_transaction)
-		nblocks += journal->j_committing_transaction->
-					t_outstanding_credits;
-	return nblocks;
-}
-
-/*
- * Definitions which augment the buffer_head layer
- */
-
-/* journaling buffer types */
-#define BJ_None		0	/* Not journaled */
-#define BJ_SyncData	1	/* Normal data: flush before commit */
-#define BJ_Metadata	2	/* Normal journaled metadata */
-#define BJ_Forget	3	/* Buffer superseded by this transaction */
-#define BJ_IO		4	/* Buffer is for temporary IO use */
-#define BJ_Shadow	5	/* Buffer contents being shadowed to the log */
-#define BJ_LogCtl	6	/* Buffer contains log descriptors */
-#define BJ_Reserved	7	/* Buffer is reserved for access by journal */
-#define BJ_Locked	8	/* Locked for I/O during commit */
-#define BJ_Types	9
-
-extern int jbd_blocks_per_page(struct inode *inode);
-
-#ifdef __KERNEL__
-
-#define buffer_trace_init(bh)	do {} while (0)
-#define print_buffer_fields(bh)	do {} while (0)
-#define print_buffer_trace(bh)	do {} while (0)
-#define BUFFER_TRACE(bh, info)	do {} while (0)
-#define BUFFER_TRACE2(bh, bh2, info)	do {} while (0)
-#define JBUFFER_TRACE(jh, info)	do {} while (0)
-
-#endif	/* __KERNEL__ */
-
-#endif	/* _LINUX_JBD_H */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index edb640a..df07e78 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/timer.h>
 #include <linux/slab.h>
+#include <linux/bit_spinlock.h>
 #include <crypto/hash.h>
 #endif
 
@@ -336,7 +337,45 @@
 BUFFER_FNS(Shadow, shadow)
 BUFFER_FNS(Verified, verified)
 
-#include <linux/jbd_common.h>
+static inline struct buffer_head *jh2bh(struct journal_head *jh)
+{
+	return jh->b_bh;
+}
+
+static inline struct journal_head *bh2jh(struct buffer_head *bh)
+{
+	return bh->b_private;
+}
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+	bit_spin_lock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+	return bit_spin_trylock(BH_State, &bh->b_state);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+	return bit_spin_is_locked(BH_State, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+	bit_spin_unlock(BH_State, &bh->b_state);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+	bit_spin_lock(BH_JournalHead, &bh->b_state);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+	bit_spin_unlock(BH_JournalHead, &bh->b_state);
+}
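A hypothetical sketch (function name invented) of the usual pattern these open-coded helpers support now that jbd_common.h is gone: pin the buffer's journal state with the BH_State bit spinlock before looking at its journal_head.

static bool example_buffer_is_journal_dirty(struct buffer_head *bh)
{
	bool dirty;

	jbd_lock_bh_state(bh);		/* take the BH_State bit spinlock */
	dirty = bh2jh(bh) && buffer_jbddirty(bh);
	jbd_unlock_bh_state(bh);

	return dirty;
}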
 
 #define J_ASSERT(assert)	BUG_ON(!(assert))
 
@@ -1042,8 +1081,9 @@
 extern void jbd2_journal_commit_transaction(journal_t *);
 
 /* Checkpoint list management */
-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
 int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
 
 
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
deleted file mode 100644
index 3dc5343..0000000
--- a/include/linux/jbd_common.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef _LINUX_JBD_STATE_H
-#define _LINUX_JBD_STATE_H
-
-#include <linux/bit_spinlock.h>
-
-static inline struct buffer_head *jh2bh(struct journal_head *jh)
-{
-	return jh->b_bh;
-}
-
-static inline struct journal_head *bh2jh(struct buffer_head *bh)
-{
-	return bh->b_private;
-}
-
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
-	bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
-	return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
-	return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
-	bit_spin_unlock(BH_State, &bh->b_state);
-}
-
-static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
-{
-	bit_spin_lock(BH_JournalHead, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
-{
-	bit_spin_unlock(BH_JournalHead, &bh->b_state);
-}
-
-#endif
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 535fd3b..5fdc553 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -351,7 +351,7 @@
  * directly here and from __msecs_to_jiffies() in the case where
  * constant folding is not possible.
  */
-static inline unsigned long msecs_to_jiffies(const unsigned int m)
+static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
 {
 	if (__builtin_constant_p(m)) {
 		if ((int)m < 0)
@@ -363,18 +363,11 @@
 }
 
 extern unsigned long __usecs_to_jiffies(const unsigned int u);
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+#if !(USEC_PER_SEC % HZ)
 static inline unsigned long _usecs_to_jiffies(const unsigned int u)
 {
 	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
 }
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
-{
-	return u * (HZ / USEC_PER_SEC);
-}
-static inline unsigned long _usecs_to_jiffies(const unsigned int u)
-{
 #else
 static inline unsigned long _usecs_to_jiffies(const unsigned int u)
 {
@@ -405,7 +398,7 @@
  * directly here and from __msecs_to_jiffies() in the case where
  * constant folding is not possible.
  */
-static inline unsigned long usecs_to_jiffies(const unsigned int u)
+static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
 {
 	if (__builtin_constant_p(u)) {
 		if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
@@ -416,9 +409,25 @@
 	}
 }
 
-extern unsigned long timespec_to_jiffies(const struct timespec *value);
-extern void jiffies_to_timespec(const unsigned long jiffies,
-				struct timespec *value);
+extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
+extern void jiffies_to_timespec64(const unsigned long jiffies,
+				  struct timespec64 *value);
+static inline unsigned long timespec_to_jiffies(const struct timespec *value)
+{
+	struct timespec64 ts = timespec_to_timespec64(*value);
+
+	return timespec64_to_jiffies(&ts);
+}
+
+static inline void jiffies_to_timespec(const unsigned long jiffies,
+				       struct timespec *value)
+{
+	struct timespec64 ts;
+
+	jiffies_to_timespec64(jiffies, &ts);
+	*value = timespec64_to_timespec(ts);
+}
+
 extern unsigned long timeval_to_jiffies(const struct timeval *value);
 extern void jiffies_to_timeval(const unsigned long jiffies,
 			       struct timeval *value);
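For illustration (the driver function is hypothetical), existing timespec users keep compiling unchanged and are simply routed through the new timespec64 implementation above:

static void example_sleep_for_a_bit(void)
{
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000L };

	/* 1.5 s converts to roughly 1.5 * HZ jiffies via timespec64_to_jiffies() */
	schedule_timeout_interruptible(timespec_to_jiffies(&ts));
}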
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index f4de473..7f653e8 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -7,17 +7,52 @@
  * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
  * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
  *
+ * DEPRECATED API:
+ *
+ * The use of 'struct static_key' directly is now DEPRECATED. In addition,
+ * static_key_{true,false}() is also DEPRECATED. That is, DO NOT use the following:
+ *
+ * struct static_key false = STATIC_KEY_INIT_FALSE;
+ * struct static_key true = STATIC_KEY_INIT_TRUE;
+ * static_key_true()
+ * static_key_false()
+ *
+ * The updated API replacements are:
+ *
+ * DEFINE_STATIC_KEY_TRUE(key);
+ * DEFINE_STATIC_KEY_FALSE(key);
+ * static_branch_likely()
+ * static_branch_unlikely()
+ *
  * Jump labels provide an interface to generate dynamic branches using
- * self-modifying code. Assuming toolchain and architecture support, the result
- * of a "if (static_key_false(&key))" statement is an unconditional branch (which
- * defaults to false - and the true block is placed out of line).
+ * self-modifying code. Assuming toolchain and architecture support, if we
+ * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
+ * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
+ * (which defaults to false - and the true block is placed out of line).
+ * Similarly, we can define an initially true key via
+ * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
+ * "if (static_branch_unlikely(&key))", in which case we will generate an
+ * unconditional branch to the out-of-line true branch. Keys that are
+ * initially true or false can be used in both static_branch_unlikely()
+ * and static_branch_likely() statements.
  *
- * However at runtime we can change the branch target using
- * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
- * object, and for as long as there are references all branches referring to
- * that particular key will point to the (out of line) true block.
+ * At runtime we can change the branch target by setting the key
+ * to true via a call to static_branch_enable(), or false using
+ * static_branch_disable(). If the direction of the branch is switched by
+ * these calls then we run-time modify the branch target via a
+ * no-op -> jump or jump -> no-op conversion. For example, for an
+ * initially false key that is used in an "if (static_branch_unlikely(&key))"
+ * statement, setting the key to true requires us to patch in a jump
+ * to the out-of-line of true branch.
  *
- * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * In addition to static_branch_{enable,disable}, we can also reference count
+ * the key or branch direction via static_branch_{inc,dec}. Thus,
+ * static_branch_inc() can be thought of as a 'make more true' and
+ * static_branch_dec() as a 'make more false'. The inc()/dec()
+ * interface is meant to be used exclusively for a given key; do not mix it
+ * with static_branch_{enable,disable} on the same key.
+ *
+ * Since this relies on modifying code, the branch modifying functions
  * must be considered absolute slow paths (machine wide synchronization etc.).
  * OTOH, since the affected branches are unconditional, their runtime overhead
  * will be absolutely minimal, esp. in the default (off) case where the total
@@ -29,20 +64,10 @@
  * cause significant performance degradation. Struct static_key_deferred and
  * static_key_slow_dec_deferred() provide for this.
  *
- * Lacking toolchain and or architecture support, jump labels fall back to a simple
- * conditional branch.
+ * Lacking toolchain and/or architecture support, static keys fall back to a
+ * simple conditional branch.
  *
- * struct static_key my_key = STATIC_KEY_INIT_TRUE;
- *
- *   if (static_key_true(&my_key)) {
- *   }
- *
- * will result in the true case being in-line and starts the key with a single
- * reference. Mixing static_key_true() and static_key_false() on the same key is not
- * allowed.
- *
- * Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE.
+ * Additional babbling in: Documentation/static-keys.txt
  */
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
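A minimal sketch of the updated API described in the comment above; the key name and the traced function are hypothetical, but the macros are the ones introduced later in this header.

DEFINE_STATIC_KEY_FALSE(example_tracing_key);

extern void example_do_tracing(void);		/* hypothetical slow path */

void example_hot_path(void)
{
	/* compiles to a NOP here until the key is enabled at runtime */
	if (static_branch_unlikely(&example_tracing_key))
		example_do_tracing();
}

void example_enable_tracing(void)
{
	static_branch_enable(&example_tracing_key);	/* patches the NOP into a JMP */
}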
@@ -86,8 +111,8 @@
 #ifndef __ASSEMBLY__
 
 enum jump_label_type {
-	JUMP_LABEL_DISABLE = 0,
-	JUMP_LABEL_ENABLE,
+	JUMP_LABEL_NOP = 0,
+	JUMP_LABEL_JMP,
 };
 
 struct module;
@@ -101,33 +126,18 @@
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_LABEL_TYPE_FALSE_BRANCH	0UL
-#define JUMP_LABEL_TYPE_TRUE_BRANCH	1UL
-#define JUMP_LABEL_TYPE_MASK		1UL
-
-static
-inline struct jump_entry *jump_label_get_entries(struct static_key *key)
-{
-	return (struct jump_entry *)((unsigned long)key->entries
-						& ~JUMP_LABEL_TYPE_MASK);
-}
-
-static inline bool jump_label_get_branch_default(struct static_key *key)
-{
-	if (((unsigned long)key->entries & JUMP_LABEL_TYPE_MASK) ==
-	    JUMP_LABEL_TYPE_TRUE_BRANCH)
-		return true;
-	return false;
-}
+#define JUMP_TYPE_FALSE	0UL
+#define JUMP_TYPE_TRUE	1UL
+#define JUMP_TYPE_MASK	1UL
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
-	return arch_static_branch(key);
+	return arch_static_branch(key, false);
 }
 
 static __always_inline bool static_key_true(struct static_key *key)
 {
-	return !static_key_false(key);
+	return !arch_static_branch(key, true);
 }
 
 extern struct jump_entry __start___jump_table[];
@@ -145,12 +155,12 @@
 extern void static_key_slow_dec(struct static_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 
-#define STATIC_KEY_INIT_TRUE ((struct static_key)		\
+#define STATIC_KEY_INIT_TRUE					\
 	{ .enabled = ATOMIC_INIT(1),				\
-	  .entries = (void *)JUMP_LABEL_TYPE_TRUE_BRANCH })
-#define STATIC_KEY_INIT_FALSE ((struct static_key)		\
+	  .entries = (void *)JUMP_TYPE_TRUE }
+#define STATIC_KEY_INIT_FALSE					\
 	{ .enabled = ATOMIC_INIT(0),				\
-	  .entries = (void *)JUMP_LABEL_TYPE_FALSE_BRANCH })
+	  .entries = (void *)JUMP_TYPE_FALSE }
 
 #else  /* !HAVE_JUMP_LABEL */
 
@@ -198,10 +208,8 @@
 	return 0;
 }
 
-#define STATIC_KEY_INIT_TRUE ((struct static_key) \
-		{ .enabled = ATOMIC_INIT(1) })
-#define STATIC_KEY_INIT_FALSE ((struct static_key) \
-		{ .enabled = ATOMIC_INIT(0) })
+#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }
 
 #endif	/* HAVE_JUMP_LABEL */
 
@@ -213,6 +221,157 @@
 	return static_key_count(key) > 0;
 }
 
+static inline void static_key_enable(struct static_key *key)
+{
+	int count = static_key_count(key);
+
+	WARN_ON_ONCE(count < 0 || count > 1);
+
+	if (!count)
+		static_key_slow_inc(key);
+}
+
+static inline void static_key_disable(struct static_key *key)
+{
+	int count = static_key_count(key);
+
+	WARN_ON_ONCE(count < 0 || count > 1);
+
+	if (count)
+		static_key_slow_dec(key);
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * Two type wrappers around static_key, such that we can use compile time
+ * type differentiation to emit the right code.
+ *
+ * All the below code is macros in order to play type games.
+ */
+
+struct static_key_true {
+	struct static_key key;
+};
+
+struct static_key_false {
+	struct static_key key;
+};
+
+#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
+#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }
+
+#define DEFINE_STATIC_KEY_TRUE(name)	\
+	struct static_key_true name = STATIC_KEY_TRUE_INIT
+
+#define DEFINE_STATIC_KEY_FALSE(name)	\
+	struct static_key_false name = STATIC_KEY_FALSE_INIT
+
+#ifdef HAVE_JUMP_LABEL
+
+/*
+ * Combine the right initial value (type) with the right branch order
+ * to generate the desired result.
+ *
+ *
+ * type\branch|	likely (1)	      |	unlikely (0)
+ * -----------+-----------------------+------------------
+ *            |                       |
+ *  true (1)  |	   ...		      |	   ...
+ *            |    NOP		      |	   JMP L
+ *            |    <br-stmts>	      |	1: ...
+ *            |	L: ...		      |
+ *            |			      |
+ *            |			      |	L: <br-stmts>
+ *            |			      |	   jmp 1b
+ *            |                       |
+ * -----------+-----------------------+------------------
+ *            |                       |
+ *  false (0) |	   ...		      |	   ...
+ *            |    JMP L	      |	   NOP
+ *            |    <br-stmts>	      |	1: ...
+ *            |	L: ...		      |
+ *            |			      |
+ *            |			      |	L: <br-stmts>
+ *            |			      |	   jmp 1b
+ *            |                       |
+ * -----------+-----------------------+------------------
+ *
+ * The initial value is encoded in the LSB of static_key::entries,
+ * type: 0 = false, 1 = true.
+ *
+ * The branch type is encoded in the LSB of jump_entry::key,
+ * branch: 0 = unlikely, 1 = likely.
+ *
+ * This gives the following logic table:
+ *
+ *	enabled	type	branch	  instruction
+ * -----------------------------+-----------
+ *	0	0	0	| NOP
+ *	0	0	1	| JMP
+ *	0	1	0	| NOP
+ *	0	1	1	| JMP
+ *
+ *	1	0	0	| JMP
+ *	1	0	1	| NOP
+ *	1	1	0	| JMP
+ *	1	1	1	| NOP
+ *
+ * Which gives the following functions:
+ *
+ *   dynamic: instruction = enabled ^ branch
+ *   static:  instruction = type ^ branch
+ *
+ * See jump_label_type() / jump_label_init_type().
+ */
+
+extern bool ____wrong_branch_error(void);
+
+#define static_branch_likely(x)							\
+({										\
+	bool branch;								\
+	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
+		branch = !arch_static_branch(&(x)->key, true);			\
+	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+		branch = !arch_static_branch_jump(&(x)->key, true);		\
+	else									\
+		branch = ____wrong_branch_error();				\
+	branch;									\
+})
+
+#define static_branch_unlikely(x)						\
+({										\
+	bool branch;								\
+	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
+		branch = arch_static_branch_jump(&(x)->key, false);		\
+	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
+		branch = arch_static_branch(&(x)->key, false);			\
+	else									\
+		branch = ____wrong_branch_error();				\
+	branch;									\
+})
+
+#else /* !HAVE_JUMP_LABEL */
+
+#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
+#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))
+
+#endif /* HAVE_JUMP_LABEL */
+
+/*
+ * Advanced usage; refcount, branch is enabled when: count != 0
+ */
+
+#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
+#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
+
+/*
+ * Normal usage; boolean enable/disable.
+ */
+
+#define static_branch_enable(x)		static_key_enable(&(x)->key)
+#define static_branch_disable(x)	static_key_disable(&(x)->key)
+
 #endif	/* _LINUX_JUMP_LABEL_H */
 
 #endif /* __ASSEMBLY__ */
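Where several independent users may want the branch on at the same time, the reference-counted form is the appropriate one; a hypothetical sketch:

DEFINE_STATIC_KEY_FALSE(example_users_key);

void example_user_start(void)
{
	static_branch_inc(&example_users_key);	/* first user (0 -> 1) enables the branch */
}

void example_user_stop(void)
{
	static_branch_dec(&example_users_key);	/* last user (1 -> 0) disables it again */
}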
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d77..4b9f85c 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,11 +10,19 @@
 #ifdef CONFIG_KASAN
 
 #define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 
 #include <asm/kasan.h>
+#include <asm/pgtable.h>
 #include <linux/sched.h>
 
+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+				const void *shadow_end);
+
 static inline void *kasan_mem_to_shadow(const void *addr)
 {
 	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e804306..b63218f 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -323,6 +323,7 @@
 struct task_struct;
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
+#define kexec_in_progress false
 #endif /* CONFIG_KEXEC */
 
 #endif /* !defined(__ASSEBMLY__) */
diff --git a/include/linux/klist.h b/include/linux/klist.h
index 61e5b72..953f283 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -63,6 +63,7 @@
 extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
 				 struct klist_node *n);
 extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_prev(struct klist_iter *i);
 extern struct klist_node *klist_next(struct klist_iter *i);
 
 #endif
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 1ab5475..8f68490 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@
 extern void kprobes_inc_nmissed_count(struct kprobe *p);
 extern bool arch_within_kprobe_blacklist(unsigned long addr);
 
+extern bool within_kprobe_blacklist(unsigned long addr);
+
 struct kprobe_insn_cache {
 	struct mutex mutex;
 	void *(*alloc)(void);	/* allocate insn page */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d5520..869b21d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -38,6 +38,7 @@
 })
 
 void kthread_bind(struct task_struct *k, unsigned int cpu);
+void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
 bool kthread_should_stop(void);
 bool kthread_should_park(void);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 05e99b8..81089cf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -139,6 +139,7 @@
 #define KVM_REQ_DISABLE_IBS       24
 #define KVM_REQ_APIC_PAGE_RELOAD  25
 #define KVM_REQ_SMI               26
+#define KVM_REQ_HV_CRASH          27
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
@@ -363,9 +364,6 @@
 	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
 	struct srcu_struct srcu;
 	struct srcu_struct irq_srcu;
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-	u32 bsp_vcpu_id;
-#endif
 	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
 	atomic_t online_vcpus;
 	int last_boosted_vcpu;
@@ -424,8 +422,15 @@
 #define vcpu_unimpl(vcpu, fmt, ...)					\
 	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
 
+#define vcpu_debug(vcpu, fmt, ...)					\
+	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
+
 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
 {
+	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
+	 * the caller has read kvm->online_vcpus before (as is the case
+	 * for kvm_for_each_vcpu, for example).
+	 */
 	smp_rmb();
 	return kvm->vcpus[i];
 }
@@ -1055,22 +1060,9 @@
 #endif /* CONFIG_HAVE_KVM_EVENTFD */
 
 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
-static inline bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
-{
-	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
-}
-
-static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
-{
-	return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
-}
-
 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
-
 #else
-
 static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
-
 #endif
 
 static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
diff --git a/include/linux/llist.h b/include/linux/llist.h
index fbf10a0..fd4ca0b 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -55,8 +55,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
-#include <asm/cmpxchg.h>
 
 struct llist_head {
 	struct llist_node *first;
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index a16b1f9..0962b2c 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -6,6 +6,7 @@
 #include <linux/mod_devicetable.h>
 
 struct mei_cl_device;
+struct mei_device;
 
 typedef void (*mei_cl_event_cb_t)(struct mei_cl_device *device,
 			       u32 events, void *context);
@@ -17,6 +18,8 @@
  * Drivers for MEI devices will get an mei_cl_device pointer
  * when being probed and shall use it for doing ME bus I/O.
  *
+ * @bus_list: device on the bus list
+ * @bus: parent mei device
  * @dev: linux driver model device pointer
  * @me_cl: me client
  * @cl: mei client
@@ -25,10 +28,16 @@
  * @event_cb: Drivers register this callback to get asynchronous ME
  *	events (e.g. Rx buffer pending) notifications.
  * @event_context: event callback run context
+ * @events_mask: Events bit mask requested by driver.
  * @events: Events bitmask sent to the driver.
+ *
+ * @do_match: whether the device can be matched with a driver
+ * @is_added: device is already scanned
  * @priv_data: client private data
  */
 struct mei_cl_device {
+	struct list_head bus_list;
+	struct mei_device *bus;
 	struct device dev;
 
 	struct mei_me_client *me_cl;
@@ -38,8 +47,12 @@
 	struct work_struct event_work;
 	mei_cl_event_cb_t event_cb;
 	void *event_context;
+	unsigned long events_mask;
 	unsigned long events;
 
+	unsigned int do_match:1;
+	unsigned int is_added:1;
+
 	void *priv_data;
 };
 
@@ -65,10 +78,12 @@
 ssize_t  mei_cl_recv(struct mei_cl_device *device, u8 *buf, size_t length);
 
 int mei_cl_register_event_cb(struct mei_cl_device *device,
+			  unsigned long event_mask,
 			  mei_cl_event_cb_t read_cb, void *context);
 
 #define MEI_CL_EVENT_RX 0
 #define MEI_CL_EVENT_TX 1
+#define MEI_CL_EVENT_NOTIF 2
 
 void *mei_cl_get_drvdata(const struct mei_cl_device *device);
 void mei_cl_set_drvdata(struct mei_cl_device *device, void *data);
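A hedged sketch of how a client driver uses the extended registration above: it passes the events it wants (here Rx and the new notification event) as a bit mask, and the callback then checks which bits fired. The driver and function names are hypothetical.

static void example_event_cb(struct mei_cl_device *device, u32 events, void *context)
{
	if (events & BIT(MEI_CL_EVENT_RX)) {
		/* data pending: fetch it with mei_cl_recv(device, buf, len) */
	}
	if (events & BIT(MEI_CL_EVENT_NOTIF)) {
		/* handle the firmware notification */
	}
}

static int example_probe(struct mei_cl_device *device)
{
	return mei_cl_register_event_cb(device,
					BIT(MEI_CL_EVENT_RX) |
					BIT(MEI_CL_EVENT_NOTIF),
					example_event_cb, NULL);
}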
diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h
index 97cb283..8fcad63 100644
--- a/include/linux/mfd/88pm80x.h
+++ b/include/linux/mfd/88pm80x.h
@@ -60,60 +60,60 @@
 /* page 0 basic: slave adder 0x60 */
 
 #define PM800_STATUS_1			(0x01)
-#define PM800_ONKEY_STS1		(1 << 0)
-#define PM800_EXTON_STS1		(1 << 1)
-#define PM800_CHG_STS1			(1 << 2)
-#define PM800_BAT_STS1			(1 << 3)
-#define PM800_VBUS_STS1			(1 << 4)
-#define PM800_LDO_PGOOD_STS1	(1 << 5)
-#define PM800_BUCK_PGOOD_STS1	(1 << 6)
+#define PM800_ONKEY_STS1		BIT(0)
+#define PM800_EXTON_STS1		BIT(1)
+#define PM800_CHG_STS1			BIT(2)
+#define PM800_BAT_STS1			BIT(3)
+#define PM800_VBUS_STS1			BIT(4)
+#define PM800_LDO_PGOOD_STS1		BIT(5)
+#define PM800_BUCK_PGOOD_STS1		BIT(6)
 
 #define PM800_STATUS_2			(0x02)
-#define PM800_RTC_ALARM_STS2	(1 << 0)
+#define PM800_RTC_ALARM_STS2		BIT(0)
 
 /* Wakeup Registers */
-#define PM800_WAKEUP1		(0x0D)
+#define PM800_WAKEUP1			(0x0D)
 
-#define PM800_WAKEUP2		(0x0E)
-#define PM800_WAKEUP2_INV_INT		(1 << 0)
-#define PM800_WAKEUP2_INT_CLEAR		(1 << 1)
-#define PM800_WAKEUP2_INT_MASK		(1 << 2)
+#define PM800_WAKEUP2			(0x0E)
+#define PM800_WAKEUP2_INV_INT		BIT(0)
+#define PM800_WAKEUP2_INT_CLEAR		BIT(1)
+#define PM800_WAKEUP2_INT_MASK		BIT(2)
 
-#define PM800_POWER_UP_LOG	(0x10)
+#define PM800_POWER_UP_LOG		(0x10)
 
 /* Reference and low power registers */
 #define PM800_LOW_POWER1		(0x20)
 #define PM800_LOW_POWER2		(0x21)
-#define PM800_LOW_POWER_CONFIG3	(0x22)
-#define PM800_LOW_POWER_CONFIG4	(0x23)
+#define PM800_LOW_POWER_CONFIG3		(0x22)
+#define PM800_LOW_POWER_CONFIG4		(0x23)
 
 /* GPIO register */
 #define PM800_GPIO_0_1_CNTRL		(0x30)
-#define PM800_GPIO0_VAL				(1 << 0)
+#define PM800_GPIO0_VAL			BIT(0)
 #define PM800_GPIO0_GPIO_MODE(x)	(x << 1)
-#define PM800_GPIO1_VAL				(1 << 4)
+#define PM800_GPIO1_VAL			BIT(4)
 #define PM800_GPIO1_GPIO_MODE(x)	(x << 5)
 
 #define PM800_GPIO_2_3_CNTRL		(0x31)
-#define PM800_GPIO2_VAL				(1 << 0)
+#define PM800_GPIO2_VAL			BIT(0)
 #define PM800_GPIO2_GPIO_MODE(x)	(x << 1)
-#define PM800_GPIO3_VAL				(1 << 4)
+#define PM800_GPIO3_VAL			BIT(4)
 #define PM800_GPIO3_GPIO_MODE(x)	(x << 5)
 #define PM800_GPIO3_MODE_MASK		0x1F
 #define PM800_GPIO3_HEADSET_MODE	PM800_GPIO3_GPIO_MODE(6)
 
-#define PM800_GPIO_4_CNTRL			(0x32)
-#define PM800_GPIO4_VAL				(1 << 0)
+#define PM800_GPIO_4_CNTRL		(0x32)
+#define PM800_GPIO4_VAL			BIT(0)
 #define PM800_GPIO4_GPIO_MODE(x)	(x << 1)
 
 #define PM800_HEADSET_CNTRL		(0x38)
-#define PM800_HEADSET_DET_EN		(1 << 7)
-#define PM800_HSDET_SLP			(1 << 1)
+#define PM800_HEADSET_DET_EN		BIT(7)
+#define PM800_HSDET_SLP			BIT(1)
 /* PWM register */
-#define PM800_PWM1		(0x40)
-#define PM800_PWM2		(0x41)
-#define PM800_PWM3		(0x42)
-#define PM800_PWM4		(0x43)
+#define PM800_PWM1			(0x40)
+#define PM800_PWM2			(0x41)
+#define PM800_PWM3			(0x42)
+#define PM800_PWM4			(0x43)
 
 /* RTC Registers */
 #define PM800_RTC_CONTROL		(0xD0)
@@ -123,55 +123,55 @@
 #define PM800_RTC_MISC4			(0xE4)
 #define PM800_RTC_MISC5			(0xE7)
 /* bit definitions of RTC Register 1 (0xD0) */
-#define PM800_ALARM1_EN			(1 << 0)
-#define PM800_ALARM_WAKEUP		(1 << 4)
-#define PM800_ALARM			(1 << 5)
-#define PM800_RTC1_USE_XO		(1 << 7)
+#define PM800_ALARM1_EN			BIT(0)
+#define PM800_ALARM_WAKEUP		BIT(4)
+#define PM800_ALARM			BIT(5)
+#define PM800_RTC1_USE_XO		BIT(7)
 
 /* Regulator Control Registers: BUCK1,BUCK5,LDO1 have DVC */
 
 /* buck registers */
-#define PM800_SLEEP_BUCK1	(0x30)
+#define PM800_SLEEP_BUCK1		(0x30)
 
 /* BUCK Sleep Mode Register 1: BUCK[1..4] */
-#define PM800_BUCK_SLP1		(0x5A)
-#define PM800_BUCK1_SLP1_SHIFT	0
-#define PM800_BUCK1_SLP1_MASK	(0x3 << PM800_BUCK1_SLP1_SHIFT)
+#define PM800_BUCK_SLP1			(0x5A)
+#define PM800_BUCK1_SLP1_SHIFT		0
+#define PM800_BUCK1_SLP1_MASK		(0x3 << PM800_BUCK1_SLP1_SHIFT)
 
 /* page 2 GPADC: slave adder 0x02 */
 #define PM800_GPADC_MEAS_EN1		(0x01)
-#define PM800_MEAS_EN1_VBAT         (1 << 2)
+#define PM800_MEAS_EN1_VBAT		BIT(2)
 #define PM800_GPADC_MEAS_EN2		(0x02)
-#define PM800_MEAS_EN2_RFTMP        (1 << 0)
-#define PM800_MEAS_GP0_EN			(1 << 2)
-#define PM800_MEAS_GP1_EN			(1 << 3)
-#define PM800_MEAS_GP2_EN			(1 << 4)
-#define PM800_MEAS_GP3_EN			(1 << 5)
-#define PM800_MEAS_GP4_EN			(1 << 6)
+#define PM800_MEAS_EN2_RFTMP		BIT(0)
+#define PM800_MEAS_GP0_EN		BIT(2)
+#define PM800_MEAS_GP1_EN		BIT(3)
+#define PM800_MEAS_GP2_EN		BIT(4)
+#define PM800_MEAS_GP3_EN		BIT(5)
+#define PM800_MEAS_GP4_EN		BIT(6)
 
 #define PM800_GPADC_MISC_CONFIG1	(0x05)
 #define PM800_GPADC_MISC_CONFIG2	(0x06)
-#define PM800_GPADC_MISC_GPFSM_EN	(1 << 0)
+#define PM800_GPADC_MISC_GPFSM_EN	BIT(0)
 #define PM800_GPADC_SLOW_MODE(x)	(x << 3)
 
-#define PM800_GPADC_MISC_CONFIG3		(0x09)
-#define PM800_GPADC_MISC_CONFIG4		(0x0A)
+#define PM800_GPADC_MISC_CONFIG3	(0x09)
+#define PM800_GPADC_MISC_CONFIG4	(0x0A)
 
-#define PM800_GPADC_PREBIAS1			(0x0F)
+#define PM800_GPADC_PREBIAS1		(0x0F)
 #define PM800_GPADC0_GP_PREBIAS_TIME(x)	(x << 0)
-#define PM800_GPADC_PREBIAS2			(0x10)
+#define PM800_GPADC_PREBIAS2		(0x10)
 
-#define PM800_GP_BIAS_ENA1				(0x14)
-#define PM800_GPADC_GP_BIAS_EN0			(1 << 0)
-#define PM800_GPADC_GP_BIAS_EN1			(1 << 1)
-#define PM800_GPADC_GP_BIAS_EN2			(1 << 2)
-#define PM800_GPADC_GP_BIAS_EN3			(1 << 3)
+#define PM800_GP_BIAS_ENA1		(0x14)
+#define PM800_GPADC_GP_BIAS_EN0		BIT(0)
+#define PM800_GPADC_GP_BIAS_EN1		BIT(1)
+#define PM800_GPADC_GP_BIAS_EN2		BIT(2)
+#define PM800_GPADC_GP_BIAS_EN3		BIT(3)
 
 #define PM800_GP_BIAS_OUT1		(0x15)
-#define PM800_BIAS_OUT_GP0		(1 << 0)
-#define PM800_BIAS_OUT_GP1		(1 << 1)
-#define PM800_BIAS_OUT_GP2		(1 << 2)
-#define PM800_BIAS_OUT_GP3		(1 << 3)
+#define PM800_BIAS_OUT_GP0		BIT(0)
+#define PM800_BIAS_OUT_GP1		BIT(1)
+#define PM800_BIAS_OUT_GP2		BIT(2)
+#define PM800_BIAS_OUT_GP3		BIT(3)
 
 #define PM800_GPADC0_LOW_TH		0x20
 #define PM800_GPADC1_LOW_TH		0x21
@@ -222,37 +222,37 @@
 
 #define PM805_INT_STATUS1		(0x03)
 
-#define PM805_INT1_HP1_SHRT		(1 << 0)
-#define PM805_INT1_HP2_SHRT		(1 << 1)
-#define PM805_INT1_MIC_CONFLICT		(1 << 2)
-#define PM805_INT1_CLIP_FAULT		(1 << 3)
-#define PM805_INT1_LDO_OFF			(1 << 4)
-#define PM805_INT1_SRC_DPLL_LOCK	(1 << 5)
+#define PM805_INT1_HP1_SHRT		BIT(0)
+#define PM805_INT1_HP2_SHRT		BIT(1)
+#define PM805_INT1_MIC_CONFLICT		BIT(2)
+#define PM805_INT1_CLIP_FAULT		BIT(3)
+#define PM805_INT1_LDO_OFF		BIT(4)
+#define PM805_INT1_SRC_DPLL_LOCK	BIT(5)
 
 #define PM805_INT_STATUS2		(0x04)
 
-#define PM805_INT2_MIC_DET			(1 << 0)
-#define PM805_INT2_SHRT_BTN_DET		(1 << 1)
-#define PM805_INT2_VOLM_BTN_DET		(1 << 2)
-#define PM805_INT2_VOLP_BTN_DET		(1 << 3)
-#define PM805_INT2_RAW_PLL_FAULT	(1 << 4)
-#define PM805_INT2_FINE_PLL_FAULT	(1 << 5)
+#define PM805_INT2_MIC_DET		BIT(0)
+#define PM805_INT2_SHRT_BTN_DET		BIT(1)
+#define PM805_INT2_VOLM_BTN_DET		BIT(2)
+#define PM805_INT2_VOLP_BTN_DET		BIT(3)
+#define PM805_INT2_RAW_PLL_FAULT	BIT(4)
+#define PM805_INT2_FINE_PLL_FAULT	BIT(5)
 
 #define PM805_INT_MASK1			(0x05)
 #define PM805_INT_MASK2			(0x06)
-#define PM805_SHRT_BTN_DET		(1 << 1)
+#define PM805_SHRT_BTN_DET		BIT(1)
 
 /* number of status and int reg in a row */
 #define PM805_INT_REG_NUM		(2)
 
 #define PM805_MIC_DET1			(0x07)
-#define PM805_MIC_DET_EN_MIC_DET (1 << 0)
+#define PM805_MIC_DET_EN_MIC_DET	BIT(0)
 #define PM805_MIC_DET2			(0x08)
-#define PM805_MIC_DET_STATUS1	(0x09)
+#define PM805_MIC_DET_STATUS1		(0x09)
 
-#define PM805_MIC_DET_STATUS3	(0x0A)
-#define PM805_AUTO_SEQ_STATUS1	(0x0B)
-#define PM805_AUTO_SEQ_STATUS2	(0x0C)
+#define PM805_MIC_DET_STATUS3		(0x0A)
+#define PM805_AUTO_SEQ_STATUS1		(0x0B)
+#define PM805_AUTO_SEQ_STATUS2		(0x0C)
 
 #define PM805_ADC_SETTING1		(0x10)
 #define PM805_ADC_SETTING2		(0x11)
@@ -261,7 +261,7 @@
 #define PM805_ADC_GAIN2			(0x13)
 #define PM805_DMIC_SETTING		(0x15)
 #define PM805_DWS_SETTING		(0x16)
-#define PM805_MIC_CONFLICT_STS	(0x17)
+#define PM805_MIC_CONFLICT_STS		(0x17)
 
 #define PM805_PDM_SETTING1		(0x20)
 #define PM805_PDM_SETTING2		(0x21)
@@ -270,11 +270,11 @@
 #define PM805_PDM_CONTROL2		(0x24)
 #define PM805_PDM_CONTROL3		(0x25)
 
-#define PM805_HEADPHONE_SETTING			(0x26)
-#define PM805_HEADPHONE_GAIN_A2A		(0x27)
-#define PM805_HEADPHONE_SHORT_STATE		(0x28)
-#define PM805_EARPHONE_SETTING			(0x29)
-#define PM805_AUTO_SEQ_SETTING			(0x2A)
+#define PM805_HEADPHONE_SETTING		(0x26)
+#define PM805_HEADPHONE_GAIN_A2A	(0x27)
+#define PM805_HEADPHONE_SHORT_STATE	(0x28)
+#define PM805_EARPHONE_SETTING		(0x29)
+#define PM805_AUTO_SEQ_SETTING		(0x2A)
 
 struct pm80x_rtc_pdata {
 	int		vrtc;
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index 2f434f4..79e607e 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -25,6 +25,8 @@
 	WM5110 = 2,
 	WM8997 = 3,
 	WM8280 = 4,
+	WM8998 = 5,
+	WM1814 = 6,
 };
 
 #define ARIZONA_IRQ_GP1                    0
@@ -165,6 +167,7 @@
 
 int wm5110_patch(struct arizona *arizona);
 int wm8997_patch(struct arizona *arizona);
+int wm8998_patch(struct arizona *arizona);
 
 extern int arizona_of_get_named_gpio(struct arizona *arizona, const char *prop,
 				     bool mandatory);
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 43db4fa..1dc3858 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -101,7 +101,7 @@
 	 * useful for systems where an I2S bus with multiple data
 	 * lines is mastered.
 	 */
-	int max_channels_clocked[ARIZONA_MAX_AIF];
+	unsigned int max_channels_clocked[ARIZONA_MAX_AIF];
 
 	/** GPIO5 is used for jack detection */
 	bool jd_gpio5;
@@ -125,22 +125,22 @@
 	unsigned int hpdet_channel;
 
 	/** Extra debounce timeout used during initial mic detection (ms) */
-	int micd_detect_debounce;
+	unsigned int micd_detect_debounce;
 
 	/** GPIO for mic detection polarity */
 	int micd_pol_gpio;
 
 	/** Mic detect ramp rate */
-	int micd_bias_start_time;
+	unsigned int micd_bias_start_time;
 
 	/** Mic detect sample rate */
-	int micd_rate;
+	unsigned int micd_rate;
 
 	/** Mic detect debounce level */
-	int micd_dbtime;
+	unsigned int micd_dbtime;
 
 	/** Mic detect timeout (ms) */
-	int micd_timeout;
+	unsigned int micd_timeout;
 
 	/** Force MICBIAS on for mic detect */
 	bool micd_force_micbias;
@@ -162,6 +162,8 @@
 	/**
 	 * Mode of input structures
 	 * One of the ARIZONA_INMODE_xxx values
+	 * wm5102/wm5110/wm8280/wm8997: [0]=IN1 [1]=IN2 [2]=IN3 [3]=IN4
+	 * wm8998: [0]=IN1A [1]=IN2A [2]=IN1B [3]=IN2B
 	 */
 	int inmode[ARIZONA_MAX_INPUT];
 
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 3499d36..fdd70b3 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -39,6 +39,7 @@
 #define ARIZONA_PWM_DRIVE_3                      0x32
 #define ARIZONA_WAKE_CONTROL                     0x40
 #define ARIZONA_SEQUENCE_CONTROL                 0x41
+#define ARIZONA_SPARE_TRIGGERS                   0x42
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1    0x61
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2    0x62
 #define ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3    0x63
@@ -139,6 +140,7 @@
 #define ARIZONA_MIC_DETECT_LEVEL_2		 0x2A7
 #define ARIZONA_MIC_DETECT_LEVEL_3		 0x2A8
 #define ARIZONA_MIC_DETECT_LEVEL_4		 0x2A9
+#define ARIZONA_MIC_DETECT_4                     0x2AB
 #define ARIZONA_MIC_NOISE_MIX_CONTROL_1          0x2C3
 #define ARIZONA_ISOLATION_CONTROL                0x2CB
 #define ARIZONA_JACK_DETECT_ANALOGUE             0x2D3
@@ -225,14 +227,18 @@
 #define ARIZONA_DAC_VOLUME_LIMIT_6R              0x43E
 #define ARIZONA_NOISE_GATE_SELECT_6R             0x43F
 #define ARIZONA_DRE_ENABLE                       0x440
+#define ARIZONA_DRE_CONTROL_1                    0x441
 #define ARIZONA_DRE_CONTROL_2                    0x442
 #define ARIZONA_DRE_CONTROL_3                    0x443
+#define ARIZONA_EDRE_ENABLE                      0x448
 #define ARIZONA_DAC_AEC_CONTROL_1                0x450
+#define ARIZONA_DAC_AEC_CONTROL_2                0x451
 #define ARIZONA_NOISE_GATE_CONTROL               0x458
 #define ARIZONA_PDM_SPK1_CTRL_1                  0x490
 #define ARIZONA_PDM_SPK1_CTRL_2                  0x491
 #define ARIZONA_PDM_SPK2_CTRL_1                  0x492
 #define ARIZONA_PDM_SPK2_CTRL_2                  0x493
+#define ARIZONA_HP_TEST_CTRL_13                  0x49A
 #define ARIZONA_HP1_SHORT_CIRCUIT_CTRL           0x4A0
 #define ARIZONA_HP2_SHORT_CIRCUIT_CTRL           0x4A1
 #define ARIZONA_HP3_SHORT_CIRCUIT_CTRL           0x4A2
@@ -310,6 +316,10 @@
 #define ARIZONA_AIF3_TX_ENABLES                  0x599
 #define ARIZONA_AIF3_RX_ENABLES                  0x59A
 #define ARIZONA_AIF3_FORCE_WRITE                 0x59B
+#define ARIZONA_SPD1_TX_CONTROL                  0x5C2
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_1         0x5C3
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_2         0x5C4
+#define ARIZONA_SPD1_TX_CHANNEL_STATUS_3         0x5C5
 #define ARIZONA_SLIMBUS_FRAMER_REF_GEAR          0x5E3
 #define ARIZONA_SLIMBUS_RATES_1                  0x5E5
 #define ARIZONA_SLIMBUS_RATES_2                  0x5E6
@@ -643,6 +653,10 @@
 #define ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME        0x7FD
 #define ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE        0x7FE
 #define ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME        0x7FF
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_SOURCE       0x800
+#define ARIZONA_SPDIFTX1MIX_INPUT_1_VOLUME       0x801
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_SOURCE       0x808
+#define ARIZONA_SPDIFTX2MIX_INPUT_1_VOLUME       0x809
 #define ARIZONA_EQ1MIX_INPUT_1_SOURCE            0x880
 #define ARIZONA_EQ1MIX_INPUT_1_VOLUME            0x881
 #define ARIZONA_EQ1MIX_INPUT_2_SOURCE            0x882
@@ -868,6 +882,7 @@
 #define ARIZONA_GPIO5_CTRL                       0xC04
 #define ARIZONA_IRQ_CTRL_1                       0xC0F
 #define ARIZONA_GPIO_DEBOUNCE_CONFIG             0xC10
+#define ARIZONA_GP_SWITCH_1                      0xC18
 #define ARIZONA_MISC_PAD_CTRL_1                  0xC20
 #define ARIZONA_MISC_PAD_CTRL_2                  0xC21
 #define ARIZONA_MISC_PAD_CTRL_3                  0xC22
@@ -1169,6 +1184,13 @@
 #define ARIZONA_DSP4_SCRATCH_1                   0x1441
 #define ARIZONA_DSP4_SCRATCH_2                   0x1442
 #define ARIZONA_DSP4_SCRATCH_3                   0x1443
+#define ARIZONA_FRF_COEFF_1                      0x1700
+#define ARIZONA_FRF_COEFF_2                      0x1701
+#define ARIZONA_FRF_COEFF_3                      0x1702
+#define ARIZONA_FRF_COEFF_4                      0x1703
+#define ARIZONA_V2_DAC_COMP_1                    0x1704
+#define ARIZONA_V2_DAC_COMP_2                    0x1705
+
 
 /*
  * Field Definitions.
@@ -1431,6 +1453,42 @@
 #define ARIZONA_WSEQ_ENA_JD2_RISE_WIDTH               1  /* WSEQ_ENA_JD2_RISE */
 
 /*
+ * R66 (0x42) - Spare Triggers
+ */
+#define ARIZONA_WS_TRG8                          0x0080  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_MASK                     0x0080  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_SHIFT                         7  /* WS_TRG8 */
+#define ARIZONA_WS_TRG8_WIDTH                         1  /* WS_TRG8 */
+#define ARIZONA_WS_TRG7                          0x0040  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_MASK                     0x0040  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_SHIFT                         6  /* WS_TRG7 */
+#define ARIZONA_WS_TRG7_WIDTH                         1  /* WS_TRG7 */
+#define ARIZONA_WS_TRG6                          0x0020  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_MASK                     0x0020  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_SHIFT                         5  /* WS_TRG6 */
+#define ARIZONA_WS_TRG6_WIDTH                         1  /* WS_TRG6 */
+#define ARIZONA_WS_TRG5                          0x0010  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_MASK                     0x0010  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_SHIFT                         4  /* WS_TRG5 */
+#define ARIZONA_WS_TRG5_WIDTH                         1  /* WS_TRG5 */
+#define ARIZONA_WS_TRG4                          0x0008  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_MASK                     0x0008  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_SHIFT                         3  /* WS_TRG4 */
+#define ARIZONA_WS_TRG4_WIDTH                         1  /* WS_TRG4 */
+#define ARIZONA_WS_TRG3                          0x0004  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_MASK                     0x0004  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_SHIFT                         2  /* WS_TRG3 */
+#define ARIZONA_WS_TRG3_WIDTH                         1  /* WS_TRG3 */
+#define ARIZONA_WS_TRG2                          0x0002  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_MASK                     0x0002  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_SHIFT                         1  /* WS_TRG2 */
+#define ARIZONA_WS_TRG2_WIDTH                         1  /* WS_TRG2 */
+#define ARIZONA_WS_TRG1                          0x0001  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_MASK                     0x0001  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_SHIFT                         0  /* WS_TRG1 */
+#define ARIZONA_WS_TRG1_WIDTH                         1  /* WS_TRG1 */
+
+/*
  * R97 (0x61) - Sample Rate Sequence Select 1
  */
 #define ARIZONA_WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR_MASK 0x01FF  /* WSEQ_SAMPLE_RATE_DETECT_A_SEQ_ADDR - [8:0] */
@@ -2325,6 +2383,9 @@
 #define ARIZONA_HP_IDAC_STEER_MASK               0x0004  /* HP_IDAC_STEER */
 #define ARIZONA_HP_IDAC_STEER_SHIFT                   2  /* HP_IDAC_STEER */
 #define ARIZONA_HP_IDAC_STEER_WIDTH                   1  /* HP_IDAC_STEER */
+#define WM8998_HP_RATE_MASK                      0x0006  /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_SHIFT                          1  /* HP_RATE - [2:1] */
+#define WM8998_HP_RATE_WIDTH                          2  /* HP_RATE - [2:1] */
 #define ARIZONA_HP_RATE                          0x0002  /* HP_RATE */
 #define ARIZONA_HP_RATE_MASK                     0x0002  /* HP_RATE */
 #define ARIZONA_HP_RATE_SHIFT                         1  /* HP_RATE */
@@ -2413,6 +2474,16 @@
 #define ARIZONA_MICD_STS_WIDTH                        1  /* MICD_STS */
 
 /*
+ * R683 (0x2AB) - Mic Detect 4
+ */
+#define ARIZONA_MICDET_ADCVAL_DIFF_MASK          0xFF00  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_SHIFT              8  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_DIFF_WIDTH              8  /* MICDET_ADCVAL_DIFF - [15:8] */
+#define ARIZONA_MICDET_ADCVAL_MASK               0x007F  /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_SHIFT                   0  /* MICDET_ADCVAL - [6:0] */
+#define ARIZONA_MICDET_ADCVAL_WIDTH                   7  /* MICDET_ADCVAL - [6:0] */
+
+/*
  * R707 (0x2C3) - Mic noise mix control 1
  */
 #define ARIZONA_MICMUTE_RATE_MASK                0x7800  /* MICMUTE_RATE - [14:11] */
@@ -2528,6 +2599,12 @@
 /*
  * R785 (0x311) - ADC Digital Volume 1L
  */
+#define ARIZONA_IN1L_SRC_MASK                    0x4000  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SHIFT                       14  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_WIDTH                        1  /* IN1L_SRC - [14] */
+#define ARIZONA_IN1L_SRC_SE_MASK                 0x2000  /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_SHIFT                    13  /* IN1L_SRC - [13] */
+#define ARIZONA_IN1L_SRC_SE_WIDTH                     1  /* IN1L_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -2560,6 +2637,12 @@
 /*
  * R789 (0x315) - ADC Digital Volume 1R
  */
+#define ARIZONA_IN1R_SRC_MASK                    0x4000  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SHIFT                       14  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_WIDTH                        1  /* IN1R_SRC - [14] */
+#define ARIZONA_IN1R_SRC_SE_MASK                 0x2000  /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_SHIFT                    13  /* IN1R_SRC - [13] */
+#define ARIZONA_IN1R_SRC_SE_WIDTH                     1  /* IN1R_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -2604,6 +2687,12 @@
 /*
  * R793 (0x319) - ADC Digital Volume 2L
  */
+#define ARIZONA_IN2L_SRC_MASK                    0x4000  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SHIFT                       14  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_WIDTH                        1  /* IN2L_SRC - [14] */
+#define ARIZONA_IN2L_SRC_SE_MASK                 0x2000  /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_SHIFT                    13  /* IN2L_SRC - [13] */
+#define ARIZONA_IN2L_SRC_SE_WIDTH                     1  /* IN2L_SRC - [13] */
 #define ARIZONA_IN_VU                            0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_MASK                       0x0200  /* IN_VU */
 #define ARIZONA_IN_VU_SHIFT                           9  /* IN_VU */
@@ -3412,11 +3501,45 @@
 #define ARIZONA_DRE1L_ENA_WIDTH                       1  /* DRE1L_ENA */
 
 /*
+ * R1088 (0x440) - DRE Enable (WM8998)
+ */
+#define WM8998_DRE3L_ENA                          0x0020  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_MASK                     0x0020  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_SHIFT                         5  /* DRE3L_ENA */
+#define WM8998_DRE3L_ENA_WIDTH                         1  /* DRE3L_ENA */
+#define WM8998_DRE2L_ENA                          0x0008  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_MASK                     0x0008  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_SHIFT                         3  /* DRE2L_ENA */
+#define WM8998_DRE2L_ENA_WIDTH                         1  /* DRE2L_ENA */
+#define WM8998_DRE2R_ENA                          0x0004  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_MASK                     0x0004  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_SHIFT                         2  /* DRE2R_ENA */
+#define WM8998_DRE2R_ENA_WIDTH                         1  /* DRE2R_ENA */
+#define WM8998_DRE1L_ENA                          0x0002  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_MASK                     0x0002  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_SHIFT                         1  /* DRE1L_ENA */
+#define WM8998_DRE1L_ENA_WIDTH                         1  /* DRE1L_ENA */
+#define WM8998_DRE1R_ENA                          0x0001  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_MASK                     0x0001  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_SHIFT                         0  /* DRE1R_ENA */
+#define WM8998_DRE1R_ENA_WIDTH                         1  /* DRE1R_ENA */
+
+/*
+ * R1089 (0x441) - DRE Control 1
+ */
+#define ARIZONA_DRE_ENV_TC_FAST_MASK             0x0F00  /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_SHIFT                 8  /* DRE_ENV_TC_FAST - [11:8] */
+#define ARIZONA_DRE_ENV_TC_FAST_WIDTH                 4  /* DRE_ENV_TC_FAST - [11:8] */
+
+/*
  * R1090 (0x442) - DRE Control 2
  */
 #define ARIZONA_DRE_T_LOW_MASK                   0x3F00  /* DRE_T_LOW - [13:8] */
 #define ARIZONA_DRE_T_LOW_SHIFT                       8  /* DRE_T_LOW - [13:8] */
 #define ARIZONA_DRE_T_LOW_WIDTH                       6  /* DRE_T_LOW - [13:8] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_MASK          0x000F  /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_SHIFT              0  /* DRE_ALOG_VOL_DELAY - [3:0] */
+#define ARIZONA_DRE_ALOG_VOL_DELAY_WIDTH              4  /* DRE_ALOG_VOL_DELAY - [3:0] */
 
 /*
  * R1091 (0x443) - DRE Control 3
@@ -3428,6 +3551,49 @@
 #define ARIZONA_DRE_LOW_LEVEL_ABS_SHIFT               0  /* LOW_LEVEL_ABS - [3:0] */
 #define ARIZONA_DRE_LOW_LEVEL_ABS_WIDTH               4  /* LOW_LEVEL_ABS - [3:0] */
 
+/* R1096 (0x448) - EDRE Enable
+ */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA              0x0200  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_MASK         0x0200  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_SHIFT             9  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR2_ENA_WIDTH             1  /* EDRE_OUT4L_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA              0x0100  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_MASK         0x0100  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_SHIFT             8  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4R_THR2_ENA_WIDTH             1  /* EDRE_OUT4R_THR2_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA              0x0080  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_MASK         0x0080  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_SHIFT             7  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4L_THR1_ENA_WIDTH             1  /* EDRE_OUT4L_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA              0x0040  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_MASK         0x0040  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_SHIFT             6  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT4R_THR1_ENA_WIDTH             1  /* EDRE_OUT4R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA              0x0020  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_MASK         0x0020  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_SHIFT             5  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3L_THR1_ENA_WIDTH             1  /* EDRE_OUT3L_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA              0x0010  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_MASK         0x0010  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_SHIFT             4  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT3R_THR1_ENA_WIDTH             1  /* EDRE_OUT3R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA              0x0008  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_MASK         0x0008  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_SHIFT             3  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2L_THR1_ENA_WIDTH             1  /* EDRE_OUT2L_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA              0x0004  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_MASK         0x0004  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_SHIFT             2  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT2R_THR1_ENA_WIDTH             1  /* EDRE_OUT2R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA              0x0002  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_MASK         0x0002  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_SHIFT             1  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1L_THR1_ENA_WIDTH             1  /* EDRE_OUT1L_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA              0x0001  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_MASK         0x0001  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_SHIFT             0  /* EDRE_OUT1R_THR1_ENA */
+#define ARIZONA_EDRE_OUT1R_THR1_ENA_WIDTH             1  /* EDRE_OUT1R_THR1_ENA */
+
 /*
  * R1104 (0x450) - DAC AEC Control 1
  */
@@ -4308,6 +4474,86 @@
 #define ARIZONA_AIF3_FRC_WR_WIDTH                     1  /* AIF3_FRC_WR */
 
 /*
+ * R1474 (0x5C2) - SPD1 TX Control
+ */
+#define ARIZONA_SPD1_VAL2                        0x2000  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_MASK                   0x2000  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_SHIFT                      13  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL2_WIDTH                       1  /* SPD1_VAL2 */
+#define ARIZONA_SPD1_VAL1                        0x1000  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_MASK                   0x1000  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_SHIFT                      12  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_VAL1_WIDTH                       1  /* SPD1_VAL1 */
+#define ARIZONA_SPD1_RATE_MASK                   0x00F0  /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_SHIFT                       4  /* SPD1_RATE */
+#define ARIZONA_SPD1_RATE_WIDTH                       4  /* SPD1_RATE */
+#define ARIZONA_SPD1_ENA                         0x0001  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_MASK                    0x0001  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_SHIFT                        0  /* SPD1_ENA */
+#define ARIZONA_SPD1_ENA_WIDTH                        1  /* SPD1_ENA */
+
+/*
+ * R1475 (0x5C3) - SPD1 TX Channel Status 1
+ */
+#define ARIZONA_SPD1_CATCODE_MASK                0xFF00  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_SHIFT                    8  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CATCODE_WIDTH                    8  /* SPD1_CATCODE */
+#define ARIZONA_SPD1_CHSTMODE_MASK               0x00C0  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_SHIFT                   6  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_CHSTMODE_WIDTH                   2  /* SPD1_CHSTMODE */
+#define ARIZONA_SPD1_PREEMPH_MASK                0x0038  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_SHIFT                    3  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_PREEMPH_WIDTH                    3  /* SPD1_PREEMPH */
+#define ARIZONA_SPD1_NOCOPY                      0x0004  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_MASK                 0x0004  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_SHIFT                     2  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOCOPY_WIDTH                     1  /* SPD1_NOCOPY */
+#define ARIZONA_SPD1_NOAUDIO                     0x0002  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_MASK                0x0002  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_SHIFT                    1  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_NOAUDIO_WIDTH                    1  /* SPD1_NOAUDIO */
+#define ARIZONA_SPD1_PRO                         0x0001  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_MASK                    0x0001  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_SHIFT                        0  /* SPD1_PRO */
+#define ARIZONA_SPD1_PRO_WIDTH                        1  /* SPD1_PRO */
+
+/*
+ * R1476 (0x5C4) - SPD1 TX Channel Status 2
+ */
+#define ARIZONA_SPD1_FREQ_MASK                   0xF000  /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_SHIFT                      12  /* SPD1_FREQ */
+#define ARIZONA_SPD1_FREQ_WIDTH                       4  /* SPD1_FREQ */
+#define ARIZONA_SPD1_CHNUM2_MASK                 0x0F00  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_SHIFT                     8  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM2_WIDTH                     4  /* SPD1_CHNUM2 */
+#define ARIZONA_SPD1_CHNUM1_MASK                 0x00F0  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_SHIFT                     4  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_CHNUM1_WIDTH                     4  /* SPD1_CHNUM1 */
+#define ARIZONA_SPD1_SRCNUM_MASK                 0x000F  /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_SHIFT                     0  /* SPD1_SRCNUM */
+#define ARIZONA_SPD1_SRCNUM_WIDTH                     4  /* SPD1_SRCNUM */
+
+/*
+ * R1477 (0x5C5) - SPD1 TX Channel Status 3
+ */
+#define ARIZONA_SPD1_ORGSAMP_MASK                 0x0F00  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_SHIFT                     8  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_ORGSAMP_WIDTH                     4  /* SPD1_ORGSAMP */
+#define ARIZONA_SPD1_TXWL_MASK                    0x00E0  /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_SHIFT                        5  /* SPD1_TXWL */
+#define ARIZONA_SPD1_TXWL_WIDTH                        3  /* SPD1_TXWL */
+#define ARIZONA_SPD1_MAXWL                        0x0010  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_MASK                   0x0010  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_SHIFT                       4  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_MAXWL_WIDTH                       1  /* SPD1_MAXWL */
+#define ARIZONA_SPD1_CS31_30_MASK                 0x000C  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_SHIFT                     2  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CS31_30_WIDTH                     2  /* SPD1_CS31_30 */
+#define ARIZONA_SPD1_CLKACU_MASK                  0x0003  /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_SHIFT                      0  /* SPD1_CLKACU */
+#define ARIZONA_SPD1_CLKACU_WIDTH                      2  /* SPD1_CLKACU */
+
+/*
  * R1507 (0x5E3) - SLIMbus Framer Ref Gear
  */
 #define ARIZONA_SLIMCLK_SRC                      0x0010  /* SLIMCLK_SRC */
@@ -4562,6 +4808,13 @@
 #define ARIZONA_GP_DBTIME_WIDTH                       4  /* GP_DBTIME - [15:12] */
 
 /*
+ * R3096 (0xC18) - GP Switch 1
+ */
+#define ARIZONA_SW1_MODE_MASK                    0x0003  /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_SHIFT                        0  /* SW1_MODE - [1:0] */
+#define ARIZONA_SW1_MODE_WIDTH                        2  /* SW1_MODE - [1:0] */
+
+/*
  * R3104 (0xC20) - Misc Pad Ctrl 1
  */
 #define ARIZONA_LDO1ENA_PD                       0x8000  /* LDO1ENA_PD */
@@ -6301,6 +6554,10 @@
 /*
  * R3366 (0xD26) - Interrupt Raw Status 8
  */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS            0x8000  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_MASK       0x8000  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_SHIFT          15  /* SPDIF_OVERCLOCKED_STS */
+#define ARIZONA_SPDIF_OVERCLOCKED_STS_WIDTH           1  /* SPDIF_OVERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS            0x0400  /* AIF3_UNDERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS_MASK       0x0400  /* AIF3_UNDERCLOCKED_STS */
 #define ARIZONA_AIF3_UNDERCLOCKED_STS_SHIFT          10  /* AIF3_UNDERCLOCKED_STS */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index c2aa853..cc8ad1e 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -12,7 +12,8 @@
 #define __LINUX_MFD_AXP20X_H
 
 enum {
-	AXP202_ID = 0,
+	AXP152_ID = 0,
+	AXP202_ID,
 	AXP209_ID,
 	AXP221_ID,
 	AXP288_ID,
@@ -22,6 +23,24 @@
 #define AXP20X_DATACACHE(m)		(0x04 + (m))
 
 /* Power supply */
+#define AXP152_PWR_OP_MODE		0x01
+#define AXP152_LDO3456_DC1234_CTRL	0x12
+#define AXP152_ALDO_OP_MODE		0x13
+#define AXP152_LDO0_CTRL		0x15
+#define AXP152_DCDC2_V_OUT		0x23
+#define AXP152_DCDC2_V_SCAL		0x25
+#define AXP152_DCDC1_V_OUT		0x26
+#define AXP152_DCDC3_V_OUT		0x27
+#define AXP152_ALDO12_V_OUT		0x28
+#define AXP152_DLDO1_V_OUT		0x29
+#define AXP152_DLDO2_V_OUT		0x2a
+#define AXP152_DCDC4_V_OUT		0x2b
+#define AXP152_V_OFF			0x31
+#define AXP152_OFF_CTRL			0x32
+#define AXP152_PEK_KEY			0x36
+#define AXP152_DCDC_FREQ		0x37
+#define AXP152_DCDC_MODE		0x80
+
 #define AXP20X_PWR_INPUT_STATUS		0x00
 #define AXP20X_PWR_OP_MODE		0x01
 #define AXP20X_USB_OTG_STATUS		0x02
@@ -69,6 +88,13 @@
 #define AXP22X_CHRG_CTRL3		0x35
 
 /* Interrupt */
+#define AXP152_IRQ1_EN			0x40
+#define AXP152_IRQ2_EN			0x41
+#define AXP152_IRQ3_EN			0x42
+#define AXP152_IRQ1_STATE		0x48
+#define AXP152_IRQ2_STATE		0x49
+#define AXP152_IRQ3_STATE		0x4a
+
 #define AXP20X_IRQ1_EN			0x40
 #define AXP20X_IRQ2_EN			0x41
 #define AXP20X_IRQ3_EN			0x42
@@ -127,6 +153,19 @@
 #define AXP22X_PWREN_CTRL2		0x8d
 
 /* GPIO */
+#define AXP152_GPIO0_CTRL		0x90
+#define AXP152_GPIO1_CTRL		0x91
+#define AXP152_GPIO2_CTRL		0x92
+#define AXP152_GPIO3_CTRL		0x93
+#define AXP152_LDOGPIO2_V_OUT		0x96
+#define AXP152_GPIO_INPUT		0x97
+#define AXP152_PWM0_FREQ_X		0x98
+#define AXP152_PWM0_FREQ_Y		0x99
+#define AXP152_PWM0_DUTY_CYCLE		0x9a
+#define AXP152_PWM1_FREQ_X		0x9b
+#define AXP152_PWM1_FREQ_Y		0x9c
+#define AXP152_PWM1_DUTY_CYCLE		0x9d
+
 #define AXP20X_GPIO0_CTRL		0x90
 #define AXP20X_LDO5_V_OUT		0x91
 #define AXP20X_GPIO1_CTRL		0x92
@@ -151,6 +190,12 @@
 #define AXP20X_CC_CTRL			0xb8
 #define AXP20X_FG_RES			0xb9
 
+/* OCV */
+#define AXP20X_RDC_H			0xba
+#define AXP20X_RDC_L			0xbb
+#define AXP20X_OCV(m)			(0xc0 + (m))
+#define AXP20X_OCV_MAX			0xf
+
 /* AXP22X specific registers */
 #define AXP22X_BATLOW_THRES1		0xe6
 
@@ -218,6 +263,26 @@
 
 /* IRQs */
 enum {
+	AXP152_IRQ_LDO0IN_CONNECT = 1,
+	AXP152_IRQ_LDO0IN_REMOVAL,
+	AXP152_IRQ_ALDO0IN_CONNECT,
+	AXP152_IRQ_ALDO0IN_REMOVAL,
+	AXP152_IRQ_DCDC1_V_LOW,
+	AXP152_IRQ_DCDC2_V_LOW,
+	AXP152_IRQ_DCDC3_V_LOW,
+	AXP152_IRQ_DCDC4_V_LOW,
+	AXP152_IRQ_PEK_SHORT,
+	AXP152_IRQ_PEK_LONG,
+	AXP152_IRQ_TIMER,
+	AXP152_IRQ_PEK_RIS_EDGE,
+	AXP152_IRQ_PEK_FAL_EDGE,
+	AXP152_IRQ_GPIO3_INPUT,
+	AXP152_IRQ_GPIO2_INPUT,
+	AXP152_IRQ_GPIO1_INPUT,
+	AXP152_IRQ_GPIO0_INPUT,
+};
+
+enum {
 	AXP20X_IRQ_ACIN_OVER_V = 1,
 	AXP20X_IRQ_ACIN_PLUGIN,
 	AXP20X_IRQ_ACIN_REMOVAL,
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
new file mode 100644
index 0000000..376ba84
--- /dev/null
+++ b/include/linux/mfd/da9062/core.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_DA9062_CORE_H__
+#define __MFD_DA9062_CORE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mfd/da9062/registers.h>
+
+/* Interrupts */
+enum da9062_irqs {
+	/* IRQ A */
+	DA9062_IRQ_ONKEY,
+	DA9062_IRQ_ALARM,
+	DA9062_IRQ_TICK,
+	DA9062_IRQ_WDG_WARN,
+	DA9062_IRQ_SEQ_RDY,
+	/* IRQ B */
+	DA9062_IRQ_TEMP,
+	DA9062_IRQ_LDO_LIM,
+	DA9062_IRQ_DVC_RDY,
+	DA9062_IRQ_VDD_WARN,
+	/* IRQ C */
+	DA9062_IRQ_GPI0,
+	DA9062_IRQ_GPI1,
+	DA9062_IRQ_GPI2,
+	DA9062_IRQ_GPI3,
+	DA9062_IRQ_GPI4,
+
+	DA9062_NUM_IRQ,
+};
+
+struct da9062 {
+	struct device *dev;
+	struct regmap *regmap;
+	struct regmap_irq_chip_data *regmap_irq;
+};
+
+#endif /* __MFD_DA9062_CORE_H__ */
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
new file mode 100644
index 0000000..97790d1
--- /dev/null
+++ b/include/linux/mfd/da9062/registers.h
@@ -0,0 +1,1108 @@
+/*
+ * registers.h - Register definitions for DA9062
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DA9062_H__
+#define __DA9062_H__
+
+#define DA9062_PMIC_DEVICE_ID		0x62
+#define DA9062_PMIC_VARIANT_MRC_AA	0x01
+
+#define DA9062_I2C_PAGE_SEL_SHIFT	1
+
+/*
+ * Registers
+ */
+
+#define DA9062AA_PAGE_CON		0x000
+#define DA9062AA_STATUS_A		0x001
+#define DA9062AA_STATUS_B		0x002
+#define DA9062AA_STATUS_D		0x004
+#define DA9062AA_FAULT_LOG		0x005
+#define DA9062AA_EVENT_A		0x006
+#define DA9062AA_EVENT_B		0x007
+#define DA9062AA_EVENT_C		0x008
+#define DA9062AA_IRQ_MASK_A		0x00A
+#define DA9062AA_IRQ_MASK_B		0x00B
+#define DA9062AA_IRQ_MASK_C		0x00C
+#define DA9062AA_CONTROL_A		0x00E
+#define DA9062AA_CONTROL_B		0x00F
+#define DA9062AA_CONTROL_C		0x010
+#define DA9062AA_CONTROL_D		0x011
+#define DA9062AA_CONTROL_E		0x012
+#define DA9062AA_CONTROL_F		0x013
+#define DA9062AA_PD_DIS			0x014
+#define DA9062AA_GPIO_0_1		0x015
+#define DA9062AA_GPIO_2_3		0x016
+#define DA9062AA_GPIO_4			0x017
+#define DA9062AA_GPIO_WKUP_MODE		0x01C
+#define DA9062AA_GPIO_MODE0_4		0x01D
+#define DA9062AA_GPIO_OUT0_2		0x01E
+#define DA9062AA_GPIO_OUT3_4		0x01F
+#define DA9062AA_BUCK2_CONT		0x020
+#define DA9062AA_BUCK1_CONT		0x021
+#define DA9062AA_BUCK4_CONT		0x022
+#define DA9062AA_BUCK3_CONT		0x024
+#define DA9062AA_LDO1_CONT		0x026
+#define DA9062AA_LDO2_CONT		0x027
+#define DA9062AA_LDO3_CONT		0x028
+#define DA9062AA_LDO4_CONT		0x029
+#define DA9062AA_DVC_1			0x032
+#define DA9062AA_COUNT_S		0x040
+#define DA9062AA_COUNT_MI		0x041
+#define DA9062AA_COUNT_H		0x042
+#define DA9062AA_COUNT_D		0x043
+#define DA9062AA_COUNT_MO		0x044
+#define DA9062AA_COUNT_Y		0x045
+#define DA9062AA_ALARM_S		0x046
+#define DA9062AA_ALARM_MI		0x047
+#define DA9062AA_ALARM_H		0x048
+#define DA9062AA_ALARM_D		0x049
+#define DA9062AA_ALARM_MO		0x04A
+#define DA9062AA_ALARM_Y		0x04B
+#define DA9062AA_SECOND_A		0x04C
+#define DA9062AA_SECOND_B		0x04D
+#define DA9062AA_SECOND_C		0x04E
+#define DA9062AA_SECOND_D		0x04F
+#define DA9062AA_SEQ			0x081
+#define DA9062AA_SEQ_TIMER		0x082
+#define DA9062AA_ID_2_1			0x083
+#define DA9062AA_ID_4_3			0x084
+#define DA9062AA_ID_12_11		0x088
+#define DA9062AA_ID_14_13		0x089
+#define DA9062AA_ID_16_15		0x08A
+#define DA9062AA_ID_22_21		0x08D
+#define DA9062AA_ID_24_23		0x08E
+#define DA9062AA_ID_26_25		0x08F
+#define DA9062AA_ID_28_27		0x090
+#define DA9062AA_ID_30_29		0x091
+#define DA9062AA_ID_32_31		0x092
+#define DA9062AA_SEQ_A			0x095
+#define DA9062AA_SEQ_B			0x096
+#define DA9062AA_WAIT			0x097
+#define DA9062AA_EN_32K			0x098
+#define DA9062AA_RESET			0x099
+#define DA9062AA_BUCK_ILIM_A		0x09A
+#define DA9062AA_BUCK_ILIM_B		0x09B
+#define DA9062AA_BUCK_ILIM_C		0x09C
+#define DA9062AA_BUCK2_CFG		0x09D
+#define DA9062AA_BUCK1_CFG		0x09E
+#define DA9062AA_BUCK4_CFG		0x09F
+#define DA9062AA_BUCK3_CFG		0x0A0
+#define DA9062AA_VBUCK2_A		0x0A3
+#define DA9062AA_VBUCK1_A		0x0A4
+#define DA9062AA_VBUCK4_A		0x0A5
+#define DA9062AA_VBUCK3_A		0x0A7
+#define DA9062AA_VLDO1_A		0x0A9
+#define DA9062AA_VLDO2_A		0x0AA
+#define DA9062AA_VLDO3_A		0x0AB
+#define DA9062AA_VLDO4_A		0x0AC
+#define DA9062AA_VBUCK2_B		0x0B4
+#define DA9062AA_VBUCK1_B		0x0B5
+#define DA9062AA_VBUCK4_B		0x0B6
+#define DA9062AA_VBUCK3_B		0x0B8
+#define DA9062AA_VLDO1_B		0x0BA
+#define DA9062AA_VLDO2_B		0x0BB
+#define DA9062AA_VLDO3_B		0x0BC
+#define DA9062AA_VLDO4_B		0x0BD
+#define DA9062AA_BBAT_CONT		0x0C5
+#define DA9062AA_INTERFACE		0x105
+#define DA9062AA_CONFIG_A		0x106
+#define DA9062AA_CONFIG_B		0x107
+#define DA9062AA_CONFIG_C		0x108
+#define DA9062AA_CONFIG_D		0x109
+#define DA9062AA_CONFIG_E		0x10A
+#define DA9062AA_CONFIG_G		0x10C
+#define DA9062AA_CONFIG_H		0x10D
+#define DA9062AA_CONFIG_I		0x10E
+#define DA9062AA_CONFIG_J		0x10F
+#define DA9062AA_CONFIG_K		0x110
+#define DA9062AA_CONFIG_M		0x112
+#define DA9062AA_TRIM_CLDR		0x120
+#define DA9062AA_GP_ID_0		0x121
+#define DA9062AA_GP_ID_1		0x122
+#define DA9062AA_GP_ID_2		0x123
+#define DA9062AA_GP_ID_3		0x124
+#define DA9062AA_GP_ID_4		0x125
+#define DA9062AA_GP_ID_5		0x126
+#define DA9062AA_GP_ID_6		0x127
+#define DA9062AA_GP_ID_7		0x128
+#define DA9062AA_GP_ID_8		0x129
+#define DA9062AA_GP_ID_9		0x12A
+#define DA9062AA_GP_ID_10		0x12B
+#define DA9062AA_GP_ID_11		0x12C
+#define DA9062AA_GP_ID_12		0x12D
+#define DA9062AA_GP_ID_13		0x12E
+#define DA9062AA_GP_ID_14		0x12F
+#define DA9062AA_GP_ID_15		0x130
+#define DA9062AA_GP_ID_16		0x131
+#define DA9062AA_GP_ID_17		0x132
+#define DA9062AA_GP_ID_18		0x133
+#define DA9062AA_GP_ID_19		0x134
+#define DA9062AA_DEVICE_ID		0x181
+#define DA9062AA_VARIANT_ID		0x182
+#define DA9062AA_CUSTOMER_ID		0x183
+#define DA9062AA_CONFIG_ID		0x184
+
+/*
+ * Bit fields
+ */
+
+/* DA9062AA_PAGE_CON = 0x000 */
+#define DA9062AA_PAGE_SHIFT		0
+#define DA9062AA_PAGE_MASK		0x3f
+#define DA9062AA_WRITE_MODE_SHIFT	6
+#define DA9062AA_WRITE_MODE_MASK	BIT(6)
+#define DA9062AA_REVERT_SHIFT		7
+#define DA9062AA_REVERT_MASK		BIT(7)
+
+/* DA9062AA_STATUS_A = 0x001 */
+#define DA9062AA_NONKEY_SHIFT		0
+#define DA9062AA_NONKEY_MASK		0x01
+#define DA9062AA_DVC_BUSY_SHIFT		2
+#define DA9062AA_DVC_BUSY_MASK		BIT(2)
+
+/* DA9062AA_STATUS_B = 0x002 */
+#define DA9062AA_GPI0_SHIFT		0
+#define DA9062AA_GPI0_MASK		0x01
+#define DA9062AA_GPI1_SHIFT		1
+#define DA9062AA_GPI1_MASK		BIT(1)
+#define DA9062AA_GPI2_SHIFT		2
+#define DA9062AA_GPI2_MASK		BIT(2)
+#define DA9062AA_GPI3_SHIFT		3
+#define DA9062AA_GPI3_MASK		BIT(3)
+#define DA9062AA_GPI4_SHIFT		4
+#define DA9062AA_GPI4_MASK		BIT(4)
+
+/* DA9062AA_STATUS_D = 0x004 */
+#define DA9062AA_LDO1_ILIM_SHIFT	0
+#define DA9062AA_LDO1_ILIM_MASK		0x01
+#define DA9062AA_LDO2_ILIM_SHIFT	1
+#define DA9062AA_LDO2_ILIM_MASK		BIT(1)
+#define DA9062AA_LDO3_ILIM_SHIFT	2
+#define DA9062AA_LDO3_ILIM_MASK		BIT(2)
+#define DA9062AA_LDO4_ILIM_SHIFT	3
+#define DA9062AA_LDO4_ILIM_MASK		BIT(3)
+
+/* DA9062AA_FAULT_LOG = 0x005 */
+#define DA9062AA_TWD_ERROR_SHIFT	0
+#define DA9062AA_TWD_ERROR_MASK		0x01
+#define DA9062AA_POR_SHIFT		1
+#define DA9062AA_POR_MASK		BIT(1)
+#define DA9062AA_VDD_FAULT_SHIFT	2
+#define DA9062AA_VDD_FAULT_MASK		BIT(2)
+#define DA9062AA_VDD_START_SHIFT	3
+#define DA9062AA_VDD_START_MASK		BIT(3)
+#define DA9062AA_TEMP_CRIT_SHIFT	4
+#define DA9062AA_TEMP_CRIT_MASK		BIT(4)
+#define DA9062AA_KEY_RESET_SHIFT	5
+#define DA9062AA_KEY_RESET_MASK		BIT(5)
+#define DA9062AA_NSHUTDOWN_SHIFT	6
+#define DA9062AA_NSHUTDOWN_MASK		BIT(6)
+#define DA9062AA_WAIT_SHUT_SHIFT	7
+#define DA9062AA_WAIT_SHUT_MASK		BIT(7)
+
+/* DA9062AA_EVENT_A = 0x006 */
+#define DA9062AA_E_NONKEY_SHIFT		0
+#define DA9062AA_E_NONKEY_MASK		0x01
+#define DA9062AA_E_ALARM_SHIFT		1
+#define DA9062AA_E_ALARM_MASK		BIT(1)
+#define DA9062AA_E_TICK_SHIFT		2
+#define DA9062AA_E_TICK_MASK		BIT(2)
+#define DA9062AA_E_WDG_WARN_SHIFT	3
+#define DA9062AA_E_WDG_WARN_MASK	BIT(3)
+#define DA9062AA_E_SEQ_RDY_SHIFT	4
+#define DA9062AA_E_SEQ_RDY_MASK		BIT(4)
+#define DA9062AA_EVENTS_B_SHIFT		5
+#define DA9062AA_EVENTS_B_MASK		BIT(5)
+#define DA9062AA_EVENTS_C_SHIFT		6
+#define DA9062AA_EVENTS_C_MASK		BIT(6)
+
+/* DA9062AA_EVENT_B = 0x007 */
+#define DA9062AA_E_TEMP_SHIFT		1
+#define DA9062AA_E_TEMP_MASK		BIT(1)
+#define DA9062AA_E_LDO_LIM_SHIFT	3
+#define DA9062AA_E_LDO_LIM_MASK		BIT(3)
+#define DA9062AA_E_DVC_RDY_SHIFT	5
+#define DA9062AA_E_DVC_RDY_MASK		BIT(5)
+#define DA9062AA_E_VDD_WARN_SHIFT	7
+#define DA9062AA_E_VDD_WARN_MASK	BIT(7)
+
+/* DA9062AA_EVENT_C = 0x008 */
+#define DA9062AA_E_GPI0_SHIFT		0
+#define DA9062AA_E_GPI0_MASK		0x01
+#define DA9062AA_E_GPI1_SHIFT		1
+#define DA9062AA_E_GPI1_MASK		BIT(1)
+#define DA9062AA_E_GPI2_SHIFT		2
+#define DA9062AA_E_GPI2_MASK		BIT(2)
+#define DA9062AA_E_GPI3_SHIFT		3
+#define DA9062AA_E_GPI3_MASK		BIT(3)
+#define DA9062AA_E_GPI4_SHIFT		4
+#define DA9062AA_E_GPI4_MASK		BIT(4)
+
+/* DA9062AA_IRQ_MASK_A = 0x00A */
+#define DA9062AA_M_NONKEY_SHIFT		0
+#define DA9062AA_M_NONKEY_MASK		0x01
+#define DA9062AA_M_ALARM_SHIFT		1
+#define DA9062AA_M_ALARM_MASK		BIT(1)
+#define DA9062AA_M_TICK_SHIFT		2
+#define DA9062AA_M_TICK_MASK		BIT(2)
+#define DA9062AA_M_WDG_WARN_SHIFT	3
+#define DA9062AA_M_WDG_WARN_MASK	BIT(3)
+#define DA9062AA_M_SEQ_RDY_SHIFT	4
+#define DA9062AA_M_SEQ_RDY_MASK		BIT(4)
+
+/* DA9062AA_IRQ_MASK_B = 0x00B */
+#define DA9062AA_M_TEMP_SHIFT		1
+#define DA9062AA_M_TEMP_MASK		BIT(1)
+#define DA9062AA_M_LDO_LIM_SHIFT	3
+#define DA9062AA_M_LDO_LIM_MASK		BIT(3)
+#define DA9062AA_M_DVC_RDY_SHIFT	5
+#define DA9062AA_M_DVC_RDY_MASK		BIT(5)
+#define DA9062AA_M_VDD_WARN_SHIFT	7
+#define DA9062AA_M_VDD_WARN_MASK	BIT(7)
+
+/* DA9062AA_IRQ_MASK_C = 0x00C */
+#define DA9062AA_M_GPI0_SHIFT		0
+#define DA9062AA_M_GPI0_MASK		0x01
+#define DA9062AA_M_GPI1_SHIFT		1
+#define DA9062AA_M_GPI1_MASK		BIT(1)
+#define DA9062AA_M_GPI2_SHIFT		2
+#define DA9062AA_M_GPI2_MASK		BIT(2)
+#define DA9062AA_M_GPI3_SHIFT		3
+#define DA9062AA_M_GPI3_MASK		BIT(3)
+#define DA9062AA_M_GPI4_SHIFT		4
+#define DA9062AA_M_GPI4_MASK		BIT(4)
+
+/* DA9062AA_CONTROL_A = 0x00E */
+#define DA9062AA_SYSTEM_EN_SHIFT	0
+#define DA9062AA_SYSTEM_EN_MASK		0x01
+#define DA9062AA_POWER_EN_SHIFT		1
+#define DA9062AA_POWER_EN_MASK		BIT(1)
+#define DA9062AA_POWER1_EN_SHIFT	2
+#define DA9062AA_POWER1_EN_MASK		BIT(2)
+#define DA9062AA_STANDBY_SHIFT		3
+#define DA9062AA_STANDBY_MASK		BIT(3)
+#define DA9062AA_M_SYSTEM_EN_SHIFT	4
+#define DA9062AA_M_SYSTEM_EN_MASK	BIT(4)
+#define DA9062AA_M_POWER_EN_SHIFT	5
+#define DA9062AA_M_POWER_EN_MASK	BIT(5)
+#define DA9062AA_M_POWER1_EN_SHIFT	6
+#define DA9062AA_M_POWER1_EN_MASK	BIT(6)
+
+/* DA9062AA_CONTROL_B = 0x00F */
+#define DA9062AA_WATCHDOG_PD_SHIFT	1
+#define DA9062AA_WATCHDOG_PD_MASK	BIT(1)
+#define DA9062AA_FREEZE_EN_SHIFT	2
+#define DA9062AA_FREEZE_EN_MASK		BIT(2)
+#define DA9062AA_NRES_MODE_SHIFT	3
+#define DA9062AA_NRES_MODE_MASK		BIT(3)
+#define DA9062AA_NONKEY_LOCK_SHIFT	4
+#define DA9062AA_NONKEY_LOCK_MASK	BIT(4)
+#define DA9062AA_NFREEZE_SHIFT		5
+#define DA9062AA_NFREEZE_MASK		(0x03 << 5)
+#define DA9062AA_BUCK_SLOWSTART_SHIFT	7
+#define DA9062AA_BUCK_SLOWSTART_MASK	BIT(7)
+
+/* DA9062AA_CONTROL_C = 0x010 */
+#define DA9062AA_DEBOUNCING_SHIFT	0
+#define DA9062AA_DEBOUNCING_MASK	0x07
+#define DA9062AA_AUTO_BOOT_SHIFT	3
+#define DA9062AA_AUTO_BOOT_MASK		BIT(3)
+#define DA9062AA_OTPREAD_EN_SHIFT	4
+#define DA9062AA_OTPREAD_EN_MASK	BIT(4)
+#define DA9062AA_SLEW_RATE_SHIFT	5
+#define DA9062AA_SLEW_RATE_MASK		(0x03 << 5)
+#define DA9062AA_DEF_SUPPLY_SHIFT	7
+#define DA9062AA_DEF_SUPPLY_MASK	BIT(7)
+
+/* DA9062AA_CONTROL_D = 0x011 */
+#define DA9062AA_TWDSCALE_SHIFT		0
+#define DA9062AA_TWDSCALE_MASK		0x07
+
+/* DA9062AA_CONTROL_E = 0x012 */
+#define DA9062AA_RTC_MODE_PD_SHIFT	0
+#define DA9062AA_RTC_MODE_PD_MASK	0x01
+#define DA9062AA_RTC_MODE_SD_SHIFT	1
+#define DA9062AA_RTC_MODE_SD_MASK	BIT(1)
+#define DA9062AA_RTC_EN_SHIFT		2
+#define DA9062AA_RTC_EN_MASK		BIT(2)
+#define DA9062AA_V_LOCK_SHIFT		7
+#define DA9062AA_V_LOCK_MASK		BIT(7)
+
+/* DA9062AA_CONTROL_F = 0x013 */
+#define DA9062AA_WATCHDOG_SHIFT		0
+#define DA9062AA_WATCHDOG_MASK		0x01
+#define DA9062AA_SHUTDOWN_SHIFT		1
+#define DA9062AA_SHUTDOWN_MASK		BIT(1)
+#define DA9062AA_WAKE_UP_SHIFT		2
+#define DA9062AA_WAKE_UP_MASK		BIT(2)
+
+/* DA9062AA_PD_DIS = 0x014 */
+#define DA9062AA_GPI_DIS_SHIFT		0
+#define DA9062AA_GPI_DIS_MASK		0x01
+#define DA9062AA_PMIF_DIS_SHIFT		2
+#define DA9062AA_PMIF_DIS_MASK		BIT(2)
+#define DA9062AA_CLDR_PAUSE_SHIFT	4
+#define DA9062AA_CLDR_PAUSE_MASK	BIT(4)
+#define DA9062AA_BBAT_DIS_SHIFT		5
+#define DA9062AA_BBAT_DIS_MASK		BIT(5)
+#define DA9062AA_OUT32K_PAUSE_SHIFT	6
+#define DA9062AA_OUT32K_PAUSE_MASK	BIT(6)
+#define DA9062AA_PMCONT_DIS_SHIFT	7
+#define DA9062AA_PMCONT_DIS_MASK	BIT(7)
+
+/* DA9062AA_GPIO_0_1 = 0x015 */
+#define DA9062AA_GPIO0_PIN_SHIFT	0
+#define DA9062AA_GPIO0_PIN_MASK		0x03
+#define DA9062AA_GPIO0_TYPE_SHIFT	2
+#define DA9062AA_GPIO0_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO0_WEN_SHIFT	3
+#define DA9062AA_GPIO0_WEN_MASK		BIT(3)
+#define DA9062AA_GPIO1_PIN_SHIFT	4
+#define DA9062AA_GPIO1_PIN_MASK		(0x03 << 4)
+#define DA9062AA_GPIO1_TYPE_SHIFT	6
+#define DA9062AA_GPIO1_TYPE_MASK	BIT(6)
+#define DA9062AA_GPIO1_WEN_SHIFT	7
+#define DA9062AA_GPIO1_WEN_MASK		BIT(7)
+
+/* DA9062AA_GPIO_2_3 = 0x016 */
+#define DA9062AA_GPIO2_PIN_SHIFT	0
+#define DA9062AA_GPIO2_PIN_MASK		0x03
+#define DA9062AA_GPIO2_TYPE_SHIFT	2
+#define DA9062AA_GPIO2_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO2_WEN_SHIFT	3
+#define DA9062AA_GPIO2_WEN_MASK		BIT(3)
+#define DA9062AA_GPIO3_PIN_SHIFT	4
+#define DA9062AA_GPIO3_PIN_MASK		(0x03 << 4)
+#define DA9062AA_GPIO3_TYPE_SHIFT	6
+#define DA9062AA_GPIO3_TYPE_MASK	BIT(6)
+#define DA9062AA_GPIO3_WEN_SHIFT	7
+#define DA9062AA_GPIO3_WEN_MASK		BIT(7)
+
+/* DA9062AA_GPIO_4 = 0x017 */
+#define DA9062AA_GPIO4_PIN_SHIFT	0
+#define DA9062AA_GPIO4_PIN_MASK		0x03
+#define DA9062AA_GPIO4_TYPE_SHIFT	2
+#define DA9062AA_GPIO4_TYPE_MASK	BIT(2)
+#define DA9062AA_GPIO4_WEN_SHIFT	3
+#define DA9062AA_GPIO4_WEN_MASK		BIT(3)
+
+/* DA9062AA_GPIO_WKUP_MODE = 0x01C */
+#define DA9062AA_GPIO0_WKUP_MODE_SHIFT	0
+#define DA9062AA_GPIO0_WKUP_MODE_MASK	0x01
+#define DA9062AA_GPIO1_WKUP_MODE_SHIFT	1
+#define DA9062AA_GPIO1_WKUP_MODE_MASK	BIT(1)
+#define DA9062AA_GPIO2_WKUP_MODE_SHIFT	2
+#define DA9062AA_GPIO2_WKUP_MODE_MASK	BIT(2)
+#define DA9062AA_GPIO3_WKUP_MODE_SHIFT	3
+#define DA9062AA_GPIO3_WKUP_MODE_MASK	BIT(3)
+#define DA9062AA_GPIO4_WKUP_MODE_SHIFT	4
+#define DA9062AA_GPIO4_WKUP_MODE_MASK	BIT(4)
+
+/* DA9062AA_GPIO_MODE0_4 = 0x01D */
+#define DA9062AA_GPIO0_MODE_SHIFT	0
+#define DA9062AA_GPIO0_MODE_MASK	0x01
+#define DA9062AA_GPIO1_MODE_SHIFT	1
+#define DA9062AA_GPIO1_MODE_MASK	BIT(1)
+#define DA9062AA_GPIO2_MODE_SHIFT	2
+#define DA9062AA_GPIO2_MODE_MASK	BIT(2)
+#define DA9062AA_GPIO3_MODE_SHIFT	3
+#define DA9062AA_GPIO3_MODE_MASK	BIT(3)
+#define DA9062AA_GPIO4_MODE_SHIFT	4
+#define DA9062AA_GPIO4_MODE_MASK	BIT(4)
+
+/* DA9062AA_GPIO_OUT0_2 = 0x01E */
+#define DA9062AA_GPIO0_OUT_SHIFT	0
+#define DA9062AA_GPIO0_OUT_MASK		0x07
+#define DA9062AA_GPIO1_OUT_SHIFT	3
+#define DA9062AA_GPIO1_OUT_MASK		(0x07 << 3)
+#define DA9062AA_GPIO2_OUT_SHIFT	6
+#define DA9062AA_GPIO2_OUT_MASK		(0x03 << 6)
+
+/* DA9062AA_GPIO_OUT3_4 = 0x01F */
+#define DA9062AA_GPIO3_OUT_SHIFT	0
+#define DA9062AA_GPIO3_OUT_MASK		0x07
+#define DA9062AA_GPIO4_OUT_SHIFT	3
+#define DA9062AA_GPIO4_OUT_MASK		(0x03 << 3)
+
+/* DA9062AA_BUCK2_CONT = 0x020 */
+#define DA9062AA_BUCK2_EN_SHIFT		0
+#define DA9062AA_BUCK2_EN_MASK		0x01
+#define DA9062AA_BUCK2_GPI_SHIFT	1
+#define DA9062AA_BUCK2_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK2_CONF_SHIFT	3
+#define DA9062AA_BUCK2_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK2_GPI_SHIFT	5
+#define DA9062AA_VBUCK2_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK1_CONT = 0x021 */
+#define DA9062AA_BUCK1_EN_SHIFT		0
+#define DA9062AA_BUCK1_EN_MASK		0x01
+#define DA9062AA_BUCK1_GPI_SHIFT	1
+#define DA9062AA_BUCK1_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK1_CONF_SHIFT	3
+#define DA9062AA_BUCK1_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK1_GPI_SHIFT	5
+#define DA9062AA_VBUCK1_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK4_CONT = 0x022 */
+#define DA9062AA_BUCK4_EN_SHIFT		0
+#define DA9062AA_BUCK4_EN_MASK		0x01
+#define DA9062AA_BUCK4_GPI_SHIFT	1
+#define DA9062AA_BUCK4_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK4_CONF_SHIFT	3
+#define DA9062AA_BUCK4_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK4_GPI_SHIFT	5
+#define DA9062AA_VBUCK4_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_BUCK3_CONT = 0x024 */
+#define DA9062AA_BUCK3_EN_SHIFT		0
+#define DA9062AA_BUCK3_EN_MASK		0x01
+#define DA9062AA_BUCK3_GPI_SHIFT	1
+#define DA9062AA_BUCK3_GPI_MASK		(0x03 << 1)
+#define DA9062AA_BUCK3_CONF_SHIFT	3
+#define DA9062AA_BUCK3_CONF_MASK	BIT(3)
+#define DA9062AA_VBUCK3_GPI_SHIFT	5
+#define DA9062AA_VBUCK3_GPI_MASK	(0x03 << 5)
+
+/* DA9062AA_LDO1_CONT = 0x026 */
+#define DA9062AA_LDO1_EN_SHIFT		0
+#define DA9062AA_LDO1_EN_MASK		0x01
+#define DA9062AA_LDO1_GPI_SHIFT		1
+#define DA9062AA_LDO1_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO1_PD_DIS_SHIFT	3
+#define DA9062AA_LDO1_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO1_GPI_SHIFT	5
+#define DA9062AA_VLDO1_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO1_CONF_SHIFT	7
+#define DA9062AA_LDO1_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO2_CONT = 0x027 */
+#define DA9062AA_LDO2_EN_SHIFT		0
+#define DA9062AA_LDO2_EN_MASK		0x01
+#define DA9062AA_LDO2_GPI_SHIFT		1
+#define DA9062AA_LDO2_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO2_PD_DIS_SHIFT	3
+#define DA9062AA_LDO2_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO2_GPI_SHIFT	5
+#define DA9062AA_VLDO2_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO2_CONF_SHIFT	7
+#define DA9062AA_LDO2_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO3_CONT = 0x028 */
+#define DA9062AA_LDO3_EN_SHIFT		0
+#define DA9062AA_LDO3_EN_MASK		0x01
+#define DA9062AA_LDO3_GPI_SHIFT		1
+#define DA9062AA_LDO3_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO3_PD_DIS_SHIFT	3
+#define DA9062AA_LDO3_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO3_GPI_SHIFT	5
+#define DA9062AA_VLDO3_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO3_CONF_SHIFT	7
+#define DA9062AA_LDO3_CONF_MASK		BIT(7)
+
+/* DA9062AA_LDO4_CONT = 0x029 */
+#define DA9062AA_LDO4_EN_SHIFT		0
+#define DA9062AA_LDO4_EN_MASK		0x01
+#define DA9062AA_LDO4_GPI_SHIFT		1
+#define DA9062AA_LDO4_GPI_MASK		(0x03 << 1)
+#define DA9062AA_LDO4_PD_DIS_SHIFT	3
+#define DA9062AA_LDO4_PD_DIS_MASK	BIT(3)
+#define DA9062AA_VLDO4_GPI_SHIFT	5
+#define DA9062AA_VLDO4_GPI_MASK		(0x03 << 5)
+#define DA9062AA_LDO4_CONF_SHIFT	7
+#define DA9062AA_LDO4_CONF_MASK		BIT(7)
+
+/* DA9062AA_DVC_1 = 0x032 */
+#define DA9062AA_VBUCK1_SEL_SHIFT	0
+#define DA9062AA_VBUCK1_SEL_MASK	0x01
+#define DA9062AA_VBUCK2_SEL_SHIFT	1
+#define DA9062AA_VBUCK2_SEL_MASK	BIT(1)
+#define DA9062AA_VBUCK4_SEL_SHIFT	2
+#define DA9062AA_VBUCK4_SEL_MASK	BIT(2)
+#define DA9062AA_VBUCK3_SEL_SHIFT	3
+#define DA9062AA_VBUCK3_SEL_MASK	BIT(3)
+#define DA9062AA_VLDO1_SEL_SHIFT	4
+#define DA9062AA_VLDO1_SEL_MASK		BIT(4)
+#define DA9062AA_VLDO2_SEL_SHIFT	5
+#define DA9062AA_VLDO2_SEL_MASK		BIT(5)
+#define DA9062AA_VLDO3_SEL_SHIFT	6
+#define DA9062AA_VLDO3_SEL_MASK		BIT(6)
+#define DA9062AA_VLDO4_SEL_SHIFT	7
+#define DA9062AA_VLDO4_SEL_MASK		BIT(7)
+
+/* DA9062AA_COUNT_S = 0x040 */
+#define DA9062AA_COUNT_SEC_SHIFT	0
+#define DA9062AA_COUNT_SEC_MASK		0x3f
+#define DA9062AA_RTC_READ_SHIFT		7
+#define DA9062AA_RTC_READ_MASK		BIT(7)
+
+/* DA9062AA_COUNT_MI = 0x041 */
+#define DA9062AA_COUNT_MIN_SHIFT	0
+#define DA9062AA_COUNT_MIN_MASK		0x3f
+
+/* DA9062AA_COUNT_H = 0x042 */
+#define DA9062AA_COUNT_HOUR_SHIFT	0
+#define DA9062AA_COUNT_HOUR_MASK	0x1f
+
+/* DA9062AA_COUNT_D = 0x043 */
+#define DA9062AA_COUNT_DAY_SHIFT	0
+#define DA9062AA_COUNT_DAY_MASK		0x1f
+
+/* DA9062AA_COUNT_MO = 0x044 */
+#define DA9062AA_COUNT_MONTH_SHIFT	0
+#define DA9062AA_COUNT_MONTH_MASK	0x0f
+
+/* DA9062AA_COUNT_Y = 0x045 */
+#define DA9062AA_COUNT_YEAR_SHIFT	0
+#define DA9062AA_COUNT_YEAR_MASK	0x3f
+#define DA9062AA_MONITOR_SHIFT		6
+#define DA9062AA_MONITOR_MASK		BIT(6)
+
+/* DA9062AA_ALARM_S = 0x046 */
+#define DA9062AA_ALARM_SEC_SHIFT	0
+#define DA9062AA_ALARM_SEC_MASK		0x3f
+#define DA9062AA_ALARM_STATUS_SHIFT	6
+#define DA9062AA_ALARM_STATUS_MASK	(0x03 << 6)
+
+/* DA9062AA_ALARM_MI = 0x047 */
+#define DA9062AA_ALARM_MIN_SHIFT	0
+#define DA9062AA_ALARM_MIN_MASK		0x3f
+
+/* DA9062AA_ALARM_H = 0x048 */
+#define DA9062AA_ALARM_HOUR_SHIFT	0
+#define DA9062AA_ALARM_HOUR_MASK	0x1f
+
+/* DA9062AA_ALARM_D = 0x049 */
+#define DA9062AA_ALARM_DAY_SHIFT	0
+#define DA9062AA_ALARM_DAY_MASK		0x1f
+
+/* DA9062AA_ALARM_MO = 0x04A */
+#define DA9062AA_ALARM_MONTH_SHIFT	0
+#define DA9062AA_ALARM_MONTH_MASK	0x0f
+#define DA9062AA_TICK_TYPE_SHIFT	4
+#define DA9062AA_TICK_TYPE_MASK		BIT(4)
+#define DA9062AA_TICK_WAKE_SHIFT	5
+#define DA9062AA_TICK_WAKE_MASK		BIT(5)
+
+/* DA9062AA_ALARM_Y = 0x04B */
+#define DA9062AA_ALARM_YEAR_SHIFT	0
+#define DA9062AA_ALARM_YEAR_MASK	0x3f
+#define DA9062AA_ALARM_ON_SHIFT		6
+#define DA9062AA_ALARM_ON_MASK		BIT(6)
+#define DA9062AA_TICK_ON_SHIFT		7
+#define DA9062AA_TICK_ON_MASK		BIT(7)
+
+/* DA9062AA_SECOND_A = 0x04C */
+#define DA9062AA_SECONDS_A_SHIFT	0
+#define DA9062AA_SECONDS_A_MASK		0xff
+
+/* DA9062AA_SECOND_B = 0x04D */
+#define DA9062AA_SECONDS_B_SHIFT	0
+#define DA9062AA_SECONDS_B_MASK		0xff
+
+/* DA9062AA_SECOND_C = 0x04E */
+#define DA9062AA_SECONDS_C_SHIFT	0
+#define DA9062AA_SECONDS_C_MASK		0xff
+
+/* DA9062AA_SECOND_D = 0x04F */
+#define DA9062AA_SECONDS_D_SHIFT	0
+#define DA9062AA_SECONDS_D_MASK		0xff
+
+/* DA9062AA_SEQ = 0x081 */
+#define DA9062AA_SEQ_POINTER_SHIFT	0
+#define DA9062AA_SEQ_POINTER_MASK	0x0f
+#define DA9062AA_NXT_SEQ_START_SHIFT	4
+#define DA9062AA_NXT_SEQ_START_MASK	(0x0f << 4)
+
+/* DA9062AA_SEQ_TIMER = 0x082 */
+#define DA9062AA_SEQ_TIME_SHIFT		0
+#define DA9062AA_SEQ_TIME_MASK		0x0f
+#define DA9062AA_SEQ_DUMMY_SHIFT	4
+#define DA9062AA_SEQ_DUMMY_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_2_1 = 0x083 */
+#define DA9062AA_LDO1_STEP_SHIFT	0
+#define DA9062AA_LDO1_STEP_MASK		0x0f
+#define DA9062AA_LDO2_STEP_SHIFT	4
+#define DA9062AA_LDO2_STEP_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_4_3 = 0x084 */
+#define DA9062AA_LDO3_STEP_SHIFT	0
+#define DA9062AA_LDO3_STEP_MASK		0x0f
+#define DA9062AA_LDO4_STEP_SHIFT	4
+#define DA9062AA_LDO4_STEP_MASK		(0x0f << 4)
+
+/* DA9062AA_ID_12_11 = 0x088 */
+#define DA9062AA_PD_DIS_STEP_SHIFT	4
+#define DA9062AA_PD_DIS_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_14_13 = 0x089 */
+#define DA9062AA_BUCK1_STEP_SHIFT	0
+#define DA9062AA_BUCK1_STEP_MASK	0x0f
+#define DA9062AA_BUCK2_STEP_SHIFT	4
+#define DA9062AA_BUCK2_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_16_15 = 0x08A */
+#define DA9062AA_BUCK4_STEP_SHIFT	0
+#define DA9062AA_BUCK4_STEP_MASK	0x0f
+#define DA9062AA_BUCK3_STEP_SHIFT	4
+#define DA9062AA_BUCK3_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_22_21 = 0x08D */
+#define DA9062AA_GP_RISE1_STEP_SHIFT	0
+#define DA9062AA_GP_RISE1_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL1_STEP_SHIFT	4
+#define DA9062AA_GP_FALL1_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_24_23 = 0x08E */
+#define DA9062AA_GP_RISE2_STEP_SHIFT	0
+#define DA9062AA_GP_RISE2_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL2_STEP_SHIFT	4
+#define DA9062AA_GP_FALL2_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_26_25 = 0x08F */
+#define DA9062AA_GP_RISE3_STEP_SHIFT	0
+#define DA9062AA_GP_RISE3_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL3_STEP_SHIFT	4
+#define DA9062AA_GP_FALL3_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_28_27 = 0x090 */
+#define DA9062AA_GP_RISE4_STEP_SHIFT	0
+#define DA9062AA_GP_RISE4_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL4_STEP_SHIFT	4
+#define DA9062AA_GP_FALL4_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_30_29 = 0x091 */
+#define DA9062AA_GP_RISE5_STEP_SHIFT	0
+#define DA9062AA_GP_RISE5_STEP_MASK	0x0f
+#define DA9062AA_GP_FALL5_STEP_SHIFT	4
+#define DA9062AA_GP_FALL5_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_ID_32_31 = 0x092 */
+#define DA9062AA_WAIT_STEP_SHIFT	0
+#define DA9062AA_WAIT_STEP_MASK		0x0f
+#define DA9062AA_EN32K_STEP_SHIFT	4
+#define DA9062AA_EN32K_STEP_MASK	(0x0f << 4)
+
+/* DA9062AA_SEQ_A = 0x095 */
+#define DA9062AA_SYSTEM_END_SHIFT	0
+#define DA9062AA_SYSTEM_END_MASK	0x0f
+#define DA9062AA_POWER_END_SHIFT	4
+#define DA9062AA_POWER_END_MASK		(0x0f << 4)
+
+/* DA9062AA_SEQ_B = 0x096 */
+#define DA9062AA_MAX_COUNT_SHIFT	0
+#define DA9062AA_MAX_COUNT_MASK		0x0f
+#define DA9062AA_PART_DOWN_SHIFT	4
+#define DA9062AA_PART_DOWN_MASK		(0x0f << 4)
+
+/* DA9062AA_WAIT = 0x097 */
+#define DA9062AA_WAIT_TIME_SHIFT	0
+#define DA9062AA_WAIT_TIME_MASK		0x0f
+#define DA9062AA_WAIT_MODE_SHIFT	4
+#define DA9062AA_WAIT_MODE_MASK		BIT(4)
+#define DA9062AA_TIME_OUT_SHIFT		5
+#define DA9062AA_TIME_OUT_MASK		BIT(5)
+#define DA9062AA_WAIT_DIR_SHIFT		6
+#define DA9062AA_WAIT_DIR_MASK		(0x03 << 6)
+
+/* DA9062AA_EN_32K = 0x098 */
+#define DA9062AA_STABILISATION_TIME_SHIFT	0
+#define DA9062AA_STABILISATION_TIME_MASK	0x07
+#define DA9062AA_CRYSTAL_SHIFT			3
+#define DA9062AA_CRYSTAL_MASK			BIT(3)
+#define DA9062AA_DELAY_MODE_SHIFT		4
+#define DA9062AA_DELAY_MODE_MASK		BIT(4)
+#define DA9062AA_OUT_CLOCK_SHIFT		5
+#define DA9062AA_OUT_CLOCK_MASK			BIT(5)
+#define DA9062AA_RTC_CLOCK_SHIFT		6
+#define DA9062AA_RTC_CLOCK_MASK			BIT(6)
+#define DA9062AA_EN_32KOUT_SHIFT		7
+#define DA9062AA_EN_32KOUT_MASK			BIT(7)
+
+/* DA9062AA_RESET = 0x099 */
+#define DA9062AA_RESET_TIMER_SHIFT	0
+#define DA9062AA_RESET_TIMER_MASK	0x3f
+#define DA9062AA_RESET_EVENT_SHIFT	6
+#define DA9062AA_RESET_EVENT_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK_ILIM_A = 0x09A */
+#define DA9062AA_BUCK3_ILIM_SHIFT	0
+#define DA9062AA_BUCK3_ILIM_MASK	0x0f
+
+/* DA9062AA_BUCK_ILIM_B = 0x09B */
+#define DA9062AA_BUCK4_ILIM_SHIFT	0
+#define DA9062AA_BUCK4_ILIM_MASK	0x0f
+
+/* DA9062AA_BUCK_ILIM_C = 0x09C */
+#define DA9062AA_BUCK1_ILIM_SHIFT	0
+#define DA9062AA_BUCK1_ILIM_MASK	0x0f
+#define DA9062AA_BUCK2_ILIM_SHIFT	4
+#define DA9062AA_BUCK2_ILIM_MASK	(0x0f << 4)
+
+/* DA9062AA_BUCK2_CFG = 0x09D */
+#define DA9062AA_BUCK2_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK2_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK2_MODE_SHIFT	6
+#define DA9062AA_BUCK2_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK1_CFG = 0x09E */
+#define DA9062AA_BUCK1_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK1_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK1_MODE_SHIFT	6
+#define DA9062AA_BUCK1_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK4_CFG = 0x09F */
+#define DA9062AA_BUCK4_VTTR_EN_SHIFT	3
+#define DA9062AA_BUCK4_VTTR_EN_MASK	BIT(3)
+#define DA9062AA_BUCK4_VTT_EN_SHIFT	4
+#define DA9062AA_BUCK4_VTT_EN_MASK	BIT(4)
+#define DA9062AA_BUCK4_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK4_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK4_MODE_SHIFT	6
+#define DA9062AA_BUCK4_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_BUCK3_CFG = 0x0A0 */
+#define DA9062AA_BUCK3_PD_DIS_SHIFT	5
+#define DA9062AA_BUCK3_PD_DIS_MASK	BIT(5)
+#define DA9062AA_BUCK3_MODE_SHIFT	6
+#define DA9062AA_BUCK3_MODE_MASK	(0x03 << 6)
+
+/* DA9062AA_VBUCK2_A = 0x0A3 */
+#define DA9062AA_VBUCK2_A_SHIFT		0
+#define DA9062AA_VBUCK2_A_MASK		0x7f
+#define DA9062AA_BUCK2_SL_A_SHIFT	7
+#define DA9062AA_BUCK2_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK1_A = 0x0A4 */
+#define DA9062AA_VBUCK1_A_SHIFT		0
+#define DA9062AA_VBUCK1_A_MASK		0x7f
+#define DA9062AA_BUCK1_SL_A_SHIFT	7
+#define DA9062AA_BUCK1_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK4_A = 0x0A5 */
+#define DA9062AA_VBUCK4_A_SHIFT		0
+#define DA9062AA_VBUCK4_A_MASK		0x7f
+#define DA9062AA_BUCK4_SL_A_SHIFT	7
+#define DA9062AA_BUCK4_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VBUCK3_A = 0x0A7 */
+#define DA9062AA_VBUCK3_A_SHIFT		0
+#define DA9062AA_VBUCK3_A_MASK		0x7f
+#define DA9062AA_BUCK3_SL_A_SHIFT	7
+#define DA9062AA_BUCK3_SL_A_MASK	BIT(7)
+
+/* DA9062AA_VLDO1_A = 0x0A9 */
+#define DA9062AA_VLDO1_A_SHIFT		0
+#define DA9062AA_VLDO1_A_MASK		0x3f
+#define DA9062AA_LDO1_SL_A_SHIFT	7
+#define DA9062AA_LDO1_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO2_A = 0x0AA */
+#define DA9062AA_VLDO2_A_SHIFT		0
+#define DA9062AA_VLDO2_A_MASK		0x3f
+#define DA9062AA_LDO2_SL_A_SHIFT	7
+#define DA9062AA_LDO2_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO3_A = 0x0AB */
+#define DA9062AA_VLDO3_A_SHIFT		0
+#define DA9062AA_VLDO3_A_MASK		0x3f
+#define DA9062AA_LDO3_SL_A_SHIFT	7
+#define DA9062AA_LDO3_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VLDO4_A = 0x0AC */
+#define DA9062AA_VLDO4_A_SHIFT		0
+#define DA9062AA_VLDO4_A_MASK		0x3f
+#define DA9062AA_LDO4_SL_A_SHIFT	7
+#define DA9062AA_LDO4_SL_A_MASK		BIT(7)
+
+/* DA9062AA_VBUCK2_B = 0x0B4 */
+#define DA9062AA_VBUCK2_B_SHIFT		0
+#define DA9062AA_VBUCK2_B_MASK		0x7f
+#define DA9062AA_BUCK2_SL_B_SHIFT	7
+#define DA9062AA_BUCK2_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK1_B = 0x0B5 */
+#define DA9062AA_VBUCK1_B_SHIFT		0
+#define DA9062AA_VBUCK1_B_MASK		0x7f
+#define DA9062AA_BUCK1_SL_B_SHIFT	7
+#define DA9062AA_BUCK1_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK4_B = 0x0B6 */
+#define DA9062AA_VBUCK4_B_SHIFT		0
+#define DA9062AA_VBUCK4_B_MASK		0x7f
+#define DA9062AA_BUCK4_SL_B_SHIFT	7
+#define DA9062AA_BUCK4_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VBUCK3_B = 0x0B8 */
+#define DA9062AA_VBUCK3_B_SHIFT		0
+#define DA9062AA_VBUCK3_B_MASK		0x7f
+#define DA9062AA_BUCK3_SL_B_SHIFT	7
+#define DA9062AA_BUCK3_SL_B_MASK	BIT(7)
+
+/* DA9062AA_VLDO1_B = 0x0BA */
+#define DA9062AA_VLDO1_B_SHIFT		0
+#define DA9062AA_VLDO1_B_MASK		0x3f
+#define DA9062AA_LDO1_SL_B_SHIFT	7
+#define DA9062AA_LDO1_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO2_B = 0x0BB */
+#define DA9062AA_VLDO2_B_SHIFT		0
+#define DA9062AA_VLDO2_B_MASK		0x3f
+#define DA9062AA_LDO2_SL_B_SHIFT	7
+#define DA9062AA_LDO2_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO3_B = 0x0BC */
+#define DA9062AA_VLDO3_B_SHIFT		0
+#define DA9062AA_VLDO3_B_MASK		0x3f
+#define DA9062AA_LDO3_SL_B_SHIFT	7
+#define DA9062AA_LDO3_SL_B_MASK		BIT(7)
+
+/* DA9062AA_VLDO4_B = 0x0BD */
+#define DA9062AA_VLDO4_B_SHIFT		0
+#define DA9062AA_VLDO4_B_MASK		0x3f
+#define DA9062AA_LDO4_SL_B_SHIFT	7
+#define DA9062AA_LDO4_SL_B_MASK		BIT(7)
+
+/* DA9062AA_BBAT_CONT = 0x0C5 */
+#define DA9062AA_BCHG_VSET_SHIFT	0
+#define DA9062AA_BCHG_VSET_MASK		0x0f
+#define DA9062AA_BCHG_ISET_SHIFT	4
+#define DA9062AA_BCHG_ISET_MASK		(0x0f << 4)
+
+/* DA9062AA_INTERFACE = 0x105 */
+#define DA9062AA_IF_BASE_ADDR_SHIFT	4
+#define DA9062AA_IF_BASE_ADDR_MASK	(0x0f << 4)
+
+/* DA9062AA_CONFIG_A = 0x106 */
+#define DA9062AA_PM_I_V_SHIFT		0
+#define DA9062AA_PM_I_V_MASK		0x01
+#define DA9062AA_PM_O_TYPE_SHIFT	2
+#define DA9062AA_PM_O_TYPE_MASK		BIT(2)
+#define DA9062AA_IRQ_TYPE_SHIFT		3
+#define DA9062AA_IRQ_TYPE_MASK		BIT(3)
+#define DA9062AA_PM_IF_V_SHIFT		4
+#define DA9062AA_PM_IF_V_MASK		BIT(4)
+#define DA9062AA_PM_IF_FMP_SHIFT	5
+#define DA9062AA_PM_IF_FMP_MASK		BIT(5)
+#define DA9062AA_PM_IF_HSM_SHIFT	6
+#define DA9062AA_PM_IF_HSM_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_B = 0x107 */
+#define DA9062AA_VDD_FAULT_ADJ_SHIFT	0
+#define DA9062AA_VDD_FAULT_ADJ_MASK	0x0f
+#define DA9062AA_VDD_HYST_ADJ_SHIFT	4
+#define DA9062AA_VDD_HYST_ADJ_MASK	(0x07 << 4)
+
+/* DA9062AA_CONFIG_C = 0x108 */
+#define DA9062AA_BUCK_ACTV_DISCHRG_SHIFT	2
+#define DA9062AA_BUCK_ACTV_DISCHRG_MASK		BIT(2)
+#define DA9062AA_BUCK1_CLK_INV_SHIFT		3
+#define DA9062AA_BUCK1_CLK_INV_MASK		BIT(3)
+#define DA9062AA_BUCK4_CLK_INV_SHIFT		4
+#define DA9062AA_BUCK4_CLK_INV_MASK		BIT(4)
+#define DA9062AA_BUCK3_CLK_INV_SHIFT		6
+#define DA9062AA_BUCK3_CLK_INV_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_D = 0x109 */
+#define DA9062AA_GPI_V_SHIFT		0
+#define DA9062AA_GPI_V_MASK		0x01
+#define DA9062AA_NIRQ_MODE_SHIFT	1
+#define DA9062AA_NIRQ_MODE_MASK		BIT(1)
+#define DA9062AA_SYSTEM_EN_RD_SHIFT	2
+#define DA9062AA_SYSTEM_EN_RD_MASK	BIT(2)
+#define DA9062AA_FORCE_RESET_SHIFT	5
+#define DA9062AA_FORCE_RESET_MASK	BIT(5)
+
+/* DA9062AA_CONFIG_E = 0x10A */
+#define DA9062AA_BUCK1_AUTO_SHIFT	0
+#define DA9062AA_BUCK1_AUTO_MASK	0x01
+#define DA9062AA_BUCK2_AUTO_SHIFT	1
+#define DA9062AA_BUCK2_AUTO_MASK	BIT(1)
+#define DA9062AA_BUCK4_AUTO_SHIFT	2
+#define DA9062AA_BUCK4_AUTO_MASK	BIT(2)
+#define DA9062AA_BUCK3_AUTO_SHIFT	4
+#define DA9062AA_BUCK3_AUTO_MASK	BIT(4)
+
+/* DA9062AA_CONFIG_G = 0x10C */
+#define DA9062AA_LDO1_AUTO_SHIFT	0
+#define DA9062AA_LDO1_AUTO_MASK		0x01
+#define DA9062AA_LDO2_AUTO_SHIFT	1
+#define DA9062AA_LDO2_AUTO_MASK		BIT(1)
+#define DA9062AA_LDO3_AUTO_SHIFT	2
+#define DA9062AA_LDO3_AUTO_MASK		BIT(2)
+#define DA9062AA_LDO4_AUTO_SHIFT	3
+#define DA9062AA_LDO4_AUTO_MASK		BIT(3)
+
+/* DA9062AA_CONFIG_H = 0x10D */
+#define DA9062AA_BUCK1_2_MERGE_SHIFT	3
+#define DA9062AA_BUCK1_2_MERGE_MASK	BIT(3)
+#define DA9062AA_BUCK2_OD_SHIFT		5
+#define DA9062AA_BUCK2_OD_MASK		BIT(5)
+#define DA9062AA_BUCK1_OD_SHIFT		6
+#define DA9062AA_BUCK1_OD_MASK		BIT(6)
+
+/* DA9062AA_CONFIG_I = 0x10E */
+#define DA9062AA_NONKEY_PIN_SHIFT	0
+#define DA9062AA_NONKEY_PIN_MASK	0x03
+#define DA9062AA_nONKEY_SD_SHIFT	2
+#define DA9062AA_nONKEY_SD_MASK		BIT(2)
+#define DA9062AA_WATCHDOG_SD_SHIFT	3
+#define DA9062AA_WATCHDOG_SD_MASK	BIT(3)
+#define DA9062AA_KEY_SD_MODE_SHIFT	4
+#define DA9062AA_KEY_SD_MODE_MASK	BIT(4)
+#define DA9062AA_HOST_SD_MODE_SHIFT	5
+#define DA9062AA_HOST_SD_MODE_MASK	BIT(5)
+#define DA9062AA_INT_SD_MODE_SHIFT	6
+#define DA9062AA_INT_SD_MODE_MASK	BIT(6)
+#define DA9062AA_LDO_SD_SHIFT		7
+#define DA9062AA_LDO_SD_MASK		BIT(7)
+
+/* DA9062AA_CONFIG_J = 0x10F */
+#define DA9062AA_KEY_DELAY_SHIFT	0
+#define DA9062AA_KEY_DELAY_MASK		0x03
+#define DA9062AA_SHUT_DELAY_SHIFT	2
+#define DA9062AA_SHUT_DELAY_MASK	(0x03 << 2)
+#define DA9062AA_RESET_DURATION_SHIFT	4
+#define DA9062AA_RESET_DURATION_MASK	(0x03 << 4)
+#define DA9062AA_TWOWIRE_TO_SHIFT	6
+#define DA9062AA_TWOWIRE_TO_MASK	BIT(6)
+#define DA9062AA_IF_RESET_SHIFT		7
+#define DA9062AA_IF_RESET_MASK		BIT(7)
+
+/* DA9062AA_CONFIG_K = 0x110 */
+#define DA9062AA_GPIO0_PUPD_SHIFT	0
+#define DA9062AA_GPIO0_PUPD_MASK	0x01
+#define DA9062AA_GPIO1_PUPD_SHIFT	1
+#define DA9062AA_GPIO1_PUPD_MASK	BIT(1)
+#define DA9062AA_GPIO2_PUPD_SHIFT	2
+#define DA9062AA_GPIO2_PUPD_MASK	BIT(2)
+#define DA9062AA_GPIO3_PUPD_SHIFT	3
+#define DA9062AA_GPIO3_PUPD_MASK	BIT(3)
+#define DA9062AA_GPIO4_PUPD_SHIFT	4
+#define DA9062AA_GPIO4_PUPD_MASK	BIT(4)
+
+/* DA9062AA_CONFIG_M = 0x112 */
+#define DA9062AA_NSHUTDOWN_PU_SHIFT	1
+#define DA9062AA_NSHUTDOWN_PU_MASK	BIT(1)
+#define DA9062AA_WDG_MODE_SHIFT		3
+#define DA9062AA_WDG_MODE_MASK		BIT(3)
+#define DA9062AA_OSC_FRQ_SHIFT		4
+#define DA9062AA_OSC_FRQ_MASK		(0x0f << 4)
+
+/* DA9062AA_TRIM_CLDR = 0x120 */
+#define DA9062AA_TRIM_CLDR_SHIFT	0
+#define DA9062AA_TRIM_CLDR_MASK		0xff
+
+/* DA9062AA_GP_ID_0 = 0x121 */
+#define DA9062AA_GP_0_SHIFT		0
+#define DA9062AA_GP_0_MASK		0xff
+
+/* DA9062AA_GP_ID_1 = 0x122 */
+#define DA9062AA_GP_1_SHIFT		0
+#define DA9062AA_GP_1_MASK		0xff
+
+/* DA9062AA_GP_ID_2 = 0x123 */
+#define DA9062AA_GP_2_SHIFT		0
+#define DA9062AA_GP_2_MASK		0xff
+
+/* DA9062AA_GP_ID_3 = 0x124 */
+#define DA9062AA_GP_3_SHIFT		0
+#define DA9062AA_GP_3_MASK		0xff
+
+/* DA9062AA_GP_ID_4 = 0x125 */
+#define DA9062AA_GP_4_SHIFT		0
+#define DA9062AA_GP_4_MASK		0xff
+
+/* DA9062AA_GP_ID_5 = 0x126 */
+#define DA9062AA_GP_5_SHIFT		0
+#define DA9062AA_GP_5_MASK		0xff
+
+/* DA9062AA_GP_ID_6 = 0x127 */
+#define DA9062AA_GP_6_SHIFT		0
+#define DA9062AA_GP_6_MASK		0xff
+
+/* DA9062AA_GP_ID_7 = 0x128 */
+#define DA9062AA_GP_7_SHIFT		0
+#define DA9062AA_GP_7_MASK		0xff
+
+/* DA9062AA_GP_ID_8 = 0x129 */
+#define DA9062AA_GP_8_SHIFT		0
+#define DA9062AA_GP_8_MASK		0xff
+
+/* DA9062AA_GP_ID_9 = 0x12A */
+#define DA9062AA_GP_9_SHIFT		0
+#define DA9062AA_GP_9_MASK		0xff
+
+/* DA9062AA_GP_ID_10 = 0x12B */
+#define DA9062AA_GP_10_SHIFT		0
+#define DA9062AA_GP_10_MASK		0xff
+
+/* DA9062AA_GP_ID_11 = 0x12C */
+#define DA9062AA_GP_11_SHIFT		0
+#define DA9062AA_GP_11_MASK		0xff
+
+/* DA9062AA_GP_ID_12 = 0x12D */
+#define DA9062AA_GP_12_SHIFT		0
+#define DA9062AA_GP_12_MASK		0xff
+
+/* DA9062AA_GP_ID_13 = 0x12E */
+#define DA9062AA_GP_13_SHIFT		0
+#define DA9062AA_GP_13_MASK		0xff
+
+/* DA9062AA_GP_ID_14 = 0x12F */
+#define DA9062AA_GP_14_SHIFT		0
+#define DA9062AA_GP_14_MASK		0xff
+
+/* DA9062AA_GP_ID_15 = 0x130 */
+#define DA9062AA_GP_15_SHIFT		0
+#define DA9062AA_GP_15_MASK		0xff
+
+/* DA9062AA_GP_ID_16 = 0x131 */
+#define DA9062AA_GP_16_SHIFT		0
+#define DA9062AA_GP_16_MASK		0xff
+
+/* DA9062AA_GP_ID_17 = 0x132 */
+#define DA9062AA_GP_17_SHIFT		0
+#define DA9062AA_GP_17_MASK		0xff
+
+/* DA9062AA_GP_ID_18 = 0x133 */
+#define DA9062AA_GP_18_SHIFT		0
+#define DA9062AA_GP_18_MASK		0xff
+
+/* DA9062AA_GP_ID_19 = 0x134 */
+#define DA9062AA_GP_19_SHIFT		0
+#define DA9062AA_GP_19_MASK		0xff
+
+/* DA9062AA_DEVICE_ID = 0x181 */
+#define DA9062AA_DEV_ID_SHIFT		0
+#define DA9062AA_DEV_ID_MASK		0xff
+
+/* DA9062AA_VARIANT_ID = 0x182 */
+#define DA9062AA_VRC_SHIFT		0
+#define DA9062AA_VRC_MASK		0x0f
+#define DA9062AA_MRC_SHIFT		4
+#define DA9062AA_MRC_MASK		(0x0f << 4)
+
+/* DA9062AA_CUSTOMER_ID = 0x183 */
+#define DA9062AA_CUST_ID_SHIFT		0
+#define DA9062AA_CUST_ID_MASK		0xff
+
+/* DA9062AA_CONFIG_ID = 0x184 */
+#define DA9062AA_CONFIG_REV_SHIFT	0
+#define DA9062AA_CONFIG_REV_MASK	0xff
+
+#endif /* __DA9062_H__ */
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 79f4d82..621af82 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -51,6 +51,7 @@
 	DA9063_IRQ_COMP_1V2,
 	DA9063_IRQ_LDO_LIM,
 	DA9063_IRQ_REG_UVOV,
+	DA9063_IRQ_DVC_RDY,
 	DA9063_IRQ_VDD_MON,
 	DA9063_IRQ_WARN,
 	DA9063_IRQ_GPI0,
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index 8feac78..2b300b4 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -20,12 +20,6 @@
 #ifndef LPC_ICH_H
 #define LPC_ICH_H
 
-/* Watchdog resources */
-#define ICH_RES_IO_TCO		0
-#define ICH_RES_IO_SMI		1
-#define ICH_RES_MEM_OFF		2
-#define ICH_RES_MEM_GCS_PMC	0
-
 /* GPIO resources */
 #define ICH_RES_GPIO	0
 #define ICH_RES_GPE0	1
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
new file mode 100644
index 0000000..095b121
--- /dev/null
+++ b/include/linux/mfd/max77693-common.h
@@ -0,0 +1,49 @@
+/*
+ * Common data shared between Maxim 77693 and 77843 drivers
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_MFD_MAX77693_COMMON_H
+#define __LINUX_MFD_MAX77693_COMMON_H
+
+enum max77693_types {
+	TYPE_MAX77693_UNKNOWN,
+	TYPE_MAX77693,
+	TYPE_MAX77843,
+
+	TYPE_MAX77693_NUM,
+};
+
+/*
+ * Shared also with max77843.
+ */
+struct max77693_dev {
+	struct device *dev;
+	struct i2c_client *i2c;		/* 0xCC , PMIC, Charger, Flash LED */
+	struct i2c_client *i2c_muic;	/* 0x4A , MUIC */
+	struct i2c_client *i2c_haptic;	/* MAX77693: 0x90 , Haptic */
+	struct i2c_client *i2c_chg;	/* MAX77843: 0xD2, Charger */
+
+	enum max77693_types type;
+
+	struct regmap *regmap;
+	struct regmap *regmap_muic;
+	struct regmap *regmap_haptic;	/* Only MAX77693 */
+	struct regmap *regmap_chg;	/* Only MAX77843 */
+
+	struct regmap_irq_chip_data *irq_data_led;
+	struct regmap_irq_chip_data *irq_data_topsys;
+	struct regmap_irq_chip_data *irq_data_chg; /* Only MAX77693 */
+	struct regmap_irq_chip_data *irq_data_muic;
+
+	int irq;
+};
+
+
+#endif /*  __LINUX_MFD_MAX77693_COMMON_H */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 51633ea..3c7a63b 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -310,30 +310,30 @@
 #define INTMASK2_CHGTYP_MASK		(1 << INTMASK2_CHGTYP_SHIFT)
 
 /* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT		(0)
-#define STATUS1_ADCLOW_SHIFT		(5)
-#define STATUS1_ADCERR_SHIFT		(6)
-#define STATUS1_ADC1K_SHIFT		(7)
-#define STATUS1_ADC_MASK		(0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK		(0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK		(0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK		(0x1 << STATUS1_ADC1K_SHIFT)
+#define MAX77693_STATUS1_ADC_SHIFT		0
+#define MAX77693_STATUS1_ADCLOW_SHIFT		5
+#define MAX77693_STATUS1_ADCERR_SHIFT		6
+#define MAX77693_STATUS1_ADC1K_SHIFT		7
+#define MAX77693_STATUS1_ADC_MASK		(0x1f << MAX77693_STATUS1_ADC_SHIFT)
+#define MAX77693_STATUS1_ADCLOW_MASK		BIT(MAX77693_STATUS1_ADCLOW_SHIFT)
+#define MAX77693_STATUS1_ADCERR_MASK		BIT(MAX77693_STATUS1_ADCERR_SHIFT)
+#define MAX77693_STATUS1_ADC1K_MASK		BIT(MAX77693_STATUS1_ADC1K_SHIFT)
 
-#define STATUS2_CHGTYP_SHIFT		(0)
-#define STATUS2_CHGDETRUN_SHIFT		(3)
-#define STATUS2_DCDTMR_SHIFT		(4)
-#define STATUS2_DXOVP_SHIFT		(5)
-#define STATUS2_VBVOLT_SHIFT		(6)
-#define STATUS2_VIDRM_SHIFT		(7)
-#define STATUS2_CHGTYP_MASK		(0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK		(0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK		(0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK		(0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK		(0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK		(0x1 << STATUS2_VIDRM_SHIFT)
+#define MAX77693_STATUS2_CHGTYP_SHIFT		0
+#define MAX77693_STATUS2_CHGDETRUN_SHIFT	3
+#define MAX77693_STATUS2_DCDTMR_SHIFT		4
+#define MAX77693_STATUS2_DXOVP_SHIFT		5
+#define MAX77693_STATUS2_VBVOLT_SHIFT		6
+#define MAX77693_STATUS2_VIDRM_SHIFT		7
+#define MAX77693_STATUS2_CHGTYP_MASK		(0x7 << MAX77693_STATUS2_CHGTYP_SHIFT)
+#define MAX77693_STATUS2_CHGDETRUN_MASK		BIT(MAX77693_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77693_STATUS2_DCDTMR_MASK		BIT(MAX77693_STATUS2_DCDTMR_SHIFT)
+#define MAX77693_STATUS2_DXOVP_MASK		BIT(MAX77693_STATUS2_DXOVP_SHIFT)
+#define MAX77693_STATUS2_VBVOLT_MASK		BIT(MAX77693_STATUS2_VBVOLT_SHIFT)
+#define MAX77693_STATUS2_VIDRM_MASK		BIT(MAX77693_STATUS2_VIDRM_SHIFT)
 
-#define STATUS3_OVP_SHIFT		(2)
-#define STATUS3_OVP_MASK		(0x1 << STATUS3_OVP_SHIFT)
+#define MAX77693_STATUS3_OVP_SHIFT		2
+#define MAX77693_STATUS3_OVP_MASK		BIT(MAX77693_STATUS3_OVP_SHIFT)
 
 /* MAX77693 CDETCTRL1~2 register */
 #define CDETCTRL1_CHGDETEN_SHIFT	(0)
@@ -362,38 +362,38 @@
 #define COMN1SW_MASK			(0x7 << COMN1SW_SHIFT)
 #define COMP2SW_MASK			(0x7 << COMP2SW_SHIFT)
 #define COMP_SW_MASK			(COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB			((1 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_USB	((1 << COMP2SW_SHIFT) \
 						| (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO		((2 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_AUDIO	((2 << COMP2SW_SHIFT) \
 						| (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART		((3 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_UART	((3 << COMP2SW_SHIFT) \
 						| (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN		((0 << COMP2SW_SHIFT) \
+#define MAX77693_CONTROL1_SW_OPEN	((0 << COMP2SW_SHIFT) \
 						| (0 << COMN1SW_SHIFT))
 
-#define CONTROL2_LOWPWR_SHIFT		(0)
-#define CONTROL2_ADCEN_SHIFT		(1)
-#define CONTROL2_CPEN_SHIFT		(2)
-#define CONTROL2_SFOUTASRT_SHIFT	(3)
-#define CONTROL2_SFOUTORD_SHIFT		(4)
-#define CONTROL2_ACCDET_SHIFT		(5)
-#define CONTROL2_USBCPINT_SHIFT		(6)
-#define CONTROL2_RCPS_SHIFT		(7)
-#define CONTROL2_LOWPWR_MASK		(0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK		(0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK		(0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK		(0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK		(0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK		(0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK		(0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK		(0x1 << CONTROL2_RCPS_SHIFT)
+#define MAX77693_CONTROL2_LOWPWR_SHIFT		0
+#define MAX77693_CONTROL2_ADCEN_SHIFT		1
+#define MAX77693_CONTROL2_CPEN_SHIFT		2
+#define MAX77693_CONTROL2_SFOUTASRT_SHIFT	3
+#define MAX77693_CONTROL2_SFOUTORD_SHIFT	4
+#define MAX77693_CONTROL2_ACCDET_SHIFT		5
+#define MAX77693_CONTROL2_USBCPINT_SHIFT	6
+#define MAX77693_CONTROL2_RCPS_SHIFT		7
+#define MAX77693_CONTROL2_LOWPWR_MASK		BIT(MAX77693_CONTROL2_LOWPWR_SHIFT)
+#define MAX77693_CONTROL2_ADCEN_MASK		BIT(MAX77693_CONTROL2_ADCEN_SHIFT)
+#define MAX77693_CONTROL2_CPEN_MASK		BIT(MAX77693_CONTROL2_CPEN_SHIFT)
+#define MAX77693_CONTROL2_SFOUTASRT_MASK	BIT(MAX77693_CONTROL2_SFOUTASRT_SHIFT)
+#define MAX77693_CONTROL2_SFOUTORD_MASK		BIT(MAX77693_CONTROL2_SFOUTORD_SHIFT)
+#define MAX77693_CONTROL2_ACCDET_MASK		BIT(MAX77693_CONTROL2_ACCDET_SHIFT)
+#define MAX77693_CONTROL2_USBCPINT_MASK		BIT(MAX77693_CONTROL2_USBCPINT_SHIFT)
+#define MAX77693_CONTROL2_RCPS_MASK		BIT(MAX77693_CONTROL2_RCPS_SHIFT)
 
-#define CONTROL3_JIGSET_SHIFT		(0)
-#define CONTROL3_BTLDSET_SHIFT		(2)
-#define CONTROL3_ADCDBSET_SHIFT		(4)
-#define CONTROL3_JIGSET_MASK		(0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK		(0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK		(0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define MAX77693_CONTROL3_JIGSET_SHIFT		0
+#define MAX77693_CONTROL3_BTLDSET_SHIFT		2
+#define MAX77693_CONTROL3_ADCDBSET_SHIFT	4
+#define MAX77693_CONTROL3_JIGSET_MASK		(0x3 << MAX77693_CONTROL3_JIGSET_SHIFT)
+#define MAX77693_CONTROL3_BTLDSET_MASK		(0x3 << MAX77693_CONTROL3_BTLDSET_SHIFT)
+#define MAX77693_CONTROL3_ADCDBSET_MASK		(0x3 << MAX77693_CONTROL3_ADCDBSET_SHIFT)
 
 /* Slave addr = 0x90: Haptic */
 enum max77693_haptic_reg {
@@ -529,36 +529,4 @@
 	MAX77693_MUIC_IRQ_NR,
 };
 
-struct max77693_dev {
-	struct device *dev;
-	struct i2c_client *i2c;		/* 0xCC , PMIC, Charger, Flash LED */
-	struct i2c_client *muic;	/* 0x4A , MUIC */
-	struct i2c_client *haptic;	/* 0x90 , Haptic */
-
-	int type;
-
-	struct regmap *regmap;
-	struct regmap *regmap_muic;
-	struct regmap *regmap_haptic;
-
-	struct regmap_irq_chip_data *irq_data_led;
-	struct regmap_irq_chip_data *irq_data_topsys;
-	struct regmap_irq_chip_data *irq_data_charger;
-	struct regmap_irq_chip_data *irq_data_muic;
-
-	int irq;
-	int irq_gpio;
-	struct mutex irqlock;
-	int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
-	int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
-};
-
-enum max77693_types {
-	TYPE_MAX77693,
-};
-
-extern int max77693_irq_init(struct max77693_dev *max77686);
-extern void max77693_irq_exit(struct max77693_dev *max77686);
-extern int max77693_irq_resume(struct max77693_dev *max77686);
-
 #endif /*  __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index 7178ace..c19303b 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -318,62 +318,62 @@
 	MAX77843_INTSRCMASK_SYS_MASK | MAX77843_INTSRCMASK_CHGR_MASK)
 
 /* MAX77843 STATUS register*/
-#define STATUS1_ADC_SHIFT			0
-#define STATUS1_ADCERROR_SHIFT			6
-#define STATUS1_ADC1K_SHIFT			7
-#define STATUS2_CHGTYP_SHIFT			0
-#define STATUS2_CHGDETRUN_SHIFT			3
-#define STATUS2_DCDTMR_SHIFT			4
-#define STATUS2_DXOVP_SHIFT			5
-#define STATUS2_VBVOLT_SHIFT			6
-#define STATUS3_VBADC_SHIFT			0
-#define STATUS3_VDNMON_SHIFT			4
-#define STATUS3_DNRES_SHIFT			5
-#define STATUS3_MPNACK_SHIFT			6
+#define MAX77843_MUIC_STATUS1_ADC_SHIFT		0
+#define MAX77843_MUIC_STATUS1_ADCERROR_SHIFT	6
+#define MAX77843_MUIC_STATUS1_ADC1K_SHIFT	7
+#define MAX77843_MUIC_STATUS2_CHGTYP_SHIFT	0
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT	3
+#define MAX77843_MUIC_STATUS2_DCDTMR_SHIFT	4
+#define MAX77843_MUIC_STATUS2_DXOVP_SHIFT	5
+#define MAX77843_MUIC_STATUS2_VBVOLT_SHIFT	6
+#define MAX77843_MUIC_STATUS3_VBADC_SHIFT	0
+#define MAX77843_MUIC_STATUS3_VDNMON_SHIFT	4
+#define MAX77843_MUIC_STATUS3_DNRES_SHIFT	5
+#define MAX77843_MUIC_STATUS3_MPNACK_SHIFT	6
 
-#define MAX77843_MUIC_STATUS1_ADC_MASK		(0x1f << STATUS1_ADC_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADCERROR_MASK	BIT(STATUS1_ADCERROR_SHIFT)
-#define MAX77843_MUIC_STATUS1_ADC1K_MASK	BIT(STATUS1_ADC1K_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGTYP_MASK	(0x7 << STATUS2_CHGTYP_SHIFT)
-#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK	BIT(STATUS2_CHGDETRUN_SHIFT)
-#define MAX77843_MUIC_STATUS2_DCDTMR_MASK	BIT(STATUS2_DCDTMR_SHIFT)
-#define MAX77843_MUIC_STATUS2_DXOVP_MASK	BIT(STATUS2_DXOVP_SHIFT)
-#define MAX77843_MUIC_STATUS2_VBVOLT_MASK	BIT(STATUS2_VBVOLT_SHIFT)
-#define MAX77843_MUIC_STATUS3_VBADC_MASK	(0xf << STATUS3_VBADC_SHIFT)
-#define MAX77843_MUIC_STATUS3_VDNMON_MASK	BIT(STATUS3_VDNMON_SHIFT)
-#define MAX77843_MUIC_STATUS3_DNRES_MASK	BIT(STATUS3_DNRES_SHIFT)
-#define MAX77843_MUIC_STATUS3_MPNACK_MASK	BIT(STATUS3_MPNACK_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC_MASK		(0x1f << MAX77843_MUIC_STATUS1_ADC_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADCERROR_MASK	BIT(MAX77843_MUIC_STATUS1_ADCERROR_SHIFT)
+#define MAX77843_MUIC_STATUS1_ADC1K_MASK	BIT(MAX77843_MUIC_STATUS1_ADC1K_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGTYP_MASK	(0x7 << MAX77843_MUIC_STATUS2_CHGTYP_SHIFT)
+#define MAX77843_MUIC_STATUS2_CHGDETRUN_MASK	BIT(MAX77843_MUIC_STATUS2_CHGDETRUN_SHIFT)
+#define MAX77843_MUIC_STATUS2_DCDTMR_MASK	BIT(MAX77843_MUIC_STATUS2_DCDTMR_SHIFT)
+#define MAX77843_MUIC_STATUS2_DXOVP_MASK	BIT(MAX77843_MUIC_STATUS2_DXOVP_SHIFT)
+#define MAX77843_MUIC_STATUS2_VBVOLT_MASK	BIT(MAX77843_MUIC_STATUS2_VBVOLT_SHIFT)
+#define MAX77843_MUIC_STATUS3_VBADC_MASK	(0xf << MAX77843_MUIC_STATUS3_VBADC_SHIFT)
+#define MAX77843_MUIC_STATUS3_VDNMON_MASK	BIT(MAX77843_MUIC_STATUS3_VDNMON_SHIFT)
+#define MAX77843_MUIC_STATUS3_DNRES_MASK	BIT(MAX77843_MUIC_STATUS3_DNRES_SHIFT)
+#define MAX77843_MUIC_STATUS3_MPNACK_MASK	BIT(MAX77843_MUIC_STATUS3_MPNACK_SHIFT)
 
 /* MAX77843 CONTROL register */
-#define CONTROL1_COMP1SW_SHIFT			0
-#define CONTROL1_COMP2SW_SHIFT			3
-#define CONTROL1_IDBEN_SHIFT			7
-#define CONTROL2_LOWPWR_SHIFT			0
-#define CONTROL2_ADCEN_SHIFT			1
-#define CONTROL2_CPEN_SHIFT			2
-#define CONTROL2_ACC_DET_SHIFT			5
-#define CONTROL2_USBCPINT_SHIFT			6
-#define CONTROL2_RCPS_SHIFT			7
-#define CONTROL3_JIGSET_SHIFT			0
-#define CONTROL4_ADCDBSET_SHIFT			0
-#define CONTROL4_USBAUTO_SHIFT			4
-#define CONTROL4_FCTAUTO_SHIFT			5
-#define CONTROL4_ADCMODE_SHIFT			6
+#define MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT	0
+#define MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT	3
+#define MAX77843_MUIC_CONTROL1_IDBEN_SHIFT	7
+#define MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT	0
+#define MAX77843_MUIC_CONTROL2_ADCEN_SHIFT	1
+#define MAX77843_MUIC_CONTROL2_CPEN_SHIFT	2
+#define MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT	5
+#define MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT	6
+#define MAX77843_MUIC_CONTROL2_RCPS_SHIFT	7
+#define MAX77843_MUIC_CONTROL3_JIGSET_SHIFT	0
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT	0
+#define MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT	4
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT	5
+#define MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT	6
 
-#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK	(0x7 << CONTROL1_COMP1SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK	(0x7 << CONTROL1_COMP2SW_SHIFT)
-#define MAX77843_MUIC_CONTROL1_IDBEN_MASK	BIT(CONTROL1_IDBEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK	BIT(CONTROL2_LOWPWR_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ADCEN_MASK	BIT(CONTROL2_ADCEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_CPEN_MASK	BIT(CONTROL2_CPEN_SHIFT)
-#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK	BIT(CONTROL2_ACC_DET_SHIFT)
-#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK	BIT(CONTROL2_USBCPINT_SHIFT)
-#define MAX77843_MUIC_CONTROL2_RCPS_MASK	BIT(CONTROL2_RCPS_SHIFT)
-#define MAX77843_MUIC_CONTROL3_JIGSET_MASK	(0x3 << CONTROL3_JIGSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK	(0x3 << CONTROL4_ADCDBSET_SHIFT)
-#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK	BIT(CONTROL4_USBAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK	BIT(CONTROL4_FCTAUTO_SHIFT)
-#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK	(0x3 << CONTROL4_ADCMODE_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP1SW_MASK	(0x7 << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_COMP2SW_MASK	(0x7 << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT)
+#define MAX77843_MUIC_CONTROL1_IDBEN_MASK	BIT(MAX77843_MUIC_CONTROL1_IDBEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_LOWPWR_MASK	BIT(MAX77843_MUIC_CONTROL2_LOWPWR_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ADCEN_MASK	BIT(MAX77843_MUIC_CONTROL2_ADCEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_CPEN_MASK	BIT(MAX77843_MUIC_CONTROL2_CPEN_SHIFT)
+#define MAX77843_MUIC_CONTROL2_ACC_DET_MASK	BIT(MAX77843_MUIC_CONTROL2_ACC_DET_SHIFT)
+#define MAX77843_MUIC_CONTROL2_USBCPINT_MASK	BIT(MAX77843_MUIC_CONTROL2_USBCPINT_SHIFT)
+#define MAX77843_MUIC_CONTROL2_RCPS_MASK	BIT(MAX77843_MUIC_CONTROL2_RCPS_SHIFT)
+#define MAX77843_MUIC_CONTROL3_JIGSET_MASK	(0x3 << MAX77843_MUIC_CONTROL3_JIGSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCDBSET_MASK	(0x3 << MAX77843_MUIC_CONTROL4_ADCDBSET_SHIFT)
+#define MAX77843_MUIC_CONTROL4_USBAUTO_MASK	BIT(MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_FCTAUTO_MASK	BIT(MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT)
+#define MAX77843_MUIC_CONTROL4_ADCMODE_MASK	(0x3 << MAX77843_MUIC_CONTROL4_ADCMODE_SHIFT)
 
 /* MAX77843 switch port */
 #define COM_OPEN				0
@@ -383,38 +383,38 @@
 #define COM_AUX_USB				4
 #define COM_AUX_UART				5
 
-#define CONTROL1_COM_SW \
+#define MAX77843_MUIC_CONTROL1_COM_SW \
 	((MAX77843_MUIC_CONTROL1_COMP1SW_MASK | \
 	 MAX77843_MUIC_CONTROL1_COMP2SW_MASK))
 
-#define CONTROL1_SW_OPEN \
-	((COM_OPEN << CONTROL1_COMP1SW_SHIFT | \
-	 COM_OPEN << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_USB \
-	((COM_USB << CONTROL1_COMP1SW_SHIFT | \
-	 COM_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUDIO \
-	((COM_AUDIO << CONTROL1_COMP1SW_SHIFT | \
-	 COM_AUDIO << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_UART \
-	((COM_UART << CONTROL1_COMP1SW_SHIFT | \
-	 COM_UART << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_USB \
-	((COM_AUX_USB << CONTROL1_COMP1SW_SHIFT | \
-	 COM_AUX_USB << CONTROL1_COMP2SW_SHIFT))
-#define CONTROL1_SW_AUX_UART \
-	((COM_AUX_UART << CONTROL1_COMP1SW_SHIFT | \
-	 COM_AUX_UART << CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_OPEN \
+	((COM_OPEN << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+	 COM_OPEN << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_USB \
+	((COM_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+	 COM_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUDIO \
+	((COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+	 COM_AUDIO << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_UART \
+	((COM_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+	 COM_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_USB \
+	((COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+	 COM_AUX_USB << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
+#define MAX77843_MUIC_CONTROL1_SW_AUX_UART \
+	((COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP1SW_SHIFT | \
+	 COM_AUX_UART << MAX77843_MUIC_CONTROL1_COMP2SW_SHIFT))
 
 #define MAX77843_DISABLE			0
 #define MAX77843_ENABLE				1
 
 #define CONTROL4_AUTO_DISABLE \
-	((MAX77843_DISABLE << CONTROL4_USBAUTO_SHIFT) | \
-	(MAX77843_DISABLE << CONTROL4_FCTAUTO_SHIFT))
+	((MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+	(MAX77843_DISABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
 #define CONTROL4_AUTO_ENABLE \
-	((MAX77843_ENABLE << CONTROL4_USBAUTO_SHIFT) | \
-	(MAX77843_ENABLE << CONTROL4_FCTAUTO_SHIFT))
+	((MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_USBAUTO_SHIFT) | \
+	(MAX77843_ENABLE << MAX77843_MUIC_CONTROL4_FCTAUTO_SHIFT))
 
 /* MAX77843 SAFEOUT LDO Control register */
 #define SAFEOUTCTRL_SAFEOUT1_SHIFT		0
@@ -431,24 +431,4 @@
 #define MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK \
 		(0x3 << SAFEOUTCTRL_SAFEOUT2_SHIFT)
 
-struct max77843 {
-	struct device *dev;
-
-	struct i2c_client *i2c;
-	struct i2c_client *i2c_chg;
-	struct i2c_client *i2c_fuel;
-	struct i2c_client *i2c_muic;
-
-	struct regmap *regmap;
-	struct regmap *regmap_chg;
-	struct regmap *regmap_fuel;
-	struct regmap *regmap_muic;
-
-	struct regmap_irq_chip_data *irq_data;
-	struct regmap_irq_chip_data *irq_data_chg;
-	struct regmap_irq_chip_data *irq_data_fuel;
-	struct regmap_irq_chip_data *irq_data_muic;
-
-	int irq;
-};
 #endif /* __MAX77843_H__ */
diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h
index cf5265b..45b8e8a 100644
--- a/include/linux/mfd/mt6397/core.h
+++ b/include/linux/mfd/mt6397/core.h
@@ -57,6 +57,7 @@
 	int irq;
 	struct irq_domain *irq_domain;
 	struct mutex irqlock;
+	u16 wake_mask[2];
 	u16 irq_masks_cur[2];
 	u16 irq_masks_cache[2];
 };
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index bb270bd..13e1d96 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -21,6 +21,7 @@
 #include <linux/regmap.h>
 #include <linux/regulator/driver.h>
 #include <linux/extcon.h>
+#include <linux/of_gpio.h>
 #include <linux/usb/phy_companion.h>
 
 #define PALMAS_NUM_CLIENTS		3
@@ -551,10 +552,16 @@
 	int vbus_otg_irq;
 	int vbus_irq;
 
+	int gpio_id_irq;
+	struct gpio_desc *id_gpiod;
+	unsigned long sw_debounce_jiffies;
+	struct delayed_work wq_detectid;
+
 	enum palmas_usb_state linkstat;
 	int wakeup;
 	bool enable_vbus_detection;
 	bool enable_id_detection;
+	bool enable_gpio_id_detection;
 };
 
 #define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index d16f4c8..558a485 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -435,4 +435,12 @@
 #define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS			(0x1 << 1)
 #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK			(0x1 << 1)
 
+/* For imx6ul iomux gpr register field definitions */
+#define IMX6UL_GPR1_ENET1_CLK_DIR		(0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_DIR		(0x1 << 18)
+#define IMX6UL_GPR1_ENET1_CLK_OUTPUT		(0x1 << 17)
+#define IMX6UL_GPR1_ENET2_CLK_OUTPUT		(0x1 << 18)
+#define IMX6UL_GPR1_ENET_CLK_DIR		(0x3 << 17)
+#define IMX6UL_GPR1_ENET_CLK_OUTPUT		(0x3 << 17)
+
 #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 819077c..81f6e42 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -67,7 +67,7 @@
 };
 
 extern int misc_register(struct miscdevice *misc);
-extern int misc_deregister(struct miscdevice *misc);
+extern void misc_deregister(struct miscdevice *misc);
 
 #define MODULE_ALIAS_MISCDEV(minor)				\
 	MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR)	\
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index e7ecc12..09cebe5 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -88,7 +88,8 @@
 
 enum {
 	MLX4_CQE_L2_TUNNEL_IPOK		= 1 << 31,
-	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_CVLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_SVLAN_PRESENT_MASK	= 1 << 30,
 	MLX4_CQE_L2_TUNNEL		= 1 << 27,
 	MLX4_CQE_L2_TUNNEL_CSUM		= 1 << 26,
 	MLX4_CQE_L2_TUNNEL_IPV4		= 1 << 25,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index fd13c1c..bcbf8c7 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -211,6 +211,8 @@
 	MLX4_DEV_CAP_FLAG2_ETS_CFG		= 1LL <<  26,
 	MLX4_DEV_CAP_FLAG2_PORT_BEACON		= 1LL <<  27,
 	MLX4_DEV_CAP_FLAG2_IGNORE_FCS		= 1LL <<  28,
+	MLX4_DEV_CAP_FLAG2_PHV_EN		= 1LL <<  29,
+	MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN	= 1LL <<  30,
 };
 
 enum {
@@ -581,6 +583,7 @@
 	u64			phys_port_id[MLX4_MAX_PORTS + 1];
 	int			tunnel_offload_mode;
 	u8			rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+	u8			phv_bit[MLX4_MAX_PORTS + 1];
 	u8			alloc_res_qp_mask;
 	u32			dmfs_high_rate_qpn_base;
 	u32			dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
 			    u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6fed539..de45a51 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -272,7 +272,8 @@
 	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
 	MLX4_WQE_CTRL_IP_CSUM		= 1 << 4,
 	MLX4_WQE_CTRL_TCP_UDP_CSUM	= 1 << 5,
-	MLX4_WQE_CTRL_INS_VLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_CVLAN		= 1 << 6,
+	MLX4_WQE_CTRL_INS_SVLAN		= 1 << 7,
 	MLX4_WQE_CTRL_STRONG_ORDER	= 1 << 7,
 	MLX4_WQE_CTRL_FORCE_LOOPBACK	= 1 << 0,
 };
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b943cd9..250b1ff 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1182,6 +1182,16 @@
 	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
 };
 
+enum {
+	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
+	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
+	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
+	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
+	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
+	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+};
+
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5722d88..8b6d6f2 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -103,6 +103,8 @@
 	MLX5_REG_PMTU		 = 0x5003,
 	MLX5_REG_PTYS		 = 0x5004,
 	MLX5_REG_PAOS		 = 0x5006,
+	MLX5_REG_PFCC            = 0x5007,
+	MLX5_REG_PPCNT		 = 0x5008,
 	MLX5_REG_PMAOS		 = 0x5012,
 	MLX5_REG_PUDE		 = 0x5009,
 	MLX5_REG_PMPE		 = 0x5010,
@@ -151,8 +153,8 @@
 };
 
 enum mlx5_port_status {
-	MLX5_PORT_UP        = 1 << 1,
-	MLX5_PORT_DOWN      = 1 << 2,
+	MLX5_PORT_UP        = 1,
+	MLX5_PORT_DOWN      = 2,
 };
 
 struct mlx5_uuar_info {
@@ -380,7 +382,7 @@
 	u32			index;
 	struct list_head	bf_list;
 	unsigned		free_bf_bmap;
-	void __iomem	       *wc_map;
+	void __iomem	       *bf_map;
 	void __iomem	       *map;
 };
 
@@ -435,6 +437,8 @@
 	struct mlx5_uuar_info	uuari;
 	MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
+	struct io_mapping	*bf_mapping;
+
 	/* pages stuff */
 	struct workqueue_struct *pg_wq;
 	struct rb_root		page_root;
@@ -463,6 +467,10 @@
 	/* end: mr stuff */
 
 	/* start: alloc stuff */
+	/* protect buffer allocation according to numa node */
+	struct mutex            alloc_mutex;
+	int                     numa_node;
+
 	struct mutex            pgdir_mutex;
 	struct list_head        pgdir_list;
 	/* end: alloc staff */
@@ -672,6 +680,8 @@
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -752,9 +762,10 @@
 			       u8 local_port);
 int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
 			int proto_mask);
-int mlx5_set_port_status(struct mlx5_core_dev *dev,
-			 enum mlx5_port_status status);
-int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status);
 
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
 void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
@@ -764,6 +775,10 @@
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
 			      u8 *vl_hw_cap, u8 local_port);
 
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+			  u32 *rx_pause, u32 *tx_pause);
+
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
@@ -773,6 +788,8 @@
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+		       int node);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
 const char *mlx5_command_str(int command);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 6d2f6fe..dd20974 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1936,9 +1936,9 @@
 };
 
 enum {
-	MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
-	MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
-	MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+	MLX5_RX_HASH_FN_NONE           = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
 };
 
 enum {
@@ -4050,6 +4050,13 @@
 	struct mlx5_ifc_tisc_bits ctx;
 };
 
+struct mlx5_ifc_modify_tir_bitmask_bits {
+	u8	   reserved[0x20];
+
+	u8         reserved1[0x1f];
+	u8         lro[0x1];
+};
+
 struct mlx5_ifc_modify_tir_out_bits {
 	u8         status[0x8];
 	u8         reserved_0[0x18];
@@ -4071,7 +4078,7 @@
 
 	u8         reserved_3[0x20];
 
-	u8         modify_bitmask[0x40];
+	struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
 
 	u8         reserved_4[0x40];
 
@@ -4116,6 +4123,13 @@
 	u8         reserved_1[0x40];
 };
 
+struct mlx5_ifc_rqt_bitmask_bits {
+	u8	   reserved[0x20];
+
+	u8         reserved1[0x1f];
+	u8         rqn_list[0x1];
+};
+
 struct mlx5_ifc_modify_rqt_in_bits {
 	u8         opcode[0x10];
 	u8         reserved_0[0x10];
@@ -4128,7 +4142,7 @@
 
 	u8         reserved_3[0x20];
 
-	u8         modify_bitmask[0x40];
+	struct mlx5_ifc_rqt_bitmask_bits bitmask;
 
 	u8         reserved_4[0x40];
 
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 34f25b7..688997a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -253,7 +253,7 @@
 
 	__u32 		prod_id_hash[4];
 
-	/* not matched against in kernelspace*/
+	/* not matched against in kernelspace */
 	const char *	prod_id[4];
 
 	/* not matched against */
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644
index 0000000..ef29eb2
--- /dev/null
+++ b/include/linux/mpls_iptunnel.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif  /* _LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8ac4a68..ad939d0 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,38 +14,85 @@
 /* Helper functions */
 struct irq_data;
 struct msi_desc;
+struct pci_dev;
+struct platform_msi_priv_data;
 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
 
-struct msi_desc {
-	struct {
-		__u8	is_msix	: 1;
-		__u8	multiple: 3;	/* log2 num of messages allocated */
-		__u8	multi_cap : 3;	/* log2 num of messages supported */
-		__u8	maskbit	: 1;	/* mask-pending bit supported ? */
-		__u8	is_64	: 1;	/* Address size: 0=32bit 1=64bit */
-		__u16	entry_nr;	/* specific enabled entry */
-		unsigned default_irq;	/* default pre-assigned irq */
-	} msi_attrib;
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
+				    struct msi_msg *msg);
 
-	u32 masked;			/* mask bits */
-	unsigned int irq;
-	unsigned int nvec_used;		/* number of messages */
-	struct list_head list;
+/**
+ * platform_msi_desc - Platform device specific msi descriptor data
+ * @msi_priv_data:	Pointer to platform private data
+ * @msi_index:		The index of the MSI descriptor for multi MSI
+ */
+struct platform_msi_desc {
+	struct platform_msi_priv_data	*msi_priv_data;
+	u16				msi_index;
+};
+
+/**
+ * struct msi_desc - Descriptor structure for MSI based interrupts
+ * @list:	List head for management
+ * @irq:	The base interrupt number
+ * @nvec_used:	The number of vectors used
+ * @dev:	Pointer to the device which uses this descriptor
+ * @msg:	The last set MSI message cached for reuse
+ *
+ * @masked:	[PCI MSI/X] Mask bits
+ * @is_msix:	[PCI MSI/X] True if MSI-X
+ * @multiple:	[PCI MSI/X] log2 num of messages allocated
+ * @multi_cap:	[PCI MSI/X] log2 num of messages supported
+ * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
+ * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
+ * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos:	[PCI MSI]   Mask register position
+ * @mask_base:	[PCI MSI-X] Mask register base address
+ * @platform:	[platform]  Platform device specific msi descriptor data
+ */
+struct msi_desc {
+	/* Shared device/bus type independent data */
+	struct list_head		list;
+	unsigned int			irq;
+	unsigned int			nvec_used;
+	struct device			*dev;
+	struct msi_msg			msg;
 
 	union {
-		void __iomem *mask_base;
-		u8 mask_pos;
-	};
-	struct pci_dev *dev;
+		/* PCI MSI/X specific data */
+		struct {
+			u32 masked;
+			struct {
+				__u8	is_msix		: 1;
+				__u8	multiple	: 3;
+				__u8	multi_cap	: 3;
+				__u8	maskbit		: 1;
+				__u8	is_64		: 1;
+				__u16	entry_nr;
+				unsigned default_irq;
+			} msi_attrib;
+			union {
+				u8	mask_pos;
+				void __iomem *mask_base;
+			};
+		};
 
-	/* Last set MSI message */
-	struct msi_msg msg;
+		/*
+		 * Non PCI variants add their data structure here. New
+		 * entries need to use a named structure. We want
+		 * proper name spaces for this. The PCI part is
+		 * anonymous for now as it would require an immediate
+		 * tree wide cleanup.
+		 */
+		struct platform_msi_desc platform;
+	};
 };
 
 /* Helpers to hide struct msi_desc implementation details */
-#define msi_desc_to_dev(desc)		(&(desc)->dev.dev)
-#define dev_to_msi_list(dev)		(&to_pci_dev((dev))->msi_list)
+#define msi_desc_to_dev(desc)		((desc)->dev)
+#define dev_to_msi_list(dev)		(&(dev)->msi_list)
 #define first_msi_entry(dev)		\
 	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
 #define for_each_msi_entry(desc, dev)	\
@@ -56,12 +103,17 @@
 #define for_each_pci_msi_entry(desc, pdev)	\
 	for_each_msi_entry((desc), &(pdev)->dev)
 
-static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
+void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
+#else /* CONFIG_PCI_MSI */
+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
 {
-	return desc->dev;
+	return NULL;
 }
 #endif /* CONFIG_PCI_MSI */
 
+struct msi_desc *alloc_msi_entry(struct device *dev);
+void free_msi_entry(struct msi_desc *entry);
 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
 void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -108,9 +160,6 @@
 	struct device *dev;
 	struct device_node *of_node;
 	struct list_head list;
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
-	struct irq_domain *domain;
-#endif
 
 	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
 			 struct msi_desc *desc);
@@ -221,6 +270,12 @@
 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
 
+struct irq_domain *platform_msi_create_irq_domain(struct device_node *np,
+						  struct msi_domain_info *info,
+						  struct irq_domain *parent);
+int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
+				   irq_write_msi_msg_t write_msi_msg);
+void platform_msi_domain_free_irqs(struct device *dev);
 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
 
 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
diff --git a/include/linux/net.h b/include/linux/net.h
index 04aa068..049d4b0 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -239,8 +239,16 @@
 	net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)				\
 	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
+#if defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)				\
 	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
+#else
+#define net_dbg_ratelimited(fmt, ...)				\
+	do {							\
+		if (0)						\
+			no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
+	} while (0)
+#endif
 
 bool __net_get_random_once(void *buf, int nbytes, bool *done,
 			   struct static_key *done_key);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e20979d..88a0069 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -766,6 +766,13 @@
 	unsigned char id_len;
 };
 
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+					    struct netdev_phys_item_id *b)
+{
+	return a->id_len == b->id_len &&
+	       memcmp(a->id, b->id, a->id_len) == 0;
+}
+
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
 				       struct sk_buff *skb);
 
@@ -1041,6 +1048,12 @@
  *	TX queue.
  * int (*ndo_get_iflink)(const struct net_device *dev);
  *	Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ *				  bool proto_down);
+ *	This function is used to pass protocol port error state information
+ *	to the switch driver. The switch driver can react to the proto_down
+ *      by doing a phys down on the associated switch port.
+ *
  */
 struct net_device_ops {
 	int			(*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@
 						      int queue_index,
 						      u32 maxrate);
 	int			(*ndo_get_iflink)(const struct net_device *dev);
+	int			(*ndo_change_proto_down)(struct net_device *dev,
+							 bool proto_down);
 };
 
 /**
@@ -1225,13 +1240,8 @@
  *
  * @IFF_802_1Q_VLAN: 802.1Q VLAN device
  * @IFF_EBRIDGE: Ethernet bridging device
- * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
- * @IFF_MASTER_8023AD: bonding master, 802.3ad
- * @IFF_MASTER_ALB: bonding master, balance-alb
  * @IFF_BONDING: bonding master or slave
- * @IFF_SLAVE_NEEDARP: need ARPs for validation
  * @IFF_ISATAP: ISATAP interface (RFC4214)
- * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
  * @IFF_WAN_HDLC: WAN HDLC device
  * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
  *	release skb->dst
@@ -1247,44 +1257,40 @@
  * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
  *	change when it's running
  * @IFF_MACVLAN: Macvlan device
+ * @IFF_VRF_MASTER: device is a VRF master
+ * @IFF_NO_QUEUE: device can run without qdisc attached
+ * @IFF_OPENVSWITCH: device is an Open vSwitch master
  */
 enum netdev_priv_flags {
 	IFF_802_1Q_VLAN			= 1<<0,
 	IFF_EBRIDGE			= 1<<1,
-	IFF_SLAVE_INACTIVE		= 1<<2,
-	IFF_MASTER_8023AD		= 1<<3,
-	IFF_MASTER_ALB			= 1<<4,
-	IFF_BONDING			= 1<<5,
-	IFF_SLAVE_NEEDARP		= 1<<6,
-	IFF_ISATAP			= 1<<7,
-	IFF_MASTER_ARPMON		= 1<<8,
-	IFF_WAN_HDLC			= 1<<9,
-	IFF_XMIT_DST_RELEASE		= 1<<10,
-	IFF_DONT_BRIDGE			= 1<<11,
-	IFF_DISABLE_NETPOLL		= 1<<12,
-	IFF_MACVLAN_PORT		= 1<<13,
-	IFF_BRIDGE_PORT			= 1<<14,
-	IFF_OVS_DATAPATH		= 1<<15,
-	IFF_TX_SKB_SHARING		= 1<<16,
-	IFF_UNICAST_FLT			= 1<<17,
-	IFF_TEAM_PORT			= 1<<18,
-	IFF_SUPP_NOFCS			= 1<<19,
-	IFF_LIVE_ADDR_CHANGE		= 1<<20,
-	IFF_MACVLAN			= 1<<21,
-	IFF_XMIT_DST_RELEASE_PERM	= 1<<22,
-	IFF_IPVLAN_MASTER		= 1<<23,
-	IFF_IPVLAN_SLAVE		= 1<<24,
+	IFF_BONDING			= 1<<2,
+	IFF_ISATAP			= 1<<3,
+	IFF_WAN_HDLC			= 1<<4,
+	IFF_XMIT_DST_RELEASE		= 1<<5,
+	IFF_DONT_BRIDGE			= 1<<6,
+	IFF_DISABLE_NETPOLL		= 1<<7,
+	IFF_MACVLAN_PORT		= 1<<8,
+	IFF_BRIDGE_PORT			= 1<<9,
+	IFF_OVS_DATAPATH		= 1<<10,
+	IFF_TX_SKB_SHARING		= 1<<11,
+	IFF_UNICAST_FLT			= 1<<12,
+	IFF_TEAM_PORT			= 1<<13,
+	IFF_SUPP_NOFCS			= 1<<14,
+	IFF_LIVE_ADDR_CHANGE		= 1<<15,
+	IFF_MACVLAN			= 1<<16,
+	IFF_XMIT_DST_RELEASE_PERM	= 1<<17,
+	IFF_IPVLAN_MASTER		= 1<<18,
+	IFF_IPVLAN_SLAVE		= 1<<19,
+	IFF_VRF_MASTER			= 1<<20,
+	IFF_NO_QUEUE			= 1<<21,
+	IFF_OPENVSWITCH			= 1<<22,
 };
 
 #define IFF_802_1Q_VLAN			IFF_802_1Q_VLAN
 #define IFF_EBRIDGE			IFF_EBRIDGE
-#define IFF_SLAVE_INACTIVE		IFF_SLAVE_INACTIVE
-#define IFF_MASTER_8023AD		IFF_MASTER_8023AD
-#define IFF_MASTER_ALB			IFF_MASTER_ALB
 #define IFF_BONDING			IFF_BONDING
-#define IFF_SLAVE_NEEDARP		IFF_SLAVE_NEEDARP
 #define IFF_ISATAP			IFF_ISATAP
-#define IFF_MASTER_ARPMON		IFF_MASTER_ARPMON
 #define IFF_WAN_HDLC			IFF_WAN_HDLC
 #define IFF_XMIT_DST_RELEASE		IFF_XMIT_DST_RELEASE
 #define IFF_DONT_BRIDGE			IFF_DONT_BRIDGE
@@ -1301,6 +1307,9 @@
 #define IFF_XMIT_DST_RELEASE_PERM	IFF_XMIT_DST_RELEASE_PERM
 #define IFF_IPVLAN_MASTER		IFF_IPVLAN_MASTER
 #define IFF_IPVLAN_SLAVE		IFF_IPVLAN_SLAVE
+#define IFF_VRF_MASTER			IFF_VRF_MASTER
+#define IFF_NO_QUEUE			IFF_NO_QUEUE
+#define IFF_OPENVSWITCH			IFF_OPENVSWITCH
 
 /**
  *	struct net_device - The DEVICE structure.
@@ -1417,6 +1426,7 @@
  *	@dn_ptr:	DECnet specific data
  *	@ip6_ptr:	IPv6 specific data
  *	@ax25_ptr:	AX.25 specific data
+ *	@vrf_ptr:	VRF specific data
  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
  *
  *	@last_rx:	Time of last Rx
@@ -1448,6 +1458,8 @@
  *
  *	@xps_maps:	XXX: need comments on this one
  *
+ *	@offload_fwd_mark:	Offload device fwding mark
+ *
  *	@trans_start:		Time (in jiffies) of last Tx
  *	@watchdog_timeo:	Represents the timeout that is used by
  *				the watchdog ( see dev_watchdog() )
@@ -1502,6 +1514,10 @@
  *
  *	@qdisc_tx_busylock:	XXX: need comments on this one
  *
+ *	@proto_down:	protocol port state information can be sent to the
+ *			switch driver and used to set the phys state of the
+ *			switch port.
+ *
  *	FIXME: cleanup struct net_device such that network protocol info
  *	moves out.
  */
@@ -1629,6 +1645,7 @@
 	struct dn_dev __rcu     *dn_ptr;
 	struct inet6_dev __rcu	*ip6_ptr;
 	void			*ax25_ptr;
+	struct net_vrf_dev __rcu *vrf_ptr;
 	struct wireless_dev	*ieee80211_ptr;
 	struct wpan_dev		*ieee802154_ptr;
 #if IS_ENABLED(CONFIG_MPLS_ROUTING)
@@ -1685,6 +1702,10 @@
 	struct xps_dev_maps __rcu *xps_maps;
 #endif
 
+#ifdef CONFIG_NET_SWITCHDEV
+	u32			offload_fwd_mark;
+#endif
+
 	/* These may be needed for future network-power-down code. */
 
 	/*
@@ -1762,6 +1783,7 @@
 #endif
 	struct phy_device *phydev;
 	struct lock_class_key *qdisc_tx_busylock;
+	bool proto_down;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
@@ -2093,6 +2115,13 @@
 	unsigned int flags_changed;
 };
 
+struct netdev_notifier_changeupper_info {
+	struct netdev_notifier_info info; /* must be first */
+	struct net_device *upper_dev; /* new upper dev */
+	bool master; /* is upper dev master */
+	bool linking; /* is the notification for link or unlink */
+};
+
 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
 					     struct net_device *dev)
 {
@@ -2277,8 +2306,7 @@
 
 static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
 {
-	return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) ==
-		skb_gro_offset(skb));
+	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
 }
 
 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
@@ -2374,37 +2402,58 @@
 	grc->delta = 0;
 }
 
-static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
-					   int start, int offset,
-					   struct gro_remcsum *grc,
-					   bool nopartial)
+static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
+					    unsigned int off, size_t hdrlen,
+					    int start, int offset,
+					    struct gro_remcsum *grc,
+					    bool nopartial)
 {
 	__wsum delta;
+	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
 	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);
 
 	if (!nopartial) {
-		NAPI_GRO_CB(skb)->gro_remcsum_start =
-		    ((unsigned char *)ptr + start) - skb->head;
-		return;
+		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
+		return ptr;
 	}
 
-	delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset);
+	ptr = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, off + plen)) {
+		ptr = skb_gro_header_slow(skb, off + plen, off);
+		if (!ptr)
+			return NULL;
+	}
+
+	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
+			       start, offset);
 
 	/* Adjust skb->csum since we changed the packet */
 	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
 
-	grc->offset = (ptr + offset) - (void *)skb->head;
+	grc->offset = off + hdrlen + offset;
 	grc->delta = delta;
+
+	return ptr;
 }
 
 static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 					   struct gro_remcsum *grc)
 {
+	void *ptr;
+	size_t plen = grc->offset + sizeof(u16);
+
 	if (!grc->delta)
 		return;
 
-	remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta);
+	ptr = skb_gro_header_fast(skb, grc->offset);
+	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
+		ptr = skb_gro_header_slow(skb, plen, grc->offset);
+		if (!ptr)
+			return;
+	}
+
+	remcsum_unadjust((__sum16 *)ptr, grc->delta);
 }
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -2982,6 +3031,7 @@
 			 struct netdev_phys_item_id *ppid);
 int dev_get_phys_port_name(struct net_device *dev,
 			   char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 				    struct netdev_queue *txq, int *ret);
@@ -3781,6 +3831,42 @@
 	return dev->priv_flags & IFF_SUPP_NOFCS;
 }
 
+static inline bool netif_is_vrf(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_VRF_MASTER;
+}
+
+static inline bool netif_is_bridge_master(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_EBRIDGE;
+}
+
+static inline bool netif_is_ovs_master(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_OPENVSWITCH;
+}
+
+static inline bool netif_index_is_vrf(struct net *net, int ifindex)
+{
+	bool rc = false;
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+	struct net_device *dev;
+
+	if (ifindex == 0)
+		return false;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		rc = netif_is_vrf(dev);
+
+	rcu_read_unlock();
+#endif
+	return rc;
+}
+
 /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
 static inline void netif_keep_dst(struct net_device *dev)
 {
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 00050df..36a6525 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -11,6 +11,8 @@
 #include <linux/list.h>
 #include <linux/static_key.h>
 #include <linux/netfilter_defs.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
 
 #ifdef CONFIG_NETFILTER
 static inline int NF_DROP_GETERR(int verdict)
@@ -118,6 +120,13 @@
 };
 
 /* Function to register/unregister hook points. */
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			  unsigned int n);
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			     unsigned int n);
+
 int nf_register_hook(struct nf_hook_ops *reg);
 void nf_unregister_hook(struct nf_hook_ops *reg);
 int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
@@ -128,33 +137,26 @@
 int nf_register_sockopt(struct nf_sockopt_ops *reg);
 void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
 
-extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
-
 #ifdef HAVE_JUMP_LABEL
 extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
 				       u_int8_t pf, unsigned int hook)
 {
 	if (__builtin_constant_p(pf) &&
 	    __builtin_constant_p(hook))
 		return static_key_false(&nf_hooks_needed[pf][hook]);
 
-	return !list_empty(nf_hook_list);
+	return !list_empty(hook_list);
 }
 #else
-static inline bool nf_hook_list_active(struct list_head *nf_hook_list,
+static inline bool nf_hook_list_active(struct list_head *hook_list,
 				       u_int8_t pf, unsigned int hook)
 {
-	return !list_empty(nf_hook_list);
+	return !list_empty(hook_list);
 }
 #endif
 
-static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
-{
-	return nf_hook_list_active(&nf_hooks[pf][hook], pf, hook);
-}
-
 int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);
 
 /**
@@ -172,10 +174,13 @@
 				 int (*okfn)(struct sock *, struct sk_buff *),
 				 int thresh)
 {
-	if (nf_hooks_active(pf, hook)) {
+	struct net *net = dev_net(indev ? indev : outdev);
+	struct list_head *hook_list = &net->nf.hooks[pf][hook];
+
+	if (nf_hook_list_active(hook_list, pf, hook)) {
 		struct nf_hook_state state;
 
-		nf_hook_state_init(&state, &nf_hooks[pf][hook], hook, thresh,
+		nf_hook_state_init(&state, hook_list, hook, thresh,
 				   pf, indev, outdev, sk, okfn);
 		return nf_hook_slow(skb, &state);
 	}
@@ -363,6 +368,8 @@
 #endif /*CONFIG_NETFILTER*/
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <linux/netfilter/nf_conntrack_zones_common.h>
+
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
 void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
@@ -385,4 +392,15 @@
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
 
+/**
+ * nf_skb_duplicated - TEE target has sent a packet
+ *
+ * When an xtables target sends a packet, the OUTPUT and POSTROUTING
+ * hooks are traversed again, i.e. nft and xtables are invoked recursively.
+ *
+ * This is used by xtables TEE target to prevent the duplicated skb from
+ * being duplicated again.
+ */
+DECLARE_PER_CPU(bool, nf_skb_duplicated);
+
 #endif /*__LINUX_NETFILTER_H*/
diff --git a/include/linux/netfilter/nf_conntrack_zones_common.h b/include/linux/netfilter/nf_conntrack_zones_common.h
new file mode 100644
index 0000000..5d7cf36
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_zones_common.h
@@ -0,0 +1,23 @@
+#ifndef _NF_CONNTRACK_ZONES_COMMON_H
+#define _NF_CONNTRACK_ZONES_COMMON_H
+
+#include <uapi/linux/netfilter/nf_conntrack_tuple_common.h>
+
+#define NF_CT_DEFAULT_ZONE_ID	0
+
+#define NF_CT_ZONE_DIR_ORIG	(1 << IP_CT_DIR_ORIGINAL)
+#define NF_CT_ZONE_DIR_REPL	(1 << IP_CT_DIR_REPLY)
+
+#define NF_CT_DEFAULT_ZONE_DIR	(NF_CT_ZONE_DIR_ORIG | NF_CT_ZONE_DIR_REPL)
+
+#define NF_CT_FLAG_MARK		1
+
+struct nf_conntrack_zone {
+	u16	id;
+	u8	flags;
+	u8	dir;
+};
+
+extern const struct nf_conntrack_zone nf_ct_zone_dflt;
+
+#endif /* _NF_CONNTRACK_ZONES_COMMON_H */
diff --git a/include/linux/netfilter/nfnetlink_acct.h b/include/linux/netfilter/nfnetlink_acct.h
index 6ec9757..80ca889 100644
--- a/include/linux/netfilter/nfnetlink_acct.h
+++ b/include/linux/netfilter/nfnetlink_acct.h
@@ -2,6 +2,7 @@
 #define _NFNL_ACCT_H_
 
 #include <uapi/linux/netfilter/nfnetlink_acct.h>
+#include <net/net_namespace.h>
 
 enum {
 	NFACCT_NO_QUOTA		= -1,
@@ -11,7 +12,7 @@
 
 struct nf_acct;
 
-struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *filter_name);
 void nfnl_acct_put(struct nf_acct *acct);
 void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 extern int nfnl_acct_overquota(const struct sk_buff *skb,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 286098a..b006b71 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -3,6 +3,7 @@
 
 
 #include <linux/netdevice.h>
+#include <linux/static_key.h>
 #include <uapi/linux/netfilter/x_tables.h>
 
 /**
@@ -222,7 +223,6 @@
 	 * @stacksize jumps (number of user chains) can possibly be made.
 	 */
 	unsigned int stacksize;
-	unsigned int __percpu *stackptr;
 	void ***jumpstack;
 
 	unsigned char entries[0] __aligned(8);
@@ -281,6 +281,12 @@
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
+/* xt_tee_enabled - true if x_tables needs to handle reentrancy
+ *
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+ */
+extern struct static_key xt_tee_enabled;
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 6d80fc6..2437b8a 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,9 +17,6 @@
 
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 
-#define BRNF_BRIDGED_DNAT		0x02
-#define BRNF_NF_BRIDGE_PREROUTING	0x08
-
 int br_handle_frame_finish(struct sock *sk, struct sk_buff *skb);
 
 static inline void br_drop_fake_rtable(struct sk_buff *skb)
@@ -63,8 +60,17 @@
 {
 	return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
 }
+
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+	return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+}
 #else
 #define br_drop_fake_rtable(skb)	        do { } while (0)
+static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
+{
+	return false;
+}
 #endif /* CONFIG_BRIDGE_NETFILTER */
 
 #endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 8b7d28f..7715746 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -9,15 +9,6 @@
 
 #include <uapi/linux/netfilter_ipv6.h>
 
-
-#ifdef CONFIG_NETFILTER
-int ip6_route_me_harder(struct sk_buff *skb);
-__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
-			unsigned int dataoff, u_int8_t protocol);
-
-int ipv6_netfilter_init(void);
-void ipv6_netfilter_fini(void);
-
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
  * if IPv6 is a module.
@@ -30,6 +21,14 @@
 			int (*output)(struct sock *, struct sk_buff *));
 };
 
+#ifdef CONFIG_NETFILTER
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+			unsigned int dataoff, u_int8_t protocol);
+
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
+
 extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops;
 static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void)
 {
@@ -39,6 +38,7 @@
 #else /* CONFIG_NETFILTER */
 static inline int ipv6_netfilter_init(void) { return 0; }
 static inline void ipv6_netfilter_fini(void) { return; }
+static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; }
 #endif /* CONFIG_NETFILTER */
 
 #endif /*__LINUX_IP6_NETFILTER_H*/
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index c0d94ed..b5812c3 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -28,18 +28,32 @@
 	__u32			cc;	/* Controller Configuration */
 	__u32			rsvd1;	/* Reserved */
 	__u32			csts;	/* Controller Status */
-	__u32			rsvd2;	/* Reserved */
+	__u32			nssr;	/* Subsystem Reset */
 	__u32			aqa;	/* Admin Queue Attributes */
 	__u64			asq;	/* Admin SQ Base Address */
 	__u64			acq;	/* Admin CQ Base Address */
+	__u32			cmbloc; /* Controller Memory Buffer Location */
+	__u32			cmbsz;  /* Controller Memory Buffer Size */
 };
 
 #define NVME_CAP_MQES(cap)	((cap) & 0xffff)
 #define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
 #define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
+#define NVME_CAP_NSSRC(cap)	(((cap) >> 36) & 0x1)
 #define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)
 #define NVME_CAP_MPSMAX(cap)	(((cap) >> 52) & 0xf)
 
+#define NVME_CMB_BIR(cmbloc)	((cmbloc) & 0x7)
+#define NVME_CMB_OFST(cmbloc)	(((cmbloc) >> 12) & 0xfffff)
+#define NVME_CMB_SZ(cmbsz)	(((cmbsz) >> 12) & 0xfffff)
+#define NVME_CMB_SZU(cmbsz)	(((cmbsz) >> 8) & 0xf)
+
+#define NVME_CMB_WDS(cmbsz)	((cmbsz) & 0x10)
+#define NVME_CMB_RDS(cmbsz)	((cmbsz) & 0x8)
+#define NVME_CMB_LISTS(cmbsz)	((cmbsz) & 0x4)
+#define NVME_CMB_CQS(cmbsz)	((cmbsz) & 0x2)
+#define NVME_CMB_SQS(cmbsz)	((cmbsz) & 0x1)
+
 enum {
 	NVME_CC_ENABLE		= 1 << 0,
 	NVME_CC_CSS_NVM		= 0 << 4,
@@ -55,6 +69,7 @@
 	NVME_CC_IOCQES		= 4 << 20,
 	NVME_CSTS_RDY		= 1 << 0,
 	NVME_CSTS_CFS		= 1 << 1,
+	NVME_CSTS_NSSRO		= 1 << 4,
 	NVME_CSTS_SHST_NORMAL	= 0 << 2,
 	NVME_CSTS_SHST_OCCUR	= 1 << 2,
 	NVME_CSTS_SHST_CMPLT	= 2 << 2,
@@ -97,9 +112,14 @@
 	char serial[20];
 	char model[40];
 	char firmware_rev[8];
+	bool subsystem;
 	u32 max_hw_sectors;
 	u32 stripe_size;
 	u32 page_size;
+	void __iomem *cmb;
+	dma_addr_t cmb_dma_addr;
+	u64 cmb_size;
+	u32 cmbsz;
 	u16 oncs;
 	u16 abort_limit;
 	u8 event_limit;
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
new file mode 100644
index 0000000..9bb77d3
--- /dev/null
+++ b/include/linux/nvmem-consumer.h
@@ -0,0 +1,157 @@
+/*
+ * nvmem framework consumer.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_CONSUMER_H
+#define _LINUX_NVMEM_CONSUMER_H
+
+struct device;
+struct device_node;
+/* consumer cookie */
+struct nvmem_cell;
+struct nvmem_device;
+
+struct nvmem_cell_info {
+	const char		*name;
+	unsigned int		offset;
+	unsigned int		bytes;
+	unsigned int		bit_offset;
+	unsigned int		nbits;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+/* Cell based interface */
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+void nvmem_cell_put(struct nvmem_cell *cell);
+void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
+void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
+int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+
+/* direct nvmem device read/write interface */
+struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+					   const char *name);
+void nvmem_device_put(struct nvmem_device *nvmem);
+void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem);
+int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset,
+		      size_t bytes, void *buf);
+int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset,
+		       size_t bytes, void *buf);
+ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+			   struct nvmem_cell_info *info, void *buf);
+int nvmem_device_cell_write(struct nvmem_device *nvmem,
+			    struct nvmem_cell_info *info, void *buf);
+
+#else
+
+static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
+						const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
+				       const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline void devm_nvmem_cell_put(struct device *dev,
+				       struct nvmem_cell *cell)
+{
+
+}
+static inline void nvmem_cell_put(struct nvmem_cell *cell)
+{
+}
+
+static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_cell_write(struct nvmem_cell *cell,
+				    const char *buf, size_t len)
+{
+	return -ENOSYS;
+}
+
+static inline struct nvmem_device *nvmem_device_get(struct device *dev,
+						    const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
+							 const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline void nvmem_device_put(struct nvmem_device *nvmem)
+{
+}
+
+static inline void devm_nvmem_device_put(struct device *dev,
+					 struct nvmem_device *nvmem)
+{
+}
+
+static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
+					 struct nvmem_cell_info *info,
+					 void *buf)
+{
+	return -ENOSYS;
+}
+
+static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
+					  struct nvmem_cell_info *info,
+					  void *buf)
+{
+	return -ENOSYS;
+}
+
+static inline int nvmem_device_read(struct nvmem_device *nvmem,
+				    unsigned int offset, size_t bytes,
+				    void *buf)
+{
+	return -ENOSYS;
+}
+
+static inline int nvmem_device_write(struct nvmem_device *nvmem,
+				     unsigned int offset, size_t bytes,
+				     void *buf)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_NVMEM */
+
+#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+				     const char *name);
+struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+					 const char *name);
+#else
+static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
+				     const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
+						       const char *name)
+{
+	return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_NVMEM && CONFIG_OF */
+
+#endif  /* ifndef _LINUX_NVMEM_CONSUMER_H */
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
new file mode 100644
index 0000000..0b68caf
--- /dev/null
+++ b/include/linux/nvmem-provider.h
@@ -0,0 +1,47 @@
+/*
+ * nvmem framework provider.
+ *
+ * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _LINUX_NVMEM_PROVIDER_H
+#define _LINUX_NVMEM_PROVIDER_H
+
+struct nvmem_device;
+struct nvmem_cell_info;
+
+struct nvmem_config {
+	struct device		*dev;
+	const char		*name;
+	int			id;
+	struct module		*owner;
+	const struct nvmem_cell_info	*cells;
+	int			ncells;
+	bool			read_only;
+};
+
+#if IS_ENABLED(CONFIG_NVMEM)
+
+struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
+int nvmem_unregister(struct nvmem_device *nvmem);
+
+#else
+
+static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static inline int nvmem_unregister(struct nvmem_device *nvmem)
+{
+	return -ENOSYS;
+}
+
+#endif /* CONFIG_NVMEM */
+
+#endif  /* ifndef _LINUX_NVMEM_PROVIDER_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index edc068d..2194b8c 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -136,7 +136,8 @@
 
 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
 {
-	return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
+	return is_of_node(fwnode) ?
+		container_of(fwnode, struct device_node, fwnode) : NULL;
 }
 
 static inline bool of_have_populated_dt(void)
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 69dbe31..f319182 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -54,7 +54,7 @@
 			      struct of_mm_gpio_chip *mm_gc);
 extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc);
 
-extern void of_gpiochip_add(struct gpio_chip *gc);
+extern int of_gpiochip_add(struct gpio_chip *gc);
 extern void of_gpiochip_remove(struct gpio_chip *gc);
 extern int of_gpio_simple_xlate(struct gpio_chip *gc,
 				const struct of_phandle_args *gpiospec,
@@ -76,7 +76,7 @@
 	return -ENOSYS;
 }
 
-static inline void of_gpiochip_add(struct gpio_chip *gc) { }
+static inline int of_gpiochip_add(struct gpio_chip *gc) { return 0; }
 static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
 
 #endif /* CONFIG_OF_GPIO */
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d884929..4bcbd58 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -74,6 +74,7 @@
  */
 extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
 extern struct device_node *of_irq_find_parent(struct device_node *child);
+extern void of_msi_configure(struct device *dev, struct device_node *np);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 611a6911..956a100 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -72,6 +72,9 @@
 				const struct of_device_id *matches,
 				const struct of_dev_auxdata *lookup,
 				struct device *parent);
+extern int of_platform_default_populate(struct device_node *root,
+					const struct of_dev_auxdata *lookup,
+					struct device *parent);
 extern void of_platform_depopulate(struct device *parent);
 #else
 static inline int of_platform_populate(struct device_node *root,
@@ -81,6 +84,12 @@
 {
 	return -ENODEV;
 }
+static inline int of_platform_default_populate(struct device_node *root,
+					       const struct of_dev_auxdata *lookup,
+					       struct device *parent)
+{
+	return -ENODEV;
+}
 static inline void of_platform_depopulate(struct device *parent) { }
 #endif
 
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 7203178..57e0b82 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -3,55 +3,6 @@
 
 #include <linux/pci.h>
 
-/* Address Translation Service */
-struct pci_ats {
-	int pos;        /* capability position */
-	int stu;        /* Smallest Translation Unit */
-	int qdep;       /* Invalidate Queue Depth */
-	int ref_cnt;    /* Physical Function reference count */
-	unsigned int is_enabled:1;      /* Enable bit is set */
-};
-
-#ifdef CONFIG_PCI_ATS
-
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-	return dev->ats && dev->ats->is_enabled;
-}
-
-#else /* CONFIG_PCI_ATS */
-
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
-	return -ENODEV;
-}
-
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
-	return -ENODEV;
-}
-
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-	return 0;
-}
-
-#endif /* CONFIG_PCI_ATS */
-
 #ifdef CONFIG_PCI_PRI
 
 int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8a0321a..1a64733 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -180,6 +180,8 @@
 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
 	/* Do not use PM reset even if device advertises NoSoftRst- */
 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
+	/* Get VPD from function 0 VPD */
+	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
 };
 
 enum pci_irq_reroute_variant {
@@ -343,6 +345,7 @@
 	unsigned int	msi_enabled:1;
 	unsigned int	msix_enabled:1;
 	unsigned int	ari_enabled:1;	/* ARI forwarding */
+	unsigned int	ats_enabled:1;	/* Address Translation Service */
 	unsigned int	is_managed:1;
 	unsigned int    needs_freset:1; /* Dev requires fundamental reset */
 	unsigned int	state_saved:1;
@@ -366,7 +369,6 @@
 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 #ifdef CONFIG_PCI_MSI
-	struct list_head msi_list;
 	const struct attribute_group **msi_irq_groups;
 #endif
 	struct pci_vpd *vpd;
@@ -375,7 +377,9 @@
 		struct pci_sriov *sriov;	/* SR-IOV capability related */
 		struct pci_dev *physfn;	/* the PF this VF is associated with */
 	};
-	struct pci_ats	*ats;	/* Address Translation Service */
+	u16		ats_cap;	/* ATS Capability offset */
+	u8		ats_stu;	/* ATS Smallest Translation Unit */
+	atomic_t	ats_ref_cnt;	/* number of VFs with ATS enabled */
 #endif
 	phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
 	size_t romlen; /* Length of ROM if it's not from the BAR */
@@ -446,7 +450,8 @@
 	struct list_head children;	/* list of child buses */
 	struct list_head devices;	/* list of devices on this bus */
 	struct pci_dev	*self;		/* bridge device as seen by parent */
-	struct list_head slots;		/* list of slots on this bus */
+	struct list_head slots;		/* list of slots on this bus;
+					   protected by pci_slot_mutex */
 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
 	struct list_head resources;	/* address space routed to this bus */
 	struct resource busn_res;	/* bus numbers routed to this bus */
@@ -738,10 +743,11 @@
 void pcie_bus_configure_settings(struct pci_bus *bus);
 
 enum pcie_bus_config_types {
-	PCIE_BUS_TUNE_OFF,
-	PCIE_BUS_SAFE,
-	PCIE_BUS_PERFORMANCE,
-	PCIE_BUS_PEER2PEER,
+	PCIE_BUS_TUNE_OFF,	/* don't touch MPS at all */
+	PCIE_BUS_DEFAULT,	/* ensure MPS matches upstream bridge */
+	PCIE_BUS_SAFE,		/* use largest MPS that boot-time devices support */
+	PCIE_BUS_PERFORMANCE,	/* use MPS and MRRS for best performance */
+	PCIE_BUS_PEER2PEER,	/* set MPS = 128 for all devices */
 };
 
 extern enum pcie_bus_config_types pcie_bus_config;
@@ -787,6 +793,10 @@
 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
 void pci_bus_release_busn_res(struct pci_bus *b);
+struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
+				      struct pci_ops *ops, void *sysdata,
+				      struct list_head *resources,
+				      struct msi_controller *msi);
 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
 					     struct pci_ops *ops, void *sysdata,
 					     struct list_head *resources);
@@ -797,6 +807,11 @@
 				 const char *name,
 				 struct hotplug_slot *hotplug);
 void pci_destroy_slot(struct pci_slot *slot);
+#ifdef CONFIG_SYSFS
+void pci_dev_assign_slot(struct pci_dev *dev);
+#else
+static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
+#endif
 int pci_scan_slot(struct pci_bus *bus, int devfn);
 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -963,6 +978,23 @@
 	return pdev->is_managed;
 }
 
+static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
+{
+	pdev->irq = irq;
+	pdev->irq_managed = 1;
+}
+
+static inline void pci_reset_managed_irq(struct pci_dev *pdev)
+{
+	pdev->irq = 0;
+	pdev->irq_managed = 0;
+}
+
+static inline bool pci_has_managed_irq(struct pci_dev *pdev)
+{
+	return pdev->irq_managed && pdev->irq > 0;
+}
+
 void pci_disable_device(struct pci_dev *dev);
 
 extern unsigned int pcibios_max_latency;
@@ -1202,6 +1234,7 @@
 	u16	entry;	/* driver uses to specify entry, OS writes */
 };
 
+void pci_msi_setup_pci_dev(struct pci_dev *dev);
 
 #ifdef CONFIG_PCI_MSI
 int pci_msi_vec_count(struct pci_dev *dev);
@@ -1294,6 +1327,19 @@
 void ht_destroy_irq(unsigned int irq);
 #endif /* CONFIG_HT_IRQ */
 
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+#endif
+
 void pci_cfg_access_lock(struct pci_dev *dev);
 bool pci_cfg_access_trylock(struct pci_dev *dev);
 void pci_cfg_access_unlock(struct pci_dev *dev);
@@ -1645,6 +1691,8 @@
 int pcibios_add_device(struct pci_dev *dev);
 void pcibios_release_device(struct pci_dev *dev);
 void pcibios_penalize_isa_irq(int irq, int active);
+int pcibios_alloc_irq(struct pci_dev *dev);
+void pcibios_free_irq(struct pci_dev *dev);
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern struct dev_pm_ops pcibios_pm_ops;
@@ -1661,6 +1709,7 @@
 int pci_ext_cfg_avail(void);
 
 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
+void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
 
 #ifdef CONFIG_PCI_IOV
 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
@@ -1842,10 +1891,12 @@
 /* PCI <-> OF binding helpers */
 #ifdef CONFIG_OF
 struct device_node;
+struct irq_domain;
 void pci_set_of_node(struct pci_dev *dev);
 void pci_release_of_node(struct pci_dev *dev);
 void pci_set_bus_of_node(struct pci_bus *bus);
 void pci_release_bus_of_node(struct pci_bus *bus);
+struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
 
 /* Arch may override this (weak) */
 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
@@ -1868,6 +1919,8 @@
 static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
 static inline struct device_node *
 pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; }
+static inline struct irq_domain *
+pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
 #endif  /* CONFIG_OF */
 
 #ifdef CONFIG_EEH
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57f3a1c..8f16299 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -488,10 +488,8 @@
 #define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
 
 /*
- * Operations with implied preemption protection.  These operations can be
- * used without worrying about preemption.  Note that interrupts may still
- * occur while an operation is in progress and if the interrupt modifies
- * the variable too then RMW actions may not be reliable.
+ * Operations with implied preemption/interrupt protection.  These
+ * operations can be used without worrying about preemption or interrupt.
  */
 #define this_cpu_read(pcp)		__pcpu_size_call_return(this_cpu_read_, pcp)
 #define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, pcp, val)
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
new file mode 100644
index 0000000..bfa673b
--- /dev/null
+++ b/include/linux/perf/arm_pmu.h
@@ -0,0 +1,154 @@
+/*
+ *  linux/arch/arm/include/asm/pmu.h
+ *
+ *  Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+
+#include <asm/cputype.h>
+
+/*
+ * struct arm_pmu_platdata - ARM PMU platform data
+ *
+ * @handle_irq: an optional handler which will be called from the
+ *	interrupt and passed the address of the low level handler,
+ *	and can be used to implement any platform specific handling
+ *	before or after calling it.
+ */
+struct arm_pmu_platdata {
+	irqreturn_t (*handle_irq)(int irq, void *dev,
+				  irq_handler_t pmu_handler);
+};
+
+#ifdef CONFIG_ARM_PMU
+
+/*
+ * The ARMv7 CPU PMU supports up to 32 event counters.
+ */
+#define ARMPMU_MAX_HWEVENTS		32
+
+#define HW_OP_UNSUPPORTED		0xFFFF
+#define C(_x)				PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED		0xFFFF
+
+#define PERF_MAP_ALL_UNSUPPORTED					\
+	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
+[0 ... C(MAX) - 1] = {							\
+	[0 ... C(OP_MAX) - 1] = {					\
+		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
+	},								\
+}
+
+/* The events for a given PMU register set. */
+struct pmu_hw_events {
+	/*
+	 * The events that are active on the PMU for the given index.
+	 */
+	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
+
+	/*
+	 * A 1 bit for an index indicates that the counter is being used for
+	 * an event. A 0 means that the counter can be used.
+	 */
+	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
+
+	/*
+	 * Hardware lock to serialize accesses to PMU registers. Needed for the
+	 * read/modify/write sequences.
+	 */
+	raw_spinlock_t		pmu_lock;
+
+	/*
+	 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+	 * already have to allocate this struct per cpu.
+	 */
+	struct arm_pmu		*percpu_pmu;
+};
+
+struct arm_pmu {
+	struct pmu	pmu;
+	cpumask_t	active_irqs;
+	cpumask_t	supported_cpus;
+	int		*irq_affinity;
+	char		*name;
+	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
+	void		(*enable)(struct perf_event *event);
+	void		(*disable)(struct perf_event *event);
+	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
+	void		(*clear_event_idx)(struct pmu_hw_events *hw_events,
+					 struct perf_event *event);
+	int		(*set_event_filter)(struct hw_perf_event *evt,
+					    struct perf_event_attr *attr);
+	u32		(*read_counter)(struct perf_event *event);
+	void		(*write_counter)(struct perf_event *event, u32 val);
+	void		(*start)(struct arm_pmu *);
+	void		(*stop)(struct arm_pmu *);
+	void		(*reset)(void *);
+	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
+	void		(*free_irq)(struct arm_pmu *);
+	int		(*map_event)(struct perf_event *event);
+	int		num_events;
+	atomic_t	active_events;
+	struct mutex	reserve_mutex;
+	u64		max_period;
+	struct platform_device	*plat_device;
+	struct pmu_hw_events	__percpu *hw_events;
+	struct notifier_block	hotplug_nb;
+};
+
+#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
+
+int armpmu_register(struct arm_pmu *armpmu, int type);
+
+u64 armpmu_event_update(struct perf_event *event);
+
+int armpmu_event_set_period(struct perf_event *event);
+
+int armpmu_map_event(struct perf_event *event,
+		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+						[PERF_COUNT_HW_CACHE_OP_MAX]
+						[PERF_COUNT_HW_CACHE_RESULT_MAX],
+		     u32 raw_event_mask);
+
+struct pmu_probe_info {
+	unsigned int cpuid;
+	unsigned int mask;
+	int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn)	\
+{					\
+	.cpuid = (_cpuid),		\
+	.mask = (_mask),		\
+	.init = (_fn),			\
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table);
+
+#endif /* CONFIG_ARM_PMU */
+
+#endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2027809..092a0e8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -641,6 +641,8 @@
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
+extern struct perf_event *perf_event_get(unsigned int fd);
+extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
@@ -659,6 +661,7 @@
 				void *context);
 extern void perf_pmu_migrate_context(struct pmu *pmu,
 				int src_cpu, int dst_cpu);
+extern u64 perf_event_read_local(struct perf_event *event);
 extern u64 perf_event_read_value(struct perf_event *event,
 				 u64 *enabled, u64 *running);
 
@@ -979,6 +982,12 @@
 static inline void perf_event_exit_task(struct task_struct *child)	{ }
 static inline void perf_event_free_task(struct task_struct *task)	{ }
 static inline void perf_event_delayed_put(struct task_struct *task)	{ }
+static inline struct perf_event *perf_event_get(unsigned int fd)	{ return ERR_PTR(-EINVAL); }
+static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+	return ERR_PTR(-EINVAL);
+}
+static inline u64 perf_event_read_local(struct perf_event *event)	{ return -EINVAL; }
 static inline void perf_event_print_debug(void)				{ }
 static inline int perf_event_task_disable(void)				{ return -EINVAL; }
 static inline int perf_event_task_enable(void)				{ return -EINVAL; }
@@ -1011,6 +1020,7 @@
 static inline void perf_event_disable(struct perf_event *event)		{ }
 static inline int __perf_event_disable(void *info)			{ return -1; }
 static inline void perf_event_task_tick(void)				{ }
+static inline int perf_event_release_kernel(struct perf_event *event)	{ return 0; }
 #endif
 
 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_NO_HZ_FULL)
diff --git a/include/linux/phy.h b/include/linux/phy.h
index a26c3f8..962387a 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -330,6 +330,7 @@
  * c45_ids: 802.3-c45 Device Identifers if is_c45.
  * is_c45:  Set to true if this phy uses clause 45 addressing.
  * is_internal: Set to true if this phy is internal to a MAC.
+ * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
  * has_fixups: Set to true if this phy has fixups/quirks.
  * suspended: Set to true if this phy has been suspended successfully.
  * state: state of the PHY for management purposes
@@ -368,6 +369,7 @@
 	struct phy_c45_device_ids c45_ids;
 	bool is_c45;
 	bool is_internal;
+	bool is_pseudo_fixed_link;
 	bool has_fixups;
 	bool suspended;
 
@@ -424,6 +426,8 @@
 
 	struct net_device *attached_dev;
 
+	u8 mdix;
+
 	void (*adjust_link)(struct net_device *dev);
 };
 #define to_phy_device(d) container_of(d, struct phy_device, dev)
@@ -686,6 +690,16 @@
 {
 	return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
 		phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
+};
+
+/*
+ * phy_is_pseudo_fixed_link - Convenience function for testing if this
+ * PHY is the CPU port facing side of an Ethernet switch, or similar.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
+{
+	return phydev->is_pseudo_fixed_link;
 }
 
 /**
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index fe5732d..2400d2e 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,9 +13,11 @@
 
 #if IS_ENABLED(CONFIG_FIXED_PHY)
 extern int fixed_phy_add(unsigned int irq, int phy_id,
-			 struct fixed_phy_status *status);
+			 struct fixed_phy_status *status,
+			 int link_gpio);
 extern struct phy_device *fixed_phy_register(unsigned int irq,
 					     struct fixed_phy_status *status,
+					     int link_gpio,
 					     struct device_node *np);
 extern void fixed_phy_del(int phy_addr);
 extern int fixed_phy_set_link_update(struct phy_device *phydev,
@@ -26,12 +28,14 @@
 			   const struct fixed_phy_status *changed);
 #else
 static inline int fixed_phy_add(unsigned int irq, int phy_id,
-				struct fixed_phy_status *status)
+				struct fixed_phy_status *status,
+				int link_gpio)
 {
 	return -ENODEV;
 }
 static inline struct phy_device *fixed_phy_register(unsigned int irq,
 						struct fixed_phy_status *status,
+						int gpio_link,
 						struct device_node *np)
 {
 	return ERR_PTR(-ENODEV);
diff --git a/include/linux/platform_data/atmel.h b/include/linux/platform_data/atmel.h
index 4b452c6..527a85c 100644
--- a/include/linux/platform_data/atmel.h
+++ b/include/linux/platform_data/atmel.h
@@ -46,18 +46,6 @@
 #define AT91_IDE_SWAP_A0_A2	0x02
 };
 
- /* USB Host */
-#define AT91_MAX_USBH_PORTS	3
-struct at91_usbh_data {
-	int		vbus_pin[AT91_MAX_USBH_PORTS];	/* port power-control pin */
-	int             overcurrent_pin[AT91_MAX_USBH_PORTS];
-	u8		ports;				/* number of ports on root hub */
-	u8              overcurrent_supported;
-	u8              vbus_pin_active_low[AT91_MAX_USBH_PORTS];
-	u8              overcurrent_status[AT91_MAX_USBH_PORTS];
-	u8              overcurrent_changed[AT91_MAX_USBH_PORTS];
-};
-
  /* NAND / SmartMedia */
 struct atmel_nand_data {
 	int		enable_pin;		/* chip enable */
diff --git a/include/linux/platform_data/atmel_mxt_ts.h b/include/linux/platform_data/atmel_mxt_ts.h
new file mode 100644
index 0000000..695035a
--- /dev/null
+++ b/include/linux/platform_data/atmel_mxt_ts.h
@@ -0,0 +1,31 @@
+/*
+ * Atmel maXTouch Touchscreen driver
+ *
+ * Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Author: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+#define __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H
+
+#include <linux/types.h>
+
+enum mxt_suspend_mode {
+	MXT_SUSPEND_DEEP_SLEEP	= 0,
+	MXT_SUSPEND_T9_CTRL	= 1,
+};
+
+/* The platform data for the Atmel maXTouch touchscreen driver */
+struct mxt_platform_data {
+	unsigned long irqflags;
+	u8 t19_num_keys;
+	const unsigned int *t19_keymap;
+	enum mxt_suspend_mode suspend_mode;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_ATMEL_MXT_TS_H */
diff --git a/include/linux/platform_data/clk-ux500.h b/include/linux/platform_data/clk-ux500.h
index 97baf83..3af0da1 100644
--- a/include/linux/platform_data/clk-ux500.h
+++ b/include/linux/platform_data/clk-ux500.h
@@ -10,14 +10,8 @@
 #ifndef __CLK_UX500_H
 #define __CLK_UX500_H
 
-void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		       u32 clkrst5_base, u32 clkrst6_base);
-
-void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		    u32 clkrst5_base, u32 clkrst6_base);
-void u9540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		    u32 clkrst5_base, u32 clkrst6_base);
-void u8540_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base,
-		    u32 clkrst5_base, u32 clkrst6_base);
+void u8500_clk_init(void);
+void u9540_clk_init(void);
+void u8540_clk_init(void);
 
 #endif /* __CLK_UX500_H */
diff --git a/include/linux/platform_data/gpio-em.h b/include/linux/platform_data/gpio-em.h
deleted file mode 100644
index 7c5a519..0000000
--- a/include/linux/platform_data/gpio-em.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __GPIO_EM_H__
-#define __GPIO_EM_H__
-
-struct gpio_em_config {
-	unsigned int gpio_base;
-	unsigned int irq_base;
-	unsigned int number_of_pins;
-	const char *pctl_name;
-};
-
-#endif /* __GPIO_EM_H__ */
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
new file mode 100644
index 0000000..f16542c
--- /dev/null
+++ b/include/linux/platform_data/itco_wdt.h
@@ -0,0 +1,19 @@
+/*
+ * Platform data for the Intel TCO Watchdog
+ */
+
+#ifndef _ITCO_WDT_H_
+#define _ITCO_WDT_H_
+
+/* Watchdog resources */
+#define ICH_RES_IO_TCO		0
+#define ICH_RES_IO_SMI		1
+#define ICH_RES_MEM_OFF		2
+#define ICH_RES_MEM_GCS_PMC	0
+
+struct itco_wdt_platform_data {
+	char name[32];
+	unsigned int version;
+};
+
+#endif /* _ITCO_WDT_H_ */
diff --git a/include/linux/platform_data/leds-kirkwood-ns2.h b/include/linux/platform_data/leds-kirkwood-ns2.h
index 6a9fed5..eb8a686 100644
--- a/include/linux/platform_data/leds-kirkwood-ns2.h
+++ b/include/linux/platform_data/leds-kirkwood-ns2.h
@@ -9,11 +9,25 @@
 #ifndef __LEDS_KIRKWOOD_NS2_H
 #define __LEDS_KIRKWOOD_NS2_H
 
+enum ns2_led_modes {
+	NS_V2_LED_OFF,
+	NS_V2_LED_ON,
+	NS_V2_LED_SATA,
+};
+
+struct ns2_led_modval {
+	enum ns2_led_modes	mode;
+	int			cmd_level;
+	int			slow_level;
+};
+
 struct ns2_led {
 	const char	*name;
 	const char	*default_trigger;
 	unsigned	cmd;
 	unsigned	slow;
+	int		num_modes;
+	struct ns2_led_modval *modval;
 };
 
 struct ns2_led_platform_data {
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 9c7fd1e..1b2ba24 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,7 +136,6 @@
 		Only valid when mode is PWM_BASED.
  * @size_program : total size of lp855x_rom_data
  * @rom_data : list of new eeprom/eprom registers
- * @supply : regulator that supplies 3V input
  */
 struct lp855x_platform_data {
 	const char *name;
@@ -145,7 +144,6 @@
 	unsigned int period_ns;
 	int size_program;
 	struct lp855x_rom_data *rom_data;
-	struct regulator *supply;
 };
 
 #endif
diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
new file mode 100644
index 0000000..646af6f
--- /dev/null
+++ b/include/linux/platform_data/pixcir_i2c_ts.h
@@ -0,0 +1,63 @@
+#ifndef	_PIXCIR_I2C_TS_H
+#define	_PIXCIR_I2C_TS_H
+
+/*
+ * Register map
+ */
+#define PIXCIR_REG_POWER_MODE	51
+#define PIXCIR_REG_INT_MODE	52
+
+/*
+ * Power modes:
+ * active: max scan speed
+ * idle: lower scan speed with automatic transition to active on touch
+ * halt: datasheet says sleep but this is more like halt as the chip
+ *       clocks are cut and it can only be brought out of this mode
+ *	 using the RESET pin.
+ */
+enum pixcir_power_mode {
+	PIXCIR_POWER_ACTIVE,
+	PIXCIR_POWER_IDLE,
+	PIXCIR_POWER_HALT,
+};
+
+#define PIXCIR_POWER_MODE_MASK	0x03
+#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
+
+/*
+ * Interrupt modes:
+ * periodical: interrupt is asserted periodically
+ * diff coordinates: interrupt is asserted when coordinates change
+ * level on touch: interrupt level asserted during touch
+ * pulse on touch: interrupt pulse asserted during touch
+ *
+ */
+enum pixcir_int_mode {
+	PIXCIR_INT_PERIODICAL,
+	PIXCIR_INT_DIFF_COORD,
+	PIXCIR_INT_LEVEL_TOUCH,
+	PIXCIR_INT_PULSE_TOUCH,
+};
+
+#define PIXCIR_INT_MODE_MASK	0x03
+#define PIXCIR_INT_ENABLE	(1UL << 3)
+#define PIXCIR_INT_POL_HIGH	(1UL << 2)
+
+/**
+ * struct pixcir_i2c_chip_data - chip related data
+ * @max_fingers:	Max number of fingers reported simultaneously by h/w
+ * @has_hw_ids:		Hardware supports finger tracking IDs
+ *
+ */
+struct pixcir_i2c_chip_data {
+	u8 max_fingers;
+	bool has_hw_ids;
+};
+
+struct pixcir_ts_platform_data {
+	int x_max;
+	int y_max;
+	struct pixcir_i2c_chip_data chip;
+};
+
+#endif
diff --git a/include/linux/platform_data/spi-davinci.h b/include/linux/platform_data/spi-davinci.h
index 8dc2fa47..f4edcb0 100644
--- a/include/linux/platform_data/spi-davinci.h
+++ b/include/linux/platform_data/spi-davinci.h
@@ -49,6 +49,7 @@
 	u8			num_chipselect;
 	u8			intr_line;
 	u8			*chip_sel;
+	u8			prescaler_limit;
 	bool			cshold_bug;
 	enum dma_event_q	dma_event_q;
 };
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
new file mode 100644
index 0000000..54b0448
--- /dev/null
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -0,0 +1,20 @@
+/*
+ *  MTK SPI bus driver definitions
+ *
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Leilk Liu <leilk.liu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ____LINUX_PLATFORM_DATA_SPI_MTK_H
+#define ____LINUX_PLATFORM_DATA_SPI_MTK_H
+
+/* Board specific platform_data */
+struct mtk_chip_config {
+	u32 tx_mlsb;
+	u32 rx_mlsb;
+};
+#endif
diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h
deleted file mode 100644
index d9d400a..0000000
--- a/include/linux/platform_data/st_nci.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Driver include for ST NCI NFC chip family.
- *
- * Copyright (C) 2014-2015  STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST_NCI_H_
-#define _ST_NCI_H_
-
-#define ST_NCI_DRIVER_NAME "st_nci"
-
-struct st_nci_nfc_platform_data {
-	unsigned int gpio_reset;
-	unsigned int irq_polarity;
-};
-
-#endif /* _ST_NCI_H_ */
diff --git a/include/linux/platform_data/video-ep93xx.h b/include/linux/platform_data/video-ep93xx.h
index 92fc2b2..699ac41 100644
--- a/include/linux/platform_data/video-ep93xx.h
+++ b/include/linux/platform_data/video-ep93xx.h
@@ -2,11 +2,8 @@
 #define __VIDEO_EP93XX_H
 
 struct platform_device;
-struct fb_videomode;
 struct fb_info;
 
-#define EP93XXFB_USE_MODEDB		0
-
 /* VideoAttributes flags */
 #define EP93XXFB_STATE_MACHINE_ENABLE	(1 << 0)
 #define EP93XXFB_PIXEL_CLOCK_ENABLE	(1 << 1)
@@ -38,12 +35,7 @@
 					 EP93XXFB_PIXEL_DATA_ENABLE)
 
 struct ep93xxfb_mach_info {
-	unsigned int			num_modes;
-	const struct fb_videomode	*modes;
-	const struct fb_videomode	*default_mode;
-	int				bpp;
 	unsigned int			flags;
-
 	int	(*setup)(struct platform_device *pdev);
 	void	(*teardown)(struct platform_device *pdev);
 	void	(*blank)(int blank_mode, struct fb_info *info);
diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h
index 0472ab2..7bdece8 100644
--- a/include/linux/platform_data/zforce_ts.h
+++ b/include/linux/platform_data/zforce_ts.h
@@ -16,9 +16,6 @@
 #define _LINUX_INPUT_ZFORCE_TS_H
 
 struct zforce_ts_platdata {
-	int gpio_int;
-	int gpio_rst;
-
 	unsigned int x_max;
 	unsigned int y_max;
 };
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 681ccb0..b1cf7e7 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -22,9 +22,6 @@
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
-	GPD_STATE_WAIT_MASTER,	/* PM domain's master is being waited for */
-	GPD_STATE_BUSY,		/* Something is happening to the PM domain */
-	GPD_STATE_REPEAT,	/* Power off in progress, to be repeated */
 	GPD_STATE_POWER_OFF,	/* PM domain is off */
 };
 
@@ -59,9 +56,6 @@
 	unsigned int in_progress;	/* Number of devices being suspended now */
 	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
-	wait_queue_head_t status_wait_queue;
-	struct task_struct *poweroff_task;	/* Powering off task */
-	unsigned int resume_count;	/* Number of devices being resumed */
 	unsigned int device_count;	/* Number of devices */
 	unsigned int suspended_count;	/* System suspend device counter */
 	unsigned int prepared_count;	/* Suspend counter of prepared devices */
@@ -113,7 +107,6 @@
 	struct pm_domain_data base;
 	struct gpd_timing_data td;
 	struct notifier_block nb;
-	int need_restore;
 };
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -228,8 +221,6 @@
 	return -ENOSYS;
 }
 static inline void pm_genpd_poweroff_unused(void) {}
-#define simple_qos_governor NULL
-#define pm_domain_always_on_gov NULL
 #endif
 
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cec2d45..cab7ba5 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -30,7 +30,10 @@
 
 unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
 
+bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
+
 int dev_pm_opp_get_opp_count(struct device *dev);
+unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					      unsigned long freq,
@@ -62,11 +65,21 @@
 	return 0;
 }
 
+static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
+{
+	return false;
+}
+
 static inline int dev_pm_opp_get_opp_count(struct device *dev)
 {
 	return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
+{
+	return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					unsigned long freq, bool available)
 {
@@ -115,6 +128,10 @@
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
 int of_init_opp_table(struct device *dev);
 void of_free_opp_table(struct device *dev);
+int of_cpumask_init_opp_table(cpumask_var_t cpumask);
+void of_cpumask_free_opp_table(cpumask_var_t cpumask);
+int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
+int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask);
 #else
 static inline int of_init_opp_table(struct device *dev)
 {
@@ -124,6 +141,25 @@
 static inline void of_free_opp_table(struct device *dev)
 {
 }
+
+static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask)
+{
+	return -ENOSYS;
+}
+
+static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+{
+}
+
+static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	return -ENOSYS;
+}
+
+static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+	return -ENOSYS;
+}
 #endif
 
 #endif		/* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 7b3ae0c..0f65d36 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -161,6 +161,8 @@
 int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
 s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
 int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
+int dev_pm_qos_expose_latency_tolerance(struct device *dev);
+void dev_pm_qos_hide_latency_tolerance(struct device *dev);
 
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
 {
@@ -229,6 +231,9 @@
 			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
 static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
 			{ return 0; }
+static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
+			{ return 0; }
+static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}
 
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
 static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 30e84d4..3bdbb41 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -98,11 +98,6 @@
 	return dev->power.runtime_status == RPM_SUSPENDED;
 }
 
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev)
-{
-	return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1;
-}
-
 static inline bool pm_runtime_enabled(struct device *dev)
 {
 	return !dev->power.disable_depth;
@@ -164,7 +159,6 @@
 static inline bool pm_runtime_suspended(struct device *dev) { return false; }
 static inline bool pm_runtime_active(struct device *dev) { return true; }
 static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
-static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; }
 static inline bool pm_runtime_enabled(struct device *dev) { return false; }
 
 static inline void pm_runtime_no_callbacks(struct device *dev) {}
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f1..bea8dd8 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,13 +84,21 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
+/*
+ * The preempt_count offset after preempt_disable();
+ */
 #if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
 #else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET	0
 #endif
 
 /*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+
+/*
  * The preempt_count offset needed for things like:
  *
  *  spin_lock_bh()
@@ -103,7 +111,7 @@
  *
  * Work as expected.
  */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
 
 /*
  * Are we running in atomic context?  WARNING: this macro cannot
@@ -124,7 +132,8 @@
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+	({ preempt_count_sub(1); should_resched(0); })
 #else
 #define preempt_count_add(val)	__preempt_count_add(val)
 #define preempt_count_sub(val)	__preempt_count_sub(val)
@@ -184,7 +193,7 @@
 
 #define preempt_check_resched() \
 do { \
-	if (should_resched()) \
+	if (should_resched(0)) \
 		__preempt_schedule(); \
 } while (0)
 
diff --git a/include/linux/property.h b/include/linux/property.h
index 76ebde9..a59c6ee 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -166,4 +166,8 @@
 
 bool device_dma_is_coherent(struct device *dev);
 
+int device_get_phy_mode(struct device *dev);
+
+void *device_get_mac_address(struct device *dev, char *addr, int alen);
+
 #endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 00e8e8f..5440f64 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -33,7 +33,7 @@
 /*
  * global proportion descriptor
  *
- * this is needed to consitently flip prop_global structures.
+ * this is needed to consistently flip prop_global structures.
  */
 struct prop_descriptor {
 	int index;
diff --git a/include/linux/psci.h b/include/linux/psci.h
new file mode 100644
index 0000000..a682fcc
--- /dev/null
+++ b/include/linux/psci.h
@@ -0,0 +1,52 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2015 ARM Limited
+ */
+
+#ifndef __LINUX_PSCI_H
+#define __LINUX_PSCI_H
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#define PSCI_POWER_STATE_TYPE_STANDBY		0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
+
+bool psci_tos_resident_on(int cpu);
+
+struct psci_operations {
+	int (*cpu_suspend)(u32 state, unsigned long entry_point);
+	int (*cpu_off)(u32 state);
+	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+	int (*migrate)(unsigned long cpuid);
+	int (*affinity_info)(unsigned long target_affinity,
+			unsigned long lowest_affinity_level);
+	int (*migrate_info_type)(void);
+};
+
+extern struct psci_operations psci_ops;
+
+#if defined(CONFIG_ARM_PSCI_FW)
+int __init psci_dt_init(void);
+#else
+static inline int psci_dt_init(void) { return 0; }
+#endif
+
+#if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI)
+int __init psci_acpi_init(void);
+bool __init acpi_psci_present(void);
+bool __init acpi_psci_use_hvc(void);
+#else
+static inline int psci_acpi_init(void) { return 0; }
+static inline bool acpi_psci_present(void) { return false; }
+#endif
+
+#endif /* __LINUX_PSCI_H */
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 0485bab..9227377 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@
 	QUARK_X1000_SSP,
 	LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
 	LPSS_BYT_SSP,
+	LPSS_SPT_SSP,
 };
 
 struct ssp_device {
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 77ca660..7a57c28 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -43,7 +43,7 @@
 void inode_sub_rsv_space(struct inode *inode, qsize_t number);
 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
-void dquot_initialize(struct inode *inode);
+int dquot_initialize(struct inode *inode);
 void dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, struct kqid qid);
 static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -200,8 +200,9 @@
 	return 0;
 }
 
-static inline void dquot_initialize(struct inode *inode)
+static inline int dquot_initialize(struct inode *inode)
 {
+	return 0;
 }
 
 static inline void dquot_drop(struct inode *inode)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4cf5f51..ff47651 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -226,6 +226,37 @@
 };
 void wakeme_after_rcu(struct rcu_head *head);
 
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+		   struct rcu_synchronize *rs_array);
+
+#define _wait_rcu_gp(checktiny, ...) \
+do { \
+	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \
+	const int __n = ARRAY_SIZE(__crcu_array); \
+	struct rcu_synchronize __rs_array[__n]; \
+	\
+	__wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \
+} while (0)
+
+#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
+
+/**
+ * synchronize_rcu_mult - Wait concurrently for multiple grace periods
+ * @...: List of call_rcu() functions for the flavors to wait on.
+ *
+ * This macro waits concurrently for multiple flavors of RCU grace periods.
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
+ * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
+ * domain requires you to write a wrapper function for that SRCU domain's
+ * call_srcu() function, supplying the corresponding srcu_struct.
+ *
+ * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
+ * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * is automatically a grace period.
+ */
+#define synchronize_rcu_mult(...) \
+	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
+
 /**
  * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
  * @head: structure to be used for queueing the RCU updates.
@@ -309,7 +340,7 @@
 }
 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 void rcu_user_enter(void);
 void rcu_user_exit(void);
 #else
@@ -317,7 +348,7 @@
 static inline void rcu_user_exit(void) { }
 static inline void rcu_user_hooks_switch(struct task_struct *prev,
 					 struct task_struct *next) { }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
@@ -392,10 +423,6 @@
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
 
-typedef void call_rcu_func_t(struct rcu_head *head,
-			     void (*func)(struct rcu_head *head));
-void wait_rcu_gp(call_rcu_func_t crf);
-
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 #include <linux/rcutree.h>
 #elif defined(CONFIG_TINY_RCU)
@@ -469,46 +496,10 @@
  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
  * RCU-sched read-side critical section.  In absence of
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
- * critical section unless it can prove otherwise.  Note that disabling
- * of preemption (including disabling irqs) counts as an RCU-sched
- * read-side critical section.  This is useful for debug checks in functions
- * that required that they be called within an RCU-sched read-side
- * critical section.
- *
- * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
- * and while lockdep is disabled.
- *
- * Note that if the CPU is in the idle loop from an RCU point of
- * view (ie: that we are in the section between rcu_idle_enter() and
- * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
- * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
- * that are in such a section, considering these as in extended quiescent
- * state, so such a CPU is effectively never in an RCU read-side critical
- * section regardless of what RCU primitives it invokes.  This state of
- * affairs is required --- we need to keep an RCU-free window in idle
- * where the CPU may possibly enter into low power mode. This way we can
- * notice an extended quiescent state to other CPUs that started a grace
- * period. Otherwise we would delay any grace period as long as we run in
- * the idle task.
- *
- * Similarly, we avoid claiming an SRCU read lock held if the current
- * CPU is offline.
+ * critical section unless it can prove otherwise.
  */
 #ifdef CONFIG_PREEMPT_COUNT
-static inline int rcu_read_lock_sched_held(void)
-{
-	int lockdep_opinion = 0;
-
-	if (!debug_lockdep_rcu_enabled())
-		return 1;
-	if (!rcu_is_watching())
-		return 0;
-	if (!rcu_lockdep_current_cpu_online())
-		return 0;
-	if (debug_locks)
-		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
-}
+int rcu_read_lock_sched_held(void);
 #else /* #ifdef CONFIG_PREEMPT_COUNT */
 static inline int rcu_read_lock_sched_held(void)
 {
@@ -545,6 +536,11 @@
 
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/* Deprecate rcu_lockdep_assert():  Use RCU_LOCKDEP_WARN() instead. */
+static inline void __attribute((deprecated)) deprecate_rcu_lockdep_assert(void)
+{
+}
+
 #ifdef CONFIG_PROVE_RCU
 
 /**
@@ -555,17 +551,32 @@
 #define rcu_lockdep_assert(c, s)					\
 	do {								\
 		static bool __section(.data.unlikely) __warned;		\
+		deprecate_rcu_lockdep_assert();				\
 		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
 			__warned = true;				\
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
 		}							\
 	} while (0)
 
+/**
+ * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
+ * @c: condition to check
+ * @s: informative message
+ */
+#define RCU_LOCKDEP_WARN(c, s)						\
+	do {								\
+		static bool __section(.data.unlikely) __warned;		\
+		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
+			__warned = true;				\
+			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
+		}							\
+	} while (0)
+
 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
 static inline void rcu_preempt_sleep_check(void)
 {
-	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
-			   "Illegal context switch in RCU read-side critical section");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+			 "Illegal context switch in RCU read-side critical section");
 }
 #else /* #ifdef CONFIG_PROVE_RCU */
 static inline void rcu_preempt_sleep_check(void)
@@ -576,15 +587,16 @@
 #define rcu_sleep_check()						\
 	do {								\
 		rcu_preempt_sleep_check();				\
-		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
-				   "Illegal context switch in RCU-bh read-side critical section"); \
-		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
-				   "Illegal context switch in RCU-sched read-side critical section"); \
+		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
+				 "Illegal context switch in RCU-bh read-side critical section"); \
+		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
+				 "Illegal context switch in RCU-sched read-side critical section"); \
 	} while (0)
 
 #else /* #ifdef CONFIG_PROVE_RCU */
 
-#define rcu_lockdep_assert(c, s) do { } while (0)
+#define rcu_lockdep_assert(c, s) deprecate_rcu_lockdep_assert()
+#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
 #define rcu_sleep_check() do { } while (0)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
@@ -615,13 +627,13 @@
 ({ \
 	/* Dependency order vs. p above. */ \
 	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
-	rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
+	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
-	rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
+	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
 	rcu_dereference_sparse(p, space); \
 	((typeof(*p) __force __kernel *)(p)); \
 })
@@ -845,8 +857,8 @@
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
-	rcu_lockdep_assert(rcu_is_watching(),
-			   "rcu_read_lock() used illegally while idle");
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_lock() used illegally while idle");
 }
 
 /*
@@ -896,8 +908,8 @@
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_lockdep_assert(rcu_is_watching(),
-			   "rcu_read_unlock() used illegally while idle");
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_unlock() used illegally while idle");
 	__release(RCU);
 	__rcu_read_unlock();
 	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
@@ -925,8 +937,8 @@
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
-	rcu_lockdep_assert(rcu_is_watching(),
-			   "rcu_read_lock_bh() used illegally while idle");
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_lock_bh() used illegally while idle");
 }
 
 /*
@@ -936,8 +948,8 @@
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_lockdep_assert(rcu_is_watching(),
-			   "rcu_read_unlock_bh() used illegally while idle");
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
 	local_bh_enable();
@@ -961,8 +973,8 @@
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
-	rcu_lockdep_assert(rcu_is_watching(),
-			   "rcu_read_lock_sched() used illegally while idle");
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_lock_sched() used illegally while idle");
 }
 
 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
@@ -979,8 +991,8 @@
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_lockdep_assert(rcu_is_watching(),
-			   "rcu_read_unlock_sched() used illegally while idle");
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			 "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
 	preempt_enable();
@@ -1031,7 +1043,7 @@
 #define RCU_INIT_POINTER(p, v) \
 	do { \
 		rcu_dereference_sparse(p, __rcu); \
-		p = RCU_INITIALIZER(v); \
+		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
 	} while (0)
 
 /**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 3df6c1e..ff968b7 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -37,6 +37,16 @@
 	might_sleep();
 }
 
+static inline unsigned long get_state_synchronize_sched(void)
+{
+	return 0;
+}
+
+static inline void cond_synchronize_sched(unsigned long oldstate)
+{
+	might_sleep();
+}
+
 static inline void rcu_barrier_bh(void)
 {
 	wait_rcu_gp(call_rcu_bh);
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 4568791..5abec82 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -76,6 +76,8 @@
 void rcu_barrier_sched(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
+unsigned long get_state_synchronize_sched(void);
+void cond_synchronize_sched(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 59c55ea..4a67590 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -50,6 +50,17 @@
 	unsigned int def;
 };
 
+/**
+ * struct reg_sequence - Register/value pairs for sequences of writes
+ *
+ * @reg: Register address.
+ * @def: Register value.
+ */
+struct reg_sequence {
+	unsigned int reg;
+	unsigned int def;
+};
+
 #ifdef CONFIG_REGMAP
 
 enum regmap_endian {
@@ -410,10 +421,10 @@
 		     const void *val, size_t val_len);
 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 			size_t val_count);
-int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
+int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
 			int num_regs);
 int regmap_multi_reg_write_bypassed(struct regmap *map,
-				    const struct reg_default *regs,
+				    const struct reg_sequence *regs,
 				    int num_regs);
 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 			   const void *val, size_t val_len);
@@ -424,6 +435,8 @@
 		     size_t val_count);
 int regmap_update_bits(struct regmap *map, unsigned int reg,
 		       unsigned int mask, unsigned int val);
+int regmap_write_bits(struct regmap *map, unsigned int reg,
+		       unsigned int mask, unsigned int val);
 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
 			     unsigned int mask, unsigned int val);
 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
@@ -450,7 +463,7 @@
 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
 			      const struct regmap_access_table *table);
 
-int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
 			  int num_regs);
 int regmap_parse_val(struct regmap *map, const void *buf,
 				unsigned int *val);
@@ -503,6 +516,8 @@
 
 int regmap_fields_write(struct regmap_field *field, unsigned int id,
 			unsigned int val);
+int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
+			unsigned int val);
 int regmap_fields_read(struct regmap_field *field, unsigned int id,
 		       unsigned int *val);
 int regmap_fields_update_bits(struct regmap_field *field,  unsigned int id,
@@ -645,6 +660,13 @@
 	return -EINVAL;
 }
 
+static inline int regmap_write_bits(struct regmap *map, unsigned int reg,
+				     unsigned int mask, unsigned int val)
+{
+	WARN_ONCE(1, "regmap API is disabled");
+	return -EINVAL;
+}
+
 static inline int regmap_update_bits_async(struct regmap *map,
 					   unsigned int reg,
 					   unsigned int mask, unsigned int val)
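
A minimal sketch of the new struct reg_sequence being fed to regmap_multi_reg_write(); the register addresses and values are made up for illustration:

#include <linux/kernel.h>
#include <linux/regmap.h>

static const struct reg_sequence example_init_seq[] = {
	{ .reg = 0x00, .def = 0x01 },	/* hypothetical soft-reset */
	{ .reg = 0x10, .def = 0xff },	/* hypothetical mask register */
};

static int example_chip_init(struct regmap *map)
{
	/* Write the whole sequence in order, coalescing where possible. */
	return regmap_multi_reg_write(map, example_init_seq,
				      ARRAY_SIZE(example_init_seq));
}
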
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a689e..9e0e769 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -550,8 +550,24 @@
 {
 	return 0;
 }
+
+static inline int regulator_list_voltage(struct regulator *regulator, unsigned selector)
+{
+	return -EINVAL;
+}
+
 #endif
 
+static inline int regulator_set_voltage_triplet(struct regulator *regulator,
+						int min_uV, int target_uV,
+						int max_uV)
+{
+	if (regulator_set_voltage(regulator, target_uV, max_uV) == 0)
+		return 0;
+
+	return regulator_set_voltage(regulator, min_uV, max_uV);
+}
+
 static inline int regulator_set_voltage_tol(struct regulator *regulator,
 					    int new_uV, int tol_uV)
 {
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 5dd65ac..a43a5ca 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,16 +1,16 @@
 /*
- * da9211.h - Regulator device driver for DA9211/DA9213
- * Copyright (C) 2014  Dialog Semiconductor Ltd.
+ * da9211.h - Regulator device driver for DA9211/DA9213/DA9215
+ * Copyright (C) 2015  Dialog Semiconductor Ltd.
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful,
+ * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Library General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  */
 
 #ifndef __LINUX_REGULATOR_DA9211_H
@@ -23,6 +23,7 @@
 enum da9211_chip_id {
 	DA9211,
 	DA9213,
+	DA9215,
 };
 
 struct da9211_pdata {
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4db9fbe..4593222 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -148,6 +148,7 @@
 	int (*get_current_limit) (struct regulator_dev *);
 
 	int (*set_input_current_limit) (struct regulator_dev *, int lim_uA);
+	int (*set_over_current_protection) (struct regulator_dev *);
 
 	/* enable/disable regulator */
 	int (*enable) (struct regulator_dev *);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b11be12..a1067d0 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -147,6 +147,7 @@
 	unsigned ramp_disable:1; /* disable ramp delay */
 	unsigned soft_start:1;	/* ramp voltage slowly */
 	unsigned pull_down:1;	/* pull down resistor when regulator off */
+	unsigned over_current_protection:1; /* auto disable on over current */
 };
 
 /**
diff --git a/include/linux/regulator/mt6311.h b/include/linux/regulator/mt6311.h
new file mode 100644
index 0000000..8473259
--- /dev/null
+++ b/include/linux/regulator/mt6311.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Henry Chen <henryc.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6311_H
+#define __LINUX_REGULATOR_MT6311_H
+
+#define MT6311_MAX_REGULATORS	2
+
+enum {
+	MT6311_ID_VDVFS = 0,
+	MT6311_ID_VBIASN,
+};
+
+#define MT6311_E1_CID_CODE    0x10
+#define MT6311_E2_CID_CODE    0x20
+#define MT6311_E3_CID_CODE    0x30
+
+#endif /* __LINUX_REGULATOR_MT6311_H */
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 9b1ef0c..556ec1e 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -161,10 +161,6 @@
 static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
 			    struct scatterlist *sgl)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	BUG();
-#endif
-
 	/*
 	 * offset and length are unused for chain entry.  Clear them.
 	 */
@@ -251,6 +247,11 @@
 struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
 void sg_init_table(struct scatterlist *, unsigned int);
 void sg_init_one(struct scatterlist *, const void *, unsigned int);
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask);
 
 typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
 typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04b5ada..119823d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -530,39 +530,49 @@
 };
 
 /**
- * struct cputime - snaphsot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime
  * @utime: time spent in user mode
  * @stime: time spent in system mode
+ * @lock: protects the above two fields
  *
- * Gathers a generic snapshot of user and system time.
+ * Stores previous user/system time values such that we can guarantee
+ * monotonicity.
  */
-struct cputime {
+struct prev_cputime {
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	cputime_t utime;
 	cputime_t stime;
+	raw_spinlock_t lock;
+#endif
 };
 
+static inline void prev_cputime_init(struct prev_cputime *prev)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	prev->utime = prev->stime = 0;
+	raw_spin_lock_init(&prev->lock);
+#endif
+}
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime:		time spent in user mode, in &cputime_t units
  * @stime:		time spent in kernel mode, in &cputime_t units
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
  *
- * This is an extension of struct cputime that includes the total runtime
- * spent by the task from the scheduler point of view.
- *
- * As a result, this structure groups together three kinds of CPU time
- * that are tracked for threads and thread groups.  Most things considering
- * CPU time want to group these counts together and treat all three
- * of them in parallel.
+ * This structure groups together three kinds of CPU time that are tracked for
+ * threads and thread groups.  Most things considering CPU time want to group
+ * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
 	cputime_t utime;
 	cputime_t stime;
 	unsigned long long sum_exec_runtime;
 };
+
 /* Alternate field names when used to cache expirations. */
-#define prof_exp	stime
 #define virt_exp	utime
+#define prof_exp	stime
 #define sched_exp	sum_exec_runtime
 
 #define INIT_CPUTIME	\
@@ -715,9 +725,7 @@
 	cputime_t utime, stime, cutime, cstime;
 	cputime_t gtime;
 	cputime_t cgtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	struct cputime prev_cputime;
-#endif
+	struct prev_cputime prev_cputime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 	unsigned long inblock, oublock, cinblock, coublock;
@@ -1167,29 +1175,24 @@
 	u32 inv_weight;
 };
 
+/*
+ * The load_avg/util_avg accumulates an infinite geometric series.
+ * 1) load_avg factors the amount of time that a sched_entity is
+ * runnable on a rq into its weight. For cfs_rq, it is the aggregated
+ * such weights of all runnable and blocked sched_entities.
+ * 2) util_avg factors frequency scaling into the amount of time
+ * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
+ * For cfs_rq, it is the aggregated such times of all runnable and
+ * blocked sched_entities.
+ * The 64-bit load_sum can:
+ * 1) for cfs_rq, hold 4353082796 (= 2^64/47742/88761) entities with
+ * the highest weight (= 88761) always runnable without overflowing
+ * 2) for an entity, support any load.weight always runnable
+ */
 struct sched_avg {
-	u64 last_runnable_update;
-	s64 decay_count;
-	/*
-	 * utilization_avg_contrib describes the amount of time that a
-	 * sched_entity is running on a CPU. It is based on running_avg_sum
-	 * and is scaled in the range [0..SCHED_LOAD_SCALE].
-	 * load_avg_contrib described the amount of time that a sched_entity
-	 * is runnable on a rq. It is based on both runnable_avg_sum and the
-	 * weight of the task.
-	 */
-	unsigned long load_avg_contrib, utilization_avg_contrib;
-	/*
-	 * These sums represent an infinite geometric series and so are bound
-	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
-	 * choices of y < 1-2^(-32)*1024.
-	 * running_avg_sum reflects the time that the sched_entity is
-	 * effectively running on the CPU.
-	 * runnable_avg_sum represents the amount of time a sched_entity is on
-	 * a runqueue which includes the running time that is monitored by
-	 * running_avg_sum.
-	 */
-	u32 runnable_avg_sum, avg_period, running_avg_sum;
+	u64 last_update_time, load_sum;
+	u32 util_sum, period_contrib;
+	unsigned long load_avg, util_avg;
 };
 
 #ifdef CONFIG_SCHEDSTATS
@@ -1255,7 +1258,7 @@
 #endif
 
 #ifdef CONFIG_SMP
-	/* Per-entity load-tracking */
+	/* Per entity load average tracking */
 	struct sched_avg	avg;
 #endif
 };
@@ -1351,9 +1354,9 @@
 #ifdef CONFIG_SMP
 	struct llist_node wake_entry;
 	int on_cpu;
-	struct task_struct *last_wakee;
-	unsigned long wakee_flips;
+	unsigned int wakee_flips;
 	unsigned long wakee_flip_decay_ts;
+	struct task_struct *last_wakee;
 
 	int wake_cpu;
 #endif
@@ -1481,9 +1484,7 @@
 
 	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	struct cputime prev_cputime;
-#endif
+	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_t vtime_seqlock;
 	unsigned long long vtime_snap;
@@ -2214,13 +2215,6 @@
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
-#ifndef CONFIG_CPUMASK_OFFSTACK
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
-{
-	return set_cpus_allowed_ptr(p, &new_mask);
-}
-#endif
-
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
@@ -2897,12 +2891,6 @@
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET	0
-#endif
-
 #define cond_resched_lock(lock) ({				\
 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index ba82c07..faa0e03 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -136,8 +136,6 @@
 
 extern int early_serial_setup(struct uart_port *port);
 
-extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
-extern void serial8250_early_out(struct uart_port *port, int offset, int value);
 extern int early_serial8250_setup(struct earlycon_device *device,
 					 const char *options);
 extern void serial8250_do_set_termios(struct uart_port *port,
@@ -152,6 +150,11 @@
 unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
 void serial8250_tx_chars(struct uart_8250_port *up);
 unsigned int serial8250_modem_status(struct uart_8250_port *up);
+void serial8250_init_port(struct uart_8250_port *up);
+void serial8250_set_defaults(struct uart_8250_port *up);
+void serial8250_console_write(struct uart_8250_port *up, const char *s,
+			      unsigned int count);
+int serial8250_console_setup(struct uart_port *port, char *options, bool probe);
 
 extern void serial8250_set_isa_configurator(void (*v)
 					(int port, struct uart_port *up,
diff --git a/include/linux/serio.h b/include/linux/serio.h
index 9f779c7..df4ab5d 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -18,6 +18,8 @@
 #include <linux/mod_devicetable.h>
 #include <uapi/linux/serio.h>
 
+extern struct bus_type serio_bus;
+
 struct serio {
 	void *port_data;
 
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index dd0ba50..d927647 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -128,7 +128,10 @@
 #if IS_ENABLED(CONFIG_SH_DMAE_BASE)
 bool shdma_chan_filter(struct dma_chan *chan, void *arg);
 #else
-#define shdma_chan_filter NULL
+static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+	return false;
+}
 #endif
 
 #endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b88536..2738d35 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,7 @@
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
 #include <linux/in6.h>
+#include <net/flow.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -173,17 +174,24 @@
 		BRNF_PROTO_8021Q,
 		BRNF_PROTO_PPPOE
 	} orig_proto:8;
-	bool			pkt_otherhost;
+	u8			pkt_otherhost:1;
+	u8			in_prerouting:1;
+	u8			bridged_dnat:1;
 	__u16			frag_max_size;
-	unsigned int		mask;
 	struct net_device	*physindev;
 	union {
-		struct net_device *physoutdev;
-		char neigh_header[8];
-	};
-	union {
+		/* prerouting: detect dnat in orig/reply direction */
 		__be32          ipv4_daddr;
 		struct in6_addr ipv6_daddr;
+
+		/* after prerouting + nat detected: store original source
+		 * mac since neigh resolution overwrites it, only used while
+		 * skb is out in neigh layer.
+		 */
+		char neigh_header[8];
+
+		/* always valid & non-NULL from FORWARD on, for physdev match */
+		struct net_device *physoutdev;
 	};
 };
 #endif
@@ -506,6 +514,7 @@
  *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
   *	@napi_id: id of the NAPI struct this skb came from
  *	@secmark: security marking
+ *	@offload_fwd_mark: forwarding offload mark
  *	@mark: Generic packet mark
  *	@vlan_proto: vlan encapsulation protocol
  *	@vlan_tci: vlan tag control information
@@ -650,9 +659,15 @@
 		unsigned int	sender_cpu;
 	};
 #endif
+	union {
 #ifdef CONFIG_NETWORK_SECMARK
-	__u32			secmark;
+		__u32		secmark;
 #endif
+#ifdef CONFIG_NET_SWITCHDEV
+		__u32		offload_fwd_mark;
+#endif
+	};
+
 	union {
 		__u32		mark;
 		__u32		reserved_tailroom;
@@ -922,29 +937,6 @@
 	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
 };
 
-static inline void
-skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
-{
-	skb->l4_hash = (type == PKT_HASH_TYPE_L4);
-	skb->sw_hash = 0;
-	skb->hash = hash;
-}
-
-static inline __u32 skb_get_hash(struct sk_buff *skb)
-{
-	if (!skb->l4_hash && !skb->sw_hash)
-		__skb_get_hash(skb);
-
-	return skb->hash;
-}
-
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
-
-static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
-{
-	return skb->hash;
-}
-
 static inline void skb_clear_hash(struct sk_buff *skb)
 {
 	skb->hash = 0;
@@ -958,6 +950,120 @@
 		skb_clear_hash(skb);
 }
 
+static inline void
+__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
+{
+	skb->l4_hash = is_l4;
+	skb->sw_hash = is_sw;
+	skb->hash = hash;
+}
+
+static inline void
+skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
+{
+	/* Used by drivers to set hash from HW */
+	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
+}
+
+static inline void
+__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
+{
+	__skb_set_hash(skb, hash, true, is_l4);
+}
+
+void __skb_get_hash(struct sk_buff *skb);
+u32 skb_get_poff(const struct sk_buff *skb);
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+		   const struct flow_keys *keys, int hlen);
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+			    void *data, int hlen_proto);
+
+static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
+					int thoff, u8 ip_proto)
+{
+	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
+}
+
+void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
+			     const struct flow_dissector_key *key,
+			     unsigned int key_count);
+
+bool __skb_flow_dissect(const struct sk_buff *skb,
+			struct flow_dissector *flow_dissector,
+			void *target_container,
+			void *data, __be16 proto, int nhoff, int hlen,
+			unsigned int flags);
+
+static inline bool skb_flow_dissect(const struct sk_buff *skb,
+				    struct flow_dissector *flow_dissector,
+				    void *target_container, unsigned int flags)
+{
+	return __skb_flow_dissect(skb, flow_dissector, target_container,
+				  NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
+					      struct flow_keys *flow,
+					      unsigned int flags)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
+				  NULL, 0, 0, 0, flags);
+}
+
+static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
+						  void *data, __be16 proto,
+						  int nhoff, int hlen,
+						  unsigned int flags)
+{
+	memset(flow, 0, sizeof(*flow));
+	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
+				  data, proto, nhoff, hlen, flags);
+}
+
+static inline __u32 skb_get_hash(struct sk_buff *skb)
+{
+	if (!skb->l4_hash && !skb->sw_hash)
+		__skb_get_hash(skb);
+
+	return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
+
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
+{
+	if (!skb->l4_hash && !skb->sw_hash) {
+		struct flow_keys keys;
+		__u32 hash = __get_hash_from_flowi6(fl6, &keys);
+
+		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+	}
+
+	return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
+{
+	if (!skb->l4_hash && !skb->sw_hash) {
+		struct flow_keys keys;
+		__u32 hash = __get_hash_from_flowi4(fl4, &keys);
+
+		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
+	}
+
+	return skb->hash;
+}
+
+__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+
+static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
+{
+	return skb->hash;
+}
+
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 {
 	to->hash = from->hash;
@@ -1943,7 +2049,7 @@
 
 	if (skb_transport_header_was_set(skb))
 		return;
-	else if (skb_flow_dissect_flow_keys(skb, &keys))
+	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 		skb_set_transport_header(skb, keys.control.thoff);
 	else
 		skb_set_transport_header(skb, offset_hint);
@@ -2667,12 +2773,6 @@
 	skb_shinfo(skb)->frag_list = NULL;
 }
 
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
-	frag->next = skb_shinfo(skb)->frag_list;
-	skb_shinfo(skb)->frag_list = frag;
-}
-
 #define skb_walk_frags(skb, iter)	\
 	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
@@ -3464,5 +3564,6 @@
 			       skb_network_header(skb);
 	return hdr_len + skb_gso_transport_seglen(skb);
 }
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
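
A minimal sketch of the reworked flow-dissector helper, which now takes a flags argument (0 selects the default behaviour); the function name is made up for illustration:

#include <linux/skbuff.h>

static bool example_transport_offset(struct sk_buff *skb, u16 *thoff)
{
	struct flow_keys keys;

	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
		return false;

	*thoff = keys.control.thoff;	/* transport header offset */
	return true;
}
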
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
new file mode 100644
index 0000000..9c99f84
--- /dev/null
+++ b/include/linux/soc/dove/pmu.h
@@ -0,0 +1,6 @@
+#ifndef LINUX_SOC_DOVE_PMU_H
+#define LINUX_SOC_DOVE_PMU_H
+
+int dove_init_pmu(void);
+
+#endif
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
new file mode 100644
index 0000000..a5714e9
--- /dev/null
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -0,0 +1,26 @@
+#ifndef __SOC_MEDIATEK_INFRACFG_H
+#define __SOC_MEDIATEK_INFRACFG_H
+
+#define MT8173_TOP_AXI_PROT_EN_MCI_M2		BIT(0)
+#define MT8173_TOP_AXI_PROT_EN_MM_M0		BIT(1)
+#define MT8173_TOP_AXI_PROT_EN_MM_M1		BIT(2)
+#define MT8173_TOP_AXI_PROT_EN_MMAPB_S		BIT(6)
+#define MT8173_TOP_AXI_PROT_EN_L2C_M2		BIT(9)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_SMI		BIT(11)
+#define MT8173_TOP_AXI_PROT_EN_L2SS_ADD		BIT(12)
+#define MT8173_TOP_AXI_PROT_EN_CCI_M2		BIT(13)
+#define MT8173_TOP_AXI_PROT_EN_MFG_S		BIT(14)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M0		BIT(15)
+#define MT8173_TOP_AXI_PROT_EN_PERI_M1		BIT(16)
+#define MT8173_TOP_AXI_PROT_EN_DEBUGSYS		BIT(17)
+#define MT8173_TOP_AXI_PROT_EN_CQ_DMA		BIT(18)
+#define MT8173_TOP_AXI_PROT_EN_GCPU		BIT(19)
+#define MT8173_TOP_AXI_PROT_EN_IOMMU		BIT(20)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M0		BIT(21)
+#define MT8173_TOP_AXI_PROT_EN_MFG_M1		BIT(22)
+#define MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT	BIT(23)
+
+int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask);
+int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask);
+
+#endif /* __SOC_MEDIATEK_INFRACFG_H */
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
new file mode 100644
index 0000000..2a53dca
--- /dev/null
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -0,0 +1,35 @@
+#ifndef __QCOM_SMD_RPM_H__
+#define __QCOM_SMD_RPM_H__
+
+struct qcom_smd_rpm;
+
+#define QCOM_SMD_RPM_ACTIVE_STATE        0
+#define QCOM_SMD_RPM_SLEEP_STATE         1
+
+/*
+ * Constants used for addressing resources in the RPM.
+ */
+#define QCOM_SMD_RPM_BOOST	0x61747362
+#define QCOM_SMD_RPM_BUS_CLK	0x316b6c63
+#define QCOM_SMD_RPM_BUS_MASTER	0x73616d62
+#define QCOM_SMD_RPM_BUS_SLAVE	0x766c7362
+#define QCOM_SMD_RPM_CLK_BUF_A	0x616B6C63
+#define QCOM_SMD_RPM_LDOA	0x616f646c
+#define QCOM_SMD_RPM_LDOB	0x626F646C
+#define QCOM_SMD_RPM_MEM_CLK	0x326b6c63
+#define QCOM_SMD_RPM_MISC_CLK	0x306b6c63
+#define QCOM_SMD_RPM_NCPA	0x6170636E
+#define QCOM_SMD_RPM_NCPB	0x6270636E
+#define QCOM_SMD_RPM_OCMEM_PWR	0x706d636f
+#define QCOM_SMD_RPM_QPIC_CLK	0x63697071
+#define QCOM_SMD_RPM_SMPA	0x61706d73
+#define QCOM_SMD_RPM_SMPB	0x62706d73
+#define QCOM_SMD_RPM_SPDM	0x63707362
+#define QCOM_SMD_RPM_VSA	0x00617376
+
+int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
+		       int state,
+		       u32 resource_type, u32 resource_id,
+		       void *buf, size_t count);
+
+#endif
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
new file mode 100644
index 0000000..d7e50aa
--- /dev/null
+++ b/include/linux/soc/qcom/smd.h
@@ -0,0 +1,46 @@
+#ifndef __QCOM_SMD_H__
+#define __QCOM_SMD_H__
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct qcom_smd;
+struct qcom_smd_channel;
+struct qcom_smd_lookup;
+
+/**
+ * struct qcom_smd_device - smd device struct
+ * @dev:	the device struct
+ * @channel:	handle to the smd channel for this device
+ */
+struct qcom_smd_device {
+	struct device dev;
+	struct qcom_smd_channel *channel;
+};
+
+/**
+ * struct qcom_smd_driver - smd driver struct
+ * @driver:	underlying device driver
+ * @probe:	invoked when the smd channel is found
+ * @remove:	invoked when the smd channel is closed
+ * @callback:	invoked when an inbound message is received on the channel,
+ *		should return 0 on success or -EBUSY if the data cannot be
+ *		consumed at this time
+ */
+struct qcom_smd_driver {
+	struct device_driver driver;
+	int (*probe)(struct qcom_smd_device *dev);
+	void (*remove)(struct qcom_smd_device *dev);
+	int (*callback)(struct qcom_smd_device *, const void *, size_t);
+};
+
+int qcom_smd_driver_register(struct qcom_smd_driver *drv);
+void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+
+#define module_qcom_smd_driver(__smd_driver) \
+	module_driver(__smd_driver, qcom_smd_driver_register, \
+		      qcom_smd_driver_unregister)
+
+int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+
+#endif
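
A minimal sketch of a client of the new Qualcomm SMD bus; the driver name, message payload and callback behaviour are made up for illustration:

#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int example_smd_probe(struct qcom_smd_device *sdev)
{
	static const u8 ping[] = { 0x01 };

	/* Send an initial message on the channel this device represents. */
	return qcom_smd_send(sdev->channel, ping, sizeof(ping));
}

static int example_smd_callback(struct qcom_smd_device *sdev,
				const void *data, size_t count)
{
	/* Consume the inbound message; return -EBUSY if it cannot be consumed yet. */
	return 0;
}

static struct qcom_smd_driver example_smd_driver = {
	.probe		= example_smd_probe,
	.callback	= example_smd_callback,
	.driver	= {
		.name	= "example-smd-client",
	},
};
module_qcom_smd_driver(example_smd_driver);

MODULE_LICENSE("GPL v2");
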
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
new file mode 100644
index 0000000..bc9630d
--- /dev/null
+++ b/include/linux/soc/qcom/smem.h
@@ -0,0 +1,11 @@
+#ifndef __QCOM_SMEM_H__
+#define __QCOM_SMEM_H__
+
+#define QCOM_SMEM_HOST_ANY -1
+
+int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
+int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size);
+
+int qcom_smem_get_free_space(unsigned host);
+
+#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index d673072..269e8af 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,8 @@
 #include <linux/scatterlist.h>
 
 struct dma_chan;
+struct spi_master;
+struct spi_transfer;
 
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -31,6 +33,59 @@
 extern struct bus_type spi_bus_type;
 
 /**
+ * struct spi_statistics - statistics for spi transfers
+ * @lock:          lock protecting this structure
+ *
+ * @messages:      number of spi-messages handled
+ * @transfers:     number of spi_transfers handled
+ * @errors:        number of errors during spi_transfer
+ * @timedout:      number of timeouts during spi_transfer
+ *
+ * @spi_sync:      number of times spi_sync is used
+ * @spi_sync_immediate:
+ *                 number of times spi_sync is executed immediately
+ *                 in calling context without queuing and scheduling
+ * @spi_async:     number of times spi_async is used
+ *
+ * @bytes:         number of bytes transferred to/from device
+ * @bytes_tx:      number of bytes sent to device
+ * @bytes_rx:      number of bytes received from device
+ *
+ */
+struct spi_statistics {
+	spinlock_t		lock; /* lock for the whole structure */
+
+	unsigned long		messages;
+	unsigned long		transfers;
+	unsigned long		errors;
+	unsigned long		timedout;
+
+	unsigned long		spi_sync;
+	unsigned long		spi_sync_immediate;
+	unsigned long		spi_async;
+
+	unsigned long long	bytes;
+	unsigned long long	bytes_rx;
+	unsigned long long	bytes_tx;
+
+};
+
+void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+				       struct spi_transfer *xfer,
+				       struct spi_master *master);
+
+#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\
+	do {							\
+		unsigned long flags;				\
+		spin_lock_irqsave(&(stats)->lock, flags);	\
+		(stats)->field += count;			\
+		spin_unlock_irqrestore(&(stats)->lock, flags);	\
+	} while (0)
+
+#define SPI_STATISTICS_INCREMENT_FIELD(stats, field)	\
+	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
+
+/**
  * struct spi_device - Master side proxy for an SPI slave device
  * @dev: Driver model representation of the device.
  * @master: SPI controller used with the device.
@@ -60,6 +115,8 @@
  * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
  *	when not using a GPIO line)
  *
+ * @statistics: statistics for the spi_device
+ *
  * A @spi_device is used to interchange data between an SPI slave
  * (usually a discrete chip) and CPU memory.
  *
@@ -98,6 +155,9 @@
 	char			modalias[SPI_NAME_SIZE];
 	int			cs_gpio;	/* chip select gpio */
 
+	/* the statistics */
+	struct spi_statistics	statistics;
+
 	/*
 	 * likely need more hooks for more protocol options affecting how
 	 * the controller talks to each chip, like:
@@ -296,6 +356,7 @@
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
+ * @statistics: statistics for the spi_master
  * @dma_tx: DMA transmit channel
  * @dma_rx: DMA receive channel
  * @dummy_rx: dummy receive buffer for full-duplex devices
@@ -452,6 +513,9 @@
 	/* gpio chip select */
 	int			*cs_gpios;
 
+	/* statistics */
+	struct spi_statistics	statistics;
+
 	/* DMA channels for use with core dmaengine helpers */
 	struct dma_chan		*dma_tx;
 	struct dma_chan		*dma_rx;
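
A minimal sketch of the new SPI statistics macros (the SPI core uses them internally when it handles a transfer); the function name is made up for illustration:

#include <linux/spi/spi.h>

static void example_account_transfer(struct spi_master *master,
				     struct spi_transfer *xfer)
{
	/* Count the transfer and the bytes it moved, under stats->lock. */
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, transfers);
	SPI_STATISTICS_ADD_TO_FIELD(&master->statistics, bytes, xfer->len);
}
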
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 0063b24..47dd0ce 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,16 +130,6 @@
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
-/*
- * Place this after a lock-acquisition primitive to guarantee that
- * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
- * if the UNLOCK and LOCK are executed by the same CPU or if the
- * UNLOCK and LOCK operate on the same lock variable.
- */
-#ifndef smp_mb__after_unlock_lock
-#define smp_mb__after_unlock_lock()	do { } while (0)
-#endif
-
 /**
  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
@@ -296,7 +286,7 @@
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
 
-static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
+static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
 	return &lock->rlock;
 }
@@ -307,17 +297,17 @@
 	raw_spin_lock_init(&(_lock)->rlock);		\
 } while (0)
 
-static inline void spin_lock(spinlock_t *lock)
+static __always_inline void spin_lock(spinlock_t *lock)
 {
 	raw_spin_lock(&lock->rlock);
 }
 
-static inline void spin_lock_bh(spinlock_t *lock)
+static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
 	raw_spin_lock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock(spinlock_t *lock)
+static __always_inline int spin_trylock(spinlock_t *lock)
 {
 	return raw_spin_trylock(&lock->rlock);
 }
@@ -337,7 +327,7 @@
 	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
 } while (0)
 
-static inline void spin_lock_irq(spinlock_t *lock)
+static __always_inline void spin_lock_irq(spinlock_t *lock)
 {
 	raw_spin_lock_irq(&lock->rlock);
 }
@@ -352,32 +342,32 @@
 	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
-static inline void spin_unlock(spinlock_t *lock)
+static __always_inline void spin_unlock(spinlock_t *lock)
 {
 	raw_spin_unlock(&lock->rlock);
 }
 
-static inline void spin_unlock_bh(spinlock_t *lock)
+static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
 	raw_spin_unlock_bh(&lock->rlock);
 }
 
-static inline void spin_unlock_irq(spinlock_t *lock)
+static __always_inline void spin_unlock_irq(spinlock_t *lock)
 {
 	raw_spin_unlock_irq(&lock->rlock);
 }
 
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	raw_spin_unlock_irqrestore(&lock->rlock, flags);
 }
 
-static inline int spin_trylock_bh(spinlock_t *lock)
+static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
 	return raw_spin_trylock_bh(&lock->rlock);
 }
 
-static inline int spin_trylock_irq(spinlock_t *lock)
+static __always_inline int spin_trylock_irq(spinlock_t *lock)
 {
 	return raw_spin_trylock_irq(&lock->rlock);
 }
@@ -387,22 +377,22 @@
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-static inline void spin_unlock_wait(spinlock_t *lock)
+static __always_inline void spin_unlock_wait(spinlock_t *lock)
 {
 	raw_spin_unlock_wait(&lock->rlock);
 }
 
-static inline int spin_is_locked(spinlock_t *lock)
+static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
 }
 
-static inline int spin_is_contended(spinlock_t *lock)
+static __always_inline int spin_is_contended(spinlock_t *lock)
 {
 	return raw_spin_is_contended(&lock->rlock);
 }
 
-static inline int spin_can_lock(spinlock_t *lock)
+static __always_inline int spin_can_lock(spinlock_t *lock)
 {
 	return raw_spin_can_lock(&lock->rlock);
 }
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c735f5c..eead8ab 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -119,30 +119,8 @@
 	int rx_fifo_size;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
-	void *custom_cfg;
-	void *custom_data;
 	void *bsp_priv;
 };
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
-	int has_gmac;
-	int enh_desc;
-	int tx_coe;
-	int rx_coe;
-	int bugged_jumbo;
-	int pmt;
-	int riwt_off;
-	void (*fix_mac_speed)(void *priv, unsigned int speed);
-	void (*bus_setup)(void __iomem *ioaddr);
-	void *(*setup)(struct platform_device *pdev);
-	void (*free)(struct platform_device *pdev, void *priv);
-	int (*init)(struct platform_device *pdev, void *priv);
-	void (*exit)(struct platform_device *pdev, void *priv);
-};
 #endif
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index d2abbdb..414d924 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -112,25 +112,13 @@
  *
  * This can be thought of as a very heavy write lock, equivalent to
  * grabbing every spinlock in the kernel. */
-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
 
-/**
- * __stop_machine: freeze the machine on all CPUs and run this function
- * @fn: the function to run
- * @data: the data ptr for the @fn
- * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
- *
- * Description: This is a special version of the above, which assumes cpus
- * won't come or go while it's being called.  Used by hotplug cpu.
- */
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus);
-
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 				   const struct cpumask *cpus);
-
 #else	 /* CONFIG_STOP_MACHINE && CONFIG_SMP */
 
-static inline int __stop_machine(int (*fn)(void *), void *data,
+static inline int stop_machine(cpu_stop_fn_t fn, void *data,
 				 const struct cpumask *cpus)
 {
 	unsigned long flags;
@@ -141,16 +129,10 @@
 	return ret;
 }
 
-static inline int stop_machine(int (*fn)(void *), void *data,
-			       const struct cpumask *cpus)
-{
-	return __stop_machine(fn, data, cpus);
-}
-
-static inline int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 						 const struct cpumask *cpus)
 {
-	return __stop_machine(fn, data, cpus);
+	return stop_machine(fn, data, cpus);
 }
 
 #endif	/* CONFIG_STOP_MACHINE && CONFIG_SMP */
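
A minimal sketch of stop_machine() with the consolidated cpu_stop_fn_t callback type; the callback body is made up for illustration:

#include <linux/stop_machine.h>

static int example_stop_fn(void *data)
{
	/* Runs with every other online CPU spinning, interrupts off. */
	*(int *)data = 1;
	return 0;
}

static int example_patch_machine(void)
{
	int done = 0;

	/* NULL cpumask: the callback may run on any online CPU. */
	return stop_machine(example_stop_fn, &done, NULL);
}
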
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3887472..31496d2 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -373,9 +373,9 @@
 /* linux/mm/page_io.c */
 extern int swap_readpage(struct page *);
 extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio, int err);
+extern void end_swap_bio_write(struct bio *bio);
 extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
-	void (*end_write_func)(struct bio *, int));
+	bio_end_io_t end_write_func);
 extern int swap_set_page_dirty(struct page *page);
 
 int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index c78dcfe..d4217ef 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -86,7 +86,6 @@
 extern long st_register(struct st_proto_s *);
 extern long st_unregister(struct st_proto_s *);
 
-extern struct ti_st_plat_data   *dt_pdata;
 
 /*
  * header information used by st_core.c
diff --git a/include/linux/tick.h b/include/linux/tick.h
index edbfc9a..48d901f 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,22 +147,29 @@
 		cpumask_or(mask, mask, tick_nohz_full_mask);
 }
 
-extern void __tick_nohz_full_check(void);
 extern void tick_nohz_full_kick(void);
 extern void tick_nohz_full_kick_cpu(int cpu);
 extern void tick_nohz_full_kick_all(void);
-extern void __tick_nohz_task_switch(struct task_struct *tsk);
+extern void __tick_nohz_task_switch(void);
 #else
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
-static inline void __tick_nohz_full_check(void) { }
 static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
-static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
+static inline void __tick_nohz_task_switch(void) { }
 #endif
 
+static inline const struct cpumask *housekeeping_cpumask(void)
+{
+#ifdef CONFIG_NO_HZ_FULL
+	if (tick_nohz_full_enabled())
+		return housekeeping_mask;
+#endif
+	return cpu_possible_mask;
+}
+
 static inline bool is_housekeeping_cpu(int cpu)
 {
 #ifdef CONFIG_NO_HZ_FULL
@@ -181,16 +188,10 @@
 #endif
 }
 
-static inline void tick_nohz_full_check(void)
+static inline void tick_nohz_task_switch(void)
 {
 	if (tick_nohz_full_enabled())
-		__tick_nohz_full_check();
-}
-
-static inline void tick_nohz_task_switch(struct task_struct *tsk)
-{
-	if (tick_nohz_full_enabled())
-		__tick_nohz_task_switch(tsk);
+		__tick_nohz_task_switch();
 }
 
 #endif
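
A minimal sketch of the new housekeeping_cpumask() helper, which falls back to cpu_possible_mask when nohz_full is not in use; the loop body is made up for illustration:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/tick.h>

static void example_walk_housekeepers(void)
{
	int cpu;

	/* Visit only CPUs that are not isolated by nohz_full. */
	for_each_cpu(cpu, housekeeping_cpumask())
		pr_debug("housekeeping cpu %d\n", cpu);
}
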
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 77b5df2..367d5af 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,11 +12,18 @@
  */
 #if __BITS_PER_LONG == 64
 # define timespec64 timespec
+#define itimerspec64 itimerspec
 #else
 struct timespec64 {
 	time64_t	tv_sec;			/* seconds */
 	long		tv_nsec;		/* nanoseconds */
 };
+
+struct itimerspec64 {
+	struct timespec64 it_interval;
+	struct timespec64 it_value;
+};
+
 #endif
 
 /* Parameters used to convert the timespec values: */
@@ -45,6 +52,16 @@
 	return ts;
 }
 
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+	return *its64;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+	return *its;
+}
+
 # define timespec64_equal		timespec_equal
 # define timespec64_compare		timespec_compare
 # define set_normalized_timespec64	set_normalized_timespec
@@ -77,6 +94,24 @@
 	return ret;
 }
 
+static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
+{
+	struct itimerspec ret;
+
+	ret.it_interval = timespec64_to_timespec(its64->it_interval);
+	ret.it_value = timespec64_to_timespec(its64->it_value);
+	return ret;
+}
+
+static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
+{
+	struct itimerspec64 ret;
+
+	ret.it_interval = timespec_to_timespec64(its->it_interval);
+	ret.it_value = timespec_to_timespec64(its->it_value);
+	return ret;
+}
+
 static inline int timespec64_equal(const struct timespec64 *a,
 				   const struct timespec64 *b)
 {
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 6e191e4..ba0ae09 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -18,10 +18,17 @@
  * Kernel time accessors
  */
 unsigned long get_seconds(void);
-struct timespec current_kernel_time(void);
+struct timespec64 current_kernel_time64(void);
 /* does not take xtime_lock */
 struct timespec __current_kernel_time(void);
 
+static inline struct timespec current_kernel_time(void)
+{
+	struct timespec64 now = current_kernel_time64();
+
+	return timespec64_to_timespec(now);
+}
+
 /*
  * timespec based interfaces
  */
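
A minimal sketch of the new 64-bit coarse time accessor, with the old struct timespec variant kept as an inline wrapper; the log message is made up for illustration:

#include <linux/kernel.h>
#include <linux/timekeeping.h>

static void example_log_coarse_time(void)
{
	struct timespec64 now = current_kernel_time64();

	pr_info("coarse time: %lld.%09ld\n",
		(long long)now.tv_sec, now.tv_nsec);
}
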
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 1063c85..ed27917 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -243,6 +243,7 @@
 	TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
 	TRACE_EVENT_FL_TRACEPOINT_BIT,
 	TRACE_EVENT_FL_KPROBE_BIT,
+	TRACE_EVENT_FL_UPROBE_BIT,
 };
 
 /*
@@ -257,6 +258,7 @@
  *  USE_CALL_FILTER - For trace internal events, don't use file filter
  *  TRACEPOINT    - Event is a tracepoint
  *  KPROBE        - Event is a kprobe
+ *  UPROBE        - Event is a uprobe
  */
 enum {
 	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -267,8 +269,11 @@
 	TRACE_EVENT_FL_USE_CALL_FILTER	= (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
 	TRACE_EVENT_FL_TRACEPOINT	= (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
 	TRACE_EVENT_FL_KPROBE		= (1 << TRACE_EVENT_FL_KPROBE_BIT),
+	TRACE_EVENT_FL_UPROBE		= (1 << TRACE_EVENT_FL_UPROBE_BIT),
 };
 
+#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
+
 struct trace_event_call {
 	struct list_head	list;
 	struct trace_event_class *class;
@@ -542,7 +547,7 @@
 		event_triggers_post_call(file, tt);
 }
 
-#ifdef CONFIG_BPF_SYSCALL
+#ifdef CONFIG_BPF_EVENTS
 unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx);
 #else
 static inline unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index ad6c891..d072ded 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -709,4 +709,10 @@
 static inline void proc_tty_unregister_driver(struct tty_driver *d) {}
 #endif
 
+#define tty_debug(tty, f, args...)					\
+	do {								\
+		printk(KERN_DEBUG "%s: %s: " f, __func__,		\
+		       tty_name(tty), ##args);				\
+	} while (0)
+
 #endif
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 92e337c..1610524 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -296,7 +296,7 @@
 struct tty_driver {
 	int	magic;		/* magic number for this structure */
 	struct kref kref;	/* Reference management */
-	struct cdev *cdevs;
+	struct cdev **cdevs;
 	struct module	*owner;
 	const char	*driver_name;
 	const char	*name;
diff --git a/include/linux/types.h b/include/linux/types.h
index 8715287..c314989 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,6 +212,9 @@
 };
 #define rcu_head callback_head
 
+typedef void (*rcu_callback_t)(struct rcu_head *head);
+typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+
 /* clocksource cycle base type */
 typedef u64 cycle_t;
 
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ae572c1..d6f2c2c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -129,4 +129,6 @@
 extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
 
+extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+
 #endif		/* __LINUX_UACCESS_H__ */
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 60beb5d..0bdc72f 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -92,6 +92,22 @@
 	unsigned int			depth;
 };
 
+struct return_instance {
+	struct uprobe		*uprobe;
+	unsigned long		func;
+	unsigned long		stack;		/* stack pointer */
+	unsigned long		orig_ret_vaddr; /* original return address */
+	bool			chained;	/* true, if instance is nested */
+
+	struct return_instance	*next;		/* keep as stack */
+};
+
+enum rp_check {
+	RP_CHECK_CALL,
+	RP_CHECK_CHAIN_CALL,
+	RP_CHECK_RET,
+};
+
 struct xol_area;
 
 struct uprobes_state {
@@ -128,6 +144,7 @@
 extern int  arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
 extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
 extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
+extern bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, struct pt_regs *regs);
 extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
 extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
 					 void *src, unsigned long len);
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ab94f78..a41833c 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -19,8 +19,11 @@
 	enum usb_phy_interface phy_mode;
 	unsigned long	 flags;
 #define CI_HDRC_REGS_SHARED		BIT(0)
+#define CI_HDRC_DISABLE_DEVICE_STREAMING	BIT(1)
 #define CI_HDRC_SUPPORTS_RUNTIME_PM	BIT(2)
-#define CI_HDRC_DISABLE_STREAMING	BIT(3)
+#define CI_HDRC_DISABLE_HOST_STREAMING	BIT(3)
+#define CI_HDRC_DISABLE_STREAMING (CI_HDRC_DISABLE_DEVICE_STREAMING |	\
+		CI_HDRC_DISABLE_HOST_STREAMING)
 	/*
 	 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
 	 * but otg is not supported (no register otgsc).
@@ -29,12 +32,22 @@
 #define CI_HDRC_IMX28_WRITE_FIX		BIT(5)
 #define CI_HDRC_FORCE_FULLSPEED		BIT(6)
 #define CI_HDRC_TURN_VBUS_EARLY_ON	BIT(7)
+#define CI_HDRC_SET_NON_ZERO_TTHA	BIT(8)
+#define CI_HDRC_OVERRIDE_AHB_BURST	BIT(9)
+#define CI_HDRC_OVERRIDE_TX_BURST	BIT(10)
+#define CI_HDRC_OVERRIDE_RX_BURST	BIT(11)
 	enum usb_dr_mode	dr_mode;
 #define CI_HDRC_CONTROLLER_RESET_EVENT		0
 #define CI_HDRC_CONTROLLER_STOPPED_EVENT	1
 	void	(*notify_event) (struct ci_hdrc *ci, unsigned event);
 	struct regulator	*reg_vbus;
+	struct usb_otg_caps	ci_otg_caps;
 	bool			tpl_support;
+	/* interrupt threshold setting */
+	u32			itc_setting;
+	u32			ahb_burst_config;
+	u32			tx_burst_size;
+	u32			rx_burst_size;
 };
 
 /* Default offset of capability registers */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 2511469..1074b89 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -228,6 +228,8 @@
 	struct list_head		list;
 	DECLARE_BITMAP(endpoints, 32);
 	const struct usb_function_instance *fi;
+
+	unsigned int		bind_deactivated:1;
 };
 
 int usb_add_function(struct usb_configuration *, struct usb_function *);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 4f3dfb7..c14a69b 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -141,10 +141,49 @@
 };
 
 /**
+ * struct usb_ep_caps - endpoint capabilities description
+ * @type_control:Endpoint supports control type (reserved for ep0).
+ * @type_iso:Endpoint supports isochronous transfers.
+ * @type_bulk:Endpoint supports bulk transfers.
+ * @type_int:Endpoint supports interrupt transfers.
+ * @dir_in:Endpoint supports IN direction.
+ * @dir_out:Endpoint supports OUT direction.
+ */
+struct usb_ep_caps {
+	unsigned type_control:1;
+	unsigned type_iso:1;
+	unsigned type_bulk:1;
+	unsigned type_int:1;
+	unsigned dir_in:1;
+	unsigned dir_out:1;
+};
+
+#define USB_EP_CAPS_TYPE_CONTROL     0x01
+#define USB_EP_CAPS_TYPE_ISO         0x02
+#define USB_EP_CAPS_TYPE_BULK        0x04
+#define USB_EP_CAPS_TYPE_INT         0x08
+#define USB_EP_CAPS_TYPE_ALL \
+	(USB_EP_CAPS_TYPE_ISO | USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT)
+#define USB_EP_CAPS_DIR_IN           0x01
+#define USB_EP_CAPS_DIR_OUT          0x02
+#define USB_EP_CAPS_DIR_ALL  (USB_EP_CAPS_DIR_IN | USB_EP_CAPS_DIR_OUT)
+
+#define USB_EP_CAPS(_type, _dir) \
+	{ \
+		.type_control = !!(_type & USB_EP_CAPS_TYPE_CONTROL), \
+		.type_iso = !!(_type & USB_EP_CAPS_TYPE_ISO), \
+		.type_bulk = !!(_type & USB_EP_CAPS_TYPE_BULK), \
+		.type_int = !!(_type & USB_EP_CAPS_TYPE_INT), \
+		.dir_in = !!(_dir & USB_EP_CAPS_DIR_IN), \
+		.dir_out = !!(_dir & USB_EP_CAPS_DIR_OUT), \
+	}
+
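USB_EP_CAPS() expands to a designated initializer for struct usb_ep_caps, so a UDC driver can describe each endpoint's hardware capabilities in a single expression. A sketch of how an IN-only, bulk/interrupt-capable endpoint might be described (driver and function names are hypothetical):

	#include <linux/usb/gadget.h>

	/* Hypothetical UDC probe-time setup; a sketch, not taken from any driver. */
	static void my_udc_init_ep_in(struct usb_ep *ep)
	{
		ep->caps = (struct usb_ep_caps)
			USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK | USB_EP_CAPS_TYPE_INT,
				    USB_EP_CAPS_DIR_IN);
	}

The match_ep()/usb_gadget_ep_match_desc() additions further down are the consumers of these bits when endpoints are auto-assigned to descriptors.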
+/**
  * struct usb_ep - device side representation of USB endpoint
  * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk"
  * @ops: Function pointers used to access hardware-specific operations.
  * @ep_list:the gadget's ep_list holds all of its endpoints
+ * @caps:The structure describing types and directions supported by the endpoint.
  * @maxpacket:The maximum packet size used on this endpoint.  The initial
  *	value can sometimes be reduced (hardware allowing), according to
  *      the endpoint descriptor used to configure the endpoint.
@@ -167,12 +206,15 @@
  * gadget->ep_list.  the control endpoint (gadget->ep0) is not in that list,
  * and is accessed only in response to a driver setup() callback.
  */
+
 struct usb_ep {
 	void			*driver_data;
 
 	const char		*name;
 	const struct usb_ep_ops	*ops;
 	struct list_head	ep_list;
+	struct usb_ep_caps	caps;
+	bool			claimed;
 	unsigned		maxpacket:16;
 	unsigned		maxpacket_limit:16;
 	unsigned		max_streams:16;
@@ -492,6 +534,9 @@
 	int	(*udc_start)(struct usb_gadget *,
 			struct usb_gadget_driver *);
 	int	(*udc_stop)(struct usb_gadget *);
+	struct usb_ep *(*match_ep)(struct usb_gadget *,
+			struct usb_endpoint_descriptor *,
+			struct usb_ss_ep_comp_descriptor *);
 };
 
 /**
@@ -511,6 +556,7 @@
  * @dev: Driver model state for this abstract device.
  * @out_epnum: last used out ep number
  * @in_epnum: last used in ep number
+ * @otg_caps: OTG capabilities of this gadget.
  * @sg_supported: true if we can handle scatter-gather
  * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
  *	gadget driver must provide a USB OTG descriptor.
@@ -526,6 +572,9 @@
  * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to
  *	MaxPacketSize.
  * @is_selfpowered: if the gadget is self-powered.
+ * @deactivated: True if gadget is deactivated - in deactivated state it cannot
+ *	be connected.
+ * @connected: True if gadget is connected.
  *
  * Gadgets have a mostly-portable "gadget driver" implementing device
  * functions, handling all usb configurations and interfaces.  Gadget
@@ -559,6 +608,7 @@
 	struct device			dev;
 	unsigned			out_epnum;
 	unsigned			in_epnum;
+	struct usb_otg_caps		*otg_caps;
 
 	unsigned			sg_supported:1;
 	unsigned			is_otg:1;
@@ -567,7 +617,12 @@
 	unsigned			a_hnp_support:1;
 	unsigned			a_alt_hnp_support:1;
 	unsigned			quirk_ep_out_aligned_size:1;
+	unsigned			quirk_altset_not_supp:1;
+	unsigned			quirk_stall_not_supp:1;
+	unsigned			quirk_zlp_not_supp:1;
 	unsigned			is_selfpowered:1;
+	unsigned			deactivated:1;
+	unsigned			connected:1;
 };
 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
@@ -584,7 +639,6 @@
 #define gadget_for_each_ep(tmp, gadget) \
 	list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
 
-
 /**
  * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
 *	requires quirk_ep_out_aligned_size, otherwise returns len.
@@ -603,6 +657,34 @@
 }
 
 /**
+ * gadget_is_altset_supported - return true iff the hardware supports
+ *	altsettings
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_altset_supported(struct usb_gadget *g)
+{
+	return !g->quirk_altset_not_supp;
+}
+
+/**
+ * gadget_is_stall_supported - return true iff the hardware supports stalling
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_stall_supported(struct usb_gadget *g)
+{
+	return !g->quirk_stall_not_supp;
+}
+
+/**
+ * gadget_is_zlp_supported - return true iff the hardware supports zlp
+ * @g: controller to check for quirk
+ */
+static inline int gadget_is_zlp_supported(struct usb_gadget *g)
+{
+	return !g->quirk_zlp_not_supp;
+}
+
+/**
  * gadget_is_dualspeed - return true iff the hardware handles high speed
  * @g: controller that might support both high and full speeds
  */
@@ -771,9 +853,24 @@
  */
 static inline int usb_gadget_connect(struct usb_gadget *gadget)
 {
+	int ret;
+
 	if (!gadget->ops->pullup)
 		return -EOPNOTSUPP;
-	return gadget->ops->pullup(gadget, 1);
+
+	if (gadget->deactivated) {
+		/*
+		 * If gadget is deactivated we only save new state.
+		 * Gadget will be connected automatically after activation.
+		 */
+		gadget->connected = true;
+		return 0;
+	}
+
+	ret = gadget->ops->pullup(gadget, 1);
+	if (!ret)
+		gadget->connected = 1;
+	return ret;
 }
 
 /**
@@ -784,20 +881,88 @@
  * as a disconnect (when a VBUS session is active).  Not all systems
  * support software pullup controls.
  *
- * This routine may be used during the gadget driver bind() call to prevent
- * the peripheral from ever being visible to the USB host, unless later
- * usb_gadget_connect() is called.  For example, user mode components may
- * need to be activated before the system can talk to hosts.
- *
  * Returns zero on success, else negative errno.
  */
 static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
 {
+	int ret;
+
 	if (!gadget->ops->pullup)
 		return -EOPNOTSUPP;
-	return gadget->ops->pullup(gadget, 0);
+
+	if (gadget->deactivated) {
+		/*
+		 * If gadget is deactivated we only save new state.
+		 * Gadget will stay disconnected after activation.
+		 */
+		gadget->connected = false;
+		return 0;
+	}
+
+	ret = gadget->ops->pullup(gadget, 0);
+	if (!ret)
+		gadget->connected = 0;
+	return ret;
 }
 
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
+ * This routine may be used during the gadget driver bind() call to prevent
+ * the peripheral from ever being visible to the USB host, unless later
+ * usb_gadget_activate() is called.  For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
+{
+	int ret;
+
+	if (gadget->deactivated)
+		return 0;
+
+	if (gadget->connected) {
+		ret = usb_gadget_disconnect(gadget);
+		if (ret)
+			return ret;
+		/*
+		 * If gadget was being connected before deactivation, we want
+		 * to reconnect it in usb_gadget_activate().
+		 */
+		gadget->connected = true;
+	}
+	gadget->deactivated = true;
+
+	return 0;
+}
+
+/**
+ * usb_gadget_activate - activate a function that was previously deactivated
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_activate(struct usb_gadget *gadget)
+{
+	if (!gadget->deactivated)
+		return 0;
+
+	gadget->deactivated = false;
+
+	/*
+	 * If gadget has been connected before deactivation, or became connected
+	 * while it was being deactivated, we call usb_gadget_connect().
+	 */
+	if (gadget->connected)
+		return usb_gadget_connect(gadget);
+
+	return 0;
+}
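The deactivate/activate pair lets a function driver keep the pullup off until some out-of-band condition is met (typically userspace opening a device node), while remembering whether a connect was requested in the meantime. A sketch with hypothetical function-driver names:

	#include <linux/usb/composite.h>

	static struct usb_gadget *my_gadget;	/* saved at bind time; hypothetical */

	static int my_func_bind(struct usb_configuration *c, struct usb_function *f)
	{
		my_gadget = c->cdev->gadget;
		/* ... descriptor and endpoint setup ... */

		/* stay invisible to the host until userspace shows up */
		return usb_gadget_deactivate(my_gadget);
	}

	static int my_func_open(struct inode *inode, struct file *file)
	{
		/* reconnects automatically if the core asked for a connect
		 * while the gadget was deactivated */
		return usb_gadget_activate(my_gadget);
	}

The bind_deactivated flag added to struct usb_function earlier in this series appears to be the declarative way to request the same behaviour from the composite core.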
 
 /*-------------------------------------------------------------------------*/
 
@@ -1002,6 +1167,10 @@
 		struct usb_descriptor_header **ss);
 void usb_free_all_descriptors(struct usb_function *f);
 
+struct usb_descriptor_header *usb_otg_descriptor_alloc(
+				struct usb_gadget *gadget);
+int usb_otg_descriptor_init(struct usb_gadget *gadget,
+		struct usb_descriptor_header *otg_desc);
 /*-------------------------------------------------------------------------*/
 
 /* utility to simplify map/unmap of usb_requests to/from DMA */
@@ -1034,6 +1203,21 @@
 
 /*-------------------------------------------------------------------------*/
 
+/* utility to find endpoint by name */
+
+extern struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g,
+		const char *name);
+
+/*-------------------------------------------------------------------------*/
+
+/* utility to check if endpoint caps match descriptor needs */
+
+extern int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+		struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+		struct usb_ss_ep_comp_descriptor *ep_comp);
+
+/*-------------------------------------------------------------------------*/
+
 /* utility to update vbus status for udc core, it may be scheduled */
 extern void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status);
 
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index c9aa779..d2784c1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -564,9 +564,9 @@
 
 /*-------------------------------------------------------------------------*/
 
-/* class requests from USB 3.0 hub spec, table 10-5 */
-#define SetHubDepth		(0x3000 | HUB_SET_DEPTH)
-#define GetPortErrorCount	(0x8000 | HUB_GET_PORT_ERR_COUNT)
+/* class requests from USB 3.1 hub spec, table 10-7 */
+#define SetHubDepth		(0x2000 | HUB_SET_DEPTH)
+#define GetPortErrorCount	(0xa300 | HUB_GET_PORT_ERR_COUNT)
 
 /*
  * Generic bandwidth allocation constants/support
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index e55a150..8c8f685 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -128,7 +128,7 @@
  */
 struct msm_usb_cable {
 	struct notifier_block		nb;
-	struct extcon_specific_cable_nb conn;
+	struct extcon_dev		*extcon;
 };
 
 /**
@@ -155,6 +155,10 @@
  *	starting controller using usbcmd run/stop bit.
 * @vbus: VBUS signal state tracking, using extcon framework
 * @id: ID signal state tracking, using extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ *               SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ *	    connector
  */
 struct msm_otg {
 	struct usb_phy phy;
@@ -188,6 +192,9 @@
 
 	struct msm_usb_cable vbus;
 	struct msm_usb_cable id;
+
+	struct gpio_desc *switch_gpio;
+	struct notifier_block reboot;
 };
 
 #endif
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index cfe0528..8c5a818 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -15,6 +15,8 @@
 enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
 enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
 bool of_usb_host_tpl_support(struct device_node *np);
+int of_usb_update_otg_caps(struct device_node *np,
+			struct usb_otg_caps *otg_caps);
 #else
 static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
 {
@@ -30,6 +32,11 @@
 {
 	return false;
 }
+static inline int of_usb_update_otg_caps(struct device_node *np,
+				struct usb_otg_caps *otg_caps)
+{
+	return 0;
+}
 #endif
 
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 52661c5..bd1dcf8 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -41,6 +41,21 @@
 
 };
 
+/**
+ * struct usb_otg_caps - describes the otg capabilities of the device
+ * @otg_rev: The OTG revision number the device is compliant with, it's
+ *		in binary-coded decimal (i.e. 2.0 is 0200H).
+ * @hnp_support: Indicates if the device supports HNP.
+ * @srp_support: Indicates if the device supports SRP.
+ * @adp_support: Indicates if the device supports ADP.
+ */
+struct usb_otg_caps {
+	u16 otg_rev;
+	bool hnp_support;
+	bool srp_support;
+	bool adp_support;
+};
+
 extern const char *usb_otg_state_string(enum usb_otg_state state);
 
 /* Context: can sleep */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 738b30b..0197358 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -265,7 +265,7 @@
 /**
  * delayed_work_pending - Find out whether a delayable work item is currently
  * pending
- * @work: The work item in question
+ * @w: The work item in question
  */
 #define delayed_work_pending(w) \
 	work_pending(&(w)->work)
@@ -366,7 +366,7 @@
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags
  * @max_active: max in-flight work items, 0 for default
- * @args: args for @fmt
+ * @args...: args for @fmt
  *
  * Allocate a workqueue with the specified parameters.  For detailed
  * information on WQ_* flags, please refer to Documentation/workqueue.txt.
@@ -398,7 +398,7 @@
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args: args for @fmt
+ * @args...: args for @fmt
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 8c6e825..d760aa7 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -37,7 +37,7 @@
  *
  * about the mmap helpers (videobuf_mmap_*):
  *
- * The mmaper function allows to map any subset of contingous buffers.
+ * The mmap helpers allow mapping any subset of contiguous buffers.
  * This includes one mmap() call for all buffers (which the original
  * video4linux API uses) as well as one mmap() for every single buffer
  * (which v4l2 uses).
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index 7a6c1d6..f2ffe5b 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -200,4 +200,14 @@
 ssize_t cxl_fd_read(struct file *file, char __user *buf, size_t count,
 			   loff_t *off);
 
+/*
+ * For EEH, a driver may want to assert that a PERST will reload the same image
+ * from flash into the FPGA.
+ *
+ * This is a property of the entire adapter, not a single AFU, so drivers
+ * should set this property with care!
+ */
+void cxl_perst_reloads_same_image(struct cxl_afu *afu,
+				  bool perst_reloads_same_image);
+
 #endif /* _MISC_CXL_H */
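A sketch of where an AFU driver might call the new helper, assuming it has verified that its FPGA image really is reloaded from flash on every PERST (driver and function names are hypothetical):

	#include <misc/cxl.h>

	static int my_afu_init(struct cxl_afu *afu)
	{
		/* adapter-wide property: EEH recovery may safely reuse the image */
		cxl_perst_reloads_same_image(afu, true);
		return 0;
	}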
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index dc03d77..a2f59ec 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -197,6 +197,27 @@
 #define LOWPAN_NHC_UDP_CS_P_11	0xF3 /* source & dest = 0xF0B + 4bit inline */
 #define LOWPAN_NHC_UDP_CS_C	0x04 /* checksum elided */
 
+#define LOWPAN_PRIV_SIZE(llpriv_size)	\
+	(sizeof(struct lowpan_priv) + llpriv_size)
+
+enum lowpan_lltypes {
+	LOWPAN_LLTYPE_BTLE,
+	LOWPAN_LLTYPE_IEEE802154,
+};
+
+struct lowpan_priv {
+	enum lowpan_lltypes lltype;
+
+	/* must be last */
+	u8 priv[0] __aligned(sizeof(void *));
+};
+
+static inline
+struct lowpan_priv *lowpan_priv(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
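A link-layer driver is expected to reserve its private area behind struct lowpan_priv when allocating the net_device and to retrieve it through lowpan_priv(). A sketch assuming a hypothetical 802.15.4 link-layer private struct and setup routine:

	#include <net/6lowpan.h>

	struct my_ll_priv {			/* hypothetical link-layer state */
		u16 short_addr;
	};

	static void my_lowpan_setup(struct net_device *dev);	/* hypothetical */

	static struct net_device *my_lowpan_alloc(void)
	{
		struct net_device *dev;

		dev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct my_ll_priv)),
				   "lowpan%d", NET_NAME_ENUM, my_lowpan_setup);
		if (!dev)
			return NULL;

		lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
		return dev;
	}

	static struct my_ll_priv *my_ll_priv(const struct net_device *dev)
	{
		return (struct my_ll_priv *)lowpan_priv(dev)->priv;
	}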
 #ifdef DEBUG
 /* print data in line */
 static inline void raw_dump_inline(const char *caller, char *msg,
@@ -372,6 +393,8 @@
 	return skb->len + uncomp_header - ret;
 }
 
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
+
 int
 lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
 			 const u8 *saddr, const u8 saddr_type,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 931738b..9d446f13 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -21,6 +21,8 @@
 	struct gnet_stats_rate_est64	tcfc_rate_est;
 	spinlock_t			tcfc_lock;
 	struct rcu_head			tcfc_rcu;
+	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+	struct gnet_stats_queue __percpu *cpu_qstats;
 };
 #define tcf_head	common.tcfc_head
 #define tcf_index	common.tcfc_index
@@ -68,6 +70,17 @@
 	kfree(hf->htab);
 }
 
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+	unsigned long now = jiffies;
+
+	if (tm->lastuse != now)
+		tm->lastuse = now;
+}
+
 #ifdef CONFIG_NET_CLS_ACT
 
 #define ACT_P_CREATED 1
@@ -98,11 +111,10 @@
 };
 
 int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tc_action *a);
 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
 int tcf_hash_check(u32 index, struct tc_action *a, int bind);
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-		    int size, int bind);
+		    int size, int bind, bool cpustats);
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
 void tcf_hash_insert(struct tc_action *a);
 
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index def59d3..0c3ac5a 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -158,8 +158,8 @@
 				 const struct in6_addr *addr);
 	int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
 				 const struct in6_addr *addr);
-	int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
-				struct flowi6 *fl6);
+	int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+			       struct dst_entry **dst, struct flowi6 *fl6);
 	void (*udpv6_encap_enable)(void);
 	void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
 			      const struct in6_addr *daddr,
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 3bd618d..9e1a59e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -512,9 +512,11 @@
 		HCI_AUTO_CONN_DIRECT,
 		HCI_AUTO_CONN_ALWAYS,
 		HCI_AUTO_CONN_LINK_LOSS,
+		HCI_AUTO_CONN_EXPLICIT,
 	} auto_connect;
 
 	struct hci_conn *conn;
+	bool explicit_connect;
 };
 
 extern struct list_head hci_dev_list;
@@ -639,6 +641,7 @@
 	HCI_CONN_DROP,
 	HCI_CONN_PARAM_REMOVAL_PEND,
 	HCI_CONN_NEW_LINK_KEY,
+	HCI_CONN_SCANNING,
 };
 
 static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
@@ -808,6 +811,26 @@
 	return NULL;
 }
 
+static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn  *c;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == LE_LINK && c->state == BT_CONNECT &&
+		    !test_bit(HCI_CONN_SCANNING, &c->flags)) {
+			rcu_read_unlock();
+			return c;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return NULL;
+}
+
 int hci_disconnect(struct hci_conn *conn, __u8 reason);
 bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
 void hci_sco_setup(struct hci_conn *conn, __u8 status);
@@ -823,6 +846,9 @@
 void hci_chan_list_flush(struct hci_conn *conn);
 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
 
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+				     u8 dst_type, u8 sec_level,
+				     u16 conn_timeout, u8 role);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 				u8 dst_type, u8 sec_level, u16 conn_timeout,
 				u8 role);
@@ -988,6 +1014,9 @@
 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 						  bdaddr_t *addr,
 						  u8 addr_type);
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+						    bdaddr_t *addr,
+						    u8 addr_type);
 
 void hci_uuids_clear(struct hci_dev *hdev);
 
@@ -1297,7 +1326,7 @@
 	if (max >= to_multiplier * 8)
 		return -EINVAL;
 
-	max_latency = (to_multiplier * 8 / max) - 1;
+	max_latency = (to_multiplier * 4 / max) - 1;
 	if (latency > 499 || latency > max_latency)
 		return -EINVAL;
 
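The 8 -> 4 change matches the units used in the L2CAP connection parameter update request: the supervision timeout is expressed in 10 ms steps and the connection interval in 1.25 ms steps, and the core spec requires timeout > (1 + latency) * 2 * interval. Substituting the units gives 10 * to_multiplier > (1 + latency) * 2 * 1.25 * max, i.e. latency < (to_multiplier * 4 / max) - 1, which is exactly the corrected bound; the previous factor of 8 accepted slave latencies twice as large as the spec allows.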
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 2239a37..c98afc0 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -55,6 +55,8 @@
 #define L2CAP_INFO_TIMEOUT		msecs_to_jiffies(4000)
 #define L2CAP_MOVE_TIMEOUT		msecs_to_jiffies(4000)
 #define L2CAP_MOVE_ERTX_TIMEOUT		msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD	msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT		msecs_to_jiffies(10000)
 
 #define L2CAP_A2MP_DEFAULT_MTU		670
 
diff --git a/include/net/bond_options.h b/include/net/bond_options.h
index c28aca2..1797235 100644
--- a/include/net/bond_options.h
+++ b/include/net/bond_options.h
@@ -66,6 +66,7 @@
 	BOND_OPT_AD_ACTOR_SYS_PRIO,
 	BOND_OPT_AD_ACTOR_SYSTEM,
 	BOND_OPT_AD_USER_PORT_KEY,
+	BOND_OPT_NUM_PEER_NOTIF_ALIAS,
 	BOND_OPT_LAST
 };
 
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 883fe1e..f0889a2 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -2369,8 +2369,7 @@
  *	method returns 0.)
  *
  * @mgmt_frame_register: Notify driver that a management frame type was
- *	registered. Note that this callback may not sleep, and cannot run
- *	concurrently with itself.
+ *	registered. The callback is allowed to sleep.
  *
  * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device.
  *	Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index 290a9a6..76b1ffa 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -34,6 +34,8 @@
 							   int type);
 	void	(*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
 					       struct net_device *dev);
+	int	(*suspend)(struct wpan_phy *wpan_phy);
+	int	(*resume)(struct wpan_phy *wpan_phy);
 	int	(*add_virtual_intf)(struct wpan_phy *wpan_phy,
 				    const char *name,
 				    unsigned char name_assign_type,
@@ -61,6 +63,8 @@
 					 s8 max_frame_retries);
 	int	(*set_lbt_mode)(struct wpan_phy *wpan_phy,
 				struct wpan_dev *wpan_dev, bool mode);
+	int	(*set_ackreq_default)(struct wpan_phy *wpan_phy,
+				      struct wpan_dev *wpan_dev, bool ackreq);
 };
 
 static inline bool
@@ -171,6 +175,9 @@
 	struct list_head list;
 	struct net_device *netdev;
 
+	/* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */
+	struct net_device *lowpan_dev;
+
 	u32 identifier;
 
 	/* MAC PIB */
@@ -191,6 +198,9 @@
 	bool lbt;
 
 	bool promiscuous_mode;
+
+	/* fallback for acknowledgment bit setting */
+	bool ackreq;
 };
 
 #define to_phy(_dev)	container_of(_dev, struct wpan_phy, dev)
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 2d1d73c..9fcaedf 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -140,14 +140,16 @@
 
 struct sk_buff;
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-			      __be32 from, __be32 to, int pseudohdr);
+			      __be32 from, __be32 to, bool pseudohdr);
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 			       const __be32 *from, const __be32 *to,
-			       int pseudohdr);
+			       bool pseudohdr);
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+				     __wsum diff, bool pseudohdr);
 
 static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
 					    __be16 from, __be16 to,
-					    int pseudohdr)
+					    bool pseudohdr)
 {
 	inet_proto_csum_replace4(sum, skb, (__force __be32)from,
 				 (__force __be32)to, pseudohdr);
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index c15d394..ccd6d8b 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -49,9 +49,38 @@
 	if (classid != sk->sk_classid)
 		sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	u32 classid = task_cls_state(current)->classid;
+
+	/* Due to the nature of the classifier it is required to ignore all
+	 * packets originating from softirq context as accessing `current'
+	 * would lead to false results.
+	 *
+	 * This test assumes that all callers of dev_queue_xmit() explicitly
+	 * disable bh. Knowing this, it is possible to detect softirq based
+	 * calls by looking at the number of nested bh disable calls because
+	 * softirqs always disable bh.
+	 */
+	if (in_serving_softirq()) {
+		/* If there is an sk_classid we'll use that. */
+		if (!skb->sk)
+			return 0;
+
+		classid = skb->sk->sk_classid;
+	}
+
+	return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+	return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif  /* _NET_CLS_CGROUP_H */
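task_get_classid() centralises the softirq/current check so a classifier only has to consume the result. A sketch of a classify callback using it (structure and names are hypothetical; the in-tree cls_cgroup classifier is the real consumer):

	#include <net/pkt_cls.h>
	#include <net/cls_cgroup.h>

	static int my_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
				      struct tcf_result *res)
	{
		u32 classid = task_get_classid(skb);

		if (!classid)
			return -1;	/* no class configured for this task */

		res->classid = classid;
		res->class = 0;
		return 0;
	}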
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fbca63b..b34d812 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -171,6 +171,11 @@
 	return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
 }
 
+static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
+{
+	return !!((ds->dsa_port_mask) & (1 << p));
+}
+
 static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
 {
 	return ds->phys_port_mask & (1 << p) && ds->ports[p];
@@ -296,12 +301,28 @@
 				     u32 br_port_mask);
 	int	(*port_stp_update)(struct dsa_switch *ds, int port,
 				   u8 state);
-	int	(*fdb_add)(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
-	int	(*fdb_del)(struct dsa_switch *ds, int port,
-			   const unsigned char *addr, u16 vid);
-	int	(*fdb_getnext)(struct dsa_switch *ds, int port,
-			       unsigned char *addr, bool *is_static);
+
+	/*
+	 * VLAN support
+	 */
+	int	(*port_pvid_get)(struct dsa_switch *ds, int port, u16 *pvid);
+	int	(*port_pvid_set)(struct dsa_switch *ds, int port, u16 pvid);
+	int	(*port_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
+				 bool untagged);
+	int	(*port_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+	int	(*vlan_getnext)(struct dsa_switch *ds, u16 *vid,
+				unsigned long *ports, unsigned long *untagged);
+
+	/*
+	 * Forwarding database
+	 */
+	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
+				const unsigned char *addr, u16 vid);
+	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
+				const unsigned char *addr, u16 vid);
+	int	(*port_fdb_getnext)(struct dsa_switch *ds, int port,
+				    unsigned char *addr, u16 *vid,
+				    bool *is_static);
 };
 
 void register_switch_driver(struct dsa_switch_driver *type);
diff --git a/include/net/dst.h b/include/net/dst.h
index 2bc73f8a..9261d92 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -57,6 +57,7 @@
 #define DST_FAKE_RTABLE		0x0040
 #define DST_XFRM_TUNNEL		0x0080
 #define DST_XFRM_QUEUE		0x0100
+#define DST_METADATA		0x0200
 
 	unsigned short		pending_confirm;
 
@@ -83,12 +84,13 @@
 	__u32			__pad2;
 #endif
 
+#ifdef CONFIG_64BIT
+	struct lwtunnel_state   *lwtstate;
 	/*
 	 * Align __refcnt to a 64 bytes alignment
 	 * (L1_CACHE_SIZE would be too much)
 	 */
-#ifdef CONFIG_64BIT
-	long			__pad_to_align_refcnt[2];
+	long			__pad_to_align_refcnt[1];
 #endif
 	/*
 	 * __refcnt wants to be on a different cache line from
@@ -97,6 +99,9 @@
 	atomic_t		__refcnt;	/* client references	*/
 	int			__use;
 	unsigned long		lastuse;
+#ifndef CONFIG_64BIT
+	struct lwtunnel_state   *lwtstate;
+#endif
 	union {
 		struct dst_entry	*next;
 		struct rtable __rcu	*rt_next;
@@ -202,6 +207,12 @@
 		p[metric-1] = val;
 }
 
+/* Kernel-internal feature bits that are unallocated in user space. */
+#define DST_FEATURE_ECN_CA	(1 << 31)
+
+#define DST_FEATURE_MASK	(DST_FEATURE_ECN_CA)
+#define DST_FEATURE_ECN_MASK	(DST_FEATURE_ECN_CA | RTAX_FEATURE_ECN)
+
 static inline u32
 dst_feature(const struct dst_entry *dst, u32 feature)
 {
@@ -284,13 +295,18 @@
 	}
 }
 
-static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+static inline void __skb_dst_copy(struct sk_buff *nskb, unsigned long refdst)
 {
-	nskb->_skb_refdst = oskb->_skb_refdst;
+	nskb->_skb_refdst = refdst;
 	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
 		dst_clone(skb_dst(nskb));
 }
 
+static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
+{
+	__skb_dst_copy(nskb, oskb->_skb_refdst);
+}
+
 /**
  * skb_dst_force - makes sure skb dst is refcounted
  * @skb: buffer
@@ -356,6 +372,9 @@
 }
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
 		int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+	      struct net_device *dev, int initial_ref, int initial_obsolete,
+	      unsigned short flags);
 void __dst_free(struct dst_entry *dst);
 struct dst_entry *dst_destroy(struct dst_entry *dst);
 
@@ -457,7 +476,7 @@
 	return dst;
 }
 
-void dst_init(void);
+void dst_subsys_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644
index 0000000..af9d538
--- /dev/null
+++ b/include/net/dst_metadata.h
@@ -0,0 +1,108 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+	struct dst_entry		dst;
+	union {
+		struct ip_tunnel_info	tun_info;
+	} u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+	if (md_dst && md_dst->dst.flags & DST_METADATA)
+		return md_dst;
+
+	return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
+{
+	struct metadata_dst *md_dst = skb_metadata_dst(skb);
+	struct dst_entry *dst;
+
+	if (md_dst)
+		return &md_dst->u.tun_info;
+
+	dst = skb_dst(skb);
+	if (dst && dst->lwtstate)
+		return lwt_tun_info(dst->lwtstate);
+
+	return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+static inline struct metadata_dst *tun_rx_dst(int md_size)
+{
+	struct metadata_dst *tun_dst;
+
+	tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
+	if (!tun_dst)
+		return NULL;
+
+	tun_dst->u.tun_info.options_len = 0;
+	tun_dst->u.tun_info.mode = 0;
+	return tun_dst;
+}
+
+static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
+						 __be16 flags,
+						 __be64 tunnel_id,
+						 int md_size)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct metadata_dst *tun_dst;
+
+	tun_dst = tun_rx_dst(md_size);
+	if (!tun_dst)
+		return NULL;
+
+	ip_tunnel_key_init(&tun_dst->u.tun_info.key,
+			   iph->saddr, iph->daddr, iph->tos, iph->ttl,
+			   0, 0, tunnel_id, flags);
+	return tun_dst;
+}
+
+static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb,
+						 __be16 flags,
+						 __be64 tunnel_id,
+						 int md_size)
+{
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct metadata_dst *tun_dst;
+	struct ip_tunnel_info *info;
+
+	tun_dst = tun_rx_dst(md_size);
+	if (!tun_dst)
+		return NULL;
+
+	info = &tun_dst->u.tun_info;
+	info->mode = IP_TUNNEL_INFO_IPV6;
+	info->key.tun_flags = flags;
+	info->key.tun_id = tunnel_id;
+	info->key.tp_src = 0;
+	info->key.tp_dst = 0;
+
+	info->key.u.ipv6.src = ip6h->saddr;
+	info->key.u.ipv6.dst = ip6h->daddr;
+	info->key.tos = ipv6_get_dsfield(ip6h);
+	info->key.ttl = ip6h->hop_limit;
+	return tun_dst;
+}
+
+#endif /* __NET_DST_METADATA_H */
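The receive-side helpers build a metadata dst carrying the outer tunnel header as an ip_tunnel_info; the collecting driver attaches it to the skb and consumers later read it back via skb_tunnel_info(). A sketch of an IPv4 tunnel receive path using them (the surrounding driver code and error handling are assumed):

	#include <net/dst_metadata.h>

	static int my_tunnel_rcv(struct sk_buff *skb, __be64 tun_id, __be16 flags)
	{
		struct metadata_dst *tun_dst;
		struct ip_tunnel_info *info;

		tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0 /* no options */);
		if (!tun_dst)
			return -ENOMEM;

		skb_dst_set(skb, &tun_dst->dst);	/* skb now owns the reference */

		/* ... later, e.g. in a collect-metadata consumer ... */
		info = skb_tunnel_info(skb);
		if (info)
			pr_debug("rx tunnel id %llu\n",
				 (unsigned long long)be64_to_cpu(info->key.tun_id));
		return 0;
	}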
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 903a55e..4e8f804 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -19,6 +19,7 @@
 	u8			action;
 	/* 3 bytes hole, try to use */
 	u32			target;
+	__be64			tun_id;
 	struct fib_rule __rcu	*ctarget;
 	struct net		*fr_net;
 
diff --git a/include/net/flow.h b/include/net/flow.h
index 8109a15..acd6a09 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -10,6 +10,7 @@
 #include <linux/socket.h>
 #include <linux/in6.h>
 #include <linux/atomic.h>
+#include <net/flow_dissector.h>
 
 /*
  * ifindex generation is per-net namespace, and loopback is
@@ -19,6 +20,10 @@
 
 #define LOOPBACK_IFINDEX	1
 
+struct flowi_tunnel {
+	__be64			tun_id;
+};
+
 struct flowi_common {
 	int	flowic_oif;
 	int	flowic_iif;
@@ -29,7 +34,9 @@
 	__u8	flowic_flags;
 #define FLOWI_FLAG_ANYSRC		0x01
 #define FLOWI_FLAG_KNOWN_NH		0x02
+#define FLOWI_FLAG_VRFSRC		0x04
 	__u32	flowic_secid;
+	struct flowi_tunnel flowic_tun_key;
 };
 
 union flowi_uli {
@@ -66,6 +73,7 @@
 #define flowi4_proto		__fl_common.flowic_proto
 #define flowi4_flags		__fl_common.flowic_flags
 #define flowi4_secid		__fl_common.flowic_secid
+#define flowi4_tun_key		__fl_common.flowic_tun_key
 
 	/* (saddr,daddr) must be grouped, same order as in IP header */
 	__be32			saddr;
@@ -95,6 +103,7 @@
 	fl4->flowi4_proto = proto;
 	fl4->flowi4_flags = flags;
 	fl4->flowi4_secid = 0;
+	fl4->flowi4_tun_key.tun_id = 0;
 	fl4->daddr = daddr;
 	fl4->saddr = saddr;
 	fl4->fl4_dport = dport;
@@ -122,6 +131,7 @@
 #define flowi6_proto		__fl_common.flowic_proto
 #define flowi6_flags		__fl_common.flowic_flags
 #define flowi6_secid		__fl_common.flowic_secid
+#define flowi6_tun_key		__fl_common.flowic_tun_key
 	struct in6_addr		daddr;
 	struct in6_addr		saddr;
 	__be32			flowlabel;
@@ -165,6 +175,7 @@
 #define flowi_proto	u.__fl_common.flowic_proto
 #define flowi_flags	u.__fl_common.flowic_flags
 #define flowi_secid	u.__fl_common.flowic_secid
+#define flowi_tun_key	u.__fl_common.flowic_tun_key
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
@@ -233,4 +244,22 @@
 void flow_cache_flush_deferred(struct net *net);
 extern atomic_t flow_cache_genid;
 
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6)
+{
+	struct flow_keys keys;
+
+	return __get_hash_from_flowi6(fl6, &keys);
+}
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys);
+
+static inline __u32 get_hash_from_flowi4(const struct flowi4 *fl4)
+{
+	struct flow_keys keys;
+
+	return __get_hash_from_flowi4(fl4, &keys);
+}
+
 #endif
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 1a8c224..8c8548c 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -2,7 +2,6 @@
 #define _NET_FLOW_DISSECTOR_H
 
 #include <linux/types.h>
-#include <linux/skbuff.h>
 #include <linux/in6.h>
 #include <uapi/linux/if_ether.h>
 
@@ -13,8 +12,13 @@
 struct flow_dissector_key_control {
 	u16	thoff;
 	u16	addr_type;
+	u32	flags;
 };
 
+#define FLOW_DIS_IS_FRAGMENT	BIT(0)
+#define FLOW_DIS_FIRST_FRAG	BIT(1)
+#define FLOW_DIS_ENCAPSULATION	BIT(2)
+
 /**
  * struct flow_dissector_key_basic:
  * @thoff: Transport header offset
@@ -123,6 +127,11 @@
 	FLOW_DISSECTOR_KEY_MAX,
 };
 
+#define FLOW_DISSECTOR_F_PARSE_1ST_FRAG		BIT(0)
+#define FLOW_DISSECTOR_F_STOP_AT_L3		BIT(1)
+#define FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL	BIT(2)
+#define FLOW_DISSECTOR_F_STOP_AT_ENCAP		BIT(3)
+
 struct flow_dissector_key {
 	enum flow_dissector_key_id key_id;
 	size_t offset; /* offset of struct flow_dissector_key_*
@@ -134,23 +143,6 @@
 	unsigned short int offset[FLOW_DISSECTOR_KEY_MAX];
 };
 
-void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
-			     const struct flow_dissector_key *key,
-			     unsigned int key_count);
-
-bool __skb_flow_dissect(const struct sk_buff *skb,
-			struct flow_dissector *flow_dissector,
-			void *target_container,
-			void *data, __be16 proto, int nhoff, int hlen);
-
-static inline bool skb_flow_dissect(const struct sk_buff *skb,
-				    struct flow_dissector *flow_dissector,
-				    void *target_container)
-{
-	return __skb_flow_dissect(skb, flow_dissector, target_container,
-				  NULL, 0, 0, 0);
-}
-
 struct flow_keys {
 	struct flow_dissector_key_control control;
 #define FLOW_KEYS_HASH_START_FIELD basic
@@ -170,38 +162,6 @@
 extern struct flow_dissector flow_keys_dissector;
 extern struct flow_dissector flow_keys_buf_dissector;
 
-static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
-					      struct flow_keys *flow)
-{
-	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
-				  NULL, 0, 0, 0);
-}
-
-static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
-						  void *data, __be16 proto,
-						  int nhoff, int hlen)
-{
-	memset(flow, 0, sizeof(*flow));
-	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
-				  data, proto, nhoff, hlen);
-}
-
-__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
-			    void *data, int hlen_proto);
-
-static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
-					int thoff, u8 ip_proto)
-{
-	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
-}
-
-u32 flow_hash_from_keys(struct flow_keys *keys);
-void __skb_get_hash(struct sk_buff *skb);
-u32 skb_get_poff(const struct sk_buff *skb);
-u32 __skb_get_poff(const struct sk_buff *skb, void *data,
-		   const struct flow_keys *keys, int hlen);
-
 /* struct flow_keys_digest:
  *
  * This structure is used to hold a digest of the full flow keys. This is a
@@ -217,4 +177,11 @@
 void make_flow_keys_digest(struct flow_keys_digest *digest,
 			   const struct flow_keys *flow);
 
+static inline bool flow_keys_have_l4(struct flow_keys *keys)
+{
+	return (keys->ports.ports || keys->tags.flow_label);
+}
+
+u32 flow_hash_from_keys(struct flow_keys *keys);
+
 #endif
diff --git a/include/net/geneve.h b/include/net/geneve.h
index 2a0543a..3106ed6 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -62,40 +62,9 @@
 	struct geneve_opt options[];
 };
 
-static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
-{
-	return (struct genevehdr *)(udp_hdr(skb) + 1);
-}
-
 #ifdef CONFIG_INET
-struct geneve_sock;
-
-typedef void (geneve_rcv_t)(struct geneve_sock *gs, struct sk_buff *skb);
-
-struct geneve_sock {
-	struct list_head	list;
-	geneve_rcv_t		*rcv;
-	void			*rcv_data;
-	struct socket		*sock;
-	struct rcu_head		rcu;
-	int			refcnt;
-	struct udp_offload	udp_offloads;
-};
-
-#define GENEVE_VER 0
-#define GENEVE_BASE_HLEN (sizeof(struct udphdr) + sizeof(struct genevehdr))
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
-				    geneve_rcv_t *rcv, void *data,
-				    bool no_share, bool ipv6);
-
-void geneve_sock_release(struct geneve_sock *vs);
-
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
-		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
-		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
-		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-		    bool csum, bool xnet);
+struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
+					u8 name_assign_type, u16 dst_port);
 #endif /*ifdef CONFIG_INET */
 
 #endif /*ifdef__NET_GENEVE_H */
diff --git a/include/net/gre.h b/include/net/gre.h
index b531820..97eafdc 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -4,6 +4,12 @@
 #include <linux/skbuff.h>
 #include <net/ip_tunnels.h>
 
+struct gre_base_hdr {
+	__be16 flags;
+	__be16 protocol;
+};
+#define GRE_HEADER_SECTION 4
+
 #define GREPROTO_CISCO		0
 #define GREPROTO_PPTP		1
 #define GREPROTO_MAX		2
@@ -14,91 +20,9 @@
 	void (*err_handler)(struct sk_buff *skb, u32 info);
 };
 
-struct gre_base_hdr {
-	__be16 flags;
-	__be16 protocol;
-};
-#define GRE_HEADER_SECTION 4
-
 int gre_add_protocol(const struct gre_protocol *proto, u8 version);
 int gre_del_protocol(const struct gre_protocol *proto, u8 version);
 
-struct gre_cisco_protocol {
-	int (*handler)(struct sk_buff *skb, const struct tnl_ptk_info *tpi);
-	int (*err_handler)(struct sk_buff *skb, u32 info,
-			   const struct tnl_ptk_info *tpi);
-	u8 priority;
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *proto);
-int gre_cisco_unregister(struct gre_cisco_protocol *proto);
-
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
-		      int hdr_len);
-
-static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-						  bool csum)
-{
-	return iptunnel_handle_offloads(skb, csum,
-					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
-}
-
-
-static inline int ip_gre_calc_hlen(__be16 o_flags)
-{
-	int addend = 4;
-
-	if (o_flags&TUNNEL_CSUM)
-		addend += 4;
-	if (o_flags&TUNNEL_KEY)
-		addend += 4;
-	if (o_flags&TUNNEL_SEQ)
-		addend += 4;
-	return addend;
-}
-
-static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
-{
-	__be16 tflags = 0;
-
-	if (flags & GRE_CSUM)
-		tflags |= TUNNEL_CSUM;
-	if (flags & GRE_ROUTING)
-		tflags |= TUNNEL_ROUTING;
-	if (flags & GRE_KEY)
-		tflags |= TUNNEL_KEY;
-	if (flags & GRE_SEQ)
-		tflags |= TUNNEL_SEQ;
-	if (flags & GRE_STRICT)
-		tflags |= TUNNEL_STRICT;
-	if (flags & GRE_REC)
-		tflags |= TUNNEL_REC;
-	if (flags & GRE_VERSION)
-		tflags |= TUNNEL_VERSION;
-
-	return tflags;
-}
-
-static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
-{
-	__be16 flags = 0;
-
-	if (tflags & TUNNEL_CSUM)
-		flags |= GRE_CSUM;
-	if (tflags & TUNNEL_ROUTING)
-		flags |= GRE_ROUTING;
-	if (tflags & TUNNEL_KEY)
-		flags |= GRE_KEY;
-	if (tflags & TUNNEL_SEQ)
-		flags |= GRE_SEQ;
-	if (tflags & TUNNEL_STRICT)
-		flags |= GRE_STRICT;
-	if (tflags & TUNNEL_REC)
-		flags |= GRE_REC;
-	if (tflags & TUNNEL_VERSION)
-		flags |= GRE_VERSION;
-
-	return flags;
-}
-
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+				       u8 name_assign_type);
 #endif
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index 0f712c0..cf6c745 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -32,37 +32,28 @@
 		return;
 	}
 
-	/* We run in BH context */
-	spin_lock(&cell->napi_skbs.lock);
-
 	__skb_queue_tail(&cell->napi_skbs, skb);
 	if (skb_queue_len(&cell->napi_skbs) == 1)
 		napi_schedule(&cell->napi);
-
-	spin_unlock(&cell->napi_skbs.lock);
 }
 
-/* called unser BH context */
+/* called under BH context */
 static inline int gro_cell_poll(struct napi_struct *napi, int budget)
 {
 	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
 	struct sk_buff *skb;
 	int work_done = 0;
 
-	spin_lock(&cell->napi_skbs.lock);
 	while (work_done < budget) {
 		skb = __skb_dequeue(&cell->napi_skbs);
 		if (!skb)
 			break;
-		spin_unlock(&cell->napi_skbs.lock);
 		napi_gro_receive(napi, skb);
 		work_done++;
-		spin_lock(&cell->napi_skbs.lock);
 	}
 
 	if (work_done < budget)
-		napi_complete(napi);
-	spin_unlock(&cell->napi_skbs.lock);
+		napi_complete_done(napi, work_done);
 	return work_done;
 }
 
@@ -77,7 +68,7 @@
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
 
-		skb_queue_head_init(&cell->napi_skbs);
+		__skb_queue_head_init(&cell->napi_skbs);
 		netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
 		napi_enable(&cell->napi);
 	}
@@ -92,8 +83,9 @@
 		return;
 	for_each_possible_cpu(i) {
 		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
 		netif_napi_del(&cell->napi);
-		skb_queue_purge(&cell->napi_skbs);
+		__skb_queue_purge(&cell->napi_skbs);
 	}
 	free_percpu(gcells->cells);
 	gcells->cells = NULL;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index b73c88a..b07d126 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -205,8 +205,8 @@
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
 
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index 360c480..879d6e5 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -100,10 +100,8 @@
 void inet_twsk_free(struct inet_timewait_sock *tw);
 void inet_twsk_put(struct inet_timewait_sock *tw);
 
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-			  struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+			   struct inet_hashinfo *hashinfo);
 
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 					   struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@
 			   struct inet_hashinfo *hashinfo);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 		     struct inet_timewait_death_row *twdr, int family);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index d5332dd..4a6009d 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -15,16 +15,20 @@
 #include <net/ipv6.h>
 #include <linux/atomic.h>
 
-struct inetpeer_addr_base {
-	union {
-		__be32			a4;
-		__be32			a6[4];
-		struct in6_addr		in6;
-	};
+/* IPv4 address key for cache lookups */
+struct ipv4_addr_key {
+	__be32	addr;
+	int	vif;
 };
 
+#define INETPEER_MAXKEYSZ   (sizeof(struct in6_addr) / sizeof(u32))
+
 struct inetpeer_addr {
-	struct inetpeer_addr_base	addr;
+	union {
+		struct ipv4_addr_key	a4;
+		struct in6_addr		a6;
+		u32			key[INETPEER_MAXKEYSZ];
+	};
 	__u16				family;
 };
 
@@ -65,69 +69,33 @@
 	int			total;
 };
 
-#define INETPEER_BASE_BIT	0x1UL
-
-static inline struct inet_peer *inetpeer_ptr(unsigned long val)
-{
-	BUG_ON(val & INETPEER_BASE_BIT);
-	return (struct inet_peer *) val;
-}
-
-static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
-{
-	if (!(val & INETPEER_BASE_BIT))
-		return NULL;
-	val &= ~INETPEER_BASE_BIT;
-	return (struct inet_peer_base *) val;
-}
-
-static inline bool inetpeer_ptr_is_peer(unsigned long val)
-{
-	return !(val & INETPEER_BASE_BIT);
-}
-
-static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
-{
-	/* This implicitly clears INETPEER_BASE_BIT */
-	*val = (unsigned long) peer;
-}
-
-static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
-{
-	unsigned long val = (unsigned long) peer;
-	unsigned long orig = *ptr;
-
-	if (!(orig & INETPEER_BASE_BIT) ||
-	    cmpxchg(ptr, orig, val) != orig)
-		return false;
-	return true;
-}
-
-static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
-{
-	*ptr = (unsigned long) base | INETPEER_BASE_BIT;
-}
-
-static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
-{
-	unsigned long val = *from;
-
-	*to = val;
-	if (inetpeer_ptr_is_peer(val)) {
-		struct inet_peer *peer = inetpeer_ptr(val);
-		atomic_inc(&peer->refcnt);
-	}
-}
-
 void inet_peer_base_init(struct inet_peer_base *);
 
 void inet_initpeers(void) __init;
 
 #define INETPEER_METRICS_NEW	(~(u32) 0)
 
-static inline bool inet_metrics_new(const struct inet_peer *p)
+static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip)
 {
-	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+	iaddr->a4.addr = ip;
+	iaddr->family = AF_INET;
+}
+
+static inline __be32 inetpeer_get_addr_v4(struct inetpeer_addr *iaddr)
+{
+	return iaddr->a4.addr;
+}
+
+static inline void inetpeer_set_addr_v6(struct inetpeer_addr *iaddr,
+					struct in6_addr *in6)
+{
+	iaddr->a6 = *in6;
+	iaddr->family = AF_INET6;
+}
+
+static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr)
+{
+	return &iaddr->a6;
 }
 
 /* can be called with or without local BH being disabled */
@@ -137,11 +105,12 @@
 
 static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
 						__be32 v4daddr,
-						int create)
+						int vif, int create)
 {
 	struct inetpeer_addr daddr;
 
-	daddr.addr.a4 = v4daddr;
+	daddr.a4.addr = v4daddr;
+	daddr.a4.vif = vif;
 	daddr.family = AF_INET;
 	return inet_getpeer(base, &daddr, create);
 }
@@ -152,23 +121,36 @@
 {
 	struct inetpeer_addr daddr;
 
-	daddr.addr.in6 = *v6daddr;
+	daddr.a6 = *v6daddr;
 	daddr.family = AF_INET6;
 	return inet_getpeer(base, &daddr, create);
 }
 
+static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a,
+				    const struct inetpeer_addr *b)
+{
+	int i, n;
+
+	if (a->family == AF_INET)
+		n = sizeof(a->a4) / sizeof(u32);
+	else
+		n = sizeof(a->a6) / sizeof(u32);
+
+	for (i = 0; i < n; i++) {
+		if (a->key[i] == b->key[i])
+			continue;
+		if (a->key[i] < b->key[i])
+			return -1;
+		return 1;
+	}
+
+	return 0;
+}
+
 /* can be called from BH context or outside */
 void inet_putpeer(struct inet_peer *p);
 bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
 void inetpeer_invalidate_tree(struct inet_peer_base *);
 
-/*
- * temporary check to make sure we dont access rid, tcp_ts,
- * tcp_ts_stamp if no refcount is taken on inet_peer
- */
-static inline void inet_peer_refcheck(const struct inet_peer *p)
-{
-	WARN_ON_ONCE(atomic_read(&p->refcnt) <= 0);
-}
 #endif /* _NET_INETPEER_H */
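With the keyed layout an IPv4 lookup now carries the VRF interface index next to the address, and inetpeer_addr_cmp() compares whichever key words the family uses. A sketch of a lookup/release pair, assuming the caller already has the peer base and VRF index at hand:

	#include <net/inetpeer.h>

	static void my_peer_rate_check(struct inet_peer_base *base,
				       __be32 daddr, int vif)
	{
		struct inet_peer *peer;

		peer = inet_getpeer_v4(base, daddr, vif, 1 /* create */);
		if (!peer)
			return;

		if (inet_peer_xrlim_allow(peer, HZ))
			pr_debug("one message allowed for this peer\n");

		inet_putpeer(peer);
	}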
diff --git a/include/net/ip.h b/include/net/ip.h
index d5fe9f2..9b9ca28 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -202,10 +202,20 @@
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+			 size_t syncp_offset);
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
 #else
+static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
+					size_t syncp_offset)
+{
+	return snmp_get_cpu_field(mib, cpu, offct);
+}
+
 static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
 {
 	return snmp_fold_field(mib, offt);
@@ -370,22 +380,6 @@
 	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
-static inline void inet_set_txhash(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct flow_keys keys;
-
-	memset(&keys, 0, sizeof(keys));
-
-	keys.addrs.v4addrs.src = inet->inet_saddr;
-	keys.addrs.v4addrs.dst = inet->inet_daddr;
-	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
-	keys.ports.src = inet->inet_sport;
-	keys.ports.dst = inet->inet_dport;
-
-	sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
 static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
 {
 	const struct iphdr *iph = skb_gro_network_header(skb);
@@ -474,6 +468,11 @@
 
 #endif
 
+static inline unsigned int ipv4_addr_hash(__be32 ip)
+{
+	return (__force unsigned int) ip;
+}
+
 bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3b76849..063d304 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -51,6 +51,8 @@
 	struct nlattr	*fc_mp;
 
 	struct nl_info	fc_nlinfo;
+	struct nlattr	*fc_encap;
+	u16		fc_encap_type;
 };
 
 struct fib6_node {
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 5fa643b..a37d043 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -44,7 +44,9 @@
 	u32			fc_flow;
 	u32			fc_nlflags;
 	struct nl_info		fc_nlinfo;
- };
+	struct nlattr		*fc_encap;
+	u16			fc_encap_type;
+};
 
 struct fib_info;
 struct rtable;
@@ -89,6 +91,7 @@
 	struct rtable __rcu * __percpu *nh_pcpu_rth_output;
 	struct rtable __rcu	*nh_rth_input;
 	struct fnhe_hash_bucket	__rcu *nh_exceptions;
+	struct lwtunnel_state	*nh_lwtstate;
 };
 
 /*
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index d8214cb..9a6a3ba 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -4,14 +4,15 @@
 #include <linux/if_tunnel.h>
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
+#include <linux/socket.h>
 #include <linux/types.h>
 #include <linux/u64_stats_sync.h>
 #include <net/dsfield.h>
 #include <net/gro_cells.h>
 #include <net/inet_ecn.h>
-#include <net/ip.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -22,6 +23,44 @@
 /* Keep error state on tunnel for 30 sec */
 #define IPTUNNEL_ERR_TIMEO	(30*HZ)
 
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE	offsetofend(struct ip_tunnel_key, tp_dst)
+
+/* Used to memset ipv4 address padding. */
+#define IP_TUNNEL_KEY_IPV4_PAD	offsetofend(struct ip_tunnel_key, u.ipv4.dst)
+#define IP_TUNNEL_KEY_IPV4_PAD_LEN				\
+	(FIELD_SIZEOF(struct ip_tunnel_key, u) -		\
+	 FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+
+struct ip_tunnel_key {
+	__be64			tun_id;
+	union {
+		struct {
+			__be32	src;
+			__be32	dst;
+		} ipv4;
+		struct {
+			struct in6_addr src;
+			struct in6_addr dst;
+		} ipv6;
+	} u;
+	__be16			tun_flags;
+	u8			tos;		/* TOS for IPv4, TC for IPv6 */
+	u8			ttl;		/* TTL for IPv4, HL for IPv6 */
+	__be16			tp_src;
+	__be16			tp_dst;
+};
+
+/* Flags for ip_tunnel_info mode. */
+#define IP_TUNNEL_INFO_TX	0x01	/* represents tx tunnel parameters */
+#define IP_TUNNEL_INFO_IPV6	0x02	/* key contains IPv6 addresses */
+
+struct ip_tunnel_info {
+	struct ip_tunnel_key	key;
+	u8			options_len;
+	u8			mode;
+};
+
 /* 6rd prefix/relay information */
 #ifdef CONFIG_IPV6_SIT_6RD
 struct ip_tunnel_6rd_parm {
@@ -33,8 +72,8 @@
 #endif
 
 struct ip_tunnel_encap {
-	__u16			type;
-	__u16			flags;
+	u16			type;
+	u16			flags;
 	__be16			sport;
 	__be16			dport;
 };
@@ -51,6 +90,8 @@
 	__be32				 saddr;
 };
 
+struct metadata_dst;
+
 struct ip_tunnel {
 	struct ip_tunnel __rcu	*next;
 	struct hlist_node hash_node;
@@ -62,8 +103,8 @@
 					 * arrived */
 
 	/* These four fields used only by GRE */
-	__u32		i_seqno;	/* The last seen seqno	*/
-	__u32		o_seqno;	/* The last output seqno */
+	u32		i_seqno;	/* The last seen seqno	*/
+	u32		o_seqno;	/* The last output seqno */
 	int		tun_hlen;	/* Precalculated header length */
 	int		mlink;
 
@@ -84,6 +125,7 @@
 	unsigned int		prl_count;	/* # of entries in PRL */
 	int			ip_tnl_net_id;
 	struct gro_cells	gro_cells;
+	bool			collect_md;
 };
 
 #define TUNNEL_CSUM		__cpu_to_be16(0x01)
@@ -118,6 +160,7 @@
 struct ip_tunnel_net {
 	struct net_device *fb_tunnel_dev;
 	struct hlist_head tunnels[IP_TNL_HASH_SIZE];
+	struct ip_tunnel __rcu *collect_md_tun;
 };
 
 struct ip_tunnel_encap_ops {
@@ -136,6 +179,40 @@
 int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
 			    unsigned int num);
 
+static inline void ip_tunnel_key_init(struct ip_tunnel_key *key,
+				      __be32 saddr, __be32 daddr,
+				      u8 tos, u8 ttl,
+				      __be16 tp_src, __be16 tp_dst,
+				      __be64 tun_id, __be16 tun_flags)
+{
+	key->tun_id = tun_id;
+	key->u.ipv4.src = saddr;
+	key->u.ipv4.dst = daddr;
+	memset((unsigned char *)key + IP_TUNNEL_KEY_IPV4_PAD,
+	       0, IP_TUNNEL_KEY_IPV4_PAD_LEN);
+	key->tos = tos;
+	key->ttl = ttl;
+	key->tun_flags = tun_flags;
+
+	/* For tunnel types on top of IPsec, the tp_src and tp_dst of
+	 * the upper tunnel are used.
+	 * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
+	 */
+	key->tp_src = tp_src;
+	key->tp_dst = tp_dst;
+
+	/* Clear struct padding. */
+	if (sizeof(*key) != IP_TUNNEL_KEY_SIZE)
+		memset((unsigned char *)key + IP_TUNNEL_KEY_SIZE,
+		       0, sizeof(*key) - IP_TUNNEL_KEY_SIZE);
+}
+
+static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info
+					       *tun_info)
+{
+	return tun_info->mode & IP_TUNNEL_INFO_IPV6 ? AF_INET6 : AF_INET;
+}
+
 #ifdef CONFIG_INET
 
 int ip_tunnel_init(struct net_device *dev);
@@ -163,7 +240,8 @@
 				   __be32 key);
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-		  const struct tnl_ptk_info *tpi, bool log_ecn_error);
+		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+		  bool log_ecn_error);
 int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
 			 struct ip_tunnel_parm *p);
 int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
@@ -196,8 +274,8 @@
 
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
-		  __be32 src, __be32 dst, __u8 proto,
-		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
+		  __be32 src, __be32 dst, u8 proto,
+		  u8 tos, u8 ttl, __be16 df, bool xnet);
 
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
 					 int gso_type_mask);
@@ -221,6 +299,57 @@
 	}
 }
 
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
+{
+	return info + 1;
+}
+
+static inline void ip_tunnel_info_opts_get(void *to,
+					   const struct ip_tunnel_info *info)
+{
+	memcpy(to, info + 1, info->options_len);
+}
+
+static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
+					   const void *from, int len)
+{
+	memcpy(ip_tunnel_info_opts(info), from, len);
+	info->options_len = len;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+	return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+	return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+	return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
 #endif /* CONFIG_INET */
 
 #endif /* __NET_IP_TUNNELS_H */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 4e3731e..9b9ca87 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -846,6 +846,17 @@
 /* How much time to keep dests in trash */
 #define IP_VS_DEST_TRASH_PERIOD		(120 * HZ)
 
+struct ipvs_sync_daemon_cfg {
+	union nf_inet_addr	mcast_group;
+	int			syncid;
+	u16			sync_maxlen;
+	u16			mcast_port;
+	u8			mcast_af;
+	u8			mcast_ttl;
+	/* multicast interface name */
+	char			mcast_ifn[IP_VS_IFNAME_MAXLEN];
+};
+
 /* IPVS in network namespace */
 struct netns_ipvs {
 	int			gen;		/* Generation */
@@ -961,15 +972,10 @@
 	spinlock_t		sync_buff_lock;
 	struct task_struct	**backup_threads;
 	int			threads_mask;
-	int			send_mesg_maxlen;
-	int			recv_mesg_maxlen;
 	volatile int		sync_state;
-	volatile int		master_syncid;
-	volatile int		backup_syncid;
 	struct mutex		sync_mutex;
-	/* multicast interface name */
-	char			master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-	char			backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+	struct ipvs_sync_daemon_cfg	mcfg;	/* Master Configuration */
+	struct ipvs_sync_daemon_cfg	bcfg;	/* Backup Configuration */
 	/* net name space ptr */
 	struct net		*net;            /* Needed by timer routines */
 	/* Number of heterogeneous destinations, needed because heterogeneous
@@ -1408,7 +1414,8 @@
 /* IPVS sync daemon data and function prototypes
  * (from ip_vs_sync.c)
  */
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *cfg,
+		      int state);
 int stop_sync_thread(struct net *net, int state);
 void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 82dbdb0..711cca4 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -707,54 +707,69 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
-	struct inet_sock *inet = inet_sk(sk);
-	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct flow_keys keys;
 
-	memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF		0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT	1
+#define IP6_AUTO_FLOW_LABEL_OPTIN	2
+#define IP6_AUTO_FLOW_LABEL_FORCED	3
 
-	memcpy(&keys.addrs.v6addrs.src, &np->saddr,
-	       sizeof(keys.addrs.v6addrs.src));
-	memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
-	       sizeof(keys.addrs.v6addrs.dst));
-	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
-	keys.ports.src = inet->inet_sport;
-	keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX		IP6_AUTO_FLOW_LABEL_FORCED
 
-	sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS	IP6_AUTO_FLOW_LABEL_OPTOUT
 
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-					__be32 flowlabel, bool autolabel)
+					__be32 flowlabel, bool autolabel,
+					struct flowi6 *fl6)
 {
-	if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-		u32 hash;
+	u32 hash;
 
-		hash = skb_get_hash(skb);
+	if (flowlabel ||
+	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+	    (!autolabel &&
+	     net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+		return flowlabel;
 
-		/* Since this is being sent on the wire obfuscate hash a bit
-		 * to minimize possbility that any useful information to an
-		 * attacker is leaked. Only lower 20 bits are relevant.
-		 */
-		hash ^= hash >> 12;
+	hash = skb_get_hash_flowi6(skb, fl6);
 
-		flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+	/* Since this is being sent on the wire, obfuscate the hash a bit
+	 * to minimize the possibility that any useful information is
+	 * leaked to an attacker. Only the lower 20 bits are relevant.
+	 */
+	hash = rol32(hash, 16);
 
-		if (net->ipv6.sysctl.flowlabel_state_ranges)
-			flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
-	}
+	flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+
+	if (net->ipv6.sysctl.flowlabel_state_ranges)
+		flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
 
 	return flowlabel;
 }
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+	switch (net->ipv6.sysctl.auto_flowlabels) {
+	case IP6_AUTO_FLOW_LABEL_OFF:
+	case IP6_AUTO_FLOW_LABEL_OPTIN:
+	default:
+		return 0;
+	case IP6_AUTO_FLOW_LABEL_OPTOUT:
+	case IP6_AUTO_FLOW_LABEL_FORCED:
+		return 1;
+	}
+}
 #else
 static inline void ip6_set_txhash(struct sock *sk) { }
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-					__be32 flowlabel, bool autolabel)
+					__be32 flowlabel, bool autolabel,
+					struct flowi6 *fl6)
 {
 	return flowlabel;
 }
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+	return 0;
+}
 #endif
 
 
@@ -832,7 +847,8 @@
 			      &inet6_sk(sk)->cork);
 }
 
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+		   struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 				      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644
index 0000000..fce0e35
--- /dev/null
+++ b/include/net/lwtunnel.h
@@ -0,0 +1,175 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS   7
+#define LWTUNNEL_HASH_SIZE   (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT	BIT(0)
+#define LWTUNNEL_STATE_INPUT_REDIRECT	BIT(1)
+
+struct lwtunnel_state {
+	__u16		type;
+	__u16		flags;
+	atomic_t	refcnt;
+	int		(*orig_output)(struct sock *sk, struct sk_buff *skb);
+	int		(*orig_input)(struct sk_buff *);
+	int             len;
+	__u8            data[0];
+};
+
+struct lwtunnel_encap_ops {
+	int (*build_state)(struct net_device *dev, struct nlattr *encap,
+			   unsigned int family, const void *cfg,
+			   struct lwtunnel_state **ts);
+	int (*output)(struct sock *sk, struct sk_buff *skb);
+	int (*input)(struct sk_buff *skb);
+	int (*fill_encap)(struct sk_buff *skb,
+			  struct lwtunnel_state *lwtstate);
+	int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+	int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+	kfree(lws);
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+	if (lws)
+		atomic_inc(&lws->refcnt);
+
+	return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+	if (!lws)
+		return;
+
+	if (atomic_dec_and_test(&lws->refcnt))
+		lwtstate_free(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+	if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+		return true;
+
+	return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+	if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_INPUT_REDIRECT))
+		return true;
+
+	return false;
+}
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+			   unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+			   unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+			 struct nlattr *encap,
+			 unsigned int family, const void *cfg,
+			 struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+			struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_input(struct sk_buff *skb);
+
+#else
+
+static inline void lwtstate_free(struct lwtunnel_state *lws)
+{
+}
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+	return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+	return false;
+}
+
+static inline bool lwtunnel_input_redirect(struct lwtunnel_state *lwtstate)
+{
+	return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+					 unsigned int num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+					 unsigned int num)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+				       struct nlattr *encap,
+				       unsigned int family, const void *cfg,
+				       struct lwtunnel_state **lws)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+				      struct lwtunnel_state *lwtstate)
+{
+	return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+	return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+	return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+				     struct lwtunnel_state *b)
+{
+	return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_input(struct sk_buff *skb)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 6b1077c..e3314e5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -973,6 +973,10 @@
  * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
  *	If this flag is set, the stack cannot do any replay detection
  *	hence the driver or hardware will have to do that.
+ * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
+ *	flag indicates that the PN was verified for replay protection.
+ *	Note that this flag is currently only supported when the frame
+ *	is also decrypted (i.e. @RX_FLAG_DECRYPTED must be set)
  * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on
  *	the frame.
  * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on
@@ -997,9 +1001,6 @@
  * @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
  *	number (@ampdu_reference) must be populated and be a distinct number for
  *	each A-MPDU
- * @RX_FLAG_AMPDU_REPORT_ZEROLEN: driver reports 0-length subframes
- * @RX_FLAG_AMPDU_IS_ZEROLEN: This is a zero-length subframe, for
- *	monitoring purposes only
  * @RX_FLAG_AMPDU_LAST_KNOWN: last subframe is known, should be set on all
  *	subframes of a single A-MPDU
  * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU
@@ -1039,8 +1040,8 @@
 	RX_FLAG_NO_SIGNAL_VAL		= BIT(12),
 	RX_FLAG_HT_GF			= BIT(13),
 	RX_FLAG_AMPDU_DETAILS		= BIT(14),
-	RX_FLAG_AMPDU_REPORT_ZEROLEN	= BIT(15),
-	RX_FLAG_AMPDU_IS_ZEROLEN	= BIT(16),
+	RX_FLAG_PN_VALIDATED		= BIT(15),
+	/* bit 16 free */
 	RX_FLAG_AMPDU_LAST_KNOWN	= BIT(17),
 	RX_FLAG_AMPDU_IS_LAST		= BIT(18),
 	RX_FLAG_AMPDU_DELIM_CRC_ERROR	= BIT(19),
@@ -1491,8 +1492,10 @@
  * 	- Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
+ * @drv_priv: pointer for driver use
  */
 struct ieee80211_key_conf {
+	void *drv_priv;
 	atomic64_t tx_pn;
 	u32 cipher;
 	u8 icv_len;
@@ -1675,7 +1678,6 @@
  * @tdls: indicates whether the STA is a TDLS peer
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *	valid if the STA is a TDLS peer in the first place.
- * @mfp: indicates whether the STA uses management frame protection or not.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1693,7 +1695,6 @@
 	struct ieee80211_sta_rates __rcu *rates;
 	bool tdls;
 	bool tdls_initiator;
-	bool mfp;
 
 	struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1888,6 +1889,9 @@
  * @IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS: The HW supports scanning on all bands
  *	in one command, mac80211 doesn't have to run separate scans per band.
  *
+ * @IEEE80211_HW_TDLS_WIDER_BW: The device/driver supports wider bandwidth
+ *	than the BSS bandwidth for a TDLS link on the base channel.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1920,6 +1924,7 @@
 	IEEE80211_HW_CHANCTX_STA_CSA,
 	IEEE80211_HW_SUPPORTS_CLONED_SKBS,
 	IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS,
+	IEEE80211_HW_TDLS_WIDER_BW,
 
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
@@ -3696,20 +3701,28 @@
 void ieee80211_restart_hw(struct ieee80211_hw *hw);
 
 /**
- * ieee80211_napi_add - initialize mac80211 NAPI context
- * @hw: the hardware to initialize the NAPI context on
- * @napi: the NAPI context to initialize
- * @napi_dev: dummy NAPI netdevice, here to not waste the space if the
- *	driver doesn't use NAPI
- * @poll: poll function
- * @weight: default weight
+ * ieee80211_rx_napi - receive frame from NAPI context
  *
- * See also netif_napi_add().
+ * Use this function to hand received frames to mac80211. The receive
+ * buffer in @skb must start with an IEEE 802.11 header. If a paged
+ * @skb is used, the driver is advised to put the ieee80211
+ * header of the frame on the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other. Calls to
+ * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
+ * mixed for a single hardware. Must not run concurrently with
+ * ieee80211_tx_status() or ieee80211_tx_status_ni().
+ *
+ * This function must be called with BHs disabled.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac80211 after this call
+ * @napi: the NAPI context
  */
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-			struct net_device *napi_dev,
-			int (*poll)(struct napi_struct *, int),
-			int weight);
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+		       struct napi_struct *napi);
 
 /**
  * ieee80211_rx - receive frame
@@ -3731,7 +3744,10 @@
  * @hw: the hardware this frame came in on
  * @skb: the buffer to receive, owned by mac80211 after this call
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
+static inline void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+	ieee80211_rx_napi(hw, skb, NULL);
+}
 
 /**
  * ieee80211_rx_irqsafe - receive frame
@@ -4315,19 +4331,6 @@
 			    struct sk_buff *skb, u8 *p2k);
 
 /**
- * ieee80211_aes_cmac_calculate_k1_k2 - calculate the AES-CMAC sub keys
- *
- * This function computes the two AES-CMAC sub-keys, based on the
- * previously installed master key.
- *
- * @keyconf: the parameter passed with the set key
- * @k1: a buffer to be filled with the 1st sub-key
- * @k2: a buffer to be filled with the 2nd sub-key
- */
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-					u8 *k1, u8 *k2);
-
-/**
  * ieee80211_get_key_tx_seq - get key TX sequence counter
  *
  * @keyconf: the parameter passed with the set key
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index f534a46..b7f9961 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -321,23 +321,6 @@
 void ieee802154_unregister_hw(struct ieee802154_hw *hw);
 
 /**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
-/**
  * ieee802154_rx_irqsafe - receive frame
  *
  * Like ieee802154_rx() but can be called in IRQ context
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644
index 0000000..4757997
--- /dev/null
+++ b/include/net/mpls_iptunnel.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+	u32	label[MAX_NEW_LABELS];
+	u32	labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+	return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b3a7751..aba5695 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -182,7 +182,8 @@
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 		   const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr);
+		   const struct in6_addr *daddr, const struct in6_addr *saddr,
+		   struct sk_buff *oskb);
 
 void ndisc_send_rs(struct net_device *dev,
 		   const struct in6_addr *saddr, const struct in6_addr *daddr);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index bd33e66..8b68384 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -125,6 +125,7 @@
 	unsigned long forced_gc_runs;	/* number of forced GC runs */
 
 	unsigned long unres_discards;	/* number of unresolved drops */
+	unsigned long table_fulls;      /* times even gc couldn't help */
 };
 
 #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field)
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index e951453..2dcea63 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -118,6 +118,9 @@
 #endif
 	struct sock		*nfnl;
 	struct sock		*nfnl_stash;
+#if IS_ENABLED(CONFIG_NETFILTER_NETLINK_ACCT)
+	struct list_head        nfnl_acct_list;
+#endif
 #endif
 #ifdef CONFIG_WEXT_CORE
 	struct sk_buff_head	wext_nlevents;
diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h
new file mode 100644
index 0000000..42008f1
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV4_H_
+#define _NF_DUP_IPV4_H_
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h
new file mode 100644
index 0000000..ed6bd66
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h
@@ -0,0 +1,7 @@
+#ifndef _NF_DUP_IPV6_H_
+#define _NF_DUP_IPV6_H_
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in6_addr *gw, int oif);
+
+#endif /* _NF_DUP_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 37cd391..f5e23c6 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -250,8 +250,12 @@
 void nf_ct_iterate_cleanup(struct net *net,
 			   int (*iter)(struct nf_conn *i, void *data),
 			   void *data, u32 portid, int report);
+
+struct nf_conntrack_zone;
+
 void nf_conntrack_free(struct nf_conn *ct);
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
 				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp);
@@ -291,7 +295,9 @@
 extern unsigned int nf_conntrack_hash_rnd;
 void init_nf_conntrack_hash_rnd(void);
 
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags);
 
 #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index f2f0fa3..c03f9c4 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -52,7 +52,8 @@
 
 /* Find a connection corresponding to a tuple. */
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net,
+		      const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple);
 
 int __nf_conntrack_confirm(struct sk_buff *skb);
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 3f3aecb..dce56f0 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -4,7 +4,9 @@
 
 #ifndef _NF_CONNTRACK_EXPECT_H
 #define _NF_CONNTRACK_EXPECT_H
+
 #include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 extern unsigned int nf_ct_expect_hsize;
 extern unsigned int nf_ct_expect_max;
@@ -76,15 +78,18 @@
 void nf_conntrack_expect_fini(void);
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+		    const struct nf_conntrack_zone *zone,
 		    const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+		      const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple);
 
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+		       const struct nf_conntrack_zone *zone,
 		       const struct nf_conntrack_tuple *tuple);
 
 void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index dec6336..7e2b1d0 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -54,7 +54,11 @@
 #ifdef CONFIG_NF_CONNTRACK_LABELS
 int nf_conntrack_labels_init(void);
 void nf_conntrack_labels_fini(void);
+int nf_connlabels_get(struct net *net, unsigned int n_bits);
+void nf_connlabels_put(struct net *net);
 #else
 static inline int nf_conntrack_labels_init(void) { return 0; }
 static inline void nf_conntrack_labels_fini(void) {}
+static inline int nf_connlabels_get(struct net *net, unsigned int n_bits) { return 0; }
+static inline void nf_connlabels_put(struct net *net) {}
 #endif
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 034efe8..4e32512 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -1,25 +1,89 @@
 #ifndef _NF_CONNTRACK_ZONES_H
 #define _NF_CONNTRACK_ZONES_H
 
-#define NF_CT_DEFAULT_ZONE	0
+#include <linux/netfilter/nf_conntrack_zones_common.h>
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack_extend.h>
 
-struct nf_conntrack_zone {
-	u16	id;
-};
+static inline const struct nf_conntrack_zone *
+nf_ct_zone(const struct nf_conn *ct)
+{
+	const struct nf_conntrack_zone *nf_ct_zone = NULL;
 
-static inline u16 nf_ct_zone(const struct nf_conn *ct)
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
+#endif
+	return nf_ct_zone ? nf_ct_zone : &nf_ct_zone_dflt;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
+{
+	zone->id = id;
+	zone->flags = flags;
+	zone->dir = dir;
+
+	return zone;
+}
+
+static inline const struct nf_conntrack_zone *
+nf_ct_zone_tmpl(const struct nf_conn *tmpl, const struct sk_buff *skb,
+		struct nf_conntrack_zone *tmp)
+{
+	const struct nf_conntrack_zone *zone;
+
+	if (!tmpl)
+		return &nf_ct_zone_dflt;
+
+	zone = nf_ct_zone(tmpl);
+	if (zone->flags & NF_CT_FLAG_MARK)
+		zone = nf_ct_zone_init(tmp, skb->mark, zone->dir, 0);
+
+	return zone;
+}
+
+static inline int nf_ct_zone_add(struct nf_conn *ct, gfp_t flags,
+				 const struct nf_conntrack_zone *info)
 {
 #ifdef CONFIG_NF_CONNTRACK_ZONES
 	struct nf_conntrack_zone *nf_ct_zone;
-	nf_ct_zone = nf_ct_ext_find(ct, NF_CT_EXT_ZONE);
-	if (nf_ct_zone)
-		return nf_ct_zone->id;
+
+	nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, flags);
+	if (!nf_ct_zone)
+		return -ENOMEM;
+
+	nf_ct_zone_init(nf_ct_zone, info->id, info->dir,
+			info->flags);
 #endif
-	return NF_CT_DEFAULT_ZONE;
+	return 0;
 }
 
-#endif /* CONFIG_NF_CONNTRACK || CONFIG_NF_CONNTRACK_MODULE */
+static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
+					  enum ip_conntrack_dir dir)
+{
+	return zone->dir & (1 << dir);
+}
+
+static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
+				enum ip_conntrack_dir dir)
+{
+	return nf_ct_zone_matches_dir(zone, dir) ?
+	       zone->id : NF_CT_DEFAULT_ZONE_ID;
+}
+
+static inline bool nf_ct_zone_equal(const struct nf_conn *a,
+				    const struct nf_conntrack_zone *b,
+				    enum ip_conntrack_dir dir)
+{
+	return nf_ct_zone_id(nf_ct_zone(a), dir) ==
+	       nf_ct_zone_id(b, dir);
+}
+
+static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
+					const struct nf_conntrack_zone *b)
+{
+	return nf_ct_zone(a)->id == b->id;
+}
+#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
 #endif /* _NF_CONNTRACK_ZONES_H */
diff --git a/include/net/netfilter/nft_dup.h b/include/net/netfilter/nft_dup.h
new file mode 100644
index 0000000..6b84cf6
--- /dev/null
+++ b/include/net/netfilter/nft_dup.h
@@ -0,0 +1,9 @@
+#ifndef _NFT_DUP_H_
+#define _NFT_DUP_H_
+
+struct nft_dup_inet {
+	enum nft_registers	sreg_addr:8;
+	enum nft_registers	sreg_dev:8;
+};
+
+#endif /* _NFT_DUP_H_ */
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 8d93544..c0368db 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -31,6 +31,7 @@
 	int auto_flowlabels;
 	int icmpv6_time;
 	int anycast_src_echo_reply;
+	int ip_nonlocal_bind;
 	int fwmark_reflect;
 	int idgen_retries;
 	int idgen_delay;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 532e4ba..38aa498 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -14,5 +14,6 @@
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header *nf_log_dir_header;
 #endif
+	struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 };
 #endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 01fc8c5..d0d0f1e 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -79,6 +79,7 @@
 	int   (*close)(struct nci_dev *ndev);
 	int   (*send)(struct nci_dev *ndev, struct sk_buff *skb);
 	int   (*setup)(struct nci_dev *ndev);
+	int   (*post_setup)(struct nci_dev *ndev);
 	int   (*fw_download)(struct nci_dev *ndev, const char *firmware_name);
 	__u32 (*get_rfprotocol)(struct nci_dev *ndev, __u8 rf_protocol);
 	int   (*discover_se)(struct nci_dev *ndev);
@@ -277,6 +278,8 @@
 			    unsigned long opt),
 		unsigned long opt, __u32 timeout);
 int nci_prop_cmd(struct nci_dev *ndev, __u8 oid, size_t len, __u8 *payload);
+int nci_core_reset(struct nci_dev *ndev);
+int nci_core_init(struct nci_dev *ndev);
 
 int nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
 int nci_set_config(struct nci_dev *ndev, __u8 id, size_t len, __u8 *val);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index f9e58ae..30afc9a 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -203,6 +203,7 @@
 	int n_vendor_cmds;
 
 	struct nfc_ops *ops;
+	struct genl_info *cur_cmd_info;
 };
 #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
 
@@ -318,4 +319,44 @@
 	return 0;
 }
 
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+						 enum nfc_attrs attr,
+						 u32 oui, u32 subcmd,
+						 int approxlen);
+int nfc_vendor_cmd_reply(struct sk_buff *skb);
+
+/**
+ * nfc_vendor_cmd_alloc_reply_skb - allocate vendor command reply
+ * @dev: nfc device
+ * @oui: vendor oui
+ * @approxlen: an upper bound of the length of the data that will
+ *      be put into the skb
+ *
+ * This function allocates and pre-fills an skb for a reply to
+ * a vendor command. Since it is intended for a reply, calling
+ * it outside of a vendor command's doit() operation is invalid.
+ *
+ * The returned skb is pre-filled with some identifying data in
+ * a way that any data that is put into the skb (with skb_put(),
+ * nla_put() or similar) will end up being within the
+ * %NFC_ATTR_VENDOR_DATA attribute, so all that needs to be done
+ * with the skb is adding data for the corresponding userspace tool
+ * which can then read that data out of the vendor data attribute.
+ * You must not modify the skb in any other way.
+ *
+ * When done, call nfc_vendor_cmd_reply() with the skb and return
+ * its error code as the result of the doit() operation.
+ *
+ * Return: An allocated and pre-filled skb. %NULL if any errors happen.
+ */
+static inline struct sk_buff *
+nfc_vendor_cmd_alloc_reply_skb(struct nfc_dev *dev,
+				u32 oui, u32 subcmd, int approxlen)
+{
+	return __nfc_alloc_vendor_cmd_reply_skb(dev,
+						NFC_ATTR_VENDOR_DATA,
+						oui,
+						subcmd, approxlen);
+}
+
 #endif /* __NET_NFC_H */
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index b0ab530..cf2713d 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -52,6 +52,8 @@
 
 	NL802154_CMD_SET_LBT_MODE,
 
+	NL802154_CMD_SET_ACKREQ_DEFAULT,
+
 	/* add new commands above here */
 
 	/* used to define NL802154_CMD_MAX below */
@@ -104,6 +106,8 @@
 
 	NL802154_ATTR_SUPPORTED_COMMANDS,
 
+	NL802154_ATTR_ACKREQ_DEFAULT,
+
 	/* add attributes here, update the policy in nl802154.c */
 
 	__NL802154_ATTR_AFTER_LAST,
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 2342bf1..401038d 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -110,10 +110,8 @@
 		__qdisc_run(q);
 }
 
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
-		       struct tcf_result *res);
 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
-		struct tcf_result *res);
+		struct tcf_result *res, bool compat_mode);
 
 static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
 {
diff --git a/include/net/route.h b/include/net/route.h
index fe22d03..cc61cb9 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -188,8 +188,12 @@
 void ip_rt_send_redirect(struct sk_buff *skb);
 
 unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id);
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 				__be32 addr);
+unsigned int inet_addr_type_dev_table(struct net *net,
+				      const struct net_device *dev,
+				      __be32 addr);
 void ip_rt_multicast_event(struct in_device *);
 int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
 void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
@@ -250,6 +254,9 @@
 	if (inet_sk(sk)->transparent)
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
+	if (netif_index_is_vrf(sock_net(sk), oif))
+		flow_flags |= FLOWI_FLAG_VRFSRC;
+
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
 			   protocol, flow_flags, dst, src, dport, sport);
 }
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 343d922..18fdb98 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -141,6 +141,7 @@
 				    unsigned char name_assign_type,
 				    const struct rtnl_link_ops *ops,
 				    struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 2738f6f..444faa8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -340,6 +340,7 @@
 extern struct Qdisc_ops noop_qdisc_ops;
 extern struct Qdisc_ops pfifo_fast_ops;
 extern struct Qdisc_ops mq_qdisc_ops;
+extern struct Qdisc_ops noqueue_qdisc_ops;
 extern const struct Qdisc_ops *default_qdisc_ops;
 
 struct Qdisc_class_common {
@@ -513,17 +514,20 @@
 	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 }
 
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
-					   const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+				     const struct sk_buff *skb)
 {
-	struct gnet_stats_basic_cpu *bstats =
-				this_cpu_ptr(sch->cpu_bstats);
-
 	u64_stats_update_begin(&bstats->syncp);
 	bstats_update(&bstats->bstats, skb);
 	u64_stats_update_end(&bstats->syncp);
 }
 
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+					   const struct sk_buff *skb)
+{
+	bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
 static inline void qdisc_bstats_update(struct Qdisc *sch,
 				       const struct sk_buff *skb)
 {
@@ -547,16 +551,24 @@
 	sch->qstats.drops += count;
 }
 
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
 {
-	sch->qstats.drops++;
+	qstats->drops++;
 }
 
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
 {
-	struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+	qstats->overlimits++;
+}
 
-	qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+	qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+	qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
 }
 
 static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
diff --git a/include/net/sock.h b/include/net/sock.h
index f21f070..43c6abc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -429,7 +429,9 @@
 	void			*sk_security;
 #endif
 	__u32			sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
 	u32			sk_classid;
+#endif
 	struct cg_proto		*sk_cgrp;
 	void			(*sk_state_change)(struct sock *sk);
 	void			(*sk_data_ready)(struct sock *sk);
@@ -1685,6 +1687,20 @@
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
+static inline void sk_set_txhash(struct sock *sk)
+{
+	sk->sk_txhash = prandom_u32();
+
+	if (unlikely(!sk->sk_txhash))
+		sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+	if (sk->sk_txhash)
+		sk_set_txhash(sk);
+}
+
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
@@ -1709,6 +1725,8 @@
 {
 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
+	sk_rethink_txhash(sk);
+
 	if (dst && dst->ops->negative_advice) {
 		ndst = dst->ops->negative_advice(dst);
 
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d5671f1..319baab 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -72,6 +72,7 @@
 		struct switchdev_obj_fdb {		/* PORT_FDB */
 			const unsigned char *addr;
 			u16 vid;
+			u16 ndm_state;
 		} fdb;
 	} u;
 };
@@ -157,6 +158,9 @@
 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
 			    struct net_device *dev,
 			    struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+				 struct net_device *group_dev,
+				 bool joining);
 
 #else
 
@@ -271,6 +275,12 @@
 	return -EOPNOTSUPP;
 }
 
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+					       struct net_device *group_dev,
+					       bool joining)
+{
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
diff --git a/include/net/tc_act/tc_bpf.h b/include/net/tc_act/tc_bpf.h
index a152e98..958d69c 100644
--- a/include/net/tc_act/tc_bpf.h
+++ b/include/net/tc_act/tc_bpf.h
@@ -15,7 +15,7 @@
 
 struct tcf_bpf {
 	struct tcf_common	common;
-	struct bpf_prog		*filter;
+	struct bpf_prog __rcu	*filter;
 	union {
 		u32		bpf_fd;
 		u16		bpf_num_ops;
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9fc9b57..592a6bc 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -6,9 +6,10 @@
 struct tcf_gact {
 	struct tcf_common	common;
 #ifdef CONFIG_GACT_PROB
-        u16			tcfg_ptype;
-        u16			tcfg_pval;
-        int			tcfg_paction;
+	u16			tcfg_ptype;
+	u16			tcfg_pval;
+	int			tcfg_paction;
+	atomic_t		packets;
 #endif
 };
 #define to_gact(a) \
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index 4dd77a1..dae96ba 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -8,7 +8,7 @@
 	int			tcfm_eaction;
 	int			tcfm_ifindex;
 	int			tcfm_ok_push;
-	struct net_device	*tcfm_dev;
+	struct net_device __rcu	*tcfm_dev;
 	struct list_head	tcfm_list;
 };
 #define to_mirred(a) \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 950cfec..0cab28c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -281,6 +281,8 @@
 extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_autocorking;
 extern int sysctl_tcp_invalid_ratelimit;
+extern int sysctl_tcp_pacing_ss_ratio;
+extern int sysctl_tcp_pacing_ca_ratio;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
@@ -886,7 +888,7 @@
 extern struct tcp_congestion_ops tcp_reno;
 
 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
-u32 tcp_ca_get_key_by_name(const char *name);
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
 #ifdef CONFIG_INET
 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
 #else
@@ -989,6 +991,11 @@
 
 #define TCP_INFINITE_SSTHRESH	0x7fffffff
 
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+	return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 {
 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1072,7 @@
 	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		return tp->snd_cwnd < 2 * tp->max_packets_out;
 
 	return tp->is_cwnd_limited;
@@ -1160,6 +1167,19 @@
 }
 
 u32 tcp_default_init_rwnd(u32 mss);
+void tcp_cwnd_restart(struct sock *sk, s32 delta);
+
+static inline void tcp_slow_start_after_idle_check(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	s32 delta;
+
+	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+		return;
+	delta = tcp_time_stamp - tp->lsndtime;
+	if (delta > inet_csk(sk)->icsk_rto)
+		tcp_cwnd_restart(sk, delta);
+}
 
 /* Determine a window scaling and initial window to offer. */
 void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 68f0eca..1a47946 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -33,9 +33,6 @@
 
 static inline void twsk_destructor(struct sock *sk)
 {
-	BUG_ON(sk == NULL);
-	BUG_ON(sk->sk_prot == NULL);
-	BUG_ON(sk->sk_prot->twsk_prot == NULL);
 	if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
 		sk->sk_prot->twsk_prot->twsk_destructor(sk);
 }
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index c491c12..cb2f89f 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -31,7 +31,8 @@
 	__be16			peer_udp_port;
 	unsigned int		use_udp_checksums:1,
 				use_udp6_tx_checksums:1,
-				use_udp6_rx_checksums:1;
+				use_udp6_rx_checksums:1,
+				ipv6_v6only:1;
 };
 
 int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
@@ -93,6 +94,10 @@
 
 void udp_tunnel_sock_release(struct socket *sock);
 
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
+				    __be16 flags, __be64 tunnel_id,
+				    int md_size);
+
 static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
 							 bool udp_csum)
 {
diff --git a/include/net/vrf.h b/include/net/vrf.h
new file mode 100644
index 0000000..593e609
--- /dev/null
+++ b/include/net/vrf.h
@@ -0,0 +1,178 @@
+/*
+ * include/net/vrf.h - adds vrf dev structure definitions
+ * Copyright (c) 2015 Cumulus Networks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_NET_VRF_H
+#define __LINUX_NET_VRF_H
+
+struct net_vrf_dev {
+	struct rcu_head		rcu;
+	int                     ifindex; /* ifindex of master dev */
+	u32                     tb_id;   /* table id for VRF */
+};
+
+struct slave {
+	struct list_head	list;
+	struct net_device	*dev;
+};
+
+struct slave_queue {
+	struct list_head	all_slaves;
+};
+
+struct net_vrf {
+	struct slave_queue	queue;
+	struct rtable           *rth;
+	u32			tb_id;
+};
+
+
+#if IS_ENABLED(CONFIG_NET_VRF)
+/* called with rcu_read_lock() */
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+	struct net_vrf_dev *vrf_ptr;
+	int ifindex = 0;
+
+	if (!dev)
+		return 0;
+
+	if (netif_is_vrf(dev)) {
+		ifindex = dev->ifindex;
+	} else {
+		vrf_ptr = rcu_dereference(dev->vrf_ptr);
+		if (vrf_ptr)
+			ifindex = vrf_ptr->ifindex;
+	}
+
+	return ifindex;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+	int ifindex;
+
+	rcu_read_lock();
+	ifindex = vrf_master_ifindex_rcu(dev);
+	rcu_read_unlock();
+
+	return ifindex;
+}
+
+/* called with rcu_read_lock */
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+	u32 tb_id = 0;
+
+	if (dev) {
+		struct net_vrf_dev *vrf_ptr;
+
+		vrf_ptr = rcu_dereference(dev->vrf_ptr);
+		if (vrf_ptr)
+			tb_id = vrf_ptr->tb_id;
+	}
+	return tb_id;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+	u32 tb_id;
+
+	rcu_read_lock();
+	tb_id = vrf_dev_table_rcu(dev);
+	rcu_read_unlock();
+
+	return tb_id;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+	struct net_device *dev;
+	u32 tb_id = 0;
+
+	if (!ifindex)
+		return 0;
+
+	rcu_read_lock();
+
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (dev)
+		tb_id = vrf_dev_table_rcu(dev);
+
+	rcu_read_unlock();
+
+	return tb_id;
+}
+
+/* called with rtnl */
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+	u32 tb_id = 0;
+
+	if (dev) {
+		struct net_vrf_dev *vrf_ptr;
+
+		vrf_ptr = rtnl_dereference(dev->vrf_ptr);
+		if (vrf_ptr)
+			tb_id = vrf_ptr->tb_id;
+	}
+	return tb_id;
+}
+
+/* caller has already checked netif_is_vrf(dev) */
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+	struct rtable *rth = ERR_PTR(-ENETUNREACH);
+	struct net_vrf *vrf = netdev_priv(dev);
+
+	if (vrf) {
+		rth = vrf->rth;
+		atomic_inc(&rth->dst.__refcnt);
+	}
+	return rth;
+}
+
+#else
+static inline int vrf_master_ifindex_rcu(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline int vrf_master_ifindex(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table_rcu(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table_ifindex(struct net *net, int ifindex)
+{
+	return 0;
+}
+
+static inline u32 vrf_dev_table_rtnl(const struct net_device *dev)
+{
+	return 0;
+}
+
+static inline struct rtable *vrf_dev_get_rth(const struct net_device *dev)
+{
+	return ERR_PTR(-ENETUNREACH);
+}
+#endif
+
+#endif /* __LINUX_NET_VRF_H */
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 0082b5d..480a319 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -7,6 +7,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/udp.h>
+#include <net/dst_metadata.h>
 
 #define VNI_HASH_BITS	10
 #define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@
 #define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
+#define VNI_HASH_BITS	10
+#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS	8
+#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
+
 struct vxlan_metadata {
-	__be32		vni;
 	u32		gbp;
 };
 
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
-			   struct vxlan_metadata *md);
-
 /* per UDP socket information */
 struct vxlan_sock {
 	struct hlist_node hlist;
-	vxlan_rcv_t	 *rcv;
-	void		 *data;
 	struct work_struct del_work;
 	struct socket	 *sock;
 	struct rcu_head	  rcu;
@@ -117,6 +116,58 @@
 	u32		  flags;
 };
 
+union vxlan_addr {
+	struct sockaddr_in sin;
+	struct sockaddr_in6 sin6;
+	struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+	union vxlan_addr	 remote_ip;
+	__be16			 remote_port;
+	u32			 remote_vni;
+	u32			 remote_ifindex;
+	struct list_head	 list;
+	struct rcu_head		 rcu;
+};
+
+struct vxlan_config {
+	union vxlan_addr	remote_ip;
+	union vxlan_addr	saddr;
+	u32			vni;
+	int			remote_ifindex;
+	int			mtu;
+	__be16			dst_port;
+	__u16			port_min;
+	__u16			port_max;
+	__u8			tos;
+	__u8			ttl;
+	u32			flags;
+	unsigned long		age_interval;
+	unsigned int		addrmax;
+	bool			no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+	struct hlist_node hlist;	/* vni hash table */
+	struct list_head  next;		/* vxlan's per namespace list */
+	struct vxlan_sock *vn_sock;	/* listening socket */
+	struct net_device *dev;
+	struct net	  *net;		/* netns for packet i/o */
+	struct vxlan_rdst default_dst;	/* default destination */
+	u32		  flags;	/* VXLAN_F_* in vxlan.h */
+
+	struct timer_list age_timer;
+	spinlock_t	  hash_lock;
+	unsigned int	  addrcnt;
+	struct gro_cells  gro_cells;
+
+	struct vxlan_config	cfg;
+
+	struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
 #define VXLAN_F_LEARN			0x01
 #define VXLAN_F_PROXY			0x02
 #define VXLAN_F_RSC			0x04
@@ -130,6 +181,7 @@
 #define VXLAN_F_REMCSUM_RX		0x400
 #define VXLAN_F_GBP			0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL	0x1000
+#define VXLAN_F_COLLECT_METADATA	0x2000
 
 /* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
@@ -137,18 +189,16 @@
 #define VXLAN_F_RCV_FLAGS		(VXLAN_F_GBP |			\
 					 VXLAN_F_UDP_ZERO_CSUM6_RX |	\
 					 VXLAN_F_REMCSUM_RX |		\
-					 VXLAN_F_REMCSUM_NOPARTIAL)
+					 VXLAN_F_REMCSUM_NOPARTIAL |	\
+					 VXLAN_F_COLLECT_METADATA)
 
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-				  vxlan_rcv_t *rcv, void *data,
-				  bool no_share, u32 flags);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+				    u8 name_assign_type, struct vxlan_config *conf);
 
-void vxlan_sock_release(struct vxlan_sock *vs);
-
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-		   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-		   __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
-		   bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+	return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
 
 static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
 						     netdev_features_t features)
@@ -191,4 +241,10 @@
 {
 }
 #endif
+
+static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
+{
+	return vs->sock->sk->sk_family;
+}
+
 #endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index f0ee97e..312e3fe 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -285,10 +285,13 @@
 	unsigned short		family;
 	struct dst_ops		*dst_ops;
 	void			(*garbage_collect)(struct net *net);
-	struct dst_entry	*(*dst_lookup)(struct net *net, int tos,
+	struct dst_entry	*(*dst_lookup)(struct net *net,
+					       int tos, int oif,
 					       const xfrm_address_t *saddr,
 					       const xfrm_address_t *daddr);
-	int			(*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
+	int			(*get_saddr)(struct net *net, int oif,
+					     xfrm_address_t *saddr,
+					     xfrm_address_t *daddr);
 	void			(*decode_session)(struct sk_buff *skb,
 						  struct flowi *fl,
 						  int reverse);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index ae84b22..50c2a36 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -57,9 +57,10 @@
 	SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED,	/* 38 07  UA reported */
 	SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED,	/* 2A 01  UA reported */
 	SDEV_EVT_LUN_CHANGE_REPORTED,			/* 3F 0E  UA reported */
+	SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,		/* 2A 06  UA reported */
 
 	SDEV_EVT_FIRST		= SDEV_EVT_MEDIA_CHANGE,
-	SDEV_EVT_LAST		= SDEV_EVT_LUN_CHANGE_REPORTED,
+	SDEV_EVT_LAST		= SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
 
 	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
 };
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 2555ee5..6183d20 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -241,6 +241,7 @@
 
 	/* recovery fields */
 	int recovery_tmo;
+	bool recovery_tmo_sysfs_override;
 	struct delayed_work recovery_work;
 
 	unsigned int target_id;
diff --git a/include/soc/tegra/fuse.h b/include/soc/tegra/fuse.h
index b019e34..961b821 100644
--- a/include/soc/tegra/fuse.h
+++ b/include/soc/tegra/fuse.h
@@ -22,6 +22,7 @@
 #define TEGRA114	0x35
 #define TEGRA124	0x40
 #define TEGRA132	0x13
+#define TEGRA210	0x21
 
 #define TEGRA_FUSE_SKU_CALIB_0	0xf0
 #define TEGRA30_FUSE_SATA_CALIB	0x124
@@ -47,10 +48,11 @@
 	int cpu_speedo_id;
 	int cpu_speedo_value;
 	int cpu_iddq_value;
-	int core_process_id;
+	int soc_process_id;
 	int soc_speedo_id;
-	int gpu_speedo_id;
+	int soc_speedo_value;
 	int gpu_process_id;
+	int gpu_speedo_id;
 	int gpu_speedo_value;
 	enum tegra_revision revision;
 };
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 1ab2813..370f2909 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -102,6 +102,8 @@
 	unsigned int num_address_bits;
 	unsigned int atom_size;
 
+	u8 client_id_mask;
+
 	const struct tegra_smmu_soc *smmu;
 };
 
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index f5c0de4..d18efe4 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -67,6 +67,11 @@
 #define TEGRA_POWERGATE_XUSBC	22
 #define TEGRA_POWERGATE_VIC	23
 #define TEGRA_POWERGATE_IRAM	24
+#define TEGRA_POWERGATE_NVDEC	25
+#define TEGRA_POWERGATE_NVJPG	26
+#define TEGRA_POWERGATE_AUD	27
+#define TEGRA_POWERGATE_DFD	28
+#define TEGRA_POWERGATE_VE2	29
 
 #define TEGRA_POWERGATE_3D0	TEGRA_POWERGATE_3D
 
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 0e9d75b..74bc8547 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -584,6 +584,8 @@
 void snd_ac97_suspend(struct snd_ac97 *ac97);
 void snd_ac97_resume(struct snd_ac97 *ac97);
 #endif
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+	unsigned int id_mask);
 
 /* quirk types */
 enum {
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index adb5ba5..930b41e 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -4,14 +4,17 @@
 #ifndef __SOUND_HDA_I915_H
 #define __SOUND_HDA_I915_H
 
+#include <drm/i915_component.h>
+
 #ifdef CONFIG_SND_HDA_I915
 int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
 int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
 int snd_hdac_get_display_clk(struct hdac_bus *bus);
 int snd_hdac_i915_init(struct hdac_bus *bus);
 int snd_hdac_i915_exit(struct hdac_bus *bus);
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
 #else
-static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
 {
 	return 0;
 }
@@ -31,6 +34,10 @@
 {
 	return 0;
 }
+static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
+{
+	return -ENODEV;
+}
 #endif
 
 #endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index ae995e5..2ae8812 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -160,6 +160,10 @@
 #define AZX_SPB_BASE			0x08
 /* Interval used to calculate the iterating register offset */
 #define AZX_SPB_INTERVAL		0x08
+/* SPIB base */
+#define AZX_SPB_SPIB			0x00
+/* SPIB MAXFIFO base */

+#define AZX_SPB_MAXFIFO			0x04
 
 /* registers of Global Time Synchronization Capability Structure */
 #define AZX_GTS_CAP_ID			0x1
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 4caf1fd..49bc836 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -119,6 +119,7 @@
 void snd_hdac_device_unregister(struct hdac_device *codec);
 
 int snd_hdac_refresh_widgets(struct hdac_device *codec);
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec);
 
 unsigned int snd_hdac_make_cmd(struct hdac_device *codec, hda_nid_t nid,
 			       unsigned int verb, unsigned int parm);
@@ -164,15 +165,15 @@
 }
 
 #ifdef CONFIG_PM
-void snd_hdac_power_up(struct hdac_device *codec);
-void snd_hdac_power_down(struct hdac_device *codec);
-void snd_hdac_power_up_pm(struct hdac_device *codec);
-void snd_hdac_power_down_pm(struct hdac_device *codec);
+int snd_hdac_power_up(struct hdac_device *codec);
+int snd_hdac_power_down(struct hdac_device *codec);
+int snd_hdac_power_up_pm(struct hdac_device *codec);
+int snd_hdac_power_down_pm(struct hdac_device *codec);
 #else
-static inline void snd_hdac_power_up(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down(struct hdac_device *codec) {}
-static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
-static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
+static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
+static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
 #endif
 
 /*
@@ -437,6 +438,8 @@
 struct hdac_stream *snd_hdac_stream_assign(struct hdac_bus *bus,
 					   struct snd_pcm_substream *substream);
 void snd_hdac_stream_release(struct hdac_stream *azx_dev);
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+					int dir, int stream_tag);
 
 int snd_hdac_stream_setup(struct hdac_stream *azx_dev);
 void snd_hdac_stream_cleanup(struct hdac_stream *azx_dev);
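
With snd_hdac_power_up() and friends now returning int, callers can propagate runtime-PM failures instead of silently ignoring them. A short sketch under the assumption of a hypothetical codec helper:

	#include <sound/hdaudio.h>

	static int example_with_codec_powered(struct hdac_device *codec)
	{
		int err;

		err = snd_hdac_power_up(codec);	/* may fail with CONFIG_PM */
		if (err < 0)
			return err;

		/* ... talk to the codec here ... */

		return snd_hdac_power_down(codec);
	}
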
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 0f89df1..94210dc 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -34,6 +34,7 @@
 void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
 int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
 
 #define ebus_to_hbus(ebus)	(&(ebus)->bus)
 #define hbus_to_ebus(_bus) \
@@ -62,6 +63,8 @@
  * @hstream: hdac_stream
  * @pphc_addr: processing pipe host stream pointer
  * @pplc_addr: processing pipe link stream pointer
+ * @spib_addr: software position in buffers stream pointer
+ * @fifo_addr: software position max FIFO stream pointer
  * @decoupled: stream host and link is decoupled
  * @link_locked: link is locked
  * @link_prepared: link is prepared
@@ -73,6 +76,9 @@
 	void __iomem *pphc_addr;
 	void __iomem *pplc_addr;
 
+	void __iomem *spib_addr;
+	void __iomem *fifo_addr;
+
 	bool decoupled:1;
 	bool link_locked:1;
 	bool link_prepared;
@@ -99,6 +105,11 @@
 				struct hdac_ext_stream *azx_dev, bool decouple);
 void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
 
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream, u32 value);
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream);
+
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
 void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
 void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
@@ -115,6 +126,7 @@
 
 int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
 int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
 void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
 				 int stream);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
@@ -129,4 +141,63 @@
 	writew(((readw(addr + reg) & ~(mask)) | (val)), \
 		addr + reg)
 
+
+struct hdac_ext_device;
+
+/* ops common to all codec drivers */
+struct hdac_ext_codec_ops {
+	int (*build_controls)(struct hdac_ext_device *dev);
+	int (*init)(struct hdac_ext_device *dev);
+	void (*free)(struct hdac_ext_device *dev);
+};
+
+struct hda_dai_map {
+	char *dai_name;
+	hda_nid_t nid;
+	u32	maxbps;
+};
+
+#define HDA_MAX_NIDS 16
+
+/**
+ * struct hdac_ext_device - HDAC Ext device
+ *
+ * @hdac: hdac core device
+ * @ebus: hdac extended bus the device belongs to
+ * @nid_list: the dai map which matches the dai-name with the nid
+ * @map_cur_idx: the idx in use in dai_map
+ * @ops: the hda codec ops common to all codec drivers
+ * @private_data: private data, for asoc contains asoc codec object
+ */
+struct hdac_ext_device {
+	struct hdac_device hdac;
+	struct hdac_ext_bus *ebus;
+
+	/* soc-dai to nid map */
+	struct hda_dai_map nid_list[HDA_MAX_NIDS];
+	unsigned int map_cur_idx;
+
+	/* codec ops */
+	struct hdac_ext_codec_ops ops;
+
+	void *private_data;
+};
+
+#define to_ehdac_device(dev) (container_of((dev), \
+				 struct hdac_ext_device, hdac))
+/*
+ * HD-audio codec base driver
+ */
+struct hdac_ext_driver {
+	struct hdac_driver hdac;
+
+	int	(*probe)(struct hdac_ext_device *dev);
+	int	(*remove)(struct hdac_ext_device *dev);
+	void	(*shutdown)(struct hdac_ext_device *dev);
+};
+
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
+
+#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+
 #endif /* __SOUND_HDAUDIO_EXT_H */
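
The new hdac_ext_driver/hdac_ext_device pairing gives codec drivers a registration point on the extended bus. A minimal sketch, assuming a hypothetical driver (a real driver would also fill the embedded .hdac core driver with a name and id table):

	#include <linux/module.h>
	#include <sound/hdaudio_ext.h>

	static int example_ext_probe(struct hdac_ext_device *edev)
	{
		/* edev->hdac is the core codec device, edev->ebus the extended bus */
		return 0;
	}

	static int example_ext_remove(struct hdac_ext_device *edev)
	{
		return 0;
	}

	static struct hdac_ext_driver example_ext_driver = {
		.probe	= example_ext_probe,
		.remove	= example_ext_remove,
	};

	static int __init example_ext_init(void)
	{
		return snd_hda_ext_driver_register(&example_ext_driver);
	}
	module_init(example_ext_init);

	static void __exit example_ext_exit(void)
	{
		snd_hda_ext_driver_unregister(&example_ext_driver);
	}
	module_exit(example_ext_exit);
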
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
index 4cecd0c..bb7b2eb 100644
--- a/include/sound/rcar_snd.h
+++ b/include/sound/rcar_snd.h
@@ -61,6 +61,14 @@
 /*
  * flags
  */
+struct rsnd_ctu_platform_info {
+	u32 flags;
+};
+
+struct rsnd_mix_platform_info {
+	u32 flags;
+};
+
 struct rsnd_dvc_platform_info {
 	u32 flags;
 };
@@ -68,6 +76,8 @@
 struct rsnd_dai_path_info {
 	struct rsnd_ssi_platform_info *ssi;
 	struct rsnd_src_platform_info *src;
+	struct rsnd_ctu_platform_info *ctu;
+	struct rsnd_mix_platform_info *mix;
 	struct rsnd_dvc_platform_info *dvc;
 };
 
@@ -93,6 +103,10 @@
 	int ssi_info_nr;
 	struct rsnd_src_platform_info *src_info;
 	int src_info_nr;
+	struct rsnd_ctu_platform_info *ctu_info;
+	int ctu_info_nr;
+	struct rsnd_mix_platform_info *mix_info;
+	int mix_info_nr;
 	struct rsnd_dvc_platform_info *dvc_info;
 	int dvc_info_nr;
 	struct rsnd_dai_platform_info *dai_info;
diff --git a/include/sound/rt298.h b/include/sound/rt298.h
new file mode 100644
index 0000000..7fffeaa
--- /dev/null
+++ b/include/sound/rt298.h
@@ -0,0 +1,20 @@
+/*
+ * linux/sound/rt298.h -- Platform data for RT298
+ *
+ * Copyright 2013 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT298_H
+#define __LINUX_SND_RT298_H
+
+struct rt298_platform_data {
+	bool cbj_en; /* combo jack enable */
+	bool gpio2_en; /* GPIO2 enable */
+	bool suspend_power_off; /* power is off during suspend */
+};
+
+#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 37d95a8..5abba03 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -397,6 +397,7 @@
 			    const struct snd_soc_dapm_route *route, int num);
 int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
 			     const struct snd_soc_dapm_route *route, int num);
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w);
 
 /* dapm events */
 void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
@@ -511,9 +512,18 @@
 struct snd_soc_dapm_path {
 	const char *name;
 
-	/* source (input) and sink (output) widgets */
-	struct snd_soc_dapm_widget *source;
-	struct snd_soc_dapm_widget *sink;
+	/*
+	 * source (input) and sink (output) widgets
+	 * The union is for convenience, since it is a lot nicer to type
+	 * p->source, rather than p->node[SND_SOC_DAPM_DIR_IN]
+	 */
+	union {
+		struct {
+			struct snd_soc_dapm_widget *source;
+			struct snd_soc_dapm_widget *sink;
+		};
+		struct snd_soc_dapm_widget *node[2];
+	};
 
 	/* status */
 	u32 connect:1;	/* source and sink widgets are connected */
@@ -524,8 +534,7 @@
 	int (*connected)(struct snd_soc_dapm_widget *source,
 			 struct snd_soc_dapm_widget *sink);
 
-	struct list_head list_source;
-	struct list_head list_sink;
+	struct list_head list_node[2];
 	struct list_head list_kcontrol;
 	struct list_head list;
 };
@@ -559,8 +568,7 @@
 	unsigned char new_power:1;		/* power from this run */
 	unsigned char power_checked:1;		/* power checked this run */
 	unsigned char is_supply:1;		/* Widget is a supply type widget */
-	unsigned char is_sink:1;		/* Widget is a sink type widget */
-	unsigned char is_source:1;		/* Widget is a source type widget */
+	unsigned char is_ep:2;			/* Widget is an endpoint type widget */
 	int subseq;				/* sort within widget type */
 
 	int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -575,16 +583,14 @@
 	struct snd_kcontrol **kcontrols;
 	struct snd_soc_dobj dobj;
 
-	/* widget input and outputs */
-	struct list_head sources;
-	struct list_head sinks;
+	/* widget input and output edges */
+	struct list_head edges[2];
 
 	/* used during DAPM updates */
 	struct list_head work_list;
 	struct list_head power_list;
 	struct list_head dirty;
-	int inputs;
-	int outputs;
+	int endpoints[2];
 
 	struct clk *clk;
 };
@@ -672,4 +678,58 @@
 	return dapm->bias_level;
 }
 
+enum snd_soc_dapm_direction {
+	SND_SOC_DAPM_DIR_IN,
+	SND_SOC_DAPM_DIR_OUT
+};
+
+#define SND_SOC_DAPM_DIR_TO_EP(x) BIT(x)
+
+#define SND_SOC_DAPM_EP_SOURCE SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_IN)
+#define SND_SOC_DAPM_EP_SINK SND_SOC_DAPM_DIR_TO_EP(SND_SOC_DAPM_DIR_OUT)
+
+/**
+ * snd_soc_dapm_widget_for_each_path - Iterates over all paths in the
+ *   specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ *       incoming or the outgoing widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_path(w, dir, p) \
+	list_for_each_entry(p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_path_safe - Iterates over all paths in the
+ *   specified direction of a widget
+ * @w: The widget
+ * @dir: Whether to iterate over the paths where the specified widget is the
+ *       incoming or the outgoing widget
+ * @p: The path iterator variable
+ * @next_p: Temporary storage for the next path
+ *
+ *  This macro works like snd_soc_dapm_widget_for_each_path, except that
+ *  it is safe to remove the current path from the list while iterating.
+ */
+#define snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p) \
+	list_for_each_entry_safe(p, next_p, &w->edges[dir], list_node[dir])
+
+/**
+ * snd_soc_dapm_widget_for_each_sink_path - Iterates over all paths leaving a
+ *  widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_sink_path(w, p) \
+	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_IN, p)
+
+/**
+ * snd_soc_dapm_widget_for_each_source_path - Iterates over all paths leading to
+ *  a widget
+ * @w: The widget
+ * @p: The path iterator variable
+ */
+#define snd_soc_dapm_widget_for_each_source_path(w, p) \
+	snd_soc_dapm_widget_for_each_path(w, SND_SOC_DAPM_DIR_OUT, p)
+
 #endif
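
The per-widget sources/sinks lists collapse into a single direction-indexed edges[] array, with the iterators above hiding the indexing. A minimal sketch, assuming a hypothetical helper, of walking a widget's outgoing edges:

	#include <sound/soc-dapm.h>

	static int example_count_connected_sinks(struct snd_soc_dapm_widget *w)
	{
		struct snd_soc_dapm_path *p;
		int connected = 0;

		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			/* p->sink is the neighbouring widget on this edge */
			if (p->connect)
				connected++;
		}

		return connected;
	}

Thanks to the anonymous union in struct snd_soc_dapm_path, existing p->source/p->sink users keep working unchanged.
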
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index 427bc41..086cd7f 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -89,6 +89,13 @@
 		struct snd_ctl_elem_info *uinfo);
 };
 
+/* Bytes ext operations, for TLV byte controls */
+struct snd_soc_tplg_bytes_ext_ops {
+	u32 id;
+	int (*get)(unsigned int __user *bytes, unsigned int size);
+	int (*put)(const unsigned int __user *bytes, unsigned int size);
+};
+
 /*
  * DAPM widget event handlers - used to map handlers onto widgets.
  */
@@ -136,9 +143,13 @@
 	int (*manifest)(struct snd_soc_component *,
 		struct snd_soc_tplg_manifest *);
 
-	/* bespoke kcontrol handlers available for binding */
+	/* vendor specific kcontrol handlers available for binding */
 	const struct snd_soc_tplg_kcontrol_ops *io_ops;
 	int io_ops_count;
+
+	/* vendor specific bytes ext handlers available for binding */
+	const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+	int bytes_ext_ops_count;
 };
 
 #ifdef CONFIG_SND_SOC_TOPOLOGY
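
A sketch of how a vendor driver might expose TLV bytes-ext handlers through the new table; the id value, handler bodies and the use of the mainline snd_soc_tplg_ops ops structure are assumptions for illustration:

	#include <linux/kernel.h>
	#include <sound/soc-topology.h>

	static int example_bytes_get(unsigned int __user *bytes, unsigned int size)
	{
		/* copy driver state to user space here */
		return 0;
	}

	static int example_bytes_put(const unsigned int __user *bytes, unsigned int size)
	{
		/* apply the user-supplied blob here */
		return 0;
	}

	static const struct snd_soc_tplg_bytes_ext_ops example_bytes_ops[] = {
		{ .id = 1, .get = example_bytes_get, .put = example_bytes_put },
	};

	static struct snd_soc_tplg_ops example_tplg_ops = {
		.bytes_ext_ops		= example_bytes_ops,
		.bytes_ext_ops_count	= ARRAY_SIZE(example_bytes_ops),
	};
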
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 93df8bf..884e728 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -526,7 +526,8 @@
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
 struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+	unsigned int id, unsigned int id_mask);
 void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
 
 int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
@@ -619,6 +620,7 @@
  * @pin:    name of the pin to update
  * @mask:   bits to check for in reported jack status
  * @invert: if non-zero then pin is enabled when status is not reported
+ * @list:   internal list entry
  */
 struct snd_soc_jack_pin {
 	struct list_head list;
@@ -635,7 +637,7 @@
  * @jack_type: type of jack that is expected for this voltage
  * @debounce_time: debounce_time for jack, codec driver should wait for this
  *		duration before reading the adc for voltages
- * @:list: list container
+ * @list:   internal list entry
  */
 struct snd_soc_jack_zone {
 	unsigned int min_mv;
@@ -651,12 +653,12 @@
  * @gpio:         legacy gpio number
  * @idx:          gpio descriptor index within the function of the GPIO
  *                consumer device
- * @gpiod_dev     GPIO consumer device
+ * @gpiod_dev:    GPIO consumer device
  * @name:         gpio name. Also as connection ID for the GPIO consumer
  *                device function name lookup
  * @report:       value to report when jack detected
  * @invert:       report presence in low state
- * @debouce_time: debouce time in ms
+ * @debounce_time: debounce time in ms
  * @wake:	  enable as wake source
  * @jack_status_check: callback function which overrides the detection
  *		       to provide more complex checks (eg, reading an
@@ -672,11 +674,13 @@
 	int debounce_time;
 	bool wake;
 
+	/* private: */
 	struct snd_soc_jack *jack;
 	struct delayed_work work;
 	struct gpio_desc *desc;
 
 	void *data;
+	/* public: */
 	int (*jack_status_check)(void *data);
 };
 
@@ -758,7 +762,6 @@
 
 	unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
 	unsigned int registered_as_component:1;
-	unsigned int probed:1;
 
 	struct list_head list;
 
@@ -792,7 +795,6 @@
 
 	/* Don't use these, use snd_soc_component_get_dapm() */
 	struct snd_soc_dapm_context dapm;
-	struct snd_soc_dapm_context *dapm_ptr;
 
 	const struct snd_kcontrol_new *controls;
 	unsigned int num_controls;
@@ -832,9 +834,6 @@
 	/* component */
 	struct snd_soc_component component;
 
-	/* Don't access this directly, use snd_soc_codec_get_dapm() */
-	struct snd_soc_dapm_context dapm;
-
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_reg;
 #endif
@@ -1277,7 +1276,7 @@
 static inline struct snd_soc_codec *snd_soc_dapm_to_codec(
 	struct snd_soc_dapm_context *dapm)
 {
-	return container_of(dapm, struct snd_soc_codec, dapm);
+	return snd_soc_component_to_codec(snd_soc_dapm_to_component(dapm));
 }
 
 /**
@@ -1302,7 +1301,7 @@
 static inline struct snd_soc_dapm_context *snd_soc_component_get_dapm(
 	struct snd_soc_component *component)
 {
-	return component->dapm_ptr;
+	return &component->dapm;
 }
 
 /**
@@ -1314,12 +1313,12 @@
 static inline struct snd_soc_dapm_context *snd_soc_codec_get_dapm(
 	struct snd_soc_codec *codec)
 {
-	return &codec->dapm;
+	return snd_soc_component_get_dapm(&codec->component);
 }
 
 /**
  * snd_soc_dapm_init_bias_level() - Initialize CODEC DAPM bias level
- * @dapm: The CODEC for which to initialize the DAPM bias level
+ * @codec: The CODEC for which to initialize the DAPM bias level
  * @level: The DAPM level to initialize to
  *
  * Initializes the CODEC DAPM bias level. See snd_soc_dapm_init_bias_level().
@@ -1604,6 +1603,10 @@
 int snd_soc_of_parse_tdm_slot(struct device_node *np,
 			      unsigned int *slots,
 			      unsigned int *slot_width);
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+				   struct snd_soc_codec_conf *codec_conf,
+				   struct device_node *of_node,
+				   const char *propname);
 int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 				   const char *propname);
 unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 88cf39d..317a1ed 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -8,6 +8,7 @@
 #include <linux/tracepoint.h>
 
 #define DAPM_DIRECT "(direct)"
+#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
 
 struct snd_soc_jack;
 struct snd_soc_codec;
@@ -152,62 +153,38 @@
 		  (int)__entry->path_checks, (int)__entry->neighbour_checks)
 );
 
-TRACE_EVENT(snd_soc_dapm_output_path,
+TRACE_EVENT(snd_soc_dapm_path,
 
 	TP_PROTO(struct snd_soc_dapm_widget *widget,
+		enum snd_soc_dapm_direction dir,
 		struct snd_soc_dapm_path *path),
 
-	TP_ARGS(widget, path),
+	TP_ARGS(widget, dir, path),
 
 	TP_STRUCT__entry(
 		__string(	wname,	widget->name		)
 		__string(	pname,	path->name ? path->name : DAPM_DIRECT)
-		__string(	psname,	path->sink->name	)
-		__field(	int,	path_sink		)
+		__string(	pnname,	path->node[dir]->name	)
+		__field(	int,	path_node		)
 		__field(	int,	path_connect		)
+		__field(	int,	path_dir		)
 	),
 
 	TP_fast_assign(
 		__assign_str(wname, widget->name);
 		__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
-		__assign_str(psname, path->sink->name);
+		__assign_str(pnname, path->node[dir]->name);
 		__entry->path_connect = path->connect;
-		__entry->path_sink = (long)path->sink;
+		__entry->path_node = (long)path->node[dir];
+		__entry->path_dir = dir;
 	),
 
-	TP_printk("%c%s -> %s -> %s",
-		(int) __entry->path_sink &&
+	TP_printk("%c%s %s %s %s %s",
+		(int) __entry->path_node &&
 		(int) __entry->path_connect ? '*' : ' ',
-		__get_str(wname), __get_str(pname), __get_str(psname))
-);
-
-TRACE_EVENT(snd_soc_dapm_input_path,
-
-	TP_PROTO(struct snd_soc_dapm_widget *widget,
-		struct snd_soc_dapm_path *path),
-
-	TP_ARGS(widget, path),
-
-	TP_STRUCT__entry(
-		__string(	wname,	widget->name		)
-		__string(	pname,	path->name ? path->name : DAPM_DIRECT)
-		__string(	psname,	path->source->name	)
-		__field(	int,	path_source		)
-		__field(	int,	path_connect		)
-	),
-
-	TP_fast_assign(
-		__assign_str(wname, widget->name);
-		__assign_str(pname, path->name ? path->name : DAPM_DIRECT);
-		__assign_str(psname, path->source->name);
-		__entry->path_connect = path->connect;
-		__entry->path_source = (long)path->source;
-	),
-
-	TP_printk("%c%s <- %s <- %s",
-		(int) __entry->path_source &&
-		(int) __entry->path_connect ? '*' : ' ',
-		__get_str(wname), __get_str(pname), __get_str(psname))
+		__get_str(wname), DAPM_ARROW(__entry->path_dir),
+		__get_str(pname), DAPM_ARROW(__entry->path_dir),
+		__get_str(pnname))
 );
 
 TRACE_EVENT(snd_soc_dapm_connected,
diff --git a/include/trace/events/ext3.h b/include/trace/events/ext3.h
deleted file mode 100644
index fc733d2..0000000
--- a/include/trace/events/ext3.h
+++ /dev/null
@@ -1,866 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM ext3
-
-#if !defined(_TRACE_EXT3_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_EXT3_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(ext3_free_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	uid_t,	uid			)
-		__field(	gid_t,	gid			)
-		__field(	blkcnt_t, blocks		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->mode	= inode->i_mode;
-		__entry->uid	= i_uid_read(inode);
-		__entry->gid	= i_gid_read(inode);
-		__entry->blocks	= inode->i_blocks;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->uid, __entry->gid,
-		  (unsigned long) __entry->blocks)
-);
-
-TRACE_EVENT(ext3_request_inode,
-	TP_PROTO(struct inode *dir, int mode),
-
-	TP_ARGS(dir, mode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= dir->i_sb->s_dev;
-		__entry->dir	= dir->i_ino;
-		__entry->mode	= mode;
-	),
-
-	TP_printk("dev %d,%d dir %lu mode 0%o",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_allocate_inode,
-	TP_PROTO(struct inode *inode, struct inode *dir, int mode),
-
-	TP_ARGS(inode, dir, mode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	ino_t,	dir			)
-		__field(	umode_t, mode			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->dir	= dir->i_ino;
-		__entry->mode	= mode;
-	),
-
-	TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->dir, __entry->mode)
-);
-
-TRACE_EVENT(ext3_evict_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	int,	nlink			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->nlink	= inode->i_nlink;
-	),
-
-	TP_printk("dev %d,%d ino %lu nlink %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->nlink)
-);
-
-TRACE_EVENT(ext3_drop_inode,
-	TP_PROTO(struct inode *inode, int drop),
-
-	TP_ARGS(inode, drop),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	int,	drop			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->drop	= drop;
-	),
-
-	TP_printk("dev %d,%d ino %lu drop %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->drop)
-);
-
-TRACE_EVENT(ext3_mark_inode_dirty,
-	TP_PROTO(struct inode *inode, unsigned long IP),
-
-	TP_ARGS(inode, IP),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(unsigned long,	ip			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->ip	= IP;
-	),
-
-	TP_printk("dev %d,%d ino %lu caller %pS",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, (void *)__entry->ip)
-);
-
-TRACE_EVENT(ext3_write_begin,
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int flags),
-
-	TP_ARGS(inode, pos, len, flags),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned int, len		)
-		__field(	unsigned int, flags		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->pos	= pos;
-		__entry->len	= len;
-		__entry->flags	= flags;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->flags)
-);
-
-DECLARE_EVENT_CLASS(ext3__write_end,
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-			unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned int, len		)
-		__field(	unsigned int, copied		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->pos	= pos;
-		__entry->len	= len;
-		__entry->copied	= copied;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_ordered_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_writeback_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DEFINE_EVENT(ext3__write_end, ext3_journalled_write_end,
-
-	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
-		 unsigned int copied),
-
-	TP_ARGS(inode, pos, len, copied)
-);
-
-DECLARE_EVENT_CLASS(ext3__page_op,
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	pgoff_t, index			)
-
-	),
-
-	TP_fast_assign(
-		__entry->index	= page->index;
-		__entry->ino	= page->mapping->host->i_ino;
-		__entry->dev	= page->mapping->host->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu page_index %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, __entry->index)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_ordered_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_writeback_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_journalled_writepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_readpage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-DEFINE_EVENT(ext3__page_op, ext3_releasepage,
-
-	TP_PROTO(struct page *page),
-
-	TP_ARGS(page)
-);
-
-TRACE_EVENT(ext3_invalidatepage,
-	TP_PROTO(struct page *page, unsigned int offset, unsigned int length),
-
-	TP_ARGS(page, offset, length),
-
-	TP_STRUCT__entry(
-		__field(	pgoff_t, index			)
-		__field(	unsigned int, offset		)
-		__field(	unsigned int, length		)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-
-	),
-
-	TP_fast_assign(
-		__entry->index	= page->index;
-		__entry->offset	= offset;
-		__entry->length	= length;
-		__entry->ino	= page->mapping->host->i_ino;
-		__entry->dev	= page->mapping->host->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->index, __entry->offset, __entry->length)
-);
-
-TRACE_EVENT(ext3_discard_blocks,
-	TP_PROTO(struct super_block *sb, unsigned long blk,
-			unsigned long count),
-
-	TP_ARGS(sb, blk, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	blk		)
-		__field(	unsigned long,	count		)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->blk	= blk;
-		__entry->count	= count;
-	),
-
-	TP_printk("dev %d,%d blk %lu count %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->blk, __entry->count)
-);
-
-TRACE_EVENT(ext3_request_blocks,
-	TP_PROTO(struct inode *inode, unsigned long goal,
-		 unsigned long count),
-
-	TP_ARGS(inode, goal, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	unsigned long, count		)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->count	= count;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d ino %lu count %lu goal %lu ",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->count, __entry->goal)
-);
-
-TRACE_EVENT(ext3_allocate_blocks,
-	TP_PROTO(struct inode *inode, unsigned long goal,
-		 unsigned long count, unsigned long block),
-
-	TP_ARGS(inode, goal, count, block),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	unsigned long,	block		)
-		__field(	unsigned long, count		)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->block	= block;
-		__entry->count	= count;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d ino %lu count %lu block %lu goal %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		   __entry->count, __entry->block,
-		  __entry->goal)
-);
-
-TRACE_EVENT(ext3_free_blocks,
-	TP_PROTO(struct inode *inode, unsigned long block,
-		 unsigned long count),
-
-	TP_ARGS(inode, block, count),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	unsigned long,	block		)
-		__field(	unsigned long,	count		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= inode->i_sb->s_dev;
-		__entry->ino		= inode->i_ino;
-		__entry->mode		= inode->i_mode;
-		__entry->block		= block;
-		__entry->count		= count;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o block %lu count %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->block, __entry->count)
-);
-
-TRACE_EVENT(ext3_sync_file_enter,
-	TP_PROTO(struct file *file, int datasync),
-
-	TP_ARGS(file, datasync),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	ino_t,	parent			)
-		__field(	int,	datasync		)
-	),
-
-	TP_fast_assign(
-		struct dentry *dentry = file->f_path.dentry;
-
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->datasync	= datasync;
-		__entry->parent		= d_inode(dentry->d_parent)->i_ino;
-	),
-
-	TP_printk("dev %d,%d ino %lu parent %ld datasync %d ",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long) __entry->parent, __entry->datasync)
-);
-
-TRACE_EVENT(ext3_sync_file_exit,
-	TP_PROTO(struct inode *inode, int ret),
-
-	TP_ARGS(inode, ret),
-
-	TP_STRUCT__entry(
-		__field(	int,	ret			)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->ret		= ret;
-		__entry->ino		= inode->i_ino;
-		__entry->dev		= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->ret)
-);
-
-TRACE_EVENT(ext3_sync_fs,
-	TP_PROTO(struct super_block *sb, int wait),
-
-	TP_ARGS(sb, wait),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	wait			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->wait	= wait;
-	),
-
-	TP_printk("dev %d,%d wait %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->wait)
-);
-
-TRACE_EVENT(ext3_rsv_window_add,
-	TP_PROTO(struct super_block *sb,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(sb, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-	),
-
-	TP_printk("dev %d,%d start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_discard_reservation,
-	TP_PROTO(struct inode *inode,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(inode, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long)__entry->ino, __entry->start,
-		  __entry->end)
-);
-
-TRACE_EVENT(ext3_alloc_new_reservation,
-	TP_PROTO(struct super_block *sb, unsigned long goal),
-
-	TP_ARGS(sb, goal),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	unsigned long,	goal		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->goal	= goal;
-	),
-
-	TP_printk("dev %d,%d goal %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->goal)
-);
-
-TRACE_EVENT(ext3_reserved,
-	TP_PROTO(struct super_block *sb, unsigned long block,
-		 struct ext3_reserve_window_node *rsv_node),
-
-	TP_ARGS(sb, block, rsv_node),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	block		)
-		__field(	unsigned long,	start		)
-		__field(	unsigned long,	end		)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->block	= block;
-		__entry->start	= rsv_node->rsv_window._rsv_start;
-		__entry->end	= rsv_node->rsv_window._rsv_end;
-		__entry->dev	= sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d block %lu, start %lu end %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->block, __entry->start, __entry->end)
-);
-
-TRACE_EVENT(ext3_forget,
-	TP_PROTO(struct inode *inode, int is_metadata, unsigned long block),
-
-	TP_ARGS(inode, is_metadata, block),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	ino_t,	ino			)
-		__field(	umode_t, mode			)
-		__field(	int,	is_metadata		)
-		__field(	unsigned long,	block		)
-	),
-
-	TP_fast_assign(
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->ino	= inode->i_ino;
-		__entry->mode	= inode->i_mode;
-		__entry->is_metadata = is_metadata;
-		__entry->block	= block;
-	),
-
-	TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->mode, __entry->is_metadata, __entry->block)
-);
-
-TRACE_EVENT(ext3_read_block_bitmap,
-	TP_PROTO(struct super_block *sb, unsigned int group),
-
-	TP_ARGS(sb, group),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	__u32,	group			)
-
-	),
-
-	TP_fast_assign(
-		__entry->dev	= sb->s_dev;
-		__entry->group	= group;
-	),
-
-	TP_printk("dev %d,%d group %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->group)
-);
-
-TRACE_EVENT(ext3_direct_IO_enter,
-	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
-
-	TP_ARGS(inode, offset, len, rw),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned long,	len		)
-		__field(	int,	rw			)
-	),
-
-	TP_fast_assign(
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->pos	= offset;
-		__entry->len	= len;
-		__entry->rw	= rw;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->rw)
-);
-
-TRACE_EVENT(ext3_direct_IO_exit,
-	TP_PROTO(struct inode *inode, loff_t offset, unsigned long len,
-		 int rw, int ret),
-
-	TP_ARGS(inode, offset, len, rw, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	loff_t,	pos			)
-		__field(	unsigned long,	len		)
-		__field(	int,	rw			)
-		__field(	int,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->ino	= inode->i_ino;
-		__entry->dev	= inode->i_sb->s_dev;
-		__entry->pos	= offset;
-		__entry->len	= len;
-		__entry->rw	= rw;
-		__entry->ret	= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long) __entry->pos, __entry->len,
-		  __entry->rw, __entry->ret)
-);
-
-TRACE_EVENT(ext3_unlink_enter,
-	TP_PROTO(struct inode *parent, struct dentry *dentry),
-
-	TP_ARGS(parent, dentry),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	parent			)
-		__field(	ino_t,	ino			)
-		__field(	loff_t,	size			)
-		__field(	dev_t,	dev			)
-	),
-
-	TP_fast_assign(
-		__entry->parent		= parent->i_ino;
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->size		= d_inode(dentry)->i_size;
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu size %lld parent %ld",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  (unsigned long long)__entry->size,
-		  (unsigned long) __entry->parent)
-);
-
-TRACE_EVENT(ext3_unlink_exit,
-	TP_PROTO(struct dentry *dentry, int ret),
-
-	TP_ARGS(dentry, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino			)
-		__field(	dev_t,	dev			)
-		__field(	int,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->ino		= d_inode(dentry)->i_ino;
-		__entry->dev		= d_inode(dentry)->i_sb->s_dev;
-		__entry->ret		= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->ret)
-);
-
-DECLARE_EVENT_CLASS(ext3__truncate,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	blkcnt_t,	blocks		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->blocks	= inode->i_blocks;
-	),
-
-	TP_printk("dev %d,%d ino %lu blocks %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_enter,
-
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode)
-);
-
-DEFINE_EVENT(ext3__truncate, ext3_truncate_exit,
-
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode)
-);
-
-TRACE_EVENT(ext3_get_blocks_enter,
-	TP_PROTO(struct inode *inode, unsigned long lblk,
-		 unsigned long len, int create),
-
-	TP_ARGS(inode, lblk, len, create),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	lblk		)
-		__field(	unsigned long,	len		)
-		__field(	int,		create		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->lblk	= lblk;
-		__entry->len	= len;
-		__entry->create	= create;
-	),
-
-	TP_printk("dev %d,%d ino %lu lblk %lu len %lu create %u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		  __entry->lblk, __entry->len, __entry->create)
-);
-
-TRACE_EVENT(ext3_get_blocks_exit,
-	TP_PROTO(struct inode *inode, unsigned long lblk,
-		 unsigned long pblk, unsigned long len, int ret),
-
-	TP_ARGS(inode, lblk, pblk, len, ret),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,		ino		)
-		__field(	dev_t,		dev		)
-		__field(	unsigned long,	lblk		)
-		__field(	unsigned long,	pblk		)
-		__field(	unsigned long,	len		)
-		__field(	int,		ret		)
-	),
-
-	TP_fast_assign(
-		__entry->ino    = inode->i_ino;
-		__entry->dev    = inode->i_sb->s_dev;
-		__entry->lblk	= lblk;
-		__entry->pblk	= pblk;
-		__entry->len	= len;
-		__entry->ret	= ret;
-	),
-
-	TP_printk("dev %d,%d ino %lu lblk %lu pblk %lu len %lu ret %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino,
-		   __entry->lblk, __entry->pblk,
-		  __entry->len, __entry->ret)
-);
-
-TRACE_EVENT(ext3_load_inode,
-	TP_PROTO(struct inode *inode),
-
-	TP_ARGS(inode),
-
-	TP_STRUCT__entry(
-		__field(	ino_t,	ino		)
-		__field(	dev_t,	dev		)
-	),
-
-	TP_fast_assign(
-		__entry->ino		= inode->i_ino;
-		__entry->dev		= inode->i_sb->s_dev;
-	),
-
-	TP_printk("dev %d,%d ino %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  (unsigned long) __entry->ino)
-);
-
-#endif /* _TRACE_EXT3_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 04856a2..a019465 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1099,11 +1099,11 @@
 TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end,
 
 	TP_PROTO(struct inode *inode, unsigned int pgofs,
-						struct extent_node *en),
+						struct extent_info *ei),
 
-	TP_ARGS(inode, pgofs, en),
+	TP_ARGS(inode, pgofs, ei),
 
-	TP_CONDITION(en),
+	TP_CONDITION(ei),
 
 	TP_STRUCT__entry(
 		__field(dev_t,	dev)
@@ -1118,9 +1118,9 @@
 		__entry->dev = inode->i_sb->s_dev;
 		__entry->ino = inode->i_ino;
 		__entry->pgofs = pgofs;
-		__entry->fofs = en->ei.fofs;
-		__entry->blk = en->ei.blk;
-		__entry->len = en->ei.len;
+		__entry->fofs = ei->fofs;
+		__entry->blk = ei->blk;
+		__entry->len = ei->len;
 	),
 
 	TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, "
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
new file mode 100644
index 0000000..833cfcb
--- /dev/null
+++ b/include/trace/events/fib.h
@@ -0,0 +1,113 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fib
+
+#if !defined(_TRACE_FIB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FIB_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/ip_fib.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fib_table_lookup,
+
+	TP_PROTO(u32 tb_id, const struct flowi4 *flp),
+
+	TP_ARGS(tb_id, flp),
+
+	TP_STRUCT__entry(
+		__field(	u32,	tb_id		)
+		__field(	int,	oif		)
+		__field(	int,	iif		)
+		__field(	__u8,	tos		)
+		__field(	__u8,	scope		)
+		__field(	__u8,	flags		)
+		__array(	__u8,	src,	4	)
+		__array(	__u8,	dst,	4	)
+	),
+
+	TP_fast_assign(
+		__be32 *p32;
+
+		__entry->tb_id = tb_id;
+		__entry->oif = flp->flowi4_oif;
+		__entry->iif = flp->flowi4_iif;
+		__entry->tos = flp->flowi4_tos;
+		__entry->scope = flp->flowi4_scope;
+		__entry->flags = flp->flowi4_flags;
+
+		p32 = (__be32 *) __entry->src;
+		*p32 = flp->saddr;
+
+		p32 = (__be32 *) __entry->dst;
+		*p32 = flp->daddr;
+	),
+
+	TP_printk("table %u oif %d iif %d src %pI4 dst %pI4 tos %d scope %d flags %x",
+		  __entry->tb_id, __entry->oif, __entry->iif,
+		  __entry->src, __entry->dst, __entry->tos, __entry->scope,
+		  __entry->flags)
+);
+
+TRACE_EVENT(fib_table_lookup_nh,
+
+	TP_PROTO(const struct fib_nh *nh),
+
+	TP_ARGS(nh),
+
+	TP_STRUCT__entry(
+		__string(	name,	nh->nh_dev ? nh->nh_dev->name : "not set")
+		__field(	int,	oif		)
+		__array(	__u8,	src,	4	)
+	),
+
+	TP_fast_assign(
+		__be32 *p32 = (__be32 *) __entry->src;
+
+		__assign_str(name, nh->nh_dev ? nh->nh_dev->name : "not set");
+		__entry->oif = nh->nh_oif;
+		*p32 = nh->nh_saddr;
+	),
+
+	TP_printk("nexthop dev %s oif %d src %pI4",
+		  __get_str(name), __entry->oif, __entry->src)
+);
+
+TRACE_EVENT(fib_validate_source,
+
+	TP_PROTO(const struct net_device *dev, const struct flowi4 *flp),
+
+	TP_ARGS(dev, flp),
+
+	TP_STRUCT__entry(
+		__string(	name,	dev ? dev->name : "not set")
+		__field(	int,	oif		)
+		__field(	int,	iif		)
+		__field(	__u8,	tos		)
+		__array(	__u8,	src,	4	)
+		__array(	__u8,	dst,	4	)
+	),
+
+	TP_fast_assign(
+		__be32 *p32;
+
+		__assign_str(name, dev ? dev->name : "not set");
+		__entry->oif = flp->flowi4_oif;
+		__entry->iif = flp->flowi4_iif;
+		__entry->tos = flp->flowi4_tos;
+
+		p32 = (__be32 *) __entry->src;
+		*p32 = flp->saddr;
+
+		p32 = (__be32 *) __entry->dst;
+		*p32 = flp->daddr;
+	),
+
+	TP_printk("dev %s oif %d iif %d tos %d src %pI4 dst %pI4",
+		  __get_str(name), __entry->oif, __entry->iif, __entry->tos,
+		  __entry->src, __entry->dst)
+);
+#endif /* _TRACE_FIB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
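
These events are emitted from the IPv4 FIB lookup path. A hedged sketch of a call site (the surrounding function is illustrative, not the actual net/ipv4 code; as usual, exactly one compilation unit defines CREATE_TRACE_POINTS before including the header):

	#define CREATE_TRACE_POINTS
	#include <trace/events/fib.h>

	static void example_fib_lookup_trace(u32 tb_id, const struct flowi4 *flp,
					     const struct fib_nh *nh)
	{
		trace_fib_table_lookup(tb_id, flp);

		/* once a nexthop has been selected */
		if (nh)
			trace_fib_table_lookup_nh(nh);
	}
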
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
deleted file mode 100644
index da6f259..0000000
--- a/include/trace/events/jbd.h
+++ /dev/null
@@ -1,194 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM jbd
-
-#if !defined(_TRACE_JBD_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_JBD_H
-
-#include <linux/jbd.h>
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(jbd_checkpoint,
-
-	TP_PROTO(journal_t *journal, int result),
-
-	TP_ARGS(journal, result),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	result			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->result		= result;
-	),
-
-	TP_printk("dev %d,%d result %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->result)
-);
-
-DECLARE_EVENT_CLASS(jbd_commit,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_start_commit,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_locking,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_flushing,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-DEFINE_EVENT(jbd_commit, jbd_commit_logging,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction)
-);
-
-TRACE_EVENT(jbd_drop_transaction,
-
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction)
-);
-
-TRACE_EVENT(jbd_end_commit,
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-		__field(	int,	head			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-		__entry->head		= journal->j_tail_sequence;
-	),
-
-	TP_printk("dev %d,%d transaction %d head %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->transaction, __entry->head)
-);
-
-TRACE_EVENT(jbd_do_submit_data,
-	TP_PROTO(journal_t *journal, transaction_t *commit_transaction),
-
-	TP_ARGS(journal, commit_transaction),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	transaction		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->transaction	= commit_transaction->t_tid;
-	),
-
-	TP_printk("dev %d,%d transaction %d",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		   __entry->transaction)
-);
-
-TRACE_EVENT(jbd_cleanup_journal_tail,
-
-	TP_PROTO(journal_t *journal, tid_t first_tid,
-		 unsigned long block_nr, unsigned long freed),
-
-	TP_ARGS(journal, first_tid, block_nr, freed),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	tid_t,	tail_sequence		)
-		__field(	tid_t,	first_tid		)
-		__field(unsigned long,	block_nr		)
-		__field(unsigned long,	freed			)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->tail_sequence	= journal->j_tail_sequence;
-		__entry->first_tid	= first_tid;
-		__entry->block_nr	= block_nr;
-		__entry->freed		= freed;
-	),
-
-	TP_printk("dev %d,%d from %u to %u offset %lu freed %lu",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->tail_sequence, __entry->first_tid,
-		  __entry->block_nr, __entry->freed)
-);
-
-TRACE_EVENT(journal_write_superblock,
-	TP_PROTO(journal_t *journal, int write_op),
-
-	TP_ARGS(journal, write_op),
-
-	TP_STRUCT__entry(
-		__field(	dev_t,	dev			)
-		__field(	int,	write_op		)
-	),
-
-	TP_fast_assign(
-		__entry->dev		= journal->j_fs_dev->bd_dev;
-		__entry->write_op	= write_op;
-	),
-
-	TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
-		  MINOR(__entry->dev), __entry->write_op)
-);
-
-#endif /* _TRACE_JBD_H */
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index c78e88c..ef72c4a 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -661,7 +661,6 @@
  * Tracepoint for _rcu_barrier() execution.  The string "s" describes
  * the _rcu_barrier phase:
  *	"Begin": _rcu_barrier() started.
- *	"Check": _rcu_barrier() checking for piggybacking.
  *	"EarlyExit": _rcu_barrier() piggybacked, thus early exit.
  *	"Inc1": _rcu_barrier() piggyback check counter incremented.
  *	"OfflineNoCB": _rcu_barrier() found callback on never-online CPU
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index d57a575..539d6bc 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -55,9 +55,9 @@
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
 
-	TP_PROTO(struct task_struct *p, int success),
+	TP_PROTO(struct task_struct *p),
 
-	TP_ARGS(__perf_task(p), success),
+	TP_ARGS(__perf_task(p)),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
@@ -71,25 +71,37 @@
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid		= p->pid;
 		__entry->prio		= p->prio;
-		__entry->success	= success;
+		__entry->success	= 1; /* rudiment, kill when possible */
 		__entry->target_cpu	= task_cpu(p);
 	),
 
-	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
+	TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
 		  __entry->comm, __entry->pid, __entry->prio,
-		  __entry->success, __entry->target_cpu)
+		  __entry->target_cpu)
 );
 
+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+DEFINE_EVENT(sched_wakeup_template, sched_waking,
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-	     TP_PROTO(struct task_struct *p, int success),
-	     TP_ARGS(p, success));
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
 
 /*
  * Tracepoint for waking up a new task:
  */
 DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-	     TP_PROTO(struct task_struct *p, int success),
-	     TP_ARGS(p, success));
+	     TP_PROTO(struct task_struct *p),
+	     TP_ARGS(p));
 
 #ifdef CREATE_TRACE_POINTS
 static inline long __trace_sched_switch_state(struct task_struct *p)
diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h
new file mode 100644
index 0000000..62f005e
--- /dev/null
+++ b/include/trace/events/spmi.h
@@ -0,0 +1,135 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM spmi
+
+#if !defined(_TRACE_SPMI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SPMI_H
+
+#include <linux/spmi.h>
+#include <linux/tracepoint.h>
+
+/*
+ * drivers/spmi/spmi.c
+ */
+
+TRACE_EVENT(spmi_write_begin,
+	TP_PROTO(u8 opcode, u8 sid, u16 addr, u8 len, const u8 *buf),
+	TP_ARGS(opcode, sid, addr, len, buf),
+
+	TP_STRUCT__entry(
+		__field		( u8,         opcode    )
+		__field		( u8,         sid       )
+		__field		( u16,        addr      )
+		__field		( u8,         len       )
+		__dynamic_array	( u8,   buf,  len + 1   )
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->sid    = sid;
+		__entry->addr   = addr;
+		__entry->len    = len + 1;
+		memcpy(__get_dynamic_array(buf), buf, len + 1);
+	),
+
+	TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]",
+		  (int)__entry->opcode, (int)__entry->sid,
+		  (int)__entry->addr, (int)__entry->len,
+		  (int)__entry->len, __get_dynamic_array(buf))
+);
+
+TRACE_EVENT(spmi_write_end,
+	TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret),
+	TP_ARGS(opcode, sid, addr, ret),
+
+	TP_STRUCT__entry(
+		__field		( u8,         opcode    )
+		__field		( u8,         sid       )
+		__field		( u16,        addr      )
+		__field		( int,        ret       )
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->sid    = sid;
+		__entry->addr   = addr;
+		__entry->ret    = ret;
+	),
+
+	TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d",
+		  (int)__entry->opcode, (int)__entry->sid,
+		  (int)__entry->addr, __entry->ret)
+);
+
+TRACE_EVENT(spmi_read_begin,
+	TP_PROTO(u8 opcode, u8 sid, u16 addr),
+	TP_ARGS(opcode, sid, addr),
+
+	TP_STRUCT__entry(
+		__field		( u8,         opcode    )
+		__field		( u8,         sid       )
+		__field		( u16,        addr      )
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->sid    = sid;
+		__entry->addr   = addr;
+	),
+
+	TP_printk("opc=%d sid=%02d addr=0x%04x",
+		  (int)__entry->opcode, (int)__entry->sid,
+		  (int)__entry->addr)
+);
+
+TRACE_EVENT(spmi_read_end,
+	TP_PROTO(u8 opcode, u8 sid, u16 addr, int ret, u8 len, const u8 *buf),
+	TP_ARGS(opcode, sid, addr, ret, len, buf),
+
+	TP_STRUCT__entry(
+		__field		( u8,         opcode    )
+		__field		( u8,         sid       )
+		__field		( u16,        addr      )
+		__field		( int,        ret       )
+		__field		( u8,         len       )
+		__dynamic_array	( u8,   buf,  len + 1   )
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->sid    = sid;
+		__entry->addr   = addr;
+		__entry->ret    = ret;
+		__entry->len    = len + 1;
+		memcpy(__get_dynamic_array(buf), buf, len + 1);
+	),
+
+	TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]",
+		  (int)__entry->opcode, (int)__entry->sid,
+		  (int)__entry->addr, __entry->ret, (int)__entry->len,
+		  (int)__entry->len, __get_dynamic_array(buf))
+);
+
+TRACE_EVENT(spmi_cmd,
+	TP_PROTO(u8 opcode, u8 sid, int ret),
+	TP_ARGS(opcode, sid, ret),
+
+	TP_STRUCT__entry(
+		__field		( u8,         opcode    )
+		__field		( u8,         sid       )
+		__field		( int,        ret       )
+	),
+
+	TP_fast_assign(
+		__entry->opcode = opcode;
+		__entry->sid    = sid;
+		__entry->ret    = ret;
+	),
+
+	TP_printk("opc=%d sid=%02d ret=%d", (int)__entry->opcode,
+		  (int)__entry->sid, ret)
+);
+
+#endif /* _TRACE_SPMI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
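The TRACE_EVENT definitions above generate trace_spmi_*() calls matching the TP_PROTO signatures shown. A sketch of how the SPMI core could bracket a register write with the new begin/end events follows; the surrounding function is illustrative and the write_cmd callback signature is assumed from linux/spmi.h:

/* Sketch: wrap a controller write with the new SPMI trace events. */
#include <linux/spmi.h>
#include <trace/events/spmi.h>

static int example_spmi_write(struct spmi_controller *ctrl, u8 opc, u8 sid,
			      u16 addr, const u8 *buf, size_t len)
{
	int ret;

	trace_spmi_write_begin(opc, sid, addr, len, buf);
	ret = ctrl->write_cmd(ctrl, opc, sid, addr, buf, len);
	trace_spmi_write_end(opc, sid, addr, ret);

	return ret;
}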
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 1ff9942..aafb993 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -243,6 +243,7 @@
 header-y += llc.h
 header-y += loop.h
 header-y += lp.h
+header-y += lwtunnel.h
 header-y += magic.h
 header-y += major.h
 header-y += map_to_7segment.h
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index d3475e1..1f977dd 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -382,6 +382,9 @@
 #define AUDIT_ARCH_SHEL64	(EM_SH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_SPARC	(EM_SPARC)
 #define AUDIT_ARCH_SPARC64	(EM_SPARCV9|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_TILEGX	(EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEGX32	(EM_TILEGX|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_TILEPRO	(EM_TILEPRO|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 
 #define AUDIT_PERM_EXEC		1
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 29ef6f9..92a48e2 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -114,6 +114,7 @@
 	BPF_MAP_TYPE_HASH,
 	BPF_MAP_TYPE_ARRAY,
 	BPF_MAP_TYPE_PROG_ARRAY,
+	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 };
 
 enum bpf_prog_type {
@@ -249,6 +250,28 @@
 	 * Return: 0 on success
 	 */
 	BPF_FUNC_get_current_comm,
+
+	/**
+	 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+	 * @skb: pointer to skb
+	 * Return: classid if != 0
+	 */
+	BPF_FUNC_get_cgroup_classid,
+	BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+	BPF_FUNC_skb_vlan_pop,  /* bpf_skb_vlan_pop(skb) */
+
+	/**
+	 * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+	 * retrieve or populate tunnel metadata
+	 * @skb: pointer to skb
+	 * @key: pointer to 'struct bpf_tunnel_key'
+	 * @size: size of 'struct bpf_tunnel_key'
+	 * @flags: room for future extensions
+	 * Return: 0 on success
+	 */
+	BPF_FUNC_skb_get_tunnel_key,
+	BPF_FUNC_skb_set_tunnel_key,
+	BPF_FUNC_perf_event_read,	/* u64 bpf_perf_event_read(&map, index) */
 	__BPF_FUNC_MAX_ID,
 };
 
@@ -269,6 +292,12 @@
 	__u32 ifindex;
 	__u32 tc_index;
 	__u32 cb[5];
+	__u32 hash;
+};
+
+struct bpf_tunnel_key {
+	__u32 tunnel_id;
+	__u32 remote_ipv4;
 };
 
 #endif /* _UAPI__LINUX_BPF_H__ */
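The new tunnel-key helpers and BPF_MAP_TYPE_PERF_EVENT_ARRAY surface in restricted C through the usual samples/bpf conventions. A hedged sketch of a tc classifier using bpf_skb_get_tunnel_key() as described in the helper comment above; the helper prototype, section names and tunnel id are illustrative assumptions, not taken verbatim from the tree:

/* Restricted-C sketch (samples/bpf style) of a tc classifier that keys on
 * tunnel metadata via the new helper. Compile with clang -target bpf.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static int (*bpf_skb_get_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
				     int size, int flags) =
	(void *) BPF_FUNC_skb_get_tunnel_key;

__attribute__((section("classifier"), used))
int drop_foreign_tunnels(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_OK;	/* no tunnel metadata on this skb */

	/* only accept traffic that arrived over tunnel id 42 (example value) */
	return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] __attribute__((section("license"), used)) = "GPL";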
diff --git a/include/uapi/linux/dlm_device.h b/include/uapi/linux/dlm_device.h
index 3060783..df56c8f 100644
--- a/include/uapi/linux/dlm_device.h
+++ b/include/uapi/linux/dlm_device.h
@@ -26,7 +26,7 @@
 /* Version of the device interface */
 #define DLM_DEVICE_VERSION_MAJOR 6
 #define DLM_DEVICE_VERSION_MINOR 0
-#define DLM_DEVICE_VERSION_PATCH 1
+#define DLM_DEVICE_VERSION_PATCH 2
 
 /* struct passed to the lock write */
 struct dlm_lock_params {
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 061aca3..d34611e 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	32
+#define DM_VERSION_MINOR	33
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2015-6-26)"
+#define DM_VERSION_EXTRA	"-ioctl (2015-8-18)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index b088296..3429a3b 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -38,6 +38,8 @@
 #define EM_ALTERA_NIOS2	113	/* Altera Nios II soft-core processor */
 #define EM_TI_C6000	140	/* TI C6X DSPs */
 #define EM_AARCH64	183	/* ARM 64 bit */
+#define EM_TILEPRO	188	/* Tilera TILEPro */
+#define EM_TILEGX	191	/* Tilera TILE-Gx */
 #define EM_FRV		0x5441	/* Fujitsu FR-V */
 #define EM_AVR32	0x18ad	/* Atmel AVR32 */
 
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd67aec..cd16291 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1093,6 +1093,11 @@
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
  * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
  * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If the SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC is supported by HWTSTAMP_FILTER_V1_EVENT, then the
+ * driver should only report HWTSTAMP_FILTER_V1_EVENT in this op.
  */
 struct ethtool_ts_info {
 	__u32	cmd;
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 2b82d7e..96161b8 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -43,7 +43,7 @@
 	FRA_UNUSED5,
 	FRA_FWMARK,	/* mark */
 	FRA_FLOW,	/* flow/class id */
-	FRA_UNUSED6,
+	FRA_TUN_ID,
 	FRA_SUPPRESS_IFGROUP,
 	FRA_SUPPRESS_PREFIXLEN,
 	FRA_TABLE,	/* Extended table id */
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index c06742d..ab055d8 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -3,6 +3,7 @@
 
 #include <linux/if.h>
 #include <linux/ioctl.h>
+#include <linux/types.h>
 
 struct gsm_config
 {
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index eaaea62..3635b77 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -182,6 +182,7 @@
 #define MDB_TEMPORARY 0
 #define MDB_PERMANENT 1
 	__u8 state;
+	__u16 vid;
 	struct {
 		union {
 			__be32	ip4;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 2c7e8e3..3a5f263 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -148,6 +148,7 @@
 	IFLA_PHYS_SWITCH_ID,
 	IFLA_LINK_NETNSID,
 	IFLA_PHYS_PORT_NAME,
+	IFLA_PROTO_DOWN,
 	__IFLA_MAX
 };
 
@@ -229,6 +230,8 @@
 	IFLA_BR_AGEING_TIME,
 	IFLA_BR_STP_STATE,
 	IFLA_BR_PRIORITY,
+	IFLA_BR_VLAN_FILTERING,
+	IFLA_BR_VLAN_PROTOCOL,
 	__IFLA_BR_MAX,
 };
 
@@ -339,6 +342,15 @@
 
 #define MACVLAN_FLAG_NOPROMISC	1
 
+/* VRF section */
+enum {
+	IFLA_VRF_UNSPEC,
+	IFLA_VRF_TABLE,
+	__IFLA_VRF_MAX
+};
+
+#define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
+
 /* IPVLAN section */
 enum {
 	IFLA_IPVLAN_UNSPEC,
@@ -381,6 +393,7 @@
 	IFLA_VXLAN_REMCSUM_RX,
 	IFLA_VXLAN_GBP,
 	IFLA_VXLAN_REMCSUM_NOPARTIAL,
+	IFLA_VXLAN_COLLECT_METADATA,
 	__IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX	(__IFLA_VXLAN_MAX - 1)
@@ -397,6 +410,8 @@
 	IFLA_GENEVE_REMOTE,
 	IFLA_GENEVE_TTL,
 	IFLA_GENEVE_TOS,
+	IFLA_GENEVE_PORT,	/* destination port */
+	IFLA_GENEVE_COLLECT_METADATA,
 	__IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX	(__IFLA_GENEVE_MAX - 1)
@@ -431,6 +446,7 @@
 	IFLA_BOND_AD_ACTOR_SYS_PRIO,
 	IFLA_BOND_AD_USER_PORT_KEY,
 	IFLA_BOND_AD_ACTOR_SYSTEM,
+	IFLA_BOND_TLB_DYNAMIC_LB,
 	__IFLA_BOND_MAX,
 };
 
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index d3d715f8c..9e7edfd 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -55,6 +55,7 @@
 #define PACKET_TX_HAS_OFF		19
 #define PACKET_QDISC_BYPASS		20
 #define PACKET_ROLLOVER_STATS		21
+#define PACKET_FANOUT_DATA		22
 
 #define PACKET_FANOUT_HASH		0
 #define PACKET_FANOUT_LB		1
@@ -62,6 +63,8 @@
 #define PACKET_FANOUT_ROLLOVER		3
 #define PACKET_FANOUT_RND		4
 #define PACKET_FANOUT_QM		5
+#define PACKET_FANOUT_CBPF		6
+#define PACKET_FANOUT_EBPF		7
 #define PACKET_FANOUT_FLAG_ROLLOVER	0x1000
 #define PACKET_FANOUT_FLAG_DEFRAG	0x8000
 
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index bd3cc11..af4de90 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -112,6 +112,7 @@
 	IFLA_GRE_ENCAP_FLAGS,
 	IFLA_GRE_ENCAP_SPORT,
 	IFLA_GRE_ENCAP_DPORT,
+	IFLA_GRE_COLLECT_METADATA,
 	__IFLA_GRE_MAX,
 };
 
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
new file mode 100644
index 0000000..7ed9e67
--- /dev/null
+++ b/include/uapi/linux/ila.h
@@ -0,0 +1,15 @@
+/* ila.h - ILA Interface */
+
+#ifndef _UAPI_LINUX_ILA_H
+#define _UAPI_LINUX_ILA_H
+
+enum {
+	ILA_ATTR_UNSPEC,
+	ILA_ATTR_LOCATOR,			/* u64 */
+
+	__ILA_ATTR_MAX,
+};
+
+#define ILA_ATTR_MAX		(__ILA_ATTR_MAX - 1)
+
+#endif /* _UAPI_LINUX_ILA_H */
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 3199243..391395c 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -406,6 +406,11 @@
 	IPVS_DAEMON_ATTR_STATE,		/* sync daemon state (master/backup) */
 	IPVS_DAEMON_ATTR_MCAST_IFN,	/* multicast interface name */
 	IPVS_DAEMON_ATTR_SYNC_ID,	/* SyncID we belong to */
+	IPVS_DAEMON_ATTR_SYNC_MAXLEN,	/* UDP Payload Size */
+	IPVS_DAEMON_ATTR_MCAST_GROUP,	/* IPv4 Multicast Address */
+	IPVS_DAEMON_ATTR_MCAST_GROUP6,	/* IPv6 Multicast Address */
+	IPVS_DAEMON_ATTR_MCAST_PORT,	/* Multicast Port (base) */
+	IPVS_DAEMON_ATTR_MCAST_TTL,	/* Multicast TTL */
 	__IPVS_DAEMON_ATTR_MAX,
 };
 
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 5efa54a..38b4fef 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -171,6 +171,9 @@
 	DEVCONF_USE_OPTIMISTIC,
 	DEVCONF_ACCEPT_RA_MTU,
 	DEVCONF_STABLE_SECRET,
+	DEVCONF_USE_OIF_ADDRS_ONLY,
+	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+	DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 716ad4a..0d831f9 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -317,6 +317,7 @@
 		struct {
 #define KVM_SYSTEM_EVENT_SHUTDOWN       1
 #define KVM_SYSTEM_EVENT_RESET          2
+#define KVM_SYSTEM_EVENT_CRASH          3
 			__u32 type;
 			__u64 flags;
 		} system_event;
@@ -481,6 +482,7 @@
 	 ((ai) << 26))
 #define KVM_S390_INT_IO_MIN		0x00000000u
 #define KVM_S390_INT_IO_MAX		0xfffdffffu
+#define KVM_S390_INT_IO_AI_MASK		0x04000000u
 
 
 struct kvm_s390_interrupt {
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644
index 0000000..34141a5
--- /dev/null
+++ b/include/uapi/linux/lwtunnel.h
@@ -0,0 +1,47 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+	LWTUNNEL_ENCAP_NONE,
+	LWTUNNEL_ENCAP_MPLS,
+	LWTUNNEL_ENCAP_IP,
+	LWTUNNEL_ENCAP_ILA,
+	LWTUNNEL_ENCAP_IP6,
+	__LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+enum lwtunnel_ip_t {
+	LWTUNNEL_IP_UNSPEC,
+	LWTUNNEL_IP_ID,
+	LWTUNNEL_IP_DST,
+	LWTUNNEL_IP_SRC,
+	LWTUNNEL_IP_TTL,
+	LWTUNNEL_IP_TOS,
+	LWTUNNEL_IP_SPORT,
+	LWTUNNEL_IP_DPORT,
+	LWTUNNEL_IP_FLAGS,
+	__LWTUNNEL_IP_MAX,
+};
+
+#define LWTUNNEL_IP_MAX (__LWTUNNEL_IP_MAX - 1)
+
+enum lwtunnel_ip6_t {
+	LWTUNNEL_IP6_UNSPEC,
+	LWTUNNEL_IP6_ID,
+	LWTUNNEL_IP6_DST,
+	LWTUNNEL_IP6_SRC,
+	LWTUNNEL_IP6_HOPLIMIT,
+	LWTUNNEL_IP6_TC,
+	LWTUNNEL_IP6_SPORT,
+	LWTUNNEL_IP6_DPORT,
+	LWTUNNEL_IP6_FLAGS,
+	__LWTUNNEL_IP6_MAX,
+};
+
+#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
+
+#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index bc0d8b6..7c3b64f 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -107,4 +107,23 @@
 	};
 };
 
+/**
+ * DOC: set and unset event notification for a connected client
+ *
+ * The IOCTL argument is 1 for enabling event notification and 0 for
+ * disabling the service.
+ * Return:  -EOPNOTSUPP if the device doesn't support the feature
+ */
+#define IOCTL_MEI_NOTIFY_SET _IOW('H', 0x02, __u32)
+
+/**
+ * DOC: retrieve notification
+ *
+ * The IOCTL output argument is 1 if an event is pending and 0 otherwise;
+ * the ioctl has to be called in order to acknowledge a pending event.
+ *
+ * Return:  -EOPNOTSUPP if the device doesn't support the feature
+ */
+#define IOCTL_MEI_NOTIFY_GET _IOR('H', 0x03, __u32)
+
 #endif /* _LINUX_MEI_H  */
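A userspace sketch of the two new MEI notification ioctls follows, assuming a client that is already connected; the device node path is an assumption and error handling is kept minimal:

/* Sketch: enable MEI event notification and poll for a pending event. */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

int main(void)
{
	uint32_t enable = 1, pending = 0;
	int fd = open("/dev/mei0", O_RDWR);	/* node name is an assumption */

	if (fd < 0)
		return 1;

	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) < 0)
		perror("IOCTL_MEI_NOTIFY_SET");	/* -EOPNOTSUPP if unsupported */

	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) == 0 && pending)
		printf("event pending\n");

	close(fd);
	return 0;
}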
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 139d4dd..24a6cb1 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -41,4 +41,6 @@
 #define MPLS_LABEL_OAMALERT		14 /* RFC3429 */
 #define MPLS_LABEL_EXTENSION		15 /* RFC7274 */
 
+#define MPLS_LABEL_FIRST_UNRESERVED	16 /* RFC3032 */
+
 #endif /* _UAPI_MPLS_H */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644
index 0000000..d80a049
--- /dev/null
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -0,0 +1,28 @@
+/*
+ *	mpls tunnel api
+ *
+ *	Authors:
+ *		Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ *     [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+	MPLS_IPTUNNEL_UNSPEC,
+	MPLS_IPTUNNEL_DST,
+	__MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 2e35c61..788655b 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -106,6 +106,7 @@
 	__u64		ndts_rcv_probes_ucast;
 	__u64		ndts_periodic_gc_runs;
 	__u64		ndts_forced_gc_runs;
+	__u64		ndts_table_fulls;
 };
 
 enum {
diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
index ceeefe6..ed4e776 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h
@@ -13,6 +13,8 @@
 	SCTP_CONNTRACK_SHUTDOWN_SENT,
 	SCTP_CONNTRACK_SHUTDOWN_RECD,
 	SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+	SCTP_CONNTRACK_HEARTBEAT_SENT,
+	SCTP_CONNTRACK_HEARTBEAT_ACKED,
 	SCTP_CONNTRACK_MAX
 };
 
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a99e6a9..d8c8a7c 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -756,16 +756,25 @@
 };
 #define NFTA_CT_MAX		(__NFTA_CT_MAX - 1)
 
+enum nft_limit_type {
+	NFT_LIMIT_PKTS,
+	NFT_LIMIT_PKT_BYTES
+};
+
 /**
  * enum nft_limit_attributes - nf_tables limit expression netlink attributes
  *
  * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
  * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ * @NFTA_LIMIT_BURST: burst (NLA_U32)
+ * @NFTA_LIMIT_TYPE: type of limit (NLA_U32: enum nft_limit_type)
  */
 enum nft_limit_attributes {
 	NFTA_LIMIT_UNSPEC,
 	NFTA_LIMIT_RATE,
 	NFTA_LIMIT_UNIT,
+	NFTA_LIMIT_BURST,
+	NFTA_LIMIT_TYPE,
 	__NFTA_LIMIT_MAX
 };
 #define NFTA_LIMIT_MAX		(__NFTA_LIMIT_MAX - 1)
@@ -936,6 +945,20 @@
 #define NFTA_REDIR_MAX		(__NFTA_REDIR_MAX - 1)
 
 /**
+ * enum nft_dup_attributes - nf_tables dup expression netlink attributes
+ *
+ * @NFTA_DUP_SREG_ADDR: source register of address (NLA_U32: nft_registers)
+ * @NFTA_DUP_SREG_DEV: source register of output interface (NLA_U32: nft_register)
+ */
+enum nft_dup_attributes {
+	NFTA_DUP_UNSPEC,
+	NFTA_DUP_SREG_ADDR,
+	NFTA_DUP_SREG_DEV,
+	__NFTA_DUP_MAX
+};
+#define NFTA_DUP_MAX		(__NFTA_DUP_MAX - 1)
+
+/**
  * enum nft_gen_attributes - nf_tables ruleset generation attributes
  *
  * @NFTA_GEN_ID: Ruleset generation ID (NLA_U32)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index acad6c5..c1a4e144 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -61,6 +61,7 @@
 	CTA_TUPLE_UNSPEC,
 	CTA_TUPLE_IP,
 	CTA_TUPLE_PROTO,
+	CTA_TUPLE_ZONE,
 	__CTA_TUPLE_MAX
 };
 #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
index 1ab0b97..f2c10dc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
+++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h
@@ -92,6 +92,8 @@
 	CTA_TIMEOUT_SCTP_SHUTDOWN_SENT,
 	CTA_TIMEOUT_SCTP_SHUTDOWN_RECD,
 	CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
+	CTA_TIMEOUT_SCTP_HEARTBEAT_SENT,
+	CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED,
 	__CTA_TIMEOUT_SCTP_MAX
 };
 #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_CT.h b/include/uapi/linux/netfilter/xt_CT.h
index 5a688c1..9e52041 100644
--- a/include/uapi/linux/netfilter/xt_CT.h
+++ b/include/uapi/linux/netfilter/xt_CT.h
@@ -6,7 +6,13 @@
 enum {
 	XT_CT_NOTRACK		= 1 << 0,
 	XT_CT_NOTRACK_ALIAS	= 1 << 1,
-	XT_CT_MASK		= XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS,
+	XT_CT_ZONE_DIR_ORIG	= 1 << 2,
+	XT_CT_ZONE_DIR_REPL	= 1 << 3,
+	XT_CT_ZONE_MARK		= 1 << 4,
+
+	XT_CT_MASK		= XT_CT_NOTRACK | XT_CT_NOTRACK_ALIAS |
+				  XT_CT_ZONE_DIR_ORIG | XT_CT_ZONE_DIR_REPL |
+				  XT_CT_ZONE_MARK,
 };
 
 struct xt_ct_target_info {
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
index 205ed62..cd2e940 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -10,7 +10,9 @@
 	IP6T_ICMP6_ADDR_UNREACH,
 	IP6T_ICMP6_PORT_UNREACH,
 	IP6T_ICMP6_ECHOREPLY,
-	IP6T_TCP_RESET
+	IP6T_TCP_RESET,
+	IP6T_ICMP6_POLICY_FAIL,
+	IP6T_ICMP6_REJECT_ROUTE
 };
 
 struct ip6t_reject_info {
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index cf6a65c..6f3fe16 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -110,6 +110,7 @@
 #define NETLINK_TX_RING			7
 #define NETLINK_LISTEN_ALL_NSID		8
 #define NETLINK_LIST_MEMBERSHIPS	9
+#define NETLINK_CAP_ACK			10
 
 struct nl_pktinfo {
 	__u32	group;
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 732b32e..8864194 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -584,5 +584,6 @@
 #define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
 #define NVME_IOCTL_IO_CMD	_IOWR('N', 0x43, struct nvme_passthru_cmd)
 #define NVME_IOCTL_RESET	_IO('N', 0x44)
+#define NVME_IOCTL_SUBSYS_RESET	_IO('N', 0x45)
 
 #endif /* _UAPI_LINUX_NVME_H */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1dab776..32e07d8 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -164,6 +164,9 @@
  * %OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute, which is sent only if the
  * output port is actually a tunnel port. Contains the output tunnel key
  * extracted from the packet as nested %OVS_TUNNEL_KEY_ATTR_* attributes.
+ * @OVS_PACKET_ATTR_MRU: Present for an %OVS_PACKET_CMD_ACTION and
+ * %OVS_PACKET_ATTR_USERSPACE action to specify the maximum received
+ * fragment size.
  *
  * These attributes follow the &struct ovs_header within the Generic Netlink
  * payload for %OVS_PACKET_* commands.
@@ -180,6 +183,7 @@
 	OVS_PACKET_ATTR_UNUSED2,
 	OVS_PACKET_ATTR_PROBE,      /* Packet operation is a feature probe,
 				       error logging should be suppressed. */
+	OVS_PACKET_ATTR_MRU,	    /* Maximum received IP fragment size. */
 	__OVS_PACKET_ATTR_MAX
 };
 
@@ -319,9 +323,13 @@
 	OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
 				 * The implementation may restrict
 				 * the accepted length of the array. */
+	OVS_KEY_ATTR_CT_STATE,	/* u8 bitmask of OVS_CS_F_* */
+	OVS_KEY_ATTR_CT_ZONE,	/* u16 connection tracking zone. */
+	OVS_KEY_ATTR_CT_MARK,	/* u32 connection tracking mark */
+	OVS_KEY_ATTR_CT_LABEL,	/* 16-octet connection tracking label */
 
 #ifdef __KERNEL__
-	OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ovs_tunnel_info */
+	OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
 #endif
 	__OVS_KEY_ATTR_MAX
 };
@@ -431,6 +439,20 @@
 	__u8	nd_tll[ETH_ALEN];
 };
 
+#define OVS_CT_LABEL_LEN	16
+struct ovs_key_ct_label {
+	__u8	ct_label[OVS_CT_LABEL_LEN];
+};
+
+/* OVS_KEY_ATTR_CT_STATE flags */
+#define OVS_CS_F_NEW               0x01 /* Beginning of a new connection. */
+#define OVS_CS_F_ESTABLISHED       0x02 /* Part of an existing connection. */
+#define OVS_CS_F_RELATED           0x04 /* Related to an established
+					 * connection. */
+#define OVS_CS_F_INVALID           0x20 /* Could not track connection. */
+#define OVS_CS_F_REPLY_DIR         0x40 /* Flow is in the reply direction. */
+#define OVS_CS_F_TRACKED           0x80 /* Conntrack has occurred. */
+
 /**
  * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
  * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
@@ -595,6 +617,39 @@
 };
 
 /**
+ * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
+ * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+ * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
+ * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
+ * mask, the corresponding bit in the value is copied to the connection
+ * tracking mark field in the connection.
+ * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+ * mask. For each bit set in the mask, the corresponding bit in the value is
+ * copied to the connection tracking label field in the connection.
+ * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
+ */
+enum ovs_ct_attr {
+	OVS_CT_ATTR_UNSPEC,
+	OVS_CT_ATTR_FLAGS,      /* u8 bitmask of OVS_CT_F_*. */
+	OVS_CT_ATTR_ZONE,       /* u16 zone id. */
+	OVS_CT_ATTR_MARK,       /* mark to associate with this connection. */
+	OVS_CT_ATTR_LABEL,      /* label to associate with this connection. */
+	OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
+				   related connections. */
+	__OVS_CT_ATTR_MAX
+};
+
+#define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
+
+/*
+ * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
+ * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
+ * future packets for the same connection to be identified as 'established'
+ * or 'related'.
+ */
+#define OVS_CT_F_COMMIT		0x01
+
+/**
  * enum ovs_action_attr - Action types.
  *
  * @OVS_ACTION_ATTR_OUTPUT: Output packet to port.
@@ -623,6 +678,8 @@
  * indicate the new packet contents. This could potentially still be
  * %ETH_P_MPLS if the resulting MPLS label stack is not empty.  If there
  * is no MPLS label stack, as determined by ethertype, no action is taken.
+ * @OVS_ACTION_ATTR_CT: Track the connection. Populate the conntrack-related
+ * entries in the flow key.
  *
  * Only a single header can be set with a single %OVS_ACTION_ATTR_SET.  Not all
  * fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -648,6 +705,7 @@
 				       * data immediately followed by a mask.
 				       * The data must be zero for the unmasked
 				       * bits. */
+	OVS_ACTION_ATTR_CT,           /* One nested OVS_CT_ATTR_* . */
 
 	__OVS_ACTION_ATTR_MAX,	      /* Nothing past this will be accepted
 				       * from userspace. */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d97f84c..2881145 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -330,7 +330,8 @@
 				mmap2          :  1, /* include mmap with inode data     */
 				comm_exec      :  1, /* flag comm events that are due to an exec */
 				use_clockid    :  1, /* use @clockid for time fields */
-				__reserved_1   : 38;
+				context_switch :  1, /* context switch data */
+				__reserved_1   : 37;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
@@ -572,9 +573,11 @@
 /*
  * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
  * different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
  */
 #define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
 #define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
+#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
 /*
  * Indicates that the content of PERF_SAMPLE_IP points to
  * the actual instruction that triggered the event. See also
@@ -818,6 +821,32 @@
 	 */
 	PERF_RECORD_LOST_SAMPLES		= 13,
 
+	/*
+	 * Records a context switch in or out (flagged by
+	 * PERF_RECORD_MISC_SWITCH_OUT). See also
+	 * PERF_RECORD_SWITCH_CPU_WIDE.
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_SWITCH			= 14,
+
+	/*
+	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
+	 * next_prev_tid that are the next (switching out) or previous
+	 * (switching in) pid/tid.
+	 *
+	 * struct {
+	 *	struct perf_event_header	header;
+	 *	u32				next_prev_pid;
+	 *	u32				next_prev_tid;
+	 *	struct sample_id		sample_id;
+	 * };
+	 */
+	PERF_RECORD_SWITCH_CPU_WIDE		= 15,
+
 	PERF_RECORD_MAX,			/* non-ABI */
 };
 
@@ -922,6 +951,7 @@
  *
  *     in_tx: running in a hardware transaction
  *     abort: aborting a hardware transaction
+ *    cycles: cycles from last branch (or 0 if not supported)
  */
 struct perf_branch_entry {
 	__u64	from;
@@ -930,7 +960,8 @@
 		predicted:1,/* target predicted */
 		in_tx:1,    /* in transaction */
 		abort:1,    /* transaction abort */
-		reserved:60;
+		cycles:16,  /* cycle count to last branch */
+		reserved:44;
 };
 
 #endif /* _UAPI_LINUX_PERF_EVENT_H */
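A userspace sketch of requesting the new PERF_RECORD_SWITCH sideband records by setting the context_switch bit on an otherwise dummy event; ring-buffer parsing is omitted and the buffer size is an arbitrary choice:

/* Sketch: per-thread dummy event that emits PERF_RECORD_SWITCH records. */
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	void *ring;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	attr.sample_id_all = 1;
	attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	attr.context_switch = 1;		/* new in this release */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* 1 metadata page + 8 data pages; switch records land in here */
	ring = mmap(NULL, 9 * 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		close(fd);
		return 1;
	}

	sleep(1);
	munmap(ring, 9 * 4096);
	close(fd);
	return 0;
}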
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index fdd8f07..7020247 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -308,6 +308,8 @@
 	RTA_VIA,
 	RTA_NEWDST,
 	RTA_PREF,
+	RTA_ENCAP_TYPE,
+	RTA_ENCAP,
 	__RTA_MAX
 };
 
@@ -416,10 +418,13 @@
 
 #define RTAX_MAX (__RTAX_MAX - 1)
 
-#define RTAX_FEATURE_ECN	0x00000001
-#define RTAX_FEATURE_SACK	0x00000002
-#define RTAX_FEATURE_TIMESTAMP	0x00000004
-#define RTAX_FEATURE_ALLFRAG	0x00000008
+#define RTAX_FEATURE_ECN	(1 << 0)
+#define RTAX_FEATURE_SACK	(1 << 1)
+#define RTAX_FEATURE_TIMESTAMP	(1 << 2)
+#define RTAX_FEATURE_ALLFRAG	(1 << 3)
+
+#define RTAX_FEATURE_MASK	(RTAX_FEATURE_ECN | RTAX_FEATURE_SACK | \
+				 RTAX_FEATURE_TIMESTAMP | RTAX_FEATURE_ALLFRAG)
 
 struct rta_session {
 	__u8	proto;
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index eee8968..25a9ad8 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -278,6 +278,8 @@
 	LINUX_MIB_TCPACKSKIPPEDCHALLENGE,	/* TCPACKSkippedChallenge */
 	LINUX_MIB_TCPWINPROBE,			/* TCPWinProbe */
 	LINUX_MIB_TCPKEEPALIVE,			/* TCPKeepAlive */
+	LINUX_MIB_TCPMTUPFAIL,			/* TCPMTUPFail */
+	LINUX_MIB_TCPMTUPSUCCESS,		/* TCPMTUPSuccess */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index aa33fd1..f7adc6e 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -674,9 +674,21 @@
 	__u8  bmAttributes;	/* support for HNP, SRP, etc */
 } __attribute__ ((packed));
 
+/* USB_DT_OTG (from OTG 2.0 supplement) */
+struct usb_otg20_descriptor {
+	__u8  bLength;
+	__u8  bDescriptorType;
+
+	__u8  bmAttributes;	/* support for HNP, SRP and ADP, etc */
+	__le16 bcdOTG;		/* OTG and EH supplement release number
+				 * in binary-coded decimal (i.e. 2.0 is 0200H)
+				 */
+} __attribute__ ((packed));
+
 /* from usb_otg_descriptor.bmAttributes */
 #define USB_OTG_SRP		(1 << 0)
 #define USB_OTG_HNP		(1 << 1)	/* swap host/device roles */
+#define USB_OTG_ADP		(1 << 2)	/* support ADP */
 
 /*-------------------------------------------------------------------------*/
 
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
index 99a8ca1..1e889aa 100644
--- a/include/uapi/misc/cxl.h
+++ b/include/uapi/misc/cxl.h
@@ -29,8 +29,10 @@
 
 #define CXL_START_WORK_AMR		0x0000000000000001ULL
 #define CXL_START_WORK_NUM_IRQS		0x0000000000000002ULL
+#define CXL_START_WORK_ERR_FF		0x0000000000000004ULL
 #define CXL_START_WORK_ALL		(CXL_START_WORK_AMR |\
-					 CXL_START_WORK_NUM_IRQS)
+					 CXL_START_WORK_NUM_IRQS |\
+					 CXL_START_WORK_ERR_FF)
 
 
 /* Possible modes that an afu can be in */
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index 75746d5..d791e0a 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -3,3 +3,4 @@
 header-y += scsi_bsg_fc.h
 header-y += scsi_netlink.h
 header-y += scsi_netlink_fc.h
+header-y += cxlflash_ioctl.h
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
new file mode 100644
index 0000000..831351b
--- /dev/null
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -0,0 +1,174 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_IOCTL_H
+#define _CXLFLASH_IOCTL_H
+
+#include <linux/types.h>
+
+/*
+ * Structure and flag definitions CXL Flash superpipe ioctls
+ */
+
+#define DK_CXLFLASH_VERSION_0	0
+
+struct dk_cxlflash_hdr {
+	__u16 version;			/* Version data */
+	__u16 rsvd[3];			/* Reserved for future use */
+	__u64 flags;			/* Input flags */
+	__u64 return_flags;		/* Returned flags */
+};
+
+/*
+ * Notes:
+ * -----
+ * The 'context_id' field of all ioctl structures contains the context
+ * identifier for a context in the lower 32-bits (upper 32-bits are not
+ * to be used when identifying a context to the AFU). That said, the value
+ * in its entirety (all 64-bits) is to be treated as an opaque cookie and
+ * should be presented as such when issuing ioctls.
+ *
+ * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
+ * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
+ * the fcntl.h header file.
+ */
+#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT	0x8000000000000000ULL
+
+struct dk_cxlflash_attach {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 num_interrupts;		/* Requested number of interrupts */
+	__u64 context_id;		/* Returned context */
+	__u64 mmio_size;		/* Returned size of MMIO area */
+	__u64 block_size;		/* Returned block size, in bytes */
+	__u64 adap_fd;			/* Returned adapter file descriptor */
+	__u64 last_lba;			/* Returned last LBA on the device */
+	__u64 max_xfer;			/* Returned max transfer size, blocks */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_detach {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context to detach */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_udirect {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context to own physical resources */
+	__u64 rsrc_handle;		/* Returned resource handle */
+	__u64 last_lba;			/* Returned last LBA on the device */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME	0x8000000000000000ULL
+
+struct dk_cxlflash_uvirtual {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context to own virtual resources */
+	__u64 lun_size;			/* Requested size, in 4K blocks */
+	__u64 rsrc_handle;		/* Returned resource handle */
+	__u64 last_lba;			/* Returned last LBA of LUN */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_release {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context owning resources */
+	__u64 rsrc_handle;		/* Resource handle to release */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_resize {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context owning resources */
+	__u64 rsrc_handle;		/* Resource handle of LUN to resize */
+	__u64 req_size;			/* New requested size, in 4K blocks */
+	__u64 last_lba;			/* Returned last LBA of LUN */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_clone {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id_src;		/* Context to clone from */
+	__u64 context_id_dst;		/* Context to clone to */
+	__u64 adap_fd_src;		/* Source context adapter fd */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_VERIFY_SENSE_LEN	18
+#define DK_CXLFLASH_VERIFY_HINT_SENSE	0x8000000000000000ULL
+
+struct dk_cxlflash_verify {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context owning resources to verify */
+	__u64 rsrc_handle;		/* Resource handle of LUN */
+	__u64 hint;			/* Reasons for verify */
+	__u64 last_lba;			/* Returned last LBA of device */
+	__u8 sense_data[DK_CXLFLASH_VERIFY_SENSE_LEN]; /* SCSI sense data */
+	__u8 pad[6];			/* Pad to next 8-byte boundary */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET	0x8000000000000000ULL
+
+struct dk_cxlflash_recover_afu {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 reason;			/* Reason for recovery request */
+	__u64 context_id;		/* Context to recover / updated ID */
+	__u64 mmio_size;		/* Returned size of MMIO area */
+	__u64 adap_fd;			/* Returned adapter file descriptor */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN			16
+#define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE		0x8000000000000000ULL
+#define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE	0x4000000000000000ULL
+#define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE	0x2000000000000000ULL
+
+struct dk_cxlflash_manage_lun {
+	struct dk_cxlflash_hdr hdr;			/* Common fields */
+	__u8 wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN];	/* Page83 WWID, NAA-6 */
+	__u64 reserved[8];				/* Rsvd, future use */
+};
+
+union cxlflash_ioctls {
+	struct dk_cxlflash_attach attach;
+	struct dk_cxlflash_detach detach;
+	struct dk_cxlflash_udirect udirect;
+	struct dk_cxlflash_uvirtual uvirtual;
+	struct dk_cxlflash_release release;
+	struct dk_cxlflash_resize resize;
+	struct dk_cxlflash_clone clone;
+	struct dk_cxlflash_verify verify;
+	struct dk_cxlflash_recover_afu recover_afu;
+	struct dk_cxlflash_manage_lun manage_lun;
+};
+
+#define MAX_CXLFLASH_IOCTL_SZ	(sizeof(union cxlflash_ioctls))
+
+#define CXL_MAGIC 0xCA
+#define CXL_IOWR(_n, _s)	_IOWR(CXL_MAGIC, _n, struct _s)
+
+#define DK_CXLFLASH_ATTACH		CXL_IOWR(0x80, dk_cxlflash_attach)
+#define DK_CXLFLASH_USER_DIRECT		CXL_IOWR(0x81, dk_cxlflash_udirect)
+#define DK_CXLFLASH_RELEASE		CXL_IOWR(0x82, dk_cxlflash_release)
+#define DK_CXLFLASH_DETACH		CXL_IOWR(0x83, dk_cxlflash_detach)
+#define DK_CXLFLASH_VERIFY		CXL_IOWR(0x84, dk_cxlflash_verify)
+#define DK_CXLFLASH_RECOVER_AFU		CXL_IOWR(0x85, dk_cxlflash_recover_afu)
+#define DK_CXLFLASH_MANAGE_LUN		CXL_IOWR(0x86, dk_cxlflash_manage_lun)
+#define DK_CXLFLASH_USER_VIRTUAL	CXL_IOWR(0x87, dk_cxlflash_uvirtual)
+#define DK_CXLFLASH_VLUN_RESIZE		CXL_IOWR(0x88, dk_cxlflash_resize)
+#define DK_CXLFLASH_VLUN_CLONE		CXL_IOWR(0x89, dk_cxlflash_clone)
+
+#endif /* ifndef _CXLFLASH_IOCTL_H */
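A hedged userspace sketch of the DK_CXLFLASH_ATTACH ioctl defined above: the target device node is hypothetical (in practice the superpipe ioctls are issued against a cxlflash disk device) and the interrupt count is only an example value:

/* Sketch: attach to a cxlflash superpipe context. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/cxlflash_ioctl.h>

int main(void)
{
	struct dk_cxlflash_attach attach;
	int fd = open("/dev/sdc", O_RDWR);	/* hypothetical cxlflash LUN */

	if (fd < 0)
		return 1;

	memset(&attach, 0, sizeof(attach));
	attach.hdr.version = DK_CXLFLASH_VERSION_0;
	attach.num_interrupts = 4;		/* example value */

	if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0)
		printf("context 0x%llx, adapter fd %llu, last LBA %llu\n",
		       (unsigned long long)attach.context_id,
		       (unsigned long long)attach.adap_fd,
		       (unsigned long long)attach.last_lba);

	close(fd);
	return 0;
}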
diff --git a/include/video/kyro.h b/include/video/kyro.h
index c563968..b958c2e 100644
--- a/include/video/kyro.h
+++ b/include/video/kyro.h
@@ -35,9 +35,7 @@
 	/* Useful to hold depth here for Linux */
 	u8 PIXDEPTH;
 
-#ifdef CONFIG_MTRR
-	int mtrr_handle;
-#endif
+	int wc_cookie;
 };
 
 extern int kyro_dev_init(void);
diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h
index 70054cc..252ffd4 100644
--- a/include/xen/interface/io/netif.h
+++ b/include/xen/interface/io/netif.h
@@ -156,7 +156,9 @@
 /* Types of xen_netif_extra_info descriptors. */
 #define XEN_NETIF_EXTRA_TYPE_NONE	(0)  /* Never used - invalid */
 #define XEN_NETIF_EXTRA_TYPE_GSO	(1)  /* u.gso */
-#define XEN_NETIF_EXTRA_TYPE_MAX	(2)
+#define XEN_NETIF_EXTRA_TYPE_MCAST_ADD	(2)  /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL	(3)  /* u.mcast */
+#define XEN_NETIF_EXTRA_TYPE_MAX	(4)
 
 /* xen_netif_extra_info flags. */
 #define _XEN_NETIF_EXTRA_FLAG_MORE	(0)
@@ -201,6 +203,10 @@
 			uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
 		} gso;
 
+		struct {
+			uint8_t addr[6]; /* Address to add/remove. */
+		} mcast;
+
 		uint16_t pad[3];
 	} u;
 };
diff --git a/init/Kconfig b/init/Kconfig
index af09b4f..bb9b4dd 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -538,15 +538,6 @@
 config CONTEXT_TRACKING
        bool
 
-config RCU_USER_QS
-	bool
-	help
-	  This option sets hooks on kernel / userspace boundaries and
-	  puts RCU in extended quiescent state when the CPU runs in
-	  userspace. It means that when a CPU runs in userspace, it is
-	  excluded from the global RCU state machine and thus doesn't
-	  try to keep the timer tick on for RCU.
-
 config CONTEXT_TRACKING_FORCE
 	bool "Force context tracking"
 	depends on CONTEXT_TRACKING
@@ -707,6 +698,7 @@
 config RCU_NOCB_CPU
 	bool "Offload RCU callback processing from boot-selected CPUs"
 	depends on TREE_RCU || PREEMPT_RCU
+	depends on RCU_EXPERT || NO_HZ_FULL
 	default n
 	help
 	  Use this option to reduce OS jitter for aggressive HPC or
@@ -955,6 +947,22 @@
 	  Provides a way to freeze and unfreeze all tasks in a
 	  cgroup.
 
+config CGROUP_PIDS
+	bool "PIDs cgroup subsystem"
+	help
+	  Provides enforcement of process number limits in the scope of a
+	  cgroup. Any attempt to fork more processes than is allowed in the
+	  cgroup will fail. PIDs are fundamentally a global resource because it
+	  is fairly trivial to reach PID exhaustion before you reach even a
+	  conservative kmemcg limit. As a result, it is possible to grind a
+	  system to a halt without being limited by other cgroup policies. The
+	  PIDs cgroup subsystem is designed to stop this from happening.
+
+	  It should be noted that organisational operations (such as attaching
+	  to a cgroup hierarchy) will *not* be blocked by the PIDs subsystem,
+	  since the PIDs limit only affects a process's ability to fork, not to
+	  attach to a cgroup.
+
 config CGROUP_DEVICE
 	bool "Device controller for cgroups"
 	help
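A short userspace sketch of driving the new pids controller from the legacy hierarchy; the mount point and group name are assumptions, while pids.max, pids.current and the -EAGAIN behaviour are documented in kernel/cgroup_pids.c further below:

/* Sketch: cap a cgroup at 16 tasks and move the current process into it. */
#include <stdio.h>
#include <unistd.h>
#include <sys/stat.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	char pid[32];

	/* assumes a "pids" hierarchy mounted at /sys/fs/cgroup/pids */
	mkdir("/sys/fs/cgroup/pids/demo", 0755);
	write_str("/sys/fs/cgroup/pids/demo/pids.max", "16");

	snprintf(pid, sizeof(pid), "%d", getpid());
	write_str("/sys/fs/cgroup/pids/demo/cgroup.procs", pid);

	/* any fork() beyond 16 tasks in this cgroup now fails with -EAGAIN */
	return 0;
}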
diff --git a/kernel/Makefile b/kernel/Makefile
index 43c4c92..718fb8a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -55,6 +55,7 @@
 obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
 obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
+obj-$(CONFIG_CGROUP_PIDS) += cgroup_pids.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
 obj-$(CONFIG_UTS_NS) += utsname.o
 obj-$(CONFIG_USER_NS) += user_namespace.o
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index cb31229..29ace10 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -150,15 +150,15 @@
 }
 late_initcall(register_array_map);
 
-static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
+static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
 {
-	/* only bpf_prog file descriptors can be stored in prog_array map */
+	/* only file descriptors can be stored in this type of map */
 	if (attr->value_size != sizeof(u32))
 		return ERR_PTR(-EINVAL);
 	return array_map_alloc(attr);
 }
 
-static void prog_array_map_free(struct bpf_map *map)
+static void fd_array_map_free(struct bpf_map *map)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
@@ -167,21 +167,21 @@
 
 	/* make sure it's empty */
 	for (i = 0; i < array->map.max_entries; i++)
-		BUG_ON(array->prog[i] != NULL);
+		BUG_ON(array->ptrs[i] != NULL);
 	kvfree(array);
 }
 
-static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key)
+static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	return NULL;
 }
 
 /* only called from syscall */
-static int prog_array_map_update_elem(struct bpf_map *map, void *key,
-				      void *value, u64 map_flags)
+static int fd_array_map_update_elem(struct bpf_map *map, void *key,
+				    void *value, u64 map_flags)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct bpf_prog *prog, *old_prog;
+	void *new_ptr, *old_ptr;
 	u32 index = *(u32 *)key, ufd;
 
 	if (map_flags != BPF_ANY)
@@ -191,57 +191,75 @@
 		return -E2BIG;
 
 	ufd = *(u32 *)value;
-	prog = bpf_prog_get(ufd);
-	if (IS_ERR(prog))
-		return PTR_ERR(prog);
+	new_ptr = map->ops->map_fd_get_ptr(map, ufd);
+	if (IS_ERR(new_ptr))
+		return PTR_ERR(new_ptr);
 
-	if (!bpf_prog_array_compatible(array, prog)) {
-		bpf_prog_put(prog);
-		return -EINVAL;
-	}
-
-	old_prog = xchg(array->prog + index, prog);
-	if (old_prog)
-		bpf_prog_put_rcu(old_prog);
+	old_ptr = xchg(array->ptrs + index, new_ptr);
+	if (old_ptr)
+		map->ops->map_fd_put_ptr(old_ptr);
 
 	return 0;
 }
 
-static int prog_array_map_delete_elem(struct bpf_map *map, void *key)
+static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct bpf_prog *old_prog;
+	void *old_ptr;
 	u32 index = *(u32 *)key;
 
 	if (index >= array->map.max_entries)
 		return -E2BIG;
 
-	old_prog = xchg(array->prog + index, NULL);
-	if (old_prog) {
-		bpf_prog_put_rcu(old_prog);
+	old_ptr = xchg(array->ptrs + index, NULL);
+	if (old_ptr) {
+		map->ops->map_fd_put_ptr(old_ptr);
 		return 0;
 	} else {
 		return -ENOENT;
 	}
 }
 
+static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct bpf_prog *prog = bpf_prog_get(fd);
+	if (IS_ERR(prog))
+		return prog;
+
+	if (!bpf_prog_array_compatible(array, prog)) {
+		bpf_prog_put(prog);
+		return ERR_PTR(-EINVAL);
+	}
+	return prog;
+}
+
+static void prog_fd_array_put_ptr(void *ptr)
+{
+	struct bpf_prog *prog = ptr;
+
+	bpf_prog_put_rcu(prog);
+}
+
 /* decrement refcnt of all bpf_progs that are stored in this map */
-void bpf_prog_array_map_clear(struct bpf_map *map)
+void bpf_fd_array_map_clear(struct bpf_map *map)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	int i;
 
 	for (i = 0; i < array->map.max_entries; i++)
-		prog_array_map_delete_elem(map, &i);
+		fd_array_map_delete_elem(map, &i);
 }
 
 static const struct bpf_map_ops prog_array_ops = {
-	.map_alloc = prog_array_map_alloc,
-	.map_free = prog_array_map_free,
+	.map_alloc = fd_array_map_alloc,
+	.map_free = fd_array_map_free,
 	.map_get_next_key = array_map_get_next_key,
-	.map_lookup_elem = prog_array_map_lookup_elem,
-	.map_update_elem = prog_array_map_update_elem,
-	.map_delete_elem = prog_array_map_delete_elem,
+	.map_lookup_elem = fd_array_map_lookup_elem,
+	.map_update_elem = fd_array_map_update_elem,
+	.map_delete_elem = fd_array_map_delete_elem,
+	.map_fd_get_ptr = prog_fd_array_get_ptr,
+	.map_fd_put_ptr = prog_fd_array_put_ptr,
 };
 
 static struct bpf_map_type_list prog_array_type __read_mostly = {
@@ -255,3 +273,60 @@
 	return 0;
 }
 late_initcall(register_prog_array_map);
+
+static void perf_event_array_map_free(struct bpf_map *map)
+{
+	bpf_fd_array_map_clear(map);
+	fd_array_map_free(map);
+}
+
+static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
+{
+	struct perf_event *event;
+	const struct perf_event_attr *attr;
+
+	event = perf_event_get(fd);
+	if (IS_ERR(event))
+		return event;
+
+	attr = perf_event_attrs(event);
+	if (IS_ERR(attr))
+		return (void *)attr;
+
+	if (attr->type != PERF_TYPE_RAW &&
+	    attr->type != PERF_TYPE_HARDWARE) {
+		perf_event_release_kernel(event);
+		return ERR_PTR(-EINVAL);
+	}
+	return event;
+}
+
+static void perf_event_fd_array_put_ptr(void *ptr)
+{
+	struct perf_event *event = ptr;
+
+	perf_event_release_kernel(event);
+}
+
+static const struct bpf_map_ops perf_event_array_ops = {
+	.map_alloc = fd_array_map_alloc,
+	.map_free = perf_event_array_map_free,
+	.map_get_next_key = array_map_get_next_key,
+	.map_lookup_elem = fd_array_map_lookup_elem,
+	.map_update_elem = fd_array_map_update_elem,
+	.map_delete_elem = fd_array_map_delete_elem,
+	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
+	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
+};
+
+static struct bpf_map_type_list perf_event_array_type __read_mostly = {
+	.ops = &perf_event_array_ops,
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+};
+
+static int __init register_perf_event_array_map(void)
+{
+	bpf_register_map_type(&perf_event_array_type);
+	return 0;
+}
+late_initcall(register_perf_event_array_map);
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c5bedc8..67c380c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -177,6 +177,7 @@
 {
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__bpf_call_base);
 
 /**
  *	__bpf_prog_run - run eBPF program on a given context
@@ -449,11 +450,15 @@
 
 		tail_call_cnt++;
 
-		prog = READ_ONCE(array->prog[index]);
+		prog = READ_ONCE(array->ptrs[index]);
 		if (unlikely(!prog))
 			goto out;
 
-		ARG1 = BPF_R1;
+		/* ARG1 at this point is guaranteed to point to CTX from
+		 * the verifier side due to the fact that the tail call is
+		 * handled like a helper, that is, bpf_tail_call_proto,
+		 * where arg1_type is ARG_PTR_TO_CTX.
+		 */
 		insn = prog->insnsi;
 		goto select_insn;
 out:
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a1b14d1..dc9b464 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -72,7 +72,7 @@
 		/* prog_array stores refcnt-ed bpf_prog pointers
 		 * release them all when user space closes prog_array_fd
 		 */
-		bpf_prog_array_map_clear(map);
+		bpf_fd_array_map_clear(map);
 
 	bpf_map_put(map);
 	return 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 039d866..ed12e38 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -238,6 +238,14 @@
 	[CONST_IMM]		= "imm",
 };
 
+static const struct {
+	int map_type;
+	int func_id;
+} func_limit[] = {
+	{BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
+	{BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
+};
+
 static void print_verifier_state(struct verifier_env *env)
 {
 	enum bpf_reg_type t;
@@ -648,6 +656,9 @@
 	struct verifier_state *state = &env->cur_state;
 	int size, err = 0;
 
+	if (state->regs[regno].type == PTR_TO_STACK)
+		off += state->regs[regno].imm;
+
 	size = bpf_size_to_bytes(bpf_size);
 	if (size < 0)
 		return size;
@@ -667,7 +678,8 @@
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown_value(state->regs, value_regno);
 
-	} else if (state->regs[regno].type == FRAME_PTR) {
+	} else if (state->regs[regno].type == FRAME_PTR ||
+		   state->regs[regno].type == PTR_TO_STACK) {
 		if (off >= 0 || off < -MAX_BPF_STACK) {
 			verbose("invalid stack off=%d size=%d\n", off, size);
 			return -EACCES;
@@ -833,6 +845,28 @@
 	return err;
 }
 
+static int check_map_func_compatibility(struct bpf_map *map, int func_id)
+{
+	bool bool_map, bool_func;
+	int i;
+
+	if (!map)
+		return 0;
+
+	for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
+		bool_map = (map->map_type == func_limit[i].map_type);
+		bool_func = (func_id == func_limit[i].func_id);
+		/* only when the map & func pair match can it continue;
+		 * don't allow any other map type to be passed into
+		 * the special func.
+		 */
+		if (bool_map != bool_func)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int check_call(struct verifier_env *env, int func_id)
 {
 	struct verifier_state *state = &env->cur_state;
@@ -908,21 +942,9 @@
 		return -EINVAL;
 	}
 
-	if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY &&
-	    func_id != BPF_FUNC_tail_call)
-		/* prog_array map type needs extra care:
-		 * only allow to pass it into bpf_tail_call() for now.
-		 * bpf_map_delete_elem() can be allowed in the future,
-		 * while bpf_map_update_elem() must only be done via syscall
-		 */
-		return -EINVAL;
-
-	if (func_id == BPF_FUNC_tail_call &&
-	    map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
-		/* don't allow any other map type to be passed into
-		 * bpf_tail_call()
-		 */
-		return -EINVAL;
+	err = check_map_func_compatibility(map, func_id);
+	if (err)
+		return err;
 
 	return 0;
 }
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index f89d929..f3f5cd5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -107,8 +107,8 @@
 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
 
 #define cgroup_assert_mutex_or_rcu_locked()				\
-	rcu_lockdep_assert(rcu_read_lock_held() ||			\
-			   lockdep_is_held(&cgroup_mutex),		\
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
+			   !lockdep_is_held(&cgroup_mutex),		\
 			   "cgroup_mutex or RCU read lock required");
 
 /*
@@ -145,6 +145,7 @@
  * part of that cgroup.
  */
 struct cgroup_root cgrp_dfl_root;
+EXPORT_SYMBOL_GPL(cgrp_dfl_root);
 
 /*
  * The default hierarchy always exists but is hidden until mounted for the
@@ -186,6 +187,9 @@
 static unsigned long have_fork_callback __read_mostly;
 static unsigned long have_exit_callback __read_mostly;
 
+/* Ditto for the can_fork callback. */
+static unsigned long have_canfork_callback __read_mostly;
+
 static struct cftype cgroup_dfl_base_files[];
 static struct cftype cgroup_legacy_base_files[];
 
@@ -207,7 +211,7 @@
 
 	idr_preload(gfp_mask);
 	spin_lock_bh(&cgroup_idr_lock);
-	ret = idr_alloc(idr, ptr, start, end, gfp_mask);
+	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_WAIT);
 	spin_unlock_bh(&cgroup_idr_lock);
 	idr_preload_end();
 	return ret;
@@ -1027,10 +1031,13 @@
 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
 			      char *buf)
 {
+	struct cgroup_subsys *ss = cft->ss;
+
 	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
 	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
 		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
-			 cft->ss->name, cft->name);
+			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
+			 cft->name);
 	else
 		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
 	return buf;
@@ -1332,9 +1339,10 @@
 	struct cgroup_subsys *ss;
 	int ssid;
 
-	for_each_subsys(ss, ssid)
-		if (root->subsys_mask & (1 << ssid))
-			seq_printf(seq, ",%s", ss->name);
+	if (root != &cgrp_dfl_root)
+		for_each_subsys(ss, ssid)
+			if (root->subsys_mask & (1 << ssid))
+				seq_printf(seq, ",%s", ss->legacy_name);
 	if (root->flags & CGRP_ROOT_NOPREFIX)
 		seq_puts(seq, ",noprefix");
 	if (root->flags & CGRP_ROOT_XATTR)
@@ -1447,7 +1455,7 @@
 		}
 
 		for_each_subsys(ss, i) {
-			if (strcmp(token, ss->name))
+			if (strcmp(token, ss->legacy_name))
 				continue;
 			if (ss->disabled)
 				continue;
@@ -1666,7 +1674,7 @@
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
+	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
 	if (ret < 0)
 		goto out;
 	root_cgrp->id = ret;
@@ -4579,7 +4587,7 @@
 	if (err)
 		goto err_free_css;
 
-	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
+	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
 	if (err < 0)
 		goto err_free_percpu_ref;
 	css->id = err;
@@ -4656,7 +4664,7 @@
 	 * Temporarily set the pointer to NULL, so idr_find() won't return
 	 * a half-baked cgroup.
 	 */
-	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
+	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
 	if (cgrp->id < 0) {
 		ret = -ENOMEM;
 		goto out_cancel_ref;
@@ -4955,6 +4963,7 @@
 
 	have_fork_callback |= (bool)ss->fork << ss->id;
 	have_exit_callback |= (bool)ss->exit << ss->id;
+	have_canfork_callback |= (bool)ss->can_fork << ss->id;
 
 	/* At system boot, before all subsystems have been
 	 * registered, no tasks have been forked, so we don't
@@ -4993,6 +5002,8 @@
 
 		ss->id = i;
 		ss->name = cgroup_subsys_name[i];
+		if (!ss->legacy_name)
+			ss->legacy_name = cgroup_subsys_name[i];
 
 		if (ss->early_init)
 			cgroup_init_subsys(ss, true);
@@ -5136,9 +5147,11 @@
 			continue;
 
 		seq_printf(m, "%d:", root->hierarchy_id);
-		for_each_subsys(ss, ssid)
-			if (root->subsys_mask & (1 << ssid))
-				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
+		if (root != &cgrp_dfl_root)
+			for_each_subsys(ss, ssid)
+				if (root->subsys_mask & (1 << ssid))
+					seq_printf(m, "%s%s", count++ ? "," : "",
+						   ss->legacy_name);
 		if (strlen(root->name))
 			seq_printf(m, "%sname=%s", count ? "," : "",
 				   root->name);
@@ -5178,7 +5191,7 @@
 
 	for_each_subsys(ss, i)
 		seq_printf(m, "%s\t%d\t%d\t%d\n",
-			   ss->name, ss->root->hierarchy_id,
+			   ss->legacy_name, ss->root->hierarchy_id,
 			   atomic_read(&ss->root->nr_cgrps), !ss->disabled);
 
 	mutex_unlock(&cgroup_mutex);
@@ -5197,6 +5210,19 @@
 	.release = single_release,
 };
 
+static void **subsys_canfork_priv_p(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
+{
+	if (CGROUP_CANFORK_START <= i && i < CGROUP_CANFORK_END)
+		return &ss_priv[i - CGROUP_CANFORK_START];
+	return NULL;
+}
+
+static void *subsys_canfork_priv(void *ss_priv[CGROUP_CANFORK_COUNT], int i)
+{
+	void **private = subsys_canfork_priv_p(ss_priv, i);
+	return private ? *private : NULL;
+}
+
 /**
  * cgroup_fork - initialize cgroup related fields during copy_process()
  * @child: pointer to task_struct of forking parent process.
@@ -5212,6 +5238,57 @@
 }
 
 /**
+ * cgroup_can_fork - called on a new task before the process is exposed
+ * @child: the task in question.
+ *
+ * This calls the subsystem can_fork() callbacks. If the can_fork() callback
+ * returns an error, the fork aborts with that error code. This allows for
+ * a cgroup subsystem to conditionally allow or deny new forks.
+ */
+int cgroup_can_fork(struct task_struct *child,
+		    void *ss_priv[CGROUP_CANFORK_COUNT])
+{
+	struct cgroup_subsys *ss;
+	int i, j, ret;
+
+	for_each_subsys_which(ss, i, &have_canfork_callback) {
+		ret = ss->can_fork(child, subsys_canfork_priv_p(ss_priv, i));
+		if (ret)
+			goto out_revert;
+	}
+
+	return 0;
+
+out_revert:
+	for_each_subsys(ss, j) {
+		if (j >= i)
+			break;
+		if (ss->cancel_fork)
+			ss->cancel_fork(child, subsys_canfork_priv(ss_priv, j));
+	}
+
+	return ret;
+}
+
+/**
+ * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
+ * @child: the task in question
+ *
+ * This calls the cancel_fork() callbacks if a fork failed *after*
+ * cgroup_can_fork() succeeded.
+ */
+void cgroup_cancel_fork(struct task_struct *child,
+			void *ss_priv[CGROUP_CANFORK_COUNT])
+{
+	struct cgroup_subsys *ss;
+	int i;
+
+	for_each_subsys(ss, i)
+		if (ss->cancel_fork)
+			ss->cancel_fork(child, subsys_canfork_priv(ss_priv, i));
+}
+
+/**
  * cgroup_post_fork - called on a new task after adding it to the task list
  * @child: the task in question
  *
@@ -5221,7 +5298,8 @@
  * cgroup_task_iter_start() - to guarantee that the new task ends up on its
  * list.
  */
-void cgroup_post_fork(struct task_struct *child)
+void cgroup_post_fork(struct task_struct *child,
+		      void *old_ss_priv[CGROUP_CANFORK_COUNT])
 {
 	struct cgroup_subsys *ss;
 	int i;
@@ -5266,7 +5344,7 @@
 	 * and addition to css_set.
 	 */
 	for_each_subsys_which(ss, i, &have_fork_callback)
-		ss->fork(child);
+		ss->fork(child, subsys_canfork_priv(old_ss_priv, i));
 }
 
 /**
@@ -5400,12 +5478,14 @@
 			continue;
 
 		for_each_subsys(ss, i) {
-			if (!strcmp(token, ss->name)) {
-				ss->disabled = 1;
-				printk(KERN_INFO "Disabling %s control group"
-					" subsystem\n", ss->name);
-				break;
-			}
+			if (strcmp(token, ss->name) &&
+			    strcmp(token, ss->legacy_name))
+				continue;
+
+			ss->disabled = 1;
+			printk(KERN_INFO "Disabling %s control group subsystem\n",
+			       ss->name);
+			break;
 		}
 	}
 	return 1;
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 92b98cc..f1b30ad 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -203,7 +203,7 @@
  * to do anything as freezer_attach() will put @task into the appropriate
  * state.
  */
-static void freezer_fork(struct task_struct *task)
+static void freezer_fork(struct task_struct *task, void *private)
 {
 	struct freezer *freezer;
 
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
new file mode 100644
index 0000000..806cd76
--- /dev/null
+++ b/kernel/cgroup_pids.c
@@ -0,0 +1,355 @@
+/*
+ * Process number limiting controller for cgroups.
+ *
+ * Used to allow a cgroup hierarchy to stop any new processes from fork()ing
+ * after a certain limit is reached.
+ *
+ * Since it is trivial to hit the task limit without hitting any kmemcg limits
+ * in place, PIDs are a fundamental resource. As such, PID exhaustion must be
+ * preventable in the scope of a cgroup hierarchy by allowing resource limiting
+ * of the number of tasks in a cgroup.
+ *
+ * In order to use the `pids` controller, set the maximum number of tasks in
+ * pids.max (this is not available in the root cgroup for obvious reasons). The
+ * number of processes currently in the cgroup is given by pids.current.
+ * Organisational operations are not blocked by cgroup policies, so it is
+ * possible to have pids.current > pids.max. However, it is not possible to
+ * violate a cgroup policy through fork(). fork() will return -EAGAIN if forking
+ * would cause a cgroup policy to be violated.
+ *
+ * To set a cgroup to have no limit, set pids.max to "max". This is the default
+ * for all new cgroups (N.B. that PID limits are hierarchical, so the most
+ * stringent limit in the hierarchy is followed).
+ *
+ * pids.current tracks all child cgroup hierarchies, so parent/pids.current is
+ * a superset of parent/child/pids.current.
+ *
+ * Copyright (C) 2015 Aleksa Sarai <cyphar@cyphar.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+#include <linux/atomic.h>
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+
+#define PIDS_MAX (PID_MAX_LIMIT + 1ULL)
+#define PIDS_MAX_STR "max"
+
+struct pids_cgroup {
+	struct cgroup_subsys_state	css;
+
+	/*
+	 * Use 64-bit types so that we can safely represent "max" as
+	 * %PIDS_MAX = (%PID_MAX_LIMIT + 1).
+	 */
+	atomic64_t			counter;
+	int64_t				limit;
+};
+
+static struct pids_cgroup *css_pids(struct cgroup_subsys_state *css)
+{
+	return container_of(css, struct pids_cgroup, css);
+}
+
+static struct pids_cgroup *parent_pids(struct pids_cgroup *pids)
+{
+	return css_pids(pids->css.parent);
+}
+
+static struct cgroup_subsys_state *
+pids_css_alloc(struct cgroup_subsys_state *parent)
+{
+	struct pids_cgroup *pids;
+
+	pids = kzalloc(sizeof(struct pids_cgroup), GFP_KERNEL);
+	if (!pids)
+		return ERR_PTR(-ENOMEM);
+
+	pids->limit = PIDS_MAX;
+	atomic64_set(&pids->counter, 0);
+	return &pids->css;
+}
+
+static void pids_css_free(struct cgroup_subsys_state *css)
+{
+	kfree(css_pids(css));
+}
+
+/**
+ * pids_cancel - uncharge the local pid count
+ * @pids: the pid cgroup state
+ * @num: the number of pids to cancel
+ *
+ * This function will WARN if the pid count goes under 0, because such a case is
+ * a bug in the pids controller proper.
+ */
+static void pids_cancel(struct pids_cgroup *pids, int num)
+{
+	/*
+	 * A negative count (or overflow for that matter) is invalid,
+	 * and indicates a bug in the `pids` controller proper.
+	 */
+	WARN_ON_ONCE(atomic64_add_negative(-num, &pids->counter));
+}
+
+/**
+ * pids_uncharge - hierarchically uncharge the pid count
+ * @pids: the pid cgroup state
+ * @num: the number of pids to uncharge
+ */
+static void pids_uncharge(struct pids_cgroup *pids, int num)
+{
+	struct pids_cgroup *p;
+
+	for (p = pids; p; p = parent_pids(p))
+		pids_cancel(p, num);
+}
+
+/**
+ * pids_charge - hierarchically charge the pid count
+ * @pids: the pid cgroup state
+ * @num: the number of pids to charge
+ *
+ * This function does *not* follow the pid limit set. It cannot fail and the new
+ * pid count may exceed the limit. This is only used for reverting failed
+ * attaches, where there is no other way out than violating the limit.
+ */
+static void pids_charge(struct pids_cgroup *pids, int num)
+{
+	struct pids_cgroup *p;
+
+	for (p = pids; p; p = parent_pids(p))
+		atomic64_add(num, &p->counter);
+}
+
+/**
+ * pids_try_charge - hierarchically try to charge the pid count
+ * @pids: the pid cgroup state
+ * @num: the number of pids to charge
+ *
+ * This function follows the set limit. It will fail if the charge would cause
+ * the new value to exceed the hierarchical limit. Returns 0 if the charge
+ * succeeded, otherwise -EAGAIN.
+ */
+static int pids_try_charge(struct pids_cgroup *pids, int num)
+{
+	struct pids_cgroup *p, *q;
+
+	for (p = pids; p; p = parent_pids(p)) {
+		int64_t new = atomic64_add_return(num, &p->counter);
+
+		/*
+		 * Since new is capped to the maximum number of pid_t, if
+		 * p->limit is %PIDS_MAX then we know that this test will never
+		 * fail.
+		 */
+		if (new > p->limit)
+			goto revert;
+	}
+
+	return 0;
+
+revert:
+	for (q = pids; q != p; q = parent_pids(q))
+		pids_cancel(q, num);
+	pids_cancel(p, num);
+
+	return -EAGAIN;
+}
+
+static int pids_can_attach(struct cgroup_subsys_state *css,
+			   struct cgroup_taskset *tset)
+{
+	struct pids_cgroup *pids = css_pids(css);
+	struct task_struct *task;
+
+	cgroup_taskset_for_each(task, tset) {
+		struct cgroup_subsys_state *old_css;
+		struct pids_cgroup *old_pids;
+
+		/*
+		 * No need to pin @old_css between here and cancel_attach()
+		 * because cgroup core protects it from being freed before
+		 * the migration completes or fails.
+		 */
+		old_css = task_css(task, pids_cgrp_id);
+		old_pids = css_pids(old_css);
+
+		pids_charge(pids, 1);
+		pids_uncharge(old_pids, 1);
+	}
+
+	return 0;
+}
+
+static void pids_cancel_attach(struct cgroup_subsys_state *css,
+			       struct cgroup_taskset *tset)
+{
+	struct pids_cgroup *pids = css_pids(css);
+	struct task_struct *task;
+
+	cgroup_taskset_for_each(task, tset) {
+		struct cgroup_subsys_state *old_css;
+		struct pids_cgroup *old_pids;
+
+		old_css = task_css(task, pids_cgrp_id);
+		old_pids = css_pids(old_css);
+
+		pids_charge(old_pids, 1);
+		pids_uncharge(pids, 1);
+	}
+}
+
+static int pids_can_fork(struct task_struct *task, void **priv_p)
+{
+	struct cgroup_subsys_state *css;
+	struct pids_cgroup *pids;
+	int err;
+
+	/*
+	 * Use the "current" task_css for the pids subsystem as the tentative
+	 * css. It is possible we will charge the wrong hierarchy, in which
+	 * case we will forcefully revert/reapply the charge on the right
+	 * hierarchy after it is committed to the task proper.
+	 */
+	css = task_get_css(current, pids_cgrp_id);
+	pids = css_pids(css);
+
+	err = pids_try_charge(pids, 1);
+	if (err)
+		goto err_css_put;
+
+	*priv_p = css;
+	return 0;
+
+err_css_put:
+	css_put(css);
+	return err;
+}
+
+static void pids_cancel_fork(struct task_struct *task, void *priv)
+{
+	struct cgroup_subsys_state *css = priv;
+	struct pids_cgroup *pids = css_pids(css);
+
+	pids_uncharge(pids, 1);
+	css_put(css);
+}
+
+static void pids_fork(struct task_struct *task, void *priv)
+{
+	struct cgroup_subsys_state *css;
+	struct cgroup_subsys_state *old_css = priv;
+	struct pids_cgroup *pids;
+	struct pids_cgroup *old_pids = css_pids(old_css);
+
+	css = task_get_css(task, pids_cgrp_id);
+	pids = css_pids(css);
+
+	/*
+	 * If the association has changed, we have to revert and reapply the
+	 * charge/uncharge on the wrong hierarchy to the current one. Since
+	 * the association can only change due to an organisation event, it's
+	 * okay for us to ignore the limit in this case.
+	 */
+	if (pids != old_pids) {
+		pids_uncharge(old_pids, 1);
+		pids_charge(pids, 1);
+	}
+
+	css_put(css);
+	css_put(old_css);
+}
+
+static void pids_exit(struct cgroup_subsys_state *css,
+		      struct cgroup_subsys_state *old_css,
+		      struct task_struct *task)
+{
+	struct pids_cgroup *pids = css_pids(old_css);
+
+	pids_uncharge(pids, 1);
+}
+
+static ssize_t pids_max_write(struct kernfs_open_file *of, char *buf,
+			      size_t nbytes, loff_t off)
+{
+	struct cgroup_subsys_state *css = of_css(of);
+	struct pids_cgroup *pids = css_pids(css);
+	int64_t limit;
+	int err;
+
+	buf = strstrip(buf);
+	if (!strcmp(buf, PIDS_MAX_STR)) {
+		limit = PIDS_MAX;
+		goto set_limit;
+	}
+
+	err = kstrtoll(buf, 0, &limit);
+	if (err)
+		return err;
+
+	if (limit < 0 || limit >= PIDS_MAX)
+		return -EINVAL;
+
+set_limit:
+	/*
+	 * Limit updates don't need to be mutex'd, since it isn't
+	 * critical that any racing fork()s follow the new limit.
+	 */
+	pids->limit = limit;
+	return nbytes;
+}
+
+static int pids_max_show(struct seq_file *sf, void *v)
+{
+	struct cgroup_subsys_state *css = seq_css(sf);
+	struct pids_cgroup *pids = css_pids(css);
+	int64_t limit = pids->limit;
+
+	if (limit >= PIDS_MAX)
+		seq_printf(sf, "%s\n", PIDS_MAX_STR);
+	else
+		seq_printf(sf, "%lld\n", limit);
+
+	return 0;
+}
+
+static s64 pids_current_read(struct cgroup_subsys_state *css,
+			     struct cftype *cft)
+{
+	struct pids_cgroup *pids = css_pids(css);
+
+	return atomic64_read(&pids->counter);
+}
+
+static struct cftype pids_files[] = {
+	{
+		.name = "max",
+		.write = pids_max_write,
+		.seq_show = pids_max_show,
+		.flags = CFTYPE_NOT_ON_ROOT,
+	},
+	{
+		.name = "current",
+		.read_s64 = pids_current_read,
+	},
+	{ }	/* terminate */
+};
+
+struct cgroup_subsys pids_cgrp_subsys = {
+	.css_alloc	= pids_css_alloc,
+	.css_free	= pids_css_free,
+	.can_attach 	= pids_can_attach,
+	.cancel_attach 	= pids_cancel_attach,
+	.can_fork	= pids_can_fork,
+	.cancel_fork	= pids_cancel_fork,
+	.fork		= pids_fork,
+	.exit		= pids_exit,
+	.legacy_cftypes	= pids_files,
+	.dfl_cftypes	= pids_files,
+};
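
As an illustration of the pids.max / pids.current interface described in the header comment of the new controller, a minimal userspace sketch might look like the following. It assumes the pids controller is mounted and that a test group already exists; the path /sys/fs/cgroup/pids/test is only an example, and error handling is kept to the bare minimum.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

/* Assumed mount point of the pids controller; adjust as needed. */
#define CG "/sys/fs/cgroup/pids/test"

static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f || fputs(val, f) == EOF)
		perror(path);
	if (f)
		fclose(f);
}

int main(void)
{
	int i;

	write_file(CG "/pids.max", "2");	/* allow at most two tasks */
	write_file(CG "/cgroup.procs", "0");	/* "0" moves the writing task */

	/*
	 * The first fork() fills the group up to the limit; the second one
	 * should fail with EAGAIN because pids_can_fork() refuses the charge.
	 */
	for (i = 0; i < 2; i++) {
		pid_t pid = fork();

		if (pid < 0)
			printf("fork %d failed: %s\n", i, strerror(errno));
		else if (pid == 0)
			pause();		/* child stays in the group */
		else
			printf("fork %d -> pid %d\n", i, (int)pid);
	}
	return 0;
}
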
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5644ec5..82cf9df 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -191,21 +191,22 @@
 void cpu_hotplug_disable(void)
 {
 	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 1;
+	cpu_hotplug_disabled++;
 	cpu_maps_update_done();
 }
+EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 
 void cpu_hotplug_enable(void)
 {
 	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 0;
+	WARN_ON(--cpu_hotplug_disabled < 0);
 	cpu_maps_update_done();
 }
-
+EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 #endif	/* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
-int __ref register_cpu_notifier(struct notifier_block *nb)
+int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;
 	cpu_maps_update_begin();
@@ -214,7 +215,7 @@
 	return ret;
 }
 
-int __ref __register_cpu_notifier(struct notifier_block *nb)
+int __register_cpu_notifier(struct notifier_block *nb)
 {
 	return raw_notifier_chain_register(&cpu_chain, nb);
 }
@@ -244,7 +245,7 @@
 EXPORT_SYMBOL(register_cpu_notifier);
 EXPORT_SYMBOL(__register_cpu_notifier);
 
-void __ref unregister_cpu_notifier(struct notifier_block *nb)
+void unregister_cpu_notifier(struct notifier_block *nb)
 {
 	cpu_maps_update_begin();
 	raw_notifier_chain_unregister(&cpu_chain, nb);
@@ -252,7 +253,7 @@
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
-void __ref __unregister_cpu_notifier(struct notifier_block *nb)
+void __unregister_cpu_notifier(struct notifier_block *nb)
 {
 	raw_notifier_chain_unregister(&cpu_chain, nb);
 }
@@ -329,7 +330,7 @@
 };
 
 /* Take this CPU down. */
-static int __ref take_cpu_down(void *_param)
+static int take_cpu_down(void *_param)
 {
 	struct take_cpu_down_param *param = _param;
 	int err;
@@ -348,7 +349,7 @@
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+static int _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
@@ -381,14 +382,14 @@
 	 * will observe it.
 	 *
 	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
-	 * not imply sync_sched(), so explicitly call both.
+	 * not imply sync_sched(), so wait for both.
 	 *
 	 * Do sync before park smpboot threads to take care the rcu boost case.
 	 */
-#ifdef CONFIG_PREEMPT
-	synchronize_sched();
-#endif
-	synchronize_rcu();
+	if (IS_ENABLED(CONFIG_PREEMPT))
+		synchronize_rcu_mult(call_rcu, call_rcu_sched);
+	else
+		synchronize_rcu();
 
 	smpboot_park_threads(cpu);
 
@@ -401,7 +402,7 @@
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
-	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
@@ -442,7 +443,7 @@
 	return err;
 }
 
-int __ref cpu_down(unsigned int cpu)
+int cpu_down(unsigned int cpu)
 {
 	int err;
 
@@ -608,13 +609,18 @@
 		}
 	}
 
-	if (!error) {
+	if (!error)
 		BUG_ON(num_online_cpus() > 1);
-		/* Make sure the CPUs won't be enabled by someone else */
-		cpu_hotplug_disabled = 1;
-	} else {
+	else
 		pr_err("Non-boot CPUs are not disabled\n");
-	}
+
+	/*
+	 * Make sure the CPUs won't be enabled by someone else. We need to do
+	 * this even in case of failure as all disable_nonboot_cpus() users are
+	 * supposed to do enable_nonboot_cpus() on the failure path.
+	 */
+	cpu_hotplug_disabled++;
+
 	cpu_maps_update_done();
 	return error;
 }
@@ -627,13 +633,13 @@
 {
 }
 
-void __ref enable_nonboot_cpus(void)
+void enable_nonboot_cpus(void)
 {
 	int cpu, error;
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 0;
+	WARN_ON(--cpu_hotplug_disabled < 0);
 	if (cpumask_empty(frozen_cpus))
 		goto out;
 
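
Since cpu_hotplug_disabled is now a counter rather than a flag, the exported helpers can nest: every cpu_hotplug_disable() simply has to be paired with a cpu_hotplug_enable(). A hypothetical in-kernel user is sketched below; the function name is made up for illustration.

#include <linux/cpu.h>

static void example_quiesce_cpus(void)
{
	cpu_hotplug_disable();	/* bumps cpu_hotplug_disabled */

	/*
	 * ... work that must not race with CPU hotplug; helpers called from
	 * here may themselves disable/enable hotplug without clobbering us ...
	 */

	cpu_hotplug_enable();	/* WARNs if the counter would drop below 0 */
}
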
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e6feb51..e818389 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -163,6 +163,7 @@
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
 static atomic_t nr_freq_events __read_mostly;
+static atomic_t nr_switch_events __read_mostly;
 
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
@@ -2619,6 +2620,9 @@
 	local_irq_restore(flags);
 }
 
+static void perf_event_switch(struct task_struct *task,
+			      struct task_struct *next_prev, bool sched_in);
+
 #define for_each_task_context_nr(ctxn)					\
 	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
 
@@ -2641,6 +2645,9 @@
 	if (__this_cpu_read(perf_sched_cb_usages))
 		perf_pmu_sched_task(task, next, false);
 
+	if (atomic_read(&nr_switch_events))
+		perf_event_switch(task, next, false);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_context_sched_out(task, ctxn, next);
 
@@ -2831,6 +2838,9 @@
 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_in(prev, task);
 
+	if (atomic_read(&nr_switch_events))
+		perf_event_switch(task, prev, true);
+
 	if (__this_cpu_read(perf_sched_cb_usages))
 		perf_pmu_sched_task(prev, task, true);
 }
@@ -3212,6 +3222,59 @@
 	return __perf_event_count(event);
 }
 
+/*
+ * NMI-safe method to read a local event, that is an event that
+ * is:
+ *   - either for the current task, or for this CPU
+ *   - does not have inherit set, for inherited task events
+ *     will not be local and we cannot read them atomically
+ *   - must not have a pmu::count method
+ */
+u64 perf_event_read_local(struct perf_event *event)
+{
+	unsigned long flags;
+	u64 val;
+
+	/*
+	 * Disabling interrupts avoids all counter scheduling (context
+	 * switches, timer based rotation and IPIs).
+	 */
+	local_irq_save(flags);
+
+	/* If this is a per-task event, it must be for current */
+	WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) &&
+		     event->hw.target != current);
+
+	/* If this is a per-CPU event, it must be for this CPU */
+	WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) &&
+		     event->cpu != smp_processor_id());
+
+	/*
+	 * It must not be an event with inherit set, we cannot read
+	 * all child counters from atomic context.
+	 */
+	WARN_ON_ONCE(event->attr.inherit);
+
+	/*
+	 * It must not have a pmu::count method, those are not
+	 * NMI safe.
+	 */
+	WARN_ON_ONCE(event->pmu->count);
+
+	/*
+	 * If the event is currently on this CPU, it's either a per-task event,
+	 * or local to this CPU. Furthermore it means it's ACTIVE (otherwise
+	 * oncpu == -1).
+	 */
+	if (event->oncpu == smp_processor_id())
+		event->pmu->read(event);
+
+	val = local64_read(&event->count);
+	local_irq_restore(flags);
+
+	return val;
+}
+
 static u64 perf_event_read(struct perf_event *event)
 {
 	/*
@@ -3454,6 +3517,10 @@
 		atomic_dec(&nr_task_events);
 	if (event->attr.freq)
 		atomic_dec(&nr_freq_events);
+	if (event->attr.context_switch) {
+		static_key_slow_dec_deferred(&perf_sched_events);
+		atomic_dec(&nr_switch_events);
+	}
 	if (is_cgroup_event(event))
 		static_key_slow_dec_deferred(&perf_sched_events);
 	if (has_branch_stack(event))
@@ -6025,6 +6092,91 @@
 }
 
 /*
+ * context_switch tracking
+ */
+
+struct perf_switch_event {
+	struct task_struct	*task;
+	struct task_struct	*next_prev;
+
+	struct {
+		struct perf_event_header	header;
+		u32				next_prev_pid;
+		u32				next_prev_tid;
+	} event_id;
+};
+
+static int perf_event_switch_match(struct perf_event *event)
+{
+	return event->attr.context_switch;
+}
+
+static void perf_event_switch_output(struct perf_event *event, void *data)
+{
+	struct perf_switch_event *se = data;
+	struct perf_output_handle handle;
+	struct perf_sample_data sample;
+	int ret;
+
+	if (!perf_event_switch_match(event))
+		return;
+
+	/* Only CPU-wide events are allowed to see next/prev pid/tid */
+	if (event->ctx->task) {
+		se->event_id.header.type = PERF_RECORD_SWITCH;
+		se->event_id.header.size = sizeof(se->event_id.header);
+	} else {
+		se->event_id.header.type = PERF_RECORD_SWITCH_CPU_WIDE;
+		se->event_id.header.size = sizeof(se->event_id);
+		se->event_id.next_prev_pid =
+					perf_event_pid(event, se->next_prev);
+		se->event_id.next_prev_tid =
+					perf_event_tid(event, se->next_prev);
+	}
+
+	perf_event_header__init_id(&se->event_id.header, &sample, event);
+
+	ret = perf_output_begin(&handle, event, se->event_id.header.size);
+	if (ret)
+		return;
+
+	if (event->ctx->task)
+		perf_output_put(&handle, se->event_id.header);
+	else
+		perf_output_put(&handle, se->event_id);
+
+	perf_event__output_id_sample(event, &handle, &sample);
+
+	perf_output_end(&handle);
+}
+
+static void perf_event_switch(struct task_struct *task,
+			      struct task_struct *next_prev, bool sched_in)
+{
+	struct perf_switch_event switch_event;
+
+	/* N.B. caller checks nr_switch_events != 0 */
+
+	switch_event = (struct perf_switch_event){
+		.task		= task,
+		.next_prev	= next_prev,
+		.event_id	= {
+			.header = {
+				/* .type */
+				.misc = sched_in ? 0 : PERF_RECORD_MISC_SWITCH_OUT,
+				/* .size */
+			},
+			/* .next_prev_pid */
+			/* .next_prev_tid */
+		},
+	};
+
+	perf_event_aux(perf_event_switch_output,
+		       &switch_event,
+		       NULL);
+}
+
+/*
  * IRQ throttle logging
  */
 
@@ -6083,8 +6235,6 @@
 	    event->hw.itrace_started)
 		return;
 
-	event->hw.itrace_started = 1;
-
 	rec.header.type	= PERF_RECORD_ITRACE_START;
 	rec.header.misc	= 0;
 	rec.header.size	= sizeof(rec);
@@ -6792,8 +6942,8 @@
 	if (event->tp_event->prog)
 		return -EEXIST;
 
-	if (!(event->tp_event->flags & TRACE_EVENT_FL_KPROBE))
-		/* bpf programs can only be attached to kprobes */
+	if (!(event->tp_event->flags & TRACE_EVENT_FL_UKPROBE))
+		/* bpf programs can only be attached to u/kprobes */
 		return -EINVAL;
 
 	prog = bpf_prog_get(prog_fd);
@@ -7522,6 +7672,10 @@
 		if (atomic_inc_return(&nr_freq_events) == 1)
 			tick_nohz_full_kick_all();
 	}
+	if (event->attr.context_switch) {
+		atomic_inc(&nr_switch_events);
+		static_key_slow_inc(&perf_sched_events.key);
+	}
 	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
 	if (is_cgroup_event(event))
@@ -8617,6 +8771,31 @@
 		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
+struct perf_event *perf_event_get(unsigned int fd)
+{
+	int err;
+	struct fd f;
+	struct perf_event *event;
+
+	err = perf_fget_light(fd, &f);
+	if (err)
+		return ERR_PTR(err);
+
+	event = f.file->private_data;
+	atomic_long_inc(&event->refcount);
+	fdput(f);
+
+	return event;
+}
+
+const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
+{
+	if (!event)
+		return ERR_PTR(-EINVAL);
+
+	return &event->attr;
+}
+
 /*
  * inherit an event from parent task to child task:
  */
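
The new attr.context_switch bit and the PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE records are requested from userspace like any other side-band record. A minimal sketch follows, assuming a uapi perf_event.h recent enough to carry the context_switch bit; reading the records out of the mmap'ed ring buffer is omitted.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;	/* side-band records only */
	attr.context_switch = 1;		/* ask for PERF_RECORD_SWITCH */

	/* CPU-wide event on CPU 0: records also carry next/prev pid/tid. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* mmap the ring buffer and parse PERF_RECORD_SWITCH* here (not shown) */
	close(fd);
	return 0;
}
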
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index c8aa3f7..182bc30 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -437,7 +437,10 @@
 
 	if (page && order) {
 		/*
-		 * Communicate the allocation size to the driver
+		 * Communicate the allocation size to the driver:
+		 * if we managed to secure a high-order allocation,
+		 * set its first page's private to this order;
+		 * !PagePrivate(page) means it's just a normal page.
 		 */
 		split_page(page, order);
 		SetPagePrivate(page);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index cb346f2..4e5e979 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -86,15 +86,6 @@
 	struct arch_uprobe	arch;
 };
 
-struct return_instance {
-	struct uprobe		*uprobe;
-	unsigned long		func;
-	unsigned long		orig_ret_vaddr; /* original return address */
-	bool			chained;	/* true, if instance is nested */
-
-	struct return_instance	*next;		/* keep as stack */
-};
-
 /*
  * Execute out of line area: anonymous executable mapping installed
  * by the probed task to execute the copy of the original instruction
@@ -105,17 +96,18 @@
  * allocated.
  */
 struct xol_area {
-	wait_queue_head_t 	wq;		/* if all slots are busy */
-	atomic_t 		slot_count;	/* number of in-use slots */
-	unsigned long 		*bitmap;	/* 0 = free slot */
-	struct page 		*page;
+	wait_queue_head_t 		wq;		/* if all slots are busy */
+	atomic_t 			slot_count;	/* number of in-use slots */
+	unsigned long 			*bitmap;	/* 0 = free slot */
 
+	struct vm_special_mapping	xol_mapping;
+	struct page 			*pages[2];
 	/*
 	 * We keep the vma's vm_start rather than a pointer to the vma
 	 * itself.  The probed process or a naughty kernel module could make
 	 * the vma go away, and we must handle that reasonably gracefully.
 	 */
-	unsigned long 		vaddr;		/* Page(s) of instruction slots */
+	unsigned long 			vaddr;		/* Page(s) of instruction slots */
 };
 
 /*
@@ -366,6 +358,18 @@
 	return uprobe_write_opcode(mm, vaddr, *(uprobe_opcode_t *)&auprobe->insn);
 }
 
+static struct uprobe *get_uprobe(struct uprobe *uprobe)
+{
+	atomic_inc(&uprobe->ref);
+	return uprobe;
+}
+
+static void put_uprobe(struct uprobe *uprobe)
+{
+	if (atomic_dec_and_test(&uprobe->ref))
+		kfree(uprobe);
+}
+
 static int match_uprobe(struct uprobe *l, struct uprobe *r)
 {
 	if (l->inode < r->inode)
@@ -393,10 +397,8 @@
 	while (n) {
 		uprobe = rb_entry(n, struct uprobe, rb_node);
 		match = match_uprobe(&u, uprobe);
-		if (!match) {
-			atomic_inc(&uprobe->ref);
-			return uprobe;
-		}
+		if (!match)
+			return get_uprobe(uprobe);
 
 		if (match < 0)
 			n = n->rb_left;
@@ -432,10 +434,8 @@
 		parent = *p;
 		u = rb_entry(parent, struct uprobe, rb_node);
 		match = match_uprobe(uprobe, u);
-		if (!match) {
-			atomic_inc(&u->ref);
-			return u;
-		}
+		if (!match)
+			return get_uprobe(u);
 
 		if (match < 0)
 			p = &parent->rb_left;
@@ -472,12 +472,6 @@
 	return u;
 }
 
-static void put_uprobe(struct uprobe *uprobe)
-{
-	if (atomic_dec_and_test(&uprobe->ref))
-		kfree(uprobe);
-}
-
 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 {
 	struct uprobe *uprobe, *cur_uprobe;
@@ -1039,14 +1033,14 @@
 			if (u->inode != inode || u->offset < min)
 				break;
 			list_add(&u->pending_list, head);
-			atomic_inc(&u->ref);
+			get_uprobe(u);
 		}
 		for (t = n; (t = rb_next(t)); ) {
 			u = rb_entry(t, struct uprobe, rb_node);
 			if (u->inode != inode || u->offset > max)
 				break;
 			list_add(&u->pending_list, head);
-			atomic_inc(&u->ref);
+			get_uprobe(u);
 		}
 	}
 	spin_unlock(&uprobes_treelock);
@@ -1132,11 +1126,14 @@
 /* Slot allocation for XOL */
 static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 {
-	int ret = -EALREADY;
+	struct vm_area_struct *vma;
+	int ret;
 
 	down_write(&mm->mmap_sem);
-	if (mm->uprobes_state.xol_area)
+	if (mm->uprobes_state.xol_area) {
+		ret = -EALREADY;
 		goto fail;
+	}
 
 	if (!area->vaddr) {
 		/* Try to map as high as possible, this is only a hint. */
@@ -1148,11 +1145,15 @@
 		}
 	}
 
-	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
-				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
-	if (ret)
+	vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
+				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
+				&area->xol_mapping);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto fail;
+	}
 
+	ret = 0;
 	smp_wmb();	/* pairs with get_xol_area() */
 	mm->uprobes_state.xol_area = area;
  fail:
@@ -1175,21 +1176,24 @@
 	if (!area->bitmap)
 		goto free_area;
 
-	area->page = alloc_page(GFP_HIGHUSER);
-	if (!area->page)
+	area->xol_mapping.name = "[uprobes]";
+	area->xol_mapping.pages = area->pages;
+	area->pages[0] = alloc_page(GFP_HIGHUSER);
+	if (!area->pages[0])
 		goto free_bitmap;
+	area->pages[1] = NULL;
 
 	area->vaddr = vaddr;
 	init_waitqueue_head(&area->wq);
 	/* Reserve the 1st slot for get_trampoline_vaddr() */
 	set_bit(0, area->bitmap);
 	atomic_set(&area->slot_count, 1);
-	copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
+	copy_to_page(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);
 
 	if (!xol_add_vma(mm, area))
 		return area;
 
-	__free_page(area->page);
+	__free_page(area->pages[0]);
  free_bitmap:
 	kfree(area->bitmap);
  free_area:
@@ -1227,7 +1231,7 @@
 	if (!area)
 		return;
 
-	put_page(area->page);
+	put_page(area->pages[0]);
 	kfree(area->bitmap);
 	kfree(area);
 }
@@ -1296,7 +1300,7 @@
 	if (unlikely(!xol_vaddr))
 		return 0;
 
-	arch_uprobe_copy_ixol(area->page, xol_vaddr,
+	arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,
 			      &uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
 
 	return xol_vaddr;
@@ -1333,6 +1337,7 @@
 
 		clear_bit(slot_nr, area->bitmap);
 		atomic_dec(&area->slot_count);
+		smp_mb__after_atomic(); /* pairs with prepare_to_wait() */
 		if (waitqueue_active(&area->wq))
 			wake_up(&area->wq);
 
@@ -1376,6 +1381,14 @@
 	return instruction_pointer(regs);
 }
 
+static struct return_instance *free_ret_instance(struct return_instance *ri)
+{
+	struct return_instance *next = ri->next;
+	put_uprobe(ri->uprobe);
+	kfree(ri);
+	return next;
+}
+
 /*
  * Called with no locks held.
  * Called in context of an exiting or an exec-ing thread.
@@ -1383,7 +1396,7 @@
 void uprobe_free_utask(struct task_struct *t)
 {
 	struct uprobe_task *utask = t->utask;
-	struct return_instance *ri, *tmp;
+	struct return_instance *ri;
 
 	if (!utask)
 		return;
@@ -1392,13 +1405,8 @@
 		put_uprobe(utask->active_uprobe);
 
 	ri = utask->return_instances;
-	while (ri) {
-		tmp = ri;
-		ri = ri->next;
-
-		put_uprobe(tmp->uprobe);
-		kfree(tmp);
-	}
+	while (ri)
+		ri = free_ret_instance(ri);
 
 	xol_free_insn_slot(t);
 	kfree(utask);
@@ -1437,7 +1445,7 @@
 			return -ENOMEM;
 
 		*n = *o;
-		atomic_inc(&n->uprobe->ref);
+		get_uprobe(n->uprobe);
 		n->next = NULL;
 
 		*p = n;
@@ -1515,12 +1523,25 @@
 	return trampoline_vaddr;
 }
 
+static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
+					struct pt_regs *regs)
+{
+	struct return_instance *ri = utask->return_instances;
+	enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
+
+	while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
+		ri = free_ret_instance(ri);
+		utask->depth--;
+	}
+	utask->return_instances = ri;
+}
+
 static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 {
 	struct return_instance *ri;
 	struct uprobe_task *utask;
 	unsigned long orig_ret_vaddr, trampoline_vaddr;
-	bool chained = false;
+	bool chained;
 
 	if (!get_xol_area())
 		return;
@@ -1536,49 +1557,47 @@
 		return;
 	}
 
-	ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL);
+	ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
 	if (!ri)
-		goto fail;
+		return;
 
 	trampoline_vaddr = get_trampoline_vaddr();
 	orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
 	if (orig_ret_vaddr == -1)
 		goto fail;
 
+	/* drop the entries invalidated by longjmp() */
+	chained = (orig_ret_vaddr == trampoline_vaddr);
+	cleanup_return_instances(utask, chained, regs);
+
 	/*
 	 * We don't want to keep trampoline address in stack, rather keep the
 	 * original return address of first caller thru all the consequent
 	 * instances. This also makes breakpoint unwrapping easier.
 	 */
-	if (orig_ret_vaddr == trampoline_vaddr) {
+	if (chained) {
 		if (!utask->return_instances) {
 			/*
 			 * This situation is not possible. Likely we have an
 			 * attack from user-space.
 			 */
-			pr_warn("uprobe: unable to set uretprobe pid/tgid=%d/%d\n",
-						current->pid, current->tgid);
+			uprobe_warn(current, "handle tail call");
 			goto fail;
 		}
-
-		chained = true;
 		orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
 	}
 
-	atomic_inc(&uprobe->ref);
-	ri->uprobe = uprobe;
+	ri->uprobe = get_uprobe(uprobe);
 	ri->func = instruction_pointer(regs);
+	ri->stack = user_stack_pointer(regs);
 	ri->orig_ret_vaddr = orig_ret_vaddr;
 	ri->chained = chained;
 
 	utask->depth++;
-
-	/* add instance to the stack */
 	ri->next = utask->return_instances;
 	utask->return_instances = ri;
 
 	return;
-
  fail:
 	kfree(ri);
 }
@@ -1766,46 +1785,58 @@
 	up_read(&uprobe->register_rwsem);
 }
 
-static bool handle_trampoline(struct pt_regs *regs)
+static struct return_instance *find_next_ret_chain(struct return_instance *ri)
+{
+	bool chained;
+
+	do {
+		chained = ri->chained;
+		ri = ri->next;	/* can't be NULL if chained */
+	} while (chained);
+
+	return ri;
+}
+
+static void handle_trampoline(struct pt_regs *regs)
 {
 	struct uprobe_task *utask;
-	struct return_instance *ri, *tmp;
-	bool chained;
+	struct return_instance *ri, *next;
+	bool valid;
 
 	utask = current->utask;
 	if (!utask)
-		return false;
+		goto sigill;
 
 	ri = utask->return_instances;
 	if (!ri)
-		return false;
+		goto sigill;
 
-	/*
-	 * TODO: we should throw out return_instance's invalidated by
-	 * longjmp(), currently we assume that the probed function always
-	 * returns.
-	 */
-	instruction_pointer_set(regs, ri->orig_ret_vaddr);
+	do {
+		/*
+		 * We should throw out the frames invalidated by longjmp().
+		 * If this chain is valid, then the next one should be alive
+		 * or NULL; the latter case means that nobody but ri->func
+		 * could hit this trampoline on return. TODO: sigaltstack().
+		 */
+		next = find_next_ret_chain(ri);
+		valid = !next || arch_uretprobe_is_alive(next, RP_CHECK_RET, regs);
 
-	for (;;) {
-		handle_uretprobe_chain(ri, regs);
-
-		chained = ri->chained;
-		put_uprobe(ri->uprobe);
-
-		tmp = ri;
-		ri = ri->next;
-		kfree(tmp);
-		utask->depth--;
-
-		if (!chained)
-			break;
-		BUG_ON(!ri);
-	}
+		instruction_pointer_set(regs, ri->orig_ret_vaddr);
+		do {
+			if (valid)
+				handle_uretprobe_chain(ri, regs);
+			ri = free_ret_instance(ri);
+			utask->depth--;
+		} while (ri != next);
+	} while (!valid);
 
 	utask->return_instances = ri;
+	return;
 
-	return true;
+ sigill:
+	uprobe_warn(current, "handle uretprobe, sending SIGILL.");
+	force_sig_info(SIGILL, SEND_SIG_FORCED, current);
+
 }
 
 bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
@@ -1813,6 +1844,12 @@
 	return false;
 }
 
+bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+					struct pt_regs *regs)
+{
+	return true;
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
@@ -1824,13 +1861,8 @@
 	int uninitialized_var(is_swbp);
 
 	bp_vaddr = uprobe_get_swbp_addr(regs);
-	if (bp_vaddr == get_trampoline_vaddr()) {
-		if (handle_trampoline(regs))
-			return;
-
-		pr_warn("uprobe: unable to handle uretprobe pid/tgid=%d/%d\n",
-						current->pid, current->tgid);
-	}
+	if (bp_vaddr == get_trampoline_vaddr())
+		return handle_trampoline(regs);
 
 	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
 	if (!uprobe) {
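
The new arch_uretprobe_is_alive() hook lets an architecture decide whether a return_instance has been invalidated by longjmp() by comparing the recorded stack pointer (ri->stack, saved above) with the current one. A sketch of what an arch override could look like; the comparison mirrors the x86 version added in this series, but treat it as illustrative rather than authoritative.

#include <linux/uprobes.h>
#include <linux/ptrace.h>

bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CALL)	/* sp was just decremented by "call" */
		return regs->sp < ret->stack;

	return regs->sp <= ret->stack;
}
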
diff --git a/kernel/exit.c b/kernel/exit.c
index 031325e..ea95ee1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1471,7 +1471,7 @@
 	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
 repeat:
 	/*
-	 * If there is nothing that can match our critiera just get out.
+	 * If there is nothing that can match our criteria, just get out.
 	 * We will clear ->notask_error to zero if we see any child that
 	 * might later match our criteria, even if we are not able to reap
 	 * it yet.
diff --git a/kernel/fork.c b/kernel/fork.c
index dbd9b8d..03aa2e6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1072,6 +1072,7 @@
 	rcu_assign_pointer(tsk->sighand, sig);
 	if (!sig)
 		return -ENOMEM;
+
 	atomic_set(&sig->count, 1);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
 	return 0;
@@ -1133,6 +1134,7 @@
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);
 	seqlock_init(&sig->stats_lock);
+	prev_cputime_init(&sig->prev_cputime);
 
 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sig->real_timer.function = it_real_fn;
@@ -1244,6 +1246,7 @@
 {
 	int retval;
 	struct task_struct *p;
+	void *cgrp_ss_priv[CGROUP_CANFORK_COUNT] = {};
 
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
@@ -1278,10 +1281,9 @@
 
 	/*
 	 * If the new process will be in a different pid or user namespace
-	 * do not allow it to share a thread group or signal handlers or
-	 * parent with the forking task.
+	 * do not allow it to share a thread group with the forking task.
 	 */
-	if (clone_flags & CLONE_SIGHAND) {
+	if (clone_flags & CLONE_THREAD) {
 		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
 		    (task_active_pid_ns(current) !=
 				current->nsproxy->pid_ns_for_children))
@@ -1340,9 +1342,8 @@
 
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	p->prev_cputime.utime = p->prev_cputime.stime = 0;
-#endif
+	prev_cputime_init(&p->prev_cputime);
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqlock_init(&p->vtime_seqlock);
 	p->vtime_snap = 0;
@@ -1518,6 +1519,16 @@
 	p->task_works = NULL;
 
 	/*
+	 * Ensure that the cgroup subsystem policies allow the new process to be
+	 * forked. It should be noted the the new process's css_set can be changed
+	 * between here and cgroup_post_fork() if an organisation operation is in
+	 * progress.
+	 */
+	retval = cgroup_can_fork(p, cgrp_ss_priv);
+	if (retval)
+		goto bad_fork_free_pid;
+
+	/*
 	 * Make it visible to the rest of the system, but dont wake it up yet.
 	 * Need tasklist lock for parent etc handling!
 	 */
@@ -1553,7 +1564,7 @@
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_pid;
+		goto bad_fork_cancel_cgroup;
 	}
 
 	if (likely(p->pid)) {
@@ -1595,7 +1606,7 @@
 	write_unlock_irq(&tasklist_lock);
 
 	proc_fork_connector(p);
-	cgroup_post_fork(p);
+	cgroup_post_fork(p, cgrp_ss_priv);
 	if (clone_flags & CLONE_THREAD)
 		threadgroup_change_end(current);
 	perf_event_fork(p);
@@ -1605,6 +1616,8 @@
 
 	return p;
 
+bad_fork_cancel_cgroup:
+	cgroup_cancel_fork(p, cgrp_ss_priv);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
@@ -1871,13 +1884,21 @@
 				CLONE_NEWUSER|CLONE_NEWPID))
 		return -EINVAL;
 	/*
-	 * Not implemented, but pretend it works if there is nothing to
-	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
-	 * needs to unshare vm.
+	 * Not implemented, but pretend it works if there is nothing
+	 * to unshare.  Note that unsharing the address space or the
+	 * signal handlers also needs to unshare the signal queues (aka
+	 * CLONE_THREAD).
 	 */
 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
-		/* FIXME: get_task_mm() increments ->mm_users */
-		if (atomic_read(&current->mm->mm_users) > 1)
+		if (!thread_group_empty(current))
+			return -EINVAL;
+	}
+	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
+		if (atomic_read(&current->sighand->count) > 1)
+			return -EINVAL;
+	}
+	if (unshare_flags & CLONE_VM) {
+		if (!current_is_single_threaded())
 			return -EINVAL;
 	}
 
@@ -1941,21 +1962,22 @@
 	int err;
 
 	/*
-	 * If unsharing a user namespace must also unshare the thread.
+	 * If unsharing a user namespace must also unshare the thread group
+	 * and unshare the filesystem root and working directories.
 	 */
 	if (unshare_flags & CLONE_NEWUSER)
 		unshare_flags |= CLONE_THREAD | CLONE_FS;
 	/*
-	 * If unsharing a thread from a thread group, must also unshare vm.
-	 */
-	if (unshare_flags & CLONE_THREAD)
-		unshare_flags |= CLONE_VM;
-	/*
 	 * If unsharing vm, must also unshare signal handlers.
 	 */
 	if (unshare_flags & CLONE_VM)
 		unshare_flags |= CLONE_SIGHAND;
 	/*
+	 * If unsharing signal handlers, must also unshare the signal queues.
+	 */
+	if (unshare_flags & CLONE_SIGHAND)
+		unshare_flags |= CLONE_THREAD;
+	/*
 	 * If unsharing namespace, must also unshare filesystem information.
 	 */
 	if (unshare_flags & CLONE_NEWNS)
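
The implication chain above (CLONE_NEWUSER adds CLONE_THREAD|CLONE_FS, CLONE_VM adds CLONE_SIGHAND, CLONE_SIGHAND adds CLONE_THREAD) means these unshare() requests can only succeed from a task that is alone in its thread group and does not share its signal handlers. A small userspace sketch to observe the new behaviour:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/*
	 * From a plain single-threaded process this succeeds. From a task
	 * with other threads in its group, or one sharing its sighand with
	 * another task, it now fails with EINVAL because the implied
	 * CLONE_THREAD / sighand checks cannot be satisfied.
	 */
	if (unshare(CLONE_SIGHAND) < 0)
		printf("unshare(CLONE_SIGHAND): %s\n", strerror(errno));
	else
		printf("unshare(CLONE_SIGHAND) succeeded\n");
	return 0;
}
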
diff --git a/kernel/futex.c b/kernel/futex.c
index c4a182f..6e443ef 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -64,6 +64,7 @@
 #include <linux/hugetlb.h>
 #include <linux/freezer.h>
 #include <linux/bootmem.h>
+#include <linux/fault-inject.h>
 
 #include <asm/futex.h>
 
@@ -258,6 +259,66 @@
 
 static struct futex_hash_bucket *futex_queues;
 
+/*
+ * Fault injections for futexes.
+ */
+#ifdef CONFIG_FAIL_FUTEX
+
+static struct {
+	struct fault_attr attr;
+
+	u32 ignore_private;
+} fail_futex = {
+	.attr = FAULT_ATTR_INITIALIZER,
+	.ignore_private = 0,
+};
+
+static int __init setup_fail_futex(char *str)
+{
+	return setup_fault_attr(&fail_futex.attr, str);
+}
+__setup("fail_futex=", setup_fail_futex);
+
+static bool should_fail_futex(bool fshared)
+{
+	if (fail_futex.ignore_private && !fshared)
+		return false;
+
+	return should_fail(&fail_futex.attr, 1);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_futex_debugfs(void)
+{
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	struct dentry *dir;
+
+	dir = fault_create_debugfs_attr("fail_futex", NULL,
+					&fail_futex.attr);
+	if (IS_ERR(dir))
+		return PTR_ERR(dir);
+
+	if (!debugfs_create_bool("ignore-private", mode, dir,
+				 &fail_futex.ignore_private)) {
+		debugfs_remove_recursive(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(fail_futex_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else
+static inline bool should_fail_futex(bool fshared)
+{
+	return false;
+}
+#endif /* CONFIG_FAIL_FUTEX */
+
 static inline void futex_get_mm(union futex_key *key)
 {
 	atomic_inc(&key->private.mm->mm_count);
@@ -413,6 +474,9 @@
 	if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(fshared)))
+		return -EFAULT;
+
 	/*
 	 * PROCESS_PRIVATE futexes are fast.
 	 * As the mm cannot disappear under us and the 'key' only needs
@@ -428,6 +492,10 @@
 	}
 
 again:
+	/* Ignore any VERIFY_READ mapping (futex common case) */
+	if (unlikely(should_fail_futex(fshared)))
+		return -EFAULT;
+
 	err = get_user_pages_fast(address, 1, 1, &page);
 	/*
 	 * If write access is not required (eg. FUTEX_WAIT), try
@@ -516,7 +584,7 @@
 		 * A RO anonymous page will never change and thus doesn't make
 		 * sense for futex operations.
 		 */
-		if (ro) {
+		if (unlikely(should_fail_futex(fshared)) || ro) {
 			err = -EFAULT;
 			goto out;
 		}
@@ -974,6 +1042,9 @@
 {
 	u32 uninitialized_var(curval);
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
 		return -EFAULT;
 
@@ -1015,12 +1086,18 @@
 	if (get_futex_value_locked(&uval, uaddr))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	/*
 	 * Detect deadlocks.
 	 */
 	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
 		return -EDEADLK;
 
+	if ((unlikely(should_fail_futex(true))))
+		return -EDEADLK;
+
 	/*
 	 * Lookup existing state first. If it exists, try to attach to
 	 * its pi_state.
@@ -1155,6 +1232,9 @@
 	 */
 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
+	if (unlikely(should_fail_futex(true)))
+		ret = -EFAULT;
+
 	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
 		ret = -EFAULT;
 	else if (curval != uval)
@@ -1457,6 +1537,9 @@
 	if (get_futex_value_locked(&curval, pifutex))
 		return -EFAULT;
 
+	if (unlikely(should_fail_futex(true)))
+		return -EFAULT;
+
 	/*
 	 * Find the top_waiter and determine if there are additional waiters.
 	 * If the caller intends to requeue more than 1 waiter to pifutex,
@@ -2268,8 +2351,11 @@
 /*
  * Userspace tried a 0 -> TID atomic transition of the futex value
  * and failed. The kernel side here does the whole locking operation:
- * if there are waiters then it will block, it does PI, etc. (Due to
- * races the kernel might see a 0 value of the futex too.)
+ * if there are waiters then it will block as a consequence of relying
+ * on rt-mutexes, it does PI, etc. (Due to races the kernel might see
+ * a 0 value of the futex too.).
+ *
+ * Also serves as the futex trylock_pi() path, with the corresponding semantics.
  */
 static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 			 ktime_t *time, int trylock)
@@ -2300,6 +2386,10 @@
 
 	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
 	if (unlikely(ret)) {
+		/*
+		 * Atomic work succeeded and we got the lock,
+		 * or failed. Either way, we do _not_ block.
+		 */
 		switch (ret) {
 		case 1:
 			/* We got the lock. */
@@ -2530,7 +2620,7 @@
  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
  * @uaddr:	the futex we initially wait on (non-pi)
  * @flags:	futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
- * 		the same type, no requeueing from private to shared, etc.
+ *		the same type, no requeueing from private to shared, etc.
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
@@ -3005,6 +3095,8 @@
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
 		      cmd == FUTEX_WAIT_BITSET ||
 		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
+		if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
+			return -EFAULT;
 		if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
 			return -EFAULT;
 		if (!timespec_valid(&ts))
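
With CONFIG_FAIL_FUTEX and CONFIG_FAULT_INJECTION_DEBUG_FS enabled, the new fail_futex knobs follow the standard fault-injection layout under debugfs (plus the fail_futex= boot parameter). A hedged sketch that flips them at run time; the debugfs mount point and the generic attribute names (probability, interval, times) are assumed from the fault-injection framework, while ignore-private is the futex-specific knob added here.

#include <stdio.h>

/* Assumed debugfs mount point. */
#define FAIL_FUTEX "/sys/kernel/debug/fail_futex/"

static void set_attr(const char *attr, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), FAIL_FUTEX "%s", attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return;
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	set_attr("probability", "10");		/* fail ~10% of injectable sites */
	set_attr("interval", "1");
	set_attr("times", "-1");		/* unlimited number of failures */
	set_attr("ignore-private", "1");	/* leave private futexes alone */
	return 0;
}
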
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ae21682..6e40a95 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -63,7 +63,7 @@
 		return -EINVAL;
 
 	type &= IRQ_TYPE_SENSE_MASK;
-	ret = __irq_set_trigger(desc, irq, type);
+	ret = __irq_set_trigger(desc, type);
 	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
@@ -187,7 +187,7 @@
 		irq_enable(desc);
 	}
 	if (resend)
-		check_irq_resend(desc, desc->irq_data.irq);
+		check_irq_resend(desc);
 	return ret;
 }
 
@@ -315,7 +315,7 @@
 	raw_spin_lock_irq(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	action = desc->action;
 	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
@@ -328,7 +328,7 @@
 
 	action_ret = action->thread_fn(action->irq, action->dev_id);
 	if (!noirqdebug)
-		note_interrupt(irq, desc, action_ret);
+		note_interrupt(desc, action_ret);
 
 	raw_spin_lock_irq(&desc->lock);
 	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
@@ -391,7 +391,7 @@
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 		desc->istate |= IRQS_PENDING;
@@ -443,7 +443,7 @@
 		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	/*
 	 * If its disabled or no action available
@@ -515,7 +515,7 @@
 		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	/*
 	 * If its disabled or no action available
@@ -583,7 +583,7 @@
 		goto out_unlock;
 	}
 
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	/* Start handling the irq */
 	desc->irq_data.chip->irq_ack(&desc->irq_data);
@@ -646,7 +646,7 @@
 		goto out_eoi;
 	}
 
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	do {
 		if (unlikely(!desc->action))
@@ -675,7 +675,7 @@
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	if (chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
@@ -705,7 +705,7 @@
 	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
 	irqreturn_t res;
 
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 
 	if (chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
@@ -1020,7 +1020,7 @@
 /**
  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
  * @data:	Pointer to interrupt specific data
- * @dest:	The vcpu affinity information
+ * @vcpu_info:	The vcpu affinity information
  */
 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
 {
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 15b370d..abd286a 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -553,6 +553,9 @@
 			if (data)
 				ct->chip.irq_suspend(data);
 		}
+
+		if (gc->suspend)
+			gc->suspend(gc);
 	}
 	return 0;
 }
@@ -564,6 +567,9 @@
 	list_for_each_entry(gc, &gc_list, list) {
 		struct irq_chip_type *ct = gc->chip_types;
 
+		if (gc->resume)
+			gc->resume(gc);
+
 		if (ct->chip.irq_resume) {
 			struct irq_data *data = irq_gc_get_irq_data(gc);
 
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6354802..b6eeea8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -30,7 +30,7 @@
 void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-	kstat_incr_irqs_this_cpu(irq, desc);
+	kstat_incr_irqs_this_cpu(desc);
 	ack_bad_irq(irq);
 }
 
@@ -176,7 +176,7 @@
 	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
-		note_interrupt(irq, desc, retval);
+		note_interrupt(desc, retval);
 	return retval;
 }
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 61008b8..eee4b38 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -59,10 +59,9 @@
 #include "debug.h"
 #include "settings.h"
 
-extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
-		unsigned long flags);
-extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
-extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
+extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc);
+extern void __enable_irq(struct irq_desc *desc);
 
 extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
@@ -86,7 +85,7 @@
 irqreturn_t handle_irq_event(struct irq_desc *desc);
 
 /* Resending of interrupts :*/
-void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+void check_irq_resend(struct irq_desc *desc);
 bool irq_wait_for_poll(struct irq_desc *desc);
 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
 
@@ -187,7 +186,7 @@
 	return __irqd_to_state(d) & mask;
 }
 
-static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
+static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
 	__this_cpu_inc(*desc->kstat_irqs);
 	__this_cpu_inc(kstat.irqs_sum);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 4afc457..0a2a4b6 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -582,7 +582,7 @@
 
 void kstat_incr_irq_this_cpu(unsigned int irq)
 {
-	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
+	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
 }
 
 /**
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8c3577f..79baaf8 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -187,10 +187,12 @@
 EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 
 /**
- * irq_find_host() - Locates a domain for a given device node
+ * irq_find_matching_host() - Locates a domain for a given device node
  * @node: device-tree node of the interrupt controller
+ * @bus_token: domain-specific data
  */
-struct irq_domain *irq_find_host(struct device_node *node)
+struct irq_domain *irq_find_matching_host(struct device_node *node,
+					  enum irq_domain_bus_token bus_token)
 {
 	struct irq_domain *h, *found = NULL;
 	int rc;
@@ -199,13 +201,19 @@
 	 * it might potentially be set to match all interrupts in
 	 * the absence of a device node. This isn't a problem so far
 	 * yet though...
+	 *
+	 * bus_token == DOMAIN_BUS_ANY matches any domain, any other
+	 * values must generate an exact match for the domain to be
+	 * selected.
 	 */
 	mutex_lock(&irq_domain_mutex);
 	list_for_each_entry(h, &irq_domain_list, link) {
 		if (h->ops->match)
-			rc = h->ops->match(h, node);
+			rc = h->ops->match(h, node, bus_token);
 		else
-			rc = (h->of_node != NULL) && (h->of_node == node);
+			rc = ((h->of_node != NULL) && (h->of_node == node) &&
+			      ((bus_token == DOMAIN_BUS_ANY) ||
+			       (h->bus_token == bus_token)));
 
 		if (rc) {
 			found = h;
@@ -215,7 +223,7 @@
 	mutex_unlock(&irq_domain_mutex);
 	return found;
 }
-EXPORT_SYMBOL_GPL(irq_find_host);
+EXPORT_SYMBOL_GPL(irq_find_matching_host);
 
 /**
  * irq_set_default_host() - Set a "default" irq domain
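
With bus_token threaded through irq_find_matching_host() and the ops->match() callback, a domain that should only be picked for a particular bus can check the token itself. A hypothetical matcher (names invented for illustration) might look like:

#include <linux/irqdomain.h>
#include <linux/of.h>

/* Illustrative only: claim our OF node, but only for wired interrupts. */
static int example_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	return d->of_node == node &&
	       (bus_token == DOMAIN_BUS_ANY || bus_token == DOMAIN_BUS_WIRED);
}

static const struct irq_domain_ops example_domain_ops = {
	.match	= example_domain_match,
	/* .map, .xlate, etc. as usual */
};
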
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f974485..ad1b064 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -115,6 +115,14 @@
 #ifdef CONFIG_SMP
 cpumask_var_t irq_default_affinity;
 
+static int __irq_can_set_affinity(struct irq_desc *desc)
+{
+	if (!desc || !irqd_can_balance(&desc->irq_data) ||
+	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+		return 0;
+	return 1;
+}
+
 /**
  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
  *	@irq:		Interrupt to check
@@ -122,13 +130,7 @@
  */
 int irq_can_set_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (!desc || !irqd_can_balance(&desc->irq_data) ||
-	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
-		return 0;
-
-	return 1;
+	return __irq_can_set_affinity(irq_to_desc(irq));
 }
 
 /**
@@ -359,14 +361,13 @@
 /*
  * Generic version of the affinity autoselector.
  */
-static int
-setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 {
 	struct cpumask *set = irq_default_affinity;
 	int node = irq_desc_get_node(desc);
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
-	if (!irq_can_set_affinity(irq))
+	if (!__irq_can_set_affinity(desc))
 		return 0;
 
 	/*
@@ -393,10 +394,10 @@
 	return 0;
 }
 #else
-static inline int
-setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
+/* Wrapper for ALPHA specific affinity selector magic */
+static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
 {
-	return irq_select_affinity(irq);
+	return irq_select_affinity(irq_desc_get_irq(d));
 }
 #endif
 
@@ -410,20 +411,20 @@
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(irq, desc, mask);
+	ret = setup_affinity(desc, mask);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 
 #else
 static inline int
-setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
+setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 {
 	return 0;
 }
 #endif
 
-void __disable_irq(struct irq_desc *desc, unsigned int irq)
+void __disable_irq(struct irq_desc *desc)
 {
 	if (!desc->depth++)
 		irq_disable(desc);
@@ -436,7 +437,7 @@
 
 	if (!desc)
 		return -EINVAL;
-	__disable_irq(desc, irq);
+	__disable_irq(desc);
 	irq_put_desc_busunlock(desc, flags);
 	return 0;
 }
@@ -503,12 +504,13 @@
 }
 EXPORT_SYMBOL_GPL(disable_hardirq);
 
-void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc)
 {
 	switch (desc->depth) {
 	case 0:
  err_out:
-		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
+		     irq_desc_get_irq(desc));
 		break;
 	case 1: {
 		if (desc->istate & IRQS_SUSPENDED)
@@ -516,7 +518,7 @@
 		/* Prevent probing on this irq: */
 		irq_settings_set_noprobe(desc);
 		irq_enable(desc);
-		check_irq_resend(desc, irq);
+		check_irq_resend(desc);
 		/* fall-through */
 	}
 	default:
@@ -546,7 +548,7 @@
 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 		goto out;
 
-	__enable_irq(desc, irq);
+	__enable_irq(desc);
 out:
 	irq_put_desc_busunlock(desc, flags);
 }
@@ -637,8 +639,7 @@
 	return canrequest;
 }
 
-int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
-		      unsigned long flags)
+int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 {
 	struct irq_chip *chip = desc->irq_data.chip;
 	int ret, unmask = 0;
@@ -648,7 +649,8 @@
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n",
+			 irq_desc_get_irq(desc),
 			 chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
@@ -685,7 +687,7 @@
 		break;
 	default:
 		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
-		       flags, irq, chip->irq_set_type);
+		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 	}
 	if (unmask)
 		unmask_irq(desc);
@@ -1221,8 +1223,8 @@
 
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
-			ret = __irq_set_trigger(desc, irq,
-					new->flags & IRQF_TRIGGER_MASK);
+			ret = __irq_set_trigger(desc,
+						new->flags & IRQF_TRIGGER_MASK);
 
 			if (ret)
 				goto out_mask;
@@ -1253,7 +1255,7 @@
 		}
 
 		/* Set default affinity mask once everything is setup */
-		setup_affinity(irq, desc, mask);
+		setup_affinity(desc, mask);
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1280,7 +1282,7 @@
 	 */
 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq);
+		__enable_irq(desc);
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1650,7 +1652,7 @@
 	if (type != IRQ_TYPE_NONE) {
 		int ret;
 
-		ret = __irq_set_trigger(desc, irq, type);
+		ret = __irq_set_trigger(desc, type);
 
 		if (ret) {
 			WARN(1, "failed to set type for IRQ%d\n", irq);
@@ -1875,6 +1877,7 @@
 	irq_put_desc_busunlock(desc, flags);
 	return err;
 }
+EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
 
 /**
  *	irq_set_irqchip_state - set the state of a forwarded interrupt.
@@ -1920,3 +1923,4 @@
 	irq_put_desc_busunlock(desc, flags);
 	return err;
 }
+EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 7bf1f1b..7e6512b 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -18,6 +18,23 @@
 /* Temporary solution for building, will be removed later */
 #include <linux/pci.h>
 
+struct msi_desc *alloc_msi_entry(struct device *dev)
+{
+	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return NULL;
+
+	INIT_LIST_HEAD(&desc->list);
+	desc->dev = dev;
+
+	return desc;
+}
+
+void free_msi_entry(struct msi_desc *entry)
+{
+	kfree(entry);
+}
+
 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
 {
 	*msg = entry->msg;
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index d22786a..21c6261 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -68,7 +68,7 @@
 		desc->cond_suspend_depth--;
 }
 
-static bool suspend_device_irq(struct irq_desc *desc, int irq)
+static bool suspend_device_irq(struct irq_desc *desc)
 {
 	if (!desc->action || desc->no_suspend_depth)
 		return false;
@@ -85,7 +85,7 @@
 	}
 
 	desc->istate |= IRQS_SUSPENDED;
-	__disable_irq(desc, irq);
+	__disable_irq(desc);
 
 	/*
 	 * Hardware which has no wakeup source configuration facility
@@ -126,7 +126,7 @@
 		if (irq_settings_is_nested_thread(desc))
 			continue;
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		sync = suspend_device_irq(desc, irq);
+		sync = suspend_device_irq(desc);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		if (sync)
@@ -135,7 +135,7 @@
 }
 EXPORT_SYMBOL_GPL(suspend_device_irqs);
 
-static void resume_irq(struct irq_desc *desc, int irq)
+static void resume_irq(struct irq_desc *desc)
 {
 	irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
 
@@ -150,7 +150,7 @@
 	desc->depth++;
 resume:
 	desc->istate &= ~IRQS_SUSPENDED;
-	__enable_irq(desc, irq);
+	__enable_irq(desc);
 }
 
 static void resume_irqs(bool want_early)
@@ -169,7 +169,7 @@
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		resume_irq(desc, irq);
+		resume_irq(desc);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 7a5237a..dd95f44 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -53,7 +53,7 @@
  *
  * Is called with interrupts disabled and desc->lock held.
  */
-void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+void check_irq_resend(struct irq_desc *desc)
 {
 	/*
 	 * We do not resend level type interrupts. Level type
@@ -74,6 +74,8 @@
 		if (!desc->irq_data.chip->irq_retrigger ||
 		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
 #ifdef CONFIG_HARDIRQS_SW_RESEND
+			unsigned int irq = irq_desc_get_irq(desc);
+
 			/*
 			 * If the interrupt is running in the thread
 			 * context of the parent irq we need to be
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index e2514b0..3214417 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -60,7 +60,7 @@
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int try_one_irq(int irq, struct irq_desc *desc, bool force)
+static int try_one_irq(struct irq_desc *desc, bool force)
 {
 	irqreturn_t ret = IRQ_NONE;
 	struct irqaction *action;
@@ -133,7 +133,7 @@
 		if (i == irq)	/* Already tried */
 			continue;
 
-		if (try_one_irq(i, desc, false))
+		if (try_one_irq(desc, false))
 			ok = 1;
 	}
 out:
@@ -164,7 +164,7 @@
 			continue;
 
 		local_irq_disable();
-		try_one_irq(i, desc, true);
+		try_one_irq(desc, true);
 		local_irq_enable();
 	}
 out:
@@ -188,10 +188,9 @@
  * (The other 100-of-100,000 interrupts may have been a correctly
  *  functioning device sharing an IRQ with the failing one)
  */
-static void
-__report_bad_irq(unsigned int irq, struct irq_desc *desc,
-		 irqreturn_t action_ret)
+static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
 {
+	unsigned int irq = irq_desc_get_irq(desc);
 	struct irqaction *action;
 	unsigned long flags;
 
@@ -224,14 +223,13 @@
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
-static void
-report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
 {
 	static int count = 100;
 
 	if (count > 0) {
 		count--;
-		__report_bad_irq(irq, desc, action_ret);
+		__report_bad_irq(desc, action_ret);
 	}
 }
 
@@ -272,15 +270,16 @@
 
 #define SPURIOUS_DEFERRED	0x80000000
 
-void note_interrupt(unsigned int irq, struct irq_desc *desc,
-		    irqreturn_t action_ret)
+void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 {
+	unsigned int irq;
+
 	if (desc->istate & IRQS_POLL_INPROGRESS ||
 	    irq_settings_is_polled(desc))
 		return;
 
 	if (bad_action_ret(action_ret)) {
-		report_bad_irq(irq, desc, action_ret);
+		report_bad_irq(desc, action_ret);
 		return;
 	}
 
@@ -398,6 +397,7 @@
 		desc->last_unhandled = jiffies;
 	}
 
+	irq = irq_desc_get_irq(desc);
 	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
 		int ok = misrouted_irq(irq);
 		if (action_ret == IRQ_NONE)
@@ -413,7 +413,7 @@
 		/*
 		 * The interrupt is stuck
 		 */
-		__report_bad_irq(irq, desc, action_ret);
+		__report_bad_irq(desc, action_ret);
 		/*
 		 * Now kill the IRQ
 		 */
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 52ebaca..f7dd15d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -54,7 +54,7 @@
 	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static void jump_label_update(struct static_key *key, int enable);
+static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
@@ -63,13 +63,8 @@
 		return;
 
 	jump_label_lock();
-	if (atomic_read(&key->enabled) == 0) {
-		if (!jump_label_get_branch_default(key))
-			jump_label_update(key, JUMP_LABEL_ENABLE);
-		else
-			jump_label_update(key, JUMP_LABEL_DISABLE);
-	}
-	atomic_inc(&key->enabled);
+	if (atomic_inc_return(&key->enabled) == 1)
+		jump_label_update(key);
 	jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -87,10 +82,7 @@
 		atomic_inc(&key->enabled);
 		schedule_delayed_work(work, rate_limit);
 	} else {
-		if (!jump_label_get_branch_default(key))
-			jump_label_update(key, JUMP_LABEL_DISABLE);
-		else
-			jump_label_update(key, JUMP_LABEL_ENABLE);
+		jump_label_update(key);
 	}
 	jump_label_unlock();
 }
@@ -149,7 +141,7 @@
 	return 0;
 }
 
-/* 
+/*
  * Update code which is definitely not currently executing.
  * Architectures which need heavyweight synchronization to modify
  * running code can override this to make the non-live update case
@@ -158,37 +150,54 @@
 void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
 					    enum jump_label_type type)
 {
-	arch_jump_label_transform(entry, type);	
+	arch_jump_label_transform(entry, type);
+}
+
+static inline struct jump_entry *static_key_entries(struct static_key *key)
+{
+	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+}
+
+static inline bool static_key_type(struct static_key *key)
+{
+	return (unsigned long)key->entries & JUMP_TYPE_MASK;
+}
+
+static inline struct static_key *jump_entry_key(struct jump_entry *entry)
+{
+	return (struct static_key *)((unsigned long)entry->key & ~1UL);
+}
+
+static bool jump_entry_branch(struct jump_entry *entry)
+{
+	return (unsigned long)entry->key & 1UL;
+}
+
+static enum jump_label_type jump_label_type(struct jump_entry *entry)
+{
+	struct static_key *key = jump_entry_key(entry);
+	bool enabled = static_key_enabled(key);
+	bool branch = jump_entry_branch(entry);
+
+	/* See the comment in linux/jump_label.h */
+	return enabled ^ branch;
 }
 
 static void __jump_label_update(struct static_key *key,
 				struct jump_entry *entry,
-				struct jump_entry *stop, int enable)
+				struct jump_entry *stop)
 {
-	for (; (entry < stop) &&
-	      (entry->key == (jump_label_t)(unsigned long)key);
-	      entry++) {
+	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
 		/*
 		 * entry->code set to 0 invalidates module init text sections
 		 * kernel_text_address() verifies we are not in core kernel
 		 * init code, see jump_label_invalidate_module_init().
 		 */
 		if (entry->code && kernel_text_address(entry->code))
-			arch_jump_label_transform(entry, enable);
+			arch_jump_label_transform(entry, jump_label_type(entry));
 	}
 }
 
-static enum jump_label_type jump_label_type(struct static_key *key)
-{
-	bool true_branch = jump_label_get_branch_default(key);
-	bool state = static_key_enabled(key);
-
-	if ((!true_branch && state) || (true_branch && !state))
-		return JUMP_LABEL_ENABLE;
-
-	return JUMP_LABEL_DISABLE;
-}
-
 void __init jump_label_init(void)
 {
 	struct jump_entry *iter_start = __start___jump_table;
@@ -202,8 +211,11 @@
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
-		iterk = (struct static_key *)(unsigned long)iter->key;
-		arch_jump_label_transform_static(iter, jump_label_type(iterk));
+		/* rewrite NOPs */
+		if (jump_label_type(iter) == JUMP_LABEL_NOP)
+			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
+
+		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
@@ -222,6 +234,16 @@
 
 #ifdef CONFIG_MODULES
 
+static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
+{
+	struct static_key *key = jump_entry_key(entry);
+	bool type = static_key_type(key);
+	bool branch = jump_entry_branch(entry);
+
+	/* See the comment in linux/jump_label.h */
+	return type ^ branch;
+}
+
 struct static_key_mod {
 	struct static_key_mod *next;
 	struct jump_entry *entries;
@@ -243,17 +265,15 @@
 				start, end);
 }
 
-static void __jump_label_mod_update(struct static_key *key, int enable)
+static void __jump_label_mod_update(struct static_key *key)
 {
-	struct static_key_mod *mod = key->next;
+	struct static_key_mod *mod;
 
-	while (mod) {
+	for (mod = key->next; mod; mod = mod->next) {
 		struct module *m = mod->mod;
 
 		__jump_label_update(key, mod->entries,
-				    m->jump_entries + m->num_jump_entries,
-				    enable);
-		mod = mod->next;
+				    m->jump_entries + m->num_jump_entries);
 	}
 }
 
@@ -276,7 +296,9 @@
 		return;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
+		/* Only write NOPs for arch_branch_static(). */
+		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
+			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
 	}
 }
 
@@ -297,7 +319,7 @@
 	for (iter = iter_start; iter < iter_stop; iter++) {
 		struct static_key *iterk;
 
-		iterk = (struct static_key *)(unsigned long)iter->key;
+		iterk = jump_entry_key(iter);
 		if (iterk == key)
 			continue;
 
@@ -318,8 +340,9 @@
 		jlm->next = key->next;
 		key->next = jlm;
 
-		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
-			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
+		/* Only update if we've changed from our initial state */
+		if (jump_label_type(iter) != jump_label_init_type(iter))
+			__jump_label_update(key, iter, iter_stop);
 	}
 
 	return 0;
@@ -334,10 +357,10 @@
 	struct static_key_mod *jlm, **prev;
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		if (iter->key == (jump_label_t)(unsigned long)key)
+		if (jump_entry_key(iter) == key)
 			continue;
 
-		key = (struct static_key *)(unsigned long)iter->key;
+		key = jump_entry_key(iter);
 
 		if (within_module(iter->key, mod))
 			continue;
@@ -439,14 +462,14 @@
 	return ret;
 }
 
-static void jump_label_update(struct static_key *key, int enable)
+static void jump_label_update(struct static_key *key)
 {
 	struct jump_entry *stop = __stop___jump_table;
-	struct jump_entry *entry = jump_label_get_entries(key);
+	struct jump_entry *entry = static_key_entries(key);
 #ifdef CONFIG_MODULES
 	struct module *mod;
 
-	__jump_label_mod_update(key, enable);
+	__jump_label_mod_update(key);
 
 	preempt_disable();
 	mod = __module_address((unsigned long)key);
@@ -456,7 +479,44 @@
 #endif
 	/* if there are no users, entry can be NULL */
 	if (entry)
-		__jump_label_update(key, entry, stop, enable);
+		__jump_label_update(key, entry, stop);
 }
 
-#endif
+#ifdef CONFIG_STATIC_KEYS_SELFTEST
+static DEFINE_STATIC_KEY_TRUE(sk_true);
+static DEFINE_STATIC_KEY_FALSE(sk_false);
+
+static __init int jump_label_test(void)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		WARN_ON(static_key_enabled(&sk_true.key) != true);
+		WARN_ON(static_key_enabled(&sk_false.key) != false);
+
+		WARN_ON(!static_branch_likely(&sk_true));
+		WARN_ON(!static_branch_unlikely(&sk_true));
+		WARN_ON(static_branch_likely(&sk_false));
+		WARN_ON(static_branch_unlikely(&sk_false));
+
+		static_branch_disable(&sk_true);
+		static_branch_enable(&sk_false);
+
+		WARN_ON(static_key_enabled(&sk_true.key) == true);
+		WARN_ON(static_key_enabled(&sk_false.key) == false);
+
+		WARN_ON(static_branch_likely(&sk_true));
+		WARN_ON(static_branch_unlikely(&sk_true));
+		WARN_ON(!static_branch_likely(&sk_false));
+		WARN_ON(!static_branch_unlikely(&sk_false));
+
+		static_branch_enable(&sk_true);
+		static_branch_disable(&sk_false);
+	}
+
+	return 0;
+}
+late_initcall(jump_label_test);
+#endif /* STATIC_KEYS_SELFTEST */
+
+#endif /* HAVE_JUMP_LABEL */
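The new helpers above encode the branch polarity in the low bit of each jump_entry's key pointer and pick the live code type with a single XOR (enabled ^ branch). Below is a minimal userspace model of just that encoding; the names and the NOP/JMP values are illustrative, not the kernel's headers.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    enum jtype { JTYPE_NOP = 0, JTYPE_JMP = 1 };

    /* Models of jump_entry_key()/jump_entry_branch(): the low pointer bit
     * records whether the site was emitted as the "likely" (branch) form. */
    static uintptr_t entry_key(uintptr_t tagged)    { return tagged & ~1UL; }
    static bool      entry_branch(uintptr_t tagged) { return tagged & 1UL;  }

    /* Model of jump_label_type(): desired code = key-enabled XOR branch bit. */
    static enum jtype desired(bool enabled, uintptr_t tagged)
    {
            return enabled ^ entry_branch(tagged);
    }

    int main(void)
    {
            unsigned long key;                      /* stands in for a struct static_key */
            uintptr_t plain  = (uintptr_t)&key;     /* low bit clear: unlikely site */
            uintptr_t branch = plain | 1UL;         /* low bit set:   likely site   */

            assert(entry_key(plain) == entry_key(branch));  /* same key either way */
            assert(desired(false, plain)  == JTYPE_NOP);
            assert(desired(true,  plain)  == JTYPE_JMP);
            assert(desired(false, branch) == JTYPE_JMP);
            assert(desired(true,  branch) == JTYPE_NOP);
            return 0;
    }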
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index c90e417..d10ab6b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1332,7 +1332,7 @@
 	       addr < (unsigned long)__kprobes_text_end;
 }
 
-static bool within_kprobe_blacklist(unsigned long addr)
+bool within_kprobe_blacklist(unsigned long addr)
 {
 	struct kprobe_blacklist_entry *ent;
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index fdea0be..490924c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -327,16 +327,30 @@
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
+	unsigned long flags;
+
 	if (!wait_task_inactive(p, state)) {
 		WARN_ON(1);
 		return;
 	}
+
 	/* It's safe because the task is inactive. */
-	do_set_cpus_allowed(p, cpumask_of(cpu));
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	do_set_cpus_allowed(p, mask);
 	p->flags |= PF_NO_SETAFFINITY;
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
+{
+	__kthread_bind_mask(p, cpumask_of(cpu), state);
+}
+
+void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
+{
+	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }
 
 /**
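kthread_bind_mask() generalizes kthread_bind() from one CPU to an arbitrary cpumask and, like kthread_bind(), must run before the new thread is woken for the first time, while wait_task_inactive() can still succeed. A hypothetical kernel-style caller (thread function and mask supplied elsewhere) might look like:

    /* Hypothetical helper, not part of this patch: create a worker and pin it
     * to the given mask before it ever runs. */
    static struct task_struct *start_pinned_worker(int (*fn)(void *), void *arg,
                                                   const struct cpumask *mask)
    {
            struct task_struct *tsk = kthread_create(fn, arg, "pinned-worker");

            if (!IS_ERR(tsk)) {
                    kthread_bind_mask(tsk, mask);   /* task is still inactive here */
                    wake_up_process(tsk);
            }
            return tsk;
    }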
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index c40ebcc..6e53441 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -348,8 +348,10 @@
 {
 	struct klp_ops *ops;
 
-	WARN_ON(func->state != KLP_ENABLED);
-	WARN_ON(!func->old_addr);
+	if (WARN_ON(func->state != KLP_ENABLED))
+		return;
+	if (WARN_ON(!func->old_addr))
+		return;
 
 	ops = klp_find_ops(func->old_addr);
 	if (WARN_ON(!ops))
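The conversion above leans on WARN_ON() evaluating to the condition it tests, so one statement can both warn and guard an early return instead of letting the function run on with bad state. A tiny userspace stand-in (a GNU C statement expression, as in the kernel's own definition; the real macro also prints a backtrace and taints the kernel):

    #include <assert.h>
    #include <stdio.h>

    /* Simplified stand-in: report once, then hand the condition back to the caller. */
    #define WARN_ON(cond) ({                                        \
            int __ret = !!(cond);                                   \
            if (__ret)                                              \
                    fprintf(stderr, "WARN: %s\n", #cond);           \
            __ret;                                                  \
    })

    static int unpatch(void *ops)
    {
            if (WARN_ON(!ops))      /* warn *and* bail out */
                    return -1;
            return 0;
    }

    int main(void)
    {
            int dummy = 0;

            assert(unpatch(NULL) == -1);
            assert(unpatch(&dummy) == 0);
            return 0;
    }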
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index 7dd5c99..3694204 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -20,7 +20,6 @@
 obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
-obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index 6c5da483..f17a3e3 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -55,27 +55,29 @@
 {
 	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
 		cpu_relax_lowlatency();
-		cnts = smp_load_acquire((u32 *)&lock->cnts);
+		cnts = atomic_read_acquire(&lock->cnts);
 	}
 }
 
 /**
- * queue_read_lock_slowpath - acquire read lock of a queue rwlock
+ * queued_read_lock_slowpath - acquire read lock of a queue rwlock
  * @lock: Pointer to queue rwlock structure
+ * @cnts: Current qrwlock lock value
  */
-void queue_read_lock_slowpath(struct qrwlock *lock)
+void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
 {
-	u32 cnts;
-
 	/*
 	 * Readers come here when they cannot get the lock without waiting
 	 */
 	if (unlikely(in_interrupt())) {
 		/*
-		 * Readers in interrupt context will spin until the lock is
-		 * available without waiting in the queue.
+		 * Readers in interrupt context will get the lock immediately
+		 * if the writer is just waiting (not holding the lock yet).
+		 * The rspin_until_writer_unlock() function returns immediately
+		 * in this case. Otherwise, they will spin (with ACQUIRE
+		 * semantics) until the lock is available without waiting in
+		 * the queue.
 		 */
-		cnts = smp_load_acquire((u32 *)&lock->cnts);
 		rspin_until_writer_unlock(lock, cnts);
 		return;
 	}
@@ -87,16 +89,11 @@
 	arch_spin_lock(&lock->lock);
 
 	/*
-	 * At the head of the wait queue now, wait until the writer state
-	 * goes to 0 and then try to increment the reader count and get
-	 * the lock. It is possible that an incoming writer may steal the
-	 * lock in the interim, so it is necessary to check the writer byte
-	 * to make sure that the write lock isn't taken.
+	 * The ACQUIRE semantics of the following spinning code ensure
+	 * that accesses can't leak upwards out of our subsequent critical
+	 * section in the case that the lock is currently held for write.
 	 */
-	while (atomic_read(&lock->cnts) & _QW_WMASK)
-		cpu_relax_lowlatency();
-
-	cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
 	rspin_until_writer_unlock(lock, cnts);
 
 	/*
@@ -104,13 +101,13 @@
 	 */
 	arch_spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(queue_read_lock_slowpath);
+EXPORT_SYMBOL(queued_read_lock_slowpath);
 
 /**
- * queue_write_lock_slowpath - acquire write lock of a queue rwlock
+ * queued_write_lock_slowpath - acquire write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
-void queue_write_lock_slowpath(struct qrwlock *lock)
+void queued_write_lock_slowpath(struct qrwlock *lock)
 {
 	u32 cnts;
 
@@ -119,7 +116,7 @@
 
 	/* Try to acquire the lock directly if no reader is present */
 	if (!atomic_read(&lock->cnts) &&
-	    (atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0))
+	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
 		goto unlock;
 
 	/*
@@ -130,7 +127,7 @@
 		struct __qrwlock *l = (struct __qrwlock *)lock;
 
 		if (!READ_ONCE(l->wmode) &&
-		   (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
+		   (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
 			break;
 
 		cpu_relax_lowlatency();
@@ -140,8 +137,8 @@
 	for (;;) {
 		cnts = atomic_read(&lock->cnts);
 		if ((cnts == _QW_WAITING) &&
-		    (atomic_cmpxchg(&lock->cnts, _QW_WAITING,
-				    _QW_LOCKED) == _QW_WAITING))
+		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
+					    _QW_LOCKED) == _QW_WAITING))
 			break;
 
 		cpu_relax_lowlatency();
@@ -149,4 +146,4 @@
 unlock:
 	arch_spin_unlock(&lock->lock);
 }
-EXPORT_SYMBOL(queue_write_lock_slowpath);
+EXPORT_SYMBOL(queued_write_lock_slowpath);
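All of the rewritten slowpaths above manipulate one atomic word: the low byte is the writer state (0, _QW_WAITING or _QW_LOCKED) and the remaining bits count readers in _QR_BIAS units. The single-threaded userspace sketch below walks the writer's two-step hand-over on that encoding; the constants follow the qrwlock layout, while the queueing, the byte-wide _QW_WAITING store and the acquire/relaxed ordering details are deliberately left out.

    #include <assert.h>
    #include <stdatomic.h>

    #define _QW_WAITING 0x01u       /* a writer is waiting     */
    #define _QW_LOCKED  0xffu       /* a writer holds the lock */
    #define _QW_WMASK   0xffu       /* writer byte             */
    #define _QR_BIAS    0x100u      /* one reader              */

    int main(void)
    {
            atomic_uint cnts = 0;
            unsigned int expected, c;

            /* Reader fast path: bump the count (acquire), check the writer byte. */
            c = atomic_fetch_add_explicit(&cnts, _QR_BIAS, memory_order_acquire);
            assert(!(c & _QW_WMASK));                       /* read lock acquired */

            /* Writer: the uncontended 0 -> _QW_LOCKED cmpxchg fails with a reader in. */
            expected = 0;
            assert(!atomic_compare_exchange_strong(&cnts, &expected, _QW_LOCKED));

            /* So it publishes _QW_WAITING instead (full-word cmpxchg here for brevity). */
            expected = _QR_BIAS;
            assert(atomic_compare_exchange_strong(&cnts, &expected, _QR_BIAS | _QW_WAITING));

            /* Reader unlock drops its bias ... */
            atomic_fetch_sub(&cnts, _QR_BIAS);

            /* ... and the waiting writer upgrades _QW_WAITING -> _QW_LOCKED. */
            expected = _QW_WAITING;
            assert(atomic_compare_exchange_strong(&cnts, &expected, _QW_LOCKED));
            assert(atomic_load(&cnts) == _QW_LOCKED);
            return 0;
    }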
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 38c4920..337c881 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -239,8 +239,8 @@
 
 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
-
+static __always_inline void __pv_kick_node(struct qspinlock *lock,
+					   struct mcs_spinlock *node) { }
 static __always_inline void __pv_wait_head(struct qspinlock *lock,
 					   struct mcs_spinlock *node) { }
 
@@ -440,7 +440,7 @@
 		cpu_relax();
 
 	arch_mcs_spin_unlock_contended(&next->locked);
-	pv_kick_node(next);
+	pv_kick_node(lock, next);
 
 release:
 	/*
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index df19ae4..c8e6e9a 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -22,9 +22,14 @@
 
 #define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
 
+/*
+ * Queue node uses: vcpu_running & vcpu_halted.
+ * Queue head uses: vcpu_running & vcpu_hashed.
+ */
 enum vcpu_state {
 	vcpu_running = 0,
-	vcpu_halted,
+	vcpu_halted,		/* Used only in pv_wait_node */
+	vcpu_hashed,		/* = pv_hash'ed + vcpu_halted */
 };
 
 struct pv_node {
@@ -153,7 +158,8 @@
 
 /*
  * Wait for node->locked to become true, halt the vcpu after a short spin.
- * pv_kick_node() is used to wake the vcpu again.
+ * pv_kick_node() is used to set _Q_SLOW_VAL and fill in the hash table on
+ * its behalf.
  */
 static void pv_wait_node(struct mcs_spinlock *node)
 {
@@ -172,9 +178,9 @@
 		 *
 		 * [S] pn->state = vcpu_halted	  [S] next->locked = 1
 		 *     MB			      MB
-		 * [L] pn->locked		[RmW] pn->state = vcpu_running
+		 * [L] pn->locked		[RmW] pn->state = vcpu_hashed
 		 *
-		 * Matches the xchg() from pv_kick_node().
+		 * Matches the cmpxchg() from pv_kick_node().
 		 */
 		smp_store_mb(pn->state, vcpu_halted);
 
@@ -182,9 +188,10 @@
 			pv_wait(&pn->state, vcpu_halted);
 
 		/*
-		 * Reset the vCPU state to avoid unncessary CPU kicking
+		 * If pv_kick_node() changed us to vcpu_hashed, retain that value
+		 * so that pv_wait_head() knows to not also try to hash this lock.
 		 */
-		WRITE_ONCE(pn->state, vcpu_running);
+		cmpxchg(&pn->state, vcpu_halted, vcpu_running);
 
 		/*
 		 * If the locked flag is still not set after wakeup, it is a
@@ -194,6 +201,7 @@
 		 * MCS lock will be released soon.
 		 */
 	}
+
 	/*
 	 * By now our node->locked should be 1 and our caller will not actually
 	 * spin-wait for it. We do however rely on our caller to do a
@@ -202,24 +210,35 @@
 }
 
 /*
- * Called after setting next->locked = 1, used to wake those stuck in
- * pv_wait_node().
+ * Called after setting next->locked = 1 when we're the lock owner.
+ *
+ * Instead of waking the waiters stuck in pv_wait_node(), advance their state
+ * so that they wait in pv_wait_head(); this avoids a wake/sleep cycle.
  */
-static void pv_kick_node(struct mcs_spinlock *node)
+static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
+	struct __qspinlock *l = (void *)lock;
 
 	/*
-	 * Note that because node->locked is already set, this actual
-	 * mcs_spinlock entry could be re-used already.
+	 * If the vCPU is indeed halted, advance its state to match that of
+	 * pv_wait_node(). If OTOH this fails, the vCPU was running and will
+	 * observe its next->locked value and advance itself.
 	 *
-	 * This should be fine however, kicking people for no reason is
-	 * harmless.
-	 *
-	 * See the comment in pv_wait_node().
+	 * Matches with smp_store_mb() and cmpxchg() in pv_wait_node()
 	 */
-	if (xchg(&pn->state, vcpu_running) == vcpu_halted)
-		pv_kick(pn->cpu);
+	if (cmpxchg(&pn->state, vcpu_halted, vcpu_hashed) != vcpu_halted)
+		return;
+
+	/*
+	 * Put the lock into the hash table and set the _Q_SLOW_VAL.
+	 *
+	 * As this is the same vCPU that will check the _Q_SLOW_VAL value and
+	 * the hash table later on at unlock time, no atomic instruction is
+	 * needed.
+	 */
+	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+	(void)pv_hash(lock, pn);
 }
 
 /*
@@ -233,6 +252,13 @@
 	struct qspinlock **lp = NULL;
 	int loop;
 
+	/*
+	 * If pv_kick_node() already advanced our state, we don't need to
+	 * insert ourselves into the hash table anymore.
+	 */
+	if (READ_ONCE(pn->state) == vcpu_hashed)
+		lp = (struct qspinlock **)1;
+
 	for (;;) {
 		for (loop = SPIN_THRESHOLD; loop; loop--) {
 			if (!READ_ONCE(l->locked))
@@ -240,17 +266,22 @@
 			cpu_relax();
 		}
 
-		WRITE_ONCE(pn->state, vcpu_halted);
 		if (!lp) { /* ONCE */
+			WRITE_ONCE(pn->state, vcpu_hashed);
 			lp = pv_hash(lock, pn);
+
 			/*
-			 * lp must be set before setting _Q_SLOW_VAL
+			 * We must hash before setting _Q_SLOW_VAL, such that
+			 * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
+			 * we'll be sure to be able to observe our hash entry.
 			 *
-			 * [S] lp = lock                [RmW] l = l->locked = 0
-			 *     MB                             MB
-			 * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+			 *   [S] pn->state
+			 *   [S] <hash>                 [Rmw] l->locked == _Q_SLOW_VAL
+			 *       MB                           RMB
+			 * [RmW] l->locked = _Q_SLOW_VAL  [L] <unhash>
+			 *                                [L] pn->state
 			 *
-			 * Matches the cmpxchg() in __pv_queued_spin_unlock().
+			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
 			 */
 			if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
 				/*
@@ -287,24 +318,34 @@
 {
 	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
-	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	u8 locked;
 
 	/*
 	 * We must not unlock if SLOW, because in that case we must first
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	if (likely(lockval == _Q_LOCKED_VAL))
+	locked = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+	if (likely(locked == _Q_LOCKED_VAL))
 		return;
 
-	if (unlikely(lockval != _Q_SLOW_VAL)) {
-		if (debug_locks_silent)
-			return;
-		WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+	if (unlikely(locked != _Q_SLOW_VAL)) {
+		WARN(!debug_locks_silent,
+		     "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
+		     (unsigned long)lock, atomic_read(&lock->val));
 		return;
 	}
 
 	/*
+	 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
+	 * so we need a barrier to order the read of the node data in
+	 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
+	 *
+	 * Matches the cmpxchg() in pv_wait_head() setting _Q_SLOW_VAL.
+	 */
+	smp_rmb();
+
+	/*
 	 * Since the above failed to release, this must be the SLOW path.
 	 * Therefore start by looking up the blocked node and unhashing it.
 	 */
@@ -319,8 +360,11 @@
 	/*
 	 * At this point the memory pointed at by lock can be freed/reused,
 	 * however we can still use the pv_node to kick the CPU.
+	 * The other vCPU may not really be halted, but kicking an active
+	 * vCPU is harmless other than the additional latency in completing
+	 * the unlock.
 	 */
-	if (READ_ONCE(node->state) == vcpu_halted)
+	if (READ_ONCE(node->state) == vcpu_hashed)
 		pv_kick(node->cpu);
 }
 /*
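The rework above replaces the old "kick the halted vCPU" with a state hand-off: the lock holder tries to advance a still-halted waiter straight to vcpu_hashed (hashing the lock and writing _Q_SLOW_VAL on its behalf), while the waiter's own wakeup path only moves vcpu_halted back to vcpu_running, so a vcpu_hashed value survives and tells pv_wait_head() not to hash the lock again. A single-threaded userspace sketch of that cmpxchg protocol (the pv_wait()/pv_kick() hypercalls, the hashing itself and the barriers are omitted):

    #include <assert.h>
    #include <stdatomic.h>

    enum vcpu_state { vcpu_running, vcpu_halted, vcpu_hashed };

    /* Lock-holder side (pv_kick_node): only a still-halted waiter is advanced. */
    static int kicker_advance(atomic_int *state)
    {
            int expected = vcpu_halted;

            return atomic_compare_exchange_strong(state, &expected, vcpu_hashed);
    }

    /* Waiter side after pv_wait() returns: a vcpu_hashed value must survive. */
    static void waiter_wakeup(atomic_int *state)
    {
            int expected = vcpu_halted;

            atomic_compare_exchange_strong(state, &expected, vcpu_running);
    }

    int main(void)
    {
            atomic_int st = vcpu_halted;
            atomic_int st2 = vcpu_halted;

            /* Case 1: the holder advances the waiter before it wakes up. */
            assert(kicker_advance(&st));
            waiter_wakeup(&st);
            assert(atomic_load(&st) == vcpu_hashed);   /* pv_wait_head() skips hashing */

            /* Case 2: the waiter resumes on its own first. */
            waiter_wakeup(&st2);
            assert(!kicker_advance(&st2));             /* holder leaves it alone */
            assert(atomic_load(&st2) == vcpu_running);
            return 0;
    }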
diff --git a/kernel/locking/rtmutex-tester.c b/kernel/locking/rtmutex-tester.c
deleted file mode 100644
index 1d96dd0..0000000
--- a/kernel/locking/rtmutex-tester.c
+++ /dev/null
@@ -1,420 +0,0 @@
-/*
- * RT-Mutex-tester: scriptable tester for rt mutexes
- *
- * started by Thomas Gleixner:
- *
- *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
- *
- */
-#include <linux/device.h>
-#include <linux/kthread.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/sched/rt.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/freezer.h>
-#include <linux/stat.h>
-
-#include "rtmutex.h"
-
-#define MAX_RT_TEST_THREADS	8
-#define MAX_RT_TEST_MUTEXES	8
-
-static spinlock_t rttest_lock;
-static atomic_t rttest_event;
-
-struct test_thread_data {
-	int			opcode;
-	int			opdata;
-	int			mutexes[MAX_RT_TEST_MUTEXES];
-	int			event;
-	struct device		dev;
-};
-
-static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
-static struct task_struct *threads[MAX_RT_TEST_THREADS];
-static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
-
-enum test_opcodes {
-	RTTEST_NOP = 0,
-	RTTEST_SCHEDOT,		/* 1 Sched other, data = nice */
-	RTTEST_SCHEDRT,		/* 2 Sched fifo, data = prio */
-	RTTEST_LOCK,		/* 3 Lock uninterruptible, data = lockindex */
-	RTTEST_LOCKNOWAIT,	/* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
-	RTTEST_LOCKINT,		/* 5 Lock interruptible, data = lockindex */
-	RTTEST_LOCKINTNOWAIT,	/* 6 Lock interruptible no wait in wakeup, data = lockindex */
-	RTTEST_LOCKCONT,	/* 7 Continue locking after the wakeup delay */
-	RTTEST_UNLOCK,		/* 8 Unlock, data = lockindex */
-	/* 9, 10 - reserved for BKL commemoration */
-	RTTEST_SIGNAL = 11,	/* 11 Signal other test thread, data = thread id */
-	RTTEST_RESETEVENT = 98,	/* 98 Reset event counter */
-	RTTEST_RESET = 99,	/* 99 Reset all pending operations */
-};
-
-static int handle_op(struct test_thread_data *td, int lockwakeup)
-{
-	int i, id, ret = -EINVAL;
-
-	switch(td->opcode) {
-
-	case RTTEST_NOP:
-		return 0;
-
-	case RTTEST_LOCKCONT:
-		td->mutexes[td->opdata] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		return 0;
-
-	case RTTEST_RESET:
-		for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
-			if (td->mutexes[i] == 4) {
-				rt_mutex_unlock(&mutexes[i]);
-				td->mutexes[i] = 0;
-			}
-		}
-		return 0;
-
-	case RTTEST_RESETEVENT:
-		atomic_set(&rttest_event, 0);
-		return 0;
-
-	default:
-		if (lockwakeup)
-			return ret;
-	}
-
-	switch(td->opcode) {
-
-	case RTTEST_LOCK:
-	case RTTEST_LOCKNOWAIT:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
-			return ret;
-
-		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		rt_mutex_lock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = 4;
-		return 0;
-
-	case RTTEST_LOCKINT:
-	case RTTEST_LOCKINTNOWAIT:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
-			return ret;
-
-		td->mutexes[id] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = ret ? 0 : 4;
-		return ret ? -EINTR : 0;
-
-	case RTTEST_UNLOCK:
-		id = td->opdata;
-		if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
-			return ret;
-
-		td->event = atomic_add_return(1, &rttest_event);
-		rt_mutex_unlock(&mutexes[id]);
-		td->event = atomic_add_return(1, &rttest_event);
-		td->mutexes[id] = 0;
-		return 0;
-
-	default:
-		break;
-	}
-	return ret;
-}
-
-/*
- * Schedule replacement for rtsem_down(). Only called for threads with
- * PF_MUTEX_TESTER set.
- *
- * This allows us to have finegrained control over the event flow.
- *
- */
-void schedule_rt_mutex_test(struct rt_mutex *mutex)
-{
-	int tid, op, dat;
-	struct test_thread_data *td;
-
-	/* We have to lookup the task */
-	for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
-		if (threads[tid] == current)
-			break;
-	}
-
-	BUG_ON(tid == MAX_RT_TEST_THREADS);
-
-	td = &thread_data[tid];
-
-	op = td->opcode;
-	dat = td->opdata;
-
-	switch (op) {
-	case RTTEST_LOCK:
-	case RTTEST_LOCKINT:
-	case RTTEST_LOCKNOWAIT:
-	case RTTEST_LOCKINTNOWAIT:
-		if (mutex != &mutexes[dat])
-			break;
-
-		if (td->mutexes[dat] != 1)
-			break;
-
-		td->mutexes[dat] = 2;
-		td->event = atomic_add_return(1, &rttest_event);
-		break;
-
-	default:
-		break;
-	}
-
-	schedule();
-
-
-	switch (op) {
-	case RTTEST_LOCK:
-	case RTTEST_LOCKINT:
-		if (mutex != &mutexes[dat])
-			return;
-
-		if (td->mutexes[dat] != 2)
-			return;
-
-		td->mutexes[dat] = 3;
-		td->event = atomic_add_return(1, &rttest_event);
-		break;
-
-	case RTTEST_LOCKNOWAIT:
-	case RTTEST_LOCKINTNOWAIT:
-		if (mutex != &mutexes[dat])
-			return;
-
-		if (td->mutexes[dat] != 2)
-			return;
-
-		td->mutexes[dat] = 1;
-		td->event = atomic_add_return(1, &rttest_event);
-		return;
-
-	default:
-		return;
-	}
-
-	td->opcode = 0;
-
-	for (;;) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (td->opcode > 0) {
-			int ret;
-
-			set_current_state(TASK_RUNNING);
-			ret = handle_op(td, 1);
-			set_current_state(TASK_INTERRUPTIBLE);
-			if (td->opcode == RTTEST_LOCKCONT)
-				break;
-			td->opcode = ret;
-		}
-
-		/* Wait for the next command to be executed */
-		schedule();
-	}
-
-	/* Restore previous command and data */
-	td->opcode = op;
-	td->opdata = dat;
-}
-
-static int test_func(void *data)
-{
-	struct test_thread_data *td = data;
-	int ret;
-
-	current->flags |= PF_MUTEX_TESTER;
-	set_freezable();
-	allow_signal(SIGHUP);
-
-	for(;;) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (td->opcode > 0) {
-			set_current_state(TASK_RUNNING);
-			ret = handle_op(td, 0);
-			set_current_state(TASK_INTERRUPTIBLE);
-			td->opcode = ret;
-		}
-
-		/* Wait for the next command to be executed */
-		schedule();
-		try_to_freeze();
-
-		if (signal_pending(current))
-			flush_signals(current);
-
-		if(kthread_should_stop())
-			break;
-	}
-	return 0;
-}
-
-/**
- * sysfs_test_command - interface for test commands
- * @dev:	thread reference
- * @buf:	command for actual step
- * @count:	length of buffer
- *
- * command syntax:
- *
- * opcode:data
- */
-static ssize_t sysfs_test_command(struct device *dev, struct device_attribute *attr,
-				  const char *buf, size_t count)
-{
-	struct sched_param schedpar;
-	struct test_thread_data *td;
-	char cmdbuf[32];
-	int op, dat, tid, ret;
-
-	td = container_of(dev, struct test_thread_data, dev);
-	tid = td->dev.id;
-
-	/* strings from sysfs write are not 0 terminated! */
-	if (count >= sizeof(cmdbuf))
-		return -EINVAL;
-
-	/* strip of \n: */
-	if (buf[count-1] == '\n')
-		count--;
-	if (count < 1)
-		return -EINVAL;
-
-	memcpy(cmdbuf, buf, count);
-	cmdbuf[count] = 0;
-
-	if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
-		return -EINVAL;
-
-	switch (op) {
-	case RTTEST_SCHEDOT:
-		schedpar.sched_priority = 0;
-		ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
-		if (ret)
-			return ret;
-		set_user_nice(current, 0);
-		break;
-
-	case RTTEST_SCHEDRT:
-		schedpar.sched_priority = dat;
-		ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
-		if (ret)
-			return ret;
-		break;
-
-	case RTTEST_SIGNAL:
-		send_sig(SIGHUP, threads[tid], 0);
-		break;
-
-	default:
-		if (td->opcode > 0)
-			return -EBUSY;
-		td->opdata = dat;
-		td->opcode = op;
-		wake_up_process(threads[tid]);
-	}
-
-	return count;
-}
-
-/**
- * sysfs_test_status - sysfs interface for rt tester
- * @dev:	thread to query
- * @buf:	char buffer to be filled with thread status info
- */
-static ssize_t sysfs_test_status(struct device *dev, struct device_attribute *attr,
-				 char *buf)
-{
-	struct test_thread_data *td;
-	struct task_struct *tsk;
-	char *curr = buf;
-	int i;
-
-	td = container_of(dev, struct test_thread_data, dev);
-	tsk = threads[td->dev.id];
-
-	spin_lock(&rttest_lock);
-
-	curr += sprintf(curr,
-		"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
-		td->opcode, td->event, tsk->state,
-			(MAX_RT_PRIO - 1) - tsk->prio,
-			(MAX_RT_PRIO - 1) - tsk->normal_prio,
-		tsk->pi_blocked_on);
-
-	for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
-		curr += sprintf(curr, "%d", td->mutexes[i]);
-
-	spin_unlock(&rttest_lock);
-
-	curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
-			mutexes[td->dev.id].owner);
-
-	return curr - buf;
-}
-
-static DEVICE_ATTR(status, S_IRUSR, sysfs_test_status, NULL);
-static DEVICE_ATTR(command, S_IWUSR, NULL, sysfs_test_command);
-
-static struct bus_type rttest_subsys = {
-	.name = "rttest",
-	.dev_name = "rttest",
-};
-
-static int init_test_thread(int id)
-{
-	thread_data[id].dev.bus = &rttest_subsys;
-	thread_data[id].dev.id = id;
-
-	threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
-	if (IS_ERR(threads[id]))
-		return PTR_ERR(threads[id]);
-
-	return device_register(&thread_data[id].dev);
-}
-
-static int init_rttest(void)
-{
-	int ret, i;
-
-	spin_lock_init(&rttest_lock);
-
-	for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
-		rt_mutex_init(&mutexes[i]);
-
-	ret = subsys_system_register(&rttest_subsys, NULL);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
-		ret = init_test_thread(i);
-		if (ret)
-			break;
-		ret = device_create_file(&thread_data[i].dev, &dev_attr_status);
-		if (ret)
-			break;
-		ret = device_create_file(&thread_data[i].dev, &dev_attr_command);
-		if (ret)
-			break;
-	}
-
-	printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
-
-	return ret;
-}
-
-device_initcall(init_rttest);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 5674b07..7781d80 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1120,7 +1120,7 @@
 
 		debug_rt_mutex_print_deadlock(waiter);
 
-		schedule_rt_mutex(lock);
+		schedule();
 
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 7844f8f..4f5f83c 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -15,28 +15,6 @@
 #include <linux/rtmutex.h>
 
 /*
- * The rtmutex in kernel tester is independent of rtmutex debugging. We
- * call schedule_rt_mutex_test() instead of schedule() for the tasks which
- * belong to the tester. That way we can delay the wakeup path of those
- * threads to provoke lock stealing and testing of  complex boosting scenarios.
- */
-#ifdef CONFIG_RT_MUTEX_TESTER
-
-extern void schedule_rt_mutex_test(struct rt_mutex *lock);
-
-#define schedule_rt_mutex(_lock)				\
-  do {								\
-	if (!(current->flags & PF_MUTEX_TESTER))		\
-		schedule();					\
-	else							\
-		schedule_rt_mutex_test(_lock);			\
-  } while (0)
-
-#else
-# define schedule_rt_mutex(_lock)			schedule()
-#endif
-
-/*
  * This is the control structure for tasks blocked on a rt_mutex,
  * which is allocated on the kernel stack on of the blocked task.
  *
diff --git a/kernel/notifier.c b/kernel/notifier.c
index ae9fc7c..fd2c9ac 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -544,6 +544,8 @@
 		.signr	= sig,
 
 	};
+	RCU_LOCKDEP_WARN(!rcu_is_watching(),
+			   "notify_die called but RCU thinks we're quiescent");
 	return atomic_notifier_call_chain(&die_chain, val, &args);
 }
 NOKPROBE_SYMBOL(notify_die);
diff --git a/kernel/pid.c b/kernel/pid.c
index 4fd07d5..ca36879 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -451,9 +451,8 @@
  */
 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
-	rcu_lockdep_assert(rcu_read_lock_held(),
-			   "find_task_by_pid_ns() needs rcu_read_lock()"
-			   " protection");
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+			 "find_task_by_pid_ns() needs rcu_read_lock() protection");
 	return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
 }
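This conversion, repeated in the srcu, tiny and tree hunks further down, is mechanical: rcu_lockdep_assert() complained when its condition was false, RCU_LOCKDEP_WARN() complains when its condition is true, so every call site negates the condition (and a chain of negated terms joined by && becomes a chain of positive terms joined by ||, per De Morgan). A tiny userspace check of that equivalence with simplified stand-ins for the two macros:

    #include <assert.h>
    #include <stdbool.h>

    /* Simplified models returning whether each macro would have complained. */
    #define rcu_lockdep_assert(c)  (!(c))    /* old: fires when condition is false */
    #define RCU_LOCKDEP_WARN(c)    (!!(c))   /* new: fires when condition is true  */

    int main(void)
    {
            for (int held = 0; held <= 1; held++) {
                    bool old_fires = rcu_lockdep_assert(held);
                    bool new_fires = RCU_LOCKDEP_WARN(!held);  /* negated at the call site */

                    assert(old_fires == new_fires);
            }
            return 0;
    }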
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9e30231..02e8dfa 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,6 +18,16 @@
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config SUSPEND_SKIP_SYNC
+	bool "Skip kernel's sys_sync() on suspend to RAM/standby"
+	depends on SUSPEND
+	depends on EXPERT
+	help
+	  Skip the kernel sys_sync() before freezing user processes.
+	  Some systems prefer not to pay this cost on every invocation
+	  of suspend, or they are content with invoking sync() from
+	  user-space before invoking suspend.  Say Y if that's your case.
+
 config HIBERNATE_CALLBACKS
 	bool
 
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 53266b7..7e4cda4 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -484,11 +484,13 @@
 	if (state == PM_SUSPEND_FREEZE)
 		freeze_begin();
 
+#ifndef CONFIG_SUSPEND_SKIP_SYNC
 	trace_suspend_resume(TPS("sync_filesystems"), 0, true);
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
 	printk("done.\n");
 	trace_suspend_resume(TPS("sync_filesystems"), 0, false);
+#endif
 
 	pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
 	error = suspend_prepare(state);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 2f30ca9..b2066fb 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -227,27 +227,23 @@
 	hb->error = 0;
 }
 
-static void hib_end_io(struct bio *bio, int error)
+static void hib_end_io(struct bio *bio)
 {
 	struct hib_bio_batch *hb = bio->bi_private;
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (!uptodate || error) {
+	if (bio->bi_error) {
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 				imajor(bio->bi_bdev->bd_inode),
 				iminor(bio->bi_bdev->bd_inode),
 				(unsigned long long)bio->bi_iter.bi_sector);
-
-		if (!error)
-			error = -EIO;
 	}
 
 	if (bio_data_dir(bio) == WRITE)
 		put_page(page);
 
-	if (error && !hb->error)
-		hb->error = error;
+	if (bio->bi_error && !hb->error)
+		hb->error = bio->bi_error;
 	if (atomic_dec_and_test(&hb->count))
 		wake_up(&hb->wait);
 
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 019069c..1896386 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -17,6 +17,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "power.h"
 
@@ -83,7 +84,9 @@
 #define WL_GC_COUNT_MAX	100
 #define WL_GC_TIME_SEC	300
 
+static void __wakelocks_gc(struct work_struct *work);
 static LIST_HEAD(wakelocks_lru_list);
+static DECLARE_WORK(wakelock_work, __wakelocks_gc);
 static unsigned int wakelocks_gc_count;
 
 static inline void wakelocks_lru_add(struct wakelock *wl)
@@ -96,13 +99,12 @@
 	list_move(&wl->lru, &wakelocks_lru_list);
 }
 
-static void wakelocks_gc(void)
+static void __wakelocks_gc(struct work_struct *work)
 {
 	struct wakelock *wl, *aux;
 	ktime_t now;
 
-	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
-		return;
+	mutex_lock(&wakelocks_lock);
 
 	now = ktime_get();
 	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
@@ -127,6 +129,16 @@
 		}
 	}
 	wakelocks_gc_count = 0;
+
+	mutex_unlock(&wakelocks_lock);
+}
+
+static void wakelocks_gc(void)
+{
+	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
+		return;
+
+	schedule_work(&wakelock_work);
 }
 #else /* !CONFIG_PM_WAKELOCKS_GC */
 static inline void wakelocks_lru_add(struct wakelock *wl) {}
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 59e3268..7719295 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -635,6 +635,8 @@
 	.deferred_free	= rcu_sched_torture_deferred_free,
 	.sync		= synchronize_sched,
 	.exp_sync	= synchronize_sched_expedited,
+	.get_state	= get_state_synchronize_sched,
+	.cond_sync	= cond_synchronize_sched,
 	.call		= call_rcu_sched,
 	.cb_barrier	= rcu_barrier_sched,
 	.fqs		= rcu_sched_force_quiescent_state,
@@ -684,10 +686,20 @@
 
 #define RCUTORTURE_TASKS_OPS &tasks_ops,
 
+static bool __maybe_unused torturing_tasks(void)
+{
+	return cur_ops == &tasks_ops;
+}
+
 #else /* #ifdef CONFIG_TASKS_RCU */
 
 #define RCUTORTURE_TASKS_OPS
 
+static bool torturing_tasks(void)
+{
+	return false;
+}
+
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /*
@@ -823,9 +835,7 @@
 	}
 	if (err) {
 		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
-		while (!torture_must_stop())
-			schedule_timeout_interruptible(HZ);
-		return 0;
+		goto wait_for_stop;
 	}
 	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
 	do {
@@ -844,6 +854,7 @@
 		stutter_wait("rcu_torture_cbflood");
 	} while (!torture_must_stop());
 	vfree(rhp);
+wait_for_stop:
 	torture_kthread_stopping("rcu_torture_cbflood");
 	return 0;
 }
@@ -1088,7 +1099,8 @@
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
-				  srcu_read_lock_held(srcu_ctlp));
+				  srcu_read_lock_held(srcu_ctlp) ||
+				  torturing_tasks());
 	if (p == NULL) {
 		/* Leave because rcu_torture_writer is not yet underway */
 		cur_ops->readunlock(idx);
@@ -1162,7 +1174,8 @@
 		p = rcu_dereference_check(rcu_torture_current,
 					  rcu_read_lock_bh_held() ||
 					  rcu_read_lock_sched_held() ||
-					  srcu_read_lock_held(srcu_ctlp));
+					  srcu_read_lock_held(srcu_ctlp) ||
+					  torturing_tasks());
 		if (p == NULL) {
 			/* Wait for rcu_torture_writer to get underway */
 			cur_ops->readunlock(idx);
@@ -1507,7 +1520,7 @@
 	int i;
 	int ret;
 
-	if (n_barrier_cbs == 0)
+	if (n_barrier_cbs <= 0)
 		return 0;
 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
 		pr_alert("%s" TORTURE_FLAG
@@ -1786,12 +1799,15 @@
 					  writer_task);
 	if (firsterr)
 		goto unwind;
-	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
-				   GFP_KERNEL);
-	if (fakewriter_tasks == NULL) {
-		VERBOSE_TOROUT_ERRSTRING("out of memory");
-		firsterr = -ENOMEM;
-		goto unwind;
+	if (nfakewriters > 0) {
+		fakewriter_tasks = kzalloc(nfakewriters *
+					   sizeof(fakewriter_tasks[0]),
+					   GFP_KERNEL);
+		if (fakewriter_tasks == NULL) {
+			VERBOSE_TOROUT_ERRSTRING("out of memory");
+			firsterr = -ENOMEM;
+			goto unwind;
+		}
 	}
 	for (i = 0; i < nfakewriters; i++) {
 		firsterr = torture_create_kthread(rcu_torture_fakewriter,
@@ -1818,7 +1834,7 @@
 		if (firsterr)
 			goto unwind;
 	}
-	if (test_no_idle_hz) {
+	if (test_no_idle_hz && shuffle_interval > 0) {
 		firsterr = torture_shuffle_init(shuffle_interval * HZ);
 		if (firsterr)
 			goto unwind;
diff --git a/kernel/rcu/srcu.c b/kernel/rcu/srcu.c
index fb33d35..d3fcb2e 100644
--- a/kernel/rcu/srcu.c
+++ b/kernel/rcu/srcu.c
@@ -252,14 +252,15 @@
 }
 
 /**
- * srcu_readers_active - returns approximate number of readers.
+ * srcu_readers_active - returns true if there are readers, and false
+ *                       otherwise
  * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
  *
  * Note that this is not an atomic primitive, and can therefore suffer
  * severe errors when invoked on an active srcu_struct.  That said, it
  * can be useful as an error check at cleanup time.
  */
-static int srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *sp)
 {
 	int cpu;
 	unsigned long sum = 0;
@@ -414,11 +415,11 @@
 	struct rcu_head *head = &rcu.head;
 	bool done = false;
 
-	rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
-			   !lock_is_held(&rcu_bh_lock_map) &&
-			   !lock_is_held(&rcu_lock_map) &&
-			   !lock_is_held(&rcu_sched_lock_map),
-			   "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
+	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+			 lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
 
 	might_sleep();
 	init_completion(&rcu.completion);
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index c291bd6..d047105 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -191,10 +191,10 @@
  */
 void synchronize_sched(void)
 {
-	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-			   !lock_is_held(&rcu_lock_map) &&
-			   !lock_is_held(&rcu_sched_lock_map),
-			   "Illegal synchronize_sched() in RCU read-side critical section");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_sched() in RCU read-side critical section");
 	cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 65137bc..9f75f25 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -70,6 +70,8 @@
 
 static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
 static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
+static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
+static struct lock_class_key rcu_exp_sched_class[RCU_NUM_LVLS];
 
 /*
  * In order to export the rcu_state name to the tracing tools, it
@@ -124,13 +126,8 @@
 static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
 module_param(rcu_fanout_leaf, int, 0444);
 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
-static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
-	NUM_RCU_LVL_0,
-	NUM_RCU_LVL_1,
-	NUM_RCU_LVL_2,
-	NUM_RCU_LVL_3,
-	NUM_RCU_LVL_4,
-};
+/* Number of rcu_nodes at specified level. */
+static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 
 /*
@@ -649,12 +646,12 @@
 	 * It is illegal to enter an extended quiescent state while
 	 * in an RCU read-side critical section.
 	 */
-	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
-			   "Illegal idle entry in RCU read-side critical section.");
-	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
-			   "Illegal idle entry in RCU-bh read-side critical section.");
-	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
-			   "Illegal idle entry in RCU-sched read-side critical section.");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
+			 "Illegal idle entry in RCU read-side critical section.");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
+			 "Illegal idle entry in RCU-bh read-side critical section.");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
+			 "Illegal idle entry in RCU-sched read-side critical section.");
 }
 
 /*
@@ -701,7 +698,7 @@
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 /**
  * rcu_user_enter - inform RCU that we are resuming userspace.
  *
@@ -714,7 +711,7 @@
 {
 	rcu_eqs_enter(1);
 }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 /**
  * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
@@ -828,7 +825,7 @@
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_NO_HZ_FULL
 /**
  * rcu_user_exit - inform RCU that we are exiting userspace.
  *
@@ -839,7 +836,7 @@
 {
 	rcu_eqs_exit(1);
 }
-#endif /* CONFIG_RCU_USER_QS */
+#endif /* CONFIG_NO_HZ_FULL */
 
 /**
  * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
@@ -978,9 +975,9 @@
 {
 	bool ret;
 
-	preempt_disable();
+	preempt_disable_notrace();
 	ret = __rcu_is_watching();
-	preempt_enable();
+	preempt_enable_notrace();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(rcu_is_watching);
@@ -1178,9 +1175,11 @@
 	j = jiffies;
 	gpa = READ_ONCE(rsp->gp_activity);
 	if (j - gpa > 2 * HZ)
-		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x\n",
+		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x s%d ->state=%#lx\n",
 		       rsp->name, j - gpa,
-		       rsp->gpnum, rsp->completed, rsp->gp_flags);
+		       rsp->gpnum, rsp->completed,
+		       rsp->gp_flags, rsp->gp_state,
+		       rsp->gp_kthread ? rsp->gp_kthread->state : 0);
 }
 
 /*
@@ -1906,6 +1905,26 @@
 }
 
 /*
+ * Helper function for wait_event_interruptible_timeout() wakeup
+ * at force-quiescent-state time.
+ */
+static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
+{
+	struct rcu_node *rnp = rcu_get_root(rsp);
+
+	/* Someone like call_rcu() requested a force-quiescent-state scan. */
+	*gfp = READ_ONCE(rsp->gp_flags);
+	if (*gfp & RCU_GP_FLAG_FQS)
+		return true;
+
+	/* The current grace period has completed. */
+	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
+		return true;
+
+	return false;
+}
+
+/*
  * Do one round of quiescent-state forcing.
  */
 static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
@@ -2041,6 +2060,7 @@
 			wait_event_interruptible(rsp->gp_wq,
 						 READ_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
+			rsp->gp_state = RCU_GP_DONE_GPS;
 			/* Locking provides needed memory barrier. */
 			if (rcu_gp_init(rsp))
 				break;
@@ -2068,11 +2088,8 @@
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
 			ret = wait_event_interruptible_timeout(rsp->gp_wq,
-					((gf = READ_ONCE(rsp->gp_flags)) &
-					 RCU_GP_FLAG_FQS) ||
-					(!READ_ONCE(rnp->qsmask) &&
-					 !rcu_preempt_blocked_readers_cgp(rnp)),
-					j);
+					rcu_gp_fqs_check_wake(rsp, &gf), j);
+			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
 			/* If grace period done, leave loop. */
 			if (!READ_ONCE(rnp->qsmask) &&
@@ -2110,7 +2127,9 @@
 		}
 
 		/* Handle grace-period end. */
+		rsp->gp_state = RCU_GP_CLEANUP;
 		rcu_gp_cleanup(rsp);
+		rsp->gp_state = RCU_GP_CLEANED;
 	}
 }
 
@@ -3161,10 +3180,10 @@
  */
 void synchronize_sched(void)
 {
-	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-			   !lock_is_held(&rcu_lock_map) &&
-			   !lock_is_held(&rcu_sched_lock_map),
-			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_sched() in RCU-sched read-side critical section");
 	if (rcu_blocking_is_gp())
 		return;
 	if (rcu_gp_is_expedited())
@@ -3188,10 +3207,10 @@
  */
 void synchronize_rcu_bh(void)
 {
-	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-			   !lock_is_held(&rcu_lock_map) &&
-			   !lock_is_held(&rcu_sched_lock_map),
-			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
 	if (rcu_blocking_is_gp())
 		return;
 	if (rcu_gp_is_expedited())
@@ -3253,23 +3272,247 @@
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
-static int synchronize_sched_expedited_cpu_stop(void *data)
+/**
+ * get_state_synchronize_sched - Snapshot current RCU-sched state
+ *
+ * Returns a cookie that is used by a later call to cond_synchronize_sched()
+ * to determine whether or not a full grace period has elapsed in the
+ * meantime.
+ */
+unsigned long get_state_synchronize_sched(void)
 {
 	/*
-	 * There must be a full memory barrier on each affected CPU
-	 * between the time that try_stop_cpus() is called and the
-	 * time that it returns.
-	 *
-	 * In the current initial implementation of cpu_stop, the
-	 * above condition is already met when the control reaches
-	 * this point and the following smp_mb() is not strictly
-	 * necessary.  Do smp_mb() anyway for documentation and
-	 * robustness against future implementation changes.
+	 * Any prior manipulation of RCU-protected data must happen
+	 * before the load from ->gpnum.
 	 */
-	smp_mb(); /* See above comment block. */
+	smp_mb();  /* ^^^ */
+
+	/*
+	 * Make sure this load happens before the purportedly
+	 * time-consuming work between get_state_synchronize_sched()
+	 * and cond_synchronize_sched().
+	 */
+	return smp_load_acquire(&rcu_sched_state.gpnum);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
+
+/**
+ * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
+ *
+ * @oldstate: return value from earlier call to get_state_synchronize_sched()
+ *
+ * If a full RCU-sched grace period has elapsed since the earlier call to
+ * get_state_synchronize_sched(), just return.  Otherwise, invoke
+ * synchronize_sched() to wait for a full grace period.
+ *
+ * Yes, this function does not take counter wrap into account.  But
+ * counter wrap is harmless.  If the counter wraps, we have waited for
+ * more than 2 billion grace periods (and way more on a 64-bit system!),
+ * so waiting for one additional grace period should be just fine.
+ */
+void cond_synchronize_sched(unsigned long oldstate)
+{
+	unsigned long newstate;
+
+	/*
+	 * Ensure that this load happens before any RCU-destructive
+	 * actions the caller might carry out after we return.
+	 */
+	newstate = smp_load_acquire(&rcu_sched_state.completed);
+	if (ULONG_CMP_GE(oldstate, newstate))
+		synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(cond_synchronize_sched);
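Taken together, get_state_synchronize_sched()/cond_synchronize_sched() let an updater skip a grace period that already elapsed while it was busy with other work. A hypothetical kernel-style calling pattern (the global pointer, struct and helper below are made up for illustration):

    /* Hypothetical updater: unpublish the old object, snapshot the GP counter,
     * do unrelated expensive work, then wait only if no full grace period has
     * passed in the meantime. */
    static void example_retire(struct foo *old)
    {
            unsigned long cookie;

            rcu_assign_pointer(example_global, NULL);       /* made-up global pointer */
            cookie = get_state_synchronize_sched();

            do_other_expensive_work();                      /* made-up helper */

            cond_synchronize_sched(cookie);                 /* often a no-op by now */
            kfree(old);
    }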
+
+/* Adjust sequence number for start of update-side operation. */
+static void rcu_seq_start(unsigned long *sp)
+{
+	WRITE_ONCE(*sp, *sp + 1);
+	smp_mb(); /* Ensure update-side operation after counter increment. */
+	WARN_ON_ONCE(!(*sp & 0x1));
+}
+
+/* Adjust sequence number for end of update-side operation. */
+static void rcu_seq_end(unsigned long *sp)
+{
+	smp_mb(); /* Ensure update-side operation before counter increment. */
+	WRITE_ONCE(*sp, *sp + 1);
+	WARN_ON_ONCE(*sp & 0x1);
+}
+
+/* Take a snapshot of the update side's sequence number. */
+static unsigned long rcu_seq_snap(unsigned long *sp)
+{
+	unsigned long s;
+
+	smp_mb(); /* Caller's modifications seen first by other CPUs. */
+	s = (READ_ONCE(*sp) + 3) & ~0x1;
+	smp_mb(); /* Above access must not bleed into critical section. */
+	return s;
+}
+
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not a
+ * full update-side operation has occurred.
+ */
+static bool rcu_seq_done(unsigned long *sp, unsigned long s)
+{
+	return ULONG_CMP_GE(READ_ONCE(*sp), s);
+}
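The rcu_seq_*() helpers above form a seqlock-style even/odd counter for expedited grace periods: an odd value means an update is in flight, and a snapshot is satisfied once the counter reaches the next even value that must cover a full operation started after the snapshot. A standalone model of just the arithmetic (the smp_mb() ordering and the wrap-safe ULONG_CMP_GE() of the real helpers are dropped):

    #include <assert.h>

    static void seq_start(unsigned long *sp) { ++*sp; assert(*sp & 1);    }
    static void seq_end(unsigned long *sp)   { ++*sp; assert(!(*sp & 1)); }
    static unsigned long seq_snap(unsigned long *sp)  { return (*sp + 3) & ~0x1UL; }
    static int seq_done(unsigned long *sp, unsigned long s) { return *sp >= s; }

    int main(void)
    {
            unsigned long seq = 0, s;

            /* Snapshot taken while idle: one full start/end pair satisfies it. */
            s = seq_snap(&seq);
            seq_start(&seq); seq_end(&seq);
            assert(seq_done(&seq, s));

            /* Snapshot taken while an operation is in flight: that operation may
             * predate our request, so one additional full operation is required. */
            seq_start(&seq);
            s = seq_snap(&seq);
            seq_end(&seq);
            assert(!seq_done(&seq, s));
            seq_start(&seq); seq_end(&seq);
            assert(seq_done(&seq, s));
            return 0;
    }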
+
+/* Wrapper functions for expedited grace periods.  */
+static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
+{
+	rcu_seq_start(&rsp->expedited_sequence);
+}
+static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
+{
+	rcu_seq_end(&rsp->expedited_sequence);
+	smp_mb(); /* Ensure that consecutive grace periods serialize. */
+}
+static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
+{
+	return rcu_seq_snap(&rsp->expedited_sequence);
+}
+static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
+{
+	return rcu_seq_done(&rsp->expedited_sequence, s);
+}
+
+/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
+static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
+			       struct rcu_data *rdp,
+			       atomic_long_t *stat, unsigned long s)
+{
+	if (rcu_exp_gp_seq_done(rsp, s)) {
+		if (rnp)
+			mutex_unlock(&rnp->exp_funnel_mutex);
+		else if (rdp)
+			mutex_unlock(&rdp->exp_funnel_mutex);
+		/* Ensure test happens before caller kfree(). */
+		smp_mb__before_atomic(); /* ^^^ */
+		atomic_long_inc(stat);
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Funnel-lock acquisition for expedited grace periods.  Returns a
+ * pointer to the root rcu_node structure, or NULL if some other
+ * task did the expedited grace period for us.
+ */
+static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+{
+	struct rcu_data *rdp;
+	struct rcu_node *rnp0;
+	struct rcu_node *rnp1 = NULL;
+
+	/*
+	 * First try directly acquiring the root lock in order to reduce
+	 * latency in the common case where expedited grace periods are
+	 * rare.  We check mutex_is_locked() to avoid pathological levels of
+	 * memory contention on ->exp_funnel_mutex in the heavy-load case.
+	 */
+	rnp0 = rcu_get_root(rsp);
+	if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
+		if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
+			if (sync_exp_work_done(rsp, rnp0, NULL,
+					       &rsp->expedited_workdone0, s))
+				return NULL;
+			return rnp0;
+		}
+	}
+
+	/*
+	 * Each pass through the following loop works its way
+	 * up the rcu_node tree, returning early if some other task did
+	 * the work for us, or otherwise falling through to end up holding
+	 * the root rnp's
+	 * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
+	 * can be inexact, as it is just promoting locality and is not
+	 * strictly needed for correctness.
+	 */
+	rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+	if (sync_exp_work_done(rsp, NULL, NULL, &rsp->expedited_workdone1, s))
+		return NULL;
+	mutex_lock(&rdp->exp_funnel_mutex);
+	rnp0 = rdp->mynode;
+	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
+		if (sync_exp_work_done(rsp, rnp1, rdp,
+				       &rsp->expedited_workdone2, s))
+			return NULL;
+		mutex_lock(&rnp0->exp_funnel_mutex);
+		if (rnp1)
+			mutex_unlock(&rnp1->exp_funnel_mutex);
+		else
+			mutex_unlock(&rdp->exp_funnel_mutex);
+		rnp1 = rnp0;
+	}
+	if (sync_exp_work_done(rsp, rnp1, rdp,
+			       &rsp->expedited_workdone3, s))
+		return NULL;
+	return rnp1;
+}
+
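
The funnel acquisition above is, at heart, a hand-over-hand climb up a tree of mutexes: take the mutex at your leaf, then at each parent in turn, dropping the previously held level, and bail out at any point where the snapshot check says another task has already completed the grace period. Below is a hedged stand-alone sketch of that climb; the node layout, work_done() helper, and pthread locking are illustrative stand-ins, not the kernel's data structures.

/* Illustrative funnel-lock climb over a tree of mutexes (not kernel code). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct fnode {
	pthread_mutex_t funnel;
	struct fnode *parent;		/* NULL at the root */
};

static _Atomic unsigned long done_seq;	/* advanced when a grace period ends */

static bool work_done(unsigned long s)	/* has snapshot s been satisfied? */
{
	return (long)(atomic_load(&done_seq) - s) >= 0;
}

/* Return the root with its funnel mutex held, or NULL if the work is done. */
static struct fnode *funnel_lock(struct fnode *leaf, unsigned long s)
{
	struct fnode *held = NULL;

	for (struct fnode *n = leaf; n; n = n->parent) {
		if (work_done(s)) {
			if (held)
				pthread_mutex_unlock(&held->funnel);
			return NULL;	/* someone else did our work */
		}
		pthread_mutex_lock(&n->funnel);
		if (held)
			pthread_mutex_unlock(&held->funnel);
		held = n;		/* hold only the current level */
	}
	return held;			/* root, still holding its mutex */
}

The climb is hand-over-hand: the next level is taken before the previous one is dropped, so one winner always emerges holding the root while contenders queue lower down the tree instead of all piling onto the root mutex.
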
+/* Invoked on each online non-idle CPU for expedited quiescent state. */
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+	struct rcu_data *rdp = data;
+	struct rcu_state *rsp = rdp->rsp;
+
+	/* We are here: If we are last, do the wakeup. */
+	rdp->exp_done = true;
+	if (atomic_dec_and_test(&rsp->expedited_need_qs))
+		wake_up(&rsp->expedited_wq);
 	return 0;
 }
 
+static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
+{
+	int cpu;
+	unsigned long jiffies_stall;
+	unsigned long jiffies_start;
+	struct rcu_data *rdp;
+	int ret;
+
+	jiffies_stall = rcu_jiffies_till_stall_check();
+	jiffies_start = jiffies;
+
+	for (;;) {
+		ret = wait_event_interruptible_timeout(
+				rsp->expedited_wq,
+				!atomic_read(&rsp->expedited_need_qs),
+				jiffies_stall);
+		if (ret > 0)
+			return;
+		if (ret < 0) {
+			/* Hit a signal, disable CPU stall warnings. */
+			wait_event(rsp->expedited_wq,
+				   !atomic_read(&rsp->expedited_need_qs));
+			return;
+		}
+		pr_err("INFO: %s detected expedited stalls on CPUs: {",
+		       rsp->name);
+		for_each_online_cpu(cpu) {
+			rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			if (rdp->exp_done)
+				continue;
+			pr_cont(" %d", cpu);
+		}
+		pr_cont(" } %lu jiffies s: %lu\n",
+			jiffies - jiffies_start, rsp->expedited_sequence);
+		for_each_online_cpu(cpu) {
+			rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			if (rdp->exp_done)
+				continue;
+			dump_cpu_task(cpu);
+		}
+		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
+	}
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -3281,58 +3524,21 @@
  * restructure your code to batch your updates, and then use a single
  * synchronize_sched() instead.
  *
- * This implementation can be thought of as an application of ticket
- * locking to RCU, with sync_sched_expedited_started and
- * sync_sched_expedited_done taking on the roles of the halves
- * of the ticket-lock word.  Each task atomically increments
- * sync_sched_expedited_started upon entry, snapshotting the old value,
- * then attempts to stop all the CPUs.  If this succeeds, then each
- * CPU will have executed a context switch, resulting in an RCU-sched
- * grace period.  We are then done, so we use atomic_cmpxchg() to
- * update sync_sched_expedited_done to match our snapshot -- but
- * only if someone else has not already advanced past our snapshot.
- *
- * On the other hand, if try_stop_cpus() fails, we check the value
- * of sync_sched_expedited_done.  If it has advanced past our
- * initial snapshot, then someone else must have forced a grace period
- * some time after we took our snapshot.  In this case, our work is
- * done for us, and we can simply return.  Otherwise, we try again,
- * but keep our initial snapshot for purposes of checking for someone
- * doing our work for us.
- *
- * If we fail too many times in a row, we fall back to synchronize_sched().
+ * This implementation can be thought of as an application of sequence
+ * locking to expedited grace periods, but using the sequence counter to
+ * determine when someone else has already done the work instead of for
+ * retrying readers.
  */
 void synchronize_sched_expedited(void)
 {
-	cpumask_var_t cm;
-	bool cma = false;
 	int cpu;
-	long firstsnap, s, snap;
-	int trycount = 0;
+	unsigned long s;
+	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_sched_state;
 
-	/*
-	 * If we are in danger of counter wrap, just do synchronize_sched().
-	 * By allowing sync_sched_expedited_started to advance no more than
-	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
-	 * that more than 3.5 billion CPUs would be required to force a
-	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
-	 * course be required on a 64-bit system.
-	 */
-	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
-			 (ulong)atomic_long_read(&rsp->expedited_done) +
-			 ULONG_MAX / 8)) {
-		wait_rcu_gp(call_rcu_sched);
-		atomic_long_inc(&rsp->expedited_wrap);
-		return;
-	}
+	/* Take a snapshot of the sequence number.  */
+	s = rcu_exp_gp_seq_snap(rsp);
 
-	/*
-	 * Take a ticket.  Note that atomic_inc_return() implies a
-	 * full memory barrier.
-	 */
-	snap = atomic_long_inc_return(&rsp->expedited_start);
-	firstsnap = snap;
 	if (!try_get_online_cpus()) {
 		/* CPU hotplug operation in flight, fall back to normal GP. */
 		wait_rcu_gp(call_rcu_sched);
@@ -3341,100 +3547,38 @@
 	}
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
-	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
-	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
-	if (cma) {
-		cpumask_copy(cm, cpu_online_mask);
-		cpumask_clear_cpu(raw_smp_processor_id(), cm);
-		for_each_cpu(cpu, cm) {
-			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
-				cpumask_clear_cpu(cpu, cm);
-		}
-		if (cpumask_weight(cm) == 0)
-			goto all_cpus_idle;
-	}
-
-	/*
-	 * Each pass through the following loop attempts to force a
-	 * context switch on each CPU.
-	 */
-	while (try_stop_cpus(cma ? cm : cpu_online_mask,
-			     synchronize_sched_expedited_cpu_stop,
-			     NULL) == -EAGAIN) {
+	rnp = exp_funnel_lock(rsp, s);
+	if (rnp == NULL) {
 		put_online_cpus();
-		atomic_long_inc(&rsp->expedited_tryfail);
-
-		/* Check to see if someone else did our work for us. */
-		s = atomic_long_read(&rsp->expedited_done);
-		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic(); /* ^^^ */
-			atomic_long_inc(&rsp->expedited_workdone1);
-			free_cpumask_var(cm);
-			return;
-		}
-
-		/* No joy, try again later.  Or just synchronize_sched(). */
-		if (trycount++ < 10) {
-			udelay(trycount * num_online_cpus());
-		} else {
-			wait_rcu_gp(call_rcu_sched);
-			atomic_long_inc(&rsp->expedited_normal);
-			free_cpumask_var(cm);
-			return;
-		}
-
-		/* Recheck to see if someone else did our work for us. */
-		s = atomic_long_read(&rsp->expedited_done);
-		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
-			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic(); /* ^^^ */
-			atomic_long_inc(&rsp->expedited_workdone2);
-			free_cpumask_var(cm);
-			return;
-		}
-
-		/*
-		 * Refetching sync_sched_expedited_started allows later
-		 * callers to piggyback on our grace period.  We retry
-		 * after they started, so our grace period works for them,
-		 * and they started after our first try, so their grace
-		 * period works for us.
-		 */
-		if (!try_get_online_cpus()) {
-			/* CPU hotplug operation in flight, use normal GP. */
-			wait_rcu_gp(call_rcu_sched);
-			atomic_long_inc(&rsp->expedited_normal);
-			free_cpumask_var(cm);
-			return;
-		}
-		snap = atomic_long_read(&rsp->expedited_start);
-		smp_mb(); /* ensure read is before try_stop_cpus(). */
+		return;  /* Someone else did our work for us. */
 	}
-	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
-all_cpus_idle:
-	free_cpumask_var(cm);
+	rcu_exp_gp_seq_start(rsp);
 
-	/*
-	 * Everyone up to our most recent fetch is covered by our grace
-	 * period.  Update the counter, but only if our work is still
-	 * relevant -- which it won't be if someone who started later
-	 * than we did already did their update.
-	 */
-	do {
-		atomic_long_inc(&rsp->expedited_done_tries);
-		s = atomic_long_read(&rsp->expedited_done);
-		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
-			/* ensure test happens before caller kfree */
-			smp_mb__before_atomic(); /* ^^^ */
-			atomic_long_inc(&rsp->expedited_done_lost);
-			break;
-		}
-	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
-	atomic_long_inc(&rsp->expedited_done_exit);
+	/* Stop each CPU that is online, non-idle, and not us. */
+	init_waitqueue_head(&rsp->expedited_wq);
+	atomic_set(&rsp->expedited_need_qs, 1); /* Extra count avoids race. */
+	for_each_online_cpu(cpu) {
+		struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+		struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+		rdp->exp_done = false;
+
+		/* Skip our CPU and any idle CPUs. */
+		if (raw_smp_processor_id() == cpu ||
+		    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+			continue;
+		atomic_inc(&rsp->expedited_need_qs);
+		stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
+				    rdp, &rdp->exp_stop_work);
+	}
+
+	/* Remove extra count and, if necessary, wait for CPUs to stop. */
+	if (!atomic_dec_and_test(&rsp->expedited_need_qs))
+		synchronize_sched_expedited_wait(rsp);
+
+	rcu_exp_gp_seq_end(rsp);
+	mutex_unlock(&rnp->exp_funnel_mutex);
 
 	put_online_cpus();
 }
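
One detail worth calling out in the rewritten synchronize_sched_expedited(): expedited_need_qs starts at 1 rather than 0 — the same "extra count" trick _rcu_barrier() uses — so the counter cannot reach zero (and the wait be skipped or the wakeup fire) until the initiating CPU has finished kicking every target CPU and dropped its own reference. A hypothetical minimal sketch of the pattern, with made-up names:

/* "Bias the count by one" pattern: completion cannot trigger while the
 * initiator is still handing out work, even if workers finish instantly. */
#include <stdatomic.h>

static _Atomic int pending;

static void worker_done(void)
{
	if (atomic_fetch_sub(&pending, 1) == 1) {
		/* last reference dropped: wake the initiator here */
	}
}

static void initiate(int nr_workers, void (*kick_worker)(int))
{
	atomic_store(&pending, 1);		/* initiator's own reference */
	for (int i = 0; i < nr_workers; i++) {
		atomic_fetch_add(&pending, 1);
		kick_worker(i);			/* worker ends in worker_done() */
	}
	if (atomic_fetch_sub(&pending, 1) != 1) {
		/* not the last reference: wait for stragglers here */
	}
}
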
@@ -3571,10 +3715,10 @@
 	struct rcu_state *rsp = rdp->rsp;
 
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
+		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
 		complete(&rsp->barrier_completion);
 	} else {
-		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+		_rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
 	}
 }
 
@@ -3586,7 +3730,7 @@
 	struct rcu_state *rsp = type;
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
-	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
+	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
 	atomic_inc(&rsp->barrier_cpu_count);
 	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
 }
@@ -3599,55 +3743,24 @@
 {
 	int cpu;
 	struct rcu_data *rdp;
-	unsigned long snap = READ_ONCE(rsp->n_barrier_done);
-	unsigned long snap_done;
+	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
 
-	_rcu_barrier_trace(rsp, "Begin", -1, snap);
+	_rcu_barrier_trace(rsp, "Begin", -1, s);
 
 	/* Take mutex to serialize concurrent rcu_barrier() requests. */
 	mutex_lock(&rsp->barrier_mutex);
 
-	/*
-	 * Ensure that all prior references, including to ->n_barrier_done,
-	 * are ordered before the _rcu_barrier() machinery.
-	 */
-	smp_mb();  /* See above block comment. */
-
-	/*
-	 * Recheck ->n_barrier_done to see if others did our work for us.
-	 * This means checking ->n_barrier_done for an even-to-odd-to-even
-	 * transition.  The "if" expression below therefore rounds the old
-	 * value up to the next even number and adds two before comparing.
-	 */
-	snap_done = rsp->n_barrier_done;
-	_rcu_barrier_trace(rsp, "Check", -1, snap_done);
-
-	/*
-	 * If the value in snap is odd, we needed to wait for the current
-	 * rcu_barrier() to complete, then wait for the next one, in other
-	 * words, we need the value of snap_done to be three larger than
-	 * the value of snap.  On the other hand, if the value in snap is
-	 * even, we only had to wait for the next rcu_barrier() to complete,
-	 * in other words, we need the value of snap_done to be only two
-	 * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
-	 * this for us (thank you, Linus!).
-	 */
-	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
-		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+	/* Did someone else do our work for us? */
+	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
+		_rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
 		smp_mb(); /* caller's subsequent code after above check. */
 		mutex_unlock(&rsp->barrier_mutex);
 		return;
 	}
 
-	/*
-	 * Increment ->n_barrier_done to avoid duplicate work.  Use
-	 * WRITE_ONCE() to prevent the compiler from speculating
-	 * the increment to precede the early-exit check.
-	 */
-	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
-	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
-	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
-	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
+	/* Mark the start of the barrier operation. */
+	rcu_seq_start(&rsp->barrier_sequence);
+	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
 
 	/*
 	 * Initialize the count to one rather than to zero in order to
@@ -3671,10 +3784,10 @@
 		if (rcu_is_nocb_cpu(cpu)) {
 			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
 				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
-						   rsp->n_barrier_done);
+						   rsp->barrier_sequence);
 			} else {
 				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
-						   rsp->n_barrier_done);
+						   rsp->barrier_sequence);
 				smp_mb__before_atomic();
 				atomic_inc(&rsp->barrier_cpu_count);
 				__call_rcu(&rdp->barrier_head,
@@ -3682,11 +3795,11 @@
 			}
 		} else if (READ_ONCE(rdp->qlen)) {
 			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
-					   rsp->n_barrier_done);
+					   rsp->barrier_sequence);
 			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
 		} else {
 			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
-					   rsp->n_barrier_done);
+					   rsp->barrier_sequence);
 		}
 	}
 	put_online_cpus();
@@ -3698,16 +3811,13 @@
 	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
 		complete(&rsp->barrier_completion);
 
-	/* Increment ->n_barrier_done to prevent duplicate work. */
-	smp_mb(); /* Keep increment after above mechanism. */
-	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
-	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
-	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
-	smp_mb(); /* Keep increment before caller's subsequent code. */
-
 	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
 	wait_for_completion(&rsp->barrier_completion);
 
+	/* Mark the end of the barrier operation. */
+	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
+	rcu_seq_end(&rsp->barrier_sequence);
+
 	/* Other rcu_barrier() invocations can now safely proceed. */
 	mutex_unlock(&rsp->barrier_mutex);
 }
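
To make the early-exit check concrete, here is a small worked example of the snapshot arithmetic (the counter values are made up): if barrier_sequence reads 4 (even, nothing in flight) the snapshot is (4 + 3) & ~0x1 = 6, so one complete start/end pair (4 -> 5 -> 6) satisfies the later rcu_seq_done() check; if it reads 5 (a barrier already in flight) the snapshot is 8, so the in-flight operation does not count and a fully subsequent one is required.

/* Toy check of the snapshot/done arithmetic with made-up counter values. */
#include <assert.h>

#define SNAP(v)		((((unsigned long)(v)) + 3) & ~0x1UL)
#define DONE(v, s)	((long)((unsigned long)(v) - (s)) >= 0)

int main(void)
{
	assert(SNAP(4) == 6);		/* even counter: wait for one full op  */
	assert(!DONE(5, 6));		/* op started but not yet finished     */
	assert(DONE(6, 6));		/* op finished: snapshot satisfied     */

	assert(SNAP(5) == 8);		/* odd counter: in-flight op ignored   */
	assert(!DONE(6, 8));		/* in-flight op finishing isn't enough */
	assert(DONE(8, 8));		/* next complete op satisfies us       */
	return 0;
}
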
@@ -3770,6 +3880,7 @@
 	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
 	rdp->cpu = cpu;
 	rdp->rsp = rsp;
+	mutex_init(&rdp->exp_funnel_mutex);
 	rcu_boot_init_nocb_percpu_data(rdp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -3961,22 +4072,22 @@
  * Compute the per-level fanout, either using the exact fanout specified
  * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
  */
-static void __init rcu_init_levelspread(struct rcu_state *rsp)
+static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
 {
 	int i;
 
 	if (rcu_fanout_exact) {
-		rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
+		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
 		for (i = rcu_num_lvls - 2; i >= 0; i--)
-			rsp->levelspread[i] = RCU_FANOUT;
+			levelspread[i] = RCU_FANOUT;
 	} else {
 		int ccur;
 		int cprv;
 
 		cprv = nr_cpu_ids;
 		for (i = rcu_num_lvls - 1; i >= 0; i--) {
-			ccur = rsp->levelcnt[i];
-			rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
+			ccur = levelcnt[i];
+			levelspread[i] = (cprv + ccur - 1) / ccur;
 			cprv = ccur;
 		}
 	}
@@ -3988,23 +4099,20 @@
 static void __init rcu_init_one(struct rcu_state *rsp,
 		struct rcu_data __percpu *rda)
 {
-	static const char * const buf[] = {
-		"rcu_node_0",
-		"rcu_node_1",
-		"rcu_node_2",
-		"rcu_node_3" };  /* Match MAX_RCU_LVLS */
-	static const char * const fqs[] = {
-		"rcu_node_fqs_0",
-		"rcu_node_fqs_1",
-		"rcu_node_fqs_2",
-		"rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+	static const char * const buf[] = RCU_NODE_NAME_INIT;
+	static const char * const fqs[] = RCU_FQS_NAME_INIT;
+	static const char * const exp[] = RCU_EXP_NAME_INIT;
+	static const char * const exp_sched[] = RCU_EXP_SCHED_NAME_INIT;
 	static u8 fl_mask = 0x1;
+
+	int levelcnt[RCU_NUM_LVLS];		/* # nodes in each level. */
+	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
 	int cpustride = 1;
 	int i;
 	int j;
 	struct rcu_node *rnp;
 
-	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
+	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
 	/* Silence gcc 4.8 false positive about array index out of range. */
 	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
@@ -4013,19 +4121,19 @@
 	/* Initialize the level-tracking arrays. */
 
 	for (i = 0; i < rcu_num_lvls; i++)
-		rsp->levelcnt[i] = num_rcu_lvl[i];
+		levelcnt[i] = num_rcu_lvl[i];
 	for (i = 1; i < rcu_num_lvls; i++)
-		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
-	rcu_init_levelspread(rsp);
+		rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1];
+	rcu_init_levelspread(levelspread, levelcnt);
 	rsp->flavor_mask = fl_mask;
 	fl_mask <<= 1;
 
 	/* Initialize the elements themselves, starting from the leaves. */
 
 	for (i = rcu_num_lvls - 1; i >= 0; i--) {
-		cpustride *= rsp->levelspread[i];
+		cpustride *= levelspread[i];
 		rnp = rsp->level[i];
-		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
+		for (j = 0; j < levelcnt[i]; j++, rnp++) {
 			raw_spin_lock_init(&rnp->lock);
 			lockdep_set_class_and_name(&rnp->lock,
 						   &rcu_node_class[i], buf[i]);
@@ -4045,14 +4153,23 @@
 				rnp->grpmask = 0;
 				rnp->parent = NULL;
 			} else {
-				rnp->grpnum = j % rsp->levelspread[i - 1];
+				rnp->grpnum = j % levelspread[i - 1];
 				rnp->grpmask = 1UL << rnp->grpnum;
 				rnp->parent = rsp->level[i - 1] +
-					      j / rsp->levelspread[i - 1];
+					      j / levelspread[i - 1];
 			}
 			rnp->level = i;
 			INIT_LIST_HEAD(&rnp->blkd_tasks);
 			rcu_init_one_nocb(rnp);
+			mutex_init(&rnp->exp_funnel_mutex);
+			if (rsp == &rcu_sched_state)
+				lockdep_set_class_and_name(
+					&rnp->exp_funnel_mutex,
+					&rcu_exp_sched_class[i], exp_sched[i]);
+			else
+				lockdep_set_class_and_name(
+					&rnp->exp_funnel_mutex,
+					&rcu_exp_class[i], exp[i]);
 		}
 	}
 
@@ -4076,9 +4193,7 @@
 {
 	ulong d;
 	int i;
-	int j;
-	int n = nr_cpu_ids;
-	int rcu_capacity[MAX_RCU_LVLS + 1];
+	int rcu_capacity[RCU_NUM_LVLS];
 
 	/*
 	 * Initialize any unspecified boot parameters.
@@ -4101,47 +4216,49 @@
 		rcu_fanout_leaf, nr_cpu_ids);
 
 	/*
-	 * Compute number of nodes that can be handled an rcu_node tree
-	 * with the given number of levels.  Setting rcu_capacity[0] makes
-	 * some of the arithmetic easier.
-	 */
-	rcu_capacity[0] = 1;
-	rcu_capacity[1] = rcu_fanout_leaf;
-	for (i = 2; i <= MAX_RCU_LVLS; i++)
-		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
-
-	/*
 	 * The boot-time rcu_fanout_leaf parameter is only permitted
 	 * to increase the leaf-level fanout, not decrease it.  Of course,
 	 * the leaf-level fanout cannot exceed the number of bits in
-	 * the rcu_node masks.  Finally, the tree must be able to accommodate
-	 * the configured number of CPUs.  Complain and fall back to the
-	 * compile-time values if these limits are exceeded.
+	 * the rcu_node masks.  Complain and fall back to the compile-
+	 * time values if these limits are exceeded.
 	 */
 	if (rcu_fanout_leaf < RCU_FANOUT_LEAF ||
-	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
-	    n > rcu_capacity[MAX_RCU_LVLS]) {
+	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
+		rcu_fanout_leaf = RCU_FANOUT_LEAF;
 		WARN_ON(1);
 		return;
 	}
 
+	/*
+	 * Compute number of nodes that can be handled by an rcu_node tree
+	 * with the given number of levels.
+	 */
+	rcu_capacity[0] = rcu_fanout_leaf;
+	for (i = 1; i < RCU_NUM_LVLS; i++)
+		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
+
+	/*
+	 * The tree must be able to accommodate the configured number of CPUs.
+	 * If this limit is exceeded then we have a serious problem elsewhere.
+	 */
+	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1])
+		panic("rcu_init_geometry: rcu_capacity[] is too small");
+
+	/* Calculate the number of levels in the tree. */
+	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
+	}
+	rcu_num_lvls = i + 1;
+
 	/* Calculate the number of rcu_nodes at each level of the tree. */
-	for (i = 1; i <= MAX_RCU_LVLS; i++)
-		if (n <= rcu_capacity[i]) {
-			for (j = 0; j <= i; j++)
-				num_rcu_lvl[j] =
-					DIV_ROUND_UP(n, rcu_capacity[i - j]);
-			rcu_num_lvls = i;
-			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
-				num_rcu_lvl[j] = 0;
-			break;
-		}
+	for (i = 0; i < rcu_num_lvls; i++) {
+		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
+		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
+	}
 
 	/* Calculate the total number of rcu_node structures. */
 	rcu_num_nodes = 0;
-	for (i = 0; i <= MAX_RCU_LVLS; i++)
+	for (i = 0; i < rcu_num_lvls; i++)
 		rcu_num_nodes += num_rcu_lvl[i];
-	rcu_num_nodes -= n;
 }
 
 /*
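
For a feel of what the reworked rcu_init_geometry() computes, here is a hedged worked example, replicated stand-alone below; the concrete numbers assume a leaf fanout of 16, an interior fanout of 64, and 4096 possible CPUs, and are purely illustrative.

/* Stand-alone replica of the level/geometry computation, illustrative only. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const long fanout_leaf = 16, fanout = 64, ncpus = 4096;
	long capacity[4], num_lvl[4];
	int i, levels, total = 0;

	capacity[0] = fanout_leaf;		/* {16, 1024, 65536, ...}     */
	for (i = 1; i < 4; i++)
		capacity[i] = capacity[i - 1] * fanout;

	for (i = 0; ncpus > capacity[i]; i++)	/* smallest tree that fits    */
		;
	levels = i + 1;				/* 3 levels for these numbers */

	for (i = 0; i < levels; i++) {
		num_lvl[i] = DIV_ROUND_UP(ncpus, capacity[(levels - 1) - i]);
		total += num_lvl[i];
	}
	printf("levels=%d per-level={%ld,%ld,%ld} total=%d\n",
	       levels, num_lvl[0], num_lvl[1], num_lvl[2], total);
	/* prints: levels=3 per-level={1,4,256} total=261 */
	return 0;
}
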
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 4adb7ca..2e991f8 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -27,6 +27,7 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/stop_machine.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -36,8 +37,6 @@
  * Of course, your mileage may vary.
  */
 
-#define MAX_RCU_LVLS 4
-
 #ifdef CONFIG_RCU_FANOUT
 #define RCU_FANOUT CONFIG_RCU_FANOUT
 #else /* #ifdef CONFIG_RCU_FANOUT */
@@ -66,38 +65,53 @@
 #if NR_CPUS <= RCU_FANOUT_1
 #  define RCU_NUM_LVLS	      1
 #  define NUM_RCU_LVL_0	      1
-#  define NUM_RCU_LVL_1	      (NR_CPUS)
-#  define NUM_RCU_LVL_2	      0
-#  define NUM_RCU_LVL_3	      0
-#  define NUM_RCU_LVL_4	      0
+#  define NUM_RCU_NODES	      NUM_RCU_LVL_0
+#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
+#  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
+#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
+#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
+#  define RCU_EXP_SCHED_NAME_INIT \
+			      { "rcu_node_exp_sched_0" }
 #elif NR_CPUS <= RCU_FANOUT_2
 #  define RCU_NUM_LVLS	      2
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-#  define NUM_RCU_LVL_2	      (NR_CPUS)
-#  define NUM_RCU_LVL_3	      0
-#  define NUM_RCU_LVL_4	      0
+#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
+#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
+#  define RCU_EXP_SCHED_NAME_INIT \
+			      { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1" }
 #elif NR_CPUS <= RCU_FANOUT_3
 #  define RCU_NUM_LVLS	      3
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 #  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-#  define NUM_RCU_LVL_3	      (NR_CPUS)
-#  define NUM_RCU_LVL_4	      0
+#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
+#  define RCU_EXP_SCHED_NAME_INIT \
+			      { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2" }
 #elif NR_CPUS <= RCU_FANOUT_4
 #  define RCU_NUM_LVLS	      4
 #  define NUM_RCU_LVL_0	      1
 #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
 #  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
 #  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
-#  define NUM_RCU_LVL_4	      (NR_CPUS)
+#  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+#  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+#  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+#  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
+#  define RCU_EXP_SCHED_NAME_INIT \
+			      { "rcu_node_exp_sched_0", "rcu_node_exp_sched_1", "rcu_node_exp_sched_2", "rcu_node_exp_sched_3" }
 #else
 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
 #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
 
-#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
-#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
-
 extern int rcu_num_lvls;
 extern int rcu_num_nodes;
 
@@ -236,6 +250,8 @@
 	int need_future_gp[2];
 				/* Counts of upcoming no-CB GP requests. */
 	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
+
+	struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -287,12 +303,13 @@
 	bool		gpwrap;		/* Possible gpnum/completed wrap. */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
 	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
-#ifdef CONFIG_RCU_CPU_STALL_INFO
 	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
 					/*  ticks this CPU has handled */
 					/*  during and after the last grace */
 					/* period it is aware of. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
+	struct cpu_stop_work exp_stop_work;
+					/* Expedited grace-period control */
+					/*  for CPU stopping. */
 
 	/* 2) batch handling */
 	/*
@@ -355,11 +372,13 @@
 	unsigned long n_rp_nocb_defer_wakeup;
 	unsigned long n_rp_need_nothing;
 
-	/* 6) _rcu_barrier() and OOM callbacks. */
+	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
 	struct rcu_head barrier_head;
 #ifdef CONFIG_RCU_FAST_NO_HZ
 	struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+	struct mutex exp_funnel_mutex;
+	bool exp_done;			/* Expedited QS for this CPU? */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -387,9 +406,7 @@
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
 	/* 8) RCU CPU stall data. */
-#ifdef CONFIG_RCU_CPU_STALL_INFO
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
 
 	int cpu;
 	struct rcu_state *rsp;
@@ -442,9 +459,9 @@
  */
 struct rcu_state {
 	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
-	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
-	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
-	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
+	struct rcu_node *level[RCU_NUM_LVLS + 1];
+						/* Hierarchy levels (+1 to */
+						/*  shut bogus gcc warning) */
 	u8 flavor_mask;				/* bit in flavor mask. */
 	struct rcu_data __percpu *rda;		/* pointer to percpu rcu_data. */
 	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
@@ -479,21 +496,18 @@
 	struct mutex barrier_mutex;		/* Guards barrier fields. */
 	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
 	struct completion barrier_completion;	/* Wake at barrier end. */
-	unsigned long n_barrier_done;		/* ++ at start and end of */
+	unsigned long barrier_sequence;		/* ++ at start and end of */
 						/*  _rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
-	atomic_long_t expedited_start;		/* Starting ticket. */
-	atomic_long_t expedited_done;		/* Done ticket. */
-	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
-	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
+	unsigned long expedited_sequence;	/* Take a ticket. */
+	atomic_long_t expedited_workdone0;	/* # done by others #0. */
 	atomic_long_t expedited_workdone1;	/* # done by others #1. */
 	atomic_long_t expedited_workdone2;	/* # done by others #2. */
+	atomic_long_t expedited_workdone3;	/* # done by others #3. */
 	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
-	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
-	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
-	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
-	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */
+	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
+	wait_queue_head_t expedited_wq;		/* Wait for check-ins. */
 
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */
@@ -527,7 +541,11 @@
 /* Values for rcu_state structure's gp_flags field. */
 #define RCU_GP_WAIT_INIT 0	/* Initial state. */
 #define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
-#define RCU_GP_WAIT_FQS  2	/* Wait for force-quiescent-state time. */
+#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
+#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
+#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
+#define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
+#define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */
 
 extern struct list_head rcu_struct_flavors;
 
@@ -635,3 +653,15 @@
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 }
 #endif /* #ifdef CONFIG_RCU_TRACE */
+
+/*
+ * Place this after a lock-acquisition primitive to guarantee that
+ * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
+ * if the UNLOCK and LOCK are executed by the same CPU or if the
+ * UNLOCK and LOCK operate on the same lock variable.
+ */
+#ifdef CONFIG_PPC
+#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
+#else /* #ifdef CONFIG_PPC */
+#define smp_mb__after_unlock_lock()	do { } while (0)
+#endif /* #else #ifdef CONFIG_PPC */
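
Usage-wise, the macro above is meant to sit immediately after a lock acquisition whenever the previous UNLOCK (possibly performed on another CPU) plus this LOCK must together act as a full memory barrier; on architectures where that pairing is already fully ordered it compiles to nothing. A hedged sketch of the intended placement, following the rnp->lock pattern used elsewhere in this code:

/* Illustrative placement only. */
static void example_lock_rnp(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	/* Accesses here are ordered after the previous holder's critical section. */
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
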
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 013485f..b2bf396 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -82,10 +82,8 @@
 		pr_info("\tRCU lockdep checking is enabled.\n");
 	if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
 		pr_info("\tRCU torture testing starts during boot.\n");
-	if (IS_ENABLED(CONFIG_RCU_CPU_STALL_INFO))
-		pr_info("\tAdditional per-CPU info printed with stalls.\n");
-	if (NUM_RCU_LVL_4 != 0)
-		pr_info("\tFour-level hierarchy is enabled.\n");
+	if (RCU_NUM_LVLS >= 4)
+		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
 	if (RCU_FANOUT_LEAF != 16)
 		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
 			RCU_FANOUT_LEAF);
@@ -418,8 +416,6 @@
 		rcu_print_detail_task_stall_rnp(rnp);
 }
 
-#ifdef CONFIG_RCU_CPU_STALL_INFO
-
 static void rcu_print_task_stall_begin(struct rcu_node *rnp)
 {
 	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
@@ -431,18 +427,6 @@
 	pr_cont("\n");
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
-
-static void rcu_print_task_stall_begin(struct rcu_node *rnp)
-{
-}
-
-static void rcu_print_task_stall_end(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
-
 /*
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
@@ -538,10 +522,10 @@
  */
 void synchronize_rcu(void)
 {
-	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
-			   !lock_is_held(&rcu_lock_map) &&
-			   !lock_is_held(&rcu_sched_lock_map),
-			   "Illegal synchronize_rcu() in RCU read-side critical section");
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_rcu() in RCU read-side critical section");
 	if (!rcu_scheduler_active)
 		return;
 	if (rcu_gp_is_expedited())
@@ -552,8 +536,6 @@
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static unsigned long sync_rcu_preempt_exp_count;
-static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
  * Return non-zero if there are any tasks in RCU read-side critical
@@ -573,7 +555,7 @@
  * for the current expedited grace period.  Works only for preemptible
  * RCU -- other RCU implementation use other means.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
@@ -589,7 +571,7 @@
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
 			       bool wake)
@@ -628,7 +610,7 @@
  * set the ->expmask bits on the leaf rcu_node structures to tell phase 2
  * that work is needed here.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void
 sync_rcu_preempt_exp_init1(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -671,7 +653,7 @@
  * invoke rcu_report_exp_rnp() to clear out the upper-level ->expmask bits,
  * enabling rcu_read_unlock_special() to do the bit-clearing.
  *
- * Caller must hold sync_rcu_preempt_exp_mutex.
+ * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void
 sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
@@ -719,51 +701,17 @@
 void synchronize_rcu_expedited(void)
 {
 	struct rcu_node *rnp;
+	struct rcu_node *rnp_unlock;
 	struct rcu_state *rsp = rcu_state_p;
-	unsigned long snap;
-	int trycount = 0;
+	unsigned long s;
 
-	smp_mb(); /* Caller's modifications seen first by other CPUs. */
-	snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
-	smp_mb(); /* Above access cannot bleed into critical section. */
+	s = rcu_exp_gp_seq_snap(rsp);
 
-	/*
-	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
-	 * operation that finds an rcu_node structure with tasks in the
-	 * process of being boosted will know that all tasks blocking
-	 * this expedited grace period will already be in the process of
-	 * being boosted.  This simplifies the process of moving tasks
-	 * from leaf to root rcu_node structures.
-	 */
-	if (!try_get_online_cpus()) {
-		/* CPU-hotplug operation in flight, fall back to normal GP. */
-		wait_rcu_gp(call_rcu);
-		return;
-	}
+	rnp_unlock = exp_funnel_lock(rsp, s);
+	if (rnp_unlock == NULL)
+		return;  /* Someone else did our work for us. */
 
-	/*
-	 * Acquire lock, falling back to synchronize_rcu() if too many
-	 * lock-acquisition failures.  Of course, if someone does the
-	 * expedited grace period for us, just leave.
-	 */
-	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
-		if (ULONG_CMP_LT(snap,
-		    READ_ONCE(sync_rcu_preempt_exp_count))) {
-			put_online_cpus();
-			goto mb_ret; /* Others did our work for us. */
-		}
-		if (trycount++ < 10) {
-			udelay(trycount * num_online_cpus());
-		} else {
-			put_online_cpus();
-			wait_rcu_gp(call_rcu);
-			return;
-		}
-	}
-	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
-		put_online_cpus();
-		goto unlock_mb_ret; /* Others did our work for us. */
-	}
+	rcu_exp_gp_seq_start(rsp);
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
@@ -779,20 +727,14 @@
 	rcu_for_each_leaf_node(rsp, rnp)
 		sync_rcu_preempt_exp_init2(rsp, rnp);
 
-	put_online_cpus();
-
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
 	wait_event(sync_rcu_preempt_exp_wq,
 		   sync_rcu_preempt_exp_done(rnp));
 
 	/* Clean up and exit. */
-	smp_mb(); /* ensure expedited GP seen before counter increment. */
-	WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
-unlock_mb_ret:
-	mutex_unlock(&sync_rcu_preempt_exp_mutex);
-mb_ret:
-	smp_mb(); /* ensure subsequent action seen after grace period. */
+	rcu_exp_gp_seq_end(rsp);
+	mutex_unlock(&rnp_unlock->exp_funnel_mutex);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
@@ -1061,8 +1003,7 @@
 }
 
 /*
- * Priority-boosting kthread.  One per leaf rcu_node and one for the
- * root rcu_node.
+ * Priority-boosting kthread, one per leaf rcu_node.
  */
 static int rcu_boost_kthread(void *arg)
 {
@@ -1680,12 +1621,10 @@
 	 */
 	atomic_set(&oom_callback_count, 1);
 
-	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
 		cond_resched_rcu_qs();
 	}
-	put_online_cpus();
 
 	/* Unconditionally decrement: no need to wake ourselves up. */
 	atomic_dec(&oom_callback_count);
@@ -1706,8 +1645,6 @@
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#ifdef CONFIG_RCU_CPU_STALL_INFO
-
 #ifdef CONFIG_RCU_FAST_NO_HZ
 
 static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
@@ -1796,33 +1733,6 @@
 		raw_cpu_inc(rsp->rda->ticks_this_gp);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
-
-static void print_cpu_stall_info_begin(void)
-{
-	pr_cont(" {");
-}
-
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
-{
-	pr_cont(" %d", cpu);
-}
-
-static void print_cpu_stall_info_end(void)
-{
-	pr_cont("} ");
-}
-
-static void zero_cpu_stall_ticks(struct rcu_data *rdp)
-{
-}
-
-static void increment_cpu_stall_ticks(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
-
 #ifdef CONFIG_RCU_NOCB_CPU
 
 /*
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 3ea7ffc..6fc4c5f 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -81,9 +81,9 @@
 static int show_rcubarrier(struct seq_file *m, void *v)
 {
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
-	seq_printf(m, "bcc: %d nbd: %lu\n",
+	seq_printf(m, "bcc: %d bseq: %lu\n",
 		   atomic_read(&rsp->barrier_cpu_count),
-		   rsp->n_barrier_done);
+		   rsp->barrier_sequence);
 	return 0;
 }
 
@@ -185,18 +185,15 @@
 {
 	struct rcu_state *rsp = (struct rcu_state *)m->private;
 
-	seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
-		   atomic_long_read(&rsp->expedited_start),
-		   atomic_long_read(&rsp->expedited_done),
-		   atomic_long_read(&rsp->expedited_wrap),
-		   atomic_long_read(&rsp->expedited_tryfail),
+	seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+		   rsp->expedited_sequence,
+		   atomic_long_read(&rsp->expedited_workdone0),
 		   atomic_long_read(&rsp->expedited_workdone1),
 		   atomic_long_read(&rsp->expedited_workdone2),
+		   atomic_long_read(&rsp->expedited_workdone3),
 		   atomic_long_read(&rsp->expedited_normal),
-		   atomic_long_read(&rsp->expedited_stoppedcpus),
-		   atomic_long_read(&rsp->expedited_done_tries),
-		   atomic_long_read(&rsp->expedited_done_lost),
-		   atomic_long_read(&rsp->expedited_done_exit));
+		   atomic_read(&rsp->expedited_need_qs),
+		   rsp->expedited_sequence / 2);
 	return 0;
 }
 
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index afaecb7..7a0b3bc 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -62,6 +62,55 @@
 
 module_param(rcu_expedited, int, 0);
 
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
+/**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+ *
+ * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
+ * RCU-sched read-side critical section.  In absence of
+ * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
+ * critical section unless it can prove otherwise.  Note that disabling
+ * of preemption (including disabling irqs) counts as an RCU-sched
+ * read-side critical section.  This is useful for debug checks in functions
+ * that require that they be called within an RCU-sched read-side
+ * critical section.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
+ * and while lockdep is disabled.
+ *
+ * Note that if the CPU is in the idle loop from an RCU point of
+ * view (ie: that we are in the section between rcu_idle_enter() and
+ * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
+ * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
+ * that are in such a section, considering these as in extended quiescent
+ * state, so such a CPU is effectively never in an RCU read-side critical
+ * section regardless of what RCU primitives it invokes.  This state of
+ * affairs is required --- we need to keep an RCU-free window in idle
+ * where the CPU may possibly enter into low power mode. This way we can
+ * notice an extended quiescent state to other CPUs that started a grace
+ * period. Otherwise we would delay any grace period as long as we run in
+ * the idle task.
+ *
+ * Similarly, we avoid claiming an SRCU read lock held if the current
+ * CPU is offline.
+ */
+int rcu_read_lock_sched_held(void)
+{
+	int lockdep_opinion = 0;
+
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	if (!rcu_is_watching())
+		return 0;
+	if (!rcu_lockdep_current_cpu_online())
+		return 0;
+	if (debug_locks)
+		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
+	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+}
+EXPORT_SYMBOL(rcu_read_lock_sched_held);
+#endif
+
 #ifndef CONFIG_TINY_RCU
 
 static atomic_t rcu_expedited_nesting =
@@ -269,20 +318,37 @@
 	rcu = container_of(head, struct rcu_synchronize, head);
 	complete(&rcu->completion);
 }
+EXPORT_SYMBOL_GPL(wakeme_after_rcu);
 
-void wait_rcu_gp(call_rcu_func_t crf)
+void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
+		   struct rcu_synchronize *rs_array)
 {
-	struct rcu_synchronize rcu;
+	int i;
 
-	init_rcu_head_on_stack(&rcu.head);
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	crf(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-	destroy_rcu_head_on_stack(&rcu.head);
+	/* Initialize and register callbacks for each flavor specified. */
+	for (i = 0; i < n; i++) {
+		if (checktiny &&
+		    (crcu_array[i] == call_rcu ||
+		     crcu_array[i] == call_rcu_bh)) {
+			might_sleep();
+			continue;
+		}
+		init_rcu_head_on_stack(&rs_array[i].head);
+		init_completion(&rs_array[i].completion);
+		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
+	}
+
+	/* Wait for all callbacks to be invoked. */
+	for (i = 0; i < n; i++) {
+		if (checktiny &&
+		    (crcu_array[i] == call_rcu ||
+		     crcu_array[i] == call_rcu_bh))
+			continue;
+		wait_for_completion(&rs_array[i].completion);
+		destroy_rcu_head_on_stack(&rs_array[i].head);
+	}
 }
-EXPORT_SYMBOL_GPL(wait_rcu_gp);
+EXPORT_SYMBOL_GPL(__wait_rcu_gp);
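
With __wait_rcu_gp() now taking an array of flavors, the single-flavor wait_rcu_gp() is presumably reduced to a thin wrapper along the following lines; this is a hedged sketch, not necessarily the exact wrapper the header ends up defining.

/* Hedged sketch of a single-flavor wrapper around __wait_rcu_gp(). */
static inline void wait_rcu_gp_sketch(call_rcu_func_t crf)
{
	call_rcu_func_t crf_array[] = { crf };
	struct rcu_synchronize rs_array[1];

	__wait_rcu_gp(false, 1, crf_array, rs_array);
}
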
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 void init_rcu_head(struct rcu_head *head)
@@ -523,8 +589,8 @@
 void synchronize_rcu_tasks(void)
 {
 	/* Complain if the scheduler has not started.  */
-	rcu_lockdep_assert(!rcu_scheduler_active,
-			   "synchronize_rcu_tasks called too soon");
+	RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+			 "synchronize_rcu_tasks called too soon");
 
 	/* Wait for the grace period. */
 	wait_rcu_gp(call_rcu_tasks);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78b4bad10..3595403 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -164,14 +164,12 @@
 
 static void sched_feat_disable(int i)
 {
-	if (static_key_enabled(&sched_feat_keys[i]))
-		static_key_slow_dec(&sched_feat_keys[i]);
+	static_key_disable(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!static_key_enabled(&sched_feat_keys[i]))
-		static_key_slow_inc(&sched_feat_keys[i]);
+	static_key_enable(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -1151,15 +1149,45 @@
 	return 0;
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+/*
+ * sched_class::set_cpus_allowed must do the below, but is not required to
+ * actually call this function.
+ */
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-
 	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	struct rq *rq = task_rq(p);
+	bool queued, running;
+
+	lockdep_assert_held(&p->pi_lock);
+
+	queued = task_on_rq_queued(p);
+	running = task_current(rq, p);
+
+	if (queued) {
+		/*
+		 * Because __kthread_bind() calls this on blocked tasks without
+		 * holding rq->lock.
+		 */
+		lockdep_assert_held(&rq->lock);
+		dequeue_task(rq, p, 0);
+	}
+	if (running)
+		put_prev_task(rq, p);
+
+	p->sched_class->set_cpus_allowed(p, new_mask);
+
+	if (running)
+		p->sched_class->set_curr_task(rq);
+	if (queued)
+		enqueue_task(rq, p, 0);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1169,7 +1197,8 @@
  * task must not exit() & deallocate itself prematurely. The
  * call is not atomic; no spinlocks may be held.
  */
-int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+				  const struct cpumask *new_mask, bool check)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -1178,6 +1207,15 @@
 
 	rq = task_rq_lock(p, &flags);
 
+	/*
+	 * Must re-check here, to close a race against __kthread_bind(),
+	 * sched_setaffinity() is not guaranteed to observe the flag.
+	 */
+	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
@@ -1214,6 +1252,11 @@
 
 	return ret;
 }
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+	return __set_cpus_allowed_ptr(p, new_mask, false);
+}
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -1595,6 +1638,15 @@
 	s64 diff = sample - *avg;
 	*avg += diff >> 3;
 }
+
+#else
+
+static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+					 const struct cpumask *new_mask, bool check)
+{
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 #endif /* CONFIG_SMP */
 
 static void
@@ -1654,9 +1706,9 @@
 ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 {
 	check_preempt_curr(rq, p, wake_flags);
-	trace_sched_wakeup(p, true);
-
 	p->state = TASK_RUNNING;
+	trace_sched_wakeup(p);
+
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_woken) {
 		/*
@@ -1874,6 +1926,8 @@
 	if (!(p->state & state))
 		goto out;
 
+	trace_sched_waking(p);
+
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
@@ -1949,6 +2003,8 @@
 	if (!(p->state & TASK_NORMAL))
 		goto out;
 
+	trace_sched_waking(p);
+
 	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
@@ -2016,9 +2072,6 @@
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
 	p->se.vruntime			= 0;
-#ifdef CONFIG_SMP
-	p->se.avg.decay_count		= 0;
-#endif
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
@@ -2200,8 +2253,8 @@
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
-	rcu_lockdep_assert(rcu_read_lock_sched_held(),
-			   "sched RCU must be held");
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
 	return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -2210,8 +2263,8 @@
 	struct root_domain *rd = cpu_rq(i)->rd;
 	int cpus = 0;
 
-	rcu_lockdep_assert(rcu_read_lock_sched_held(),
-			   "sched RCU must be held");
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
+			 "sched RCU must be held");
 	for_each_cpu_and(i, rd->span, cpu_active_mask)
 		cpus++;
 
@@ -2303,11 +2356,11 @@
 #endif
 
 	/* Initialize new task's runnable average */
-	init_task_runnable_average(p);
+	init_entity_runnable_average(&p->se);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
-	trace_sched_wakeup_new(p, true);
+	trace_sched_wakeup_new(p);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_woken)
@@ -2469,7 +2522,6 @@
 	 */
 	prev_state = prev->state;
 	vtime_task_switch(prev);
-	finish_arch_switch(prev);
 	perf_event_task_sched_in(prev, current);
 	finish_lock_switch(rq, prev);
 	finish_arch_post_lock_switch();
@@ -2489,7 +2541,7 @@
 		put_task_struct(prev);
 	}
 
-	tick_nohz_task_switch(current);
+	tick_nohz_task_switch();
 	return rq;
 }
 
@@ -4340,7 +4392,7 @@
 	}
 #endif
 again:
-	retval = set_cpus_allowed_ptr(p, new_mask);
+	retval = __set_cpus_allowed_ptr(p, new_mask, true);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -4492,7 +4544,7 @@
 
 int __sched _cond_resched(void)
 {
-	if (should_resched()) {
+	if (should_resched(0)) {
 		preempt_schedule_common();
 		return 1;
 	}
@@ -4510,7 +4562,7 @@
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-	int resched = should_resched();
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
 	int ret = 0;
 
 	lockdep_assert_held(lock);
@@ -4532,7 +4584,7 @@
 {
 	BUG_ON(!in_softirq());
 
-	if (should_resched()) {
+	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
 		local_bh_enable();
 		preempt_schedule_common();
 		local_bh_disable();
@@ -4865,7 +4917,8 @@
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&rq->lock, flags);
+	raw_spin_lock_irqsave(&idle->pi_lock, flags);
+	raw_spin_lock(&rq->lock);
 
 	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
@@ -4891,7 +4944,8 @@
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_unlock(&rq->lock);
+	raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
 
 	/* Set the preempt count _outside_ the spinlocks! */
 	init_idle_preempt_count(idle, cpu);
@@ -5311,8 +5365,7 @@
 /* may be called multiple times per register */
 static void unregister_sched_domain_sysctl(void)
 {
-	if (sd_sysctl_header)
-		unregister_sysctl_table(sd_sysctl_header);
+	unregister_sysctl_table(sd_sysctl_header);
 	sd_sysctl_header = NULL;
 	if (sd_ctl_dir[0].child)
 		sd_free_ctl_entry(&sd_ctl_dir[0].child);
@@ -5433,6 +5486,14 @@
 	case CPU_STARTING:
 		set_cpu_rq_start_time();
 		return NOTIFY_OK;
+	case CPU_ONLINE:
+		/*
+		 * At this point a starting CPU has marked itself as online via
+		 * set_cpu_online(). But it might not yet have marked itself
+		 * as active, which is essential from here on.
+		 *
+		 * Thus, fall-through and help the starting CPU along.
+		 */
 	case CPU_DOWN_FAILED:
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
@@ -6445,8 +6506,10 @@
 
 	n = sched_max_numa_distance;
 
-	if (n <= 1)
+	if (sched_domains_numa_levels <= 1) {
 		sched_numa_topology_type = NUMA_DIRECT;
+		return;
+	}
 
 	for_each_online_node(a) {
 		for_each_online_node(b) {
@@ -8068,7 +8131,7 @@
 	sched_offline_group(tg);
 }
 
-static void cpu_cgroup_fork(struct task_struct *task)
+static void cpu_cgroup_fork(struct task_struct *task, void *private)
 {
 	sched_move_task(task);
 }
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index f5a64ff..8cbc3db 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -555,48 +555,43 @@
 }
 
 /*
- * Atomically advance counter to the new value. Interrupts, vcpu
- * scheduling, and scaling inaccuracies can cause cputime_advance
- * to be occasionally called with a new value smaller than counter.
- * Let's enforce atomicity.
+ * Adjust tick based cputime random precision against scheduler runtime
+ * accounting.
  *
- * Normally a caller will only go through this loop once, or not
- * at all in case a previous caller updated counter the same jiffy.
- */
-static void cputime_advance(cputime_t *counter, cputime_t new)
-{
-	cputime_t old;
-
-	while (new > (old = READ_ONCE(*counter)))
-		cmpxchg_cputime(counter, old, new);
-}
-
-/*
- * Adjust tick based cputime random precision against scheduler
- * runtime accounting.
+ * Tick based cputime accounting depends on random scheduling timeslices of a
+ * task to be interrupted or not by the timer.  Depending on these
+ * circumstances, the number of these interrupts may be over or
+ * under-optimistic, matching the real user and system cputime with a variable
+ * precision.
+ *
+ * Fix this by scaling these tick based values against the total runtime
+ * accounted by the CFS scheduler.
+ *
+ * This code provides the following guarantees:
+ *
+ *   stime + utime == rtime
+ *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
+ *
+ * Assuming that rtime_i+1 >= rtime_i.
  */
 static void cputime_adjust(struct task_cputime *curr,
-			   struct cputime *prev,
+			   struct prev_cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
 	cputime_t rtime, stime, utime;
+	unsigned long flags;
 
-	/*
-	 * Tick based cputime accounting depend on random scheduling
-	 * timeslices of a task to be interrupted or not by the timer.
-	 * Depending on these circumstances, the number of these interrupts
-	 * may be over or under-optimistic, matching the real user and system
-	 * cputime with a variable precision.
-	 *
-	 * Fix this by scaling these tick based values against the total
-	 * runtime accounted by the CFS scheduler.
-	 */
+	/* Serialize concurrent callers such that we can honour our guarantees */
+	raw_spin_lock_irqsave(&prev->lock, flags);
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
 	/*
-	 * Update userspace visible utime/stime values only if actual execution
-	 * time is bigger than already exported. Note that can happen, that we
-	 * provided bigger values due to scaling inaccuracy on big numbers.
+	 * This is possible under two circumstances:
+	 *  - rtime isn't monotonic after all (a bug);
+	 *  - we got reordered by the lock.
+	 *
+	 * In both cases this acts as a filter such that the rest of the code
+	 * can assume it is monotonic regardless of anything else.
 	 */
 	if (prev->stime + prev->utime >= rtime)
 		goto out;
@@ -606,22 +601,46 @@
 
 	if (utime == 0) {
 		stime = rtime;
-	} else if (stime == 0) {
-		utime = rtime;
-	} else {
-		cputime_t total = stime + utime;
-
-		stime = scale_stime((__force u64)stime,
-				    (__force u64)rtime, (__force u64)total);
-		utime = rtime - stime;
+		goto update;
 	}
 
-	cputime_advance(&prev->stime, stime);
-	cputime_advance(&prev->utime, utime);
+	if (stime == 0) {
+		utime = rtime;
+		goto update;
+	}
 
+	stime = scale_stime((__force u64)stime, (__force u64)rtime,
+			    (__force u64)(stime + utime));
+
+	/*
+	 * Make sure stime doesn't go backwards; this preserves monotonicity
+	 * for utime because rtime is monotonic.
+	 *
+	 *  utime_i+1 = rtime_i+1 - stime_i
+	 *            = rtime_i+1 - (rtime_i - utime_i)
+	 *            = (rtime_i+1 - rtime_i) + utime_i
+	 *            >= utime_i
+	 */
+	if (stime < prev->stime)
+		stime = prev->stime;
+	utime = rtime - stime;
+
+	/*
+	 * Make sure utime doesn't go backwards; this still preserves
+	 * monotonicity for stime, analogous argument to above.
+	 */
+	if (utime < prev->utime) {
+		utime = prev->utime;
+		stime = rtime - utime;
+	}
+
+update:
+	prev->stime = stime;
+	prev->utime = utime;
 out:
 	*ut = prev->utime;
 	*st = prev->stime;
+	raw_spin_unlock_irqrestore(&prev->lock, flags);
 }
 
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
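
A quick numeric sanity check of the clamping logic above, with made-up values: if a previous call reported stime = 6 and utime = 4 (rtime 10), and on the next call rtime is 12 but tick noise makes the scaled stime come out as 5, the first clamp raises stime back to 6 and utime becomes 12 - 6 = 6, preserving both monotonicity and stime + utime == rtime.

/* Toy check of the monotonicity clamps, using made-up numbers. */
#include <assert.h>

int main(void)
{
	unsigned long prev_stime = 6, prev_utime = 4;	/* from the previous call */
	unsigned long rtime = 12, stime = 5, utime;	/* this call's raw values */

	if (stime < prev_stime)		/* stime must not go backwards */
		stime = prev_stime;
	utime = rtime - stime;
	if (utime < prev_utime) {	/* nor may utime */
		utime = prev_utime;
		stime = rtime - utime;
	}
	assert(stime == 6 && utime == 6);
	assert(stime + utime == rtime);
	return 0;
}
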
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0a17af35..fc8f010 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -953,7 +953,7 @@
 
 	/*
 	 * Use the scheduling parameters of the top pi-waiter
-	 * task if we have one and its (relative) deadline is
+	 * task if we have one and its (absolute) deadline is
 	 * smaller than our one... OTW we keep our runtime and
 	 * deadline.
 	 */
@@ -1563,7 +1563,7 @@
 
 static void push_dl_tasks(struct rq *rq)
 {
-	/* Terminates as it moves a -deadline task */
+	/* push_dl_task() will return true if it moved a -deadline task */
 	while (push_dl_task(rq))
 		;
 }
@@ -1657,7 +1657,6 @@
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    has_pushable_dl_tasks(rq) &&
 	    p->nr_cpus_allowed > 1 &&
 	    dl_task(rq->curr) &&
 	    (rq->curr->nr_cpus_allowed < 2 ||
@@ -1669,9 +1668,8 @@
 static void set_cpus_allowed_dl(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
-	struct rq *rq;
 	struct root_domain *src_rd;
-	int weight;
+	struct rq *rq;
 
 	BUG_ON(!dl_task(p));
 
@@ -1697,37 +1695,7 @@
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	/*
-	 * Update only if the task is actually running (i.e.,
-	 * it is on the rq AND it is not throttled).
-	 */
-	if (!on_dl_rq(&p->dl))
-		return;
-
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		return;
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_dl_task(rq, p);
-		BUG_ON(!rq->dl.dl_nr_migratory);
-		rq->dl.dl_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_dl_task(rq, p);
-		rq->dl.dl_nr_migratory++;
-	}
-
-	update_dl_migration(&rq->dl);
+	set_cpus_allowed_common(p, new_mask);
 }
 
 /* Assumes rq->lock is held */
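
The hunk above (and the matching rt.c hunk further down) drops the per-class set_cpus_allowed implementation in favour of the common set_cpus_allowed_common() callback declared in the sched.h hunk below. The helper itself lives in kernel/sched/core.c and is not shown in this excerpt; as a rough, hedged sketch of what such a shared callback amounts to, it presumably does little more than record the new mask and cache its weight, along these lines (body is an assumption, only the name and signature come from this diff):

void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	/* sketch only: record the mask and cache its weight for the fast paths */
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}
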
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4222ec5..6415117 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -68,13 +68,8 @@
 #define PN(F) \
 	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
 
-	if (!se) {
-		struct sched_avg *avg = &cpu_rq(cpu)->avg;
-		P(avg->runnable_avg_sum);
-		P(avg->avg_period);
+	if (!se)
 		return;
-	}
-
 
 	PN(se->exec_start);
 	PN(se->vruntime);
@@ -93,12 +88,8 @@
 #endif
 	P(se->load.weight);
 #ifdef CONFIG_SMP
-	P(se->avg.runnable_avg_sum);
-	P(se->avg.running_avg_sum);
-	P(se->avg.avg_period);
-	P(se->avg.load_avg_contrib);
-	P(se->avg.utilization_avg_contrib);
-	P(se->avg.decay_count);
+	P(se->avg.load_avg);
+	P(se->avg.util_avg);
 #endif
 #undef PN
 #undef P
@@ -214,21 +205,21 @@
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
+	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
+			cfs_rq->avg.load_avg);
+	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
 			cfs_rq->runnable_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
-			cfs_rq->blocked_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "utilization_load_avg",
-			cfs_rq->utilization_load_avg);
+	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
+			cfs_rq->avg.util_avg);
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
+			atomic_long_read(&cfs_rq->removed_load_avg));
+	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
+			atomic_long_read(&cfs_rq->removed_util_avg));
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
-			cfs_rq->tg_load_contrib);
-	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
-			cfs_rq->tg_runnable_contrib);
+	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
+			cfs_rq->tg_load_avg_contrib);
 	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
 			atomic_long_read(&cfs_rq->tg->load_avg));
-	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
-			atomic_read(&cfs_rq->tg->runnable_avg));
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
@@ -636,12 +627,11 @@
 
 	P(se.load.weight);
 #ifdef CONFIG_SMP
-	P(se.avg.runnable_avg_sum);
-	P(se.avg.running_avg_sum);
-	P(se.avg.avg_period);
-	P(se.avg.load_avg_contrib);
-	P(se.avg.utilization_avg_contrib);
-	P(se.avg.decay_count);
+	P(se.avg.load_sum);
+	P(se.avg.util_sum);
+	P(se.avg.load_avg);
+	P(se.avg.util_avg);
+	P(se.avg.last_update_time);
 #endif
 	P(policy);
 	P(prio);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d113c3b..6e2e348 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -283,9 +283,6 @@
 	return grp->my_q;
 }
 
-static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
-				       int force_update);
-
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	if (!cfs_rq->on_list) {
@@ -305,8 +302,6 @@
 		}
 
 		cfs_rq->on_list = 1;
-		/* We should have no load, but we need to update last_decay. */
-		update_cfs_rq_blocked_load(cfs_rq, 0);
 	}
 }
 
@@ -616,15 +611,10 @@
  */
 static u64 __sched_period(unsigned long nr_running)
 {
-	u64 period = sysctl_sched_latency;
-	unsigned long nr_latency = sched_nr_latency;
-
-	if (unlikely(nr_running > nr_latency)) {
-		period = sysctl_sched_min_granularity;
-		period *= nr_running;
-	}
-
-	return period;
+	if (unlikely(nr_running > sched_nr_latency))
+		return nr_running * sysctl_sched_min_granularity;
+	else
+		return sysctl_sched_latency;
 }
 
 /*
@@ -669,22 +659,37 @@
 static int select_idle_sibling(struct task_struct *p, int cpu);
 static unsigned long task_h_load(struct task_struct *p);
 
-static inline void __update_task_entity_contrib(struct sched_entity *se);
-static inline void __update_task_entity_utilization(struct sched_entity *se);
+/*
+ * We choose a half-life close to 1 scheduling period.
+ * Note: The tables below are dependent on this value.
+ */
+#define LOAD_AVG_PERIOD 32
+#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
+#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
 
-/* Give new task start runnable values to heavy its load in infant time */
-void init_task_runnable_average(struct task_struct *p)
+/* Give a new sched_entity initial runnable values so it carries a heavier load in its infancy */
+void init_entity_runnable_average(struct sched_entity *se)
 {
-	u32 slice;
+	struct sched_avg *sa = &se->avg;
 
-	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
-	p->se.avg.runnable_avg_sum = p->se.avg.running_avg_sum = slice;
-	p->se.avg.avg_period = slice;
-	__update_task_entity_contrib(&p->se);
-	__update_task_entity_utilization(&p->se);
+	sa->last_update_time = 0;
+	/*
+	 * sched_avg's period_contrib should be strictly less than 1024, so
+	 * we give it 1023 to make sure it is almost a full period (1024us), and
+	 * will definitely be updated (after enqueue).
+	 */
+	sa->period_contrib = 1023;
+	sa->load_avg = scale_load_down(se->load.weight);
+	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
+	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
+	sa->util_sum = LOAD_AVG_MAX;
+	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
 }
+
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
+static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
-void init_task_runnable_average(struct task_struct *p)
+void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 #endif
@@ -1415,8 +1420,9 @@
 	 * --------------------- vs ---------------------
 	 * src->compute_capacity    dst->compute_capacity
 	 */
-	if (src->load * dst->compute_capacity >
-	    dst->load * src->compute_capacity)
+	if (src->load * dst->compute_capacity * env->imbalance_pct >
+
+	    dst->load * src->compute_capacity * 100)
 		return true;
 
 	return false;
@@ -1702,8 +1708,8 @@
 		delta = runtime - p->last_sum_exec_runtime;
 		*period = now - p->last_task_numa_placement;
 	} else {
-		delta = p->se.avg.runnable_avg_sum;
-		*period = p->se.avg.avg_period;
+		delta = p->se.avg.load_sum / p->se.load.weight;
+		*period = LOAD_AVG_MAX;
 	}
 
 	p->last_sum_exec_runtime = runtime;
@@ -2351,13 +2357,13 @@
 	long tg_weight;
 
 	/*
-	 * Use this CPU's actual weight instead of the last load_contribution
-	 * to gain a more accurate current total weight. See
-	 * update_cfs_rq_load_contribution().
+	 * Use this CPU's real-time load instead of the last load contribution
+	 * as the updating of the contribution is delayed, and we will use the
+	 * real-time load to calculate the share. See update_tg_load_avg().
 	 */
 	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_contrib;
-	tg_weight += cfs_rq->load.weight;
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += cfs_rq_load_avg(cfs_rq);
 
 	return tg_weight;
 }
@@ -2367,7 +2373,7 @@
 	long tg_weight, load, shares;
 
 	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	load = cfs_rq_load_avg(cfs_rq);
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2429,14 +2435,6 @@
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
-/*
- * We choose a half-life close to 1 scheduling period.
- * Note: The tables below are dependent on this value.
- */
-#define LOAD_AVG_PERIOD 32
-#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
-#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
-
 /* Precomputed fixed inverse multiplies for multiplication by y^n */
 static const u32 runnable_avg_yN_inv[] = {
 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
@@ -2485,9 +2483,8 @@
 		local_n %= LOAD_AVG_PERIOD;
 	}
 
-	val *= runnable_avg_yN_inv[local_n];
-	/* We don't use SRR here since we always want to round down. */
-	return val >> 32;
+	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
+	return val;
 }
 
 /*
@@ -2546,23 +2543,22 @@
  *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
  *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
  */
-static __always_inline int __update_entity_runnable_avg(u64 now, int cpu,
-							struct sched_avg *sa,
-							int runnable,
-							int running)
+static __always_inline int
+__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
+		  unsigned long weight, int running, struct cfs_rq *cfs_rq)
 {
 	u64 delta, periods;
-	u32 runnable_contrib;
+	u32 contrib;
 	int delta_w, decayed = 0;
 	unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
 
-	delta = now - sa->last_runnable_update;
+	delta = now - sa->last_update_time;
 	/*
 	 * This should only happen when time goes backwards, which it
 	 * unfortunately does during sched clock init when we swap over to TSC.
 	 */
 	if ((s64)delta < 0) {
-		sa->last_runnable_update = now;
+		sa->last_update_time = now;
 		return 0;
 	}
 
@@ -2573,26 +2569,29 @@
 	delta >>= 10;
 	if (!delta)
 		return 0;
-	sa->last_runnable_update = now;
+	sa->last_update_time = now;
 
 	/* delta_w is the amount already accumulated against our next period */
-	delta_w = sa->avg_period % 1024;
+	delta_w = sa->period_contrib;
 	if (delta + delta_w >= 1024) {
-		/* period roll-over */
 		decayed = 1;
 
+		/* how much is left over for the next period is not known yet, so reset it */
+		sa->period_contrib = 0;
+
 		/*
 		 * Now that we know we're crossing a period boundary, figure
 		 * out how much from delta we need to complete the current
 		 * period and accrue it.
 		 */
 		delta_w = 1024 - delta_w;
-		if (runnable)
-			sa->runnable_avg_sum += delta_w;
+		if (weight) {
+			sa->load_sum += weight * delta_w;
+			if (cfs_rq)
+				cfs_rq->runnable_load_sum += weight * delta_w;
+		}
 		if (running)
-			sa->running_avg_sum += delta_w * scale_freq
-				>> SCHED_CAPACITY_SHIFT;
-		sa->avg_period += delta_w;
+			sa->util_sum += delta_w * scale_freq >> SCHED_CAPACITY_SHIFT;
 
 		delta -= delta_w;
 
@@ -2600,341 +2599,186 @@
 		periods = delta / 1024;
 		delta %= 1024;
 
-		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
-						  periods + 1);
-		sa->running_avg_sum = decay_load(sa->running_avg_sum,
-						  periods + 1);
-		sa->avg_period = decay_load(sa->avg_period,
-						     periods + 1);
+		sa->load_sum = decay_load(sa->load_sum, periods + 1);
+		if (cfs_rq) {
+			cfs_rq->runnable_load_sum =
+				decay_load(cfs_rq->runnable_load_sum, periods + 1);
+		}
+		sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
 
 		/* Efficiently calculate \sum (1..n_period) 1024*y^i */
-		runnable_contrib = __compute_runnable_contrib(periods);
-		if (runnable)
-			sa->runnable_avg_sum += runnable_contrib;
+		contrib = __compute_runnable_contrib(periods);
+		if (weight) {
+			sa->load_sum += weight * contrib;
+			if (cfs_rq)
+				cfs_rq->runnable_load_sum += weight * contrib;
+		}
 		if (running)
-			sa->running_avg_sum += runnable_contrib * scale_freq
-				>> SCHED_CAPACITY_SHIFT;
-		sa->avg_period += runnable_contrib;
+			sa->util_sum += contrib * scale_freq >> SCHED_CAPACITY_SHIFT;
 	}
 
 	/* Remainder of delta accrued against u_0` */
-	if (runnable)
-		sa->runnable_avg_sum += delta;
+	if (weight) {
+		sa->load_sum += weight * delta;
+		if (cfs_rq)
+			cfs_rq->runnable_load_sum += weight * delta;
+	}
 	if (running)
-		sa->running_avg_sum += delta * scale_freq
-			>> SCHED_CAPACITY_SHIFT;
-	sa->avg_period += delta;
+		sa->util_sum += delta * scale_freq >> SCHED_CAPACITY_SHIFT;
+
+	sa->period_contrib += delta;
+
+	if (decayed) {
+		sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
+		if (cfs_rq) {
+			cfs_rq->runnable_load_avg =
+				div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
+		}
+		sa->util_avg = (sa->util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX;
+	}
 
 	return decayed;
 }
 
-/* Synchronize an entity's decay with its parenting cfs_rq.*/
-static inline u64 __synchronize_entity_decay(struct sched_entity *se)
-{
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	u64 decays = atomic64_read(&cfs_rq->decay_counter);
-
-	decays -= se->avg.decay_count;
-	se->avg.decay_count = 0;
-	if (!decays)
-		return 0;
-
-	se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
-	se->avg.utilization_avg_contrib =
-		decay_load(se->avg.utilization_avg_contrib, decays);
-
-	return decays;
-}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
-						 int force_update)
-{
-	struct task_group *tg = cfs_rq->tg;
-	long tg_contrib;
-
-	tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
-	tg_contrib -= cfs_rq->tg_load_contrib;
-
-	if (!tg_contrib)
-		return;
-
-	if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
-		atomic_long_add(tg_contrib, &tg->load_avg);
-		cfs_rq->tg_load_contrib += tg_contrib;
-	}
-}
-
 /*
- * Aggregate cfs_rq runnable averages into an equivalent task_group
- * representation for computing load contributions.
+ * Updating tg's load_avg is necessary before update_cfs_shares() (which is
+ * done) and effective_load() (which is not done because it is too costly).
  */
-static inline void __update_tg_runnable_avg(struct sched_avg *sa,
-						  struct cfs_rq *cfs_rq)
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 {
-	struct task_group *tg = cfs_rq->tg;
-	long contrib;
+	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
 
-	/* The fraction of a cpu used by this cfs_rq */
-	contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
-			  sa->avg_period + 1);
-	contrib -= cfs_rq->tg_runnable_contrib;
-
-	if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
-		atomic_add(contrib, &tg->runnable_avg);
-		cfs_rq->tg_runnable_contrib += contrib;
+	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
+		atomic_long_add(delta, &cfs_rq->tg->load_avg);
+		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
 	}
 }
 
-static inline void __update_group_entity_contrib(struct sched_entity *se)
-{
-	struct cfs_rq *cfs_rq = group_cfs_rq(se);
-	struct task_group *tg = cfs_rq->tg;
-	int runnable_avg;
-
-	u64 contrib;
-
-	contrib = cfs_rq->tg_load_contrib * tg->shares;
-	se->avg.load_avg_contrib = div_u64(contrib,
-				     atomic_long_read(&tg->load_avg) + 1);
-
-	/*
-	 * For group entities we need to compute a correction term in the case
-	 * that they are consuming <1 cpu so that we would contribute the same
-	 * load as a task of equal weight.
-	 *
-	 * Explicitly co-ordinating this measurement would be expensive, but
-	 * fortunately the sum of each cpus contribution forms a usable
-	 * lower-bound on the true value.
-	 *
-	 * Consider the aggregate of 2 contributions.  Either they are disjoint
-	 * (and the sum represents true value) or they are disjoint and we are
-	 * understating by the aggregate of their overlap.
-	 *
-	 * Extending this to N cpus, for a given overlap, the maximum amount we
-	 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
-	 * cpus that overlap for this interval and w_i is the interval width.
-	 *
-	 * On a small machine; the first term is well-bounded which bounds the
-	 * total error since w_i is a subset of the period.  Whereas on a
-	 * larger machine, while this first term can be larger, if w_i is the
-	 * of consequential size guaranteed to see n_i*w_i quickly converge to
-	 * our upper bound of 1-cpu.
-	 */
-	runnable_avg = atomic_read(&tg->runnable_avg);
-	if (runnable_avg < NICE_0_LOAD) {
-		se->avg.load_avg_contrib *= runnable_avg;
-		se->avg.load_avg_contrib >>= NICE_0_SHIFT;
-	}
-}
-
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
-{
-	__update_entity_runnable_avg(rq_clock_task(rq), cpu_of(rq), &rq->avg,
-			runnable, runnable);
-	__update_tg_runnable_avg(&rq->avg, &rq->cfs);
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
-						 int force_update) {}
-static inline void __update_tg_runnable_avg(struct sched_avg *sa,
-						  struct cfs_rq *cfs_rq) {}
-static inline void __update_group_entity_contrib(struct sched_entity *se) {}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void __update_task_entity_contrib(struct sched_entity *se)
-{
-	u32 contrib;
-
-	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
-	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
-	contrib /= (se->avg.avg_period + 1);
-	se->avg.load_avg_contrib = scale_load(contrib);
-}
-
-/* Compute the current contribution to load_avg by se, return any delta */
-static long __update_entity_load_avg_contrib(struct sched_entity *se)
-{
-	long old_contrib = se->avg.load_avg_contrib;
-
-	if (entity_is_task(se)) {
-		__update_task_entity_contrib(se);
-	} else {
-		__update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
-		__update_group_entity_contrib(se);
-	}
-
-	return se->avg.load_avg_contrib - old_contrib;
-}
-
-
-static inline void __update_task_entity_utilization(struct sched_entity *se)
-{
-	u32 contrib;
-
-	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
-	contrib = se->avg.running_avg_sum * scale_load_down(SCHED_LOAD_SCALE);
-	contrib /= (se->avg.avg_period + 1);
-	se->avg.utilization_avg_contrib = scale_load(contrib);
-}
-
-static long __update_entity_utilization_avg_contrib(struct sched_entity *se)
-{
-	long old_contrib = se->avg.utilization_avg_contrib;
-
-	if (entity_is_task(se))
-		__update_task_entity_utilization(se);
-	else
-		se->avg.utilization_avg_contrib =
-					group_cfs_rq(se)->utilization_load_avg;
-
-	return se->avg.utilization_avg_contrib - old_contrib;
-}
-
-static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
-						 long load_contrib)
-{
-	if (likely(load_contrib < cfs_rq->blocked_load_avg))
-		cfs_rq->blocked_load_avg -= load_contrib;
-	else
-		cfs_rq->blocked_load_avg = 0;
-}
-
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 
-/* Update a sched_entity's runnable average */
-static inline void update_entity_load_avg(struct sched_entity *se,
-					  int update_cfs_rq)
+/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
+static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+{
+	int decayed;
+	struct sched_avg *sa = &cfs_rq->avg;
+
+	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
+		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+		sa->load_avg = max_t(long, sa->load_avg - r, 0);
+		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+	}
+
+	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
+		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
+		sa->util_avg = max_t(long, sa->util_avg - r, 0);
+		sa->util_sum = max_t(s32, sa->util_sum -
+			((r * LOAD_AVG_MAX) >> SCHED_LOAD_SHIFT), 0);
+	}
+
+	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->load_last_update_time_copy = sa->last_update_time;
+#endif
+
+	return decayed;
+}
+
+/* Update task and its cfs_rq load average */
+static inline void update_load_avg(struct sched_entity *se, int update_tg)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	long contrib_delta, utilization_delta;
 	int cpu = cpu_of(rq_of(cfs_rq));
-	u64 now;
+	u64 now = cfs_rq_clock_task(cfs_rq);
 
 	/*
-	 * For a group entity we need to use their owned cfs_rq_clock_task() in
-	 * case they are the parent of a throttled hierarchy.
+	 * Track the task's load average so it can be carried to its new CPU after
+	 * migration, and track the group sched_entity's load average for the
+	 * task_h_load calculation during migration.
 	 */
-	if (entity_is_task(se))
-		now = cfs_rq_clock_task(cfs_rq);
-	else
-		now = cfs_rq_clock_task(group_cfs_rq(se));
+	__update_load_avg(now, cpu, &se->avg,
+		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
 
-	if (!__update_entity_runnable_avg(now, cpu, &se->avg, se->on_rq,
-					cfs_rq->curr == se))
-		return;
+	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
+		update_tg_load_avg(cfs_rq, 0);
+}
 
-	contrib_delta = __update_entity_load_avg_contrib(se);
-	utilization_delta = __update_entity_utilization_avg_contrib(se);
+/* Add the load generated by se into cfs_rq's load average */
+static inline void
+enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	struct sched_avg *sa = &se->avg;
+	u64 now = cfs_rq_clock_task(cfs_rq);
+	int migrated = 0, decayed;
 
-	if (!update_cfs_rq)
-		return;
-
-	if (se->on_rq) {
-		cfs_rq->runnable_load_avg += contrib_delta;
-		cfs_rq->utilization_load_avg += utilization_delta;
-	} else {
-		subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
+	if (sa->last_update_time == 0) {
+		sa->last_update_time = now;
+		migrated = 1;
 	}
+	else {
+		__update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+			se->on_rq * scale_load_down(se->load.weight),
+			cfs_rq->curr == se, NULL);
+	}
+
+	decayed = update_cfs_rq_load_avg(now, cfs_rq);
+
+	cfs_rq->runnable_load_avg += sa->load_avg;
+	cfs_rq->runnable_load_sum += sa->load_sum;
+
+	if (migrated) {
+		cfs_rq->avg.load_avg += sa->load_avg;
+		cfs_rq->avg.load_sum += sa->load_sum;
+		cfs_rq->avg.util_avg += sa->util_avg;
+		cfs_rq->avg.util_sum += sa->util_sum;
+	}
+
+	if (decayed || migrated)
+		update_tg_load_avg(cfs_rq, 0);
+}
+
+/* Remove the runnable load generated by se from cfs_rq's runnable load average */
+static inline void
+dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	update_load_avg(se, 1);
+
+	cfs_rq->runnable_load_avg =
+		max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
+	cfs_rq->runnable_load_sum =
+		max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
 }
 
 /*
- * Decay the load contributed by all blocked children and account this so that
- * their contribution may appropriately discounted when they wake up.
+ * The task first catches up with the cfs_rq, and then subtracts
+ * itself from the cfs_rq (the task must be off the queue now).
  */
-static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
+void remove_entity_load_avg(struct sched_entity *se)
 {
-	u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
-	u64 decays;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 last_update_time;
 
-	decays = now - cfs_rq->last_decay;
-	if (!decays && !force_update)
-		return;
+#ifndef CONFIG_64BIT
+	u64 last_update_time_copy;
 
-	if (atomic_long_read(&cfs_rq->removed_load)) {
-		unsigned long removed_load;
-		removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
-		subtract_blocked_load_contrib(cfs_rq, removed_load);
-	}
+	do {
+		last_update_time_copy = cfs_rq->load_last_update_time_copy;
+		smp_rmb();
+		last_update_time = cfs_rq->avg.last_update_time;
+	} while (last_update_time != last_update_time_copy);
+#else
+	last_update_time = cfs_rq->avg.last_update_time;
+#endif
 
-	if (decays) {
-		cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
-						      decays);
-		atomic64_add(decays, &cfs_rq->decay_counter);
-		cfs_rq->last_decay = now;
-	}
-
-	__update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
-}
-
-/* Add the load generated by se into cfs_rq's child load-average */
-static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
-						  struct sched_entity *se,
-						  int wakeup)
-{
-	/*
-	 * We track migrations using entity decay_count <= 0, on a wake-up
-	 * migration we use a negative decay count to track the remote decays
-	 * accumulated while sleeping.
-	 *
-	 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
-	 * are seen by enqueue_entity_load_avg() as a migration with an already
-	 * constructed load_avg_contrib.
-	 */
-	if (unlikely(se->avg.decay_count <= 0)) {
-		se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
-		if (se->avg.decay_count) {
-			/*
-			 * In a wake-up migration we have to approximate the
-			 * time sleeping.  This is because we can't synchronize
-			 * clock_task between the two cpus, and it is not
-			 * guaranteed to be read-safe.  Instead, we can
-			 * approximate this using our carried decays, which are
-			 * explicitly atomically readable.
-			 */
-			se->avg.last_runnable_update -= (-se->avg.decay_count)
-							<< 20;
-			update_entity_load_avg(se, 0);
-			/* Indicate that we're now synchronized and on-rq */
-			se->avg.decay_count = 0;
-		}
-		wakeup = 0;
-	} else {
-		__synchronize_entity_decay(se);
-	}
-
-	/* migrated tasks did not contribute to our blocked load */
-	if (wakeup) {
-		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
-		update_entity_load_avg(se, 0);
-	}
-
-	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
-	cfs_rq->utilization_load_avg += se->avg.utilization_avg_contrib;
-	/* we force update consideration on load-balancer moves */
-	update_cfs_rq_blocked_load(cfs_rq, !wakeup);
-}
-
-/*
- * Remove se's load from this cfs_rq child load-average, if the entity is
- * transitioning to a blocked state we track its projected decay using
- * blocked_load_avg.
- */
-static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
-						  struct sched_entity *se,
-						  int sleep)
-{
-	update_entity_load_avg(se, 1);
-	/* we force update consideration on load-balancer moves */
-	update_cfs_rq_blocked_load(cfs_rq, !sleep);
-
-	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
-	cfs_rq->utilization_load_avg -= se->avg.utilization_avg_contrib;
-	if (sleep) {
-		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
-		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
-	} /* migrations, e.g. sleep=0 leave decay_count == 0 */
+	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
+	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
 }
 
 /*
@@ -2944,7 +2788,6 @@
  */
 void idle_enter_fair(struct rq *this_rq)
 {
-	update_rq_runnable_avg(this_rq, 1);
 }
 
 /*
@@ -2954,24 +2797,28 @@
  */
 void idle_exit_fair(struct rq *this_rq)
 {
-	update_rq_runnable_avg(this_rq, 0);
+}
+
+static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->runnable_load_avg;
+}
+
+static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->avg.load_avg;
 }
 
 static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_entity_load_avg(struct sched_entity *se,
-					  int update_cfs_rq) {}
-static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
-static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
-					   struct sched_entity *se,
-					   int wakeup) {}
-static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
-					   struct sched_entity *se,
-					   int sleep) {}
-static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
-					      int force_update) {}
+static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void
+enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void
+dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
+static inline void remove_entity_load_avg(struct sched_entity *se) {}
 
 static inline int idle_balance(struct rq *rq)
 {
@@ -3103,7 +2950,7 @@
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+	enqueue_entity_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
 
@@ -3178,7 +3025,7 @@
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
+	dequeue_entity_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se);
 	if (flags & DEQUEUE_SLEEP) {
@@ -3268,7 +3115,7 @@
 		 */
 		update_stats_wait_end(cfs_rq, se);
 		__dequeue_entity(cfs_rq, se);
-		update_entity_load_avg(se, 1);
+		update_load_avg(se, 1);
 	}
 
 	update_stats_curr_start(cfs_rq, se);
@@ -3368,7 +3215,7 @@
 		/* Put 'current' back into the tree. */
 		__enqueue_entity(cfs_rq, prev);
 		/* in !on_rq case, update occurred at dequeue */
-		update_entity_load_avg(prev, 1);
+		update_load_avg(prev, 0);
 	}
 	cfs_rq->curr = NULL;
 }
@@ -3384,8 +3231,7 @@
 	/*
 	 * Ensure that runnable average is periodically updated.
 	 */
-	update_entity_load_avg(curr, 1);
-	update_cfs_rq_blocked_load(cfs_rq, 1);
+	update_load_avg(curr, 1);
 	update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
@@ -4258,14 +4104,13 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
+		update_load_avg(se, 1);
 		update_cfs_shares(cfs_rq);
-		update_entity_load_avg(se, 1);
 	}
 
-	if (!se) {
-		update_rq_runnable_avg(rq, rq->nr_running);
+	if (!se)
 		add_nr_running(rq, 1);
-	}
+
 	hrtick_update(rq);
 }
 
@@ -4319,14 +4164,13 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 
+		update_load_avg(se, 1);
 		update_cfs_shares(cfs_rq);
-		update_entity_load_avg(se, 1);
 	}
 
-	if (!se) {
+	if (!se)
 		sub_nr_running(rq, 1);
-		update_rq_runnable_avg(rq, 1);
-	}
+
 	hrtick_update(rq);
 }
 
@@ -4439,6 +4283,12 @@
 	sched_avg_update(this_rq);
 }
 
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
+}
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
@@ -4460,7 +4310,7 @@
 static void update_idle_cpu_load(struct rq *this_rq)
 {
 	unsigned long curr_jiffies = READ_ONCE(jiffies);
-	unsigned long load = this_rq->cfs.runnable_load_avg;
+	unsigned long load = weighted_cpuload(cpu_of(this_rq));
 	unsigned long pending_updates;
 
 	/*
@@ -4506,7 +4356,7 @@
  */
 void update_cpu_load_active(struct rq *this_rq)
 {
-	unsigned long load = this_rq->cfs.runnable_load_avg;
+	unsigned long load = weighted_cpuload(cpu_of(this_rq));
 	/*
 	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
 	 */
@@ -4514,12 +4364,6 @@
 	__update_cpu_load(this_rq, load, 1);
 }
 
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
-	return cpu_rq(cpu)->cfs.runnable_load_avg;
-}
-
 /*
  * Return a low guess at the load of a migration-source cpu weighted
  * according to the scheduling class and "nice" value.
@@ -4567,7 +4411,7 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
-	unsigned long load_avg = rq->cfs.runnable_load_avg;
+	unsigned long load_avg = weighted_cpuload(cpu);
 
 	if (nr_running)
 		return load_avg / nr_running;
@@ -4686,7 +4530,7 @@
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = se->my_q->load.weight + wl;
+		w = cfs_rq_load_avg(se->my_q) + wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
@@ -4707,7 +4551,7 @@
 		/*
 		 * wl = dw_i = S * (s'_i - s_i); see (3)
 		 */
-		wl -= se->load.weight;
+		wl -= se->avg.load_avg;
 
 		/*
 		 * Recursively apply this logic to all parent groups to compute
@@ -4730,26 +4574,29 @@
 
 #endif
 
+/*
+ * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
+ * A waker of many should wake a different task than the one last awakened
+ * at a frequency roughly N times higher than one of its wakees.  In order
+ * to determine whether we should let the load spread vs. consolidating to
+ * shared cache, we look for a minimum 'flip' frequency of llc_size in one
+ * partner, and a factor of llc_size higher frequency in the other.  With
+ * both conditions met, we can be relatively sure that the relationship is
+ * non-monogamous, with partner count exceeding socket size.  Waker/wakee
+ * being client/server, worker/dispatcher, interrupt source or whatever is
+ * irrelevant; the spread criterion is simply that the apparent partner count
+ * exceeds the socket size.
+ */
 static int wake_wide(struct task_struct *p)
 {
+	unsigned int master = current->wakee_flips;
+	unsigned int slave = p->wakee_flips;
 	int factor = this_cpu_read(sd_llc_size);
 
-	/*
-	 * Yeah, it's the switching-frequency, could means many wakee or
-	 * rapidly switch, use factor here will just help to automatically
-	 * adjust the loose-degree, so bigger node will lead to more pull.
-	 */
-	if (p->wakee_flips > factor) {
-		/*
-		 * wakee is somewhat hot, it needs certain amount of cpu
-		 * resource, so if waker is far more hot, prefer to leave
-		 * it alone.
-		 */
-		if (current->wakee_flips > (factor * p->wakee_flips))
-			return 1;
-	}
-
-	return 0;
+	if (master < slave)
+		swap(master, slave);
+	if (slave < factor || master < slave * factor)
+		return 0;
+	return 1;
 }
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
@@ -4761,13 +4608,6 @@
 	unsigned long weight;
 	int balanced;
 
-	/*
-	 * If we wake multiple tasks be careful to not bounce
-	 * ourselves around too much.
-	 */
-	if (wake_wide(p))
-		return 0;
-
 	idx	  = sd->wake_idx;
 	this_cpu  = smp_processor_id();
 	prev_cpu  = task_cpu(p);
@@ -4781,14 +4621,14 @@
 	 */
 	if (sync) {
 		tg = task_group(current);
-		weight = current->se.load.weight;
+		weight = current->se.avg.load_avg;
 
 		this_load += effective_load(tg, this_cpu, -weight, -weight);
 		load += effective_load(tg, prev_cpu, 0, -weight);
 	}
 
 	tg = task_group(p);
-	weight = p->se.load.weight;
+	weight = p->se.avg.load_avg;
 
 	/*
 	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
@@ -4981,12 +4821,12 @@
  * tasks. The unit of the return value must be the one of capacity so we can
  * compare the usage with the capacity of the CPU that is available for CFS
  * task (ie cpu_capacity).
- * cfs.utilization_load_avg is the sum of running time of runnable tasks on a
+ * cfs.avg.util_avg is the sum of running time of runnable tasks on a
  * CPU. It represents the amount of utilization of a CPU in the range
  * [0..SCHED_LOAD_SCALE].  The usage of a CPU can't be higher than the full
  * capacity of the CPU because it's about the running time on this CPU.
- * Nevertheless, cfs.utilization_load_avg can be higher than SCHED_LOAD_SCALE
- * because of unfortunate rounding in avg_period and running_load_avg or just
+ * Nevertheless, cfs.avg.util_avg can be higher than SCHED_LOAD_SCALE
+ * because of unfortunate rounding in util_avg or just
  * after migrating tasks until the average stabilizes with the new running
  * time. So we need to check that the usage stays into the range
  * [0..cpu_capacity_orig] and cap if necessary.
@@ -4995,7 +4835,7 @@
  */
 static int get_cpu_usage(int cpu)
 {
-	unsigned long usage = cpu_rq(cpu)->cfs.utilization_load_avg;
+	unsigned long usage = cpu_rq(cpu)->cfs.avg.util_avg;
 	unsigned long capacity = capacity_orig_of(cpu);
 
 	if (usage >= SCHED_LOAD_SCALE)
@@ -5021,17 +4861,17 @@
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();
-	int new_cpu = cpu;
+	int new_cpu = prev_cpu;
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE)
-		want_affine = cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
+		want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
 
 	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
 		if (!(tmp->flags & SD_LOAD_BALANCE))
-			continue;
+			break;
 
 		/*
 		 * If both cpu and prev_cpu are part of this domain,
@@ -5045,17 +4885,21 @@
 
 		if (tmp->flags & sd_flag)
 			sd = tmp;
+		else if (!want_affine)
+			break;
 	}
 
-	if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
-		prev_cpu = cpu;
-
-	if (sd_flag & SD_BALANCE_WAKE) {
-		new_cpu = select_idle_sibling(p, prev_cpu);
-		goto unlock;
+	if (affine_sd) {
+		sd = NULL; /* Prefer wake_affine over balance flags */
+		if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
+			new_cpu = cpu;
 	}
 
-	while (sd) {
+	if (!sd) {
+		if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
+			new_cpu = select_idle_sibling(p, new_cpu);
+
+	} else while (sd) {
 		struct sched_group *group;
 		int weight;
 
@@ -5089,7 +4933,6 @@
 		}
 		/* while loop will break here if sd == NULL */
 	}
-unlock:
 	rcu_read_unlock();
 
 	return new_cpu;
@@ -5101,26 +4944,27 @@
  * previous cpu.  However, the caller only guarantees p->pi_lock is held; no
  * other assumptions, including the state of rq->lock, should be made.
  */
-static void
-migrate_task_rq_fair(struct task_struct *p, int next_cpu)
+static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 {
-	struct sched_entity *se = &p->se;
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-
 	/*
-	 * Load tracking: accumulate removed load so that it can be processed
-	 * when we next update owning cfs_rq under rq->lock.  Tasks contribute
-	 * to blocked load iff they have a positive decay-count.  It can never
-	 * be negative here since on-rq tasks have decay-count == 0.
+	 * We are supposed to update the task to "current" time, so that it is up to
+	 * date and ready to go to its new CPU/cfs_rq. But we have no reliable way of
+	 * getting the current time here, so simply throw away the out-of-date time.
+	 * This results in the wakee task being less decayed, and giving the wakee a
+	 * bit more load is not a bad trade-off.
 	 */
-	if (se->avg.decay_count) {
-		se->avg.decay_count = -__synchronize_entity_decay(se);
-		atomic_long_add(se->avg.load_avg_contrib,
-						&cfs_rq->removed_load);
-	}
+	remove_entity_load_avg(&p->se);
+
+	/* Tell new CPU we are migrated */
+	p->se.avg.last_update_time = 0;
 
 	/* We have migrated, no longer consider this task hot */
-	se->exec_start = 0;
+	p->se.exec_start = 0;
+}
+
+static void task_dead_fair(struct task_struct *p)
+{
+	remove_entity_load_avg(&p->se);
 }
 #endif /* CONFIG_SMP */
 
@@ -5670,72 +5514,39 @@
 
 #ifdef CONFIG_NUMA_BALANCING
 /*
- * Returns true if the destination node is the preferred node.
- * Needs to match fbq_classify_rq(): if there is a runnable task
- * that is not on its preferred node, we should identify it.
+ * Returns 1, if task migration degrades locality
+ * Returns 0, if task migration improves locality, i.e. migration is preferred.
+ * Returns -1, if task migration is not affected by locality.
  */
-static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
+static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
 	struct numa_group *numa_group = rcu_dereference(p->numa_group);
 	unsigned long src_faults, dst_faults;
 	int src_nid, dst_nid;
 
-	if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
-	    !(env->sd->flags & SD_NUMA)) {
-		return false;
-	}
-
-	src_nid = cpu_to_node(env->src_cpu);
-	dst_nid = cpu_to_node(env->dst_cpu);
-
-	if (src_nid == dst_nid)
-		return false;
-
-	/* Encourage migration to the preferred node. */
-	if (dst_nid == p->numa_preferred_nid)
-		return true;
-
-	/* Migrating away from the preferred node is bad. */
-	if (src_nid == p->numa_preferred_nid)
-		return false;
-
-	if (numa_group) {
-		src_faults = group_faults(p, src_nid);
-		dst_faults = group_faults(p, dst_nid);
-	} else {
-		src_faults = task_faults(p, src_nid);
-		dst_faults = task_faults(p, dst_nid);
-	}
-
-	return dst_faults > src_faults;
-}
-
-
-static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
-{
-	struct numa_group *numa_group = rcu_dereference(p->numa_group);
-	unsigned long src_faults, dst_faults;
-	int src_nid, dst_nid;
-
-	if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
-		return false;
-
 	if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
-		return false;
+		return -1;
+
+	if (!sched_feat(NUMA))
+		return -1;
 
 	src_nid = cpu_to_node(env->src_cpu);
 	dst_nid = cpu_to_node(env->dst_cpu);
 
 	if (src_nid == dst_nid)
-		return false;
+		return -1;
 
-	/* Migrating away from the preferred node is bad. */
-	if (src_nid == p->numa_preferred_nid)
-		return true;
+	/* Migrating away from the preferred node is always bad. */
+	if (src_nid == p->numa_preferred_nid) {
+		if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
+			return 1;
+		else
+			return -1;
+	}
 
 	/* Encourage migration to the preferred node. */
 	if (dst_nid == p->numa_preferred_nid)
-		return false;
+		return 0;
 
 	if (numa_group) {
 		src_faults = group_faults(p, src_nid);
@@ -5749,16 +5560,10 @@
 }
 
 #else
-static inline bool migrate_improves_locality(struct task_struct *p,
+static inline int migrate_degrades_locality(struct task_struct *p,
 					     struct lb_env *env)
 {
-	return false;
-}
-
-static inline bool migrate_degrades_locality(struct task_struct *p,
-					     struct lb_env *env)
-{
-	return false;
+	return -1;
 }
 #endif
 
@@ -5768,7 +5573,7 @@
 static
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
-	int tsk_cache_hot = 0;
+	int tsk_cache_hot;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
@@ -5826,13 +5631,13 @@
 	 * 2) task is cache cold, or
 	 * 3) too many balance attempts have failed.
 	 */
-	tsk_cache_hot = task_hot(p, env);
-	if (!tsk_cache_hot)
-		tsk_cache_hot = migrate_degrades_locality(p, env);
+	tsk_cache_hot = migrate_degrades_locality(p, env);
+	if (tsk_cache_hot == -1)
+		tsk_cache_hot = task_hot(p, env);
 
-	if (migrate_improves_locality(p, env) || !tsk_cache_hot ||
+	if (tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
-		if (tsk_cache_hot) {
+		if (tsk_cache_hot == 1) {
 			schedstat_inc(env->sd, lb_hot_gained[env->idle]);
 			schedstat_inc(p, se.statistics.nr_forced_migrations);
 		}
@@ -5906,6 +5711,13 @@
 		return 0;
 
 	while (!list_empty(tasks)) {
+		/*
+		 * We don't want to steal all the tasks; otherwise we may be treated
+		 * likewise, which could at worst lead to a livelock.
+		 */
+		if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
+			break;
+
 		p = list_first_entry(tasks, struct task_struct, se.group_node);
 
 		env->loop++;
@@ -6015,39 +5827,6 @@
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-/*
- * update tg->load_weight by folding this cpu's load_avg
- */
-static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
-{
-	struct sched_entity *se = tg->se[cpu];
-	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
-
-	/* throttled entities do not contribute to load */
-	if (throttled_hierarchy(cfs_rq))
-		return;
-
-	update_cfs_rq_blocked_load(cfs_rq, 1);
-
-	if (se) {
-		update_entity_load_avg(se, 1);
-		/*
-		 * We pivot on our runnable average having decayed to zero for
-		 * list removal.  This generally implies that all our children
-		 * have also been removed (modulo rounding error or bandwidth
-		 * control); however, such cases are rare and we can fix these
-		 * at enqueue.
-		 *
-		 * TODO: fix up out-of-order children on enqueue.
-		 */
-		if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
-			list_del_leaf_cfs_rq(cfs_rq);
-	} else {
-		struct rq *rq = rq_of(cfs_rq);
-		update_rq_runnable_avg(rq, rq->nr_running);
-	}
-}
-
 static void update_blocked_averages(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -6056,19 +5835,19 @@
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
+
 	/*
 	 * Iterates the task_group tree in a bottom up fashion, see
 	 * list_add_leaf_cfs_rq() for details.
 	 */
 	for_each_leaf_cfs_rq(rq, cfs_rq) {
-		/*
-		 * Note: We may want to consider periodically releasing
-		 * rq->lock about these updates so that creating many task
-		 * groups does not result in continually extending hold time.
-		 */
-		__update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
-	}
+		/* throttled entities do not contribute to load */
+		if (throttled_hierarchy(cfs_rq))
+			continue;
 
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+			update_tg_load_avg(cfs_rq, 0);
+	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
@@ -6096,14 +5875,14 @@
 	}
 
 	if (!se) {
-		cfs_rq->h_load = cfs_rq->runnable_load_avg;
+		cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
 		cfs_rq->last_h_load_update = now;
 	}
 
 	while ((se = cfs_rq->h_load_next) != NULL) {
 		load = cfs_rq->h_load;
-		load = div64_ul(load * se->avg.load_avg_contrib,
-				cfs_rq->runnable_load_avg + 1);
+		load = div64_ul(load * se->avg.load_avg,
+			cfs_rq_load_avg(cfs_rq) + 1);
 		cfs_rq = group_cfs_rq(se);
 		cfs_rq->h_load = load;
 		cfs_rq->last_h_load_update = now;
@@ -6115,17 +5894,25 @@
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
 
 	update_cfs_rq_h_load(cfs_rq);
-	return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
-			cfs_rq->runnable_load_avg + 1);
+	return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
+			cfs_rq_load_avg(cfs_rq) + 1);
 }
 #else
 static inline void update_blocked_averages(int cpu)
 {
+	struct rq *rq = cpu_rq(cpu);
+	struct cfs_rq *cfs_rq = &rq->cfs;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	update_rq_clock(rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static unsigned long task_h_load(struct task_struct *p)
 {
-	return p->se.avg.load_avg_contrib;
+	return p->se.avg.load_avg;
 }
 #endif
 
@@ -8025,8 +7812,6 @@
 
 	if (numabalancing_enabled)
 		task_tick_numa(rq, curr);
-
-	update_rq_runnable_avg(rq, 1);
 }
 
 /*
@@ -8125,15 +7910,18 @@
 	}
 
 #ifdef CONFIG_SMP
-	/*
-	* Remove our load from contribution when we leave sched_fair
-	* and ensure we don't carry in an old decay_count if we
-	* switch back.
-	*/
-	if (se->avg.decay_count) {
-		__synchronize_entity_decay(se);
-		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
-	}
+	/* Catch up with the cfs_rq and remove our load when we leave */
+	__update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
+		se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
+
+	cfs_rq->avg.load_avg =
+		max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
+	cfs_rq->avg.load_sum =
+		max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
+	cfs_rq->avg.util_avg =
+		max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
+	cfs_rq->avg.util_sum =
+		max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
 #endif
 }
 
@@ -8142,16 +7930,31 @@
  */
 static void switched_to_fair(struct rq *rq, struct task_struct *p)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
 	struct sched_entity *se = &p->se;
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	/*
 	 * Since the real-depth could have been changed (only FAIR
 	 * class maintain depth value), reset depth properly.
 	 */
 	se->depth = se->parent ? se->parent->depth + 1 : 0;
 #endif
-	if (!task_on_rq_queued(p))
+
+	if (!task_on_rq_queued(p)) {
+
+		/*
+		 * Ensure the task has a non-normalized vruntime when it is switched
+		 * back to the fair class with !queued, so that enqueue_entity() at
+		 * wake-up time will do the right thing.
+		 *
+		 * If it's queued, then the enqueue_entity(.flags=0) makes the task
+		 * If it's queued, then enqueue_entity(.flags=0) gives the task a
+		 * non-normalized vruntime; if it's !queued, then it still has a
+		 * normalized vruntime.
+		if (p->state != TASK_RUNNING)
+			se->vruntime += cfs_rq_of(se)->min_vruntime;
 		return;
+	}
 
 	/*
 	 * We were most likely switched from sched_rt, so
@@ -8190,8 +7993,8 @@
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
 #endif
 #ifdef CONFIG_SMP
-	atomic64_set(&cfs_rq->decay_counter, 1);
-	atomic_long_set(&cfs_rq->removed_load, 0);
+	atomic_long_set(&cfs_rq->removed_load_avg, 0);
+	atomic_long_set(&cfs_rq->removed_util_avg, 0);
 #endif
 }
 
@@ -8236,14 +8039,14 @@
 	if (!queued) {
 		cfs_rq = cfs_rq_of(se);
 		se->vruntime += cfs_rq->min_vruntime;
+
 #ifdef CONFIG_SMP
-		/*
-		 * migrate_task_rq_fair() will have removed our previous
-		 * contribution, but we must synchronize for ongoing future
-		 * decay.
-		 */
-		se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
-		cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
+		/* Virtually synchronize task with its new cfs_rq */
+		p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
+		cfs_rq->avg.load_avg += p->se.avg.load_avg;
+		cfs_rq->avg.load_sum += p->se.avg.load_sum;
+		cfs_rq->avg.util_avg += p->se.avg.util_avg;
+		cfs_rq->avg.util_sum += p->se.avg.util_sum;
 #endif
 	}
 }
@@ -8257,8 +8060,11 @@
 	for_each_possible_cpu(i) {
 		if (tg->cfs_rq)
 			kfree(tg->cfs_rq[i]);
-		if (tg->se)
+		if (tg->se) {
+			if (tg->se[i])
+				remove_entity_load_avg(tg->se[i]);
 			kfree(tg->se[i]);
+		}
 	}
 
 	kfree(tg->cfs_rq);
@@ -8295,6 +8101,7 @@
 
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+		init_entity_runnable_average(se);
 	}
 
 	return 1;
@@ -8444,6 +8251,8 @@
 	.rq_offline		= rq_offline_fair,
 
 	.task_waking		= task_waking_fair,
+	.task_dead		= task_dead_fair,
+	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
 	.set_curr_task          = set_curr_task_fair,
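
The fair.c changes above replace the old per-entity runnable/running sums with a single struct sched_avg driven by the LOAD_AVG_* constants. As an illustrative aside (stand-alone program, not kernel code), those constants follow from choosing a decay factor y with y^32 = 1/2: each full 1024us period contributes 1024, older periods are scaled by a further factor of y per period, and the resulting geometric series saturates near LOAD_AVG_MAX after roughly LOAD_AVG_MAX_N periods.

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* LOAD_AVG_PERIOD = 32 => y^32 = 1/2 */
	double sum = 0.0;
	int n;

	for (n = 0; n < 345; n++)		/* LOAD_AVG_MAX_N full periods */
		sum = sum * y + 1024.0;		/* decay the history, accrue one period */

	printf("y          = %.6f\n", y);			/* ~0.978572 */
	printf("1024/(1-y) = %.1f\n", 1024.0 / (1.0 - y));	/* ~47788, the series limit */
	printf("sum(345)   = %.1f\n", sum);			/* ~47761 in floating point;
								   the kernel's integer math
								   lands at LOAD_AVG_MAX = 47742 */
	return 0;
}

The rewritten wake_wide() above is similarly just arithmetic: with sd_llc_size = 4, a waker that has flipped wakees 40 times and a wakee with 5 flips gives master = 40, slave = 5; since slave >= factor and master >= slave * factor, the pair is considered "wide" and the affine-wakeup path is skipped.
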
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 91e33cd..83a50e7 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -79,20 +79,12 @@
  * numa_balancing=
  */
 #ifdef CONFIG_NUMA_BALANCING
-SCHED_FEAT(NUMA,	false)
 
 /*
- * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
- * higher number of hinting faults are recorded during active load
- * balancing.
+ * NUMA will favor moving tasks towards nodes where a higher number of
+ * hinting faults are recorded during active load balancing. It will
+ * resist moving tasks towards nodes where a lower number of hinting
+ * faults have been recorded.
  */
-SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)
-
-/*
- * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
- * lower number of hinting faults have been recorded. As this has
- * the potential to prevent a task ever migrating to a new node
- * due to CPU overload it is disabled by default.
- */
-SCHED_FEAT(NUMA_RESIST_LOWER, false)
+SCHED_FEAT(NUMA,	true)
 #endif
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 594275e..8f177c7 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -83,10 +83,13 @@
  */
 void default_idle_call(void)
 {
-	if (current_clr_polling_and_test())
+	if (current_clr_polling_and_test()) {
 		local_irq_enable();
-	else
+	} else {
+		stop_critical_timings();
 		arch_cpu_idle();
+		start_critical_timings();
+	}
 }
 
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
@@ -141,12 +144,6 @@
 	}
 
 	/*
-	 * During the idle period, stop measuring the disabled irqs
-	 * critical sections latencies
-	 */
-	stop_critical_timings();
-
-	/*
 	 * Tell the RCU framework we are entering an idle section,
 	 * so no more rcu read side critical sections and one more
 	 * step to the grace period
@@ -198,7 +195,6 @@
 		local_irq_enable();
 
 	rcu_idle_exit();
-	start_critical_timings();
 }
 
 DEFINE_PER_CPU(bool, cpu_dead_idle);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index c65dac8..c4ae0f1 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -96,6 +96,7 @@
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
+	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
 	.set_curr_task          = set_curr_task_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 0d193a24..d2ea593 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2069,7 +2069,6 @@
 {
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
-	    has_pushable_tasks(rq) &&
 	    p->nr_cpus_allowed > 1 &&
 	    (dl_task(rq->curr) || rt_task(rq->curr)) &&
 	    (rq->curr->nr_cpus_allowed < 2 ||
@@ -2077,45 +2076,6 @@
 		push_rt_tasks(rq);
 }
 
-static void set_cpus_allowed_rt(struct task_struct *p,
-				const struct cpumask *new_mask)
-{
-	struct rq *rq;
-	int weight;
-
-	BUG_ON(!rt_task(p));
-
-	if (!task_on_rq_queued(p))
-		return;
-
-	weight = cpumask_weight(new_mask);
-
-	/*
-	 * Only update if the process changes its state from whether it
-	 * can migrate or not.
-	 */
-	if ((p->nr_cpus_allowed > 1) == (weight > 1))
-		return;
-
-	rq = task_rq(p);
-
-	/*
-	 * The process used to be able to migrate OR it can now migrate
-	 */
-	if (weight <= 1) {
-		if (!task_current(rq, p))
-			dequeue_pushable_task(rq, p);
-		BUG_ON(!rq->rt.rt_nr_migratory);
-		rq->rt.rt_nr_migratory--;
-	} else {
-		if (!task_current(rq, p))
-			enqueue_pushable_task(rq, p);
-		rq->rt.rt_nr_migratory++;
-	}
-
-	update_rt_migration(&rq->rt);
-}
-
 /* Assumes rq->lock is held */
 static void rq_online_rt(struct rq *rq)
 {
@@ -2324,7 +2284,7 @@
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.set_cpus_allowed       = set_cpus_allowed_rt,
+	.set_cpus_allowed       = set_cpus_allowed_common,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.task_woken		= task_woken_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 84d4879..68cda11 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -245,7 +245,6 @@
 
 #ifdef	CONFIG_SMP
 	atomic_long_t load_avg;
-	atomic_t runnable_avg;
 #endif
 #endif
 
@@ -366,27 +365,20 @@
 
 #ifdef CONFIG_SMP
 	/*
-	 * CFS Load tracking
-	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
-	 * This allows for the description of both thread and group usage (in
-	 * the FAIR_GROUP_SCHED case).
-	 * runnable_load_avg is the sum of the load_avg_contrib of the
-	 * sched_entities on the rq.
-	 * blocked_load_avg is similar to runnable_load_avg except that its
-	 * the blocked sched_entities on the rq.
-	 * utilization_load_avg is the sum of the average running time of the
-	 * sched_entities on the rq.
+	 * CFS load tracking
 	 */
-	unsigned long runnable_load_avg, blocked_load_avg, utilization_load_avg;
-	atomic64_t decay_counter;
-	u64 last_decay;
-	atomic_long_t removed_load;
+	struct sched_avg avg;
+	u64 runnable_load_sum;
+	unsigned long runnable_load_avg;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	unsigned long tg_load_avg_contrib;
+#endif
+	atomic_long_t removed_load_avg, removed_util_avg;
+#ifndef CONFIG_64BIT
+	u64 load_last_update_time_copy;
+#endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	/* Required to track per-cpu representation of a task_group */
-	u32 tg_runnable_contrib;
-	unsigned long tg_load_contrib;
-
 	/*
 	 *   h_load = weight * f(tg)
 	 *
@@ -595,8 +587,6 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
 	struct list_head leaf_cfs_rq_list;
-
-	struct sched_avg avg;
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 	/*
@@ -1065,9 +1055,6 @@
 #ifndef prepare_arch_switch
 # define prepare_arch_switch(next)	do { } while (0)
 #endif
-#ifndef finish_arch_switch
-# define finish_arch_switch(prev)	do { } while (0)
-#endif
 #ifndef finish_arch_post_lock_switch
 # define finish_arch_post_lock_switch()	do { } while (0)
 #endif
@@ -1268,6 +1255,8 @@
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);
 
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+
 #else
 
 static inline void idle_enter_fair(struct rq *rq) { }
@@ -1319,7 +1308,7 @@
 
 unsigned long to_ratio(u64 period, u64 runtime);
 
-extern void init_task_runnable_average(struct task_struct *p);
+extern void init_entity_runnable_average(struct sched_entity *se);
 
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
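
The new load_last_update_time_copy field above exists only on 32-bit builds, where cfs_rq->avg.last_update_time cannot be read atomically: the writer in update_cfs_rq_load_avg() publishes a copy after an smp_wmb(), and the lock-free reader in remove_entity_load_avg() retries until the value and the copy agree. Below is a hedged user-space analogue of that idiom using C11 atomics purely for illustration; struct avg_times, publish_time() and read_time() are invented names, and in the kernel the value itself is a plain u64 whose possible tearing is exactly what the copy guards against.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct avg_times {
	_Atomic uint64_t last_update_time;	/* plain u64 in the kernel; may tear on 32-bit */
	_Atomic uint64_t last_update_time_copy;	/* published second, read first */
};

/* writer side, analogue of update_cfs_rq_load_avg(): value first, then the copy */
static void publish_time(struct avg_times *t, uint64_t now)
{
	atomic_store_explicit(&t->last_update_time, now, memory_order_relaxed);
	/* the release store plays the role of the kernel's smp_wmb() + copy update */
	atomic_store_explicit(&t->last_update_time_copy, now, memory_order_release);
}

/* reader side, analogue of remove_entity_load_avg(): retry until both halves agree */
static uint64_t read_time(struct avg_times *t)
{
	uint64_t copy, val;

	do {
		copy = atomic_load_explicit(&t->last_update_time_copy, memory_order_acquire);
		val  = atomic_load_explicit(&t->last_update_time, memory_order_relaxed);
	} while (val != copy);

	return val;
}

int main(void)
{
	struct avg_times t = { 0, 0 };

	publish_time(&t, 123456789ULL);
	printf("%llu\n", (unsigned long long)read_time(&t));
	return 0;
}
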
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 79ffec4..cbc67da 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -123,6 +123,7 @@
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_stop,
+	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
 
 	.set_curr_task          = set_curr_task_stop,
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index fd643d8..12484e5 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -35,13 +35,16 @@
 
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
+	struct task_struct	*thread;
+
 	spinlock_t		lock;
 	bool			enabled;	/* is this stopper enabled? */
 	struct list_head	works;		/* list of pending works */
+
+	struct cpu_stop_work	stop_work;	/* for stop_cpus */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
-static DEFINE_PER_CPU(struct task_struct *, cpu_stopper_task);
 static bool stop_machine_initialized = false;
 
 /*
@@ -74,7 +77,6 @@
 static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct task_struct *p = per_cpu(cpu_stopper_task, cpu);
 
 	unsigned long flags;
 
@@ -82,7 +84,7 @@
 
 	if (stopper->enabled) {
 		list_add_tail(&work->list, &stopper->works);
-		wake_up_process(p);
+		wake_up_process(stopper->thread);
 	} else
 		cpu_stop_signal_done(work->done, false);
 
@@ -139,7 +141,7 @@
 };
 
 struct multi_stop_data {
-	int			(*fn)(void *);
+	cpu_stop_fn_t		fn;
 	void			*data;
 	/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
 	unsigned int		num_threads;
@@ -293,7 +295,6 @@
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
-static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
 static void queue_stop_cpus_work(const struct cpumask *cpumask,
 				 cpu_stop_fn_t fn, void *arg,
@@ -302,22 +303,19 @@
 	struct cpu_stop_work *work;
 	unsigned int cpu;
 
-	/* initialize works and done */
-	for_each_cpu(cpu, cpumask) {
-		work = &per_cpu(stop_cpus_work, cpu);
-		work->fn = fn;
-		work->arg = arg;
-		work->done = done;
-	}
-
 	/*
 	 * Disable preemption while queueing to avoid getting
 	 * preempted by a stopper which might wait for other stoppers
 	 * to enter @fn which can lead to deadlock.
 	 */
 	lg_global_lock(&stop_cpus_lock);
-	for_each_cpu(cpu, cpumask)
-		cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
+	for_each_cpu(cpu, cpumask) {
+		work = &per_cpu(cpu_stopper.stop_work, cpu);
+		work->fn = fn;
+		work->arg = arg;
+		work->done = done;
+		cpu_stop_queue_work(cpu, work);
+	}
 	lg_global_unlock(&stop_cpus_lock);
 }
 
@@ -458,19 +456,21 @@
 
 static void cpu_stop_create(unsigned int cpu)
 {
-	sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu));
+	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
 }
 
 static void cpu_stop_park(unsigned int cpu)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	struct cpu_stop_work *work;
+	struct cpu_stop_work *work, *tmp;
 	unsigned long flags;
 
 	/* drain remaining works */
 	spin_lock_irqsave(&stopper->lock, flags);
-	list_for_each_entry(work, &stopper->works, list)
+	list_for_each_entry_safe(work, tmp, &stopper->works, list) {
+		list_del_init(&work->list);
 		cpu_stop_signal_done(work->done, false);
+	}
 	stopper->enabled = false;
 	spin_unlock_irqrestore(&stopper->lock, flags);
 }
@@ -485,7 +485,7 @@
 }
 
 static struct smp_hotplug_thread cpu_stop_threads = {
-	.store			= &cpu_stopper_task,
+	.store			= &cpu_stopper.thread,
 	.thread_should_run	= cpu_stop_should_run,
 	.thread_fn		= cpu_stopper_thread,
 	.thread_comm		= "migration/%u",
@@ -515,7 +515,7 @@
 
 #ifdef CONFIG_STOP_MACHINE
 
-int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 {
 	struct multi_stop_data msdata = {
 		.fn = fn,
@@ -548,7 +548,7 @@
 	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
 }
 
-int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
+int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
 {
 	int ret;
 
@@ -582,7 +582,7 @@
  * 0 if all executions of @fn returned 0, any non zero return value if any
  * returned non zero.
  */
-int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
 				  const struct cpumask *cpus)
 {
 	struct multi_stop_data msdata = { .fn = fn, .data = data,
diff --git a/kernel/sys.c b/kernel/sys.c
index 259fda2..fa2f2f6 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1668,8 +1668,7 @@
 	 * overall picture.
 	 */
 	err = -EACCES;
-	if (!S_ISREG(inode->i_mode)	||
-	    exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
 		goto exit;
 
 	err = inode_permission(inode, MAY_EXEC);
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 7995ef5..ca7d84f 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -140,6 +140,7 @@
 cond_syscall(sys_ssetmask);
 cond_syscall(sys_vm86old);
 cond_syscall(sys_vm86);
+cond_syscall(sys_modify_ldt);
 cond_syscall(sys_ipc);
 cond_syscall(compat_sys_ipc);
 cond_syscall(compat_sys_sysctl);
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 579ce1b..4008d9f 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -92,12 +92,10 @@
 	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
 	# We need at least one periodic CPU for timekeeping
 	depends on SMP
-	# RCU_USER_QS dependency
 	depends on HAVE_CONTEXT_TRACKING
 	# VIRT_CPU_ACCOUNTING_GEN dependency
 	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select NO_HZ_COMMON
-	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_WORK
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 5c7ae4b..457a373 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -183,7 +183,7 @@
 					 int pinned)
 {
 	if (pinned || !base->migration_enabled)
-		return this_cpu_ptr(&hrtimer_bases);
+		return base;
 	return &per_cpu(hrtimer_bases, get_nohz_timer_target());
 }
 #else
@@ -191,23 +191,32 @@
 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
 					 int pinned)
 {
-	return this_cpu_ptr(&hrtimer_bases);
+	return base;
 }
 #endif
 
 /*
- * Switch the timer base to the current CPU when possible.
+ * We switch the timer base to a power-optimized CPU target,
+ * if:
+ *	- NO_HZ_COMMON is enabled
+ *	- timer migration is enabled
+ *	- the timer callback is not running
+ *	- the timer is not the first expiring timer on the new target
+ *
+ * If one of the above requirements is not fulfilled we move the timer
+ * to the current CPU or leave it on the previously assigned CPU if
+ * the timer callback is currently running.
  */
 static inline struct hrtimer_clock_base *
 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 		    int pinned)
 {
-	struct hrtimer_cpu_base *new_cpu_base, *this_base;
+	struct hrtimer_cpu_base *new_cpu_base, *this_cpu_base;
 	struct hrtimer_clock_base *new_base;
 	int basenum = base->index;
 
-	this_base = this_cpu_ptr(&hrtimer_bases);
-	new_cpu_base = get_target_base(this_base, pinned);
+	this_cpu_base = this_cpu_ptr(&hrtimer_bases);
+	new_cpu_base = get_target_base(this_cpu_base, pinned);
 again:
 	new_base = &new_cpu_base->clock_base[basenum];
 
@@ -229,19 +238,19 @@
 		raw_spin_unlock(&base->cpu_base->lock);
 		raw_spin_lock(&new_base->cpu_base->lock);
 
-		if (new_cpu_base != this_base &&
+		if (new_cpu_base != this_cpu_base &&
 		    hrtimer_check_target(timer, new_base)) {
 			raw_spin_unlock(&new_base->cpu_base->lock);
 			raw_spin_lock(&base->cpu_base->lock);
-			new_cpu_base = this_base;
+			new_cpu_base = this_cpu_base;
 			timer->base = base;
 			goto again;
 		}
 		timer->base = new_base;
 	} else {
-		if (new_cpu_base != this_base &&
+		if (new_cpu_base != this_cpu_base &&
 		    hrtimer_check_target(timer, new_base)) {
-			new_cpu_base = this_base;
+			new_cpu_base = this_cpu_base;
 			goto again;
 		}
 	}
@@ -679,14 +688,14 @@
 /*
  * Switch to high resolution mode
  */
-static int hrtimer_switch_to_hres(void)
+static void hrtimer_switch_to_hres(void)
 {
 	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
 	if (tick_init_highres()) {
 		printk(KERN_WARNING "Could not switch to high resolution "
 				    "mode on CPU %d\n", base->cpu);
-		return 0;
+		return;
 	}
 	base->hres_active = 1;
 	hrtimer_resolution = HIGH_RES_NSEC;
@@ -694,7 +703,6 @@
 	tick_setup_sched_timer();
 	/* "Retrigger" the interrupt to get things going */
 	retrigger_next_event(NULL);
-	return 1;
 }
 
 static void clock_was_set_work(struct work_struct *work)
@@ -718,7 +726,7 @@
 static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
-static inline int hrtimer_switch_to_hres(void) { return 0; }
+static inline void hrtimer_switch_to_hres(void) { }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
 static inline int hrtimer_reprogram(struct hrtimer *timer,
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index fb4d98c..df68cb8 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -487,6 +487,11 @@
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
+int __weak update_persistent_clock(struct timespec now)
+{
+	return -ENODEV;
+}
+
 int __weak update_persistent_clock64(struct timespec64 now64)
 {
 	struct timespec now;
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index 3e7db49..53d7184 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -18,30 +18,23 @@
 
 static struct hrtimer bctimer;
 
-static void bc_set_mode(enum clock_event_mode mode,
-			struct clock_event_device *bc)
+static int bc_shutdown(struct clock_event_device *evt)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/*
-		 * Note, we cannot cancel the timer here as we might
-		 * run into the following live lock scenario:
-		 *
-		 * cpu 0		cpu1
-		 * lock(broadcast_lock);
-		 *			hrtimer_interrupt()
-		 *			bc_handler()
-		 *			   tick_handle_oneshot_broadcast();
-		 *			    lock(broadcast_lock);
-		 * hrtimer_cancel()
-		 *  wait_for_callback()
-		 */
-		hrtimer_try_to_cancel(&bctimer);
-		break;
-	default:
-		break;
-	}
+	/*
+	 * Note, we cannot cancel the timer here as we might
+	 * run into the following live lock scenario:
+	 *
+	 * cpu 0		cpu1
+	 * lock(broadcast_lock);
+	 *			hrtimer_interrupt()
+	 *			bc_handler()
+	 *			   tick_handle_oneshot_broadcast();
+	 *			    lock(broadcast_lock);
+	 * hrtimer_cancel()
+	 *  wait_for_callback()
+	 */
+	hrtimer_try_to_cancel(&bctimer);
+	return 0;
 }
 
 /*
@@ -82,7 +75,7 @@
 }
 
 static struct clock_event_device ce_broadcast_hrtimer = {
-	.set_mode		= bc_set_mode,
+	.set_state_shutdown	= bc_shutdown,
 	.set_next_ktime		= bc_set_next,
 	.features		= CLOCK_EVT_FEAT_ONESHOT |
 				  CLOCK_EVT_FEAT_KTIME |
@@ -102,13 +95,11 @@
 {
 	ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-	switch (ce_broadcast_hrtimer.mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
+	if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
 		if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
 			return HRTIMER_RESTART;
-	default:
-		return HRTIMER_NORESTART;
-	}
+
+	return HRTIMER_NORESTART;
 }
 
 void tick_setup_hrtimer_broadcast(void)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index f8bf475..d11c55b 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -304,9 +304,6 @@
 	int cpu;
 
 	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, newdev->cpumask))
-		goto out_bc;
-
 	td = &per_cpu(tick_cpu_device, cpu);
 	curdev = td->evtdev;
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c792429..3319e16 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -197,27 +197,9 @@
 	return true;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
-
-/*
- * Re-evaluate the need for the tick on the current CPU
- * and restart it if necessary.
- */
-void __tick_nohz_full_check(void)
-{
-	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
-
-	if (tick_nohz_full_cpu(smp_processor_id())) {
-		if (ts->tick_stopped && !is_idle_task(current)) {
-			if (!can_stop_full_tick())
-				tick_nohz_restart_sched_tick(ts, ktime_get());
-		}
-	}
-}
-
 static void nohz_full_kick_work_func(struct irq_work *work)
 {
-	__tick_nohz_full_check();
+	/* Empty, the tick restart happens in tick_nohz_irq_exit() */
 }
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -252,7 +234,7 @@
 
 static void nohz_full_kick_ipi(void *info)
 {
-	__tick_nohz_full_check();
+	/* Empty, the tick restart happens in tick_nohz_irq_exit() */
 }
 
 /*
@@ -276,7 +258,7 @@
  * It might need the tick due to per task/process properties:
  * perf events, posix cpu timers, ...
  */
-void __tick_nohz_task_switch(struct task_struct *tsk)
+void __tick_nohz_task_switch(void)
 {
 	unsigned long flags;
 
@@ -705,21 +687,38 @@
 	return tick;
 }
 
-static void tick_nohz_full_stop_tick(struct tick_sched *ts)
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+	/* Update jiffies first */
+	tick_do_update_jiffies64(now);
+	update_cpu_load_nohz();
+
+	calc_load_exit_idle();
+	touch_softlockup_watchdog();
+	/*
+	 * Cancel the scheduled timer and restore the tick
+	 */
+	ts->tick_stopped  = 0;
+	ts->idle_exittime = now;
+
+	tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_full_update_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
 	int cpu = smp_processor_id();
 
-	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+	if (!tick_nohz_full_cpu(cpu))
 		return;
 
 	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
 		return;
 
-	if (!can_stop_full_tick())
-		return;
-
-	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	if (can_stop_full_tick())
+		tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	else if (ts->tick_stopped)
+		tick_nohz_restart_sched_tick(ts, ktime_get());
 #endif
 }
 
@@ -849,7 +848,7 @@
 	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
 	else
-		tick_nohz_full_stop_tick(ts);
+		tick_nohz_full_update_tick(ts);
 }
 
 /**
@@ -864,23 +863,6 @@
 	return ts->sleep_length;
 }
 
-static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
-{
-	/* Update jiffies first */
-	tick_do_update_jiffies64(now);
-	update_cpu_load_nohz();
-
-	calc_load_exit_idle();
-	touch_softlockup_watchdog();
-	/*
-	 * Cancel the scheduled timer and restore the tick
-	 */
-	ts->tick_stopped  = 0;
-	ts->idle_exittime = now;
-
-	tick_nohz_restart(ts, now);
-}
-
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 85d5bb1..86751c6 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -268,10 +268,14 @@
 
 unsigned int jiffies_to_usecs(const unsigned long j)
 {
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+	/*
+	 * HZ usually doesn't go much higher than MSEC_PER_SEC.
+	 * jiffies_to_usecs() and usecs_to_jiffies() depend on that.
+	 */
+	BUILD_BUG_ON(HZ > USEC_PER_SEC);
+
+#if !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
 	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
@@ -287,26 +291,20 @@
  * @t: Timespec
  * @gran: Granularity in ns.
  *
- * Truncate a timespec to a granularity. gran must be smaller than a second.
- * Always rounds down.
- *
- * This function should be only used for timestamps returned by
- * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
- * it doesn't handle the better resolution of the latter.
+ * Truncate a timespec to a granularity. Always rounds down. gran must
+ * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
  */
 struct timespec timespec_trunc(struct timespec t, unsigned gran)
 {
-	/*
-	 * Division is pretty slow so avoid it for common cases.
-	 * Currently current_kernel_time() never returns better than
-	 * jiffies resolution. Exploit that.
-	 */
-	if (gran <= jiffies_to_usecs(1) * 1000) {
+	/* Avoid division in the common cases 1 ns and 1 s. */
+	if (gran == 1) {
 		/* nothing */
-	} else if (gran == 1000000000) {
+	} else if (gran == NSEC_PER_SEC) {
 		t.tv_nsec = 0;
-	} else {
+	} else if (gran > 1 && gran < NSEC_PER_SEC) {
 		t.tv_nsec -= t.tv_nsec % gran;
+	} else {
+		WARN(1, "illegal file time granularity: %u", gran);
 	}
 	return t;
 }
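A minimal usage sketch of the tightened timespec_trunc() contract above; the 1 ms granularity is a hypothetical filesystem timestamp resolution:

	struct timespec ts = { .tv_sec = 12, .tv_nsec = 345678901 };
	struct timespec t;

	/* rounds down to the granularity; gran must be in [1, NSEC_PER_SEC] */
	t = timespec_trunc(ts, 1000000);	/* t.tv_nsec == 345000000 */
	/* gran == 0 or gran > NSEC_PER_SEC now triggers the WARN above */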
@@ -546,7 +544,7 @@
  * value to a scaled second value.
  */
 static unsigned long
-__timespec_to_jiffies(unsigned long sec, long nsec)
+__timespec64_to_jiffies(u64 sec, long nsec)
 {
 	nsec = nsec + TICK_NSEC - 1;
 
@@ -554,22 +552,27 @@
 		sec = MAX_SEC_IN_JIFFIES;
 		nsec = 0;
 	}
-	return (((u64)sec * SEC_CONVERSION) +
+	return ((sec * SEC_CONVERSION) +
 		(((u64)nsec * NSEC_CONVERSION) >>
 		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
 
 }
 
-unsigned long
-timespec_to_jiffies(const struct timespec *value)
+static unsigned long
+__timespec_to_jiffies(unsigned long sec, long nsec)
 {
-	return __timespec_to_jiffies(value->tv_sec, value->tv_nsec);
+	return __timespec64_to_jiffies((u64)sec, nsec);
 }
 
-EXPORT_SYMBOL(timespec_to_jiffies);
+unsigned long
+timespec64_to_jiffies(const struct timespec64 *value)
+{
+	return __timespec64_to_jiffies(value->tv_sec, value->tv_nsec);
+}
+EXPORT_SYMBOL(timespec64_to_jiffies);
 
 void
-jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
+jiffies_to_timespec64(const unsigned long jiffies, struct timespec64 *value)
 {
 	/*
 	 * Convert jiffies to nanoseconds and separate with
@@ -580,7 +583,7 @@
 				    NSEC_PER_SEC, &rem);
 	value->tv_nsec = rem;
 }
-EXPORT_SYMBOL(jiffies_to_timespec);
+EXPORT_SYMBOL(jiffies_to_timespec64);
 
 /*
  * We could use a similar algorithm to timespec_to_jiffies (with a
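A small usage sketch of the 64-bit conversion helpers exported above; the exact jiffies value depends on HZ:

	struct timespec64 ts = { .tv_sec = 1, .tv_nsec = 500 * NSEC_PER_MSEC };
	struct timespec64 back;
	unsigned long j;

	j = timespec64_to_jiffies(&ts);		/* about 1.5 * HZ, rounded up to a tick */
	jiffies_to_timespec64(j, &back);	/* converted back, tick-granular */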
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index bca3667..f6ee2e6 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -911,6 +911,7 @@
 	struct timekeeper *tk = &tk_core.timekeeper;
 	struct timespec64 ts_delta, xt;
 	unsigned long flags;
+	int ret = 0;
 
 	if (!timespec64_valid_strict(ts))
 		return -EINVAL;
@@ -924,10 +925,15 @@
 	ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
 	ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
 
+	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
 
 	tk_set_xtime(tk, ts);
-
+out:
 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
 
 	write_seqcount_end(&tk_core.seq);
@@ -936,7 +942,7 @@
 	/* signal hrtimers about time change */
 	clock_was_set();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(do_settimeofday64);
 
@@ -965,7 +971,8 @@
 
 	/* Make sure the proposed value is valid */
 	tmp = timespec64_add(tk_xtime(tk),  ts64);
-	if (!timespec64_valid_strict(&tmp)) {
+	if (timespec64_compare(&tk->wall_to_monotonic, &ts64) > 0 ||
+	    !timespec64_valid_strict(&tmp)) {
 		ret = -EINVAL;
 		goto error;
 	}
@@ -1874,7 +1881,7 @@
 	return timespec64_to_timespec(tk_xtime(tk));
 }
 
-struct timespec current_kernel_time(void)
+struct timespec64 current_kernel_time64(void)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	struct timespec64 now;
@@ -1886,9 +1893,9 @@
 		now = tk_xtime(tk);
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	return timespec64_to_timespec(now);
+	return now;
 }
-EXPORT_SYMBOL(current_kernel_time);
+EXPORT_SYMBOL(current_kernel_time64);
 
 struct timespec64 get_monotonic_coarse64(void)
 {
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a4536e1..129c960 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -137,7 +137,7 @@
 		   (unsigned long long) ktime_to_ns(base->offset));
 #endif
 	SEQ_printf(m,   "active timers:\n");
-	print_active_timers(m, base, now);
+	print_active_timers(m, base, now + ktime_to_ns(base->offset));
 }
 
 static void print_cpu(struct seq_file *m, int cpu, u64 now)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 3b9a48a..1153c43 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -434,7 +434,7 @@
 
 config BPF_EVENTS
 	depends on BPF_SYSCALL
-	depends on KPROBE_EVENT
+	depends on KPROBE_EVENT || UPROBE_EVENT
 	bool
 	default y
 	help
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b3e6b39..90e72a0 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -778,9 +778,6 @@
 	if (likely(!bt))
 		return;
 
-	if (!error && !bio_flagged(bio, BIO_UPTODATE))
-		error = EIO;
-
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
 			bio->bi_rw, what, error, 0, NULL);
 }
@@ -887,8 +884,7 @@
 
 		__blk_add_trace(bt, bio->bi_iter.bi_sector,
 				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
-				!bio_flagged(bio, BIO_UPTODATE),
-				sizeof(rpdu), &rpdu);
+				bio->bi_error, sizeof(rpdu), &rpdu);
 	}
 }
 
@@ -920,8 +916,8 @@
 	r.sector_from = cpu_to_be64(from);
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio->bi_rw, BLK_TA_REMAP,
-			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+			bio->bi_rw, BLK_TA_REMAP, bio->bi_error,
+			sizeof(r), &r);
 }
 
 /**
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 88a041a..0fe96c7 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -81,13 +81,16 @@
 
 /*
  * limited trace_printk()
- * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed
+ * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
  */
 static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
 {
 	char *fmt = (char *) (long) r1;
+	bool str_seen = false;
 	int mod[3] = {};
 	int fmt_cnt = 0;
+	u64 unsafe_addr;
+	char buf[64];
 	int i;
 
 	/*
@@ -114,12 +117,37 @@
 		if (fmt[i] == 'l') {
 			mod[fmt_cnt]++;
 			i++;
-		} else if (fmt[i] == 'p') {
+		} else if (fmt[i] == 'p' || fmt[i] == 's') {
 			mod[fmt_cnt]++;
 			i++;
 			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
 				return -EINVAL;
 			fmt_cnt++;
+			if (fmt[i - 1] == 's') {
+				if (str_seen)
+					/* allow only one '%s' per fmt string */
+					return -EINVAL;
+				str_seen = true;
+
+				switch (fmt_cnt) {
+				case 1:
+					unsafe_addr = r3;
+					r3 = (long) buf;
+					break;
+				case 2:
+					unsafe_addr = r4;
+					r4 = (long) buf;
+					break;
+				case 3:
+					unsafe_addr = r5;
+					r5 = (long) buf;
+					break;
+				}
+				buf[0] = 0;
+				strncpy_from_unsafe(buf,
+						    (void *) (long) unsafe_addr,
+						    sizeof(buf));
+			}
 			continue;
 		}
 
@@ -158,6 +186,35 @@
 	return &bpf_trace_printk_proto;
 }
 
+static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
+{
+	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct perf_event *event;
+
+	if (unlikely(index >= array->map.max_entries))
+		return -E2BIG;
+
+	event = (struct perf_event *)array->ptrs[index];
+	if (!event)
+		return -ENOENT;
+
+	/*
+	 * The return value alone does not indicate whether the read
+	 * succeeded; callers such as eBPF programs can check for errors
+	 * themselves.
+	 */
+	return perf_event_read_local(event);
+}
+
+const struct bpf_func_proto bpf_perf_event_read_proto = {
+	.func		= bpf_perf_event_read,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
@@ -183,6 +240,8 @@
 		return bpf_get_trace_printk_proto();
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
+	case BPF_FUNC_perf_event_read:
+		return &bpf_perf_event_read_proto;
 	default:
 		return NULL;
 	}
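A minimal sketch of how the widened format support is used from an eBPF kprobe program; the helper declarations follow the samples/bpf style and the program itself is illustrative, not part of this patch:

	static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
		(void *) BPF_FUNC_trace_printk;
	static int (*bpf_get_current_comm)(void *buf, int buf_size) =
		(void *) BPF_FUNC_get_current_comm;

	int prog(struct pt_regs *ctx)
	{
		char fmt[] = "exec of %s\n";	/* at most one %s per format */
		char comm[16];

		bpf_get_current_comm(comm, sizeof(comm));
		/* the string is copied into a 64-byte on-stack buffer with
		 * strncpy_from_unsafe() before being handed to trace_printk()
		 */
		bpf_trace_printk(fmt, sizeof(fmt), comm);
		return 0;
	}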
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index b7d0cdd..c995644 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -165,11 +165,9 @@
 static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
 					    void *addr, void *dest)
 {
-	long ret;
 	int maxlen = get_rloc_len(*(u32 *)dest);
 	u8 *dst = get_rloc_data(dest);
-	u8 *src = addr;
-	mm_segment_t old_fs = get_fs();
+	long ret;
 
 	if (!maxlen)
 		return;
@@ -178,23 +176,13 @@
 	 * Try to get string again, since the string can be changed while
 	 * probing.
 	 */
-	set_fs(KERNEL_DS);
-	pagefault_disable();
-
-	do
-		ret = __copy_from_user_inatomic(dst++, src++, 1);
-	while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
-
-	dst[-1] = '\0';
-	pagefault_enable();
-	set_fs(old_fs);
+	ret = strncpy_from_unsafe(dst, addr, maxlen);
 
 	if (ret < 0) {	/* Failed to fetch string */
-		((u8 *)get_rloc_data(dest))[0] = '\0';
+		dst[0] = '\0';
 		*(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
 	} else {
-		*(u32 *)dest = make_data_rloc(src - (u8 *)addr,
-					      get_rloc_offs(*(u32 *)dest));
+		*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
 	}
 }
 NOKPROBE_SYMBOL(FETCH_FUNC_NAME(memory, string));
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 419ca37..f270088 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -26,7 +26,7 @@
 }
 
 static void
-probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
+probe_sched_wakeup(void *ignore, struct task_struct *wakee)
 {
 	if (unlikely(!sched_ref))
 		return;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 9b33dd1..12cbe77 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -514,7 +514,7 @@
 }
 
 static void
-probe_wakeup(void *ignore, struct task_struct *p, int success)
+probe_wakeup(void *ignore, struct task_struct *p)
 {
 	struct trace_array_cpu *data;
 	int cpu = smp_processor_id();
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index aa1ea7b..d2f6d0b 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -601,7 +601,22 @@
 
 	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
 			trace_event_name(&tu->tp.call));
-	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
+	seq_printf(m, " %s:", tu->filename);
+
+	/* Don't print "0x  (null)" when offset is 0 */
+	if (tu->offset) {
+		seq_printf(m, "0x%p", (void *)tu->offset);
+	} else {
+		switch (sizeof(void *)) {
+		case 4:
+			seq_printf(m, "0x00000000");
+			break;
+		case 8:
+		default:
+			seq_printf(m, "0x0000000000000000");
+			break;
+		}
+	}
 
 	for (i = 0; i < tu->tp.nr_args; i++)
 		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
@@ -1095,11 +1110,15 @@
 {
 	struct trace_event_call *call = &tu->tp.call;
 	struct uprobe_trace_entry_head *entry;
+	struct bpf_prog *prog = call->prog;
 	struct hlist_head *head;
 	void *data;
 	int size, esize;
 	int rctx;
 
+	if (prog && !trace_call_bpf(prog, regs))
+		return;
+
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 
 	size = esize + tu->tp.size + dsize;
@@ -1289,6 +1308,7 @@
 		return -ENODEV;
 	}
 
+	call->flags = TRACE_EVENT_FL_UPROBE;
 	call->class->reg = trace_uprobe_register;
 	call->data = tu;
 	ret = trace_add_event_call(call);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 4109f83..f65a0a0 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -976,8 +976,8 @@
 	if (user_ns == current_user_ns())
 		return -EINVAL;
 
-	/* Threaded processes may not enter a different user namespace */
-	if (atomic_read(&current->mm->mm_users) > 1)
+	/* Tasks that share a thread group must share a user namespace */
+	if (!thread_group_empty(current))
 		return -EINVAL;
 
 	if (current->fs->users != 1)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4c4f061..ca71582 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -338,20 +338,20 @@
 #include <trace/events/workqueue.h>
 
 #define assert_rcu_or_pool_mutex()					\
-	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq_pool_mutex),		\
-			   "sched RCU or wq_pool_mutex should be held")
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
+			 !lockdep_is_held(&wq_pool_mutex),		\
+			 "sched RCU or wq_pool_mutex should be held")
 
 #define assert_rcu_or_wq_mutex(wq)					\
-	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq->mutex),			\
-			   "sched RCU or wq->mutex should be held")
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
+			 !lockdep_is_held(&wq->mutex),			\
+			 "sched RCU or wq->mutex should be held")
 
 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
-	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq->mutex) ||		\
-			   lockdep_is_held(&wq_pool_mutex),		\
-			   "sched RCU, wq->mutex or wq_pool_mutex should be held")
+	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&			\
+			 !lockdep_is_held(&wq->mutex) &&		\
+			 !lockdep_is_held(&wq_pool_mutex),		\
+			 "sched RCU, wq->mutex or wq_pool_mutex should be held")
 
 #define for_each_cpu_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
@@ -1714,9 +1714,7 @@
 		goto fail;
 
 	set_user_nice(worker->task, pool->attrs->nice);
-
-	/* prevent userland from meddling with cpumask of workqueue workers */
-	worker->task->flags |= PF_NO_SETAFFINITY;
+	kthread_bind_mask(worker->task, pool->attrs->cpumask);
 
 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
@@ -2614,7 +2612,7 @@
 out_unlock:
 	mutex_unlock(&wq->mutex);
 }
-EXPORT_SYMBOL_GPL(flush_workqueue);
+EXPORT_SYMBOL(flush_workqueue);
 
 /**
  * drain_workqueue - drain a workqueue
@@ -3856,7 +3854,7 @@
 		}
 
 		wq->rescuer = rescuer;
-		rescuer->task->flags |= PF_NO_SETAFFINITY;
+		kthread_bind_mask(rescuer->task, cpu_possible_mask);
 		wake_up_process(rescuer->task);
 	}
 
diff --git a/lib/Kconfig b/lib/Kconfig
index 3a2ef67..a165552 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -460,16 +460,6 @@
 config LRU_CACHE
 	tristate
 
-config AVERAGE
-	bool "Averaging functions"
-	help
-	  This option is provided for the case where no in-kernel-tree
-	  modules require averaging functions, but a module built outside
-	  the kernel tree does. Such modules that use library averaging
-	  functions require Y here.
-
-	  If unsure, say N.
-
 config CLZ_TAB
 	bool
 
@@ -521,6 +511,13 @@
 
 source "lib/fonts/Kconfig"
 
+config SG_SPLIT
+	def_bool n
+	help
+	 Provides a helper to split scatterlists into chunks, each chunk being
+	 a scatterlist. This should be selected by a driver or an API which
+	 wishes to split a scatterlist amongst multiple DMA channels.
+
 #
 # sg chaining option
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e2894b2..ab76b99 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -916,12 +916,6 @@
 	 This allows rt mutex semantics violations and rt mutex related
 	 deadlocks (lockups) to be detected and reported automatically.
 
-config RT_MUTEX_TESTER
-	bool "Built-in scriptable tester for rt-mutexes"
-	depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
-	help
-	  This option enables a rt-mutex tester.
-
 config DEBUG_SPINLOCK
 	bool "Spinlock and rw-lock debugging: basic checks"
 	depends on DEBUG_KERNEL
@@ -1353,20 +1347,6 @@
 	  RCU grace period persists, additional CPU stall warnings are
 	  printed at more widely spaced intervals.
 
-config RCU_CPU_STALL_INFO
-	bool "Print additional diagnostics on RCU CPU stall"
-	depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
-	default y
-	help
-	  For each stalled CPU that is aware of the current RCU grace
-	  period, print out additional per-CPU diagnostic information
-	  regarding scheduling-clock ticks, idle state, and,
-	  for RCU_FAST_NO_HZ kernels, idle-entry state.
-
-	  Say N if you are unsure.
-
-	  Say Y if you want to enable such diagnostics.
-
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
@@ -1379,7 +1359,7 @@
 	  Say N if you are unsure.
 
 config RCU_EQS_DEBUG
-	bool "Use this when adding any sort of NO_HZ support to your arch"
+	bool "Provide debugging asserts for adding NO_HZ support to an arch"
 	depends on DEBUG_KERNEL
 	help
 	  This option provides consistency checks in RCU's handling of
@@ -1542,6 +1522,13 @@
 	  and to test how the mmc host driver handles retries from
 	  the block device.
 
+config FAIL_FUTEX
+	bool "Fault-injection capability for futexes"
+	select DEBUG_FS
+	depends on FAULT_INJECTION && FUTEX
+	help
+	  Provide fault-injection capability for futexes.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1840,6 +1827,15 @@
 	        memtest=17, mean do 17 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
+config TEST_STATIC_KEYS
+	tristate "Test static keys"
+	default n
+	depends on m
+	help
+	  Test the static key interfaces.
+
+	  If unsure, say N.
+
 source "samples/Kconfig"
 
 source "lib/Kconfig.kgdb"
diff --git a/lib/Makefile b/lib/Makefile
index 6897b52..f01c558 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -39,6 +39,8 @@
 obj-$(CONFIG_TEST_LKM) += test_module.o
 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
+obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -138,8 +140,6 @@
 
 obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
 
-obj-$(CONFIG_AVERAGE) += average.o
-
 obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
 
 obj-$(CONFIG_CORDIC) += cordic.o
@@ -160,6 +160,7 @@
 
 obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
 
+obj-$(CONFIG_SG_SPLIT) += sg_split.o
 obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 1298c05e..2886eba 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -102,6 +102,9 @@
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
+ATOMIC64_OP(and, &=)
+ATOMIC64_OP(or, |=)
+ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
 #undef ATOMIC64_OP_RETURN
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 0211d30..83c33a5b 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,8 +16,39 @@
 #include <linux/kernel.h>
 #include <linux/atomic.h>
 
+#define TEST(bit, op, c_op, val)				\
+do {								\
+	atomic##bit##_set(&v, v0);				\
+	r = v0;							\
+	atomic##bit##_##op(val, &v);				\
+	r c_op val;						\
+	WARN(atomic##bit##_read(&v) != r, "%Lx != %Lx\n",	\
+		(unsigned long long)atomic##bit##_read(&v),	\
+		(unsigned long long)r);				\
+} while (0)
+
+static __init void test_atomic(void)
+{
+	int v0 = 0xaaa31337;
+	int v1 = 0xdeadbeef;
+	int onestwos = 0x11112222;
+	int one = 1;
+
+	atomic_t v;
+	int r;
+
+	TEST(, add, +=, onestwos);
+	TEST(, add, +=, -one);
+	TEST(, sub, -=, onestwos);
+	TEST(, sub, -=, -one);
+	TEST(, or, |=, v1);
+	TEST(, and, &=, v1);
+	TEST(, xor, ^=, v1);
+	TEST(, andnot, &= ~, v1);
+}
+
 #define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
-static __init int test_atomic64(void)
+static __init void test_atomic64(void)
 {
 	long long v0 = 0xaaa31337c001d00dLL;
 	long long v1 = 0xdeadbeefdeafcafeLL;
@@ -34,15 +65,14 @@
 	BUG_ON(v.counter != r);
 	BUG_ON(atomic64_read(&v) != r);
 
-	INIT(v0);
-	atomic64_add(onestwos, &v);
-	r += onestwos;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
-	atomic64_add(-one, &v);
-	r += -one;
-	BUG_ON(v.counter != r);
+	TEST(64, add, +=, onestwos);
+	TEST(64, add, +=, -one);
+	TEST(64, sub, -=, onestwos);
+	TEST(64, sub, -=, -one);
+	TEST(64, or, |=, v1);
+	TEST(64, and, &=, v1);
+	TEST(64, xor, ^=, v1);
+	TEST(64, andnot, &= ~, v1);
 
 	INIT(v0);
 	r += onestwos;
@@ -55,16 +85,6 @@
 	BUG_ON(v.counter != r);
 
 	INIT(v0);
-	atomic64_sub(onestwos, &v);
-	r -= onestwos;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
-	atomic64_sub(-one, &v);
-	r -= -one;
-	BUG_ON(v.counter != r);
-
-	INIT(v0);
 	r -= onestwos;
 	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
 	BUG_ON(v.counter != r);
@@ -147,6 +167,12 @@
 	BUG_ON(!atomic64_inc_not_zero(&v));
 	r += one;
 	BUG_ON(v.counter != r);
+}
+
+static __init int test_atomics(void)
+{
+	test_atomic();
+	test_atomic64();
 
 #ifdef CONFIG_X86
 	pr_info("passed for %s platform %s CX8 and %s SSE\n",
@@ -166,4 +192,4 @@
 	return 0;
 }
 
-core_initcall(test_atomic64);
+core_initcall(test_atomics);
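For reference, one of the TEST() invocations above expands to roughly the following; shown here manually for the 64-bit OR case:

	atomic64_set(&v, v0);			/* v  = 0xaaa31337c001d00d */
	r = v0;
	atomic64_or(v1, &v);			/* kernel op under test */
	r |= v1;				/* C reference computation */
	WARN(atomic64_read(&v) != r, "%Lx != %Lx\n",
	     (unsigned long long)atomic64_read(&v),
	     (unsigned long long)r);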
diff --git a/lib/average.c b/lib/average.c
deleted file mode 100644
index 114d1be..0000000
--- a/lib/average.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * lib/average.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2.  See the file COPYING for more details.
- */
-
-#include <linux/export.h>
-#include <linux/average.h>
-#include <linux/kernel.h>
-#include <linux/bug.h>
-#include <linux/log2.h>
-
-/**
- * DOC: Exponentially Weighted Moving Average (EWMA)
- *
- * These are generic functions for calculating Exponentially Weighted Moving
- * Averages (EWMA). We keep a structure with the EWMA parameters and a scaled
- * up internal representation of the average value to prevent rounding errors.
- * The factor for scaling up and the exponential weight (or decay rate) have to
- * be specified thru the init fuction. The structure should not be accessed
- * directly but only thru the helper functions.
- */
-
-/**
- * ewma_init() - Initialize EWMA parameters
- * @avg: Average structure
- * @factor: Factor to use for the scaled up internal value. The maximum value
- *	of averages can be ULONG_MAX/(factor*weight). For performance reasons
- *	factor has to be a power of 2.
- * @weight: Exponential weight, or decay rate. This defines how fast the
- *	influence of older values decreases. For performance reasons weight has
- *	to be a power of 2.
- *
- * Initialize the EWMA parameters for a given struct ewma @avg.
- */
-void ewma_init(struct ewma *avg, unsigned long factor, unsigned long weight)
-{
-	WARN_ON(!is_power_of_2(weight) || !is_power_of_2(factor));
-
-	avg->weight = ilog2(weight);
-	avg->factor = ilog2(factor);
-	avg->internal = 0;
-}
-EXPORT_SYMBOL(ewma_init);
-
-/**
- * ewma_add() - Exponentially weighted moving average (EWMA)
- * @avg: Average structure
- * @val: Current value
- *
- * Add a sample to the average.
- */
-struct ewma *ewma_add(struct ewma *avg, unsigned long val)
-{
-	unsigned long internal = ACCESS_ONCE(avg->internal);
-
-	ACCESS_ONCE(avg->internal) = internal ?
-		(((internal << avg->weight) - internal) +
-			(val << avg->factor)) >> avg->weight :
-		(val << avg->factor);
-	return avg;
-}
-EXPORT_SYMBOL(ewma_add);
diff --git a/lib/klist.c b/lib/klist.c
index 89b485a..d74cf7a 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -324,6 +324,47 @@
 }
 
 /**
+ * klist_prev - Ante up prev node in list.
+ * @i: Iterator structure.
+ *
+ * First grab list lock. Decrement the reference count of the node the
+ * iterator was on, if there was one. Grab the prev node, increment its
+ * reference count, drop the lock, and return that prev node.
+ */
+struct klist_node *klist_prev(struct klist_iter *i)
+{
+	void (*put)(struct klist_node *) = i->i_klist->put;
+	struct klist_node *last = i->i_cur;
+	struct klist_node *prev;
+
+	spin_lock(&i->i_klist->k_lock);
+
+	if (last) {
+		prev = to_klist_node(last->n_node.prev);
+		if (!klist_dec_and_del(last))
+			put = NULL;
+	} else
+		prev = to_klist_node(i->i_klist->k_list.prev);
+
+	i->i_cur = NULL;
+	while (prev != to_klist_node(&i->i_klist->k_list)) {
+		if (likely(!knode_dead(prev))) {
+			kref_get(&prev->n_ref);
+			i->i_cur = prev;
+			break;
+		}
+		prev = to_klist_node(prev->n_node.prev);
+	}
+
+	spin_unlock(&i->i_klist->k_lock);
+
+	if (put && last)
+		put(last);
+	return i->i_cur;
+}
+EXPORT_SYMBOL_GPL(klist_prev);
+
+/**
  * klist_next - Ante up next node in list.
  * @i: Iterator structure.
  *
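A minimal sketch of walking a klist backwards with the new iterator step; 'mylist', 'struct my_item' and handle() are hypothetical, and the first klist_prev() yields the tail because i_cur is NULL after klist_iter_init():

	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(&mylist, &iter);
	while ((n = klist_prev(&iter)) != NULL)
		handle(container_of(n, struct my_item, node));
	klist_iter_exit(&iter);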
diff --git a/lib/lockref.c b/lib/lockref.c
index 494994b..5a92189 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -4,14 +4,6 @@
 #if USE_CMPXCHG_LOCKREF
 
 /*
- * Allow weakly-ordered memory architectures to provide barrier-less
- * cmpxchg semantics for lockref updates.
- */
-#ifndef cmpxchg64_relaxed
-# define cmpxchg64_relaxed cmpxchg64
-#endif
-
-/*
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index bc0a1da..95c52a9 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -146,18 +146,25 @@
 	uint8_t *p;
 	mpi_limb_t alimb;
 	unsigned int n = mpi_get_size(a);
-	int i;
+	int i, lzeros = 0;
 
-	if (buf_len < n || !buf)
+	if (buf_len < n || !buf || !nbytes)
 		return -EINVAL;
 
 	if (sign)
 		*sign = a->sign;
 
-	if (nbytes)
-		*nbytes = n;
+	p = (void *)&a->d[a->nlimbs] - 1;
+
+	for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
+		if (!*p)
+			lzeros++;
+		else
+			break;
+	}
 
 	p = buf;
+	*nbytes = n - lzeros;
 
 	for (i = a->nlimbs - 1; i >= 0; i--) {
 		alimb = a->d[i];
@@ -178,6 +185,19 @@
 #else
 #error please implement for this limb size.
 #endif
+
+		if (lzeros > 0) {
+			if (lzeros >= sizeof(alimb)) {
+				p -= sizeof(alimb);
+			} else {
+				mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
+				mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
+							+ lzeros;
+				*limb1 = *limb2;
+				p -= lzeros;
+			}
+			lzeros -= sizeof(alimb);
+		}
 	}
 	return 0;
 }
@@ -197,7 +217,7 @@
  */
 void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
 {
-	uint8_t *buf, *p;
+	uint8_t *buf;
 	unsigned int n;
 	int ret;
 
@@ -220,14 +240,6 @@
 		kfree(buf);
 		return NULL;
 	}
-
-	/* this is sub-optimal but we need to do the shift operation
-	 * because the caller has to free the returned buffer */
-	for (p = buf; !*p && *nbytes; p++, --*nbytes)
-		;
-	if (p != buf)
-		memmove(buf, p, *nbytes);
-
 	return buf;
 }
 EXPORT_SYMBOL_GPL(mpi_get_buffer);
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index bcce5f1..5f5d24d 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -52,6 +52,51 @@
 EXPORT_SYMBOL(pci_iomap_range);
 
 /**
+ * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @offset: map memory at the given offset in BAR
+ * @maxlen: max length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * the right thing either way. Write combining is used when possible.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR from offset to the end, pass %0 here.
+ * */
+void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
+				 int bar,
+				 unsigned long offset,
+				 unsigned long maxlen)
+{
+	resource_size_t start = pci_resource_start(dev, bar);
+	resource_size_t len = pci_resource_len(dev, bar);
+	unsigned long flags = pci_resource_flags(dev, bar);
+
+
+	if (flags & IORESOURCE_IO)
+		return NULL;
+
+	if (len <= offset || !start)
+		return NULL;
+
+	len -= offset;
+	start += offset;
+	if (maxlen && len > maxlen)
+		len = maxlen;
+
+	if (flags & IORESOURCE_MEM)
+		return ioremap_wc(start, len);
+
+	/* What? */
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
+
+/**
  * pci_iomap - create a virtual mapping cookie for a PCI BAR
  * @dev: PCI device that owns the BAR
  * @bar: BAR number
@@ -70,4 +115,25 @@
 	return pci_iomap_range(dev, bar, 0, maxlen);
 }
 EXPORT_SYMBOL(pci_iomap);
+
+/**
+ * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
+ * @dev: PCI device that owns the BAR
+ * @bar: BAR number
+ * @maxlen: length of the memory to map
+ *
+ * Using this function you will get a __iomem address to your device BAR.
+ * You can access it using ioread*() and iowrite*(). These functions hide
+ * the details of whether this is an MMIO or PIO address space and just do
+ * the right thing either way. Write combining is used when possible.
+ *
+ * @maxlen specifies the maximum length to map. If you want to get access to
+ * the complete BAR without checking for its length first, pass %0 here.
+ * */
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL_GPL(pci_iomap_wc);
 #endif /* CONFIG_PCI */
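A minimal driver-probe sketch using the new write-combining mapping helpers; the BAR number is hypothetical and error handling is elided:

	void __iomem *fb;

	/* map BAR 1 (e.g. a framebuffer aperture) with write combining;
	 * returns NULL for I/O port BARs or if the BAR is unassigned
	 */
	fb = pci_iomap_wc(pdev, 1, 0);
	if (!fb)
		return -ENOMEM;
	/* ... use ioread32()/iowrite32() on fb ... */
	pci_iounmap(pdev, fb);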
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d105a9f..bafa993 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -105,16 +105,12 @@
  **/
 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
 {
-#ifndef CONFIG_ARCH_HAS_SG_CHAIN
-	struct scatterlist *ret = &sgl[nents - 1];
-#else
 	struct scatterlist *sg, *ret = NULL;
 	unsigned int i;
 
 	for_each_sg(sgl, sg, nents, i)
 		ret = sg;
 
-#endif
 #ifdef CONFIG_DEBUG_SG
 	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
 	BUG_ON(!sg_is_last(ret));
diff --git a/lib/sg_split.c b/lib/sg_split.c
new file mode 100644
index 0000000..b063410
--- /dev/null
+++ b/lib/sg_split.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
+ *
+ * Scatterlist splitting helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct sg_splitter {
+	struct scatterlist *in_sg0;
+	int nents;
+	off_t skip_sg0;
+	unsigned int length_last_sg;
+
+	struct scatterlist *out_sg;
+};
+
+static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
+			      off_t skip, const size_t *sizes,
+			      struct sg_splitter *splitters, bool mapped)
+{
+	int i;
+	unsigned int sglen;
+	size_t size = sizes[0], len;
+	struct sg_splitter *curr = splitters;
+	struct scatterlist *sg;
+
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].in_sg0 = NULL;
+		splitters[i].nents = 0;
+	}
+
+	for_each_sg(in, sg, nents, i) {
+		sglen = mapped ? sg_dma_len(sg) : sg->length;
+		if (skip > sglen) {
+			skip -= sglen;
+			continue;
+		}
+
+		len = min_t(size_t, size, sglen - skip);
+		if (!curr->in_sg0) {
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+		}
+		size -= len;
+		curr->nents++;
+		curr->length_last_sg = len;
+
+		while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
+			curr++;
+			size = *(++sizes);
+			skip += len;
+			len = min_t(size_t, size, sglen - skip);
+
+			curr->in_sg0 = sg;
+			curr->skip_sg0 = skip;
+			curr->nents = 1;
+			curr->length_last_sg = len;
+			size -= len;
+		}
+		skip = 0;
+
+		if (!size && --nb_splits > 0) {
+			curr++;
+			size = *(++sizes);
+		}
+
+		if (!nb_splits)
+			break;
+	}
+
+	return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
+}
+
+static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			*out_sg = *in_sg;
+			if (!j) {
+				out_sg->offset += split->skip_sg0;
+				out_sg->length -= split->skip_sg0;
+			} else {
+				out_sg->offset = 0;
+			}
+			sg_dma_address(out_sg) = 0;
+			sg_dma_len(out_sg) = 0;
+			in_sg = sg_next(in_sg);
+		}
+		out_sg[-1].length = split->length_last_sg;
+		sg_mark_end(out_sg - 1);
+	}
+}
+
+static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
+{
+	int i, j;
+	struct scatterlist *in_sg, *out_sg;
+	struct sg_splitter *split;
+
+	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
+		in_sg = split->in_sg0;
+		out_sg = split->out_sg;
+		for (j = 0; j < split->nents; j++, out_sg++) {
+			sg_dma_address(out_sg) = sg_dma_address(in_sg);
+			sg_dma_len(out_sg) = sg_dma_len(in_sg);
+			if (!j) {
+				sg_dma_address(out_sg) += split->skip_sg0;
+				sg_dma_len(out_sg) -= split->skip_sg0;
+			}
+			in_sg = sg_next(in_sg);
+		}
+		sg_dma_len(--out_sg) = split->length_last_sg;
+	}
+}
+
+/**
+ * sg_split - split a scatterlist into several scatterlists
+ * @in: the input sg list
+ * @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
+ * @skip: the number of bytes to skip in the input sg list
+ * @nb_splits: the number of desired sg outputs
+ * @split_sizes: the respective size of each output sg list in bytes
+ * @out: an array where to store the allocated output sg lists
+ * @out_mapped_nents: the number of mapped sg entries in each resulting sg
+ *                    list. May be NULL if the sglist is not already mapped
+ *                    (in_mapped_nents = 0)
+ * @gfp_mask: the allocation flag
+ *
+ * This function splits the input sg list into nb_splits sg lists, which are
+ * allocated and stored into out.
+ * @in is split into:
+ *  - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
+ *  - @out[1], which covers bytes [@skip + split_sizes[0] ..
+ *                                 @skip + @split_sizes[0] + @split_sizes[1] -1]
+ * etc ...
+ * It will be the caller's duty to kfree() out array members.
+ *
+ * Returns 0 upon success, or error code
+ */
+int sg_split(struct scatterlist *in, const int in_mapped_nents,
+	     const off_t skip, const int nb_splits,
+	     const size_t *split_sizes,
+	     struct scatterlist **out, int *out_mapped_nents,
+	     gfp_t gfp_mask)
+{
+	int i, ret;
+	struct sg_splitter *splitters;
+
+	splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
+	if (!splitters)
+		return -ENOMEM;
+
+	ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
+			   splitters, false);
+	if (ret < 0)
+		goto err;
+
+	ret = -ENOMEM;
+	for (i = 0; i < nb_splits; i++) {
+		splitters[i].out_sg = kmalloc_array(splitters[i].nents,
+						    sizeof(struct scatterlist),
+						    gfp_mask);
+		if (!splitters[i].out_sg)
+			goto err;
+	}
+
+	/*
+	 * The order of these 3 calls is important and should be kept.
+	 */
+	sg_split_phys(splitters, nb_splits);
+	ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
+				 split_sizes, splitters, true);
+	if (ret < 0)
+		goto err;
+	sg_split_mapped(splitters, nb_splits);
+
+	for (i = 0; i < nb_splits; i++) {
+		out[i] = splitters[i].out_sg;
+		if (out_mapped_nents)
+			out_mapped_nents[i] = splitters[i].nents;
+	}
+
+	kfree(splitters);
+	return 0;
+
+err:
+	for (i = 0; i < nb_splits; i++)
+		kfree(splitters[i].out_sg);
+	kfree(splitters);
+	return ret;
+}
+EXPORT_SYMBOL(sg_split);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7f58c73..d137739 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -18,10 +18,12 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/random.h>
+#include <linux/highmem.h>
 
 /* General test specific settings */
 #define MAX_SUBTESTS	3
@@ -55,6 +57,7 @@
 /* Flags that can be passed to test cases */
 #define FLAG_NO_DATA		BIT(0)
 #define FLAG_EXPECTED_FAIL	BIT(1)
+#define FLAG_SKB_FRAG		BIT(2)
 
 enum {
 	CLASSIC  = BIT(6),	/* Old BPF instructions only. */
@@ -80,6 +83,7 @@
 		__u32 result;
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
+	__u8 frag_data[MAX_DATA];
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -355,6 +359,81 @@
 	return __bpf_fill_ja(self, 12, 9);
 }
 
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct sock_filter *insn;
+	int i;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	for (i = 0; i < len - 1; i += 2) {
+		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+					 SKF_AD_OFF + SKF_AD_CPU);
+	}
+
+	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i = 0, j, k = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+	for (j = 0; j < PUSH_CNT; j++) {
+		insn[i++] = BPF_LD_ABS(BPF_B, 0);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+		i++;
+		insn[i++] = BPF_MOV64_REG(R1, R6);
+		insn[i++] = BPF_MOV64_IMM(R2, 1);
+		insn[i++] = BPF_MOV64_IMM(R3, 2);
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 bpf_skb_vlan_push_proto.func - __bpf_call_base);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+		i++;
+	}
+
+	for (j = 0; j < PUSH_CNT; j++) {
+		insn[i++] = BPF_LD_ABS(BPF_B, 0);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+		i++;
+		insn[i++] = BPF_MOV64_REG(R1, R6);
+		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+					 bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+		insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+		i++;
+	}
+	if (++k < 5)
+		goto loop;
+
+	for (; i < len - 1; i++)
+		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+	insn[len - 1] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
 static struct bpf_test tests[] = {
 	{
 		"TAX",
@@ -3674,6 +3753,9 @@
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
 			BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -3708,6 +3790,9 @@
 		.u.insns_int = {
 			BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
 			BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+			BPF_ALU64_REG(BPF_MOV, R1, R0),
+			BPF_ALU64_IMM(BPF_RSH, R1, 32),
+			BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
 			BPF_EXIT_INSN(),
 		},
 		INTERNAL,
@@ -4392,6 +4477,618 @@
 		{ { 0, 0xababcbac } },
 		.fill_helper = bpf_fill_maxinsns11,
 	},
+	{
+		"BPF_MAXINSNS: ld_abs+get_processor_id",
+		{ },
+		CLASSIC,
+		{ },
+		{ { 1, 0xbee } },
+		.fill_helper = bpf_fill_ld_abs_get_processor_id,
+	},
+	{
+		"BPF_MAXINSNS: ld_abs+vlan_push/pop",
+		{ },
+		INTERNAL,
+		{ 0x34 },
+		{ { 1, 0xbef } },
+		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+	},
+	/*
+	 * LD_IND / LD_ABS on fragmented SKBs
+	 */
+	{
+		"LD_IND byte frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x42} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_IND halfword frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x4344} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_IND word frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x8),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x21071983} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_IND halfword mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x0519} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	{
+		"LD_IND word mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x40),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x25051982} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	{
+		"LD_ABS byte frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x40),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x42} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_ABS halfword frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x44),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x4344} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_ABS word frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x48),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ },
+		{ {0x40, 0x21071983} },
+		.frag_data = {
+			0x42, 0x00, 0x00, 0x00,
+			0x43, 0x44, 0x00, 0x00,
+			0x21, 0x07, 0x19, 0x83,
+		},
+	},
+	{
+		"LD_ABS halfword mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x3f),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x0519} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	{
+		"LD_ABS word mixed head/frag",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x3e),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_SKB_FRAG,
+		{ [0x3e] = 0x25, [0x3f] = 0x05, },
+		{ {0x40, 0x25051982} },
+		.frag_data = { 0x19, 0x82 },
+	},
+	/*
+	 * LD_IND / LD_ABS on non-fragmented SKBs
+	 */
+	{
+		/*
+		 * this tests that the JIT/interpreter correctly resets X
+		 * before using it in an LD_IND instruction.
+		 */
+		"LD_IND byte default X",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x1] = 0x42 },
+		{ {0x40, 0x42 } },
+	},
+	{
+		"LD_IND byte positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x82 } },
+	},
+	{
+		"LD_IND byte negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x3e),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_B, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{ [0x3c] = 0x25, [0x3d] = 0x05,  [0x3e] = 0x19, [0x3f] = 0x82 },
+		{ {0x40, 0x05 } },
+	},
+	{
+		"LD_IND halfword positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, 0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0xdd88 } },
+	},
+	{
+		"LD_IND halfword negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0xbb66 } },
+	},
+	{
+		"LD_IND halfword unaligned",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_H, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+		},
+		{ {0x40, 0x66cc } },
+	},
+	{
+		"LD_IND word positive offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, 0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xee99ffaa } },
+	},
+	{
+		"LD_IND word negative offset",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x4),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xaa55bb66 } },
+	},
+	{
+		"LD_IND word unaligned (addr & 3 == 2)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x2),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xbb66cc77 } },
+	},
+	{
+		"LD_IND word unaligned (addr & 3 == 1)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x3),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x55bb66cc } },
+	},
+	{
+		"LD_IND word unaligned (addr & 3 == 3)",
+		.u.insns = {
+			BPF_STMT(BPF_LDX | BPF_IMM, 0x20),
+			BPF_STMT(BPF_LD | BPF_IND | BPF_W, -0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x66cc77dd } },
+	},
+	{
+		"LD_ABS byte",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_B, 0x20),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xcc } },
+	},
+	{
+		"LD_ABS halfword",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x22),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xdd88 } },
+	},
+	{
+		"LD_ABS halfword unaligned",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_H, 0x25),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x99ff } },
+	},
+	{
+		"LD_ABS word",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x1c),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xaa55bb66 } },
+	},
+	{
+		"LD_ABS word unaligned (addr & 3 == 2)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x22),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0xdd88ee99 } },
+	},
+	{
+		"LD_ABS word unaligned (addr & 3 == 1)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x21),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x77dd88ee } },
+	},
+	{
+		"LD_ABS word unaligned (addr & 3 == 3)",
+		.u.insns = {
+			BPF_STMT(BPF_LD | BPF_ABS | BPF_W, 0x23),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC,
+		{
+			[0x1c] = 0xaa, [0x1d] = 0x55,
+			[0x1e] = 0xbb, [0x1f] = 0x66,
+			[0x20] = 0xcc, [0x21] = 0x77,
+			[0x22] = 0xdd, [0x23] = 0x88,
+			[0x24] = 0xee, [0x25] = 0x99,
+			[0x26] = 0xff, [0x27] = 0xaa,
+		},
+		{ {0x40, 0x88ee99ff } },
+	},
+	/*
+	 * verify that the interpreter or JIT correctly sets A and X
+	 * to 0.
+	 */
+	{
+		"ADD default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A + X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
+	{
+		"ADD default A",
+		.u.insns = {
+			/*
+			 * A = A + 0x42
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
+	{
+		"SUB default X",
+		.u.insns = {
+			/*
+			 * A = 0x66
+			 * A = A - X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x66),
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x66 } },
+	},
+	{
+		"SUB default A",
+		.u.insns = {
+			/*
+			 * A = A - -0x66
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, -0x66),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x66 } },
+	},
+	{
+		"MUL default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A * X
+			 * ret A
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"MUL default A",
+		.u.insns = {
+			/*
+			 * A = A * 0x66
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 0x66),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"DIV default X",
+		.u.insns = {
+			/*
+			 * A = 0x42
+			 * A = A / X ; this halts the filter execution if X is 0
+			 * ret 0x42
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x42),
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"DIV default A",
+		.u.insns = {
+			/*
+			 * A = A / 1
+			 * ret A
+			 */
+			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x1),
+			BPF_STMT(BPF_RET | BPF_A, 0x0),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x0 } },
+	},
+	{
+		"JMP EQ default A",
+		.u.insns = {
+			/*
+			 * cmp A, 0x0, 0, 1
+			 * ret 0x42
+			 * ret 0x66
+			 */
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_K, 0x66),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
+	{
+		"JMP EQ default X",
+		.u.insns = {
+			/*
+			 * A = 0x0
+			 * cmp A, X, 0, 1
+			 * ret 0x42
+			 * ret 0x66
+			 */
+			BPF_STMT(BPF_LD | BPF_IMM, 0x0),
+			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0x0, 0, 1),
+			BPF_STMT(BPF_RET | BPF_K, 0x42),
+			BPF_STMT(BPF_RET | BPF_K, 0x66),
+		},
+		CLASSIC | FLAG_NO_DATA,
+		{},
+		{ {0x1, 0x42 } },
+	},
 };
 
 static struct net_device dev;
@@ -4427,6 +5124,9 @@
 
 static void *generate_test_data(struct bpf_test *test, int sub)
 {
+	struct sk_buff *skb;
+	struct page *page;
+
 	if (test->aux & FLAG_NO_DATA)
 		return NULL;
 
@@ -4434,7 +5134,38 @@
 	 * subtests generate skbs of different sizes based on
 	 * the same data.
 	 */
-	return populate_skb(test->data, test->test[sub].data_size);
+	skb = populate_skb(test->data, test->test[sub].data_size);
+	if (!skb)
+		return NULL;
+
+	if (test->aux & FLAG_SKB_FRAG) {
+		/*
+		 * when the test requires a fragmented skb, add a
+		 * single fragment to the skb, filled with
+		 * test->frag_data.
+		 */
+		void *ptr;
+
+		page = alloc_page(GFP_KERNEL);
+
+		if (!page)
+			goto err_kfree_skb;
+
+		ptr = kmap(page);
+		if (!ptr)
+			goto err_free_page;
+		memcpy(ptr, test->frag_data, MAX_DATA);
+		kunmap(page);
+		skb_add_rx_frag(skb, 0, page, 0, MAX_DATA, MAX_DATA);
+	}
+
+	return skb;
+
+err_free_page:
+	__free_page(page);
+err_kfree_skb:
+	kfree_skb(skb);
+	return NULL;
 }
 
 static void release_test_data(const struct bpf_test *test, void *data)
@@ -4515,6 +5246,8 @@
 		}
 
 		fp->len = flen;
+		/* Type doesn't really matter here as long as it's not unspec. */
+		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
 		bpf_prog_select_runtime(fp);
@@ -4545,14 +5278,14 @@
 	u64 start, finish;
 	int ret = 0, i;
 
-	start = ktime_to_us(ktime_get());
+	start = ktime_get_ns();
 
 	for (i = 0; i < runs; i++)
 		ret = BPF_PROG_RUN(fp, data);
 
-	finish = ktime_to_us(ktime_get());
+	finish = ktime_get_ns();
 
-	*duration = (finish - start) * 1000ULL;
+	*duration = finish - start;
 	do_div(*duration, runs);
 
 	return ret;
@@ -4572,6 +5305,11 @@
 			break;
 
 		data = generate_test_data(test, i);
+		if (!data && !(test->aux & FLAG_NO_DATA)) {
+			pr_cont("data generation failed ");
+			err_cnt++;
+			break;
+		}
 		ret = __run_one(fp, data, runs, &duration);
 		release_test_data(test, data);
 
@@ -4587,10 +5325,73 @@
 	return err_cnt;
 }
 
+static char test_name[64];
+module_param_string(test_name, test_name, sizeof(test_name), 0);
+
+static int test_id = -1;
+module_param(test_id, int, 0);
+
+static int test_range[2] = { 0, ARRAY_SIZE(tests) - 1 };
+module_param_array(test_range, int, NULL, 0);
+
+static __init int find_test_index(const char *test_name)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		if (!strcmp(tests[i].descr, test_name))
+			return i;
+	}
+	return -1;
+}
+
 static __init int prepare_bpf_tests(void)
 {
 	int i;
 
+	if (test_id >= 0) {
+		/*
+		 * if a test_id was specified, use test_range to
+		 * cover only that test.
+		 */
+		if (test_id >= ARRAY_SIZE(tests)) {
+			pr_err("test_bpf: invalid test_id specified.\n");
+			return -EINVAL;
+		}
+
+		test_range[0] = test_id;
+		test_range[1] = test_id;
+	} else if (*test_name) {
+		/*
+		 * if a test_name was specified, find it and setup
+		 * test_range to cover only that test.
+		 */
+		int idx = find_test_index(test_name);
+
+		if (idx < 0) {
+			pr_err("test_bpf: no test named '%s' found.\n",
+			       test_name);
+			return -EINVAL;
+		}
+		test_range[0] = idx;
+		test_range[1] = idx;
+	} else {
+		/*
+		 * check that the supplied test_range is valid.
+		 */
+		if (test_range[0] >= ARRAY_SIZE(tests) ||
+		    test_range[1] >= ARRAY_SIZE(tests) ||
+		    test_range[0] < 0 || test_range[1] < 0) {
+			pr_err("test_bpf: test_range is out of bounds.\n");
+			return -EINVAL;
+		}
+
+		if (test_range[1] < test_range[0]) {
+			pr_err("test_bpf: test_range is ending before it starts.\n");
+			return -EINVAL;
+		}
+	}
+
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
 		if (tests[i].fill_helper &&
 		    tests[i].fill_helper(&tests[i]) < 0)
@@ -4610,6 +5411,11 @@
 	}
 }
 
+static bool exclude_test(int test_id)
+{
+	return test_id < test_range[0] || test_id > test_range[1];
+}
+
 static __init int test_bpf(void)
 {
 	int i, err_cnt = 0, pass_cnt = 0;
@@ -4619,6 +5425,9 @@
 		struct bpf_prog *fp;
 		int err;
 
+		if (exclude_test(i))
+			continue;
+
 		pr_info("#%d %s ", i, tests[i].descr);
 
 		fp = generate_filter(i, &err);
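A brief usage sketch for the new selection parameters (illustrative, not part of the patch itself): test_id picks a single test by index, test_name matches a test's descr string via find_test_index(), and test_range takes an inclusive index pair. Loading the module with e.g. "insmod test_bpf.ko test_range=0,15" runs only the first sixteen tests, while test_name="BPF_MAXINSNS: ld_abs+vlan_push/pop" runs just that one.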
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c90777e..8c1ad1c 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -16,10 +16,14 @@
 #include <linux/init.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
+#include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #define MAX_ENTRIES	1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -44,11 +48,21 @@
 module_param(size, int, 0);
 MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
 
+static int tcount = 10;
+module_param(tcount, int, 0);
+MODULE_PARM_DESC(tcount, "Number of threads to spawn (default: 10)");
+
 struct test_obj {
 	int			value;
 	struct rhash_head	node;
 };
 
+struct thread_data {
+	int id;
+	struct task_struct *task;
+	struct test_obj *objs;
+};
+
 static struct test_obj array[MAX_ENTRIES];
 
 static struct rhashtable_params test_rht_params = {
@@ -59,6 +73,9 @@
 	.nulls_base = (3U << RHT_BASE_SHIFT),
 };
 
+static struct semaphore prestart_sem;
+static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+
 static int __init test_rht_lookup(struct rhashtable *ht)
 {
 	unsigned int i;
@@ -87,6 +104,8 @@
 				return -EINVAL;
 			}
 		}
+
+		cond_resched_rcu();
 	}
 
 	return 0;
@@ -160,6 +179,8 @@
 		} else if (err) {
 			return err;
 		}
+
+		cond_resched();
 	}
 
 	if (insert_fails)
@@ -183,6 +204,8 @@
 
 			rhashtable_remove_fast(ht, &obj->node, test_rht_params);
 		}
+
+		cond_resched();
 	}
 
 	end = ktime_get_ns();
@@ -193,10 +216,97 @@
 
 static struct rhashtable ht;
 
+static int thread_lookup_test(struct thread_data *tdata)
+{
+	int i, err = 0;
+
+	for (i = 0; i < entries; i++) {
+		struct test_obj *obj;
+		int key = (tdata->id << 16) | i;
+
+		obj = rhashtable_lookup_fast(&ht, &key, test_rht_params);
+		if (obj && (tdata->objs[i].value == TEST_INSERT_FAIL)) {
+			pr_err("  found unexpected object %d\n", key);
+			err++;
+		} else if (!obj && (tdata->objs[i].value != TEST_INSERT_FAIL)) {
+			pr_err("  object %d not found!\n", key);
+			err++;
+		} else if (obj && (obj->value != key)) {
+			pr_err("  wrong object returned (got %d, expected %d)\n",
+			       obj->value, key);
+			err++;
+		}
+	}
+	return err;
+}
+
+static int threadfunc(void *data)
+{
+	int i, step, err = 0, insert_fails = 0;
+	struct thread_data *tdata = data;
+
+	up(&prestart_sem);
+	if (down_interruptible(&startup_sem))
+		pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
+
+	for (i = 0; i < entries; i++) {
+		tdata->objs[i].value = (tdata->id << 16) | i;
+		err = rhashtable_insert_fast(&ht, &tdata->objs[i].node,
+		                             test_rht_params);
+		if (err == -ENOMEM || err == -EBUSY) {
+			tdata->objs[i].value = TEST_INSERT_FAIL;
+			insert_fails++;
+		} else if (err) {
+			pr_err("  thread[%d]: rhashtable_insert_fast failed\n",
+			       tdata->id);
+			goto out;
+		}
+	}
+	if (insert_fails)
+		pr_info("  thread[%d]: %d insert failures\n",
+		        tdata->id, insert_fails);
+
+	err = thread_lookup_test(tdata);
+	if (err) {
+		pr_err("  thread[%d]: rhashtable_lookup_test failed\n",
+		       tdata->id);
+		goto out;
+	}
+
+	for (step = 10; step > 0; step--) {
+		for (i = 0; i < entries; i += step) {
+			if (tdata->objs[i].value == TEST_INSERT_FAIL)
+				continue;
+			err = rhashtable_remove_fast(&ht, &tdata->objs[i].node,
+			                             test_rht_params);
+			if (err) {
+				pr_err("  thread[%d]: rhashtable_remove_fast failed\n",
+				       tdata->id);
+				goto out;
+			}
+			tdata->objs[i].value = TEST_INSERT_FAIL;
+		}
+		err = thread_lookup_test(tdata);
+		if (err) {
+			pr_err("  thread[%d]: rhashtable_lookup_test (2) failed\n",
+			       tdata->id);
+			goto out;
+		}
+	}
+out:
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+	return err;
+}
+
 static int __init test_rht_init(void)
 {
-	int i, err;
+	int i, err, started_threads = 0, failed_threads = 0;
 	u64 total_time = 0;
+	struct thread_data *tdata;
+	struct test_obj *objs;
 
 	entries = min(entries, MAX_ENTRIES);
 
@@ -232,6 +342,57 @@
 	do_div(total_time, runs);
 	pr_info("Average test time: %llu\n", total_time);
 
+	if (!tcount)
+		return 0;
+
+	pr_info("Testing concurrent rhashtable access from %d threads\n",
+	        tcount);
+	sema_init(&prestart_sem, 1 - tcount);
+	tdata = vzalloc(tcount * sizeof(struct thread_data));
+	if (!tdata)
+		return -ENOMEM;
+	objs  = vzalloc(tcount * entries * sizeof(struct test_obj));
+	if (!objs) {
+		vfree(tdata);
+		return -ENOMEM;
+	}
+
+	err = rhashtable_init(&ht, &test_rht_params);
+	if (err < 0) {
+		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+			err);
+		vfree(tdata);
+		vfree(objs);
+		return -EINVAL;
+	}
+	for (i = 0; i < tcount; i++) {
+		tdata[i].id = i;
+		tdata[i].objs = objs + i * entries;
+		tdata[i].task = kthread_run(threadfunc, &tdata[i],
+		                            "rhashtable_thread[%d]", i);
+		if (IS_ERR(tdata[i].task))
+			pr_err(" kthread_run failed for thread %d\n", i);
+		else
+			started_threads++;
+	}
+	if (down_interruptible(&prestart_sem))
+		pr_err("  down interruptible failed\n");
+	for (i = 0; i < tcount; i++)
+		up(&startup_sem);
+	for (i = 0; i < tcount; i++) {
+		if (IS_ERR(tdata[i].task))
+			continue;
+		if ((err = kthread_stop(tdata[i].task))) {
+			pr_warn("Test failed: thread %d returned: %d\n",
+			        i, err);
+			failed_threads++;
+		}
+	}
+	pr_info("Started %d threads, %d failed\n",
+	        started_threads, failed_threads);
+	rhashtable_destroy(&ht);
+	vfree(tdata);
+	vfree(objs);
 	return 0;
 }
 
diff --git a/lib/test_static_key_base.c b/lib/test_static_key_base.c
new file mode 100644
index 0000000..729447a
--- /dev/null
+++ b/lib/test_static_key_base.c
@@ -0,0 +1,68 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Jason Baron       <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key base_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_old_true_key);
+struct static_key base_inv_old_true_key = STATIC_KEY_INIT_TRUE;
+EXPORT_SYMBOL_GPL(base_inv_old_true_key);
+struct static_key base_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_old_false_key);
+struct static_key base_inv_old_false_key = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(base_inv_old_false_key);
+
+/* new keys */
+DEFINE_STATIC_KEY_TRUE(base_true_key);
+EXPORT_SYMBOL_GPL(base_true_key);
+DEFINE_STATIC_KEY_TRUE(base_inv_true_key);
+EXPORT_SYMBOL_GPL(base_inv_true_key);
+DEFINE_STATIC_KEY_FALSE(base_false_key);
+EXPORT_SYMBOL_GPL(base_false_key);
+DEFINE_STATIC_KEY_FALSE(base_inv_false_key);
+EXPORT_SYMBOL_GPL(base_inv_false_key);
+
+static void invert_key(struct static_key *key)
+{
+	if (static_key_enabled(key))
+		static_key_disable(key);
+	else
+		static_key_enable(key);
+}
+
+static int __init test_static_key_base_init(void)
+{
+	invert_key(&base_inv_old_true_key);
+	invert_key(&base_inv_old_false_key);
+	invert_key(&base_inv_true_key.key);
+	invert_key(&base_inv_false_key.key);
+
+	return 0;
+}
+
+static void __exit test_static_key_base_exit(void)
+{
+}
+
+module_init(test_static_key_base_init);
+module_exit(test_static_key_base_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c
new file mode 100644
index 0000000..c61b299
--- /dev/null
+++ b/lib/test_static_keys.c
@@ -0,0 +1,225 @@
+/*
+ * Kernel module for testing static keys.
+ *
+ * Copyright 2015 Akamai Technologies Inc. All Rights Reserved
+ *
+ * Authors:
+ *      Jason Baron       <jbaron@akamai.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/jump_label.h>
+
+/* old keys */
+struct static_key old_true_key	= STATIC_KEY_INIT_TRUE;
+struct static_key old_false_key	= STATIC_KEY_INIT_FALSE;
+
+/* new api */
+DEFINE_STATIC_KEY_TRUE(true_key);
+DEFINE_STATIC_KEY_FALSE(false_key);
+
+/* external */
+extern struct static_key base_old_true_key;
+extern struct static_key base_inv_old_true_key;
+extern struct static_key base_old_false_key;
+extern struct static_key base_inv_old_false_key;
+
+/* new api */
+extern struct static_key_true base_true_key;
+extern struct static_key_true base_inv_true_key;
+extern struct static_key_false base_false_key;
+extern struct static_key_false base_inv_false_key;
+
+
+struct test_key {
+	bool			init_state;
+	struct static_key	*key;
+	bool			(*test_key)(void);
+};
+
+#define test_key_func(key, branch) \
+	({bool func(void) { return branch(key); } func;	})
+
+static void invert_key(struct static_key *key)
+{
+	if (static_key_enabled(key))
+		static_key_disable(key);
+	else
+		static_key_enable(key);
+}
+
+static void invert_keys(struct test_key *keys, int size)
+{
+	struct static_key *previous = NULL;
+	int i;
+
+	for (i = 0; i < size; i++) {
+		if (previous != keys[i].key) {
+			invert_key(keys[i].key);
+			previous = keys[i].key;
+		}
+	}
+}
+
+static int verify_keys(struct test_key *keys, int size, bool invert)
+{
+	int i;
+	bool ret, init;
+
+	for (i = 0; i < size; i++) {
+		ret = static_key_enabled(keys[i].key);
+		init = keys[i].init_state;
+		if (ret != (invert ? !init : init))
+			return -EINVAL;
+		ret = keys[i].test_key();
+		if (static_key_enabled(keys[i].key)) {
+			if (!ret)
+				return -EINVAL;
+		} else {
+			if (ret)
+				return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int __init test_static_key_init(void)
+{
+	int ret;
+	int size;
+
+	struct test_key static_key_tests[] = {
+		/* internal keys - old keys */
+		{
+			.init_state	= true,
+			.key		= &old_true_key,
+			.test_key	= test_key_func(&old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &old_false_key,
+			.test_key	= test_key_func(&old_false_key, static_key_false),
+		},
+		/* internal keys - new keys */
+		{
+			.init_state	= true,
+			.key		= &true_key.key,
+			.test_key	= test_key_func(&true_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &true_key.key,
+			.test_key	= test_key_func(&true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &false_key.key,
+			.test_key	= test_key_func(&false_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &false_key.key,
+			.test_key	= test_key_func(&false_key, static_branch_unlikely),
+		},
+		/* external keys - old keys */
+		{
+			.init_state	= true,
+			.key		= &base_old_true_key,
+			.test_key	= test_key_func(&base_old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_old_true_key,
+			.test_key	= test_key_func(&base_inv_old_true_key, static_key_true),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_old_false_key,
+			.test_key	= test_key_func(&base_old_false_key, static_key_false),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_old_false_key,
+			.test_key	= test_key_func(&base_inv_old_false_key, static_key_false),
+		},
+		/* external keys - new keys */
+		{
+			.init_state	= true,
+			.key		= &base_true_key.key,
+			.test_key	= test_key_func(&base_true_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_true_key.key,
+			.test_key	= test_key_func(&base_true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_true_key.key,
+			.test_key	= test_key_func(&base_inv_true_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_inv_true_key.key,
+			.test_key	= test_key_func(&base_inv_true_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_false_key.key,
+			.test_key	= test_key_func(&base_false_key, static_branch_likely),
+		},
+		{
+			.init_state	= false,
+			.key		= &base_false_key.key,
+			.test_key	= test_key_func(&base_false_key, static_branch_unlikely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_false_key.key,
+			.test_key	= test_key_func(&base_inv_false_key, static_branch_likely),
+		},
+		{
+			.init_state	= true,
+			.key		= &base_inv_false_key.key,
+			.test_key	= test_key_func(&base_inv_false_key, static_branch_unlikely),
+		},
+	};
+
+	size = ARRAY_SIZE(static_key_tests);
+
+	ret = verify_keys(static_key_tests, size, false);
+	if (ret)
+		goto out;
+
+	invert_keys(static_key_tests, size);
+	ret = verify_keys(static_key_tests, size, true);
+	if (ret)
+		goto out;
+
+	invert_keys(static_key_tests, size);
+	ret = verify_keys(static_key_tests, size, false);
+	if (ret)
+		goto out;
+	return 0;
+out:
+	return ret;
+}
+
+static void __exit test_static_key_exit(void)
+{
+}
+
+module_init(test_static_key_init);
+module_exit(test_static_key_exit);
+
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index da39c60..95cd63b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -17,6 +17,7 @@
  */
 
 #include <stdarg.h>
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>	/* for KSYM_SYMBOL_LEN */
 #include <linux/types.h>
diff --git a/mm/Kconfig b/mm/Kconfig
index e79de2b..d4e6495 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -299,15 +299,9 @@
 # On the 'tile' arch, USB OHCI needs the bounce pool since tilegx will often
 # have more than 4GB of memory, but we don't currently use the IOTLB to present
 # a 32-bit address to OHCI.  So we need to use a bounce pool instead.
-#
-# We also use the bounce pool to provide stable page writes for jbd.  jbd
-# initiates buffer writeback without locking the page or setting PG_writeback,
-# and fixing that behavior (a second time; jbd2 doesn't have this problem) is
-# a major rework effort.  Instead, use the bounce buffer to snapshot pages
-# (until jbd goes away).  The only jbd user is ext3.
 config NEED_BOUNCE_POOL
 	bool
-	default y if (TILE && USB_OHCI_HCD) || (BLK_DEV_INTEGRITY && JBD)
+	default y if TILE && USB_OHCI_HCD
 
 config NR_QUICK
 	int
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index bd837b8..6471014 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,4 +5,4 @@
 # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
 CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 
-obj-y := kasan.o report.o
+obj-y := kasan.o report.o kasan_init.o
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
new file mode 100644
index 0000000..3f9a41c
--- /dev/null
+++ b/mm/kasan/kasan_init.c
@@ -0,0 +1,152 @@
+/*
+ * This file contains some kasan initialization code.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * This page serves two purposes:
+ *   - It is used as early shadow memory. The entire shadow region is
+ *     populated with this page before we are able to set up the normal
+ *     shadow memory.
+ *   - Later it is reused as the zero shadow to cover large ranges of memory
+ *     that are allowed to be accessed but are not handled by kasan
+ *     (vmalloc/vmemmap ...).
+ */
+unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+#if CONFIG_PGTABLE_LEVELS > 3
+pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+static __init void *early_alloc(size_t size, int node)
+{
+	return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+					BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
+static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+				unsigned long end)
+{
+	pte_t *pte = pte_offset_kernel(pmd, addr);
+	pte_t zero_pte;
+
+	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+	zero_pte = pte_wrprotect(zero_pte);
+
+	while (addr + PAGE_SIZE <= end) {
+		set_pte_at(&init_mm, addr, pte, zero_pte);
+		addr += PAGE_SIZE;
+		pte = pte_offset_kernel(pmd, addr);
+	}
+}
+
+static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+				unsigned long end)
+{
+	pmd_t *pmd = pmd_offset(pud, addr);
+	unsigned long next;
+
+	do {
+		next = pmd_addr_end(addr, end);
+
+		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			continue;
+		}
+
+		if (pmd_none(*pmd)) {
+			pmd_populate_kernel(&init_mm, pmd,
+					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pte_populate(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+				unsigned long end)
+{
+	pud_t *pud = pud_offset(pgd, addr);
+	unsigned long next;
+
+	do {
+		next = pud_addr_end(addr, end);
+		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+			pmd_t *pmd;
+
+			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pmd = pmd_offset(pud, addr);
+			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			continue;
+		}
+
+		if (pud_none(*pud)) {
+			pud_populate(&init_mm, pud,
+				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pmd_populate(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+/**
+ * kasan_populate_zero_shadow - populate shadow memory region with
+ *                               kasan_zero_page
+ * @shadow_start - start of the memory range to populate
+ * @shadow_end   - end of the memory range to populate
+ */
+void __init kasan_populate_zero_shadow(const void *shadow_start,
+				const void *shadow_end)
+{
+	unsigned long addr = (unsigned long)shadow_start;
+	unsigned long end = (unsigned long)shadow_end;
+	pgd_t *pgd = pgd_offset_k(addr);
+	unsigned long next;
+
+	do {
+		next = pgd_addr_end(addr, end);
+
+		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+			pud_t *pud;
+			pmd_t *pmd;
+
+			/*
+			 * kasan_zero_pud should already be populated with
+			 * pmds at this point.  The [pud,pmd]_populate*()
+			 * calls below are only needed for 3- and 2-level
+			 * page tables, where there are no real puds/pmds
+			 * and pgd_populate()/pud_populate() are no-ops.
+			 */
+			pgd_populate(&init_mm, pgd, kasan_zero_pud);
+			pud = pud_offset(pgd, addr);
+			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pmd = pmd_offset(pud, addr);
+			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			continue;
+		}
+
+		if (pgd_none(*pgd)) {
+			pgd_populate(&init_mm, pgd,
+				early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+		}
+		zero_pud_populate(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
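kasan_populate_zero_shadow() is meant to be called from architecture setup code during early boot. A hedged sketch of such a caller, backing the shadow of a region kasan never instruments with the single zero page; the region bounds are illustrative arch macros and are not defined in this patch:

#include <linux/init.h>
#include <linux/kasan.h>

void __init example_kasan_map_zero_shadow(void)
{
	/* every shadow access in this range resolves to kasan_zero_page */
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
				   kasan_mem_to_shadow((void *)VMALLOC_END));
}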
diff --git a/mm/maccess.c b/mm/maccess.c
index d53adf9..34fe247 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -60,3 +60,44 @@
 	return ret ? -EFAULT : 0;
 }
 EXPORT_SYMBOL_GPL(probe_kernel_write);
+
+/**
+ * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
+ * @dst:   Destination address, in kernel space.  This buffer must be at
+ *         least @count bytes long.
+ * @src:   Unsafe address.
+ * @count: Maximum number of bytes to copy, including the trailing NUL.
+ *
+ * Copies a NUL-terminated string from unsafe address to kernel buffer.
+ *
+ * On success, returns the length of the string INCLUDING the trailing NUL.
+ *
+ * If access fails, returns -EFAULT (some data may have been copied
+ * and the trailing NUL added).
+ *
+ * If @count is smaller than the length of the string, copies @count-1 bytes,
+ * sets the last byte of @dst buffer to NUL and returns @count.
+ */
+long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
+{
+	mm_segment_t old_fs = get_fs();
+	const void *src = unsafe_addr;
+	long ret;
+
+	if (unlikely(count <= 0))
+		return 0;
+
+	set_fs(KERNEL_DS);
+	pagefault_disable();
+
+	do {
+		ret = __copy_from_user_inatomic(dst++,
+						(const void __user __force *)src++, 1);
+	} while (dst[-1] && ret == 0 && src - unsafe_addr < count);
+
+	dst[-1] = '\0';
+	pagefault_enable();
+	set_fs(old_fs);
+
+	return ret < 0 ? ret : src - unsafe_addr;
+}
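Because the copy above runs with page faults disabled, the helper can be used on addresses that may not be mapped at all, e.g. from tracing code. A minimal caller sketch, assuming the declaration sits in linux/uaccess.h next to probe_kernel_read(); the function name and buffer size are illustrative:

#include <linux/printk.h>
#include <linux/uaccess.h>

static void example_dump_unsafe_string(const void *unsafe_addr)
{
	char buf[64];
	long ret = strncpy_from_unsafe(buf, unsafe_addr, sizeof(buf));

	if (ret < 0)
		pr_warn("string at %p is not accessible\n", unsafe_addr);
	else
		pr_info("copied %ld bytes: \"%s\"\n", ret, buf);
}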
diff --git a/mm/mmap.c b/mm/mmap.c
index aa632ad..f126923 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1268,7 +1268,7 @@
 	 *  mounted, in which case we dont add PROT_EXEC.)
 	 */
 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
-		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
+		if (!(file && path_noexec(&file->f_path)))
 			prot |= PROT_EXEC;
 
 	if (!(flags & MAP_FIXED))
@@ -1337,7 +1337,7 @@
 		case MAP_PRIVATE:
 			if (!(file->f_mode & FMODE_READ))
 				return -EACCES;
-			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
+			if (path_noexec(&file->f_path)) {
 				if (vm_flags & VM_EXEC)
 					return -EPERM;
 				vm_flags &= ~VM_MAYEXEC;
diff --git a/mm/nommu.c b/mm/nommu.c
index 58ea364..1cc0709 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -324,12 +324,12 @@
 }
 
 /*
- *	vmalloc  -  allocate virtually continguos memory
+ *	vmalloc  -  allocate virtually contiguous memory
  *
  *	@size:		allocation size
  *
  *	Allocate enough pages to cover @size from the page level
- *	allocator and map them into continguos kernel virtual space.
+ *	allocator and map them into contiguous kernel virtual space.
  *
  *	For tight control over page level allocator and protection flags
  *	use __vmalloc() instead.
@@ -341,12 +341,12 @@
 EXPORT_SYMBOL(vmalloc);
 
 /*
- *	vzalloc - allocate virtually continguos memory with zero fill
+ *	vzalloc - allocate virtually contiguous memory with zero fill
  *
  *	@size:		allocation size
  *
  *	Allocate enough pages to cover @size from the page level
- *	allocator and map them into continguos kernel virtual space.
+ *	allocator and map them into contiguous kernel virtual space.
  *	The memory allocated is set to zero.
  *
  *	For tight control over page level allocator and protection flags
@@ -420,7 +420,7 @@
  *	@size:		allocation size
  *
  *	Allocate enough 32bit PA addressable pages to cover @size from the
- *	page level allocator and map them into continguos kernel virtual space.
+ *	page level allocator and map them into contiguous kernel virtual space.
  */
 void *vmalloc_32(unsigned long size)
 {
@@ -1035,7 +1035,7 @@
 
 		/* handle executable mappings and implied executable
 		 * mappings */
-		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
+		if (path_noexec(&file->f_path)) {
 			if (prot & PROT_EXEC)
 				return -EPERM;
 		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
diff --git a/mm/page_io.c b/mm/page_io.c
index 520baa4b..b995a5b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -33,22 +33,19 @@
 	if (bio) {
 		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
 		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
-		bio->bi_io_vec[0].bv_page = page;
-		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
-		bio->bi_io_vec[0].bv_offset = 0;
-		bio->bi_vcnt = 1;
-		bio->bi_iter.bi_size = PAGE_SIZE;
 		bio->bi_end_io = end_io;
+
+		bio_add_page(bio, page, PAGE_SIZE, 0);
+		BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE);
 	}
 	return bio;
 }
 
-void end_swap_bio_write(struct bio *bio, int err)
+void end_swap_bio_write(struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (!uptodate) {
+	if (bio->bi_error) {
 		SetPageError(page);
 		/*
 		 * We failed to write the page out to swap-space.
@@ -69,12 +66,11 @@
 	bio_put(bio);
 }
 
-static void end_swap_bio_read(struct bio *bio, int err)
+static void end_swap_bio_read(struct bio *bio)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct page *page = bio->bi_io_vec[0].bv_page;
 
-	if (!uptodate) {
+	if (bio->bi_error) {
 		SetPageError(page);
 		ClearPageUptodate(page);
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
@@ -254,7 +250,7 @@
 }
 
 int __swap_writepage(struct page *page, struct writeback_control *wbc,
-	void (*end_write_func)(struct bio *, int))
+		bio_end_io_t end_write_func)
 {
 	struct bio *bio;
 	int ret, rw = WRITE;
diff --git a/mm/percpu.c b/mm/percpu.c
index 2dd7448..a63b4d8 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1668,9 +1668,8 @@
 	schunk->map[1] = ai->static_size;
 	schunk->map_used = 1;
 	if (schunk->free_size)
-		schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
-	else
-		schunk->map[1] |= 1;
+		schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
+	schunk->map[schunk->map_used] |= 1;
 
 	/* init dynamic chunk if necessary */
 	if (dyn_size) {
diff --git a/net/6lowpan/Makefile b/net/6lowpan/Makefile
index eb8baa7..c6ffc55 100644
--- a/net/6lowpan/Makefile
+++ b/net/6lowpan/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_6LOWPAN) += 6lowpan.o
 
-6lowpan-y := iphc.o nhc.o
+6lowpan-y := core.o iphc.o nhc.o
 
 #rfc6282 nhcs
 obj-$(CONFIG_6LOWPAN_NHC_DEST) += nhc_dest.o
diff --git a/net/6lowpan/core.c b/net/6lowpan/core.c
new file mode 100644
index 0000000..ae1896f
--- /dev/null
+++ b/net/6lowpan/core.c
@@ -0,0 +1,40 @@
+/* This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Authors:
+ * (C) 2015 Pengutronix, Alexander Aring <aar@pengutronix.de>
+ */
+
+#include <linux/module.h>
+
+#include <net/6lowpan.h>
+
+void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype)
+{
+	lowpan_priv(dev)->lltype = lltype;
+}
+EXPORT_SYMBOL(lowpan_netdev_setup);
+
+static int __init lowpan_module_init(void)
+{
+	request_module_nowait("ipv6");
+
+	request_module_nowait("nhc_dest");
+	request_module_nowait("nhc_fragment");
+	request_module_nowait("nhc_hop");
+	request_module_nowait("nhc_ipv6");
+	request_module_nowait("nhc_mobility");
+	request_module_nowait("nhc_routing");
+	request_module_nowait("nhc_udp");
+
+	return 0;
+}
+module_init(lowpan_module_init);
+
+MODULE_LICENSE("GPL");
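lowpan_netdev_setup() is the hook the new core module exposes so that each link layer can record what it runs on when registering a 6LoWPAN net_device. A minimal caller sketch; the LOWPAN_LLTYPE_IEEE802154 value is assumed from enum lowpan_lltypes and is not part of this hunk:

#include <net/6lowpan.h>

static void example_lowpan_ll_setup(struct net_device *ldev)
{
	/* tag this 6LoWPAN device as sitting on an 802.15.4 link layer */
	lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154);
}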
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 94a375c..1e0071f 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -48,7 +48,6 @@
 
 #include <linux/bitops.h>
 #include <linux/if_arp.h>
-#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <net/6lowpan.h>
 #include <net/ipv6.h>
@@ -284,7 +283,7 @@
 		if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
 			return -EINVAL;
 
-		hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
+		hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
 		memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
 		skb_pull(skb, 2);
 		break;
@@ -610,19 +609,3 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
-
-static int __init lowpan_module_init(void)
-{
-	request_module_nowait("nhc_dest");
-	request_module_nowait("nhc_fragment");
-	request_module_nowait("nhc_hop");
-	request_module_nowait("nhc_ipv6");
-	request_module_nowait("nhc_mobility");
-	request_module_nowait("nhc_routing");
-	request_module_nowait("nhc_udp");
-
-	return 0;
-}
-module_init(lowpan_module_init);
-
-MODULE_LICENSE("GPL");
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 01d7ba8..fded865 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -791,10 +791,9 @@
 {
 	ether_setup(dev);
 
-	dev->priv_flags		|= IFF_802_1Q_VLAN;
+	dev->priv_flags		|= IFF_802_1Q_VLAN | IFF_NO_QUEUE;
 	dev->priv_flags		&= ~IFF_TX_SKB_SHARING;
 	netif_keep_dst(dev);
-	dev->tx_queue_len	= 0;
 
 	dev->netdev_ops		= &vlan_netdev_ops;
 	dev->destructor		= vlan_dev_free;
diff --git a/net/Kconfig b/net/Kconfig
index 57a7c5a..7021c1b 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -374,6 +374,13 @@
 source "net/ceph/Kconfig"
 source "net/nfc/Kconfig"
 
+config LWTUNNEL
+	bool "Network light weight tunnels"
+	---help---
+	  This feature provides an infrastructure to support lightweight
+	  tunnels such as MPLS. There is no netdevice associated with a
+	  lightweight tunnel endpoint. Tunnel encapsulation parameters are
+	  stored with the lightweight tunnel state associated with fib routes.
 
 endif   # if NET
 
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index cc78538..aa0047c 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -802,13 +802,10 @@
 			   (brdev->payload == p_bridged) ? "bridged" : "routed",
 			   brvcc->copies_failed, brvcc->copies_needed);
 #ifdef CONFIG_ATM_BR2684_IPFILTER
-#define b1(var, byte)	((u8 *) &brvcc->filter.var)[byte]
-#define bs(var)		b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
 		if (brvcc->filter.netmask != 0)
-			seq_printf(seq, "    filter=%d.%d.%d.%d/"
-				   "%d.%d.%d.%d\n", bs(prefix), bs(netmask));
-#undef bs
-#undef b1
+			seq_printf(seq, "    filter=%pI4/%pI4\n",
+				   &brvcc->filter.prefix,
+				   &brvcc->filter.netmask);
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
 	}
 	return 0;
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 753383c..912d9c3 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -77,8 +77,7 @@
  * @lq_index: index to store the value at
  * @value: value to store in the ring buffer
  */
-static void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
-				   uint8_t value)
+static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
 {
 	lq_recv[*lq_index] = value;
 	*lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
@@ -91,12 +90,12 @@
  *
  * Returns computed average value.
  */
-static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
+static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 {
-	const uint8_t *ptr;
-	uint16_t count = 0;
-	uint16_t i = 0;
-	uint16_t sum = 0;
+	const u8 *ptr;
+	u16 count = 0;
+	u16 i = 0;
+	u16 sum = 0;
 
 	ptr = lq_recv;
 
@@ -113,7 +112,7 @@
 	if (count == 0)
 		return 0;
 
-	return (uint8_t)(sum / count);
+	return (u8)(sum / count);
 }
 
 /**
@@ -155,14 +154,14 @@
 	kfree(orig_node->bat_iv.bcast_own);
 	orig_node->bat_iv.bcast_own = data_ptr;
 
-	data_ptr = kmalloc_array(max_if_num, sizeof(uint8_t), GFP_ATOMIC);
+	data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
 	if (!data_ptr) {
 		kfree(orig_node->bat_iv.bcast_own);
 		goto unlock;
 	}
 
 	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
-	       (max_if_num - 1) * sizeof(uint8_t));
+	       (max_if_num - 1) * sizeof(u8));
 	kfree(orig_node->bat_iv.bcast_own_sum);
 	orig_node->bat_iv.bcast_own_sum = data_ptr;
 
@@ -215,19 +214,19 @@
 	if (max_if_num == 0)
 		goto free_own_sum;
 
-	data_ptr = kmalloc_array(max_if_num, sizeof(uint8_t), GFP_ATOMIC);
+	data_ptr = kmalloc_array(max_if_num, sizeof(u8), GFP_ATOMIC);
 	if (!data_ptr) {
 		kfree(orig_node->bat_iv.bcast_own);
 		goto unlock;
 	}
 
 	memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
-	       del_if_num * sizeof(uint8_t));
+	       del_if_num * sizeof(u8));
 
-	if_offset = (del_if_num + 1) * sizeof(uint8_t);
-	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+	if_offset = (del_if_num + 1) * sizeof(u8);
+	memcpy((char *)data_ptr + del_if_num * sizeof(u8),
 	       orig_node->bat_iv.bcast_own_sum + if_offset,
-	       (max_if_num - del_if_num) * sizeof(uint8_t));
+	       (max_if_num - del_if_num) * sizeof(u8));
 
 free_own_sum:
 	kfree(orig_node->bat_iv.bcast_own_sum);
@@ -250,7 +249,7 @@
  * If the object does not exist, it is created and initialised.
  */
 static struct batadv_orig_node *
-batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
 {
 	struct batadv_orig_node *orig_node;
 	int size, hash_added;
@@ -270,7 +269,7 @@
 	if (!orig_node->bat_iv.bcast_own)
 		goto free_orig_node;
 
-	size = bat_priv->num_ifaces * sizeof(uint8_t);
+	size = bat_priv->num_ifaces * sizeof(u8);
 	orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
 	if (!orig_node->bat_iv.bcast_own_sum)
 		goto free_orig_node;
@@ -293,43 +292,17 @@
 
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
-			const uint8_t *neigh_addr,
+			const u8 *neigh_addr,
 			struct batadv_orig_node *orig_node,
 			struct batadv_orig_node *orig_neigh)
 {
-	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-	struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
+	struct batadv_neigh_node *neigh_node;
 
-	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
+	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
 	if (!neigh_node)
 		goto out;
 
-	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
-		kfree(neigh_node);
-		neigh_node = NULL;
-		goto out;
-	}
-
 	neigh_node->orig_node = orig_neigh;
-	neigh_node->if_incoming = hard_iface;
-
-	spin_lock_bh(&orig_node->neigh_list_lock);
-	tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
-					       neigh_addr);
-	if (!tmp_neigh_node) {
-		hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
-	} else {
-		kfree(neigh_node);
-		batadv_hardif_free_ref(hard_iface);
-		neigh_node = tmp_neigh_node;
-	}
-	spin_unlock_bh(&orig_node->neigh_list_lock);
-
-	if (!tmp_neigh_node)
-		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-			   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
-			   neigh_addr, orig_node->orig,
-			   hard_iface->net_dev->name);
 
 out:
 	return neigh_node;
@@ -339,7 +312,7 @@
 {
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	unsigned char *ogm_buff;
-	uint32_t random_seqno;
+	u32 random_seqno;
 
 	/* randomize initial seqno to avoid collision */
 	get_random_bytes(&random_seqno, sizeof(random_seqno));
@@ -411,8 +384,7 @@
 }
 
 /* apply hop penalty for a normal link */
-static uint8_t batadv_hop_penalty(uint8_t tq,
-				  const struct batadv_priv *bat_priv)
+static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
 {
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	int new_tq;
@@ -442,11 +414,11 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	const char *fwd_str;
-	uint8_t packet_num;
-	int16_t buff_pos;
+	u8 packet_num;
+	s16 buff_pos;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct sk_buff *skb;
-	uint8_t *packet_pos;
+	u8 *packet_pos;
 
 	if (hard_iface->if_status != BATADV_IF_ACTIVE)
 		return;
@@ -837,7 +809,7 @@
 				  struct batadv_hard_iface *if_outgoing)
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	uint16_t tvlv_len;
+	u16 tvlv_len;
 
 	if (batadv_ogm_packet->ttl <= 1) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -896,9 +868,9 @@
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
 	unsigned long *word;
-	uint32_t i;
+	u32 i;
 	size_t word_index;
-	uint8_t *w;
+	u8 *w;
 	int if_num;
 
 	for (i = 0; i < hash->size; i++) {
@@ -927,8 +899,8 @@
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *primary_if, *tmp_hard_iface;
 	int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
-	uint32_t seqno;
-	uint16_t tvlv_len = 0;
+	u32 seqno;
+	u16 tvlv_len = 0;
 	unsigned long send_time;
 
 	primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -947,7 +919,7 @@
 	batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
 	/* change sequence number to network order */
-	seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
+	seqno = (u32)atomic_read(&hard_iface->bat_iv.ogm_seqno);
 	batadv_ogm_packet->seqno = htonl(seqno);
 	atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
@@ -970,7 +942,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) {
 		if (tmp_hard_iface->soft_iface != hard_iface->soft_iface)
-				continue;
+			continue;
 		batadv_iv_ogm_queue_add(bat_priv, *ogm_buff,
 					*ogm_buff_len, hard_iface,
 					tmp_hard_iface, 1, send_time);
@@ -1006,13 +978,14 @@
 {
 	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL;
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
-	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+	struct batadv_neigh_node *neigh_node = NULL;
+	struct batadv_neigh_node *tmp_neigh_node = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_orig_node *orig_node_tmp;
 	int if_num;
-	uint8_t sum_orig, sum_neigh;
-	uint8_t *neigh_addr;
-	uint8_t tq_avg;
+	u8 sum_orig, sum_neigh;
+	u8 *neigh_addr;
+	u8 tq_avg;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "update_originator(): Searching and updating originator entry of received packet\n");
@@ -1164,8 +1137,8 @@
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
-	uint8_t total_count;
-	uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+	u8 total_count;
+	u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
 	unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
 	int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
 	unsigned int combined_tq;
@@ -1311,13 +1284,13 @@
 	struct batadv_neigh_node *neigh_node;
 	struct batadv_neigh_ifinfo *neigh_ifinfo;
 	int is_dup;
-	int32_t seq_diff;
+	s32 seq_diff;
 	int need_update = 0;
 	int set_mark;
 	enum batadv_dup_status ret = BATADV_NO_DUP;
-	uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
-	uint8_t *neigh_addr;
-	uint8_t packet_count;
+	u32 seqno = ntohl(batadv_ogm_packet->seqno);
+	u8 *neigh_addr;
+	u8 packet_count;
 	unsigned long *bitmap;
 
 	orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
@@ -1406,7 +1379,8 @@
 				struct batadv_hard_iface *if_outgoing)
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-	struct batadv_neigh_node *router = NULL, *router_router = NULL;
+	struct batadv_neigh_node *router = NULL;
+	struct batadv_neigh_node *router_router = NULL;
 	struct batadv_orig_node *orig_neigh_node;
 	struct batadv_orig_ifinfo *orig_ifinfo;
 	struct batadv_neigh_node *orig_neigh_router = NULL;
@@ -1418,7 +1392,7 @@
 	bool sameseq, similar_ttl;
 	struct sk_buff *skb_priv;
 	struct ethhdr *ethhdr;
-	uint8_t *prev_sender;
+	u8 *prev_sender;
 	int is_bidirect;
 
 	/* create a private copy of the skb, as some functions change tq value
@@ -1600,7 +1574,7 @@
 	struct batadv_orig_node *orig_neigh_node, *orig_node;
 	struct batadv_hard_iface *hard_iface;
 	struct batadv_ogm_packet *ogm_packet;
-	uint32_t if_incoming_seqno;
+	u32 if_incoming_seqno;
 	bool has_directlink_flag;
 	struct ethhdr *ethhdr;
 	bool is_my_oldorig = false;
@@ -1673,9 +1647,9 @@
 	if (is_my_orig) {
 		unsigned long *word;
 		int offset;
-		int32_t bit_pos;
-		int16_t if_num;
-		uint8_t *weight;
+		s32 bit_pos;
+		s16 if_num;
+		u8 *weight;
 
 		orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
 							 ethhdr->h_source);
@@ -1751,7 +1725,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_ogm_packet *ogm_packet;
-	uint8_t *packet_pos;
+	u8 *packet_pos;
 	int ogm_offset;
 	bool ret;
 
@@ -1835,7 +1809,7 @@
 	unsigned long last_seen_jiffies;
 	struct hlist_head *head;
 	int batman_count = 0;
-	uint32_t i;
+	u32 i;
 
 	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
 		   "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
@@ -1903,7 +1877,7 @@
 				   struct batadv_hard_iface *if_outgoing2)
 {
 	struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
-	uint8_t tq1, tq2;
+	u8 tq1, tq2;
 	int diff;
 
 	neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
@@ -1945,7 +1919,7 @@
 			   struct batadv_hard_iface *if_outgoing2)
 {
 	struct batadv_neigh_ifinfo *neigh1_ifinfo, *neigh2_ifinfo;
-	uint8_t tq1, tq2;
+	u8 tq1, tq2;
 	bool ret;
 
 	neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index cf68c32..25cbc36 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -21,7 +21,7 @@
 #include <linux/bitmap.h>
 
 /* shift the packet array by n places. */
-static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
 {
 	if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
 		return;
@@ -35,8 +35,8 @@
  *  1 if the window was moved (either new or very old)
  *  0 if the window was not moved/shifted.
  */
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
-			  int32_t seq_num_diff, int set_mark)
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
+			  int set_mark)
 {
 	struct batadv_priv *bat_priv = priv;
 
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 0c24562..0226b22 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -28,9 +28,9 @@
  * and curr_seqno is within range of last_seqno. Otherwise returns 0.
  */
 static inline int batadv_test_bit(const unsigned long *seq_bits,
-				  uint32_t last_seqno, uint32_t curr_seqno)
+				  u32 last_seqno, u32 curr_seqno)
 {
-	int32_t diff;
+	s32 diff;
 
 	diff = last_seqno - curr_seqno;
 	if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
@@ -39,7 +39,7 @@
 }
 
 /* turn corresponding bit on, so we can remember that we got the packet */
-static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
+static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
 {
 	/* if too old, just drop it */
 	if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
@@ -51,7 +51,7 @@
 /* receive and process one packet, returns 1 if received seq_num is considered
  * new, 0 if old
  */
-int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
-			  int32_t seq_num_diff, int set_mark);
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
+			  int set_mark);
 
 #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
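
The batadv_test_bit() conversion above keeps the trick the old int32_t code relied on: the unsigned sequence-number subtraction is stored in a signed 32-bit variable, so a curr_seqno that sits just behind last_seqno stays inside the window even across the u32 wrap point. A minimal userspace sketch of that check, not part of the patch, with WINDOW_SIZE standing in for BATADV_TQ_LOCAL_WINDOW_SIZE (64 in this driver) and two's-complement conversion assumed as the kernel does:

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 64	/* stand-in for BATADV_TQ_LOCAL_WINDOW_SIZE */

static int seqno_in_window(uint32_t last_seqno, uint32_t curr_seqno)
{
	int32_t diff = last_seqno - curr_seqno;	/* u32 wraparound is intended */

	return diff >= 0 && diff < WINDOW_SIZE;
}

int main(void)
{
	/* curr_seqno slightly older than last_seqno, across the u32 wrap */
	printf("%d\n", seqno_in_window(5, UINT32_MAX - 3));	/* 1: diff is 9 */
	/* curr_seqno far ahead of last_seqno */
	printf("%d\n", seqno_in_window(5, 200));		/* 0: diff is negative */
	return 0;
}
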
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ba06092..191a702 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -51,7 +51,7 @@
 #include "packet.h"
 #include "translation-table.h"
 
-static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
 
 static void batadv_bla_periodic_work(struct work_struct *work);
 static void
@@ -59,10 +59,10 @@
 			 struct batadv_bla_backbone_gw *backbone_gw);
 
 /* return the index of the claim */
-static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
+static inline u32 batadv_choose_claim(const void *data, u32 size)
 {
 	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
 	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
@@ -71,11 +71,10 @@
 }
 
 /* return the index of the backbone gateway */
-static inline uint32_t batadv_choose_backbone_gw(const void *data,
-						 uint32_t size)
+static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 {
 	const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
 	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
@@ -89,7 +88,8 @@
 {
 	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
 					 hash_entry);
-	const struct batadv_bla_backbone_gw *gw1 = data1, *gw2 = data2;
+	const struct batadv_bla_backbone_gw *gw1 = data1;
+	const struct batadv_bla_backbone_gw *gw2 = data2;
 
 	if (!batadv_compare_eth(gw1->orig, gw2->orig))
 		return 0;
@@ -106,7 +106,8 @@
 {
 	const void *data1 = container_of(node, struct batadv_bla_claim,
 					 hash_entry);
-	const struct batadv_bla_claim *cl1 = data1, *cl2 = data2;
+	const struct batadv_bla_claim *cl1 = data1;
+	const struct batadv_bla_claim *cl2 = data2;
 
 	if (!batadv_compare_eth(cl1->addr, cl2->addr))
 		return 0;
@@ -192,8 +193,8 @@
  * Returns claim if found or NULL otherwise.
  */
 static struct batadv_bla_backbone_gw *
-batadv_backbone_hash_find(struct batadv_priv *bat_priv,
-			  uint8_t *addr, unsigned short vid)
+batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
+			  unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
 	struct hlist_head *head;
@@ -269,14 +270,14 @@
  * @vid: the VLAN ID
  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
  */
-static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
 				  unsigned short vid, int claimtype)
 {
 	struct sk_buff *skb;
 	struct ethhdr *ethhdr;
 	struct batadv_hard_iface *primary_if;
 	struct net_device *soft_iface;
-	uint8_t *hw_src;
+	u8 *hw_src;
 	struct batadv_bla_claim_dst local_claim_dest;
 	__be32 zeroip = 0;
 
@@ -304,13 +305,13 @@
 			  * with XX   = claim type
 			  * and YY:YY = group id
 			  */
-			 (uint8_t *)&local_claim_dest);
+			 (u8 *)&local_claim_dest);
 
 	if (!skb)
 		goto out;
 
 	ethhdr = (struct ethhdr *)skb->data;
-	hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
+	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
 
 	/* now we pretend that the client would have sent this ... */
 	switch (claimtype) {
@@ -383,7 +384,7 @@
  * be found.
  */
 static struct batadv_bla_backbone_gw *
-batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
 			   unsigned short vid, bool own_backbone)
 {
 	struct batadv_bla_backbone_gw *entry;
@@ -552,7 +553,7 @@
 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
 				     struct batadv_bla_backbone_gw *backbone_gw)
 {
-	uint8_t mac[ETH_ALEN];
+	u8 mac[ETH_ALEN];
 	__be16 crc;
 
 	memcpy(mac, batadv_announce_mac, 4);
@@ -571,7 +572,7 @@
  * @backbone_gw: the backbone gateway which claims it
  */
 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
-				 const uint8_t *mac, const unsigned short vid,
+				 const u8 *mac, const unsigned short vid,
 				 struct batadv_bla_backbone_gw *backbone_gw)
 {
 	struct batadv_bla_claim *claim;
@@ -635,7 +636,7 @@
  * given mac address and vid.
  */
 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
-				 const uint8_t *mac, const unsigned short vid)
+				 const u8 *mac, const unsigned short vid)
 {
 	struct batadv_bla_claim search_claim, *claim;
 
@@ -659,12 +660,11 @@
 }
 
 /* check for ANNOUNCE frame, return 1 if handled */
-static int batadv_handle_announce(struct batadv_priv *bat_priv,
-				  uint8_t *an_addr, uint8_t *backbone_addr,
-				  unsigned short vid)
+static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
+				  u8 *backbone_addr, unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
-	uint16_t crc;
+	u16 crc;
 
 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
 		return 0;
@@ -708,8 +708,8 @@
 /* check for REQUEST frame, return 1 if handled */
 static int batadv_handle_request(struct batadv_priv *bat_priv,
 				 struct batadv_hard_iface *primary_if,
-				 uint8_t *backbone_addr,
-				 struct ethhdr *ethhdr, unsigned short vid)
+				 u8 *backbone_addr, struct ethhdr *ethhdr,
+				 unsigned short vid)
 {
 	/* check for REQUEST frame */
 	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
@@ -732,8 +732,8 @@
 /* check for UNCLAIM frame, return 1 if handled */
 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
 				 struct batadv_hard_iface *primary_if,
-				 uint8_t *backbone_addr,
-				 uint8_t *claim_addr, unsigned short vid)
+				 u8 *backbone_addr, u8 *claim_addr,
+				 unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
 
@@ -761,7 +761,7 @@
 /* check for CLAIM frame, return 1 if handled */
 static int batadv_handle_claim(struct batadv_priv *bat_priv,
 			       struct batadv_hard_iface *primary_if,
-			       uint8_t *backbone_addr, uint8_t *claim_addr,
+			       u8 *backbone_addr, u8 *claim_addr,
 			       unsigned short vid)
 {
 	struct batadv_bla_backbone_gw *backbone_gw;
@@ -805,10 +805,10 @@
  */
 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *primary_if,
-				    uint8_t *hw_src, uint8_t *hw_dst,
+				    u8 *hw_src, u8 *hw_dst,
 				    struct ethhdr *ethhdr)
 {
-	uint8_t *backbone_addr;
+	u8 *backbone_addr;
 	struct batadv_orig_node *orig_node;
 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
 
@@ -877,7 +877,7 @@
 				    struct sk_buff *skb)
 {
 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
-	uint8_t *hw_src, *hw_dst;
+	u8 *hw_src, *hw_dst;
 	struct vlan_hdr *vhdr, vhdr_buf;
 	struct ethhdr *ethhdr;
 	struct arphdr *arphdr;
@@ -923,7 +923,7 @@
 
 	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
 	ethhdr = eth_hdr(skb);
-	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);
+	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
 
 	/* Check whether the ARP frame carries a valid
 	 * IP information
@@ -937,7 +937,7 @@
 	if (arphdr->ar_pln != 4)
 		return 0;
 
-	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
+	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
 	hw_dst = hw_src + ETH_ALEN + 4;
 	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
 	bla_dst_own = &bat_priv->bla.claim_dest;
@@ -1238,9 +1238,9 @@
 int batadv_bla_init(struct batadv_priv *bat_priv)
 {
 	int i;
-	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
+	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
 	struct batadv_hard_iface *primary_if;
-	uint16_t crc;
+	u16 crc;
 	unsigned long entrytime;
 
 	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
@@ -1368,7 +1368,7 @@
  *
  * Returns true if orig is a backbone for this vid, false otherwise.
  */
-bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 				    unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
@@ -1647,9 +1647,9 @@
 	struct batadv_bla_claim *claim;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 	bool is_own;
-	uint8_t *primary_addr;
+	u8 *primary_addr;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -1692,9 +1692,9 @@
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
 	int secs, msecs;
-	uint32_t i;
+	u32 i;
 	bool is_own;
-	uint8_t *primary_addr;
+	u8 *primary_addr;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 0282690..025152b 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -22,9 +22,6 @@
 
 #include <linux/types.h>
 
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
 struct seq_file;
 struct sk_buff;
 
@@ -38,7 +35,7 @@
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
 					     void *offset);
-bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 				    unsigned short vid);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb);
@@ -84,8 +81,7 @@
 }
 
 static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
-						  uint8_t *orig,
-						  unsigned short vid)
+						  u8 *orig, unsigned short vid)
 {
 	return false;
 }
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 187acdc..80ab8d6 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -22,7 +22,6 @@
 
 #include <linux/kconfig.h>
 
-struct batadv_hard_iface;
 struct net_device;
 
 #define BATADV_DEBUGFS_SUBDIR "batman_adv"
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6d0b471..83bc1aa 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -101,7 +102,7 @@
 	struct batadv_dat_entry *dat_entry;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!bat_priv->dat.hash)
 		return;
@@ -167,11 +168,11 @@
  *
  * Returns the value of the hw_src field in the ARP packet.
  */
-static uint8_t *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
+static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 {
-	uint8_t *addr;
+	u8 *addr;
 
-	addr = (uint8_t *)(skb->data + hdr_size);
+	addr = (u8 *)(skb->data + hdr_size);
 	addr += ETH_HLEN + sizeof(struct arphdr);
 
 	return addr;
@@ -196,7 +197,7 @@
  *
  * Returns the value of the hw_dst field in the ARP packet.
  */
-static uint8_t *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
+static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 {
 	return batadv_arp_hw_src(skb, hdr_size) + ETH_ALEN + 4;
 }
@@ -220,12 +221,12 @@
  *
  * Returns the selected index in the hash table for the given data.
  */
-static uint32_t batadv_hash_dat(const void *data, uint32_t size)
+static u32 batadv_hash_dat(const void *data, u32 size)
 {
-	uint32_t hash = 0;
+	u32 hash = 0;
 	const struct batadv_dat_entry *dat = data;
 	const unsigned char *key;
-	uint32_t i;
+	u32 i;
 
 	key = (const unsigned char *)&dat->ip;
 	for (i = 0; i < sizeof(dat->ip); i++) {
@@ -264,7 +265,7 @@
 	struct hlist_head *head;
 	struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
 	struct batadv_hashtable *hash = bat_priv->dat.hash;
-	uint32_t index;
+	u32 index;
 
 	if (!hash)
 		return NULL;
@@ -299,7 +300,7 @@
  * @vid: VLAN identifier
  */
 static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
-				 uint8_t *mac_addr, unsigned short vid)
+				 u8 *mac_addr, unsigned short vid)
 {
 	struct batadv_dat_entry *dat_entry;
 	int hash_added;
@@ -356,11 +357,11 @@
  * @msg: message to print together with the debugging information
  */
 static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
-			   uint16_t type, int hdr_size, char *msg)
+			   u16 type, int hdr_size, char *msg)
 {
 	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
 	struct batadv_bcast_packet *bcast_pkt;
-	uint8_t *orig_addr;
+	u8 *orig_addr;
 	__be32 ip_src, ip_dst;
 
 	if (msg)
@@ -423,7 +424,7 @@
 #else
 
 static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
-			   uint16_t type, int hdr_size, char *msg)
+			   u16 type, int hdr_size, char *msg)
 {
 }
 
@@ -453,7 +454,7 @@
 	int j;
 
 	/* check if orig node candidate is running DAT */
-	if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+	if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
 		goto out;
 
 	/* Check if this node has already been selected... */
@@ -496,7 +497,8 @@
 					 int select, batadv_dat_addr_t ip_key,
 					 batadv_dat_addr_t *last_max)
 {
-	batadv_dat_addr_t max = 0, tmp_max = 0;
+	batadv_dat_addr_t max = 0;
+	batadv_dat_addr_t tmp_max = 0;
 	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
@@ -708,14 +710,13 @@
  */
 static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					   struct batadv_orig_node *orig,
-					   uint8_t flags,
-					   void *tvlv_value,
-					   uint16_t tvlv_value_len)
+					   u8 flags,
+					   void *tvlv_value, u16 tvlv_value_len)
 {
 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+		clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
 	else
-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+		set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
 }
 
 /**
@@ -786,7 +787,7 @@
 	struct hlist_head *head;
 	unsigned long last_seen_jiffies;
 	int last_seen_msecs, last_seen_secs, last_seen_mins;
-	uint32_t i;
+	u32 i;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -829,14 +830,14 @@
  *
  * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
  */
-static uint16_t batadv_arp_get_type(struct batadv_priv *bat_priv,
-				    struct sk_buff *skb, int hdr_size)
+static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
+			       struct sk_buff *skb, int hdr_size)
 {
 	struct arphdr *arphdr;
 	struct ethhdr *ethhdr;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src, *hw_dst;
-	uint16_t type = 0;
+	u8 *hw_src, *hw_dst;
+	u16 type = 0;
 
 	/* pull the ethernet header */
 	if (unlikely(!pskb_may_pull(skb, hdr_size + ETH_HLEN)))
@@ -933,9 +934,9 @@
 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
 					   struct sk_buff *skb)
 {
-	uint16_t type = 0;
+	u16 type = 0;
 	__be32 ip_dst, ip_src;
-	uint8_t *hw_src;
+	u8 *hw_src;
 	bool ret = false;
 	struct batadv_dat_entry *dat_entry = NULL;
 	struct sk_buff *skb_new;
@@ -1021,9 +1022,9 @@
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
 					   struct sk_buff *skb, int hdr_size)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src;
+	u8 *hw_src;
 	struct sk_buff *skb_new;
 	struct batadv_dat_entry *dat_entry = NULL;
 	bool ret = false;
@@ -1099,9 +1100,9 @@
 void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
 					 struct sk_buff *skb)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src, *hw_dst;
+	u8 *hw_src, *hw_dst;
 	int hdr_size = 0;
 	unsigned short vid;
 
@@ -1145,9 +1146,9 @@
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
 					 struct sk_buff *skb, int hdr_size)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_src, ip_dst;
-	uint8_t *hw_src, *hw_dst;
+	u8 *hw_src, *hw_dst;
 	bool dropped = false;
 	unsigned short vid;
 
@@ -1201,7 +1202,7 @@
 bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
 				      struct batadv_forw_packet *forw_packet)
 {
-	uint16_t type;
+	u16 type;
 	__be32 ip_dst;
 	struct batadv_dat_entry *dat_entry = NULL;
 	bool ret = false;
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 3181507..26d4a52 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -54,7 +54,7 @@
 static inline void
 batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node)
 {
-	uint32_t addr;
+	u32 addr;
 
 	addr = batadv_choose_orig(orig_node->orig, BATADV_DAT_ADDR_MAX);
 	orig_node->dat_addr = (batadv_dat_addr_t)addr;
@@ -69,7 +69,7 @@
 batadv_dat_init_own_addr(struct batadv_priv *bat_priv,
 			 struct batadv_hard_iface *primary_if)
 {
-	uint32_t addr;
+	u32 addr;
 
 	addr = batadv_choose_orig(primary_if->net_dev->dev_addr,
 				  BATADV_DAT_ADDR_MAX);
@@ -89,7 +89,7 @@
  * Updates the ethtool statistics for the received packet if it is a DAT subtype
  */
 static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
-					  uint8_t subtype)
+					  u8 subtype)
 {
 	switch (subtype) {
 	case BATADV_P_DAT_DHT_GET:
@@ -169,7 +169,7 @@
 }
 
 static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
-					  uint8_t subtype)
+					  u8 subtype)
 {
 }
 
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index c0f0d01..700c96c 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -25,6 +25,7 @@
 #include <linux/if_ether.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/pkt_sched.h>
 #include <linux/skbuff.h>
@@ -66,7 +67,7 @@
 			    bool (*check_cb)(struct batadv_frag_table_entry *))
 {
 	struct batadv_frag_table_entry *chain;
-	uint8_t i;
+	u8 i;
 
 	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
 		chain = &orig_node->fragments[i];
@@ -110,8 +111,10 @@
  * without searching for the right position.
  */
 static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
-				   uint16_t seqno)
+				   u16 seqno)
 {
+	lockdep_assert_held(&chain->lock);
+
 	if (chain->seqno == seqno)
 		return false;
 
@@ -145,8 +148,8 @@
 	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
 	struct batadv_frag_list_entry *frag_entry_last = NULL;
 	struct batadv_frag_packet *frag_packet;
-	uint8_t bucket;
-	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+	u8 bucket;
+	u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);
 	bool ret = false;
 
 	/* Linearize packet to avoid linearizing 16 packets in a row when doing
@@ -351,7 +354,7 @@
 	struct batadv_orig_node *orig_node_dst = NULL;
 	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_frag_packet *packet;
-	uint16_t total_size;
+	u16 total_size;
 	bool ret = false;
 
 	packet = (struct batadv_frag_packet *)skb->data;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index cffa92d..e6c8382 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -27,7 +27,6 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/netdevice.h>
@@ -153,20 +152,14 @@
 	struct batadv_neigh_node *router;
 	struct batadv_neigh_ifinfo *router_ifinfo;
 	struct batadv_gw_node *gw_node, *curr_gw = NULL;
-	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
-	uint32_t gw_divisor;
-	uint8_t max_tq = 0;
-	uint8_t tq_avg;
+	u64 max_gw_factor = 0;
+	u64 tmp_gw_factor = 0;
+	u8 max_tq = 0;
+	u8 tq_avg;
 	struct batadv_orig_node *orig_node;
 
-	gw_divisor = BATADV_TQ_LOCAL_WINDOW_SIZE * BATADV_TQ_LOCAL_WINDOW_SIZE;
-	gw_divisor *= 64;
-
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-		if (gw_node->deleted)
-			continue;
-
 		orig_node = gw_node->orig_node;
 		router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 		if (!router)
@@ -187,7 +180,7 @@
 			tmp_gw_factor = tq_avg * tq_avg;
 			tmp_gw_factor *= gw_node->bandwidth_down;
 			tmp_gw_factor *= 100 * 100;
-			tmp_gw_factor /= gw_divisor;
+			tmp_gw_factor >>= 18;
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
@@ -267,7 +260,8 @@
 
 void batadv_gw_election(struct batadv_priv *bat_priv)
 {
-	struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
+	struct batadv_gw_node *curr_gw = NULL;
+	struct batadv_gw_node *next_gw = NULL;
 	struct batadv_neigh_node *router = NULL;
 	struct batadv_neigh_ifinfo *router_ifinfo = NULL;
 	char gw_addr[18] = { '\0' };
@@ -351,8 +345,9 @@
 	struct batadv_neigh_ifinfo *router_orig_tq = NULL;
 	struct batadv_neigh_ifinfo *router_gw_tq = NULL;
 	struct batadv_orig_node *curr_gw_orig;
-	struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
-	uint8_t gw_tq_avg, orig_tq_avg;
+	struct batadv_neigh_node *router_gw = NULL;
+	struct batadv_neigh_node *router_orig = NULL;
+	u8 gw_tq_avg, orig_tq_avg;
 
 	curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
 	if (!curr_gw_orig)
@@ -474,9 +469,6 @@
 		if (gw_node_tmp->orig_node != orig_node)
 			continue;
 
-		if (gw_node_tmp->deleted)
-			continue;
-
 		if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
 			continue;
 
@@ -526,9 +518,7 @@
 	gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
 	gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 
-	gw_node->deleted = 0;
 	if (ntohl(gateway->bandwidth_down) == 0) {
-		gw_node->deleted = jiffies;
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Gateway %pM removed from gateway list\n",
 			   orig_node->orig);
@@ -536,14 +526,21 @@
 		/* Note: We don't need a NULL check here, since curr_gw never
 		 * gets dereferenced.
 		 */
+		spin_lock_bh(&bat_priv->gw.list_lock);
+		hlist_del_init_rcu(&gw_node->list);
+		spin_unlock_bh(&bat_priv->gw.list_lock);
+
+		batadv_gw_node_free_ref(gw_node);
+
 		curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 		if (gw_node == curr_gw)
 			batadv_gw_reselect(bat_priv);
+
+		if (curr_gw)
+			batadv_gw_node_free_ref(curr_gw);
 	}
 
 out:
-	if (curr_gw)
-		batadv_gw_node_free_ref(curr_gw);
 	if (gw_node)
 		batadv_gw_node_free_ref(gw_node);
 }
@@ -559,39 +556,18 @@
 	batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
-void batadv_gw_node_purge(struct batadv_priv *bat_priv)
+void batadv_gw_node_free(struct batadv_priv *bat_priv)
 {
-	struct batadv_gw_node *gw_node, *curr_gw;
+	struct batadv_gw_node *gw_node;
 	struct hlist_node *node_tmp;
-	unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
-	int do_reselect = 0;
-
-	curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
 	spin_lock_bh(&bat_priv->gw.list_lock);
-
 	hlist_for_each_entry_safe(gw_node, node_tmp,
 				  &bat_priv->gw.list, list) {
-		if (((!gw_node->deleted) ||
-		     (time_before(jiffies, gw_node->deleted + timeout))) &&
-		    atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
-			continue;
-
-		if (curr_gw == gw_node)
-			do_reselect = 1;
-
-		hlist_del_rcu(&gw_node->list);
+		hlist_del_init_rcu(&gw_node->list);
 		batadv_gw_node_free_ref(gw_node);
 	}
-
 	spin_unlock_bh(&bat_priv->gw.list_lock);
-
-	/* gw_reselect() needs to acquire the gw_list_lock */
-	if (do_reselect)
-		batadv_gw_reselect(bat_priv);
-
-	if (curr_gw)
-		batadv_gw_node_free_ref(curr_gw);
 }
 
 /* fails if orig_node has no router */
@@ -655,9 +631,6 @@
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-		if (gw_node->deleted)
-			continue;
-
 		/* fails if orig_node has no router */
 		if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
 			continue;
@@ -692,7 +665,7 @@
  */
 enum batadv_dhcp_recipient
 batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
-			     uint8_t *chaddr)
+			     u8 *chaddr)
 {
 	enum batadv_dhcp_recipient ret = BATADV_DHCP_NO;
 	struct ethhdr *ethhdr;
@@ -702,7 +675,7 @@
 	struct vlan_ethhdr *vhdr;
 	int chaddr_offset;
 	__be16 proto;
-	uint8_t *p;
+	u8 *p;
 
 	/* check for ethernet header */
 	if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
@@ -812,13 +785,15 @@
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 			    struct sk_buff *skb)
 {
-	struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+	struct batadv_neigh_node *neigh_curr = NULL;
+	struct batadv_neigh_node *neigh_old = NULL;
 	struct batadv_orig_node *orig_dst_node = NULL;
-	struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
+	struct batadv_gw_node *gw_node = NULL;
+	struct batadv_gw_node *curr_gw = NULL;
 	struct batadv_neigh_ifinfo *curr_ifinfo, *old_ifinfo;
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	bool out_of_range = false;
-	uint8_t curr_tq_avg;
+	u8 curr_tq_avg;
 	unsigned short vid;
 
 	vid = batadv_get_vid(skb, 0);
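
The gw_factor hunk above widens the factor to u64 (tq_avg * tq_avg * bandwidth_down * 100 * 100 easily exceeds 32 bits) and replaces the division by the removed gw_divisor with a shift: with BATADV_TQ_LOCAL_WINDOW_SIZE == 64 the old divisor was 64 * 64 * 64 == 1 << 18, so the two forms agree. A small standalone check of that equivalence, not part of the patch:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t window = 64;			/* BATADV_TQ_LOCAL_WINDOW_SIZE */
	const uint64_t divisor = window * window * 64;	/* the removed gw_divisor */
	uint64_t tq_avg = 255, bw_down = 100000;	/* example inputs */
	uint64_t factor = tq_avg * tq_avg * bw_down * 100 * 100;

	assert(divisor == (UINT64_C(1) << 18));
	assert(factor / divisor == factor >> 18);	/* divide and shift agree */
	return 0;
}
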
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 89565b4..fa95277 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -38,11 +38,11 @@
 			   struct batadv_tvlv_gateway_data *gateway);
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
 			   struct batadv_orig_node *orig_node);
-void batadv_gw_node_purge(struct batadv_priv *bat_priv);
+void batadv_gw_node_free(struct batadv_priv *bat_priv);
 int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
 enum batadv_dhcp_recipient
 batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
-			     uint8_t *chaddr);
+			     u8 *chaddr);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 39cf44c..0cb5e6b 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -19,8 +19,10 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/errno.h>
 #include <linux/byteorder/generic.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/netdevice.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
@@ -39,11 +41,11 @@
  * Returns false on parse error and true otherwise.
  */
 static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-				      uint32_t *down, uint32_t *up)
+				      u32 *down, u32 *up)
 {
 	enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
 	char *slash_ptr, *tmp_ptr;
-	long ldown, lup;
+	u64 ldown, lup;
 	int ret;
 
 	slash_ptr = strchr(buff, '/');
@@ -61,7 +63,7 @@
 			*tmp_ptr = '\0';
 	}
 
-	ret = kstrtol(buff, 10, &ldown);
+	ret = kstrtou64(buff, 10, &ldown);
 	if (ret) {
 		batadv_err(net_dev,
 			   "Download speed of gateway mode invalid: %s\n",
@@ -71,14 +73,31 @@
 
 	switch (bw_unit_type) {
 	case BATADV_BW_UNIT_MBIT:
-		*down = ldown * 10;
+		/* prevent overflow */
+		if (U64_MAX / 10 < ldown) {
+			batadv_err(net_dev,
+				   "Download speed of gateway mode too large: %s\n",
+				   buff);
+			return false;
+		}
+
+		ldown *= 10;
 		break;
 	case BATADV_BW_UNIT_KBIT:
 	default:
-		*down = ldown / 100;
+		ldown = div_u64(ldown, 100);
 		break;
 	}
 
+	if (U32_MAX < ldown) {
+		batadv_err(net_dev,
+			   "Download speed of gateway mode too large: %s\n",
+			   buff);
+		return false;
+	}
+
+	*down = ldown;
+
 	/* we also got some upload info */
 	if (slash_ptr) {
 		bw_unit_type = BATADV_BW_UNIT_KBIT;
@@ -94,7 +113,7 @@
 				*tmp_ptr = '\0';
 		}
 
-		ret = kstrtol(slash_ptr + 1, 10, &lup);
+		ret = kstrtou64(slash_ptr + 1, 10, &lup);
 		if (ret) {
 			batadv_err(net_dev,
 				   "Upload speed of gateway mode invalid: %s\n",
@@ -104,13 +123,30 @@
 
 		switch (bw_unit_type) {
 		case BATADV_BW_UNIT_MBIT:
-			*up = lup * 10;
+			/* prevent overflow */
+			if (U64_MAX / 10 < lup) {
+				batadv_err(net_dev,
+					   "Upload speed of gateway mode too large: %s\n",
+					   slash_ptr + 1);
+				return false;
+			}
+
+			lup *= 10;
 			break;
 		case BATADV_BW_UNIT_KBIT:
 		default:
-			*up = lup / 100;
+			lup = div_u64(lup, 100);
 			break;
 		}
+
+		if (U32_MAX < lup) {
+			batadv_err(net_dev,
+				   "Upload speed of gateway mode too large: %s\n",
+				   slash_ptr + 1);
+			return false;
+		}
+
+		*up = lup;
 	}
 
 	return true;
@@ -124,7 +160,7 @@
 void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
 {
 	struct batadv_tvlv_gateway_data gw;
-	uint32_t down, up;
+	u32 down, up;
 	char gw_mode;
 
 	gw_mode = atomic_read(&bat_priv->gw_mode);
@@ -149,7 +185,10 @@
 				size_t count)
 {
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
+	u32 down_curr;
+	u32 up_curr;
+	u32 down_new = 0;
+	u32 up_new = 0;
 	bool ret;
 
 	down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
@@ -157,7 +196,7 @@
 
 	ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
 	if (!ret)
-		goto end;
+		return -EINVAL;
 
 	if (!down_new)
 		down_new = 1;
@@ -181,7 +220,6 @@
 	atomic_set(&bat_priv->gw.bandwidth_up, up_new);
 	batadv_gw_tvlv_container_update(bat_priv);
 
-end:
 	return count;
 }
 
@@ -195,9 +233,8 @@
  */
 static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t flags,
-					  void *tvlv_value,
-					  uint16_t tvlv_value_len)
+					  u8 flags,
+					  void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
 
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index bd5c812..ab893e3 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -22,7 +22,6 @@
 
 #include <linux/types.h>
 
-struct batadv_priv;
 struct net_device;
 
 enum batadv_gw_modes {
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f4a15d2..f11345e 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -252,6 +252,44 @@
 	rcu_read_unlock();
 }
 
+/**
+ * batadv_hardif_recalc_extra_skbroom() - Recalculate skbuff extra head/tailroom
+ * @soft_iface: netdev struct of the mesh interface
+ */
+static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface)
+{
+	const struct batadv_hard_iface *hard_iface;
+	unsigned short lower_header_len = ETH_HLEN;
+	unsigned short lower_headroom = 0;
+	unsigned short lower_tailroom = 0;
+	unsigned short needed_headroom;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+		if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+			continue;
+
+		if (hard_iface->soft_iface != soft_iface)
+			continue;
+
+		lower_header_len = max_t(unsigned short, lower_header_len,
+					 hard_iface->net_dev->hard_header_len);
+
+		lower_headroom = max_t(unsigned short, lower_headroom,
+				       hard_iface->net_dev->needed_headroom);
+
+		lower_tailroom = max_t(unsigned short, lower_tailroom,
+				       hard_iface->net_dev->needed_tailroom);
+	}
+	rcu_read_unlock();
+
+	needed_headroom = lower_headroom + (lower_header_len - ETH_HLEN);
+	needed_headroom += batadv_max_header_len();
+
+	soft_iface->needed_headroom = needed_headroom;
+	soft_iface->needed_tailroom = lower_tailroom;
+}
+
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -474,6 +512,8 @@
 			   "Not using interface %s (retrying later): interface not active\n",
 			   hard_iface->net_dev->name);
 
+	batadv_hardif_recalc_extra_skbroom(soft_iface);
+
 	/* begin scheduling originator messages on that interface */
 	batadv_schedule_bat_ogm(hard_iface);
 
@@ -528,6 +568,9 @@
 	batadv_purge_outstanding_packets(bat_priv, hard_iface);
 	dev_put(hard_iface->soft_iface);
 
+	netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
+	batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
+
 	/* nobody uses this interface anymore */
 	if (!bat_priv->num_ifaces) {
 		batadv_gw_check_client_stop(bat_priv);
@@ -536,7 +579,6 @@
 			batadv_softif_destroy_sysfs(hard_iface->soft_iface);
 	}
 
-	netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
 	hard_iface->soft_iface = NULL;
 	batadv_hardif_free_ref(hard_iface);
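
batadv_hardif_recalc_extra_skbroom() above derives the soft interface's needed head/tailroom from the worst case over the attached hard interfaces, adding batman-adv's own header on top of the extra hard-header length. A standalone sketch of that arithmetic, not part of the patch, using made-up lower-device values and BATADV_HDR_LEN as an assumed stand-in for batadv_max_header_len():

#include <stdio.h>

#define ETH_HLEN	14
#define BATADV_HDR_LEN	18	/* assumed stand-in for batadv_max_header_len() */

struct lower_if {
	unsigned short hard_header_len;
	unsigned short needed_headroom;
	unsigned short needed_tailroom;
};

static unsigned short max_us(unsigned short a, unsigned short b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* made-up values for two attached hard interfaces */
	struct lower_if lowers[] = {
		{ .hard_header_len = 14, .needed_headroom = 0,  .needed_tailroom = 0 },
		{ .hard_header_len = 32, .needed_headroom = 16, .needed_tailroom = 4 },
	};
	unsigned short header_len = ETH_HLEN, headroom = 0, tailroom = 0;
	unsigned int i;

	for (i = 0; i < sizeof(lowers) / sizeof(lowers[0]); i++) {
		header_len = max_us(header_len, lowers[i].hard_header_len);
		headroom = max_us(headroom, lowers[i].needed_headroom);
		tailroom = max_us(tailroom, lowers[i].needed_tailroom);
	}

	printf("needed_headroom=%d needed_tailroom=%d\n",
	       headroom + (header_len - ETH_HLEN) + BATADV_HDR_LEN, tailroom);
	return 0;
}
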
 
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index e89f314..2ea6a18 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -25,7 +25,7 @@
 /* clears the hash */
 static void batadv_hash_init(struct batadv_hashtable *hash)
 {
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		INIT_HLIST_HEAD(&hash->table[i]);
@@ -42,7 +42,7 @@
 }
 
 /* allocates and clears the hash */
-struct batadv_hashtable *batadv_hash_new(uint32_t size)
+struct batadv_hashtable *batadv_hash_new(u32 size)
 {
 	struct batadv_hashtable *hash;
 
@@ -73,7 +73,7 @@
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
 				struct lock_class_key *key)
 {
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++)
 		lockdep_set_class(&hash->list_locks[i], key);
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 5065f50..3776262 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -39,17 +39,17 @@
  * based on the key in the data of the first
  * argument and the size the second
  */
-typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
+typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
 
 struct batadv_hashtable {
 	struct hlist_head *table;   /* the hashtable itself with the buckets */
 	spinlock_t *list_locks;     /* spinlock for each hash list entry */
-	uint32_t size;		    /* size of hashtable */
+	u32 size;		    /* size of hashtable */
 };
 
 /* allocates and clears the hash */
-struct batadv_hashtable *batadv_hash_new(uint32_t size);
+struct batadv_hashtable *batadv_hash_new(u32 size);
 
 /* set class key for all locks */
 void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
@@ -69,7 +69,7 @@
 	struct hlist_head *head;
 	struct hlist_node *node, *node_tmp;
 	spinlock_t *list_lock; /* spinlock to protect write access */
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -105,7 +105,7 @@
 				  const void *data,
 				  struct hlist_node *data_node)
 {
-	uint32_t index;
+	u32 index;
 	int ret = -1;
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -149,7 +149,7 @@
 				       batadv_hashdata_choose_cb choose,
 				       void *data)
 {
-	uint32_t index;
+	u32 index;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	void *data_save = NULL;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 07061bc..bcabb5e 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -183,7 +183,7 @@
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_neigh_node *neigh_node = NULL;
 	size_t packet_len = sizeof(struct batadv_icmp_packet);
-	uint8_t *addr;
+	u8 *addr;
 
 	if (len < sizeof(struct batadv_icmp_header)) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -337,8 +337,8 @@
 }
 
 /**
- * batadv_socket_receive_packet - schedule an icmp packet to be sent to userspace
- *  on an icmp socket.
+ * batadv_socket_receive_packet - schedule an icmp packet to be sent to
+ *  userspace on an icmp socket.
  * @socket_client: the socket this packet belongs to
  * @icmph: pointer to the header of the icmp packet
  * @icmp_len: total length of the icmp packet
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 7de7fce..e937143 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -23,7 +23,6 @@
 #include <linux/types.h>
 
 struct batadv_icmp_header;
-struct batadv_priv;
 
 #define BATADV_ICMP_SOCKET "socket"
 
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 8457097..d7f17c1 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
 #include <linux/ipv6.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
@@ -148,7 +149,7 @@
 	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
 #endif
 	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
-	INIT_LIST_HEAD(&bat_priv->tt.req_list);
+	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
 	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
 #ifdef CONFIG_BATMAN_ADV_MCAST
 	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
@@ -198,7 +199,7 @@
 
 	batadv_purge_outstanding_packets(bat_priv, NULL);
 
-	batadv_gw_node_purge(bat_priv);
+	batadv_gw_node_free(bat_priv);
 	batadv_nc_mesh_free(bat_priv);
 	batadv_dat_free(bat_priv);
 	batadv_bla_free(bat_priv);
@@ -234,7 +235,7 @@
  *
  * Returns 'true' if the mac address was found, false otherwise.
  */
-bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 {
 	const struct batadv_hard_iface *hard_iface;
 	bool is_my_mac = false;
@@ -387,7 +388,7 @@
 	struct batadv_priv *bat_priv;
 	struct batadv_ogm_packet *batadv_ogm_packet;
 	struct batadv_hard_iface *hard_iface;
-	uint8_t idx;
+	u8 idx;
 	int ret;
 
 	hard_iface = container_of(ptype, struct batadv_hard_iface,
@@ -496,7 +497,7 @@
 }
 
 int
-batadv_recv_handler_register(uint8_t packet_type,
+batadv_recv_handler_register(u8 packet_type,
 			     int (*recv_handler)(struct sk_buff *,
 						 struct batadv_hard_iface *))
 {
@@ -512,7 +513,7 @@
 	return 0;
 }
 
-void batadv_recv_handler_unregister(uint8_t packet_type)
+void batadv_recv_handler_unregister(u8 packet_type)
 {
 	batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
 }
@@ -583,7 +584,7 @@
 	seq_puts(seq, "Available routing algorithms:\n");
 
 	hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
-		seq_printf(seq, "%s\n", bat_algo_ops->name);
+		seq_printf(seq, " * %s\n", bat_algo_ops->name);
 	}
 
 	return 0;
@@ -642,8 +643,7 @@
  * Returns tvlv handler if found or NULL otherwise.
  */
 static struct batadv_tvlv_handler
-*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
-			 uint8_t type, uint8_t version)
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
 	struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
 
@@ -691,8 +691,7 @@
  * Returns tvlv container if found or NULL otherwise.
  */
 static struct batadv_tvlv_container
-*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
-			   uint8_t type, uint8_t version)
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
 {
 	struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
 
@@ -723,10 +722,10 @@
  *
  * Returns size of all currently registered tvlv containers in bytes.
  */
-static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 {
 	struct batadv_tvlv_container *tvlv;
-	uint16_t tvlv_len = 0;
+	u16 tvlv_len = 0;
 
 	hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
 		tvlv_len += sizeof(struct batadv_tvlv_hdr);
@@ -739,13 +738,17 @@
 /**
  * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
  *  list
+ * @bat_priv: the bat priv with all the soft interface information
  * @tvlv: the to be removed tvlv container
  *
  * Has to be called with the appropriate locks being acquired
  * (tvlv.container_list_lock).
  */
-static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
+					 struct batadv_tvlv_container *tvlv)
 {
+	lockdep_assert_held(&bat_priv->tvlv.handler_list_lock);
+
 	if (!tvlv)
 		return;
 
@@ -764,13 +767,13 @@
  * @version: tvlv container type to unregister
  */
 void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
-				      uint8_t type, uint8_t version)
+				      u8 type, u8 version)
 {
 	struct batadv_tvlv_container *tvlv;
 
 	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
 	tvlv = batadv_tvlv_container_get(bat_priv, type, version);
-	batadv_tvlv_container_remove(tvlv);
+	batadv_tvlv_container_remove(bat_priv, tvlv);
 	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
 }
 
@@ -787,8 +790,8 @@
  * content is going to replace the old one.
  */
 void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version,
-				    void *tvlv_value, uint16_t tvlv_value_len)
+				    u8 type, u8 version,
+				    void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_tvlv_container *tvlv_old, *tvlv_new;
 
@@ -809,7 +812,7 @@
 
 	spin_lock_bh(&bat_priv->tvlv.container_list_lock);
 	tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
-	batadv_tvlv_container_remove(tvlv_old);
+	batadv_tvlv_container_remove(bat_priv, tvlv_old);
 	hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
 	spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
 }
@@ -861,14 +864,13 @@
  *
  * Returns size of all appended tvlv containers in bytes.
  */
-uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
-					  unsigned char **packet_buff,
-					  int *packet_buff_len,
-					  int packet_min_len)
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+				     unsigned char **packet_buff,
+				     int *packet_buff_len, int packet_min_len)
 {
 	struct batadv_tvlv_container *tvlv;
 	struct batadv_tvlv_hdr *tvlv_hdr;
-	uint16_t tvlv_value_len;
+	u16 tvlv_value_len;
 	void *tvlv_value;
 	bool ret;
 
@@ -893,7 +895,7 @@
 		tvlv_hdr->len = tvlv->tvlv_hdr.len;
 		tvlv_value = tvlv_hdr + 1;
 		memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
-		tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+		tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
 	}
 
 end:
@@ -920,8 +922,8 @@
 				    struct batadv_tvlv_handler *tvlv_handler,
 				    bool ogm_source,
 				    struct batadv_orig_node *orig_node,
-				    uint8_t *src, uint8_t *dst,
-				    void *tvlv_value, uint16_t tvlv_value_len)
+				    u8 *src, u8 *dst,
+				    void *tvlv_value, u16 tvlv_value_len)
 {
 	if (!tvlv_handler)
 		return NET_RX_SUCCESS;
@@ -972,13 +974,13 @@
 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 				   bool ogm_source,
 				   struct batadv_orig_node *orig_node,
-				   uint8_t *src, uint8_t *dst,
-				   void *tvlv_value, uint16_t tvlv_value_len)
+				   u8 *src, u8 *dst,
+				   void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 	struct batadv_tvlv_hdr *tvlv_hdr;
-	uint16_t tvlv_value_cont_len;
-	uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+	u16 tvlv_value_cont_len;
+	u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
 	int ret = NET_RX_SUCCESS;
 
 	while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
@@ -1000,7 +1002,7 @@
 						tvlv_value_cont_len);
 		if (tvlv_handler)
 			batadv_tvlv_handler_free_ref(tvlv_handler);
-		tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+		tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
 		tvlv_value_len -= tvlv_value_cont_len;
 	}
 
@@ -1034,7 +1036,7 @@
 			     struct batadv_orig_node *orig_node)
 {
 	void *tvlv_value;
-	uint16_t tvlv_value_len;
+	u16 tvlv_value_len;
 
 	if (!batadv_ogm_packet)
 		return;
@@ -1066,14 +1068,14 @@
 void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 				  void (*optr)(struct batadv_priv *bat_priv,
 					       struct batadv_orig_node *orig,
-					       uint8_t flags,
+					       u8 flags,
 					       void *tvlv_value,
-					       uint16_t tvlv_value_len),
+					       u16 tvlv_value_len),
 				  int (*uptr)(struct batadv_priv *bat_priv,
-					      uint8_t *src, uint8_t *dst,
+					      u8 *src, u8 *dst,
 					      void *tvlv_value,
-					      uint16_t tvlv_value_len),
-				  uint8_t type, uint8_t version, uint8_t flags)
+					      u16 tvlv_value_len),
+				  u8 type, u8 version, u8 flags)
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 
@@ -1108,7 +1110,7 @@
  * @version: tvlv handler version to be unregistered
  */
 void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version)
+				    u8 type, u8 version)
 {
 	struct batadv_tvlv_handler *tvlv_handler;
 
@@ -1134,9 +1136,9 @@
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  */
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
-			      uint8_t *dst, uint8_t type, uint8_t version,
-			      void *tvlv_value, uint16_t tvlv_value_len)
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
+			      u8 *dst, u8 type, u8 version,
+			      void *tvlv_value, u16 tvlv_value_len)
 {
 	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
 	struct batadv_tvlv_hdr *tvlv_hdr;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 41d27c7..ebd8af0 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2015.1"
+#define BATADV_SOURCE_VERSION "2015.2"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -193,7 +193,7 @@
 
 int batadv_mesh_init(struct net_device *soft_iface);
 void batadv_mesh_free(struct net_device *soft_iface);
-bool batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
+bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
 int batadv_max_header_len(void);
@@ -202,10 +202,10 @@
 			   struct packet_type *ptype,
 			   struct net_device *orig_dev);
 int
-batadv_recv_handler_register(uint8_t packet_type,
+batadv_recv_handler_register(u8 packet_type,
 			     int (*recv_handler)(struct sk_buff *,
 						 struct batadv_hard_iface *));
-void batadv_recv_handler_unregister(uint8_t packet_type);
+void batadv_recv_handler_unregister(u8 packet_type);
 int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
 int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
 int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
@@ -304,7 +304,7 @@
  * they handle overflows/underflows and can correctly check for a
  * predecessor/successor unless the variable sequence number has grown by
  * more then 2**(bitwidth(x)-1)-1.
- * This means that for a uint8_t with the maximum value 255, it would think:
+ * This means that for a u8 with the maximum value 255, it would think:
  *  - when adding nothing - it is neither a predecessor nor a successor
  *  - before adding more than 127 to the starting value - it is a predecessor,
  *  - when adding 128 - it is neither a predecessor nor a successor,
@@ -327,10 +327,9 @@
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
 /* Sum and return the cpu-local counters for index 'idx' */
-static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
-					  size_t idx)
+static inline u64 batadv_sum_counter(struct batadv_priv *bat_priv,  size_t idx)
 {
-	uint64_t *counters, sum = 0;
+	u64 *counters, sum = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
@@ -348,39 +347,38 @@
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
 void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version,
-				    void *tvlv_value, uint16_t tvlv_value_len);
-uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
-					  unsigned char **packet_buff,
-					  int *packet_buff_len,
-					  int packet_min_len);
+				    u8 type, u8 version,
+				    void *tvlv_value, u16 tvlv_value_len);
+u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+				     unsigned char **packet_buff,
+				     int *packet_buff_len, int packet_min_len);
 void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
 			     struct batadv_ogm_packet *batadv_ogm_packet,
 			     struct batadv_orig_node *orig_node);
 void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
-				      uint8_t type, uint8_t version);
+				      u8 type, u8 version);
 
 void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 				  void (*optr)(struct batadv_priv *bat_priv,
 					       struct batadv_orig_node *orig,
-					       uint8_t flags,
+					       u8 flags,
 					       void *tvlv_value,
-					       uint16_t tvlv_value_len),
+					       u16 tvlv_value_len),
 				  int (*uptr)(struct batadv_priv *bat_priv,
-					      uint8_t *src, uint8_t *dst,
+					      u8 *src, u8 *dst,
 					      void *tvlv_value,
-					      uint16_t tvlv_value_len),
-				  uint8_t type, uint8_t version, uint8_t flags);
+					      u16 tvlv_value_len),
+				  u8 type, u8 version, u8 flags);
 void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
-				    uint8_t type, uint8_t version);
+				    u8 type, u8 version);
 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
 				   bool ogm_source,
 				   struct batadv_orig_node *orig_node,
-				   uint8_t *src, uint8_t *dst,
-				   void *tvlv_buff, uint16_t tvlv_buff_len);
-void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
-			      uint8_t *dst, uint8_t type, uint8_t version,
-			      void *tvlv_value, uint16_t tvlv_value_len);
+				   u8 *src, u8 *dst,
+				   void *tvlv_buff, u16 tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
+			      u8 *dst, u8 type, u8 version,
+			      void *tvlv_value, u16 tvlv_value_len);
 unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid);
 
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 7aa480b..eb76386 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -19,6 +19,8 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/errno.h>
 #include <linux/etherdevice.h>
@@ -29,6 +31,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
@@ -87,7 +90,7 @@
  * Returns true if the given address is already in the given list.
  * Otherwise returns false.
  */
-static bool batadv_mcast_mla_is_duplicate(uint8_t *mcast_addr,
+static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
 					  struct hlist_head *mcast_list)
 {
 	struct batadv_hw_addr *mcast_entry;
@@ -101,15 +104,19 @@
 
 /**
  * batadv_mcast_mla_list_free - free a list of multicast addresses
+ * @bat_priv: the bat priv with all the soft interface information
  * @mcast_list: the list to free
  *
  * Removes and frees all items in the given mcast_list.
  */
-static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
+static void batadv_mcast_mla_list_free(struct batadv_priv *bat_priv,
+				       struct hlist_head *mcast_list)
 {
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
 		hlist_del(&mcast_entry->list);
 		kfree(mcast_entry);
@@ -132,6 +139,8 @@
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
 				  list) {
 		if (mcast_list &&
@@ -162,6 +171,8 @@
 	struct batadv_hw_addr *mcast_entry;
 	struct hlist_node *tmp;
 
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	if (!mcast_list)
 		return;
 
@@ -266,7 +277,7 @@
 	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
 
 out:
-	batadv_mcast_mla_list_free(&mcast_list);
+	batadv_mcast_mla_list_free(bat_priv, &mcast_list);
 }
 
 /**
@@ -588,19 +599,28 @@
  *
  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
  * orig, has toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
 					     struct batadv_orig_node *orig,
-					     uint8_t mcast_flags)
+					     u8 mcast_flags)
 {
+	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
+	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
+
+	lockdep_assert_held(&orig->mcast_handler_lock);
+
 	/* switched from flag unset to set */
 	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
 		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
-				   &bat_priv->mcast.want_all_unsnoopables_list);
+		/* flag checks above + mcast_handler_lock prevents this */
+		WARN_ON(!hlist_unhashed(node));
+
+		hlist_add_head_rcu(node, head);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	/* switched from flag set to unset */
 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
@@ -608,7 +628,10 @@
 		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
+		/* flag checks above + mcast_handler_lock prevents this */
+		WARN_ON(hlist_unhashed(node));
+
+		hlist_del_init_rcu(node);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	}
 }
@@ -621,19 +644,28 @@
  *
  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t mcast_flags)
+					  u8 mcast_flags)
 {
+	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
+	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
+
+	lockdep_assert_held(&orig->mcast_handler_lock);
+
 	/* switched from flag unset to set */
 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
 		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
-				   &bat_priv->mcast.want_all_ipv4_list);
+		/* flag checks above + mcast_handler_lock prevents this */
+		WARN_ON(!hlist_unhashed(node));
+
+		hlist_add_head_rcu(node, head);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	/* switched from flag set to unset */
 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
@@ -641,7 +673,10 @@
 		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
+		/* flag checks above + mcast_handler_lock prevents this */
+		WARN_ON(hlist_unhashed(node));
+
+		hlist_del_init_rcu(node);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	}
 }
@@ -654,19 +689,28 @@
  *
  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
  * toggled then this method updates counter and list accordingly.
+ *
+ * Caller needs to hold orig->mcast_handler_lock.
  */
 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t mcast_flags)
+					  u8 mcast_flags)
 {
+	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
+	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
+
+	lockdep_assert_held(&orig->mcast_handler_lock);
+
 	/* switched from flag unset to set */
 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
 		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
-				   &bat_priv->mcast.want_all_ipv6_list);
+		/* flag checks above + mcast_handler_lock prevents this */
+		WARN_ON(!hlist_unhashed(node));
+
+		hlist_add_head_rcu(node, head);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	/* switched from flag set to unset */
 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
@@ -674,7 +718,10 @@
 		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
 
 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
-		hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
+		/* flag checks above + mcast_handler_lock prevents this */
+		WARN_ON(hlist_unhashed(node));
+
+		hlist_del_init_rcu(node);
 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
 	}
 }
@@ -689,47 +736,50 @@
  */
 static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					     struct batadv_orig_node *orig,
-					     uint8_t flags,
+					     u8 flags,
 					     void *tvlv_value,
-					     uint16_t tvlv_value_len)
+					     u16 tvlv_value_len)
 {
 	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
-	uint8_t mcast_flags = BATADV_NO_FLAGS;
+	u8 mcast_flags = BATADV_NO_FLAGS;
 	bool orig_initialized;
 
-	orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
+	if (orig_mcast_enabled && tvlv_value &&
+	    (tvlv_value_len >= sizeof(mcast_flags)))
+		mcast_flags = *(u8 *)tvlv_value;
+
+	spin_lock_bh(&orig->mcast_handler_lock);
+	orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
+				    &orig->capa_initialized);
 
 	/* If mcast support is turned on decrease the disabled mcast node
 	 * counter only if we had increased it for this node before. If this
 	 * is a completely new orig_node no need to decrease the counter.
 	 */
 	if (orig_mcast_enabled &&
-	    !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
+	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
 		if (orig_initialized)
 			atomic_dec(&bat_priv->mcast.num_disabled);
-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
+		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
 	/* If mcast support is being switched off or if this is an initial
 	 * OGM without mcast support then increase the disabled mcast
 	 * node counter.
 	 */
 	} else if (!orig_mcast_enabled &&
-		   (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+		   (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
 		    !orig_initialized)) {
 		atomic_inc(&bat_priv->mcast.num_disabled);
-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
+		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
 	}
 
-	orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
-
-	if (orig_mcast_enabled && tvlv_value &&
-	    (tvlv_value_len >= sizeof(mcast_flags)))
-		mcast_flags = *(uint8_t *)tvlv_value;
+	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
 
 	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
 	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
 	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
 
 	orig->mcast_flags = mcast_flags;
+	spin_unlock_bh(&orig->mcast_handler_lock);
 }
 
 /**
@@ -763,11 +813,15 @@
 {
 	struct batadv_priv *bat_priv = orig->bat_priv;
 
-	if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
-	    orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
+	spin_lock_bh(&orig->mcast_handler_lock);
+
+	if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
+	    test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
 		atomic_dec(&bat_priv->mcast.num_disabled);
 
 	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
 	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
 	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
+
+	spin_unlock_bh(&orig->mcast_handler_lock);
 }
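
Note on the hlist changes above: switching from hlist_del_rcu() to hlist_del_init_rcu() re-initialises the node on removal, so hlist_unhashed() keeps reporting whether the originator currently sits on a want_all_* list; together with mcast_handler_lock that is what the new WARN_ON()s rely on. A minimal userspace sketch of the same idempotent add/remove idea (plain pointers, no RCU, hypothetical names):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for hlist_node/hlist_head; no RCU, illustration only. */
struct node { struct node *next, **pprev; };
struct head { struct node *first; };

static int node_unhashed(const struct node *n) { return n->pprev == NULL; }

static void node_add_head(struct node *n, struct head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* Like hlist_del_init(): unlink and reset, so node_unhashed() stays valid. */
static void node_del_init(struct node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;
}

int main(void)
{
	struct head want_list = { NULL };
	struct node orig = { NULL, NULL };

	assert(node_unhashed(&orig));      /* freshly initialised: not listed */
	node_add_head(&orig, &want_list);
	assert(!node_unhashed(&orig));     /* flag switched on: now listed */
	node_del_init(&orig);
	assert(node_unhashed(&orig));      /* flag switched off: safe to re-add */
	puts("add/remove stayed idempotent");
	return 0;
}
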
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index beb6e56..8f3cb04 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -20,8 +20,6 @@
 
 #include "main.h"
 
-struct batadv_orig_node;
-struct batadv_priv;
 struct sk_buff;
 
 /**
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index f0a50f3..f5276be 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
 #include <linux/debugfs.h>
@@ -129,14 +130,13 @@
  */
 static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t flags,
-					  void *tvlv_value,
-					  uint16_t tvlv_value_len)
+					  u8 flags,
+					  void *tvlv_value, u16 tvlv_value_len)
 {
 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+		clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
 	else
-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+		set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
 }
 
 /**
@@ -381,7 +381,7 @@
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -417,7 +417,7 @@
 	struct hlist_node *node_tmp;
 	struct batadv_nc_path *nc_path;
 	spinlock_t *lock; /* Protects lists in hash */
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -477,10 +477,10 @@
  *
  * Returns the selected index in the hash table for the given data.
  */
-static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size)
+static u32 batadv_nc_hash_choose(const void *data, u32 size)
 {
 	const struct batadv_nc_path *nc_path = data;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(&nc_path->prev_hop, sizeof(nc_path->prev_hop), hash);
 	hash = jhash(&nc_path->next_hop, sizeof(nc_path->next_hop), hash);
@@ -586,6 +586,8 @@
 	unsigned long timeout = bat_priv->nc.max_buffer_time;
 	bool res = false;
 
+	lockdep_assert_held(&nc_path->packet_list_lock);
+
 	/* Packets are added to tail, so the remaining packets did not time
 	 * out and we can stop processing the current queue
 	 */
@@ -622,6 +624,8 @@
 {
 	unsigned long timeout = bat_priv->nc.max_fwd_delay;
 
+	lockdep_assert_held(&nc_path->packet_list_lock);
+
 	/* Packets are added to tail, so the remaining packets did not time
 	 * out and we can stop processing the current queue
 	 */
@@ -743,8 +747,8 @@
 				    struct batadv_ogm_packet *ogm_packet)
 {
 	struct batadv_orig_ifinfo *orig_ifinfo;
-	uint32_t last_real_seqno;
-	uint8_t last_ttl;
+	u32 last_real_seqno;
+	u8 last_ttl;
 
 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, BATADV_IF_DEFAULT);
 	if (!orig_ifinfo)
@@ -872,8 +876,8 @@
 }
 
 /**
- * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node structs
- *  (best called on incoming OGMs)
+ * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node
+ *  structs (best called on incoming OGMs)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node originating the ogm packet
  * @orig_neigh_node: neighboring orig node from which we received the ogm packet
@@ -887,14 +891,15 @@
 			      struct batadv_ogm_packet *ogm_packet,
 			      int is_single_hop_neigh)
 {
-	struct batadv_nc_node *in_nc_node = NULL, *out_nc_node = NULL;
+	struct batadv_nc_node *in_nc_node = NULL;
+	struct batadv_nc_node *out_nc_node = NULL;
 
 	/* Check if network coding is enabled */
 	if (!atomic_read(&bat_priv->network_coding))
 		goto out;
 
 	/* check if orig node is network coding enabled */
-	if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+	if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
 		goto out;
 
 	/* accept ogms from 'good' neighbors and single hop neighbors */
@@ -937,8 +942,8 @@
  */
 static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
 						 struct batadv_hashtable *hash,
-						 uint8_t *src,
-						 uint8_t *dst)
+						 u8 *src,
+						 u8 *dst)
 {
 	int hash_added;
 	struct batadv_nc_path *nc_path, nc_path_key;
@@ -990,9 +995,9 @@
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
  */
-static uint8_t batadv_nc_random_weight_tq(uint8_t tq)
+static u8 batadv_nc_random_weight_tq(u8 tq)
 {
-	uint8_t rand_val, rand_tq;
+	u8 rand_val, rand_tq;
 
 	get_random_bytes(&rand_val, sizeof(rand_val));
 
@@ -1037,7 +1042,7 @@
 				   struct batadv_nc_packet *nc_packet,
 				   struct batadv_neigh_node *neigh_node)
 {
-	uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
+	u8 tq_weighted_neigh, tq_weighted_coding, tq_tmp;
 	struct sk_buff *skb_dest, *skb_src;
 	struct batadv_unicast_packet *packet1;
 	struct batadv_unicast_packet *packet2;
@@ -1046,7 +1051,7 @@
 	struct batadv_neigh_node *router_coding = NULL;
 	struct batadv_neigh_ifinfo *router_neigh_ifinfo = NULL;
 	struct batadv_neigh_ifinfo *router_coding_ifinfo = NULL;
-	uint8_t *first_source, *first_dest, *second_source, *second_dest;
+	u8 *first_source, *first_dest, *second_source, *second_dest;
 	__be32 packet_id1, packet_id2;
 	size_t count;
 	bool res = false;
@@ -1230,8 +1235,7 @@
  *
  * Returns true if coding of a decoded packet is allowed.
  */
-static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
-					  uint8_t *dst, uint8_t *src)
+static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 {
 	if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
 		return false;
@@ -1254,7 +1258,7 @@
 		      struct batadv_nc_node *in_nc_node,
 		      struct batadv_nc_node *out_nc_node,
 		      struct sk_buff *skb,
-		      uint8_t *eth_dst)
+		      u8 *eth_dst)
 {
 	struct batadv_nc_path *nc_path, nc_path_key;
 	struct batadv_nc_packet *nc_packet_out = NULL;
@@ -1320,8 +1324,8 @@
 static struct batadv_nc_packet *
 batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
 			 struct sk_buff *skb,
-			 uint8_t *eth_dst,
-			 uint8_t *eth_src,
+			 u8 *eth_dst,
+			 u8 *eth_src,
 			 struct batadv_nc_node *in_nc_node)
 {
 	struct batadv_orig_node *orig_node;
@@ -1361,7 +1365,7 @@
  */
 static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
 					      struct sk_buff *skb,
-					      uint8_t *eth_dst_new)
+					      u8 *eth_dst_new)
 {
 	struct ethhdr *ethhdr;
 
@@ -1637,7 +1641,7 @@
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_coded_packet coded_packet_tmp;
 	struct ethhdr *ethhdr, ethhdr_tmp;
-	uint8_t *orig_dest, ttl, ttvn;
+	u8 *orig_dest, ttl, ttvn;
 	unsigned int coding_len;
 	int err;
 
@@ -1729,7 +1733,7 @@
 	struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
 	struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
 	struct batadv_nc_path *nc_path, nc_path_key;
-	uint8_t *dest, *source;
+	u8 *dest, *source;
 	__be32 packet_id;
 	int index;
 
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 5b79aa8..8f6d4ad 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,11 +22,7 @@
 
 #include <linux/types.h>
 
-struct batadv_nc_node;
-struct batadv_neigh_node;
 struct batadv_ogm_packet;
-struct batadv_orig_node;
-struct batadv_priv;
 struct net_device;
 struct seq_file;
 struct sk_buff;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 018b749..7486df9 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/netdevice.h>
+#include <linux/rculist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -70,7 +71,7 @@
 	struct batadv_orig_node_vlan *vlan = NULL, *tmp;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
 		if (tmp->vid != vid)
 			continue;
 
@@ -118,7 +119,7 @@
 	atomic_set(&vlan->refcount, 2);
 	vlan->vid = vid;
 
-	list_add_rcu(&vlan->list, &orig_node->vlan_list);
+	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
 
 out:
 	spin_unlock_bh(&orig_node->vlan_list_lock);
@@ -442,41 +443,6 @@
 }
 
 /**
- * batadv_neigh_node_new - create and init a new neigh_node object
- * @hard_iface: the interface where the neighbour is connected to
- * @neigh_addr: the mac address of the neighbour interface
- * @orig_node: originator object representing the neighbour
- *
- * Allocates a new neigh_node object and initialises all the generic fields.
- * Returns the new object or NULL on failure.
- */
-struct batadv_neigh_node *
-batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-		      const uint8_t *neigh_addr,
-		      struct batadv_orig_node *orig_node)
-{
-	struct batadv_neigh_node *neigh_node;
-
-	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
-	if (!neigh_node)
-		goto out;
-
-	INIT_HLIST_NODE(&neigh_node->list);
-	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
-	spin_lock_init(&neigh_node->ifinfo_lock);
-
-	ether_addr_copy(neigh_node->addr, neigh_addr);
-	neigh_node->if_incoming = hard_iface;
-	neigh_node->orig_node = orig_node;
-
-	/* extra reference for return */
-	atomic_set(&neigh_node->refcount, 2);
-
-out:
-	return neigh_node;
-}
-
-/**
  * batadv_neigh_node_get - retrieve a neighbour from the list
  * @orig_node: originator which the neighbour belongs to
  * @hard_iface: the interface where this neighbour is connected to
@@ -486,10 +452,10 @@
  * which is connected through the provided hard interface.
  * Returns NULL if the neighbour is not found.
  */
-struct batadv_neigh_node *
+static struct batadv_neigh_node *
 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
 		      const struct batadv_hard_iface *hard_iface,
-		      const uint8_t *addr)
+		      const u8 *addr)
 {
 	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
 
@@ -513,6 +479,59 @@
 }
 
 /**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @orig_node: originator object representing the neighbour
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+		      struct batadv_hard_iface *hard_iface,
+		      const u8 *neigh_addr)
+{
+	struct batadv_neigh_node *neigh_node;
+
+	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
+	if (neigh_node)
+		goto out;
+
+	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
+	if (!neigh_node)
+		goto out;
+
+	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
+		kfree(neigh_node);
+		neigh_node = NULL;
+		goto out;
+	}
+
+	INIT_HLIST_NODE(&neigh_node->list);
+	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
+	spin_lock_init(&neigh_node->ifinfo_lock);
+
+	ether_addr_copy(neigh_node->addr, neigh_addr);
+	neigh_node->if_incoming = hard_iface;
+	neigh_node->orig_node = orig_node;
+
+	/* extra reference for return */
+	atomic_set(&neigh_node->refcount, 2);
+
+	spin_lock_bh(&orig_node->neigh_list_lock);
+	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+	spin_unlock_bh(&orig_node->neigh_list_lock);
+
+	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
+		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
+
+out:
+	return neigh_node;
+}
+
+/**
  * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
  * @rcu: rcu pointer of the orig_ifinfo object
  */
@@ -624,7 +643,7 @@
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -659,7 +678,7 @@
  * Returns the newly created object or NULL on failure.
  */
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
-					      const uint8_t *addr)
+					      const u8 *addr)
 {
 	struct batadv_orig_node *orig_node;
 	struct batadv_orig_node_vlan *vlan;
@@ -674,7 +693,7 @@
 		return NULL;
 
 	INIT_HLIST_HEAD(&orig_node->neigh_list);
-	INIT_LIST_HEAD(&orig_node->vlan_list);
+	INIT_HLIST_HEAD(&orig_node->vlan_list);
 	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
@@ -696,8 +715,13 @@
 	orig_node->last_seen = jiffies;
 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
 	orig_node->bcast_seqno_reset = reset_time;
+
 #ifdef CONFIG_BATMAN_ADV_MCAST
 	orig_node->mcast_flags = BATADV_NO_FLAGS;
+	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
+	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
+	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
+	spin_lock_init(&orig_node->mcast_handler_lock);
 #endif
 
 	/* create a vlan object for the "untagged" LAN */
@@ -976,7 +1000,7 @@
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* spinlock to protect write access */
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -1005,7 +1029,6 @@
 		spin_unlock_bh(list_lock);
 	}
 
-	batadv_gw_node_purge(bat_priv);
 	batadv_gw_election(bat_priv);
 }
 
@@ -1110,7 +1133,7 @@
 	struct batadv_hashtable *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
 	struct batadv_orig_node *orig_node;
-	uint32_t i;
+	u32 i;
 	int ret;
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
@@ -1147,7 +1170,7 @@
 	struct batadv_hard_iface *hard_iface_tmp;
 	struct batadv_orig_node *orig_node;
 	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
-	uint32_t i;
+	u32 i;
 	int ret;
 
 	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
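
For context on the batadv_neigh_node_new() rework above: the lookup that used to live in callers (batadv_neigh_node_get(), now static) is folded into the constructor, so an already-listed neighbour is returned instead of creating a duplicate, while a new one takes a reference on the hard interface and is linked under neigh_list_lock. A simplified, single-threaded sketch of that get-or-create shape (hypothetical types, no locking, no RCU):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct neigh {
	struct neigh *next;
	unsigned char addr[ETH_ALEN];
	int refcount;
};

struct orig {
	struct neigh *neigh_list;
};

/* Return the existing neighbour for @addr, or create and link a new one.
 * Mirrors the "lookup first, allocate only on miss" shape of the patch.
 */
static struct neigh *neigh_get_or_create(struct orig *orig,
					 const unsigned char *addr)
{
	struct neigh *n;

	for (n = orig->neigh_list; n; n = n->next)
		if (!memcmp(n->addr, addr, ETH_ALEN))
			return n;		/* already known: no duplicate */

	n = calloc(1, sizeof(*n));
	if (!n)
		return NULL;

	memcpy(n->addr, addr, ETH_ALEN);
	n->refcount = 2;	/* one reference for the list, one for the caller */
	n->next = orig->neigh_list;
	orig->neigh_list = n;
	return n;
}

int main(void)
{
	struct orig orig = { NULL };
	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	struct neigh *a = neigh_get_or_create(&orig, mac);
	struct neigh *b = neigh_get_or_create(&orig, mac);

	printf("same node: %s\n", a == b ? "yes" : "no");
	free(a);
	return 0;
}
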
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 79734d3..fa18f9b 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -40,15 +40,11 @@
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
-					      const uint8_t *addr);
+					      const u8 *addr);
 struct batadv_neigh_node *
-batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
-		      const struct batadv_hard_iface *hard_iface,
-		      const uint8_t *addr);
-struct batadv_neigh_node *
-batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-		      const uint8_t *neigh_addr,
-		      struct batadv_orig_node *orig_node);
+batadv_neigh_node_new(struct batadv_orig_node *orig_node,
+		      struct batadv_hard_iface *hard_iface,
+		      const u8 *neigh_addr);
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
 struct batadv_neigh_node *
 batadv_orig_router_get(struct batadv_orig_node *orig_node,
@@ -86,9 +82,9 @@
 /* hashfunction to choose an entry in a hash table of given size
  * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
  */
-static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
+static inline u32 batadv_choose_orig(const void *data, u32 size)
 {
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	hash = jhash(data, ETH_ALEN, hash);
 	return hash % size;
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 9e747c0..11f996b 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -197,8 +197,8 @@
  * transport the claim type and the group id
  */
 struct batadv_bla_claim_dst {
-	uint8_t magic[3];	/* FF:43:05 */
-	uint8_t type;		/* bla_claimframe */
+	u8     magic[3];	/* FF:43:05 */
+	u8     type;		/* bla_claimframe */
 	__be16 group;		/* group id */
 };
 
@@ -213,16 +213,16 @@
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  flags;
-	__be32   seqno;
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  prev_sender[ETH_ALEN];
-	uint8_t  reserved;
-	uint8_t  tq;
-	__be16   tvlv_len;
+	u8     packet_type;
+	u8     version;
+	u8     ttl;
+	u8     flags;
+	__be32 seqno;
+	u8     orig[ETH_ALEN];
+	u8     prev_sender[ETH_ALEN];
+	u8     reserved;
+	u8     tq;
+	__be16 tvlv_len;
 	/* __packed is not needed as the struct size is divisible by 4,
 	 * and the largest data type in this struct has a size of 4.
 	 */
@@ -246,14 +246,14 @@
  * members are padded the same way as they are in real packets.
  */
 struct batadv_icmp_header {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  uid;
-	uint8_t  align[3];
+	u8 packet_type;
+	u8 version;
+	u8 ttl;
+	u8 msg_type; /* see ICMP message types above */
+	u8 dst[ETH_ALEN];
+	u8 orig[ETH_ALEN];
+	u8 uid;
+	u8 align[3];
 };
 
 /**
@@ -269,15 +269,15 @@
  * @seqno: ICMP sequence number
  */
 struct batadv_icmp_packet {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  uid;
-	uint8_t  reserved;
-	__be16   seqno;
+	u8     packet_type;
+	u8     version;
+	u8     ttl;
+	u8     msg_type; /* see ICMP message types above */
+	u8     dst[ETH_ALEN];
+	u8     orig[ETH_ALEN];
+	u8     uid;
+	u8     reserved;
+	__be16 seqno;
 };
 
 #define BATADV_RR_LEN 16
@@ -296,16 +296,16 @@
  * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  msg_type; /* see ICMP message types above */
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  orig[ETH_ALEN];
-	uint8_t  uid;
-	uint8_t  rr_cur;
-	__be16   seqno;
-	uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
+	u8     packet_type;
+	u8     version;
+	u8     ttl;
+	u8     msg_type; /* see ICMP message types above */
+	u8     dst[ETH_ALEN];
+	u8     orig[ETH_ALEN];
+	u8     uid;
+	u8     rr_cur;
+	__be16 seqno;
+	u8     rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
 #define BATADV_ICMP_MAX_PACKET_SIZE	sizeof(struct batadv_icmp_packet_rr)
@@ -331,11 +331,11 @@
  * @dest: originator destination of the unicast packet
  */
 struct batadv_unicast_packet {
-	uint8_t  packet_type;
-	uint8_t  version;
-	uint8_t  ttl;
-	uint8_t  ttvn; /* destination translation table version number */
-	uint8_t  dest[ETH_ALEN];
+	u8 packet_type;
+	u8 version;
+	u8 ttl;
+	u8 ttvn; /* destination translation table version number */
+	u8 dest[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -349,9 +349,9 @@
  */
 struct batadv_unicast_4addr_packet {
 	struct batadv_unicast_packet u;
-	uint8_t src[ETH_ALEN];
-	uint8_t subtype;
-	uint8_t reserved;
+	u8 src[ETH_ALEN];
+	u8 subtype;
+	u8 reserved;
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -370,22 +370,22 @@
  * @total_size: size of the merged packet
  */
 struct batadv_frag_packet {
-	uint8_t packet_type;
-	uint8_t version;  /* batman version field */
-	uint8_t ttl;
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
 #if defined(__BIG_ENDIAN_BITFIELD)
-	uint8_t no:4;
-	uint8_t reserved:4;
+	u8     no:4;
+	u8     reserved:4;
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
-	uint8_t reserved:4;
-	uint8_t no:4;
+	u8     reserved:4;
+	u8     no:4;
 #else
 #error "unknown bitfield endianness"
 #endif
-	uint8_t dest[ETH_ALEN];
-	uint8_t orig[ETH_ALEN];
-	__be16  seqno;
-	__be16  total_size;
+	u8     dest[ETH_ALEN];
+	u8     orig[ETH_ALEN];
+	__be16 seqno;
+	__be16 total_size;
 };
 
 /**
@@ -398,12 +398,12 @@
  * @orig: originator of the broadcast packet
  */
 struct batadv_bcast_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  ttl;
-	uint8_t  reserved;
-	__be32   seqno;
-	uint8_t  orig[ETH_ALEN];
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
+	u8     reserved;
+	__be32 seqno;
+	u8     orig[ETH_ALEN];
 	/* "4 bytes boundary + 2 bytes" long to make the payload after the
 	 * following ethernet header again 4 bytes boundary aligned
 	 */
@@ -428,21 +428,21 @@
  * @coded_len: length of network coded part of the payload
  */
 struct batadv_coded_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  ttl;
-	uint8_t  first_ttvn;
-	/* uint8_t  first_dest[ETH_ALEN]; - saved in mac header destination */
-	uint8_t  first_source[ETH_ALEN];
-	uint8_t  first_orig_dest[ETH_ALEN];
-	__be32   first_crc;
-	uint8_t  second_ttl;
-	uint8_t  second_ttvn;
-	uint8_t  second_dest[ETH_ALEN];
-	uint8_t  second_source[ETH_ALEN];
-	uint8_t  second_orig_dest[ETH_ALEN];
-	__be32   second_crc;
-	__be16   coded_len;
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
+	u8     first_ttvn;
+	/* u8  first_dest[ETH_ALEN]; - saved in mac header destination */
+	u8     first_source[ETH_ALEN];
+	u8     first_orig_dest[ETH_ALEN];
+	__be32 first_crc;
+	u8     second_ttl;
+	u8     second_ttvn;
+	u8     second_dest[ETH_ALEN];
+	u8     second_source[ETH_ALEN];
+	u8     second_orig_dest[ETH_ALEN];
+	__be32 second_crc;
+	__be16 coded_len;
 };
 
 #pragma pack()
@@ -459,14 +459,14 @@
  * @align: 2 bytes to align the header to a 4 byte boundary
  */
 struct batadv_unicast_tvlv_packet {
-	uint8_t  packet_type;
-	uint8_t  version;  /* batman version field */
-	uint8_t  ttl;
-	uint8_t  reserved;
-	uint8_t  dst[ETH_ALEN];
-	uint8_t  src[ETH_ALEN];
-	__be16   tvlv_len;
-	uint16_t align;
+	u8     packet_type;
+	u8     version;  /* batman version field */
+	u8     ttl;
+	u8     reserved;
+	u8     dst[ETH_ALEN];
+	u8     src[ETH_ALEN];
+	__be16 tvlv_len;
+	u16    align;
 };
 
 /**
@@ -476,9 +476,9 @@
  * @len: tvlv container length
  */
 struct batadv_tvlv_hdr {
-	uint8_t type;
-	uint8_t version;
-	__be16  len;
+	u8     type;
+	u8     version;
+	__be16 len;
 };
 
 /**
@@ -500,9 +500,9 @@
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
-	uint8_t flags;
-	uint8_t ttvn;
-	__be16  num_vlan;
+	u8     flags;
+	u8     ttvn;
+	__be16 num_vlan;
 };
 
 /**
@@ -513,9 +513,9 @@
  * @reserved: unused, useful for alignment purposes
  */
 struct batadv_tvlv_tt_vlan_data {
-	__be32	crc;
-	__be16	vid;
-	uint16_t reserved;
+	__be32 crc;
+	__be16 vid;
+	u16    reserved;
 };
 
 /**
@@ -527,9 +527,9 @@
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_tt_change {
-	uint8_t flags;
-	uint8_t reserved[3];
-	uint8_t addr[ETH_ALEN];
+	u8     flags;
+	u8     reserved[3];
+	u8     addr[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -539,7 +539,7 @@
  * @vid: VLAN identifier
  */
 struct batadv_tvlv_roam_adv {
-	uint8_t  client[ETH_ALEN];
+	u8     client[ETH_ALEN];
 	__be16 vid;
 };
 
@@ -549,8 +549,8 @@
  * @reserved: reserved field
  */
 struct batadv_tvlv_mcast_data {
-	uint8_t	flags;
-	uint8_t reserved[3];
+	u8 flags;
+	u8 reserved[3];
 };
 
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
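
The packet.h hunks are pure uint8_t -> u8 spelling changes, so the on-wire layout must stay identical; the comment above batadv_ogm_packet depends on the struct size being a multiple of 4 so that __packed can be omitted. A compile-time check of that invariant against a reduced, illustrative copy of the header (not part of the patch):

#include <stdint.h>

#define ETH_ALEN 6

/* Reduced copy of batadv_ogm_packet for illustration only. */
struct ogm_packet {
	uint8_t  packet_type;
	uint8_t  version;
	uint8_t  ttl;
	uint8_t  flags;
	uint32_t seqno;			/* __be32 in the kernel header */
	uint8_t  orig[ETH_ALEN];
	uint8_t  prev_sender[ETH_ALEN];
	uint8_t  reserved;
	uint8_t  tq;
	uint16_t tvlv_len;		/* __be16 in the kernel header */
};

/* The header comment states the size is divisible by 4 and the largest
 * member is 4 bytes wide, so no __packed attribute is needed.
 */
_Static_assert(sizeof(struct ogm_packet) % 4 == 0,
	       "OGM header must stay 4-byte aligned without __packed");

int main(void) { return 0; }
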
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c360c0c..8d990b0 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -145,7 +145,7 @@
  *  0 if the packet is to be accepted
  *  1 if the packet is to be ignored.
  */
-int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
 			    unsigned long *last_reset)
 {
 	if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
@@ -653,19 +653,19 @@
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
 			      struct batadv_unicast_packet *unicast_packet,
-			      uint8_t *dst_addr, unsigned short vid)
+			      u8 *dst_addr, unsigned short vid)
 {
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
 	bool ret = false;
-	uint8_t *orig_addr, orig_ttvn;
+	u8 *orig_addr, orig_ttvn;
 
 	if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
 		primary_if = batadv_primary_if_get_selected(bat_priv);
 		if (!primary_if)
 			goto out;
 		orig_addr = primary_if->net_dev->dev_addr;
-		orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+		orig_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 	} else {
 		orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
 						     vid);
@@ -676,7 +676,7 @@
 			goto out;
 
 		orig_addr = orig_node->orig;
-		orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+		orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 	}
 
 	/* update the packet header */
@@ -698,7 +698,7 @@
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_hard_iface *primary_if;
 	struct batadv_orig_node *orig_node;
-	uint8_t curr_ttvn, old_ttvn;
+	u8 curr_ttvn, old_ttvn;
 	struct ethhdr *ethhdr;
 	unsigned short vid;
 	int is_old_ttvn;
@@ -740,7 +740,7 @@
 	 * value is used later to check if the node which sent (or re-routed
 	 * last time) the packet had an updated information or not
 	 */
-	curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+	curr_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 		orig_node = batadv_orig_hash_find(bat_priv,
 						  unicast_packet->dest);
@@ -751,7 +751,7 @@
 		if (!orig_node)
 			return 0;
 
-		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+		curr_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 		batadv_orig_node_free_ref(orig_node);
 	}
 
@@ -833,7 +833,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_unicast_packet *unicast_packet;
 	struct batadv_unicast_4addr_packet *unicast_4addr_packet;
-	uint8_t *orig_addr;
+	u8 *orig_addr;
 	struct batadv_orig_node *orig_node = NULL;
 	int check, hdr_size = sizeof(*unicast_packet);
 	bool is4addr;
@@ -904,7 +904,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
 	struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
 	unsigned char *tvlv_buff;
-	uint16_t tvlv_buff_len;
+	u16 tvlv_buff_len;
 	int hdr_size = sizeof(*unicast_tvlv_packet);
 	int ret = NET_RX_DROP;
 
@@ -1007,8 +1007,8 @@
 	struct ethhdr *ethhdr;
 	int hdr_size = sizeof(*bcast_packet);
 	int ret = NET_RX_DROP;
-	int32_t seq_diff;
-	uint32_t seqno;
+	s32 seq_diff;
+	u32 seqno;
 
 	/* drop packet if it has not necessary minimum size */
 	if (unlikely(!pskb_may_pull(skb, hdr_size)))
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 6bc29d3..204bbe4 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -22,10 +22,6 @@
 
 #include <linux/types.h>
 
-struct batadv_hard_iface;
-struct batadv_neigh_node;
-struct batadv_orig_node;
-struct batadv_priv;
 struct sk_buff;
 
 bool batadv_check_management_packet(struct sk_buff *skb,
@@ -55,7 +51,7 @@
 batadv_find_router(struct batadv_priv *bat_priv,
 		   struct batadv_orig_node *orig_node,
 		   struct batadv_hard_iface *recv_if);
-int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
 			    unsigned long *last_reset);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 0a01992..f664324 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -54,7 +54,7 @@
  */
 int batadv_send_skb_packet(struct sk_buff *skb,
 			   struct batadv_hard_iface *hard_iface,
-			   const uint8_t *dst_addr)
+			   const u8 *dst_addr)
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct ethhdr *ethhdr;
@@ -172,7 +172,7 @@
 				  struct batadv_orig_node *orig_node)
 {
 	struct batadv_unicast_packet *unicast_packet;
-	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	u8 ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 
 	if (batadv_skb_head_push(skb, hdr_size) < 0)
 		return false;
@@ -343,12 +343,12 @@
  */
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb, int packet_type,
-				   int packet_subtype, uint8_t *dst_hint,
+				   int packet_subtype, u8 *dst_hint,
 				   unsigned short vid)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct batadv_orig_node *orig_node;
-	uint8_t *src, *dst;
+	u8 *src, *dst;
 
 	src = ethhdr->h_source;
 	dst = ethhdr->h_dest;
@@ -616,7 +616,8 @@
 		 * we delete only packets belonging to the given interface
 		 */
 		if ((hard_iface) &&
-		    (forw_packet->if_incoming != hard_iface))
+		    (forw_packet->if_incoming != hard_iface) &&
+		    (forw_packet->if_outgoing != hard_iface))
 			continue;
 
 		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
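
The send.c hunk above widens the per-interface purge: a queued broadcast is now also dropped when the interface being torn down is its outgoing interface, not only its incoming one. A tiny predicate capturing the new condition (hypothetical stand-in types, for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct iface;				/* opaque stand-in for batadv_hard_iface */

struct forw_packet {
	const struct iface *if_incoming;
	const struct iface *if_outgoing;
};

/* NULL means "purge everything"; otherwise purge packets that use the given
 * interface on either side, matching the updated condition in send.c.
 */
static bool should_purge(const struct forw_packet *fp, const struct iface *iface)
{
	if (!iface)
		return true;
	return fp->if_incoming == iface || fp->if_outgoing == iface;
}

int main(void)
{
	struct iface *eth0 = (struct iface *)0x1;	/* dummy distinct pointers */
	struct iface *eth1 = (struct iface *)0x2;
	struct forw_packet fp = { .if_incoming = eth0, .if_outgoing = eth1 };

	assert(should_purge(&fp, NULL));	/* no filter: always purge */
	assert(should_purge(&fp, eth1));	/* outgoing match now counts too */
	assert(!should_purge(&fp, (struct iface *)0x3));
	return 0;
}
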
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 0536835..82059f2 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -25,15 +25,12 @@
 
 #include "packet.h"
 
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
 struct sk_buff;
 struct work_struct;
 
 int batadv_send_skb_packet(struct sk_buff *skb,
 			   struct batadv_hard_iface *hard_iface,
-			   const uint8_t *dst_addr);
+			   const u8 *dst_addr);
 int batadv_send_skb_to_orig(struct sk_buff *skb,
 			    struct batadv_orig_node *orig_node,
 			    struct batadv_hard_iface *recv_if);
@@ -56,7 +53,7 @@
 			    unsigned short vid);
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
 				   struct sk_buff *skb, int packet_type,
-				   int packet_subtype, uint8_t *dst_hint,
+				   int packet_subtype, u8 *dst_hint,
 				   unsigned short vid);
 int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
 			   unsigned short vid);
@@ -75,7 +72,7 @@
  * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
-					 struct sk_buff *skb, uint8_t *dst_hint,
+					 struct sk_buff *skb, u8 *dst_hint,
 					 unsigned short vid)
 {
 	return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
@@ -100,7 +97,7 @@
 static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
 					       struct sk_buff *skb,
 					       int packet_subtype,
-					       uint8_t *dst_hint,
+					       u8 *dst_hint,
 					       unsigned short vid)
 {
 	return batadv_send_skb_via_tt_generic(bat_priv, skb,
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a2fc843..ac4d08d 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -131,7 +131,7 @@
 	struct batadv_priv *bat_priv = netdev_priv(dev);
 	struct batadv_softif_vlan *vlan;
 	struct sockaddr *addr = p;
-	uint8_t old_addr[ETH_ALEN];
+	u8 old_addr[ETH_ALEN];
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
@@ -186,22 +186,23 @@
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_bcast_packet *bcast_packet;
 	__be16 ethertype = htons(ETH_P_BATMAN);
-	static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
-						   0x00, 0x00};
-	static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
-						    0x00, 0x00};
+	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
+					      0x00, 0x00};
+	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
+					       0x00, 0x00};
 	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
-	uint8_t *dst_hint = NULL, chaddr[ETH_ALEN];
+	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
 	struct vlan_ethhdr *vhdr;
 	unsigned int header_len = 0;
 	int data_len = skb->len, ret;
 	unsigned long brd_delay = 1;
 	bool do_bcast = false, client_added;
 	unsigned short vid;
-	uint32_t seqno;
+	u32 seqno;
 	int gw_mode;
 	enum batadv_forw_mode forw_mode;
 	struct batadv_orig_node *mcast_single_orig = NULL;
+	int network_offset = ETH_HLEN;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
@@ -214,14 +215,18 @@
 	case ETH_P_8021Q:
 		vhdr = vlan_eth_hdr(skb);
 
-		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+		if (vhdr->h_vlan_encapsulated_proto != ethertype) {
+			network_offset += VLAN_HLEN;
 			break;
+		}
 
 		/* fall through */
 	case ETH_P_BATMAN:
 		goto dropped;
 	}
 
+	skb_set_network_header(skb, network_offset);
+
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
@@ -745,9 +750,9 @@
 static int batadv_softif_init_late(struct net_device *dev)
 {
 	struct batadv_priv *bat_priv;
-	uint32_t random_seqno;
+	u32 random_seqno;
 	int ret;
-	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
+	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
 	batadv_set_lockdep_class(dev);
 
@@ -758,7 +763,7 @@
 	/* batadv_interface_stats() needs to be available as soon as
 	 * register_netdevice() has been called
 	 */
-	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
 	if (!bat_priv->bat_counters)
 		return -ENOMEM;
 
@@ -936,14 +941,12 @@
 	dev->netdev_ops = &batadv_netdev_ops;
 	dev->destructor = batadv_softif_free;
 	dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	/* can't call min_mtu, because the needed variables
 	 * have not been initialized yet
 	 */
 	dev->mtu = ETH_DATA_LEN;
-	/* reserve more space in the skbuff for our header */
-	dev->hard_header_len = batadv_max_header_len();
 
 	/* generate random address */
 	eth_hw_addr_random(dev);
@@ -1112,8 +1115,7 @@
 #endif
 };
 
-static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
-			       uint8_t *data)
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	if (stringset == ETH_SS_STATS)
 		memcpy(data, batadv_counters_strings,
@@ -1121,8 +1123,7 @@
 }
 
 static void batadv_get_ethtool_stats(struct net_device *dev,
-				     struct ethtool_stats *stats,
-				     uint64_t *data)
+				     struct ethtool_stats *stats, u64 *data)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
 	int i;
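
One functional change in soft-interface.c above is that batadv_interface_tx() now records where the network header starts before handing the frame on: ETH_HLEN for a plain Ethernet frame, plus VLAN_HLEN when the frame carries an 802.1Q tag that is not batman-encapsulated. A small sketch of that offset computation (assumed constant values, illustration only):

#include <stdio.h>

#define ETH_HLEN   14	/* bytes of the plain Ethernet header */
#define VLAN_HLEN   4	/* extra bytes for one 802.1Q tag */

/* Approximates the new network_offset logic in batadv_interface_tx():
 * the network header starts after the Ethernet header, plus one VLAN tag
 * when the frame is 802.1Q and its payload is not batman-encapsulated
 * (batman-encapsulated frames are dropped before the offset is used).
 */
static int network_header_offset(int is_8021q, int is_batman_payload)
{
	int offset = ETH_HLEN;

	if (is_8021q && !is_batman_payload)
		offset += VLAN_HLEN;
	return offset;
}

int main(void)
{
	printf("plain: %d, vlan-tagged: %d\n",
	       network_header_offset(0, 0), network_header_offset(1, 0));
	return 0;
}
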
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 578e8a6..8e82176 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -22,10 +22,6 @@
 
 #include <net/rtnetlink.h>
 
-struct batadv_hard_iface;
-struct batadv_orig_node;
-struct batadv_priv;
-struct batadv_softif_vlan;
 struct net_device;
 struct sk_buff;
 
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index d6a312a..9de3c88 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -457,7 +457,7 @@
 				     struct attribute *attr, char *buff)
 {
 	struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-	uint32_t down, up;
+	u32 down, up;
 
 	down = atomic_read(&bat_priv->gw.bandwidth_down);
 	up = atomic_read(&bat_priv->gw.bandwidth_up);
@@ -512,7 +512,7 @@
 {
 	struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
-	uint32_t mark, mask;
+	u32 mark, mask;
 	char *mask_ptr;
 
 	/* parse the mask if it has been specified, otherwise assume the mask is
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index 2294583..6197442 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -23,8 +23,6 @@
 #include <linux/sysfs.h>
 #include <linux/types.h>
 
-struct batadv_priv;
-struct batadv_softif_vlan;
 struct kobject;
 struct net_device;
 
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5809b39..4228b10 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -19,6 +19,7 @@
 #include "main.h"
 
 #include <linux/atomic.h>
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/byteorder/generic.h>
 #include <linux/compiler.h>
@@ -55,7 +56,7 @@
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
 static struct lock_class_key batadv_tt_global_hash_lock_class_key;
 
-static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
 				 unsigned short vid,
 				 struct batadv_orig_node *orig_node);
 static void batadv_tt_purge(struct work_struct *work);
@@ -84,10 +85,10 @@
  * Returns the hash index where the object represented by 'data' should be
  * stored at.
  */
-static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+static inline u32 batadv_choose_tt(const void *data, u32 size)
 {
 	struct batadv_tt_common_entry *tt;
-	uint32_t hash = 0;
+	u32 hash = 0;
 
 	tt = (struct batadv_tt_common_entry *)data;
 	hash = jhash(&tt->addr, ETH_ALEN, hash);
@@ -106,12 +107,12 @@
  * found, NULL otherwise.
  */
 static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
 		    unsigned short vid)
 {
 	struct hlist_head *head;
 	struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
-	uint32_t index;
+	u32 index;
 
 	if (!hash)
 		return NULL;
@@ -151,7 +152,7 @@
  * found, NULL otherwise.
  */
 static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 			  unsigned short vid)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
@@ -176,7 +177,7 @@
  * is found, NULL otherwise.
  */
 static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 			   unsigned short vid)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
@@ -222,7 +223,7 @@
  * (excluding ourself).
  */
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid)
+				const u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	int count;
@@ -314,7 +315,7 @@
 
 	if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
 		spin_lock_bh(&orig_node->vlan_list_lock);
-		list_del_rcu(&vlan->list);
+		hlist_del_init_rcu(&vlan->list);
 		spin_unlock_bh(&orig_node->vlan_list_lock);
 		batadv_orig_node_vlan_free_ref(vlan);
 	}
@@ -363,11 +364,11 @@
  */
 static void batadv_tt_local_event(struct batadv_priv *bat_priv,
 				  struct batadv_tt_local_entry *tt_local_entry,
-				  uint8_t event_flags)
+				  u8 event_flags)
 {
 	struct batadv_tt_change_node *tt_change_node, *entry, *safe;
 	struct batadv_tt_common_entry *common = &tt_local_entry->common;
-	uint8_t flags = common->flags | event_flags;
+	u8 flags = common->flags | event_flags;
 	bool event_removed = false;
 	bool del_op_requested, del_op_entry;
 
@@ -447,7 +448,7 @@
  *
  * Returns the number of entries.
  */
-static uint16_t batadv_tt_entries(uint16_t tt_len)
+static u16 batadv_tt_entries(u16 tt_len)
 {
 	return tt_len / batadv_tt_len(1);
 }
@@ -461,7 +462,8 @@
  */
 static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
 {
-	uint16_t num_vlan = 0, tt_local_entries = 0;
+	u16 num_vlan = 0;
+	u16 tt_local_entries = 0;
 	struct batadv_softif_vlan *vlan;
 	int hdr_size;
 
@@ -524,8 +526,8 @@
  *
  * Returns true if the client was successfully added, false otherwise.
  */
-bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-			 unsigned short vid, int ifindex, uint32_t mark)
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+			 unsigned short vid, int ifindex, u32 mark)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_tt_local_entry *tt_local;
@@ -535,9 +537,10 @@
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added, table_size, packet_size_max;
-	bool ret = false, roamed_back = false;
-	uint8_t remote_flags;
-	uint32_t match_mark;
+	bool ret = false;
+	bool roamed_back = false;
+	u8 remote_flags;
+	u32 match_mark;
 
 	if (ifindex != BATADV_NULL_IFINDEX)
 		in_dev = dev_get_by_index(&init_net, ifindex);
@@ -604,7 +607,7 @@
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
 		   addr, BATADV_PRINT_VID(vid),
-		   (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   (u8)atomic_read(&bat_priv->tt.vn));
 
 	ether_addr_copy(tt_local->common.addr, addr);
 	/* The local entry has to be marked as NEW to avoid to send it in
@@ -723,19 +726,22 @@
  *
  * Return the size of the allocated buffer or 0 in case of failure.
  */
-static uint16_t
+static u16
 batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
 				   struct batadv_tvlv_tt_data **tt_data,
 				   struct batadv_tvlv_tt_change **tt_change,
-				   int32_t *tt_len)
+				   s32 *tt_len)
 {
-	uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+	u16 num_vlan = 0;
+	u16 num_entries = 0;
+	u16 change_offset;
+	u16 tvlv_len;
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_orig_node_vlan *vlan;
-	uint8_t *tt_change_ptr;
+	u8 *tt_change_ptr;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		num_vlan++;
 		num_entries += atomic_read(&vlan->tt.num_entries);
 	}
@@ -761,14 +767,14 @@
 	(*tt_data)->num_vlan = htons(num_vlan);
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
-	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		tt_vlan->vid = htons(vlan->vid);
 		tt_vlan->crc = htonl(vlan->tt.crc);
 
 		tt_vlan++;
 	}
 
-	tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+	tt_change_ptr = (u8 *)*tt_data + change_offset;
 	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
 
 out:
@@ -794,16 +800,18 @@
  *
  * Return the size of the allocated buffer or 0 in case of failure.
  */
-static uint16_t
+static u16
 batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 				  struct batadv_tvlv_tt_data **tt_data,
 				  struct batadv_tvlv_tt_change **tt_change,
-				  int32_t *tt_len)
+				  s32 *tt_len)
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_softif_vlan *vlan;
-	uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
-	uint8_t *tt_change_ptr;
+	u16 num_vlan = 0;
+	u16 num_entries = 0;
+	u16 tvlv_len;
+	u8 *tt_change_ptr;
 	int change_offset;
 
 	rcu_read_lock();
@@ -840,7 +848,7 @@
 		tt_vlan++;
 	}
 
-	tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+	tt_change_ptr = (u8 *)*tt_data + change_offset;
 	*tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
 
 out:
@@ -859,8 +867,9 @@
 	struct batadv_tvlv_tt_data *tt_data;
 	struct batadv_tvlv_tt_change *tt_change;
 	int tt_diff_len, tt_change_len = 0;
-	int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
-	uint16_t tvlv_len;
+	int tt_diff_entries_num = 0;
+	int tt_diff_entries_count = 0;
+	u16 tvlv_len;
 
 	tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
 	tt_diff_len = batadv_tt_len(tt_diff_entries_num);
@@ -934,12 +943,12 @@
 	struct batadv_softif_vlan *vlan;
 	struct hlist_head *head;
 	unsigned short vid;
-	uint32_t i;
+	u32 i;
 	int last_seen_secs;
 	int last_seen_msecs;
 	unsigned long last_seen_jiffies;
 	bool no_purge;
-	uint16_t np_flag = BATADV_TT_CLIENT_NOPURGE;
+	u16 np_flag = BATADV_TT_CLIENT_NOPURGE;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -947,7 +956,7 @@
 
 	seq_printf(seq,
 		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
-		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   net_dev->name, (u8)atomic_read(&bat_priv->tt.vn));
 	seq_printf(seq, "       %-13s  %s %-8s %-9s (%-10s)\n", "Client", "VID",
 		   "Flags", "Last seen", "CRC");
 
@@ -1007,7 +1016,7 @@
 static void
 batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
 			    struct batadv_tt_local_entry *tt_local_entry,
-			    uint16_t flags, const char *message)
+			    u16 flags, const char *message)
 {
 	batadv_tt_local_event(bat_priv, tt_local_entry, flags);
 
@@ -1033,12 +1042,12 @@
  *
  * Returns the flags assigned to the local entry before being deleted
  */
-uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid,
-				const char *message, bool roaming)
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
+			   unsigned short vid, const char *message,
+			   bool roaming)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
-	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+	u16 flags, curr_flags = BATADV_NO_FLAGS;
 	struct batadv_softif_vlan *vlan;
 	void *tt_entry_exists;
 
@@ -1141,7 +1150,7 @@
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	uint32_t i;
+	u32 i;
 
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];
@@ -1162,7 +1171,7 @@
 	struct batadv_softif_vlan *vlan;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!bat_priv->tt.local_hash)
 		return;
@@ -1337,15 +1346,14 @@
 static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
 				 struct batadv_orig_node *orig_node,
 				 const unsigned char *tt_addr,
-				 unsigned short vid, uint16_t flags,
-				 uint8_t ttvn)
+				 unsigned short vid, u16 flags, u8 ttvn)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
 	int hash_added;
 	struct batadv_tt_common_entry *common;
-	uint16_t local_flags;
+	u16 local_flags;
 
 	/* ignore global entries from backbone nodes */
 	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
@@ -1542,8 +1550,8 @@
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_orig_node_vlan *vlan;
 	struct hlist_head *head;
-	uint8_t last_ttvn;
-	uint16_t flags;
+	u8 last_ttvn;
+	u16 flags;
 
 	tt_common_entry = &tt_global_entry->common;
 	flags = tt_common_entry->flags;
@@ -1617,7 +1625,7 @@
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_hard_iface *primary_if;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	primary_if = batadv_seq_print_text_primary_if_get(seq);
 	if (!primary_if)
@@ -1650,20 +1658,28 @@
 }
 
 /**
- * batadv_tt_global_del_orig_entry - remove and free an orig_entry
+ * _batadv_tt_global_del_orig_entry - remove and free an orig_entry
  * @tt_global_entry: the global entry to remove the orig_entry from
  * @orig_entry: the orig entry to remove and free
  *
  * Remove an orig_entry from its list in the given tt_global_entry and
  * free this orig_entry afterwards.
+ *
+ * Caller must hold tt_global_entry->list_lock and ensure orig_entry->list is
+ * part of a list.
  */
 static void
-batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
-				struct batadv_tt_orig_list_entry *orig_entry)
+_batadv_tt_global_del_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+				 struct batadv_tt_orig_list_entry *orig_entry)
 {
+	lockdep_assert_held(&tt_global_entry->list_lock);
+
 	batadv_tt_global_size_dec(orig_entry->orig_node,
 				  tt_global_entry->common.vid);
 	atomic_dec(&tt_global_entry->orig_list_count);
+	/* requires holding tt_global_entry->list_lock and orig_entry->list
+	 * being part of a list
+	 */
 	hlist_del_rcu(&orig_entry->list);
 	batadv_tt_orig_list_entry_free_ref(orig_entry);
 }
@@ -1679,7 +1695,7 @@
 	spin_lock_bh(&tt_global_entry->list_lock);
 	head = &tt_global_entry->orig_list;
 	hlist_for_each_entry_safe(orig_entry, safe, head, list)
-		batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
+		_batadv_tt_global_del_orig_entry(tt_global_entry, orig_entry);
 	spin_unlock_bh(&tt_global_entry->list_lock);
 }
 
@@ -1714,8 +1730,8 @@
 				   orig_node->orig,
 				   tt_global_entry->common.addr,
 				   BATADV_PRINT_VID(vid), message);
-			batadv_tt_global_del_orig_entry(tt_global_entry,
-							orig_entry);
+			_batadv_tt_global_del_orig_entry(tt_global_entry,
+							 orig_entry);
 		}
 	}
 	spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1837,12 +1853,12 @@
  */
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
-			       int32_t match_vid,
+			       s32 match_vid,
 			       const char *message)
 {
 	struct batadv_tt_global_entry *tt_global;
 	struct batadv_tt_common_entry *tt_common_entry;
-	uint32_t i;
+	u32 i;
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct hlist_node *safe;
 	struct hlist_head *head;
@@ -1882,7 +1898,7 @@
 		}
 		spin_unlock_bh(list_lock);
 	}
-	orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
+	clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
@@ -1913,7 +1929,7 @@
 	struct hlist_head *head;
 	struct hlist_node *node_tmp;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	uint32_t i;
+	u32 i;
 	char *msg = NULL;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
@@ -1954,7 +1970,7 @@
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!bat_priv->tt.global_hash)
 		return;
@@ -2015,8 +2031,8 @@
  * If the two clients are AP isolated the function returns NULL.
  */
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
-						  const uint8_t *src,
-						  const uint8_t *addr,
+						  const u8 *src,
+						  const u8 *addr,
 						  unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
@@ -2084,16 +2100,16 @@
  *
  * Returns the checksum of the global table of a given originator.
  */
-static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
-				     struct batadv_orig_node *orig_node,
-				     unsigned short vid)
+static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
+				struct batadv_orig_node *orig_node,
+				unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct batadv_tt_global_entry *tt_global;
 	struct hlist_head *head;
-	uint32_t i, crc_tmp, crc = 0;
-	uint8_t flags;
+	u32 i, crc_tmp, crc = 0;
+	u8 flags;
 	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
@@ -2161,14 +2177,14 @@
  *
  * Returns the checksum of the local table
  */
-static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
-				    unsigned short vid)
+static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
+			       unsigned short vid)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common;
 	struct hlist_head *head;
-	uint32_t i, crc_tmp, crc = 0;
-	uint8_t flags;
+	u32 i, crc_tmp, crc = 0;
+	u8 flags;
 	__be16 tmp_vid;
 
 	for (i = 0; i < hash->size; i++) {
@@ -2210,12 +2226,13 @@
 
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 {
-	struct batadv_tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node;
+	struct hlist_node *safe;
 
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
 
-	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-		list_del(&node->list);
+	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+		hlist_del_init(&node->list);
 		kfree(node);
 	}
 
@@ -2225,7 +2242,7 @@
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
 				       struct batadv_orig_node *orig_node,
 				       const void *tt_buff,
-				       uint16_t tt_buff_len)
+				       u16 tt_buff_len)
 {
 	/* Replace the old buffer only if I received something in the
 	 * last OGM (the OGM could carry no changes)
@@ -2245,30 +2262,36 @@
 
 static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
 {
-	struct batadv_tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node;
+	struct hlist_node *safe;
 
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		if (batadv_has_timed_out(node->issued_at,
 					 BATADV_TT_REQUEST_TIMEOUT)) {
-			list_del(&node->list);
+			hlist_del_init(&node->list);
 			kfree(node);
 		}
 	}
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 }
 
-/* returns the pointer to the new tt_req_node struct if no request
- * has already been issued for this orig_node, NULL otherwise
+/**
+ * batadv_tt_req_node_new - search and possibly create a tt_req_node object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: orig node this request is being issued for
+ *
+ * Returns the pointer to the new tt_req_node struct if no request
+ * has already been issued for this orig_node, NULL otherwise.
  */
 static struct batadv_tt_req_node *
-batadv_new_tt_req_node(struct batadv_priv *bat_priv,
+batadv_tt_req_node_new(struct batadv_priv *bat_priv,
 		       struct batadv_orig_node *orig_node)
 {
 	struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
 
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
-	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
+	hlist_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
 		if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
 		    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
 					  BATADV_TT_REQUEST_TIMEOUT))
@@ -2282,7 +2305,7 @@
 	ether_addr_copy(tt_req_node->addr, orig_node->orig);
 	tt_req_node->issued_at = jiffies;
 
-	list_add(&tt_req_node->list, &bat_priv->tt.req_list);
+	hlist_add_head(&tt_req_node->list, &bat_priv->tt.req_list);
 unlock:
 	spin_unlock_bh(&bat_priv->tt.req_list_lock);
 	return tt_req_node;
@@ -2334,15 +2357,15 @@
  */
 static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
 				    struct batadv_hashtable *hash,
-				    void *tvlv_buff, uint16_t tt_len,
+				    void *tvlv_buff, u16 tt_len,
 				    int (*valid_cb)(const void *, const void *),
 				    void *cb_data)
 {
 	struct batadv_tt_common_entry *tt_common_entry;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct hlist_head *head;
-	uint16_t tt_tot, tt_num_entries = 0;
-	uint32_t i;
+	u16 tt_tot, tt_num_entries = 0;
+	u32 i;
 
 	tt_tot = batadv_tt_entries(tt_len);
 	tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
@@ -2384,11 +2407,11 @@
  */
 static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
 				       struct batadv_tvlv_tt_vlan_data *tt_vlan,
-				       uint16_t num_vlan)
+				       u16 num_vlan)
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
 	struct batadv_orig_node_vlan *vlan;
-	uint32_t crc;
+	u32 crc;
 	int i;
 
 	/* check if each received CRC matches the locally stored one */
@@ -2443,11 +2466,11 @@
 					struct batadv_orig_node *orig_node)
 {
 	struct batadv_orig_node_vlan *vlan;
-	uint32_t crc;
+	u32 crc;
 
 	/* recompute the global CRC for each VLAN */
 	rcu_read_lock();
-	list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+	hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
 		/* if orig_node is a backbone node for this VLAN, don't compute
 		 * the CRC as we ignore all the global entries over it
 		 */
@@ -2473,9 +2496,9 @@
  */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
 				  struct batadv_orig_node *dst_orig_node,
-				  uint8_t ttvn,
+				  u8 ttvn,
 				  struct batadv_tvlv_tt_vlan_data *tt_vlan,
-				  uint16_t num_vlan, bool full_table)
+				  u16 num_vlan, bool full_table)
 {
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_tt_req_node *tt_req_node = NULL;
@@ -2491,7 +2514,7 @@
 	/* The new tt_req will be issued only if I'm not waiting for a
 	 * reply from the same orig_node yet
 	 */
-	tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
+	tt_req_node = batadv_tt_req_node_new(bat_priv, dst_orig_node);
 	if (!tt_req_node)
 		goto out;
 
@@ -2533,7 +2556,8 @@
 		batadv_hardif_free_ref(primary_if);
 	if (ret && tt_req_node) {
 		spin_lock_bh(&bat_priv->tt.req_list_lock);
-		list_del(&tt_req_node->list);
+		/* hlist_del_init() verifies tt_req_node still is in the list */
+		hlist_del_init(&tt_req_node->list);
 		spin_unlock_bh(&bat_priv->tt.req_list_lock);
 		kfree(tt_req_node);
 	}
@@ -2553,7 +2577,7 @@
  */
 static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
 					  struct batadv_tvlv_tt_data *tt_data,
-					  uint8_t *req_src, uint8_t *req_dst)
+					  u8 *req_src, u8 *req_dst)
 {
 	struct batadv_orig_node *req_dst_orig_node;
 	struct batadv_orig_node *res_dst_orig_node = NULL;
@@ -2561,9 +2585,9 @@
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	bool ret = false, full_table;
-	uint8_t orig_ttvn, req_ttvn;
-	uint16_t tvlv_len;
-	int32_t tt_len;
+	u8 orig_ttvn, req_ttvn;
+	u16 tvlv_len;
+	s32 tt_len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
@@ -2579,7 +2603,7 @@
 	if (!res_dst_orig_node)
 		goto out;
 
-	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
+	orig_ttvn = (u8)atomic_read(&req_dst_orig_node->last_ttvn);
 	req_ttvn = tt_data->ttvn;
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
@@ -2685,16 +2709,16 @@
  */
 static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 				       struct batadv_tvlv_tt_data *tt_data,
-				       uint8_t *req_src)
+				       u8 *req_src)
 {
 	struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct batadv_orig_node *orig_node;
-	uint8_t my_ttvn, req_ttvn;
-	uint16_t tvlv_len;
+	u8 my_ttvn, req_ttvn;
+	u16 tvlv_len;
 	bool full_table;
-	int32_t tt_len;
+	s32 tt_len;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
@@ -2703,7 +2727,7 @@
 
 	spin_lock_bh(&bat_priv->tt.commit_lock);
 
-	my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+	my_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 	req_ttvn = tt_data->ttvn;
 
 	orig_node = batadv_orig_hash_find(bat_priv, req_src);
@@ -2742,7 +2766,7 @@
 		       bat_priv->tt.last_changeset_len);
 		spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 	} else {
-		req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+		req_ttvn = (u8)atomic_read(&bat_priv->tt.vn);
 
 		/* allocate the tvlv, put the tt_data and all the tt_vlan_data
 		 * in the initial part
@@ -2803,7 +2827,7 @@
  */
 static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
 				    struct batadv_tvlv_tt_data *tt_data,
-				    uint8_t *req_src, uint8_t *req_dst)
+				    u8 *req_src, u8 *req_dst)
 {
 	if (batadv_is_my_mac(bat_priv, req_dst))
 		return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
@@ -2814,7 +2838,7 @@
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
 				      struct batadv_orig_node *orig_node,
 				      struct batadv_tvlv_tt_change *tt_change,
-				      uint16_t tt_num_changes, uint8_t ttvn)
+				      u16 tt_num_changes, u8 ttvn)
 {
 	int i;
 	int roams;
@@ -2841,13 +2865,13 @@
 				return;
 		}
 	}
-	orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
+	set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
 				  struct batadv_tvlv_tt_change *tt_change,
-				  uint8_t ttvn, uint8_t *resp_src,
-				  uint16_t num_entries)
+				  u8 ttvn, u8 *resp_src,
+				  u16 num_entries)
 {
 	struct batadv_orig_node *orig_node;
 
@@ -2877,7 +2901,7 @@
 
 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
 				     struct batadv_orig_node *orig_node,
-				     uint16_t tt_num_changes, uint8_t ttvn,
+				     u16 tt_num_changes, u8 ttvn,
 				     struct batadv_tvlv_tt_change *tt_change)
 {
 	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
@@ -2896,7 +2920,7 @@
  *
  * Returns true if the client is served by this node, false otherwise.
  */
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
 			 unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
@@ -2927,13 +2951,14 @@
  */
 static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
 				      struct batadv_tvlv_tt_data *tt_data,
-				      uint8_t *resp_src, uint16_t num_entries)
+				      u8 *resp_src, u16 num_entries)
 {
-	struct batadv_tt_req_node *node, *safe;
+	struct batadv_tt_req_node *node;
+	struct hlist_node *safe;
 	struct batadv_orig_node *orig_node = NULL;
 	struct batadv_tvlv_tt_change *tt_change;
-	uint8_t *tvlv_ptr = (uint8_t *)tt_data;
-	uint16_t change_offset;
+	u8 *tvlv_ptr = (u8 *)tt_data;
+	u16 change_offset;
 
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
@@ -2967,10 +2992,10 @@
 
 	/* Delete the tt_req_node from pending tt_requests list */
 	spin_lock_bh(&bat_priv->tt.req_list_lock);
-	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
+	hlist_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
 		if (!batadv_compare_eth(node->addr, resp_src))
 			continue;
-		list_del(&node->list);
+		hlist_del_init(&node->list);
 		kfree(node);
 	}
 
@@ -3016,8 +3041,7 @@
  *
  * returns true if the ROAMING_ADV can be sent, false otherwise
  */
-static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
-				       uint8_t *client)
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
 {
 	struct batadv_tt_roam_node *tt_roam_node;
 	bool ret = false;
@@ -3072,7 +3096,7 @@
  * for this particular roamed client has to be forwarded to the sender of the
  * roaming message.
  */
-static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, u8 *client,
 				 unsigned short vid,
 				 struct batadv_orig_node *orig_node)
 {
@@ -3150,14 +3174,14 @@
  * @enable: whether to set or unset the flag
  * @count: whether to increase the TT size by the number of changed entries
  */
-static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
-				      uint16_t flags, bool enable, bool count)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
+				      bool enable, bool count)
 {
 	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
 	struct batadv_tt_common_entry *tt_common_entry;
-	uint16_t changed_num = 0;
+	u16 changed_num = 0;
 	struct hlist_head *head;
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -3199,7 +3223,7 @@
 	struct hlist_node *node_tmp;
 	struct hlist_head *head;
 	spinlock_t *list_lock; /* protects write access to the hash lists */
-	uint32_t i;
+	u32 i;
 
 	if (!hash)
 		return;
@@ -3247,6 +3271,8 @@
  */
 static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
+	lockdep_assert_held(&bat_priv->tt.commit_lock);
+
 	/* Update multicast addresses in local translation table */
 	batadv_mcast_mla_update(bat_priv);
 
@@ -3265,7 +3291,7 @@
 	atomic_inc(&bat_priv->tt.vn);
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Local changes committed, updating to ttvn %u\n",
-		   (uint8_t)atomic_read(&bat_priv->tt.vn));
+		   (u8)atomic_read(&bat_priv->tt.vn));
 
 	/* reset the sending counter */
 	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
@@ -3284,8 +3310,8 @@
 	spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
-bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-			   uint8_t *dst, unsigned short vid)
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+			   unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry = NULL;
 	struct batadv_tt_global_entry *tt_global_entry = NULL;
@@ -3333,17 +3359,18 @@
  */
 static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
 				  struct batadv_orig_node *orig_node,
-				  const void *tt_buff, uint16_t tt_num_vlan,
+				  const void *tt_buff, u16 tt_num_vlan,
 				  struct batadv_tvlv_tt_change *tt_change,
-				  uint16_t tt_num_changes, uint8_t ttvn)
+				  u16 tt_num_changes, u8 ttvn)
 {
-	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	u8 orig_ttvn = (u8)atomic_read(&orig_node->last_ttvn);
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	bool full_table = true;
 	bool has_tt_init;
 
 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
-	has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
+	has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
+			       &orig_node->capa_initialized);
 
 	/* orig table not initialised AND first diff is in the OGM OR the ttvn
 	 * increased by one -> we can apply the attached changes
@@ -3415,7 +3442,7 @@
  * deleted later by a DEL or because of timeout
  */
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-					uint8_t *addr, unsigned short vid)
+					u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	bool ret = false;
@@ -3441,7 +3468,7 @@
  * to keep the latter consistent with the node TTVN
  */
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-				       uint8_t *addr, unsigned short vid)
+				       u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_local_entry *tt_local_entry;
 	bool ret = false;
@@ -3527,13 +3554,13 @@
  */
 static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig,
-					  uint8_t flags, void *tvlv_value,
-					  uint16_t tvlv_value_len)
+					  u8 flags, void *tvlv_value,
+					  u16 tvlv_value_len)
 {
 	struct batadv_tvlv_tt_vlan_data *tt_vlan;
 	struct batadv_tvlv_tt_change *tt_change;
 	struct batadv_tvlv_tt_data *tt_data;
-	uint16_t num_entries, num_vlan;
+	u16 num_entries, num_vlan;
 
 	if (tvlv_value_len < sizeof(*tt_data))
 		return;
@@ -3569,12 +3596,12 @@
  * otherwise.
  */
 static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
-					     uint8_t *src, uint8_t *dst,
+					     u8 *src, u8 *dst,
 					     void *tvlv_value,
-					     uint16_t tvlv_value_len)
+					     u16 tvlv_value_len)
 {
 	struct batadv_tvlv_tt_data *tt_data;
-	uint16_t tt_vlan_len, tt_num_entries;
+	u16 tt_vlan_len, tt_num_entries;
 	char tt_flag;
 	bool ret;
 
@@ -3650,9 +3677,9 @@
  * otherwise.
  */
 static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
-					       uint8_t *src, uint8_t *dst,
+					       u8 *src, u8 *dst,
 					       void *tvlv_value,
-					       uint16_t tvlv_value_len)
+					       u16 tvlv_value_len)
 {
 	struct batadv_tvlv_roam_adv *roaming_adv;
 	struct batadv_orig_node *orig_node = NULL;
@@ -3734,7 +3761,7 @@
  * otherwise
  */
 bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
-				  const uint8_t *addr, unsigned short vid)
+				  const u8 *addr, unsigned short vid)
 {
 	struct batadv_tt_global_entry *tt;
 	bool ret;
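
The hunks above convert batadv_priv_tt::req_list from a list_head to an hlist_head and unhook entries with hlist_del_init(), so a node that was already taken off the list on another path can be "deleted" again without corrupting anything. A minimal kernel-style sketch of the same pattern, with a hypothetical req_node type and a standalone list standing in for batadv_tt_req_node and bat_priv->tt.req_list:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for batadv_tt_req_node after the conversion */
struct req_node {
	unsigned long issued_at;
	struct hlist_node list;		/* was: struct list_head list */
};

static HLIST_HEAD(req_list);		/* was: LIST_HEAD(req_list) */
static DEFINE_SPINLOCK(req_list_lock);

static void req_list_free(void)
{
	struct req_node *node;
	struct hlist_node *safe;

	spin_lock_bh(&req_list_lock);
	hlist_for_each_entry_safe(node, safe, &req_list, list) {
		/* hlist_del_init() leaves the node unhashed, so hitting
		 * it again on another cleanup path is a no-op.
		 */
		hlist_del_init(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&req_list_lock);
}

This is the same iteration/removal idiom that batadv_tt_req_list_free(), batadv_tt_req_purge() and batadv_handle_tt_response() use after this change.
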
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 6acc25d..abd8e11 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,44 +22,41 @@
 
 #include <linux/types.h>
 
-struct batadv_orig_node;
-struct batadv_priv;
 struct net_device;
 struct seq_file;
 
 int batadv_tt_init(struct batadv_priv *bat_priv);
-bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-			 unsigned short vid, int ifindex, uint32_t mark);
-uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid,
-				const char *message, bool roaming);
+bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+			 unsigned short vid, int ifindex, u32 mark);
+u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
+			   const u8 *addr, unsigned short vid,
+			   const char *message, bool roaming);
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
 			       struct batadv_orig_node *orig_node,
-			       int32_t match_vid, const char *message);
+			       s32 match_vid, const char *message);
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
-				const uint8_t *addr, unsigned short vid);
+				const u8 *addr, unsigned short vid);
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
-						  const uint8_t *src,
-						  const uint8_t *addr,
+						  const u8 *src, const u8 *addr,
 						  unsigned short vid);
 void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
 			 unsigned short vid);
-bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-			   uint8_t *dst, unsigned short vid);
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst,
+			   unsigned short vid);
 void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-					uint8_t *addr, unsigned short vid);
+					u8 *addr, unsigned short vid);
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-				       uint8_t *addr, unsigned short vid);
+				       u8 *addr, unsigned short vid);
 void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
 					  struct batadv_orig_node *orig_node,
 					  const unsigned char *addr,
 					  unsigned short vid);
 bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
-				  const uint8_t *addr, unsigned short vid);
+				  const u8 *addr, unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 67d6348..d260efd 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -44,7 +44,7 @@
  *
  * *Please be careful: batadv_dat_addr_t must be UNSIGNED*
  */
-#define batadv_dat_addr_t uint16_t
+#define batadv_dat_addr_t u16
 
 #endif /* CONFIG_BATMAN_ADV_DAT */
 
@@ -103,10 +103,10 @@
  */
 struct batadv_hard_iface {
 	struct list_head list;
-	int16_t if_num;
+	s16 if_num;
 	char if_status;
 	struct net_device *net_dev;
-	uint8_t num_bcasts;
+	u8 num_bcasts;
 	struct kobject *hardif_obj;
 	atomic_t refcount;
 	struct packet_type batman_adv_ptype;
@@ -132,8 +132,8 @@
 	struct hlist_node list;
 	struct batadv_hard_iface *if_outgoing;
 	struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
-	uint32_t last_real_seqno;
-	uint8_t last_ttl;
+	u32 last_real_seqno;
+	u8 last_ttl;
 	unsigned long batman_seqno_reset;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -152,9 +152,9 @@
 	struct hlist_head head;
 	spinlock_t lock; /* protects head */
 	unsigned long timestamp;
-	uint16_t seqno;
-	uint16_t size;
-	uint16_t total_size;
+	u16 seqno;
+	u16 size;
+	u16 total_size;
 };
 
 /**
@@ -166,7 +166,7 @@
 struct batadv_frag_list_entry {
 	struct hlist_node list;
 	struct sk_buff *skb;
-	uint8_t no;
+	u8 no;
 };
 
 /**
@@ -175,7 +175,7 @@
  * @num_entries: number of TT entries for this VLAN
  */
 struct batadv_vlan_tt {
-	uint32_t crc;
+	u32 crc;
 	atomic_t num_entries;
 };
 
@@ -190,7 +190,7 @@
 struct batadv_orig_node_vlan {
 	unsigned short vid;
 	struct batadv_vlan_tt tt;
-	struct list_head list;
+	struct hlist_node list;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -206,7 +206,7 @@
  */
 struct batadv_orig_bat_iv {
 	unsigned long *bcast_own;
-	uint8_t *bcast_own_sum;
+	u8 *bcast_own_sum;
 	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
 	 * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
 	 */
@@ -221,6 +221,7 @@
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
+ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
  * @mcast_flags: multicast flags announced by the orig node
  * @mcast_want_all_unsnoop_node: a list node for the
  *  mcast.want_all_unsnoopables list
@@ -259,7 +260,7 @@
  * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
-	uint8_t orig[ETH_ALEN];
+	u8 orig[ETH_ALEN];
 	struct hlist_head ifinfo_list;
 	struct batadv_orig_ifinfo *last_bonding_candidate;
 #ifdef CONFIG_BATMAN_ADV_DAT
@@ -268,21 +269,23 @@
 	unsigned long last_seen;
 	unsigned long bcast_seqno_reset;
 #ifdef CONFIG_BATMAN_ADV_MCAST
-	uint8_t mcast_flags;
+	/* synchronizes mcast tvlv specific orig changes */
+	spinlock_t mcast_handler_lock;
+	u8 mcast_flags;
 	struct hlist_node mcast_want_all_unsnoopables_node;
 	struct hlist_node mcast_want_all_ipv4_node;
 	struct hlist_node mcast_want_all_ipv6_node;
 #endif
-	uint8_t capabilities;
-	uint8_t capa_initialized;
+	unsigned long capabilities;
+	unsigned long capa_initialized;
 	atomic_t last_ttvn;
 	unsigned char *tt_buff;
-	int16_t tt_buff_len;
+	s16 tt_buff_len;
 	spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
 	/* prevents from changing the table while reading it */
 	spinlock_t tt_lock;
 	DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-	uint32_t last_bcast_seqno;
+	u32 last_bcast_seqno;
 	struct hlist_head neigh_list;
 	/* neigh_list_lock protects: neigh_list and router */
 	spinlock_t neigh_list_lock;
@@ -299,7 +302,7 @@
 	spinlock_t out_coding_list_lock; /* Protects out_coding_list */
 #endif
 	struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
-	struct list_head vlan_list;
+	struct hlist_head vlan_list;
 	spinlock_t vlan_list_lock; /* protects vlan_list */
 	struct batadv_orig_bat_iv bat_iv;
 };
@@ -313,10 +316,10 @@
  *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
  */
 enum batadv_orig_capabilities {
-	BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
-	BATADV_ORIG_CAPA_HAS_NC = BIT(1),
-	BATADV_ORIG_CAPA_HAS_TT = BIT(2),
-	BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
+	BATADV_ORIG_CAPA_HAS_DAT,
+	BATADV_ORIG_CAPA_HAS_NC,
+	BATADV_ORIG_CAPA_HAS_TT,
+	BATADV_ORIG_CAPA_HAS_MCAST,
 };
 
 /**
@@ -325,16 +328,14 @@
  * @orig_node: pointer to corresponding orig node
  * @bandwidth_down: advertised uplink download bandwidth
  * @bandwidth_up: advertised uplink upload bandwidth
- * @deleted: this struct is scheduled for deletion
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_gw_node {
 	struct hlist_node list;
 	struct batadv_orig_node *orig_node;
-	uint32_t bandwidth_down;
-	uint32_t bandwidth_up;
-	unsigned long deleted;
+	u32 bandwidth_down;
+	u32 bandwidth_up;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -355,7 +356,7 @@
 struct batadv_neigh_node {
 	struct hlist_node list;
 	struct batadv_orig_node *orig_node;
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	struct hlist_head ifinfo_list;
 	spinlock_t ifinfo_lock;	/* protects ifinfo_list and its members */
 	struct batadv_hard_iface *if_incoming;
@@ -375,11 +376,11 @@
  * @real_packet_count: counted result of real_bits
  */
 struct batadv_neigh_ifinfo_bat_iv {
-	uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
-	uint8_t tq_index;
-	uint8_t tq_avg;
+	u8 tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+	u8 tq_index;
+	u8 tq_avg;
 	DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-	uint8_t real_packet_count;
+	u8 real_packet_count;
 };
 
 /**
@@ -395,7 +396,7 @@
 	struct hlist_node list;
 	struct batadv_hard_iface *if_outgoing;
 	struct batadv_neigh_ifinfo_bat_iv bat_iv;
-	uint8_t last_ttl;
+	u8 last_ttl;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -408,7 +409,7 @@
  */
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bcast_duplist_entry {
-	uint8_t orig[ETH_ALEN];
+	u8 orig[ETH_ALEN];
 	__be32 crc;
 	unsigned long entrytime;
 };
@@ -534,13 +535,13 @@
 	struct list_head changes_list;
 	struct batadv_hashtable *local_hash;
 	struct batadv_hashtable *global_hash;
-	struct list_head req_list;
+	struct hlist_head req_list;
 	struct list_head roam_list;
 	spinlock_t changes_list_lock; /* protects changes */
 	spinlock_t req_list_lock; /* protects req_list */
 	spinlock_t roam_list_lock; /* protects roam_list */
 	unsigned char *last_changeset;
-	int16_t last_changeset_len;
+	s16 last_changeset_len;
 	/* protects last_changeset & last_changeset_len */
 	spinlock_t last_changeset_lock;
 	/* prevents from executing a commit while reading the table */
@@ -660,7 +661,7 @@
 	struct hlist_head want_all_unsnoopables_list;
 	struct hlist_head want_all_ipv4_list;
 	struct hlist_head want_all_ipv6_list;
-	uint8_t flags;
+	u8 flags;
 	bool enabled;
 	atomic_t num_disabled;
 	atomic_t num_want_all_unsnoopables;
@@ -778,7 +779,7 @@
 	atomic_t mesh_state;
 	struct net_device *soft_iface;
 	struct net_device_stats stats;
-	uint64_t __percpu *bat_counters; /* Per cpu counters */
+	u64 __percpu *bat_counters; /* Per cpu counters */
 	atomic_t aggregated_ogms;
 	atomic_t bonding;
 	atomic_t fragmentation;
@@ -800,8 +801,8 @@
 #ifdef CONFIG_BATMAN_ADV_DEBUG
 	atomic_t log_level;
 #endif
-	uint32_t isolation_mark;
-	uint32_t isolation_mark_mask;
+	u32 isolation_mark;
+	u32 isolation_mark_mask;
 	atomic_t bcast_seqno;
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
@@ -867,7 +868,7 @@
 struct batadv_socket_packet {
 	struct list_head list;
 	size_t icmp_len;
-	uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
+	u8 icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
 /**
@@ -888,14 +889,14 @@
  */
 #ifdef CONFIG_BATMAN_ADV_BLA
 struct batadv_bla_backbone_gw {
-	uint8_t orig[ETH_ALEN];
+	u8 orig[ETH_ALEN];
 	unsigned short vid;
 	struct hlist_node hash_entry;
 	struct batadv_priv *bat_priv;
 	unsigned long lasttime;
 	atomic_t wait_periods;
 	atomic_t request_sent;
-	uint16_t crc;
+	u16 crc;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
@@ -911,7 +912,7 @@
  * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_bla_claim {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	unsigned short vid;
 	struct batadv_bla_backbone_gw *backbone_gw;
 	unsigned long lasttime;
@@ -933,10 +934,10 @@
  * @rcu: struct used for freeing in an RCU-safe manner
  */
 struct batadv_tt_common_entry {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	unsigned short vid;
 	struct hlist_node hash_entry;
-	uint16_t flags;
+	u16 flags;
 	unsigned long added_at;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -978,7 +979,7 @@
  */
 struct batadv_tt_orig_list_entry {
 	struct batadv_orig_node *orig_node;
-	uint8_t ttvn;
+	u8 ttvn;
 	struct hlist_node list;
 	atomic_t refcount;
 	struct rcu_head rcu;
@@ -1001,9 +1002,9 @@
  * @list: list node for batadv_priv_tt::req_list
  */
 struct batadv_tt_req_node {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	unsigned long issued_at;
-	struct list_head list;
+	struct hlist_node list;
 };
 
 /**
@@ -1015,7 +1016,7 @@
  * @list: list node for batadv_priv_tt::roam_list
  */
 struct batadv_tt_roam_node {
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	atomic_t counter;
 	unsigned long first_time;
 	struct list_head list;
@@ -1032,7 +1033,7 @@
  */
 struct batadv_nc_node {
 	struct list_head list;
-	uint8_t addr[ETH_ALEN];
+	u8 addr[ETH_ALEN];
 	atomic_t refcount;
 	struct rcu_head rcu;
 	struct batadv_orig_node *orig_node;
@@ -1056,8 +1057,8 @@
 	atomic_t refcount;
 	struct list_head packet_list;
 	spinlock_t packet_list_lock; /* Protects packet_list */
-	uint8_t next_hop[ETH_ALEN];
-	uint8_t prev_hop[ETH_ALEN];
+	u8 next_hop[ETH_ALEN];
+	u8 prev_hop[ETH_ALEN];
 	unsigned long last_valid;
 };
 
@@ -1109,11 +1110,11 @@
 struct batadv_forw_packet {
 	struct hlist_node list;
 	unsigned long send_time;
-	uint8_t own;
+	u8 own;
 	struct sk_buff *skb;
-	uint16_t packet_len;
-	uint32_t direct_link_flags;
-	uint8_t num_packets;
+	u16 packet_len;
+	u32 direct_link_flags;
+	u8 num_packets;
 	struct delayed_work delayed_work;
 	struct batadv_hard_iface *if_incoming;
 	struct batadv_hard_iface *if_outgoing;
@@ -1188,7 +1189,7 @@
  */
 struct batadv_dat_entry {
 	__be32 ip;
-	uint8_t mac_addr[ETH_ALEN];
+	u8 mac_addr[ETH_ALEN];
 	unsigned short vid;
 	unsigned long last_update;
 	struct hlist_node hash_entry;
@@ -1250,14 +1251,13 @@
 	struct hlist_node list;
 	void (*ogm_handler)(struct batadv_priv *bat_priv,
 			    struct batadv_orig_node *orig,
-			    uint8_t flags,
-			    void *tvlv_value, uint16_t tvlv_value_len);
+			    u8 flags, void *tvlv_value, u16 tvlv_value_len);
 	int (*unicast_handler)(struct batadv_priv *bat_priv,
-			       uint8_t *src, uint8_t *dst,
-			       void *tvlv_value, uint16_t tvlv_value_len);
-	uint8_t type;
-	uint8_t version;
-	uint8_t flags;
+			       u8 *src, u8 *dst,
+			       void *tvlv_value, u16 tvlv_value_len);
+	u8 type;
+	u8 version;
+	u8 flags;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
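
One pattern worth noting in the types.h changes above: capabilities and capa_initialized move from a u8 holding BIT() masks to an unsigned long holding plain bit numbers, because set_bit()/test_bit()/clear_bit() take a bit index and an unsigned long pointer and update it atomically. A small illustrative sketch with made-up names (not part of the patch):

#include <linux/bitops.h>
#include <linux/types.h>

enum example_capabilities {
	EXAMPLE_CAPA_HAS_TT,		/* was a BIT(n)-style mask */
	EXAMPLE_CAPA_HAS_MCAST,
};

struct example_orig {			/* hypothetical stand-in */
	unsigned long capa_initialized;	/* was: u8 capa_initialized */
};

static void example_mark_tt_init(struct example_orig *orig)
{
	/* was: orig->capa_initialized |= <mask>; (plain, racy RMW) */
	set_bit(EXAMPLE_CAPA_HAS_TT, &orig->capa_initialized);
}

static bool example_has_tt_init(struct example_orig *orig)
{
	/* was: orig->capa_initialized & <mask>; */
	return test_bit(EXAMPLE_CAPA_HAS_TT, &orig->capa_initialized);
}

Using the atomic helpers also removes the read-modify-write race that the plain |= on a shared u8 had.
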
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 2fb7b30..131e79c 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -85,7 +85,7 @@
 
 static inline struct lowpan_dev *lowpan_dev(const struct net_device *netdev)
 {
-	return netdev_priv(netdev);
+	return (struct lowpan_dev *)lowpan_priv(netdev)->priv;
 }
 
 static inline void peer_add(struct lowpan_dev *dev, struct lowpan_peer *peer)
@@ -848,8 +848,9 @@
 	struct net_device *netdev;
 	int err = 0;
 
-	netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
-			      NET_NAME_UNKNOWN, netdev_setup);
+	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev)),
+			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
+			      netdev_setup);
 	if (!netdev)
 		return -ENOMEM;
 
@@ -859,19 +860,7 @@
 	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
 	SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
-	err = register_netdev(netdev);
-	if (err < 0) {
-		BT_INFO("register_netdev failed %d", err);
-		free_netdev(netdev);
-		goto out;
-	}
-
-	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
-	       netdev->ifindex, &chan->dst, chan->dst_type,
-	       &chan->src, chan->src_type);
-	set_bit(__LINK_STATE_PRESENT, &netdev->state);
-
-	*dev = netdev_priv(netdev);
+	*dev = lowpan_dev(netdev);
 	(*dev)->netdev = netdev;
 	(*dev)->hdev = chan->conn->hcon->hdev;
 	INIT_LIST_HEAD(&(*dev)->peers);
@@ -881,6 +870,23 @@
 	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
 	spin_unlock(&devices_lock);
 
+	lowpan_netdev_setup(netdev, LOWPAN_LLTYPE_BTLE);
+
+	err = register_netdev(netdev);
+	if (err < 0) {
+		BT_INFO("register_netdev failed %d", err);
+		spin_lock(&devices_lock);
+		list_del_rcu(&(*dev)->list);
+		spin_unlock(&devices_lock);
+		free_netdev(netdev);
+		goto out;
+	}
+
+	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
+	       netdev->ifindex, &chan->dst, chan->dst_type,
+	       &chan->src, chan->src_type);
+	set_bit(__LINK_STATE_PRESENT, &netdev->state);
+
 	return 0;
 
 out:
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index b8c794b..95d1a66 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -53,6 +53,11 @@
 
 source "net/bluetooth/hidp/Kconfig"
 
+config BT_HS
+	bool "Bluetooth High Speed (HS) features"
+	depends on BT_BREDR
+	default y
+
 config BT_LE
 	bool "Bluetooth Low Energy (LE) features"
 	depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 29c12ae..2b15ae8 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -13,9 +13,10 @@
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 	hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
-	a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+	ecc.o hci_request.o mgmt_util.o
 
 bluetooth-$(CONFIG_BT_BREDR) += sco.o
+bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 5a04eb1..5f123c3 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -286,11 +287,21 @@
 	return 0;
 }
 
+static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+					 u16 opcode)
+{
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	a2mp_send_getinfo_rsp(hdev);
+}
+
 static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
 			    struct a2mp_cmd *hdr)
 {
 	struct a2mp_info_req *req  = (void *) skb->data;
 	struct hci_dev *hdev;
+	struct hci_request hreq;
+	int err = 0;
 
 	if (le16_to_cpu(hdr->len) < sizeof(*req))
 		return -EINVAL;
@@ -311,7 +322,11 @@
 	}
 
 	set_bit(READ_LOC_AMP_INFO, &mgr->state);
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+	hci_req_init(&hreq, hdev);
+	hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+	err = hci_req_run(&hreq, read_local_amp_info_complete);
+	if (err < 0)
+		a2mp_send_getinfo_rsp(hdev);
 
 done:
 	if (hdev)
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
index 296f665..a4ff3ea 100644
--- a/net/bluetooth/a2mp.h
+++ b/net/bluetooth/a2mp.h
@@ -130,10 +130,29 @@
 #define A2MP_STATUS_SECURITY_VIOLATION		0x06
 
 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+
+#if IS_ENABLED(CONFIG_BT_HS)
 int amp_mgr_put(struct amp_mgr *mgr);
 struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
 				       struct sk_buff *skb);
 void a2mp_discover_amp(struct l2cap_chan *chan);
+#else
+static inline int amp_mgr_put(struct amp_mgr *mgr)
+{
+	return 0;
+}
+
+static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+						     struct sk_buff *skb)
+{
+	return NULL;
+}
+
+static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+}
+#endif
+
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
 void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
 void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
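
The stub block above is the usual idiom for making a subsystem compile-time optional: real prototypes when the Kconfig option is enabled, empty static inlines otherwise, so callers build without #ifdef clutter either way. A generic sketch of the same idiom (CONFIG_EXAMPLE_FEATURE and the function below are made up for illustration):

#include <linux/kconfig.h>

struct example_ctx;

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
int example_feature_start(struct example_ctx *ctx);
#else
static inline int example_feature_start(struct example_ctx *ctx)
{
	/* Feature compiled out: succeed silently, just as the a2mp/amp
	 * stubs above return 0 or do nothing.
	 */
	return 0;
}
#endif
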
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index ee016f0..e32f341 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <crypto/hash.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -220,10 +221,49 @@
 	return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
 }
 
+static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+					  u16 opcode, struct sk_buff *skb)
+{
+	struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+	struct amp_assoc *assoc = &hdev->loc_assoc;
+	size_t rem_len, frag_len;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+	if (rp->status)
+		goto send_rsp;
+
+	frag_len = skb->len - sizeof(*rp);
+	rem_len = __le16_to_cpu(rp->rem_len);
+
+	if (rem_len > frag_len) {
+		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+		assoc->offset += frag_len;
+
+		/* Read other fragments */
+		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+		return;
+	}
+
+	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+	assoc->len = assoc->offset + rem_len;
+	assoc->offset = 0;
+
+send_rsp:
+	/* Send A2MP Rsp when all fragments are received */
+	a2mp_send_getampassoc_rsp(hdev, rp->status);
+	a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
 void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
 {
 	struct hci_cp_read_local_amp_assoc cp;
 	struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+	struct hci_request req;
+	int err = 0;
 
 	BT_DBG("%s handle %d", hdev->name, phy_handle);
 
@@ -231,12 +271,18 @@
 	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 	cp.len_so_far = cpu_to_le16(loc_assoc->offset);
 
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+	if (err < 0)
+		a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
 {
 	struct hci_cp_read_local_amp_assoc cp;
+	struct hci_request req;
+	int err = 0;
 
 	memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
 	memset(&cp, 0, sizeof(cp));
@@ -244,7 +290,11 @@
 	cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
 	set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+	if (err < 0)
+		a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
@@ -252,6 +302,8 @@
 {
 	struct hci_cp_read_local_amp_assoc cp;
 	struct amp_mgr *mgr = hcon->amp_mgr;
+	struct hci_request req;
+	int err = 0;
 
 	cp.phy_handle = hcon->handle;
 	cp.len_so_far = cpu_to_le16(0);
@@ -260,7 +312,25 @@
 	set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
 
 	/* Read Local AMP Assoc final link information data */
-	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+	err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+	if (err < 0)
+		a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+}
+
+static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+					    u16 opcode, struct sk_buff *skb)
+{
+	struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+
+	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+	       hdev->name, rp->status, rp->phy_handle);
+
+	if (rp->status)
+		return;
+
+	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
 }
 
 /* Write AMP Assoc data fragments, returns true with last fragment written*/
@@ -270,6 +340,7 @@
 	struct hci_cp_write_remote_amp_assoc *cp;
 	struct amp_mgr *mgr = hcon->amp_mgr;
 	struct amp_ctrl *ctrl;
+	struct hci_request req;
 	u16 frag_len, len;
 
 	ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
@@ -307,7 +378,9 @@
 
 	amp_ctrl_put(ctrl);
 
-	hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+	hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
 	kfree(cp);
 
@@ -344,10 +417,37 @@
 	amp_write_rem_assoc_frag(hdev, hcon);
 }
 
+static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+				    u16 opcode)
+{
+	struct hci_cp_create_phy_link *cp;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (status) {
+		struct hci_conn *hcon;
+
+		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+		if (hcon)
+			hci_conn_del(hcon);
+	} else {
+		amp_write_remote_assoc(hdev, cp->phy_handle);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
 void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
 			struct hci_conn *hcon)
 {
 	struct hci_cp_create_phy_link cp;
+	struct hci_request req;
 
 	cp.phy_handle = hcon->handle;
 
@@ -360,13 +460,33 @@
 		return;
 	}
 
-	hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+	hci_req_run(&req, create_phylink_complete);
+}
+
+static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+				    u16 opcode)
+{
+	struct hci_cp_accept_phy_link *cp;
+
+	BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+	if (!cp)
+		return;
+
+	amp_write_remote_assoc(hdev, cp->phy_handle);
 }
 
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
 			struct hci_conn *hcon)
 {
 	struct hci_cp_accept_phy_link cp;
+	struct hci_request req;
 
 	cp.phy_handle = hcon->handle;
 
@@ -379,7 +499,9 @@
 		return;
 	}
 
-	hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+	hci_req_init(&req, hdev);
+	hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+	hci_req_run(&req, accept_phylink_complete);
 }
 
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
index 7ea3db7..8848f81 100644
--- a/net/bluetooth/amp.h
+++ b/net/bluetooth/amp.h
@@ -44,6 +44,20 @@
 			struct hci_conn *hcon);
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
 			struct hci_conn *hcon);
+
+#if IS_ENABLED(CONFIG_BT_HS)
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+#else
+static inline void amp_create_logical_link(struct l2cap_chan *chan)
+{
+}
+
+static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+}
+#endif
+
 void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
 void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
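
The amp.c and a2mp.c hunks above all follow the same conversion: a bare hci_send_cmd() whose result used to be picked up by a hci_cc_*()/hci_cs_*() handler in hci_event.c becomes an hci_request with a completion callback next to the issuing code. A hedged sketch of that transaction pattern, using a placeholder helper (example_send() and its callback are not real kernel functions):

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_request.h"

/* Runs from the HCI event path once the controller answers. */
static void example_cmd_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status 0x%2.2x opcode 0x%4.4x", hdev->name, status, opcode);
}

/* was: hci_send_cmd(hdev, opcode, 0, NULL); with the reply handled in
 * the big switch statement in hci_event.c.
 */
static int example_send(struct hci_dev *hdev, u16 opcode)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, opcode, 0, NULL);

	/* A negative return means the request never went out, so the
	 * caller must generate the failure response itself, as the
	 * amp.c error paths above do.
	 */
	return hci_req_run(&req, example_cmd_complete);
}
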
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index b0c6c6a..9a503387 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -100,9 +100,9 @@
 static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
 {
 	struct cmtp_application *app;
-	struct list_head *p, *n;
+	struct list_head *p;
 
-	list_for_each_safe(p, n, &session->applications) {
+	list_for_each(p, &session->applications) {
 		app = list_entry(p, struct cmtp_application, list);
 		switch (pattern) {
 		case CMTP_MSGNUM:
@@ -511,13 +511,13 @@
 	struct capi_ctr *ctrl = m->private;
 	struct cmtp_session *session = ctrl->driverdata;
 	struct cmtp_application *app;
-	struct list_head *p, *n;
+	struct list_head *p;
 
 	seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
 	seq_printf(m, "addr %s\n", session->name);
 	seq_printf(m, "ctrl %d\n", session->num);
 
-	list_for_each_safe(p, n, &session->applications) {
+	list_for_each(p, &session->applications) {
 		app = list_entry(p, struct cmtp_application, list);
 		seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
 	}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 2c48bf0..b4548c73 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -64,6 +64,48 @@
 	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
 }
 
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+{
+	struct hci_conn_params *params;
+	struct smp_irk *irk;
+	bdaddr_t *bdaddr;
+	u8 bdaddr_type;
+
+	bdaddr = &conn->dst;
+	bdaddr_type = conn->dst_type;
+
+	/* Check if we need to convert to identity address */
+	irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
+	if (irk) {
+		bdaddr = &irk->bdaddr;
+		bdaddr_type = irk->addr_type;
+	}
+
+	params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
+	if (!params)
+		return;
+
+	/* The connection attempt was doing a scan for a new RPA and is
+	 * still in the scan phase. If the params are not associated with
+	 * any other autoconnect action, remove them completely; otherwise
+	 * just unmark them as waiting by clearing explicit_connect.
+	 */
+	if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
+		hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
+	else
+		params->explicit_connect = false;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_connect_le_scan_remove(struct hci_conn *conn)
+{
+	hci_connect_le_scan_cleanup(conn);
+
+	hci_conn_hash_del(conn->hdev, conn);
+	hci_update_background_scan(conn->hdev);
+}
+
 static void hci_acl_create_connection(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
@@ -340,8 +382,12 @@
 		if (conn->out) {
 			if (conn->type == ACL_LINK)
 				hci_acl_create_connection_cancel(conn);
-			else if (conn->type == LE_LINK)
-				hci_le_create_connection_cancel(conn);
+			else if (conn->type == LE_LINK) {
+				if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+					hci_connect_le_scan_remove(conn);
+				else
+					hci_le_create_connection_cancel(conn);
+			}
 		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
 			hci_reject_sco(conn);
 		}
@@ -637,15 +683,18 @@
 {
 	struct hci_conn *conn;
 
-	if (status == 0)
-		return;
+	hci_dev_lock(hdev);
+
+	conn = hci_lookup_le_connect(hdev);
+
+	if (!status) {
+		hci_connect_le_scan_cleanup(conn);
+		goto done;
+	}
 
 	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
 	       status);
 
-	hci_dev_lock(hdev);
-
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
 	if (!conn)
 		goto done;
 
@@ -685,6 +734,7 @@
 	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
 
 	conn->state = BT_CONNECT;
+	clear_bit(HCI_CONN_SCANNING, &conn->flags);
 }
 
 static void hci_req_directed_advertising(struct hci_request *req,
@@ -728,7 +778,7 @@
 				u8 role)
 {
 	struct hci_conn_params *params;
-	struct hci_conn *conn;
+	struct hci_conn *conn, *conn_unfinished;
 	struct smp_irk *irk;
 	struct hci_request req;
 	int err;
@@ -751,26 +801,29 @@
 	 * and return the object found.
 	 */
 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	conn_unfinished = NULL;
 	if (conn) {
-		conn->pending_sec_level = sec_level;
-		goto done;
+		if (conn->state == BT_CONNECT &&
+		    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
+			BT_DBG("will continue unfinished conn %pMR", dst);
+			conn_unfinished = conn;
+		} else {
+			if (conn->pending_sec_level < sec_level)
+				conn->pending_sec_level = sec_level;
+			goto done;
+		}
 	}
 
 	/* Since the controller supports only one LE connection attempt at a
 	 * time, we return -EBUSY if there is any connection attempt running.
 	 */
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-	if (conn)
+	if (hci_lookup_le_connect(hdev))
 		return ERR_PTR(-EBUSY);
 
 	/* When given an identity address with existing identity
 	 * resolving key, the connection needs to be established
 	 * to a resolvable random address.
 	 *
-	 * This uses the cached random resolvable address from
-	 * a previous scan. When no cached address is available,
-	 * try connecting to the identity address instead.
-	 *
 	 * Storing the resolvable random address is required here
 	 * to handle connection failures. The address will later
 	 * be resolved back into the original identity address
@@ -782,15 +835,23 @@
 		dst_type = ADDR_LE_DEV_RANDOM;
 	}
 
-	conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	if (conn_unfinished) {
+		conn = conn_unfinished;
+		bacpy(&conn->dst, dst);
+	} else {
+		conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	}
+
 	if (!conn)
 		return ERR_PTR(-ENOMEM);
 
 	conn->dst_type = dst_type;
 	conn->sec_level = BT_SECURITY_LOW;
-	conn->pending_sec_level = sec_level;
 	conn->conn_timeout = conn_timeout;
 
+	if (!conn_unfinished)
+		conn->pending_sec_level = sec_level;
+
 	hci_req_init(&req, hdev);
 
 	/* Disable advertising if we're active. For master role
@@ -855,6 +916,144 @@
 	}
 
 done:
+	/* If this is the continuation of a connect started by
+	 * hci_connect_le_scan, hci_conn_hold was already called there and
+	 * calling it again would throw the reference count off.
+	 */
+	if (!conn_unfinished)
+		hci_conn_hold(conn);
+
+	return conn;
+}
+
+static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
+					 u16 opcode)
+{
+	struct hci_conn *conn;
+
+	if (!status)
+		return;
+
+	BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
+	       status);
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	if (conn)
+		hci_le_conn_failed(conn, status);
+
+	hci_dev_unlock(hdev);
+}
+
+static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
+{
+	struct hci_conn *conn;
+
+	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
+	if (!conn)
+		return false;
+
+	if (conn->dst_type != type)
+		return false;
+
+	if (conn->state != BT_CONNECTED)
+		return false;
+
+	return true;
+}
+
+/* This function requires the caller holds hdev->lock */
+static int hci_explicit_conn_params_set(struct hci_request *req,
+					bdaddr_t *addr, u8 addr_type)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct hci_conn_params *params;
+
+	if (is_connected(hdev, addr, addr_type))
+		return -EISCONN;
+
+	params = hci_conn_params_add(hdev, addr, addr_type);
+	if (!params)
+		return -EIO;
+
+	/* If we created new params, or existing params were marked as disabled,
+	 * mark them to be used just once to connect.
+	 */
+	if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
+		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+		list_del_init(&params->action);
+		list_add(&params->action, &hdev->pend_le_conns);
+	}
+
+	params->explicit_connect = true;
+	__hci_update_background_scan(req);
+
+	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
+	       params->auto_connect);
+
+	return 0;
+}
+
+/* This function requires the caller holds hdev->lock */
+struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
+				     u8 dst_type, u8 sec_level,
+				     u16 conn_timeout, u8 role)
+{
+	struct hci_conn *conn;
+	struct hci_request req;
+	int err;
+
+	/* Let's make sure that LE is enabled. */
+	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+		if (lmp_le_capable(hdev))
+			return ERR_PTR(-ECONNREFUSED);
+
+		return ERR_PTR(-EOPNOTSUPP);
+	}
+
+	/* Some devices send ATT messages as soon as the physical link is
+	 * established. To be able to handle these ATT messages, the user-
+	 * space first establishes the connection and then starts the pairing
+	 * process.
+	 *
+	 * So if a hci_conn object already exists for the following connection
+	 * attempt, we simply update pending_sec_level and auth_type fields
+	 * and return the object found.
+	 */
+	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+	if (conn) {
+		if (conn->pending_sec_level < sec_level)
+			conn->pending_sec_level = sec_level;
+		goto done;
+	}
+
+	BT_DBG("requesting refresh of dst_addr");
+
+	conn = hci_conn_add(hdev, LE_LINK, dst, role);
+	if (!conn)
+		return ERR_PTR(-ENOMEM);
+
+	hci_req_init(&req, hdev);
+
+	if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
+		return ERR_PTR(-EBUSY);
+
+	conn->state = BT_CONNECT;
+	set_bit(HCI_CONN_SCANNING, &conn->flags);
+
+	err = hci_req_run(&req, hci_connect_le_scan_complete);
+	if (err && err != -ENODATA) {
+		hci_conn_del(conn);
+		return ERR_PTR(err);
+	}
+
+	conn->dst_type = dst_type;
+	conn->sec_level = BT_SECURITY_LOW;
+	conn->pending_sec_level = sec_level;
+	conn->conn_timeout = conn_timeout;
+
+done:
 	hci_conn_hold(conn);
 	return conn;
 }
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2f8fb33..adcbc74 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2822,10 +2822,6 @@
 {
 	struct hci_conn_params *params;
 
-	/* The conn params list only contains identity addresses */
-	if (!hci_is_identity_address(addr, addr_type))
-		return NULL;
-
 	list_for_each_entry(params, &hdev->le_conn_params, list) {
 		if (bacmp(&params->addr, addr) == 0 &&
 		    params->addr_type == addr_type) {
@@ -2842,10 +2838,6 @@
 {
 	struct hci_conn_params *param;
 
-	/* The list only contains identity addresses */
-	if (!hci_is_identity_address(addr, addr_type))
-		return NULL;
-
 	list_for_each_entry(param, list, action) {
 		if (bacmp(&param->addr, addr) == 0 &&
 		    param->addr_type == addr_type)
@@ -2856,14 +2848,35 @@
 }
 
 /* This function requires the caller holds hdev->lock */
+struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
+						    bdaddr_t *addr,
+						    u8 addr_type)
+{
+	struct hci_conn_params *param;
+
+	list_for_each_entry(param, &hdev->pend_le_conns, action) {
+		if (bacmp(&param->addr, addr) == 0 &&
+		    param->addr_type == addr_type &&
+		    param->explicit_connect)
+			return param;
+	}
+
+	list_for_each_entry(param, &hdev->pend_le_reports, action) {
+		if (bacmp(&param->addr, addr) == 0 &&
+		    param->addr_type == addr_type &&
+		    param->explicit_connect)
+			return param;
+	}
+
+	return NULL;
+}
+
+/* This function requires the caller holds hdev->lock */
 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 					    bdaddr_t *addr, u8 addr_type)
 {
 	struct hci_conn_params *params;
 
-	if (!hci_is_identity_address(addr, addr_type))
-		return NULL;
-
 	params = hci_conn_params_lookup(hdev, addr, addr_type);
 	if (params)
 		return params;
@@ -2927,6 +2940,15 @@
 	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
 		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
 			continue;
+
+		/* If trying to establish a one-time connection to a disabled
+		 * device, leave the params but mark them as used just once.
+		 */
+		if (params->explicit_connect) {
+			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+			continue;
+		}
+
 		list_del(&params->list);
 		kfree(params);
 	}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 32363c2..1860418 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -823,7 +823,7 @@
 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
 	if (rp->status)
-		goto a2mp_rsp;
+		return;
 
 	hdev->amp_status = rp->amp_status;
 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -835,46 +835,6 @@
 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
-a2mp_rsp:
-	a2mp_send_getinfo_rsp(hdev);
-}
-
-static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
-					struct sk_buff *skb)
-{
-	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
-	struct amp_assoc *assoc = &hdev->loc_assoc;
-	size_t rem_len, frag_len;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-	if (rp->status)
-		goto a2mp_rsp;
-
-	frag_len = skb->len - sizeof(*rp);
-	rem_len = __le16_to_cpu(rp->rem_len);
-
-	if (rem_len > frag_len) {
-		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
-		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
-		assoc->offset += frag_len;
-
-		/* Read other fragments */
-		amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
-		return;
-	}
-
-	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
-	assoc->len = assoc->offset + rem_len;
-	assoc->offset = 0;
-
-a2mp_rsp:
-	/* Send A2MP Rsp when all fragments are received */
-	a2mp_send_getampassoc_rsp(hdev, rp->status);
-	a2mp_send_create_phy_link_req(hdev, rp->status);
 }
 
 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -1099,7 +1059,7 @@
 
 		hci_dev_set_flag(hdev, HCI_LE_ADV);
 
-		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+		conn = hci_lookup_le_connect(hdev);
 		if (conn)
 			queue_delayed_work(hdev->workqueue,
 					   &conn->le_conn_timeout,
@@ -1409,20 +1369,6 @@
 	hci_dev_unlock(hdev);
 }
 
-static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
-					  struct sk_buff *skb)
-{
-	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
-
-	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
-	       hdev->name, rp->status, rp->phy_handle);
-
-	if (rp->status)
-		return;
-
-	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1944,47 +1890,6 @@
 	hci_dev_unlock(hdev);
 }
 
-static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
-{
-	struct hci_cp_create_phy_link *cp;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
-	if (!cp)
-		return;
-
-	hci_dev_lock(hdev);
-
-	if (status) {
-		struct hci_conn *hcon;
-
-		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
-		if (hcon)
-			hci_conn_del(hcon);
-	} else {
-		amp_write_remote_assoc(hdev, cp->phy_handle);
-	}
-
-	hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
-{
-	struct hci_cp_accept_phy_link *cp;
-
-	BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-	if (status)
-		return;
-
-	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
-	if (!cp)
-		return;
-
-	amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 {
 	struct hci_cp_le_create_conn *cp;
@@ -2998,10 +2903,6 @@
 		hci_cc_read_clock(hdev, skb);
 		break;
 
-	case HCI_OP_READ_LOCAL_AMP_ASSOC:
-		hci_cc_read_local_amp_assoc(hdev, skb);
-		break;
-
 	case HCI_OP_READ_INQ_RSP_TX_POWER:
 		hci_cc_read_inq_rsp_tx_power(hdev, skb);
 		break;
@@ -3106,10 +3007,6 @@
 		hci_cc_set_adv_param(hdev, skb);
 		break;
 
-	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
-		hci_cc_write_remote_amp_assoc(hdev, skb);
-		break;
-
 	case HCI_OP_READ_RSSI:
 		hci_cc_read_rssi(hdev, skb);
 		break;
@@ -3193,14 +3090,6 @@
 		hci_cs_setup_sync_conn(hdev, ev->status);
 		break;
 
-	case HCI_OP_CREATE_PHY_LINK:
-		hci_cs_create_phylink(hdev, ev->status);
-		break;
-
-	case HCI_OP_ACCEPT_PHY_LINK:
-		hci_cs_accept_phylink(hdev, ev->status);
-		break;
-
 	case HCI_OP_SNIFF_MODE:
 		hci_cs_sniff_mode(hdev, ev->status);
 		break;
@@ -3837,17 +3726,25 @@
 		if (ev->link_type == ESCO_LINK)
 			goto unlock;
 
+		/* When the link type in the event indicates SCO connection
+		 * and lookup of the connection object fails, then check
+		 * if an eSCO connection object exists.
+		 *
+		 * The core limits the synchronous connections to either
+		 * SCO or eSCO. The eSCO connection is preferred and is
+		 * attempted first; until it is successfully established,
+		 * the link type will be hinted as eSCO.
+		 */
 		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
 		if (!conn)
 			goto unlock;
-
-		conn->type = SCO_LINK;
 	}
 
 	switch (ev->status) {
 	case 0x00:
 		conn->handle = __le16_to_cpu(ev->handle);
 		conn->state  = BT_CONNECTED;
+		conn->type   = ev->link_type;
 
 		hci_debugfs_create_conn(conn);
 		hci_conn_add_sysfs(conn);
@@ -4399,6 +4296,23 @@
 	hci_dev_unlock(hdev);
 }
 
+#if IS_ENABLED(CONFIG_BT_HS)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_ev_channel_selected *ev = (void *)skb->data;
+	struct hci_conn *hcon;
+
+	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+	skb_pull(skb, sizeof(*ev));
+
+	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+	if (!hcon)
+		return;
+
+	amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
 				      struct sk_buff *skb)
 {
@@ -4522,6 +4436,7 @@
 
 	hci_dev_unlock(hdev);
 }
+#endif
 
 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
@@ -4540,7 +4455,7 @@
 	 */
 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+	conn = hci_lookup_le_connect(hdev);
 	if (!conn) {
 		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
 		if (!conn) {
@@ -4733,42 +4648,49 @@
 	/* If we're not connectable only connect devices that we have in
 	 * our pend_le_conns list.
 	 */
-	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
-					   addr, addr_type);
+	params = hci_explicit_connect_lookup(hdev, addr, addr_type);
+
 	if (!params)
 		return NULL;
 
-	switch (params->auto_connect) {
-	case HCI_AUTO_CONN_DIRECT:
-		/* Only devices advertising with ADV_DIRECT_IND are
-		 * triggering a connection attempt. This is allowing
-		 * incoming connections from slave devices.
-		 */
-		if (adv_type != LE_ADV_DIRECT_IND)
+	if (!params->explicit_connect) {
+		switch (params->auto_connect) {
+		case HCI_AUTO_CONN_DIRECT:
+			/* Only devices advertising with ADV_DIRECT_IND are
+			 * triggering a connection attempt. This is allowing
+			 * incoming connections from slave devices.
+			 */
+			if (adv_type != LE_ADV_DIRECT_IND)
+				return NULL;
+			break;
+		case HCI_AUTO_CONN_ALWAYS:
+			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
+			 * are triggering a connection attempt. This means
+			 * that incoming connections from slave devices are
+			 * accepted and also outgoing connections to slave
+			 * devices are established when found.
+			 */
+			break;
+		default:
 			return NULL;
-		break;
-	case HCI_AUTO_CONN_ALWAYS:
-		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
-		 * are triggering a connection attempt. This means
-		 * that incoming connections from slave devices are
-		 * accepted and also outgoing connections to slave
-		 * devices are established when found.
-		 */
-		break;
-	default:
-		return NULL;
+		}
 	}
 
 	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
 			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
 	if (!IS_ERR(conn)) {
-		/* Store the pointer since we don't really have any
+		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
+		 * by the higher layer that tried to connect; if not, then
+		 * store the pointer since we don't really have any
 		 * other owner of the object besides the params that
 		 * triggered it. This way we can abort the connection if
 		 * the parameters get removed and keep the reference
 		 * count consistent once the connection is established.
 		 */
-		params->conn = hci_conn_get(conn);
+
+		if (!params->explicit_connect)
+			params->conn = hci_conn_get(conn);
+
 		return conn;
 	}
 
@@ -5206,22 +5128,6 @@
 	}
 }
 
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
-	struct hci_ev_channel_selected *ev = (void *) skb->data;
-	struct hci_conn *hcon;
-
-	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
-	skb_pull(skb, sizeof(*ev));
-
-	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
-	if (!hcon)
-		return;
-
-	amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
 				 u8 event, struct sk_buff *skb)
 {
@@ -5442,14 +5348,15 @@
 		hci_le_meta_evt(hdev, skb);
 		break;
 
-	case HCI_EV_CHANNEL_SELECTED:
-		hci_chan_selected_evt(hdev, skb);
-		break;
-
 	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
 		hci_remote_oob_data_request_evt(hdev, skb);
 		break;
 
+#if IS_ENABLED(CONFIG_BT_HS)
+	case HCI_EV_CHANNEL_SELECTED:
+		hci_chan_selected_evt(hdev, skb);
+		break;
+
 	case HCI_EV_PHY_LINK_COMPLETE:
 		hci_phy_link_complete_evt(hdev, skb);
 		break;
@@ -5465,6 +5372,7 @@
 	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
 		hci_disconn_phylink_complete_evt(hdev, skb);
 		break;
+#endif
 
 	case HCI_EV_NUM_COMP_BLOCKS:
 		hci_num_comp_blocks_evt(hdev, skb);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index d6025d6..b736922 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -317,7 +317,7 @@
 	 * address be updated at the next cycle.
 	 */
 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
-	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+	    hci_lookup_le_connect(hdev)) {
 		BT_DBG("Deferring random address update");
 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
 		return;
@@ -479,7 +479,6 @@
 void __hci_update_background_scan(struct hci_request *req)
 {
 	struct hci_dev *hdev = req->hdev;
-	struct hci_conn *conn;
 
 	if (!test_bit(HCI_UP, &hdev->flags) ||
 	    test_bit(HCI_INIT, &hdev->flags) ||
@@ -529,8 +528,7 @@
 		 * since some controllers are not able to scan and connect at
 		 * the same time.
 		 */
-		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-		if (conn)
+		if (hci_lookup_le_connect(hdev))
 			return;
 
 		/* If controller is currently scanning, we stop it to ensure we
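Several call sites above replace the open-coded hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT) lookup with hci_lookup_le_connect(). The helper itself is not part of these hunks; a minimal sketch of an equivalent wrapper, assuming it lives in include/net/bluetooth/hci_core.h, would be:

static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev)
{
	/* Equivalent to the lookup it replaces at the call sites above;
	 * the real helper may apply extra filtering (illustrative only).
	 */
	return hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
}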
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 45fffa4..7c65ee2 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7113,8 +7113,10 @@
 		else
 			role = HCI_ROLE_MASTER;
 
-		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
-				      HCI_LE_CONN_TIMEOUT, role);
+		hcon = hci_connect_le_scan(hdev, dst, dst_type,
+					   chan->sec_level,
+					   HCI_LE_CONN_TIMEOUT,
+					   role);
 	} else {
 		u8 auth_type = l2cap_get_auth_type(chan);
 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 2442877..586b3d5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1054,18 +1054,23 @@
 	sock_put(sk);
 }
 
-static int __l2cap_wait_ack(struct sock *sk)
+static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
 {
-	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	DECLARE_WAITQUEUE(wait, current);
 	int err = 0;
-	int timeo = HZ/5;
+	int timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
+	/* Timeout to prevent infinite loop */
+	unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT;
 
 	add_wait_queue(sk_sleep(sk), &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
-	while (chan->unacked_frames > 0 && chan->conn) {
+	do {
+		BT_DBG("Waiting for %d ACKs, timeout %04d ms",
+		       chan->unacked_frames, time_after(jiffies, timeout) ? 0 :
+		       jiffies_to_msecs(timeout - jiffies));
+
 		if (!timeo)
-			timeo = HZ/5;
+			timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
 
 		if (signal_pending(current)) {
 			err = sock_intr_errno(timeo);
@@ -1080,7 +1085,15 @@
 		err = sock_error(sk);
 		if (err)
 			break;
-	}
+
+		if (time_after(jiffies, timeout)) {
+			err = -ENOLINK;
+			break;
+		}
+
+	} while (chan->unacked_frames > 0 &&
+		 chan->state == BT_CONNECTED);
+
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(sk), &wait);
 	return err;
@@ -1098,7 +1111,12 @@
 	if (!sk)
 		return 0;
 
+	/* prevent sk structure from being freed whilst unlocked */
+	sock_hold(sk);
+
 	chan = l2cap_pi(sk)->chan;
+	/* prevent chan structure from being freed whilst unlocked */
+	l2cap_chan_hold(chan);
 	conn = chan->conn;
 
 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1110,8 +1128,10 @@
 	lock_sock(sk);
 
 	if (!sk->sk_shutdown) {
-		if (chan->mode == L2CAP_MODE_ERTM)
-			err = __l2cap_wait_ack(sk);
+		if (chan->mode == L2CAP_MODE_ERTM &&
+		    chan->unacked_frames > 0 &&
+		    chan->state == BT_CONNECTED)
+			err = __l2cap_wait_ack(sk, chan);
 
 		sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -1134,6 +1154,11 @@
 	if (conn)
 		mutex_unlock(&conn->chan_lock);
 
+	l2cap_chan_put(chan);
+	sock_put(sk);
+
+	BT_DBG("err: %d", err);
+
 	return err;
 }
 
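The reworked __l2cap_wait_ack() above polls every L2CAP_WAIT_ACK_POLL_PERIOD and gives up after L2CAP_WAIT_ACK_TIMEOUT instead of looping indefinitely. The constants are defined outside this hunk; a plausible definition, keeping the previous HZ/5 (200 ms) poll interval and assuming a 10 second overall cap, would be:

/* Assumed values: the poll period matches the old HZ/5; the overall
 * timeout value is an assumption, not taken from this diff.
 */
#define L2CAP_WAIT_ACK_POLL_PERIOD	msecs_to_jiffies(200)
#define L2CAP_WAIT_ACK_TIMEOUT		msecs_to_jiffies(10000)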
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 92720f3..ccaf5a4 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -3564,9 +3564,10 @@
 		 */
 		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
 
-		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
-				      sec_level, HCI_LE_CONN_TIMEOUT,
-				      HCI_ROLE_MASTER);
+		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
+					   addr_type, sec_level,
+					   HCI_LE_CONN_TIMEOUT,
+					   HCI_ROLE_MASTER);
 	}
 
 	if (IS_ERR(conn)) {
@@ -4210,7 +4211,7 @@
 		/* Don't let discovery abort an outgoing connection attempt
 		 * that's using directed advertising.
 		 */
-		if (hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
+		if (hci_lookup_le_connect(hdev)) {
 			*status = MGMT_STATUS_REJECTED;
 			return false;
 		}
@@ -6107,6 +6108,12 @@
 	switch (auto_connect) {
 	case HCI_AUTO_CONN_DISABLED:
 	case HCI_AUTO_CONN_LINK_LOSS:
+		/* If auto connect is being disabled when we're trying to
+		 * connect to a device, keep connecting.
+		 */
+		if (params->explicit_connect)
+			list_add(&params->action, &hdev->pend_le_conns);
+
 		__hci_update_background_scan(req);
 		break;
 	case HCI_AUTO_CONN_REPORT:
@@ -6226,6 +6233,17 @@
 	else
 		auto_conn = HCI_AUTO_CONN_REPORT;
 
+	/* Kernel internally uses conn_params with resolvable private
+	 * address, but Add Device allows only identity addresses.
+	 * Make sure it is enforced before calling
+	 * hci_conn_params_lookup.
+	 */
+	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+		err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
+		mgmt_pending_remove(cmd);
+		goto unlock;
+	}
+
 	/* If the connection parameters don't exist for this device,
 	 * they will be created and configured with defaults.
 	 */
@@ -6340,6 +6358,18 @@
 		else
 			addr_type = ADDR_LE_DEV_RANDOM;
 
+		/* Kernel internally uses conn_params with resolvable private
+		 * address, but Remove Device allows only identity addresses.
+		 * Make sure it is enforced before calling
+		 * hci_conn_params_lookup.
+		 */
+		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+			err = cmd->cmd_complete(cmd,
+						MGMT_STATUS_INVALID_PARAMS);
+			mgmt_pending_remove(cmd);
+			goto unlock;
+		}
+
 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
 						addr_type);
 		if (!params) {
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 688a040..f315c8d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -154,13 +154,13 @@
 	sock_set_flag(sk, SOCK_ZAPPED);
 }
 
-static int sco_conn_del(struct hci_conn *hcon, int err)
+static void sco_conn_del(struct hci_conn *hcon, int err)
 {
 	struct sco_conn *conn = hcon->sco_data;
 	struct sock *sk;
 
 	if (!conn)
-		return 0;
+		return;
 
 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
 
@@ -179,7 +179,6 @@
 
 	hcon->sco_data = NULL;
 	kfree(conn);
-	return 0;
 }
 
 static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 4ff77a1..6ed2feb 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -339,6 +339,7 @@
 	.ndo_bridge_getlink	 = br_getlink,
 	.ndo_bridge_setlink	 = br_setlink,
 	.ndo_bridge_dellink	 = br_dellink,
+	.ndo_features_check	 = passthru_features_check,
 };
 
 static void br_dev_free(struct net_device *dev)
@@ -364,8 +365,7 @@
 	dev->destructor = br_dev_free;
 	dev->ethtool_ops = &br_ethtool_ops;
 	SET_NETDEV_DEVTYPE(dev, &br_type);
-	dev->tx_queue_len = 0;
-	dev->priv_flags = IFF_EBRIDGE;
+	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
 
 	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
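br_dev_setup() now marks the bridge with IFF_NO_QUEUE instead of zeroing tx_queue_len, and the net/core/dev.c change further below maps a zero tx_queue_len onto the same flag. A hedged sketch of how another virtual device would opt out of queueing under this scheme (the driver and callback names are hypothetical):

static void dummy_virt_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Software device: no qdisc or backpressure wanted, so ask for
	 * the noqueue path explicitly rather than setting tx_queue_len = 0.
	 */
	dev->priv_flags |= IFF_NO_QUEUE;
}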
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index a538cb1..45e4757 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -281,6 +281,7 @@
 	br_fdb_delete_by_port(br, NULL, 0, 1);
 
 	br_vlan_flush(br);
+	br_multicast_dev_del(br);
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index c943219..d747275 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -85,6 +85,7 @@
 					memset(&e, 0, sizeof(e));
 					e.ifindex = port->dev->ifindex;
 					e.state = p->state;
+					e.vid = p->addr.vid;
 					if (p->addr.proto == htons(ETH_P_IP))
 						e.addr.u.ip4 = p->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -230,7 +231,7 @@
 }
 
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-		   struct br_ip *group, int type)
+		   struct br_ip *group, int type, u8 state)
 {
 	struct br_mdb_entry entry;
 
@@ -241,9 +242,78 @@
 #if IS_ENABLED(CONFIG_IPV6)
 	entry.addr.u.ip6 = group->u.ip6;
 #endif
+	entry.state = state;
+	entry.vid = group->vid;
 	__br_mdb_notify(dev, &entry, type);
 }
 
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+				   struct net_device *dev,
+				   int ifindex, u32 pid,
+				   u32 seq, int type, unsigned int flags)
+{
+	struct br_port_msg *bpm;
+	struct nlmsghdr *nlh;
+	struct nlattr *nest;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	bpm = nlmsg_data(nlh);
+	memset(bpm, 0, sizeof(*bpm));
+	bpm->family = AF_BRIDGE;
+	bpm->ifindex = dev->ifindex;
+	nest = nla_nest_start(skb, MDBA_ROUTER);
+	if (!nest)
+		goto cancel;
+
+	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+		goto end;
+
+	nla_nest_end(skb, nest);
+	nlmsg_end(skb, nlh);
+	return 0;
+
+end:
+	nla_nest_end(skb, nest);
+cancel:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct br_port_msg))
+		+ nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+		   int type)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+	int ifindex;
+
+	ifindex = port ? port->dev->ifindex : 0;
+	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+	if (!skb)
+		goto errout;
+
+	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+	if (err < 0) {
+		kfree_skb(skb);
+		goto errout;
+	}
+
+	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+	return;
+
+errout:
+	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
 static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
 {
 	if (entry->ifindex == 0)
@@ -263,6 +333,8 @@
 		return false;
 	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
 		return false;
+	if (entry->vid >= VLAN_VID_MASK)
+		return false;
 
 	return true;
 }
@@ -374,6 +446,7 @@
 		return -EINVAL;
 
 	memset(&ip, 0, sizeof(ip));
+	ip.vid = entry->vid;
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
@@ -391,8 +464,11 @@
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
+	unsigned short vid = VLAN_N_VID;
+	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
-	struct net_device *dev;
+	struct net_bridge_port *p;
+	struct net_port_vlans *pv;
 	struct net_bridge *br;
 	int err;
 
@@ -402,9 +478,32 @@
 
 	br = netdev_priv(dev);
 
-	err = __br_mdb_add(net, br, entry);
-	if (!err)
-		__br_mdb_notify(dev, entry, RTM_NEWMDB);
+	/* If vlan filtering is enabled and VLAN is not specified, install
+	 * the mdb entry on all vlans configured on the port.
+	 */
+	pdev = __dev_get_by_index(net, entry->ifindex);
+	if (!pdev)
+		return -ENODEV;
+
+	p = br_port_get_rtnl(pdev);
+	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+		return -EINVAL;
+
+	pv = nbp_get_vlan_info(p);
+	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+			entry->vid = vid;
+			err = __br_mdb_add(net, br, entry);
+			if (err)
+				break;
+			__br_mdb_notify(dev, entry, RTM_NEWMDB);
+		}
+	} else {
+		err = __br_mdb_add(net, br, entry);
+		if (!err)
+			__br_mdb_notify(dev, entry, RTM_NEWMDB);
+	}
+
 	return err;
 }
 
@@ -421,6 +520,7 @@
 		return -EINVAL;
 
 	memset(&ip, 0, sizeof(ip));
+	ip.vid = entry->vid;
 	ip.proto = entry->addr.proto;
 	if (ip.proto == htons(ETH_P_IP))
 		ip.u.ip4 = entry->addr.u.ip4;
@@ -465,8 +565,12 @@
 
 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
-	struct net_device *dev;
+	struct net *net = sock_net(skb->sk);
+	unsigned short vid = VLAN_N_VID;
+	struct net_device *dev, *pdev;
 	struct br_mdb_entry *entry;
+	struct net_bridge_port *p;
+	struct net_port_vlans *pv;
 	struct net_bridge *br;
 	int err;
 
@@ -476,9 +580,31 @@
 
 	br = netdev_priv(dev);
 
-	err = __br_mdb_del(br, entry);
-	if (!err)
-		__br_mdb_notify(dev, entry, RTM_DELMDB);
+	/* If vlan filtering is enabled and VLAN is not specified, delete
+	 * the mdb entry on all vlans configured on the port.
+	 */
+	pdev = __dev_get_by_index(net, entry->ifindex);
+	if (!pdev)
+		return -ENODEV;
+
+	p = br_port_get_rtnl(pdev);
+	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+		return -EINVAL;
+
+	pv = nbp_get_vlan_info(p);
+	if (br_vlan_enabled(br) && pv && entry->vid == 0) {
+		for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
+			entry->vid = vid;
+			err = __br_mdb_del(br, entry);
+			if (!err)
+				__br_mdb_notify(dev, entry, RTM_DELMDB);
+		}
+	} else {
+		err = __br_mdb_del(br, entry);
+		if (!err)
+			__br_mdb_notify(dev, entry, RTM_DELMDB);
+	}
+
 	return err;
 }
 
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 1285eaf..66efdc2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,6 +283,8 @@
 		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
+		br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+			      p->state);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
 		if (!mp->ports && !mp->mglist &&
@@ -704,7 +706,7 @@
 	if (unlikely(!p))
 		goto err;
 	rcu_assign_pointer(*pp, p);
-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+	br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
 
 found:
 	mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -764,6 +766,7 @@
 		goto out;
 
 	hlist_del_init_rcu(&port->rlist);
+	br_rtr_notify(br->dev, port, RTM_DELMDB);
 
 out:
 	spin_unlock(&br->multicast_lock);
@@ -924,6 +927,15 @@
 
 void br_multicast_del_port(struct net_bridge_port *port)
 {
+	struct net_bridge *br = port->br;
+	struct net_bridge_port_group *pg;
+	struct hlist_node *n;
+
+	/* Take care of the remaining groups, only perm ones should be left */
+	spin_lock_bh(&br->multicast_lock);
+	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+		br_multicast_del_pg(br, pg);
+	spin_unlock_bh(&br->multicast_lock);
 	del_timer_sync(&port->multicast_router_timer);
 }
 
@@ -963,10 +975,13 @@
 
 	spin_lock(&br->multicast_lock);
 	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
-		br_multicast_del_pg(br, pg);
+		if (pg->state == MDB_TEMPORARY)
+			br_multicast_del_pg(br, pg);
 
-	if (!hlist_unhashed(&port->rlist))
+	if (!hlist_unhashed(&port->rlist)) {
 		hlist_del_init_rcu(&port->rlist);
+		br_rtr_notify(br->dev, port, RTM_DELMDB);
+	}
 	del_timer(&port->multicast_router_timer);
 	del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1204,6 +1219,7 @@
 		hlist_add_behind_rcu(&port->rlist, slot);
 	else
 		hlist_add_head_rcu(&port->rlist, &br->router_list);
+	br_rtr_notify(br->dev, port, RTM_NEWMDB);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -1437,7 +1453,8 @@
 			hlist_del_init(&p->mglist);
 			del_timer(&p->timer);
 			call_rcu_bh(&p->rcu, br_multicast_free_pg);
-			br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+				      p->state);
 
 			if (!mp->ports && !mp->mglist &&
 			    netif_running(br->dev))
@@ -1754,12 +1771,6 @@
 
 void br_multicast_stop(struct net_bridge *br)
 {
-	struct net_bridge_mdb_htable *mdb;
-	struct net_bridge_mdb_entry *mp;
-	struct hlist_node *n;
-	u32 ver;
-	int i;
-
 	del_timer_sync(&br->multicast_router_timer);
 	del_timer_sync(&br->ip4_other_query.timer);
 	del_timer_sync(&br->ip4_own_query.timer);
@@ -1767,6 +1778,15 @@
 	del_timer_sync(&br->ip6_other_query.timer);
 	del_timer_sync(&br->ip6_own_query.timer);
 #endif
+}
+
+void br_multicast_dev_del(struct net_bridge *br)
+{
+	struct net_bridge_mdb_htable *mdb;
+	struct net_bridge_mdb_entry *mp;
+	struct hlist_node *n;
+	u32 ver;
+	int i;
 
 	spin_lock_bh(&br->multicast_lock);
 	mdb = mlock_dereference(br->mdb, br);
@@ -1834,8 +1854,10 @@
 		p->multicast_router = val;
 		err = 0;
 
-		if (val < 2 && !hlist_unhashed(&p->rlist))
+		if (val < 2 && !hlist_unhashed(&p->rlist)) {
 			hlist_del_init_rcu(&p->rlist);
+			br_rtr_notify(br->dev, p, RTM_DELMDB);
+		}
 
 		if (val == 1)
 			break;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index c8b9bcf..0a6f095 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -49,9 +49,9 @@
 static int brnf_call_iptables __read_mostly = 1;
 static int brnf_call_ip6tables __read_mostly = 1;
 static int brnf_call_arptables __read_mostly = 1;
-static int brnf_filter_vlan_tagged __read_mostly = 0;
-static int brnf_filter_pppoe_tagged __read_mostly = 0;
-static int brnf_pass_vlan_indev __read_mostly = 0;
+static int brnf_filter_vlan_tagged __read_mostly;
+static int brnf_filter_pppoe_tagged __read_mostly;
+static int brnf_pass_vlan_indev __read_mostly;
 #else
 #define brnf_call_iptables 1
 #define brnf_call_ip6tables 1
@@ -284,7 +284,7 @@
 							 nf_bridge->neigh_header,
 							 ETH_HLEN-ETH_ALEN);
 			/* tell br_dev_xmit to continue with forwarding */
-			nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+			nf_bridge->bridged_dnat = 1;
 			/* FIXME Need to refragment */
 			ret = neigh->output(neigh, skb);
 		}
@@ -356,7 +356,7 @@
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->pkt_otherhost = false;
 	}
-	nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+	nf_bridge->in_prerouting = 0;
 	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct in_device *in_dev = __in_dev_get_rcu(dev);
@@ -444,7 +444,7 @@
 		nf_bridge->pkt_otherhost = true;
 	}
 
-	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
+	nf_bridge->in_prerouting = 1;
 	nf_bridge->physindev = skb->dev;
 	skb->dev = brnf_get_logical_dev(skb, skb->dev);
 
@@ -850,10 +850,8 @@
 				   struct sk_buff *skb,
 				   const struct nf_hook_state *state)
 {
-	if (skb->nf_bridge &&
-	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
+	if (skb->nf_bridge && !skb->nf_bridge->in_prerouting)
 		return NF_STOP;
-	}
 
 	return NF_ACCEPT;
 }
@@ -872,7 +870,7 @@
 	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 
 	skb_pull(skb, ETH_HLEN);
-	nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;
+	nf_bridge->bridged_dnat = 0;
 
 	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));
 
@@ -887,7 +885,7 @@
 
 static int br_nf_dev_xmit(struct sk_buff *skb)
 {
-	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+	if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
 		br_nf_pre_routing_finish_bridge_slow(skb);
 		return 1;
 	}
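The BRNF_NF_BRIDGE_PREROUTING and BRNF_BRIDGED_DNAT mask bits are replaced above by per-skb boolean fields on nf_bridge_info (in_prerouting, bridged_dnat). The test in br_nf_dev_xmit() boils down to a simple predicate; an equivalent helper, purely illustrative, could read:

static inline bool br_nf_skb_needs_dnat_finish(const struct sk_buff *skb)
{
	/* True when br_nf_pre_routing_finish_bridge_slow() must complete
	 * the bridged DNAT before the frame leaves the device.
	 */
	return skb->nf_bridge && skb->nf_bridge->bridged_dnat;
}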
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 13b7d1e..77383bf 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -174,7 +174,7 @@
 		skb->pkt_type = PACKET_OTHERHOST;
 		nf_bridge->pkt_otherhost = false;
 	}
-	nf_bridge->mask &= ~BRNF_NF_BRIDGE_PREROUTING;
+	nf_bridge->in_prerouting = 0;
 	if (br_nf_ipv6_daddr_was_changed(skb, nf_bridge)) {
 		skb_dst_drop(skb);
 		v6ops->route_input(skb);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4d74a06..af5e187 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -166,8 +166,6 @@
 			    sizeof(vinfo), &vinfo))
 			goto nla_put_failure;
 
-		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
 		vinfo.vid = vid_end;
 		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
 		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
@@ -675,6 +673,21 @@
 			return -EADDRNOTAVAIL;
 	}
 
+	if (!data)
+		return 0;
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (data[IFLA_BR_VLAN_PROTOCOL]) {
+		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
+		case htons(ETH_P_8021Q):
+		case htons(ETH_P_8021AD):
+			break;
+		default:
+			return -EPROTONOSUPPORT;
+		}
+	}
+#endif
+
 	return 0;
 }
 
@@ -730,6 +743,8 @@
 	[IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
 	[IFLA_BR_STP_STATE] = { .type = NLA_U32 },
 	[IFLA_BR_PRIORITY] = { .type = NLA_U16 },
+	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
+	[IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
 };
 
 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -777,6 +792,24 @@
 		br_stp_set_bridge_priority(br, priority);
 	}
 
+	if (data[IFLA_BR_VLAN_FILTERING]) {
+		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
+
+		err = __br_vlan_filter_toggle(br, vlan_filter);
+		if (err)
+			return err;
+	}
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (data[IFLA_BR_VLAN_PROTOCOL]) {
+		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
+
+		err = __br_vlan_set_proto(br, vlan_proto);
+		if (err)
+			return err;
+	}
+#endif
+
 	return 0;
 }
 
@@ -788,6 +821,10 @@
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_AGEING_TIME */
 	       nla_total_size(sizeof(u32)) +    /* IFLA_BR_STP_STATE */
 	       nla_total_size(sizeof(u16)) +    /* IFLA_BR_PRIORITY */
+	       nla_total_size(sizeof(u8)) +     /* IFLA_BR_VLAN_FILTERING */
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
+#endif
 	       0;
 }
 
@@ -800,15 +837,22 @@
 	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
 	u32 stp_enabled = br->stp_enabled;
 	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
+	u8 vlan_enabled = br_vlan_enabled(br);
 
 	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
 	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
 	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
 	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
 	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
-	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority))
+	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
+	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled))
 		return -EMSGSIZE;
 
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto))
+		return -EMSGSIZE;
+#endif
+
 	return 0;
 }
 
@@ -839,7 +883,7 @@
 	.kind			= "bridge",
 	.priv_size		= sizeof(struct net_bridge),
 	.setup			= br_dev_setup,
-	.maxtype		= IFLA_BRPORT_MAX,
+	.maxtype		= IFLA_BR_MAX,
 	.policy			= br_policy,
 	.validate		= br_validate,
 	.newlink		= br_dev_newlink,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8b21146..213baf7 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -95,15 +95,15 @@
 	struct hlist_node		hlist;
 	struct net_bridge_port		*dst;
 
-	struct rcu_head			rcu;
 	unsigned long			updated;
 	unsigned long			used;
 	mac_addr			addr;
+	__u16				vlan_id;
 	unsigned char			is_local:1,
 					is_static:1,
 					added_by_user:1,
 					added_by_external_learn:1;
-	__u16				vlan_id;
+	struct rcu_head			rcu;
 };
 
 struct net_bridge_port_group {
@@ -466,6 +466,7 @@
 void br_multicast_init(struct net_bridge *br);
 void br_multicast_open(struct net_bridge *br);
 void br_multicast_stop(struct net_bridge *br);
+void br_multicast_dev_del(struct net_bridge *br);
 void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
 			  struct sk_buff *skb);
 void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
@@ -488,7 +489,9 @@
 void br_mdb_init(void);
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-		   struct br_ip *group, int type);
+		   struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+		   int type);
 
 #define mlock_dereference(X, br) \
 	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -565,6 +568,10 @@
 {
 }
 
+static inline void br_multicast_dev_del(struct net_bridge *br)
+{
+}
+
 static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
 					struct sk_buff *skb)
 {
@@ -607,7 +614,9 @@
 void br_vlan_flush(struct net_bridge *br);
 bool br_vlan_find(struct net_bridge *br, u16 vid);
 void br_recalculate_fwd_mask(struct net_bridge *br);
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int __br_vlan_set_proto(struct net_bridge *br, __be16 proto);
 int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
 int br_vlan_init(struct net_bridge *br);
 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
@@ -764,6 +773,12 @@
 {
 	return 0;
 }
+
+static inline int __br_vlan_filter_toggle(struct net_bridge *br,
+					  unsigned long val)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 struct nf_br_ops {
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 0d41f81..3cd8cc9 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -468,41 +468,40 @@
 					      ~(1u << br->group_addr[5]);
 }
 
-int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 {
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	if (br->vlan_enabled == val)
-		goto unlock;
+		return 0;
 
 	br->vlan_enabled = val;
 	br_manage_promisc(br);
 	recalculate_group_addr(br);
 	br_recalculate_fwd_mask(br);
 
-unlock:
-	rtnl_unlock();
 	return 0;
 }
 
-int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
+{
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	__br_vlan_filter_toggle(br, val);
+	rtnl_unlock();
+
+	return 0;
+}
+
+int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 {
 	int err = 0;
 	struct net_bridge_port *p;
 	struct net_port_vlans *pv;
-	__be16 proto, oldproto;
+	__be16 oldproto;
 	u16 vid, errvid;
 
-	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
-		return -EPROTONOSUPPORT;
-
-	if (!rtnl_trylock())
-		return restart_syscall();
-
-	proto = htons(val);
 	if (br->vlan_proto == proto)
-		goto unlock;
+		return 0;
 
 	/* Add VLANs for the new proto to the device filter. */
 	list_for_each_entry(p, &br->port_list, list) {
@@ -533,9 +532,7 @@
 			vlan_vid_del(p->dev, oldproto, vid);
 	}
 
-unlock:
-	rtnl_unlock();
-	return err;
+	return 0;
 
 err_filt:
 	errvid = vid;
@@ -551,7 +548,23 @@
 			vlan_vid_del(p->dev, proto, vid);
 	}
 
-	goto unlock;
+	return err;
+}
+
+int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
+{
+	int err;
+
+	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
+		return -EPROTONOSUPPORT;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	err = __br_vlan_set_proto(br, htons(val));
+	rtnl_unlock();
+
+	return err;
 }
 
 static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 18ca4b2..48b6b01 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -176,7 +176,7 @@
 	return 0;
 }
 
-static inline __pure
+static inline
 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index edbca46..d730a0f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -177,7 +177,7 @@
 	skb->protocol = htons(ETH_P_CAIF);
 
 	/* Check if we need to handle xoff */
-	if (likely(caifd->netdev->tx_queue_len == 0))
+	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
 		goto noxoff;
 
 	if (unlikely(caifd->xoff))
diff --git a/net/core/Makefile b/net/core/Makefile
index fec0856..086b01f 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -23,3 +23,4 @@
 obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
diff --git a/net/core/dev.c b/net/core/dev.c
index a8e4dd4..877c848 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3061,6 +3061,16 @@
 	else
 		skb_dst_force(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+	/* Don't forward if offload device already forwarded */
+	if (skb->offload_fwd_mark &&
+	    skb->offload_fwd_mark == dev->offload_fwd_mark) {
+		consume_skb(skb);
+		rc = NET_XMIT_SUCCESS;
+		goto out;
+	}
+#endif
+
 	txq = netdev_pick_tx(dev, skb, accel_priv);
 	q = rcu_dereference_bh(txq->qdisc);
 
@@ -3645,15 +3655,15 @@
 
 	qdisc_skb_cb(skb)->pkt_len = skb->len;
 	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-	qdisc_bstats_update_cpu(cl->q, skb);
+	qdisc_bstats_cpu_update(cl->q, skb);
 
-	switch (tc_classify(skb, cl, &cl_res)) {
+	switch (tc_classify(skb, cl, &cl_res, false)) {
 	case TC_ACT_OK:
 	case TC_ACT_RECLASSIFY:
 		skb->tc_index = TC_H_MIN(cl_res.classid);
 		break;
 	case TC_ACT_SHOT:
-		qdisc_qstats_drop_cpu(cl->q);
+		qdisc_qstats_cpu_drop(cl->q);
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
 		kfree_skb(skb);
@@ -4985,7 +4995,7 @@
  * Gets the next netdev_adjacent->private from the dev's lower neighbour
  * list, starting from iter position. The caller must hold either hold the
  * RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next_private(struct net_device *dev,
 				    struct list_head **iter)
@@ -5040,7 +5050,7 @@
  * Gets the next netdev_adjacent from the dev's lower neighbour
  * list, starting from iter position. The caller must hold RTNL lock or
  * its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
@@ -5301,6 +5311,7 @@
 				   struct net_device *upper_dev, bool master,
 				   void *private)
 {
+	struct netdev_notifier_changeupper_info changeupper_info;
 	struct netdev_adjacent *i, *j, *to_i, *to_j;
 	int ret = 0;
 
@@ -5319,6 +5330,10 @@
 	if (master && netdev_master_upper_dev_get(dev))
 		return -EBUSY;
 
+	changeupper_info.upper_dev = upper_dev;
+	changeupper_info.master = master;
+	changeupper_info.linking = true;
+
 	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
 						   master);
 	if (ret)
@@ -5357,7 +5372,8 @@
 			goto rollback_lower_mesh;
 	}
 
-	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+				      &changeupper_info.info);
 	return 0;
 
 rollback_lower_mesh:
@@ -5452,9 +5468,14 @@
 void netdev_upper_dev_unlink(struct net_device *dev,
 			     struct net_device *upper_dev)
 {
+	struct netdev_notifier_changeupper_info changeupper_info;
 	struct netdev_adjacent *i, *j;
 	ASSERT_RTNL();
 
+	changeupper_info.upper_dev = upper_dev;
+	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
+	changeupper_info.linking = false;
+
 	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
 	/* Here is the tricky part. We must remove all dev's lower
@@ -5474,7 +5495,8 @@
 	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
 		__netdev_adjacent_dev_unlink(dev, i->dev);
 
-	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
+	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
+				      &changeupper_info.info);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
@@ -6075,6 +6097,26 @@
 EXPORT_SYMBOL(dev_get_phys_port_name);
 
 /**
+ *	dev_change_proto_down - update protocol port state information
+ *	@dev: device
+ *	@proto_down: new value
+ *
+ *	This info can be used by switch drivers to set the phys state of the
+ *	port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+
+	if (!ops->ndo_change_proto_down)
+		return -EOPNOTSUPP;
+	if (!netif_device_present(dev))
+		return -ENODEV;
+	return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
+/**
  *	dev_new_index	-	allocate an ifindex
  *	@net: the applicable net namespace
  *
@@ -6967,6 +7009,9 @@
 	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
 	setup(dev);
 
+	if (!dev->tx_queue_len)
+		dev->priv_flags |= IFF_NO_QUEUE;
+
 	dev->num_tx_queues = txqs;
 	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
@@ -7639,7 +7684,7 @@
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
 	hotcpu_notifier(dev_cpu_callback, 0);
-	dst_init();
+	dst_subsys_init();
 	rc = 0;
 out:
 	return rc;
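dev_change_proto_down() simply forwards to the new ndo_change_proto_down operation. A sketch of how a hypothetical switch driver might wire it up, so that setting the proto_down attribute from userspace disables the physical port (exdrv_port and exdrv_port_enable are illustrative names, not from this diff):

static int exdrv_change_proto_down(struct net_device *dev, bool proto_down)
{
	struct exdrv_port *port = netdev_priv(dev);

	/* Mirror the software "protocol down" state onto the phys port. */
	return exdrv_port_enable(port, !proto_down);
}

static const struct net_device_ops exdrv_netdev_ops = {
	/* ... other ops ... */
	.ndo_change_proto_down	= exdrv_change_proto_down,
};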
diff --git a/net/core/dst.c b/net/core/dst.c
index 002144be..0771c8c 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -20,8 +20,10 @@
 #include <net/net_namespace.h>
 #include <linux/sched.h>
 #include <linux/prefetch.h>
+#include <net/lwtunnel.h>
 
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 
 /*
  * Theory of operations:
@@ -158,19 +160,10 @@
 	[RTAX_MAX] = 0xdeadbeef,
 };
 
-
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-		int initial_ref, int initial_obsolete, unsigned short flags)
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+	      struct net_device *dev, int initial_ref, int initial_obsolete,
+	      unsigned short flags)
 {
-	struct dst_entry *dst;
-
-	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
-		if (ops->gc(ops))
-			return NULL;
-	}
-	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
-	if (!dst)
-		return NULL;
 	dst->child = NULL;
 	dst->dev = dev;
 	if (dev)
@@ -192,6 +185,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	dst->tclassid = 0;
 #endif
+	dst->lwtstate = NULL;
 	atomic_set(&dst->__refcnt, initial_ref);
 	dst->__use = 0;
 	dst->lastuse = jiffies;
@@ -200,6 +194,25 @@
 	dst->next = NULL;
 	if (!(flags & DST_NOCOUNT))
 		dst_entries_add(ops, 1);
+}
+EXPORT_SYMBOL(dst_init);
+
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+		int initial_ref, int initial_obsolete, unsigned short flags)
+{
+	struct dst_entry *dst;
+
+	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
+		if (ops->gc(ops))
+			return NULL;
+	}
+
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+	if (!dst)
+		return NULL;
+
+	dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+
 	return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -248,7 +261,13 @@
 		dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-	kmem_cache_free(dst->ops->kmem_cachep, dst);
+
+	lwtstate_put(dst->lwtstate);
+
+	if (dst->flags & DST_METADATA)
+		kfree(dst);
+	else
+		kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;
 	if (dst) {
@@ -329,6 +348,69 @@
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
+static struct dst_ops md_dst_ops = {
+	.family =		AF_UNSPEC,
+};
+
+static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+{
+	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+	kfree_skb(skb);
+	return 0;
+}
+
+static int dst_md_discard(struct sk_buff *skb)
+{
+	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+	kfree_skb(skb);
+	return 0;
+}
+
+static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
+{
+	struct dst_entry *dst;
+
+	dst = &md_dst->dst;
+	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+		 DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+
+	dst->input = dst_md_discard;
+	dst->output = dst_md_discard_sk;
+
+	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
+{
+	struct metadata_dst *md_dst;
+
+	md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
+	if (!md_dst)
+		return NULL;
+
+	__metadata_dst_init(md_dst, optslen);
+
+	return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc);
+
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
+{
+	int cpu;
+	struct metadata_dst __percpu *md_dst;
+
+	md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
+				    __alignof__(struct metadata_dst), flags);
+	if (!md_dst)
+		return NULL;
+
+	for_each_possible_cpu(cpu)
+		__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
+
+	return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
+
 /* Dirty hack. We did it in 2.2 (in __dst_free),
  * we have _very_ good reasons not to repeat
  * this mistake in 2.3, but we have no choice
@@ -393,7 +475,7 @@
 	.priority = -10, /* must be called after other network notifiers */
 };
 
-void __init dst_init(void)
+void __init dst_subsys_init(void)
 {
 	register_netdevice_notifier(&dst_dev_notifier);
 }
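metadata_dst_alloc() and metadata_dst_alloc_percpu() create standalone dst entries whose only job is to carry per-packet tunnel metadata; the bpf_skb_set_tunnel_key() helper added in net/core/filter.c below is one consumer. A hedged sketch of the basic usage pattern on a receive path, with the function name and the vni parameter standing in for whatever the caller parsed off the wire:

static int example_set_tunnel_metadata(struct sk_buff *skb, u64 vni)
{
	struct metadata_dst *tun_dst;

	/* Allocate a dst that only carries metadata (no option bytes). */
	tun_dst = metadata_dst_alloc(0, GFP_ATOMIC);
	if (!tun_dst)
		return -ENOMEM;

	tun_dst->u.tun_info.key.tun_id = cpu_to_be64(vni);
	tun_dst->u.tun_info.key.tun_flags = TUNNEL_KEY;

	skb_dst_drop(skb);
	skb_dst_set(skb, &tun_dst->dst);	/* dropped later via dst_release() */
	return 0;
}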
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 9a12668..ae8306e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -16,6 +16,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
 
 int fib_default_rule_add(struct fib_rules_ops *ops,
 			 u32 pref, u32 table, u32 flags)
@@ -186,6 +187,9 @@
 	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
 		goto out;
 
+	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
+		goto out;
+
 	ret = ops->match(rule, fl, flags);
 out:
 	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -330,6 +334,9 @@
 	if (tb[FRA_FWMASK])
 		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
 
+	if (tb[FRA_TUN_ID])
+		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+
 	rule->action = frh->action;
 	rule->flags = frh->flags;
 	rule->table = frh_get_table(frh, tb);
@@ -407,6 +414,9 @@
 	if (unresolved)
 		ops->unresolved_rules++;
 
+	if (rule->tun_id)
+		ip_tunnel_need_metadata();
+
 	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
 	flush_route_cache(ops);
 	rules_ops_put(ops);
@@ -473,6 +483,10 @@
 		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
 			continue;
 
+		if (tb[FRA_TUN_ID] &&
+		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
+			continue;
+
 		if (!ops->compare(rule, frh, tb))
 			continue;
 
@@ -487,6 +501,9 @@
 				goto errout;
 		}
 
+		if (rule->tun_id)
+			ip_tunnel_unneed_metadata();
+
 		list_del_rcu(&rule->list);
 
 		if (rule->action == FR_ACT_GOTO) {
@@ -535,7 +552,8 @@
 			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
 			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
 			 + nla_total_size(4) /* FRA_FWMARK */
-			 + nla_total_size(4); /* FRA_FWMASK */
+			 + nla_total_size(4) /* FRA_FWMASK */
+			 + nla_total_size(8); /* FRA_TUN_ID */
 
 	if (ops->nlmsg_payload)
 		payload += ops->nlmsg_payload(rule);
@@ -591,7 +609,9 @@
 	    ((rule->mark_mask || rule->mark) &&
 	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
 	    (rule->target &&
-	     nla_put_u32(skb, FRA_GOTO, rule->target)))
+	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+	    (rule->tun_id &&
+	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
 		goto nla_put_failure;
 
 	if (rule->suppress_ifgroup != -1) {
diff --git a/net/core/filter.c b/net/core/filter.c
index be3098f..13079f0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -47,6 +47,8 @@
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
 #include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
+#include <net/dst_metadata.h>
 
 /**
  *	sk_filter - run a packet through a socket filter
@@ -1122,6 +1124,7 @@
 	*pfp = fp;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
 
 void bpf_prog_destroy(struct bpf_prog *fp)
 {
@@ -1346,7 +1349,7 @@
 static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
 {
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
-	u32 is_pseudo = BPF_IS_PSEUDO_HEADER(flags);
+	bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags);
 	int offset = (int) r2;
 	__sum16 sum, *ptr;
 
@@ -1424,6 +1427,139 @@
 	.arg3_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+	.func           = bpf_get_cgroup_classid,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	__be16 vlan_proto = (__force __be16) r2;
+
+	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+		     vlan_proto != htons(ETH_P_8021AD)))
+		vlan_proto = htons(ETH_P_8021Q);
+
+	return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+	.func           = bpf_skb_vlan_push,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_ANYTHING,
+	.arg3_type      = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+	return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+	.func           = bpf_skb_vlan_pop,
+	.gpl_only       = false,
+	.ret_type       = RET_INTEGER,
+	.arg1_type      = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+	if (func == bpf_skb_vlan_push)
+		return true;
+	if (func == bpf_skb_vlan_pop)
+		return true;
+	return false;
+}
+
+static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
+	struct ip_tunnel_info *info = skb_tunnel_info(skb);
+
+	if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
+		return -EINVAL;
+	if (ip_tunnel_info_af(info) != AF_INET)
+		return -EINVAL;
+
+	to->tunnel_id = be64_to_cpu(info->key.tun_id);
+	to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+	.func		= bpf_skb_get_tunnel_key,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_ANYTHING,
+};
+
+static struct metadata_dst __percpu *md_dst;
+
+static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *) (long) r1;
+	struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
+	struct metadata_dst *md = this_cpu_ptr(md_dst);
+	struct ip_tunnel_info *info;
+
+	if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
+		return -EINVAL;
+
+	skb_dst_drop(skb);
+	dst_hold((struct dst_entry *) md);
+	skb_dst_set(skb, (struct dst_entry *) md);
+
+	info = &md->u.tun_info;
+	info->mode = IP_TUNNEL_INFO_TX;
+	info->key.tun_flags = TUNNEL_KEY;
+	info->key.tun_id = cpu_to_be64(from->tunnel_id);
+	info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+	.func		= bpf_skb_set_tunnel_key,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_STACK,
+	.arg3_type	= ARG_CONST_STACK_SIZE,
+	.arg4_type	= ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+{
+	if (!md_dst) {
+		/* race is not possible, since it's called from
+		/* A race is not possible, since this is called from the
+		 * verifier, which holds the verifier mutex
+		md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+		if (!md_dst)
+			return NULL;
+	}
+	return &bpf_skb_set_tunnel_key_proto;
+}
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1461,6 +1597,16 @@
 		return &bpf_l4_csum_replace_proto;
 	case BPF_FUNC_clone_redirect:
 		return &bpf_clone_redirect_proto;
+	case BPF_FUNC_get_cgroup_classid:
+		return &bpf_get_cgroup_classid_proto;
+	case BPF_FUNC_skb_vlan_push:
+		return &bpf_skb_vlan_push_proto;
+	case BPF_FUNC_skb_vlan_pop:
+		return &bpf_skb_vlan_pop_proto;
+	case BPF_FUNC_skb_get_tunnel_key:
+		return &bpf_skb_get_tunnel_key_proto;
+	case BPF_FUNC_skb_set_tunnel_key:
+		return bpf_get_skb_set_tunnel_key_proto();
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -1569,6 +1715,13 @@
 				      offsetof(struct net_device, ifindex));
 		break;
 
+	case offsetof(struct __sk_buff, hash):
+		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+				      offsetof(struct sk_buff, hash));
+		break;
+
 	case offsetof(struct __sk_buff, mark):
 		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
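The new eBPF helpers above (get_cgroup_classid, skb_vlan_push/pop, skb_{get,set}_tunnel_key) become callable from tc classifier/action programs. A rough sketch of a restricted-C program pushing a VLAN tag, following the samples/bpf conventions; the SEC() macro and the helper stub come from the samples' bpf_helpers.h and are assumptions here, not part of this diff:

#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include "bpf_helpers.h"

SEC("classifier")
int push_vlan100(struct __sk_buff *skb)
{
	/* Tag every packet with VLAN 100 (802.1Q); the helper returns 0
	 * on success and a negative error otherwise.
	 */
	bpf_skb_vlan_push(skb, __constant_htons(ETH_P_8021Q), 100);
	return 0;	/* TC_ACT_OK */
}

char _license[] SEC("license") = "GPL";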
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 2a834c6..d79699c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -19,14 +19,14 @@
 #include <net/flow_dissector.h>
 #include <scsi/fc/fc_fcoe.h>
 
-static bool skb_flow_dissector_uses_key(struct flow_dissector *flow_dissector,
-					enum flow_dissector_key_id key_id)
+static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
+			       enum flow_dissector_key_id key_id)
 {
 	return flow_dissector->used_keys & (1 << key_id);
 }
 
-static void skb_flow_dissector_set_key(struct flow_dissector *flow_dissector,
-				       enum flow_dissector_key_id key_id)
+static void dissector_set_key(struct flow_dissector *flow_dissector,
+			      enum flow_dissector_key_id key_id)
 {
 	flow_dissector->used_keys |= (1 << key_id);
 }
@@ -51,20 +51,20 @@
 		 * boundaries of unsigned short.
 		 */
 		BUG_ON(key->offset > USHRT_MAX);
-		BUG_ON(skb_flow_dissector_uses_key(flow_dissector,
-						   key->key_id));
+		BUG_ON(dissector_uses_key(flow_dissector,
+					  key->key_id));
 
-		skb_flow_dissector_set_key(flow_dissector, key->key_id);
+		dissector_set_key(flow_dissector, key->key_id);
 		flow_dissector->offset[key->key_id] = key->offset;
 	}
 
 	/* Ensure that the dissector always includes control and basic key.
 	 * That way we are able to avoid handling lack of these in fast path.
 	 */
-	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
-					    FLOW_DISSECTOR_KEY_CONTROL));
-	BUG_ON(!skb_flow_dissector_uses_key(flow_dissector,
-					    FLOW_DISSECTOR_KEY_BASIC));
+	BUG_ON(!dissector_uses_key(flow_dissector,
+				   FLOW_DISSECTOR_KEY_CONTROL));
+	BUG_ON(!dissector_uses_key(flow_dissector,
+				   FLOW_DISSECTOR_KEY_BASIC));
 }
 EXPORT_SYMBOL(skb_flow_dissector_init);
 
@@ -121,7 +121,8 @@
 bool __skb_flow_dissect(const struct sk_buff *skb,
 			struct flow_dissector *flow_dissector,
 			void *target_container,
-			void *data, __be16 proto, int nhoff, int hlen)
+			void *data, __be16 proto, int nhoff, int hlen,
+			unsigned int flags)
 {
 	struct flow_dissector_key_control *key_control;
 	struct flow_dissector_key_basic *key_basic;
@@ -130,6 +131,7 @@
 	struct flow_dissector_key_tags *key_tags;
 	struct flow_dissector_key_keyid *key_keyid;
 	u8 ip_proto = 0;
+	bool ret = false;
 
 	if (!data) {
 		data = skb->data;
@@ -152,8 +154,8 @@
 					      FLOW_DISSECTOR_KEY_BASIC,
 					      target_container);
 
-	if (skb_flow_dissector_uses_key(flow_dissector,
-					FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+	if (dissector_uses_key(flow_dissector,
+			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
 		struct ethhdr *eth = eth_hdr(skb);
 		struct flow_dissector_key_eth_addrs *key_eth_addrs;
 
@@ -171,15 +173,13 @@
 ip:
 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
 		if (!iph || iph->ihl < 5)
-			return false;
+			goto out_bad;
 		nhoff += iph->ihl * 4;
 
 		ip_proto = iph->protocol;
-		if (ip_is_fragment(iph))
-			ip_proto = 0;
 
-		if (!skb_flow_dissector_uses_key(flow_dissector,
-						 FLOW_DISSECTOR_KEY_IPV4_ADDRS))
+		if (!dissector_uses_key(flow_dissector,
+					FLOW_DISSECTOR_KEY_IPV4_ADDRS))
 			break;
 
 		key_addrs = skb_flow_dissector_target(flow_dissector,
@@ -187,6 +187,22 @@
 		memcpy(&key_addrs->v4addrs, &iph->saddr,
 		       sizeof(key_addrs->v4addrs));
 		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+
+		if (ip_is_fragment(iph)) {
+			key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+			if (iph->frag_off & htons(IP_OFFSET)) {
+				goto out_good;
+			} else {
+				key_control->flags |= FLOW_DIS_FIRST_FRAG;
+				if (!(flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG))
+					goto out_good;
+			}
+		}
+
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
+			goto out_good;
+
 		break;
 	}
 	case htons(ETH_P_IPV6): {
@@ -197,13 +213,13 @@
 ipv6:
 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
 		if (!iph)
-			return false;
+			goto out_bad;
 
 		ip_proto = iph->nexthdr;
 		nhoff += sizeof(struct ipv6hdr);
 
-		if (skb_flow_dissector_uses_key(flow_dissector,
-						FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		if (dissector_uses_key(flow_dissector,
+				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
 			struct flow_dissector_key_ipv6_addrs *key_ipv6_addrs;
 
 			key_ipv6_addrs = skb_flow_dissector_target(flow_dissector,
@@ -216,15 +232,20 @@
 
 		flow_label = ip6_flowlabel(iph);
 		if (flow_label) {
-			if (skb_flow_dissector_uses_key(flow_dissector,
-				FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
+			if (dissector_uses_key(flow_dissector,
+					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
 				key_tags = skb_flow_dissector_target(flow_dissector,
 								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
 								     target_container);
 				key_tags->flow_label = ntohl(flow_label);
 			}
+			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)
+				goto out_good;
 		}
 
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_L3)
+			goto out_good;
+
 		break;
 	}
 	case htons(ETH_P_8021AD):
@@ -234,10 +255,10 @@
 
 		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
 		if (!vlan)
-			return false;
+			goto out_bad;
 
-		if (skb_flow_dissector_uses_key(flow_dissector,
-						FLOW_DISSECTOR_KEY_VLANID)) {
+		if (dissector_uses_key(flow_dissector,
+				       FLOW_DISSECTOR_KEY_VLANID)) {
 			key_tags = skb_flow_dissector_target(flow_dissector,
 							     FLOW_DISSECTOR_KEY_VLANID,
 							     target_container);
@@ -256,7 +277,7 @@
 		} *hdr, _hdr;
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
-			return false;
+			goto out_bad;
 		proto = hdr->proto;
 		nhoff += PPPOE_SES_HLEN;
 		switch (proto) {
@@ -265,7 +286,7 @@
 		case htons(PPP_IPV6):
 			goto ipv6;
 		default:
-			return false;
+			goto out_bad;
 		}
 	}
 	case htons(ETH_P_TIPC): {
@@ -275,19 +296,17 @@
 		} *hdr, _hdr;
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
-			return false;
-		key_basic->n_proto = proto;
-		key_control->thoff = (u16)nhoff;
+			goto out_bad;
 
-		if (skb_flow_dissector_uses_key(flow_dissector,
-						FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
+		if (dissector_uses_key(flow_dissector,
+				       FLOW_DISSECTOR_KEY_TIPC_ADDRS)) {
 			key_addrs = skb_flow_dissector_target(flow_dissector,
 							      FLOW_DISSECTOR_KEY_TIPC_ADDRS,
 							      target_container);
 			key_addrs->tipcaddrs.srcnode = hdr->srcnode;
 			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC_ADDRS;
 		}
-		return true;
+		goto out_good;
 	}
 
 	case htons(ETH_P_MPLS_UC):
@@ -297,12 +316,12 @@
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
 					   hlen, &_hdr);
 		if (!hdr)
-			return false;
+			goto out_bad;
 
 		if ((ntohl(hdr[0].entry) & MPLS_LS_LABEL_MASK) >>
 		     MPLS_LS_LABEL_SHIFT == MPLS_LABEL_ENTROPY) {
-			if (skb_flow_dissector_uses_key(flow_dissector,
-							FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
+			if (dissector_uses_key(flow_dissector,
+					       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
 				key_keyid = skb_flow_dissector_target(flow_dissector,
 								      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
 								      target_container);
@@ -310,21 +329,17 @@
 					htonl(MPLS_LS_LABEL_MASK);
 			}
 
-			key_basic->n_proto = proto;
-			key_basic->ip_proto = ip_proto;
-			key_control->thoff = (u16)nhoff;
-
-			return true;
+			goto out_good;
 		}
 
-		return true;
+		goto out_good;
 	}
 
 	case htons(ETH_P_FCOE):
 		key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
 		/* fall through */
 	default:
-		return false;
+		goto out_bad;
 	}
 
 ip_proto_again:
@@ -337,7 +352,7 @@
 
 		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
 		if (!hdr)
-			return false;
+			goto out_bad;
 		/*
 		 * Only look inside GRE if version zero and no
 		 * routing
@@ -357,10 +372,10 @@
 						     data, hlen, &_keyid);
 
 			if (!keyid)
-				return false;
+				goto out_bad;
 
-			if (skb_flow_dissector_uses_key(flow_dissector,
-							FLOW_DISSECTOR_KEY_GRE_KEYID)) {
+			if (dissector_uses_key(flow_dissector,
+					       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
 				key_keyid = skb_flow_dissector_target(flow_dissector,
 								      FLOW_DISSECTOR_KEY_GRE_KEYID,
 								      target_container);
@@ -378,10 +393,15 @@
 						   sizeof(_eth),
 						   data, hlen, &_eth);
 			if (!eth)
-				return false;
+				goto out_bad;
 			proto = eth->h_proto;
 			nhoff += sizeof(*eth);
 		}
+
+		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+			goto out_good;
+
 		goto again;
 	}
 	case NEXTHDR_HOP:
@@ -395,18 +415,53 @@
 		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
 					      data, hlen, &_opthdr);
 		if (!opthdr)
-			return false;
+			goto out_bad;
 
 		ip_proto = opthdr[0];
 		nhoff += (opthdr[1] + 1) << 3;
 
 		goto ip_proto_again;
 	}
+	case NEXTHDR_FRAGMENT: {
+		struct frag_hdr _fh, *fh;
+
+		if (proto != htons(ETH_P_IPV6))
+			break;
+
+		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
+					  data, hlen, &_fh);
+
+		if (!fh)
+			goto out_bad;
+
+		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
+
+		nhoff += sizeof(_fh);
+
+		if (!(fh->frag_off & htons(IP6_OFFSET))) {
+			key_control->flags |= FLOW_DIS_FIRST_FRAG;
+			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
+				ip_proto = fh->nexthdr;
+				goto ip_proto_again;
+			}
+		}
+		goto out_good;
+	}
 	case IPPROTO_IPIP:
 		proto = htons(ETH_P_IP);
+
+		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+			goto out_good;
+
 		goto ip;
 	case IPPROTO_IPV6:
 		proto = htons(ETH_P_IPV6);
+
+		key_control->flags |= FLOW_DIS_ENCAPSULATION;
+		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
+			goto out_good;
+
 		goto ipv6;
 	case IPPROTO_MPLS:
 		proto = htons(ETH_P_MPLS_UC);
@@ -415,12 +470,8 @@
 		break;
 	}
 
-	key_basic->n_proto = proto;
-	key_basic->ip_proto = ip_proto;
-	key_control->thoff = (u16)nhoff;
-
-	if (skb_flow_dissector_uses_key(flow_dissector,
-					FLOW_DISSECTOR_KEY_PORTS)) {
+	if (dissector_uses_key(flow_dissector,
+			       FLOW_DISSECTOR_KEY_PORTS)) {
 		key_ports = skb_flow_dissector_target(flow_dissector,
 						      FLOW_DISSECTOR_KEY_PORTS,
 						      target_container);
@@ -428,7 +479,15 @@
 							data, hlen);
 	}
 
-	return true;
+out_good:
+	ret = true;
+
+out_bad:
+	key_basic->n_proto = proto;
+	key_basic->ip_proto = ip_proto;
+	key_control->thoff = (u16)nhoff;
+
+	return ret;
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
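
The rewrite above funnels every exit through the out_good/out_bad labels so key_basic->n_proto, key_basic->ip_proto and key_control->thoff are recorded even on a partial parse, and callers can now bound the walk with the new FLOW_DISSECTOR_F_STOP_AT_L3 / _STOP_AT_FLOW_LABEL / _STOP_AT_ENCAP / _PARSE_1ST_FRAG flags. A minimal sketch of a caller using the flags argument, assuming only the symbols visible in the hunks above (the wrapper and its error handling are illustrative):

static u32 example_l3_hash(const struct sk_buff *skb)
{
	struct flow_keys keys;

	memset(&keys, 0, sizeof(keys));
	/* stop after the network layer; ports are left untouched */
	if (!skb_flow_dissect_flow_keys(skb, &keys,
					FLOW_DISSECTOR_F_STOP_AT_L3))
		return 0;

	return flow_hash_from_keys(&keys);
}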
 
@@ -438,18 +497,21 @@
 	net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_words(u32 *words, u32 length, u32 keyval)
+static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
+					     u32 keyval)
 {
 	return jhash2(words, length, keyval);
 }
 
-static inline void *flow_keys_hash_start(struct flow_keys *flow)
+static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
 {
+	const void *p = flow;
+
 	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
-	return (void *)flow + FLOW_KEYS_HASH_OFFSET;
+	return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
 }
 
-static inline size_t flow_keys_hash_length(struct flow_keys *flow)
+static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 {
 	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
 	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
@@ -539,7 +601,7 @@
 
 	__flow_hash_consistentify(keys);
 
-	hash = __flow_hash_words((u32 *)flow_keys_hash_start(keys),
+	hash = __flow_hash_words(flow_keys_hash_start(keys),
 				 flow_keys_hash_length(keys), keyval);
 	if (!hash)
 		hash = 1;
@@ -557,8 +619,8 @@
 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
 				  struct flow_keys *keys, u32 keyval)
 {
-	if (!skb_flow_dissect_flow_keys(skb, keys))
-		return 0;
+	skb_flow_dissect_flow_keys(skb, keys,
+				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
 	return __flow_hash_from_keys(keys, keyval);
 }
@@ -602,17 +664,11 @@
 void __skb_get_hash(struct sk_buff *skb)
 {
 	struct flow_keys keys;
-	u32 hash;
 
 	__flow_hash_secret_init();
 
-	hash = ___skb_get_hash(skb, &keys, hashrnd);
-	if (!hash)
-		return;
-	if (keys.ports.ports)
-		skb->l4_hash = 1;
-	skb->sw_hash = 1;
-	skb->hash = hash;
+	__skb_set_sw_hash(skb, ___skb_get_hash(skb, &keys, hashrnd),
+			  flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -624,6 +680,51 @@
 }
 EXPORT_SYMBOL(skb_get_hash_perturb);
 
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
+{
+	struct flow_keys keys;
+
+	memset(&keys, 0, sizeof(keys));
+
+	memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
+	       sizeof(keys.addrs.v6addrs.src));
+	memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
+	       sizeof(keys.addrs.v6addrs.dst));
+	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+	keys.ports.src = fl6->fl6_sport;
+	keys.ports.dst = fl6->fl6_dport;
+	keys.keyid.keyid = fl6->fl6_gre_key;
+	keys.tags.flow_label = (__force u32)fl6->flowlabel;
+	keys.basic.ip_proto = fl6->flowi6_proto;
+
+	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
+			  flow_keys_have_l4(&keys));
+
+	return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi6);
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
+{
+	struct flow_keys keys;
+
+	memset(&keys, 0, sizeof(keys));
+
+	keys.addrs.v4addrs.src = fl4->saddr;
+	keys.addrs.v4addrs.dst = fl4->daddr;
+	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+	keys.ports.src = fl4->fl4_sport;
+	keys.ports.dst = fl4->fl4_dport;
+	keys.keyid.keyid = fl4->fl4_gre_key;
+	keys.basic.ip_proto = fl4->flowi4_proto;
+
+	__skb_set_sw_hash(skb, flow_hash_from_keys(&keys),
+			  flow_keys_have_l4(&keys));
+
+	return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi4);
+
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 		   const struct flow_keys *keys, int hlen)
 {
@@ -683,12 +784,47 @@
 {
 	struct flow_keys keys;
 
-	if (!skb_flow_dissect_flow_keys(skb, &keys))
+	if (!skb_flow_dissect_flow_keys(skb, &keys, 0))
 		return 0;
 
 	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
 }
 
+__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
+{
+	memset(keys, 0, sizeof(*keys));
+
+	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
+	    sizeof(keys->addrs.v6addrs.src));
+	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
+	    sizeof(keys->addrs.v6addrs.dst));
+	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+	keys->ports.src = fl6->fl6_sport;
+	keys->ports.dst = fl6->fl6_dport;
+	keys->keyid.keyid = fl6->fl6_gre_key;
+	keys->tags.flow_label = (__force u32)fl6->flowlabel;
+	keys->basic.ip_proto = fl6->flowi6_proto;
+
+	return flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(__get_hash_from_flowi6);
+
+__u32 __get_hash_from_flowi4(const struct flowi4 *fl4, struct flow_keys *keys)
+{
+	memset(keys, 0, sizeof(*keys));
+
+	keys->addrs.v4addrs.src = fl4->saddr;
+	keys->addrs.v4addrs.dst = fl4->daddr;
+	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+	keys->ports.src = fl4->fl4_sport;
+	keys->ports.dst = fl4->fl4_dport;
+	keys->keyid.keyid = fl4->fl4_gre_key;
+	keys->basic.ip_proto = fl4->flowi4_proto;
+
+	return flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(__get_hash_from_flowi4);
+
 static const struct flow_dissector_key flow_keys_dissector_keys[] = {
 	{
 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
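
The flowi-based helpers added above (__skb_get_hash_flowi4/6 and __get_hash_from_flowi4/6) let a transmit path seed skb->hash straight from an already-resolved flowi instead of re-dissecting the packet, using the same __skb_set_sw_hash()/flow_keys_have_l4() pair that __skb_get_hash() now uses. A hedged sketch of how an output path might call one of them; the wrapper function is hypothetical:

static void example_set_tx_hash(struct sk_buff *skb, const struct flowi4 *fl4)
{
	/* only compute a software hash if nothing set one earlier */
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash_flowi4(skb, fl4);
}
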
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
new file mode 100644
index 0000000..dfb1a9c
--- /dev/null
+++ b/net/core/lwtunnel.c
@@ -0,0 +1,249 @@
+/*
+ * lwtunnel	Infrastructure for lightweight tunnels like MPLS
+ *
+ * Authors:	Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/lwtunnel.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <net/lwtunnel.h>
+#include <net/rtnetlink.h>
+#include <net/ip6_fib.h>
+
+struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
+{
+	struct lwtunnel_state *lws;
+
+	lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);
+
+	return lws;
+}
+EXPORT_SYMBOL(lwtunnel_state_alloc);
+
+static const struct lwtunnel_encap_ops __rcu *
+		lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+			   unsigned int num)
+{
+	if (num > LWTUNNEL_ENCAP_MAX)
+		return -ERANGE;
+
+	return !cmpxchg((const struct lwtunnel_encap_ops **)
+			&lwtun_encaps[num],
+			NULL, ops) ? 0 : -1;
+}
+EXPORT_SYMBOL(lwtunnel_encap_add_ops);
+
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
+			   unsigned int encap_type)
+{
+	int ret;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return -ERANGE;
+
+	ret = (cmpxchg((const struct lwtunnel_encap_ops **)
+		       &lwtun_encaps[encap_type],
+		       ops, NULL) == ops) ? 0 : -1;
+
+	synchronize_net();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_encap_del_ops);
+
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+			 struct nlattr *encap, unsigned int family,
+			 const void *cfg, struct lwtunnel_state **lws)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = -EINVAL;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE ||
+	    encap_type > LWTUNNEL_ENCAP_MAX)
+		return ret;
+
+	ret = -EOPNOTSUPP;
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[encap_type]);
+	if (likely(ops && ops->build_state))
+		ret = ops->build_state(dev, encap, family, cfg, lws);
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+{
+	const struct lwtunnel_encap_ops *ops;
+	struct nlattr *nest;
+	int ret = -EINVAL;
+
+	if (!lwtstate)
+		return 0;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	ret = -EOPNOTSUPP;
+	nest = nla_nest_start(skb, RTA_ENCAP);
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->fill_encap))
+		ret = ops->fill_encap(skb, lwtstate);
+	rcu_read_unlock();
+
+	if (ret)
+		goto nla_put_failure;
+	nla_nest_end(skb, nest);
+	ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+	if (ret)
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nest);
+
+	return (ret == -EOPNOTSUPP ? 0 : ret);
+}
+EXPORT_SYMBOL(lwtunnel_fill_encap);
+
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = 0;
+
+	if (!lwtstate)
+		return 0;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->get_encap_size))
+		ret = nla_total_size(ops->get_encap_size(lwtstate));
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_get_encap_size);
+
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	const struct lwtunnel_encap_ops *ops;
+	int ret = 0;
+
+	if (!a && !b)
+		return 0;
+
+	if (!a || !b)
+		return 1;
+
+	if (a->type != b->type)
+		return 1;
+
+	if (a->type == LWTUNNEL_ENCAP_NONE ||
+	    a->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[a->type]);
+	if (likely(ops && ops->cmp_encap))
+		ret = ops->cmp_encap(a, b);
+	rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_cmp_encap);
+
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct lwtunnel_encap_ops *ops;
+	struct lwtunnel_state *lwtstate;
+	int ret = -EINVAL;
+
+	if (!dst)
+		goto drop;
+	lwtstate = dst->lwtstate;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	ret = -EOPNOTSUPP;
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->output))
+		ret = ops->output(sk, skb);
+	rcu_read_unlock();
+
+	if (ret == -EOPNOTSUPP)
+		goto drop;
+
+	return ret;
+
+drop:
+	kfree_skb(skb);
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_output);
+
+int lwtunnel_input(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	const struct lwtunnel_encap_ops *ops;
+	struct lwtunnel_state *lwtstate;
+	int ret = -EINVAL;
+
+	if (!dst)
+		goto drop;
+	lwtstate = dst->lwtstate;
+
+	if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+	    lwtstate->type > LWTUNNEL_ENCAP_MAX)
+		return 0;
+
+	ret = -EOPNOTSUPP;
+	rcu_read_lock();
+	ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+	if (likely(ops && ops->input))
+		ret = ops->input(skb);
+	rcu_read_unlock();
+
+	if (ret == -EOPNOTSUPP)
+		goto drop;
+
+	return ret;
+
+drop:
+	kfree_skb(skb);
+
+	return ret;
+}
+EXPORT_SYMBOL(lwtunnel_input);
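
The new lwtunnel core dispatches every operation through the per-type lwtun_encaps[] ops table, so an encapsulation provider only has to register a struct lwtunnel_encap_ops. A rough sketch of such a registration, assuming the callback signatures above; the ops instance, the build_state body and the use of the LWTUNNEL_ENCAP_MPLS slot are illustrative:

static int example_build_state(struct net_device *dev, struct nlattr *encap,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts)
{
	struct lwtunnel_state *newts = lwtunnel_state_alloc(0);

	if (!newts)
		return -ENOMEM;

	newts->type = LWTUNNEL_ENCAP_MPLS;
	*ts = newts;
	return 0;
}

static const struct lwtunnel_encap_ops example_encap_ops = {
	.build_state	= example_build_state,
};

static int __init example_encap_init(void)
{
	return lwtunnel_encap_add_ops(&example_encap_ops, LWTUNNEL_ENCAP_MPLS);
}
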
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 84195da..2b515ba 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -274,8 +274,12 @@
 	    (entries >= tbl->gc_thresh2 &&
 	     time_after(now, tbl->last_flush + 5 * HZ))) {
 		if (!neigh_forced_gc(tbl) &&
-		    entries >= tbl->gc_thresh3)
+		    entries >= tbl->gc_thresh3) {
+			net_info_ratelimited("%s: neighbor table overflow!\n",
+					     tbl->id);
+			NEIGH_CACHE_STAT_INC(tbl, table_fulls);
 			goto out_entries;
+		}
 	}
 
 	n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
@@ -1849,6 +1853,7 @@
 			ndst.ndts_rcv_probes_ucast	+= st->rcv_probes_ucast;
 			ndst.ndts_periodic_gc_runs	+= st->periodic_gc_runs;
 			ndst.ndts_forced_gc_runs	+= st->forced_gc_runs;
+			ndst.ndts_table_fulls		+= st->table_fulls;
 		}
 
 		if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
@@ -2717,12 +2722,12 @@
 	struct neigh_statistics *st = v;
 
 	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards\n");
+		seq_printf(seq, "entries  allocs destroys hash_grows  lookups hits  res_failed  rcv_probes_mcast rcv_probes_ucast  periodic_gc_runs forced_gc_runs unresolved_discards table_fulls\n");
 		return 0;
 	}
 
 	seq_printf(seq, "%08x  %08lx %08lx %08lx  %08lx %08lx  %08lx  "
-			"%08lx %08lx  %08lx %08lx %08lx\n",
+			"%08lx %08lx  %08lx %08lx %08lx %08lx\n",
 		   atomic_read(&tbl->entries),
 
 		   st->allocs,
@@ -2739,7 +2744,8 @@
 
 		   st->periodic_gc_runs,
 		   st->forced_gc_runs,
-		   st->unres_discards
+		   st->unres_discards,
+		   st->table_fulls
 		   );
 
 	return 0;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 18b34d7..b279077 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -404,6 +404,19 @@
 NETDEVICE_SHOW(group, fmt_dec);
 static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
 
+static int change_proto_down(struct net_device *dev, unsigned long proto_down)
+{
+	return dev_change_proto_down(dev, (bool) proto_down);
+}
+
+static ssize_t proto_down_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	return netdev_store(dev, attr, buf, len, change_proto_down);
+}
+NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
 static ssize_t phys_port_id_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -501,6 +514,7 @@
 	&dev_attr_phys_port_id.attr,
 	&dev_attr_phys_port_name.attr,
 	&dev_attr_phys_switch_id.attr,
+	&dev_attr_proto_down.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
@@ -675,7 +689,7 @@
 	struct rps_map *old_map, *map;
 	cpumask_var_t mask;
 	int err, cpu, i;
-	static DEFINE_SPINLOCK(rps_map_lock);
+	static DEFINE_MUTEX(rps_map_mutex);
 
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
@@ -708,18 +722,21 @@
 		map = NULL;
 	}
 
-	spin_lock(&rps_map_lock);
+	mutex_lock(&rps_map_mutex);
 	old_map = rcu_dereference_protected(queue->rps_map,
-					    lockdep_is_held(&rps_map_lock));
+					    mutex_is_locked(&rps_map_mutex));
 	rcu_assign_pointer(queue->rps_map, map);
-	spin_unlock(&rps_map_lock);
 
 	if (map)
 		static_key_slow_inc(&rps_needed);
-	if (old_map) {
-		kfree_rcu(old_map, rcu);
+	if (old_map)
 		static_key_slow_dec(&rps_needed);
-	}
+
+	mutex_unlock(&rps_map_mutex);
+
+	if (old_map)
+		kfree_rcu(old_map, rcu);
+
 	free_cpumask_var(mask);
 	return len;
 }
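
Two independent changes land in net-sysfs.c: a writable proto_down attribute backed by dev_change_proto_down(), and a spinlock-to-mutex conversion for the RPS map update so the rps_needed static-key inc/dec (which may sleep) can sit inside the critical section while kfree_rcu() moves outside it. A hedged sketch of a driver-side caller of the new helper; the fault handler and its locking context are hypothetical:

static void example_port_fault(struct net_device *dev)
{
	/* ask the stack to treat this port as protocol-down */
	rtnl_lock();
	dev_change_proto_down(dev, true);
	rtnl_unlock();
}
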
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index ba3c012..adef015 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -31,6 +31,7 @@
 #include <trace/events/napi.h>
 #include <trace/events/sock.h>
 #include <trace/events/udp.h>
+#include <trace/events/fib.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c126a87..6aa3db8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -380,6 +380,8 @@
 	static atomic_t ip_ident;
 	struct ipv6hdr *ip6h;
 
+	WARN_ON_ONCE(!irqs_disabled());
+
 	udp_len = len + sizeof(*udph);
 	if (np->ipv6)
 		ip_len = udp_len + sizeof(*ip6h);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1cbd209..de8d5cc 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -273,7 +273,6 @@
 
 	/* runtime counters relating to clone_skb */
 
-	__u64 allocated_skbs;
 	__u32 clone_count;
 	int last_ok;		/* Was last skb sent?
 				 * Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-	pkt_dev->pkt_overhead = 0;
+	pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
 	pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
 	pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
 	pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,7 @@
 	} else {
 		 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
 	}
+	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
 	return skb;
 }
@@ -3397,7 +3397,6 @@
 			return;
 		}
 		pkt_dev->last_pkt_size = pkt_dev->skb->len;
-		pkt_dev->allocated_skbs++;
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
 
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index dc004b1..a466821 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -678,6 +678,12 @@
 					continue;
 				if (nla_put_string(skb, i + 1, name))
 					goto nla_put_failure;
+			} else if (i == RTAX_FEATURES - 1) {
+				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;
+
+				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
+				if (nla_put_u32(skb, i + 1, user_features))
+					goto nla_put_failure;
 			} else {
 				if (nla_put_u32(skb, i + 1, metrics[i]))
 					goto nla_put_failure;
@@ -896,7 +902,9 @@
 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
 	       + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
-	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
+	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+	       + nla_total_size(1); /* IFLA_PROTO_DOWN */
+
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1082,7 +1090,8 @@
 	    (dev->ifalias &&
 	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
 	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
-			atomic_read(&dev->carrier_changes)))
+			atomic_read(&dev->carrier_changes)) ||
+	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
 		goto nla_put_failure;
 
 	if (1) {
@@ -1319,6 +1328,7 @@
 	[IFLA_CARRIER_CHANGES]	= { .type = NLA_U32 },  /* ignored */
 	[IFLA_PHYS_SWITCH_ID]	= { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
 	[IFLA_LINK_NETNSID]	= { .type = NLA_S32 },
+	[IFLA_PROTO_DOWN]	= { .type = NLA_U8 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1861,6 +1871,14 @@
 	}
 	err = 0;
 
+	if (tb[IFLA_PROTO_DOWN]) {
+		err = dev_change_proto_down(dev,
+					    nla_get_u8(tb[IFLA_PROTO_DOWN]));
+		if (err)
+			goto errout;
+		status |= DO_SETLINK_NOTIFY;
+	}
+
 errout:
 	if (status & DO_SETLINK_MODIFIED) {
 		if (status & DO_SETLINK_NOTIFY)
@@ -1951,16 +1969,30 @@
 	return 0;
 }
 
+int rtnl_delete_link(struct net_device *dev)
+{
+	const struct rtnl_link_ops *ops;
+	LIST_HEAD(list_kill);
+
+	ops = dev->rtnl_link_ops;
+	if (!ops || !ops->dellink)
+		return -EOPNOTSUPP;
+
+	ops->dellink(dev, &list_kill);
+	unregister_netdevice_many(&list_kill);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtnl_delete_link);
+
 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct net *net = sock_net(skb->sk);
-	const struct rtnl_link_ops *ops;
 	struct net_device *dev;
 	struct ifinfomsg *ifm;
 	char ifname[IFNAMSIZ];
 	struct nlattr *tb[IFLA_MAX+1];
 	int err;
-	LIST_HEAD(list_kill);
 
 	err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
 	if (err < 0)
@@ -1982,13 +2014,7 @@
 	if (!dev)
 		return -ENODEV;
 
-	ops = dev->rtnl_link_ops;
-	if (!ops || !ops->dellink)
-		return -EOPNOTSUPP;
-
-	ops->dellink(dev, &list_kill);
-	unregister_netdevice_many(&list_kill);
-	return 0;
+	return rtnl_delete_link(dev);
 }
 
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
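
rtnetlink.c carries the netlink half of proto_down (IFLA_PROTO_DOWN in the policy, fill and do_setlink paths) and factors the dellink sequence into an exported rtnl_delete_link(). A minimal sketch of another kernel path reusing that export instead of open-coding dellink plus unregister; the caller is hypothetical and assumes rtnl is already held:

static int example_remove_port(struct net_device *dev)
{
	ASSERT_RTNL();

	return rtnl_delete_link(dev);
}
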
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7b84330..dad4dd3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -392,7 +392,7 @@
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
- *	@length: length to allocate
+ *	@len: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
  *
  *	Allocate a new &sk_buff and assign it a usage count of one. The
@@ -461,7 +461,7 @@
 /**
  *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
  *	@napi: napi instance this buffer was allocated for
- *	@length: length to allocate
+ *	@len: length to allocate
  *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
  *
  *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
diff --git a/net/core/sock.c b/net/core/sock.c
index 193901d..ca2984a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2078,7 +2078,7 @@
 EXPORT_SYMBOL(__sk_mem_schedule);
 
 /**
- *	__sk_reclaim - reclaim memory_allocated
+ *	__sk_mem_reclaim - reclaim memory_allocated
  *	@sk: socket
  *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
  */
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index d79866c..817622f 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -90,6 +90,9 @@
 		goto out;
 
 	fprog = filter->prog->orig_prog;
+	if (!fprog)
+		goto out;
+
 	flen = bpf_classic_proglen(fprog);
 
 	attr = nla_reserve(skb, attrtype, flen);
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 43d3dd6..42689d5 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -60,11 +60,15 @@
 	struct phy_device *phydev;
 	unsigned int type;
 
+	if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
+		return false;
+
 	if (skb_headroom(skb) < ETH_HLEN)
 		return false;
+
 	__skb_push(skb, ETH_HLEN);
 
-	type = classify(skb);
+	type = ptp_classify_raw(skb);
 
 	__skb_pull(skb, ETH_HLEN);
 
diff --git a/net/core/utils.c b/net/core/utils.c
index a7732a0..3dffce9 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -301,7 +301,7 @@
 EXPORT_SYMBOL(in6_pton);
 
 void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
-			      __be32 from, __be32 to, int pseudohdr)
+			      __be32 from, __be32 to, bool pseudohdr)
 {
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		csum_replace4(sum, from, to);
@@ -318,7 +318,7 @@
 
 void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
 			       const __be32 *from, const __be32 *to,
-			       int pseudohdr)
+			       bool pseudohdr)
 {
 	__be32 diff[] = {
 		~from[0], ~from[1], ~from[2], ~from[3],
@@ -336,6 +336,19 @@
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
 
+void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+				     __wsum diff, bool pseudohdr)
+{
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		*sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
+			skb->csum = ~csum_add(diff, ~skb->csum);
+	} else if (pseudohdr) {
+		*sum = ~csum_fold(csum_add(diff, csum_unfold(*sum)));
+	}
+}
+EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
+
 struct __net_random_once_work {
 	struct work_struct work;
 	struct static_key *key;
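
inet_proto_csum_replace_by_diff() complements the existing replace4/replace16 helpers for callers that already hold a checksum difference rather than an old/new pair, e.g. when rewriting more than 32 bits at once. A hedged sketch of updating an L4 checksum after rewriting a 64-bit header field; the wrapper and the assumption that the field is covered by the pseudo-header are illustrative:

static void example_rewrite_addr64(struct sk_buff *skb, __sum16 *check,
				   __be64 *field, __be64 new_val)
{
	__wsum diff;

	/* diff = csum(new) - csum(old), then fold it into *check */
	diff = csum_sub(csum_partial(&new_val, sizeof(new_val), 0),
			csum_partial(field, sizeof(*field), 0));
	*field = new_val;
	inet_proto_csum_replace_by_diff(check, skb, diff, true);
}
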
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index b445d49..76e380076 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -176,6 +176,41 @@
 #endif /* CONFIG_NET_DSA_HWMON */
 
 /* basic switch operations **************************************************/
+static int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct net_device *master)
+{
+	struct dsa_chip_data *cd = ds->pd;
+	struct device_node *port_dn;
+	struct phy_device *phydev;
+	int ret, port, mode;
+
+	for (port = 0; port < DSA_MAX_PORTS; port++) {
+		if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+			continue;
+
+		port_dn = cd->port_dn[port];
+		if (of_phy_is_fixed_link(port_dn)) {
+			ret = of_phy_register_fixed_link(port_dn);
+			if (ret) {
+				netdev_err(master,
+					   "failed to register fixed PHY\n");
+				return ret;
+			}
+			phydev = of_phy_find_device(port_dn);
+
+			mode = of_get_phy_mode(port_dn);
+			if (mode < 0)
+				mode = PHY_INTERFACE_MODE_NA;
+			phydev->interface = mode;
+
+			genphy_config_init(phydev);
+			genphy_read_status(phydev);
+			if (ds->drv->adjust_link)
+				ds->drv->adjust_link(ds, port, phydev);
+		}
+	}
+	return 0;
+}
+
 static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 {
 	struct dsa_switch_driver *drv = ds->drv;
@@ -297,6 +332,14 @@
 		}
 	}
 
+	/* Perform configuration of the CPU and DSA ports */
+	ret = dsa_cpu_dsa_setup(ds, dst->master_netdev);
+	if (ret < 0) {
+		netdev_err(dst->master_netdev, "[%d] : can't configure CPU and DSA ports\n",
+			   index);
+		ret = 0;
+	}
+
 #ifdef CONFIG_NET_DSA_HWMON
 	/* If the switch provides a temperature sensor,
 	 * register with hardware monitoring subsystem.
@@ -554,6 +597,31 @@
 	return 0;
 }
 
+static int dsa_of_probe_links(struct dsa_platform_data *pd,
+			      struct dsa_chip_data *cd,
+			      int chip_index, int port_index,
+			      struct device_node *port,
+			      const char *port_name)
+{
+	struct device_node *link;
+	int link_index;
+	int ret;
+
+	for (link_index = 0;; link_index++) {
+		link = of_parse_phandle(port, "link", link_index);
+		if (!link)
+			break;
+
+		if (!strcmp(port_name, "dsa") && pd->nr_chips > 1) {
+			ret = dsa_of_setup_routing_table(pd, cd, chip_index,
+							 port_index, link);
+			if (ret)
+				return ret;
+		}
+	}
+	return 0;
+}
+
 static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
 {
 	int i;
@@ -573,8 +641,8 @@
 static int dsa_of_probe(struct device *dev)
 {
 	struct device_node *np = dev->of_node;
-	struct device_node *child, *mdio, *ethernet, *port, *link;
-	struct mii_bus *mdio_bus;
+	struct device_node *child, *mdio, *ethernet, *port;
+	struct mii_bus *mdio_bus, *mdio_bus_switch;
 	struct net_device *ethernet_dev;
 	struct dsa_platform_data *pd;
 	struct dsa_chip_data *cd;
@@ -636,6 +704,16 @@
 		if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
 			cd->eeprom_len = eeprom_len;
 
+		mdio = of_parse_phandle(child, "mii-bus", 0);
+		if (mdio) {
+			mdio_bus_switch = of_mdio_find_bus(mdio);
+			if (!mdio_bus_switch) {
+				ret = -EPROBE_DEFER;
+				goto out_free_chip;
+			}
+			cd->host_dev = &mdio_bus_switch->dev;
+		}
+
 		for_each_available_child_of_node(child, port) {
 			port_reg = of_get_property(port, "reg", NULL);
 			if (!port_reg)
@@ -658,15 +736,10 @@
 				goto out_free_chip;
 			}
 
-			link = of_parse_phandle(port, "link", 0);
-
-			if (!strcmp(port_name, "dsa") && link &&
-					pd->nr_chips > 1) {
-				ret = dsa_of_setup_routing_table(pd, cd,
-						chip_index, port_index, link);
-				if (ret)
-					goto out_free_chip;
-			}
+			ret = dsa_of_probe_links(pd, cd, chip_index,
+						 port_index, port, port_name);
+			if (ret)
+				goto out_free_chip;
 
 		}
 	}
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index d5f1f9b..311796c8 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -13,9 +13,10 @@
 
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 
 struct dsa_device_ops {
-	netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
 	int (*rcv)(struct sk_buff *skb, struct net_device *dev,
 		   struct packet_type *pt, struct net_device *orig_dev);
 };
@@ -26,7 +27,7 @@
 	 * switch port.
 	 */
 	struct net_device	*dev;
-	netdev_tx_t		(*xmit)(struct sk_buff *skb,
+	struct sk_buff *	(*xmit)(struct sk_buff *skb,
 					struct net_device *dev);
 
 	/*
@@ -47,6 +48,9 @@
 	int			old_duplex;
 
 	struct net_device	*bridge_dev;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll		*netpoll;
+#endif
 };
 
 /* dsa.c */
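
With the xmit hook now returning a struct sk_buff * instead of transmitting by itself, stats accounting, netpoll handling and the final dev_queue_xmit() all move into dsa_slave_xmit() in slave.c below. A rough sketch of a tagger under the new signature; the 4-byte tag layout is invented for illustration:

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);
	u8 *tag;

	if (skb_cow_head(skb, 4) < 0) {
		kfree_skb(skb);
		return NULL;	/* NULL tells dsa_slave_xmit to stop here */
	}

	skb_push(skb, 4);
	/* keep the MAC addresses in front, tag goes before the EtherType */
	memmove(skb->data, skb->data + 4, 2 * ETH_ALEN);
	tag = skb->data + 2 * ETH_ALEN;
	tag[0] = 0x80;
	tag[1] = p->port;
	tag[2] = 0x00;
	tag[3] = 0x00;

	return skb;
}
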
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 35c47dd..cce9738 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -18,6 +18,7 @@
 #include <net/rtnetlink.h>
 #include <net/switchdev.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
 #include "dsa_priv.h"
 
 /* slave mii_bus handling ***************************************************/
@@ -199,103 +200,212 @@
 	return 0;
 }
 
-static int dsa_slave_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-			     struct net_device *dev,
-			     const unsigned char *addr, u16 vid, u16 nlm_flags)
+static int dsa_bridge_check_vlan_range(struct dsa_switch *ds,
+				       const struct net_device *bridge,
+				       u16 vid_begin, u16 vid_end)
 {
-	struct dsa_slave_priv *p = netdev_priv(dev);
-	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
+	struct dsa_slave_priv *p;
+	struct net_device *dev, *vlan_br;
+	DECLARE_BITMAP(members, DSA_MAX_PORTS);
+	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+	u16 vid;
+	int member, err;
 
-	if (ds->drv->fdb_add)
-		ret = ds->drv->fdb_add(ds, p->port, addr, vid);
+	if (!ds->drv->vlan_getnext || !vid_begin)
+		return -EOPNOTSUPP;
 
-	return ret;
+	vid = vid_begin - 1;
+
+	do {
+		err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+		if (err)
+			break;
+
+		if (vid > vid_end)
+			break;
+
+		member = find_first_bit(members, DSA_MAX_PORTS);
+		if (member == DSA_MAX_PORTS)
+			continue;
+
+		dev = ds->ports[member];
+		p = netdev_priv(dev);
+		vlan_br = p->bridge_dev;
+		if (vlan_br == bridge)
+			continue;
+
+		netdev_dbg(vlan_br, "hardware VLAN %d already in use\n", vid);
+		return -EOPNOTSUPP;
+	} while (vid < vid_end);
+
+	return err == -ENOENT ? 0 : err;
 }
 
-static int dsa_slave_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-			     struct net_device *dev,
-			     const unsigned char *addr, u16 vid)
+static int dsa_slave_port_vlan_add(struct net_device *dev,
+				   struct switchdev_obj *obj)
 {
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
-	int ret = -EOPNOTSUPP;
+	u16 vid;
+	int err;
 
-	if (ds->drv->fdb_del)
-		ret = ds->drv->fdb_del(ds, p->port, addr, vid);
+	switch (obj->trans) {
+	case SWITCHDEV_TRANS_PREPARE:
+		if (!ds->drv->port_vlan_add || !ds->drv->port_pvid_set)
+			return -EOPNOTSUPP;
 
-	return ret;
-}
+		/* If the requested port doesn't belong to the same bridge as
+		 * the VLAN members, fall back to software VLAN (hopefully).
+		 */
+		err = dsa_bridge_check_vlan_range(ds, p->bridge_dev,
+						  vlan->vid_begin,
+						  vlan->vid_end);
+		if (err)
+			return err;
+		break;
+	case SWITCHDEV_TRANS_COMMIT:
+		for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+			err = ds->drv->port_vlan_add(ds, p->port, vid,
+						     vlan->flags &
+						     BRIDGE_VLAN_INFO_UNTAGGED);
+			if (!err && vlan->flags & BRIDGE_VLAN_INFO_PVID)
+				err = ds->drv->port_pvid_set(ds, p->port, vid);
+			if (err)
+				return err;
+		}
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
 
-static int dsa_slave_fill_info(struct net_device *dev, struct sk_buff *skb,
-			       const unsigned char *addr, u16 vid,
-			       bool is_static,
-			       u32 portid, u32 seq, int type,
-			       unsigned int flags)
-{
-	struct nlmsghdr *nlh;
-	struct ndmsg *ndm;
-
-	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
-	if (!nlh)
-		return -EMSGSIZE;
-
-	ndm = nlmsg_data(nlh);
-	ndm->ndm_family	 = AF_BRIDGE;
-	ndm->ndm_pad1    = 0;
-	ndm->ndm_pad2    = 0;
-	ndm->ndm_flags	 = NTF_EXT_LEARNED;
-	ndm->ndm_type	 = 0;
-	ndm->ndm_ifindex = dev->ifindex;
-	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
-
-	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
-		goto nla_put_failure;
-
-	if (vid && nla_put_u16(skb, NDA_VLAN, vid))
-		goto nla_put_failure;
-
-	nlmsg_end(skb, nlh);
 	return 0;
-
-nla_put_failure:
-	nlmsg_cancel(skb, nlh);
-	return -EMSGSIZE;
 }
 
-/* Dump information about entries, in response to GETNEIGH */
-static int dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-			      struct net_device *dev,
-			      struct net_device *filter_dev, int idx)
+static int dsa_slave_port_vlan_del(struct net_device *dev,
+				   struct switchdev_obj *obj)
+{
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	u16 vid;
+	int err;
+
+	if (!ds->drv->port_vlan_del)
+		return -EOPNOTSUPP;
+
+	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+		err = ds->drv->port_vlan_del(ds, p->port, vid);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int dsa_slave_port_vlan_dump(struct net_device *dev,
+				    struct switchdev_obj *obj)
+{
+	struct switchdev_obj_vlan *vlan = &obj->u.vlan;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	DECLARE_BITMAP(members, DSA_MAX_PORTS);
+	DECLARE_BITMAP(untagged, DSA_MAX_PORTS);
+	u16 pvid, vid = 0;
+	int err;
+
+	if (!ds->drv->vlan_getnext || !ds->drv->port_pvid_get)
+		return -EOPNOTSUPP;
+
+	err = ds->drv->port_pvid_get(ds, p->port, &pvid);
+	if (err)
+		return err;
+
+	for (;;) {
+		err = ds->drv->vlan_getnext(ds, &vid, members, untagged);
+		if (err)
+			break;
+
+		if (!test_bit(p->port, members))
+			continue;
+
+		memset(vlan, 0, sizeof(*vlan));
+		vlan->vid_begin = vlan->vid_end = vid;
+
+		if (vid == pvid)
+			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
+
+		if (test_bit(p->port, untagged))
+			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
+
+		err = obj->cb(dev, obj);
+		if (err)
+			break;
+	}
+
+	return err == -ENOENT ? 0 : err;
+}
+
+static int dsa_slave_port_fdb_add(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret = -EOPNOTSUPP;
+
+	if (obj->trans == SWITCHDEV_TRANS_PREPARE)
+		ret = ds->drv->port_fdb_add ? 0 : -EOPNOTSUPP;
+	else if (obj->trans == SWITCHDEV_TRANS_COMMIT)
+		ret = ds->drv->port_fdb_add(ds, p->port, fdb->addr, fdb->vid);
+
+	return ret;
+}
+
+static int dsa_slave_port_fdb_del(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	struct switchdev_obj_fdb *fdb = &obj->u.fdb;
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	int ret = -EOPNOTSUPP;
+
+	if (ds->drv->port_fdb_del)
+		ret = ds->drv->port_fdb_del(ds, p->port, fdb->addr, fdb->vid);
+
+	return ret;
+}
+
+static int dsa_slave_port_fdb_dump(struct net_device *dev,
+				   struct switchdev_obj *obj)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct dsa_switch *ds = p->parent;
 	unsigned char addr[ETH_ALEN] = { 0 };
+	u16 vid = 0;
 	int ret;
 
-	if (!ds->drv->fdb_getnext)
+	if (!ds->drv->port_fdb_getnext)
 		return -EOPNOTSUPP;
 
-	for (; ; idx++) {
+	for (;;) {
 		bool is_static;
 
-		ret = ds->drv->fdb_getnext(ds, p->port, addr, &is_static);
+		ret = ds->drv->port_fdb_getnext(ds, p->port, addr, &vid,
+						&is_static);
 		if (ret < 0)
 			break;
 
-		if (idx < cb->args[0])
-			continue;
+		obj->u.fdb.addr = addr;
+		obj->u.fdb.vid = vid;
+		obj->u.fdb.ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE;
 
-		ret = dsa_slave_fill_info(dev, skb, addr, 0,
-					  is_static,
-					  NETLINK_CB(cb->skb).portid,
-					  cb->nlh->nlmsg_seq,
-					  RTM_NEWNEIGH, NLM_F_MULTI);
+		ret = obj->cb(dev, obj);
 		if (ret < 0)
 			break;
 	}
 
-	return idx;
+	return ret == -ENOENT ? 0 : ret;
 }
 
 static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -363,6 +473,71 @@
 	return ret;
 }
 
+static int dsa_slave_port_obj_add(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	int err;
+
+	/* For the prepare phase, ensure the full set of changes is feasible in
+	 * one go in order to signal a failure properly. If an operation is not
+	 * supported, return -EOPNOTSUPP.
+	 */
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = dsa_slave_port_fdb_add(dev, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = dsa_slave_port_vlan_add(dev, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int dsa_slave_port_obj_del(struct net_device *dev,
+				  struct switchdev_obj *obj)
+{
+	int err;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = dsa_slave_port_fdb_del(dev, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = dsa_slave_port_vlan_del(dev, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int dsa_slave_port_obj_dump(struct net_device *dev,
+				   struct switchdev_obj *obj)
+{
+	int err;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_PORT_FDB:
+		err = dsa_slave_port_fdb_dump(dev, obj);
+		break;
+	case SWITCHDEV_OBJ_PORT_VLAN:
+		err = dsa_slave_port_vlan_dump(dev, obj);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
 static int dsa_slave_bridge_port_join(struct net_device *dev,
 				      struct net_device *br)
 {
@@ -418,24 +593,53 @@
 	return 0;
 }
 
+static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p,
+					       struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	if (p->netpoll)
+		netpoll_send_skb(p->netpoll, skb);
+#else
+	BUG();
+#endif
+	return NETDEV_TX_OK;
+}
+
 static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct sk_buff *nskb;
 
-	return p->xmit(skb, dev);
-}
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
 
-static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
-					struct net_device *dev)
-{
-	struct dsa_slave_priv *p = netdev_priv(dev);
+	/* Transmit function may have to reallocate the original SKB */
+	nskb = p->xmit(skb, dev);
+	if (!nskb)
+		return NETDEV_TX_OK;
 
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
+	/* SKBs for netpoll still need to be mangled with the protocol-specific
+	 * tag to be successfully transmitted
+	 */
+	if (unlikely(netpoll_tx_running(dev)))
+		return dsa_netpoll_send_skb(p, nskb);
+
+	/* Queue the SKB for transmission on the parent interface, but
+	 * do not modify its EtherType
+	 */
+	nskb->dev = p->parent->dst->master_netdev;
+	dev_queue_xmit(nskb);
 
 	return NETDEV_TX_OK;
 }
 
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	/* Just return the original SKB */
+	return skb;
+}
+
 
 /* ethtool operations *******************************************************/
 static int
@@ -665,6 +869,49 @@
 	return ret;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+				   struct netpoll_info *ni)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct dsa_switch *ds = p->parent;
+	struct net_device *master = ds->dst->master_netdev;
+	struct netpoll *netpoll;
+	int err = 0;
+
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+	if (!netpoll)
+		return -ENOMEM;
+
+	err = __netpoll_setup(netpoll, master);
+	if (err) {
+		kfree(netpoll);
+		goto out;
+	}
+
+	p->netpoll = netpoll;
+out:
+	return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+	struct dsa_slave_priv *p = netdev_priv(dev);
+	struct netpoll *netpoll = p->netpoll;
+
+	if (!netpoll)
+		return;
+
+	p->netpoll = NULL;
+
+	__netpoll_free_async(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
+}
+#endif
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.get_settings		= dsa_slave_get_settings,
 	.set_settings		= dsa_slave_set_settings,
@@ -692,16 +939,27 @@
 	.ndo_change_rx_flags	= dsa_slave_change_rx_flags,
 	.ndo_set_rx_mode	= dsa_slave_set_rx_mode,
 	.ndo_set_mac_address	= dsa_slave_set_mac_address,
-	.ndo_fdb_add		= dsa_slave_fdb_add,
-	.ndo_fdb_del		= dsa_slave_fdb_del,
-	.ndo_fdb_dump		= dsa_slave_fdb_dump,
+	.ndo_fdb_add		= switchdev_port_fdb_add,
+	.ndo_fdb_del		= switchdev_port_fdb_del,
+	.ndo_fdb_dump		= switchdev_port_fdb_dump,
 	.ndo_do_ioctl		= dsa_slave_ioctl,
 	.ndo_get_iflink		= dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_setup	= dsa_slave_netpoll_setup,
+	.ndo_netpoll_cleanup	= dsa_slave_netpoll_cleanup,
+	.ndo_poll_controller	= dsa_slave_poll_controller,
+#endif
+	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
+	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
+	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
 };
 
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
 	.switchdev_port_attr_get	= dsa_slave_port_attr_get,
 	.switchdev_port_attr_set	= dsa_slave_port_attr_set,
+	.switchdev_port_obj_add		= dsa_slave_port_obj_add,
+	.switchdev_port_obj_del		= dsa_slave_port_obj_del,
+	.switchdev_port_obj_dump	= dsa_slave_port_obj_dump,
 };
 
 static void dsa_slave_adjust_link(struct net_device *dev)
@@ -889,7 +1147,7 @@
 	slave_dev->features = master->vlan_features;
 	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
 	eth_hw_addr_inherit(slave_dev, master);
-	slave_dev->tx_queue_len = 0;
+	slave_dev->priv_flags |= IFF_NO_QUEUE;
 	slave_dev->netdev_ops = &dsa_slave_netdev_ops;
 	slave_dev->switchdev_ops = &dsa_slave_switchdev_ops;
 
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 83d3572..e2aadb7 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -58,14 +58,11 @@
 #define BRCM_EG_TC_MASK		0x7
 #define BRCM_EG_PID_MASK	0x1f
 
-static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	u8 *brcm_tag;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
 		goto out_free;
 
@@ -87,17 +84,11 @@
 		brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
 	brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
 
-	/* Queue the SKB for transmission on the parent interface, but
-	 * do not modify its EtherType
-	 */
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
-
-	return NETDEV_TX_OK;
+	return skb;
 
 out_free:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return NULL;
 }
 
 static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 2dab270..aa780e4 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -15,14 +15,11 @@
 
 #define DSA_HLEN	4
 
-static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	u8 *dsa_header;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	/*
 	 * Convert the outermost 802.1q tag to a DSA tag for tagged
 	 * packets, or insert a DSA tag between the addresses and
@@ -63,14 +60,11 @@
 		dsa_header[3] = 0x00;
 	}
 
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
-
-	return NETDEV_TX_OK;
+	return skb;
 
 out_free:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return NULL;
 }
 
 static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 9aeda59..2288c80 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -16,14 +16,11 @@
 #define DSA_HLEN	4
 #define EDSA_HLEN	8
 
-static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	u8 *edsa_header;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	/*
 	 * Convert the outermost 802.1q tag to a DSA tag and prepend
 	 * a DSA ethertype field if the packet is tagged, or insert
@@ -76,14 +73,11 @@
 		edsa_header[7] = 0x00;
 	}
 
-	skb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(skb);
-
-	return NETDEV_TX_OK;
+	return skb;
 
 out_free:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return NULL;
 }
 
 static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index e268f9d..d25efc9 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,16 +13,13 @@
 #include <linux/slab.h>
 #include "dsa_priv.h"
 
-static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(dev);
 	struct sk_buff *nskb;
 	int padlen;
 	u8 *trailer;
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
-
 	/*
 	 * We have to make sure that the trailer ends up as the very
 	 * last 4 bytes of the packet.  This means that we have to pad
@@ -36,7 +33,7 @@
 	nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
 	if (nskb == NULL) {
 		kfree_skb(skb);
-		return NETDEV_TX_OK;
+		return NULL;
 	}
 	skb_reserve(nskb, NET_IP_ALIGN);
 
@@ -57,10 +54,7 @@
 	trailer[2] = 0x10;
 	trailer[3] = 0x00;
 
-	nskb->dev = p->parent->dst->master_netdev;
-	dev_queue_xmit(nskb);
-
-	return NETDEV_TX_OK;
+	return nskb;
 }
 
 static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 77e0f0e..d850fdc 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -114,7 +114,7 @@
 EXPORT_SYMBOL(eth_header);
 
 /**
- * eth_get_headlen - determine the the length of header for an ethernet frame
+ * eth_get_headlen - determine the length of header for an ethernet frame
  * @data: pointer to start of frame
  * @len: total length of frame
  *
@@ -132,7 +132,7 @@
 
 	/* parse any remaining L2/L3 headers, check for L4 */
 	if (!skb_flow_dissect_flow_keys_buf(&keys, data, eth->h_proto,
-					    sizeof(*eth), len))
+					    sizeof(*eth), len, 0))
 		return max_t(u32, keys.control.thoff, sizeof(*eth));
 
 	/* parse for any L4 headers */
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 44d2746..35a9788 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -392,7 +392,7 @@
 	dev->header_ops = &hsr_header_ops;
 	dev->netdev_ops = &hsr_device_ops;
 	SET_NETDEV_DEVTYPE(dev, &hsr_type);
-	dev->tx_queue_len = 0;
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	dev->destructor = hsr_dev_destroy;
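
hsr is one of several virtual devices in this series (the DSA slave and 6LoWPAN below do the same) that stop zeroing tx_queue_len and instead set the new IFF_NO_QUEUE flag, which gives the device the noqueue discipline without clobbering tx_queue_len for any qdisc a user might attach later. A minimal sketch of the convention in a setup callback; example_setup() itself is illustrative:

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* purely virtual device, no qdisc queue needed */
	dev->priv_flags |= IFF_NO_QUEUE;
}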
 
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index e50f69d..ea339fa9 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -5,6 +5,7 @@
 
 #include <net/ieee802154_netdev.h>
 #include <net/inet_frag.h>
+#include <net/6lowpan.h>
 
 struct lowpan_create_arg {
 	u16 tag;
@@ -37,26 +38,18 @@
 	}
 }
 
-struct lowpan_dev_record {
-	struct net_device *ldev;
-	struct list_head list;
-};
-
 /* private device info */
 struct lowpan_dev_info {
 	struct net_device	*real_dev; /* real WPAN device ptr */
-	struct mutex		dev_list_mtx; /* mutex for list ops */
 	u16			fragment_tag;
 };
 
 static inline struct
 lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 {
-	return netdev_priv(dev);
+	return (struct lowpan_dev_info *)lowpan_priv(dev)->priv;
 }
 
-extern struct list_head lowpan_devices;
-
 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
 void lowpan_net_frag_exit(void);
 int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index f20a387..953b1c4 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -52,8 +52,7 @@
 
 #include "6lowpan_i.h"
 
-LIST_HEAD(lowpan_devices);
-static int lowpan_open_count;
+static int open_count;
 
 static struct header_ops lowpan_header_ops = {
 	.create	= lowpan_header_create,
@@ -91,7 +90,7 @@
 	dev->hard_header_len	= 2 + 1 + 20 + 14;
 	dev->needed_tailroom	= 2; /* FCS */
 	dev->mtu		= IPV6_MIN_MTU;
-	dev->tx_queue_len	= 0;
+	dev->priv_flags		|= IFF_NO_QUEUE;
 	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
 	dev->watchdog_timeo	= 0;
 
@@ -114,7 +113,6 @@
 			  struct nlattr *tb[], struct nlattr *data[])
 {
 	struct net_device *real_dev;
-	struct lowpan_dev_record *entry;
 	int ret;
 
 	ASSERT_RTNL();
@@ -133,67 +131,52 @@
 		return -EINVAL;
 	}
 
-	lowpan_dev_info(dev)->real_dev = real_dev;
-	mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
-
-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
+	if (real_dev->ieee802154_ptr->lowpan_dev) {
 		dev_put(real_dev);
-		lowpan_dev_info(dev)->real_dev = NULL;
-		return -ENOMEM;
+		return -EBUSY;
 	}
 
-	entry->ldev = dev;
-
+	lowpan_dev_info(dev)->real_dev = real_dev;
 	/* Set the lowpan hardware address to the wpan hardware address. */
 	memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
 
-	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-	INIT_LIST_HEAD(&entry->list);
-	list_add_tail(&entry->list, &lowpan_devices);
-	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
+	lowpan_netdev_setup(dev, LOWPAN_LLTYPE_IEEE802154);
 
 	ret = register_netdevice(dev);
-	if (ret >= 0) {
-		if (!lowpan_open_count)
-			lowpan_rx_init();
-		lowpan_open_count++;
+	if (ret < 0) {
+		dev_put(real_dev);
+		return ret;
 	}
 
-	return ret;
+	real_dev->ieee802154_ptr->lowpan_dev = dev;
+	if (!open_count)
+		lowpan_rx_init();
+
+	open_count++;
+
+	return 0;
 }
 
 static void lowpan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
 	struct net_device *real_dev = lowpan_dev->real_dev;
-	struct lowpan_dev_record *entry, *tmp;
 
 	ASSERT_RTNL();
 
-	lowpan_open_count--;
-	if (!lowpan_open_count)
+	open_count--;
+
+	if (!open_count)
 		lowpan_rx_exit();
 
-	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
-	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-		if (entry->ldev == dev) {
-			list_del(&entry->list);
-			kfree(entry);
-		}
-	}
-	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
-
-	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);
-
-	unregister_netdevice_queue(dev, head);
-
+	real_dev->ieee802154_ptr->lowpan_dev = NULL;
+	unregister_netdevice(dev);
 	dev_put(real_dev);
 }
 
 static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
 	.kind		= "lowpan",
-	.priv_size	= sizeof(struct lowpan_dev_info),
+	.priv_size	= LOWPAN_PRIV_SIZE(sizeof(struct lowpan_dev_info)),
 	.setup		= lowpan_setup,
 	.newlink	= lowpan_newlink,
 	.dellink	= lowpan_dellink,
@@ -214,19 +197,21 @@
 			       unsigned long event, void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-	LIST_HEAD(del_list);
-	struct lowpan_dev_record *entry, *tmp;
 
 	if (dev->type != ARPHRD_IEEE802154)
 		goto out;
 
-	if (event == NETDEV_UNREGISTER) {
-		list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
-			if (lowpan_dev_info(entry->ldev)->real_dev == dev)
-				lowpan_dellink(entry->ldev, &del_list);
-		}
-
-		unregister_netdevice_many(&del_list);
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		/* If the wpan interface is being unregistered, then
+		 * also delete any lowpan interface that belongs to
+		 * the wpan interface.
+		 */
+		if (dev->ieee802154_ptr && dev->ieee802154_ptr->lowpan_dev)
+			lowpan_dellink(dev->ieee802154_ptr->lowpan_dev, NULL);
+		break;
+	default:
+		break;
 	}
 
 out:
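
The 6LoWPAN rework drops the global lowpan_devices list in favour of a single lowpan_dev pointer hung off the wpan interface's ieee802154_ptr, so the receive path becomes a direct handoff instead of a list walk with skb copies. A hedged sketch of the resulting lookup; example_find_lowpan() is illustrative:

static struct net_device *example_find_lowpan(struct net_device *wpan)
{
	if (wpan->type != ARPHRD_IEEE802154 || !wpan->ieee802154_ptr)
		return NULL;

	return wpan->ieee802154_ptr->lowpan_dev;
}
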
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
index 4be1d28..12e1020 100644
--- a/net/ieee802154/6lowpan/rx.c
+++ b/net/ieee802154/6lowpan/rx.c
@@ -15,36 +15,14 @@
 
 #include "6lowpan_i.h"
 
-static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-				      struct net_device *dev)
+static int lowpan_give_skb_to_device(struct sk_buff *skb,
+				     struct net_device *dev)
 {
-	struct lowpan_dev_record *entry;
-	struct sk_buff *skb_cp;
-	int stat = NET_RX_SUCCESS;
-
+	skb->dev = dev->ieee802154_ptr->lowpan_dev;
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->pkt_type = PACKET_HOST;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(entry, &lowpan_devices, list)
-		if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
-			skb_cp = skb_copy(skb, GFP_ATOMIC);
-			if (!skb_cp) {
-				kfree_skb(skb);
-				rcu_read_unlock();
-				return NET_RX_DROP;
-			}
-
-			skb_cp->dev = entry->ldev;
-			stat = netif_rx(skb_cp);
-			if (stat == NET_RX_DROP)
-				break;
-		}
-	rcu_read_unlock();
-
-	consume_skb(skb);
-
-	return stat;
+	return netif_rx(skb);
 }
 
 static int
@@ -89,6 +67,10 @@
 	struct ieee802154_hdr hdr;
 	int ret;
 
+	if (dev->type != ARPHRD_IEEE802154 ||
+	    !dev->ieee802154_ptr->lowpan_dev)
+		goto drop;
+
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
 		goto drop;
@@ -99,9 +81,6 @@
 	if (skb->pkt_type == PACKET_OTHERHOST)
 		goto drop_skb;
 
-	if (dev->type != ARPHRD_IEEE802154)
-		goto drop_skb;
-
 	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
 		goto drop_skb;
 
@@ -109,7 +88,7 @@
 	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
 		/* Pull off the 1-byte of 6lowpan header. */
 		skb_pull(skb, 1);
-		return lowpan_give_skb_to_devices(skb, NULL);
+		return lowpan_give_skb_to_device(skb, dev);
 	} else {
 		switch (skb->data[0] & 0xe0) {
 		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
@@ -117,7 +96,7 @@
 			if (ret < 0)
 				goto drop_skb;
 
-			return lowpan_give_skb_to_devices(skb, NULL);
+			return lowpan_give_skb_to_device(skb, dev);
 		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
 			ret = lowpan_frag_rcv(skb, LOWPAN_DISPATCH_FRAG1);
 			if (ret == 1) {
@@ -125,7 +104,7 @@
 				if (ret < 0)
 					goto drop_skb;
 
-				return lowpan_give_skb_to_devices(skb, NULL);
+				return lowpan_give_skb_to_device(skb, dev);
 			} else if (ret == -1) {
 				return NET_RX_DROP;
 			} else {
@@ -138,7 +117,7 @@
 				if (ret < 0)
 					goto drop_skb;
 
-				return lowpan_give_skb_to_devices(skb, NULL);
+				return lowpan_give_skb_to_device(skb, dev);
 			} else if (ret == -1) {
 				return NET_RX_DROP;
 			} else {
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index 2597abb..f6263fc 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -112,7 +112,7 @@
 
 	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
 	if (IS_ERR(frag))
-		return -PTR_ERR(frag);
+		return PTR_ERR(frag);
 
 	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
 	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);
@@ -224,7 +224,7 @@
 	} else {
 		da.mode = IEEE802154_ADDR_LONG;
 		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
-		cb->ackreq = wpan_dev->frame_retries >= 0;
+		cb->ackreq = wpan_dev->ackreq;
 	}
 
 	return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
index 68f2401..3f89c0a 100644
--- a/net/ieee802154/nl802154.c
+++ b/net/ieee802154/nl802154.c
@@ -230,6 +230,8 @@
 	[NL802154_ATTR_WPAN_PHY_CAPS] = { .type = NLA_NESTED },
 
 	[NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED },
+
+	[NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 },
 };
 
 /* message building helper */
@@ -458,6 +460,7 @@
 	CMD(set_max_csma_backoffs, SET_MAX_CSMA_BACKOFFS);
 	CMD(set_max_frame_retries, SET_MAX_FRAME_RETRIES);
 	CMD(set_lbt_mode, SET_LBT_MODE);
+	CMD(set_ackreq_default, SET_ACKREQ_DEFAULT);
 
 	if (rdev->wpan_phy.flags & WPAN_PHY_FLAG_TXPOWER)
 		CMD(set_tx_power, SET_TX_POWER);
@@ -656,6 +659,10 @@
 	if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt))
 		goto nla_put_failure;
 
+	/* ackreq default behaviour */
+	if (nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq))
+		goto nla_put_failure;
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -1027,7 +1034,7 @@
 	struct cfg802154_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
-	bool mode;
+	int mode;
 
 	if (netif_running(dev))
 		return -EBUSY;
@@ -1035,13 +1042,39 @@
 	if (!info->attrs[NL802154_ATTR_LBT_MODE])
 		return -EINVAL;
 
-	mode = !!nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+	mode = nla_get_u8(info->attrs[NL802154_ATTR_LBT_MODE]);
+
+	if (mode != 0 && mode != 1)
+		return -EINVAL;
+
 	if (!wpan_phy_supported_bool(mode, rdev->wpan_phy.supported.lbt))
 		return -EINVAL;
 
 	return rdev_set_lbt_mode(rdev, wpan_dev, mode);
 }
 
+static int
+nl802154_set_ackreq_default(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg802154_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
+	int ackreq;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!info->attrs[NL802154_ATTR_ACKREQ_DEFAULT])
+		return -EINVAL;
+
+	ackreq = nla_get_u8(info->attrs[NL802154_ATTR_ACKREQ_DEFAULT]);
+
+	if (ackreq != 0 && ackreq != 1)
+		return -EINVAL;
+
+	return rdev_set_ackreq_default(rdev, wpan_dev, ackreq);
+}
+
 #define NL802154_FLAG_NEED_WPAN_PHY	0x01
 #define NL802154_FLAG_NEED_NETDEV	0x02
 #define NL802154_FLAG_NEED_RTNL		0x04
@@ -1248,6 +1281,14 @@
 		.internal_flags = NL802154_FLAG_NEED_NETDEV |
 				  NL802154_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL802154_CMD_SET_ACKREQ_DEFAULT,
+		.doit = nl802154_set_ackreq_default,
+		.policy = nl802154_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL802154_FLAG_NEED_NETDEV |
+				  NL802154_FLAG_NEED_RTNL,
+	},
 };
 
 /* initialisation/exit functions */
diff --git a/net/ieee802154/rdev-ops.h b/net/ieee802154/rdev-ops.h
index b2155a1..03b3575 100644
--- a/net/ieee802154/rdev-ops.h
+++ b/net/ieee802154/rdev-ops.h
@@ -24,6 +24,26 @@
 }
 
 static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+	int ret;
+	trace_802154_rdev_suspend(&rdev->wpan_phy);
+	ret = rdev->ops->suspend(&rdev->wpan_phy);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+	int ret;
+	trace_802154_rdev_resume(&rdev->wpan_phy);
+	ret = rdev->ops->resume(&rdev->wpan_phy);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
+static inline int
 rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
 		      unsigned char name_assign_type,
 		      enum nl802154_iftype type, __le64 extended_addr)
@@ -175,4 +195,17 @@
 	return ret;
 }
 
+static inline int
+rdev_set_ackreq_default(struct cfg802154_registered_device *rdev,
+			struct wpan_dev *wpan_dev, bool ackreq)
+{
+	int ret;
+
+	trace_802154_rdev_set_ackreq_default(&rdev->wpan_phy, wpan_dev,
+					     ackreq);
+	ret = rdev->ops->set_ackreq_default(&rdev->wpan_phy, wpan_dev, ackreq);
+	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+	return ret;
+}
+
 #endif /* __CFG802154_RDEV_OPS */
diff --git a/net/ieee802154/sysfs.c b/net/ieee802154/sysfs.c
index 133b4280..bd88525 100644
--- a/net/ieee802154/sysfs.c
+++ b/net/ieee802154/sysfs.c
@@ -14,11 +14,13 @@
  */
 
 #include <linux/device.h>
+#include <linux/rtnetlink.h>
 
 #include <net/cfg802154.h>
 
 #include "core.h"
 #include "sysfs.h"
+#include "rdev-ops.h"
 
 static inline struct cfg802154_registered_device *
 dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@
 };
 ATTRIBUTE_GROUPS(pmib);
 
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+	struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+	int ret = 0;
+
+	if (rdev->ops->suspend) {
+		rtnl_lock();
+		ret = rdev_suspend(rdev);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+	struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+	int ret = 0;
+
+	if (rdev->ops->resume) {
+		rtnl_lock();
+		ret = rdev_resume(rdev);
+		rtnl_unlock();
+	}
+
+	return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
 struct class wpan_phy_class = {
 	.name = "ieee802154",
 	.dev_release = wpan_phy_release,
 	.dev_groups = pmib_groups,
+	.pm = WPAN_PHY_PM_OPS,
 };
 
 int wpan_phy_sysfs_init(void)
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
index 9b5f0eb..9a471e4 100644
--- a/net/ieee802154/trace.h
+++ b/net/ieee802154/trace.h
@@ -40,6 +40,28 @@
  *			rdev->ops traces		     *
  *************************************************************/
 
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+	TP_PROTO(struct wpan_phy *wpan_phy),
+	TP_ARGS(wpan_phy),
+	TP_STRUCT__entry(
+		WPAN_PHY_ENTRY
+	),
+	TP_fast_assign(
+		WPAN_PHY_ASSIGN;
+	),
+	TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+	TP_PROTO(struct wpan_phy *wpan_phy),
+	TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+	TP_PROTO(struct wpan_phy *wpan_phy),
+	TP_ARGS(wpan_phy)
+);
+
 TRACE_EVENT(802154_rdev_add_virtual_intf,
 	TP_PROTO(struct wpan_phy *wpan_phy, char *name,
 		 enum nl802154_iftype type, __le64 extended_addr),
@@ -253,6 +275,25 @@
 		WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
 );
 
+TRACE_EVENT(802154_rdev_set_ackreq_default,
+	TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+		 bool ackreq),
+	TP_ARGS(wpan_phy, wpan_dev, ackreq),
+	TP_STRUCT__entry(
+		WPAN_PHY_ENTRY
+		WPAN_DEV_ENTRY
+		__field(bool, ackreq)
+	),
+	TP_fast_assign(
+		WPAN_PHY_ASSIGN;
+		WPAN_DEV_ASSIGN;
+		__entry->ackreq = ackreq;
+	),
+	TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+		", ackreq default: %s", WPAN_PHY_PR_ARG,
+		WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->ackreq))
+);
+
 TRACE_EVENT(802154_rdev_return_int,
 	TP_PROTO(struct wpan_phy *wpan_phy, int ret),
 	TP_ARGS(wpan_phy, ret),
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 6fb3c90..416dfa0 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -331,20 +331,6 @@
 	  When this option is enabled IP tunnels can be configured to use
 	  FOU or GUE encapsulation.
 
-config GENEVE_CORE
-	tristate "Generic Network Virtualization Encapsulation library"
-	depends on INET
-	select NET_UDP_TUNNEL
-	---help---
-	This allows one to create Geneve virtual interfaces that provide
-	Layer 2 Networks over Layer 3 Networks. Geneve is often used
-	to tunnel virtual network infrastructure in virtualized environments.
-	For more information see:
-	  http://tools.ietf.org/html/draft-gross-geneve-01
-
-	  To compile this driver as a module, choose M here: the module
-
-
 config INET_AH
 	tristate "IP: AH transformation"
 	select XFRM_ALGO
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index efc43f3..89aacb6 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -57,7 +57,6 @@
 obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
 obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
 obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
-obj-$(CONFIG_GENEVE_CORE) += geneve_core.o
 
 obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
 		      xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9532ee8..1d0c3ad 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -112,12 +112,14 @@
 #include <net/raw.h>
 #include <net/icmp.h>
 #include <net/inet_common.h>
+#include <net/ip_tunnels.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/secure_seq.h>
 #ifdef CONFIG_IP_MROUTE
 #include <linux/mroute.h>
 #endif
+#include <net/vrf.h>
 
 
 /* The inetsw table contains everything that inet_create needs to
@@ -426,6 +428,7 @@
 	struct net *net = sock_net(sk);
 	unsigned short snum;
 	int chk_addr_ret;
+	u32 tb_id = RT_TABLE_LOCAL;
 	int err;
 
 	/* If the socket has its own bind function then use it. (RAW) */
@@ -447,7 +450,8 @@
 			goto out;
 	}
 
-	chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+	tb_id = vrf_dev_table_ifindex(net, sk->sk_bound_dev_if) ? : tb_id;
+	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
 
 	/* Not specified by any standard per-se, however it breaks too
 	 * many applications when removed.  It is unfortunate since
@@ -1448,38 +1452,51 @@
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
+u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+	return  *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt)
 {
 	unsigned long res = 0;
 	int i;
 
 	for_each_possible_cpu(i)
-		res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt);
+		res += snmp_get_cpu_field(mib, i, offt);
 	return res;
 }
 EXPORT_SYMBOL_GPL(snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
+u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
+			 size_t syncp_offset)
+{
+	void *bhptr;
+	struct u64_stats_sync *syncp;
+	u64 v;
+	unsigned int start;
+
+	bhptr = per_cpu_ptr(mib, cpu);
+	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
+	do {
+		start = u64_stats_fetch_begin_irq(syncp);
+		v = *(((u64 *)bhptr) + offt);
+	} while (u64_stats_fetch_retry_irq(syncp, start));
+
+	return v;
+}
+EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
+
 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
 {
 	u64 res = 0;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		void *bhptr;
-		struct u64_stats_sync *syncp;
-		u64 v;
-		unsigned int start;
-
-		bhptr = per_cpu_ptr(mib, cpu);
-		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
-		do {
-			start = u64_stats_fetch_begin_irq(syncp);
-			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_irq(syncp, start));
-
-		res += v;
+		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
 	}
 	return res;
 }
@@ -1780,6 +1797,8 @@
 
 	dev_add_pack(&ip_packet_type);
 
+	ip_tunnel_core_init();
+
 	rc = 0;
 out:
 	return rc;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index ac9a32e..f2a7102 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -360,8 +360,10 @@
 
 	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
 				ahp->icv_trunc_len + seqhi_len);
-	if (!work_iph)
+	if (!work_iph) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	seqhi = (__be32 *)((char *)work_iph + ihl);
 	auth_data = ah_tmp_auth(seqhi, seqhi_len);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 6c8b1fb..30409b7 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -233,7 +233,7 @@
 		return -EINVAL;
 	}
 
-	neigh->type = inet_addr_type(dev_net(dev), addr);
+	neigh->type = inet_addr_type_dev_table(dev_net(dev), dev, addr);
 
 	parms = in_dev->arp_parms;
 	__neigh_parms_put(neigh->parms);
@@ -291,6 +291,40 @@
 	kfree_skb(skb);
 }
 
+/* Create and send an arp packet. */
+static void arp_send_dst(int type, int ptype, __be32 dest_ip,
+			 struct net_device *dev, __be32 src_ip,
+			 const unsigned char *dest_hw,
+			 const unsigned char *src_hw,
+			 const unsigned char *target_hw, struct sk_buff *oskb)
+{
+	struct sk_buff *skb;
+
+	/* arp on this interface. */
+	if (dev->flags & IFF_NOARP)
+		return;
+
+	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
+			 dest_hw, src_hw, target_hw);
+	if (!skb)
+		return;
+
+	if (oskb)
+		skb_dst_copy(skb, oskb);
+
+	arp_xmit(skb);
+}
+
+void arp_send(int type, int ptype, __be32 dest_ip,
+	      struct net_device *dev, __be32 src_ip,
+	      const unsigned char *dest_hw, const unsigned char *src_hw,
+	      const unsigned char *target_hw)
+{
+	arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
+		     target_hw, NULL);
+}
+EXPORT_SYMBOL(arp_send);
+
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
 	__be32 saddr = 0;
@@ -309,7 +343,7 @@
 	switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
 	default:
 	case 0:		/* By default announce any local IP */
-		if (skb && inet_addr_type(dev_net(dev),
+		if (skb && inet_addr_type_dev_table(dev_net(dev), dev,
 					  ip_hdr(skb)->saddr) == RTN_LOCAL)
 			saddr = ip_hdr(skb)->saddr;
 		break;
@@ -317,7 +351,8 @@
 		if (!skb)
 			break;
 		saddr = ip_hdr(skb)->saddr;
-		if (inet_addr_type(dev_net(dev), saddr) == RTN_LOCAL) {
+		if (inet_addr_type_dev_table(dev_net(dev), dev,
+					     saddr) == RTN_LOCAL) {
 			/* saddr should be known to target */
 			if (inet_addr_onlink(in_dev, target, saddr))
 				break;
@@ -346,8 +381,9 @@
 		}
 	}
 
-	arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-		 dst_hw, dev->dev_addr, NULL);
+	arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
+		     dst_hw, dev->dev_addr, NULL,
+		     dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -597,32 +633,6 @@
 EXPORT_SYMBOL(arp_xmit);
 
 /*
- *	Create and send an arp packet.
- */
-void arp_send(int type, int ptype, __be32 dest_ip,
-	      struct net_device *dev, __be32 src_ip,
-	      const unsigned char *dest_hw, const unsigned char *src_hw,
-	      const unsigned char *target_hw)
-{
-	struct sk_buff *skb;
-
-	/*
-	 *	No arp on this interface.
-	 */
-
-	if (dev->flags&IFF_NOARP)
-		return;
-
-	skb = arp_create(type, ptype, dest_ip, dev, src_ip,
-			 dest_hw, src_hw, target_hw);
-	if (!skb)
-		return;
-
-	arp_xmit(skb);
-}
-EXPORT_SYMBOL(arp_send);
-
-/*
  *	Process an arp request.
  */
 
@@ -742,7 +752,7 @@
 	/* Special case: IPv4 duplicate address detection packet (RFC2131) */
 	if (sip == 0) {
 		if (arp->ar_op == htons(ARPOP_REQUEST) &&
-		    inet_addr_type(net, tip) == RTN_LOCAL &&
+		    inet_addr_type_dev_table(net, dev, tip) == RTN_LOCAL &&
 		    !arp_ignore(in_dev, sip, tip))
 			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
 				 dev->dev_addr, sha);
@@ -802,16 +812,18 @@
 	n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
 
 	if (IN_DEV_ARP_ACCEPT(in_dev)) {
+		unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip);
+
 		/* Unsolicited ARP is not accepted by default.
 		   It is possible, that this option should be enabled for some
 		   devices (strip is candidate)
 		 */
 		is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
-			  inet_addr_type(net, sip) == RTN_UNICAST;
+			  addr_type == RTN_UNICAST;
 
 		if (!n &&
 		    ((arp->ar_op == htons(ARPOP_REPLY)  &&
-		      inet_addr_type(net, sip) == RTN_UNICAST) || is_garp))
+				addr_type == RTN_UNICAST) || is_garp))
 			n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
 	}
 
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 574fad9..f915abf 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -74,7 +74,7 @@
 	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
 	sk->sk_state = TCP_ESTABLISHED;
-	inet_set_txhash(sk);
+	sk_set_txhash(sk);
 	inet->inet_id = jiffies;
 
 	sk_dst_set(sk, &rt->dst);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 6bbc549..6fcbd21 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -45,6 +45,8 @@
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
+#include <net/vrf.h>
+#include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
 
@@ -211,12 +213,12 @@
  */
 static inline unsigned int __inet_dev_addr_type(struct net *net,
 						const struct net_device *dev,
-						__be32 addr)
+						__be32 addr, u32 tb_id)
 {
 	struct flowi4		fl4 = { .daddr = addr };
 	struct fib_result	res;
 	unsigned int ret = RTN_BROADCAST;
-	struct fib_table *local_table;
+	struct fib_table *table;
 
 	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
 		return RTN_BROADCAST;
@@ -225,10 +227,10 @@
 
 	rcu_read_lock();
 
-	local_table = fib_get_table(net, RT_TABLE_LOCAL);
-	if (local_table) {
+	table = fib_get_table(net, tb_id);
+	if (table) {
 		ret = RTN_UNICAST;
-		if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
+		if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
 			if (!dev || dev == res.fi->fib_dev)
 				ret = res.type;
 		}
@@ -238,19 +240,40 @@
 	return ret;
 }
 
+unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
+{
+	return __inet_dev_addr_type(net, NULL, addr, tb_id);
+}
+EXPORT_SYMBOL(inet_addr_type_table);
+
 unsigned int inet_addr_type(struct net *net, __be32 addr)
 {
-	return __inet_dev_addr_type(net, NULL, addr);
+	return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
 }
 EXPORT_SYMBOL(inet_addr_type);
 
 unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
 				__be32 addr)
 {
-	return __inet_dev_addr_type(net, dev, addr);
+	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+	return __inet_dev_addr_type(net, dev, addr, rt_table);
 }
 EXPORT_SYMBOL(inet_dev_addr_type);
 
+/* inet_addr_type with dev == NULL but using the table from a dev
+ * if one is associated
+ */
+unsigned int inet_addr_type_dev_table(struct net *net,
+				      const struct net_device *dev,
+				      __be32 addr)
+{
+	u32 rt_table = vrf_dev_table(dev) ? : RT_TABLE_LOCAL;
+
+	return __inet_dev_addr_type(net, NULL, addr, rt_table);
+}
+EXPORT_SYMBOL(inet_addr_type_dev_table);
+
 __be32 fib_compute_spec_dst(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
@@ -280,6 +303,7 @@
 		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 		fl4.flowi4_scope = scope;
 		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+		fl4.flowi4_tun_key.tun_id = 0;
 		if (!fib_lookup(net, &fl4, &res, 0))
 			return FIB_RES_PREFSRC(net, res);
 	} else {
@@ -308,16 +332,21 @@
 	bool dev_match;
 
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
+	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev);
+	if (!fl4.flowi4_iif)
+		fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
 	fl4.daddr = src;
 	fl4.saddr = dst;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_tun_key.tun_id = 0;
 
 	no_addr = idev->ifa_list == NULL;
 
 	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
 
+	trace_fib_validate_source(dev, &fl4);
+
 	net = dev_net(dev);
 	if (fib_lookup(net, &fl4, &res, 0))
 		goto last_resort;
@@ -337,6 +366,9 @@
 		if (nh->nh_dev == dev) {
 			dev_match = true;
 			break;
+		} else if (vrf_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
+			dev_match = true;
+			break;
 		}
 	}
 #else
@@ -494,9 +526,12 @@
 
 	addr = sk_extract_addr(&rt->rt_gateway);
 	if (rt->rt_gateway.sa_family == AF_INET && addr) {
+		unsigned int addr_type;
+
 		cfg->fc_gw = addr;
+		addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
 		if (rt->rt_flags & RTF_GATEWAY &&
-		    inet_addr_type(net, addr) == RTN_UNICAST)
+		    addr_type == RTN_UNICAST)
 			cfg->fc_scope = RT_SCOPE_UNIVERSE;
 	}
 
@@ -591,6 +626,8 @@
 	[RTA_METRICS]		= { .type = NLA_NESTED },
 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
 	[RTA_FLOW]		= { .type = NLA_U32 },
+	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
+	[RTA_ENCAP]		= { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -656,6 +693,12 @@
 		case RTA_TABLE:
 			cfg->fc_table = nla_get_u32(attr);
 			break;
+		case RTA_ENCAP:
+			cfg->fc_encap = attr;
+			break;
+		case RTA_ENCAP_TYPE:
+			cfg->fc_encap_type = nla_get_u16(attr);
+			break;
 		}
 	}
 
@@ -760,6 +803,7 @@
 static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
 {
 	struct net *net = dev_net(ifa->ifa_dev->dev);
+	u32 tb_id = vrf_dev_table_rtnl(ifa->ifa_dev->dev);
 	struct fib_table *tb;
 	struct fib_config cfg = {
 		.fc_protocol = RTPROT_KERNEL,
@@ -774,11 +818,10 @@
 		},
 	};
 
-	if (type == RTN_UNICAST)
-		tb = fib_new_table(net, RT_TABLE_MAIN);
-	else
-		tb = fib_new_table(net, RT_TABLE_LOCAL);
+	if (!tb_id)
+		tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
 
+	tb = fib_new_table(net, tb_id);
 	if (!tb)
 		return;
 
@@ -960,11 +1003,14 @@
 			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
 	}
 	if (!(ok & LOCAL_OK)) {
+		unsigned int addr_type;
+
 		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
 
 		/* Check, that this local address finally disappeared. */
-		if (gone &&
-		    inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
+		addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
+						     ifa->ifa_local);
+		if (gone && addr_type != RTN_LOCAL) {
 			/* And the last, but not the least thing.
 			 * We must flush stray FIB entries.
 			 *
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3a06586..064bd3c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -42,6 +42,7 @@
 #include <net/ip_fib.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
 
 #include "fib_lookup.h"
 
@@ -208,6 +209,7 @@
 	change_nexthops(fi) {
 		if (nexthop_nh->nh_dev)
 			dev_put(nexthop_nh->nh_dev);
+		lwtstate_put(nexthop_nh->nh_lwtstate);
 		free_nh_exceptions(nexthop_nh);
 		rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
 		rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -266,6 +268,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
+		    lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
 		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
 			return -1;
 		onh++;
@@ -366,6 +369,7 @@
 	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
 
 	if (fi->fib_nhs) {
+		size_t nh_encapsize = 0;
 		/* Also handles the special case fib_nhs == 1 */
 
 		/* each nexthop is packed in an attribute */
@@ -374,8 +378,21 @@
 		/* may contain flow and gateway attribute */
 		nhsize += 2 * nla_total_size(4);
 
+		/* grab encap info */
+		for_nexthops(fi) {
+			if (nh->nh_lwtstate) {
+				/* RTA_ENCAP_TYPE */
+				nh_encapsize += lwtunnel_get_encap_size(
+						nh->nh_lwtstate);
+				/* RTA_ENCAP */
+				nh_encapsize +=  nla_total_size(2);
+			}
+		} endfor_nexthops(fi);
+
 		/* all nexthops are packed in a nested attribute */
-		payload += nla_total_size(fi->fib_nhs * nhsize);
+		payload += nla_total_size((fi->fib_nhs * nhsize) +
+					  nh_encapsize);
+
 	}
 
 	return payload;
@@ -421,13 +438,15 @@
 	if (n) {
 		state = n->nud_state;
 		neigh_release(n);
+	} else {
+		return 0;
 	}
 	if (state == NUD_REACHABLE)
 		return 0;
 	if ((state & NUD_VALID) && order != dflt)
 		return 0;
 	if ((state & NUD_VALID) ||
-	    (*last_idx < 0 && order > dflt)) {
+	    (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
 		*last_resort = fi;
 		*last_idx = order;
 	}
@@ -452,6 +471,9 @@
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
 		       int remaining, struct fib_config *cfg)
 {
+	struct net *net = cfg->fc_nlinfo.nl_net;
+	int ret;
+
 	change_nexthops(fi) {
 		int attrlen;
 
@@ -475,18 +497,70 @@
 			if (nexthop_nh->nh_tclassid)
 				fi->fib_net->ipv4.fib_num_tclassid_users++;
 #endif
+			nla = nla_find(attrs, attrlen, RTA_ENCAP);
+			if (nla) {
+				struct lwtunnel_state *lwtstate;
+				struct net_device *dev = NULL;
+				struct nlattr *nla_entype;
+
+				nla_entype = nla_find(attrs, attrlen,
+						      RTA_ENCAP_TYPE);
+				if (!nla_entype)
+					goto err_inval;
+				if (cfg->fc_oif)
+					dev = __dev_get_by_index(net, cfg->fc_oif);
+				ret = lwtunnel_build_state(dev, nla_get_u16(
+							   nla_entype),
+							   nla,  AF_INET, cfg,
+							   &lwtstate);
+				if (ret)
+					goto errout;
+				nexthop_nh->nh_lwtstate =
+					lwtstate_get(lwtstate);
+			}
 		}
 
 		rtnh = rtnh_next(rtnh, &remaining);
 	} endfor_nexthops(fi);
 
 	return 0;
+
+err_inval:
+	ret = -EINVAL;
+
+errout:
+	return ret;
 }
 
 #endif
 
+static int fib_encap_match(struct net *net, u16 encap_type,
+			   struct nlattr *encap,
+			   int oif, const struct fib_nh *nh,
+			   const struct fib_config *cfg)
+{
+	struct lwtunnel_state *lwtstate;
+	struct net_device *dev = NULL;
+	int ret, result = 0;
+
+	if (encap_type == LWTUNNEL_ENCAP_NONE)
+		return 0;
+
+	if (oif)
+		dev = __dev_get_by_index(net, oif);
+	ret = lwtunnel_build_state(dev, encap_type, encap,
+				   AF_INET, cfg, &lwtstate);
+	if (!ret) {
+		result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+		lwtstate_free(lwtstate);
+	}
+
+	return result;
+}
+
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 {
+	struct net *net = cfg->fc_nlinfo.nl_net;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	struct rtnexthop *rtnh;
 	int remaining;
@@ -496,6 +570,12 @@
 		return 1;
 
 	if (cfg->fc_oif || cfg->fc_gw) {
+		if (cfg->fc_encap) {
+			if (fib_encap_match(net, cfg->fc_encap_type,
+					    cfg->fc_encap, cfg->fc_oif,
+					    fi->fib_nh, cfg))
+			    return 1;
+		}
 		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
 		    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
 			return 0;
@@ -585,7 +665,7 @@
 static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
 			struct fib_nh *nh)
 {
-	int err;
+	int err = 0;
 	struct net *net;
 	struct net_device *dev;
 
@@ -594,16 +674,18 @@
 		struct fib_result res;
 
 		if (nh->nh_flags & RTNH_F_ONLINK) {
+			unsigned int addr_type;
 
 			if (cfg->fc_scope >= RT_SCOPE_LINK)
 				return -EINVAL;
-			if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
-				return -EINVAL;
 			dev = __dev_get_by_index(net, nh->nh_oif);
 			if (!dev)
 				return -ENODEV;
 			if (!(dev->flags & IFF_UP))
 				return -ENETDOWN;
+			addr_type = inet_addr_type_dev_table(net, dev, nh->nh_gw);
+			if (addr_type != RTN_UNICAST)
+				return -EINVAL;
 			if (!netif_carrier_ok(dev))
 				nh->nh_flags |= RTNH_F_LINKDOWN;
 			nh->nh_dev = dev;
@@ -613,6 +695,7 @@
 		}
 		rcu_read_lock();
 		{
+			struct fib_table *tbl = NULL;
 			struct flowi4 fl4 = {
 				.daddr = nh->nh_gw,
 				.flowi4_scope = cfg->fc_scope + 1,
@@ -623,8 +706,24 @@
 			/* It is not necessary, but requires a bit of thinking */
 			if (fl4.flowi4_scope < RT_SCOPE_LINK)
 				fl4.flowi4_scope = RT_SCOPE_LINK;
-			err = fib_lookup(net, &fl4, &res,
-					 FIB_LOOKUP_IGNORE_LINKSTATE);
+
+			if (cfg->fc_table)
+				tbl = fib_get_table(net, cfg->fc_table);
+
+			if (tbl)
+				err = fib_table_lookup(tbl, &fl4, &res,
+						       FIB_LOOKUP_IGNORE_LINKSTATE |
+						       FIB_LOOKUP_NOREF);
+
+			/* on error or if no table given do full lookup. This
+			 * is needed for example when nexthops are in the local
+			 * table rather than the given table
+			 */
+			if (!tbl || err) {
+				err = fib_lookup(net, &fl4, &res,
+						 FIB_LOOKUP_IGNORE_LINKSTATE);
+			}
+
 			if (err) {
 				rcu_read_unlock();
 				return err;
@@ -760,6 +859,67 @@
 	return nh->nh_saddr;
 }
 
+static bool fib_valid_prefsrc(struct fib_config *cfg, __be32 fib_prefsrc)
+{
+	if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
+	    fib_prefsrc != cfg->fc_dst) {
+		u32 tb_id = cfg->fc_table;
+
+		if (tb_id == RT_TABLE_MAIN)
+			tb_id = RT_TABLE_LOCAL;
+
+		if (inet_addr_type_table(cfg->fc_nlinfo.nl_net,
+					 fib_prefsrc, tb_id) != RTN_LOCAL) {
+			return false;
+		}
+	}
+	return true;
+}
+
+static int
+fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+{
+	bool ecn_ca = false;
+	struct nlattr *nla;
+	int remaining;
+
+	if (!cfg->fc_mx)
+		return 0;
+
+	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
+		int type = nla_type(nla);
+		u32 val;
+
+		if (!type)
+			continue;
+		if (type > RTAX_MAX)
+			return -EINVAL;
+
+		if (type == RTAX_CC_ALGO) {
+			char tmp[TCP_CA_NAME_MAX];
+
+			nla_strlcpy(tmp, nla, sizeof(tmp));
+			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+			if (val == TCP_CA_UNSPEC)
+				return -EINVAL;
+		} else {
+			val = nla_get_u32(nla);
+		}
+		if (type == RTAX_ADVMSS && val > 65535 - 40)
+			val = 65535 - 40;
+		if (type == RTAX_MTU && val > 65535 - 15)
+			val = 65535 - 15;
+		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+			return -EINVAL;
+		fi->fib_metrics[type - 1] = val;
+	}
+
+	if (ecn_ca)
+		fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+
+	return 0;
+}
+
 struct fib_info *fib_create_info(struct fib_config *cfg)
 {
 	int err;
@@ -832,36 +992,9 @@
 			goto failure;
 	} endfor_nexthops(fi)
 
-	if (cfg->fc_mx) {
-		struct nlattr *nla;
-		int remaining;
-
-		nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
-			int type = nla_type(nla);
-
-			if (type) {
-				u32 val;
-
-				if (type > RTAX_MAX)
-					goto err_inval;
-				if (type == RTAX_CC_ALGO) {
-					char tmp[TCP_CA_NAME_MAX];
-
-					nla_strlcpy(tmp, nla, sizeof(tmp));
-					val = tcp_ca_get_key_by_name(tmp);
-					if (val == TCP_CA_UNSPEC)
-						goto err_inval;
-				} else {
-					val = nla_get_u32(nla);
-				}
-				if (type == RTAX_ADVMSS && val > 65535 - 40)
-					val = 65535 - 40;
-				if (type == RTAX_MTU && val > 65535 - 15)
-					val = 65535 - 15;
-				fi->fib_metrics[type - 1] = val;
-			}
-		}
-	}
+	err = fib_convert_metrics(fi, cfg);
+	if (err)
+		goto failure;
 
 	if (cfg->fc_mp) {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -882,6 +1015,22 @@
 	} else {
 		struct fib_nh *nh = fi->fib_nh;
 
+		if (cfg->fc_encap) {
+			struct lwtunnel_state *lwtstate;
+			struct net_device *dev = NULL;
+
+			if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+				goto err_inval;
+			if (cfg->fc_oif)
+				dev = __dev_get_by_index(net, cfg->fc_oif);
+			err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+						   cfg->fc_encap, AF_INET, cfg,
+						   &lwtstate);
+			if (err)
+				goto failure;
+
+			nh->nh_lwtstate = lwtstate_get(lwtstate);
+		}
 		nh->nh_oif = cfg->fc_oif;
 		nh->nh_gw = cfg->fc_gw;
 		nh->nh_flags = cfg->fc_flags;
@@ -940,12 +1089,8 @@
 			fi->fib_flags |= RTNH_F_LINKDOWN;
 	}
 
-	if (fi->fib_prefsrc) {
-		if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
-		    fi->fib_prefsrc != cfg->fc_dst)
-			if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
-				goto err_inval;
-	}
+	if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc))
+		goto err_inval;
 
 	change_nexthops(fi) {
 		fib_info_update_nh_saddr(net, nexthop_nh);
@@ -1055,6 +1200,8 @@
 		    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
 			goto nla_put_failure;
 #endif
+		if (fi->fib_nh->nh_lwtstate)
+			lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
 	}
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	if (fi->fib_nhs > 1) {
@@ -1090,6 +1237,8 @@
 			    nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
 				goto nla_put_failure;
 #endif
+			if (nh->nh_lwtstate)
+				lwtunnel_fill_encap(skb, nh->nh_lwtstate);
 			/* length of rtnetlink header + attributes */
 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
 		} endfor_nexthops(fi);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b0c6258..26d6ffb 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -81,6 +81,7 @@
 #include <net/sock.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
+#include <trace/events/fib.h>
 #include "fib_lookup.h"
 
 #define MAX_STAT_DEPTH 32
@@ -1278,6 +1279,8 @@
 	unsigned long index;
 	t_key cindex;
 
+	trace_fib_table_lookup(tb->tb_id, flp);
+
 	pn = t->kv;
 	cindex = 0;
 
@@ -1423,8 +1426,11 @@
 			    nh->nh_flags & RTNH_F_LINKDOWN &&
 			    !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE))
 				continue;
-			if (flp->flowi4_oif && flp->flowi4_oif != nh->nh_oif)
-				continue;
+			if (!(flp->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+				if (flp->flowi4_oif &&
+				    flp->flowi4_oif != nh->nh_oif)
+					continue;
+			}
 
 			if (!(fib_flags & FIB_LOOKUP_NOREF))
 				atomic_inc(&fi->fib_clntref);
@@ -1439,6 +1445,8 @@
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 			this_cpu_inc(stats->semantic_match_passed);
 #endif
+			trace_fib_table_lookup_nh(nh);
+
 			return err;
 		}
 	}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 34968cd..e0fcbbb 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -79,7 +79,11 @@
 	__be16 *pd = data;
 	size_t start = ntohs(pd[0]);
 	size_t offset = ntohs(pd[1]);
-	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
+	size_t plen = sizeof(struct udphdr) + hdrlen +
+	    max_t(size_t, offset + sizeof(u16), start);
+
+	if (skb->remcsum_offload)
+		return guehdr;
 
 	if (!pskb_may_pull(skb, plen))
 		return NULL;
@@ -221,29 +225,21 @@
 
 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
 				      struct guehdr *guehdr, void *data,
-				      size_t hdrlen, u8 ipproto,
-				      struct gro_remcsum *grc, bool nopartial)
+				      size_t hdrlen, struct gro_remcsum *grc,
+				      bool nopartial)
 {
 	__be16 *pd = data;
 	size_t start = ntohs(pd[0]);
 	size_t offset = ntohs(pd[1]);
-	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);
 
 	if (skb->remcsum_offload)
-		return NULL;
+		return guehdr;
 
 	if (!NAPI_GRO_CB(skb)->csum_valid)
 		return NULL;
 
-	/* Pull checksum that will be written */
-	if (skb_gro_header_hard(skb, off + plen)) {
-		guehdr = skb_gro_header_slow(skb, off + plen, off);
-		if (!guehdr)
-			return NULL;
-	}
-
-	skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen,
-				start, offset, grc, nopartial);
+	guehdr = skb_gro_remcsum_process(skb, (void *)guehdr, off, hdrlen,
+					 start, offset, grc, nopartial);
 
 	skb->remcsum_offload = 1;
 
@@ -307,10 +303,10 @@
 
 		if (flags & GUE_PFLAG_REMCSUM) {
 			guehdr = gue_gro_remcsum(skb, off, guehdr,
-						 data + doffset, hdrlen,
-						 guehdr->proto_ctype, &grc,
+						 data + doffset, hdrlen, &grc,
 						 !!(fou->flags &
 						    FOU_F_REMCSUM_NOPARTIAL));
+
 			if (!guehdr)
 				goto out;
 
@@ -351,7 +347,7 @@
 	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
-	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
 		goto out_unlock;
 
 	pp = ops->callbacks.gro_receive(head, skb);
@@ -570,7 +566,7 @@
 	if (info->attrs[FOU_ATTR_AF]) {
 		u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);
 
-		if (family != AF_INET && family != AF_INET6)
+		if (family != AF_INET)
 			return -EINVAL;
 
 		cfg->udp_config.family = family;
diff --git a/net/ipv4/geneve_core.c b/net/ipv4/geneve_core.c
deleted file mode 100644
index 311a4ba..0000000
--- a/net/ipv4/geneve_core.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Geneve: Generic Network Virtualization Encapsulation
- *
- * Copyright (c) 2014 Nicira, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <linux/igmp.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
-#include <linux/mutex.h>
-#include <net/arp.h>
-#include <net/ndisc.h>
-#include <net/ip.h>
-#include <net/ip_tunnels.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
-#include <net/geneve.h>
-#include <net/protocol.h>
-#include <net/udp_tunnel.h>
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/ipv6.h>
-#include <net/addrconf.h>
-#include <net/ip6_tunnel.h>
-#include <net/ip6_checksum.h>
-#endif
-
-/* Protects sock_list and refcounts. */
-static DEFINE_MUTEX(geneve_mutex);
-
-/* per-network namespace private data for this module */
-struct geneve_net {
-	struct list_head	sock_list;
-};
-
-static int geneve_net_id;
-
-static struct geneve_sock *geneve_find_sock(struct net *net,
-					    sa_family_t family, __be16 port)
-{
-	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	struct geneve_sock *gs;
-
-	list_for_each_entry(gs, &gn->sock_list, list) {
-		if (inet_sk(gs->sock->sk)->inet_sport == port &&
-		    inet_sk(gs->sock->sk)->sk.sk_family == family)
-			return gs;
-	}
-
-	return NULL;
-}
-
-static void geneve_build_header(struct genevehdr *geneveh,
-				__be16 tun_flags, u8 vni[3],
-				u8 options_len, u8 *options)
-{
-	geneveh->ver = GENEVE_VER;
-	geneveh->opt_len = options_len / 4;
-	geneveh->oam = !!(tun_flags & TUNNEL_OAM);
-	geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
-	geneveh->rsvd1 = 0;
-	memcpy(geneveh->vni, vni, 3);
-	geneveh->proto_type = htons(ETH_P_TEB);
-	geneveh->rsvd2 = 0;
-
-	memcpy(geneveh->options, options, options_len);
-}
-
-/* Transmit a fully formatted Geneve frame.
- *
- * When calling this function. The skb->data should point
- * to the geneve header which is fully formed.
- *
- * This function will add other UDP tunnel headers.
- */
-int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
-		    struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
-		    __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
-		    __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-		    bool csum, bool xnet)
-{
-	struct genevehdr *gnvh;
-	int min_headroom;
-	int err;
-
-	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
-			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
-	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
-		return err;
-	}
-
-	skb = vlan_hwaccel_push_inside(skb);
-	if (unlikely(!skb))
-		return -ENOMEM;
-
-	skb = udp_tunnel_handle_offloads(skb, csum);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
-	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
-
-	skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-	return udp_tunnel_xmit_skb(rt, gs->sock->sk, skb, src, dst,
-				   tos, ttl, df, src_port, dst_port, xnet,
-				   !csum);
-}
-EXPORT_SYMBOL_GPL(geneve_xmit_skb);
-
-static int geneve_hlen(struct genevehdr *gh)
-{
-	return sizeof(*gh) + gh->opt_len * 4;
-}
-
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
-					   struct sk_buff *skb,
-					   struct udp_offload *uoff)
-{
-	struct sk_buff *p, **pp = NULL;
-	struct genevehdr *gh, *gh2;
-	unsigned int hlen, gh_len, off_gnv;
-	const struct packet_offload *ptype;
-	__be16 type;
-	int flush = 1;
-
-	off_gnv = skb_gro_offset(skb);
-	hlen = off_gnv + sizeof(*gh);
-	gh = skb_gro_header_fast(skb, off_gnv);
-	if (skb_gro_header_hard(skb, hlen)) {
-		gh = skb_gro_header_slow(skb, hlen, off_gnv);
-		if (unlikely(!gh))
-			goto out;
-	}
-
-	if (gh->ver != GENEVE_VER || gh->oam)
-		goto out;
-	gh_len = geneve_hlen(gh);
-
-	hlen = off_gnv + gh_len;
-	if (skb_gro_header_hard(skb, hlen)) {
-		gh = skb_gro_header_slow(skb, hlen, off_gnv);
-		if (unlikely(!gh))
-			goto out;
-	}
-
-	flush = 0;
-
-	for (p = *head; p; p = p->next) {
-		if (!NAPI_GRO_CB(p)->same_flow)
-			continue;
-
-		gh2 = (struct genevehdr *)(p->data + off_gnv);
-		if (gh->opt_len != gh2->opt_len ||
-		    memcmp(gh, gh2, gh_len)) {
-			NAPI_GRO_CB(p)->same_flow = 0;
-			continue;
-		}
-	}
-
-	type = gh->proto_type;
-
-	rcu_read_lock();
-	ptype = gro_find_receive_by_type(type);
-	if (!ptype) {
-		flush = 1;
-		goto out_unlock;
-	}
-
-	skb_gro_pull(skb, gh_len);
-	skb_gro_postpull_rcsum(skb, gh, gh_len);
-	pp = ptype->callbacks.gro_receive(head, skb);
-
-out_unlock:
-	rcu_read_unlock();
-out:
-	NAPI_GRO_CB(skb)->flush |= flush;
-
-	return pp;
-}
-
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
-			       struct udp_offload *uoff)
-{
-	struct genevehdr *gh;
-	struct packet_offload *ptype;
-	__be16 type;
-	int gh_len;
-	int err = -ENOSYS;
-
-	udp_tunnel_gro_complete(skb, nhoff);
-
-	gh = (struct genevehdr *)(skb->data + nhoff);
-	gh_len = geneve_hlen(gh);
-	type = gh->proto_type;
-
-	rcu_read_lock();
-	ptype = gro_find_complete_by_type(type);
-	if (ptype)
-		err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
-
-	rcu_read_unlock();
-	return err;
-}
-
-static void geneve_notify_add_rx_port(struct geneve_sock *gs)
-{
-	struct sock *sk = gs->sock->sk;
-	sa_family_t sa_family = sk->sk_family;
-	int err;
-
-	if (sa_family == AF_INET) {
-		err = udp_add_offload(&gs->udp_offloads);
-		if (err)
-			pr_warn("geneve: udp_add_offload failed with status %d\n",
-				err);
-	}
-}
-
-static void geneve_notify_del_rx_port(struct geneve_sock *gs)
-{
-	struct sock *sk = gs->sock->sk;
-	sa_family_t sa_family = sk->sk_family;
-
-	if (sa_family == AF_INET)
-		udp_del_offload(&gs->udp_offloads);
-}
-
-/* Callback from net/ipv4/udp.c to receive packets */
-static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
-{
-	struct genevehdr *geneveh;
-	struct geneve_sock *gs;
-	int opts_len;
-
-	/* Need Geneve and inner Ethernet header to be present */
-	if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
-		goto error;
-
-	/* Return packets with reserved bits set */
-	geneveh = geneve_hdr(skb);
-
-	if (unlikely(geneveh->ver != GENEVE_VER))
-		goto error;
-
-	if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
-		goto error;
-
-	opts_len = geneveh->opt_len * 4;
-	if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
-				 htons(ETH_P_TEB)))
-		goto drop;
-
-	gs = rcu_dereference_sk_user_data(sk);
-	if (!gs)
-		goto drop;
-
-	gs->rcv(gs, skb);
-	return 0;
-
-drop:
-	/* Consume bad packet */
-	kfree_skb(skb);
-	return 0;
-
-error:
-	/* Let the UDP layer deal with the skb */
-	return 1;
-}
-
-static struct socket *geneve_create_sock(struct net *net, bool ipv6,
-					 __be16 port)
-{
-	struct socket *sock;
-	struct udp_port_cfg udp_conf;
-	int err;
-
-	memset(&udp_conf, 0, sizeof(udp_conf));
-
-	if (ipv6) {
-		udp_conf.family = AF_INET6;
-	} else {
-		udp_conf.family = AF_INET;
-		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
-	}
-
-	udp_conf.local_udp_port = port;
-
-	/* Open UDP socket */
-	err = udp_sock_create(net, &udp_conf, &sock);
-	if (err < 0)
-		return ERR_PTR(err);
-
-	return sock;
-}
-
-/* Create new listen socket if needed */
-static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
-						geneve_rcv_t *rcv, void *data,
-						bool ipv6)
-{
-	struct geneve_net *gn = net_generic(net, geneve_net_id);
-	struct geneve_sock *gs;
-	struct socket *sock;
-	struct udp_tunnel_sock_cfg tunnel_cfg;
-
-	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
-	if (!gs)
-		return ERR_PTR(-ENOMEM);
-
-	sock = geneve_create_sock(net, ipv6, port);
-	if (IS_ERR(sock)) {
-		kfree(gs);
-		return ERR_CAST(sock);
-	}
-
-	gs->sock = sock;
-	gs->refcnt = 1;
-	gs->rcv = rcv;
-	gs->rcv_data = data;
-
-	/* Initialize the geneve udp offloads structure */
-	gs->udp_offloads.port = port;
-	gs->udp_offloads.callbacks.gro_receive  = geneve_gro_receive;
-	gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
-	geneve_notify_add_rx_port(gs);
-
-	/* Mark socket as an encapsulation socket */
-	tunnel_cfg.sk_user_data = gs;
-	tunnel_cfg.encap_type = 1;
-	tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
-	tunnel_cfg.encap_destroy = NULL;
-	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
-
-	list_add(&gs->list, &gn->sock_list);
-
-	return gs;
-}
-
-struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
-				    geneve_rcv_t *rcv, void *data,
-				    bool no_share, bool ipv6)
-{
-	struct geneve_sock *gs;
-
-	mutex_lock(&geneve_mutex);
-
-	gs = geneve_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
-	if (gs) {
-		if (!no_share && gs->rcv == rcv)
-			gs->refcnt++;
-		else
-			gs = ERR_PTR(-EBUSY);
-	} else {
-		gs = geneve_socket_create(net, port, rcv, data, ipv6);
-	}
-
-	mutex_unlock(&geneve_mutex);
-
-	return gs;
-}
-EXPORT_SYMBOL_GPL(geneve_sock_add);
-
-void geneve_sock_release(struct geneve_sock *gs)
-{
-	mutex_lock(&geneve_mutex);
-
-	if (--gs->refcnt)
-		goto unlock;
-
-	list_del(&gs->list);
-	geneve_notify_del_rx_port(gs);
-	udp_tunnel_sock_release(gs->sock);
-	kfree_rcu(gs, rcu);
-
-unlock:
-	mutex_unlock(&geneve_mutex);
-}
-EXPORT_SYMBOL_GPL(geneve_sock_release);
-
-static __net_init int geneve_init_net(struct net *net)
-{
-	struct geneve_net *gn = net_generic(net, geneve_net_id);
-
-	INIT_LIST_HEAD(&gn->sock_list);
-
-	return 0;
-}
-
-static struct pernet_operations geneve_net_ops = {
-	.init = geneve_init_net,
-	.id   = &geneve_net_id,
-	.size = sizeof(struct geneve_net),
-};
-
-static int __init geneve_init_module(void)
-{
-	int rc;
-
-	rc = register_pernet_subsys(&geneve_net_ops);
-	if (rc)
-		return rc;
-
-	pr_info("Geneve core logic\n");
-
-	return 0;
-}
-module_init(geneve_init_module);
-
-static void __exit geneve_cleanup_module(void)
-{
-	unregister_pernet_subsys(&geneve_net_ops);
-}
-module_exit(geneve_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
-MODULE_DESCRIPTION("Driver library for GENEVE encapsulated traffic");
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4a7b5b2..d9c552a 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -31,7 +31,6 @@
 #include <net/xfrm.h>
 
 static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
-static struct gre_cisco_protocol __rcu *gre_cisco_proto_list[GRE_IP_PROTO_MAX];
 
 int gre_add_protocol(const struct gre_protocol *proto, u8 version)
 {
@@ -61,197 +60,6 @@
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
-void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
-		      int hdr_len)
-{
-	struct gre_base_hdr *greh;
-
-	skb_push(skb, hdr_len);
-
-	skb_reset_transport_header(skb);
-	greh = (struct gre_base_hdr *)skb->data;
-	greh->flags = tnl_flags_to_gre_flags(tpi->flags);
-	greh->protocol = tpi->proto;
-
-	if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
-		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
-
-		if (tpi->flags&TUNNEL_SEQ) {
-			*ptr = tpi->seq;
-			ptr--;
-		}
-		if (tpi->flags&TUNNEL_KEY) {
-			*ptr = tpi->key;
-			ptr--;
-		}
-		if (tpi->flags&TUNNEL_CSUM &&
-		    !(skb_shinfo(skb)->gso_type &
-		      (SKB_GSO_GRE|SKB_GSO_GRE_CSUM))) {
-			*ptr = 0;
-			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
-								 skb->len, 0));
-		}
-	}
-}
-EXPORT_SYMBOL_GPL(gre_build_header);
-
-static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
-			    bool *csum_err)
-{
-	const struct gre_base_hdr *greh;
-	__be32 *options;
-	int hdr_len;
-
-	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
-		return -EINVAL;
-
-	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
-	hdr_len = ip_gre_calc_hlen(tpi->flags);
-
-	if (!pskb_may_pull(skb, hdr_len))
-		return -EINVAL;
-
-	greh = (struct gre_base_hdr *)skb_transport_header(skb);
-	tpi->proto = greh->protocol;
-
-	options = (__be32 *)(greh + 1);
-	if (greh->flags & GRE_CSUM) {
-		if (skb_checksum_simple_validate(skb)) {
-			*csum_err = true;
-			return -EINVAL;
-		}
-
-		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
-					 null_compute_pseudo);
-
-		options++;
-	}
-
-	if (greh->flags & GRE_KEY) {
-		tpi->key = *options;
-		options++;
-	} else
-		tpi->key = 0;
-
-	if (unlikely(greh->flags & GRE_SEQ)) {
-		tpi->seq = *options;
-		options++;
-	} else
-		tpi->seq = 0;
-
-	/* WCCP version 1 and 2 protocol decoding.
-	 * - Change protocol to IP
-	 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
-	 */
-	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
-		tpi->proto = htons(ETH_P_IP);
-		if ((*(u8 *)options & 0xF0) != 0x40) {
-			hdr_len += 4;
-			if (!pskb_may_pull(skb, hdr_len))
-				return -EINVAL;
-		}
-	}
-
-	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
-}
-
-static int gre_cisco_rcv(struct sk_buff *skb)
-{
-	struct tnl_ptk_info tpi;
-	int i;
-	bool csum_err = false;
-
-#ifdef CONFIG_NET_IPGRE_BROADCAST
-	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
-		/* Looped back packet, drop it! */
-		if (rt_is_output_route(skb_rtable(skb)))
-			goto drop;
-	}
-#endif
-
-	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
-		goto drop;
-
-	rcu_read_lock();
-	for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
-		struct gre_cisco_protocol *proto;
-		int ret;
-
-		proto = rcu_dereference(gre_cisco_proto_list[i]);
-		if (!proto)
-			continue;
-		ret = proto->handler(skb, &tpi);
-		if (ret == PACKET_RCVD) {
-			rcu_read_unlock();
-			return 0;
-		}
-	}
-	rcu_read_unlock();
-
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
-drop:
-	kfree_skb(skb);
-	return 0;
-}
-
-static void gre_cisco_err(struct sk_buff *skb, u32 info)
-{
-	/* All the routers (except for Linux) return only
-	 * 8 bytes of packet payload. It means, that precise relaying of
-	 * ICMP in the real Internet is absolutely infeasible.
-	 *
-	 * Moreover, Cisco "wise men" put GRE key to the third word
-	 * in GRE header. It makes impossible maintaining even soft
-	 * state for keyed
-	 * GRE tunnels with enabled checksum. Tell them "thank you".
-	 *
-	 * Well, I wonder, rfc1812 was written by Cisco employee,
-	 * what the hell these idiots break standards established
-	 * by themselves???
-	 */
-
-	const int type = icmp_hdr(skb)->type;
-	const int code = icmp_hdr(skb)->code;
-	struct tnl_ptk_info tpi;
-	bool csum_err = false;
-	int i;
-
-	if (parse_gre_header(skb, &tpi, &csum_err)) {
-		if (!csum_err)		/* ignore csum errors. */
-			return;
-	}
-
-	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-				skb->dev->ifindex, 0, IPPROTO_GRE, 0);
-		return;
-	}
-	if (type == ICMP_REDIRECT) {
-		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
-				IPPROTO_GRE, 0);
-		return;
-	}
-
-	rcu_read_lock();
-	for (i = 0; i < GRE_IP_PROTO_MAX; i++) {
-		struct gre_cisco_protocol *proto;
-
-		proto = rcu_dereference(gre_cisco_proto_list[i]);
-		if (!proto)
-			continue;
-
-		if (proto->err_handler(skb, info, &tpi) == PACKET_RCVD)
-			goto out;
-
-	}
-out:
-	rcu_read_unlock();
-}
-
 static int gre_rcv(struct sk_buff *skb)
 {
 	const struct gre_protocol *proto;
@@ -302,60 +110,19 @@
 	.netns_ok    = 1,
 };
 
-static const struct gre_protocol ipgre_protocol = {
-	.handler     = gre_cisco_rcv,
-	.err_handler = gre_cisco_err,
-};
-
-int gre_cisco_register(struct gre_cisco_protocol *newp)
-{
-	struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
-					    &gre_cisco_proto_list[newp->priority];
-
-	return (cmpxchg(proto, NULL, newp) == NULL) ? 0 : -EBUSY;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_register);
-
-int gre_cisco_unregister(struct gre_cisco_protocol *del_proto)
-{
-	struct gre_cisco_protocol **proto = (struct gre_cisco_protocol **)
-					    &gre_cisco_proto_list[del_proto->priority];
-	int ret;
-
-	ret = (cmpxchg(proto, del_proto, NULL) == del_proto) ? 0 : -EINVAL;
-
-	if (ret)
-		return ret;
-
-	synchronize_net();
-	return 0;
-}
-EXPORT_SYMBOL_GPL(gre_cisco_unregister);
-
 static int __init gre_init(void)
 {
 	pr_info("GRE over IPv4 demultiplexor driver\n");
 
 	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
 		pr_err("can't add protocol\n");
-		goto err;
+		return -EAGAIN;
 	}
-
-	if (gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) {
-		pr_info("%s: can't add ipgre handler\n", __func__);
-		goto err_gre;
-	}
-
 	return 0;
-err_gre:
-	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
-err:
-	return -EAGAIN;
 }
 
 static void __exit gre_exit(void)
 {
-	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
 }
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f5203fb..79fe05b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -96,6 +96,7 @@
 #include <net/xfrm.h>
 #include <net/inet_common.h>
 #include <net/ip_fib.h>
+#include <net/vrf.h>
 
 /*
  *	Build xmit assembly blocks
@@ -308,9 +309,10 @@
 
 	rc = false;
 	if (icmp_global_allow()) {
+		int vif = vrf_master_ifindex(dst->dev);
 		struct inet_peer *peer;
 
-		peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+		peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
 		rc = inet_peer_xrlim_allow(peer,
 					   net->ipv4.sysctl_icmp_ratelimit);
 		if (peer)
@@ -425,6 +427,7 @@
 	fl4.flowi4_mark = mark;
 	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
 	fl4.flowi4_proto = IPPROTO_ICMP;
+	fl4.flowi4_oif = vrf_master_ifindex(skb->dev) ? : skb->dev->ifindex;
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
@@ -458,6 +461,8 @@
 	fl4->flowi4_proto = IPPROTO_ICMP;
 	fl4->fl4_icmp_type = type;
 	fl4->fl4_icmp_code = code;
+	fl4->flowi4_oif = vrf_master_ifindex(skb_in->dev) ? : skb_in->dev->ifindex;
+
 	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
 	rt = __ip_route_output_key(net, fl4);
 	if (IS_ERR(rt))
@@ -480,7 +485,8 @@
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
+	if (inet_addr_type_dev_table(net, skb_in->dev,
+				     fl4_dec.saddr) == RTN_LOCAL) {
 		rt2 = __ip_route_output_key(net, &fl4_dec);
 		if (IS_ERR(rt2))
 			err = PTR_ERR(rt2);
@@ -496,6 +502,7 @@
 		}
 		/* Ugh! */
 		orefdst = skb_in->_skb_refdst; /* save old refdst */
+		skb_dst_set(skb_in, NULL);
 		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
 				     RT_TOS(tos), rt2->dst.dev);
 
@@ -828,7 +835,7 @@
 	 */
 
 	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses &&
-	    inet_addr_type(net, iph->daddr) == RTN_BROADCAST) {
+	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
 		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
 				     &ip_hdr(skb)->saddr,
 				     icmph->type, icmph->code,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9fdfd9d..d38b8b6 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -110,6 +110,9 @@
 #define IP_MAX_MEMBERSHIPS	20
 #define IP_MAX_MSF		10
 
+/* IGMP reports for link-local multicast groups are enabled by default */
+int sysctl_igmp_llm_reports __read_mostly = 1;
+
 #ifdef CONFIG_IP_MULTICAST
 /* Parameter names and values are taken from igmp-v2-06 draft */
 
@@ -437,6 +440,8 @@
 
 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
 		return skb;
+	if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports)
+		return skb;
 
 	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
 		  type == IGMPV3_MODE_IS_EXCLUDE;
@@ -545,6 +550,9 @@
 		for_each_pmc_rcu(in_dev, pmc) {
 			if (pmc->multiaddr == IGMP_ALL_HOSTS)
 				continue;
+			if (ipv4_is_local_multicast(pmc->multiaddr) &&
+			     !sysctl_igmp_llm_reports)
+				continue;
 			spin_lock_bh(&pmc->lock);
 			if (pmc->sfcount[MCAST_EXCLUDE])
 				type = IGMPV3_MODE_IS_EXCLUDE;
@@ -678,7 +686,11 @@
 
 	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
 		return igmpv3_send_report(in_dev, pmc);
-	else if (type == IGMP_HOST_LEAVE_MESSAGE)
+
+	if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
+		return 0;
+
+	if (type == IGMP_HOST_LEAVE_MESSAGE)
 		dst = IGMP_ALL_ROUTER;
 	else
 		dst = group;
@@ -851,6 +863,8 @@
 
 	if (group == IGMP_ALL_HOSTS)
 		return false;
+	if (ipv4_is_local_multicast(group) && !sysctl_igmp_llm_reports)
+		return false;
 
 	rcu_read_lock();
 	for_each_pmc_rcu(in_dev, im) {
@@ -957,6 +971,9 @@
 			continue;
 		if (im->multiaddr == IGMP_ALL_HOSTS)
 			continue;
+		if (ipv4_is_local_multicast(im->multiaddr) &&
+		    !sysctl_igmp_llm_reports)
+			continue;
 		spin_lock_bh(&im->lock);
 		if (im->tm_running)
 			im->gsquery = im->gsquery && mark;
@@ -1181,6 +1198,8 @@
 #ifdef CONFIG_IP_MULTICAST
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
+	if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
+		return;
 
 	reporter = im->reporter;
 	igmp_stop_timer(im);
@@ -1213,6 +1232,8 @@
 #ifdef CONFIG_IP_MULTICAST
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
+	if (ipv4_is_local_multicast(im->multiaddr) && !sysctl_igmp_llm_reports)
+		return;
 
 	if (in_dev->dead)
 		return;
@@ -1518,6 +1539,9 @@
 	for_each_pmc_rtnl(in_dev, im) {
 		if (im->multiaddr == IGMP_ALL_HOSTS)
 			continue;
+		if (ipv4_is_local_multicast(im->multiaddr) &&
+		    !sysctl_igmp_llm_reports)
+			continue;
 
 		/* a failover is happening and switches
 		 * must be notified immediately
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 0cb9165..8912019 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -343,7 +343,6 @@
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw = NULL;
-	int twrefcnt = 0;
 
 	spin_lock(lock);
 
@@ -371,21 +370,17 @@
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
-		twrefcnt = inet_twsk_unhash(tw);
+		sk_nulls_del_node_init_rcu((struct sock *)tw);
 		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
-	if (twrefcnt)
-		inet_twsk_put(tw);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 	if (twp) {
 		*twp = tw;
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
-		inet_twsk_deschedule(tw);
-
-		inet_twsk_put(tw);
+		inet_twsk_deschedule_put(tw);
 	}
 	return 0;
 
@@ -403,13 +398,12 @@
 					  inet->inet_dport);
 }
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct hlist_nulls_head *list;
 	struct inet_ehash_bucket *head;
 	spinlock_t *lock;
-	int twrefcnt = 0;
 
 	WARN_ON(!sk_unhashed(sk));
 
@@ -420,23 +414,22 @@
 
 	spin_lock(lock);
 	__sk_nulls_add_node_rcu(sk, list);
-	if (tw) {
-		WARN_ON(sk->sk_hash != tw->tw_hash);
-		twrefcnt = inet_twsk_unhash(tw);
+	if (osk) {
+		WARN_ON(sk->sk_hash != osk->sk_hash);
+		sk_nulls_del_node_init_rcu(osk);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-	return twrefcnt;
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct inet_listen_hashbucket *ilb;
 
 	if (sk->sk_state != TCP_LISTEN)
-		return __inet_hash_nolisten(sk, tw);
+		return __inet_hash_nolisten(sk, osk);
 
 	WARN_ON(!sk_unhashed(sk));
 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -445,7 +438,6 @@
 	__sk_nulls_add_node_rcu(sk, &ilb->head);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 	spin_unlock(&ilb->lock);
-	return 0;
 }
 EXPORT_SYMBOL(__inet_hash);
 
@@ -492,7 +484,6 @@
 	struct inet_bind_bucket *tb;
 	int ret;
 	struct net *net = sock_net(sk);
-	int twrefcnt = 1;
 
 	if (!snum) {
 		int i, remaining, low, high, port;
@@ -560,19 +551,14 @@
 		inet_bind_hash(sk, tb, port);
 		if (sk_unhashed(sk)) {
 			inet_sk(sk)->inet_sport = htons(port);
-			twrefcnt += __inet_hash_nolisten(sk, tw);
+			__inet_hash_nolisten(sk, (struct sock *)tw);
 		}
 		if (tw)
-			twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+			inet_twsk_bind_unhash(tw, hinfo);
 		spin_unlock(&head->lock);
 
-		if (tw) {
-			inet_twsk_deschedule(tw);
-			while (twrefcnt) {
-				twrefcnt--;
-				inet_twsk_put(tw);
-			}
-		}
+		if (tw)
+			inet_twsk_deschedule_put(tw);
 
 		ret = 0;
 		goto out;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 2ffbd16..ae22cc2 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -18,28 +18,6 @@
 
 
 /**
- *	inet_twsk_unhash - unhash a timewait socket from established hash
- *	@tw: timewait socket
- *
- *	unhash a timewait socket from established hash, if hashed.
- *	ehash lock must be held by caller.
- *	Returns 1 if caller should call inet_twsk_put() after lock release.
- */
-int inet_twsk_unhash(struct inet_timewait_sock *tw)
-{
-	if (hlist_nulls_unhashed(&tw->tw_node))
-		return 0;
-
-	hlist_nulls_del_rcu(&tw->tw_node);
-	sk_nulls_node_init(&tw->tw_node);
-	/*
-	 * We cannot call inet_twsk_put() ourself under lock,
-	 * caller must call it for us.
-	 */
-	return 1;
-}
-
-/**
  *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
  *	@tw: timewait socket
  *	@hashinfo: hashinfo pointer
@@ -48,35 +26,29 @@
  *	bind hash lock must be held by caller.
- *	Returns 1 if caller should call inet_twsk_put() after lock release.
+ *	The bind-hash reference is dropped here, so the caller no longer
+ *	needs to call inet_twsk_put() after lock release.
  */
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
 			  struct inet_hashinfo *hashinfo)
 {
 	struct inet_bind_bucket *tb = tw->tw_tb;
 
 	if (!tb)
-		return 0;
+		return;
 
 	__hlist_del(&tw->tw_bind_node);
 	tw->tw_tb = NULL;
 	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-	/*
-	 * We cannot call inet_twsk_put() ourself under lock,
-	 * caller must call it for us.
-	 */
-	return 1;
+	__sock_put((struct sock *)tw);
 }
 
 /* Must be called with locally disabled BHs. */
 static void inet_twsk_kill(struct inet_timewait_sock *tw)
 {
 	struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
-	struct inet_bind_hashbucket *bhead;
-	int refcnt;
-	/* Unlink from established hashes. */
 	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+	struct inet_bind_hashbucket *bhead;
 
 	spin_lock(lock);
-	refcnt = inet_twsk_unhash(tw);
+	sk_nulls_del_node_init_rcu((struct sock *)tw);
 	spin_unlock(lock);
 
 	/* Disassociate with bind bucket. */
@@ -84,11 +56,9 @@
 			hashinfo->bhash_size)];
 
 	spin_lock(&bhead->lock);
-	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+	inet_twsk_bind_unhash(tw, hashinfo);
 	spin_unlock(&bhead->lock);
 
-	BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
-	atomic_sub(refcnt, &tw->tw_refcnt);
 	atomic_dec(&tw->tw_dr->tw_count);
 	inet_twsk_put(tw);
 }
@@ -235,13 +205,17 @@
  * tcp_input.c to verify this.
  */
 
-/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+/* This is for handling early-kills of TIME_WAIT sockets.
+ * Warning: consumes a reference.
+ * Caller should not access tw anymore.
+ */
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 {
 	if (del_timer_sync(&tw->tw_timer))
 		inet_twsk_kill(tw);
+	inet_twsk_put(tw);
 }
-EXPORT_SYMBOL(inet_twsk_deschedule);
+EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 {
@@ -311,9 +285,8 @@
 
 			rcu_read_unlock();
 			local_bh_disable();
-			inet_twsk_deschedule(tw);
+			inet_twsk_deschedule_put(tw);
 			local_bh_enable();
-			inet_twsk_put(tw);
 			goto restart_rcu;
 		}
 		/* If the nulls value we got at the end of this lookup is
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 241afd7..86fa458 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -157,22 +157,6 @@
 	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
 }
 
-static int addr_compare(const struct inetpeer_addr *a,
-			const struct inetpeer_addr *b)
-{
-	int i, n = (a->family == AF_INET ? 1 : 4);
-
-	for (i = 0; i < n; i++) {
-		if (a->addr.a6[i] == b->addr.a6[i])
-			continue;
-		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
-			return -1;
-		return 1;
-	}
-
-	return 0;
-}
-
 #define rcu_deref_locked(X, BASE)				\
 	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
 
@@ -188,7 +172,7 @@
 	*stackptr++ = &_base->root;				\
 	for (u = rcu_deref_locked(_base->root, _base);		\
 	     u != peer_avl_empty;) {				\
-		int cmp = addr_compare(_daddr, &u->daddr);	\
+		int cmp = inetpeer_addr_cmp(_daddr, &u->daddr);	\
 		if (cmp == 0)					\
 			break;					\
 		if (cmp == -1)					\
@@ -215,7 +199,7 @@
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		int cmp = addr_compare(daddr, &u->daddr);
+		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
 		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
 			 * deleted (refcnt=-1)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 921138f..fa7f153 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -48,6 +48,7 @@
 #include <linux/inet.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/inet_ecn.h>
+#include <net/vrf.h>
 
 /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
  * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
@@ -77,6 +78,7 @@
 	u8		ecn; /* RFC3168 support */
 	u16		max_df_size; /* largest frag with DF set seen */
 	int             iif;
+	int             vif;   /* VRF device index */
 	unsigned int    rid;
 	struct inet_peer *peer;
 };
@@ -99,6 +101,7 @@
 struct ip4_create_arg {
 	struct iphdr *iph;
 	u32 user;
+	int vif;
 };
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
@@ -127,7 +130,8 @@
 		qp->saddr == arg->iph->saddr &&
 		qp->daddr == arg->iph->daddr &&
 		qp->protocol == arg->iph->protocol &&
-		qp->user == arg->user;
+		qp->user == arg->user &&
+		qp->vif == arg->vif;
 }
 
 static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
@@ -144,9 +148,11 @@
 	qp->ecn = ip4_frag_ecn(arg->iph->tos);
 	qp->saddr = arg->iph->saddr;
 	qp->daddr = arg->iph->daddr;
+	qp->vif = arg->vif;
 	qp->user = arg->user;
 	qp->peer = sysctl_ipfrag_max_dist ?
-		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
+		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+		NULL;
 }
 
 static void ip4_frag_free(struct inet_frag_queue *q)
@@ -244,7 +250,8 @@
 /* Find the correct entry in the "incomplete datagrams" queue for
  * this IP datagram, and create new one, if nothing is found.
  */
-static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
+static struct ipq *ip_find(struct net *net, struct iphdr *iph,
+			   u32 user, int vif)
 {
 	struct inet_frag_queue *q;
 	struct ip4_create_arg arg;
@@ -252,6 +259,7 @@
 
 	arg.iph = iph;
 	arg.user = user;
+	arg.vif = vif;
 
 	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
@@ -522,7 +530,6 @@
 	int len;
 	int ihlen;
 	int err;
-	int sum_truesize;
 	u8 ecn;
 
 	ipq_kill(qp);
@@ -590,32 +597,19 @@
 		add_frag_mem_limit(qp->q.net, clone->truesize);
 	}
 
+	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
 
-	sum_truesize = head->truesize;
-	for (fp = head->next; fp;) {
-		bool headstolen;
-		int delta;
-		struct sk_buff *next = fp->next;
-
-		sum_truesize += fp->truesize;
+	for (fp = head->next; fp; fp = fp->next) {
+		head->data_len += fp->len;
+		head->len += fp->len;
 		if (head->ip_summed != fp->ip_summed)
 			head->ip_summed = CHECKSUM_NONE;
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
-
-		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-			kfree_skb_partial(fp, headstolen);
-		} else {
-			if (!skb_shinfo(head)->frag_list)
-				skb_shinfo(head)->frag_list = fp;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			head->truesize += fp->truesize;
-		}
-		fp = next;
+		head->truesize += fp->truesize;
 	}
-	sub_frag_mem_limit(qp->q.net, sum_truesize);
+	sub_frag_mem_limit(qp->q.net, head->truesize);
 
 	head->next = NULL;
 	head->dev = dev;
@@ -662,14 +656,15 @@
 /* Process an incoming IP datagram fragment. */
 int ip_defrag(struct sk_buff *skb, u32 user)
 {
+	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
+	int vif = vrf_master_ifindex_rcu(dev);
+	struct net *net = dev_net(dev);
 	struct ipq *qp;
-	struct net *net;
 
-	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
 	/* Lookup (or create) queue header */
-	qp = ip_find(net, ip_hdr(skb), user);
+	qp = ip_find(net, ip_hdr(skb), user, vif);
 	if (qp) {
 		int ret;
 
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5fd7064..bd0679d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -25,6 +25,7 @@
 #include <linux/udp.h>
 #include <linux/if_arp.h>
 #include <linux/mroute.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/in6.h>
 #include <linux/inetdevice.h>
@@ -47,6 +48,7 @@
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
 #include <net/gre.h>
+#include <net/dst_metadata.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
@@ -121,8 +123,127 @@
 static int ipgre_net_id __read_mostly;
 static int gre_tap_net_id __read_mostly;
 
-static int ipgre_err(struct sk_buff *skb, u32 info,
-		     const struct tnl_ptk_info *tpi)
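+/* GRE header length implied by the tunnel flags: 4-byte base header plus
+ * 4 bytes for each of checksum, key and sequence number.
+ */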
+static int ip_gre_calc_hlen(__be16 o_flags)
+{
+	int addend = 4;
+
+	if (o_flags & TUNNEL_CSUM)
+		addend += 4;
+	if (o_flags & TUNNEL_KEY)
+		addend += 4;
+	if (o_flags & TUNNEL_SEQ)
+		addend += 4;
+	return addend;
+}
+
+static __be16 gre_flags_to_tnl_flags(__be16 flags)
+{
+	__be16 tflags = 0;
+
+	if (flags & GRE_CSUM)
+		tflags |= TUNNEL_CSUM;
+	if (flags & GRE_ROUTING)
+		tflags |= TUNNEL_ROUTING;
+	if (flags & GRE_KEY)
+		tflags |= TUNNEL_KEY;
+	if (flags & GRE_SEQ)
+		tflags |= TUNNEL_SEQ;
+	if (flags & GRE_STRICT)
+		tflags |= TUNNEL_STRICT;
+	if (flags & GRE_REC)
+		tflags |= TUNNEL_REC;
+	if (flags & GRE_VERSION)
+		tflags |= TUNNEL_VERSION;
+
+	return tflags;
+}
+
+static __be16 tnl_flags_to_gre_flags(__be16 tflags)
+{
+	__be16 flags = 0;
+
+	if (tflags & TUNNEL_CSUM)
+		flags |= GRE_CSUM;
+	if (tflags & TUNNEL_ROUTING)
+		flags |= GRE_ROUTING;
+	if (tflags & TUNNEL_KEY)
+		flags |= GRE_KEY;
+	if (tflags & TUNNEL_SEQ)
+		flags |= GRE_SEQ;
+	if (tflags & TUNNEL_STRICT)
+		flags |= GRE_STRICT;
+	if (tflags & TUNNEL_REC)
+		flags |= GRE_REC;
+	if (tflags & TUNNEL_VERSION)
+		flags |= GRE_VERSION;
+
+	return flags;
+}
+
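+/* Parse the GRE header into @tpi, validating the checksum when present
+ * (*csum_err is set if it is bad), and pull the header off the skb.
+ */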
+static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
+			    bool *csum_err)
+{
+	const struct gre_base_hdr *greh;
+	__be32 *options;
+	int hdr_len;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
+		return -EINVAL;
+
+	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
+	hdr_len = ip_gre_calc_hlen(tpi->flags);
+
+	if (!pskb_may_pull(skb, hdr_len))
+		return -EINVAL;
+
+	greh = (struct gre_base_hdr *)skb_transport_header(skb);
+	tpi->proto = greh->protocol;
+
+	options = (__be32 *)(greh + 1);
+	if (greh->flags & GRE_CSUM) {
+		if (skb_checksum_simple_validate(skb)) {
+			*csum_err = true;
+			return -EINVAL;
+		}
+
+		skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+					 null_compute_pseudo);
+		options++;
+	}
+
+	if (greh->flags & GRE_KEY) {
+		tpi->key = *options;
+		options++;
+	} else {
+		tpi->key = 0;
+	}
+	if (unlikely(greh->flags & GRE_SEQ)) {
+		tpi->seq = *options;
+		options++;
+	} else {
+		tpi->seq = 0;
+	}
+	/* WCCP version 1 and 2 protocol decoding.
+	 * - Change protocol to IP
+	 * - When dealing with WCCPv2, skip the extra 4 bytes in the GRE header
+	 */
+	if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+		tpi->proto = htons(ETH_P_IP);
+		if ((*(u8 *)options & 0xF0) != 0x40) {
+			hdr_len += 4;
+			if (!pskb_may_pull(skb, hdr_len))
+				return -EINVAL;
+		}
+	}
+	return iptunnel_pull_header(skb, hdr_len, tpi->proto);
+}
+
+static void ipgre_err(struct sk_buff *skb, u32 info,
+		      const struct tnl_ptk_info *tpi)
 {
 
 	/* All the routers (except for Linux) return only
@@ -148,14 +269,14 @@
 	switch (type) {
 	default:
 	case ICMP_PARAMETERPROB:
-		return PACKET_RCVD;
+		return;
 
 	case ICMP_DEST_UNREACH:
 		switch (code) {
 		case ICMP_SR_FAILED:
 		case ICMP_PORT_UNREACH:
 			/* Impossible event. */
-			return PACKET_RCVD;
+			return;
 		default:
 			/* All others are translated to HOST_UNREACH.
 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -164,9 +285,10 @@
 			break;
 		}
 		break;
+
 	case ICMP_TIME_EXCEEDED:
 		if (code != ICMP_EXC_TTL)
-			return PACKET_RCVD;
+			return;
 		break;
 
 	case ICMP_REDIRECT:
@@ -183,26 +305,85 @@
 			     iph->daddr, iph->saddr, tpi->key);
 
 	if (!t)
-		return PACKET_REJECT;
+		return;
 
 	if (t->parms.iph.daddr == 0 ||
 	    ipv4_is_multicast(t->parms.iph.daddr))
-		return PACKET_RCVD;
+		return;
 
 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-		return PACKET_RCVD;
+		return;
 
 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
 		t->err_count++;
 	else
 		t->err_count = 1;
 	t->err_time = jiffies;
-	return PACKET_RCVD;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+	/* All the routers (except for Linux) return only
+	 * 8 bytes of packet payload. It means, that precise relaying of
+	 * ICMP in the real Internet is absolutely infeasible.
+	 *
+	 * Moreover, Cisco "wise men" put GRE key to the third word
+	 * in GRE header. It makes impossible maintaining even soft
+	 * state for keyed
+	 * GRE tunnels with enabled checksum. Tell them "thank you".
+	 *
+	 * Well, I wonder, rfc1812 was written by Cisco employee,
+	 * what the hell these idiots break standards established
+	 * by themselves???
+	 */
+
+	const int type = icmp_hdr(skb)->type;
+	const int code = icmp_hdr(skb)->code;
+	struct tnl_ptk_info tpi;
+	bool csum_err = false;
+
+	if (parse_gre_header(skb, &tpi, &csum_err)) {
+		if (!csum_err)		/* ignore csum errors. */
+			return;
+	}
+
+	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+				 skb->dev->ifindex, 0, IPPROTO_GRE, 0);
+		return;
+	}
+	if (type == ICMP_REDIRECT) {
+		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex, 0,
+			      IPPROTO_GRE, 0);
+		return;
+	}
+
+	ipgre_err(skb, info, &tpi);
+}
+
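+/* Place the 32-bit GRE key in the least-significant 32 bits of a
+ * network-order 64-bit tunnel id.
+ */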
+static __be64 key_to_tunnel_id(__be32 key)
+{
+#ifdef __BIG_ENDIAN
+	return (__force __be64)((__force u32)key);
+#else
+	return (__force __be64)((__force u64)key << 32);
+#endif
+}
+
+/* Returns the least-significant 32 bits of a __be64. */
+static __be32 tunnel_id_to_key(__be64 x)
+{
+#ifdef __BIG_ENDIAN
+	return (__force __be32)x;
+#else
+	return (__force __be32)((__force u64)x >> 32);
+#endif
 }
 
 static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
 {
 	struct net *net = dev_net(skb->dev);
+	struct metadata_dst *tun_dst = NULL;
 	struct ip_tunnel_net *itn;
 	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
@@ -218,40 +399,184 @@
 
 	if (tunnel) {
 		skb_pop_mac_header(skb);
-		ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
+		if (tunnel->collect_md) {
+			__be16 flags;
+			__be64 tun_id;
+
+			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
+			tun_id = key_to_tunnel_id(tpi->key);
+			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
+			if (!tun_dst)
+				return PACKET_REJECT;
+		}
+
+		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
 		return PACKET_RCVD;
 	}
 	return PACKET_REJECT;
 }
 
+static int gre_rcv(struct sk_buff *skb)
+{
+	struct tnl_ptk_info tpi;
+	bool csum_err = false;
+
+#ifdef CONFIG_NET_IPGRE_BROADCAST
+	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
+		/* Looped back packet, drop it! */
+		if (rt_is_output_route(skb_rtable(skb)))
+			goto drop;
+	}
+#endif
+
+	if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+		goto drop;
+
+	if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
+		return 0;
+
+	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
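+/* Push a GRE base header onto the skb and fill in the optional checksum,
+ * key and sequence number fields selected by @flags.
+ */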
+static void build_header(struct sk_buff *skb, int hdr_len, __be16 flags,
+			 __be16 proto, __be32 key, __be32 seq)
+{
+	struct gre_base_hdr *greh;
+
+	skb_push(skb, hdr_len);
+
+	skb_reset_transport_header(skb);
+	greh = (struct gre_base_hdr *)skb->data;
+	greh->flags = tnl_flags_to_gre_flags(flags);
+	greh->protocol = proto;
+
+	if (flags & (TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_SEQ)) {
+		__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+		if (flags & TUNNEL_SEQ) {
+			*ptr = seq;
+			ptr--;
+		}
+		if (flags & TUNNEL_KEY) {
+			*ptr = key;
+			ptr--;
+		}
+		if (flags & TUNNEL_CSUM &&
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
+			*ptr = 0;
+			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
+								 skb->len, 0));
+		}
+	}
+}
+
 static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 		       const struct iphdr *tnl_params,
 		       __be16 proto)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	struct tnl_ptk_info tpi;
 
-	tpi.flags = tunnel->parms.o_flags;
-	tpi.proto = proto;
-	tpi.key = tunnel->parms.o_key;
 	if (tunnel->parms.o_flags & TUNNEL_SEQ)
 		tunnel->o_seqno++;
-	tpi.seq = htonl(tunnel->o_seqno);
 
 	/* Push GRE header. */
-	gre_build_header(skb, &tpi, tunnel->tun_hlen);
+	build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
+		     proto, tunnel->parms.o_key, htonl(tunnel->o_seqno));
 
-	skb_set_inner_protocol(skb, tpi.proto);
-
+	skb_set_inner_protocol(skb, proto);
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
+static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+					   bool csum)
+{
+	return iptunnel_handle_offloads(skb, csum,
+					csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+}
+
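+/* Transmit path for flow-based (collect_md) tunnels: the route and GRE
+ * header are built from the per-skb tunnel metadata instead of the
+ * device configuration.
+ */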
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ip_tunnel_info *tun_info;
+	struct net *net = dev_net(dev);
+	const struct ip_tunnel_key *key;
+	struct flowi4 fl;
+	struct rtable *rt;
+	int min_headroom;
+	int tunnel_hlen;
+	__be16 df, flags;
+	int err;
+
+	tun_info = skb_tunnel_info(skb);
+	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
+		     ip_tunnel_info_af(tun_info) != AF_INET))
+		goto err_free_skb;
+
+	key = &tun_info->key;
+	memset(&fl, 0, sizeof(fl));
+	fl.daddr = key->u.ipv4.dst;
+	fl.saddr = key->u.ipv4.src;
+	fl.flowi4_tos = RT_TOS(key->tos);
+	fl.flowi4_mark = skb->mark;
+	fl.flowi4_proto = IPPROTO_GRE;
+
+	rt = ip_route_output_key(net, &fl);
+	if (IS_ERR(rt))
+		goto err_free_skb;
+
+	tunnel_hlen = ip_gre_calc_hlen(key->tun_flags);
+
+	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+			+ tunnel_hlen + sizeof(struct iphdr);
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+				       0, GFP_ATOMIC);
+		if (unlikely(err))
+			goto err_free_rt;
+	}
+
+	/* Push Tunnel header. */
+	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
+	if (IS_ERR(skb)) {
+		skb = NULL;
+		goto err_free_rt;
+	}
+
+	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
+	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+		     tunnel_id_to_key(tun_info->key.tun_id), 0);
+
+	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
+	err = iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
+			    key->u.ipv4.dst, IPPROTO_GRE,
+			    key->tos, key->ttl, df, false);
+	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+	return;
+
+err_free_rt:
+	ip_rt_put(rt);
+err_free_skb:
+	kfree_skb(skb);
+	dev->stats.tx_dropped++;
+}
+
 static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 			      struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *tnl_params;
 
+	if (tunnel->collect_md) {
+		gre_fb_xmit(skb, dev);
+		return NETDEV_TX_OK;
+	}
+
 	if (dev->header_ops) {
 		/* Need space for new headers */
 		if (skb_cow_head(skb, dev->needed_headroom -
@@ -277,7 +602,6 @@
 		goto out;
 
 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
-
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -292,6 +616,11 @@
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 
+	if (tunnel->collect_md) {
+		gre_fb_xmit(skb, dev);
+		return NETDEV_TX_OK;
+	}
+
 	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
 	if (IS_ERR(skb))
 		goto out;
@@ -300,7 +629,6 @@
 		goto free_skb;
 
 	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
-
 	return NETDEV_TX_OK;
 
 free_skb:
@@ -530,10 +858,9 @@
 	return ip_tunnel_init(dev);
 }
 
-static struct gre_cisco_protocol ipgre_protocol = {
-	.handler        = ipgre_rcv,
-	.err_handler    = ipgre_err,
-	.priority       = 0,
+static const struct gre_protocol ipgre_protocol = {
+	.handler     = gre_rcv,
+	.err_handler = gre_err,
 };
 
 static int __net_init ipgre_init_net(struct net *net)
@@ -596,8 +923,10 @@
 	return ipgre_tunnel_validate(tb, data);
 }
 
-static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
-			       struct ip_tunnel_parm *parms)
+static void ipgre_netlink_parms(struct net_device *dev,
+				struct nlattr *data[],
+				struct nlattr *tb[],
+				struct ip_tunnel_parm *parms)
 {
 	memset(parms, 0, sizeof(*parms));
 
@@ -635,6 +964,12 @@
 
 	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
 		parms->iph.frag_off = htons(IP_DF);
+
+	if (data[IFLA_GRE_COLLECT_METADATA]) {
+		struct ip_tunnel *t = netdev_priv(dev);
+
+		t->collect_md = true;
+	}
 }
 
 /* This function returns true when ENCAP attributes are present in the nl msg */
@@ -712,7 +1047,7 @@
 			return err;
 	}
 
-	ipgre_netlink_parms(data, tb, &p);
+	ipgre_netlink_parms(dev, data, tb, &p);
 	return ip_tunnel_newlink(dev, tb, &p);
 }
 
@@ -730,7 +1065,7 @@
 			return err;
 	}
 
-	ipgre_netlink_parms(data, tb, &p);
+	ipgre_netlink_parms(dev, data, tb, &p);
 	return ip_tunnel_changelink(dev, tb, &p);
 }
 
@@ -765,6 +1100,8 @@
 		nla_total_size(2) +
 		/* IFLA_GRE_ENCAP_DPORT */
 		nla_total_size(2) +
+		/* IFLA_GRE_COLLECT_METADATA */
+		nla_total_size(0) +
 		0;
 }
 
@@ -796,6 +1133,11 @@
 			t->encap.flags))
 		goto nla_put_failure;
 
+	if (t->collect_md) {
+		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
+			goto nla_put_failure;
+	}
+
 	return 0;
 
 nla_put_failure:
@@ -817,6 +1159,7 @@
 	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
 	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
+	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
 };
 
 static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
@@ -849,9 +1192,38 @@
 	.get_link_net	= ip_tunnel_get_link_net,
 };
 
+struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
+					u8 name_assign_type)
+{
+	struct nlattr *tb[IFLA_MAX + 1];
+	struct net_device *dev;
+	struct ip_tunnel *t;
+	int err;
+
+	memset(&tb, 0, sizeof(tb));
+
+	dev = rtnl_create_link(net, name, name_assign_type,
+			       &ipgre_tap_ops, tb);
+	if (IS_ERR(dev))
+		return dev;
+
+	/* Configure flow based GRE device. */
+	t = netdev_priv(dev);
+	t->collect_md = true;
+
+	err = ipgre_newlink(net, dev, tb, NULL);
+	if (err < 0)
+		goto out;
+	return dev;
+out:
+	free_netdev(dev);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
+
 static int __net_init ipgre_tap_init_net(struct net *net)
 {
-	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL);
+	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
 }
 
 static void __net_exit ipgre_tap_exit_net(struct net *net)
@@ -881,7 +1253,7 @@
 	if (err < 0)
 		goto pnet_tap_faied;
 
-	err = gre_cisco_register(&ipgre_protocol);
+	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
 	if (err < 0) {
 		pr_info("%s: can't add protocol\n", __func__);
 		goto add_proto_failed;
@@ -900,7 +1272,7 @@
 tap_ops_failed:
 	rtnl_link_unregister(&ipgre_link_ops);
 rtnl_link_failed:
-	gre_cisco_unregister(&ipgre_protocol);
+	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 add_proto_failed:
 	unregister_pernet_device(&ipgre_tap_net_ops);
 pnet_tap_faied:
@@ -912,7 +1284,7 @@
 {
 	rtnl_link_unregister(&ipgre_tap_ops);
 	rtnl_link_unregister(&ipgre_link_ops);
-	gre_cisco_unregister(&ipgre_protocol);
+	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
 	unregister_pernet_device(&ipgre_tap_net_ops);
 	unregister_pernet_device(&ipgre_net_ops);
 }
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 2db4c87..f4fc8a7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,6 +146,7 @@
 #include <net/xfrm.h>
 #include <linux/mroute.h>
 #include <linux/netlink.h>
+#include <net/dst_metadata.h>
 
 /*
  *	Process Router Attention IP option (RFC 2113)
@@ -331,7 +332,7 @@
 	 *	Initialise the virtual path cache for the packet. It describes
 	 *	how the packet travels inside Linux networking.
 	 */
-	if (!skb_dst(skb)) {
+	if (!skb_valid_dst(skb)) {
 		int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 					       iph->tos, skb->dev);
 		if (unlikely(err)) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 6bf89a6..0138fad 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1542,6 +1542,7 @@
 	struct net *net = sock_net(sk);
 	struct sk_buff *nskb;
 	int err;
+	int oif;
 
 	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
 		return;
@@ -1559,7 +1560,11 @@
 			daddr = replyopts.opt.opt.faddr;
 	}
 
-	flowi4_init_output(&fl4, arg->bound_dev_if,
+	oif = arg->bound_dev_if;
+	if (!oif && netif_index_is_vrf(net, skb->skb_iif))
+		oif = skb->skb_iif;
+
+	flowi4_init_output(&fl4, oif,
 			   IP4_REPLY_MARK(net, skb->mark),
 			   RT_TOS(arg->tos),
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 626d9e5..cbb51f3 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -230,10 +230,13 @@
 	if (cand)
 		return cand;
 
+	t = rcu_dereference(itn->collect_md_tun);
+	if (t)
+		return t;
+
 	if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
 		return netdev_priv(itn->fb_tunnel_dev);
 
-
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
@@ -261,11 +264,15 @@
 {
 	struct hlist_head *head = ip_bucket(itn, &t->parms);
 
+	if (t->collect_md)
+		rcu_assign_pointer(itn->collect_md_tun, t);
 	hlist_add_head_rcu(&t->hash_node, head);
 }
 
-static void ip_tunnel_del(struct ip_tunnel *t)
+static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
 {
+	if (t->collect_md)
+		rcu_assign_pointer(itn->collect_md_tun, NULL);
 	hlist_del_init_rcu(&t->hash_node);
 }
 
@@ -419,7 +426,8 @@
 }
 
 int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
-		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
+		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
+		  bool log_ecn_error)
 {
 	struct pcpu_sw_netstats *tstats;
 	const struct iphdr *iph = ip_hdr(skb);
@@ -478,6 +486,9 @@
 		skb->dev = tunnel->dev;
 	}
 
+	if (tun_dst)
+		skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
 	gro_cells_receive(&tunnel->gro_cells, skb);
 	return 0;
 
@@ -806,7 +817,7 @@
 			     struct ip_tunnel_parm *p,
 			     bool set_mtu)
 {
-	ip_tunnel_del(t);
+	ip_tunnel_del(itn, t);
 	t->parms.iph.saddr = p->iph.saddr;
 	t->parms.iph.daddr = p->iph.daddr;
 	t->parms.i_key = p->i_key;
@@ -967,7 +978,7 @@
 	itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
 
 	if (itn->fb_tunnel_dev != dev) {
-		ip_tunnel_del(netdev_priv(dev));
+		ip_tunnel_del(itn, netdev_priv(dev));
 		unregister_netdevice_queue(dev, head);
 	}
 }
@@ -1072,8 +1083,13 @@
 	nt = netdev_priv(dev);
 	itn = net_generic(net, nt->ip_tnl_net_id);
 
-	if (ip_tunnel_find(itn, p, dev->type))
-		return -EEXIST;
+	if (nt->collect_md) {
+		if (rtnl_dereference(itn->collect_md_tun))
+			return -EEXIST;
+	} else {
+		if (ip_tunnel_find(itn, p, dev->type))
+			return -EEXIST;
+	}
 
 	nt->net = net;
 	nt->parms = *p;
@@ -1089,7 +1105,6 @@
 		dev->mtu = mtu;
 
 	ip_tunnel_add(itn, nt);
-
 out:
 	return err;
 }
@@ -1163,6 +1178,10 @@
 	iph->version		= 4;
 	iph->ihl		= 5;
 
+	if (tunnel->collect_md) {
+		dev->features |= NETIF_F_NETNS_LOCAL;
+		netif_keep_dst(dev);
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1176,7 +1195,7 @@
 	itn = net_generic(net, tunnel->ip_tnl_net_id);
 	/* fb_tunnel_dev will be unregistered in net-exit call. */
 	if (itn->fb_tunnel_dev != dev)
-		ip_tunnel_del(netdev_priv(dev));
+		ip_tunnel_del(itn, netdev_priv(dev));
 
 	ip_tunnel_dst_reset_all(tunnel);
 }
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6a51a71..29ed6c5 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/static_key.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -190,3 +191,232 @@
 	return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
+	[LWTUNNEL_IP_ID]	= { .type = NLA_U64 },
+	[LWTUNNEL_IP_DST]	= { .type = NLA_U32 },
+	[LWTUNNEL_IP_SRC]	= { .type = NLA_U32 },
+	[LWTUNNEL_IP_TTL]	= { .type = NLA_U8 },
+	[LWTUNNEL_IP_TOS]	= { .type = NLA_U8 },
+	[LWTUNNEL_IP_SPORT]	= { .type = NLA_U16 },
+	[LWTUNNEL_IP_DPORT]	= { .type = NLA_U16 },
+	[LWTUNNEL_IP_FLAGS]	= { .type = NLA_U16 },
+};
+
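+/* Build an IPv4 lwtunnel state from LWTUNNEL_IP_* attributes; the
+ * resulting ip_tunnel_info is used on the transmit side.
+ */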
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+			      unsigned int family, const void *cfg,
+			      struct lwtunnel_state **ts)
+{
+	struct ip_tunnel_info *tun_info;
+	struct lwtunnel_state *new_state;
+	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
+	if (err < 0)
+		return err;
+
+	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+	if (!new_state)
+		return -ENOMEM;
+
+	new_state->type = LWTUNNEL_ENCAP_IP;
+
+	tun_info = lwt_tun_info(new_state);
+
+	if (tb[LWTUNNEL_IP_ID])
+		tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP_ID]);
+
+	if (tb[LWTUNNEL_IP_DST])
+		tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);
+
+	if (tb[LWTUNNEL_IP_SRC])
+		tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);
+
+	if (tb[LWTUNNEL_IP_TTL])
+		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);
+
+	if (tb[LWTUNNEL_IP_TOS])
+		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);
+
+	if (tb[LWTUNNEL_IP_SPORT])
+		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP_SPORT]);
+
+	if (tb[LWTUNNEL_IP_DPORT])
+		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP_DPORT]);
+
+	if (tb[LWTUNNEL_IP_FLAGS])
+		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP_FLAGS]);
+
+	tun_info->mode = IP_TUNNEL_INFO_TX;
+	tun_info->options_len = 0;
+
+	*ts = new_state;
+
+	return 0;
+}
+
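+/* Dump the tunnel key back out as LWTUNNEL_IP_* attributes. */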
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+				  struct lwtunnel_state *lwtstate)
+{
+	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+	if (nla_put_u64(skb, LWTUNNEL_IP_ID, tun_info->key.tun_id) ||
+	    nla_put_be32(skb, LWTUNNEL_IP_DST, tun_info->key.u.ipv4.dst) ||
+	    nla_put_be32(skb, LWTUNNEL_IP_SRC, tun_info->key.u.ipv4.src) ||
+	    nla_put_u8(skb, LWTUNNEL_IP_TOS, tun_info->key.tos) ||
+	    nla_put_u8(skb, LWTUNNEL_IP_TTL, tun_info->key.ttl) ||
+	    nla_put_u16(skb, LWTUNNEL_IP_SPORT, tun_info->key.tp_src) ||
+	    nla_put_u16(skb, LWTUNNEL_IP_DPORT, tun_info->key.tp_dst) ||
+	    nla_put_u16(skb, LWTUNNEL_IP_FLAGS, tun_info->key.tun_flags))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	return nla_total_size(8)	/* LWTUNNEL_IP_ID */
+		+ nla_total_size(4)	/* LWTUNNEL_IP_DST */
+		+ nla_total_size(4)	/* LWTUNNEL_IP_SRC */
+		+ nla_total_size(1)	/* LWTUNNEL_IP_TOS */
+		+ nla_total_size(1)	/* LWTUNNEL_IP_TTL */
+		+ nla_total_size(2)	/* LWTUNNEL_IP_SPORT */
+		+ nla_total_size(2)	/* LWTUNNEL_IP_DPORT */
+		+ nla_total_size(2);	/* LWTUNNEL_IP_FLAGS */
+}
+
+static int ip_tun_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	return memcmp(lwt_tun_info(a), lwt_tun_info(b),
+		      sizeof(struct ip_tunnel_info));
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+	.build_state = ip_tun_build_state,
+	.fill_encap = ip_tun_fill_encap_info,
+	.get_encap_size = ip_tun_encap_nlsize,
+	.cmp_encap = ip_tun_cmp_encap,
+};
+
+static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
+	[LWTUNNEL_IP6_ID]		= { .type = NLA_U64 },
+	[LWTUNNEL_IP6_DST]		= { .len = sizeof(struct in6_addr) },
+	[LWTUNNEL_IP6_SRC]		= { .len = sizeof(struct in6_addr) },
+	[LWTUNNEL_IP6_HOPLIMIT]		= { .type = NLA_U8 },
+	[LWTUNNEL_IP6_TC]		= { .type = NLA_U8 },
+	[LWTUNNEL_IP6_SPORT]		= { .type = NLA_U16 },
+	[LWTUNNEL_IP6_DPORT]		= { .type = NLA_U16 },
+	[LWTUNNEL_IP6_FLAGS]		= { .type = NLA_U16 },
+};
+
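+/* IPv6 counterpart: build an lwtunnel state from LWTUNNEL_IP6_* attributes. */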
+static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+			       unsigned int family, const void *cfg,
+			       struct lwtunnel_state **ts)
+{
+	struct ip_tunnel_info *tun_info;
+	struct lwtunnel_state *new_state;
+	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
+	int err;
+
+	err = nla_parse_nested(tb, LWTUNNEL_IP6_MAX, attr, ip6_tun_policy);
+	if (err < 0)
+		return err;
+
+	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+	if (!new_state)
+		return -ENOMEM;
+
+	new_state->type = LWTUNNEL_ENCAP_IP6;
+
+	tun_info = lwt_tun_info(new_state);
+
+	if (tb[LWTUNNEL_IP6_ID])
+		tun_info->key.tun_id = nla_get_u64(tb[LWTUNNEL_IP6_ID]);
+
+	if (tb[LWTUNNEL_IP6_DST])
+		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);
+
+	if (tb[LWTUNNEL_IP6_SRC])
+		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);
+
+	if (tb[LWTUNNEL_IP6_HOPLIMIT])
+		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);
+
+	if (tb[LWTUNNEL_IP6_TC])
+		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);
+
+	if (tb[LWTUNNEL_IP6_SPORT])
+		tun_info->key.tp_src = nla_get_be16(tb[LWTUNNEL_IP6_SPORT]);
+
+	if (tb[LWTUNNEL_IP6_DPORT])
+		tun_info->key.tp_dst = nla_get_be16(tb[LWTUNNEL_IP6_DPORT]);
+
+	if (tb[LWTUNNEL_IP6_FLAGS])
+		tun_info->key.tun_flags = nla_get_u16(tb[LWTUNNEL_IP6_FLAGS]);
+
+	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
+	tun_info->options_len = 0;
+
+	*ts = new_state;
+
+	return 0;
+}
+
+static int ip6_tun_fill_encap_info(struct sk_buff *skb,
+				   struct lwtunnel_state *lwtstate)
+{
+	struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+	if (nla_put_u64(skb, LWTUNNEL_IP6_ID, tun_info->key.tun_id) ||
+	    nla_put_in6_addr(skb, LWTUNNEL_IP6_DST, &tun_info->key.u.ipv6.dst) ||
+	    nla_put_in6_addr(skb, LWTUNNEL_IP6_SRC, &tun_info->key.u.ipv6.src) ||
+	    nla_put_u8(skb, LWTUNNEL_IP6_HOPLIMIT, tun_info->key.ttl) ||
+	    nla_put_u8(skb, LWTUNNEL_IP6_TC, tun_info->key.tos) ||
+	    nla_put_u16(skb, LWTUNNEL_IP6_SPORT, tun_info->key.tp_src) ||
+	    nla_put_u16(skb, LWTUNNEL_IP6_DPORT, tun_info->key.tp_dst) ||
+	    nla_put_u16(skb, LWTUNNEL_IP6_FLAGS, tun_info->key.tun_flags))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int ip6_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	return nla_total_size(8)	/* LWTUNNEL_IP6_ID */
+		+ nla_total_size(16)	/* LWTUNNEL_IP6_DST */
+		+ nla_total_size(16)	/* LWTUNNEL_IP6_SRC */
+		+ nla_total_size(1)	/* LWTUNNEL_IP6_HOPLIMIT */
+		+ nla_total_size(1)	/* LWTUNNEL_IP6_TC */
+		+ nla_total_size(2)	/* LWTUNNEL_IP6_SPORT */
+		+ nla_total_size(2)	/* LWTUNNEL_IP6_DPORT */
+		+ nla_total_size(2);	/* LWTUNNEL_IP6_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
+	.build_state = ip6_tun_build_state,
+	.fill_encap = ip6_tun_fill_encap_info,
+	.get_encap_size = ip6_tun_encap_nlsize,
+	.cmp_encap = ip_tun_cmp_encap,
+};
+
+void __init ip_tunnel_core_init(void)
+{
+	lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+	lwtunnel_encap_add_ops(&ip6_tun_lwt_ops, LWTUNNEL_ENCAP_IP6);
+}
+
+struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
+
+void ip_tunnel_need_metadata(void)
+{
+	static_key_slow_inc(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
+
+void ip_tunnel_unneed_metadata(void)
+{
+	static_key_slow_dec(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 8e7328c..ed4ef09 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -94,7 +94,7 @@
 /* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
 #define CONF_OPEN_RETRIES 	2	/* (Re)open devices twice */
 #define CONF_SEND_RETRIES 	6	/* Send six requests per open */
-#define CONF_INTER_TIMEOUT	(HZ/2)	/* Inter-device timeout: 1/2 second */
+#define CONF_INTER_TIMEOUT	(HZ)	/* Inter-device timeout: 1 second */
 #define CONF_BASE_TIMEOUT	(HZ*2)	/* Initial timeout: 2 seconds */
 #define CONF_TIMEOUT_RANDOM	(HZ)	/* Maximum amount of randomization */
 #define CONF_TIMEOUT_MULT	*7/4	/* Rate of timeout growth */
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 254238d..f34c31d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -198,7 +198,7 @@
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi.proto))
 			goto drop;
-		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
 	}
 
 	return -1;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 2199a5d..690d27d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -58,6 +58,12 @@
 	default NFT_REJECT
 	tristate
 
+config NFT_DUP_IPV4
+	tristate "IPv4 nf_tables packet duplication support"
+	select NF_DUP_IPV4
+	help
+	  This module enables IPv4 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV4
 
 config NF_TABLES_ARP
@@ -67,6 +73,12 @@
 
 endif # NF_TABLES
 
+config NF_DUP_IPV4
+	tristate "Netfilter IPv4 packet duplication to alternate destination"
+	help
+	  This option enables the nf_dup_ipv4 core, which duplicates an IPv4
+	  packet and reroutes the copy to another destination.
+
 config NF_LOG_ARP
 	tristate "ARP packet logging"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 7fe6c70..87b073d 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -41,6 +41,7 @@
 obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
 obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
 obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o
+obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
 obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
 
 # generic IP tables 
@@ -70,3 +71,5 @@
 
 # just filtering instance of ARP tables for now
 obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
+
+obj-$(CONFIG_NF_DUP_IPV4) += nf_dup_ipv4.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 92305a1..8f87fc3 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -240,7 +240,7 @@
 	return (struct arpt_entry *)(base + offset);
 }
 
-static inline __pure
+static inline
 struct arpt_entry *arpt_next_entry(const struct arpt_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
@@ -280,6 +280,9 @@
 	table_base = private->entries;
 	jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
 
+	/* No TEE support for arptables, so no need to switch to alternate
+	 * stack.  All targets that reenter must return absolute verdicts.
+	 */
 	e = get_entry(table_base, private->hook_entry[hook]);
 
 	acpar.in      = state->in;
@@ -325,11 +328,6 @@
 			}
 			if (table_base + v
 			    != arpt_next_entry(e)) {
-
-				if (stackidx >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
 				jumpstack[stackidx++] = e;
 			}
 
@@ -337,9 +335,6 @@
 			continue;
 		}
 
-		/* Targets which reenter must return
-		 * abs. verdicts
-		 */
 		acpar.target   = t->u.kernel.target;
 		acpar.targinfo = t->data;
 		verdict = t->u.kernel.target->target(skb, &acpar);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 6c72fbb..b0a86e7 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -276,7 +276,7 @@
 }
 #endif
 
-static inline __pure
+static inline
 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
@@ -296,12 +296,13 @@
 	const char *indev, *outdev;
 	const void *table_base;
 	struct ipt_entry *e, **jumpstack;
-	unsigned int *stackptr, origptr, cpu;
+	unsigned int stackidx, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
 
 	/* Initialization */
+	stackidx = 0;
 	ip = ip_hdr(skb);
 	indev = state->in ? state->in->name : nulldevname;
 	outdev = state->out ? state->out->name : nulldevname;
@@ -331,13 +332,21 @@
 	smp_read_barrier_depends();
 	table_base = private->entries;
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-	stackptr   = per_cpu_ptr(private->stackptr, cpu);
-	origptr    = *stackptr;
+
+	/* Switch to alternate jumpstack if we're being invoked via TEE.
+	 * TEE issues XT_CONTINUE verdict on original skb so we must not
+	 * clobber the jumpstack.
+	 *
+	 * For recursion via REJECT or SYNPROXY the stack will be clobbered,
+	 * but that is no problem since these issue an absolute verdict.
+	 */
+	if (static_key_false(&xt_tee_enabled))
+		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
-	pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
-		 table->name, hook, origptr,
+	pr_debug("Entering %s(hook %u), UF %p\n",
+		 table->name, hook,
 		 get_entry(table_base, private->underflow[hook]));
 
 	do {
@@ -383,28 +392,24 @@
 					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
-				if (*stackptr <= origptr) {
+				if (stackidx == 0) {
 					e = get_entry(table_base,
 					    private->underflow[hook]);
 					pr_debug("Underflow (this is normal) "
 						 "to %p\n", e);
 				} else {
-					e = jumpstack[--*stackptr];
+					e = jumpstack[--stackidx];
 					pr_debug("Pulled %p out from pos %u\n",
-						 e, *stackptr);
+						 e, stackidx);
 					e = ipt_next_entry(e);
 				}
 				continue;
 			}
 			if (table_base + v != ipt_next_entry(e) &&
 			    !(e->ip.flags & IPT_F_GOTO)) {
-				if (*stackptr >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
-				jumpstack[(*stackptr)++] = e;
+				jumpstack[stackidx++] = e;
 				pr_debug("Pushed %p into pos %u\n",
-					 e, *stackptr - 1);
+					 e, stackidx - 1);
 			}
 
 			e = get_entry(table_base, v);
@@ -423,9 +428,8 @@
 			/* Verdict */
 			break;
 	} while (!acpar.hotdrop);
-	pr_debug("Exiting %s; resetting sp from %u to %u\n",
-		 __func__, *stackptr, origptr);
-	*stackptr = origptr;
+	pr_debug("Exiting %s; sp at %u\n", __func__, stackidx);
+
  	xt_write_recseq_end(addend);
  	local_bh_enable();
 
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index 4bf3dc4..2707652 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -72,7 +72,7 @@
 		tcph->cwr = einfo->proto.tcp.cwr;
 
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 oldval, ((__be16 *)tcph)[6], 0);
+				 oldval, ((__be16 *)tcph)[6], false);
 	return true;
 }
 
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 30ad955..8a2caaf 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -280,7 +280,7 @@
 		return -EINVAL;
 	}
 
-	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
 	if (h) {
 		struct sockaddr_in sin;
 		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 80d5554..cdde3ec 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -134,9 +134,11 @@
 	struct nf_conntrack_tuple innertuple, origtuple;
 	const struct nf_conntrack_l4proto *innerproto;
 	const struct nf_conntrack_tuple_hash *h;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	const struct nf_conntrack_zone *zone;
+	struct nf_conntrack_zone tmp;
 
 	NF_CT_ASSERT(skb->nfct == NULL);
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
 	/* Are they talking about one of our connections? */
 	if (!nf_ct_get_tuplepr(skb,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index c88b7d4..9306ec4 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -43,22 +43,22 @@
 static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 					      struct sk_buff *skb)
 {
-	u16 zone = NF_CT_DEFAULT_ZONE;
-
+	u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	if (skb->nfct)
-		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
-#endif
+	if (skb->nfct) {
+		enum ip_conntrack_info ctinfo;
+		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	if (skb->nf_bridge &&
-	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
-		return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+		zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+	}
 #endif
+	if (nf_bridge_in_prerouting(skb))
+		return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
+
 	if (hooknum == NF_INET_PRE_ROUTING)
-		return IP_DEFRAG_CONNTRACK_IN + zone;
+		return IP_DEFRAG_CONNTRACK_IN + zone_id;
 	else
-		return IP_DEFRAG_CONNTRACK_OUT + zone;
+		return IP_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv4/netfilter/nf_dup_ipv4.c b/net/ipv4/netfilter/nf_dup_ipv4.c
new file mode 100644
index 0000000..2d79e6e
--- /dev/null
+++ b/net/ipv4/netfilter/nf_dup_ipv4.c
@@ -0,0 +1,121 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/checksum.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
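+/* Pick the namespace to route the duplicate in: the skb's device, its
+ * dst device, or init_net as a fallback.
+ */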
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+	const struct dst_entry *dst;
+
+	if (skb->dev != NULL)
+		return dev_net(skb->dev);
+	dst = skb_dst(skb);
+	if (dst != NULL && dst->dev != NULL)
+		return dev_net(dst->dev);
+#endif
+	return &init_net;
+}
+
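+/* Re-route the duplicated packet towards @gw (optionally bound to @oif)
+ * and attach the new dst to the skb.
+ */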
+static bool nf_dup_ipv4_route(struct sk_buff *skb, const struct in_addr *gw,
+			      int oif)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct net *net = pick_net(skb);
+	struct rtable *rt;
+	struct flowi4 fl4;
+
+	memset(&fl4, 0, sizeof(fl4));
+	if (oif != -1)
+		fl4.flowi4_oif = oif;
+
+	fl4.daddr = gw->s_addr;
+	fl4.flowi4_tos = RT_TOS(iph->tos);
+	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+	fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR(rt))
+		return false;
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->dst);
+	skb->dev      = rt->dst.dev;
+	skb->protocol = htons(ETH_P_IP);
+
+	return true;
+}
+
+void nf_dup_ipv4(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in_addr *gw, int oif)
+{
+	struct iphdr *iph;
+
+	if (this_cpu_read(nf_skb_duplicated))
+		return;
+	/*
+	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
+	 * the original skb, which should continue on its way as if nothing has
+	 * happened. The copy should be independently delivered to the gateway.
+	 */
+	skb = pskb_copy(skb, GFP_ATOMIC);
+	if (skb == NULL)
+		return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	/* Avoid counting cloned packets towards the original connection. */
+	nf_conntrack_put(skb->nfct);
+	skb->nfct     = &nf_ct_untracked_get()->ct_general;
+	skb->nfctinfo = IP_CT_NEW;
+	nf_conntrack_get(skb->nfct);
+#endif
+	/*
+	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
+	 * since the length could have changed as a result of defragmentation.
+	 *
+	 * We also decrease the TTL to mitigate potential loops between two
+	 * hosts.
+	 *
+	 * Set %IP_DF so that the original source is notified of a potentially
+	 * decreased MTU on the clone route. IPv6 does this too.
+	 */
+	iph = ip_hdr(skb);
+	iph->frag_off |= htons(IP_DF);
+	if (hooknum == NF_INET_PRE_ROUTING ||
+	    hooknum == NF_INET_LOCAL_IN)
+		--iph->ttl;
+	ip_send_check(iph);
+
+	if (nf_dup_ipv4_route(skb, gw, oif)) {
+		__this_cpu_write(nf_skb_duplicated, true);
+		ip_local_out(skb);
+		__this_cpu_write(nf_skb_duplicated, false);
+	} else {
+		kfree_skb(skb);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv4);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv4: Duplicate IPv4 packet");
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index e59cc05..22f4579 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -120,7 +120,7 @@
 		oldip = iph->daddr;
 		newip = t->dst.u3.ip;
 	}
-	inet_proto_csum_replace4(check, skb, oldip, newip, 1);
+	inet_proto_csum_replace4(check, skb, oldip, newip, true);
 }
 
 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
@@ -151,7 +151,7 @@
 		}
 	} else
 		inet_proto_csum_replace2(check, skb,
-					 htons(oldlen), htons(datalen), 1);
+					 htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 4557b4a..7b98baa 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -67,7 +67,7 @@
 
 	hdr = (struct icmphdr *)(skb->data + hdroff);
 	inet_proto_csum_replace2(&hdr->checksum, skb,
-				 hdr->un.echo.id, tuple->src.u.icmp.id, 0);
+				 hdr->un.echo.id, tuple->src.u.icmp.id, false);
 	hdr->un.echo.id = tuple->src.u.icmp.id;
 	return true;
 }
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
new file mode 100644
index 0000000..b45932d
--- /dev/null
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+
+struct nft_dup_ipv4 {
+	enum nft_registers	sreg_addr:8;
+	enum nft_registers	sreg_dev:8;
+};
+
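+/* Read the gateway address and output interface from the source registers
+ * and hand the packet to nf_dup_ipv4().
+ */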
+static void nft_dup_ipv4_eval(const struct nft_expr *expr,
+			      struct nft_regs *regs,
+			      const struct nft_pktinfo *pkt)
+{
+	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+	struct in_addr gw = {
+		.s_addr = (__force __be32)regs->data[priv->sreg_addr],
+	};
+	int oif = regs->data[priv->sreg_dev];
+
+	nf_dup_ipv4(pkt->skb, pkt->ops->hooknum, &gw, oif);
+}
+
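+/* Validate the mandatory address register and the optional output
+ * interface register.
+ */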
+static int nft_dup_ipv4_init(const struct nft_ctx *ctx,
+			     const struct nft_expr *expr,
+			     const struct nlattr * const tb[])
+{
+	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+		return -EINVAL;
+
+	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+	err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in_addr));
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+		return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+	}
+	return 0;
+}
+
+static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_dup_ipv4 *priv = nft_expr_priv(expr);
+
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv4_type;
+static const struct nft_expr_ops nft_dup_ipv4_ops = {
+	.type		= &nft_dup_ipv4_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv4)),
+	.eval		= nft_dup_ipv4_eval,
+	.init		= nft_dup_ipv4_init,
+	.dump		= nft_dup_ipv4_dump,
+};
+
+static const struct nla_policy nft_dup_ipv4_policy[NFTA_DUP_MAX + 1] = {
+	[NFTA_DUP_SREG_ADDR]	= { .type = NLA_U32 },
+	[NFTA_DUP_SREG_DEV]	= { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv4_type __read_mostly = {
+	.family		= NFPROTO_IPV4,
+	.name		= "dup",
+	.ops		= &nft_dup_ipv4_ops,
+	.policy		= nft_dup_ipv4_policy,
+	.maxattr	= NFTA_DUP_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_dup_ipv4_module_init(void)
+{
+	return nft_register_expr(&nft_dup_ipv4_type);
+}
+
+static void __exit nft_dup_ipv4_module_exit(void)
+{
+	nft_unregister_expr(&nft_dup_ipv4_type);
+}
+
+module_init(nft_dup_ipv4_module_init);
+module_exit(nft_dup_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 05ff44b..e89094a 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -363,7 +363,8 @@
 						    scoped);
 		rcu_read_unlock();
 
-		if (!(isk->freebind || isk->transparent || has_addr ||
+		if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
+		      isk->freebind || isk->transparent || has_addr ||
 		      addr_type == IPV6_ADDR_ANY))
 			return -EADDRNOTAVAIL;
 
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index da5d483..3abd9d7 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -300,6 +300,8 @@
 	SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
 	SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
 	SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+	SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+	SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e681b85..5f4a556 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -91,6 +91,7 @@
 #include <linux/slab.h>
 #include <linux/jhash.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/ip.h>
@@ -102,6 +103,7 @@
 #include <net/tcp.h>
 #include <net/icmp.h>
 #include <net/xfrm.h>
+#include <net/lwtunnel.h>
 #include <net/netevent.h>
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
@@ -109,6 +111,8 @@
 #include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
+#include <net/ip_tunnels.h>
+#include <net/vrf.h>
 
 #define RT_FL_TOS(oldflp4) \
 	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -834,6 +838,7 @@
 	struct inet_peer *peer;
 	struct net *net;
 	int log_martians;
+	int vif;
 
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(rt->dst.dev);
@@ -842,10 +847,11 @@
 		return;
 	}
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+	vif = vrf_master_ifindex_rcu(rt->dst.dev);
 	rcu_read_unlock();
 
 	net = dev_net(rt->dst.dev);
-	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
 	if (!peer) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
 			  rt_nexthop(rt, ip_hdr(skb)->daddr));
@@ -934,7 +940,8 @@
 		break;
 	}
 
-	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
+			       vrf_master_ifindex(skb->dev), 1);
 
 	send = true;
 	if (peer) {
@@ -1403,6 +1410,7 @@
 #ifdef CONFIG_IP_ROUTE_CLASSID
 		rt->dst.tclassid = nh->nh_tclassid;
 #endif
+		rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
 		if (unlikely(fnhe))
 			cached = rt_bind_exception(rt, fnhe, daddr);
 		else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1546,7 +1554,6 @@
 	struct rtable *rth;
 	int err;
 	struct in_device *out_dev;
-	unsigned int flags = 0;
 	bool do_cache;
 	u32 itag = 0;
 
@@ -1610,7 +1617,7 @@
 	}
 
 	rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
-	rth->rt_flags = flags;
+	rth->rt_flags = 0;
 	rth->rt_type = res->type;
 	rth->rt_is_input = 1;
 	rth->rt_iif 	= 0;
@@ -1624,6 +1631,14 @@
 	rth->dst.output = ip_output;
 
 	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+	if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
+		rth->dst.lwtstate->orig_output = rth->dst.output;
+		rth->dst.output = lwtunnel_output;
+	}
+	if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
+		rth->dst.lwtstate->orig_input = rth->dst.input;
+		rth->dst.input = lwtunnel_input;
+	}
 	skb_dst_set(skb, &rth->dst);
 out:
 	err = 0;
@@ -1662,6 +1677,7 @@
 {
 	struct fib_result res;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
+	struct ip_tunnel_info *tun_info;
 	struct flowi4	fl4;
 	unsigned int	flags = 0;
 	u32		itag = 0;
@@ -1679,6 +1695,13 @@
 	   by fib_lookup.
 	 */
 
+	tun_info = skb_tunnel_info(skb);
+	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
+		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
+	else
+		fl4.flowi4_tun_key.tun_id = 0;
+	skb_dst_drop(skb);
+
 	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
 		goto martian_source;
 
@@ -1710,7 +1733,7 @@
 	 *	Now we are ready to route packet.
 	 */
 	fl4.flowi4_oif = 0;
-	fl4.flowi4_iif = dev->ifindex;
+	fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
 	fl4.flowi4_mark = skb->mark;
 	fl4.flowi4_tos = tos;
 	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
@@ -1792,6 +1815,7 @@
 	rth->rt_gateway	= 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
+
 	RT_CACHE_STAT_INC(in_slow_tot);
 	if (res.type == RTN_UNREACHABLE) {
 		rth->dst.input= ip_error;
@@ -1981,7 +2005,6 @@
 	rth->rt_gateway = 0;
 	rth->rt_uses_gateway = 0;
 	INIT_LIST_HEAD(&rth->rt_uncached);
-
 	RT_CACHE_STAT_INC(out_slow_tot);
 
 	if (flags & RTCF_LOCAL)
@@ -2004,6 +2027,8 @@
 	}
 
 	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
+	if (lwtunnel_output_redirect(rth->dst.lwtstate))
+		rth->dst.output = lwtunnel_output;
 
 	return rth;
 }
@@ -2110,6 +2135,11 @@
 				fl4->saddr = inet_select_addr(dev_out, 0,
 							      RT_SCOPE_HOST);
 		}
+		if (netif_is_vrf(dev_out) &&
+		    !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
+			rth = vrf_dev_get_rth(dev_out);
+			goto out;
+		}
 	}
 
 	if (!fl4->daddr) {
@@ -2261,7 +2291,6 @@
 		rt->rt_uses_gateway = ort->rt_uses_gateway;
 
 		INIT_LIST_HEAD(&rt->rt_uncached);
-
 		dst_free(new);
 	}
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 0330ab2..894da3a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -29,6 +29,7 @@
 static int zero;
 static int one = 1;
 static int four = 4;
+static int thousand = 1000;
 static int gso_max_segs = GSO_MAX_SEGS;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
@@ -712,6 +713,24 @@
 		.extra2		= &gso_max_segs,
 	},
 	{
+		.procname	= "tcp_pacing_ss_ratio",
+		.data		= &sysctl_tcp_pacing_ss_ratio,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &thousand,
+	},
+	{
+		.procname	= "tcp_pacing_ca_ratio",
+		.data		= &sysctl_tcp_pacing_ca_ratio,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &thousand,
+	},
+	{
 		.procname	= "tcp_autocorking",
 		.data		= &sysctl_tcp_autocorking,
 		.maxlen		= sizeof(int),
@@ -910,6 +929,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "igmp_link_local_mcast_reports",
+		.data		= &sysctl_igmp_llm_reports,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 45534a5..b8b8fa1 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -627,6 +627,8 @@
 	sk_mem_charge(sk, skb->truesize);
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
+
+	tcp_slow_start_after_idle_check(sk);
 }
 
 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index c037644..fd1405d 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -146,7 +146,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 8c6fd3d..167b6a3 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -264,7 +264,7 @@
 	u32 prior_snd_cwnd;
 	u32 incr;
 
-	if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+	if (tcp_in_slow_start(tp) && hystart_detect)
 		tcp_cdg_hystart_update(sk);
 
 	if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 84be008..93c4dc3 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -114,16 +114,19 @@
 }
 EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
 
-u32 tcp_ca_get_key_by_name(const char *name)
+u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca)
 {
 	const struct tcp_congestion_ops *ca;
-	u32 key;
+	u32 key = TCP_CA_UNSPEC;
 
 	might_sleep();
 
 	rcu_read_lock();
 	ca = __tcp_ca_find_autoload(name);
-	key = ca ? ca->key : TCP_CA_UNSPEC;
+	if (ca) {
+		key = ca->key;
+		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
+	}
 	rcu_read_unlock();
 
 	return key;
@@ -365,10 +368,8 @@
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-	u32 cwnd = tp->snd_cwnd + acked;
+	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
 
-	if (cwnd > tp->snd_ssthresh)
-		cwnd = tp->snd_ssthresh + 1;
 	acked -= cwnd - tp->snd_cwnd;
 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
 
@@ -413,7 +414,7 @@
 		return;
 
 	/* In "safe" area, increase. */
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		acked = tcp_slow_start(tp, acked);
 		if (!acked)
 			return;
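
The tcp_slow_start() change above caps window growth exactly at snd_ssthresh (rather than ssthresh + 1) and hands any unused portion of 'acked' back to the caller, which then spends it in congestion avoidance as the hunk below this one shows. A small user-space sketch of that arithmetic; toy_tp and toy_slow_start are illustrative names, not kernel symbols:

/* Toy model of the new slow-start arithmetic (not kernel code). */
#include <stdio.h>

struct toy_tp {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
	unsigned int snd_cwnd_clamp;
};

static unsigned int umin(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Grow cwnd by 'acked' segments, but never beyond ssthresh; the
 * leftover acked count is returned for use in congestion avoidance.
 */
static unsigned int toy_slow_start(struct toy_tp *tp, unsigned int acked)
{
	unsigned int cwnd = umin(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = umin(cwnd, tp->snd_cwnd_clamp);
	return acked;
}

int main(void)
{
	struct toy_tp tp = { .snd_cwnd = 8, .snd_ssthresh = 10,
			     .snd_cwnd_clamp = ~0U };
	unsigned int left = toy_slow_start(&tp, 5);

	/* prints cwnd=10 leftover=3: growth stops at ssthresh and the
	 * remaining 3 acked segments fall through to congestion avoidance.
	 */
	printf("cwnd=%u leftover=%u\n", tp.snd_cwnd, left);
	return 0;
}
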
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 06d3d66..28011fb 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -320,7 +320,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		if (hystart && after(ack, ca->end_seq))
 			bictcp_hystart_reset(sk);
 		acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@
 		ca->delay_min = delay;
 
 	/* hystart triggers when cwnd is larger than some threshold */
-	if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+	if (hystart && tcp_in_slow_start(tp) &&
 	    tp->snd_cwnd >= hystart_low_window)
 		hystart_update(sk, delay);
 }
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 882c08a..db78424 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -116,7 +116,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else {
 		/* Update AIMD parameters.
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 58469ff..82f0d9e 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -236,7 +236,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else {
 		/* In dangerous area, increase slowly.
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index f963b27..083831e 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -112,7 +112,7 @@
 
 	rho_fractions = ca->rho_3ls - (ca->rho << 3);
 
-	if (tp->snd_cwnd < tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		/*
 		 * slow start
 		 *      INC = 2^RHO - 1
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index f71002e..2ab9bbb 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -268,7 +268,7 @@
 		return;
 
 	/* In slow start */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 
 	else {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 728f5b3..a8f515b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -109,6 +109,7 @@
 #define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
 #define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
 #define FLAG_ECE		0x40 /* ECE in this ACK				*/
+#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
 #define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
 #define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -196,11 +197,13 @@
  * and the session is not interactive.
  */
 
-static inline bool tcp_in_quickack_mode(const struct sock *sk)
+static bool tcp_in_quickack_mode(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
 
-	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
+	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
+		(icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
 }
 
 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
@@ -750,13 +753,29 @@
  * TCP pacing, to smooth the burst on large writes when packets
  * in flight is significantly lower than cwnd (or rwin)
  */
+int sysctl_tcp_pacing_ss_ratio __read_mostly = 200;
+int sysctl_tcp_pacing_ca_ratio __read_mostly = 120;
+
 static void tcp_update_pacing_rate(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u64 rate;
 
 	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
-	rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);
+	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
+
+	/* current rate is (cwnd * mss) / srtt
+	 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
+	 * In Congestion Avoidance phase, set it to 120 % the current rate.
+	 *
+	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
+	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
+	 *	 end of slow start and should slow down.
+	 */
+	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+		rate *= sysctl_tcp_pacing_ss_ratio;
+	else
+		rate *= sysctl_tcp_pacing_ca_ratio;
 
 	rate *= max(tp->snd_cwnd, tp->packets_out);
 
@@ -1037,7 +1056,7 @@
  * highest SACK block). Also calculate the lowest snd_nxt among the remaining
  * retransmitted skbs to avoid some costly processing per ACKs.
  */
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -1078,7 +1097,7 @@
 		if (after(received_upto, ack_seq)) {
 			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 			tp->retrans_out -= tcp_skb_pcount(skb);
-
+			*flag |= FLAG_LOST_RETRANS;
 			tcp_skb_mark_lost_uncond_verify(tp, skb);
 			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
 		} else {
@@ -1818,7 +1837,7 @@
 	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
 		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-	tcp_mark_lost_retrans(sk);
+	tcp_mark_lost_retrans(sk, &state->flag);
 	tcp_verify_left_out(tp);
 out:
 
@@ -2474,15 +2493,14 @@
 	return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
  *	cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *	losses and/or application stalls), do not perform any further cwnd
- *	reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
@@ -2499,7 +2517,7 @@
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-			       int fast_rexmit)
+			       int fast_rexmit, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int sndcnt = 0;
@@ -2508,16 +2526,18 @@
 				 (tp->packets_out - tp->sacked_out);
 
 	tp->prr_delivered += newly_acked_sacked;
-	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+	if (delta < 0) {
 		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
 			       tp->prior_cwnd - 1;
 		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-	} else {
+	} else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+		   !(flag & FLAG_LOST_RETRANS)) {
 		sndcnt = min_t(int, delta,
 			       max_t(int, tp->prr_delivered - tp->prr_out,
 				     newly_acked_sacked) + 1);
+	} else {
+		sndcnt = min(delta, newly_acked_sacked);
 	}
-
 	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
 	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
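
In the reworked tcp_cwnd_reduction() above, delta (computed earlier in the function as snd_ssthresh minus packets in flight) now picks one of three PRR branches: a proportional reduction while flight is still above ssthresh, a slow-start-back-to-ssthresh branch when retransmissions are acked without further losses, and plain packet conservation otherwise; cwnd is then set to flight plus sndcnt. A user-space sketch of just that decision, with plain ints standing in for the socket state:

/* Sketch of the PRR sndcnt choice (RFC 6937); not kernel code. */
#include <stdio.h>
#include <stdint.h>

static int prr_sndcnt(int ssthresh, int prior_cwnd, int prr_delivered,
		      int prr_out, int newly_acked_sacked, int in_flight,
		      int retrans_data_acked, int lost_retrans, int fast_rexmit)
{
	int delta = ssthresh - in_flight;
	int floor = fast_rexmit ? 1 : 0;
	int sndcnt;

	if (delta < 0) {
		/* flight > ssthresh: spread the cwnd reduction over one RTT */
		uint64_t dividend = (uint64_t)ssthresh * prr_delivered +
				    prior_cwnd - 1;
		sndcnt = (int)(dividend / prior_cwnd) - prr_out;
	} else if (retrans_data_acked && !lost_retrans) {
		/* retransmits acked, nothing newly lost: slow start to ssthresh */
		int grow = prr_delivered - prr_out;

		if (grow < newly_acked_sacked)
			grow = newly_acked_sacked;
		sndcnt = grow + 1;
		if (sndcnt > delta)
			sndcnt = delta;
	} else {
		/* packet conservation: send at most what was just delivered */
		sndcnt = delta < newly_acked_sacked ? delta : newly_acked_sacked;
	}
	return sndcnt > floor ? sndcnt : floor;
}

int main(void)
{
	/* 12 packets in flight, ssthresh 10: proportional-reduction branch */
	printf("sndcnt=%d\n", prr_sndcnt(10, 20, 4, 1, 2, 12, 0, 0, 0));
	return 0;
}
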
@@ -2578,7 +2598,7 @@
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
 	} else {
-		tcp_cwnd_reduction(sk, prior_unsacked, 0);
+		tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 	}
 }
 
@@ -2588,6 +2608,7 @@
 
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2607,6 +2628,7 @@
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2675,7 +2697,7 @@
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
 
-	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+	if (!tcp_in_cwnd_reduction(sk)) {
 		if (!ece_ack)
 			tp->prior_ssthresh = tcp_current_ssthresh(sk);
 		tcp_init_cwnd_reduction(sk);
@@ -2735,7 +2757,7 @@
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-				 const int prior_unsacked)
+				 const int prior_unsacked, int flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2751,7 +2773,7 @@
 		 * mark more packets lost or retransmit more.
 		 */
 		if (tp->retrans_out) {
-			tcp_cwnd_reduction(sk, prior_unsacked, 0);
+			tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
 			return true;
 		}
 
@@ -2838,7 +2860,7 @@
 			if (tcp_is_reno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
 		} else {
-			if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+			if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
 				return;
 			/* Partial ACK arrived. Force fast retransmit. */
 			do_lost = tcp_is_reno(tp) ||
@@ -2851,9 +2873,10 @@
 		break;
 	case TCP_CA_Loss:
 		tcp_process_loss(sk, flag, is_dupack);
-		if (icsk->icsk_ca_state != TCP_CA_Open)
+		if (icsk->icsk_ca_state != TCP_CA_Open &&
+		    !(flag & FLAG_LOST_RETRANS))
 			return;
-		/* Fall through to processing in Open state. */
+		/* Change state if cwnd is undone or retransmits are lost */
 	default:
 		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2888,7 +2911,7 @@
 
 	if (do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
-	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+	tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
 	tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3325,6 +3348,9 @@
 			tp->pred_flags = 0;
 			tcp_fast_path_check(sk);
 
+			if (tcp_send_head(sk))
+				tcp_slow_start_after_idle_check(sk);
+
 			if (nwin > tp->max_window) {
 				tp->max_window = nwin;
 				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
@@ -3562,10 +3588,6 @@
 				    &sack_state);
 	acked -= tp->packets_out;
 
-	/* Advance cwnd if state allows */
-	if (tcp_may_raise_cwnd(sk, flag))
-		tcp_cong_avoid(sk, ack, acked);
-
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
 		tcp_fastretrans_alert(sk, acked, prior_unsacked,
@@ -3574,6 +3596,10 @@
 	if (tp->tlp_high_seq)
 		tcp_process_tlp_ack(sk, ack, flag);
 
+	/* Advance cwnd if state allows */
+	if (tcp_may_raise_cwnd(sk, flag))
+		tcp_cong_avoid(sk, ack, acked);
+
 	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
 		struct dst_entry *dst = __sk_dst_get(sk);
 		if (dst)
@@ -3947,7 +3973,6 @@
 static void tcp_fin(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	const struct dst_entry *dst;
 
 	inet_csk_schedule_ack(sk);
 
@@ -3959,9 +3984,7 @@
 	case TCP_ESTABLISHED:
 		/* Move to CLOSE_WAIT */
 		tcp_set_state(sk, TCP_CLOSE_WAIT);
-		dst = __sk_dst_get(sk);
-		if (!dst || !dst_metric(dst, RTAX_QUICKACK))
-			inet_csk(sk)->icsk_ack.pingpong = 1;
+		inet_csk(sk)->icsk_ack.pingpong = 1;
 		break;
 
 	case TCP_CLOSE_WAIT:
@@ -5980,14 +6003,17 @@
 	const struct net *net = sock_net(listen_sk);
 	bool th_ecn = th->ece && th->cwr;
 	bool ect, ecn_ok;
+	u32 ecn_ok_dst;
 
 	if (!th_ecn)
 		return;
 
 	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
-	ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN);
+	ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
+	ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst;
 
-	if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk))
+	if ((!ect && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
+	    (ecn_ok_dst & DST_FEATURE_ECN_CA))
 		inet_rsk(req)->ecn_ok = 1;
 }
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ea2e1c..93898e0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -222,7 +222,7 @@
 	if (err)
 		goto failure;
 
-	inet_set_txhash(sk);
+	sk_set_txhash(sk);
 
 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 			       inet->inet_sport, inet->inet_dport, sk);
@@ -1277,7 +1277,7 @@
 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
 	newinet->rcv_tos      = ip_hdr(skb)->tos;
 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
-	inet_set_txhash(newsk);
+	sk_set_txhash(newsk);
 	if (inet_opt)
 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 	newinet->inet_id = newtp->write_seq ^ jiffies;
@@ -1683,8 +1683,7 @@
 							iph->daddr, th->dest,
 							inet_iif(skb));
 		if (sk2) {
-			inet_twsk_deschedule(inet_twsk(sk));
-			inet_twsk_put(inet_twsk(sk));
+			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 			goto process;
 		}
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index a51d63a..c8cbc2b 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -81,11 +81,7 @@
 static bool addr_same(const struct inetpeer_addr *a,
 		      const struct inetpeer_addr *b)
 {
-	if (a->family != b->family)
-		return false;
-	if (a->family == AF_INET)
-		return a->addr.a4 == b->addr.a4;
-	return ipv6_addr_equal(&a->addr.in6, &b->addr.in6);
+	return inetpeer_addr_cmp(a, b) == 0;
 }
 
 struct tcpm_hash_bucket {
@@ -247,14 +243,14 @@
 	daddr.family = req->rsk_ops->family;
 	switch (daddr.family) {
 	case AF_INET:
-		saddr.addr.a4 = inet_rsk(req)->ir_loc_addr;
-		daddr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
-		hash = (__force unsigned int) daddr.addr.a4;
+		inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr);
+		inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr);
+		hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr);
 		break;
 #if IS_ENABLED(CONFIG_IPV6)
 	case AF_INET6:
-		saddr.addr.in6 = inet_rsk(req)->ir_v6_loc_addr;
-		daddr.addr.in6 = inet_rsk(req)->ir_v6_rmt_addr;
+		inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr);
+		inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr);
 		hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
 		break;
 #endif
@@ -285,25 +281,19 @@
 	struct net *net;
 
 	if (tw->tw_family == AF_INET) {
-		saddr.family = AF_INET;
-		saddr.addr.a4 = tw->tw_rcv_saddr;
-		daddr.family = AF_INET;
-		daddr.addr.a4 = tw->tw_daddr;
-		hash = (__force unsigned int) daddr.addr.a4;
+		inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
+		inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
+		hash = ipv4_addr_hash(tw->tw_daddr);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (tw->tw_family == AF_INET6) {
 		if (ipv6_addr_v4mapped(&tw->tw_v6_daddr)) {
-			saddr.family = AF_INET;
-			saddr.addr.a4 = tw->tw_rcv_saddr;
-			daddr.family = AF_INET;
-			daddr.addr.a4 = tw->tw_daddr;
-			hash = (__force unsigned int) daddr.addr.a4;
+			inetpeer_set_addr_v4(&saddr, tw->tw_rcv_saddr);
+			inetpeer_set_addr_v4(&daddr, tw->tw_daddr);
+			hash = ipv4_addr_hash(tw->tw_daddr);
 		} else {
-			saddr.family = AF_INET6;
-			saddr.addr.in6 = tw->tw_v6_rcv_saddr;
-			daddr.family = AF_INET6;
-			daddr.addr.in6 = tw->tw_v6_daddr;
+			inetpeer_set_addr_v6(&saddr, &tw->tw_v6_rcv_saddr);
+			inetpeer_set_addr_v6(&daddr, &tw->tw_v6_daddr);
 			hash = ipv6_addr_hash(&tw->tw_v6_daddr);
 		}
 	}
@@ -335,25 +325,19 @@
 	struct net *net;
 
 	if (sk->sk_family == AF_INET) {
-		saddr.family = AF_INET;
-		saddr.addr.a4 = inet_sk(sk)->inet_saddr;
-		daddr.family = AF_INET;
-		daddr.addr.a4 = inet_sk(sk)->inet_daddr;
-		hash = (__force unsigned int) daddr.addr.a4;
+		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
+		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
+		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 	}
 #if IS_ENABLED(CONFIG_IPV6)
 	else if (sk->sk_family == AF_INET6) {
 		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
-			saddr.family = AF_INET;
-			saddr.addr.a4 = inet_sk(sk)->inet_saddr;
-			daddr.family = AF_INET;
-			daddr.addr.a4 = inet_sk(sk)->inet_daddr;
-			hash = (__force unsigned int) daddr.addr.a4;
+			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
+			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
+			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
 		} else {
-			saddr.family = AF_INET6;
-			saddr.addr.in6 = sk->sk_v6_rcv_saddr;
-			daddr.family = AF_INET6;
-			daddr.addr.in6 = sk->sk_v6_daddr;
+			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
+			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
 			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
 		}
 	}
@@ -461,7 +445,7 @@
 				tcp_metric_set(tm, TCP_METRIC_CWND,
 					       tp->snd_cwnd);
 		}
-	} else if (tp->snd_cwnd > tp->snd_ssthresh &&
+	} else if (!tcp_in_slow_start(tp) &&
 		   icsk->icsk_ca_state == TCP_CA_Open) {
 		/* Cong. avoidance phase, cwnd is reliable. */
 		if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
@@ -796,18 +780,18 @@
 	switch (tm->tcpm_daddr.family) {
 	case AF_INET:
 		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4,
-				    tm->tcpm_daddr.addr.a4) < 0)
+				    inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0)
 			goto nla_put_failure;
 		if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4,
-				    tm->tcpm_saddr.addr.a4) < 0)
+				    inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0)
 			goto nla_put_failure;
 		break;
 	case AF_INET6:
 		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6,
-				     &tm->tcpm_daddr.addr.in6) < 0)
+				     inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0)
 			goto nla_put_failure;
 		if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6,
-				     &tm->tcpm_saddr.addr.in6) < 0)
+				     inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0)
 			goto nla_put_failure;
 		break;
 	default:
@@ -956,20 +940,21 @@
 
 	a = info->attrs[v4];
 	if (a) {
-		addr->family = AF_INET;
-		addr->addr.a4 = nla_get_in_addr(a);
+		inetpeer_set_addr_v4(addr, nla_get_in_addr(a));
 		if (hash)
-			*hash = (__force unsigned int) addr->addr.a4;
+			*hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr));
 		return 0;
 	}
 	a = info->attrs[v6];
 	if (a) {
+		struct in6_addr in6;
+
 		if (nla_len(a) != sizeof(struct in6_addr))
 			return -EINVAL;
-		addr->family = AF_INET6;
-		addr->addr.in6 = nla_get_in6_addr(a);
+		in6 = nla_get_in6_addr(a);
+		inetpeer_set_addr_v6(addr, &in6);
 		if (hash)
-			*hash = ipv6_addr_hash(&addr->addr.in6);
+			*hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr));
 		return 0;
 	}
 	return optional ? 1 : -EAFNOSUPPORT;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 4bc00cb..6d8795b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -147,8 +147,7 @@
 		if (!th->fin ||
 		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
 kill_with_rst:
-			inet_twsk_deschedule(tw);
-			inet_twsk_put(tw);
+			inet_twsk_deschedule_put(tw);
 			return TCP_TW_RST;
 		}
 
@@ -198,8 +197,7 @@
 			 */
 			if (sysctl_tcp_rfc1337 == 0) {
 kill:
-				inet_twsk_deschedule(tw);
-				inet_twsk_put(tw);
+				inet_twsk_deschedule_put(tw);
 				return TCP_TW_SUCCESS;
 			}
 		}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b1c218d..1188e4f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -137,12 +137,12 @@
 }
 
 /* RFC2861. Reset CWND after idle period longer RTO to "restart window".
- * This is the first part of cwnd validation mechanism. */
-static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
+ * This is the first part of cwnd validation mechanism.
+ */
+void tcp_cwnd_restart(struct sock *sk, s32 delta)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	s32 delta = tcp_time_stamp - tp->lsndtime;
-	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
+	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
 	u32 cwnd = tp->snd_cwnd;
 
 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
@@ -163,20 +163,14 @@
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	const u32 now = tcp_time_stamp;
-	const struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (sysctl_tcp_slow_start_after_idle &&
-	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
-		tcp_cwnd_restart(sk, __sk_dst_get(sk));
 
 	tp->lsndtime = now;
 
 	/* If it is a reply for ato after last received
 	 * packet, enter pingpong mode.
 	 */
-	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
-	    (!dst || !dst_metric(dst, RTAX_QUICKACK)))
-			icsk->icsk_ack.pingpong = 1;
+	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+		icsk->icsk_ack.pingpong = 1;
 }
 
 /* Account for an ACK we sent. */
@@ -1776,7 +1770,7 @@
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
 
-	if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
 	/* Avoid bursty behavior by allowing defer
@@ -2151,7 +2145,7 @@
 		tcp_cwnd_validate(sk, is_cwnd_limited);
 		return false;
 	}
-	return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
+	return !tp->packets_out && tcp_send_head(sk);
 }
 
 bool tcp_schedule_loss_probe(struct sock *sk)
@@ -2228,7 +2222,7 @@
 	return false;
 }
 
-/* When probe timeout (PTO) fires, send a new segment if one exists, else
+/* When probe timeout (PTO) fires, try send a new segment if possible, else
  * retransmit the last segment.
  */
 void tcp_send_loss_probe(struct sock *sk)
@@ -2237,11 +2231,19 @@
 	struct sk_buff *skb;
 	int pcount;
 	int mss = tcp_current_mss(sk);
-	int err = -1;
 
-	if (tcp_send_head(sk)) {
-		err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
-		goto rearm_timer;
+	skb = tcp_send_head(sk);
+	if (skb) {
+		if (tcp_snd_wnd_test(tp, skb, mss)) {
+			pcount = tp->packets_out;
+			tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
+			if (tp->packets_out > pcount)
+				goto probe_sent;
+			goto rearm_timer;
+		}
+		skb = tcp_write_queue_prev(sk, skb);
+	} else {
+		skb = tcp_write_queue_tail(sk);
 	}
 
 	/* At most one outstanding TLP retransmission. */
@@ -2249,7 +2251,6 @@
 		goto rearm_timer;
 
 	/* Retransmit last segment. */
-	skb = tcp_write_queue_tail(sk);
 	if (WARN_ON(!skb))
 		goto rearm_timer;
 
@@ -2264,26 +2265,24 @@
 		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
 					  GFP_ATOMIC)))
 			goto rearm_timer;
-		skb = tcp_write_queue_tail(sk);
+		skb = tcp_write_queue_next(sk, skb);
 	}
 
 	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
 		goto rearm_timer;
 
-	err = __tcp_retransmit_skb(sk, skb);
+	if (__tcp_retransmit_skb(sk, skb))
+		goto rearm_timer;
 
 	/* Record snd_nxt for loss detection. */
-	if (likely(!err))
-		tp->tlp_high_seq = tp->snd_nxt;
+	tp->tlp_high_seq = tp->snd_nxt;
 
+probe_sent:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	/* Reset s.t. tcp_rearm_rto will restart timer from now */
+	inet_csk(sk)->icsk_pending = 0;
 rearm_timer:
-	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-				  inet_csk(sk)->icsk_rto,
-				  TCP_RTO_MAX);
-
-	if (likely(!err))
-		NET_INC_STATS_BH(sock_net(sk),
-				 LINUX_MIB_TCPLOSSPROBES);
+	tcp_rearm_rto(sk);
 }
 
 /* Push out any pending frames which were held back due to
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 333bcb2..bf5ea9e 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -22,7 +22,7 @@
 	if (!tcp_is_cwnd_limited(sk))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
+	if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 	else
 		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5b752f5..7149ebc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -649,4 +649,3 @@
 	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
 				  &tcp_keepalive_timer);
 }
-EXPORT_SYMBOL(tcp_init_xmit_timers);
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index a6cea1d..13951c4 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -225,7 +225,7 @@
 			 */
 			diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-			if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+			if (diff > gamma && tcp_in_slow_start(tp)) {
 				/* Going too fast. Time to slow down
 				 * and switch to congestion avoidance.
 				 */
@@ -240,7 +240,7 @@
 				tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
 				tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
-			} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+			} else if (tcp_in_slow_start(tp)) {
 				/* Slow start.  */
 				tcp_slow_start(tp, acked);
 			} else {
@@ -281,7 +281,7 @@
 		vegas->minRTT = 0x7fffffff;
 	}
 	/* Use normal slow start */
-	else if (tp->snd_cwnd <= tp->snd_ssthresh)
+	else if (tcp_in_slow_start(tp))
 		tcp_slow_start(tp, acked);
 }
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 112151e..0d094b9 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -150,7 +150,7 @@
 
 		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
-		if (tp->snd_cwnd <= tp->snd_ssthresh) {
+		if (tcp_in_slow_start(tp)) {
 			/* Slow start.  */
 			tcp_slow_start(tp, acked);
 		} else {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b8c5ba..c0a15e7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1013,11 +1013,31 @@
 
 	if (!rt) {
 		struct net *net = sock_net(sk);
+		__u8 flow_flags = inet_sk_flowi_flags(sk);
 
 		fl4 = &fl4_stack;
+
+		/* unconnected socket. If output device is enslaved to a VRF
+		 * device lookup source address from VRF table. This mimics
+		 * behavior of ip_route_connect{_init}.
+		 */
+		if (netif_index_is_vrf(net, ipc.oif)) {
+			flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
+					   RT_SCOPE_UNIVERSE, sk->sk_protocol,
+					   (flow_flags | FLOWI_FLAG_VRFSRC),
+					   faddr, saddr, dport,
+					   inet->inet_sport);
+
+			rt = ip_route_output_flow(net, fl4, sk);
+			if (!IS_ERR(rt)) {
+				saddr = fl4->saddr;
+				ip_rt_put(rt);
+			}
+		}
+
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-				   inet_sk_flowi_flags(sk),
+				   flow_flags,
 				   faddr, saddr, dport, inet->inet_sport);
 
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 933ea90..aba4286 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -4,9 +4,10 @@
 #include <linux/udp.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
+#include <net/dst_metadata.h>
+#include <net/net_namespace.h>
 #include <net/udp.h>
 #include <net/udp_tunnel.h>
-#include <net/net_namespace.h>
 
 int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
 		     struct socket **sockp)
@@ -103,4 +104,26 @@
 }
 EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
 
+struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb,  unsigned short family,
+				    __be16 flags, __be64 tunnel_id, int md_size)
+{
+	struct metadata_dst *tun_dst;
+	struct ip_tunnel_info *info;
+
+	if (family == AF_INET)
+		tun_dst = ip_tun_rx_dst(skb, flags, tunnel_id, md_size);
+	else
+		tun_dst = ipv6_tun_rx_dst(skb, flags, tunnel_id, md_size);
+	if (!tun_dst)
+		return NULL;
+
+	info = &tun_dst->u.tun_info;
+	info->key.tp_src = udp_hdr(skb)->source;
+	info->key.tp_dst = udp_hdr(skb)->dest;
+	if (udp_hdr(skb)->check)
+		info->key.tun_flags |= TUNNEL_CSUM;
+	return tun_dst;
+}
+EXPORT_SYMBOL_GPL(udp_tun_rx_dst);
+
 MODULE_LICENSE("GPL");
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index bff6974..bb919b2 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -15,11 +15,12 @@
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
+#include <net/vrf.h>
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
 
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
-					    int tos,
+					    int tos, int oif,
 					    const xfrm_address_t *saddr,
 					    const xfrm_address_t *daddr)
 {
@@ -28,6 +29,7 @@
 	memset(fl4, 0, sizeof(*fl4));
 	fl4->daddr = daddr->a4;
 	fl4->flowi4_tos = tos;
+	fl4->flowi4_oif = oif;
 	if (saddr)
 		fl4->saddr = saddr->a4;
 
@@ -38,22 +40,22 @@
 	return ERR_CAST(rt);
 }
 
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
 					  const xfrm_address_t *daddr)
 {
 	struct flowi4 fl4;
 
-	return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
+	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
 }
 
-static int xfrm4_get_saddr(struct net *net,
+static int xfrm4_get_saddr(struct net *net, int oif,
 			   xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
 	struct dst_entry *dst;
 	struct flowi4 fl4;
 
-	dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
+	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
@@ -106,8 +108,10 @@
 	struct flowi4 *fl4 = &fl->u.ip4;
 	int oif = 0;
 
-	if (skb_dst(skb))
-		oif = skb_dst(skb)->dev->ifindex;
+	if (skb_dst(skb)) {
+		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
+			: skb_dst(skb)->dev->ifindex;
+	}
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 438a73a..983bb99 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -5,16 +5,15 @@
 #   IPv6 as module will cause a CRASH if you try to unload it
 menuconfig IPV6
 	tristate "The IPv6 protocol"
-	default m
+	default y
 	---help---
-	  This is complemental support for the IP version 6.
-	  You will still be able to do traditional IPv4 networking as well.
+	  Support for IP version 6 (IPv6).
 
 	  For general information about IPv6, see
 	  <https://en.wikipedia.org/wiki/IPv6>.
-	  For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
-	  For specific information about IPv6 under Linux, read the HOWTO at
-	  <http://www.bieringer.de/linux/IPv6/>.
+	  For specific information about IPv6 under Linux, see
+	  Documentation/networking/ipv6.txt and read the HOWTO at
+	  <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
 
 	  To compile this protocol support as a module, choose M here: the 
 	  module will be called ipv6.
@@ -93,6 +92,25 @@
 
 	  If unsure, say N.
 
+config IPV6_ILA
+	tristate "IPv6: Identifier Locator Addressing (ILA)"
+	select LWTUNNEL
+	---help---
+	  Support for IPv6 Identifier Locator Addressing (ILA).
+
+	  ILA is a mechanism to do network virtualization without
+	  encapsulation. The basic concept of ILA is that we split an
+	  IPv6 address into a 64 bit locator and 64 bit identifier. The
+	  identifier is the identity of an entity in communication
+	  ("who") and the locator expresses the location of the
+	  entity ("where").
+
+	  ILA can be configured using the "encap ila" option with
+	  "ip -6 route" command. ILA is described in
+	  https://tools.ietf.org/html/draft-herbert-nvo3-ila-00.
+
+	  If unsure, say N.
+
 config INET6_XFRM_TUNNEL
 	tristate
 	select INET6_TUNNEL
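
The locator/identifier split described in the IPV6_ILA help text amounts to rewriting the upper 64 bits of an IPv6 address while the lower 64 bits stay fixed. A conceptual user-space illustration of that split; the addresses are examples and this is not the kernel's translation code:

/* Illustrates the 64-bit locator / 64-bit identifier split (not kernel code). */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
	struct in6_addr addr;
	char buf[INET6_ADDRSTRLEN];
	/* example address: locator 2001:db8:1:1 ("where"), identifier ::1234 ("who") */
	const unsigned char new_locator[8] = { 0x20, 0x01, 0x0d, 0xb8,
					       0x00, 0x02, 0x00, 0x02 };

	inet_pton(AF_INET6, "2001:db8:1:1::1234", &addr);

	/* rewrite only the locator (top 8 bytes); the identifier is untouched */
	memcpy(addr.s6_addr, new_locator, 8);

	inet_ntop(AF_INET6, &addr, buf, sizeof(buf));
	printf("translated: %s\n", buf);	/* 2001:db8:2:2::1234 */
	return 0;
}
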
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 0f3f199..2c900c7 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION) += xfrm6_mode_ro.o
 obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
+obj-$(CONFIG_IPV6_ILA) += ila.o
 obj-$(CONFIG_NETFILTER)	+= netfilter/
 
 obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 21c2c81..99c0f2b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -195,6 +195,7 @@
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_from_local	= 0,
+	.accept_ra_min_hop_limit= 1,
 	.accept_ra_pinfo	= 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	.accept_ra_rtr_pref	= 1,
@@ -211,7 +212,9 @@
 	.accept_ra_mtu		= 1,
 	.stable_secret		= {
 		.initialized = false,
-	}
+	},
+	.use_oif_addrs_only	= 0,
+	.ignore_routes_with_linkdown = 0,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -236,6 +239,7 @@
 	.max_addresses		= IPV6_MAX_ADDRESSES,
 	.accept_ra_defrtr	= 1,
 	.accept_ra_from_local	= 0,
+	.accept_ra_min_hop_limit= 1,
 	.accept_ra_pinfo	= 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	.accept_ra_rtr_pref	= 1,
@@ -253,6 +257,8 @@
 	.stable_secret		= {
 		.initialized = false,
 	},
+	.use_oif_addrs_only	= 0,
+	.ignore_routes_with_linkdown = 0,
 };
 
 /* Check if a valid qdisc is available */
@@ -468,6 +474,9 @@
 	if (type == -1 || type == NETCONFA_PROXY_NEIGH)
 		size += nla_total_size(4);
 
+	if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
+		size += nla_total_size(4);
+
 	return size;
 }
 
@@ -504,6 +513,11 @@
 	    nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
 		goto nla_put_failure;
 
+	if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
+	    nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+			devconf->ignore_routes_with_linkdown) < 0)
+		goto nla_put_failure;
+
 	nlmsg_end(skb, nlh);
 	return 0;
 
@@ -540,6 +554,7 @@
 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
 	[NETCONFA_FORWARDING]	= { .len = sizeof(int) },
 	[NETCONFA_PROXY_NEIGH]	= { .len = sizeof(int) },
+	[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN]	= { .len = sizeof(int) },
 };
 
 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
@@ -762,6 +777,63 @@
 		rt6_purge_dflt_routers(net);
 	return 1;
 }
+
+static void addrconf_linkdown_change(struct net *net, __s32 newf)
+{
+	struct net_device *dev;
+	struct inet6_dev *idev;
+
+	for_each_netdev(net, dev) {
+		idev = __in6_dev_get(dev);
+		if (idev) {
+			int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
+
+			idev->cnf.ignore_routes_with_linkdown = newf;
+			if (changed)
+				inet6_netconf_notify_devconf(dev_net(dev),
+							     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+							     dev->ifindex,
+							     &idev->cnf);
+		}
+	}
+}
+
+static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
+{
+	struct net *net;
+	int old;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+
+	net = (struct net *)table->extra2;
+	old = *p;
+	*p = newf;
+
+	if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
+		if ((!newf) ^ (!old))
+			inet6_netconf_notify_devconf(net,
+						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+						     NETCONFA_IFINDEX_DEFAULT,
+						     net->ipv6.devconf_dflt);
+		rtnl_unlock();
+		return 0;
+	}
+
+	if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
+		net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
+		addrconf_linkdown_change(net, newf);
+		if ((!newf) ^ (!old))
+			inet6_netconf_notify_devconf(net,
+						     NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
+						     NETCONFA_IFINDEX_ALL,
+						     net->ipv6.devconf_all);
+	}
+	rtnl_unlock();
+
+	return 1;
+}
+
 #endif
 
 /* Nobody refers to this ifaddr, destroy it */
@@ -1358,15 +1430,96 @@
 	return ret;
 }
 
+static int __ipv6_dev_get_saddr(struct net *net,
+				struct ipv6_saddr_dst *dst,
+				struct inet6_dev *idev,
+				struct ipv6_saddr_score *scores,
+				int hiscore_idx)
+{
+	struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
+		int i;
+
+		/*
+		 * - Tentative Address (RFC2462 section 5.4)
+		 *  - A tentative address is not considered
+		 *    "assigned to an interface" in the traditional
+		 *    sense, unless it is also flagged as optimistic.
+		 * - Candidate Source Address (section 4)
+		 *  - In any case, anycast addresses, multicast
+		 *    addresses, and the unspecified address MUST
+		 *    NOT be included in a candidate set.
+		 */
+		if ((score->ifa->flags & IFA_F_TENTATIVE) &&
+		    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
+			continue;
+
+		score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+
+		if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
+			     score->addr_type & IPV6_ADDR_MULTICAST)) {
+			net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+					    idev->dev->name);
+			continue;
+		}
+
+		score->rule = -1;
+		bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+
+		for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
+			int minihiscore, miniscore;
+
+			minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
+			miniscore = ipv6_get_saddr_eval(net, score, dst, i);
+
+			if (minihiscore > miniscore) {
+				if (i == IPV6_SADDR_RULE_SCOPE &&
+				    score->scopedist > 0) {
+					/*
+					 * special case:
+					 * each remaining entry
+					 * has too small (not enough)
+					 * scope, because ifa entries
+					 * are sorted by their scope
+					 * values.
+					 */
+					goto out;
+				}
+				break;
+			} else if (minihiscore < miniscore) {
+				if (hiscore->ifa)
+					in6_ifa_put(hiscore->ifa);
+
+				in6_ifa_hold(score->ifa);
+
+				swap(hiscore, score);
+				hiscore_idx = 1 - hiscore_idx;
+
+				/* restore our iterator */
+				score->ifa = hiscore->ifa;
+
+				break;
+			}
+		}
+	}
+out:
+	read_unlock_bh(&idev->lock);
+	return hiscore_idx;
+}
+
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
 		       const struct in6_addr *daddr, unsigned int prefs,
 		       struct in6_addr *saddr)
 {
-	struct ipv6_saddr_score scores[2],
-				*score = &scores[0], *hiscore = &scores[1];
+	struct ipv6_saddr_score scores[2], *hiscore;
 	struct ipv6_saddr_dst dst;
+	struct inet6_dev *idev;
 	struct net_device *dev;
 	int dst_type;
+	bool use_oif_addr = false;
+	int hiscore_idx = 0;
 
 	dst_type = __ipv6_addr_type(daddr);
 	dst.addr = daddr;
@@ -1375,105 +1528,50 @@
 	dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
 	dst.prefs = prefs;
 
-	hiscore->rule = -1;
-	hiscore->ifa = NULL;
+	scores[hiscore_idx].rule = -1;
+	scores[hiscore_idx].ifa = NULL;
 
 	rcu_read_lock();
 
-	for_each_netdev_rcu(net, dev) {
-		struct inet6_dev *idev;
-
-		/* Candidate Source Address (section 4)
-		 *  - multicast and link-local destination address,
-		 *    the set of candidate source address MUST only
-		 *    include addresses assigned to interfaces
-		 *    belonging to the same link as the outgoing
-		 *    interface.
-		 * (- For site-local destination addresses, the
-		 *    set of candidate source addresses MUST only
-		 *    include addresses assigned to interfaces
-		 *    belonging to the same site as the outgoing
-		 *    interface.)
-		 */
-		if (((dst_type & IPV6_ADDR_MULTICAST) ||
-		     dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
-		    dst.ifindex && dev->ifindex != dst.ifindex)
-			continue;
-
-		idev = __in6_dev_get(dev);
-		if (!idev)
-			continue;
-
-		read_lock_bh(&idev->lock);
-		list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
-			int i;
-
-			/*
-			 * - Tentative Address (RFC2462 section 5.4)
-			 *  - A tentative address is not considered
-			 *    "assigned to an interface" in the traditional
-			 *    sense, unless it is also flagged as optimistic.
-			 * - Candidate Source Address (section 4)
-			 *  - In any case, anycast addresses, multicast
-			 *    addresses, and the unspecified address MUST
-			 *    NOT be included in a candidate set.
-			 */
-			if ((score->ifa->flags & IFA_F_TENTATIVE) &&
-			    (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
-				continue;
-
-			score->addr_type = __ipv6_addr_type(&score->ifa->addr);
-
-			if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
-				     score->addr_type & IPV6_ADDR_MULTICAST)) {
-				net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
-						    dev->name);
-				continue;
-			}
-
-			score->rule = -1;
-			bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
-
-			for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
-				int minihiscore, miniscore;
-
-				minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
-				miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
-
-				if (minihiscore > miniscore) {
-					if (i == IPV6_SADDR_RULE_SCOPE &&
-					    score->scopedist > 0) {
-						/*
-						 * special case:
-						 * each remaining entry
-						 * has too small (not enough)
-						 * scope, because ifa entries
-						 * are sorted by their scope
-						 * values.
-						 */
-						goto try_nextdev;
-					}
-					break;
-				} else if (minihiscore < miniscore) {
-					if (hiscore->ifa)
-						in6_ifa_put(hiscore->ifa);
-
-					in6_ifa_hold(score->ifa);
-
-					swap(hiscore, score);
-
-					/* restore our iterator */
-					score->ifa = hiscore->ifa;
-
-					break;
-				}
-			}
+	/* Candidate Source Address (section 4)
+	 *  - multicast and link-local destination address,
+	 *    the set of candidate source address MUST only
+	 *    include addresses assigned to interfaces
+	 *    belonging to the same link as the outgoing
+	 *    interface.
+	 * (- For site-local destination addresses, the
+	 *    set of candidate source addresses MUST only
+	 *    include addresses assigned to interfaces
+	 *    belonging to the same site as the outgoing
+	 *    interface.)
+	 *  - "It is RECOMMENDED that the candidate source addresses
+	 *    be the set of unicast addresses assigned to the
+	 *    interface that will be used to send to the destination
+	 *    (the 'outgoing' interface)." (RFC 6724)
+	 */
+	if (dst_dev) {
+		idev = __in6_dev_get(dst_dev);
+		if ((dst_type & IPV6_ADDR_MULTICAST) ||
+		    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+		    (idev && idev->cnf.use_oif_addrs_only)) {
+			use_oif_addr = true;
 		}
-try_nextdev:
-		read_unlock_bh(&idev->lock);
+	}
+
+	if (use_oif_addr) {
+		if (idev)
+			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+	} else {
+		for_each_netdev_rcu(net, dev) {
+			idev = __in6_dev_get(dev);
+			if (!idev)
+				continue;
+			hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+		}
 	}
 	rcu_read_unlock();
 
+	hiscore = &scores[hiscore_idx];
 	if (!hiscore->ifa)
 		return -EADDRNOTAVAIL;
 
@@ -3558,7 +3656,7 @@
 
 	/* send a neighbour solicitation for our addr */
 	addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
-	ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
+	ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 out:
 	in6_ifa_put(ifp);
 	rtnl_unlock();
@@ -4560,6 +4658,7 @@
 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -4585,7 +4684,9 @@
 	array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
 	array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
 	array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
+	array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
 	/* we omit DEVCONF_STABLE_SECRET for now */
+	array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4605,6 +4706,7 @@
 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
 	       + nla_total_size(4) /* IFLA_MTU */
 	       + nla_total_size(4) /* IFLA_LINK */
+	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
 }
 
@@ -4624,18 +4726,24 @@
 }
 
 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
-				      int items, int bytes, size_t syncpoff)
+					int bytes, size_t syncpoff)
 {
-	int i;
-	int pad = bytes - sizeof(u64) * items;
+	int i, c;
+	u64 buff[IPSTATS_MIB_MAX];
+	int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
+
 	BUG_ON(pad < 0);
 
-	/* Use put_unaligned() because stats may not be aligned for u64. */
-	put_unaligned(items, &stats[0]);
-	for (i = 1; i < items; i++)
-		put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
+	memset(buff, 0, sizeof(buff));
+	buff[0] = IPSTATS_MIB_MAX;
 
-	memset(&stats[items], 0, pad);
+	for_each_possible_cpu(c) {
+		for (i = 1; i < IPSTATS_MIB_MAX; i++)
+			buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
+	}
+
+	memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
+	memset(&stats[IPSTATS_MIB_MAX], 0, pad);
 }
 
 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
@@ -4643,8 +4751,8 @@
 {
 	switch (attrtype) {
 	case IFLA_INET6_STATS:
-		__snmp6_fill_stats64(stats, idev->stats.ipv6,
-				     IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
+		__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
+				     offsetof(struct ipstats_mib, syncp));
 		break;
 	case IFLA_INET6_ICMP6STATS:
 		__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
@@ -4861,7 +4969,9 @@
 	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
 	    (dev->ifindex != dev_get_iflink(dev) &&
-	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
+	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
+	    nla_put_u8(skb, IFLA_OPERSTATE,
+		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
 		goto nla_put_failure;
 	protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
 	if (!protoinfo)
@@ -5306,6 +5416,34 @@
 	return err;
 }
 
+static
+int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
+						int write,
+						void __user *buffer,
+						size_t *lenp,
+						loff_t *ppos)
+{
+	int *valp = ctl->data;
+	int val = *valp;
+	loff_t pos = *ppos;
+	struct ctl_table lctl;
+	int ret;
+
+	/* ctl->data points to idev->cnf.ignore_routes_when_linkdown
+	 * we should not modify it until we get the rtnl lock.
+	 */
+	lctl = *ctl;
+	lctl.data = &val;
+
+	ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
+
+	if (write)
+		ret = addrconf_fixup_linkdown(ctl, valp, val);
+	if (ret)
+		*ppos = pos;
+	return ret;
+}
+
 static struct addrconf_sysctl_table
 {
 	struct ctl_table_header *sysctl_header;
@@ -5456,6 +5594,13 @@
 			.proc_handler	= proc_dointvec,
 		},
 		{
+			.procname	= "accept_ra_min_hop_limit",
+			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= proc_dointvec,
+		},
+		{
 			.procname	= "accept_ra_pinfo",
 			.data		= &ipv6_devconf.accept_ra_pinfo,
 			.maxlen		= sizeof(int),
@@ -5585,6 +5730,20 @@
 			.proc_handler	= addrconf_sysctl_stable_secret,
 		},
 		{
+			.procname       = "use_oif_addrs_only",
+			.data           = &ipv6_devconf.use_oif_addrs_only,
+			.maxlen         = sizeof(int),
+			.mode           = 0644,
+			.proc_handler   = proc_dointvec,
+		},
+		{
+			.procname	= "ignore_routes_with_linkdown",
+			.data		= &ipv6_devconf.ignore_routes_with_linkdown,
+			.maxlen		= sizeof(int),
+			.mode		= 0644,
+			.proc_handler	= addrconf_sysctl_ignore_routes_with_linkdown,
+		},
+		{
 			/* sentinel */
 		}
 	},
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index ca09bf4..bfa941f 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -107,7 +107,16 @@
 }
 EXPORT_SYMBOL(inet6addr_notifier_call_chain);
 
-const struct ipv6_stub *ipv6_stub __read_mostly;
+static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+					struct dst_entry **u2,
+					struct flowi6 *u3)
+{
+	return -EAFNOSUPPORT;
+}
+
+const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+	.ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+};
 EXPORT_SYMBOL_GPL(ipv6_stub);
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7de52b6..44bb66b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -197,6 +197,7 @@
 	np->mcast_hops	= IPV6_DEFAULT_MCASTHOPS;
 	np->mc_loop	= 1;
 	np->pmtudisc	= IPV6_PMTUDISC_WANT;
+	np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
 
 	/* Init the ipv4 part of the socket since we can have sockets
@@ -342,7 +343,8 @@
 			 */
 			v4addr = LOOPBACK4_IPV6;
 			if (!(addr_type & IPV6_ADDR_MULTICAST))	{
-				if (!(inet->freebind || inet->transparent) &&
+				if (!net->ipv6.sysctl.ip_nonlocal_bind &&
+				    !(inet->freebind || inet->transparent) &&
 				    !ipv6_chk_addr(net, &addr->sin6_addr,
 						   dev, 0)) {
 					err = -EADDRNOTAVAIL;
@@ -679,8 +681,8 @@
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 
 	if (np->rxopt.all) {
-		if ((opt->hop && (np->rxopt.bits.hopopts ||
-				  np->rxopt.bits.ohopopts)) ||
+		if (((opt->flags & IP6SKB_HOPBYHOP) &&
+		     (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
 		    (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
 		     np->rxopt.bits.rxflow) ||
 		    (opt->srcrt && (np->rxopt.bits.srcrt ||
@@ -766,10 +768,10 @@
 	net->ipv6.sysctl.bindv6only = 0;
 	net->ipv6.sysctl.icmpv6_time = 1*HZ;
 	net->ipv6.sysctl.flowlabel_consistency = 1;
-	net->ipv6.sysctl.auto_flowlabels = 0;
+	net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
 	net->ipv6.sysctl.idgen_retries = 3;
 	net->ipv6.sysctl.idgen_delay = 1 * HZ;
-	net->ipv6.sysctl.flowlabel_state_ranges = 1;
+	net->ipv6.sysctl.flowlabel_state_ranges = 0;
 	atomic_set(&net->ipv6.fib6_sernum, 1);
 
 	err = ipv6_init_mibs(net);
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ed7d4e3f..0630a4d5 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -577,8 +577,10 @@
 
 	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
 				ahp->icv_trunc_len + seqhi_len);
-	if (!work_iph)
+	if (!work_iph) {
+		err = -ENOMEM;
 		goto out;
+	}
 
 	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
 	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b10a889..9aadd57 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -199,7 +199,7 @@
 		      NULL);
 
 	sk->sk_state = TCP_ESTABLISHED;
-	ip6_set_txhash(sk);
+	sk_set_txhash(sk);
 out:
 	fl6_sock_release(flowlabel);
 	return err;
@@ -568,8 +568,8 @@
 	}
 
 	/* HbH is allowed only once */
-	if (np->rxopt.bits.hopopts && opt->hop) {
-		u8 *ptr = nh + opt->hop;
+	if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+		u8 *ptr = nh + sizeof(struct ipv6hdr);
 		put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
 	}
 
@@ -630,8 +630,8 @@
 		int hlim = ipv6_hdr(skb)->hop_limit;
 		put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
 	}
-	if (np->rxopt.bits.ohopopts && opt->hop) {
-		u8 *ptr = nh + opt->hop;
+	if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+		u8 *ptr = nh + sizeof(struct ipv6hdr);
 		put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
 	}
 	if (np->rxopt.bits.odstopts && opt->dst0) {
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index a7bbbe4..ce203b0 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -632,7 +632,7 @@
 		return -1;
 	}
 
-	opt->hop = sizeof(struct ipv6hdr);
+	opt->flags |= IP6SKB_HOPBYHOP;
 	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
 		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
 		opt = IP6CB(skb);
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
index 447a7fb..f5e2ba1 100644
--- a/net/ipv6/exthdrs_offload.c
+++ b/net/ipv6/exthdrs_offload.c
@@ -36,6 +36,6 @@
 	return ret;
 
 out_rt:
-	inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+	inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
 	goto out;
 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 713d743..6c2b213 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -329,7 +329,7 @@
 	struct flowi6 fl2;
 	int err;
 
-	err = ip6_dst_lookup(sk, &dst, fl6);
+	err = ip6_dst_lookup(net, sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 
@@ -361,7 +361,7 @@
 	if (err)
 		goto relookup_failed;
 
-	err = ip6_dst_lookup(sk, &dst2, &fl2);
+	err = ip6_dst_lookup(net, sk, &dst2, &fl2);
 	if (err)
 		goto relookup_failed;
 
@@ -591,7 +591,7 @@
 	else if (!fl6.flowi6_oif)
 		fl6.flowi6_oif = np->ucast_oif;
 
-	err = ip6_dst_lookup(sk, &dst, &fl6);
+	err = ip6_dst_lookup(net, sk, &dst, &fl6);
 	if (err)
 		goto out;
 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
diff --git a/net/ipv6/ila.c b/net/ipv6/ila.c
new file mode 100644
index 0000000..678d2df
--- /dev/null
+++ b/net/ipv6/ila.c
@@ -0,0 +1,229 @@
+#include <linux/errno.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+#include <net/ip6_fib.h>
+#include <net/lwtunnel.h>
+#include <net/protocol.h>
+#include <uapi/linux/ila.h>
+
+struct ila_params {
+	__be64 locator;
+	__be64 locator_match;
+	__wsum csum_diff;
+};
+
+static inline struct ila_params *ila_params_lwtunnel(
+	struct lwtunnel_state *lwstate)
+{
+	return (struct ila_params *)lwstate->data;
+}
+
+static inline __wsum compute_csum_diff8(const __be32 *from, const __be32 *to)
+{
+	__be32 diff[] = {
+		~from[0], ~from[1], to[0], to[1],
+	};
+
+	return csum_partial(diff, sizeof(diff), 0);
+}
+
+static inline __wsum get_csum_diff(struct ipv6hdr *ip6h, struct ila_params *p)
+{
+	if (*(__be64 *)&ip6h->daddr == p->locator_match)
+		return p->csum_diff;
+	else
+		return compute_csum_diff8((__be32 *)&ip6h->daddr,
+					  (__be32 *)&p->locator);
+}
+
+static void update_ipv6_locator(struct sk_buff *skb, struct ila_params *p)
+{
+	__wsum diff;
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	size_t nhoff = sizeof(struct ipv6hdr);
+
+	/* First update checksum */
+	switch (ip6h->nexthdr) {
+	case NEXTHDR_TCP:
+		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct tcphdr)))) {
+			struct tcphdr *th = (struct tcphdr *)
+					(skb_network_header(skb) + nhoff);
+
+			diff = get_csum_diff(ip6h, p);
+			inet_proto_csum_replace_by_diff(&th->check, skb,
+							diff, true);
+		}
+		break;
+	case NEXTHDR_UDP:
+		if (likely(pskb_may_pull(skb, nhoff + sizeof(struct udphdr)))) {
+			struct udphdr *uh = (struct udphdr *)
+					(skb_network_header(skb) + nhoff);
+
+			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+				diff = get_csum_diff(ip6h, p);
+				inet_proto_csum_replace_by_diff(&uh->check, skb,
+								diff, true);
+				if (!uh->check)
+					uh->check = CSUM_MANGLED_0;
+			}
+		}
+		break;
+	case NEXTHDR_ICMP:
+		if (likely(pskb_may_pull(skb,
+					 nhoff + sizeof(struct icmp6hdr)))) {
+			struct icmp6hdr *ih = (struct icmp6hdr *)
+					(skb_network_header(skb) + nhoff);
+
+			diff = get_csum_diff(ip6h, p);
+			inet_proto_csum_replace_by_diff(&ih->icmp6_cksum, skb,
+							diff, true);
+		}
+		break;
+	}
+
+	/* Now change destination address */
+	*(__be64 *)&ip6h->daddr = p->locator;
+}
+
+static int ila_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		goto drop;
+
+	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+	return dst->lwtstate->orig_output(sk, skb);
+
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+static int ila_input(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		goto drop;
+
+	update_ipv6_locator(skb, ila_params_lwtunnel(dst->lwtstate));
+
+	return dst->lwtstate->orig_input(skb);
+
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+static struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
+	[ILA_ATTR_LOCATOR] = { .type = NLA_U64, },
+};
+
+static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+			   unsigned int family, const void *cfg,
+			   struct lwtunnel_state **ts)
+{
+	struct ila_params *p;
+	struct nlattr *tb[ILA_ATTR_MAX + 1];
+	size_t encap_len = sizeof(*p);
+	struct lwtunnel_state *newts;
+	const struct fib6_config *cfg6 = cfg;
+	int ret;
+
+	if (family != AF_INET6)
+		return -EINVAL;
+
+	ret = nla_parse_nested(tb, ILA_ATTR_MAX, nla,
+			       ila_nl_policy);
+	if (ret < 0)
+		return ret;
+
+	if (!tb[ILA_ATTR_LOCATOR])
+		return -EINVAL;
+
+	newts = lwtunnel_state_alloc(encap_len);
+	if (!newts)
+		return -ENOMEM;
+
+	newts->len = encap_len;
+	p = ila_params_lwtunnel(newts);
+
+	p->locator = (__force __be64)nla_get_u64(tb[ILA_ATTR_LOCATOR]);
+
+	if (cfg6->fc_dst_len > sizeof(__be64)) {
+		/* Precompute checksum difference for translation since we
+		 * know both the old locator and the new one.
+		 */
+		p->locator_match = *(__be64 *)&cfg6->fc_dst;
+		p->csum_diff = compute_csum_diff8(
+			(__be32 *)&p->locator_match, (__be32 *)&p->locator);
+	}
+
+	newts->type = LWTUNNEL_ENCAP_ILA;
+	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT |
+			LWTUNNEL_STATE_INPUT_REDIRECT;
+
+	*ts = newts;
+
+	return 0;
+}
+
+static int ila_fill_encap_info(struct sk_buff *skb,
+			       struct lwtunnel_state *lwtstate)
+{
+	struct ila_params *p = ila_params_lwtunnel(lwtstate);
+
+	if (nla_put_u64(skb, ILA_ATTR_LOCATOR, (__force u64)p->locator))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	/* No encapsulation overhead */
+	return 0;
+}
+
+static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	struct ila_params *a_p = ila_params_lwtunnel(a);
+	struct ila_params *b_p = ila_params_lwtunnel(b);
+
+	return (a_p->locator != b_p->locator);
+}
+
+static const struct lwtunnel_encap_ops ila_encap_ops = {
+	.build_state = ila_build_state,
+	.output = ila_output,
+	.input = ila_input,
+	.fill_encap = ila_fill_encap_info,
+	.get_encap_size = ila_encap_nlsize,
+	.cmp_encap = ila_encap_cmp,
+};
+
+static int __init ila_init(void)
+{
+	return lwtunnel_encap_add_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+static void __exit ila_fini(void)
+{
+	lwtunnel_encap_del_ops(&ila_encap_ops, LWTUNNEL_ENCAP_ILA);
+}
+
+module_init(ila_init);
+module_exit(ila_fini);
+MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
+MODULE_LICENSE("GPL");
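
The new net/ipv6/ila.c relies on RFC 1624-style incremental checksum updates: since only the top 64 bits of the destination address change, the transport checksum can be patched with a precomputed ones-complement difference (compute_csum_diff8(), and the "Precompute checksum difference" path in ila_build_state()) instead of being recomputed over the whole packet. Below is a minimal standalone sketch of that arithmetic, assuming nothing beyond the C standard library; the helper names are made up for the demo and are not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Fold a wide sum down to 16 bits with end-around carry (RFC 1071). */
static uint32_t csum_fold(uint64_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint32_t)sum;
}

/* Plain ones-complement sum of n 16-bit words. */
static uint64_t csum_words(const uint16_t *w, int n)
{
	uint64_t s = 0;

	while (n--)
		s += *w++;
	return s;
}

int main(void)
{
	/* Pretend payload: four fixed words plus a 64-bit "locator". */
	uint16_t pkt[8]     = { 0x1234, 0xabcd, 0x0007, 0x4d2e,
				0x2001, 0x0db8, 0x0001, 0x0000 };
	uint16_t new_loc[4] = { 0x2001, 0x0db8, 0x0002, 0x0000 };
	uint32_t full_old, incremental, full_new;
	uint64_t diff = 0;
	int i;

	full_old = csum_fold(csum_words(pkt, 8));

	/* Difference = ~old_locator + new_locator, word by word -- the same
	 * quantity compute_csum_diff8() builds from {~from[0], ~from[1],
	 * to[0], to[1]} and csum_partial().
	 */
	for (i = 0; i < 4; i++)
		diff += (uint16_t)~pkt[4 + i] + new_loc[i];
	incremental = csum_fold((uint64_t)full_old + diff);

	/* Recompute from scratch over the rewritten words for comparison. */
	for (i = 0; i < 4; i++)
		pkt[4 + i] = new_loc[i];
	full_new = csum_fold(csum_words(pkt, 8));

	/* 0x0000 and 0xffff are the same value in ones-complement math. */
	printf("incremental=%#x recomputed=%#x match=%d\n",
	       incremental, full_new,
	       (incremental % 0xffff) == (full_new % 0xffff));
	return 0;
}
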
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b4fd96d..6ac8dad 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -207,7 +207,6 @@
 	struct sock *sk2;
 	const struct hlist_nulls_node *node;
 	struct inet_timewait_sock *tw = NULL;
-	int twrefcnt = 0;
 
 	spin_lock(lock);
 
@@ -234,21 +233,17 @@
 	WARN_ON(!sk_unhashed(sk));
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
-		twrefcnt = inet_twsk_unhash(tw);
+		sk_nulls_del_node_init_rcu((struct sock *)tw);
 		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
-	if (twrefcnt)
-		inet_twsk_put(tw);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
 	if (twp) {
 		*twp = tw;
 	} else if (tw) {
 		/* Silly. Should hash-dance instead... */
-		inet_twsk_deschedule(tw);
-
-		inet_twsk_put(tw);
+		inet_twsk_deschedule_put(tw);
 	}
 	return 0;
 
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 548c623..418d982 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -32,6 +32,7 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/lwtunnel.h>
 
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a38d3ac..4038c69 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -361,6 +361,7 @@
 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
 
 	ip6gre_tunnel_unlink(ign, t);
+	ip6_tnl_dst_reset(t);
 	dev_put(dev);
 }
 
@@ -728,7 +729,7 @@
 	 */
 	ipv6h = ipv6_hdr(skb);
 	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-		     ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
 	ipv6h->hop_limit = tunnel->parms.hop_limit;
 	ipv6h->nexthdr = proto;
 	ipv6h->saddr = fl6->saddr;
@@ -1182,7 +1183,8 @@
 
 	ip6_flow_hdr(ipv6h, 0,
 		     ip6_make_flowlabel(dev_net(dev), skb,
-					t->fl.u.ip6.flowlabel, false));
+					t->fl.u.ip6.flowlabel, true,
+					&t->fl.u.ip6));
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = NEXTHDR_GRE;
 	ipv6h->saddr = t->parms.laddr;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 57990c9..adba03a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,6 +45,7 @@
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
 
 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
@@ -55,7 +56,7 @@
 		if (ipprot && ipprot->early_demux)
 			ipprot->early_demux(skb);
 	}
-	if (!skb_dst(skb))
+	if (!skb_valid_dst(skb))
 		ip6_route_input(skb);
 
 	return dst_input(skb);
@@ -98,7 +99,7 @@
 	 * arrived via the sending interface (ethX), because of the
 	 * nature of scoping architecture. --yoshfuji
 	 */
-	IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
 
 	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
 		goto err;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d5f7716..26ea479 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -207,7 +207,7 @@
 		hlimit = ip6_dst_hoplimit(dst);
 
 	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-						     np->autoflowlabel));
+						     np->autoflowlabel, fl6));
 
 	hdr->payload_len = htons(seg_len);
 	hdr->nexthdr = proto;
@@ -881,10 +881,9 @@
 	return dst;
 }
 
-static int ip6_dst_lookup_tail(struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
 			       struct dst_entry **dst, struct flowi6 *fl6)
 {
-	struct net *net = sock_net(sk);
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	struct neighbour *n;
 	struct rt6_info *rt;
@@ -994,10 +993,11 @@
  *
  *	It returns zero on success, or a standard errno code on error.
  */
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+		   struct flowi6 *fl6)
 {
 	*dst = NULL;
-	return ip6_dst_lookup_tail(sk, dst, fl6);
+	return ip6_dst_lookup_tail(net, sk, dst, fl6);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
@@ -1018,11 +1018,13 @@
 	struct dst_entry *dst = NULL;
 	int err;
 
-	err = ip6_dst_lookup_tail(sk, &dst, fl6);
+	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
 		fl6->daddr = *final_dst;
+	if (!fl6->flowi6_oif)
+		fl6->flowi6_oif = dst->dev->ifindex;
 
 	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
@@ -1050,7 +1052,7 @@
 
 	dst = ip6_sk_dst_check(sk, dst, fl6);
 
-	err = ip6_dst_lookup_tail(sk, &dst, fl6);
+	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
 	if (err)
 		return ERR_PTR(err);
 	if (final_dst)
@@ -1647,7 +1649,7 @@
 
 	ip6_flow_hdr(hdr, v6_cork->tclass,
 		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-					np->autoflowlabel));
+					np->autoflowlabel, fl6));
 	hdr->hop_limit = v6_cork->hop_limit;
 	hdr->nexthdr = proto;
 	hdr->saddr = fl6->saddr;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2e67b66..b0ab420 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1095,7 +1095,7 @@
 	skb_reset_network_header(skb);
 	ipv6h = ipv6_hdr(skb);
 	ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-		     ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
 	ipv6h->hop_limit = t->parms.hop_limit;
 	ipv6h->nexthdr = proto;
 	ipv6h->saddr = fl6->saddr;
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index e1a1136..14dacf1 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -23,6 +23,15 @@
 	if (err < 0)
 		goto error;
 
+	if (cfg->ipv6_v6only) {
+		int val = 1;
+
+		err = kernel_setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY,
+					(char *) &val, sizeof(val));
+		if (err < 0)
+			goto error;
+	}
+
 	udp6_addr.sin6_family = AF_INET6;
 	memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
 	       sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index c53331c..64a7135 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -553,7 +553,8 @@
 
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 		   const struct in6_addr *solicit,
-		   const struct in6_addr *daddr, const struct in6_addr *saddr)
+		   const struct in6_addr *daddr, const struct in6_addr *saddr,
+		   struct sk_buff *oskb)
 {
 	struct sk_buff *skb;
 	struct in6_addr addr_buf;
@@ -589,6 +590,9 @@
 		ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR,
 				       dev->dev_addr);
 
+	if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb)
+		skb_dst_copy(skb, oskb);
+
 	ndisc_send_skb(skb, daddr, saddr);
 }
 
@@ -675,12 +679,12 @@
 				  "%s: trying to ucast probe in NUD_INVALID: %pI6\n",
 				  __func__, target);
 		}
-		ndisc_send_ns(dev, neigh, target, target, saddr);
+		ndisc_send_ns(dev, neigh, target, target, saddr, skb);
 	} else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) {
 		neigh_app_ns(neigh);
 	} else {
 		addrconf_addr_solict_mult(target, &mcaddr);
-		ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
+		ndisc_send_ns(dev, NULL, target, &mcaddr, saddr, skb);
 	}
 }
 
@@ -1074,6 +1078,8 @@
 	struct ndisc_options ndopts;
 	int optlen;
 	unsigned int pref = 0;
+	__u32 old_if_flags;
+	bool send_ifinfo_notify = false;
 
 	__u8 *opt = (__u8 *)(ra_msg + 1);
 
@@ -1144,6 +1150,7 @@
 	 * Remember the managed/otherconf flags from most recently
 	 * received RA message (RFC 2462) -- yoshfuji
 	 */
+	old_if_flags = in6_dev->if_flags;
 	in6_dev->if_flags = (in6_dev->if_flags & ~(IF_RA_MANAGED |
 				IF_RA_OTHERCONF)) |
 				(ra_msg->icmph.icmp6_addrconf_managed ?
@@ -1151,6 +1158,9 @@
 				(ra_msg->icmph.icmp6_addrconf_other ?
 					IF_RA_OTHERCONF : 0);
 
+	if (old_if_flags != in6_dev->if_flags)
+		send_ifinfo_notify = true;
+
 	if (!in6_dev->cnf.accept_ra_defrtr) {
 		ND_PRINTK(2, info,
 			  "RA: %s, defrtr is false for dev: %s\n",
@@ -1225,18 +1235,16 @@
 
 	if (rt)
 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
-	if (ra_msg->icmph.icmp6_hop_limit) {
-		/* Only set hop_limit on the interface if it is higher than
-		 * the current hop_limit.
-		 */
-		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+	if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+	    ra_msg->icmph.icmp6_hop_limit) {
+		if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
 			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+			if (rt)
+				dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+					       ra_msg->icmph.icmp6_hop_limit);
 		} else {
-			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
 		}
-		if (rt)
-			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
-				       ra_msg->icmph.icmp6_hop_limit);
 	}
 
 skip_defrtr:
@@ -1254,7 +1262,7 @@
 				rtime = HZ/10;
 			NEIGH_VAR_SET(in6_dev->nd_parms, RETRANS_TIME, rtime);
 			in6_dev->tstamp = jiffies;
-			inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+			send_ifinfo_notify = true;
 		}
 
 		rtime = ntohl(ra_msg->reachable_time);
@@ -1271,11 +1279,17 @@
 					      GC_STALETIME, 3 * rtime);
 				in6_dev->nd_parms->reachable_time = neigh_rand_reach_time(rtime);
 				in6_dev->tstamp = jiffies;
-				inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+				send_ifinfo_notify = true;
 			}
 		}
 	}
 
+	/*
+	 *	Send a notify if RA changed managed/otherconf flags or timer settings
+	 */
+	if (send_ifinfo_notify)
+		inet6_ifinfo_notify(RTM_NEWLINK, in6_dev);
+
 skip_linkparms:
 
 	/*
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index b552cf0..96833e4 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -47,9 +47,21 @@
 	default NFT_REJECT
 	tristate
 
+config NFT_DUP_IPV6
+	tristate "IPv6 nf_tables packet duplication support"
+	select NF_DUP_IPV6
+	help
+	  This module enables IPv6 packet duplication support for nf_tables.
+
 endif # NF_TABLES_IPV6
 endif # NF_TABLES
 
+config NF_DUP_IPV6
+	tristate "Netfilter IPv6 packet duplication to alternate destination"
+	help
+	  This option enables the nf_dup_ipv6 core, which duplicates an IPv6
+	  packet to be rerouted to another destination.
+
 config NF_REJECT_IPV6
 	tristate "IPv6 packet rejection"
 	default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c36e0a5..b4f7d0b 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -30,6 +30,8 @@
 # reject
 obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
 
+obj-$(CONFIG_NF_DUP_IPV6) += nf_dup_ipv6.o
+
 # nf_tables
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
@@ -37,6 +39,7 @@
 obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
 obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
 obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o
+obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o
 
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 3c35ced..0771991 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -305,7 +305,7 @@
 }
 #endif
 
-static inline __pure struct ip6t_entry *
+static inline struct ip6t_entry *
 ip6t_next_entry(const struct ip6t_entry *entry)
 {
 	return (void *)entry + entry->next_offset;
@@ -324,12 +324,13 @@
 	const char *indev, *outdev;
 	const void *table_base;
 	struct ip6t_entry *e, **jumpstack;
-	unsigned int *stackptr, origptr, cpu;
+	unsigned int stackidx, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
 	unsigned int addend;
 
 	/* Initialization */
+	stackidx = 0;
 	indev = state->in ? state->in->name : nulldevname;
 	outdev = state->out ? state->out->name : nulldevname;
 	/* We handle fragments by dealing with the first fragment as
@@ -357,8 +358,16 @@
 	cpu        = smp_processor_id();
 	table_base = private->entries;
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-	stackptr   = per_cpu_ptr(private->stackptr, cpu);
-	origptr    = *stackptr;
+
+	/* Switch to alternate jumpstack if we're being invoked via TEE.
+	 * TEE issues XT_CONTINUE verdict on original skb so we must not
+	 * clobber the jumpstack.
+	 *
+	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
+	 * but it is no problem since absolute verdict is issued by these.
+	 */
+	if (static_key_false(&xt_tee_enabled))
+		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -406,20 +415,16 @@
 					verdict = (unsigned int)(-v) - 1;
 					break;
 				}
-				if (*stackptr <= origptr)
+				if (stackidx == 0)
 					e = get_entry(table_base,
 					    private->underflow[hook]);
 				else
-					e = ip6t_next_entry(jumpstack[--*stackptr]);
+					e = ip6t_next_entry(jumpstack[--stackidx]);
 				continue;
 			}
 			if (table_base + v != ip6t_next_entry(e) &&
 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
-				if (*stackptr >= private->stacksize) {
-					verdict = NF_DROP;
-					break;
-				}
-				jumpstack[(*stackptr)++] = e;
+				jumpstack[stackidx++] = e;
 			}
 
 			e = get_entry(table_base, v);
@@ -437,8 +442,6 @@
 			break;
 	} while (!acpar.hotdrop);
 
-	*stackptr = origptr;
-
  	xt_write_recseq_end(addend);
  	local_bh_enable();
 
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 12331ef..0ed841a 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -35,14 +35,12 @@
 MODULE_DESCRIPTION("Xtables: packet \"rejection\" target for IPv6");
 MODULE_LICENSE("GPL");
 
-
 static unsigned int
 reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct ip6t_reject_info *reject = par->targinfo;
 	struct net *net = dev_net((par->in != NULL) ? par->in : par->out);
 
-	pr_debug("%s: medium point\n", __func__);
 	switch (reject->with) {
 	case IP6T_ICMP6_NO_ROUTE:
 		nf_send_unreach6(net, skb, ICMPV6_NOROUTE, par->hooknum);
@@ -65,8 +63,11 @@
 	case IP6T_TCP_RESET:
 		nf_send_reset6(net, skb, par->hooknum);
 		break;
-	default:
-		net_info_ratelimited("case %u not handled yet\n", reject->with);
+	case IP6T_ICMP6_POLICY_FAIL:
+		nf_send_unreach6(net, skb, ICMPV6_POLICY_FAIL, par->hooknum);
+		break;
+	case IP6T_ICMP6_REJECT_ROUTE:
+		nf_send_unreach6(net, skb, ICMPV6_REJECT_ROUTE, par->hooknum);
 		break;
 	}
 
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index ebbb754..1e4bf99 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -237,7 +237,7 @@
 	nth->ack_seq	= th->ack_seq;
 	tcp_flag_word(nth) = TCP_FLAG_ACK;
 	nth->doff	= tcp_hdr_size / 4;
-	nth->window	= ntohs(htons(th->window) >> opts->wscale);
+	nth->window	= htons(ntohs(th->window) >> opts->wscale);
 	nth->check	= 0;
 	nth->urg_ptr	= 0;
 
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4ba0c34..7302900 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -251,7 +251,7 @@
 	if (*len < 0 || (unsigned int) *len < sizeof(sin6))
 		return -EINVAL;
 
-	h = nf_conntrack_find_get(sock_net(sk), NF_CT_DEFAULT_ZONE, &tuple);
+	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
 	if (!h) {
 		pr_debug("IP6T_SO_ORIGINAL_DST: Can't find %pI6c/%u-%pI6c/%u.\n",
 			 &tuple.src.u3.ip6, ntohs(tuple.src.u.tcp.port),
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 90388d6..0e6fae1 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -150,7 +150,7 @@
 	struct nf_conntrack_tuple intuple, origtuple;
 	const struct nf_conntrack_tuple_hash *h;
 	const struct nf_conntrack_l4proto *inproto;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	struct nf_conntrack_zone tmp;
 
 	NF_CT_ASSERT(skb->nfct == NULL);
 
@@ -177,7 +177,8 @@
 
 	*ctinfo = IP_CT_RELATED;
 
-	h = nf_conntrack_find_get(net, zone, &intuple);
+	h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
+				  &intuple);
 	if (!h) {
 		pr_debug("icmpv6_error: no match\n");
 		return -NF_ACCEPT;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6d02498..701cd2b 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -633,6 +633,7 @@
 	kfree_skb(clone);
 	return skb;
 }
+EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
 
 void nf_ct_frag6_consume_orig(struct sk_buff *skb)
 {
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index a45db0b..6d9c0b3 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -33,23 +33,22 @@
 static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 						struct sk_buff *skb)
 {
-	u16 zone = NF_CT_DEFAULT_ZONE;
-
+	u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	if (skb->nfct)
-		zone = nf_ct_zone((struct nf_conn *)skb->nfct);
-#endif
+	if (skb->nfct) {
+		enum ip_conntrack_info ctinfo;
+		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-	if (skb->nf_bridge &&
-	    skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
-		return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
+		zone_id = nf_ct_zone_id(nf_ct_zone(ct), CTINFO2DIR(ctinfo));
+	}
 #endif
+	if (nf_bridge_in_prerouting(skb))
+		return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id;
+
 	if (hooknum == NF_INET_PRE_ROUTING)
-		return IP6_DEFRAG_CONNTRACK_IN + zone;
+		return IP6_DEFRAG_CONNTRACK_IN + zone_id;
 	else
-		return IP6_DEFRAG_CONNTRACK_OUT + zone;
-
+		return IP6_DEFRAG_CONNTRACK_OUT + zone_id;
 }
 
 static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
new file mode 100644
index 0000000..c8ab626
--- /dev/null
+++ b/net/ipv6/netfilter/nf_dup_ipv6.c
@@ -0,0 +1,97 @@
+/*
+ * (C) 2007 by Sebastian Claßen <sebastian.classen@freenet.ag>
+ * (C) 2007-2010 by Jan Engelhardt <jengelh@medozas.de>
+ *
+ * Extracted from xt_TEE.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 or later, as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+static struct net *pick_net(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+	const struct dst_entry *dst;
+
+	if (skb->dev != NULL)
+		return dev_net(skb->dev);
+	dst = skb_dst(skb);
+	if (dst != NULL && dst->dev != NULL)
+		return dev_net(dst->dev);
+#endif
+	return &init_net;
+}
+
+static bool nf_dup_ipv6_route(struct sk_buff *skb, const struct in6_addr *gw,
+			      int oif)
+{
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct net *net = pick_net(skb);
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+
+	memset(&fl6, 0, sizeof(fl6));
+	if (oif != -1)
+		fl6.flowi6_oif = oif;
+
+	fl6.daddr = *gw;
+	fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
+			(iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
+	dst = ip6_route_output(net, NULL, &fl6);
+	if (dst->error) {
+		dst_release(dst);
+		return false;
+	}
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst);
+	skb->dev      = dst->dev;
+	skb->protocol = htons(ETH_P_IPV6);
+
+	return true;
+}
+
+void nf_dup_ipv6(struct sk_buff *skb, unsigned int hooknum,
+		 const struct in6_addr *gw, int oif)
+{
+	if (this_cpu_read(nf_skb_duplicated))
+		return;
+	skb = pskb_copy(skb, GFP_ATOMIC);
+	if (skb == NULL)
+		return;
+
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	nf_conntrack_put(skb->nfct);
+	skb->nfct     = &nf_ct_untracked_get()->ct_general;
+	skb->nfctinfo = IP_CT_NEW;
+	nf_conntrack_get(skb->nfct);
+#endif
+	if (hooknum == NF_INET_PRE_ROUTING ||
+	    hooknum == NF_INET_LOCAL_IN) {
+		struct ipv6hdr *iph = ipv6_hdr(skb);
+		--iph->hop_limit;
+	}
+	if (nf_dup_ipv6_route(skb, gw, oif)) {
+		__this_cpu_write(nf_skb_duplicated, true);
+		ip6_local_out(skb);
+		__this_cpu_write(nf_skb_duplicated, false);
+	} else {
+		kfree_skb(skb);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_dup_ipv6);
+
+MODULE_AUTHOR("Sebastian Claßen <sebastian.classen@freenet.ag>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_DESCRIPTION("nf_dup_ipv6: IPv6 packet duplication");
+MODULE_LICENSE("GPL");
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index e76900e..70fbaed 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -124,7 +124,7 @@
 		newip = &t->dst.u3.in6;
 	}
 	inet_proto_csum_replace16(check, skb, oldip->s6_addr32,
-				  newip->s6_addr32, 1);
+				  newip->s6_addr32, true);
 }
 
 static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
@@ -155,7 +155,7 @@
 		}
 	} else
 		inet_proto_csum_replace2(check, skb,
-					 htons(oldlen), htons(datalen), 1);
+					 htons(oldlen), htons(datalen), true);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
index 2205e8e..57593b00 100644
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
@@ -73,7 +73,7 @@
 	    hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
 		inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
 					 hdr->icmp6_identifier,
-					 tuple->src.u.icmp.id, 0);
+					 tuple->src.u.icmp.id, false);
 		hdr->icmp6_identifier = tuple->src.u.icmp.id;
 	}
 	return true;
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
new file mode 100644
index 0000000..0eaa4f6
--- /dev/null
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2015 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
+
+struct nft_dup_ipv6 {
+	enum nft_registers	sreg_addr:8;
+	enum nft_registers	sreg_dev:8;
+};
+
+static void nft_dup_ipv6_eval(const struct nft_expr *expr,
+			      struct nft_regs *regs,
+			      const struct nft_pktinfo *pkt)
+{
+	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+	struct in6_addr *gw = (struct in6_addr *)&regs->data[priv->sreg_addr];
+	int oif = regs->data[priv->sreg_dev];
+
+	nf_dup_ipv6(pkt->skb, pkt->ops->hooknum, gw, oif);
+}
+
+static int nft_dup_ipv6_init(const struct nft_ctx *ctx,
+			     const struct nft_expr *expr,
+			     const struct nlattr * const tb[])
+{
+	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+	int err;
+
+	if (tb[NFTA_DUP_SREG_ADDR] == NULL)
+		return -EINVAL;
+
+	priv->sreg_addr = nft_parse_register(tb[NFTA_DUP_SREG_ADDR]);
+	err = nft_validate_register_load(priv->sreg_addr, sizeof(struct in6_addr));
+	if (err < 0)
+		return err;
+
+	if (tb[NFTA_DUP_SREG_DEV] != NULL) {
+		priv->sreg_dev = nft_parse_register(tb[NFTA_DUP_SREG_DEV]);
+		return nft_validate_register_load(priv->sreg_dev, sizeof(int));
+	}
+	return 0;
+}
+
+static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_dup_ipv6 *priv = nft_expr_priv(expr);
+
+	if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) ||
+	    nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static struct nft_expr_type nft_dup_ipv6_type;
+static const struct nft_expr_ops nft_dup_ipv6_ops = {
+	.type		= &nft_dup_ipv6_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_dup_ipv6)),
+	.eval		= nft_dup_ipv6_eval,
+	.init		= nft_dup_ipv6_init,
+	.dump		= nft_dup_ipv6_dump,
+};
+
+static const struct nla_policy nft_dup_ipv6_policy[NFTA_DUP_MAX + 1] = {
+	[NFTA_DUP_SREG_ADDR]	= { .type = NLA_U32 },
+	[NFTA_DUP_SREG_DEV]	= { .type = NLA_U32 },
+};
+
+static struct nft_expr_type nft_dup_ipv6_type __read_mostly = {
+	.family		= NFPROTO_IPV6,
+	.name		= "dup",
+	.ops		= &nft_dup_ipv6_ops,
+	.policy		= nft_dup_ipv6_policy,
+	.maxattr	= NFTA_DUP_MAX,
+	.owner		= THIS_MODULE,
+};
+
+static int __init nft_dup_ipv6_module_init(void)
+{
+	return nft_register_expr(&nft_dup_ipv6_type);
+}
+
+static void __exit nft_dup_ipv6_module_exit(void)
+{
+	nft_unregister_expr(&nft_dup_ipv6_type);
+}
+
+module_init(nft_dup_ipv6_module_init);
+module_exit(nft_dup_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ca4700c..fdbada156 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -295,7 +295,8 @@
 		 * unspecified and mapped address have a v4 equivalent.
 		 */
 		v4addr = LOOPBACK4_IPV6;
-		if (!(addr_type & IPV6_ADDR_MULTICAST))	{
+		if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+		    !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
 			err = -EADDRNOTAVAIL;
 			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
 					   dev, 0)) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d155864..f45cac6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -54,10 +54,13 @@
 #include <net/tcp.h>
 #include <linux/rtnetlink.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/xfrm.h>
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
+#include <net/ip_tunnels.h>
 
 #include <asm/uaccess.h>
 
@@ -535,13 +538,14 @@
 		container_of(w, struct __rt6_probe_work, work);
 
 	addrconf_addr_solict_mult(&work->target, &mcaddr);
-	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
 	dev_put(work->dev);
 	kfree(work);
 }
 
 static void rt6_probe(struct rt6_info *rt)
 {
+	struct __rt6_probe_work *work;
 	struct neighbour *neigh;
 	/*
 	 * Okay, this does not seem to be appropriate
@@ -556,34 +560,33 @@
 	rcu_read_lock_bh();
 	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
 	if (neigh) {
-		write_lock(&neigh->lock);
 		if (neigh->nud_state & NUD_VALID)
 			goto out;
-	}
 
-	if (!neigh ||
-	    time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-		struct __rt6_probe_work *work;
-
-		work = kmalloc(sizeof(*work), GFP_ATOMIC);
-
-		if (neigh && work)
-			__neigh_set_probe_once(neigh);
-
-		if (neigh)
-			write_unlock(&neigh->lock);
-
-		if (work) {
-			INIT_WORK(&work->work, rt6_probe_deferred);
-			work->target = rt->rt6i_gateway;
-			dev_hold(rt->dst.dev);
-			work->dev = rt->dst.dev;
-			schedule_work(&work->work);
+		work = NULL;
+		write_lock(&neigh->lock);
+		if (!(neigh->nud_state & NUD_VALID) &&
+		    time_after(jiffies,
+			       neigh->updated +
+			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
+			work = kmalloc(sizeof(*work), GFP_ATOMIC);
+			if (work)
+				__neigh_set_probe_once(neigh);
 		}
-	} else {
-out:
 		write_unlock(&neigh->lock);
+	} else {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
 	}
+
+	if (work) {
+		INIT_WORK(&work->work, rt6_probe_deferred);
+		work->target = rt->rt6i_gateway;
+		dev_hold(rt->dst.dev);
+		work->dev = rt->dst.dev;
+		schedule_work(&work->work);
+	}
+
+out:
 	rcu_read_unlock_bh();
 }
 #else
@@ -662,6 +665,12 @@
 {
 	int m;
 	bool match_do_rr = false;
+	struct inet6_dev *idev = rt->rt6i_idev;
+	struct net_device *dev = rt->dst.dev;
+
+	if (dev && !netif_carrier_ok(dev) &&
+	    idev->cnf.ignore_routes_with_linkdown)
+		goto out;
 
 	if (rt6_check_expired(rt))
 		goto out;
@@ -1154,6 +1163,7 @@
 	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct net *net = dev_net(skb->dev);
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
+	struct ip_tunnel_info *tun_info;
 	struct flowi6 fl6 = {
 		.flowi6_iif = skb->dev->ifindex,
 		.daddr = iph->daddr,
@@ -1163,6 +1173,10 @@
 		.flowi6_proto = iph->nexthdr,
 	};
 
+	tun_info = skb_tunnel_info(skb);
+	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
+		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
+	skb_dst_drop(skb);
 	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
 }
 
@@ -1684,6 +1698,7 @@
 static int ip6_convert_metrics(struct mx6_config *mxc,
 			       const struct fib6_config *cfg)
 {
+	bool ecn_ca = false;
 	struct nlattr *nla;
 	int remaining;
 	u32 *mp;
@@ -1697,30 +1712,36 @@
 
 	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
 		int type = nla_type(nla);
+		u32 val;
 
-		if (type) {
-			u32 val;
+		if (!type)
+			continue;
+		if (unlikely(type > RTAX_MAX))
+			goto err;
 
-			if (unlikely(type > RTAX_MAX))
+		if (type == RTAX_CC_ALGO) {
+			char tmp[TCP_CA_NAME_MAX];
+
+			nla_strlcpy(tmp, nla, sizeof(tmp));
+			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
+			if (val == TCP_CA_UNSPEC)
 				goto err;
-			if (type == RTAX_CC_ALGO) {
-				char tmp[TCP_CA_NAME_MAX];
-
-				nla_strlcpy(tmp, nla, sizeof(tmp));
-				val = tcp_ca_get_key_by_name(tmp);
-				if (val == TCP_CA_UNSPEC)
-					goto err;
-			} else {
-				val = nla_get_u32(nla);
-			}
-
-			mp[type - 1] = val;
-			__set_bit(type - 1, mxc->mx_valid);
+		} else {
+			val = nla_get_u32(nla);
 		}
+		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+			goto err;
+
+		mp[type - 1] = val;
+		__set_bit(type - 1, mxc->mx_valid);
+	}
+
+	if (ecn_ca) {
+		__set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
+		mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
 	}
 
 	mxc->mx = mp;
-
 	return 0;
  err:
 	kfree(mp);
@@ -1801,6 +1822,25 @@
 
 	rt->dst.output = ip6_output;
 
+	if (cfg->fc_encap) {
+		struct lwtunnel_state *lwtstate;
+
+		err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+					   cfg->fc_encap, AF_INET6, cfg,
+					   &lwtstate);
+		if (err)
+			goto out;
+		rt->dst.lwtstate = lwtstate_get(lwtstate);
+		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
+			rt->dst.lwtstate->orig_output = rt->dst.output;
+			rt->dst.output = lwtunnel_output;
+		}
+		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
+			rt->dst.lwtstate->orig_input = rt->dst.input;
+			rt->dst.input = lwtunnel_input;
+		}
+	}
+
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
 	if (rt->rt6i_dst.plen == 128)
@@ -2180,6 +2220,7 @@
 #endif
 	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
 	rt->rt6i_table = ort->rt6i_table;
+	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2628,6 +2669,8 @@
 	[RTA_METRICS]           = { .type = NLA_NESTED },
 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
 	[RTA_PREF]              = { .type = NLA_U8 },
+	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
+	[RTA_ENCAP]		= { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2722,6 +2765,12 @@
 		cfg->fc_flags |= RTF_PREF(pref);
 	}
 
+	if (tb[RTA_ENCAP])
+		cfg->fc_encap = tb[RTA_ENCAP];
+
+	if (tb[RTA_ENCAP_TYPE])
+		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
 	err = 0;
 errout:
 	return err;
@@ -2754,6 +2803,10 @@
 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
 				r_cfg.fc_flags |= RTF_GATEWAY;
 			}
+			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+			if (nla)
+				r_cfg.fc_encap_type = nla_get_u16(nla);
 		}
 		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
 		if (err) {
@@ -2816,7 +2869,7 @@
 		return ip6_route_add(&cfg);
 }
 
-static inline size_t rt6_nlmsg_size(void)
+static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
 {
 	return NLMSG_ALIGN(sizeof(struct rtmsg))
 	       + nla_total_size(16) /* RTA_SRC */
@@ -2830,7 +2883,8 @@
 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
 	       + nla_total_size(sizeof(struct rta_cacheinfo))
 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
-	       + nla_total_size(1); /* RTA_PREF */
+	       + nla_total_size(1) /* RTA_PREF */
+	       + lwtunnel_get_encap_size(rt->dst.lwtstate);
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2891,6 +2945,11 @@
 	else
 		rtm->rtm_type = RTN_UNICAST;
 	rtm->rtm_flags = 0;
+	if (!netif_carrier_ok(rt->dst.dev)) {
+		rtm->rtm_flags |= RTNH_F_LINKDOWN;
+		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
+			rtm->rtm_flags |= RTNH_F_DEAD;
+	}
 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
 	rtm->rtm_protocol = rt->rt6i_protocol;
 	if (rt->rt6i_flags & RTF_DYNAMIC)
@@ -2978,6 +3037,8 @@
 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
 		goto nla_put_failure;
 
+	lwtunnel_fill_encap(skb, rt->dst.lwtstate);
+
 	nlmsg_end(skb, nlh);
 	return 0;
 
@@ -3104,7 +3165,7 @@
 	err = -ENOBUFS;
 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
-	skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
+	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
 	if (!skb)
 		goto errout;
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ac35a28..94428fd 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -742,7 +742,7 @@
 			goto drop;
 		if (iptunnel_pull_header(skb, 0, tpi.proto))
 			goto drop;
-		return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
+		return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error);
 	}
 
 	return 1;
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 4e705ad..45243bb 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -17,6 +17,9 @@
 #include <net/inet_frag.h>
 
 static int one = 1;
+static int auto_flowlabels_min;
+static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+
 
 static struct ctl_table ipv6_table_template[] = {
 	{
@@ -45,7 +48,9 @@
 		.data		= &init_net.ipv6.sysctl.auto_flowlabels,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &auto_flowlabels_min,
+		.extra2		= &auto_flowlabels_max
 	},
 	{
 		.procname	= "fwmark_reflect",
@@ -75,6 +80,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "ip_nonlocal_bind",
+		.data		= &init_net.ipv6.sysctl.ip_nonlocal_bind,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
 	{ }
 };
 
@@ -117,6 +129,7 @@
 	ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
 	ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
 	ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
+	ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a6cea5..97d9314 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -276,7 +276,7 @@
 	if (err)
 		goto late_failure;
 
-	ip6_set_txhash(sk);
+	sk_set_txhash(sk);
 
 	if (!tp->write_seq && likely(!tp->repair))
 		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
@@ -1090,7 +1090,7 @@
 	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
 	newsk->sk_bound_dev_if = ireq->ir_iif;
 
-	ip6_set_txhash(newsk);
+	sk_set_txhash(newsk);
 
 	/* Now IPv6 options...
 
@@ -1481,8 +1481,7 @@
 					    ntohs(th->dest), tcp_v6_iif(skb));
 		if (sk2) {
 			struct inet_timewait_sock *tw = inet_twsk(sk);
-			inet_twsk_deschedule(tw);
-			inet_twsk_put(tw);
+			inet_twsk_deschedule_put(tw);
 			sk = sk2;
 			tcp_v6_restore_cb(skb);
 			goto process;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e51fc3e..0aba654 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1496,7 +1496,8 @@
 	return udp_proc_register(net, &udp6_seq_afinfo);
 }
 
-void udp6_proc_exit(struct net *net) {
+void udp6_proc_exit(struct net *net)
+{
 	udp_proc_unregister(net, &udp6_seq_afinfo);
 }
 #endif /* CONFIG_PROC_FS */
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 901ef6f..f7fbdba 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,10 +20,9 @@
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
-	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+	if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
 		IP6_ECN_set_ce(inner_iph);
 }
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ed0583c..30caa28 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -20,13 +20,14 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/vrf.h>
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
 
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
 					  const xfrm_address_t *saddr,
 					  const xfrm_address_t *daddr)
 {
@@ -35,6 +36,7 @@
 	int err;
 
 	memset(&fl6, 0, sizeof(fl6));
+	fl6.flowi6_oif = oif;
 	memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
 	if (saddr)
 		memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -50,13 +52,13 @@
 	return dst;
 }
 
-static int xfrm6_get_saddr(struct net *net,
+static int xfrm6_get_saddr(struct net *net, int oif,
 			   xfrm_address_t *saddr, xfrm_address_t *daddr)
 {
 	struct dst_entry *dst;
 	struct net_device *dev;
 
-	dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
+	dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
 	if (IS_ERR(dst))
 		return -EHOSTUNREACH;
 
@@ -130,8 +132,10 @@
 
 	nexthdr = nh[nhoff];
 
-	if (skb_dst(skb))
-		oif = skb_dst(skb)->dev->ifindex;
+	if (skb_dst(skb)) {
+		oif = vrf_master_ifindex(skb_dst(skb)->dev) ?
+			: skb_dst(skb)->dev->ifindex;
+	}
 
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index b397f0a..83a7068 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -219,7 +219,7 @@
 #define BROADCAST_ONE		1
 #define BROADCAST_REGISTERED	2
 #define BROADCAST_PROMISC_ONLY	4
-static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
+static int pfkey_broadcast(struct sk_buff *skb,
 			   int broadcast_flags, struct sock *one_sk,
 			   struct net *net)
 {
@@ -244,7 +244,7 @@
 		 * socket.
 		 */
 		if (pfk->promisc)
-			pfkey_broadcast_one(skb, &skb2, allocation, sk);
+			pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
 		/* the exact target will be processed later */
 		if (sk == one_sk)
@@ -259,7 +259,7 @@
 				continue;
 		}
 
-		err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);
+		err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);
 
 		/* Error is cleared after successful sending to at least one
 		 * registered KM */
@@ -269,7 +269,7 @@
 	rcu_read_unlock();
 
 	if (one_sk != NULL)
-		err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+		err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
 
 	kfree_skb(skb2);
 	kfree_skb(skb);
@@ -292,7 +292,7 @@
 		hdr = (struct sadb_msg *) pfk->dump.skb->data;
 		hdr->sadb_msg_seq = 0;
 		hdr->sadb_msg_errno = rc;
-		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+		pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
 				&pfk->sk, sock_net(&pfk->sk));
 		pfk->dump.skb = NULL;
 	}
@@ -333,7 +333,7 @@
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
 			     sizeof(uint64_t));
 
-	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
+	pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
 
 	return 0;
 }
@@ -1365,7 +1365,7 @@
 
 	xfrm_state_put(x);
 
-	pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
+	pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
 
 	return 0;
 }
@@ -1452,7 +1452,7 @@
 	hdr->sadb_msg_seq = c->seq;
 	hdr->sadb_msg_pid = c->portid;
 
-	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
+	pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
 
 	return 0;
 }
@@ -1565,7 +1565,7 @@
 	out_hdr->sadb_msg_reserved = 0;
 	out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
 	out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+	pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
 
 	return 0;
 }
@@ -1670,7 +1670,7 @@
 		return -ENOBUFS;
 	}
 
-	pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk));
+	pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
 
 	return 0;
 }
@@ -1689,7 +1689,7 @@
 	hdr->sadb_msg_errno = (uint8_t) 0;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 
-	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+	return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
 }
 
 static int key_notify_sa_flush(const struct km_event *c)
@@ -1710,7 +1710,7 @@
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 	hdr->sadb_msg_reserved = 0;
 
-	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+	pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
 
 	return 0;
 }
@@ -1767,7 +1767,7 @@
 	out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
 	if (pfk->dump.skb)
-		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+		pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
 				&pfk->sk, sock_net(&pfk->sk));
 	pfk->dump.skb = out_skb;
 
@@ -1847,7 +1847,7 @@
 		new_hdr->sadb_msg_errno = 0;
 	}
 
-	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+	pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
 	return 0;
 }
 
@@ -2181,7 +2181,7 @@
 	out_hdr->sadb_msg_errno = 0;
 	out_hdr->sadb_msg_seq = c->seq;
 	out_hdr->sadb_msg_pid = c->portid;
-	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
+	pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
 	return 0;
 
 }
@@ -2401,7 +2401,7 @@
 	out_hdr->sadb_msg_errno = 0;
 	out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
 	out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
-	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
+	pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
 	err = 0;
 
 out:
@@ -2655,7 +2655,7 @@
 	out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
 
 	if (pfk->dump.skb)
-		pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+		pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
 				&pfk->sk, sock_net(&pfk->sk));
 	pfk->dump.skb = out_skb;
 
@@ -2708,7 +2708,7 @@
 	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
 	hdr->sadb_msg_reserved = 0;
-	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+	pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
 	return 0;
 
 }
@@ -2770,7 +2770,7 @@
 	void *ext_hdrs[SADB_EXT_MAX];
 	int err;
 
-	pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+	pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
 			BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
 
 	memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -2992,7 +2992,7 @@
 	out_hdr->sadb_msg_seq = 0;
 	out_hdr->sadb_msg_pid = 0;
 
-	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+	pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
 	return 0;
 }
 
@@ -3182,7 +3182,7 @@
 		       xfrm_ctx->ctx_len);
 	}
 
-	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+	return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
 }
 
 static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3380,7 +3380,7 @@
 	n_port->sadb_x_nat_t_port_port = sport;
 	n_port->sadb_x_nat_t_port_reserved = 0;
 
-	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+	return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
 }
 
 #ifdef CONFIG_NET_KEY_MIGRATE
@@ -3572,7 +3572,7 @@
 	}
 
 	/* broadcast migrate message to sockets */
-	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
+	pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
 
 	return 0;
 
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 086de49..3891cbd 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -7,7 +7,6 @@
 	select CRYPTO_CCM
 	select CRYPTO_GCM
 	select CRC32
-	select AVERAGE
 	---help---
 	  This option enables the hardware independent IEEE 802.11
 	  networking stack.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3275f01..783e891 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -3,6 +3,7 @@
 # mac80211 objects
 mac80211-y := \
 	main.o status.o \
+	driver-ops.o \
 	sta_info.o \
 	wep.o \
 	wpa.o \
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index 4192806..bdf0790 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -145,20 +145,3 @@
 {
 	crypto_free_cipher(tfm);
 }
-
-void ieee80211_aes_cmac_calculate_k1_k2(struct ieee80211_key_conf *keyconf,
-					u8 *k1, u8 *k2)
-{
-	u8 l[AES_BLOCK_SIZE] = {};
-	struct ieee80211_key *key =
-		container_of(keyconf, struct ieee80211_key, conf);
-
-	crypto_cipher_encrypt_one(key->u.aes_cmac.tfm, l, l);
-
-	memcpy(k1, l, AES_BLOCK_SIZE);
-	gf_mulx(k1);
-
-	memcpy(k2, k1, AES_BLOCK_SIZE);
-	gf_mulx(k2);
-}
-EXPORT_SYMBOL(ieee80211_aes_cmac_calculate_k1_k2);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index bf7023f..685ec13 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1019,6 +1019,65 @@
 	return 0;
 }
 
+static void sta_apply_mesh_params(struct ieee80211_local *local,
+				  struct sta_info *sta,
+				  struct station_parameters *params)
+{
+#ifdef CONFIG_MAC80211_MESH
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed = 0;
+
+	if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
+		switch (params->plink_state) {
+		case NL80211_PLINK_ESTAB:
+			if (sta->mesh->plink_state != NL80211_PLINK_ESTAB)
+				changed = mesh_plink_inc_estab_count(sdata);
+			sta->mesh->plink_state = params->plink_state;
+
+			ieee80211_mps_sta_status_update(sta);
+			changed |= ieee80211_mps_set_sta_local_pm(sta,
+				      sdata->u.mesh.mshcfg.power_mode);
+			break;
+		case NL80211_PLINK_LISTEN:
+		case NL80211_PLINK_BLOCKED:
+		case NL80211_PLINK_OPN_SNT:
+		case NL80211_PLINK_OPN_RCVD:
+		case NL80211_PLINK_CNF_RCVD:
+		case NL80211_PLINK_HOLDING:
+			if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+				changed = mesh_plink_dec_estab_count(sdata);
+			sta->mesh->plink_state = params->plink_state;
+
+			ieee80211_mps_sta_status_update(sta);
+			changed |= ieee80211_mps_set_sta_local_pm(sta,
+					NL80211_MESH_POWER_UNKNOWN);
+			break;
+		default:
+			/*  nothing  */
+			break;
+		}
+	}
+
+	switch (params->plink_action) {
+	case NL80211_PLINK_ACTION_NO_ACTION:
+		/* nothing */
+		break;
+	case NL80211_PLINK_ACTION_OPEN:
+		changed |= mesh_plink_open(sta);
+		break;
+	case NL80211_PLINK_ACTION_BLOCK:
+		changed |= mesh_plink_block(sta);
+		break;
+	}
+
+	if (params->local_pm)
+		changed |= ieee80211_mps_set_sta_local_pm(sta,
+							  params->local_pm);
+
+	ieee80211_mbss_info_change_notify(sdata, changed);
+#endif
+}
+
 static int sta_apply_parameters(struct ieee80211_local *local,
 				struct sta_info *sta,
 				struct station_parameters *params)
@@ -1076,7 +1135,6 @@
 	}
 
 	if (mask & BIT(NL80211_STA_FLAG_MFP)) {
-		sta->sta.mfp = !!(set & BIT(NL80211_STA_FLAG_MFP));
 		if (set & BIT(NL80211_STA_FLAG_MFP))
 			set_sta_flag(sta, WLAN_STA_MFP);
 		else
@@ -1097,6 +1155,12 @@
 	    params->ext_capab[3] & WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)
 		set_sta_flag(sta, WLAN_STA_TDLS_CHAN_SWITCH);
 
+	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
+	    ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) &&
+	    params->ext_capab_len >= 8 &&
+	    params->ext_capab[7] & WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED)
+		set_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW);
+
 	if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD) {
 		sta->sta.uapsd_queues = params->uapsd_queues;
 		sta->sta.max_sp = params->max_sp;
@@ -1144,62 +1208,8 @@
 					      band, false);
 	}
 
-	if (ieee80211_vif_is_mesh(&sdata->vif)) {
-#ifdef CONFIG_MAC80211_MESH
-		u32 changed = 0;
-
-		if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
-			switch (params->plink_state) {
-			case NL80211_PLINK_ESTAB:
-				if (sta->plink_state != NL80211_PLINK_ESTAB)
-					changed = mesh_plink_inc_estab_count(
-							sdata);
-				sta->plink_state = params->plink_state;
-
-				ieee80211_mps_sta_status_update(sta);
-				changed |= ieee80211_mps_set_sta_local_pm(sta,
-					      sdata->u.mesh.mshcfg.power_mode);
-				break;
-			case NL80211_PLINK_LISTEN:
-			case NL80211_PLINK_BLOCKED:
-			case NL80211_PLINK_OPN_SNT:
-			case NL80211_PLINK_OPN_RCVD:
-			case NL80211_PLINK_CNF_RCVD:
-			case NL80211_PLINK_HOLDING:
-				if (sta->plink_state == NL80211_PLINK_ESTAB)
-					changed = mesh_plink_dec_estab_count(
-							sdata);
-				sta->plink_state = params->plink_state;
-
-				ieee80211_mps_sta_status_update(sta);
-				changed |= ieee80211_mps_set_sta_local_pm(sta,
-						NL80211_MESH_POWER_UNKNOWN);
-				break;
-			default:
-				/*  nothing  */
-				break;
-			}
-		}
-
-		switch (params->plink_action) {
-		case NL80211_PLINK_ACTION_NO_ACTION:
-			/* nothing */
-			break;
-		case NL80211_PLINK_ACTION_OPEN:
-			changed |= mesh_plink_open(sta);
-			break;
-		case NL80211_PLINK_ACTION_BLOCK:
-			changed |= mesh_plink_block(sta);
-			break;
-		}
-
-		if (params->local_pm)
-			changed |=
-			      ieee80211_mps_set_sta_local_pm(sta,
-							     params->local_pm);
-		ieee80211_mbss_info_change_notify(sdata, changed);
-#endif
-	}
+	if (ieee80211_vif_is_mesh(&sdata->vif))
+		sta_apply_mesh_params(local, sta, params);
 
 	/* set the STA state after all sta info from usermode has been set */
 	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
@@ -2358,6 +2368,8 @@
 	const u8 *ap;
 	enum ieee80211_smps_mode old_req;
 	int err;
+	struct sta_info *sta;
+	bool tdls_peer_found = false;
 
 	lockdep_assert_held(&sdata->wdev.mtx);
 
@@ -2382,11 +2394,22 @@
 
 	ap = sdata->u.mgd.associated->bssid;
 
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+			continue;
+
+		tdls_peer_found = true;
+		break;
+	}
+	rcu_read_unlock();
+
 	if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
-		if (sdata->u.mgd.powersave)
-			smps_mode = IEEE80211_SMPS_DYNAMIC;
-		else
+		if (tdls_peer_found || !sdata->u.mgd.powersave)
 			smps_mode = IEEE80211_SMPS_OFF;
+		else
+			smps_mode = IEEE80211_SMPS_DYNAMIC;
 	}
 
 	/* send SM PS frame to AP */
@@ -2394,6 +2417,8 @@
 					 ap, ap);
 	if (err)
 		sdata->u.mgd.req_smps = old_req;
+	else if (smps_mode != IEEE80211_SMPS_OFF && tdls_peer_found)
+		ieee80211_teardown_tdls_peers(sdata);
 
 	return err;
 }
@@ -2479,16 +2504,26 @@
 		sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
 		memcpy(sdata->rc_rateidx_mcs_mask[i], mask->control[i].ht_mcs,
 		       sizeof(mask->control[i].ht_mcs));
+		memcpy(sdata->rc_rateidx_vht_mcs_mask[i],
+		       mask->control[i].vht_mcs,
+		       sizeof(mask->control[i].vht_mcs));
 
 		sdata->rc_has_mcs_mask[i] = false;
+		sdata->rc_has_vht_mcs_mask[i] = false;
 		if (!sband)
 			continue;
 
-		for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++)
-			if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+		for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
+			if (~sdata->rc_rateidx_mcs_mask[i][j])
 				sdata->rc_has_mcs_mask[i] = true;
+
+			if (~sdata->rc_rateidx_vht_mcs_mask[i][j])
+				sdata->rc_has_vht_mcs_mask[i] = true;
+
+			if (sdata->rc_has_mcs_mask[i] &&
+			    sdata->rc_has_vht_mcs_mask[i])
 				break;
-			}
+		}
 	}
 
 	return 0;
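The new per-band rc_rateidx_vht_mcs_mask[] configured above holds one 16-bit bitmap of allowed MCS values for each of the NL80211_VHT_NSS_MAX spatial streams; an all-ones entry leaves that stream unrestricted. A minimal sketch of how such a mask is consulted for a given stream/MCS pair (plain C; VHT_NSS_MAX and the function name are stand-ins for the kernel symbols, not kernel API):

	#include <stdbool.h>
	#include <stdint.h>

	#define VHT_NSS_MAX 8	/* mirrors NL80211_VHT_NSS_MAX */

	/* One uint16_t bitmap of allowed MCS 0..15 per spatial stream,
	 * i.e. the rc_rateidx_vht_mcs_mask[band] layout used above. */
	static bool vht_rate_allowed(const uint16_t mask[VHT_NSS_MAX],
				     unsigned int nss, unsigned int mcs)
	{
		if (nss < 1 || nss > VHT_NSS_MAX || mcs > 15)
			return false;
		return mask[nss - 1] & (1u << mcs);
	}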
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f01c18a..1d1b9b7 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -190,7 +190,7 @@
 	return NULL;
 }
 
-static enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta)
 {
 	switch (sta->bandwidth) {
 	case IEEE80211_STA_RX_BW_20:
@@ -264,9 +264,17 @@
 		case NL80211_IFTYPE_AP_VLAN:
 			width = ieee80211_get_max_required_bw(sdata);
 			break;
+		case NL80211_IFTYPE_STATION:
+			/*
+			 * The ap's sta->bandwidth is not set yet at this
+			 * point, so take the width from the chandef, but
+			 * account also for TDLS peers
+			 */
+			width = max(vif->bss_conf.chandef.width,
+				    ieee80211_get_max_required_bw(sdata));
+			break;
 		case NL80211_IFTYPE_P2P_DEVICE:
 			continue;
-		case NL80211_IFTYPE_STATION:
 		case NL80211_IFTYPE_ADHOC:
 		case NL80211_IFTYPE_WDS:
 		case NL80211_IFTYPE_MESH_POINT:
@@ -554,12 +562,13 @@
 	kfree_rcu(ctx, rcu_head);
 }
 
-static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
-					      struct ieee80211_chanctx *ctx)
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+				       struct ieee80211_chanctx *ctx)
 {
 	struct ieee80211_chanctx_conf *conf = &ctx->conf;
 	struct ieee80211_sub_if_data *sdata;
 	const struct cfg80211_chan_def *compat = NULL;
+	struct sta_info *sta;
 
 	lockdep_assert_held(&local->chanctx_mtx);
 
@@ -581,6 +590,20 @@
 		if (WARN_ON_ONCE(!compat))
 			break;
 	}
+
+	/* TDLS peers can sometimes affect the chandef width */
+	list_for_each_entry_rcu(sta, &local->sta_list, list) {
+		if (!sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+		    !sta->tdls_chandef.chan)
+			continue;
+
+		compat = cfg80211_chandef_compatible(&sta->tdls_chandef,
+						     compat);
+		if (WARN_ON_ONCE(!compat))
+			break;
+	}
 	rcu_read_unlock();
 
 	if (!compat)
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 3ea8b7d..ced6bf3 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -122,6 +122,7 @@
 	FLAG(CHANCTX_STA_CSA),
 	FLAG(SUPPORTS_CLONED_SKBS),
 	FLAG(SINGLE_SCAN_ON_ALL_BANDS),
+	FLAG(TDLS_WIDER_BW),
 
 	/* keep last for the build bug below */
 	(void *)0x1
@@ -277,7 +278,6 @@
 	DEBUGFS_STATS_ADD(rx_handlers_queued);
 	DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc);
 	DEBUGFS_STATS_ADD(rx_handlers_drop_defrag);
-	DEBUGFS_STATS_ADD(rx_handlers_drop_short);
 	DEBUGFS_STATS_ADD(tx_expand_skb_head);
 	DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned);
 	DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e82bf1e..702ca12 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -57,7 +57,6 @@
 KEY_CONF_FILE(keyidx, D);
 KEY_CONF_FILE(hw_key_idx, D);
 KEY_FILE(flags, X);
-KEY_FILE(tx_rx_count, D);
 KEY_READ(ifindex, sdata->name, "%s\n");
 KEY_OPS(ifindex);
 
@@ -310,7 +309,6 @@
 	DEBUGFS_ADD(flags);
 	DEBUGFS_ADD(keyidx);
 	DEBUGFS_ADD(hw_key_idx);
-	DEBUGFS_ADD(tx_rx_count);
 	DEBUGFS_ADD(algorithm);
 	DEBUGFS_ADD(tx_spec);
 	DEBUGFS_ADD(rx_spec);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c09c013..1021e87 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -186,6 +186,38 @@
 IEEE80211_IF_FILE(rc_rateidx_mcs_mask_5ghz,
 		  rc_rateidx_mcs_mask[IEEE80211_BAND_5GHZ], HEXARRAY);
 
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_2ghz(
+				const struct ieee80211_sub_if_data *sdata,
+				char *buf, int buflen)
+{
+	int i, len = 0;
+	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_2GHZ];
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+		len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+	len += scnprintf(buf + len, buflen - len, "\n");
+
+	return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_2ghz);
+
+static ssize_t ieee80211_if_fmt_rc_rateidx_vht_mcs_mask_5ghz(
+				const struct ieee80211_sub_if_data *sdata,
+				char *buf, int buflen)
+{
+	int i, len = 0;
+	const u16 *mask = sdata->rc_rateidx_vht_mcs_mask[IEEE80211_BAND_5GHZ];
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+		len += scnprintf(buf + len, buflen - len, "%04x ", mask[i]);
+	len += scnprintf(buf + len, buflen - len, "\n");
+
+	return len;
+}
+
+IEEE80211_IF_FILE_R(rc_rateidx_vht_mcs_mask_5ghz);
+
 IEEE80211_IF_FILE(flags, flags, HEX);
 IEEE80211_IF_FILE(state, state, LHEX);
 IEEE80211_IF_FILE(txpower, vif.bss_conf.txpower, DEC);
@@ -565,6 +597,8 @@
 	DEBUGFS_ADD(rc_rateidx_mask_5ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
 	DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
+	DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_2ghz);
+	DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
 	DEBUGFS_ADD(hw_queues);
 }
 
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
new file mode 100644
index 0000000..267c3b1
--- /dev/null
+++ b/net/mac80211/driver-ops.c
@@ -0,0 +1,41 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+#include "trace.h"
+#include "driver-ops.h"
+
+__must_check
+int drv_sta_state(struct ieee80211_local *local,
+		  struct ieee80211_sub_if_data *sdata,
+		  struct sta_info *sta,
+		  enum ieee80211_sta_state old_state,
+		  enum ieee80211_sta_state new_state)
+{
+	int ret = 0;
+
+	might_sleep();
+
+	sdata = get_bss_sdata(sdata);
+	if (!check_sdata_in_driver(sdata))
+		return -EIO;
+
+	trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
+	if (local->ops->sta_state) {
+		ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
+					    old_state, new_state);
+	} else if (old_state == IEEE80211_STA_AUTH &&
+		   new_state == IEEE80211_STA_ASSOC) {
+		ret = drv_sta_add(local, sdata, &sta->sta);
+		if (ret == 0)
+			sta->uploaded = true;
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		   new_state == IEEE80211_STA_AUTH) {
+		drv_sta_remove(local, sdata, &sta->sta);
+	}
+	trace_drv_return_int(local, ret);
+	return ret;
+}
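With drv_sta_state() now out of line, the contract it implements is easier to see: a driver either provides its own .sta_state callback or, as the fallback above shows, mac80211 maps the AUTH->ASSOC and ASSOC->AUTH transitions onto sta_add/sta_remove. mac80211 walks a station through its states one step at a time, so the callback only ever sees adjacent state pairs. A small standalone sketch of that stepping discipline (simplified state names and helper are illustrative, not kernel code):

	/* Abbreviated stand-ins for the IEEE80211_STA_* states. */
	enum sta_state { NOTEXIST, NONE, AUTH, ASSOC, AUTHORIZED };

	/* Step toward a target state one transition at a time, invoking a
	 * callback for each hop (the callback stands in for the driver op,
	 * which therefore only ever sees adjacent old/new pairs). */
	static void move_sta(enum sta_state *cur, enum sta_state target,
			     void (*notify)(enum sta_state old_state,
					    enum sta_state new_state))
	{
		while (*cur != target) {
			enum sta_state next = *cur + (target > *cur ? 1 : -1);

			notify(*cur, next);
			*cur = next;
		}
	}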
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 32a2e70..02d9133 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -573,37 +573,12 @@
 	trace_drv_return_void(local);
 }
 
-static inline __must_check
+__must_check
 int drv_sta_state(struct ieee80211_local *local,
 		  struct ieee80211_sub_if_data *sdata,
 		  struct sta_info *sta,
 		  enum ieee80211_sta_state old_state,
-		  enum ieee80211_sta_state new_state)
-{
-	int ret = 0;
-
-	might_sleep();
-
-	sdata = get_bss_sdata(sdata);
-	if (!check_sdata_in_driver(sdata))
-		return -EIO;
-
-	trace_drv_sta_state(local, sdata, &sta->sta, old_state, new_state);
-	if (local->ops->sta_state) {
-		ret = local->ops->sta_state(&local->hw, &sdata->vif, &sta->sta,
-					    old_state, new_state);
-	} else if (old_state == IEEE80211_STA_AUTH &&
-		   new_state == IEEE80211_STA_ASSOC) {
-		ret = drv_sta_add(local, sdata, &sta->sta);
-		if (ret == 0)
-			sta->uploaded = true;
-	} else if (old_state == IEEE80211_STA_ASSOC &&
-		   new_state == IEEE80211_STA_AUTH) {
-		drv_sta_remove(local, sdata, &sta->sta);
-	}
-	trace_drv_return_int(local, ret);
-	return ret;
-}
+		  enum ieee80211_sta_state new_state);
 
 static inline void drv_sta_rc_update(struct ieee80211_local *local,
 				     struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b12f615..6e52659 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -84,13 +84,13 @@
 #define IEEE80211_DEAUTH_FRAME_LEN	(24 /* hdr */ + 2 /* reason */)
 
 struct ieee80211_fragment_entry {
-	unsigned long first_frag_time;
-	unsigned int seq;
-	unsigned int rx_queue;
-	unsigned int last_frag;
-	unsigned int extra_len;
 	struct sk_buff_head skb_list;
-	int ccmp; /* Whether fragments were encrypted with CCMP */
+	unsigned long first_frag_time;
+	u16 seq;
+	u16 extra_len;
+	u16 last_frag;
+	u8 rx_queue;
+	bool ccmp; /* Whether fragments were encrypted with CCMP */
 	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
 };
 
@@ -181,7 +181,6 @@
 
 /**
  * enum ieee80211_packet_rx_flags - packet RX flags
- * @IEEE80211_RX_FRAGMENTED: fragmented frame
  * @IEEE80211_RX_AMSDU: a-MSDU packet
  * @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
  * @IEEE80211_RX_DEFERRED_RELEASE: frame was subjected to receive reordering
@@ -190,7 +189,6 @@
  * @rx_flags field of &struct ieee80211_rx_status.
  */
 enum ieee80211_packet_rx_flags {
-	IEEE80211_RX_FRAGMENTED			= BIT(2),
 	IEEE80211_RX_AMSDU			= BIT(3),
 	IEEE80211_RX_MALFORMED_ACTION_FRM	= BIT(4),
 	IEEE80211_RX_DEFERRED_RELEASE		= BIT(5),
@@ -202,8 +200,6 @@
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *	to cfg80211_report_obss_beacon().
- * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
- *	reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -211,10 +207,10 @@
 enum ieee80211_rx_flags {
 	IEEE80211_RX_CMNTR		= BIT(0),
 	IEEE80211_RX_BEACON_REPORTED	= BIT(1),
-	IEEE80211_RX_REORDER_TIMER	= BIT(2),
 };
 
 struct ieee80211_rx_data {
+	struct napi_struct *napi;
 	struct sk_buff *skb;
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
@@ -725,6 +721,7 @@
  *	back to wireless media and to the local net stack.
  * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
  * @IEEE80211_SDATA_IN_DRIVER: indicates interface was added to driver
+ * @IEEE80211_SDATA_MU_MIMO_OWNER: indicates interface owns MU-MIMO capability
  */
 enum ieee80211_sub_if_data_flags {
 	IEEE80211_SDATA_ALLMULTI		= BIT(0),
@@ -732,6 +729,7 @@
 	IEEE80211_SDATA_DONT_BRIDGE_PACKETS	= BIT(3),
 	IEEE80211_SDATA_DISCONNECT_RESUME	= BIT(4),
 	IEEE80211_SDATA_IN_DRIVER		= BIT(5),
+	IEEE80211_SDATA_MU_MIMO_OWNER		= BIT(6),
 };
 
 /**
@@ -903,6 +901,9 @@
 	bool rc_has_mcs_mask[IEEE80211_NUM_BANDS];
 	u8  rc_rateidx_mcs_mask[IEEE80211_NUM_BANDS][IEEE80211_HT_MCS_MASK_LEN];
 
+	bool rc_has_vht_mcs_mask[IEEE80211_NUM_BANDS];
+	u16 rc_rateidx_vht_mcs_mask[IEEE80211_NUM_BANDS][NL80211_VHT_NSS_MAX];
+
 	union {
 		struct ieee80211_if_ap ap;
 		struct ieee80211_if_wds wds;
@@ -1010,7 +1011,6 @@
 	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
 	IEEE80211_SDATA_QUEUE_RX_AGG_START	= 3,
 	IEEE80211_SDATA_QUEUE_RX_AGG_STOP	= 4,
-	IEEE80211_SDATA_QUEUE_TDLS_CHSW		= 5,
 };
 
 enum {
@@ -1286,7 +1286,6 @@
 	unsigned int rx_handlers_queued;
 	unsigned int rx_handlers_drop_nullfunc;
 	unsigned int rx_handlers_drop_defrag;
-	unsigned int rx_handlers_drop_short;
 	unsigned int tx_expand_skb_head;
 	unsigned int tx_expand_skb_head_cloned;
 	unsigned int rx_expand_skb_head_defrag;
@@ -1348,14 +1347,16 @@
 
 	struct ieee80211_sub_if_data __rcu *p2p_sdata;
 
-	struct napi_struct *napi;
-
 	/* virtual monitor interface */
 	struct ieee80211_sub_if_data __rcu *monitor_sdata;
 	struct cfg80211_chan_def monitor_chandef;
 
 	/* extended capabilities provided by mac80211 */
 	u8 ext_capa[8];
+
+	/* TDLS channel switch */
+	struct work_struct tdls_chsw_work;
+	struct sk_buff_head skb_queue_tdls_chsw;
 };
 
 static inline struct ieee80211_sub_if_data *
@@ -1715,6 +1716,8 @@
 				 enum ieee80211_band band, bool nss_only);
 void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_sta_vht_cap *vht_cap);
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+				     u16 vht_mask[NL80211_VHT_NSS_MAX]);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
@@ -1763,8 +1766,6 @@
 
 /* utility functions/constants */
 extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-			enum nl80211_iftype type);
 int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
 			     int rate, int erp, int short_preamble,
 			     int shift);
@@ -2042,6 +2043,9 @@
 				 enum ieee80211_chanctx_mode chanmode,
 				 u8 radar_detect);
 int ieee80211_max_num_channels(struct ieee80211_local *local);
+enum nl80211_chan_width ieee80211_get_sta_bw(struct ieee80211_sta *sta);
+void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
+				       struct ieee80211_chanctx *ctx);
 
 /* TDLS */
 int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
@@ -2058,8 +2062,8 @@
 void ieee80211_tdls_cancel_channel_switch(struct wiphy *wiphy,
 					  struct net_device *dev,
 					  const u8 *addr);
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-					   struct sk_buff *skb);
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata);
+void ieee80211_tdls_chsw_work(struct work_struct *wk);
 
 extern const struct ethtool_ops ieee80211_ethtool_ops;
 
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 553ac6d..6964fc6 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1242,8 +1242,6 @@
 							WLAN_BACK_RECIPIENT, 0,
 							false);
 			mutex_unlock(&local->sta_mtx);
-		} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_TDLS_CHSW) {
-			ieee80211_process_tdls_channel_switch(sdata, skb);
 		} else if (ieee80211_is_action(mgmt->frame_control) &&
 			   mgmt->u.action.category == WLAN_CATEGORY_BACK) {
 			int len = skb->len;
@@ -1790,13 +1788,23 @@
 		sband = local->hw.wiphy->bands[i];
 		sdata->rc_rateidx_mask[i] =
 			sband ? (1 << sband->n_bitrates) - 1 : 0;
-		if (sband)
+		if (sband) {
+			__le16 cap;
+			u16 *vht_rate_mask;
+
 			memcpy(sdata->rc_rateidx_mcs_mask[i],
 			       sband->ht_cap.mcs.rx_mask,
 			       sizeof(sdata->rc_rateidx_mcs_mask[i]));
-		else
+
+			cap = sband->vht_cap.vht_mcs.rx_mcs_map;
+			vht_rate_mask = sdata->rc_rateidx_vht_mcs_mask[i];
+			ieee80211_get_vht_mask_from_cap(cap, vht_rate_mask);
+		} else {
 			memset(sdata->rc_rateidx_mcs_mask[i], 0,
 			       sizeof(sdata->rc_rateidx_mcs_mask[i]));
+			memset(sdata->rc_rateidx_vht_mcs_mask[i], 0,
+			       sizeof(sdata->rc_rateidx_vht_mcs_mask[i]));
+		}
 	}
 
 	ieee80211_set_default_queues(sdata);
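ieee80211_get_vht_mask_from_cap(), declared in ieee80211_i.h above and used here to seed the default VHT mask, is not part of this excerpt; its job is to expand the VHT RX MCS map, which carries two bits per spatial stream, into the per-stream bitmaps. A plausible equivalent in plain C, assuming a host-order map rather than the __le16 the kernel passes (names are stand-ins):

	#include <stdint.h>

	#define VHT_NSS_MAX 8	/* mirrors NL80211_VHT_NSS_MAX */

	/* Two bits per stream in the RX MCS map: 0 = MCS 0-7, 1 = MCS 0-8,
	 * 2 = MCS 0-9, 3 = stream not supported.  Expand into one 16-bit
	 * allowed-MCS bitmap per stream. */
	static void vht_mask_from_mcs_map(uint16_t mcs_map,
					  uint16_t mask[VHT_NSS_MAX])
	{
		int nss;

		for (nss = 0; nss < VHT_NSS_MAX; nss++) {
			switch ((mcs_map >> (2 * nss)) & 0x3) {
			case 0:
				mask[nss] = 0x00ff;	/* MCS 0-7 */
				break;
			case 1:
				mask[nss] = 0x01ff;	/* MCS 0-8 */
				break;
			case 2:
				mask[nss] = 0x03ff;	/* MCS 0-9 */
				break;
			default:
				mask[nss] = 0x0000;	/* not supported */
				break;
			}
		}
	}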
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index b22df3a..44388d6 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -336,7 +336,6 @@
 			ieee80211_check_fast_xmit(sta);
 		} else {
 			rcu_assign_pointer(sta->gtk[idx], new);
-			sta->gtk_idx = idx;
 		}
 	} else {
 		defunikey = old &&
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 3f4f9ea..9951ef0 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -115,9 +115,6 @@
 		} gen;
 	} u;
 
-	/* number of times this key has been used */
-	int tx_rx_count;
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct {
 		struct dentry *stalink;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 3c63468..ff79a13 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -629,6 +629,8 @@
 	INIT_WORK(&local->sched_scan_stopped_work,
 		  ieee80211_sched_scan_stopped_work);
 
+	INIT_WORK(&local->tdls_chsw_work, ieee80211_tdls_chsw_work);
+
 	spin_lock_init(&local->ack_status_lock);
 	idr_init(&local->ack_status_frames);
 
@@ -645,6 +647,7 @@
 
 	skb_queue_head_init(&local->skb_queue);
 	skb_queue_head_init(&local->skb_queue_unreliable);
+	skb_queue_head_init(&local->skb_queue_tdls_chsw);
 
 	ieee80211_alloc_led_names(local);
 
@@ -1132,18 +1135,6 @@
 }
 EXPORT_SYMBOL(ieee80211_register_hw);
 
-void ieee80211_napi_add(struct ieee80211_hw *hw, struct napi_struct *napi,
-			struct net_device *napi_dev,
-			int (*poll)(struct napi_struct *, int),
-			int weight)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
-	netif_napi_add(napi_dev, napi, poll, weight);
-	local->napi = napi;
-}
-EXPORT_SYMBOL_GPL(ieee80211_napi_add);
-
 void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
@@ -1173,6 +1164,7 @@
 
 	cancel_work_sync(&local->restart_work);
 	cancel_work_sync(&local->reconfig_filter);
+	cancel_work_sync(&local->tdls_chsw_work);
 	flush_work(&local->sched_scan_stopped_work);
 
 	ieee80211_clear_tx_pending(local);
@@ -1183,6 +1175,7 @@
 		wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
+	skb_queue_purge(&local->skb_queue_tdls_chsw);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 817098a..e06a5ca 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -158,7 +158,7 @@
 	changed = mesh_accept_plinks_update(sdata);
 	if (!sdata->u.mesh.user_mpm) {
 		changed |= mesh_plink_deactivate(sta);
-		del_timer_sync(&sta->plink_timer);
+		del_timer_sync(&sta->mesh->plink_timer);
 	}
 
 	if (changed)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 085edc1..d80e0a4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -19,15 +19,6 @@
 
 #define MAX_PREQ_QUEUE_LEN	64
 
-/* Destination only */
-#define MP_F_DO	0x1
-/* Reply and forward */
-#define MP_F_RF	0x2
-/* Unknown Sequence Number */
-#define MP_F_USN    0x01
-/* Reason code Present */
-#define MP_F_RCODE  0x02
-
 static void mesh_queue_preq(struct mesh_path *, u8);
 
 static inline u32 u32_field_get(const u8 *preq_elem, int offset, bool ae)
@@ -79,6 +70,12 @@
 #define MSEC_TO_TU(x) (x*1000/1024)
 #define SN_GT(x, y) ((s32)(y - x) < 0)
 #define SN_LT(x, y) ((s32)(x - y) < 0)
+#define MAX_SANE_SN_DELTA 32
+
+static inline u32 SN_DELTA(u32 x, u32 y)
+{
+	return x >= y ? x - y : y - x;
+}
 
 #define net_traversal_jiffies(s) \
 	msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
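HWMP sequence numbers are modulo-2^32 counters, so SN_GT()/SN_LT() above compare them through the sign of a 32-bit difference rather than a plain '<', and the new SN_DELTA()/MAX_SANE_SN_DELTA pair is used further down to treat an implausibly large jump as a peer that rebooted or restarted. A small standalone check of that wraparound behaviour (plain C, mirrors the macros but is not kernel code):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Same idea as SN_GT()/SN_DELTA(): x is "newer" than y when the
	 * 32-bit difference y - x is negative when reinterpreted as signed,
	 * which stays correct across the 0xffffffff -> 0 wrap. */
	static bool sn_gt(uint32_t x, uint32_t y)
	{
		return (int32_t)(y - x) < 0;
	}

	static uint32_t sn_delta(uint32_t x, uint32_t y)
	{
		return x >= y ? x - y : y - x;
	}

	int main(void)
	{
		assert(sn_gt(10, 5));			/* ordinary case */
		assert(sn_gt(3, 0xfffffffeu));		/* across the wrap */
		assert(sn_delta(40, 10) == 30);		/* under the sane cap */
		return 0;
	}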
@@ -279,15 +276,10 @@
 	*pos++ = ttl;
 	/* number of destinations */
 	*pos++ = 1;
-	/*
-	 * flags bit, bit 1 is unset if we know the sequence number and
-	 * bit 2 is set if we have a reason code
+	/* Flags field has AE bit only as defined in
+	 * sec 8.4.2.117 IEEE802.11-2012
 	 */
 	*pos = 0;
-	if (!target_sn)
-		*pos |= MP_F_USN;
-	if (target_rcode)
-		*pos |= MP_F_RCODE;
 	pos++;
 	memcpy(pos, target, ETH_ALEN);
 	pos += ETH_ALEN;
@@ -316,8 +308,9 @@
 	failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
 	/* moving average, scaled to 100 */
-	sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
-	if (sta->fail_avg > 95)
+	sta->mesh->fail_avg =
+		((80 * sta->mesh->fail_avg + 5) / 100 + 20 * failed);
+	if (sta->mesh->fail_avg > 95)
 		mesh_plink_broken(sta);
 }
 
@@ -333,7 +326,7 @@
 	u32 tx_time, estimated_retx;
 	u64 result;
 
-	if (sta->fail_avg >= 100)
+	if (sta->mesh->fail_avg >= 100)
 		return MAX_METRIC;
 
 	sta_set_rate_info_tx(sta, &sta->last_tx_rate, &rinfo);
@@ -341,7 +334,7 @@
 	if (WARN_ON(!rate))
 		return MAX_METRIC;
 
-	err = (sta->fail_avg << ARITH_SHIFT) / 100;
+	err = (sta->mesh->fail_avg << ARITH_SHIFT) / 100;
 
 	/* bitrate is in units of 100 Kbps, while we need rate in units of
 	 * 1Mbps. This will be corrected on tx_time computation.
@@ -441,6 +434,26 @@
 					process = false;
 					fresh_info = false;
 				}
+			} else if (!(mpath->flags & MESH_PATH_ACTIVE)) {
+				bool have_sn, newer_sn, bounced;
+
+				have_sn = mpath->flags & MESH_PATH_SN_VALID;
+				newer_sn = have_sn && SN_GT(orig_sn, mpath->sn);
+				bounced = have_sn &&
+					  (SN_DELTA(orig_sn, mpath->sn) >
+							MAX_SANE_SN_DELTA);
+
+				if (!have_sn || newer_sn) {
+					/* if SN is newer than what we had
+					 * then we can take it */;
+				} else if (bounced) {
+					/* if SN is way different than what
+					 * we had then assume the other side
+					 * rebooted or restarted */;
+				} else {
+					process = false;
+					fresh_info = false;
+				}
 			}
 		} else {
 			mpath = mesh_path_add(sdata, orig_addr);
@@ -570,15 +583,13 @@
 					SN_LT(mpath->sn, target_sn)) {
 				mpath->sn = target_sn;
 				mpath->flags |= MESH_PATH_SN_VALID;
-			} else if ((!(target_flags & MP_F_DO)) &&
+			} else if ((!(target_flags & IEEE80211_PREQ_TO_FLAG)) &&
 					(mpath->flags & MESH_PATH_ACTIVE)) {
 				reply = true;
 				target_metric = mpath->metric;
 				target_sn = mpath->sn;
-				if (target_flags & MP_F_RF)
-					target_flags |= MP_F_DO;
-				else
-					forward = false;
+				/* Case E2 of sec 13.10.9.3 IEEE 802.11-2012*/
+				target_flags |= IEEE80211_PREQ_TO_FLAG;
 			}
 		}
 		rcu_read_unlock();
@@ -736,9 +747,12 @@
 		if (mpath->flags & MESH_PATH_ACTIVE &&
 		    ether_addr_equal(ta, sta->sta.addr) &&
 		    (!(mpath->flags & MESH_PATH_SN_VALID) ||
-		    SN_GT(target_sn, mpath->sn))) {
+		    SN_GT(target_sn, mpath->sn)  || target_sn == 0)) {
 			mpath->flags &= ~MESH_PATH_ACTIVE;
-			mpath->sn = target_sn;
+			if (target_sn != 0)
+				mpath->sn = target_sn;
+			else
+				mpath->sn += 1;
 			spin_unlock_bh(&mpath->state_lock);
 			if (!ifmsh->mshcfg.dot11MeshForwarding)
 				goto endperr;
@@ -862,7 +876,7 @@
 
 	rcu_read_lock();
 	sta = sta_info_get(sdata, mgmt->sa);
-	if (!sta || sta->plink_state != NL80211_PLINK_ESTAB) {
+	if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 		rcu_read_unlock();
 		return;
 	}
@@ -974,7 +988,7 @@
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
 	struct mesh_preq_queue *preq_node;
 	struct mesh_path *mpath;
-	u8 ttl, target_flags;
+	u8 ttl, target_flags = 0;
 	const u8 *da;
 	u32 lifetime;
 
@@ -1033,9 +1047,9 @@
 	}
 
 	if (preq_node->flags & PREQ_Q_F_REFRESH)
-		target_flags = MP_F_DO;
+		target_flags |= IEEE80211_PREQ_TO_FLAG;
 	else
-		target_flags = MP_F_RF;
+		target_flags &= ~IEEE80211_PREQ_TO_FLAG;
 
 	spin_unlock_bh(&mpath->state_lock);
 	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
@@ -1176,7 +1190,9 @@
 		spin_unlock_bh(&mpath->state_lock);
 		mesh_queue_preq(mpath, 0);
 	} else {
-		mpath->flags = 0;
+		mpath->flags &= ~(MESH_PATH_RESOLVING |
+				  MESH_PATH_RESOLVED |
+				  MESH_PATH_REQ_QUEUED);
 		mpath->exp_time = jiffies;
 		spin_unlock_bh(&mpath->state_lock);
 		if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3b59099..5838464 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,10 +13,11 @@
 #include "rate.h"
 #include "mesh.h"
 
+#define PLINK_CNF_AID(mgmt) ((mgmt)->u.action.u.self_prot.variable + 2)
 #define PLINK_GET_LLID(p) (p + 2)
 #define PLINK_GET_PLID(p) (p + 4)
 
-#define mod_plink_timer(s, t) (mod_timer(&s->plink_timer, \
+#define mod_plink_timer(s, t) (mod_timer(&s->mesh->plink_timer, \
 				jiffies + msecs_to_jiffies(t)))
 
 enum plink_event {
@@ -53,18 +54,13 @@
 	[CLS_IGNR] = "CLS_IGNR"
 };
 
-static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
-			       enum ieee80211_self_protected_actioncode action,
-			       u8 *da, u16 llid, u16 plid, u16 reason);
-
-
 /* We only need a valid sta if user configured a minimum rssi_threshold. */
 static bool rssi_threshold_check(struct ieee80211_sub_if_data *sdata,
 				 struct sta_info *sta)
 {
 	s32 rssi_threshold = sdata->u.mesh.mshcfg.rssi_threshold;
 	return rssi_threshold == 0 ||
-	       (sta && (s8) -ewma_read(&sta->avg_signal) > rssi_threshold);
+	       (sta && (s8) -ewma_signal_read(&sta->avg_signal) > rssi_threshold);
 }
 
 /**
@@ -72,14 +68,14 @@
  *
  * @sta: mesh peer link to restart
  *
- * Locking: this function must be called holding sta->plink_lock
+ * Locking: this function must be called holding sta->mesh->plink_lock
  */
 static inline void mesh_plink_fsm_restart(struct sta_info *sta)
 {
-	lockdep_assert_held(&sta->plink_lock);
-	sta->plink_state = NL80211_PLINK_LISTEN;
-	sta->llid = sta->plid = sta->reason = 0;
-	sta->plink_retries = 0;
+	lockdep_assert_held(&sta->mesh->plink_lock);
+	sta->mesh->plink_state = NL80211_PLINK_LISTEN;
+	sta->mesh->llid = sta->mesh->plid = sta->mesh->reason = 0;
+	sta->mesh->plink_retries = 0;
 }
 
 /*
@@ -119,7 +115,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (sdata != sta->sdata ||
-		    sta->plink_state != NL80211_PLINK_ESTAB)
+		    sta->mesh->plink_state != NL80211_PLINK_ESTAB)
 			continue;
 
 		short_slot = false;
@@ -169,7 +165,7 @@
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
 		if (sdata != sta->sdata ||
-		    sta->plink_state != NL80211_PLINK_ESTAB)
+		    sta->mesh->plink_state != NL80211_PLINK_ESTAB)
 			continue;
 
 		if (sta->sta.bandwidth > IEEE80211_STA_RX_BW_20)
@@ -204,59 +200,8 @@
 	return BSS_CHANGED_HT;
 }
 
-/**
- * __mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- * Returns beacon changed flag if the beacon content changed.
- *
- * Locking: the caller must hold sta->plink_lock
- */
-static u32 __mesh_plink_deactivate(struct sta_info *sta)
-{
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed = 0;
-
-	lockdep_assert_held(&sta->plink_lock);
-
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
-		changed = mesh_plink_dec_estab_count(sdata);
-	sta->plink_state = NL80211_PLINK_BLOCKED;
-	mesh_path_flush_by_nexthop(sta);
-
-	ieee80211_mps_sta_status_update(sta);
-	changed |= ieee80211_mps_set_sta_local_pm(sta,
-			NL80211_MESH_POWER_UNKNOWN);
-
-	return changed;
-}
-
-/**
- * mesh_plink_deactivate - deactivate mesh peer link
- *
- * @sta: mesh peer link to deactivate
- *
- * All mesh paths with this peer as next hop will be flushed
- */
-u32 mesh_plink_deactivate(struct sta_info *sta)
-{
-	struct ieee80211_sub_if_data *sdata = sta->sdata;
-	u32 changed;
-
-	spin_lock_bh(&sta->plink_lock);
-	changed = __mesh_plink_deactivate(sta);
-	sta->reason = WLAN_REASON_MESH_PEER_CANCELED;
-	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
-			    sta->sta.addr, sta->llid, sta->plid,
-			    sta->reason);
-	spin_unlock_bh(&sta->plink_lock);
-
-	return changed;
-}
-
 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
+			       struct sta_info *sta,
 			       enum ieee80211_self_protected_actioncode action,
 			       u8 *da, u16 llid, u16 plid, u16 reason)
 {
@@ -306,7 +251,7 @@
 		if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
 			/* AID */
 			pos = skb_put(skb, 2);
-			put_unaligned_le16(plid, pos);
+			put_unaligned_le16(sta->sta.aid, pos);
 		}
 		if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
 		    ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -375,6 +320,58 @@
 	return err;
 }
 
+/**
+ * __mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ * Returns beacon changed flag if the beacon content changed.
+ *
+ * Locking: the caller must hold sta->mesh->plink_lock
+ */
+static u32 __mesh_plink_deactivate(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed = 0;
+
+	lockdep_assert_held(&sta->mesh->plink_lock);
+
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+		changed = mesh_plink_dec_estab_count(sdata);
+	sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+	mesh_path_flush_by_nexthop(sta);
+
+	ieee80211_mps_sta_status_update(sta);
+	changed |= ieee80211_mps_set_sta_local_pm(sta,
+			NL80211_MESH_POWER_UNKNOWN);
+
+	return changed;
+}
+
+/**
+ * mesh_plink_deactivate - deactivate mesh peer link
+ *
+ * @sta: mesh peer link to deactivate
+ *
+ * All mesh paths with this peer as next hop will be flushed
+ */
+u32 mesh_plink_deactivate(struct sta_info *sta)
+{
+	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	u32 changed;
+
+	spin_lock_bh(&sta->mesh->plink_lock);
+	changed = __mesh_plink_deactivate(sta);
+	sta->mesh->reason = WLAN_REASON_MESH_PEER_CANCELED;
+	mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_CLOSE,
+			    sta->sta.addr, sta->mesh->llid, sta->mesh->plid,
+			    sta->mesh->reason);
+	spin_unlock_bh(&sta->mesh->plink_lock);
+
+	return changed;
+}
+
 static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
 			       struct sta_info *sta,
 			       struct ieee802_11_elems *elems, bool insert)
@@ -388,13 +385,14 @@
 	sband = local->hw.wiphy->bands[band];
 	rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
 
-	spin_lock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
 	sta->last_rx = jiffies;
 
 	/* rates and capabilities don't change during peering */
-	if (sta->plink_state == NL80211_PLINK_ESTAB && sta->processed_beacon)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+	    sta->mesh->processed_beacon)
 		goto out;
-	sta->processed_beacon = true;
+	sta->mesh->processed_beacon = true;
 
 	if (sta->sta.supp_rates[band] != rates)
 		changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
@@ -421,23 +419,57 @@
 	else
 		rate_control_rate_update(local, sband, sta, changed);
 out:
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
+}
+
+static int mesh_allocate_aid(struct ieee80211_sub_if_data *sdata)
+{
+	struct sta_info *sta;
+	unsigned long *aid_map;
+	int aid;
+
+	aid_map = kcalloc(BITS_TO_LONGS(IEEE80211_MAX_AID + 1),
+			  sizeof(*aid_map), GFP_KERNEL);
+	if (!aid_map)
+		return -ENOMEM;
+
+	/* reserve aid 0 for mcast indication */
+	__set_bit(0, aid_map);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list)
+		__set_bit(sta->sta.aid, aid_map);
+	rcu_read_unlock();
+
+	aid = find_first_zero_bit(aid_map, IEEE80211_MAX_AID + 1);
+	kfree(aid_map);
+
+	if (aid > IEEE80211_MAX_AID)
+		return -ENOBUFS;
+
+	return aid;
 }
 
 static struct sta_info *
 __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
 {
 	struct sta_info *sta;
+	int aid;
 
 	if (sdata->local->num_sta >= MESH_MAX_PLINKS)
 		return NULL;
 
+	aid = mesh_allocate_aid(sdata);
+	if (aid < 0)
+		return NULL;
+
 	sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
 	if (!sta)
 		return NULL;
 
-	sta->plink_state = NL80211_PLINK_LISTEN;
+	sta->mesh->plink_state = NL80211_PLINK_LISTEN;
 	sta->sta.wme = true;
+	sta->sta.aid = aid;
 
 	sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 	sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -524,7 +556,7 @@
 		goto out;
 
 	if (mesh_peer_accepts_plinks(elems) &&
-	    sta->plink_state == NL80211_PLINK_LISTEN &&
+	    sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
 	    sdata->u.mesh.accepting_plinks &&
 	    sdata->u.mesh.mshcfg.auto_open_plinks &&
 	    rssi_threshold_check(sdata, sta))
@@ -554,52 +586,52 @@
 	if (sta->sdata->local->quiescing)
 		return;
 
-	spin_lock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
 
 	/* If a timer fires just before a state transition on another CPU,
 	 * we may have already extended the timeout and changed state by the
 	 * time we've acquired the lock and arrived  here.  In that case,
 	 * skip this timer and wait for the new one.
 	 */
-	if (time_before(jiffies, sta->plink_timer.expires)) {
+	if (time_before(jiffies, sta->mesh->plink_timer.expires)) {
 		mpl_dbg(sta->sdata,
 			"Ignoring timer for %pM in state %s (timer adjusted)",
-			sta->sta.addr, mplstates[sta->plink_state]);
-		spin_unlock_bh(&sta->plink_lock);
+			sta->sta.addr, mplstates[sta->mesh->plink_state]);
+		spin_unlock_bh(&sta->mesh->plink_lock);
 		return;
 	}
 
 	/* del_timer() and handler may race when entering these states */
-	if (sta->plink_state == NL80211_PLINK_LISTEN ||
-	    sta->plink_state == NL80211_PLINK_ESTAB) {
+	if (sta->mesh->plink_state == NL80211_PLINK_LISTEN ||
+	    sta->mesh->plink_state == NL80211_PLINK_ESTAB) {
 		mpl_dbg(sta->sdata,
 			"Ignoring timer for %pM in state %s (timer deleted)",
-			sta->sta.addr, mplstates[sta->plink_state]);
-		spin_unlock_bh(&sta->plink_lock);
+			sta->sta.addr, mplstates[sta->mesh->plink_state]);
+		spin_unlock_bh(&sta->mesh->plink_lock);
 		return;
 	}
 
 	mpl_dbg(sta->sdata,
 		"Mesh plink timer for %pM fired on state %s\n",
-		sta->sta.addr, mplstates[sta->plink_state]);
+		sta->sta.addr, mplstates[sta->mesh->plink_state]);
 	sdata = sta->sdata;
 	mshcfg = &sdata->u.mesh.mshcfg;
 
-	switch (sta->plink_state) {
+	switch (sta->mesh->plink_state) {
 	case NL80211_PLINK_OPN_RCVD:
 	case NL80211_PLINK_OPN_SNT:
 		/* retry timer */
-		if (sta->plink_retries < mshcfg->dot11MeshMaxRetries) {
+		if (sta->mesh->plink_retries < mshcfg->dot11MeshMaxRetries) {
 			u32 rand;
 			mpl_dbg(sta->sdata,
 				"Mesh plink for %pM (retry, timeout): %d %d\n",
-				sta->sta.addr, sta->plink_retries,
-				sta->plink_timeout);
+				sta->sta.addr, sta->mesh->plink_retries,
+				sta->mesh->plink_timeout);
 			get_random_bytes(&rand, sizeof(u32));
-			sta->plink_timeout = sta->plink_timeout +
-					     rand % sta->plink_timeout;
-			++sta->plink_retries;
-			mod_plink_timer(sta, sta->plink_timeout);
+			sta->mesh->plink_timeout = sta->mesh->plink_timeout +
+					     rand % sta->mesh->plink_timeout;
+			++sta->mesh->plink_retries;
+			mod_plink_timer(sta, sta->mesh->plink_timeout);
 			action = WLAN_SP_MESH_PEERING_OPEN;
 			break;
 		}
@@ -609,31 +641,31 @@
 		/* confirm timer */
 		if (!reason)
 			reason = WLAN_REASON_MESH_CONFIRM_TIMEOUT;
-		sta->plink_state = NL80211_PLINK_HOLDING;
+		sta->mesh->plink_state = NL80211_PLINK_HOLDING;
 		mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 		action = WLAN_SP_MESH_PEERING_CLOSE;
 		break;
 	case NL80211_PLINK_HOLDING:
 		/* holding timer */
-		del_timer(&sta->plink_timer);
+		del_timer(&sta->mesh->plink_timer);
 		mesh_plink_fsm_restart(sta);
 		break;
 	default:
 		break;
 	}
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
 	if (action)
-		mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-				    sta->llid, sta->plid, reason);
+		mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+				    sta->mesh->llid, sta->mesh->plid, reason);
 }
 
 static inline void mesh_plink_timer_set(struct sta_info *sta, u32 timeout)
 {
-	sta->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
-	sta->plink_timer.data = (unsigned long) sta;
-	sta->plink_timer.function = mesh_plink_timer;
-	sta->plink_timeout = timeout;
-	add_timer(&sta->plink_timer);
+	sta->mesh->plink_timer.expires = jiffies + msecs_to_jiffies(timeout);
+	sta->mesh->plink_timer.data = (unsigned long) sta;
+	sta->mesh->plink_timer.function = mesh_plink_timer;
+	sta->mesh->plink_timeout = timeout;
+	add_timer(&sta->mesh->plink_timer);
 }
 
 static bool llid_in_use(struct ieee80211_sub_if_data *sdata,
@@ -645,7 +677,7 @@
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
-		if (!memcmp(&sta->llid, &llid, sizeof(llid))) {
+		if (!memcmp(&sta->mesh->llid, &llid, sizeof(llid))) {
 			in_use = true;
 			break;
 		}
@@ -661,8 +693,6 @@
 
 	do {
 		get_random_bytes(&llid, sizeof(llid));
-		/* for mesh PS we still only have the AID range for TIM bits */
-		llid = (llid % IEEE80211_MAX_AID) + 1;
 	} while (llid_in_use(sdata, llid));
 
 	return llid;
@@ -676,16 +706,16 @@
 	if (!test_sta_flag(sta, WLAN_STA_AUTH))
 		return 0;
 
-	spin_lock_bh(&sta->plink_lock);
-	sta->llid = mesh_get_new_llid(sdata);
-	if (sta->plink_state != NL80211_PLINK_LISTEN &&
-	    sta->plink_state != NL80211_PLINK_BLOCKED) {
-		spin_unlock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
+	sta->mesh->llid = mesh_get_new_llid(sdata);
+	if (sta->mesh->plink_state != NL80211_PLINK_LISTEN &&
+	    sta->mesh->plink_state != NL80211_PLINK_BLOCKED) {
+		spin_unlock_bh(&sta->mesh->plink_lock);
 		return 0;
 	}
-	sta->plink_state = NL80211_PLINK_OPN_SNT;
+	sta->mesh->plink_state = NL80211_PLINK_OPN_SNT;
 	mesh_plink_timer_set(sta, sdata->u.mesh.mshcfg.dot11MeshRetryTimeout);
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
 	mpl_dbg(sdata,
 		"Mesh plink: starting establishment with %pM\n",
 		sta->sta.addr);
@@ -693,8 +723,8 @@
 	/* set the non-peer mode to active during peering */
 	changed = ieee80211_mps_local_status_update(sdata);
 
-	mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
-			    sta->sta.addr, sta->llid, 0, 0);
+	mesh_plink_frame_tx(sdata, sta, WLAN_SP_MESH_PEERING_OPEN,
+			    sta->sta.addr, sta->mesh->llid, 0, 0);
 	return changed;
 }
 
@@ -702,10 +732,10 @@
 {
 	u32 changed;
 
-	spin_lock_bh(&sta->plink_lock);
+	spin_lock_bh(&sta->mesh->plink_lock);
 	changed = __mesh_plink_deactivate(sta);
-	sta->plink_state = NL80211_PLINK_BLOCKED;
-	spin_unlock_bh(&sta->plink_lock);
+	sta->mesh->plink_state = NL80211_PLINK_BLOCKED;
+	spin_unlock_bh(&sta->mesh->plink_lock);
 
 	return changed;
 }
@@ -715,12 +745,11 @@
 			     enum plink_event event)
 {
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
-
 	u16 reason = (event == CLS_ACPT) ?
 		     WLAN_REASON_MESH_CLOSE : WLAN_REASON_MESH_CONFIG;
 
-	sta->reason = reason;
-	sta->plink_state = NL80211_PLINK_HOLDING;
+	sta->mesh->reason = reason;
+	sta->mesh->plink_state = NL80211_PLINK_HOLDING;
 	mod_plink_timer(sta, mshcfg->dot11MeshHoldingTimeout);
 }
 
@@ -730,8 +759,8 @@
 	struct mesh_config *mshcfg = &sdata->u.mesh.mshcfg;
 	u32 changed = 0;
 
-	del_timer(&sta->plink_timer);
-	sta->plink_state = NL80211_PLINK_ESTAB;
+	del_timer(&sta->mesh->plink_timer);
+	sta->mesh->plink_state = NL80211_PLINK_ESTAB;
 	changed |= mesh_plink_inc_estab_count(sdata);
 	changed |= mesh_set_ht_prot_mode(sdata);
 	changed |= mesh_set_short_slot_time(sdata);
@@ -758,18 +787,18 @@
 	u32 changed = 0;
 
 	mpl_dbg(sdata, "peer %pM in state %s got event %s\n", sta->sta.addr,
-		mplstates[sta->plink_state], mplevents[event]);
+		mplstates[sta->mesh->plink_state], mplevents[event]);
 
-	spin_lock_bh(&sta->plink_lock);
-	switch (sta->plink_state) {
+	spin_lock_bh(&sta->mesh->plink_lock);
+	switch (sta->mesh->plink_state) {
 	case NL80211_PLINK_LISTEN:
 		switch (event) {
 		case CLS_ACPT:
 			mesh_plink_fsm_restart(sta);
 			break;
 		case OPN_ACPT:
-			sta->plink_state = NL80211_PLINK_OPN_RCVD;
-			sta->llid = mesh_get_new_llid(sdata);
+			sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
+			sta->mesh->llid = mesh_get_new_llid(sdata);
 			mesh_plink_timer_set(sta,
 					     mshcfg->dot11MeshRetryTimeout);
 
@@ -791,11 +820,11 @@
 			break;
 		case OPN_ACPT:
 			/* retry timer is left untouched */
-			sta->plink_state = NL80211_PLINK_OPN_RCVD;
+			sta->mesh->plink_state = NL80211_PLINK_OPN_RCVD;
 			action = WLAN_SP_MESH_PEERING_CONFIRM;
 			break;
 		case CNF_ACPT:
-			sta->plink_state = NL80211_PLINK_CNF_RCVD;
+			sta->mesh->plink_state = NL80211_PLINK_CNF_RCVD;
 			mod_plink_timer(sta, mshcfg->dot11MeshConfirmTimeout);
 			break;
 		default:
@@ -855,7 +884,7 @@
 	case NL80211_PLINK_HOLDING:
 		switch (event) {
 		case CLS_ACPT:
-			del_timer(&sta->plink_timer);
+			del_timer(&sta->mesh->plink_timer);
 			mesh_plink_fsm_restart(sta);
 			break;
 		case OPN_ACPT:
@@ -874,17 +903,18 @@
 		 */
 		break;
 	}
-	spin_unlock_bh(&sta->plink_lock);
+	spin_unlock_bh(&sta->mesh->plink_lock);
 	if (action) {
-		mesh_plink_frame_tx(sdata, action, sta->sta.addr,
-				    sta->llid, sta->plid, sta->reason);
+		mesh_plink_frame_tx(sdata, sta, action, sta->sta.addr,
+				    sta->mesh->llid, sta->mesh->plid,
+				    sta->mesh->reason);
 
 		/* also send confirm in open case */
 		if (action == WLAN_SP_MESH_PEERING_OPEN) {
-			mesh_plink_frame_tx(sdata,
+			mesh_plink_frame_tx(sdata, sta,
 					    WLAN_SP_MESH_PEERING_CONFIRM,
-					    sta->sta.addr, sta->llid,
-					    sta->plid, 0);
+					    sta->sta.addr, sta->mesh->llid,
+					    sta->mesh->plid, 0);
 		}
 	}
 
@@ -939,7 +969,7 @@
 			mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
 			goto out;
 		}
-		if (sta->plink_state == NL80211_PLINK_BLOCKED)
+		if (sta->mesh->plink_state == NL80211_PLINK_BLOCKED)
 			goto out;
 	}
 
@@ -954,7 +984,7 @@
 		if (!matches_local)
 			event = OPN_RJCT;
 		if (!mesh_plink_free_count(sdata) ||
-		    (sta->plid && sta->plid != plid))
+		    (sta->mesh->plid && sta->mesh->plid != plid))
 			event = OPN_IGNR;
 		else
 			event = OPN_ACPT;
@@ -963,14 +993,14 @@
 		if (!matches_local)
 			event = CNF_RJCT;
 		if (!mesh_plink_free_count(sdata) ||
-		    sta->llid != llid ||
-		    (sta->plid && sta->plid != plid))
+		    sta->mesh->llid != llid ||
+		    (sta->mesh->plid && sta->mesh->plid != plid))
 			event = CNF_IGNR;
 		else
 			event = CNF_ACPT;
 		break;
 	case WLAN_SP_MESH_PEERING_CLOSE:
-		if (sta->plink_state == NL80211_PLINK_ESTAB)
+		if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 			/* Do not check for llid or plid. This does not
 			 * follow the standard but since multiple plinks
 			 * per sta are not supported, it is necessary in
@@ -981,9 +1011,9 @@
 			 * restarted.
 			 */
 			event = CLS_ACPT;
-		else if (sta->plid != plid)
+		else if (sta->mesh->plid != plid)
 			event = CLS_IGNR;
-		else if (ie_len == 8 && sta->llid != llid)
+		else if (ie_len == 8 && sta->mesh->llid != llid)
 			event = CLS_IGNR;
 		else
 			event = CLS_ACPT;
@@ -1070,9 +1100,9 @@
 			mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
 			goto unlock_rcu;
 		}
-		sta->plid = plid;
+		sta->mesh->plid = plid;
 	} else if (!sta && event == OPN_RJCT) {
-		mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_CLOSE,
+		mesh_plink_frame_tx(sdata, NULL, WLAN_SP_MESH_PEERING_CLOSE,
 				    mgmt->sa, 0, plid,
 				    WLAN_REASON_MESH_CONFIG);
 		goto unlock_rcu;
@@ -1081,9 +1111,13 @@
 		goto unlock_rcu;
 	}
 
-	/* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
-	if (!sta->plid && event == CNF_ACPT)
-		sta->plid = plid;
+	if (event == CNF_ACPT) {
+		/* 802.11-2012 13.3.7.2 - update plid on CNF if not set */
+		if (!sta->mesh->plid)
+			sta->mesh->plid = plid;
+
+		sta->mesh->aid = get_unaligned_le16(PLINK_CNF_AID(mgmt));
+	}
 
 	changed |= mesh_plink_fsm(sdata, sta, event);
 
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index ad8b377..90a268a 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -92,16 +92,16 @@
 		if (sdata != sta->sdata)
 			continue;
 
-		switch (sta->plink_state) {
+		switch (sta->mesh->plink_state) {
 		case NL80211_PLINK_OPN_SNT:
 		case NL80211_PLINK_OPN_RCVD:
 		case NL80211_PLINK_CNF_RCVD:
 			peering = true;
 			break;
 		case NL80211_PLINK_ESTAB:
-			if (sta->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
+			if (sta->mesh->local_pm == NL80211_MESH_POWER_LIGHT_SLEEP)
 				light_sleep_cnt++;
-			else if (sta->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
+			else if (sta->mesh->local_pm == NL80211_MESH_POWER_DEEP_SLEEP)
 				deep_sleep_cnt++;
 			break;
 		default:
@@ -153,19 +153,19 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 
-	if (sta->local_pm == pm)
+	if (sta->mesh->local_pm == pm)
 		return 0;
 
 	mps_dbg(sdata, "local STA operates in mode %d with %pM\n",
 		pm, sta->sta.addr);
 
-	sta->local_pm = pm;
+	sta->mesh->local_pm = pm;
 
 	/*
 	 * announce peer-specific power mode transition
 	 * (see IEEE802.11-2012 13.14.3.2 and 13.14.3.3)
 	 */
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		mps_qos_null_tx(sta);
 
 	return ieee80211_mps_local_status_update(sdata);
@@ -197,8 +197,8 @@
 
 	if (is_unicast_ether_addr(hdr->addr1) &&
 	    ieee80211_is_data_qos(hdr->frame_control) &&
-	    sta->plink_state == NL80211_PLINK_ESTAB)
-		pm = sta->local_pm;
+	    sta->mesh->plink_state == NL80211_PLINK_ESTAB)
+		pm = sta->mesh->local_pm;
 	else
 		pm = sdata->u.mesh.nonpeer_pm;
 
@@ -241,16 +241,16 @@
 	 * use peer-specific power mode if peering is established and the
 	 * peer's power mode is known
 	 */
-	if (sta->plink_state == NL80211_PLINK_ESTAB &&
-	    sta->peer_pm != NL80211_MESH_POWER_UNKNOWN)
-		pm = sta->peer_pm;
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB &&
+	    sta->mesh->peer_pm != NL80211_MESH_POWER_UNKNOWN)
+		pm = sta->mesh->peer_pm;
 	else
-		pm = sta->nonpeer_pm;
+		pm = sta->mesh->nonpeer_pm;
 
 	do_buffer = (pm != NL80211_MESH_POWER_ACTIVE);
 
 	/* clear the MPSP flags for non-peers or active STA */
-	if (sta->plink_state != NL80211_PLINK_ESTAB) {
+	if (sta->mesh->plink_state != NL80211_PLINK_ESTAB) {
 		clear_sta_flag(sta, WLAN_STA_MPSP_OWNER);
 		clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
 	} else if (!do_buffer) {
@@ -296,13 +296,13 @@
 		pm = NL80211_MESH_POWER_ACTIVE;
 	}
 
-	if (sta->peer_pm == pm)
+	if (sta->mesh->peer_pm == pm)
 		return;
 
 	mps_dbg(sta->sdata, "STA %pM enters mode %d\n",
 		sta->sta.addr, pm);
 
-	sta->peer_pm = pm;
+	sta->mesh->peer_pm = pm;
 
 	ieee80211_mps_sta_status_update(sta);
 }
@@ -317,13 +317,13 @@
 	else
 		pm = NL80211_MESH_POWER_ACTIVE;
 
-	if (sta->nonpeer_pm == pm)
+	if (sta->mesh->nonpeer_pm == pm)
 		return;
 
 	mps_dbg(sta->sdata, "STA %pM sets non-peer mode to %d\n",
 		sta->sta.addr, pm);
 
-	sta->nonpeer_pm = pm;
+	sta->mesh->nonpeer_pm = pm;
 
 	ieee80211_mps_sta_status_update(sta);
 }
@@ -552,7 +552,7 @@
 	} else {
 		if (eosp)
 			clear_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
-		else if (sta->local_pm != NL80211_MESH_POWER_ACTIVE)
+		else if (sta->mesh->local_pm != NL80211_MESH_POWER_ACTIVE)
 			set_sta_flag(sta, WLAN_STA_MPSP_RECIPIENT);
 
 		if (rspi && !test_and_set_sta_flag(sta, WLAN_STA_MPSP_OWNER))
@@ -577,9 +577,9 @@
 	int ac, buffer_local = 0;
 	bool has_buffered = false;
 
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		has_buffered = ieee80211_check_tim(elems->tim, elems->tim_len,
-						   sta->llid);
+						   sta->mesh->aid);
 
 	if (has_buffered)
 		mps_dbg(sta->sdata, "%pM indicates buffered frames\n",
@@ -598,7 +598,7 @@
 	if (!has_buffered && !buffer_local)
 		return;
 
-	if (sta->plink_state == NL80211_PLINK_ESTAB)
+	if (sta->mesh->plink_state == NL80211_PLINK_ESTAB)
 		mpsp_trigger_send(sta, has_buffered, !buffer_local);
 	else
 		mps_frame_deliver(sta, 1);
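The last hunk above switches the TIM lookup from the random local link ID to the newly allocated peer AID: a TIM bitmap can only be indexed by an AID in the 1..2007 range, which is exactly what mesh_plink.c now hands out. Ignoring the partial-virtual-bitmap compression that the real ieee80211_check_tim() also handles, the lookup amounts to a plain bit test; a simplified sketch, not the kernel helper:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MAX_AID 2007	/* mirrors IEEE80211_MAX_AID */

	/* Full (uncompressed) TIM virtual bitmap: one bit per AID, bit 0
	 * being the multicast indication.  Returns whether the AP/peer has
	 * frames buffered for the given AID. */
	static bool tim_bit_set(const uint8_t *bitmap, size_t len, uint16_t aid)
	{
		if (aid > MAX_AID || aid / 8 >= len)
			return false;
		return bitmap[aid / 8] & (1u << (aid % 8));
	}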
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 09625d6..64bc22a 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -127,14 +127,14 @@
 
 	/* Timing offset calculation (see 13.13.2.2.2) */
 	t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
-	sta->t_offset = t_t - t_r;
+	sta->mesh->t_offset = t_t - t_r;
 
 	if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
-		s64 t_clockdrift = sta->t_offset_setpoint - sta->t_offset;
+		s64 t_clockdrift = sta->mesh->t_offset_setpoint - sta->mesh->t_offset;
 		msync_dbg(sdata,
-			  "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
-			  sta->sta.addr, (long long) sta->t_offset,
-			  (long long) sta->t_offset_setpoint,
+			  "STA %pM : t_offset=%lld, t_offset_setpoint=%lld, t_clockdrift=%lld\n",
+			  sta->sta.addr, (long long) sta->mesh->t_offset,
+			  (long long) sta->mesh->t_offset_setpoint,
 			  (long long) t_clockdrift);
 
 		if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
@@ -152,12 +152,12 @@
 			ifmsh->sync_offset_clockdrift_max = t_clockdrift;
 		spin_unlock_bh(&ifmsh->sync_offset_lock);
 	} else {
-		sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
+		sta->mesh->t_offset_setpoint = sta->mesh->t_offset - TOFFSET_SET_MARGIN;
 		set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
 		msync_dbg(sdata,
-			  "STA %pM : offset was invalid, sta->t_offset=%lld\n",
+			  "STA %pM : offset was invalid, t_offset=%lld\n",
 			  sta->sta.addr,
-			  (long long) sta->t_offset);
+			  (long long) sta->mesh->t_offset);
 	}
 
 no_sync:
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 9b2cc27..705ef1d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -6,6 +6,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -538,11 +539,16 @@
 	ieee80211_ie_build_ht_cap(pos, &ht_cap, cap);
 }
 
+/* This function determines vht capability flags for the association
+ * and builds the IE.
+ * Note - the function may set the owner of the MU-MIMO capability
+ */
 static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
 				 struct sk_buff *skb,
 				 struct ieee80211_supported_band *sband,
 				 struct ieee80211_vht_cap *ap_vht_cap)
 {
+	struct ieee80211_local *local = sdata->local;
 	u8 *pos;
 	u32 cap;
 	struct ieee80211_sta_vht_cap vht_cap;
@@ -576,7 +582,34 @@
 	 */
 	if (!(ap_vht_cap->vht_cap_info &
 			cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
-		cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+		cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+			 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+	else if (!(ap_vht_cap->vht_cap_info &
+			cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+		cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+	/*
+	 * If some other vif is using the MU-MIMO capability we cannot associate
+	 * using MU-MIMO - this will lead to contradictions in the group-id
+	 * mechanism.
+	 * Ownership is defined since association request, in order to avoid
+	 * simultaneous associations with MU-MIMO.
+	 */
+	if (cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) {
+		bool disable_mu_mimo = false;
+		struct ieee80211_sub_if_data *other;
+
+		list_for_each_entry_rcu(other, &local->interfaces, list) {
+			if (other->flags & IEEE80211_SDATA_MU_MIMO_OWNER) {
+				disable_mu_mimo = true;
+				break;
+			}
+		}
+		if (disable_mu_mimo)
+			cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+		else
+			sdata->flags |= IEEE80211_SDATA_MU_MIMO_OWNER;
+	}
 
 	mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
 
@@ -1096,24 +1129,6 @@
 	ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
-static void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
-{
-	struct sta_info *sta;
-	u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
-		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
-		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
-			continue;
-
-		ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
-					    NL80211_TDLS_TEARDOWN, reason,
-					    GFP_ATOMIC);
-	}
-	rcu_read_unlock();
-}
-
 static void
 ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 				 u64 timestamp, u32 device_timestamp,
@@ -2076,6 +2091,7 @@
 	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
 	memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
 	memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+	sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 
 	sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
@@ -2538,6 +2554,7 @@
 		eth_zero_addr(sdata->u.mgd.bssid);
 		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID);
 		sdata->u.mgd.flags = 0;
+		sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 		mutex_lock(&sdata->local->mtx);
 		ieee80211_vif_release_channel(sdata);
 		mutex_unlock(&sdata->local->mtx);
@@ -3034,12 +3051,8 @@
 
 	rate_control_rate_init(sta);
 
-	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED) {
+	if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
 		set_sta_flag(sta, WLAN_STA_MFP);
-		sta->sta.mfp = true;
-	} else {
-		sta->sta.mfp = false;
-	}
 
 	sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
 
diff --git a/net/mac80211/ocb.c b/net/mac80211/ocb.c
index 358d5f9..573b81a 100644
--- a/net/mac80211/ocb.c
+++ b/net/mac80211/ocb.c
@@ -179,7 +179,7 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
-	u32 changed = BSS_CHANGED_OCB;
+	u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
 	int err;
 
 	if (ifocb->joined == true)
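The rate.c changes that follow match VHT rates against the new masks by treating the rate index as a packed pair: idx >> 4 selects the per-stream mask entry and idx & 0xf the MCS bit, matching mac80211's ((nss - 1) << 4) | mcs encoding of VHT tx rate indices. A tiny sketch of that packing (helper names are illustrative):

	#include <stdint.h>

	/* Pack/unpack a VHT rate index as ((nss - 1) << 4) | mcs, the same
	 * layout the mask-matching code below walks. */
	static uint8_t vht_pack(uint8_t nss, uint8_t mcs)
	{
		return (uint8_t)(((nss - 1) << 4) | (mcs & 0xf));
	}

	static void vht_unpack(uint8_t idx, uint8_t *nss, uint8_t *mcs)
	{
		*nss = (idx >> 4) + 1;
		*mcs = idx & 0xf;
	}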
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index fda33f9..9857693 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -29,6 +29,65 @@
 MODULE_PARM_DESC(ieee80211_default_rc_algo,
 		 "Default rate control algorithm for mac80211 to use");
 
+void rate_control_rate_init(struct sta_info *sta)
+{
+	struct ieee80211_local *local = sta->sdata->local;
+	struct rate_control_ref *ref = sta->rate_ctrl;
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	ieee80211_sta_set_rx_nss(sta);
+
+	if (!ref)
+		return;
+
+	rcu_read_lock();
+
+	chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+	if (WARN_ON(!chanctx_conf)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+
+	spin_lock_bh(&sta->rate_ctrl_lock);
+	ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+			    priv_sta);
+	spin_unlock_bh(&sta->rate_ctrl_lock);
+	rcu_read_unlock();
+	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
+}
+
+void rate_control_rate_update(struct ieee80211_local *local,
+				    struct ieee80211_supported_band *sband,
+				    struct sta_info *sta, u32 changed)
+{
+	struct rate_control_ref *ref = local->rate_ctrl;
+	struct ieee80211_sta *ista = &sta->sta;
+	void *priv_sta = sta->rate_ctrl_priv;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+
+	if (ref && ref->ops->rate_update) {
+		rcu_read_lock();
+
+		chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+		if (WARN_ON(!chanctx_conf)) {
+			rcu_read_unlock();
+			return;
+		}
+
+		spin_lock_bh(&sta->rate_ctrl_lock);
+		ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+				      ista, priv_sta, changed);
+		spin_unlock_bh(&sta->rate_ctrl_lock);
+		rcu_read_unlock();
+	}
+	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+}
+
 int ieee80211_rate_control_register(const struct rate_control_ops *ops)
 {
 	struct rate_control_alg *alg;
@@ -294,39 +353,37 @@
 }
 EXPORT_SYMBOL(rate_control_send_low);
 
-static bool rate_idx_match_legacy_mask(struct ieee80211_tx_rate *rate,
-				       int n_bitrates, u32 mask)
+static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask)
 {
 	int j;
 
 	/* See whether the selected rate or anything below it is allowed. */
-	for (j = rate->idx; j >= 0; j--) {
+	for (j = *rate_idx; j >= 0; j--) {
 		if (mask & (1 << j)) {
 			/* Okay, found a suitable rate. Use it. */
-			rate->idx = j;
+			*rate_idx = j;
 			return true;
 		}
 	}
 
 	/* Try to find a higher rate that would be allowed */
-	for (j = rate->idx + 1; j < n_bitrates; j++) {
+	for (j = *rate_idx + 1; j < n_bitrates; j++) {
 		if (mask & (1 << j)) {
 			/* Okay, found a suitable rate. Use it. */
-			rate->idx = j;
+			*rate_idx = j;
 			return true;
 		}
 	}
 	return false;
 }
 
-static bool rate_idx_match_mcs_mask(struct ieee80211_tx_rate *rate,
-				    u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+static bool rate_idx_match_mcs_mask(s8 *rate_idx, u8 *mcs_mask)
 {
 	int i, j;
 	int ridx, rbit;
 
-	ridx = rate->idx / 8;
-	rbit = rate->idx % 8;
+	ridx = *rate_idx / 8;
+	rbit = *rate_idx % 8;
 
 	/* sanity check */
 	if (ridx < 0 || ridx >= IEEE80211_HT_MCS_MASK_LEN)
@@ -336,20 +393,20 @@
 	for (i = ridx; i >= 0; i--) {
 		for (j = rbit; j >= 0; j--)
 			if (mcs_mask[i] & BIT(j)) {
-				rate->idx = i * 8 + j;
+				*rate_idx = i * 8 + j;
 				return true;
 			}
 		rbit = 7;
 	}
 
 	/* Try to find a higher rate that would be allowed */
-	ridx = (rate->idx + 1) / 8;
-	rbit = (rate->idx + 1) % 8;
+	ridx = (*rate_idx + 1) / 8;
+	rbit = (*rate_idx + 1) % 8;
 
 	for (i = ridx; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
 		for (j = rbit; j < 8; j++)
 			if (mcs_mask[i] & BIT(j)) {
-				rate->idx = i * 8 + j;
+				*rate_idx = i * 8 + j;
 				return true;
 			}
 		rbit = 0;
@@ -357,37 +414,93 @@
 	return false;
 }
 
+static bool rate_idx_match_vht_mcs_mask(s8 *rate_idx, u16 *vht_mask)
+{
+	int i, j;
+	int ridx, rbit;
 
+	ridx = *rate_idx >> 4;
+	rbit = *rate_idx & 0xf;
 
-static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+	if (ridx < 0 || ridx >= NL80211_VHT_NSS_MAX)
+		return false;
+
+	/* See whether the selected rate or anything below it is allowed. */
+	for (i = ridx; i >= 0; i--) {
+		for (j = rbit; j >= 0; j--) {
+			if (vht_mask[i] & BIT(j)) {
+				*rate_idx = (i << 4) | j;
+				return true;
+			}
+		}
+		rbit = 15;
+	}
+
+	/* Try to find a higher rate that would be allowed */
+	ridx = (*rate_idx + 1) >> 4;
+	rbit = (*rate_idx + 1) & 0xf;
+
+	for (i = ridx; i < NL80211_VHT_NSS_MAX; i++) {
+		for (j = rbit; j < 16; j++) {
+			if (vht_mask[i] & BIT(j)) {
+				*rate_idx = (i << 4) | j;
+				return true;
+			}
+		}
+		rbit = 0;
+	}
+	return false;
+}
+
+static void rate_idx_match_mask(s8 *rate_idx, u16 *rate_flags,
 				struct ieee80211_supported_band *sband,
 				enum nl80211_chan_width chan_width,
 				u32 mask,
-				u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+				u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+				u16 vht_mask[NL80211_VHT_NSS_MAX])
 {
-	struct ieee80211_tx_rate alt_rate;
+	if (*rate_flags & IEEE80211_TX_RC_VHT_MCS) {
+		/* handle VHT rates */
+		if (rate_idx_match_vht_mcs_mask(rate_idx, vht_mask))
+			return;
 
-	/* handle HT rates */
-	if (rate->flags & IEEE80211_TX_RC_MCS) {
-		if (rate_idx_match_mcs_mask(rate, mcs_mask))
+		*rate_idx = 0;
+		/* keep protection flags */
+		*rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+				IEEE80211_TX_RC_USE_CTS_PROTECT |
+				IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+
+		*rate_flags |= IEEE80211_TX_RC_MCS;
+		if (chan_width == NL80211_CHAN_WIDTH_40)
+			*rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+
+		if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
 			return;
 
 		/* also try the legacy rates. */
-		alt_rate.idx = 0;
-		/* keep protection flags */
-		alt_rate.flags = rate->flags &
-				 (IEEE80211_TX_RC_USE_RTS_CTS |
-				  IEEE80211_TX_RC_USE_CTS_PROTECT |
-				  IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-		alt_rate.count = rate->count;
-		if (rate_idx_match_legacy_mask(&alt_rate,
-					       sband->n_bitrates, mask)) {
-			*rate = alt_rate;
+		*rate_flags &= ~(IEEE80211_TX_RC_MCS |
+				 IEEE80211_TX_RC_40_MHZ_WIDTH);
+		if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+					       mask))
 			return;
-		}
-	} else if (!(rate->flags & IEEE80211_TX_RC_VHT_MCS)) {
+	} else if (*rate_flags & IEEE80211_TX_RC_MCS) {
+		/* handle HT rates */
+		if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
+			return;
+
+		/* also try the legacy rates. */
+		*rate_idx = 0;
+		/* keep protection flags */
+		*rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+				IEEE80211_TX_RC_USE_CTS_PROTECT |
+				IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
+		if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+					       mask))
+			return;
+	} else {
 		/* handle legacy rates */
-		if (rate_idx_match_legacy_mask(rate, sband->n_bitrates, mask))
+		if (rate_idx_match_legacy_mask(rate_idx, sband->n_bitrates,
+					       mask))
 			return;
 
 		/* if HT BSS, and we handle a data frame, also try HT rates */
@@ -400,23 +513,19 @@
 			break;
 		}
 
-		alt_rate.idx = 0;
+		*rate_idx = 0;
 		/* keep protection flags */
-		alt_rate.flags = rate->flags &
-				 (IEEE80211_TX_RC_USE_RTS_CTS |
-				  IEEE80211_TX_RC_USE_CTS_PROTECT |
-				  IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
-		alt_rate.count = rate->count;
+		*rate_flags &= (IEEE80211_TX_RC_USE_RTS_CTS |
+				IEEE80211_TX_RC_USE_CTS_PROTECT |
+				IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
 
-		alt_rate.flags |= IEEE80211_TX_RC_MCS;
+		*rate_flags |= IEEE80211_TX_RC_MCS;
 
 		if (chan_width == NL80211_CHAN_WIDTH_40)
-			alt_rate.flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+			*rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
 
-		if (rate_idx_match_mcs_mask(&alt_rate, mcs_mask)) {
-			*rate = alt_rate;
+		if (rate_idx_match_mcs_mask(rate_idx, mcs_mask))
 			return;
-		}
 	}
 
 	/*
@@ -569,18 +678,92 @@
 	}
 }
 
+static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
+				  struct ieee80211_supported_band *sband,
+				  struct ieee80211_sta *sta, u32 *mask,
+				  u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN],
+				  u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+	u32 i, flags;
+
+	*mask = sdata->rc_rateidx_mask[sband->band];
+	flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+	for (i = 0; i < sband->n_bitrates; i++) {
+		if ((flags & sband->bitrates[i].flags) != flags)
+			*mask &= ~BIT(i);
+	}
+
+	if (*mask == (1 << sband->n_bitrates) - 1 &&
+	    !sdata->rc_has_mcs_mask[sband->band] &&
+	    !sdata->rc_has_vht_mcs_mask[sband->band])
+		return false;
+
+	if (sdata->rc_has_mcs_mask[sband->band])
+		memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[sband->band],
+		       IEEE80211_HT_MCS_MASK_LEN);
+	else
+		memset(mcs_mask, 0xff, IEEE80211_HT_MCS_MASK_LEN);
+
+	if (sdata->rc_has_vht_mcs_mask[sband->band])
+		memcpy(vht_mask, sdata->rc_rateidx_vht_mcs_mask[sband->band],
+		       sizeof(u16) * NL80211_VHT_NSS_MAX);
+	else
+		memset(vht_mask, 0xff, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+	if (sta) {
+		__le16 sta_vht_cap;
+		u16 sta_vht_mask[NL80211_VHT_NSS_MAX];
+
+		/* Filter out rates that the STA does not support */
+		*mask &= sta->supp_rates[sband->band];
+		for (i = 0; i < sizeof(mcs_mask); i++)
+			mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
+
+		sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
+		ieee80211_get_vht_mask_from_cap(sta_vht_cap, sta_vht_mask);
+		for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+			vht_mask[i] &= sta_vht_mask[i];
+	}
+
+	return true;
+}
+
+static void
+rate_control_apply_mask_ratetbl(struct sta_info *sta,
+				struct ieee80211_supported_band *sband,
+				struct ieee80211_sta_rates *rates)
+{
+	int i;
+	u32 mask;
+	u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
+	u16 vht_mask[NL80211_VHT_NSS_MAX];
+	enum nl80211_chan_width chan_width;
+
+	if (!rate_control_cap_mask(sta->sdata, sband, &sta->sta, &mask,
+				   mcs_mask, vht_mask))
+		return;
+
+	chan_width = sta->sdata->vif.bss_conf.chandef.width;
+	for (i = 0; i < IEEE80211_TX_RATE_TABLE_SIZE; i++) {
+		if (rates->rate[i].idx < 0)
+			break;
+
+		rate_idx_match_mask(&rates->rate[i].idx, &rates->rate[i].flags,
+				    sband, chan_width, mask, mcs_mask,
+				    vht_mask);
+	}
+}
+
 static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_sta *sta,
 				    struct ieee80211_supported_band *sband,
-				    struct ieee80211_tx_info *info,
 				    struct ieee80211_tx_rate *rates,
 				    int max_rates)
 {
 	enum nl80211_chan_width chan_width;
 	u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
-	bool has_mcs_mask;
 	u32 mask;
-	u32 rate_flags;
+	u16 rate_flags, vht_mask[NL80211_VHT_NSS_MAX];
 	int i;
 
 	/*
@@ -588,30 +771,10 @@
 	 * default mask (allow all rates) is used to save some processing for
 	 * the common case.
 	 */
-	mask = sdata->rc_rateidx_mask[info->band];
-	has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
-	rate_flags =
-		ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
-	for (i = 0; i < sband->n_bitrates; i++)
-		if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
-			mask &= ~BIT(i);
-
-	if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
+	if (!rate_control_cap_mask(sdata, sband, sta, &mask, mcs_mask,
+				   vht_mask))
 		return;
 
-	if (has_mcs_mask)
-		memcpy(mcs_mask, sdata->rc_rateidx_mcs_mask[info->band],
-		       sizeof(mcs_mask));
-	else
-		memset(mcs_mask, 0xff, sizeof(mcs_mask));
-
-	if (sta) {
-		/* Filter out rates that the STA does not support */
-		mask &= sta->supp_rates[info->band];
-		for (i = 0; i < sizeof(mcs_mask); i++)
-			mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
-	}
-
 	/*
 	 * Make sure the rate index selected for each TX rate is
 	 * included in the configured mask and change the rate indexes
@@ -623,8 +786,10 @@
 		if (rates[i].idx < 0)
 			break;
 
-		rate_idx_match_mask(&rates[i], sband, chan_width, mask,
-				    mcs_mask);
+		rate_flags = rates[i].flags;
+		rate_idx_match_mask(&rates[i].idx, &rate_flags, sband,
+				    chan_width, mask, mcs_mask, vht_mask);
+		rates[i].flags = rate_flags;
 	}
 }
 
@@ -648,7 +813,7 @@
 	sband = sdata->local->hw.wiphy->bands[info->band];
 
 	if (ieee80211_is_data(hdr->frame_control))
-		rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
+		rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
 
 	if (dest[0].idx < 0)
 		__rate_control_send_low(&sdata->local->hw, sband, sta, info,
@@ -705,7 +870,10 @@
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	struct ieee80211_sta_rates *old;
+	struct ieee80211_supported_band *sband;
 
+	sband = hw->wiphy->bands[ieee80211_get_sdata_band(sta->sdata)];
+	rate_control_apply_mask_ratetbl(sta, sband, rates);
 	/*
 	 * mac80211 guarantees that this function will not be called
 	 * concurrently, so the following RCU access is safe, even without
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 25c9be5..624fe5b 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -71,64 +71,10 @@
 	spin_unlock_bh(&sta->rate_ctrl_lock);
 }
 
-static inline void rate_control_rate_init(struct sta_info *sta)
-{
-	struct ieee80211_local *local = sta->sdata->local;
-	struct rate_control_ref *ref = sta->rate_ctrl;
-	struct ieee80211_sta *ista = &sta->sta;
-	void *priv_sta = sta->rate_ctrl_priv;
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	ieee80211_sta_set_rx_nss(sta);
-
-	if (!ref)
-		return;
-
-	rcu_read_lock();
-
-	chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-	if (WARN_ON(!chanctx_conf)) {
-		rcu_read_unlock();
-		return;
-	}
-
-	sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
-
-	spin_lock_bh(&sta->rate_ctrl_lock);
-	ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
-			    priv_sta);
-	spin_unlock_bh(&sta->rate_ctrl_lock);
-	rcu_read_unlock();
-	set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
-}
-
-static inline void rate_control_rate_update(struct ieee80211_local *local,
+void rate_control_rate_init(struct sta_info *sta);
+void rate_control_rate_update(struct ieee80211_local *local,
 				    struct ieee80211_supported_band *sband,
-				    struct sta_info *sta, u32 changed)
-{
-	struct rate_control_ref *ref = local->rate_ctrl;
-	struct ieee80211_sta *ista = &sta->sta;
-	void *priv_sta = sta->rate_ctrl_priv;
-	struct ieee80211_chanctx_conf *chanctx_conf;
-
-	if (ref && ref->ops->rate_update) {
-		rcu_read_lock();
-
-		chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
-		if (WARN_ON(!chanctx_conf)) {
-			rcu_read_unlock();
-			return;
-		}
-
-		spin_lock_bh(&sta->rate_ctrl_lock);
-		ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
-				      ista, priv_sta, changed);
-		spin_unlock_bh(&sta->rate_ctrl_lock);
-		rcu_read_unlock();
-	}
-	drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
-}
+				    struct sta_info *sta, u32 changed);
 
 static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
 					   struct sta_info *sta, gfp_t gfp)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 543b672..3928dbd 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -867,7 +867,13 @@
 	else
 		idx = index % MCS_GROUP_RATES + (group->streams - 1) * 8;
 
-	if (offset > 0) {
+	/* enable RTS/CTS if needed:
+	 *  - if station is in dynamic SMPS (and streams > 1)
+	 *  - for fallback rates, to increase chances of getting through
+	 */
+	if (offset > 0 ||
+	    (mi->sta->smps_mode == IEEE80211_SMPS_DYNAMIC &&
+	     group->streams > 1)) {
 		ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
 		flags |= IEEE80211_TX_RC_USE_RTS_CTS;
 	}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5dae166..5bc0b88 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -42,6 +42,51 @@
 	u64_stats_update_end(&tstats->syncp);
 }
 
+static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+			       enum nl80211_iftype type)
+{
+	__le16 fc = hdr->frame_control;
+
+	if (ieee80211_is_data(fc)) {
+		if (len < 24) /* drop incorrect hdr len (data) */
+			return NULL;
+
+		if (ieee80211_has_a4(fc))
+			return NULL;
+		if (ieee80211_has_tods(fc))
+			return hdr->addr1;
+		if (ieee80211_has_fromds(fc))
+			return hdr->addr2;
+
+		return hdr->addr3;
+	}
+
+	if (ieee80211_is_mgmt(fc)) {
+		if (len < 24) /* drop incorrect hdr len (mgmt) */
+			return NULL;
+		return hdr->addr3;
+	}
+
+	if (ieee80211_is_ctl(fc)) {
+		if (ieee80211_is_pspoll(fc))
+			return hdr->addr1;
+
+		if (ieee80211_is_back_req(fc)) {
+			switch (type) {
+			case NL80211_IFTYPE_STATION:
+				return hdr->addr2;
+			case NL80211_IFTYPE_AP:
+			case NL80211_IFTYPE_AP_VLAN:
+				return hdr->addr1;
+			default:
+				break; /* fall through to the return */
+			}
+		}
+	}
+
+	return NULL;
+}
+
 /*
  * monitor mode reception
  *
@@ -77,8 +122,7 @@
 	hdr = (void *)(skb->data + rtap_vendor_space);
 
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-			    RX_FLAG_FAILED_PLCP_CRC |
-			    RX_FLAG_AMPDU_IS_ZEROLEN))
+			    RX_FLAG_FAILED_PLCP_CRC))
 		return true;
 
 	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -346,10 +390,6 @@
 			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
 		put_unaligned_le32(status->ampdu_reference, pos);
 		pos += 4;
-		if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
-			flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
-		if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
-			flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
 		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
 			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
 		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
@@ -1093,11 +1133,6 @@
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
 
-	if (unlikely(rx->skb->len < 16)) {
-		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
-		return RX_DROP_MONITOR;
-	}
-
 	/* Drop disallowed frame classes based on STA auth/assoc state;
 	 * IEEE 802.11, Chap 5.5.
 	 *
@@ -1240,22 +1275,22 @@
 	ieee80211_sta_ps_deliver_wakeup(sta);
 }
 
-int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
+int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
 {
-	struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	bool in_ps;
 
-	WARN_ON(!ieee80211_hw_check(&sta_inf->local->hw, AP_LINK_PS));
+	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
 
 	/* Don't let the same PS state be set twice */
-	in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
+	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
 	if ((start && in_ps) || (!start && !in_ps))
 		return -EINVAL;
 
 	if (start)
-		sta_ps_start(sta_inf);
+		sta_ps_start(sta);
 	else
-		sta_ps_end(sta_inf);
+		sta_ps_end(sta);
 
 	return 0;
 }
@@ -1393,7 +1428,7 @@
 	sta->rx_bytes += rx->skb->len;
 	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
 		sta->last_signal = status->signal;
-		ewma_add(&sta->avg_signal, -status->signal);
+		ewma_signal_add(&sta->avg_signal, -status->signal);
 	}
 
 	if (status->chains) {
@@ -1405,7 +1440,7 @@
 				continue;
 
 			sta->chain_signal_last[i] = signal;
-			ewma_add(&sta->chain_signal_avg[i], -signal);
+			ewma_signal_add(&sta->chain_signal_avg[i], -signal);
 		}
 	}
 
@@ -1647,7 +1682,6 @@
 		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
 			return RX_DROP_MONITOR;
 
-		rx->key->tx_rx_count++;
 		/* TODO: add threshold stuff again */
 	} else {
 		return RX_DROP_MONITOR;
@@ -1883,7 +1917,6 @@
 
 	/* Complete frame has been reassembled - process it now */
 	status = IEEE80211_SKB_RXCB(rx->skb);
-	status->rx_flags |= IEEE80211_RX_FRAGMENTED;
 
  out:
 	ieee80211_led_rx(rx->local);
@@ -2108,9 +2141,8 @@
 		/* deliver to local stack */
 		skb->protocol = eth_type_trans(skb, dev);
 		memset(skb->cb, 0, sizeof(skb->cb));
-		if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
-		    rx->local->napi)
-			napi_gro_receive(rx->local->napi, skb);
+		if (rx->napi)
+			napi_gro_receive(rx->napi, skb);
 		else
 			netif_receive_skb(skb);
 	}
@@ -2378,9 +2410,8 @@
 		    tf->category == WLAN_CATEGORY_TDLS &&
 		    (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
 		     tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
-			rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TDLS_CHSW;
-			skb_queue_tail(&sdata->skb_queue, rx->skb);
-			ieee80211_queue_work(&rx->local->hw, &sdata->work);
+			skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb);
+			schedule_work(&local->tdls_chsw_work);
 			if (rx->sta)
 				rx->sta->rx_packets++;
 
@@ -3004,7 +3035,6 @@
 	return RX_QUEUED;
 }
 
-/* TODO: use IEEE80211_RX_FRAGMENTED */
 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 					struct ieee80211_rate *rate)
 {
@@ -3216,7 +3246,7 @@
 		/* This is OK -- must be QoS data frame */
 		.security_idx = tid,
 		.seqno_idx = tid,
-		.flags = IEEE80211_RX_REORDER_TIMER,
+		.napi = NULL, /* must be NULL to not have races */
 	};
 	struct tid_ampdu_rx *tid_agg_rx;
 
@@ -3286,7 +3316,7 @@
 	case NL80211_IFTYPE_OCB:
 		if (!bssid)
 			return false;
-		if (ieee80211_is_beacon(hdr->frame_control))
+		if (!ieee80211_is_data_present(hdr->frame_control))
 			return false;
 		if (!is_broadcast_ether_addr(bssid))
 			return false;
@@ -3393,7 +3423,8 @@
  * be called with rcu_read_lock protection.
  */
 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
-					 struct sk_buff *skb)
+					 struct sk_buff *skb,
+					 struct napi_struct *napi)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
@@ -3409,6 +3440,7 @@
 	memset(&rx, 0, sizeof(rx));
 	rx.skb = skb;
 	rx.local = local;
+	rx.napi = napi;
 
 	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
 		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
@@ -3510,7 +3542,8 @@
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
  */
-void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
+		       struct napi_struct *napi)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_rate *rate = NULL;
@@ -3609,7 +3642,7 @@
 	ieee80211_tpt_led_trig_rx(local,
 			((struct ieee80211_hdr *)skb->data)->frame_control,
 			skb->len);
-	__ieee80211_rx_handle_packet(hw, skb);
+	__ieee80211_rx_handle_packet(hw, skb, napi);
 
 	rcu_read_unlock();
 
@@ -3617,7 +3650,7 @@
  drop:
 	kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx_napi);
 
 /* This is a version of the rx handler that can be called from hard irq
  * context. Post the skb on the queue and schedule the tasklet */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 666ddac..64f1936 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -68,7 +68,7 @@
 	.nelem_hint = 3, /* start small */
 	.automatic_shrinking = true,
 	.head_offset = offsetof(struct sta_info, hash_node),
-	.key_offset = offsetof(struct sta_info, sta.addr),
+	.key_offset = offsetof(struct sta_info, addr),
 	.key_len = ETH_ALEN,
 	.hashfn = sta_addr_hash,
 	.max_size = CONFIG_MAC80211_STA_HASH_MAX_SIZE,
@@ -249,6 +249,9 @@
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 	kfree(rcu_dereference_raw(sta->sta.rates));
+#ifdef CONFIG_MAC80211_MESH
+	kfree(sta->mesh);
+#endif
 	kfree(sta);
 }
 
@@ -313,13 +316,19 @@
 	INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
 	mutex_init(&sta->ampdu_mlme.mtx);
 #ifdef CONFIG_MAC80211_MESH
-	spin_lock_init(&sta->plink_lock);
-	if (ieee80211_vif_is_mesh(&sdata->vif) &&
-	    !sdata->u.mesh.user_mpm)
-		init_timer(&sta->plink_timer);
-	sta->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		sta->mesh = kzalloc(sizeof(*sta->mesh), gfp);
+		if (!sta->mesh)
+			goto free;
+		spin_lock_init(&sta->mesh->plink_lock);
+		if (ieee80211_vif_is_mesh(&sdata->vif) &&
+		    !sdata->u.mesh.user_mpm)
+			init_timer(&sta->mesh->plink_timer);
+		sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
+	}
 #endif
 
+	memcpy(sta->addr, addr, ETH_ALEN);
 	memcpy(sta->sta.addr, addr, ETH_ALEN);
 	sta->local = local;
 	sta->sdata = sdata;
@@ -332,9 +341,9 @@
 
 	ktime_get_ts(&uptime);
 	sta->last_connected = uptime.tv_sec;
-	ewma_init(&sta->avg_signal, 1024, 8);
+	ewma_signal_init(&sta->avg_signal);
 	for (i = 0; i < ARRAY_SIZE(sta->chain_signal_avg); i++)
-		ewma_init(&sta->chain_signal_avg[i], 1024, 8);
+		ewma_signal_init(&sta->chain_signal_avg[i]);
 
 	if (local->ops->wake_tx_queue) {
 		void *txq_data;
@@ -405,6 +414,9 @@
 	if (sta->sta.txq[0])
 		kfree(to_txq_info(sta->sta.txq[0]));
 free:
+#ifdef CONFIG_MAC80211_MESH
+	kfree(sta->mesh);
+#endif
 	kfree(sta);
 	return NULL;
 }
@@ -623,7 +635,7 @@
 	bool indicate_tim = false;
 	u8 ignore_for_tim = sta->sta.uapsd_queues;
 	int ac;
-	u16 id;
+	u16 id = sta->sta.aid;
 
 	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
 	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
@@ -631,12 +643,9 @@
 			return;
 
 		ps = &sta->sdata->bss->ps;
-		id = sta->sta.aid;
 #ifdef CONFIG_MAC80211_MESH
 	} else if (ieee80211_vif_is_mesh(&sta->sdata->vif)) {
 		ps = &sta->sdata->u.mesh.ps;
-		/* TIM map only for 1 <= PLID <= IEEE80211_MAX_AID */
-		id = sta->plid % (IEEE80211_MAX_AID + 1);
 #endif
 	} else {
 		return;
@@ -1887,7 +1896,8 @@
 		}
 
 		if (!(sinfo->filled & BIT(NL80211_STA_INFO_SIGNAL_AVG))) {
-			sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
+			sinfo->signal_avg =
+				(s8) -ewma_signal_read(&sta->avg_signal);
 			sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
 		}
 	}
@@ -1902,7 +1912,7 @@
 		for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
 			sinfo->chain_signal[i] = sta->chain_signal_last[i];
 			sinfo->chain_signal_avg[i] =
-				(s8) -ewma_read(&sta->chain_signal_avg[i]);
+				(s8) -ewma_signal_read(&sta->chain_signal_avg[i]);
 		}
 	}
 
@@ -1956,16 +1966,16 @@
 				 BIT(NL80211_STA_INFO_PEER_PM) |
 				 BIT(NL80211_STA_INFO_NONPEER_PM);
 
-		sinfo->llid = sta->llid;
-		sinfo->plid = sta->plid;
-		sinfo->plink_state = sta->plink_state;
+		sinfo->llid = sta->mesh->llid;
+		sinfo->plid = sta->mesh->plid;
+		sinfo->plink_state = sta->mesh->plink_state;
 		if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
 			sinfo->filled |= BIT(NL80211_STA_INFO_T_OFFSET);
-			sinfo->t_offset = sta->t_offset;
+			sinfo->t_offset = sta->mesh->t_offset;
 		}
-		sinfo->local_pm = sta->local_pm;
-		sinfo->peer_pm = sta->peer_pm;
-		sinfo->nonpeer_pm = sta->nonpeer_pm;
+		sinfo->local_pm = sta->mesh->local_pm;
+		sinfo->peer_pm = sta->mesh->peer_pm;
+		sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
 #endif
 	}
 
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 226f8ca4..b087c71 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -53,6 +53,8 @@
  * @WLAN_STA_TDLS_CHAN_SWITCH: This TDLS peer supports TDLS channel-switching
  * @WLAN_STA_TDLS_OFF_CHANNEL: The local STA is currently off-channel with this
  *	TDLS peer
+ * @WLAN_STA_TDLS_WIDER_BW: This TDLS peer supports working on a wider bw on
+ *	the BSS base channel.
  * @WLAN_STA_UAPSD: Station requested unscheduled SP while driver was
  *	keeping station in power-save mode, reply when the driver
  *	unblocks the station.
@@ -84,6 +86,7 @@
 	WLAN_STA_TDLS_INITIATOR,
 	WLAN_STA_TDLS_CHAN_SWITCH,
 	WLAN_STA_TDLS_OFF_CHANNEL,
+	WLAN_STA_TDLS_WIDER_BW,
 	WLAN_STA_UAPSD,
 	WLAN_STA_SP,
 	WLAN_STA_4ADDR_EVENT,
@@ -270,6 +273,56 @@
 };
 
 /**
+ * struct mesh_sta - mesh STA information
+ * @plink_lock: serialize access to plink fields
+ * @llid: Local link ID
+ * @plid: Peer link ID
+ * @aid: local aid supplied by peer
+ * @reason: Cancel reason on PLINK_HOLDING state
+ * @plink_retries: Retries in establishment
+ * @plink_state: peer link state
+ * @plink_timeout: timeout of peer link
+ * @plink_timer: peer link watch timer
+ * @t_offset: timing offset relative to this host
+ * @t_offset_setpoint: reference timing offset of this sta to be used when
+ * 	calculating clockdrift
+ * @local_pm: local link-specific power save mode
+ * @peer_pm: peer-specific power save mode towards local STA
+ * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @processed_beacon: set to true after peer rates and capabilities are
+ *	processed
+ * @fail_avg: moving percentage of failed MSDUs
+ */
+struct mesh_sta {
+	struct timer_list plink_timer;
+
+	s64 t_offset;
+	s64 t_offset_setpoint;
+
+	spinlock_t plink_lock;
+	u16 llid;
+	u16 plid;
+	u16 aid;
+	u16 reason;
+	u8 plink_retries;
+
+	bool processed_beacon;
+
+	enum nl80211_plink_state plink_state;
+	u32 plink_timeout;
+
+	/* mesh power save */
+	enum nl80211_mesh_power_mode local_pm;
+	enum nl80211_mesh_power_mode peer_pm;
+	enum nl80211_mesh_power_mode nonpeer_pm;
+
+	/* moving percentage of failed MSDUs */
+	unsigned int fail_avg;
+};
+
+DECLARE_EWMA(signal, 1024, 8)
+
+/**
  * struct sta_info - STA information
  *
  * This structure collects information about a station that
@@ -278,12 +331,13 @@
  * @list: global linked list entry
  * @free_list: list entry for keeping track of stations to free
  * @hash_node: hash node for rhashtable
+ * @addr: station's MAC address - duplicated from public part to
+ *	let the hash table work with just a single cacheline
  * @local: pointer to the global information
  * @sdata: virtual interface this station belongs to
  * @ptk: peer keys negotiated with this station, if any
  * @ptk_idx: last installed peer key index
  * @gtk: group keys negotiated with this station, if any
- * @gtk_idx: last installed group key index
  * @rate_ctrl: rate control algorithm reference
  * @rate_ctrl_lock: spinlock used to protect rate control data
  *	(data inside the algorithm, so serializes calls there)
@@ -318,30 +372,17 @@
  * @last_signal: signal of last received frame from this STA
  * @avg_signal: moving average of signal of received frames from this STA
  * @last_ack_signal: signal of last received Ack frame from this STA
- * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
+ * @last_seq_ctrl: last received seq/frag number from this STA (per TID
+ *	plus one for non-QoS frames)
  * @tx_filtered_count: number of frames the hardware filtered for this STA
  * @tx_retry_failed: number of frames that failed retry
  * @tx_retry_count: total number of retries for frames to this STA
- * @fail_avg: moving percentage of failed MSDUs
  * @tx_packets: number of RX/TX MSDUs
  * @tx_bytes: number of bytes transmitted to this STA
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
- * @plink_lock: serialize access to plink fields
- * @llid: Local link ID
- * @plid: Peer link ID
- * @reason: Cancel reason on PLINK_HOLDING state
- * @plink_retries: Retries in establishment
- * @plink_state: peer link state
- * @plink_timeout: timeout of peer link
- * @plink_timer: peer link watch timer
- * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- * 	calculating clockdrift
- * @local_pm: local link-specific power save mode
- * @peer_pm: peer-specific power save mode towards local STA
- * @nonpeer_pm: STA power save mode towards non-peer neighbors
+ * @mesh: mesh STA information
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -369,19 +410,19 @@
  * @rx_msdu: MSDUs received from this station, using IEEE80211_NUM_TID
  *	entry for non-QoS frames
  * @fast_tx: TX fastpath information
- * @processed_beacon: set to true after peer rates and capabilities are
- *	processed
+ * @tdls_chandef: a TDLS peer can have a wider chandef that is compatible
+ *	with the BSS one.
  */
 struct sta_info {
 	/* General information, mostly static */
 	struct list_head list, free_list;
 	struct rcu_head rcu_head;
 	struct rhash_head hash_node;
+	u8 addr[ETH_ALEN];
 	struct ieee80211_local *local;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
 	struct ieee80211_key __rcu *ptk[NUM_DEFAULT_KEYS];
-	u8 gtk_idx;
 	u8 ptk_idx;
 	struct rate_control_ref *rate_ctrl;
 	void *rate_ctrl_priv;
@@ -390,6 +431,10 @@
 
 	struct ieee80211_fast_tx __rcu *fast_tx;
 
+#ifdef CONFIG_MAC80211_MESH
+	struct mesh_sta *mesh;
+#endif
+
 	struct work_struct drv_deliver_wk;
 
 	u16 listen_interval;
@@ -419,12 +464,12 @@
 	unsigned long rx_fragments;
 	unsigned long rx_dropped;
 	int last_signal;
-	struct ewma avg_signal;
+	struct ewma_signal avg_signal;
 	int last_ack_signal;
 
 	u8 chains;
 	s8 chain_signal_last[IEEE80211_MAX_CHAINS];
-	struct ewma chain_signal_avg[IEEE80211_MAX_CHAINS];
+	struct ewma_signal chain_signal_avg[IEEE80211_MAX_CHAINS];
 
 	/* Plus 1 for non-QoS frames */
 	__le16 last_seq_ctrl[IEEE80211_NUM_TIDS + 1];
@@ -432,8 +477,6 @@
 	/* Updated from TX status path only, no locking requirements */
 	unsigned long tx_filtered_count;
 	unsigned long tx_retry_failed, tx_retry_count;
-	/* moving percentage of failed MSDUs */
-	unsigned int fail_avg;
 
 	/* Updated from TX path only, no locking requirements */
 	u64 tx_packets[IEEE80211_NUM_ACS];
@@ -455,29 +498,6 @@
 	struct sta_ampdu_mlme ampdu_mlme;
 	u8 timer_to_tid[IEEE80211_NUM_TIDS];
 
-#ifdef CONFIG_MAC80211_MESH
-	/*
-	 * Mesh peer link attributes, protected by plink_lock.
-	 * TODO: move to a sub-structure that is referenced with pointer?
-	 */
-	spinlock_t plink_lock;
-	u16 llid;
-	u16 plid;
-	u16 reason;
-	u8 plink_retries;
-	enum nl80211_plink_state plink_state;
-	u32 plink_timeout;
-	struct timer_list plink_timer;
-
-	s64 t_offset;
-	s64 t_offset_setpoint;
-	/* mesh power save */
-	enum nl80211_mesh_power_mode local_pm;
-	enum nl80211_mesh_power_mode peer_pm;
-	enum nl80211_mesh_power_mode nonpeer_pm;
-	bool processed_beacon;
-#endif
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct sta_info_debugfsdentries {
 		struct dentry *dir;
@@ -498,6 +518,8 @@
 
 	u8 reserved_tid;
 
+	struct cfg80211_chan_def tdls_chandef;
+
 	/* keep last! */
 	struct ieee80211_sta sta;
 };
@@ -505,7 +527,7 @@
 static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
 {
 #ifdef CONFIG_MAC80211_MESH
-	return sta->plink_state;
+	return sta->mesh->plink_state;
 #endif
 	return NL80211_PLINK_LISTEN;
 }
@@ -608,7 +630,7 @@
 			       _sta_bucket_idx(tbl, _addr),		\
 			       hash_node)				\
 	/* compare address and run code only if it matches */		\
-	if (ether_addr_equal(_sta->sta.addr, (_addr)))
+	if (ether_addr_equal(_sta->addr, (_addr)))
 
 /*
  * Get STA info by index, BROKEN!
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 45628f3..8ba5832 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -515,7 +515,7 @@
 
 		if (!sdata) {
 			skb->dev = NULL;
-		} else if (info->flags & IEEE80211_TX_INTFL_MLME_CONN_TX) {
+		} else {
 			unsigned int hdr_size =
 				ieee80211_hdrlen(hdr->frame_control);
 
@@ -529,9 +529,6 @@
 				ieee80211_mgd_conn_tx_status(sdata,
 							     hdr->frame_control,
 							     acked);
-		} else {
-			/* we assign ack frame ID for the others */
-			WARN_ON(1);
 		}
 
 		rcu_read_unlock();
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 8db6e29..aee701a 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -4,6 +4,7 @@
  * Copyright 2006-2010	Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2014, Intel Corporation
  * Copyright 2014  Intel Mobile Communications GmbH
+ * Copyright 2015  Intel Deutschland GmbH
  *
  * This file is GPLv2 as found in COPYING.
  */
@@ -11,6 +12,7 @@
 #include <linux/ieee80211.h>
 #include <linux/log2.h>
 #include <net/cfg80211.h>
+#include <linux/rtnetlink.h>
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 
@@ -35,20 +37,28 @@
 	mutex_unlock(&local->mtx);
 }
 
-static void ieee80211_tdls_add_ext_capab(struct ieee80211_local *local,
+static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata,
 					 struct sk_buff *skb)
 {
-	u8 *pos = (void *)skb_put(skb, 7);
+	struct ieee80211_local *local = sdata->local;
 	bool chan_switch = local->hw.wiphy->features &
 			   NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+	bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW);
+	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
+	struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+	bool vht = sband && sband->vht_cap.vht_supported;
+	u8 *pos = (void *)skb_put(skb, 10);
 
 	*pos++ = WLAN_EID_EXT_CAPABILITY;
-	*pos++ = 5; /* len */
+	*pos++ = 8; /* len */
 	*pos++ = 0x0;
 	*pos++ = 0x0;
 	*pos++ = 0x0;
 	*pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0;
 	*pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED;
+	*pos++ = 0;
+	*pos++ = 0;
+	*pos++ = (vht && wider_band) ? WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED : 0;
 }
 
 static u8
@@ -284,6 +294,60 @@
 }
 
 static void
+ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata,
+				   struct sta_info *sta)
+{
+	/* IEEE802.11ac-2013 Table E-4 */
+	u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 };
+	struct cfg80211_chan_def uc = sta->tdls_chandef;
+	enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta);
+	int i;
+
+	/* only support upgrading non-narrow channels up to 80MHz */
+	if (max_width == NL80211_CHAN_WIDTH_5 ||
+	    max_width == NL80211_CHAN_WIDTH_10)
+		return;
+
+	if (max_width > NL80211_CHAN_WIDTH_80)
+		max_width = NL80211_CHAN_WIDTH_80;
+
+	if (uc.width == max_width)
+		return;
+	/*
+	 * Channel usage constraints in the IEEE802.11ac-2013 specification only
+	 * allow expanding a 20MHz channel to 80MHz in a single way. In
+	 * addition, there are no 40MHz allowed channels that are not part of
+	 * the allowed 80MHz range in the 5GHz spectrum (the relevant one here).
+	 */
+	for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++)
+		if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) {
+			uc.center_freq1 = centers_80mhz[i];
+			uc.width = NL80211_CHAN_WIDTH_80;
+			break;
+		}
+
+	if (!uc.center_freq1)
+		return;
+
+	/* proceed to downgrade the chandef until usable or the same */
+	while (uc.width > max_width &&
+	       !cfg80211_reg_can_beacon(sdata->local->hw.wiphy,
+					&uc, sdata->wdev.iftype))
+		ieee80211_chandef_downgrade(&uc);
+
+	if (!cfg80211_chandef_identical(&uc, &sta->tdls_chandef)) {
+		tdls_dbg(sdata, "TDLS ch width upgraded %d -> %d\n",
+			 sta->tdls_chandef.width, uc.width);
+
+		/*
+		 * the station is not yet authorized when BW upgrade is done,
+		 * locking is not required
+		 */
+		sta->tdls_chandef = uc;
+	}
+}
+
+static void
 ieee80211_tdls_add_setup_start_ies(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb, const u8 *peer,
 				   u8 action_code, bool initiator,
@@ -320,7 +384,7 @@
 		offset = noffset;
 	}
 
-	ieee80211_tdls_add_ext_capab(local, skb);
+	ieee80211_tdls_add_ext_capab(sdata, skb);
 
 	/* add the QoS element if we support it */
 	if (local->hw.queues >= IEEE80211_NUM_ACS &&
@@ -350,15 +414,17 @@
 		offset = noffset;
 	}
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	/* we should have the peer STA if we're already responding */
 	if (action_code == WLAN_TDLS_SETUP_RESPONSE) {
 		sta = sta_info_get(sdata, peer);
 		if (WARN_ON_ONCE(!sta)) {
-			rcu_read_unlock();
+			mutex_unlock(&local->sta_mtx);
 			return;
 		}
+
+		sta->tdls_chandef = sdata->vif.bss_conf.chandef;
 	}
 
 	ieee80211_tdls_add_oper_classes(sdata, skb);
@@ -384,10 +450,6 @@
 		ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
 	} else if (action_code == WLAN_TDLS_SETUP_RESPONSE &&
 		   ht_cap.ht_supported && sta->sta.ht_cap.ht_supported) {
-		/* disable SMPS in TDLS responder */
-		sta->sta.ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED
-					<< IEEE80211_HT_CAP_SM_PS_SHIFT;
-
 		/* the peer caps are already intersected with our own */
 		memcpy(&ht_cap, &sta->sta.ht_cap, sizeof(ht_cap));
 
@@ -448,9 +510,16 @@
 
 		pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
 		ieee80211_ie_build_vht_cap(pos, &vht_cap, vht_cap.cap);
+
+		/*
+		 * if both peers support WIDER_BW, we can expand the chandef to
+		 * a wider compatible one, up to 80MHz
+		 */
+		if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+			ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
 	}
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	/* add any remaining IEs */
 	if (extra_ies_len) {
@@ -474,15 +543,17 @@
 	enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
 	u8 *pos;
 
-	rcu_read_lock();
+	mutex_lock(&local->sta_mtx);
 
 	sta = sta_info_get(sdata, peer);
 	ap_sta = sta_info_get(sdata, ifmgd->bssid);
 	if (WARN_ON_ONCE(!sta || !ap_sta)) {
-		rcu_read_unlock();
+		mutex_unlock(&local->sta_mtx);
 		return;
 	}
 
+	sta->tdls_chandef = sdata->vif.bss_conf.chandef;
+
 	/* add any custom IEs that go before the QoS IE */
 	if (extra_ies_len) {
 		static const u8 before_qos[] = {
@@ -530,12 +601,19 @@
 
 	/* only include VHT-operation if not on the 2.4GHz band */
 	if (band != IEEE80211_BAND_2GHZ && sta->sta.vht_cap.vht_supported) {
+		/*
+		 * if both peers support WIDER_BW, we can expand the chandef to
+		 * a wider compatible one, up to 80MHz
+		 */
+		if (test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+			ieee80211_tdls_chandef_vht_upgrade(sdata, sta);
+
 		pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_operation));
 		ieee80211_ie_build_vht_oper(pos, &sta->sta.vht_cap,
-					    &sdata->vif.bss_conf.chandef);
+					    &sta->tdls_chandef);
 	}
 
-	rcu_read_unlock();
+	mutex_unlock(&local->sta_mtx);
 
 	/* add any remaining IEs */
 	if (extra_ies_len) {
@@ -784,7 +862,7 @@
 			       max(sizeof(struct ieee80211_mgmt),
 				   sizeof(struct ieee80211_tdls_data)) +
 			       50 + /* supported rates */
-			       7 + /* ext capab */
+			       10 + /* ext capab */
 			       26 + /* max(WMM-info, WMM-param) */
 			       2 + max(sizeof(struct ieee80211_ht_cap),
 				       sizeof(struct ieee80211_ht_operation)) +
@@ -983,8 +1061,17 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
+	enum ieee80211_smps_mode smps_mode = sdata->u.mgd.driver_smps_mode;
 	int ret;
 
+	/* don't support setup with forced SMPS mode that's not off */
+	if (smps_mode != IEEE80211_SMPS_AUTOMATIC &&
+	    smps_mode != IEEE80211_SMPS_OFF) {
+		tdls_dbg(sdata, "Aborting TDLS setup due to SMPS mode %d\n",
+			 smps_mode);
+		return -ENOTSUPP;
+	}
+
 	mutex_lock(&local->mtx);
 
 	/* we don't support concurrent TDLS peer setups */
@@ -1146,6 +1233,22 @@
 	return ret;
 }
 
+static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *ctx;
+
+	mutex_lock(&local->chanctx_mtx);
+	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+					 lockdep_is_held(&local->chanctx_mtx));
+	if (conf) {
+		ctx = container_of(conf, struct ieee80211_chanctx, conf);
+		ieee80211_recalc_chanctx_chantype(local, ctx);
+	}
+	mutex_unlock(&local->chanctx_mtx);
+}
+
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 			const u8 *peer, enum nl80211_tdls_operation oper)
 {
@@ -1182,6 +1285,8 @@
 			break;
 		}
 
+		iee80211_tdls_recalc_chanctx(sdata);
+
 		rcu_read_lock();
 		sta = sta_info_get(sdata, peer);
 		if (!sta) {
@@ -1213,6 +1318,7 @@
 		ieee80211_flush_queues(local, sdata, false);
 
 		ret = sta_info_destroy_addr(sdata, peer);
+		iee80211_tdls_recalc_chanctx(sdata);
 		break;
 	default:
 		ret = -ENOTSUPP;
@@ -1224,6 +1330,10 @@
 		eth_zero_addr(sdata->u.mgd.tdls_peer);
 	}
 
+	if (ret == 0)
+		ieee80211_queue_work(&sdata->local->hw,
+				     &sdata->u.mgd.request_smps_work);
+
 	mutex_unlock(&local->mtx);
 	return ret;
 }
@@ -1627,6 +1737,31 @@
 		return -EINVAL;
 	}
 
+	if (!elems.sec_chan_offs) {
+		chan_type = NL80211_CHAN_HT20;
+	} else {
+		switch (elems.sec_chan_offs->sec_chan_offs) {
+		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+			chan_type = NL80211_CHAN_HT40PLUS;
+			break;
+		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+			chan_type = NL80211_CHAN_HT40MINUS;
+			break;
+		default:
+			chan_type = NL80211_CHAN_HT20;
+			break;
+		}
+	}
+
+	cfg80211_chandef_create(&chandef, chan, chan_type);
+
+	/* we will be active on the TDLS link */
+	if (!cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &chandef,
+					   sdata->wdev.iftype)) {
+		tdls_dbg(sdata, "TDLS chan switch to forbidden channel\n");
+		return -EINVAL;
+	}
+
 	mutex_lock(&local->sta_mtx);
 	sta = sta_info_get(sdata, tf->sa);
 	if (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH)) {
@@ -1647,27 +1782,15 @@
 		goto out;
 	}
 
-	if (!sta->sta.ht_cap.ht_supported) {
-		chan_type = NL80211_CHAN_NO_HT;
-	} else if (!elems.sec_chan_offs) {
-		chan_type = NL80211_CHAN_HT20;
-	} else {
-		switch (elems.sec_chan_offs->sec_chan_offs) {
-		case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-			chan_type = NL80211_CHAN_HT40PLUS;
-			break;
-		case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-			chan_type = NL80211_CHAN_HT40MINUS;
-			break;
-		default:
-			chan_type = NL80211_CHAN_HT20;
-			break;
-		}
+	/* peer should have known better */
+	if (!sta->sta.ht_cap.ht_supported && elems.sec_chan_offs &&
+	    elems.sec_chan_offs->sec_chan_offs) {
+		tdls_dbg(sdata, "TDLS chan switch - wide chan unsupported\n");
+		ret = -ENOTSUPP;
+		goto out;
 	}
 
-	cfg80211_chandef_create(&chandef, chan, chan_type);
 	params.chandef = &chandef;
-
 	params.switch_time = le16_to_cpu(elems.ch_sw_timing->switch_time);
 	params.switch_timeout = le16_to_cpu(elems.ch_sw_timing->switch_timeout);
 
@@ -1691,12 +1814,15 @@
 	return ret;
 }
 
-void ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
-					   struct sk_buff *skb)
+static void
+ieee80211_process_tdls_channel_switch(struct ieee80211_sub_if_data *sdata,
+				      struct sk_buff *skb)
 {
 	struct ieee80211_tdls_data *tf = (void *)skb->data;
 	struct wiphy *wiphy = sdata->local->hw.wiphy;
 
+	ASSERT_RTNL();
+
 	/* make sure the driver supports it */
 	if (!(wiphy->features & NL80211_FEATURE_TDLS_CHANNEL_SWITCH))
 		return;
@@ -1720,3 +1846,47 @@
 		return;
 	}
 }
+
+void ieee80211_teardown_tdls_peers(struct ieee80211_sub_if_data *sdata)
+{
+	struct sta_info *sta;
+	u16 reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+		if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+		    !test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+			continue;
+
+		ieee80211_tdls_oper_request(&sdata->vif, sta->sta.addr,
+					    NL80211_TDLS_TEARDOWN, reason,
+					    GFP_ATOMIC);
+	}
+	rcu_read_unlock();
+}
+
+void ieee80211_tdls_chsw_work(struct work_struct *wk)
+{
+	struct ieee80211_local *local =
+		container_of(wk, struct ieee80211_local, tdls_chsw_work);
+	struct ieee80211_sub_if_data *sdata;
+	struct sk_buff *skb;
+	struct ieee80211_tdls_data *tf;
+
+	rtnl_lock();
+	while ((skb = skb_dequeue(&local->skb_queue_tdls_chsw))) {
+		tf = (struct ieee80211_tdls_data *)skb->data;
+		list_for_each_entry(sdata, &local->interfaces, list) {
+			if (!ieee80211_sdata_running(sdata) ||
+			    sdata->vif.type != NL80211_IFTYPE_STATION ||
+			    !ether_addr_equal(tf->da, sdata->vif.addr))
+				continue;
+
+			ieee80211_process_tdls_channel_switch(sdata, skb);
+			break;
+		}
+
+		kfree_skb(skb);
+	}
+	rtnl_unlock();
+}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b823350..84e0e8c 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -311,9 +311,6 @@
 	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
 		return TX_CONTINUE;
 
-	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
-		return TX_CONTINUE;
-
 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
 		return TX_CONTINUE;
 
@@ -610,7 +607,6 @@
 	if (tx->key) {
 		bool skip_hw = false;
 
-		tx->key->tx_rx_count++;
 		/* TODO: add threshold stuff again */
 
 		switch (tx->key->conf.cipher) {
@@ -690,7 +686,8 @@
 
 	txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
 		    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
-		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC);
+		    tx->sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);
 
 	/* set up RTS protection if desired */
 	if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -2777,7 +2774,11 @@
 		sdata->sequence_number += 0x10;
 	}
 
-	sta->tx_msdu[tid]++;
+	if (skb_shinfo(skb)->gso_size)
+		sta->tx_msdu[tid] +=
+			DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
+	else
+		sta->tx_msdu[tid]++;
 
 	info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
@@ -3213,6 +3214,16 @@
 	rcu_read_unlock();
 }
 
+static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+{
+	beacon->csa_current_counter--;
+
+	/* the counter should never reach 0 */
+	WARN_ON_ONCE(!beacon->csa_current_counter);
+
+	return beacon->csa_current_counter;
+}
+
 u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
@@ -3231,11 +3242,7 @@
 	if (!beacon)
 		goto unlock;
 
-	beacon->csa_current_counter--;
-
-	/* the counter should never reach 0 */
-	WARN_ON_ONCE(!beacon->csa_current_counter);
-	count = beacon->csa_current_counter;
+	count = __ieee80211_csa_update_counter(beacon);
 
 unlock:
 	rcu_read_unlock();
@@ -3335,7 +3342,7 @@
 		if (beacon) {
 			if (beacon->csa_counter_offsets[0]) {
 				if (!is_template)
-					ieee80211_csa_update_counter(vif);
+					__ieee80211_csa_update_counter(beacon);
 
 				ieee80211_set_csa(sdata, beacon);
 			}
@@ -3381,7 +3388,7 @@
 
 		if (beacon->csa_counter_offsets[0]) {
 			if (!is_template)
-				ieee80211_csa_update_counter(vif);
+				__ieee80211_csa_update_counter(beacon);
 
 			ieee80211_set_csa(sdata, beacon);
 		}
@@ -3411,7 +3418,7 @@
 				 * for now we leave it consistent with overall
 				 * mac80211's behavior.
 				 */
-				ieee80211_csa_update_counter(vif);
+				__ieee80211_csa_update_counter(beacon);
 
 			ieee80211_set_csa(sdata, beacon);
 		}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 43e5aad..1104421 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -47,55 +47,6 @@
 }
 EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
 
-u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
-			enum nl80211_iftype type)
-{
-	__le16 fc = hdr->frame_control;
-
-	 /* drop ACK/CTS frames and incorrect hdr len (ctrl) */
-	if (len < 16)
-		return NULL;
-
-	if (ieee80211_is_data(fc)) {
-		if (len < 24) /* drop incorrect hdr len (data) */
-			return NULL;
-
-		if (ieee80211_has_a4(fc))
-			return NULL;
-		if (ieee80211_has_tods(fc))
-			return hdr->addr1;
-		if (ieee80211_has_fromds(fc))
-			return hdr->addr2;
-
-		return hdr->addr3;
-	}
-
-	if (ieee80211_is_mgmt(fc)) {
-		if (len < 24) /* drop incorrect hdr len (mgmt) */
-			return NULL;
-		return hdr->addr3;
-	}
-
-	if (ieee80211_is_ctl(fc)) {
-		if (ieee80211_is_pspoll(fc))
-			return hdr->addr1;
-
-		if (ieee80211_is_back_req(fc)) {
-			switch (type) {
-			case NL80211_IFTYPE_STATION:
-				return hdr->addr2;
-			case NL80211_IFTYPE_AP:
-			case NL80211_IFTYPE_AP_VLAN:
-				return hdr->addr1;
-			default:
-				break; /* fall through to the return */
-			}
-		}
-	}
-
-	return NULL;
-}
-
 void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
 {
 	struct sk_buff *skb;
@@ -752,7 +703,12 @@
 
 struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
 {
-	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_sub_if_data *sdata;
+
+	if (!vif)
+		return NULL;
+
+	sdata = vif_to_sdata(vif);
 
 	if (!ieee80211_sdata_running(sdata) ||
 	    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
@@ -1709,6 +1665,7 @@
 	local->resuming = false;
 	local->suspended = false;
 	local->started = false;
+	local->in_reconfig = false;
 
 	/* scheduled scan clearly can't be running any more, but tell
 	 * cfg80211 and clear local state
@@ -1759,16 +1716,24 @@
 	struct ieee80211_sub_if_data *sched_scan_sdata;
 	struct cfg80211_sched_scan_request *sched_scan_req;
 	bool sched_scan_stopped = false;
+	bool suspended = local->suspended;
 
 	/* nothing to do if HW shouldn't run */
 	if (!local->open_count)
 		goto wake_up;
 
 #ifdef CONFIG_PM
-	if (local->suspended)
+	if (suspended)
 		local->resuming = true;
 
 	if (local->wowlan) {
+		/*
+		 * In the wowlan case, both mac80211 and the device
+		 * are functional when the resume op is called, so
+		 * clear local->suspended so the device can operate
+		 * normally (e.g. pass rx frames).
+		 */
+		local->suspended = false;
 		res = drv_resume(local);
 		local->wowlan = false;
 		if (res < 0) {
@@ -1781,8 +1746,10 @@
 		/*
 		 * res is 1, which means the driver requested
 		 * to go through a regular reset on wakeup.
+		 * restore local->suspended in this case.
 		 */
 		reconfig_due_to_wowlan = true;
+		local->suspended = true;
 	}
 #endif
 
@@ -1794,7 +1761,7 @@
 	 */
 	res = drv_start(local);
 	if (res) {
-		if (local->suspended)
+		if (suspended)
 			WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
 		else
 			WARN(1, "Hardware became unavailable during restart.\n");
@@ -2088,10 +2055,10 @@
 	 * If this is for hw restart things are still running.
 	 * We may want to change that later, however.
 	 */
-	if (local->open_count && (!local->suspended || reconfig_due_to_wowlan))
+	if (local->open_count && (!suspended || reconfig_due_to_wowlan))
 		drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART);
 
-	if (!local->suspended)
+	if (!suspended)
 		return 0;
 
 #ifdef CONFIG_PM
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 80694d5..834ccdb 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -308,11 +308,15 @@
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	enum ieee80211_sta_rx_bandwidth bw;
+	enum nl80211_chan_width bss_width = sdata->vif.bss_conf.chandef.width;
 
-	bw = ieee80211_chan_width_to_rx_bw(sdata->vif.bss_conf.chandef.width);
-	bw = min(bw, ieee80211_sta_cap_rx_bw(sta));
+	bw = ieee80211_sta_cap_rx_bw(sta);
 	bw = min(bw, sta->cur_max_bandwidth);
 
+	/* do not cap the BW of TDLS WIDER_BW peers by the bss */
+	if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
+		bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
+
 	return bw;
 }
 
@@ -422,3 +426,29 @@
 	if (changed > 0)
 		rate_control_rate_update(local, sband, sta, changed);
 }
+
+void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
+				     u16 vht_mask[NL80211_VHT_NSS_MAX])
+{
+	int i;
+	u16 mask, cap = le16_to_cpu(vht_cap);
+
+	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+		mask = (cap >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
+		switch (mask) {
+		case IEEE80211_VHT_MCS_SUPPORT_0_7:
+			vht_mask[i] = 0x00FF;
+			break;
+		case IEEE80211_VHT_MCS_SUPPORT_0_8:
+			vht_mask[i] = 0x01FF;
+			break;
+		case IEEE80211_VHT_MCS_SUPPORT_0_9:
+			vht_mask[i] = 0x03FF;
+			break;
+		case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+		default:
+			vht_mask[i] = 0;
+			break;
+		}
+	}
+}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 943f760..feb547d 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -516,30 +516,33 @@
 			return RX_DROP_UNUSABLE;
 	}
 
-	ccmp_hdr2pn(pn, skb->data + hdrlen);
+	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+		ccmp_hdr2pn(pn, skb->data + hdrlen);
 
-	queue = rx->security_idx;
+		queue = rx->security_idx;
 
-	if (memcmp(pn, key->u.ccmp.rx_pn[queue], IEEE80211_CCMP_PN_LEN) <= 0) {
-		key->u.ccmp.replays++;
-		return RX_DROP_UNUSABLE;
-	}
-
-	if (!(status->flag & RX_FLAG_DECRYPTED)) {
-		u8 aad[2 * AES_BLOCK_SIZE];
-		u8 b_0[AES_BLOCK_SIZE];
-		/* hardware didn't decrypt/verify MIC */
-		ccmp_special_blocks(skb, pn, b_0, aad);
-
-		if (ieee80211_aes_ccm_decrypt(
-			    key->u.ccmp.tfm, b_0, aad,
-			    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
-			    data_len,
-			    skb->data + skb->len - mic_len, mic_len))
+		if (memcmp(pn, key->u.ccmp.rx_pn[queue],
+			   IEEE80211_CCMP_PN_LEN) <= 0) {
+			key->u.ccmp.replays++;
 			return RX_DROP_UNUSABLE;
-	}
+		}
 
-	memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+		if (!(status->flag & RX_FLAG_DECRYPTED)) {
+			u8 aad[2 * AES_BLOCK_SIZE];
+			u8 b_0[AES_BLOCK_SIZE];
+			/* hardware didn't decrypt/verify MIC */
+			ccmp_special_blocks(skb, pn, b_0, aad);
+
+			if (ieee80211_aes_ccm_decrypt(
+				    key->u.ccmp.tfm, b_0, aad,
+				    skb->data + hdrlen + IEEE80211_CCMP_HDR_LEN,
+				    data_len,
+				    skb->data + skb->len - mic_len, mic_len))
+				return RX_DROP_UNUSABLE;
+		}
+
+		memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN);
+	}
 
 	/* Remove CCMP header and MIC */
 	if (pskb_trim(skb, skb->len - mic_len))
@@ -739,30 +742,34 @@
 			return RX_DROP_UNUSABLE;
 	}
 
-	gcmp_hdr2pn(pn, skb->data + hdrlen);
+	if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+		gcmp_hdr2pn(pn, skb->data + hdrlen);
 
-	queue = rx->security_idx;
+		queue = rx->security_idx;
 
-	if (memcmp(pn, key->u.gcmp.rx_pn[queue], IEEE80211_GCMP_PN_LEN) <= 0) {
-		key->u.gcmp.replays++;
-		return RX_DROP_UNUSABLE;
-	}
-
-	if (!(status->flag & RX_FLAG_DECRYPTED)) {
-		u8 aad[2 * AES_BLOCK_SIZE];
-		u8 j_0[AES_BLOCK_SIZE];
-		/* hardware didn't decrypt/verify MIC */
-		gcmp_special_blocks(skb, pn, j_0, aad);
-
-		if (ieee80211_aes_gcm_decrypt(
-			    key->u.gcmp.tfm, j_0, aad,
-			    skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
-			    data_len,
-			    skb->data + skb->len - IEEE80211_GCMP_MIC_LEN))
+		if (memcmp(pn, key->u.gcmp.rx_pn[queue],
+			   IEEE80211_GCMP_PN_LEN) <= 0) {
+			key->u.gcmp.replays++;
 			return RX_DROP_UNUSABLE;
-	}
+		}
 
-	memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+		if (!(status->flag & RX_FLAG_DECRYPTED)) {
+			u8 aad[2 * AES_BLOCK_SIZE];
+			u8 j_0[AES_BLOCK_SIZE];
+			/* hardware didn't decrypt/verify MIC */
+			gcmp_special_blocks(skb, pn, j_0, aad);
+
+			if (ieee80211_aes_gcm_decrypt(
+				    key->u.gcmp.tfm, j_0, aad,
+				    skb->data + hdrlen + IEEE80211_GCMP_HDR_LEN,
+				    data_len,
+				    skb->data + skb->len -
+				    IEEE80211_GCMP_MIC_LEN))
+				return RX_DROP_UNUSABLE;
+		}
+
+		memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN);
+	}
 
 	/* Remove GCMP header and MIC */
 	if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
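
The CCMP and GCMP changes above let a driver set RX_FLAG_PN_VALIDATED when the hardware has already performed the packet-number replay check, in which case mac80211 skips both the PN comparison and its own decryption. The replay test itself works because the PN is stored most-significant byte first, so a plain memcmp() orders the counters correctly; a minimal stand-alone sketch of that comparison, with made-up 6-byte PN values, follows.

/* Minimal sketch of the CCMP/GCMP replay check: packet numbers are kept
 * big-endian (MSB first), so memcmp() compares them as counters.
 * The PN values below are invented for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PN_LEN 6

static int is_replay(const uint8_t rx_pn[PN_LEN], const uint8_t last_pn[PN_LEN])
{
	/* a frame is a replay unless its PN is strictly greater */
	return memcmp(rx_pn, last_pn, PN_LEN) <= 0;
}

int main(void)
{
	uint8_t last[PN_LEN] = { 0, 0, 0, 0, 0x01, 0x00 };
	uint8_t ok[PN_LEN]   = { 0, 0, 0, 0, 0x01, 0x01 };
	uint8_t old[PN_LEN]  = { 0, 0, 0, 0, 0x00, 0xff };

	printf("ok frame:  replay=%d\n", is_replay(ok, last));   /* 0 */
	printf("old frame: replay=%d\n", is_replay(old, last));  /* 1 */
	return 0;
}
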
diff --git a/net/mac802154/cfg.c b/net/mac802154/cfg.c
index 317c466..c865ebb 100644
--- a/net/mac802154/cfg.c
+++ b/net/mac802154/cfg.c
@@ -44,6 +44,49 @@
 	ieee802154_if_remove(sdata);
 }
 
+#ifdef CONFIG_PM
+static int ieee802154_suspend(struct wpan_phy *wpan_phy)
+{
+	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+	if (!local->open_count)
+		goto suspend;
+
+	ieee802154_stop_queue(&local->hw);
+	synchronize_net();
+
+	/* stop hardware - this must stop RX */
+	ieee802154_stop_device(local);
+
+suspend:
+	local->suspended = true;
+	return 0;
+}
+
+static int ieee802154_resume(struct wpan_phy *wpan_phy)
+{
+	struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+	int ret;
+
+	/* nothing to do if HW shouldn't run */
+	if (!local->open_count)
+		goto wake_up;
+
+	/* restart hardware */
+	ret = drv_start(local);
+	if (ret)
+		return ret;
+
+wake_up:
+	ieee802154_wake_queue(&local->hw);
+	local->suspended = false;
+	return 0;
+}
+#else
+#define ieee802154_suspend NULL
+#define ieee802154_resume NULL
+#endif
+
 static int
 ieee802154_add_iface(struct wpan_phy *phy, const char *name,
 		     unsigned char name_assign_type,
@@ -145,13 +188,18 @@
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 		      __le16 pan_id)
 {
+	int ret;
+
 	ASSERT_RTNL();
 
 	if (wpan_dev->pan_id == pan_id)
 		return 0;
 
-	wpan_dev->pan_id = pan_id;
-	return 0;
+	ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
+	if (!ret)
+		wpan_dev->pan_id = pan_id;
+
+	return ret;
 }
 
 static int
@@ -161,10 +209,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->min_be == min_be &&
-	    wpan_dev->max_be == max_be)
-		return 0;
-
 	wpan_dev->min_be = min_be;
 	wpan_dev->max_be = max_be;
 	return 0;
@@ -176,9 +220,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->short_addr == short_addr)
-		return 0;
-
 	wpan_dev->short_addr = short_addr;
 	return 0;
 }
@@ -190,9 +231,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->csma_retries == max_csma_backoffs)
-		return 0;
-
 	wpan_dev->csma_retries = max_csma_backoffs;
 	return 0;
 }
@@ -204,9 +242,6 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->frame_retries == max_frame_retries)
-		return 0;
-
 	wpan_dev->frame_retries = max_frame_retries;
 	return 0;
 }
@@ -217,16 +252,25 @@
 {
 	ASSERT_RTNL();
 
-	if (wpan_dev->lbt == mode)
-		return 0;
-
 	wpan_dev->lbt = mode;
 	return 0;
 }
 
+static int
+ieee802154_set_ackreq_default(struct wpan_phy *wpan_phy,
+			      struct wpan_dev *wpan_dev, bool ackreq)
+{
+	ASSERT_RTNL();
+
+	wpan_dev->ackreq = ackreq;
+	return 0;
+}
+
 const struct cfg802154_ops mac802154_config_ops = {
 	.add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
 	.del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+	.suspend = ieee802154_suspend,
+	.resume = ieee802154_resume,
 	.add_virtual_intf = ieee802154_add_iface,
 	.del_virtual_intf = ieee802154_del_iface,
 	.set_channel = ieee802154_set_channel,
@@ -239,4 +283,5 @@
 	.set_max_csma_backoffs = ieee802154_set_max_csma_backoffs,
 	.set_max_frame_retries = ieee802154_set_max_frame_retries,
 	.set_lbt_mode = ieee802154_set_lbt_mode,
+	.set_ackreq_default = ieee802154_set_ackreq_default,
 };
diff --git a/net/mac802154/ieee802154_i.h b/net/mac802154/ieee802154_i.h
index 34755d5..56ccffa 100644
--- a/net/mac802154/ieee802154_i.h
+++ b/net/mac802154/ieee802154_i.h
@@ -56,9 +56,13 @@
 	struct hrtimer ifs_timer;
 
 	bool started;
+	bool suspended;
 
 	struct tasklet_struct tasklet;
 	struct sk_buff_head skb_queue;
+
+	struct sk_buff *tx_skb;
+	struct work_struct tx_work;
 };
 
 enum {
@@ -94,8 +98,6 @@
 	struct mac802154_llsec sec;
 };
 
-#define MAC802154_CHAN_NONE		0xff /* No channel is assigned */
-
 /* utility functions/constants */
 extern const void *const mac802154_wpan_phy_privid; /*  for wpan_phy privid */
 
@@ -125,6 +127,8 @@
 
 extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
 
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
+void ieee802154_xmit_worker(struct work_struct *work);
 netdev_tx_t
 ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t
@@ -167,6 +171,8 @@
 			 struct ieee802154_llsec_table **t);
 void mac802154_unlock_table(struct net_device *dev);
 
+int mac802154_wpan_update_llsec(struct net_device *dev);
+
 /* interface handling */
 int ieee802154_iface_init(void);
 void ieee802154_iface_exit(void);
@@ -176,5 +182,6 @@
 		  unsigned char name_assign_type, enum nl802154_iftype type,
 		  __le64 extended_addr);
 void ieee802154_remove_interfaces(struct ieee802154_local *local);
+void ieee802154_stop_device(struct ieee802154_local *local);
 
 #endif /* __IEEE802154_I_H */
diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c
index 8b69824..ed26952 100644
--- a/net/mac802154/iface.c
+++ b/net/mac802154/iface.c
@@ -30,7 +30,7 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-static int mac802154_wpan_update_llsec(struct net_device *dev)
+int mac802154_wpan_update_llsec(struct net_device *dev)
 {
 	struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
 	struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -125,6 +125,14 @@
 	if (netif_running(dev))
 		return -EBUSY;
 
+	/* the lowpan interface needs to be down for the update;
+	 * the SLAAC address is regenerated after ifup
+	 */
+	if (sdata->wpan_dev.lowpan_dev) {
+		if (netif_running(sdata->wpan_dev.lowpan_dev))
+			return -EBUSY;
+	}
+
 	ieee802154_be64_to_le64(&extended_addr, addr->sa_data);
 	if (!ieee802154_is_valid_extended_unicast_addr(extended_addr))
 		return -EINVAL;
@@ -132,6 +140,13 @@
 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 	sdata->wpan_dev.extended_addr = extended_addr;
 
+	/* update the lowpan interface MAC address when
+	 * the wpan MAC address has changed
+	 */
+	if (sdata->wpan_dev.lowpan_dev)
+		memcpy(sdata->wpan_dev.lowpan_dev->dev_addr, dev->dev_addr,
+		       dev->addr_len);
+
 	return mac802154_wpan_update_llsec(dev);
 }
 
@@ -314,11 +329,8 @@
 
 	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-	if (!local->open_count) {
-		flush_workqueue(local->workqueue);
-		hrtimer_cancel(&local->ifs_timer);
-		drv_stop(local);
-	}
+	if (!local->open_count)
+		ieee802154_stop_device(local);
 
 	return 0;
 }
@@ -471,6 +483,7 @@
 		       enum nl802154_iftype type)
 {
 	struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+	int ret;
 	u8 tmp;
 
 	/* set some type-dependent values */
@@ -485,8 +498,7 @@
 	wpan_dev->min_be = 3;
 	wpan_dev->max_be = 5;
 	wpan_dev->csma_retries = 4;
-	/* for compatibility, actual default is 3 */
-	wpan_dev->frame_retries = -1;
+	wpan_dev->frame_retries = 3;
 
 	wpan_dev->pan_id = cpu_to_le16(IEEE802154_PANID_BROADCAST);
 	wpan_dev->short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
@@ -505,6 +517,10 @@
 		mutex_init(&sdata->sec_mtx);
 
 		mac802154_llsec_init(&sdata->sec);
+		ret = mac802154_wpan_update_llsec(sdata->dev);
+		if (ret < 0)
+			return ret;
+
 		break;
 	case NL802154_IFTYPE_MONITOR:
 		sdata->dev->destructor = free_netdev;
diff --git a/net/mac802154/main.c b/net/mac802154/main.c
index 356b346..e8cab5b 100644
--- a/net/mac802154/main.c
+++ b/net/mac802154/main.c
@@ -40,7 +40,7 @@
 			 * netstack.
 			 */
 			skb->pkt_type = 0;
-			ieee802154_rx(&local->hw, skb);
+			ieee802154_rx(local, skb);
 			break;
 		default:
 			WARN(1, "mac802154: Packet is of unknown type %d\n",
@@ -58,11 +58,9 @@
 	struct ieee802154_local *local;
 	size_t priv_size;
 
-	if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
-	    !ops->start || !ops->stop || !ops->set_channel) {
-		pr_err("undefined IEEE802.15.4 device operations\n");
+	if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+		    !ops->start || !ops->stop || !ops->set_channel))
 		return NULL;
-	}
 
 	/* Ensure 32-byte alignment of our private data and hw private data.
 	 * We use the wpan_phy priv data for both our ieee802154_local and for
@@ -107,11 +105,13 @@
 
 	skb_queue_head_init(&local->skb_queue);
 
+	INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+
 	/* init supported flags with 802.15.4 default ranges */
 	phy->supported.max_minbe = 8;
 	phy->supported.min_maxbe = 3;
 	phy->supported.max_maxbe = 8;
-	phy->supported.min_frame_retries = -1;
+	phy->supported.min_frame_retries = 0;
 	phy->supported.max_frame_retries = 7;
 	phy->supported.max_csma_backoffs = 5;
 	phy->supported.lbt = NL802154_SUPPORTED_BOOL_FALSE;
@@ -177,11 +177,8 @@
 	}
 
 	if (!(hw->flags & IEEE802154_HW_FRAME_RETRIES)) {
-		/* TODO should be 3, but our default value is -1 which means
-		 * no ARET handling.
-		 */
-		local->phy->supported.min_frame_retries = -1;
-		local->phy->supported.max_frame_retries = -1;
+		local->phy->supported.min_frame_retries = 3;
+		local->phy->supported.max_frame_retries = 3;
 	}
 
 	if (hw->flags & IEEE802154_HW_PROMISCUOUS)
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index d93ad2d..d1c33c1 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -246,13 +246,15 @@
 	}
 }
 
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
 {
-	struct ieee802154_local *local = hw_to_local(hw);
 	u16 crc;
 
 	WARN_ON_ONCE(softirq_count() == 0);
 
+	if (local->suspended)
+		goto drop;
+
 	/* TODO: When a transceiver omits the checksum here, we
 	 * add our own calculated one. This is currently an ugly
 	 * solution because the monitor needs a crc here.
@@ -273,8 +275,7 @@
 		crc = crc_ccitt(0, skb->data, skb->len);
 		if (crc) {
 			rcu_read_unlock();
-			kfree_skb(skb);
-			return;
+			goto drop;
 		}
 	}
 	/* remove crc */
@@ -283,8 +284,11 @@
 	__ieee802154_rx_handle_packet(local, skb);
 
 	rcu_read_unlock();
+
+	return;
+drop:
+	kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee802154_rx);
 
 void
 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index c62e956..7ed4391 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -30,23 +30,11 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
- * packets through the workqueue.
- */
-struct ieee802154_xmit_cb {
-	struct sk_buff *skb;
-	struct work_struct work;
-	struct ieee802154_local *local;
-};
-
-static struct ieee802154_xmit_cb ieee802154_xmit_cb;
-
-static void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_worker(struct work_struct *work)
 {
-	struct ieee802154_xmit_cb *cb =
-		container_of(work, struct ieee802154_xmit_cb, work);
-	struct ieee802154_local *local = cb->local;
-	struct sk_buff *skb = cb->skb;
+	struct ieee802154_local *local =
+		container_of(work, struct ieee802154_local, tx_work);
+	struct sk_buff *skb = local->tx_skb;
 	struct net_device *dev = skb->dev;
 	int res;
 
@@ -106,11 +94,8 @@
 		dev->stats.tx_packets++;
 		dev->stats.tx_bytes += skb->len;
 	} else {
-		INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
-		ieee802154_xmit_cb.skb = skb;
-		ieee802154_xmit_cb.local = local;
-
-		queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+		local->tx_skb = skb;
+		queue_work(local->workqueue, &local->tx_work);
 	}
 
 	return NETDEV_TX_OK;
diff --git a/net/mac802154/util.c b/net/mac802154/util.c
index 583435f..f9fd095 100644
--- a/net/mac802154/util.c
+++ b/net/mac802154/util.c
@@ -14,6 +14,7 @@
  */
 
 #include "ieee802154_i.h"
+#include "driver-ops.h"
 
 /* privid for wpan_phys to determine whether they belong to us or not */
 const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
@@ -92,3 +93,10 @@
 	dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
+
+void ieee802154_stop_device(struct ieee802154_local *local)
+{
+	flush_workqueue(local->workqueue);
+	hrtimer_cancel(&local->ifs_timer);
+	drv_stop(local);
+}
diff --git a/net/mpls/Kconfig b/net/mpls/Kconfig
index 17bde79..5c467ef 100644
--- a/net/mpls/Kconfig
+++ b/net/mpls/Kconfig
@@ -24,7 +24,13 @@
 
 config MPLS_ROUTING
 	tristate "MPLS: routing support"
-	help
+	---help---
 	 Add support for forwarding of mpls packets.
 
+config MPLS_IPTUNNEL
+	tristate "MPLS: IP over MPLS tunnel support"
+	depends on LWTUNNEL && MPLS_ROUTING
+	---help---
+	 Add support for tunneling IP packets over MPLS using the
+	 lightweight tunnel (LWT) infrastructure.
+
 endif # MPLS
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
index 65bbe68..9ca9236 100644
--- a/net/mpls/Makefile
+++ b/net/mpls/Makefile
@@ -3,5 +3,6 @@
 #
 obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
 obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+obj-$(CONFIG_MPLS_IPTUNNEL) += mpls_iptunnel.o
 
 mpls_router-y := af_mpls.o
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 1f93a59..bb185a2 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -15,6 +15,10 @@
 #include <net/ip_fib.h>
 #include <net/netevent.h>
 #include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
 #include "internal.h"
 
 #define LABEL_NOT_SPECIFIED (1<<20)
@@ -23,11 +27,23 @@
 /* This maximum ha length is copied from the definition of struct neighbour */
 #define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
 
+enum mpls_payload_type {
+	MPT_UNSPEC, /* IPv4 or IPv6 */
+	MPT_IPV4 = 4,
+	MPT_IPV6 = 6,
+
+	/* Other types not implemented:
+	 *  - Pseudo-wire with or without control word (RFC4385)
+	 *  - GAL (RFC5586)
+	 */
+};
+
 struct mpls_route { /* next hop label forwarding entry */
 	struct net_device __rcu *rt_dev;
 	struct rcu_head		rt_rcu;
 	u32			rt_label[MAX_NEW_LABELS];
 	u8			rt_protocol; /* routing protocol that set this entry */
+	u8                      rt_payload_type;
 	u8			rt_labels;
 	u8			rt_via_alen;
 	u8			rt_via_table;
@@ -58,10 +74,11 @@
 	return rcu_dereference_rtnl(dev->mpls_ptr);
 }
 
-static bool mpls_output_possible(const struct net_device *dev)
+bool mpls_output_possible(const struct net_device *dev)
 {
 	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
 }
+EXPORT_SYMBOL_GPL(mpls_output_possible);
 
 static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
 {
@@ -69,13 +86,14 @@
 	return rt->rt_labels * sizeof(struct mpls_shim_hdr);
 }
 
-static unsigned int mpls_dev_mtu(const struct net_device *dev)
+unsigned int mpls_dev_mtu(const struct net_device *dev)
 {
 	/* The amount of data the layer 2 frame can hold */
 	return dev->mtu;
 }
+EXPORT_SYMBOL_GPL(mpls_dev_mtu);
 
-static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
 	if (skb->len <= mtu)
 		return false;
@@ -85,20 +103,13 @@
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
 			struct mpls_entry_decoded dec)
 {
-	/* RFC4385 and RFC5586 encode other packets in mpls such that
-	 * they don't conflict with the ip version number, making
-	 * decoding by examining the ip version correct in everything
-	 * except for the strangest cases.
-	 *
-	 * The strange cases if we choose to support them will require
-	 * manual configuration.
-	 */
-	struct iphdr *hdr4;
-	bool success = true;
+	enum mpls_payload_type payload_type;
+	bool success = false;
 
 	/* The IPv4 code below accesses through the IPv4 header
 	 * checksum, which is 12 bytes into the packet.
@@ -113,23 +124,32 @@
 	if (!pskb_may_pull(skb, 12))
 		return false;
 
-	/* Use ip_hdr to find the ip protocol version */
-	hdr4 = ip_hdr(skb);
-	if (hdr4->version == 4) {
+	payload_type = rt->rt_payload_type;
+	if (payload_type == MPT_UNSPEC)
+		payload_type = ip_hdr(skb)->version;
+
+	switch (payload_type) {
+	case MPT_IPV4: {
+		struct iphdr *hdr4 = ip_hdr(skb);
 		skb->protocol = htons(ETH_P_IP);
 		csum_replace2(&hdr4->check,
 			      htons(hdr4->ttl << 8),
 			      htons(dec.ttl << 8));
 		hdr4->ttl = dec.ttl;
+		success = true;
+		break;
 	}
-	else if (hdr4->version == 6) {
+	case MPT_IPV6: {
 		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
 		skb->protocol = htons(ETH_P_IPV6);
 		hdr6->hop_limit = dec.ttl;
+		success = true;
+		break;
 	}
-	else
-		/* version 0 and version 1 are used by pseudo wires */
-		success = false;
+	case MPT_UNSPEC:
+		break;
+	}
+
 	return success;
 }
 
@@ -248,16 +268,17 @@
 };
 
 struct mpls_route_config {
-	u32		rc_protocol;
-	u32		rc_ifindex;
-	u16		rc_via_table;
-	u16		rc_via_alen;
-	u8		rc_via[MAX_VIA_ALEN];
-	u32		rc_label;
-	u32		rc_output_labels;
-	u32		rc_output_label[MAX_NEW_LABELS];
-	u32		rc_nlflags;
-	struct nl_info	rc_nlinfo;
+	u32			rc_protocol;
+	u32			rc_ifindex;
+	u16			rc_via_table;
+	u16			rc_via_alen;
+	u8			rc_via[MAX_VIA_ALEN];
+	u32			rc_label;
+	u32			rc_output_labels;
+	u32			rc_output_label[MAX_NEW_LABELS];
+	u32			rc_nlflags;
+	enum mpls_payload_type	rc_payload_type;
+	struct nl_info		rc_nlinfo;
 };
 
 static struct mpls_route *mpls_rt_alloc(size_t alen)
@@ -286,7 +307,7 @@
 	struct mpls_route *rt = new ? new : old;
 	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
 	/* Ignore reserved labels for now */
-	if (rt && (index >= 16))
+	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
 		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
 }
 
@@ -320,13 +341,96 @@
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
 	platform_labels = net->mpls.platform_labels;
-	for (index = 16; index < platform_labels; index++) {
+	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
+	     index++) {
 		if (!rtnl_dereference(platform_label[index]))
 			return index;
 	}
 	return LABEL_NOT_SPECIFIED;
 }
 
+#if IS_ENABLED(CONFIG_INET)
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+	struct net_device *dev;
+	struct rtable *rt;
+	struct in_addr daddr;
+
+	memcpy(&daddr, addr, sizeof(struct in_addr));
+	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+	if (IS_ERR(rt))
+		return ERR_CAST(rt);
+
+	dev = rt->dst.dev;
+	dev_hold(dev);
+
+	ip_rt_put(rt);
+
+	return dev;
+}
+#else
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+	return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+	struct net_device *dev;
+	struct dst_entry *dst;
+	struct flowi6 fl6;
+	int err;
+
+	if (!ipv6_stub)
+		return ERR_PTR(-EAFNOSUPPORT);
+
+	memset(&fl6, 0, sizeof(fl6));
+	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+	if (err)
+		return ERR_PTR(err);
+
+	dev = dst->dev;
+	dev_hold(dev);
+	dst_release(dst);
+
+	return dev;
+}
+#else
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+	return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+static struct net_device *find_outdev(struct net *net,
+				      struct mpls_route_config *cfg)
+{
+	struct net_device *dev = NULL;
+
+	if (!cfg->rc_ifindex) {
+		switch (cfg->rc_via_table) {
+		case NEIGH_ARP_TABLE:
+			dev = inet_fib_lookup_dev(net, cfg->rc_via);
+			break;
+		case NEIGH_ND_TABLE:
+			dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+			break;
+		case NEIGH_LINK_TABLE:
+			break;
+		}
+	} else {
+		dev = dev_get_by_index(net, cfg->rc_ifindex);
+	}
+
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	return dev;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
 	struct mpls_route __rcu **platform_label;
@@ -345,8 +449,8 @@
 		index = find_free_label(net);
 	}
 
-	/* The first 16 labels are reserved, and may not be set */
-	if (index < 16)
+	/* Reserved labels may not be set */
+	if (index < MPLS_LABEL_FIRST_UNRESERVED)
 		goto errout;
 
 	/* The full 20 bit range may not be supported. */
@@ -357,10 +461,12 @@
 	if (cfg->rc_output_labels > MAX_NEW_LABELS)
 		goto errout;
 
-	err = -ENODEV;
-	dev = dev_get_by_index(net, cfg->rc_ifindex);
-	if (!dev)
+	dev = find_outdev(net, cfg);
+	if (IS_ERR(dev)) {
+		err = PTR_ERR(dev);
+		dev = NULL;
 		goto errout;
+	}
 
 	/* Ensure this is a supported device */
 	err = -EINVAL;
@@ -401,6 +507,7 @@
 		rt->rt_label[i] = cfg->rc_output_label[i];
 	rt->rt_protocol = cfg->rc_protocol;
 	RCU_INIT_POINTER(rt->rt_dev, dev);
+	rt->rt_payload_type = cfg->rc_payload_type;
 	rt->rt_via_table = cfg->rc_via_table;
 	memcpy(rt->rt_via, cfg->rc_via, cfg->rc_via_alen);
 
@@ -423,8 +530,8 @@
 
 	index = cfg->rc_label;
 
-	/* The first 16 labels are reserved, and may not be removed */
-	if (index < 16)
+	/* Reserved labels may not be removed */
+	if (index < MPLS_LABEL_FIRST_UNRESERVED)
 		goto errout;
 
 	/* The full 20 bit range may not be supported */
@@ -626,6 +733,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
 		   u32 max_labels, u32 *labels, u32 label[])
@@ -671,6 +779,7 @@
 	*labels = nla_labels;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(nla_get_labels);
 
 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
 			       struct mpls_route_config *cfg)
@@ -740,8 +849,8 @@
 					   &cfg->rc_label))
 				goto errout;
 
-			/* The first 16 labels are reserved, and may not be set */
-			if (cfg->rc_label < 16)
+			/* Reserved labels may not be set */
+			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
 				goto errout;
 
 			break;
@@ -866,8 +975,8 @@
 	ASSERT_RTNL();
 
 	index = cb->args[0];
-	if (index < 16)
-		index = 16;
+	if (index < MPLS_LABEL_FIRST_UNRESERVED)
+		index = MPLS_LABEL_FIRST_UNRESERVED;
 
 	platform_label = rtnl_dereference(net->mpls.platform_label);
 	platform_labels = net->mpls.platform_labels;
@@ -953,6 +1062,7 @@
 			goto nort0;
 		RCU_INIT_POINTER(rt0->rt_dev, lo);
 		rt0->rt_protocol = RTPROT_KERNEL;
+		rt0->rt_payload_type = MPT_IPV4;
 		rt0->rt_via_table = NEIGH_LINK_TABLE;
 		memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
 	}
@@ -963,6 +1073,7 @@
 			goto nort2;
 		RCU_INIT_POINTER(rt2->rt_dev, lo);
 		rt2->rt_protocol = RTPROT_KERNEL;
+		rt2->rt_payload_type = MPT_IPV6;
 		rt2->rt_via_table = NEIGH_LINK_TABLE;
 		memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
 	}
@@ -1066,8 +1177,10 @@
 
 	table[0].data = net;
 	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
-	if (net->mpls.ctl == NULL)
+	if (net->mpls.ctl == NULL) {
+		kfree(table);
 		return -ENOMEM;
+	}
 
 	return 0;
 }
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 8cabeb5..2681a4b 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -50,7 +50,12 @@
 	return result;
 }
 
-int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels, const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
+		   const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+		   u32 label[]);
+bool mpls_output_possible(const struct net_device *dev);
+unsigned int mpls_dev_mtu(const struct net_device *dev);
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
 
 #endif /* MPLS_INTERNAL_H */
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
new file mode 100644
index 0000000..21e70bc
--- /dev/null
+++ b/net/mpls/mpls_iptunnel.c
@@ -0,0 +1,231 @@
+/*
+ * mpls tunnels	An implementation of MPLS tunnels using the lightweight
+ *		tunnel infrastructure
+ *
+ * Authors:	Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/lwtunnel.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include <net/ip6_fib.h>
+#include <net/route.h>
+#include <net/mpls_iptunnel.h>
+#include <linux/mpls_iptunnel.h>
+#include "internal.h"
+
+static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+	[MPLS_IPTUNNEL_DST]	= { .type = NLA_U32 },
+};
+
+static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
+{
+	/* The size of the layer 2.5 labels to be added for this route */
+	return en->labels * sizeof(struct mpls_shim_hdr);
+}
+
+int mpls_output(struct sock *sk, struct sk_buff *skb)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+	struct mpls_shim_hdr *hdr;
+	struct net_device *out_dev;
+	unsigned int hh_len;
+	unsigned int new_header_size;
+	unsigned int mtu;
+	struct dst_entry *dst = skb_dst(skb);
+	struct rtable *rt = NULL;
+	struct rt6_info *rt6 = NULL;
+	int err = 0;
+	bool bos;
+	int i;
+	unsigned int ttl;
+
+	/* Obtain the ttl */
+	if (skb->protocol == htons(ETH_P_IP)) {
+		ttl = ip_hdr(skb)->ttl;
+		rt = (struct rtable *)dst;
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		ttl = ipv6_hdr(skb)->hop_limit;
+		rt6 = (struct rt6_info *)dst;
+	} else {
+		goto drop;
+	}
+
+	skb_orphan(skb);
+
+	/* Find the output device */
+	out_dev = dst->dev;
+	if (!mpls_output_possible(out_dev) ||
+	    !dst->lwtstate || skb_warn_if_lro(skb))
+		goto drop;
+
+	skb_forward_csum(skb);
+
+	tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);
+
+	/* Verify the destination can hold the packet */
+	new_header_size = mpls_encap_size(tun_encap_info);
+	mtu = mpls_dev_mtu(out_dev);
+	if (mpls_pkt_too_big(skb, mtu - new_header_size))
+		goto drop;
+
+	hh_len = LL_RESERVED_SPACE(out_dev);
+	if (!out_dev->header_ops)
+		hh_len = 0;
+
+	/* Ensure there is enough space for the headers in the skb */
+	if (skb_cow(skb, hh_len + new_header_size))
+		goto drop;
+
+	skb_push(skb, new_header_size);
+	skb_reset_network_header(skb);
+
+	skb->dev = out_dev;
+	skb->protocol = htons(ETH_P_MPLS_UC);
+
+	/* Push the new labels */
+	hdr = mpls_hdr(skb);
+	bos = true;
+	for (i = tun_encap_info->labels - 1; i >= 0; i--) {
+		hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
+					   ttl, 0, bos);
+		bos = false;
+	}
+
+	if (rt)
+		err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
+				 skb);
+	else if (rt6)
+		err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+				 skb);
+	if (err)
+		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+				    __func__, err);
+
+	return 0;
+
+drop:
+	kfree_skb(skb);
+	return -EINVAL;
+}
+
+static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+			    unsigned int family, const void *cfg,
+			    struct lwtunnel_state **ts)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+	struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
+	struct lwtunnel_state *newts;
+	int tun_encap_info_len;
+	int ret;
+
+	ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
+			       mpls_iptunnel_policy);
+	if (ret < 0)
+		return ret;
+
+	if (!tb[MPLS_IPTUNNEL_DST])
+		return -EINVAL;
+
+	tun_encap_info_len = sizeof(*tun_encap_info);
+
+	newts = lwtunnel_state_alloc(tun_encap_info_len);
+	if (!newts)
+		return -ENOMEM;
+
+	newts->len = tun_encap_info_len;
+	tun_encap_info = mpls_lwtunnel_encap(newts);
+	ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+			     &tun_encap_info->labels, tun_encap_info->label);
+	if (ret)
+		goto errout;
+	newts->type = LWTUNNEL_ENCAP_MPLS;
+	newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+	*ts = newts;
+
+	return 0;
+
+errout:
+	kfree(newts);
+	*ts = NULL;
+
+	return ret;
+}
+
+static int mpls_fill_encap_info(struct sk_buff *skb,
+				struct lwtunnel_state *lwtstate)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+
+	tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+	if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
+			   tun_encap_info->label))
+		goto nla_put_failure;
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+	struct mpls_iptunnel_encap *tun_encap_info;
+
+	tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+	return nla_total_size(tun_encap_info->labels * 4);
+}
+
+static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+	struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
+	struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
+	int l;
+
+	if (a_hdr->labels != b_hdr->labels)
+		return 1;
+
+	for (l = 0; l < MAX_NEW_LABELS; l++)
+		if (a_hdr->label[l] != b_hdr->label[l])
+			return 1;
+	return 0;
+}
+
+static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+	.build_state = mpls_build_state,
+	.output = mpls_output,
+	.fill_encap = mpls_fill_encap_info,
+	.get_encap_size = mpls_encap_nlsize,
+	.cmp_encap = mpls_encap_cmp,
+};
+
+static int __init mpls_iptunnel_init(void)
+{
+	return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_init(mpls_iptunnel_init);
+
+static void __exit mpls_iptunnel_exit(void)
+{
+	lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_exit(mpls_iptunnel_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
+MODULE_LICENSE("GPL v2");
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6eae69a..3e1b4ab 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -867,6 +867,8 @@
 	depends on NETFILTER_ADVANCED
 	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
+	select NF_DUP_IPV4
+	select NF_DUP_IPV6 if IP6_NF_IPTABLES
 	---help---
 	This option adds a "TEE" target with which a packet can be cloned and
 	this clone be rerouted to another nexthop.
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a0e5497..8e47f81 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -34,6 +34,9 @@
 const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_ipv6_ops);
 
+DEFINE_PER_CPU(bool, nf_skb_duplicated);
+EXPORT_SYMBOL_GPL(nf_skb_duplicated);
+
 int nf_register_afinfo(const struct nf_afinfo *afinfo)
 {
 	mutex_lock(&afinfo_mutex);
@@ -52,9 +55,6 @@
 }
 EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
 
-struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
-EXPORT_SYMBOL(nf_hooks);
-
 #ifdef HAVE_JUMP_LABEL
 struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
@@ -62,63 +62,166 @@
 
 static DEFINE_MUTEX(nf_hook_mutex);
 
-int nf_register_hook(struct nf_hook_ops *reg)
+static struct list_head *nf_find_hook_list(struct net *net,
+					   const struct nf_hook_ops *reg)
 {
-	struct list_head *nf_hook_list;
+	struct list_head *hook_list = NULL;
+
+	if (reg->pf != NFPROTO_NETDEV)
+		hook_list = &net->nf.hooks[reg->pf][reg->hooknum];
+	else if (reg->hooknum == NF_NETDEV_INGRESS) {
+#ifdef CONFIG_NETFILTER_INGRESS
+		if (reg->dev && dev_net(reg->dev) == net)
+			hook_list = &reg->dev->nf_hooks_ingress;
+#endif
+	}
+	return hook_list;
+}
+
+struct nf_hook_entry {
+	const struct nf_hook_ops	*orig_ops;
+	struct nf_hook_ops		ops;
+};
+
+int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
+{
+	struct list_head *hook_list;
+	struct nf_hook_entry *entry;
 	struct nf_hook_ops *elem;
 
-	mutex_lock(&nf_hook_mutex);
-	switch (reg->pf) {
-	case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
-		if (reg->hooknum == NF_NETDEV_INGRESS) {
-			BUG_ON(reg->dev == NULL);
-			nf_hook_list = &reg->dev->nf_hooks_ingress;
-			net_inc_ingress_queue();
-			break;
-		}
-#endif
-		/* Fall through. */
-	default:
-		nf_hook_list = &nf_hooks[reg->pf][reg->hooknum];
-		break;
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->orig_ops	= reg;
+	entry->ops	= *reg;
+
+	hook_list = nf_find_hook_list(net, reg);
+	if (!hook_list) {
+		kfree(entry);
+		return -ENOENT;
 	}
 
-	list_for_each_entry(elem, nf_hook_list, list) {
+	mutex_lock(&nf_hook_mutex);
+	list_for_each_entry(elem, hook_list, list) {
 		if (reg->priority < elem->priority)
 			break;
 	}
-	list_add_rcu(&reg->list, elem->list.prev);
+	list_add_rcu(&entry->ops.list, elem->list.prev);
 	mutex_unlock(&nf_hook_mutex);
+#ifdef CONFIG_NETFILTER_INGRESS
+	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+		net_inc_ingress_queue();
+#endif
 #ifdef HAVE_JUMP_LABEL
 	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	return 0;
 }
-EXPORT_SYMBOL(nf_register_hook);
+EXPORT_SYMBOL(nf_register_net_hook);
 
-void nf_unregister_hook(struct nf_hook_ops *reg)
+void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
 {
+	struct list_head *hook_list;
+	struct nf_hook_entry *entry;
+	struct nf_hook_ops *elem;
+
+	hook_list = nf_find_hook_list(net, reg);
+	if (!hook_list)
+		return;
+
 	mutex_lock(&nf_hook_mutex);
-	list_del_rcu(&reg->list);
-	mutex_unlock(&nf_hook_mutex);
-	switch (reg->pf) {
-	case NFPROTO_NETDEV:
-#ifdef CONFIG_NETFILTER_INGRESS
-		if (reg->hooknum == NF_NETDEV_INGRESS) {
-			net_dec_ingress_queue();
+	list_for_each_entry(elem, hook_list, list) {
+		entry = container_of(elem, struct nf_hook_entry, ops);
+		if (entry->orig_ops == reg) {
+			list_del_rcu(&entry->ops.list);
 			break;
 		}
-		break;
-#endif
-	default:
-		break;
 	}
+	mutex_unlock(&nf_hook_mutex);
+	if (&elem->list == hook_list) {
+		WARN(1, "nf_unregister_net_hook: hook not found!\n");
+		return;
+	}
+#ifdef CONFIG_NETFILTER_INGRESS
+	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+		net_dec_ingress_queue();
+#endif
 #ifdef HAVE_JUMP_LABEL
 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
 	synchronize_net();
-	nf_queue_nf_hook_drop(reg);
+	nf_queue_nf_hook_drop(net, &entry->ops);
+	kfree(entry);
+}
+EXPORT_SYMBOL(nf_unregister_net_hook);
+
+int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			  unsigned int n)
+{
+	unsigned int i;
+	int err = 0;
+
+	for (i = 0; i < n; i++) {
+		err = nf_register_net_hook(net, &reg[i]);
+		if (err)
+			goto err;
+	}
+	return err;
+
+err:
+	if (i > 0)
+		nf_unregister_net_hooks(net, reg, i);
+	return err;
+}
+EXPORT_SYMBOL(nf_register_net_hooks);
+
+void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
+			     unsigned int n)
+{
+	while (n-- > 0)
+		nf_unregister_net_hook(net, &reg[n]);
+}
+EXPORT_SYMBOL(nf_unregister_net_hooks);
+
+static LIST_HEAD(nf_hook_list);
+
+int nf_register_hook(struct nf_hook_ops *reg)
+{
+	struct net *net, *last;
+	int ret;
+
+	rtnl_lock();
+	for_each_net(net) {
+		ret = nf_register_net_hook(net, reg);
+		if (ret && ret != -ENOENT)
+			goto rollback;
+	}
+	list_add_tail(&reg->list, &nf_hook_list);
+	rtnl_unlock();
+
+	return 0;
+rollback:
+	last = net;
+	for_each_net(net) {
+		if (net == last)
+			break;
+		nf_unregister_net_hook(net, reg);
+	}
+	rtnl_unlock();
+	return ret;
+}
+EXPORT_SYMBOL(nf_register_hook);
+
+void nf_unregister_hook(struct nf_hook_ops *reg)
+{
+	struct net *net;
+
+	rtnl_lock();
+	list_del(&reg->list);
+	for_each_net(net)
+		nf_unregister_net_hook(net, reg);
+	rtnl_unlock();
 }
 EXPORT_SYMBOL(nf_unregister_hook);
 
@@ -285,9 +388,12 @@
 struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
 EXPORT_SYMBOL_GPL(nfq_ct_hook);
 
-struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
-EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
-
+/* Built-in default zone used e.g. by modules. */
+const struct nf_conntrack_zone nf_ct_zone_dflt = {
+	.id	= NF_CT_DEFAULT_ZONE_ID,
+	.dir	= NF_CT_DEFAULT_ZONE_DIR,
+};
+EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
 #endif /* CONFIG_NF_CONNTRACK */
 
 #ifdef CONFIG_NF_NAT_NEEDED
@@ -295,8 +401,46 @@
 EXPORT_SYMBOL(nf_nat_decode_session_hook);
 #endif
 
+static int nf_register_hook_list(struct net *net)
+{
+	struct nf_hook_ops *elem;
+	int ret;
+
+	rtnl_lock();
+	list_for_each_entry(elem, &nf_hook_list, list) {
+		ret = nf_register_net_hook(net, elem);
+		if (ret && ret != -ENOENT)
+			goto out_undo;
+	}
+	rtnl_unlock();
+	return 0;
+
+out_undo:
+	list_for_each_entry_continue_reverse(elem, &nf_hook_list, list)
+		nf_unregister_net_hook(net, elem);
+	rtnl_unlock();
+	return ret;
+}
+
+static void nf_unregister_hook_list(struct net *net)
+{
+	struct nf_hook_ops *elem;
+
+	rtnl_lock();
+	list_for_each_entry(elem, &nf_hook_list, list)
+		nf_unregister_net_hook(net, elem);
+	rtnl_unlock();
+}
+
 static int __net_init netfilter_net_init(struct net *net)
 {
+	int i, h, ret;
+
+	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
+		for (h = 0; h < NF_MAX_HOOKS; h++)
+			INIT_LIST_HEAD(&net->nf.hooks[i][h]);
+	}
+
 #ifdef CONFIG_PROC_FS
 	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
 						net->proc_net);
@@ -307,11 +451,16 @@
 		return -ENOMEM;
 	}
 #endif
-	return 0;
+	ret = nf_register_hook_list(net);
+	if (ret)
+		remove_proc_entry("netfilter", net->proc_net);
+
+	return ret;
 }
 
 static void __net_exit netfilter_net_exit(struct net *net)
 {
+	nf_unregister_hook_list(net);
 	remove_proc_entry("netfilter", net->proc_net);
 }
 
@@ -322,12 +471,7 @@
 
 int __init netfilter_init(void)
 {
-	int i, h, ret;
-
-	for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
-		for (h = 0; h < NF_MAX_HOOKS; h++)
-			INIT_LIST_HEAD(&nf_hooks[i][h]);
-	}
+	int ret;
 
 	ret = register_pernet_subsys(&netfilter_net_ops);
 	if (ret < 0)
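
The core.c rework above replaces the global nf_hooks array with per-namespace hook lists and introduces nf_register_net_hook()/nf_register_net_hooks(), keeping nf_register_hook() as a wrapper that applies a hook to every namespace. Below is a hedged sketch of how a module might use the new per-net API from pernet init/exit callbacks; the "demo_" names are invented and the nf_hookfn signature shown is assumed to match kernels of this vintage rather than taken from this patch.

/* Hypothetical sketch of using the new per-namespace hook API from a
 * pernet subsystem; "demo_" identifiers are made up for illustration.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/net_namespace.h>

static unsigned int demo_hook(const struct nf_hook_ops *ops,
			      struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* pass every packet through unchanged */
}

static struct nf_hook_ops demo_ops[] = {
	{
		.hook		= demo_hook,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_FIRST,
	},
};

static int __net_init demo_net_init(struct net *net)
{
	return nf_register_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
}

static void __net_exit demo_net_exit(struct net *net)
{
	nf_unregister_net_hooks(net, demo_ops, ARRAY_SIZE(demo_ops));
}

static struct pernet_operations demo_net_ops = {
	.init = demo_net_init,
	.exit = demo_net_exit,
};

The module would then hand demo_net_ops to register_pernet_subsys() so the hooks are added and removed as network namespaces are created and torn down, which is the pattern the per-net API is designed for.
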
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 3b6929d..b32fb0d 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -162,6 +162,17 @@
 	  If you want to compile it in kernel, say Y. To compile it as a
 	  module, choose M here. If unsure, say N.
 
+config  IP_VS_OVF
+	tristate "weighted overflow scheduling"
+	---help---
+	  The weighted overflow scheduling algorithm directs network
+	  connections to the server with the highest weight that is
+	  currently available and overflows to the next when active
+	  connections exceed the node's weight.
+
+	  If you want to compile it in kernel, say Y. To compile it as a
+	  module, choose M here. If unsure, say N.
+
 config	IP_VS_LBLC
 	tristate "locality-based least-connection scheduling"
 	---help---
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 38b2723..67f3f43 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -27,6 +27,7 @@
 obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o
 obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o
 obj-$(CONFIG_IP_VS_FO) += ip_vs_fo.o
+obj-$(CONFIG_IP_VS_OVF) += ip_vs_ovf.o
 obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
 obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
 obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 24c5542..1a23e91 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2335,13 +2335,23 @@
 	    cmd == IP_VS_SO_SET_STOPDAEMON) {
 		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
 
-		mutex_lock(&ipvs->sync_mutex);
-		if (cmd == IP_VS_SO_SET_STARTDAEMON)
-			ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
-						dm->syncid);
-		else
+		if (cmd == IP_VS_SO_SET_STARTDAEMON) {
+			struct ipvs_sync_daemon_cfg cfg;
+
+			memset(&cfg, 0, sizeof(cfg));
+			strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
+				sizeof(cfg.mcast_ifn));
+			cfg.syncid = dm->syncid;
+			rtnl_lock();
+			mutex_lock(&ipvs->sync_mutex);
+			ret = start_sync_thread(net, &cfg, dm->state);
+			mutex_unlock(&ipvs->sync_mutex);
+			rtnl_unlock();
+		} else {
+			mutex_lock(&ipvs->sync_mutex);
 			ret = stop_sync_thread(net, dm->state);
-		mutex_unlock(&ipvs->sync_mutex);
+			mutex_unlock(&ipvs->sync_mutex);
+		}
 		goto out_dec;
 	}
 
@@ -2645,15 +2655,15 @@
 		mutex_lock(&ipvs->sync_mutex);
 		if (ipvs->sync_state & IP_VS_STATE_MASTER) {
 			d[0].state = IP_VS_STATE_MASTER;
-			strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+			strlcpy(d[0].mcast_ifn, ipvs->mcfg.mcast_ifn,
 				sizeof(d[0].mcast_ifn));
-			d[0].syncid = ipvs->master_syncid;
+			d[0].syncid = ipvs->mcfg.syncid;
 		}
 		if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
 			d[1].state = IP_VS_STATE_BACKUP;
-			strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+			strlcpy(d[1].mcast_ifn, ipvs->bcfg.mcast_ifn,
 				sizeof(d[1].mcast_ifn));
-			d[1].syncid = ipvs->backup_syncid;
+			d[1].syncid = ipvs->bcfg.syncid;
 		}
 		if (copy_to_user(user, &d, sizeof(d)) != 0)
 			ret = -EFAULT;
@@ -2808,6 +2818,11 @@
 	[IPVS_DAEMON_ATTR_MCAST_IFN]	= { .type = NLA_NUL_STRING,
 					    .len = IP_VS_IFNAME_MAXLEN },
 	[IPVS_DAEMON_ATTR_SYNC_ID]	= { .type = NLA_U32 },
+	[IPVS_DAEMON_ATTR_SYNC_MAXLEN]	= { .type = NLA_U16 },
+	[IPVS_DAEMON_ATTR_MCAST_GROUP]	= { .type = NLA_U32 },
+	[IPVS_DAEMON_ATTR_MCAST_GROUP6]	= { .len = sizeof(struct in6_addr) },
+	[IPVS_DAEMON_ATTR_MCAST_PORT]	= { .type = NLA_U16 },
+	[IPVS_DAEMON_ATTR_MCAST_TTL]	= { .type = NLA_U8 },
 };
 
 /* Policy used for attributes in nested attribute IPVS_CMD_ATTR_SERVICE */
@@ -3266,7 +3281,7 @@
 }
 
 static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __u32 state,
-				  const char *mcast_ifn, __u32 syncid)
+				  struct ipvs_sync_daemon_cfg *c)
 {
 	struct nlattr *nl_daemon;
 
@@ -3275,9 +3290,23 @@
 		return -EMSGSIZE;
 
 	if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
-	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
-	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
+	    nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, c->mcast_ifn) ||
+	    nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, c->syncid) ||
+	    nla_put_u16(skb, IPVS_DAEMON_ATTR_SYNC_MAXLEN, c->sync_maxlen) ||
+	    nla_put_u16(skb, IPVS_DAEMON_ATTR_MCAST_PORT, c->mcast_port) ||
+	    nla_put_u8(skb, IPVS_DAEMON_ATTR_MCAST_TTL, c->mcast_ttl))
 		goto nla_put_failure;
+#ifdef CONFIG_IP_VS_IPV6
+	if (c->mcast_af == AF_INET6) {
+		if (nla_put_in6_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP6,
+				     &c->mcast_group.in6))
+			goto nla_put_failure;
+	} else
+#endif
+		if (c->mcast_af == AF_INET &&
+		    nla_put_in_addr(skb, IPVS_DAEMON_ATTR_MCAST_GROUP,
+				    c->mcast_group.ip))
+			goto nla_put_failure;
 	nla_nest_end(skb, nl_daemon);
 
 	return 0;
@@ -3288,7 +3317,7 @@
 }
 
 static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state,
-				  const char *mcast_ifn, __u32 syncid,
+				  struct ipvs_sync_daemon_cfg *c,
 				  struct netlink_callback *cb)
 {
 	void *hdr;
@@ -3298,7 +3327,7 @@
 	if (!hdr)
 		return -EMSGSIZE;
 
-	if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid))
+	if (ip_vs_genl_fill_daemon(skb, state, c))
 		goto nla_put_failure;
 
 	genlmsg_end(skb, hdr);
@@ -3318,8 +3347,7 @@
 	mutex_lock(&ipvs->sync_mutex);
 	if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
-					   ipvs->master_mcast_ifn,
-					   ipvs->master_syncid, cb) < 0)
+					   &ipvs->mcfg, cb) < 0)
 			goto nla_put_failure;
 
 		cb->args[0] = 1;
@@ -3327,8 +3355,7 @@
 
 	if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
-					   ipvs->backup_mcast_ifn,
-					   ipvs->backup_syncid, cb) < 0)
+					   &ipvs->bcfg, cb) < 0)
 			goto nla_put_failure;
 
 		cb->args[1] = 1;
@@ -3342,30 +3369,83 @@
 
 static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ipvs_sync_daemon_cfg c;
+	struct nlattr *a;
+	int ret;
+
+	memset(&c, 0, sizeof(c));
 	if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
 	      attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
 	      attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
 		return -EINVAL;
+	strlcpy(c.mcast_ifn, nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
+		sizeof(c.mcast_ifn));
+	c.syncid = nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]);
+
+	a = attrs[IPVS_DAEMON_ATTR_SYNC_MAXLEN];
+	if (a)
+		c.sync_maxlen = nla_get_u16(a);
+
+	a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP];
+	if (a) {
+		c.mcast_af = AF_INET;
+		c.mcast_group.ip = nla_get_in_addr(a);
+		if (!ipv4_is_multicast(c.mcast_group.ip))
+			return -EINVAL;
+	} else {
+		a = attrs[IPVS_DAEMON_ATTR_MCAST_GROUP6];
+		if (a) {
+#ifdef CONFIG_IP_VS_IPV6
+			int addr_type;
+
+			c.mcast_af = AF_INET6;
+			c.mcast_group.in6 = nla_get_in6_addr(a);
+			addr_type = ipv6_addr_type(&c.mcast_group.in6);
+			if (!(addr_type & IPV6_ADDR_MULTICAST))
+				return -EINVAL;
+#else
+			return -EAFNOSUPPORT;
+#endif
+		}
+	}
+
+	a = attrs[IPVS_DAEMON_ATTR_MCAST_PORT];
+	if (a)
+		c.mcast_port = nla_get_u16(a);
+
+	a = attrs[IPVS_DAEMON_ATTR_MCAST_TTL];
+	if (a)
+		c.mcast_ttl = nla_get_u8(a);
 
 	/* The synchronization protocol is incompatible with mixed family
 	 * services
 	 */
-	if (net_ipvs(net)->mixed_address_family_dests > 0)
+	if (ipvs->mixed_address_family_dests > 0)
 		return -EINVAL;
 
-	return start_sync_thread(net,
-				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
-				 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
-				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
+	rtnl_lock();
+	mutex_lock(&ipvs->sync_mutex);
+	ret = start_sync_thread(net, &c,
+				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	mutex_unlock(&ipvs->sync_mutex);
+	rtnl_unlock();
+	return ret;
 }
 
 static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	int ret;
+
 	if (!attrs[IPVS_DAEMON_ATTR_STATE])
 		return -EINVAL;
 
-	return stop_sync_thread(net,
-				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	mutex_lock(&ipvs->sync_mutex);
+	ret = stop_sync_thread(net,
+			       nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	mutex_unlock(&ipvs->sync_mutex);
+	return ret;
 }
 
 static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
@@ -3389,7 +3469,7 @@
 
 static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info)
 {
-	int ret = 0, cmd;
+	int ret = -EINVAL, cmd;
 	struct net *net;
 	struct netns_ipvs *ipvs;
 
@@ -3400,22 +3480,19 @@
 	if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) {
 		struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1];
 
-		mutex_lock(&ipvs->sync_mutex);
 		if (!info->attrs[IPVS_CMD_ATTR_DAEMON] ||
 		    nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX,
 				     info->attrs[IPVS_CMD_ATTR_DAEMON],
-				     ip_vs_daemon_policy)) {
-			ret = -EINVAL;
+				     ip_vs_daemon_policy))
 			goto out;
-		}
 
 		if (cmd == IPVS_CMD_NEW_DAEMON)
 			ret = ip_vs_genl_new_daemon(net, daemon_attrs);
 		else
 			ret = ip_vs_genl_del_daemon(net, daemon_attrs);
-out:
-		mutex_unlock(&ipvs->sync_mutex);
 	}
+
+out:
 	return ret;
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 5882bbf..1361845 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -274,7 +274,7 @@
 		" for conn " FMT_CONN "\n",
 		__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-	h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+	h = nf_conntrack_find_get(ip_vs_conn_net(cp), &nf_ct_zone_dflt,
 				  &tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/ipvs/ip_vs_ovf.c b/net/netfilter/ipvs/ip_vs_ovf.c
new file mode 100644
index 0000000..f7d62c3
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_ovf.c
@@ -0,0 +1,86 @@
+/*
+ * IPVS:        Overflow-Connection Scheduling module
+ *
+ * Authors:     Raducu Deaconu <rhadoo_io@yahoo.com>
+ *
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              as published by the Free Software Foundation; either version
+ *              2 of the License, or (at your option) any later version.
+ *
+ * Scheduler implements "overflow" load balancing according to the number of
+ * active connections: it keeps all connections on the node with the highest
+ * weight and overflows to the next node when the number of active connections
+ * exceeds that node's weight.
+ * Note that this scheduler might not be suitable for UDP because it only uses
+ * active connections.
+ *
+ */
+
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <net/ip_vs.h>
+
+/* OVF Connection scheduling  */
+static struct ip_vs_dest *
+ip_vs_ovf_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
+		   struct ip_vs_iphdr *iph)
+{
+	struct ip_vs_dest *dest, *h = NULL;
+	int hw = 0, w;
+
+	IP_VS_DBG(6, "ip_vs_ovf_schedule(): Scheduling...\n");
+	/* select the node with highest weight, go to next in line if active
+	 * connections exceed weight
+	 */
+	list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+		w = atomic_read(&dest->weight);
+		if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
+		    atomic_read(&dest->activeconns) > w ||
+		    w == 0)
+			continue;
+		if (!h || w > hw) {
+			h = dest;
+			hw = w;
+		}
+	}
+
+	if (h) {
+		IP_VS_DBG_BUF(6, "OVF: server %s:%u active %d w %d\n",
+			      IP_VS_DBG_ADDR(h->af, &h->addr),
+			      ntohs(h->port),
+			      atomic_read(&h->activeconns),
+			      atomic_read(&h->weight));
+		return h;
+	}
+
+	ip_vs_scheduler_err(svc, "no destination available");
+	return NULL;
+}
+
+static struct ip_vs_scheduler ip_vs_ovf_scheduler = {
+	.name =			"ovf",
+	.refcnt =		ATOMIC_INIT(0),
+	.module =		THIS_MODULE,
+	.n_list =		LIST_HEAD_INIT(ip_vs_ovf_scheduler.n_list),
+	.schedule =		ip_vs_ovf_schedule,
+};
+
+static int __init ip_vs_ovf_init(void)
+{
+	return register_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+}
+
+static void __exit ip_vs_ovf_cleanup(void)
+{
+	unregister_ip_vs_scheduler(&ip_vs_ovf_scheduler);
+	synchronize_rcu();
+}
+
+module_init(ip_vs_ovf_init);
+module_exit(ip_vs_ovf_cleanup);
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 7e81416..a2ff7d7 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -137,7 +137,7 @@
 
 void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 {
-	if (scheduler && scheduler->module)
+	if (scheduler)
 		module_put(scheduler->module);
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index d99ad93e..43f1409 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -262,6 +262,11 @@
 	/* ip_vs_sync_conn entries start here */
 };
 
+union ipvs_sockaddr {
+	struct sockaddr_in	in;
+	struct sockaddr_in6	in6;
+};
+
 struct ip_vs_sync_buff {
 	struct list_head        list;
 	unsigned long           firstuse;
@@ -320,26 +325,28 @@
  * Create a new sync buffer for Version 1 proto.
  */
 static inline struct ip_vs_sync_buff *
-ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs, unsigned int len)
 {
 	struct ip_vs_sync_buff *sb;
 
 	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
 		return NULL;
 
-	sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+	len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg),
+		    ipvs->mcfg.sync_maxlen);
+	sb->mesg = kmalloc(len, GFP_ATOMIC);
 	if (!sb->mesg) {
 		kfree(sb);
 		return NULL;
 	}
 	sb->mesg->reserved = 0;  /* old nr_conns i.e. must be zero now */
 	sb->mesg->version = SYNC_PROTO_VER;
-	sb->mesg->syncid = ipvs->master_syncid;
+	sb->mesg->syncid = ipvs->mcfg.syncid;
 	sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg));
 	sb->mesg->nr_conns = 0;
 	sb->mesg->spare = 0;
 	sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
-	sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+	sb->end = (unsigned char *)sb->mesg + len;
 
 	sb->firstuse = jiffies;
 	return sb;
@@ -402,7 +409,7 @@
  * Create a new sync buffer for Version 0 proto.
  */
 static inline struct ip_vs_sync_buff *
-ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs, unsigned int len)
 {
 	struct ip_vs_sync_buff *sb;
 	struct ip_vs_sync_mesg_v0 *mesg;
@@ -410,17 +417,19 @@
 	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
 		return NULL;
 
-	sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+	len = max_t(unsigned int, len + sizeof(struct ip_vs_sync_mesg_v0),
+		    ipvs->mcfg.sync_maxlen);
+	sb->mesg = kmalloc(len, GFP_ATOMIC);
 	if (!sb->mesg) {
 		kfree(sb);
 		return NULL;
 	}
 	mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
 	mesg->nr_conns = 0;
-	mesg->syncid = ipvs->master_syncid;
+	mesg->syncid = ipvs->mcfg.syncid;
 	mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0));
 	sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
-	sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+	sb->end = (unsigned char *)mesg + len;
 	sb->firstuse = jiffies;
 	return sb;
 }
@@ -533,7 +542,7 @@
 	struct ip_vs_sync_buff *buff;
 	struct ipvs_master_sync_state *ms;
 	int id;
-	int len;
+	unsigned int len;
 
 	if (unlikely(cp->af != AF_INET))
 		return;
@@ -553,17 +562,19 @@
 	id = select_master_thread_id(ipvs, cp);
 	ms = &ipvs->ms[id];
 	buff = ms->sync_buff;
+	len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
+		SIMPLE_CONN_SIZE;
 	if (buff) {
 		m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
 		/* Send buffer if it is for v1 */
-		if (!m->nr_conns) {
+		if (buff->head + len > buff->end || !m->nr_conns) {
 			sb_queue_tail(ipvs, ms);
 			ms->sync_buff = NULL;
 			buff = NULL;
 		}
 	}
 	if (!buff) {
-		buff = ip_vs_sync_buff_create_v0(ipvs);
+		buff = ip_vs_sync_buff_create_v0(ipvs, len);
 		if (!buff) {
 			spin_unlock_bh(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
@@ -572,8 +583,6 @@
 		ms->sync_buff = buff;
 	}
 
-	len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
-		SIMPLE_CONN_SIZE;
 	m = (struct ip_vs_sync_mesg_v0 *) buff->mesg;
 	s = (struct ip_vs_sync_conn_v0 *) buff->head;
 
@@ -597,12 +606,6 @@
 	m->nr_conns++;
 	m->size = htons(ntohs(m->size) + len);
 	buff->head += len;
-
-	/* check if there is a space for next one */
-	if (buff->head + FULL_CONN_SIZE > buff->end) {
-		sb_queue_tail(ipvs, ms);
-		ms->sync_buff = NULL;
-	}
 	spin_unlock_bh(&ipvs->sync_buff_lock);
 
 	/* synchronize its controller if it has */
@@ -694,7 +697,7 @@
 	}
 
 	if (!buff) {
-		buff = ip_vs_sync_buff_create(ipvs);
+		buff = ip_vs_sync_buff_create(ipvs, len);
 		if (!buff) {
 			spin_unlock_bh(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
@@ -1219,7 +1222,7 @@
 		return;
 	}
 	/* SyncID sanity check */
-	if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+	if (ipvs->bcfg.syncid != 0 && m2->syncid != ipvs->bcfg.syncid) {
 		IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
 		return;
 	}
@@ -1303,6 +1306,14 @@
 	/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
 	lock_sock(sk);
 	inet->mc_loop = loop ? 1 : 0;
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		/* IPV6_MULTICAST_LOOP */
+		np->mc_loop = loop ? 1 : 0;
+	}
+#endif
 	release_sock(sk);
 }
 
@@ -1316,6 +1327,33 @@
 	/* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
 	lock_sock(sk);
 	inet->mc_ttl = ttl;
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		/* IPV6_MULTICAST_HOPS */
+		np->mcast_hops = ttl;
+	}
+#endif
+	release_sock(sk);
+}
+
+/* Control fragmentation of messages */
+static void set_mcast_pmtudisc(struct sock *sk, int val)
+{
+	struct inet_sock *inet = inet_sk(sk);
+
+	/* setsockopt(sock, SOL_IP, IP_MTU_DISCOVER, &val, sizeof(val)); */
+	lock_sock(sk);
+	inet->pmtudisc = val;
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
+
+		/* IPV6_MTU_DISCOVER */
+		np->pmtudisc = val;
+	}
+#endif
 	release_sock(sk);
 }
 
@@ -1338,44 +1376,15 @@
 	lock_sock(sk);
 	inet->mc_index = dev->ifindex;
 	/*  inet->mc_addr  = 0; */
-	release_sock(sk);
+#ifdef CONFIG_IP_VS_IPV6
+	if (sk->sk_family == AF_INET6) {
+		struct ipv6_pinfo *np = inet6_sk(sk);
 
-	return 0;
-}
-
-
-/*
- *	Set the maximum length of sync message according to the
- *	specified interface's MTU.
- */
-static int set_sync_mesg_maxlen(struct net *net, int sync_state)
-{
-	struct netns_ipvs *ipvs = net_ipvs(net);
-	struct net_device *dev;
-	int num;
-
-	if (sync_state == IP_VS_STATE_MASTER) {
-		dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
-		if (!dev)
-			return -ENODEV;
-
-		num = (dev->mtu - sizeof(struct iphdr) -
-		       sizeof(struct udphdr) -
-		       SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
-		ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
-			SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
-		IP_VS_DBG(7, "setting the maximum length of sync sending "
-			  "message %d.\n", ipvs->send_mesg_maxlen);
-	} else if (sync_state == IP_VS_STATE_BACKUP) {
-		dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
-		if (!dev)
-			return -ENODEV;
-
-		ipvs->recv_mesg_maxlen = dev->mtu -
-			sizeof(struct iphdr) - sizeof(struct udphdr);
-		IP_VS_DBG(7, "setting the maximum length of sync receiving "
-			  "message %d.\n", ipvs->recv_mesg_maxlen);
+		/* IPV6_MULTICAST_IF */
+		np->mcast_oif = dev->ifindex;
 	}
+#endif
+	release_sock(sk);
 
 	return 0;
 }
@@ -1405,15 +1414,34 @@
 
 	mreq.imr_ifindex = dev->ifindex;
 
-	rtnl_lock();
 	lock_sock(sk);
 	ret = ip_mc_join_group(sk, &mreq);
 	release_sock(sk);
-	rtnl_unlock();
 
 	return ret;
 }
 
+#ifdef CONFIG_IP_VS_IPV6
+static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
+			     char *ifname)
+{
+	struct net *net = sock_net(sk);
+	struct net_device *dev;
+	int ret;
+
+	dev = __dev_get_by_name(net, ifname);
+	if (!dev)
+		return -ENODEV;
+	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
+		return -EINVAL;
+
+	lock_sock(sk);
+	ret = ipv6_sock_mc_join(sk, dev->ifindex, addr);
+	release_sock(sk);
+
+	return ret;
+}
+#endif
 
 static int bind_mcastif_addr(struct socket *sock, char *ifname)
 {
@@ -1442,6 +1470,26 @@
 	return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
 }
 
+static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
+			       struct ipvs_sync_daemon_cfg *c, int id)
+{
+	if (AF_INET6 == c->mcast_af) {
+		sa->in6 = (struct sockaddr_in6) {
+			.sin6_family = AF_INET6,
+			.sin6_port = htons(c->mcast_port + id),
+		};
+		sa->in6.sin6_addr = c->mcast_group.in6;
+		*salen = sizeof(sa->in6);
+	} else {
+		sa->in = (struct sockaddr_in) {
+			.sin_family = AF_INET,
+			.sin_port = htons(c->mcast_port + id),
+		};
+		sa->in.sin_addr = c->mcast_group.in;
+		*salen = sizeof(sa->in);
+	}
+}
+
 /*
  *      Set up sending multicast socket over UDP
  */
@@ -1449,40 +1497,43 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
-	struct sockaddr_in mcast_addr = {
-		.sin_family		= AF_INET,
-		.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT + id),
-		.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
-	};
+	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
-	int result;
+	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = sock_create_kern(net, ipvs->mcfg.mcast_af, SOCK_DGRAM,
+				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
 	}
-	result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
+	result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
 		goto error;
 	}
 
 	set_mcast_loop(sock->sk, 0);
-	set_mcast_ttl(sock->sk, 1);
+	set_mcast_ttl(sock->sk, ipvs->mcfg.mcast_ttl);
+	/* Allow fragmentation if MTU changes */
+	set_mcast_pmtudisc(sock->sk, IP_PMTUDISC_DONT);
 	result = sysctl_sync_sock_size(ipvs);
 	if (result > 0)
 		set_sock_size(sock->sk, 1, result);
 
-	result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
+	if (AF_INET == ipvs->mcfg.mcast_af)
+		result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+	else
+		result = 0;
 	if (result < 0) {
 		pr_err("Error binding address of the mcast interface\n");
 		goto error;
 	}
 
+	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
 	result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
-			sizeof(struct sockaddr), 0);
+				    salen, 0);
 	if (result < 0) {
 		pr_err("Error connecting to the multicast addr\n");
 		goto error;
@@ -1503,16 +1554,13 @@
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* multicast addr */
-	struct sockaddr_in mcast_addr = {
-		.sin_family		= AF_INET,
-		.sin_port		= cpu_to_be16(IP_VS_SYNC_PORT + id),
-		.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
-	};
+	union ipvs_sockaddr mcast_addr;
 	struct socket *sock;
-	int result;
+	int result, salen;
 
 	/* First create a socket */
-	result = sock_create_kern(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = sock_create_kern(net, ipvs->bcfg.mcast_af, SOCK_DGRAM,
+				  IPPROTO_UDP, &sock);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
@@ -1523,17 +1571,22 @@
 	if (result > 0)
 		set_sock_size(sock->sk, 0, result);
 
-	result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
-			sizeof(struct sockaddr));
+	get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+	result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
 	if (result < 0) {
 		pr_err("Error binding to the multicast addr\n");
 		goto error;
 	}
 
 	/* join the multicast group */
-	result = join_mcast_group(sock->sk,
-			(struct in_addr *) &mcast_addr.sin_addr,
-			ipvs->backup_mcast_ifn);
+#ifdef CONFIG_IP_VS_IPV6
+	if (ipvs->bcfg.mcast_af == AF_INET6)
+		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
+					   ipvs->bcfg.mcast_ifn);
+	else
+#endif
+		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
+					  ipvs->bcfg.mcast_ifn);
 	if (result < 0) {
 		pr_err("Error joining to the multicast group\n");
 		goto error;
@@ -1641,7 +1694,7 @@
 
 	pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
 		"syncid = %d, id = %d\n",
-		ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id);
+		ipvs->mcfg.mcast_ifn, ipvs->mcfg.syncid, tinfo->id);
 
 	for (;;) {
 		sb = next_sync_buff(ipvs, ms);
@@ -1695,7 +1748,7 @@
 
 	pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
 		"syncid = %d, id = %d\n",
-		ipvs->backup_mcast_ifn, ipvs->backup_syncid, tinfo->id);
+		ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -1705,7 +1758,7 @@
 		/* do we have data now? */
 		while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
 			len = ip_vs_receive(tinfo->sock, tinfo->buf,
-					ipvs->recv_mesg_maxlen);
+					ipvs->bcfg.sync_maxlen);
 			if (len <= 0) {
 				if (len != -EAGAIN)
 					pr_err("receiving message error\n");
@@ -1725,16 +1778,19 @@
 }
 
 
-int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, struct ipvs_sync_daemon_cfg *c,
+		      int state)
 {
 	struct ip_vs_sync_thread_data *tinfo;
 	struct task_struct **array = NULL, *task;
 	struct socket *sock;
 	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct net_device *dev;
 	char *name;
 	int (*threadfn)(void *data);
-	int id, count;
+	int id, count, hlen;
 	int result = -ENOMEM;
+	u16 mtu, min_mtu;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
@@ -1746,22 +1802,46 @@
 	} else
 		count = ipvs->threads_mask + 1;
 
+	if (c->mcast_af == AF_UNSPEC) {
+		c->mcast_af = AF_INET;
+		c->mcast_group.ip = cpu_to_be32(IP_VS_SYNC_GROUP);
+	}
+	if (!c->mcast_port)
+		c->mcast_port = IP_VS_SYNC_PORT;
+	if (!c->mcast_ttl)
+		c->mcast_ttl = 1;
+
+	dev = __dev_get_by_name(net, c->mcast_ifn);
+	if (!dev) {
+		pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
+		return -ENODEV;
+	}
+	hlen = (AF_INET6 == c->mcast_af) ?
+	       sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
+	       sizeof(struct iphdr) + sizeof(struct udphdr);
+	mtu = (state == IP_VS_STATE_BACKUP) ?
+		  clamp(dev->mtu, 1500U, 65535U) : 1500U;
+	min_mtu = (state == IP_VS_STATE_BACKUP) ? 1024 : 1;
+
+	if (c->sync_maxlen)
+		c->sync_maxlen = clamp_t(unsigned int,
+					 c->sync_maxlen, min_mtu,
+					 65535 - hlen);
+	else
+		c->sync_maxlen = mtu - hlen;
+
 	if (state == IP_VS_STATE_MASTER) {
 		if (ipvs->ms)
 			return -EEXIST;
 
-		strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
-			sizeof(ipvs->master_mcast_ifn));
-		ipvs->master_syncid = syncid;
+		ipvs->mcfg = *c;
 		name = "ipvs-m:%d:%d";
 		threadfn = sync_thread_master;
 	} else if (state == IP_VS_STATE_BACKUP) {
 		if (ipvs->backup_threads)
 			return -EEXIST;
 
-		strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
-			sizeof(ipvs->backup_mcast_ifn));
-		ipvs->backup_syncid = syncid;
+		ipvs->bcfg = *c;
 		name = "ipvs-b:%d:%d";
 		threadfn = sync_thread_backup;
 	} else {
@@ -1789,7 +1869,6 @@
 		if (!array)
 			goto out;
 	}
-	set_sync_mesg_maxlen(net, state);
 
 	tinfo = NULL;
 	for (id = 0; id < count; id++) {
@@ -1807,7 +1886,7 @@
 		tinfo->net = net;
 		tinfo->sock = sock;
 		if (state == IP_VS_STATE_BACKUP) {
-			tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen,
+			tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
 					     GFP_KERNEL);
 			if (!tinfo->buf)
 				goto outtinfo;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3c20d02..eedf049 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -126,7 +126,7 @@
 unsigned int nf_conntrack_hash_rnd __read_mostly;
 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
 
-static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
+static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int n;
 
@@ -135,7 +135,7 @@
 	 * three bytes manually.
 	 */
 	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
-	return jhash2((u32 *)tuple, n, zone ^ nf_conntrack_hash_rnd ^
+	return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
 		      (((__force __u16)tuple->dst.u.all << 16) |
 		      tuple->dst.protonum));
 }
@@ -151,15 +151,15 @@
 }
 
 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
-				  u16 zone, unsigned int size)
+				  unsigned int size)
 {
-	return __hash_bucket(hash_conntrack_raw(tuple, zone), size);
+	return __hash_bucket(hash_conntrack_raw(tuple), size);
 }
 
-static inline u_int32_t hash_conntrack(const struct net *net, u16 zone,
+static inline u_int32_t hash_conntrack(const struct net *net,
 				       const struct nf_conntrack_tuple *tuple)
 {
-	return __hash_conntrack(tuple, zone, net->ct.htable_size);
+	return __hash_conntrack(tuple, net->ct.htable_size);
 }
 
 bool
@@ -288,7 +288,9 @@
 }
 
 /* Released via destroy_conntrack() */
-struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
+struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
+				 const struct nf_conntrack_zone *zone,
+				 gfp_t flags)
 {
 	struct nf_conn *tmpl;
 
@@ -299,24 +301,15 @@
 	tmpl->status = IPS_TEMPLATE;
 	write_pnet(&tmpl->ct_net, net);
 
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-	if (zone) {
-		struct nf_conntrack_zone *nf_ct_zone;
+	if (nf_ct_zone_add(tmpl, flags, zone) < 0)
+		goto out_free;
 
-		nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
-		if (!nf_ct_zone)
-			goto out_free;
-		nf_ct_zone->id = zone;
-	}
-#endif
 	atomic_set(&tmpl->ct_general.use, 0);
 
 	return tmpl;
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
 	kfree(tmpl);
 	return NULL;
-#endif
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
 
@@ -373,7 +366,6 @@
 {
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, reply_hash;
-	u16 zone = nf_ct_zone(ct);
 	unsigned int sequence;
 
 	nf_ct_helper_destroy(ct);
@@ -381,9 +373,9 @@
 	local_bh_disable();
 	do {
 		sequence = read_seqcount_begin(&net->ct.generation);
-		hash = hash_conntrack(net, zone,
+		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-		reply_hash = hash_conntrack(net, zone,
+		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -431,8 +423,8 @@
 
 static inline bool
 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
-			const struct nf_conntrack_tuple *tuple,
-			u16 zone)
+		const struct nf_conntrack_tuple *tuple,
+		const struct nf_conntrack_zone *zone)
 {
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
@@ -440,8 +432,8 @@
 	 * so we need to check that the conntrack is confirmed
 	 */
 	return nf_ct_tuple_equal(tuple, &h->tuple) &&
-		nf_ct_zone(ct) == zone &&
-		nf_ct_is_confirmed(ct);
+	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
+	       nf_ct_is_confirmed(ct);
 }
 
 /*
@@ -450,7 +442,7 @@
  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
  */
 static struct nf_conntrack_tuple_hash *
-____nf_conntrack_find(struct net *net, u16 zone,
+____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
@@ -486,7 +478,7 @@
 
 /* Find a connection corresponding to a tuple. */
 static struct nf_conntrack_tuple_hash *
-__nf_conntrack_find_get(struct net *net, u16 zone,
+__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 			const struct nf_conntrack_tuple *tuple, u32 hash)
 {
 	struct nf_conntrack_tuple_hash *h;
@@ -513,11 +505,11 @@
 }
 
 struct nf_conntrack_tuple_hash *
-nf_conntrack_find_get(struct net *net, u16 zone,
+nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple)
 {
 	return __nf_conntrack_find_get(net, zone, tuple,
-				       hash_conntrack_raw(tuple, zone));
+				       hash_conntrack_raw(tuple));
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
@@ -536,11 +528,11 @@
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct net *net = nf_ct_net(ct);
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	u16 zone;
 	unsigned int sequence;
 
 	zone = nf_ct_zone(ct);
@@ -548,9 +540,9 @@
 	local_bh_disable();
 	do {
 		sequence = read_seqcount_begin(&net->ct.generation);
-		hash = hash_conntrack(net, zone,
+		hash = hash_conntrack(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-		reply_hash = hash_conntrack(net, zone,
+		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 
@@ -558,12 +550,14 @@
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 
 	add_timer(&ct->timeout);
@@ -588,6 +582,7 @@
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
+	const struct nf_conntrack_zone *zone;
 	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
@@ -596,7 +591,6 @@
 	struct hlist_nulls_node *n;
 	enum ip_conntrack_info ctinfo;
 	struct net *net;
-	u16 zone;
 	unsigned int sequence;
 
 	ct = nf_ct_get(skb, &ctinfo);
@@ -617,7 +611,7 @@
 		/* reuse the hash saved before */
 		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
 		hash = hash_bucket(hash, net);
-		reply_hash = hash_conntrack(net, zone,
+		reply_hash = hash_conntrack(net,
 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
@@ -649,12 +643,14 @@
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
-		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
+		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
+				     NF_CT_DIRECTION(h)))
 			goto out;
 
 	/* Timer relative to confirmation time, not original
@@ -707,11 +703,14 @@
 			 const struct nf_conn *ignored_conntrack)
 {
 	struct net *net = nf_ct_net(ignored_conntrack);
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	struct nf_conn *ct;
-	u16 zone = nf_ct_zone(ignored_conntrack);
-	unsigned int hash = hash_conntrack(net, zone, tuple);
+	unsigned int hash;
+
+	zone = nf_ct_zone(ignored_conntrack);
+	hash = hash_conntrack(net, tuple);
 
 	/* Disable BHs the entire time since we need to disable them at
 	 * least once for the stats anyway.
@@ -721,7 +720,7 @@
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		if (ct != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple) &&
-		    nf_ct_zone(ct) == zone) {
+		    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
 			NF_CT_STAT_INC(net, found);
 			rcu_read_unlock_bh();
 			return 1;
@@ -810,7 +809,8 @@
 }
 
 static struct nf_conn *
-__nf_conntrack_alloc(struct net *net, u16 zone,
+__nf_conntrack_alloc(struct net *net,
+		     const struct nf_conntrack_zone *zone,
 		     const struct nf_conntrack_tuple *orig,
 		     const struct nf_conntrack_tuple *repl,
 		     gfp_t gfp, u32 hash)
@@ -820,7 +820,7 @@
 	if (unlikely(!nf_conntrack_hash_rnd)) {
 		init_nf_conntrack_hash_rnd();
 		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
-		hash = hash_conntrack_raw(orig, zone);
+		hash = hash_conntrack_raw(orig);
 	}
 
 	/* We don't want any race condition at early drop stage */
@@ -840,10 +840,9 @@
 	 * SLAB_DESTROY_BY_RCU.
 	 */
 	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
-	if (ct == NULL) {
-		atomic_dec(&net->ct.count);
-		return ERR_PTR(-ENOMEM);
-	}
+	if (ct == NULL)
+		goto out;
+
 	spin_lock_init(&ct->lock);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -857,31 +856,24 @@
 	memset(&ct->__nfct_init_offset[0], 0,
 	       offsetof(struct nf_conn, proto) -
 	       offsetof(struct nf_conn, __nfct_init_offset[0]));
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-	if (zone) {
-		struct nf_conntrack_zone *nf_ct_zone;
 
-		nf_ct_zone = nf_ct_ext_add(ct, NF_CT_EXT_ZONE, GFP_ATOMIC);
-		if (!nf_ct_zone)
-			goto out_free;
-		nf_ct_zone->id = zone;
-	}
-#endif
+	if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
+		goto out_free;
+
 	/* Because we use RCU lookups, we set ct_general.use to zero before
 	 * this is inserted in any list.
 	 */
 	atomic_set(&ct->ct_general.use, 0);
 	return ct;
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
 out_free:
-	atomic_dec(&net->ct.count);
 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+out:
+	atomic_dec(&net->ct.count);
 	return ERR_PTR(-ENOMEM);
-#endif
 }
 
-struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+struct nf_conn *nf_conntrack_alloc(struct net *net,
+				   const struct nf_conntrack_zone *zone,
 				   const struct nf_conntrack_tuple *orig,
 				   const struct nf_conntrack_tuple *repl,
 				   gfp_t gfp)
@@ -923,8 +915,9 @@
 	struct nf_conntrack_tuple repl_tuple;
 	struct nf_conntrack_ecache *ecache;
 	struct nf_conntrack_expect *exp = NULL;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
+	const struct nf_conntrack_zone *zone;
 	struct nf_conn_timeout *timeout_ext;
+	struct nf_conntrack_zone tmp;
 	unsigned int *timeouts;
 
 	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
@@ -932,6 +925,7 @@
 		return NULL;
 	}
 
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
 				  hash);
 	if (IS_ERR(ct))
@@ -1026,10 +1020,11 @@
 		  int *set_reply,
 		  enum ip_conntrack_info *ctinfo)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
+	struct nf_conntrack_zone tmp;
 	struct nf_conn *ct;
-	u16 zone = tmpl ? nf_ct_zone(tmpl) : NF_CT_DEFAULT_ZONE;
 	u32 hash;
 
 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
@@ -1040,7 +1035,8 @@
 	}
 
 	/* look for tuple match */
-	hash = hash_conntrack_raw(&tuple, zone);
+	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
+	hash = hash_conntrack_raw(&tuple);
 	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
 	if (!h) {
 		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
@@ -1596,8 +1592,7 @@
 					struct nf_conntrack_tuple_hash, hnnode);
 			ct = nf_ct_tuplehash_to_ctrack(h);
 			hlist_nulls_del_rcu(&h->hnnode);
-			bucket = __hash_conntrack(&h->tuple, nf_ct_zone(ct),
-						  hashsize);
+			bucket = __hash_conntrack(&h->tuple, hashsize);
 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
 		}
 	}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index b45a422..acf5c7b 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -88,7 +88,8 @@
 }
 
 struct nf_conntrack_expect *
-__nf_ct_expect_find(struct net *net, u16 zone,
+__nf_ct_expect_find(struct net *net,
+		    const struct nf_conntrack_zone *zone,
 		    const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i;
@@ -100,7 +101,7 @@
 	h = nf_ct_expect_dst_hash(tuple);
 	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
 		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-		    nf_ct_zone(i->master) == zone)
+		    nf_ct_zone_equal_any(i->master, zone))
 			return i;
 	}
 	return NULL;
@@ -109,7 +110,8 @@
 
 /* Just find a expectation corresponding to a tuple. */
 struct nf_conntrack_expect *
-nf_ct_expect_find_get(struct net *net, u16 zone,
+nf_ct_expect_find_get(struct net *net,
+		      const struct nf_conntrack_zone *zone,
 		      const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i;
@@ -127,7 +129,8 @@
 /* If an expectation for this connection is found, it gets deleted from
  * the global list and then returned. */
 struct nf_conntrack_expect *
-nf_ct_find_expectation(struct net *net, u16 zone,
+nf_ct_find_expectation(struct net *net,
+		       const struct nf_conntrack_zone *zone,
 		       const struct nf_conntrack_tuple *tuple)
 {
 	struct nf_conntrack_expect *i, *exp = NULL;
@@ -140,7 +143,7 @@
 	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
 		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
 		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
-		    nf_ct_zone(i->master) == zone) {
+		    nf_ct_zone_equal_any(i->master, zone)) {
 			exp = i;
 			break;
 		}
@@ -220,16 +223,16 @@
 	}
 
 	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
-	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
+	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 static inline int expect_matches(const struct nf_conntrack_expect *a,
 				 const struct nf_conntrack_expect *b)
 {
 	return a->master == b->master && a->class == b->class &&
-		nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
-		nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
-		nf_ct_zone(a->master) == nf_ct_zone(b->master);
+	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
+	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
+	       nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
 }
 
 /* Generally a bad idea to call this: could have matched already. */
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index bb53f12..3ce5c31 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -14,6 +14,8 @@
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_labels.h>
 
+static spinlock_t nf_connlabels_lock;
+
 static unsigned int label_bits(const struct nf_conn_labels *l)
 {
 	unsigned int longs = l->words;
@@ -48,7 +50,6 @@
 }
 EXPORT_SYMBOL_GPL(nf_connlabel_set);
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static void replace_u32(u32 *address, u32 mask, u32 new)
 {
 	u32 old, tmp;
@@ -89,7 +90,35 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_connlabels_replace);
-#endif
+
+int nf_connlabels_get(struct net *net, unsigned int n_bits)
+{
+	size_t words;
+
+	if (n_bits > (NF_CT_LABELS_MAX_SIZE * BITS_PER_BYTE))
+		return -ERANGE;
+
+	words = BITS_TO_LONGS(n_bits);
+
+	spin_lock(&nf_connlabels_lock);
+	net->ct.labels_used++;
+	if (words > net->ct.label_words)
+		net->ct.label_words = words;
+	spin_unlock(&nf_connlabels_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(nf_connlabels_get);
+
+void nf_connlabels_put(struct net *net)
+{
+	spin_lock(&nf_connlabels_lock);
+	net->ct.labels_used--;
+	if (net->ct.labels_used == 0)
+		net->ct.label_words = 0;
+	spin_unlock(&nf_connlabels_lock);
+}
+EXPORT_SYMBOL_GPL(nf_connlabels_put);
 
 static struct nf_ct_ext_type labels_extend __read_mostly = {
 	.len    = sizeof(struct nf_conn_labels),
@@ -99,6 +128,7 @@
 
 int nf_conntrack_labels_init(void)
 {
+	spin_lock_init(&nf_connlabels_lock);
 	return nf_ct_extend_register(&labels_extend);
 }
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6b8b0ab..94a6654 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -128,6 +128,20 @@
 }
 
 static inline int
+ctnetlink_dump_zone_id(struct sk_buff *skb, int attrtype,
+		       const struct nf_conntrack_zone *zone, int dir)
+{
+	if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
+		return 0;
+	if (nla_put_be16(skb, attrtype, htons(zone->id)))
+		goto nla_put_failure;
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
+static inline int
 ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
 {
 	if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
@@ -458,6 +472,7 @@
 ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
 		    struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
 	struct nlattr *nest_parms;
@@ -473,11 +488,16 @@
 	nfmsg->version      = NFNETLINK_V0;
 	nfmsg->res_id	    = 0;
 
+	zone = nf_ct_zone(ct);
+
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_ORIG) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -485,10 +505,13 @@
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_REPL) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct) &&
-	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+				   NF_CT_DEFAULT_ZONE_DIR) < 0)
 		goto nla_put_failure;
 
 	if (ctnetlink_dump_status(skb, ct) < 0 ||
@@ -598,7 +621,7 @@
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
 	       + ctnetlink_proto_size(ct)
 	       + ctnetlink_label_size(ct)
@@ -609,6 +632,7 @@
 static int
 ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
 {
+	const struct nf_conntrack_zone *zone;
 	struct net *net;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
@@ -655,11 +679,16 @@
 	nfmsg->res_id	= 0;
 
 	rcu_read_lock();
+	zone = nf_ct_zone(ct);
+
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_ORIG) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -667,10 +696,13 @@
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_REPL) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct) &&
-	    nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+				   NF_CT_DEFAULT_ZONE_DIR) < 0)
 		goto nla_put_failure;
 
 	if (ctnetlink_dump_id(skb, ct) < 0)
@@ -920,15 +952,54 @@
 	return ret;
 }
 
+static int
+ctnetlink_parse_zone(const struct nlattr *attr,
+		     struct nf_conntrack_zone *zone)
+{
+	nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
+			NF_CT_DEFAULT_ZONE_DIR, 0);
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+	if (attr)
+		zone->id = ntohs(nla_get_be16(attr));
+#else
+	if (attr)
+		return -EOPNOTSUPP;
+#endif
+	return 0;
+}
+
+static int
+ctnetlink_parse_tuple_zone(struct nlattr *attr, enum ctattr_type type,
+			   struct nf_conntrack_zone *zone)
+{
+	int ret;
+
+	if (zone->id != NF_CT_DEFAULT_ZONE_ID)
+		return -EINVAL;
+
+	ret = ctnetlink_parse_zone(attr, zone);
+	if (ret < 0)
+		return ret;
+
+	if (type == CTA_TUPLE_REPLY)
+		zone->dir = NF_CT_ZONE_DIR_REPL;
+	else
+		zone->dir = NF_CT_ZONE_DIR_ORIG;
+
+	return 0;
+}
+
 static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
 	[CTA_TUPLE_IP]		= { .type = NLA_NESTED },
 	[CTA_TUPLE_PROTO]	= { .type = NLA_NESTED },
+	[CTA_TUPLE_ZONE]	= { .type = NLA_U16 },
 };
 
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
 		      struct nf_conntrack_tuple *tuple,
-		      enum ctattr_type type, u_int8_t l3num)
+		      enum ctattr_type type, u_int8_t l3num,
+		      struct nf_conntrack_zone *zone)
 {
 	struct nlattr *tb[CTA_TUPLE_MAX+1];
 	int err;
@@ -955,6 +1026,16 @@
 	if (err < 0)
 		return err;
 
+	if (tb[CTA_TUPLE_ZONE]) {
+		if (!zone)
+			return -EINVAL;
+
+		err = ctnetlink_parse_tuple_zone(tb[CTA_TUPLE_ZONE],
+						 type, zone);
+		if (err < 0)
+			return err;
+	}
+
 	/* orig and expect tuples get DIR_ORIGINAL */
 	if (type == CTA_TUPLE_REPLY)
 		tuple->dst.dir = IP_CT_DIR_REPLY;
@@ -964,21 +1045,6 @@
 	return 0;
 }
 
-static int
-ctnetlink_parse_zone(const struct nlattr *attr, u16 *zone)
-{
-	if (attr)
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-		*zone = ntohs(nla_get_be16(attr));
-#else
-		return -EOPNOTSUPP;
-#endif
-	else
-		*zone = 0;
-
-	return 0;
-}
-
 static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
 	[CTA_HELP_NAME]		= { .type = NLA_NUL_STRING,
 				    .len = NF_CT_HELPER_NAME_LEN - 1 },
@@ -1058,7 +1124,7 @@
 	struct nf_conn *ct;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1066,9 +1132,11 @@
 		return err;
 
 	if (cda[CTA_TUPLE_ORIG])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+					    u3, &zone);
 	else if (cda[CTA_TUPLE_REPLY])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+					    u3, &zone);
 	else {
 		return ctnetlink_flush_conntrack(net, cda,
 						 NETLINK_CB(skb).portid,
@@ -1078,7 +1146,7 @@
 	if (err < 0)
 		return err;
 
-	h = nf_conntrack_find_get(net, zone, &tuple);
+	h = nf_conntrack_find_get(net, &zone, &tuple);
 	if (!h)
 		return -ENOENT;
 
@@ -1112,7 +1180,7 @@
 	struct sk_buff *skb2 = NULL;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -1138,16 +1206,18 @@
 		return err;
 
 	if (cda[CTA_TUPLE_ORIG])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG,
+					    u3, &zone);
 	else if (cda[CTA_TUPLE_REPLY])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY,
+					    u3, &zone);
 	else
 		return -EINVAL;
 
 	if (err < 0)
 		return err;
 
-	h = nf_conntrack_find_get(net, zone, &tuple);
+	h = nf_conntrack_find_get(net, &zone, &tuple);
 	if (!h)
 		return -ENOENT;
 
@@ -1645,7 +1715,8 @@
 }
 
 static struct nf_conn *
-ctnetlink_create_conntrack(struct net *net, u16 zone,
+ctnetlink_create_conntrack(struct net *net,
+			   const struct nf_conntrack_zone *zone,
 			   const struct nlattr * const cda[],
 			   struct nf_conntrack_tuple *otuple,
 			   struct nf_conntrack_tuple *rtuple,
@@ -1761,7 +1832,8 @@
 		struct nf_conntrack_tuple_hash *master_h;
 		struct nf_conn *master_ct;
 
-		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
+		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER,
+					    u3, NULL);
 		if (err < 0)
 			goto err2;
 
@@ -1804,7 +1876,7 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct nf_conn *ct;
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
@@ -1812,21 +1884,23 @@
 		return err;
 
 	if (cda[CTA_TUPLE_ORIG]) {
-		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
+		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG,
+					    u3, &zone);
 		if (err < 0)
 			return err;
 	}
 
 	if (cda[CTA_TUPLE_REPLY]) {
-		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
+		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY,
+					    u3, &zone);
 		if (err < 0)
 			return err;
 	}
 
 	if (cda[CTA_TUPLE_ORIG])
-		h = nf_conntrack_find_get(net, zone, &otuple);
+		h = nf_conntrack_find_get(net, &zone, &otuple);
 	else if (cda[CTA_TUPLE_REPLY])
-		h = nf_conntrack_find_get(net, zone, &rtuple);
+		h = nf_conntrack_find_get(net, &zone, &rtuple);
 
 	if (h == NULL) {
 		err = -ENOENT;
@@ -1836,7 +1910,7 @@
 			if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
 				return -EINVAL;
 
-			ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
+			ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple,
 							&rtuple, u3);
 			if (IS_ERR(ct))
 				return PTR_ERR(ct);
@@ -2082,7 +2156,7 @@
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
 #endif
 #ifdef CONFIG_NF_CONNTRACK_ZONES
-	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
+	       + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE|CTA_TUPLE_ZONE */
 #endif
 	       + ctnetlink_proto_size(ct)
 	       ;
@@ -2091,14 +2165,20 @@
 static int
 ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
 {
+	const struct nf_conntrack_zone *zone;
 	struct nlattr *nest_parms;
 
 	rcu_read_lock();
+	zone = nf_ct_zone(ct);
+
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
 	if (!nest_parms)
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_ORIG) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
 	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
@@ -2106,12 +2186,14 @@
 		goto nla_put_failure;
 	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
 		goto nla_put_failure;
+	if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
+				   NF_CT_ZONE_DIR_REPL) < 0)
+		goto nla_put_failure;
 	nla_nest_end(skb, nest_parms);
 
-	if (nf_ct_zone(ct)) {
-		if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
-			goto nla_put_failure;
-	}
+	if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
+				   NF_CT_DEFAULT_ZONE_DIR) < 0)
+		goto nla_put_failure;
 
 	if (ctnetlink_dump_id(skb, ct) < 0)
 		goto nla_put_failure;
@@ -2218,12 +2300,12 @@
 	int err;
 
 	err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
-				    nf_ct_l3num(ct));
+				    nf_ct_l3num(ct), NULL);
 	if (err < 0)
 		return err;
 
 	return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
-				     nf_ct_l3num(ct));
+				     nf_ct_l3num(ct), NULL);
 }
 
 static int
@@ -2612,23 +2694,22 @@
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
-	u16 zone = 0;
+	struct nf_conntrack_zone zone;
 	struct netlink_dump_control c = {
 		.dump = ctnetlink_exp_ct_dump_table,
 		.done = ctnetlink_exp_done,
 	};
 
-	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
-	if (cda[CTA_EXPECT_ZONE]) {
-		err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
-		if (err < 0)
-			return err;
-	}
+	err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
+	if (err < 0)
+		return err;
 
-	h = nf_conntrack_find_get(net, zone, &tuple);
+	h = nf_conntrack_find_get(net, &zone, &tuple);
 	if (!h)
 		return -ENOENT;
 
@@ -2652,7 +2733,7 @@
 	struct sk_buff *skb2;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -2672,16 +2753,18 @@
 		return err;
 
 	if (cda[CTA_EXPECT_TUPLE])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+					    u3, NULL);
 	else if (cda[CTA_EXPECT_MASTER])
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER,
+					    u3, NULL);
 	else
 		return -EINVAL;
 
 	if (err < 0)
 		return err;
 
-	exp = nf_ct_expect_find_get(net, zone, &tuple);
+	exp = nf_ct_expect_find_get(net, &zone, &tuple);
 	if (!exp)
 		return -ENOENT;
 
@@ -2732,8 +2815,8 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	struct hlist_node *next;
 	u_int8_t u3 = nfmsg->nfgen_family;
+	struct nf_conntrack_zone zone;
 	unsigned int i;
-	u16 zone;
 	int err;
 
 	if (cda[CTA_EXPECT_TUPLE]) {
@@ -2742,12 +2825,13 @@
 		if (err < 0)
 			return err;
 
-		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+					    u3, NULL);
 		if (err < 0)
 			return err;
 
 		/* bump usage count to 2 */
-		exp = nf_ct_expect_find_get(net, zone, &tuple);
+		exp = nf_ct_expect_find_get(net, &zone, &tuple);
 		if (!exp)
 			return -ENOENT;
 
@@ -2849,7 +2933,8 @@
 		return -EINVAL;
 
 	err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
-					&nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+				    &nat_tuple, CTA_EXPECT_NAT_TUPLE,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
@@ -2937,7 +3022,8 @@
 }
 
 static int
-ctnetlink_create_expect(struct net *net, u16 zone,
+ctnetlink_create_expect(struct net *net,
+			const struct nf_conntrack_zone *zone,
 			const struct nlattr * const cda[],
 			u_int8_t u3, u32 portid, int report)
 {
@@ -2949,13 +3035,16 @@
 	int err;
 
 	/* caller guarantees that those three CTA_EXPECT_* exist */
-	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+				    u3, NULL);
 	if (err < 0)
 		return err;
-	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK,
+				    u3, NULL);
 	if (err < 0)
 		return err;
-	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
@@ -3011,7 +3100,7 @@
 	struct nf_conntrack_expect *exp;
 	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
 	u_int8_t u3 = nfmsg->nfgen_family;
-	u16 zone;
+	struct nf_conntrack_zone zone;
 	int err;
 
 	if (!cda[CTA_EXPECT_TUPLE]
@@ -3023,19 +3112,18 @@
 	if (err < 0)
 		return err;
 
-	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE,
+				    u3, NULL);
 	if (err < 0)
 		return err;
 
 	spin_lock_bh(&nf_conntrack_expect_lock);
-	exp = __nf_ct_expect_find(net, zone, &tuple);
-
+	exp = __nf_ct_expect_find(net, &zone, &tuple);
 	if (!exp) {
 		spin_unlock_bh(&nf_conntrack_expect_lock);
 		err = -ENOENT;
 		if (nlh->nlmsg_flags & NLM_F_CREATE) {
-			err = ctnetlink_create_expect(net, zone, cda,
-						      u3,
+			err = ctnetlink_create_expect(net, &zone, cda, u3,
 						      NETLINK_CB(skb).portid,
 						      nlmsg_report(nlh));
 		}
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 825c3e3..5588c7a 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -143,13 +143,14 @@
 				  const struct nf_conntrack_tuple *t)
 {
 	const struct nf_conntrack_tuple_hash *h;
+	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_expect *exp;
 	struct nf_conn *sibling;
-	u16 zone = nf_ct_zone(ct);
 
 	pr_debug("trying to timeout ct or exp for tuple ");
 	nf_ct_dump_tuple(t);
 
+	zone = nf_ct_zone(ct);
 	h = nf_conntrack_find_get(net, zone, t);
 	if (h)  {
 		sibling = nf_ct_tuplehash_to_ctrack(h);
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index b45da90..6719773 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -42,6 +42,8 @@
 	"SHUTDOWN_SENT",
 	"SHUTDOWN_RECD",
 	"SHUTDOWN_ACK_SENT",
+	"HEARTBEAT_SENT",
+	"HEARTBEAT_ACKED",
 };
 
 #define SECS  * HZ
@@ -57,6 +59,8 @@
 	[SCTP_CONNTRACK_SHUTDOWN_SENT]		= 300 SECS / 1000,
 	[SCTP_CONNTRACK_SHUTDOWN_RECD]		= 300 SECS / 1000,
 	[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT]	= 3 SECS,
+	[SCTP_CONNTRACK_HEARTBEAT_SENT]		= 30 SECS,
+	[SCTP_CONNTRACK_HEARTBEAT_ACKED]	= 210 SECS,
 };
 
 #define sNO SCTP_CONNTRACK_NONE
@@ -67,6 +71,8 @@
 #define	sSS SCTP_CONNTRACK_SHUTDOWN_SENT
 #define	sSR SCTP_CONNTRACK_SHUTDOWN_RECD
 #define	sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define	sHS SCTP_CONNTRACK_HEARTBEAT_SENT
+#define	sHA SCTP_CONNTRACK_HEARTBEAT_ACKED
 #define	sIV SCTP_CONNTRACK_MAX
 
 /*
@@ -88,6 +94,10 @@
 		    to that of the SHUTDOWN chunk.
 CLOSED            - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
 		    the SHUTDOWN chunk. Connection is closed.
+HEARTBEAT_SENT    - We have seen a HEARTBEAT in a new flow.
+HEARTBEAT_ACKED   - We have seen a HEARTBEAT-ACK in the direction opposite to
+		    that of the HEARTBEAT chunk. Secondary connection is
+		    established.
 */
 
 /* TODO
@@ -97,36 +107,40 @@
  - Check the error type in the reply dir before transitioning from
 cookie echoed to closed.
  - Sec 5.2.4 of RFC 2960
- - Multi Homing support.
+ - Full Multi Homing support.
 */
 
 /* SCTP conntrack state transitions */
-static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
 	{
 /*	ORIGINAL	*/
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
-/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
-/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
-/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
-/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init         */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA},
+/* init_ack     */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},
+/* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA},
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't have Stale cookie*/
+/* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* 5.2.4 - Big TODO */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA},/* Can't come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA},
+/* heartbeat    */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA}
 	},
 	{
 /*	REPLY	*/
-/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
-/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
-/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
-/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
-/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
-/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
-/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
-/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+/*                  sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA */
+/* init         */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* INIT in sCL Big TODO */
+/* init_ack     */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},
+/* abort        */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL},
+/* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA},
+/* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA},
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA},/* Can't come in reply dir */
+/* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA},
+/* heartbeat    */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA},
+/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA}
 	}
 };
 
@@ -278,9 +292,16 @@
 		pr_debug("SCTP_CID_SHUTDOWN_COMPLETE\n");
 		i = 8;
 		break;
+	case SCTP_CID_HEARTBEAT:
+		pr_debug("SCTP_CID_HEARTBEAT\n");
+		i = 9;
+		break;
+	case SCTP_CID_HEARTBEAT_ACK:
+		pr_debug("SCTP_CID_HEARTBEAT_ACK\n");
+		i = 10;
+		break;
 	default:
-		/* Other chunks like DATA, SACK, HEARTBEAT and
-		its ACK do not cause a change in state */
+		/* Other chunks like DATA or SACK do not change the state */
 		pr_debug("Unknown chunk type, Will stay in %s\n",
 			 sctp_conntrack_names[cur_state]);
 		return cur_state;
@@ -329,6 +350,8 @@
 	    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
 	    !test_bit(SCTP_CID_ABORT, map) &&
 	    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
 	    sh->vtag != ct->proto.sctp.vtag[dir]) {
 		pr_debug("Verification tag check failed\n");
 		goto out;
@@ -357,6 +380,16 @@
 			/* Sec 8.5.1 (D) */
 			if (sh->vtag != ct->proto.sctp.vtag[dir])
 				goto out_unlock;
+		} else if (sch->type == SCTP_CID_HEARTBEAT ||
+			   sch->type == SCTP_CID_HEARTBEAT_ACK) {
+			if (ct->proto.sctp.vtag[dir] == 0) {
+				pr_debug("Setting vtag %x for dir %d\n",
+					 sh->vtag, dir);
+				ct->proto.sctp.vtag[dir] = sh->vtag;
+			} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+				pr_debug("Verification tag check failed\n");
+				goto out_unlock;
+			}
 		}
 
 		old_state = ct->proto.sctp.state;
@@ -466,6 +499,10 @@
 				/* Sec 8.5.1 (A) */
 				return false;
 			}
+		} else if (sch->type == SCTP_CID_HEARTBEAT) {
+			pr_debug("Setting vtag %x for secondary conntrack\n",
+				 sh->vtag);
+			ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
 		}
 		/* If it is a shutdown ack OOTB packet, we expect a return
 		   shutdown complete, otherwise an ABORT Sec 8.4 (5) and (8) */
@@ -610,6 +647,8 @@
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_SENT]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_RECD]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]	= { .type = NLA_U32 },
+	[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]	= { .type = NLA_U32 },
+	[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]	= { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
 
@@ -658,6 +697,18 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
+	{
+		.procname	= "nf_conntrack_sctp_timeout_heartbeat_sent",
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
+	{
+		.procname	= "nf_conntrack_sctp_timeout_heartbeat_acked",
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_jiffies,
+	},
 	{ }
 };
 
@@ -730,6 +781,8 @@
 	pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
 	pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
 	pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+	pn->ctl_table[7].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_SENT];
+	pn->ctl_table[8].data = &sn->timeouts[SCTP_CONNTRACK_HEARTBEAT_ACKED];
 #endif
 	return 0;
 }
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index ce3e840c8..dff0f0c 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -103,9 +103,9 @@
 			 ntohl(sack->end_seq), ntohl(new_end_seq));
 
 		inet_proto_csum_replace4(&tcph->check, skb,
-					 sack->start_seq, new_start_seq, 0);
+					 sack->start_seq, new_start_seq, false);
 		inet_proto_csum_replace4(&tcph->check, skb,
-					 sack->end_seq, new_end_seq, 0);
+					 sack->end_seq, new_end_seq, false);
 		sack->start_seq = new_start_seq;
 		sack->end_seq = new_end_seq;
 		sackoff += sizeof(*sack);
@@ -193,8 +193,9 @@
 	newseq = htonl(ntohl(tcph->seq) + seqoff);
 	newack = htonl(ntohl(tcph->ack_seq) - ackoff);
 
-	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
-	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, false);
+	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack,
+				 false);
 
 	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
 		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index fc823fa..1fb3cac 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -140,6 +140,35 @@
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+			 int dir)
+{
+	const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+	if (zone->dir != dir)
+		return;
+	switch (zone->dir) {
+	case NF_CT_DEFAULT_ZONE_DIR:
+		seq_printf(s, "zone=%u ", zone->id);
+		break;
+	case NF_CT_ZONE_DIR_ORIG:
+		seq_printf(s, "zone-orig=%u ", zone->id);
+		break;
+	case NF_CT_ZONE_DIR_REPL:
+		seq_printf(s, "zone-reply=%u ", zone->id);
+		break;
+	default:
+		break;
+	}
+}
+#else
+static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct,
+				int dir)
+{
+}
+#endif
+
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
 static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
 {
@@ -202,6 +231,8 @@
 	print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
 		    l3proto, l4proto);
 
+	ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG);
+
 	if (seq_has_overflowed(s))
 		goto release;
 
@@ -214,6 +245,8 @@
 	print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 		    l3proto, l4proto);
 
+	ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL);
+
 	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
 		goto release;
 
@@ -228,11 +261,7 @@
 #endif
 
 	ct_show_secctx(s, ct);
-
-#ifdef CONFIG_NF_CONNTRACK_ZONES
-	seq_printf(s, "zone=%u ", nf_ct_zone(ct));
-#endif
-
+	ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
 	ct_show_delta_time(s, ct);
 
 	seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 3992106..0655225 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -19,7 +19,7 @@
 /* nf_queue.c */
 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
 	     struct nf_hook_state *state, unsigned int queuenum);
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops);
 int __init netfilter_queue_init(void);
 
 /* nf_log.c */
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 4e0b478..5113dfd 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -118,14 +118,13 @@
 
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
-hash_by_src(const struct net *net, u16 zone,
-	    const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
 {
 	unsigned int hash;
 
 	/* Original src, to ensure we map it consistently if poss. */
 	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
-		      tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
+		      tuple->dst.protonum ^ nf_conntrack_hash_rnd);
 
 	return reciprocal_scale(hash, net->ct.nat_htable_size);
 }
@@ -185,20 +184,22 @@
 
 /* Only called for SRC manip */
 static int
-find_appropriate_src(struct net *net, u16 zone,
+find_appropriate_src(struct net *net,
+		     const struct nf_conntrack_zone *zone,
 		     const struct nf_nat_l3proto *l3proto,
 		     const struct nf_nat_l4proto *l4proto,
 		     const struct nf_conntrack_tuple *tuple,
 		     struct nf_conntrack_tuple *result,
 		     const struct nf_nat_range *range)
 {
-	unsigned int h = hash_by_src(net, zone, tuple);
+	unsigned int h = hash_by_src(net, tuple);
 	const struct nf_conn_nat *nat;
 	const struct nf_conn *ct;
 
 	hlist_for_each_entry_rcu(nat, &net->ct.nat_bysource[h], bysource) {
 		ct = nat->ct;
-		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
+		if (same_src(ct, tuple) &&
+		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
 			/* Copy source part from reply tuple. */
 			nf_ct_invert_tuplepr(result,
 				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
@@ -218,7 +219,8 @@
  * the ip with the lowest src-ip/dst-ip/proto usage.
  */
 static void
-find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
+find_best_ips_proto(const struct nf_conntrack_zone *zone,
+		    struct nf_conntrack_tuple *tuple,
 		    const struct nf_nat_range *range,
 		    const struct nf_conn *ct,
 		    enum nf_nat_manip_type maniptype)
@@ -258,7 +260,7 @@
 	 */
 	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
 		   range->flags & NF_NAT_RANGE_PERSISTENT ?
-			0 : (__force u32)tuple->dst.u3.all[max] ^ zone);
+			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
 
 	full_range = false;
 	for (i = 0; i <= max; i++) {
@@ -297,10 +299,12 @@
 		 struct nf_conn *ct,
 		 enum nf_nat_manip_type maniptype)
 {
+	const struct nf_conntrack_zone *zone;
 	const struct nf_nat_l3proto *l3proto;
 	const struct nf_nat_l4proto *l4proto;
 	struct net *net = nf_ct_net(ct);
-	u16 zone = nf_ct_zone(ct);
+
+	zone = nf_ct_zone(ct);
 
 	rcu_read_lock();
 	l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
@@ -420,7 +424,7 @@
 	if (maniptype == NF_NAT_MANIP_SRC) {
 		unsigned int srchash;
 
-		srchash = hash_by_src(net, nf_ct_zone(ct),
+		srchash = hash_by_src(net,
 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
 		spin_lock_bh(&nf_nat_lock);
 		/* nf_conntrack_alter_reply might re-allocate extension area */
diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
index b8067b5..15c47b2 100644
--- a/net/netfilter/nf_nat_proto_dccp.c
+++ b/net/netfilter/nf_nat_proto_dccp.c
@@ -69,7 +69,7 @@
 	l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
 			     tuple, maniptype);
 	inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
-				 0);
+				 false);
 	return true;
 }
 
diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
index 37f5505..4f8820f 100644
--- a/net/netfilter/nf_nat_proto_tcp.c
+++ b/net/netfilter/nf_nat_proto_tcp.c
@@ -70,7 +70,7 @@
 		return true;
 
 	l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-	inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+	inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
 	return true;
 }
 
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
index b0ede2f..b1e6272 100644
--- a/net/netfilter/nf_nat_proto_udp.c
+++ b/net/netfilter/nf_nat_proto_udp.c
@@ -57,7 +57,7 @@
 		l3proto->csum_update(skb, iphdroff, &hdr->check,
 				     tuple, maniptype);
 		inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
-					 0);
+					 false);
 		if (!hdr->check)
 			hdr->check = CSUM_MANGLED_0;
 	}
diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c
index 368f14e..58340c97 100644
--- a/net/netfilter/nf_nat_proto_udplite.c
+++ b/net/netfilter/nf_nat_proto_udplite.c
@@ -56,7 +56,7 @@
 	}
 
 	l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-	inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, 0);
+	inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
 	if (!hdr->check)
 		hdr->check = CSUM_MANGLED_0;
 
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8a8b2ab..96777f9 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -105,21 +105,15 @@
 }
 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
 
-void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
+void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops)
 {
 	const struct nf_queue_handler *qh;
-	struct net *net;
 
-	rtnl_lock();
 	rcu_read_lock();
 	qh = rcu_dereference(queue_handler);
-	if (qh) {
-		for_each_net(net) {
-			qh->nf_hook_drop(net, ops);
-		}
-	}
+	if (qh)
+		qh->nf_hook_drop(net, ops);
 	rcu_read_unlock();
-	rtnl_unlock();
 }
 
 /*
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index d7f1685..888b955 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -17,10 +17,12 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_tcpudp.h>
 #include <linux/netfilter/xt_SYNPROXY.h>
+
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_extend.h>
 #include <net/netfilter/nf_conntrack_seqadj.h>
 #include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_zones.h>
 
 int synproxy_net_id;
 EXPORT_SYMBOL_GPL(synproxy_net_id);
@@ -186,7 +188,7 @@
 				    const struct nf_conn_synproxy *synproxy)
 {
 	unsigned int optoff, optend;
-	u32 *ptr, old;
+	__be32 *ptr, old;
 
 	if (synproxy->tsoff == 0)
 		return 1;
@@ -214,18 +216,18 @@
 			if (op[0] == TCPOPT_TIMESTAMP &&
 			    op[1] == TCPOLEN_TIMESTAMP) {
 				if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
-					ptr = (u32 *)&op[2];
+					ptr = (__be32 *)&op[2];
 					old = *ptr;
 					*ptr = htonl(ntohl(*ptr) -
 						     synproxy->tsoff);
 				} else {
-					ptr = (u32 *)&op[6];
+					ptr = (__be32 *)&op[6];
 					old = *ptr;
 					*ptr = htonl(ntohl(*ptr) +
 						     synproxy->tsoff);
 				}
 				inet_proto_csum_replace4(&th->check, skb,
-							 old, *ptr, 0);
+							 old, *ptr, false);
 				return 1;
 			}
 			optoff += op[1];
@@ -352,7 +354,7 @@
 	struct nf_conn *ct;
 	int err = -ENOMEM;
 
-	ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
+	ct = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
 	if (!ct)
 		goto err1;
 
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index cfe6368..4a41eb9 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -130,20 +130,24 @@
 int nft_register_basechain(struct nft_base_chain *basechain,
 			   unsigned int hook_nops)
 {
+	struct net *net = read_pnet(&basechain->pnet);
+
 	if (basechain->flags & NFT_BASECHAIN_DISABLED)
 		return 0;
 
-	return nf_register_hooks(basechain->ops, hook_nops);
+	return nf_register_net_hooks(net, basechain->ops, hook_nops);
 }
 EXPORT_SYMBOL_GPL(nft_register_basechain);
 
 void nft_unregister_basechain(struct nft_base_chain *basechain,
 			      unsigned int hook_nops)
 {
+	struct net *net = read_pnet(&basechain->pnet);
+
 	if (basechain->flags & NFT_BASECHAIN_DISABLED)
 		return;
 
-	nf_unregister_hooks(basechain->ops, hook_nops);
+	nf_unregister_net_hooks(net, basechain->ops, hook_nops);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_basechain);
 
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f77bad4..05d0b03 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -114,7 +114,6 @@
 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
 {
 	const struct nft_chain *chain = ops->priv, *basechain = chain;
-	const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
 	const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
 	const struct nft_rule *rule;
 	const struct nft_expr *expr, *last;
@@ -125,10 +124,6 @@
 	int rulenum;
 	unsigned int gencursor = nft_genmask_cur(net);
 
-	/* Ignore chains that are not for the current network namespace */
-	if (!net_eq(net, chain_net))
-		return NF_ACCEPT;
-
 do_chain:
 	rulenum = 0;
 	rule = list_entry(&chain->rules, struct nft_rule, list);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index c18af2f..fefbf5f 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -27,8 +27,6 @@
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfacct: Extended Netfilter accounting infrastructure");
 
-static LIST_HEAD(nfnl_acct_list);
-
 struct nf_acct {
 	atomic64_t		pkts;
 	atomic64_t		bytes;
@@ -53,6 +51,7 @@
 	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
 	struct nf_acct *nfacct, *matching = NULL;
+	struct net *net = sock_net(nfnl);
 	char *acct_name;
 	unsigned int size = 0;
 	u32 flags = 0;
@@ -64,7 +63,7 @@
 	if (strlen(acct_name) == 0)
 		return -EINVAL;
 
-	list_for_each_entry(nfacct, &nfnl_acct_list, head) {
+	list_for_each_entry(nfacct, &net->nfnl_acct_list, head) {
 		if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0)
 			continue;
 
@@ -124,7 +123,7 @@
 			     be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS])));
 	}
 	atomic_set(&nfacct->refcnt, 1);
-	list_add_tail_rcu(&nfacct->head, &nfnl_acct_list);
+	list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list);
 	return 0;
 }
 
@@ -185,6 +184,7 @@
 static int
 nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct net *net = sock_net(skb->sk);
 	struct nf_acct *cur, *last;
 	const struct nfacct_filter *filter = cb->data;
 
@@ -196,7 +196,7 @@
 		cb->args[1] = 0;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+	list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
 		if (last) {
 			if (cur != last)
 				continue;
@@ -257,6 +257,7 @@
 nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
 	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+	struct net *net = sock_net(nfnl);
 	int ret = -ENOENT;
 	struct nf_acct *cur;
 	char *acct_name;
@@ -283,7 +284,7 @@
 		return -EINVAL;
 	acct_name = nla_data(tb[NFACCT_NAME]);
 
-	list_for_each_entry(cur, &nfnl_acct_list, head) {
+	list_for_each_entry(cur, &net->nfnl_acct_list, head) {
 		struct sk_buff *skb2;
 
 		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
@@ -336,19 +337,20 @@
 nfnl_acct_del(struct sock *nfnl, struct sk_buff *skb,
 	     const struct nlmsghdr *nlh, const struct nlattr * const tb[])
 {
+	struct net *net = sock_net(nfnl);
 	char *acct_name;
 	struct nf_acct *cur;
 	int ret = -ENOENT;
 
 	if (!tb[NFACCT_NAME]) {
-		list_for_each_entry(cur, &nfnl_acct_list, head)
+		list_for_each_entry(cur, &net->nfnl_acct_list, head)
 			nfnl_acct_try_del(cur);
 
 		return 0;
 	}
 	acct_name = nla_data(tb[NFACCT_NAME]);
 
-	list_for_each_entry(cur, &nfnl_acct_list, head) {
+	list_for_each_entry(cur, &net->nfnl_acct_list, head) {
 		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX) != 0)
 			continue;
 
@@ -394,12 +396,12 @@
 
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ACCT);
 
-struct nf_acct *nfnl_acct_find_get(const char *acct_name)
+struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name)
 {
 	struct nf_acct *cur, *acct = NULL;
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(cur, &nfnl_acct_list, head) {
+	list_for_each_entry_rcu(cur, &net->nfnl_acct_list, head) {
 		if (strncmp(cur->name, acct_name, NFACCT_NAME_MAX)!= 0)
 			continue;
 
@@ -422,7 +424,9 @@
 
 void nfnl_acct_put(struct nf_acct *acct)
 {
-	atomic_dec(&acct->refcnt);
+	if (atomic_dec_and_test(&acct->refcnt))
+		kfree_rcu(acct, rcu_head);
+
 	module_put(THIS_MODULE);
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_put);
@@ -478,34 +482,59 @@
 }
 EXPORT_SYMBOL_GPL(nfnl_acct_overquota);
 
+static int __net_init nfnl_acct_net_init(struct net *net)
+{
+	INIT_LIST_HEAD(&net->nfnl_acct_list);
+
+	return 0;
+}
+
+static void __net_exit nfnl_acct_net_exit(struct net *net)
+{
+	struct nf_acct *cur, *tmp;
+
+	list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) {
+		list_del_rcu(&cur->head);
+
+		if (atomic_dec_and_test(&cur->refcnt))
+			kfree_rcu(cur, rcu_head);
+	}
+}
+
+static struct pernet_operations nfnl_acct_ops = {
+	.init	= nfnl_acct_net_init,
+	.exit	= nfnl_acct_net_exit,
+};
+
 static int __init nfnl_acct_init(void)
 {
 	int ret;
 
+	ret = register_pernet_subsys(&nfnl_acct_ops);
+	if (ret < 0) {
+		pr_err("nfnl_acct_init: failed to register pernet ops\n");
+		goto err_out;
+	}
+
 	pr_info("nfnl_acct: registering with nfnetlink.\n");
 	ret = nfnetlink_subsys_register(&nfnl_acct_subsys);
 	if (ret < 0) {
 		pr_err("nfnl_acct_init: cannot register with nfnetlink.\n");
-		goto err_out;
+		goto cleanup_pernet;
 	}
 	return 0;
+
+cleanup_pernet:
+	unregister_pernet_subsys(&nfnl_acct_ops);
 err_out:
 	return ret;
 }
 
 static void __exit nfnl_acct_exit(void)
 {
-	struct nf_acct *cur, *tmp;
-
 	pr_info("nfnl_acct: unregistering from nfnetlink.\n");
 	nfnetlink_subsys_unregister(&nfnl_acct_subsys);
-
-	list_for_each_entry_safe(cur, tmp, &nfnl_acct_list, head) {
-		list_del_rcu(&cur->head);
-		/* We are sure that our objects have no clients at this point,
-		 * it's safe to release them all without checking refcnt. */
-		kfree_rcu(cur, rcu_head);
-	}
+	unregister_pernet_subsys(&nfnl_acct_ops);
 }
 
 module_init(nfnl_acct_init);
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1759123..1067fb4 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -18,39 +18,59 @@
 #include <net/netfilter/nf_tables.h>
 
 struct nft_counter {
-	seqlock_t	lock;
 	u64		bytes;
 	u64		packets;
 };
 
+struct nft_counter_percpu {
+	struct nft_counter	counter;
+	struct u64_stats_sync	syncp;
+};
+
+struct nft_counter_percpu_priv {
+	struct nft_counter_percpu __percpu *counter;
+};
+
 static void nft_counter_eval(const struct nft_expr *expr,
 			     struct nft_regs *regs,
 			     const struct nft_pktinfo *pkt)
 {
-	struct nft_counter *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu *this_cpu;
 
-	write_seqlock_bh(&priv->lock);
-	priv->bytes += pkt->skb->len;
-	priv->packets++;
-	write_sequnlock_bh(&priv->lock);
+	local_bh_disable();
+	this_cpu = this_cpu_ptr(priv->counter);
+	u64_stats_update_begin(&this_cpu->syncp);
+	this_cpu->counter.bytes += pkt->skb->len;
+	this_cpu->counter.packets++;
+	u64_stats_update_end(&this_cpu->syncp);
+	local_bh_enable();
 }
 
 static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
-	struct nft_counter *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu *cpu_stats;
+	struct nft_counter total;
+	u64 bytes, packets;
 	unsigned int seq;
-	u64 bytes;
-	u64 packets;
+	int cpu;
 
-	do {
-		seq = read_seqbegin(&priv->lock);
-		bytes	= priv->bytes;
-		packets	= priv->packets;
-	} while (read_seqretry(&priv->lock, seq));
+	memset(&total, 0, sizeof(total));
+	for_each_possible_cpu(cpu) {
+		cpu_stats = per_cpu_ptr(priv->counter, cpu);
+		do {
+			seq	= u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			bytes	= cpu_stats->counter.bytes;
+			packets	= cpu_stats->counter.packets;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
 
-	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
-		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+		total.packets += packets;
+		total.bytes += bytes;
+	}
+
+	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
+	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
 		goto nla_put_failure;
 	return 0;
 
@@ -67,23 +87,44 @@
 			    const struct nft_expr *expr,
 			    const struct nlattr * const tb[])
 {
-	struct nft_counter *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter_percpu __percpu *cpu_stats;
+	struct nft_counter_percpu *this_cpu;
 
-	if (tb[NFTA_COUNTER_PACKETS])
-	        priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
-	if (tb[NFTA_COUNTER_BYTES])
-		priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+	cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
+	if (cpu_stats == NULL)
+		return -ENOMEM;
 
-	seqlock_init(&priv->lock);
+	preempt_disable();
+	this_cpu = this_cpu_ptr(cpu_stats);
+	if (tb[NFTA_COUNTER_PACKETS]) {
+	        this_cpu->counter.packets =
+			be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+	}
+	if (tb[NFTA_COUNTER_BYTES]) {
+		this_cpu->counter.bytes =
+			be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+	}
+	preempt_enable();
+	priv->counter = cpu_stats;
 	return 0;
 }
 
+static void nft_counter_destroy(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+
+	free_percpu(priv->counter);
+}
+
 static struct nft_expr_type nft_counter_type;
 static const struct nft_expr_ops nft_counter_ops = {
 	.type		= &nft_counter_type,
-	.size		= NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
 	.eval		= nft_counter_eval,
 	.init		= nft_counter_init,
+	.destroy	= nft_counter_destroy,
 	.dump		= nft_counter_dump,
 };
 
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 435c1cc..5d67938 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -20,63 +20,79 @@
 static DEFINE_SPINLOCK(limit_lock);
 
 struct nft_limit {
+	u64		last;
 	u64		tokens;
+	u64		tokens_max;
 	u64		rate;
-	u64		unit;
-	unsigned long	stamp;
+	u64		nsecs;
+	u32		burst;
 };
 
-static void nft_limit_eval(const struct nft_expr *expr,
-			   struct nft_regs *regs,
-			   const struct nft_pktinfo *pkt)
+static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
 {
-	struct nft_limit *priv = nft_expr_priv(expr);
+	u64 now, tokens;
+	s64 delta;
 
 	spin_lock_bh(&limit_lock);
-	if (time_after_eq(jiffies, priv->stamp)) {
-		priv->tokens = priv->rate;
-		priv->stamp = jiffies + priv->unit * HZ;
-	}
+	now = ktime_get_ns();
+	tokens = limit->tokens + now - limit->last;
+	if (tokens > limit->tokens_max)
+		tokens = limit->tokens_max;
 
-	if (priv->tokens >= 1) {
-		priv->tokens--;
+	limit->last = now;
+	delta = tokens - cost;
+	if (delta >= 0) {
+		limit->tokens = delta;
 		spin_unlock_bh(&limit_lock);
-		return;
+		return false;
 	}
+	limit->tokens = tokens;
 	spin_unlock_bh(&limit_lock);
-
-	regs->verdict.code = NFT_BREAK;
+	return true;
 }
 
-static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
-	[NFTA_LIMIT_RATE]	= { .type = NLA_U64 },
-	[NFTA_LIMIT_UNIT]	= { .type = NLA_U64 },
-};
-
-static int nft_limit_init(const struct nft_ctx *ctx,
-			  const struct nft_expr *expr,
+static int nft_limit_init(struct nft_limit *limit,
 			  const struct nlattr * const tb[])
 {
-	struct nft_limit *priv = nft_expr_priv(expr);
+	u64 unit;
 
 	if (tb[NFTA_LIMIT_RATE] == NULL ||
 	    tb[NFTA_LIMIT_UNIT] == NULL)
 		return -EINVAL;
 
-	priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
-	priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-	priv->stamp  = jiffies + priv->unit * HZ;
-	priv->tokens = priv->rate;
+	limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+	unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+	limit->nsecs = unit * NSEC_PER_SEC;
+	if (limit->rate == 0 || limit->nsecs < unit)
+		return -EOVERFLOW;
+	limit->tokens = limit->tokens_max = limit->nsecs;
+
+	if (tb[NFTA_LIMIT_BURST]) {
+		u64 rate;
+
+		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+
+		rate = limit->rate + limit->burst;
+		if (rate < limit->rate)
+			return -EOVERFLOW;
+
+		limit->rate = rate;
+	}
+	limit->last = ktime_get_ns();
+
 	return 0;
 }
 
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+			  enum nft_limit_type type)
 {
-	const struct nft_limit *priv = nft_expr_priv(expr);
+	u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+	u64 rate = limit->rate - limit->burst;
 
-	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
-		goto nla_put_failure;
-	if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+	if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate)) ||
+	    nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs)) ||
+	    nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+	    nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)))
 		goto nla_put_failure;
 	return 0;
 
@@ -84,18 +100,114 @@
 	return -1;
 }
 
+struct nft_limit_pkts {
+	struct nft_limit	limit;
+	u64			cost;
+};
+
+static void nft_limit_pkts_eval(const struct nft_expr *expr,
+				struct nft_regs *regs,
+				const struct nft_pktinfo *pkt)
+{
+	struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+	if (nft_limit_eval(&priv->limit, priv->cost))
+		regs->verdict.code = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+	[NFTA_LIMIT_RATE]	= { .type = NLA_U64 },
+	[NFTA_LIMIT_UNIT]	= { .type = NLA_U64 },
+	[NFTA_LIMIT_BURST]	= { .type = NLA_U32 },
+	[NFTA_LIMIT_TYPE]	= { .type = NLA_U32 },
+};
+
+static int nft_limit_pkts_init(const struct nft_ctx *ctx,
+			       const struct nft_expr *expr,
+			       const struct nlattr * const tb[])
+{
+	struct nft_limit_pkts *priv = nft_expr_priv(expr);
+	int err;
+
+	err = nft_limit_init(&priv->limit, tb);
+	if (err < 0)
+		return err;
+
+	priv->cost = div_u64(priv->limit.nsecs, priv->limit.rate);
+	return 0;
+}
+
+static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+
+	return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
+}
+
 static struct nft_expr_type nft_limit_type;
-static const struct nft_expr_ops nft_limit_ops = {
+static const struct nft_expr_ops nft_limit_pkts_ops = {
+	.type		= &nft_limit_type,
+	.size		= NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+	.eval		= nft_limit_pkts_eval,
+	.init		= nft_limit_pkts_init,
+	.dump		= nft_limit_pkts_dump,
+};
+
+static void nft_limit_pkt_bytes_eval(const struct nft_expr *expr,
+				     struct nft_regs *regs,
+				     const struct nft_pktinfo *pkt)
+{
+	struct nft_limit *priv = nft_expr_priv(expr);
+	u64 cost = div_u64(priv->nsecs * pkt->skb->len, priv->rate);
+
+	if (nft_limit_eval(priv, cost))
+		regs->verdict.code = NFT_BREAK;
+}
+
+static int nft_limit_pkt_bytes_init(const struct nft_ctx *ctx,
+				    const struct nft_expr *expr,
+				    const struct nlattr * const tb[])
+{
+	struct nft_limit *priv = nft_expr_priv(expr);
+
+	return nft_limit_init(priv, tb);
+}
+
+static int nft_limit_pkt_bytes_dump(struct sk_buff *skb,
+				    const struct nft_expr *expr)
+{
+	const struct nft_limit *priv = nft_expr_priv(expr);
+
+	return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
+}
+
+static const struct nft_expr_ops nft_limit_pkt_bytes_ops = {
 	.type		= &nft_limit_type,
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_limit)),
-	.eval		= nft_limit_eval,
-	.init		= nft_limit_init,
-	.dump		= nft_limit_dump,
+	.eval		= nft_limit_pkt_bytes_eval,
+	.init		= nft_limit_pkt_bytes_init,
+	.dump		= nft_limit_pkt_bytes_dump,
 };
 
+static const struct nft_expr_ops *
+nft_limit_select_ops(const struct nft_ctx *ctx,
+		     const struct nlattr * const tb[])
+{
+	if (tb[NFTA_LIMIT_TYPE] == NULL)
+		return &nft_limit_pkts_ops;
+
+	switch (ntohl(nla_get_be32(tb[NFTA_LIMIT_TYPE]))) {
+	case NFT_LIMIT_PKTS:
+		return &nft_limit_pkts_ops;
+	case NFT_LIMIT_PKT_BYTES:
+		return &nft_limit_pkt_bytes_ops;
+	}
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct nft_expr_type nft_limit_type __read_mostly = {
 	.name		= "limit",
-	.ops		= &nft_limit_ops,
+	.select_ops	= nft_limit_select_ops,
 	.policy		= nft_limit_policy,
 	.maxattr	= NFTA_LIMIT_MAX,
 	.flags		= NFT_EXPR_STATEFUL,
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 52561e1..cb2f13e 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -166,11 +166,13 @@
 			goto err;
 		*dest = out->group;
 		break;
+#ifdef CONFIG_CGROUP_NET_CLASSID
 	case NFT_META_CGROUP:
 		if (skb->sk == NULL || !sk_fullsock(skb->sk))
 			goto err;
 		*dest = skb->sk->sk_classid;
 		break;
+#endif
 	default:
 		WARN_ON(1);
 		goto err;
@@ -246,7 +248,9 @@
 	case NFT_META_CPU:
 	case NFT_META_IIFGROUP:
 	case NFT_META_OIFGROUP:
+#ifdef CONFIG_CGROUP_NET_CLASSID
 	case NFT_META_CGROUP:
+#endif
 		len = sizeof(u32);
 		break;
 	case NFT_META_IIFNAME:
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 94fb3b2..09b4b07 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/if_vlan.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/netlink.h>
@@ -17,6 +18,53 @@
 #include <net/netfilter/nf_tables_core.h>
 #include <net/netfilter/nf_tables.h>
 
+/* add vlan header into the user buffer in case the tag was removed by offloads */
+static bool
+nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+{
+	int mac_off = skb_mac_header(skb) - skb->data;
+	u8 vlan_len, *vlanh, *dst_u8 = (u8 *) d;
+	struct vlan_ethhdr veth;
+
+	vlanh = (u8 *) &veth;
+	if (offset < ETH_HLEN) {
+		u8 ethlen = min_t(u8, len, ETH_HLEN - offset);
+
+		if (skb_copy_bits(skb, mac_off, &veth, ETH_HLEN))
+			return false;
+
+		veth.h_vlan_proto = skb->vlan_proto;
+
+		memcpy(dst_u8, vlanh + offset, ethlen);
+
+		len -= ethlen;
+		if (len == 0)
+			return true;
+
+		dst_u8 += ethlen;
+		offset = ETH_HLEN;
+	} else if (offset >= VLAN_ETH_HLEN) {
+		offset -= VLAN_HLEN;
+		goto skip;
+	}
+
+	veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+	veth.h_vlan_encapsulated_proto = skb->protocol;
+
+	vlanh += offset;
+
+	vlan_len = min_t(u8, len, VLAN_ETH_HLEN - offset);
+	memcpy(dst_u8, vlanh, vlan_len);
+
+	len -= vlan_len;
+	if (!len)
+		return true;
+
+	dst_u8 += vlan_len;
+ skip:
+	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+}
+
 static void nft_payload_eval(const struct nft_expr *expr,
 			     struct nft_regs *regs,
 			     const struct nft_pktinfo *pkt)
@@ -26,10 +74,18 @@
 	u32 *dest = &regs->data[priv->dreg];
 	int offset;
 
+	dest[priv->len / NFT_REG32_SIZE] = 0;
 	switch (priv->base) {
 	case NFT_PAYLOAD_LL_HEADER:
 		if (!skb_mac_header_was_set(skb))
 			goto err;
+
+		if (skb_vlan_tag_present(skb)) {
+			if (!nft_payload_copy_vlan(dest, skb,
+						   priv->offset, priv->len))
+				goto err;
+			return;
+		}
 		offset = skb_mac_header(skb) - skb->data;
 		break;
 	case NFT_PAYLOAD_NETWORK_HEADER:
@@ -43,7 +99,6 @@
 	}
 	offset += priv->offset;
 
-	dest[priv->len / NFT_REG32_SIZE] = 0;
 	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
 		goto err;
 	return;
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index d324fe7..9b42b5e 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -67,9 +67,6 @@
 	[NFPROTO_IPV6]   = "ip6",
 };
 
-/* Allow this many total (re)entries. */
-static const unsigned int xt_jumpstack_multiplier = 2;
-
 /* Registration hooks for targets. */
 int xt_register_target(struct xt_target *target)
 {
@@ -688,8 +685,6 @@
 		kvfree(info->jumpstack);
 	}
 
-	free_percpu(info->stackptr);
-
 	kvfree(info);
 }
 EXPORT_SYMBOL(xt_free_table_info);
@@ -732,15 +727,14 @@
 DEFINE_PER_CPU(seqcount_t, xt_recseq);
 EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
+struct static_key xt_tee_enabled __read_mostly;
+EXPORT_SYMBOL_GPL(xt_tee_enabled);
+
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {
 	unsigned int size;
 	int cpu;
 
-	i->stackptr = alloc_percpu(unsigned int);
-	if (i->stackptr == NULL)
-		return -ENOMEM;
-
 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
 		i->jumpstack = vzalloc(size);
@@ -749,8 +743,21 @@
 	if (i->jumpstack == NULL)
 		return -ENOMEM;
 
-	i->stacksize *= xt_jumpstack_multiplier;
-	size = sizeof(void *) * i->stacksize;
+	/* ruleset without jumps -- no stack needed */
+	if (i->stacksize == 0)
+		return 0;
+
+	/* Jumpstack needs to be able to record two full callchains, one
+	 * from the first rule set traversal, plus one table reentrancy
+	 * via -j TEE without clobbering the callchain that brought us to
+	 * via -j TEE without clobbering the callchain that brought us to
+	 * the TEE target.
+	 * This is done by allocating two jumpstacks per cpu, on reentry
+	 * the upper half of the stack is used.
+	 *
+	 * see the jumpstack setup in ipt_do_table() for more details.
+	 */
+	size = sizeof(void *) * i->stacksize * 2u;
 	for_each_possible_cpu(cpu) {
 		if (size > PAGE_SIZE)
 			i->jumpstack[cpu] = vmalloc_node(size,
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 43ddeee..8e52489 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -181,9 +181,23 @@
 #endif
 }
 
+static u16 xt_ct_flags_to_dir(const struct xt_ct_target_info_v1 *info)
+{
+	switch (info->flags & (XT_CT_ZONE_DIR_ORIG |
+			       XT_CT_ZONE_DIR_REPL)) {
+	case XT_CT_ZONE_DIR_ORIG:
+		return NF_CT_ZONE_DIR_ORIG;
+	case XT_CT_ZONE_DIR_REPL:
+		return NF_CT_ZONE_DIR_REPL;
+	default:
+		return NF_CT_DEFAULT_ZONE_DIR;
+	}
+}
+
 static int xt_ct_tg_check(const struct xt_tgchk_param *par,
 			  struct xt_ct_target_info_v1 *info)
 {
+	struct nf_conntrack_zone zone;
 	struct nf_conn *ct;
 	int ret = -EOPNOTSUPP;
 
@@ -193,7 +207,9 @@
 	}
 
 #ifndef CONFIG_NF_CONNTRACK_ZONES
-	if (info->zone)
+	if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
+					 XT_CT_ZONE_DIR_REPL |
+					 XT_CT_ZONE_MARK))
 		goto err1;
 #endif
 
@@ -201,7 +217,13 @@
 	if (ret < 0)
 		goto err1;
 
-	ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
+	memset(&zone, 0, sizeof(zone));
+	zone.id = info->zone;
+	zone.dir = xt_ct_flags_to_dir(info);
+	if (info->flags & XT_CT_ZONE_MARK)
+		zone.flags |= NF_CT_FLAG_MARK;
+
+	ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
 	if (!ct) {
 		ret = -ENOMEM;
 		goto err2;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 8c3190e..8c02501 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -144,7 +144,7 @@
 
 			inet_proto_csum_replace2(&tcph->check, skb,
 						 htons(oldmss), htons(newmss),
-						 0);
+						 false);
 			return 0;
 		}
 	}
@@ -185,18 +185,18 @@
 	memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
 
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 htons(len), htons(len + TCPOLEN_MSS), 1);
+				 htons(len), htons(len + TCPOLEN_MSS), true);
 	opt[0] = TCPOPT_MSS;
 	opt[1] = TCPOLEN_MSS;
 	opt[2] = (newmss & 0xff00) >> 8;
 	opt[3] = newmss & 0x00ff;
 
-	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), 0);
+	inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);
 
 	oldval = ((__be16 *)tcph)[6];
 	tcph->doff += TCPOLEN_MSS/4;
 	inet_proto_csum_replace2(&tcph->check, skb,
-				 oldval, ((__be16 *)tcph)[6], 0);
+				 oldval, ((__be16 *)tcph)[6], false);
 	return TCPOLEN_MSS;
 }
 
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 625fa1d..eb92bff 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -80,7 +80,7 @@
 				n <<= 8;
 			}
 			inet_proto_csum_replace2(&tcph->check, skb, htons(o),
-						 htons(n), 0);
+						 htons(n), false);
 		}
 		memset(opt + i, TCPOPT_NOP, optl);
 	}
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index a747eb4..fd980aa 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -10,26 +10,15 @@
  *	modify it under the terms of the GNU General Public License
  *	version 2 or later, as published by the Free Software Foundation.
  */
-#include <linux/ip.h>
 #include <linux/module.h>
-#include <linux/percpu.h>
-#include <linux/route.h>
 #include <linux/skbuff.h>
-#include <linux/notifier.h>
-#include <net/checksum.h>
-#include <net/icmp.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/route.h>
+#include <linux/route.h>
 #include <linux/netfilter/x_tables.h>
+#include <net/route.h>
+#include <net/netfilter/ipv4/nf_dup_ipv4.h>
+#include <net/netfilter/ipv6/nf_dup_ipv6.h>
 #include <linux/netfilter/xt_TEE.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#	define WITH_CONNTRACK 1
-#	include <net/netfilter/nf_conntrack.h>
-#endif
-
 struct xt_tee_priv {
 	struct notifier_block	notifier;
 	struct xt_tee_tginfo	*tginfo;
@@ -37,163 +26,25 @@
 };
 
 static const union nf_inet_addr tee_zero_address;
-static DEFINE_PER_CPU(bool, tee_active);
-
-static struct net *pick_net(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_NS
-	const struct dst_entry *dst;
-
-	if (skb->dev != NULL)
-		return dev_net(skb->dev);
-	dst = skb_dst(skb);
-	if (dst != NULL && dst->dev != NULL)
-		return dev_net(dst->dev);
-#endif
-	return &init_net;
-}
-
-static bool
-tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-	const struct iphdr *iph = ip_hdr(skb);
-	struct net *net = pick_net(skb);
-	struct rtable *rt;
-	struct flowi4 fl4;
-
-	memset(&fl4, 0, sizeof(fl4));
-	if (info->priv) {
-		if (info->priv->oif == -1)
-			return false;
-		fl4.flowi4_oif = info->priv->oif;
-	}
-	fl4.daddr = info->gw.ip;
-	fl4.flowi4_tos = RT_TOS(iph->tos);
-	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
-	fl4.flowi4_flags = FLOWI_FLAG_KNOWN_NH;
-	rt = ip_route_output_key(net, &fl4);
-	if (IS_ERR(rt))
-		return false;
-
-	skb_dst_drop(skb);
-	skb_dst_set(skb, &rt->dst);
-	skb->dev      = rt->dst.dev;
-	skb->protocol = htons(ETH_P_IP);
-	return true;
-}
 
 static unsigned int
 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
-	struct iphdr *iph;
 
-	if (__this_cpu_read(tee_active))
-		return XT_CONTINUE;
-	/*
-	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
-	 * the original skb, which should continue on its way as if nothing has
-	 * happened. The copy should be independently delivered to the TEE
-	 * --gateway.
-	 */
-	skb = pskb_copy(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		return XT_CONTINUE;
+	nf_dup_ipv4(skb, par->hooknum, &info->gw.in, info->priv->oif);
 
-#ifdef WITH_CONNTRACK
-	/* Avoid counting cloned packets towards the original connection. */
-	nf_conntrack_put(skb->nfct);
-	skb->nfct     = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
-#endif
-	/*
-	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
-	 * since the length could have changed as a result of defragmentation.
-	 *
-	 * We also decrease the TTL to mitigate potential TEE loops
-	 * between two hosts.
-	 *
-	 * Set %IP_DF so that the original source is notified of a potentially
-	 * decreased MTU on the clone route. IPv6 does this too.
-	 */
-	iph = ip_hdr(skb);
-	iph->frag_off |= htons(IP_DF);
-	if (par->hooknum == NF_INET_PRE_ROUTING ||
-	    par->hooknum == NF_INET_LOCAL_IN)
-		--iph->ttl;
-	ip_send_check(iph);
-
-	if (tee_tg_route4(skb, info)) {
-		__this_cpu_write(tee_active, true);
-		ip_local_out(skb);
-		__this_cpu_write(tee_active, false);
-	} else {
-		kfree_skb(skb);
-	}
 	return XT_CONTINUE;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static bool
-tee_tg_route6(struct sk_buff *skb, const struct xt_tee_tginfo *info)
-{
-	const struct ipv6hdr *iph = ipv6_hdr(skb);
-	struct net *net = pick_net(skb);
-	struct dst_entry *dst;
-	struct flowi6 fl6;
-
-	memset(&fl6, 0, sizeof(fl6));
-	if (info->priv) {
-		if (info->priv->oif == -1)
-			return false;
-		fl6.flowi6_oif = info->priv->oif;
-	}
-	fl6.daddr = info->gw.in6;
-	fl6.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
-			   (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
-	fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
-	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst->error) {
-		dst_release(dst);
-		return false;
-	}
-	skb_dst_drop(skb);
-	skb_dst_set(skb, dst);
-	skb->dev      = dst->dev;
-	skb->protocol = htons(ETH_P_IPV6);
-	return true;
-}
-
+#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
 static unsigned int
 tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_tee_tginfo *info = par->targinfo;
 
-	if (__this_cpu_read(tee_active))
-		return XT_CONTINUE;
-	skb = pskb_copy(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		return XT_CONTINUE;
+	nf_dup_ipv6(skb, par->hooknum, &info->gw.in6, info->priv->oif);
 
-#ifdef WITH_CONNTRACK
-	nf_conntrack_put(skb->nfct);
-	skb->nfct     = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
-#endif
-	if (par->hooknum == NF_INET_PRE_ROUTING ||
-	    par->hooknum == NF_INET_LOCAL_IN) {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
-		--iph->hop_limit;
-	}
-	if (tee_tg_route6(skb, info)) {
-		__this_cpu_write(tee_active, true);
-		ip6_local_out(skb);
-		__this_cpu_write(tee_active, false);
-	} else {
-		kfree_skb(skb);
-	}
 	return XT_CONTINUE;
 }
 #endif
@@ -252,6 +103,7 @@
 	} else
 		info->priv = NULL;
 
+	static_key_slow_inc(&xt_tee_enabled);
 	return 0;
 }
 
@@ -263,6 +115,7 @@
 		unregister_netdevice_notifier(&info->priv->notifier);
 		kfree(info->priv);
 	}
+	static_key_slow_dec(&xt_tee_enabled);
 }
 
 static struct xt_target tee_tg_reg[] __read_mostly = {
@@ -276,7 +129,7 @@
 		.destroy    = tee_tg_destroy,
 		.me         = THIS_MODULE,
 	},
-#if IS_ENABLED(CONFIG_IPV6)
+#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
 	{
 		.name       = "TEE",
 		.revision   = 1,
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index cca96ce..d0c96c5 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -272,8 +272,7 @@
 					    hp->source, lport ? lport : hp->dest,
 					    skb->dev, NFT_LOOKUP_LISTENER);
 		if (sk2) {
-			inet_twsk_deschedule(inet_twsk(sk));
-			inet_twsk_put(inet_twsk(sk));
+			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 		}
 	}
@@ -437,8 +436,7 @@
 					    tgi->lport ? tgi->lport : hp->dest,
 					    skb->dev, NFT_LOOKUP_LISTENER);
 		if (sk2) {
-			inet_twsk_deschedule(inet_twsk(sk));
-			inet_twsk_put(inet_twsk(sk));
+			inet_twsk_deschedule_put(inet_twsk(sk));
 			sk = sk2;
 		}
 	}
diff --git a/net/netfilter/xt_connlabel.c b/net/netfilter/xt_connlabel.c
index 9f8719d..bb9cbeb 100644
--- a/net/netfilter/xt_connlabel.c
+++ b/net/netfilter/xt_connlabel.c
@@ -42,10 +42,6 @@
 			    XT_CONNLABEL_OP_SET;
 	struct xt_connlabel_mtinfo *info = par->matchinfo;
 	int ret;
-	size_t words;
-
-	if (info->bit > XT_CONNLABEL_MAXBIT)
-		return -ERANGE;
 
 	if (info->options & ~options) {
 		pr_err("Unknown options in mask %x\n", info->options);
@@ -59,19 +55,15 @@
 		return ret;
 	}
 
-	par->net->ct.labels_used++;
-	words = BITS_TO_LONGS(info->bit+1);
-	if (words > par->net->ct.label_words)
-		par->net->ct.label_words = words;
-
+	ret = nf_connlabels_get(par->net, info->bit + 1);
+	if (ret < 0)
+		nf_ct_l3proto_module_put(par->family);
 	return ret;
 }
 
 static void connlabel_mt_destroy(const struct xt_mtdtor_param *par)
 {
-	par->net->ct.labels_used--;
-	if (par->net->ct.labels_used == 0)
-		par->net->ct.label_words = 0;
+	nf_connlabels_put(par->net);
 	nf_ct_l3proto_module_put(par->family);
 }
 
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 29ba621..075d89d 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -134,7 +134,7 @@
 static unsigned int check_hlist(struct net *net,
 				struct hlist_head *head,
 				const struct nf_conntrack_tuple *tuple,
-				u16 zone,
+				const struct nf_conntrack_zone *zone,
 				bool *addit)
 {
 	const struct nf_conntrack_tuple_hash *found;
@@ -201,7 +201,7 @@
 count_tree(struct net *net, struct rb_root *root,
 	   const struct nf_conntrack_tuple *tuple,
 	   const union nf_inet_addr *addr, const union nf_inet_addr *mask,
-	   u8 family, u16 zone)
+	   u8 family, const struct nf_conntrack_zone *zone)
 {
 	struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES];
 	struct rb_node **rbnode, *parent;
@@ -290,7 +290,8 @@
 		      const struct nf_conntrack_tuple *tuple,
 		      const union nf_inet_addr *addr,
 		      const union nf_inet_addr *mask,
-		      u_int8_t family, u16 zone)
+		      u_int8_t family,
+		      const struct nf_conntrack_zone *zone)
 {
 	struct rb_root *root;
 	int count;
@@ -321,10 +322,10 @@
 	union nf_inet_addr addr;
 	struct nf_conntrack_tuple tuple;
 	const struct nf_conntrack_tuple *tuple_ptr = &tuple;
+	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int connections;
-	u16 zone = NF_CT_DEFAULT_ZONE;
 
 	ct = nf_ct_get(skb, &ctinfo);
 	if (ct != NULL) {
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 8c646ed..3048a7e 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -37,7 +37,7 @@
 	struct xt_nfacct_match_info *info = par->matchinfo;
 	struct nf_acct *nfacct;
 
-	nfacct = nfnl_acct_find_get(info->name);
+	nfacct = nfnl_acct_find_get(par->net, info->name);
 	if (nfacct == NULL) {
 		pr_info("xt_nfacct: accounting object with name `%s' "
 			"does not exists\n", info->name);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 67d2104..50889be 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -84,6 +84,7 @@
 #define NETLINK_F_BROADCAST_SEND_ERROR	0x4
 #define NETLINK_F_RECV_NO_ENOBUFS	0x8
 #define NETLINK_F_LISTEN_ALL_NSID	0x10
+#define NETLINK_F_CAP_ACK		0x20
 
 static inline int netlink_is_kernel(struct sock *sk)
 {
@@ -593,16 +594,6 @@
 	return netlink_lookup_frame(ring, ring->head, status);
 }
 
-static struct nl_mmap_hdr *
-netlink_previous_frame(const struct netlink_ring *ring,
-		       enum nl_mmap_status status)
-{
-	unsigned int prev;
-
-	prev = ring->head ? ring->head - 1 : ring->frame_max;
-	return netlink_lookup_frame(ring, prev, status);
-}
-
 static void netlink_increment_head(struct netlink_ring *ring)
 {
 	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
@@ -610,11 +601,11 @@
 
 static void netlink_forward_ring(struct netlink_ring *ring)
 {
-	unsigned int head = ring->head, pos = head;
+	unsigned int head = ring->head;
 	const struct nl_mmap_hdr *hdr;
 
 	do {
-		hdr = __netlink_lookup_frame(ring, pos);
+		hdr = __netlink_lookup_frame(ring, ring->head);
 		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
 			break;
 		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
@@ -623,6 +614,21 @@
 	} while (ring->head != head);
 }
 
+static bool netlink_has_valid_frame(struct netlink_ring *ring)
+{
+	unsigned int head = ring->head, pos = head;
+	const struct nl_mmap_hdr *hdr;
+
+	do {
+		hdr = __netlink_lookup_frame(ring, pos);
+		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
+			return true;
+		pos = pos != 0 ? pos - 1 : ring->frame_max;
+	} while (pos != head);
+
+	return false;
+}
+
 static bool netlink_dump_space(struct netlink_sock *nlk)
 {
 	struct netlink_ring *ring = &nlk->rx_ring;
@@ -670,8 +676,7 @@
 
 	spin_lock_bh(&sk->sk_receive_queue.lock);
 	if (nlk->rx_ring.pg_vec) {
-		netlink_forward_ring(&nlk->rx_ring);
-		if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
+		if (netlink_has_valid_frame(&nlk->rx_ring))
 			mask |= POLLIN | POLLRDNORM;
 	}
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -2258,6 +2263,13 @@
 			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
 		err = 0;
 		break;
+	case NETLINK_CAP_ACK:
+		if (val)
+			nlk->flags |= NETLINK_F_CAP_ACK;
+		else
+			nlk->flags &= ~NETLINK_F_CAP_ACK;
+		err = 0;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -2332,6 +2344,16 @@
 		netlink_table_ungrab();
 		break;
 	}
+	case NETLINK_CAP_ACK:
+		if (len < sizeof(int))
+			return -EINVAL;
+		len = sizeof(int);
+		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
+		if (put_user(len, optlen) ||
+		    put_user(val, optval))
+			return -EFAULT;
+		err = 0;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 	}
@@ -2401,7 +2423,7 @@
 	 * sendmsg(), but that's what we've got...
 	 */
 	if (netlink_tx_is_mmaped(sk) &&
-	    msg->msg_iter.type == ITER_IOVEC &&
+	    iter_is_iovec(&msg->msg_iter) &&
 	    msg->msg_iter.nr_segs == 1 &&
 	    msg->msg_iter.iov->iov_base == NULL) {
 		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
@@ -2873,9 +2895,12 @@
 	struct nlmsghdr *rep;
 	struct nlmsgerr *errmsg;
 	size_t payload = sizeof(*errmsg);
+	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
 
-	/* error messages get the original request appened */
-	if (err)
+	/* Error messages get the original request appended, unless the user
+	 * requests to cap the error message.
+	 */
+	if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
 		payload += nlmsg_len(nlh);
 
 	skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
@@ -2898,7 +2923,7 @@
 			  NLMSG_ERROR, payload, 0);
 	errmsg = nlmsg_data(rep);
 	errmsg->error = err;
-	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
+	memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
 }
 EXPORT_SYMBOL(netlink_ack);
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 95af2d2..943889b 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -351,6 +351,20 @@
 }
 EXPORT_SYMBOL(nci_prop_cmd);
 
+int nci_core_reset(struct nci_dev *ndev)
+{
+	return __nci_request(ndev, nci_reset_req, 0,
+			     msecs_to_jiffies(NCI_RESET_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_reset);
+
+int nci_core_init(struct nci_dev *ndev)
+{
+	return __nci_request(ndev, nci_init_req, 0,
+			     msecs_to_jiffies(NCI_INIT_TIMEOUT));
+}
+EXPORT_SYMBOL(nci_core_init);
+
 static int nci_open_device(struct nci_dev *ndev)
 {
 	int rc = 0;
@@ -388,6 +402,10 @@
 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
 	}
 
+	if (ndev->ops->post_setup) {
+		rc = ndev->ops->post_setup(ndev);
+	}
+
 	if (!rc) {
 		rc = __nci_request(ndev, nci_init_complete_req, 0,
 				   msecs_to_jiffies(NCI_INIT_TIMEOUT));
diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
index af002df..609f922 100644
--- a/net/nfc/nci/hci.c
+++ b/net/nfc/nci/hci.c
@@ -233,7 +233,7 @@
 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
 
-	if (r == NCI_STATUS_OK)
+	if (r == NCI_STATUS_OK && skb)
 		*skb = conn_info->rx_skb;
 
 	return r;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f85f37e..853172c 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -63,6 +63,8 @@
 	[NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
 				     .len = NFC_FIRMWARE_NAME_MAXSIZE },
 	[NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+	[NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
+
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1503,7 +1505,7 @@
 	u32 dev_idx, vid, subcmd;
 	u8 *data;
 	size_t data_len;
-	int i;
+	int i, err;
 
 	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
 	    !info->attrs[NFC_ATTR_VENDOR_ID] ||
@@ -1518,12 +1520,13 @@
 	if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
 		return -ENODEV;
 
-	data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
-	if (data) {
+	if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
+		data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
 		data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
 		if (data_len == 0)
 			return -EINVAL;
 	} else {
+		data = NULL;
 		data_len = 0;
 	}
 
@@ -1533,12 +1536,92 @@
 		if (cmd->vendor_id != vid || cmd->subcmd != subcmd)
 			continue;
 
-		return cmd->doit(dev, data, data_len);
+		dev->cur_cmd_info = info;
+		err = cmd->doit(dev, data, data_len);
+		dev->cur_cmd_info = NULL;
+		return err;
 	}
 
 	return -EOPNOTSUPP;
 }
 
+/* message building helper */
+static inline void *nfc_hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+				int flags, u8 cmd)
+{
+	/* since there is no private header just add the generic one */
+	return genlmsg_put(skb, portid, seq, &nfc_genl_family, flags, cmd);
+}
+
+static struct sk_buff *
+__nfc_alloc_vendor_cmd_skb(struct nfc_dev *dev, int approxlen,
+			   u32 portid, u32 seq,
+			   enum nfc_attrs attr,
+			   u32 oui, u32 subcmd, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	void *hdr;
+
+	skb = nlmsg_new(approxlen + 100, gfp);
+	if (!skb)
+		return NULL;
+
+	hdr = nfc_hdr_put(skb, portid, seq, 0, NFC_CMD_VENDOR);
+	if (!hdr) {
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	if (nla_put_u32(skb, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, NFC_ATTR_VENDOR_ID, oui))
+		goto nla_put_failure;
+	if (nla_put_u32(skb, NFC_ATTR_VENDOR_SUBCMD, subcmd))
+		goto nla_put_failure;
+
+	((void **)skb->cb)[0] = dev;
+	((void **)skb->cb)[1] = hdr;
+
+	return skb;
+
+nla_put_failure:
+	kfree_skb(skb);
+	return NULL;
+}
+
+struct sk_buff *__nfc_alloc_vendor_cmd_reply_skb(struct nfc_dev *dev,
+						 enum nfc_attrs attr,
+						 u32 oui, u32 subcmd,
+						 int approxlen)
+{
+	if (WARN_ON(!dev->cur_cmd_info))
+		return NULL;
+
+	return __nfc_alloc_vendor_cmd_skb(dev, approxlen,
+					  dev->cur_cmd_info->snd_portid,
+					  dev->cur_cmd_info->snd_seq, attr,
+					  oui, subcmd, GFP_KERNEL);
+}
+EXPORT_SYMBOL(__nfc_alloc_vendor_cmd_reply_skb);
+
+int nfc_vendor_cmd_reply(struct sk_buff *skb)
+{
+	struct nfc_dev *dev = ((void **)skb->cb)[0];
+	void *hdr = ((void **)skb->cb)[1];
+
+	/* clear CB data for netlink core to own from now on */
+	memset(skb->cb, 0, sizeof(skb->cb));
+
+	if (WARN_ON(!dev->cur_cmd_info)) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+
+	genlmsg_end(skb, hdr);
+	return genlmsg_reply(skb, dev->cur_cmd_info);
+}
+EXPORT_SYMBOL(nfc_vendor_cmd_reply);
+
 static const struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 1584040..af7cdef 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -31,10 +31,21 @@
 
 	  If unsure, say N.
 
+config OPENVSWITCH_CONNTRACK
+	bool "Open vSwitch conntrack action support"
+	depends on OPENVSWITCH
+	depends on NF_CONNTRACK
+	default OPENVSWITCH
+	---help---
+	  If you say Y here, then the Open vSwitch module will be able to pass
+	  packets through conntrack.
+
+	  Say N to exclude this support and reduce the binary size.
+
 config OPENVSWITCH_GRE
 	tristate "Open vSwitch GRE tunneling support"
 	depends on OPENVSWITCH
-	depends on NET_IPGRE_DEMUX
+	depends on NET_IPGRE
 	default OPENVSWITCH
 	---help---
 	  If you say Y here, then the Open vSwitch will be able create GRE
@@ -59,7 +70,7 @@
 config OPENVSWITCH_GENEVE
 	tristate "Open vSwitch Geneve tunneling support"
 	depends on OPENVSWITCH
-	depends on GENEVE_CORE
+	depends on GENEVE
 	default OPENVSWITCH
 	---help---
 	  If you say Y here, then the Open vSwitch will be able create geneve vport.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 91b9478..5b5913b 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,6 +15,8 @@
 	vport-internal_dev.o \
 	vport-netdev.o
 
+openvswitch-$(CONFIG_OPENVSWITCH_CONNTRACK) += conntrack.o
+
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
-obj-$(CONFIG_OPENVSWITCH_VXLAN)	+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GRE)	+= vport-gre.o
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ee34f47..315f533 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -22,6 +22,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/openvswitch.h>
+#include <linux/netfilter_ipv6.h>
 #include <linux/sctp.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
@@ -29,8 +30,10 @@
 #include <linux/if_arp.h>
 #include <linux/if_vlan.h>
 
+#include <net/dst.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/ip6_fib.h>
 #include <net/checksum.h>
 #include <net/dsfield.h>
 #include <net/mpls.h>
@@ -38,6 +41,7 @@
 
 #include "datapath.h"
 #include "flow.h"
+#include "conntrack.h"
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
@@ -52,6 +56,20 @@
 	struct sw_flow_key pkt_key;
 };
 
+#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
+struct ovs_frag_data {
+	unsigned long dst;
+	struct vport *vport;
+	struct ovs_skb_cb cb;
+	__be16 inner_protocol;
+	__u16 vlan_tci;
+	__be16 vlan_proto;
+	unsigned int l2_len;
+	u8 l2_data[MAX_L2_LEN];
+};
+
+static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
+
 #define DEFERRED_ACTION_FIFO_SIZE 10
 struct action_fifo {
 	int head;
@@ -185,10 +203,6 @@
 	return 0;
 }
 
-/* 'KEY' must not have any bits set outside of the 'MASK' */
-#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
-#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
-
 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
 		    const __be32 *mpls_lse, const __be32 *mask)
 {
@@ -201,7 +215,7 @@
 		return err;
 
 	stack = (__be32 *)skb_mpls_header(skb);
-	lse = MASKED(*stack, *mpls_lse, *mask);
+	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		__be32 diff[] = { ~(*stack), lse };
 
@@ -244,9 +258,9 @@
 	const u16 *src = (const u16 *)src_;
 	const u16 *mask = (const u16 *)mask_;
 
-	SET_MASKED(dst[0], src[0], mask[0]);
-	SET_MASKED(dst[1], src[1], mask[1]);
-	SET_MASKED(dst[2], src[2], mask[2]);
+	OVS_SET_MASKED(dst[0], src[0], mask[0]);
+	OVS_SET_MASKED(dst[1], src[1], mask[1]);
+	OVS_SET_MASKED(dst[2], src[2], mask[2]);
 }
 
 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
@@ -284,14 +298,14 @@
 	if (nh->protocol == IPPROTO_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-						 addr, new_addr, 1);
+						 addr, new_addr, true);
 	} else if (nh->protocol == IPPROTO_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace4(&uh->check, skb,
-							 addr, new_addr, 1);
+							 addr, new_addr, true);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
@@ -316,14 +330,14 @@
 	if (l4_proto == NEXTHDR_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
-						  addr, new_addr, 1);
+						  addr, new_addr, true);
 	} else if (l4_proto == NEXTHDR_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
 
 			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 				inet_proto_csum_replace16(&uh->check, skb,
-							  addr, new_addr, 1);
+							  addr, new_addr, true);
 				if (!uh->check)
 					uh->check = CSUM_MANGLED_0;
 			}
@@ -331,17 +345,17 @@
 	} else if (l4_proto == NEXTHDR_ICMP) {
 		if (likely(transport_len >= sizeof(struct icmp6hdr)))
 			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
-						  skb, addr, new_addr, 1);
+						  skb, addr, new_addr, true);
 	}
 }
 
 static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
 			   const __be32 mask[4], __be32 masked[4])
 {
-	masked[0] = MASKED(old[0], addr[0], mask[0]);
-	masked[1] = MASKED(old[1], addr[1], mask[1]);
-	masked[2] = MASKED(old[2], addr[2], mask[2]);
-	masked[3] = MASKED(old[3], addr[3], mask[3]);
+	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
+	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
+	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
+	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
 }
 
 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
@@ -358,15 +372,15 @@
 static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 {
 	/* Bits 21-24 are always unmasked, so this retains their values. */
-	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
-	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
-	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 }
 
 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
 		       u8 mask)
 {
-	new_ttl = MASKED(nh->ttl, new_ttl, mask);
+	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);
 
 	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 	nh->ttl = new_ttl;
@@ -392,7 +406,7 @@
 	 * makes sense to check if the value actually changed.
 	 */
 	if (mask->ipv4_src) {
-		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
+		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 
 		if (unlikely(new_addr != nh->saddr)) {
 			set_ip_addr(skb, nh, &nh->saddr, new_addr);
@@ -400,7 +414,7 @@
 		}
 	}
 	if (mask->ipv4_dst) {
-		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
+		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 
 		if (unlikely(new_addr != nh->daddr)) {
 			set_ip_addr(skb, nh, &nh->daddr, new_addr);
@@ -488,7 +502,8 @@
 		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 	}
 	if (mask->ipv6_hlimit) {
-		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+			       mask->ipv6_hlimit);
 		flow_key->ip.ttl = nh->hop_limit;
 	}
 	return 0;
@@ -498,7 +513,7 @@
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
 			__be16 new_port, __sum16 *check)
 {
-	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+	inet_proto_csum_replace2(check, skb, *port, new_port, false);
 	*port = new_port;
 }
 
@@ -517,8 +532,8 @@
 
 	uh = udp_hdr(skb);
 	/* Either of the masks is non-zero, so do not bother checking them. */
-	src = MASKED(uh->source, key->udp_src, mask->udp_src);
-	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
+	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
+	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 
 	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
 		if (likely(src != uh->source)) {
@@ -558,12 +573,12 @@
 		return err;
 
 	th = tcp_hdr(skb);
-	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
 	if (likely(src != th->source)) {
 		set_tp_port(skb, &th->source, src, &th->check);
 		flow_key->tp.src = src;
 	}
-	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
 	if (likely(dst != th->dest)) {
 		set_tp_port(skb, &th->dest, dst, &th->check);
 		flow_key->tp.dst = dst;
@@ -590,8 +605,8 @@
 	old_csum = sh->checksum;
 	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 
-	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
-	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
+	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
+	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 
 	new_csum = sctp_compute_cksum(skb, sctphoff);
 
@@ -605,27 +620,159 @@
 	return 0;
 }
 
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
+{
+	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
+	struct vport *vport = data->vport;
+
+	if (skb_cow_head(skb, data->l2_len) < 0) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
+	__skb_dst_copy(skb, data->dst);
+	*OVS_CB(skb) = data->cb;
+	skb->inner_protocol = data->inner_protocol;
+	skb->vlan_tci = data->vlan_tci;
+	skb->vlan_proto = data->vlan_proto;
+
+	/* Reconstruct the MAC header.  */
+	skb_push(skb, data->l2_len);
+	memcpy(skb->data, &data->l2_data, data->l2_len);
+	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
+	skb_reset_mac_header(skb);
+
+	ovs_vport_send(vport, skb);
+	return 0;
+}
+
+static unsigned int
+ovs_dst_get_mtu(const struct dst_entry *dst)
+{
+	return dst->dev->mtu;
+}
+
+static struct dst_ops ovs_dst_ops = {
+	.family = AF_UNSPEC,
+	.mtu = ovs_dst_get_mtu,
+};
+
+/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
+ * ovs_vport_output(), which is called once per fragmented packet.
+ */
+static void prepare_frag(struct vport *vport, struct sk_buff *skb)
+{
+	unsigned int hlen = skb_network_offset(skb);
+	struct ovs_frag_data *data;
+
+	data = this_cpu_ptr(&ovs_frag_data_storage);
+	data->dst = skb->_skb_refdst;
+	data->vport = vport;
+	data->cb = *OVS_CB(skb);
+	data->inner_protocol = skb->inner_protocol;
+	data->vlan_tci = skb->vlan_tci;
+	data->vlan_proto = skb->vlan_proto;
+	data->l2_len = hlen;
+	memcpy(&data->l2_data, skb->data, hlen);
+
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+	skb_pull(skb, hlen);
+}
+
+static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
+			 __be16 ethertype)
+{
+	if (skb_network_offset(skb) > MAX_L2_LEN) {
+		OVS_NLERR(1, "L2 header too long to fragment");
+		return;
+	}
+
+	if (ethertype == htons(ETH_P_IP)) {
+		struct dst_entry ovs_dst;
+		unsigned long orig_dst;
+
+		prepare_frag(vport, skb);
+		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_dst);
+		IPCB(skb)->frag_max_size = mru;
+
+		ip_do_fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else if (ethertype == htons(ETH_P_IPV6)) {
+		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
+		unsigned long orig_dst;
+		struct rt6_info ovs_rt;
+
+		if (!v6ops) {
+			kfree_skb(skb);
+			return;
+		}
+
+		prepare_frag(vport, skb);
+		memset(&ovs_rt, 0, sizeof(ovs_rt));
+		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
+			 DST_OBSOLETE_NONE, DST_NOCOUNT);
+		ovs_rt.dst.dev = vport->dev;
+
+		orig_dst = skb->_skb_refdst;
+		skb_dst_set_noref(skb, &ovs_rt.dst);
+		IP6CB(skb)->frag_max_size = mru;
+
+		v6ops->fragment(skb->sk, skb, ovs_vport_output);
+		refdst_drop(orig_dst);
+	} else {
+		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
+			  ovs_vport_name(vport), ntohs(ethertype), mru,
+			  vport->dev->mtu);
+		kfree_skb(skb);
+	}
+}
+
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
+		      struct sw_flow_key *key)
 {
 	struct vport *vport = ovs_vport_rcu(dp, out_port);
 
-	if (likely(vport))
-		ovs_vport_send(vport, skb);
-	else
+	if (likely(vport)) {
+		u16 mru = OVS_CB(skb)->mru;
+
+		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
+			ovs_vport_send(vport, skb);
+		} else if (mru <= vport->dev->mtu) {
+			__be16 ethertype = key->eth.type;
+
+			if (!is_flow_key_valid(key)) {
+				if (eth_p_mpls(skb->protocol))
+					ethertype = skb->inner_protocol;
+				else
+					ethertype = vlan_get_protocol(skb);
+			}
+
+			ovs_fragment(vport, skb, mru, ethertype);
+		} else {
+			kfree_skb(skb);
+		}
+	} else {
 		kfree_skb(skb);
+	}
 }
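
For reference, a minimal user-space sketch (hypothetical helper, not part of the patch) of the forwarding decision do_output() now makes when a packet carries a non-zero MRU from IP reassembly: forward unchanged if the frame fits within the original fragment size, re-fragment if the MRU fits the egress device MTU, otherwise drop.

#include <stdbool.h>

#define ETH_HLEN 14	/* Ethernet header length, as in <linux/if_ether.h> */

enum out_action { OUT_SEND, OUT_FRAGMENT, OUT_DROP };

/* Mirrors the checks in do_output(): mru == 0 means the packet was never
 * reassembled from fragments, so it is always sent unchanged.
 */
static enum out_action decide_output(unsigned int skb_len, unsigned int mru,
				     unsigned int dev_mtu)
{
	if (!mru || skb_len <= mru + ETH_HLEN)
		return OUT_SEND;	/* fits, send as a single frame */
	if (mru <= dev_mtu)
		return OUT_FRAGMENT;	/* re-fragment to at most mru bytes */
	return OUT_DROP;		/* cannot honour the original MRU */
}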
 
 static int output_userspace(struct datapath *dp, struct sk_buff *skb,
 			    struct sw_flow_key *key, const struct nlattr *attr,
 			    const struct nlattr *actions, int actions_len)
 {
-	struct ovs_tunnel_info info;
+	struct ip_tunnel_info info;
 	struct dp_upcall_info upcall;
 	const struct nlattr *a;
 	int rem;
 
 	memset(&upcall, 0, sizeof(upcall));
 	upcall.cmd = OVS_PACKET_CMD_ACTION;
+	upcall.mru = OVS_CB(skb)->mru;
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 		 a = nla_next(a, &rem)) {
@@ -646,11 +793,13 @@
 			if (vport) {
 				int err;
 
+				upcall.egress_tun_info = &info;
 				err = ovs_vport_get_egress_tun_info(vport, skb,
-								    &info);
-				if (!err)
-					upcall.egress_tun_info = &info;
+								    &upcall);
+				if (err)
+					upcall.egress_tun_info = NULL;
 			}
+
 			break;
 		}
 
@@ -677,9 +826,12 @@
 
 	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
 		 a = nla_next(a, &rem)) {
+		u32 probability;
+
 		switch (nla_type(a)) {
 		case OVS_SAMPLE_ATTR_PROBABILITY:
-			if (prandom_u32() >= nla_get_u32(a))
+			probability = nla_get_u32(a);
+			if (!probability || prandom_u32() > probability)
 				return 0;
 			break;
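
As a side note, a small stand-alone sketch (not part of the patch) of the sampling predicate before and after this change: the new test makes a probability of 0 never sample and UINT32_MAX always sample, whereas the old ">=" comparison could still skip even at the maximum probability.

#include <stdbool.h>
#include <stdint.h>

/* Old test: execute the nested actions when rnd < probability; even
 * probability == UINT32_MAX could skip (when rnd == UINT32_MAX).
 */
static bool sample_old(uint32_t rnd, uint32_t probability)
{
	return rnd < probability;
}

/* New test: probability == 0 never samples, UINT32_MAX always does. */
static bool sample_new(uint32_t rnd, uint32_t probability)
{
	return probability && rnd <= probability;
}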
 
@@ -741,7 +893,11 @@
 {
 	/* Only tunnel set execution is supported without a mask. */
 	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
-		OVS_CB(skb)->egress_tun_info = nla_data(a);
+		struct ovs_tunnel_info *tun = nla_data(a);
+
+		skb_dst_drop(skb);
+		dst_hold((struct dst_entry *)tun->tun_dst);
+		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
 		return 0;
 	}
 
@@ -759,12 +915,13 @@
 
 	switch (nla_type(a)) {
 	case OVS_KEY_ATTR_PRIORITY:
-		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
+			       *get_mask(a, u32 *));
 		flow_key->phy.priority = skb->priority;
 		break;
 
 	case OVS_KEY_ATTR_SKB_MARK:
-		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
 		flow_key->phy.skb_mark = skb->mark;
 		break;
 
@@ -807,6 +964,13 @@
 		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
 								    __be32 *));
 		break;
+
+	case OVS_KEY_ATTR_CT_STATE:
+	case OVS_KEY_ATTR_CT_ZONE:
+	case OVS_KEY_ATTR_CT_MARK:
+	case OVS_KEY_ATTR_CT_LABEL:
+		err = -EINVAL;
+		break;
 	}
 
 	return err;
@@ -876,7 +1040,7 @@
 			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
 
 			if (out_skb)
-				do_output(dp, out_skb, prev_port);
+				do_output(dp, out_skb, prev_port, key);
 
 			prev_port = -1;
 		}
@@ -933,6 +1097,15 @@
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, key, a, attr, len);
 			break;
+
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
+					     nla_data(a));
+
+			/* Hide stolen IP fragments from user space. */
+			if (err == -EINPROGRESS)
+				return 0;
+			break;
 		}
 
 		if (unlikely(err)) {
@@ -942,7 +1115,7 @@
 	}
 
 	if (prev_port != -1)
-		do_output(dp, skb, prev_port);
+		do_output(dp, skb, prev_port, key);
 	else
 		consume_skb(skb);
 
@@ -984,7 +1157,6 @@
 	int err;
 
 	this_cpu_inc(exec_actions_level);
-	OVS_CB(skb)->egress_tun_info = NULL;
 	err = do_execute_actions(dp, skb, key,
 				 acts->actions, acts->actions_len);
 
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
new file mode 100644
index 0000000..e8e524a
--- /dev/null
+++ b/net/openvswitch/conntrack.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright (c) 2015 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/openvswitch.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+#include "datapath.h"
+#include "conntrack.h"
+#include "flow.h"
+#include "flow_netlink.h"
+
+struct ovs_ct_len_tbl {
+	size_t maxlen;
+	size_t minlen;
+};
+
+/* Metadata mark for masked write to conntrack mark */
+struct md_mark {
+	u32 value;
+	u32 mask;
+};
+
+/* Metadata label for masked write to conntrack label. */
+struct md_label {
+	struct ovs_key_ct_label value;
+	struct ovs_key_ct_label mask;
+};
+
+/* Conntrack action context for execution. */
+struct ovs_conntrack_info {
+	struct nf_conntrack_helper *helper;
+	struct nf_conntrack_zone zone;
+	struct nf_conn *ct;
+	u32 flags;
+	u16 family;
+	struct md_mark mark;
+	struct md_label label;
+};
+
+static u16 key_to_nfproto(const struct sw_flow_key *key)
+{
+	switch (ntohs(key->eth.type)) {
+	case ETH_P_IP:
+		return NFPROTO_IPV4;
+	case ETH_P_IPV6:
+		return NFPROTO_IPV6;
+	default:
+		return NFPROTO_UNSPEC;
+	}
+}
+
+/* Map SKB connection state into the values used by flow definition. */
+static u8 ovs_ct_get_state(enum ip_conntrack_info ctinfo)
+{
+	u8 ct_state = OVS_CS_F_TRACKED;
+
+	switch (ctinfo) {
+	case IP_CT_ESTABLISHED_REPLY:
+	case IP_CT_RELATED_REPLY:
+	case IP_CT_NEW_REPLY:
+		ct_state |= OVS_CS_F_REPLY_DIR;
+		break;
+	default:
+		break;
+	}
+
+	switch (ctinfo) {
+	case IP_CT_ESTABLISHED:
+	case IP_CT_ESTABLISHED_REPLY:
+		ct_state |= OVS_CS_F_ESTABLISHED;
+		break;
+	case IP_CT_RELATED:
+	case IP_CT_RELATED_REPLY:
+		ct_state |= OVS_CS_F_RELATED;
+		break;
+	case IP_CT_NEW:
+	case IP_CT_NEW_REPLY:
+		ct_state |= OVS_CS_F_NEW;
+		break;
+	default:
+		break;
+	}
+
+	return ct_state;
+}
+
+static u32 ovs_ct_get_mark(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+	return ct ? ct->mark : 0;
+#else
+	return 0;
+#endif
+}
+
+static void ovs_ct_get_label(const struct nf_conn *ct,
+			     struct ovs_key_ct_label *label)
+{
+	struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
+
+	if (cl) {
+		size_t len = cl->words * sizeof(long);
+
+		if (len > OVS_CT_LABEL_LEN)
+			len = OVS_CT_LABEL_LEN;
+		else if (len < OVS_CT_LABEL_LEN)
+			memset(label, 0, OVS_CT_LABEL_LEN);
+		memcpy(label, cl->bits, len);
+	} else {
+		memset(label, 0, OVS_CT_LABEL_LEN);
+	}
+}
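
For illustration, a user-space sketch of the clamp-and-zero-fill copy above, assuming the 16-byte label size implied by the nla_total_size(16) entry for OVS_KEY_ATTR_CT_LABEL later in this patch: connlabel storage may be shorter or longer than the OVS label, so the copy length is clamped and any uncovered tail is zeroed first.

#include <stdint.h>
#include <string.h>

#define OVS_CT_LABEL_LEN 16	/* assumed: matches nla_total_size(16) for OVS_KEY_ATTR_CT_LABEL */

static void copy_ct_label(uint8_t label[OVS_CT_LABEL_LEN],
			  const uint8_t *bits, size_t stored_len)
{
	size_t len = stored_len;

	if (len > OVS_CT_LABEL_LEN)
		len = OVS_CT_LABEL_LEN;			/* never overrun the key field */
	else if (len < OVS_CT_LABEL_LEN)
		memset(label, 0, OVS_CT_LABEL_LEN);	/* zero the uncopied tail */
	memcpy(label, bits, len);
}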
+
+static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
+				const struct nf_conntrack_zone *zone,
+				const struct nf_conn *ct)
+{
+	key->ct.state = state;
+	key->ct.zone = zone->id;
+	key->ct.mark = ovs_ct_get_mark(ct);
+	ovs_ct_get_label(ct, &key->ct.label);
+}
+
+/* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
+ * previously sent the packet to conntrack via the ct action.
+ */
+static void ovs_ct_update_key(const struct sk_buff *skb,
+			      struct sw_flow_key *key, bool post_ct)
+{
+	const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	u8 state = 0;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (ct) {
+		state = ovs_ct_get_state(ctinfo);
+		if (ct->master)
+			state |= OVS_CS_F_RELATED;
+		zone = nf_ct_zone(ct);
+	} else if (post_ct) {
+		state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID;
+	}
+	__ovs_ct_update_key(key, state, zone, ct);
+}
+
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+{
+	ovs_ct_update_key(skb, key, false);
+}
+
+int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
+{
+	if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+		return -EMSGSIZE;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, key->ct.zone))
+		return -EMSGSIZE;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, key->ct.mark))
+		return -EMSGSIZE;
+
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label),
+		    &key->ct.label))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
+			   u32 ct_mark, u32 mask)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	u32 new_mark;
+
+	/* The connection could be invalid, in which case set_mark is a no-op. */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 0;
+
+	new_mark = ct_mark | (ct->mark & ~(mask));
+	if (ct->mark != new_mark) {
+		ct->mark = new_mark;
+		nf_conntrack_event_cache(IPCT_MARK, ct);
+		key->ct.mark = new_mark;
+	}
+
+	return 0;
+#else
+	return -ENOTSUPP;
+#endif
+}
+
+static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
+			    const struct ovs_key_ct_label *label,
+			    const struct ovs_key_ct_label *mask)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn_labels *cl;
+	struct nf_conn *ct;
+	int err;
+
+	if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS))
+		return -ENOTSUPP;
+
+	/* The connection could be invalid, in which case set_label is a no-op. */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return 0;
+
+	cl = nf_ct_labels_find(ct);
+	if (!cl) {
+		nf_ct_labels_ext_add(ct);
+		cl = nf_ct_labels_find(ct);
+	}
+	if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN)
+		return -ENOSPC;
+
+	err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask,
+				    OVS_CT_LABEL_LEN / sizeof(u32));
+	if (err)
+		return err;
+
+	ovs_ct_get_label(ct, &key->ct.label);
+	return 0;
+}
+
+/* 'skb' should already be pulled to nh_ofs. */
+static int ovs_ct_helper(struct sk_buff *skb, u16 proto)
+{
+	const struct nf_conntrack_helper *helper;
+	const struct nf_conn_help *help;
+	enum ip_conntrack_info ctinfo;
+	unsigned int protoff;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+		return NF_ACCEPT;
+
+	help = nfct_help(ct);
+	if (!help)
+		return NF_ACCEPT;
+
+	helper = rcu_dereference(help->helper);
+	if (!helper)
+		return NF_ACCEPT;
+
+	switch (proto) {
+	case NFPROTO_IPV4:
+		protoff = ip_hdrlen(skb);
+		break;
+	case NFPROTO_IPV6: {
+		u8 nexthdr = ipv6_hdr(skb)->nexthdr;
+		__be16 frag_off;
+
+		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+					   &nexthdr, &frag_off);
+		if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
+			pr_debug("proto header not found\n");
+			return NF_ACCEPT;
+		}
+		break;
+	}
+	default:
+		WARN_ONCE(1, "helper invoked on non-IP family!");
+		return NF_DROP;
+	}
+
+	return helper->help(skb, protoff, ct, ctinfo);
+}
+
+static int handle_fragments(struct net *net, struct sw_flow_key *key,
+			    u16 zone, struct sk_buff *skb)
+{
+	struct ovs_skb_cb ovs_cb = *OVS_CB(skb);
+
+	if (key->eth.type == htons(ETH_P_IP)) {
+		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
+		int err;
+
+		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+		err = ip_defrag(skb, user);
+		if (err)
+			return err;
+
+		ovs_cb.mru = IPCB(skb)->frag_max_size;
+	} else if (key->eth.type == htons(ETH_P_IPV6)) {
+#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
+		struct sk_buff *reasm;
+
+		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
+		reasm = nf_ct_frag6_gather(skb, user);
+		if (!reasm)
+			return -EINPROGRESS;
+
+		if (skb == reasm)
+			return -EINVAL;
+
+		key->ip.proto = ipv6_hdr(reasm)->nexthdr;
+		skb_morph(skb, reasm);
+		consume_skb(reasm);
+		ovs_cb.mru = IP6CB(skb)->frag_max_size;
+#else
+		return -EPFNOSUPPORT;
+#endif
+	} else {
+		return -EPFNOSUPPORT;
+	}
+
+	key->ip.frag = OVS_FRAG_TYPE_NONE;
+	skb_clear_hash(skb);
+	skb->ignore_df = 1;
+	*OVS_CB(skb) = ovs_cb;
+
+	return 0;
+}
+
+static struct nf_conntrack_expect *
+ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
+		   u16 proto, const struct sk_buff *skb)
+{
+	struct nf_conntrack_tuple tuple;
+
+	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, &tuple))
+		return NULL;
+	return __nf_ct_expect_find(net, zone, &tuple);
+}
+
+/* Determine whether skb->nfct is equal to the result of conntrack lookup. */
+static bool skb_nfct_cached(const struct net *net, const struct sk_buff *skb,
+			    const struct ovs_conntrack_info *info)
+{
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct)
+		return false;
+	if (!net_eq(net, read_pnet(&ct->ct_net)))
+		return false;
+	if (!nf_ct_zone_equal_any(info->ct, nf_ct_zone(ct)))
+		return false;
+	if (info->helper) {
+		struct nf_conn_help *help;
+
+		help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+		if (help && rcu_access_pointer(help->helper) != info->helper)
+			return false;
+	}
+
+	return true;
+}
+
+static int __ovs_ct_lookup(struct net *net, const struct sw_flow_key *key,
+			   const struct ovs_conntrack_info *info,
+			   struct sk_buff *skb)
+{
+	/* If we are recirculating packets to match on conntrack fields and
+	 * committing with a separate conntrack action, then we don't need to
+	 * actually run the packet through conntrack twice unless it's for a
+	 * different zone.
+	 */
+	if (!skb_nfct_cached(net, skb, info)) {
+		struct nf_conn *tmpl = info->ct;
+
+		/* Associate skb with specified zone. */
+		if (tmpl) {
+			if (skb->nfct)
+				nf_conntrack_put(skb->nfct);
+			nf_conntrack_get(&tmpl->ct_general);
+			skb->nfct = &tmpl->ct_general;
+			skb->nfctinfo = IP_CT_NEW;
+		}
+
+		if (nf_conntrack_in(net, info->family, NF_INET_PRE_ROUTING,
+				    skb) != NF_ACCEPT)
+			return -ENOENT;
+
+		if (ovs_ct_helper(skb, info->family) != NF_ACCEPT) {
+			WARN_ONCE(1, "helper rejected packet");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Lookup connection and read fields into key. */
+static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
+			 const struct ovs_conntrack_info *info,
+			 struct sk_buff *skb)
+{
+	struct nf_conntrack_expect *exp;
+
+	exp = ovs_ct_expect_find(net, &info->zone, info->family, skb);
+	if (exp) {
+		u8 state;
+
+		state = OVS_CS_F_TRACKED | OVS_CS_F_NEW | OVS_CS_F_RELATED;
+		__ovs_ct_update_key(key, state, &info->zone, exp->master);
+	} else {
+		int err;
+
+		err = __ovs_ct_lookup(net, key, info, skb);
+		if (err)
+			return err;
+
+		ovs_ct_update_key(skb, key, true);
+	}
+
+	return 0;
+}
+
+/* Lookup connection and confirm if unconfirmed. */
+static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
+			 const struct ovs_conntrack_info *info,
+			 struct sk_buff *skb)
+{
+	u8 state;
+	int err;
+
+	state = key->ct.state;
+	if (key->ct.zone == info->zone.id &&
+	    ((state & OVS_CS_F_TRACKED) && !(state & OVS_CS_F_NEW))) {
+		/* Previous lookup has shown that this connection is already
+		 * tracked and committed. Skip committing.
+		 */
+		return 0;
+	}
+
+	err = __ovs_ct_lookup(net, key, info, skb);
+	if (err)
+		return err;
+	if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+		return -EINVAL;
+
+	ovs_ct_update_key(skb, key, true);
+
+	return 0;
+}
+
+static bool label_nonzero(const struct ovs_key_ct_label *label)
+{
+	size_t i;
+
+	for (i = 0; i < sizeof(*label); i++)
+		if (label->ct_label[i])
+			return true;
+
+	return false;
+}
+
+int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+		   struct sw_flow_key *key,
+		   const struct ovs_conntrack_info *info)
+{
+	int nh_ofs;
+	int err;
+
+	/* The conntrack module expects to be working at L3. */
+	nh_ofs = skb_network_offset(skb);
+	skb_pull(skb, nh_ofs);
+
+	if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
+		err = handle_fragments(net, key, info->zone.id, skb);
+		if (err)
+			return err;
+	}
+
+	if (info->flags & OVS_CT_F_COMMIT)
+		err = ovs_ct_commit(net, key, info, skb);
+	else
+		err = ovs_ct_lookup(net, key, info, skb);
+	if (err)
+		goto err;
+
+	if (info->mark.mask) {
+		err = ovs_ct_set_mark(skb, key, info->mark.value,
+				      info->mark.mask);
+		if (err)
+			goto err;
+	}
+	if (label_nonzero(&info->label.mask))
+		err = ovs_ct_set_label(skb, key, &info->label.value,
+				       &info->label.mask);
+err:
+	skb_push(skb, nh_ofs);
+	return err;
+}
+
+static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
+			     const struct sw_flow_key *key, bool log)
+{
+	struct nf_conntrack_helper *helper;
+	struct nf_conn_help *help;
+
+	helper = nf_conntrack_helper_try_module_get(name, info->family,
+						    key->ip.proto);
+	if (!helper) {
+		OVS_NLERR(log, "Unknown helper \"%s\"", name);
+		return -EINVAL;
+	}
+
+	help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
+	if (!help) {
+		module_put(helper->me);
+		return -ENOMEM;
+	}
+
+	rcu_assign_pointer(help->helper, helper);
+	info->helper = helper;
+	return 0;
+}
+
+static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
+	[OVS_CT_ATTR_FLAGS]	= { .minlen = sizeof(u32),
+				    .maxlen = sizeof(u32) },
+	[OVS_CT_ATTR_ZONE]	= { .minlen = sizeof(u16),
+				    .maxlen = sizeof(u16) },
+	[OVS_CT_ATTR_MARK]	= { .minlen = sizeof(struct md_mark),
+				    .maxlen = sizeof(struct md_mark) },
+	[OVS_CT_ATTR_LABEL]	= { .minlen = sizeof(struct md_label),
+				    .maxlen = sizeof(struct md_label) },
+	[OVS_CT_ATTR_HELPER]	= { .minlen = 1,
+				    .maxlen = NF_CT_HELPER_NAME_LEN }
+};
+
+static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
+		    const char **helper, bool log)
+{
+	struct nlattr *a;
+	int rem;
+
+	nla_for_each_nested(a, attr, rem) {
+		int type = nla_type(a);
+		int maxlen = ovs_ct_attr_lens[type].maxlen;
+		int minlen = ovs_ct_attr_lens[type].minlen;
+
+		if (type > OVS_CT_ATTR_MAX) {
+			OVS_NLERR(log,
+				  "Unknown conntrack attr (type=%d, max=%d)",
+				  type, OVS_CT_ATTR_MAX);
+			return -EINVAL;
+		}
+		if (nla_len(a) < minlen || nla_len(a) > maxlen) {
+			OVS_NLERR(log,
+				  "Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
+				  type, nla_len(a), maxlen);
+			return -EINVAL;
+		}
+
+		switch (type) {
+		case OVS_CT_ATTR_FLAGS:
+			info->flags = nla_get_u32(a);
+			break;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+		case OVS_CT_ATTR_ZONE:
+			info->zone.id = nla_get_u16(a);
+			break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+		case OVS_CT_ATTR_MARK: {
+			struct md_mark *mark = nla_data(a);
+
+			info->mark = *mark;
+			break;
+		}
+#endif
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+		case OVS_CT_ATTR_LABEL: {
+			struct md_label *label = nla_data(a);
+
+			info->label = *label;
+			break;
+		}
+#endif
+		case OVS_CT_ATTR_HELPER:
+			*helper = nla_data(a);
+			if (!memchr(*helper, '\0', nla_len(a))) {
+				OVS_NLERR(log, "Invalid conntrack helper");
+				return -EINVAL;
+			}
+			break;
+		default:
+			OVS_NLERR(log, "Unknown conntrack attr (%d)",
+				  type);
+			return -EINVAL;
+		}
+	}
+
+	if (rem > 0) {
+		OVS_NLERR(log, "Conntrack attr has %d unknown bytes", rem);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr)
+{
+	if (attr == OVS_KEY_ATTR_CT_STATE)
+		return true;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    attr == OVS_KEY_ATTR_CT_ZONE)
+		return true;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    attr == OVS_KEY_ATTR_CT_MARK)
+		return true;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    attr == OVS_KEY_ATTR_CT_LABEL) {
+		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+		return ovs_net->xt_label;
+	}
+
+	return false;
+}
+
+int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
+		       const struct sw_flow_key *key,
+		       struct sw_flow_actions **sfa,  bool log)
+{
+	struct ovs_conntrack_info ct_info;
+	const char *helper = NULL;
+	u16 family;
+	int err;
+
+	family = key_to_nfproto(key);
+	if (family == NFPROTO_UNSPEC) {
+		OVS_NLERR(log, "ct family unspecified");
+		return -EINVAL;
+	}
+
+	memset(&ct_info, 0, sizeof(ct_info));
+	ct_info.family = family;
+
+	nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
+			NF_CT_DEFAULT_ZONE_DIR, 0);
+
+	err = parse_ct(attr, &ct_info, &helper, log);
+	if (err)
+		return err;
+
+	/* Set up template for tracking connections in specific zones. */
+	ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
+	if (!ct_info.ct) {
+		OVS_NLERR(log, "Failed to allocate conntrack template");
+		return -ENOMEM;
+	}
+	if (helper) {
+		err = ovs_ct_add_helper(&ct_info, helper, key, log);
+		if (err)
+			goto err_free_ct;
+	}
+
+	err = ovs_nla_add_action(sfa, OVS_ACTION_ATTR_CT, &ct_info,
+				 sizeof(ct_info), log);
+	if (err)
+		goto err_free_ct;
+
+	__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+	nf_conntrack_get(&ct_info.ct->ct_general);
+	return 0;
+err_free_ct:
+	nf_conntrack_free(ct_info.ct);
+	return err;
+}
+
+int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
+			  struct sk_buff *skb)
+{
+	struct nlattr *start;
+
+	start = nla_nest_start(skb, OVS_ACTION_ATTR_CT);
+	if (!start)
+		return -EMSGSIZE;
+
+	if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags))
+		return -EMSGSIZE;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
+	    nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
+		return -EMSGSIZE;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
+	    nla_put(skb, OVS_CT_ATTR_MARK, sizeof(ct_info->mark),
+		    &ct_info->mark))
+		return -EMSGSIZE;
+	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+	    nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label),
+		    &ct_info->label))
+		return -EMSGSIZE;
+	if (ct_info->helper) {
+		if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
+				   ct_info->helper->name))
+			return -EMSGSIZE;
+	}
+
+	nla_nest_end(skb, start);
+
+	return 0;
+}
+
+void ovs_ct_free_action(const struct nlattr *a)
+{
+	struct ovs_conntrack_info *ct_info = nla_data(a);
+
+	if (ct_info->helper)
+		module_put(ct_info->helper->me);
+	if (ct_info->ct)
+		nf_ct_put(ct_info->ct);
+}
+
+void ovs_ct_init(struct net *net)
+{
+	unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE;
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+	if (nf_connlabels_get(net, n_bits)) {
+		ovs_net->xt_label = false;
+		OVS_NLERR(true, "Failed to set connlabel length");
+	} else {
+		ovs_net->xt_label = true;
+	}
+}
+
+void ovs_ct_exit(struct net *net)
+{
+	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+
+	if (ovs_net->xt_label)
+		nf_connlabels_put(net);
+}
diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h
new file mode 100644
index 0000000..3cb3066
--- /dev/null
+++ b/net/openvswitch/conntrack.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OVS_CONNTRACK_H
+#define OVS_CONNTRACK_H 1
+
+#include "flow.h"
+
+struct ovs_conntrack_info;
+enum ovs_key_attr;
+
+#if defined(CONFIG_OPENVSWITCH_CONNTRACK)
+void ovs_ct_init(struct net *);
+void ovs_ct_exit(struct net *);
+bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
+int ovs_ct_copy_action(struct net *, const struct nlattr *,
+		       const struct sw_flow_key *, struct sw_flow_actions **,
+		       bool log);
+int ovs_ct_action_to_attr(const struct ovs_conntrack_info *, struct sk_buff *);
+
+int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
+		   const struct ovs_conntrack_info *);
+
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
+void ovs_ct_free_action(const struct nlattr *a);
+#else
+#include <linux/errno.h>
+
+static inline void ovs_ct_init(struct net *net) { }
+
+static inline void ovs_ct_exit(struct net *net) { }
+
+static inline bool ovs_ct_verify(struct net *net, int attr)
+{
+	return false;
+}
+
+static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla,
+				     const struct sw_flow_key *key,
+				     struct sw_flow_actions **acts, bool log)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ovs_ct_action_to_attr(const struct ovs_conntrack_info *info,
+					struct sk_buff *skb)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
+				 struct sw_flow_key *key,
+				 const struct ovs_conntrack_info *info)
+{
+	return -ENOTSUPP;
+}
+
+static inline void ovs_ct_fill_key(const struct sk_buff *skb,
+				   struct sw_flow_key *key)
+{
+	key->ct.state = 0;
+	key->ct.zone = 0;
+	key->ct.mark = 0;
+	memset(&key->ct.label, 0, sizeof(key->ct.label));
+}
+
+static inline int ovs_ct_put_key(const struct sw_flow_key *key,
+				 struct sk_buff *skb)
+{
+	return 0;
+}
+
+static inline void ovs_ct_free_action(const struct nlattr *a) { }
+#endif
+#endif /* ovs_conntrack.h */
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ff8c4a4..6fbd2de 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -176,7 +176,7 @@
 const char *ovs_dp_name(const struct datapath *dp)
 {
 	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
-	return vport->ops->get_name(vport);
+	return ovs_vport_name(vport);
 }
 
 static int get_dpifindex(const struct datapath *dp)
@@ -188,7 +188,7 @@
 
 	local = ovs_vport_rcu(dp, OVSP_LOCAL);
 	if (local)
-		ifindex = netdev_vport_priv(local)->dev->ifindex;
+		ifindex = local->dev->ifindex;
 	else
 		ifindex = 0;
 
@@ -275,6 +275,7 @@
 		memset(&upcall, 0, sizeof(upcall));
 		upcall.cmd = OVS_PACKET_CMD_MISS;
 		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
+		upcall.mru = OVS_CB(skb)->mru;
 		error = ovs_dp_upcall(dp, skb, key, &upcall);
 		if (unlikely(error))
 			kfree_skb(skb);
@@ -400,9 +401,23 @@
 	if (upcall_info->actions_len)
 		size += nla_total_size(upcall_info->actions_len);
 
+	/* OVS_PACKET_ATTR_MRU */
+	if (upcall_info->mru)
+		size += nla_total_size(sizeof(upcall_info->mru));
+
 	return size;
 }
 
+static void pad_packet(struct datapath *dp, struct sk_buff *skb)
+{
+	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+		size_t plen = NLA_ALIGN(skb->len) - skb->len;
+
+		if (plen > 0)
+			memset(skb_put(skb, plen), 0, plen);
+	}
+}
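
For reference, a stand-alone illustration of the padding pad_packet() computes: netlink attributes are 4-byte aligned, so upcall messages are padded to the next multiple of NLA_ALIGNTO when the datapath was not opened with OVS_DP_F_UNALIGNED. The NLA_ALIGN definitions mirror <linux/netlink.h>.

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

int main(void)
{
	unsigned int skb_len = 45;
	unsigned int plen = NLA_ALIGN(skb_len) - skb_len;

	/* prints "pad 3 bytes: 45 -> 48" */
	printf("pad %u bytes: %u -> %u\n", plen, skb_len, NLA_ALIGN(skb_len));
	return 0;
}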
+
 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 				  const struct sw_flow_key *key,
 				  const struct dp_upcall_info *upcall_info)
@@ -476,7 +491,8 @@
 	if (upcall_info->egress_tun_info) {
 		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
 		err = ovs_nla_put_egress_tunnel_key(user_skb,
-						    upcall_info->egress_tun_info);
+						    upcall_info->egress_tun_info,
+						    upcall_info->egress_tun_opts);
 		BUG_ON(err);
 		nla_nest_end(user_skb, nla);
 	}
@@ -492,6 +508,16 @@
 			nla_nest_cancel(user_skb, nla);
 	}
 
+	/* Add OVS_PACKET_ATTR_MRU */
+	if (upcall_info->mru) {
+		if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
+				upcall_info->mru)) {
+			err = -ENOBUFS;
+			goto out;
+		}
+		pad_packet(dp, user_skb);
+	}
+
 	/* Only reserve room for attribute header, packet data is added
 	 * in skb_zerocopy() */
 	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -505,12 +531,7 @@
 		goto out;
 
 	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
-	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
-		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
-
-		if (plen > 0)
-			memset(skb_put(user_skb, plen), 0, plen);
-	}
+	pad_packet(dp, user_skb);
 
 	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
@@ -527,6 +548,7 @@
 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 {
 	struct ovs_header *ovs_header = info->userhdr;
+	struct net *net = sock_net(skb->sk);
 	struct nlattr **a = info->attrs;
 	struct sw_flow_actions *acts;
 	struct sk_buff *packet;
@@ -535,6 +557,7 @@
 	struct datapath *dp;
 	struct ethhdr *eth;
 	struct vport *input_vport;
+	u16 mru = 0;
 	int len;
 	int err;
 	bool log = !a[OVS_PACKET_ATTR_PROBE];
@@ -564,29 +587,35 @@
 	else
 		packet->protocol = htons(ETH_P_802_2);
 
+	/* Set packet's mru */
+	if (a[OVS_PACKET_ATTR_MRU]) {
+		mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
+		packet->ignore_df = 1;
+	}
+	OVS_CB(packet)->mru = mru;
+
 	/* Build an sw_flow for sending this packet. */
 	flow = ovs_flow_alloc();
 	err = PTR_ERR(flow);
 	if (IS_ERR(flow))
 		goto err_kfree_skb;
 
-	err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
-					     &flow->key, log);
+	err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
+					     packet, &flow->key, log);
 	if (err)
 		goto err_flow_free;
 
-	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
+	err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
 				   &flow->key, &acts, log);
 	if (err)
 		goto err_flow_free;
 
 	rcu_assign_pointer(flow->sf_acts, acts);
-	OVS_CB(packet)->egress_tun_info = NULL;
 	packet->priority = flow->key.phy.priority;
 	packet->mark = flow->key.phy.skb_mark;
 
 	rcu_read_lock();
-	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp_rcu(net, ovs_header->dp_ifindex);
 	err = -ENODEV;
 	if (!dp)
 		goto err_unlock;
@@ -598,6 +627,7 @@
 	if (!input_vport)
 		goto err_unlock;
 
+	packet->dev = input_vport->dev;
 	OVS_CB(packet)->input_vport = input_vport;
 	sf_acts = rcu_dereference(flow->sf_acts);
 
@@ -624,6 +654,7 @@
 	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
 	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
 	[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
+	[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
@@ -713,7 +744,7 @@
 
 	/* OVS_FLOW_ATTR_ACTIONS */
 	if (should_fill_actions(ufid_flags))
-		len += nla_total_size(acts->actions_len);
+		len += nla_total_size(acts->orig_len);
 
 	return len
 		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
@@ -880,6 +911,7 @@
 
 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 {
+	struct net *net = sock_net(skb->sk);
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
 	struct sw_flow *flow = NULL, *new_flow;
@@ -915,7 +947,7 @@
 
 	/* Extract key. */
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
 				  a[OVS_FLOW_ATTR_MASK], log);
 	if (error)
 		goto err_kfree_flow;
@@ -929,8 +961,8 @@
 		goto err_kfree_flow;
 
 	/* Validate actions. */
-	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
-				     &acts, log);
+	error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
+				     &new_flow->key, &acts, log);
 	if (error) {
 		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
 		goto err_kfree_flow;
@@ -944,7 +976,7 @@
 	}
 
 	ovs_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp(net, ovs_header->dp_ifindex);
 	if (unlikely(!dp)) {
 		error = -ENODEV;
 		goto err_unlock_ovs;
@@ -1018,7 +1050,7 @@
 		}
 		ovs_unlock();
 
-		ovs_nla_free_flow_actions(old_acts);
+		ovs_nla_free_flow_actions_rcu(old_acts);
 		ovs_flow_free(new_flow, false);
 	}
 
@@ -1030,7 +1062,7 @@
 	ovs_unlock();
 	kfree_skb(reply);
 err_kfree_acts:
-	kfree(acts);
+	ovs_nla_free_flow_actions(acts);
 err_kfree_flow:
 	ovs_flow_free(new_flow, false);
 error:
@@ -1038,7 +1070,8 @@
 }
 
 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
-static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
+static struct sw_flow_actions *get_flow_actions(struct net *net,
+						const struct nlattr *a,
 						const struct sw_flow_key *key,
 						const struct sw_flow_mask *mask,
 						bool log)
@@ -1048,7 +1081,7 @@
 	int error;
 
 	ovs_flow_mask_key(&masked_key, key, mask);
-	error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
+	error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
 	if (error) {
 		OVS_NLERR(log,
 			  "Actions may not be safe on all matching packets");
@@ -1060,6 +1093,7 @@
 
 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
 {
+	struct net *net = sock_net(skb->sk);
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
 	struct sw_flow_key key;
@@ -1084,15 +1118,15 @@
 
 	ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
 	ovs_match_init(&match, &key, &mask);
-	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+	error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
 				  a[OVS_FLOW_ATTR_MASK], log);
 	if (error)
 		goto error;
 
 	/* Validate actions. */
 	if (a[OVS_FLOW_ATTR_ACTIONS]) {
-		acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
-					log);
+		acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
+					&mask, log);
 		if (IS_ERR(acts)) {
 			error = PTR_ERR(acts);
 			goto error;
@@ -1108,7 +1142,7 @@
 	}
 
 	ovs_lock();
-	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+	dp = get_dp(net, ovs_header->dp_ifindex);
 	if (unlikely(!dp)) {
 		error = -ENODEV;
 		goto err_unlock_ovs;
@@ -1157,7 +1191,7 @@
 	if (reply)
 		ovs_notify(&dp_flow_genl_family, reply, info);
 	if (old_acts)
-		ovs_nla_free_flow_actions(old_acts);
+		ovs_nla_free_flow_actions_rcu(old_acts);
 
 	return 0;
 
@@ -1165,7 +1199,7 @@
 	ovs_unlock();
 	kfree_skb(reply);
 err_kfree_acts:
-	kfree(acts);
+	ovs_nla_free_flow_actions(acts);
 error:
 	return error;
 }
@@ -1174,6 +1208,7 @@
 {
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
+	struct net *net = sock_net(skb->sk);
 	struct sw_flow_key key;
 	struct sk_buff *reply;
 	struct sw_flow *flow;
@@ -1188,7 +1223,7 @@
 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
 	if (a[OVS_FLOW_ATTR_KEY]) {
 		ovs_match_init(&match, &key, NULL);
-		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
 					log);
 	} else if (!ufid_present) {
 		OVS_NLERR(log,
@@ -1232,6 +1267,7 @@
 {
 	struct nlattr **a = info->attrs;
 	struct ovs_header *ovs_header = info->userhdr;
+	struct net *net = sock_net(skb->sk);
 	struct sw_flow_key key;
 	struct sk_buff *reply;
 	struct sw_flow *flow = NULL;
@@ -1246,8 +1282,8 @@
 	ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
 	if (a[OVS_FLOW_ATTR_KEY]) {
 		ovs_match_init(&match, &key, NULL);
-		err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
-					log);
+		err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
+					NULL, log);
 		if (unlikely(err))
 			return err;
 	}
@@ -1800,7 +1836,7 @@
 	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
 	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
 	    nla_put_string(skb, OVS_VPORT_ATTR_NAME,
-			   vport->ops->get_name(vport)))
+			   ovs_vport_name(vport)))
 		goto nla_put_failure;
 
 	ovs_vport_get_stats(vport, &vport_stats);
@@ -2203,6 +2239,7 @@
 
 	INIT_LIST_HEAD(&ovs_net->dps);
 	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
+	ovs_ct_init(net);
 	return 0;
 }
 
@@ -2219,13 +2256,10 @@
 			struct vport *vport;
 
 			hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
-				struct netdev_vport *netdev_vport;
-
 				if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
 					continue;
 
-				netdev_vport = netdev_vport_priv(vport);
-				if (dev_net(netdev_vport->dev) == dnet)
+				if (dev_net(vport->dev) == dnet)
 					list_add(&vport->detach_list, head);
 			}
 		}
@@ -2240,6 +2274,7 @@
 	struct net *net;
 	LIST_HEAD(head);
 
+	ovs_ct_exit(dnet);
 	ovs_lock();
 	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
 		__dp_destroy(dp);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index cd691e9..f88038a 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -25,10 +25,11 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/u64_stats_sync.h>
+#include <net/ip_tunnels.h>
 
+#include "conntrack.h"
 #include "flow.h"
 #include "flow_table.h"
-#include "vport.h"
 
 #define DP_MAX_PORTS           USHRT_MAX
 #define DP_VPORT_HASH_BUCKETS  1024
@@ -92,14 +93,14 @@
 
 /**
  * struct ovs_skb_cb - OVS data in skb CB
- * @egress_tun_key: Tunnel information about this packet on egress path.
- * NULL if the packet is not being tunneled.
  * @input_vport: The original vport packet came in on. This value is cached
  * when a packet is received by OVS.
+ * @mru: The maximum received fragment size; 0 if the packet is not
+ * fragmented.
  */
 struct ovs_skb_cb {
-	struct ovs_tunnel_info  *egress_tun_info;
 	struct vport		*input_vport;
+	u16			mru;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
 
@@ -112,14 +113,17 @@
  * then no packet is sent and the packet is accounted in the datapath's @n_lost
  * counter.
  * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
+ * @mru: If not zero, the maximum received IP fragment size.
  */
 struct dp_upcall_info {
-	const struct ovs_tunnel_info *egress_tun_info;
+	struct ip_tunnel_info *egress_tun_info;
+	const void *egress_tun_opts;
 	const struct nlattr *userdata;
 	const struct nlattr *actions;
 	int actions_len;
 	u32 portid;
 	u8 cmd;
+	u16 mru;
 };
 
 /**
@@ -130,7 +134,9 @@
 struct ovs_net {
 	struct list_head dps;
 	struct work_struct dp_notify_work;
-	struct vport_net vport_net;
+
+	/* Module reference for configuring conntrack. */
+	bool xt_label;
 };
 
 extern int ovs_net_id;
@@ -199,6 +205,10 @@
 int action_fifos_init(void);
 void action_fifos_exit(void);
 
+/* 'KEY' must not have any bits set outside of the 'MASK' */
+#define OVS_MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define OVS_SET_MASKED(OLD, KEY, MASK) ((OLD) = OVS_MASKED(OLD, KEY, MASK))
+
 #define OVS_NLERR(logging_allowed, fmt, ...)			\
 do {								\
 	if (logging_allowed && net_ratelimit())			\
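
For reference, a stand-alone sketch of the masked-update semantics of the OVS_MASKED/OVS_SET_MASKED macros added above: bits inside the mask are taken from the key, bits outside it are preserved from the old value (the key is required to have no bits set outside the mask).

#include <stdint.h>
#include <stdio.h>

#define OVS_MASKED(OLD, KEY, MASK)	((KEY) | ((OLD) & ~(MASK)))
#define OVS_SET_MASKED(OLD, KEY, MASK)	((OLD) = OVS_MASKED(OLD, KEY, MASK))

int main(void)
{
	uint8_t tos = 0x40;	/* old value */
	uint8_t key = 0x0f;	/* new bits, all within the mask */
	uint8_t mask = 0x0f;	/* only the low nibble may change */

	OVS_SET_MASKED(tos, key, mask);
	printf("0x%02x\n", tos);	/* prints 0x4f */
	return 0;
}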
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 2c631fe..a7a80a6 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -58,13 +58,10 @@
 			struct hlist_node *n;
 
 			hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
-				struct netdev_vport *netdev_vport;
-
 				if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
 					continue;
 
-				netdev_vport = netdev_vport_priv(vport);
-				if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+				if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
 					dp_detach_port_notify(vport);
 			}
 		}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index bc7b0ab..c8db44a 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -46,9 +46,11 @@
 #include <net/mpls.h>
 #include <net/ndisc.h>
 
+#include "conntrack.h"
 #include "datapath.h"
 #include "flow.h"
 #include "flow_netlink.h"
+#include "vport.h"
 
 u64 ovs_flow_used_time(unsigned long flow_jiffies)
 {
@@ -271,8 +273,6 @@
 	key->ipv6.addr.dst = nh->daddr;
 
 	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
-	if (unlikely(payload_ofs < 0))
-		return -EINVAL;
 
 	if (frag_off) {
 		if (frag_off & htons(~0x7))
@@ -283,6 +283,13 @@
 		key->ip.frag = OVS_FRAG_TYPE_NONE;
 	}
 
+	/* Delayed handling of error in ipv6_skip_exthdr() as it
+	 * always sets frag_off to a valid value which may be
+	 * used to set key->ip.frag above.
+	 */
+	if (unlikely(payload_ofs < 0))
+		return -EPROTO;
+
 	nh_len = payload_ofs - nh_ofs;
 	skb_set_transport_header(skb, nh_ofs + nh_len);
 	key->ip.proto = nexthdr;
@@ -622,12 +629,16 @@
 
 		nh_len = parse_ipv6hdr(skb, key);
 		if (unlikely(nh_len < 0)) {
-			memset(&key->ip, 0, sizeof(key->ip));
-			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
-			if (nh_len == -EINVAL) {
+			switch (nh_len) {
+			case -EINVAL:
+				memset(&key->ip, 0, sizeof(key->ip));
+				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
+				/* fall-through */
+			case -EPROTO:
 				skb->transport_header = skb->network_header;
 				error = 0;
-			} else {
+				break;
+			default:
 				error = nh_len;
 			}
 			return error;
@@ -682,19 +693,22 @@
 	return key_extract(skb, key);
 }
 
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 			 struct sk_buff *skb, struct sw_flow_key *key)
 {
 	/* Extract metadata from packet. */
 	if (tun_info) {
-		memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+		if (ip_tunnel_info_af(tun_info) != AF_INET)
+			return -EINVAL;
+		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
 
-		if (tun_info->options) {
+		if (tun_info->options_len) {
 			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
 						   8)) - 1
 					> sizeof(key->tun_opts));
-			memcpy(TUN_METADATA_OPTS(key, tun_info->options_len),
-			       tun_info->options, tun_info->options_len);
+
+			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
+						tun_info);
 			key->tun_opts_len = tun_info->options_len;
 		} else {
 			key->tun_opts_len = 0;
@@ -707,13 +721,14 @@
 	key->phy.priority = skb->priority;
 	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
 	key->phy.skb_mark = skb->mark;
+	ovs_ct_fill_key(skb, key);
 	key->ovs_flow_hash = 0;
 	key->recirc_id = 0;
 
 	return key_extract(skb, key);
 }
 
-int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
 				   struct sk_buff *skb,
 				   struct sw_flow_key *key, bool log)
 {
@@ -722,7 +737,7 @@
 	memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);
 
 	/* Extract metadata from netlink attributes. */
-	err = ovs_nla_get_flow_metadata(attr, key, log);
+	err = ovs_nla_get_flow_metadata(net, attr, key, log);
 	if (err)
 		return err;
 
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index a076e44..fe527d2 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -32,31 +32,11 @@
 #include <linux/time.h>
 #include <linux/flex_array.h>
 #include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_metadata.h>
 
 struct sk_buff;
 
-/* Used to memset ovs_key_ipv4_tunnel padding. */
-#define OVS_TUNNEL_KEY_SIZE					\
-	(offsetof(struct ovs_key_ipv4_tunnel, tp_dst) +		\
-	 FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
-
-struct ovs_key_ipv4_tunnel {
-	__be64 tun_id;
-	__be32 ipv4_src;
-	__be32 ipv4_dst;
-	__be16 tun_flags;
-	u8   ipv4_tos;
-	u8   ipv4_ttl;
-	__be16 tp_src;
-	__be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
-
-struct ovs_tunnel_info {
-	struct ovs_key_ipv4_tunnel tunnel;
-	const void *options;
-	u8 options_len;
-};
-
 /* Store options at the end of the array if they are less than the
  * maximum size. This allows us to get the benefits of variable length
  * matching for small options.
@@ -66,54 +46,9 @@
 #define TUN_METADATA_OPTS(flow_key, opt_len) \
 	((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
 
-static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-					    __be32 saddr, __be32 daddr,
-					    u8 tos, u8 ttl,
-					    __be16 tp_src,
-					    __be16 tp_dst,
-					    __be64 tun_id,
-					    __be16 tun_flags,
-					    const void *opts,
-					    u8 opts_len)
-{
-	tun_info->tunnel.tun_id = tun_id;
-	tun_info->tunnel.ipv4_src = saddr;
-	tun_info->tunnel.ipv4_dst = daddr;
-	tun_info->tunnel.ipv4_tos = tos;
-	tun_info->tunnel.ipv4_ttl = ttl;
-	tun_info->tunnel.tun_flags = tun_flags;
-
-	/* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
-	 * the upper tunnel are used.
-	 * E.g: GRE over IPSEC, the tp_src and tp_port are zero.
-	 */
-	tun_info->tunnel.tp_src = tp_src;
-	tun_info->tunnel.tp_dst = tp_dst;
-
-	/* Clear struct padding. */
-	if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
-		memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
-		       0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
-
-	tun_info->options = opts;
-	tun_info->options_len = opts_len;
-}
-
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-					  const struct iphdr *iph,
-					  __be16 tp_src,
-					  __be16 tp_dst,
-					  __be64 tun_id,
-					  __be16 tun_flags,
-					  const void *opts,
-					  u8 opts_len)
-{
-	__ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
-				 iph->tos, iph->ttl,
-				 tp_src, tp_dst,
-				 tun_id, tun_flags,
-				 opts, opts_len);
-}
+struct ovs_tunnel_info {
+	struct metadata_dst	*tun_dst;
+};
 
 #define OVS_SW_FLOW_KEY_METADATA_SIZE			\
 	(offsetof(struct sw_flow_key, recirc_id) +	\
@@ -122,7 +57,7 @@
 struct sw_flow_key {
 	u8 tun_opts[255];
 	u8 tun_opts_len;
-	struct ovs_key_ipv4_tunnel tun_key;  /* Encapsulating tunnel key. */
+	struct ip_tunnel_key tun_key;	/* Encapsulating tunnel key. */
 	struct {
 		u32	priority;	/* Packet QoS priority. */
 		u32	skb_mark;	/* SKB mark. */
@@ -176,6 +111,14 @@
 			} nd;
 		} ipv6;
 	};
+	struct {
+		/* Connection tracking fields. */
+		u16 zone;
+		u32 mark;
+		u8 state;
+		struct ovs_key_ct_label label;
+	} ct;
+
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
 struct sw_flow_key_range {
@@ -209,6 +152,7 @@
 
 struct sw_flow_actions {
 	struct rcu_head rcu;
+	size_t orig_len;	/* From flow_cmd_new netlink actions size */
 	u32 actions_len;
 	struct nlattr actions[];
 };
@@ -273,11 +217,11 @@
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 			 struct sk_buff *skb,
 			 struct sw_flow_key *key);
 /* Extract key from packet coming from userspace. */
-int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
 				   struct sk_buff *skb,
 				   struct sw_flow_key *key, bool log);
 
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624e41c..c92d6a2 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -47,9 +47,9 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/mpls.h>
+#include <net/vxlan.h>
 
 #include "flow_netlink.h"
-#include "vport-vxlan.h"
 
 struct ovs_len_tbl {
 	int len;
@@ -281,7 +281,7 @@
 	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
 	 * updating this function.
 	 */
-	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22);
+	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26);
 
 	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
 		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
@@ -290,6 +290,10 @@
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
+		+ nla_total_size(1)   /* OVS_KEY_ATTR_CT_STATE */
+		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
+		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
+		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABEL */
 		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
 		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
 		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@ -339,6 +343,10 @@
 	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
 				     .next = ovs_tunnel_key_lens, },
 	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
+	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u8) },
+	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
+	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
+	[OVS_KEY_ATTR_CT_LABEL]	 = { .len = sizeof(struct ovs_key_ct_label) },
 };
 
 static bool is_all_zero(const u8 *fp, size_t size)
@@ -475,7 +483,7 @@
 {
 	struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
 	unsigned long opt_key_offset;
-	struct ovs_vxlan_opts opts;
+	struct vxlan_metadata opts;
 	int err;
 
 	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
@@ -534,19 +542,19 @@
 			tun_flags |= TUNNEL_KEY;
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
 					nla_get_in_addr(a), is_mask);
 			break;
 		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
 					nla_get_in_addr(a), is_mask);
 			break;
 		case OVS_TUNNEL_KEY_ATTR_TOS:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+			SW_FLOW_KEY_PUT(match, tun_key.tos,
 					nla_get_u8(a), is_mask);
 			break;
 		case OVS_TUNNEL_KEY_ATTR_TTL:
-			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+			SW_FLOW_KEY_PUT(match, tun_key.ttl,
 					nla_get_u8(a), is_mask);
 			ttl = true;
 			break;
@@ -609,7 +617,7 @@
 	}
 
 	if (!is_mask) {
-		if (!match->key->tun_key.ipv4_dst) {
+		if (!match->key->tun_key.u.ipv4.dst) {
 			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
 			return -EINVAL;
 		}
@@ -626,7 +634,7 @@
 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
 			       const void *tun_opts, int swkey_tun_opts_len)
 {
-	const struct ovs_vxlan_opts *opts = tun_opts;
+	const struct vxlan_metadata *opts = tun_opts;
 	struct nlattr *nla;
 
 	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
@@ -641,24 +649,24 @@
 }
 
 static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
-				const struct ovs_key_ipv4_tunnel *output,
+				const struct ip_tunnel_key *output,
 				const void *tun_opts, int swkey_tun_opts_len)
 {
 	if (output->tun_flags & TUNNEL_KEY &&
 	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
 		return -EMSGSIZE;
-	if (output->ipv4_src &&
+	if (output->u.ipv4.src &&
 	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
-			    output->ipv4_src))
+			    output->u.ipv4.src))
 		return -EMSGSIZE;
-	if (output->ipv4_dst &&
+	if (output->u.ipv4.dst &&
 	    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
-			    output->ipv4_dst))
+			    output->u.ipv4.dst))
 		return -EMSGSIZE;
-	if (output->ipv4_tos &&
-	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+	if (output->tos &&
+	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
 		return -EMSGSIZE;
-	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
+	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
 		return -EMSGSIZE;
 	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
 	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
@@ -689,7 +697,7 @@
 }
 
 static int ipv4_tun_to_nlattr(struct sk_buff *skb,
-			      const struct ovs_key_ipv4_tunnel *output,
+			      const struct ip_tunnel_key *output,
 			      const void *tun_opts, int swkey_tun_opts_len)
 {
 	struct nlattr *nla;
@@ -708,16 +716,17 @@
 }
 
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-				  const struct ovs_tunnel_info *egress_tun_info)
+				  const struct ip_tunnel_info *egress_tun_info,
+				  const void *egress_tun_opts)
 {
-	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
-				    egress_tun_info->options,
+	return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
+				    egress_tun_opts,
 				    egress_tun_info->options_len);
 }
 
-static int metadata_from_nlattrs(struct sw_flow_match *match,  u64 *attrs,
-				 const struct nlattr **a, bool is_mask,
-				 bool log)
+static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
+				 u64 *attrs, const struct nlattr **a,
+				 bool is_mask, bool log)
 {
 	if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
 		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
@@ -768,16 +777,47 @@
 			return -EINVAL;
 		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
 	}
+
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
+		u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]);
+
+		SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
+		u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
+
+		SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
+		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
+
+		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
+	}
+	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) &&
+	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) {
+		const struct ovs_key_ct_label *cl;
+
+		cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]);
+		SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label,
+				   sizeof(*cl), is_mask);
+		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL);
+	}
 	return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
-				const struct nlattr **a, bool is_mask,
-				bool log)
+static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
+				u64 attrs, const struct nlattr **a,
+				bool is_mask, bool log)
 {
 	int err;
 
-	err = metadata_from_nlattrs(match, &attrs, a, is_mask, log);
+	err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
 	if (err)
 		return err;
 
@@ -1029,6 +1069,7 @@
  * mask. In case the 'mask' is NULL, the flow is treated as exact match
  * flow. Otherwise, it is treated as a wildcarded flow, except the mask
  * does not include any don't care bit.
+ * @net: Used to determine per-namespace field support.
  * @match: receives the extracted flow match information.
  * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the creation
@@ -1039,7 +1080,7 @@
  * probing for feature compatibility this should be passed in as false to
  * suppress unnecessary error logging.
  */
-int ovs_nla_get_match(struct sw_flow_match *match,
+int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
 		      const struct nlattr *nla_key,
 		      const struct nlattr *nla_mask,
 		      bool log)
@@ -1089,7 +1130,7 @@
 		}
 	}
 
-	err = ovs_key_from_nlattrs(match, key_attrs, a, false, log);
+	err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
 	if (err)
 		return err;
 
@@ -1116,7 +1157,7 @@
 			/* The userspace does not send tunnel attributes that
 			 * are 0, but we should not wildcard them nonetheless.
 			 */
-			if (match->key->tun_key.ipv4_dst)
+			if (match->key->tun_key.u.ipv4.dst)
 				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
 							 0xff, true);
 
@@ -1169,7 +1210,8 @@
 			}
 		}
 
-		err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log);
+		err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
+					   log);
 		if (err)
 			goto free_newmask;
 	}
@@ -1250,7 +1292,7 @@
  * extracted from the packet itself.
  */
 
-int ovs_nla_get_flow_metadata(const struct nlattr *attr,
+int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
 			      struct sw_flow_key *key,
 			      bool log)
 {
@@ -1266,9 +1308,10 @@
 	memset(&match, 0, sizeof(match));
 	match.key = key;
 
+	memset(&key->ct, 0, sizeof(key->ct));
 	key->phy.in_port = DP_MAX_PORTS;
 
-	return metadata_from_nlattrs(&match, &attrs, a, false, log);
+	return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
 }
 
 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
@@ -1287,7 +1330,7 @@
 	if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
 		goto nla_put_failure;
 
-	if ((swkey->tun_key.ipv4_dst || is_mask)) {
+	if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
 		const void *opts = NULL;
 
 		if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
@@ -1314,6 +1357,9 @@
 	if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
 		goto nla_put_failure;
 
+	if (ovs_ct_put_key(output, skb))
+		goto nla_put_failure;
+
 	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
 	if (!nla)
 		goto nla_put_failure;
@@ -1548,11 +1594,51 @@
 	return sfa;
 }
 
-/* Schedules 'sf_acts' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
+static void ovs_nla_free_set_action(const struct nlattr *a)
+{
+	const struct nlattr *ovs_key = nla_data(a);
+	struct ovs_tunnel_info *ovs_tun;
+
+	switch (nla_type(ovs_key)) {
+	case OVS_KEY_ATTR_TUNNEL_INFO:
+		ovs_tun = nla_data(ovs_key);
+		dst_release((struct dst_entry *)ovs_tun->tun_dst);
+		break;
+	}
+}
+
 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
 {
-	kfree_rcu(sf_acts, rcu);
+	const struct nlattr *a;
+	int rem;
+
+	if (!sf_acts)
+		return;
+
+	nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+		switch (nla_type(a)) {
+		case OVS_ACTION_ATTR_SET:
+			ovs_nla_free_set_action(a);
+			break;
+		case OVS_ACTION_ATTR_CT:
+			ovs_ct_free_action(a);
+			break;
+		}
+	}
+
+	kfree(sf_acts);
+}
+
+static void __ovs_nla_free_flow_actions(struct rcu_head *head)
+{
+	ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
+}
+
+/* Schedules 'sf_acts' to be freed after the next RCU grace period.
+ * The caller must hold rcu_read_lock for this to be sensible. */
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
+{
+	call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
 }
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
@@ -1582,6 +1668,7 @@
 
 	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
 	acts->actions_len = (*sfa)->actions_len;
+	acts->orig_len = (*sfa)->orig_len;
 	kfree(*sfa);
 	*sfa = acts;
 
@@ -1609,8 +1696,8 @@
 	return a;
 }
 
-static int add_action(struct sw_flow_actions **sfa, int attrtype,
-		      void *data, int len, bool log)
+int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
+		       int len, bool log)
 {
 	struct nlattr *a;
 
@@ -1625,7 +1712,7 @@
 	int used = (*sfa)->actions_len;
 	int err;
 
-	err = add_action(sfa, attrtype, NULL, 0, log);
+	err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
 	if (err)
 		return err;
 
@@ -1641,12 +1728,12 @@
 	a->nla_len = sfa->actions_len - st_offset;
 }
 
-static int __ovs_nla_copy_actions(const struct nlattr *attr,
+static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 				  const struct sw_flow_key *key,
 				  int depth, struct sw_flow_actions **sfa,
 				  __be16 eth_type, __be16 vlan_tci, bool log);
 
-static int validate_and_copy_sample(const struct nlattr *attr,
+static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
 				    const struct sw_flow_key *key, int depth,
 				    struct sw_flow_actions **sfa,
 				    __be16 eth_type, __be16 vlan_tci, bool log)
@@ -1678,15 +1765,15 @@
 	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
 	if (start < 0)
 		return start;
-	err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
-			 nla_data(probability), sizeof(u32), log);
+	err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
+				 nla_data(probability), sizeof(u32), log);
 	if (err)
 		return err;
 	st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
 	if (st_acts < 0)
 		return st_acts;
 
-	err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa,
+	err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa,
 				     eth_type, vlan_tci, log);
 	if (err)
 		return err;
@@ -1746,7 +1833,9 @@
 {
 	struct sw_flow_match match;
 	struct sw_flow_key key;
-	struct ovs_tunnel_info *tun_info;
+	struct metadata_dst *tun_dst;
+	struct ip_tunnel_info *tun_info;
+	struct ovs_tunnel_info *ovs_tun;
 	struct nlattr *a;
 	int err = 0, start, opts_type;
 
@@ -1771,27 +1860,31 @@
 	if (start < 0)
 		return start;
 
+	tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
+	if (!tun_dst)
+		return -ENOMEM;
+
 	a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
-			 sizeof(*tun_info) + key.tun_opts_len, log);
-	if (IS_ERR(a))
+			 sizeof(*ovs_tun), log);
+	if (IS_ERR(a)) {
+		dst_release((struct dst_entry *)tun_dst);
 		return PTR_ERR(a);
-
-	tun_info = nla_data(a);
-	tun_info->tunnel = key.tun_key;
-	tun_info->options_len = key.tun_opts_len;
-
-	if (tun_info->options_len) {
-		/* We need to store the options in the action itself since
-		 * everything else will go away after flow setup. We can append
-		 * it to tun_info and then point there.
-		 */
-		memcpy((tun_info + 1),
-		       TUN_METADATA_OPTS(&key, key.tun_opts_len), key.tun_opts_len);
-		tun_info->options = (tun_info + 1);
-	} else {
-		tun_info->options = NULL;
 	}
 
+	ovs_tun = nla_data(a);
+	ovs_tun->tun_dst = tun_dst;
+
+	tun_info = &tun_dst->u.tun_info;
+	tun_info->mode = IP_TUNNEL_INFO_TX;
+	tun_info->key = key.tun_key;
+
+	/* We need to store the options in the action itself since
+	 * everything else will go away after flow setup. We can append
+	 * it to tun_info and then point there.
+	 */
+	ip_tunnel_info_opts_set(tun_info,
+				TUN_METADATA_OPTS(&key, key.tun_opts_len),
+				key.tun_opts_len);
 	add_nested_action_end(*sfa, start);
 
 	return err;
@@ -1843,6 +1936,8 @@
 
 	case OVS_KEY_ATTR_PRIORITY:
 	case OVS_KEY_ATTR_SKB_MARK:
+	case OVS_KEY_ATTR_CT_MARK:
+	case OVS_KEY_ATTR_CT_LABEL:
 	case OVS_KEY_ATTR_ETHERNET:
 		break;
 
@@ -2008,7 +2103,7 @@
 	return 0;
 }
 
-static int __ovs_nla_copy_actions(const struct nlattr *attr,
+static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 				  const struct sw_flow_key *key,
 				  int depth, struct sw_flow_actions **sfa,
 				  __be16 eth_type, __be16 vlan_tci, bool log)
@@ -2032,7 +2127,8 @@
 			[OVS_ACTION_ATTR_SET] = (u32)-1,
 			[OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
 			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
-			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
+			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
+			[OVS_ACTION_ATTR_CT] = (u32)-1,
 		};
 		const struct ovs_action_push_vlan *vlan;
 		int type = nla_type(a);
@@ -2139,13 +2235,20 @@
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE:
-			err = validate_and_copy_sample(a, key, depth, sfa,
+			err = validate_and_copy_sample(net, a, key, depth, sfa,
 						       eth_type, vlan_tci, log);
 			if (err)
 				return err;
 			skip_copy = true;
 			break;
 
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_copy_action(net, a, key, sfa, log);
+			if (err)
+				return err;
+			skip_copy = true;
+			break;
+
 		default:
 			OVS_NLERR(log, "Unknown Action type %d", type);
 			return -EINVAL;
@@ -2164,7 +2267,7 @@
 }
 
 /* 'key' must be the masked key. */
-int ovs_nla_copy_actions(const struct nlattr *attr,
+int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 			 const struct sw_flow_key *key,
 			 struct sw_flow_actions **sfa, bool log)
 {
@@ -2174,10 +2277,11 @@
 	if (IS_ERR(*sfa))
 		return PTR_ERR(*sfa);
 
-	err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
+	(*sfa)->orig_len = nla_len(attr);
+	err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
 				     key->eth.tci, log);
 	if (err)
-		kfree(*sfa);
+		ovs_nla_free_flow_actions(*sfa);
 
 	return err;
 }
@@ -2227,15 +2331,16 @@
 
 	switch (key_type) {
 	case OVS_KEY_ATTR_TUNNEL_INFO: {
-		struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+		struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
+		struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
 
 		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
 		if (!start)
 			return -EMSGSIZE;
 
-		err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+		err = ipv4_tun_to_nlattr(skb, &tun_info->key,
 					 tun_info->options_len ?
-						tun_info->options : NULL,
+					     ip_tunnel_info_opts(tun_info) : NULL,
 					 tun_info->options_len);
 		if (err)
 			return err;
@@ -2298,6 +2403,13 @@
 			if (err)
 				return err;
 			break;
+
+		case OVS_ACTION_ATTR_CT:
+			err = ovs_ct_action_to_attr(nla_data(a), skb);
+			if (err)
+				return err;
+			break;
+
 		default:
 			if (nla_put(skb, type, nla_len(a), nla_data(a)))
 				return -EMSGSIZE;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 5c3d75b..6ca3f0b 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -45,29 +45,34 @@
 
 int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
 		    int attr, bool is_mask, struct sk_buff *);
-int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *,
-			      bool log);
+int ovs_nla_get_flow_metadata(struct net *, const struct nlattr *,
+			      struct sw_flow_key *, bool log);
 
 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
 
-int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
-		      const struct nlattr *mask, bool log);
+int ovs_nla_get_match(struct net *, struct sw_flow_match *,
+		      const struct nlattr *key, const struct nlattr *mask,
+		      bool log);
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-				  const struct ovs_tunnel_info *);
+				  const struct ip_tunnel_info *,
+				  const void *egress_tun_opts);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
 			   const struct sw_flow_key *key, bool log);
 u32 ovs_nla_get_ufid_flags(const struct nlattr *attr);
 
-int ovs_nla_copy_actions(const struct nlattr *attr,
+int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 			 const struct sw_flow_key *key,
 			 struct sw_flow_actions **sfa, bool log);
+int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype,
+		       void *data, int len, bool log);
 int ovs_nla_put_actions(const struct nlattr *attr,
 			int len, struct sk_buff *skb);
 
 void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
 
 #endif /* flow_netlink.h */
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 6552394..d22d8e9 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -18,6 +18,7 @@
 
 #include "flow.h"
 #include "datapath.h"
+#include "flow_netlink.h"
 #include <linux/uaccess.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -143,7 +144,8 @@
 
 	if (ovs_identifier_is_key(&flow->id))
 		kfree(flow->id.unmasked_key);
-	kfree((struct sw_flow_actions __force *)flow->sf_acts);
+	if (flow->sf_acts)
+		ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
 	for_each_node(node)
 		if (flow->stats[node])
 			kmem_cache_free(flow_stats_cache,
@@ -424,7 +426,7 @@
 
 static int flow_key_start(const struct sw_flow_key *key)
 {
-	if (key->tun_key.ipv4_dst)
+	if (key->tun_key.u.ipv4.dst)
 		return 0;
 	else
 		return rounddown(offsetof(struct sw_flow_key, phy),
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 208c576..2735e9c 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -26,96 +26,42 @@
 
 #include "datapath.h"
 #include "vport.h"
+#include "vport-netdev.h"
 
 static struct vport_ops ovs_geneve_vport_ops;
-
 /**
  * struct geneve_port - Keeps track of open UDP ports
- * @gs: The socket created for this port number.
- * @name: vport name.
+ * @port_no: destination UDP port number.
  */
 struct geneve_port {
-	struct geneve_sock *gs;
-	char name[IFNAMSIZ];
+	u16 port_no;
 };
 
-static LIST_HEAD(geneve_ports);
-
 static inline struct geneve_port *geneve_vport(const struct vport *vport)
 {
 	return vport_priv(vport);
 }
 
-/* Convert 64 bit tunnel ID to 24 bit VNI. */
-static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
-{
-#ifdef __BIG_ENDIAN
-	vni[0] = (__force __u8)(tun_id >> 16);
-	vni[1] = (__force __u8)(tun_id >> 8);
-	vni[2] = (__force __u8)tun_id;
-#else
-	vni[0] = (__force __u8)((__force u64)tun_id >> 40);
-	vni[1] = (__force __u8)((__force u64)tun_id >> 48);
-	vni[2] = (__force __u8)((__force u64)tun_id >> 56);
-#endif
-}
-
-/* Convert 24 bit VNI to 64 bit tunnel ID. */
-static __be64 vni_to_tunnel_id(const __u8 *vni)
-{
-#ifdef __BIG_ENDIAN
-	return (vni[0] << 16) | (vni[1] << 8) | vni[2];
-#else
-	return (__force __be64)(((__force u64)vni[0] << 40) |
-				((__force u64)vni[1] << 48) |
-				((__force u64)vni[2] << 56));
-#endif
-}
-
-static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
-{
-	struct vport *vport = gs->rcv_data;
-	struct genevehdr *geneveh = geneve_hdr(skb);
-	int opts_len;
-	struct ovs_tunnel_info tun_info;
-	__be64 key;
-	__be16 flags;
-
-	opts_len = geneveh->opt_len * 4;
-
-	flags = TUNNEL_KEY | TUNNEL_GENEVE_OPT |
-		(udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
-		(geneveh->oam ? TUNNEL_OAM : 0) |
-		(geneveh->critical ? TUNNEL_CRIT_OPT : 0);
-
-	key = vni_to_tunnel_id(geneveh->vni);
-
-	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
-			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
-			       key, flags,
-			       geneveh->options, opts_len);
-
-	ovs_vport_receive(vport, skb, &tun_info);
-}
-
 static int geneve_get_options(const struct vport *vport,
 			      struct sk_buff *skb)
 {
 	struct geneve_port *geneve_port = geneve_vport(vport);
-	struct inet_sock *sk = inet_sk(geneve_port->gs->sock->sk);
 
-	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(sk->inet_sport)))
+	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, geneve_port->port_no))
 		return -EMSGSIZE;
 	return 0;
 }
 
-static void geneve_tnl_destroy(struct vport *vport)
+static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
+				      struct dp_upcall_info *upcall)
 {
 	struct geneve_port *geneve_port = geneve_vport(vport);
+	struct net *net = ovs_dp_get_net(vport->dp);
+	__be16 dport = htons(geneve_port->port_no);
+	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
 
-	geneve_sock_release(geneve_port->gs);
-
-	ovs_vport_deferred_free(vport);
+	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
+					  skb, IPPROTO_UDP, sport, dport);
 }
 
 static struct vport *geneve_tnl_create(const struct vport_parms *parms)
@@ -123,11 +69,11 @@
 	struct net *net = ovs_dp_get_net(parms->dp);
 	struct nlattr *options = parms->options;
 	struct geneve_port *geneve_port;
-	struct geneve_sock *gs;
+	struct net_device *dev;
 	struct vport *vport;
 	struct nlattr *a;
-	int err;
 	u16 dst_port;
+	int err;
 
 	if (!options) {
 		err = -EINVAL;
@@ -149,104 +95,40 @@
 		return vport;
 
 	geneve_port = geneve_vport(vport);
-	strncpy(geneve_port->name, parms->name, IFNAMSIZ);
+	geneve_port->port_no = dst_port;
 
-	gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0);
-	if (IS_ERR(gs)) {
+	rtnl_lock();
+	dev = geneve_dev_create_fb(net, parms->name, NET_NAME_USER, dst_port);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
 		ovs_vport_free(vport);
-		return (void *)gs;
+		return ERR_CAST(dev);
 	}
-	geneve_port->gs = gs;
 
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
 	return vport;
 error:
 	return ERR_PTR(err);
 }
 
-static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
+static struct vport *geneve_create(const struct vport_parms *parms)
 {
-	const struct ovs_key_ipv4_tunnel *tun_key;
-	struct ovs_tunnel_info *tun_info;
-	struct net *net = ovs_dp_get_net(vport->dp);
-	struct geneve_port *geneve_port = geneve_vport(vport);
-	__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
-	__be16 sport;
-	struct rtable *rt;
-	struct flowi4 fl;
-	u8 vni[3], opts_len, *opts;
-	__be16 df;
-	int err;
+	struct vport *vport;
 
-	tun_info = OVS_CB(skb)->egress_tun_info;
-	if (unlikely(!tun_info)) {
-		err = -EINVAL;
-		goto error;
-	}
+	vport = geneve_tnl_create(parms);
+	if (IS_ERR(vport))
+		return vport;
 
-	tun_key = &tun_info->tunnel;
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
-	if (IS_ERR(rt)) {
-		err = PTR_ERR(rt);
-		goto error;
-	}
-
-	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
-	sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-	tunnel_id_to_vni(tun_key->tun_id, vni);
-	skb->ignore_df = 1;
-
-	if (tun_key->tun_flags & TUNNEL_GENEVE_OPT) {
-		opts = (u8 *)tun_info->options;
-		opts_len = tun_info->options_len;
-	} else {
-		opts = NULL;
-		opts_len = 0;
-	}
-
-	err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
-			      tun_key->ipv4_dst, tun_key->ipv4_tos,
-			      tun_key->ipv4_ttl, df, sport, dport,
-			      tun_key->tun_flags, vni, opts_len, opts,
-			      !!(tun_key->tun_flags & TUNNEL_CSUM), false);
-	if (err < 0)
-		ip_rt_put(rt);
-	return err;
-
-error:
-	kfree_skb(skb);
-	return err;
-}
-
-static const char *geneve_get_name(const struct vport *vport)
-{
-	struct geneve_port *geneve_port = geneve_vport(vport);
-
-	return geneve_port->name;
-}
-
-static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				      struct ovs_tunnel_info *egress_tun_info)
-{
-	struct geneve_port *geneve_port = geneve_vport(vport);
-	struct net *net = ovs_dp_get_net(vport->dp);
-	__be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
-	__be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
-
-	/* Get tp_src and tp_dst, refert to geneve_build_header().
-	 */
-	return ovs_tunnel_get_egress_info(egress_tun_info,
-					  ovs_dp_get_net(vport->dp),
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_UDP, skb->mark, sport, dport);
+	return ovs_netdev_link(vport, parms->name);
 }
 
 static struct vport_ops ovs_geneve_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GENEVE,
-	.create		= geneve_tnl_create,
-	.destroy	= geneve_tnl_destroy,
-	.get_name	= geneve_get_name,
+	.create		= geneve_create,
+	.destroy	= ovs_netdev_tunnel_destroy,
 	.get_options	= geneve_get_options,
-	.send		= geneve_tnl_send,
+	.send		= ovs_netdev_send,
 	.owner          = THIS_MODULE,
 	.get_egress_tun_info	= geneve_get_egress_tun_info,
 };
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index f17ac96..4d24481 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -45,253 +45,58 @@
 
 #include "datapath.h"
 #include "vport.h"
+#include "vport-netdev.h"
 
 static struct vport_ops ovs_gre_vport_ops;
 
-/* Returns the least-significant 32 bits of a __be64. */
-static __be32 be64_get_low32(__be64 x)
+static struct vport *gre_tnl_create(const struct vport_parms *parms)
 {
-#ifdef __BIG_ENDIAN
-	return (__force __be32)x;
-#else
-	return (__force __be32)((__force u64)x >> 32);
-#endif
-}
-
-static __be16 filter_tnl_flags(__be16 flags)
-{
-	return flags & (TUNNEL_CSUM | TUNNEL_KEY);
-}
-
-static struct sk_buff *__build_header(struct sk_buff *skb,
-				      int tunnel_hlen)
-{
-	struct tnl_ptk_info tpi;
-	const struct ovs_key_ipv4_tunnel *tun_key;
-
-	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-
-	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
-	if (IS_ERR(skb))
-		return skb;
-
-	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
-	tpi.proto = htons(ETH_P_TEB);
-	tpi.key = be64_get_low32(tun_key->tun_id);
-	tpi.seq = 0;
-	gre_build_header(skb, &tpi, tunnel_hlen);
-
-	return skb;
-}
-
-static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
-{
-#ifdef __BIG_ENDIAN
-	return (__force __be64)((__force u64)seq << 32 | (__force u32)key);
-#else
-	return (__force __be64)((__force u64)key << 32 | (__force u32)seq);
-#endif
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_rcv(struct sk_buff *skb,
-		   const struct tnl_ptk_info *tpi)
-{
-	struct ovs_tunnel_info tun_info;
-	struct ovs_net *ovs_net;
-	struct vport *vport;
-	__be64 key;
-
-	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
-	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
-	if (unlikely(!vport))
-		return PACKET_REJECT;
-
-	key = key_to_tunnel_id(tpi->key, tpi->seq);
-	ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
-			       filter_tnl_flags(tpi->flags), NULL, 0);
-
-	ovs_vport_receive(vport, skb, &tun_info);
-	return PACKET_RCVD;
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static int gre_err(struct sk_buff *skb, u32 info,
-		   const struct tnl_ptk_info *tpi)
-{
-	struct ovs_net *ovs_net;
+	struct net *net = ovs_dp_get_net(parms->dp);
+	struct net_device *dev;
 	struct vport *vport;
 
-	ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
-	vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+	vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
 
-	if (unlikely(!vport))
-		return PACKET_REJECT;
-	else
-		return PACKET_RCVD;
-}
-
-static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
-	struct net *net = ovs_dp_get_net(vport->dp);
-	const struct ovs_key_ipv4_tunnel *tun_key;
-	struct flowi4 fl;
-	struct rtable *rt;
-	int min_headroom;
-	int tunnel_hlen;
-	__be16 df;
-	int err;
-
-	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-		err = -EINVAL;
-		goto err_free_skb;
+	rtnl_lock();
+	dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
+		ovs_vport_free(vport);
+		return ERR_CAST(dev);
 	}
 
-	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
-	if (IS_ERR(rt)) {
-		err = PTR_ERR(rt);
-		goto err_free_skb;
-	}
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
 
-	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
-
-	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-			+ tunnel_hlen + sizeof(struct iphdr)
-			+ (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
-		int head_delta = SKB_DATA_ALIGN(min_headroom -
-						skb_headroom(skb) +
-						16);
-		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
-					0, GFP_ATOMIC);
-		if (unlikely(err))
-			goto err_free_rt;
-	}
-
-	skb = vlan_hwaccel_push_inside(skb);
-	if (unlikely(!skb)) {
-		err = -ENOMEM;
-		goto err_free_rt;
-	}
-
-	/* Push Tunnel header. */
-	skb = __build_header(skb, tunnel_hlen);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
-		skb = NULL;
-		goto err_free_rt;
-	}
-
-	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-		htons(IP_DF) : 0;
-
-	skb->ignore_df = 1;
-
-	return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
-			     tun_key->ipv4_dst, IPPROTO_GRE,
-			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
-err_free_rt:
-	ip_rt_put(rt);
-err_free_skb:
-	kfree_skb(skb);
-	return err;
-}
-
-static struct gre_cisco_protocol gre_protocol = {
-	.handler        = gre_rcv,
-	.err_handler    = gre_err,
-	.priority       = 1,
-};
-
-static int gre_ports;
-static int gre_init(void)
-{
-	int err;
-
-	gre_ports++;
-	if (gre_ports > 1)
-		return 0;
-
-	err = gre_cisco_register(&gre_protocol);
-	if (err)
-		pr_warn("cannot register gre protocol handler\n");
-
-	return err;
-}
-
-static void gre_exit(void)
-{
-	gre_ports--;
-	if (gre_ports > 0)
-		return;
-
-	gre_cisco_unregister(&gre_protocol);
-}
-
-static const char *gre_get_name(const struct vport *vport)
-{
-	return vport_priv(vport);
+	return vport;
 }
 
 static struct vport *gre_create(const struct vport_parms *parms)
 {
-	struct net *net = ovs_dp_get_net(parms->dp);
-	struct ovs_net *ovs_net;
 	struct vport *vport;
-	int err;
 
-	err = gre_init();
-	if (err)
-		return ERR_PTR(err);
-
-	ovs_net = net_generic(net, ovs_net_id);
-	if (ovsl_dereference(ovs_net->vport_net.gre_vport)) {
-		vport = ERR_PTR(-EEXIST);
-		goto error;
-	}
-
-	vport = ovs_vport_alloc(IFNAMSIZ, &ovs_gre_vport_ops, parms);
+	vport = gre_tnl_create(parms);
 	if (IS_ERR(vport))
-		goto error;
+		return vport;
 
-	strncpy(vport_priv(vport), parms->name, IFNAMSIZ);
-	rcu_assign_pointer(ovs_net->vport_net.gre_vport, vport);
-	return vport;
-
-error:
-	gre_exit();
-	return vport;
-}
-
-static void gre_tnl_destroy(struct vport *vport)
-{
-	struct net *net = ovs_dp_get_net(vport->dp);
-	struct ovs_net *ovs_net;
-
-	ovs_net = net_generic(net, ovs_net_id);
-
-	RCU_INIT_POINTER(ovs_net->vport_net.gre_vport, NULL);
-	ovs_vport_deferred_free(vport);
-	gre_exit();
+	return ovs_netdev_link(vport, parms->name);
 }
 
 static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				   struct ovs_tunnel_info *egress_tun_info)
+				   struct dp_upcall_info *upcall)
 {
-	return ovs_tunnel_get_egress_info(egress_tun_info,
-					  ovs_dp_get_net(vport->dp),
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_GRE, skb->mark, 0, 0);
+	return ovs_tunnel_get_egress_info(upcall, ovs_dp_get_net(vport->dp),
+					  skb, IPPROTO_GRE, 0, 0);
 }
 
 static struct vport_ops ovs_gre_vport_ops = {
 	.type		= OVS_VPORT_TYPE_GRE,
 	.create		= gre_create,
-	.destroy	= gre_tnl_destroy,
-	.get_name	= gre_get_name,
-	.send		= gre_tnl_send,
+	.send		= ovs_netdev_send,
 	.get_egress_tun_info	= gre_get_egress_tun_info,
+	.destroy	= ovs_netdev_tunnel_destroy,
 	.owner		= THIS_MODULE,
 };
 
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 6a55f71..388b8a6 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -43,35 +43,26 @@
 	return netdev_priv(netdev);
 }
 
-/* This function is only called by the kernel network layer.*/
-static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netdev,
-							struct rtnl_link_stats64 *stats)
-{
-	struct vport *vport = ovs_internal_dev_get_vport(netdev);
-	struct ovs_vport_stats vport_stats;
-
-	ovs_vport_get_stats(vport, &vport_stats);
-
-	/* The tx and rx stats need to be swapped because the
-	 * switch and host OS have opposite perspectives. */
-	stats->rx_packets	= vport_stats.tx_packets;
-	stats->tx_packets	= vport_stats.rx_packets;
-	stats->rx_bytes		= vport_stats.tx_bytes;
-	stats->tx_bytes		= vport_stats.rx_bytes;
-	stats->rx_errors	= vport_stats.tx_errors;
-	stats->tx_errors	= vport_stats.rx_errors;
-	stats->rx_dropped	= vport_stats.tx_dropped;
-	stats->tx_dropped	= vport_stats.rx_dropped;
-
-	return stats;
-}
-
 /* Called with rcu_read_lock_bh. */
 static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+	int len, err;
+
+	len = skb->len;
 	rcu_read_lock();
-	ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
+	err = ovs_vport_receive(internal_dev_priv(netdev)->vport, skb, NULL);
 	rcu_read_unlock();
+
+	if (likely(!err)) {
+		struct pcpu_sw_netstats *tstats = this_cpu_ptr(netdev->tstats);
+
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		netdev->stats.tx_errors++;
+	}
 	return 0;
 }
 
@@ -121,7 +112,6 @@
 	.ndo_start_xmit = internal_dev_xmit,
 	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = internal_dev_change_mtu,
-	.ndo_get_stats64 = internal_dev_get_stats,
 };
 
 static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
@@ -135,7 +125,7 @@
 	netdev->netdev_ops = &internal_dev_netdev_ops;
 
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
-	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH;
 	netdev->destructor = internal_dev_destructor;
 	netdev->ethtool_ops = &internal_dev_ethtool_ops;
 	netdev->rtnl_link_ops = &internal_dev_link_ops;
@@ -156,49 +146,44 @@
 static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
 	struct vport *vport;
-	struct netdev_vport *netdev_vport;
 	struct internal_dev *internal_dev;
 	int err;
 
-	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-				&ovs_internal_vport_ops, parms);
+	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
 	if (IS_ERR(vport)) {
 		err = PTR_ERR(vport);
 		goto error;
 	}
 
-	netdev_vport = netdev_vport_priv(vport);
-
-	netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
-					 parms->name, NET_NAME_UNKNOWN,
-					 do_setup);
-	if (!netdev_vport->dev) {
+	vport->dev = alloc_netdev(sizeof(struct internal_dev),
+				  parms->name, NET_NAME_UNKNOWN, do_setup);
+	if (!vport->dev) {
 		err = -ENOMEM;
 		goto error_free_vport;
 	}
 
-	dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
-	internal_dev = internal_dev_priv(netdev_vport->dev);
+	dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
+	internal_dev = internal_dev_priv(vport->dev);
 	internal_dev->vport = vport;
 
 	/* Restrict bridge port to current netns. */
 	if (vport->port_no == OVSP_LOCAL)
-		netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+		vport->dev->features |= NETIF_F_NETNS_LOCAL;
 
 	rtnl_lock();
-	err = register_netdevice(netdev_vport->dev);
+	err = register_netdevice(vport->dev);
 	if (err)
 		goto error_free_netdev;
 
-	dev_set_promiscuity(netdev_vport->dev, 1);
+	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
-	netif_start_queue(netdev_vport->dev);
+	netif_start_queue(vport->dev);
 
 	return vport;
 
 error_free_netdev:
 	rtnl_unlock();
-	free_netdev(netdev_vport->dev);
+	free_netdev(vport->dev);
 error_free_vport:
 	ovs_vport_free(vport);
 error:
@@ -207,30 +192,27 @@
 
 static void internal_dev_destroy(struct vport *vport)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
-	netif_stop_queue(netdev_vport->dev);
+	netif_stop_queue(vport->dev);
 	rtnl_lock();
-	dev_set_promiscuity(netdev_vport->dev, -1);
+	dev_set_promiscuity(vport->dev, -1);
 
 	/* unregister_netdevice() waits for an RCU grace period. */
-	unregister_netdevice(netdev_vport->dev);
+	unregister_netdevice(vport->dev);
 
 	rtnl_unlock();
 }
 
-static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
+static void internal_dev_recv(struct vport *vport, struct sk_buff *skb)
 {
-	struct net_device *netdev = netdev_vport_priv(vport)->dev;
-	int len;
+	struct net_device *netdev = vport->dev;
+	struct pcpu_sw_netstats *stats;
 
 	if (unlikely(!(netdev->flags & IFF_UP))) {
 		kfree_skb(skb);
-		return 0;
+		netdev->stats.rx_dropped++;
+		return;
 	}
 
-	len = skb->len;
-
 	skb_dst_drop(skb);
 	nf_reset(skb);
 	secpath_reset(skb);
@@ -240,16 +222,19 @@
 	skb->protocol = eth_type_trans(skb, netdev);
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
-	netif_rx(skb);
+	stats = this_cpu_ptr(netdev->tstats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+	u64_stats_update_end(&stats->syncp);
 
-	return len;
+	netif_rx(skb);
 }
 
 static struct vport_ops ovs_internal_vport_ops = {
 	.type		= OVS_VPORT_TYPE_INTERNAL,
 	.create		= internal_dev_create,
 	.destroy	= internal_dev_destroy,
-	.get_name	= ovs_netdev_get_name,
 	.send		= internal_dev_recv,
 };
 
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 33e6d6e..f7e8dcc 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -26,18 +26,24 @@
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/openvswitch.h>
+#include <linux/export.h>
 
-#include <net/llc.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
 
 #include "datapath.h"
+#include "vport.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
 static struct vport_ops ovs_netdev_vport_ops;
 
 /* Must be called with rcu_read_lock. */
-static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
+static void netdev_port_receive(struct sk_buff *skb)
 {
+	struct vport *vport;
+
+	vport = ovs_netdev_get_vport(skb->dev);
 	if (unlikely(!vport))
 		goto error;
 
@@ -53,10 +59,8 @@
 
 	skb_push(skb, ETH_HLEN);
 	ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
-
-	ovs_vport_receive(vport, skb, NULL);
+	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
 	return;
-
 error:
 	kfree_skb(skb);
 }
@@ -65,15 +69,11 @@
 static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
 {
 	struct sk_buff *skb = *pskb;
-	struct vport *vport;
 
 	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
 		return RX_HANDLER_PASS;
 
-	vport = ovs_netdev_get_vport(skb->dev);
-
-	netdev_port_receive(vport, skb);
-
+	netdev_port_receive(skb);
 	return RX_HANDLER_CONSUMED;
 }
 
@@ -83,105 +83,112 @@
 
 	local = ovs_vport_ovsl(dp, OVSP_LOCAL);
 	BUG_ON(!local);
-	return netdev_vport_priv(local)->dev;
+	return local->dev;
 }
 
-static struct vport *netdev_create(const struct vport_parms *parms)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
 {
-	struct vport *vport;
-	struct netdev_vport *netdev_vport;
 	int err;
 
-	vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-				&ovs_netdev_vport_ops, parms);
-	if (IS_ERR(vport)) {
-		err = PTR_ERR(vport);
-		goto error;
-	}
-
-	netdev_vport = netdev_vport_priv(vport);
-
-	netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
-	if (!netdev_vport->dev) {
+	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
+	if (!vport->dev) {
 		err = -ENODEV;
 		goto error_free_vport;
 	}
 
-	if (netdev_vport->dev->flags & IFF_LOOPBACK ||
-	    netdev_vport->dev->type != ARPHRD_ETHER ||
-	    ovs_is_internal_dev(netdev_vport->dev)) {
+	if (vport->dev->flags & IFF_LOOPBACK ||
+	    vport->dev->type != ARPHRD_ETHER ||
+	    ovs_is_internal_dev(vport->dev)) {
 		err = -EINVAL;
 		goto error_put;
 	}
 
 	rtnl_lock();
-	err = netdev_master_upper_dev_link(netdev_vport->dev,
+	err = netdev_master_upper_dev_link(vport->dev,
 					   get_dpdev(vport->dp));
 	if (err)
 		goto error_unlock;
 
-	err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
 					 vport);
 	if (err)
 		goto error_master_upper_dev_unlink;
 
-	dev_disable_lro(netdev_vport->dev);
-	dev_set_promiscuity(netdev_vport->dev, 1);
-	netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+	dev_disable_lro(vport->dev);
+	dev_set_promiscuity(vport->dev, 1);
+	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
 	rtnl_unlock();
 
 	return vport;
 
 error_master_upper_dev_unlink:
-	netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
 error_unlock:
 	rtnl_unlock();
 error_put:
-	dev_put(netdev_vport->dev);
+	dev_put(vport->dev);
 error_free_vport:
 	ovs_vport_free(vport);
-error:
 	return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
 
-static void free_port_rcu(struct rcu_head *rcu)
+static struct vport *netdev_create(const struct vport_parms *parms)
 {
-	struct netdev_vport *netdev_vport = container_of(rcu,
-					struct netdev_vport, rcu);
+	struct vport *vport;
 
-	dev_put(netdev_vport->dev);
-	ovs_vport_free(vport_from_priv(netdev_vport));
+	vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
+	if (IS_ERR(vport))
+		return vport;
+
+	return ovs_netdev_link(vport, parms->name);
+}
+
+static void vport_netdev_free(struct rcu_head *rcu)
+{
+	struct vport *vport = container_of(rcu, struct vport, rcu);
+
+	if (vport->dev)
+		dev_put(vport->dev);
+	ovs_vport_free(vport);
 }
 
 void ovs_netdev_detach_dev(struct vport *vport)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
 	ASSERT_RTNL();
-	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
-	netdev_rx_handler_unregister(netdev_vport->dev);
-	netdev_upper_dev_unlink(netdev_vport->dev,
-				netdev_master_upper_dev_get(netdev_vport->dev));
-	dev_set_promiscuity(netdev_vport->dev, -1);
+	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+	netdev_rx_handler_unregister(vport->dev);
+	netdev_upper_dev_unlink(vport->dev,
+				netdev_master_upper_dev_get(vport->dev));
+	dev_set_promiscuity(vport->dev, -1);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
 
 static void netdev_destroy(struct vport *vport)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
 	rtnl_lock();
-	if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
 		ovs_netdev_detach_dev(vport);
 	rtnl_unlock();
 
-	call_rcu(&netdev_vport->rcu, free_port_rcu);
+	call_rcu(&vport->rcu, vport_netdev_free);
 }
 
-const char *ovs_netdev_get_name(const struct vport *vport)
+void ovs_netdev_tunnel_destroy(struct vport *vport)
 {
-	const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-	return netdev_vport->dev->name;
+	rtnl_lock();
+	if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+		ovs_netdev_detach_dev(vport);
+
+	/* Early release so we can unregister the device */
+	dev_put(vport->dev);
+	rtnl_delete_link(vport->dev);
+	vport->dev = NULL;
+	rtnl_unlock();
+
+	call_rcu(&vport->rcu, vport_netdev_free);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
 
 static unsigned int packet_length(const struct sk_buff *skb)
 {
@@ -193,29 +200,26 @@
 	return length;
 }
 
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+void ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
 {
-	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-	int mtu = netdev_vport->dev->mtu;
-	int len;
+	int mtu = vport->dev->mtu;
 
 	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
 		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-				     netdev_vport->dev->name,
+				     vport->dev->name,
 				     packet_length(skb), mtu);
+		vport->dev->stats.tx_errors++;
 		goto drop;
 	}
 
-	skb->dev = netdev_vport->dev;
-	len = skb->len;
+	skb->dev = vport->dev;
 	dev_queue_xmit(skb);
-
-	return len;
+	return;
 
 drop:
 	kfree_skb(skb);
-	return 0;
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
 
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -231,8 +235,7 @@
 	.type		= OVS_VPORT_TYPE_NETDEV,
 	.create		= netdev_create,
 	.destroy	= netdev_destroy,
-	.get_name	= ovs_netdev_get_name,
-	.send		= netdev_send,
+	.send		= ovs_netdev_send,
 };
 
 int __init ovs_netdev_init(void)
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index 6f7038e..bf22fce 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -26,22 +26,12 @@
 
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
-struct netdev_vport {
-	struct rcu_head rcu;
-
-	struct net_device *dev;
-};
-
-static inline struct netdev_vport *
-netdev_vport_priv(const struct vport *vport)
-{
-	return vport_priv(vport);
-}
-
-const char *ovs_netdev_get_name(const struct vport *);
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+void ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
 
 int __init ovs_netdev_init(void);
 void ovs_netdev_exit(void);
 
+void ovs_netdev_tunnel_destroy(struct vport *vport);
 #endif /* vport_netdev.h */
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 6d39766..c11413d 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -17,94 +17,37 @@
  * 02110-1301, USA
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <linux/rculist.h>
-#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
 #include <linux/module.h>
-
-#include <net/icmp.h>
-#include <net/ip.h>
 #include <net/udp.h>
 #include <net/ip_tunnels.h>
 #include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <net/vxlan.h>
 
 #include "datapath.h"
 #include "vport.h"
-#include "vport-vxlan.h"
+#include "vport-netdev.h"
 
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @vs: vxlan_sock created for the port.
- * @name: vport name.
- */
-struct vxlan_port {
-	struct vxlan_sock *vs;
-	char name[IFNAMSIZ];
-	u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
-};
-
-static struct vport_ops ovs_vxlan_vport_ops;
-
-static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
-{
-	return vport_priv(vport);
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-		      struct vxlan_metadata *md)
-{
-	struct ovs_tunnel_info tun_info;
-	struct vxlan_port *vxlan_port;
-	struct vport *vport = vs->data;
-	struct iphdr *iph;
-	struct ovs_vxlan_opts opts = {
-		.gbp = md->gbp,
-	};
-	__be64 key;
-	__be16 flags;
-
-	flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
-	vxlan_port = vxlan_vport(vport);
-	if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
-		flags |= TUNNEL_VXLAN_OPT;
-
-	/* Save outer tunnel values */
-	iph = ip_hdr(skb);
-	key = cpu_to_be64(ntohl(md->vni) >> 8);
-	ovs_flow_tun_info_init(&tun_info, iph,
-			       udp_hdr(skb)->source, udp_hdr(skb)->dest,
-			       key, flags, &opts, sizeof(opts));
-
-	ovs_vport_receive(vport, skb, &tun_info);
-}
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
 
 static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
 {
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+	__be16 dst_port = vxlan->cfg.dst_port;
 
 	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
 		return -EMSGSIZE;
 
-	if (vxlan_port->exts) {
+	if (vxlan->flags & VXLAN_F_GBP) {
 		struct nlattr *exts;
 
 		exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
 		if (!exts)
 			return -EMSGSIZE;
 
-		if (vxlan_port->exts & VXLAN_F_GBP &&
+		if (vxlan->flags & VXLAN_F_GBP &&
 		    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
 			return -EMSGSIZE;
 
@@ -114,23 +57,14 @@
 	return 0;
 }
 
-static void vxlan_tnl_destroy(struct vport *vport)
-{
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-
-	vxlan_sock_release(vxlan_port->vs);
-
-	ovs_vport_deferred_free(vport);
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
 	[OVS_VXLAN_EXT_GBP]	= { .type = NLA_FLAG, },
 };
 
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+				struct vxlan_config *conf)
 {
-	struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
-	struct vxlan_port *vxlan_port;
+	struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
 	int err;
 
 	if (nla_len(attr) < sizeof(struct nlattr))
@@ -140,10 +74,8 @@
 	if (err < 0)
 		return err;
 
-	vxlan_port = vxlan_vport(vport);
-
 	if (exts[OVS_VXLAN_EXT_GBP])
-		vxlan_port->exts |= VXLAN_F_GBP;
+		conf->flags |= VXLAN_F_GBP;
 
 	return 0;
 }
@@ -152,128 +84,74 @@
 {
 	struct net *net = ovs_dp_get_net(parms->dp);
 	struct nlattr *options = parms->options;
-	struct vxlan_port *vxlan_port;
-	struct vxlan_sock *vs;
+	struct net_device *dev;
 	struct vport *vport;
 	struct nlattr *a;
-	u16 dst_port;
 	int err;
+	struct vxlan_config conf = {
+		.no_share = true,
+		.flags = VXLAN_F_COLLECT_METADATA,
+	};
 
 	if (!options) {
 		err = -EINVAL;
 		goto error;
 	}
+
 	a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
 	if (a && nla_len(a) == sizeof(u16)) {
-		dst_port = nla_get_u16(a);
+		conf.dst_port = htons(nla_get_u16(a));
 	} else {
 		/* Require destination port from userspace. */
 		err = -EINVAL;
 		goto error;
 	}
 
-	vport = ovs_vport_alloc(sizeof(struct vxlan_port),
-				&ovs_vxlan_vport_ops, parms);
+	vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
 	if (IS_ERR(vport))
 		return vport;
 
-	vxlan_port = vxlan_vport(vport);
-	strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
-
 	a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
 	if (a) {
-		err = vxlan_configure_exts(vport, a);
+		err = vxlan_configure_exts(vport, a, &conf);
 		if (err) {
 			ovs_vport_free(vport);
 			goto error;
 		}
 	}
 
-	vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
-			    vxlan_port->exts);
-	if (IS_ERR(vs)) {
+	rtnl_lock();
+	dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+	if (IS_ERR(dev)) {
+		rtnl_unlock();
 		ovs_vport_free(vport);
-		return (void *)vs;
+		return ERR_CAST(dev);
 	}
-	vxlan_port->vs = vs;
 
+	dev_change_flags(dev, dev->flags | IFF_UP);
+	rtnl_unlock();
 	return vport;
-
 error:
 	return ERR_PTR(err);
 }
 
-static int vxlan_ext_gbp(struct sk_buff *skb)
+static struct vport *vxlan_create(const struct vport_parms *parms)
 {
-	const struct ovs_tunnel_info *tun_info;
-	const struct ovs_vxlan_opts *opts;
+	struct vport *vport;
 
-	tun_info = OVS_CB(skb)->egress_tun_info;
-	opts = tun_info->options;
+	vport = vxlan_tnl_create(parms);
+	if (IS_ERR(vport))
+		return vport;
 
-	if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
-	    tun_info->options_len >= sizeof(*opts))
-		return opts->gbp;
-	else
-		return 0;
-}
-
-static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
-{
-	struct net *net = ovs_dp_get_net(vport->dp);
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	struct sock *sk = vxlan_port->vs->sock->sk;
-	__be16 dst_port = inet_sk(sk)->inet_sport;
-	const struct ovs_key_ipv4_tunnel *tun_key;
-	struct vxlan_metadata md = {0};
-	struct rtable *rt;
-	struct flowi4 fl;
-	__be16 src_port;
-	__be16 df;
-	int err;
-	u32 vxflags;
-
-	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-		err = -EINVAL;
-		goto error;
-	}
-
-	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-	rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
-	if (IS_ERR(rt)) {
-		err = PTR_ERR(rt);
-		goto error;
-	}
-
-	df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-		htons(IP_DF) : 0;
-
-	skb->ignore_df = 1;
-
-	src_port = udp_flow_src_port(net, skb, 0, 0, true);
-	md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
-	md.gbp = vxlan_ext_gbp(skb);
-	vxflags = vxlan_port->exts |
-		      (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
-
-	err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
-			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
-			     src_port, dst_port,
-			     &md, false, vxflags);
-	if (err < 0)
-		ip_rt_put(rt);
-	return err;
-error:
-	kfree_skb(skb);
-	return err;
+	return ovs_netdev_link(vport, parms->name);
 }
 
 static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				     struct ovs_tunnel_info *egress_tun_info)
+				     struct dp_upcall_info *upcall)
 {
+	struct vxlan_dev *vxlan = netdev_priv(vport->dev);
 	struct net *net = ovs_dp_get_net(vport->dp);
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+	__be16 dst_port = vxlan_dev_dst_port(vxlan);
 	__be16 src_port;
 	int port_min;
 	int port_max;
@@ -281,37 +159,28 @@
 	inet_get_local_port_range(net, &port_min, &port_max);
 	src_port = udp_flow_src_port(net, skb, 0, 0, true);
 
-	return ovs_tunnel_get_egress_info(egress_tun_info, net,
-					  OVS_CB(skb)->egress_tun_info,
-					  IPPROTO_UDP, skb->mark,
+	return ovs_tunnel_get_egress_info(upcall, net,
+					  skb, IPPROTO_UDP,
 					  src_port, dst_port);
 }
 
-static const char *vxlan_get_name(const struct vport *vport)
-{
-	struct vxlan_port *vxlan_port = vxlan_vport(vport);
-	return vxlan_port->name;
-}
-
-static struct vport_ops ovs_vxlan_vport_ops = {
-	.type		= OVS_VPORT_TYPE_VXLAN,
-	.create		= vxlan_tnl_create,
-	.destroy	= vxlan_tnl_destroy,
-	.get_name	= vxlan_get_name,
-	.get_options	= vxlan_get_options,
-	.send		= vxlan_tnl_send,
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+	.type			= OVS_VPORT_TYPE_VXLAN,
+	.create			= vxlan_create,
+	.destroy		= ovs_netdev_tunnel_destroy,
+	.get_options		= vxlan_get_options,
+	.send			= ovs_netdev_send,
 	.get_egress_tun_info	= vxlan_get_egress_tun_info,
-	.owner		= THIS_MODULE,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
 {
-	return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+	return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
 }
 
 static void __exit ovs_vxlan_tnl_exit(void)
 {
-	ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+	ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
 }
 
 module_init(ovs_vxlan_tnl_init);
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
deleted file mode 100644
index 4b08233e..0000000
--- a/net/openvswitch/vport-vxlan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef VPORT_VXLAN_H
-#define VPORT_VXLAN_H 1
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct ovs_vxlan_opts {
-	__u32 gbp;
-};
-
-#endif
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 067a3ff..dc81dc6 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -34,9 +34,6 @@
 #include "vport.h"
 #include "vport-internal_dev.h"
 
-static void ovs_vport_record_error(struct vport *,
-				   enum vport_err_type err_type);
-
 static LIST_HEAD(vport_ops_list);
 
 /* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -113,7 +110,7 @@
 	struct vport *vport;
 
 	hlist_for_each_entry_rcu(vport, bucket, hash_node)
-		if (!strcmp(name, vport->ops->get_name(vport)) &&
+		if (!strcmp(name, ovs_vport_name(vport)) &&
 		    net_eq(ovs_dp_get_net(vport->dp), net))
 			return vport;
 
@@ -157,12 +154,6 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-	if (!vport->percpu_stats) {
-		kfree(vport);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	return vport;
 }
 EXPORT_SYMBOL_GPL(ovs_vport_alloc);
@@ -183,7 +174,6 @@
 	 * it is safe to use raw dereference.
 	 */
 	kfree(rcu_dereference_raw(vport->upcall_portids));
-	free_percpu(vport->percpu_stats);
 	kfree(vport);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_free);
@@ -226,7 +216,7 @@
 		}
 
 		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
-				     vport->ops->get_name(vport));
+				     ovs_vport_name(vport));
 		hlist_add_head_rcu(&vport->hash_node, bucket);
 		return vport;
 	}
@@ -290,30 +280,24 @@
  */
 void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
 {
+	struct net_device *dev = vport->dev;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
+	stats->rx_errors  = dev->stats.rx_errors;
+	stats->tx_errors  = dev->stats.tx_errors;
+	stats->tx_dropped = dev->stats.tx_dropped;
+	stats->rx_dropped = dev->stats.rx_dropped;
 
-	/* We potentially have 2 sources of stats that need to be combined:
-	 * those we have collected (split into err_stats and percpu_stats) from
-	 * set_stats() and device error stats from netdev->get_stats() (for
-	 * errors that happen  downstream and therefore aren't reported through
-	 * our vport_record_error() function).
-	 * Stats from first source are reported by ovs (OVS_VPORT_ATTR_STATS).
-	 * netdev-stats can be directly read over netlink-ioctl.
-	 */
-
-	stats->rx_errors  = atomic_long_read(&vport->err_stats.rx_errors);
-	stats->tx_errors  = atomic_long_read(&vport->err_stats.tx_errors);
-	stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
-	stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);
+	stats->rx_dropped += atomic_long_read(&dev->rx_dropped);
+	stats->tx_dropped += atomic_long_read(&dev->tx_dropped);
 
 	for_each_possible_cpu(i) {
 		const struct pcpu_sw_netstats *percpu_stats;
 		struct pcpu_sw_netstats local_stats;
 		unsigned int start;
 
-		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
+		percpu_stats = per_cpu_ptr(dev->tstats, i);
 
 		do {
 			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
@@ -468,94 +452,25 @@
  * Must be called with rcu_read_lock.  The packet cannot be shared and
  * skb->data should point to the Ethernet header.
  */
-void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
-		       const struct ovs_tunnel_info *tun_info)
+int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
+		      const struct ip_tunnel_info *tun_info)
 {
-	struct pcpu_sw_netstats *stats;
 	struct sw_flow_key key;
 	int error;
 
-	stats = this_cpu_ptr(vport->percpu_stats);
-	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets++;
-	stats->rx_bytes += skb->len +
-			   (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-	u64_stats_update_end(&stats->syncp);
-
 	OVS_CB(skb)->input_vport = vport;
-	OVS_CB(skb)->egress_tun_info = NULL;
+	OVS_CB(skb)->mru = 0;
 	/* Extract flow from 'skb' into 'key'. */
 	error = ovs_flow_key_extract(tun_info, skb, &key);
 	if (unlikely(error)) {
 		kfree_skb(skb);
-		return;
+		return error;
 	}
 	ovs_dp_process_packet(skb, &key);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ovs_vport_receive);
 
-/**
- *	ovs_vport_send - send a packet on a device
- *
- * @vport: vport on which to send the packet
- * @skb: skb to send
- *
- * Sends the given packet and returns the length of data sent.  Either ovs
- * lock or rcu_read_lock must be held.
- */
-int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
-{
-	int sent = vport->ops->send(vport, skb);
-
-	if (likely(sent > 0)) {
-		struct pcpu_sw_netstats *stats;
-
-		stats = this_cpu_ptr(vport->percpu_stats);
-
-		u64_stats_update_begin(&stats->syncp);
-		stats->tx_packets++;
-		stats->tx_bytes += sent;
-		u64_stats_update_end(&stats->syncp);
-	} else if (sent < 0) {
-		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
-	} else {
-		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-	}
-	return sent;
-}
-
-/**
- *	ovs_vport_record_error - indicate device error to generic stats layer
- *
- * @vport: vport that encountered the error
- * @err_type: one of enum vport_err_type types to indicate the error type
- *
- * If using the vport generic stats layer indicate that an error of the given
- * type has occurred.
- */
-static void ovs_vport_record_error(struct vport *vport,
-				   enum vport_err_type err_type)
-{
-	switch (err_type) {
-	case VPORT_E_RX_DROPPED:
-		atomic_long_inc(&vport->err_stats.rx_dropped);
-		break;
-
-	case VPORT_E_RX_ERROR:
-		atomic_long_inc(&vport->err_stats.rx_errors);
-		break;
-
-	case VPORT_E_TX_DROPPED:
-		atomic_long_inc(&vport->err_stats.tx_dropped);
-		break;
-
-	case VPORT_E_TX_ERROR:
-		atomic_long_inc(&vport->err_stats.tx_errors);
-		break;
-	}
-
-}
-
 static void free_vport_rcu(struct rcu_head *rcu)
 {
 	struct vport *vport = container_of(rcu, struct vport, rcu);
@@ -572,22 +487,26 @@
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
 
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
 			       struct net *net,
-			       const struct ovs_tunnel_info *tun_info,
+			       struct sk_buff *skb,
 			       u8 ipproto,
-			       u32 skb_mark,
 			       __be16 tp_src,
 			       __be16 tp_dst)
 {
-	const struct ovs_key_ipv4_tunnel *tun_key;
+	struct ip_tunnel_info *egress_tun_info = upcall->egress_tun_info;
+	const struct ip_tunnel_info *tun_info = skb_tunnel_info(skb);
+	const struct ip_tunnel_key *tun_key;
+	u32 skb_mark = skb->mark;
 	struct rtable *rt;
 	struct flowi4 fl;
 
 	if (unlikely(!tun_info))
 		return -EINVAL;
+	if (ip_tunnel_info_af(tun_info) != AF_INET)
+		return -EINVAL;
 
-	tun_key = &tun_info->tunnel;
+	tun_key = &tun_info->key;
 
 	/* Route lookup to get source IP address.
 	 * The process may need to be changed if the corresponding process
@@ -602,26 +521,26 @@
 	/* Generate egress_tun_info based on tun_info,
 	 * saddr, tp_src and tp_dst
 	 */
-	__ovs_flow_tun_info_init(egress_tun_info,
-				 fl.saddr, tun_key->ipv4_dst,
-				 tun_key->ipv4_tos,
-				 tun_key->ipv4_ttl,
-				 tp_src, tp_dst,
-				 tun_key->tun_id,
-				 tun_key->tun_flags,
-				 tun_info->options,
-				 tun_info->options_len);
-
+	ip_tunnel_key_init(&egress_tun_info->key,
+			   fl.saddr, tun_key->u.ipv4.dst,
+			   tun_key->tos,
+			   tun_key->ttl,
+			   tp_src, tp_dst,
+			   tun_key->tun_id,
+			   tun_key->tun_flags);
+	egress_tun_info->options_len = tun_info->options_len;
+	egress_tun_info->mode = tun_info->mode;
+	upcall->egress_tun_opts = ip_tunnel_info_opts(egress_tun_info);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
 
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				  struct ovs_tunnel_info *info)
+				  struct dp_upcall_info *upcall)
 {
 	/* get_egress_tun_info() is only implemented on tunnel ports. */
 	if (unlikely(!vport->ops->get_egress_tun_info))
 		return -EINVAL;
 
-	return vport->ops->get_egress_tun_info(vport, skb, info);
+	return vport->ops->get_egress_tun_info(vport, skb, upcall);
 }
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index bc85331..a413f3a 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -27,6 +27,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
+#include <net/route.h>
 
 #include "datapath.h"
 
@@ -35,10 +36,6 @@
 
 /* The following definitions are for users of the vport subsystem: */
 
-struct vport_net {
-	struct vport __rcu *gre_vport;
-};
-
 int ovs_vport_init(void);
 void ovs_vport_exit(void);
 
@@ -56,26 +53,16 @@
 int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
 u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
-int ovs_vport_send(struct vport *, struct sk_buff *);
-
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct dp_upcall_info *upcall,
 			       struct net *net,
-			       const struct ovs_tunnel_info *tun_info,
+			       struct sk_buff *,
 			       u8 ipproto,
-			       u32 skb_mark,
 			       __be16 tp_src,
 			       __be16 tp_dst);
+
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-				  struct ovs_tunnel_info *info);
+				  struct dp_upcall_info *upcall);
 
-/* The following definitions are for implementers of vport devices: */
-
-struct vport_err_stats {
-	atomic_long_t rx_dropped;
-	atomic_long_t rx_errors;
-	atomic_long_t tx_dropped;
-	atomic_long_t tx_errors;
-};
 /**
  * struct vport_portids - array of netlink portids of a vport.
  *                        must be protected by rcu.
@@ -101,12 +88,10 @@
  * @hash_node: Element in @dev_table hash table in vport.c.
  * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
  * @ops: Class structure.
- * @percpu_stats: Points to per-CPU statistics used and maintained by vport
- * @err_stats: Points to error statistics used and maintained by vport
  * @detach_list: list used for detaching vport in net-exit call.
  */
 struct vport {
-	struct rcu_head rcu;
+	struct net_device *dev;
 	struct datapath	*dp;
 	struct vport_portids __rcu *upcall_portids;
 	u16 port_no;
@@ -115,10 +100,8 @@
 	struct hlist_node dp_hash_node;
 	const struct vport_ops *ops;
 
-	struct pcpu_sw_netstats __percpu *percpu_stats;
-
-	struct vport_err_stats err_stats;
 	struct list_head detach_list;
+	struct rcu_head rcu;
 };
 
 /**
@@ -155,8 +138,7 @@
  * @get_options: Appends vport-specific attributes for the configuration of an
  * existing vport to a &struct sk_buff.  May be %NULL for a vport that does not
  * have any configuration.
- * @get_name: Get the device's name.
- * @send: Send a packet on the device.  Returns the length of the packet sent,
+ * @send: Send a packet on the device.
  * zero for dropped packets or negative for error.
  * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for
  * a packet.
@@ -171,24 +153,14 @@
 	int (*set_options)(struct vport *, struct nlattr *);
 	int (*get_options)(const struct vport *, struct sk_buff *);
 
-	/* Called with rcu_read_lock or ovs_mutex. */
-	const char *(*get_name)(const struct vport *);
-
-	int (*send)(struct vport *, struct sk_buff *);
+	void (*send)(struct vport *, struct sk_buff *);
 	int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-				   struct ovs_tunnel_info *);
+				   struct dp_upcall_info *upcall);
 
 	struct module *owner;
 	struct list_head list;
 };
 
-enum vport_err_type {
-	VPORT_E_RX_DROPPED,
-	VPORT_E_RX_ERROR,
-	VPORT_E_TX_DROPPED,
-	VPORT_E_TX_ERROR,
-};
-
 struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *,
 			      const struct vport_parms *);
 void ovs_vport_free(struct vport *);
@@ -225,8 +197,8 @@
 	return (struct vport *)((u8 *)priv - ALIGN(sizeof(struct vport), VPORT_ALIGN));
 }
 
-void ovs_vport_receive(struct vport *, struct sk_buff *,
-		       const struct ovs_tunnel_info *);
+int ovs_vport_receive(struct vport *, struct sk_buff *,
+		      const struct ip_tunnel_info *);
 
 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
@@ -235,11 +207,16 @@
 		skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
 }
 
+static inline const char *ovs_vport_name(struct vport *vport)
+{
+	return vport->dev->name;
+}
+
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
 static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
-						     const struct ovs_key_ipv4_tunnel *key,
+						     const struct ip_tunnel_key *key,
 						     u32 mark,
 						     struct flowi4 *fl,
 						     u8 protocol)
@@ -247,13 +224,19 @@
 	struct rtable *rt;
 
 	memset(fl, 0, sizeof(*fl));
-	fl->daddr = key->ipv4_dst;
-	fl->saddr = key->ipv4_src;
-	fl->flowi4_tos = RT_TOS(key->ipv4_tos);
+	fl->daddr = key->u.ipv4.dst;
+	fl->saddr = key->u.ipv4.src;
+	fl->flowi4_tos = RT_TOS(key->tos);
 	fl->flowi4_mark = mark;
 	fl->flowi4_proto = protocol;
 
 	rt = ip_route_output_key(net, fl);
 	return rt;
 }
+
+static inline void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
+{
+	vport->ops->send(vport, skb);
+}
+
 #endif /* vport.h */
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ed458b3..7b8e39a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -92,6 +92,7 @@
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
 #endif
+#include <linux/bpf.h>
 
 #include "internal.h"
 
@@ -518,13 +519,11 @@
 }
 
 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
-		int tx_ring,
 		struct sk_buff_head *rb_queue)
 {
 	struct tpacket_kbdq_core *pkc;
 
-	pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
-			GET_PBDQC_FROM_RB(&po->rx_ring);
+	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 
 	spin_lock_bh(&rb_queue->lock);
 	pkc->delete_blk_timer = 1;
@@ -1412,6 +1411,22 @@
 	return skb_get_queue_mapping(skb) % num;
 }
 
+static unsigned int fanout_demux_bpf(struct packet_fanout *f,
+				     struct sk_buff *skb,
+				     unsigned int num)
+{
+	struct bpf_prog *prog;
+	unsigned int ret = 0;
+
+	rcu_read_lock();
+	prog = rcu_dereference(f->bpf_prog);
+	if (prog)
+		ret = BPF_PROG_RUN(prog, skb) % num;
+	rcu_read_unlock();
+
+	return ret;
+}
+
 static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
 {
 	return f->flags & (flag >> 8);
@@ -1456,6 +1471,10 @@
 	case PACKET_FANOUT_ROLLOVER:
 		idx = fanout_demux_rollover(f, skb, 0, false, num);
 		break;
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
+		idx = fanout_demux_bpf(f, skb, num);
+		break;
 	}
 
 	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
@@ -1504,6 +1523,103 @@
 	return false;
 }
 
+static void fanout_init_data(struct packet_fanout *f)
+{
+	switch (f->type) {
+	case PACKET_FANOUT_LB:
+		atomic_set(&f->rr_cur, 0);
+		break;
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
+		RCU_INIT_POINTER(f->bpf_prog, NULL);
+		break;
+	}
+}
+
+static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
+{
+	struct bpf_prog *old;
+
+	spin_lock(&f->lock);
+	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
+	rcu_assign_pointer(f->bpf_prog, new);
+	spin_unlock(&f->lock);
+
+	if (old) {
+		synchronize_net();
+		bpf_prog_destroy(old);
+	}
+}
+
+static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+				unsigned int len)
+{
+	struct bpf_prog *new;
+	struct sock_fprog fprog;
+	int ret;
+
+	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+		return -EPERM;
+	if (len != sizeof(fprog))
+		return -EINVAL;
+	if (copy_from_user(&fprog, data, len))
+		return -EFAULT;
+
+	ret = bpf_prog_create_from_user(&new, &fprog, NULL);
+	if (ret)
+		return ret;
+
+	__fanout_set_data_bpf(po->fanout, new);
+	return 0;
+}
+
+static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+				unsigned int len)
+{
+	struct bpf_prog *new;
+	u32 fd;
+
+	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
+		return -EPERM;
+	if (len != sizeof(fd))
+		return -EINVAL;
+	if (copy_from_user(&fd, data, len))
+		return -EFAULT;
+
+	new = bpf_prog_get(fd);
+	if (IS_ERR(new))
+		return PTR_ERR(new);
+	if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
+		bpf_prog_put(new);
+		return -EINVAL;
+	}
+
+	__fanout_set_data_bpf(po->fanout, new);
+	return 0;
+}
+
+static int fanout_set_data(struct packet_sock *po, char __user *data,
+			   unsigned int len)
+{
+	switch (po->fanout->type) {
+	case PACKET_FANOUT_CBPF:
+		return fanout_set_data_cbpf(po, data, len);
+	case PACKET_FANOUT_EBPF:
+		return fanout_set_data_ebpf(po, data, len);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void fanout_release_data(struct packet_fanout *f)
+{
+	switch (f->type) {
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
+		__fanout_set_data_bpf(f, NULL);
+	}
+}
+
 static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 {
 	struct packet_sock *po = pkt_sk(sk);
@@ -1521,6 +1637,8 @@
 	case PACKET_FANOUT_CPU:
 	case PACKET_FANOUT_RND:
 	case PACKET_FANOUT_QM:
+	case PACKET_FANOUT_CBPF:
+	case PACKET_FANOUT_EBPF:
 		break;
 	default:
 		return -EINVAL;
@@ -1563,10 +1681,10 @@
 		match->id = id;
 		match->type = type;
 		match->flags = flags;
-		atomic_set(&match->rr_cur, 0);
 		INIT_LIST_HEAD(&match->list);
 		spin_lock_init(&match->lock);
 		atomic_set(&match->sk_ref, 0);
+		fanout_init_data(match);
 		match->prot_hook.type = po->prot_hook.type;
 		match->prot_hook.dev = po->prot_hook.dev;
 		match->prot_hook.func = packet_rcv_fanout;
@@ -1612,6 +1730,7 @@
 	if (atomic_dec_and_test(&f->sk_ref)) {
 		list_del(&f->list);
 		dev_remove_pack(&f->prot_hook);
+		fanout_release_data(f);
 		kfree(f);
 	}
 	mutex_unlock(&fanout_mutex);
@@ -3531,6 +3650,13 @@
 
 		return fanout_add(sk, val & 0xffff, val >> 16);
 	}
+	case PACKET_FANOUT_DATA:
+	{
+		if (!po->fanout)
+			return -EINVAL;
+
+		return fanout_set_data(po, optval, optlen);
+	}
 	case PACKET_TX_HAS_OFF:
 	{
 		unsigned int val;
@@ -4043,7 +4169,7 @@
 	if (closing && (po->tp_version > TPACKET_V2)) {
 		/* Because we don't support block-based V3 on tx-ring */
 		if (!tx_ring)
-			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+			prb_shutdown_retire_blk_timer(po, rb_queue);
 	}
 	release_sock(sk);
 
diff --git a/net/packet/internal.h b/net/packet/internal.h
index e20b3e8..9ee4631 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -79,7 +79,10 @@
 	u16			id;
 	u8			type;
 	u8			flags;
-	atomic_t		rr_cur;
+	union {
+		atomic_t		rr_cur;
+		struct bpf_prog __rcu	*bpf_prog;
+	};
 	struct list_head	list;
 	struct sock		*arr[PACKET_FANOUT_MAX];
 	spinlock_t		lock;
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index 896834c..a2f28a6 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -438,6 +438,14 @@
 	.sendpage =	sock_no_sendpage,
 };
 
+static void rds_sock_destruct(struct sock *sk)
+{
+	struct rds_sock *rs = rds_sk_to_rs(sk);
+
+	WARN_ON((&rs->rs_item != rs->rs_item.next ||
+		 &rs->rs_item != rs->rs_item.prev));
+}
+
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
 	struct rds_sock *rs;
@@ -445,6 +453,7 @@
 	sock_init_data(sock, sk);
 	sock->ops		= &rds_proto_ops;
 	sk->sk_protocol		= protocol;
+	sk->sk_destruct		= rds_sock_destruct;
 
 	rs = rds_sk_to_rs(sk);
 	spin_lock_init(&rs->rs_lock);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 4ebd29c..dd666fb 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -185,7 +185,8 @@
 		ret = 0;
 		goto out;
 	}
-	trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
+	trans = rds_trans_get_preferred(sock_net(sock->sk),
+					sin->sin_addr.s_addr);
 	if (!trans) {
 		ret = -EADDRNOTAVAIL;
 		rds_remove_bound(rs);
diff --git a/net/rds/connection.c b/net/rds/connection.c
index da6da57..a50e652 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -117,7 +117,8 @@
  * For now they are not garbage collected once they're created.  They
  * are torn down as the module is removed, if ever.
  */
-static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
+static struct rds_connection *__rds_conn_create(struct net *net,
+						__be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp,
 				       int is_outgoing)
 {
@@ -157,6 +158,7 @@
 	conn->c_faddr = faddr;
 	spin_lock_init(&conn->c_lock);
 	conn->c_next_tx_seq = 1;
+	rds_conn_net_set(conn, net);
 
 	init_waitqueue_head(&conn->c_waitq);
 	INIT_LIST_HEAD(&conn->c_send_queue);
@@ -174,7 +176,7 @@
 	 * can bind to the destination address then we'd rather the messages
 	 * flow through loopback rather than either transport.
 	 */
-	loop_trans = rds_trans_get_preferred(faddr);
+	loop_trans = rds_trans_get_preferred(net, faddr);
 	if (loop_trans) {
 		rds_trans_put(loop_trans);
 		conn->c_loopback = 1;
@@ -260,17 +262,19 @@
 	return conn;
 }
 
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+				       __be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp)
 {
-	return __rds_conn_create(laddr, faddr, trans, gfp, 0);
+	return __rds_conn_create(net, laddr, faddr, trans, gfp, 0);
 }
 EXPORT_SYMBOL_GPL(rds_conn_create);
 
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+						__be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp)
 {
-	return __rds_conn_create(laddr, faddr, trans, gfp, 1);
+	return __rds_conn_create(net, laddr, faddr, trans, gfp, 1);
 }
 EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
 
@@ -297,6 +301,8 @@
 
 		wait_event(conn->c_waitq,
 			   !test_bit(RDS_IN_XMIT, &conn->c_flags));
+		wait_event(conn->c_waitq,
+			   !test_bit(RDS_RECV_REFILL, &conn->c_flags));
 
 		conn->c_trans->conn_shutdown(conn);
 		rds_conn_reset(conn);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ba2dffe..d020fad 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -317,7 +317,7 @@
  * allowed to influence which paths have priority.  We could call userspace
  * asserting this policy "routing".
  */
-static int rds_ib_laddr_check(__be32 addr)
+static int rds_ib_laddr_check(struct net *net, __be32 addr)
 {
 	int ret;
 	struct rdma_cm_id *cm_id;
@@ -366,6 +366,7 @@
 	rds_ib_sysctl_exit();
 	rds_ib_recv_exit();
 	rds_trans_unregister(&rds_ib_transport);
+	rds_ib_fmr_exit();
 }
 
 struct rds_transport rds_ib_transport = {
@@ -401,10 +402,14 @@
 
 	INIT_LIST_HEAD(&rds_ib_devices);
 
-	ret = ib_register_client(&rds_ib_client);
+	ret = rds_ib_fmr_init();
 	if (ret)
 		goto out;
 
+	ret = ib_register_client(&rds_ib_client);
+	if (ret)
+		goto out_fmr_exit;
+
 	ret = rds_ib_sysctl_init();
 	if (ret)
 		goto out_ibreg;
@@ -427,6 +432,8 @@
 	rds_ib_sysctl_exit();
 out_ibreg:
 	rds_ib_unregister_client();
+out_fmr_exit:
+	rds_ib_fmr_exit();
 out:
 	return ret;
 }
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 86d88ec..9fc95e3 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -313,6 +313,8 @@
 void rds_ib_sync_mr(void *trans_private, int dir);
 void rds_ib_free_mr(void *trans_private, int invalidate);
 void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
 
 /* ib_recv.c */
 int rds_ib_recv_init(void);
@@ -320,7 +322,7 @@
 int rds_ib_recv(struct rds_connection *conn);
 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
 void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
-void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
 void rds_ib_inc_free(struct rds_incoming *inc);
 int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
 void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 0da2a45..d150bb4 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -135,7 +135,7 @@
 	rds_ib_recv_init_ring(ic);
 	/* Post receive buffers - as a side effect, this will update
 	 * the posted credit count. */
-	rds_ib_recv_refill(conn, 1);
+	rds_ib_recv_refill(conn, 1, GFP_KERNEL);
 
 	/* Tune RNR behavior */
 	rds_ib_tune_rnr(ic, &qp_attr);
@@ -448,8 +448,9 @@
 		 (unsigned long long)be64_to_cpu(lguid),
 		 (unsigned long long)be64_to_cpu(fguid));
 
-	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_ib_transport,
-			       GFP_KERNEL);
+	/* RDS/IB is not currently netns aware, thus init_net */
+	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+			       &rds_ib_transport, GFP_KERNEL);
 	if (IS_ERR(conn)) {
 		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
 		conn = NULL;
@@ -639,6 +640,15 @@
 			   (atomic_read(&ic->i_signaled_sends) == 0));
 		tasklet_kill(&ic->i_recv_tasklet);
 
+		/* first destroy the ib state that generates callbacks */
+		if (ic->i_cm_id->qp)
+			rdma_destroy_qp(ic->i_cm_id);
+		if (ic->i_send_cq)
+			ib_destroy_cq(ic->i_send_cq);
+		if (ic->i_recv_cq)
+			ib_destroy_cq(ic->i_recv_cq);
+
+		/* then free the resources that ib callbacks use */
 		if (ic->i_send_hdrs)
 			ib_dma_free_coherent(dev,
 					   ic->i_send_ring.w_nr *
@@ -662,12 +672,6 @@
 		if (ic->i_recvs)
 			rds_ib_recv_clear_ring(ic);
 
-		if (ic->i_cm_id->qp)
-			rdma_destroy_qp(ic->i_cm_id);
-		if (ic->i_send_cq)
-			ib_destroy_cq(ic->i_send_cq);
-		if (ic->i_recv_cq)
-			ib_destroy_cq(ic->i_recv_cq);
 		rdma_destroy_id(ic->i_cm_id);
 
 		/*
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 657ba9f..251d1ce 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -83,6 +83,25 @@
 	struct ib_fmr_attr	fmr_attr;
 };
 
+struct workqueue_struct *rds_ib_fmr_wq;
+
+int rds_ib_fmr_init(void)
+{
+	rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+	if (!rds_ib_fmr_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+/* By the time this is called all the IB devices should have been torn down and
+ * had their pools freed.  As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+	destroy_workqueue(rds_ib_fmr_wq);
+}
+
 static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
 static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -151,12 +170,17 @@
 	struct rds_ib_device *rds_ibdev_old;
 
 	rds_ibdev_old = rds_ib_get_device(ipaddr);
-	if (rds_ibdev_old) {
+	if (!rds_ibdev_old)
+		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+
+	if (rds_ibdev_old != rds_ibdev) {
 		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
 		rds_ib_dev_put(rds_ibdev_old);
+		return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
 	}
+	rds_ib_dev_put(rds_ibdev_old);
 
-	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
+	return 0;
 }
 
 void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -336,8 +360,6 @@
 		goto out_no_cigar;
 	}
 
-	memset(ibmr, 0, sizeof(*ibmr));
-
 	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
 			(IB_ACCESS_LOCAL_WRITE |
 			 IB_ACCESS_REMOTE_READ |
@@ -485,7 +507,7 @@
 
 			/* FIXME we need a way to tell a r/w MR
 			 * from a r/o MR */
-			BUG_ON(irqs_disabled());
+			WARN_ON(!page->mapping && irqs_disabled());
 			set_page_dirty(page);
 			put_page(page);
 		}
@@ -523,11 +545,13 @@
 /*
  * given an llist of mrs, put them all into the list_head for more processing
  */
-static void llist_append_to_list(struct llist_head *llist, struct list_head *list)
+static unsigned int llist_append_to_list(struct llist_head *llist,
+					 struct list_head *list)
 {
 	struct rds_ib_mr *ibmr;
 	struct llist_node *node;
 	struct llist_node *next;
+	unsigned int count = 0;
 
 	node = llist_del_all(llist);
 	while (node) {
@@ -535,7 +559,9 @@
 		ibmr = llist_entry(node, struct rds_ib_mr, llnode);
 		list_add_tail(&ibmr->unmap_list, list);
 		node = next;
+		count++;
 	}
+	return count;
 }
 
 /*
@@ -576,7 +602,7 @@
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);
 	unsigned long unpinned = 0;
-	unsigned int nfreed = 0, ncleaned = 0, free_goal;
+	unsigned int nfreed = 0, dirty_to_clean = 0, free_goal;
 	int ret = 0;
 
 	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
@@ -618,8 +644,8 @@
 	/* Get the list of all MRs to be dropped. Ordering matters -
 	 * we want to put drop_list ahead of free_list.
 	 */
-	llist_append_to_list(&pool->drop_list, &unmap_list);
-	llist_append_to_list(&pool->free_list, &unmap_list);
+	dirty_to_clean = llist_append_to_list(&pool->drop_list, &unmap_list);
+	dirty_to_clean += llist_append_to_list(&pool->free_list, &unmap_list);
 	if (free_all)
 		llist_append_to_list(&pool->clean_list, &unmap_list);
 
@@ -647,7 +673,6 @@
 			kfree(ibmr);
 			nfreed++;
 		}
-		ncleaned++;
 	}
 
 	if (!list_empty(&unmap_list)) {
@@ -673,7 +698,7 @@
 	}
 
 	atomic_sub(unpinned, &pool->free_pinned);
-	atomic_sub(ncleaned, &pool->dirty_count);
+	atomic_sub(dirty_to_clean, &pool->dirty_count);
 	atomic_sub(nfreed, &pool->item_count);
 
 out:
@@ -710,16 +735,18 @@
 
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
-	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		schedule_delayed_work(&pool->flush_worker, 10);
+	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
+		queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
 			rds_ib_flush_mr_pool(pool, 0, NULL);
 		} else {
 			/* We get here if the user created a MR marked
-			 * as use_once and invalidate at the same time. */
-			schedule_delayed_work(&pool->flush_worker, 10);
+			 * as use_once and invalidate at the same time.
+			 */
+			queue_delayed_work(rds_ib_fmr_wq,
+					   &pool->flush_worker, 10);
 		}
 	}
 
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index cac5b45..6bbe620 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -297,7 +297,7 @@
 }
 
 static int rds_ib_recv_refill_one(struct rds_connection *conn,
-				  struct rds_ib_recv_work *recv, int prefill)
+				  struct rds_ib_recv_work *recv, gfp_t gfp)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct ib_sge *sge;
@@ -305,7 +305,7 @@
 	gfp_t slab_mask = GFP_NOWAIT;
 	gfp_t page_mask = GFP_NOWAIT;
 
-	if (prefill) {
+	if (gfp & __GFP_WAIT) {
 		slab_mask = GFP_KERNEL;
 		page_mask = GFP_HIGHUSER;
 	}
@@ -347,6 +347,24 @@
 	return ret;
 }
 
+static int acquire_refill(struct rds_connection *conn)
+{
+	return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0;
+}
+
+static void release_refill(struct rds_connection *conn)
+{
+	clear_bit(RDS_RECV_REFILL, &conn->c_flags);
+
+	/* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+	 * hot path and finding waiters is very rare.  We don't want to walk
+	 * the system-wide hashed waitqueue buckets in the fast path only to
+	 * almost never find waiters.
+	 */
+	if (waitqueue_active(&conn->c_waitq))
+		wake_up_all(&conn->c_waitq);
+}
+
 /*
  * This tries to allocate and post unused work requests after making sure that
  * they have all the allocations they need to queue received fragments into
@@ -354,15 +372,23 @@
  *
  * -1 is returned if posting fails due to temporary resource exhaustion.
  */
-void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp)
 {
 	struct rds_ib_connection *ic = conn->c_transport_data;
 	struct rds_ib_recv_work *recv;
 	struct ib_recv_wr *failed_wr;
 	unsigned int posted = 0;
 	int ret = 0;
+	bool can_wait = !!(gfp & __GFP_WAIT);
 	u32 pos;
 
+	/* the goal here is to just make sure that someone, somewhere
+	 * is posting buffers.  If we can't get the refill lock,
+	 * let them do their thing
+	 */
+	if (!acquire_refill(conn))
+		return;
+
 	while ((prefill || rds_conn_up(conn)) &&
 	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
 		if (pos >= ic->i_recv_ring.w_nr) {
@@ -372,7 +398,7 @@
 		}
 
 		recv = &ic->i_recvs[pos];
-		ret = rds_ib_recv_refill_one(conn, recv, prefill);
+		ret = rds_ib_recv_refill_one(conn, recv, gfp);
 		if (ret) {
 			break;
 		}
@@ -402,6 +428,24 @@
 
 	if (ret)
 		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
+
+	release_refill(conn);
+
+	/* if we're called from the softirq handler, we'll be GFP_NOWAIT.
+	 * in this case the ring being low is going to lead to more interrupts
+	 * and we can safely let the softirq code take care of it unless the
+	 * ring is completely empty.
+	 *
+	 * if we're called from krdsd, we'll be GFP_KERNEL.  In this case
+	 * we might have raced with the softirq code while we had the refill
+	 * lock held.  Use rds_ib_ring_low() instead of ring_empty to decide
+	 * if we should requeue.
+	 */
+	if (rds_conn_up(conn) &&
+	    ((can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
+	    rds_ib_ring_empty(&ic->i_recv_ring))) {
+		queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
+	}
 }
 
 /*
@@ -982,10 +1026,17 @@
 		}
 
 		/*
-		 * It's very important that we only free this ring entry if we've truly
-		 * freed the resources allocated to the entry.  The refilling path can
-		 * leak if we don't.
+		 * rds_ib_process_recv() doesn't always consume the frag, and
+		 * we might not have called it at all if the wc didn't indicate
+		 * success. We already unmapped the frag's pages, though, and
+		 * the following rds_ib_ring_free() call tells the refill path
+		 * that it will not find an allocated frag here. Make sure we
+		 * keep that promise by freeing a frag that's still on the ring.
 		 */
+		if (recv->r_frag) {
+			rds_ib_frag_free(ic, recv->r_frag);
+			recv->r_frag = NULL;
+		}
 		rds_ib_ring_free(&ic->i_recv_ring, 1);
 	}
 }
@@ -1016,7 +1067,7 @@
 		rds_ib_stats_inc(s_ib_rx_ring_empty);
 
 	if (rds_ib_ring_low(&ic->i_recv_ring))
-		rds_ib_recv_refill(conn, 0);
+		rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
 }
 
 int rds_ib_recv(struct rds_connection *conn)
@@ -1025,8 +1076,10 @@
 	int ret = 0;
 
 	rdsdebug("conn %p\n", conn);
-	if (rds_conn_up(conn))
+	if (rds_conn_up(conn)) {
 		rds_ib_attempt_ack(ic);
+		rds_ib_recv_refill(conn, 0, GFP_KERNEL);
+	}
 
 	return ret;
 }
@@ -1049,9 +1102,10 @@
 	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
 					sizeof(struct rds_page_frag),
 					0, SLAB_HWCACHE_ALIGN, NULL);
-	if (!rds_ib_frag_slab)
+	if (!rds_ib_frag_slab) {
 		kmem_cache_destroy(rds_ib_incoming_slab);
-	else
+		rds_ib_incoming_slab = NULL;
+	} else
 		ret = 0;
 out:
 	return ret;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 5d0a704..c576ebe 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -709,6 +709,11 @@
 	if (scat == &rm->data.op_sg[rm->data.op_count]) {
 		prev->s_op = ic->i_data_op;
 		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
+		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
+			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
+			prev->s_wr.send_flags |= IB_SEND_SIGNALED;
+			nr_sig++;
+		}
 		ic->i_data_op = NULL;
 	}
 
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5899356..5d5a9d2 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -218,7 +218,7 @@
  * allowed to influence which paths have priority.  We could call userspace
  * asserting this policy "routing".
  */
-static int rds_iw_laddr_check(__be32 addr)
+static int rds_iw_laddr_check(struct net *net, __be32 addr)
 {
 	int ret;
 	struct rdma_cm_id *cm_id;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 8f486fa..a6553a6 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -398,8 +398,9 @@
 		 &dp->dp_saddr, &dp->dp_daddr,
 		 RDS_PROTOCOL_MAJOR(version), RDS_PROTOCOL_MINOR(version));
 
-	conn = rds_conn_create(dp->dp_daddr, dp->dp_saddr, &rds_iw_transport,
-			       GFP_KERNEL);
+	/* RDS/IW is not currently netns aware, thus init_net */
+	conn = rds_conn_create(&init_net, dp->dp_daddr, dp->dp_saddr,
+			       &rds_iw_transport, GFP_KERNEL);
 	if (IS_ERR(conn)) {
 		rdsdebug("rds_conn_create failed (%ld)\n", PTR_ERR(conn));
 		conn = NULL;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 40084d8..4c93bad 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -435,9 +435,10 @@
 
 	/* If the MR was marked as invalidate, this will
 	 * trigger an async flush. */
-	if (zot_me)
+	if (zot_me) {
 		rds_destroy_mr(mr);
-	rds_mr_put(mr);
+		rds_mr_put(mr);
+	}
 }
 
 void rds_rdma_free_op(struct rm_rdma_op *ro)
@@ -451,7 +452,7 @@
 		 * is the case for a RDMA_READ which copies from remote
 		 * to local memory */
 		if (!ro->op_write) {
-			BUG_ON(irqs_disabled());
+			WARN_ON(!page->mapping && irqs_disabled());
 			set_page_dirty(page);
 		}
 		put_page(page);
@@ -658,6 +659,8 @@
 		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
 		if (ret < 0)
 			goto out;
+		else
+			ret = 0;
 
 		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
 			 nr_bytes, nr, iov->bytes, iov->addr);
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 2082408..b9b40af 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -34,6 +34,7 @@
 #include <rdma/rdma_cm.h>
 
 #include "rdma_transport.h"
+#include "ib.h"
 
 static struct rdma_cm_id *rds_rdma_listen_id;
 
@@ -82,8 +83,18 @@
 		break;
 
 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
-		/* XXX worry about racing with listen acceptance */
-		ret = trans->cm_initiate_connect(cm_id);
+		/* Connection could have been dropped so make sure the
+		 * cm_id is valid before proceeding
+		 */
+		if (conn) {
+			struct rds_ib_connection *ibic;
+
+			ibic = conn->c_transport_data;
+			if (ibic && ibic->i_cm_id == cm_id)
+				ret = trans->cm_initiate_connect(cm_id);
+			else
+				rds_conn_drop(conn);
+		}
 		break;
 
 	case RDMA_CM_EVENT_ESTABLISHED:
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 2260c1e4..afb4048 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@
 #define RDS_LL_SEND_FULL	0
 #define RDS_RECONNECT_PENDING	1
 #define RDS_IN_XMIT		2
+#define RDS_RECV_REFILL		3
 
 struct rds_connection {
 	struct hlist_node	c_hash_node;
@@ -128,8 +129,21 @@
 
 	/* Protocol version */
 	unsigned int		c_version;
+	possible_net_t		c_net;
 };
 
+static inline
+struct net *rds_conn_net(struct rds_connection *conn)
+{
+	return read_pnet(&conn->c_net);
+}
+
+static inline
+void rds_conn_net_set(struct rds_connection *conn, struct net *net)
+{
+	write_pnet(&conn->c_net, net);
+}
+
 #define RDS_FLAG_CONG_BITMAP	0x01
 #define RDS_FLAG_ACK_REQUIRED	0x02
 #define RDS_FLAG_RETRANSMITTED	0x04
@@ -417,7 +431,7 @@
 	unsigned int		t_prefer_loopback:1;
 	unsigned int		t_type;
 
-	int (*laddr_check)(__be32 addr);
+	int (*laddr_check)(struct net *net, __be32 addr);
 	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
 	void (*conn_free)(void *data);
 	int (*conn_connect)(struct rds_connection *conn);
@@ -608,9 +622,11 @@
 /* conn.c */
 int rds_conn_init(void);
 void rds_conn_exit(void);
-struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create(struct net *net,
+				       __be32 laddr, __be32 faddr,
 				       struct rds_transport *trans, gfp_t gfp);
-struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
+struct rds_connection *rds_conn_create_outgoing(struct net *net,
+						__be32 laddr, __be32 faddr,
 			       struct rds_transport *trans, gfp_t gfp);
 void rds_conn_shutdown(struct rds_connection *conn);
 void rds_conn_destroy(struct rds_connection *conn);
@@ -795,7 +811,7 @@
 /* transport.c */
 int rds_trans_register(struct rds_transport *trans);
 void rds_trans_unregister(struct rds_transport *trans);
-struct rds_transport *rds_trans_get_preferred(__be32 addr);
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr);
 void rds_trans_put(struct rds_transport *trans);
 unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
 				       unsigned int avail);
diff --git a/net/rds/send.c b/net/rds/send.c
index e9430f5..4df61a5 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -282,26 +282,34 @@
 		/* The transport either sends the whole rdma or none of it */
 		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
 			rm->m_final_op = &rm->rdma;
+			/* The transport owns the mapped memory for now.
+			 * You can't unmap it while it's on the send queue
+			 */
+			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
-			if (ret)
+			if (ret) {
+				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
+				wake_up_interruptible(&rm->m_flush_wait);
 				break;
+			}
 			conn->c_xmit_rdma_sent = 1;
 
-			/* The transport owns the mapped memory for now.
-			 * You can't unmap it while it's on the send queue */
-			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 		}
 
 		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
 			rm->m_final_op = &rm->atomic;
+			/* The transport owns the mapped memory for now.
+			 * You can't unmap it while it's on the send queue
+			 */
+			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
-			if (ret)
+			if (ret) {
+				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
+				wake_up_interruptible(&rm->m_flush_wait);
 				break;
+			}
 			conn->c_xmit_atomic_sent = 1;
 
-			/* The transport owns the mapped memory for now.
-			 * You can't unmap it while it's on the send queue */
-			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
 		}
 
 		/*
@@ -411,7 +419,8 @@
 	 */
 	if (ret == 0) {
 		smp_mb();
-		if (!list_empty(&conn->c_send_queue) &&
+		if ((test_bit(0, &conn->c_map_queued) ||
+		     !list_empty(&conn->c_send_queue)) &&
 		    send_gen == conn->c_send_gen) {
 			rds_stats_inc(s_send_lock_queue_raced);
 			goto restart;
@@ -769,8 +778,22 @@
 	while (!list_empty(&list)) {
 		rm = list_entry(list.next, struct rds_message, m_sock_item);
 		list_del_init(&rm->m_sock_item);
-
 		rds_message_wait(rm);
+
+		/* just in case the code above skipped this message
+		 * because RDS_MSG_ON_CONN wasn't set, run it again here.
+		 * Taking m_rs_lock is the only thing that keeps us
+		 * from racing with ack processing.
+		 */
+		spin_lock_irqsave(&rm->m_rs_lock, flags);
+
+		spin_lock(&rs->rs_lock);
+		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
+		spin_unlock(&rs->rs_lock);
+
+		rm->m_rs = NULL;
+		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+
 		rds_message_put(rm);
 	}
 }
@@ -992,6 +1015,11 @@
 		goto out;
 	}
 
+	if (payload_len > rds_sk_sndbuf(rs)) {
+		ret = -EMSGSIZE;
+		goto out;
+	}
+
 	/* size of rm including all sgs */
 	ret = rds_rm_size(msg, payload_len);
 	if (ret < 0)
@@ -1023,7 +1051,8 @@
 	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
 		conn = rs->rs_conn;
 	else {
-		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
+		conn = rds_conn_create_outgoing(sock_net(sock->sk),
+						rs->rs_bound_addr, daddr,
 					rs->rs_transport,
 					sock->sk->sk_allocation);
 		if (IS_ERR(conn)) {
@@ -1063,11 +1092,7 @@
 	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
 				  dport, &queued)) {
 		rds_stats_inc(s_send_queue_full);
-		/* XXX make sure this is reasonable */
-		if (payload_len > rds_sk_sndbuf(rs)) {
-			ret = -EMSGSIZE;
-			goto out;
-		}
+
 		if (nonblock) {
 			ret = -EAGAIN;
 			goto out;
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index edac9ef..c42b60b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -35,6 +35,9 @@
 #include <linux/in.h>
 #include <linux/module.h>
 #include <net/tcp.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/tcp.h>
 
 #include "rds.h"
 #include "tcp.h"
@@ -189,9 +192,9 @@
 	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
 }
 
-static int rds_tcp_laddr_check(__be32 addr)
+static int rds_tcp_laddr_check(struct net *net, __be32 addr)
 {
-	if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
+	if (inet_addr_type(net, addr) == RTN_LOCAL)
 		return 0;
 	return -EADDRNOTAVAIL;
 }
@@ -250,16 +253,7 @@
 	}
 }
 
-static void rds_tcp_exit(void)
-{
-	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
-	rds_tcp_listen_stop();
-	rds_tcp_destroy_conns();
-	rds_trans_unregister(&rds_tcp_transport);
-	rds_tcp_recv_exit();
-	kmem_cache_destroy(rds_tcp_conn_slab);
-}
-module_exit(rds_tcp_exit);
+static void rds_tcp_exit(void);
 
 struct rds_transport rds_tcp_transport = {
 	.laddr_check		= rds_tcp_laddr_check,
@@ -281,6 +275,136 @@
 	.t_prefer_loopback	= 1,
 };
 
+static int rds_tcp_netid;
+
+/* per-network namespace private data for this module */
+struct rds_tcp_net {
+	struct socket *rds_tcp_listen_sock;
+	struct work_struct rds_tcp_accept_w;
+};
+
+static void rds_tcp_accept_worker(struct work_struct *work)
+{
+	struct rds_tcp_net *rtn = container_of(work,
+					       struct rds_tcp_net,
+					       rds_tcp_accept_w);
+
+	while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
+		cond_resched();
+}
+
+void rds_tcp_accept_work(struct sock *sk)
+{
+	struct net *net = sock_net(sk);
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
+}
+
+static __net_init int rds_tcp_init_net(struct net *net)
+{
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
+	if (!rtn->rds_tcp_listen_sock) {
+		pr_warn("could not set up listen sock\n");
+		return -EAFNOSUPPORT;
+	}
+	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
+	return 0;
+}
+
+static void __net_exit rds_tcp_exit_net(struct net *net)
+{
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	/* If rds_tcp_exit_net() is called as a result of netns deletion,
+	 * the rds_tcp_kill_sock() device notifier would already have cleaned
+	 * up the listen socket, thus there is no work to do in this function.
+	 *
+	 * If rds_tcp_exit_net() is called as a result of module unload,
+	 * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
+	 * we do need to clean up the listen socket here.
+	 */
+	if (rtn->rds_tcp_listen_sock) {
+		rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+		rtn->rds_tcp_listen_sock = NULL;
+		flush_work(&rtn->rds_tcp_accept_w);
+	}
+}
+
+static struct pernet_operations rds_tcp_net_ops = {
+	.init = rds_tcp_init_net,
+	.exit = rds_tcp_exit_net,
+	.id = &rds_tcp_netid,
+	.size = sizeof(struct rds_tcp_net),
+};
+
+static void rds_tcp_kill_sock(struct net *net)
+{
+	struct rds_tcp_connection *tc, *_tc;
+	struct sock *sk;
+	LIST_HEAD(tmp_list);
+	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
+
+	rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
+	rtn->rds_tcp_listen_sock = NULL;
+	flush_work(&rtn->rds_tcp_accept_w);
+	spin_lock_irq(&rds_tcp_conn_lock);
+	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
+		struct net *c_net = read_pnet(&tc->conn->c_net);
+
+		if (net != c_net || !tc->t_sock)
+			continue;
+		list_move_tail(&tc->t_tcp_node, &tmp_list);
+	}
+	spin_unlock_irq(&rds_tcp_conn_lock);
+	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+		sk = tc->t_sock->sk;
+		sk->sk_prot->disconnect(sk, 0);
+		tcp_done(sk);
+		if (tc->conn->c_passive)
+			rds_conn_destroy(tc->conn->c_passive);
+		rds_conn_destroy(tc->conn);
+	}
+}
+
+static int rds_tcp_dev_event(struct notifier_block *this,
+			     unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	/* rds-tcp registers as a pernet subsys, so the ->exit will only
+	 * get invoked after network activity has quiesced. We need to
+	 * clean up all sockets to quiesce network activity, and use
+	 * the unregistration of the per-net loopback device as a trigger
+	 * to start that cleanup.
+	 */
+	if (event == NETDEV_UNREGISTER_FINAL &&
+	    dev->ifindex == LOOPBACK_IFINDEX)
+		rds_tcp_kill_sock(dev_net(dev));
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block rds_tcp_dev_notifier = {
+	.notifier_call        = rds_tcp_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
+};
+
+static void rds_tcp_exit(void)
+{
+	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+	unregister_pernet_subsys(&rds_tcp_net_ops);
+	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
+		pr_warn("could not unregister rds_tcp_dev_notifier\n");
+	rds_tcp_destroy_conns();
+	rds_trans_unregister(&rds_tcp_transport);
+	rds_tcp_recv_exit();
+	kmem_cache_destroy(rds_tcp_conn_slab);
+}
+module_exit(rds_tcp_exit);
+
 static int rds_tcp_init(void)
 {
 	int ret;
@@ -293,6 +417,16 @@
 		goto out;
 	}
 
+	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
+	if (ret) {
+		pr_warn("could not register rds_tcp_dev_notifier\n");
+		goto out;
+	}
+
+	ret = register_pernet_subsys(&rds_tcp_net_ops);
+	if (ret)
+		goto out_slab;
+
 	ret = rds_tcp_recv_init();
 	if (ret)
 		goto out_slab;
@@ -301,19 +435,14 @@
 	if (ret)
 		goto out_recv;
 
-	ret = rds_tcp_listen_init();
-	if (ret)
-		goto out_register;
-
 	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
 
 	goto out;
 
-out_register:
-	rds_trans_unregister(&rds_tcp_transport);
 out_recv:
 	rds_tcp_recv_exit();
 out_slab:
+	unregister_pernet_subsys(&rds_tcp_net_ops);
 	kmem_cache_destroy(rds_tcp_conn_slab);
 out:
 	return ret;
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 0dbdd37..64f873c 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -52,6 +52,7 @@
 u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
 u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
 extern struct rds_transport rds_tcp_transport;
+void rds_tcp_accept_work(struct sock *sk);
 
 /* tcp_connect.c */
 int rds_tcp_conn_connect(struct rds_connection *conn);
@@ -59,9 +60,11 @@
 void rds_tcp_state_change(struct sock *sk);
 
 /* tcp_listen.c */
-int rds_tcp_listen_init(void);
-void rds_tcp_listen_stop(void);
+struct socket *rds_tcp_listen_init(struct net *);
+void rds_tcp_listen_stop(struct socket *);
 void rds_tcp_listen_data_ready(struct sock *sk);
+int rds_tcp_accept_one(struct socket *sock);
+int rds_tcp_keepalive(struct socket *sock);
 
 /* tcp_recv.c */
 int rds_tcp_recv_init(void);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 973109c7..5cb1687 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -79,7 +79,8 @@
 	struct sockaddr_in src, dest;
 	int ret;
 
-	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+	ret = sock_create_kern(rds_conn_net(conn), PF_INET,
+			       SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret < 0)
 		goto out;
 
@@ -111,10 +112,12 @@
 	rdsdebug("connect to address %pI4 returned %d\n", &conn->c_faddr, ret);
 	if (ret == -EINPROGRESS)
 		ret = 0;
-	if (ret == 0)
+	if (ret == 0) {
+		rds_tcp_keepalive(sock);
 		sock = NULL;
-	else
+	} else {
 		rds_tcp_restore_callbacks(sock, conn->c_transport_data);
+	}
 
 out:
 	if (sock)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0da49e3..444d78d 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -38,14 +38,7 @@
 #include "rds.h"
 #include "tcp.h"
 
-/*
- * cheesy, but simple..
- */
-static void rds_tcp_accept_worker(struct work_struct *work);
-static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
-static struct socket *rds_tcp_listen_sock;
-
-static int rds_tcp_keepalive(struct socket *sock)
+int rds_tcp_keepalive(struct socket *sock)
 {
 	/* values below based on xs_udp_default_timeout */
 	int keepidle = 5; /* send a probe 'keepidle' secs after last data */
@@ -77,7 +70,7 @@
 	return ret;
 }
 
-static int rds_tcp_accept_one(struct socket *sock)
+int rds_tcp_accept_one(struct socket *sock)
 {
 	struct socket *new_sock = NULL;
 	struct rds_connection *conn;
@@ -85,8 +78,9 @@
 	struct inet_sock *inet;
 	struct rds_tcp_connection *rs_tcp;
 
-	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
-			       sock->sk->sk_protocol, &new_sock);
+	ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+			       sock->sk->sk_type, sock->sk->sk_protocol,
+			       &new_sock);
 	if (ret)
 		goto out;
 
@@ -108,7 +102,8 @@
 		 &inet->inet_saddr, ntohs(inet->inet_sport),
 		 &inet->inet_daddr, ntohs(inet->inet_dport));
 
-	conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
+	conn = rds_conn_create(sock_net(sock->sk),
+			       inet->inet_saddr, inet->inet_daddr,
 			       &rds_tcp_transport, GFP_KERNEL);
 	if (IS_ERR(conn)) {
 		ret = PTR_ERR(conn);
@@ -148,12 +143,6 @@
 	return ret;
 }
 
-static void rds_tcp_accept_worker(struct work_struct *work)
-{
-	while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
-		cond_resched();
-}
-
 void rds_tcp_listen_data_ready(struct sock *sk)
 {
 	void (*ready)(struct sock *sk);
@@ -174,20 +163,20 @@
 	 * socket
 	 */
 	if (sk->sk_state == TCP_LISTEN)
-		queue_work(rds_wq, &rds_tcp_listen_work);
+		rds_tcp_accept_work(sk);
 
 out:
 	read_unlock(&sk->sk_callback_lock);
 	ready(sk);
 }
 
-int rds_tcp_listen_init(void)
+struct socket *rds_tcp_listen_init(struct net *net)
 {
 	struct sockaddr_in sin;
 	struct socket *sock = NULL;
 	int ret;
 
-	ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+	ret = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
 	if (ret < 0)
 		goto out;
 
@@ -211,17 +200,15 @@
 	if (ret < 0)
 		goto out;
 
-	rds_tcp_listen_sock = sock;
-	sock = NULL;
+	return sock;
 out:
 	if (sock)
 		sock_release(sock);
-	return ret;
+	return NULL;
 }
 
-void rds_tcp_listen_stop(void)
+void rds_tcp_listen_stop(struct socket *sock)
 {
-	struct socket *sock = rds_tcp_listen_sock;
 	struct sock *sk;
 
 	if (!sock)
@@ -242,5 +229,4 @@
 	/* wait for accepts to stop and close the socket */
 	flush_workqueue(rds_wq);
 	sock_release(sock);
-	rds_tcp_listen_sock = NULL;
 }
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 83498e1..f3afd1d 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -77,7 +77,7 @@
 		module_put(trans->t_owner);
 }
 
-struct rds_transport *rds_trans_get_preferred(__be32 addr)
+struct rds_transport *rds_trans_get_preferred(struct net *net, __be32 addr)
 {
 	struct rds_transport *ret = NULL;
 	struct rds_transport *trans;
@@ -90,7 +90,7 @@
 	for (i = 0; i < RDS_TRANS_COUNT; i++) {
 		trans = transports[i];
 
-		if (trans && (trans->laddr_check(addr) == 0) &&
+		if (trans && (trans->laddr_check(net, addr) == 0) &&
 		    (!trans->t_owner || try_module_get(trans->t_owner))) {
 			ret = trans;
 			break;
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 4c10e7e..598d374 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -36,7 +36,8 @@
 
 config RFKILL_GPIO
 	tristate "GPIO RFKILL driver"
-	depends on RFKILL && GPIOLIB
+	depends on RFKILL
+	depends on GPIOLIB || COMPILE_TEST
 	default n
 	help
 	  If you say yes here you get support of a generic gpio RFKILL
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index d5d58d9..9312722 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -164,7 +164,6 @@
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id rfkill_acpi_match[] = {
 	{ "BCM2E1A", RFKILL_TYPE_BLUETOOTH },
-	{ "BCM2E39", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E3D", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E40", RFKILL_TYPE_BLUETOOTH },
 	{ "BCM2E64", RFKILL_TYPE_BLUETOOTH },
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 43ec926..06e7c4a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,7 +27,16 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-void tcf_hash_destroy(struct tc_action *a)
+static void free_tcf(struct rcu_head *head)
+{
+	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+	free_percpu(p->cpu_bstats);
+	free_percpu(p->cpu_qstats);
+	kfree(p);
+}
+
+static void tcf_hash_destroy(struct tc_action *a)
 {
 	struct tcf_common *p = a->priv;
 	struct tcf_hashinfo *hinfo = a->ops->hinfo;
@@ -41,9 +50,8 @@
 	 * gen_estimator est_timer() might access p->tcfc_lock
 	 * or bstats, wait a RCU grace period before freeing p
 	 */
-	kfree_rcu(p, tcfc_rcu);
+	call_rcu(&p->tcfc_rcu, free_tcf);
 }
-EXPORT_SYMBOL(tcf_hash_destroy);
 
 int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
 {
@@ -231,15 +239,16 @@
 	if (est)
 		gen_kill_estimator(&pc->tcfc_bstats,
 				   &pc->tcfc_rate_est);
-	kfree_rcu(pc, tcfc_rcu);
+	call_rcu(&pc->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-		    int size, int bind)
+		    int size, int bind, bool cpustats)
 {
 	struct tcf_hashinfo *hinfo = a->ops->hinfo;
 	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+	int err = -ENOMEM;
 
 	if (unlikely(!p))
 		return -ENOMEM;
@@ -247,18 +256,32 @@
 	if (bind)
 		p->tcfc_bindcnt = 1;
 
+	if (cpustats) {
+		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+		if (!p->cpu_bstats) {
+err1:
+			kfree(p);
+			return err;
+		}
+		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+		if (!p->cpu_qstats) {
+err2:
+			free_percpu(p->cpu_bstats);
+			goto err1;
+		}
+	}
 	spin_lock_init(&p->tcfc_lock);
 	INIT_HLIST_NODE(&p->tcfc_head);
 	p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
 	p->tcfc_tm.install = jiffies;
 	p->tcfc_tm.lastuse = jiffies;
 	if (est) {
-		int err = gen_new_estimator(&p->tcfc_bstats, NULL,
-					    &p->tcfc_rate_est,
-					    &p->tcfc_lock, est);
+		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+					&p->tcfc_rate_est,
+					&p->tcfc_lock, est);
 		if (err) {
-			kfree(p);
-			return err;
+			free_percpu(p->cpu_qstats);
+			goto err2;
 		}
 	}
 
@@ -616,10 +639,10 @@
 	if (err < 0)
 		goto errout;
 
-	if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+	if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
 	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
 				     &p->tcfc_rate_est) < 0 ||
-	    gnet_stats_copy_queue(&d, NULL,
+	    gnet_stats_copy_queue(&d, p->cpu_qstats,
 				  &p->tcfc_qstats,
 				  p->tcfc_qstats.qlen) < 0)
 		goto errout;
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index d0edeb7..559bfa0 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -37,25 +37,24 @@
 		   struct tcf_result *res)
 {
 	struct tcf_bpf *prog = act->priv;
+	struct bpf_prog *filter;
 	int action, filter_res;
 	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;
 
 	if (unlikely(!skb_mac_header_was_set(skb)))
 		return TC_ACT_UNSPEC;
 
-	spin_lock(&prog->tcf_lock);
+	tcf_lastuse_update(&prog->tcf_tm);
+	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);
 
-	prog->tcf_tm.lastuse = jiffies;
-	bstats_update(&prog->tcf_bstats, skb);
-
-	/* Needed here for accessing maps. */
 	rcu_read_lock();
+	filter = rcu_dereference(prog->filter);
 	if (at_ingress) {
 		__skb_push(skb, skb->mac_len);
-		filter_res = BPF_PROG_RUN(prog->filter, skb);
+		filter_res = BPF_PROG_RUN(filter, skb);
 		__skb_pull(skb, skb->mac_len);
 	} else {
-		filter_res = BPF_PROG_RUN(prog->filter, skb);
+		filter_res = BPF_PROG_RUN(filter, skb);
 	}
 	rcu_read_unlock();
 
@@ -77,7 +76,7 @@
 		break;
 	case TC_ACT_SHOT:
 		action = filter_res;
-		prog->tcf_qstats.drops++;
+		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
 		break;
 	case TC_ACT_UNSPEC:
 		action = prog->tcf_action;
@@ -87,7 +86,6 @@
 		break;
 	}
 
-	spin_unlock(&prog->tcf_lock);
 	return action;
 }
 
@@ -263,7 +261,10 @@
 				  struct tcf_bpf_cfg *cfg)
 {
 	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
-	cfg->filter = prog->filter;
+	/* updates to prog->filter are prevented, since it's called either
+	 * with rtnl lock or during final cleanup in rcu callback
+	 */
+	cfg->filter = rcu_dereference_protected(prog->filter, 1);
 
 	cfg->bpf_ops = prog->bpf_ops;
 	cfg->bpf_name = prog->bpf_name;
@@ -278,7 +279,7 @@
 	struct tc_act_bpf *parm;
 	struct tcf_bpf *prog;
 	bool is_bpf, is_ebpf;
-	int ret;
+	int ret, res = 0;
 
 	if (!nla)
 		return -EINVAL;
@@ -287,45 +288,47 @@
 	if (ret < 0)
 		return ret;
 
-	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
-	is_ebpf = tb[TCA_ACT_BPF_FD];
-
-	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf) ||
-	    !tb[TCA_ACT_BPF_PARMS])
+	if (!tb[TCA_ACT_BPF_PARMS])
 		return -EINVAL;
 
 	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
 
+	if (!tcf_hash_check(parm->index, act, bind)) {
+		ret = tcf_hash_create(parm->index, est, act,
+				      sizeof(*prog), bind, true);
+		if (ret < 0)
+			return ret;
+
+		res = ACT_P_CREATED;
+	} else {
+		/* Don't override defaults. */
+		if (bind)
+			return 0;
+
+		tcf_hash_release(act, bind);
+		if (!replace)
+			return -EEXIST;
+	}
+
+	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
+	is_ebpf = tb[TCA_ACT_BPF_FD];
+
+	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	memset(&cfg, 0, sizeof(cfg));
 
 	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
 		       tcf_bpf_init_from_efd(tb, &cfg);
 	if (ret < 0)
-		return ret;
-
-	if (!tcf_hash_check(parm->index, act, bind)) {
-		ret = tcf_hash_create(parm->index, est, act,
-				      sizeof(*prog), bind);
-		if (ret < 0)
-			goto destroy_fp;
-
-		ret = ACT_P_CREATED;
-	} else {
-		/* Don't override defaults. */
-		if (bind)
-			goto destroy_fp;
-
-		tcf_hash_release(act, bind);
-		if (!replace) {
-			ret = -EEXIST;
-			goto destroy_fp;
-		}
-	}
+		goto out;
 
 	prog = to_bpf(act);
-	spin_lock_bh(&prog->tcf_lock);
+	ASSERT_RTNL();
 
-	if (ret != ACT_P_CREATED)
+	if (res != ACT_P_CREATED)
 		tcf_bpf_prog_fill_cfg(prog, &old);
 
 	prog->bpf_ops = cfg.bpf_ops;
@@ -337,19 +340,21 @@
 		prog->bpf_fd = cfg.bpf_fd;
 
 	prog->tcf_action = parm->action;
-	prog->filter = cfg.filter;
+	rcu_assign_pointer(prog->filter, cfg.filter);
 
-	spin_unlock_bh(&prog->tcf_lock);
-
-	if (ret == ACT_P_CREATED)
+	if (res == ACT_P_CREATED) {
 		tcf_hash_insert(act);
-	else
+	} else {
+		/* make sure the program being replaced is no longer executing */
+		synchronize_rcu();
 		tcf_bpf_cfg_cleanup(&old);
+	}
 
-	return ret;
+	return res;
+out:
+	if (res == ACT_P_CREATED)
+		tcf_hash_cleanup(act, est);
 
-destroy_fp:
-	tcf_bpf_cfg_cleanup(&cfg);
 	return ret;
 }
 
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 295d14b..5019a47 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -37,6 +37,7 @@
 	struct nf_conntrack_tuple tuple;
 	enum ip_conntrack_info ctinfo;
 	struct tcf_connmark_info *ca = a->priv;
+	struct nf_conntrack_zone zone;
 	struct nf_conn *c;
 	int proto;
 
@@ -70,7 +71,10 @@
 			       proto, &tuple))
 		goto out;
 
-	thash = nf_conntrack_find_get(dev_net(skb->dev), ca->zone, &tuple);
+	zone.id = ca->zone;
+	zone.dir = NF_CT_DEFAULT_ZONE_DIR;
+
+	thash = nf_conntrack_find_get(dev_net(skb->dev), &zone, &tuple);
 	if (!thash)
 		goto out;
 
@@ -108,7 +112,8 @@
 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 4cd5cf1..b07c535 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -62,7 +62,8 @@
 	parm = nla_data(tb[TCA_CSUM_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7fffc22..5c1b051 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -28,14 +28,18 @@
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-	if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+	if (prandom_u32() % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-	if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+	u32 pack = atomic_inc_return(&gact->packets);
+
+	smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+	if (pack % gact->tcfg_pval)
 		return gact->tcf_action;
 	return gact->tcfg_paction;
 }
@@ -85,7 +89,8 @@
 #endif
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+				      bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@
 
 	gact = to_gact(a);
 
-	spin_lock_bh(&gact->tcf_lock);
+	ASSERT_RTNL();
 	gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
 	if (p_parm) {
 		gact->tcfg_paction = p_parm->paction;
-		gact->tcfg_pval    = p_parm->pval;
+		gact->tcfg_pval    = max_t(u16, 1, p_parm->pval);
+		/* Make sure tcfg_pval is written before tcfg_ptype,
+		 * coupled with smp_rmb() in gact_net_rand() and gact_determ().
+		 */
+		smp_wmb();
 		gact->tcfg_ptype   = p_parm->ptype;
 	}
 #endif
-	spin_unlock_bh(&gact->tcf_lock);
 	if (ret == ACT_P_CREATED)
 		tcf_hash_insert(a);
 	return ret;
@@ -118,23 +126,21 @@
 		    struct tcf_result *res)
 {
 	struct tcf_gact *gact = a->priv;
-	int action = TC_ACT_SHOT;
+	int action = READ_ONCE(gact->tcf_action);
 
-	spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-	if (gact->tcfg_ptype)
-		action = gact_rand[gact->tcfg_ptype](gact);
-	else
-		action = gact->tcf_action;
-#else
-	action = gact->tcf_action;
+	{
+	u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+	if (ptype)
+		action = gact_rand[ptype](gact);
+	}
 #endif
-	gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	gact->tcf_bstats.packets++;
+	bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
 	if (action == TC_ACT_SHOT)
-		gact->tcf_qstats.drops++;
-	gact->tcf_tm.lastuse = jiffies;
-	spin_unlock(&gact->tcf_lock);
+		qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+	tcf_lastuse_update(&gact->tcf_tm);
 
 	return action;
 }
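
With the gact spinlock removed, the fast path above depends on two things: the smp_wmb()/smp_rmb() pair guarantees that a reader who observes a non-zero tcfg_ptype also observes the (now never-zero) tcfg_pval written before it, and READ_ONCE() stops the compiler from re-reading the fields mid-decision. A standalone sketch of that publish/consume ordering, with illustrative names rather than the tc structures:

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct demo_prob {			/* hypothetical lockless config */
	u32 pval;			/* divisor; must be valid before ptype */
	u32 ptype;			/* 0 = plain action, else probabilistic */
};

static void demo_prob_update(struct demo_prob *p, u32 pval, u32 ptype)
{
	p->pval = max_t(u32, 1, pval);	/* never publish a zero divisor */
	smp_wmb();			/* make pval visible before ptype */
	p->ptype = ptype;
}

static u32 demo_prob_divisor(struct demo_prob *p)
{
	u32 ptype = READ_ONCE(p->ptype);

	if (!ptype)
		return 0;		/* probabilistic mode not enabled */
	smp_rmb();			/* pairs with smp_wmb() in demo_prob_update() */
	return READ_ONCE(p->pval);	/* guaranteed >= 1 here */
}
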
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index cbc8dd7..99c9cc1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -114,7 +114,7 @@
 		index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
 	if (!tcf_hash_check(index, a, bind) ) {
-		ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+		ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 2685450..2d1be4a 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -35,9 +35,11 @@
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
 	struct tcf_mirred *m = to_mirred(a);
+	struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
 	list_del(&m->tcfm_list);
-	if (m->tcfm_dev)
-		dev_put(m->tcfm_dev);
+	if (dev)
+		dev_put(dev);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,7 +95,8 @@
 	if (!tcf_hash_check(parm->index, a, bind)) {
 		if (dev == NULL)
 			return -EINVAL;
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+				      bind, true);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -107,18 +110,18 @@
 	}
 	m = to_mirred(a);
 
-	spin_lock_bh(&m->tcf_lock);
+	ASSERT_RTNL();
 	m->tcf_action = parm->action;
 	m->tcfm_eaction = parm->eaction;
 	if (dev != NULL) {
 		m->tcfm_ifindex = parm->ifindex;
 		if (ret != ACT_P_CREATED)
-			dev_put(m->tcfm_dev);
+			dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
 		dev_hold(dev);
-		m->tcfm_dev = dev;
+		rcu_assign_pointer(m->tcfm_dev, dev);
 		m->tcfm_ok_push = ok_push;
 	}
-	spin_unlock_bh(&m->tcf_lock);
+
 	if (ret == ACT_P_CREATED) {
 		list_add(&m->tcfm_list, &mirred_list);
 		tcf_hash_insert(a);
@@ -133,20 +136,22 @@
 	struct tcf_mirred *m = a->priv;
 	struct net_device *dev;
 	struct sk_buff *skb2;
+	int retval, err;
 	u32 at;
-	int retval, err = 1;
 
-	spin_lock(&m->tcf_lock);
-	m->tcf_tm.lastuse = jiffies;
-	bstats_update(&m->tcf_bstats, skb);
+	tcf_lastuse_update(&m->tcf_tm);
 
-	dev = m->tcfm_dev;
-	if (!dev) {
-		printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+	bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
+
+	rcu_read_lock();
+	retval = READ_ONCE(m->tcf_action);
+	dev = rcu_dereference(m->tcfm_dev);
+	if (unlikely(!dev)) {
+		pr_notice_once("tc mirred: target device is gone\n");
 		goto out;
 	}
 
-	if (!(dev->flags & IFF_UP)) {
+	if (unlikely(!(dev->flags & IFF_UP))) {
 		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
 				       dev->name);
 		goto out;
@@ -154,7 +159,7 @@
 
 	at = G_TC_AT(skb->tc_verd);
 	skb2 = skb_clone(skb, GFP_ATOMIC);
-	if (skb2 == NULL)
+	if (!skb2)
 		goto out;
 
 	if (!(at & AT_EGRESS)) {
@@ -170,16 +175,13 @@
 	skb2->dev = dev;
 	err = dev_queue_xmit(skb2);
 
-out:
 	if (err) {
-		m->tcf_qstats.overlimits++;
+out:
+		qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
 		if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
 			retval = TC_ACT_SHOT;
-		else
-			retval = m->tcf_action;
-	} else
-		retval = m->tcf_action;
-	spin_unlock(&m->tcf_lock);
+	}
+	rcu_read_unlock();
 
 	return retval;
 }
@@ -218,14 +220,16 @@
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct tcf_mirred *m;
 
+	ASSERT_RTNL();
 	if (event == NETDEV_UNREGISTER)
 		list_for_each_entry(m, &mirred_list, tcfm_list) {
-			spin_lock_bh(&m->tcf_lock);
-			if (m->tcfm_dev == dev) {
+			if (rcu_access_pointer(m->tcfm_dev) == dev) {
 				dev_put(dev);
-				m->tcfm_dev = NULL;
+				/* Note: no RCU grace period is necessary, as
+				 * net_device objects are already RCU protected.
+				 */
+				RCU_INIT_POINTER(m->tcfm_dev, NULL);
 			}
-			spin_unlock_bh(&m->tcf_lock);
 		}
 
 	return NOTIFY_DONE;
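
act_mirred gets the same treatment: the target net_device becomes an RCU-protected pointer, the transmit path dereferences it under rcu_read_lock(), control-path writes happen under RTNL with rcu_assign_pointer(), and the unregister notifier may clear it with RCU_INIT_POINTER() because net_device objects are already freed through RCU. A stripped-down sketch of the reader side (the real code also clones the skb and updates per-cpu stats; names here are illustrative):

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

struct demo_mirror {				/* hypothetical action state */
	struct net_device __rcu *target;
};

static int demo_mirror_xmit(struct demo_mirror *m, struct sk_buff *skb)
{
	struct net_device *dev;
	int ret = -ENODEV;

	rcu_read_lock();
	dev = rcu_dereference(m->target);	/* may be NULLed by the notifier */
	if (dev && (dev->flags & IFF_UP)) {
		skb->dev = dev;
		ret = dev_queue_xmit(skb);	/* dev stays valid in this RCU section */
	}
	rcu_read_unlock();
	return ret;
}
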
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 270a030..b7c4ead 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -55,7 +55,8 @@
 	parm = nla_data(tb[TCA_NAT_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		ret = ACT_P_CREATED;
@@ -161,7 +162,8 @@
 			goto drop;
 
 		tcph = (void *)(skb_network_header(skb) + ihl);
-		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
+		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr,
+					 true);
 		break;
 	}
 	case IPPROTO_UDP:
@@ -177,7 +179,7 @@
 		udph = (void *)(skb_network_header(skb) + ihl);
 		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
 			inet_proto_csum_replace4(&udph->check, skb, addr,
-						 new_addr, 1);
+						 new_addr, true);
 			if (!udph->check)
 				udph->check = CSUM_MANGLED_0;
 		}
@@ -230,7 +232,7 @@
 			iph->saddr = new_addr;
 
 		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
-					 0);
+					 false);
 		break;
 	}
 	default:
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ff8b466..e38a770 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -57,7 +57,8 @@
 	if (!tcf_hash_check(parm->index, a, bind)) {
 		if (!parm->nkeys)
 			return -EINVAL;
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+				      bind, false);
 		if (ret)
 			return ret;
 		p = to_pedit(a);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 6a8d948..d6b708d 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -103,7 +103,8 @@
 	defdata = nla_data(tb[TCA_DEF_DATA]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index fcfeeaf..6751b5f 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,7 +99,8 @@
 	parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index d735ecf..796785e 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,7 +116,8 @@
 	action = parm->v_action;
 
 	if (!tcf_hash_check(parm->index, a, bind)) {
-		ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+		ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+				      bind, false);
 		if (ret)
 			return ret;
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index ea611b21..4c85bd3 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -30,35 +30,16 @@
 			       struct tcf_result *res)
 {
 	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-	u32 classid;
-
-	classid = task_cls_state(current)->classid;
-
-	/*
-	 * Due to the nature of the classifier it is required to ignore all
-	 * packets originating from softirq context as accessing `current'
-	 * would lead to false results.
-	 *
-	 * This test assumes that all callers of dev_queue_xmit() explicitely
-	 * disable bh. Knowing this, it is possible to detect softirq based
-	 * calls by looking at the number of nested bh disable calls because
-	 * softirqs always disables bh.
-	 */
-	if (in_serving_softirq()) {
-		/* If there is an sk_classid we'll use that. */
-		if (!skb->sk)
-			return -1;
-		classid = skb->sk->sk_classid;
-	}
+	u32 classid = task_get_classid(skb);
 
 	if (!classid)
 		return -1;
-
 	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
 		return -1;
 
 	res->classid = classid;
 	res->class = 0;
+
 	return tcf_exts_exec(skb, &head->exts, res);
 }
 
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index bb2a0f5..536838b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -301,7 +301,7 @@
 
 		keymask = f->keymask;
 		if (keymask & FLOW_KEYS_NEEDED)
-			skb_flow_dissect_flow_keys(skb, &flow_keys);
+			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
 
 		for (n = 0; n < f->nkeys; n++) {
 			key = ffs(keymask) - 1;
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2f3d03f..5769294 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -129,7 +129,7 @@
 	 * so do it rather here.
 	 */
 	skb_key.basic.n_proto = skb->protocol;
-	skb_flow_dissect(skb, &head->dissector, &skb_key);
+	skb_flow_dissect(skb, &head->dissector, &skb_key, 0);
 
 	fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
 
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 02fa827..f9c9fc0 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -283,12 +283,22 @@
 	return -ENOBUFS;
 }
 
-static void
-rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
+static void rsvp_delete_filter_rcu(struct rcu_head *head)
+{
+	struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+
+	tcf_exts_destroy(&f->exts);
+	kfree(f);
+}
+
+static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
-	tcf_exts_destroy(&f->exts);
-	kfree_rcu(f, rcu);
+	/* all classifiers are required to call tcf_exts_destroy() after an
+	 * rcu grace period, since the converted-to-rcu actions rely on that
+	 * in their cleanup() callback
+	 */
+	call_rcu(&f->rcu, rsvp_delete_filter_rcu);
 }
 
 static bool rsvp_destroy(struct tcf_proto *tp, bool force)
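
The switch from kfree_rcu() to call_rcu() with a private callback is what the comment in the hunk above refers to: now that the actions are RCU-managed too, tcf_exts_destroy() must itself run after the grace period, not just the final kfree(). A generic sketch of deferring arbitrary teardown work with call_rcu() and container_of() (illustrative names):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_filter {				/* hypothetical filter object */
	int ext_refs;				/* stands in for tcf_exts */
	struct rcu_head rcu;
};

static void demo_filter_free_rcu(struct rcu_head *head)
{
	struct demo_filter *f = container_of(head, struct demo_filter, rcu);

	/* Runs after the grace period: no reader can still hold f, so
	 * dropping the action references (tcf_exts_destroy() in the real
	 * code) is safe here, and only here.
	 */
	f->ext_refs = 0;
	kfree(f);
}

static void demo_filter_delete(struct demo_filter *f)
{
	/* unlink f from the lookup structures first (not shown), then: */
	call_rcu(&f->rcu, demo_filter_free_rcu);
}
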
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index a557dba..944c8ff 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -27,6 +27,7 @@
 struct tcindex_filter_result {
 	struct tcf_exts		exts;
 	struct tcf_result	res;
+	struct rcu_head		rcu;
 };
 
 struct tcindex_filter {
@@ -133,8 +134,23 @@
 	return 0;
 }
 
-static int
-tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+static void tcindex_destroy_rexts(struct rcu_head *head)
+{
+	struct tcindex_filter_result *r;
+
+	r = container_of(head, struct tcindex_filter_result, rcu);
+	tcf_exts_destroy(&r->exts);
+}
+
+static void tcindex_destroy_fexts(struct rcu_head *head)
+{
+	struct tcindex_filter *f = container_of(head, struct tcindex_filter, rcu);
+
+	tcf_exts_destroy(&f->result.exts);
+	kfree(f);
+}
+
+static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
 {
 	struct tcindex_data *p = rtnl_dereference(tp->root);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
@@ -162,9 +178,14 @@
 		rcu_assign_pointer(*walk, rtnl_dereference(f->next));
 	}
 	tcf_unbind_filter(tp, &r->res);
-	tcf_exts_destroy(&r->exts);
+	/* all classifiers are required to call tcf_exts_destroy() after an
+	 * rcu grace period, since the converted-to-rcu actions rely on that
+	 * in their cleanup() callback
+	 */
 	if (f)
-		kfree_rcu(f, rcu);
+		call_rcu(&f->rcu, tcindex_destroy_fexts);
+	else
+		call_rcu(&r->rcu, tcindex_destroy_rexts);
 	return 0;
 }
 
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index cab9e9b..4fbb674 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -490,6 +490,19 @@
 					return false;
 			}
 		}
+
+		if (tp_c->refcnt > 1)
+			return false;
+
+		if (tp_c->refcnt == 1) {
+			struct tc_u_hnode *ht;
+
+			for (ht = rtnl_dereference(tp_c->hlist);
+			     ht;
+			     ht = rtnl_dereference(ht->next))
+				if (!ht_empty(ht))
+					return false;
+		}
 	}
 
 	if (root_ht && --root_ht->refcnt == 0)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index f06aa01..f43c8f3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1806,51 +1806,45 @@
  * to this qdisc, (optionally) tests for protocol and asks
  * specific classifiers.
  */
-int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
-		       struct tcf_result *res)
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+		struct tcf_result *res, bool compat_mode)
 {
 	__be16 protocol = tc_skb_protocol(skb);
-	int err;
+#ifdef CONFIG_NET_CLS_ACT
+	const struct tcf_proto *old_tp = tp;
+	int limit = 0;
 
+reclassify:
+#endif
 	for (; tp; tp = rcu_dereference_bh(tp->next)) {
+		int err;
+
 		if (tp->protocol != protocol &&
 		    tp->protocol != htons(ETH_P_ALL))
 			continue;
-		err = tp->classify(skb, tp, res);
 
+		err = tp->classify(skb, tp, res);
+#ifdef CONFIG_NET_CLS_ACT
+		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
+			goto reset;
+#endif
 		if (err >= 0)
 			return err;
 	}
+
 	return -1;
-}
-EXPORT_SYMBOL(tc_classify_compat);
-
-int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
-		struct tcf_result *res)
-{
-	int err = 0;
 #ifdef CONFIG_NET_CLS_ACT
-	const struct tcf_proto *otp = tp;
-	int limit = 0;
-reclassify:
-#endif
-
-	err = tc_classify_compat(skb, tp, res);
-#ifdef CONFIG_NET_CLS_ACT
-	if (err == TC_ACT_RECLASSIFY) {
-		tp = otp;
-
-		if (unlikely(limit++ >= MAX_REC_LOOP)) {
-			net_notice_ratelimited("%s: packet reclassify loop rule prio %u protocol %02x\n",
-					       tp->q->ops->id,
-					       tp->prio & 0xffff,
-					       ntohs(tp->protocol));
-			return TC_ACT_SHOT;
-		}
-		goto reclassify;
+reset:
+	if (unlikely(limit++ >= MAX_REC_LOOP)) {
+		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
+				       tp->q->ops->id, tp->prio & 0xffff,
+				       ntohs(tp->protocol));
+		return TC_ACT_SHOT;
 	}
+
+	tp = old_tp;
+	goto reclassify;
 #endif
-	return err;
 }
 EXPORT_SYMBOL(tc_classify);
 
@@ -1947,6 +1941,7 @@
 	register_qdisc(&bfifo_qdisc_ops);
 	register_qdisc(&pfifo_head_drop_qdisc_ops);
 	register_qdisc(&mq_qdisc_ops);
+	register_qdisc(&noqueue_qdisc_ops);
 
 	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
 	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
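
In the sch_api.c hunk above, tc_classify_compat() is folded into tc_classify(): callers pass compat_mode to keep the old behaviour (the classful qdiscs that used the _compat variant pass true, the others false), and TC_ACT_RECLASSIFY now restarts the walk from the first filter in the chain, bounded by MAX_REC_LOOP before the packet is shot. The control flow reduces to a bounded restart loop, roughly like the self-contained sketch below (the demo_* names and constants are illustrative, not the kernel function):

#define DEMO_MAX_REC_LOOP	4		/* illustrative bound */
#define DEMO_ACT_RECLASSIFY	1
#define DEMO_ACT_SHOT		2

struct demo_cls {
	struct demo_cls *next;
	int (*classify)(struct demo_cls *fp);
};

static int demo_classify(struct demo_cls *head)
{
	struct demo_cls *fp;
	int limit = 0;

reclassify:
	for (fp = head; fp; fp = fp->next) {
		int err = fp->classify(fp);

		if (err == DEMO_ACT_RECLASSIFY) {
			if (++limit >= DEMO_MAX_REC_LOOP)
				return DEMO_ACT_SHOT;	/* give up, loop guard */
			goto reclassify;		/* restart from the head */
		}
		if (err >= 0)
			return err;			/* verdict found */
	}
	return -1;					/* no filter matched */
}
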
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e3e2cc5..1911af3 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -375,7 +375,7 @@
 		list_for_each_entry(flow, &p->flows, list) {
 			fl = rcu_dereference_bh(flow->filter_list);
 			if (fl) {
-				result = tc_classify_compat(skb, fl, &res);
+				result = tc_classify(skb, fl, &res, true);
 				if (result < 0)
 					continue;
 				flow = (struct atm_flow_data *)res.class;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index beeb75f..c538d9e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -240,7 +240,7 @@
 		/*
 		 * Step 2+n. Apply classifier.
 		 */
-		result = tc_classify_compat(skb, fl, &res);
+		result = tc_classify(skb, fl, &res, true);
 		if (!fl || result < 0)
 			goto fallback;
 
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 6a783af..02bfd3d 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -170,13 +170,13 @@
 
 	if (!choke_skb_cb(skb1)->keys_valid) {
 		choke_skb_cb(skb1)->keys_valid = 1;
-		skb_flow_dissect_flow_keys(skb1, &temp);
+		skb_flow_dissect_flow_keys(skb1, &temp, 0);
 		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
 	}
 
 	if (!choke_skb_cb(skb2)->keys_valid) {
 		choke_skb_cb(skb2)->keys_valid = 1;
-		skb_flow_dissect_flow_keys(skb2, &temp);
+		skb_flow_dissect_flow_keys(skb2, &temp, 0);
 		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
 	}
 
@@ -201,7 +201,7 @@
 	int result;
 
 	fl = rcu_dereference_bh(q->filter_list);
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 3387060..f26bdea 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -331,7 +331,7 @@
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	fl = rcu_dereference_bh(q->filter_list);
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 66700a6..c4d45fd 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -230,7 +230,7 @@
 	else {
 		struct tcf_result res;
 		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
-		int result = tc_classify(skb, fl, &res);
+		int result = tc_classify(skb, fl, &res, false);
 
 		pr_debug("result %d class 0x%04x\n", result, res.classid);
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 2e2398c..2177eac 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -54,7 +54,7 @@
 	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
 	if (opt == NULL) {
-		u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
+		u32 limit = qdisc_dev(sch)->tx_queue_len;
 
 		if (is_bfifo)
 			limit *= psched_mtu(qdisc_dev(sch));
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a9ba030..4c834e9 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -92,7 +92,7 @@
 		return fq_codel_hash(q, skb) + 1;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, filter, &res);
+	result = tc_classify(skb, filter, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6efca30..cb5d4ad 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -416,33 +416,25 @@
 };
 EXPORT_SYMBOL(noop_qdisc);
 
-static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
+static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
+{
+	/* register_qdisc() assigns a default of noop_enqueue if unset,
+	 * but __dev_queue_xmit() only treats the qdisc as noqueue when
+	 * ->enqueue is NULL, so clear it here. */
+	qdisc->enqueue = NULL;
+	return 0;
+}
+
+struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.id		=	"noqueue",
 	.priv_size	=	0,
+	.init		=	noqueue_init,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.peek		=	noop_dequeue,
 	.owner		=	THIS_MODULE,
 };
 
-static struct Qdisc noqueue_qdisc;
-static struct netdev_queue noqueue_netdev_queue = {
-	.qdisc		=	&noqueue_qdisc,
-	.qdisc_sleeping	=	&noqueue_qdisc,
-};
-
-static struct Qdisc noqueue_qdisc = {
-	.enqueue	=	NULL,
-	.dequeue	=	noop_dequeue,
-	.flags		=	TCQ_F_BUILTIN,
-	.ops		=	&noqueue_qdisc_ops,
-	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
-	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
-	.dev_queue	=	&noqueue_netdev_queue,
-	.busylock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.busylock),
-};
-
-
 static const u8 prio2band[TC_PRIO_MAX + 1] = {
 	1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
 };
@@ -733,18 +725,19 @@
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
 {
-	struct Qdisc *qdisc = &noqueue_qdisc;
+	struct Qdisc *qdisc;
+	const struct Qdisc_ops *ops = default_qdisc_ops;
 
-	if (dev->tx_queue_len) {
-		qdisc = qdisc_create_dflt(dev_queue,
-					  default_qdisc_ops, TC_H_ROOT);
-		if (!qdisc) {
-			netdev_info(dev, "activation failed\n");
-			return;
-		}
-		if (!netif_is_multiqueue(dev))
-			qdisc->flags |= TCQ_F_ONETXQUEUE;
+	if (dev->priv_flags & IFF_NO_QUEUE)
+		ops = &noqueue_qdisc_ops;
+
+	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
+	if (!qdisc) {
+		netdev_info(dev, "activation failed\n");
+		return;
 	}
+	if (!netif_is_multiqueue(dev))
+		qdisc->flags |= TCQ_F_ONETXQUEUE;
 	dev_queue->qdisc_sleeping = qdisc;
 }
 
@@ -755,7 +748,8 @@
 
 	txq = netdev_get_tx_queue(dev, 0);
 
-	if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+	if (!netif_is_multiqueue(dev) ||
+	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 		dev->qdisc = txq->qdisc_sleeping;
 		atomic_inc(&dev->qdisc->refcnt);
@@ -779,7 +773,7 @@
 		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
 
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
-	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+	if (need_watchdog_p) {
 		dev_queue->trans_start = 0;
 		*need_watchdog_p = 1;
 	}
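
The static noqueue_qdisc singleton is gone: noqueue is now a registered Qdisc_ops (see the register_qdisc(&noqueue_qdisc_ops) hunk in sch_api.c above), and its init clears ->enqueue because the transmit path recognizes a noqueue device by that NULL pointer rather than by comparing against a special qdisc instance. In rough outline the check on the sending side looks like the sketch below (simplified; the real branch lives in __dev_queue_xmit(), and the demo_* helpers are placeholders):

#include <linux/netdevice.h>
#include <net/sch_generic.h>

int demo_enqueue_and_run(struct Qdisc *q, struct sk_buff *skb);
int demo_xmit_direct(struct sk_buff *skb, struct net_device *dev,
		     struct netdev_queue *txq);

static int demo_xmit(struct Qdisc *q, struct sk_buff *skb,
		     struct net_device *dev, struct netdev_queue *txq)
{
	if (q->enqueue)				/* normal, queueing qdisc */
		return demo_enqueue_and_run(q, skb);

	/* noqueue: nothing to queue on, hand the skb straight to the driver */
	return demo_xmit_direct(skb, dev, txq);
}
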
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index abb9f2f..8010510 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -512,11 +512,9 @@
 
 	if (tb[TCA_GRED_LIMIT])
 		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-	else {
-		u32 qlen = qdisc_dev(sch)->tx_queue_len ? : 1;
-
-		sch->limit = qlen * psched_mtu(qdisc_dev(sch));
-	}
+	else
+		sch->limit = qdisc_dev(sch)->tx_queue_len
+		             * psched_mtu(qdisc_dev(sch));
 
 	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
 }
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index e6c7416..b7ebe2c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1165,7 +1165,7 @@
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	head = &q->root;
 	tcf = rcu_dereference_bh(q->root.filter_list);
-	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index f1acb0f..15ccd7f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -229,7 +229,7 @@
 	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
+	while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
@@ -1048,11 +1048,9 @@
 
 	if (tb[TCA_HTB_DIRECT_QLEN])
 		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
-	else {
+	else
 		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
-		if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
-			q->direct_qlen = 2;
-	}
+
 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
 		q->rate2quantum = 1;
 	q->defcls = gopt->defcls;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 42dd218..4e904ca 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -46,7 +46,7 @@
 	int err;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	err = tc_classify(skb, fl, &res);
+	err = tc_classify(skb, fl, &res, false);
 #ifdef CONFIG_NET_CLS_ACT
 	switch (err) {
 	case TC_ACT_STOLEN:
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index ade9445..5abfe44 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -130,12 +130,8 @@
 	q->unplug_indefinite = false;
 
 	if (opt == NULL) {
-		/* We will set a default limit of 100 pkts (~150kB)
-		 * in case tx_queue_len is not available. The
-		 * default value is completely arbitrary.
-		 */
-		u32 pkt_limit = qdisc_dev(sch)->tx_queue_len ? : 100;
-		q->limit = pkt_limit * psched_mtu(qdisc_dev(sch));
+		q->limit = qdisc_dev(sch)->tx_queue_len
+		           * psched_mtu(qdisc_dev(sch));
 	} else {
 		struct tc_plug_qopt *ctl = nla_data(opt);
 
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 8e5cd34..ba6487f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -42,7 +42,7 @@
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		fl = rcu_dereference_bh(q->filter_list);
-		err = tc_classify(skb, fl, &res);
+		err = tc_classify(skb, fl, &res, false);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index b8d73bc..3dc3a6e 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -186,7 +186,6 @@
 
 	u64			oldV, V;	/* Precise virtual times. */
 	struct qfq_aggregate	*in_serv_agg;   /* Aggregate being served. */
-	u32			num_active_agg; /* Num. of active aggregates */
 	u32			wsum;		/* weight sum */
 	u32			iwsum;		/* inverse weight sum */
 
@@ -718,7 +717,7 @@
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	fl = rcu_dereference_bh(q->filter_list);
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 4b81519..5bbb633 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -258,7 +258,7 @@
 	struct tcf_result res;
 	int result;
 
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -502,7 +502,7 @@
 
 	limit = ctl->limit;
 	if (limit == 0)
-		limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
+		limit = qdisc_dev(sch)->tx_queue_len;
 
 	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit);
 	if (IS_ERR(child))
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 52f75a5..3abab53 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -179,7 +179,7 @@
 		return sfq_hash(q, skb) + 1;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, fl, &res);
+	result = tc_classify(skb, fl, &res, false);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 59e8035..4345790 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -487,23 +487,35 @@
 	 */
 	rcu_read_lock();
 	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+		struct net_device *odev;
+
 		if (!laddr->valid)
 			continue;
-		if ((laddr->state == SCTP_ADDR_SRC) &&
-		    (AF_INET == laddr->a.sa.sa_family)) {
-			fl4->fl4_sport = laddr->a.v4.sin_port;
-			flowi4_update_output(fl4,
-					     asoc->base.sk->sk_bound_dev_if,
-					     RT_CONN_FLAGS(asoc->base.sk),
-					     daddr->v4.sin_addr.s_addr,
-					     laddr->a.v4.sin_addr.s_addr);
+		if (laddr->state != SCTP_ADDR_SRC ||
+		    AF_INET != laddr->a.sa.sa_family)
+			continue;
 
-			rt = ip_route_output_key(sock_net(sk), fl4);
-			if (!IS_ERR(rt)) {
-				dst = &rt->dst;
-				goto out_unlock;
-			}
-		}
+		fl4->fl4_sport = laddr->a.v4.sin_port;
+		flowi4_update_output(fl4,
+				     asoc->base.sk->sk_bound_dev_if,
+				     RT_CONN_FLAGS(asoc->base.sk),
+				     daddr->v4.sin_addr.s_addr,
+				     laddr->a.v4.sin_addr.s_addr);
+
+		rt = ip_route_output_key(sock_net(sk), fl4);
+		if (IS_ERR(rt))
+			continue;
+
+		/* Ensure the src address belongs to the output
+		 * interface.
+		 */
+		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+				     false);
+		if (!odev || odev->ifindex != fl4->flowi4_oif)
+			continue;
+
+		dst = &rt->dst;
+		break;
 	}
 
 out_unlock:
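
The reworked source-address loop above no longer takes the first route that resolves; it also confirms, via __ip_dev_find(), that the candidate source address is actually configured on the device the route leaves through before using it. That extra validation is essentially the check sketched below (illustrative helper name, same kernel primitives):

#include <linux/inetdevice.h>
#include <net/flow.h>

static bool demo_saddr_on_oif(struct net *net, __be32 saddr,
			      const struct flowi4 *fl4)
{
	struct net_device *odev;

	/* look up the device owning saddr without taking a reference */
	odev = __ip_dev_find(net, saddr, false);

	/* only accept the address if it lives on the route's output device */
	return odev && odev->ifindex == fl4->flowi4_oif;
}
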
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 06320c8..7954c52 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -3090,8 +3090,19 @@
 			sctp_assoc_set_primary(asoc, asconf->transport);
 			sctp_assoc_del_nonprimary_peers(asoc,
 							asconf->transport);
-		} else
-			sctp_assoc_del_peer(asoc, &addr);
+			return SCTP_ERROR_NO_ERROR;
+		}
+
+		/* If the address is not part of the association, the
+		 * ASCONF-ACK with Error Cause Indication Parameter
+		 * which including cause of Unresolvable Address should
+		 * be sent.
+		 */
+		peer = sctp_assoc_lookup_paddr(asoc, &addr);
+		if (!peer)
+			return SCTP_ERROR_DNS_FAILED;
+
+		sctp_assoc_rm_peer(asoc, peer);
 		break;
 	case SCTP_PARAM_SET_PRIMARY:
 		/* ADDIP Section 4.2.4
@@ -3132,11 +3143,18 @@
 		case SCTP_PARAM_IPV4_ADDRESS:
 			if (length != sizeof(sctp_ipv4addr_param_t))
 				return false;
+			/* ensure there is only one addr param and that it is
+			 * at the beginning of the addip_hdr params, or we
+			 * reject it.
+			 */
+			if (param.v != addip->addip_hdr.params)
+				return false;
 			addr_param_seen = true;
 			break;
 		case SCTP_PARAM_IPV6_ADDRESS:
 			if (length != sizeof(sctp_ipv6addr_param_t))
 				return false;
+			if (param.v != addip->addip_hdr.params)
+				return false;
 			addr_param_seen = true;
 			break;
 		case SCTP_PARAM_ADD_IP:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index fef2acd..35df126 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -702,7 +702,7 @@
 	 * outstanding data and rely on the retransmission limit be reached
 	 * to shutdown the association.
 	 */
-	if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
 		t->asoc->overall_error_count = 0;
 
 	/* Clear the hb_sent flag to signal that we had a good
@@ -954,7 +954,7 @@
 		t = list_entry(pos, struct sctp_transport, transports);
 		if (!sctp_cmp_addr_exact(&t->ipaddr,
 					 &asoc->peer.primary_addr)) {
-			sctp_assoc_del_peer(asoc, &t->ipaddr);
+			sctp_assoc_rm_peer(asoc, t);
 		}
 	}
 }
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 3ee27b7..d7eaa73 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -853,7 +853,7 @@
 
 /*
  * Respond to a normal COOKIE ACK chunk.
- * We are the side that is being asked for an association.
+ * We are the side that is asking for an association.
  *
  * RFC 2960 5.1 Normal Establishment of an Association
  *
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 9f2add3..16c1c43 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -810,7 +810,7 @@
 	ndm->ndm_flags   = NTF_SELF;
 	ndm->ndm_type    = 0;
 	ndm->ndm_ifindex = dev->ifindex;
-	ndm->ndm_state   = NUD_REACHABLE;
+	ndm->ndm_state   = obj->u.fdb.ndm_state;
 
 	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
 		goto nla_put_failure;
@@ -910,13 +910,9 @@
 		if (switchdev_port_attr_get(dev, &attr))
 			return NULL;
 
-		if (nhsel > 0) {
-			if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
+		if (nhsel > 0 &&
+		    !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
 				return NULL;
-			if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
-				   attr.u.ppid.id_len))
-				return NULL;
-		}
 
 		prev_attr = attr;
 	}
@@ -1043,3 +1039,106 @@
 	fi->fib_net->ipv4.fib_offload_disabled = true;
 }
 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
+
+static bool switchdev_port_same_parent_id(struct net_device *a,
+					  struct net_device *b)
+{
+	struct switchdev_attr a_attr = {
+		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.flags = SWITCHDEV_F_NO_RECURSE,
+	};
+	struct switchdev_attr b_attr = {
+		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+		.flags = SWITCHDEV_F_NO_RECURSE,
+	};
+
+	if (switchdev_port_attr_get(a, &a_attr) ||
+	    switchdev_port_attr_get(b, &b_attr))
+		return false;
+
+	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+}
+
+static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
+				       struct net_device *group_dev)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+		if (lower_dev == dev)
+			continue;
+		if (switchdev_port_same_parent_id(dev, lower_dev))
+			return lower_dev->offload_fwd_mark;
+		return switchdev_port_fwd_mark_get(dev, lower_dev);
+	}
+
+	return dev->ifindex;
+}
+
+static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
+					  u32 old_mark, u32 *reset_mark)
+{
+	struct net_device *lower_dev;
+	struct list_head *iter;
+
+	netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+		if (lower_dev->offload_fwd_mark == old_mark) {
+			if (!*reset_mark)
+				*reset_mark = lower_dev->ifindex;
+			lower_dev->offload_fwd_mark = *reset_mark;
+		}
+		switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
+	}
+}
+
+/**
+ *	switchdev_port_fwd_mark_set - Set port offload forwarding mark
+ *
+ *	@dev: port device
+ *	@group_dev: containing device
+ *	@joining: true if dev is joining group; false if leaving group
+ *
+ *	An ungrouped port's offload mark is just its ifindex.  A grouped
+ *	port's (member of a bridge, for example) offload mark is the ifindex
+ *	of one of the ports in the group with the same parent (switch) ID.
+ *	Ports on the same device in the same group will have the same mark.
+ *
+ *	Example:
+ *
+ *		br0		ifindex=9
+ *		  sw1p1		ifindex=2	mark=2
+ *		  sw1p2		ifindex=3	mark=2
+ *		  sw2p1		ifindex=4	mark=5
+ *		  sw2p2		ifindex=5	mark=5
+ *
+ *	If sw2p2 leaves the bridge, we'll have:
+ *
+ *		br0		ifindex=9
+ *		  sw1p1		ifindex=2	mark=2
+ *		  sw1p2		ifindex=3	mark=2
+ *		  sw2p1		ifindex=4	mark=4
+ *		sw2p2		ifindex=5	mark=5
+ */
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+				 struct net_device *group_dev,
+				 bool joining)
+{
+	u32 mark = dev->ifindex;
+	u32 reset_mark = 0;
+
+	if (group_dev && joining) {
+		mark = switchdev_port_fwd_mark_get(dev, group_dev);
+	} else if (group_dev && !joining) {
+		if (dev->offload_fwd_mark == mark)
+			/* Ohoh, this port was the mark reference port,
+			 * but it's leaving the group, so reset the
+			 * mark for the remaining ports in the group.
+			 */
+			switchdev_port_fwd_mark_reset(group_dev, mark,
+						      &reset_mark);
+	}
+
+	dev->offload_fwd_mark = mark;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
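
switchdev_port_fwd_mark_set() is intended to be called whenever a port joins or leaves a group such as a bridge, so that ports behind the same switch ASIC end up sharing an offload_fwd_mark and software forwarding between them can be suppressed. A hedged usage sketch from a driver's point of view (the notifier wiring shown here is illustrative, not a prescribed call site):

#include <linux/netdevice.h>
#include <net/switchdev.h>

/* Illustrative: refresh the port's forwarding mark when it is linked to
 * or unlinked from a master device such as a bridge.
 */
static int demo_port_changeupper(struct net_device *port_dev,
				 struct netdev_notifier_changeupper_info *info)
{
	if (!info->master)		/* only care about bridge-like masters */
		return 0;

	switchdev_port_fwd_mark_set(port_dev, info->upper_dev, info->linking);
	return 0;
}
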
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a816382..8b010c9 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -316,6 +316,29 @@
 	}
 }
 
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
+{
+	u16 last = msg_last_bcast(hdr);
+	int mtyp = msg_type(hdr);
+
+	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
+		return;
+	if (mtyp == STATE_MSG) {
+		tipc_bclink_update_link_state(n, last);
+		return;
+	}
+	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
+	 * and transfer synch info in LINK_PROTOCOL messages.
+	 */
+	if (tipc_node_is_up(n))
+		return;
+	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
+		return;
+	n->bclink.last_sent = last;
+	n->bclink.last_in = last;
+	n->bclink.oos_state = 0;
+}
+
 /**
  * bclink_peek_nack - monitor retransmission requests sent by other nodes
  *
@@ -358,10 +381,9 @@
 
 	/* Prepare clone of message for local node */
 	skb = tipc_msg_reassemble(list);
-	if (unlikely(!skb)) {
-		__skb_queue_purge(list);
+	if (unlikely(!skb))
 		return -EHOSTUNREACH;
-	}
+
 	/* Broadcast to all nodes */
 	if (likely(bclink)) {
 		tipc_bclink_lock(net);
@@ -413,7 +435,7 @@
 	 * all nodes in the cluster don't ACK at the same time
 	 */
 	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-		tipc_link_proto_xmit(node->active_links[node->addr & 1],
+		tipc_link_proto_xmit(node_active_link(node, node->addr),
 				     STATE_MSG, 0, 0, 0, 0);
 		tn->bcl->stats.sent_acks++;
 	}
@@ -925,7 +947,6 @@
 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
 	bcl->bearer_id = MAX_BEARERS;
 	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-	bcl->state = WORKING_WORKING;
 	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
 	msg_set_prevnode(bcl->pmsg, tn->own_addr);
 	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 3c290a48..d74c69b 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -133,5 +133,6 @@
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 void tipc_bclink_input(struct net *net);
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 00bc0e6..ce9f7bf 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -343,7 +343,7 @@
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
 	pr_info("Resetting bearer <%s>\n", b_ptr->name);
-	tipc_link_delete_list(net, b_ptr->identity);
+	tipc_node_delete_links(net, b_ptr->identity);
 	tipc_disc_reset(net, b_ptr);
 	return 0;
 }
@@ -361,7 +361,7 @@
 	pr_info("Disabling bearer <%s>\n", b_ptr->name);
 	b_ptr->media->disable_media(b_ptr);
 
-	tipc_link_delete_list(net, b_ptr->identity);
+	tipc_node_delete_links(net, b_ptr->identity);
 	if (b_ptr->link_req)
 		tipc_disc_delete(b_ptr->link_req);
 
@@ -470,6 +470,32 @@
 	rcu_read_unlock();
 }
 
+/* tipc_bearer_xmit() - send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+		      struct sk_buff_head *xmitq,
+		      struct tipc_media_addr *dst)
+{
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct tipc_bearer *b;
+	struct sk_buff *skb, *tmp;
+
+	if (skb_queue_empty(xmitq))
+		return;
+
+	rcu_read_lock();
+	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+	if (likely(b)) {
+		skb_queue_walk_safe(xmitq, skb, tmp) {
+			__skb_dequeue(xmitq);
+			b->media->send_msg(net, skb, b, dst);
+			/* Until we remove cloning in tipc_l2_send_msg(): */
+			kfree_skb(skb);
+		}
+	}
+	rcu_read_unlock();
+}
+
 /**
  * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
  * @buf: the received packet
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dc714d9..6426f24 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -217,5 +217,8 @@
 void tipc_bearer_stop(struct net *net);
 void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
 		      struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+		      struct sk_buff_head *xmitq,
+		      struct tipc_media_addr *dst);
 
 #endif	/* _TIPC_BEARER_H */
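
tipc_bearer_xmit() complements the existing single-buffer tipc_bearer_send(): callers collect outgoing buffers on a local sk_buff_head and flush the whole batch over one bearer in a single call. A small, hedged usage sketch (the demo_ wrapper and the way the skbs are obtained are illustrative):

#include <linux/skbuff.h>
#include "bearer.h"	/* tipc_bearer_xmit(), struct tipc_media_addr */

static void demo_flush_two(struct net *net, u32 bearer_id,
			   struct tipc_media_addr *dst,
			   struct sk_buff *skb1, struct sk_buff *skb2)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);		/* local, unlocked queue */
	__skb_queue_tail(&xmitq, skb1);
	__skb_queue_tail(&xmitq, skb2);

	/* hands every queued skb to the bearer's media send routine */
	tipc_bearer_xmit(net, bearer_id, &xmitq, dst);
}
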
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 0fcf133..b96b41e 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -109,6 +109,11 @@
 	atomic_t subscription_count;
 };
 
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+	return net_generic(net, tipc_net_id);
+}
+
 static inline u16 mod(u16 x)
 {
 	return x & 0xffffu;
@@ -129,6 +134,11 @@
 	return less_eq(left, right) && (mod(right) != mod(left));
 }
 
+static inline int in_range(u16 val, u16 min, u16 max)
+{
+	return !less(val, min) && !more(val, max);
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
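
The new in_range() helper above is built from the existing less()/more() comparisons, which work modulo 2^16 so that sequence-number checks stay correct across wraparound. For readers unfamiliar with that style of serial-number arithmetic, here is a small self-contained userspace illustration (RFC 1982 flavour; these are not the TIPC helpers themselves):

#include <stdint.h>
#include <stdio.h>

/* Compare 16-bit sequence numbers modulo 2^16.
 * demo_less(a, b) is true when a precedes b, even across wraparound.
 */
static int demo_less(uint16_t left, uint16_t right)
{
	return (int16_t)(right - left) > 0;
}

static int demo_in_range(uint16_t val, uint16_t min, uint16_t max)
{
	return !demo_less(val, min) && !demo_less(max, val);
}

int main(void)
{
	/* 65530 precedes 5 once the counter has wrapped */
	printf("%d\n", demo_less(65530, 5));		/* prints 1 */
	printf("%d\n", demo_in_range(2, 65530, 10));	/* prints 1 */
	return 0;
}
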
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 967e292..d14e0a4 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,7 +35,7 @@
  */
 
 #include "core.h"
-#include "link.h"
+#include "node.h"
 #include "discover.h"
 
 /* min delay during bearer start up */
@@ -120,30 +120,24 @@
  * @buf: buffer containing message
  * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
 		   struct tipc_bearer *bearer)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_node *node;
-	struct tipc_link *link;
 	struct tipc_media_addr maddr;
-	struct sk_buff *rbuf;
-	struct tipc_msg *msg = buf_msg(buf);
-	u32 ddom = msg_dest_domain(msg);
-	u32 onode = msg_prevnode(msg);
-	u32 net_id = msg_bc_netid(msg);
-	u32 mtyp = msg_type(msg);
-	u32 signature = msg_node_sig(msg);
-	u16 caps = msg_node_capabilities(msg);
-	bool addr_match = false;
-	bool sign_match = false;
-	bool link_up = false;
-	bool accept_addr = false;
-	bool accept_sign = false;
+	struct sk_buff *rskb;
+	struct tipc_msg *hdr = buf_msg(skb);
+	u32 ddom = msg_dest_domain(hdr);
+	u32 onode = msg_prevnode(hdr);
+	u32 net_id = msg_bc_netid(hdr);
+	u32 mtyp = msg_type(hdr);
+	u32 signature = msg_node_sig(hdr);
+	u16 caps = msg_node_capabilities(hdr);
 	bool respond = false;
+	bool dupl_addr = false;
 
-	bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
-	kfree_skb(buf);
+	bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
+	kfree_skb(skb);
 
 	/* Ensure message from node is valid and communication is permitted */
 	if (net_id != tn->net_id)
@@ -165,102 +159,20 @@
 	if (!tipc_in_scope(bearer->domain, onode))
 		return;
 
-	node = tipc_node_create(net, onode);
-	if (!node)
-		return;
-	tipc_node_lock(node);
-	node->capabilities = caps;
-	link = node->links[bearer->identity];
-
-	/* Prepare to validate requesting node's signature and media address */
-	sign_match = (signature == node->signature);
-	addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
-	link_up = link && tipc_link_is_up(link);
-
-
-	/* These three flags give us eight permutations: */
-
-	if (sign_match && addr_match && link_up) {
-		/* All is fine. Do nothing. */
-	} else if (sign_match && addr_match && !link_up) {
-		/* Respond. The link will come up in due time */
-		respond = true;
-	} else if (sign_match && !addr_match && link_up) {
-		/* Peer has changed i/f address without rebooting.
-		 * If so, the link will reset soon, and the next
-		 * discovery will be accepted. So we can ignore it.
-		 * It may also be an cloned or malicious peer having
-		 * chosen the same node address and signature as an
-		 * existing one.
-		 * Ignore requests until the link goes down, if ever.
-		 */
+	tipc_node_check_dest(net, onode, bearer, caps, signature,
+			     &maddr, &respond, &dupl_addr);
+	if (dupl_addr)
 		disc_dupl_alert(bearer, onode, &maddr);
-	} else if (sign_match && !addr_match && !link_up) {
-		/* Peer link has changed i/f address without rebooting.
-		 * It may also be a cloned or malicious peer; we can't
-		 * distinguish between the two.
-		 * The signature is correct, so we must accept.
-		 */
-		accept_addr = true;
-		respond = true;
-	} else if (!sign_match && addr_match && link_up) {
-		/* Peer node rebooted. Two possibilities:
-		 *  - Delayed re-discovery; this link endpoint has already
-		 *    reset and re-established contact with the peer, before
-		 *    receiving a discovery message from that node.
-		 *    (The peer happened to receive one from this node first).
-		 *  - The peer came back so fast that our side has not
-		 *    discovered it yet. Probing from this side will soon
-		 *    reset the link, since there can be no working link
-		 *    endpoint at the peer end, and the link will re-establish.
-		 *  Accept the signature, since it comes from a known peer.
-		 */
-		accept_sign = true;
-	} else if (!sign_match && addr_match && !link_up) {
-		/*  The peer node has rebooted.
-		 *  Accept signature, since it is a known peer.
-		 */
-		accept_sign = true;
-		respond = true;
-	} else if (!sign_match && !addr_match && link_up) {
-		/* Peer rebooted with new address, or a new/duplicate peer.
-		 * Ignore until the link goes down, if ever.
-		 */
-		disc_dupl_alert(bearer, onode, &maddr);
-	} else if (!sign_match && !addr_match && !link_up) {
-		/* Peer rebooted with new address, or it is a new peer.
-		 * Accept signature and address.
-		*/
-		accept_sign = true;
-		accept_addr = true;
-		respond = true;
-	}
-
-	if (accept_sign)
-		node->signature = signature;
-
-	if (accept_addr) {
-		if (!link)
-			link = tipc_link_create(node, bearer, &maddr);
-		if (link) {
-			memcpy(&link->media_addr, &maddr, sizeof(maddr));
-			tipc_link_reset(link);
-		} else {
-			respond = false;
-		}
-	}
 
 	/* Send response, if necessary */
 	if (respond && (mtyp == DSC_REQ_MSG)) {
-		rbuf = tipc_buf_acquire(MAX_H_SIZE);
-		if (rbuf) {
-			tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
-			tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
-			kfree_skb(rbuf);
+		rskb = tipc_buf_acquire(MAX_H_SIZE);
+		if (rskb) {
+			tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+			tipc_bearer_send(net, bearer->identity, rskb, &maddr);
+			kfree_skb(rskb);
 		}
 	}
-	tipc_node_unlock(node);
-	tipc_node_put(node);
 }
 
 /**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index eaa9fe5..75db07c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -48,9 +48,8 @@
 /*
  * Error message prefixes
  */
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
 	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
@@ -77,256 +76,413 @@
 };
 
 /*
+ * Interval between NACKs when packets arrive out of order
+ */
+#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
+/*
  * Out-of-range value for link session numbers
  */
-#define INVALID_SESSION 0x10000
+#define WILDCARD_SESSION 0x10000
 
-/*
- * Link state events:
+/* Link FSM states:
  */
-#define  STARTING_EVT    856384768	/* link processing trigger */
-#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
-#define  SILENCE_EVT     560817u	/* timer dicovered silence from peer */
+enum {
+	LINK_ESTABLISHED     = 0xe,
+	LINK_ESTABLISHING    = 0xe  << 4,
+	LINK_RESET           = 0x1  << 8,
+	LINK_RESETTING       = 0x2  << 12,
+	LINK_PEER_RESET      = 0xd  << 16,
+	LINK_FAILINGOVER     = 0xf  << 20,
+	LINK_SYNCHING        = 0xc  << 24
+};
 
-/*
- * State value stored in 'failover_pkts'
+/* Link FSM state checking routines
  */
-#define FIRST_FAILOVER 0xffffu
+static int link_is_up(struct tipc_link *l)
+{
+	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
 
-static void link_handle_out_of_seq_msg(struct tipc_link *link,
-				       struct sk_buff *skb);
-static void tipc_link_proto_rcv(struct tipc_link *link,
-				struct sk_buff *skb);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
-static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+			       struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+				      u16 rcvgap, int tolerance, int priority,
+				      struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
-static void link_set_timer(struct tipc_link *link, unsigned long time);
-/*
- *  Simple link routines
- */
-static unsigned int align(unsigned int i)
-{
-	return (i + 3) & ~3u;
-}
-
-static void tipc_link_release(struct kref *kref)
-{
-	kfree(container_of(kref, struct tipc_link, ref));
-}
-
-static void tipc_link_get(struct tipc_link *l_ptr)
-{
-	kref_get(&l_ptr->ref);
-}
-
-static void tipc_link_put(struct tipc_link *l_ptr)
-{
-	kref_put(&l_ptr->ref, tipc_link_release);
-}
-
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
-{
-	if (l->owner->active_links[0] != l)
-		return l->owner->active_links[0];
-	return l->owner->active_links[1];
-}
 
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
-int tipc_link_is_up(struct tipc_link *l_ptr)
+bool tipc_link_is_up(struct tipc_link *l)
 {
-	if (!l_ptr)
-		return 0;
-	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
+	return link_is_up(l);
 }
 
-int tipc_link_is_active(struct tipc_link *l_ptr)
+bool tipc_link_is_reset(struct tipc_link *l)
 {
-	return	(l_ptr->owner->active_links[0] == l_ptr) ||
-		(l_ptr->owner->active_links[1] == l_ptr);
+	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 }
 
-/**
- * link_timeout - handle expiration of link timer
- * @l_ptr: pointer to link
- */
-static void link_timeout(unsigned long data)
+bool tipc_link_is_synching(struct tipc_link *l)
 {
-	struct tipc_link *l_ptr = (struct tipc_link *)data;
-	struct sk_buff *skb;
-
-	tipc_node_lock(l_ptr->owner);
-
-	/* update counters used in statistical profiling of send traffic */
-	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
-	l_ptr->stats.queue_sz_counts++;
-
-	skb = skb_peek(&l_ptr->transmq);
-	if (skb) {
-		struct tipc_msg *msg = buf_msg(skb);
-		u32 length = msg_size(msg);
-
-		if ((msg_user(msg) == MSG_FRAGMENTER) &&
-		    (msg_type(msg) == FIRST_FRAGMENT)) {
-			length = msg_size(msg_get_wrapped(msg));
-		}
-		if (length) {
-			l_ptr->stats.msg_lengths_total += length;
-			l_ptr->stats.msg_length_counts++;
-			if (length <= 64)
-				l_ptr->stats.msg_length_profile[0]++;
-			else if (length <= 256)
-				l_ptr->stats.msg_length_profile[1]++;
-			else if (length <= 1024)
-				l_ptr->stats.msg_length_profile[2]++;
-			else if (length <= 4096)
-				l_ptr->stats.msg_length_profile[3]++;
-			else if (length <= 16384)
-				l_ptr->stats.msg_length_profile[4]++;
-			else if (length <= 32768)
-				l_ptr->stats.msg_length_profile[5]++;
-			else
-				l_ptr->stats.msg_length_profile[6]++;
-		}
-	}
-
-	/* do all other link processing performed on a periodic basis */
-	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
-		link_state_event(l_ptr, SILENCE_EVT);
-	l_ptr->silent_intv_cnt++;
-	if (skb_queue_len(&l_ptr->backlogq))
-		tipc_link_push_packets(l_ptr);
-	link_set_timer(l_ptr, l_ptr->keepalive_intv);
-	tipc_node_unlock(l_ptr->owner);
-	tipc_link_put(l_ptr);
+	return l->state == LINK_SYNCHING;
 }
 
-static void link_set_timer(struct tipc_link *link, unsigned long time)
+bool tipc_link_is_failingover(struct tipc_link *l)
 {
-	if (!mod_timer(&link->timer, jiffies + time))
-		tipc_link_get(link);
+	return l->state == LINK_FAILINGOVER;
+}
+
+bool tipc_link_is_blocked(struct tipc_link *l)
+{
+	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
+}
+
+int tipc_link_is_active(struct tipc_link *l)
+{
+	struct tipc_node *n = l->owner;
+
+	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
+}
+
+static u32 link_own_addr(struct tipc_link *l)
+{
+	return msg_prevnode(l->pmsg);
 }
 
 /**
  * tipc_link_create - create a new link
- * @n_ptr: pointer to associated node
- * @b_ptr: pointer to associated bearer
- * @media_addr: media address to use when sending messages over link
+ * @n: pointer to associated node
+ * @b: pointer to associated bearer
+ * @ownnode: identity of own node
+ * @peer: identity of peer node
+ * @maddr: media address to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
  *
- * Returns pointer to link.
+ * Returns true if link was created, otherwise false
  */
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-				   struct tipc_bearer *b_ptr,
-				   const struct tipc_media_addr *media_addr)
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+		      struct tipc_link **link)
 {
-	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-	struct tipc_link *l_ptr;
-	struct tipc_msg *msg;
+	struct tipc_link *l;
+	struct tipc_msg *hdr;
 	char *if_name;
-	char addr_string[16];
-	u32 peer = n_ptr->addr;
 
-	if (n_ptr->link_cnt >= MAX_BEARERS) {
-		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
-		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
-		return NULL;
-	}
+	l = kzalloc(sizeof(*l), GFP_ATOMIC);
+	if (!l)
+		return false;
+	*link = l;
 
-	if (n_ptr->links[b_ptr->identity]) {
-		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Attempt to establish second link on <%s> to %s\n",
-		       b_ptr->name, addr_string);
-		return NULL;
-	}
+	/* Note: peer i/f name is completed by reset/activate message */
+	if_name = strchr(b->name, ':') + 1;
+	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
 
-	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
-	if (!l_ptr) {
-		pr_warn("Link creation failed, no memory\n");
-		return NULL;
-	}
-	kref_init(&l_ptr->ref);
-	l_ptr->addr = peer;
-	if_name = strchr(b_ptr->name, ':') + 1;
-	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
-		tipc_node(tn->own_addr),
-		if_name,
-		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-		/* note: peer i/f name is updated by reset/activate message */
-	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
-	l_ptr->owner = n_ptr;
-	l_ptr->peer_session = INVALID_SESSION;
-	l_ptr->bearer_id = b_ptr->identity;
-	link_set_supervision_props(l_ptr, b_ptr->tolerance);
-	l_ptr->state = RESET_UNKNOWN;
+	l->addr = peer;
+	l->media_addr = maddr;
+	l->owner = n;
+	l->peer_session = WILDCARD_SESSION;
+	l->bearer_id = b->identity;
+	l->tolerance = b->tolerance;
+	l->net_plane = b->net_plane;
+	l->advertised_mtu = b->mtu;
+	l->mtu = b->mtu;
+	l->priority = b->priority;
+	tipc_link_set_queue_limits(l, b->window);
+	l->inputq = inputq;
+	l->namedq = namedq;
+	l->state = LINK_RESETTING;
+	l->pmsg = (struct tipc_msg *)&l->proto_msg;
+	hdr = l->pmsg;
+	tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+	msg_set_size(hdr, sizeof(l->proto_msg));
+	msg_set_session(hdr, session);
+	msg_set_bearer_id(hdr, l->bearer_id);
+	strcpy((char *)msg_data(hdr), if_name);
+	__skb_queue_head_init(&l->transmq);
+	__skb_queue_head_init(&l->backlogq);
+	__skb_queue_head_init(&l->deferdq);
+	skb_queue_head_init(&l->wakeupq);
+	skb_queue_head_init(l->inputq);
+	return true;
+}
 
-	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
-	msg = l_ptr->pmsg;
-	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
-		      l_ptr->addr);
-	msg_set_size(msg, sizeof(l_ptr->proto_msg));
-	msg_set_session(msg, (tn->random & 0xffff));
-	msg_set_bearer_id(msg, b_ptr->identity);
-	strcpy((char *)msg_data(msg), if_name);
-	l_ptr->net_plane = b_ptr->net_plane;
-	l_ptr->advertised_mtu = b_ptr->mtu;
-	l_ptr->mtu = l_ptr->advertised_mtu;
-	l_ptr->priority = b_ptr->priority;
-	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-	l_ptr->snd_nxt = 1;
-	__skb_queue_head_init(&l_ptr->transmq);
-	__skb_queue_head_init(&l_ptr->backlogq);
-	__skb_queue_head_init(&l_ptr->deferdq);
-	skb_queue_head_init(&l_ptr->wakeupq);
-	skb_queue_head_init(&l_ptr->inputq);
-	skb_queue_head_init(&l_ptr->namedq);
-	link_reset_statistics(l_ptr);
-	tipc_node_attach_link(n_ptr, l_ptr);
-	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
-	link_state_event(l_ptr, STARTING_EVT);
+/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+				    struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb;
+	struct sk_buff_head list;
+	u16 last_sent;
 
-	return l_ptr;
+	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+			      0, l->addr, link_own_addr(l), 0, 0, 0);
+	if (!skb)
+		return;
+	last_sent = tipc_bclink_get_last_sent(l->owner->net);
+	msg_set_last_bcast(buf_msg(skb), last_sent);
+	__skb_queue_head_init(&list);
+	__skb_queue_tail(&list, skb);
+	tipc_link_xmit(l, &list, xmitq);
 }
 
 /**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
  */
-void tipc_link_delete(struct tipc_link *l)
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
 {
-	tipc_link_reset(l);
-	if (del_timer(&l->timer))
-		tipc_link_put(l);
-	l->flags |= LINK_STOPPED;
-	/* Delete link now, or when timer is finished: */
-	tipc_link_reset_fragments(l);
-	tipc_node_detach_link(l->owner, l);
-	tipc_link_put(l);
+	int rc = 0;
+
+	switch (l->state) {
+	case LINK_RESETTING:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_PEER_RESET;
+			break;
+		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_FAILURE_EVT:
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILOVER_END_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_RESET:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_ESTABLISHING;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+			l->state = LINK_FAILINGOVER;
+			break;
+		case LINK_FAILURE_EVT:
+		case LINK_RESET_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILOVER_END_EVT:
+			break;
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_PEER_RESET:
+		switch (evt) {
+		case LINK_RESET_EVT:
+			l->state = LINK_ESTABLISHING;
+			break;
+		case LINK_PEER_RESET_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILURE_EVT:
+			break;
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_FAILINGOVER:
+		switch (evt) {
+		case LINK_FAILOVER_END_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_PEER_RESET_EVT:
+		case LINK_RESET_EVT:
+		case LINK_ESTABLISH_EVT:
+		case LINK_FAILURE_EVT:
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_ESTABLISHING:
+		switch (evt) {
+		case LINK_ESTABLISH_EVT:
+			l->state = LINK_ESTABLISHED;
+			rc |= TIPC_LINK_UP_EVT;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+			l->state = LINK_FAILINGOVER;
+			break;
+		case LINK_PEER_RESET_EVT:
+		case LINK_RESET_EVT:
+		case LINK_FAILURE_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+			break;
+		case LINK_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_ESTABLISHED:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_PEER_RESET;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_FAILURE_EVT:
+			l->state = LINK_RESETTING;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_ESTABLISH_EVT:
+		case LINK_SYNCH_END_EVT:
+			break;
+		case LINK_SYNCH_BEGIN_EVT:
+			l->state = LINK_SYNCHING;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case LINK_SYNCHING:
+		switch (evt) {
+		case LINK_PEER_RESET_EVT:
+			l->state = LINK_PEER_RESET;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_FAILURE_EVT:
+			l->state = LINK_RESETTING;
+			rc |= TIPC_LINK_DOWN_EVT;
+			break;
+		case LINK_RESET_EVT:
+			l->state = LINK_RESET;
+			break;
+		case LINK_ESTABLISH_EVT:
+		case LINK_SYNCH_BEGIN_EVT:
+			break;
+		case LINK_SYNCH_END_EVT:
+			l->state = LINK_ESTABLISHED;
+			break;
+		case LINK_FAILOVER_BEGIN_EVT:
+		case LINK_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	default:
+		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
+	}
+	return rc;
+illegal_evt:
+	pr_err("Illegal FSM event %x in state %x on link %s\n",
+	       evt, l->state, l->name);
+	return rc;
 }
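
The new tipc_link_fsm_evt() above replaces the old WORKING_WORKING/RESET_* machine with a nested switch over (state, event) pairs, updating l->state and returning TIPC_LINK_UP_EVT or TIPC_LINK_DOWN_EVT flags for the caller to act on. A minimal standalone sketch of the same pattern, using hypothetical states, events and flag names rather than the kernel definitions:

/* Standalone sketch of the nested-switch FSM pattern used above.
 * States, events and flags are simplified stand-ins, not the
 * definitions from net/tipc/link.h.
 */
#include <stdio.h>

enum state { ST_RESET, ST_ESTABLISHING, ST_ESTABLISHED };
enum event { EV_PEER_RESET, EV_ESTABLISH, EV_FAILURE };
#define LINK_UP_FLAG   (1 << 0)
#define LINK_DOWN_FLAG (1 << 1)

static int fsm_evt(enum state *st, enum event evt)
{
	int rc = 0;

	switch (*st) {
	case ST_RESET:
		if (evt == EV_PEER_RESET)
			*st = ST_ESTABLISHING;
		break;
	case ST_ESTABLISHING:
		if (evt == EV_ESTABLISH) {
			*st = ST_ESTABLISHED;
			rc |= LINK_UP_FLAG;	/* caller brings link up */
		}
		break;
	case ST_ESTABLISHED:
		if (evt == EV_FAILURE) {
			*st = ST_RESET;
			rc |= LINK_DOWN_FLAG;	/* caller takes link down */
		}
		break;
	}
	return rc;
}

int main(void)
{
	enum state st = ST_RESET;

	fsm_evt(&st, EV_PEER_RESET);
	if (fsm_evt(&st, EV_ESTABLISH) & LINK_UP_FLAG)
		printf("link came up\n");
	if (fsm_evt(&st, EV_FAILURE) & LINK_DOWN_FLAG)
		printf("link went down\n");
	return 0;
}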
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
+/* link_profile_stats - update statistical profiling of traffic
+ */
+static void link_profile_stats(struct tipc_link *l)
 {
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *link;
-	struct tipc_node *node;
+	struct sk_buff *skb;
+	struct tipc_msg *msg;
+	int length;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(node, &tn->node_list, list) {
-		tipc_node_lock(node);
-		link = node->links[bearer_id];
-		if (link)
-			tipc_link_delete(link);
-		tipc_node_unlock(node);
+	/* Update counters used in statistical profiling of send traffic */
+	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+	l->stats.queue_sz_counts++;
+
+	skb = skb_peek(&l->transmq);
+	if (!skb)
+		return;
+	msg = buf_msg(skb);
+	length = msg_size(msg);
+
+	if (msg_user(msg) == MSG_FRAGMENTER) {
+		if (msg_type(msg) != FIRST_FRAGMENT)
+			return;
+		length = msg_size(msg_get_wrapped(msg));
 	}
-	rcu_read_unlock();
+	l->stats.msg_lengths_total += length;
+	l->stats.msg_length_counts++;
+	if (length <= 64)
+		l->stats.msg_length_profile[0]++;
+	else if (length <= 256)
+		l->stats.msg_length_profile[1]++;
+	else if (length <= 1024)
+		l->stats.msg_length_profile[2]++;
+	else if (length <= 4096)
+		l->stats.msg_length_profile[3]++;
+	else if (length <= 16384)
+		l->stats.msg_length_profile[4]++;
+	else if (length <= 32768)
+		l->stats.msg_length_profile[5]++;
+	else
+		l->stats.msg_length_profile[6]++;
+}
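
The bucket boundaries above (64, 256, 1024, 4096, 16384 and 32768 bytes) feed msg_length_profile[0..6]. A hedged one-function sketch of that mapping, with a hypothetical helper name, where a caller would do something like msg_length_profile[msg_length_bucket(len)]++:

/* Illustrative helper (not part of the patch): map a message length to
 * the same seven histogram buckets used by link_profile_stats() above.
 */
static int msg_length_bucket(int length)
{
	static const int limits[] = { 64, 256, 1024, 4096, 16384, 32768 };
	int i;

	for (i = 0; i < 6; i++)
		if (length <= limits[i])
			return i;
	return 6;	/* anything larger than 32768 bytes */
}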
+
+/* tipc_link_timeout - perform periodic task as instructed from node timeout
+ */
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
+{
+	int rc = 0;
+	int mtyp = STATE_MSG;
+	bool xmit = false;
+	bool prb = false;
+
+	link_profile_stats(l);
+
+	switch (l->state) {
+	case LINK_ESTABLISHED:
+	case LINK_SYNCHING:
+		if (!l->silent_intv_cnt) {
+			if (tipc_bclink_acks_missing(l->owner))
+				xmit = true;
+		} else if (l->silent_intv_cnt <= l->abort_limit) {
+			xmit = true;
+			prb = true;
+		} else {
+			rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+		}
+		l->silent_intv_cnt++;
+		break;
+	case LINK_RESET:
+		xmit = true;
+		mtyp = RESET_MSG;
+		break;
+	case LINK_ESTABLISHING:
+		xmit = true;
+		mtyp = ACTIVATE_MSG;
+		break;
+	case LINK_PEER_RESET:
+	case LINK_RESETTING:
+	case LINK_FAILINGOVER:
+		break;
+	default:
+		break;
+	}
+
+	if (xmit)
+		tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
+
+	return rc;
 }
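
tipc_link_timeout() probes the peer once traffic goes silent and fails the link when the silent-interval count exceeds the abort limit. An illustrative sketch of that decision, using simplified names and no kernel types:

/* Hypothetical sketch of the probe/abort decision in tipc_link_timeout():
 * one or more silent intervals trigger a probing STATE message, and
 * exceeding the abort limit fails the link.
 */
enum tick_action { TICK_NONE, TICK_PROBE, TICK_FAIL };

static enum tick_action timeout_action(unsigned int silent_intv_cnt,
				       unsigned int abort_limit)
{
	if (!silent_intv_cnt)
		return TICK_NONE;	/* traffic seen, nothing to do */
	if (silent_intv_cnt <= abort_limit)
		return TICK_PROBE;	/* send a probing STATE message */
	return TICK_FAIL;		/* peer silent too long: fail link */
}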
 
 /**
@@ -334,7 +490,7 @@
  * @link: congested link
  * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
- * Only consumes message if there is an error
+ * Does not consume buffer list
  */
 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
@@ -347,8 +503,7 @@
 	/* This really cannot happen...  */
 	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
 		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-		tipc_link_reset(link);
-		goto err;
+		return -ENOBUFS;
 	}
 	/* Non-blocking sender: */
 	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
@@ -358,15 +513,12 @@
 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
 			      addr, addr, oport, 0, 0);
 	if (!skb)
-		goto err;
+		return -ENOBUFS;
 	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
 	TIPC_SKB_CB(skb)->chain_imp = imp;
 	skb_queue_tail(&link->wakeupq, skb);
 	link->stats.link_congs++;
 	return -ELINKCONG;
-err:
-	__skb_queue_purge(list);
-	return -ENOBUFS;
 }
 
 /**
@@ -388,9 +540,7 @@
 		if ((pnd[imp] + l->backlog[imp].len) >= lim)
 			break;
 		skb_unlink(skb, &l->wakeupq);
-		skb_queue_tail(&l->inputq, skb);
-		l->owner->inputq = &l->inputq;
-		l->owner->action_flags |= TIPC_MSG_EVT;
+		skb_queue_tail(l->inputq, skb);
 	}
 }
 
@@ -426,208 +576,36 @@
 	tipc_link_reset_fragments(l_ptr);
 }
 
-void tipc_link_reset(struct tipc_link *l_ptr)
+void tipc_link_reset(struct tipc_link *l)
 {
-	u32 prev_state = l_ptr->state;
-	int was_active_link = tipc_link_is_active(l_ptr);
-	struct tipc_node *owner = l_ptr->owner;
-	struct tipc_link *pl = tipc_parallel_link(l_ptr);
-
-	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
+	tipc_link_fsm_evt(l, LINK_RESET_EVT);
 
 	/* Link is down, accept any session */
-	l_ptr->peer_session = INVALID_SESSION;
+	l->peer_session = WILDCARD_SESSION;
+
+	/* If peer is up, it only accepts an incremented session number */
+	msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
 
 	/* Prepare for renewed mtu size negotiation */
-	l_ptr->mtu = l_ptr->advertised_mtu;
+	l->mtu = l->advertised_mtu;
 
-	l_ptr->state = RESET_UNKNOWN;
+	/* Clean up all queues: */
+	__skb_queue_purge(&l->transmq);
+	__skb_queue_purge(&l->deferdq);
+	skb_queue_splice_init(&l->wakeupq, l->inputq);
 
-	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
-		return;
-
-	tipc_node_link_down(l_ptr->owner, l_ptr);
-	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
-
-	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
-		l_ptr->flags |= LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
-		pl->failover_pkts = FIRST_FAILOVER;
-		pl->failover_checkpt = l_ptr->rcv_nxt;
-		pl->failover_skb = l_ptr->reasm_buf;
-	} else {
-		kfree_skb(l_ptr->reasm_buf);
-	}
-	/* Clean up all queues, except inputq: */
-	__skb_queue_purge(&l_ptr->transmq);
-	__skb_queue_purge(&l_ptr->deferdq);
-	if (!owner->inputq)
-		owner->inputq = &l_ptr->inputq;
-	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
-	if (!skb_queue_empty(owner->inputq))
-		owner->action_flags |= TIPC_MSG_EVT;
-	tipc_link_purge_backlog(l_ptr);
-	l_ptr->reasm_buf = NULL;
-	l_ptr->rcv_unacked = 0;
-	l_ptr->snd_nxt = 1;
-	l_ptr->silent_intv_cnt = 0;
-	l_ptr->stale_count = 0;
-	link_reset_statistics(l_ptr);
-}
-
-static void link_activate(struct tipc_link *link)
-{
-	struct tipc_node *node = link->owner;
-
-	link->rcv_nxt = 1;
-	link->stats.recv_info = 1;
-	link->silent_intv_cnt = 0;
-	tipc_node_link_up(node, link);
-	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
-}
-
-/**
- * link_state_event - link finite state machine
- * @l_ptr: pointer to link
- * @event: state machine event to process
- */
-static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
-{
-	struct tipc_link *other;
-	unsigned long timer_intv = l_ptr->keepalive_intv;
-
-	if (l_ptr->flags & LINK_STOPPED)
-		return;
-
-	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
-		return;		/* Not yet. */
-
-	if (l_ptr->flags & LINK_FAILINGOVER)
-		return;
-
-	switch (l_ptr->state) {
-	case WORKING_WORKING:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-		case ACTIVATE_MSG:
-			l_ptr->silent_intv_cnt = 0;
-			break;
-		case SILENCE_EVT:
-			if (!l_ptr->silent_intv_cnt) {
-				if (tipc_bclink_acks_missing(l_ptr->owner))
-					tipc_link_proto_xmit(l_ptr, STATE_MSG,
-							     0, 0, 0, 0);
-				break;
-			}
-			l_ptr->state = WORKING_UNKNOWN;
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			break;
-		case RESET_MSG:
-			pr_debug("%s<%s>, requested by peer\n",
-				 link_rst_msg, l_ptr->name);
-			tipc_link_reset(l_ptr);
-			l_ptr->state = RESET_RESET;
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0);
-			break;
-		default:
-			pr_debug("%s%u in WW state\n", link_unk_evt, event);
-		}
-		break;
-	case WORKING_UNKNOWN:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-		case ACTIVATE_MSG:
-			l_ptr->state = WORKING_WORKING;
-			l_ptr->silent_intv_cnt = 0;
-			break;
-		case RESET_MSG:
-			pr_debug("%s<%s>, requested by peer while probing\n",
-				 link_rst_msg, l_ptr->name);
-			tipc_link_reset(l_ptr);
-			l_ptr->state = RESET_RESET;
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0);
-			break;
-		case SILENCE_EVT:
-			if (!l_ptr->silent_intv_cnt) {
-				l_ptr->state = WORKING_WORKING;
-				if (tipc_bclink_acks_missing(l_ptr->owner))
-					tipc_link_proto_xmit(l_ptr, STATE_MSG,
-							     0, 0, 0, 0);
-			} else if (l_ptr->silent_intv_cnt <
-				   l_ptr->abort_limit) {
-				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     1, 0, 0, 0);
-			} else {	/* Link has failed */
-				pr_debug("%s<%s>, peer not responding\n",
-					 link_rst_msg, l_ptr->name);
-				tipc_link_reset(l_ptr);
-				l_ptr->state = RESET_UNKNOWN;
-				tipc_link_proto_xmit(l_ptr, RESET_MSG,
-						     0, 0, 0, 0);
-			}
-			break;
-		default:
-			pr_err("%s%u in WU state\n", link_unk_evt, event);
-		}
-		break;
-	case RESET_UNKNOWN:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-			break;
-		case ACTIVATE_MSG:
-			other = l_ptr->owner->active_links[0];
-			if (other && link_working_unknown(other))
-				break;
-			l_ptr->state = WORKING_WORKING;
-			link_activate(l_ptr);
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			if (l_ptr->owner->working_links == 1)
-				tipc_link_sync_xmit(l_ptr);
-			break;
-		case RESET_MSG:
-			l_ptr->state = RESET_RESET;
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     1, 0, 0, 0);
-			break;
-		case STARTING_EVT:
-			l_ptr->flags |= LINK_STARTED;
-			link_set_timer(l_ptr, timer_intv);
-			break;
-		case SILENCE_EVT:
-			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-			break;
-		default:
-			pr_err("%s%u in RU state\n", link_unk_evt, event);
-		}
-		break;
-	case RESET_RESET:
-		switch (event) {
-		case TRAFFIC_MSG_EVT:
-		case ACTIVATE_MSG:
-			other = l_ptr->owner->active_links[0];
-			if (other && link_working_unknown(other))
-				break;
-			l_ptr->state = WORKING_WORKING;
-			link_activate(l_ptr);
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			if (l_ptr->owner->working_links == 1)
-				tipc_link_sync_xmit(l_ptr);
-			break;
-		case RESET_MSG:
-			break;
-		case SILENCE_EVT:
-			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0);
-			break;
-		default:
-			pr_err("%s%u in RR state\n", link_unk_evt, event);
-		}
-		break;
-	default:
-		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
-	}
+	tipc_link_purge_backlog(l);
+	kfree_skb(l->reasm_buf);
+	kfree_skb(l->failover_reasm_skb);
+	l->reasm_buf = NULL;
+	l->failover_reasm_skb = NULL;
+	l->rcv_unacked = 0;
+	l->snd_nxt = 1;
+	l->rcv_nxt = 1;
+	l->silent_intv_cnt = 0;
+	l->stats.recv_info = 0;
+	l->stale_count = 0;
+	link_reset_statistics(l);
 }
 
 /**
@@ -635,8 +613,7 @@
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain, except when returning an error code.
  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
@@ -650,7 +627,7 @@
 	u16 ack = mod(link->rcv_nxt - 1);
 	u16 seqno = link->snd_nxt;
 	u16 bc_last_in = link->owner->bclink.last_in;
-	struct tipc_media_addr *addr = &link->media_addr;
+	struct tipc_media_addr *addr = link->media_addr;
 	struct sk_buff_head *transmq = &link->transmq;
 	struct sk_buff_head *backlogq = &link->backlogq;
 	struct sk_buff *skb, *bskb;
@@ -660,10 +637,9 @@
 		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
 			return link_schedule_user(link, list);
 	}
-	if (unlikely(msg_size(msg) > mtu)) {
-		__skb_queue_purge(list);
+	if (unlikely(msg_size(msg) > mtu))
 		return -EMSGSIZE;
-	}
+
 	/* Prepare each packet for sending, and add to relevant queue: */
 	while (skb_queue_len(list)) {
 		skb = skb_peek(list);
@@ -700,101 +676,76 @@
 	return 0;
 }
 
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
-{
-	skb_queue_head_init(list);
-	__skb_queue_tail(list, skb);
-}
-
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
-{
-	struct sk_buff_head head;
-
-	skb2list(skb, &head);
-	return __tipc_link_xmit(link->owner->net, link, &head);
-}
-
-/* tipc_link_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
- * messages, which will not be rejected
- * The only exception is datagram messages rerouted after secondary
- * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
- */
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
-		       u32 selector)
-{
-	struct sk_buff_head head;
-	int rc;
-
-	skb2list(skb, &head);
-	rc = tipc_link_xmit(net, &head, dnode, selector);
-	if (rc == -ELINKCONG)
-		kfree_skb(skb);
-	return 0;
-}
-
 /**
- * tipc_link_xmit() is the general link level function for message sending
- * @net: the applicable net namespace
+ * tipc_link_xmit(): enqueue buffer list according to queue situation
+ * @link: link to use
  * @list: chain of buffers containing message
- * @dsz: amount of user data to be sent
- * @dnode: address of destination node
- * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
- */
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
-		   u32 selector)
-{
-	struct tipc_link *link = NULL;
-	struct tipc_node *node;
-	int rc = -EHOSTUNREACH;
-
-	node = tipc_node_find(net, dnode);
-	if (node) {
-		tipc_node_lock(node);
-		link = node->active_links[selector & 1];
-		if (link)
-			rc = __tipc_link_xmit(net, link, list);
-		tipc_node_unlock(node);
-		tipc_node_put(node);
-	}
-	if (link)
-		return rc;
-
-	if (likely(in_own_node(net, dnode))) {
-		tipc_sk_rcv(net, list);
-		return 0;
-	}
-
-	__skb_queue_purge(list);
-	return rc;
-}
-
-/*
- * tipc_link_sync_xmit - synchronize broadcast link endpoints.
+ * @xmitq: returned list of packets to be sent by caller
  *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
- *
- * Called with node locked
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
-static void tipc_link_sync_xmit(struct tipc_link *link)
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+		   struct sk_buff_head *xmitq)
 {
-	struct sk_buff *skb;
-	struct tipc_msg *msg;
+	struct tipc_msg *hdr = buf_msg(skb_peek(list));
+	unsigned int maxwin = l->window;
+	unsigned int i, imp = msg_importance(hdr);
+	unsigned int mtu = l->mtu;
+	u16 ack = l->rcv_nxt - 1;
+	u16 seqno = l->snd_nxt;
+	u16 bc_last_in = l->owner->bclink.last_in;
+	struct sk_buff_head *transmq = &l->transmq;
+	struct sk_buff_head *backlogq = &l->backlogq;
+	struct sk_buff *skb, *_skb, *bskb;
 
-	skb = tipc_buf_acquire(INT_H_SIZE);
-	if (!skb)
-		return;
+	/* Match msg importance against this and all higher backlog limits: */
+	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+			return link_schedule_user(l, list);
+	}
+	if (unlikely(msg_size(hdr) > mtu))
+		return -EMSGSIZE;
 
-	msg = buf_msg(skb);
-	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
-		      INT_H_SIZE, link->addr);
-	msg_set_last_bcast(msg, link->owner->bclink.acked);
-	__tipc_link_xmit_skb(link, skb);
+	/* Prepare each packet for sending, and add to relevant queue: */
+	while (skb_queue_len(list)) {
+		skb = skb_peek(list);
+		hdr = buf_msg(skb);
+		msg_set_seqno(hdr, seqno);
+		msg_set_ack(hdr, ack);
+		msg_set_bcast_ack(hdr, bc_last_in);
+
+		if (likely(skb_queue_len(transmq) < maxwin)) {
+			_skb = skb_clone(skb, GFP_ATOMIC);
+			if (!_skb)
+				return -ENOBUFS;
+			__skb_dequeue(list);
+			__skb_queue_tail(transmq, skb);
+			__skb_queue_tail(xmitq, _skb);
+			l->rcv_unacked = 0;
+			seqno++;
+			continue;
+		}
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+			kfree_skb(__skb_dequeue(list));
+			l->stats.sent_bundled++;
+			continue;
+		}
+		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+			kfree_skb(__skb_dequeue(list));
+			__skb_queue_tail(backlogq, bskb);
+			l->backlog[msg_importance(buf_msg(bskb))].len++;
+			l->stats.sent_bundled++;
+			l->stats.sent_bundles++;
+			continue;
+		}
+		l->backlog[imp].len += skb_queue_len(list);
+		skb_queue_splice_tail_init(list, backlogq);
+	}
+	l->snd_nxt = seqno;
+	return 0;
 }
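
tipc_link_xmit() clones packets straight onto the transmit queue while it is shorter than the send window, tries to bundle the packet into the tail of the backlog queue otherwise, and finally falls back to queueing it on the backlog. A simplified sketch of that classification, with stand-in parameters instead of sk_buff queues:

/* Sketch of the queueing decision in tipc_link_xmit() above. */
enum xmit_queue { Q_TRANSMIT, Q_BUNDLE, Q_BACKLOG };

static enum xmit_queue classify_pkt(unsigned int transmq_len,
				    unsigned int window,
				    int fits_in_last_bundle)
{
	if (transmq_len < window)
		return Q_TRANSMIT;	/* clone goes out via xmitq now */
	if (fits_in_last_bundle)
		return Q_BUNDLE;	/* piggy-back on last backlog bundle */
	return Q_BACKLOG;		/* wait for the window to open */
}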
 
 /*
@@ -842,29 +793,37 @@
 		link->rcv_unacked = 0;
 		__skb_queue_tail(&link->transmq, skb);
 		tipc_bearer_send(link->owner->net, link->bearer_id,
-				 skb, &link->media_addr);
+				 skb, link->media_addr);
 	}
 	link->snd_nxt = seqno;
 }
 
-void tipc_link_reset_all(struct tipc_node *node)
+void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-	char addr_string[16];
-	u32 i;
+	struct sk_buff *skb, *_skb;
+	struct tipc_msg *hdr;
+	u16 seqno = l->snd_nxt;
+	u16 ack = l->rcv_nxt - 1;
 
-	tipc_node_lock(node);
-
-	pr_warn("Resetting all links to %s\n",
-		tipc_addr_string_fill(addr_string, node->addr));
-
-	for (i = 0; i < MAX_BEARERS; i++) {
-		if (node->links[i]) {
-			link_print(node->links[i], "Resetting link\n");
-			tipc_link_reset(node->links[i]);
-		}
+	while (skb_queue_len(&l->transmq) < l->window) {
+		skb = skb_peek(&l->backlogq);
+		if (!skb)
+			break;
+		_skb = skb_clone(skb, GFP_ATOMIC);
+		if (!_skb)
+			break;
+		__skb_dequeue(&l->backlogq);
+		hdr = buf_msg(skb);
+		l->backlog[msg_importance(hdr)].len--;
+		__skb_queue_tail(&l->transmq, skb);
+		__skb_queue_tail(xmitq, _skb);
+		msg_set_ack(hdr, ack);
+		msg_set_seqno(hdr, seqno);
+		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+		l->rcv_unacked = 0;
+		seqno++;
 	}
-
-	tipc_node_unlock(node);
+	l->snd_nxt = seqno;
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -877,9 +836,12 @@
 
 	if (l_ptr->addr) {
 		/* Handle failure on standard link */
-		link_print(l_ptr, "Resetting link\n");
-		tipc_link_reset(l_ptr);
-
+		link_print(l_ptr, "Resetting link ");
+		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+			msg_user(msg), msg_type(msg), msg_size(msg),
+			msg_errcode(msg));
+		pr_info("sqno %u, prev: %x, src: %x\n",
+			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
 	} else {
 		/* Handle failure on broadcast link */
 		struct tipc_node *n_ptr;
@@ -934,191 +896,45 @@
 		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
 		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
-				 &l_ptr->media_addr);
+				 l_ptr->media_addr);
 		retransmits--;
 		l_ptr->stats.retransmitted++;
 	}
 }
 
-/* link_synch(): check if all packets arrived before the synch
- *               point have been consumed
- * Returns true if the parallel links are synched, otherwise false
- */
-static bool link_synch(struct tipc_link *l)
+static int tipc_link_retransm(struct tipc_link *l, int retransm,
+			      struct sk_buff_head *xmitq)
 {
-	unsigned int post_synch;
-	struct tipc_link *pl;
+	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+	struct tipc_msg *hdr;
 
-	pl  = tipc_parallel_link(l);
-	if (pl == l)
-		goto synched;
+	if (!skb)
+		return 0;
 
-	/* Was last pre-synch packet added to input queue ? */
-	if (less_eq(pl->rcv_nxt, l->synch_point))
-		return false;
-
-	/* Is it still in the input queue ? */
-	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
-	if (skb_queue_len(&pl->inputq) > post_synch)
-		return false;
-synched:
-	l->flags &= ~LINK_SYNCHING;
-	return true;
-}
-
-static void link_retrieve_defq(struct tipc_link *link,
-			       struct sk_buff_head *list)
-{
-	u16 seq_no;
-
-	if (skb_queue_empty(&link->deferdq))
-		return;
-
-	seq_no = buf_seqno(skb_peek(&link->deferdq));
-	if (seq_no == link->rcv_nxt)
-		skb_queue_splice_tail_init(&link->deferdq, list);
-}
-
-/**
- * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @net: the applicable net namespace
- * @skb: TIPC packet
- * @b_ptr: pointer to bearer message arrived on
- *
- * Invoked with no locks held.  Bearer pointer must point to a valid bearer
- * structure (i.e. cannot be NULL), but bearer can be inactive.
- */
-void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct sk_buff_head head;
-	struct tipc_node *n_ptr;
-	struct tipc_link *l_ptr;
-	struct sk_buff *skb1, *tmp;
-	struct tipc_msg *msg;
-	u16 seq_no;
-	u16 ackd;
-	u32 released;
-
-	skb2list(skb, &head);
-
-	while ((skb = __skb_dequeue(&head))) {
-		/* Ensure message is well-formed */
-		if (unlikely(!tipc_msg_validate(skb)))
-			goto discard;
-
-		/* Handle arrival of a non-unicast link message */
-		msg = buf_msg(skb);
-		if (unlikely(msg_non_seq(msg))) {
-			if (msg_user(msg) ==  LINK_CONFIG)
-				tipc_disc_rcv(net, skb, b_ptr);
-			else
-				tipc_bclink_rcv(net, skb);
-			continue;
-		}
-
-		/* Discard unicast link messages destined for another node */
-		if (unlikely(!msg_short(msg) &&
-			     (msg_destnode(msg) != tn->own_addr)))
-			goto discard;
-
-		/* Locate neighboring node that sent message */
-		n_ptr = tipc_node_find(net, msg_prevnode(msg));
-		if (unlikely(!n_ptr))
-			goto discard;
-
-		tipc_node_lock(n_ptr);
-		/* Locate unicast link endpoint that should handle message */
-		l_ptr = n_ptr->links[b_ptr->identity];
-		if (unlikely(!l_ptr))
-			goto unlock;
-
-		/* Verify that communication with node is currently allowed */
-		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
-		    msg_user(msg) == LINK_PROTOCOL &&
-		    (msg_type(msg) == RESET_MSG ||
-		    msg_type(msg) == ACTIVATE_MSG) &&
-		    !msg_redundant_link(msg))
-			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
-
-		if (tipc_node_blocked(n_ptr))
-			goto unlock;
-
-		/* Validate message sequence number info */
-		seq_no = msg_seqno(msg);
-		ackd = msg_ack(msg);
-
-		/* Release acked messages */
-		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
-			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-
-		released = 0;
-		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
-			if (more(buf_seqno(skb1), ackd))
-				break;
-			 __skb_unlink(skb1, &l_ptr->transmq);
-			 kfree_skb(skb1);
-			 released = 1;
-		}
-
-		/* Try sending any messages link endpoint has pending */
-		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
-			tipc_link_push_packets(l_ptr);
-
-		if (released && !skb_queue_empty(&l_ptr->wakeupq))
-			link_prepare_wakeup(l_ptr);
-
-		/* Process the incoming packet */
-		if (unlikely(!link_working_working(l_ptr))) {
-			if (msg_user(msg) == LINK_PROTOCOL) {
-				tipc_link_proto_rcv(l_ptr, skb);
-				link_retrieve_defq(l_ptr, &head);
-				skb = NULL;
-				goto unlock;
-			}
-
-			/* Traffic message. Conditionally activate link */
-			link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
-			if (link_working_working(l_ptr)) {
-				/* Re-insert buffer in front of queue */
-				__skb_queue_head(&head, skb);
-				skb = NULL;
-				goto unlock;
-			}
-			goto unlock;
-		}
-
-		/* Link is now in state WORKING_WORKING */
-		if (unlikely(seq_no != l_ptr->rcv_nxt)) {
-			link_handle_out_of_seq_msg(l_ptr, skb);
-			link_retrieve_defq(l_ptr, &head);
-			skb = NULL;
-			goto unlock;
-		}
-		l_ptr->silent_intv_cnt = 0;
-
-		/* Synchronize with parallel link if applicable */
-		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
-			if (!link_synch(l_ptr))
-				goto unlock;
-		}
-		l_ptr->rcv_nxt++;
-		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
-			link_retrieve_defq(l_ptr, &head);
-		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
-			l_ptr->stats.sent_acks++;
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
-		}
-		tipc_link_input(l_ptr, skb);
-		skb = NULL;
-unlock:
-		tipc_node_unlock(n_ptr);
-		tipc_node_put(n_ptr);
-discard:
-		if (unlikely(skb))
-			kfree_skb(skb);
+	/* Detect repeated retransmit failures on same packet */
+	if (likely(l->last_retransm != buf_seqno(skb))) {
+		l->last_retransm = buf_seqno(skb);
+		l->stale_count = 1;
+	} else if (++l->stale_count > 100) {
+		link_retransmit_failure(l, skb);
+		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 	}
+	skb_queue_walk(&l->transmq, skb) {
+		if (!retransm)
+			return 0;
+		hdr = buf_msg(skb);
+		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+		if (!_skb)
+			return 0;
+		hdr = buf_msg(_skb);
+		msg_set_ack(hdr, l->rcv_nxt - 1);
+		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+		_skb->priority = TC_PRIO_CONTROL;
+		__skb_queue_tail(xmitq, _skb);
+		retransm--;
+		l->stats.retransmitted++;
+	}
+	return 0;
 }
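
tipc_link_retransm() declares the link failed once the same head-of-queue packet has been retransmitted more than 100 times. A small sketch of that stale-counting logic, with hypothetical struct and function names:

/* Stale-retransmit detector, mirroring the threshold of 100 repeats
 * assumed from tipc_link_retransm() above: if the same sequence number
 * keeps reappearing at the head of the transmit queue, report failure
 * instead of retransmitting forever.
 */
struct retransm_state {
	unsigned short last_seqno;
	unsigned int stale_count;
};

static int retransm_is_stale(struct retransm_state *st, unsigned short seqno)
{
	if (st->last_seqno != seqno) {
		st->last_seqno = seqno;
		st->stale_count = 1;
		return 0;
	}
	return ++st->stale_count > 100;
}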
 
 /* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1126,29 +942,22 @@
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+			    struct sk_buff_head *inputq)
 {
 	struct tipc_node *node = link->owner;
-	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport = msg_destport(msg);
 
-	switch (msg_user(msg)) {
+	switch (msg_user(buf_msg(skb))) {
 	case TIPC_LOW_IMPORTANCE:
 	case TIPC_MEDIUM_IMPORTANCE:
 	case TIPC_HIGH_IMPORTANCE:
 	case TIPC_CRITICAL_IMPORTANCE:
 	case CONN_MANAGER:
-		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
-			node->inputq = &link->inputq;
-			node->action_flags |= TIPC_MSG_EVT;
-		}
+		__skb_queue_tail(inputq, skb);
 		return true;
 	case NAME_DISTRIBUTOR:
 		node->bclink.recv_permitted = true;
-		node->namedq = &link->namedq;
-		skb_queue_tail(&link->namedq, skb);
-		if (skb_queue_len(&link->namedq) == 1)
-			node->action_flags |= TIPC_NAMED_MSG_EVT;
+		skb_queue_tail(link->namedq, skb);
 		return true;
 	case MSG_BUNDLER:
 	case TUNNEL_PROTOCOL:
@@ -1165,54 +974,160 @@
 /* tipc_link_input - process packet that has passed link protocol check
  *
  * Consumes buffer
- * Node lock must be held
  */
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+			   struct sk_buff_head *inputq)
 {
-	struct tipc_node *node = link->owner;
-	struct tipc_msg *msg = buf_msg(skb);
+	struct tipc_node *node = l->owner;
+	struct tipc_msg *hdr = buf_msg(skb);
+	struct sk_buff **reasm_skb = &l->reasm_buf;
 	struct sk_buff *iskb;
+	int usr = msg_user(hdr);
+	int rc = 0;
 	int pos = 0;
+	int ipos = 0;
 
-	if (likely(tipc_data_input(link, skb)))
-		return;
-
-	switch (msg_user(msg)) {
-	case TUNNEL_PROTOCOL:
-		if (msg_dup(msg)) {
-			link->flags |= LINK_SYNCHING;
-			link->synch_point = msg_seqno(msg_get_wrapped(msg));
-			kfree_skb(skb);
-			break;
+	if (unlikely(usr == TUNNEL_PROTOCOL)) {
+		if (msg_type(hdr) == SYNCH_MSG) {
+			__skb_queue_purge(&l->deferdq);
+			goto drop;
 		}
-		if (!tipc_link_failover_rcv(link, &skb))
-			break;
-		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
-			tipc_data_input(link, skb);
-			break;
-		}
-	case MSG_BUNDLER:
-		link->stats.recv_bundles++;
-		link->stats.recv_bundled += msg_msgcnt(msg);
+		if (!tipc_msg_extract(skb, &iskb, &ipos))
+			return rc;
+		kfree_skb(skb);
+		skb = iskb;
+		hdr = buf_msg(skb);
+		if (less(msg_seqno(hdr), l->drop_point))
+			goto drop;
+		if (tipc_data_input(l, skb, inputq))
+			return rc;
+		usr = msg_user(hdr);
+		reasm_skb = &l->failover_reasm_skb;
+	}
 
+	if (usr == MSG_BUNDLER) {
+		l->stats.recv_bundles++;
+		l->stats.recv_bundled += msg_msgcnt(hdr);
 		while (tipc_msg_extract(skb, &iskb, &pos))
-			tipc_data_input(link, iskb);
-		break;
-	case MSG_FRAGMENTER:
-		link->stats.recv_fragments++;
-		if (tipc_buf_append(&link->reasm_buf, &skb)) {
-			link->stats.recv_fragmented++;
-			tipc_data_input(link, skb);
-		} else if (!link->reasm_buf) {
-			tipc_link_reset(link);
+			tipc_data_input(l, iskb, inputq);
+		return 0;
+	} else if (usr == MSG_FRAGMENTER) {
+		l->stats.recv_fragments++;
+		if (tipc_buf_append(reasm_skb, &skb)) {
+			l->stats.recv_fragmented++;
+			tipc_data_input(l, skb, inputq);
+		} else if (!*reasm_skb) {
+			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 		}
-		break;
-	case BCAST_PROTOCOL:
+		return 0;
+	} else if (usr == BCAST_PROTOCOL) {
 		tipc_link_sync_rcv(node, skb);
-		break;
-	default:
-		break;
-	};
+		return 0;
+	}
+drop:
+	kfree_skb(skb);
+	return 0;
+}
+
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+{
+	bool released = false;
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&l->transmq, skb, tmp) {
+		if (more(buf_seqno(skb), acked))
+			break;
+		__skb_unlink(skb, &l->transmq);
+		kfree_skb(skb);
+		released = true;
+	}
+	return released;
+}
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @link: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+		  struct sk_buff_head *xmitq)
+{
+	struct sk_buff_head *arrvq = &l->deferdq;
+	struct sk_buff_head tmpq;
+	struct tipc_msg *hdr;
+	u16 seqno, rcv_nxt;
+	int rc = 0;
+
+	__skb_queue_head_init(&tmpq);
+
+	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
+		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
+			tipc_link_build_proto_msg(l, STATE_MSG, 0,
+						  0, 0, 0, xmitq);
+		return rc;
+	}
+
+	while ((skb = skb_peek(arrvq))) {
+		hdr = buf_msg(skb);
+
+		/* Verify and update link state */
+		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
+			__skb_dequeue(arrvq);
+			rc = tipc_link_proto_rcv(l, skb, xmitq);
+			continue;
+		}
+
+		if (unlikely(!link_is_up(l))) {
+			rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+			if (!link_is_up(l)) {
+				kfree_skb(__skb_dequeue(arrvq));
+				goto exit;
+			}
+		}
+
+		l->silent_intv_cnt = 0;
+
+		/* Forward queues and wake up waiting users */
+		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+			tipc_link_advance_backlog(l, xmitq);
+			if (unlikely(!skb_queue_empty(&l->wakeupq)))
+				link_prepare_wakeup(l);
+		}
+
+		/* Defer reception if there is a gap in the sequence */
+		seqno = msg_seqno(hdr);
+		rcv_nxt = l->rcv_nxt;
+		if (unlikely(less(rcv_nxt, seqno))) {
+			l->stats.deferred_recv++;
+			goto exit;
+		}
+
+		__skb_dequeue(arrvq);
+
+		/* Drop if packet already received */
+		if (unlikely(more(rcv_nxt, seqno))) {
+			l->stats.duplicates++;
+			kfree_skb(skb);
+			goto exit;
+		}
+
+		/* Packet can be delivered */
+		l->rcv_nxt++;
+		l->stats.recv_info++;
+		if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+			rc = tipc_link_input(l, skb, &tmpq);
+
+		/* Ack at regular intervals */
+		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
+			l->rcv_unacked = 0;
+			l->stats.sent_acks++;
+			tipc_link_build_proto_msg(l, STATE_MSG,
+						  0, 0, 0, 0, xmitq);
+		}
+	}
+exit:
+	tipc_skb_queue_splice_tail(&tmpq, l->inputq);
+	return rc;
 }
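
The receive loop in tipc_link_rcv() defers packets that arrive ahead of rcv_nxt, drops those behind it as duplicates, and delivers exact matches while advancing rcv_nxt. A sketch of that sequence-number check, assuming 16-bit wrap-around arithmetic in the spirit of less()/more():

/* Illustrative per-packet sequence check (not kernel code). */
#include <stdint.h>

enum rcv_verdict { RCV_DELIVER, RCV_DEFER, RCV_DUPLICATE };

static enum rcv_verdict seqno_check(uint16_t seqno, uint16_t rcv_nxt)
{
	int16_t diff = (int16_t)(seqno - rcv_nxt);

	if (diff > 0)
		return RCV_DEFER;	/* gap: wait for missing packets */
	if (diff < 0)
		return RCV_DUPLICATE;	/* already received: drop */
	return RCV_DELIVER;		/* in sequence: deliver, rcv_nxt++ */
}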
 
 /**
@@ -1255,458 +1170,250 @@
 }
 
 /*
- * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
- */
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
-				       struct sk_buff *buf)
-{
-	u32 seq_no = buf_seqno(buf);
-
-	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
-		tipc_link_proto_rcv(l_ptr, buf);
-		return;
-	}
-
-	/* Record OOS packet arrival */
-	l_ptr->silent_intv_cnt = 0;
-
-	/*
-	 * Discard packet if a duplicate; otherwise add it to deferred queue
-	 * and notify peer of gap as per protocol specification
-	 */
-	if (less(seq_no, l_ptr->rcv_nxt)) {
-		l_ptr->stats.duplicates++;
-		kfree_skb(buf);
-		return;
-	}
-
-	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
-		l_ptr->stats.deferred_recv++;
-		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
-	} else {
-		l_ptr->stats.duplicates++;
-	}
-}
-
-/*
  * Send protocol message to the other endpoint.
  */
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
 			  u32 gap, u32 tolerance, u32 priority)
 {
-	struct sk_buff *buf = NULL;
-	struct tipc_msg *msg = l_ptr->pmsg;
-	u32 msg_size = sizeof(l_ptr->proto_msg);
-	int r_flag;
-	u16 last_rcv;
+	struct sk_buff *skb = NULL;
+	struct sk_buff_head xmitq;
 
-	/* Don't send protocol message during link failover */
-	if (l_ptr->flags & LINK_FAILINGOVER)
+	__skb_queue_head_init(&xmitq);
+	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+				  tolerance, priority, &xmitq);
+	skb = __skb_dequeue(&xmitq);
+	if (!skb)
 		return;
-
-	/* Abort non-RESET send if communication with node is prohibited */
-	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
-		return;
-
-	/* Create protocol message with "out-of-sequence" sequence number */
-	msg_set_type(msg, msg_typ);
-	msg_set_net_plane(msg, l_ptr->net_plane);
-	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
-	if (msg_typ == STATE_MSG) {
-		u16 next_sent = l_ptr->snd_nxt;
-
-		if (!tipc_link_is_up(l_ptr))
-			return;
-		msg_set_next_sent(msg, next_sent);
-		if (!skb_queue_empty(&l_ptr->deferdq)) {
-			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
-			gap = mod(last_rcv - l_ptr->rcv_nxt);
-		}
-		msg_set_seq_gap(msg, gap);
-		if (gap)
-			l_ptr->stats.sent_nacks++;
-		msg_set_link_tolerance(msg, tolerance);
-		msg_set_linkprio(msg, priority);
-		msg_set_max_pkt(msg, l_ptr->mtu);
-		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-		msg_set_probe(msg, probe_msg != 0);
-		if (probe_msg)
-			l_ptr->stats.sent_probes++;
-		l_ptr->stats.sent_states++;
-	} else {		/* RESET_MSG or ACTIVATE_MSG */
-		msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
-		msg_set_seq_gap(msg, 0);
-		msg_set_next_sent(msg, 1);
-		msg_set_probe(msg, 0);
-		msg_set_link_tolerance(msg, l_ptr->tolerance);
-		msg_set_linkprio(msg, l_ptr->priority);
-		msg_set_max_pkt(msg, l_ptr->advertised_mtu);
-	}
-
-	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
-	msg_set_redundant_link(msg, r_flag);
-	msg_set_linkprio(msg, l_ptr->priority);
-	msg_set_size(msg, msg_size);
-
-	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
-	buf = tipc_buf_acquire(msg_size);
-	if (!buf)
-		return;
-
-	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
-	buf->priority = TC_PRIO_CONTROL;
-	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
-			 &l_ptr->media_addr);
-	l_ptr->rcv_unacked = 0;
-	kfree_skb(buf);
+	tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+	l->rcv_unacked = 0;
+	kfree_skb(skb);
 }
 
-/*
- * Receive protocol message :
- * Note that network plane id propagates through the network, and may
- * change at any time. The node with lowest address rules
+/* tipc_link_build_proto_msg: prepare link protocol message for transmission
  */
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
-				struct sk_buff *buf)
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+				      u16 rcvgap, int tolerance, int priority,
+				      struct sk_buff_head *xmitq)
 {
-	u32 rec_gap = 0;
-	u32 msg_tol;
-	struct tipc_msg *msg = buf_msg(buf);
+	struct sk_buff *skb = NULL;
+	struct tipc_msg *hdr = l->pmsg;
+	u16 snd_nxt = l->snd_nxt;
+	u16 rcv_nxt = l->rcv_nxt;
+	u16 rcv_last = rcv_nxt - 1;
+	int node_up = l->owner->bclink.recv_permitted;
 
-	if (l_ptr->flags & LINK_FAILINGOVER)
+	/* Don't send protocol message during reset or link failover */
+	if (tipc_link_is_blocked(l))
+		return;
+
+	msg_set_type(hdr, mtyp);
+	msg_set_net_plane(hdr, l->net_plane);
+	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+	msg_set_link_tolerance(hdr, tolerance);
+	msg_set_linkprio(hdr, priority);
+	msg_set_redundant_link(hdr, node_up);
+	msg_set_seq_gap(hdr, 0);
+
+	/* Compatibility: created msg must not be in sequence with pkt flow */
+	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+	if (mtyp == STATE_MSG) {
+		if (!tipc_link_is_up(l))
+			return;
+		msg_set_next_sent(hdr, snd_nxt);
+
+		/* Override rcvgap if there are packets in deferred queue */
+		if (!skb_queue_empty(&l->deferdq))
+			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+		if (rcvgap) {
+			msg_set_seq_gap(hdr, rcvgap);
+			l->stats.sent_nacks++;
+		}
+		msg_set_ack(hdr, rcv_last);
+		msg_set_probe(hdr, probe);
+		if (probe)
+			l->stats.sent_probes++;
+		l->stats.sent_states++;
+	} else {
+		/* RESET_MSG or ACTIVATE_MSG */
+		msg_set_max_pkt(hdr, l->advertised_mtu);
+		msg_set_ack(hdr, l->rcv_nxt - 1);
+		msg_set_next_sent(hdr, 1);
+	}
+	skb = tipc_buf_acquire(msg_size(hdr));
+	if (!skb)
+		return;
+	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+	skb->priority = TC_PRIO_CONTROL;
+	__skb_queue_tail(xmitq, skb);
+}
+
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
+ * with contents of the link's transmit and backlog queues.
+ */
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+			   int mtyp, struct sk_buff_head *xmitq)
+{
+	struct sk_buff *skb, *tnlskb;
+	struct tipc_msg *hdr, tnlhdr;
+	struct sk_buff_head *queue = &l->transmq;
+	struct sk_buff_head tmpxq, tnlq;
+	u16 pktlen, pktcnt, seqno = l->snd_nxt;
+
+	if (!tnl)
+		return;
+
+	skb_queue_head_init(&tnlq);
+	skb_queue_head_init(&tmpxq);
+
+	/* At least one packet required for safe algorithm => add dummy */
+	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+			      BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+			      0, 0, TIPC_ERR_NO_PORT);
+	if (!skb) {
+		pr_warn("%sunable to create tunnel packet\n", link_co_err);
+		return;
+	}
+	skb_queue_tail(&tnlq, skb);
+	tipc_link_xmit(l, &tnlq, &tmpxq);
+	__skb_queue_purge(&tmpxq);
+
+	/* Initialize reusable tunnel packet header */
+	tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+		      mtyp, INT_H_SIZE, l->addr);
+	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+	msg_set_msgcnt(&tnlhdr, pktcnt);
+	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+	/* Wrap each packet into a tunnel packet */
+	skb_queue_walk(queue, skb) {
+		hdr = buf_msg(skb);
+		if (queue == &l->backlogq)
+			msg_set_seqno(hdr, seqno++);
+		pktlen = msg_size(hdr);
+		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+		if (!tnlskb) {
+			pr_warn("%sunable to send packet\n", link_co_err);
+			return;
+		}
+		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+		__skb_queue_tail(&tnlq, tnlskb);
+	}
+	if (queue != &l->backlogq) {
+		queue = &l->backlogq;
+		goto tnl;
+	}
+
+	tipc_link_xmit(tnl, &tnlq, xmitq);
+
+	if (mtyp == FAILOVER_MSG) {
+		tnl->drop_point = l->rcv_nxt;
+		tnl->failover_reasm_skb = l->reasm_buf;
+		l->reasm_buf = NULL;
+	}
+}
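
tipc_link_tnl_prepare() wraps each original packet behind a reusable INT_H_SIZE tunnel header before handing the result to the tunnel link, so the inner message can be extracted unchanged at the other end. A userspace-style sketch of the wrapping step, with hypothetical names and a stand-in header size:

/* Illustrative helper, not part of the patch. */
#include <string.h>
#include <stdlib.h>

#define INNER_HDR_SIZE 40	/* stand-in for INT_H_SIZE */

static unsigned char *wrap_in_tunnel(const unsigned char *tnl_hdr,
				     const unsigned char *pkt, size_t pktlen,
				     size_t *outlen)
{
	unsigned char *out = malloc(INNER_HDR_SIZE + pktlen);

	if (!out)
		return NULL;
	memcpy(out, tnl_hdr, INNER_HDR_SIZE);		/* tunnel header first */
	memcpy(out + INNER_HDR_SIZE, pkt, pktlen);	/* then the whole packet */
	*outlen = INNER_HDR_SIZE + pktlen;
	return out;
}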
+
+/* tipc_link_proto_rcv(): receive link level protocol message:
+ * Note that network plane id propagates through the network, and may
+ * change at any time. The node with lowest numerical id determines
+ * network plane
+ */
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+			       struct sk_buff_head *xmitq)
+{
+	struct tipc_msg *hdr = buf_msg(skb);
+	u16 rcvgap = 0;
+	u16 nacked_gap = msg_seq_gap(hdr);
+	u16 peers_snd_nxt =  msg_next_sent(hdr);
+	u16 peers_tol = msg_link_tolerance(hdr);
+	u16 peers_prio = msg_linkprio(hdr);
+	u16 rcv_nxt = l->rcv_nxt;
+	char *if_name;
+	int rc = 0;
+
+	if (tipc_link_is_blocked(l))
 		goto exit;
 
-	if (l_ptr->net_plane != msg_net_plane(msg))
-		if (link_own_addr(l_ptr) > msg_prevnode(msg))
-			l_ptr->net_plane = msg_net_plane(msg);
+	if (link_own_addr(l) > msg_prevnode(hdr))
+		l->net_plane = msg_net_plane(hdr);
 
-	switch (msg_type(msg)) {
-
+	switch (msg_type(hdr)) {
 	case RESET_MSG:
-		if (!link_working_unknown(l_ptr) &&
-		    (l_ptr->peer_session != INVALID_SESSION)) {
-			if (less_eq(msg_session(msg), l_ptr->peer_session))
-				break; /* duplicate or old reset: ignore */
-		}
 
-		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
-				link_working_unknown(l_ptr))) {
-			/*
-			 * peer has lost contact -- don't allow peer's links
-			 * to reactivate before we recognize loss & clean up
-			 */
-			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
-		}
-
-		link_state_event(l_ptr, RESET_MSG);
-
+		/* Ignore duplicate RESET with old session number */
+		if ((less_eq(msg_session(hdr), l->peer_session)) &&
+		    (l->peer_session != WILDCARD_SESSION))
+			break;
 		/* fall thru' */
+
 	case ACTIVATE_MSG:
-		/* Update link settings according other endpoint's values */
-		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
 
-		msg_tol = msg_link_tolerance(msg);
-		if (msg_tol > l_ptr->tolerance)
-			link_set_supervision_props(l_ptr, msg_tol);
+		/* Complete own link name with peer's interface name */
+		if_name =  strrchr(l->name, ':') + 1;
+		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
+			break;
+		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
+			break;
+		strncpy(if_name, msg_data(hdr),	TIPC_MAX_IF_NAME);
 
-		if (msg_linkprio(msg) > l_ptr->priority)
-			l_ptr->priority = msg_linkprio(msg);
+		/* Update own tolerance if peer indicates a non-zero value */
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+			l->tolerance = peers_tol;
 
-		if (l_ptr->mtu > msg_max_pkt(msg))
-			l_ptr->mtu = msg_max_pkt(msg);
+		/* Update own priority if peer's priority is higher */
+		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+			l->priority = peers_prio;
 
-		/* Synchronize broadcast link info, if not done previously */
-		if (!tipc_node_is_up(l_ptr->owner)) {
-			l_ptr->owner->bclink.last_sent =
-				l_ptr->owner->bclink.last_in =
-				msg_last_bcast(msg);
-			l_ptr->owner->bclink.oos_state = 0;
+		if (msg_type(hdr) == RESET_MSG) {
+			rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+		} else if (!link_is_up(l)) {
+			tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+			rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
 		}
-
-		l_ptr->peer_session = msg_session(msg);
-		l_ptr->peer_bearer_id = msg_bearer_id(msg);
-
-		if (msg_type(msg) == ACTIVATE_MSG)
-			link_state_event(l_ptr, ACTIVATE_MSG);
+		l->peer_session = msg_session(hdr);
+		l->peer_bearer_id = msg_bearer_id(hdr);
+		if (l->mtu > msg_max_pkt(hdr))
+			l->mtu = msg_max_pkt(hdr);
 		break;
+
 	case STATE_MSG:
 
-		msg_tol = msg_link_tolerance(msg);
-		if (msg_tol)
-			link_set_supervision_props(l_ptr, msg_tol);
+		/* Update own tolerance if peer indicates a non-zero value */
+		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+			l->tolerance = peers_tol;
 
-		if (msg_linkprio(msg) &&
-		    (msg_linkprio(msg) != l_ptr->priority)) {
-			pr_debug("%s<%s>, priority change %u->%u\n",
-				 link_rst_msg, l_ptr->name,
-				 l_ptr->priority, msg_linkprio(msg));
-			l_ptr->priority = msg_linkprio(msg);
-			tipc_link_reset(l_ptr); /* Enforce change to take effect */
-			break;
-		}
-
-		/* Record reception; force mismatch at next timeout: */
-		l_ptr->silent_intv_cnt = 0;
-
-		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-		l_ptr->stats.recv_states++;
-		if (link_reset_unknown(l_ptr))
+		l->silent_intv_cnt = 0;
+		l->stats.recv_states++;
+		if (msg_probe(hdr))
+			l->stats.recv_probes++;
+		rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+		if (!link_is_up(l))
 			break;
 
-		if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
-			rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
+		/* Send NACK if peer has sent pkts we haven't received yet */
+		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
+			rcvgap = peers_snd_nxt - l->rcv_nxt;
+		if (rcvgap || (msg_probe(hdr)))
+			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
+						  0, 0, xmitq);
+		tipc_link_release_pkts(l, msg_ack(hdr));
 
-		if (msg_probe(msg))
-			l_ptr->stats.recv_probes++;
-
-		/* Protocol message before retransmits, reduce loss risk */
-		if (l_ptr->owner->bclink.recv_permitted)
-			tipc_bclink_update_link_state(l_ptr->owner,
-						      msg_last_bcast(msg));
-
-		if (rec_gap || (msg_probe(msg))) {
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
-					     rec_gap, 0, 0);
+		/* If NACK, retransmit will now start at right position */
+		if (nacked_gap) {
+			rc = tipc_link_retransm(l, nacked_gap, xmitq);
+			l->stats.recv_nacks++;
 		}
-		if (msg_seq_gap(msg)) {
-			l_ptr->stats.recv_nacks++;
-			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
-					     msg_seq_gap(msg));
-		}
-		break;
+
+		tipc_link_advance_backlog(l, xmitq);
+		if (unlikely(!skb_queue_empty(&l->wakeupq)))
+			link_prepare_wakeup(l);
 	}
 exit:
-	kfree_skb(buf);
-}
-
-
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
- * a different bearer. Owner node is locked.
- */
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
-				  struct tipc_msg *tunnel_hdr,
-				  struct tipc_msg *msg,
-				  u32 selector)
-{
-	struct tipc_link *tunnel;
-	struct sk_buff *skb;
-	u32 length = msg_size(msg);
-
-	tunnel = l_ptr->owner->active_links[selector & 1];
-	if (!tipc_link_is_up(tunnel)) {
-		pr_warn("%stunnel link no longer available\n", link_co_err);
-		return;
-	}
-	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-	skb = tipc_buf_acquire(length + INT_H_SIZE);
-	if (!skb) {
-		pr_warn("%sunable to send tunnel msg\n", link_co_err);
-		return;
-	}
-	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
-	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
-	__tipc_link_xmit_skb(tunnel, skb);
-}
-
-
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
- * link is still active. We can do failover. Tunnel the failing link's
- * whole send queue via the remaining link. This way, we don't lose
- * any packets, and sequence order is preserved for subsequent traffic
- * sent over the remaining link. Owner node is locked.
- */
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
-{
-	int msgcount;
-	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
-	struct tipc_msg tunnel_hdr;
-	struct sk_buff *skb;
-	int split_bundles;
-
-	if (!tunnel)
-		return;
-
-	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
-		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
-
-	skb_queue_walk(&l_ptr->backlogq, skb) {
-		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
-		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
-	}
-	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
-	tipc_link_purge_backlog(l_ptr);
-	msgcount = skb_queue_len(&l_ptr->transmq);
-	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
-	msg_set_msgcnt(&tunnel_hdr, msgcount);
-
-	if (skb_queue_empty(&l_ptr->transmq)) {
-		skb = tipc_buf_acquire(INT_H_SIZE);
-		if (skb) {
-			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
-			msg_set_size(&tunnel_hdr, INT_H_SIZE);
-			__tipc_link_xmit_skb(tunnel, skb);
-		} else {
-			pr_warn("%sunable to send changeover msg\n",
-				link_co_err);
-		}
-		return;
-	}
-
-	split_bundles = (l_ptr->owner->active_links[0] !=
-			 l_ptr->owner->active_links[1]);
-
-	skb_queue_walk(&l_ptr->transmq, skb) {
-		struct tipc_msg *msg = buf_msg(skb);
-
-		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
-			struct tipc_msg *m = msg_get_wrapped(msg);
-			unchar *pos = (unchar *)m;
-
-			msgcount = msg_msgcnt(msg);
-			while (msgcount--) {
-				msg_set_seqno(m, msg_seqno(msg));
-				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
-						      msg_link_selector(m));
-				pos += align(msg_size(m));
-				m = (struct tipc_msg *)pos;
-			}
-		} else {
-			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
-					      msg_link_selector(msg));
-		}
-	}
-}
-
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
- * duplicate of the first link's send queue via the new link. This way, we
- * are guaranteed that currently queued packets from a socket are delivered
- * before future traffic from the same socket, even if this is using the
- * new link. The last arriving copy of each duplicate packet is dropped at
- * the receiving end by the regular protocol check, so packet cardinality
- * and sequence order is preserved per sender/receiver socket pair.
- * Owner node is locked.
- */
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
-			      struct tipc_link *tnl)
-{
-	struct sk_buff *skb;
-	struct tipc_msg tnl_hdr;
-	struct sk_buff_head *queue = &link->transmq;
-	int mcnt;
-	u16 seqno;
-
-	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
-		      SYNCH_MSG, INT_H_SIZE, link->addr);
-	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
-	msg_set_msgcnt(&tnl_hdr, mcnt);
-	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
-
-tunnel_queue:
-	skb_queue_walk(queue, skb) {
-		struct sk_buff *outskb;
-		struct tipc_msg *msg = buf_msg(skb);
-		u32 len = msg_size(msg);
-
-		msg_set_ack(msg, mod(link->rcv_nxt - 1));
-		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
-		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
-		outskb = tipc_buf_acquire(len + INT_H_SIZE);
-		if (outskb == NULL) {
-			pr_warn("%sunable to send duplicate msg\n",
-				link_co_err);
-			return;
-		}
-		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
-		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
-					       skb->data, len);
-		__tipc_link_xmit_skb(tnl, outskb);
-		if (!tipc_link_is_up(link))
-			return;
-	}
-	if (queue == &link->backlogq)
-		return;
-	seqno = link->snd_nxt;
-	skb_queue_walk(&link->backlogq, skb) {
-		msg_set_seqno(buf_msg(skb), seqno);
-		seqno = mod(seqno + 1);
-	}
-	queue = &link->backlogq;
-	goto tunnel_queue;
-}
-
-/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
- *  Owner node is locked.
- */
-static bool tipc_link_failover_rcv(struct tipc_link *link,
-				   struct sk_buff **skb)
-{
-	struct tipc_msg *msg = buf_msg(*skb);
-	struct sk_buff *iskb = NULL;
-	struct tipc_link *pl = NULL;
-	int bearer_id = msg_bearer_id(msg);
-	int pos = 0;
-
-	if (msg_type(msg) != FAILOVER_MSG) {
-		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
-		goto exit;
-	}
-	if (bearer_id >= MAX_BEARERS)
-		goto exit;
-
-	if (bearer_id == link->bearer_id)
-		goto exit;
-
-	pl = link->owner->links[bearer_id];
-	if (pl && tipc_link_is_up(pl))
-		tipc_link_reset(pl);
-
-	if (link->failover_pkts == FIRST_FAILOVER)
-		link->failover_pkts = msg_msgcnt(msg);
-
-	/* Should we expect an inner packet? */
-	if (!link->failover_pkts)
-		goto exit;
-
-	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
-		pr_warn("%sno inner failover pkt\n", link_co_err);
-		*skb = NULL;
-		goto exit;
-	}
-	link->failover_pkts--;
-	*skb = NULL;
-
-	/* Was this packet already delivered? */
-	if (less(buf_seqno(iskb), link->failover_checkpt)) {
-		kfree_skb(iskb);
-		iskb = NULL;
-		goto exit;
-	}
-	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
-		link->stats.recv_fragments++;
-		tipc_buf_append(&link->failover_skb, &iskb);
-	}
-exit:
-	if (!link->failover_pkts && pl)
-		pl->flags &= ~LINK_FAILINGOVER;
-	kfree_skb(*skb);
-	*skb = iskb;
-	return *skb;
-}
-
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
-{
-	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-
-	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
-		return;
-
-	l_ptr->tolerance = tol;
-	l_ptr->keepalive_intv = msecs_to_jiffies(intv);
-	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
+	kfree_skb(skb);
+	return rc;
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1743,7 +1450,7 @@
 	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
 		tipc_node_lock(n_ptr);
 		for (i = 0; i < MAX_BEARERS; i++) {
-			l_ptr = n_ptr->links[i];
+			l_ptr = n_ptr->links[i].link;
 			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
 				*bearer_id = i;
 				found_node = n_ptr;
@@ -1770,27 +1477,16 @@
 	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
-static void link_print(struct tipc_link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l, const char *str)
 {
-	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
-	struct tipc_bearer *b_ptr;
+	struct sk_buff *hskb = skb_peek(&l->transmq);
+	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+	u16 tail = l->snd_nxt - 1;
 
-	rcu_read_lock();
-	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-	if (b_ptr)
-		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
-	rcu_read_unlock();
-
-	if (link_working_unknown(l_ptr))
-		pr_cont(":WU\n");
-	else if (link_reset_reset(l_ptr))
-		pr_cont(":RR\n");
-	else if (link_reset_unknown(l_ptr))
-		pr_cont(":RU\n");
-	else if (link_working_working(l_ptr))
-		pr_cont(":WW\n");
-	else
-		pr_cont("\n");
+	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+		skb_queue_len(&l->transmq), head, tail,
+		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
 }
 
 /* Parse and validate nested (link) properties valid for media, bearer and link
@@ -1865,7 +1561,7 @@
 
 	tipc_node_lock(node);
 
-	link = node->links[bearer_id];
+	link = node->links[bearer_id].link;
 	if (!link) {
 		res = -EINVAL;
 		goto out;
@@ -1885,7 +1581,7 @@
 			u32 tol;
 
 			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
-			link_set_supervision_props(link, tol);
+			link->tolerance = tol;
 			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
 		}
 		if (props[TIPC_NLA_PROP_PRIO]) {
@@ -2055,10 +1751,11 @@
 	for (i = *prev_link; i < MAX_BEARERS; i++) {
 		*prev_link = i;
 
-		if (!node->links[i])
+		if (!node->links[i].link)
 			continue;
 
-		err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
+		err = __tipc_nl_add_link(net, msg,
+					 node->links[i].link, NLM_F_MULTI);
 		if (err)
 			return err;
 	}
@@ -2172,7 +1869,7 @@
 			return -EINVAL;
 
 		tipc_node_lock(node);
-		link = node->links[bearer_id];
+		link = node->links[bearer_id].link;
 		if (!link) {
 			tipc_node_unlock(node);
 			nlmsg_free(msg.skb);
@@ -2227,7 +1924,7 @@
 
 	tipc_node_lock(node);
 
-	link = node->links[bearer_id];
+	link = node->links[bearer_id].link;
 	if (!link) {
 		tipc_node_unlock(node);
 		return -EINVAL;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index ae0a0ea..39ff8b6 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -49,19 +49,25 @@
  */
 #define INVALID_LINK_SEQ 0x10000
 
-/* Link working states
+/* Link FSM events:
  */
-#define WORKING_WORKING 560810u
-#define WORKING_UNKNOWN 560811u
-#define RESET_UNKNOWN   560812u
-#define RESET_RESET     560813u
+enum {
+	LINK_ESTABLISH_EVT       = 0xec1ab1e,
+	LINK_PEER_RESET_EVT      = 0x9eed0e,
+	LINK_FAILURE_EVT         = 0xfa110e,
+	LINK_RESET_EVT           = 0x10ca1d0e,
+	LINK_FAILOVER_BEGIN_EVT  = 0xfa110bee,
+	LINK_FAILOVER_END_EVT    = 0xfa110ede,
+	LINK_SYNCH_BEGIN_EVT     = 0xc1ccbee,
+	LINK_SYNCH_END_EVT       = 0xc1ccede
+};
 
-/* Link endpoint execution states
+/* Events returned from link at packet reception or at timeout
  */
-#define LINK_STARTED     0x0001
-#define LINK_STOPPED     0x0002
-#define LINK_SYNCHING    0x0004
-#define LINK_FAILINGOVER 0x0008
+enum {
+	TIPC_LINK_UP_EVT       = 1,
+	TIPC_LINK_DOWN_EVT     = (1 << 1)
+};
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -106,7 +112,6 @@
  * @timer: link timer
  * @owner: pointer to peer node
  * @refcnt: reference counter for permanent references (owner node & timer)
- * @flags: execution state flags for link endpoint instance
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
@@ -143,20 +148,17 @@
 struct tipc_link {
 	u32 addr;
 	char name[TIPC_MAX_LINK_NAME];
-	struct tipc_media_addr media_addr;
-	struct timer_list timer;
+	struct tipc_media_addr *media_addr;
 	struct tipc_node *owner;
-	struct kref ref;
 
 	/* Management and link supervision data */
-	unsigned int flags;
 	u32 peer_session;
 	u32 peer_bearer_id;
 	u32 bearer_id;
 	u32 tolerance;
 	unsigned long keepalive_intv;
 	u32 abort_limit;
-	int state;
+	u32 state;
 	u32 silent_intv_cnt;
 	struct {
 		unchar hdr[INT_H_SIZE];
@@ -165,12 +167,10 @@
 	struct tipc_msg *pmsg;
 	u32 priority;
 	char net_plane;
-	u16 synch_point;
 
-	/* Failover */
-	u16 failover_pkts;
-	u16 failover_checkpt;
-	struct sk_buff *failover_skb;
+	/* Failover/synch */
+	u16 drop_point;
+	struct sk_buff *failover_reasm_skb;
 
 	/* Max packet negotiation */
 	u16 mtu;
@@ -192,8 +192,8 @@
 	u16 rcv_nxt;
 	u32 rcv_unacked;
 	struct sk_buff_head deferdq;
-	struct sk_buff_head inputq;
-	struct sk_buff_head namedq;
+	struct sk_buff_head *inputq;
+	struct sk_buff_head *namedq;
 
 	/* Congestion handling */
 	struct sk_buff_head wakeupq;
@@ -205,28 +205,29 @@
 	struct tipc_stats stats;
 };
 
-struct tipc_port;
-
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-			      struct tipc_bearer *b_ptr,
-			      const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+		      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+		      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+		      struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+			   int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+				    struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
-int tipc_link_is_up(struct tipc_link *l_ptr);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
 void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
-		       u32 selector);
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
-		   u32 selector);
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 		     struct sk_buff_head *list);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+		   struct sk_buff_head *xmitq);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
 			  u32 gap, u32 tolerance, u32 priority);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -242,34 +243,8 @@
 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
-void link_prepare_wakeup(struct tipc_link *l);
-
-static inline u32 link_own_addr(struct tipc_link *l)
-{
-	return msg_prevnode(l->pmsg);
-}
-
-/*
- * Link status checking routines
- */
-static inline int link_working_working(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == WORKING_WORKING;
-}
-
-static inline int link_working_unknown(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == WORKING_UNKNOWN;
-}
-
-static inline int link_reset_unknown(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == RESET_UNKNOWN;
-}
-
-static inline int link_reset_reset(struct tipc_link *l_ptr)
-{
-	return l_ptr->state == RESET_RESET;
-}
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+		  struct sk_buff_head *xmitq);
 
 #endif
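
The two TIPC_LINK_*_EVT values above are bit flags: tipc_link_rcv() and tipc_link_timeout() return them in a bitmask so the node layer can react (bring the link up or down) only after its lock has been released. A minimal user-space sketch of that return-bitmask convention; the names and trigger conditions here are invented for illustration and are not the kernel API:

    #include <stdio.h>

    /* Hypothetical event bits, mirroring the style of TIPC_LINK_UP/DOWN_EVT. */
    #define DEMO_LINK_UP_EVT   (1 << 0)
    #define DEMO_LINK_DOWN_EVT (1 << 1)

    /* A receive step reports state-change events in its return value
     * instead of acting on them directly; the caller decides afterwards. */
    static int demo_link_rcv(int probe_ok, int tolerance_exceeded)
    {
    	int rc = 0;

    	if (probe_ok)
    		rc |= DEMO_LINK_UP_EVT;
    	if (tolerance_exceeded)
    		rc |= DEMO_LINK_DOWN_EVT;
    	return rc;
    }

    int main(void)
    {
    	int rc = demo_link_rcv(1, 0);

    	if (rc & DEMO_LINK_UP_EVT)
    		printf("caller brings link up\n");
    	if (rc & DEMO_LINK_DOWN_EVT)
    		printf("caller takes link down\n");
    	return 0;
    }
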
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 08b4cc7..562c926 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -463,60 +463,72 @@
 
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf:  buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err:  error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb:  buffer containing message to be reversed; may be replaced.
+ * @err:  error code to be set in message, if any
+ * Consumes buffer on failure
  * Returns true if success, otherwise false
  */
-bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
-		      int err)
+bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
-	struct tipc_msg *msg = buf_msg(buf);
+	struct sk_buff *_skb = *skb;
+	struct tipc_msg *hdr = buf_msg(_skb);
 	struct tipc_msg ohdr;
-	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+	int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
 
-	if (skb_linearize(buf))
+	if (skb_linearize(_skb))
 		goto exit;
-	msg = buf_msg(buf);
-	if (msg_dest_droppable(msg))
+	hdr = buf_msg(_skb);
+	if (msg_dest_droppable(hdr))
 		goto exit;
-	if (msg_errcode(msg))
+	if (msg_errcode(hdr))
 		goto exit;
-	memcpy(&ohdr, msg, msg_hdr_sz(msg));
-	msg_set_errcode(msg, err);
-	msg_set_origport(msg, msg_destport(&ohdr));
-	msg_set_destport(msg, msg_origport(&ohdr));
-	msg_set_prevnode(msg, own_addr);
-	if (!msg_short(msg)) {
-		msg_set_orignode(msg, msg_destnode(&ohdr));
-		msg_set_destnode(msg, msg_orignode(&ohdr));
+
+	/* Take a copy of original header before altering message */
+	memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+	/* Never return SHORT header; expand by replacing buffer if necessary */
+	if (msg_short(hdr)) {
+		*skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+		if (!*skb)
+			goto exit;
+		memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+		kfree_skb(_skb);
+		_skb = *skb;
+		hdr = buf_msg(_skb);
+		memcpy(hdr, &ohdr, BASIC_H_SIZE);
+		msg_set_hdr_sz(hdr, BASIC_H_SIZE);
 	}
-	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
-	skb_trim(buf, msg_size(msg));
-	skb_orphan(buf);
-	*dnode = msg_orignode(&ohdr);
+
+	/* Now reverse the concerned fields */
+	msg_set_errcode(hdr, err);
+	msg_set_origport(hdr, msg_destport(&ohdr));
+	msg_set_destport(hdr, msg_origport(&ohdr));
+	msg_set_destnode(hdr, msg_prevnode(&ohdr));
+	msg_set_prevnode(hdr, own_node);
+	msg_set_orignode(hdr, own_node);
+	msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+	skb_trim(_skb, msg_size(hdr));
+	skb_orphan(_skb);
 	return true;
 exit:
-	kfree_skb(buf);
-	*dnode = 0;
+	kfree_skb(_skb);
+	*skb = NULL;
 	return false;
 }
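
The reworked tipc_msg_reverse() swaps the addressing fields in place, stamps the error code, and for SHORT headers replaces the buffer so a full BASIC header can be returned to the sender. A rough user-space model of just the field swap, using a toy header struct rather than the real wire format:

    #include <stdio.h>
    #include <stdint.h>

    /* Toy header with only the fields relevant to the reversal. */
    struct demo_hdr {
    	uint32_t orignode, origport;
    	uint32_t destnode, destport;
    	uint32_t prevnode;
    	int errcode;
    };

    /* Swap source/destination and stamp an error code, so a rejected
     * message is bounced back towards the node it came from. */
    static void demo_reverse(struct demo_hdr *h, uint32_t own_node, int err)
    {
    	struct demo_hdr o = *h;          /* copy of the original header */

    	h->errcode  = err;
    	h->origport = o.destport;
    	h->destport = o.origport;
    	h->destnode = o.prevnode;        /* send back to previous hop */
    	h->prevnode = own_node;
    	h->orignode = own_node;
    }

    int main(void)
    {
    	struct demo_hdr h = { .orignode = 1, .origport = 10,
    			      .destnode = 2, .destport = 20,
    			      .prevnode = 1, .errcode = 0 };

    	demo_reverse(&h, 2, 5 /* e.g. "no such port" */);
    	printf("dest %u:%u err %d\n", (unsigned)h.destnode,
    	       (unsigned)h.destport, h.errcode);
    	return 0;
    }
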
 
 /**
  * tipc_msg_lookup_dest(): try to find new destination for named message
  * @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
  * Does not consume buffer
  * Returns true if a destination is found, false otherwise
  */
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
-			  u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 {
 	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport;
-	u32 own_addr = tipc_own_addr(net);
+	u32 dport, dnode;
+	u32 onode = tipc_own_addr(net);
 
 	if (!msg_isdata(msg))
 		return false;
@@ -529,15 +541,15 @@
 		return false;
 	if (msg_reroute_cnt(msg))
 		return false;
-	*dnode = addr_domain(net, msg_lookup_scope(msg));
+	dnode = addr_domain(net, msg_lookup_scope(msg));
 	dport = tipc_nametbl_translate(net, msg_nametype(msg),
-				       msg_nameinst(msg), dnode);
+				       msg_nameinst(msg), &dnode);
 	if (!dport)
 		return false;
 	msg_incr_reroute_cnt(msg);
-	if (*dnode != own_addr)
-		msg_set_prevnode(msg, own_addr);
-	msg_set_destnode(msg, *dnode);
+	if (dnode != onode)
+		msg_set_prevnode(msg, onode);
+	msg_set_destnode(msg, dnode);
 	msg_set_destport(msg, dport);
 	*err = TIPC_OK;
 	return true;
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 19c45fb..a82c584 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -38,6 +38,7 @@
 #define _TIPC_MSG_H
 
 #include <linux/tipc.h>
+#include "core.h"
 
 /*
  * Constants and routines used to read and write TIPC payload message headers
@@ -109,7 +110,6 @@
 	struct sk_buff *tail;
 	bool validated;
 	bool wakeup_pending;
-	bool bundling;
 	u16 chain_sz;
 	u16 chain_imp;
 };
@@ -558,15 +558,6 @@
 	msg_set_bits(m, 1, 15, 0x1fff, n);
 }
 
-static inline bool msg_dup(struct tipc_msg *m)
-{
-	if (likely(msg_user(m) != TUNNEL_PROTOCOL))
-		return false;
-	if (msg_type(m) != SYNCH_MSG)
-		return false;
-	return true;
-}
-
 /*
  * Word 2
  */
@@ -620,12 +611,12 @@
 }
 
 
-static inline u32 msg_next_sent(struct tipc_msg *m)
+static inline u16 msg_next_sent(struct tipc_msg *m)
 {
 	return msg_bits(m, 4, 0, 0xffff);
 }
 
-static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 4, 0, 0xffff, n);
 }
@@ -658,12 +649,12 @@
 /*
  * Word 5
  */
-static inline u32 msg_session(struct tipc_msg *m)
+static inline u16 msg_session(struct tipc_msg *m)
 {
 	return msg_bits(m, 5, 16, 0xffff);
 }
 
-static inline void msg_set_session(struct tipc_msg *m, u32 n)
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 5, 16, 0xffff, n);
 }
@@ -726,12 +717,12 @@
 /*
  * Word 9
  */
-static inline u32 msg_msgcnt(struct tipc_msg *m)
+static inline u16 msg_msgcnt(struct tipc_msg *m)
 {
 	return msg_bits(m, 9, 16, 0xffff);
 }
 
-static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
 {
 	msg_set_bits(m, 9, 16, 0xffff, n);
 }
@@ -766,10 +757,25 @@
 	msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+	if (likely(msg_user(m) != LINK_PROTOCOL))
+		return true;
+	if (msg_type(m) == STATE_MSG)
+		return true;
+	return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+	if (msg_peer_link_is_up(m))
+		return true;
+	return msg_redundant_link(m);
+}
+
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
-		      int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
 		   u32 hsize, u32 destnode);
 struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -782,8 +788,7 @@
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 		   int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
-			  int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
@@ -857,26 +862,65 @@
 	return skb;
 }
 
-/* tipc_skb_queue_tail(): add buffer to tail of list;
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
  * @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
+ * @skb: buffer to add
+ * Returns true if the queue should be processed further, otherwise false
  */
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
-				       struct sk_buff *skb, u32 dport)
+static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
+					   struct sk_buff *skb)
 {
-	struct sk_buff *_skb = NULL;
-	bool rv = false;
+	struct sk_buff *_skb, *tmp;
+	struct tipc_msg *hdr = buf_msg(skb);
+	u16 seqno = msg_seqno(hdr);
+
+	if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
+		__skb_queue_head(list, skb);
+		return true;
+	}
+	if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
+		__skb_queue_head(list, skb);
+		return true;
+	}
+	if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
+		skb_queue_walk_safe(list, _skb, tmp) {
+			if (likely(less(seqno, buf_seqno(_skb)))) {
+				__skb_queue_before(list, _skb, skb);
+				return true;
+			}
+		}
+	}
+	__skb_queue_tail(list, skb);
+	return false;
+}
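
The helper above keeps the deferred queue ordered by 16-bit sequence number using the wrap-safe less()/more() comparisons, so packets arriving out of order are slotted into place. A standalone sketch of the same idea with a plain array instead of an sk_buff list; seq_less() mirrors the modulo-2^16 comparison:

    #include <stdio.h>
    #include <stdint.h>

    /* Wrap-safe 16-bit sequence comparison: "a before b" even across the
     * 0xffff -> 0 wrap, mirroring the less()/more() helpers. */
    static int seq_less(uint16_t a, uint16_t b)
    {
    	return (int16_t)(a - b) < 0;
    }

    /* Insert seqno into a sorted array, keeping ascending (mod 2^16) order. */
    static int sorted_insert(uint16_t *q, int len, uint16_t seqno)
    {
    	int i = len;

    	while (i > 0 && seq_less(seqno, q[i - 1])) {
    		q[i] = q[i - 1];
    		i--;
    	}
    	q[i] = seqno;
    	return len + 1;
    }

    int main(void)
    {
    	uint16_t q[8];
    	int len = 0;
    	uint16_t in[] = { 0xfffe, 1, 0xffff, 0, 2 };   /* arrives out of order */

    	for (unsigned i = 0; i < sizeof(in) / sizeof(in[0]); i++)
    		len = sorted_insert(q, len, in[i]);

    	for (int i = 0; i < len; i++)
    		printf("%#x ", (unsigned)q[i]);
    	printf("\n");                /* fffe ffff 0 1 2 */
    	return 0;
    }
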
+
+/* tipc_skb_queue_splice_tail - append an skb list to lock protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+					      struct sk_buff_head *head)
+{
+	spin_lock_bh(&head->lock);
+	skb_queue_splice_tail(list, head);
+	spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+						   struct sk_buff_head *head)
+{
+	struct sk_buff_head tmp;
+
+	__skb_queue_head_init(&tmp);
 
 	spin_lock_bh(&list->lock);
-	_skb = skb_peek_tail(list);
-	if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
-	    (skb_queue_len(list) > 32))
-		rv = true;
-	__skb_queue_tail(list, skb);
+	skb_queue_splice_tail_init(list, &tmp);
 	spin_unlock_bh(&list->lock);
-	return rv;
+	tipc_skb_queue_splice_tail(&tmp, head);
 }
 
 #endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 41e7b7e..e6018b7 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -96,13 +96,13 @@
 		dnode = node->addr;
 		if (in_own_node(net, dnode))
 			continue;
-		if (!tipc_node_active_links(node))
+		if (!tipc_node_is_up(node))
 			continue;
 		oskb = pskb_copy(skb, GFP_ATOMIC);
 		if (!oskb)
 			break;
 		msg_set_destnode(buf_msg(oskb), dnode);
-		tipc_link_xmit_skb(net, oskb, dnode, dnode);
+		tipc_node_xmit_skb(net, oskb, dnode, dnode);
 	}
 	rcu_read_unlock();
 
@@ -223,7 +223,7 @@
 			 &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
 	rcu_read_unlock();
 
-	tipc_link_xmit(net, &head, dnode, dnode);
+	tipc_node_xmit(net, &head, dnode, dnode);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 53e0fee..1eadc95 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -1114,7 +1114,7 @@
 	}
 
 	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-	if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
+	if (len && !TLV_OK(msg.req, len)) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 		err = -EOPNOTSUPP;
 		goto send;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 0b1d61a..703875f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -40,10 +40,42 @@
 #include "name_distr.h"
 #include "socket.h"
 #include "bcast.h"
+#include "discover.h"
 
-static void node_lost_contact(struct tipc_node *n_ptr);
+/* Node FSM states and events:
+ */
+enum {
+	SELF_DOWN_PEER_DOWN    = 0xdd,
+	SELF_UP_PEER_UP        = 0xaa,
+	SELF_DOWN_PEER_LEAVING = 0xd1,
+	SELF_UP_PEER_COMING    = 0xac,
+	SELF_COMING_PEER_UP    = 0xca,
+	SELF_LEAVING_PEER_DOWN = 0x1d,
+	NODE_FAILINGOVER       = 0xf0,
+	NODE_SYNCHING          = 0xcc
+};
+
+enum {
+	SELF_ESTABL_CONTACT_EVT = 0xece,
+	SELF_LOST_CONTACT_EVT   = 0x1ce,
+	PEER_ESTABL_CONTACT_EVT = 0x9ece,
+	PEER_LOST_CONTACT_EVT   = 0x91ce,
+	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+	NODE_FAILOVER_END_EVT   = 0xfee,
+	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
+	NODE_SYNCH_END_EVT      = 0xcee
+};
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+				  struct sk_buff_head *xmitq,
+				  struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+				bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
 static void node_established_contact(struct tipc_node *n_ptr);
 static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(unsigned long data);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
 
 struct tipc_sock_conn {
 	u32 port;
@@ -110,7 +142,7 @@
 	return NULL;
 }
 
-struct tipc_node *tipc_node_create(struct net *net, u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_node *n_ptr, *temp_node;
@@ -126,12 +158,14 @@
 	}
 	n_ptr->addr = addr;
 	n_ptr->net = net;
+	n_ptr->capabilities = capabilities;
 	kref_init(&n_ptr->kref);
 	spin_lock_init(&n_ptr->lock);
 	INIT_HLIST_NODE(&n_ptr->hash);
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->publ_list);
 	INIT_LIST_HEAD(&n_ptr->conn_sks);
+	skb_queue_head_init(&n_ptr->bclink.namedq);
 	__skb_queue_head_init(&n_ptr->bclink.deferdq);
 	hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
 	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -139,14 +173,32 @@
 			break;
 	}
 	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
-	n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+	n_ptr->state = SELF_DOWN_PEER_LEAVING;
 	n_ptr->signature = INVALID_NODE_SIG;
+	n_ptr->active_links[0] = INVALID_BEARER_ID;
+	n_ptr->active_links[1] = INVALID_BEARER_ID;
 	tipc_node_get(n_ptr);
+	setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+	n_ptr->keepalive_intv = U32_MAX;
 exit:
 	spin_unlock_bh(&tn->node_list_lock);
 	return n_ptr;
 }
 
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+	unsigned long tol = l->tolerance;
+	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+	unsigned long keepalive_intv = msecs_to_jiffies(intv);
+
+	/* Link with lowest tolerance determines timer interval */
+	if (keepalive_intv < n->keepalive_intv)
+		n->keepalive_intv = keepalive_intv;
+
+	/* Ensure link's abort limit corresponds to current interval */
+	l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+}
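
The probe interval is a quarter of the link tolerance, capped at 500 ms, and the abort limit is the number of such intervals that fit into the tolerance. A worked example with an assumed tolerance of 1500 ms (plain milliseconds, no jiffies conversion):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long tol = 1500;                              /* ms, link tolerance */
    	unsigned long intv = (tol / 4 > 500) ? 500 : tol / 4;  /* 375 ms */
    	unsigned long abort_limit = tol / intv;                /* 4 missed probes */

    	printf("probe every %lu ms, give up after %lu silent intervals\n",
    	       intv, abort_limit);
    	return 0;
    }
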
+
 static void tipc_node_delete(struct tipc_node *node)
 {
 	list_del_rcu(&node->list);
@@ -160,8 +212,11 @@
 	struct tipc_node *node, *t_node;
 
 	spin_lock_bh(&tn->node_list_lock);
-	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+	list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+		if (del_timer(&node->timer))
+			tipc_node_put(node);
 		tipc_node_put(node);
+	}
 	spin_unlock_bh(&tn->node_list_lock);
 }
 
@@ -219,158 +274,551 @@
 	tipc_node_put(node);
 }
 
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(unsigned long data)
+{
+	struct tipc_node *n = (struct tipc_node *)data;
+	struct tipc_link_entry *le;
+	struct sk_buff_head xmitq;
+	int bearer_id;
+	int rc = 0;
+
+	__skb_queue_head_init(&xmitq);
+
+	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+		tipc_node_lock(n);
+		le = &n->links[bearer_id];
+		if (le->link) {
+			/* Link tolerance may change asynchronously: */
+			tipc_node_calculate_timer(n, le->link);
+			rc = tipc_link_timeout(le->link, &xmitq);
+		}
+		tipc_node_unlock(n);
+		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+		if (rc & TIPC_LINK_DOWN_EVT)
+			tipc_node_link_down(n, bearer_id, false);
+	}
+	if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+		tipc_node_get(n);
+	tipc_node_put(n);
+}
+
+/**
+ * __tipc_node_link_up - handle addition of link
+ * Node lock must be held by caller
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+				struct sk_buff_head *xmitq)
+{
+	int *slot0 = &n->active_links[0];
+	int *slot1 = &n->active_links[1];
+	struct tipc_link *ol = node_active_link(n, 0);
+	struct tipc_link *nl = n->links[bearer_id].link;
+
+	if (!nl || !tipc_link_is_up(nl))
+		return;
+
+	n->working_links++;
+	n->action_flags |= TIPC_NOTIFY_LINK_UP;
+	n->link_id = nl->peer_bearer_id << 16 | bearer_id;
+
+	/* Leave room for tunnel header when returning 'mtu' to users: */
+	n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+
+	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+
+	pr_debug("Established link <%s> on network plane %c\n",
+		 nl->name, nl->net_plane);
+
+	/* First link? => give it both slots */
+	if (!ol) {
+		*slot0 = bearer_id;
+		*slot1 = bearer_id;
+		tipc_link_build_bcast_sync_msg(nl, xmitq);
+		node_established_contact(n);
+		return;
+	}
+
+	/* Second link => redistribute slots */
+	if (nl->priority > ol->priority) {
+		pr_debug("Old link <%s> becomes standby\n", ol->name);
+		*slot0 = bearer_id;
+		*slot1 = bearer_id;
+	} else if (nl->priority == ol->priority) {
+		*slot0 = bearer_id;
+	} else {
+		pr_debug("New link <%s> is standby\n", nl->name);
+	}
+
+	/* Prepare synchronization with first link */
+	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+			      struct sk_buff_head *xmitq)
 {
-	struct tipc_link **active = &n_ptr->active_links[0];
-
-	n_ptr->working_links++;
-	n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
-	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
-
-	pr_debug("Established link <%s> on network plane %c\n",
-		 l_ptr->name, l_ptr->net_plane);
-
-	if (!active[0]) {
-		active[0] = active[1] = l_ptr;
-		node_established_contact(n_ptr);
-		goto exit;
-	}
-	if (l_ptr->priority < active[0]->priority) {
-		pr_debug("New link <%s> becomes standby\n", l_ptr->name);
-		goto exit;
-	}
-	tipc_link_dup_queue_xmit(active[0], l_ptr);
-	if (l_ptr->priority == active[0]->priority) {
-		active[0] = l_ptr;
-		goto exit;
-	}
-	pr_debug("Old link <%s> becomes standby\n", active[0]->name);
-	if (active[1] != active[0])
-		pr_debug("Old link <%s> becomes standby\n", active[1]->name);
-	active[0] = active[1] = l_ptr;
-exit:
-	/* Leave room for changeover header when returning 'mtu' to users: */
-	n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-	n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+	tipc_node_lock(n);
+	__tipc_node_link_up(n, bearer_id, xmitq);
+	tipc_node_unlock(n);
 }
 
 /**
- * node_select_active_links - select active link
+ * __tipc_node_link_down - handle loss of link
  */
-static void node_select_active_links(struct tipc_node *n_ptr)
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+				  struct sk_buff_head *xmitq,
+				  struct tipc_media_addr **maddr)
 {
-	struct tipc_link **active = &n_ptr->active_links[0];
-	u32 i;
-	u32 highest_prio = 0;
+	struct tipc_link_entry *le = &n->links[*bearer_id];
+	int *slot0 = &n->active_links[0];
+	int *slot1 = &n->active_links[1];
+	int i, highest = 0;
+	struct tipc_link *l, *_l, *tnl;
 
-	active[0] = active[1] = NULL;
-
-	for (i = 0; i < MAX_BEARERS; i++) {
-		struct tipc_link *l_ptr = n_ptr->links[i];
-
-		if (!l_ptr || !tipc_link_is_up(l_ptr) ||
-		    (l_ptr->priority < highest_prio))
-			continue;
-
-		if (l_ptr->priority > highest_prio) {
-			highest_prio = l_ptr->priority;
-			active[0] = active[1] = l_ptr;
-		} else {
-			active[1] = l_ptr;
-		}
-	}
-}
-
-/**
- * tipc_node_link_down - handle loss of link
- */
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
-{
-	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-	struct tipc_link **active;
-
-	n_ptr->working_links--;
-	n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
-	n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
-
-	if (!tipc_link_is_active(l_ptr)) {
-		pr_debug("Lost standby link <%s> on network plane %c\n",
-			 l_ptr->name, l_ptr->net_plane);
+	l = n->links[*bearer_id].link;
+	if (!l || tipc_link_is_reset(l))
 		return;
-	}
+
+	n->working_links--;
+	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+	n->link_id = l->peer_bearer_id << 16 | *bearer_id;
+
+	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
+
 	pr_debug("Lost link <%s> on network plane %c\n",
-		 l_ptr->name, l_ptr->net_plane);
+		 l->name, l->net_plane);
 
-	active = &n_ptr->active_links[0];
-	if (active[0] == l_ptr)
-		active[0] = active[1];
-	if (active[1] == l_ptr)
-		active[1] = active[0];
-	if (active[0] == l_ptr)
-		node_select_active_links(n_ptr);
-	if (tipc_node_is_up(n_ptr))
-		tipc_link_failover_send_queue(l_ptr);
-	else
-		node_lost_contact(n_ptr);
+	/* Select new active link if any available */
+	*slot0 = INVALID_BEARER_ID;
+	*slot1 = INVALID_BEARER_ID;
+	for (i = 0; i < MAX_BEARERS; i++) {
+		_l = n->links[i].link;
+		if (!_l || !tipc_link_is_up(_l))
+			continue;
+		if (_l == l)
+			continue;
+		if (_l->priority < highest)
+			continue;
+		if (_l->priority > highest) {
+			highest = _l->priority;
+			*slot0 = i;
+			*slot1 = i;
+			continue;
+		}
+		*slot1 = i;
+	}
 
-	/* Leave room for changeover header when returning 'mtu' to users: */
-	if (active[0]) {
-		n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-		n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+	if (!tipc_node_is_up(n)) {
+		tipc_link_reset(l);
+		node_lost_contact(n, &le->inputq);
 		return;
 	}
-	/* Loopback link went down? No fragmentation needed from now on. */
-	if (n_ptr->addr == tn->own_addr) {
-		n_ptr->act_mtus[0] = MAX_MSG_SIZE;
-		n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+
+	/* There is still a working link => initiate failover */
+	tnl = node_active_link(n, 0);
+	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+	n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+	tipc_link_reset(l);
+	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+	*maddr = &n->links[tnl->bearer_id].maddr;
+	*bearer_id = tnl->bearer_id;
+}
+
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
+{
+	struct tipc_link_entry *le = &n->links[bearer_id];
+	struct tipc_media_addr *maddr;
+	struct sk_buff_head xmitq;
+
+	__skb_queue_head_init(&xmitq);
+
+	tipc_node_lock(n);
+	__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+	if (delete && le->link) {
+		kfree(le->link);
+		le->link = NULL;
+		n->link_cnt--;
 	}
+	tipc_node_unlock(n);
+
+	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+	tipc_sk_rcv(n->net, &le->inputq);
 }
 
-int tipc_node_active_links(struct tipc_node *n_ptr)
+bool tipc_node_is_up(struct tipc_node *n)
 {
-	return n_ptr->active_links[0] != NULL;
+	return n->active_links[0] != INVALID_BEARER_ID;
 }
 
-int tipc_node_is_up(struct tipc_node *n_ptr)
+void tipc_node_check_dest(struct net *net, u32 onode,
+			  struct tipc_bearer *b,
+			  u16 capabilities, u32 signature,
+			  struct tipc_media_addr *maddr,
+			  bool *respond, bool *dupl_addr)
 {
-	return tipc_node_active_links(n_ptr);
+	struct tipc_node *n;
+	struct tipc_link *l;
+	struct tipc_link_entry *le;
+	bool addr_match = false;
+	bool sign_match = false;
+	bool link_up = false;
+	bool accept_addr = false;
+	bool reset = true;
+
+	*dupl_addr = false;
+	*respond = false;
+
+	n = tipc_node_create(net, onode, capabilities);
+	if (!n)
+		return;
+
+	tipc_node_lock(n);
+
+	le = &n->links[b->identity];
+
+	/* Prepare to validate requesting node's signature and media address */
+	l = le->link;
+	link_up = l && tipc_link_is_up(l);
+	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+	sign_match = (signature == n->signature);
+
+	/* These three flags give us eight permutations: */
+
+	if (sign_match && addr_match && link_up) {
+		/* All is fine. Do nothing. */
+		reset = false;
+	} else if (sign_match && addr_match && !link_up) {
+		/* Respond. The link will come up in due time */
+		*respond = true;
+	} else if (sign_match && !addr_match && link_up) {
+		/* Peer has changed i/f address without rebooting.
+		 * If so, the link will reset soon, and the next
+		 * discovery will be accepted. So we can ignore it.
+	 * It may also be a cloned or malicious peer having
+		 * chosen the same node address and signature as an
+		 * existing one.
+		 * Ignore requests until the link goes down, if ever.
+		 */
+		*dupl_addr = true;
+	} else if (sign_match && !addr_match && !link_up) {
+		/* Peer link has changed i/f address without rebooting.
+		 * It may also be a cloned or malicious peer; we can't
+		 * distinguish between the two.
+		 * The signature is correct, so we must accept.
+		 */
+		accept_addr = true;
+		*respond = true;
+	} else if (!sign_match && addr_match && link_up) {
+		/* Peer node rebooted. Two possibilities:
+		 *  - Delayed re-discovery; this link endpoint has already
+		 *    reset and re-established contact with the peer, before
+		 *    receiving a discovery message from that node.
+		 *    (The peer happened to receive one from this node first).
+		 *  - The peer came back so fast that our side has not
+		 *    discovered it yet. Probing from this side will soon
+		 *    reset the link, since there can be no working link
+		 *    endpoint at the peer end, and the link will re-establish.
+		 *  Accept the signature, since it comes from a known peer.
+		 */
+		n->signature = signature;
+	} else if (!sign_match && addr_match && !link_up) {
+		/*  The peer node has rebooted.
+		 *  Accept signature, since it is a known peer.
+		 */
+		n->signature = signature;
+		*respond = true;
+	} else if (!sign_match && !addr_match && link_up) {
+		/* Peer rebooted with new address, or a new/duplicate peer.
+		 * Ignore until the link goes down, if ever.
+		 */
+		*dupl_addr = true;
+	} else if (!sign_match && !addr_match && !link_up) {
+		/* Peer rebooted with new address, or it is a new peer.
+		 * Accept signature and address.
+		 */
+		n->signature = signature;
+		accept_addr = true;
+		*respond = true;
+	}
+
+	if (!accept_addr)
+		goto exit;
+
+	/* Now create new link if not already existing */
+	if (!l) {
+		if (n->link_cnt == 2) {
+			pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+			goto exit;
+		}
+		if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
+				      tipc_own_addr(net), onode, &le->maddr,
+				      &le->inputq, &n->bclink.namedq, &l)) {
+			*respond = false;
+			goto exit;
+		}
+		tipc_link_reset(l);
+		if (n->state == NODE_FAILINGOVER)
+			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+		le->link = l;
+		n->link_cnt++;
+		tipc_node_calculate_timer(n, l);
+		if (n->link_cnt == 1)
+			if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+				tipc_node_get(n);
+	}
+	memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+	tipc_node_unlock(n);
+	if (reset)
+		tipc_node_link_down(n, b->identity, false);
+	tipc_node_put(n);
 }
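
The sign_match/addr_match/link_up flags give eight permutations, each mapped to a small set of actions. A compact user-space rendering of that decision table; the verdict struct is made up and the signature bookkeeping is reduced to comments:

    #include <stdio.h>
    #include <stdbool.h>

    struct verdict {
    	bool respond;      /* answer the discovery message       */
    	bool accept_addr;  /* adopt the new media address        */
    	bool dupl_addr;    /* treat as duplicate/suspect address */
    	bool reset;        /* take the link down afterwards      */
    };

    /* Decision table equivalent to the if/else chain above. */
    static struct verdict check_dest(bool sign_match, bool addr_match, bool link_up)
    {
    	struct verdict v = { .reset = true };

    	if (sign_match && addr_match && link_up) {
    		v.reset = false;                  /* all fine, do nothing */
    	} else if (sign_match && addr_match) {
    		v.respond = true;                 /* link will come up soon */
    	} else if (sign_match && link_up) {
    		v.dupl_addr = true;               /* ignore until link drops */
    	} else if (sign_match) {
    		v.accept_addr = v.respond = true; /* i/f address changed */
    	} else if (addr_match && link_up) {
    		;                                 /* rebooted peer: take new signature only */
    	} else if (addr_match) {
    		v.respond = true;                 /* rebooted peer, link not up yet */
    	} else if (link_up) {
    		v.dupl_addr = true;
    	} else {
    		v.accept_addr = v.respond = true; /* brand new peer */
    	}
    	return v;
    }

    int main(void)
    {
    	struct verdict v = check_dest(false, false, false);

    	printf("respond=%d accept_addr=%d dupl=%d reset=%d\n",
    	       v.respond, v.accept_addr, v.dupl_addr, v.reset);
    	return 0;
    }
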
 
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+void tipc_node_delete_links(struct net *net, int bearer_id)
 {
-	n_ptr->links[l_ptr->bearer_id] = l_ptr;
-	n_ptr->link_cnt++;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct tipc_node *n;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(n, &tn->node_list, list) {
+		tipc_node_link_down(n, bearer_id, true);
+	}
+	rcu_read_unlock();
 }
 
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_reset_links(struct tipc_node *n)
 {
+	char addr_string[16];
 	int i;
 
+	pr_warn("Resetting all links to %s\n",
+		tipc_addr_string_fill(addr_string, n->addr));
+
 	for (i = 0; i < MAX_BEARERS; i++) {
-		if (l_ptr != n_ptr->links[i])
-			continue;
-		n_ptr->links[i] = NULL;
-		n_ptr->link_cnt--;
+		tipc_node_link_down(n, i, false);
 	}
 }
 
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
+{
+	int state = n->state;
+
+	switch (state) {
+	case SELF_DOWN_PEER_DOWN:
+		switch (evt) {
+		case SELF_ESTABL_CONTACT_EVT:
+			state = SELF_UP_PEER_COMING;
+			break;
+		case PEER_ESTABL_CONTACT_EVT:
+			state = SELF_COMING_PEER_UP;
+			break;
+		case SELF_LOST_CONTACT_EVT:
+		case PEER_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_UP_PEER_UP:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
+			state = NODE_SYNCHING;
+			break;
+		case NODE_FAILOVER_BEGIN_EVT:
+			state = NODE_FAILINGOVER;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+		case NODE_SYNCH_END_EVT:
+		case NODE_FAILOVER_END_EVT:
+			break;
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_DOWN_PEER_LEAVING:
+		switch (evt) {
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_DOWN;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+		case SELF_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_UP_PEER_COMING:
+		switch (evt) {
+		case PEER_ESTABL_CONTACT_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_COMING_PEER_UP:
+		switch (evt) {
+		case SELF_ESTABL_CONTACT_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case SELF_LOST_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case SELF_LEAVING_PEER_DOWN:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_DOWN;
+			break;
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+		case PEER_LOST_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_END_EVT:
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_FAILOVER_BEGIN_EVT:
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case NODE_FAILINGOVER:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case NODE_FAILOVER_END_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case NODE_FAILOVER_BEGIN_EVT:
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
+		case NODE_SYNCH_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	case NODE_SYNCHING:
+		switch (evt) {
+		case SELF_LOST_CONTACT_EVT:
+			state = SELF_DOWN_PEER_LEAVING;
+			break;
+		case PEER_LOST_CONTACT_EVT:
+			state = SELF_LEAVING_PEER_DOWN;
+			break;
+		case NODE_SYNCH_END_EVT:
+			state = SELF_UP_PEER_UP;
+			break;
+		case NODE_FAILOVER_BEGIN_EVT:
+			state = NODE_FAILINGOVER;
+			break;
+		case NODE_SYNCH_BEGIN_EVT:
+		case SELF_ESTABL_CONTACT_EVT:
+		case PEER_ESTABL_CONTACT_EVT:
+			break;
+		case NODE_FAILOVER_END_EVT:
+		default:
+			goto illegal_evt;
+		}
+		break;
+	default:
+		pr_err("Unknown node fsm state %x\n", state);
+		break;
+	}
+	n->state = state;
+	return;
+
+illegal_evt:
+	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
+}
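
Each node state accepts only a subset of events and anything else is reported as illegal. A heavily trimmed user-space model of the same switch-on-state, switch-on-event pattern, with only three invented states rather than the real TIPC transition table:

    #include <stdio.h>

    enum state { PEERS_DOWN, PEER_COMING, PEERS_UP };
    enum event { SELF_UP_EVT, PEER_UP_EVT, PEER_LOST_EVT };

    /* Return the next state, or -1 for an event that is illegal here. */
    static int node_fsm(enum state s, enum event e)
    {
    	switch (s) {
    	case PEERS_DOWN:
    		if (e == SELF_UP_EVT)
    			return PEER_COMING;
    		return -1;
    	case PEER_COMING:
    		if (e == PEER_UP_EVT)
    			return PEERS_UP;
    		if (e == SELF_UP_EVT)
    			return PEER_COMING;      /* tolerated repeat */
    		return -1;
    	case PEERS_UP:
    		if (e == PEER_LOST_EVT)
    			return PEERS_DOWN;
    		return PEERS_UP;
    	}
    	return -1;
    }

    int main(void)
    {
    	int s = PEERS_DOWN;

    	s = node_fsm(s, SELF_UP_EVT);
    	s = node_fsm(s, PEER_UP_EVT);
    	printf("state after both endpoints up: %d (expect %d)\n", s, PEERS_UP);
    	return 0;
    }
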
+
+bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
+{
+	int state = n->state;
+
+	if (likely(state == SELF_UP_PEER_UP))
+		return true;
+
+	if (state == SELF_LEAVING_PEER_DOWN)
+		return false;
+
+	if (state == SELF_DOWN_PEER_LEAVING) {
+		if (msg_peer_node_is_up(hdr))
+			return false;
+	}
+
+	return true;
+}
+
 static void node_established_contact(struct tipc_node *n_ptr)
 {
+	tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
 	n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
 	n_ptr->bclink.oos_state = 0;
 	n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
 	tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
 }
 
-static void node_lost_contact(struct tipc_node *n_ptr)
+static void node_lost_contact(struct tipc_node *n_ptr,
+			      struct sk_buff_head *inputq)
 {
 	char addr_string[16];
 	struct tipc_sock_conn *conn, *safe;
+	struct tipc_link *l;
 	struct list_head *conns = &n_ptr->conn_sks;
 	struct sk_buff *skb;
 	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
@@ -396,21 +844,13 @@
 
 	/* Abort any ongoing link failover */
 	for (i = 0; i < MAX_BEARERS; i++) {
-		struct tipc_link *l_ptr = n_ptr->links[i];
-		if (!l_ptr)
-			continue;
-		l_ptr->flags &= ~LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = 0;
-		l_ptr->failover_pkts = 0;
-		kfree_skb(l_ptr->failover_skb);
-		l_ptr->failover_skb = NULL;
-		tipc_link_reset_fragments(l_ptr);
+		l = n_ptr->links[i].link;
+		if (l)
+			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
 	}
 
-	n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
 	/* Prevent re-contact with node until cleanup is done */
-	n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+	tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
 
 	/* Notify publications from this node */
 	n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
@@ -421,10 +861,8 @@
 				      SHORT_H_SIZE, 0, tn->own_addr,
 				      conn->peer_node, conn->port,
 				      conn->peer_port, TIPC_ERR_NO_NODE);
-		if (likely(skb)) {
-			skb_queue_tail(n_ptr->inputq, skb);
-			n_ptr->action_flags |= TIPC_MSG_EVT;
-		}
+		if (likely(skb))
+			skb_queue_tail(inputq, skb);
 		list_del(&conn->list);
 		kfree(conn);
 	}
@@ -453,7 +891,7 @@
 		goto exit;
 
 	tipc_node_lock(node);
-	link = node->links[bearer_id];
+	link = node->links[bearer_id].link;
 	if (link) {
 		strncpy(linkname, link->name, len);
 		err = 0;
@@ -471,27 +909,20 @@
 	u32 flags = node->action_flags;
 	u32 link_id = 0;
 	struct list_head *publ_list;
-	struct sk_buff_head *inputq = node->inputq;
-	struct sk_buff_head *namedq;
 
-	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
-		node->action_flags = 0;
+	if (likely(!flags)) {
 		spin_unlock_bh(&node->lock);
-		if (flags == TIPC_MSG_EVT)
-			tipc_sk_rcv(net, inputq);
 		return;
 	}
 
 	addr = node->addr;
 	link_id = node->link_id;
-	namedq = node->namedq;
 	publ_list = &node->publ_list;
 
-	node->action_flags &= ~(TIPC_MSG_EVT |
-				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+	node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
 				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
 				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+				TIPC_BCAST_RESET);
 
 	spin_unlock_bh(&node->lock);
 
@@ -512,17 +943,11 @@
 		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
 				      link_id, addr);
 
-	if (flags & TIPC_MSG_EVT)
-		tipc_sk_rcv(net, inputq);
-
-	if (flags & TIPC_NAMED_MSG_EVT)
-		tipc_named_rcv(net, namedq);
-
 	if (flags & TIPC_BCAST_MSG_EVT)
 		tipc_bclink_input(net);
 
 	if (flags & TIPC_BCAST_RESET)
-		tipc_link_reset_all(node);
+		tipc_node_reset_links(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -559,6 +984,290 @@
 	return -EMSGSIZE;
 }
 
+static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
+					       int *bearer_id,
+					       struct tipc_media_addr **maddr)
+{
+	int id = n->active_links[sel & 1];
+
+	if (unlikely(id < 0))
+		return NULL;
+
+	*bearer_id = id;
+	*maddr = &n->links[id].maddr;
+	return n->links[id].link;
+}
+
+/**
+ * tipc_node_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+		   u32 dnode, int selector)
+{
+	struct tipc_link *l = NULL;
+	struct tipc_node *n;
+	struct sk_buff_head xmitq;
+	struct tipc_media_addr *maddr;
+	int bearer_id;
+	int rc = -EHOSTUNREACH;
+
+	__skb_queue_head_init(&xmitq);
+	n = tipc_node_find(net, dnode);
+	if (likely(n)) {
+		tipc_node_lock(n);
+		l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
+		if (likely(l))
+			rc = tipc_link_xmit(l, list, &xmitq);
+		tipc_node_unlock(n);
+		if (unlikely(rc == -ENOBUFS))
+			tipc_node_link_down(n, bearer_id, false);
+		tipc_node_put(n);
+	}
+	if (likely(!rc)) {
+		tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
+		return 0;
+	}
+	if (likely(in_own_node(net, dnode))) {
+		tipc_sk_rcv(net, list);
+		return 0;
+	}
+	return rc;
+}
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+		       u32 selector)
+{
+	struct sk_buff_head head;
+	int rc;
+
+	skb_queue_head_init(&head);
+	__skb_queue_tail(&head, skb);
+	rc = tipc_node_xmit(net, &head, dnode, selector);
+	if (rc == -ELINKCONG)
+		kfree_skb(skb);
+	return 0;
+}
+
+/**
+ * tipc_node_check_state - check and if necessary update node state
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * Returns true if state is ok, otherwise consumes buffer and returns false
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+				  int bearer_id, struct sk_buff_head *xmitq)
+{
+	struct tipc_msg *hdr = buf_msg(skb);
+	int usr = msg_user(hdr);
+	int mtyp = msg_type(hdr);
+	u16 oseqno = msg_seqno(hdr);
+	u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
+	u16 exp_pkts = msg_msgcnt(hdr);
+	u16 rcv_nxt, syncpt, dlv_nxt;
+	int state = n->state;
+	struct tipc_link *l, *tnl, *pl = NULL;
+	struct tipc_media_addr *maddr;
+	int i, pb_id;
+
+	l = n->links[bearer_id].link;
+	if (!l)
+		return false;
+	rcv_nxt = l->rcv_nxt;
+
+	if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+		return true;
+
+	/* Find parallel link, if any */
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if ((i != bearer_id) && n->links[i].link) {
+			pl = n->links[i].link;
+			break;
+		}
+	}
+
+	/* Update node accessibility if applicable */
+	if (state == SELF_UP_PEER_COMING) {
+		if (!tipc_link_is_up(l))
+			return true;
+		if (!msg_peer_link_is_up(hdr))
+			return true;
+		tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+	}
+
+	if (state == SELF_DOWN_PEER_LEAVING) {
+		if (msg_peer_node_is_up(hdr))
+			return false;
+		tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+	}
+
+	/* Ignore duplicate packets */
+	if (less(oseqno, rcv_nxt))
+		return true;
+
+	/* Initiate or update failover mode if applicable */
+	if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+		syncpt = oseqno + exp_pkts - 1;
+		if (pl && tipc_link_is_up(pl)) {
+			pb_id = pl->bearer_id;
+			__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+			tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+		}
+		/* If pkts arrive out of order, use lowest calculated syncpt */
+		if (less(syncpt, n->sync_point))
+			n->sync_point = syncpt;
+	}
+
+	/* Open parallel link when tunnel link reaches synch point */
+	if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
+		if (!more(rcv_nxt, n->sync_point))
+			return true;
+		tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+		if (pl)
+			tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+		return true;
+	}
+
+	/* No synching needed if only one link */
+	if (!pl || !tipc_link_is_up(pl))
+		return true;
+
+	/* Initiate or update synch mode if applicable */
+	if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+		syncpt = iseqno + exp_pkts - 1;
+		if (!tipc_link_is_up(l)) {
+			tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+			__tipc_node_link_up(n, bearer_id, xmitq);
+		}
+		if (n->state == SELF_UP_PEER_UP) {
+			n->sync_point = syncpt;
+			tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+			tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+		}
+		if (less(syncpt, n->sync_point))
+			n->sync_point = syncpt;
+	}
+
+	/* Open tunnel link when parallel link reaches synch point */
+	if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
+		if (tipc_link_is_synching(l)) {
+			tnl = l;
+		} else {
+			tnl = pl;
+			pl = l;
+		}
+		dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq));
+		if (more(dlv_nxt, n->sync_point)) {
+			tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+			tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+			return true;
+		}
+		if (l == pl)
+			return true;
+		if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+			return true;
+		if (usr == LINK_PROTOCOL)
+			return true;
+		return false;
+	}
+	return true;
+}
+
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @bearer: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+	struct sk_buff_head xmitq;
+	struct tipc_node *n;
+	struct tipc_msg *hdr = buf_msg(skb);
+	int usr = msg_user(hdr);
+	int bearer_id = b->identity;
+	struct tipc_link_entry *le;
+	int rc = 0;
+
+	__skb_queue_head_init(&xmitq);
+
+	/* Ensure message is well-formed */
+	if (unlikely(!tipc_msg_validate(skb)))
+		goto discard;
+
+	/* Handle arrival of a non-unicast link packet */
+	if (unlikely(msg_non_seq(hdr))) {
+		if (usr == LINK_CONFIG)
+			tipc_disc_rcv(net, skb, b);
+		else
+			tipc_bclink_rcv(net, skb);
+		return;
+	}
+
+	/* Locate neighboring node that sent packet */
+	n = tipc_node_find(net, msg_prevnode(hdr));
+	if (unlikely(!n))
+		goto discard;
+	le = &n->links[bearer_id];
+
+	tipc_node_lock(n);
+
+	/* Is reception permitted at the moment? */
+	if (!tipc_node_filter_pkt(n, hdr))
+		goto unlock;
+
+	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+		tipc_bclink_sync_state(n, hdr);
+
+	/* Release acked broadcast packets */
+	if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
+		tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
+
+	/* Check and if necessary update node state */
+	if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
+		rc = tipc_link_rcv(le->link, skb, &xmitq);
+		skb = NULL;
+	}
+unlock:
+	tipc_node_unlock(n);
+
+	if (unlikely(rc & TIPC_LINK_UP_EVT))
+		tipc_node_link_up(n, bearer_id, &xmitq);
+
+	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+		tipc_node_link_down(n, bearer_id, false);
+
+	if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+		tipc_named_rcv(net, &n->bclink.namedq);
+
+	if (!skb_queue_empty(&le->inputq))
+		tipc_sk_rcv(net, &le->inputq);
+
+	if (!skb_queue_empty(&xmitq))
+		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+	tipc_node_put(n);
+discard:
+	kfree_skb(skb);
+}
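
A pattern that recurs throughout this series: anything that must be sent while the node lock is held is collected on a local xmitq and only handed to the bearer after the lock has been dropped. A small pthread-based sketch of that deferred-transmit idea; the queue type and helpers are illustrative, with tipc_bearer_xmit() being the kernel counterpart of the final step:

    #include <pthread.h>
    #include <stdio.h>

    #define QMAX 16

    struct queue { int pkt[QMAX]; int len; };

    static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct queue shared_state;        /* protected by node_lock */

    static void enqueue(struct queue *q, int pkt)
    {
    	if (q->len < QMAX)
    		q->pkt[q->len++] = pkt;
    }

    /* "Send" outside the lock; in the kernel this role is played by
     * tipc_bearer_xmit(). */
    static void bearer_xmit(struct queue *q)
    {
    	for (int i = 0; i < q->len; i++)
    		printf("xmit pkt %d\n", q->pkt[i]);
    	q->len = 0;
    }

    static void rcv_packet(int pkt)
    {
    	struct queue xmitq = { .len = 0 };   /* local, per-call queue */

    	pthread_mutex_lock(&node_lock);
    	enqueue(&shared_state, pkt);         /* update protected state  */
    	enqueue(&xmitq, pkt + 100);          /* queue a protocol reply  */
    	pthread_mutex_unlock(&node_lock);

    	bearer_xmit(&xmitq);                 /* transmit with lock released */
    }

    int main(void)
    {
    	rcv_packet(1);
    	rcv_packet(2);
    	return 0;
    }
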
+
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int err;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 5a834cf..344b3e7 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,23 +45,19 @@
 /* Out-of-range value for node signature */
 #define INVALID_NODE_SIG	0x10000
 
+#define INVALID_BEARER_ID -1
+
 /* Flags used to take different actions according to flag type
- * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
- * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
  * TIPC_NOTIFY_NODE_UP: notify node is up
  * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
-	TIPC_MSG_EVT                    = 1,
-	TIPC_WAIT_PEER_LINKS_DOWN	= (1 << 1),
-	TIPC_WAIT_OWN_LINKS_DOWN	= (1 << 2),
 	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
 	TIPC_NOTIFY_NODE_UP		= (1 << 4),
 	TIPC_WAKEUP_BCAST_USERS		= (1 << 5),
 	TIPC_NOTIFY_LINK_UP		= (1 << 6),
 	TIPC_NOTIFY_LINK_DOWN		= (1 << 7),
-	TIPC_NAMED_MSG_EVT		= (1 << 8),
 	TIPC_BCAST_MSG_EVT		= (1 << 9),
 	TIPC_BCAST_RESET		= (1 << 10)
 };
@@ -85,10 +81,17 @@
 	u32 deferred_size;
 	struct sk_buff_head deferdq;
 	struct sk_buff *reasm_buf;
-	int inputq_map;
+	struct sk_buff_head namedq;
 	bool recv_permitted;
 };
 
+struct tipc_link_entry {
+	struct tipc_link *link;
+	u32 mtu;
+	struct sk_buff_head inputq;
+	struct tipc_media_addr maddr;
+};
+
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
@@ -98,11 +101,12 @@
  * @hash: links to adjacent nodes in unsorted hash chain
  * @inputq: pointer to input queue containing messages for msg event
  * @namedq: pointer to name table input queue with name table messages
- * @curr_link: the link holding the node lock, if any
- * @active_links: pointers to active links to node
- * @links: pointers to all links to node
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
  * @action_flags: bit mask of different types of node actions
  * @bclink: broadcast-related info
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
  * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
  * @link_cnt: number of links to node
@@ -118,14 +122,13 @@
 	spinlock_t lock;
 	struct net *net;
 	struct hlist_node hash;
-	struct sk_buff_head *inputq;
-	struct sk_buff_head *namedq;
-	struct tipc_link *active_links[2];
-	u32 act_mtus[2];
-	struct tipc_link *links[MAX_BEARERS];
+	int active_links[2];
+	struct tipc_link_entry links[MAX_BEARERS];
 	int action_flags;
 	struct tipc_node_bclink bclink;
 	struct list_head list;
+	int state;
+	u16 sync_point;
 	int link_cnt;
 	u16 working_links;
 	u16 capabilities;
@@ -133,25 +136,32 @@
 	u32 link_id;
 	struct list_head publ_list;
 	struct list_head conn_sks;
+	unsigned long keepalive_intv;
+	struct timer_list timer;
 	struct rcu_head rcu;
 };
 
 struct tipc_node *tipc_node_find(struct net *net, u32 addr);
 void tipc_node_put(struct tipc_node *node);
-struct tipc_node *tipc_node_create(struct net *net, u32 addr);
 void tipc_node_stop(struct net *net);
+void tipc_node_check_dest(struct net *net, u32 onode,
+			  struct tipc_bearer *bearer,
+			  u16 capabilities, u32 signature,
+			  struct tipc_media_addr *maddr,
+			  bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_is_up(struct tipc_node *n_ptr);
+bool tipc_node_is_up(struct tipc_node *n);
 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
 			   char *linkname, size_t len);
 void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+		   int selector);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+		       u32 selector);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 static inline void tipc_node_lock(struct tipc_node *node)
@@ -159,26 +169,30 @@
 	spin_lock_bh(&node->lock);
 }
 
-static inline bool tipc_node_blocked(struct tipc_node *node)
+static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
 {
-	return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
-		TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
+	int bearer_id = n->active_links[sel & 1];
+
+	if (unlikely(bearer_id == INVALID_BEARER_ID))
+		return NULL;
+
+	return n->links[bearer_id].link;
 }
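
node_active_link() and tipc_node_get_mtu() both index active_links[] with `sel & 1`, so different selectors (typically port numbers) alternate between the two active bearers when both slots are populated. A tiny illustration of that selection rule with made-up values:

    #include <stdio.h>

    #define INVALID_ID -1

    int main(void)
    {
    	int active_links[2] = { 0, 1 };      /* two active bearer ids */

    	for (unsigned sel = 100; sel < 104; sel++) {
    		int id = active_links[sel & 1];

    		if (id == INVALID_ID)
    			continue;
    		printf("selector %u -> bearer %d\n", sel, id);
    	}
    	return 0;
    }
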
 
-static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
+static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
 {
-	struct tipc_node *node;
-	u32 mtu;
+	struct tipc_node *n;
+	int bearer_id;
+	unsigned int mtu = MAX_MSG_SIZE;
 
-	node = tipc_node_find(net, addr);
+	n = tipc_node_find(net, addr);
+	if (unlikely(!n))
+		return mtu;
 
-	if (likely(node)) {
-		mtu = node->act_mtus[selector & 1];
-		tipc_node_put(node);
-	} else {
-		mtu = MAX_MSG_SIZE;
-	}
-
+	bearer_id = n->active_links[sel & 1];
+	if (likely(bearer_id != INVALID_BEARER_ID))
+		mtu = n->links[bearer_id].mtu;
+	tipc_node_put(n);
 	return mtu;
 }
 
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3a7567f..1060d52 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -248,6 +248,22 @@
 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 }
 
+/* tipc_sk_respond(): send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+	u32 selector;
+	u32 dnode;
+	u32 onode = tipc_own_addr(sock_net(sk));
+
+	if (!tipc_msg_reverse(onode, &skb, err))
+		return;
+
+	dnode = msg_destnode(buf_msg(skb));
+	selector = msg_origport(buf_msg(skb));
+	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
 /**
  * tsk_rej_rx_queue - reject all buffers in socket receive queue
  *
@@ -256,13 +272,9 @@
 static void tsk_rej_rx_queue(struct sock *sk)
 {
 	struct sk_buff *skb;
-	u32 dnode;
-	u32 own_node = tsk_own_node(tipc_sk(sk));
 
-	while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-		if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
-			tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
-	}
+	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 }
 
 /* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@
 				tsk->connected = 0;
 				tipc_node_remove_conn(net, dnode, tsk->portid);
 			}
-			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-					     TIPC_ERR_NO_PORT))
-				tipc_link_xmit_skb(net, skb, dnode, 0);
+			tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 		}
 	}
 
@@ -456,7 +466,7 @@
 				      tsk_own_node(tsk), tsk_peer_port(tsk),
 				      tsk->portid, TIPC_ERR_NO_PORT);
 		if (skb)
-			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 		tipc_node_remove_conn(net, dnode, tsk->portid);
 	}
 
@@ -686,21 +696,22 @@
 
 	do {
 		rc = tipc_bclink_xmit(net, pktchain);
-		if (likely(rc >= 0)) {
-			rc = dsz;
-			break;
+		if (likely(!rc))
+			return dsz;
+
+		if (rc == -ELINKCONG) {
+			tsk->link_cong = 1;
+			rc = tipc_wait_for_sndmsg(sock, &timeo);
+			if (!rc)
+				continue;
 		}
+		__skb_queue_purge(pktchain);
 		if (rc == -EMSGSIZE) {
 			msg->msg_iter = save;
 			goto new_mtu;
 		}
-		if (rc != -ELINKCONG)
-			break;
-		tipc_sk(sk)->link_cong = 1;
-		rc = tipc_wait_for_sndmsg(sock, &timeo);
-		if (rc)
-			__skb_queue_purge(pktchain);
-	} while (!rc);
+		break;
+	} while (1);
 	return rc;
 }
 
@@ -763,35 +774,35 @@
 /**
  * tipc_sk_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 {
-	struct tipc_msg *msg = buf_msg(*skb);
+	struct sock *sk = &tsk->sk;
+	struct tipc_msg *hdr = buf_msg(skb);
+	int mtyp = msg_type(hdr);
 	int conn_cong;
-	u32 dnode;
-	u32 own_node = tsk_own_node(tsk);
+
 	/* Ignore if connection cannot be validated: */
-	if (!tsk_peer_msg(tsk, msg))
+	if (!tsk_peer_msg(tsk, hdr))
 		goto exit;
 
 	tsk->probing_state = TIPC_CONN_OK;
 
-	if (msg_type(msg) == CONN_ACK) {
+	if (mtyp == CONN_PROBE) {
+		msg_set_type(hdr, CONN_PROBE_REPLY);
+		tipc_sk_respond(sk, skb, TIPC_OK);
+		return;
+	} else if (mtyp == CONN_ACK) {
 		conn_cong = tsk_conn_cong(tsk);
-		tsk->sent_unacked -= msg_msgcnt(msg);
+		tsk->sent_unacked -= msg_msgcnt(hdr);
 		if (conn_cong)
-			tsk->sk.sk_write_space(&tsk->sk);
-	} else if (msg_type(msg) == CONN_PROBE) {
-		if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
-			msg_set_type(msg, CONN_PROBE_REPLY);
-			return;
-		}
+			sk->sk_write_space(sk);
+	} else if (mtyp != CONN_PROBE_REPLY) {
+		pr_warn("Received unknown CONN_PROTO msg\n");
 	}
-	/* Do nothing if msg_type() == CONN_PROBE_REPLY */
 exit:
-	kfree_skb(*skb);
-	*skb = NULL;
+	kfree_skb(skb);
 }
 
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -924,24 +935,25 @@
 	do {
 		skb = skb_peek(pktchain);
 		TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-		rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
-		if (likely(rc >= 0)) {
+		rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+		if (likely(!rc)) {
 			if (sock->state != SS_READY)
 				sock->state = SS_CONNECTING;
-			rc = dsz;
-			break;
+			return dsz;
 		}
+		if (rc == -ELINKCONG) {
+			tsk->link_cong = 1;
+			rc = tipc_wait_for_sndmsg(sock, &timeo);
+			if (!rc)
+				continue;
+		}
+		__skb_queue_purge(pktchain);
 		if (rc == -EMSGSIZE) {
 			m->msg_iter = save;
 			goto new_mtu;
 		}
-		if (rc != -ELINKCONG)
-			break;
-		tsk->link_cong = 1;
-		rc = tipc_wait_for_sndmsg(sock, &timeo);
-		if (rc)
-			__skb_queue_purge(pktchain);
-	} while (!rc);
+		break;
+	} while (1);
 
 	return rc;
 }
@@ -1043,15 +1055,16 @@
 		return rc;
 	do {
 		if (likely(!tsk_conn_cong(tsk))) {
-			rc = tipc_link_xmit(net, pktchain, dnode, portid);
+			rc = tipc_node_xmit(net, pktchain, dnode, portid);
 			if (likely(!rc)) {
 				tsk->sent_unacked++;
 				sent += send;
 				if (sent == dsz)
-					break;
+					return dsz;
 				goto next;
 			}
 			if (rc == -EMSGSIZE) {
+				__skb_queue_purge(pktchain);
 				tsk->max_pkt = tipc_node_get_mtu(net, dnode,
 								 portid);
 				m->msg_iter = save;
@@ -1059,13 +1072,13 @@
 			}
 			if (rc != -ELINKCONG)
 				break;
+
 			tsk->link_cong = 1;
 		}
 		rc = tipc_wait_for_sndpkt(sock, &timeo);
-		if (rc)
-			__skb_queue_purge(pktchain);
 	} while (!rc);
 
+	__skb_queue_purge(pktchain);
 	return sent ? sent : rc;
 }
 
@@ -1221,7 +1234,7 @@
 		return;
 	msg = buf_msg(skb);
 	msg_set_msgcnt(msg, ack);
-	tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+	tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1507,82 +1520,81 @@
  * @tsk: TIPC socket
  * @skb: pointer to message buffer. Set to NULL if buffer is consumed
  *
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything is OK, false otherwise
  */
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 {
 	struct sock *sk = &tsk->sk;
 	struct net *net = sock_net(sk);
 	struct socket *sock = sk->sk_socket;
-	struct tipc_msg *msg = buf_msg(*skb);
-	int retval = -TIPC_ERR_NO_PORT;
+	struct tipc_msg *hdr = buf_msg(skb);
 
-	if (msg_mcast(msg))
-		return retval;
+	if (unlikely(msg_mcast(hdr)))
+		return false;
 
 	switch ((int)sock->state) {
 	case SS_CONNECTED:
+
 		/* Accept only connection-based messages sent by peer */
-		if (tsk_peer_msg(tsk, msg)) {
-			if (unlikely(msg_errcode(msg))) {
-				sock->state = SS_DISCONNECTING;
-				tsk->connected = 0;
-				/* let timer expire on it's own */
-				tipc_node_remove_conn(net, tsk_peer_node(tsk),
-						      tsk->portid);
-			}
-			retval = TIPC_OK;
+		if (unlikely(!tsk_peer_msg(tsk, hdr)))
+			return false;
+
+		if (unlikely(msg_errcode(hdr))) {
+			sock->state = SS_DISCONNECTING;
+			tsk->connected = 0;
+			/* Let timer expire on its own */
+			tipc_node_remove_conn(net, tsk_peer_node(tsk),
+					      tsk->portid);
 		}
-		break;
+		return true;
+
 	case SS_CONNECTING:
+
 		/* Accept only ACK or NACK message */
+		if (unlikely(!msg_connected(hdr)))
+			return false;
 
-		if (unlikely(!msg_connected(msg)))
-			break;
-
-		if (unlikely(msg_errcode(msg))) {
+		if (unlikely(msg_errcode(hdr))) {
 			sock->state = SS_DISCONNECTING;
 			sk->sk_err = ECONNREFUSED;
-			retval = TIPC_OK;
-			break;
+			return true;
 		}
 
-		if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+		if (unlikely(!msg_isdata(hdr))) {
 			sock->state = SS_DISCONNECTING;
 			sk->sk_err = EINVAL;
-			retval = TIPC_OK;
-			break;
+			return true;
 		}
 
-		tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
-		msg_set_importance(&tsk->phdr, msg_importance(msg));
+		tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+		msg_set_importance(&tsk->phdr, msg_importance(hdr));
 		sock->state = SS_CONNECTED;
 
-		/* If an incoming message is an 'ACK-', it should be
-		 * discarded here because it doesn't contain useful
-		 * data. In addition, we should try to wake up
-		 * connect() routine if sleeping.
-		 */
-		if (msg_data_sz(msg) == 0) {
-			kfree_skb(*skb);
-			*skb = NULL;
-			if (waitqueue_active(sk_sleep(sk)))
-				wake_up_interruptible(sk_sleep(sk));
-		}
-		retval = TIPC_OK;
-		break;
+		/* If 'ACK+' message, add to socket receive queue */
+		if (msg_data_sz(hdr))
+			return true;
+
+		/* If empty 'ACK-' message, wake up sleeping connect() */
+		if (waitqueue_active(sk_sleep(sk)))
+			wake_up_interruptible(sk_sleep(sk));
+
+		/* 'ACK-' message is neither accepted nor rejected: */
+		msg_set_dest_droppable(hdr, 1);
+		return false;
+
 	case SS_LISTENING:
 	case SS_UNCONNECTED:
+
 		/* Accept only SYN message */
-		if (!msg_connected(msg) && !(msg_errcode(msg)))
-			retval = TIPC_OK;
+		if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+			return true;
 		break;
 	case SS_DISCONNECTING:
 		break;
 	default:
 		pr_err("Unknown socket state %u\n", sock->state);
 	}
-	return retval;
+	return false;
 }
 
 /**
@@ -1617,61 +1629,70 @@
 /**
  * filter_rcv - validate incoming message
  * @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
  *
  * Enqueues message on receive queue if acceptable; optionally handles
  * disconnect indication for a connected socket.
  *
  * Called with socket lock already taken
  *
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
  */
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct socket *sock = sk->sk_socket;
 	struct tipc_sock *tsk = tipc_sk(sk);
-	struct tipc_msg *msg = buf_msg(*skb);
-	unsigned int limit = rcvbuf_limit(sk, *skb);
-	int rc = TIPC_OK;
+	struct tipc_msg *hdr = buf_msg(skb);
+	unsigned int limit = rcvbuf_limit(sk, skb);
+	int err = TIPC_OK;
+	int usr = msg_user(hdr);
 
-	if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+	if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
 		tipc_sk_proto_rcv(tsk, skb);
-		return TIPC_OK;
+		return false;
 	}
 
-	if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
-		kfree_skb(*skb);
+	if (unlikely(usr == SOCK_WAKEUP)) {
+		kfree_skb(skb);
 		tsk->link_cong = 0;
 		sk->sk_write_space(sk);
-		*skb = NULL;
-		return TIPC_OK;
+		return false;
 	}
 
-	/* Reject message if it is wrong sort of message for socket */
-	if (msg_type(msg) > TIPC_DIRECT_MSG)
-		return -TIPC_ERR_NO_PORT;
+	/* Drop if illegal message type */
+	if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+		kfree_skb(skb);
+		return false;
+	}
 
-	if (sock->state == SS_READY) {
-		if (msg_connected(msg))
-			return -TIPC_ERR_NO_PORT;
-	} else {
-		rc = filter_connect(tsk, skb);
-		if (rc != TIPC_OK || !*skb)
-			return rc;
+	/* Reject if wrong message type for current socket state */
+	if (unlikely(sock->state == SS_READY)) {
+		if (msg_connected(hdr)) {
+			err = TIPC_ERR_NO_PORT;
+			goto reject;
+		}
+	} else if (unlikely(!filter_connect(tsk, skb))) {
+		err = TIPC_ERR_NO_PORT;
+		goto reject;
 	}
 
 	/* Reject message if there isn't room to queue it */
-	if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
-		return -TIPC_ERR_OVERLOAD;
+	if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+		err = TIPC_ERR_OVERLOAD;
+		goto reject;
+	}
 
 	/* Enqueue message */
-	TIPC_SKB_CB(*skb)->handle = NULL;
-	__skb_queue_tail(&sk->sk_receive_queue, *skb);
-	skb_set_owner_r(*skb, sk);
+	TIPC_SKB_CB(skb)->handle = NULL;
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+	skb_set_owner_r(skb, sk);
 
 	sk->sk_data_ready(sk);
-	*skb = NULL;
-	return TIPC_OK;
+	return true;
+
+reject:
+	tipc_sk_respond(sk, skb, err);
+	return false;
 }
 
 /**
@@ -1685,22 +1706,10 @@
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	int err;
-	atomic_t *dcnt;
-	u32 dnode;
-	struct tipc_sock *tsk = tipc_sk(sk);
-	struct net *net = sock_net(sk);
-	uint truesize = skb->truesize;
+	unsigned int truesize = skb->truesize;
 
-	err = filter_rcv(sk, &skb);
-	if (likely(!skb)) {
-		dcnt = &tsk->dupl_rcvcnt;
-		if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-			atomic_add(truesize, dcnt);
-		return 0;
-	}
-	if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
-		tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+	if (likely(filter_rcv(sk, skb)))
+		atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
 	return 0;
 }
 
@@ -1710,45 +1719,43 @@
  * @inputq: list of incoming buffers with potentially different destinations
  * @sk: socket where the buffers should be enqueued
  * @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
  *
  * Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
  */
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-			   u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+			    u32 dport)
 {
 	unsigned int lim;
 	atomic_t *dcnt;
-	int err;
 	struct sk_buff *skb;
 	unsigned long time_limit = jiffies + 2;
 
 	while (skb_queue_len(inputq)) {
 		if (unlikely(time_after_eq(jiffies, time_limit)))
-			return TIPC_OK;
+			return;
+
 		skb = tipc_skb_dequeue(inputq, dport);
 		if (unlikely(!skb))
-			return TIPC_OK;
+			return;
+
+		/* Add message directly to receive queue if possible */
 		if (!sock_owned_by_user(sk)) {
-			err = filter_rcv(sk, &skb);
-			if (likely(!skb))
-				continue;
-			*_skb = skb;
-			return err;
+			filter_rcv(sk, skb);
+			continue;
 		}
+
+		/* Try backlog, compensating for double-counted bytes */
 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
 		if (sk->sk_backlog.len)
 			atomic_set(dcnt, 0);
 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
 		if (likely(!sk_add_backlog(sk, skb, lim)))
 			continue;
-		*_skb = skb;
-		return -TIPC_ERR_OVERLOAD;
+
+		/* Overload => reject message back to sender */
+		tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+		break;
 	}
-	return TIPC_OK;
 }
 
 /**
@@ -1756,49 +1763,46 @@
  * @inputq: buffer list containing the buffers
  * Consumes all buffers in list until inputq is empty
  * Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
  */
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
 	u32 dnode, dport = 0;
 	int err;
-	struct sk_buff *skb;
 	struct tipc_sock *tsk;
-	struct tipc_net *tn;
 	struct sock *sk;
+	struct sk_buff *skb;
 
 	while (skb_queue_len(inputq)) {
-		err = -TIPC_ERR_NO_PORT;
-		skb = NULL;
 		dport = tipc_skb_peek_port(inputq, dport);
 		tsk = tipc_sk_lookup(net, dport);
+
 		if (likely(tsk)) {
 			sk = &tsk->sk;
 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-				err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+				tipc_sk_enqueue(inputq, sk, dport);
 				spin_unlock_bh(&sk->sk_lock.slock);
-				dport = 0;
 			}
 			sock_put(sk);
-		} else {
-			skb = tipc_skb_dequeue(inputq, dport);
-		}
-		if (likely(!skb))
 			continue;
-		if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
-			goto xmit;
-		if (!err) {
-			dnode = msg_destnode(buf_msg(skb));
-			goto xmit;
 		}
-		tn = net_generic(net, tipc_net_id);
-		if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+		/* No destination socket => dequeue skb if still there */
+		skb = tipc_skb_dequeue(inputq, dport);
+		if (!skb)
+			return;
+
+		/* Try secondary lookup if unresolved named message */
+		err = TIPC_ERR_NO_PORT;
+		if (tipc_msg_lookup_dest(net, skb, &err))
+			goto xmit;
+
+		/* Prepare for message rejection */
+		if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
 			continue;
 xmit:
-		tipc_link_xmit_skb(net, skb, dnode, dport);
+		dnode = msg_destnode(buf_msg(skb));
+		tipc_node_xmit_skb(net, skb, dnode, dport);
 	}
-	return err ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2067,7 +2071,10 @@
 	struct net *net = sock_net(sk);
 	struct tipc_sock *tsk = tipc_sk(sk);
 	struct sk_buff *skb;
-	u32 dnode;
+	u32 dnode = tsk_peer_node(tsk);
+	u32 dport = tsk_peer_port(tsk);
+	u32 onode = tipc_own_addr(net);
+	u32 oport = tsk->portid;
 	int res;
 
 	if (how != SHUT_RDWR)
@@ -2080,6 +2087,8 @@
 	case SS_CONNECTED:
 
 restart:
+		dnode = tsk_peer_node(tsk);
+
 		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
 		skb = __skb_dequeue(&sk->sk_receive_queue);
 		if (skb) {
@@ -2087,19 +2096,13 @@
 				kfree_skb(skb);
 				goto restart;
 			}
-			if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-					     TIPC_CONN_SHUTDOWN))
-				tipc_link_xmit_skb(net, skb, dnode,
-						   tsk->portid);
+			tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
 		} else {
-			dnode = tsk_peer_node(tsk);
-
 			skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
 					      TIPC_CONN_MSG, SHORT_H_SIZE,
-					      0, dnode, tsk_own_node(tsk),
-					      tsk_peer_port(tsk),
-					      tsk->portid, TIPC_CONN_SHUTDOWN);
-			tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+					      0, dnode, onode, dport, oport,
+					      TIPC_CONN_SHUTDOWN);
+			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
 		}
 		tsk->connected = 0;
 		sock->state = SS_DISCONNECTING;
@@ -2161,7 +2164,7 @@
 	}
 	bh_unlock_sock(sk);
 	if (skb)
-		tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
+		tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
 exit:
 	sock_put(sk);
 }
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index bf65513..4241f22 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,7 +44,7 @@
 				  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 		       struct sk_buff_head *inputq);
 void tipc_sk_reinit(struct net *net);
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 66deebc..c170d31 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -194,7 +194,8 @@
 			.saddr = src->ipv6,
 			.flowi6_proto = IPPROTO_UDP
 		};
-		err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+		err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+						 &fl6);
 		if (err)
 			goto tx_error;
 		ttl = ip6_dst_hoplimit(ndst);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 7d73054..477364a 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -135,8 +135,7 @@
  * @state: New state of the RF kill switch. %WIMAX_RF_ON radio on,
  *     %WIMAX_RF_OFF radio off.
  *
- * Reports changes in the software RF switch state to the the WiMAX
- * stack.
+ * Reports changes in the software RF switch state to the WiMAX stack.
  *
  * The main use is during initialization, so the driver can query the
  * device for its current software radio kill switch state and feed it
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 2a0bbd2..3893409 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -407,6 +407,9 @@
 	INIT_LIST_HEAD(&rdev->bss_list);
 	INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
 	INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
+	INIT_LIST_HEAD(&rdev->mlme_unreg);
+	spin_lock_init(&rdev->mlme_unreg_lock);
+	INIT_WORK(&rdev->mlme_unreg_wk, cfg80211_mlme_unreg_wk);
 	INIT_DELAYED_WORK(&rdev->dfs_update_channels_wk,
 			  cfg80211_dfs_channels_update_work);
 #ifdef CONFIG_CFG80211_WEXT
@@ -802,6 +805,7 @@
 	cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
 	flush_work(&rdev->destroy_work);
 	flush_work(&rdev->sched_scan_stop_wk);
+	flush_work(&rdev->mlme_unreg_wk);
 
 #ifdef CONFIG_PM
 	if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -855,6 +859,7 @@
 
 	switch (wdev->iftype) {
 	case NL80211_IFTYPE_P2P_DEVICE:
+		cfg80211_mlme_purge_registrations(wdev);
 		cfg80211_stop_p2p_device(rdev, wdev);
 		break;
 	default:
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 311eef2..b9d5bc8 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -59,6 +59,10 @@
 	struct list_head beacon_registrations;
 	spinlock_t beacon_registrations_lock;
 
+	struct list_head mlme_unreg;
+	spinlock_t mlme_unreg_lock;
+	struct work_struct mlme_unreg_wk;
+
 	/* protected by RTNL only */
 	int num_running_ifaces;
 	int num_running_monitor_ifaces;
@@ -348,6 +352,7 @@
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
 				u16 frame_type, const u8 *match_data,
 				int match_len);
+void cfg80211_mlme_unreg_wk(struct work_struct *wk);
 void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 7aae329..fb44fa3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -2,6 +2,7 @@
  * cfg80211 MLME SAP interface
  *
  * Copyright (c) 2009, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2015		Intel Deutschland GmbH
  */
 
 #include <linux/kernel.h>
@@ -389,6 +390,7 @@
 
 struct cfg80211_mgmt_registration {
 	struct list_head list;
+	struct wireless_dev *wdev;
 
 	u32 nlportid;
 
@@ -399,6 +401,46 @@
 	u8 match[];
 };
 
+static void
+cfg80211_process_mlme_unregistrations(struct cfg80211_registered_device *rdev)
+{
+	struct cfg80211_mgmt_registration *reg;
+
+	ASSERT_RTNL();
+
+	spin_lock_bh(&rdev->mlme_unreg_lock);
+	while ((reg = list_first_entry_or_null(&rdev->mlme_unreg,
+					       struct cfg80211_mgmt_registration,
+					       list))) {
+		list_del(&reg->list);
+		spin_unlock_bh(&rdev->mlme_unreg_lock);
+
+		if (rdev->ops->mgmt_frame_register) {
+			u16 frame_type = le16_to_cpu(reg->frame_type);
+
+			rdev_mgmt_frame_register(rdev, reg->wdev,
+						 frame_type, false);
+		}
+
+		kfree(reg);
+
+		spin_lock_bh(&rdev->mlme_unreg_lock);
+	}
+	spin_unlock_bh(&rdev->mlme_unreg_lock);
+}
+
+void cfg80211_mlme_unreg_wk(struct work_struct *wk)
+{
+	struct cfg80211_registered_device *rdev;
+
+	rdev = container_of(wk, struct cfg80211_registered_device,
+			    mlme_unreg_wk);
+
+	rtnl_lock();
+	cfg80211_process_mlme_unregistrations(rdev);
+	rtnl_unlock();
+}
+
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
 				u16 frame_type, const u8 *match_data,
 				int match_len)
@@ -449,11 +491,18 @@
 	nreg->match_len = match_len;
 	nreg->nlportid = snd_portid;
 	nreg->frame_type = cpu_to_le16(frame_type);
+	nreg->wdev = wdev;
 	list_add(&nreg->list, &wdev->mgmt_registrations);
+	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+	/* process all unregistrations to avoid driver confusion */
+	cfg80211_process_mlme_unregistrations(rdev);
 
 	if (rdev->ops->mgmt_frame_register)
 		rdev_mgmt_frame_register(rdev, wdev, frame_type, true);
 
+	return 0;
+
  out:
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
 
@@ -472,15 +521,12 @@
 		if (reg->nlportid != nlportid)
 			continue;
 
-		if (rdev->ops->mgmt_frame_register) {
-			u16 frame_type = le16_to_cpu(reg->frame_type);
-
-			rdev_mgmt_frame_register(rdev, wdev,
-						 frame_type, false);
-		}
-
 		list_del(&reg->list);
-		kfree(reg);
+		spin_lock(&rdev->mlme_unreg_lock);
+		list_add_tail(&reg->list, &rdev->mlme_unreg);
+		spin_unlock(&rdev->mlme_unreg_lock);
+
+		schedule_work(&rdev->mlme_unreg_wk);
 	}
 
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -496,16 +542,15 @@
 
 void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
 {
-	struct cfg80211_mgmt_registration *reg, *tmp;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
 	spin_lock_bh(&wdev->mgmt_registrations_lock);
-
-	list_for_each_entry_safe(reg, tmp, &wdev->mgmt_registrations, list) {
-		list_del(&reg->list);
-		kfree(reg);
-	}
-
+	spin_lock(&rdev->mlme_unreg_lock);
+	list_splice_tail_init(&wdev->mgmt_registrations, &rdev->mlme_unreg);
+	spin_unlock(&rdev->mlme_unreg_lock);
 	spin_unlock_bh(&wdev->mgmt_registrations_lock);
+
+	cfg80211_process_mlme_unregistrations(rdev);
 }
 
 int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 76b4157..5d8748b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2321,6 +2321,7 @@
 			rdev->wiphy.frag_threshold = old_frag_threshold;
 			rdev->wiphy.rts_threshold = old_rts_threshold;
 			rdev->wiphy.coverage_class = old_coverage_class;
+			return result;
 		}
 	}
 	return 0;
@@ -7390,7 +7391,8 @@
 	int err;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
 		return -EOPNOTSUPP;
 
 	if (!rdev->ops->set_mcast_rate)
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index c6e83a7..c23516d 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -733,6 +733,8 @@
 rdev_mgmt_frame_register(struct cfg80211_registered_device *rdev,
 			 struct wireless_dev *wdev, u16 frame_type, bool reg)
 {
+	might_sleep();
+
 	trace_rdev_mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
 	rdev->ops->mgmt_frame_register(&rdev->wiphy, wdev , frame_type, reg);
 	trace_rdev_return_void(&rdev->wiphy);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index aa2d754..b144485 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1004,7 +1004,7 @@
 
 static const struct ieee80211_reg_rule *
 freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
-		   const struct ieee80211_regdomain *regd)
+		   const struct ieee80211_regdomain *regd, u32 bw)
 {
 	int i;
 	bool band_rule_found = false;
@@ -1028,7 +1028,7 @@
 		if (!band_rule_found)
 			band_rule_found = freq_in_rule_band(fr, center_freq);
 
-		bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
+		bw_fits = reg_does_bw_fit(fr, center_freq, bw);
 
 		if (band_rule_found && bw_fits)
 			return rr;
@@ -1040,14 +1040,26 @@
 	return ERR_PTR(-EINVAL);
 }
 
+const struct ieee80211_reg_rule *__freq_reg_info(struct wiphy *wiphy,
+						 u32 center_freq, u32 min_bw)
+{
+	const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
+	const struct ieee80211_reg_rule *reg_rule = NULL;
+	u32 bw;
+
+	for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+		reg_rule = freq_reg_info_regd(wiphy, center_freq, regd, bw);
+		if (!IS_ERR(reg_rule))
+			return reg_rule;
+	}
+
+	return reg_rule;
+}
+
 const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
 					       u32 center_freq)
 {
-	const struct ieee80211_regdomain *regd;
-
-	regd = reg_get_regdomain(wiphy);
-
-	return freq_reg_info_regd(wiphy, center_freq, regd);
+	return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
 }
 EXPORT_SYMBOL(freq_reg_info);
 
@@ -1176,8 +1188,20 @@
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
 		max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+	/* If we get a reg_rule we can assume that at least 5 MHz fits */
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(10)))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(20)))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+	if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-		bw_flags = IEEE80211_CHAN_NO_HT40;
+		bw_flags |= IEEE80211_CHAN_NO_HT40;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
 		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1695,9 +1719,15 @@
 	const struct ieee80211_power_rule *power_rule = NULL;
 	const struct ieee80211_freq_range *freq_range = NULL;
 	u32 max_bandwidth_khz;
+	u32 bw;
 
-	reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
-				      regd);
+	for (bw = MHZ_TO_KHZ(20); bw >= MHZ_TO_KHZ(5); bw = bw / 2) {
+		reg_rule = freq_reg_info_regd(wiphy,
+					      MHZ_TO_KHZ(chan->center_freq),
+					      regd, bw);
+		if (!IS_ERR(reg_rule))
+			break;
+	}
 
 	if (IS_ERR(reg_rule)) {
 		REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
@@ -1721,8 +1751,20 @@
 	if (reg_rule->flags & NL80211_RRF_AUTO_BW)
 		max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
+	/* If we get a reg_rule we can assume that at least 5 MHz fits */
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(10)))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
+			     MHZ_TO_KHZ(20)))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+
+	if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+		bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+	if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+		bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-		bw_flags = IEEE80211_CHAN_NO_HT40;
+		bw_flags |= IEEE80211_CHAN_NO_HT40;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
 		bw_flags |= IEEE80211_CHAN_NO_80MHZ;
 	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -2079,10 +2121,7 @@
 		reg_process_hint_core(reg_request);
 		return;
 	case NL80211_REGDOM_SET_BY_USER:
-		treatment = reg_process_hint_user(reg_request);
-		if (treatment == REG_REQ_IGNORE ||
-		    treatment == REG_REQ_ALREADY_SET)
-			return;
+		reg_process_hint_user(reg_request);
 		return;
 	case NL80211_REGDOM_SET_BY_DRIVER:
 		if (!wiphy)
@@ -2099,7 +2138,9 @@
 		goto out_free;
 	}
 
-	/* This is required so that the orig_* parameters are saved */
+	/* This is required so that the orig_* parameters are saved.
+	 * NOTE: treatment must be set for any case that reaches here!
+	 */
 	if (treatment == REG_REQ_ALREADY_SET && wiphy &&
 	    wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
 		wiphy_update_regulatory(wiphy, reg_request->initiator);
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 42f7c76..f07224d 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -31,7 +31,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 64,
 		}
 	},
@@ -50,7 +50,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 96,
 		}
 	},
@@ -69,7 +69,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 128,
 		}
 	},
@@ -88,7 +88,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 64,
 		}
 	},
@@ -107,7 +107,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 96,
 		}
 	},
@@ -126,7 +126,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 128,
 		}
 	},
@@ -164,7 +164,7 @@
 
 	.uinfo = {
 		.aead = {
-			.geniv = "seqniv",
+			.geniv = "seqiv",
 			.icv_truncbits = 128,
 		}
 	},
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 18cead7..94af3d0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -115,7 +115,8 @@
 	rcu_read_unlock();
 }
 
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
+						  int tos, int oif,
 						  const xfrm_address_t *saddr,
 						  const xfrm_address_t *daddr,
 						  int family)
@@ -127,14 +128,15 @@
 	if (unlikely(afinfo == NULL))
 		return ERR_PTR(-EAFNOSUPPORT);
 
-	dst = afinfo->dst_lookup(net, tos, saddr, daddr);
+	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 
 	xfrm_policy_put_afinfo(afinfo);
 
 	return dst;
 }
 
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+						int tos, int oif,
 						xfrm_address_t *prev_saddr,
 						xfrm_address_t *prev_daddr,
 						int family)
@@ -153,7 +155,7 @@
 		daddr = x->coaddr;
 	}
 
-	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
+	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
 
 	if (!IS_ERR(dst)) {
 		if (prev_saddr != saddr)
@@ -1373,15 +1375,15 @@
 }
 
 static int
-xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
-	       unsigned short family)
+xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+	       xfrm_address_t *remote, unsigned short family)
 {
 	int err;
 	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
 	if (unlikely(afinfo == NULL))
 		return -EINVAL;
-	err = afinfo->get_saddr(net, local, remote);
+	err = afinfo->get_saddr(net, oif, local, remote);
 	xfrm_policy_put_afinfo(afinfo);
 	return err;
 }
@@ -1410,7 +1412,9 @@
 			remote = &tmpl->id.daddr;
 			local = &tmpl->saddr;
 			if (xfrm_addr_any(local, tmpl->encap_family)) {
-				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
+				error = xfrm_get_saddr(net, fl->flowi_oif,
+						       &tmp, remote,
+						       tmpl->encap_family);
 				if (error)
 					goto fail;
 				local = &tmp;
@@ -1690,8 +1694,8 @@
 
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
 			family = xfrm[i]->props.family;
-			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
-					      family);
+			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+					      &saddr, &daddr, family);
 			err = PTR_ERR(dst);
 			if (IS_ERR(dst))
 				goto put_states;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index bd16c6c..a8de9e3 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -925,12 +925,10 @@
 			return err;
 
 		if (attrs[XFRMA_ADDRESS_FILTER]) {
-			filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
+					 sizeof(*filter), GFP_KERNEL);
 			if (filter == NULL)
 				return -ENOMEM;
-
-			memcpy(filter, nla_data(attrs[XFRMA_ADDRESS_FILTER]),
-			       sizeof(*filter));
 		}
 
 		if (attrs[XFRMA_PROTO])
@@ -2048,7 +2046,7 @@
 		xfrm_audit_policy_delete(xp, 1, true);
 	} else {
 		// reset the timers here?
-		WARN(1, "Dont know what to do with soft policy expire\n");
+		WARN(1, "Don't know what to do with soft policy expire\n");
 	}
 	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 4450fed..63e7d50 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -12,6 +12,7 @@
 hostprogs-y += tracex3
 hostprogs-y += tracex4
 hostprogs-y += tracex5
+hostprogs-y += tracex6
 hostprogs-y += lathist
 
 test_verifier-objs := test_verifier.o libbpf.o
@@ -25,6 +26,7 @@
 tracex3-objs := bpf_load.o libbpf.o tracex3_user.o
 tracex4-objs := bpf_load.o libbpf.o tracex4_user.o
 tracex5-objs := bpf_load.o libbpf.o tracex5_user.o
+tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 
 # Tell kbuild to always build the programs
@@ -37,6 +39,7 @@
 always += tracex3_kern.o
 always += tracex4_kern.o
 always += tracex5_kern.o
+always += tracex6_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 
@@ -51,6 +54,7 @@
 HOSTLOADLIBES_tracex3 += -lelf
 HOSTLOADLIBES_tracex4 += -lelf -lrt
 HOSTLOADLIBES_tracex5 += -lelf
+HOSTLOADLIBES_tracex6 += -lelf
 HOSTLOADLIBES_lathist += -lelf
 
 # point this to your LLVM backend with bpf support
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h
index bdf1c16..3a44d3a 100644
--- a/samples/bpf/bpf_helpers.h
+++ b/samples/bpf/bpf_helpers.h
@@ -31,6 +31,8 @@
 	(void *) BPF_FUNC_get_current_uid_gid;
 static int (*bpf_get_current_comm)(void *buf, int buf_size) =
 	(void *) BPF_FUNC_get_current_comm;
+static int (*bpf_perf_event_read)(void *map, int index) =
+	(void *) BPF_FUNC_perf_event_read;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
@@ -60,4 +62,29 @@
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
 	(void *) BPF_FUNC_l4_csum_replace;
 
+#if defined(__x86_64__)
+
+#define PT_REGS_PARM1(x) ((x)->di)
+#define PT_REGS_PARM2(x) ((x)->si)
+#define PT_REGS_PARM3(x) ((x)->dx)
+#define PT_REGS_PARM4(x) ((x)->cx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->sp)
+#define PT_REGS_FP(x) ((x)->bp)
+#define PT_REGS_RC(x) ((x)->ax)
+#define PT_REGS_SP(x) ((x)->sp)
+
+#elif defined(__s390x__)
+
+#define PT_REGS_PARM1(x) ((x)->gprs[2])
+#define PT_REGS_PARM2(x) ((x)->gprs[3])
+#define PT_REGS_PARM3(x) ((x)->gprs[4])
+#define PT_REGS_PARM4(x) ((x)->gprs[5])
+#define PT_REGS_PARM5(x) ((x)->gprs[6])
+#define PT_REGS_RET(x) ((x)->gprs[14])
+#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->gprs[2])
+#define PT_REGS_SP(x) ((x)->gprs[15])
+
+#endif
 #endif
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 6936059..ee0f110 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -822,6 +822,65 @@
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"PTR_TO_STACK store/load",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+	},
+	{
+		"PTR_TO_STACK store/load - bad alignment on off",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned access off -6 size 8",
+	},
+	{
+		"PTR_TO_STACK store/load - bad alignment on reg",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "misaligned access off -2 size 8",
+	},
+	{
+		"PTR_TO_STACK store/load - out of bounds low",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off=-79992 size=8",
+	},
+	{
+		"PTR_TO_STACK store/load - out of bounds high",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+			BPF_EXIT_INSN(),
+		},
+		.result = REJECT,
+		.errstr = "invalid stack off=0 size=8",
+	},
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c
index 3162046..3f450a8 100644
--- a/samples/bpf/tracex1_kern.c
+++ b/samples/bpf/tracex1_kern.c
@@ -29,7 +29,7 @@
 	int len;
 
 	/* non-portable! works for the given kernel only */
-	skb = (struct sk_buff *) ctx->di;
+	skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
 
 	dev = _(skb->dev);
 
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index dc50f4f..b32367c 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -27,10 +27,10 @@
 	long init_val = 1;
 	long *value;
 
-	/* x64 specific: read ip of kfree_skb caller.
+	/* x64/s390x specific: read ip of kfree_skb caller.
 	 * non-portable version of __builtin_return_address(0)
 	 */
-	bpf_probe_read(&loc, sizeof(loc), (void *)ctx->sp);
+	bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
 
 	value = bpf_map_lookup_elem(&my_map, &loc);
 	if (value)
@@ -79,7 +79,7 @@
 SEC("kprobe/sys_write")
 int bpf_prog3(struct pt_regs *ctx)
 {
-	long write_size = ctx->dx; /* arg3 */
+	long write_size = PT_REGS_PARM3(ctx);
 	long init_val = 1;
 	long *value;
 	struct hist_key key = {};
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index 255ff27..bf337fb 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -23,7 +23,7 @@
 SEC("kprobe/blk_mq_start_request")
 int bpf_prog1(struct pt_regs *ctx)
 {
-	long rq = ctx->di;
+	long rq = PT_REGS_PARM1(ctx);
 	u64 val = bpf_ktime_get_ns();
 
 	bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
@@ -51,7 +51,7 @@
 SEC("kprobe/blk_update_request")
 int bpf_prog2(struct pt_regs *ctx)
 {
-	long rq = ctx->di;
+	long rq = PT_REGS_PARM1(ctx);
 	u64 *value, l, base;
 	u32 index;
 
diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c
index 126b805..ac46714 100644
--- a/samples/bpf/tracex4_kern.c
+++ b/samples/bpf/tracex4_kern.c
@@ -27,7 +27,7 @@
 SEC("kprobe/kmem_cache_free")
 int bpf_prog1(struct pt_regs *ctx)
 {
-	long ptr = ctx->si;
+	long ptr = PT_REGS_PARM2(ctx);
 
 	bpf_map_delete_elem(&my_map, &ptr);
 	return 0;
@@ -36,11 +36,11 @@
 SEC("kretprobe/kmem_cache_alloc_node")
 int bpf_prog2(struct pt_regs *ctx)
 {
-	long ptr = ctx->ax;
+	long ptr = PT_REGS_RC(ctx);
 	long ip = 0;
 
 	/* get ip address of kmem_cache_alloc_node() caller */
-	bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip)));
+	bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
 
 	struct pair v = {
 		.val = bpf_ktime_get_ns(),
diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c
index b71fe07..b3f4295 100644
--- a/samples/bpf/tracex5_kern.c
+++ b/samples/bpf/tracex5_kern.c
@@ -24,7 +24,7 @@
 {
 	struct seccomp_data sd = {};
 
-	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 
 	/* dispatch into next BPF program depending on syscall number */
 	bpf_tail_call(ctx, &progs, sd.nr);
@@ -42,7 +42,7 @@
 {
 	struct seccomp_data sd = {};
 
-	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 	if (sd.args[2] == 512) {
 		char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
 		bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +55,7 @@
 {
 	struct seccomp_data sd = {};
 
-	bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+	bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 	if (sd.args[2] > 128 && sd.args[2] <= 1024) {
 		char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
 		bpf_trace_printk(fmt, sizeof(fmt),
diff --git a/samples/bpf/tracex6_kern.c b/samples/bpf/tracex6_kern.c
new file mode 100644
index 0000000..be479c4
--- /dev/null
+++ b/samples/bpf/tracex6_kern.c
@@ -0,0 +1,27 @@
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+	.key_size = sizeof(int),
+	.value_size = sizeof(u32),
+	.max_entries = 32,
+};
+
+SEC("kprobe/sys_write")
+int bpf_prog1(struct pt_regs *ctx)
+{
+	u64 count;
+	u32 key = bpf_get_smp_processor_id();
+	char fmt[] = "CPU-%d   %llu\n";
+
+	count = bpf_perf_event_read(&my_map, key);
+	bpf_trace_printk(fmt, sizeof(fmt), key, count);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/tracex6_user.c b/samples/bpf/tracex6_user.c
new file mode 100644
index 0000000..8ea4976
--- /dev/null
+++ b/samples/bpf/tracex6_user.c
@@ -0,0 +1,72 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <linux/perf_event.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define SAMPLE_PERIOD  0x7fffffffffffffffULL
+
+static void test_bpf_perf_event(void)
+{
+	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+	int *pmu_fd = malloc(nr_cpus * sizeof(int));
+	int status, i;
+
+	struct perf_event_attr attr_insn_pmu = {
+		.freq = 0,
+		.sample_period = SAMPLE_PERIOD,
+		.inherit = 0,
+		.type = PERF_TYPE_HARDWARE,
+		.read_format = 0,
+		.sample_type = 0,
+		.config = 0, /* PMU: cycles */
+	};
+
+	for (i = 0; i < nr_cpus; i++) {
+		pmu_fd[i] = perf_event_open(&attr_insn_pmu, -1/*pid*/, i/*cpu*/, -1/*group_fd*/, 0);
+		if (pmu_fd[i] < 0) {
+			printf("event syscall failed\n");
+			goto exit;
+		}
+
+		bpf_update_elem(map_fd[0], &i, &pmu_fd[i], BPF_ANY);
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+	}
+
+	status = system("ls > /dev/null");
+	if (status)
+		goto exit;
+	status = system("sleep 2");
+	if (status)
+		goto exit;
+
+exit:
+	for (i = 0; i < nr_cpus; i++)
+		close(pmu_fd[i]);
+	close(map_fd[0]);
+	free(pmu_fd);
+}
+
+int main(int argc, char **argv)
+{
+	char filename[256];
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	test_bpf_perf_event();
+	read_trace_pipe();
+
+	return 0;
+}
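
Note that tracex6_user.c above calls perf_event_open(), which has no glibc wrapper; the sample is assumed to route it through syscall(2) as the other samples do. A minimal sketch of such a wrapper follows; the helper name and its placement are assumptions for illustration, not part of this patch:

    #define _GNU_SOURCE
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    /* Hypothetical helper: the kernel exposes perf_event_open only as a raw
     * syscall, so user space has to wrap it itself.
     */
    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

Each per-CPU event fd returned by such a wrapper is what the sample stores in the BPF_MAP_TYPE_PERF_EVENT_ARRAY map, so that bpf_perf_event_read() in tracex6_kern.c can read the cycle counter for the CPU the kprobe fired on.
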
diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py
index c89fdca..2f4b7ff 100755
--- a/scripts/checkkconfigsymbols.py
+++ b/scripts/checkkconfigsymbols.py
@@ -2,7 +2,7 @@
 
 """Find Kconfig symbols that are referenced but not defined."""
 
-# (c) 2014-2015 Valentin Rothberg <Valentin.Rothberg@lip6.fr>
+# (c) 2014-2015 Valentin Rothberg <valentinrothberg@gmail.com>
 # (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
 #
 # Licensed under the terms of the GNU GPL License version 2
@@ -20,18 +20,20 @@
 FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
 DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
 EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
-STMT = r"^\s*(?:if|select|depends\s+on)\s+" + EXPR
+DEFAULT = r"default\s+.*?(?:if\s.+){,1}"
+STMT = r"^\s*(?:if|select|depends\s+on|(?:" + DEFAULT + r"))\s+" + EXPR
 SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
 
 # regex objects
 REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
-REGEX_FEATURE = re.compile(r"(" + FEATURE + r")")
+REGEX_FEATURE = re.compile(r'(?!\B"[^"]*)' + FEATURE + r'(?![^"]*"\B)')
 REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
 REGEX_KCONFIG_DEF = re.compile(DEF)
 REGEX_KCONFIG_EXPR = re.compile(EXPR)
 REGEX_KCONFIG_STMT = re.compile(STMT)
 REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
 REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
+REGEX_NUMERIC = re.compile(r"0[xX][0-9a-fA-F]+|[0-9]+")
 
 
 def parse_options():
@@ -58,6 +60,11 @@
                            "input format bases on Git log's "
                            "\'commmit1..commit2\'.")
 
+    parser.add_option('-f', '--find', dest='find', action='store_true',
+                      default=False,
+                      help="Find and show commits that may cause symbols to be "
+                           "missing.  Required to run with --diff.")
+
     parser.add_option('-i', '--ignore', dest='ignore', action='store',
                       default="",
                       help="Ignore files matching this pattern.  Note that "
@@ -86,6 +93,9 @@
                      "'--force' if you\nwant to ignore this warning and "
                      "continue.")
 
+    if opts.commit:
+        opts.find = False
+
     if opts.ignore:
         try:
             re.match(opts.ignore, "this/is/just/a/test.c")
@@ -128,13 +138,19 @@
             # feature has not been undefined before
             if not feature in undefined_a:
                 files = sorted(undefined_b.get(feature))
-                print "%s\t%s" % (feature, ", ".join(files))
+                print "%s\t%s" % (yel(feature), ", ".join(files))
+                if opts.find:
+                    commits = find_commits(feature, opts.diff)
+                    print red(commits)
             # check if there are new files that reference the undefined feature
             else:
                 files = sorted(undefined_b.get(feature) -
                                undefined_a.get(feature))
                 if files:
-                    print "%s\t%s" % (feature, ", ".join(files))
+                    print "%s\t%s" % (yel(feature), ", ".join(files))
+                    if opts.find:
+                        commits = find_commits(feature, opts.diff)
+                        print red(commits)
 
         # reset to head
         execute("git reset --hard %s" % head)
@@ -144,7 +160,21 @@
         undefined = check_symbols(opts.ignore)
         for feature in sorted(undefined):
             files = sorted(undefined.get(feature))
-            print "%s\t%s" % (feature, ", ".join(files))
+            print "%s\t%s" % (yel(feature), ", ".join(files))
+
+
+def yel(string):
+    """
+    Color %string yellow.
+    """
+    return "\033[33m%s\033[0m" % string
+
+
+def red(string):
+    """
+    Color %string red.
+    """
+    return "\033[31m%s\033[0m" % string
 
 
 def execute(cmd):
@@ -156,6 +186,13 @@
     return stdout
 
 
+def find_commits(symbol, diff):
+    """Find commits changing %symbol in the given range of %diff."""
+    commits = execute("git log --pretty=oneline --abbrev-commit -G %s %s"
+                      % (symbol, diff))
+    return commits
+
+
 def tree_is_dirty():
     """Return true if the current working tree is dirty (i.e., if any file has
     been added, deleted, modified, renamed or copied but not committed)."""
@@ -279,6 +316,9 @@
                 line = line.strip('\n')
                 features.extend(get_features_in_line(line))
             for feature in set(features):
+                if REGEX_NUMERIC.match(feature):
+                    # ignore numeric values
+                    continue
                 paths = referenced_features.get(feature, set())
                 paths.add(kfile)
                 referenced_features[feature] = paths
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d5c8e9a..a51ca0e 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -5011,6 +5011,7 @@
 				     "memory barrier without comment\n" . $herecurr);
 			}
 		}
+
 # check for waitqueue_active without a comment.
 		if ($line =~ /\bwaitqueue_active\s*\(/) {
 			if (!ctx_has_comment($first_line, $linenr)) {
@@ -5018,6 +5019,24 @@
 				     "waitqueue_active without comment\n" . $herecurr);
 			}
 		}
+
+# Check for expedited grace periods that interrupt non-idle non-nohz
+# online CPUs.  These expedited grace periods can therefore degrade real-time response
+# if used carelessly, and should be avoided where not absolutely
+# needed.  It is always OK to use synchronize_rcu_expedited() and
+# synchronize_sched_expedited() at boot time (before real-time applications
+# start) and in error situations where real-time response is compromised in
+# any case.  Note that synchronize_srcu_expedited() does -not- interrupt
+# other CPUs, so don't warn on uses of synchronize_srcu_expedited().
+# Of course, nothing comes for free, and srcu_read_lock() and
+# srcu_read_unlock() do contain full memory barriers in payment for
+# synchronize_srcu_expedited() non-interruption properties.
+		if ($line =~ /\b(synchronize_rcu_expedited|synchronize_sched_expedited)\(/) {
+			WARN("EXPEDITED_RCU_GRACE_PERIOD",
+			     "expedited RCU grace periods should be avoided where they can degrade real-time response\n" . $herecurr);
+
+		}
+
 # check of hardware specific defines
 		if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
 			CHK("ARCH_DEFINES",
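
To make the new EXPEDITED_RCU_GRACE_PERIOD check concrete, here is a hedged sketch of the pattern it flags; synchronize_rcu_expedited() and synchronize_rcu() are real kernel APIs, but the surrounding functions are made up purely for illustration:

    /* Hypothetical example: checkpatch now warns on the expedited call,
     * since expedited grace periods interrupt non-idle CPUs.
     */
    static void foo_flush_fast(void)
    {
            synchronize_rcu_expedited();    /* triggers EXPEDITED_RCU_GRACE_PERIOD */
    }

    /* Outside boot or error paths, a normal grace period is preferred. */
    static void foo_flush(void)
    {
            synchronize_rcu();
    }
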
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 9922e66..a7bf5f6 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -133,6 +133,30 @@
 #
 # All descriptions can be multiline, except the short function description.
 #
+# For really long structs, you can also describe arguments inside the
+# body of the struct.
+# eg.
+# /**
+#  * struct my_struct - short description
+#  * @a: first member
+#  * @b: second member
+#  *
+#  * Longer description
+#  */
+# struct my_struct {
+#     int a;
+#     int b;
+#     /**
+#      * @c: This is longer description of C
+#      *
+#      * You can use paragraphs to describe arguments
+#      * using this method.
+#      */
+#     int c;
+# };
+#
+# This should be used only for struct/enum members.
+#
 # You can also add additional sections. When documenting kernel functions you
 # should document the "Context:" of the function, e.g. whether the functions
 # can be called form interrupts. Unlike other sections you can end it with an
@@ -253,11 +277,20 @@
 my $blankline = $blankline_man;
 my $modulename = "Kernel API";
 my $function_only = 0;
+my $show_not_found = 0;
+
+my @build_time;
+if (defined($ENV{'KBUILD_BUILD_TIMESTAMP'}) &&
+    (my $seconds = `date -d"${ENV{'KBUILD_BUILD_TIMESTAMP'}}" +%s`) ne '') {
+    @build_time = gmtime($seconds);
+} else {
+    @build_time = localtime;
+}
+
 my $man_date = ('January', 'February', 'March', 'April', 'May', 'June',
 		'July', 'August', 'September', 'October',
-		'November', 'December')[(localtime)[4]] .
-  " " . ((localtime)[5]+1900);
-my $show_not_found = 0;
+		'November', 'December')[$build_time[4]] .
+  " " . ($build_time[5]+1900);
 
 # Essentially these are globals.
 # They probably want to be tidied up, made more localised or something.
@@ -287,9 +320,19 @@
 # 2 - scanning field start.
 # 3 - scanning prototype.
 # 4 - documentation block
+# 5 - gathering documentation outside main block
 my $state;
 my $in_doc_sect;
 
+# Split Doc State
+# 0 - Invalid (Before start or after finish)
+# 1 - Is started (the /** was found inside a struct)
+# 2 - The @parameter header was found, start accepting multi paragraph text.
+# 3 - Finished (the */ was found)
+# 4 - Error - Comment without header was found. Emit a warning as it's not
+#     proper kernel-doc and ignore the rest.
+my $split_doc_state;
+
 #declaration types: can be
 # 'function', 'struct', 'union', 'enum', 'typedef'
 my $decl_type;
@@ -304,6 +347,9 @@
 my $doc_sect = $doc_com . '([' . $doc_special . ']?[\w\s]+):(.*)';
 my $doc_content = $doc_com_body . '(.*)';
 my $doc_block = $doc_com . 'DOC:\s*(.*)?';
+my $doc_split_start = '^\s*/\*\*\s*$';
+my $doc_split_sect = '\s*\*\s*(@[\w\s]+):(.*)';
+my $doc_split_end = '^\s*\*/\s*$';
 
 my %constants;
 my %parameterdescs;
@@ -1753,7 +1799,9 @@
 	# strip kmemcheck_bitfield_{begin,end}.*;
 	$members =~ s/kmemcheck_bitfield_.*?;//gos;
 	# strip attributes
+	$members =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i;
 	$members =~ s/__aligned\s*\([^;]*\)//gos;
+	$members =~ s/\s*CRYPTO_MINALIGN_ATTR//gos;
 
 	create_parameterlist($members, ';', $file);
 	check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
@@ -2181,6 +2229,7 @@
     $prototype = "";
 
     $state = 0;
+    $split_doc_state = 0;
 }
 
 sub tracepoint_munge($) {
@@ -2453,7 +2502,6 @@
 		}
 		$section = $newsection;
 	    } elsif (/$doc_end/) {
-
 		if (($contents ne "") && ($contents ne "\n")) {
 		    dump_section($file, $section, xml_escape($contents));
 		    $section = $section_default;
@@ -2494,8 +2542,44 @@
 		print STDERR "Warning(${file}:$.): bad line: $_";
 		++$warnings;
 	    }
+	} elsif ($state == 5) { # scanning for split parameters
+	    # First line (state 1) needs to be a @parameter
+	    if ($split_doc_state == 1 && /$doc_split_sect/o) {
+		$section = $1;
+		$contents = $2;
+		if ($contents ne "") {
+		    while ((substr($contents, 0, 1) eq " ") ||
+		           substr($contents, 0, 1) eq "\t") {
+			$contents = substr($contents, 1);
+		    }
+		$contents .= "\n";
+		}
+		$split_doc_state = 2;
+	    # Documentation block end */
+	    } elsif (/$doc_split_end/) {
+		if (($contents ne "") && ($contents ne "\n")) {
+		    dump_section($file, $section, xml_escape($contents));
+		    $section = $section_default;
+		    $contents = "";
+		}
+		$state = 3;
+		$split_doc_state = 0;
+	    # Regular text
+	    } elsif (/$doc_content/) {
+		if ($split_doc_state == 2) {
+		    $contents .= $1 . "\n";
+		} elsif ($split_doc_state == 1) {
+		    $split_doc_state = 4;
+		    print STDERR "Warning(${file}:$.): ";
+		    print STDERR "Incorrect use of kernel-doc format: $_";
+		    ++$warnings;
+		}
+	    }
 	} elsif ($state == 3) {	# scanning for function '{' (end of prototype)
-	    if ($decl_type eq 'function') {
+	    if (/$doc_split_start/) {
+		$state = 5;
+		$split_doc_state = 1;
+	    } elsif ($decl_type eq 'function') {
 		process_state3_function($_, $file);
 	    } else {
 		process_state3_type($_, $file);
@@ -2587,7 +2671,7 @@
 
 # generate a sequence of code that will splice in highlighting information
 # using the s// operator.
-foreach my $pattern (keys %highlights) {
+foreach my $pattern (sort keys %highlights) {
 #   print STDERR "scanning pattern:$pattern, highlight:($highlights{$pattern})\n";
     $dohighlight .=  "\$contents =~ s:$pattern:$highlights{$pattern}:gs;\n";
 }
diff --git a/scripts/kernel-doc-xml-ref b/scripts/kernel-doc-xml-ref
new file mode 100755
index 0000000..104a5a5
--- /dev/null
+++ b/scripts/kernel-doc-xml-ref
@@ -0,0 +1,198 @@
+#!/usr/bin/perl -w
+
+use strict;
+
+## Copyright (C) 2015  Intel Corporation                         ##
+#                                                                ##
+## This software falls under the GNU General Public License.     ##
+## Please read the COPYING file for more information             ##
+#
+#
+# This software reads an XML file and a list of valid internal
+# references to replace DocBook tags with links.
+#
+# The list of "valid internal references" must be one-per-line in the following format:
+#      API-struct-foo
+#      API-enum-bar
+#      API-my-function
+#
+# The software walks over the XML file looking for XML tags representing possible references
+# to the document. Each reference is cross-checked against the list of valid internal references. If
+# the reference is found, its content is replaced with a <link> tag.
+#
+# usage:
+# kernel-doc-xml-ref -db filename
+#		     xml filename > outputfile
+
+# read arguments
+if ($#ARGV != 2) {
+	usage();
+}
+
+#Holds the database filename
+my $databasefile;
+my @database;
+
+#holds the inputfile
+my $inputfile;
+my $errors = 0;
+
+my %highlights = (
+	"<function>(.*?)</function>",
+	    "\"<function>\" . convert_function(\$1, \$line) . \"</function>\"",
+	"<structname>(.*?)</structname>",
+	    "\"<structname>\" . convert_struct(\$1) . \"</structname>\"",
+	"<funcdef>(.*?)<function>(.*?)</function></funcdef>",
+	    "\"<funcdef>\" . convert_param(\$1) . \"<function>\$2</function></funcdef>\"",
+	"<paramdef>(.*?)<parameter>(.*?)</parameter></paramdef>",
+	    "\"<paramdef>\" . convert_param(\$1) . \"<parameter>\$2</parameter></paramdef>\"");
+
+while($ARGV[0] =~ m/^-(.*)/) {
+	my $cmd = shift @ARGV;
+	if ($cmd eq "-db") {
+		$databasefile = shift @ARGV
+	} else {
+		usage();
+	}
+}
+$inputfile = shift @ARGV;
+
+sub open_database {
+	open (my $handle, '<', $databasefile) or die "Cannot open $databasefile";
+	chomp(my @lines = <$handle>);
+	close $handle;
+
+	@database = @lines;
+}
+
+sub process_file {
+	open_database();
+
+	my $dohighlight;
+	foreach my $pattern (keys %highlights) {
+		$dohighlight .=  "\$line =~ s:$pattern:$highlights{$pattern}:eg;\n";
+	}
+
+	open(FILE, $inputfile) or die("Could not open $inputfile");
+	foreach my $line (<FILE>)  {
+		eval $dohighlight;
+		print $line;
+	}
+}
+
+sub trim($_)
+{
+	my $str = $_[0];
+	$str =~ s/^\s+|\s+$//g;
+	return $str
+}
+
+sub has_key_defined($_)
+{
+	if ( grep( /^$_[0]$/, @database)) {
+		return 1;
+	}
+	return 0;
+}
+
+# Gets the content of a <function> tag and adds a hyperlink to it if possible.
+sub convert_function($_)
+{
+	my $arg = $_[0];
+	my $key = $_[0];
+
+	my $line = $_[1];
+
+	$key = trim($key);
+
+	$key =~ s/[^A-Za-z0-9]/-/g;
+	$key = "API-" . $key;
+
+	# We shouldn't add links to <funcdef> prototype
+	if (!has_key_defined($key) || $line =~ m/\s+<funcdef/i) {
+		return $arg;
+	}
+
+	my $head = $arg;
+	my $tail = "";
+	if ($arg =~ /(.*?)( ?)$/) {
+		$head = $1;
+		$tail = $2;
+	}
+	return "<link linkend=\"$key\">$head</link>$tail";
+}
+
+# Converts a struct name into a link
+sub convert_struct($_)
+{
+	my $arg = $_[0];
+	my $key = $_[0];
+	$key =~ s/(struct )?(\w)/$2/g;
+	$key =~ s/[^A-Za-z0-9]/-/g;
+	$key = "API-struct-" . $key;
+
+	if (!has_key_defined($key)) {
+		return $arg;
+	}
+
+	my ($head, $tail) = split_pointer($arg);
+	return "<link linkend=\"$key\">$head</link>$tail";
+}
+
+# Identify "object *" elements
+sub split_pointer($_)
+{
+	my $arg = $_[0];
+	if ($arg =~ /(.*?)( ?\* ?)/) {
+		return ($1, $2);
+	}
+	return ($arg, "");
+}
+
+sub convert_param($_)
+{
+	my $type = $_[0];
+	my $keyname = convert_key_name($type);
+
+	if (!has_key_defined($keyname)) {
+		return $type;
+	}
+
+	my ($head, $tail) = split_pointer($type);
+	return "<link linkend=\"$keyname\">$head</link>$tail";
+
+}
+
+# DocBook links are in the API-<TYPE>-<STRUCT-NAME> format
+# This method gets an element and returns a valid DocBook reference for it.
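+# For instance, an element such as "struct snd_ac97 *" maps to the
+# reference "API-struct-snd-ac97".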
+sub convert_key_name($_)
+{
+	# Pattern $2 is optional and might be uninitialized
+	no warnings 'uninitialized';
+
+	my $str = $_[0];
+	$str =~ s/(const|static)? ?(struct)? ?([a-zA-Z0-9_]+) ?(\*|&)?/$2 $3/g ;
+
+	# trim
+	$str =~ s/^\s+|\s+$//g;
+
+	# spaces and _ to -
+	$str =~ s/[^A-Za-z0-9]/-/g;
+
+	return "API-" . $str;
+}
+
+sub usage {
+	print "Usage: $0 -db <database file>\n";
+	print "          <xml source file> > <output file>\n";
+	exit 1;
+}
+
+# starting point
+process_file();
+
+if ($errors) {
+	print STDERR "$errors errors\n";
+}
+
+exit($errors);
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh
deleted file mode 100644
index 6b5c83b..0000000
--- a/scripts/rt-tester/check-all.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-
-
-function testit ()
-{
- printf "%-30s: " $1
- ./rt-tester.py $1 | grep Pass
-}
-
-testit t2-l1-2rt-sameprio.tst
-testit t2-l1-pi.tst
-testit t2-l1-signal.tst
-#testit t2-l2-2rt-deadlock.tst
-testit t3-l1-pi-1rt.tst
-testit t3-l1-pi-2rt.tst
-testit t3-l1-pi-3rt.tst
-testit t3-l1-pi-signal.tst
-testit t3-l1-pi-steal.tst
-testit t3-l2-pi.tst
-testit t4-l2-pi-deboost.tst
-testit t5-l4-pi-boost-deboost.tst
-testit t5-l4-pi-boost-deboost-setsched.tst
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
deleted file mode 100755
index 6d916c2..0000000
--- a/scripts/rt-tester/rt-tester.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-#
-# rt-mutex tester
-#
-# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-import os
-import sys
-import getopt
-import shutil
-import string
-
-# Globals
-quiet = 0
-test = 0
-comments = 0
-
-sysfsprefix = "/sys/devices/system/rttest/rttest"
-statusfile = "/status"
-commandfile = "/command"
-
-# Command opcodes
-cmd_opcodes = {
-    "schedother"    : "1",
-    "schedfifo"     : "2",
-    "lock"          : "3",
-    "locknowait"    : "4",
-    "lockint"       : "5",
-    "lockintnowait" : "6",
-    "lockcont"      : "7",
-    "unlock"        : "8",
-    "signal"        : "11",
-    "resetevent"    : "98",
-    "reset"         : "99",
-    }
-
-test_opcodes = {
-    "prioeq"        : ["P" , "eq" , None],
-    "priolt"        : ["P" , "lt" , None],
-    "priogt"        : ["P" , "gt" , None],
-    "nprioeq"       : ["N" , "eq" , None],
-    "npriolt"       : ["N" , "lt" , None],
-    "npriogt"       : ["N" , "gt" , None],
-    "unlocked"      : ["M" , "eq" , 0],
-    "trylock"       : ["M" , "eq" , 1],
-    "blocked"       : ["M" , "eq" , 2],
-    "blockedwake"   : ["M" , "eq" , 3],
-    "locked"        : ["M" , "eq" , 4],
-    "opcodeeq"      : ["O" , "eq" , None],
-    "opcodelt"      : ["O" , "lt" , None],
-    "opcodegt"      : ["O" , "gt" , None],
-    "eventeq"       : ["E" , "eq" , None],
-    "eventlt"       : ["E" , "lt" , None],
-    "eventgt"       : ["E" , "gt" , None],
-    }
-
-# Print usage information
-def usage():
-    print "rt-tester.py <-c -h -q -t> <testfile>"
-    print " -c    display comments after first command"
-    print " -h    help"
-    print " -q    quiet mode"
-    print " -t    test mode (syntax check)"
-    print " testfile: read test specification from testfile"
-    print " otherwise from stdin"
-    return
-
-# Print progress when not in quiet mode
-def progress(str):
-    if not quiet:
-        print str
-
-# Analyse a status value
-def analyse(val, top, arg):
-
-    intval = int(val)
-
-    if top[0] == "M":
-        intval = intval / (10 ** int(arg))
-	intval = intval % 10
-        argval = top[2]
-    elif top[0] == "O":
-        argval = int(cmd_opcodes.get(arg, arg))
-    else:
-        argval = int(arg)
-
-    # progress("%d %s %d" %(intval, top[1], argval))
-
-    if top[1] == "eq" and intval == argval:
-	return 1
-    if top[1] == "lt" and intval < argval:
-        return 1
-    if top[1] == "gt" and intval > argval:
-	return 1
-    return 0
-
-# Parse the commandline
-try:
-    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
-except getopt.GetoptError, ex:
-    usage()
-    sys.exit(1)
-
-# Parse commandline options
-for option, value in options:
-    if option == "-c":
-        comments = 1
-    elif option == "-q":
-        quiet = 1
-    elif option == "-t":
-        test = 1
-    elif option == '-h':
-        usage()
-        sys.exit(0)
-
-# Select the input source
-if arguments:
-    try:
-        fd = open(arguments[0])
-    except Exception,ex:
-        sys.stderr.write("File not found %s\n" %(arguments[0]))
-        sys.exit(1)
-else:
-    fd = sys.stdin
-
-linenr = 0
-
-# Read the test patterns
-while 1:
-
-    linenr = linenr + 1
-    line = fd.readline()
-    if not len(line):
-        break
-
-    line = line.strip()
-    parts = line.split(":")
-
-    if not parts or len(parts) < 1:
-        continue
-
-    if len(parts[0]) == 0:
-        continue
-
-    if parts[0].startswith("#"):
-	if comments > 1:
-	    progress(line)
-	continue
-
-    if comments == 1:
-	comments = 2
-
-    progress(line)
-
-    cmd = parts[0].strip().lower()
-    opc = parts[1].strip().lower()
-    tid = parts[2].strip()
-    dat = parts[3].strip()
-
-    try:
-        # Test or wait for a status value
-        if cmd == "t" or cmd == "w":
-            testop = test_opcodes[opc]
-
-            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
-            if test:
-		print fname
-                continue
-
-            while 1:
-                query = 1
-                fsta = open(fname, 'r')
-                status = fsta.readline().strip()
-                fsta.close()
-                stat = status.split(",")
-                for s in stat:
-		    s = s.strip()
-                    if s.startswith(testop[0]):
-                        # Separate status value
-                        val = s[2:].strip()
-                        query = analyse(val, testop, dat)
-                        break
-                if query or cmd == "t":
-                    break
-
-            progress("   " + status)
-
-            if not query:
-                sys.stderr.write("Test failed in line %d\n" %(linenr))
-		sys.exit(1)
-
-        # Issue a command to the tester
-        elif cmd == "c":
-            cmdnr = cmd_opcodes[opc]
-            # Build command string and sys filename
-            cmdstr = "%s:%s" %(cmdnr, dat)
-            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
-            if test:
-		print fname
-                continue
-            fcmd = open(fname, 'w')
-            fcmd.write(cmdstr)
-            fcmd.close()
-
-    except Exception,ex:
-    	sys.stderr.write(str(ex))
-        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
-        if not test:
-            fd.close()
-            sys.exit(1)
-
-# Normal exit pass
-print "Pass"
-sys.exit(0)
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
deleted file mode 100644
index 3710c8b..0000000
--- a/scripts/rt-tester/t2-l1-2rt-sameprio.tst
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-C: locknowait:		1:	0
-W: locked:		0: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-W: locked:		1: 	0
-
-# Verify T0
-W: unlocked:		0: 	0
-T: prioeq:		0: 	80
-
-# Unlock
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
-# T1,T0 lock L0
-C: locknowait:		1: 	0
-C: locknowait:		0:	0
-W: locked:		1: 	0
-W: blocked:		0: 	0
-T: prioeq:		1: 	80
-
-# T1 unlock L0
-C: unlock:		1: 	0
-W: locked:		0: 	0
-
-# Verify T1
-W: unlocked:		1: 	0
-T: prioeq:		1: 	80
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
-
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
deleted file mode 100644
index b4cc959..0000000
--- a/scripts/rt-tester/t2-l1-pi.tst
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock with priority inversion
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-W: locked:		1: 	0
-
-# Verify T1
-W: unlocked:		0: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
deleted file mode 100644
index 1b57376..0000000
--- a/scripts/rt-tester/t2-l1-signal.tst
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 1 lock with priority inversion
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-
-# Interrupt T1
-C: signal:		1:	0
-W: unlocked:		1: 	0
-T: opcodeeq:		1:	-4
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
deleted file mode 100644
index 68b1062..0000000
--- a/scripts/rt-tester/t2-l2-2rt-deadlock.tst
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-# RT-Mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	0
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 2 threads 2 lock
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	80
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1:	1
-W: locked:		1: 	1
-
-# T0 lock L1
-C: lockintnowait:	0: 	1
-W: blocked:		0: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-
-# Make deadlock go away
-C: signal:		1:	0
-W: unlocked:		1:	0
-C: signal:		0:	0
-W: unlocked:		0:	1
-
-# Unlock and exit
-C: unlock:		0: 	0
-W: unlocked:		0: 	0
-C: unlock:		1: 	1
-W: unlocked:		1: 	1
-
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
deleted file mode 100644
index 8e6c8b1..0000000
--- a/scripts/rt-tester/t3-l1-pi-1rt.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
deleted file mode 100644
index 69c2212..0000000
--- a/scripts/rt-tester/t3-l1-pi-2rt.tst
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-T: prioeq:		1:	81
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
deleted file mode 100644
index 9b0f1eb..0000000
--- a/scripts/rt-tester/t3-l1-pi-3rt.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedfifo:		0: 	80
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: prioeq:		0:	80
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: locked:		1: 	0
-W: unlocked:		2: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
deleted file mode 100644
index 39ec74a..0000000
--- a/scripts/rt-tester/t3-l1-pi-signal.tst
+++ /dev/null
@@ -1,93 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-# Reset event counter
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set priorities
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-C: schedfifo:		2: 	81
-
-# T0 lock L0
-C: lock:		0:	0
-W: locked:		0: 	0
-
-# T1 lock L0, no wait in the wakeup path
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0:	80
-T: prioeq:		1:	80
-
-# T2 lock L0 interruptible, no wait in the wakeup path
-C: lockintnowait:	2:	0
-W: blocked:		2: 	0
-T: prioeq:		0:	81
-T: prioeq:		1:	80
-
-# Interrupt T2
-C: signal:		2:	2
-W: unlocked:		2:	0
-T: prioeq:		1:	80
-T: prioeq:		0:	80
-
-T: locked:		0:	0
-T: blocked:		1:	0
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T1 has locked L0 and exit
-W: locked:		1:	0
-W: unlocked:		0: 	0
-T: priolt:		0:	1
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
-
-
-
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
deleted file mode 100644
index e03db7e..0000000
--- a/scripts/rt-tester/t3-l1-pi-steal.tst
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 1 lock PI steal pending ownership
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	80
-C: schedfifo:		2: 	81
-
-# T0 lock L0
-C: lock:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: lock:		1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	80
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T1 is in the wakeup loop
-W: blockedwake:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: lock:		2: 	0
-# T1 leave wakeup loop
-C: lockcont:		1: 	0
-
-# T2 must have the lock and T1 must be blocked
-W: locked:		2: 	0
-W: blocked:		1: 	0
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-# Wait until T1 is in the wakeup loop and let it run
-W: blockedwake:		1: 	0
-C: lockcont:		1: 	0
-W: locked:		1: 	0
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
deleted file mode 100644
index 7b59100..0000000
--- a/scripts/rt-tester/t3-l2-pi.tst
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 3 threads 2 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L0
-C: locknowait:		1: 	0
-W: blocked:		1: 	0
-T: priolt:		0: 	1
-
-# T2 lock L0
-C: locknowait:		2: 	0
-W: blocked:		2: 	0
-T: prioeq:		0: 	82
-
-# T0 unlock L0
-C: unlock:		0: 	0
-
-# Wait until T2 got the lock
-W: locked:		2: 	0
-W: unlocked:		0:	0
-T: priolt:		0:	1
-
-# T2 unlock L0
-C: unlock:		2: 	0
-
-W: unlocked:		2: 	0
-W: locked:		1: 	0
-
-C: unlock:		1: 	0
-W: unlocked:		1: 	0
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
deleted file mode 100644
index 2f0e049..0000000
--- a/scripts/rt-tester/t4-l2-pi-deboost.tst
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 4 threads 2 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedother:		1: 	0
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T3 lock L0
-C: lockintnowait:	3: 	0
-W: blocked:		3: 	0
-T: prioeq:		0: 	83
-
-# T0 lock L1
-C: lock:		0: 	1
-W: blocked:		0: 	1
-T: prioeq:		1: 	83
-
-# T1 unlock L1
-C: unlock:		1:	1
-
-# Wait until T0 is in the wakeup code
-W: blockedwake:		0:	1
-
-# Verify that T1 is unboosted
-W: unlocked:		1: 	1
-T: priolt:		1: 	1
-
-# T2 lock L1 (T0 is boosted and pending owner !)
-C: locknowait:		2:	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	83
-
-# Interrupt T3 and wait until T3 returned
-C: signal:		3:	0
-W: unlocked:		3:	0
-
-# Verify prio of T0 (still pending owner,
-# but T2 is enqueued due to the previous boost by T3
-T: prioeq:		0:	82
-
-# Let T0 continue
-C: lockcont:		0:	1
-W: locked:		0:	1
-
-# Unlock L1 and let T2 get L1
-C: unlock:		0:	1
-W: locked:		2:	1
-
-# Verify that T0 is unboosted
-W: unlocked:		0:	1
-T: priolt:		0:	1
-
-# Unlock everything and exit
-C: unlock:		2:	1
-W: unlocked:		2:	1
-
-C: unlock:		0:	0
-W: unlocked:		0:	0
-
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
deleted file mode 100644
index 04f4034..0000000
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
+++ /dev/null
@@ -1,178 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 5 threads 4 lock PI - modify priority of blocked threads
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-C: schedfifo:		4: 	84
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L2
-C: locknowait:		2: 	2
-W: locked:		2: 	2
-
-# T2 lock L1
-C: lockintnowait:	2: 	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-
-# T3 lock L3
-C: locknowait:		3: 	3
-W: locked:		3: 	3
-
-# T3 lock L2
-C: lockintnowait:	3: 	2
-W: blocked:		3: 	2
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-
-# T4 lock L3
-C: lockintnowait:	4:	3
-W: blocked:		4: 	3
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-
-# Reduce prio of T4
-C: schedfifo:		4: 	80
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-T: prioeq:		4:	80
-
-# Increase prio of T4
-C: schedfifo:		4: 	84
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Reduce prio of T3
-C: schedfifo:		3: 	80
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Increase prio of T3
-C: schedfifo:		3: 	85
-T: prioeq:		0: 	85
-T: prioeq:		1:	85
-T: prioeq:		2:	85
-T: prioeq:		3:	85
-T: prioeq:		4:	84
-
-# Reduce prio of T3
-C: schedfifo:		3: 	83
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-T: prioeq:		4:	84
-
-# Signal T4
-C: signal:		4: 	0
-W: unlocked:		4: 	3
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-
-# Signal T3
-C: signal:		3: 	0
-W: unlocked:		3: 	2
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-T: prioeq:		2:	82
-
-# Signal T2
-C: signal:		2: 	0
-W: unlocked:		2: 	1
-T: prioeq:		0: 	81
-T: prioeq:		1:	81
-
-# Signal T1
-C: signal:		1: 	0
-W: unlocked:		1: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		3:	3
-C: unlock:		2:	2
-C: unlock:		1:	1
-C: unlock:		0:	0
-
-W: unlocked:		3:	3
-W: unlocked:		2:	2
-W: unlocked:		1:	1
-W: unlocked:		0:	0
-
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
deleted file mode 100644
index a48a6ee..0000000
--- a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
+++ /dev/null
@@ -1,138 +0,0 @@
-#
-# rt-mutex test
-#
-# Op: C(ommand)/T(est)/W(ait)
-# |  opcode
-# |  |     threadid: 0-7
-# |  |     |  opcode argument
-# |  |     |  |
-# C: lock: 0: 0
-#
-# Commands
-#
-# opcode	opcode argument
-# schedother	nice value
-# schedfifo	priority
-# lock		lock nr (0-7)
-# locknowait	lock nr (0-7)
-# lockint	lock nr (0-7)
-# lockintnowait	lock nr (0-7)
-# lockcont	lock nr (0-7)
-# unlock	lock nr (0-7)
-# signal	thread to signal (0-7)
-# reset		0
-# resetevent	0
-#
-# Tests / Wait
-#
-# opcode	opcode argument
-#
-# prioeq	priority
-# priolt	priority
-# priogt	priority
-# nprioeq	normal priority
-# npriolt	normal priority
-# npriogt	normal priority
-# locked	lock nr (0-7)
-# blocked	lock nr (0-7)
-# blockedwake	lock nr (0-7)
-# unlocked	lock nr (0-7)
-# opcodeeq	command opcode or number
-# opcodelt	number
-# opcodegt	number
-# eventeq	number
-# eventgt	number
-# eventlt	number
-
-#
-# 5 threads 4 lock PI
-#
-C: resetevent:		0: 	0
-W: opcodeeq:		0: 	0
-
-# Set schedulers
-C: schedother:		0: 	0
-C: schedfifo:		1: 	81
-C: schedfifo:		2: 	82
-C: schedfifo:		3: 	83
-C: schedfifo:		4: 	84
-
-# T0 lock L0
-C: locknowait:		0: 	0
-W: locked:		0: 	0
-
-# T1 lock L1
-C: locknowait:		1: 	1
-W: locked:		1: 	1
-
-# T1 lock L0
-C: lockintnowait:	1: 	0
-W: blocked:		1: 	0
-T: prioeq:		0: 	81
-
-# T2 lock L2
-C: locknowait:		2: 	2
-W: locked:		2: 	2
-
-# T2 lock L1
-C: lockintnowait:	2: 	1
-W: blocked:		2: 	1
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-
-# T3 lock L3
-C: locknowait:		3: 	3
-W: locked:		3: 	3
-
-# T3 lock L2
-C: lockintnowait:	3: 	2
-W: blocked:		3: 	2
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-
-# T4 lock L3
-C: lockintnowait:	4:	3
-W: blocked:		4: 	3
-T: prioeq:		0: 	84
-T: prioeq:		1:	84
-T: prioeq:		2:	84
-T: prioeq:		3:	84
-
-# Signal T4
-C: signal:		4: 	0
-W: unlocked:		4: 	3
-T: prioeq:		0: 	83
-T: prioeq:		1:	83
-T: prioeq:		2:	83
-T: prioeq:		3:	83
-
-# Signal T3
-C: signal:		3: 	0
-W: unlocked:		3: 	2
-T: prioeq:		0: 	82
-T: prioeq:		1:	82
-T: prioeq:		2:	82
-
-# Signal T2
-C: signal:		2: 	0
-W: unlocked:		2: 	1
-T: prioeq:		0: 	81
-T: prioeq:		1:	81
-
-# Signal T1
-C: signal:		1: 	0
-W: unlocked:		1: 	0
-T: priolt:		0: 	1
-
-# Unlock and exit
-C: unlock:		3:	3
-C: unlock:		2:	2
-C: unlock:		1:	1
-C: unlock:		0:	0
-
-W: unlocked:		3:	3
-W: unlocked:		2:	2
-W: unlocked:		1:	1
-W: unlocked:		0:	0
-
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 188c1d2..73455089 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -400,9 +400,9 @@
 {
 	bool match = false;
 
-	rcu_lockdep_assert(rcu_read_lock_held() ||
-			   lockdep_is_held(&devcgroup_mutex),
-			   "device_cgroup:verify_new_ex called without proper synchronization");
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
+			 !lockdep_is_held(&devcgroup_mutex),
+			 "device_cgroup:verify_new_ex called without proper synchronization");
 
 	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
 		if (behavior == DEVCG_DEFAULT_ALLOW) {
diff --git a/security/security.c b/security/security.c
index 595fffa..75b85fd 100644
--- a/security/security.c
+++ b/security/security.c
@@ -380,8 +380,8 @@
 		return 0;
 
 	if (!initxattrs)
-		return call_int_hook(inode_init_security, 0, inode, dir, qstr,
-							 NULL, NULL, NULL);
+		return call_int_hook(inode_init_security, -EOPNOTSUPP, inode,
+				     dir, qstr, NULL, NULL, NULL);
 	memset(new_xattrs, 0, sizeof(new_xattrs));
 	lsm_xattr = new_xattrs;
 	ret = call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir, qstr,
@@ -409,8 +409,8 @@
 {
 	if (unlikely(IS_PRIVATE(inode)))
 		return -EOPNOTSUPP;
-	return call_int_hook(inode_init_security, 0, inode, dir, qstr,
-				name, value, len);
+	return call_int_hook(inode_init_security, -EOPNOTSUPP, inode, dir,
+			     qstr, name, value, len);
 }
 EXPORT_SYMBOL(security_old_inode_init_security);
 
@@ -776,7 +776,7 @@
 	 * ditto if it's not on noexec mount, except that on !MMU we need
 	 * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case
 	 */
-	if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
+	if (!path_noexec(&file->f_path)) {
 #ifndef CONFIG_MMU
 		if (file->f_op->mmap_capabilities) {
 			unsigned caps = file->f_op->mmap_capabilities(file);
@@ -1281,7 +1281,8 @@
 
 int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
 {
-	return call_int_hook(socket_getpeersec_dgram, 0, sock, skb, secid);
+	return call_int_hook(socket_getpeersec_dgram, -ENOPROTOOPT, sock,
+			     skb, secid);
 }
 EXPORT_SYMBOL(security_socket_getpeersec_dgram);
 
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index 2b50cbe..52e4bc5 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -18,6 +18,68 @@
 #include <sound/ac97_codec.h>
 
 /*
+ * snd_ac97_check_id() - Reads and checks the vendor ID of the device
+ * @ac97: The AC97 device to check
+ * @id: The ID to compare to
+ * @id_mask: Mask that is applied to the device ID before comparing to @id
+ *
+ * If @id is 0 this function returns true if the read device vendor ID is
+ * a valid ID. If @id is non-zero this function returns true if @id
+ * matches the read vendor ID. Otherwise the function returns false.
+ */
+static bool snd_ac97_check_id(struct snd_ac97 *ac97, unsigned int id,
+	unsigned int id_mask)
+{
+	ac97->id = ac97->bus->ops->read(ac97, AC97_VENDOR_ID1) << 16;
+	ac97->id |= ac97->bus->ops->read(ac97, AC97_VENDOR_ID2);
+
+	if (ac97->id == 0x0 || ac97->id == 0xffffffff)
+		return false;
+
+	if (id != 0 && id != (ac97->id & id_mask))
+		return false;
+
+	return true;
+}
+
+/**
+ * snd_ac97_reset() - Reset AC'97 device
+ * @ac97: The AC'97 device to reset
+ * @try_warm: Try a warm reset first
+ * @id: Expected device vendor ID
+ * @id_mask: Mask that is applied to the device ID before comparing to @id
+ *
+ * This function resets the AC'97 device. If @try_warm is true the function
+ * first performs a warm reset. If the warm reset is successful the function
+ * returns 1. Otherwise, or if @try_warm is false, the function issues a cold reset
+ * followed by a warm reset. If this is successful the function returns 0,
+ * otherwise a negative error code. If @id is 0 any valid device ID will be
+ * accepted, otherwise only the ID that matches @id and @id_mask is accepted.
+ */
+int snd_ac97_reset(struct snd_ac97 *ac97, bool try_warm, unsigned int id,
+	unsigned int id_mask)
+{
+	struct snd_ac97_bus_ops *ops = ac97->bus->ops;
+
+	if (try_warm && ops->warm_reset) {
+		ops->warm_reset(ac97);
+		if (snd_ac97_check_id(ac97, id, id_mask))
+			return 1;
+	}
+
+	if (ops->reset)
+		ops->reset(ac97);
+	if (ops->warm_reset)
+		ops->warm_reset(ac97);
+
+	if (snd_ac97_check_id(ac97, id, id_mask))
+		return 0;
+
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(snd_ac97_reset);
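+
+/*
+ * Illustrative use (hypothetical caller): accept any valid codec ID and
+ * try a warm reset first:
+ *
+ *	ret = snd_ac97_reset(ac97, true, 0, 0);
+ *	if (ret < 0)
+ *		return ret;
+ */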
+
+/*
  * Let drivers decide whether they want to support given codec from their
  * probe method. Drivers have direct access to the struct snd_ac97
  * structure and may  decide based on the id field amongst other things.
@@ -27,35 +89,9 @@
 	return 1;
 }
 
-#ifdef CONFIG_PM
-static int ac97_bus_suspend(struct device *dev, pm_message_t state)
-{
-	int ret = 0;
-
-	if (dev->driver && dev->driver->suspend)
-		ret = dev->driver->suspend(dev, state);
-
-	return ret;
-}
-
-static int ac97_bus_resume(struct device *dev)
-{
-	int ret = 0;
-
-	if (dev->driver && dev->driver->resume)
-		ret = dev->driver->resume(dev);
-
-	return ret;
-}
-#endif /* CONFIG_PM */
-
 struct bus_type ac97_bus_type = {
 	.name		= "ac97",
 	.match		= ac97_bus_match,
-#ifdef CONFIG_PM
-	.suspend	= ac97_bus_suspend,
-	.resume		= ac97_bus_resume,
-#endif /* CONFIG_PM */
 };
 
 static int __init ac97_bus_init(void)
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 23c371ec..a04edff 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -1050,7 +1050,6 @@
 static struct i2c_driver onyx_driver = {
 	.driver = {
 		.name = "aoa_codec_onyx",
-		.owner = THIS_MODULE,
 	},
 	.probe = onyx_i2c_probe,
 	.remove = onyx_i2c_remove,
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 364c7c4..78ed1ff 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -939,7 +939,6 @@
 static struct i2c_driver tas_driver = {
 	.driver = {
 		.name = "aoa_codec_tas",
-		.owner = THIS_MODULE,
 	},
 	.probe = tas_i2c_probe,
 	.remove = tas_i2c_remove,
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 9dc5806..8f71f7e 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -1120,10 +1120,10 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int aoa_fabric_layout_suspend(struct soundbus_dev *sdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int aoa_fabric_layout_suspend(struct device *dev)
 {
-	struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
+	struct layout_dev *ldev = dev_get_drvdata(dev);
 
 	if (ldev->gpio.methods && ldev->gpio.methods->all_amps_off)
 		ldev->gpio.methods->all_amps_off(&ldev->gpio);
@@ -1131,15 +1131,19 @@
 	return 0;
 }
 
-static int aoa_fabric_layout_resume(struct soundbus_dev *sdev)
+static int aoa_fabric_layout_resume(struct device *dev)
 {
-	struct layout_dev *ldev = dev_get_drvdata(&sdev->ofdev.dev);
+	struct layout_dev *ldev = dev_get_drvdata(dev);
 
 	if (ldev->gpio.methods && ldev->gpio.methods->all_amps_restore)
 		ldev->gpio.methods->all_amps_restore(&ldev->gpio);
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(aoa_fabric_layout_pm_ops,
+	aoa_fabric_layout_suspend, aoa_fabric_layout_resume);
+
 #endif
 
 static struct soundbus_driver aoa_soundbus_driver = {
@@ -1147,12 +1151,11 @@
 	.owner = THIS_MODULE,
 	.probe = aoa_fabric_layout_probe,
 	.remove = aoa_fabric_layout_remove,
-#ifdef CONFIG_PM
-	.suspend = aoa_fabric_layout_suspend,
-	.resume = aoa_fabric_layout_resume,
-#endif
 	.driver = {
 		.owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &aoa_fabric_layout_pm_ops,
+#endif
 	}
 };
 
diff --git a/sound/aoa/soundbus/core.c b/sound/aoa/soundbus/core.c
index 3edf736..70bcaa7 100644
--- a/sound/aoa/soundbus/core.c
+++ b/sound/aoa/soundbus/core.c
@@ -126,30 +126,6 @@
 		drv->shutdown(soundbus_dev);
 }
 
-#ifdef CONFIG_PM
-
-static int soundbus_device_suspend(struct device *dev, pm_message_t state)
-{
-	struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
-	struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
-
-	if (dev->driver && drv->suspend)
-		return drv->suspend(soundbus_dev, state);
-	return 0;
-}
-
-static int soundbus_device_resume(struct device * dev)
-{
-	struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
-	struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
-
-	if (dev->driver && drv->resume)
-		return drv->resume(soundbus_dev);
-	return 0;
-}
-
-#endif /* CONFIG_PM */
-
 /* soundbus_dev_attrs is declared in sysfs.c */
 ATTRIBUTE_GROUPS(soundbus_dev);
 static struct bus_type soundbus_bus_type = {
@@ -158,10 +134,6 @@
 	.uevent		= soundbus_uevent,
 	.remove		= soundbus_device_remove,
 	.shutdown	= soundbus_device_shutdown,
-#ifdef CONFIG_PM
-	.suspend	= soundbus_device_suspend,
-	.resume		= soundbus_device_resume,
-#endif
 	.dev_groups	= soundbus_dev_groups,
 };
 
diff --git a/sound/aoa/soundbus/soundbus.h b/sound/aoa/soundbus/soundbus.h
index 21e756c..ae40224 100644
--- a/sound/aoa/soundbus/soundbus.h
+++ b/sound/aoa/soundbus/soundbus.h
@@ -188,8 +188,6 @@
 	int	(*probe)(struct soundbus_dev* dev);
 	int	(*remove)(struct soundbus_dev* dev);
 
-	int	(*suspend)(struct soundbus_dev* dev, pm_message_t state);
-	int	(*resume)(struct soundbus_dev* dev);
 	int	(*shutdown)(struct soundbus_dev* dev);
 
 	struct device_driver driver;
diff --git a/sound/firewire/bebob/bebob_pcm.c b/sound/firewire/bebob/bebob_pcm.c
index 7a2c1f5..c0f018a 100644
--- a/sound/firewire/bebob/bebob_pcm.c
+++ b/sound/firewire/bebob/bebob_pcm.c
@@ -211,26 +211,38 @@
 		      struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_bebob *bebob = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&bebob->substreams_counter);
 	amdtp_stream_set_pcm_format(&bebob->tx_stream,
 				    params_format(hw_params));
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+
+	return 0;
 }
 static int
 pcm_playback_hw_params(struct snd_pcm_substream *substream,
 		       struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_bebob *bebob = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&bebob->substreams_counter);
 	amdtp_stream_set_pcm_format(&bebob->rx_stream,
 				    params_format(hw_params));
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+
+	return 0;
 }
 
 static int
diff --git a/sound/firewire/dice/dice-pcm.c b/sound/firewire/dice/dice-pcm.c
index f7771451..4e67b1d 100644
--- a/sound/firewire/dice/dice-pcm.c
+++ b/sound/firewire/dice/dice-pcm.c
@@ -230,6 +230,12 @@
 			     struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_dice *dice = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&dice->mutex);
@@ -240,13 +246,18 @@
 	amdtp_stream_set_pcm_format(&dice->tx_stream,
 				    params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int playback_hw_params(struct snd_pcm_substream *substream,
 			      struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_dice *dice = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&dice->mutex);
@@ -257,8 +268,7 @@
 	amdtp_stream_set_pcm_format(&dice->rx_stream,
 				    params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/fireworks/fireworks_pcm.c b/sound/firewire/fireworks/fireworks_pcm.c
index 8a34753..c30b2ff 100644
--- a/sound/firewire/fireworks/fireworks_pcm.c
+++ b/sound/firewire/fireworks/fireworks_pcm.c
@@ -244,25 +244,35 @@
 				 struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_efw *efw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&efw->capture_substreams);
 	amdtp_stream_set_pcm_format(&efw->tx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_efw *efw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
 		atomic_inc(&efw->playback_substreams);
 	amdtp_stream_set_pcm_format(&efw->rx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/oxfw/oxfw-pcm.c b/sound/firewire/oxfw/oxfw-pcm.c
index 67ade07..9c73930 100644
--- a/sound/firewire/oxfw/oxfw-pcm.c
+++ b/sound/firewire/oxfw/oxfw-pcm.c
@@ -231,7 +231,12 @@
 				 struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_oxfw *oxfw = substream->private_data;
+	int err;
 
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&oxfw->mutex);
@@ -241,13 +246,18 @@
 
 	amdtp_stream_set_pcm_format(&oxfw->tx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 static int pcm_playback_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
 	struct snd_oxfw *oxfw = substream->private_data;
+	int err;
+
+	err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+					       params_buffer_bytes(hw_params));
+	if (err < 0)
+		return err;
 
 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN) {
 		mutex_lock(&oxfw->mutex);
@@ -257,8 +267,7 @@
 
 	amdtp_stream_set_pcm_format(&oxfw->rx_stream, params_format(hw_params));
 
-	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
-						params_buffer_bytes(hw_params));
+	return 0;
 }
 
 static int pcm_capture_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index 873d40f..77ad5b9 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -512,12 +512,11 @@
 	if (err < 0)
 		goto end;
 
-	formats[eid] = kmalloc(*len, GFP_KERNEL);
+	formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
 	if (formats[eid] == NULL) {
 		err = -ENOMEM;
 		goto end;
 	}
-	memcpy(formats[eid], buf, *len);
 
 	/* apply the format for each available sampling rate */
 	for (i = 0; i < ARRAY_SIZE(oxfw_rate_table); i++) {
@@ -531,12 +530,11 @@
 			continue;
 
 		eid++;
-		formats[eid] = kmalloc(*len, GFP_KERNEL);
+		formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
 		if (formats[eid] == NULL) {
 			err = -ENOMEM;
 			goto end;
 		}
-		memcpy(formats[eid], buf, *len);
 		formats[eid][2] = avc_stream_rate_table[i];
 	}
 
@@ -594,12 +592,11 @@
 		if (err < 0)
 			break;
 
-		formats[eid] = kmalloc(len, GFP_KERNEL);
+		formats[eid] = kmemdup(buf, len, GFP_KERNEL);
 		if (formats[eid] == NULL) {
 			err = -ENOMEM;
 			break;
 		}
-		memcpy(formats[eid], buf, len);
 
 		/* get next entry */
 		len = AVC_GENERIC_FRAME_MAXIMUM_BYTES;
diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c
index 0aa5d9e..4449d1a9 100644
--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -125,7 +125,7 @@
 }
 
 /**
- * snd_hdac_ext_device_init - initialize the HDA extended codec base device
+ * snd_hdac_ext_bus_device_init - initialize the HDA extended codec base device
  * @ebus: hdac extended bus to attach to
  * @addr: codec address
  *
@@ -133,14 +133,16 @@
  */
 int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *ebus, int addr)
 {
+	struct hdac_ext_device *edev;
 	struct hdac_device *hdev = NULL;
 	struct hdac_bus *bus = ebus_to_hbus(ebus);
 	char name[15];
 	int ret;
 
-	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
-	if (!hdev)
+	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+	if (!edev)
 		return -ENOMEM;
+	hdev = &edev->hdac;
 
 	snprintf(name, sizeof(name), "ehdaudio%dD%d", ebus->idx, addr);
 
@@ -158,6 +160,7 @@
 		snd_hdac_ext_bus_device_exit(hdev);
 		return ret;
 	}
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_init);
@@ -168,7 +171,94 @@
  */
 void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev)
 {
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+
 	snd_hdac_device_exit(hdev);
-	kfree(hdev);
+	kfree(edev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_exit);
+
+/**
+ * snd_hdac_ext_bus_device_remove - remove HD-audio extended codec base devices
+ *
+ * @ebus: HD-audio extended bus
+ */
+void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus)
+{
+	struct hdac_device *codec, *__codec;
+	/*
+	 * We need to remove all the codec device objects created in
+	 * snd_hdac_ext_bus_device_init().
+	 */
+	list_for_each_entry_safe(codec, __codec, &ebus->bus.codec_list, list) {
+		snd_hdac_device_unregister(codec);
+		put_device(&codec->dev);
+	}
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_device_remove);
+#define dev_to_hdac(dev) (container_of((dev), \
+			struct hdac_device, dev))
+
+static inline struct hdac_ext_driver *get_edrv(struct device *dev)
+{
+	struct hdac_driver *hdrv = drv_to_hdac_driver(dev->driver);
+	struct hdac_ext_driver *edrv = to_ehdac_driver(hdrv);
+
+	return edrv;
+}
+
+static inline struct hdac_ext_device *get_edev(struct device *dev)
+{
+	struct hdac_device *hdev = dev_to_hdac_dev(dev);
+	struct hdac_ext_device *edev = to_ehdac_device(hdev);
+
+	return edev;
+}
+
+static int hda_ext_drv_probe(struct device *dev)
+{
+	return (get_edrv(dev))->probe(get_edev(dev));
+}
+
+static int hdac_ext_drv_remove(struct device *dev)
+{
+	return (get_edrv(dev))->remove(get_edev(dev));
+}
+
+static void hdac_ext_drv_shutdown(struct device *dev)
+{
+	return (get_edrv(dev))->shutdown(get_edev(dev));
+}
+
+/**
+ * snd_hda_ext_driver_register - register a driver for ext hda devices
+ *
+ * @drv: ext hda driver structure
+ */
+int snd_hda_ext_driver_register(struct hdac_ext_driver *drv)
+{
+	drv->hdac.type = HDA_DEV_ASOC;
+	drv->hdac.driver.bus = &snd_hda_bus_type;
+	/* we use default match */
+
+	if (drv->probe)
+		drv->hdac.driver.probe = hda_ext_drv_probe;
+	if (drv->remove)
+		drv->hdac.driver.remove = hdac_ext_drv_remove;
+	if (drv->shutdown)
+		drv->hdac.driver.shutdown = hdac_ext_drv_shutdown;
+
+	return driver_register(&drv->hdac.driver);
+}
+EXPORT_SYMBOL_GPL(snd_hda_ext_driver_register);
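+
+/*
+ * Illustrative registration from a codec driver (hypothetical names):
+ *
+ *	static struct hdac_ext_driver my_ext_driver = {
+ *		.hdac.driver.name	= "my-ext-codec",
+ *		.probe			= my_probe,
+ *		.remove			= my_remove,
+ *	};
+ *
+ *	snd_hda_ext_driver_register(&my_ext_driver);
+ */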
+
+/**
+ * snd_hda_ext_driver_unregister - unregister a driver for ext hda devices
+ *
+ * @drv: ext hda driver structure
+ */
+void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv)
+{
+	driver_unregister(&drv->hdac.driver);
+}
+EXPORT_SYMBOL_GPL(snd_hda_ext_driver_unregister);
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 358f161..63215b1 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -177,8 +177,8 @@
 		hlink->bus = bus;
 		hlink->ml_addr = ebus->mlcap + AZX_ML_BASE +
 					(AZX_ML_INTERVAL * idx);
-		hlink->lcaps  = snd_hdac_chip_readl(bus, ML_LCAP);
-		hlink->lsdiid = snd_hdac_chip_readw(bus, ML_LSDIID);
+		hlink->lcaps  = readl(hlink->ml_addr + AZX_REG_ML_LCAP);
+		hlink->lsdiid = readw(hlink->ml_addr + AZX_REG_ML_LSDIID);
 
 		list_add_tail(&hlink->list, &ebus->hlink_list);
 	}
@@ -243,7 +243,7 @@
 	timeout = 50;
 
 	do {
-		val = snd_hdac_chip_readl(link->bus, ML_LCTL);
+		val = readl(link->ml_addr + AZX_REG_ML_LCTL);
 		if (enable) {
 			if (((val & mask) >> AZX_MLCTL_CPA))
 				return 0;
@@ -263,7 +263,7 @@
  */
 int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link)
 {
-	snd_hdac_chip_updatel(link->bus, ML_LCTL, 0, AZX_MLCTL_SPA);
+	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
 
 	return check_hdac_link_power_active(link, true);
 }
@@ -275,8 +275,28 @@
  */
 int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link)
 {
-	snd_hdac_chip_updatel(link->bus, ML_LCTL, AZX_MLCTL_SPA, 0);
+	snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
 
 	return check_hdac_link_power_active(link, false);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down);
+
+/**
+ * snd_hdac_ext_bus_link_power_down_all - power down all HDA links
+ * @ebus: HD-audio extended bus
+ */
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus)
+{
+	struct hdac_ext_link *hlink = NULL;
+	int ret;
+
+	list_for_each_entry(hlink, &ebus->hlink_list, list) {
+		snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
+		ret = check_hdac_link_power_active(hlink, false);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 3de47dd..33ba77d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -49,6 +49,16 @@
 				AZX_PPLC_INTERVAL * idx;
 	}
 
+	if (ebus->spbcap) {
+		stream->spib_addr = ebus->spbcap + AZX_SPB_BASE +
+					AZX_SPB_INTERVAL * idx +
+					AZX_SPB_SPIB;
+
+		stream->fifo_addr = ebus->spbcap + AZX_SPB_BASE +
+					AZX_SPB_INTERVAL * idx +
+					AZX_SPB_MAXFIFO;
+	}
+
 	stream->decoupled = false;
 	snd_hdac_stream_init(bus, &stream->hstream, idx, direction, tag);
 }
@@ -281,17 +291,12 @@
 	struct hdac_ext_stream *res = NULL;
 	struct hdac_stream *stream = NULL;
 	struct hdac_bus *hbus = &ebus->bus;
-	int key;
 
 	if (!ebus->ppcap) {
 		dev_err(hbus->dev, "stream type not supported\n");
 		return NULL;
 	}
 
-	/* make a non-zero unique key for the substream */
-	key = (substream->pcm->device << 16) | (substream->number << 2) |
-			(substream->stream + 1);
-
 	list_for_each_entry(stream, &hbus->stream_list, list) {
 		struct hdac_ext_stream *hstream = container_of(stream,
 						struct hdac_ext_stream,
@@ -310,7 +315,6 @@
 		spin_lock_irq(&hbus->reg_lock);
 		res->hstream.opened = 1;
 		res->hstream.running = 0;
-		res->hstream.assigned_key = key;
 		res->hstream.substream = substream;
 		spin_unlock_irq(&hbus->reg_lock);
 	}
@@ -423,7 +427,7 @@
 
 	mask |= (1 << index);
 
-	register_mask = snd_hdac_chip_readl(bus, SPB_SPBFCCTL);
+	register_mask = readl(ebus->spbcap + AZX_REG_SPB_SPBFCCTL);
 
 	mask |= register_mask;
 
@@ -435,6 +439,50 @@
 EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_spbcap_enable);
 
 /**
+ * snd_hdac_ext_stream_set_spib - sets the spib value of a stream
+ * @ebus: HD-audio ext core bus
+ * @stream: hdac_ext_stream
+ * @value: spib value to set
+ */
+int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream, u32 value)
+{
+	struct hdac_bus *bus = &ebus->bus;
+
+	if (!ebus->spbcap) {
+		dev_err(bus->dev, "Address of SPB capability is NULL");
+		return -EINVAL;
+	}
+
+	writel(value, stream->spib_addr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_set_spib);
+
+/**
+ * snd_hdac_ext_stream_get_spbmaxfifo - gets the spib maxfifo value of a stream
+ * @ebus: HD-audio ext core bus
+ * @stream: hdac_ext_stream
+ *
+ * Return maxfifo for the stream
+ */
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+				 struct hdac_ext_stream *stream)
+{
+	struct hdac_bus *bus = &ebus->bus;
+
+	if (!ebus->spbcap) {
+		dev_err(bus->dev, "Address of SPB capability is NULL");
+		return -EINVAL;
+	}
+
+	return readl(stream->fifo_addr);
+}
+EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_get_spbmaxfifo);
+
+/**
  * snd_hdac_ext_stop_streams - stop all stream if running
  * @ebus: HD-audio ext core bus
  */
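
A short sketch of the new SPIB helpers as they might appear in a driver's pointer/ack path; the wrapper function and app_pos argument are illustrative, the two helper calls are the ones added above:

static int my_update_spib(struct hdac_ext_bus *ebus,
			  struct hdac_ext_stream *stream, u32 app_pos)
{
	int maxfifo = snd_hdac_ext_stream_get_spbmaxfifo(ebus, stream);

	if (maxfifo < 0)
		return maxfifo;	/* no SPB capability on this bus */

	/* report how far the application pointer has advanced */
	return snd_hdac_ext_stream_set_spib(ebus, stream, app_pos);
}
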
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index cdee710..db96042 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -372,6 +372,36 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_refresh_widgets);
 
+/**
+ * snd_hdac_refresh_widget_sysfs - Reset the codec widgets and reinit the
+ * codec sysfs
+ * @codec: the codec object
+ *
+ * First remove the sysfs entries, then refresh the widgets, and
+ * finally re-create the sysfs entries.
+ */
+int snd_hdac_refresh_widget_sysfs(struct hdac_device *codec)
+{
+	int ret;
+
+	if (device_is_registered(&codec->dev))
+		hda_widget_sysfs_exit(codec);
+	ret = snd_hdac_refresh_widgets(codec);
+	if (ret) {
+		dev_err(&codec->dev, "failed to refresh widget: %d\n", ret);
+		return ret;
+	}
+	if (device_is_registered(&codec->dev)) {
+		ret = hda_widget_sysfs_init(codec);
+		if (ret) {
+			dev_err(&codec->dev, "failed to init sysfs: %d\n", ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_refresh_widget_sysfs);
+
 /* return CONNLIST_LEN parameter of the given widget */
 static unsigned int get_num_conns(struct hdac_device *codec, hda_nid_t nid)
 {
@@ -501,23 +531,27 @@
  * This function calls the runtime PM helper to power up the given codec.
  * Unlike snd_hdac_power_up_pm(), you should call this only for the code
  * path that isn't included in PM path.  Otherwise it gets stuck.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_up(struct hdac_device *codec)
+int snd_hdac_power_up(struct hdac_device *codec)
 {
-	pm_runtime_get_sync(&codec->dev);
+	return pm_runtime_get_sync(&codec->dev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_up);
 
 /**
  * snd_hdac_power_down - power down the codec
  * @codec: the codec object
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_down(struct hdac_device *codec)
+int snd_hdac_power_down(struct hdac_device *codec)
 {
 	struct device *dev = &codec->dev;
 
 	pm_runtime_mark_last_busy(dev);
-	pm_runtime_put_autosuspend(dev);
+	return pm_runtime_put_autosuspend(dev);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_down);
 
@@ -529,11 +563,14 @@
  * which may be called by PM suspend/resume again.  OTOH, if a power-up
  * call must wake up the sleeper (e.g. in a kctl callback), use
  * snd_hdac_power_up() instead.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_up_pm(struct hdac_device *codec)
+int snd_hdac_power_up_pm(struct hdac_device *codec)
 {
 	if (!atomic_inc_not_zero(&codec->in_pm))
-		snd_hdac_power_up(codec);
+		return snd_hdac_power_up(codec);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
 
@@ -543,11 +580,14 @@
  *
  * Like snd_hdac_power_up_pm(), this function is used in a recursive
  * code path like init code which may be called by PM suspend/resume again.
+ *
+ * Returns zero if successful, or a negative error code.
  */
-void snd_hdac_power_down_pm(struct hdac_device *codec)
+int snd_hdac_power_down_pm(struct hdac_device *codec)
 {
 	if (atomic_dec_if_positive(&codec->in_pm) < 0)
-		snd_hdac_power_down(codec);
+		return snd_hdac_power_down(codec);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
 #endif
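
With the void-to-int conversion above, callers can now propagate runtime-PM failures. A hedged sketch of the intended pairing, mirroring the hdac_regmap change below; the verb-access body is illustrative only:

static int my_codec_access(struct hdac_device *codec)
{
	int err;

	err = snd_hdac_power_up_pm(codec);
	if (err >= 0) {
		/* ... issue verbs / touch codec registers here ... */
		err = 0;
	}
	/* balance the power-up above, as the regmap helpers do */
	snd_hdac_power_down_pm(codec);
	return err;
}
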
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 5676b84..55c3df4 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -134,6 +134,16 @@
 	return !strcmp(dev->driver->name, "i915");
 }
 
+int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
+{
+	if (WARN_ON(!hdac_acomp))
+		return -ENODEV;
+
+	hdac_acomp->audio_ops = aops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_i915_register_notifier);
+
 int snd_hdac_i915_init(struct hdac_bus *bus)
 {
 	struct component_match *match = NULL;
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
index 1eabcdf..b0ed870 100644
--- a/sound/hda/hdac_regmap.c
+++ b/sound/hda/hdac_regmap.c
@@ -410,8 +410,9 @@
 
 	err = reg_raw_write(codec, reg, val);
 	if (err == -EAGAIN) {
-		snd_hdac_power_up_pm(codec);
-		err = reg_raw_write(codec, reg, val);
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_write(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
 	return err;
@@ -442,8 +443,9 @@
 
 	err = reg_raw_read(codec, reg, val);
 	if (err == -EAGAIN) {
-		snd_hdac_power_up_pm(codec);
-		err = reg_raw_read(codec, reg, val);
+		err = snd_hdac_power_up_pm(codec);
+		if (!err)
+			err = reg_raw_read(codec, reg, val);
 		snd_hdac_power_down_pm(codec);
 	}
 	return err;
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index 4c15d0a..8981159 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -286,6 +286,28 @@
 }
 EXPORT_SYMBOL_GPL(snd_hdac_stream_release);
 
+/**
+ * snd_hdac_get_stream - return hdac_stream based on stream_tag and
+ * direction
+ *
+ * @bus: HD-audio core bus
+ * @dir: direction for the stream to be found
+ * @stream_tag: stream tag for stream to be found
+ */
+struct hdac_stream *snd_hdac_get_stream(struct hdac_bus *bus,
+					int dir, int stream_tag)
+{
+	struct hdac_stream *s;
+
+	list_for_each_entry(s, &bus->stream_list, list) {
+		if (s->direction == dir && s->stream_tag == stream_tag)
+			return s;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_hdac_get_stream);
+
 /*
  * set up a BDL entry
  */
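
A hedged sketch of the lookup helper in an interrupt path; the handler is hypothetical, only snd_hdac_get_stream() comes from this patch:

static void my_stream_irq(struct hdac_bus *bus, int dir, int tag)
{
	struct hdac_stream *s = snd_hdac_get_stream(bus, dir, tag);

	if (!s || !s->substream)
		return;
	snd_pcm_period_elapsed(s->substream);
}
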
diff --git a/sound/hda/hdac_sysfs.c b/sound/hda/hdac_sysfs.c
index 0a6ce3b..c71142d 100644
--- a/sound/hda/hdac_sysfs.c
+++ b/sound/hda/hdac_sysfs.c
@@ -321,8 +321,7 @@
 			free_widget_node(*p, &widget_node_group);
 		kfree(tree->nodes);
 	}
-	if (tree->root)
-		kobject_put(tree->root);
+	kobject_put(tree->root);
 	kfree(tree);
 	codec->widgets = NULL;
 }
@@ -391,6 +390,9 @@
 {
 	int err;
 
+	if (codec->widgets)
+		return 0; /* already created */
+
 	err = widget_tree_create(codec);
 	if (err < 0) {
 		widget_tree_free(codec);
diff --git a/sound/pci/echoaudio/darla20_dsp.c b/sound/pci/echoaudio/darla20_dsp.c
index febee5b..320837b 100644
--- a/sound/pci/echoaudio/darla20_dsp.c
+++ b/sound/pci/echoaudio/darla20_dsp.c
@@ -44,18 +44,18 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_DARLA20_DSP;
 	chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
 	chip->clock_state = GD_CLOCK_UNDEF;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/darla24_dsp.c b/sound/pci/echoaudio/darla24_dsp.c
index 7b4f6fd..8736b5e 100644
--- a/sound/pci/echoaudio/darla24_dsp.c
+++ b/sound/pci/echoaudio/darla24_dsp.c
@@ -44,17 +44,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_DARLA24_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_ESYNC;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/echo3g_dsp.c b/sound/pci/echoaudio/echo3g_dsp.c
index ae11ce1..6deb80c 100644
--- a/sound/pci/echoaudio/echo3g_dsp.c
+++ b/sound/pci/echoaudio/echo3g_dsp.c
@@ -59,8 +59,8 @@
 		cpu_to_le32((E3G_MAGIC_NUMBER / 48000) - 2);
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_ECHO3G_DSP;
 
 	/* Load the DSP code and the ASIC on the PCI card and get
@@ -78,8 +78,8 @@
 		chip->px_analog_in = chip->bx_analog_in = 14;
 		chip->px_digital_in = chip->bx_digital_in = 16;
 		chip->px_num = chip->bx_num = 24;
-		chip->has_phantom_power = TRUE;
-		chip->hasnt_input_nominal_level = TRUE;
+		chip->has_phantom_power = true;
+		chip->hasnt_input_nominal_level = true;
 	} else if (err == E3G_LAYLA3G_BOX_TYPE) {
 		chip->input_clock_types =	ECHO_CLOCK_BIT_INTERNAL |
 						ECHO_CLOCK_BIT_SPDIF |
@@ -106,10 +106,10 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->non_audio_spdif = FALSE;
-	chip->bad_board = FALSE;
-	chip->phantom_power = FALSE;
+	chip->professional_spdif = false;
+	chip->non_audio_spdif = false;
+	chip->bad_board = false;
+	chip->phantom_power = false;
 	return init_line_levels(chip);
 }
 
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 862db9a..1cb85ae 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2245,7 +2245,7 @@
 
 #ifdef ECHOCARD_HAS_MIDI
 	if (chip->midi_input_enabled)
-		enable_midi_input(chip, TRUE);
+		enable_midi_input(chip, true);
 	if (chip->midi_out)
 		snd_echo_midi_output_trigger(chip->midi_out, 1);
 #endif
diff --git a/sound/pci/echoaudio/echoaudio.h b/sound/pci/echoaudio/echoaudio.h
index 3251522..152ce15 100644
--- a/sound/pci/echoaudio/echoaudio.h
+++ b/sound/pci/echoaudio/echoaudio.h
@@ -153,9 +153,6 @@
 #define _ECHOAUDIO_H_
 
 
-#define TRUE 1
-#define FALSE 0
-
 #include "echoaudio_dsp.h"
 
 
@@ -378,8 +375,8 @@
 					 */
 	u8 output_clock;		/* Layla20 only */
 	char meters_enabled;		/* VU-meters status */
-	char asic_loaded;		/* Set TRUE when ASIC loaded */
-	char bad_board;			/* Set TRUE if DSP won't load */
+	char asic_loaded;		/* Set true when ASIC loaded */
+	char bad_board;			/* Set true if DSP won't load */
 	char professional_spdif;	/* 0 = consumer; 1 = professional */
 	char non_audio_spdif;		/* 3G - only */
 	char digital_in_automute;	/* Gina24, Layla24, Mona - only */
diff --git a/sound/pci/echoaudio/echoaudio_3g.c b/sound/pci/echoaudio/echoaudio_3g.c
index 2fa66dc..22c786b 100644
--- a/sound/pci/echoaudio/echoaudio_3g.c
+++ b/sound/pci/echoaudio/echoaudio_3g.c
@@ -41,7 +41,7 @@
 		return -EIO;
 
 	chip->comm_page->ext_box_status = cpu_to_le32(E3G_ASIC_NOT_LOADED);
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	clear_handshake(chip);
 	send_vector(chip, DSP_VC_TEST_ASIC);
 
@@ -55,7 +55,7 @@
 	if (box_status == E3G_ASIC_NOT_LOADED)
 		return -ENODEV;
 
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	return box_status & E3G_BOX_TYPE_MASK;
 }
 
@@ -243,7 +243,7 @@
 	 * 48 kHz, internal clock, S/PDIF RCA mode */
 	if (box_type >= 0) {
 		err = write_control_reg(chip, E3G_48KHZ,
-					E3G_FREQ_REG_DEFAULT, TRUE);
+					E3G_FREQ_REG_DEFAULT, true);
 		if (err < 0)
 			return err;
 	}
@@ -378,16 +378,16 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c
index 1a9427a..15aae2f 100644
--- a/sound/pci/echoaudio/echoaudio_dsp.c
+++ b/sound/pci/echoaudio/echoaudio_dsp.c
@@ -103,8 +103,8 @@
 		cond_resched();
 	}
 
-	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
-	dev_dbg(chip->card->dev, "write_dsp: Set bad_board to TRUE\n");
+	chip->bad_board = true;		/* Set true until DSP re-loaded */
+	dev_dbg(chip->card->dev, "write_dsp: Set bad_board to true\n");
 	return -EIO;
 }
 
@@ -126,8 +126,8 @@
 		cond_resched();
 	}
 
-	chip->bad_board = TRUE;		/* Set TRUE until DSP re-loaded */
-	dev_err(chip->card->dev, "read_dsp: Set bad_board to TRUE\n");
+	chip->bad_board = true;		/* Set true until DSP re-loaded */
+	dev_err(chip->card->dev, "read_dsp: Set bad_board to true\n");
 	return -EIO;
 }
 
@@ -166,7 +166,7 @@
 /* This card has no ASIC, just return ok */
 static inline int check_asic_status(struct echoaudio *chip)
 {
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	return 0;
 }
 
@@ -341,11 +341,11 @@
 		dev_warn(chip->card->dev, "DSP is already loaded!\n");
 		return 0;
 	}
-	chip->bad_board = TRUE;		/* Set TRUE until DSP loaded */
+	chip->bad_board = true;		/* Set true until DSP loaded */
 	chip->dsp_code = NULL;		/* Current DSP code not loaded */
-	chip->asic_loaded = FALSE;	/* Loading the DSP code will reset the ASIC */
+	chip->asic_loaded = false;	/* Loading the DSP code will reset the ASIC */
 
-	dev_dbg(chip->card->dev, "load_dsp: Set bad_board to TRUE\n");
+	dev_dbg(chip->card->dev, "load_dsp: Set bad_board to true\n");
 
 	/* If this board requires a resident loader, install it. */
 #ifdef DSP_56361
@@ -471,7 +471,7 @@
 			}
 
 			chip->dsp_code = code;		/* Show which DSP code loaded */
-			chip->bad_board = FALSE;	/* DSP OK */
+			chip->bad_board = false;	/* DSP OK */
 			return 0;
 		}
 		udelay(100);
@@ -951,10 +951,10 @@
 	/* Stops all active pipes (just to be sure) */
 	stop_transport(chip, chip->active_mask);
 
-	set_meters_on(chip, FALSE);
+	set_meters_on(chip, false);
 
 #ifdef ECHOCARD_HAS_MIDI
-	enable_midi_input(chip, FALSE);
+	enable_midi_input(chip, false);
 #endif
 
 	/* Go to sleep */
@@ -981,9 +981,9 @@
 
 	/* Init all the basic stuff */
 	chip->card_name = ECHOCARD_NAME;
-	chip->bad_board = TRUE;	/* Set TRUE until DSP loaded */
+	chip->bad_board = true;	/* Set true until DSP loaded */
 	chip->dsp_code = NULL;	/* Current DSP code not loaded */
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	memset(chip->comm_page, 0, sizeof(struct comm_page));
 
 	/* Init the comm page */
diff --git a/sound/pci/echoaudio/echoaudio_gml.c b/sound/pci/echoaudio/echoaudio_gml.c
index 23a0994..834b39e 100644
--- a/sound/pci/echoaudio/echoaudio_gml.c
+++ b/sound/pci/echoaudio/echoaudio_gml.c
@@ -48,7 +48,7 @@
 	if (read_dsp(chip, &asic_status) < 0) {
 		dev_err(chip->card->dev,
 			"check_asic_status: failed on read_dsp\n");
-		chip->asic_loaded = FALSE;
+		chip->asic_loaded = false;
 		return -EIO;
 	}
 
@@ -192,7 +192,7 @@
 		}
 	}
 
-	if ((err = write_control_reg(chip, control_reg, FALSE)))
+	if ((err = write_control_reg(chip, control_reg, false)))
 		return err;
 	chip->professional_spdif = prof;
 	dev_dbg(chip->card->dev, "set_professional_spdif to %s\n",
diff --git a/sound/pci/echoaudio/gina20.c b/sound/pci/echoaudio/gina20.c
index 4fa32a2..67bd0c9 100644
--- a/sound/pci/echoaudio/gina20.c
+++ b/sound/pci/echoaudio/gina20.c
@@ -23,7 +23,7 @@
 #define ECHOCARD_HAS_INPUT_GAIN
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 
 /* Pipe indexes */
 #define PX_ANALOG_OUT	0	/* 8 */
diff --git a/sound/pci/echoaudio/gina20_dsp.c b/sound/pci/echoaudio/gina20_dsp.c
index 5dafe92..b237757 100644
--- a/sound/pci/echoaudio/gina20_dsp.c
+++ b/sound/pci/echoaudio/gina20_dsp.c
@@ -48,19 +48,19 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_GINA20_DSP;
 	chip->spdif_status = GD_SPDIF_STATUS_UNDEF;
 	chip->clock_state = GD_CLOCK_UNDEF;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_SPDIF;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -69,7 +69,7 @@
 
 static int set_mixer_defaults(struct echoaudio *chip)
 {
-	chip->professional_spdif = FALSE;
+	chip->professional_spdif = false;
 	return init_line_levels(chip);
 }
 
diff --git a/sound/pci/echoaudio/gina24_dsp.c b/sound/pci/echoaudio/gina24_dsp.c
index 6971766..8eff2b4 100644
--- a/sound/pci/echoaudio/gina24_dsp.c
+++ b/sound/pci/echoaudio/gina24_dsp.c
@@ -52,7 +52,7 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
 		ECHO_CLOCK_BIT_ESYNC | ECHO_CLOCK_BIT_ESYNC96 |
@@ -76,7 +76,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -86,8 +86,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -152,7 +152,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err) {
 		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
-		err = write_control_reg(chip, control_reg, TRUE);
+		err = write_control_reg(chip, control_reg, true);
 	}
 	return err;
 }
@@ -226,7 +226,7 @@
 	chip->sample_rate = rate;
 	dev_dbg(chip->card->dev, "set_sample_rate: %d clock %d\n", rate, clock);
 
-	return write_control_reg(chip, control_reg, FALSE);
+	return write_control_reg(chip, control_reg, false);
 }
 
 
@@ -274,7 +274,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -285,17 +285,17 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_CDROM:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
@@ -333,7 +333,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, TRUE);
+	err = write_control_reg(chip, control_reg, true);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/echoaudio/indigo_dsp.c b/sound/pci/echoaudio/indigo_dsp.c
index 54edd67..c97dc83 100644
--- a/sound/pci/echoaudio/indigo_dsp.c
+++ b/sound/pci/echoaudio/indigo_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigodj_dsp.c b/sound/pci/echoaudio/indigodj_dsp.c
index 2cf5cc0..2428b35 100644
--- a/sound/pci/echoaudio/indigodj_dsp.c
+++ b/sound/pci/echoaudio/indigodj_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DJ_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigodjx_dsp.c b/sound/pci/echoaudio/indigodjx_dsp.c
index 5252863..5fbd4a3 100644
--- a/sound/pci/echoaudio/indigodjx_dsp.c
+++ b/sound/pci/echoaudio/indigodjx_dsp.c
@@ -47,17 +47,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_DJX_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	err = load_firmware(chip);
 	if (err < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigoio_dsp.c b/sound/pci/echoaudio/indigoio_dsp.c
index 4e81787..79b68ba 100644
--- a/sound/pci/echoaudio/indigoio_dsp.c
+++ b/sound/pci/echoaudio/indigoio_dsp.c
@@ -49,16 +49,16 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_IO_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/indigoiox_dsp.c b/sound/pci/echoaudio/indigoiox_dsp.c
index 6de3f9b..1ae394e 100644
--- a/sound/pci/echoaudio/indigoiox_dsp.c
+++ b/sound/pci/echoaudio/indigoiox_dsp.c
@@ -47,17 +47,17 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_INDIGO_IOX_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL;
 
 	err = load_firmware(chip);
 	if (err < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/layla20.c b/sound/pci/echoaudio/layla20.c
index 12e5d21..fc8468d 100644
--- a/sound/pci/echoaudio/layla20.c
+++ b/sound/pci/echoaudio/layla20.c
@@ -26,7 +26,7 @@
 #define ECHOCARD_HAS_SUPER_INTERLEAVE
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 #define ECHOCARD_HAS_OUTPUT_CLOCK_SWITCH
 #define ECHOCARD_HAS_MIDI
 
diff --git a/sound/pci/echoaudio/layla20_dsp.c b/sound/pci/echoaudio/layla20_dsp.c
index f2024a3..5e5b6e2 100644
--- a/sound/pci/echoaudio/layla20_dsp.c
+++ b/sound/pci/echoaudio/layla20_dsp.c
@@ -51,8 +51,8 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_LAYLA20_DSP;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
@@ -62,7 +62,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -71,7 +71,7 @@
 
 static int set_mixer_defaults(struct echoaudio *chip)
 {
-	chip->professional_spdif = FALSE;
+	chip->professional_spdif = false;
 	return init_line_levels(chip);
 }
 
@@ -113,7 +113,7 @@
 	u32 asic_status;
 	int goodcnt, i;
 
-	chip->asic_loaded = FALSE;
+	chip->asic_loaded = false;
 	for (i = goodcnt = 0; i < 5; i++) {
 		send_vector(chip, DSP_VC_TEST_ASIC);
 
@@ -127,7 +127,7 @@
 
 		if (asic_status == ASIC_ALREADY_LOADED) {
 			if (++goodcnt == 3) {
-				chip->asic_loaded = TRUE;
+				chip->asic_loaded = true;
 				return 0;
 			}
 		}
diff --git a/sound/pci/echoaudio/layla24_dsp.c b/sound/pci/echoaudio/layla24_dsp.c
index 4f11e81..df28e51 100644
--- a/sound/pci/echoaudio/layla24_dsp.c
+++ b/sound/pci/echoaudio/layla24_dsp.c
@@ -51,8 +51,8 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
-	chip->has_midi = TRUE;
+	chip->bad_board = true;
+	chip->has_midi = true;
 	chip->dsp_code_to_load = FW_LAYLA24_DSP;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
@@ -64,7 +64,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	if ((err = init_line_levels(chip)) < 0)
 		return err;
@@ -77,8 +77,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -135,7 +135,7 @@
 	err = load_asic_generic(chip, DSP_FNC_LOAD_LAYLA24_EXTERNAL_ASIC,
 				FW_LAYLA24_2S_ASIC);
 	if (err < 0)
-		return FALSE;
+		return false;
 
 	/* Now give the external ASIC a little time to set up */
 	mdelay(10);
@@ -147,7 +147,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err)
 		err = write_control_reg(chip, GML_CONVERTER_ENABLE | GML_48KHZ,
-					TRUE);
+					true);
 	
 	return err;
 }
@@ -241,7 +241,7 @@
 	dev_dbg(chip->card->dev,
 		"set_sample_rate: %d clock %d\n", rate, control_reg);
 
-	return write_control_reg(chip, control_reg, FALSE);
+	return write_control_reg(chip, control_reg, false);
 }
 
 
@@ -287,7 +287,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -334,17 +334,17 @@
 	short asic;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		asic = FW_LAYLA24_2S_ASIC;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		asic = FW_LAYLA24_2A_ASIC;
 		break;
 	default:
@@ -383,7 +383,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, TRUE);
+	err = write_control_reg(chip, control_reg, true);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/echoaudio/mia.c b/sound/pci/echoaudio/mia.c
index 2f7562f..62b5240 100644
--- a/sound/pci/echoaudio/mia.c
+++ b/sound/pci/echoaudio/mia.c
@@ -26,7 +26,7 @@
 #define ECHOCARD_HAS_VMIXER
 #define ECHOCARD_HAS_DIGITAL_IO
 #define ECHOCARD_HAS_EXTERNAL_CLOCK
-#define ECHOCARD_HAS_ADAT	FALSE
+#define ECHOCARD_HAS_ADAT	false
 #define ECHOCARD_HAS_STEREO_BIG_ENDIAN32
 #define ECHOCARD_HAS_MIDI
 #define ECHOCARD_HAS_LINE_OUT_GAIN
diff --git a/sound/pci/echoaudio/mia_dsp.c b/sound/pci/echoaudio/mia_dsp.c
index fdad079..8f612a0 100644
--- a/sound/pci/echoaudio/mia_dsp.c
+++ b/sound/pci/echoaudio/mia_dsp.c
@@ -52,19 +52,19 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->dsp_code_to_load = FW_MIA_DSP;
 	/* Since this card has no ASIC, mark it as loaded so everything
 	   works OK */
-	chip->asic_loaded = TRUE;
+	chip->asic_loaded = true;
 	if ((subdevice_id & 0x0000f) == MIA_MIDI_REV)
-		chip->has_midi = TRUE;
+		chip->has_midi = true;
 	chip->input_clock_types = ECHO_CLOCK_BIT_INTERNAL |
 		ECHO_CLOCK_BIT_SPDIF;
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
diff --git a/sound/pci/echoaudio/mona_dsp.c b/sound/pci/echoaudio/mona_dsp.c
index 843c7a5..dce9e57 100644
--- a/sound/pci/echoaudio/mona_dsp.c
+++ b/sound/pci/echoaudio/mona_dsp.c
@@ -52,7 +52,7 @@
 
 	chip->device_id = device_id;
 	chip->subdevice_id = subdevice_id;
-	chip->bad_board = TRUE;
+	chip->bad_board = true;
 	chip->input_clock_types =
 		ECHO_CLOCK_BIT_INTERNAL | ECHO_CLOCK_BIT_SPDIF |
 		ECHO_CLOCK_BIT_WORD | ECHO_CLOCK_BIT_ADAT;
@@ -69,7 +69,7 @@
 
 	if ((err = load_firmware(chip)) < 0)
 		return err;
-	chip->bad_board = FALSE;
+	chip->bad_board = false;
 
 	return err;
 }
@@ -79,8 +79,8 @@
 static int set_mixer_defaults(struct echoaudio *chip)
 {
 	chip->digital_mode = DIGITAL_MODE_SPDIF_RCA;
-	chip->professional_spdif = FALSE;
-	chip->digital_in_automute = TRUE;
+	chip->professional_spdif = false;
+	chip->digital_in_automute = true;
 	return init_line_levels(chip);
 }
 
@@ -148,7 +148,7 @@
 	   48 kHz, internal clock, S/PDIF RCA mode */
 	if (!err) {
 		control_reg = GML_CONVERTER_ENABLE | GML_48KHZ;
-		err = write_control_reg(chip, control_reg, TRUE);
+		err = write_control_reg(chip, control_reg, true);
 	}
 
 	return err;
@@ -356,7 +356,7 @@
 	}
 
 	chip->input_clock = clock;
-	return write_control_reg(chip, control_reg, TRUE);
+	return write_control_reg(chip, control_reg, true);
 }
 
 
@@ -367,16 +367,16 @@
 	int err, incompatible_clock;
 
 	/* Set clock to "internal" if it's not compatible with the new mode */
-	incompatible_clock = FALSE;
+	incompatible_clock = false;
 	switch (mode) {
 	case DIGITAL_MODE_SPDIF_OPTICAL:
 	case DIGITAL_MODE_SPDIF_RCA:
 		if (chip->input_clock == ECHO_CLOCK_ADAT)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	case DIGITAL_MODE_ADAT:
 		if (chip->input_clock == ECHO_CLOCK_SPDIF)
-			incompatible_clock = TRUE;
+			incompatible_clock = true;
 		break;
 	default:
 		dev_err(chip->card->dev,
@@ -415,7 +415,7 @@
 		break;
 	}
 
-	err = write_control_reg(chip, control_reg, FALSE);
+	err = write_control_reg(chip, control_reg, false);
 	spin_unlock_irq(&chip->lock);
 	if (err < 0)
 		return err;
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 55e5716..076b117 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -1741,7 +1741,7 @@
 static struct snd_kcontrol_new snd_audigy_capture_boost =
 {
 	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name =		"Analog Capture Boost",
+	.name =		"Mic Extra Boost",
 	.info =		snd_audigy_capture_boost_info,
 	.get =		snd_audigy_capture_boost_get,
 	.put =		snd_audigy_capture_boost_put
@@ -1819,8 +1819,6 @@
 		 * the Philips ADC for 24bit capture */
 		"PCM Playback Switch",
 		"PCM Playback Volume",
-		"Master Mono Playback Switch",
-		"Master Mono Playback Volume",
 		"Master Playback Switch",
 		"Master Playback Volume",
 		"PCM Out Path & Mute",
@@ -1830,10 +1828,16 @@
 		"Capture Switch",
 		"Capture Volume",
 		"Mic Select",
+		"Headphone Playback Switch",
+		"Headphone Playback Volume",
+		"3D Control - Center",
+		"3D Control - Depth",
+		"3D Control - Switch",
 		"Video Playback Switch",
 		"Video Playback Volume",
 		"Mic Playback Switch",
 		"Mic Playback Volume",
+		"External Amplifier",
 		NULL
 	};
 	static char *audigy_rename_ctls[] = {
@@ -1842,6 +1846,8 @@
 		/* "Wave Capture Volume", "PCM Capture Volume", */
 		"Wave Master Playback Volume", "Master Playback Volume",
 		"AMic Playback Volume", "Mic Playback Volume",
+		"Master Mono Playback Switch", "Phone Output Playback Switch",
+		"Master Mono Playback Volume", "Phone Output Playback Volume",
 		NULL
 	};
 	static char *audigy_rename_ctls_i2c_adc[] = {
@@ -1867,8 +1873,6 @@
 		 * the Philips ADC for 24bit capture */
 		"PCM Playback Switch",
 		"PCM Playback Volume",
-		"Master Mono Playback Switch",
-		"Master Mono Playback Volume",
 		"Capture Source",
 		"Capture Switch",
 		"Capture Volume",
@@ -1900,7 +1904,8 @@
 		"Aux Playback Volume", "Aux Capture Volume",
 		"Video Playback Switch", "Video Capture Switch",
 		"Video Playback Volume", "Video Capture Volume",
-
+		"Master Mono Playback Switch", "Phone Output Playback Switch",
+		"Master Mono Playback Volume", "Phone Output Playback Volume",
 		NULL
 	};
 
@@ -1935,6 +1940,9 @@
 			snd_ac97_write_cache(emu->ac97, AC97_MASTER, 0x0000);
 			/* set capture source to mic */
 			snd_ac97_write_cache(emu->ac97, AC97_REC_SEL, 0x0000);
+			/* set mono output (TAD) to mic */
+			snd_ac97_update_bits(emu->ac97, AC97_GENERAL_PURPOSE,
+				0x0200, 0x0200);
 			if (emu->card_capabilities->adc_1361t)
 				c = audigy_remove_ctls_1361t_adc;
 			else 
@@ -1996,11 +2004,6 @@
 		rename_ctl(card, "Analog Mix Capture Volume", "Line2 Capture Volume");
 		rename_ctl(card, "Aux2 Capture Volume", "Line3 Capture Volume");
 		rename_ctl(card, "Mic Capture Volume", "Unknown1 Capture Volume");
-		remove_ctl(card, "Headphone Playback Switch");
-		remove_ctl(card, "Headphone Playback Volume");
-		remove_ctl(card, "3D Control - Center");
-		remove_ctl(card, "3D Control - Depth");
-		remove_ctl(card, "3D Control - Switch");
 	}
 	if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL)
 		return -ENOMEM;
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 03b7399..7f57a14 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -887,11 +887,32 @@
 static bool pin_config_match(struct hda_codec *codec,
 			     const struct hda_pintbl *pins)
 {
-	for (; pins->nid; pins++) {
-		u32 def_conf = snd_hda_codec_get_pincfg(codec, pins->nid);
-		if (pins->val != def_conf)
+	int i;
+
+	for (i = 0; i < codec->init_pins.used; i++) {
+		struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
+		hda_nid_t nid = pin->nid;
+		u32 cfg = pin->cfg;
+		const struct hda_pintbl *t_pins;
+		int found;
+
+		t_pins = pins;
+		found = 0;
+		for (; t_pins->nid; t_pins++) {
+			if (t_pins->nid == nid) {
+				found = 1;
+				if (t_pins->val == cfg)
+					break;
+				else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
+					break;
+				else
+					return false;
+			}
+		}
+		if (!found && (cfg & 0xf0000000) != 0x40000000)
 			return false;
 	}
+
 	return true;
 }
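
An illustration of what the relaxed matching above buys: a pin quirk table only needs the pins that carry real information, since 0x4xxxxxxx "not connected" defaults now match implicitly on both sides. The NIDs and values below are made up:

static const struct hda_pintbl example_pins[] = {
	{ 0x12, 0x90a60140 },	/* built-in mic */
	{ 0x14, 0x90170110 },	/* internal speaker */
	{ 0x21, 0x02211020 },	/* headphone jack */
	{}	/* unlisted pins with 0x4xxxxxxx defaults still match */
};
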
 
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5de3c5d..37f43a1 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -53,76 +53,6 @@
 #define codec_has_clkstop(codec) \
 	((codec)->core.power_caps & AC_PWRST_CLKSTOP)
 
-/**
- * snd_hda_get_jack_location - Give a location string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack location, e.g. "Rear", "Front", etc.
- */
-const char *snd_hda_get_jack_location(u32 cfg)
-{
-	static char *bases[7] = {
-		"N/A", "Rear", "Front", "Left", "Right", "Top", "Bottom",
-	};
-	static unsigned char specials_idx[] = {
-		0x07, 0x08,
-		0x17, 0x18, 0x19,
-		0x37, 0x38
-	};
-	static char *specials[] = {
-		"Rear Panel", "Drive Bar",
-		"Riser", "HDMI", "ATAPI",
-		"Mobile-In", "Mobile-Out"
-	};
-	int i;
-	cfg = (cfg & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT;
-	if ((cfg & 0x0f) < 7)
-		return bases[cfg & 0x0f];
-	for (i = 0; i < ARRAY_SIZE(specials_idx); i++) {
-		if (cfg == specials_idx[i])
-			return specials[i];
-	}
-	return "UNKNOWN";
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_location);
-
-/**
- * snd_hda_get_jack_connectivity - Give a connectivity string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack connectivity, i.e. external or internal connection.
- */
-const char *snd_hda_get_jack_connectivity(u32 cfg)
-{
-	static char *jack_locations[4] = { "Ext", "Int", "Sep", "Oth" };
-
-	return jack_locations[(cfg >> (AC_DEFCFG_LOCATION_SHIFT + 4)) & 3];
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_connectivity);
-
-/**
- * snd_hda_get_jack_type - Give a type string of the jack
- * @cfg: pin default config value
- *
- * Parse the pin default config value and returns the string of the
- * jack type, i.e. the purpose of the jack, such as Line-Out or CD.
- */
-const char *snd_hda_get_jack_type(u32 cfg)
-{
-	static char *jack_types[16] = {
-		"Line Out", "Speaker", "HP Out", "CD",
-		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
-		"Line In", "Aux", "Mic", "Telephony",
-		"SPDIF In", "Digital In", "Reserved", "Other"
-	};
-
-	return jack_types[(cfg & AC_DEFCFG_DEVICE)
-				>> AC_DEFCFG_DEVICE_SHIFT];
-}
-EXPORT_SYMBOL_GPL(snd_hda_get_jack_type);
-
 /*
  * Send and receive a verb - passed to exec_verb override for hdac_device
  */
@@ -975,7 +905,7 @@
 	if (codec->bus->modelname) {
 		codec->modelname = kstrdup(codec->bus->modelname, GFP_KERNEL);
 		if (!codec->modelname) {
-			err = -ENODEV;
+			err = -ENOMEM;
 			goto error;
 		}
 	}
@@ -1025,7 +955,7 @@
 	hda_nid_t fg;
 	int err;
 
-	err = snd_hdac_refresh_widgets(&codec->core);
+	err = snd_hdac_refresh_widget_sysfs(&codec->core);
 	if (err < 0)
 		return err;
 
@@ -3172,8 +3102,7 @@
 			struct snd_pcm_chmap *chmap;
 			const struct snd_pcm_chmap_elem *elem;
 
-			if (!pcm || pcm->own_chmap ||
-			    !hinfo->substreams)
+			if (!pcm->pcm || pcm->own_chmap || !hinfo->substreams)
 				continue;
 			elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
 			err = snd_pcm_add_chmap_ctls(pcm->pcm, str, elem,
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 12837ab..2970413 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -469,13 +469,6 @@
 }
 
 /*
- * get widget information
- */
-const char *snd_hda_get_jack_connectivity(u32 cfg);
-const char *snd_hda_get_jack_type(u32 cfg);
-const char *snd_hda_get_jack_location(u32 cfg);
-
-/*
  * power saving
  */
 #define snd_hda_power_up(codec)		snd_hdac_power_up(&(codec)->core)
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index c746cd9..563984d 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -42,7 +42,7 @@
 	CEA_EDID_VER_RESERVED	= 4,
 };
 
-static char *cea_speaker_allocation_names[] = {
+static const char * const cea_speaker_allocation_names[] = {
 	/*  0 */ "FL/FR",
 	/*  1 */ "LFE",
 	/*  2 */ "FC",
@@ -56,7 +56,7 @@
 	/* 10 */ "FCH",
 };
 
-static char *eld_connection_type_names[4] = {
+static const char * const eld_connection_type_names[4] = {
 	"HDMI",
 	"DisplayPort",
 	"2-reserved",
@@ -94,7 +94,7 @@
 	AUDIO_CODING_XTYPE_FIRST_RESERVED	= 4,
 };
 
-static char *cea_audio_coding_type_names[] = {
+static const char * const cea_audio_coding_type_names[] = {
 	/*  0 */ "undefined",
 	/*  1 */ "LPCM",
 	/*  2 */ "AC-3",
@@ -482,14 +482,14 @@
 	struct parsed_hdmi_eld *e = &eld->info;
 	char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
 	int i;
-	static char *eld_version_names[32] = {
+	static const char * const eld_version_names[32] = {
 		"reserved",
 		"reserved",
 		"CEA-861D or below",
 		[3 ... 30] = "reserved",
 		[31] = "partial"
 	};
-	static char *cea_edid_version_names[8] = {
+	static const char * const cea_edid_version_names[8] = {
 		"no CEA EDID Timing Extension block present",
 		"CEA-861",
 		"CEA-861-A",
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index b077bb6..24f9111 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -671,7 +671,8 @@
 		}
 		for (i = 0; i < path->depth; i++) {
 			if (path->path[i] == nid) {
-				if (dir == HDA_OUTPUT || path->idx[i] == idx)
+				if (dir == HDA_OUTPUT || idx == -1 ||
+				    path->idx[i] == idx)
 					return true;
 				break;
 			}
@@ -682,7 +683,7 @@
 
 /* check whether the NID is referred by any active paths */
 #define is_active_nid_for_any(codec, nid) \
-	is_active_nid(codec, nid, HDA_OUTPUT, 0)
+	is_active_nid(codec, nid, HDA_OUTPUT, -1)
 
 /* get the default amp value for the target state */
 static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
@@ -883,8 +884,7 @@
 	struct hda_gen_spec *spec = codec->spec;
 	int i;
 
-	if (!enable)
-		path->active = false;
+	path->active = enable;
 
 	/* make sure the widget is powered up */
 	if (enable && (spec->power_down_unused || codec->power_save_node))
@@ -902,9 +902,6 @@
 		if (has_amp_out(codec, path, i))
 			activate_amp_out(codec, path, i, enable);
 	}
-
-	if (enable)
-		path->active = true;
 }
 EXPORT_SYMBOL_GPL(snd_hda_activate_path);
 
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index baaf7ed0..033aa84 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -36,24 +36,9 @@
 #define param_read(codec, nid, parm) \
 	snd_hdac_read_parm_uncached(&(codec)->core, nid, parm)
 
-static char *bits_names(unsigned int bits, char *names[], int size)
-{
-	int i, n;
-	static char buf[128];
-
-	for (i = 0, n = 0; i < size; i++) {
-		if (bits & (1U<<i) && names[i])
-			n += snprintf(buf + n, sizeof(buf) - n, " %s",
-				      names[i]);
-	}
-	buf[n] = '\0';
-
-	return buf;
-}
-
 static const char *get_wid_type_name(unsigned int wid_value)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		[AC_WID_AUD_OUT] = "Audio Output",
 		[AC_WID_AUD_IN] = "Audio Input",
 		[AC_WID_AUD_MIX] = "Audio Mixer",
@@ -241,7 +226,7 @@
 
 static const char *get_jack_connection(u32 cfg)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		"Unknown", "1/8", "1/4", "ATAPI",
 		"RCA", "Optical","Digital", "Analog",
 		"DIN", "XLR", "RJ11", "Comb",
@@ -256,7 +241,7 @@
 
 static const char *get_jack_color(u32 cfg)
 {
-	static char *names[16] = {
+	static const char * const names[16] = {
 		"Unknown", "Black", "Grey", "Blue",
 		"Green", "Red", "Orange", "Yellow",
 		"Purple", "Pink", NULL, NULL,
@@ -269,11 +254,74 @@
 		return "UNKNOWN";
 }
 
+/*
+ * Parse the pin default config value and return the string of the
+ * jack location, e.g. "Rear", "Front", etc.
+ */
+static const char *get_jack_location(u32 cfg)
+{
+	static const char * const bases[7] = {
+		"N/A", "Rear", "Front", "Left", "Right", "Top", "Bottom",
+	};
+	static const unsigned char specials_idx[] = {
+		0x07, 0x08,
+		0x17, 0x18, 0x19,
+		0x37, 0x38
+	};
+	static const char * const specials[] = {
+		"Rear Panel", "Drive Bar",
+		"Riser", "HDMI", "ATAPI",
+		"Mobile-In", "Mobile-Out"
+	};
+	int i;
+
+	cfg = (cfg & AC_DEFCFG_LOCATION) >> AC_DEFCFG_LOCATION_SHIFT;
+	if ((cfg & 0x0f) < 7)
+		return bases[cfg & 0x0f];
+	for (i = 0; i < ARRAY_SIZE(specials_idx); i++) {
+		if (cfg == specials_idx[i])
+			return specials[i];
+	}
+	return "UNKNOWN";
+}
+
+/*
+ * Parse the pin default config value and return the string of the
+ * jack connectivity, i.e. external or internal connection.
+ */
+static const char *get_jack_connectivity(u32 cfg)
+{
+	static const char * const jack_locations[4] = {
+		"Ext", "Int", "Sep", "Oth"
+	};
+
+	return jack_locations[(cfg >> (AC_DEFCFG_LOCATION_SHIFT + 4)) & 3];
+}
+
+/*
+ * Parse the pin default config value and return the string of the
+ * jack type, i.e. the purpose of the jack, such as Line-Out or CD.
+ */
+static const char *get_jack_type(u32 cfg)
+{
+	static const char * const jack_types[16] = {
+		"Line Out", "Speaker", "HP Out", "CD",
+		"SPDIF Out", "Digital Out", "Modem Line", "Modem Hand",
+		"Line In", "Aux", "Mic", "Telephony",
+		"SPDIF In", "Digital In", "Reserved", "Other"
+	};
+
+	return jack_types[(cfg & AC_DEFCFG_DEVICE)
+				>> AC_DEFCFG_DEVICE_SHIFT];
+}
+
 static void print_pin_caps(struct snd_info_buffer *buffer,
 			   struct hda_codec *codec, hda_nid_t nid,
 			   int *supports_vref)
 {
-	static char *jack_conns[4] = { "Jack", "N/A", "Fixed", "Both" };
+	static const char * const jack_conns[4] = {
+		"Jack", "N/A", "Fixed", "Both"
+	};
 	unsigned int caps, val;
 
 	caps = param_read(codec, nid, AC_PAR_PIN_CAP);
@@ -340,9 +388,9 @@
 	caps = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONFIG_DEFAULT, 0);
 	snd_iprintf(buffer, "  Pin Default 0x%08x: [%s] %s at %s %s\n", caps,
 		    jack_conns[(caps & AC_DEFCFG_PORT_CONN) >> AC_DEFCFG_PORT_CONN_SHIFT],
-		    snd_hda_get_jack_type(caps),
-		    snd_hda_get_jack_connectivity(caps),
-		    snd_hda_get_jack_location(caps));
+		    get_jack_type(caps),
+		    get_jack_connectivity(caps),
+		    get_jack_location(caps));
 	snd_iprintf(buffer, "    Conn = %s, Color = %s\n",
 		    get_jack_connection(caps),
 		    get_jack_color(caps));
@@ -478,7 +526,7 @@
 static void print_power_state(struct snd_info_buffer *buffer,
 			      struct hda_codec *codec, hda_nid_t nid)
 {
-	static char *names[] = {
+	static const char * const names[] = {
 		[ilog2(AC_PWRST_D0SUP)]		= "D0",
 		[ilog2(AC_PWRST_D1SUP)]		= "D1",
 		[ilog2(AC_PWRST_D2SUP)]		= "D2",
@@ -492,9 +540,16 @@
 	int sup = param_read(codec, nid, AC_PAR_POWER_STATE);
 	int pwr = snd_hda_codec_read(codec, nid, 0,
 				     AC_VERB_GET_POWER_STATE, 0);
-	if (sup != -1)
-		snd_iprintf(buffer, "  Power states: %s\n",
-			    bits_names(sup, names, ARRAY_SIZE(names)));
+	if (sup != -1) {
+		int i;
+
+		snd_iprintf(buffer, "  Power states: ");
+		for (i = 0; i < ARRAY_SIZE(names); i++) {
+			if (sup & (1U << i))
+				snd_iprintf(buffer, " %s", names[i]);
+		}
+		snd_iprintf(buffer, "\n");
+	}
 
 	snd_iprintf(buffer, "  Power: setting=%s, actual=%s",
 		    get_pwr_state(pwr & AC_PWRST_SETTING),
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0f039ab..186792f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -763,6 +763,20 @@
 	QUIRK_ALIENWARE,
 };
 
+static const struct hda_pintbl alienware_pincfgs[] = {
+	{ 0x0b, 0x90170110 }, /* Builtin Speaker */
+	{ 0x0c, 0x411111f0 }, /* N/A */
+	{ 0x0d, 0x411111f0 }, /* N/A */
+	{ 0x0e, 0x411111f0 }, /* N/A */
+	{ 0x0f, 0x0321101f }, /* HP */
+	{ 0x10, 0x411111f0 }, /* Headset?  disabled for now */
+	{ 0x11, 0x03a11021 }, /* Mic */
+	{ 0x12, 0xd5a30140 }, /* Builtin Mic */
+	{ 0x13, 0x411111f0 }, /* N/A */
+	{ 0x18, 0x411111f0 }, /* N/A */
+	{}
+};
+
 static const struct snd_pci_quirk ca0132_quirks[] = {
 	SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15", QUIRK_ALIENWARE),
 	{}
@@ -3147,7 +3161,7 @@
 	auto_jack = spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];
 
 	if (auto_jack)
-		jack_present = snd_hda_jack_detect(codec, spec->out_pins[1]);
+		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_hp);
 	else
 		jack_present =
 			spec->vnode_lswitch[VNID_HP_SEL - VNODE_START_NID];
@@ -3309,7 +3323,7 @@
 	auto_jack = spec->vnode_lswitch[VNID_AMIC1_ASEL - VNODE_START_NID];
 
 	if (auto_jack)
-		jack_present = snd_hda_jack_detect(codec, spec->input_pins[0]);
+		jack_present = snd_hda_jack_detect(codec, spec->unsol_tag_amic1);
 	else
 		jack_present =
 			spec->vnode_lswitch[VNID_AMIC1_SEL - VNODE_START_NID];
@@ -4617,37 +4631,54 @@
 	spec->multiout.num_dacs = 3;
 	spec->multiout.max_channels = 2;
 
-	spec->num_outputs = 2;
-	spec->out_pins[0] = 0x0b; /* speaker out */
 	if (spec->quirk == QUIRK_ALIENWARE) {
 		codec_dbg(codec, "ca0132_config: QUIRK_ALIENWARE applied.\n");
+		snd_hda_apply_pincfgs(codec, alienware_pincfgs);
+
+		spec->num_outputs = 2;
+		spec->out_pins[0] = 0x0b; /* speaker out */
 		spec->out_pins[1] = 0x0f;
-	} else{
+		spec->shared_out_nid = 0x2;
+		spec->unsol_tag_hp = 0x0f;
+
+		spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
+		spec->adcs[1] = 0x8; /* analog mic2 */
+		spec->adcs[2] = 0xa; /* what u hear */
+
+		spec->num_inputs = 3;
+		spec->input_pins[0] = 0x12;
+		spec->input_pins[1] = 0x11;
+		spec->input_pins[2] = 0x13;
+		spec->shared_mic_nid = 0x7;
+		spec->unsol_tag_amic1 = 0x11;
+	} else {
+		spec->num_outputs = 2;
+		spec->out_pins[0] = 0x0b; /* speaker out */
 		spec->out_pins[1] = 0x10; /* headphone out */
+		spec->shared_out_nid = 0x2;
+		spec->unsol_tag_hp = spec->out_pins[1];
+
+		spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
+		spec->adcs[1] = 0x8; /* analog mic2 */
+		spec->adcs[2] = 0xa; /* what u hear */
+
+		spec->num_inputs = 3;
+		spec->input_pins[0] = 0x12;
+		spec->input_pins[1] = 0x11;
+		spec->input_pins[2] = 0x13;
+		spec->shared_mic_nid = 0x7;
+		spec->unsol_tag_amic1 = spec->input_pins[0];
+
+		/* SPDIF I/O */
+		spec->dig_out = 0x05;
+		spec->multiout.dig_out_nid = spec->dig_out;
+		cfg->dig_out_pins[0] = 0x0c;
+		cfg->dig_outs = 1;
+		cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
+		spec->dig_in = 0x09;
+		cfg->dig_in_pin = 0x0e;
+		cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 	}
-	spec->shared_out_nid = 0x2;
-	spec->unsol_tag_hp = spec->out_pins[1];
-
-	spec->adcs[0] = 0x7; /* digital mic / analog mic1 */
-	spec->adcs[1] = 0x8; /* analog mic2 */
-	spec->adcs[2] = 0xa; /* what u hear */
-
-	spec->num_inputs = 3;
-	spec->input_pins[0] = 0x12;
-	spec->input_pins[1] = 0x11;
-	spec->input_pins[2] = 0x13;
-	spec->shared_mic_nid = 0x7;
-	spec->unsol_tag_amic1 = spec->input_pins[0];
-
-	/* SPDIF I/O */
-	spec->dig_out = 0x05;
-	spec->multiout.dig_out_nid = spec->dig_out;
-	cfg->dig_out_pins[0] = 0x0c;
-	cfg->dig_outs = 1;
-	cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
-	spec->dig_in = 0x09;
-	cfg->dig_in_pin = 0x0e;
-	cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
 }
 
 static int ca0132_prepare_verbs(struct hda_codec *codec)
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index f788a91..ca03c40 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -200,12 +200,33 @@
 	return 0;
 }
 
-#define cx_auto_free	snd_hda_gen_free
+static void cx_auto_reboot_notify(struct hda_codec *codec)
+{
+	struct conexant_spec *spec = codec->spec;
+
+	if (codec->core.vendor_id != 0x14f150f2)
+		return;
+
+	/* Turn the CX20722 codec into D3 to avoid spurious noises
+	   from the internal speaker during (and after) reboot */
+	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
+
+	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+	snd_hda_codec_write(codec, codec->core.afg, 0,
+			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+}
+
+static void cx_auto_free(struct hda_codec *codec)
+{
+	cx_auto_reboot_notify(codec);
+	snd_hda_gen_free(codec);
+}
 
 static const struct hda_codec_ops cx_auto_patch_ops = {
 	.build_controls = cx_auto_build_controls,
 	.build_pcms = snd_hda_gen_build_pcms,
 	.init = cx_auto_init,
+	.reboot_notify = cx_auto_reboot_notify,
 	.free = cx_auto_free,
 	.unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index a97db5f..acbfbe08 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -37,6 +37,8 @@
 #include <sound/jack.h>
 #include <sound/asoundef.h>
 #include <sound/tlv.h>
+#include <sound/hdaudio.h>
+#include <sound/hda_i915.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 #include "hda_jack.h"
@@ -144,6 +146,9 @@
 	 */
 	struct hda_multi_out multiout;
 	struct hda_pcm_stream pcm_playback;
+
+	/* i915/powerwell (Haswell+/Valleyview+) specific */
+	struct i915_audio_component_audio_ops i915_audio_ops;
 };
 
 
@@ -2191,6 +2196,9 @@
 	struct hdmi_spec *spec = codec->spec;
 	int pin_idx;
 
+	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
+		snd_hdac_i915_register_notifier(NULL);
+
 	for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
 		struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 
@@ -2316,6 +2324,14 @@
 	snd_hda_codec_set_power_to_all(codec, fg, power_state);
 }
 
+static void intel_pin_eld_notify(void *audio_ptr, int port)
+{
+	struct hda_codec *codec = audio_ptr;
+	int pin_nid = port + 0x04;
+
+	check_presence_and_report(codec, pin_nid);
+}
+
 static int patch_generic_hdmi(struct hda_codec *codec)
 {
 	struct hdmi_spec *spec;
@@ -2342,8 +2358,12 @@
 	if (is_valleyview_plus(codec) || is_skylake(codec))
 		codec->core.link_power_control = 1;
 
-	if (is_haswell_plus(codec) || is_valleyview_plus(codec))
+	if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
 		codec->depop_delay = 0;
+		spec->i915_audio_ops.audio_ptr = codec;
+		spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+		snd_hdac_i915_register_notifier(&spec->i915_audio_ops);
+	}
 
 	if (hdmi_parse_codec(codec) < 0) {
 		codec->spec = NULL;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 374ea53..4e6b090 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5389,400 +5389,202 @@
 	{}
 };
 
-#define ALC255_STANDARD_PINS \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
-
 #define ALC256_STANDARD_PINS \
 	{0x12, 0x90a60140}, \
 	{0x14, 0x90170110}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
 	{0x21, 0x02211020}
 
 #define ALC282_STANDARD_PINS \
-	{0x14, 0x90170110}, \
-	{0x18, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
-
-#define ALC288_STANDARD_PINS \
-	{0x17, 0x411111f0}, \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1e, 0x411111f0}
+	{0x14, 0x90170110}
 
 #define ALC290_STANDARD_PINS \
-	{0x12, 0x99a30130}, \
-	{0x13, 0x40000000}, \
-	{0x16, 0x411111f0}, \
-	{0x17, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1e, 0x411111f0}
+	{0x12, 0x99a30130}
 
 #define ALC292_STANDARD_PINS \
 	{0x14, 0x90170110}, \
-	{0x15, 0x0221401f}, \
-	{0x1a, 0x411111f0}, \
-	{0x1b, 0x411111f0}, \
-	{0x1d, 0x40700001}
+	{0x15, 0x0221401f}
 
 #define ALC298_STANDARD_PINS \
-	{0x18, 0x411111f0}, \
-	{0x19, 0x411111f0}, \
-	{0x1a, 0x411111f0}, \
-	{0x1e, 0x411111f0}, \
-	{0x1f, 0x411111f0}
+	{0x12, 0x90a60130}, \
+	{0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
-		{0x12, 0x40300000},
 		{0x14, 0x90170110},
-		{0x17, 0x411111f0},
-		{0x1d, 0x40538029},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60140},
 		{0x14, 0x90170110},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170130},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x01014020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221103f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170150},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x02011020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221105f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		{0x12, 0x40000000},
 		{0x14, 0x90170110},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
 		{0x1b, 0x01014020},
-		{0x1d, 0x4054c029},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0221101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
 		{0x17, 0x90170140},
-		{0x18, 0x40000000},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41163b05},
-		{0x1e, 0x411111f0},
 		{0x21, 0x0321102f}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170140},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60170},
 		{0x14, 0x90170140},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211050}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60180},
 		{0x14, 0x90170130},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC255_STANDARD_PINS,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x1d, 0x40700001},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x40000000},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x411111f0},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC256_STANDARD_PINS,
-		{0x13, 0x411111f0},
-		{0x1d, 0x4077992d},
-		{0x1e, 0x411111ff}),
+		ALC256_STANDARD_PINS),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
 		{0x12, 0x90a60130},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x16, 0x411111f0},
-		{0x17, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40748605},
-		{0x1e, 0x411111f0}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED,
 		{0x12, 0x90a60140},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x16, 0x411111f0},
-		{0x17, 0x411111f0},
 		{0x18, 0x02811030},
-		{0x19, 0x411111f0},
 		{0x1a, 0x04a1103f},
-		{0x1b, 0x02011020},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0}),
+		{0x1b, 0x02011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP 15 Touchsmart", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40f41905},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40020008},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40e00001},
 		{0x21, 0x03211040}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x03a11030},
-		{0x1d, 0x40e00001},
 		{0x21, 0x03211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
-		{0x19, 0x03a11030},
-		{0x1d, 0x40f00001},
-		{0x21, 0x03211020}),
-	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
-		ALC282_STANDARD_PINS,
-		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
 		{0x19, 0x04a11020},
-		{0x1d, 0x40f00001},
 		{0x21, 0x0421101f}),
-	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
-		ALC282_STANDARD_PINS,
-		{0x12, 0x99a30130},
-		{0x17, 0x40000000},
-		{0x19, 0x03a11030},
-		{0x1d, 0x40f00001},
-		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0282, 0x103c, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x17, 0x40000000},
 		{0x19, 0x04a11030},
-		{0x1d, 0x40f00001},
 		{0x21, 0x04211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60130},
-		{0x17, 0x40020008},
-		{0x19, 0x411111f0},
-		{0x1d, 0x40e00001},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		{0x12, 0x90a60160},
 		{0x14, 0x90170120},
-		{0x17, 0x40000000},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40700001},
-		{0x1e, 0x411111f0},
 		{0x21, 0x02211030}),
 	SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC282_STANDARD_PINS,
 		{0x12, 0x90a60130},
-		{0x17, 0x40020008},
 		{0x19, 0x03a11020},
-		{0x1d, 0x40e00001},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL_XPS_13_GPIO6,
-		ALC288_STANDARD_PINS,
 		{0x12, 0x90a60120},
-		{0x13, 0x40000000},
 		{0x14, 0x90170110},
-		{0x1d, 0x4076832d},
 		{0x21, 0x0321101f}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211040},
 		{0x18, 0x90170112},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211040},
 		{0x18, 0x90170110},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x0421101f},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
-		{0x14, 0x411111f0},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11040},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11040}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11040},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11040}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x04211020},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4076a12d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0290, 0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1,
 		ALC290_STANDARD_PINS,
 		{0x14, 0x90170110},
 		{0x15, 0x0421101f},
-		{0x18, 0x411111f0},
-		{0x1a, 0x04a11020},
-		{0x1d, 0x4075812d}),
+		{0x1a, 0x04a11020}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
 		{0x16, 0x01014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x01a19030},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x01a19030}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
 		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
 		{0x16, 0x01014020},
 		{0x18, 0x02a19031},
-		{0x19, 0x01a1903e},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x01a1903e}),
 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x90a60140},
-		{0x13, 0x411111f0},
-		{0x16, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1e, 0x411111f0}),
+		{0x12, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
 		{0x13, 0x90a60140},
 		{0x16, 0x21014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x21a19030},
-		{0x1e, 0x411111f0}),
+		{0x19, 0x21a19030}),
 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
-		{0x13, 0x90a60140},
-		{0x16, 0x411111f0},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1e, 0x411111f0}),
-	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
-		ALC292_STANDARD_PINS,
-		{0x12, 0x40000000},
-		{0x13, 0x90a60140},
-		{0x16, 0x21014020},
-		{0x18, 0x411111f0},
-		{0x19, 0x21a19030},
-		{0x1e, 0x411111ff}),
+		{0x13, 0x90a60140}),
 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
 		ALC298_STANDARD_PINS,
-		{0x12, 0x90a60130},
-		{0x13, 0x40000000},
-		{0x14, 0x411111f0},
-		{0x17, 0x90170140},
-		{0x1d, 0x4068a36d},
-		{0x21, 0x03211020}),
+		{0x17, 0x90170110}),
+	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC298_STANDARD_PINS,
+		{0x17, 0x90170140}),
+	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		ALC298_STANDARD_PINS,
+		{0x17, 0x90170150}),
 	{}
 };
 
@@ -6675,77 +6477,33 @@
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
 	SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
-		{0x12, 0x4004c000},
 		{0x14, 0x01014010},
-		{0x15, 0x411111f0},
-		{0x16, 0x411111f0},
 		{0x18, 0x01a19020},
-		{0x19, 0x411111f0},
 		{0x1a, 0x0181302f},
-		{0x1b, 0x0221401f},
-		{0x1c, 0x411111f0},
-		{0x1d, 0x4054c601},
-		{0x1e, 0x411111f0}),
+		{0x1b, 0x0221401f}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30130},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30140},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x99a30150},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
-		{0x12, 0x411111f0},
 		{0x14, 0x90170110},
 		{0x15, 0x0321101f},
-		{0x16, 0x03011020},
-		{0x18, 0x40000008},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x41000001},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x16, 0x03011020}),
 	SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
 		{0x12, 0x90a60130},
 		{0x14, 0x90170110},
-		{0x15, 0x0321101f},
-		{0x16, 0x40000000},
-		{0x18, 0x411111f0},
-		{0x19, 0x411111f0},
-		{0x1a, 0x411111f0},
-		{0x1b, 0x411111f0},
-		{0x1d, 0x40d6832d},
-		{0x1e, 0x411111f0},
-		{0x1f, 0x411111f0}),
+		{0x15, 0x0321101f}),
 	{}
 };
 
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index c19e021..9bba275 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1526,7 +1526,7 @@
 
 static int snd_hdsp_create_midi (struct snd_card *card, struct hdsp *hdsp, int id)
 {
-	char buf[32];
+	char buf[40];
 
 	hdsp->midi[id].id = id;
 	hdsp->midi[id].rmidi = NULL;
@@ -1537,7 +1537,7 @@
 	hdsp->midi[id].pending = 0;
 	spin_lock_init (&hdsp->midi[id].lock);
 
-	sprintf (buf, "%s MIDI %d", card->shortname, id+1);
+	snprintf(buf, sizeof(buf), "%s MIDI %d", card->shortname, id + 1);
 	if (snd_rawmidi_new (card, buf, id, 1, 1, &hdsp->midi[id].rmidi) < 0)
 		return -1;
 
@@ -2806,7 +2806,8 @@
 	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
 
 	offset = ucontrol->id.index - 1;
-	snd_BUG_ON(offset < 0);
+	if (snd_BUG_ON(offset < 0))
+		return -EINVAL;
 
 	switch (hdsp->io_type) {
 	case Digiface:
diff --git a/sound/ppc/keywest.c b/sound/ppc/keywest.c
index 6120a06..4373615 100644
--- a/sound/ppc/keywest.c
+++ b/sound/ppc/keywest.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/module.h>
 #include <sound/core.h>
 #include "pmac.h"
 
@@ -101,6 +102,7 @@
 	{ "keywest", 0 },		/* instantiated by us if needed */
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, keywest_i2c_id);
 
 static struct i2c_driver keywest_driver = {
 	.driver = {
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 1d651b8..225bfda 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -57,6 +57,7 @@
 source "sound/soc/sh/Kconfig"
 source "sound/soc/sirf/Kconfig"
 source "sound/soc/spear/Kconfig"
+source "sound/soc/sti/Kconfig"
 source "sound/soc/tegra/Kconfig"
 source "sound/soc/txx9/Kconfig"
 source "sound/soc/ux500/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 669648b..134aca1 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -39,6 +39,7 @@
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= sirf/
 obj-$(CONFIG_SND_SOC)	+= spear/
+obj-$(CONFIG_SND_SOC)	+= sti/
 obj-$(CONFIG_SND_SOC)	+= tegra/
 obj-$(CONFIG_SND_SOC)	+= txx9/
 obj-$(CONFIG_SND_SOC)	+= ux500/
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 841d059..ba8def5 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -290,7 +290,7 @@
 	int dir, dir_mask;
 	int ret;
 
-	pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
+	pr_debug("atmel_ssc_startup: SSC_SR=0x%x\n",
 		ssc_readl(ssc_p->ssc->regs, SR));
 
 	/* Enable PMC peripheral clock for this SSC */
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index dd94fea..5741c0a 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -344,14 +344,8 @@
 
 	platform_set_drvdata(pdev, dmadata);
 
-	return snd_soc_register_platform(&pdev->dev, &au1xpsc_soc_platform);
-}
-
-static int au1xpsc_pcm_drvremove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &au1xpsc_soc_platform);
 }
 
 static struct platform_driver au1xpsc_pcm_driver = {
@@ -359,7 +353,6 @@
 		.name	= "au1xpsc-pcm",
 	},
 	.probe		= au1xpsc_pcm_drvprobe,
-	.remove		= au1xpsc_pcm_drvremove,
 };
 
 module_platform_driver(au1xpsc_pcm_driver);
diff --git a/sound/soc/au1x/dma.c b/sound/soc/au1x/dma.c
index 24cc7f4..fcf5a9a 100644
--- a/sound/soc/au1x/dma.c
+++ b/sound/soc/au1x/dma.c
@@ -312,14 +312,8 @@
 
 	platform_set_drvdata(pdev, ctx);
 
-	return snd_soc_register_platform(&pdev->dev, &alchemy_pcm_soc_platform);
-}
-
-static int alchemy_pcm_drvremove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &alchemy_pcm_soc_platform);
 }
 
 static struct platform_driver alchemy_pcmdma_driver = {
@@ -327,7 +321,6 @@
 		.name	= "alchemy-pcm-dma",
 	},
 	.probe		= alchemy_pcm_drvprobe,
-	.remove		= alchemy_pcm_drvremove,
 };
 
 module_platform_driver(alchemy_pcmdma_driver);
diff --git a/sound/soc/au1x/psc-i2s.c b/sound/soc/au1x/psc-i2s.c
index e742ef6..38e853a 100644
--- a/sound/soc/au1x/psc-i2s.c
+++ b/sound/soc/au1x/psc-i2s.c
@@ -305,19 +305,9 @@
 		return -ENOMEM;
 
 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iores)
-		return -ENODEV;
-
-	ret = -EBUSY;
-	if (!devm_request_mem_region(&pdev->dev, iores->start,
-				     resource_size(iores),
-				     pdev->name))
-		return -EBUSY;
-
-	wd->mmio = devm_ioremap(&pdev->dev, iores->start,
-				resource_size(iores));
-	if (!wd->mmio)
-		return -EBUSY;
+	wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
+	if (IS_ERR(wd->mmio))
+		return PTR_ERR(wd->mmio);
 
 	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!dmares)
diff --git a/sound/soc/bcm/bcm2835-i2s.c b/sound/soc/bcm/bcm2835-i2s.c
index 03fa1cb..8c435be 100644
--- a/sound/soc/bcm/bcm2835-i2s.c
+++ b/sound/soc/bcm/bcm2835-i2s.c
@@ -862,6 +862,8 @@
 	{},
 };
 
+MODULE_DEVICE_TABLE(of, bcm2835_i2s_of_match);
+
 static struct platform_driver bcm2835_i2s_driver = {
 	.probe		= bcm2835_i2s_probe,
 	.driver		= {
diff --git a/sound/soc/blackfin/bf5xx-ac97-pcm.c b/sound/soc/blackfin/bf5xx-ac97-pcm.c
index 238913e..02ad260 100644
--- a/sound/soc/blackfin/bf5xx-ac97-pcm.c
+++ b/sound/soc/blackfin/bf5xx-ac97-pcm.c
@@ -450,13 +450,8 @@
 
 static int bf5xx_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &bf5xx_ac97_soc_platform);
-}
-
-static int bf5xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &bf5xx_ac97_soc_platform);
 }
 
 static struct platform_driver bf5xx_pcm_driver = {
@@ -465,7 +460,6 @@
 	},
 
 	.probe = bf5xx_soc_platform_probe,
-	.remove = bf5xx_soc_platform_remove,
 };
 
 module_platform_driver(bf5xx_pcm_driver);
diff --git a/sound/soc/blackfin/bf5xx-i2s-pcm.c b/sound/soc/blackfin/bf5xx-i2s-pcm.c
index d95477a..6cba211d 100644
--- a/sound/soc/blackfin/bf5xx-i2s-pcm.c
+++ b/sound/soc/blackfin/bf5xx-i2s-pcm.c
@@ -342,13 +342,8 @@
 
 static int bfin_i2s_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &bf5xx_i2s_soc_platform);
-}
-
-static int bfin_i2s_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &bf5xx_i2s_soc_platform);
 }
 
 static struct platform_driver bfin_i2s_pcm_driver = {
@@ -357,7 +352,6 @@
 	},
 
 	.probe = bfin_i2s_soc_platform_probe,
-	.remove = bfin_i2s_soc_platform_remove,
 };
 
 module_platform_driver(bfin_i2s_pcm_driver);
diff --git a/sound/soc/blackfin/bfin-eval-adau1x61.c b/sound/soc/blackfin/bfin-eval-adau1x61.c
index 4229f76..fddfe00c 100644
--- a/sound/soc/blackfin/bfin-eval-adau1x61.c
+++ b/sound/soc/blackfin/bfin-eval-adau1x61.c
@@ -108,6 +108,7 @@
 
 static struct snd_soc_card bfin_eval_adau1x61 = {
 	.name = "bfin-eval-adau1x61",
+	.owner = THIS_MODULE,
 	.driver_name = "eval-adau1x61",
 	.dai_link = &bfin_eval_adau1x61_dai,
 	.num_links = 1,
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 38b3dad..e8bed6b 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -156,33 +156,29 @@
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -900, 300, 0);
 
 /* {-23, -17, -13.5, -11, -9, -6, -3, 0}dB */
-static const unsigned int mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(mic_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-2300, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(-1700, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(-1350, 0, 0),
 	3, 3, TLV_DB_SCALE_ITEM(-1100, 0, 0),
-	4, 7, TLV_DB_SCALE_ITEM(-900, 300, 0),
-};
+	4, 7, TLV_DB_SCALE_ITEM(-900, 300, 0)
+);
 
 /* {0, 0, 0, -6, 0, 6, 12, 18}dB */
-static const unsigned int aux_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(0, 0, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-600, 600, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-600, 600, 0)
+);
 
 /* {-16, -13, -10, -7, -5.2, -3.3, -2.2, 0}dB, mute instead of -16dB */
-static const unsigned int out_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(out_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-1600, 300, 1),
 	4, 4, TLV_DB_SCALE_ITEM(-520, 0, 0),
 	5, 5, TLV_DB_SCALE_ITEM(-330, 0, 0),
-	6, 7, TLV_DB_SCALE_ITEM(-220, 220, 0),
-};
+	6, 7, TLV_DB_SCALE_ITEM(-220, 220, 0)
+);
 
-static const unsigned int st_tlv[] = {
-	TLV_DB_RANGE_HEAD(8),
+static const DECLARE_TLV_DB_RANGE(st_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-12041, 602, 0),
 	2, 3, TLV_DB_SCALE_ITEM(-11087, 250, 0),
 	4, 5, TLV_DB_SCALE_ITEM(-10643, 158, 0),
@@ -190,8 +186,8 @@
 	8, 9, TLV_DB_SCALE_ITEM(-10133, 92, 0),
 	10, 13, TLV_DB_SCALE_ITEM(-9958, 70, 0),
 	14, 17, TLV_DB_SCALE_ITEM(-9689, 53, 0),
-	18, 271, TLV_DB_SCALE_ITEM(-9484, 37, 0),
-};
+	18, 271, TLV_DB_SCALE_ITEM(-9484, 37, 0)
+);
 
 /* Sidetone Gain = M * 2^(-5-N) */
 struct st_gain {
@@ -1028,10 +1024,8 @@
 
 	if (dir == PM860X_CLK_DIR_OUT)
 		pm860x->dir = PM860X_CLK_DIR_OUT;
-	else {
-		pm860x->dir = PM860X_CLK_DIR_IN;
+	else	/* Slave mode is not supported */
 		return -EINVAL;
-	}
 
 	return 0;
 }
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index efaafce..0c9733e 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -53,6 +53,7 @@
 	select SND_SOC_CS4271_I2C if I2C
 	select SND_SOC_CS4271_SPI if SPI_MASTER
 	select SND_SOC_CS42XX8_I2C if I2C
+	select SND_SOC_CS4349 if I2C
 	select SND_SOC_CX20442 if TTY
 	select SND_SOC_DA7210 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_DA7213 if I2C
@@ -62,6 +63,8 @@
 	select SND_SOC_BT_SCO
 	select SND_SOC_ES8328_SPI if SPI_MASTER
 	select SND_SOC_ES8328_I2C if I2C
+	select SND_SOC_GTM601
+	select SND_SOC_ICS43432
 	select SND_SOC_ISABELLE if I2C
 	select SND_SOC_JZ4740_CODEC
 	select SND_SOC_LM4857 if I2C
@@ -83,6 +86,7 @@
 	select SND_SOC_PCM512x_I2C if I2C
 	select SND_SOC_PCM512x_SPI if SPI_MASTER
 	select SND_SOC_RT286 if I2C
+	select SND_SOC_RT298 if I2C
 	select SND_SOC_RT5631 if I2C
 	select SND_SOC_RT5640 if I2C
 	select SND_SOC_RT5645 if I2C
@@ -102,6 +106,7 @@
 	select SND_SOC_STA350 if I2C
 	select SND_SOC_STA529 if I2C
 	select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
+	select SND_SOC_STI_SAS
 	select SND_SOC_TAS2552 if I2C
 	select SND_SOC_TAS5086 if I2C
 	select SND_SOC_TAS571X if I2C
@@ -403,6 +408,11 @@
 	select SND_SOC_CS42XX8
 	select REGMAP_I2C
 
+# Cirrus Logic CS4349 HiFi DAC
+config SND_SOC_CS4349
+	tristate "Cirrus Logic CS4349 CODEC"
+	depends on I2C
+
 config SND_SOC_CX20442
 	tristate
 	depends on TTY
@@ -446,6 +456,12 @@
 	tristate
 	select SND_SOC_ES8328
 
+config SND_SOC_GTM601
+	tristate "GTM601 UMTS modem audio codec"
+
+config SND_SOC_ICS43432
+	tristate
+
 config SND_SOC_ISABELLE
         tristate
 
@@ -512,12 +528,18 @@
 config SND_SOC_RL6347A
 	tristate
 	default y if SND_SOC_RT286=y
+	default y if SND_SOC_RT298=y
 	default m if SND_SOC_RT286=m
+	default m if SND_SOC_RT298=m
 
 config SND_SOC_RT286
 	tristate
 	depends on I2C
 
+config SND_SOC_RT298
+	tristate
+	depends on I2C
+
 config SND_SOC_RT5631
 	tristate "Realtek ALC5631/RT5631 CODEC"
 	depends on I2C
@@ -610,6 +632,9 @@
 config SND_SOC_STAC9766
 	tristate
 
+config SND_SOC_STI_SAS
+	tristate "Audio codec support for STI SAS"
+
 config SND_SOC_TAS2552
 	tristate "Texas Instruments TAS2552 Mono Audio amplifier"
 	depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index cf160d9..4a32077 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -45,6 +45,7 @@
 snd-soc-cs4271-spi-objs := cs4271-spi.o
 snd-soc-cs42xx8-objs := cs42xx8.o
 snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
+snd-soc-cs4349-objs := cs4349.o
 snd-soc-cx20442-objs := cx20442.o
 snd-soc-da7210-objs := da7210.o
 snd-soc-da7213-objs := da7213.o
@@ -55,6 +56,8 @@
 snd-soc-es8328-objs := es8328.o
 snd-soc-es8328-i2c-objs := es8328-i2c.o
 snd-soc-es8328-spi-objs := es8328-spi.o
+snd-soc-gtm601-objs := gtm601.o
+snd-soc-ics43432-objs := ics43432.o
 snd-soc-isabelle-objs := isabelle.o
 snd-soc-jz4740-codec-objs := jz4740.o
 snd-soc-l3-objs := l3.o
@@ -79,6 +82,7 @@
 snd-soc-rl6231-objs := rl6231.o
 snd-soc-rl6347a-objs := rl6347a.o
 snd-soc-rt286-objs := rt286.o
+snd-soc-rt298-objs := rt298.o
 snd-soc-rt5631-objs := rt5631.o
 snd-soc-rt5640-objs := rt5640.o
 snd-soc-rt5645-objs := rt5645.o
@@ -106,6 +110,7 @@
 snd-soc-sta350-objs := sta350.o
 snd-soc-sta529-objs := sta529.o
 snd-soc-stac9766-objs := stac9766.o
+snd-soc-sti-sas-objs := sti-sas.o
 snd-soc-tas5086-objs := tas5086.o
 snd-soc-tas571x-objs := tas571x.o
 snd-soc-tfa9879-objs := tfa9879.o
@@ -232,6 +237,7 @@
 obj-$(CONFIG_SND_SOC_CS4271_SPI)	+= snd-soc-cs4271-spi.o
 obj-$(CONFIG_SND_SOC_CS42XX8)	+= snd-soc-cs42xx8.o
 obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
+obj-$(CONFIG_SND_SOC_CS4349)	+= snd-soc-cs4349.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
 obj-$(CONFIG_SND_SOC_DA7210)	+= snd-soc-da7210.o
 obj-$(CONFIG_SND_SOC_DA7213)	+= snd-soc-da7213.o
@@ -242,6 +248,8 @@
 obj-$(CONFIG_SND_SOC_ES8328)	+= snd-soc-es8328.o
 obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
 obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
+obj-$(CONFIG_SND_SOC_GTM601)    += snd-soc-gtm601.o
+obj-$(CONFIG_SND_SOC_ICS43432)	+= snd-soc-ics43432.o
 obj-$(CONFIG_SND_SOC_ISABELLE)	+= snd-soc-isabelle.o
 obj-$(CONFIG_SND_SOC_JZ4740_CODEC)	+= snd-soc-jz4740-codec.o
 obj-$(CONFIG_SND_SOC_L3)	+= snd-soc-l3.o
@@ -266,6 +274,7 @@
 obj-$(CONFIG_SND_SOC_RL6231)	+= snd-soc-rl6231.o
 obj-$(CONFIG_SND_SOC_RL6347A)	+= snd-soc-rl6347a.o
 obj-$(CONFIG_SND_SOC_RT286)	+= snd-soc-rt286.o
+obj-$(CONFIG_SND_SOC_RT298)	+= snd-soc-rt298.o
 obj-$(CONFIG_SND_SOC_RT5631)	+= snd-soc-rt5631.o
 obj-$(CONFIG_SND_SOC_RT5640)	+= snd-soc-rt5640.o
 obj-$(CONFIG_SND_SOC_RT5645)	+= snd-soc-rt5645.o
@@ -289,6 +298,7 @@
 obj-$(CONFIG_SND_SOC_STA350)   += snd-soc-sta350.o
 obj-$(CONFIG_SND_SOC_STA529)   += snd-soc-sta529.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
+obj-$(CONFIG_SND_SOC_STI_SAS)	+= snd-soc-sti-sas.o
 obj-$(CONFIG_SND_SOC_TAS2552)	+= snd-soc-tas2552.o
 obj-$(CONFIG_SND_SOC_TAS5086)	+= snd-soc-tas5086.o
 obj-$(CONFIG_SND_SOC_TAS571X)	+= snd-soc-tas571x.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index c7d243d..affb192 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -1335,11 +1335,10 @@
 static DECLARE_TLV_DB_SCALE(hs_ear_dig_gain_tlv, -100, 100, 1);
 /* -1dB = Mute */
 
-static const unsigned int hs_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hs_gain_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-3200, 400, 0),
-	4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0),
-};
+	4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0)
+);
 
 static DECLARE_TLV_DB_SCALE(mic_gain_tlv, 0, 100, 0);
 
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 3cc69a6..9ef20db 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -202,19 +202,21 @@
 		.formats = SND_SOC_STD_AC97_FMTS, },
 };
 
+#define AD1980_VENDOR_ID 0x41445300
+#define AD1980_VENDOR_MASK 0xffffff00
+
 static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
 	unsigned int retry_cnt = 0;
+	int ret;
 
 	do {
-		if (try_warm && soc_ac97_ops->warm_reset) {
-			soc_ac97_ops->warm_reset(ac97);
-			if (snd_soc_read(codec, AC97_RESET) == 0x0090)
-				return 1;
-		}
+		ret = snd_ac97_reset(ac97, true, AD1980_VENDOR_ID,
+			AD1980_VENDOR_MASK);
+		if (ret >= 0)
+			return 0;
 
-		soc_ac97_ops->reset(ac97);
 		/*
 		 * Set bit 16slot in register 74h, then every slot will have only
 		 * 16 bits. This command is sent out in 20bit mode, in which
@@ -223,8 +225,6 @@
 		 */
 		snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
 
-		if (snd_soc_read(codec, AC97_RESET)  == 0x0090)
-			return 0;
 	} while (retry_cnt++ < 10);
 
 	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
@@ -240,7 +240,7 @@
 	u16 vendor_id2;
 	u16 ext_status;
 
-	ac97 = snd_soc_new_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, 0, 0);
 	if (IS_ERR(ac97)) {
 		ret = PTR_ERR(ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -260,22 +260,10 @@
 	if (ret < 0)
 		goto reset_err;
 
-	/* Read out vendor ID to make sure it is ad1980 */
-	if (snd_soc_read(codec, AC97_VENDOR_ID1) != 0x4144) {
-		ret = -ENODEV;
-		goto reset_err;
-	}
-
 	vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2);
-
-	if (vendor_id2 != 0x5370) {
-		if (vendor_id2 != 0x5374) {
-			ret = -ENODEV;
-			goto reset_err;
-		} else {
-			dev_warn(codec->dev,
-				"Found AD1981 - only 2/2 IN/OUT Channels supported\n");
-		}
+	if (vendor_id2 == 0x5374) {
+		dev_warn(codec->dev,
+			"Found AD1981 - only 2/2 IN/OUT Channels supported\n");
 	}
 
 	/* unmute captures and playbacks volume */
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index a431602..fe1353a 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -320,13 +320,12 @@
 	{ ADAU1373_DIGEN,		0x00 },
 };
 
-static const unsigned int adau1373_out_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(adau1373_out_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
 	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
-	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0)
+);
 
 static const DECLARE_TLV_DB_MINMAX(adau1373_digital_tlv, -9563, 0);
 static const DECLARE_TLV_DB_SCALE(adau1373_in_pga_tlv, -1300, 100, 1);
@@ -381,12 +380,11 @@
 	"158Hz", "232Hz", "347Hz", "520Hz",
 };
 
-static const unsigned int adau1373_bass_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(adau1373_bass_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-600, 600, 1),
 	3, 4, TLV_DB_SCALE_ITEM(950, 250, 0),
-	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0),
-};
+	5, 7, TLV_DB_SCALE_ITEM(1400, 150, 0)
+);
 
 static SOC_ENUM_SINGLE_DECL(adau1373_bass_lpf_cutoff_enum,
 	ADAU1373_BASS1, 5, adau1373_bass_lpf_cutoff_text);
@@ -414,11 +412,10 @@
 static SOC_ENUM_SINGLE_DECL(adau1373_3d_cutoff_enum,
 	ADAU1373_3D_CTRL1, 0, adau1373_3d_cutoff_text);
 
-static const unsigned int adau1373_3d_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(adau1373_3d_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
-	1, 7, TLV_DB_LINEAR_ITEM(-1800, -120),
-};
+	1, 7, TLV_DB_LINEAR_ITEM(-1800, -120)
+);
 
 static const char *adau1373_lr_mux_text[] = {
 	"Mute",
@@ -1534,7 +1531,6 @@
 static struct i2c_driver adau1373_i2c_driver = {
 	.driver = {
 		.name = "adau1373",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1373_i2c_probe,
 	.remove = adau1373_i2c_remove,
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index ff7f846..de53c0d 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -915,7 +915,6 @@
 static struct i2c_driver adau1701_i2c_driver = {
 	.driver = {
 		.name	= "adau1701",
-		.owner	= THIS_MODULE,
 		.of_match_table	= of_match_ptr(adau1701_dt_ids),
 	},
 	.probe		= adau1701_i2c_probe,
diff --git a/sound/soc/codecs/adau1761-i2c.c b/sound/soc/codecs/adau1761-i2c.c
index 862796d..348ccb1 100644
--- a/sound/soc/codecs/adau1761-i2c.c
+++ b/sound/soc/codecs/adau1761-i2c.c
@@ -47,7 +47,6 @@
 static struct i2c_driver adau1761_i2c_driver = {
 	.driver = {
 		.name = "adau1761",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1761_i2c_probe,
 	.remove = adau1761_i2c_remove,
diff --git a/sound/soc/codecs/adau1781-i2c.c b/sound/soc/codecs/adau1781-i2c.c
index 2ce4362..0e32bba 100644
--- a/sound/soc/codecs/adau1781-i2c.c
+++ b/sound/soc/codecs/adau1781-i2c.c
@@ -45,7 +45,6 @@
 static struct i2c_driver adau1781_i2c_driver = {
 	.driver = {
 		.name = "adau1781",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1781_i2c_probe,
 	.remove = adau1781_i2c_remove,
diff --git a/sound/soc/codecs/adau1977-i2c.c b/sound/soc/codecs/adau1977-i2c.c
index 9700e8c..21e7394 100644
--- a/sound/soc/codecs/adau1977-i2c.c
+++ b/sound/soc/codecs/adau1977-i2c.c
@@ -46,7 +46,6 @@
 static struct i2c_driver adau1977_i2c_driver = {
 	.driver = {
 		.name = "adau1977",
-		.owner = THIS_MODULE,
 	},
 	.probe = adau1977_i2c_probe,
 	.remove = adau1977_i2c_remove,
diff --git a/sound/soc/codecs/adav803.c b/sound/soc/codecs/adav803.c
index 66d9fce..52881fa 100644
--- a/sound/soc/codecs/adav803.c
+++ b/sound/soc/codecs/adav803.c
@@ -36,7 +36,6 @@
 static struct i2c_driver adav803_driver = {
 	.driver = {
 		.name = "adav803",
-		.owner = THIS_MODULE,
 	},
 	.probe = adav803_probe,
 	.remove = adav803_remove,
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 36d8425..198c924 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -113,7 +113,7 @@
 
 #define ADAV80X_PLL_OUTE_SYSCLKPD(x)		BIT(2 - (x))
 
-static struct reg_default adav80x_reg_defaults[] = {
+static const struct reg_default adav80x_reg_defaults[] = {
 	{ ADAV80X_PLAYBACK_CTRL,	0x01 },
 	{ ADAV80X_AUX_IN_CTRL,		0x01 },
 	{ ADAV80X_REC_CTRL,		0x02 },
@@ -865,7 +865,6 @@
 	.val_bits = 8,
 	.pad_bits = 1,
 	.reg_bits = 7,
-	.read_flag_mask = 0x01,
 
 	.max_register = ADAV80X_PLL_OUTE,
 
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 8670861..54428c6 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -444,7 +444,6 @@
 static struct i2c_driver ak4535_i2c_driver = {
 	.driver = {
 		.name = "ak4535",
-		.owner = THIS_MODULE,
 	},
 	.probe =    ak4535_i2c_probe,
 	.remove =   ak4535_i2c_remove,
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 2d0ff45..b14176f 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -609,7 +609,6 @@
 static struct i2c_driver ak4641_i2c_driver = {
 	.driver = {
 		.name = "ak4641",
-		.owner = THIS_MODULE,
 	},
 	.probe =    ak4641_i2c_probe,
 	.remove =   ak4641_i2c_remove,
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 7c0f6552..4a90143 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -64,12 +64,15 @@
 #define FIL1_0		0x1c
 #define FIL1_1		0x1d
 #define FIL1_2		0x1e
-#define FIL1_3		0x1f
+#define FIL1_3		0x1f	/* The maximum valid register for ak4642 */
 #define PW_MGMT4	0x20
 #define MD_CTL5		0x21
 #define LO_MS		0x22
 #define HP_MS		0x23
-#define SPK_MS		0x24
+#define SPK_MS		0x24	/* The maximum valid register for ak4643 */
+#define EQ_FBEQAB	0x25
+#define EQ_FBEQCD	0x26
+#define EQ_FBEQE	0x27	/* The maximum valid register for ak4648 */
 
 /* PW_MGMT1*/
 #define PMVCM		(1 << 6) /* VCOM Power Management */
@@ -241,7 +244,7 @@
 /*
  * ak4642 register cache
  */
-static const struct reg_default ak4642_reg[] = {
+static const struct reg_default ak4643_reg[] = {
 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
 	{  8, 0xe1 }, {  9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 },
@@ -254,6 +257,14 @@
 	{ 36, 0x00 },
 };
 
+/* The default settings for registers 0x0 ~ 0x1f are the same for ak4642
+ * and ak4643, so we reuse the ak4643 reg_defaults for ak4642.
+ * The valid registers for ak4642 are 0x0 ~ 0x1f, a subset of ak4643's,
+ * so define NUM_AK4642_REG_DEFAULTS for ak4642.
+ */
+#define ak4642_reg ak4643_reg
+#define NUM_AK4642_REG_DEFAULTS	(FIL1_3 + 1)
+
 static const struct reg_default ak4648_reg[] = {
 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
@@ -535,15 +546,23 @@
 static const struct regmap_config ak4642_regmap = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
-	.max_register		= ARRAY_SIZE(ak4642_reg) + 1,
+	.max_register		= FIL1_3,
 	.reg_defaults		= ak4642_reg,
-	.num_reg_defaults	= ARRAY_SIZE(ak4642_reg),
+	.num_reg_defaults	= NUM_AK4642_REG_DEFAULTS,
+};
+
+static const struct regmap_config ak4643_regmap = {
+	.reg_bits		= 8,
+	.val_bits		= 8,
+	.max_register		= SPK_MS,
+	.reg_defaults		= ak4643_reg,
+	.num_reg_defaults	= ARRAY_SIZE(ak4643_reg),
 };
 
 static const struct regmap_config ak4648_regmap = {
 	.reg_bits		= 8,
 	.val_bits		= 8,
-	.max_register		= ARRAY_SIZE(ak4648_reg) + 1,
+	.max_register		= EQ_FBEQE,
 	.reg_defaults		= ak4648_reg,
 	.num_reg_defaults	= ARRAY_SIZE(ak4648_reg),
 };
@@ -553,7 +572,7 @@
 };
 
 static const struct ak4642_drvdata ak4643_drvdata = {
-	.regmap_config = &ak4642_regmap,
+	.regmap_config = &ak4643_regmap,
 };
 
 static const struct ak4642_drvdata ak4648_drvdata = {
@@ -626,7 +645,6 @@
 static struct i2c_driver ak4642_i2c_driver = {
 	.driver = {
 		.name = "ak4642-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = ak4642_of_match,
 	},
 	.probe		= ak4642_i2c_probe,
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 0e59063..c73a9f6 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -663,7 +663,6 @@
 static struct i2c_driver ak4671_i2c_driver = {
 	.driver = {
 		.name = "ak4671-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe = ak4671_i2c_probe,
 	.remove = ak4671_i2c_remove,
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 0fc24e0..d2e3a3e 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -82,12 +82,11 @@
 static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
 static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
-static const unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
 
 static const struct snd_kcontrol_new alc5621_vol_snd_controls[] = {
@@ -1085,7 +1084,6 @@
 static struct i2c_driver alc5623_i2c_driver = {
 	.driver = {
 		.name = "alc562x-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(alc5623_of_match),
 	},
 	.probe = alc5623_i2c_probe,
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 607a63b..4d3ba33 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -35,7 +35,7 @@
 /*
  * ALC5632 register cache
  */
-static struct reg_default  alc5632_reg_defaults[] = {
+static const struct reg_default alc5632_reg_defaults[] = {
 	{   2, 0x8080 },	/* R2   - Speaker Output Volume */
 	{   4, 0x8080 },	/* R4   - Headphone Output Volume */
 	{   6, 0x8080 },	/* R6   - AUXOUT Volume */
@@ -146,11 +146,10 @@
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
 /* -16.5db min scale, 1.5db steps, no mute */
 static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
-static const unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
-};
+	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
+);
 /* 0db min scale, 6 db steps, no mute */
 static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
 /* 0db min scale, 0.75db steps, no mute */
@@ -1183,7 +1182,6 @@
 static struct i2c_driver alc5632_i2c_driver = {
 	.driver = {
 		.name = "alc5632",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(alc5632_of_match),
 	},
 	.probe = alc5632_i2c_probe,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 802e05e..8a2221a 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1366,7 +1366,7 @@
 {
 	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
 	struct arizona *arizona = priv->arizona;
-	struct reg_default dac_comp[] = {
+	struct reg_sequence dac_comp[] = {
 		{ 0x80, 0x3 },
 		{ ARIZONA_DAC_COMP_1, 0 },
 		{ ARIZONA_DAC_COMP_2, 0 },
@@ -1504,7 +1504,7 @@
 	else
 		rates = &arizona_48k_bclk_rates[0];
 
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 
 	if (tdm_slots) {
 		arizona_aif_dbg(dai, "Configuring for %d %d bit TDM slots\n",
@@ -1756,17 +1756,6 @@
 }
 EXPORT_SYMBOL_GPL(arizona_init_dai);
 
-static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
-{
-	struct arizona_fll *fll = data;
-
-	arizona_fll_dbg(fll, "clock OK\n");
-
-	complete(&fll->ok);
-
-	return IRQ_HANDLED;
-}
-
 static struct {
 	unsigned int min;
 	unsigned int max;
@@ -2048,17 +2037,18 @@
 static int arizona_enable_fll(struct arizona_fll *fll)
 {
 	struct arizona *arizona = fll->arizona;
-	unsigned long time_left;
 	bool use_sync = false;
 	int already_enabled = arizona_is_enabled_fll(fll);
 	struct arizona_fll_cfg cfg;
+	int i;
+	unsigned int val;
 
 	if (already_enabled < 0)
 		return already_enabled;
 
 	if (already_enabled) {
 		/* Facilitate smooth refclk across the transition */
-		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7,
+		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
 					 ARIZONA_FLL1_GAIN_MASK, 0);
 		regmap_update_bits_async(fll->arizona->regmap, fll->base + 1,
 					 ARIZONA_FLL1_FREERUN,
@@ -2110,9 +2100,6 @@
 	if (!already_enabled)
 		pm_runtime_get(arizona->dev);
 
-	/* Clear any pending completions */
-	try_wait_for_completion(&fll->ok);
-
 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
 				 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
 	if (use_sync)
@@ -2124,10 +2111,24 @@
 		regmap_update_bits_async(arizona->regmap, fll->base + 1,
 					 ARIZONA_FLL1_FREERUN, 0);
 
-	time_left = wait_for_completion_timeout(&fll->ok,
-					  msecs_to_jiffies(250));
-	if (time_left == 0)
+	arizona_fll_dbg(fll, "Waiting for FLL lock...\n");
+	val = 0;
+	for (i = 0; i < 15; i++) {
+		if (i < 5)
+			usleep_range(200, 400);
+		else
+			msleep(20);
+
+		regmap_read(arizona->regmap,
+			    ARIZONA_INTERRUPT_RAW_STATUS_5,
+			    &val);
+		if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1)))
+			break;
+	}
+	if (i == 15)
 		arizona_fll_warn(fll, "Timed out waiting for lock\n");
+	else
+		arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i);
 
 	return 0;
 }
@@ -2212,11 +2213,8 @@
 int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
 		     int ok_irq, struct arizona_fll *fll)
 {
-	int ret;
 	unsigned int val;
 
-	init_completion(&fll->ok);
-
 	fll->id = id;
 	fll->base = base;
 	fll->arizona = arizona;
@@ -2238,13 +2236,6 @@
 	snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
 		 "FLL%d clock OK", id);
 
-	ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
-				  arizona_fll_clock_ok, fll);
-	if (ret != 0) {
-		dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
-			id, ret);
-	}
-
 	regmap_update_bits(arizona->regmap, fll->base + 1,
 			   ARIZONA_FLL1_FREERUN, 0);
 
@@ -2313,6 +2304,82 @@
 };
 EXPORT_SYMBOL_GPL(arizona_adsp2_rate_controls);
 
+static bool arizona_eq_filter_unstable(bool mode, __be16 _a, __be16 _b)
+{
+	s16 a = be16_to_cpu(_a);
+	s16 b = be16_to_cpu(_b);
+
+	if (!mode) {
+		return abs(a) >= 4096;
+	} else {
+		if (abs(b) >= 4096)
+			return true;
+
+		return (abs((a << 16) / (4096 - b)) >= 4096 << 4);
+	}
+}
+
+int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+			 struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct soc_bytes *params = (void *)kcontrol->private_value;
+	unsigned int val;
+	__be16 *data;
+	int len;
+	int ret;
+
+	len = params->num_regs * regmap_get_val_bytes(arizona->regmap);
+
+	data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
+	if (!data)
+		return -ENOMEM;
+
+	data[0] &= cpu_to_be16(ARIZONA_EQ1_B1_MODE);
+
+	if (arizona_eq_filter_unstable(!!data[0], data[1], data[2]) ||
+	    arizona_eq_filter_unstable(true, data[4], data[5]) ||
+	    arizona_eq_filter_unstable(true, data[8], data[9]) ||
+	    arizona_eq_filter_unstable(true, data[12], data[13]) ||
+	    arizona_eq_filter_unstable(false, data[16], data[17])) {
+		dev_err(arizona->dev, "Rejecting unstable EQ coefficients\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = regmap_read(arizona->regmap, params->base, &val);
+	if (ret != 0)
+		goto out;
+
+	val &= ~ARIZONA_EQ1_B1_MODE;
+	data[0] |= cpu_to_be16(val);
+
+	ret = regmap_raw_write(arizona->regmap, params->base, data, len);
+
+out:
+	kfree(data);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arizona_eq_coeff_put);
+
+int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	__be16 *data = (__be16 *)ucontrol->value.bytes.data;
+	s16 val = be16_to_cpu(*data);
+
+	if (abs(val) >= 4096) {
+		dev_err(arizona->dev, "Rejecting unstable LHPF coefficients\n");
+		return -EINVAL;
+	}
+
+	return snd_soc_bytes_put(kcontrol, ucontrol);
+}
+EXPORT_SYMBOL_GPL(arizona_lhpf_coeff_put);
+
 MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
 MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 43deb04..ada0a41 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -194,6 +194,20 @@
 	ARIZONA_MIXER_ROUTES(name " Preloader", name "L"), \
 	ARIZONA_MIXER_ROUTES(name " Preloader", name "R")
 
+#define ARIZONA_EQ_CONTROL(xname, xbase)                      \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,   \
+	.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
+	.put = arizona_eq_coeff_put, .private_value =         \
+	((unsigned long)&(struct soc_bytes) { .base = xbase,  \
+	 .num_regs = 20, .mask = ~ARIZONA_EQ1_B1_MODE }) }
+
+#define ARIZONA_LHPF_CONTROL(xname, xbase)                    \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname,   \
+	.info = snd_soc_bytes_info, .get = snd_soc_bytes_get, \
+	.put = arizona_lhpf_coeff_put, .private_value =       \
+	((unsigned long)&(struct soc_bytes) { .base = xbase,  \
+	 .num_regs = 1 }) }
+
 #define ARIZONA_RATE_ENUM_SIZE 4
 extern const char *arizona_rate_text[ARIZONA_RATE_ENUM_SIZE];
 extern const int arizona_rate_val[ARIZONA_RATE_ENUM_SIZE];
@@ -229,6 +243,11 @@
 			 struct snd_kcontrol *kcontrol,
 			 int event);
 
+extern int arizona_eq_coeff_put(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol);
+extern int arizona_lhpf_coeff_put(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol);
+
 extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
 			      int source, unsigned int freq, int dir);
 
@@ -242,7 +261,6 @@
 	int id;
 	unsigned int base;
 	unsigned int vco_mult;
-	struct completion ok;
 
 	unsigned int fout;
 	int sync_src;
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 8f40025..44c30fe 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -74,33 +74,8 @@
 static bool cs35l32_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_DEVID_AB:
-	case CS35L32_DEVID_CD:
-	case CS35L32_DEVID_E:
-	case CS35L32_FAB_ID:
-	case CS35L32_REV_ID:
-	case CS35L32_PWRCTL1:
-	case CS35L32_PWRCTL2:
-	case CS35L32_CLK_CTL:
-	case CS35L32_BATT_THRESHOLD:
-	case CS35L32_VMON:
-	case CS35L32_BST_CPCP_CTL:
-	case CS35L32_IMON_SCALING:
-	case CS35L32_AUDIO_LED_MNGR:
-	case CS35L32_ADSP_CTL:
-	case CS35L32_CLASSD_CTL:
-	case CS35L32_PROTECT_CTL:
-	case CS35L32_INT_MASK_1:
-	case CS35L32_INT_MASK_2:
-	case CS35L32_INT_MASK_3:
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
-	case CS35L32_FLASH_MODE:
-	case CS35L32_MOVIE_MODE:
-	case CS35L32_FLASH_TIMER:
-	case CS35L32_FLASH_INHIBIT:
+	case CS35L32_DEVID_AB ... CS35L32_AUDIO_LED_MNGR:
+	case CS35L32_ADSP_CTL ... CS35L32_FLASH_INHIBIT:
 		return true;
 	default:
 		return false;
@@ -110,15 +85,8 @@
 static bool cs35l32_volatile_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_DEVID_AB:
-	case CS35L32_DEVID_CD:
-	case CS35L32_DEVID_E:
-	case CS35L32_FAB_ID:
-	case CS35L32_REV_ID:
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
+	case CS35L32_DEVID_AB ... CS35L32_REV_ID:
+	case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
 		return true;
 	default:
 		return false;
@@ -128,10 +96,7 @@
 static bool cs35l32_precious_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS35L32_INT_STATUS_1:
-	case CS35L32_INT_STATUS_2:
-	case CS35L32_INT_STATUS_3:
-	case CS35L32_LED_STATUS:
+	case CS35L32_INT_STATUS_1 ... CS35L32_LED_STATUS:
 		return true;
 	default:
 		return false;
@@ -276,7 +241,7 @@
 };
 
 /* Current and threshold powerup sequence Pg37 in datasheet */
-static const struct reg_default cs35l32_monitor_patch[] = {
+static const struct reg_sequence cs35l32_monitor_patch[] = {
 
 	{ 0x00, 0x99 },
 	{ 0x48, 0x17 },
@@ -441,8 +406,7 @@
 	if (IS_ERR(cs35l32->reset_gpio))
 		return PTR_ERR(cs35l32->reset_gpio);
 
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
 
 	/* initialize codec */
 	ret = regmap_read(cs35l32->regmap, CS35L32_DEVID_AB, &reg);
@@ -536,8 +500,7 @@
 	snd_soc_unregister_codec(&i2c_client->dev);
 
 	/* Hold down reset */
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
 
 	return 0;
 }
@@ -551,8 +514,7 @@
 	regcache_mark_dirty(cs35l32->regmap);
 
 	/* Hold down reset */
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 0);
 
 	/* remove power */
 	regulator_bulk_disable(ARRAY_SIZE(cs35l32->supplies),
@@ -575,8 +537,7 @@
 		return ret;
 	}
 
-	if (cs35l32->reset_gpio)
-		gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
+	gpiod_set_value_cansleep(cs35l32->reset_gpio, 1);
 
 	regcache_cache_only(cs35l32->regmap, false);
 	regcache_sync(cs35l32->regmap);
@@ -607,7 +568,6 @@
 static struct i2c_driver cs35l32_i2c_driver = {
 	.driver = {
 		   .name = "cs35l32",
-		   .owner = THIS_MODULE,
 		   .pm = &cs35l32_runtime_pm,
 		   .of_match_table = cs35l32_of_match,
 		   },
diff --git a/sound/soc/codecs/cs35l32.h b/sound/soc/codecs/cs35l32.h
index 31ab804..1d6c250 100644
--- a/sound/soc/codecs/cs35l32.h
+++ b/sound/soc/codecs/cs35l32.h
@@ -80,7 +80,7 @@
 #define CS35L32_GAIN_MGR_MASK		0x08
 #define CS35L32_ADSP_SHARE_MASK		0x08
 #define CS35L32_ADSP_DATACFG_MASK	0x30
-#define CS35L32_SDOUT_3ST		0x80
+#define CS35L32_SDOUT_3ST		0x08
 #define CS35L32_BATT_REC_MASK		0x0E
 #define CS35L32_BATT_THRESH_MASK	0x30
 
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 8e36198..55db19d 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -60,23 +60,7 @@
 static bool cs4265_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS4265_PWRCTL:
-	case CS4265_DAC_CTL:
-	case CS4265_ADC_CTL:
-	case CS4265_MCLK_FREQ:
-	case CS4265_SIG_SEL:
-	case CS4265_CHB_PGA_CTL:
-	case CS4265_CHA_PGA_CTL:
-	case CS4265_ADC_CTL2:
-	case CS4265_DAC_CHA_VOL:
-	case CS4265_DAC_CHB_VOL:
-	case CS4265_DAC_CTL2:
-	case CS4265_SPDIF_CTL1:
-	case CS4265_SPDIF_CTL2:
-	case CS4265_INT_MASK:
-	case CS4265_STATUS_MODE_MSB:
-	case CS4265_STATUS_MODE_LSB:
-	case CS4265_CHIP_ID:
+	case CS4265_CHIP_ID ... CS4265_SPDIF_CTL2:
 		return true;
 	default:
 		return false;
@@ -658,7 +642,6 @@
 static struct i2c_driver cs4265_i2c_driver = {
 	.driver = {
 		.name = "cs4265",
-		.owner = THIS_MODULE,
 		.of_match_table = cs4265_of_match,
 	},
 	.id_table = cs4265_id,
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index e6d4ff9..e07807d 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -751,7 +751,6 @@
 static struct i2c_driver cs4270_i2c_driver = {
 	.driver = {
 		.name = "cs4270",
-		.owner = THIS_MODULE,
 		.of_match_table = cs4270_of_match,
 	},
 	.id_table = cs4270_id,
diff --git a/sound/soc/codecs/cs4271-i2c.c b/sound/soc/codecs/cs4271-i2c.c
index b264da0..dcb3223 100644
--- a/sound/soc/codecs/cs4271-i2c.c
+++ b/sound/soc/codecs/cs4271-i2c.c
@@ -48,7 +48,6 @@
 static struct i2c_driver cs4271_i2c_driver = {
 	.driver = {
 		.name = "cs4271",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(cs4271_dt_ids),
 	},
 	.probe = cs4271_i2c_probe,
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index c40428f..9bad478 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -45,7 +45,6 @@
 static struct i2c_driver cs42l51_i2c_driver = {
 	.driver = {
 		.name = "cs42l51",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l51_of_match,
 	},
 	.probe = cs42l51_i2c_probe,
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 4de52c9..47b97fc 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -110,58 +110,7 @@
 static bool cs42l52_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L52_CHIP:
-	case CS42L52_PWRCTL1:
-	case CS42L52_PWRCTL2:
-	case CS42L52_PWRCTL3:
-	case CS42L52_CLK_CTL:
-	case CS42L52_IFACE_CTL1:
-	case CS42L52_IFACE_CTL2:
-	case CS42L52_ADC_PGA_A:
-	case CS42L52_ADC_PGA_B:
-	case CS42L52_ANALOG_HPF_CTL:
-	case CS42L52_ADC_HPF_FREQ:
-	case CS42L52_ADC_MISC_CTL:
-	case CS42L52_PB_CTL1:
-	case CS42L52_MISC_CTL:
-	case CS42L52_PB_CTL2:
-	case CS42L52_MICA_CTL:
-	case CS42L52_MICB_CTL:
-	case CS42L52_PGAA_CTL:
-	case CS42L52_PGAB_CTL:
-	case CS42L52_PASSTHRUA_VOL:
-	case CS42L52_PASSTHRUB_VOL:
-	case CS42L52_ADCA_VOL:
-	case CS42L52_ADCB_VOL:
-	case CS42L52_ADCA_MIXER_VOL:
-	case CS42L52_ADCB_MIXER_VOL:
-	case CS42L52_PCMA_MIXER_VOL:
-	case CS42L52_PCMB_MIXER_VOL:
-	case CS42L52_BEEP_FREQ:
-	case CS42L52_BEEP_VOL:
-	case CS42L52_BEEP_TONE_CTL:
-	case CS42L52_TONE_CTL:
-	case CS42L52_MASTERA_VOL:
-	case CS42L52_MASTERB_VOL:
-	case CS42L52_HPA_VOL:
-	case CS42L52_HPB_VOL:
-	case CS42L52_SPKA_VOL:
-	case CS42L52_SPKB_VOL:
-	case CS42L52_ADC_PCM_MIXER:
-	case CS42L52_LIMITER_CTL1:
-	case CS42L52_LIMITER_CTL2:
-	case CS42L52_LIMITER_AT_RATE:
-	case CS42L52_ALC_CTL:
-	case CS42L52_ALC_RATE:
-	case CS42L52_ALC_THRESHOLD:
-	case CS42L52_NOISE_GATE_CTL:
-	case CS42L52_CLK_STATUS:
-	case CS42L52_BATT_COMPEN:
-	case CS42L52_BATT_LEVEL:
-	case CS42L52_SPK_STATUS:
-	case CS42L52_TEM_CTL:
-	case CS42L52_THE_FOLDBACK:
-	case CS42L52_CHARGE_PUMP:
+	case CS42L52_CHIP ... CS42L52_CHARGE_PUMP:
 		return true;
 	default:
 		return false;
@@ -196,11 +145,10 @@
 
 static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
 
-static const unsigned int limiter_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(limiter_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const char * const cs42l52_adca_text[] = {
 	"Input1A", "Input2A", "Input3A", "Input4A", "PGA Input Left"};
@@ -919,7 +867,7 @@
 			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_U20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE)
 
-static struct snd_soc_dai_ops cs42l52_ops = {
+static const struct snd_soc_dai_ops cs42l52_ops = {
 	.hw_params	= cs42l52_pcm_hw_params,
 	.digital_mute	= cs42l52_digital_mute,
 	.set_fmt	= cs42l52_set_fmt,
@@ -1118,7 +1066,7 @@
 };
 
 /* Current and threshold powerup sequence Pg37 */
-static const struct reg_default cs42l52_threshold_patch[] = {
+static const struct reg_sequence cs42l52_threshold_patch[] = {
 
 	{ 0x00, 0x99 },
 	{ 0x3E, 0xBA },
@@ -1285,7 +1233,6 @@
 static struct i2c_driver cs42l52_i2c_driver = {
 	.driver = {
 		.name = "cs42l52",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l52_of_match,
 	},
 	.id_table = cs42l52_id,
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index 1e11ba4..7cd5f76 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -115,52 +115,7 @@
 static bool cs42l56_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L56_CHIP_ID_1:
-	case CS42L56_CHIP_ID_2:
-	case CS42L56_PWRCTL_1:
-	case CS42L56_PWRCTL_2:
-	case CS42L56_CLKCTL_1:
-	case CS42L56_CLKCTL_2:
-	case CS42L56_SERIAL_FMT:
-	case CS42L56_CLASSH_CTL:
-	case CS42L56_MISC_CTL:
-	case CS42L56_INT_STATUS:
-	case CS42L56_PLAYBACK_CTL:
-	case CS42L56_DSP_MUTE_CTL:
-	case CS42L56_ADCA_MIX_VOLUME:
-	case CS42L56_ADCB_MIX_VOLUME:
-	case CS42L56_PCMA_MIX_VOLUME:
-	case CS42L56_PCMB_MIX_VOLUME:
-	case CS42L56_ANAINPUT_ADV_VOLUME:
-	case CS42L56_DIGINPUT_ADV_VOLUME:
-	case CS42L56_MASTER_A_VOLUME:
-	case CS42L56_MASTER_B_VOLUME:
-	case CS42L56_BEEP_FREQ_ONTIME:
-	case CS42L56_BEEP_FREQ_OFFTIME:
-	case CS42L56_BEEP_TONE_CFG:
-	case CS42L56_TONE_CTL:
-	case CS42L56_CHAN_MIX_SWAP:
-	case CS42L56_AIN_REFCFG_ADC_MUX:
-	case CS42L56_HPF_CTL:
-	case CS42L56_MISC_ADC_CTL:
-	case CS42L56_GAIN_BIAS_CTL:
-	case CS42L56_PGAA_MUX_VOLUME:
-	case CS42L56_PGAB_MUX_VOLUME:
-	case CS42L56_ADCA_ATTENUATOR:
-	case CS42L56_ADCB_ATTENUATOR:
-	case CS42L56_ALC_EN_ATTACK_RATE:
-	case CS42L56_ALC_RELEASE_RATE:
-	case CS42L56_ALC_THRESHOLD:
-	case CS42L56_NOISE_GATE_CTL:
-	case CS42L56_ALC_LIM_SFT_ZC:
-	case CS42L56_AMUTE_HPLO_MUX:
-	case CS42L56_HPA_VOLUME:
-	case CS42L56_HPB_VOLUME:
-	case CS42L56_LOA_VOLUME:
-	case CS42L56_LOB_VOLUME:
-	case CS42L56_LIM_THRESHOLD_CTL:
-	case CS42L56_LIM_CTL_RELEASE_RATE:
-	case CS42L56_LIM_ATTACK_RATE:
+	case CS42L56_CHIP_ID_1 ... CS42L56_LIM_ATTACK_RATE:
 		return true;
 	default:
 		return false;
@@ -185,21 +140,18 @@
 static DECLARE_TLV_DB_SCALE(preamp_tlv, 0, 1000, 0);
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
-static const unsigned int ngnb_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(ngnb_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-8200, 600, 0),
-	2, 5, TLV_DB_SCALE_ITEM(-7600, 300, 0),
-};
-static const unsigned int ngb_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	2, 5, TLV_DB_SCALE_ITEM(-7600, 300, 0)
+);
+static const DECLARE_TLV_DB_RANGE(ngb_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-6400, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-4600, 300, 0),
-};
-static const unsigned int alc_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	3, 7, TLV_DB_SCALE_ITEM(-4600, 300, 0)
+);
+static const DECLARE_TLV_DB_RANGE(alc_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const char * const beep_config_text[] = {
 	"Off", "Single", "Multiple", "Continuous"
@@ -989,7 +941,7 @@
 			SNDRV_PCM_FMTBIT_S32_LE)
 
 
-static struct snd_soc_dai_ops cs42l56_ops = {
+static const struct snd_soc_dai_ops cs42l56_ops = {
 	.hw_params	= cs42l56_pcm_hw_params,
 	.digital_mute	= cs42l56_digital_mute,
 	.set_fmt	= cs42l56_set_dai_fmt,
@@ -1408,7 +1360,6 @@
 static struct i2c_driver cs42l56_i2c_driver = {
 	.driver = {
 		.name = "cs42l56",
-		.owner = THIS_MODULE,
 		.of_match_table = cs42l56_of_match,
 	},
 	.id_table = cs42l56_id,
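
The TLV conversions in this file (and in most of the codec patches that
follow) replace open-coded arrays, where TLV_DB_RANGE_HEAD(n) had to be
kept in sync with the number of entries by hand, with the
DECLARE_TLV_DB_RANGE() helper, which derives the count from its arguments.
A side-by-side sketch reusing the alc_tlv values shown above:

#include <sound/tlv.h>

/* old style: the "2" must match the number of range entries */
static const unsigned int alc_tlv_old[] = {
	TLV_DB_RANGE_HEAD(2),
	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
};

/* new style: the macro counts the entries itself */
static const DECLARE_TLV_DB_RANGE(alc_tlv_new,
	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
);

Both forms describe the same control metadata; the new one just removes the
hand-maintained header.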
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index b7853b9..42a8fd4 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -153,111 +153,18 @@
 static bool cs42l73_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case CS42L73_DEVID_AB:
-	case CS42L73_DEVID_CD:
-	case CS42L73_DEVID_E:
-	case CS42L73_REVID:
-	case CS42L73_PWRCTL1:
-	case CS42L73_PWRCTL2:
-	case CS42L73_PWRCTL3:
-	case CS42L73_CPFCHC:
-	case CS42L73_OLMBMSDC:
-	case CS42L73_DMMCC:
-	case CS42L73_XSPC:
-	case CS42L73_XSPMMCC:
-	case CS42L73_ASPC:
-	case CS42L73_ASPMMCC:
-	case CS42L73_VSPC:
-	case CS42L73_VSPMMCC:
-	case CS42L73_VXSPFS:
-	case CS42L73_MIOPC:
-	case CS42L73_ADCIPC:
-	case CS42L73_MICAPREPGAAVOL:
-	case CS42L73_MICBPREPGABVOL:
-	case CS42L73_IPADVOL:
-	case CS42L73_IPBDVOL:
-	case CS42L73_PBDC:
-	case CS42L73_HLADVOL:
-	case CS42L73_HLBDVOL:
-	case CS42L73_SPKDVOL:
-	case CS42L73_ESLDVOL:
-	case CS42L73_HPAAVOL:
-	case CS42L73_HPBAVOL:
-	case CS42L73_LOAAVOL:
-	case CS42L73_LOBAVOL:
-	case CS42L73_STRINV:
-	case CS42L73_XSPINV:
-	case CS42L73_ASPINV:
-	case CS42L73_VSPINV:
-	case CS42L73_LIMARATEHL:
-	case CS42L73_LIMRRATEHL:
-	case CS42L73_LMAXHL:
-	case CS42L73_LIMARATESPK:
-	case CS42L73_LIMRRATESPK:
-	case CS42L73_LMAXSPK:
-	case CS42L73_LIMARATEESL:
-	case CS42L73_LIMRRATEESL:
-	case CS42L73_LMAXESL:
-	case CS42L73_ALCARATE:
-	case CS42L73_ALCRRATE:
-	case CS42L73_ALCMINMAX:
-	case CS42L73_NGCAB:
-	case CS42L73_ALCNGMC:
-	case CS42L73_MIXERCTL:
-	case CS42L73_HLAIPAA:
-	case CS42L73_HLBIPBA:
-	case CS42L73_HLAXSPAA:
-	case CS42L73_HLBXSPBA:
-	case CS42L73_HLAASPAA:
-	case CS42L73_HLBASPBA:
-	case CS42L73_HLAVSPMA:
-	case CS42L73_HLBVSPMA:
-	case CS42L73_XSPAIPAA:
-	case CS42L73_XSPBIPBA:
-	case CS42L73_XSPAXSPAA:
-	case CS42L73_XSPBXSPBA:
-	case CS42L73_XSPAASPAA:
-	case CS42L73_XSPAASPBA:
-	case CS42L73_XSPAVSPMA:
-	case CS42L73_XSPBVSPMA:
-	case CS42L73_ASPAIPAA:
-	case CS42L73_ASPBIPBA:
-	case CS42L73_ASPAXSPAA:
-	case CS42L73_ASPBXSPBA:
-	case CS42L73_ASPAASPAA:
-	case CS42L73_ASPBASPBA:
-	case CS42L73_ASPAVSPMA:
-	case CS42L73_ASPBVSPMA:
-	case CS42L73_VSPAIPAA:
-	case CS42L73_VSPBIPBA:
-	case CS42L73_VSPAXSPAA:
-	case CS42L73_VSPBXSPBA:
-	case CS42L73_VSPAASPAA:
-	case CS42L73_VSPBASPBA:
-	case CS42L73_VSPAVSPMA:
-	case CS42L73_VSPBVSPMA:
-	case CS42L73_MMIXCTL:
-	case CS42L73_SPKMIPMA:
-	case CS42L73_SPKMXSPA:
-	case CS42L73_SPKMASPA:
-	case CS42L73_SPKMVSPMA:
-	case CS42L73_ESLMIPMA:
-	case CS42L73_ESLMXSPA:
-	case CS42L73_ESLMASPA:
-	case CS42L73_ESLMVSPMA:
-	case CS42L73_IM1:
-	case CS42L73_IM2:
+	case CS42L73_DEVID_AB ... CS42L73_DEVID_E:
+	case CS42L73_REVID ... CS42L73_IM2:
 		return true;
 	default:
 		return false;
 	}
 }
 
-static const unsigned int hpaloa_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hpaloa_tlv,
 	0, 13, TLV_DB_SCALE_ITEM(-7600, 200, 0),
-	14, 75, TLV_DB_SCALE_ITEM(-4900, 100, 0),
-};
+	14, 75, TLV_DB_SCALE_ITEM(-4900, 100, 0)
+);
 
 static DECLARE_TLV_DB_SCALE(adc_boost_tlv, 0, 2500, 0);
 
@@ -267,11 +174,10 @@
 
 static DECLARE_TLV_DB_SCALE(micpga_tlv, -600, 50, 0);
 
-static const unsigned int limiter_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(limiter_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
-	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-};
+	3, 7, TLV_DB_SCALE_ITEM(-1200, 300, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(attn_tlv, -6300, 100, 1);
 
@@ -1236,8 +1142,8 @@
 	struct snd_soc_codec *codec = dai->codec;
 	int id = dai->id;
 
-	return snd_soc_update_bits(codec, CS42L73_SPC(id),
-					0x7F, tristate << 7);
+	return snd_soc_update_bits(codec, CS42L73_SPC(id), CS42L73_SP_3ST,
+				   tristate << 7);
 }
 
 static const struct snd_pcm_hw_constraint_list constraints_12_24 = {
@@ -1491,7 +1397,6 @@
 static struct i2c_driver cs42l73_i2c_driver = {
 	.driver = {
 		   .name = "cs42l73",
-		   .owner = THIS_MODULE,
 		   .of_match_table = cs42l73_of_match,
 		   },
 	.id_table = cs42l73_id,
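
Besides the case-range and TLV cleanups, the cs42l73 hunks above also fix
the tristate mask: snd_soc_update_bits() only writes bits covered by the
mask, so the old literal 0x7F (bits 0-6) silently discarded the bit-7 value
"tristate << 7". A sketch of the corrected pattern; FOO_SPC()/FOO_SP_3ST
are hypothetical stand-ins for the CS42L73_SPC()/CS42L73_SP_3ST definitions
assumed to live in cs42l73.h:

#include <sound/soc.h>

#define FOO_SPC(id)	(0x06 + (id))	/* hypothetical per-port control register */
#define FOO_SP_3ST	(1 << 7)	/* hypothetical tristate bit */

static int foo_set_tristate(struct snd_soc_dai *dai, int tristate)
{
	struct snd_soc_codec *codec = dai->codec;

	/* mask and value now refer to the same bit, so the write takes effect */
	return snd_soc_update_bits(codec, FOO_SPC(dai->id), FOO_SP_3ST,
				   tristate << 7);
}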
diff --git a/sound/soc/codecs/cs42xx8-i2c.c b/sound/soc/codecs/cs42xx8-i2c.c
index 657dce2..800c1d5 100644
--- a/sound/soc/codecs/cs42xx8-i2c.c
+++ b/sound/soc/codecs/cs42xx8-i2c.c
@@ -20,7 +20,7 @@
 static int cs42xx8_i2c_probe(struct i2c_client *i2c,
 			     const struct i2c_device_id *id)
 {
-	u32 ret = cs42xx8_probe(&i2c->dev,
+	int ret = cs42xx8_probe(&i2c->dev,
 			devm_regmap_init_i2c(i2c, &cs42xx8_regmap_config));
 	if (ret)
 		return ret;
@@ -49,8 +49,8 @@
 static struct i2c_driver cs42xx8_i2c_driver = {
 	.driver = {
 		.name = "cs42xx8",
-		.owner = THIS_MODULE,
 		.pm = &cs42xx8_pm,
+		.of_match_table = cs42xx8_of_match,
 	},
 	.probe = cs42xx8_i2c_probe,
 	.remove = cs42xx8_i2c_remove,
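
Dropping ".owner = THIS_MODULE" here (and in the many codec drivers below)
is safe because drivers registered through module_i2c_driver() get their
owner filled in by the core: i2c_add_driver() is a macro that passes
THIS_MODULE to i2c_register_driver(). The cs42xx8 shim also picks up the
of_match_table that is now exported from cs42xx8.c. A minimal sketch of the
resulting boilerplate, with hypothetical foo names:

#include <linux/i2c.h>
#include <linux/module.h>

static int foo_i2c_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	return 0;	/* real probe work elided */
}

static const struct i2c_device_id foo_i2c_id[] = {
	{ "foo", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_i2c_id);

static struct i2c_driver foo_i2c_driver = {
	.driver = {
		.name = "foo",		/* no .owner: the I2C core sets it */
	},
	.probe = foo_i2c_probe,
	.id_table = foo_i2c_id,
};
module_i2c_driver(foo_i2c_driver);

MODULE_LICENSE("GPL");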
diff --git a/sound/soc/codecs/cs42xx8.c b/sound/soc/codecs/cs42xx8.c
index e1d4686..d562e1b 100644
--- a/sound/soc/codecs/cs42xx8.c
+++ b/sound/soc/codecs/cs42xx8.c
@@ -425,7 +425,7 @@
 };
 EXPORT_SYMBOL_GPL(cs42888_data);
 
-static const struct of_device_id cs42xx8_of_match[] = {
+const struct of_device_id cs42xx8_of_match[] = {
 	{ .compatible = "cirrus,cs42448", .data = &cs42448_data, },
 	{ .compatible = "cirrus,cs42888", .data = &cs42888_data, },
 	{ /* sentinel */ }
@@ -435,16 +435,24 @@
 
 int cs42xx8_probe(struct device *dev, struct regmap *regmap)
 {
-	const struct of_device_id *of_id = of_match_device(cs42xx8_of_match, dev);
+	const struct of_device_id *of_id;
 	struct cs42xx8_priv *cs42xx8;
 	int ret, val, i;
 
+	if (IS_ERR(regmap)) {
+		ret = PTR_ERR(regmap);
+		dev_err(dev, "failed to allocate regmap: %d\n", ret);
+		return ret;
+	}
+
 	cs42xx8 = devm_kzalloc(dev, sizeof(*cs42xx8), GFP_KERNEL);
 	if (cs42xx8 == NULL)
 		return -ENOMEM;
 
+	cs42xx8->regmap = regmap;
 	dev_set_drvdata(dev, cs42xx8);
 
+	of_id = of_match_device(cs42xx8_of_match, dev);
 	if (of_id)
 		cs42xx8->drvdata = of_id->data;
 
@@ -482,13 +490,6 @@
 	/* Make sure hardware reset done */
 	msleep(5);
 
-	cs42xx8->regmap = regmap;
-	if (IS_ERR(cs42xx8->regmap)) {
-		ret = PTR_ERR(cs42xx8->regmap);
-		dev_err(dev, "failed to allocate regmap: %d\n", ret);
-		goto err_enable;
-	}
-
 	/*
 	 * We haven't marked the chip revision as volatile due to
 	 * sharing a register with the right input volume; explicitly
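
The cs42xx8_probe() reordering above validates the regmap passed in by the
bus shim before any allocation or device setup, and stores it in the
private data before it is first used; previously the IS_ERR() check sat
deep in the probe path, after other resources had already been claimed. A
sketch of the resulting structure, with hypothetical foo names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct foo_priv {
	struct regmap *regmap;
};

int foo_probe(struct device *dev, struct regmap *regmap)
{
	struct foo_priv *foo;
	int ret;

	/* fail fast: nothing has to be unwound if the bus shim failed */
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		dev_err(dev, "failed to allocate regmap: %d\n", ret);
		return ret;
	}

	foo = devm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->regmap = regmap;
	dev_set_drvdata(dev, foo);

	/* clock, regulator and register initialisation would follow here */
	return 0;
}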
diff --git a/sound/soc/codecs/cs42xx8.h b/sound/soc/codecs/cs42xx8.h
index b2c10e5..d36c61b 100644
--- a/sound/soc/codecs/cs42xx8.h
+++ b/sound/soc/codecs/cs42xx8.h
@@ -22,6 +22,7 @@
 extern const struct cs42xx8_driver_data cs42448_data;
 extern const struct cs42xx8_driver_data cs42888_data;
 extern const struct regmap_config cs42xx8_regmap_config;
+extern const struct of_device_id cs42xx8_of_match[];
 int cs42xx8_probe(struct device *dev, struct regmap *regmap);
 
 /* CS42888 register map */
diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
new file mode 100644
index 0000000..0ac8fc5
--- /dev/null
+++ b/sound/soc/codecs/cs4349.c
@@ -0,0 +1,392 @@
+/*
+ * cs4349.c  --  CS4349 ALSA SoC Audio driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Authors: Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include "cs4349.h"
+
+
+static const struct reg_default cs4349_reg_defaults[] = {
+	{ 2, 0x00 },	/* r02	- Mode Control */
+	{ 3, 0x09 },	/* r03	- Volume, Mixing and Inversion Control */
+	{ 4, 0x81 },	/* r04	- Mute Control */
+	{ 5, 0x00 },	/* r05	- Channel A Volume Control */
+	{ 6, 0x00 },	/* r06	- Channel B Volume Control */
+	{ 7, 0xB1 },	/* r07	- Ramp and Filter Control */
+	{ 8, 0x1C },	/* r08	- Misc. Control */
+};
+
+/* Private data for the CS4349 */
+struct  cs4349_private {
+	struct regmap			*regmap;
+	struct gpio_desc		*reset_gpio;
+	unsigned int			mode;
+	int				rate;
+};
+
+static bool cs4349_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS4349_CHIPID ... CS4349_MISC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool cs4349_writeable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case CS4349_MODE ...  CS4349_MISC:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static int cs4349_set_dai_fmt(struct snd_soc_dai *codec_dai,
+			      unsigned int format)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
+	unsigned int fmt;
+
+	fmt = format & SND_SOC_DAIFMT_FORMAT_MASK;
+
+	switch (fmt) {
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_LEFT_J:
+	case SND_SOC_DAIFMT_RIGHT_J:
+		cs4349->mode = format & SND_SOC_DAIFMT_FORMAT_MASK;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cs4349_pcm_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct cs4349_private *cs4349 = snd_soc_codec_get_drvdata(codec);
+	int fmt, ret;
+
+	cs4349->rate = params_rate(params);
+
+	switch (cs4349->mode) {
+	case SND_SOC_DAIFMT_I2S:
+		fmt = DIF_I2S;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		fmt = DIF_LEFT_JST;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		switch (params_width(params)) {
+		case 16:
+			fmt = DIF_RGHT_JST16;
+			break;
+		case 24:
+			fmt = DIF_RGHT_JST24;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_update_bits(codec, CS4349_MODE, DIF_MASK,
+				  MODE_FORMAT(fmt));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int cs4349_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int reg;
+
+	reg = 0;
+	if (mute)
+		reg = MUTE_AB_MASK;
+
+	return snd_soc_update_bits(codec, CS4349_MUTE, MUTE_AB_MASK, reg);
+}
+
+static DECLARE_TLV_DB_SCALE(dig_tlv, -12750, 50, 0);
+
+static const char * const chan_mix_texts[] = {
+	"Mute", "MuteA", "MuteA SwapB", "MuteA MonoB", "SwapA MuteB",
+	"BothR", "Swap", "SwapA MonoB", "MuteB", "Normal", "BothL",
+	"MonoB", "MonoA MuteB", "MonoA", "MonoA SwapB", "Mono",
+	/*Normal == Channel A = Left, Channel B = Right*/
+};
+
+static const char * const fm_texts[] = {
+	"Auto", "Single", "Double", "Quad",
+};
+
+static const char * const deemph_texts[] = {
+	"None", "44.1k", "48k", "32k",
+};
+
+static const char * const softr_zeroc_texts[] = {
+	"Immediate", "Zero Cross", "Soft Ramp", "SR on ZC",
+};
+
+static int deemph_values[] = {
+	0, 4, 8, 12,
+};
+
+static int softr_zeroc_values[] = {
+	0, 64, 128, 192,
+};
+
+static const struct soc_enum chan_mix_enum =
+	SOC_ENUM_SINGLE(CS4349_VMI, 0,
+			ARRAY_SIZE(chan_mix_texts),
+			chan_mix_texts);
+
+static const struct soc_enum fm_mode_enum =
+	SOC_ENUM_SINGLE(CS4349_MODE, 0,
+			ARRAY_SIZE(fm_texts),
+			fm_texts);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(deemph_enum, CS4349_MODE, 0, DEM_MASK,
+				deemph_texts, deemph_values);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(softr_zeroc_enum, CS4349_RMPFLT, 0,
+				SR_ZC_MASK, softr_zeroc_texts,
+				softr_zeroc_values);
+
+static const struct snd_kcontrol_new cs4349_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("Master Playback Volume",
+			 CS4349_VOLA, CS4349_VOLB, 0, 0xFF, 1, dig_tlv),
+	SOC_ENUM("Functional Mode", fm_mode_enum),
+	SOC_ENUM("De-Emphasis Control", deemph_enum),
+	SOC_ENUM("Soft Ramp Zero Cross Control", softr_zeroc_enum),
+	SOC_ENUM("Channel Mixer", chan_mix_enum),
+	SOC_SINGLE("VolA = VolB Switch", CS4349_VMI, 7, 1, 0),
+	SOC_SINGLE("InvertA Switch", CS4349_VMI, 6, 1, 0),
+	SOC_SINGLE("InvertB Switch", CS4349_VMI, 5, 1, 0),
+	SOC_SINGLE("Auto-Mute Switch", CS4349_MUTE, 7, 1, 0),
+	SOC_SINGLE("MUTEC A = B Switch", CS4349_MUTE, 5, 1, 0),
+	SOC_SINGLE("Soft Ramp Up Switch", CS4349_RMPFLT, 5, 1, 0),
+	SOC_SINGLE("Soft Ramp Down Switch", CS4349_RMPFLT, 4, 1, 0),
+	SOC_SINGLE("Slow Roll Off Filter Switch", CS4349_RMPFLT, 2, 1, 0),
+	SOC_SINGLE("Freeze Switch", CS4349_MISC, 5, 1, 0),
+	SOC_SINGLE("Popguard Switch", CS4349_MISC, 4, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget cs4349_dapm_widgets[] = {
+	SND_SOC_DAPM_DAC("HiFi DAC", NULL, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_OUTPUT("OutputA"),
+	SND_SOC_DAPM_OUTPUT("OutputB"),
+};
+
+static const struct snd_soc_dapm_route cs4349_routes[] = {
+	{"DAC Playback", NULL, "OutputA"},
+	{"DAC Playback", NULL, "OutputB"},
+
+	{"OutputA", NULL, "HiFi DAC"},
+	{"OutputB", NULL, "HiFi DAC"},
+};
+
+#define CS4349_PCM_FORMATS (SNDRV_PCM_FMTBIT_S8  | \
+			SNDRV_PCM_FMTBIT_S16_LE  | SNDRV_PCM_FMTBIT_S16_BE  | \
+			SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE | \
+			SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE | \
+			SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE | \
+			SNDRV_PCM_FMTBIT_S24_LE  | SNDRV_PCM_FMTBIT_S24_BE  | \
+			SNDRV_PCM_FMTBIT_S32_LE)
+
+#define CS4349_PCM_RATES SNDRV_PCM_RATE_8000_192000
+
+static const struct snd_soc_dai_ops cs4349_dai_ops = {
+	.hw_params	= cs4349_pcm_hw_params,
+	.set_fmt	= cs4349_set_dai_fmt,
+	.digital_mute	= cs4349_digital_mute,
+};
+
+static struct snd_soc_dai_driver cs4349_dai = {
+	.name = "cs4349_hifi",
+	.playback = {
+		.stream_name	= "DAC Playback",
+		.channels_min	= 1,
+		.channels_max	= 2,
+		.rates		= CS4349_PCM_RATES,
+		.formats	= CS4349_PCM_FORMATS,
+	},
+	.ops = &cs4349_dai_ops,
+	.symmetric_rates = 1,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_cs4349 = {
+	.controls		= cs4349_snd_controls,
+	.num_controls		= ARRAY_SIZE(cs4349_snd_controls),
+
+	.dapm_widgets		= cs4349_dapm_widgets,
+	.num_dapm_widgets	= ARRAY_SIZE(cs4349_dapm_widgets),
+	.dapm_routes		= cs4349_routes,
+	.num_dapm_routes	= ARRAY_SIZE(cs4349_routes),
+};
+
+static const struct regmap_config cs4349_regmap = {
+	.reg_bits		= 8,
+	.val_bits		= 8,
+
+	.max_register		= CS4349_MISC,
+	.reg_defaults		= cs4349_reg_defaults,
+	.num_reg_defaults	= ARRAY_SIZE(cs4349_reg_defaults),
+	.readable_reg		= cs4349_readable_register,
+	.writeable_reg		= cs4349_writeable_register,
+	.cache_type		= REGCACHE_RBTREE,
+};
+
+static int cs4349_i2c_probe(struct i2c_client *client,
+				      const struct i2c_device_id *id)
+{
+	struct cs4349_private *cs4349;
+	int ret;
+
+	cs4349 = devm_kzalloc(&client->dev, sizeof(*cs4349), GFP_KERNEL);
+	if (!cs4349)
+		return -ENOMEM;
+
+	cs4349->regmap = devm_regmap_init_i2c(client, &cs4349_regmap);
+	if (IS_ERR(cs4349->regmap)) {
+		ret = PTR_ERR(cs4349->regmap);
+		dev_err(&client->dev, "regmap_init() failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Reset the Device */
+	cs4349->reset_gpio = devm_gpiod_get_optional(&client->dev,
+		"reset", GPIOD_OUT_LOW);
+	if (IS_ERR(cs4349->reset_gpio))
+		return PTR_ERR(cs4349->reset_gpio);
+
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
+
+	i2c_set_clientdata(client, cs4349);
+
+	return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4349,
+		&cs4349_dai, 1);
+}
+
+static int cs4349_i2c_remove(struct i2c_client *client)
+{
+	struct cs4349_private *cs4349 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+
+	/* Hold down reset */
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int cs4349_runtime_suspend(struct device *dev)
+{
+	struct cs4349_private *cs4349 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, PWR_DWN);
+	if (ret < 0)
+		return ret;
+
+	regcache_cache_only(cs4349->regmap, true);
+
+	/* Hold down reset */
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 0);
+
+	return 0;
+}
+
+static int cs4349_runtime_resume(struct device *dev)
+{
+	struct cs4349_private *cs4349 = dev_get_drvdata(dev);
+	int ret;
+
+	ret = regmap_update_bits(cs4349->regmap, CS4349_MISC, PWR_DWN, 0);
+	if (ret < 0)
+		return ret;
+
+	gpiod_set_value_cansleep(cs4349->reset_gpio, 1);
+
+	regcache_cache_only(cs4349->regmap, false);
+	regcache_sync(cs4349->regmap);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops cs4349_runtime_pm = {
+	SET_RUNTIME_PM_OPS(cs4349_runtime_suspend, cs4349_runtime_resume,
+			   NULL)
+};
+
+static const struct of_device_id cs4349_of_match[] = {
+	{ .compatible = "cirrus,cs4349", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, cs4349_of_match);
+
+static const struct i2c_device_id cs4349_i2c_id[] = {
+	{"cs4349", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, cs4349_i2c_id);
+
+static struct i2c_driver cs4349_i2c_driver = {
+	.driver = {
+		.name		= "cs4349",
+		.of_match_table	= cs4349_of_match,
+	},
+	.id_table	= cs4349_i2c_id,
+	.probe		= cs4349_i2c_probe,
+	.remove		= cs4349_i2c_remove,
+};
+
+module_i2c_driver(cs4349_i2c_driver);
+
+MODULE_AUTHOR("Tim Howe <tim.howe@cirrus.com>");
+MODULE_DESCRIPTION("Cirrus Logic CS4349 ALSA SoC Codec Driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4349.h b/sound/soc/codecs/cs4349.h
new file mode 100644
index 0000000..d58c06a
--- /dev/null
+++ b/sound/soc/codecs/cs4349.h
@@ -0,0 +1,136 @@
+/*
+ * ALSA SoC CS4349 codec driver
+ *
+ * Copyright 2015 Cirrus Logic, Inc.
+ *
+ * Author: Tim Howe <Tim.Howe@cirrus.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __CS4349_H__
+#define __CS4349_H__
+
+/* CS4349 registers addresses */
+#define CS4349_CHIPID		0x01	/* Device and Rev ID, Read Only */
+#define CS4349_MODE		0x02	/* Mode Control */
+#define CS4349_VMI		0x03	/* Volume, Mixing, Inversion Control */
+#define CS4349_MUTE		0x04	/* Mute Control */
+#define CS4349_VOLA		0x05	/* DAC Channel A Volume Control */
+#define CS4349_VOLB		0x06	/* DAC Channel B Volume Control */
+#define CS4349_RMPFLT		0x07	/* Ramp and Filter Control */
+#define CS4349_MISC		0x08	/* Power Down,Freeze Control,Pop Stop*/
+
+#define CS4349_I2C_INCR		0x80
+
+
+/* Device and Revision ID */
+#define CS4349_REVA		0xF0	/* Rev A */
+#define CS4349_REVB		0xF1	/* Rev B */
+#define CS4349_REVC2		0xFF	/* Rev C2 */
+
+
+/* PDN_DONE Poll Maximum
+ * If soft ramp is set it will take much longer to power down
+ * the system.
+ */
+#define PDN_POLL_MAX		900
+
+
+/* Bitfield Definitions */
+
+/* CS4349_MODE */
+/* (Digital Interface Format, De-Emphasis Control, Functional Mode */
+#define DIF2			(1 << 6)
+#define DIF1			(1 << 5)
+#define DIF0			(1 << 4)
+#define DEM1			(1 << 3)
+#define DEM0			(1 << 2)
+#define FM1			(1 << 1)
+#define DIF_LEFT_JST		0x00
+#define DIF_I2S			0x01
+#define DIF_RGHT_JST16		0x02
+#define DIF_RGHT_JST24		0x03
+#define DIF_TDM0		0x04
+#define DIF_TDM1		0x05
+#define DIF_TDM2		0x06
+#define DIF_TDM3		0x07
+#define DIF_MASK		0x70
+#define MODE_FORMAT(x)		(((x)&7)<<4)
+#define DEM_MASK		0x0C
+#define NO_DEM			0x00
+#define DEM_441			0x04
+#define DEM_48K			0x08
+#define DEM_32K			0x0C
+#define FM_AUTO			0x00
+#define FM_SNGL			0x01
+#define FM_DBL			0x02
+#define FM_QUAD			0x03
+#define FM_SNGL_MIN		30000
+#define FM_SNGL_MAX		54000
+#define FM_DBL_MAX		108000
+#define FM_QUAD_MAX		216000
+#define FM_MASK			0x03
+
+/* CS4349_VMI (VMI = Volume, Mixing and Inversion Controls) */
+#define VOLBISA			(1 << 7)
+#define VOLAISB			(1 << 7)
+/* INVERT_A only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
+#define INVERT_A		(1 << 6)
+/* INVERT_B only available for Left Jstfd, Right Jstfd16 and Right Jstfd24 */
+#define INVERT_B		(1 << 5)
+#define ATAPI3			(1 << 3)
+#define ATAPI2			(1 << 2)
+#define ATAPI1			(1 << 1)
+#define ATAPI0			(1 << 0)
+#define MUTEAB			0x00
+#define MUTEA_RIGHTB		0x01
+#define MUTEA_LEFTB		0x02
+#define MUTEA_SUMLRDIV2B	0x03
+#define RIGHTA_MUTEB		0x04
+#define RIGHTA_RIGHTB		0x05
+#define RIGHTA_LEFTB		0x06
+#define RIGHTA_SUMLRDIV2B	0x07
+#define LEFTA_MUTEB		0x08
+#define LEFTA_RIGHTB		0x09	/* Default */
+#define LEFTA_LEFTB		0x0A
+#define LEFTA_SUMLRDIV2B	0x0B
+#define SUMLRDIV2A_MUTEB	0x0C
+#define SUMLRDIV2A_RIGHTB	0x0D
+#define SUMLRDIV2A_LEFTB	0x0E
+#define SUMLRDIV2_AB		0x0F
+#define CHMIX_MASK		0x0F
+
+/* CS4349_MUTE */
+#define AUTOMUTE		(1 << 7)
+#define MUTEC_AB		(1 << 5)
+#define MUTE_A			(1 << 4)
+#define MUTE_B			(1 << 3)
+#define MUTE_AB_MASK		0x18
+
+/* CS4349_RMPFLT (Ramp and Filter Control) */
+#define SCZ1			(1 << 7)
+#define SCZ0			(1 << 6)
+#define RMP_UP			(1 << 5)
+#define RMP_DN			(1 << 4)
+#define FILT_SEL		(1 << 2)
+#define IMMDT_CHNG		0x31
+#define ZEROCRSS		0x71
+#define SOFT_RMP		0xB1
+#define SFTRMP_ZEROCRSS		0xF1
+#define SR_ZC_MASK		0xC0
+
+/* CS4349_MISC */
+#define PWR_DWN			(1 << 7)
+#define FREEZE			(1 << 5)
+#define POPG_EN			(1 << 4)
+
+#endif	/* __CS4349_H__ */
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 21810e5..7dc52fe 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -267,33 +267,29 @@
  *
  * Reserved area are considered as "mute".
  */
-static const unsigned int hp_out_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(hp_out_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -54 dB to +15 dB */
-	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0),
-};
+	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
+);
 
-static const unsigned int lineout_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(lineout_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -54dB to 15dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
-};
+);
 
-static const unsigned int mono_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mono_vol_tlv,
 	0x0, 0x2, TLV_DB_SCALE_ITEM(-1800, 0, 1),
 	/* -18dB to 6dB */
 	0x3, 0x7, TLV_DB_SCALE_ITEM(-1800, 600, 0)
-};
+);
 
-static const unsigned int aux1_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux1_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -48dB to 21dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-4800, 150, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(eq_gain_tlv, -1050, 150, 0);
 static const DECLARE_TLV_DB_SCALE(adc_eq_master_gain_tlv, -1800, 600, 1);
@@ -680,7 +676,7 @@
 	int master;
 };
 
-static struct reg_default da7210_reg_defaults[] = {
+static const struct reg_default da7210_reg_defaults[] = {
 	{ 0x00, 0x00 },
 	{ 0x01, 0x11 },
 	{ 0x03, 0x00 },
@@ -1182,7 +1178,7 @@
 
 #if IS_ENABLED(CONFIG_I2C)
 
-static struct reg_default da7210_regmap_i2c_patch[] = {
+static const struct reg_sequence da7210_regmap_i2c_patch[] = {
 
 	/* System controller master disable */
 	{ DA7210_STARTUP1, 0x00 },
@@ -1259,7 +1255,6 @@
 static struct i2c_driver da7210_i2c_driver = {
 	.driver = {
 		.name = "da7210",
-		.owner = THIS_MODULE,
 	},
 	.probe		= da7210_i2c_probe,
 	.remove		= da7210_i2c_remove,
@@ -1269,7 +1264,7 @@
 
 #if defined(CONFIG_SPI_MASTER)
 
-static struct reg_default da7210_regmap_spi_patch[] = {
+static const struct reg_sequence da7210_regmap_spi_patch[] = {
 	/* Dummy read to give two pulses over nCS for SPI */
 	{ DA7210_AUX2, 0x00 },
 	{ DA7210_AUX2, 0x00 },
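
The reg_default -> reg_sequence changes in da7210 here (and in cs42l52
earlier) track a regmap API split: struct reg_default now only seeds the
register cache, while ordered write lists such as regmap_register_patch()
take struct reg_sequence, a distinct type so that sequence-only fields can
be added to it later without bloating the cache defaults. A sketch with
made-up register addresses:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* cache defaults: order does not matter */
static const struct reg_default foo_reg_defaults[] = {
	{ 0x00, 0x00 },
	{ 0x01, 0x11 },
};

/* patch sequence: written in this exact order and re-applied on cache sync */
static const struct reg_sequence foo_patch[] = {
	{ 0x50, 0x00 },		/* e.g. disable master mode first */
	{ 0x40, 0x80 },
};

static int foo_apply_patch(struct regmap *regmap)
{
	return regmap_register_patch(regmap, foo_patch,
				     ARRAY_SIZE(foo_patch));
}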
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 238e48a..a9c86ef 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -28,27 +28,24 @@
 
 
 /* Gain and Volume */
-static const unsigned int aux_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_vol_tlv,
 	/* -54dB */
 	0x0, 0x11, TLV_DB_SCALE_ITEM(-5400, 0, 0),
 	/* -52.5dB to 15dB */
 	0x12, 0x3f, TLV_DB_SCALE_ITEM(-5250, 150, 0)
-};
+);
 
-static const unsigned int digital_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(digital_gain_tlv,
 	0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -78dB to 12dB */
 	0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0)
-};
+);
 
-static const unsigned int alc_analog_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv,
 	0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* 0dB to 36dB */
 	0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0);
@@ -954,7 +951,7 @@
 	{"LINE", NULL, "Lineout PGA"},
 };
 
-static struct reg_default da7213_reg_defaults[] = {
+static const struct reg_default da7213_reg_defaults[] = {
 	{ DA7213_DIG_ROUTING_DAI, 0x10 },
 	{ DA7213_SR, 0x0A },
 	{ DA7213_REFERENCES, 0x80 },
@@ -1585,7 +1582,6 @@
 static struct i2c_driver da7213_i2c_driver = {
 	.driver = {
 		.name = "da7213",
-		.owner = THIS_MODULE,
 	},
 	.probe		= da7213_i2c_probe,
 	.remove		= da7213_remove,
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index 2075236..1d5a89c 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -43,7 +43,7 @@
 /*
  * da732x register cache - default settings
  */
-static struct reg_default da732x_reg_cache[] = {
+static const struct reg_default da732x_reg_cache[] = {
 	{ DA732X_REG_REF1		, 0x02 },
 	{ DA732X_REG_BIAS_EN		, 0x80 },
 	{ DA732X_REG_BIAS1		, 0x00 },
@@ -1196,13 +1196,7 @@
 #define	DA732X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops da732x_dai1_ops = {
-	.hw_params	= da732x_hw_params,
-	.set_fmt	= da732x_set_dai_fmt,
-	.set_sysclk	= da732x_set_dai_sysclk,
-};
-
-static struct snd_soc_dai_ops da732x_dai2_ops = {
+static const struct snd_soc_dai_ops da732x_dai_ops = {
 	.hw_params	= da732x_hw_params,
 	.set_fmt	= da732x_set_dai_fmt,
 	.set_sysclk	= da732x_set_dai_sysclk,
@@ -1227,7 +1221,7 @@
 			.rates = DA732X_RATES,
 			.formats = DA732X_FORMATS,
 		},
-		.ops = &da732x_dai1_ops,
+		.ops = &da732x_dai_ops,
 	},
 	{
 		.name	= "DA732X_AIFB",
@@ -1247,7 +1241,7 @@
 			.rates = DA732X_RATES,
 			.formats = DA732X_FORMATS,
 		},
-		.ops = &da732x_dai2_ops,
+		.ops = &da732x_dai_ops,
 	},
 };
 
@@ -1572,7 +1566,6 @@
 static struct i2c_driver da732x_i2c_driver = {
 	.driver		= {
 		.name	= "da7320",
-		.owner	= THIS_MODULE,
 	},
 	.probe		= da732x_i2c_probe,
 	.remove		= da732x_i2c_remove,
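
The da732x change above is twofold: the two byte-identical ops tables
collapse into one shared table, and the table becomes const, since the core
only reads it (the same constification shows up in isabelle and lm49453
below). A sketch of the shape, with placeholder foo names and the stream
descriptions omitted:

#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

static int foo_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	return 0;	/* real clock/format setup elided */
}

static const struct snd_soc_dai_ops foo_dai_ops = {
	.hw_params = foo_hw_params,
};

static struct snd_soc_dai_driver foo_dais[] = {
	{ .name = "FOO_AIFA", .ops = &foo_dai_ops },
	{ .name = "FOO_AIFB", .ops = &foo_dai_ops },	/* same const table */
};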
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 66bb446..0b2ede8 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -289,26 +289,23 @@
 
 /* Gain and Volume */
 
-static const unsigned int aux_vol_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(aux_vol_tlv,
 	0x0, 0x10, TLV_DB_SCALE_ITEM(-5400, 0, 0),
 	/* -54dB to 15dB */
 	0x11, 0x3f, TLV_DB_SCALE_ITEM(-5400, 150, 0)
-};
+);
 
-static const unsigned int digital_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(digital_gain_tlv,
 	0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* -78dB to 12dB */
 	0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0)
-};
+);
 
-static const unsigned int alc_analog_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv,
 	0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
 	/* 0dB to 36dB */
 	0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0)
-};
+);
 
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0);
@@ -948,7 +945,7 @@
 	struct da9055_platform_data *pdata;
 };
 
-static struct reg_default da9055_reg_defaults[] = {
+static const struct reg_default da9055_reg_defaults[] = {
 	{ 0x21, 0x10 },
 	{ 0x22, 0x0A },
 	{ 0x23, 0x00 },
@@ -1533,12 +1530,12 @@
 	{ .compatible = "dlg,da9055-codec", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, da9055_of_match);
 
 /* I2C codec control layer */
 static struct i2c_driver da9055_i2c_driver = {
 	.driver = {
 		.name = "da9055-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(da9055_of_match),
 	},
 	.probe		= da9055_i2c_probe,
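
Adding MODULE_DEVICE_TABLE(of, da9055_of_match) matters for autoloading:
without it the match table is only consulted once the module is already
loaded, whereas the device table macro emits "of:N*T*C<compatible>" aliases
that let udev load the module when the devicetree node is probed. A minimal
sketch with a hypothetical compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-codec", },
	{ }
};
/* emits a module alias of the form "of:N*T*Cvendor,foo-codec" */
MODULE_DEVICE_TABLE(of, foo_of_match);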
diff --git a/sound/soc/codecs/gtm601.c b/sound/soc/codecs/gtm601.c
new file mode 100644
index 0000000..0b80052
--- /dev/null
+++ b/sound/soc/codecs/gtm601.c
@@ -0,0 +1,95 @@
+/*
+ * This is a simple driver for the GTM601 Voice PCM interface
+ *
+ * Copyright (C) 2015 Goldelico GmbH
+ *
+ * Author: Marek Belisko <marek@goldelico.com>
+ *
+ * Based on wm8727.c driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/ac97_codec.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+static const struct snd_soc_dapm_widget gtm601_dapm_widgets[] = {
+	SND_SOC_DAPM_OUTPUT("AOUT"),
+	SND_SOC_DAPM_INPUT("AIN"),
+};
+
+static const struct snd_soc_dapm_route gtm601_dapm_routes[] = {
+	{ "AOUT", NULL, "Playback" },
+	{ "Capture", NULL, "AIN" },
+};
+
+static struct snd_soc_dai_driver gtm601_dai = {
+	.name = "gtm601",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_gtm601 = {
+	.dapm_widgets = gtm601_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(gtm601_dapm_widgets),
+	.dapm_routes = gtm601_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(gtm601_dapm_routes),
+};
+
+static int gtm601_platform_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev,
+			&soc_codec_dev_gtm601, &gtm601_dai, 1);
+}
+
+static int gtm601_platform_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id gtm601_codec_of_match[] = {
+	{ .compatible = "option,gtm601", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, gtm601_codec_of_match);
+#endif
+
+static struct platform_driver gtm601_codec_driver = {
+	.driver = {
+		.name = "gtm601",
+		.of_match_table = of_match_ptr(gtm601_codec_of_match),
+	},
+	.probe = gtm601_platform_probe,
+	.remove = gtm601_platform_remove,
+};
+
+module_platform_driver(gtm601_codec_driver);
+
+MODULE_DESCRIPTION("ASoC gtm601 driver");
+MODULE_AUTHOR("Marek Belisko <marek@goldelico.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gtm601");
diff --git a/sound/soc/codecs/ics43432.c b/sound/soc/codecs/ics43432.c
new file mode 100644
index 0000000..dd850b9
--- /dev/null
+++ b/sound/soc/codecs/ics43432.c
@@ -0,0 +1,76 @@
+/*
+ * I2S MEMS microphone driver for InvenSense ICS-43432
+ *
+ * - Non configurable.
+ * - I2S interface, 64 BCLKs per frame, 32 bits per channel, 24 bit data
+ *
+ * Copyright (c) 2015 Axis Communications AB
+ *
+ * Licensed under GPL v2.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#define ICS43432_RATE_MIN 7190 /* Hz, from data sheet */
+#define ICS43432_RATE_MAX 52800  /* Hz, from data sheet */
+
+#define ICS43432_FORMATS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32)
+
+static struct snd_soc_dai_driver ics43432_dai = {
+	.name = "ics43432-hifi",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = ICS43432_RATE_MIN,
+		.rate_max = ICS43432_RATE_MAX,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.formats = ICS43432_FORMATS,
+	},
+};
+
+static struct snd_soc_codec_driver ics43432_codec_driver = {
+};
+
+static int ics43432_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev, &ics43432_codec_driver,
+			&ics43432_dai, 1);
+}
+
+static int ics43432_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id ics43432_ids[] = {
+	{ .compatible = "invensense,ics43432", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ics43432_ids);
+#endif
+
+static struct platform_driver ics43432_driver = {
+	.driver = {
+		.name = "ics43432",
+		.of_match_table = of_match_ptr(ics43432_ids),
+	},
+	.probe = ics43432_probe,
+	.remove = ics43432_remove,
+};
+
+module_platform_driver(ics43432_driver);
+
+MODULE_DESCRIPTION("ASoC ICS43432 driver");
+MODULE_AUTHOR("Ricard Wanderlof <ricardw@axis.com>");
+MODULE_LICENSE("GPL v2");
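
The new ics43432 driver is a good example of describing a fixed-function
device with a continuous rate band: SNDRV_PCM_RATE_CONTINUOUS tells the
core to accept any rate between .rate_min and .rate_max instead of one of
the discrete SNDRV_PCM_RATE_* bits. A sketch of a capture-only DAI using
that scheme (the limits here are illustrative, not the ICS-43432 ones):

#include <sound/pcm.h>
#include <sound/soc.h>

static struct snd_soc_dai_driver foo_mic_dai = {
	.name = "foo-mic",
	.capture = {
		.stream_name = "Capture",
		.channels_min = 1,
		.channels_max = 2,
		/* any rate in [rate_min, rate_max] is acceptable */
		.rates = SNDRV_PCM_RATE_CONTINUOUS,
		.rate_min = 8000,
		.rate_max = 48000,
		.formats = SNDRV_PCM_FMTBIT_S24_LE,
	},
};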
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index ebd9028..be44837 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -33,7 +33,7 @@
 
 
 /* Register default values for ISABELLE driver. */
-static struct reg_default isabelle_reg_defs[] = {
+static const struct reg_default isabelle_reg_defs[] = {
 	{ 0, 0x00 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -1016,25 +1016,25 @@
 #define ISABELLE_FORMATS (SNDRV_PCM_FMTBIT_S20_3LE |\
 			SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops isabelle_hs_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_hs_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_hs_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_hf_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_hf_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_hf_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_line_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_line_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 	.digital_mute	= isabelle_line_mute,
 };
 
-static struct snd_soc_dai_ops isabelle_ul_dai_ops = {
+static const struct snd_soc_dai_ops isabelle_ul_dai_ops = {
 	.hw_params	= isabelle_hw_params,
 	.set_fmt	= isabelle_set_dai_fmt,
 };
@@ -1149,7 +1149,6 @@
 static struct i2c_driver isabelle_i2c_driver = {
 	.driver = {
 		.name = "isabelle",
-		.owner = THIS_MODULE,
 	},
 	.probe = isabelle_i2c_probe,
 	.remove = isabelle_i2c_remove,
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 9363fdb..1f5ab99 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -78,11 +78,10 @@
 	struct regmap *regmap;
 };
 
-static const unsigned int jz4740_mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(jz4740_mic_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(0, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(2000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(jz4740_out_tlv, 0, 200, 0);
 static const DECLARE_TLV_DB_SCALE(jz4740_in_tlv, -3450, 150, 0);
diff --git a/sound/soc/codecs/lm4857.c b/sound/soc/codecs/lm4857.c
index 99ffc49..558de10 100644
--- a/sound/soc/codecs/lm4857.c
+++ b/sound/soc/codecs/lm4857.c
@@ -142,7 +142,6 @@
 static struct i2c_driver lm4857_i2c_driver = {
 	.driver = {
 		.name = "lm4857",
-		.owner = THIS_MODULE,
 	},
 	.probe = lm4857_i2c_probe,
 	.id_table = lm4857_i2c_id,
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 6600aa0..9af5640 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -30,7 +30,7 @@
 #include <asm/div64.h>
 #include "lm49453.h"
 
-static struct reg_default lm49453_reg_defs[] = {
+static const struct reg_default lm49453_reg_defs[] = {
 	{ 0, 0x00 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -188,7 +188,6 @@
 /* codec private data */
 struct lm49453_priv {
 	struct regmap *regmap;
-	int fs_rate;
 };
 
 /* capture path controls */
@@ -1112,13 +1111,10 @@
 			     struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	struct lm49453_priv *lm49453 = snd_soc_codec_get_drvdata(codec);
 	u16 clk_div = 0;
 
-	lm49453->fs_rate = params_rate(params);
-
 	/* Setting DAC clock dividers based on substream sample rate. */
-	switch (lm49453->fs_rate) {
+	switch (params_rate(params)) {
 	case 8000:
 	case 16000:
 	case 32000:
@@ -1291,35 +1287,35 @@
 #define LM49453_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
-static struct snd_soc_dai_ops lm49453_headset_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_headset_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_hp_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_speaker_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ls_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_haptic_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ha_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_ep_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_ep_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
 	.digital_mute	= lm49453_ep_mute,
 };
 
-static struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
+static const struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
 	.hw_params	= lm49453_hw_params,
 	.set_sysclk	= lm49453_set_dai_sysclk,
 	.set_fmt	= lm49453_set_dai_fmt,
@@ -1460,7 +1456,6 @@
 static struct i2c_driver lm49453_i2c_driver = {
 	.driver = {
 		.name = "lm49453",
-		.owner = THIS_MODULE,
 	},
 	.probe = lm49453_i2c_probe,
 	.remove = lm49453_i2c_remove,
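
The lm49453 hunks drop the cached fs_rate field because the sample rate was
only ever consumed inside hw_params(); switching on params_rate() directly
removes both the private-data lookup and a piece of state that could go
stale. A sketch of the slimmed-down callback; the register address and
divider values are hypothetical:

#include <sound/pcm_params.h>
#include <sound/soc.h>

static int foo_hw_params(struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params,
			 struct snd_soc_dai *dai)
{
	unsigned int clk_div;

	switch (params_rate(params)) {
	case 8000:
	case 16000:
	case 32000:
		clk_div = 256;
		break;
	default:
		clk_div = 128;
		break;
	}

	/* 0x2f is a stand-in for the real DAC clock divider register */
	return snd_soc_write(dai->codec, 0x2f, clk_div);
}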
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index e1c196a..5b82e26 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -35,7 +35,7 @@
 	u32 flags;
 };
 
-static struct reg_default max9768_default_regs[] = {
+static const struct reg_default max9768_default_regs[] = {
 	{ 0, 0 },
 	{ 3,  MAX9768_CTRL_FILTERLESS},
 };
@@ -43,8 +43,8 @@
 static int max9768_get_gpio(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	/* Normal == Channel A = Left, Channel B = Right */
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
 	int val = gpio_get_value_cansleep(max9768->mute_gpio);
 
 	ucontrol->value.integer.value[0] = !val;
@@ -55,16 +55,15 @@
 static int max9768_set_gpio(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(c);
 
 	gpio_set_value_cansleep(max9768->mute_gpio, !ucontrol->value.integer.value[0]);
 
 	return 0;
 }
 
-static const unsigned int volume_tlv[] = {
-	TLV_DB_RANGE_HEAD(43),
+static const DECLARE_TLV_DB_RANGE(volume_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-16150, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(-9280, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(-9030, 0, 0),
@@ -107,8 +106,8 @@
 	51, 57, TLV_DB_SCALE_ITEM(290, 50, 0),
 	58, 58, TLV_DB_SCALE_ITEM(650, 0, 0),
 	59, 62, TLV_DB_SCALE_ITEM(700, 60, 0),
-	63, 63, TLV_DB_SCALE_ITEM(950, 0, 0),
-};
+	63, 63, TLV_DB_SCALE_ITEM(950, 0, 0)
+);
 
 static const struct snd_kcontrol_new max9768_volume[] = {
 	SOC_SINGLE_TLV("Playback Volume", MAX9768_VOL, 0, 63, 0, volume_tlv),
@@ -130,19 +129,20 @@
 	{ "OUT-", NULL, "IN" },
 };
 
-static int max9768_probe(struct snd_soc_codec *codec)
+static int max9768_probe(struct snd_soc_component *component)
 {
-	struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
+	struct max9768 *max9768 = snd_soc_component_get_drvdata(component);
 	int ret;
 
 	if (max9768->flags & MAX9768_FLAG_CLASSIC_PWM) {
-		ret = snd_soc_write(codec, MAX9768_CTRL, MAX9768_CTRL_PWM);
+		ret = regmap_write(max9768->regmap, MAX9768_CTRL,
+			MAX9768_CTRL_PWM);
 		if (ret)
 			return ret;
 	}
 
 	if (gpio_is_valid(max9768->mute_gpio)) {
-		ret = snd_soc_add_codec_controls(codec, max9768_mute,
+		ret = snd_soc_add_component_controls(component, max9768_mute,
 				ARRAY_SIZE(max9768_mute));
 		if (ret)
 			return ret;
@@ -151,7 +151,7 @@
 	return 0;
 }
 
-static struct snd_soc_codec_driver max9768_codec_driver = {
+static struct snd_soc_component_driver max9768_component_driver = {
 	.probe = max9768_probe,
 	.controls = max9768_volume,
 	.num_controls = ARRAY_SIZE(max9768_volume),
@@ -183,11 +183,13 @@
 
 	if (pdata) {
 		/* Mute on powerup to avoid clicks */
-		err = gpio_request_one(pdata->mute_gpio, GPIOF_INIT_HIGH, "MAX9768 Mute");
+		err = devm_gpio_request_one(&client->dev, pdata->mute_gpio,
+				GPIOF_INIT_HIGH, "MAX9768 Mute");
 		max9768->mute_gpio = err ?: pdata->mute_gpio;
 
 		/* Activate chip by releasing shutdown, enables I2C */
-		err = gpio_request_one(pdata->shdn_gpio, GPIOF_INIT_HIGH, "MAX9768 Shutdown");
+		err = devm_gpio_request_one(&client->dev, pdata->shdn_gpio,
+				GPIOF_INIT_HIGH, "MAX9768 Shutdown");
 		max9768->shdn_gpio = err ?: pdata->shdn_gpio;
 
 		max9768->flags = pdata->flags;
@@ -199,38 +201,11 @@
 	i2c_set_clientdata(client, max9768);
 
 	max9768->regmap = devm_regmap_init_i2c(client, &max9768_i2c_regmap_config);
-	if (IS_ERR(max9768->regmap)) {
-		err = PTR_ERR(max9768->regmap);
-		goto err_gpio_free;
-	}
+	if (IS_ERR(max9768->regmap))
+		return PTR_ERR(max9768->regmap);
 
-	err = snd_soc_register_codec(&client->dev, &max9768_codec_driver, NULL, 0);
-	if (err)
-		goto err_gpio_free;
-
-	return 0;
-
- err_gpio_free:
-	if (gpio_is_valid(max9768->shdn_gpio))
-		gpio_free(max9768->shdn_gpio);
-	if (gpio_is_valid(max9768->mute_gpio))
-		gpio_free(max9768->mute_gpio);
-
-	return err;
-}
-
-static int max9768_i2c_remove(struct i2c_client *client)
-{
-	struct max9768 *max9768 = i2c_get_clientdata(client);
-
-	snd_soc_unregister_codec(&client->dev);
-
-	if (gpio_is_valid(max9768->shdn_gpio))
-		gpio_free(max9768->shdn_gpio);
-	if (gpio_is_valid(max9768->mute_gpio))
-		gpio_free(max9768->mute_gpio);
-
-	return 0;
+	return devm_snd_soc_register_component(&client->dev,
+		&max9768_component_driver, NULL, 0);
 }
 
 static const struct i2c_device_id max9768_i2c_id[] = {
@@ -242,10 +217,8 @@
 static struct i2c_driver max9768_i2c_driver = {
 	.driver = {
 		.name = "max9768",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9768_i2c_probe,
-	.remove = max9768_i2c_remove,
 	.id_table = max9768_i2c_id,
 };
 module_i2c_driver(max9768_i2c_driver);
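
Two conversions are folded into the max9768 diff above: the GPIO requests
and the registration move to managed (devm_*) variants, which is what lets
the error-unwind labels and the whole remove callback disappear, and the
driver is ported from the codec API to the component API
(snd_soc_kcontrol_component(), snd_soc_add_component_controls(), plain
regmap_write()). A sketch of the devm half only, with illustrative names
and GPIO number:

#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <sound/soc.h>

static const struct snd_soc_component_driver foo_component_driver;

static int foo_i2c_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	int err;

	/* released automatically when the device unbinds: no gpio_free() */
	err = devm_gpio_request_one(&client->dev, 42 /* hypothetical */,
				    GPIOF_INIT_HIGH, "FOO Mute");
	if (err)
		return err;

	/* likewise unregistered automatically: no .remove needed */
	return devm_snd_soc_register_component(&client->dev,
					       &foo_component_driver, NULL, 0);
}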
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index d0f4534..20dcc49 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -258,292 +258,36 @@
 	{ 0xc9, 0x00 }, /* C9 DAI2 biquad */
 };
 
-static struct {
-       int readable;
-       int writable;
-       int vol;
-} max98088_access[M98088_REG_CNT] = {
-       { 0xFF, 0xFF, 1 }, /* 00 IRQ status */
-       { 0xFF, 0x00, 1 }, /* 01 MIC status */
-       { 0xFF, 0x00, 1 }, /* 02 jack status */
-       { 0x1F, 0x1F, 1 }, /* 03 battery voltage */
-       { 0xFF, 0xFF, 0 }, /* 04 */
-       { 0xFF, 0xFF, 0 }, /* 05 */
-       { 0xFF, 0xFF, 0 }, /* 06 */
-       { 0xFF, 0xFF, 0 }, /* 07 */
-       { 0xFF, 0xFF, 0 }, /* 08 */
-       { 0xFF, 0xFF, 0 }, /* 09 */
-       { 0xFF, 0xFF, 0 }, /* 0A */
-       { 0xFF, 0xFF, 0 }, /* 0B */
-       { 0xFF, 0xFF, 0 }, /* 0C */
-       { 0xFF, 0xFF, 0 }, /* 0D */
-       { 0xFF, 0xFF, 0 }, /* 0E */
-       { 0xFF, 0xFF, 0 }, /* 0F interrupt enable */
-
-       { 0xFF, 0xFF, 0 }, /* 10 master clock */
-       { 0xFF, 0xFF, 0 }, /* 11 DAI1 clock mode */
-       { 0xFF, 0xFF, 0 }, /* 12 DAI1 clock control */
-       { 0xFF, 0xFF, 0 }, /* 13 DAI1 clock control */
-       { 0xFF, 0xFF, 0 }, /* 14 DAI1 format */
-       { 0xFF, 0xFF, 0 }, /* 15 DAI1 clock */
-       { 0xFF, 0xFF, 0 }, /* 16 DAI1 config */
-       { 0xFF, 0xFF, 0 }, /* 17 DAI1 TDM */
-       { 0xFF, 0xFF, 0 }, /* 18 DAI1 filters */
-       { 0xFF, 0xFF, 0 }, /* 19 DAI2 clock mode */
-       { 0xFF, 0xFF, 0 }, /* 1A DAI2 clock control */
-       { 0xFF, 0xFF, 0 }, /* 1B DAI2 clock control */
-       { 0xFF, 0xFF, 0 }, /* 1C DAI2 format */
-       { 0xFF, 0xFF, 0 }, /* 1D DAI2 clock */
-       { 0xFF, 0xFF, 0 }, /* 1E DAI2 config */
-       { 0xFF, 0xFF, 0 }, /* 1F DAI2 TDM */
-
-       { 0xFF, 0xFF, 0 }, /* 20 DAI2 filters */
-       { 0xFF, 0xFF, 0 }, /* 21 data config */
-       { 0xFF, 0xFF, 0 }, /* 22 DAC mixer */
-       { 0xFF, 0xFF, 0 }, /* 23 left ADC mixer */
-       { 0xFF, 0xFF, 0 }, /* 24 right ADC mixer */
-       { 0xFF, 0xFF, 0 }, /* 25 left HP mixer */
-       { 0xFF, 0xFF, 0 }, /* 26 right HP mixer */
-       { 0xFF, 0xFF, 0 }, /* 27 HP control */
-       { 0xFF, 0xFF, 0 }, /* 28 left REC mixer */
-       { 0xFF, 0xFF, 0 }, /* 29 right REC mixer */
-       { 0xFF, 0xFF, 0 }, /* 2A REC control */
-       { 0xFF, 0xFF, 0 }, /* 2B left SPK mixer */
-       { 0xFF, 0xFF, 0 }, /* 2C right SPK mixer */
-       { 0xFF, 0xFF, 0 }, /* 2D SPK control */
-       { 0xFF, 0xFF, 0 }, /* 2E sidetone */
-       { 0xFF, 0xFF, 0 }, /* 2F DAI1 playback level */
-
-       { 0xFF, 0xFF, 0 }, /* 30 DAI1 playback level */
-       { 0xFF, 0xFF, 0 }, /* 31 DAI2 playback level */
-       { 0xFF, 0xFF, 0 }, /* 32 DAI2 playbakc level */
-       { 0xFF, 0xFF, 0 }, /* 33 left ADC level */
-       { 0xFF, 0xFF, 0 }, /* 34 right ADC level */
-       { 0xFF, 0xFF, 0 }, /* 35 MIC1 level */
-       { 0xFF, 0xFF, 0 }, /* 36 MIC2 level */
-       { 0xFF, 0xFF, 0 }, /* 37 INA level */
-       { 0xFF, 0xFF, 0 }, /* 38 INB level */
-       { 0xFF, 0xFF, 0 }, /* 39 left HP volume */
-       { 0xFF, 0xFF, 0 }, /* 3A right HP volume */
-       { 0xFF, 0xFF, 0 }, /* 3B left REC volume */
-       { 0xFF, 0xFF, 0 }, /* 3C right REC volume */
-       { 0xFF, 0xFF, 0 }, /* 3D left SPK volume */
-       { 0xFF, 0xFF, 0 }, /* 3E right SPK volume */
-       { 0xFF, 0xFF, 0 }, /* 3F MIC config */
-
-       { 0xFF, 0xFF, 0 }, /* 40 MIC threshold */
-       { 0xFF, 0xFF, 0 }, /* 41 excursion limiter filter */
-       { 0xFF, 0xFF, 0 }, /* 42 excursion limiter threshold */
-       { 0xFF, 0xFF, 0 }, /* 43 ALC */
-       { 0xFF, 0xFF, 0 }, /* 44 power limiter threshold */
-       { 0xFF, 0xFF, 0 }, /* 45 power limiter config */
-       { 0xFF, 0xFF, 0 }, /* 46 distortion limiter config */
-       { 0xFF, 0xFF, 0 }, /* 47 audio input */
-       { 0xFF, 0xFF, 0 }, /* 48 microphone */
-       { 0xFF, 0xFF, 0 }, /* 49 level control */
-       { 0xFF, 0xFF, 0 }, /* 4A bypass switches */
-       { 0xFF, 0xFF, 0 }, /* 4B jack detect */
-       { 0xFF, 0xFF, 0 }, /* 4C input enable */
-       { 0xFF, 0xFF, 0 }, /* 4D output enable */
-       { 0xFF, 0xFF, 0 }, /* 4E bias control */
-       { 0xFF, 0xFF, 0 }, /* 4F DAC power */
-
-       { 0xFF, 0xFF, 0 }, /* 50 DAC power */
-       { 0xFF, 0xFF, 0 }, /* 51 system */
-       { 0xFF, 0xFF, 0 }, /* 52 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 53 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 54 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 55 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 56 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 57 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 58 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 59 DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5A DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5B DAI1 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 5C DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5D DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5E DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 5F DAI1 EQ2 */
-
-       { 0xFF, 0xFF, 0 }, /* 60 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 61 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 62 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 63 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 64 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 65 DAI1 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 66 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 67 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 68 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 69 DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6A DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6B DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6C DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6D DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6E DAI1 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 6F DAI1 EQ3 */
-
-       { 0xFF, 0xFF, 0 }, /* 70 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 71 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 72 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 73 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 74 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 75 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 76 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 77 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 78 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 79 DAI1 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* 7A DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7B DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7C DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7D DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7E DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 7F DAI1 EQ5 */
-
-       { 0xFF, 0xFF, 0 }, /* 80 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 81 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 82 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 83 DAI1 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* 84 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 85 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 86 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 87 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 88 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 89 DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8A DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8B DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8C DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8D DAI2 EQ1 */
-       { 0xFF, 0xFF, 0 }, /* 8E DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 8F DAI2 EQ2 */
-
-       { 0xFF, 0xFF, 0 }, /* 90 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 91 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 92 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 93 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 94 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 95 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 96 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 97 DAI2 EQ2 */
-       { 0xFF, 0xFF, 0 }, /* 98 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 99 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9A DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9B DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9C DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9D DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9E DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* 9F DAI2 EQ3 */
-
-       { 0xFF, 0xFF, 0 }, /* A0 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* A1 DAI2 EQ3 */
-       { 0xFF, 0xFF, 0 }, /* A2 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A3 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A4 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A5 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A6 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A7 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A8 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* A9 DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AA DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AB DAI2 EQ4 */
-       { 0xFF, 0xFF, 0 }, /* AC DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AD DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AE DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* AF DAI2 EQ5 */
-
-       { 0xFF, 0xFF, 0 }, /* B0 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B1 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B2 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B3 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B4 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B5 DAI2 EQ5 */
-       { 0xFF, 0xFF, 0 }, /* B6 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B7 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B8 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* B9 DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BA DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BB DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BC DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BD DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BE DAI1 biquad */
-       { 0xFF, 0xFF, 0 }, /* BF DAI1 biquad */
-
-       { 0xFF, 0xFF, 0 }, /* C0 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C1 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C2 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C3 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C4 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C5 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C6 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C7 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C8 DAI2 biquad */
-       { 0xFF, 0xFF, 0 }, /* C9 DAI2 biquad */
-       { 0x00, 0x00, 0 }, /* CA */
-       { 0x00, 0x00, 0 }, /* CB */
-       { 0x00, 0x00, 0 }, /* CC */
-       { 0x00, 0x00, 0 }, /* CD */
-       { 0x00, 0x00, 0 }, /* CE */
-       { 0x00, 0x00, 0 }, /* CF */
-
-       { 0x00, 0x00, 0 }, /* D0 */
-       { 0x00, 0x00, 0 }, /* D1 */
-       { 0x00, 0x00, 0 }, /* D2 */
-       { 0x00, 0x00, 0 }, /* D3 */
-       { 0x00, 0x00, 0 }, /* D4 */
-       { 0x00, 0x00, 0 }, /* D5 */
-       { 0x00, 0x00, 0 }, /* D6 */
-       { 0x00, 0x00, 0 }, /* D7 */
-       { 0x00, 0x00, 0 }, /* D8 */
-       { 0x00, 0x00, 0 }, /* D9 */
-       { 0x00, 0x00, 0 }, /* DA */
-       { 0x00, 0x00, 0 }, /* DB */
-       { 0x00, 0x00, 0 }, /* DC */
-       { 0x00, 0x00, 0 }, /* DD */
-       { 0x00, 0x00, 0 }, /* DE */
-       { 0x00, 0x00, 0 }, /* DF */
-
-       { 0x00, 0x00, 0 }, /* E0 */
-       { 0x00, 0x00, 0 }, /* E1 */
-       { 0x00, 0x00, 0 }, /* E2 */
-       { 0x00, 0x00, 0 }, /* E3 */
-       { 0x00, 0x00, 0 }, /* E4 */
-       { 0x00, 0x00, 0 }, /* E5 */
-       { 0x00, 0x00, 0 }, /* E6 */
-       { 0x00, 0x00, 0 }, /* E7 */
-       { 0x00, 0x00, 0 }, /* E8 */
-       { 0x00, 0x00, 0 }, /* E9 */
-       { 0x00, 0x00, 0 }, /* EA */
-       { 0x00, 0x00, 0 }, /* EB */
-       { 0x00, 0x00, 0 }, /* EC */
-       { 0x00, 0x00, 0 }, /* ED */
-       { 0x00, 0x00, 0 }, /* EE */
-       { 0x00, 0x00, 0 }, /* EF */
-
-       { 0x00, 0x00, 0 }, /* F0 */
-       { 0x00, 0x00, 0 }, /* F1 */
-       { 0x00, 0x00, 0 }, /* F2 */
-       { 0x00, 0x00, 0 }, /* F3 */
-       { 0x00, 0x00, 0 }, /* F4 */
-       { 0x00, 0x00, 0 }, /* F5 */
-       { 0x00, 0x00, 0 }, /* F6 */
-       { 0x00, 0x00, 0 }, /* F7 */
-       { 0x00, 0x00, 0 }, /* F8 */
-       { 0x00, 0x00, 0 }, /* F9 */
-       { 0x00, 0x00, 0 }, /* FA */
-       { 0x00, 0x00, 0 }, /* FB */
-       { 0x00, 0x00, 0 }, /* FC */
-       { 0x00, 0x00, 0 }, /* FD */
-       { 0x00, 0x00, 0 }, /* FE */
-       { 0xFF, 0x00, 1 }, /* FF */
-};
-
 static bool max98088_readable_register(struct device *dev, unsigned int reg)
 {
-       return max98088_access[reg].readable;
+	switch (reg) {
+	case M98088_REG_00_IRQ_STATUS ... 0xC9:
+	case M98088_REG_FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98088_writeable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case M98088_REG_03_BATTERY_VOLTAGE ... 0xC9:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static bool max98088_volatile_register(struct device *dev, unsigned int reg)
 {
-       return max98088_access[reg].vol;
+	switch (reg) {
+	case M98088_REG_00_IRQ_STATUS ... M98088_REG_03_BATTERY_VOLTAGE:
+	case M98088_REG_FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static const struct regmap_config max98088_regmap = {
@@ -551,6 +295,7 @@
 	.val_bits = 8,
 
 	.readable_reg = max98088_readable_register,
+	.writeable_reg = max98088_writeable_register,
 	.volatile_reg = max98088_volatile_register,
 	.max_register = 0xff,
 
@@ -680,29 +425,26 @@
        return 0;
 }
 
-static const unsigned int max98088_micboost_tlv[] = {
-       TLV_DB_RANGE_HEAD(2),
-       0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-       2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+static const DECLARE_TLV_DB_RANGE(max98088_micboost_tlv,
+	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
-static const unsigned int max98088_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98088_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98088_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98088_spk_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
 static const struct snd_kcontrol_new max98088_snd_controls[] = {
 
@@ -2011,7 +1753,6 @@
 static struct i2c_driver max98088_i2c_driver = {
 	.driver = {
 		.name = "max98088",
-		.owner = THIS_MODULE,
 	},
 	.probe  = max98088_i2c_probe,
 	.remove = max98088_i2c_remove,
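
Note: the max98088 change above replaces the hand-maintained max98088_access[] table with switch-based regmap callbacks and wires a writeable_reg hook into the regmap_config. A minimal sketch of the same pattern for a hypothetical 8-bit codec (all foo_* names and register values are illustrative, not from this series):

	#include <linux/device.h>
	#include <linux/regmap.h>

	#define FOO_REG_STATUS		0x00	/* read-only status */
	#define FOO_REG_FIRST_CTRL	0x03	/* first writeable register */
	#define FOO_REG_LAST_CTRL	0xc9	/* last writeable register */
	#define FOO_REG_REV_ID		0xff	/* read-only revision ID */

	static bool foo_readable_reg(struct device *dev, unsigned int reg)
	{
		switch (reg) {
		case FOO_REG_STATUS ... FOO_REG_LAST_CTRL:	/* GCC case range */
		case FOO_REG_REV_ID:
			return true;
		default:
			return false;
		}
	}

	static bool foo_writeable_reg(struct device *dev, unsigned int reg)
	{
		/* the status and revision registers stay read-only */
		switch (reg) {
		case FOO_REG_FIRST_CTRL ... FOO_REG_LAST_CTRL:
			return true;
		default:
			return false;
		}
	}

	static const struct regmap_config foo_regmap = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= FOO_REG_REV_ID,
		.readable_reg	= foo_readable_reg,
		.writeable_reg	= foo_writeable_reg,
	};

regmap consults these callbacks on every access, so read-only and reserved registers are rejected without carrying a 256-entry lookup table in the driver.
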
diff --git a/sound/soc/codecs/max98088.h b/sound/soc/codecs/max98088.h
index be89a4f..efa39bf 100644
--- a/sound/soc/codecs/max98088.h
+++ b/sound/soc/codecs/max98088.h
@@ -16,7 +16,7 @@
  */
 #define M98088_REG_00_IRQ_STATUS            0x00
 #define M98088_REG_01_MIC_STATUS            0x01
-#define M98088_REG_02_JACK_STAUS            0x02
+#define M98088_REG_02_JACK_STATUS           0x02
 #define M98088_REG_03_BATTERY_VOLTAGE       0x03
 #define M98088_REG_0F_IRQ_ENABLE            0x0F
 #define M98088_REG_10_SYS_CLK               0x10
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 78268f05..584aab8 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -267,75 +267,8 @@
 static bool max98090_readable_register(struct device *dev, unsigned int reg)
 {
 	switch (reg) {
-	case M98090_REG_DEVICE_STATUS:
-	case M98090_REG_JACK_STATUS:
-	case M98090_REG_INTERRUPT_S:
-	case M98090_REG_RESERVED:
-	case M98090_REG_LINE_INPUT_CONFIG:
-	case M98090_REG_LINE_INPUT_LEVEL:
-	case M98090_REG_INPUT_MODE:
-	case M98090_REG_MIC1_INPUT_LEVEL:
-	case M98090_REG_MIC2_INPUT_LEVEL:
-	case M98090_REG_MIC_BIAS_VOLTAGE:
-	case M98090_REG_DIGITAL_MIC_ENABLE:
-	case M98090_REG_DIGITAL_MIC_CONFIG:
-	case M98090_REG_LEFT_ADC_MIXER:
-	case M98090_REG_RIGHT_ADC_MIXER:
-	case M98090_REG_LEFT_ADC_LEVEL:
-	case M98090_REG_RIGHT_ADC_LEVEL:
-	case M98090_REG_ADC_BIQUAD_LEVEL:
-	case M98090_REG_ADC_SIDETONE:
-	case M98090_REG_SYSTEM_CLOCK:
-	case M98090_REG_CLOCK_MODE:
-	case M98090_REG_CLOCK_RATIO_NI_MSB:
-	case M98090_REG_CLOCK_RATIO_NI_LSB:
-	case M98090_REG_CLOCK_RATIO_MI_MSB:
-	case M98090_REG_CLOCK_RATIO_MI_LSB:
-	case M98090_REG_MASTER_MODE:
-	case M98090_REG_INTERFACE_FORMAT:
-	case M98090_REG_TDM_CONTROL:
-	case M98090_REG_TDM_FORMAT:
-	case M98090_REG_IO_CONFIGURATION:
-	case M98090_REG_FILTER_CONFIG:
-	case M98090_REG_DAI_PLAYBACK_LEVEL:
-	case M98090_REG_DAI_PLAYBACK_LEVEL_EQ:
-	case M98090_REG_LEFT_HP_MIXER:
-	case M98090_REG_RIGHT_HP_MIXER:
-	case M98090_REG_HP_CONTROL:
-	case M98090_REG_LEFT_HP_VOLUME:
-	case M98090_REG_RIGHT_HP_VOLUME:
-	case M98090_REG_LEFT_SPK_MIXER:
-	case M98090_REG_RIGHT_SPK_MIXER:
-	case M98090_REG_SPK_CONTROL:
-	case M98090_REG_LEFT_SPK_VOLUME:
-	case M98090_REG_RIGHT_SPK_VOLUME:
-	case M98090_REG_DRC_TIMING:
-	case M98090_REG_DRC_COMPRESSOR:
-	case M98090_REG_DRC_EXPANDER:
-	case M98090_REG_DRC_GAIN:
-	case M98090_REG_RCV_LOUTL_MIXER:
-	case M98090_REG_RCV_LOUTL_CONTROL:
-	case M98090_REG_RCV_LOUTL_VOLUME:
-	case M98090_REG_LOUTR_MIXER:
-	case M98090_REG_LOUTR_CONTROL:
-	case M98090_REG_LOUTR_VOLUME:
-	case M98090_REG_JACK_DETECT:
-	case M98090_REG_INPUT_ENABLE:
-	case M98090_REG_OUTPUT_ENABLE:
-	case M98090_REG_LEVEL_CONTROL:
-	case M98090_REG_DSP_FILTER_ENABLE:
-	case M98090_REG_BIAS_CONTROL:
-	case M98090_REG_DAC_CONTROL:
-	case M98090_REG_ADC_CONTROL:
-	case M98090_REG_DEVICE_SHUTDOWN:
-	case M98090_REG_EQUALIZER_BASE ... M98090_REG_EQUALIZER_BASE + 0x68:
-	case M98090_REG_RECORD_BIQUAD_BASE ... M98090_REG_RECORD_BIQUAD_BASE + 0x0E:
-	case M98090_REG_DMIC3_VOLUME:
-	case M98090_REG_DMIC4_VOLUME:
-	case M98090_REG_DMIC34_BQ_PREATTEN:
-	case M98090_REG_RECORD_TDM_SLOT:
-	case M98090_REG_SAMPLE_RATE:
-	case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+	case M98090_REG_DEVICE_STATUS ... M98090_REG_INTERRUPT_S:
+	case M98090_REG_LINE_INPUT_CONFIG ... 0xD1:
 	case M98090_REG_REVISION_ID:
 		return true;
 	default:
@@ -360,22 +293,20 @@
 	return ret;
 }
 
-static const unsigned int max98090_micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_micboost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98090_mic_tlv, 0, 100, 0);
 
 static const DECLARE_TLV_DB_SCALE(max98090_line_single_ended_tlv,
 	-600, 600, 0);
 
-static const unsigned int max98090_line_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_line_tlv,
 	0, 3, TLV_DB_SCALE_ITEM(-600, 300, 0),
-	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
-};
+	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98090_avg_tlv, 0, 600, 0);
 static const DECLARE_TLV_DB_SCALE(max98090_av_tlv, -1200, 100, 0);
@@ -391,38 +322,34 @@
 static const DECLARE_TLV_DB_SCALE(max98090_drcexp_tlv, -6600, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98090_sdg_tlv, 50, 200, 0);
 
-static const unsigned int max98090_mixout_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98090_mixout_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-1200, 250, 0),
-	2, 3, TLV_DB_SCALE_ITEM(-600, 600, 0),
-};
+	2, 3, TLV_DB_SCALE_ITEM(-600, 600, 0)
+);
 
-static const unsigned int max98090_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98090_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_spk_tlv,
 	0, 4, TLV_DB_SCALE_ITEM(-4800, 400, 0),
 	5, 10, TLV_DB_SCALE_ITEM(-2900, 300, 0),
 	11, 14, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	15, 29, TLV_DB_SCALE_ITEM(-500, 100, 0),
-	30, 39, TLV_DB_SCALE_ITEM(950, 50, 0),
-};
+	30, 39, TLV_DB_SCALE_ITEM(950, 50, 0)
+);
 
-static const unsigned int max98090_rcv_lout_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98090_rcv_lout_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
 static int max98090_get_enab_tlv(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
@@ -850,6 +777,19 @@
 	return 0;
 }
 
+static int max98090_shdn_event(struct snd_soc_dapm_widget *w,
+				 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
+	if (event & SND_SOC_DAPM_POST_PMU)
+		max98090->shdn_pending = true;
+
+	return 0;
+
+}
+
 static const char *mic1_mux_text[] = { "IN12", "IN56" };
 
 static SOC_ENUM_SINGLE_DECL(mic1_mux_enum,
@@ -1158,9 +1098,11 @@
 	SND_SOC_DAPM_SUPPLY("SDOEN", M98090_REG_IO_CONFIGURATION,
 		M98090_SDOEN_SHIFT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("DMICL_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
-		 M98090_DIGMICL_SHIFT, 0, NULL, 0),
+		 M98090_DIGMICL_SHIFT, 0, max98090_shdn_event,
+			SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("DMICR_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
-		 M98090_DIGMICR_SHIFT, 0, NULL, 0),
+		 M98090_DIGMICR_SHIFT, 0, max98090_shdn_event,
+			 SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("AHPF", M98090_REG_FILTER_CONFIG,
 		M98090_AHPF_SHIFT, 0, NULL, 0),
 
@@ -1205,10 +1147,12 @@
 		&max98090_right_adc_mixer_controls[0],
 		ARRAY_SIZE(max98090_right_adc_mixer_controls)),
 
-	SND_SOC_DAPM_ADC("ADCL", NULL, M98090_REG_INPUT_ENABLE,
-		M98090_ADLEN_SHIFT, 0),
-	SND_SOC_DAPM_ADC("ADCR", NULL, M98090_REG_INPUT_ENABLE,
-		M98090_ADREN_SHIFT, 0),
+	SND_SOC_DAPM_ADC_E("ADCL", NULL, M98090_REG_INPUT_ENABLE,
+		M98090_ADLEN_SHIFT, 0, max98090_shdn_event,
+		SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_ADC_E("ADCR", NULL, M98090_REG_INPUT_ENABLE,
+		M98090_ADREN_SHIFT, 0, max98090_shdn_event,
+		SND_SOC_DAPM_POST_PMU),
 
 	SND_SOC_DAPM_AIF_OUT("AIFOUTL", "HiFi Capture", 0,
 		SND_SOC_NOPM, 0, 0),
@@ -1801,10 +1745,13 @@
 		if (IS_ERR(max98090->mclk))
 			break;
 
-		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON)
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
 			clk_disable_unprepare(max98090->mclk);
-		else
-			clk_prepare_enable(max98090->mclk);
+		} else {
+			ret = clk_prepare_enable(max98090->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
@@ -2383,7 +2330,7 @@
 #define MAX98090_RATES SNDRV_PCM_RATE_8000_96000
 #define MAX98090_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 
-static struct snd_soc_dai_ops max98090_dai_ops = {
+static const struct snd_soc_dai_ops max98090_dai_ops = {
 	.set_sysclk = max98090_dai_set_sysclk,
 	.set_fmt = max98090_dai_set_fmt,
 	.set_tdm_slot = max98090_set_tdm_slot,
@@ -2536,9 +2483,26 @@
 	return 0;
 }
 
+static void max98090_seq_notifier(struct snd_soc_dapm_context *dapm,
+	enum snd_soc_dapm_type event, int subseq)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
+	struct max98090_priv *max98090 = snd_soc_codec_get_drvdata(codec);
+
+	if (max98090->shdn_pending) {
+		snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+				M98090_SHDNN_MASK, 0);
+		msleep(40);
+		snd_soc_update_bits(codec, M98090_REG_DEVICE_SHUTDOWN,
+				M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+		max98090->shdn_pending = false;
+	}
+}
+
 static struct snd_soc_codec_driver soc_codec_dev_max98090 = {
 	.probe   = max98090_probe,
 	.remove  = max98090_remove,
+	.seq_notifier = max98090_seq_notifier,
 	.set_bias_level = max98090_set_bias_level,
 };
 
@@ -2714,7 +2678,6 @@
 static struct i2c_driver max98090_i2c_driver = {
 	.driver = {
 		.name = "max98090",
-		.owner = THIS_MODULE,
 		.pm = &max98090_pm,
 		.of_match_table = of_match_ptr(max98090_of_match),
 		.acpi_match_table = ACPI_PTR(max98090_acpi_match),
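
Note: the bias-level hunk above (and the matching max98095 hunk further down) stops ignoring the return value of clk_prepare_enable(). A minimal sketch of the pattern with hypothetical foo_* names:

	#include <linux/clk.h>
	#include <linux/err.h>

	struct foo_priv {
		struct clk *mclk;	/* may be ERR_PTR() if not provided */
	};

	static int foo_set_mclk(struct foo_priv *foo, bool enable)
	{
		if (IS_ERR(foo->mclk))
			return 0;	/* no master clock wired up */

		if (!enable) {
			clk_disable_unprepare(foo->mclk);
			return 0;
		}

		/* clk_prepare_enable() can fail (e.g. a parent clock cannot
		 * be enabled), so the error has to reach the caller instead
		 * of being dropped on the floor */
		return clk_prepare_enable(foo->mclk);
	}
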
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index 21ff743..bc610d9 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -1543,6 +1543,7 @@
 	unsigned int pa2en;
 	unsigned int sidetone;
 	bool master;
+	bool shdn_pending;
 };
 
 int max98090_mic_detect(struct snd_soc_codec *codec,
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 9a46d3d..1fedac5 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -202,300 +202,36 @@
 	{ 0xff, 0x00 }, /* FF */
 };
 
-static struct {
-	int readable;
-	int writable;
-} max98095_access[M98095_REG_CNT] = {
-	{ 0x00, 0x00 }, /* 00 */
-	{ 0xFF, 0x00 }, /* 01 */
-	{ 0xFF, 0x00 }, /* 02 */
-	{ 0xFF, 0x00 }, /* 03 */
-	{ 0xFF, 0x00 }, /* 04 */
-	{ 0xFF, 0x00 }, /* 05 */
-	{ 0xFF, 0x00 }, /* 06 */
-	{ 0xFF, 0x00 }, /* 07 */
-	{ 0xFF, 0x00 }, /* 08 */
-	{ 0xFF, 0x00 }, /* 09 */
-	{ 0xFF, 0x00 }, /* 0A */
-	{ 0xFF, 0x00 }, /* 0B */
-	{ 0xFF, 0x00 }, /* 0C */
-	{ 0xFF, 0x00 }, /* 0D */
-	{ 0xFF, 0x00 }, /* 0E */
-	{ 0xFF, 0x9F }, /* 0F */
-	{ 0xFF, 0xFF }, /* 10 */
-	{ 0xFF, 0xFF }, /* 11 */
-	{ 0xFF, 0xFF }, /* 12 */
-	{ 0xFF, 0xFF }, /* 13 */
-	{ 0xFF, 0xFF }, /* 14 */
-	{ 0xFF, 0xFF }, /* 15 */
-	{ 0xFF, 0xFF }, /* 16 */
-	{ 0xFF, 0xFF }, /* 17 */
-	{ 0xFF, 0xFF }, /* 18 */
-	{ 0xFF, 0xFF }, /* 19 */
-	{ 0xFF, 0xFF }, /* 1A */
-	{ 0xFF, 0xFF }, /* 1B */
-	{ 0xFF, 0xFF }, /* 1C */
-	{ 0xFF, 0xFF }, /* 1D */
-	{ 0xFF, 0x77 }, /* 1E */
-	{ 0xFF, 0x77 }, /* 1F */
-	{ 0xFF, 0x77 }, /* 20 */
-	{ 0xFF, 0x77 }, /* 21 */
-	{ 0xFF, 0x77 }, /* 22 */
-	{ 0xFF, 0x77 }, /* 23 */
-	{ 0xFF, 0xFF }, /* 24 */
-	{ 0xFF, 0x7F }, /* 25 */
-	{ 0xFF, 0x31 }, /* 26 */
-	{ 0xFF, 0xFF }, /* 27 */
-	{ 0xFF, 0xFF }, /* 28 */
-	{ 0xFF, 0xFF }, /* 29 */
-	{ 0xFF, 0xF7 }, /* 2A */
-	{ 0xFF, 0x2F }, /* 2B */
-	{ 0xFF, 0xEF }, /* 2C */
-	{ 0xFF, 0xFF }, /* 2D */
-	{ 0xFF, 0xFF }, /* 2E */
-	{ 0xFF, 0xFF }, /* 2F */
-	{ 0xFF, 0xFF }, /* 30 */
-	{ 0xFF, 0xFF }, /* 31 */
-	{ 0xFF, 0xFF }, /* 32 */
-	{ 0xFF, 0xFF }, /* 33 */
-	{ 0xFF, 0xF7 }, /* 34 */
-	{ 0xFF, 0x2F }, /* 35 */
-	{ 0xFF, 0xCF }, /* 36 */
-	{ 0xFF, 0xFF }, /* 37 */
-	{ 0xFF, 0xFF }, /* 38 */
-	{ 0xFF, 0xFF }, /* 39 */
-	{ 0xFF, 0xFF }, /* 3A */
-	{ 0xFF, 0xFF }, /* 3B */
-	{ 0xFF, 0xFF }, /* 3C */
-	{ 0xFF, 0xFF }, /* 3D */
-	{ 0xFF, 0xF7 }, /* 3E */
-	{ 0xFF, 0x2F }, /* 3F */
-	{ 0xFF, 0xCF }, /* 40 */
-	{ 0xFF, 0xFF }, /* 41 */
-	{ 0xFF, 0x77 }, /* 42 */
-	{ 0xFF, 0xFF }, /* 43 */
-	{ 0xFF, 0xFF }, /* 44 */
-	{ 0xFF, 0xFF }, /* 45 */
-	{ 0xFF, 0xFF }, /* 46 */
-	{ 0xFF, 0xFF }, /* 47 */
-	{ 0xFF, 0xFF }, /* 48 */
-	{ 0xFF, 0x0F }, /* 49 */
-	{ 0xFF, 0xFF }, /* 4A */
-	{ 0xFF, 0xFF }, /* 4B */
-	{ 0xFF, 0x3F }, /* 4C */
-	{ 0xFF, 0x3F }, /* 4D */
-	{ 0xFF, 0x3F }, /* 4E */
-	{ 0xFF, 0xFF }, /* 4F */
-	{ 0xFF, 0x7F }, /* 50 */
-	{ 0xFF, 0x7F }, /* 51 */
-	{ 0xFF, 0x0F }, /* 52 */
-	{ 0xFF, 0x3F }, /* 53 */
-	{ 0xFF, 0x3F }, /* 54 */
-	{ 0xFF, 0x3F }, /* 55 */
-	{ 0xFF, 0xFF }, /* 56 */
-	{ 0xFF, 0xFF }, /* 57 */
-	{ 0xFF, 0xBF }, /* 58 */
-	{ 0xFF, 0x1F }, /* 59 */
-	{ 0xFF, 0xBF }, /* 5A */
-	{ 0xFF, 0x1F }, /* 5B */
-	{ 0xFF, 0xBF }, /* 5C */
-	{ 0xFF, 0x3F }, /* 5D */
-	{ 0xFF, 0x3F }, /* 5E */
-	{ 0xFF, 0x7F }, /* 5F */
-	{ 0xFF, 0x7F }, /* 60 */
-	{ 0xFF, 0x47 }, /* 61 */
-	{ 0xFF, 0x9F }, /* 62 */
-	{ 0xFF, 0x9F }, /* 63 */
-	{ 0xFF, 0x9F }, /* 64 */
-	{ 0xFF, 0x9F }, /* 65 */
-	{ 0xFF, 0x9F }, /* 66 */
-	{ 0xFF, 0xBF }, /* 67 */
-	{ 0xFF, 0xBF }, /* 68 */
-	{ 0xFF, 0xFF }, /* 69 */
-	{ 0xFF, 0xFF }, /* 6A */
-	{ 0xFF, 0x7F }, /* 6B */
-	{ 0xFF, 0xF7 }, /* 6C */
-	{ 0xFF, 0xFF }, /* 6D */
-	{ 0xFF, 0xFF }, /* 6E */
-	{ 0xFF, 0x1F }, /* 6F */
-	{ 0xFF, 0xF7 }, /* 70 */
-	{ 0xFF, 0xFF }, /* 71 */
-	{ 0xFF, 0xFF }, /* 72 */
-	{ 0xFF, 0x1F }, /* 73 */
-	{ 0xFF, 0xF7 }, /* 74 */
-	{ 0xFF, 0xFF }, /* 75 */
-	{ 0xFF, 0xFF }, /* 76 */
-	{ 0xFF, 0x1F }, /* 77 */
-	{ 0xFF, 0xF7 }, /* 78 */
-	{ 0xFF, 0xFF }, /* 79 */
-	{ 0xFF, 0xFF }, /* 7A */
-	{ 0xFF, 0x1F }, /* 7B */
-	{ 0xFF, 0xF7 }, /* 7C */
-	{ 0xFF, 0xFF }, /* 7D */
-	{ 0xFF, 0xFF }, /* 7E */
-	{ 0xFF, 0x1F }, /* 7F */
-	{ 0xFF, 0xF7 }, /* 80 */
-	{ 0xFF, 0xFF }, /* 81 */
-	{ 0xFF, 0xFF }, /* 82 */
-	{ 0xFF, 0x1F }, /* 83 */
-	{ 0xFF, 0x7F }, /* 84 */
-	{ 0xFF, 0x0F }, /* 85 */
-	{ 0xFF, 0xD8 }, /* 86 */
-	{ 0xFF, 0xFF }, /* 87 */
-	{ 0xFF, 0xEF }, /* 88 */
-	{ 0xFF, 0xFE }, /* 89 */
-	{ 0xFF, 0xFE }, /* 8A */
-	{ 0xFF, 0xFF }, /* 8B */
-	{ 0xFF, 0xFF }, /* 8C */
-	{ 0xFF, 0x3F }, /* 8D */
-	{ 0xFF, 0xFF }, /* 8E */
-	{ 0xFF, 0x3F }, /* 8F */
-	{ 0xFF, 0x8F }, /* 90 */
-	{ 0xFF, 0xFF }, /* 91 */
-	{ 0xFF, 0x3F }, /* 92 */
-	{ 0xFF, 0xFF }, /* 93 */
-	{ 0xFF, 0xFF }, /* 94 */
-	{ 0xFF, 0x0F }, /* 95 */
-	{ 0xFF, 0x3F }, /* 96 */
-	{ 0xFF, 0x8C }, /* 97 */
-	{ 0x00, 0x00 }, /* 98 */
-	{ 0x00, 0x00 }, /* 99 */
-	{ 0x00, 0x00 }, /* 9A */
-	{ 0x00, 0x00 }, /* 9B */
-	{ 0x00, 0x00 }, /* 9C */
-	{ 0x00, 0x00 }, /* 9D */
-	{ 0x00, 0x00 }, /* 9E */
-	{ 0x00, 0x00 }, /* 9F */
-	{ 0x00, 0x00 }, /* A0 */
-	{ 0x00, 0x00 }, /* A1 */
-	{ 0x00, 0x00 }, /* A2 */
-	{ 0x00, 0x00 }, /* A3 */
-	{ 0x00, 0x00 }, /* A4 */
-	{ 0x00, 0x00 }, /* A5 */
-	{ 0x00, 0x00 }, /* A6 */
-	{ 0x00, 0x00 }, /* A7 */
-	{ 0x00, 0x00 }, /* A8 */
-	{ 0x00, 0x00 }, /* A9 */
-	{ 0x00, 0x00 }, /* AA */
-	{ 0x00, 0x00 }, /* AB */
-	{ 0x00, 0x00 }, /* AC */
-	{ 0x00, 0x00 }, /* AD */
-	{ 0x00, 0x00 }, /* AE */
-	{ 0x00, 0x00 }, /* AF */
-	{ 0x00, 0x00 }, /* B0 */
-	{ 0x00, 0x00 }, /* B1 */
-	{ 0x00, 0x00 }, /* B2 */
-	{ 0x00, 0x00 }, /* B3 */
-	{ 0x00, 0x00 }, /* B4 */
-	{ 0x00, 0x00 }, /* B5 */
-	{ 0x00, 0x00 }, /* B6 */
-	{ 0x00, 0x00 }, /* B7 */
-	{ 0x00, 0x00 }, /* B8 */
-	{ 0x00, 0x00 }, /* B9 */
-	{ 0x00, 0x00 }, /* BA */
-	{ 0x00, 0x00 }, /* BB */
-	{ 0x00, 0x00 }, /* BC */
-	{ 0x00, 0x00 }, /* BD */
-	{ 0x00, 0x00 }, /* BE */
-	{ 0x00, 0x00 }, /* BF */
-	{ 0x00, 0x00 }, /* C0 */
-	{ 0x00, 0x00 }, /* C1 */
-	{ 0x00, 0x00 }, /* C2 */
-	{ 0x00, 0x00 }, /* C3 */
-	{ 0x00, 0x00 }, /* C4 */
-	{ 0x00, 0x00 }, /* C5 */
-	{ 0x00, 0x00 }, /* C6 */
-	{ 0x00, 0x00 }, /* C7 */
-	{ 0x00, 0x00 }, /* C8 */
-	{ 0x00, 0x00 }, /* C9 */
-	{ 0x00, 0x00 }, /* CA */
-	{ 0x00, 0x00 }, /* CB */
-	{ 0x00, 0x00 }, /* CC */
-	{ 0x00, 0x00 }, /* CD */
-	{ 0x00, 0x00 }, /* CE */
-	{ 0x00, 0x00 }, /* CF */
-	{ 0x00, 0x00 }, /* D0 */
-	{ 0x00, 0x00 }, /* D1 */
-	{ 0x00, 0x00 }, /* D2 */
-	{ 0x00, 0x00 }, /* D3 */
-	{ 0x00, 0x00 }, /* D4 */
-	{ 0x00, 0x00 }, /* D5 */
-	{ 0x00, 0x00 }, /* D6 */
-	{ 0x00, 0x00 }, /* D7 */
-	{ 0x00, 0x00 }, /* D8 */
-	{ 0x00, 0x00 }, /* D9 */
-	{ 0x00, 0x00 }, /* DA */
-	{ 0x00, 0x00 }, /* DB */
-	{ 0x00, 0x00 }, /* DC */
-	{ 0x00, 0x00 }, /* DD */
-	{ 0x00, 0x00 }, /* DE */
-	{ 0x00, 0x00 }, /* DF */
-	{ 0x00, 0x00 }, /* E0 */
-	{ 0x00, 0x00 }, /* E1 */
-	{ 0x00, 0x00 }, /* E2 */
-	{ 0x00, 0x00 }, /* E3 */
-	{ 0x00, 0x00 }, /* E4 */
-	{ 0x00, 0x00 }, /* E5 */
-	{ 0x00, 0x00 }, /* E6 */
-	{ 0x00, 0x00 }, /* E7 */
-	{ 0x00, 0x00 }, /* E8 */
-	{ 0x00, 0x00 }, /* E9 */
-	{ 0x00, 0x00 }, /* EA */
-	{ 0x00, 0x00 }, /* EB */
-	{ 0x00, 0x00 }, /* EC */
-	{ 0x00, 0x00 }, /* ED */
-	{ 0x00, 0x00 }, /* EE */
-	{ 0x00, 0x00 }, /* EF */
-	{ 0x00, 0x00 }, /* F0 */
-	{ 0x00, 0x00 }, /* F1 */
-	{ 0x00, 0x00 }, /* F2 */
-	{ 0x00, 0x00 }, /* F3 */
-	{ 0x00, 0x00 }, /* F4 */
-	{ 0x00, 0x00 }, /* F5 */
-	{ 0x00, 0x00 }, /* F6 */
-	{ 0x00, 0x00 }, /* F7 */
-	{ 0x00, 0x00 }, /* F8 */
-	{ 0x00, 0x00 }, /* F9 */
-	{ 0x00, 0x00 }, /* FA */
-	{ 0x00, 0x00 }, /* FB */
-	{ 0x00, 0x00 }, /* FC */
-	{ 0x00, 0x00 }, /* FD */
-	{ 0x00, 0x00 }, /* FE */
-	{ 0xFF, 0x00 }, /* FF */
-};
-
 static bool max98095_readable(struct device *dev, unsigned int reg)
 {
-	if (reg >= M98095_REG_CNT)
-		return 0;
-	return max98095_access[reg].readable != 0;
+	switch (reg) {
+	case M98095_001_HOST_INT_STS ... M98095_097_PWR_SYS:
+	case M98095_0FF_REV_ID:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool max98095_writeable(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case M98095_00F_HOST_CFG ... M98095_097_PWR_SYS:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static bool max98095_volatile(struct device *dev, unsigned int reg)
 {
-	if (reg > M98095_REG_MAX_CACHED)
-		return 1;
-
 	switch (reg) {
-	case M98095_000_HOST_DATA:
-	case M98095_001_HOST_INT_STS:
-	case M98095_002_HOST_RSP_STS:
-	case M98095_003_HOST_CMD_STS:
-	case M98095_004_CODEC_STS:
-	case M98095_005_DAI1_ALC_STS:
-	case M98095_006_DAI2_ALC_STS:
-	case M98095_007_JACK_AUTO_STS:
-	case M98095_008_JACK_MANUAL_STS:
-	case M98095_009_JACK_VBAT_STS:
-	case M98095_00A_ACC_ADC_STS:
-	case M98095_00B_MIC_NG_AGC_STS:
-	case M98095_00C_SPK_L_VOLT_STS:
-	case M98095_00D_SPK_R_VOLT_STS:
-	case M98095_00E_TEMP_SENSOR_STS:
-		return 1;
+	case M98095_000_HOST_DATA ... M98095_00E_TEMP_SENSOR_STS:
+	case M98095_REG_MAX_CACHED + 1 ... M98095_0FF_REV_ID:
+		return true;
+	default:
+		return false;
 	}
-
-	return 0;
 }
 
 static const struct regmap_config max98095_regmap = {
@@ -508,6 +244,7 @@
 	.cache_type = REGCACHE_RBTREE,
 
 	.readable_reg = max98095_readable,
+	.writeable_reg = max98095_writeable,
 	.volatile_reg = max98095_volatile,
 };
 
@@ -661,48 +398,43 @@
 	return 0;
 }
 
-static const unsigned int max98095_micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max98095_micboost_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
-	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(max98095_mic_tlv, 0, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98095_adc_tlv, -1200, 100, 0);
 static const DECLARE_TLV_DB_SCALE(max98095_adcboost_tlv, 0, 600, 0);
 
-static const unsigned int max98095_hp_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98095_hp_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6700, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-4000, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(-400, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(150, 50, 0)
+);
 
-static const unsigned int max98095_spk_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max98095_spk_tlv,
 	0, 10, TLV_DB_SCALE_ITEM(-5900, 400, 0),
 	11, 18, TLV_DB_SCALE_ITEM(-1700, 200, 0),
 	19, 27, TLV_DB_SCALE_ITEM(-200, 100, 0),
-	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 39, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
-static const unsigned int max98095_rcv_lout_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(max98095_rcv_lout_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(-6200, 400, 0),
 	7, 14, TLV_DB_SCALE_ITEM(-3500, 300, 0),
 	15, 21, TLV_DB_SCALE_ITEM(-1200, 200, 0),
 	22, 27, TLV_DB_SCALE_ITEM(100, 100, 0),
-	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0),
-};
+	28, 31, TLV_DB_SCALE_ITEM(650, 50, 0)
+);
 
-static const unsigned int max98095_lin_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(max98095_lin_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-600, 300, 0),
 	3, 3, TLV_DB_SCALE_ITEM(300, 1100, 0),
-	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0),
-};
+	4, 5, TLV_DB_SCALE_ITEM(1400, 600, 0)
+);
 
 static const struct snd_kcontrol_new max98095_snd_controls[] = {
 
@@ -1653,10 +1385,13 @@
 		if (IS_ERR(max98095->mclk))
 			break;
 
-		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON)
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
 			clk_disable_unprepare(max98095->mclk);
-		else
-			clk_prepare_enable(max98095->mclk);
+		} else {
+			ret = clk_prepare_enable(max98095->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
@@ -2431,7 +2166,6 @@
 static struct i2c_driver max98095_i2c_driver = {
 	.driver = {
 		.name = "max98095",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max98095_of_match),
 	},
 	.probe  = max98095_i2c_probe,
diff --git a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c
index 3a2fda0..f5e3dce 100644
--- a/sound/soc/codecs/max98357a.c
+++ b/sound/soc/codecs/max98357a.c
@@ -31,6 +31,9 @@
 {
 	struct gpio_desc *sdmode = snd_soc_dai_get_drvdata(dai);
 
+	if (!sdmode)
+		return 0;
+
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
@@ -48,24 +51,21 @@
 }
 
 static const struct snd_soc_dapm_widget max98357a_dapm_widgets[] = {
-	SND_SOC_DAPM_DAC("SDMode", NULL, SND_SOC_NOPM, 0, 0),
 	SND_SOC_DAPM_OUTPUT("Speaker"),
 };
 
 static const struct snd_soc_dapm_route max98357a_dapm_routes[] = {
-	{"Speaker", NULL, "SDMode"},
+	{"Speaker", NULL, "HiFi Playback"},
 };
 
 static int max98357a_codec_probe(struct snd_soc_codec *codec)
 {
 	struct gpio_desc *sdmode;
 
-	sdmode = devm_gpiod_get(codec->dev, "sdmode", GPIOD_OUT_LOW);
-	if (IS_ERR(sdmode)) {
-		dev_err(codec->dev, "%s() unable to get sdmode GPIO: %ld\n",
-				__func__, PTR_ERR(sdmode));
+	sdmode = devm_gpiod_get_optional(codec->dev, "sdmode", GPIOD_OUT_LOW);
+	if (IS_ERR(sdmode))
 		return PTR_ERR(sdmode);
-	}
+
 	snd_soc_codec_set_drvdata(codec, sdmode);
 
 	return 0;
@@ -79,7 +79,7 @@
 	.num_dapm_routes	= ARRAY_SIZE(max98357a_dapm_routes),
 };
 
-static struct snd_soc_dai_ops max98357a_dai_ops = {
+static const struct snd_soc_dai_ops max98357a_dai_ops = {
 	.trigger	= max98357a_daiops_trigger,
 };
 
@@ -104,15 +104,8 @@
 
 static int max98357a_platform_probe(struct platform_device *pdev)
 {
-	int ret;
-
-	ret = snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
+	return snd_soc_register_codec(&pdev->dev, &max98357a_codec_driver,
 			&max98357a_dai_driver, 1);
-	if (ret)
-		dev_err(&pdev->dev, "%s() error registering codec driver: %d\n",
-				__func__, ret);
-
-	return ret;
 }
 
 static int max98357a_platform_remove(struct platform_device *pdev)
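
Note: max98357a now requests its sdmode GPIO with devm_gpiod_get_optional(), so boards that hard-wire the pin get a NULL descriptor instead of a probe failure, and the trigger callback bails out early when the descriptor is NULL. The general shape, sketched with a hypothetical "enable" GPIO:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	struct foo_amp {
		struct gpio_desc *enable;	/* NULL when not wired up */
	};

	static int foo_amp_probe(struct device *dev, struct foo_amp *amp)
	{
		/* NULL if the "enable-gpios" property is absent,
		 * ERR_PTR() only on a real lookup/request failure */
		amp->enable = devm_gpiod_get_optional(dev, "enable",
						      GPIOD_OUT_LOW);
		if (IS_ERR(amp->enable))
			return PTR_ERR(amp->enable);

		return 0;
	}

	static void foo_amp_power(struct foo_amp *amp, int on)
	{
		if (!amp->enable)	/* optional GPIO not present */
			return;
		gpiod_set_value(amp->enable, on);
	}
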
diff --git a/sound/soc/codecs/max9850.c b/sound/soc/codecs/max9850.c
index 481d58f1..c14a79d 100644
--- a/sound/soc/codecs/max9850.c
+++ b/sound/soc/codecs/max9850.c
@@ -67,13 +67,12 @@
 	.cache_type = REGCACHE_RBTREE,
 };
 
-static const unsigned int max9850_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max9850_tlv,
 	0x18, 0x1f, TLV_DB_SCALE_ITEM(-7450, 400, 0),
 	0x20, 0x33, TLV_DB_SCALE_ITEM(-4150, 200, 0),
 	0x34, 0x37, TLV_DB_SCALE_ITEM(-150, 100, 0),
-	0x38, 0x3f, TLV_DB_SCALE_ITEM(250, 50, 0),
-};
+	0x38, 0x3f, TLV_DB_SCALE_ITEM(250, 50, 0)
+);
 
 static const struct snd_kcontrol_new max9850_controls[] = {
 SOC_SINGLE_TLV("Headphone Volume", MAX9850_VOLUME, 0, 0x3f, 1, max9850_tlv),
@@ -352,7 +351,6 @@
 static struct i2c_driver max9850_i2c_driver = {
 	.driver = {
 		.name = "max9850",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9850_i2c_probe,
 	.remove = max9850_i2c_remove,
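
Note: the max9850 hunk is one instance of the tree-wide TLV conversion in this series. DECLARE_TLV_DB_RANGE() emits the same TLV data as the old open-coded array but derives the item count itself, so there is no TLV_DB_RANGE_HEAD() value to keep in sync by hand. A hypothetical volume control using the macro (register and dB values are illustrative):

	#include <sound/soc.h>
	#include <sound/tlv.h>

	#define FOO_REG_VOLUME	0x02	/* hypothetical volume register */

	/* coarse steps at the bottom of the range, finer steps near the top */
	static const DECLARE_TLV_DB_RANGE(foo_vol_tlv,
		0, 7,   TLV_DB_SCALE_ITEM(-7800, 400, 1),	/* step 0 mutes */
		8, 15,  TLV_DB_SCALE_ITEM(-4600, 300, 0),
		16, 31, TLV_DB_SCALE_ITEM(-2200, 100, 0)
	);

	static const struct snd_kcontrol_new foo_controls[] = {
		SOC_SINGLE_TLV("Master Playback Volume", FOO_REG_VOLUME,
			       0, 31, 0, foo_vol_tlv),
	};
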
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 29549cd..61cc18e 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -20,9 +20,7 @@
 
 #include "max9877.h"
 
-static struct regmap *regmap;
-
-static struct reg_default max9877_regs[] = {
+static const struct reg_default max9877_regs[] = {
 	{ 0, 0x40 },
 	{ 1, 0x00 },
 	{ 2, 0x00 },
@@ -30,19 +28,17 @@
 	{ 4, 0x49 },
 };
 
-static const unsigned int max9877_pgain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(max9877_pgain_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 900, 0),
-	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0),
-};
+	2, 2, TLV_DB_SCALE_ITEM(2000, 0, 0)
+);
 
-static const unsigned int max9877_output_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(max9877_output_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7900, 400, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-4700, 300, 0),
 	16, 23, TLV_DB_SCALE_ITEM(-2300, 200, 0),
-	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0),
-};
+	24, 31, TLV_DB_SCALE_ITEM(-700, 100, 0)
+);
 
 static const char *max9877_out_mode[] = {
 	"INA -> SPK",
@@ -123,7 +119,7 @@
 	{ "HPR", NULL, "SHDN" },
 };
 
-static const struct snd_soc_codec_driver max9877_codec = {
+static const struct snd_soc_component_driver max9877_component_driver = {
 	.controls = max9877_controls,
 	.num_controls = ARRAY_SIZE(max9877_controls),
 
@@ -145,6 +141,7 @@
 static int max9877_i2c_probe(struct i2c_client *client,
 			     const struct i2c_device_id *id)
 {
+	struct regmap *regmap;
 	int i;
 
 	regmap = devm_regmap_init_i2c(client, &max9877_regmap);
@@ -155,14 +152,8 @@
 	for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
 		regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def);
 
-	return snd_soc_register_codec(&client->dev, &max9877_codec, NULL, 0);
-}
-
-static int max9877_i2c_remove(struct i2c_client *client)
-{
-	snd_soc_unregister_codec(&client->dev);
-
-	return 0;
+	return devm_snd_soc_register_component(&client->dev,
+			&max9877_component_driver, NULL, 0);
 }
 
 static const struct i2c_device_id max9877_i2c_id[] = {
@@ -174,10 +165,8 @@
 static struct i2c_driver max9877_i2c_driver = {
 	.driver = {
 		.name = "max9877",
-		.owner = THIS_MODULE,
 	},
 	.probe = max9877_i2c_probe,
-	.remove = max9877_i2c_remove,
 	.id_table = max9877_i2c_id,
 };
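
Note: besides losing .owner, max9877 is converted from snd_soc_register_codec() to a devm-managed component: the registration is undone automatically when the i2c device unbinds, which is why the .remove callback can be deleted. Probe-only shape, with hypothetical foo_* names:

	#include <linux/i2c.h>
	#include <linux/regmap.h>
	#include <sound/soc.h>

	static const struct regmap_config foo_regmap = {
		.reg_bits = 8,
		.val_bits = 8,
	};

	static const struct snd_soc_component_driver foo_component_driver = {
		/* controls, DAPM widgets and routes would be listed here */
	};

	static int foo_i2c_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		struct regmap *regmap;

		regmap = devm_regmap_init_i2c(client, &foo_regmap);
		if (IS_ERR(regmap))
			return PTR_ERR(regmap);

		/* devm-managed: unregistered on unbind, no .remove needed */
		return devm_snd_soc_register_component(&client->dev,
				&foo_component_driver, NULL, 0);
	}
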
 
diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
index aad6642..5990de3 100644
--- a/sound/soc/codecs/max98925.c
+++ b/sound/soc/codecs/max98925.c
@@ -271,8 +271,6 @@
 			break;
 		}
 	}
-	dev_dbg(codec->dev, "%s: sample rate is %d, returning %d\n",
-				__func__, rate_table[i].rate, *value);
 	return ret;
 }
 
@@ -432,7 +430,7 @@
 	struct snd_soc_codec *codec = dai->codec;
 	struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
 
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		regmap_update_bits(max98925->regmap,
 				MAX98925_FORMAT,
@@ -523,7 +521,6 @@
 	struct max98925_priv *max98925 = snd_soc_codec_get_drvdata(codec);
 
 	max98925->codec = codec;
-	codec->control_data = max98925->regmap;
 	regmap_write(max98925->regmap, MAX98925_GLOBAL_ENABLE, 0x00);
 	/* It's not the default but we need to set DAI_DLY */
 	regmap_write(max98925->regmap,
@@ -639,7 +636,6 @@
 static struct i2c_driver max98925_i2c_driver = {
 	.driver = {
 		.name = "max98925",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(max98925_of_match),
 		.pm = NULL,
 	},
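
Note: max98925 above and pcm512x below both move to params_width(), which is the usual helper for reading the sample width inside a hw_params callback; it is equivalent to snd_pcm_format_width(params_format(params)) but shorter and harder to get wrong. Minimal sketch with a hypothetical callback:

	#include <sound/pcm_params.h>
	#include <sound/soc.h>

	static int foo_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params,
				 struct snd_soc_dai *dai)
	{
		switch (params_width(params)) {
		case 16:
			/* program a 16-bit slot/word length here */
			break;
		case 24:
		case 32:
			/* program a 24/32-bit slot/word length here */
			break;
		default:
			return -EINVAL;	/* unsupported sample width */
		}

		return 0;
	}
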
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 3d44fc5..3e770cb 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -650,14 +650,14 @@
 #define MC13783_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
 	SNDRV_PCM_FMTBIT_S24_LE)
 
-static struct snd_soc_dai_ops mc13783_ops_dac = {
+static const struct snd_soc_dai_ops mc13783_ops_dac = {
 	.hw_params	= mc13783_pcm_hw_params_dac,
 	.set_fmt	= mc13783_set_fmt_async,
 	.set_sysclk	= mc13783_set_sysclk_dac,
 	.set_tdm_slot	= mc13783_set_tdm_slot_dac,
 };
 
-static struct snd_soc_dai_ops mc13783_ops_codec = {
+static const struct snd_soc_dai_ops mc13783_ops_codec = {
 	.hw_params	= mc13783_pcm_hw_params_codec,
 	.set_fmt	= mc13783_set_fmt_async,
 	.set_sysclk	= mc13783_set_sysclk_codec,
@@ -698,7 +698,7 @@
 	},
 };
 
-static struct snd_soc_dai_ops mc13783_ops_sync = {
+static const struct snd_soc_dai_ops mc13783_ops_sync = {
 	.hw_params	= mc13783_pcm_hw_params_sync,
 	.set_fmt	= mc13783_set_fmt_sync,
 	.set_sysclk	= mc13783_set_sysclk_sync,
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index b74118e..f561c78 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -199,7 +199,7 @@
 	{12288000, 48000, 0xc, 0x0, 0x30, 0x0, 0x4},
 };
 
-static struct reg_default ml26124_reg[] = {
+static const struct reg_default ml26124_reg[] = {
 	/* CLOCK control Register */
 	{0x00, 0x00 },	/* Sampling Rate */
 	{0x02, 0x00},	/* PLL NL */
@@ -597,7 +597,6 @@
 static struct i2c_driver ml26124_i2c_driver = {
 	.driver = {
 		.name = "ml26124",
-		.owner = THIS_MODULE,
 	},
 	.probe = ml26124_i2c_probe,
 	.remove = ml26124_i2c_remove,
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index e7ba557..5832523 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -95,17 +95,22 @@
 	struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
 	int i = 0, val = -1, enable = 0;
 
-	if (priv->deemph)
-		for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++)
-			if (pcm1681_deemph[i] == priv->rate)
+	if (priv->deemph) {
+		for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++) {
+			if (pcm1681_deemph[i] == priv->rate) {
 				val = i;
+				break;
+			}
+		}
+	}
 
 	if (val != -1) {
 		regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
 				   PCM1681_DEEMPH_RATE_MASK, val << 3);
 		enable = 1;
-	} else
+	} else {
 		enable = 0;
+	}
 
 	/* enable/disable deemphasis functionality */
 	return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
@@ -330,7 +335,6 @@
 static struct i2c_driver pcm1681_i2c_driver = {
 	.driver = {
 		.name	= "pcm1681",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(pcm1681_dt_ids),
 	},
 	.id_table	= pcm1681_i2c_id,
diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c
index dcdfac0..dbff416 100644
--- a/sound/soc/codecs/pcm512x-i2c.c
+++ b/sound/soc/codecs/pcm512x-i2c.c
@@ -67,7 +67,6 @@
 	.id_table	= pcm512x_i2c_id,
 	.driver		= {
 		.name	= "pcm512x",
-		.owner	= THIS_MODULE,
 		.of_match_table = pcm512x_of_match,
 		.pm     = &pcm512x_pm_ops,
 	},
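
Note: the repeated removal of ".owner = THIS_MODULE" across these codecs relies on the i2c core: i2c_add_driver() (and therefore module_i2c_driver()) is a macro that passes THIS_MODULE to i2c_register_driver(), which fills in driver.owner itself, so the explicit assignment was redundant. Skeleton of the resulting driver struct (hypothetical names):

	#include <linux/i2c.h>
	#include <linux/module.h>

	static int foo_i2c_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
	{
		return 0;	/* hardware setup would go here */
	}

	static const struct i2c_device_id foo_i2c_id[] = {
		{ "foo", 0 },
		{ }
	};
	MODULE_DEVICE_TABLE(i2c, foo_i2c_id);

	static struct i2c_driver foo_i2c_driver = {
		.driver = {
			.name = "foo",
			/* no .owner: set by the i2c core via i2c_add_driver() */
		},
		.probe    = foo_i2c_probe,
		.id_table = foo_i2c_id,
	};
	module_i2c_driver(foo_i2c_driver);
	MODULE_LICENSE("GPL");
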
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index de16429..047c489 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1117,7 +1117,7 @@
 		params_rate(params),
 		params_channels(params));
 
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		alen = PCM512x_ALEN_16;
 		break;
@@ -1132,7 +1132,7 @@
 		break;
 	default:
 		dev_err(codec->dev, "Bad frame size: %d\n",
-			snd_pcm_format_width(params_format(params)));
+			params_width(params));
 		return -EINVAL;
 	}
 
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c
index 56650d6..aca479f 100644
--- a/sound/soc/codecs/rl6231.c
+++ b/sound/soc/codecs/rl6231.c
@@ -11,38 +11,98 @@
  */
 
 #include <linux/module.h>
+#include <linux/regmap.h>
 
 #include "rl6231.h"
 
 /**
- * rl6231_calc_dmic_clk - Calculate the parameter of dmic.
+ * rl6231_get_pre_div - Return the value of pre divider.
+ *
+ * @map: map for setting.
+ * @reg: register.
+ * @sft: shift.
+ *
+ * Return the value of pre divider from given register value.
+ * Return negative error code for unexpected register value.
+ */
+int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft)
+{
+	int pd, val;
+
+	regmap_read(map, reg, &val);
+
+	val = (val >> sft) & 0x7;
+
+	switch (val) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		pd = val + 1;
+		break;
+	case 4:
+		pd = 6;
+		break;
+	case 5:
+		pd = 8;
+		break;
+	case 6:
+		pd = 12;
+		break;
+	case 7:
+		pd = 16;
+		break;
+	default:
+		pd = -EINVAL;
+		break;
+	}
+
+	return pd;
+}
+EXPORT_SYMBOL_GPL(rl6231_get_pre_div);
+
+/**
+ * rl6231_calc_dmic_clk - Calculate the frequency divider parameter of dmic.
  *
  * @rate: base clock rate.
  *
- * Choose dmic clock between 1MHz and 3MHz.
- * It is better for clock to approximate 3MHz.
+ * Choose divider parameter that gives the highest possible DMIC frequency in
+ * 1MHz - 3MHz range.
  */
 int rl6231_calc_dmic_clk(int rate)
 {
-	int div[] = {2, 3, 4, 6, 8, 12}, idx = -EINVAL;
-	int i, red, bound, temp;
+	int div[] = {2, 3, 4, 6, 8, 12};
+	int i;
 
-	red = 3000000 * 12;
-	for (i = 0; i < ARRAY_SIZE(div); i++) {
-		bound = div[i] * 3000000;
-		if (rate > bound)
-			continue;
-		temp = bound - rate;
-		if (temp < red) {
-			red = temp;
-			idx = i;
-		}
+	if (rate < 1000000 * div[0]) {
+		pr_warn("Base clock rate %d is too low\n", rate);
+		return -EINVAL;
 	}
 
-	return idx;
+	for (i = 0; i < ARRAY_SIZE(div); i++) {
+		/* find divider that gives DMIC frequency below 3MHz */
+		if (3000000 * div[i] >= rate)
+			return i;
+	}
+
+	pr_warn("Base clock rate %d is too high\n", rate);
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(rl6231_calc_dmic_clk);
 
+struct pll_calc_map {
+	unsigned int pll_in;
+	unsigned int pll_out;
+	int k;
+	int n;
+	int m;
+	bool m_bp;
+};
+
+static const struct pll_calc_map pll_preset_table[] = {
+	{19200000,  24576000,  3, 30, 3, false},
+};
+
 /**
  * rl6231_pll_calc - Calculate PLL M/N/K code.
  * @freq_in: external clock provided to codec.
@@ -57,7 +117,7 @@
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code)
 {
 	int max_n = RL6231_PLL_N_MAX, max_m = RL6231_PLL_M_MAX;
-	int k, red, n_t, pll_out, in_t, out_t;
+	int i, k, red, n_t, pll_out, in_t, out_t;
 	int n = 0, m = 0, m_t = 0;
 	int red_t = abs(freq_out - freq_in);
 	bool bypass = false;
@@ -65,6 +125,18 @@
 	if (RL6231_PLL_INP_MAX < freq_in || RL6231_PLL_INP_MIN > freq_in)
 		return -EINVAL;
 
+	for (i = 0; i < ARRAY_SIZE(pll_preset_table); i++) {
+		if (freq_in == pll_preset_table[i].pll_in &&
+			freq_out == pll_preset_table[i].pll_out) {
+			k = pll_preset_table[i].k;
+			m = pll_preset_table[i].m;
+			n = pll_preset_table[i].n;
+			bypass = pll_preset_table[i].m_bp;
+			pr_debug("Use preset PLL parameter table\n");
+			goto code_find;
+		}
+	}
+
 	k = 100000000 / freq_out - 2;
 	if (k > RL6231_PLL_K_MAX)
 		k = RL6231_PLL_K_MAX;
diff --git a/sound/soc/codecs/rl6231.h b/sound/soc/codecs/rl6231.h
index 0f7b057..4c77b44 100644
--- a/sound/soc/codecs/rl6231.h
+++ b/sound/soc/codecs/rl6231.h
@@ -30,5 +30,6 @@
 int rl6231_pll_calc(const unsigned int freq_in,
 	const unsigned int freq_out, struct rl6231_pll_code *pll_code);
 int rl6231_get_clk_info(int sclk, int rate);
+int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft);
 
 #endif /* __RL6231_H__ */
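
Note: the rewritten rl6231_calc_dmic_clk() above picks the smallest divider that brings the DMIC clock to 3 MHz or below (i.e. the highest DMIC clock not above 3 MHz) and warns when the base clock cannot be divided into the 1-3 MHz window at all. A standalone sketch of the same selection rule, handy for sanity-checking clock rates outside the kernel:

	#include <stdio.h>

	static const int dividers[] = { 2, 3, 4, 6, 8, 12 };

	/* returns the index into dividers[], or -1 if no divider fits */
	static int pick_dmic_div(int rate)
	{
		int i;

		if (rate < 1000000 * dividers[0])
			return -1;	/* even /2 would fall below 1 MHz */

		for (i = 0; i < 6; i++)
			if (3000000 * dividers[i] >= rate)
				return i;	/* first divider giving <= 3 MHz */

		return -1;		/* rate above 36 MHz */
	}

	int main(void)
	{
		int rates[] = { 12288000, 24576000 }, i;

		for (i = 0; i < 2; i++) {
			int idx = pick_dmic_div(rates[i]);

			if (idx >= 0)
				printf("%d Hz -> /%d = %d Hz\n", rates[i],
				       dividers[idx], rates[i] / dividers[idx]);
		}
		return 0;
	}

For example, a 12.288 MHz base clock selects /6 and a 24.576 MHz base clock selects /12, both giving a 2.048 MHz DMIC clock.
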
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 5c43e26..bd93658 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -38,7 +38,7 @@
 #define RT288_VENDOR_ID 0x10ec0288
 
 struct rt286_priv {
-	struct reg_default *index_cache;
+	const struct reg_default *index_cache;
 	int index_cache_size;
 	struct regmap *regmap;
 	struct snd_soc_codec *codec;
@@ -50,7 +50,7 @@
 	int clk_id;
 };
 
-static struct reg_default rt286_index_def[] = {
+static const struct reg_default rt286_index_def[] = {
 	{ 0x01, 0xaaaa },
 	{ 0x02, 0x8aaa },
 	{ 0x03, 0x0002 },
@@ -1108,7 +1108,7 @@
 };
 MODULE_DEVICE_TABLE(acpi, rt286_acpi_match);
 
-static struct dmi_system_id force_combo_jack_table[] = {
+static const struct dmi_system_id force_combo_jack_table[] = {
 	{
 		.ident = "Intel Wilson Beach",
 		.matches = {
@@ -1118,7 +1118,7 @@
 	{ }
 };
 
-static struct dmi_system_id dmi_dell_dino[] = {
+static const struct dmi_system_id dmi_dell_dino[] = {
 	{
 		.ident = "Dell Dino",
 		.matches = {
@@ -1157,7 +1157,7 @@
 	}
 	if (val != RT286_VENDOR_ID && val != RT288_VENDOR_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt286\n", val);
+			"Device with ID register %#x is not rt286\n", val);
 		return -ENODEV;
 	}
 
@@ -1259,7 +1259,6 @@
 static struct i2c_driver rt286_i2c_driver = {
 	.driver = {
 		   .name = "rt286",
-		   .owner = THIS_MODULE,
 		   .acpi_match_table = ACPI_PTR(rt286_acpi_match),
 		   },
 	.probe = rt286_i2c_probe,
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
new file mode 100644
index 0000000..3c2f0f8
--- /dev/null
+++ b/sound/soc/codecs/rt298.c
@@ -0,0 +1,1271 @@
+/*
+ * rt298.c  --  RT298 ALSA SoC audio codec driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Bard Liao <bardliao@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/jack.h>
+#include <linux/workqueue.h>
+#include <sound/rt298.h>
+#include <sound/hda_verbs.h>
+
+#include "rl6347a.h"
+#include "rt298.h"
+
+#define RT298_VENDOR_ID 0x10ec0298
+
+struct rt298_priv {
+	struct reg_default *index_cache;
+	int index_cache_size;
+	struct regmap *regmap;
+	struct snd_soc_codec *codec;
+	struct rt298_platform_data pdata;
+	struct i2c_client *i2c;
+	struct snd_soc_jack *jack;
+	struct delayed_work jack_detect_work;
+	int sys_clk;
+	int clk_id;
+	int is_hp_in;
+};
+
+static struct reg_default rt298_index_def[] = {
+	{ 0x01, 0xaaaa },
+	{ 0x02, 0x8aaa },
+	{ 0x03, 0x0002 },
+	{ 0x04, 0xaf01 },
+	{ 0x08, 0x000d },
+	{ 0x09, 0xd810 },
+	{ 0x0a, 0x0120 },
+	{ 0x0b, 0x0000 },
+	{ 0x0d, 0x2800 },
+	{ 0x0f, 0x0000 },
+	{ 0x19, 0x0a17 },
+	{ 0x20, 0x0020 },
+	{ 0x33, 0x0208 },
+	{ 0x46, 0x0300 },
+	{ 0x49, 0x0004 },
+	{ 0x4f, 0x50e9 },
+	{ 0x50, 0x2000 },
+	{ 0x63, 0x2902 },
+	{ 0x67, 0x1111 },
+	{ 0x68, 0x1016 },
+	{ 0x69, 0x273f },
+};
+#define INDEX_CACHE_SIZE ARRAY_SIZE(rt298_index_def)
+
+static const struct reg_default rt298_reg[] = {
+	{ 0x00170500, 0x00000400 },
+	{ 0x00220000, 0x00000031 },
+	{ 0x00239000, 0x0000007f },
+	{ 0x0023a000, 0x0000007f },
+	{ 0x00270500, 0x00000400 },
+	{ 0x00370500, 0x00000400 },
+	{ 0x00870500, 0x00000400 },
+	{ 0x00920000, 0x00000031 },
+	{ 0x00935000, 0x000000c3 },
+	{ 0x00936000, 0x000000c3 },
+	{ 0x00970500, 0x00000400 },
+	{ 0x00b37000, 0x00000097 },
+	{ 0x00b37200, 0x00000097 },
+	{ 0x00b37300, 0x00000097 },
+	{ 0x00c37000, 0x00000000 },
+	{ 0x00c37100, 0x00000080 },
+	{ 0x01270500, 0x00000400 },
+	{ 0x01370500, 0x00000400 },
+	{ 0x01371f00, 0x411111f0 },
+	{ 0x01439000, 0x00000080 },
+	{ 0x0143a000, 0x00000080 },
+	{ 0x01470700, 0x00000000 },
+	{ 0x01470500, 0x00000400 },
+	{ 0x01470c00, 0x00000000 },
+	{ 0x01470100, 0x00000000 },
+	{ 0x01837000, 0x00000000 },
+	{ 0x01870500, 0x00000400 },
+	{ 0x02050000, 0x00000000 },
+	{ 0x02139000, 0x00000080 },
+	{ 0x0213a000, 0x00000080 },
+	{ 0x02170100, 0x00000000 },
+	{ 0x02170500, 0x00000400 },
+	{ 0x02170700, 0x00000000 },
+	{ 0x02270100, 0x00000000 },
+	{ 0x02370100, 0x00000000 },
+	{ 0x01870700, 0x00000020 },
+	{ 0x00830000, 0x000000c3 },
+	{ 0x00930000, 0x000000c3 },
+	{ 0x01270700, 0x00000000 },
+};
+
+static bool rt298_volatile_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0 ... 0xff:
+	case RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID):
+	case RT298_GET_HP_SENSE:
+	case RT298_GET_MIC1_SENSE:
+	case RT298_PROC_COEF:
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_MIC1, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_SPK_OUT, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_HP_OUT, 0):
+		return true;
+	default:
+		return true;
+	}
+
+
+}
+
+static bool rt298_readable_register(struct device *dev, unsigned int reg)
+{
+	switch (reg) {
+	case 0 ... 0xff:
+	case RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID):
+	case RT298_GET_HP_SENSE:
+	case RT298_GET_MIC1_SENSE:
+	case RT298_SET_AUDIO_POWER:
+	case RT298_SET_HPO_POWER:
+	case RT298_SET_SPK_POWER:
+	case RT298_SET_DMIC1_POWER:
+	case RT298_SPK_MUX:
+	case RT298_HPO_MUX:
+	case RT298_ADC0_MUX:
+	case RT298_ADC1_MUX:
+	case RT298_SET_MIC1:
+	case RT298_SET_PIN_HPO:
+	case RT298_SET_PIN_SPK:
+	case RT298_SET_PIN_DMIC1:
+	case RT298_SPK_EAPD:
+	case RT298_SET_AMP_GAIN_HPO:
+	case RT298_SET_DMIC2_DEFAULT:
+	case RT298_DACL_GAIN:
+	case RT298_DACR_GAIN:
+	case RT298_ADCL_GAIN:
+	case RT298_ADCR_GAIN:
+	case RT298_MIC_GAIN:
+	case RT298_SPOL_GAIN:
+	case RT298_SPOR_GAIN:
+	case RT298_HPOL_GAIN:
+	case RT298_HPOR_GAIN:
+	case RT298_F_DAC_SWITCH:
+	case RT298_F_RECMIX_SWITCH:
+	case RT298_REC_MIC_SWITCH:
+	case RT298_REC_I2S_SWITCH:
+	case RT298_REC_LINE_SWITCH:
+	case RT298_REC_BEEP_SWITCH:
+	case RT298_DAC_FORMAT:
+	case RT298_ADC_FORMAT:
+	case RT298_COEF_INDEX:
+	case RT298_PROC_COEF:
+	case RT298_SET_AMP_GAIN_ADC_IN1:
+	case RT298_SET_AMP_GAIN_ADC_IN2:
+	case RT298_SET_POWER(RT298_DAC_OUT1):
+	case RT298_SET_POWER(RT298_DAC_OUT2):
+	case RT298_SET_POWER(RT298_ADC_IN1):
+	case RT298_SET_POWER(RT298_ADC_IN2):
+	case RT298_SET_POWER(RT298_DMIC2):
+	case RT298_SET_POWER(RT298_MIC1):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_MIC1, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_SPK_OUT, 0):
+	case VERB_CMD(AC_VERB_GET_EAPD_BTLENABLE, RT298_HP_OUT, 0):
+		return true;
+	default:
+		return false;
+	}
+}
+
+#ifdef CONFIG_PM
+static void rt298_index_sync(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < INDEX_CACHE_SIZE; i++) {
+		snd_soc_write(codec, rt298->index_cache[i].reg,
+				  rt298->index_cache[i].def);
+	}
+}
+#endif
+
+static int rt298_support_power_controls[] = {
+	RT298_DAC_OUT1,
+	RT298_DAC_OUT2,
+	RT298_ADC_IN1,
+	RT298_ADC_IN2,
+	RT298_MIC1,
+	RT298_DMIC1,
+	RT298_DMIC2,
+	RT298_SPK_OUT,
+	RT298_HP_OUT,
+};
+#define RT298_POWER_REG_LEN ARRAY_SIZE(rt298_support_power_controls)
+
+static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
+{
+	struct snd_soc_dapm_context *dapm;
+	unsigned int val, buf;
+
+	*hp = false;
+	*mic = false;
+
+	if (!rt298->codec)
+		return -EINVAL;
+
+	dapm = snd_soc_codec_get_dapm(rt298->codec);
+
+	if (rt298->pdata.cbj_en) {
+		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
+		*hp = buf & 0x80000000;
+		if (*hp == rt298->is_hp_in)
+			return -1;
+		rt298->is_hp_in = *hp;
+		if (*hp) {
+			/* power on HV,VERF */
+			regmap_update_bits(rt298->regmap,
+				RT298_DC_GAIN, 0x200, 0x200);
+
+			snd_soc_dapm_force_enable_pin(dapm, "HV");
+			snd_soc_dapm_force_enable_pin(dapm, "VREF");
+			/* power LDO1 */
+			snd_soc_dapm_force_enable_pin(dapm, "LDO1");
+			snd_soc_dapm_sync(dapm);
+
+			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x24);
+			msleep(50);
+
+			regmap_update_bits(rt298->regmap,
+				RT298_CBJ_CTRL1, 0xfcc0, 0xd400);
+			msleep(300);
+			regmap_read(rt298->regmap, RT298_CBJ_CTRL2, &val);
+
+			if (0x0070 == (val & 0x0070)) {
+				*mic = true;
+			} else {
+				regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xfcc0, 0xe400);
+				msleep(300);
+				regmap_read(rt298->regmap,
+					RT298_CBJ_CTRL2, &val);
+				if (0x0070 == (val & 0x0070))
+					*mic = true;
+				else
+					*mic = false;
+			}
+			regmap_update_bits(rt298->regmap,
+				RT298_DC_GAIN, 0x200, 0x0);
+
+		} else {
+			*mic = false;
+			regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20);
+		}
+	} else {
+		regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
+		*hp = buf & 0x80000000;
+		regmap_read(rt298->regmap, RT298_GET_MIC1_SENSE, &buf);
+		*mic = buf & 0x80000000;
+	}
+
+	snd_soc_dapm_disable_pin(dapm, "HV");
+	snd_soc_dapm_disable_pin(dapm, "VREF");
+	if (!*hp)
+		snd_soc_dapm_disable_pin(dapm, "LDO1");
+	snd_soc_dapm_sync(dapm);
+
+	pr_debug("*hp = %d *mic = %d\n", *hp, *mic);
+
+	return 0;
+}
+
+static void rt298_jack_detect_work(struct work_struct *work)
+{
+	struct rt298_priv *rt298 =
+		container_of(work, struct rt298_priv, jack_detect_work.work);
+	int status = 0;
+	bool hp = false;
+	bool mic = false;
+
+	if (rt298_jack_detect(rt298, &hp, &mic) < 0)
+		return;
+
+	if (hp == true)
+		status |= SND_JACK_HEADPHONE;
+
+	if (mic == true)
+		status |= SND_JACK_MICROPHONE;
+
+	snd_soc_jack_report(rt298->jack, status,
+		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+}
+
+int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->jack = jack;
+
+	/* Send an initial empty report */
+	snd_soc_jack_report(rt298->jack, 0,
+		SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt298_mic_detect);
+
+static int is_mclk_mode(struct snd_soc_dapm_widget *source,
+			 struct snd_soc_dapm_widget *sink)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	if (rt298->clk_id == RT298_SCLK_S_MCLK)
+		return 1;
+	else
+		return 0;
+}
+
+static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -6350, 50, 0);
+static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
+
+static const struct snd_kcontrol_new rt298_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("DAC0 Playback Volume", RT298_DACL_GAIN,
+			    RT298_DACR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+	SOC_DOUBLE_R_TLV("ADC0 Capture Volume", RT298_ADCL_GAIN,
+			    RT298_ADCR_GAIN, 0, 0x7f, 0, out_vol_tlv),
+	SOC_SINGLE_TLV("AMIC Volume", RT298_MIC_GAIN,
+			    0, 0x3, 0, mic_vol_tlv),
+	SOC_DOUBLE_R("Speaker Playback Switch", RT298_SPOL_GAIN,
+			    RT298_SPOR_GAIN, RT298_MUTE_SFT, 1, 1),
+};
+
+/* Digital Mixer */
+static const struct snd_kcontrol_new rt298_front_mix[] = {
+	SOC_DAPM_SINGLE("DAC Switch",  RT298_F_DAC_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("RECMIX Switch", RT298_F_RECMIX_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+};
+
+/* Analog Input Mixer */
+static const struct snd_kcontrol_new rt298_rec_mix[] = {
+	SOC_DAPM_SINGLE("Mic1 Switch", RT298_REC_MIC_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("I2S Switch", RT298_REC_I2S_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("Line1 Switch", RT298_REC_LINE_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+	SOC_DAPM_SINGLE("Beep Switch", RT298_REC_BEEP_SWITCH,
+			RT298_MUTE_SFT, 1, 1),
+};
+
+static const struct snd_kcontrol_new spo_enable_control =
+	SOC_DAPM_SINGLE("Switch", RT298_SET_PIN_SPK,
+			RT298_SET_PIN_SFT, 1, 0);
+
+static const struct snd_kcontrol_new hpol_enable_control =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT298_HPOL_GAIN,
+			RT298_MUTE_SFT, 1, 1);
+
+static const struct snd_kcontrol_new hpor_enable_control =
+	SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT298_HPOR_GAIN,
+			RT298_MUTE_SFT, 1, 1);
+
+/* ADC0 source */
+static const char * const rt298_adc_src[] = {
+	"Mic", "RECMIX", "Dmic"
+};
+
+static const int rt298_adc_values[] = {
+	0, 4, 5,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(
+	rt298_adc0_enum, RT298_ADC0_MUX, RT298_ADC_SEL_SFT,
+	RT298_ADC_SEL_MASK, rt298_adc_src, rt298_adc_values);
+
+static const struct snd_kcontrol_new rt298_adc0_mux =
+	SOC_DAPM_ENUM("ADC 0 source", rt298_adc0_enum);
+
+static SOC_VALUE_ENUM_SINGLE_DECL(
+	rt298_adc1_enum, RT298_ADC1_MUX, RT298_ADC_SEL_SFT,
+	RT298_ADC_SEL_MASK, rt298_adc_src, rt298_adc_values);
+
+static const struct snd_kcontrol_new rt298_adc1_mux =
+	SOC_DAPM_ENUM("ADC 1 source", rt298_adc1_enum);
+
+static const char * const rt298_dac_src[] = {
+	"Front", "Surround"
+};
+/* HP-OUT source */
+static SOC_ENUM_SINGLE_DECL(rt298_hpo_enum, RT298_HPO_MUX,
+				0, rt298_dac_src);
+
+static const struct snd_kcontrol_new rt298_hpo_mux =
+SOC_DAPM_ENUM("HPO source", rt298_hpo_enum);
+
+/* SPK-OUT source */
+static SOC_ENUM_SINGLE_DECL(rt298_spo_enum, RT298_SPK_MUX,
+				0, rt298_dac_src);
+
+static const struct snd_kcontrol_new rt298_spo_mux =
+SOC_DAPM_ENUM("SPO source", rt298_spo_enum);
+
+static int rt298_spk_event(struct snd_soc_dapm_widget *w,
+			    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec,
+			RT298_SPK_EAPD, RT298_SET_EAPD_HIGH);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_write(codec,
+			RT298_SPK_EAPD, RT298_SET_EAPD_LOW);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_set_dmic1_event(struct snd_soc_dapm_widget *w,
+				  struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_write(codec, RT298_SET_PIN_DMIC1, 0x20);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_write(codec, RT298_SET_PIN_DMIC1, 0);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_adc_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	unsigned int nid;
+
+	nid = (w->reg >> 20) & 0xff;
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
+			0x7080, 0x7000);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec,
+			VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
+			0x7080, 0x7080);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_mic1_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL3, 0xc000, 0x8000);
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL2, 0xc000, 0x8000);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL3, 0xc000, 0x0000);
+		snd_soc_update_bits(codec,
+			RT298_A_BIAS_CTRL2, 0xc000, 0x0000);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int rt298_vref_event(struct snd_soc_dapm_widget *w,
+			     struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0000);
+		mdelay(50);
+		break;
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = {
+
+	SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1,
+		12, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1,
+		0, 1, rt298_vref_event, SND_SOC_DAPM_PRE_PMU),
+	SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2,
+		1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2,
+		2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LDO2", 1, RT298_POWER_CTRL2,
+		3, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("VREF1", 1, RT298_POWER_CTRL2,
+		4, 1, NULL, 0),
+	SND_SOC_DAPM_SUPPLY_S("LV", 2, RT298_POWER_CTRL1,
+		13, 1, NULL, 0),
+
+
+	SND_SOC_DAPM_SUPPLY("MCLK MODE", RT298_PLL_CTRL1,
+		5, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("MIC1 Input Buffer", SND_SOC_NOPM,
+		0, 0, rt298_mic1_event, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	/* Input Lines */
+	SND_SOC_DAPM_INPUT("DMIC1 Pin"),
+	SND_SOC_DAPM_INPUT("DMIC2 Pin"),
+	SND_SOC_DAPM_INPUT("MIC1"),
+	SND_SOC_DAPM_INPUT("LINE1"),
+	SND_SOC_DAPM_INPUT("Beep"),
+
+	/* DMIC */
+	SND_SOC_DAPM_PGA_E("DMIC1", RT298_SET_POWER(RT298_DMIC1), 0, 1,
+		NULL, 0, rt298_set_dmic1_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_PGA("DMIC2", RT298_SET_POWER(RT298_DMIC2), 0, 1,
+		NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DMIC Receiver", SND_SOC_NOPM,
+		0, 0, NULL, 0),
+
+	/* REC Mixer */
+	SND_SOC_DAPM_MIXER("RECMIX", SND_SOC_NOPM, 0, 0,
+		rt298_rec_mix, ARRAY_SIZE(rt298_rec_mix)),
+
+	/* ADCs */
+	SND_SOC_DAPM_ADC("ADC 0", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_ADC("ADC 1", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* ADC Mux */
+	SND_SOC_DAPM_MUX_E("ADC 0 Mux", RT298_SET_POWER(RT298_ADC_IN1), 0, 1,
+		&rt298_adc0_mux, rt298_adc_event, SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_MUX_E("ADC 1 Mux", RT298_SET_POWER(RT298_ADC_IN2), 0, 1,
+		&rt298_adc1_mux, rt298_adc_event, SND_SOC_DAPM_PRE_PMD |
+		SND_SOC_DAPM_POST_PMU),
+
+	/* Audio Interface */
+	SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_IN("AIF2RX", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF2TX", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
+
+	/* Output Side */
+	/* DACs */
+	SND_SOC_DAPM_DAC("DAC 0", NULL, SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DAC 1", NULL, SND_SOC_NOPM, 0, 0),
+
+	/* Output Mux */
+	SND_SOC_DAPM_MUX("SPK Mux", SND_SOC_NOPM, 0, 0, &rt298_spo_mux),
+	SND_SOC_DAPM_MUX("HPO Mux", SND_SOC_NOPM, 0, 0, &rt298_hpo_mux),
+
+	SND_SOC_DAPM_SUPPLY("HP Power", RT298_SET_PIN_HPO,
+		RT298_SET_PIN_SFT, 0, NULL, 0),
+
+	/* Output Mixer */
+	SND_SOC_DAPM_MIXER("Front", RT298_SET_POWER(RT298_DAC_OUT1), 0, 1,
+			rt298_front_mix, ARRAY_SIZE(rt298_front_mix)),
+	SND_SOC_DAPM_PGA("Surround", RT298_SET_POWER(RT298_DAC_OUT2), 0, 1,
+			NULL, 0),
+
+	/* Output Pga */
+	SND_SOC_DAPM_SWITCH_E("SPO", SND_SOC_NOPM, 0, 0,
+		&spo_enable_control, rt298_spk_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_SWITCH("HPO L", SND_SOC_NOPM, 0, 0,
+		&hpol_enable_control),
+	SND_SOC_DAPM_SWITCH("HPO R", SND_SOC_NOPM, 0, 0,
+		&hpor_enable_control),
+
+	/* Output Lines */
+	SND_SOC_DAPM_OUTPUT("SPOL"),
+	SND_SOC_DAPM_OUTPUT("SPOR"),
+	SND_SOC_DAPM_OUTPUT("HPO Pin"),
+	SND_SOC_DAPM_OUTPUT("SPDIF"),
+};
+
+static const struct snd_soc_dapm_route rt298_dapm_routes[] = {
+
+	{"ADC 0", NULL, "MCLK MODE", is_mclk_mode},
+	{"ADC 1", NULL, "MCLK MODE", is_mclk_mode},
+	{"Front", NULL, "MCLK MODE", is_mclk_mode},
+	{"Surround", NULL, "MCLK MODE", is_mclk_mode},
+
+	{"HP Power", NULL, "LDO1"},
+	{"HP Power", NULL, "LDO2"},
+	{"HP Power", NULL, "LV"},
+	{"HP Power", NULL, "VREF1"},
+	{"HP Power", NULL, "BG_MBIAS"},
+
+	{"MIC1", NULL, "LDO1"},
+	{"MIC1", NULL, "LDO2"},
+	{"MIC1", NULL, "HV"},
+	{"MIC1", NULL, "LV"},
+	{"MIC1", NULL, "VREF"},
+	{"MIC1", NULL, "VREF1"},
+	{"MIC1", NULL, "BG_MBIAS"},
+	{"MIC1", NULL, "MIC1 Input Buffer"},
+
+	{"SPO", NULL, "LDO1"},
+	{"SPO", NULL, "LDO2"},
+	{"SPO", NULL, "HV"},
+	{"SPO", NULL, "LV"},
+	{"SPO", NULL, "VREF"},
+	{"SPO", NULL, "VREF1"},
+	{"SPO", NULL, "BG_MBIAS"},
+
+	{"DMIC1", NULL, "DMIC1 Pin"},
+	{"DMIC2", NULL, "DMIC2 Pin"},
+	{"DMIC1", NULL, "DMIC Receiver"},
+	{"DMIC2", NULL, "DMIC Receiver"},
+
+	{"RECMIX", "Beep Switch", "Beep"},
+	{"RECMIX", "Line1 Switch", "LINE1"},
+	{"RECMIX", "Mic1 Switch", "MIC1"},
+
+	{"ADC 0 Mux", "Dmic", "DMIC1"},
+	{"ADC 0 Mux", "RECMIX", "RECMIX"},
+	{"ADC 0 Mux", "Mic", "MIC1"},
+	{"ADC 1 Mux", "Dmic", "DMIC2"},
+	{"ADC 1 Mux", "RECMIX", "RECMIX"},
+	{"ADC 1 Mux", "Mic", "MIC1"},
+
+	{"ADC 0", NULL, "ADC 0 Mux"},
+	{"ADC 1", NULL, "ADC 1 Mux"},
+
+	{"AIF1TX", NULL, "ADC 0"},
+	{"AIF2TX", NULL, "ADC 1"},
+
+	{"DAC 0", NULL, "AIF1RX"},
+	{"DAC 1", NULL, "AIF2RX"},
+
+	{"Front", "DAC Switch", "DAC 0"},
+	{"Front", "RECMIX Switch", "RECMIX"},
+
+	{"Surround", NULL, "DAC 1"},
+
+	{"SPK Mux", "Front", "Front"},
+	{"SPK Mux", "Surround", "Surround"},
+
+	{"HPO Mux", "Front", "Front"},
+	{"HPO Mux", "Surround", "Surround"},
+
+	{"SPO", "Switch", "SPK Mux"},
+	{"HPO L", "Switch", "HPO Mux"},
+	{"HPO R", "Switch", "HPO Mux"},
+	{"HPO L", NULL, "HP Power"},
+	{"HPO R", NULL, "HP Power"},
+
+	{"SPOL", NULL, "SPO"},
+	{"SPOR", NULL, "SPO"},
+	{"HPO Pin", NULL, "HPO L"},
+	{"HPO Pin", NULL, "HPO R"},
+};
+
+static int rt298_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = 0;
+	int d_len_code;
+
+	switch (params_rate(params)) {
+	/* bit 14 0:48K 1:44.1K */
+	case 44100:
+	case 48000:
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported sample rate %d\n",
+					params_rate(params));
+		return -EINVAL;
+	}
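+	/*
+	 * The system clock chosen via set_sysclk must belong to the same
+	 * rate family as the stream: 12.288/24.576 MHz for 48 kHz,
+	 * 11.2896/22.5792 MHz for 44.1 kHz.
+	 */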
+	switch (rt298->sys_clk) {
+	case 12288000:
+	case 24576000:
+		if (params_rate(params) != 48000) {
+			dev_err(codec->dev, "Sample rate %d does not match sysclk %d\n",
+					params_rate(params), rt298->sys_clk);
+			return -EINVAL;
+		}
+		break;
+	case 11289600:
+	case 22579200:
+		if (params_rate(params) != 44100) {
+			dev_err(codec->dev, "Sample rate %d does not match sysclk %d\n",
+					params_rate(params), rt298->sys_clk);
+			return -EINVAL;
+		}
+		break;
+	}
+
+	if (params_channels(params) <= 16) {
+		/* bit 3:0 Number of Channel */
+		val |= (params_channels(params) - 1);
+	} else {
+		dev_err(codec->dev, "Unsupported channels %d\n",
+					params_channels(params));
+		return -EINVAL;
+	}
+
+	d_len_code = 0;
+	switch (params_width(params)) {
+	/* bit 6:4 Bits per Sample */
+	case 16:
+		d_len_code = 0;
+		val |= (0x1 << 4);
+		break;
+	case 32:
+		d_len_code = 2;
+		val |= (0x4 << 4);
+		break;
+	case 20:
+		d_len_code = 1;
+		val |= (0x2 << 4);
+		break;
+	case 24:
+		d_len_code = 2;
+		val |= (0x3 << 4);
+		break;
+	case 8:
+		d_len_code = 3;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec,
+		RT298_I2S_CTRL1, 0x0018, d_len_code << 3);
+	dev_dbg(codec->dev, "format val = 0x%x\n", val);
+
+	snd_soc_update_bits(codec, RT298_DAC_FORMAT, 0x407f, val);
+	snd_soc_update_bits(codec, RT298_ADC_FORMAT, 0x407f, val);
+
+	return 0;
+}
+
+static int rt298_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
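+	/* Codec is bit/frame clock master for CBM_CFM, slave for CBS_CFS. */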
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x800, 0x800);
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x800, 0x0);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x0);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x1 << 8);
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x2 << 8);
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x300, 0x3 << 8);
+		break;
+	default:
+		return -EINVAL;
+	}
+	/* bit 15 Stream Type 0:PCM 1:Non-PCM */
+	snd_soc_update_bits(codec, RT298_DAC_FORMAT, 0x8000, 0);
+	snd_soc_update_bits(codec, RT298_ADC_FORMAT, 0x8000, 0);
+
+	return 0;
+}
+
+static int rt298_set_dai_sysclk(struct snd_soc_dai *dai,
+				int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	dev_dbg(codec->dev, "%s freq=%d\n", __func__, freq);
+
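+	/* Select the system clock source: MCLK directly or the PLL output. */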
+	if (RT298_SCLK_S_MCLK == clk_id) {
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x0100, 0x0);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL1, 0x20, 0x20);
+	} else {
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x0100, 0x0100);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL, 0x4, 0x4);
+		snd_soc_update_bits(codec,
+			RT298_PLL_CTRL1, 0x20, 0x0);
+	}
+
+	switch (freq) {
+	case 19200000:
+		if (RT298_SCLK_S_MCLK == clk_id) {
+			dev_err(codec->dev, "Should not use MCLK\n");
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x40, 0x40);
+		break;
+	case 24000000:
+		if (RT298_SCLK_S_MCLK == clk_id) {
+			dev_err(codec->dev, "Should not use MCLK\n");
+			return -EINVAL;
+		}
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x40, 0x0);
+		break;
+	case 12288000:
+	case 11289600:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x8, 0x0);
+		snd_soc_update_bits(codec,
+			RT298_CLK_DIV, 0xfc1e, 0x0004);
+		break;
+	case 24576000:
+	case 22579200:
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL2, 0x8, 0x8);
+		snd_soc_update_bits(codec,
+			RT298_CLK_DIV, 0xfc1e, 0x5406);
+		break;
+	default:
+		dev_err(codec->dev, "Unsupported system clock\n");
+		return -EINVAL;
+	}
+
+	rt298->sys_clk = freq;
+	rt298->clk_id = clk_id;
+
+	return 0;
+}
+
+static int rt298_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	dev_dbg(codec->dev, "%s ratio=%d\n", __func__, ratio);
+	if (50 == ratio)
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x1000, 0x1000);
+	else
+		snd_soc_update_bits(codec,
+			RT298_I2S_CTRL1, 0x1000, 0x0);
+
+	return 0;
+}
+
+static int rt298_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_PREPARE:
+		if (SND_SOC_BIAS_STANDBY ==
+			snd_soc_codec_get_bias_level(codec)) {
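+			/* Bring the audio function group back up to D0. */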
+			snd_soc_write(codec,
+				RT298_SET_AUDIO_POWER, AC_PWRST_D0);
+			snd_soc_update_bits(codec, 0x0d, 0x200, 0x200);
+			snd_soc_update_bits(codec, 0x52, 0x80, 0x0);
+			mdelay(20);
+			snd_soc_update_bits(codec, 0x0d, 0x200, 0x0);
+			snd_soc_update_bits(codec, 0x52, 0x80, 0x80);
+		}
+		break;
+
+	case SND_SOC_BIAS_ON:
+		mdelay(30);
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0400);
+
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
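+		/* Drop the audio function group to power state D3. */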
+		snd_soc_write(codec,
+			RT298_SET_AUDIO_POWER, AC_PWRST_D3);
+		snd_soc_update_bits(codec,
+			RT298_CBJ_CTRL1, 0x0400, 0x0000);
+		break;
+
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static irqreturn_t rt298_irq(int irq, void *data)
+{
+	struct rt298_priv *rt298 = data;
+	bool hp = false;
+	bool mic = false;
+	int ret, status = 0;
+
+	ret = rt298_jack_detect(rt298, &hp, &mic);
+
+	/* Clear IRQ */
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x1, 0x1);
+
+	if (ret == 0) {
+		if (hp == true)
+			status |= SND_JACK_HEADPHONE;
+
+		if (mic == true)
+			status |= SND_JACK_MICROPHONE;
+
+		snd_soc_jack_report(rt298->jack, status,
+			SND_JACK_MICROPHONE | SND_JACK_HEADPHONE);
+
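+		/* Report a wakeup event and hold off suspend for 300 ms. */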
+		pm_wakeup_event(&rt298->i2c->dev, 300);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int rt298_probe(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->codec = codec;
+
+	if (rt298->i2c->irq) {
+		regmap_update_bits(rt298->regmap,
+					RT298_IRQ_CTRL, 0x2, 0x2);
+
+		INIT_DELAYED_WORK(&rt298->jack_detect_work,
+					rt298_jack_detect_work);
+		schedule_delayed_work(&rt298->jack_detect_work,
+					msecs_to_jiffies(1250));
+	}
+
+	return 0;
+}
+
+static int rt298_remove(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	cancel_delayed_work_sync(&rt298->jack_detect_work);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int rt298_suspend(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	rt298->is_hp_in = -1;
+	regcache_cache_only(rt298->regmap, true);
+	regcache_mark_dirty(rt298->regmap);
+
+	return 0;
+}
+
+static int rt298_resume(struct snd_soc_codec *codec)
+{
+	struct rt298_priv *rt298 = snd_soc_codec_get_drvdata(codec);
+
+	regcache_cache_only(rt298->regmap, false);
+	rt298_index_sync(codec);
+	regcache_sync(rt298->regmap);
+
+	return 0;
+}
+#else
+#define rt298_suspend NULL
+#define rt298_resume NULL
+#endif
+
+#define RT298_STEREO_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+#define RT298_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static const struct snd_soc_dai_ops rt298_aif_dai_ops = {
+	.hw_params = rt298_hw_params,
+	.set_fmt = rt298_set_dai_fmt,
+	.set_sysclk = rt298_set_dai_sysclk,
+	.set_bclk_ratio = rt298_set_bclk_ratio,
+};
+
+static struct snd_soc_dai_driver rt298_dai[] = {
+	{
+		.name = "rt298-aif1",
+		.id = RT298_AIF1,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.ops = &rt298_aif_dai_ops,
+		.symmetric_rates = 1,
+	},
+	{
+		.name = "rt298-aif2",
+		.id = RT298_AIF2,
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.channels_min = 1,
+			.channels_max = 2,
+			.rates = RT298_STEREO_RATES,
+			.formats = RT298_FORMATS,
+		},
+		.ops = &rt298_aif_dai_ops,
+		.symmetric_rates = 1,
+	},
+
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt298 = {
+	.probe = rt298_probe,
+	.remove = rt298_remove,
+	.suspend = rt298_suspend,
+	.resume = rt298_resume,
+	.set_bias_level = rt298_set_bias_level,
+	.idle_bias_off = true,
+	.controls = rt298_snd_controls,
+	.num_controls = ARRAY_SIZE(rt298_snd_controls),
+	.dapm_widgets = rt298_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rt298_dapm_widgets),
+	.dapm_routes = rt298_dapm_routes,
+	.num_dapm_routes = ARRAY_SIZE(rt298_dapm_routes),
+};
+
+static const struct regmap_config rt298_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+	.max_register = 0x02370100,
+	.volatile_reg = rt298_volatile_register,
+	.readable_reg = rt298_readable_register,
+	.reg_write = rl6347a_hw_write,
+	.reg_read = rl6347a_hw_read,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_defaults = rt298_reg,
+	.num_reg_defaults = ARRAY_SIZE(rt298_reg),
+};
+
+static const struct i2c_device_id rt298_i2c_id[] = {
+	{"rt298", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, rt298_i2c_id);
+
+static const struct acpi_device_id rt298_acpi_match[] = {
+	{ "INT343A", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, rt298_acpi_match);
+
+static int rt298_i2c_probe(struct i2c_client *i2c,
+			   const struct i2c_device_id *id)
+{
+	struct rt298_platform_data *pdata = dev_get_platdata(&i2c->dev);
+	struct rt298_priv *rt298;
+	struct device *dev = &i2c->dev;
+	const struct acpi_device_id *acpiid;
+	int i, ret;
+
+	rt298 = devm_kzalloc(&i2c->dev,	sizeof(*rt298),
+				GFP_KERNEL);
+	if (!rt298)
+		return -ENOMEM;
+
+	rt298->regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt298_regmap);
+	if (IS_ERR(rt298->regmap)) {
+		ret = PTR_ERR(rt298->regmap);
+		dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+			ret);
+		return ret;
+	}
+
+	regmap_read(rt298->regmap,
+		RT298_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &ret);
+	if (ret != RT298_VENDOR_ID) {
+		dev_err(&i2c->dev,
+			"Device with ID register %#x is not rt298\n", ret);
+		return -ENODEV;
+	}
+
+	rt298->index_cache = rt298_index_def;
+	rt298->index_cache_size = INDEX_CACHE_SIZE;
+	rt298->i2c = i2c;
+	i2c_set_clientdata(i2c, rt298);
+
+	/* restore codec default */
+	for (i = 0; i < INDEX_CACHE_SIZE; i++)
+		regmap_write(rt298->regmap, rt298->index_cache[i].reg,
+				rt298->index_cache[i].def);
+	for (i = 0; i < ARRAY_SIZE(rt298_reg); i++)
+		regmap_write(rt298->regmap, rt298_reg[i].reg,
+				rt298_reg[i].def);
+
+	if (pdata)
+		rt298->pdata = *pdata;
+
+	/* enable jack combo mode on supported devices */
+	acpiid = acpi_match_device(dev->driver->acpi_match_table, dev);
+	if (acpiid) {
+		rt298->pdata = *(struct rt298_platform_data *)
+				acpiid->driver_data;
+	}
+
+	/* VREF Charging */
+	regmap_update_bits(rt298->regmap, 0x04, 0x80, 0x80);
+	regmap_update_bits(rt298->regmap, 0x1b, 0x860, 0x860);
+	/* Vref2 */
+	regmap_update_bits(rt298->regmap, 0x08, 0x20, 0x20);
+
+	regmap_write(rt298->regmap, RT298_SET_AUDIO_POWER, AC_PWRST_D3);
+
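+	/* Put every supported power-control widget into state D1. */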
+	for (i = 0; i < RT298_POWER_REG_LEN; i++)
+		regmap_write(rt298->regmap,
+			RT298_SET_POWER(rt298_support_power_controls[i]),
+			AC_PWRST_D1);
+
+	if (!rt298->pdata.cbj_en) {
+		regmap_write(rt298->regmap, RT298_CBJ_CTRL2, 0x0000);
+		regmap_write(rt298->regmap, RT298_MIC1_DET_CTRL, 0x0816);
+		regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xf000, 0xb000);
+	} else {
+		regmap_update_bits(rt298->regmap,
+					RT298_CBJ_CTRL1, 0xf000, 0x5000);
+	}
+
+	mdelay(10);
+
+	if (!rt298->pdata.gpio2_en)
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000);
+	else
+		regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0);
+
+	mdelay(10);
+
+	regmap_write(rt298->regmap, RT298_MISC_CTRL1, 0x0000);
+	regmap_update_bits(rt298->regmap,
+				RT298_WIND_FILTER_CTRL, 0x0082, 0x0082);
+	regmap_update_bits(rt298->regmap, RT298_IRQ_CTRL, 0x2, 0x2);
+	rt298->is_hp_in = -1;
+
+	if (rt298->i2c->irq) {
+		ret = request_threaded_irq(rt298->i2c->irq, NULL, rt298_irq,
+			IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "rt298", rt298);
+		if (ret != 0) {
+			dev_err(&i2c->dev,
+				"Failed to request IRQ: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt298,
+				     rt298_dai, ARRAY_SIZE(rt298_dai));
+
+	return ret;
+}
+
+static int rt298_i2c_remove(struct i2c_client *i2c)
+{
+	struct rt298_priv *rt298 = i2c_get_clientdata(i2c);
+
+	if (i2c->irq)
+		free_irq(i2c->irq, rt298);
+	snd_soc_unregister_codec(&i2c->dev);
+
+	return 0;
+}
+
+static struct i2c_driver rt298_i2c_driver = {
+	.driver = {
+		   .name = "rt298",
+		   .acpi_match_table = ACPI_PTR(rt298_acpi_match),
+		   },
+	.probe = rt298_i2c_probe,
+	.remove = rt298_i2c_remove,
+	.id_table = rt298_i2c_id,
+};
+
+module_i2c_driver(rt298_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT298 driver");
+MODULE_AUTHOR("Bard Liao <bardliao@realtek.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
new file mode 100644
index 0000000..31da162
--- /dev/null
+++ b/sound/soc/codecs/rt298.h
@@ -0,0 +1,206 @@
+/*
+ * rt298.h  --  RT298 ALSA SoC audio driver
+ *
+ * Copyright 2011 Realtek Microelectronics
+ * Author: Johnny Hsu <johnnyhsu@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT298_H__
+#define __RT298_H__
+
+#define VERB_CMD(V, N, D) ((N << 20) | (V << 8) | D)
+
+#define RT298_AUDIO_FUNCTION_GROUP			0x01
+#define RT298_DAC_OUT1					0x02
+#define RT298_DAC_OUT2					0x03
+#define RT298_DIG_CVT					0x06
+#define RT298_ADC_IN1					0x09
+#define RT298_ADC_IN2					0x08
+#define RT298_MIXER_IN					0x0b
+#define RT298_MIXER_OUT1				0x0c
+#define RT298_MIXER_OUT2				0x0d
+#define RT298_DMIC1					0x12
+#define RT298_DMIC2					0x13
+#define RT298_SPK_OUT					0x14
+#define RT298_MIC1					0x18
+#define RT298_LINE1					0x1a
+#define RT298_BEEP					0x1d
+#define RT298_SPDIF					0x1e
+#define RT298_VENDOR_REGISTERS				0x20
+#define RT298_HP_OUT					0x21
+#define RT298_MIXER_IN1					0x22
+#define RT298_MIXER_IN2					0x23
+
+#define RT298_SET_PIN_SFT				6
+#define RT298_SET_PIN_ENABLE				0x40
+#define RT298_SET_PIN_DISABLE				0
+#define RT298_SET_EAPD_HIGH				0x2
+#define RT298_SET_EAPD_LOW				0
+
+#define RT298_MUTE_SFT					7
+
+/* Verb commands */
+#define RT298_GET_PARAM(NID, PARAM) VERB_CMD(AC_VERB_PARAMETERS, NID, PARAM)
+#define RT298_SET_POWER(NID) VERB_CMD(AC_VERB_SET_POWER_STATE, NID, 0)
+#define RT298_SET_AUDIO_POWER RT298_SET_POWER(RT298_AUDIO_FUNCTION_GROUP)
+#define RT298_SET_HPO_POWER RT298_SET_POWER(RT298_HP_OUT)
+#define RT298_SET_SPK_POWER RT298_SET_POWER(RT298_SPK_OUT)
+#define RT298_SET_DMIC1_POWER RT298_SET_POWER(RT298_DMIC1)
+#define RT298_SPK_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_SPK_OUT, 0)
+#define RT298_HPO_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_HP_OUT, 0)
+#define RT298_ADC0_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_MIXER_IN1, 0)
+#define RT298_ADC1_MUX\
+	VERB_CMD(AC_VERB_SET_CONNECT_SEL, RT298_MIXER_IN2, 0)
+#define RT298_SET_MIC1\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_MIC1, 0)
+#define RT298_SET_PIN_HPO\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_HP_OUT, 0)
+#define RT298_SET_PIN_SPK\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_SPK_OUT, 0)
+#define RT298_SET_PIN_DMIC1\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_DMIC1, 0)
+#define RT298_SET_PIN_SPDIF\
+	VERB_CMD(AC_VERB_SET_PIN_WIDGET_CONTROL, RT298_SPDIF, 0)
+#define RT298_SET_PIN_DIG_CVT\
+	VERB_CMD(AC_VERB_SET_DIGI_CONVERT_1, RT298_DIG_CVT, 0)
+#define RT298_SPK_EAPD\
+	VERB_CMD(AC_VERB_SET_EAPD_BTLENABLE, RT298_SPK_OUT, 0)
+#define RT298_SET_AMP_GAIN_HPO\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0)
+#define RT298_SET_AMP_GAIN_ADC_IN1\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0)
+#define RT298_SET_AMP_GAIN_ADC_IN2\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN2, 0)
+#define RT298_GET_HP_SENSE\
+	VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_HP_OUT, 0)
+#define RT298_GET_MIC1_SENSE\
+	VERB_CMD(AC_VERB_GET_PIN_SENSE, RT298_MIC1, 0)
+#define RT298_SET_DMIC2_DEFAULT\
+	VERB_CMD(AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, RT298_DMIC2, 0)
+#define RT298_SET_SPDIF_DEFAULT\
+	VERB_CMD(AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, RT298_SPDIF, 0)
+#define RT298_DACL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_DAC_OUT1, 0xa000)
+#define RT298_DACR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_DAC_OUT1, 0x9000)
+#define RT298_ADCL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0x6000)
+#define RT298_ADCR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_ADC_IN1, 0x5000)
+#define RT298_MIC_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIC1, 0x7000)
+#define RT298_SPOL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_SPK_OUT, 0xa000)
+#define RT298_SPOR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_SPK_OUT, 0x9000)
+#define RT298_HPOL_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0xa000)
+#define RT298_HPOR_GAIN\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_HP_OUT, 0x9000)
+#define RT298_F_DAC_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_OUT1, 0x7000)
+#define RT298_F_RECMIX_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_OUT1, 0x7100)
+#define RT298_REC_MIC_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7000)
+#define RT298_REC_I2S_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7100)
+#define RT298_REC_LINE_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7200)
+#define RT298_REC_BEEP_SWITCH\
+	VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, RT298_MIXER_IN, 0x7300)
+#define RT298_DAC_FORMAT\
+	VERB_CMD(AC_VERB_SET_STREAM_FORMAT, RT298_DAC_OUT1, 0)
+#define RT298_ADC_FORMAT\
+	VERB_CMD(AC_VERB_SET_STREAM_FORMAT, RT298_ADC_IN1, 0)
+#define RT298_COEF_INDEX\
+	VERB_CMD(AC_VERB_SET_COEF_INDEX, RT298_VENDOR_REGISTERS, 0)
+#define RT298_PROC_COEF\
+	VERB_CMD(AC_VERB_SET_PROC_COEF, RT298_VENDOR_REGISTERS, 0)
+
+/* Index registers */
+#define RT298_A_BIAS_CTRL1	0x01
+#define RT298_A_BIAS_CTRL2	0x02
+#define RT298_POWER_CTRL1	0x03
+#define RT298_A_BIAS_CTRL3	0x04
+#define RT298_POWER_CTRL2	0x08
+#define RT298_I2S_CTRL1		0x09
+#define RT298_I2S_CTRL2		0x0a
+#define RT298_CLK_DIV		0x0b
+#define RT298_DC_GAIN		0x0d
+#define RT298_POWER_CTRL3	0x0f
+#define RT298_MIC1_DET_CTRL	0x19
+#define RT298_MISC_CTRL1	0x20
+#define RT298_IRQ_CTRL		0x33
+#define RT298_WIND_FILTER_CTRL	0x46
+#define RT298_PLL_CTRL1		0x49
+#define RT298_CBJ_CTRL1		0x4f
+#define RT298_CBJ_CTRL2		0x50
+#define RT298_PLL_CTRL		0x63
+#define RT298_DEPOP_CTRL1	0x66
+#define RT298_DEPOP_CTRL2	0x67
+#define RT298_DEPOP_CTRL3	0x68
+#define RT298_DEPOP_CTRL4	0x69
+
+/* SPDIF (0x06) */
+#define RT298_SPDIF_SEL_SFT	0
+#define RT298_SPDIF_SEL_PCM0	0
+#define RT298_SPDIF_SEL_PCM1	1
+#define RT298_SPDIF_SEL_SPOUT	2
+#define RT298_SPDIF_SEL_PP	3
+
+/* RECMIX (0x0b) */
+#define RT298_M_REC_BEEP_SFT	0
+#define RT298_M_REC_LINE1_SFT	1
+#define RT298_M_REC_MIC1_SFT	2
+#define RT298_M_REC_I2S_SFT	3
+
+/* Front (0x0c) */
+#define RT298_M_FRONT_DAC_SFT	0
+#define RT298_M_FRONT_REC_SFT	1
+
+/* SPK-OUT (0x14) */
+#define RT298_M_SPK_MUX_SFT	14
+#define RT298_SPK_SEL_MASK	0x1
+#define RT298_SPK_SEL_SFT	0
+#define RT298_SPK_SEL_F		0
+#define RT298_SPK_SEL_S		1
+
+/* HP-OUT (0x21) */
+#define RT298_M_HP_MUX_SFT	14
+#define RT298_HP_SEL_MASK	0x1
+#define RT298_HP_SEL_SFT	0
+#define RT298_HP_SEL_F		0
+#define RT298_HP_SEL_S		1
+
+/* ADC (0x22) (0x23) */
+#define RT298_ADC_SEL_MASK	0x7
+#define RT298_ADC_SEL_SFT	0
+#define RT298_ADC_SEL_SURR	0
+#define RT298_ADC_SEL_FRONT	1
+#define RT298_ADC_SEL_DMIC	2
+#define RT298_ADC_SEL_BEEP	4
+#define RT298_ADC_SEL_LINE1	5
+#define RT298_ADC_SEL_I2S	6
+#define RT298_ADC_SEL_MIC1	7
+
+#define RT298_SCLK_S_MCLK	0
+#define RT298_SCLK_S_PLL	1
+
+enum {
+	RT298_AIF1,
+	RT298_AIF2,
+	RT298_AIFS,
+};
+
+int rt298_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack);
+
+#endif /* __RT298_H__ */
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 058167c..1be2bab 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -174,16 +174,15 @@
 static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -95625, 375, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52}dB */
-static unsigned int mic_bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(mic_bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static int rt5631_dmic_get(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
@@ -1725,7 +1724,6 @@
 static struct i2c_driver rt5631_i2c_driver = {
 	.driver = {
 		.name = "rt5631",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(rt5631_i2c_dt_ids),
 	},
 	.probe = rt5631_i2c_probe,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index 9bc78e5..e1ceeb8 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -51,7 +51,7 @@
 	  .window_len = 0x1, },
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5640_PR_BASE + 0x3d,	0x3600},
 	{RT5640_PR_BASE + 0x12,	0x0aa8},
 	{RT5640_PR_BASE + 0x14,	0x0aaa},
@@ -347,16 +347,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5640_data_select[] = {
@@ -459,10 +458,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5640->sysclk);
-
+	rate = rt5640->sysclk / rl6231_get_pre_div(rt5640->regmap,
+		RT5640_ADDA_CLK1, RT5640_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -984,6 +984,35 @@
 	return 0;
 }
 
+static int rt5640_lout_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		hp_amp_power_on(codec);
+		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+			RT5640_PWR_LM, RT5640_PWR_LM);
+		snd_soc_update_bits(codec, RT5640_OUTPUT,
+			RT5640_L_MUTE | RT5640_R_MUTE, 0);
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, RT5640_OUTPUT,
+			RT5640_L_MUTE | RT5640_R_MUTE,
+			RT5640_L_MUTE | RT5640_R_MUTE);
+		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
+			RT5640_PWR_LM, 0);
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
 			   struct snd_kcontrol *kcontrol, int event)
 {
@@ -1179,13 +1208,16 @@
 		0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
 	SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
 		0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
-	SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0,
+	SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
 		rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
 	SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
 		0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
 		rt5640_hp_event,
 		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+	SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
+		rt5640_lout_event,
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
 	SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
 		RT5640_PWR_HP_L_BIT, 0, NULL, 0),
 	SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
@@ -1500,8 +1532,10 @@
 	{"HP R Playback", "Switch", "HP Amp"},
 	{"HPOL", NULL, "HP L Playback"},
 	{"HPOR", NULL, "HP R Playback"},
-	{"LOUTL", NULL, "LOUT MIX"},
-	{"LOUTR", NULL, "LOUT MIX"},
+
+	{"LOUT amp", NULL, "LOUT MIX"},
+	{"LOUTL", NULL, "LOUT amp"},
+	{"LOUTR", NULL, "LOUT amp"},
 };
 
 static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = {
@@ -2207,7 +2241,7 @@
 	regmap_read(rt5640->regmap, RT5640_VENDOR_ID2, &val);
 	if (val != RT5640_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5640/39\n", val);
+			"Device with ID register %#x is not rt5640/39\n", val);
 		return -ENODEV;
 	}
 
@@ -2242,7 +2276,6 @@
 static struct i2c_driver rt5640_i2c_driver = {
 	.driver = {
 		.name = "rt5640",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5640_acpi_match),
 		.of_match_table = of_match_ptr(rt5640_of_match),
 	},
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 961bd7e..4972bf3 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -21,6 +21,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
+#include <linux/regulator/consumer.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -54,7 +55,7 @@
 	},
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5645_PR_BASE + 0x3d,	0x3600},
 	{RT5645_PR_BASE + 0x1c,	0xfd20},
 	{RT5645_PR_BASE + 0x20,	0x611f},
@@ -63,7 +64,7 @@
 };
 #define RT5645_INIT_REG_LEN ARRAY_SIZE(init_list)
 
-static const struct reg_default rt5650_init_list[] = {
+static const struct reg_sequence rt5650_init_list[] = {
 	{0xf6,	0x0100},
 };
 
@@ -223,6 +224,39 @@
 	{ 0xff, 0x6308 },
 };
 
+static const char *const rt5645_supply_names[] = {
+	"avdd",
+	"cpvdd",
+};
+
+struct rt5645_priv {
+	struct snd_soc_codec *codec;
+	struct rt5645_platform_data pdata;
+	struct regmap *regmap;
+	struct i2c_client *i2c;
+	struct gpio_desc *gpiod_hp_det;
+	struct snd_soc_jack *hp_jack;
+	struct snd_soc_jack *mic_jack;
+	struct snd_soc_jack *btn_jack;
+	struct delayed_work jack_detect_work;
+	struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)];
+
+	int codec_type;
+	int sysclk;
+	int sysclk_src;
+	int lrck[RT5645_AIFS];
+	int bclk[RT5645_AIFS];
+	int master[RT5645_AIFS];
+
+	int pll_src;
+	int pll_in;
+	int pll_out;
+
+	int jack_type;
+	bool en_button_func;
+	bool hp_on;
+};
+
 static int rt5645_reset(struct snd_soc_codec *codec)
 {
 	return snd_soc_write(codec, RT5645_RESET, 0);
@@ -360,6 +394,7 @@
 	case RT5645_DEPOP_M1:
 	case RT5645_DEPOP_M2:
 	case RT5645_DEPOP_M3:
+	case RT5645_CHARGE_PUMP:
 	case RT5645_MICBIAS:
 	case RT5645_A_JD_CTRL1:
 	case RT5645_VAD_CTRL4:
@@ -424,16 +459,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static const struct snd_kcontrol_new rt5645_snd_controls[] = {
 	/* Speaker Output Volume */
@@ -510,10 +544,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5645->sysclk);
-
+	rate = rt5645->sysclk / rl6231_get_pre_div(rt5645->regmap,
+		RT5645_ADDA_CLK1, RT5645_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -1331,15 +1366,23 @@
 	if (on) {
 		if (hp_amp_power_count <= 0) {
 			if (rt5645->codec_type == CODEC_TYPE_RT5650) {
+				snd_soc_write(codec, RT5645_DEPOP_M2, 0x3100);
 				snd_soc_write(codec, RT5645_CHARGE_PUMP,
 					0x0e06);
-				snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
+				snd_soc_write(codec, RT5645_DEPOP_M1, 0x000d);
+				regmap_write(rt5645->regmap, RT5645_PR_BASE +
+					RT5645_HP_DCC_INT1, 0x9f01);
+				msleep(20);
+				snd_soc_update_bits(codec, RT5645_DEPOP_M1,
+					RT5645_HP_CO_MASK, RT5645_HP_CO_EN);
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					0x3e, 0x7400);
 				snd_soc_write(codec, RT5645_DEPOP_M3, 0x0737);
 				regmap_write(rt5645->regmap, RT5645_PR_BASE +
 					RT5645_MAMP_INT_REG2, 0xfc00);
 				snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140);
+				mdelay(5);
+				rt5645->hp_on = true;
 			} else {
 				/* depop parameters */
 				snd_soc_update_bits(codec, RT5645_DEPOP_M2,
@@ -1553,6 +1596,27 @@
 	return 0;
 }
 
+static int rt5650_hp_event(struct snd_soc_dapm_widget *w,
+		struct snd_kcontrol *k, int  event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (rt5645->hp_on) {
+			msleep(100);
+			rt5645->hp_on = false;
+		}
+		break;
+
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
 static const struct snd_soc_dapm_widget rt5645_dapm_widgets[] = {
 	SND_SOC_DAPM_SUPPLY("LDO2", RT5645_PWR_MIXER,
 		RT5645_PWR_LDO2_BIT, 0, NULL, 0),
@@ -1697,15 +1761,6 @@
 	SND_SOC_DAPM_PGA("IF1_ADC4", SND_SOC_NOPM, 0, 0, NULL, 0),
 
 	/* IF1 2 Mux */
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc1_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc2_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc3_in_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
-		0, 0, &rt5645_if1_adc_in_mux),
-
 	SND_SOC_DAPM_MUX("IF2 ADC Mux", SND_SOC_NOPM,
 		0, 0, &rt5645_if2_adc_in_mux),
 
@@ -1716,14 +1771,6 @@
 	SND_SOC_DAPM_PGA("IF1 DAC1", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC2", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 DAC3", SND_SOC_NOPM, 0, 0, NULL, 0),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac0_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac1_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac2_tdm_sel_mux),
-	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
-		&rt5645_if1_dac3_tdm_sel_mux),
 	SND_SOC_DAPM_PGA("IF1 ADC", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 ADC L", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_PGA("IF1 ADC R", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -1854,6 +1901,26 @@
 	SND_SOC_DAPM_OUTPUT("PDM1R"),
 	SND_SOC_DAPM_OUTPUT("SPOL"),
 	SND_SOC_DAPM_OUTPUT("SPOR"),
+	SND_SOC_DAPM_POST("DAPM_POST", rt5650_hp_event),
+};
+
+static const struct snd_soc_dapm_widget rt5645_specific_dapm_widgets[] = {
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac0_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC1 R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac1_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 L Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac2_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 DAC2 R Mux", SND_SOC_NOPM, 0, 0,
+		&rt5645_if1_dac3_tdm_sel_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC1 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc1_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC2 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc2_in_mux),
+	SND_SOC_DAPM_MUX("RT5645 IF1 ADC3 Swap Mux", SND_SOC_NOPM,
+		0, 0, &rt5645_if1_adc3_in_mux),
 };
 
 static const struct snd_soc_dapm_widget rt5650_specific_dapm_widgets[] = {
@@ -2642,7 +2709,7 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_PREPARE:
-		if (SND_SOC_BIAS_STANDBY == codec->dapm.bias_level) {
+		if (SND_SOC_BIAS_STANDBY == snd_soc_codec_get_bias_level(codec)) {
 			snd_soc_update_bits(codec, RT5645_PWR_ANLG1,
 				RT5645_PWR_VREF1 | RT5645_PWR_MB |
 				RT5645_PWR_BG | RT5645_PWR_VREF2,
@@ -2686,94 +2753,15 @@
 	return 0;
 }
 
-static int rt5650_calibration(struct rt5645_priv *rt5645)
-{
-	int val, i;
-	int ret = -1;
-
-	regcache_cache_bypass(rt5645->regmap, true);
-	regmap_write(rt5645->regmap, RT5645_RESET, 0);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0800);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_CHOP_DAC_ADC,
-		0x3600);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x25, 0x7000);
-	regmap_write(rt5645->regmap, RT5645_I2S1_SDP, 0x8008);
-	/* headset type */
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL1, 0x2061);
-	regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0x2012);
-	regmap_write(rt5645->regmap, RT5645_PWR_MIXER, 0x0002);
-	regmap_write(rt5645->regmap, RT5645_PWR_VOL, 0x0020);
-	regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x1827);
-	regmap_write(rt5645->regmap, RT5645_IN1_CTRL2, 0x0827);
-	msleep(400);
-	/* Inline command */
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0001);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
-	/* Calbration */
-	regmap_write(rt5645->regmap, RT5645_GLB_CLK, 0x8000);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD2, 0xc000);
-	regmap_write(rt5645->regmap, RT5650_4BTN_IL_CMD1, 0x0008);
-	regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x8800);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG1, 0xe8fa);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x8c04);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x3100);
-	regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0e06);
-	regmap_write(rt5645->regmap, RT5645_BASS_BACK, 0x8a13);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL3, 0x0820);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x000d);
-	/* Power on and Calbration */
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_HP_DCC_INT1,
-		0x9f01);
-	msleep(200);
-	for (i = 0; i < 5; i++) {
-		regmap_read(rt5645->regmap, RT5645_PR_BASE + 0x7a, &val);
-		if (val != 0 && val != 0x3f3f) {
-			ret = 0;
-			break;
-		}
-		msleep(50);
-	}
-	pr_debug("%s: PR-7A = 0x%x\n", __func__, val);
-
-	/* mute */
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + 0x3e, 0x7400);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M3, 0x0737);
-	regmap_write(rt5645->regmap, RT5645_PR_BASE + RT5645_MAMP_INT_REG2,
-		0xfc00);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M2, 0x1140);
-	regmap_write(rt5645->regmap, RT5645_DEPOP_M1, 0x0000);
-	regmap_write(rt5645->regmap, RT5645_GEN_CTRL2, 0x4020);
-	regmap_write(rt5645->regmap, RT5645_PWR_ANLG2, 0x0006);
-	regmap_write(rt5645->regmap, RT5645_PWR_DIG2, 0x0000);
-	msleep(350);
-
-	regcache_cache_bypass(rt5645->regmap, false);
-
-	return ret;
-}
-
 static void rt5645_enable_push_button_irq(struct snd_soc_codec *codec,
 	bool enable)
 {
-	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 
 	if (enable) {
-		snd_soc_dapm_mutex_lock(&codec->dapm);
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"ADC L power");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"ADC R power");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"LDO2");
-		snd_soc_dapm_force_enable_pin_unlocked(&codec->dapm,
-							"Mic Det Power");
-		snd_soc_dapm_sync_unlocked(&codec->dapm);
-		snd_soc_dapm_mutex_unlock(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "ADC L power");
+		snd_soc_dapm_force_enable_pin(dapm, "ADC R power");
+		snd_soc_dapm_sync(dapm);
 
 		snd_soc_update_bits(codec,
 					RT5645_INT_IRQ_ST, 0x8, 0x8);
@@ -2786,36 +2774,26 @@
 		snd_soc_update_bits(codec, RT5650_4BTN_IL_CMD2, 0x8000, 0x0);
 		snd_soc_update_bits(codec, RT5645_INT_IRQ_ST, 0x8, 0x0);
 
-		snd_soc_dapm_mutex_lock(&codec->dapm);
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"ADC L power");
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"ADC R power");
-		if (rt5645->pdata.jd_mode == 0)
-			snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-								"LDO2");
-		snd_soc_dapm_disable_pin_unlocked(&codec->dapm,
-							"Mic Det Power");
-		snd_soc_dapm_sync_unlocked(&codec->dapm);
-		snd_soc_dapm_mutex_unlock(&codec->dapm);
+		snd_soc_dapm_disable_pin(dapm, "ADC L power");
+		snd_soc_dapm_disable_pin(dapm, "ADC R power");
+		snd_soc_dapm_sync(dapm);
 	}
 }
 
 static int rt5645_jack_detect(struct snd_soc_codec *codec, int jack_insert)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
 	unsigned int val;
 
 	if (jack_insert) {
 		regmap_write(rt5645->regmap, RT5645_CHARGE_PUMP, 0x0006);
 
-		if (codec->component.card->instantiated) {
-			/* for jack type detect */
-			snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
-			snd_soc_dapm_force_enable_pin(&codec->dapm,
-				"Mic Det Power");
-			snd_soc_dapm_sync(&codec->dapm);
-		} else {
+		/* for jack type detect */
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_force_enable_pin(dapm, "Mic Det Power");
+		snd_soc_dapm_sync(dapm);
+		if (!dapm->card->instantiated) {
 			/* Power up necessary bits for JD if dapm is
 			   not ready yet */
 			regmap_update_bits(rt5645->regmap, RT5645_PWR_ANLG1,
@@ -2828,14 +2806,15 @@
 		}
 
 		regmap_write(rt5645->regmap, RT5645_JD_CTRL3, 0x00f0);
-		regmap_write(rt5645->regmap, RT5645_IN1_CTRL1, 0x0006);
-		regmap_update_bits(rt5645->regmap,
-				   RT5645_IN1_CTRL2, 0x1000, 0x1000);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+			RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
 		msleep(100);
-		regmap_update_bits(rt5645->regmap,
-				   RT5645_IN1_CTRL2, 0x1000, 0x0000);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, 0);
 
-		msleep(450);
+		msleep(600);
 		regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
 		val &= 0x7;
 		dev_dbg(codec->dev, "val = %d\n", val);
@@ -2846,43 +2825,46 @@
 				rt5645_enable_push_button_irq(codec, true);
 			}
 		} else {
-			if (codec->component.card->instantiated) {
-				snd_soc_dapm_disable_pin(&codec->dapm,
-					"Mic Det Power");
-				snd_soc_dapm_sync(&codec->dapm);
-			} else
-				regmap_update_bits(rt5645->regmap,
-					RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
+			snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+			snd_soc_dapm_sync(dapm);
 			rt5645->jack_type = SND_JACK_HEADPHONE;
 		}
 
+		snd_soc_update_bits(codec, RT5645_CHARGE_PUMP, 0x0300, 0x0200);
+		snd_soc_write(codec, RT5645_DEPOP_M1, 0x001d);
+		snd_soc_write(codec, RT5645_DEPOP_M1, 0x0001);
 	} else { /* jack out */
 		rt5645->jack_type = 0;
+
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
+			RT5645_CBJ_MN_JD, RT5645_CBJ_MN_JD);
+		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
+			RT5645_CBJ_BST1_EN, 0);
+
 		if (rt5645->en_button_func)
 			rt5645_enable_push_button_irq(codec, false);
-		else {
-			if (codec->component.card->instantiated) {
-				if (rt5645->pdata.jd_mode == 0)
-					snd_soc_dapm_disable_pin(&codec->dapm,
-						"LDO2");
-				snd_soc_dapm_disable_pin(&codec->dapm,
-					"Mic Det Power");
-				snd_soc_dapm_sync(&codec->dapm);
-			} else {
-				if (rt5645->pdata.jd_mode == 0)
-					regmap_update_bits(rt5645->regmap,
-						RT5645_PWR_MIXER,
-						RT5645_PWR_LDO2, 0);
-				regmap_update_bits(rt5645->regmap,
-					RT5645_PWR_VOL, RT5645_PWR_MIC_DET, 0);
-			}
-		}
+
+		if (rt5645->pdata.jd_mode == 0)
+			snd_soc_dapm_disable_pin(dapm, "LDO2");
+		snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return rt5645->jack_type;
 }
 
-static int rt5645_irq_detection(struct rt5645_priv *rt5645);
+static int rt5645_button_detect(struct snd_soc_codec *codec)
+{
+	int btn_type, val;
+
+	val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
+	pr_debug("val=0x%x\n", val);
+	btn_type = val & 0xfff0;
+	snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
+
+	return btn_type;
+}
+
 static irqreturn_t rt5645_irq(int irq, void *data);
 
 int rt5645_set_jack_detect(struct snd_soc_codec *codec,
@@ -2913,38 +2895,10 @@
 {
 	struct rt5645_priv *rt5645 =
 		container_of(work, struct rt5645_priv, jack_detect_work.work);
-
-	rt5645_irq_detection(rt5645);
-}
-
-static irqreturn_t rt5645_irq(int irq, void *data)
-{
-	struct rt5645_priv *rt5645 = data;
-
-	queue_delayed_work(system_power_efficient_wq,
-			   &rt5645->jack_detect_work, msecs_to_jiffies(250));
-
-	return IRQ_HANDLED;
-}
-
-static int rt5645_button_detect(struct snd_soc_codec *codec)
-{
-	int btn_type, val;
-
-	val = snd_soc_read(codec, RT5650_4BTN_IL_CMD1);
-	pr_debug("val=0x%x\n", val);
-	btn_type = val & 0xfff0;
-	snd_soc_write(codec, RT5650_4BTN_IL_CMD1, val);
-
-	return btn_type;
-}
-
-static int rt5645_irq_detection(struct rt5645_priv *rt5645)
-{
 	int val, btn_type, gpio_state = 0, report = 0;
 
 	if (!rt5645->codec)
-		return -EINVAL;
+		return;
 
 	switch (rt5645->pdata.jd_mode) {
 	case 0: /* Not using rt5645 JD */
@@ -2958,7 +2912,7 @@
 				    report, SND_JACK_HEADPHONE);
 		snd_soc_jack_report(rt5645->mic_jack,
 				    report, SND_JACK_MICROPHONE);
-		return report;
+		return;
 	case 1: /* 2 port */
 		val = snd_soc_read(rt5645->codec, RT5645_A_JD_CTRL1) & 0x0070;
 		break;
@@ -3040,27 +2994,39 @@
 		snd_soc_jack_report(rt5645->btn_jack,
 			report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
 				SND_JACK_BTN_2 | SND_JACK_BTN_3);
+}
 
-	return report;
+static irqreturn_t rt5645_irq(int irq, void *data)
+{
+	struct rt5645_priv *rt5645 = data;
+
+	queue_delayed_work(system_power_efficient_wq,
+			   &rt5645->jack_detect_work, msecs_to_jiffies(250));
+
+	return IRQ_HANDLED;
 }
 
 static int rt5645_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
 	struct rt5645_priv *rt5645 = snd_soc_codec_get_drvdata(codec);
 
 	rt5645->codec = codec;
 
 	switch (rt5645->codec_type) {
 	case CODEC_TYPE_RT5645:
-		snd_soc_dapm_add_routes(&codec->dapm,
+		snd_soc_dapm_new_controls(dapm,
+			rt5645_specific_dapm_widgets,
+			ARRAY_SIZE(rt5645_specific_dapm_widgets));
+		snd_soc_dapm_add_routes(dapm,
 			rt5645_specific_dapm_routes,
 			ARRAY_SIZE(rt5645_specific_dapm_routes));
 		break;
 	case CODEC_TYPE_RT5650:
-		snd_soc_dapm_new_controls(&codec->dapm,
+		snd_soc_dapm_new_controls(dapm,
 			rt5650_specific_dapm_widgets,
 			ARRAY_SIZE(rt5650_specific_dapm_widgets));
-		snd_soc_dapm_add_routes(&codec->dapm,
+		snd_soc_dapm_add_routes(dapm,
 			rt5650_specific_dapm_routes,
 			ARRAY_SIZE(rt5650_specific_dapm_routes));
 		break;
@@ -3070,9 +3036,9 @@
 
 	/* for JD function */
 	if (rt5645->pdata.jd_mode) {
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "JD Power");
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "LDO2");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "JD Power");
+		snd_soc_dapm_force_enable_pin(dapm, "LDO2");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return 0;
@@ -3113,7 +3079,7 @@
 #define RT5645_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5645_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5645_aif_dai_ops = {
 	.hw_params = rt5645_hw_params,
 	.set_fmt = rt5645_set_dai_fmt,
 	.set_sysclk = rt5645_set_dai_sysclk,
@@ -3224,7 +3190,7 @@
 	return 1;
 }
 
-static struct dmi_system_id dmi_platform_intel_braswell[] = {
+static const struct dmi_system_id dmi_platform_intel_braswell[] = {
 	{
 		.ident = "Intel Strago",
 		.callback = strago_quirk_cb,
@@ -3232,6 +3198,13 @@
 			DMI_MATCH(DMI_PRODUCT_NAME, "Strago"),
 		},
 	},
+	{
+		.ident = "Google Celes",
+		.callback = strago_quirk_cb,
+		.matches = {
+			DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
+		},
+	},
 	{ }
 };
 
@@ -3254,7 +3227,7 @@
 {
 	struct rt5645_platform_data *pdata = dev_get_platdata(&i2c->dev);
 	struct rt5645_priv *rt5645;
-	int ret;
+	int ret, i;
 	unsigned int val;
 
 	rt5645 = devm_kzalloc(&i2c->dev, sizeof(struct rt5645_priv),
@@ -3288,6 +3261,24 @@
 		return ret;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
+		rt5645->supplies[i].supply = rt5645_supply_names[i];
+
+	ret = devm_regulator_bulk_get(&i2c->dev,
+				      ARRAY_SIZE(rt5645->supplies),
+				      rt5645->supplies);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(rt5645->supplies),
+				    rt5645->supplies);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+		return ret;
+	}
+
 	regmap_read(rt5645->regmap, RT5645_VENDOR_ID2, &val);
 
 	switch (val) {
@@ -3299,16 +3290,10 @@
 		break;
 	default:
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5645 or rt5650\n",
+			"Device with ID register %#x is not rt5645 or rt5650\n",
 			val);
-		return -ENODEV;
-	}
-
-	if (rt5645->codec_type == CODEC_TYPE_RT5650) {
-		ret = rt5650_calibration(rt5645);
-
-		if (ret < 0)
-			pr_err("calibration failed!\n");
+		ret = -ENODEV;
+		goto err_enable;
 	}
 
 	regmap_write(rt5645->regmap, RT5645_RESET, 0);
@@ -3398,8 +3383,6 @@
 		regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
 				   RT5645_IRQ_CLK_GATE_CTRL,
 				   RT5645_IRQ_CLK_GATE_CTRL);
-		regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1,
-				   RT5645_CBJ_BST1_EN, RT5645_CBJ_BST1_EN);
 		regmap_update_bits(rt5645->regmap, RT5645_MICBIAS,
 				   RT5645_IRQ_CLK_INT, RT5645_IRQ_CLK_INT);
 		regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
@@ -3439,12 +3422,25 @@
 		ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
 			| IRQF_ONESHOT, "rt5645", rt5645);
-		if (ret)
+		if (ret) {
 			dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
+			goto err_enable;
+		}
 	}
 
-	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
-				      rt5645_dai, ARRAY_SIZE(rt5645_dai));
+	ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
+				     rt5645_dai, ARRAY_SIZE(rt5645_dai));
+	if (ret)
+		goto err_irq;
+
+	return 0;
+
+err_irq:
+	if (rt5645->i2c->irq)
+		free_irq(rt5645->i2c->irq, rt5645);
+err_enable:
+	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
+	return ret;
 }
 
 static int rt5645_i2c_remove(struct i2c_client *i2c)
@@ -3457,18 +3453,31 @@
 	cancel_delayed_work_sync(&rt5645->jack_detect_work);
 
 	snd_soc_unregister_codec(&i2c->dev);
+	regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
 
 	return 0;
 }
 
+static void rt5645_i2c_shutdown(struct i2c_client *i2c)
+{
+	struct rt5645_priv *rt5645 = i2c_get_clientdata(i2c);
+
+	regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL3,
+		RT5645_RING2_SLEEVE_GND, RT5645_RING2_SLEEVE_GND);
+	regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2, RT5645_CBJ_MN_JD,
+		RT5645_CBJ_MN_JD);
+	regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL1, RT5645_CBJ_BST1_EN,
+		0);
+}
+
 static struct i2c_driver rt5645_i2c_driver = {
 	.driver = {
 		.name = "rt5645",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5645_acpi_match),
 	},
 	.probe = rt5645_i2c_probe,
-	.remove   = rt5645_i2c_remove,
+	.remove = rt5645_i2c_remove,
+	.shutdown = rt5645_i2c_shutdown,
 	.id_table = rt5645_i2c_id,
 };
 module_i2c_driver(rt5645_i2c_driver);
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 278bb9f..0e4cfc6 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -2115,6 +2115,7 @@
 #define RT5645_JD_PSV_MODE			(0x1 << 12)
 #define RT5645_IRQ_CLK_GATE_CTRL		(0x1 << 11)
 #define RT5645_MICINDET_MANU			(0x1 << 7)
+#define RT5645_RING2_SLEEVE_GND			(0x1 << 5)
 
 /* Vendor ID (0xfd) */
 #define RT5645_VER_C				0x2
@@ -2181,32 +2182,6 @@
 int rt5645_sel_asrc_clk_src(struct snd_soc_codec *codec,
 		unsigned int filter_mask, unsigned int clk_src);
 
-struct rt5645_priv {
-	struct snd_soc_codec *codec;
-	struct rt5645_platform_data pdata;
-	struct regmap *regmap;
-	struct i2c_client *i2c;
-	struct gpio_desc *gpiod_hp_det;
-	struct snd_soc_jack *hp_jack;
-	struct snd_soc_jack *mic_jack;
-	struct snd_soc_jack *btn_jack;
-	struct delayed_work jack_detect_work;
-
-	int codec_type;
-	int sysclk;
-	int sysclk_src;
-	int lrck[RT5645_AIFS];
-	int bclk[RT5645_AIFS];
-	int master[RT5645_AIFS];
-
-	int pll_src;
-	int pll_in;
-	int pll_out;
-
-	int jack_type;
-	bool en_button_func;
-};
-
 int rt5645_set_jack_detect(struct snd_soc_codec *codec,
 	struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
 	struct snd_soc_jack *btn_jack);
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index a3506e1..1d40318 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -46,7 +46,7 @@
 	  .window_len = 0x1, },
 };
 
-static struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5651_PR_BASE + 0x3d,	0x3e00},
 };
 
@@ -292,16 +292,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5651_data_select[] = {
@@ -378,10 +377,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5651_priv *rt5651 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5651->sysclk);
-
+	rate = rt5651->sysclk / rl6231_get_pre_div(rt5651->regmap,
+		RT5651_ADDA_CLK1, RT5651_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -1769,7 +1769,7 @@
 	regmap_read(rt5651->regmap, RT5651_DEVICE_ID, &ret);
 	if (ret != RT5651_DEVICE_ID_VALUE) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5651\n", ret);
+			"Device with ID register %#x is not rt5651\n", ret);
 		return -ENODEV;
 	}
 
@@ -1806,7 +1806,6 @@
 static struct i2c_driver rt5651_i2c_driver = {
 	.driver = {
 		.name = "rt5651",
-		.owner = THIS_MODULE,
 	},
 	.probe = rt5651_i2c_probe,
 	.remove   = rt5651_i2c_remove,
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index a9123d4..49a9e70 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -51,7 +51,7 @@
 	  .window_len = 0x1, },
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{ RT5670_PR_BASE + 0x14, 0x9a8a },
 	{ RT5670_PR_BASE + 0x38, 0x3ba1 },
 	{ RT5670_PR_BASE + 0x3d, 0x3640 },
@@ -592,16 +592,15 @@
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 /* Interface data select */
 static const char * const rt5670_data_select[] = {
@@ -683,10 +682,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5670_priv *rt5670 = snd_soc_codec_get_drvdata(codec);
-	int idx = -EINVAL;
+	int idx, rate;
 
-	idx = rl6231_calc_dmic_clk(rt5670->sysclk);
-
+	rate = rt5670->sysclk / rl6231_get_pre_div(rt5670->regmap,
+		RT5670_ADDA_CLK1, RT5670_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -2720,7 +2720,7 @@
 #define RT5670_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5670_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5670_aif_dai_ops = {
 	.hw_params = rt5670_hw_params,
 	.set_fmt = rt5670_set_dai_fmt,
 	.set_sysclk = rt5670_set_dai_sysclk,
@@ -2863,7 +2863,7 @@
 	regmap_read(rt5670->regmap, RT5670_VENDOR_ID2, &val);
 	if (val != RT5670_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5670/72\n", val);
+			"Device with ID register %#x is not rt5670/72\n", val);
 		return -ENODEV;
 	}
 
@@ -3043,7 +3043,6 @@
 static struct i2c_driver rt5670_i2c_driver = {
 	.driver = {
 		.name = "rt5670",
-		.owner = THIS_MODULE,
 		.acpi_match_table = ACPI_PTR(rt5670_acpi_match),
 	},
 	.probe = rt5670_i2c_probe,
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index ef6348c..3505aaf 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -31,84 +31,197 @@
 
 #include "rt5677-spi.h"
 
-static struct spi_device *g_spi;
+#define RT5677_SPI_BURST_LEN	240
+#define RT5677_SPI_HEADER	5
+#define RT5677_SPI_FREQ		6000000
 
-/**
- * rt5677_spi_write - Write data to SPI.
- * @txbuf: Data Buffer for writing.
- * @len: Data length.
- *
- *
- * Returns true for success.
+/* The AddressPhase and DataPhase of SPI commands are MSB first on the wire.
+ * DataPhase word size of 16-bit commands is 2 bytes.
+ * DataPhase word size of 32-bit commands is 4 bytes.
+ * DataPhase word size of burst commands is 8 bytes.
+ * The DSP CPU is little-endian.
  */
-int rt5677_spi_write(u8 *txbuf, size_t len)
+#define RT5677_SPI_WRITE_BURST	0x5
+#define RT5677_SPI_READ_BURST	0x4
+#define RT5677_SPI_WRITE_32	0x3
+#define RT5677_SPI_READ_32	0x2
+#define RT5677_SPI_WRITE_16	0x1
+#define RT5677_SPI_READ_16	0x0
+
+static struct spi_device *g_spi;
+static DEFINE_MUTEX(spi_mutex);
+
+/* Select a suitable transfer command for the next transfer to ensure
+ * the transfer address is always naturally aligned while minimizing
+ * the total number of transfers required.
+ *
+ * 3 transfer commands are available:
+ * RT5677_SPI_READ/WRITE_16:	Transfer 2 bytes
+ * RT5677_SPI_READ/WRITE_32:	Transfer 4 bytes
+ * RT5677_SPI_READ/WRITE_BURST:	Transfer any multiples of 8 bytes
+ *
+ * For example, reading 260 bytes at 0x60030002 uses the following commands:
+ * 0x60030002 RT5677_SPI_READ_16	2 bytes
+ * 0x60030004 RT5677_SPI_READ_32	4 bytes
+ * 0x60030008 RT5677_SPI_READ_BURST	240 bytes
+ * 0x600300F8 RT5677_SPI_READ_BURST	8 bytes
+ * 0x60030100 RT5677_SPI_READ_32	4 bytes
+ * 0x60030104 RT5677_SPI_READ_16	2 bytes
+ *
+ * Input:
+ * @read: true for read commands; false for write commands
+ * @align: alignment of the next transfer address
+ * @remain: number of bytes remaining to transfer
+ *
+ * Output:
+ * @len: number of bytes to transfer with the selected command
+ * Returns the selected command
+ */
+static u8 rt5677_spi_select_cmd(bool read, u32 align, u32 remain, u32 *len)
 {
-	int status;
+	u8 cmd;
 
-	status = spi_write(g_spi, txbuf, len);
+	if (align == 2 || align == 6 || remain == 2) {
+		cmd = RT5677_SPI_READ_16;
+		*len = 2;
+	} else if (align == 4 || remain <= 6) {
+		cmd = RT5677_SPI_READ_32;
+		*len = 4;
+	} else {
+		cmd = RT5677_SPI_READ_BURST;
+		*len = min_t(u32, remain & ~7, RT5677_SPI_BURST_LEN);
+	}
+	return read ? cmd : cmd + 1;
+}
 
-	if (status)
-		dev_err(&g_spi->dev, "rt5677_spi_write error %d\n", status);
+/* Copy dstlen bytes from src to dst, while reversing byte order for each word.
+ * If srclen < dstlen, the remaining bytes of dst are zero-padded.
+ */
+static void rt5677_spi_reverse(u8 *dst, u32 dstlen, const u8 *src, u32 srclen)
+{
+	u32 w, i, si;
+	u32 word_size = min_t(u32, dstlen, 8);
 
+	for (w = 0; w < dstlen; w += word_size) {
+		for (i = 0; i < word_size; i++) {
+			si = w + word_size - i - 1;
+			dst[w + i] = si < srclen ? src[si] : 0;
+		}
+	}
+}
+
+/* Read DSP address space using SPI. addr and len have to be 2-byte aligned. */
+int rt5677_spi_read(u32 addr, void *rxbuf, size_t len)
+{
+	u32 offset;
+	int status = 0;
+	struct spi_transfer t[2];
+	struct spi_message m;
+	/* +4 bytes is for the DummyPhase following the AddressPhase */
+	u8 header[RT5677_SPI_HEADER + 4];
+	u8 body[RT5677_SPI_BURST_LEN];
+	u8 spi_cmd;
+	u8 *cb = rxbuf;
+
+	if (!g_spi)
+		return -ENODEV;
+
+	if ((addr & 1) || (len & 1)) {
+		dev_err(&g_spi->dev, "Bad read align 0x%x(%zu)\n", addr, len);
+		return -EACCES;
+	}
+
+	memset(t, 0, sizeof(t));
+	t[0].tx_buf = header;
+	t[0].len = sizeof(header);
+	t[0].speed_hz = RT5677_SPI_FREQ;
+	t[1].rx_buf = body;
+	t[1].speed_hz = RT5677_SPI_FREQ;
+	spi_message_init_with_transfers(&m, t, ARRAY_SIZE(t));
+
+	for (offset = 0; offset < len; offset += t[1].len) {
+		spi_cmd = rt5677_spi_select_cmd(true, (addr + offset) & 7,
+				len - offset, &t[1].len);
+
+		/* Construct SPI message header */
+		header[0] = spi_cmd;
+		header[1] = ((addr + offset) & 0xff000000) >> 24;
+		header[2] = ((addr + offset) & 0x00ff0000) >> 16;
+		header[3] = ((addr + offset) & 0x0000ff00) >> 8;
+		header[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+		mutex_lock(&spi_mutex);
+		status |= spi_sync(g_spi, &m);
+		mutex_unlock(&spi_mutex);
+
+		/* Copy data back to caller buffer */
+		rt5677_spi_reverse(cb + offset, t[1].len, body, t[1].len);
+	}
+	return status;
+}
+EXPORT_SYMBOL_GPL(rt5677_spi_read);
+
+/* Write DSP address space using SPI. addr has to be 2-byte aligned.
+ * If len is not 2-byte aligned, an extra byte of zero is written at the end
+ * as padding.
+ */
+int rt5677_spi_write(u32 addr, const void *txbuf, size_t len)
+{
+	u32 offset, len_with_pad = len;
+	int status = 0;
+	struct spi_transfer t;
+	struct spi_message m;
+	/* +1 byte is for the DummyPhase following the DataPhase */
+	u8 buf[RT5677_SPI_HEADER + RT5677_SPI_BURST_LEN + 1];
+	u8 *body = buf + RT5677_SPI_HEADER;
+	u8 spi_cmd;
+	const u8 *cb = txbuf;
+
+	if (!g_spi)
+		return -ENODEV;
+
+	if (addr & 1) {
+		dev_err(&g_spi->dev, "Bad write align 0x%x(%zu)\n", addr, len);
+		return -EACCES;
+	}
+
+	if (len & 1)
+		len_with_pad = len + 1;
+
+	memset(&t, 0, sizeof(t));
+	t.tx_buf = buf;
+	t.speed_hz = RT5677_SPI_FREQ;
+	spi_message_init_with_transfers(&m, &t, 1);
+
+	for (offset = 0; offset < len_with_pad;) {
+		spi_cmd = rt5677_spi_select_cmd(false, (addr + offset) & 7,
+				len_with_pad - offset, &t.len);
+
+		/* Construct SPI message header */
+		buf[0] = spi_cmd;
+		buf[1] = ((addr + offset) & 0xff000000) >> 24;
+		buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
+		buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
+		buf[4] = ((addr + offset) & 0x000000ff) >> 0;
+
+		/* Fetch data from caller buffer */
+		rt5677_spi_reverse(body, t.len, cb + offset, len - offset);
+		offset += t.len;
+		t.len += RT5677_SPI_HEADER + 1;
+
+		mutex_lock(&spi_mutex);
+		status |= spi_sync(g_spi, &m);
+		mutex_unlock(&spi_mutex);
+	}
 	return status;
 }
 EXPORT_SYMBOL_GPL(rt5677_spi_write);
 
-/**
- * rt5677_spi_burst_write - Write data to SPI by rt5677 dsp memory address.
- * @addr: Start address.
- * @txbuf: Data Buffer for writng.
- * @len: Data length, it must be a multiple of 8.
- *
- *
- * Returns true for success.
- */
-int rt5677_spi_burst_write(u32 addr, const struct firmware *fw)
+int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw)
 {
-	u8 spi_cmd = RT5677_SPI_CMD_BURST_WRITE;
-	u8 *write_buf;
-	unsigned int i, end, offset = 0;
-
-	write_buf = kmalloc(RT5677_SPI_BUF_LEN + 6, GFP_KERNEL);
-
-	if (write_buf == NULL)
-		return -ENOMEM;
-
-	while (offset < fw->size) {
-		if (offset + RT5677_SPI_BUF_LEN <= fw->size)
-			end = RT5677_SPI_BUF_LEN;
-		else
-			end = fw->size % RT5677_SPI_BUF_LEN;
-
-		write_buf[0] = spi_cmd;
-		write_buf[1] = ((addr + offset) & 0xff000000) >> 24;
-		write_buf[2] = ((addr + offset) & 0x00ff0000) >> 16;
-		write_buf[3] = ((addr + offset) & 0x0000ff00) >> 8;
-		write_buf[4] = ((addr + offset) & 0x000000ff) >> 0;
-
-		for (i = 0; i < end; i += 8) {
-			write_buf[i + 12] = fw->data[offset + i + 0];
-			write_buf[i + 11] = fw->data[offset + i + 1];
-			write_buf[i + 10] = fw->data[offset + i + 2];
-			write_buf[i +  9] = fw->data[offset + i + 3];
-			write_buf[i +  8] = fw->data[offset + i + 4];
-			write_buf[i +  7] = fw->data[offset + i + 5];
-			write_buf[i +  6] = fw->data[offset + i + 6];
-			write_buf[i +  5] = fw->data[offset + i + 7];
-		}
-
-		write_buf[end + 5] = spi_cmd;
-
-		rt5677_spi_write(write_buf, end + 6);
-
-		offset += RT5677_SPI_BUF_LEN;
-	}
-
-	kfree(write_buf);
-
-	return 0;
+	return rt5677_spi_write(addr, fw->data, fw->size);
 }
-EXPORT_SYMBOL_GPL(rt5677_spi_burst_write);
+EXPORT_SYMBOL_GPL(rt5677_spi_write_firmware);
 
 static int rt5677_spi_probe(struct spi_device *spi)
 {
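
As an aside on the splitting rule above: the following is a minimal, hypothetical
user-space sketch in plain C. Only the selection logic is taken from
rt5677_spi_select_cmd(); the helper name and the main() driver are illustrative.
It reproduces the 260-byte example walk from the driver comment.

	#include <stdio.h>

	#define SPI_BURST_LEN	240

	/* Read command codes, matching the RT5677_SPI_READ_* values above */
	enum { CMD_READ_16 = 0x0, CMD_READ_32 = 0x2, CMD_READ_BURST = 0x4 };

	/* Same rule as rt5677_spi_select_cmd(): keep every transfer naturally
	 * aligned while using the largest command that still fits. */
	static unsigned char select_read_cmd(unsigned int align,
					     unsigned int remain,
					     unsigned int *len)
	{
		if (align == 2 || align == 6 || remain == 2) {
			*len = 2;
			return CMD_READ_16;
		}
		if (align == 4 || remain <= 6) {
			*len = 4;
			return CMD_READ_32;
		}
		*len = remain & ~7u;
		if (*len > SPI_BURST_LEN)
			*len = SPI_BURST_LEN;
		return CMD_READ_BURST;
	}

	int main(void)
	{
		unsigned int addr = 0x60030002, len = 260, offset, step;

		/* Expected output matches the table in the driver comment:
		 * 2 + 4 + 240 + 8 + 4 + 2 = 260 bytes */
		for (offset = 0; offset < len; offset += step) {
			unsigned char cmd = select_read_cmd((addr + offset) & 7,
							    len - offset, &step);
			printf("0x%08x cmd %#x len %u\n", addr + offset, cmd, step);
		}
		return 0;
	}
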
diff --git a/sound/soc/codecs/rt5677-spi.h b/sound/soc/codecs/rt5677-spi.h
index ec41b2b..662db16 100644
--- a/sound/soc/codecs/rt5677-spi.h
+++ b/sound/soc/codecs/rt5677-spi.h
@@ -12,10 +12,8 @@
 #ifndef __RT5677_SPI_H__
 #define __RT5677_SPI_H__
 
-#define RT5677_SPI_BUF_LEN 240
-#define RT5677_SPI_CMD_BURST_WRITE 0x05
-
-int rt5677_spi_write(u8 *txbuf, size_t len);
-int rt5677_spi_burst_write(u32 addr, const struct firmware *fw);
+int rt5677_spi_read(u32 addr, void *rxbuf, size_t len);
+int rt5677_spi_write(u32 addr, const void *txbuf, size_t len);
+int rt5677_spi_write_firmware(u32 addr, const struct firmware *fw);
 
 #endif /* __RT5677_SPI_H__ */
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 31d969a..b4cd7e3 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -15,13 +15,12 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
-#include <linux/of_gpio.h>
 #include <linux/regmap.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/firmware.h>
-#include <linux/gpio.h>
+#include <linux/property.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -54,7 +53,7 @@
 	},
 };
 
-static const struct reg_default init_list[] = {
+static const struct reg_sequence init_list[] = {
 	{RT5677_ASRC_12,	0x0018},
 	{RT5677_PR_BASE + 0x3d,	0x364d},
 	{RT5677_PR_BASE + 0x17,	0x4fc0},
@@ -746,14 +745,14 @@
 		ret = request_firmware(&rt5677->fw1, RT5677_FIRMWARE1,
 			codec->dev);
 		if (ret == 0) {
-			rt5677_spi_burst_write(0x50000000, rt5677->fw1);
+			rt5677_spi_write_firmware(0x50000000, rt5677->fw1);
 			release_firmware(rt5677->fw1);
 		}
 
 		ret = request_firmware(&rt5677->fw2, RT5677_FIRMWARE2,
 			codec->dev);
 		if (ret == 0) {
-			rt5677_spi_burst_write(0x60000000, rt5677->fw2);
+			rt5677_spi_write_firmware(0x60000000, rt5677->fw2);
 			release_firmware(rt5677->fw2);
 		}
 
@@ -789,16 +788,15 @@
 static const DECLARE_TLV_DB_SCALE(st_vol_tlv, -4650, 150, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
-static unsigned int bst_tlv[] = {
-	TLV_DB_RANGE_HEAD(7),
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
 	3, 5, TLV_DB_SCALE_ITEM(3000, 500, 0),
 	6, 6, TLV_DB_SCALE_ITEM(4400, 0, 0),
 	7, 7, TLV_DB_SCALE_ITEM(5000, 0, 0),
-	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0),
-};
+	8, 8, TLV_DB_SCALE_ITEM(5200, 0, 0)
+);
 
 static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
 		struct snd_ctl_elem_value *ucontrol)
@@ -917,8 +915,11 @@
 {
 	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
-	int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
+	int idx, rate;
 
+	rate = rt5677->sysclk / rl6231_get_pre_div(rt5677->regmap,
+		RT5677_CLK_TREE_CTRL1, RT5677_I2S_PD1_SFT);
+	idx = rl6231_calc_dmic_clk(rate);
 	if (idx < 0)
 		dev_err(codec->dev, "Failed to set DMIC clock\n");
 	else
@@ -4764,10 +4765,8 @@
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	regmap_write(rt5677->regmap, RT5677_RESET, 0x10ec);
-	if (gpio_is_valid(rt5677->pow_ldo2))
-		gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
-	if (gpio_is_valid(rt5677->reset_pin))
-		gpio_set_value_cansleep(rt5677->reset_pin, 0);
+	gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
+	gpiod_set_value_cansleep(rt5677->reset_pin, 0);
 
 	return 0;
 }
@@ -4781,10 +4780,8 @@
 		regcache_cache_only(rt5677->regmap, true);
 		regcache_mark_dirty(rt5677->regmap);
 
-		if (gpio_is_valid(rt5677->pow_ldo2))
-			gpio_set_value_cansleep(rt5677->pow_ldo2, 0);
-		if (gpio_is_valid(rt5677->reset_pin))
-			gpio_set_value_cansleep(rt5677->reset_pin, 0);
+		gpiod_set_value_cansleep(rt5677->pow_ldo2, 0);
+		gpiod_set_value_cansleep(rt5677->reset_pin, 0);
 	}
 
 	return 0;
@@ -4795,12 +4792,9 @@
 	struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
 	if (!rt5677->dsp_vad_en) {
-		if (gpio_is_valid(rt5677->pow_ldo2))
-			gpio_set_value_cansleep(rt5677->pow_ldo2, 1);
-		if (gpio_is_valid(rt5677->reset_pin))
-			gpio_set_value_cansleep(rt5677->reset_pin, 1);
-		if (gpio_is_valid(rt5677->pow_ldo2) ||
-		    gpio_is_valid(rt5677->reset_pin))
+		gpiod_set_value_cansleep(rt5677->pow_ldo2, 1);
+		gpiod_set_value_cansleep(rt5677->reset_pin, 1);
+		if (rt5677->pow_ldo2 || rt5677->reset_pin)
 			msleep(10);
 
 		regcache_cache_only(rt5677->regmap, false);
@@ -4863,7 +4857,7 @@
 #define RT5677_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
 			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-static struct snd_soc_dai_ops rt5677_aif_dai_ops = {
+static const struct snd_soc_dai_ops rt5677_aif_dai_ops = {
 	.hw_params = rt5677_hw_params,
 	.set_fmt = rt5677_set_dai_fmt,
 	.set_sysclk = rt5677_set_dai_sysclk,
@@ -5024,45 +5018,29 @@
 };
 MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
 
-static int rt5677_parse_dt(struct rt5677_priv *rt5677, struct device_node *np)
+static void rt5677_read_device_properties(struct rt5677_priv *rt5677,
+		struct device *dev)
 {
-	rt5677->pdata.in1_diff = of_property_read_bool(np,
-					"realtek,in1-differential");
-	rt5677->pdata.in2_diff = of_property_read_bool(np,
-					"realtek,in2-differential");
-	rt5677->pdata.lout1_diff = of_property_read_bool(np,
-					"realtek,lout1-differential");
-	rt5677->pdata.lout2_diff = of_property_read_bool(np,
-					"realtek,lout2-differential");
-	rt5677->pdata.lout3_diff = of_property_read_bool(np,
-					"realtek,lout3-differential");
+	rt5677->pdata.in1_diff = device_property_read_bool(dev,
+			"realtek,in1-differential");
+	rt5677->pdata.in2_diff = device_property_read_bool(dev,
+			"realtek,in2-differential");
+	rt5677->pdata.lout1_diff = device_property_read_bool(dev,
+			"realtek,lout1-differential");
+	rt5677->pdata.lout2_diff = device_property_read_bool(dev,
+			"realtek,lout2-differential");
+	rt5677->pdata.lout3_diff = device_property_read_bool(dev,
+			"realtek,lout3-differential");
 
-	rt5677->pow_ldo2 = of_get_named_gpio(np,
-					"realtek,pow-ldo2-gpio", 0);
-	rt5677->reset_pin = of_get_named_gpio(np,
-					"realtek,reset-gpio", 0);
+	device_property_read_u8_array(dev, "realtek,gpio-config",
+			rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
 
-	/*
-	 * POW_LDO2 is optional (it may be statically tied on the board).
-	 * -ENOENT means that the property doesn't exist, i.e. there is no
-	 * GPIO, so is not an error. Any other error code means the property
-	 * exists, but could not be parsed.
-	 */
-	if (!gpio_is_valid(rt5677->pow_ldo2) &&
-			(rt5677->pow_ldo2 != -ENOENT))
-		return rt5677->pow_ldo2;
-	if (!gpio_is_valid(rt5677->reset_pin) &&
-			(rt5677->reset_pin != -ENOENT))
-		return rt5677->reset_pin;
-
-	of_property_read_u8_array(np, "realtek,gpio-config",
-		rt5677->pdata.gpio_config, RT5677_GPIO_NUM);
-
-	of_property_read_u32(np, "realtek,jd1-gpio", &rt5677->pdata.jd1_gpio);
-	of_property_read_u32(np, "realtek,jd2-gpio", &rt5677->pdata.jd2_gpio);
-	of_property_read_u32(np, "realtek,jd3-gpio", &rt5677->pdata.jd3_gpio);
-
-	return 0;
+	device_property_read_u32(dev, "realtek,jd1-gpio",
+			&rt5677->pdata.jd1_gpio);
+	device_property_read_u32(dev, "realtek,jd2-gpio",
+			&rt5677->pdata.jd2_gpio);
+	device_property_read_u32(dev, "realtek,jd3-gpio",
+			&rt5677->pdata.jd3_gpio);
 }
 
 static struct regmap_irq rt5677_irqs[] = {
@@ -5145,43 +5123,29 @@
 
 	if (pdata)
 		rt5677->pdata = *pdata;
+	else
+		rt5677_read_device_properties(rt5677, &i2c->dev);
 
-	if (i2c->dev.of_node) {
-		ret = rt5677_parse_dt(rt5677, i2c->dev.of_node);
-		if (ret) {
-			dev_err(&i2c->dev, "Failed to parse device tree: %d\n",
-				ret);
-			return ret;
-		}
-	} else {
-		rt5677->pow_ldo2 = -EINVAL;
-		rt5677->reset_pin = -EINVAL;
+	/* pow-ldo2 and reset are optional. The codec pins may be statically
+	 * connected on the board without gpios. If the gpio device property
+	 * isn't specified, devm_gpiod_get_optional returns NULL.
+	 */
+	rt5677->pow_ldo2 = devm_gpiod_get_optional(&i2c->dev,
+			"realtek,pow-ldo2", GPIOD_OUT_HIGH);
+	if (IS_ERR(rt5677->pow_ldo2)) {
+		ret = PTR_ERR(rt5677->pow_ldo2);
+		dev_err(&i2c->dev, "Failed to request POW_LDO2: %d\n", ret);
+		return ret;
+	}
+	rt5677->reset_pin = devm_gpiod_get_optional(&i2c->dev,
+			"realtek,reset", GPIOD_OUT_HIGH);
+	if (IS_ERR(rt5677->reset_pin)) {
+		ret = PTR_ERR(rt5677->reset_pin);
+		dev_err(&i2c->dev, "Failed to request RESET: %d\n", ret);
+		return ret;
 	}
 
-	if (gpio_is_valid(rt5677->pow_ldo2)) {
-		ret = devm_gpio_request_one(&i2c->dev, rt5677->pow_ldo2,
-					    GPIOF_OUT_INIT_HIGH,
-					    "RT5677 POW_LDO2");
-		if (ret < 0) {
-			dev_err(&i2c->dev, "Failed to request POW_LDO2 %d: %d\n",
-				rt5677->pow_ldo2, ret);
-			return ret;
-		}
-	}
-
-	if (gpio_is_valid(rt5677->reset_pin)) {
-		ret = devm_gpio_request_one(&i2c->dev, rt5677->reset_pin,
-					    GPIOF_OUT_INIT_HIGH,
-					    "RT5677 RESET");
-		if (ret < 0) {
-			dev_err(&i2c->dev, "Failed to request RESET %d: %d\n",
-				rt5677->reset_pin, ret);
-			return ret;
-		}
-	}
-
-	if (gpio_is_valid(rt5677->pow_ldo2) ||
-	    gpio_is_valid(rt5677->reset_pin)) {
+	if (rt5677->pow_ldo2 || rt5677->reset_pin) {
 		/* Wait a while until I2C bus becomes available. The datasheet
 		 * does not specify the exact time we should wait, but the
 		 * startup sequence mentions at least a few milliseconds.
@@ -5209,7 +5173,7 @@
 	regmap_read(rt5677->regmap, RT5677_VENDOR_ID2, &val);
 	if (val != RT5677_DEVICE_ID) {
 		dev_err(&i2c->dev,
-			"Device with ID register %x is not rt5677\n", val);
+			"Device with ID register %#x is not rt5677\n", val);
 		return -ENODEV;
 	}
 
@@ -5273,7 +5237,6 @@
 static struct i2c_driver rt5677_i2c_driver = {
 	.driver = {
 		.name = "rt5677",
-		.owner = THIS_MODULE,
 	},
 	.probe = rt5677_i2c_probe,
 	.remove   = rt5677_i2c_remove,
diff --git a/sound/soc/codecs/rt5677.h b/sound/soc/codecs/rt5677.h
index 7eca38a..d46855a 100644
--- a/sound/soc/codecs/rt5677.h
+++ b/sound/soc/codecs/rt5677.h
@@ -14,6 +14,7 @@
 
 #include <sound/rt5677.h>
 #include <linux/gpio/driver.h>
+#include <linux/gpio/consumer.h>
 
 /* Info */
 #define RT5677_RESET				0x00
@@ -1775,8 +1776,8 @@
 	int pll_src;
 	int pll_in;
 	int pll_out;
-	int pow_ldo2; /* POW_LDO2 pin */
-	int reset_pin; /* RESET pin */
+	struct gpio_desc *pow_ldo2; /* POW_LDO2 pin */
+	struct gpio_desc *reset_pin; /* RESET pin */
 	enum rt5677_type type;
 #ifdef CONFIG_GPIOLIB
 	struct gpio_chip gpio_chip;
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index e673f6c..bfda25e 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -406,11 +406,10 @@
 static const DECLARE_TLV_DB_SCALE(capture_6db_attenuate, -600, 600, 0);
 
 /* tlv for mic gain, 0db 20db 30db 40db */
-static const unsigned int mic_gain_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mic_gain_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
-	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0),
-};
+	1, 3, TLV_DB_SCALE_ITEM(2000, 1000, 0)
+);
 
 /* tlv for hp volume, -51.5db to 12.0db, step .5db */
 static const DECLARE_TLV_DB_SCALE(headphone_volume, -5150, 50, 0);
@@ -1601,7 +1600,6 @@
 static struct i2c_driver sgtl5000_i2c_driver = {
 	.driver = {
 		   .name = "sgtl5000",
-		   .owner = THIS_MODULE,
 		   .of_match_table = sgtl5000_dt_ids,
 		   },
 	.probe = sgtl5000_i2c_probe,
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 3e72964..a8402d0 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -208,7 +208,7 @@
 	return err;
 }
 
-static struct snd_soc_dai_ops si476x_dai_ops = {
+static const struct snd_soc_dai_ops si476x_dai_ops = {
 	.hw_params	= si476x_codec_hw_params,
 	.set_fmt	= si476x_codec_set_dai_fmt,
 };
diff --git a/sound/soc/codecs/sirf-audio-codec.c b/sound/soc/codecs/sirf-audio-codec.c
index 29cb442..6bfd25c 100644
--- a/sound/soc/codecs/sirf-audio-codec.c
+++ b/sound/soc/codecs/sirf-audio-codec.c
@@ -370,11 +370,11 @@
 	return 0;
 }
 
-struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
+static const struct snd_soc_dai_ops sirf_audio_codec_dai_ops = {
 	.trigger = sirf_audio_codec_trigger,
 };
 
-struct snd_soc_dai_driver sirf_audio_codec_dai = {
+static struct snd_soc_dai_driver sirf_audio_codec_dai = {
 	.name = "sirf-audio-codec",
 	.playback = {
 		.stream_name = "Playback",
diff --git a/sound/soc/codecs/ssm2518.c b/sound/soc/codecs/ssm2518.c
index f30de76..ddb0203 100644
--- a/sound/soc/codecs/ssm2518.c
+++ b/sound/soc/codecs/ssm2518.c
@@ -806,6 +806,14 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id ssm2518_dt_ids[] = {
+	{ .compatible = "adi,ssm2518", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, ssm2518_dt_ids);
+#endif
+
 static const struct i2c_device_id ssm2518_i2c_ids[] = {
 	{ "ssm2518", 0 },
 	{ }
@@ -815,7 +823,7 @@
 static struct i2c_driver ssm2518_driver = {
 	.driver = {
 		.name = "ssm2518",
-		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(ssm2518_dt_ids),
 	},
 	.probe = ssm2518_i2c_probe,
 	.remove = ssm2518_i2c_remove,
diff --git a/sound/soc/codecs/ssm2602-i2c.c b/sound/soc/codecs/ssm2602-i2c.c
index 0d9779d..173ba85 100644
--- a/sound/soc/codecs/ssm2602-i2c.c
+++ b/sound/soc/codecs/ssm2602-i2c.c
@@ -52,7 +52,6 @@
 static struct i2c_driver ssm2602_i2c_driver = {
 	.driver = {
 		.name = "ssm2602",
-		.owner = THIS_MODULE,
 		.of_match_table = ssm2602_of_match,
 	},
 	.probe = ssm2602_i2c_probe,
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 69a773a..4452fea 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -75,11 +75,10 @@
 			ssm2602_deemph),
 };
 
-static const unsigned int ssm260x_outmix_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(ssm260x_outmix_tlv,
 	0, 47, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 0),
-	48, 127, TLV_DB_SCALE_ITEM(-7400, 100, 0),
-};
+	48, 127, TLV_DB_SCALE_ITEM(-7400, 100, 0)
+);
 
 static const DECLARE_TLV_DB_SCALE(ssm260x_inpga_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(ssm260x_sidetone_tlv, -1500, 300, 0);
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 84a4f5a..e619d56 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -10,6 +10,7 @@
  * Licensed under the GPL-2.
  */
 
+#include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
@@ -173,6 +174,12 @@
 	SND_SOC_DAPM_SWITCH("Amplifier Boost", SSM4567_REG_POWER_CTRL, 3, 1,
 		&ssm4567_amplifier_boost_control),
 
+	SND_SOC_DAPM_SIGGEN("Sense"),
+
+	SND_SOC_DAPM_PGA("Current Sense", SSM4567_REG_POWER_CTRL, 4, 1, NULL, 0),
+	SND_SOC_DAPM_PGA("Voltage Sense", SSM4567_REG_POWER_CTRL, 5, 1, NULL, 0),
+	SND_SOC_DAPM_PGA("VBAT Sense", SSM4567_REG_POWER_CTRL, 6, 1, NULL, 0),
+
 	SND_SOC_DAPM_OUTPUT("OUT"),
 };
 
@@ -180,6 +187,13 @@
 	{ "OUT", NULL, "Amplifier Boost" },
 	{ "Amplifier Boost", "Switch", "DAC" },
 	{ "OUT", NULL, "DAC" },
+
+	{ "Current Sense", NULL, "Sense" },
+	{ "Voltage Sense", NULL, "Sense" },
+	{ "VBAT Sense", NULL, "Sense" },
+	{ "Capture Sense", NULL, "Current Sense" },
+	{ "Capture Sense", NULL, "Voltage Sense" },
+	{ "Capture Sense", NULL, "VBAT Sense" },
 };
 
 static int ssm4567_hw_params(struct snd_pcm_substream *substream,
@@ -387,6 +401,14 @@
 		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
 			SNDRV_PCM_FMTBIT_S32,
 	},
+	.capture = {
+		.stream_name = "Capture Sense",
+		.channels_min = 1,
+		.channels_max = 1,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+			SNDRV_PCM_FMTBIT_S32,
+	},
 	.ops = &ssm4567_dai_ops,
 };
 
@@ -456,10 +478,20 @@
 };
 MODULE_DEVICE_TABLE(i2c, ssm4567_i2c_ids);
 
+#ifdef CONFIG_ACPI
+
+static const struct acpi_device_id ssm4567_acpi_match[] = {
+	{ "INT343B", 0 },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, ssm4567_acpi_match);
+
+#endif
+
 static struct i2c_driver ssm4567_driver = {
 	.driver = {
 		.name = "ssm4567",
-		.owner = THIS_MODULE,
+		.acpi_match_table = ACPI_PTR(ssm4567_acpi_match),
 	},
 	.probe = ssm4567_i2c_probe,
 	.remove = ssm4567_i2c_remove,
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 60eff36..a9844b2 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -1144,7 +1144,6 @@
 static struct i2c_driver sta32x_i2c_driver = {
 	.driver = {
 		.name = "sta32x",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(st32x_dt_ids),
 	},
 	.probe =    sta32x_i2c_probe,
diff --git a/sound/soc/codecs/sta350.c b/sound/soc/codecs/sta350.c
index bd819a3..33a4612 100644
--- a/sound/soc/codecs/sta350.c
+++ b/sound/soc/codecs/sta350.c
@@ -1264,7 +1264,6 @@
 static struct i2c_driver sta350_i2c_driver = {
 	.driver = {
 		.name = "sta350",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(st350_dt_ids),
 	},
 	.probe =    sta350_i2c_probe,
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
index 4f70378..2cdaca9 100644
--- a/sound/soc/codecs/sta529.c
+++ b/sound/soc/codecs/sta529.c
@@ -339,9 +339,6 @@
 	struct sta529 *sta529;
 	int ret;
 
-	if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-		return -EINVAL;
-
 	sta529 = devm_kzalloc(&i2c->dev, sizeof(struct sta529), GFP_KERNEL);
 	if (!sta529)
 		return -ENOMEM;
@@ -379,7 +376,6 @@
 static struct i2c_driver sta529_i2c_driver = {
 	.driver = {
 		.name = "sta529",
-		.owner = THIS_MODULE,
 	},
 	.probe		= sta529_i2c_probe,
 	.remove		= sta529_i2c_remove,
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index ed4cca7..0945c51 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -28,6 +28,9 @@
 
 #include "stac9766.h"
 
+#define STAC9766_VENDOR_ID 0x83847666
+#define STAC9766_VENDOR_ID_MASK 0xffffffff
+
 /*
  * STAC9766 register cache
  */
@@ -239,45 +242,12 @@
 	return 0;
 }
 
-static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(ac97);
-		if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(ac97);
-	if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
-		return -EIO;
-	return 0;
-}
-
 static int stac9766_codec_resume(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-	u16 id, reset;
 
-	reset = 0;
-	/* give the codec an AC97 warm reset to start the link */
-reset:
-	if (reset > 5) {
-		dev_err(codec->dev, "Failed to resume\n");
-		return -EIO;
-	}
-	ac97->bus->ops->warm_reset(ac97);
-	id = soc_ac97_ops->read(ac97, AC97_VENDOR_ID2);
-	if (id != 0x4c13) {
-		stac9766_reset(codec, 0);
-		reset++;
-		goto reset;
-	}
-
-	return 0;
+	return snd_ac97_reset(ac97, true, STAC9766_VENDOR_ID,
+		STAC9766_VENDOR_ID_MASK);
 }
 
 static const struct snd_soc_dai_ops stac9766_dai_ops_analog = {
@@ -330,28 +300,15 @@
 static int stac9766_codec_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
-	int ret = 0;
 
-	ac97 = snd_soc_new_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, STAC9766_VENDOR_ID,
+			STAC9766_VENDOR_ID_MASK);
 	if (IS_ERR(ac97))
 		return PTR_ERR(ac97);
 
 	snd_soc_codec_set_drvdata(codec, ac97);
 
-	/* do a cold reset for the controller and then try
-	 * a warm reset followed by an optional cold reset for codec */
-	stac9766_reset(codec, 0);
-	ret = stac9766_reset(codec, 1);
-	if (ret < 0) {
-		dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-		goto codec_err;
-	}
-
 	return 0;
-
-codec_err:
-	snd_soc_free_ac97_codec(ac97);
-	return ret;
 }
 
 static int stac9766_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c
new file mode 100644
index 0000000..160d61a
--- /dev/null
+++ b/sound/soc/codecs/sti-sas.c
@@ -0,0 +1,628 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+/* chipID supported */
+#define CHIPID_STIH416 0
+#define CHIPID_STIH407 1
+
+/* DAC definitions */
+
+/* stih416 DAC registers */
+/* sysconf 2517: Audio-DAC-Control */
+#define STIH416_AUDIO_DAC_CTRL 0x00000814
+/* sysconf 2519: Audio-Glue-Control */
+#define STIH416_AUDIO_GLUE_CTRL 0x0000081C
+
+#define STIH416_DAC_NOT_STANDBY	0x3
+#define STIH416_DAC_SOFTMUTE	0x4
+#define STIH416_DAC_ANA_NOT_PWR	0x5
+#define STIH416_DAC_NOT_PNDBG	0x6
+
+#define STIH416_DAC_NOT_STANDBY_MASK	BIT(STIH416_DAC_NOT_STANDBY)
+#define STIH416_DAC_SOFTMUTE_MASK	BIT(STIH416_DAC_SOFTMUTE)
+#define STIH416_DAC_ANA_NOT_PWR_MASK	BIT(STIH416_DAC_ANA_NOT_PWR)
+#define STIH416_DAC_NOT_PNDBG_MASK	BIT(STIH416_DAC_NOT_PNDBG)
+
+/* stih407 DAC registers */
+/* sysconf 5041: Audio-Glue-Control */
+#define STIH407_AUDIO_GLUE_CTRL 0x000000A4
+/* sysconf 5042: Audio-DAC-Control */
+#define STIH407_AUDIO_DAC_CTRL 0x000000A8
+
+/* DAC definitions */
+#define STIH407_DAC_SOFTMUTE		0x0
+#define STIH407_DAC_STANDBY_ANA		0x1
+#define STIH407_DAC_STANDBY		0x2
+
+#define STIH407_DAC_SOFTMUTE_MASK	BIT(STIH407_DAC_SOFTMUTE)
+#define STIH407_DAC_STANDBY_ANA_MASK    BIT(STIH407_DAC_STANDBY_ANA)
+#define STIH407_DAC_STANDBY_MASK        BIT(STIH407_DAC_STANDBY)
+
+/* SPDIF definitions */
+#define SPDIF_BIPHASE_ENABLE		0x6
+#define SPDIF_BIPHASE_IDLE		0x7
+
+#define SPDIF_BIPHASE_ENABLE_MASK	BIT(SPDIF_BIPHASE_ENABLE)
+#define SPDIF_BIPHASE_IDLE_MASK		BIT(SPDIF_BIPHASE_IDLE)
+
+enum {
+	STI_SAS_DAI_SPDIF_OUT,
+	STI_SAS_DAI_ANALOG_OUT,
+};
+
+static const struct reg_default stih416_sas_reg_defaults[] = {
+	{ STIH416_AUDIO_GLUE_CTRL, 0x00000040 },
+	{ STIH416_AUDIO_DAC_CTRL, 0x00000000 },
+};
+
+static const struct reg_default stih407_sas_reg_defaults[] = {
+	{ STIH407_AUDIO_DAC_CTRL, 0x00000000 },
+	{ STIH407_AUDIO_GLUE_CTRL, 0x00000040 },
+};
+
+struct sti_dac_audio {
+	struct regmap *regmap;
+	struct regmap *virt_regmap;
+	struct regmap_field  **field;
+	struct reset_control *rst;
+	int mclk;
+};
+
+struct sti_spdif_audio {
+	struct regmap *regmap;
+	struct regmap_field  **field;
+	int mclk;
+};
+
+/* device data structure */
+struct sti_sas_dev_data {
+	const int chipid; /* IC version */
+	const struct regmap_config *regmap;
+	const struct snd_soc_dai_ops *dac_ops;  /* DAC function callbacks */
+	const struct snd_soc_dapm_widget *dapm_widgets; /* dapms declaration */
+	const int num_dapm_widgets; /* dapms declaration */
+	const struct snd_soc_dapm_route *dapm_routes; /* route declaration */
+	const int num_dapm_routes; /* route declaration */
+};
+
+/* driver data structure */
+struct sti_sas_data {
+	struct device *dev;
+	const struct sti_sas_dev_data *dev_data;
+	struct sti_dac_audio dac;
+	struct sti_spdif_audio spdif;
+};
+
+/* Read a register from the sysconf reg bank */
+static int sti_sas_read_reg(void *context, unsigned int reg,
+			    unsigned int *value)
+{
+	struct sti_sas_data *drvdata = context;
+	int status;
+	u32 val;
+
+	status = regmap_read(drvdata->dac.regmap, reg, &val);
+	*value = (unsigned int)val;
+
+	return status;
+}
+
+/* Read a register from the sysconf reg bank */
+static int sti_sas_write_reg(void *context, unsigned int reg,
+			     unsigned int value)
+{
+	struct sti_sas_data *drvdata = context;
+	int status;
+
+	status = regmap_write(drvdata->dac.regmap, reg, value);
+
+	return status;
+}
+
+static int  sti_sas_init_sas_registers(struct snd_soc_codec *codec,
+				       struct sti_sas_data *data)
+{
+	int ret;
+	/*
+	 * DAC and SPDIF are activated by default
+	 * put them in IDLE to save power
+	 */
+
+	/* Initialise bi-phase formatter to disabled */
+	ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+				  SPDIF_BIPHASE_ENABLE_MASK, 0);
+
+	if (!ret)
+		/* Initialise bi-phase formatter idle value to 0 */
+		ret = snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					  SPDIF_BIPHASE_IDLE_MASK, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update SPDIF registers");
+		return ret;
+	}
+
+	/* Init DAC configuration */
+	switch (data->dev_data->chipid) {
+	case CHIPID_STIH407:
+		/* init configuration */
+		ret =  snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					   STIH407_DAC_STANDBY_MASK,
+					   STIH407_DAC_STANDBY_MASK);
+
+		if (!ret)
+			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+						  STIH407_DAC_STANDBY_ANA_MASK,
+						  STIH407_DAC_STANDBY_ANA_MASK);
+		if (!ret)
+			ret = snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+						  STIH407_DAC_SOFTMUTE_MASK,
+						  STIH407_DAC_SOFTMUTE_MASK);
+		break;
+	case CHIPID_STIH416:
+		ret =  snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					   STIH416_DAC_NOT_STANDBY_MASK, 0);
+		if (!ret)
+			ret =  snd_soc_update_bits(codec,
+						   STIH416_AUDIO_DAC_CTRL,
+						   STIH416_DAC_ANA_NOT_PWR_MASK,
+						   0);
+		if (!ret)
+			ret =  snd_soc_update_bits(codec,
+						   STIH416_AUDIO_DAC_CTRL,
+						   STIH416_DAC_NOT_PNDBG_MASK,
+						   0);
+		if (!ret)
+			ret =  snd_soc_update_bits(codec,
+						   STIH416_AUDIO_DAC_CTRL,
+						   STIH416_DAC_SOFTMUTE_MASK,
+						   STIH416_DAC_SOFTMUTE_MASK);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to update DAC registers");
+		return ret;
+	}
+
+	return ret;
+}
+
+/*
+ * DAC
+ */
+static int sti_sas_dac_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	/* Sanity check only */
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: Unsupporter master mask 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int stih416_dac_probe(struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	struct sti_dac_audio *dac = &drvdata->dac;
+
+	/* Get reset control */
+	dac->rst = devm_reset_control_get(codec->dev, "dac_rst");
+	if (IS_ERR(dac->rst)) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: DAC reset control not defined !\n",
+			__func__);
+		dac->rst = NULL;
+		return -EFAULT;
+	}
+	/* Put the DAC into reset */
+	reset_control_assert(dac->rst);
+
+	return 0;
+}
+
+static const struct snd_soc_dapm_widget stih416_sas_dapm_widgets[] = {
+	SND_SOC_DAPM_PGA("DAC bandgap", STIH416_AUDIO_DAC_CTRL,
+			 STIH416_DAC_NOT_PNDBG, 0, NULL, 0),
+	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH416_AUDIO_DAC_CTRL,
+			     STIH416_DAC_ANA_NOT_PWR, 0, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH416_AUDIO_DAC_CTRL,
+			 STIH416_DAC_NOT_STANDBY, 0),
+	SND_SOC_DAPM_OUTPUT("DAC Output"),
+};
+
+static const struct snd_soc_dapm_widget stih407_sas_dapm_widgets[] = {
+	SND_SOC_DAPM_OUT_DRV("DAC standby ana", STIH407_AUDIO_DAC_CTRL,
+			     STIH407_DAC_STANDBY_ANA, 1, NULL, 0),
+	SND_SOC_DAPM_DAC("DAC standby",  "dac_p", STIH407_AUDIO_DAC_CTRL,
+			 STIH407_DAC_STANDBY, 1),
+	SND_SOC_DAPM_OUTPUT("DAC Output"),
+};
+
+static const struct snd_soc_dapm_route stih416_sas_route[] = {
+	{"DAC Output", NULL, "DAC bandgap"},
+	{"DAC Output", NULL, "DAC standby ana"},
+	{"DAC standby ana", NULL, "DAC standby"},
+};
+
+static const struct snd_soc_dapm_route stih407_sas_route[] = {
+	{"DAC Output", NULL, "DAC standby ana"},
+	{"DAC standby ana", NULL, "DAC standby"},
+};
+
+static int stih416_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (mute) {
+		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					    STIH416_DAC_SOFTMUTE_MASK,
+					    STIH416_DAC_SOFTMUTE_MASK);
+	} else {
+		return snd_soc_update_bits(codec, STIH416_AUDIO_DAC_CTRL,
+					    STIH416_DAC_SOFTMUTE_MASK, 0);
+	}
+}
+
+static int stih407_sas_dac_mute(struct snd_soc_dai *dai, int mute, int stream)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	if (mute) {
+		return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					    STIH407_DAC_SOFTMUTE_MASK,
+					    STIH407_DAC_SOFTMUTE_MASK);
+	} else {
+		return snd_soc_update_bits(codec, STIH407_AUDIO_DAC_CTRL,
+					    STIH407_DAC_SOFTMUTE_MASK,
+					    0);
+	}
+}
+
+/*
+ * SPDIF
+ */
+static int sti_sas_spdif_set_fmt(struct snd_soc_dai *dai,
+				 unsigned int fmt)
+{
+	if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+		dev_err(dai->codec->dev,
+			"%s: ERROR: Unsupporter master mask 0x%x\n",
+			__func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * sti_sas_spdif_trigger:
+ * The trigger function ensures that the bi-phase formatter (BPF) is
+ * disabled before the CPU DAI is stopped.
+ * This is mandatory to avoid stalling the BPF.
+ */
+static int sti_sas_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					    SPDIF_BIPHASE_ENABLE_MASK,
+					    SPDIF_BIPHASE_ENABLE_MASK);
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		return snd_soc_update_bits(codec, STIH407_AUDIO_GLUE_CTRL,
+					    SPDIF_BIPHASE_ENABLE_MASK,
+					    0);
+	default:
+		return -EINVAL;
+	}
+}
+
+static bool sti_sas_volatile_register(struct device *dev, unsigned int reg)
+{
+	if (reg == STIH407_AUDIO_GLUE_CTRL)
+		return true;
+
+	return false;
+}
+
+/*
+ * CODEC DAIS
+ */
+
+/*
+ * sti_sas_set_sysclk:
+ * get MCLK input frequency to check that MCLK-FS ratio is coherent
+ */
+static int sti_sas_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+			      unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+
+	if (dir == SND_SOC_CLOCK_OUT)
+		return 0;
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	switch (dai->id) {
+	case STI_SAS_DAI_SPDIF_OUT:
+		drvdata->spdif.mclk = freq;
+		break;
+
+	case STI_SAS_DAI_ANALOG_OUT:
+		drvdata->dac.mclk = freq;
+		break;
+	}
+
+	return 0;
+}
+
+static int sti_sas_prepare(struct snd_pcm_substream *substream,
+			   struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	switch (dai->id) {
+	case STI_SAS_DAI_SPDIF_OUT:
+		if ((drvdata->spdif.mclk / runtime->rate) != 128) {
+			dev_err(codec->dev, "unexpected mclk-fs ratio");
+			return -EINVAL;
+		}
+		break;
+	case STI_SAS_DAI_ANALOG_OUT:
+		if ((drvdata->dac.mclk / runtime->rate) != 256) {
+			dev_err(codec->dev, "unexpected mclk-fs ratio");
+			return -EINVAL;
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops stih416_dac_ops = {
+	.set_fmt = sti_sas_dac_set_fmt,
+	.mute_stream = stih416_sas_dac_mute,
+	.prepare = sti_sas_prepare,
+	.set_sysclk = sti_sas_set_sysclk,
+};
+
+static const struct snd_soc_dai_ops stih407_dac_ops = {
+	.set_fmt = sti_sas_dac_set_fmt,
+	.mute_stream = stih407_sas_dac_mute,
+	.prepare = sti_sas_prepare,
+	.set_sysclk = sti_sas_set_sysclk,
+};
+
+static const struct regmap_config stih407_sas_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+
+	.max_register = STIH407_AUDIO_DAC_CTRL,
+	.reg_defaults = stih407_sas_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stih407_sas_reg_defaults),
+	.volatile_reg = sti_sas_volatile_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_read = sti_sas_read_reg,
+	.reg_write = sti_sas_write_reg,
+};
+
+static const struct regmap_config stih416_sas_regmap = {
+	.reg_bits = 32,
+	.val_bits = 32,
+
+	.max_register = STIH416_AUDIO_DAC_CTRL,
+	.reg_defaults = stih416_sas_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(stih416_sas_reg_defaults),
+	.volatile_reg = sti_sas_volatile_register,
+	.cache_type = REGCACHE_RBTREE,
+	.reg_read = sti_sas_read_reg,
+	.reg_write = sti_sas_write_reg,
+};
+
+static const struct sti_sas_dev_data stih416_data = {
+	.chipid = CHIPID_STIH416,
+	.regmap = &stih416_sas_regmap,
+	.dac_ops = &stih416_dac_ops,
+	.dapm_widgets = stih416_sas_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(stih416_sas_dapm_widgets),
+	.dapm_routes =	stih416_sas_route,
+	.num_dapm_routes = ARRAY_SIZE(stih416_sas_route),
+};
+
+static const struct sti_sas_dev_data stih407_data = {
+	.chipid = CHIPID_STIH407,
+	.regmap = &stih407_sas_regmap,
+	.dac_ops = &stih407_dac_ops,
+	.dapm_widgets = stih407_sas_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(stih407_sas_dapm_widgets),
+	.dapm_routes =	stih407_sas_route,
+	.num_dapm_routes = ARRAY_SIZE(stih407_sas_route),
+};
+
+static struct snd_soc_dai_driver sti_sas_dai[] = {
+	{
+		.name = "sas-dai-spdif-out",
+		.id = STI_SAS_DAI_SPDIF_OUT,
+		.playback = {
+			.stream_name = "spdif_p",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
+				 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 |
+				 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+				 SNDRV_PCM_RATE_192000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+		.ops = (struct snd_soc_dai_ops[]) {
+			{
+				.set_fmt = sti_sas_spdif_set_fmt,
+				.trigger = sti_sas_spdif_trigger,
+				.set_sysclk = sti_sas_set_sysclk,
+				.prepare = sti_sas_prepare,
+			}
+		},
+	},
+	{
+		.name = "sas-dai-dac",
+		.id = STI_SAS_DAI_ANALOG_OUT,
+		.playback = {
+			.stream_name = "dac_p",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = SNDRV_PCM_FMTBIT_S16_LE |
+				   SNDRV_PCM_FMTBIT_S32_LE,
+		},
+	},
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int sti_sas_resume(struct snd_soc_codec *codec)
+{
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+
+	return sti_sas_init_sas_registers(codec, drvdata);
+}
+#else
+#define sti_sas_resume NULL
+#endif
+
+static int sti_sas_codec_probe(struct snd_soc_codec *codec)
+{
+	struct sti_sas_data *drvdata = dev_get_drvdata(codec->dev);
+	int ret;
+
+	ret = sti_sas_init_sas_registers(codec, drvdata);
+
+	return ret;
+}
+
+static struct snd_soc_codec_driver sti_sas_driver = {
+	.probe = sti_sas_codec_probe,
+	.resume = sti_sas_resume,
+};
+
+static const struct of_device_id sti_sas_dev_match[] = {
+	{
+		.compatible = "st,stih416-sas-codec",
+		.data = &stih416_data,
+	},
+	{
+		.compatible = "st,stih407-sas-codec",
+		.data = &stih407_data,
+	},
+	{},
+};
+
+static int sti_sas_driver_probe(struct platform_device *pdev)
+{
+	struct device_node *pnode = pdev->dev.of_node;
+	struct sti_sas_data *drvdata;
+	const struct of_device_id *of_id;
+
+	/* Allocate device structure */
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct sti_sas_data),
+			       GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	/* Populate data structure depending on compatibility */
+	of_id = of_match_node(sti_sas_dev_match, pnode);
+	if (!of_id->data) {
+		dev_err(&pdev->dev, "data associated to device is missing");
+		return -EINVAL;
+	}
+
+	drvdata->dev_data = (struct sti_sas_dev_data *)of_id->data;
+
+	/* Initialise device structure */
+	drvdata->dev = &pdev->dev;
+
+	/* Request the DAC & SPDIF registers memory region */
+	drvdata->dac.virt_regmap = devm_regmap_init(&pdev->dev, NULL, drvdata,
+						    drvdata->dev_data->regmap);
+	if (IS_ERR(drvdata->dac.virt_regmap)) {
+		dev_err(&pdev->dev, "audio registers not enabled\n");
+		return PTR_ERR(drvdata->dac.virt_regmap);
+	}
+
+	/* Request the syscon region */
+	drvdata->dac.regmap =
+		syscon_regmap_lookup_by_phandle(pnode, "st,syscfg");
+	if (IS_ERR(drvdata->dac.regmap)) {
+		dev_err(&pdev->dev, "syscon registers not available\n");
+		return PTR_ERR(drvdata->dac.regmap);
+	}
+	drvdata->spdif.regmap = drvdata->dac.regmap;
+
+	/* Set DAC dai probe */
+	if (drvdata->dev_data->chipid == CHIPID_STIH416)
+		sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].probe = stih416_dac_probe;
+
+	sti_sas_dai[STI_SAS_DAI_ANALOG_OUT].ops = drvdata->dev_data->dac_ops;
+
+	/* Set DAPM widgets and routes */
+	sti_sas_driver.dapm_widgets = drvdata->dev_data->dapm_widgets;
+	sti_sas_driver.num_dapm_widgets = drvdata->dev_data->num_dapm_widgets;
+
+	sti_sas_driver.dapm_routes = drvdata->dev_data->dapm_routes;
+	sti_sas_driver.num_dapm_routes = drvdata->dev_data->num_dapm_routes;
+
+	/* Store context */
+	dev_set_drvdata(&pdev->dev, drvdata);
+
+	return snd_soc_register_codec(&pdev->dev, &sti_sas_driver,
+					sti_sas_dai,
+					ARRAY_SIZE(sti_sas_dai));
+}
+
+static int sti_sas_driver_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver sti_sas_platform_driver = {
+	.driver = {
+		.name = "sti-sas-codec",
+		.of_match_table = sti_sas_dev_match,
+	},
+	.probe = sti_sas_driver_probe,
+	.remove = sti_sas_driver_remove,
+};
+
+module_platform_driver(sti_sas_platform_driver);
+
+MODULE_DESCRIPTION("audio codec for STMicroelectronics sti platforms");
+MODULE_AUTHOR("Arnaud.pouliquen@st.com");
+MODULE_LICENSE("GPL v2");
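
A note on the clocking checks in sti_sas_prepare() above: the driver only
verifies that MCLK/FS matches the fixed ratios it expects. The snippet below is
a hypothetical stand-alone sketch of that arithmetic in plain C, with example
figures for a 48 kHz stream; it is illustrative only, not driver code.

	#include <stdio.h>

	/* Ratios checked by sti_sas_prepare(): SPDIF expects MCLK = 128 * FS,
	 * the analog DAC expects MCLK = 256 * FS. */
	static int mclk_fs_ok(unsigned int mclk, unsigned int rate,
			      unsigned int ratio)
	{
		return rate && (mclk / rate) == ratio;
	}

	int main(void)
	{
		unsigned int rate = 48000;		/* example stream rate */
		unsigned int dac_mclk = 12288000;	/* 256 * 48 kHz */
		unsigned int spdif_mclk = 6144000;	/* 128 * 48 kHz */

		printf("dac mclk ok:   %d\n", mclk_fs_ok(dac_mclk, rate, 256));
		printf("spdif mclk ok: %d\n", mclk_fs_ok(spdif_mclk, rate, 128));
		return 0;
	}
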
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 4f25a7d..e3a0bca 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -38,7 +38,7 @@
 
 #include "tas2552.h"
 
-static struct reg_default tas2552_reg_defs[] = {
+static const struct reg_default tas2552_reg_defs[] = {
 	{TAS2552_CFG_1, 0x22},
 	{TAS2552_CFG_3, 0x80},
 	{TAS2552_DOUT, 0x00},
@@ -493,8 +493,7 @@
 	regcache_cache_only(tas2552->regmap, true);
 	regcache_mark_dirty(tas2552->regmap);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	return 0;
 }
@@ -503,8 +502,7 @@
 {
 	struct tas2552_data *tas2552 = dev_get_drvdata(dev);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 1);
+	gpiod_set_value(tas2552->enable_gpio, 1);
 
 	tas2552_sw_shutdown(tas2552, 0);
 
@@ -520,7 +518,7 @@
 			   NULL)
 };
 
-static struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
+static const struct snd_soc_dai_ops tas2552_speaker_dai_ops = {
 	.hw_params	= tas2552_hw_params,
 	.prepare	= tas2552_prepare,
 	.set_sysclk	= tas2552_set_dai_sysclk,
@@ -585,8 +583,7 @@
 		return ret;
 	}
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 1);
+	gpiod_set_value(tas2552->enable_gpio, 1);
 
 	ret = pm_runtime_get_sync(codec->dev);
 	if (ret < 0) {
@@ -610,8 +607,7 @@
 	return 0;
 
 probe_fail:
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	regulator_bulk_disable(ARRAY_SIZE(tas2552->supplies),
 					tas2552->supplies);
@@ -624,8 +620,7 @@
 
 	pm_runtime_put(codec->dev);
 
-	if (tas2552->enable_gpio)
-		gpiod_set_value(tas2552->enable_gpio, 0);
+	gpiod_set_value(tas2552->enable_gpio, 0);
 
 	return 0;
 };
@@ -769,7 +764,6 @@
 static struct i2c_driver tas2552_i2c_driver = {
 	.driver = {
 		.name = "tas2552",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tas2552_of_match),
 		.pm = &tas2552_pm,
 	},
diff --git a/sound/soc/codecs/tas2552.h b/sound/soc/codecs/tas2552.h
index 5746f8f..e34752b 100644
--- a/sound/soc/codecs/tas2552.h
+++ b/sound/soc/codecs/tas2552.h
@@ -42,7 +42,7 @@
 #define TAS2552_BOOST_APT_CTRL		0x14
 #define TAS2552_VER_NUM			0x16
 #define TAS2552_VBAT_DATA		0x19
-#define TAS2552_MAX_REG			0x20
+#define TAS2552_MAX_REG			TAS2552_VBAT_DATA
 
 /* CFG1 Register Masks */
 #define TAS2552_DEV_RESET		(1 << 0)
diff --git a/sound/soc/codecs/tas5086.c b/sound/soc/codecs/tas5086.c
index 32942be..d49d25d 100644
--- a/sound/soc/codecs/tas5086.c
+++ b/sound/soc/codecs/tas5086.c
@@ -266,10 +266,14 @@
 	struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
 	int i, val = 0;
 
-	if (priv->deemph)
-		for (i = 0; i < ARRAY_SIZE(tas5086_deemph); i++)
-			if (tas5086_deemph[i] == priv->rate)
+	if (priv->deemph) {
+		for (i = 0; i < ARRAY_SIZE(tas5086_deemph); i++) {
+			if (tas5086_deemph[i] == priv->rate) {
 				val = i;
+				break;
+			}
+		}
+	}
 
 	return regmap_update_bits(priv->regmap, TAS5086_SYS_CONTROL_1,
 				  TAS5086_DEEMPH_MASK, val);
@@ -994,7 +998,6 @@
 static struct i2c_driver tas5086_i2c_driver = {
 	.driver = {
 		.name	= "tas5086",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tas5086_dt_ids),
 	},
 	.id_table	= tas5086_i2c_id,
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 85bcc37..39307ad 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -179,7 +179,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
 			if (!IS_ERR(priv->mclk)) {
 				ret = clk_prepare_enable(priv->mclk);
 				if (ret) {
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c
index aab0af6..cb5310d 100644
--- a/sound/soc/codecs/tfa9879.c
+++ b/sound/soc/codecs/tfa9879.c
@@ -160,7 +160,7 @@
 	return 0;
 }
 
-static struct reg_default tfa9879_regs[] = {
+static const struct reg_default tfa9879_regs[] = {
 	{ TFA9879_DEVICE_CONTROL,	0x0000 }, /* 0x00 */
 	{ TFA9879_SERIAL_INTERFACE_1,	0x0a18 }, /* 0x01 */
 	{ TFA9879_PCM_IOM2_FORMAT_1,	0x0007 }, /* 0x02 */
@@ -314,7 +314,6 @@
 static struct i2c_driver tfa9879_i2c_driver = {
 	.driver = {
 		.name = "tfa9879",
-		.owner = THIS_MODULE,
 	},
 	.probe = tfa9879_i2c_probe,
 	.remove = tfa9879_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index c4c960f..ee4def4 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1121,7 +1121,7 @@
 	.num_dapm_routes	= ARRAY_SIZE(aic31xx_audio_map),
 };
 
-static struct snd_soc_dai_ops aic31xx_dai_ops = {
+static const struct snd_soc_dai_ops aic31xx_dai_ops = {
 	.hw_params	= aic31xx_hw_params,
 	.set_sysclk	= aic31xx_set_dai_sysclk,
 	.set_fmt	= aic31xx_set_dai_fmt,
@@ -1283,7 +1283,6 @@
 static struct i2c_driver aic31xx_i2c_driver = {
 	.driver = {
 		.name	= "tlv320aic31xx-codec",
-		.owner	= THIS_MODULE,
 		.of_match_table = of_match_ptr(tlv320aic31xx_of_match),
 	},
 	.probe		= aic31xx_i2c_probe,
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index ad6cb90..f2d3191 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -871,7 +871,6 @@
 static struct i2c_driver aic32x4_i2c_driver = {
 	.driver = {
 		.name = "tlv320aic32x4",
-		.owner = THIS_MODULE,
 		.of_match_table = aic32x4_of_id,
 	},
 	.probe =    aic32x4_i2c_probe,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index a7cf19b..1a82b19 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1668,7 +1668,7 @@
 };
 MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
 
-static const struct reg_default aic3007_class_d[] = {
+static const struct reg_sequence aic3007_class_d[] = {
 	/* Class-D speaker driver init; datasheet p. 46 */
 	{ AIC3X_PAGE_SELECT, 0x0D },
 	{ 0xD, 0x0D },
@@ -1825,7 +1825,6 @@
 static struct i2c_driver aic3x_i2c_driver = {
 	.driver = {
 		.name = "tlv320aic3x-codec",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tlv320aic3x_of_match),
 	},
 	.probe	= aic3x_i2c_probe,
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d67a311..781398fb 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -1585,7 +1585,6 @@
 static struct i2c_driver tlv320dac33_i2c_driver = {
 	.driver = {
 		.name = "tlv320dac33-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe		= dac33_i2c_probe,
 	.remove		= dac33_i2c_remove,
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 6fac9e0..11d85c5 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -259,8 +259,7 @@
  * TPA6130 volume. From -59.5 to 4 dB with increasing step size when going
  * down in gain.
  */
-static const unsigned int tpa6130_tlv[] = {
-	TLV_DB_RANGE_HEAD(10),
+static const DECLARE_TLV_DB_RANGE(tpa6130_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-5950, 600, 0),
 	2, 3, TLV_DB_SCALE_ITEM(-5000, 250, 0),
 	4, 5, TLV_DB_SCALE_ITEM(-4550, 160, 0),
@@ -270,8 +269,8 @@
 	12, 13, TLV_DB_SCALE_ITEM(-3040, 180, 0),
 	14, 20, TLV_DB_SCALE_ITEM(-2710, 110, 0),
 	21, 37, TLV_DB_SCALE_ITEM(-1960, 74, 0),
-	38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0),
-};
+	38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0)
+);
 
 static const struct snd_kcontrol_new tpa6130a2_controls[] = {
 	SOC_SINGLE_EXT_TLV("TPA6130A2 Headphone Playback Volume",
@@ -280,12 +279,11 @@
 		       tpa6130_tlv),
 };
 
-static const unsigned int tpa6140_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(tpa6140_tlv,
 	0, 8, TLV_DB_SCALE_ITEM(-5900, 400, 0),
 	9, 16, TLV_DB_SCALE_ITEM(-2500, 200, 0),
-	17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0),
-};
+	17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0)
+);
 
 static const struct snd_kcontrol_new tpa6140a2_controls[] = {
 	SOC_SINGLE_EXT_TLV("TPA6140A2 Headphone Playback Volume",
@@ -488,7 +486,6 @@
 static struct i2c_driver tpa6130a2_i2c_driver = {
 	.driver = {
 		.name = "tpa6130a2",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(tpa6130a2_of_match),
 	},
 	.probe = tpa6130a2_probe,
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 12232d7..4356843 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -23,11 +23,13 @@
 #include "ts3a227e.h"
 
 struct ts3a227e {
+	struct device *dev;
 	struct regmap *regmap;
 	struct snd_soc_jack *jack;
 	bool plugged;
 	bool mic_present;
 	unsigned int buttons_held;
+	int irq;
 };
 
 /* Button values to be reported on the jack */
@@ -189,16 +191,28 @@
 	struct ts3a227e *ts3a227e = (struct ts3a227e *)data;
 	struct regmap *regmap = ts3a227e->regmap;
 	unsigned int int_reg, kp_int_reg, acc_reg, i;
+	struct device *dev = ts3a227e->dev;
+	int ret;
 
 	/* Check for plug/unplug. */
-	regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+	ret = regmap_read(regmap, TS3A227E_REG_INTERRUPT, &int_reg);
+	if (ret) {
+		dev_err(dev, "failed to clear interrupt ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
 	if (int_reg & (DETECTION_COMPLETE_EVENT | INS_REM_EVENT)) {
 		regmap_read(regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
 		ts3a227e_new_jack_state(ts3a227e, acc_reg);
 	}
 
 	/* Report any key events. */
-	regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+	ret = regmap_read(regmap, TS3A227E_REG_KP_INTERRUPT, &kp_int_reg);
+	if (ret) {
+		dev_err(dev, "failed to clear key interrupt ret=%d\n", ret);
+		return IRQ_NONE;
+	}
+
 	for (i = 0; i < TS3A227E_NUM_BUTTONS; i++) {
 		if (kp_int_reg & PRESS_MASK(i))
 			ts3a227e->buttons_held |= (1 << i);
@@ -283,6 +297,8 @@
 		return -ENOMEM;
 
 	i2c_set_clientdata(i2c, ts3a227e);
+	ts3a227e->dev = dev;
+	ts3a227e->irq = i2c->irq;
 
 	ts3a227e->regmap = devm_regmap_init_i2c(i2c, &ts3a227e_regmap_config);
 	if (IS_ERR(ts3a227e->regmap))
@@ -320,6 +336,32 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int ts3a227e_suspend(struct device *dev)
+{
+	struct ts3a227e *ts3a227e = dev_get_drvdata(dev);
+
+	dev_dbg(ts3a227e->dev, "suspend disable irq\n");
+	disable_irq(ts3a227e->irq);
+
+	return 0;
+}
+
+static int ts3a227e_resume(struct device *dev)
+{
+	struct ts3a227e *ts3a227e = dev_get_drvdata(dev);
+
+	dev_dbg(ts3a227e->dev, "resume enable irq\n");
+	enable_irq(ts3a227e->irq);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops ts3a227e_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(ts3a227e_suspend, ts3a227e_resume)
+};
+
 static const struct i2c_device_id ts3a227e_i2c_ids[] = {
 	{ "ts3a227e", 0 },
 	{ }
@@ -335,7 +377,7 @@
 static struct i2c_driver ts3a227e_driver = {
 	.driver = {
 		.name = "ts3a227e",
-		.owner = THIS_MODULE,
+		.pm = &ts3a227e_pm,
 		.of_match_table = of_match_ptr(ts3a227e_of_match),
 	},
 	.probe = ts3a227e_i2c_probe,
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 90f5f04..2713e18 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -524,12 +524,11 @@
 	SOC_DAPM_SINGLE("Switch", TWL4030_REG_VDL_APGA_CTL, 2, 1, 0);
 
 /* Digital bypass gain, mute instead of -30dB */
-static const unsigned int twl4030_dapm_dbypass_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(twl4030_dapm_dbypass_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(-3000, 600, 1),
 	2, 3, TLV_DB_SCALE_ITEM(-2400, 0, 0),
-	4, 7, TLV_DB_SCALE_ITEM(-1800, 600, 0),
-};
+	4, 7, TLV_DB_SCALE_ITEM(-1800, 600, 0)
+);
 
 /* Digital bypass left (TX1L -> RX2L) */
 static const struct snd_kcontrol_new twl4030_dapm_dbypassl_control =
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 913edf2..e190263 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -37,74 +37,53 @@
 
 	struct snd_pcm_substream *master_substream;
 	struct snd_pcm_substream *slave_substream;
+
+	struct regmap *regmap;
+	struct uda134x_platform_data *pd;
 };
 
-/* In-data addresses are hard-coded into the reg-cache values */
-static const char uda134x_reg[UDA134X_REGS_NUM] = {
-	/* Extended address registers */
-	0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* Status, data regs */
-	0x00, 0x83, 0x00, 0x40, 0x80, 0xC0, 0x00,
+static const struct reg_default uda134x_reg_defaults[] = {
+	{ UDA134X_EA000, 0x04 },
+	{ UDA134X_EA001, 0x04 },
+	{ UDA134X_EA010, 0x04 },
+	{ UDA134X_EA011, 0x00 },
+	{ UDA134X_EA100, 0x00 },
+	{ UDA134X_EA101, 0x00 },
+	{ UDA134X_EA110, 0x00 },
+	{ UDA134X_EA111, 0x00 },
+	{ UDA134X_STATUS0, 0x00 },
+	{ UDA134X_STATUS1, 0x03 },
+	{ UDA134X_DATA000, 0x00 },
+	{ UDA134X_DATA001, 0x00 },
+	{ UDA134X_DATA010, 0x00 },
+	{ UDA134X_DATA011, 0x00 },
+	{ UDA134X_DATA1, 0x00 },
 };
 
 /*
- * The codec has no support for reading its registers except for peak level...
- */
-static inline unsigned int uda134x_read_reg_cache(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	u8 *cache = codec->reg_cache;
-
-	if (reg >= UDA134X_REGS_NUM)
-		return -1;
-	return cache[reg];
-}
-
-/*
- * Write the register cache
- */
-static inline void uda134x_write_reg_cache(struct snd_soc_codec *codec,
-	u8 reg, unsigned int value)
-{
-	u8 *cache = codec->reg_cache;
-
-	if (reg >= UDA134X_REGS_NUM)
-		return;
-	cache[reg] = value;
-}
-
-/*
  * Write to the uda134x registers
  *
  */
-static int uda134x_write(struct snd_soc_codec *codec, unsigned int reg,
+static int uda134x_regmap_write(void *context, unsigned int reg,
 	unsigned int value)
 {
+	struct uda134x_platform_data *pd = context;
 	int ret;
 	u8 addr;
 	u8 data = value;
-	struct uda134x_platform_data *pd = codec->control_data;
-
-	pr_debug("%s reg: %02X, value:%02X\n", __func__, reg, value);
-
-	if (reg >= UDA134X_REGS_NUM) {
-		printk(KERN_ERR "%s unknown register: reg: %u",
-		       __func__, reg);
-		return -EINVAL;
-	}
-
-	uda134x_write_reg_cache(codec, reg, value);
 
 	switch (reg) {
 	case UDA134X_STATUS0:
 	case UDA134X_STATUS1:
 		addr = UDA134X_STATUS_ADDR;
+		data |= (reg - UDA134X_STATUS0) << 7;
 		break;
 	case UDA134X_DATA000:
 	case UDA134X_DATA001:
 	case UDA134X_DATA010:
 	case UDA134X_DATA011:
 		addr = UDA134X_DATA0_ADDR;
+		data |= (reg - UDA134X_DATA000) << 6;
 		break;
 	case UDA134X_DATA1:
 		addr = UDA134X_DATA1_ADDR;
@@ -133,27 +112,28 @@
 
 static inline void uda134x_reset(struct snd_soc_codec *codec)
 {
-	u8 reset_reg = uda134x_read_reg_cache(codec, UDA134X_STATUS0);
-	uda134x_write(codec, UDA134X_STATUS0, reset_reg | (1<<6));
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	unsigned int mask = 1<<6;
+
+	regmap_update_bits(uda134x->regmap, UDA134X_STATUS0, mask, mask);
 	msleep(1);
-	uda134x_write(codec, UDA134X_STATUS0, reset_reg & ~(1<<6));
+	regmap_update_bits(uda134x->regmap, UDA134X_STATUS0, mask, 0);
 }
 
 static int uda134x_mute(struct snd_soc_dai *dai, int mute)
 {
-	struct snd_soc_codec *codec = dai->codec;
-	u8 mute_reg = uda134x_read_reg_cache(codec, UDA134X_DATA010);
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(dai->codec);
+	unsigned int mask = 1<<2;
+	unsigned int val;
 
 	pr_debug("%s mute: %d\n", __func__, mute);
 
 	if (mute)
-		mute_reg |= (1<<2);
+		val = mask;
 	else
-		mute_reg &= ~(1<<2);
+		val = 0;
 
-	uda134x_write(codec, UDA134X_DATA010, mute_reg);
-
-	return 0;
+	return regmap_update_bits(uda134x->regmap, UDA134X_DATA010, mask, val);
 }
 
 static int uda134x_startup(struct snd_pcm_substream *substream,
@@ -205,7 +185,7 @@
 {
 	struct snd_soc_codec *codec = dai->codec;
 	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
-	u8 hw_params;
+	unsigned int hw_params = 0;
 
 	if (substream == uda134x->slave_substream) {
 		pr_debug("%s ignoring hw_params for slave substream\n",
@@ -213,10 +193,6 @@
 		return 0;
 	}
 
-	hw_params = uda134x_read_reg_cache(codec, UDA134X_STATUS0);
-	hw_params &= STATUS0_SYSCLK_MASK;
-	hw_params &= STATUS0_DAIFMT_MASK;
-
 	pr_debug("%s sysclk: %d, rate:%d\n", __func__,
 		 uda134x->sysclk, params_rate(params));
 
@@ -267,9 +243,8 @@
 		return -EINVAL;
 	}
 
-	uda134x_write(codec, UDA134X_STATUS0, hw_params);
-
-	return 0;
+	return regmap_update_bits(uda134x->regmap, UDA134X_STATUS0,
+		STATUS0_SYSCLK_MASK | STATUS0_DAIFMT_MASK, hw_params);
 }
 
 static int uda134x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -324,10 +299,8 @@
 static int uda134x_set_bias_level(struct snd_soc_codec *codec,
 				  enum snd_soc_bias_level level)
 {
-	struct uda134x_platform_data *pd = codec->control_data;
-	int i;
-	u8 *cache = codec->reg_cache;
-
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	struct uda134x_platform_data *pd = uda134x->pd;
 	pr_debug("%s bias level %d\n", __func__, level);
 
 	switch (level) {
@@ -337,17 +310,17 @@
 		/* power on */
 		if (pd->power) {
 			pd->power(1);
-			/* Sync reg_cache with the hardware */
-			for (i = 0; i < ARRAY_SIZE(uda134x_reg); i++)
-				codec->driver->write(codec, i, *cache++);
+			regcache_sync(uda134x->regmap);
 		}
 		break;
 	case SND_SOC_BIAS_STANDBY:
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* power off */
-		if (pd->power)
+		if (pd->power) {
 			pd->power(0);
+			regcache_mark_dirty(uda134x->regmap);
+		}
 		break;
 	}
 	return 0;
@@ -478,21 +451,14 @@
 static int uda134x_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
-	struct uda134x_priv *uda134x;
-	struct uda134x_platform_data *pd = codec->component.card->dev->platform_data;
+	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
+	struct uda134x_platform_data *pd = uda134x->pd;
 	const struct snd_soc_dapm_widget *widgets;
 	unsigned num_widgets;
-
 	int ret;
 
 	printk(KERN_INFO "UDA134X SoC Audio Codec\n");
 
-	if (!pd) {
-		printk(KERN_ERR "UDA134X SoC codec: "
-		       "missing L3 bitbang function\n");
-		return -ENODEV;
-	}
-
 	switch (pd->model) {
 	case UDA134X_UDA1340:
 	case UDA134X_UDA1341:
@@ -506,13 +472,6 @@
 		return -EINVAL;
 	}
 
-	uda134x = kzalloc(sizeof(struct uda134x_priv), GFP_KERNEL);
-	if (uda134x == NULL)
-		return -ENOMEM;
-	snd_soc_codec_set_drvdata(codec, uda134x);
-
-	codec->control_data = pd;
-
 	if (pd->power)
 		pd->power(1);
 
@@ -530,7 +489,6 @@
 	if (ret) {
 		printk(KERN_ERR "%s failed to register dapm controls: %d",
 			__func__, ret);
-		kfree(uda134x);
 		return ret;
 	}
 
@@ -551,36 +509,19 @@
 	default:
 		printk(KERN_ERR "%s unknown codec type: %d",
 			__func__, pd->model);
-		kfree(uda134x);
 		return -EINVAL;
 	}
 
 	if (ret < 0) {
 		printk(KERN_ERR "UDA134X: failed to register controls\n");
-		kfree(uda134x);
 		return ret;
 	}
 
 	return 0;
 }
 
-/* power down chip */
-static int uda134x_soc_remove(struct snd_soc_codec *codec)
-{
-	struct uda134x_priv *uda134x = snd_soc_codec_get_drvdata(codec);
-
-	kfree(uda134x);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
 	.probe =        uda134x_soc_probe,
-	.remove =       uda134x_soc_remove,
-	.reg_cache_size = sizeof(uda134x_reg),
-	.reg_word_size = sizeof(u8),
-	.reg_cache_default = uda134x_reg,
-	.reg_cache_step = 1,
-	.read = uda134x_read_reg_cache,
 	.set_bias_level = uda134x_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -590,8 +531,39 @@
 	.num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
 };
 
+static const struct regmap_config uda134x_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = UDA134X_DATA1,
+	.reg_defaults = uda134x_reg_defaults,
+	.num_reg_defaults = ARRAY_SIZE(uda134x_reg_defaults),
+	.cache_type = REGCACHE_RBTREE,
+
+	.reg_write = uda134x_regmap_write,
+};
+
 static int uda134x_codec_probe(struct platform_device *pdev)
 {
+	struct uda134x_platform_data *pd = pdev->dev.platform_data;
+	struct uda134x_priv *uda134x;
+
+	if (!pd) {
+		dev_err(&pdev->dev, "Missing L3 bitbang function\n");
+		return -ENODEV;
+	}
+
+	uda134x = devm_kzalloc(&pdev->dev, sizeof(*uda134x), GFP_KERNEL);
+	if (!uda134x)
+		return -ENOMEM;
+
+	uda134x->pd = pd;
+	platform_set_drvdata(pdev, uda134x);
+
+	uda134x->regmap = devm_regmap_init(&pdev->dev, NULL, pd,
+		&uda134x_regmap_config);
+	if (IS_ERR(uda134x->regmap))
+		return PTR_ERR(uda134x->regmap);
+
 	return snd_soc_register_codec(&pdev->dev,
 			&soc_codec_dev_uda134x, &uda134x_dai, 1);
 }
diff --git a/sound/soc/codecs/uda134x.h b/sound/soc/codecs/uda134x.h
index 9faae06..e41ab38 100644
--- a/sound/soc/codecs/uda134x.h
+++ b/sound/soc/codecs/uda134x.h
@@ -26,8 +26,6 @@
 #define UDA134X_DATA011 13
 #define UDA134X_DATA1   14
 
-#define UDA134X_REGS_NUM 15
-
 #define STATUS0_DAIFMT_MASK (~(7<<1))
 #define STATUS0_SYSCLK_MASK (~(3<<4))
 
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 6e159f5..35f0469 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -269,12 +269,11 @@
  * from -66 dB in 0.5 dB steps (2 dB steps, really) and
  * from -52 dB in 0.25 dB steps
  */
-static const unsigned int mvol_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(mvol_tlv,
 	0, 15, TLV_DB_SCALE_ITEM(-8200, 100, 1),
 	16, 43, TLV_DB_SCALE_ITEM(-6600, 50, 0),
-	44, 252, TLV_DB_SCALE_ITEM(-5200, 25, 0),
-};
+	44, 252, TLV_DB_SCALE_ITEM(-5200, 25, 0)
+);
 
 /*
  * from -72 dB in 1.5 dB steps (6 dB steps really),
@@ -282,13 +281,12 @@
  * from -60 dB in 0.5 dB steps (2 dB steps really) and
  * from -46 dB in 0.25 dB steps
  */
-static const unsigned int vc_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(vc_tlv,
 	0, 7, TLV_DB_SCALE_ITEM(-7800, 150, 1),
 	8, 15, TLV_DB_SCALE_ITEM(-6600, 75, 0),
 	16, 43, TLV_DB_SCALE_ITEM(-6000, 50, 0),
-	44, 228, TLV_DB_SCALE_ITEM(-4600, 25, 0),
-};
+	44, 228, TLV_DB_SCALE_ITEM(-4600, 25, 0)
+);
 
 /* from 0 to 6 dB in 2 dB steps if SPF mode != flat */
 static DECLARE_TLV_DB_SCALE(tr_tlv, 0, 200, 0);
@@ -810,7 +808,6 @@
 static struct i2c_driver uda1380_i2c_driver = {
 	.driver = {
 		.name =  "uda1380-codec",
-		.owner = THIS_MODULE,
 	},
 	.probe =    uda1380_i2c_probe,
 	.remove =   uda1380_i2c_remove,
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 6560a66..f2c6ad4 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -953,7 +953,7 @@
 		trigger = IRQF_TRIGGER_FALLING;
 	trigger |= IRQF_ONESHOT;
 
-	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger | IRQF_ONESHOT,
+	ret = request_threaded_irq(irq, NULL, wm0010_irq, trigger,
 				   "wm0010", wm0010);
 	if (ret) {
 		dev_err(wm0010->dev, "Failed to request IRQ %d: %d\n",
@@ -1003,7 +1003,6 @@
 static struct spi_driver wm0010_spi_driver = {
 	.driver = {
 		.name	= "wm0010",
-		.bus 	= &spi_bus_type,
 		.owner	= THIS_MODULE,
 	},
 	.probe		= wm0010_spi_probe,
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index 048f005..ec45c5b 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -251,7 +251,6 @@
 static struct i2c_driver wm1250_ev1_i2c_driver = {
 	.driver = {
 		.name = "wm1250-ev1",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm1250_ev1_probe,
 	.remove =   wm1250_ev1_remove,
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 21d5402..786abd0 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -942,7 +942,6 @@
 static struct i2c_driver wm2000_i2c_driver = {
 	.driver = {
 		.name = "wm2000",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm2000_i2c_probe,
 	.remove = wm2000_i2c_remove,
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index c830832..fd1439e 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -166,7 +166,7 @@
 	{ .type = WMFW_ADSP1_ZM, .base = WM2200_DSP2_ZM_BASE },
 };
 
-static struct reg_default wm2200_reg_defaults[] = {
+static const struct reg_default wm2200_reg_defaults[] = {
 	{ 0x000B, 0x0000 },   /* R11    - Tone Generator 1 */
 	{ 0x0102, 0x0000 },   /* R258   - Clocking 3 */
 	{ 0x0103, 0x0011 },   /* R259   - Clocking 4 */
@@ -897,7 +897,7 @@
 	}
 }
 
-static const struct reg_default wm2200_reva_patch[] = {
+static const struct reg_sequence wm2200_reva_patch[] = {
 	{ 0x07, 0x0003 },
 	{ 0x102, 0x0200 },
 	{ 0x203, 0x0084 },
@@ -1702,7 +1702,7 @@
 	int *bclk_rates;
 
 	/* Data sizes if not using TDM */
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 	if (wl < 0)
 		return wl;
 	fl = snd_soc_params_to_frame_size(params);
@@ -2481,7 +2481,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm2200_pm = {
+static const struct dev_pm_ops wm2200_pm = {
 	SET_RUNTIME_PM_OPS(wm2200_runtime_suspend, wm2200_runtime_resume,
 			   NULL)
 };
@@ -2495,7 +2495,6 @@
 static struct i2c_driver wm2200_i2c_driver = {
 	.driver = {
 		.name = "wm2200",
-		.owner = THIS_MODULE,
 		.pm = &wm2200_pm,
 	},
 	.probe =    wm2200_i2c_probe,
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index 4c10cd8..c2cdcae 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1247,7 +1247,7 @@
 	{ "PWM2", NULL, "PWM2 Driver" },
 };
 
-static const struct reg_default wm5100_reva_patches[] = {
+static const struct reg_sequence wm5100_reva_patches[] = {
 	{ WM5100_AUDIO_IF_1_10, 0 },
 	{ WM5100_AUDIO_IF_1_11, 1 },
 	{ WM5100_AUDIO_IF_1_12, 2 },
@@ -1408,7 +1408,7 @@
 	base = dai->driver->base;
 
 	/* Data sizes if not using TDM */
-	wl = snd_pcm_format_width(params_format(params));
+	wl = params_width(params);
 	if (wl < 0)
 		return wl;
 	fl = snd_soc_params_to_frame_size(params);
@@ -2570,13 +2570,11 @@
 
 		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 			ret = request_threaded_irq(i2c->irq, NULL,
-						   wm5100_edge_irq,
-						   irq_flags | IRQF_ONESHOT,
+						   wm5100_edge_irq, irq_flags,
 						   "wm5100", wm5100);
 		else
 			ret = request_threaded_irq(i2c->irq, NULL, wm5100_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm5100",
+						   irq_flags, "wm5100",
 						   wm5100);
 
 		if (ret != 0) {
@@ -2708,7 +2706,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm5100_pm = {
+static const struct dev_pm_ops wm5100_pm = {
 	SET_RUNTIME_PM_OPS(wm5100_runtime_suspend, wm5100_runtime_resume,
 			   NULL)
 };
@@ -2722,7 +2720,6 @@
 static struct i2c_driver wm5100_i2c_driver = {
 	.driver = {
 		.name = "wm5100",
-		.owner = THIS_MODULE,
 		.pm = &wm5100_pm,
 	},
 	.probe =    wm5100_i2c_probe,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index d097f09e5..64637d1 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -788,8 +788,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -801,8 +800,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -814,8 +812,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -827,8 +824,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -851,10 +847,10 @@
 ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 ARIZONA_MIXER_CONTROLS("DSP1L", ARIZONA_DSP1LMIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("DSP1R", ARIZONA_DSP1RMIX_INPUT_1_SOURCE),
@@ -1883,7 +1879,7 @@
 	ret = snd_soc_add_codec_controls(codec,
 					 arizona_adsp2_rate_controls, 1);
 	if (ret)
-		return ret;
+		goto err_adsp2_codec_probe;
 
 	arizona_init_spk(codec);
 	arizona_init_gpio(codec);
@@ -1893,6 +1889,11 @@
 	priv->core.arizona->dapm = dapm;
 
 	return 0;
+
+err_adsp2_codec_probe:
+	wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
+
+	return ret;
 }
 
 static int wm5102_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 709fcc6..9756578 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -131,6 +131,25 @@
 	{ 0x33fb, 0xfe00 },
 };
 
+static const struct reg_default wm5110_sysclk_reve_patch[] = {
+	{ 0x3270, 0xE410 },
+	{ 0x3271, 0x3078 },
+	{ 0x3272, 0xE410 },
+	{ 0x3273, 0x3070 },
+	{ 0x3274, 0xE410 },
+	{ 0x3275, 0x3066 },
+	{ 0x3276, 0xE410 },
+	{ 0x3277, 0x3056 },
+	{ 0x327A, 0xE414 },
+	{ 0x327B, 0x3078 },
+	{ 0x327C, 0xE414 },
+	{ 0x327D, 0x3070 },
+	{ 0x327E, 0xE414 },
+	{ 0x327F, 0x3066 },
+	{ 0x3280, 0xE414 },
+	{ 0x3281, 0x3056 },
+};
+
 static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
 			    struct snd_kcontrol *kcontrol, int event)
 {
@@ -146,7 +165,9 @@
 		patch_size = ARRAY_SIZE(wm5110_sysclk_revd_patch);
 		break;
 	default:
-		return 0;
+		patch = wm5110_sysclk_reve_patch;
+		patch_size = ARRAY_SIZE(wm5110_sysclk_reve_patch);
+		break;
 	}
 
 	switch (event) {
@@ -164,6 +185,249 @@
 	return 0;
 }
 
+static const struct reg_sequence wm5110_no_dre_left_enable[] = {
+	{ 0x3024, 0xE410 },
+	{ 0x3025, 0x0056 },
+	{ 0x301B, 0x0224 },
+	{ 0x301F, 0x4263 },
+	{ 0x3021, 0x5291 },
+	{ 0x3030, 0xE410 },
+	{ 0x3031, 0x3066 },
+	{ 0x3032, 0xE410 },
+	{ 0x3033, 0x3070 },
+	{ 0x3034, 0xE410 },
+	{ 0x3035, 0x3078 },
+	{ 0x3036, 0xE410 },
+	{ 0x3037, 0x3080 },
+	{ 0x3038, 0xE410 },
+	{ 0x3039, 0x3080 },
+};
+
+static const struct reg_sequence wm5110_dre_left_enable[] = {
+	{ 0x3024, 0x0231 },
+	{ 0x3025, 0x0B00 },
+	{ 0x301B, 0x0227 },
+	{ 0x301F, 0x4266 },
+	{ 0x3021, 0x5294 },
+	{ 0x3030, 0xE231 },
+	{ 0x3031, 0x0266 },
+	{ 0x3032, 0x8231 },
+	{ 0x3033, 0x4B15 },
+	{ 0x3034, 0x8231 },
+	{ 0x3035, 0x0B15 },
+	{ 0x3036, 0xE231 },
+	{ 0x3037, 0x5294 },
+	{ 0x3038, 0x0231 },
+	{ 0x3039, 0x0B00 },
+};
+
+static const struct reg_sequence wm5110_no_dre_right_enable[] = {
+	{ 0x3074, 0xE414 },
+	{ 0x3075, 0x0056 },
+	{ 0x306B, 0x0224 },
+	{ 0x306F, 0x4263 },
+	{ 0x3071, 0x5291 },
+	{ 0x3080, 0xE414 },
+	{ 0x3081, 0x3066 },
+	{ 0x3082, 0xE414 },
+	{ 0x3083, 0x3070 },
+	{ 0x3084, 0xE414 },
+	{ 0x3085, 0x3078 },
+	{ 0x3086, 0xE414 },
+	{ 0x3087, 0x3080 },
+	{ 0x3088, 0xE414 },
+	{ 0x3089, 0x3080 },
+};
+
+static const struct reg_sequence wm5110_dre_right_enable[] = {
+	{ 0x3074, 0x0231 },
+	{ 0x3075, 0x0B00 },
+	{ 0x306B, 0x0227 },
+	{ 0x306F, 0x4266 },
+	{ 0x3071, 0x5294 },
+	{ 0x3080, 0xE231 },
+	{ 0x3081, 0x0266 },
+	{ 0x3082, 0x8231 },
+	{ 0x3083, 0x4B17 },
+	{ 0x3084, 0x8231 },
+	{ 0x3085, 0x0B17 },
+	{ 0x3086, 0xE231 },
+	{ 0x3087, 0x5294 },
+	{ 0x3088, 0x0231 },
+	{ 0x3089, 0x0B00 },
+};
+
+static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	struct arizona *arizona = priv->arizona;
+	unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE);
+	const struct reg_sequence *wseq;
+	int nregs;
+
+	switch (w->shift) {
+	case ARIZONA_OUT1L_ENA_SHIFT:
+		if (val & ARIZONA_DRE1L_ENA_MASK) {
+			wseq = wm5110_dre_left_enable;
+			nregs = ARRAY_SIZE(wm5110_dre_left_enable);
+		} else {
+			wseq = wm5110_no_dre_left_enable;
+			nregs = ARRAY_SIZE(wm5110_no_dre_left_enable);
+			priv->out_up_delay += 10;
+		}
+		break;
+	case ARIZONA_OUT1R_ENA_SHIFT:
+		if (val & ARIZONA_DRE1R_ENA_MASK) {
+			wseq = wm5110_dre_right_enable;
+			nregs = ARRAY_SIZE(wm5110_dre_right_enable);
+		} else {
+			wseq = wm5110_no_dre_right_enable;
+			nregs = ARRAY_SIZE(wm5110_no_dre_right_enable);
+			priv->out_up_delay += 10;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	return regmap_multi_reg_write(arizona->regmap, wseq, nregs);
+}
+
+static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+	unsigned int val = snd_soc_read(codec, ARIZONA_DRE_ENABLE);
+
+	switch (w->shift) {
+	case ARIZONA_OUT1L_ENA_SHIFT:
+		if (!(val & ARIZONA_DRE1L_ENA_MASK)) {
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG1, ARIZONA_WS_TRG1);
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG1, 0);
+			priv->out_down_delay += 27;
+		}
+		break;
+	case ARIZONA_OUT1R_ENA_SHIFT:
+		if (!(val & ARIZONA_DRE1R_ENA_MASK)) {
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG2, ARIZONA_WS_TRG2);
+			snd_soc_update_bits(codec, ARIZONA_SPARE_TRIGGERS,
+					    ARIZONA_WS_TRG2, 0);
+			priv->out_down_delay += 27;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int wm5110_hp_ev(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+	switch (priv->arizona->rev) {
+	case 0 ... 3:
+		break;
+	default:
+		switch (event) {
+		case SND_SOC_DAPM_PRE_PMU:
+			wm5110_hp_pre_enable(w);
+			break;
+		case SND_SOC_DAPM_PRE_PMD:
+			wm5110_hp_pre_disable(w);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+
+	return arizona_hp_ev(w, kcontrol, event);
+}
+
+static int wm5110_clear_pga_volume(struct arizona *arizona, int output)
+{
+	struct reg_sequence clear_pga = {
+		ARIZONA_OUTPUT_PATH_CONFIG_1L + output * 4, 0x80
+	};
+	int ret;
+
+	ret = regmap_multi_reg_write_bypassed(arizona->regmap, &clear_pga, 1);
+	if (ret)
+		dev_err(arizona->dev, "Failed to clear PGA (0x%x): %d\n",
+			clear_pga.reg, ret);
+
+	return ret;
+}
+
+static int wm5110_put_dre(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+	struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int ena, dre;
+	unsigned int mask = (0x1 << mc->shift) | (0x1 << mc->rshift);
+	unsigned int lnew = (!!ucontrol->value.integer.value[0]) << mc->shift;
+	unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift;
+	unsigned int lold, rold;
+	unsigned int lena, rena;
+	int ret;
+
+	snd_soc_dapm_mutex_lock(dapm);
+
+	ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &ena);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to read output state: %d\n", ret);
+		goto err;
+	}
+	ret = regmap_read(arizona->regmap, ARIZONA_DRE_ENABLE, &dre);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to read DRE state: %d\n", ret);
+		goto err;
+	}
+
+	lold = dre & (1 << mc->shift);
+	rold = dre & (1 << mc->rshift);
+	/* Enables are channel wise swapped from the DRE enables */
+	lena = ena & (1 << mc->rshift);
+	rena = ena & (1 << mc->shift);
+
+	if ((lena && lnew != lold) || (rena && rnew != rold)) {
+		dev_err(arizona->dev, "Can't change DRE on active outputs\n");
+		ret = -EBUSY;
+		goto err;
+	}
+
+	ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE,
+				 mask, lnew | rnew);
+	if (ret) {
+		dev_err(arizona->dev, "Failed to set DRE: %d\n", ret);
+		goto err;
+	}
+
+	/* Force reset of PGA volumes, if turning DRE off */
+	if (!lnew && lold)
+		wm5110_clear_pga_volume(arizona, mc->shift);
+
+	if (!rnew && rold)
+		wm5110_clear_pga_volume(arizona, mc->rshift);
+
+err:
+	snd_soc_dapm_mutex_unlock(dapm);
+
+	return ret;
+}
+
 static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
@@ -247,8 +511,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -260,8 +523,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -273,8 +535,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -286,8 +547,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -314,10 +574,10 @@
 ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
 SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
@@ -409,12 +669,15 @@
 SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
 	   ARIZONA_SPK2R_MUTE_SHIFT, 1, 1),
 
-SOC_DOUBLE("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0),
-SOC_DOUBLE("HPOUT2 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0),
-SOC_DOUBLE("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE,
-	   ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0),
+SOC_DOUBLE_EXT("HPOUT1 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE1L_ENA_SHIFT, ARIZONA_DRE1R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
+SOC_DOUBLE_EXT("HPOUT2 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE2L_ENA_SHIFT, ARIZONA_DRE2R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
+SOC_DOUBLE_EXT("HPOUT3 DRE Switch", ARIZONA_DRE_ENABLE,
+	   ARIZONA_DRE3L_ENA_SHIFT, ARIZONA_DRE3R_ENA_SHIFT, 1, 0,
+	   snd_soc_get_volsw, wm5110_put_dre),
 
 SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
 SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
@@ -904,11 +1167,11 @@
 		    ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
 
 SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
-		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+		   ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev,
 		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
 		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
-		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+		   ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, wm5110_hp_ev,
 		   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
 		   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
 SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
@@ -1611,18 +1874,24 @@
 	for (i = 0; i < WM5110_NUM_ADSP; ++i) {
 		ret = wm_adsp2_codec_probe(&priv->core.adsp[i], codec);
 		if (ret)
-			return ret;
+			goto err_adsp2_codec_probe;
 	}
 
 	ret = snd_soc_add_codec_controls(codec,
 					 arizona_adsp2_rate_controls,
 					 WM5110_NUM_ADSP);
 	if (ret)
-		return ret;
+		goto err_adsp2_codec_probe;
 
 	snd_soc_dapm_disable_pin(dapm, "HAPTICS");
 
 	return 0;
+
+err_adsp2_codec_probe:
+	for (--i; i >= 0; --i)
+		wm_adsp2_codec_remove(&priv->core.adsp[i], codec);
+
+	return ret;
 }
 
 static int wm5110_codec_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 41c62c1..ffbf3df 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -394,11 +394,10 @@
 static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1);
 static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1);
 
-static const unsigned int capture_sd_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(capture_sd_tlv,
 	0, 12, TLV_DB_SCALE_ITEM(-3600, 300, 1),
-	13, 15, TLV_DB_SCALE_ITEM(0, 0, 0),
-};
+	13, 15, TLV_DB_SCALE_ITEM(0, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm8350_snd_controls[] = {
 	SOC_ENUM("Playback Deemphasis", wm8350_enum[0]),
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index d755508..b1d346a 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -370,10 +370,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
-};
+static const DECLARE_TLV_DB_SCALE(in_mix_tlv, -1200, 600, 0);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8400_dapm_lin12_pga_controls[] = {
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index dac5beb..b098a83 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -598,6 +598,7 @@
 	{ .compatible = "wlf,wm8510" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8510_of_match);
 
 static const struct regmap_config wm8510_regmap = {
 	.reg_bits = 7,
@@ -690,7 +691,6 @@
 static struct i2c_driver wm8510_i2c_driver = {
 	.driver = {
 		.name = "wm8510",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8510_of_match,
 	},
 	.probe =    wm8510_i2c_probe,
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 43ea8ae..aa287a3 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -430,6 +430,7 @@
 	{ .compatible = "wlf,wm8523" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8523_of_match);
 
 static const struct regmap_config wm8523_regmap = {
 	.reg_bits = 8,
@@ -534,7 +535,6 @@
 static struct i2c_driver wm8523_i2c_driver = {
 	.driver = {
 		.name = "wm8523",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8523_of_match,
 	},
 	.probe =    wm8523_i2c_probe,
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 759a792..66602bf 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -916,6 +916,7 @@
 	{ .compatible = "wlf,wm8580" },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, wm8580_of_match);
 
 static const struct regmap_config wm8580_regmap = {
 	.reg_bits = 7,
@@ -978,7 +979,6 @@
 static struct i2c_driver wm8580_i2c_driver = {
 	.driver = {
 		.name = "wm8580",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8580_of_match,
 	},
 	.probe =    wm8580_i2c_probe,
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index cc8251f..44b9e0a 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -478,7 +478,6 @@
 static struct i2c_driver wm8711_i2c_driver = {
 	.driver = {
 		.name = "wm8711",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8711_of_match,
 	},
 	.probe =    wm8711_i2c_probe,
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index f1a173e..cd7b024 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -319,7 +319,6 @@
 static struct i2c_driver wm8728_i2c_driver = {
 	.driver = {
 		.name = "wm8728",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8728_of_match,
 	},
 	.probe =    wm8728_i2c_probe,
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 915ea11..ace8645 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -79,12 +79,7 @@
 	return reg == WM8731_RESET;
 }
 
-static bool wm8731_writeable(struct device *dev, unsigned int reg)
-{
-	return reg <= WM8731_RESET;
-}
-
-#define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)
+#define wm8731_reset(m)	regmap_write(m, WM8731_RESET, 0)
 
 static const char *wm8731_input_select[] = {"Line In", "Mic"};
 
@@ -496,8 +491,11 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		if (wm8731->mclk)
-			clk_prepare_enable(wm8731->mclk);
+		if (wm8731->mclk) {
+			ret = clk_prepare_enable(wm8731->mclk);
+			if (ret)
+				return ret;
+		}
 		break;
 	case SND_SOC_BIAS_PREPARE:
 		break;
@@ -571,69 +569,63 @@
 	.symmetric_rates = 1,
 };
 
-static int wm8731_probe(struct snd_soc_codec *codec)
+static int wm8731_request_supplies(struct device *dev,
+		struct wm8731_priv *wm8731)
 {
-	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
 	int ret = 0, i;
 
 	for (i = 0; i < ARRAY_SIZE(wm8731->supplies); i++)
 		wm8731->supplies[i].supply = wm8731_supply_names[i];
 
-	ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8731->supplies),
+	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(wm8731->supplies),
 				 wm8731->supplies);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		dev_err(dev, "Failed to request supplies: %d\n", ret);
 		return ret;
 	}
 
 	ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
 				    wm8731->supplies);
 	if (ret != 0) {
-		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		dev_err(dev, "Failed to enable supplies: %d\n", ret);
 		return ret;
 	}
 
-	ret = wm8731_reset(codec);
+	return 0;
+}
+
+static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
+{
+	int ret = 0;
+
+	ret = wm8731_reset(wm8731->regmap);
 	if (ret < 0) {
-		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		dev_err(dev, "Failed to issue reset: %d\n", ret);
 		goto err_regulator_enable;
 	}
 
-	snd_soc_codec_force_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	/* Clear POWEROFF, keep everything else disabled */
+	regmap_write(wm8731->regmap, WM8731_PWR, 0x7f);
 
 	/* Latch the update bits */
-	snd_soc_update_bits(codec, WM8731_LOUT1V, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_ROUT1V, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_LINVOL, 0x100, 0);
-	snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_LOUT1V, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_ROUT1V, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_LINVOL, 0x100, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_RINVOL, 0x100, 0);
 
 	/* Disable bypass path by default */
-	snd_soc_update_bits(codec, WM8731_APANA, 0x8, 0);
+	regmap_update_bits(wm8731->regmap, WM8731_APANA, 0x8, 0);
 
-	/* Regulators will have been enabled by bias management */
-	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
-	return 0;
+	regcache_mark_dirty(wm8731->regmap);
 
 err_regulator_enable:
+	/* Regulators will be enabled by bias management */
 	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
 
 	return ret;
 }
 
-/* power down chip */
-static int wm8731_remove(struct snd_soc_codec *codec)
-{
-	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
-
-	regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_wm8731 = {
-	.probe =	wm8731_probe,
-	.remove =	wm8731_remove,
 	.set_bias_level = wm8731_set_bias_level,
 	.suspend_bias_off = true,
 
@@ -658,7 +650,6 @@
 
 	.max_register = WM8731_RESET,
 	.volatile_reg = wm8731_volatile,
-	.writeable_reg = wm8731_writeable,
 
 	.cache_type = REGCACHE_RBTREE,
 	.reg_defaults = wm8731_reg_defaults,
@@ -690,6 +681,12 @@
 
 	mutex_init(&wm8731->lock);
 
+	spi_set_drvdata(spi, wm8731);
+
+	ret = wm8731_request_supplies(&spi->dev, wm8731);
+	if (ret != 0)
+		return ret;
+
 	wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
 	if (IS_ERR(wm8731->regmap)) {
 		ret = PTR_ERR(wm8731->regmap);
@@ -698,7 +695,9 @@
 		return ret;
 	}
 
-	spi_set_drvdata(spi, wm8731);
+	ret = wm8731_hw_init(&spi->dev, wm8731);
+	if (ret != 0)
+		return ret;
 
 	ret = snd_soc_register_codec(&spi->dev,
 			&soc_codec_dev_wm8731, &wm8731_dai, 1);
@@ -754,6 +753,12 @@
 
 	mutex_init(&wm8731->lock);
 
+	i2c_set_clientdata(i2c, wm8731);
+
+	ret = wm8731_request_supplies(&i2c->dev, wm8731);
+	if (ret != 0)
+		return ret;
+
 	wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
 	if (IS_ERR(wm8731->regmap)) {
 		ret = PTR_ERR(wm8731->regmap);
@@ -762,7 +767,9 @@
 		return ret;
 	}
 
-	i2c_set_clientdata(i2c, wm8731);
+	ret = wm8731_hw_init(&i2c->dev, wm8731);
+	if (ret != 0)
+		return ret;
 
 	ret = snd_soc_register_codec(&i2c->dev,
 			&soc_codec_dev_wm8731, &wm8731_dai, 1);
@@ -789,7 +796,6 @@
 static struct i2c_driver wm8731_i2c_driver = {
 	.driver = {
 		.name = "wm8731",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8731_of_match,
 	},
 	.probe =    wm8731_i2c_probe,
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
index 6ad606f..e4a03d9 100644
--- a/sound/soc/codecs/wm8737.c
+++ b/sound/soc/codecs/wm8737.c
@@ -79,13 +79,12 @@
 	return snd_soc_write(codec, WM8737_RESET, 0);
 }
 
-static const unsigned int micboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(micboost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
 static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
@@ -657,7 +656,6 @@
 static struct i2c_driver wm8737_i2c_driver = {
 	.driver = {
 		.name = "wm8737",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8737_of_match,
 	},
 	.probe =    wm8737_i2c_probe,
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index b346237..de42c03 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -61,25 +61,6 @@
 	{ 32, 0x0002 },     /* R32 - ADDITONAL_CONTROL_1 */
 };
 
-static bool wm8741_readable(struct device *dev, unsigned int reg)
-{
-	switch (reg) {
-	case WM8741_DACLLSB_ATTENUATION:
-	case WM8741_DACLMSB_ATTENUATION:
-	case WM8741_DACRLSB_ATTENUATION:
-	case WM8741_DACRMSB_ATTENUATION:
-	case WM8741_VOLUME_CONTROL:
-	case WM8741_FORMAT_CONTROL:
-	case WM8741_FILTER_CONTROL:
-	case WM8741_MODE_CONTROL_1:
-	case WM8741_MODE_CONTROL_2:
-	case WM8741_ADDITIONAL_CONTROL_1:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int wm8741_reset(struct snd_soc_codec *codec)
 {
 	return snd_soc_write(codec, WM8741_RESET, 0);
@@ -278,51 +259,38 @@
 	switch (freq) {
 	case 0:
 		wm8741->sysclk_constraints = NULL;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 11289600:
 		wm8741->sysclk_constraints = &constraints_11289;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 12288000:
 		wm8741->sysclk_constraints = &constraints_12288;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 16384000:
 		wm8741->sysclk_constraints = &constraints_16384;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 16934400:
 		wm8741->sysclk_constraints = &constraints_16934;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 18432000:
 		wm8741->sysclk_constraints = &constraints_18432;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 22579200:
 	case 33868800:
 		wm8741->sysclk_constraints = &constraints_22579;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 24576000:
 		wm8741->sysclk_constraints = &constraints_24576;
-		wm8741->sysclk = freq;
-		return 0;
-
+		break;
 	case 36864000:
 		wm8741->sysclk_constraints = &constraints_36864;
-		wm8741->sysclk = freq;
-		return 0;
+		break;
+	default:
+		return -EINVAL;
 	}
-	return -EINVAL;
+
+	wm8741->sysclk = freq;
+	return 0;
 }
 
 static int wm8741_set_dai_fmt(struct snd_soc_dai *codec_dai,
@@ -554,8 +522,6 @@
 	.reg_defaults = wm8741_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8741_reg_defaults),
 	.cache_type = REGCACHE_RBTREE,
-
-	.readable_reg = wm8741_readable,
 };
 
 static int wm8741_set_pdata(struct device *dev, struct wm8741_priv *wm8741)
@@ -633,7 +599,6 @@
 static struct i2c_driver wm8741_i2c_driver = {
 	.driver = {
 		.name = "wm8741",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8741_of_match,
 	},
 	.probe =    wm8741_i2c_probe,
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 56d89b0..873933a 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -826,7 +826,6 @@
 static struct i2c_driver wm8750_i2c_driver = {
 	.driver = {
 		.name = "wm8750",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8750_of_match,
 	},
 	.probe =    wm8750_i2c_probe,
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index feb2997a..a801c6d 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -138,11 +138,6 @@
 	return reg == WM8753_RESET;
 }
 
-static bool wm8753_writeable(struct device *dev, unsigned int reg)
-{
-	return reg <= WM8753_ADCTL2;
-}
-
 /* codec private data */
 struct wm8753_priv {
 	struct regmap *regmap;
@@ -276,12 +271,11 @@
 static const DECLARE_TLV_DB_SCALE(mic_preamp_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1);
 static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
-static const unsigned int out_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(out_tlv,
 	/* 0000000 - 0101111 = "Analogue mute" */
 	0, 48, TLV_DB_SCALE_ITEM(-25500, 0, 0),
-	48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0),
-};
+	48, 127, TLV_DB_SCALE_ITEM(-7300, 100, 0)
+);
 static const DECLARE_TLV_DB_SCALE(mix_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(voice_mix_tlv, -1200, 300, 0);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0);
@@ -1510,7 +1504,6 @@
 	.val_bits = 9,
 
 	.max_register = WM8753_ADCTL2,
-	.writeable_reg = wm8753_writeable,
 	.volatile_reg = wm8753_volatile,
 
 	.cache_type = REGCACHE_RBTREE,
@@ -1609,7 +1602,6 @@
 static struct i2c_driver wm8753_i2c_driver = {
 	.driver = {
 		.name = "wm8753",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8753_of_match,
 	},
 	.probe =    wm8753_i2c_probe,
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index ece9b44..183c9a4 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -265,7 +265,7 @@
 	}
 
 	/* Set word length */
-	switch (snd_pcm_format_width(params_format(params))) {
+	switch (params_width(params)) {
 	case 16:
 		iface = 0;
 		break;
@@ -280,7 +280,7 @@
 		break;
 	default:
 		dev_err(codec->dev, "Unsupported sample size: %i\n",
-			snd_pcm_format_width(params_format(params)));
+			params_width(params));
 		return -EINVAL;
 	}
 
@@ -536,7 +536,6 @@
 static struct i2c_driver wm8776_i2c_driver = {
 	.driver = {
 		.name = "wm8776",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8776_of_match,
 	},
 	.probe =    wm8776_i2c_probe,
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index 6596f5f..f27464c 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -49,7 +49,6 @@
 static struct i2c_driver wm8804_i2c_driver = {
 	.driver = {
 		.name = "wm8804",
-		.owner = THIS_MODULE,
 		.pm = &wm8804_pm,
 		.of_match_table = wm8804_of_match,
 	},
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index c195c2e..8d91470 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -98,7 +98,7 @@
 WM8804_REGULATOR_EVENT(1)
 
 static const char *txsrc_text[] = { "S/PDIF RX", "AIF" };
-static const SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
+static SOC_ENUM_SINGLE_DECL(txsrc, WM8804_SPDTX4, 6, txsrc_text);
 
 static const struct snd_kcontrol_new wm8804_tx_source_mux[] = {
 	SOC_DAPM_ENUM_EXT("Input Source", txsrc,
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index f3759ec..98900aa 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1312,7 +1312,6 @@
 static struct i2c_driver wm8900_i2c_driver = {
 	.driver = {
 		.name = "wm8900",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8900_i2c_probe,
 	.remove =   wm8900_i2c_remove,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index b5322c1..b011253 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -2193,7 +2193,6 @@
 static struct i2c_driver wm8903_i2c_driver = {
 	.driver = {
 		.name = "wm8903",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8903_of_match,
 	},
 	.probe =    wm8903_i2c_probe,
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 265a4a5..b783743 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1837,7 +1837,9 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		clk_prepare_enable(wm8904->mclk);
+		ret = clk_prepare_enable(wm8904->mclk);
+		if (ret)
+			return ret;
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
@@ -2292,7 +2294,6 @@
 static struct i2c_driver wm8904_i2c_driver = {
 	.driver = {
 		.name = "wm8904",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(wm8904_of_match),
 	},
 	.probe =    wm8904_i2c_probe,
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 98ef0ba..f6f9395 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -787,7 +787,6 @@
 static struct i2c_driver wm8940_i2c_driver = {
 	.driver = {
 		.name = "wm8940",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8940_i2c_probe,
 	.remove =   wm8940_i2c_remove,
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2d591c2..12e4435 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -1009,7 +1009,6 @@
 static struct i2c_driver wm8955_i2c_driver = {
 	.driver = {
 		.name = "wm8955",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8955_i2c_probe,
 	.remove =   wm8955_i2c_remove,
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 94c5c46..e3b7d0c 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -48,6 +48,9 @@
 #define WM8960_DISOP     0x40
 #define WM8960_DRES_MASK 0x30
 
+static bool is_pll_freq_available(unsigned int source, unsigned int target);
+static int wm8960_set_pll(struct snd_soc_codec *codec,
+		unsigned int freq_in, unsigned int freq_out);
 /*
  * wm8960 register cache
  * We can't read the WM8960 register space when we are
@@ -126,9 +129,12 @@
 	struct snd_soc_dapm_widget *rout1;
 	struct snd_soc_dapm_widget *out3;
 	bool deemph;
-	int playback_fs;
+	int lrclk;
 	int bclk;
 	int sysclk;
+	int clk_id;
+	int freq_in;
+	bool is_stream_in_use[2];
 	struct wm8960_data pdata;
 };
 
@@ -164,8 +170,8 @@
 	if (wm8960->deemph) {
 		best = 1;
 		for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
-			if (abs(deemph_settings[i] - wm8960->playback_fs) <
-			    abs(deemph_settings[best] - wm8960->playback_fs))
+			if (abs(deemph_settings[i] - wm8960->lrclk) <
+			    abs(deemph_settings[best] - wm8960->lrclk))
 				best = i;
 		}
 
@@ -565,6 +571,9 @@
 	{  8000, 5 },
 };
 
+/* -1 for reserved value */
+static const int sysclk_divs[] = { 1, -1, 2, -1 };
+
 /* Multiply 256 for internal 256 div */
 static const int dac_divs[] = { 256, 384, 512, 768, 1024, 1408, 1536 };
 
@@ -574,61 +583,110 @@
 	120, 160, 220, 240, 320, 320, 320
 };
 
-static void wm8960_configure_clocking(struct snd_soc_codec *codec,
-		bool tx, int lrclk)
+static int wm8960_configure_clocking(struct snd_soc_codec *codec)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	int sysclk, bclk, lrclk, freq_out, freq_in;
 	u16 iface1 = snd_soc_read(codec, WM8960_IFACE1);
-	u16 iface2 = snd_soc_read(codec, WM8960_IFACE2);
-	u32 sysclk;
-	int i, j;
+	int i, j, k;
 
 	if (!(iface1 & (1<<6))) {
 		dev_dbg(codec->dev,
 			"Codec is slave mode, no need to configure clock\n");
-		return;
+		return 0;
 	}
 
-	if (!wm8960->sysclk) {
-		dev_dbg(codec->dev, "No SYSCLK configured\n");
-		return;
+	if (wm8960->clk_id != WM8960_SYSCLK_MCLK && !wm8960->freq_in) {
+		dev_err(codec->dev, "No MCLK configured\n");
+		return -EINVAL;
 	}
 
-	if (!wm8960->bclk || !lrclk) {
-		dev_dbg(codec->dev, "No audio clocks configured\n");
-		return;
+	freq_in = wm8960->freq_in;
+	bclk = wm8960->bclk;
+	lrclk = wm8960->lrclk;
+	/*
+	 * If it's sysclk auto mode, check if the MCLK can provide sysclk or
+	 * not. If MCLK can provide sysclk, using MCLK to provide sysclk
+	 * directly. Otherwise, auto select a available pll out frequency
+	 * and set PLL.
+	 */
+	if (wm8960->clk_id == WM8960_SYSCLK_AUTO) {
+		/* disable the PLL and using MCLK to provide sysclk */
+		wm8960_set_pll(codec, 0, 0);
+		freq_out = freq_in;
+	} else if (wm8960->sysclk) {
+		freq_out = wm8960->sysclk;
+	} else {
+		dev_err(codec->dev, "No SYSCLK configured\n");
+		return -EINVAL;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(dac_divs); ++i) {
-		if (wm8960->sysclk == lrclk * dac_divs[i]) {
-			for (j = 0; j < ARRAY_SIZE(bclk_divs); ++j) {
-				sysclk = wm8960->bclk * bclk_divs[j] / 10;
-				if (wm8960->sysclk == sysclk)
+	/* check if the sysclk frequency is available. */
+	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+		if (sysclk_divs[i] == -1)
+			continue;
+		sysclk = freq_out / sysclk_divs[i];
+		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+			if (sysclk == dac_divs[j] * lrclk) {
+				for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
+					if (sysclk == bclk * bclk_divs[k] / 10)
+						break;
+				if (k != ARRAY_SIZE(bclk_divs))
 					break;
 			}
-			if(j != ARRAY_SIZE(bclk_divs))
+		}
+		if (j != ARRAY_SIZE(dac_divs))
+			break;
+	}
+
+	if (i != ARRAY_SIZE(sysclk_divs)) {
+		goto configure_clock;
+	} else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
+		dev_err(codec->dev, "failed to configure clock\n");
+		return -EINVAL;
+	}
+	/* get a available pll out frequency and set pll */
+	for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+		if (sysclk_divs[i] == -1)
+			continue;
+		for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+			sysclk = lrclk * dac_divs[j];
+			freq_out = sysclk * sysclk_divs[i];
+
+			for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) {
+				if (sysclk == bclk * bclk_divs[k] / 10 &&
+				    is_pll_freq_available(freq_in, freq_out)) {
+					wm8960_set_pll(codec,
+						       freq_in, freq_out);
+					break;
+				} else {
+					continue;
+				}
+			}
+			if (k != ARRAY_SIZE(bclk_divs))
 				break;
 		}
+		if (j != ARRAY_SIZE(dac_divs))
+			break;
 	}
 
-	if (i == ARRAY_SIZE(dac_divs)) {
-		dev_err(codec->dev, "Unsupported sysclk %d\n", wm8960->sysclk);
-		return;
+	if (i == ARRAY_SIZE(sysclk_divs)) {
+		dev_err(codec->dev, "failed to configure clock\n");
+		return -EINVAL;
 	}
 
-	/*
-	 * configure frame clock. If ADCLRC configure as GPIO pin, DACLRC
-	 * pin is used as a frame clock for ADCs and DACs.
-	 */
-	if (iface2 & (1<<6))
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, i << 3);
-	else if (tx)
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, i << 3);
-	else if (!tx)
-		snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 6, i << 6);
+configure_clock:
+	/* configure sysclk clock */
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 3 << 1, i << 1);
+
+	/* configure frame clock */
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 3, j << 3);
+	snd_soc_update_bits(codec, WM8960_CLOCK1, 0x7 << 6, j << 6);
 
 	/* configure bit clock */
-	snd_soc_update_bits(codec, WM8960_CLOCK2, 0xf, j);
+	snd_soc_update_bits(codec, WM8960_CLOCK2, 0xf, k);
+
+	return 0;
 }
 
 static int wm8960_hw_params(struct snd_pcm_substream *substream,
@@ -667,9 +725,9 @@
 		return -EINVAL;
 	}
 
+	wm8960->lrclk = params_rate(params);
 	/* Update filters for the new rate */
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		wm8960->playback_fs = params_rate(params);
+	if (tx) {
 		wm8960_set_deemph(codec);
 	} else {
 		for (i = 0; i < ARRAY_SIZE(alc_rates); i++)
@@ -682,7 +740,23 @@
 	/* set iface */
 	snd_soc_write(codec, WM8960_IFACE1, iface);
 
-	wm8960_configure_clocking(codec, tx, params_rate(params));
+	wm8960->is_stream_in_use[tx] = true;
+
+	if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON &&
+	    !wm8960->is_stream_in_use[!tx])
+		return wm8960_configure_clocking(codec);
+
+	return 0;
+}
+
+static int wm8960_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+
+	wm8960->is_stream_in_use[tx] = false;
 
 	return 0;
 }
@@ -702,6 +776,7 @@
 				      enum snd_soc_bias_level level)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	u16 pm2 = snd_soc_read(codec, WM8960_POWER2);
 	int ret;
 
 	switch (level) {
@@ -721,11 +796,22 @@
 				}
 			}
 
+			ret = wm8960_configure_clocking(codec);
+			if (ret)
+				return ret;
+
 			/* Set VMID to 2x50k */
 			snd_soc_update_bits(codec, WM8960_POWER1, 0x180, 0x80);
 			break;
 
 		case SND_SOC_BIAS_ON:
+			/*
+			 * If it's sysclk auto mode, and the pll is enabled,
+			 * disable the pll
+			 */
+			if (wm8960->clk_id == WM8960_SYSCLK_AUTO && (pm2 & 0x1))
+				wm8960_set_pll(codec, 0, 0);
+
 			if (!IS_ERR(wm8960->mclk))
 				clk_disable_unprepare(wm8960->mclk);
 			break;
@@ -780,6 +866,7 @@
 					 enum snd_soc_bias_level level)
 {
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	u16 pm2 = snd_soc_read(codec, WM8960_POWER2);
 	int reg, ret;
 
 	switch (level) {
@@ -831,9 +918,21 @@
 					return ret;
 				}
 			}
+
+			ret = wm8960_configure_clocking(codec);
+			if (ret)
+				return ret;
+
 			break;
 
 		case SND_SOC_BIAS_ON:
+			/*
+			 * If it's sysclk auto mode, and the pll is enabled,
+			 * disable the pll
+			 */
+			if (wm8960->clk_id == WM8960_SYSCLK_AUTO && (pm2 & 0x1))
+				wm8960_set_pll(codec, 0, 0);
+
 			if (!IS_ERR(wm8960->mclk))
 				clk_disable_unprepare(wm8960->mclk);
 
@@ -892,6 +991,28 @@
 	u32 k:24;
 };
 
+static bool is_pll_freq_available(unsigned int source, unsigned int target)
+{
+	unsigned int Ndiv;
+
+	if (source == 0 || target == 0)
+		return false;
+
+	/* Scale up target to PLL operating frequency */
+	target *= 4;
+	Ndiv = target / source;
+
+	if (Ndiv < 6) {
+		source >>= 1;
+		Ndiv = target / source;
+	}
+
+	if ((Ndiv < 6) || (Ndiv > 12))
+		return false;
+
+	return true;
+}
+
 /* The size in bits of the pll divide multiplied by 10
  * to allow rounding later */
 #define FIXED_PLL_SIZE ((1 << 24) * 10)
@@ -943,10 +1064,9 @@
 	return 0;
 }
 
-static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
-		int source, unsigned int freq_in, unsigned int freq_out)
+static int wm8960_set_pll(struct snd_soc_codec *codec,
+		unsigned int freq_in, unsigned int freq_out)
 {
-	struct snd_soc_codec *codec = codec_dai->codec;
 	u16 reg;
 	static struct _pll_div pll_div;
 	int ret;
@@ -986,6 +1106,20 @@
 	return 0;
 }
 
+static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+		int source, unsigned int freq_in, unsigned int freq_out)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+
+	wm8960->freq_in = freq_in;
+
+	if (pll_id == WM8960_SYSCLK_AUTO)
+		return 0;
+
+	return wm8960_set_pll(codec, freq_in, freq_out);
+}
+
 static int wm8960_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
 		int div_id, int div)
 {
@@ -1043,11 +1177,14 @@
 		snd_soc_update_bits(codec, WM8960_CLOCK1,
 					0x1, WM8960_SYSCLK_PLL);
 		break;
+	case WM8960_SYSCLK_AUTO:
+		break;
 	default:
 		return -EINVAL;
 	}
 
 	wm8960->sysclk = freq;
+	wm8960->clk_id = clk_id;
 
 	return 0;
 }
@@ -1060,6 +1197,7 @@
 
 static const struct snd_soc_dai_ops wm8960_dai_ops = {
 	.hw_params = wm8960_hw_params,
+	.hw_free = wm8960_hw_free,
 	.digital_mute = wm8960_mute,
 	.set_fmt = wm8960_set_dai_fmt,
 	.set_clkdiv = wm8960_set_dai_clkdiv,
@@ -1216,7 +1354,6 @@
 static struct i2c_driver wm8960_i2c_driver = {
 	.driver = {
 		.name = "wm8960",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8960_of_match,
 	},
 	.probe =    wm8960_i2c_probe,
diff --git a/sound/soc/codecs/wm8960.h b/sound/soc/codecs/wm8960.h
index 2d8163d..ab3220d 100644
--- a/sound/soc/codecs/wm8960.h
+++ b/sound/soc/codecs/wm8960.h
@@ -82,6 +82,7 @@
 
 #define WM8960_SYSCLK_MCLK		(0 << 0)
 #define WM8960_SYSCLK_PLL		(1 << 0)
+#define WM8960_SYSCLK_AUTO		(2 << 0)
 
 #define WM8960_DAC_DIV_1		(0 << 3)
 #define WM8960_DAC_DIV_1_5		(1 << 3)
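
The wm8960 changes above rework the codec clocking: wm8960_configure_clocking() now derives SYSCLK either straight from MCLK or via the PLL, and the new WM8960_SYSCLK_AUTO clock id lets a machine driver leave that choice to the codec (the bias-level code switches the PLL back off at BIAS_ON when it is not needed). A minimal sketch of a machine driver opting into the automatic mode; the hw_params placement and the 24.576 MHz MCLK figure are illustrative assumptions, not values taken from the patch:

	#include <sound/soc.h>
	#include "../codecs/wm8960.h"	/* include path as used by fsl-asoc-card */

	static int example_hw_params(struct snd_pcm_substream *substream,
				     struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		struct snd_soc_dai *codec_dai = rtd->codec_dai;

		/* WM8960_SYSCLK_AUTO: the codec decides between MCLK and the
		 * PLL on its own; 24576000 Hz is an assumed board MCLK rate. */
		return snd_soc_dai_set_sysclk(codec_dai, WM8960_SYSCLK_AUTO,
					      24576000, SND_SOC_CLOCK_IN);
	}
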
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index a057662..e30446a 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -331,13 +331,12 @@
 static const DECLARE_TLV_DB_SCALE(hp_sec_tlv, -700, 100, 0);
 static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
-static unsigned int boost_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(boost_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(0,  0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(29, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(pga_tlv, -2325, 75, 0);
 
 static const struct snd_kcontrol_new wm8961_snd_controls[] = {
@@ -982,7 +981,6 @@
 static struct i2c_driver wm8961_i2c_driver = {
 	.driver = {
 		.name = "wm8961",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8961_i2c_probe,
 	.remove =   wm8961_i2c_remove,
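
Several conversions in this pull (wm8961, wm8962, wm8993, wm9081, wm9090, wm9713, wm_hubs) replace open-coded TLV range tables, where the element count passed to TLV_DB_RANGE_HEAD() had to be kept in sync by hand, with the DECLARE_TLV_DB_RANGE() helper, which derives the count from its arguments. Both forms below describe the same boost_tlv table shown in the wm8961 hunk above:

	#include <sound/tlv.h>

	/* old style: the "4" in TLV_DB_RANGE_HEAD() must match the item count */
	static const unsigned int boost_tlv_old[] = {
		TLV_DB_RANGE_HEAD(4),
		0, 0, TLV_DB_SCALE_ITEM(0,  0, 0),
		1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
		2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
		3, 3, TLV_DB_SCALE_ITEM(29, 0, 0),
	};

	/* new style: the macro sizes the array from its arguments */
	static const DECLARE_TLV_DB_RANGE(boost_tlv,
		0, 0, TLV_DB_SCALE_ITEM(0,  0, 0),
		1, 1, TLV_DB_SCALE_ITEM(13, 0, 0),
		2, 2, TLV_DB_SCALE_ITEM(20, 0, 0),
		3, 3, TLV_DB_SCALE_ITEM(29, 0, 0)
	);
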
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index c5748fd..b4eb975 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -113,7 +113,7 @@
 WM8962_REGULATOR_EVENT(6)
 WM8962_REGULATOR_EVENT(7)
 
-static struct reg_default wm8962_reg[] = {
+static const struct reg_default wm8962_reg[] = {
 	{ 0, 0x009F },   /* R0     - Left Input volume */
 	{ 1, 0x049F },   /* R1     - Right Input volume */
 	{ 2, 0x0000 },   /* R2     - HPOUTL volume */
@@ -1456,14 +1456,13 @@
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
 static const DECLARE_TLV_DB_SCALE(mixin_tlv, -1500, 300, 0);
-static const unsigned int mixinpga_tlv[] = {
-	TLV_DB_RANGE_HEAD(5),
+static const DECLARE_TLV_DB_RANGE(mixinpga_tlv,
 	0, 1, TLV_DB_SCALE_ITEM(0, 600, 0),
 	2, 2, TLV_DB_SCALE_ITEM(1300, 1300, 0),
 	3, 4, TLV_DB_SCALE_ITEM(1800, 200, 0),
 	5, 5, TLV_DB_SCALE_ITEM(2400, 0, 0),
-	6, 7, TLV_DB_SCALE_ITEM(2700, 300, 0),
-};
+	6, 7, TLV_DB_SCALE_ITEM(2700, 300, 0)
+);
 static const DECLARE_TLV_DB_SCALE(beep_tlv, -9600, 600, 1);
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
@@ -1471,11 +1470,10 @@
 static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
 static const DECLARE_TLV_DB_SCALE(hp_tlv, -700, 100, 0);
-static const unsigned int classd_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(classd_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 
 static int wm8962_dsp2_write_config(struct snd_soc_codec *codec)
@@ -3495,7 +3493,7 @@
 };
 
 /* Improve power consumption for IN4 DC measurement mode */
-static const struct reg_default wm8962_dc_measure[] = {
+static const struct reg_sequence wm8962_dc_measure[] = {
 	{ 0xfd, 0x1 },
 	{ 0xcc, 0x40 },
 	{ 0xfd, 0 },
@@ -3859,7 +3857,7 @@
 }
 #endif
 
-static struct dev_pm_ops wm8962_pm = {
+static const struct dev_pm_ops wm8962_pm = {
 	SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
 };
 
@@ -3878,7 +3876,6 @@
 static struct i2c_driver wm8962_i2c_driver = {
 	.driver = {
 		.name = "wm8962",
-		.owner = THIS_MODULE,
 		.of_match_table = wm8962_of_match,
 		.pm = &wm8962_pm,
 	},
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index b51184c..2cdde32 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -710,7 +710,6 @@
 static struct i2c_driver wm8971_i2c_driver = {
 	.driver = {
 		.name = "wm8971",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8971_i2c_probe,
 	.remove =   wm8971_i2c_remove,
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 33b16a7..0a60677 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -634,7 +634,6 @@
 static struct i2c_driver wm8974_i2c_driver = {
 	.driver = {
 		.name = "wm8974",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8974_i2c_probe,
 	.remove =   wm8974_i2c_remove,
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index cfc8cdf..d36d600 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -1072,7 +1072,6 @@
 static struct i2c_driver wm8978_i2c_driver = {
 	.driver = {
 		.name = "wm8978",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8978_i2c_probe,
 	.remove =   wm8978_i2c_remove,
diff --git a/sound/soc/codecs/wm8983.c b/sound/soc/codecs/wm8983.c
index 2fdd2c6..f3193fb 100644
--- a/sound/soc/codecs/wm8983.c
+++ b/sound/soc/codecs/wm8983.c
@@ -84,66 +84,6 @@
 	{ 0x3D, 0x0000 },      /* R61 - BIAS CTRL */
 };
 
-static const struct wm8983_reg_access {
-	u16 read; /* Mask of readable bits */
-	u16 write; /* Mask of writable bits */
-} wm8983_access_masks[WM8983_MAX_REGISTER + 1] = {
-	[0x00] = { 0x0000, 0x01FF }, /* R0  - Software Reset */
-	[0x01] = { 0x0000, 0x01FF }, /* R1  - Power management 1 */
-	[0x02] = { 0x0000, 0x01FF }, /* R2  - Power management 2 */
-	[0x03] = { 0x0000, 0x01EF }, /* R3  - Power management 3 */
-	[0x04] = { 0x0000, 0x01FF }, /* R4  - Audio Interface */
-	[0x05] = { 0x0000, 0x003F }, /* R5  - Companding control */
-	[0x06] = { 0x0000, 0x01FD }, /* R6  - Clock Gen control */
-	[0x07] = { 0x0000, 0x000F }, /* R7  - Additional control */
-	[0x08] = { 0x0000, 0x003F }, /* R8  - GPIO Control */
-	[0x09] = { 0x0000, 0x0070 }, /* R9  - Jack Detect Control 1 */
-	[0x0A] = { 0x0000, 0x004F }, /* R10 - DAC Control */
-	[0x0B] = { 0x0000, 0x01FF }, /* R11 - Left DAC digital Vol */
-	[0x0C] = { 0x0000, 0x01FF }, /* R12 - Right DAC digital vol */
-	[0x0D] = { 0x0000, 0x00FF }, /* R13 - Jack Detect Control 2 */
-	[0x0E] = { 0x0000, 0x01FB }, /* R14 - ADC Control */
-	[0x0F] = { 0x0000, 0x01FF }, /* R15 - Left ADC Digital Vol */
-	[0x10] = { 0x0000, 0x01FF }, /* R16 - Right ADC Digital Vol */
-	[0x12] = { 0x0000, 0x017F }, /* R18 - EQ1 - low shelf */
-	[0x13] = { 0x0000, 0x017F }, /* R19 - EQ2 - peak 1 */
-	[0x14] = { 0x0000, 0x017F }, /* R20 - EQ3 - peak 2 */
-	[0x15] = { 0x0000, 0x017F }, /* R21 - EQ4 - peak 3 */
-	[0x16] = { 0x0000, 0x007F }, /* R22 - EQ5 - high shelf */
-	[0x18] = { 0x0000, 0x01FF }, /* R24 - DAC Limiter 1 */
-	[0x19] = { 0x0000, 0x007F }, /* R25 - DAC Limiter 2 */
-	[0x1B] = { 0x0000, 0x01FF }, /* R27 - Notch Filter 1 */
-	[0x1C] = { 0x0000, 0x017F }, /* R28 - Notch Filter 2 */
-	[0x1D] = { 0x0000, 0x017F }, /* R29 - Notch Filter 3 */
-	[0x1E] = { 0x0000, 0x017F }, /* R30 - Notch Filter 4 */
-	[0x20] = { 0x0000, 0x01BF }, /* R32 - ALC control 1 */
-	[0x21] = { 0x0000, 0x00FF }, /* R33 - ALC control 2 */
-	[0x22] = { 0x0000, 0x01FF }, /* R34 - ALC control 3 */
-	[0x23] = { 0x0000, 0x000F }, /* R35 - Noise Gate */
-	[0x24] = { 0x0000, 0x001F }, /* R36 - PLL N */
-	[0x25] = { 0x0000, 0x003F }, /* R37 - PLL K 1 */
-	[0x26] = { 0x0000, 0x01FF }, /* R38 - PLL K 2 */
-	[0x27] = { 0x0000, 0x01FF }, /* R39 - PLL K 3 */
-	[0x29] = { 0x0000, 0x000F }, /* R41 - 3D control */
-	[0x2A] = { 0x0000, 0x01E7 }, /* R42 - OUT4 to ADC */
-	[0x2B] = { 0x0000, 0x01BF }, /* R43 - Beep control */
-	[0x2C] = { 0x0000, 0x0177 }, /* R44 - Input ctrl */
-	[0x2D] = { 0x0000, 0x01FF }, /* R45 - Left INP PGA gain ctrl */
-	[0x2E] = { 0x0000, 0x01FF }, /* R46 - Right INP PGA gain ctrl */
-	[0x2F] = { 0x0000, 0x0177 }, /* R47 - Left ADC BOOST ctrl */
-	[0x30] = { 0x0000, 0x0177 }, /* R48 - Right ADC BOOST ctrl */
-	[0x31] = { 0x0000, 0x007F }, /* R49 - Output ctrl */
-	[0x32] = { 0x0000, 0x01FF }, /* R50 - Left mixer ctrl */
-	[0x33] = { 0x0000, 0x01FF }, /* R51 - Right mixer ctrl */
-	[0x34] = { 0x0000, 0x01FF }, /* R52 - LOUT1 (HP) volume ctrl */
-	[0x35] = { 0x0000, 0x01FF }, /* R53 - ROUT1 (HP) volume ctrl */
-	[0x36] = { 0x0000, 0x01FF }, /* R54 - LOUT2 (SPK) volume ctrl */
-	[0x37] = { 0x0000, 0x01FF }, /* R55 - ROUT2 (SPK) volume ctrl */
-	[0x38] = { 0x0000, 0x004F }, /* R56 - OUT3 mixer ctrl */
-	[0x39] = { 0x0000, 0x00FF }, /* R57 - OUT4 (MONO) mix ctrl */
-	[0x3D] = { 0x0000, 0x0100 }  /* R61 - BIAS CTRL */
-};
-
 /* vol/gain update regs */
 static const int vol_update_regs[] = {
 	WM8983_LEFT_DAC_DIGITAL_VOL,
@@ -605,12 +545,19 @@
 	return 0;
 }
 
-static bool wm8983_readable(struct device *dev, unsigned int reg)
+static bool wm8983_writeable(struct device *dev, unsigned int reg)
 {
-	if (reg > WM8983_MAX_REGISTER)
-		return 0;
-
-	return wm8983_access_masks[reg].read != 0;
+	switch (reg) {
+	case WM8983_SOFTWARE_RESET ... WM8983_RIGHT_ADC_DIGITAL_VOL:
+	case WM8983_EQ1_LOW_SHELF ... WM8983_DAC_LIMITER_2:
+	case WM8983_NOTCH_FILTER_1 ... WM8983_NOTCH_FILTER_4:
+	case WM8983_ALC_CONTROL_1 ... WM8983_PLL_K_3:
+	case WM8983_3D_CONTROL ... WM8983_OUT4_MONO_MIX_CTRL:
+	case WM8983_BIAS_CTRL:
+		return true;
+	default:
+		return false;
+	}
 }
 
 static int wm8983_dac_mute(struct snd_soc_dai *dai, int mute)
@@ -1048,8 +995,9 @@
 	.reg_defaults = wm8983_defaults,
 	.num_reg_defaults = ARRAY_SIZE(wm8983_defaults),
 	.cache_type = REGCACHE_RBTREE,
+	.max_register = WM8983_MAX_REGISTER,
 
-	.readable_reg = wm8983_readable,
+	.writeable_reg = wm8983_writeable,
 };
 
 #if defined(CONFIG_SPI_MASTER)
@@ -1133,7 +1081,6 @@
 static struct i2c_driver wm8983_i2c_driver = {
 	.driver = {
 		.name = "wm8983",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8983_i2c_probe,
 	.remove = wm8983_i2c_remove,
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index 8a85f50..9c3c151 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -1144,7 +1144,6 @@
 static struct i2c_driver wm8985_i2c_driver = {
 	.driver = {
 		.name = "wm8985",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8985_i2c_probe,
 	.remove = wm8985_i2c_remove,
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index f13a995..c88ce99 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -919,7 +919,6 @@
 static struct i2c_driver wm8988_i2c_driver = {
 	.driver = {
 		.name = "wm8988",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8988_i2c_probe,
 	.remove =   wm8988_i2c_remove,
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 1993fd2..23ecd30 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -418,10 +418,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0),
-};
+static const DECLARE_TLV_DB_SCALE(in_mix_tlv, -1200, 600, 0);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8990_dapm_lin12_pga_controls[] = {
@@ -1356,7 +1353,6 @@
 static struct i2c_driver wm8990_i2c_driver = {
 	.driver = {
 		.name = "wm8990",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8990_i2c_probe,
 	.remove =   wm8990_i2c_remove,
diff --git a/sound/soc/codecs/wm8991.c b/sound/soc/codecs/wm8991.c
index 44a6777..c9ee0ac 100644
--- a/sound/soc/codecs/wm8991.c
+++ b/sound/soc/codecs/wm8991.c
@@ -111,45 +111,14 @@
 	}
 }
 
-static const unsigned int rec_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1500, 600),
-};
-
-static const unsigned int in_pga_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 0x1F, TLV_DB_LINEAR_ITEM(-1650, 3000),
-};
-
-static const unsigned int out_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(0, -2100),
-};
-
-static const unsigned int out_pga_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 127, TLV_DB_LINEAR_ITEM(-7300, 600),
-};
-
-static const unsigned int out_omix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-600, 0),
-};
-
-static const unsigned int out_dac_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 255, TLV_DB_LINEAR_ITEM(-7163, 0),
-};
-
-static const unsigned int in_adc_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 255, TLV_DB_LINEAR_ITEM(-7163, 1763),
-};
-
-static const unsigned int out_sidetone_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 31, TLV_DB_LINEAR_ITEM(-3600, 0),
-};
+static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600);
+static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000);
+static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100);
+static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600);
+static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0);
+static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0);
+static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763);
+static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0);
 
 static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
 				      struct snd_ctl_elem_value *ucontrol)
@@ -429,10 +398,7 @@
 }
 
 /* INMIX dB values */
-static const unsigned int in_mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(1),
-	0, 7, TLV_DB_LINEAR_ITEM(-1200, 600),
-};
+static const DECLARE_TLV_DB_LINEAR(in_mix_tlv, -1200, 600);
 
 /* Left In PGA Connections */
 static const struct snd_kcontrol_new wm8991_dapm_lin12_pga_controls[] = {
@@ -1363,7 +1329,6 @@
 static struct i2c_driver wm8991_i2c_driver = {
 	.driver = {
 		.name = "wm8991",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8991_i2c_probe,
 	.remove = wm8991_i2c_remove,
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 8a8db86..8668c4c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -41,7 +41,7 @@
 	"SPKVDD",
 };
 
-static struct reg_default wm8993_reg_defaults[] = {
+static const struct reg_default wm8993_reg_defaults[] = {
 	{ 1,   0x0000 },     /* R1   - Power Management (1) */
 	{ 2,   0x6000 },     /* R2   - Power Management (2) */
 	{ 3,   0x0000 },     /* R3   - Power Management (3) */
@@ -628,11 +628,10 @@
 static const DECLARE_TLV_DB_SCALE(drc_comp_threash, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_comp_amp, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
-static const unsigned int drc_max_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(drc_max_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -1800, 300, 0);
 static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
@@ -1595,7 +1594,7 @@
 #endif
 
 /* Tune DC servo configuration */
-static struct reg_default wm8993_regmap_patch[] = {
+static const struct reg_sequence wm8993_regmap_patch[] = {
 	{ 0x44, 3 },
 	{ 0x56, 3 },
 	{ 0x44, 0 },
@@ -1742,7 +1741,6 @@
 static struct i2c_driver wm8993_i2c_driver = {
 	.driver = {
 		.name = "wm8993",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8993_i2c_probe,
 	.remove =   wm8993_i2c_remove,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 962e1d3..2ccbb32 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1942,14 +1942,16 @@
 	{ "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
 
 	/* AIF3 output */
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1L" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC1R" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2L" },
-	{ "AIF3ADCDAT", "AIF1ADCDAT", "AIF1ADC2R" },
-	{ "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCL" },
-	{ "AIF3ADCDAT", "AIF2ADCDAT", "AIF2ADCR" },
-	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACL" },
-	{ "AIF3ADCDAT", "AIF2DACDAT", "AIF2DACR" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1L" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC1R" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2L" },
+	{ "AIF3ADC Mux", "AIF1ADCDAT", "AIF1ADC2R" },
+	{ "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCL" },
+	{ "AIF3ADC Mux", "AIF2ADCDAT", "AIF2ADCR" },
+	{ "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACL" },
+	{ "AIF3ADC Mux", "AIF2DACDAT", "AIF2DACR" },
+
+	{ "AIF3ADCDAT", NULL, "AIF3ADC Mux" },
 
 	/* Loopback */
 	{ "AIF1 Loopback", "ADCDAT", "AIF1ADCDAT" },
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 505b65f..eda52a9 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -2298,7 +2298,6 @@
 static struct i2c_driver wm8995_i2c_driver = {
 	.driver = {
 		.name = "wm8995",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm8995_i2c_probe,
 	.remove = wm8995_i2c_remove,
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 3dd063f..f7ccd9f 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -117,7 +117,7 @@
 WM8996_REGULATOR_EVENT(1)
 WM8996_REGULATOR_EVENT(2)
 
-static struct reg_default wm8996_reg[] = {
+static const struct reg_default wm8996_reg[] = {
 	{ WM8996_POWER_MANAGEMENT_1, 0x0 },
 	{ WM8996_POWER_MANAGEMENT_2, 0x0 },
 	{ WM8996_POWER_MANAGEMENT_3, 0x0 },
@@ -1780,7 +1780,7 @@
 	wm8996->rx_rate[dai->id] = params_rate(params);
 
 	/* Needs looking at for TDM */
-	bits = snd_pcm_format_width(params_format(params));
+	bits = params_width(params);
 	if (bits < 0)
 		return bits;
 	aifdata |= (bits << WM8996_AIF1TX_WL_SHIFT) | bits;
@@ -2647,12 +2647,10 @@
 		if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 			ret = request_threaded_irq(i2c->irq, NULL,
 						   wm8996_edge_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm8996", codec);
+						   irq_flags, "wm8996", codec);
 		else
 			ret = request_threaded_irq(i2c->irq, NULL, wm8996_irq,
-						   irq_flags | IRQF_ONESHOT,
-						   "wm8996", codec);
+						   irq_flags, "wm8996", codec);
 
 		if (ret == 0) {
 			/* Unmask the interrupt */
@@ -3100,7 +3098,6 @@
 static struct i2c_driver wm8996_i2c_driver = {
 	.driver = {
 		.name = "wm8996",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm8996_i2c_probe,
 	.remove =   wm8996_i2c_remove,
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 4134dc7..b4dba3a 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -174,8 +174,7 @@
 ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
 ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
 
-SND_SOC_BYTES("EQ1 Coefficients", ARIZONA_EQ1_3, 19),
-SOC_SINGLE("EQ1 Mode Switch", ARIZONA_EQ1_2, ARIZONA_EQ1_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ1 Coefficients", ARIZONA_EQ1_2),
 SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
@@ -187,8 +186,7 @@
 SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ2 Coefficients", ARIZONA_EQ2_3, 19),
-SOC_SINGLE("EQ2 Mode Switch", ARIZONA_EQ2_2, ARIZONA_EQ2_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ2 Coefficients", ARIZONA_EQ2_2),
 SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
@@ -200,8 +198,7 @@
 SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ3 Coefficients", ARIZONA_EQ3_3, 19),
-SOC_SINGLE("EQ3 Mode Switch", ARIZONA_EQ3_2, ARIZONA_EQ3_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ3 Coefficients", ARIZONA_EQ3_2),
 SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
@@ -213,8 +210,7 @@
 SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 
-SND_SOC_BYTES("EQ4 Coefficients", ARIZONA_EQ4_3, 19),
-SOC_SINGLE("EQ4 Mode Switch", ARIZONA_EQ4_2, ARIZONA_EQ4_B1_MODE, 1, 0),
+ARIZONA_EQ_CONTROL("EQ4 Coefficients", ARIZONA_EQ4_2),
 SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
 	       24, 0, eq_tlv),
 SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
@@ -242,10 +238,10 @@
 SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
 SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
 
-SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
-SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
-SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
-SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+ARIZONA_LHPF_CONTROL("LHPF1 Coefficients", ARIZONA_HPLPF1_2),
+ARIZONA_LHPF_CONTROL("LHPF2 Coefficients", ARIZONA_HPLPF2_2),
+ARIZONA_LHPF_CONTROL("LHPF3 Coefficients", ARIZONA_HPLPF3_2),
+ARIZONA_LHPF_CONTROL("LHPF4 Coefficients", ARIZONA_HPLPF4_2),
 
 SOC_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
 SOC_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 8a8b1c0..ccb3b15 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -30,7 +30,7 @@
 #include <sound/wm9081.h>
 #include "wm9081.h"
 
-static struct reg_default wm9081_reg[] = {
+static const struct reg_default wm9081_reg[] = {
 	{  2, 0x00B9 },     /* R2  - Analogue Lineout */
 	{  3, 0x00B9 },     /* R3  - Analogue Speaker PGA */
 	{  4, 0x0001 },     /* R4  - VMID Control */
@@ -243,13 +243,12 @@
 static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_out_tlv, -2250, 75, 0);
 static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0);
-static unsigned int drc_max_tlv[] = {
-	TLV_DB_RANGE_HEAD(4),
+static const DECLARE_TLV_DB_RANGE(drc_max_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(1200, 0, 0),
 	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
 	2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -300, 50, 0);
 
@@ -1378,7 +1377,6 @@
 static struct i2c_driver wm9081_i2c_driver = {
 	.driver = {
 		.name = "wm9081",
-		.owner = THIS_MODULE,
 	},
 	.probe =    wm9081_i2c_probe,
 	.remove =   wm9081_i2c_remove,
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 13d23fc..5d73729 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -162,23 +162,20 @@
 		dev_err(codec->dev, "Timed out waiting for DC Servo\n");
 }
 
-static const unsigned int in_tlv[] = {
-	TLV_DB_RANGE_HEAD(3),
+static const DECLARE_TLV_DB_RANGE(in_tlv,
 	0, 0, TLV_DB_SCALE_ITEM(-600, 0, 0),
 	1, 3, TLV_DB_SCALE_ITEM(-350, 350, 0),
-	4, 6, TLV_DB_SCALE_ITEM(600, 600, 0),
-};
-static const unsigned int mix_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+	4, 6, TLV_DB_SCALE_ITEM(600, 600, 0)
+);
+static const DECLARE_TLV_DB_RANGE(mix_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(-1200, 300, 0),
-	3, 3, TLV_DB_SCALE_ITEM(0, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(0, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
-static const unsigned int spkboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(spkboost_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm9090_controls[] = {
 SOC_SINGLE_TLV("IN1A Volume", WM9090_IN1_LINE_INPUT_A_VOLUME, 0, 6, 0,
@@ -636,7 +633,6 @@
 static struct i2c_driver wm9090_i2c_driver = {
 	.driver = {
 		.name = "wm9090",
-		.owner = THIS_MODULE,
 	},
 	.probe = wm9090_i2c_probe,
 	.remove = wm9090_i2c_remove,
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index 5cc457e..744842c 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -22,6 +22,9 @@
 
 #include "wm9705.h"
 
+#define WM9705_VENDOR_ID 0x574d4c05
+#define WM9705_VENDOR_ID_MASK 0xffffffff
+
 /*
  * WM9705 register cache
  */
@@ -293,21 +296,6 @@
 	}
 };
 
-static int wm9705_reset(struct snd_soc_codec *codec)
-{
-	struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
-
-	if (soc_ac97_ops->reset) {
-		soc_ac97_ops->reset(ac97);
-		if (ac97_read(codec, 0) == wm9705_reg[0])
-			return 0; /* Success */
-	}
-
-	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-
-	return -EIO;
-}
-
 #ifdef CONFIG_PM
 static int wm9705_soc_suspend(struct snd_soc_codec *codec)
 {
@@ -324,7 +312,8 @@
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9705_reset(codec);
+	ret = snd_ac97_reset(ac97, true, WM9705_VENDOR_ID,
+		WM9705_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -342,30 +331,17 @@
 static int wm9705_soc_probe(struct snd_soc_codec *codec)
 {
 	struct snd_ac97 *ac97;
-	int ret = 0;
 
-	ac97 = snd_soc_alloc_ac97_codec(codec);
+	ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
+		WM9705_VENDOR_ID_MASK);
 	if (IS_ERR(ac97)) {
-		ret = PTR_ERR(ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec\n");
-		return ret;
+		return PTR_ERR(ac97);
 	}
 
-	ret = wm9705_reset(codec);
-	if (ret)
-		goto err_put_device;
-
-	ret = device_add(&ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	snd_soc_codec_set_drvdata(codec, ac97);
 
 	return 0;
-
-err_put_device:
-	put_device(&ac97->dev);
-	return ret;
 }
 
 static int wm9705_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 1fda104..488a922 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -23,6 +23,9 @@
 #include <sound/tlv.h>
 #include "wm9712.h"
 
+#define WM9712_VENDOR_ID 0x574d4c12
+#define WM9712_VENDOR_ID_MASK 0xffffffff
+
 struct wm9712_priv {
 	struct snd_ac97 *ac97;
 	unsigned int hp_mixer[2];
@@ -613,35 +616,14 @@
 	return 0;
 }
 
-static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(wm9712->ac97);
-		if (ac97_read(codec, 0) == wm9712_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(wm9712->ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(wm9712->ac97);
-	if (ac97_read(codec, 0) != wm9712_reg[0])
-		goto err;
-	return 0;
-
-err:
-	dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-	return -EIO;
-}
-
 static int wm9712_soc_resume(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9712_reset(codec, 1);
+	ret = snd_ac97_reset(wm9712->ac97, true, WM9712_VENDOR_ID,
+		WM9712_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -663,31 +645,20 @@
 static int wm9712_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
+	int ret;
 
-	wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
+	wm9712->ac97 = snd_soc_new_ac97_codec(codec, WM9712_VENDOR_ID,
+		WM9712_VENDOR_ID_MASK);
 	if (IS_ERR(wm9712->ac97)) {
 		ret = PTR_ERR(wm9712->ac97);
 		dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
 		return ret;
 	}
 
-	ret = wm9712_reset(codec, 0);
-	if (ret < 0)
-		goto err_put_device;
-
-	ret = device_add(&wm9712->ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	/* set alc mux to none */
 	ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
 
 	return 0;
-
-err_put_device:
-	put_device(&wm9712->ac97->dev);
-	return ret;
 }
 
 static int wm9712_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 89cd2d6..4083a51 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -29,6 +29,9 @@
 
 #include "wm9713.h"
 
+#define WM9713_VENDOR_ID 0x574d4c13
+#define WM9713_VENDOR_ID_MASK 0xffffffff
+
 struct wm9713_priv {
 	struct snd_ac97 *ac97;
 	u32 pll_in; /* PLL input frequency */
@@ -116,11 +119,10 @@
 static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0);
 static const DECLARE_TLV_DB_SCALE(main_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(misc_tlv, -1500, 300, 0);
-static unsigned int mic_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(mic_tlv,
 	0, 2, TLV_DB_SCALE_ITEM(1200, 600, 0),
-	3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0),
-};
+	3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0)
+);
 
 static const struct snd_kcontrol_new wm9713_snd_ac97_controls[] = {
 SOC_DOUBLE_TLV("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1, out_tlv),
@@ -1123,28 +1125,6 @@
 	},
 };
 
-int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
-{
-	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-
-	if (try_warm && soc_ac97_ops->warm_reset) {
-		soc_ac97_ops->warm_reset(wm9713->ac97);
-		if (ac97_read(codec, 0) == wm9713_reg[0])
-			return 1;
-	}
-
-	soc_ac97_ops->reset(wm9713->ac97);
-	if (soc_ac97_ops->warm_reset)
-		soc_ac97_ops->warm_reset(wm9713->ac97);
-	if (ac97_read(codec, 0) != wm9713_reg[0]) {
-		dev_err(codec->dev, "Failed to reset: AC97 link error\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(wm9713_reset);
-
 static int wm9713_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
@@ -1196,7 +1176,8 @@
 	int i, ret;
 	u16 *cache = codec->reg_cache;
 
-	ret = wm9713_reset(codec, 1);
+	ret = snd_ac97_reset(wm9713->ac97, true, WM9713_VENDOR_ID,
+		WM9713_VENDOR_ID_MASK);
 	if (ret < 0)
 		return ret;
 
@@ -1222,32 +1203,18 @@
 static int wm9713_soc_probe(struct snd_soc_codec *codec)
 {
 	struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0, reg;
+	int reg;
 
-	wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
+	wm9713->ac97 = snd_soc_new_ac97_codec(codec, WM9713_VENDOR_ID,
+		WM9713_VENDOR_ID_MASK);
 	if (IS_ERR(wm9713->ac97))
 		return PTR_ERR(wm9713->ac97);
 
-	/* do a cold reset for the controller and then try
-	 * a warm reset followed by an optional cold reset for codec */
-	wm9713_reset(codec, 0);
-	ret = wm9713_reset(codec, 1);
-	if (ret < 0)
-		goto err_put_device;
-
-	ret = device_add(&wm9713->ac97->dev);
-	if (ret)
-		goto err_put_device;
-
 	/* unmute the adc - move to kcontrol */
 	reg = ac97_read(codec, AC97_CD) & 0x7fff;
 	ac97_write(codec, AC97_CD, reg);
 
 	return 0;
-
-err_put_device:
-	put_device(&wm9713->ac97->dev);
-	return ret;
 }
 
 static int wm9713_soc_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm9713.h b/sound/soc/codecs/wm9713.h
index 793da86..53df11b 100644
--- a/sound/soc/codecs/wm9713.h
+++ b/sound/soc/codecs/wm9713.h
@@ -45,6 +45,4 @@
 #define WM9713_DAI_AC97_AUX		1
 #define WM9713_DAI_PCM_VOICE	2
 
-int wm9713_reset(struct snd_soc_codec *codec,  int try_warm);
-
 #endif
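
The wm9705/wm9712/wm9713 changes drop the driver-local AC'97 reset helpers: snd_soc_new_ac97_codec() now takes a vendor ID and mask and performs the reset and ID check while registering the device, and the resume paths call snd_ac97_reset() (second argument: try a warm reset first) with the same ID/mask. A condensed probe sketch mirroring the wm9705 result above, with error handling kept minimal:

	#include <linux/err.h>
	#include <sound/soc.h>

	#define WM9705_VENDOR_ID	0x574d4c05
	#define WM9705_VENDOR_ID_MASK	0xffffffff

	static int wm9705_soc_probe(struct snd_soc_codec *codec)
	{
		struct snd_ac97 *ac97;

		/* allocates the AC'97 device, resets it, checks the vendor ID
		 * against the mask and registers it in one call */
		ac97 = snd_soc_new_ac97_codec(codec, WM9705_VENDOR_ID,
					      WM9705_VENDOR_ID_MASK);
		if (IS_ERR(ac97))
			return PTR_ERR(ac97);

		snd_soc_codec_set_drvdata(codec, ac97);
		return 0;
	}
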
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index fd86bd1..624b3b9 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -38,11 +38,10 @@
 static const DECLARE_TLV_DB_SCALE(outmix_tlv, -2100, 300, 0);
 static const DECLARE_TLV_DB_SCALE(spkmixout_tlv, -1800, 600, 1);
 static const DECLARE_TLV_DB_SCALE(outpga_tlv, -5700, 100, 0);
-static const unsigned int spkboost_tlv[] = {
-	TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(spkboost_tlv,
 	0, 6, TLV_DB_SCALE_ITEM(0, 150, 0),
-	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0),
-};
+	7, 7, TLV_DB_SCALE_ITEM(1200, 0, 0)
+);
 static const DECLARE_TLV_DB_SCALE(line_tlv, -600, 600, 0);
 
 static const char *speaker_ref_text[] = {
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index 56cb4d9..ec98548 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -651,23 +651,15 @@
 static int davinci_i2s_probe(struct platform_device *pdev)
 {
 	struct davinci_mcbsp_dev *dev;
-	struct resource *mem, *ioarea, *res;
+	struct resource *mem, *res;
+	void __iomem *io_base;
 	int *dma;
 	int ret;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "no mem resource?\n");
-		return -ENODEV;
-	}
-
-	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-					 resource_size(mem),
-					 pdev->name);
-	if (!ioarea) {
-		dev_err(&pdev->dev, "McBSP region already claimed\n");
-		return -EBUSY;
-	}
+	io_base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	dev = devm_kzalloc(&pdev->dev, sizeof(struct davinci_mcbsp_dev),
 			   GFP_KERNEL);
@@ -679,12 +671,7 @@
 		return -ENODEV;
 	clk_enable(dev->clk);
 
-	dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!dev->base) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err_release_clk;
-	}
+	dev->base = io_base;
 
 	dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
 	    (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG);
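
Both DaVinci probe functions (davinci-i2s above, davinci-mcasp below) fold the platform_get_resource() / devm_request_mem_region() / devm_ioremap() sequence into devm_ioremap_resource(), which validates the resource (including a NULL check), claims and maps the region, and prints its own diagnostics, so the hand-rolled error messages go away. The reduced probe pattern, with an illustrative function name:

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *mem;
		void __iomem *base;

		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* handles mem == NULL, requests the region, maps it and
		 * logs its own errors on failure */
		base = devm_ioremap_resource(&pdev->dev, mem);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* ... program the hardware through base ... */
		return 0;
	}
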
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b960e62..add6bb9 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1613,7 +1613,7 @@
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
 	struct snd_dmaengine_dai_dma_data *dma_data;
-	struct resource *mem, *ioarea, *res, *dat;
+	struct resource *mem, *res, *dat;
 	struct davinci_mcasp_pdata *pdata;
 	struct davinci_mcasp *mcasp;
 	char *irq_name;
@@ -1648,22 +1648,12 @@
 		}
 	}
 
-	ioarea = devm_request_mem_region(&pdev->dev, mem->start,
-			resource_size(mem), pdev->name);
-	if (!ioarea) {
-		dev_err(&pdev->dev, "Audio region already claimed\n");
-		return -EBUSY;
-	}
+	mcasp->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(mcasp->base))
+		return PTR_ERR(mcasp->base);
 
 	pm_runtime_enable(&pdev->dev);
 
-	mcasp->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!mcasp->base) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
 	mcasp->op_mode = pdata->op_mode;
 	/* sanity check for tdm slots parameter */
 	if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index fabd05f..c77d921 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -231,8 +231,9 @@
 
 	dev_set_drvdata(&pdev->dev, davinci_vcif_dev);
 
-	ret = snd_soc_register_component(&pdev->dev, &davinci_vcif_component,
-					 &davinci_vcif_dai, 1);
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &davinci_vcif_component,
+					      &davinci_vcif_dai, 1);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "could not register dai\n");
 		return ret;
@@ -241,23 +242,14 @@
 	ret = edma_pcm_platform_register(&pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
-		snd_soc_unregister_component(&pdev->dev);
 		return ret;
 	}
 
 	return 0;
 }
 
-static int davinci_vcif_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-
-	return 0;
-}
-
 static struct platform_driver davinci_vcif_driver = {
 	.probe		= davinci_vcif_probe,
-	.remove		= davinci_vcif_remove,
 	.driver		= {
 		.name	= "davinci-vcif",
 	},
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index e1aa3834..883087f 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -182,7 +182,7 @@
 		);
 	} else {
 		if (np) {
-			/* The eukrea,asoc-tlv320 driver was explicitely
+			/* The eukrea,asoc-tlv320 driver was explicitly
 			 * requested (through the device tree).
 			 */
 			dev_err(&pdev->dev,
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index de43887..5aeb6ed 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -23,6 +23,7 @@
 
 #include "../codecs/sgtl5000.h"
 #include "../codecs/wm8962.h"
+#include "../codecs/wm8960.h"
 
 #define RX 0
 #define TX 1
@@ -407,6 +408,7 @@
 	struct fsl_asoc_card_priv *priv;
 	struct i2c_client *codec_dev;
 	struct clk *codec_clk;
+	const char *codec_dai_name;
 	u32 width;
 	int ret;
 
@@ -459,6 +461,7 @@
 
 	/* Diversify the card configurations */
 	if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
+		codec_dai_name = "cs42888";
 		priv->card.set_bias_level = NULL;
 		priv->cpu_priv.sysclk_freq[TX] = priv->codec_priv.mclk_freq;
 		priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
@@ -467,14 +470,22 @@
 		priv->cpu_priv.slot_width = 32;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
 	} else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
+		codec_dai_name = "sgtl5000";
 		priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8962")) {
+		codec_dai_name = "wm8962";
 		priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
 		priv->codec_priv.mclk_id = WM8962_SYSCLK_MCLK;
 		priv->codec_priv.fll_id = WM8962_SYSCLK_FLL;
 		priv->codec_priv.pll_id = WM8962_FLL;
 		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+	} else if (of_device_is_compatible(np, "fsl,imx-audio-wm8960")) {
+		codec_dai_name = "wm8960-hifi";
+		priv->card.set_bias_level = fsl_asoc_card_set_bias_level;
+		priv->codec_priv.fll_id = WM8960_SYSCLK_AUTO;
+		priv->codec_priv.pll_id = WM8960_SYSCLK_AUTO;
+		priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
 	} else {
 		dev_err(&pdev->dev, "unknown Device Tree compatible\n");
 		return -EINVAL;
@@ -521,7 +532,7 @@
 	/* Normal DAI Link */
 	priv->dai_link[0].cpu_of_node = cpu_np;
 	priv->dai_link[0].codec_of_node = codec_np;
-	priv->dai_link[0].codec_dai_name = codec_dev->name;
+	priv->dai_link[0].codec_dai_name = codec_dai_name;
 	priv->dai_link[0].platform_of_node = cpu_np;
 	priv->dai_link[0].dai_fmt = priv->dai_fmt;
 	priv->card.num_links = 1;
@@ -530,7 +541,7 @@
 		/* DPCM DAI Links only if ASRC exsits */
 		priv->dai_link[1].cpu_of_node = asrc_np;
 		priv->dai_link[1].platform_of_node = asrc_np;
-		priv->dai_link[2].codec_dai_name = codec_dev->name;
+		priv->dai_link[2].codec_dai_name = codec_dai_name;
 		priv->dai_link[2].codec_of_node = codec_np;
 		priv->dai_link[2].cpu_of_node = cpu_np;
 		priv->dai_link[2].dai_fmt = priv->dai_fmt;
@@ -578,6 +589,7 @@
 	{ .compatible = "fsl,imx-audio-cs42888", },
 	{ .compatible = "fsl,imx-audio-sgtl5000", },
 	{ .compatible = "fsl,imx-audio-wm8962", },
+	{ .compatible = "fsl,imx-audio-wm8960", },
 	{}
 };
 
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index c068494..9f087d4 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -931,14 +931,29 @@
 static int fsl_asrc_runtime_resume(struct device *dev)
 {
 	struct fsl_asrc *asrc_priv = dev_get_drvdata(dev);
-	int i;
+	int i, ret;
 
-	clk_prepare_enable(asrc_priv->mem_clk);
-	clk_prepare_enable(asrc_priv->ipg_clk);
-	for (i = 0; i < ASRC_CLK_MAX_NUM; i++)
-		clk_prepare_enable(asrc_priv->asrck_clk[i]);
+	ret = clk_prepare_enable(asrc_priv->mem_clk);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(asrc_priv->ipg_clk);
+	if (ret)
+		goto disable_mem_clk;
+	for (i = 0; i < ASRC_CLK_MAX_NUM; i++) {
+		ret = clk_prepare_enable(asrc_priv->asrck_clk[i]);
+		if (ret)
+			goto disable_asrck_clk;
+	}
 
 	return 0;
+
+disable_asrck_clk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(asrc_priv->asrck_clk[i]);
+	clk_disable_unprepare(asrc_priv->ipg_clk);
+disable_mem_clk:
+	clk_disable_unprepare(asrc_priv->mem_clk);
+	return ret;
 }
 
 static int fsl_asrc_runtime_suspend(struct device *dev)
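
The fsl_asrc resume path here (and the fsl_spdif startup path later in this diff) stops ignoring clk_prepare_enable() failures: if enabling one clock in a loop fails, the clocks already enabled are released in reverse order before the error is propagated. A generic sketch of the pattern; the helper name and the clock array are assumptions for illustration:

	#include <linux/clk.h>

	static int example_enable_clocks(struct clk **clks, int num)
	{
		int i, ret;

		for (i = 0; i < num; i++) {
			ret = clk_prepare_enable(clks[i]);
			if (ret)
				goto err_unwind;
		}

		return 0;

	err_unwind:
		/* disable only the clocks that were successfully enabled */
		for (i--; i >= 0; i--)
			clk_disable_unprepare(clks[i]);

		return ret;
	}
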
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 5c75971..837979e 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -517,7 +517,7 @@
 	u32 bclk, mask, val;
 	int ret;
 
-	/* Override slot_width if being specifially set */
+	/* Override slot_width if being specifically set */
 	if (esai_priv->slot_width)
 		slot_width = esai_priv->slot_width;
 
@@ -839,7 +839,7 @@
 		return ret;
 	}
 
-	ret = imx_pcm_dma_init(pdev);
+	ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
 	if (ret)
 		dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
 
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 5c73bea..a18fd92 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -791,7 +791,7 @@
 		return ret;
 
 	if (sai->sai_on_imx)
-		return imx_pcm_dma_init(pdev);
+		return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
 	else
 		return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 }
diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
index 0662809..b95fbc3 100644
--- a/sound/soc/fsl/fsl_sai.h
+++ b/sound/soc/fsl/fsl_sai.h
@@ -13,7 +13,8 @@
 
 #define FSL_SAI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
 			 SNDRV_PCM_FMTBIT_S20_3LE |\
-			 SNDRV_PCM_FMTBIT_S24_LE)
+			 SNDRV_PCM_FMTBIT_S24_LE |\
+			 SNDRV_PCM_FMTBIT_S32_LE)
 
 /* SAI Register Map Register */
 #define FSL_SAI_TCSR	0x00 /* SAI Transmit Control */
@@ -45,7 +46,7 @@
 #define FSL_SAI_xFR(tx)		(tx ? FSL_SAI_TFR : FSL_SAI_RFR)
 #define FSL_SAI_xMR(tx)		(tx ? FSL_SAI_TMR : FSL_SAI_RMR)
 
-/* SAI Transmit/Recieve Control Register */
+/* SAI Transmit/Receive Control Register */
 #define FSL_SAI_CSR_TERE	BIT(31)
 #define FSL_SAI_CSR_FR		BIT(25)
 #define FSL_SAI_CSR_SR		BIT(24)
@@ -67,10 +68,10 @@
 #define FSL_SAI_CSR_FRIE	BIT(8)
 #define FSL_SAI_CSR_FRDE	BIT(0)
 
-/* SAI Transmit and Recieve Configuration 1 Register */
+/* SAI Transmit and Receive Configuration 1 Register */
 #define FSL_SAI_CR1_RFW_MASK	0x1f
 
-/* SAI Transmit and Recieve Configuration 2 Register */
+/* SAI Transmit and Receive Configuration 2 Register */
 #define FSL_SAI_CR2_SYNC	BIT(30)
 #define FSL_SAI_CR2_MSEL_MASK	(0x3 << 26)
 #define FSL_SAI_CR2_MSEL_BUS	0
@@ -82,12 +83,12 @@
 #define FSL_SAI_CR2_BCD_MSTR	BIT(24)
 #define FSL_SAI_CR2_DIV_MASK	0xff
 
-/* SAI Transmit and Recieve Configuration 3 Register */
+/* SAI Transmit and Receive Configuration 3 Register */
 #define FSL_SAI_CR3_TRCE	BIT(16)
 #define FSL_SAI_CR3_WDFL(x)	(x)
 #define FSL_SAI_CR3_WDFL_MASK	0x1f
 
-/* SAI Transmit and Recieve Configuration 4 Register */
+/* SAI Transmit and Receive Configuration 4 Register */
 #define FSL_SAI_CR4_FRSZ(x)	(((x) - 1) << 16)
 #define FSL_SAI_CR4_FRSZ_MASK	(0x1f << 16)
 #define FSL_SAI_CR4_SYWD(x)	(((x) - 1) << 8)
@@ -97,7 +98,7 @@
 #define FSL_SAI_CR4_FSP		BIT(1)
 #define FSL_SAI_CR4_FSD_MSTR	BIT(0)
 
-/* SAI Transmit and Recieve Configuration 5 Register */
+/* SAI Transmit and Receive Configuration 5 Register */
 #define FSL_SAI_CR5_WNW(x)	(((x) - 1) << 24)
 #define FSL_SAI_CR5_WNW_MASK	(0x1f << 24)
 #define FSL_SAI_CR5_W0W(x)	(((x) - 1) << 16)
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 8e93221..ab729f2 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -454,7 +454,8 @@
 	struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
 	struct platform_device *pdev = spdif_priv->pdev;
 	struct regmap *regmap = spdif_priv->regmap;
-	u32 scr, mask, i;
+	u32 scr, mask;
+	int i;
 	int ret;
 
 	/* Reset module and interrupts only for first initialization */
@@ -482,13 +483,18 @@
 		mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
 			SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
 			SCR_TXFIFO_FSEL_MASK;
-		for (i = 0; i < SPDIF_TXRATE_MAX; i++)
-			clk_prepare_enable(spdif_priv->txclk[i]);
+		for (i = 0; i < SPDIF_TXRATE_MAX; i++) {
+			ret = clk_prepare_enable(spdif_priv->txclk[i]);
+			if (ret)
+				goto disable_txclk;
+		}
 	} else {
 		scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC;
 		mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
 			SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK;
-		clk_prepare_enable(spdif_priv->rxclk);
+		ret = clk_prepare_enable(spdif_priv->rxclk);
+		if (ret)
+			goto err;
 	}
 	regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
 
@@ -497,6 +503,9 @@
 
 	return 0;
 
+disable_txclk:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(spdif_priv->txclk[i]);
 err:
 	clk_disable_unprepare(spdif_priv->coreclk);
 
@@ -707,7 +716,7 @@
 	return ret;
 }
 
-/* Q-subcode infomation. The byte size is SPDIF_UBITS_SIZE/8 */
+/* Q-subcode information. The byte size is SPDIF_UBITS_SIZE/8 */
 static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -739,7 +748,7 @@
 	return ret;
 }
 
-/* Valid bit infomation */
+/* Valid bit information */
 static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -767,7 +776,7 @@
 	return 0;
 }
 
-/* DPLL lock infomation */
+/* DPLL lock information */
 static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo)
 {
@@ -1255,7 +1264,7 @@
 		return ret;
 	}
 
-	ret = imx_pcm_dma_init(pdev);
+	ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
 	if (ret)
 		dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
 
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c0b940e..8ec6fb2 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -156,7 +156,7 @@
  *
  * @dbg_stats: Debugging statistics
  *
- * @soc: SoC specifc data
+ * @soc: SoC specific data
  */
 struct fsl_ssi_private {
 	struct regmap *regs;
@@ -900,14 +900,16 @@
 		scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
 		break;
 	default:
-		return -EINVAL;
+		if (!fsl_ssi_is_ac97(ssi_private))
+			return -EINVAL;
 	}
 
 	stcr |= strcr;
 	srcr |= strcr;
 
-	if (ssi_private->cpu_dai_drv.symmetric_rates) {
-		/* Need to clear RXDIR when using SYNC mode */
+	if (ssi_private->cpu_dai_drv.symmetric_rates
+			|| fsl_ssi_is_ac97(ssi_private)) {
+		/* Need to clear RXDIR when using SYNC or AC97 mode */
 		srcr &= ~CCSR_SSI_SRCR_RXDIR;
 		scr |= CCSR_SSI_SCR_SYN;
 	}
@@ -1101,6 +1103,7 @@
 
 static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
 	.bus_control = true,
+	.probe = fsl_ssi_dai_probe,
 	.playback = {
 		.stream_name = "AC97 Playback",
 		.channels_min = 2,
@@ -1127,10 +1130,17 @@
 	struct regmap *regs = fsl_ac97_data->regs;
 	unsigned int lreg;
 	unsigned int lval;
+	int ret;
 
 	if (reg > 0x7f)
 		return;
 
+	ret = clk_prepare_enable(fsl_ac97_data->clk);
+	if (ret) {
+		pr_err("ac97 write clk_prepare_enable failed: %d\n",
+			ret);
+		return;
+	}
 
 	lreg = reg <<  12;
 	regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1141,6 +1151,8 @@
 	regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
 			CCSR_SSI_SACNT_WR);
 	udelay(100);
+
+	clk_disable_unprepare(fsl_ac97_data->clk);
 }
 
 static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
@@ -1151,6 +1163,14 @@
 	unsigned short val = -1;
 	u32 reg_val;
 	unsigned int lreg;
+	int ret;
+
+	ret = clk_prepare_enable(fsl_ac97_data->clk);
+	if (ret) {
+		pr_err("ac97 read clk_prepare_enable failed: %d\n",
+			ret);
+		return -1;
+	}
 
 	lreg = (reg & 0x7f) <<  12;
 	regmap_write(regs, CCSR_SSI_SACADD, lreg);
@@ -1162,6 +1182,8 @@
 	regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
 	val = (reg_val >> 4) & 0xffff;
 
+	clk_disable_unprepare(fsl_ac97_data->clk);
+
 	return val;
 }
 
@@ -1210,7 +1232,7 @@
 		}
 	}
 
-	/* For those SLAVE implementations, we ingore non-baudclk cases
+	/* For those SLAVE implementations, we ignore non-baudclk cases
 	 * and, instead, abandon MASTER mode that needs baud clock.
 	 */
 	ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
@@ -1257,7 +1279,7 @@
 		if (ret)
 			goto error_pcm;
 	} else {
-		ret = imx_pcm_dma_init(pdev);
+		ret = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
 		if (ret)
 			goto error_pcm;
 	}
@@ -1320,7 +1342,11 @@
 
 		fsl_ac97_data = ssi_private;
 
-		snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+		if (ret) {
+			dev_err(&pdev->dev, "could not set AC'97 ops\n");
+			return ret;
+		}
 	} else {
 		/* Initialize this copy of the CPU DAI driver structure */
 		memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
@@ -1357,7 +1383,9 @@
 
 	/* Are the RX and the TX clocks locked? */
 	if (!of_find_property(np, "fsl,ssi-asynchronous", NULL)) {
-		ssi_private->cpu_dai_drv.symmetric_rates = 1;
+		if (!fsl_ssi_is_ac97(ssi_private))
+			ssi_private->cpu_dai_drv.symmetric_rates = 1;
+
 		ssi_private->cpu_dai_drv.symmetric_channels = 1;
 		ssi_private->cpu_dai_drv.symmetric_samplebits = 1;
 	}
@@ -1434,6 +1462,27 @@
 		_fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
 				     ssi_private->dai_fmt);
 
+	if (fsl_ssi_is_ac97(ssi_private)) {
+		u32 ssi_idx;
+
+		ret = of_property_read_u32(np, "cell-index", &ssi_idx);
+		if (ret) {
+			dev_err(&pdev->dev, "cannot get SSI index property\n");
+			goto error_sound_card;
+		}
+
+		ssi_private->pdev =
+			platform_device_register_data(NULL,
+					"ac97-codec", ssi_idx, NULL, 0);
+		if (IS_ERR(ssi_private->pdev)) {
+			ret = PTR_ERR(ssi_private->pdev);
+			dev_err(&pdev->dev,
+				"failed to register AC97 codec platform: %d\n",
+				ret);
+			goto error_sound_card;
+		}
+	}
+
 	return 0;
 
 error_sound_card:
@@ -1458,6 +1507,9 @@
 	if (ssi_private->soc->imx)
 		fsl_ssi_imx_clean(pdev, ssi_private);
 
+	if (fsl_ssi_is_ac97(ssi_private))
+		snd_soc_set_ac97_ops(NULL);
+
 	return 0;
 }
 
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0db94f49..1fc01ed 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -40,7 +40,7 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_PAUSE |
 		SNDRV_PCM_INFO_RESUME,
-	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
+	.buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
 	.period_bytes_min = 128,
 	.period_bytes_max = 65535, /* Limited by SDMA engine */
 	.periods_min = 2,
@@ -52,13 +52,30 @@
 	.pcm_hardware = &imx_pcm_hardware,
 	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
 	.compat_filter_fn = filter,
-	.prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
+	.prealloc_buffer_size = IMX_DEFAULT_DMABUF_SIZE,
 };
 
-int imx_pcm_dma_init(struct platform_device *pdev)
+int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
 {
+	struct snd_dmaengine_pcm_config *config;
+	struct snd_pcm_hardware *pcm_hardware;
+
+	config = devm_kzalloc(&pdev->dev,
+			sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
+	*config = imx_dmaengine_pcm_config;
+	if (size)
+		config->prealloc_buffer_size = size;
+
+	pcm_hardware = devm_kzalloc(&pdev->dev,
+			sizeof(struct snd_pcm_hardware), GFP_KERNEL);
+	*pcm_hardware = imx_pcm_hardware;
+	if (size)
+		pcm_hardware->buffer_bytes_max = size;
+
+	config->pcm_hardware = pcm_hardware;
+
 	return devm_snd_dmaengine_pcm_register(&pdev->dev,
-		&imx_dmaengine_pcm_config,
+		config,
 		SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index c79cb27..133c4470a 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -20,6 +20,11 @@
  */
 #define IMX_SSI_DMABUF_SIZE	(64 * 1024)
 
+#define IMX_DEFAULT_DMABUF_SIZE	(64 * 1024)
+#define IMX_SAI_DMABUF_SIZE	(64 * 1024)
+#define IMX_SPDIF_DMABUF_SIZE	(64 * 1024)
+#define IMX_ESAI_DMABUF_SIZE	(256 * 1024)
+
 static inline void
 imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
 	int dma, enum sdma_peripheral_type peripheral_type)
@@ -39,9 +44,9 @@
 };
 
 #if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
-int imx_pcm_dma_init(struct platform_device *pdev);
+int imx_pcm_dma_init(struct platform_device *pdev, size_t size);
 #else
-static inline int imx_pcm_dma_init(struct platform_device *pdev)
+static inline int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
 {
 	return -ENODEV;
 }
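
imx_pcm_dma_init() gains a preallocated DMA buffer size argument so each CPU DAI driver can size its own buffer: SSI, SAI and S/PDIF keep 64 KiB while ESAI requests 256 KiB, and passing 0 falls back to IMX_DEFAULT_DMABUF_SIZE from the static config. A caller fragment matching the fsl_esai change earlier in this diff:

	/* from a CPU DAI driver's probe(), after the DAI component is registered */
	ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);	/* 256 KiB */
	if (ret)
		dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
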
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 461ce27..48b2d24 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -603,7 +603,7 @@
 	ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
 	ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
-	ssi->dma_init = imx_pcm_dma_init(pdev);
+	ssi->dma_init = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
 
 	if (ssi->fiq_init && ssi->dma_init) {
 		ret = ssi->fiq_init;
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index d555493..3ff76d4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -76,6 +76,7 @@
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
 	struct simple_dai_props *dai_props =
 		&priv->dai_props[rtd - rtd->card->rtd];
@@ -91,8 +92,16 @@
 		mclk = params_rate(params) * mclk_fs;
 		ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
 					     SND_SOC_CLOCK_IN);
+		if (ret && ret != -ENOTSUPP)
+			goto err;
+
+		ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+					     SND_SOC_CLOCK_OUT);
+		if (ret && ret != -ENOTSUPP)
+			goto err;
 	}
 
+err:
 	return ret;
 }
 
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index f3060a4..05fde5e6e 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -26,14 +26,9 @@
 	depends on ACPI
 
 config SND_SOC_INTEL_SST
-	tristate "ASoC support for Intel(R) Smart Sound Technology"
+	tristate
 	select SND_SOC_INTEL_SST_ACPI if ACPI
 	depends on (X86 || COMPILE_TEST)
-	depends on DW_DMAC_CORE
-	help
-          This adds support for Intel(R) Smart Sound Technology (SST).
-          Say Y if you have such a device
-          If unsure select "N".
 
 config SND_SOC_INTEL_SST_ACPI
 	tristate
@@ -46,8 +41,9 @@
 
 config SND_SOC_INTEL_HASWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Haswell Lynxpoint"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C && \
-		   I2C_DESIGNWARE_PLATFORM
+	depends on X86_INTEL_LPSS && I2C && I2C_DESIGNWARE_PLATFORM
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT5640
 	help
@@ -58,7 +54,9 @@
 
 config SND_SOC_INTEL_BYT_RT5640_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+	depends on X86_INTEL_LPSS && I2C
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_RT5640
 	help
@@ -67,7 +65,9 @@
 
 config SND_SOC_INTEL_BYT_MAX98090_MACH
 	tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && I2C
+	depends on X86_INTEL_LPSS && I2C
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_BAYTRAIL
 	select SND_SOC_MAX98090
 	help
@@ -76,8 +76,10 @@
 
 config SND_SOC_INTEL_BROADWELL_MACH
 	tristate "ASoC Audio DSP support for Intel Broadwell Wildcatpoint"
-	depends on SND_SOC_INTEL_SST && X86_INTEL_LPSS && DW_DMAC && \
+	depends on X86_INTEL_LPSS && I2C && DW_DMAC && \
 		   I2C_DESIGNWARE_PLATFORM
+	depends on DW_DMAC_CORE
+	select SND_SOC_INTEL_SST
 	select SND_SOC_INTEL_HASWELL
 	select SND_SOC_RT286
 	help
@@ -132,3 +134,8 @@
       This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
       platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
       If unsure select "N".
+
+config SND_SOC_INTEL_SKYLAKE
+	tristate
+	select SND_HDA_EXT_CORE
+	select SND_SOC_INTEL_SST
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 6de5d5c..2b45435 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/
 obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
 obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
 
 # Machine support
 obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst-atom-controls.c b/sound/soc/intel/atom/sst-atom-controls.c
index 31e9b9e..d55388e 100644
--- a/sound/soc/intel/atom/sst-atom-controls.c
+++ b/sound/soc/intel/atom/sst-atom-controls.c
@@ -132,7 +132,7 @@
 			      sizeof(cmd.header) + cmd.header.length);
 }
 
-int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
+static int sst_slot_enum_info(struct snd_kcontrol *kcontrol,
 		       struct snd_ctl_elem_info *uinfo)
 {
 	struct sst_enum *e = (struct sst_enum *)kcontrol->private_value;
@@ -1298,7 +1298,7 @@
 		dev_dbg(dai->dev, "Stream name=%s\n",
 				dai->playback_widget->name);
 		w = dai->playback_widget;
-		list_for_each_entry(p, &w->sinks, list_source) {
+		snd_soc_dapm_widget_for_each_sink_path(w, p) {
 			if (p->connected && !p->connected(w, p->sink))
 				continue;
 
@@ -1317,7 +1317,7 @@
 		dev_dbg(dai->dev, "Stream name=%s\n",
 				dai->capture_widget->name);
 		w = dai->capture_widget;
-		list_for_each_entry(p, &w->sources, list_sink) {
+		snd_soc_dapm_widget_for_each_source_path(w, p) {
 			if (p->connected && !p->connected(w, p->sink))
 				continue;
 
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 641ebe6..683e501 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -33,7 +33,6 @@
 
 struct sst_device *sst;
 static DEFINE_MUTEX(sst_lock);
-extern struct snd_compr_ops sst_platform_compr_ops;
 
 int sst_register_dsp(struct sst_device *dev)
 {
diff --git a/sound/soc/intel/atom/sst-mfld-platform.h b/sound/soc/intel/atom/sst-mfld-platform.h
index 2409b23..cb32cc7 100644
--- a/sound/soc/intel/atom/sst-mfld-platform.h
+++ b/sound/soc/intel/atom/sst-mfld-platform.h
@@ -25,6 +25,7 @@
 #include "sst-atom-controls.h"
 
 extern struct sst_device *sst;
+extern struct snd_compr_ops sst_platform_compr_ops;
 
 #define SST_MONO		1
 #define SST_STEREO		2
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 0e0e4d9..ce689c5 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -151,6 +151,7 @@
 		usage_count = GET_USAGE_COUNT(dev);
 		dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
 		if (ret < 0) {
+			pm_runtime_put_sync(dev);
 			dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
 			return ret;
 		}
@@ -204,8 +205,10 @@
 	struct intel_sst_drv *ctx = dev_get_drvdata(dev);
 
 	retval = pm_runtime_get_sync(ctx->dev);
-	if (retval < 0)
+	if (retval < 0) {
+		pm_runtime_put_sync(ctx->dev);
 		return retval;
+	}
 
 	str_id = sst_get_stream(ctx, str_params);
 	if (str_id > 0) {
@@ -672,8 +675,10 @@
 	if (NULL == bytes)
 		return -EINVAL;
 	ret_val = pm_runtime_get_sync(ctx->dev);
-	if (ret_val < 0)
+	if (ret_val < 0) {
+		pm_runtime_put_sync(ctx->dev);
 		return ret_val;
+	}
 
 	ret_val = sst_send_byte_stream_mrfld(ctx, bytes);
 	sst_pm_runtime_put(ctx);
diff --git a/sound/soc/intel/atom/sst/sst_ipc.c b/sound/soc/intel/atom/sst/sst_ipc.c
index 5a27861..3dc7358 100644
--- a/sound/soc/intel/atom/sst/sst_ipc.c
+++ b/sound/soc/intel/atom/sst/sst_ipc.c
@@ -352,10 +352,9 @@
 	 * copy from mailbox
 	 **/
 	if (msg_high.part.large) {
-		data = kzalloc(msg_low, GFP_KERNEL);
+		data = kmemdup((void *)msg->mailbox_data, msg_low, GFP_KERNEL);
 		if (!data)
 			return;
-		memcpy(data, (void *) msg->mailbox_data, msg_low);
 		/* Copy command id so that we can use to put sst to reset */
 		dsp_hdr = (struct ipc_dsp_hdr *)data;
 		cmd_id = dsp_hdr->cmd_id;
diff --git a/sound/soc/intel/boards/byt-max98090.c b/sound/soc/intel/boards/byt-max98090.c
index 7ab8cc9..d9f81b8 100644
--- a/sound/soc/intel/boards/byt-max98090.c
+++ b/sound/soc/intel/boards/byt-max98090.c
@@ -126,6 +126,7 @@
 
 static struct snd_soc_card byt_max98090_card = {
 	.name = "byt-max98090",
+	.owner = THIS_MODULE,
 	.dai_link = byt_max98090_dais,
 	.num_links = ARRAY_SIZE(byt_max98090_dais),
 	.dapm_widgets = byt_max98090_widgets,
diff --git a/sound/soc/intel/boards/byt-rt5640.c b/sound/soc/intel/boards/byt-rt5640.c
index ae89b9b9..de9788a 100644
--- a/sound/soc/intel/boards/byt-rt5640.c
+++ b/sound/soc/intel/boards/byt-rt5640.c
@@ -197,6 +197,7 @@
 
 static struct snd_soc_card byt_rt5640_card = {
 	.name = "byt-rt5640",
+	.owner = THIS_MODULE,
 	.dai_link = byt_rt5640_dais,
 	.num_links = ARRAY_SIZE(byt_rt5640_dais),
 	.dapm_widgets = byt_rt5640_widgets,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7f55d59..c445312 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -185,6 +185,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_byt = {
 	.name = "baytrailcraudio",
+	.owner = THIS_MODULE,
 	.dai_link = byt_dailink,
 	.num_links = ARRAY_SIZE(byt_dailink),
 	.dapm_widgets = byt_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 70f8321..49f4869 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -104,21 +104,17 @@
 static int cht_ti_jack_event(struct notifier_block *nb,
 		unsigned long event, void *data)
 {
-
 	struct snd_soc_jack *jack = (struct snd_soc_jack *)data;
-	struct snd_soc_dai *codec_dai = jack->card->rtd->codec_dai;
-	struct snd_soc_codec *codec = codec_dai->codec;
+	struct snd_soc_dapm_context *dapm = &jack->card->dapm;
 
 	if (event & SND_JACK_MICROPHONE) {
-
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "SHDN");
-		snd_soc_dapm_force_enable_pin(&codec->dapm, "MICBIAS");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_force_enable_pin(dapm, "SHDN");
+		snd_soc_dapm_force_enable_pin(dapm, "MICBIAS");
+		snd_soc_dapm_sync(dapm);
 	} else {
-
-		snd_soc_dapm_disable_pin(&codec->dapm, "MICBIAS");
-		snd_soc_dapm_disable_pin(&codec->dapm, "SHDN");
-		snd_soc_dapm_sync(&codec->dapm);
+		snd_soc_dapm_disable_pin(dapm, "MICBIAS");
+		snd_soc_dapm_disable_pin(dapm, "SHDN");
+		snd_soc_dapm_sync(dapm);
 	}
 
 	return 0;
@@ -279,6 +275,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_cht = {
 	.name = "chtmax98090",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.aux_dev = &cht_max98090_headset_dev,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index bdcaf46..7be8461 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -305,6 +305,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_chtrt5645 = {
 	.name = "chtrt5645",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
@@ -317,6 +318,7 @@
 
 static struct snd_soc_card snd_soc_card_chtrt5650 = {
 	.name = "chtrt5650",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 2c9cc5b..23fe040 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -323,6 +323,7 @@
 /* SoC card */
 static struct snd_soc_card snd_soc_card_cht = {
 	.name = "cherrytrailcraudio",
+	.owner = THIS_MODULE,
 	.dai_link = cht_dailink,
 	.num_links = ARRAY_SIZE(cht_dailink),
 	.dapm_widgets = cht_dapm_widgets,
diff --git a/sound/soc/intel/common/sst-dsp-priv.h b/sound/soc/intel/common/sst-dsp-priv.h
index 396d545..cbd568e 100644
--- a/sound/soc/intel/common/sst-dsp-priv.h
+++ b/sound/soc/intel/common/sst-dsp-priv.h
@@ -22,6 +22,8 @@
 #include <linux/interrupt.h>
 #include <linux/firmware.h>
 
+#include "../skylake/skl-sst-dsp.h"
+
 struct sst_mem_block;
 struct sst_module;
 struct sst_fw;
@@ -258,6 +260,8 @@
  */
 struct sst_dsp {
 
+	/* Shared for all platforms */
+
 	/* runtime */
 	struct sst_dsp_device *sst_dev;
 	spinlock_t spinlock;	/* IPC locking */
@@ -268,10 +272,6 @@
 	int irq;
 	u32 id;
 
-	/* list of free and used ADSP memory blocks */
-	struct list_head used_block_list;
-	struct list_head free_block_list;
-
 	/* operations */
 	struct sst_ops *ops;
 
@@ -284,6 +284,12 @@
 	/* mailbox */
 	struct sst_mailbox mailbox;
 
+	/* HSW/Byt data */
+
+	/* list of free and used ADSP memory blocks */
+	struct list_head used_block_list;
+	struct list_head free_block_list;
+
 	/* SST FW files loaded and their modules */
 	struct list_head module_list;
 	struct list_head fw_list;
@@ -299,6 +305,15 @@
 	/* DMA FW loading */
 	struct sst_dma *dma;
 	bool fw_use_dma;
+
+	/* SKL data */
+
+	/* To allocate CL dma buffers */
+	struct skl_dsp_loader_ops dsp_ops;
+	struct skl_dsp_fw_ops fw_ops;
+	int sst_state;
+	struct skl_cl_dev cl_dev;
+	u32 intr_status;
 };
 
 /* Size optimised DRAM/IRAM memcpy */
diff --git a/sound/soc/intel/common/sst-dsp.c b/sound/soc/intel/common/sst-dsp.c
index 64e9421..a627236 100644
--- a/sound/soc/intel/common/sst-dsp.c
+++ b/sound/soc/intel/common/sst-dsp.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/delay.h>
 
 #include "sst-dsp.h"
 #include "sst-dsp-priv.h"
@@ -196,6 +197,22 @@
 }
 EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64_unlocked);
 
+/* This is for registers bits with attribute RWC */
+void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value)
+{
+	unsigned int old, new;
+	u32 ret;
+
+	ret = sst_dsp_shim_read_unlocked(sst, offset);
+
+	old = ret;
+	new = (old & (~mask)) | (value & mask);
+
+	sst_dsp_shim_write_unlocked(sst, offset, new);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced_unlocked);
+
 int sst_dsp_shim_update_bits(struct sst_dsp *sst, u32 offset,
 				u32 mask, u32 value)
 {
@@ -222,6 +239,60 @@
 }
 EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits64);
 
+/* This is for registers bits with attribute RWC */
+void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sst->spinlock, flags);
+	sst_dsp_shim_update_bits_forced_unlocked(sst, offset, mask, value);
+	spin_unlock_irqrestore(&sst->spinlock, flags);
+}
+EXPORT_SYMBOL_GPL(sst_dsp_shim_update_bits_forced);
+
+int sst_dsp_register_poll(struct sst_dsp *ctx, u32 offset, u32 mask,
+			 u32 target, u32 timeout, char *operation)
+{
+	int time, ret;
+	u32 reg;
+	bool done = false;
+
+	/*
+	 * We poll for a couple of ms using mdelay; if that is not
+	 * successful, fall back to longer sleeps using usleep_range
+	 */
+
+	/* check if set state successful */
+	for (time = 0; time < 5; time++) {
+		if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target) {
+			done = true;
+			break;
+		}
+		mdelay(1);
+	}
+
+	if (done ==  false) {
+		/* sleeping in 10ms steps so adjust timeout value */
+		timeout /= 10;
+
+		for (time = 0; time < timeout; time++) {
+			if ((sst_dsp_shim_read_unlocked(ctx, offset) & mask) == target)
+				break;
+
+			usleep_range(5000, 10000);
+		}
+	}
+
+	reg = sst_dsp_shim_read_unlocked(ctx, offset);
+	dev_info(ctx->dev, "FW Poll Status: reg=%#x %s %s\n", reg, operation,
+			(time < timeout) ? "successful" : "timedout");
+	ret = time < timeout ? 0 : -ETIME;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sst_dsp_register_poll);
+
 void sst_dsp_dump(struct sst_dsp *sst)
 {
 	if (sst->ops->dump)
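The new sst_dsp_register_poll() helper above spins for a few ms with mdelay() and then falls back to longer usleep_range() steps until the masked register value matches the target or the timeout expires. A minimal caller-side sketch, where FW_STATUS_OFFSET and FW_READY_BIT are hypothetical placeholders rather than real SKL shim definitions:

	/* Illustrative only: the offset and bit names below are made up. */
	static int example_wait_fw_ready(struct sst_dsp *ctx)
	{
		/* Wait up to ~300 ms for (reg & FW_READY_BIT) == FW_READY_BIT. */
		return sst_dsp_register_poll(ctx, FW_STATUS_OFFSET,
					     FW_READY_BIT, FW_READY_BIT,
					     300, "firmware ready");
	}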
diff --git a/sound/soc/intel/common/sst-dsp.h b/sound/soc/intel/common/sst-dsp.h
index 96aeb25..1f45f18 100644
--- a/sound/soc/intel/common/sst-dsp.h
+++ b/sound/soc/intel/common/sst-dsp.h
@@ -230,6 +230,8 @@
 u64 sst_dsp_shim_read64(struct sst_dsp *sst, u32 offset);
 int sst_dsp_shim_update_bits64(struct sst_dsp *sst, u32 offset,
 				u64 mask, u64 value);
+void sst_dsp_shim_update_bits_forced(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value);
 
 /* SHIM Read / Write Unlocked for callers already holding sst lock */
 void sst_dsp_shim_write_unlocked(struct sst_dsp *sst, u32 offset, u32 value);
@@ -240,6 +242,8 @@
 u64 sst_dsp_shim_read64_unlocked(struct sst_dsp *sst, u32 offset);
 int sst_dsp_shim_update_bits64_unlocked(struct sst_dsp *sst, u32 offset,
 					u64 mask, u64 value);
+void sst_dsp_shim_update_bits_forced_unlocked(struct sst_dsp *sst, u32 offset,
+				u32 mask, u32 value);
 
 /* Internal generic low-level SST IO functions - can be overidden */
 void sst_shim32_write(void __iomem *addr, u32 offset, u32 value);
@@ -278,6 +282,8 @@
 void sst_dsp_outbox_write(struct sst_dsp *dsp, void *message, size_t bytes);
 void sst_dsp_outbox_read(struct sst_dsp *dsp, void *message, size_t bytes);
 void sst_dsp_mailbox_dump(struct sst_dsp *dsp, size_t bytes);
+int sst_dsp_register_poll(struct sst_dsp  *dsp, u32 offset, u32 mask,
+		 u32 expected_value, u32 timeout, char *operation);
 
 /* Debug */
 void sst_dsp_dump(struct sst_dsp *sst);
diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
new file mode 100644
index 0000000..27db221
--- /dev/null
+++ b/sound/soc/intel/skylake/Makefile
@@ -0,0 +1,9 @@
+snd-soc-skl-objs := skl.o skl-pcm.o skl-nhlt.o skl-messages.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
+
+# Skylake IPC Support
+snd-soc-skl-ipc-objs := skl-sst-ipc.o skl-sst-dsp.o skl-sst-cldma.o \
+		skl-sst.o
+
+obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl-ipc.o
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
new file mode 100644
index 0000000..826d4fd
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -0,0 +1,884 @@
+/*
+ *  skl-messages.c - HDA DSP interface for FW registration, Pipe and Module
+ *  configurations
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author:Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	   Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+#include "skl.h"
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-topology.h"
+#include "skl-tplg-interface.h"
+
+static int skl_alloc_dma_buf(struct device *dev,
+		struct snd_dma_buffer *dmab, size_t size)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	return  bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
+}
+
+static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dev);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	if (!bus)
+		return -ENODEV;
+
+	bus->io_ops->dma_free_pages(bus, dmab);
+
+	return 0;
+}
+
+int skl_init_dsp(struct skl *skl)
+{
+	void __iomem *mmio_base;
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int irq = bus->irq;
+	struct skl_dsp_loader_ops loader_ops;
+	int ret;
+
+	loader_ops.alloc_dma_buf = skl_alloc_dma_buf;
+	loader_ops.free_dma_buf = skl_free_dma_buf;
+
+	/* enable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+
+	/* read the BAR of the ADSP MMIO */
+	mmio_base = pci_ioremap_bar(skl->pci, 4);
+	if (mmio_base == NULL) {
+		dev_err(bus->dev, "ioremap error\n");
+		return -ENXIO;
+	}
+
+	ret = skl_sst_dsp_init(bus->dev, mmio_base, irq,
+			loader_ops, &skl->skl_sst);
+
+	dev_dbg(bus->dev, "dsp registration status=%d\n", ret);
+
+	return ret;
+}
+
+void skl_free_dsp(struct skl *skl)
+{
+	struct hdac_ext_bus *ebus = &skl->ebus;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl_sst *ctx =  skl->skl_sst;
+
+	/* disable  ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+
+	skl_sst_dsp_cleanup(bus->dev, ctx);
+	if (ctx->dsp->addr.lpe)
+		iounmap(ctx->dsp->addr.lpe);
+}
+
+int skl_suspend_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+	int ret;
+
+	/* if ppcap is not supported return 0 */
+	if (!skl->ebus.ppcap)
+		return 0;
+
+	ret = skl_dsp_sleep(ctx->dsp);
+	if (ret < 0)
+		return ret;
+
+	/* disable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, false);
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, false);
+
+	return 0;
+}
+
+int skl_resume_dsp(struct skl *skl)
+{
+	struct skl_sst *ctx = skl->skl_sst;
+
+	/* if ppcap is not supported return 0 */
+	if (!skl->ebus.ppcap)
+		return 0;
+
+	/* enable ppcap interrupt */
+	snd_hdac_ext_bus_ppcap_enable(&skl->ebus, true);
+	snd_hdac_ext_bus_ppcap_int_enable(&skl->ebus, true);
+
+	return skl_dsp_wake(ctx->dsp);
+}
+
+enum skl_bitdepth skl_get_bit_depth(int params)
+{
+	switch (params) {
+	case 8:
+		return SKL_DEPTH_8BIT;
+
+	case 16:
+		return SKL_DEPTH_16BIT;
+
+	case 24:
+		return SKL_DEPTH_24BIT;
+
+	case 32:
+		return SKL_DEPTH_32BIT;
+
+	default:
+		return SKL_DEPTH_INVALID;
+
+	}
+}
+
+static u32 skl_create_channel_map(enum skl_ch_cfg ch_cfg)
+{
+	u32 config;
+
+	switch (ch_cfg) {
+	case SKL_CH_CFG_MONO:
+		config =  (0xFFFFFFF0 | SKL_CHANNEL_LEFT);
+		break;
+
+	case SKL_CH_CFG_STEREO:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4));
+		break;
+
+	case SKL_CH_CFG_2_1:
+		config = (0xFFFFF000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4)
+			| (SKL_CHANNEL_LFE << 8));
+		break;
+
+	case SKL_CH_CFG_3_0:
+		config =  (0xFFFFF000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8));
+		break;
+
+	case SKL_CH_CFG_3_1:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LFE << 12));
+		break;
+
+	case SKL_CH_CFG_QUATRO:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4)
+			| (SKL_CHANNEL_LEFT_SURROUND << 8)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 12));
+		break;
+
+	case SKL_CH_CFG_4_0:
+		config = (0xFFFF0000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_CENTER_SURROUND << 12));
+		break;
+
+	case SKL_CH_CFG_5_0:
+		config = (0xFFF00000 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_CENTER << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LEFT_SURROUND << 12)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 16));
+		break;
+
+	case SKL_CH_CFG_5_1:
+		config = (0xFF000000 | SKL_CHANNEL_CENTER
+			| (SKL_CHANNEL_LEFT << 4)
+			| (SKL_CHANNEL_RIGHT << 8)
+			| (SKL_CHANNEL_LEFT_SURROUND << 12)
+			| (SKL_CHANNEL_RIGHT_SURROUND << 16)
+			| (SKL_CHANNEL_LFE << 20));
+		break;
+
+	case SKL_CH_CFG_DUAL_MONO:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_LEFT << 4));
+		break;
+
+	case SKL_CH_CFG_I2S_DUAL_STEREO_0:
+		config = (0xFFFFFF00 | SKL_CHANNEL_LEFT
+			| (SKL_CHANNEL_RIGHT << 4));
+		break;
+
+	case SKL_CH_CFG_I2S_DUAL_STEREO_1:
+		config = (0xFFFF00FF | (SKL_CHANNEL_LEFT << 8)
+			| (SKL_CHANNEL_RIGHT << 12));
+		break;
+
+	default:
+		config =  0xFFFFFFFF;
+		break;
+
+	}
+
+	return config;
+}
+
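skl_create_channel_map() packs one channel identifier per 4-bit nibble starting at bit 0 and marks unused nibbles as 0xF, so for stereo the left channel sits in bits 0-3 and the right channel in bits 4-7. A small sketch of how a slot could be read back under the same packing (the helper name is hypothetical, not part of the patch):

	/* Return the channel id stored in a given 4-bit slot; 0xF means unused. */
	static inline u32 skl_example_map_slot(u32 channel_map, unsigned int slot)
	{
		return (channel_map >> (slot * 4)) & 0xF;
	}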
+/*
+ * Each module in the DSP expects a base module configuration, which consists
+ * of PCM format information calculated in the driver and resource values
+ * read from the widget information passed through the topology binary.
+ * This is sent when we create a module with the INIT_INSTANCE IPC msg
+ */
+static void skl_set_base_module_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_base_cfg *base_cfg)
+{
+	struct skl_module_fmt *format = &mconfig->in_fmt;
+
+	base_cfg->audio_fmt.number_of_channels = (u8)format->channels;
+
+	base_cfg->audio_fmt.s_freq = format->s_freq;
+	base_cfg->audio_fmt.bit_depth = format->bit_depth;
+	base_cfg->audio_fmt.valid_bit_depth = format->valid_bit_depth;
+	base_cfg->audio_fmt.ch_cfg = format->ch_cfg;
+
+	dev_dbg(ctx->dev, "bit_depth=%x valid_bd=%x ch_config=%x\n",
+			format->bit_depth, format->valid_bit_depth,
+			format->ch_cfg);
+
+	base_cfg->audio_fmt.channel_map = skl_create_channel_map(
+					base_cfg->audio_fmt.ch_cfg);
+
+	base_cfg->audio_fmt.interleaving = SKL_INTERLEAVING_PER_CHANNEL;
+
+	base_cfg->cps = mconfig->mcps;
+	base_cfg->ibs = mconfig->ibs;
+	base_cfg->obs = mconfig->obs;
+}
+
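skl_set_base_module_format() is reused for the copier, SRC and up/down-mixer configs below by casting each module-specific struct to struct skl_base_cfg, which only works because the base config is laid out as the leading member of every derived config. A sketch of that layout assumption (the struct name here is illustrative; the real definitions live elsewhere in the driver headers):

	struct skl_example_cpr_cfg {
		struct skl_base_cfg base;	/* must stay the first member */
		/* copier-specific fields (out_fmt, gateway config, ...) follow */
	};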
+/*
+ * Copies copier capabilities into copier module and updates copier module
+ * config size.
+ */
+static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
+				struct skl_cpr_cfg *cpr_mconfig)
+{
+	if (mconfig->formats_config.caps_size == 0)
+		return;
+
+	memcpy(cpr_mconfig->gtw_cfg.config_data,
+			mconfig->formats_config.caps,
+			mconfig->formats_config.caps_size);
+
+	cpr_mconfig->gtw_cfg.config_length =
+			(mconfig->formats_config.caps_size) / 4;
+}
+
+/*
+ * Calculate the gateway settings required for the copier module: the type
+ * of gateway and the index of the gateway to use
+ */
+static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_cpr_cfg *cpr_mconfig)
+{
+	union skl_connector_node_id node_id = {0};
+	struct skl_pipe_params *params = mconfig->pipe->p_params;
+
+	switch (mconfig->dev_type) {
+	case SKL_DEVICE_BT:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
+			SKL_DMA_I2S_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id +
+					(mconfig->vbus_id << 3);
+		break;
+
+	case SKL_DEVICE_I2S:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_I2S_LINK_OUTPUT_CLASS :
+			SKL_DMA_I2S_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id +
+					 (mconfig->time_slot << 1) +
+					 (mconfig->vbus_id << 3);
+		break;
+
+	case SKL_DEVICE_DMIC:
+		node_id.node.dma_type = SKL_DMA_DMIC_LINK_INPUT_CLASS;
+		node_id.node.vindex = mconfig->vbus_id +
+					 (mconfig->time_slot);
+		break;
+
+	case SKL_DEVICE_HDALINK:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_HDA_LINK_OUTPUT_CLASS :
+			SKL_DMA_HDA_LINK_INPUT_CLASS;
+		node_id.node.vindex = params->link_dma_id;
+		break;
+
+	default:
+		node_id.node.dma_type =
+			(SKL_CONN_SOURCE == mconfig->hw_conn_type) ?
+			SKL_DMA_HDA_HOST_OUTPUT_CLASS :
+			SKL_DMA_HDA_HOST_INPUT_CLASS;
+		node_id.node.vindex = params->host_dma_id;
+		break;
+	}
+
+	cpr_mconfig->gtw_cfg.node_id = node_id.val;
+
+	if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
+		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
+	else
+		cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->ibs;
+
+	cpr_mconfig->cpr_feature_mask = 0;
+	cpr_mconfig->gtw_cfg.config_length  = 0;
+
+	skl_copy_copier_caps(mconfig, cpr_mconfig);
+}
+
+static void skl_setup_out_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_audio_data_format *out_fmt)
+{
+	struct skl_module_fmt *format = &mconfig->out_fmt;
+
+	out_fmt->number_of_channels = (u8)format->channels;
+	out_fmt->s_freq = format->s_freq;
+	out_fmt->bit_depth = format->bit_depth;
+	out_fmt->valid_bit_depth = format->valid_bit_depth;
+	out_fmt->ch_cfg = format->ch_cfg;
+
+	out_fmt->channel_map = skl_create_channel_map(out_fmt->ch_cfg);
+	out_fmt->interleaving = SKL_INTERLEAVING_PER_CHANNEL;
+
+	dev_dbg(ctx->dev, "copier out format chan=%d fre=%d bitdepth=%d\n",
+		out_fmt->number_of_channels, format->s_freq, format->bit_depth);
+}
+
+/*
+ * The DSP needs the SRC module for frequency conversion. SRC takes the base
+ * module configuration and the target frequency as an extra parameter,
+ * passed as the src config
+ */
+static void skl_set_src_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_src_module_cfg *src_mconfig)
+{
+	struct skl_module_fmt *fmt = &mconfig->out_fmt;
+
+	skl_set_base_module_format(ctx, mconfig,
+		(struct skl_base_cfg *)src_mconfig);
+
+	src_mconfig->src_cfg = fmt->s_freq;
+}
+
+/*
+ * The DSP needs the updown module to do channel conversion. The updown
+ * module takes the base module configuration and the channel configuration.
+ * It also takes coefficients; for now the firmware defaults are selected here
+ */
+static void skl_set_updown_mixer_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_up_down_mixer_cfg *mixer_mconfig)
+{
+	struct skl_module_fmt *fmt = &mconfig->out_fmt;
+	int i = 0;
+
+	skl_set_base_module_format(ctx,	mconfig,
+		(struct skl_base_cfg *)mixer_mconfig);
+	mixer_mconfig->out_ch_cfg = fmt->ch_cfg;
+
+	/* Select F/W default coefficient */
+	mixer_mconfig->coeff_sel = 0x0;
+
+	/* User coeff, don't care since we are selecting F/W defaults */
+	for (i = 0; i < UP_DOWN_MIXER_MAX_COEFF; i++)
+		mixer_mconfig->coeff[i] = 0xDEADBEEF;
+}
+
+/*
+ * 'copier' is a DSP-internal module which copies data from host DMA (HDA
+ * host DMA) or a link (HDA link, SSP, PDM).
+ * Here we calculate the copier module parameters, like PCM format, output
+ * format and gateway settings.
+ * copier_module_config is sent as the input buffer with the INIT_INSTANCE
+ * IPC msg
+ */
+static void skl_set_copier_format(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig,
+			struct skl_cpr_cfg *cpr_mconfig)
+{
+	struct skl_audio_data_format *out_fmt = &cpr_mconfig->out_fmt;
+	struct skl_base_cfg *base_cfg = (struct skl_base_cfg *)cpr_mconfig;
+
+	skl_set_base_module_format(ctx, mconfig, base_cfg);
+
+	skl_setup_out_format(ctx, mconfig, out_fmt);
+	skl_setup_cpr_gateway_cfg(ctx, mconfig, cpr_mconfig);
+}
+
+static u16 skl_get_module_param_size(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig)
+{
+	u16 param_size;
+
+	switch (mconfig->m_type) {
+	case SKL_MODULE_TYPE_COPIER:
+		param_size = sizeof(struct skl_cpr_cfg);
+		param_size += mconfig->formats_config.caps_size;
+		return param_size;
+
+	case SKL_MODULE_TYPE_SRCINT:
+		return sizeof(struct skl_src_module_cfg);
+
+	case SKL_MODULE_TYPE_UPDWMIX:
+		return sizeof(struct skl_up_down_mixer_cfg);
+
+	default:
+		/*
+		 * return only base cfg when no specific module type is
+		 * specified
+		 */
+		return sizeof(struct skl_base_cfg);
+	}
+
+	return 0;
+}
+
+/*
+ * The DSP firmware supports various modules like copier, SRC, updown etc.
+ * These modules require various parameters to be calculated and sent to the
+ * DSP for module initialization. By default a generic module needs only the
+ * base module format configuration
+ */
+
+static int skl_set_module_format(struct skl_sst *ctx,
+			struct skl_module_cfg *module_config,
+			u16 *module_config_size,
+			void **param_data)
+{
+	u16 param_size;
+
+	param_size  = skl_get_module_param_size(ctx, module_config);
+
+	*param_data = kzalloc(param_size, GFP_KERNEL);
+	if (NULL == *param_data)
+		return -ENOMEM;
+
+	*module_config_size = param_size;
+
+	switch (module_config->m_type) {
+	case SKL_MODULE_TYPE_COPIER:
+		skl_set_copier_format(ctx, module_config, *param_data);
+		break;
+
+	case SKL_MODULE_TYPE_SRCINT:
+		skl_set_src_format(ctx, module_config, *param_data);
+		break;
+
+	case SKL_MODULE_TYPE_UPDWMIX:
+		skl_set_updown_mixer_format(ctx, module_config, *param_data);
+		break;
+
+	default:
+		skl_set_base_module_format(ctx, module_config, *param_data);
+		break;
+
+	}
+
+	dev_dbg(ctx->dev, "Module type=%d config size: %d bytes\n",
+			module_config->id.module_id, param_size);
+	print_hex_dump(KERN_DEBUG, "Module params:", DUMP_PREFIX_OFFSET, 8, 4,
+			*param_data, param_size, false);
+	return 0;
+}
+
+static int skl_get_queue_index(struct skl_module_pin *mpin,
+				struct skl_module_inst_id id, int max)
+{
+	int i;
+
+	for (i = 0; i < max; i++)  {
+		if (mpin[i].id.module_id == id.module_id &&
+			mpin[i].id.instance_id == id.instance_id)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Allocates a queue for each module.
+ * If dynamic, the pin_index is allocated from 0 to max_pin.
+ * If static, the pin_index is fixed based on module_id and instance id
+ */
+static int skl_alloc_queue(struct skl_module_pin *mpin,
+			struct skl_module_inst_id id, int max)
+{
+	int i;
+
+	/*
+	 * If the pin is dynamic, find the first free pin; otherwise find
+	 * the pin matching the module and instance id, as the topology
+	 * ensures a unique pin is assigned to it, so there is no need to
+	 * allocate/free
+	 */
+	for (i = 0; i < max; i++)  {
+		if (mpin[i].is_dynamic) {
+			if (!mpin[i].in_use) {
+				mpin[i].in_use = true;
+				mpin[i].id.module_id = id.module_id;
+				mpin[i].id.instance_id = id.instance_id;
+				return i;
+			}
+		} else {
+			if (mpin[i].id.module_id == id.module_id &&
+				mpin[i].id.instance_id == id.instance_id)
+				return i;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static void skl_free_queue(struct skl_module_pin *mpin, int q_index)
+{
+	if (mpin[q_index].is_dynamic) {
+		mpin[q_index].in_use = false;
+		mpin[q_index].id.module_id = 0;
+		mpin[q_index].id.instance_id = 0;
+	}
+}
+
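A short walk-through of the allocation rule above, assuming a hypothetical module config 'mcfg' with two dynamic output pins and two peer modules 'peer_a' and 'peer_b':

	int q0 = skl_alloc_queue(mcfg->m_out_pin, peer_a->id, mcfg->max_out_queue); /* -> 0 */
	int q1 = skl_alloc_queue(mcfg->m_out_pin, peer_b->id, mcfg->max_out_queue); /* -> 1 */

	/* Only dynamic pins are returned to the pool; static pins keep their
	 * fixed module/instance mapping. */
	skl_free_queue(mcfg->m_out_pin, q0);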
+/*
+ * A module needs to be instantiated in the DSP. A module is part of a
+ * collection of modules referred to as a PIPE.
+ * We first calculate the module format, based on the module type, and then
+ * invoke the DSP by sending the IPC INIT_INSTANCE using the ipc helper
+ */
+int skl_init_module(struct skl_sst *ctx,
+			struct skl_module_cfg *mconfig, char *param)
+{
+	u16 module_config_size = 0;
+	void *param_data = NULL;
+	int ret;
+	struct skl_ipc_init_instance_msg msg;
+
+	dev_dbg(ctx->dev, "%s: module_id = %d instance=%d\n", __func__,
+		 mconfig->id.module_id, mconfig->id.instance_id);
+
+	if (mconfig->pipe->state != SKL_PIPE_CREATED) {
+		dev_err(ctx->dev, "Pipe not created state= %d pipe_id= %d\n",
+				 mconfig->pipe->state, mconfig->pipe->ppl_id);
+		return -EIO;
+	}
+
+	ret = skl_set_module_format(ctx, mconfig,
+			&module_config_size, &param_data);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set module format ret=%d\n", ret);
+		return ret;
+	}
+
+	msg.module_id = mconfig->id.module_id;
+	msg.instance_id = mconfig->id.instance_id;
+	msg.ppl_instance_id = mconfig->pipe->ppl_id;
+	msg.param_data_size = module_config_size;
+	msg.core_id = mconfig->core_id;
+
+	ret = skl_ipc_init_instance(&ctx->ipc, &msg, param_data);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to init instance ret=%d\n", ret);
+		kfree(param_data);
+		return ret;
+	}
+	mconfig->m_state = SKL_MODULE_INIT_DONE;
+
+	return ret;
+}
+
+static void skl_dump_bind_info(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module)
+{
+	dev_dbg(ctx->dev, "%s: src module_id = %d  src_instance=%d\n",
+		__func__, src_module->id.module_id, src_module->id.instance_id);
+	dev_dbg(ctx->dev, "%s: dst_module=%d dst_instance=%d\n", __func__,
+		 dst_module->id.module_id, dst_module->id.instance_id);
+
+	dev_dbg(ctx->dev, "src_module state = %d dst module state = %d\n",
+		src_module->m_state, dst_module->m_state);
+}
+
+/*
+ * On module free-up, we need to unbind the module from the modules
+ * it is already bound to.
+ * Find the allocated pins and unbind them using the bind_unbind IPC
+ */
+int skl_unbind_modules(struct skl_sst *ctx,
+			struct skl_module_cfg *src_mcfg,
+			struct skl_module_cfg *dst_mcfg)
+{
+	int ret;
+	struct skl_ipc_bind_unbind_msg msg;
+	struct skl_module_inst_id src_id = src_mcfg->id;
+	struct skl_module_inst_id dst_id = dst_mcfg->id;
+	int in_max = dst_mcfg->max_in_queue;
+	int out_max = src_mcfg->max_out_queue;
+	int src_index, dst_index;
+
+	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
+
+	if (src_mcfg->m_state != SKL_MODULE_BIND_DONE)
+		return 0;
+
+	/*
+	 * if intra module unbind, check if both modules are BIND,
+	 * then send unbind
+	 */
+	if ((src_mcfg->pipe->ppl_id != dst_mcfg->pipe->ppl_id) &&
+				dst_mcfg->m_state != SKL_MODULE_BIND_DONE)
+		return 0;
+	else if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+				 dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
+		return 0;
+
+	/* get src queue index */
+	src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
+	if (src_index < 0)
+		return -EINVAL;
+
+	msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
+
+	/* get dst queue index */
+	dst_index  = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
+	if (dst_index < 0)
+		return -EINVAL;
+
+	msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
+
+	msg.module_id = src_mcfg->id.module_id;
+	msg.instance_id = src_mcfg->id.instance_id;
+	msg.dst_module_id = dst_mcfg->id.module_id;
+	msg.dst_instance_id = dst_mcfg->id.instance_id;
+	msg.bind = false;
+
+	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
+	if (!ret) {
+		src_mcfg->m_state = SKL_MODULE_UNINIT;
+		/* free queue only if unbind is success */
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
+	}
+
+	return ret;
+}
+
+/*
+ * Once a module is instantiated it needs to be 'bound' to other modules in
+ * the pipeline. For binding we need to find the module pins which are bound
+ * together.
+ * This function finds the pins and then sends the bind_unbind IPC message
+ * to the DSP using the IPC helper
+ */
+int skl_bind_modules(struct skl_sst *ctx,
+			struct skl_module_cfg *src_mcfg,
+			struct skl_module_cfg *dst_mcfg)
+{
+	int ret;
+	struct skl_ipc_bind_unbind_msg msg;
+	struct skl_module_inst_id src_id = src_mcfg->id;
+	struct skl_module_inst_id dst_id = dst_mcfg->id;
+	int in_max = dst_mcfg->max_in_queue;
+	int out_max = src_mcfg->max_out_queue;
+	int src_index, dst_index;
+
+	skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
+
+	if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+		dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
+		return 0;
+
+	src_index = skl_alloc_queue(src_mcfg->m_out_pin, dst_id, out_max);
+	if (src_index < 0)
+		return -EINVAL;
+
+	msg.src_queue = src_mcfg->m_out_pin[src_index].pin_index;
+	dst_index = skl_alloc_queue(dst_mcfg->m_in_pin, src_id, in_max);
+	if (dst_index < 0) {
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		return -EINVAL;
+	}
+
+	msg.dst_queue = dst_mcfg->m_in_pin[dst_index].pin_index;
+
+	dev_dbg(ctx->dev, "src queue = %d dst queue =%d\n",
+			 msg.src_queue, msg.dst_queue);
+
+	msg.module_id = src_mcfg->id.module_id;
+	msg.instance_id = src_mcfg->id.instance_id;
+	msg.dst_module_id = dst_mcfg->id.module_id;
+	msg.dst_instance_id = dst_mcfg->id.instance_id;
+	msg.bind = true;
+
+	ret = skl_ipc_bind_unbind(&ctx->ipc, &msg);
+
+	if (!ret) {
+		src_mcfg->m_state = SKL_MODULE_BIND_DONE;
+	} else {
+		/* error case , if IPC fails, clear the queue index */
+		skl_free_queue(src_mcfg->m_out_pin, src_index);
+		skl_free_queue(dst_mcfg->m_in_pin, dst_index);
+	}
+
+	return ret;
+}
+
+static int skl_set_pipe_state(struct skl_sst *ctx, struct skl_pipe *pipe,
+	enum skl_ipc_pipeline_state state)
+{
+	dev_dbg(ctx->dev, "%s: pipe_state = %d\n", __func__, state);
+
+	return skl_ipc_set_pipeline_state(&ctx->ipc, pipe->ppl_id, state);
+}
+
+/*
+ * A pipeline is a collection of modules. Before a module is instantiated, a
+ * pipeline needs to be created for it.
+ * This function creates the pipeline by sending the create pipeline IPC
+ * message to the FW
+ */
+int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe_id = %d\n", __func__, pipe->ppl_id);
+
+	ret = skl_ipc_create_pipeline(&ctx->ipc, pipe->memory_pages,
+				pipe->pipe_priority, pipe->ppl_id);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to create pipeline\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_CREATED;
+
+	return 0;
+}
+
+/*
+ * A pipeline needs to be deleted on cleanup. If a pipeline is running,
+ * pause it first and then delete it.
+ * The pipe delete is done by sending the delete pipeline IPC. The DSP will
+ * stop the DMA engines and release the resources
+ */
+int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
+
+	/* If pipe is not started, do not try to stop the pipe in FW. */
+	if (pipe->state > SKL_PIPE_STARTED) {
+		ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+		if (ret < 0) {
+			dev_err(ctx->dev, "Failed to stop pipeline\n");
+			return ret;
+		}
+
+		pipe->state = SKL_PIPE_PAUSED;
+	} else {
+		/* If pipe was not created in FW, do not try to delete it */
+		if (pipe->state < SKL_PIPE_CREATED)
+			return 0;
+
+		ret = skl_ipc_delete_pipeline(&ctx->ipc, pipe->ppl_id);
+		if (ret < 0)
+			dev_err(ctx->dev, "Failed to delete pipeline\n");
+	}
+
+	return ret;
+}
+
+/*
+ * A pipeline is also a scheduling entity in the DSP which can be run or
+ * stopped. To process data the pipe needs to be run by sending the IPC
+ * set pipe state to the DSP
+ */
+int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "%s: pipe = %d\n", __func__, pipe->ppl_id);
+
+	/* If pipe was not created in FW, do not try to pause or delete */
+	if (pipe->state < SKL_PIPE_CREATED)
+		return 0;
+
+	/* Pipe has to be paused before it is started */
+	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to pause pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_PAUSED;
+
+	ret = skl_set_pipe_state(ctx, pipe, PPL_RUNNING);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to start pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_STARTED;
+
+	return 0;
+}
+
+/*
+ * Stop the pipeline by sending the set pipe state IPC.
+ * The DSP doesn't implement stop, so we always send the pause message
+ */
+int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "In %s pipe=%d\n", __func__, pipe->ppl_id);
+
+	/* If pipe was not created in FW, do not try to pause or delete */
+	if (pipe->state < SKL_PIPE_PAUSED)
+		return 0;
+
+	ret = skl_set_pipe_state(ctx, pipe, PPL_PAUSED);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "Failed to stop pipe\n");
+		return ret;
+	}
+
+	pipe->state = SKL_PIPE_CREATED;
+
+	return 0;
+}
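Taken together, the helpers in this file implement the host side of the pipeline lifecycle. A compressed sketch of the expected call order, with error handling omitted and the ctx, pipe and module config pointers assumed to come from the topology code:

	skl_create_pipeline(ctx, pipe);			/* IPC: create pipeline */
	skl_init_module(ctx, src_cfg, NULL);		/* IPC: init instance */
	skl_init_module(ctx, dst_cfg, NULL);
	skl_bind_modules(ctx, src_cfg, dst_cfg);	/* IPC: bind src -> dst */
	skl_run_pipe(ctx, pipe);			/* IPC: pause, then run */
	/* ... stream data ... */
	skl_stop_pipe(ctx, pipe);			/* IPC: pause (no real stop) */
	skl_unbind_modules(ctx, src_cfg, dst_cfg);
	skl_delete_pipe(ctx, pipe);			/* IPC: delete pipeline */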
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
new file mode 100644
index 0000000..13036b1
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -0,0 +1,140 @@
+/*
+ *  skl-nhlt.c - Intel SKL Platform NHLT parsing
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include "skl.h"
+
+/* Unique identification for getting NHLT blobs */
+static u8 OSC_UUID[16] = {0x6E, 0x88, 0x9F, 0xA6, 0xEB, 0x6C, 0x94, 0x45,
+				0xA4, 0x1F, 0x7B, 0x5D, 0xCE, 0x24, 0xC5, 0x53};
+
+#define DSDT_NHLT_PATH "\\_SB.PCI0.HDAS"
+
+void __iomem *skl_nhlt_init(struct device *dev)
+{
+	acpi_handle handle;
+	union acpi_object *obj;
+	struct nhlt_resource_desc  *nhlt_ptr = NULL;
+
+	if (ACPI_FAILURE(acpi_get_handle(NULL, DSDT_NHLT_PATH, &handle))) {
+		dev_err(dev, "Requested NHLT device not found\n");
+		return NULL;
+	}
+
+	obj = acpi_evaluate_dsm(handle, OSC_UUID, 1, 1, NULL);
+	if (obj && obj->type == ACPI_TYPE_BUFFER) {
+		nhlt_ptr = (struct nhlt_resource_desc  *)obj->buffer.pointer;
+
+		return ioremap_cache(nhlt_ptr->min_addr, nhlt_ptr->length);
+	}
+
+	dev_err(dev, "device specific method to extract NHLT blob failed\n");
+	return NULL;
+}
+
+void skl_nhlt_free(void __iomem *addr)
+{
+	iounmap(addr);
+	addr = NULL;
+}
+
+static struct nhlt_specific_cfg *skl_get_specific_cfg(
+		struct device *dev, struct nhlt_fmt *fmt,
+		u8 no_ch, u32 rate, u16 bps)
+{
+	struct nhlt_specific_cfg *sp_config;
+	struct wav_fmt *wfmt;
+	struct nhlt_fmt_cfg *fmt_config = fmt->fmt_config;
+	int i;
+
+	dev_dbg(dev, "Format count =%d\n", fmt->fmt_count);
+
+	for (i = 0; i < fmt->fmt_count; i++) {
+		wfmt = &fmt_config->fmt_ext.fmt;
+		dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", wfmt->channels,
+			 wfmt->bits_per_sample, wfmt->samples_per_sec);
+		if (wfmt->channels == no_ch && wfmt->samples_per_sec == rate &&
+					wfmt->bits_per_sample == bps) {
+			sp_config = &fmt_config->config;
+
+			return sp_config;
+		}
+
+		fmt_config = (struct nhlt_fmt_cfg *)(fmt_config->config.caps +
+						fmt_config->config.size);
+	}
+
+	return NULL;
+}
+
+static void dump_config(struct device *dev, u32 instance_id, u8 linktype,
+		u8 s_fmt, u8 num_channels, u32 s_rate, u8 dirn, u16 bps)
+{
+	dev_dbg(dev, "Input configuration\n");
+	dev_dbg(dev, "ch=%d fmt=%d s_rate=%d\n", num_channels, s_fmt, s_rate);
+	dev_dbg(dev, "vbus_id=%d link_type=%d\n", instance_id, linktype);
+	dev_dbg(dev, "bits_per_sample=%d\n", bps);
+}
+
+static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt,
+				u32 instance_id, u8 link_type, u8 dirn)
+{
+	dev_dbg(dev, "vbus_id=%d link_type=%d dir=%d\n",
+		epnt->virtual_bus_id, epnt->linktype, epnt->direction);
+
+	if ((epnt->virtual_bus_id == instance_id) &&
+			(epnt->linktype == link_type) &&
+			(epnt->direction == dirn))
+		return true;
+	else
+		return false;
+}
+
+struct nhlt_specific_cfg
+*skl_get_ep_blob(struct skl *skl, u32 instance, u8 link_type,
+			u8 s_fmt, u8 num_ch, u32 s_rate, u8 dirn)
+{
+	struct nhlt_fmt *fmt;
+	struct nhlt_endpoint *epnt;
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct device *dev = bus->dev;
+	struct nhlt_specific_cfg *sp_config;
+	struct nhlt_acpi_table *nhlt = (struct nhlt_acpi_table *)skl->nhlt;
+	u16 bps = num_ch * s_fmt;
+	u8 j;
+
+	dump_config(dev, instance, link_type, s_fmt, num_ch, s_rate, dirn, bps);
+
+	epnt = (struct nhlt_endpoint *)nhlt->desc;
+
+	dev_dbg(dev, "endpoint count =%d\n", nhlt->endpoint_count);
+
+	for (j = 0; j < nhlt->endpoint_count; j++) {
+		if (skl_check_ep_match(dev, epnt, instance, link_type, dirn)) {
+			fmt = (struct nhlt_fmt *)(epnt->config.caps +
+						 epnt->config.size);
+			sp_config = skl_get_specific_cfg(dev, fmt, num_ch, s_rate, bps);
+			if (sp_config)
+				return sp_config;
+		}
+
+		epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
+	}
+
+	return NULL;
+}
diff --git a/sound/soc/intel/skylake/skl-nhlt.h b/sound/soc/intel/skylake/skl-nhlt.h
new file mode 100644
index 0000000..3769f9f
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-nhlt.h
@@ -0,0 +1,106 @@
+/*
+ *  skl-nhlt.h - Intel HDA Platform NHLT header
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Sanjiv Kumar <sanjiv.kumar@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#ifndef __SKL_NHLT_H__
+#define __SKL_NHLT_H__
+
+#include <linux/acpi.h>
+
+struct wav_fmt {
+	u16 fmt_tag;
+	u16 channels;
+	u32 samples_per_sec;
+	u32 avg_bytes_per_sec;
+	u16 block_align;
+	u16 bits_per_sample;
+	u16 cb_size;
+} __packed;
+
+struct wav_fmt_ext {
+	struct wav_fmt fmt;
+	union samples {
+		u16 valid_bits_per_sample;
+		u16 samples_per_block;
+		u16 reserved;
+	} sample;
+	u32 channel_mask;
+	u8 sub_fmt[16];
+} __packed;
+
+enum nhlt_link_type {
+	NHLT_LINK_HDA = 0,
+	NHLT_LINK_DSP = 1,
+	NHLT_LINK_DMIC = 2,
+	NHLT_LINK_SSP = 3,
+	NHLT_LINK_INVALID
+};
+
+enum nhlt_device_type {
+	NHLT_DEVICE_BT = 0,
+	NHLT_DEVICE_DMIC = 1,
+	NHLT_DEVICE_I2S = 4,
+	NHLT_DEVICE_INVALID
+};
+
+struct nhlt_specific_cfg {
+	u32 size;
+	u8 caps[0];
+} __packed;
+
+struct nhlt_fmt_cfg {
+	struct wav_fmt_ext fmt_ext;
+	struct nhlt_specific_cfg config;
+} __packed;
+
+struct nhlt_fmt {
+	u8 fmt_count;
+	struct nhlt_fmt_cfg fmt_config[0];
+} __packed;
+
+struct nhlt_endpoint {
+	u32  length;
+	u8   linktype;
+	u8   instance_id;
+	u16  vendor_id;
+	u16  device_id;
+	u16  revision_id;
+	u32  subsystem_id;
+	u8   device_type;
+	u8   direction;
+	u8   virtual_bus_id;
+	struct nhlt_specific_cfg config;
+} __packed;
+
+struct nhlt_acpi_table {
+	struct acpi_table_header header;
+	u8 endpoint_count;
+	struct nhlt_endpoint desc[0];
+} __packed;
+
+struct nhlt_resource_desc  {
+	u32 extra;
+	u16 flags;
+	u64 addr_spc_gra;
+	u64 min_addr;
+	u64 max_addr;
+	u64 addr_trans_offset;
+	u64 length;
+} __packed;
+
+#endif
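The structures above are variable length, which is why the lookup code in skl-nhlt.c walks them by byte offsets instead of array indexing. A sketch of the nesting, derived from that parsing code:

	/*
	 * nhlt_acpi_table
	 *   header, endpoint_count
	 *   nhlt_endpoint[0]               (epnt->length bytes in total)
	 *     config: nhlt_specific_cfg    (size followed by caps[size])
	 *     nhlt_fmt                      starts at config.caps + config.size
	 *       fmt_count x nhlt_fmt_cfg, each ending in its own
	 *       variable-size nhlt_specific_cfg
	 *   nhlt_endpoint[1] = (u8 *)epnt + epnt->length
	 *   ...
	 */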
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
new file mode 100644
index 0000000..7d617bf
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -0,0 +1,916 @@
+/*
+ *  skl-pcm.c -ASoC HDA Platform driver file implementing PCM functionality
+ *
+ *  Copyright (C) 2014-2015 Intel Corp
+ *  Author:  Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "skl.h"
+
+#define HDA_MONO 1
+#define HDA_STEREO 2
+
+static struct snd_pcm_hardware azx_pcm_hw = {
+	.info =			(SNDRV_PCM_INFO_MMAP |
+				 SNDRV_PCM_INFO_INTERLEAVED |
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_MMAP_VALID |
+				 SNDRV_PCM_INFO_PAUSE |
+				 SNDRV_PCM_INFO_SYNC_START |
+				 SNDRV_PCM_INFO_HAS_WALL_CLOCK | /* legacy */
+				 SNDRV_PCM_INFO_HAS_LINK_ATIME |
+				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
+	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =		SNDRV_PCM_RATE_48000,
+	.rate_min =		48000,
+	.rate_max =		48000,
+	.channels_min =		2,
+	.channels_max =		2,
+	.buffer_bytes_max =	AZX_MAX_BUF_SIZE,
+	.period_bytes_min =	128,
+	.period_bytes_max =	AZX_MAX_BUF_SIZE / 2,
+	.periods_min =		2,
+	.periods_max =		AZX_MAX_FRAG,
+	.fifo_size =		0,
+};
+
+static inline
+struct hdac_ext_stream *get_hdac_ext_stream(struct snd_pcm_substream *substream)
+{
+	return substream->runtime->private_data;
+}
+
+static struct hdac_ext_bus *get_bus_ctx(struct snd_pcm_substream *substream)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct hdac_stream *hstream = hdac_stream(stream);
+	struct hdac_bus *bus = hstream->bus;
+
+	return hbus_to_ebus(bus);
+}
+
+static int skl_substream_alloc_pages(struct hdac_ext_bus *ebus,
+				 struct snd_pcm_substream *substream,
+				 size_t size)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+
+	hdac_stream(stream)->bufsize = 0;
+	hdac_stream(stream)->period_bytes = 0;
+	hdac_stream(stream)->format_val = 0;
+
+	return snd_pcm_lib_malloc_pages(substream, size);
+}
+
+static int skl_substream_free_pages(struct hdac_bus *bus,
+				struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_pages(substream);
+}
+
+static void skl_set_pcm_constrains(struct hdac_ext_bus *ebus,
+				 struct snd_pcm_runtime *runtime)
+{
+	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+
+	/* avoid wrap-around with wall-clock */
+	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+				     20, 178000000);
+}
+
+static enum hdac_ext_stream_type skl_get_host_stream_type(struct hdac_ext_bus *ebus)
+{
+	if (ebus->ppcap)
+		return HDAC_EXT_STREAM_TYPE_HOST;
+	else
+		return HDAC_EXT_STREAM_TYPE_COUPLED;
+}
+
+static int skl_pcm_open(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct skl_dma_params *dma_params;
+	int ret;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	ret = pm_runtime_get_sync(dai->dev);
+	if (ret)
+		return ret;
+
+	stream = snd_hdac_ext_stream_assign(ebus, substream,
+					skl_get_host_stream_type(ebus));
+	if (stream == NULL)
+		return -EBUSY;
+
+	skl_set_pcm_constrains(ebus, runtime);
+
+	/*
+	 * disable WALLCLOCK timestamps for capture streams
+	 * until we figure out how to handle digital inputs
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_WALL_CLOCK; /* legacy */
+		runtime->hw.info &= ~SNDRV_PCM_INFO_HAS_LINK_ATIME;
+	}
+
+	runtime->private_data = stream;
+
+	dma_params = kzalloc(sizeof(*dma_params), GFP_KERNEL);
+	if (!dma_params)
+		return -ENOMEM;
+
+	dma_params->stream_tag = hdac_stream(stream)->stream_tag;
+	snd_soc_dai_set_dma_data(dai, substream, dma_params);
+
+	dev_dbg(dai->dev, "stream tag set in dma params=%d\n",
+				 dma_params->stream_tag);
+	snd_pcm_set_sync(substream);
+
+	return 0;
+}
+
+static int skl_get_format(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct skl_dma_params *dma_params;
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	int format_val = 0;
+
+	if (ebus->ppcap) {
+		struct snd_pcm_runtime *runtime = substream->runtime;
+
+		format_val = snd_hdac_calc_stream_format(runtime->rate,
+						runtime->channels,
+						runtime->format,
+						32, 0);
+	} else {
+		struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+		dma_params = snd_soc_dai_get_dma_data(codec_dai, substream);
+		if (dma_params)
+			format_val = dma_params->format;
+	}
+
+	return format_val;
+}
+
+static int skl_pcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	unsigned int format_val;
+	int err;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	if (hdac_stream(stream)->prepared) {
+		dev_dbg(dai->dev, "already stream is prepared - returning\n");
+		return 0;
+	}
+
+	format_val = skl_get_format(substream, dai);
+	dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d\n",
+				hdac_stream(stream)->stream_tag, format_val);
+	snd_hdac_stream_reset(hdac_stream(stream));
+
+	err = snd_hdac_stream_set_params(hdac_stream(stream), format_val);
+	if (err < 0)
+		return err;
+
+	err = snd_hdac_stream_setup(hdac_stream(stream));
+	if (err < 0)
+		return err;
+
+	hdac_stream(stream)->prepared = 1;
+
+	return err;
+}
+
+static int skl_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int ret, dma_id;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	ret = skl_substream_alloc_pages(ebus, substream,
+					  params_buffer_bytes(params));
+	if (ret < 0)
+		return ret;
+
+	dev_dbg(dai->dev, "format_val, rate=%d, ch=%d, format=%d\n",
+			runtime->rate, runtime->channels, runtime->format);
+
+	dma_id = hdac_stream(stream)->stream_tag - 1;
+	dev_dbg(dai->dev, "dma_id=%d\n", dma_id);
+
+	return 0;
+}
+
+static void skl_pcm_close(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct skl_dma_params *dma_params = NULL;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	snd_hdac_ext_stream_release(stream, skl_get_host_stream_type(ebus));
+
+	dma_params = snd_soc_dai_get_dma_data(dai, substream);
+	/*
+	 * Now we should set this to NULL as we are freeing the
+	 * dma_params
+	 */
+	snd_soc_dai_set_dma_data(dai, substream, NULL);
+
+	pm_runtime_mark_last_busy(dai->dev);
+	pm_runtime_put_autosuspend(dai->dev);
+	kfree(dma_params);
+}
+
+static int skl_pcm_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *stream = get_hdac_ext_stream(substream);
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	snd_hdac_stream_cleanup(hdac_stream(stream));
+	hdac_stream(stream)->prepared = 0;
+
+	return skl_substream_free_pages(ebus_to_hbus(ebus), substream);
+}
+
+static int skl_link_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *link_dev;
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct skl_dma_params *dma_params;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int dma_id;
+
+	pr_debug("%s\n", __func__);
+	link_dev = snd_hdac_ext_stream_assign(ebus, substream,
+					HDAC_EXT_STREAM_TYPE_LINK);
+	if (!link_dev)
+		return -EBUSY;
+
+	snd_soc_dai_set_dma_data(dai, substream, (void *)link_dev);
+
+	/* set the stream tag in the codec dai dma params  */
+	dma_params = (struct skl_dma_params *)
+			snd_soc_dai_get_dma_data(codec_dai, substream);
+	if (dma_params)
+		dma_params->stream_tag =  hdac_stream(link_dev)->stream_tag;
+	snd_soc_dai_set_dma_data(codec_dai, substream, (void *)dma_params);
+	dma_id = hdac_stream(link_dev)->stream_tag - 1;
+
+	return 0;
+}
+
+static int skl_link_pcm_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct hdac_ext_stream *link_dev =
+			snd_soc_dai_get_dma_data(dai, substream);
+	unsigned int format_val = 0;
+	struct skl_dma_params *dma_params;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_pcm_hw_params *params;
+	struct snd_interval *channels, *rate;
+	struct hdac_ext_link *link;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+	if (link_dev->link_prepared) {
+		dev_dbg(dai->dev, "already stream is prepared - returning\n");
+		return 0;
+	}
+	params  = devm_kzalloc(dai->dev, sizeof(*params), GFP_KERNEL);
+	if (params == NULL)
+		return -ENOMEM;
+
+	channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+	channels->min = channels->max = substream->runtime->channels;
+	rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+	rate->min = rate->max = substream->runtime->rate;
+	snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+					SNDRV_PCM_HW_PARAM_FIRST_MASK],
+					substream->runtime->format);
+
+
+	dma_params  = (struct skl_dma_params *)
+			snd_soc_dai_get_dma_data(codec_dai, substream);
+	if (dma_params)
+		format_val = dma_params->format;
+	dev_dbg(dai->dev, "stream_tag=%d formatvalue=%d codec_dai_name=%s\n",
+			hdac_stream(link_dev)->stream_tag, format_val, codec_dai->name);
+
+	snd_hdac_ext_link_stream_reset(link_dev);
+
+	snd_hdac_ext_link_stream_setup(link_dev, format_val);
+
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	if (!link)
+		return -EINVAL;
+
+	snd_hdac_ext_link_set_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	link_dev->link_prepared = 1;
+
+	return 0;
+}
+
+static int skl_link_pcm_trigger(struct snd_pcm_substream *substream,
+	int cmd, struct snd_soc_dai *dai)
+{
+	struct hdac_ext_stream *link_dev =
+				snd_soc_dai_get_dma_data(dai, substream);
+
+	dev_dbg(dai->dev, "In %s cmd=%d\n", __func__, cmd);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		snd_hdac_ext_link_stream_start(link_dev);
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		snd_hdac_ext_link_stream_clear(link_dev);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int skl_link_hw_free(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct hdac_ext_stream *link_dev =
+				snd_soc_dai_get_dma_data(dai, substream);
+	struct hdac_ext_link *link;
+
+	dev_dbg(dai->dev, "%s: %s\n", __func__, dai->name);
+
+	link_dev->link_prepared = 0;
+
+	link = snd_hdac_ext_bus_get_link(ebus, rtd->codec->component.name);
+	if (!link)
+		return -EINVAL;
+
+	snd_hdac_ext_link_clear_stream_id(link, hdac_stream(link_dev)->stream_tag);
+	snd_hdac_ext_stream_release(link_dev, HDAC_EXT_STREAM_TYPE_LINK);
+	return 0;
+}
+
+static int skl_hda_be_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	return pm_runtime_get_sync(dai->dev);
+}
+
+static void skl_hda_be_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	pm_runtime_mark_last_busy(dai->dev);
+	pm_runtime_put_autosuspend(dai->dev);
+}
+
+static struct snd_soc_dai_ops skl_pcm_dai_ops = {
+	.startup = skl_pcm_open,
+	.shutdown = skl_pcm_close,
+	.prepare = skl_pcm_prepare,
+	.hw_params = skl_pcm_hw_params,
+	.hw_free = skl_pcm_hw_free,
+};
+
+static struct snd_soc_dai_ops skl_dmic_dai_ops = {
+	.startup = skl_hda_be_startup,
+	.shutdown = skl_hda_be_shutdown,
+};
+
+static struct snd_soc_dai_ops skl_link_dai_ops = {
+	.startup = skl_hda_be_startup,
+	.prepare = skl_link_pcm_prepare,
+	.hw_params = skl_link_hw_params,
+	.hw_free = skl_link_hw_free,
+	.trigger = skl_link_pcm_trigger,
+	.shutdown = skl_hda_be_shutdown,
+};
+
+static struct snd_soc_dai_driver skl_platform_dai[] = {
+{
+	.name = "System Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "System Playback",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_8000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+	.capture = {
+		.stream_name = "System Capture",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "Reference Pin",
+	.ops = &skl_pcm_dai_ops,
+	.capture = {
+		.stream_name = "Reference Capture",
+		.channels_min = HDA_MONO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "Deepbuffer Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "Deepbuffer Playback",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "LowLatency Pin",
+	.ops = &skl_pcm_dai_ops,
+	.playback = {
+		.stream_name = "Low Latency Playback",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+/* BE CPU DAIs */
+{
+	.name = "iDisp Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "iDisp Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "DMIC01 Pin",
+	.ops = &skl_dmic_dai_ops,
+	.capture = {
+		.stream_name = "DMIC01 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "DMIC23 Pin",
+	.ops = &skl_dmic_dai_ops,
+	.capture = {
+		.stream_name = "DMIC23 Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+	},
+},
+{
+	.name = "HD-Codec Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "HD-Codec Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+	.capture = {
+		.stream_name = "HD-Codec Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "HD-Codec-SPK Pin",
+	.ops = &skl_link_dai_ops,
+	.playback = {
+		.stream_name = "HD-Codec-SPK Tx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+{
+	.name = "HD-Codec-AMIC Pin",
+	.ops = &skl_link_dai_ops,
+	.capture = {
+		.stream_name = "HD-Codec-AMIC Rx",
+		.channels_min = HDA_STEREO,
+		.channels_max = HDA_STEREO,
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	},
+},
+};
+
+static int skl_platform_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai_link *dai_link = rtd->dai_link;
+
+	dev_dbg(rtd->cpu_dai->dev, "In %s:%s\n", __func__,
+					dai_link->cpu_dai_name);
+
+	runtime = substream->runtime;
+	snd_soc_set_runtime_hwparams(substream, &azx_pcm_hw);
+
+	return 0;
+}
+
+static int skl_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_ext_stream *stream;
+	struct snd_pcm_substream *s;
+	bool start;
+	int sbits = 0;
+	unsigned long cookie;
+	struct hdac_stream *hstr;
+
+	stream = get_hdac_ext_stream(substream);
+	hstr = hdac_stream(stream);
+
+	dev_dbg(bus->dev, "In %s cmd=%d\n", __func__, cmd);
+
+	if (!hstr->prepared)
+		return -EPIPE;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		start = true;
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		start = false;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
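+	/* collect the index bits of all substreams linked for synchronous start */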
+	snd_pcm_group_for_each_entry(s, substream) {
+		if (s->pcm->card != substream->pcm->card)
+			continue;
+		stream = get_hdac_ext_stream(s);
+		sbits |= 1 << hdac_stream(stream)->index;
+		snd_pcm_trigger_done(s, substream);
+	}
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	/* first, set SYNC bits of corresponding streams */
+	snd_hdac_stream_sync_trigger(hstr, true, sbits, AZX_REG_SSYNC);
+
+	snd_pcm_group_for_each_entry(s, substream) {
+		if (s->pcm->card != substream->pcm->card)
+			continue;
+		stream = get_hdac_ext_stream(s);
+		if (start)
+			snd_hdac_stream_start(hdac_stream(stream), true);
+		else
+			snd_hdac_stream_stop(hdac_stream(stream));
+	}
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	snd_hdac_stream_sync(hstr, start, sbits);
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	/* reset SYNC bits */
+	snd_hdac_stream_sync_trigger(hstr, false, sbits, AZX_REG_SSYNC);
+	if (start)
+		snd_hdac_stream_timecounter_init(hstr, sbits);
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	return 0;
+}
+
+static int skl_dsp_trigger(struct snd_pcm_substream *substream,
+		int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct hdac_ext_stream *stream;
+	int start;
+	unsigned long cookie;
+	struct hdac_stream *hstr;
+
+	dev_dbg(bus->dev, "In %s cmd=%d streamname=%s\n", __func__, cmd, cpu_dai->name);
+
+	stream = get_hdac_ext_stream(substream);
+	hstr = hdac_stream(stream);
+
+	if (!hstr->prepared)
+		return -EPIPE;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		start = 1;
+		break;
+
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_STOP:
+		start = 0;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&bus->reg_lock, cookie);
+
+	if (start)
+		snd_hdac_stream_start(hdac_stream(stream), true);
+	else
+		snd_hdac_stream_stop(hdac_stream(stream));
+
+	if (start)
+		snd_hdac_stream_timecounter_init(hstr, 0);
+
+	spin_unlock_irqrestore(&bus->reg_lock, cookie);
+
+	return 0;
+}
+static int skl_platform_pcm_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+
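+	/*
+	 * with the DSP (processing pipe capability) present, trigger just
+	 * this stream; otherwise use the synchronized legacy path
+	 */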
+	if (ebus->ppcap)
+		return skl_dsp_trigger(substream, cmd);
+	else
+		return skl_pcm_trigger(substream, cmd);
+}
+
+/* calculate runtime delay from LPIB */
+static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
+				struct hdac_ext_stream *sstream,
+				unsigned int pos)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct hdac_stream *hstream = hdac_stream(sstream);
+	struct snd_pcm_substream *substream = hstream->substream;
+	int stream = substream->stream;
+	unsigned int lpib_pos = snd_hdac_stream_get_pos_lpib(hstream);
+	int delay;
+
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		delay = pos - lpib_pos;
+	else
+		delay = lpib_pos - pos;
+
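+	/*
+	 * treat a small negative delay as zero, otherwise assume the
+	 * position wrapped around the ring buffer
+	 */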
+	if (delay < 0) {
+		if (delay >= hstream->delay_negative_threshold)
+			delay = 0;
+		else
+			delay += hstream->bufsize;
+	}
+
+	if (delay >= hstream->period_bytes) {
+		dev_info(bus->dev,
+			 "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
+			 delay, hstream->period_bytes);
+		delay = 0;
+	}
+
+	return bytes_to_frames(substream->runtime, delay);
+}
+
+static unsigned int skl_get_position(struct hdac_ext_stream *hstream,
+					int codec_delay)
+{
+	struct hdac_stream *hstr = hdac_stream(hstream);
+	struct snd_pcm_substream *substream = hstr->substream;
+	struct hdac_ext_bus *ebus = get_bus_ctx(substream);
+	unsigned int pos;
+	int delay;
+
+	/* use the position buffer as default */
+	pos = snd_hdac_stream_get_pos_posbuf(hdac_stream(hstream));
+
+	if (pos >= hdac_stream(hstream)->bufsize)
+		pos = 0;
+
+	if (substream->runtime) {
+		delay = skl_get_delay_from_lpib(ebus, hstream, pos)
+						 + codec_delay;
+		substream->runtime->delay += delay;
+	}
+
+	return pos;
+}
+
+static snd_pcm_uframes_t skl_platform_pcm_pointer
+			(struct snd_pcm_substream *substream)
+{
+	struct hdac_ext_stream *hstream = get_hdac_ext_stream(substream);
+
+	return bytes_to_frames(substream->runtime,
+			       skl_get_position(hstream, 0));
+}
+
+static u64 skl_adjust_codec_delay(struct snd_pcm_substream *substream,
+				u64 nsec)
+{
+	struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	u64 codec_frames, codec_nsecs;
+
+	if (!codec_dai->driver->ops->delay)
+		return nsec;
+
+	codec_frames = codec_dai->driver->ops->delay(substream, codec_dai);
+	codec_nsecs = div_u64(codec_frames * 1000000000LL,
+			      substream->runtime->rate);
+
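+	/* codec delay adds to the link delay on capture, reduces it on playback */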
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		return nsec + codec_nsecs;
+
+	return (nsec > codec_nsecs) ? nsec - codec_nsecs : 0;
+}
+
+static int skl_get_time_info(struct snd_pcm_substream *substream,
+			struct timespec *system_ts, struct timespec *audio_ts,
+			struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+			struct snd_pcm_audio_tstamp_report *audio_tstamp_report)
+{
+	struct hdac_ext_stream *sstream = get_hdac_ext_stream(substream);
+	struct hdac_stream *hstr = hdac_stream(sstream);
+	u64 nsec;
+
+	if ((substream->runtime->hw.info & SNDRV_PCM_INFO_HAS_LINK_ATIME) &&
+		(audio_tstamp_config->type_requested == SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK)) {
+
+		snd_pcm_gettime(substream->runtime, system_ts);
+
+		nsec = timecounter_read(&hstr->tc);
+		nsec = div_u64(nsec, 3); /* can be optimized */
+		if (audio_tstamp_config->report_delay)
+			nsec = skl_adjust_codec_delay(substream, nsec);
+
+		*audio_ts = ns_to_timespec(nsec);
+
+		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK;
+		audio_tstamp_report->accuracy_report = 1; /* rest of struct is valid */
+		audio_tstamp_report->accuracy = 42; /* 24MHzWallClk == 42ns resolution */
+
+	} else {
+		audio_tstamp_report->actual_type = SNDRV_PCM_AUDIO_TSTAMP_TYPE_DEFAULT;
+	}
+
+	return 0;
+}
+
+static struct snd_pcm_ops skl_platform_ops = {
+	.open = skl_platform_open,
+	.ioctl = snd_pcm_lib_ioctl,
+	.trigger = skl_platform_pcm_trigger,
+	.pointer = skl_platform_pcm_pointer,
+	.get_time_info =  skl_get_time_info,
+	.mmap = snd_pcm_lib_default_mmap,
+	.page = snd_pcm_sgbuf_ops_page,
+};
+
+static void skl_pcm_free(struct snd_pcm *pcm)
+{
+	snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+#define MAX_PREALLOC_SIZE	(32 * 1024 * 1024)
+
+static int skl_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_dai *dai = rtd->cpu_dai;
+	struct hdac_ext_bus *ebus = dev_get_drvdata(dai->dev);
+	struct snd_pcm *pcm = rtd->pcm;
+	unsigned int size;
+	int retval = 0;
+	struct skl *skl = ebus_to_skl(ebus);
+
+	if (dai->driver->playback.channels_min ||
+		dai->driver->capture.channels_min) {
+		/* buffer pre-allocation */
+		size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
+		if (size > MAX_PREALLOC_SIZE)
+			size = MAX_PREALLOC_SIZE;
+		retval = snd_pcm_lib_preallocate_pages_for_all(pcm,
+						SNDRV_DMA_TYPE_DEV_SG,
+						snd_dma_pci_data(skl->pci),
+						size, MAX_PREALLOC_SIZE);
+		if (retval) {
+			dev_err(dai->dev, "dma buffer allocation failed\n");
+			return retval;
+		}
+	}
+
+	return retval;
+}
+
+static struct snd_soc_platform_driver skl_platform_drv  = {
+	.ops		= &skl_platform_ops,
+	.pcm_new	= skl_pcm_new,
+	.pcm_free	= skl_pcm_free,
+};
+
+static const struct snd_soc_component_driver skl_component = {
+	.name           = "pcm",
+};
+
+int skl_platform_register(struct device *dev)
+{
+	int ret;
+
+	ret = snd_soc_register_platform(dev, &skl_platform_drv);
+	if (ret) {
+		dev_err(dev, "soc platform registration failed %d\n", ret);
+		return ret;
+	}
+	ret = snd_soc_register_component(dev, &skl_component,
+				skl_platform_dai,
+				ARRAY_SIZE(skl_platform_dai));
+	if (ret) {
+		dev_err(dev, "soc component registration failed %d\n", ret);
+		snd_soc_unregister_platform(dev);
+	}
+
+	return ret;
+
+}
+
+int skl_platform_unregister(struct device *dev)
+{
+	snd_soc_unregister_component(dev);
+	snd_soc_unregister_platform(dev);
+	return 0;
+}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.c b/sound/soc/intel/skylake/skl-sst-cldma.c
new file mode 100644
index 0000000..44748ba
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.c
@@ -0,0 +1,327 @@
+/*
+ * skl-sst-cldma.c - Code Loader DMA handler
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ * Author: Subhransu S. Prusty <subhransu.s.prusty@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/kthread.h>
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+
+static void skl_cldma_int_enable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
+				SKL_ADSPIC_CL_DMA, SKL_ADSPIC_CL_DMA);
+}
+
+void skl_cldma_int_disable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_ADSPIC, SKL_ADSPIC_CL_DMA, 0);
+}
+
+/* Code loader helper APIs */
+static void skl_cldma_setup_bdle(struct sst_dsp *ctx,
+		struct snd_dma_buffer *dmab_data,
+		u32 **bdlp, int size, int with_ioc)
+{
+	u32 *bdl = *bdlp;
+
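+	/* each BDL entry is four dwords: address low, address high, size, IOC flag */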
+	ctx->cl_dev.frags = 0;
+	while (size > 0) {
+		phys_addr_t addr = virt_to_phys(dmab_data->area +
+				(ctx->cl_dev.frags * ctx->cl_dev.bufsize));
+
+		bdl[0] = cpu_to_le32(lower_32_bits(addr));
+		bdl[1] = cpu_to_le32(upper_32_bits(addr));
+
+		bdl[2] = cpu_to_le32(ctx->cl_dev.bufsize);
+
+		size -= ctx->cl_dev.bufsize;
+		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
+
+		bdl += 4;
+		ctx->cl_dev.frags++;
+	}
+}
+
+/*
+ * Setup controller
+ * Configure the registers to update the dma buffer address and
+ * enable interrupts.
+ * Note: stream number 1 is used for the transfer
+ */
+static void skl_cldma_setup_controller(struct sst_dsp  *ctx,
+		struct snd_dma_buffer *dmab_bdl, unsigned int max_size,
+		u32 count)
+{
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL,
+			CL_SD_BDLPLBA(dmab_bdl->addr));
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU,
+			CL_SD_BDLPUBA(dmab_bdl->addr));
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, max_size);
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, count - 1);
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(1));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(FW_CL_STREAM_NUMBER));
+}
+
+static void skl_cldma_setup_spb(struct sst_dsp  *ctx,
+		unsigned int size, bool enable)
+{
+	if (enable)
+		sst_dsp_shim_update_bits_unlocked(ctx,
+				SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+				CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+				CL_SPBFIFO_SPBFCCTL_SPIBE(1));
+
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, size);
+}
+
+static void skl_cldma_cleanup_spb(struct sst_dsp  *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL,
+			CL_SPBFIFO_SPBFCCTL_SPIBE_MASK,
+			CL_SPBFIFO_SPBFCCTL_SPIBE(0));
+
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_CL_SPBFIFO_SPIB, 0);
+}
+
+static void skl_cldma_trigger(struct sst_dsp  *ctx, bool enable)
+{
+	if (enable)
+		sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(1));
+	else
+		sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_CL_SD_CTL,
+			CL_SD_CTL_RUN_MASK, CL_SD_CTL_RUN(0));
+}
+
+static void skl_cldma_cleanup(struct sst_dsp  *ctx)
+{
+	skl_cldma_cleanup_spb(ctx);
+
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_IOCE_MASK, CL_SD_CTL_IOCE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_FEIE_MASK, CL_SD_CTL_FEIE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_DEIE_MASK, CL_SD_CTL_DEIE(0));
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_CL_SD_CTL,
+				CL_SD_CTL_STRM_MASK, CL_SD_CTL_STRM(0));
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPL, CL_SD_BDLPLBA(0));
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_BDLPU, 0);
+
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_CBL, 0);
+	sst_dsp_shim_write(ctx, SKL_ADSP_REG_CL_SD_LVI, 0);
+}
+
+static int skl_cldma_wait_interruptible(struct sst_dsp *ctx)
+{
+	int ret = 0;
+
+	if (!wait_event_timeout(ctx->cl_dev.wait_queue,
+				ctx->cl_dev.wait_condition,
+				msecs_to_jiffies(SKL_WAIT_TIMEOUT))) {
+		dev_err(ctx->dev, "%s: Wait timeout\n", __func__);
+		ret = -EIO;
+		goto cleanup;
+	}
+
+	dev_dbg(ctx->dev, "%s: Event wake\n", __func__);
+	if (ctx->cl_dev.wake_status != SKL_CL_DMA_BUF_COMPLETE) {
+		dev_err(ctx->dev, "%s: DMA Error\n", __func__);
+		ret = -EIO;
+	}
+
+cleanup:
+	ctx->cl_dev.wake_status = SKL_CL_DMA_STATUS_NONE;
+	return ret;
+}
+
+static void skl_cldma_stop(struct sst_dsp *ctx)
+{
+	ctx->cl_dev.ops.cl_trigger(ctx, false);
+}
+
+static void skl_cldma_fill_buffer(struct sst_dsp *ctx, unsigned int size,
+		const void *curr_pos, bool intr_enable, bool trigger)
+{
+	dev_dbg(ctx->dev, "Size: %x, intr_enable: %d\n", size, intr_enable);
+	dev_dbg(ctx->dev, "buf_pos_index:%d, trigger:%d\n",
+			ctx->cl_dev.dma_buffer_offset, trigger);
+	dev_dbg(ctx->dev, "spib position: %d\n", ctx->cl_dev.curr_spib_pos);
+
+	memcpy(ctx->cl_dev.dmab_data.area + ctx->cl_dev.dma_buffer_offset,
+			curr_pos, size);
+
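+	/*
+	 * the next copy starts at the new SPIB position, or back at the
+	 * start of the buffer after a full buffer has been consumed
+	 */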
+	if (ctx->cl_dev.curr_spib_pos == ctx->cl_dev.bufsize)
+		ctx->cl_dev.dma_buffer_offset = 0;
+	else
+		ctx->cl_dev.dma_buffer_offset = ctx->cl_dev.curr_spib_pos;
+
+	ctx->cl_dev.wait_condition = false;
+
+	if (intr_enable)
+		skl_cldma_int_enable(ctx);
+
+	ctx->cl_dev.ops.cl_setup_spb(ctx, ctx->cl_dev.curr_spib_pos, trigger);
+	if (trigger)
+		ctx->cl_dev.ops.cl_trigger(ctx, true);
+}
+
+/*
+ * The CL DMA cannot report transfer status until a BDL buffer has been
+ * fully transferred.
+ *
+ * Copying is therefore split into two parts:
+ * 1. Interrupt on buffer completion, used while the data left to transfer
+ *    is larger than the ring buffer.
+ * 2. Polling of a firmware register once the remaining data no longer
+ *    fills the ring buffer. The caller polls the relevant status register
+ *    to determine when the transfer has finished.
+ */
+static int
+skl_cldma_copy_to_buf(struct sst_dsp *ctx, const void *bin, u32 total_size)
+{
+	int ret = 0;
+	bool start = true;
+	unsigned int excess_bytes;
+	u32 size;
+	unsigned int bytes_left = total_size;
+	const void *curr_pos = bin;
+
+	if (total_size <= 0)
+		return -EINVAL;
+
+	dev_dbg(ctx->dev, "%s: Total binary size: %u\n", __func__, bytes_left);
+
+	while (bytes_left) {
+		if (bytes_left > ctx->cl_dev.bufsize) {
+
+			/*
+			 * the DMA only transfers up to the write pointer
+			 * as programmed in SPIB
+			 */
+			if (ctx->cl_dev.curr_spib_pos == 0)
+				ctx->cl_dev.curr_spib_pos = ctx->cl_dev.bufsize;
+
+			size = ctx->cl_dev.bufsize;
+			skl_cldma_fill_buffer(ctx, size, curr_pos, true, start);
+
+			start = false;
+			ret = skl_cldma_wait_interruptible(ctx);
+			if (ret < 0) {
+				skl_cldma_stop(ctx);
+				return ret;
+			}
+
+		} else {
+			skl_cldma_int_disable(ctx);
+
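+			/*
+			 * advance SPIB by the remaining bytes, wrapping
+			 * around the ring buffer if needed
+			 */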
+			if ((ctx->cl_dev.curr_spib_pos + bytes_left)
+							<= ctx->cl_dev.bufsize) {
+				ctx->cl_dev.curr_spib_pos += bytes_left;
+			} else {
+				excess_bytes = bytes_left -
+					(ctx->cl_dev.bufsize -
+					ctx->cl_dev.curr_spib_pos);
+				ctx->cl_dev.curr_spib_pos = excess_bytes;
+			}
+
+			size = bytes_left;
+			skl_cldma_fill_buffer(ctx, size,
+					curr_pos, false, start);
+		}
+		bytes_left -= size;
+		curr_pos = curr_pos + size;
+	}
+
+	return ret;
+}
+
+void skl_cldma_process_intr(struct sst_dsp *ctx)
+{
+	u8 cl_dma_intr_status;
+
+	cl_dma_intr_status =
+		sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_CL_SD_STS);
+
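+	/* report a DMA error unless the buffer-complete bit is set */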
+	if (!(cl_dma_intr_status & SKL_CL_DMA_SD_INT_COMPLETE))
+		ctx->cl_dev.wake_status = SKL_CL_DMA_ERR;
+	else
+		ctx->cl_dev.wake_status = SKL_CL_DMA_BUF_COMPLETE;
+
+	ctx->cl_dev.wait_condition = true;
+	wake_up(&ctx->cl_dev.wait_queue);
+}
+
+int skl_cldma_prepare(struct sst_dsp *ctx)
+{
+	int ret;
+	u32 *bdl;
+
+	ctx->cl_dev.bufsize = SKL_MAX_BUFFER_SIZE;
+
+	/* Allocate cl ops */
+	ctx->cl_dev.ops.cl_setup_bdle = skl_cldma_setup_bdle;
+	ctx->cl_dev.ops.cl_setup_controller = skl_cldma_setup_controller;
+	ctx->cl_dev.ops.cl_setup_spb = skl_cldma_setup_spb;
+	ctx->cl_dev.ops.cl_cleanup_spb = skl_cldma_cleanup_spb;
+	ctx->cl_dev.ops.cl_trigger = skl_cldma_trigger;
+	ctx->cl_dev.ops.cl_cleanup_controller = skl_cldma_cleanup;
+	ctx->cl_dev.ops.cl_copy_to_dmabuf = skl_cldma_copy_to_buf;
+	ctx->cl_dev.ops.cl_stop_dma = skl_cldma_stop;
+
+	/* Allocate buffer*/
+	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
+			&ctx->cl_dev.dmab_data, ctx->cl_dev.bufsize);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Alloc buffer for base fw failed: %x", ret);
+		return ret;
+	}
+	/* Setup Code loader BDL */
+	ret = ctx->dsp_ops.alloc_dma_buf(ctx->dev,
+			&ctx->cl_dev.dmab_bdl, PAGE_SIZE);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Alloc buffer for bdl failed: %x", ret);
+		ctx->dsp_ops.free_dma_buf(ctx->dev, &ctx->cl_dev.dmab_data);
+		return ret;
+	}
+	bdl = (u32 *)ctx->cl_dev.dmab_bdl.area;
+
+	/* Set up the BDL entries and program the controller */
+	ctx->cl_dev.ops.cl_setup_bdle(ctx, &ctx->cl_dev.dmab_data,
+			&bdl, ctx->cl_dev.bufsize, 1);
+	ctx->cl_dev.ops.cl_setup_controller(ctx, &ctx->cl_dev.dmab_bdl,
+			ctx->cl_dev.bufsize, ctx->cl_dev.frags);
+
+	ctx->cl_dev.curr_spib_pos = 0;
+	ctx->cl_dev.dma_buffer_offset = 0;
+	init_waitqueue_head(&ctx->cl_dev.wait_queue);
+
+	return ret;
+}
diff --git a/sound/soc/intel/skylake/skl-sst-cldma.h b/sound/soc/intel/skylake/skl-sst-cldma.h
new file mode 100644
index 0000000..99e4c86
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-cldma.h
@@ -0,0 +1,251 @@
+/*
+ * Intel Code Loader DMA support
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef SKL_SST_CLDMA_H_
+#define SKL_SST_CLDMA_H_
+
+#define FW_CL_STREAM_NUMBER		0x1
+
+#define DMA_ADDRESS_128_BITS_ALIGNMENT	7
+#define BDL_ALIGN(x)			(x >> DMA_ADDRESS_128_BITS_ALIGNMENT)
+
+#define SKL_ADSPIC_CL_DMA			0x2
+#define SKL_ADSPIS_CL_DMA			0x2
+#define SKL_CL_DMA_SD_INT_DESC_ERR		0x10 /* Descriptor error interrupt */
+#define SKL_CL_DMA_SD_INT_FIFO_ERR		0x08 /* FIFO error interrupt */
+#define SKL_CL_DMA_SD_INT_COMPLETE		0x04 /* Buffer completion interrupt */
+
+/* Intel HD Audio Code Loader DMA Registers */
+
+#define HDA_ADSP_LOADER_BASE		0x80
+
+/* Stream Registers */
+#define SKL_ADSP_REG_CL_SD_CTL			(HDA_ADSP_LOADER_BASE + 0x00)
+#define SKL_ADSP_REG_CL_SD_STS			(HDA_ADSP_LOADER_BASE + 0x03)
+#define SKL_ADSP_REG_CL_SD_LPIB			(HDA_ADSP_LOADER_BASE + 0x04)
+#define SKL_ADSP_REG_CL_SD_CBL			(HDA_ADSP_LOADER_BASE + 0x08)
+#define SKL_ADSP_REG_CL_SD_LVI			(HDA_ADSP_LOADER_BASE + 0x0c)
+#define SKL_ADSP_REG_CL_SD_FIFOW		(HDA_ADSP_LOADER_BASE + 0x0e)
+#define SKL_ADSP_REG_CL_SD_FIFOSIZE		(HDA_ADSP_LOADER_BASE + 0x10)
+#define SKL_ADSP_REG_CL_SD_FORMAT		(HDA_ADSP_LOADER_BASE + 0x12)
+#define SKL_ADSP_REG_CL_SD_FIFOL		(HDA_ADSP_LOADER_BASE + 0x14)
+#define SKL_ADSP_REG_CL_SD_BDLPL		(HDA_ADSP_LOADER_BASE + 0x18)
+#define SKL_ADSP_REG_CL_SD_BDLPU		(HDA_ADSP_LOADER_BASE + 0x1c)
+
+/* CL: Software Position Based FIFO Capability Registers */
+#define SKL_ADSP_REG_CL_SPBFIFO			(HDA_ADSP_LOADER_BASE + 0x20)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCH		(SKL_ADSP_REG_CL_SPBFIFO + 0x0)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPBFCCTL	(SKL_ADSP_REG_CL_SPBFIFO + 0x4)
+#define SKL_ADSP_REG_CL_SPBFIFO_SPIB		(SKL_ADSP_REG_CL_SPBFIFO + 0x8)
+#define SKL_ADSP_REG_CL_SPBFIFO_MAXFIFOS	(SKL_ADSP_REG_CL_SPBFIFO + 0xc)
+
+/* CL: Stream Descriptor x Control */
+
+/* Stream Reset */
+#define CL_SD_CTL_SRST_SHIFT		0
+#define CL_SD_CTL_SRST_MASK		(1 << CL_SD_CTL_SRST_SHIFT)
+#define CL_SD_CTL_SRST(x)		\
+			((x << CL_SD_CTL_SRST_SHIFT) & CL_SD_CTL_SRST_MASK)
+
+/* Stream Run */
+#define CL_SD_CTL_RUN_SHIFT		1
+#define CL_SD_CTL_RUN_MASK		(1 << CL_SD_CTL_RUN_SHIFT)
+#define CL_SD_CTL_RUN(x)		\
+			((x << CL_SD_CTL_RUN_SHIFT) & CL_SD_CTL_RUN_MASK)
+
+/* Interrupt On Completion Enable */
+#define CL_SD_CTL_IOCE_SHIFT		2
+#define CL_SD_CTL_IOCE_MASK		(1 << CL_SD_CTL_IOCE_SHIFT)
+#define CL_SD_CTL_IOCE(x)		\
+			((x << CL_SD_CTL_IOCE_SHIFT) & CL_SD_CTL_IOCE_MASK)
+
+/* FIFO Error Interrupt Enable */
+#define CL_SD_CTL_FEIE_SHIFT		3
+#define CL_SD_CTL_FEIE_MASK		(1 << CL_SD_CTL_FEIE_SHIFT)
+#define CL_SD_CTL_FEIE(x)		\
+			((x << CL_SD_CTL_FEIE_SHIFT) & CL_SD_CTL_FEIE_MASK)
+
+/* Descriptor Error Interrupt Enable */
+#define CL_SD_CTL_DEIE_SHIFT		4
+#define CL_SD_CTL_DEIE_MASK		(1 << CL_SD_CTL_DEIE_SHIFT)
+#define CL_SD_CTL_DEIE(x)		\
+			((x << CL_SD_CTL_DEIE_SHIFT) & CL_SD_CTL_DEIE_MASK)
+
+/* FIFO Limit Change */
+#define CL_SD_CTL_FIFOLC_SHIFT		5
+#define CL_SD_CTL_FIFOLC_MASK		(1 << CL_SD_CTL_FIFOLC_SHIFT)
+#define CL_SD_CTL_FIFOLC(x)		\
+			((x << CL_SD_CTL_FIFOLC_SHIFT) & CL_SD_CTL_FIFOLC_MASK)
+
+/* Stripe Control */
+#define CL_SD_CTL_STRIPE_SHIFT		16
+#define CL_SD_CTL_STRIPE_MASK		(0x3 << CL_SD_CTL_STRIPE_SHIFT)
+#define CL_SD_CTL_STRIPE(x)		\
+			((x << CL_SD_CTL_STRIPE_SHIFT) & CL_SD_CTL_STRIPE_MASK)
+
+/* Traffic Priority */
+#define CL_SD_CTL_TP_SHIFT		18
+#define CL_SD_CTL_TP_MASK		(1 << CL_SD_CTL_TP_SHIFT)
+#define CL_SD_CTL_TP(x)			\
+			((x << CL_SD_CTL_TP_SHIFT) & CL_SD_CTL_TP_MASK)
+
+/* Bidirectional Direction Control */
+#define CL_SD_CTL_DIR_SHIFT		19
+#define CL_SD_CTL_DIR_MASK		(1 << CL_SD_CTL_DIR_SHIFT)
+#define CL_SD_CTL_DIR(x)		\
+			((x << CL_SD_CTL_DIR_SHIFT) & CL_SD_CTL_DIR_MASK)
+
+/* Stream Number */
+#define CL_SD_CTL_STRM_SHIFT		20
+#define CL_SD_CTL_STRM_MASK		(0xf << CL_SD_CTL_STRM_SHIFT)
+#define CL_SD_CTL_STRM(x)		\
+			((x << CL_SD_CTL_STRM_SHIFT) & CL_SD_CTL_STRM_MASK)
+
+/* CL: Stream Descriptor x Status */
+
+/* Buffer Completion Interrupt Status */
+#define CL_SD_STS_BCIS(x)		CL_SD_CTL_IOCE(x)
+
+/* FIFO Error */
+#define CL_SD_STS_FIFOE(x)		CL_SD_CTL_FEIE(x)
+
+/* Descriptor Error */
+#define CL_SD_STS_DESE(x)		CL_SD_CTL_DEIE(x)
+
+/* FIFO Ready */
+#define CL_SD_STS_FIFORDY(x)	CL_SD_CTL_FIFOLC(x)
+
+
+/* CL: Stream Descriptor x Last Valid Index */
+#define CL_SD_LVI_SHIFT			0
+#define CL_SD_LVI_MASK			(0xff << CL_SD_LVI_SHIFT)
+#define CL_SD_LVI(x)			((x << CL_SD_LVI_SHIFT) & CL_SD_LVI_MASK)
+
+/* CL: Stream Descriptor x FIFO Eviction Watermark */
+#define CL_SD_FIFOW_SHIFT		0
+#define CL_SD_FIFOW_MASK		(0x7 << CL_SD_FIFOW_SHIFT)
+#define CL_SD_FIFOW(x)			\
+			((x << CL_SD_FIFOW_SHIFT) & CL_SD_FIFOW_MASK)
+
+/* CL: Stream Descriptor x Buffer Descriptor List Pointer Lower Base Address */
+
+/* Protect Bits */
+#define CL_SD_BDLPLBA_PROT_SHIFT	0
+#define CL_SD_BDLPLBA_PROT_MASK		(1 << CL_SD_BDLPLBA_PROT_SHIFT)
+#define CL_SD_BDLPLBA_PROT(x)		\
+		((x << CL_SD_BDLPLBA_PROT_SHIFT) & CL_SD_BDLPLBA_PROT_MASK)
+
+/* Buffer Descriptor List Lower Base Address */
+#define CL_SD_BDLPLBA_SHIFT		7
+#define CL_SD_BDLPLBA_MASK		(0x1ffffff << CL_SD_BDLPLBA_SHIFT)
+#define CL_SD_BDLPLBA(x)		\
+	((BDL_ALIGN(lower_32_bits(x)) << CL_SD_BDLPLBA_SHIFT) & CL_SD_BDLPLBA_MASK)
+
+/* Buffer Descriptor List Upper Base Address */
+#define CL_SD_BDLPUBA_SHIFT		0
+#define CL_SD_BDLPUBA_MASK		(0xffffffff << CL_SD_BDLPUBA_SHIFT)
+#define CL_SD_BDLPUBA(x)		\
+		((upper_32_bits(x) << CL_SD_BDLPUBA_SHIFT) & CL_SD_BDLPUBA_MASK)
+
+/*
+ * Code Loader - Software Position Based FIFO
+ * Capability Registers x Software Position Based FIFO Header
+ */
+
+/* Next Capability Pointer */
+#define CL_SPBFIFO_SPBFCH_PTR_SHIFT	0
+#define CL_SPBFIFO_SPBFCH_PTR_MASK	(0xff << CL_SPBFIFO_SPBFCH_PTR_SHIFT)
+#define CL_SPBFIFO_SPBFCH_PTR(x)	\
+		((x << CL_SPBFIFO_SPBFCH_PTR_SHIFT) & CL_SPBFIFO_SPBFCH_PTR_MASK)
+
+/* Capability Identifier */
+#define CL_SPBFIFO_SPBFCH_ID_SHIFT	16
+#define CL_SPBFIFO_SPBFCH_ID_MASK	(0xfff << CL_SPBFIFO_SPBFCH_ID_SHIFT)
+#define CL_SPBFIFO_SPBFCH_ID(x)		\
+		((x << CL_SPBFIFO_SPBFCH_ID_SHIFT) & CL_SPBFIFO_SPBFCH_ID_MASK)
+
+/* Capability Version */
+#define CL_SPBFIFO_SPBFCH_VER_SHIFT	28
+#define CL_SPBFIFO_SPBFCH_VER_MASK	(0xf << CL_SPBFIFO_SPBFCH_VER_SHIFT)
+#define CL_SPBFIFO_SPBFCH_VER(x)	\
+	((x << CL_SPBFIFO_SPBFCH_VER_SHIFT) & CL_SPBFIFO_SPBFCH_VER_MASK)
+
+/* Software Position in Buffer Enable */
+#define CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT	0
+#define CL_SPBFIFO_SPBFCCTL_SPIBE_MASK	(1 << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT)
+#define CL_SPBFIFO_SPBFCCTL_SPIBE(x)	\
+	((x << CL_SPBFIFO_SPBFCCTL_SPIBE_SHIFT) & CL_SPBFIFO_SPBFCCTL_SPIBE_MASK)
+
+/* SST IPC SKL defines */
+#define SKL_WAIT_TIMEOUT		500	/* 500 msec */
+#define SKL_MAX_BUFFER_SIZE		(32 * PAGE_SIZE)
+
+enum skl_cl_dma_wake_states {
+	SKL_CL_DMA_STATUS_NONE = 0,
+	SKL_CL_DMA_BUF_COMPLETE,
+	SKL_CL_DMA_ERR,	/* TODO: Expand the error states */
+};
+
+struct sst_dsp;
+
+struct skl_cl_dev_ops {
+	void (*cl_setup_bdle)(struct sst_dsp *ctx,
+			struct snd_dma_buffer *dmab_data,
+			u32 **bdlp, int size, int with_ioc);
+	void (*cl_setup_controller)(struct sst_dsp *ctx,
+			struct snd_dma_buffer *dmab_bdl,
+			unsigned int max_size, u32 page_count);
+	void (*cl_setup_spb)(struct sst_dsp  *ctx,
+			unsigned int size, bool enable);
+	void (*cl_cleanup_spb)(struct sst_dsp  *ctx);
+	void (*cl_trigger)(struct sst_dsp  *ctx, bool enable);
+	void (*cl_cleanup_controller)(struct sst_dsp  *ctx);
+	int (*cl_copy_to_dmabuf)(struct sst_dsp *ctx,
+			const void *bin, u32 size);
+	void (*cl_stop_dma)(struct sst_dsp *ctx);
+};
+
+/**
+ * struct skl_cl_dev - holds information for a code loader DMA transfer
+ *
+ * @dmab_data: buffer pointer
+ * @dmab_bdl: buffer descriptor list
+ * @bufsize: ring buffer size
+ * @frags: Last valid buffer descriptor index in the BDL
+ * @curr_spib_pos: Current position in ring buffer
+ * @dma_buffer_offset: dma buffer offset
+ * @ops: operations supported on CL dma
+ * @wait_queue: wait queue woken when a DMA buffer completes or errors out
+ * @wake_status: DMA wake status
+ * @wait_condition: condition to wait on wait queue
+ */
+struct skl_cl_dev {
+	struct snd_dma_buffer dmab_data;
+	struct snd_dma_buffer dmab_bdl;
+
+	unsigned int bufsize;
+	unsigned int frags;
+
+	unsigned int curr_spib_pos;
+	unsigned int dma_buffer_offset;
+	struct skl_cl_dev_ops ops;
+
+	wait_queue_head_t wait_queue;
+	int wake_status;
+	bool wait_condition;
+};
+
+#endif /* SKL_SST_CLDMA_H_ */
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
new file mode 100644
index 0000000..94875b0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -0,0 +1,342 @@
+/*
+ * skl-sst-dsp.c - SKL SST library generic function
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <sound/pcm.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-ipc.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-ipc.h"
+
+/* various timeout values */
+#define SKL_DSP_PU_TO		50
+#define SKL_DSP_PD_TO		50
+#define SKL_DSP_RESET_TO	50
+
+void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
+{
+	mutex_lock(&ctx->mutex);
+	ctx->sst_state = state;
+	mutex_unlock(&ctx->mutex);
+}
+
+static int skl_dsp_core_set_reset_state(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx,
+			SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK,
+			SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK));
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CRST_MASK,
+			SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK),
+			SKL_DSP_RESET_TO,
+			"Set reset");
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) !=
+				SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) {
+		dev_err(ctx->dev, "Set reset state failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_unset_reset_state(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	dev_dbg(ctx->dev, "In %s\n", __func__);
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+					SKL_ADSPCS_CRST_MASK, 0);
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CRST_MASK,
+			0,
+			SKL_DSP_RESET_TO,
+			"Unset reset");
+
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				 SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) != 0) {
+		dev_err(ctx->dev, "Unset reset state failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static bool is_skl_dsp_core_enable(struct sst_dsp  *ctx)
+{
+	int val;
+	bool is_enable;
+
+	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);
+
+	is_enable = ((val & SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) &&
+			(val & SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK)) &&
+			!(val & SKL_ADSPCS_CRST(SKL_DSP_CORES_MASK)) &&
+			!(val & SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK)));
+
+	dev_dbg(ctx->dev, "DSP core is enabled=%d\n", is_enable);
+	return is_enable;
+}
+
+static int skl_dsp_reset_core(struct sst_dsp *ctx)
+{
+	/* stall core */
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+
+	/* set reset state */
+	return skl_dsp_core_set_reset_state(ctx);
+}
+
+static int skl_dsp_start_core(struct sst_dsp *ctx)
+{
+	int ret;
+
+	/* unset reset state */
+	ret = skl_dsp_core_unset_reset_state(ctx);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "dsp unset reset fails\n");
+		return ret;
+	}
+
+	/* run core */
+	dev_dbg(ctx->dev, "run core...\n");
+	sst_dsp_shim_write_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			 sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+				~SKL_ADSPCS_CSTALL(SKL_DSP_CORES_MASK));
+
+	if (!is_skl_dsp_core_enable(ctx)) {
+		skl_dsp_reset_core(ctx);
+		dev_err(ctx->dev, "DSP core enable failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_power_up(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_SPA_MASK, SKL_ADSPCS_SPA(SKL_DSP_CORES_MASK));
+
+	/* poll with timeout to check if operation successful */
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_CPA_MASK,
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK),
+			SKL_DSP_PU_TO,
+			"Power up");
+
+	if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) !=
+			SKL_ADSPCS_CPA(SKL_DSP_CORES_MASK)) {
+		dev_err(ctx->dev, "DSP core power up failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+static int skl_dsp_core_power_down(struct sst_dsp  *ctx)
+{
+	/* update bits */
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
+					SKL_ADSPCS_SPA_MASK, 0);
+
+	/* poll with timeout to check if operation successful */
+	return sst_dsp_register_poll(ctx,
+			SKL_ADSP_REG_ADSPCS,
+			SKL_ADSPCS_SPA_MASK,
+			0,
+			SKL_DSP_PD_TO,
+			"Power down");
+}
+
+static int skl_dsp_enable_core(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	/* power up */
+	ret = skl_dsp_core_power_up(ctx);
+	if (ret < 0) {
+		dev_dbg(ctx->dev, "dsp core power up failed\n");
+		return ret;
+	}
+
+	return skl_dsp_start_core(ctx);
+}
+
+int skl_dsp_disable_core(struct sst_dsp  *ctx)
+{
+	int ret;
+
+	ret = skl_dsp_reset_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "dsp core reset failed\n");
+		return ret;
+	}
+
+	/* power down core*/
+	ret = skl_dsp_core_power_down(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "dsp core power down failed\n");
+		return ret;
+	}
+
+	if (is_skl_dsp_core_enable(ctx)) {
+		dev_err(ctx->dev, "DSP core disable failed\n");
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+int skl_dsp_boot(struct sst_dsp *ctx)
+{
+	int ret;
+
+	if (is_skl_dsp_core_enable(ctx)) {
+		dev_dbg(ctx->dev, "dsp core is already enabled, so reset the dsp core\n");
+		ret = skl_dsp_reset_core(ctx);
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp reset failed\n");
+			return ret;
+		}
+
+		ret = skl_dsp_start_core(ctx);
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp start failed\n");
+			return ret;
+		}
+	} else {
+		dev_dbg(ctx->dev, "disable and enable to make sure DSP is in a valid state\n");
+		ret = skl_dsp_disable_core(ctx);
+
+		if (ret < 0) {
+			dev_err(ctx->dev, "dsp disable core failed\n");
+			return ret;
+		}
+		ret = skl_dsp_enable_core(ctx);
+	}
+
+	return ret;
+}
+
+irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
+{
+	struct sst_dsp *ctx = dev_id;
+	u32 val;
+	irqreturn_t result = IRQ_NONE;
+
+	spin_lock(&ctx->spinlock);
+
+	val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
+	ctx->intr_status = val;
+
+	if (val & SKL_ADSPIS_IPC) {
+		skl_ipc_int_disable(ctx);
+		result = IRQ_WAKE_THREAD;
+	}
+
+	if (val & SKL_ADSPIS_CL_DMA) {
+		skl_cldma_int_disable(ctx);
+		result = IRQ_WAKE_THREAD;
+	}
+
+	spin_unlock(&ctx->spinlock);
+
+	return result;
+}
+
+int skl_dsp_wake(struct sst_dsp *ctx)
+{
+	return ctx->fw_ops.set_state_D0(ctx);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_wake);
+
+int skl_dsp_sleep(struct sst_dsp *ctx)
+{
+	return ctx->fw_ops.set_state_D3(ctx);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_sleep);
+
+struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
+		struct sst_dsp_device *sst_dev, int irq)
+{
+	int ret;
+	struct sst_dsp *sst;
+
+	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
+	if (sst == NULL)
+		return NULL;
+
+	spin_lock_init(&sst->spinlock);
+	mutex_init(&sst->mutex);
+	sst->dev = dev;
+	sst->sst_dev = sst_dev;
+	sst->irq = irq;
+	sst->ops = sst_dev->ops;
+	sst->thread_context = sst_dev->thread_context;
+
+	/* Initialise SST Audio DSP */
+	if (sst->ops->init) {
+		ret = sst->ops->init(sst, NULL);
+		if (ret < 0)
+			return NULL;
+	}
+
+	/* Register the ISR */
+	ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
+		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
+	if (ret) {
+		dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
+			       sst->irq);
+		return NULL;
+	}
+
+	return sst;
+}
+
+void skl_dsp_free(struct sst_dsp *dsp)
+{
+	skl_ipc_int_disable(dsp);
+
+	free_irq(dsp->irq, dsp);
+	skl_dsp_disable_core(dsp);
+}
+EXPORT_SYMBOL_GPL(skl_dsp_free);
+
+bool is_skl_dsp_running(struct sst_dsp *ctx)
+{
+	return (ctx->sst_state == SKL_DSP_RUNNING);
+}
+EXPORT_SYMBOL_GPL(is_skl_dsp_running);
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.h b/sound/soc/intel/skylake/skl-sst-dsp.h
new file mode 100644
index 0000000..6bfcef4
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-dsp.h
@@ -0,0 +1,145 @@
+/*
+ * Skylake SST DSP Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __SKL_SST_DSP_H__
+#define __SKL_SST_DSP_H__
+
+#include <linux/interrupt.h>
+#include <sound/memalloc.h>
+#include "skl-sst-cldma.h"
+
+struct sst_dsp;
+struct skl_sst;
+struct sst_dsp_device;
+
+/* Intel HD Audio General DSP Registers */
+#define SKL_ADSP_GEN_BASE		0x0
+#define SKL_ADSP_REG_ADSPCS		(SKL_ADSP_GEN_BASE + 0x04)
+#define SKL_ADSP_REG_ADSPIC		(SKL_ADSP_GEN_BASE + 0x08)
+#define SKL_ADSP_REG_ADSPIS		(SKL_ADSP_GEN_BASE + 0x0C)
+#define SKL_ADSP_REG_ADSPIC2		(SKL_ADSP_GEN_BASE + 0x10)
+#define SKL_ADSP_REG_ADSPIS2		(SKL_ADSP_GEN_BASE + 0x14)
+
+/* Intel HD Audio Inter-Processor Communication Registers */
+#define SKL_ADSP_IPC_BASE		0x40
+#define SKL_ADSP_REG_HIPCT		(SKL_ADSP_IPC_BASE + 0x00)
+#define SKL_ADSP_REG_HIPCTE		(SKL_ADSP_IPC_BASE + 0x04)
+#define SKL_ADSP_REG_HIPCI		(SKL_ADSP_IPC_BASE + 0x08)
+#define SKL_ADSP_REG_HIPCIE		(SKL_ADSP_IPC_BASE + 0x0C)
+#define SKL_ADSP_REG_HIPCCTL		(SKL_ADSP_IPC_BASE + 0x10)
+
+/*  HIPCI */
+#define SKL_ADSP_REG_HIPCI_BUSY		BIT(31)
+
+/* HIPCIE */
+#define SKL_ADSP_REG_HIPCIE_DONE	BIT(30)
+
+/* HIPCCTL */
+#define SKL_ADSP_REG_HIPCCTL_DONE	BIT(1)
+#define SKL_ADSP_REG_HIPCCTL_BUSY	BIT(0)
+
+/* HIPCT */
+#define SKL_ADSP_REG_HIPCT_BUSY		BIT(31)
+
+/* Intel HD Audio SRAM Window 1 */
+#define SKL_ADSP_SRAM1_BASE		0xA000
+
+#define SKL_ADSP_MMIO_LEN		0x10000
+
+#define SKL_ADSP_W0_STAT_SZ		0x800
+
+#define SKL_ADSP_W0_UP_SZ		0x800
+
+#define SKL_ADSP_W1_SZ			0x1000
+
+#define SKL_FW_STS_MASK			0xf
+
+#define SKL_FW_INIT			0x1
+#define SKL_FW_RFW_START		0xf
+
+#define SKL_ADSPIC_IPC			1
+#define SKL_ADSPIS_IPC			1
+
+/* ADSPCS - Audio DSP Control & Status */
+#define SKL_DSP_CORES		1
+#define SKL_DSP_CORE0_MASK	1
+#define SKL_DSP_CORES_MASK	((1 << SKL_DSP_CORES) - 1)
+
+/* Core Reset - asserted high */
+#define SKL_ADSPCS_CRST_SHIFT	0
+#define SKL_ADSPCS_CRST_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_CRST_SHIFT)
+#define SKL_ADSPCS_CRST(x)	((x << SKL_ADSPCS_CRST_SHIFT) & SKL_ADSPCS_CRST_MASK)
+
+/* Core run/stall - when set to '1' core is stalled */
+#define SKL_ADSPCS_CSTALL_SHIFT	8
+#define SKL_ADSPCS_CSTALL_MASK	(SKL_DSP_CORES_MASK <<	\
+					SKL_ADSPCS_CSTALL_SHIFT)
+#define SKL_ADSPCS_CSTALL(x)	((x << SKL_ADSPCS_CSTALL_SHIFT) &	\
+				SKL_ADSPCS_CSTALL_MASK)
+
+/* Set Power Active - when set to '1' turn cores on */
+#define SKL_ADSPCS_SPA_SHIFT	16
+#define SKL_ADSPCS_SPA_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_SPA_SHIFT)
+#define SKL_ADSPCS_SPA(x)	((x << SKL_ADSPCS_SPA_SHIFT) & SKL_ADSPCS_SPA_MASK)
+
+/* Current Power Active - power status of cores, set by hardware */
+#define SKL_ADSPCS_CPA_SHIFT	24
+#define SKL_ADSPCS_CPA_MASK	(SKL_DSP_CORES_MASK << SKL_ADSPCS_CPA_SHIFT)
+#define SKL_ADSPCS_CPA(x)	((x << SKL_ADSPCS_CPA_SHIFT) & SKL_ADSPCS_CPA_MASK)
+
+#define SST_DSP_POWER_D0	0x0  /* full On */
+#define SST_DSP_POWER_D3	0x3  /* Off */
+
+enum skl_dsp_states {
+	SKL_DSP_RUNNING = 1,
+	SKL_DSP_RESET,
+};
+
+struct skl_dsp_fw_ops {
+	int (*load_fw)(struct sst_dsp  *ctx);
+	/* FW module parser/loader */
+	int (*parse_fw)(struct sst_dsp *ctx);
+	int (*set_state_D0)(struct sst_dsp *ctx);
+	int (*set_state_D3)(struct sst_dsp *ctx);
+	unsigned int (*get_fw_errcode)(struct sst_dsp *ctx);
+};
+
+struct skl_dsp_loader_ops {
+	int (*alloc_dma_buf)(struct device *dev,
+		struct snd_dma_buffer *dmab, size_t size);
+	int (*free_dma_buf)(struct device *dev,
+		struct snd_dma_buffer *dmab);
+};
+
+void skl_cldma_process_intr(struct sst_dsp *ctx);
+void skl_cldma_int_disable(struct sst_dsp *ctx);
+int skl_cldma_prepare(struct sst_dsp *ctx);
+
+void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state);
+struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
+		struct sst_dsp_device *sst_dev, int irq);
+int skl_dsp_disable_core(struct sst_dsp  *ctx);
+bool is_skl_dsp_running(struct sst_dsp *ctx);
+irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id);
+int skl_dsp_wake(struct sst_dsp *ctx);
+int skl_dsp_sleep(struct sst_dsp *ctx);
+void skl_dsp_free(struct sst_dsp *dsp);
+
+int skl_dsp_boot(struct sst_dsp *ctx);
+int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp);
+void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx);
+
+#endif /*__SKL_SST_DSP_H__*/
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
new file mode 100644
index 0000000..937a0a3
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -0,0 +1,771 @@
+/*
+ * skl-sst-ipc.c - Intel skl IPC Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/device.h>
+
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "skl-sst-dsp.h"
+#include "skl-sst-ipc.h"
+
+
+#define IPC_IXC_STATUS_BITS		24
+
+/* Global Message - Generic */
+#define IPC_GLB_TYPE_SHIFT		24
+#define IPC_GLB_TYPE_MASK		(0xf << IPC_GLB_TYPE_SHIFT)
+#define IPC_GLB_TYPE(x)			((x) << IPC_GLB_TYPE_SHIFT)
+
+/* Global Message - Reply */
+#define IPC_GLB_REPLY_STATUS_SHIFT	24
+#define IPC_GLB_REPLY_STATUS_MASK	((0x1 << IPC_GLB_REPLY_STATUS_SHIFT) - 1)
+#define IPC_GLB_REPLY_STATUS(x)		((x) << IPC_GLB_REPLY_STATUS_SHIFT)
+
+#define IPC_TIMEOUT_MSECS		3000
+
+#define IPC_EMPTY_LIST_SIZE		8
+
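+/*
+ * IPC primary register layout: bit 30 selects the message target (FW
+ * generic vs module) and bit 29 the direction (request vs reply)
+ */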
+#define IPC_MSG_TARGET_SHIFT		30
+#define IPC_MSG_TARGET_MASK		0x1
+#define IPC_MSG_TARGET(x)		(((x) & IPC_MSG_TARGET_MASK) \
+					<< IPC_MSG_TARGET_SHIFT)
+
+#define IPC_MSG_DIR_SHIFT		29
+#define IPC_MSG_DIR_MASK		0x1
+#define IPC_MSG_DIR(x)			(((x) & IPC_MSG_DIR_MASK) \
+					<< IPC_MSG_DIR_SHIFT)
+/* Global Notification Message */
+#define IPC_GLB_NOTIFY_TYPE_SHIFT	16
+#define IPC_GLB_NOTIFY_TYPE_MASK	0xFF
+#define IPC_GLB_NOTIFY_TYPE(x)		(((x) >> IPC_GLB_NOTIFY_TYPE_SHIFT) \
+					& IPC_GLB_NOTIFY_TYPE_MASK)
+
+#define IPC_GLB_NOTIFY_MSG_TYPE_SHIFT	24
+#define IPC_GLB_NOTIFY_MSG_TYPE_MASK	0x1F
+#define IPC_GLB_NOTIFY_MSG_TYPE(x)	(((x) >> IPC_GLB_NOTIFY_MSG_TYPE_SHIFT)	\
+						& IPC_GLB_NOTIFY_MSG_TYPE_MASK)
+
+#define IPC_GLB_NOTIFY_RSP_SHIFT	29
+#define IPC_GLB_NOTIFY_RSP_MASK		0x1
+#define IPC_GLB_NOTIFY_RSP_TYPE(x)	(((x) >> IPC_GLB_NOTIFY_RSP_SHIFT) \
+					& IPC_GLB_NOTIFY_RSP_MASK)
+
+/* Pipeline operations */
+
+/* Create pipeline message */
+#define IPC_PPL_MEM_SIZE_SHIFT		0
+#define IPC_PPL_MEM_SIZE_MASK		0x7FF
+#define IPC_PPL_MEM_SIZE(x)		(((x) & IPC_PPL_MEM_SIZE_MASK) \
+					<< IPC_PPL_MEM_SIZE_SHIFT)
+
+#define IPC_PPL_TYPE_SHIFT		11
+#define IPC_PPL_TYPE_MASK		0x1F
+#define IPC_PPL_TYPE(x)			(((x) & IPC_PPL_TYPE_MASK) \
+					<< IPC_PPL_TYPE_SHIFT)
+
+#define IPC_INSTANCE_ID_SHIFT		16
+#define IPC_INSTANCE_ID_MASK		0xFF
+#define IPC_INSTANCE_ID(x)		(((x) & IPC_INSTANCE_ID_MASK) \
+					<< IPC_INSTANCE_ID_SHIFT)
+
+/* Set pipeline state message */
+#define IPC_PPL_STATE_SHIFT		0
+#define IPC_PPL_STATE_MASK		0x1F
+#define IPC_PPL_STATE(x)		(((x) & IPC_PPL_STATE_MASK) \
+					<< IPC_PPL_STATE_SHIFT)
+
+/* Module operations primary register */
+#define IPC_MOD_ID_SHIFT		0
+#define IPC_MOD_ID_MASK		0xFFFF
+#define IPC_MOD_ID(x)		(((x) & IPC_MOD_ID_MASK) \
+					<< IPC_MOD_ID_SHIFT)
+
+#define IPC_MOD_INSTANCE_ID_SHIFT	16
+#define IPC_MOD_INSTANCE_ID_MASK	0xFF
+#define IPC_MOD_INSTANCE_ID(x)	(((x) & IPC_MOD_INSTANCE_ID_MASK) \
+					<< IPC_MOD_INSTANCE_ID_SHIFT)
+
+/* Init instance message extension register */
+#define IPC_PARAM_BLOCK_SIZE_SHIFT	0
+#define IPC_PARAM_BLOCK_SIZE_MASK	0xFFFF
+#define IPC_PARAM_BLOCK_SIZE(x)		(((x) & IPC_PARAM_BLOCK_SIZE_MASK) \
+					<< IPC_PARAM_BLOCK_SIZE_SHIFT)
+
+#define IPC_PPL_INSTANCE_ID_SHIFT	16
+#define IPC_PPL_INSTANCE_ID_MASK	0xFF
+#define IPC_PPL_INSTANCE_ID(x)		(((x) & IPC_PPL_INSTANCE_ID_MASK) \
+					<< IPC_PPL_INSTANCE_ID_SHIFT)
+
+#define IPC_CORE_ID_SHIFT		24
+#define IPC_CORE_ID_MASK		0x1F
+#define IPC_CORE_ID(x)			(((x) & IPC_CORE_ID_MASK) \
+					<< IPC_CORE_ID_SHIFT)
+
+/* Bind/Unbind message extension register */
+#define IPC_DST_MOD_ID_SHIFT		0
+#define IPC_DST_MOD_ID(x)		(((x) & IPC_MOD_ID_MASK) \
+					<< IPC_DST_MOD_ID_SHIFT)
+
+#define IPC_DST_MOD_INSTANCE_ID_SHIFT 16
+#define IPC_DST_MOD_INSTANCE_ID(x)	(((x) & IPC_MOD_INSTANCE_ID_MASK) \
+					<< IPC_DST_MOD_INSTANCE_ID_SHIFT)
+
+#define IPC_DST_QUEUE_SHIFT		24
+#define IPC_DST_QUEUE_MASK		0x7
+#define IPC_DST_QUEUE(x)		(((x) & IPC_DST_QUEUE_MASK) \
+					<< IPC_DST_QUEUE_SHIFT)
+
+#define IPC_SRC_QUEUE_SHIFT		27
+#define IPC_SRC_QUEUE_MASK		0x7
+#define IPC_SRC_QUEUE(x)		(((x) & IPC_SRC_QUEUE_MASK) \
+					<< IPC_SRC_QUEUE_SHIFT)
+
+/* Save pipeline message extension register */
+#define IPC_DMA_ID_SHIFT		0
+#define IPC_DMA_ID_MASK			0x1F
+#define IPC_DMA_ID(x)			(((x) & IPC_DMA_ID_MASK) \
+					<< IPC_DMA_ID_SHIFT)
+/* Large Config message extension register */
+#define IPC_DATA_OFFSET_SZ_SHIFT	0
+#define IPC_DATA_OFFSET_SZ_MASK		0xFFFFF
+#define IPC_DATA_OFFSET_SZ(x)		(((x) & IPC_DATA_OFFSET_SZ_MASK) \
+					<< IPC_DATA_OFFSET_SZ_SHIFT)
+#define IPC_DATA_OFFSET_SZ_CLEAR	~(IPC_DATA_OFFSET_SZ_MASK \
+					  << IPC_DATA_OFFSET_SZ_SHIFT)
+
+#define IPC_LARGE_PARAM_ID_SHIFT	20
+#define IPC_LARGE_PARAM_ID_MASK		0xFF
+#define IPC_LARGE_PARAM_ID(x)		(((x) & IPC_LARGE_PARAM_ID_MASK) \
+					<< IPC_LARGE_PARAM_ID_SHIFT)
+
+#define IPC_FINAL_BLOCK_SHIFT		28
+#define IPC_FINAL_BLOCK_MASK		0x1
+#define IPC_FINAL_BLOCK(x)		(((x) & IPC_FINAL_BLOCK_MASK) \
+					<< IPC_FINAL_BLOCK_SHIFT)
+
+#define IPC_INITIAL_BLOCK_SHIFT		29
+#define IPC_INITIAL_BLOCK_MASK		0x1
+#define IPC_INITIAL_BLOCK(x)		(((x) & IPC_INITIAL_BLOCK_MASK) \
+					<< IPC_INITIAL_BLOCK_SHIFT)
+#define IPC_INITIAL_BLOCK_CLEAR		~(IPC_INITIAL_BLOCK_MASK \
+					  << IPC_INITIAL_BLOCK_SHIFT)
+
+enum skl_ipc_msg_target {
+	IPC_FW_GEN_MSG = 0,
+	IPC_MOD_MSG = 1
+};
+
+enum skl_ipc_msg_direction {
+	IPC_MSG_REQUEST = 0,
+	IPC_MSG_REPLY = 1
+};
+
+/* Global Message Types */
+enum skl_ipc_glb_type {
+	IPC_GLB_GET_FW_VERSION = 0, /* Retrieves firmware version */
+	IPC_GLB_LOAD_MULTIPLE_MODS = 15,
+	IPC_GLB_UNLOAD_MULTIPLE_MODS = 16,
+	IPC_GLB_CREATE_PPL = 17,
+	IPC_GLB_DELETE_PPL = 18,
+	IPC_GLB_SET_PPL_STATE = 19,
+	IPC_GLB_GET_PPL_STATE = 20,
+	IPC_GLB_GET_PPL_CONTEXT_SIZE = 21,
+	IPC_GLB_SAVE_PPL = 22,
+	IPC_GLB_RESTORE_PPL = 23,
+	IPC_GLB_NOTIFY = 26,
+	IPC_GLB_MAX_IPC_MSG_NUMBER = 31 /* Maximum message number */
+};
+
+enum skl_ipc_glb_reply {
+	IPC_GLB_REPLY_SUCCESS = 0,
+
+	IPC_GLB_REPLY_UNKNOWN_MSG_TYPE = 1,
+	IPC_GLB_REPLY_ERROR_INVALID_PARAM = 2,
+
+	IPC_GLB_REPLY_BUSY = 3,
+	IPC_GLB_REPLY_PENDING = 4,
+	IPC_GLB_REPLY_FAILURE = 5,
+	IPC_GLB_REPLY_INVALID_REQUEST = 6,
+
+	IPC_GLB_REPLY_OUT_OF_MEMORY = 7,
+	IPC_GLB_REPLY_OUT_OF_MIPS = 8,
+
+	IPC_GLB_REPLY_INVALID_RESOURCE_ID = 9,
+	IPC_GLB_REPLY_INVALID_RESOURCE_STATE = 10,
+
+	IPC_GLB_REPLY_MOD_MGMT_ERROR = 100,
+	IPC_GLB_REPLY_MOD_LOAD_CL_FAILED = 101,
+	IPC_GLB_REPLY_MOD_LOAD_INVALID_HASH = 102,
+
+	IPC_GLB_REPLY_MOD_UNLOAD_INST_EXIST = 103,
+	IPC_GLB_REPLY_MOD_NOT_INITIALIZED = 104,
+
+	IPC_GLB_REPLY_INVALID_CONFIG_PARAM_ID = 120,
+	IPC_GLB_REPLY_INVALID_CONFIG_DATA_LEN = 121,
+	IPC_GLB_REPLY_GATEWAY_NOT_INITIALIZED = 140,
+	IPC_GLB_REPLY_GATEWAY_NOT_EXIST = 141,
+
+	IPC_GLB_REPLY_PPL_NOT_INITIALIZED = 160,
+	IPC_GLB_REPLY_PPL_NOT_EXIST = 161,
+	IPC_GLB_REPLY_PPL_SAVE_FAILED = 162,
+	IPC_GLB_REPLY_PPL_RESTORE_FAILED = 163,
+
+	IPC_MAX_STATUS = ((1<<IPC_IXC_STATUS_BITS)-1)
+};
+
+enum skl_ipc_notification_type {
+	IPC_GLB_NOTIFY_GLITCH = 0,
+	IPC_GLB_NOTIFY_OVERRUN = 1,
+	IPC_GLB_NOTIFY_UNDERRUN = 2,
+	IPC_GLB_NOTIFY_END_STREAM = 3,
+	IPC_GLB_NOTIFY_PHRASE_DETECTED = 4,
+	IPC_GLB_NOTIFY_RESOURCE_EVENT = 5,
+	IPC_GLB_NOTIFY_LOG_BUFFER_STATUS = 6,
+	IPC_GLB_NOTIFY_TIMESTAMP_CAPTURED = 7,
+	IPC_GLB_NOTIFY_FW_READY = 8
+};
+
+/* Module Message Types */
+enum skl_ipc_module_msg {
+	IPC_MOD_INIT_INSTANCE = 0,
+	IPC_MOD_CONFIG_GET = 1,
+	IPC_MOD_CONFIG_SET = 2,
+	IPC_MOD_LARGE_CONFIG_GET = 3,
+	IPC_MOD_LARGE_CONFIG_SET = 4,
+	IPC_MOD_BIND = 5,
+	IPC_MOD_UNBIND = 6,
+	IPC_MOD_SET_DX = 7
+};
+
+static void skl_ipc_tx_data_copy(struct ipc_message *msg, char *tx_data,
+		size_t tx_size)
+{
+	if (tx_size)
+		memcpy(msg->tx_data, tx_data, tx_size);
+}
+
+static bool skl_ipc_is_dsp_busy(struct sst_dsp *dsp)
+{
+	u32 hipci;
+
+	hipci = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCI);
+	return (hipci & SKL_ADSP_REG_HIPCI_BUSY);
+}
+
+/* Lock to be held by caller */
+static void skl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
+{
+	struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->header);
+
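+	/*
+	 * write the extension word first, then the primary word with the
+	 * BUSY bit set to hand the message over to the DSP
+	 */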
+	if (msg->tx_size)
+		sst_dsp_outbox_write(ipc->dsp, msg->tx_data, msg->tx_size);
+	sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCIE,
+						header->extension);
+	sst_dsp_shim_write_unlocked(ipc->dsp, SKL_ADSP_REG_HIPCI,
+		header->primary | SKL_ADSP_REG_HIPCI_BUSY);
+}
+
+static struct ipc_message *skl_ipc_reply_get_msg(struct sst_generic_ipc *ipc,
+				u64 ipc_header)
+{
+	struct ipc_message *msg =  NULL;
+	struct skl_ipc_header *header = (struct skl_ipc_header *)(&ipc_header);
+
+	if (list_empty(&ipc->rx_list)) {
+		dev_err(ipc->dev, "ipc: rx list is empty but received 0x%x\n",
+			header->primary);
+		goto out;
+	}
+
+	msg = list_first_entry(&ipc->rx_list, struct ipc_message, list);
+
+out:
+	return msg;
+
+}
+
+static int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
+		struct skl_ipc_header header)
+{
+	struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+
+	if (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
+		switch (IPC_GLB_NOTIFY_TYPE(header.primary)) {
+
+		case IPC_GLB_NOTIFY_UNDERRUN:
+			dev_err(ipc->dev, "FW Underrun %x\n", header.primary);
+			break;
+
+		case IPC_GLB_NOTIFY_RESOURCE_EVENT:
+			dev_err(ipc->dev, "MCPS Budget Violation: %x\n",
+						header.primary);
+			break;
+
+		case IPC_GLB_NOTIFY_FW_READY:
+			skl->boot_complete = true;
+			wake_up(&skl->boot_wait);
+			break;
+
+		default:
+			dev_err(ipc->dev, "ipc: Unhandled error msg=%x",
+						header.primary);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
+		struct skl_ipc_header header)
+{
+	struct ipc_message *msg;
+	u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
+	u64 *ipc_header = (u64 *)(&header);
+
+	msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+	if (msg == NULL) {
+		dev_dbg(ipc->dev, "ipc: rx list is empty\n");
+		return;
+	}
+
+	/* first process the header */
+	switch (reply) {
+	case IPC_GLB_REPLY_SUCCESS:
+		dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary);
+		break;
+
+	case IPC_GLB_REPLY_OUT_OF_MEMORY:
+		dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary);
+		msg->errno = -ENOMEM;
+		break;
+
+	case IPC_GLB_REPLY_BUSY:
+		dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary);
+		msg->errno = -EBUSY;
+		break;
+
+	default:
+		dev_err(ipc->dev, "Unknown ipc reply: 0x%x\n", reply);
+		msg->errno = -EINVAL;
+		break;
+	}
+
+	if (reply != IPC_GLB_REPLY_SUCCESS) {
+		dev_err(ipc->dev, "ipc FW reply: reply=%d\n", reply);
+		dev_err(ipc->dev, "FW Error Code: %u\n",
+			ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
+	}
+
+	list_del(&msg->list);
+	sst_ipc_tx_msg_reply_complete(ipc, msg);
+}
+
+irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
+{
+	struct sst_dsp *dsp = context;
+	struct skl_sst *skl = sst_dsp_get_thread_context(dsp);
+	struct sst_generic_ipc *ipc = &skl->ipc;
+	struct skl_ipc_header header = {0};
+	u32 hipcie, hipct, hipcte;
+	int ipc_irq = 0;
+
+	if (dsp->intr_status & SKL_ADSPIS_CL_DMA)
+		skl_cldma_process_intr(dsp);
+
+	/* Here we handle IPC interrupts only */
+	if (!(dsp->intr_status & SKL_ADSPIS_IPC))
+		return IRQ_NONE;
+
+	hipcie = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCIE);
+	hipct = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCT);
+
+	/* reply message from DSP */
+	if (hipcie & SKL_ADSP_REG_HIPCIE_DONE) {
+		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
+			SKL_ADSP_REG_HIPCCTL_DONE, 0);
+
+		/* clear DONE bit - tell DSP we have completed the operation */
+		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCIE,
+			SKL_ADSP_REG_HIPCIE_DONE, SKL_ADSP_REG_HIPCIE_DONE);
+
+		ipc_irq = 1;
+
+		/* unmask Done interrupt */
+		sst_dsp_shim_update_bits(dsp, SKL_ADSP_REG_HIPCCTL,
+			SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
+	}
+
+	/* New message from DSP */
+	if (hipct & SKL_ADSP_REG_HIPCT_BUSY) {
+		hipcte = sst_dsp_shim_read_unlocked(dsp, SKL_ADSP_REG_HIPCTE);
+		header.primary = hipct;
+		header.extension = hipcte;
+		dev_dbg(dsp->dev, "IPC irq: Firmware respond primary:%x",
+						header.primary);
+		dev_dbg(dsp->dev, "IPC irq: Firmware respond extension:%x",
+						header.extension);
+
+		if (IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
+			/* Handle Immediate reply from DSP Core */
+			skl_ipc_process_reply(ipc, header);
+		} else {
+			dev_dbg(dsp->dev, "IPC irq: Notification from firmware\n");
+			skl_ipc_process_notification(ipc, header);
+		}
+		/* clear  busy interrupt */
+		sst_dsp_shim_update_bits_forced(dsp, SKL_ADSP_REG_HIPCT,
+			SKL_ADSP_REG_HIPCT_BUSY, SKL_ADSP_REG_HIPCT_BUSY);
+		ipc_irq = 1;
+	}
+
+	if (ipc_irq == 0)
+		return IRQ_NONE;
+
+	skl_ipc_int_enable(dsp);
+
+	/* continue to send any remaining messages... */
+	queue_kthread_work(&ipc->kworker, &ipc->kwork);
+
+	return IRQ_HANDLED;
+}
+
+void skl_ipc_int_enable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_ADSPIC,
+			SKL_ADSPIC_IPC, SKL_ADSPIC_IPC);
+}
+
+void skl_ipc_int_disable(struct sst_dsp *ctx)
+{
+	sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPIC,
+			SKL_ADSPIC_IPC, 0);
+}
+
+void skl_ipc_op_int_enable(struct sst_dsp *ctx)
+{
+	/* enable IPC DONE interrupt */
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_DONE, SKL_ADSP_REG_HIPCCTL_DONE);
+
+	/* Enable IPC BUSY interrupt */
+	sst_dsp_shim_update_bits(ctx, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_BUSY, SKL_ADSP_REG_HIPCCTL_BUSY);
+}
+
+bool skl_ipc_int_status(struct sst_dsp *ctx)
+{
+	return sst_dsp_shim_read_unlocked(ctx,
+			SKL_ADSP_REG_ADSPIS) & SKL_ADSPIS_IPC;
+}
+
+int skl_ipc_init(struct device *dev, struct skl_sst *skl)
+{
+	struct sst_generic_ipc *ipc;
+	int err;
+
+	ipc = &skl->ipc;
+	ipc->dsp = skl->dsp;
+	ipc->dev = dev;
+
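+	/*
+	 * Payload size limits follow the mailbox windows: SRAM window 1
+	 * (outbox) for host to DSP data, the window 0 uplink area (inbox)
+	 * for DSP to host replies.
+	 */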
+	ipc->tx_data_max_size = SKL_ADSP_W1_SZ;
+	ipc->rx_data_max_size = SKL_ADSP_W0_UP_SZ;
+
+	err = sst_ipc_init(ipc);
+	if (err)
+		return err;
+
+	ipc->ops.tx_msg = skl_ipc_tx_msg;
+	ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
+	ipc->ops.is_dsp_busy = skl_ipc_is_dsp_busy;
+
+	return 0;
+}
+
+void skl_ipc_free(struct sst_generic_ipc *ipc)
+{
+	/* Disable IPC DONE interrupt */
+	sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_DONE, 0);
+
+	/* Disable IPC BUSY interrupt */
+	sst_dsp_shim_update_bits(ipc->dsp, SKL_ADSP_REG_HIPCCTL,
+		SKL_ADSP_REG_HIPCCTL_BUSY, 0);
+
+	sst_ipc_fini(ipc);
+}
+
+int skl_ipc_create_pipeline(struct sst_generic_ipc *ipc,
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_CREATE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+	header.primary |= IPC_PPL_TYPE(ppl_type);
+	header.primary |= IPC_PPL_MEM_SIZE(ppl_mem_size);
+
+	dev_dbg(ipc->dev, "In %s header=%x\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: create pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_create_pipeline);
+
+int skl_ipc_delete_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_DELETE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	dev_dbg(ipc->dev, "In %s header=%x\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: delete pipeline failed, err %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_delete_pipeline);
+
+int skl_ipc_set_pipeline_state(struct sst_generic_ipc *ipc,
+		u8 instance_id, enum skl_ipc_pipeline_state state)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_SET_PPL_STATE);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+	header.primary |= IPC_PPL_STATE(state);
+
+	dev_dbg(ipc->dev, "In %s header=%x\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: set pipeline state failed, err: %d\n", ret);
+		return ret;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_pipeline_state);
+
+int
+skl_ipc_save_pipeline(struct sst_generic_ipc *ipc, u8 instance_id, int dma_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_SAVE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	header.extension = IPC_DMA_ID(dma_id);
+	dev_dbg(ipc->dev, "In %s header=%x\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: save pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_save_pipeline);
+
+int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_FW_GEN_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_GLB_RESTORE_PPL);
+	header.primary |= IPC_INSTANCE_ID(instance_id);
+
+	dev_dbg(ipc->dev, "In %s header=%x\n", __func__, header.primary);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: restore pipeline failed, err: %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_restore_pipeline);
+
+int skl_ipc_set_dx(struct sst_generic_ipc *ipc, u8 instance_id,
+		u16 module_id, struct skl_ipc_dxstate_info *dx)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_SET_DX);
+	header.primary |= IPC_MOD_INSTANCE_ID(instance_id);
+	header.primary |= IPC_MOD_ID(module_id);
+
+	dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
+			 header.primary, header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
+				dx, sizeof(*dx), NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: set dx failed, err %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_dx);
+
+int skl_ipc_init_instance(struct sst_generic_ipc *ipc,
+		struct skl_ipc_init_instance_msg *msg, void *param_data)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret;
+	u32 *buffer = (u32 *)param_data;
+	 /* param_block_size must be in dwords */
+	u16 param_block_size = msg->param_data_size / sizeof(u32);
+
+	print_hex_dump(KERN_DEBUG, NULL, DUMP_PREFIX_NONE,
+		16, 4, buffer, param_block_size, false);
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_INIT_INSTANCE);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_CORE_ID(msg->core_id);
+	header.extension |= IPC_PPL_INSTANCE_ID(msg->ppl_instance_id);
+	header.extension |= IPC_PARAM_BLOCK_SIZE(param_block_size);
+
+	dev_dbg(ipc->dev, "In %s primary =%x ext=%x\n", __func__,
+			 header.primary, header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, param_data,
+			msg->param_data_size, NULL, 0);
+
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: init instance failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_init_instance);
+
+int skl_ipc_bind_unbind(struct sst_generic_ipc *ipc,
+		struct skl_ipc_bind_unbind_msg *msg)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	u8 bind_unbind = msg->bind ? IPC_MOD_BIND : IPC_MOD_UNBIND;
+	int ret;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(bind_unbind);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_DST_MOD_ID(msg->dst_module_id);
+	header.extension |= IPC_DST_MOD_INSTANCE_ID(msg->dst_instance_id);
+	header.extension |= IPC_DST_QUEUE(msg->dst_queue);
+	header.extension |= IPC_SRC_QUEUE(msg->src_queue);
+
+	dev_dbg(ipc->dev, "In %s hdr=%x ext=%x\n", __func__, header.primary,
+			 header.extension);
+	ret = sst_ipc_tx_message_wait(ipc, *ipc_header, NULL, 0, NULL, 0);
+	if (ret < 0) {
+		dev_err(ipc->dev, "ipc: bind/unbind failed\n");
+		return ret;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_bind_unbind);
+
+int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
+		struct skl_ipc_large_config_msg *msg, u32 *param)
+{
+	struct skl_ipc_header header = {0};
+	u64 *ipc_header = (u64 *)(&header);
+	int ret = 0;
+	size_t sz_remaining, tx_size, data_offset;
+
+	header.primary = IPC_MSG_TARGET(IPC_MOD_MSG);
+	header.primary |= IPC_MSG_DIR(IPC_MSG_REQUEST);
+	header.primary |= IPC_GLB_TYPE(IPC_MOD_LARGE_CONFIG_SET);
+	header.primary |= IPC_MOD_INSTANCE_ID(msg->instance_id);
+	header.primary |= IPC_MOD_ID(msg->module_id);
+
+	header.extension = IPC_DATA_OFFSET_SZ(msg->param_data_size);
+	header.extension |= IPC_LARGE_PARAM_ID(msg->large_param_id);
+	header.extension |= IPC_FINAL_BLOCK(0);
+	header.extension |= IPC_INITIAL_BLOCK(1);
+
+	sz_remaining = msg->param_data_size;
+	data_offset = 0;
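+	/*
+	 * The payload may exceed the outbox window, so transmit it in
+	 * chunks of at most SKL_ADSP_W1_SZ bytes and flag the last chunk
+	 * as the final block so the firmware can reassemble the data.
+	 */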
+	while (sz_remaining != 0) {
+		tx_size = sz_remaining > SKL_ADSP_W1_SZ
+				? SKL_ADSP_W1_SZ : sz_remaining;
+		if (tx_size == sz_remaining)
+			header.extension |= IPC_FINAL_BLOCK(1);
+
+		dev_dbg(ipc->dev, "In %s primary=%#x ext=%#x\n", __func__,
+			header.primary, header.extension);
+		dev_dbg(ipc->dev, "transmitting offset: %#x, size: %#x\n",
+			(unsigned)data_offset, (unsigned)tx_size);
+		ret = sst_ipc_tx_message_wait(ipc, *ipc_header,
+					  ((char *)param) + data_offset,
+					  tx_size, NULL, 0);
+		if (ret < 0) {
+			dev_err(ipc->dev,
+				"ipc: set large config fail, err: %d\n", ret);
+			return ret;
+		}
+		sz_remaining -= tx_size;
+		data_offset = msg->param_data_size - sz_remaining;
+
+		/* clear the fields */
+		header.extension &= IPC_INITIAL_BLOCK_CLEAR;
+		header.extension &= IPC_DATA_OFFSET_SZ_CLEAR;
+		/* fill the fields */
+		header.extension |= IPC_INITIAL_BLOCK(0);
+		header.extension |= IPC_DATA_OFFSET_SZ(data_offset);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_ipc_set_large_config);
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.h b/sound/soc/intel/skylake/skl-sst-ipc.h
new file mode 100644
index 0000000..9f5f672
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst-ipc.h
@@ -0,0 +1,125 @@
+/*
+ * Intel SKL IPC Support
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __SKL_IPC_H
+#define __SKL_IPC_H
+
+#include <linux/kthread.h>
+#include <linux/irqreturn.h>
+#include "../common/sst-ipc.h"
+
+struct sst_dsp;
+struct skl_sst;
+struct sst_generic_ipc;
+
+enum skl_ipc_pipeline_state {
+	PPL_INVALID_STATE =	0,
+	PPL_UNINITIALIZED =	1,
+	PPL_RESET =		2,
+	PPL_PAUSED =		3,
+	PPL_RUNNING =		4,
+	PPL_ERROR_STOP =	5,
+	PPL_SAVED =		6,
+	PPL_RESTORED =		7
+};
+
+struct skl_ipc_dxstate_info {
+	u32 core_mask;
+	u32 dx_mask;
+};
+
+struct skl_ipc_header {
+	u32 primary;
+	u32 extension;
+};
+
+struct skl_sst {
+	struct device *dev;
+	struct sst_dsp *dsp;
+
+	/* boot */
+	wait_queue_head_t boot_wait;
+	bool boot_complete;
+
+	/* IPC messaging */
+	struct sst_generic_ipc ipc;
+};
+
+struct skl_ipc_init_instance_msg {
+	u32 module_id;
+	u32 instance_id;
+	u16 param_data_size;
+	u8 ppl_instance_id;
+	u8 core_id;
+};
+
+struct skl_ipc_bind_unbind_msg {
+	u32 module_id;
+	u32 instance_id;
+	u32 dst_module_id;
+	u32 dst_instance_id;
+	u8 src_queue;
+	u8 dst_queue;
+	bool bind;
+};
+
+struct skl_ipc_large_config_msg {
+	u32 module_id;
+	u32 instance_id;
+	u32 large_param_id;
+	u32 param_data_size;
+};
+
+#define SKL_IPC_BOOT_MSECS		3000
+
+#define SKL_IPC_D3_MASK	0
+#define SKL_IPC_D0_MASK	3
+
+irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context);
+
+int skl_ipc_create_pipeline(struct sst_generic_ipc *sst_ipc,
+		u16 ppl_mem_size, u8 ppl_type, u8 instance_id);
+
+int skl_ipc_delete_pipeline(struct sst_generic_ipc *sst_ipc, u8 instance_id);
+
+int skl_ipc_set_pipeline_state(struct sst_generic_ipc *sst_ipc,
+		u8 instance_id,	enum skl_ipc_pipeline_state state);
+
+int skl_ipc_save_pipeline(struct sst_generic_ipc *ipc,
+		u8 instance_id, int dma_id);
+
+int skl_ipc_restore_pipeline(struct sst_generic_ipc *ipc, u8 instance_id);
+
+int skl_ipc_init_instance(struct sst_generic_ipc *sst_ipc,
+		struct skl_ipc_init_instance_msg *msg, void *param_data);
+
+int skl_ipc_bind_unbind(struct sst_generic_ipc *sst_ipc,
+		struct skl_ipc_bind_unbind_msg *msg);
+
+int skl_ipc_set_dx(struct sst_generic_ipc *ipc,
+		u8 instance_id, u16 module_id, struct skl_ipc_dxstate_info *dx);
+
+int skl_ipc_set_large_config(struct sst_generic_ipc *ipc,
+		struct skl_ipc_large_config_msg *msg, u32 *param);
+
+void skl_ipc_int_enable(struct sst_dsp *dsp);
+void skl_ipc_op_int_enable(struct sst_dsp *ctx);
+void skl_ipc_int_disable(struct sst_dsp *dsp);
+
+bool skl_ipc_int_status(struct sst_dsp *dsp);
+void skl_ipc_free(struct sst_generic_ipc *ipc);
+int skl_ipc_init(struct device *dev, struct skl_sst *skl);
+
+#endif /* __SKL_IPC_H */
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
new file mode 100644
index 0000000..c18ea51
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -0,0 +1,280 @@
+/*
+ * skl-sst.c - HDA DSP library functions for SKL platform
+ *
+ * Copyright (C) 2014-15, Intel Corporation.
+ * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
+ *	Jeeja KP <jeeja.kp@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "../common/sst-dsp.h"
+#include "../common/sst-dsp-priv.h"
+#include "../common/sst-ipc.h"
+#include "skl-sst-ipc.h"
+
+#define SKL_BASEFW_TIMEOUT	300
+#define SKL_INIT_TIMEOUT	1000
+
+/* Intel HD Audio SRAM Window 0 */
+#define SKL_ADSP_SRAM0_BASE	0x8000
+
+/* Firmware status window */
+#define SKL_ADSP_FW_STATUS	SKL_ADSP_SRAM0_BASE
+#define SKL_ADSP_ERROR_CODE	(SKL_ADSP_FW_STATUS + 0x4)
+
+#define SKL_INSTANCE_ID		0
+#define SKL_BASE_FW_MODULE_ID	0
+
+static bool skl_check_fw_status(struct sst_dsp *ctx, u32 status)
+{
+	u32 cur_sts;
+
+	cur_sts = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS) & SKL_FW_STS_MASK;
+
+	return (cur_sts == status);
+}
+
+static int skl_transfer_firmware(struct sst_dsp *ctx,
+		const void *basefw, u32 base_fw_size)
+{
+	int ret = 0;
+
+	ret = ctx->cl_dev.ops.cl_copy_to_dmabuf(ctx, basefw, base_fw_size);
+	if (ret < 0)
+		return ret;
+
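+	/*
+	 * Poll the firmware status register for the SKL_FW_RFW_START state
+	 * before stopping the code loader DMA.
+	 */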
+	ret = sst_dsp_register_poll(ctx,
+			SKL_ADSP_FW_STATUS,
+			SKL_FW_STS_MASK,
+			SKL_FW_RFW_START,
+			SKL_BASEFW_TIMEOUT,
+			"Firmware boot");
+
+	ctx->cl_dev.ops.cl_stop_dma(ctx);
+
+	return ret;
+}
+
+static int skl_load_base_firmware(struct sst_dsp *ctx)
+{
+	int ret = 0, i;
+	const struct firmware *fw = NULL;
+	struct skl_sst *skl = ctx->thread_context;
+	u32 reg;
+
+	ret = request_firmware(&fw, "dsp_fw_release.bin", ctx->dev);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Request firmware failed %d\n", ret);
+		skl_dsp_disable_core(ctx);
+		return -EIO;
+	}
+
+	/* enable Interrupt */
+	skl_ipc_int_enable(ctx);
+	skl_ipc_op_int_enable(ctx);
+
+	/* check ROM Status */
+	for (i = SKL_INIT_TIMEOUT; i > 0; --i) {
+		if (skl_check_fw_status(ctx, SKL_FW_INIT)) {
+			dev_dbg(ctx->dev,
+				"ROM loaded, we can continue with FW loading\n");
+			break;
+		}
+		mdelay(1);
+	}
+	if (!i) {
+		reg = sst_dsp_shim_read(ctx, SKL_ADSP_FW_STATUS);
+		dev_err(ctx->dev,
+			"Timeout waiting for ROM init done, reg:0x%x\n", reg);
+		ret = -EIO;
+		goto skl_load_base_firmware_failed;
+	}
+
+	ret = skl_transfer_firmware(ctx, fw->data, fw->size);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Transfer firmware failed %d\n", ret);
+		goto skl_load_base_firmware_failed;
+	} else {
+		ret = wait_event_timeout(skl->boot_wait, skl->boot_complete,
+					msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
+		if (ret == 0) {
+			dev_err(ctx->dev, "DSP boot failed, FW Ready timed out\n");
+			ret = -EIO;
+			goto skl_load_base_firmware_failed;
+		}
+
+		dev_dbg(ctx->dev, "Firmware download successful: %d\n", ret);
+		skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+	}
+	release_firmware(fw);
+
+	return 0;
+
+skl_load_base_firmware_failed:
+	skl_dsp_disable_core(ctx);
+	release_firmware(fw);
+	return ret;
+}
+
+static int skl_set_dsp_D0(struct sst_dsp *ctx)
+{
+	int ret;
+
+	ret = skl_load_base_firmware(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "unable to load firmware\n");
+		return ret;
+	}
+
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RUNNING);
+
+	return ret;
+}
+
+static int skl_set_dsp_D3(struct sst_dsp *ctx)
+{
+	int ret;
+	struct skl_ipc_dxstate_info dx;
+	struct skl_sst *skl = ctx->thread_context;
+
+	dev_dbg(ctx->dev, "In %s:\n", __func__);
+	mutex_lock(&ctx->mutex);
+	if (!is_skl_dsp_running(ctx)) {
+		mutex_unlock(&ctx->mutex);
+		return 0;
+	}
+	mutex_unlock(&ctx->mutex);
+
+	dx.core_mask = SKL_DSP_CORE0_MASK;
+	dx.dx_mask = SKL_IPC_D3_MASK;
+	ret = skl_ipc_set_dx(&skl->ipc, SKL_INSTANCE_ID, SKL_BASE_FW_MODULE_ID, &dx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "Failed to set DSP to D3 state\n");
+		return ret;
+	}
+
+	ret = skl_dsp_disable_core(ctx);
+	if (ret < 0) {
+		dev_err(ctx->dev, "disable dsp core failed ret: %d\n", ret);
+		ret = -EIO;
+	}
+	skl_dsp_set_state_locked(ctx, SKL_DSP_RESET);
+
+	return ret;
+}
+
+static unsigned int skl_get_errorcode(struct sst_dsp *ctx)
+{
+	return sst_dsp_shim_read(ctx, SKL_ADSP_ERROR_CODE);
+}
+
+static struct skl_dsp_fw_ops skl_fw_ops = {
+	.set_state_D0 = skl_set_dsp_D0,
+	.set_state_D3 = skl_set_dsp_D3,
+	.load_fw = skl_load_base_firmware,
+	.get_fw_errcode = skl_get_errorcode,
+};
+
+static struct sst_ops skl_ops = {
+	.irq_handler = skl_dsp_sst_interrupt,
+	.write = sst_shim32_write,
+	.read = sst_shim32_read,
+	.ram_read = sst_memcpy_fromio_32,
+	.ram_write = sst_memcpy_toio_32,
+	.free = skl_dsp_free,
+};
+
+static struct sst_dsp_device skl_dev = {
+	.thread = skl_dsp_irq_thread_handler,
+	.ops = &skl_ops,
+};
+
+int skl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
+		struct skl_dsp_loader_ops dsp_ops, struct skl_sst **dsp)
+{
+	struct skl_sst *skl;
+	struct sst_dsp *sst;
+	int ret;
+
+	skl = devm_kzalloc(dev, sizeof(*skl), GFP_KERNEL);
+	if (skl == NULL)
+		return -ENOMEM;
+
+	skl->dev = dev;
+	skl_dev.thread_context = skl;
+
+	skl->dsp = skl_dsp_ctx_init(dev, &skl_dev, irq);
+	if (!skl->dsp) {
+		dev_err(skl->dev, "%s: no device\n", __func__);
+		return -ENODEV;
+	}
+
+	sst = skl->dsp;
+
+	sst->addr.lpe = mmio_base;
+	sst->addr.shim = mmio_base;
+	sst_dsp_mailbox_init(sst, (SKL_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
+			SKL_ADSP_W0_UP_SZ, SKL_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
+
+	sst->dsp_ops = dsp_ops;
+	sst->fw_ops = skl_fw_ops;
+
+	ret = skl_ipc_init(dev, skl);
+	if (ret)
+		return ret;
+
+	skl->boot_complete = false;
+	init_waitqueue_head(&skl->boot_wait);
+
+	ret = skl_dsp_boot(sst);
+	if (ret < 0) {
+		dev_err(skl->dev, "Boot dsp core failed ret: %d\n", ret);
+		goto free_ipc;
+	}
+
+	ret = skl_cldma_prepare(sst);
+	if (ret < 0) {
+		dev_err(dev, "CL dma prepare failed: %d\n", ret);
+		goto free_ipc;
+	}
+
+	ret = sst->fw_ops.load_fw(sst);
+	if (ret < 0) {
+		dev_err(dev, "Load base fw failed: %d\n", ret);
+		goto free_ipc;
+	}
+
+	if (dsp)
+		*dsp = skl;
+
+	return 0;
+
+free_ipc:
+	skl_ipc_free(&skl->ipc);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_init);
+
+void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
+{
+	skl_ipc_free(&ctx->ipc);
+	ctx->dsp->cl_dev.ops.cl_cleanup_controller(ctx->dsp);
+	ctx->dsp->ops->free(ctx->dsp);
+}
+EXPORT_SYMBOL_GPL(skl_sst_dsp_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Skylake IPC driver");
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
new file mode 100644
index 0000000..8c7767b
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -0,0 +1,286 @@
+/*
+ *  skl-topology.h - Intel HDA Platform topology header file
+ *
+ *  Copyright (C) 2014-15 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SKL_TOPOLOGY_H__
+#define __SKL_TOPOLOGY_H__
+
+#include <linux/types.h>
+
+#include <sound/hdaudio_ext.h>
+#include <sound/soc.h>
+#include "skl.h"
+#include "skl-tplg-interface.h"
+
+#define BITS_PER_BYTE 8
+#define MAX_TS_GROUPS 8
+#define MAX_DMIC_TS_GROUPS 4
+#define MAX_FIXED_DMIC_PARAMS_SIZE 727
+
+/* Maximum number of coefficients for the up/down mixer module */
+#define UP_DOWN_MIXER_MAX_COEFF		6
+
+enum skl_channel_index {
+	SKL_CHANNEL_LEFT = 0,
+	SKL_CHANNEL_RIGHT = 1,
+	SKL_CHANNEL_CENTER = 2,
+	SKL_CHANNEL_LEFT_SURROUND = 3,
+	SKL_CHANNEL_CENTER_SURROUND = 3,
+	SKL_CHANNEL_RIGHT_SURROUND = 4,
+	SKL_CHANNEL_LFE = 7,
+	SKL_CHANNEL_INVALID = 0xF,
+};
+
+enum skl_bitdepth {
+	SKL_DEPTH_8BIT = 8,
+	SKL_DEPTH_16BIT = 16,
+	SKL_DEPTH_24BIT = 24,
+	SKL_DEPTH_32BIT = 32,
+	SKL_DEPTH_INVALID
+};
+
+enum skl_interleaving {
+	/* [s1_ch1...s1_chN,...,sM_ch1...sM_chN] */
+	SKL_INTERLEAVING_PER_CHANNEL = 0,
+	/* [s1_ch1...sM_ch1,...,s1_chN...sM_chN] */
+	SKL_INTERLEAVING_PER_SAMPLE = 1,
+};
+
+enum skl_s_freq {
+	SKL_FS_8000 = 8000,
+	SKL_FS_11025 = 11025,
+	SKL_FS_12000 = 12000,
+	SKL_FS_16000 = 16000,
+	SKL_FS_22050 = 22050,
+	SKL_FS_24000 = 24000,
+	SKL_FS_32000 = 32000,
+	SKL_FS_44100 = 44100,
+	SKL_FS_48000 = 48000,
+	SKL_FS_64000 = 64000,
+	SKL_FS_88200 = 88200,
+	SKL_FS_96000 = 96000,
+	SKL_FS_128000 = 128000,
+	SKL_FS_176400 = 176400,
+	SKL_FS_192000 = 192000,
+	SKL_FS_INVALID
+};
+
+enum skl_widget_type {
+	SKL_WIDGET_VMIXER = 1,
+	SKL_WIDGET_MIXER = 2,
+	SKL_WIDGET_PGA = 3,
+	SKL_WIDGET_MUX = 4
+};
+
+struct skl_audio_data_format {
+	enum skl_s_freq s_freq;
+	enum skl_bitdepth bit_depth;
+	u32 channel_map;
+	enum skl_ch_cfg ch_cfg;
+	enum skl_interleaving interleaving;
+	u8 number_of_channels;
+	u8 valid_bit_depth;
+	u8 sample_type;
+	u8 reserved[1];
+} __packed;
+
+struct skl_base_cfg {
+	u32 cps;
+	u32 ibs;
+	u32 obs;
+	u32 is_pages;
+	struct skl_audio_data_format audio_fmt;
+};
+
+struct skl_cpr_gtw_cfg {
+	u32 node_id;
+	u32 dma_buffer_size;
+	u32 config_length;
+	/* not mandatory; required only for DMIC/I2S */
+	u32 config_data[1];
+} __packed;
+
+struct skl_cpr_cfg {
+	struct skl_base_cfg base_cfg;
+	struct skl_audio_data_format out_fmt;
+	u32 cpr_feature_mask;
+	struct skl_cpr_gtw_cfg gtw_cfg;
+} __packed;
+
+
+struct skl_src_module_cfg {
+	struct skl_base_cfg base_cfg;
+	enum skl_s_freq src_cfg;
+} __packed;
+
+struct skl_up_down_mixer_cfg {
+	struct skl_base_cfg base_cfg;
+	enum skl_ch_cfg out_ch_cfg;
+	/* This should be set to 1 if user coefficients are required */
+	u32 coeff_sel;
+	/* Pass the user coeff in this array */
+	s32 coeff[UP_DOWN_MIXER_MAX_COEFF];
+} __packed;
+
+enum skl_dma_type {
+	SKL_DMA_HDA_HOST_OUTPUT_CLASS = 0,
+	SKL_DMA_HDA_HOST_INPUT_CLASS = 1,
+	SKL_DMA_HDA_HOST_INOUT_CLASS = 2,
+	SKL_DMA_HDA_LINK_OUTPUT_CLASS = 8,
+	SKL_DMA_HDA_LINK_INPUT_CLASS = 9,
+	SKL_DMA_HDA_LINK_INOUT_CLASS = 0xA,
+	SKL_DMA_DMIC_LINK_INPUT_CLASS = 0xB,
+	SKL_DMA_I2S_LINK_OUTPUT_CLASS = 0xC,
+	SKL_DMA_I2S_LINK_INPUT_CLASS = 0xD,
+};
+
+union skl_ssp_dma_node {
+	u8 val;
+	struct {
+		u8 dual_mono:1;
+		u8 time_slot:3;
+		u8 i2s_instance:4;
+	} dma_node;
+};
+
+union skl_connector_node_id {
+	u32 val;
+	struct {
+		u32 vindex:8;
+		u32 dma_type:4;
+		u32 rsvd:20;
+	} node;
+};
+
+struct skl_module_fmt {
+	u32 channels;
+	u32 s_freq;
+	u32 bit_depth;
+	u32 valid_bit_depth;
+	u32 ch_cfg;
+};
+
+struct skl_module_inst_id {
+	u32 module_id;
+	u32 instance_id;
+};
+
+struct skl_module_pin {
+	struct skl_module_inst_id id;
+	u8 pin_index;
+	bool is_dynamic;
+	bool in_use;
+};
+
+struct skl_specific_cfg {
+	u32 caps_size;
+	u32 *caps;
+};
+
+enum skl_pipe_state {
+	SKL_PIPE_INVALID = 0,
+	SKL_PIPE_CREATED = 1,
+	SKL_PIPE_PAUSED = 2,
+	SKL_PIPE_STARTED = 3
+};
+
+struct skl_pipe_module {
+	struct snd_soc_dapm_widget *w;
+	struct list_head node;
+};
+
+struct skl_pipe_params {
+	u8 host_dma_id;
+	u8 link_dma_id;
+	u32 ch;
+	u32 s_freq;
+	u32 s_fmt;
+	u8 linktype;
+	int stream;
+};
+
+struct skl_pipe {
+	u8 ppl_id;
+	u8 pipe_priority;
+	u16 conn_type;
+	u32 memory_pages;
+	struct skl_pipe_params *p_params;
+	enum skl_pipe_state state;
+	struct list_head w_list;
+};
+
+enum skl_module_state {
+	SKL_MODULE_UNINIT = 0,
+	SKL_MODULE_INIT_DONE = 1,
+	SKL_MODULE_LOADED = 2,
+	SKL_MODULE_UNLOADED = 3,
+	SKL_MODULE_BIND_DONE = 4
+};
+
+struct skl_module_cfg {
+	struct skl_module_inst_id id;
+	struct skl_module_fmt in_fmt;
+	struct skl_module_fmt out_fmt;
+	u8 max_in_queue;
+	u8 max_out_queue;
+	u8 in_queue_mask;
+	u8 out_queue_mask;
+	u8 in_queue;
+	u8 out_queue;
+	u32 mcps;
+	u32 ibs;
+	u32 obs;
+	u8 is_loadable;
+	u8 core_id;
+	u8 dev_type;
+	u8 dma_id;
+	u8 time_slot;
+	u32 params_fixup;
+	u32 converter;
+	u32 vbus_id;
+	struct skl_module_pin *m_in_pin;
+	struct skl_module_pin *m_out_pin;
+	enum skl_module_type m_type;
+	enum skl_hw_conn_type  hw_conn_type;
+	enum skl_module_state m_state;
+	struct skl_pipe *pipe;
+	struct skl_specific_cfg formats_config;
+};
+
+int skl_create_pipeline(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_run_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_pause_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_delete_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_stop_pipe(struct skl_sst *ctx, struct skl_pipe *pipe);
+
+int skl_init_module(struct skl_sst *ctx, struct skl_module_cfg *module_config,
+	char *param);
+
+int skl_bind_modules(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module);
+
+int skl_unbind_modules(struct skl_sst *ctx, struct skl_module_cfg
+	*src_module, struct skl_module_cfg *dst_module);
+
+enum skl_bitdepth skl_get_bit_depth(int params);
+#endif
diff --git a/sound/soc/intel/skylake/skl-tplg-interface.h b/sound/soc/intel/skylake/skl-tplg-interface.h
new file mode 100644
index 0000000..a506898
--- /dev/null
+++ b/sound/soc/intel/skylake/skl-tplg-interface.h
@@ -0,0 +1,88 @@
+/*
+ * skl-tplg-interface.h - Intel DSP FW private data interface
+ *
+ * Copyright (C) 2015 Intel Corp
+ * Author: Jeeja KP <jeeja.kp@intel.com>
+ *	    Nilofer, Samreen <samreen.nilofer@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDA_TPLG_INTERFACE_H__
+#define __HDA_TPLG_INTERFACE_H__
+
+/**
+ * enum skl_ch_cfg - channel configuration
+ *
+ * @SKL_CH_CFG_MONO:	One channel only
+ * @SKL_CH_CFG_STEREO:	L & R
+ * @SKL_CH_CFG_2_1:	L, R & LFE
+ * @SKL_CH_CFG_3_0:	L, C & R
+ * @SKL_CH_CFG_3_1:	L, C, R & LFE
+ * @SKL_CH_CFG_QUATRO:	L, R, Ls & Rs
+ * @SKL_CH_CFG_4_0:	L, C, R & Cs
+ * @SKL_CH_CFG_5_0:	L, C, R, Ls & Rs
+ * @SKL_CH_CFG_5_1:	L, C, R, Ls, Rs & LFE
+ * @SKL_CH_CFG_DUAL_MONO: One channel replicated in two
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_0: Stereo(L,R) in 4 slots, 1st stream:[ L, R, -, - ]
+ * @SKL_CH_CFG_I2S_DUAL_STEREO_1: Stereo(L,R) in 4 slots, 2nd stream:[ -, -, L, R ]
+ * @SKL_CH_CFG_INVALID:	Invalid
+ */
+enum skl_ch_cfg {
+	SKL_CH_CFG_MONO = 0,
+	SKL_CH_CFG_STEREO = 1,
+	SKL_CH_CFG_2_1 = 2,
+	SKL_CH_CFG_3_0 = 3,
+	SKL_CH_CFG_3_1 = 4,
+	SKL_CH_CFG_QUATRO = 5,
+	SKL_CH_CFG_4_0 = 6,
+	SKL_CH_CFG_5_0 = 7,
+	SKL_CH_CFG_5_1 = 8,
+	SKL_CH_CFG_DUAL_MONO = 9,
+	SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
+	SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
+	SKL_CH_CFG_INVALID
+};
+
+enum skl_module_type {
+	SKL_MODULE_TYPE_MIXER = 0,
+	SKL_MODULE_TYPE_COPIER,
+	SKL_MODULE_TYPE_UPDWMIX,
+	SKL_MODULE_TYPE_SRCINT
+};
+
+enum skl_core_affinity {
+	SKL_AFFINITY_CORE_0 = 0,
+	SKL_AFFINITY_CORE_1,
+	SKL_AFFINITY_CORE_MAX
+};
+
+enum skl_pipe_conn_type {
+	SKL_PIPE_CONN_TYPE_NONE = 0,
+	SKL_PIPE_CONN_TYPE_FE,
+	SKL_PIPE_CONN_TYPE_BE
+};
+
+enum skl_hw_conn_type {
+	SKL_CONN_NONE = 0,
+	SKL_CONN_SOURCE = 1,
+	SKL_CONN_SINK = 2
+};
+
+enum skl_dev_type {
+	SKL_DEVICE_BT = 0x0,
+	SKL_DEVICE_DMIC = 0x1,
+	SKL_DEVICE_I2S = 0x2,
+	SKL_DEVICE_SLIMBUS = 0x3,
+	SKL_DEVICE_HDALINK = 0x4,
+	SKL_DEVICE_NONE
+};
+#endif
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
new file mode 100644
index 0000000..348d094
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.c
@@ -0,0 +1,536 @@
+/*
+ *  skl.c - Implementation of ASoC Intel SKL HD Audio driver
+ *
+ *  Copyright (C) 2014-2015 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *
+ *  Derived mostly from Intel HDA driver with following copyrights:
+ *  Copyright (c) 2004 Takashi Iwai <tiwai@suse.de>
+ *                     PeiSen Hou <pshou@realtek.com.tw>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include "skl.h"
+
+/*
+ * initialize the PCI registers
+ */
+static void skl_update_pci_byte(struct pci_dev *pci, unsigned int reg,
+			    unsigned char mask, unsigned char val)
+{
+	unsigned char data;
+
+	pci_read_config_byte(pci, reg, &data);
+	data &= ~mask;
+	data |= (val & mask);
+	pci_write_config_byte(pci, reg, data);
+}
+
+static void skl_init_pci(struct skl *skl)
+{
+	struct hdac_ext_bus *ebus = &skl->ebus;
+
+	/*
+	 * Clear bits 0-2 of PCI register TCSEL (at offset 0x44)
+	 * TCSEL == Traffic Class Select Register, which sets PCI express QOS
+	 * Ensuring these bits are 0 clears playback static on some HD Audio
+	 * codecs.
+	 * The PCI register TCSEL is defined in the Intel manuals.
+	 */
+	dev_dbg(ebus_to_hbus(ebus)->dev, "Clearing TCSEL\n");
+	skl_update_pci_byte(skl->pci, AZX_PCIREG_TCSEL, 0x07, 0);
+}
+
+/* called from IRQ */
+static void skl_stream_update(struct hdac_bus *bus, struct hdac_stream *hstr)
+{
+	snd_pcm_period_elapsed(hstr->substream);
+}
+
+static irqreturn_t skl_interrupt(int irq, void *dev_id)
+{
+	struct hdac_ext_bus *ebus = dev_id;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u32 status;
+
+	if (!pm_runtime_active(bus->dev))
+		return IRQ_NONE;
+
+	spin_lock(&bus->reg_lock);
+
+	status = snd_hdac_chip_readl(bus, INTSTS);
+	if (status == 0 || status == 0xffffffff) {
+		spin_unlock(&bus->reg_lock);
+		return IRQ_NONE;
+	}
+
+	/* clear rirb int */
+	status = snd_hdac_chip_readb(bus, RIRBSTS);
+	if (status & RIRB_INT_MASK) {
+		if (status & RIRB_INT_RESPONSE)
+			snd_hdac_bus_update_rirb(bus);
+		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
+	}
+
+	spin_unlock(&bus->reg_lock);
+
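+	/* let the threaded handler service any pending stream interrupts */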
+	return snd_hdac_chip_readl(bus, INTSTS) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
+
+static irqreturn_t skl_threaded_handler(int irq, void *dev_id)
+{
+	struct hdac_ext_bus *ebus = dev_id;
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	u32 status;
+
+	status = snd_hdac_chip_readl(bus, INTSTS);
+
+	snd_hdac_bus_handle_stream_irq(bus, status, skl_stream_update);
+
+	return IRQ_HANDLED;
+}
+
+static int skl_acquire_irq(struct hdac_ext_bus *ebus, int do_disconnect)
+{
+	struct skl *skl = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int ret;
+
+	ret = request_threaded_irq(skl->pci->irq, skl_interrupt,
+			skl_threaded_handler,
+			IRQF_SHARED,
+			KBUILD_MODNAME, ebus);
+	if (ret) {
+		dev_err(bus->dev,
+			"unable to grab IRQ %d, disabling device\n",
+			skl->pci->irq);
+		return ret;
+	}
+
+	bus->irq = skl->pci->irq;
+	pci_intx(skl->pci, 1);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * power management
+ */
+static int skl_suspend(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	snd_hdac_bus_stop_chip(bus);
+	snd_hdac_bus_enter_link_reset(bus);
+
+	return 0;
+}
+
+static int skl_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl *hda = ebus_to_skl(ebus);
+
+	skl_init_pci(hda);
+
+	snd_hdac_bus_init_chip(bus, 1);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int skl_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	dev_dbg(bus->dev, "in %s\n", __func__);
+
+	/* enable controller wake up event */
+	snd_hdac_chip_updatew(bus, WAKEEN, 0, STATESTS_INT_MASK);
+
+	snd_hdac_bus_stop_chip(bus);
+	snd_hdac_bus_enter_link_reset(bus);
+
+	return 0;
+}
+
+static int skl_runtime_resume(struct device *dev)
+{
+	struct pci_dev *pci = to_pci_dev(dev);
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct skl *hda = ebus_to_skl(ebus);
+	int status;
+
+	dev_dbg(bus->dev, "in %s\n", __func__);
+
+	/* Read STATESTS before controller reset */
+	status = snd_hdac_chip_readw(bus, STATESTS);
+
+	skl_init_pci(hda);
+	snd_hdac_bus_init_chip(bus, true);
+	/* disable controller Wake Up event */
+	snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops skl_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(skl_suspend, skl_resume)
+	SET_RUNTIME_PM_OPS(skl_runtime_suspend, skl_runtime_resume, NULL)
+};
+
+/*
+ * destructor
+ */
+static int skl_free(struct hdac_ext_bus *ebus)
+{
+	struct skl *skl  = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+
+	skl->init_failed = 1; /* to be sure */
+
+	snd_hdac_ext_stop_streams(ebus);
+
+	if (bus->irq >= 0)
+		free_irq(bus->irq, (void *)ebus);
+	if (bus->remap_addr)
+		iounmap(bus->remap_addr);
+
+	snd_hdac_bus_free_stream_pages(bus);
+	snd_hdac_stream_free_all(ebus);
+	snd_hdac_link_free_all(ebus);
+	pci_release_regions(skl->pci);
+	pci_disable_device(skl->pci);
+
+	snd_hdac_ext_bus_exit(ebus);
+
+	return 0;
+}
+
+static int skl_dmic_device_register(struct skl *skl)
+{
+	struct hdac_bus *bus = ebus_to_hbus(&skl->ebus);
+	struct platform_device *pdev;
+	int ret;
+
+	/* SKL has one dmic port, so allocate dmic device for this */
+	pdev = platform_device_alloc("dmic-codec", -1);
+	if (!pdev) {
+		dev_err(bus->dev, "failed to allocate dmic device\n");
+		return -ENOMEM;
+	}
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		dev_err(bus->dev, "failed to add dmic device: %d\n", ret);
+		platform_device_put(pdev);
+		return ret;
+	}
+	skl->dmic_dev = pdev;
+
+	return 0;
+}
+
+static void skl_dmic_device_unregister(struct skl *skl)
+{
+	if (skl->dmic_dev)
+		platform_device_unregister(skl->dmic_dev);
+}
+
+/*
+ * Probe the given codec address
+ */
+static int probe_codec(struct hdac_ext_bus *ebus, int addr)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
+		(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
+	unsigned int res;
+
+	mutex_lock(&bus->cmd_mutex);
+	snd_hdac_bus_send_cmd(bus, cmd);
+	snd_hdac_bus_get_response(bus, addr, &res);
+	mutex_unlock(&bus->cmd_mutex);
+	if (res == -1)
+		return -EIO;
+	dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
+
+	return snd_hdac_ext_bus_device_init(ebus, addr);
+}
+
+/* Codec initialization */
+static int skl_codec_create(struct hdac_ext_bus *ebus)
+{
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	int c, max_slots;
+
+	max_slots = HDA_MAX_CODECS;
+
+	/* First try to probe all given codec slots */
+	for (c = 0; c < max_slots; c++) {
+		if ((bus->codec_mask & (1 << c))) {
+			if (probe_codec(ebus, c) < 0) {
+				/*
+				 * Some BIOSen give you wrong codec addresses
+				 * that don't exist
+				 */
+				dev_warn(bus->dev,
+					 "Codec #%d probe error; disabling it...\n", c);
+				bus->codec_mask &= ~(1 << c);
+				/*
+				 * Worse, accessing a non-existing codec
+				 * often screws up the controller bus and
+				 * disturbs further communication. Thus if
+				 * an error occurs during probing, it is
+				 * better to reset the controller bus and
+				 * return to a sane state.
+				 */
+				snd_hdac_bus_stop_chip(bus);
+				snd_hdac_bus_init_chip(bus, true);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static const struct hdac_bus_ops bus_core_ops = {
+	.command = snd_hdac_bus_send_cmd,
+	.get_response = snd_hdac_bus_get_response,
+};
+
+/*
+ * constructor
+ */
+static int skl_create(struct pci_dev *pci,
+		      const struct hdac_io_ops *io_ops,
+		      struct skl **rskl)
+{
+	struct skl *skl;
+	struct hdac_ext_bus *ebus;
+
+	int err;
+
+	*rskl = NULL;
+
+	err = pci_enable_device(pci);
+	if (err < 0)
+		return err;
+
+	skl = devm_kzalloc(&pci->dev, sizeof(*skl), GFP_KERNEL);
+	if (!skl) {
+		pci_disable_device(pci);
+		return -ENOMEM;
+	}
+	ebus = &skl->ebus;
+	snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
+	ebus->bus.use_posbuf = 1;
+	skl->pci = pci;
+
+	ebus->bus.bdl_pos_adj = 0;
+
+	*rskl = skl;
+
+	return 0;
+}
+
+static int skl_first_init(struct hdac_ext_bus *ebus)
+{
+	struct skl *skl = ebus_to_skl(ebus);
+	struct hdac_bus *bus = ebus_to_hbus(ebus);
+	struct pci_dev *pci = skl->pci;
+	int err;
+	unsigned short gcap;
+	int cp_streams, pb_streams, start_idx;
+
+	err = pci_request_regions(pci, "Skylake HD audio");
+	if (err < 0)
+		return err;
+
+	bus->addr = pci_resource_start(pci, 0);
+	bus->remap_addr = pci_ioremap_bar(pci, 0);
+	if (bus->remap_addr == NULL) {
+		dev_err(bus->dev, "ioremap error\n");
+		return -ENXIO;
+	}
+
+	snd_hdac_ext_bus_parse_capabilities(ebus);
+
+	if (skl_acquire_irq(ebus, 0) < 0)
+		return -EBUSY;
+
+	pci_set_master(pci);
+	synchronize_irq(bus->irq);
+
+	gcap = snd_hdac_chip_readw(bus, GCAP);
+	dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);
+
+	/* allow 64bit DMA address if supported by H/W */
+	if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
+		dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
+	} else {
+		dma_set_mask(bus->dev, DMA_BIT_MASK(32));
+		dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
+	}
+
+	/* read number of streams from GCAP register */
+	cp_streams = (gcap >> 8) & 0x0f;
+	pb_streams = (gcap >> 12) & 0x0f;
+
+	if (!pb_streams && !cp_streams)
+		return -EIO;
+
+	ebus->num_streams = cp_streams + pb_streams;
+
+	/* initialize streams */
+	snd_hdac_ext_stream_init_all
+		(ebus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
+	start_idx = cp_streams;
+	snd_hdac_ext_stream_init_all
+		(ebus, start_idx, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
+
+	err = snd_hdac_bus_alloc_stream_pages(bus);
+	if (err < 0)
+		return err;
+
+	/* initialize chip */
+	skl_init_pci(skl);
+
+	snd_hdac_bus_init_chip(bus, true);
+
+	/* codec detection */
+	if (!bus->codec_mask) {
+		dev_err(bus->dev, "no codecs found!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int skl_probe(struct pci_dev *pci,
+		     const struct pci_device_id *pci_id)
+{
+	struct skl *skl;
+	struct hdac_ext_bus *ebus = NULL;
+	struct hdac_bus *bus = NULL;
+	int err;
+
+	/* we use ext core ops, so provide NULL for ops here */
+	err = skl_create(pci, NULL, &skl);
+	if (err < 0)
+		return err;
+
+	ebus = &skl->ebus;
+	bus = ebus_to_hbus(ebus);
+
+	err = skl_first_init(ebus);
+	if (err < 0)
+		goto out_free;
+
+	pci_set_drvdata(skl->pci, ebus);
+
+	/* check if dsp is there */
+	if (ebus->ppcap) {
+		/* TODO register with dsp IPC */
+		dev_dbg(bus->dev, "Register dsp\n");
+	}
+
+	if (ebus->mlcap)
+		snd_hdac_ext_bus_get_ml_capabilities(ebus);
+
+	/* create device for soc dmic */
+	err = skl_dmic_device_register(skl);
+	if (err < 0)
+		goto out_free;
+
+	/* register platform dai and controls */
+	err = skl_platform_register(bus->dev);
+	if (err < 0)
+		goto out_dmic_free;
+
+	/* create codec instances */
+	err = skl_codec_create(ebus);
+	if (err < 0)
+		goto out_unregister;
+
+	/* configure PM */
+	pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
+	pm_runtime_use_autosuspend(bus->dev);
+	pm_runtime_put_noidle(bus->dev);
+	pm_runtime_allow(bus->dev);
+
+	return 0;
+
+out_unregister:
+	skl_platform_unregister(bus->dev);
+out_dmic_free:
+	skl_dmic_device_unregister(skl);
+out_free:
+	skl->init_failed = 1;
+	skl_free(ebus);
+
+	return err;
+}
+
+static void skl_remove(struct pci_dev *pci)
+{
+	struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+	struct skl *skl = ebus_to_skl(ebus);
+
+	if (pci_dev_run_wake(pci))
+		pm_runtime_get_noresume(&pci->dev);
+	pci_dev_put(pci);
+	skl_platform_unregister(&pci->dev);
+	skl_dmic_device_unregister(skl);
+	skl_free(ebus);
+	dev_set_drvdata(&pci->dev, NULL);
+}
+
+/* PCI IDs */
+static const struct pci_device_id skl_ids[] = {
+	/* Sunrise Point-LP */
+	{ PCI_DEVICE(0x8086, 0x9d70), 0},
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, skl_ids);
+
+/* pci_driver definition */
+static struct pci_driver skl_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = skl_ids,
+	.probe = skl_probe,
+	.remove = skl_remove,
+	.driver = {
+		.pm = &skl_pm,
+	},
+};
+module_pci_driver(skl_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Skylake ASoC HDA driver");
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
new file mode 100644
index 0000000..f7fdbb0
--- /dev/null
+++ b/sound/soc/intel/skylake/skl.h
@@ -0,0 +1,84 @@
+/*
+ *  skl.h - HD Audio Skylake definitions.
+ *
+ *  Copyright (C) 2015 Intel Corp
+ *  Author: Jeeja KP <jeeja.kp@intel.com>
+ *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+
+#ifndef __SOUND_SOC_SKL_H
+#define __SOUND_SOC_SKL_H
+
+#include <sound/hda_register.h>
+#include <sound/hdaudio_ext.h>
+#include "skl-nhlt.h"
+
+#define SKL_SUSPEND_DELAY 2000
+
+/* Vendor Specific Registers */
+#define AZX_REG_VS_EM1			0x1000
+#define AZX_REG_VS_INRC			0x1004
+#define AZX_REG_VS_OUTRC		0x1008
+#define AZX_REG_VS_FIFOTRK		0x100C
+#define AZX_REG_VS_FIFOTRK2		0x1010
+#define AZX_REG_VS_EM2			0x1030
+#define AZX_REG_VS_EM3L			0x1038
+#define AZX_REG_VS_EM3U			0x103C
+#define AZX_REG_VS_EM4L			0x1040
+#define AZX_REG_VS_EM4U			0x1044
+#define AZX_REG_VS_LTRC			0x1048
+#define AZX_REG_VS_D0I3C		0x104A
+#define AZX_REG_VS_PCE			0x104B
+#define AZX_REG_VS_L2MAGC		0x1050
+#define AZX_REG_VS_L2LAHPT		0x1054
+#define AZX_REG_VS_SDXDPIB_XBASE	0x1084
+#define AZX_REG_VS_SDXDPIB_XINTERVAL	0x20
+#define AZX_REG_VS_SDXEFIFOS_XBASE	0x1094
+#define AZX_REG_VS_SDXEFIFOS_XINTERVAL	0x20
+
+struct skl {
+	struct hdac_ext_bus ebus;
+	struct pci_dev *pci;
+
+	unsigned int init_failed:1; /* delayed init failed */
+	struct platform_device *dmic_dev;
+
+	void __iomem *nhlt; /* nhlt ptr */
+	struct skl_sst *skl_sst; /* sst skl ctx */
+};
+
+#define skl_to_ebus(s)	(&(s)->ebus)
+#define ebus_to_skl(sbus) \
+	container_of(sbus, struct skl, ebus)
+
+/* to pass dai dma data */
+struct skl_dma_params {
+	u32 format;
+	u8 stream_tag;
+};
+
+int skl_platform_unregister(struct device *dev);
+int skl_platform_register(struct device *dev);
+
+void __iomem *skl_nhlt_init(struct device *dev);
+void skl_nhlt_free(void __iomem *addr);
+struct nhlt_specific_cfg *skl_get_ep_blob(struct skl *skl, u32 instance,
+			u8 link_type, u8 s_fmt, u8 no_ch, u32 s_rate, u8 dirn);
+
+int skl_init_dsp(struct skl *skl);
+void skl_free_dsp(struct skl *skl);
+int skl_suspend_dsp(struct skl *skl);
+int skl_resume_dsp(struct skl *skl);
+#endif /* __SOUND_SOC_SKL_H */
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index 4cf2245..dbfdfe9 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -148,10 +148,14 @@
 	dram = mv_mbus_dram_info();
 	addr = substream->dma_buffer.addr;
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		if (priv->substream_play)
+			return -EBUSY;
 		priv->substream_play = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
 			KIRKWOOD_PLAYBACK_WIN, addr, dram);
 	} else {
+		if (priv->substream_rec)
+			return -EBUSY;
 		priv->substream_rec = substream;
 		kirkwood_dma_conf_mbus_windows(priv->io,
 			KIRKWOOD_RECORD_WIN, addr, dram);
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 2d2536a..684e8a7 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -136,6 +136,7 @@
 
 static struct snd_soc_card mt8173_max98090_card = {
 	.name = "mt8173-max98090",
+	.owner = THIS_MODULE,
 	.dai_link = mt8173_max98090_dais,
 	.num_links = ARRAY_SIZE(mt8173_max98090_dais),
 	.controls = mt8173_max98090_controls,
@@ -202,7 +203,6 @@
 static struct platform_driver mt8173_max98090_driver = {
 	.driver = {
 		   .name = "mt8173-max98090",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mt8173_max98090_dt_match,
 #ifdef CONFIG_PM
 		   .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 6f52eca..86cf975 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -191,6 +191,7 @@
 
 static struct snd_soc_card mt8173_rt5650_rt5676_card = {
 	.name = "mtk-rt5650-rt5676",
+	.owner = THIS_MODULE,
 	.dai_link = mt8173_rt5650_rt5676_dais,
 	.num_links = ARRAY_SIZE(mt8173_rt5650_rt5676_dais),
 	.codec_conf = mt8173_rt5650_rt5676_codec_conf,
@@ -269,7 +270,6 @@
 static struct platform_driver mt8173_rt5650_rt5676_driver = {
 	.driver = {
 		   .name = "mtk-rt5650-rt5676",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mt8173_rt5650_rt5676_dt_match,
 #ifdef CONFIG_PM
 		   .pm = &snd_soc_pm_ops,
diff --git a/sound/soc/mediatek/mtk-afe-common.h b/sound/soc/mediatek/mtk-afe-common.h
index a88b175..cc4393c 100644
--- a/sound/soc/mediatek/mtk-afe-common.h
+++ b/sound/soc/mediatek/mtk-afe-common.h
@@ -98,12 +98,4 @@
 	const struct mtk_afe_irq_data *irqdata;
 };
 
-struct mtk_afe {
-	/* address for ioremap audio hardware register */
-	void __iomem *base_addr;
-	struct device *dev;
-	struct regmap *regmap;
-	struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
-	struct clk *clocks[MTK_CLK_NUM];
-};
 #endif
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index 9863da7..d190fe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -45,18 +45,21 @@
 /* Memory interface */
 #define AFE_DL1_BASE		0x0040
 #define AFE_DL1_CUR		0x0044
+#define AFE_DL1_END		0x0048
 #define AFE_DL2_BASE		0x0050
 #define AFE_DL2_CUR		0x0054
 #define AFE_AWB_BASE		0x0070
 #define AFE_AWB_CUR		0x007c
 #define AFE_VUL_BASE		0x0080
 #define AFE_VUL_CUR		0x008c
+#define AFE_VUL_END		0x0088
 #define AFE_DAI_BASE		0x0090
 #define AFE_DAI_CUR		0x009c
 #define AFE_MOD_PCM_BASE	0x0330
 #define AFE_MOD_PCM_CUR		0x033c
 #define AFE_HDMI_OUT_BASE	0x0374
 #define AFE_HDMI_OUT_CUR	0x0378
+#define AFE_HDMI_OUT_END	0x037c
 
 #define AFE_ADDA2_TOP_CON0	0x0600
 
@@ -127,6 +130,34 @@
 	AFE_TDM_CH_ZERO,
 };
 
+static const unsigned int mtk_afe_backup_list[] = {
+	AUDIO_TOP_CON0,
+	AFE_CONN1,
+	AFE_CONN2,
+	AFE_CONN7,
+	AFE_CONN8,
+	AFE_DAC_CON1,
+	AFE_DL1_BASE,
+	AFE_DL1_END,
+	AFE_VUL_BASE,
+	AFE_VUL_END,
+	AFE_HDMI_OUT_BASE,
+	AFE_HDMI_OUT_END,
+	AFE_HDMI_CONN0,
+	AFE_DAC_CON0,
+};
+
+struct mtk_afe {
+	/* address for ioremap audio hardware register */
+	void __iomem *base_addr;
+	struct device *dev;
+	struct regmap *regmap;
+	struct mtk_afe_memif memif[MTK_AFE_MEMIF_NUM];
+	struct clk *clocks[MTK_CLK_NUM];
+	unsigned int backup_regs[ARRAY_SIZE(mtk_afe_backup_list)];
+	bool suspended;
+};
+
 static const struct snd_pcm_hardware mtk_afe_hardware = {
 	.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 		 SNDRV_PCM_INFO_MMAP_VALID),
@@ -722,11 +753,53 @@
 
 };
 
+static int mtk_afe_runtime_suspend(struct device *dev);
+static int mtk_afe_runtime_resume(struct device *dev);
+
+static int mtk_afe_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int i;
+
+	dev_dbg(afe->dev, "%s\n", __func__);
+	if (pm_runtime_status_suspended(afe->dev) || afe->suspended)
+		return 0;
+
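+	/*
+	 * Back up the AFE registers that must survive the power-down;
+	 * mtk_afe_dai_resume writes them back after runtime resume.
+	 */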
+	for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
+		regmap_read(afe->regmap, mtk_afe_backup_list[i],
+			    &afe->backup_regs[i]);
+
+	afe->suspended = true;
+	mtk_afe_runtime_suspend(afe->dev);
+	return 0;
+}
+
+static int mtk_afe_dai_resume(struct snd_soc_dai *dai)
+{
+	struct mtk_afe *afe = snd_soc_dai_get_drvdata(dai);
+	int i = 0;
+
+	dev_dbg(afe->dev, "%s\n", __func__);
+	if (pm_runtime_status_suspended(afe->dev) || !afe->suspended)
+		return 0;
+
+	mtk_afe_runtime_resume(afe->dev);
+
+	for (i = 0; i < ARRAY_SIZE(mtk_afe_backup_list); i++)
+		regmap_write(afe->regmap, mtk_afe_backup_list[i],
+			     afe->backup_regs[i]);
+
+	afe->suspended = false;
+	return 0;
+}
+
 static struct snd_soc_dai_driver mtk_afe_pcm_dais[] = {
 	/* FE DAIs: memory intefaces to CPU */
 	{
 		.name = "DL1", /* downlink 1 */
 		.id = MTK_AFE_MEMIF_DL1,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.playback = {
 			.stream_name = "DL1",
 			.channels_min = 1,
@@ -738,6 +811,8 @@
 	}, {
 		.name = "VUL", /* voice uplink */
 		.id = MTK_AFE_MEMIF_VUL,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.capture = {
 			.stream_name = "VUL",
 			.channels_min = 1,
@@ -774,6 +849,8 @@
 	{
 		.name = "HDMI",
 		.id = MTK_AFE_MEMIF_HDMI,
+		.suspend = mtk_afe_dai_suspend,
+		.resume = mtk_afe_dai_resume,
 		.playback = {
 			.stream_name = "HDMI",
 			.channels_min = 2,
@@ -820,10 +897,6 @@
 };
 
 static const struct snd_soc_dapm_widget mtk_afe_pcm_widgets[] = {
-	/* Backend DAIs  */
-	SND_SOC_DAPM_AIF_IN("I2S Capture", NULL, 0, SND_SOC_NOPM, 0, 0),
-	SND_SOC_DAPM_AIF_OUT("I2S Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
-
 	/* inter-connections */
 	SND_SOC_DAPM_MIXER("I05", SND_SOC_NOPM, 0, 0, NULL, 0),
 	SND_SOC_DAPM_MIXER("I06", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -855,11 +928,6 @@
 	{ "O10", "I18 Switch", "I18" },
 };
 
-static const struct snd_soc_dapm_widget mtk_afe_hdmi_widgets[] = {
-	/* Backend DAIs  */
-	SND_SOC_DAPM_AIF_OUT("HDMIO Playback", NULL, 0, SND_SOC_NOPM, 0, 0),
-};
-
 static const struct snd_soc_dapm_route mtk_afe_hdmi_routes[] = {
 	{"HDMIO Playback", NULL, "HDMI"},
 };
@@ -874,8 +942,6 @@
 
 static const struct snd_soc_component_driver mtk_afe_hdmi_dai_component = {
 	.name = "mtk-afe-hdmi-dai",
-	.dapm_widgets = mtk_afe_hdmi_widgets,
-	.num_dapm_widgets = ARRAY_SIZE(mtk_afe_hdmi_widgets),
 	.dapm_routes = mtk_afe_hdmi_routes,
 	.num_dapm_routes = ARRAY_SIZE(mtk_afe_hdmi_routes),
 };
@@ -1220,7 +1286,6 @@
 static struct platform_driver mtk_afe_pcm_driver = {
 	.driver = {
 		   .name = "mtk-afe-pcm",
-		   .owner = THIS_MODULE,
 		   .of_match_table = mtk_afe_pcm_dt_match,
 		   .pm = &mtk_afe_pm_ops,
 	},
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 5ae5ca1..e093261 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -308,13 +308,7 @@
 
 static int nuc900_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
-}
-
-static int nuc900_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &nuc900_soc_platform);
 }
 
 static struct platform_driver nuc900_pcm_driver = {
@@ -323,7 +317,6 @@
 	},
 
 	.probe = nuc900_soc_platform_probe,
-	.remove = nuc900_soc_platform_remove,
 };
 
 module_platform_driver(nuc900_pcm_driver);
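
The nuc900 change here, like the mmp, pxa-ssp, pxa2xx-i2s, pxa2xx-pcm and
sh7760 hunks below, switches to the devres-managed registration helper; since
devres unregisters the platform automatically when the device is unbound, the
hand-written .remove callback can simply be dropped. A condensed sketch of the
resulting driver shape, with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

/* placeholder platform driver; the real ops are driver specific */
static struct snd_soc_platform_driver example_soc_platform;

static int example_platform_probe(struct platform_device *pdev)
{
	/* devres drops the registration on unbind, so no .remove is needed */
	return devm_snd_soc_register_platform(&pdev->dev, &example_soc_platform);
}

static struct platform_driver example_pcm_driver = {
	.driver = {
		.name = "example-pcm",
	},
	.probe = example_platform_probe,
};
module_platform_driver(example_pcm_driver);
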
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 30d0109..5185a38 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -24,7 +24,7 @@
 	  component also under DSS HDMI device. A dummy codec is used
 	  as the codec component. The hdmi audio driver also implements
 	  the card and registers it under its own platform device.
-	  The device for the dirver is registered by OMAPDSS hdmi
+	  The device for the driver is registered by OMAPDSS hdmi
 	  driver.
 
 config SND_OMAP_SOC_N810
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 68a1252..c7563e2 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -965,25 +965,15 @@
 	mcbsp->free = true;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
-	if (!res) {
+	if (!res)
 		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		if (!res) {
-			dev_err(mcbsp->dev, "invalid memory resource\n");
-			return -ENOMEM;
-		}
-	}
-	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
-				     dev_name(&pdev->dev))) {
-		dev_err(mcbsp->dev, "memory region already claimed\n");
-		return -ENODEV;
-	}
+
+	mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mcbsp->io_base))
+		return PTR_ERR(mcbsp->io_base);
 
 	mcbsp->phys_base = res->start;
 	mcbsp->reg_cache_size = resource_size(res);
-	mcbsp->io_base = devm_ioremap(&pdev->dev, res->start,
-				      resource_size(res));
-	if (!mcbsp->io_base)
-		return -ENOMEM;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
 	if (!res)
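
The McBSP hunk is the standard devm_ioremap_resource() conversion: the helper
validates the resource (including a NULL one), claims the memory region and
maps it in a single call, returning an ERR_PTR on failure, so the open-coded
devm_request_mem_region()/devm_ioremap() sequence and its error messages go
away. The idiom, on a hypothetical device:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* handles res == NULL, requests the region and maps it */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base ... */
	return 0;
}
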
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index aeef25c..584b237 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -81,7 +81,15 @@
 	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
 					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
 	if (ret < 0) {
-		dev_err(dai->dev, "could not apply constraint\n");
+		dev_err(dai->dev, "Could not apply period constraint: %d\n",
+			ret);
+		return ret;
+	}
+	ret = snd_pcm_hw_constraint_step(substream->runtime, 0,
+					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 128);
+	if (ret < 0) {
+		dev_err(dai->dev, "Could not apply buffer constraint: %d\n",
+			ret);
 		return ret;
 	}
 
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 076bec6..732e749 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -154,8 +154,7 @@
 
 static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
 
 	/* All TWL4030 output pins are floating */
 	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
@@ -174,8 +173,7 @@
 
 static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 {
-	struct snd_soc_codec *codec = rtd->codec;
-	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dapm_context *dapm = &rtd->card->dapm;
 
 	/* Not connected */
 	snd_soc_dapm_nc_pin(dapm, "HSMIC");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 1eb45dc..51e790d 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -232,13 +232,7 @@
 		mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max =
 						pdata->period_max_capture;
 	}
-	return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
-}
-
-static int mmp_pcm_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
 }
 
 static struct platform_driver mmp_pcm_driver = {
@@ -247,7 +241,6 @@
 	},
 
 	.probe = mmp_pcm_probe,
-	.remove = mmp_pcm_remove,
 };
 
 module_platform_driver(mmp_pcm_driver);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index fbe2e93..3da485e 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -813,14 +813,8 @@
 
 static int asoc_ssp_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
-					  &pxa_ssp_dai, 1);
-}
-
-static int asoc_ssp_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
+					       &pxa_ssp_dai, 1);
 }
 
 static struct platform_driver asoc_ssp_driver = {
@@ -830,7 +824,6 @@
 	},
 
 	.probe = asoc_ssp_probe,
-	.remove = asoc_ssp_remove,
 };
 
 module_platform_driver(asoc_ssp_driver);
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index e68290c..6b4e400 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -367,19 +367,12 @@
 
 static int pxa2xx_i2s_drv_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
-					  &pxa_i2s_dai, 1);
-}
-
-static int pxa2xx_i2s_drv_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &pxa_i2s_component,
+					       &pxa_i2s_dai, 1);
 }
 
 static struct platform_driver pxa2xx_i2s_driver = {
 	.probe = pxa2xx_i2s_drv_probe,
-	.remove = pxa2xx_i2s_drv_remove,
 
 	.driver = {
 		.name = "pxa2xx-i2s",
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index a51c9da..831ee37 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -124,13 +124,7 @@
 
 static int pxa2xx_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
-}
-
-static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &pxa2xx_soc_platform);
 }
 
 #ifdef CONFIG_OF
@@ -147,7 +141,6 @@
 	},
 
 	.probe = pxa2xx_soc_platform_probe,
-	.remove = pxa2xx_soc_platform_remove,
 };
 
 module_platform_driver(pxa_pcm_driver);
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 807fedf..3cc252e 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -1,5 +1,6 @@
 config SND_SOC_QCOM
 	tristate "ASoC support for QCOM platforms"
+	depends on ARCH_QCOM || COMPILE_TEST
 	help
           Say Y or M if you want to add support for audio devices
           in Qualcomm Technologies SoC-based platforms.
@@ -14,19 +15,17 @@
 
 config SND_SOC_LPASS_IPQ806X
 	tristate
-	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_CPU
 	select SND_SOC_LPASS_PLATFORM
 
 config SND_SOC_LPASS_APQ8016
 	tristate
-	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_CPU
 	select SND_SOC_LPASS_PLATFORM
 
 config SND_SOC_STORM
 	tristate "ASoC I2S support for Storm boards"
-	depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST)
+	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_IPQ806X
 	select SND_SOC_MAX98357A
 	help
@@ -35,7 +34,7 @@
 
 config SND_SOC_APQ8016_SBC
 	tristate "SoC Audio support for APQ8016 SBC platforms"
-	depends on SND_SOC_QCOM && (ARCH_QCOM || COMPILE_TEST)
+	depends on SND_SOC_QCOM
 	select SND_SOC_LPASS_APQ8016
 	help
           Support for Qualcomm Technologies LPASS audio block in
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 23f3d59..97bc202 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -235,7 +235,7 @@
 	return ret;
 }
 
-struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
+const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
 	.set_sysclk	= lpass_cpu_daiops_set_sysclk,
 	.startup	= lpass_cpu_daiops_startup,
 	.shutdown	= lpass_cpu_daiops_shutdown,
diff --git a/sound/soc/qcom/lpass-ipq806x.c b/sound/soc/qcom/lpass-ipq806x.c
index 7356d3a..7a41679 100644
--- a/sound/soc/qcom/lpass-ipq806x.c
+++ b/sound/soc/qcom/lpass-ipq806x.c
@@ -73,7 +73,7 @@
 	return 0;
 }
 
-struct lpass_variant ipq806x_data = {
+static struct lpass_variant ipq806x_data = {
 	.i2sctrl_reg_base	= 0x0010,
 	.i2sctrl_reg_stride	= 0x04,
 	.i2s_ports		= 5,
diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
index d6e86c1..0b63e2e 100644
--- a/sound/soc/qcom/lpass.h
+++ b/sound/soc/qcom/lpass.h
@@ -93,6 +93,6 @@
 int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev);
 int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev);
 int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai);
-extern struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
+extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
 
 #endif /* __LPASS_H__ */
diff --git a/sound/soc/rockchip/Kconfig b/sound/soc/rockchip/Kconfig
index e181826..58bae8e 100644
--- a/sound/soc/rockchip/Kconfig
+++ b/sound/soc/rockchip/Kconfig
@@ -14,3 +14,22 @@
 	  Say Y or M if you want to add support for I2S driver for
 	  Rockchip I2S device. The device supports up to a maximum of
 	  8 channels each for playback and record.
+
+config SND_SOC_ROCKCHIP_MAX98090
+	tristate "ASoC support for Rockchip boards using a MAX98090 codec"
+	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
+	select SND_SOC_ROCKCHIP_I2S
+	select SND_SOC_MAX98090
+	select SND_SOC_TS3A227E
+	help
+	  Say Y or M here if you want to add support for SoC audio on Rockchip
+	  boards using the MAX98090 codec, such as Veyron.
+
+config SND_SOC_ROCKCHIP_RT5645
+	tristate "ASoC support for Rockchip boards using a RT5645/RT5650 codec"
+	depends on SND_SOC_ROCKCHIP && I2C && GPIOLIB
+	select SND_SOC_ROCKCHIP_I2S
+	select SND_SOC_RT5645
+	help
+	  Say Y or M here if you want to add support for SoC audio on Rockchip
+	  boards using the RT5645/RT5650 codec, such as Veyron.
diff --git a/sound/soc/rockchip/Makefile b/sound/soc/rockchip/Makefile
index b921909..1bc1dc3 100644
--- a/sound/soc/rockchip/Makefile
+++ b/sound/soc/rockchip/Makefile
@@ -2,3 +2,9 @@
 snd-soc-i2s-objs := rockchip_i2s.o
 
 obj-$(CONFIG_SND_SOC_ROCKCHIP_I2S) += snd-soc-i2s.o
+
+snd-soc-rockchip-max98090-objs := rockchip_max98090.o
+snd-soc-rockchip-rt5645-objs := rockchip_rt5645.o
+
+obj-$(CONFIG_SND_SOC_ROCKCHIP_MAX98090) += snd-soc-rockchip-max98090.o
+obj-$(CONFIG_SND_SOC_ROCKCHIP_RT5645) += snd-soc-rockchip-rt5645.o
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index acb5be5..b936102 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -483,16 +483,14 @@
 		goto err_suspend;
 	}
 
-	ret = snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not register PCM\n");
-		goto err_pcm_register;
+		return ret;
 	}
 
 	return 0;
 
-err_pcm_register:
-	snd_dmaengine_pcm_unregister(&pdev->dev);
 err_suspend:
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		i2s_runtime_suspend(&pdev->dev);
@@ -512,8 +510,6 @@
 
 	clk_disable_unprepare(i2s->mclk);
 	clk_disable_unprepare(i2s->hclk);
-	snd_dmaengine_pcm_unregister(&pdev->dev);
-	snd_soc_unregister_component(&pdev->dev);
 
 	return 0;
 }
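
With devm_snd_dmaengine_pcm_register() the rockchip_i2s probe no longer has to
unwind the PCM registration by hand; the only error label left deals with what
devres does not manage, in this case runtime PM. A condensed sketch of that
mixed manual/devres error handling, with placeholder component and DAI
definitions:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

/* placeholder component/DAI descriptions for illustration */
static const struct snd_soc_component_driver example_component;
static struct snd_soc_dai_driver example_dai;

static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = devm_snd_soc_register_component(&pdev->dev, &example_component,
					      &example_dai, 1);
	if (ret)
		goto err_pm;

	ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
	if (ret)
		goto err_pm;		/* devres needs no manual unwind */

	return 0;

err_pm:
	pm_runtime_disable(&pdev->dev);
	return ret;
}
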
diff --git a/sound/soc/rockchip/rockchip_max98090.c b/sound/soc/rockchip/rockchip_max98090.c
new file mode 100644
index 0000000..26567b1
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_max98090.c
@@ -0,0 +1,236 @@
+/*
+ * Rockchip machine ASoC driver for boards using a MAX98090 CODEC.
+ *
+ * Copyright (c) 2014, ROCKCHIP CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "rockchip_i2s.h"
+#include "../codecs/ts3a227e.h"
+
+#define DRV_NAME "rockchip-snd-max98090"
+
+static struct snd_soc_jack headset_jack;
+static struct snd_soc_jack_pin headset_jack_pins[] = {
+	{
+		.pin = "Headset Jack",
+		.mask = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+			SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			SND_JACK_BTN_2 | SND_JACK_BTN_3,
+	},
+};
+
+static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+};
+
+static const struct snd_soc_dapm_route rk_audio_map[] = {
+	{"IN34", NULL, "Headset Mic"},
+	{"IN34", NULL, "MICBIAS"},
+	{"MICBIAS", NULL, "Headset Mic"},
+	{"DMICL", NULL, "Int Mic"},
+	{"Headphone", NULL, "HPL"},
+	{"Headphone", NULL, "HPR"},
+	{"Speaker", NULL, "SPKL"},
+	{"Speaker", NULL, "SPKR"},
+};
+
+static const struct snd_kcontrol_new rk_mc_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Headphone"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+	SOC_DAPM_PIN_SWITCH("Speaker"),
+};
+
+static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int mclk;
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		mclk = 12288000;
+		break;
+	case 44100:
+		mclk = 11289600;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+				     SND_SOC_CLOCK_OUT);
+	if (ret < 0) {
+		dev_err(cpu_dai->dev, "Can't set cpu dai clock %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rk_init(struct snd_soc_pcm_runtime *runtime)
+{
+	/* Enable Headset and 4 Buttons Jack detection */
+	return snd_soc_card_jack_new(runtime->card, "Headset Jack",
+			       SND_JACK_HEADSET |
+			       SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+			       SND_JACK_BTN_2 | SND_JACK_BTN_3,
+			       &headset_jack,
+			       headset_jack_pins,
+			       ARRAY_SIZE(headset_jack_pins));
+}
+
+static int rk_98090_headset_init(struct snd_soc_component *component)
+{
+	return ts3a227e_enable_jack_detect(component, &headset_jack);
+}
+
+static struct snd_soc_ops rk_aif1_ops = {
+	.hw_params = rk_aif1_hw_params,
+};
+
+static struct snd_soc_aux_dev rk_98090_headset_dev = {
+	.name = "Headset Chip",
+	.init = rk_98090_headset_init,
+};
+
+static struct snd_soc_dai_link rk_dailink = {
+	.name = "max98090",
+	.stream_name = "Audio",
+	.codec_dai_name = "HiFi",
+	.init = rk_init,
+	.ops = &rk_aif1_ops,
+	/* set max98090 as slave */
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_card_rk = {
+	.name = "ROCKCHIP-I2S",
+	.owner = THIS_MODULE,
+	.dai_link = &rk_dailink,
+	.num_links = 1,
+	.aux_dev = &rk_98090_headset_dev,
+	.num_aux_devs = 1,
+	.dapm_widgets = rk_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
+	.dapm_routes = rk_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(rk_audio_map),
+	.controls = rk_mc_controls,
+	.num_controls = ARRAY_SIZE(rk_mc_controls),
+};
+
+static int snd_rk_mc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card = &snd_soc_card_rk;
+	struct device_node *np = pdev->dev.of_node;
+
+	/* register the soc card */
+	card->dev = &pdev->dev;
+
+	rk_dailink.codec_of_node = of_parse_phandle(np,
+			"rockchip,audio-codec", 0);
+	if (!rk_dailink.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.cpu_of_node = of_parse_phandle(np,
+			"rockchip,i2s-controller", 0);
+	if (!rk_dailink.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,i2s-controller' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
+
+	rk_98090_headset_dev.codec_of_node = of_parse_phandle(np,
+			"rockchip,headset-codec", 0);
+	if (!rk_98090_headset_dev.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,headset-codec' missing/invalid\n");
+		return -EINVAL;
+	}
+
+	ret = snd_soc_of_parse_card_name(card, "rockchip,model");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc parse card name failed %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc register card failed %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id rockchip_max98090_of_match[] = {
+	{ .compatible = "rockchip,rockchip-audio-max98090", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_max98090_of_match);
+
+static struct platform_driver snd_rk_mc_driver = {
+	.probe = snd_rk_mc_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = rockchip_max98090_of_match,
+	},
+};
+
+module_platform_driver(snd_rk_mc_driver);
+
+MODULE_AUTHOR("jianqun <jay.xu@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip max98090 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
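
In the hw_params callback of this new machine driver the master clock is
chosen per rate family and then programmed as a clock output on the I2S
controller and a clock input on the codec: the 8/16/48/96 kHz rates share
12.288 MHz (256 x 48 kHz) and 44.1 kHz gets 11.2896 MHz (256 x 44.1 kHz). A
standalone sketch of the same rate-to-mclk mapping:

#include <stdio.h>

/* 12.288 MHz for the 48 kHz family, 11.2896 MHz for 44.1 kHz */
static int rate_to_mclk(unsigned int rate)
{
	switch (rate) {
	case 8000:
	case 16000:
	case 48000:
	case 96000:
		return 12288000;
	case 44100:
		return 11289600;
	default:
		return -1;	/* rate not handled by the machine driver */
	}
}

int main(void)
{
	unsigned int rates[] = { 8000, 44100, 48000, 96000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%u Hz -> mclk %d Hz\n", rates[i], rate_to_mclk(rates[i]));

	return 0;
}
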
diff --git a/sound/soc/rockchip/rockchip_rt5645.c b/sound/soc/rockchip/rockchip_rt5645.c
new file mode 100644
index 0000000..68c62e4
--- /dev/null
+++ b/sound/soc/rockchip/rockchip_rt5645.c
@@ -0,0 +1,225 @@
+/*
+ * Rockchip machine ASoC driver for boards using a RT5645/RT5650 CODEC.
+ *
+ * Copyright (c) 2015, ROCKCHIP CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "rockchip_i2s.h"
+
+#define DRV_NAME "rockchip-snd-rt5645"
+
+static struct snd_soc_jack headset_jack;
+
+/* Jack detect via rt5645 driver. */
+extern int rt5645_set_jack_detect(struct snd_soc_codec *codec,
+	struct snd_soc_jack *hp_jack, struct snd_soc_jack *mic_jack,
+	struct snd_soc_jack *btn_jack);
+
+static const struct snd_soc_dapm_widget rk_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphones", NULL),
+	SND_SOC_DAPM_SPK("Speakers", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Int Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route rk_audio_map[] = {
+	/* Input Lines */
+	{"DMIC L2", NULL, "Int Mic"},
+	{"DMIC R2", NULL, "Int Mic"},
+	{"RECMIXL", NULL, "Headset Mic"},
+	{"RECMIXR", NULL, "Headset Mic"},
+
+	/* Output Lines */
+	{"Headphones", NULL, "HPOR"},
+	{"Headphones", NULL, "HPOL"},
+	{"Speakers", NULL, "SPOL"},
+	{"Speakers", NULL, "SPOR"},
+};
+
+static const struct snd_kcontrol_new rk_mc_controls[] = {
+	SOC_DAPM_PIN_SWITCH("Headphones"),
+	SOC_DAPM_PIN_SWITCH("Speakers"),
+	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+	SOC_DAPM_PIN_SWITCH("Int Mic"),
+};
+
+static int rk_aif1_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	int ret = 0;
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int mclk;
+
+	switch (params_rate(params)) {
+	case 8000:
+	case 16000:
+	case 48000:
+	case 96000:
+		mclk = 12288000;
+		break;
+	case 44100:
+		mclk = 11289600;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = snd_soc_dai_set_sysclk(cpu_dai, 0, mclk,
+				     SND_SOC_CLOCK_OUT);
+	if (ret < 0) {
+		dev_err(cpu_dai->dev, "Can't set cpu dai clock %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
+				     SND_SOC_CLOCK_IN);
+	if (ret < 0) {
+		dev_err(codec_dai->dev, "Can't set codec clock %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static int rk_init(struct snd_soc_pcm_runtime *runtime)
+{
+	struct snd_soc_card *card = runtime->card;
+	int ret;
+
+	/* Enable Headset and 4 Buttons Jack detection */
+	ret = snd_soc_card_jack_new(card, "Headset Jack",
+				    SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+				    SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+				    SND_JACK_BTN_2 | SND_JACK_BTN_3,
+				    &headset_jack, NULL, 0);
+	if (ret) {
+		dev_err(card->dev, "New Headset Jack failed! (%d)\n", ret);
+		return ret;
+	}
+
+	return rt5645_set_jack_detect(runtime->codec,
+				     &headset_jack,
+				     &headset_jack,
+				     &headset_jack);
+}
+
+static struct snd_soc_ops rk_aif1_ops = {
+	.hw_params = rk_aif1_hw_params,
+};
+
+static struct snd_soc_dai_link rk_dailink = {
+	.name = "rt5645",
+	.stream_name = "rt5645 PCM",
+	.codec_dai_name = "rt5645-aif1",
+	.init = rk_init,
+	.ops = &rk_aif1_ops,
+	/* set rt5645 as slave */
+	.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+		SND_SOC_DAIFMT_CBS_CFS,
+};
+
+static struct snd_soc_card snd_soc_card_rk = {
+	.name = "I2S-RT5650",
+	.owner = THIS_MODULE,
+	.dai_link = &rk_dailink,
+	.num_links = 1,
+	.dapm_widgets = rk_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(rk_dapm_widgets),
+	.dapm_routes = rk_audio_map,
+	.num_dapm_routes = ARRAY_SIZE(rk_audio_map),
+	.controls = rk_mc_controls,
+	.num_controls = ARRAY_SIZE(rk_mc_controls),
+};
+
+static int snd_rk_mc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct snd_soc_card *card = &snd_soc_card_rk;
+	struct device_node *np = pdev->dev.of_node;
+
+	/* register the soc card */
+	card->dev = &pdev->dev;
+
+	rk_dailink.codec_of_node = of_parse_phandle(np,
+			"rockchip,audio-codec", 0);
+	if (!rk_dailink.codec_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,audio-codec' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.cpu_of_node = of_parse_phandle(np,
+			"rockchip,i2s-controller", 0);
+	if (!rk_dailink.cpu_of_node) {
+		dev_err(&pdev->dev,
+			"Property 'rockchip,i2s-controller' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	rk_dailink.platform_of_node = rk_dailink.cpu_of_node;
+
+	ret = snd_soc_of_parse_card_name(card, "rockchip,model");
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc parse card name failed %d\n", ret);
+		return ret;
+	}
+
+	ret = devm_snd_soc_register_card(&pdev->dev, card);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"Soc register card failed %d\n", ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id rockchip_rt5645_of_match[] = {
+	{ .compatible = "rockchip,rockchip-audio-rt5645", },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, rockchip_rt5645_of_match);
+
+static struct platform_driver snd_rk_mc_driver = {
+	.probe = snd_rk_mc_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.pm = &snd_soc_pm_ops,
+		.of_match_table = rockchip_rt5645_of_match,
+	},
+};
+
+module_platform_driver(snd_rk_mc_driver);
+
+MODULE_AUTHOR("Xing Zheng <zhengxing@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip rt5645 machine ASoC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
index 8bf2e2c..ee1fda9 100644
--- a/sound/soc/samsung/arndale_rt5631.c
+++ b/sound/soc/samsung/arndale_rt5631.c
@@ -71,6 +71,7 @@
 
 static struct snd_soc_card arndale_rt5631 = {
 	.name = "Arndale RT5631",
+	.owner = THIS_MODULE,
 	.dai_link = arndale_rt5631_dai,
 	.num_links = ARRAY_SIZE(arndale_rt5631_dai),
 };
@@ -116,15 +117,6 @@
 	return ret;
 }
 
-static int arndale_audio_remove(struct platform_device *pdev)
-{
-	struct snd_soc_card *card = platform_get_drvdata(pdev);
-
-	snd_soc_unregister_card(card);
-
-	return 0;
-}
-
 static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
 	{ .compatible = "samsung,arndale-rt5631", },
 	{ .compatible = "samsung,arndale-alc5631", },
@@ -139,7 +131,6 @@
 		.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
 	},
 	.probe = arndale_audio_probe,
-	.remove = arndale_audio_remove,
 };
 
 module_platform_driver(arndale_audio_driver);
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 7651dc9..07ce2cf 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -56,6 +56,7 @@
 
 static struct snd_soc_card snow_snd = {
 	.name = "Snow-I2S",
+	.owner = THIS_MODULE,
 	.dai_link = snow_dai,
 	.num_links = ARRAY_SIZE(snow_dai),
 
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c
index fd11404..8fad444 100644
--- a/sound/soc/sh/dma-sh7760.c
+++ b/sound/soc/sh/dma-sh7760.c
@@ -327,13 +327,7 @@
 
 static int sh7760_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
-}
-
-static int sh7760_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev, &sh7760_soc_platform);
 }
 
 static struct platform_driver sh7760_pcm_driver = {
@@ -342,7 +336,6 @@
 	},
 
 	.probe = sh7760_soc_platform_probe,
-	.remove = sh7760_soc_platform_remove,
 };
 
 module_platform_driver(sh7760_pcm_driver);
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 142c066..0215c78 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1911,7 +1911,6 @@
 
 static const struct platform_device_id fsi_id_table[] = {
 	{ "sh_fsi",	(kernel_ulong_t)&fsi1_core },
-	{ "sh_fsi2",	(kernel_ulong_t)&fsi2_core },
 	{},
 };
 MODULE_DEVICE_TABLE(platform, fsi_id_table);
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
index f1b4451..8b25850 100644
--- a/sound/soc/sh/rcar/Makefile
+++ b/sound/soc/sh/rcar/Makefile
@@ -1,4 +1,4 @@
-snd-soc-rcar-objs	:= core.o gen.o dma.o src.o adg.o ssi.o dvc.o
+snd-soc-rcar-objs	:= core.o gen.o dma.o adg.o ssi.o src.o ctu.o mix.o dvc.o
 obj-$(CONFIG_SND_SOC_RCAR)	+= snd-soc-rcar.o
 
 snd-soc-rsrc-card-objs	:= rsrc-card.o
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f1e5920..f3feed5 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -203,9 +203,9 @@
 }
 
 /*
- *	settting function
+ *	ADINR function
  */
-u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
 {
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
@@ -227,6 +227,64 @@
 	return adinr;
 }
 
+u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+{
+	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	u32 chan = runtime->channels;
+
+	switch (chan) {
+	case 1:
+	case 2:
+	case 4:
+	case 6:
+	case 8:
+		break;
+	default:
+		dev_warn(dev, "unsupported channel count\n");
+		chan = 0;
+		break;
+	}
+
+	return chan;
+}
+
+/*
+ *	DALIGN function
+ */
+u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+{
+	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+	struct rsnd_mod *target = src ? src : ssi;
+	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+	u32 val = 0x76543210;
+	u32 mask = ~0;
+
+	mask <<= runtime->channels * 4;
+	val = val & mask;
+
+	switch (runtime->sample_bits) {
+	case 16:
+		val |= 0x67452301 & ~mask;
+		break;
+	case 32:
+		val |= 0x76543210 & ~mask;
+		break;
+	}
+
+	/*
+	 * Exchange channels on SRC if possible;
+	 * otherwise the R/L volume settings on DVC
+	 * end up applied to the swapped channels.
+	 */
+	if (mod == target)
+		return val;
+	else
+		return 0x76543210;
+}
+
 /*
  *	rsnd_dai functions
  */
@@ -242,9 +300,9 @@
 	if (val == __rsnd_mod_call_##func) {				\
 		called = 1;						\
 		ret = (mod)->ops->func(mod, io, param);			\
-		mod->status = (mod->status & ~mask) +			\
-			(add << __rsnd_mod_shift_##func);		\
 	}								\
+	mod->status = (mod->status & ~mask) +				\
+		(add << __rsnd_mod_shift_##func);			\
 	dev_dbg(dev, "%s[%d] 0x%08x %s\n",				\
 		rsnd_mod_name(mod), rsnd_mod_id(mod), mod->status,	\
 		called ? #func : "");					\
@@ -274,21 +332,21 @@
 static int rsnd_dai_connect(struct rsnd_mod *mod,
 			    struct rsnd_dai_stream *io)
 {
+	struct rsnd_priv *priv;
+	struct device *dev;
+
 	if (!mod)
 		return -EIO;
 
-	if (io->mod[mod->type]) {
-		struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-		struct device *dev = rsnd_priv_to_dev(priv);
-
-		dev_err(dev, "%s[%d] is not empty\n",
-			rsnd_mod_name(mod),
-			rsnd_mod_id(mod));
-		return -EIO;
-	}
+	priv = rsnd_mod_to_priv(mod);
+	dev = rsnd_priv_to_dev(priv);
 
 	io->mod[mod->type] = mod;
 
+	dev_dbg(dev, "%s[%d] is connected to io (%s)\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod),
+		rsnd_io_is_play(io) ? "Playback" : "Capture");
+
 	return 0;
 }
 
@@ -517,7 +575,7 @@
 	.set_fmt	= rsnd_soc_dai_set_fmt,
 };
 
-#define rsnd_path_parse(priv, io, type)				\
+#define rsnd_path_add(priv, io, type)				\
 ({								\
 	struct rsnd_mod *mod;					\
 	int ret = 0;						\
@@ -533,7 +591,7 @@
 	ret;							\
 })
 
-#define rsnd_path_break(priv, io, type)				\
+#define rsnd_path_remove(priv, io, type)			\
 {								\
 	struct rsnd_mod *mod;					\
 	int id = -1;						\
@@ -547,6 +605,79 @@
 	}							\
 }
 
+void rsnd_path_parse(struct rsnd_priv *priv,
+		     struct rsnd_dai_stream *io)
+{
+	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
+	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
+	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *cmd;
+	struct device *dev = rsnd_priv_to_dev(priv);
+	u32 data;
+
+	/* Gen1 is not supported */
+	if (rsnd_is_gen1(priv))
+		return;
+
+	if (!mix && !dvc)
+		return;
+
+	if (mix) {
+		struct rsnd_dai *rdai;
+		int i;
+		u32 path[] = {
+			[0] = 0,
+			[1] = 1 << 0,
+			[2] = 0,
+			[3] = 0,
+			[4] = 0,
+			[5] = 1 << 8
+		};
+
+		/*
+		 * It is assumed that the system integrator understands the
+		 * data path. Impossible connections, such as src2 + src5,
+		 * are not checked here.
+		 */
+		data = 0;
+		for_each_rsnd_dai(rdai, priv, i) {
+			io = &rdai->playback;
+			if (mix == rsnd_io_to_mod_mix(io))
+				data |= path[rsnd_mod_id(src)];
+
+			io = &rdai->capture;
+			if (mix == rsnd_io_to_mod_mix(io))
+				data |= path[rsnd_mod_id(src)];
+		}
+
+		/*
+		 * We can't use ctu = rsnd_io_ctu() here, since the IDs
+		 * of dvc/mix are 0 or 1 (= same as the CMD number),
+		 * but the ctu IDs are 0 - 7 (= CTU00 - CTU13).
+		 */
+		cmd = mix;
+	} else {
+		u32 path[] = {
+			[0] = 0x30000,
+			[1] = 0x30001,
+			[2] = 0x40000,
+			[3] = 0x10000,
+			[4] = 0x20000,
+			[5] = 0x40100
+		};
+
+		data = path[rsnd_mod_id(src)];
+
+		cmd = dvc;
+	}
+
+	dev_dbg(dev, "ctu/mix path = 0x%08x\n", data);
+
+	rsnd_mod_write(cmd, CMD_ROUTE_SLCT, data);
+
+	rsnd_mod_write(cmd, CMD_CTRL, 0x10);
+}
+
 static int rsnd_path_init(struct rsnd_priv *priv,
 			  struct rsnd_dai *rdai,
 			  struct rsnd_dai_stream *io)
@@ -564,18 +695,28 @@
 	 * using fixed path.
 	 */
 
-	/* SRC */
-	ret = rsnd_path_parse(priv, io, src);
+	/* SSI */
+	ret = rsnd_path_add(priv, io, ssi);
 	if (ret < 0)
 		return ret;
 
-	/* SSI */
-	ret = rsnd_path_parse(priv, io, ssi);
+	/* SRC */
+	ret = rsnd_path_add(priv, io, src);
+	if (ret < 0)
+		return ret;
+
+	/* CTU */
+	ret = rsnd_path_add(priv, io, ctu);
+	if (ret < 0)
+		return ret;
+
+	/* MIX */
+	ret = rsnd_path_add(priv, io, mix);
 	if (ret < 0)
 		return ret;
 
 	/* DVC */
-	ret = rsnd_path_parse(priv, io, dvc);
+	ret = rsnd_path_add(priv, io, dvc);
 	if (ret < 0)
 		return ret;
 
@@ -589,13 +730,15 @@
 	struct device_node *dai_node,	*dai_np;
 	struct device_node *ssi_node,	*ssi_np;
 	struct device_node *src_node,	*src_np;
+	struct device_node *ctu_node,	*ctu_np;
+	struct device_node *mix_node,	*mix_np;
 	struct device_node *dvc_node,	*dvc_np;
 	struct device_node *playback, *capture;
 	struct rsnd_dai_platform_info *dai_info;
 	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
 	struct device *dev = &pdev->dev;
 	int nr, i;
-	int dai_i, ssi_i, src_i, dvc_i;
+	int dai_i, ssi_i, src_i, ctu_i, mix_i, dvc_i;
 
 	if (!of_data)
 		return;
@@ -621,6 +764,8 @@
 
 	ssi_node = of_get_child_by_name(dev->of_node, "rcar_sound,ssi");
 	src_node = of_get_child_by_name(dev->of_node, "rcar_sound,src");
+	ctu_node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
+	mix_node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
 	dvc_node = of_get_child_by_name(dev->of_node, "rcar_sound,dvc");
 
 #define mod_parse(name)							\
@@ -657,6 +802,8 @@
 
 			mod_parse(ssi);
 			mod_parse(src);
+			mod_parse(ctu);
+			mod_parse(mix);
 			mod_parse(dvc);
 
 			of_node_put(playback);
@@ -1033,8 +1180,8 @@
 		/*
 		 * remove SRC/DVC from DAI,
 		 */
-		rsnd_path_break(priv, io, src);
-		rsnd_path_break(priv, io, dvc);
+		rsnd_path_remove(priv, io, src);
+		rsnd_path_remove(priv, io, dvc);
 
 		/*
 		 * fallback
@@ -1069,6 +1216,8 @@
 		rsnd_dma_probe,
 		rsnd_ssi_probe,
 		rsnd_src_probe,
+		rsnd_ctu_probe,
+		rsnd_mix_probe,
 		rsnd_dvc_probe,
 		rsnd_adg_probe,
 		rsnd_dai_probe,
@@ -1164,6 +1313,8 @@
 			      struct rsnd_priv *priv) = {
 		rsnd_ssi_remove,
 		rsnd_src_remove,
+		rsnd_ctu_remove,
+		rsnd_mix_remove,
 		rsnd_dvc_remove,
 	};
 	int ret = 0, i;
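
rsnd_get_dalign(), added to core.c above, computes the data-alignment
(DALIGN) value: the byte lanes default to the identity layout 0x76543210, and
for 16-bit samples the nibbles belonging to the channels in use are swapped
pairwise (0x67452301) so left/right data lands in the right lanes; the swap is
applied only on the SRC (or the SSI when no SRC is present), while every other
module keeps the identity value. A standalone sketch of the arithmetic, using
a 64-bit shift so channel counts up to 8 stay well defined:

#include <stdio.h>

static unsigned int dalign(unsigned int channels, unsigned int sample_bits)
{
	/* clear the nibbles of the channels in use ... */
	unsigned int mask = (unsigned int)(~0ull << (channels * 4));
	unsigned int val = 0x76543210 & mask;

	/* ... and refill them: swapped pairs for 16-bit, identity for 32-bit */
	if (sample_bits == 16)
		val |= 0x67452301 & ~mask;
	else
		val |= 0x76543210 & ~mask;

	return val;
}

int main(void)
{
	printf("2ch/16bit: 0x%08x\n", dalign(2, 16));	/* 0x76543201 */
	printf("2ch/32bit: 0x%08x\n", dalign(2, 32));	/* 0x76543210 */
	printf("6ch/16bit: 0x%08x\n", dalign(6, 16));	/* 0x76452301 */
	return 0;
}
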
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
new file mode 100644
index 0000000..05498bb
--- /dev/null
+++ b/sound/soc/sh/rcar/ctu.c
@@ -0,0 +1,171 @@
+/*
+ * ctu.c
+ *
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+#define CTU_NAME_SIZE	16
+#define CTU_NAME "ctu"
+
+struct rsnd_ctu {
+	struct rsnd_ctu_platform_info *info; /* rcar_snd.h */
+	struct rsnd_mod mod;
+};
+
+#define rsnd_ctu_nr(priv) ((priv)->ctu_nr)
+#define for_each_rsnd_ctu(pos, priv, i)					\
+	for ((i) = 0;							\
+	     ((i) < rsnd_ctu_nr(priv)) &&				\
+		     ((pos) = (struct rsnd_ctu *)(priv)->ctu + i);	\
+	     i++)
+
+#define rsnd_ctu_initialize_lock(mod)	__rsnd_ctu_initialize_lock(mod, 1)
+#define rsnd_ctu_initialize_unlock(mod)	__rsnd_ctu_initialize_lock(mod, 0)
+static void __rsnd_ctu_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, CTU_CTUIR, enable);
+}
+
+static int rsnd_ctu_init(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_start(mod);
+
+	rsnd_ctu_initialize_lock(mod);
+
+	rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
+
+	rsnd_ctu_initialize_unlock(mod);
+
+	return 0;
+}
+
+static int rsnd_ctu_quit(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_stop(mod);
+
+	return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ctu_ops = {
+	.name		= CTU_NAME,
+	.init		= rsnd_ctu_init,
+	.quit		= rsnd_ctu_quit,
+};
+
+struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id)
+{
+	if (WARN_ON(id < 0 || id >= rsnd_ctu_nr(priv)))
+		id = 0;
+
+	return &((struct rsnd_ctu *)(priv->ctu) + id)->mod;
+}
+
+static void rsnd_of_parse_ctu(struct platform_device *pdev,
+		       const struct rsnd_of_data *of_data,
+		       struct rsnd_priv *priv)
+{
+	struct device_node *node;
+	struct rsnd_ctu_platform_info *ctu_info;
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = &pdev->dev;
+	int nr;
+
+	if (!of_data)
+		return;
+
+	node = of_get_child_by_name(dev->of_node, "rcar_sound,ctu");
+	if (!node)
+		return;
+
+	nr = of_get_child_count(node);
+	if (!nr)
+		goto rsnd_of_parse_ctu_end;
+
+	ctu_info = devm_kzalloc(dev,
+				sizeof(struct rsnd_ctu_platform_info) * nr,
+				GFP_KERNEL);
+	if (!ctu_info) {
+		dev_err(dev, "ctu info allocation error\n");
+		goto rsnd_of_parse_ctu_end;
+	}
+
+	info->ctu_info		= ctu_info;
+	info->ctu_info_nr	= nr;
+
+rsnd_of_parse_ctu_end:
+	of_node_put(node);
+
+}
+
+int rsnd_ctu_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv)
+{
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_ctu *ctu;
+	struct clk *clk;
+	char name[CTU_NAME_SIZE];
+	int i, nr, ret;
+
+	/* This driver doesn't support Gen1 at this point */
+	if (rsnd_is_gen1(priv)) {
+		dev_warn(dev, "CTU is not supported on Gen1\n");
+		return -EINVAL;
+	}
+
+	rsnd_of_parse_ctu(pdev, of_data, priv);
+
+	nr = info->ctu_info_nr;
+	if (!nr)
+		return 0;
+
+	ctu = devm_kzalloc(dev, sizeof(*ctu) * nr, GFP_KERNEL);
+	if (!ctu)
+		return -ENOMEM;
+
+	priv->ctu_nr	= nr;
+	priv->ctu	= ctu;
+
+	for_each_rsnd_ctu(ctu, priv, i) {
+		/*
+		 * CTU00, CTU01, CTU02, CTU03 => CTU0
+		 * CTU10, CTU11, CTU12, CTU13 => CTU1
+		 */
+		snprintf(name, CTU_NAME_SIZE, "%s.%d",
+			 CTU_NAME, i / 4);
+
+		clk = devm_clk_get(dev, name);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		ctu->info = &info->ctu_info[i];
+
+		ret = rsnd_mod_init(priv, &ctu->mod, &rsnd_ctu_ops,
+				    clk, RSND_MOD_CTU, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void rsnd_ctu_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv)
+{
+	struct rsnd_ctu *ctu;
+	int i;
+
+	for_each_rsnd_ctu(ctu, priv, i) {
+		rsnd_mod_quit(&ctu->mod);
+	}
+}
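
The new CTU driver exposes eight CTU channels that map onto two hardware
units, CTU0 (CTU00-CTU03) and CTU1 (CTU10-CTU13), each with its own clock,
which is why the clock lookup name is built from i / 4. A small standalone
illustration of that naming:

#include <stdio.h>

#define CTU_NAME	"ctu"
#define CTU_NAME_SIZE	16

int main(void)
{
	char name[CTU_NAME_SIZE];
	int i;

	/* CTU00..CTU03 -> "ctu.0", CTU10..CTU13 -> "ctu.1" */
	for (i = 0; i < 8; i++) {
		snprintf(name, CTU_NAME_SIZE, "%s.%d", CTU_NAME, i / 4);
		printf("CTU channel %d uses clock \"%s\"\n", i, name);
	}
	return 0;
}
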
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index d306e29..bfbb8a5 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -27,6 +27,15 @@
 	int dmapp_num;
 };
 
+struct rsnd_dma_ops {
+	char *name;
+	void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+	void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+	int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
+		    struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
+	void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
+};
+
 #define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
 
 /*
@@ -168,7 +177,7 @@
 		dma_cap_set(DMA_SLAVE, mask);
 
 		dmaen->chan = dma_request_channel(mask, shdma_chan_filter,
-						  (void *)id);
+						  (void *)(uintptr_t)id);
 	}
 	if (IS_ERR_OR_NULL(dmaen->chan)) {
 		dmaen->chan = NULL;
@@ -182,7 +191,8 @@
 	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 
-	dev_dbg(dev, "dma : %pad -> %pad\n",
+	dev_dbg(dev, "%s %pad -> %pad\n",
+		dma->ops->name,
 		&cfg.src_addr, &cfg.dst_addr);
 
 	ret = dmaengine_slave_config(dmaen->chan, &cfg);
@@ -215,6 +225,7 @@
 }
 
 static struct rsnd_dma_ops rsnd_dmaen_ops = {
+	.name	= "audmac",
 	.start	= rsnd_dmaen_start,
 	.stop	= rsnd_dmaen_stop,
 	.init	= rsnd_dmaen_init,
@@ -360,6 +371,7 @@
 }
 
 static struct rsnd_dma_ops rsnd_dmapp_ops = {
+	.name	= "audmac-pp",
 	.start	= rsnd_dmapp_start,
 	.stop	= rsnd_dmapp_stop,
 	.init	= rsnd_dmapp_init,
@@ -414,7 +426,9 @@
 	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
 	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
 	int use_src = !!rsnd_io_to_mod_src(io);
-	int use_dvc = !!rsnd_io_to_mod_dvc(io);
+	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
+		      !!rsnd_io_to_mod_mix(io) ||
+		      !!rsnd_io_to_mod_ctu(io);
 	int id = rsnd_mod_id(mod);
 	struct dma_addr {
 		dma_addr_t out_addr;
@@ -452,7 +466,7 @@
 	};
 
 	/* it shouldn't happen */
-	if (use_dvc && !use_src)
+	if (use_cmd && !use_src)
 		dev_err(dev, "DVC is selected without SRC\n");
 
 	/* use SSIU or SSI ? */
@@ -460,8 +474,8 @@
 		is_ssi++;
 
 	return (is_from) ?
-		dma_addrs[is_ssi][is_play][use_src + use_dvc].out_addr :
-		dma_addrs[is_ssi][is_play][use_src + use_dvc].in_addr;
+		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
+		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
 }
 
 static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
@@ -482,7 +496,7 @@
 	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
 }
 
-#define MOD_MAX 4 /* MEM/SSI/SRC/DVC */
+#define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
 static void rsnd_dma_of_path(struct rsnd_dma *dma,
 			     struct rsnd_dai_stream *io,
 			     int is_play,
@@ -492,55 +506,81 @@
 	struct rsnd_mod *this = rsnd_dma_to_mod(dma);
 	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
 	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
+	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
 	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
 	struct rsnd_mod *mod[MOD_MAX];
-	int i, index;
+	struct rsnd_mod *mod_start, *mod_end;
+	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	int nr, i;
 
+	if (!ssi)
+		return;
 
-	for (i = 0; i < MOD_MAX; i++)
+	nr = 0;
+	for (i = 0; i < MOD_MAX; i++) {
 		mod[i] = NULL;
-
-	/*
-	 * in play case...
-	 *
-	 * src -> dst
-	 *
-	 * mem -> SSI
-	 * mem -> SRC -> SSI
-	 * mem -> SRC -> DVC -> SSI
-	 */
-	mod[0] = NULL; /* for "mem" */
-	index = 1;
-	for (i = 1; i < MOD_MAX; i++) {
-		if (!src) {
-			mod[i] = ssi;
-		} else if (!dvc) {
-			mod[i] = src;
-			src = NULL;
-		} else {
-			if ((!is_play) && (this == src))
-				this = dvc;
-
-			mod[i] = (is_play) ? src : dvc;
-			i++;
-			mod[i] = (is_play) ? dvc : src;
-			src = NULL;
-			dvc = NULL;
-		}
-
-		if (mod[i] == this)
-			index = i;
-
-		if (mod[i] == ssi)
-			break;
+		nr += !!rsnd_io_to_mod(io, i);
 	}
 
-	if (is_play) {
-		*mod_from = mod[index - 1];
-		*mod_to   = mod[index];
+	/*
+	 * [S] -*-> [E]
+	 * [S] -*-> SRC -o-> [E]
+	 * [S] -*-> SRC -> DVC -o-> [E]
+	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
+	 *
+	 * playback	[S] = mem
+	 *		[E] = SSI
+	 *
+	 * capture	[S] = SSI
+	 *		[E] = mem
+	 *
+	 * -*->		Audio DMAC
+	 * -o->		Audio DMAC peri peri
+	 */
+	mod_start	= (is_play) ? NULL : ssi;
+	mod_end		= (is_play) ? ssi  : NULL;
+
+	mod[0] = mod_start;
+	for (i = 1; i < nr; i++) {
+		if (src) {
+			mod[i] = src;
+			src = NULL;
+		} else if (ctu) {
+			mod[i] = ctu;
+			ctu = NULL;
+		} else if (mix) {
+			mod[i] = mix;
+			mix = NULL;
+		} else if (dvc) {
+			mod[i] = dvc;
+			dvc = NULL;
+		}
+	}
+	mod[i] = mod_end;
+
+	/*
+	 *		| SSI | SRC |
+	 * -------------+-----+-----+
+	 *  is_play	|  o  |  *  |
+	 * !is_play	|  *  |  o  |
+	 */
+	if ((this == ssi) == (is_play)) {
+		*mod_from	= mod[nr - 1];
+		*mod_to		= mod[nr];
 	} else {
-		*mod_from = mod[index];
-		*mod_to   = mod[index - 1];
+		*mod_from	= mod[0];
+		*mod_to		= mod[1];
+	}
+
+	dev_dbg(dev, "module connection (this is %s[%d])\n",
+		rsnd_mod_name(this), rsnd_mod_id(this));
+	for (i = 0; i <= nr; i++) {
+		dev_dbg(dev, "  %s[%d]%s\n",
+		       rsnd_mod_name(mod[i]), rsnd_mod_id(mod[i]),
+		       (mod[i] == *mod_from) ? " from" :
+		       (mod[i] == *mod_to)   ? " to" : "");
 	}
 }
 
@@ -568,10 +608,11 @@
 
 int rsnd_dma_init(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id)
 {
-	struct rsnd_mod *mod_from;
-	struct rsnd_mod *mod_to;
+	struct rsnd_mod *mod_from = NULL;
+	struct rsnd_mod *mod_to = NULL;
 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
 	int is_play = rsnd_io_is_play(io);
 
 	/*
@@ -598,6 +639,11 @@
 	if (rsnd_is_gen1(priv))
 		dma->ops = &rsnd_dmaen_ops;
 
+	dev_dbg(dev, "%s %s[%d] -> %s[%d]\n",
+		dma->ops->name,
+		rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
+		rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));
+
 	return dma->ops->init(io, dma, id, mod_from, mod_to);
 }
 
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 36fc020..5779638 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -24,6 +24,7 @@
 	struct rsnd_kctrl_cfg_s rdown;	/* Ramp Rate Down */
 };
 
+#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
 #define rsnd_dvc_of_node(priv) \
 	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,dvc")
 
@@ -63,6 +64,19 @@
 	"0.125 dB/8192 steps",	 /* 10111 */
 };
 
+static void rsnd_dvc_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, DVC_SWRSR, 0);
+	rsnd_mod_write(mod, DVC_SWRSR, 1);
+}
+
+#define rsnd_dvc_initialize_lock(mod)	__rsnd_dvc_initialize_lock(mod, 1)
+#define rsnd_dvc_initialize_unlock(mod)	__rsnd_dvc_initialize_lock(mod, 0)
+static void __rsnd_dvc_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, DVC_DVUIR, enable);
+}
+
 static void rsnd_dvc_volume_update(struct rsnd_dai_stream *io,
 				   struct rsnd_mod *mod)
 {
@@ -135,49 +149,24 @@
 	return 0;
 }
 
-static int rsnd_dvc_init(struct rsnd_mod *dvc_mod,
+static int rsnd_dvc_init(struct rsnd_mod *mod,
 			 struct rsnd_dai_stream *io,
 			 struct rsnd_priv *priv)
 {
-	struct rsnd_mod *src_mod = rsnd_io_to_mod_src(io);
-	struct device *dev = rsnd_priv_to_dev(priv);
-	int dvc_id = rsnd_mod_id(dvc_mod);
-	int src_id = rsnd_mod_id(src_mod);
-	u32 route[] = {
-		[0] = 0x30000,
-		[1] = 0x30001,
-		[2] = 0x40000,
-		[3] = 0x10000,
-		[4] = 0x20000,
-		[5] = 0x40100
-	};
+	rsnd_mod_hw_start(mod);
 
-	if (src_id >= ARRAY_SIZE(route)) {
-		dev_err(dev, "DVC%d isn't connected to SRC%d\n", dvc_id, src_id);
-		return -EINVAL;
-	}
+	rsnd_dvc_soft_reset(mod);
 
-	rsnd_mod_hw_start(dvc_mod);
+	rsnd_dvc_initialize_lock(mod);
 
-	/*
-	 * fixme
-	 * it doesn't support CTU/MIX
-	 */
-	rsnd_mod_write(dvc_mod, CMD_ROUTE_SLCT, route[src_id]);
+	rsnd_path_parse(priv, io);
 
-	rsnd_mod_write(dvc_mod, DVC_SWRSR, 0);
-	rsnd_mod_write(dvc_mod, DVC_SWRSR, 1);
-
-	rsnd_mod_write(dvc_mod, DVC_DVUIR, 1);
-
-	rsnd_mod_write(dvc_mod, DVC_ADINR, rsnd_get_adinr(dvc_mod, io));
+	rsnd_mod_write(mod, DVC_ADINR, rsnd_get_adinr_bit(mod, io));
 
 	/* ch0/ch1 Volume */
-	rsnd_dvc_volume_update(io, dvc_mod);
+	rsnd_dvc_volume_update(io, mod);
 
-	rsnd_mod_write(dvc_mod, DVC_DVUIR, 0);
-
-	rsnd_adg_set_cmd_timsel_gen2(dvc_mod, io);
+	rsnd_adg_set_cmd_timsel_gen2(mod, io);
 
 	return 0;
 }
@@ -195,6 +184,8 @@
 			  struct rsnd_dai_stream *io,
 			  struct rsnd_priv *priv)
 {
+	rsnd_dvc_initialize_unlock(mod);
+
 	rsnd_mod_write(mod, CMD_CTRL, 0x10);
 
 	return 0;
@@ -341,23 +332,21 @@
 	char name[RSND_DVC_NAME_SIZE];
 	int i, nr, ret;
 
-	rsnd_of_parse_dvc(pdev, of_data, priv);
-
-	nr = info->dvc_info_nr;
-	if (!nr)
-		return 0;
-
 	/* This driver doesn't support Gen1 at this point */
 	if (rsnd_is_gen1(priv)) {
 		dev_warn(dev, "CMD is not supported on Gen1\n");
 		return -EINVAL;
 	}
 
+	rsnd_of_parse_dvc(pdev, of_data, priv);
+
+	nr = info->dvc_info_nr;
+	if (!nr)
+		return 0;
+
 	dvc	= devm_kzalloc(dev, sizeof(*dvc) * nr, GFP_KERNEL);
-	if (!dvc) {
-		dev_err(dev, "CMD allocate failed\n");
+	if (!dvc)
 		return -ENOMEM;
-	}
 
 	priv->dvc_nr	= nr;
 	priv->dvc	= dvc;
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 8c7dc51..f04d17b 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -103,6 +103,22 @@
 	regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
 }
 
+void rsnd_force_write(struct rsnd_priv *priv,
+		      struct rsnd_mod *mod,
+		      enum rsnd_reg reg, u32 data)
+{
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+	if (!rsnd_is_accessible_reg(priv, gen, reg))
+		return;
+
+	dev_dbg(dev, "w %s[%d] - %4d : %08x\n",
+		rsnd_mod_name(mod), rsnd_mod_id(mod), reg, data);
+
+	regmap_fields_force_write(gen->regs[reg], rsnd_mod_id(mod), data);
+}
+
 void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
 	       enum rsnd_reg reg, u32 mask, u32 data)
 {
@@ -200,12 +216,13 @@
 		/* FIXME: it needs SSI_MODE2/3 in the future */
 		RSND_GEN_M_REG(SSI_BUSIF_MODE,	0x0,	0x80),
 		RSND_GEN_M_REG(SSI_BUSIF_ADINR,	0x4,	0x80),
-		RSND_GEN_M_REG(BUSIF_DALIGN,	0x8,	0x80),
+		RSND_GEN_M_REG(SSI_BUSIF_DALIGN,0x8,	0x80),
 		RSND_GEN_M_REG(SSI_CTRL,	0x10,	0x80),
-		RSND_GEN_M_REG(INT_ENABLE,	0x18,	0x80),
+		RSND_GEN_M_REG(SSI_INT_ENABLE,	0x18,	0x80),
 	};
 	struct rsnd_regmap_field_conf conf_scu[] = {
 		RSND_GEN_M_REG(SRC_BUSIF_MODE,	0x0,	0x20),
+		RSND_GEN_M_REG(SRC_BUSIF_DALIGN,0x8,	0x20),
 		RSND_GEN_M_REG(SRC_ROUTE_MODE0,	0xc,	0x20),
 		RSND_GEN_M_REG(SRC_CTRL,	0x10,	0x20),
 		RSND_GEN_M_REG(SRC_INT_ENABLE0,	0x18,	0x20),
@@ -223,6 +240,18 @@
 		RSND_GEN_M_REG(SRC_SRCCR,	0x224,	0x40),
 		RSND_GEN_M_REG(SRC_BSDSR,	0x22c,	0x40),
 		RSND_GEN_M_REG(SRC_BSISR,	0x238,	0x40),
+		RSND_GEN_M_REG(CTU_CTUIR,	0x504,	0x100),
+		RSND_GEN_M_REG(CTU_ADINR,	0x508,	0x100),
+		RSND_GEN_M_REG(MIX_SWRSR,	0xd00,	0x40),
+		RSND_GEN_M_REG(MIX_MIXIR,	0xd04,	0x40),
+		RSND_GEN_M_REG(MIX_ADINR,	0xd08,	0x40),
+		RSND_GEN_M_REG(MIX_MIXMR,	0xd10,	0x40),
+		RSND_GEN_M_REG(MIX_MVPDR,	0xd14,	0x40),
+		RSND_GEN_M_REG(MIX_MDBAR,	0xd18,	0x40),
+		RSND_GEN_M_REG(MIX_MDBBR,	0xd1c,	0x40),
+		RSND_GEN_M_REG(MIX_MDBCR,	0xd20,	0x40),
+		RSND_GEN_M_REG(MIX_MDBDR,	0xd24,	0x40),
+		RSND_GEN_M_REG(MIX_MDBER,	0xd28,	0x40),
 		RSND_GEN_M_REG(DVC_SWRSR,	0xe00,	0x100),
 		RSND_GEN_M_REG(DVC_DVUIR,	0xe04,	0x100),
 		RSND_GEN_M_REG(DVC_ADINR,	0xe08,	0x100),
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
new file mode 100644
index 0000000..0d5c102
--- /dev/null
+++ b/sound/soc/sh/rcar/mix.c
@@ -0,0 +1,200 @@
+/*
+ * mix.c
+ *
+ * Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+#define MIX_NAME_SIZE	16
+#define MIX_NAME "mix"
+
+struct rsnd_mix {
+	struct rsnd_mix_platform_info *info; /* rcar_snd.h */
+	struct rsnd_mod mod;
+};
+
+#define rsnd_mix_nr(priv) ((priv)->mix_nr)
+#define for_each_rsnd_mix(pos, priv, i)					\
+	for ((i) = 0;							\
+	     ((i) < rsnd_mix_nr(priv)) &&				\
+		     ((pos) = (struct rsnd_mix *)(priv)->mix + i);	\
+	     i++)
+
+
+static void rsnd_mix_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, MIX_SWRSR, 0);
+	rsnd_mod_write(mod, MIX_SWRSR, 1);
+}
+
+#define rsnd_mix_initialize_lock(mod)	__rsnd_mix_initialize_lock(mod, 1)
+#define rsnd_mix_initialize_unlock(mod)	__rsnd_mix_initialize_lock(mod, 0)
+static void __rsnd_mix_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, MIX_MIXIR, enable);
+}
+
+static void rsnd_mix_volume_update(struct rsnd_dai_stream *io,
+				  struct rsnd_mod *mod)
+{
+
+	/* Disable MIX dB setting */
+	rsnd_mod_write(mod, MIX_MDBER, 0);
+
+	rsnd_mod_write(mod, MIX_MDBAR, 0);
+	rsnd_mod_write(mod, MIX_MDBBR, 0);
+	rsnd_mod_write(mod, MIX_MDBCR, 0);
+	rsnd_mod_write(mod, MIX_MDBDR, 0);
+
+	/* Enable MIX dB setting */
+	rsnd_mod_write(mod, MIX_MDBER, 1);
+}
+
+static int rsnd_mix_init(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_start(mod);
+
+	rsnd_mix_soft_reset(mod);
+
+	rsnd_mix_initialize_lock(mod);
+
+	rsnd_mod_write(mod, MIX_ADINR, rsnd_get_adinr_chan(mod, io));
+
+	rsnd_path_parse(priv, io);
+
+	/* volume step */
+	rsnd_mod_write(mod, MIX_MIXMR, 0);
+	rsnd_mod_write(mod, MIX_MVPDR, 0);
+
+	rsnd_mix_volume_update(io, mod);
+
+	rsnd_mix_initialize_unlock(mod);
+
+	return 0;
+}
+
+static int rsnd_mix_quit(struct rsnd_mod *mod,
+			 struct rsnd_dai_stream *io,
+			 struct rsnd_priv *priv)
+{
+	rsnd_mod_hw_stop(mod);
+
+	return 0;
+}
+
+static struct rsnd_mod_ops rsnd_mix_ops = {
+	.name		= MIX_NAME,
+	.init		= rsnd_mix_init,
+	.quit		= rsnd_mix_quit,
+};
+
+struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
+{
+	if (WARN_ON(id < 0 || id >= rsnd_mix_nr(priv)))
+		id = 0;
+
+	return &((struct rsnd_mix *)(priv->mix) + id)->mod;
+}
+
+static void rsnd_of_parse_mix(struct platform_device *pdev,
+			      const struct rsnd_of_data *of_data,
+			      struct rsnd_priv *priv)
+{
+	struct device_node *node;
+	struct rsnd_mix_platform_info *mix_info;
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = &pdev->dev;
+	int nr;
+
+	if (!of_data)
+		return;
+
+	node = of_get_child_by_name(dev->of_node, "rcar_sound,mix");
+	if (!node)
+		return;
+
+	nr = of_get_child_count(node);
+	if (!nr)
+		goto rsnd_of_parse_mix_end;
+
+	mix_info = devm_kzalloc(dev,
+				sizeof(struct rsnd_mix_platform_info) * nr,
+				GFP_KERNEL);
+	if (!mix_info) {
+		dev_err(dev, "mix info allocation error\n");
+		goto rsnd_of_parse_mix_end;
+	}
+
+	info->mix_info		= mix_info;
+	info->mix_info_nr	= nr;
+
+rsnd_of_parse_mix_end:
+	of_node_put(node);
+
+}
+
+int rsnd_mix_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv)
+{
+	struct rcar_snd_info *info = rsnd_priv_to_info(priv);
+	struct device *dev = rsnd_priv_to_dev(priv);
+	struct rsnd_mix *mix;
+	struct clk *clk;
+	char name[MIX_NAME_SIZE];
+	int i, nr, ret;
+
+	/* This driver doesn't support Gen1 at this point */
+	if (rsnd_is_gen1(priv)) {
+		dev_warn(dev, "MIX is not supported on Gen1\n");
+		return -EINVAL;
+	}
+
+	rsnd_of_parse_mix(pdev, of_data, priv);
+
+	nr = info->mix_info_nr;
+	if (!nr)
+		return 0;
+
+	mix	= devm_kzalloc(dev, sizeof(*mix) * nr, GFP_KERNEL);
+	if (!mix)
+		return -ENOMEM;
+
+	priv->mix_nr	= nr;
+	priv->mix	= mix;
+
+	for_each_rsnd_mix(mix, priv, i) {
+		snprintf(name, MIX_NAME_SIZE, "%s.%d",
+			 MIX_NAME, i);
+
+		clk = devm_clk_get(dev, name);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+
+		mix->info = &info->mix_info[i];
+
+		ret = rsnd_mod_init(priv, &mix->mod, &rsnd_mix_ops,
+				    clk, RSND_MOD_MIX, i);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+void rsnd_mix_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv)
+{
+	struct rsnd_mix *mix;
+	int i;
+
+	for_each_rsnd_mix(mix, priv, i) {
+		rsnd_mod_quit(&mix->mod);
+	}
+}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 09fcc54..7a0e52b 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -47,6 +47,18 @@
 	RSND_REG_SCU_SYS_STATUS0,
 	RSND_REG_SCU_SYS_INT_EN0,
 	RSND_REG_CMD_ROUTE_SLCT,
+	RSND_REG_CTU_CTUIR,
+	RSND_REG_CTU_ADINR,
+	RSND_REG_MIX_SWRSR,
+	RSND_REG_MIX_MIXIR,
+	RSND_REG_MIX_ADINR,
+	RSND_REG_MIX_MIXMR,
+	RSND_REG_MIX_MVPDR,
+	RSND_REG_MIX_MDBAR,
+	RSND_REG_MIX_MDBBR,
+	RSND_REG_MIX_MDBCR,
+	RSND_REG_MIX_MDBDR,
+	RSND_REG_MIX_MDBER,
 	RSND_REG_DVC_SWRSR,
 	RSND_REG_DVC_DVUIR,
 	RSND_REG_DVC_ADINR,
@@ -99,6 +111,7 @@
 	RSND_REG_SHARE26,
 	RSND_REG_SHARE27,
 	RSND_REG_SHARE28,
+	RSND_REG_SHARE29,
 
 	RSND_REG_MAX,
 };
@@ -119,7 +132,7 @@
 #define RSND_REG_SSI_CTRL		RSND_REG_SHARE02
 #define RSND_REG_SSI_BUSIF_MODE		RSND_REG_SHARE03
 #define RSND_REG_SSI_BUSIF_ADINR	RSND_REG_SHARE04
-#define RSND_REG_INT_ENABLE		RSND_REG_SHARE05
+#define RSND_REG_SSI_INT_ENABLE		RSND_REG_SHARE05
 #define RSND_REG_SRC_BSDSR		RSND_REG_SHARE06
 #define RSND_REG_SRC_BSISR		RSND_REG_SHARE07
 #define RSND_REG_DIV_EN			RSND_REG_SHARE08
@@ -136,13 +149,14 @@
 #define RSND_REG_AUDIO_CLK_SEL2		RSND_REG_SHARE19
 #define RSND_REG_CMD_CTRL		RSND_REG_SHARE20
 #define RSND_REG_CMDOUT_TIMSEL		RSND_REG_SHARE21
-#define RSND_REG_BUSIF_DALIGN		RSND_REG_SHARE22
+#define RSND_REG_SSI_BUSIF_DALIGN	RSND_REG_SHARE22
 #define RSND_REG_DVC_VRCTR		RSND_REG_SHARE23
 #define RSND_REG_DVC_VRPDR		RSND_REG_SHARE24
 #define RSND_REG_DVC_VRDBR		RSND_REG_SHARE25
 #define RSND_REG_SCU_SYS_STATUS1	RSND_REG_SHARE26
 #define RSND_REG_SCU_SYS_INT_EN1	RSND_REG_SHARE27
 #define RSND_REG_SRC_INT_ENABLE0	RSND_REG_SHARE28
+#define RSND_REG_SRC_BUSIF_DALIGN	RSND_REG_SHARE29
 
 struct rsnd_of_data;
 struct rsnd_priv;
@@ -157,27 +171,28 @@
 	rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
 #define rsnd_mod_write(m, r, d) \
 	rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
+#define rsnd_mod_force_write(m, r, d) \
+	rsnd_force_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
 #define rsnd_mod_bset(m, r, s, d) \
 	rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
 
 u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
 void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
 		enum rsnd_reg reg, u32 data);
+void rsnd_force_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
+		enum rsnd_reg reg, u32 data);
 void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
 		    u32 mask, u32 data);
-u32 rsnd_get_adinr(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_adinr_chan(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+void rsnd_path_parse(struct rsnd_priv *priv,
+		     struct rsnd_dai_stream *io);
 
 /*
  *	R-Car DMA
  */
 struct rsnd_dma;
-struct rsnd_dma_ops {
-	void (*start)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-	void (*stop)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-	int (*init)(struct rsnd_dai_stream *io, struct rsnd_dma *dma, int id,
-		    struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
-	void (*quit)(struct rsnd_dai_stream *io, struct rsnd_dma *dma);
-};
 
 struct rsnd_dmaen {
 	struct dma_chan		*chan;
@@ -217,6 +232,8 @@
  */
 enum rsnd_mod_type {
 	RSND_MOD_DVC = 0,
+	RSND_MOD_MIX,
+	RSND_MOD_CTU,
 	RSND_MOD_SRC,
 	RSND_MOD_SSI,
 	RSND_MOD_MAX,
@@ -312,7 +329,7 @@
 
 #define rsnd_mod_to_priv(mod) ((mod)->priv)
 #define rsnd_mod_to_dma(mod) (&(mod)->dma)
-#define rsnd_mod_id(mod) ((mod)->id)
+#define rsnd_mod_id(mod) ((mod) ? (mod)->id : -1)
 #define rsnd_mod_hw_start(mod)	clk_enable((mod)->clk)
 #define rsnd_mod_hw_stop(mod)	clk_disable((mod)->clk)
 
@@ -345,9 +362,12 @@
 	int byte_per_period;
 	int next_period_byte;
 };
-#define rsnd_io_to_mod_ssi(io)	((io)->mod[RSND_MOD_SSI])
-#define rsnd_io_to_mod_src(io)	((io)->mod[RSND_MOD_SRC])
-#define rsnd_io_to_mod_dvc(io)	((io)->mod[RSND_MOD_DVC])
+#define rsnd_io_to_mod(io, i)	((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
+#define rsnd_io_to_mod_ssi(io)	rsnd_io_to_mod((io), RSND_MOD_SSI)
+#define rsnd_io_to_mod_src(io)	rsnd_io_to_mod((io), RSND_MOD_SRC)
+#define rsnd_io_to_mod_ctu(io)	rsnd_io_to_mod((io), RSND_MOD_CTU)
+#define rsnd_io_to_mod_mix(io)	rsnd_io_to_mod((io), RSND_MOD_MIX)
+#define rsnd_io_to_mod_dvc(io)	rsnd_io_to_mod((io), RSND_MOD_DVC)
 #define rsnd_io_to_rdai(io)	((io)->rdai)
 #define rsnd_io_to_priv(io)	(rsnd_rdai_to_priv(rsnd_io_to_rdai(io)))
 #define rsnd_io_is_play(io)	(&rsnd_io_to_rdai(io)->playback == io)
@@ -437,12 +457,6 @@
 	void *gen;
 
 	/*
-	 * below value will be filled on rsnd_src_probe()
-	 */
-	void *src;
-	int src_nr;
-
-	/*
 	 * below value will be filled on rsnd_adg_probe()
 	 */
 	void *adg;
@@ -459,6 +473,24 @@
 	int ssi_nr;
 
 	/*
+	 * below value will be filled on rsnd_src_probe()
+	 */
+	void *src;
+	int src_nr;
+
+	/*
+	 * below value will be filled on rsnd_ctu_probe()
+	 */
+	void *ctu;
+	int ctu_nr;
+
+	/*
+	 * below value will be filled on rsnd_mix_probe()
+	 */
+	void *mix;
+	int mix_nr;
+
+	/*
 	 * below value will be filled on rsnd_dvc_probe()
 	 */
 	void *dvc;
@@ -531,6 +563,19 @@
 		     u32 max);
 
 /*
+ *	R-Car SSI
+ */
+int rsnd_ssi_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv);
+void rsnd_ssi_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
+int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
+int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
+int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+
+/*
  *	R-Car SRC
  */
 int rsnd_src_probe(struct platform_device *pdev,
@@ -550,20 +595,27 @@
 int rsnd_src_ssi_irq_enable(struct rsnd_mod *ssi_mod);
 int rsnd_src_ssi_irq_disable(struct rsnd_mod *ssi_mod);
 
-#define rsnd_src_nr(priv) ((priv)->src_nr)
-
 /*
- *	R-Car SSI
+ *	R-Car CTU
  */
-int rsnd_ssi_probe(struct platform_device *pdev,
+int rsnd_ctu_probe(struct platform_device *pdev,
 		   const struct rsnd_of_data *of_data,
 		   struct rsnd_priv *priv);
-void rsnd_ssi_remove(struct platform_device *pdev,
+
+void rsnd_ctu_remove(struct platform_device *pdev,
 		     struct rsnd_priv *priv);
-struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
-int rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
-int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
-int rsnd_ssi_use_busif(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
+struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
+
+/*
+ *	R-Car MIX
+ */
+int rsnd_mix_probe(struct platform_device *pdev,
+		   const struct rsnd_of_data *of_data,
+		   struct rsnd_priv *priv);
+
+void rsnd_mix_remove(struct platform_device *pdev,
+		     struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id);
 
 /*
  *	R-Car DVC
@@ -575,7 +627,4 @@
 		     struct rsnd_priv *priv);
 struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id);
 
-#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
-
-
 #endif
diff --git a/sound/soc/sh/rcar/rsrc-card.c b/sound/soc/sh/rcar/rsrc-card.c
index 84e9357..d61db9c 100644
--- a/sound/soc/sh/rcar/rsrc-card.c
+++ b/sound/soc/sh/rcar/rsrc-card.c
@@ -41,6 +41,7 @@
 static const struct of_device_id rsrc_card_of_match[] = {
 	{ .compatible = "renesas,rsrc-card,lager",	.data = &routes_of_ssi0_ak4642 },
 	{ .compatible = "renesas,rsrc-card,koelsch",	.data = &routes_of_ssi0_ak4642 },
+	{ .compatible = "renesas,rsrc-card", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rsrc_card_of_match);
@@ -242,8 +243,15 @@
 		snd_soc_of_get_dai_name(np, &dai_link->codec_dai_name);
 
 		/* additional name prefix */
-		priv->codec_conf.of_node	= dai_link->codec_of_node;
-		priv->codec_conf.name_prefix	= of_data->prefix;
+		if (of_data) {
+			priv->codec_conf.of_node = dai_link->codec_of_node;
+			priv->codec_conf.name_prefix = of_data->prefix;
+		} else {
+			snd_soc_of_parse_audio_prefix(&priv->snd_card,
+						      &priv->codec_conf,
+						      dai_link->codec_of_node,
+						      "audio-prefix");
+		}
 
 		/* set dai_name */
 		snprintf(dai_props->dai_name, DAI_NAME_NUM, "be.%s",
@@ -361,8 +369,14 @@
 	priv->snd_card.num_links		= num;
 	priv->snd_card.codec_conf		= &priv->codec_conf;
 	priv->snd_card.num_configs		= 1;
-	priv->snd_card.of_dapm_routes		= of_data->routes;
-	priv->snd_card.num_of_dapm_routes	= of_data->num_routes;
+
+	if (of_data) {
+		priv->snd_card.of_dapm_routes		= of_data->routes;
+		priv->snd_card.num_of_dapm_routes	= of_data->num_routes;
+	} else {
+		snd_soc_of_parse_audio_routing(&priv->snd_card,
+					       "audio-routing");
+	}
 
 	/* Parse the card name from DT */
 	snd_soc_of_parse_card_name(&priv->snd_card, "card-name");
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index c61c171..89a18e1 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -30,6 +30,7 @@
 
 #define RSND_SRC_NAME_SIZE 16
 
+#define rsnd_src_nr(priv) ((priv)->src_nr)
 #define rsnd_enable_sync_convert(src) ((src)->sen.val)
 #define rsnd_src_of_node(priv) \
 	of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, "rcar_sound,src")
@@ -117,6 +118,20 @@
 /*
  *		Gen1/Gen2 common functions
  */
+static void rsnd_src_soft_reset(struct rsnd_mod *mod)
+{
+	rsnd_mod_write(mod, SRC_SWRSR, 0);
+	rsnd_mod_write(mod, SRC_SWRSR, 1);
+}
+
+
+#define rsnd_src_initialize_lock(mod)	__rsnd_src_initialize_lock(mod, 1)
+#define rsnd_src_initialize_unlock(mod)	__rsnd_src_initialize_lock(mod, 0)
+static void __rsnd_src_initialize_lock(struct rsnd_mod *mod, u32 enable)
+{
+	rsnd_mod_write(mod, SRC_SRCIR, enable);
+}
+
 static struct dma_chan *rsnd_src_dma_req(struct rsnd_dai_stream *io,
 					 struct rsnd_mod *mod)
 {
@@ -133,7 +148,6 @@
 			int use_busif)
 {
 	struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
-	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
 	int ssi_id = rsnd_mod_id(ssi_mod);
 
 	/*
@@ -170,27 +184,14 @@
 	 * DMA settings for SSIU
 	 */
 	if (use_busif) {
-		u32 val = 0x76543210;
-		u32 mask = ~0;
+		u32 val = rsnd_get_dalign(ssi_mod, io);
 
 		rsnd_mod_write(ssi_mod, SSI_BUSIF_ADINR,
-			       rsnd_get_adinr(ssi_mod, io));
+			       rsnd_get_adinr_bit(ssi_mod, io));
 		rsnd_mod_write(ssi_mod, SSI_BUSIF_MODE,  1);
 		rsnd_mod_write(ssi_mod, SSI_CTRL, 0x1);
 
-		mask <<= runtime->channels * 4;
-		val = val & mask;
-
-		switch (runtime->sample_bits) {
-		case 16:
-			val |= 0x67452301 & ~mask;
-			break;
-		case 32:
-			val |= 0x76543210 & ~mask;
-			break;
-		}
-		rsnd_mod_write(ssi_mod, BUSIF_DALIGN, val);
-
+		rsnd_mod_write(ssi_mod, SSI_BUSIF_DALIGN, val);
 	}
 
 	return 0;
@@ -215,10 +216,9 @@
 		return 0;
 
 	/* enable SSI interrupt if Gen2 */
-	if (rsnd_ssi_is_dma_mode(ssi_mod))
-		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0e000000);
-	else
-		rsnd_mod_write(ssi_mod, INT_ENABLE, 0x0f000000);
+	rsnd_mod_write(ssi_mod, SSI_INT_ENABLE,
+		       rsnd_ssi_is_dma_mode(ssi_mod) ?
+		       0x0e000000 : 0x0f000000);
 
 	return 0;
 }
@@ -231,7 +231,7 @@
 		return 0;
 
 	/* disable SSI interrupt if Gen2 */
-	rsnd_mod_write(ssi_mod, INT_ENABLE, 0x00000000);
+	rsnd_mod_write(ssi_mod, SSI_INT_ENABLE, 0x00000000);
 
 	return 0;
 }
@@ -294,12 +294,8 @@
 	if (convert_rate)
 		fsrate = 0x0400000 / convert_rate * runtime->rate;
 
-	/* set/clear soft reset */
-	rsnd_mod_write(mod, SRC_SWRSR, 0);
-	rsnd_mod_write(mod, SRC_SWRSR, 1);
-
 	/* Set channel number and output bit length */
-	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr(mod, io));
+	rsnd_mod_write(mod, SRC_ADINR, rsnd_get_adinr_bit(mod, io));
 
 	/* Enable the initial value of IFS */
 	if (fsrate) {
@@ -358,17 +354,15 @@
 
 	rsnd_mod_hw_start(mod);
 
+	rsnd_src_soft_reset(mod);
+
+	rsnd_src_initialize_lock(mod);
+
 	src->err = 0;
 
 	/* reset sync convert_rate */
 	src->sync.val = 0;
 
-	/*
-	 * Initialize the operation of the SRC internal circuits
-	 * see rsnd_src_start()
-	 */
-	rsnd_mod_write(mod, SRC_SRCIR, 1);
-
 	return 0;
 }
 
@@ -395,11 +389,7 @@
 
 static int rsnd_src_start(struct rsnd_mod *mod)
 {
-	/*
-	 * Cancel the initialization and operate the SRC function
-	 * see rsnd_src_init()
-	 */
-	rsnd_mod_write(mod, SRC_SRCIR, 0);
+	rsnd_src_initialize_unlock(mod);
 
 	return 0;
 }
@@ -617,6 +607,14 @@
 		int_val = 0;
 	}
 
+	/*
+	 * WORKAROUND
+	 *
+	 * ignore overflow errors when rsnd_enable_sync_convert() is used
+	 */
+	if (rsnd_enable_sync_convert(src))
+		sys_int_val = sys_int_val & 0xffff;
+
 	rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val);
 	rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val);
 	rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val);
@@ -632,11 +630,22 @@
 
 static bool rsnd_src_error_record_gen2(struct rsnd_mod *mod)
 {
-	u32 val = OUF_SRC(rsnd_mod_id(mod));
+	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	u32 val0, val1;
 	bool ret = false;
 
-	if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val) ||
-	    (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val)) {
+	val0 = val1 = OUF_SRC(rsnd_mod_id(mod));
+
+	/*
+	 * WORKAROUND
+	 *
+	 * ignore overflow errors when rsnd_enable_sync_convert() is used
+	 */
+	if (rsnd_enable_sync_convert(src))
+		val0 = val0 & 0xffff;
+
+	if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val0) ||
+	    (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val1)) {
 		struct rsnd_src *src = rsnd_mod_to_src(mod);
 
 		src->err++;
@@ -652,7 +661,20 @@
 static int _rsnd_src_start_gen2(struct rsnd_mod *mod,
 				struct rsnd_dai_stream *io)
 {
-	u32 val = rsnd_io_to_mod_dvc(io) ? 0x01 : 0x11;
+	struct rsnd_src *src = rsnd_mod_to_src(mod);
+	u32 val;
+
+	val = rsnd_get_dalign(mod, io);
+
+	rsnd_mod_write(mod, SRC_BUSIF_DALIGN, val);
+
+	/*
+	 * WORKAROUND
+	 *
+	 * Enable SRC output if you want to use sync convert together with DVC
+	 */
+	val = (rsnd_io_to_mod_dvc(io) && !rsnd_enable_sync_convert(src)) ?
+		0x01 : 0x11;
 
 	rsnd_mod_write(mod, SRC_CTRL, val);
 
@@ -922,13 +944,6 @@
 		return 0;
 
 	/*
-	 * We can't use SRC sync convert
-	 * if it has DVC
-	 */
-	if (rsnd_io_to_mod_dvc(io))
-		return 0;
-
-	/*
 	 * enable sync convert
 	 */
 	ret = rsnd_kctrl_new_s(mod, io, rtd,
@@ -1047,10 +1062,8 @@
 		return 0;
 
 	src	= devm_kzalloc(dev, sizeof(*src) * nr, GFP_KERNEL);
-	if (!src) {
-		dev_err(dev, "SRC allocate failed\n");
+	if (!src)
 		return -ENOMEM;
-	}
 
 	priv->src_nr	= nr;
 	priv->src	= src;
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 2fbe59f..d45b9a7 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -770,10 +770,8 @@
 	 */
 	nr	= info->ssi_info_nr;
 	ssi	= devm_kzalloc(dev, sizeof(*ssi) * nr, GFP_KERNEL);
-	if (!ssi) {
-		dev_err(dev, "SSI allocate failed\n");
+	if (!ssi)
 		return -ENOMEM;
-	}
 
 	priv->ssi	= ssi;
 	priv->ssi_nr	= nr;
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index ab13146..89ed1b1 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -385,14 +385,9 @@
 
 static int sh4_soc_dai_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
-					  sh4_ssi_dai, ARRAY_SIZE(sh4_ssi_dai));
-}
-
-static int sh4_soc_dai_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_component(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_component(&pdev->dev, &sh4_ssi_component,
+					       sh4_ssi_dai,
+					       ARRAY_SIZE(sh4_ssi_dai));
 }
 
 static struct platform_driver sh4_ssi_driver = {
@@ -401,7 +396,6 @@
 	},
 
 	.probe = sh4_soc_dai_probe,
-	.remove = sh4_soc_dai_remove,
 };
 
 module_platform_driver(sh4_ssi_driver);
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
index 08d7259..d40efc9 100644
--- a/sound/soc/soc-ac97.c
+++ b/sound/soc/soc-ac97.c
@@ -85,10 +85,19 @@
 /**
  * snd_soc_new_ac97_codec - initialise AC97 device
  * @codec: audio codec
+ * @id: The expected device ID
+ * @id_mask: Mask that is applied to the device ID before comparing with @id
  *
  * Initialises AC97 codec resources for use by ad-hoc devices only.
+ *
+ * If @id is not 0, this function will reset the device, then read the ID from
+ * the device and check if it matches the expected ID. If it doesn't match, an
+ * error will be returned and the device will not be registered.
+ *
+ * Returns: A PTR_ERR() on failure or a valid snd_ac97 struct on success.
  */
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
+	unsigned int id, unsigned int id_mask)
 {
 	struct snd_ac97 *ac97;
 	int ret;
@@ -97,13 +106,24 @@
 	if (IS_ERR(ac97))
 		return ac97;
 
-	ret = device_add(&ac97->dev);
-	if (ret) {
-		put_device(&ac97->dev);
-		return ERR_PTR(ret);
+	if (id) {
+		ret = snd_ac97_reset(ac97, false, id, id_mask);
+		if (ret < 0) {
+			dev_err(codec->dev, "Failed to reset AC97 device: %d\n",
+				ret);
+			goto err_put_device;
+		}
 	}
 
+	ret = device_add(&ac97->dev);
+	if (ret)
+		goto err_put_device;
+
 	return ac97;
+
+err_put_device:
+	put_device(&ac97->dev);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
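As a hedged illustration (not part of this patch): with the extended API above a codec driver can pass its expected device ID and mask so the core resets the part and verifies the ID before registering it; passing 0 as @id keeps the old behaviour. The probe function, ID and mask below are hypothetical names and values.

#include <linux/err.h>
#include <sound/soc.h>

/* Hypothetical example ID/mask -- not taken from this patch. */
#define MY_AC97_ID	0x414c4700
#define MY_AC97_ID_MASK	0xffffff00

static int my_codec_probe(struct snd_soc_codec *codec)
{
	struct snd_ac97 *ac97;

	/* Reset the device and verify its ID before it is registered. */
	ac97 = snd_soc_new_ac97_codec(codec, MY_AC97_ID, MY_AC97_ID_MASK);
	if (IS_ERR(ac97))
		return PTR_ERR(ac97);

	snd_soc_codec_set_drvdata(codec, ac97);
	return 0;
}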
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0e1e69c..6173d15 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -654,10 +654,12 @@
 
 	/* suspend all CODECs */
 	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
+		struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
 		/* If there are paths active then the CODEC will be held with
 		 * bias _ON and should not be suspended. */
 		if (!codec->suspended) {
-			switch (codec->dapm.bias_level) {
+			switch (snd_soc_dapm_get_bias_level(dapm)) {
 			case SND_SOC_BIAS_STANDBY:
 				/*
 				 * If the CODEC is capable of idle
@@ -665,7 +667,7 @@
 				 * means it's doing something,
 				 * otherwise fall through.
 				 */
-				if (codec->dapm.idle_bias_off) {
+				if (dapm->idle_bias_off) {
 					dev_dbg(codec->dev,
 						"ASoC: idle_bias_off CODEC on over suspend\n");
 					break;
@@ -978,7 +980,7 @@
 
 static void soc_remove_component(struct snd_soc_component *component)
 {
-	if (!component->probed)
+	if (!component->card)
 		return;
 
 	/* This is a HACK and will be removed soon */
@@ -991,7 +993,7 @@
 	snd_soc_dapm_free(snd_soc_component_get_dapm(component));
 
 	soc_cleanup_component_debugfs(component);
-	component->probed = 0;
+	component->card = NULL;
 	module_put(component->dev->driver->owner);
 }
 
@@ -1102,16 +1104,26 @@
 	struct snd_soc_dai *dai;
 	int ret;
 
-	if (component->probed)
+	if (!strcmp(component->name, "snd-soc-dummy"))
 		return 0;
 
+	if (component->card) {
+		if (component->card != card) {
+			dev_err(component->dev,
+				"Trying to bind component to card \"%s\" but is already bound to card \"%s\"\n",
+				card->name, component->card->name);
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	if (!try_module_get(component->dev->driver->owner))
+		return -ENODEV;
+
 	component->card = card;
 	dapm->card = card;
 	soc_set_name_prefix(card, component);
 
-	if (!try_module_get(component->dev->driver->owner))
-		return -ENODEV;
-
 	soc_init_component_debugfs(component);
 
 	if (component->dapm_widgets) {
@@ -1155,7 +1167,6 @@
 		snd_soc_dapm_add_routes(dapm, component->dapm_routes,
 					component->num_dapm_routes);
 
-	component->probed = 1;
 	list_add(&dapm->list, &card->dapm_list);
 
 	/* This is a HACK and will be removed soon */
@@ -1166,6 +1177,7 @@
 
 err_probe:
 	soc_cleanup_component_debugfs(component);
+	component->card = NULL;
 	module_put(component->dev->driver->owner);
 
 	return ret;
@@ -1449,7 +1461,7 @@
 		rtd->dev_registered = 0;
 	}
 
-	if (component && component->probed)
+	if (component)
 		soc_remove_component(component);
 }
 
@@ -2128,7 +2140,7 @@
 /**
  * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
  * @dai: DAI
- * @ratio Ratio of BCLK to Sample rate.
+ * @ratio: Ratio of BCLK to Sample rate.
  *
  * Configures the DAI for a preset BCLK to sample rate ratio.
  */
@@ -2652,10 +2664,7 @@
 	component->probe = component->driver->probe;
 	component->remove = component->driver->remove;
 
-	if (!component->dapm_ptr)
-		component->dapm_ptr = &component->dapm;
-
-	dapm = component->dapm_ptr;
+	dapm = &component->dapm;
 	dapm->dev = dev;
 	dapm->component = component;
 	dapm->bias_level = SND_SOC_BIAS_OFF;
@@ -2799,6 +2808,7 @@
 /**
  * snd_soc_unregister_component - Unregister a component from the ASoC core
  *
+ * @dev: The device to unregister
  */
 void snd_soc_unregister_component(struct device *dev)
 {
@@ -2839,7 +2849,7 @@
  * snd_soc_add_platform - Add a platform to the ASoC core
  * @dev: The parent device for the platform
  * @platform: The platform to add
- * @platform_driver: The driver for the platform
+ * @platform_drv: The driver for the platform
  */
 int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
 		const struct snd_soc_platform_driver *platform_drv)
@@ -2878,7 +2888,8 @@
 /**
  * snd_soc_register_platform - Register a platform with the ASoC core
  *
- * @platform: platform to register
+ * @dev: The device for the platform
+ * @platform_drv: The driver for the platform
  */
 int snd_soc_register_platform(struct device *dev,
 		const struct snd_soc_platform_driver *platform_drv)
@@ -2939,7 +2950,7 @@
 /**
  * snd_soc_unregister_platform - Unregister a platform from the ASoC core
  *
- * @platform: platform to unregister
+ * @dev: platform to unregister
  */
 void snd_soc_unregister_platform(struct device *dev)
 {
@@ -3030,13 +3041,17 @@
 /**
  * snd_soc_register_codec - Register a codec with the ASoC core
  *
- * @codec: codec to register
+ * @dev: The parent device for this codec
+ * @codec_drv: Codec driver
+ * @dai_drv: The associated DAI driver
+ * @num_dai: Number of DAIs
  */
 int snd_soc_register_codec(struct device *dev,
 			   const struct snd_soc_codec_driver *codec_drv,
 			   struct snd_soc_dai_driver *dai_drv,
 			   int num_dai)
 {
+	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_codec *codec;
 	struct snd_soc_dai *dai;
 	int ret, i;
@@ -3047,7 +3062,6 @@
 	if (codec == NULL)
 		return -ENOMEM;
 
-	codec->component.dapm_ptr = &codec->dapm;
 	codec->component.codec = codec;
 
 	ret = snd_soc_component_initialize(&codec->component,
@@ -3077,12 +3091,14 @@
 	if (codec_drv->read)
 		codec->component.read = snd_soc_codec_drv_read;
 	codec->component.ignore_pmdown_time = codec_drv->ignore_pmdown_time;
-	codec->dapm.idle_bias_off = codec_drv->idle_bias_off;
-	codec->dapm.suspend_bias_off = codec_drv->suspend_bias_off;
+
+	dapm = snd_soc_codec_get_dapm(codec);
+	dapm->idle_bias_off = codec_drv->idle_bias_off;
+	dapm->suspend_bias_off = codec_drv->suspend_bias_off;
 	if (codec_drv->seq_notifier)
-		codec->dapm.seq_notifier = codec_drv->seq_notifier;
+		dapm->seq_notifier = codec_drv->seq_notifier;
 	if (codec_drv->set_bias_level)
-		codec->dapm.set_bias_level = snd_soc_codec_set_bias_level;
+		dapm->set_bias_level = snd_soc_codec_set_bias_level;
 	codec->dev = dev;
 	codec->driver = codec_drv;
 	codec->component.val_bytes = codec_drv->reg_word_size;
@@ -3129,7 +3145,7 @@
 /**
  * snd_soc_unregister_codec - Unregister a codec from the ASoC core
  *
- * @codec: codec to unregister
+ * @dev: codec to unregister
  */
 void snd_soc_unregister_codec(struct device *dev)
 {
@@ -3304,6 +3320,26 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
 
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+				   struct snd_soc_codec_conf *codec_conf,
+				   struct device_node *of_node,
+				   const char *propname)
+{
+	struct device_node *np = card->dev->of_node;
+	const char *str;
+	int ret;
+
+	ret = of_property_read_string(np, propname, &str);
+	if (ret < 0) {
+		/* a missing prefix is not an error */
+		return;
+	}
+
+	codec_conf->of_node	= of_node;
+	codec_conf->name_prefix	= str;
+}
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
+
 int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
 				   const char *propname)
 {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index e0de807..f4bf21a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -47,6 +47,13 @@
 
 #define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;
 
+#define SND_SOC_DAPM_DIR_REVERSE(x) ((x == SND_SOC_DAPM_DIR_IN) ? \
+	SND_SOC_DAPM_DIR_OUT : SND_SOC_DAPM_DIR_IN)
+
+#define snd_soc_dapm_for_each_direction(dir) \
+	for ((dir) = SND_SOC_DAPM_DIR_IN; (dir) <= SND_SOC_DAPM_DIR_OUT; \
+		(dir)++)
+
 static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
 	const char *control,
@@ -167,45 +174,59 @@
 }
 
 /*
- * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input
- *  paths
- * @w: The widget for which to invalidate the cached number of input paths
- *
- * The function resets the cached number of inputs for the specified widget and
- * all widgets that can be reached via outgoing paths from the widget.
- *
- * This function must be called if the number of input paths for a widget might
- * have changed. E.g. if the source state of a widget changes or a path is added
- * or activated with the widget as the sink.
+ * Common implementation for dapm_widget_invalidate_input_paths() and
+ * dapm_widget_invalidate_output_paths(). The function is inlined since the
+ * combined size of the two specialized functions is only marginally larger than
+ * the size of the generic function and at the same time the fast path of the
+ * specialized functions is significantly smaller than the generic function.
  */
-static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+static __always_inline void dapm_widget_invalidate_paths(
+	struct snd_soc_dapm_widget *w, enum snd_soc_dapm_direction dir)
 {
-	struct snd_soc_dapm_widget *sink;
+	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+	struct snd_soc_dapm_widget *node;
 	struct snd_soc_dapm_path *p;
 	LIST_HEAD(list);
 
 	dapm_assert_locked(w->dapm);
 
-	if (w->inputs == -1)
+	if (w->endpoints[dir] == -1)
 		return;
 
-	w->inputs = -1;
 	list_add_tail(&w->work_list, &list);
+	w->endpoints[dir] = -1;
 
 	list_for_each_entry(w, &list, work_list) {
-		list_for_each_entry(p, &w->sinks, list_source) {
+		snd_soc_dapm_widget_for_each_path(w, dir, p) {
 			if (p->is_supply || p->weak || !p->connect)
 				continue;
-			sink = p->sink;
-			if (sink->inputs != -1) {
-				sink->inputs = -1;
-				list_add_tail(&sink->work_list, &list);
+			node = p->node[rdir];
+			if (node->endpoints[dir] != -1) {
+				node->endpoints[dir] = -1;
+				list_add_tail(&node->work_list, &list);
 			}
 		}
 	}
 }
 
 /*
+ * dapm_widget_invalidate_input_paths() - Invalidate the cached number of
+ *  input paths
+ * @w: The widget for which to invalidate the cached number of input paths
+ *
+ * Resets the cached number of inputs for the specified widget and all widgets
+ * that can be reached via outgoing paths from the widget.
+ *
+ * This function must be called if the number of input paths for a widget might
+ * have changed. E.g. if the source state of a widget changes or a path is added
+ * or activated with the widget as the sink.
+ */
+static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
+{
+	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_IN);
+}
+
+/*
  * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
  *  output paths
  * @w: The widget for which to invalidate the cached number of output paths
@@ -219,29 +240,7 @@
  */
 static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
 {
-	struct snd_soc_dapm_widget *source;
-	struct snd_soc_dapm_path *p;
-	LIST_HEAD(list);
-
-	dapm_assert_locked(w->dapm);
-
-	if (w->outputs == -1)
-		return;
-
-	w->outputs = -1;
-	list_add_tail(&w->work_list, &list);
-
-	list_for_each_entry(w, &list, work_list) {
-		list_for_each_entry(p, &w->sources, list_sink) {
-			if (p->is_supply || p->weak || !p->connect)
-				continue;
-			source = p->source;
-			if (source->outputs != -1) {
-				source->outputs = -1;
-				list_add_tail(&source->work_list, &list);
-			}
-		}
-	}
+	dapm_widget_invalidate_paths(w, SND_SOC_DAPM_DIR_OUT);
 }
 
 /*
@@ -270,9 +269,9 @@
 	 * endpoints is either connected or disconnected that sum won't change,
 	 * so there is no need to re-check the path.
 	 */
-	if (p->source->inputs != 0)
+	if (p->source->endpoints[SND_SOC_DAPM_DIR_IN] != 0)
 		dapm_widget_invalidate_input_paths(p->sink);
-	if (p->sink->outputs != 0)
+	if (p->sink->endpoints[SND_SOC_DAPM_DIR_OUT] != 0)
 		dapm_widget_invalidate_output_paths(p->source);
 }
 
@@ -283,11 +282,11 @@
 	mutex_lock(&card->dapm_mutex);
 
 	list_for_each_entry(w, &card->widgets, list) {
-		if (w->is_sink || w->is_source) {
+		if (w->is_ep) {
 			dapm_mark_dirty(w, "Rechecking endpoints");
-			if (w->is_sink)
+			if (w->is_ep & SND_SOC_DAPM_EP_SINK)
 				dapm_widget_invalidate_output_paths(w);
-			if (w->is_source)
+			if (w->is_ep & SND_SOC_DAPM_EP_SOURCE)
 				dapm_widget_invalidate_input_paths(w);
 		}
 	}
@@ -894,7 +893,7 @@
 	/* add kcontrol */
 	for (i = 0; i < w->num_kcontrols; i++) {
 		/* match name */
-		list_for_each_entry(path, &w->sources, list_sink) {
+		snd_soc_dapm_widget_for_each_source_path(w, path) {
 			/* mixer/mux paths name must match control name */
 			if (path->name != (char *)w->kcontrol_news[i].name)
 				continue;
@@ -923,18 +922,18 @@
 static int dapm_new_mux(struct snd_soc_dapm_widget *w)
 {
 	struct snd_soc_dapm_context *dapm = w->dapm;
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *path;
-	struct list_head *paths;
 	const char *type;
 	int ret;
 
 	switch (w->id) {
 	case snd_soc_dapm_mux:
-		paths = &w->sources;
+		dir = SND_SOC_DAPM_DIR_OUT;
 		type = "mux";
 		break;
 	case snd_soc_dapm_demux:
-		paths = &w->sinks;
+		dir = SND_SOC_DAPM_DIR_IN;
 		type = "demux";
 		break;
 	default:
@@ -948,7 +947,7 @@
 		return -EINVAL;
 	}
 
-	if (list_empty(paths)) {
+	if (list_empty(&w->edges[dir])) {
 		dev_err(dapm->dev, "ASoC: %s %s has no paths\n", type, w->name);
 		return -EINVAL;
 	}
@@ -957,16 +956,9 @@
 	if (ret < 0)
 		return ret;
 
-	if (w->id == snd_soc_dapm_mux) {
-		list_for_each_entry(path, &w->sources, list_sink) {
-			if (path->name)
-				dapm_kcontrol_add_path(w->kcontrols[0], path);
-		}
-	} else {
-		list_for_each_entry(path, &w->sinks, list_source) {
-			if (path->name)
-				dapm_kcontrol_add_path(w->kcontrols[0], path);
-		}
+	snd_soc_dapm_widget_for_each_path(w, dir, path) {
+		if (path->name)
+			dapm_kcontrol_add_path(w->kcontrols[0], path);
 	}
 
 	return 0;
@@ -1032,43 +1024,79 @@
 	}
 }
 
-/* add widget to list if it's not already in the list */
-static int dapm_list_add_widget(struct snd_soc_dapm_widget_list **list,
-	struct snd_soc_dapm_widget *w)
+static int dapm_widget_list_create(struct snd_soc_dapm_widget_list **list,
+	struct list_head *widgets)
 {
-	struct snd_soc_dapm_widget_list *wlist;
-	int wlistsize, wlistentries, i;
+	struct snd_soc_dapm_widget *w;
+	struct list_head *it;
+	unsigned int size = 0;
+	unsigned int i = 0;
 
+	list_for_each(it, widgets)
+		size++;
+
+	*list = kzalloc(sizeof(**list) + size * sizeof(*w), GFP_KERNEL);
 	if (*list == NULL)
-		return -EINVAL;
-
-	wlist = *list;
-
-	/* is this widget already in the list */
-	for (i = 0; i < wlist->num_widgets; i++) {
-		if (wlist->widgets[i] == w)
-			return 0;
-	}
-
-	/* allocate some new space */
-	wlistentries = wlist->num_widgets + 1;
-	wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
-			wlistentries * sizeof(struct snd_soc_dapm_widget *);
-	*list = krealloc(wlist, wlistsize, GFP_KERNEL);
-	if (*list == NULL) {
-		dev_err(w->dapm->dev, "ASoC: can't allocate widget list for %s\n",
-			w->name);
 		return -ENOMEM;
+
+	list_for_each_entry(w, widgets, work_list)
+		(*list)->widgets[i++] = w;
+
+	(*list)->num_widgets = i;
+
+	return 0;
+}
+
+/*
+ * Common implementation for is_connected_output_ep() and
+ * is_connected_input_ep(). The function is inlined since the combined size of
+ * the two specialized functions is only marginally larger then the size of the
+ * generic function and at the same time the fast path of the specialized
+ * functions is significantly smaller than the generic function.
+ */
+static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
+	struct list_head *list, enum snd_soc_dapm_direction dir,
+	int (*fn)(struct snd_soc_dapm_widget *, struct list_head *))
+{
+	enum snd_soc_dapm_direction rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+	struct snd_soc_dapm_path *path;
+	int con = 0;
+
+	if (widget->endpoints[dir] >= 0)
+		return widget->endpoints[dir];
+
+	DAPM_UPDATE_STAT(widget, path_checks);
+
+	/* do we need to add this widget to the list ? */
+	if (list)
+		list_add_tail(&widget->work_list, list);
+
+	if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
+		widget->endpoints[dir] = snd_soc_dapm_suspend_check(widget);
+		return widget->endpoints[dir];
 	}
-	wlist = *list;
 
-	/* insert the widget */
-	dev_dbg(w->dapm->dev, "ASoC: added %s in widget list pos %d\n",
-			w->name, wlist->num_widgets);
+	snd_soc_dapm_widget_for_each_path(widget, rdir, path) {
+		DAPM_UPDATE_STAT(widget, neighbour_checks);
 
-	wlist->widgets[wlist->num_widgets] = w;
-	wlist->num_widgets++;
-	return 1;
+		if (path->weak || path->is_supply)
+			continue;
+
+		if (path->walking)
+			return 1;
+
+		trace_snd_soc_dapm_path(widget, dir, path);
+
+		if (path->connect) {
+			path->walking = 1;
+			con += fn(path->node[dir], list);
+			path->walking = 0;
+		}
+	}
+
+	widget->endpoints[dir] = con;
+
+	return con;
 }
 
 /*
@@ -1076,57 +1104,10 @@
  * output widget. Returns number of complete paths.
  */
 static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
-	struct snd_soc_dapm_widget_list **list)
+	struct list_head *list)
 {
-	struct snd_soc_dapm_path *path;
-	int con = 0;
-
-	if (widget->outputs >= 0)
-		return widget->outputs;
-
-	DAPM_UPDATE_STAT(widget, path_checks);
-
-	if (widget->is_sink && widget->connected) {
-		widget->outputs = snd_soc_dapm_suspend_check(widget);
-		return widget->outputs;
-	}
-
-	list_for_each_entry(path, &widget->sinks, list_source) {
-		DAPM_UPDATE_STAT(widget, neighbour_checks);
-
-		if (path->weak || path->is_supply)
-			continue;
-
-		if (path->walking)
-			return 1;
-
-		trace_snd_soc_dapm_output_path(widget, path);
-
-		if (path->connect) {
-			path->walking = 1;
-
-			/* do we need to add this widget to the list ? */
-			if (list) {
-				int err;
-				err = dapm_list_add_widget(list, path->sink);
-				if (err < 0) {
-					dev_err(widget->dapm->dev,
-						"ASoC: could not add widget %s\n",
-						widget->name);
-					path->walking = 0;
-					return con;
-				}
-			}
-
-			con += is_connected_output_ep(path->sink, list);
-
-			path->walking = 0;
-		}
-	}
-
-	widget->outputs = con;
-
-	return con;
+	return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_OUT,
+			is_connected_output_ep);
 }
 
 /*
@@ -1134,57 +1115,10 @@
  * input widget. Returns number of complete paths.
  */
 static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
-	struct snd_soc_dapm_widget_list **list)
+	struct list_head *list)
 {
-	struct snd_soc_dapm_path *path;
-	int con = 0;
-
-	if (widget->inputs >= 0)
-		return widget->inputs;
-
-	DAPM_UPDATE_STAT(widget, path_checks);
-
-	if (widget->is_source && widget->connected) {
-		widget->inputs = snd_soc_dapm_suspend_check(widget);
-		return widget->inputs;
-	}
-
-	list_for_each_entry(path, &widget->sources, list_sink) {
-		DAPM_UPDATE_STAT(widget, neighbour_checks);
-
-		if (path->weak || path->is_supply)
-			continue;
-
-		if (path->walking)
-			return 1;
-
-		trace_snd_soc_dapm_input_path(widget, path);
-
-		if (path->connect) {
-			path->walking = 1;
-
-			/* do we need to add this widget to the list ? */
-			if (list) {
-				int err;
-				err = dapm_list_add_widget(list, path->source);
-				if (err < 0) {
-					dev_err(widget->dapm->dev,
-						"ASoC: could not add widget %s\n",
-						widget->name);
-					path->walking = 0;
-					return con;
-				}
-			}
-
-			con += is_connected_input_ep(path->source, list);
-
-			path->walking = 0;
-		}
-	}
-
-	widget->inputs = con;
-
-	return con;
+	return is_connected_ep(widget, list, SND_SOC_DAPM_DIR_IN,
+			is_connected_input_ep);
 }
 
 /**
@@ -1204,7 +1138,9 @@
 {
 	struct snd_soc_card *card = dai->component->card;
 	struct snd_soc_dapm_widget *w;
+	LIST_HEAD(widgets);
 	int paths;
+	int ret;
 
 	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
@@ -1213,14 +1149,21 @@
 	 * to reset the cached number of inputs and outputs.
 	 */
 	list_for_each_entry(w, &card->widgets, list) {
-		w->inputs = -1;
-		w->outputs = -1;
+		w->endpoints[SND_SOC_DAPM_DIR_IN] = -1;
+		w->endpoints[SND_SOC_DAPM_DIR_OUT] = -1;
 	}
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
-		paths = is_connected_output_ep(dai->playback_widget, list);
+		paths = is_connected_output_ep(dai->playback_widget, &widgets);
 	else
-		paths = is_connected_input_ep(dai->capture_widget, list);
+		paths = is_connected_input_ep(dai->capture_widget, &widgets);
+
+	/* Drop starting point */
+	list_del(widgets.next);
+
+	ret = dapm_widget_list_create(list, &widgets);
+	if (ret)
+		paths = ret;
 
 	trace_snd_soc_dapm_connected(paths, stream);
 	mutex_unlock(&card->dapm_mutex);
@@ -1321,7 +1264,7 @@
 	DAPM_UPDATE_STAT(w, power_checks);
 
 	/* Check if one of our outputs is connected */
-	list_for_each_entry(path, &w->sinks, list_source) {
+	snd_soc_dapm_widget_for_each_sink_path(w, path) {
 		DAPM_UPDATE_STAT(w, neighbour_checks);
 
 		if (path->weak)
@@ -1745,12 +1688,12 @@
 	/* If we changed our power state perhaps our neighbours changed
 	 * also.
 	 */
-	list_for_each_entry(path, &w->sources, list_sink)
+	snd_soc_dapm_widget_for_each_source_path(w, path)
 		dapm_widget_set_peer_power(path->source, power, path->connect);
 
 	/* Supplies can't affect their outputs, only their inputs */
 	if (!w->is_supply) {
-		list_for_each_entry(path, &w->sinks, list_source)
+		snd_soc_dapm_widget_for_each_sink_path(w, path)
 			dapm_widget_set_peer_power(path->sink, power,
 						   path->connect);
 	}
@@ -1951,6 +1894,7 @@
 {
 	struct snd_soc_dapm_widget *w = file->private_data;
 	struct snd_soc_card *card = w->dapm->card;
+	enum snd_soc_dapm_direction dir, rdir;
 	char *buf;
 	int in, out;
 	ssize_t ret;
@@ -1987,25 +1931,21 @@
 				w->sname,
 				w->active ? "active" : "inactive");
 
-	list_for_each_entry(p, &w->sources, list_sink) {
-		if (p->connected && !p->connected(w, p->source))
-			continue;
+	snd_soc_dapm_for_each_direction(dir) {
+		rdir = SND_SOC_DAPM_DIR_REVERSE(dir);
+		snd_soc_dapm_widget_for_each_path(w, dir, p) {
+			if (p->connected && !p->connected(w, p->node[rdir]))
+				continue;
 
-		if (p->connect)
-			ret += snprintf(buf + ret, PAGE_SIZE - ret,
-					" in  \"%s\" \"%s\"\n",
-					p->name ? p->name : "static",
-					p->source->name);
-	}
-	list_for_each_entry(p, &w->sinks, list_source) {
-		if (p->connected && !p->connected(w, p->sink))
-			continue;
+			if (!p->connect)
+				continue;
 
-		if (p->connect)
 			ret += snprintf(buf + ret, PAGE_SIZE - ret,
-					" out \"%s\" \"%s\"\n",
+					" %s  \"%s\" \"%s\"\n",
+					(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
 					p->name ? p->name : "static",
-					p->sink->name);
+					p->node[rdir]->name);
+		}
 	}
 
 	mutex_unlock(&card->dapm_mutex);
@@ -2223,14 +2163,16 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
 
-static ssize_t dapm_widget_show_codec(struct snd_soc_codec *codec, char *buf)
+static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
+	char *buf)
 {
+	struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(cmpnt);
 	struct snd_soc_dapm_widget *w;
 	int count = 0;
 	char *state = "not set";
 
-	list_for_each_entry(w, &codec->component.card->widgets, list) {
-		if (w->dapm != &codec->dapm)
+	list_for_each_entry(w, &cmpnt->card->widgets, list) {
+		if (w->dapm != dapm)
 			continue;
 
 		/* only display widgets that burn power */
@@ -2258,7 +2200,7 @@
 		}
 	}
 
-	switch (codec->dapm.bias_level) {
+	switch (snd_soc_dapm_get_bias_level(dapm)) {
 	case SND_SOC_BIAS_ON:
 		state = "On";
 		break;
@@ -2287,8 +2229,9 @@
 	mutex_lock(&rtd->card->dapm_mutex);
 
 	for (i = 0; i < rtd->num_codecs; i++) {
-		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
-		count += dapm_widget_show_codec(codec, buf + count);
+		struct snd_soc_component *cmpnt = rtd->codec_dais[i]->component;
+
+		count += dapm_widget_show_component(cmpnt, buf + count);
 	}
 
 	mutex_unlock(&rtd->card->dapm_mutex);
@@ -2305,37 +2248,43 @@
 
 static void dapm_free_path(struct snd_soc_dapm_path *path)
 {
-	list_del(&path->list_sink);
-	list_del(&path->list_source);
+	list_del(&path->list_node[SND_SOC_DAPM_DIR_IN]);
+	list_del(&path->list_node[SND_SOC_DAPM_DIR_OUT]);
 	list_del(&path->list_kcontrol);
 	list_del(&path->list);
 	kfree(path);
 }
 
+void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
+{
+	struct snd_soc_dapm_path *p, *next_p;
+	enum snd_soc_dapm_direction dir;
+
+	list_del(&w->list);
+	/*
+	 * remove source and sink paths associated to this widget.
+	 * While removing the path, remove reference to it from both
+	 * source and sink widgets so that path is removed only once.
+	 */
+	snd_soc_dapm_for_each_direction(dir) {
+		snd_soc_dapm_widget_for_each_path_safe(w, dir, p, next_p)
+			dapm_free_path(p);
+	}
+
+	kfree(w->kcontrols);
+	kfree_const(w->name);
+	kfree(w);
+}
+
 /* free all dapm widgets and resources */
 static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
-	struct snd_soc_dapm_path *p, *next_p;
 
 	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
 		if (w->dapm != dapm)
 			continue;
-		list_del(&w->list);
-		/*
-		 * remove source and sink paths associated to this widget.
-		 * While removing the path, remove reference to it from both
-		 * source and sink widgets so that path is removed only once.
-		 */
-		list_for_each_entry_safe(p, next_p, &w->sources, list_sink)
-			dapm_free_path(p);
-
-		list_for_each_entry_safe(p, next_p, &w->sinks, list_source)
-			dapm_free_path(p);
-
-		kfree(w->kcontrols);
-		kfree(w->name);
-		kfree(w);
+		snd_soc_dapm_free_widget(w);
 	}
 }
 
@@ -2441,20 +2390,22 @@
  */
 static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
 {
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *p;
+	unsigned int ep;
 
 	switch (w->id) {
 	case snd_soc_dapm_input:
 		/* On a fully routed card an input is never a source */
 		if (w->dapm->card->fully_routed)
-			break;
-		w->is_source = 1;
-		list_for_each_entry(p, &w->sources, list_sink) {
+			return;
+		ep = SND_SOC_DAPM_EP_SOURCE;
+		snd_soc_dapm_widget_for_each_source_path(w, p) {
 			if (p->source->id == snd_soc_dapm_micbias ||
 				p->source->id == snd_soc_dapm_mic ||
 				p->source->id == snd_soc_dapm_line ||
 				p->source->id == snd_soc_dapm_output) {
-					w->is_source = 0;
+					ep = 0;
 					break;
 			}
 		}
@@ -2462,25 +2413,30 @@
 	case snd_soc_dapm_output:
 		/* On a fully routed card an output is never a sink */
 		if (w->dapm->card->fully_routed)
-			break;
-		w->is_sink = 1;
-		list_for_each_entry(p, &w->sinks, list_source) {
+			return;
+		ep = SND_SOC_DAPM_EP_SINK;
+		snd_soc_dapm_widget_for_each_sink_path(w, p) {
 			if (p->sink->id == snd_soc_dapm_spk ||
 				p->sink->id == snd_soc_dapm_hp ||
 				p->sink->id == snd_soc_dapm_line ||
 				p->sink->id == snd_soc_dapm_input) {
-					w->is_sink = 0;
+					ep = 0;
 					break;
 			}
 		}
 		break;
 	case snd_soc_dapm_line:
-		w->is_sink = !list_empty(&w->sources);
-		w->is_source = !list_empty(&w->sinks);
+		ep = 0;
+		snd_soc_dapm_for_each_direction(dir) {
+			if (!list_empty(&w->edges[dir]))
+				ep |= SND_SOC_DAPM_DIR_TO_EP(dir);
+		}
 		break;
 	default:
-		break;
+		return;
 	}
+
+	w->is_ep = ep;
 }
 
 static int snd_soc_dapm_check_dynamic_path(struct snd_soc_dapm_context *dapm,
@@ -2533,6 +2489,8 @@
 	int (*connected)(struct snd_soc_dapm_widget *source,
 			 struct snd_soc_dapm_widget *sink))
 {
+	struct snd_soc_dapm_widget *widgets[2];
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_path *path;
 	int ret;
 
@@ -2565,13 +2523,14 @@
 	if (!path)
 		return -ENOMEM;
 
-	path->source = wsource;
-	path->sink = wsink;
+	path->node[SND_SOC_DAPM_DIR_IN] = wsource;
+	path->node[SND_SOC_DAPM_DIR_OUT] = wsink;
+	widgets[SND_SOC_DAPM_DIR_IN] = wsource;
+	widgets[SND_SOC_DAPM_DIR_OUT] = wsink;
+
 	path->connected = connected;
 	INIT_LIST_HEAD(&path->list);
 	INIT_LIST_HEAD(&path->list_kcontrol);
-	INIT_LIST_HEAD(&path->list_source);
-	INIT_LIST_HEAD(&path->list_sink);
 
 	if (wsource->is_supply || wsink->is_supply)
 		path->is_supply = 1;
@@ -2609,14 +2568,13 @@
 	}
 
 	list_add(&path->list, &dapm->card->paths);
-	list_add(&path->list_sink, &wsink->sources);
-	list_add(&path->list_source, &wsource->sinks);
+	snd_soc_dapm_for_each_direction(dir)
+		list_add(&path->list_node[dir], &widgets[dir]->edges[dir]);
 
-	dapm_update_widget_flags(wsource);
-	dapm_update_widget_flags(wsink);
-
-	dapm_mark_dirty(wsource, "Route added");
-	dapm_mark_dirty(wsink, "Route added");
+	snd_soc_dapm_for_each_direction(dir) {
+		dapm_update_widget_flags(widgets[dir]);
+		dapm_mark_dirty(widgets[dir], "Route added");
+	}
 
 	if (dapm->card->instantiated && path->connect)
 		dapm_path_invalidate(path);
@@ -2864,7 +2822,7 @@
 		dev_warn(dapm->dev, "ASoC: Ignoring control for weak route %s->%s\n",
 			 route->source, route->sink);
 
-	list_for_each_entry(path, &source->sinks, list_source) {
+	snd_soc_dapm_widget_for_each_sink_path(source, path) {
 		if (path->sink == sink) {
 			path->weak = 1;
 			count++;
@@ -2918,7 +2876,7 @@
 
 /**
  * snd_soc_dapm_new_widgets - add new dapm widgets
- * @dapm: DAPM context
+ * @card: card to be checked for new dapm widgets
  *
  * Checks the codec for any new dapm widgets and creates them if found.
  *
@@ -3298,6 +3256,7 @@
 snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
 			 const struct snd_soc_dapm_widget *widget)
 {
+	enum snd_soc_dapm_direction dir;
 	struct snd_soc_dapm_widget *w;
 	const char *prefix;
 	int ret;
@@ -3344,7 +3303,7 @@
 	if (prefix)
 		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
 	else
-		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
+		w->name = kstrdup_const(widget->name, GFP_KERNEL);
 	if (w->name == NULL) {
 		kfree(w);
 		return NULL;
@@ -3352,27 +3311,27 @@
 
 	switch (w->id) {
 	case snd_soc_dapm_mic:
-		w->is_source = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_input:
 		if (!dapm->card->fully_routed)
-			w->is_source = 1;
+			w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_spk:
 	case snd_soc_dapm_hp:
-		w->is_sink = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SINK;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_output:
 		if (!dapm->card->fully_routed)
-			w->is_sink = 1;
+			w->is_ep = SND_SOC_DAPM_EP_SINK;
 		w->power_check = dapm_generic_check_power;
 		break;
 	case snd_soc_dapm_vmid:
 	case snd_soc_dapm_siggen:
-		w->is_source = 1;
+		w->is_ep = SND_SOC_DAPM_EP_SOURCE;
 		w->power_check = dapm_always_on_check_power;
 		break;
 	case snd_soc_dapm_mux:
@@ -3406,14 +3365,14 @@
 	}
 
 	w->dapm = dapm;
-	INIT_LIST_HEAD(&w->sources);
-	INIT_LIST_HEAD(&w->sinks);
 	INIT_LIST_HEAD(&w->list);
 	INIT_LIST_HEAD(&w->dirty);
 	list_add_tail(&w->list, &dapm->card->widgets);
 
-	w->inputs = -1;
-	w->outputs = -1;
+	snd_soc_dapm_for_each_direction(dir) {
+		INIT_LIST_HEAD(&w->edges[dir]);
+		w->endpoints[dir] = -1;
+	}
 
 	/* machine layer set ups unconnected pins and insertions */
 	w->connected = 1;
@@ -3467,19 +3426,17 @@
 	int ret;
 
 	if (WARN_ON(!config) ||
-	    WARN_ON(list_empty(&w->sources) || list_empty(&w->sinks)))
+	    WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) ||
+		    list_empty(&w->edges[SND_SOC_DAPM_DIR_IN])))
 		return -EINVAL;
 
 	/* We only support a single source and sink, pick the first */
-	source_p = list_first_entry(&w->sources, struct snd_soc_dapm_path,
-				    list_sink);
-	sink_p = list_first_entry(&w->sinks, struct snd_soc_dapm_path,
-				  list_source);
-
-	if (WARN_ON(!source_p || !sink_p) ||
-	    WARN_ON(!sink_p->source || !source_p->sink) ||
-	    WARN_ON(!source_p->source || !sink_p->sink))
-		return -EINVAL;
+	source_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_OUT],
+				    struct snd_soc_dapm_path,
+				    list_node[SND_SOC_DAPM_DIR_OUT]);
+	sink_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_IN],
+				    struct snd_soc_dapm_path,
+				    list_node[SND_SOC_DAPM_DIR_IN]);
 
 	source = source_p->source->priv;
 	sink = sink_p->sink->priv;
@@ -3821,11 +3778,6 @@
 	for (i = 0; i < rtd->num_codecs; i++) {
 		struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
 
-		/* there is no point in connecting BE DAI links with dummies */
-		if (snd_soc_dai_is_dummy(codec_dai) ||
-			snd_soc_dai_is_dummy(cpu_dai))
-			continue;
-
 		/* connect BE DAI playback if widgets are valid */
 		if (codec_dai->playback_widget && cpu_dai->playback_widget) {
 			source = cpu_dai->playback_widget;
@@ -3856,6 +3808,7 @@
 	int event)
 {
 	struct snd_soc_dapm_widget *w;
+	unsigned int ep;
 
 	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
 		w = dai->playback_widget;
@@ -3865,12 +3818,22 @@
 	if (w) {
 		dapm_mark_dirty(w, "stream event");
 
+		if (w->id == snd_soc_dapm_dai_in) {
+			ep = SND_SOC_DAPM_EP_SOURCE;
+			dapm_widget_invalidate_input_paths(w);
+		} else {
+			ep = SND_SOC_DAPM_EP_SINK;
+			dapm_widget_invalidate_output_paths(w);
+		}
+
 		switch (event) {
 		case SND_SOC_DAPM_STREAM_START:
 			w->active = 1;
+			w->is_ep = ep;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
 			w->active = 0;
+			w->is_ep = 0;
 			break;
 		case SND_SOC_DAPM_STREAM_SUSPEND:
 		case SND_SOC_DAPM_STREAM_RESUME:
@@ -3878,14 +3841,6 @@
 		case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
 			break;
 		}
-
-		if (w->id == snd_soc_dapm_dai_in) {
-			w->is_source = w->active;
-			dapm_widget_invalidate_input_paths(w);
-		} else {
-			w->is_sink = w->active;
-			dapm_widget_invalidate_output_paths(w);
-		}
 	}
 }
 
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 256b9c9..70e4b9d 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1231,24 +1231,17 @@
 }
 
 int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
-	int stream, struct snd_soc_dapm_widget_list **list_)
+	int stream, struct snd_soc_dapm_widget_list **list)
 {
 	struct snd_soc_dai *cpu_dai = fe->cpu_dai;
-	struct snd_soc_dapm_widget_list *list;
 	int paths;
 
-	list = kzalloc(sizeof(struct snd_soc_dapm_widget_list) +
-			sizeof(struct snd_soc_dapm_widget *), GFP_KERNEL);
-	if (list == NULL)
-		return -ENOMEM;
-
 	/* get number of valid DAI paths and their widgets */
-	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, &list);
+	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list);
 
 	dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
 			stream ? "capture" : "playback");
 
-	*list_ = list;
 	return paths;
 }
 
@@ -1306,7 +1299,12 @@
 
 		switch (list->widgets[i]->id) {
 		case snd_soc_dapm_dai_in:
+			if (stream != SNDRV_PCM_STREAM_PLAYBACK)
+				continue;
+			break;
 		case snd_soc_dapm_dai_out:
+			if (stream != SNDRV_PCM_STREAM_CAPTURE)
+				continue;
 			break;
 		default:
 			continue;
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 31068b8..69d01cd 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -45,12 +45,12 @@
 #define SOC_TPLG_PASS_VENDOR		1
 #define SOC_TPLG_PASS_MIXER		2
 #define SOC_TPLG_PASS_WIDGET		3
-#define SOC_TPLG_PASS_GRAPH		4
-#define SOC_TPLG_PASS_PINS		5
-#define SOC_TPLG_PASS_PCM_DAI		6
+#define SOC_TPLG_PASS_PCM_DAI		4
+#define SOC_TPLG_PASS_GRAPH		5
+#define SOC_TPLG_PASS_PINS		6
 
 #define SOC_TPLG_PASS_START	SOC_TPLG_PASS_MANIFEST
-#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_PCM_DAI
+#define SOC_TPLG_PASS_END	SOC_TPLG_PASS_PINS
 
 struct soc_tplg {
 	const struct firmware *fw;
@@ -66,10 +66,14 @@
 	u32 index;	/* current block index */
 	u32 req_index;	/* required index, only loaded/free matching blocks */
 
-	/* kcontrol operations */
+	/* vendor specific kcontrol operations */
 	const struct snd_soc_tplg_kcontrol_ops *io_ops;
 	int io_ops_count;
 
+	/* vendor specific bytes ext handlers, for TLV bytes controls */
+	const struct snd_soc_tplg_bytes_ext_ops *bytes_ext_ops;
+	int bytes_ext_ops_count;
+
 	/* optional fw loading callbacks to component drivers */
 	struct snd_soc_tplg_ops *ops;
 };
@@ -508,19 +512,70 @@
 /* bind a kcontrol to its IO handlers */
 static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
 	struct snd_kcontrol_new *k,
-	const struct snd_soc_tplg_kcontrol_ops *ops, int num_ops,
-	const struct snd_soc_tplg_kcontrol_ops *bops, int num_bops)
+	const struct soc_tplg *tplg)
 {
-	int i;
+	const struct snd_soc_tplg_kcontrol_ops *ops;
+	const struct snd_soc_tplg_bytes_ext_ops *ext_ops;
+	int num_ops, i;
 
-	/* try and map standard kcontrols handler first */
+	if (hdr->ops.info == SND_SOC_TPLG_CTL_BYTES
+		&& k->iface & SNDRV_CTL_ELEM_IFACE_MIXER
+		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE
+		&& k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+		struct soc_bytes_ext *sbe;
+		struct snd_soc_tplg_bytes_control *be;
+
+		sbe = (struct soc_bytes_ext *)k->private_value;
+		be = container_of(hdr, struct snd_soc_tplg_bytes_control, hdr);
+
+		/* TLV bytes controls need standard kcontrol info handler,
+		 * TLV callback and extended put/get handlers.
+		 */
+		k->info = snd_soc_bytes_info;
+		k->tlv.c = snd_soc_bytes_tlv_callback;
+
+		ext_ops = tplg->bytes_ext_ops;
+		num_ops = tplg->bytes_ext_ops_count;
+		for (i = 0; i < num_ops; i++) {
+			if (!sbe->put && ext_ops[i].id == be->ext_ops.put)
+				sbe->put = ext_ops[i].put;
+			if (!sbe->get && ext_ops[i].id == be->ext_ops.get)
+				sbe->get = ext_ops[i].get;
+		}
+
+		if (sbe->put && sbe->get)
+			return 0;
+		else
+			return -EINVAL;
+	}
+
+	/* try and map vendor specific kcontrol handlers first */
+	ops = tplg->io_ops;
+	num_ops = tplg->io_ops_count;
 	for (i = 0; i < num_ops; i++) {
 
-		if (ops[i].id == hdr->ops.put)
+		if (k->put == NULL && ops[i].id == hdr->ops.put)
 			k->put = ops[i].put;
-		if (ops[i].id == hdr->ops.get)
+		if (k->get == NULL && ops[i].id == hdr->ops.get)
 			k->get = ops[i].get;
-		if (ops[i].id == hdr->ops.info)
+		if (k->info == NULL && ops[i].id == hdr->ops.info)
+			k->info = ops[i].info;
+	}
+
+	/* vendor specific handlers found ? */
+	if (k->put && k->get && k->info)
+		return 0;
+
+	/* none found so try standard kcontrol handlers */
+	ops = io_ops;
+	num_ops = ARRAY_SIZE(io_ops);
+	for (i = 0; i < num_ops; i++) {
+
+		if (k->put == NULL && ops[i].id == hdr->ops.put)
+			k->put = ops[i].put;
+		if (k->get == NULL && ops[i].id == hdr->ops.get)
+			k->get = ops[i].get;
+		if (k->info == NULL && ops[i].id == hdr->ops.info)
 			k->info = ops[i].info;
 	}
 
@@ -528,21 +583,6 @@
 	if (k->put && k->get && k->info)
 		return 0;
 
-	/* none found so try bespoke handlers */
-	for (i = 0; i < num_bops; i++) {
-
-		if (k->put == NULL && bops[i].id == hdr->ops.put)
-			k->put = bops[i].put;
-		if (k->get == NULL && bops[i].id == hdr->ops.get)
-			k->get = bops[i].get;
-		if (k->info == NULL && bops[i].id == hdr->ops.info)
-			k->info = bops[i].info;
-	}
-
-	/* bespoke handlers found ? */
-	if (k->put && k->get && k->info)
-		return 0;
-
 	/* nothing to bind */
 	return -EINVAL;
 }
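As a hedged illustration (not part of this patch) of the binding above: a component driver can now hand vendor-specific bytes-ext handlers to the topology core through the bytes_ext_ops/bytes_ext_ops_count fields of struct snd_soc_tplg_ops, and the core matches them against ext_ops.get/ext_ops.put of the snd_soc_tplg_bytes_control from the firmware. The control ID, handler names and handler prototypes below are assumptions for illustration only.

/* Hypothetical vendor control ID; must match ext_ops.get/ext_ops.put in the
 * corresponding snd_soc_tplg_bytes_control in the topology firmware. */
#define MY_BYTES_EXT_ID		0x100

/* Assumed prototypes -- see struct snd_soc_tplg_bytes_ext_ops for the
 * authoritative signatures. */
static int my_params_get(unsigned int __user *bytes, unsigned int size);
static int my_params_put(const unsigned int __user *bytes, unsigned int size);

static struct snd_soc_tplg_bytes_ext_ops my_bytes_ext_ops[] = {
	{ .id = MY_BYTES_EXT_ID, .get = my_params_get, .put = my_params_put },
};

static struct snd_soc_tplg_ops my_tplg_ops = {
	.bytes_ext_ops		= my_bytes_ext_ops,
	.bytes_ext_ops_count	= ARRAY_SIZE(my_bytes_ext_ops),
};

These ops are then passed to the topology load entry point together with the existing io_ops, as wired up further below where ops->bytes_ext_ops is copied into struct soc_tplg.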
@@ -609,9 +649,7 @@
 	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
 		return 0;
 
-	if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
-		kc->tlv.c = snd_soc_bytes_tlv_callback;
-	} else {
+	if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK)) {
 		tplg_tlv = &tc->tlv;
 		switch (tplg_tlv->type) {
 		case SNDRV_CTL_TLVT_DB_SCALE:
@@ -682,8 +720,7 @@
 		INIT_LIST_HEAD(&sbe->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &be->hdr, be->hdr.name);
 			kfree(sbe);
@@ -777,8 +814,7 @@
 		INIT_LIST_HEAD(&sm->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &mc->hdr, mc->hdr.name);
 			kfree(sm);
@@ -855,12 +891,12 @@
 	if (ec->items > sizeof(*ec->values))
 		return -EINVAL;
 
-	se->dobj.control.dvalues =
-		kmalloc(ec->items * sizeof(u32), GFP_KERNEL);
+	se->dobj.control.dvalues = kmemdup(ec->values,
+					   ec->items * sizeof(u32),
+					   GFP_KERNEL);
 	if (!se->dobj.control.dvalues)
 		return -ENOMEM;
 
-	memcpy(se->dobj.control.dvalues, ec->values, ec->items * sizeof(u32));
 	return 0;
 }
 
@@ -950,8 +986,7 @@
 		}
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&ec->hdr, &kc, tplg);
 		if (err) {
 			soc_control_err(tplg, &ec->hdr, ec->hdr.name);
 			kfree(se);
@@ -1093,7 +1128,7 @@
 	struct snd_soc_tplg_mixer_control *mc;
 	int i, err;
 
-	kc = kzalloc(sizeof(*kc) * num_kcontrols, GFP_KERNEL);
+	kc = kcalloc(num_kcontrols, sizeof(*kc), GFP_KERNEL);
 	if (kc == NULL)
 		return NULL;
 
@@ -1137,8 +1172,7 @@
 		INIT_LIST_HEAD(&sm->dobj.list);
 
 		/* map io handlers */
-		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], io_ops,
-			ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&mc->hdr, &kc[i], tplg);
 		if (err) {
 			soc_control_err(tplg, &mc->hdr, mc->hdr.name);
 			kfree(sm);
@@ -1235,8 +1269,7 @@
 	}
 
 	/* map io handlers */
-	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, io_ops,
-		ARRAY_SIZE(io_ops), tplg->io_ops, tplg->io_ops_count);
+	err = soc_tplg_kcontrol_bind_io(&ec->hdr, kc, tplg);
 	if (err) {
 		soc_control_err(tplg, &ec->hdr, ec->hdr.name);
 		goto err_se;
@@ -1274,7 +1307,7 @@
 	struct snd_kcontrol_new *kc;
 	int i, err;
 
-	kc = kzalloc(sizeof(*kc) * count, GFP_KERNEL);
+	kc = kcalloc(count, sizeof(*kc), GFP_KERNEL);
 	if (!kc)
 		return NULL;
 
@@ -1297,7 +1330,6 @@
 			"ASoC: adding bytes kcontrol %s with access 0x%x\n",
 			be->hdr.name, be->hdr.access);
 
-		memset(kc, 0, sizeof(*kc));
 		kc[i].name = be->hdr.name;
 		kc[i].private_value = (long)sbe;
 		kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -1307,9 +1339,7 @@
 		INIT_LIST_HEAD(&sbe->dobj.list);
 
 		/* map standard io handlers and check for external handlers */
-		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], io_ops,
-				ARRAY_SIZE(io_ops), tplg->io_ops,
-				tplg->io_ops_count);
+		err = soc_tplg_kcontrol_bind_io(&be->hdr, &kc[i], tplg);
 		if (err) {
 			soc_control_err(tplg, &be->hdr, be->hdr.name);
 			kfree(sbe);
@@ -1737,6 +1767,8 @@
 	tplg.req_index = id;
 	tplg.io_ops = ops->io_ops;
 	tplg.io_ops_count = ops->io_ops_count;
+	tplg.bytes_ext_ops = ops->bytes_ext_ops;
+	tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
 
 	return soc_tplg_load(&tplg);
 }
@@ -1758,7 +1790,6 @@
 	u32 index)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
-	struct snd_soc_dapm_path *p, *next_p;
 
 	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
 
@@ -1770,31 +1801,9 @@
 		if (w->dobj.index != index &&
 			w->dobj.index != SND_SOC_TPLG_INDEX_ALL)
 			continue;
-
-		list_del(&w->list);
-
-		/*
-		 * remove source and sink paths associated to this widget.
-		 * While removing the path, remove reference to it from both
-		 * source and sink widgets so that path is removed only once.
-		 */
-		list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
-			list_del(&p->list_sink);
-			list_del(&p->list_source);
-			list_del(&p->list);
-			kfree(p);
-		}
-		list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
-			list_del(&p->list_sink);
-			list_del(&p->list_source);
-			list_del(&p->list);
-			kfree(p);
-		}
 		/* check and free and dynamic widget kcontrols */
 		snd_soc_tplg_widget_remove(w);
-		kfree(w->kcontrols);
-		kfree(w->name);
-		kfree(w);
+		snd_soc_dapm_free_widget(w);
 	}
 }
 EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index a402860..977a078 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -203,35 +203,25 @@
 	struct spdif_in_dev *host;
 	struct spear_spdif_platform_data *pdata;
 	struct resource *res, *res_fifo;
+	void __iomem *io_base;
 	int ret;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -EINVAL;
+	io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
 
 	res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0);
 	if (!res_fifo)
 		return -EINVAL;
 
-	if (!devm_request_mem_region(&pdev->dev, res->start,
-				resource_size(res), pdev->name)) {
-		dev_warn(&pdev->dev, "Failed to get memory resourse\n");
-		return -ENOENT;
-	}
-
 	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 	if (!host) {
 		dev_warn(&pdev->dev, "kzalloc fail\n");
 		return -ENOMEM;
 	}
 
-	host->io_base = devm_ioremap(&pdev->dev, res->start,
-				resource_size(res));
-	if (!host->io_base) {
-		dev_warn(&pdev->dev, "ioremap failed\n");
-		return -ENOMEM;
-	}
-
+	host->io_base = io_base;
 	host->irq = platform_get_irq(pdev, 0);
 	if (host->irq < 0)
 		return -EINVAL;
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
index a7dc3c5..e8476da 100644
--- a/sound/soc/spear/spear_pcm.c
+++ b/sound/soc/spear/spear_pcm.c
@@ -44,7 +44,7 @@
 	*config = spear_dmaengine_pcm_config;
 	config->compat_filter_fn = filter;
 
-	return snd_dmaengine_pcm_register(dev, config,
+	return devm_snd_dmaengine_pcm_register(dev, config,
 		SND_DMAENGINE_PCM_FLAG_NO_DT |
 		SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
diff --git a/sound/soc/sti/Kconfig b/sound/soc/sti/Kconfig
new file mode 100644
index 0000000..64a6900
--- /dev/null
+++ b/sound/soc/sti/Kconfig
@@ -0,0 +1,11 @@
+#
+# STM SoC audio configuration
+#
+menuconfig SND_SOC_STI
+	tristate "SoC Audio support for STI System-On-Chip"
+	depends on SND_SOC
+	depends on ARCH_STI || COMPILE_TEST
+	select SND_SOC_GENERIC_DMAENGINE_PCM
+	help
+		Say Y if you want to enable ASoC support for
+		any of the STI platforms (e.g. STIH416).
diff --git a/sound/soc/sti/Makefile b/sound/soc/sti/Makefile
new file mode 100644
index 0000000..4b188d2
--- /dev/null
+++ b/sound/soc/sti/Makefile
@@ -0,0 +1,4 @@
+# STI platform support
+snd-soc-sti-objs := sti_uniperif.o uniperif_player.o uniperif_reader.o
+
+obj-$(CONFIG_SND_SOC_STI) += snd-soc-sti.o
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
new file mode 100644
index 0000000..39bcefe
--- /dev/null
+++ b/sound/soc/sti/sti_uniperif.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+
+#include "uniperif.h"
+
+/*
+ * sti_uniperiph_dai_create_ctrl
+ * This function creates the controls associated with the DAI as well as with
+ * the PCM device. The front end requests the association of a control with a
+ * PCM device id.
+ */
+static int sti_uniperiph_dai_create_ctrl(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	struct snd_kcontrol_new *ctrl;
+	int i;
+
+	if (!uni->num_ctrls)
+		return 0;
+
+	for (i = 0; i < uni->num_ctrls; i++) {
+		/*
+		 * Several controls can have the same name. Controls are
+		 * indexed by the uniperipheral instance ID.
+		 */
+		ctrl = &uni->snd_ctrls[i];
+		ctrl->index = uni->info->id;
+		ctrl->device = uni->info->id;
+	}
+
+	return snd_soc_add_dai_controls(dai, uni->snd_ctrls, uni->num_ctrls);
+}
+
+/*
+ * DAI
+ */
+int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct snd_dmaengine_dai_dma_data *dma_data;
+	int transfer_size;
+
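+	/* One DMA burst carries UNIPERIF_FIFO_FRAMES frames of audio */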
+	transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
+
+	dma_data = snd_soc_dai_get_dma_data(dai, substream);
+	dma_data->maxburst = transfer_size;
+
+	return 0;
+}
+
+int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+
+	priv->dai_data.uni->daifmt = fmt;
+
+	return 0;
+}
+
+static int sti_uniperiph_dai_suspend(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int ret;
+
+	/* The uniperipheral should be in stopped state */
+	if (uni->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(uni->dev, "%s: invalid uni state (%d)",
+			__func__, (int)uni->state);
+		return -EBUSY;
+	}
+
+	/* Pinctrl: switch pinstate to sleep */
+	ret = pinctrl_pm_select_sleep_state(uni->dev);
+	if (ret)
+		dev_err(uni->dev, "%s: failed to select pinctrl state",
+			__func__);
+
+	return ret;
+}
+
+static int sti_uniperiph_dai_resume(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *uni = priv->dai_data.uni;
+	int ret;
+
+	if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player")) {
+		ret = uni_player_resume(uni);
+		if (ret)
+			return ret;
+	}
+
+	/* pinctrl: switch pinstate to default */
+	ret = pinctrl_pm_select_default_state(uni->dev);
+	if (ret)
+		dev_err(uni->dev, "%s: failed to select pinctrl state",
+			__func__);
+
+	return ret;
+}
+
+static int sti_uniperiph_dai_probe(struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+
+	/* DMA settings: players provide playback data, readers capture data */
+	if (of_device_is_compatible(dai->dev->of_node, "st,sti-uni-player"))
+		snd_soc_dai_init_dma_data(dai, &dai_data->dma_data, NULL);
+	else
+		snd_soc_dai_init_dma_data(dai, NULL, &dai_data->dma_data);
+
+	dai_data->dma_data.addr = dai_data->uni->fifo_phys_address;
+	dai_data->dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+	return sti_uniperiph_dai_create_ctrl(dai);
+}
+
+static const struct snd_soc_dai_driver sti_uniperiph_dai_template = {
+	.probe = sti_uniperiph_dai_probe,
+	.suspend = sti_uniperiph_dai_suspend,
+	.resume = sti_uniperiph_dai_resume
+};
+
+static const struct snd_soc_component_driver sti_uniperiph_dai_component = {
+	.name = "sti_cpu_dai",
+};
+
+static int sti_uniperiph_cpu_dai_of(struct device_node *node,
+				    struct sti_uniperiph_data *priv)
+{
+	const char *str;
+	int ret;
+	struct device *dev = &priv->pdev->dev;
+	struct sti_uniperiph_dai *dai_data = &priv->dai_data;
+	struct snd_soc_dai_driver *dai = priv->dai;
+	struct snd_soc_pcm_stream *stream;
+	struct uniperif *uni;
+
+	uni = devm_kzalloc(dev, sizeof(*uni), GFP_KERNEL);
+	if (!uni)
+		return -ENOMEM;
+
+	*dai = sti_uniperiph_dai_template;
+	ret = of_property_read_string(node, "dai-name", &str);
+	if (ret < 0) {
+		dev_err(dev, "%s: dai name missing.\n", __func__);
+		return -EINVAL;
+	}
+	dai->name = str;
+
+	/* Get resources */
+	uni->mem_region = platform_get_resource(priv->pdev, IORESOURCE_MEM, 0);
+
+	if (!uni->mem_region) {
+		dev_err(dev, "Failed to get memory resource");
+		return -ENODEV;
+	}
+
+	uni->base = devm_ioremap_resource(dev, uni->mem_region);
+
+	if (IS_ERR(uni->base))
+		return PTR_ERR(uni->base);
+
+	uni->fifo_phys_address = uni->mem_region->start +
+				     UNIPERIF_FIFO_DATA_OFFSET(uni);
+
+	uni->irq = platform_get_irq(priv->pdev, 0);
+	if (uni->irq < 0) {
+		dev_err(dev, "Failed to get IRQ resource");
+		return -ENXIO;
+	}
+
+	dai_data->uni = uni;
+
+	if (of_device_is_compatible(node, "st,sti-uni-player")) {
+		uni_player_init(priv->pdev, uni);
+		stream = &dai->playback;
+	} else {
+		uni_reader_init(priv->pdev, uni);
+		stream = &dai->capture;
+	}
+	dai->ops = uni->dai_ops;
+
+	stream->stream_name = dai->name;
+	stream->channels_min = uni->hw->channels_min;
+	stream->channels_max = uni->hw->channels_max;
+	stream->rates = uni->hw->rates;
+	stream->formats = uni->hw->formats;
+
+	return 0;
+}
+
+static const struct snd_dmaengine_pcm_config dmaengine_pcm_config = {
+	.prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
+};
+
+static int sti_uniperiph_probe(struct platform_device *pdev)
+{
+	struct sti_uniperiph_data *priv;
+	struct device_node *node = pdev->dev.of_node;
+	int ret;
+
+	/* Allocate the private data and the CPU_DAI array */
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dai = devm_kzalloc(&pdev->dev, sizeof(*priv->dai), GFP_KERNEL);
+	if (!priv->dai)
+		return -ENOMEM;
+
+	priv->pdev = pdev;
+
+	ret = sti_uniperiph_cpu_dai_of(node, priv);
+	if (ret < 0)
+		return ret;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	ret = devm_snd_soc_register_component(&pdev->dev,
+					      &sti_uniperiph_dai_component,
+					      priv->dai, 1);
+	if (ret < 0)
+		return ret;
+
+	return devm_snd_dmaengine_pcm_register(&pdev->dev,
+					       &dmaengine_pcm_config, 0);
+}
+
+static const struct of_device_id snd_soc_sti_match[] = {
+	{ .compatible = "st,sti-uni-player", },
+	{ .compatible = "st,sti-uni-reader", },
+	{},
+};
+
+static struct platform_driver sti_uniperiph_driver = {
+	.driver = {
+		.name = "sti-uniperiph-dai",
+		.of_match_table = snd_soc_sti_match,
+	},
+	.probe = sti_uniperiph_probe,
+};
+module_platform_driver(sti_uniperiph_driver);
+
+MODULE_DESCRIPTION("uniperipheral DAI driver");
+MODULE_AUTHOR("Arnaud Pouliquen <arnaud.pouliquen@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
new file mode 100644
index 0000000..f0fd5a9
--- /dev/null
+++ b/sound/soc/sti/uniperif.h
@@ -0,0 +1,1229 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#ifndef __SND_ST_AUD_UNIPERIF_H
+#define __SND_ST_AUD_UNIPERIF_H
+
+#include <linux/regmap.h>
+
+#include <sound/dmaengine_pcm.h>
+
+/*
+ * Register access macros
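+ *
+ * GET_UNIPERIF_REG extracts a bit field, SET_UNIPERIF_REG read-modify-writes
+ * the field, and SET_UNIPERIF_BIT_REG writes the field value without
+ * preserving the other bits of the register.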
+ */
+
+#define GET_UNIPERIF_REG(ip, offset, shift, mask) \
+	((readl_relaxed(ip->base + offset) >> shift) & mask)
+#define SET_UNIPERIF_REG(ip, offset, shift, mask, value) \
+	writel_relaxed(((readl_relaxed(ip->base + offset) & \
+	~(mask << shift)) | (((value) & mask) << shift)), ip->base + offset)
+#define SET_UNIPERIF_BIT_REG(ip, offset, shift, mask, value) \
+	writel_relaxed((((value) & mask) << shift), ip->base + offset)
+
+/*
+ * AUD_UNIPERIF_SOFT_RST reg
+ */
+
+#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
+#define GET_UNIPERIF_SOFT_RST(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		readl_relaxed(ip->base + UNIPERIF_SOFT_RST_OFFSET(ip)) : 0)
+#define SET_UNIPERIF_SOFT_RST(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_SOFT_RST_OFFSET(ip))
+
+/* SOFT_RST */
+#define UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip) 0x0
+#define UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip) 0x1
+#define SET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
+	SET_UNIPERIF_BIT_REG(ip, \
+		UNIPERIF_SOFT_RST_OFFSET(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip), 1)
+#define GET_UNIPERIF_SOFT_RST_SOFT_RST(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_SOFT_RST_OFFSET(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_SHIFT(ip), \
+		UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
+
+/*
+ * AUD_UNIPERIF_FIFO_DATA reg
+ */
+
+#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
+#define SET_UNIPERIF_DATA(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
+
+/*
+ * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ */
+
+#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
+#define GET_UNIPERIF_CHANNEL_STA_REGN(ip, n) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REGN(ip, n))
+#define SET_UNIPERIF_CHANNEL_STA_REGN(ip, n, value) \
+	writel_relaxed(value, ip->base + \
+			UNIPERIF_CHANNEL_STA_REGN(ip, n))
+
+#define UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip) 0x0060
+#define GET_UNIPERIF_CHANNEL_STA_REG0(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG0(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG0_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip) 0x0064
+#define GET_UNIPERIF_CHANNEL_STA_REG1(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG1(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG1_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip) 0x0068
+#define GET_UNIPERIF_CHANNEL_STA_REG2(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG2(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG2_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip) 0x006C
+#define GET_UNIPERIF_CHANNEL_STA_REG3(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG3(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG3_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip) 0x0070
+#define GET_UNIPERIF_CHANNEL_STA_REG4(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG4(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG4_OFFSET(ip))
+
+#define UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip) 0x0074
+#define GET_UNIPERIF_CHANNEL_STA_REG5(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
+#define SET_UNIPERIF_CHANNEL_STA_REG5(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
+
+/*
+ *  AUD_UNIPERIF_ITS reg
+ */
+
+#define UNIPERIF_ITS_OFFSET(ip) 0x000C
+#define GET_UNIPERIF_ITS(ip) \
+	readl_relaxed(ip->base + UNIPERIF_ITS_OFFSET(ip))
+
+/* MEM_BLK_READ */
+#define UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip) 5
+#define UNIPERIF_ITS_MEM_BLK_READ_MASK(ip) \
+	(BIT(UNIPERIF_ITS_MEM_BLK_READ_SHIFT(ip)))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITS_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_FIFO_ERROR_SHIFT(ip)))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITS_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITS_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_DMA_ERROR_SHIFT(ip)))
+
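+/*
+ * Fields gated on the IP revision use a shift of -1 (and usually a 0 mask)
+ * on revisions where they do not exist; such accessors must only be used
+ * after checking the IP version.
+ */
+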
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITS_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_DONE_SHIFT(ip))))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITS_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+
+/*
+ *  AUD_UNIPERIF_ITS_BCLR reg
+ */
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITS_BCLR_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITS_BCLR(ip, \
+		UNIPERIF_ITS_BCLR_FIFO_ERROR_MASK(ip))
+
+#define UNIPERIF_ITS_BCLR_OFFSET(ip) 0x0010
+#define SET_UNIPERIF_ITS_BCLR(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
+
+/*
+ *  AUD_UNIPERIF_ITM reg
+ */
+
+#define UNIPERIF_ITM_OFFSET(ip) 0x0018
+#define GET_UNIPERIF_ITM(ip) \
+	readl_relaxed(ip->base + UNIPERIF_ITM_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_FIFO_ERROR_SHIFT(ip)))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_DONE_SHIFT(ip))))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+
+/*
+ *  AUD_UNIPERIF_ITM_BCLR reg
+ */
+
+#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
+#define SET_UNIPERIF_ITM_BCLR(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITM_BCLR_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BCLR_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITM_BCLR(ip, \
+		UNIPERIF_ITM_BCLR_FIFO_ERROR_MASK(ip))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BCLR_DMA_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BCLR_DMA_ERROR(ip) \
+	SET_UNIPERIF_ITM_BCLR(ip, \
+		UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
+
+/*
+ *  AUD_UNIPERIF_ITM_BSET reg
+ */
+
+#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
+#define SET_UNIPERIF_ITM_BSET(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_ITM_BSET_OFFSET(ip))
+
+/* FIFO_ERROR */
+#define UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 8)
+#define UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_FIFO_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_FIFO_ERROR(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_FIFO_ERROR_MASK(ip))
+
+/* MEM_BLK_READ */
+#define UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip) 5
+#define UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_MEM_BLK_READ_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_MEM_BLK_READ_MASK(ip))
+
+/* DMA_ERROR */
+#define UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip) 9
+#define UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip) \
+	(BIT(UNIPERIF_ITM_BSET_DMA_ERROR_SHIFT(ip)))
+#define SET_UNIPERIF_ITM_BSET_DMA_ERROR(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_DMA_ERROR_MASK(ip))
+
+/* UNDERFLOW_REC_DONE */
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 12)
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_SHIFT(ip))))
+#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE_MASK(ip))
+
+/* UNDERFLOW_REC_FAILED */
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 13)
+#define UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? \
+		0 : (BIT(UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_SHIFT(ip))))
+#define SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(ip) \
+	SET_UNIPERIF_ITM_BSET(ip, \
+		UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED_MASK(ip))
+
+/*
+ * UNIPERIF_CONFIG reg
+ */
+
+#define UNIPERIF_CONFIG_OFFSET(ip) 0x0040
+#define GET_UNIPERIF_CONFIG(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CONFIG_OFFSET(ip))
+#define SET_UNIPERIF_CONFIG(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CONFIG_OFFSET(ip))
+
+/* PARITY_CNTR */
+#define UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip) 0
+#define UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_PARITY_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_PARITY_CNTR_MASK(ip), 1)
+
+/* CHANNEL_STA_CNTR */
+#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip) 1
+#define UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip),    \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHANNEL_STA_CNTR_MASK(ip), 1)
+
+/* USER_DAT_CNTR */
+#define UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip) 2
+#define UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_USER_DAT_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_USER_DAT_CNTR_MASK(ip), 0)
+
+/* VALIDITY_DAT_CNTR */
+#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip) 3
+#define UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip))
+#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_SHIFT(ip), \
+		UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_MASK(ip), 1)
+
+/* ONE_BIT_AUD_SUPPORT */
+#define UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip) 4
+#define UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_ONE_BIT_AUD(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip))
+#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_ONE_BIT_AUD_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_SHIFT(ip), \
+		UNIPERIF_CONFIG_ONE_BIT_AUD_MASK(ip), 1)
+
+/* MEMORY_FMT */
+#define UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip) 5
+#define UNIPERIF_CONFIG_MEM_FMT_MASK(ip) 0x1
+#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip) 0
+#define VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) 1
+#define GET_UNIPERIF_CONFIG_MEM_FMT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_MASK(ip))
+#define SET_UNIPERIF_CONFIG_MEM_FMT(ip, value)	\
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_SHIFT(ip), \
+		UNIPERIF_CONFIG_MEM_FMT_MASK(ip), value)
+#define SET_UNIPERIF_CONFIG_MEM_FMT_16_0(ip)   \
+	SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
+		VALUE_UNIPERIF_CONFIG_MEM_FMT_16_0(ip))
+#define SET_UNIPERIF_CONFIG_MEM_FMT_16_16(ip) \
+	SET_UNIPERIF_CONFIG_MEM_FMT(ip, \
+		VALUE_UNIPERIF_CONFIG_MEM_FMT_16_16(ip))
+
+/* REPEAT_CHL_STS */
+#define UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip) 6
+#define UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_REPEAT_CHL_STS(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip))
+#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_SHIFT(ip), \
+		UNIPERIF_CONFIG_REPEAT_CHL_STS_MASK(ip), 1)
+
+/* BACK_STALL_REQ */
+#define UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 7 : -1)
+#define UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_BACK_STALL_REQ(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip))
+#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_BACK_STALL_REQ_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_SHIFT(ip), \
+		UNIPERIF_CONFIG_BACK_STALL_REQ_MASK(ip), 1)
+
+/* FDMA_TRIGGER_LIMIT */
+#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip) 8
+#define UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip) 0x7F
+#define GET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip))
+#define SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_SHIFT(ip), \
+		UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(ip), value)
+
+/* CHL_STS_UPDATE */
+#define UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
+#define UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip),  \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip))
+#define SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_CONFIG_CHL_STS_UPDATE_MASK(ip), 1)
+
+/* IDLE_MOD */
+#define UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip) 18
+#define UNIPERIF_CONFIG_IDLE_MOD_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_IDLE_MOD(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip))
+#define SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 0)
+#define SET_UNIPERIF_CONFIG_IDLE_MOD_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_SHIFT(ip), \
+		UNIPERIF_CONFIG_IDLE_MOD_MASK(ip), 1)
+
+/* SUBFRAME_SELECTION */
+#define UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip) 19
+#define UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_SUBFRAME_SEL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip))
+#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF0_SUBF1(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SUBFRAME_SEL_MASK(ip), 0)
+
+/* FULL_SW_CONTROL */
+#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip) 20
+#define UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_SPDIF_SW_CTRL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip))
+#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_ENABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_SHIFT(ip), \
+		UNIPERIF_CONFIG_SPDIF_SW_CTRL_MASK(ip), 0)
+
+/* MASTER_CLKEDGE */
+#define UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 24 : -1)
+#define UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip) 0x1
+#define GET_UNIPERIF_CONFIG_MSTR_CLKEDGE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip))
+#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_FALLING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 1)
+#define SET_UNIPERIF_CONFIG_MSTR_CLKEDGE_RISING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CONFIG_OFFSET(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_SHIFT(ip), \
+		UNIPERIF_CONFIG_MSTR_CLKEDGE_MASK(ip), 0)
+
+/*
+ * UNIPERIF_CTRL reg
+ */
+
+#define UNIPERIF_CTRL_OFFSET(ip) 0x0044
+#define GET_UNIPERIF_CTRL(ip) \
+	readl_relaxed(ip->base + UNIPERIF_CTRL_OFFSET(ip))
+#define SET_UNIPERIF_CTRL(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_CTRL_OFFSET(ip))
+
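+/*
+ * OPERATION selects what the player emits: off, mute (PCM null samples or
+ * pause bursts), PCM/audio data, encoded data, CD data or standby; the
+ * values available depend on the IP revision.
+ */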
+/* OPERATION */
+#define UNIPERIF_CTRL_OPERATION_SHIFT(ip) 0
+#define UNIPERIF_CTRL_OPERATION_MASK(ip) 0x7
+#define GET_UNIPERIF_CTRL_OPERATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip) 0
+#define SET_UNIPERIF_CTRL_OPERATION_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_OFF(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 1 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PCM_NULL(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 2 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_MUTE_PAUSE_BURST(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) 3
+#define SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_PCM_DATA(ip))
+/* This is the same as above! */
+#define VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) 3
+#define SET_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_AUDIO_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) 4
+#define SET_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_ENC_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 5 : -1)
+#define SET_UNIPERIF_CTRL_OPERATION_CD_DATA(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_CD_DATA(ip))
+#define VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 7)
+#define SET_UNIPERIF_CTRL_OPERATION_STANDBY(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_OPERATION_SHIFT(ip), \
+		UNIPERIF_CTRL_OPERATION_MASK(ip), \
+		VALUE_UNIPERIF_CTRL_OPERATION_STANDBY(ip))
+
+/* EXIT_STBY_ON_EOBLOCK */
+#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 3)
+#define UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip))
+#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_SHIFT(ip), \
+		UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_MASK(ip), 1)
+
+/* ROUNDING */
+#define UNIPERIF_CTRL_ROUNDING_SHIFT(ip) 4
+#define UNIPERIF_CTRL_ROUNDING_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_ROUNDING(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip))
+#define SET_UNIPERIF_CTRL_ROUNDING_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_ROUNDING_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ROUNDING_SHIFT(ip), \
+		UNIPERIF_CTRL_ROUNDING_MASK(ip), 1)
+
+/* DIVIDER */
+#define UNIPERIF_CTRL_DIVIDER_SHIFT(ip) 5
+#define UNIPERIF_CTRL_DIVIDER_MASK(ip) 0xff
+#define GET_UNIPERIF_CTRL_DIVIDER(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
+		UNIPERIF_CTRL_DIVIDER_MASK(ip))
+#define SET_UNIPERIF_CTRL_DIVIDER(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_DIVIDER_SHIFT(ip), \
+		UNIPERIF_CTRL_DIVIDER_MASK(ip), value)
+
+/* BYTE_SWAP */
+#define UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 13 : -1)
+#define UNIPERIF_CTRL_BYTE_SWP_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_BYTE_SWP(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip))
+#define SET_UNIPERIF_CTRL_BYTE_SWP_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_BYTE_SWP_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_SHIFT(ip), \
+		UNIPERIF_CTRL_BYTE_SWP_MASK(ip), 1)
+
+/* ZERO_STUFFING_HW_SW */
+#define UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 14 : -1)
+#define UNIPERIF_CTRL_ZERO_STUFF_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_ZERO_STUFF(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip))
+#define SET_UNIPERIF_CTRL_ZERO_STUFF_HW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_ZERO_STUFF_SW(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_SHIFT(ip), \
+		UNIPERIF_CTRL_ZERO_STUFF_MASK(ip), 0)
+
+/* SPDIF_LAT */
+#define UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 16 : -1)
+#define UNIPERIF_CTRL_SPDIF_LAT_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_SPDIF_LAT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip))
+#define SET_UNIPERIF_CTRL_SPDIF_LAT_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_LAT_MASK(ip), 0)
+
+/* EN_SPDIF_FORMATTING */
+#define UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip) 17
+#define UNIPERIF_CTRL_SPDIF_FMT_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_SPDIF_FMT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip))
+#define SET_UNIPERIF_CTRL_SPDIF_FMT_ON(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 1)
+#define SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_SHIFT(ip), \
+		UNIPERIF_CTRL_SPDIF_FMT_MASK(ip), 0)
+
+/* READER_OUT_SELECT */
+#define UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 18 : -1)
+#define UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip) 0x1
+#define GET_UNIPERIF_CTRL_READER_OUT_SEL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip))
+#define SET_UNIPERIF_CTRL_READER_OUT_SEL_IN_MEM(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 0)
+#define SET_UNIPERIF_CTRL_READER_OUT_SEL_ON_I2S_LINE(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
+		UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
+
+/* UNDERFLOW_REC_WINDOW */
+#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
+#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip) 0xff
+#define GET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip))
+#define SET_UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_CTRL_OFFSET(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip), \
+		UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_MASK(ip), value)
+
+/*
+ * UNIPERIF_I2S_FMT a.k.a UNIPERIF_FORMAT reg
+ */
+
+#define UNIPERIF_I2S_FMT_OFFSET(ip) 0x0048
+#define GET_UNIPERIF_I2S_FMT(ip) \
+	readl_relaxed(ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
+#define SET_UNIPERIF_I2S_FMT(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_I2S_FMT_OFFSET(ip))
+
+/* NBIT */
+#define UNIPERIF_I2S_FMT_NBIT_SHIFT(ip) 0
+#define UNIPERIF_I2S_FMT_NBIT_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_NBIT(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NBIT_32(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_NBIT_16(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NBIT_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NBIT_MASK(ip), 1)
+
+/* DATA_SIZE */
+#define UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip) 1
+#define UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip) 0x7
+#define GET_UNIPERIF_I2S_FMT_DATA_SIZE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_18(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 1)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_20(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 2)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 3)
+#define SET_UNIPERIF_I2S_FMTL_DATA_SIZE_28(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 4)
+#define SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_DATA_SIZE_MASK(ip), 5)
+
+/* LR_POL */
+#define UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip) 4
+#define UNIPERIF_I2S_FMT_LR_POL_MASK(ip) 0x1
+#define VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) 0x0
+#define VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_LR_POL(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_LR_POL(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_LR_POL_MASK(ip), value)
+#define SET_UNIPERIF_I2S_FMT_LR_POL_LOW(ip) \
+	SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
+		VALUE_UNIPERIF_I2S_FMT_LR_POL_LOW(ip))
+#define SET_UNIPERIF_I2S_FMT_LR_POL_HIG(ip) \
+	SET_UNIPERIF_I2S_FMT_LR_POL(ip, \
+		VALUE_UNIPERIF_I2S_FMT_LR_POL_HIG(ip))
+
+/* SCLK_EDGE */
+#define UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip) 5
+#define UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_SCLK_EDGE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_SCLK_EDGE_MASK(ip), 1)
+
+/* PADDING */
+#define UNIPERIF_I2S_FMT_PADDING_SHIFT(ip) 6
+#define UNIPERIF_I2S_FMT_PADDING_MASK(ip) 0x1
+#define VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) 0x0
+#define VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_PADDING(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_PADDING_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_PADDING(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_PADDING_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_PADDING_MASK(ip), value)
+#define SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip) \
+	SET_UNIPERIF_I2S_FMT_PADDING(ip, \
+		VALUE_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(ip))
+#define SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip) \
+	SET_UNIPERIF_I2S_FMT_PADDING(ip, \
+		VALUE_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(ip))
+
+/* ALIGN */
+#define UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip) 7
+#define UNIPERIF_I2S_FMT_ALIGN_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_ALIGN(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
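+	/* The compatible string selects playback (player) or capture (reader) */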
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ALIGN_MASK(ip), 1)
+
+/* ORDER */
+#define UNIPERIF_I2S_FMT_ORDER_SHIFT(ip) 8
+#define UNIPERIF_I2S_FMT_ORDER_MASK(ip) 0x1
+#define GET_UNIPERIF_I2S_FMT_ORDER(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_ORDER_LSB(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip), 0)
+#define SET_UNIPERIF_I2S_FMT_ORDER_MSB(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_ORDER_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_ORDER_MASK(ip), 1)
+
+/* NUM_CH */
+#define UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip) 9
+#define UNIPERIF_I2S_FMT_NUM_CH_MASK(ip) 0x7
+#define GET_UNIPERIF_I2S_FMT_NUM_CH(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NUM_CH(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NUM_CH_MASK(ip), value)
+
+/* NO_OF_SAMPLES_TO_READ */
+#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip) 12
+#define UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip) 0xfffff
+#define GET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip))
+#define SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_I2S_FMT_OFFSET(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_SHIFT(ip), \
+		UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ_MASK(ip), value)
+
+/*
+ * UNIPERIF_BIT_CONTROL reg
+ */
+
+#define UNIPERIF_BIT_CONTROL_OFFSET(ip)  \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0x004c)
+#define GET_UNIPERIF_BIT_CONTROL(ip) \
+	readl_relaxed(ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
+#define SET_UNIPERIF_BIT_CONTROL(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_BIT_CONTROL_OFFSET(ip))
+
+/* CLR_UNDERFLOW_DURATION */
+#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip) 0
+#define UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip) 0x1
+#define GET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip))
+#define SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(ip) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION_MASK(ip), 1)
+
+/* CHL_STS_UPDATE */
+#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip) 1
+#define UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip) 0x1
+#define GET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip))
+#define SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(ip) \
+	SET_UNIPERIF_BIT_REG(ip, \
+		UNIPERIF_BIT_CONTROL_OFFSET(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_SHIFT(ip), \
+		UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE_MASK(ip), 1)
+
+/*
+ * UNIPERIF_STATUS_1 reg
+ */
+
+#define UNIPERIF_STATUS_1_OFFSET(ip) 0x0050
+#define GET_UNIPERIF_STATUS_1(ip) \
+	readl_relaxed(ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
+#define SET_UNIPERIF_STATUS_1(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_STATUS_1_OFFSET(ip))
+
+/* UNDERFLOW_DURATION */
+#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
+#define UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip) 0xff
+#define GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_STATUS_1_OFFSET(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip))
+#define SET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_STATUS_1_OFFSET(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_SHIFT(ip), \
+		UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
+
+/*
+ * AUD_UNIPERIF_USER_VALIDITY reg
+ */
+
+#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
+#define GET_UNIPERIF_USER_VALIDITY(ip) \
+	readl_relaxed(ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
+#define SET_UNIPERIF_USER_VALIDITY(ip, value) \
+	writel_relaxed(value, ip->base + UNIPERIF_USER_VALIDITY_OFFSET(ip))
+
+/* VALIDITY_LEFT_AND_RIGHT */
+#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip) 0
+#define UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip) 0x3
+#define GET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_USER_VALIDITY_OFFSET(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip))
+#define SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_USER_VALIDITY_OFFSET(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_SHIFT(ip), \
+		UNIPERIF_USER_VALIDITY_VALIDITY_LR_MASK(ip), \
+		value ? 0x3 : 0)
+
+/*
+ * UNIPERIF_DBG_STANDBY_LEFT_SP reg
+ */
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip) 0x0150
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? -1 : 0)
+#define UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip) \
+	((ip)->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0 ? 0 : 0xFFFFFF)
+#define GET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip) \
+	GET_UNIPERIF_REG(ip, \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip))
+#define SET_UNIPERIF_DBG_STANDBY_LEFT_SP(ip, value) \
+	SET_UNIPERIF_REG(ip, \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_OFFSET(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_SHIFT(ip), \
+		UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
+
+/*
+ * uniperipheral IP capabilities
+ */
+
+#define UNIPERIF_FIFO_SIZE		70 /* FIFO is 70 cells deep */
+#define UNIPERIF_FIFO_FRAMES		4  /* FDMA trigger limit in frames */
+
+/*
+ * Uniperipheral IP revisions
+ */
+enum uniperif_version {
+	SND_ST_UNIPERIF_VERSION_UNKNOWN,
+	/* SASG1 (Orly), Newman */
+	SND_ST_UNIPERIF_VERSION_C6AUD0_UNI_1_0,
+	/* SASC1, SASG2 (Orly2) */
+	SND_ST_UNIPERIF_VERSION_UNI_PLR_1_0,
+	/* SASC1, SASG2 (Orly2), TELSS, Cannes */
+	SND_ST_UNIPERIF_VERSION_UNI_RDR_1_0,
+	/* TELSS (SASC1) */
+	SND_ST_UNIPERIF_VERSION_TDM_PLR_1_0,
+	/* Cannes/Monaco */
+	SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+};
+
+enum uniperif_type {
+	SND_ST_UNIPERIF_PLAYER_TYPE_NONE,
+	SND_ST_UNIPERIF_PLAYER_TYPE_HDMI,
+	SND_ST_UNIPERIF_PLAYER_TYPE_PCM,
+	SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF
+};
+
+enum uniperif_state {
+	UNIPERIF_STATE_STOPPED,
+	UNIPERIF_STATE_STARTED,
+	UNIPERIF_STATE_STANDBY,
+	UNIPERIF_STATE_UNDERFLOW,
+	UNIPERIF_STATE_OVERFLOW = UNIPERIF_STATE_UNDERFLOW,
+	UNIPERIF_STATE_XRUN
+};
+
+enum uniperif_iec958_encoding_mode {
+	UNIPERIF_IEC958_ENCODING_MODE_PCM,
+	UNIPERIF_IEC958_ENCODING_MODE_ENCODED
+};
+
+struct uniperif_info {
+	int id; /* instance value of the uniperipheral IP */
+	enum uniperif_type player_type;
+	int underflow_enabled;		/* Underflow recovery mode */
+};
+
+struct uniperif_iec958_settings {
+	enum uniperif_iec958_encoding_mode encoding_mode;
+	struct snd_aes_iec958 iec958;
+};
+
+struct uniperif {
+	/* System information */
+	struct uniperif_info *info;
+	struct device *dev;
+	int ver; /* IP version, used by register access macros */
+	struct regmap_field *clk_sel;
+
+	/* capabilities */
+	const struct snd_pcm_hardware *hw;
+
+	/* Resources */
+	struct resource *mem_region;
+	void __iomem *base;
+	unsigned long fifo_phys_address;
+	int irq;
+
+	/* Clocks */
+	struct clk *clk;
+	int mclk;
+	int clk_adj;
+
+	/* Runtime data */
+	enum uniperif_state state;
+
+	struct snd_pcm_substream *substream;
+
+	/* Specific to IEC958 player */
+	struct uniperif_iec958_settings stream_settings;
+	struct mutex ctrl_lock; /* protects resources updated by stream and controls */
+
+	/* ALSA controls */
+	struct snd_kcontrol_new *snd_ctrls;
+	int num_ctrls;
+
+	/* dai properties */
+	unsigned int daifmt;
+
+	/* DAI callbacks */
+	const struct snd_soc_dai_ops *dai_ops;
+};
+
+struct sti_uniperiph_dai {
+	int stream;
+	struct uniperif *uni;
+	struct snd_dmaengine_dai_dma_data dma_data;
+};
+
+struct sti_uniperiph_data {
+	struct platform_device *pdev;
+	struct snd_soc_dai_driver *dai;
+	struct sti_uniperiph_dai dai_data;
+};
+
+/* uniperiph player */
+int uni_player_init(struct platform_device *pdev,
+		    struct uniperif *uni_player);
+int uni_player_resume(struct uniperif *player);
+
+/* uniperiph reader */
+int uni_reader_init(struct platform_device *pdev,
+		    struct uniperif *uni_reader);
+
+/* common */
+int sti_uniperiph_dai_set_fmt(struct snd_soc_dai *dai,
+			      unsigned int fmt);
+
+int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai);
+
+#endif
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
new file mode 100644
index 0000000..f6eefe1
--- /dev/null
+++ b/sound/soc/sti/uniperif_player.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/asoundef.h>
+#include <sound/soc.h>
+
+#include "uniperif.h"
+
+/*
+ * Some hardware-related definitions
+ */
+
+/* sys config registers definitions */
+#define SYS_CFG_AUDIO_GLUE 0xA4
+#define SYS_CFG_AUDI0_GLUE_PCM_CLKX 8
+
+/*
+ * Driver specific types.
+ */
+#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
+#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
+#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
+	((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
+#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
+	(UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
+		UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
+
+#define UNIPERIF_PLAYER_CLK_ADJ_MIN  -999999
+#define UNIPERIF_PLAYER_CLK_ADJ_MAX  1000000
+
+/*
+ * Note: snd_pcm_hardware is linked to the DMA controller but is declared
+ * here to integrate the CPU DAI capabilities in terms of rates and
+ * supported channels.
+ */
+static const struct snd_pcm_hardware uni_player_pcm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 192000,
+
+	.channels_min = 2,
+	.channels_max = 8,
+
+	.periods_min = 2,
+	.periods_max = 48,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
+static inline int reset_player(struct uniperif *player)
+{
+	int count = 10;
+
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		while (GET_UNIPERIF_SOFT_RST_SOFT_RST(player) && count) {
+			udelay(5);
+			count--;
+		}
+	}
+
+	if (!count) {
+		dev_err(player->dev, "Failed to reset uniperif");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * uni_player_irq_handler
+ * In case of error, the audio stream is stopped; the stop action is protected
+ * via the PCM stream lock to avoid a race condition with the trigger callback.
+ */
+static irqreturn_t uni_player_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct uniperif *player = dev_id;
+	unsigned int status;
+	unsigned int tmp;
+
+	if (player->state == UNIPERIF_STATE_STOPPED) {
+		/* Unexpected IRQ: do nothing */
+		return IRQ_NONE;
+	}
+
+	/* Get interrupt status & clear them immediately */
+	status = GET_UNIPERIF_ITS(player);
+	SET_UNIPERIF_ITS_BCLR(player, status);
+
+	/* Check for fifo error (underrun) */
+	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(player))) {
+		dev_err(player->dev, "FIFO underflow error detected");
+
+		/* Interrupt is only informational when underflow recovery is enabled */
+		if (player->info->underflow_enabled) {
+			/* Update state to underflow */
+			player->state = UNIPERIF_STATE_UNDERFLOW;
+
+		} else {
+			/* Disable interrupt so it doesn't continually fire */
+			SET_UNIPERIF_ITM_BCLR_FIFO_ERROR(player);
+
+			/* Stop the player */
+			snd_pcm_stream_lock(player->substream);
+			snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock(player->substream);
+		}
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check for dma error (overrun) */
+	if (unlikely(status & UNIPERIF_ITS_DMA_ERROR_MASK(player))) {
+		dev_err(player->dev, "DMA error detected");
+
+		/* Disable interrupt so it doesn't continually fire */
+		SET_UNIPERIF_ITM_BCLR_DMA_ERROR(player);
+
+		/* Stop the player */
+		snd_pcm_stream_lock(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(player->substream);
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check for underflow recovery done */
+	if (unlikely(status & UNIPERIF_ITM_UNDERFLOW_REC_DONE_MASK(player))) {
+		if (!player->info->underflow_enabled) {
+			dev_err(player->dev, "unexpected underflow recovery");
+			return IRQ_NONE;
+		}
+		/* Read the underflow recovery duration */
+		tmp = GET_UNIPERIF_STATUS_1_UNDERFLOW_DURATION(player);
+
+		/* Clear the underflow recovery duration */
+		SET_UNIPERIF_BIT_CONTROL_CLR_UNDERFLOW_DURATION(player);
+
+		/* Update state to started */
+		player->state = UNIPERIF_STATE_STARTED;
+
+		ret = IRQ_HANDLED;
+	}
+
+	/* Check if underflow recovery failed */
+	if (unlikely(status &
+		     UNIPERIF_ITM_UNDERFLOW_REC_FAILED_MASK(player))) {
+		dev_err(player->dev, "Underflow recovery failed");
+
+		/* Stop the player */
+		snd_pcm_stream_lock(player->substream);
+		snd_pcm_stop(player->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(player->substream);
+
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int uni_player_clk_set_rate(struct uniperif *player, unsigned long rate)
+{
+	int rate_adjusted, rate_achieved, delta, ret;
+	int adjustment = player->clk_adj;
+
+	/*
+	 *             a
+	 * F = f + --------- * f = f + d
+	 *          1000000
+	 *
+	 *         a
+	 * d = --------- * f
+	 *      1000000
+	 *
+	 * where:
+	 *   f - nominal rate
+	 *   a - adjustment in ppm (parts per million)
+	 *   F - rate to be set in synthesizer
+	 *   d - delta (difference) between f and F
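+	 *
+	 * e.g. f = 48000 Hz and a = 100 ppm give d = 5 Hz (after rounding),
+	 * so the synthesizer is programmed with F = 48005 Hz.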
+	 */
+	if (adjustment < 0) {
+		/* div64_u64 operates on unsigned values... */
+		delta = -1;
+		adjustment = -adjustment;
+	} else {
+		delta = 1;
+	}
+	/* 500000 ppm is 0.5, which is used to round up values */
+	delta *= (int)div64_u64((uint64_t)rate *
+				(uint64_t)adjustment + 500000, 1000000);
+	rate_adjusted = rate + delta;
+
+	/* Adjusted rate should never be == 0 */
+	if (!rate_adjusted)
+		return -EINVAL;
+
+	ret = clk_set_rate(player->clk, rate_adjusted);
+	if (ret < 0)
+		return ret;
+
+	rate_achieved = clk_get_rate(player->clk);
+	if (!rate_achieved)
+		/* A value of 0 means the clock or its parent is not valid */
+		return -EINVAL;
+
+	/*
+	 * Using ALSA's adjustment control, we can modify the rate to be up
+	 * to twice as much as requested, but no more
+	 */
+	delta = rate_achieved - rate;
+	if (delta < 0) {
+		/* div64_u64 operates on unsigned values... */
+		delta = -delta;
+		adjustment = -1;
+	} else {
+		adjustment = 1;
+	}
+	/* Frequency/2 is added to round up result */
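+	/* e.g. 44117 Hz achieved for a 44100 Hz request reports ~385 ppm */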
+	adjustment *= (int)div64_u64((uint64_t)delta * 1000000 + rate / 2,
+				     rate);
+	player->clk_adj = adjustment;
+	return 0;
+}
+
+static void uni_player_set_channel_status(struct uniperif *player,
+					  struct snd_pcm_runtime *runtime)
+{
+	int n;
+	unsigned int status;
+
+	/*
+	 * Some AVRs and TVs require the channel status to contain a correct
+	 * sampling frequency. If no sample rate is already specified, then
+	 * set one.
+	 */
+	mutex_lock(&player->ctrl_lock);
+	if (runtime && (player->stream_settings.iec958.status[3]
+					== IEC958_AES3_CON_FS_NOTID)) {
+		switch (runtime->rate) {
+		case 22050:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_22050;
+			break;
+		case 44100:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_44100;
+			break;
+		case 88200:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_88200;
+			break;
+		case 176400:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_176400;
+			break;
+		case 24000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_24000;
+			break;
+		case 48000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_48000;
+			break;
+		case 96000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_96000;
+			break;
+		case 192000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_192000;
+			break;
+		case 32000:
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_32000;
+			break;
+		default:
+			/* Mark as sampling frequency not indicated */
+			player->stream_settings.iec958.status[3] =
+						IEC958_AES3_CON_FS_NOTID;
+			break;
+		}
+	}
+
+	/*
+	 * Audio mode:
+	 * Use audio mode status to select PCM or encoded mode
+	 */
+	if (player->stream_settings.iec958.status[0] & IEC958_AES0_NONAUDIO)
+		player->stream_settings.encoding_mode =
+			UNIPERIF_IEC958_ENCODING_MODE_ENCODED;
+	else
+		player->stream_settings.encoding_mode =
+			UNIPERIF_IEC958_ENCODING_MODE_PCM;
+
+	if (player->stream_settings.encoding_mode ==
+		UNIPERIF_IEC958_ENCODING_MODE_PCM)
+		/* Clear user validity bits */
+		SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
+	else
+		/* Set user validity bits */
+		SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 1);
+
+	/* Program the new channel status */
+	for (n = 0; n < 6; ++n) {
+		status  =
+		player->stream_settings.iec958.status[0 + (n * 4)] & 0xf;
+		status |=
+		player->stream_settings.iec958.status[1 + (n * 4)] << 8;
+		status |=
+		player->stream_settings.iec958.status[2 + (n * 4)] << 16;
+		status |=
+		player->stream_settings.iec958.status[3 + (n * 4)] << 24;
+		SET_UNIPERIF_CHANNEL_STA_REGN(player, n, status);
+	}
+	mutex_unlock(&player->ctrl_lock);
+
+	/* Update the channel status */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
+	else
+		SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
+}
+
+static int uni_player_prepare_iec958(struct uniperif *player,
+				     struct snd_pcm_runtime *runtime)
+{
+	int clk_div;
+
+	clk_div = player->mclk / runtime->rate;
+
+	/* Oversampling must be a multiple of 128 as iec958 frame is 32 bits */
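+	/* e.g. mclk = 12.288 MHz at 48 kHz: clk_div = 256, divider = 2 */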
+	if ((clk_div % 128) || (clk_div <= 0)) {
+		dev_err(player->dev, "%s: invalid clk_div %d",
+			__func__, clk_div);
+		return -EINVAL;
+	}
+
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* 16/16 memory format */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
+		/* 32-bits per sub-frame */
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		/* Set 16-bit sample precision */
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/* 16/0 memory format */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+		/* 32-bits per sub-frame */
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		/* Set 24-bit sample precision */
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_24(player);
+		break;
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	/* Set parity to be calculated by the hardware */
+	SET_UNIPERIF_CONFIG_PARITY_CNTR_BY_HW(player);
+
+	/* Set channel status bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_CHANNEL_STA_CNTR_BY_HW(player);
+
+	/* Set user data bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_USER_DAT_CNTR_BY_HW(player);
+
+	/* Set validity bits to be inserted by the hardware */
+	SET_UNIPERIF_CONFIG_VALIDITY_DAT_CNTR_BY_HW(player);
+
+	/* Set full software control to disabled */
+	SET_UNIPERIF_CONFIG_SPDIF_SW_CTRL_DISABLE(player);
+
+	SET_UNIPERIF_CTRL_ZERO_STUFF_HW(player);
+
+	/* Update the channel status */
+	uni_player_set_channel_status(player, runtime);
+
+	/* Clear the user validity user bits */
+	SET_UNIPERIF_USER_VALIDITY_VALIDITY_LR(player, 0);
+
+	/* Disable one-bit audio mode */
+	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
+
+	/* Enable consecutive frames repetition of Z preamble (not for HBRA) */
+	SET_UNIPERIF_CONFIG_REPEAT_CHL_STS_ENABLE(player);
+
+	/* Select SUBF1_SUBF0 sub-frame order (left/right channel swap) */
+	SET_UNIPERIF_CONFIG_SUBFRAME_SEL_SUBF1_SUBF0(player);
+
+	/* Set data output as MSB first */
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+
+	if (player->stream_settings.encoding_mode ==
+				UNIPERIF_IEC958_ENCODING_MODE_ENCODED)
+		SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_ON(player);
+	else
+		SET_UNIPERIF_CTRL_EXIT_STBY_ON_EOBLOCK_OFF(player);
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
+
+	/* Set rounding to off */
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+
+	/* Set clock divisor */
+	SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / 128);
+
+	/* Set the spdif latency to not wait before starting player */
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+
+	/*
+	 * On older IPs, ensure iec958 formatting is off here; it is enabled
+	 * in uni_player_start() at the same time as the operation mode to
+	 * work around a silicon issue. Newer IPs enable it directly here.
+	 */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
+	else
+		SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+
+	return 0;
+}
+
+static int uni_player_prepare_pcm(struct uniperif *player,
+				  struct snd_pcm_runtime *runtime)
+{
+	int output_frame_size, slot_width, clk_div;
+
+	/* Force slot width to 32 in I2S mode (HW constraint) */
+	if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
+		SND_SOC_DAIFMT_I2S) {
+		slot_width = 32;
+	} else {
+		switch (runtime->format) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			slot_width = 16;
+			break;
+		default:
+			slot_width = 32;
+			break;
+		}
+	}
+	output_frame_size = slot_width * runtime->channels;
+
+	clk_div = player->mclk / runtime->rate;
+	/*
+	 * For a 32-bit sub-frame clk_div must be a multiple of 128;
+	 * for a 16-bit sub-frame it must be a multiple of 64.
+	 */
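+	/*
+	 * e.g. mclk = 256 * fs gives clk_div = 256, valid for both slot
+	 * widths; with 2 channels of 32-bit slots the divider programmed
+	 * below is 256 / (2 * 64) = 2.
+	 */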
+	if ((slot_width == 32) && (clk_div % 128)) {
+		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		return -EINVAL;
+	}
+
+	if ((slot_width == 16) && (clk_div % 64)) {
+		dev_err(player->dev, "%s: invalid clk_div", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Number of bits per subframe (which is one channel sample)
+	 * on output - Transfer 16 or 32 bits from FIFO
+	 */
+	switch (slot_width) {
+	case 32:
+		SET_UNIPERIF_I2S_FMT_NBIT_32(player);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
+		break;
+	case 16:
+		SET_UNIPERIF_I2S_FMT_NBIT_16(player);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(player);
+		break;
+	default:
+		dev_err(player->dev, "subframe format not supported");
+		return -EINVAL;
+	}
+
+	/* Configure data memory format */
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* One data word contains two samples */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(player);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/*
+		 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
+		 * on the left, then zeros (if fewer than 32 bits)"... ;-)
+		 */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+		break;
+
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	/* Set rounding to off */
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+
+	/* Set clock divisor */
+	SET_UNIPERIF_CTRL_DIVIDER(player, clk_div / (2 * output_frame_size));
+
+	/* Number of channels must be even */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(player->dev, "%s: invalid nb of channels", __func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(player, runtime->channels / 2);
+
+	/* Set 1-bit audio format to disabled */
+	SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+
+	/* No iec958 formatting as output goes to a DAC */
+	SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
+
+	return 0;
+}
+
+/*
+ * ALSA uniperipheral iec958 controls
+ */
+static int  uni_player_ctl_iec958_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+	uinfo->count = 1;
+
+	return 0;
+}
+
+static int uni_player_ctl_iec958_get(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_aes_iec958 *iec958 = &player->stream_settings.iec958;
+
+	mutex_lock(&player->ctrl_lock);
+	ucontrol->value.iec958.status[0] = iec958->status[0];
+	ucontrol->value.iec958.status[1] = iec958->status[1];
+	ucontrol->value.iec958.status[2] = iec958->status[2];
+	ucontrol->value.iec958.status[3] = iec958->status[3];
+	mutex_unlock(&player->ctrl_lock);
+	return 0;
+}
+
+static int uni_player_ctl_iec958_put(struct snd_kcontrol *kcontrol,
+				     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_aes_iec958 *iec958 =  &player->stream_settings.iec958;
+
+	mutex_lock(&player->ctrl_lock);
+	iec958->status[0] = ucontrol->value.iec958.status[0];
+	iec958->status[1] = ucontrol->value.iec958.status[1];
+	iec958->status[2] = ucontrol->value.iec958.status[2];
+	iec958->status[3] = ucontrol->value.iec958.status[3];
+	mutex_unlock(&player->ctrl_lock);
+
+	uni_player_set_channel_status(player, NULL);
+
+	return 0;
+}
+
+static struct snd_kcontrol_new uni_player_iec958_ctl = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+	.info = uni_player_ctl_iec958_info,
+	.get = uni_player_ctl_iec958_get,
+	.put = uni_player_ctl_iec958_put,
+};
+
+/*
+ * uniperif rate adjustment control
+ */
+static int snd_sti_clk_adjustment_info(struct snd_kcontrol *kcontrol,
+				       struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 1;
+	uinfo->value.integer.min = UNIPERIF_PLAYER_CLK_ADJ_MIN;
+	uinfo->value.integer.max = UNIPERIF_PLAYER_CLK_ADJ_MAX;
+	uinfo->value.integer.step = 1;
+
+	return 0;
+}
+
+static int snd_sti_clk_adjustment_get(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	mutex_lock(&player->ctrl_lock);
+	ucontrol->value.integer.value[0] = player->clk_adj;
+	mutex_unlock(&player->ctrl_lock);
+
+	return 0;
+}
+
+static int snd_sti_clk_adjustment_put(struct snd_kcontrol *kcontrol,
+				      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	int ret = 0;
+
+	if ((ucontrol->value.integer.value[0] < UNIPERIF_PLAYER_CLK_ADJ_MIN) ||
+	    (ucontrol->value.integer.value[0] > UNIPERIF_PLAYER_CLK_ADJ_MAX))
+		return -EINVAL;
+
+	mutex_lock(&player->ctrl_lock);
+	player->clk_adj = ucontrol->value.integer.value[0];
+
+	if (player->mclk)
+		ret = uni_player_clk_set_rate(player, player->mclk);
+	mutex_unlock(&player->ctrl_lock);
+
+	return ret;
+}
+
+static struct snd_kcontrol_new uni_player_clk_adj_ctl = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = "PCM Playback Oversampling Freq. Adjustment",
+	.info = snd_sti_clk_adjustment_info,
+	.get = snd_sti_clk_adjustment_get,
+	.put = snd_sti_clk_adjustment_put,
+};
+
+static struct snd_kcontrol_new *snd_sti_pcm_ctl[] = {
+	&uni_player_clk_adj_ctl,
+};
+
+static struct snd_kcontrol_new *snd_sti_iec_ctl[] = {
+	&uni_player_iec958_ctl,
+	&uni_player_clk_adj_ctl,
+};
+
+static int uni_player_startup(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	player->clk_adj = 0;
+
+	return 0;
+}
+
+static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+				 unsigned int freq, int dir)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	int ret;
+
+	if (dir == SND_SOC_CLOCK_IN)
+		return 0;
+
+	if (clk_id != 0)
+		return -EINVAL;
+
+	mutex_lock(&player->ctrl_lock);
+	ret = uni_player_clk_set_rate(player, freq);
+	if (!ret)
+		player->mclk = freq;
+	mutex_unlock(&player->ctrl_lock);
+
+	return ret;
+}
+
+static int uni_player_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit;
+	int ret;
+
+	/* The player should be stopped */
+	if (player->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state %d", __func__,
+			player->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	} else {
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+	}
+
+	/* Trigger limit must be an even number */
+	if ((!trigger_limit % 2) || (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(player))) {
+		dev_err(player->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
+
+	/* Uniperipheral setup depends on player type */
+	switch (player->info->player_type) {
+	case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI:
+		ret = uni_player_prepare_iec958(player, runtime);
+		break;
+	case SND_ST_UNIPERIF_PLAYER_TYPE_PCM:
+		ret = uni_player_prepare_pcm(player, runtime);
+		break;
+	case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF:
+		ret = uni_player_prepare_iec958(player, runtime);
+		break;
+	default:
+		dev_err(player->dev, "invalid player type");
+		return -EINVAL;
+	}
+
+	if (ret)
+		return ret;
+
+	switch (player->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(player);
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(player);
+		SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
+		break;
+	}
+
+	switch (player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(player);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(player);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(player);
+		break;
+	default:
+		dev_err(player->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(player, 0);
+
+	/* Reset uniperipheral player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	return reset_player(player);
+}
+
+static int uni_player_start(struct uniperif *player)
+{
+	int ret;
+
+	/* The player should be stopped */
+	if (player->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state", __func__);
+		return -EINVAL;
+	}
+
+	ret = clk_prepare_enable(player->clk);
+	if (ret) {
+		dev_err(player->dev, "%s: Failed to enable clock", __func__);
+		return ret;
+	}
+
+	/* Clear any pending interrupts */
+	SET_UNIPERIF_ITS_BCLR(player, GET_UNIPERIF_ITS(player));
+
+	/* Set the interrupt mask */
+	SET_UNIPERIF_ITM_BSET_DMA_ERROR(player);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(player);
+
+	/* Enable underflow recovery interrupts */
+	if (player->info->underflow_enabled) {
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(player);
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(player);
+	}
+
+	/* Reset uniperipheral player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	ret = reset_player(player);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * The driver does not use the IEC61937 features of the uniperipheral
+	 * hardware. Instead, IEC61937 formatting is performed in software and
+	 * inserted directly into the audio data stream. As such, when encoded
+	 * mode is selected, linear PCM mode is still used; the only differences
+	 * are the channel status bits set for encoded mode and the validity
+	 * bits being set.
+	 */
+	SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(player);
+
+	/*
+	 * If iec958 formatting is required for hdmi or spdif, then it must be
+	 * enabled after the operation mode is set. If set prior to this, it
+	 * will not take effect and will hang the player.
+	 */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player))
+				SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+
+	/* Force channel status update (no update if the clock is disabled) */
+	if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		SET_UNIPERIF_CONFIG_CHL_STS_UPDATE(player);
+	else
+		SET_UNIPERIF_BIT_CONTROL_CHL_STS_UPDATE(player);
+
+	/* Update state to started */
+	player->state = UNIPERIF_STATE_STARTED;
+
+	return 0;
+}
+
+static int uni_player_stop(struct uniperif *player)
+{
+	int ret;
+
+	/* The player should not be in stopped state */
+	if (player->state == UNIPERIF_STATE_STOPPED) {
+		dev_err(player->dev, "%s: invalid player state", __func__);
+		return -EINVAL;
+	}
+
+	/* Turn the player off */
+	SET_UNIPERIF_CTRL_OPERATION_OFF(player);
+
+	/* Soft reset the player */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(player);
+
+	ret = reset_player(player);
+	if (ret < 0)
+		return ret;
+
+	/* Disable interrupts */
+	SET_UNIPERIF_ITM_BCLR(player, GET_UNIPERIF_ITM(player));
+
+	/* Disable clock */
+	clk_disable_unprepare(player->clk);
+
+	/* Update state to stopped and return */
+	player->state = UNIPERIF_STATE_STOPPED;
+
+	return 0;
+}
+
+int uni_player_resume(struct uniperif *player)
+{
+	int ret;
+
+	/* Select the frequency synthesizer clock */
+	if (player->clk_sel) {
+		ret = regmap_field_write(player->clk_sel, 1);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: Failed to select freq synth clock",
+				__func__);
+			return ret;
+		}
+	}
+
+	SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_player_resume);
+
+static int uni_player_trigger(struct snd_pcm_substream *substream,
+			      int cmd, struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		return uni_player_start(player);
+	case SNDRV_PCM_TRIGGER_STOP:
+		return uni_player_stop(player);
+	case SNDRV_PCM_TRIGGER_RESUME:
+		return uni_player_resume(player);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void uni_player_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *player = priv->dai_data.uni;
+
+	if (player->state != UNIPERIF_STATE_STOPPED)
+		/* Stop the player */
+		uni_player_stop(player);
+}
+
+static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
+					struct uniperif *player)
+{
+	int bit_offset;
+	struct device_node *node = pdev->dev.of_node;
+	struct regmap *regmap;
+
+	bit_offset = SYS_CFG_AUDI0_GLUE_PCM_CLKX + player->info->id;
+
+	regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
+
+	if (regmap) {
+		struct reg_field regfield =
+			REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
+
+		player->clk_sel = regmap_field_alloc(regmap, regfield);
+	} else {
+		dev_err(&pdev->dev, "sti-audio-clk-glue syscfg not found\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int uni_player_parse_dt(struct platform_device *pdev,
+			       struct uniperif *player)
+{
+	struct uniperif_info *info;
+	struct device *dev = &pdev->dev;
+	struct device_node *pnode = pdev->dev.of_node;
+	const char *mode;
+
+	/* Allocate memory for the info structure */
+	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	of_property_read_u32(pnode, "version", &player->ver);
+	if (player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) {
+		dev_err(dev, "Unknown uniperipheral version");
+		return -EINVAL;
+	}
+	/* Underflow recovery is only supported on later ip revisions */
+	if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		info->underflow_enabled = 1;
+
+	of_property_read_u32(pnode, "uniperiph-id", &info->id);
+
+	/* Read the device mode property; bail out if it is missing */
+	if (of_property_read_string(pnode, "mode", &mode))
+		return -EINVAL;
+
+	if (strcasecmp(mode, "hdmi") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
+	else if (strcasecmp(mode, "pcm") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM;
+	else if (strcasecmp(mode, "spdif") == 0)
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF;
+	else
+		info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE;
+
+	/* Save the info structure */
+	player->info = info;
+
+	/* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */
+	if (uni_player_parse_dt_clk_glue(pdev, player))
+		return -EINVAL;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops uni_player_dai_ops = {
+		.startup = uni_player_startup,
+		.shutdown = uni_player_shutdown,
+		.prepare = uni_player_prepare,
+		.trigger = uni_player_trigger,
+		.hw_params = sti_uniperiph_dai_hw_params,
+		.set_fmt = sti_uniperiph_dai_set_fmt,
+		.set_sysclk = uni_player_set_sysclk
+};
+
+int uni_player_init(struct platform_device *pdev,
+		    struct uniperif *player)
+{
+	int ret = 0;
+
+	player->dev = &pdev->dev;
+	player->state = UNIPERIF_STATE_STOPPED;
+	player->hw = &uni_player_pcm_hw;
+	player->dai_ops = &uni_player_dai_ops;
+
+	ret = uni_player_parse_dt(pdev, player);
+
+	if (ret < 0) {
+		dev_err(player->dev, "Failed to parse DeviceTree");
+		return ret;
+	}
+
+	/* Get uniperif resource */
+	player->clk = of_clk_get(pdev->dev.of_node, 0);
+	if (IS_ERR(player->clk)) {
+		dev_err(player->dev, "Failed to get clock");
+		return PTR_ERR(player->clk);
+	}
+
+	/* Select the frequency synthesizer clock */
+	if (player->clk_sel) {
+		ret = regmap_field_write(player->clk_sel, 1);
+		if (ret) {
+			dev_err(player->dev,
+				"%s: Failed to select freq synth clock",
+				__func__);
+			return ret;
+		}
+	}
+
+	ret = devm_request_irq(&pdev->dev, player->irq,
+			       uni_player_irq_handler, IRQF_SHARED,
+			       dev_name(&pdev->dev), player);
+	if (ret < 0)
+		return ret;
+
+	mutex_init(&player->ctrl_lock);
+
+	/* Ensure these are disabled by default */
+	SET_UNIPERIF_CONFIG_BACK_STALL_REQ_DISABLE(player);
+	SET_UNIPERIF_CTRL_ROUNDING_OFF(player);
+	SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
+	SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
+
+	if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) {
+		/* Set default iec958 status bits  */
+
+		/* Consumer, PCM, copyright, 2ch, mode 0 */
+		player->stream_settings.iec958.status[0] = 0x00;
+		/* Broadcast reception category */
+		player->stream_settings.iec958.status[1] =
+					IEC958_AES1_CON_GENERAL;
+		/* Do not take into account source or channel number */
+		player->stream_settings.iec958.status[2] =
+					IEC958_AES2_CON_SOURCE_UNSPEC;
+		/* Sampling frequency not indicated */
+		player->stream_settings.iec958.status[3] =
+					IEC958_AES3_CON_FS_NOTID;
+		/* Max sample word 24-bit, sample word length not indicated */
+		player->stream_settings.iec958.status[4] =
+					IEC958_AES4_CON_MAX_WORDLEN_24 |
+					IEC958_AES4_CON_WORDLEN_24_20;
+
+		player->num_ctrls = ARRAY_SIZE(snd_sti_iec_ctl);
+		player->snd_ctrls = snd_sti_iec_ctl[0];
+	} else {
+		player->num_ctrls = ARRAY_SIZE(snd_sti_pcm_ctl);
+		player->snd_ctrls = snd_sti_pcm_ctl[0];
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_player_init);
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
new file mode 100644
index 0000000..c502626
--- /dev/null
+++ b/sound/soc/sti/uniperif_reader.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2015
+ * Authors: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+ *          for STMicroelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <sound/soc.h>
+
+#include "uniperif.h"
+
+/*
+ * Note: snd_pcm_hardware is linked to DMA controller but is declared here to
+ * integrate unireader capability in terms of rate and supported channels
+ */
+static const struct snd_pcm_hardware uni_reader_pcm_hw = {
+	.info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+		SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+		SNDRV_PCM_INFO_MMAP_VALID,
+	.formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+	.rates = SNDRV_PCM_RATE_CONTINUOUS,
+	.rate_min = 8000,
+	.rate_max = 96000,
+
+	.channels_min = 2,
+	.channels_max = 8,
+
+	.periods_min = 2,
+	.periods_max = 48,
+
+	.period_bytes_min = 128,
+	.period_bytes_max = 64 * PAGE_SIZE,
+	.buffer_bytes_max = 256 * PAGE_SIZE
+};
+
+/*
+ * uni_reader_irq_handler
+ * In case of error, the audio stream is stopped; the stop action is protected
+ * via the PCM stream lock to avoid a race condition with the trigger callback.
+ */
+static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
+{
+	irqreturn_t ret = IRQ_NONE;
+	struct uniperif *reader = dev_id;
+	unsigned int status;
+
+	if (reader->state == UNIPERIF_STATE_STOPPED) {
+		/* Unexpected IRQ: do nothing */
+		dev_warn(reader->dev, "unexpected IRQ");
+		return IRQ_HANDLED;
+	}
+
+	/* Get interrupt status & clear them immediately */
+	status = GET_UNIPERIF_ITS(reader);
+	SET_UNIPERIF_ITS_BCLR(reader, status);
+
+	/* Check for fifo overflow error */
+	if (unlikely(status & UNIPERIF_ITS_FIFO_ERROR_MASK(reader))) {
+		dev_err(reader->dev, "FIFO error detected");
+
+		snd_pcm_stream_lock(reader->substream);
+		snd_pcm_stop(reader->substream, SNDRV_PCM_STATE_XRUN);
+		snd_pcm_stream_unlock(reader->substream);
+
+		return IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+static int uni_reader_prepare(struct snd_pcm_substream *substream,
+			      struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	int transfer_size, trigger_limit;
+	int slot_width;
+	int count = 10;
+
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+			reader->state);
+		return -EINVAL;
+	}
+
+	/* Calculate transfer size (in fifo cells and bytes) for frame count */
+	transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+
+	/* Calculate number of empty cells available before asserting DREQ */
+	if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+		trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+	else
+		/*
+		 * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+		 * FDMA_TRIGGER_LIMIT also controls when the state switches
+		 * from OFF or STANDBY to AUDIO DATA.
+		 */
+		trigger_limit = transfer_size;
+
+	/* Trigger limit must be an even number */
+	if ((!trigger_limit % 2) ||
+	    (trigger_limit != 1 && transfer_size % 2) ||
+	    (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
+		dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
+
+	switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_IB_IF:
+	case SND_SOC_DAIFMT_NB_IF:
+		SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+		break;
+	default:
+		SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+	}
+
+	/* Force slot width to 32 in I2S mode */
+	if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
+		== SND_SOC_DAIFMT_I2S) {
+		slot_width = 32;
+	} else {
+		switch (runtime->format) {
+		case SNDRV_PCM_FORMAT_S16_LE:
+			slot_width = 16;
+			break;
+		default:
+			slot_width = 32;
+			break;
+		}
+	}
+
+	/* Number of bits per subframe (i.e. one channel sample) on input. */
+	switch (slot_width) {
+	case 32:
+		SET_UNIPERIF_I2S_FMT_NBIT_32(reader);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
+		break;
+	case 16:
+		SET_UNIPERIF_I2S_FMT_NBIT_16(reader);
+		SET_UNIPERIF_I2S_FMT_DATA_SIZE_16(reader);
+		break;
+	default:
+		dev_err(reader->dev, "subframe format not supported");
+		return -EINVAL;
+	}
+
+	/* Configure data memory format */
+	switch (runtime->format) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		/* One data word contains two samples */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_16(reader);
+		break;
+
+	case SNDRV_PCM_FORMAT_S32_LE:
+		/*
+		 * Actually "16 bits/0 bits" means "32/28/24/20/18/16 bits
+		 * on the MSB, then zeros (if fewer than 32 bits)"...
+		 */
+		SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
+		break;
+
+	default:
+		dev_err(reader->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_I2S_MODE(reader);
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		SET_UNIPERIF_I2S_FMT_ALIGN_RIGHT(reader);
+		SET_UNIPERIF_I2S_FMT_PADDING_SONY_MODE(reader);
+		break;
+	default:
+		dev_err(reader->dev, "format not supported");
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+
+	/* Data clocking (changing) on the rising edge */
+	SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+
+	/* Number of channels must be even */
+	if ((runtime->channels % 2) || (runtime->channels < 2) ||
+	    (runtime->channels > 10)) {
+		dev_err(reader->dev, "%s: invalid nb of channels", __func__);
+		return -EINVAL;
+	}
+
+	SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
+
+	/* Clear any pending interrupts */
+	SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
+
+	SET_UNIPERIF_I2S_FMT_NO_OF_SAMPLES_TO_READ(reader, 0);
+
+	/* Set the interrupt mask */
+	SET_UNIPERIF_ITM_BSET_DMA_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_MEM_BLK_READ(reader);
+
+	/* Enable underflow recovery interrupts */
+	if (reader->info->underflow_enabled) {
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_DONE(reader);
+		SET_UNIPERIF_ITM_BSET_UNDERFLOW_REC_FAILED(reader);
+	}
+
+	/* Reset uniperipheral reader */
+	SET_UNIPERIF_SOFT_RST_SOFT_RST(reader);
+
+	while (GET_UNIPERIF_SOFT_RST_SOFT_RST(reader) && count) {
+		udelay(5);
+		count--;
+	}
+	if (!count) {
+		dev_err(reader->dev, "Failed to reset uniperif");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int uni_reader_start(struct uniperif *reader)
+{
+	/* The reader should be stopped */
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		return -EINVAL;
+	}
+
+	/* Enable reader interrupts (and clear possible stalled ones) */
+	SET_UNIPERIF_ITS_BCLR_FIFO_ERROR(reader);
+	SET_UNIPERIF_ITM_BSET_FIFO_ERROR(reader);
+
+	/* Launch the reader */
+	SET_UNIPERIF_CTRL_OPERATION_PCM_DATA(reader);
+
+	/* Update state to started */
+	reader->state = UNIPERIF_STATE_STARTED;
+	return 0;
+}
+
+static int uni_reader_stop(struct uniperif *reader)
+{
+	/* The reader should not be in stopped state */
+	if (reader->state == UNIPERIF_STATE_STOPPED) {
+		dev_err(reader->dev, "%s: invalid reader state", __func__);
+		return -EINVAL;
+	}
+
+	/* Turn the reader off */
+	SET_UNIPERIF_CTRL_OPERATION_OFF(reader);
+
+	/* Disable interrupts */
+	SET_UNIPERIF_ITM_BCLR(reader, GET_UNIPERIF_ITM(reader));
+
+	/* Update state to stopped and return */
+	reader->state = UNIPERIF_STATE_STOPPED;
+
+	return 0;
+}
+
+static int  uni_reader_trigger(struct snd_pcm_substream *substream,
+			       int cmd, struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+		return  uni_reader_start(reader);
+	case SNDRV_PCM_TRIGGER_STOP:
+		return  uni_reader_stop(reader);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void uni_reader_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+	struct uniperif *reader = priv->dai_data.uni;
+
+	if (reader->state != UNIPERIF_STATE_STOPPED) {
+		/* Stop the reader */
+		uni_reader_stop(reader);
+	}
+}
+
+static int uni_reader_parse_dt(struct platform_device *pdev,
+			       struct uniperif *reader)
+{
+	struct uniperif_info *info;
+	struct device_node *node = pdev->dev.of_node;
+
+	/* Allocate memory for the info structure */
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	of_property_read_u32(node, "version", &reader->ver);
+
+	/* Save the info structure */
+	reader->info = info;
+
+	return 0;
+}
+
+static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+		.shutdown = uni_reader_shutdown,
+		.prepare = uni_reader_prepare,
+		.trigger = uni_reader_trigger,
+		.hw_params = sti_uniperiph_dai_hw_params,
+		.set_fmt = sti_uniperiph_dai_set_fmt,
+};
+
+int uni_reader_init(struct platform_device *pdev,
+		    struct uniperif *reader)
+{
+	int ret = 0;
+
+	reader->dev = &pdev->dev;
+	reader->state = UNIPERIF_STATE_STOPPED;
+	reader->hw = &uni_reader_pcm_hw;
+	reader->dai_ops = &uni_reader_dai_ops;
+
+	dev_dbg(reader->dev, "%s: enter\n", __func__);
+	ret = uni_reader_parse_dt(pdev, reader);
+	if (ret < 0) {
+		dev_err(reader->dev, "Failed to parse DeviceTree");
+		return ret;
+	}
+
+	ret = devm_request_irq(&pdev->dev, reader->irq,
+			       uni_reader_irq_handler, IRQF_SHARED,
+			       dev_name(&pdev->dev), reader);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to request IRQ");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(uni_reader_init);
diff --git a/sound/soc/tegra/tegra20_das.c b/sound/soc/tegra/tegra20_das.c
index f52600b..89add13 100644
--- a/sound/soc/tegra/tegra20_das.c
+++ b/sound/soc/tegra/tegra20_das.c
@@ -133,7 +133,7 @@
 
 static int tegra20_das_probe(struct platform_device *pdev)
 {
-	struct resource *res, *region;
+	struct resource *res;
 	void __iomem *regs;
 	int ret = 0;
 
@@ -149,24 +149,9 @@
 	das->dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err;
-	}
-
-	region = devm_request_mem_region(&pdev->dev, res->start,
-					 resource_size(res), pdev->name);
-	if (!region) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err;
-	}
-
-	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err;
 	}
 
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 05f1c6e..14106fa 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -339,7 +339,7 @@
 static int tegra20_i2s_platform_probe(struct platform_device *pdev)
 {
 	struct tegra20_i2s *i2s;
-	struct resource *mem, *memregion;
+	struct resource *mem;
 	void __iomem *regs;
 	int ret;
 
@@ -362,24 +362,9 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err_clk_put;
 	}
 
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 9141477..a0c3640 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -265,7 +265,7 @@
 static int tegra20_spdif_platform_probe(struct platform_device *pdev)
 {
 	struct tegra20_spdif *spdif;
-	struct resource *mem, *memregion, *dmareq;
+	struct resource *mem, *dmareq;
 	void __iomem *regs;
 	int ret;
 
@@ -273,45 +273,26 @@
 			     GFP_KERNEL);
 	if (!spdif) {
 		dev_err(&pdev->dev, "Can't allocate tegra20_spdif\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	dev_set_drvdata(&pdev->dev, spdif);
 
-	spdif->clk_spdif_out = clk_get(&pdev->dev, "spdif_out");
+	spdif->clk_spdif_out = devm_clk_get(&pdev->dev, "spdif_out");
 	if (IS_ERR(spdif->clk_spdif_out)) {
 		pr_err("Can't retrieve spdif clock\n");
 		ret = PTR_ERR(spdif->clk_spdif_out);
-		goto err;
+		return ret;
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	dmareq = platform_get_resource(pdev, IORESOURCE_DMA, 0);
 	if (!dmareq) {
 		dev_err(&pdev->dev, "No DMA resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put;
+		return -ENODEV;
 	}
 
 	spdif->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
@@ -319,7 +300,7 @@
 	if (IS_ERR(spdif->regmap)) {
 		dev_err(&pdev->dev, "regmap init failed\n");
 		ret = PTR_ERR(spdif->regmap);
-		goto err_clk_put;
+		return ret;
 	}
 
 	spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
@@ -335,7 +316,7 @@
 	}
 
 	ret = snd_soc_register_component(&pdev->dev, &tegra20_spdif_component,
-				   &tegra20_spdif_dai, 1);
+					 &tegra20_spdif_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Could not register DAI: %d\n", ret);
 		ret = -ENOMEM;
@@ -357,16 +338,12 @@
 		tegra20_spdif_runtime_suspend(&pdev->dev);
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
-err_clk_put:
-	clk_put(spdif->clk_spdif_out);
-err:
+
 	return ret;
 }
 
 static int tegra20_spdif_platform_remove(struct platform_device *pdev)
 {
-	struct tegra20_spdif *spdif = dev_get_drvdata(&pdev->dev);
-
 	pm_runtime_disable(&pdev->dev);
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		tegra20_spdif_runtime_suspend(&pdev->dev);
@@ -374,8 +351,6 @@
 	tegra_pcm_platform_unregister(&pdev->dev);
 	snd_soc_unregister_component(&pdev->dev);
 
-	clk_put(spdif->clk_spdif_out);
-
 	return 0;
 }
 
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index bc94e5d..fef3b9a 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -521,7 +521,7 @@
 	const struct tegra30_ahub_soc_data *soc_data;
 	struct reset_control *rst;
 	int i;
-	struct resource *res0, *res1, *region;
+	struct resource *res0, *res1;
 	void __iomem *regs_apbif, *regs_ahub;
 	int ret = 0;
 
@@ -549,103 +549,67 @@
 			dev_err(&pdev->dev, "Can't get reset %s\n",
 				configlink_mods[i].rst_name);
 			ret = PTR_ERR(rst);
-			goto err;
+			return ret;
 		}
 
 		ret = reset_control_deassert(rst);
 		reset_control_put(rst);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
 	ahub = devm_kzalloc(&pdev->dev, sizeof(struct tegra30_ahub),
 			    GFP_KERNEL);
 	if (!ahub) {
 		dev_err(&pdev->dev, "Can't allocate tegra30_ahub\n");
-		ret = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	dev_set_drvdata(&pdev->dev, ahub);
 
 	ahub->soc_data = soc_data;
 	ahub->dev = &pdev->dev;
 
-	ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio");
+	ahub->clk_d_audio = devm_clk_get(&pdev->dev, "d_audio");
 	if (IS_ERR(ahub->clk_d_audio)) {
 		dev_err(&pdev->dev, "Can't retrieve ahub d_audio clock\n");
 		ret = PTR_ERR(ahub->clk_d_audio);
-		goto err;
+		return ret;
 	}
 
-	ahub->clk_apbif = clk_get(&pdev->dev, "apbif");
+	ahub->clk_apbif = devm_clk_get(&pdev->dev, "apbif");
 	if (IS_ERR(ahub->clk_apbif)) {
 		dev_err(&pdev->dev, "Can't retrieve ahub apbif clock\n");
 		ret = PTR_ERR(ahub->clk_apbif);
-		goto err_clk_put_d_audio;
+		return ret;
 	}
 
 	res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res0) {
-		dev_err(&pdev->dev, "No apbif memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put_apbif;
-	}
+	regs_apbif = devm_ioremap_resource(&pdev->dev, res0);
+	if (IS_ERR(regs_apbif))
+		return PTR_ERR(regs_apbif);
 
-	region = devm_request_mem_region(&pdev->dev, res0->start,
-					 resource_size(res0), DRV_NAME);
-	if (!region) {
-		dev_err(&pdev->dev, "request region apbif failed\n");
-		ret = -EBUSY;
-		goto err_clk_put_apbif;
-	}
 	ahub->apbif_addr = res0->start;
 
-	regs_apbif = devm_ioremap(&pdev->dev, res0->start,
-				  resource_size(res0));
-	if (!regs_apbif) {
-		dev_err(&pdev->dev, "ioremap apbif failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put_apbif;
-	}
-
 	ahub->regmap_apbif = devm_regmap_init_mmio(&pdev->dev, regs_apbif,
 					&tegra30_ahub_apbif_regmap_config);
 	if (IS_ERR(ahub->regmap_apbif)) {
 		dev_err(&pdev->dev, "apbif regmap init failed\n");
 		ret = PTR_ERR(ahub->regmap_apbif);
-		goto err_clk_put_apbif;
+		return ret;
 	}
 	regcache_cache_only(ahub->regmap_apbif, true);
 
 	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res1) {
-		dev_err(&pdev->dev, "No ahub memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put_apbif;
-	}
-
-	region = devm_request_mem_region(&pdev->dev, res1->start,
-					 resource_size(res1), DRV_NAME);
-	if (!region) {
-		dev_err(&pdev->dev, "request region ahub failed\n");
-		ret = -EBUSY;
-		goto err_clk_put_apbif;
-	}
-
-	regs_ahub = devm_ioremap(&pdev->dev, res1->start,
-				 resource_size(res1));
-	if (!regs_ahub) {
-		dev_err(&pdev->dev, "ioremap ahub failed\n");
-		ret = -ENOMEM;
-		goto err_clk_put_apbif;
-	}
+	regs_ahub = devm_ioremap_resource(&pdev->dev, res1);
+	if (IS_ERR(regs_ahub))
+		return PTR_ERR(regs_ahub);
 
 	ahub->regmap_ahub = devm_regmap_init_mmio(&pdev->dev, regs_ahub,
 					&tegra30_ahub_ahub_regmap_config);
 	if (IS_ERR(ahub->regmap_ahub)) {
 		dev_err(&pdev->dev, "ahub regmap init failed\n");
 		ret = PTR_ERR(ahub->regmap_ahub);
-		goto err_clk_put_apbif;
+		return ret;
 	}
 	regcache_cache_only(ahub->regmap_ahub, true);
 
@@ -662,12 +626,7 @@
 
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
-err_clk_put_apbif:
-	clk_put(ahub->clk_apbif);
-err_clk_put_d_audio:
-	clk_put(ahub->clk_d_audio);
-	ahub = NULL;
-err:
+
 	return ret;
 }
 
@@ -680,11 +639,6 @@
 	if (!pm_runtime_status_suspended(&pdev->dev))
 		tegra30_ahub_runtime_suspend(&pdev->dev);
 
-	clk_put(ahub->clk_apbif);
-	clk_put(ahub->clk_d_audio);
-
-	ahub = NULL;
-
 	return 0;
 }
 
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index fe36375..8e55583 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -379,7 +379,7 @@
 	struct tegra30_i2s *i2s;
 	const struct of_device_id *match;
 	u32 cif_ids[2];
-	struct resource *mem, *memregion;
+	struct resource *mem;
 	void __iomem *regs;
 	int ret;
 
@@ -419,24 +419,9 @@
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem) {
-		dev_err(&pdev->dev, "No memory resource\n");
-		ret = -ENODEV;
-		goto err_clk_put;
-	}
-
-	memregion = devm_request_mem_region(&pdev->dev, mem->start,
-					    resource_size(mem), DRV_NAME);
-	if (!memregion) {
-		dev_err(&pdev->dev, "Memory region already claimed\n");
-		ret = -EBUSY;
-		goto err_clk_put;
-	}
-
-	regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
-	if (!regs) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		ret = -ENOMEM;
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs)) {
+		ret = PTR_ERR(regs);
 		goto err_clk_put;
 	}
 
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 88eacfd..a8f705b 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -411,13 +411,8 @@
 
 static int txx9aclc_soc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &txx9aclc_soc_platform);
-}
-
-static int txx9aclc_soc_platform_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_platform(&pdev->dev);
-	return 0;
+	return devm_snd_soc_register_platform(&pdev->dev,
+					      &txx9aclc_soc_platform);
 }
 
 static struct platform_driver txx9aclc_pcm_driver = {
@@ -426,7 +421,6 @@
 	},
 
 	.probe = txx9aclc_soc_platform_probe,
-	.remove = txx9aclc_soc_platform_remove,
 };
 
 module_platform_driver(txx9aclc_pcm_driver);
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 978f2d7..f5df08d 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -773,20 +773,22 @@
 	}
 	prcmu_qos_add_requirement(PRCMU_QOS_APE_OPP, (char *)pdev->name, 50);
 
-	drvdata->pclk = clk_get(&pdev->dev, "apb_pclk");
+	drvdata->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
 	if (IS_ERR(drvdata->pclk)) {
 		ret = (int)PTR_ERR(drvdata->pclk);
-		dev_err(&pdev->dev, "%s: ERROR: clk_get of pclk failed (%d)!\n",
+		dev_err(&pdev->dev,
+			"%s: ERROR: devm_clk_get of pclk failed (%d)!\n",
 			__func__, ret);
-		goto err_pclk;
+		return ret;
 	}
 
-	drvdata->clk = clk_get(&pdev->dev, NULL);
+	drvdata->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(drvdata->clk)) {
 		ret = (int)PTR_ERR(drvdata->clk);
-		dev_err(&pdev->dev, "%s: ERROR: clk_get failed (%d)!\n",
+		dev_err(&pdev->dev,
+			"%s: ERROR: devm_clk_get failed (%d)!\n",
 			__func__, ret);
-		goto err_clk;
+		return ret;
 	}
 
 	ret = ux500_msp_i2s_init_msp(pdev, &drvdata->msp,
@@ -795,7 +797,7 @@
 		dev_err(&pdev->dev,
 			"%s: ERROR: Failed to init MSP-struct (%d)!",
 			__func__, ret);
-		goto err_init_msp;
+		return ret;
 	}
 	dev_set_drvdata(&pdev->dev, drvdata);
 
@@ -804,7 +806,7 @@
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Error: %s: Failed to register MSP%d!\n",
 			__func__, drvdata->msp->id);
-		goto err_init_msp;
+		return ret;
 	}
 
 	ret = ux500_pcm_register_platform(pdev);
@@ -819,13 +821,6 @@
 
 err_reg_plat:
 	snd_soc_unregister_component(&pdev->dev);
-err_init_msp:
-	clk_put(drvdata->clk);
-err_clk:
-	clk_put(drvdata->pclk);
-err_pclk:
-	devm_regulator_put(drvdata->reg_vape);
-
 	return ret;
 }
 
@@ -837,12 +832,8 @@
 
 	snd_soc_unregister_component(&pdev->dev);
 
-	devm_regulator_put(drvdata->reg_vape);
 	prcmu_qos_remove_requirement(PRCMU_QOS_APE_OPP, "ux500_msp_i2s");
 
-	clk_put(drvdata->clk);
-	clk_put(drvdata->pclk);
-
 	ux500_msp_i2s_cleanup_msp(pdev, drvdata->msp);
 
 	return 0;
diff --git a/sound/soc/xtensa/xtfpga-i2s.c b/sound/soc/xtensa/xtfpga-i2s.c
index 1cfb19e..8382ffa 100644
--- a/sound/soc/xtensa/xtfpga-i2s.c
+++ b/sound/soc/xtensa/xtfpga-i2s.c
@@ -75,7 +75,7 @@
 	 * stream in the pcm_close callback it synchronizes with the interrupt
 	 * handler by means of synchronize_rcu call.
 	 */
-	struct snd_pcm_substream *tx_substream;
+	struct snd_pcm_substream __rcu *tx_substream;
 	unsigned (*tx_fn)(struct xtfpga_i2s *i2s,
 			  struct snd_pcm_runtime *runtime,
 			  unsigned tx_ptr);
@@ -474,11 +474,6 @@
 						     card->dev, size, size);
 }
 
-static void xtfpga_pcm_free(struct snd_pcm *pcm)
-{
-	snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-
 static const struct snd_pcm_ops xtfpga_pcm_ops = {
 	.open		= xtfpga_pcm_open,
 	.close		= xtfpga_pcm_close,
@@ -490,7 +485,6 @@
 
 static const struct snd_soc_platform_driver xtfpga_soc_platform = {
 	.pcm_new	= xtfpga_pcm_new,
-	.pcm_free	= xtfpga_pcm_free,
 	.ops		= &xtfpga_pcm_ops,
 };
 
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 1930c42..1cad93d 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -380,7 +380,7 @@
 	struct zx_i2s_info *zx_i2s;
 	int ret;
 
-	zx_i2s =  kzalloc(sizeof(*zx_i2s), GFP_KERNEL);
+	zx_i2s = devm_kzalloc(&pdev->dev, sizeof(*zx_i2s), GFP_KERNEL);
 	if (!zx_i2s)
 		return -ENOMEM;
 
@@ -401,8 +401,8 @@
 	writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
 	platform_set_drvdata(pdev, zx_i2s);
 
-	ret = snd_soc_register_component(&pdev->dev, &zx_i2s_component,
-					 &zx_i2s_dai, 1);
+	ret = devm_snd_soc_register_component(&pdev->dev, &zx_i2s_component,
+					      &zx_i2s_dai, 1);
 	if (ret) {
 		dev_err(&pdev->dev, "Register DAI failed: %d\n", ret);
 		return ret;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 0450593..18f5664 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -365,13 +365,15 @@
 	}
 
 	mutex_init(&chip->mutex);
-	init_rwsem(&chip->shutdown_rwsem);
+	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
 	chip->card = card;
 	chip->setup = device_setup[idx];
 	chip->autoclock = autoclock;
-	chip->probing = 1;
+	atomic_set(&chip->active, 1); /* avoid autopm during probing */
+	atomic_set(&chip->usage_count, 0);
+	atomic_set(&chip->shutdown, 0);
 
 	chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
 			      le16_to_cpu(dev->descriptor.idProduct));
@@ -495,13 +497,13 @@
 	mutex_lock(&register_mutex);
 	for (i = 0; i < SNDRV_CARDS; i++) {
 		if (usb_chip[i] && usb_chip[i]->dev == dev) {
-			if (usb_chip[i]->shutdown) {
+			if (atomic_read(&usb_chip[i]->shutdown)) {
 				dev_err(&dev->dev, "USB device is in the shutdown state, cannot create a card instance\n");
 				err = -EIO;
 				goto __error;
 			}
 			chip = usb_chip[i];
-			chip->probing = 1;
+			atomic_inc(&chip->active); /* avoid autopm */
 			break;
 		}
 	}
@@ -561,8 +563,8 @@
 
 	usb_chip[chip->index] = chip;
 	chip->num_interfaces++;
-	chip->probing = 0;
 	usb_set_intfdata(intf, chip);
+	atomic_dec(&chip->active);
 	mutex_unlock(&register_mutex);
 	return 0;
 
@@ -570,7 +572,7 @@
 	if (chip) {
 		if (!chip->num_interfaces)
 			snd_card_free(chip->card);
-		chip->probing = 0;
+		atomic_dec(&chip->active);
 	}
 	mutex_unlock(&register_mutex);
 	return err;
@@ -585,23 +587,23 @@
 	struct snd_usb_audio *chip = usb_get_intfdata(intf);
 	struct snd_card *card;
 	struct list_head *p;
-	bool was_shutdown;
 
 	if (chip == (void *)-1L)
 		return;
 
 	card = chip->card;
-	down_write(&chip->shutdown_rwsem);
-	was_shutdown = chip->shutdown;
-	chip->shutdown = 1;
-	up_write(&chip->shutdown_rwsem);
 
 	mutex_lock(&register_mutex);
-	if (!was_shutdown) {
+	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
 		struct snd_usb_endpoint *ep;
 		struct usb_mixer_interface *mixer;
 
+		/* wait until all pending tasks are done;
+		 * they are protected by snd_usb_lock_shutdown()
+		 */
+		wait_event(chip->shutdown_wait,
+			   !atomic_read(&chip->usage_count));
 		snd_card_disconnect(card);
 		/* release the pcm resources */
 		list_for_each_entry(as, &chip->pcm_list, list) {
@@ -631,28 +633,50 @@
 	}
 }
 
+/* lock the shutdown (disconnect) task and autoresume */
+int snd_usb_lock_shutdown(struct snd_usb_audio *chip)
+{
+	int err;
+
+	atomic_inc(&chip->usage_count);
+	if (atomic_read(&chip->shutdown)) {
+		err = -EIO;
+		goto error;
+	}
+	err = snd_usb_autoresume(chip);
+	if (err < 0)
+		goto error;
+	return 0;
+
+ error:
+	if (atomic_dec_and_test(&chip->usage_count))
+		wake_up(&chip->shutdown_wait);
+	return err;
+}
+
+/* autosuspend and unlock the shutdown */
+void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
+{
+	snd_usb_autosuspend(chip);
+	if (atomic_dec_and_test(&chip->usage_count))
+		wake_up(&chip->shutdown_wait);
+}
+
 #ifdef CONFIG_PM
 
 int snd_usb_autoresume(struct snd_usb_audio *chip)
 {
-	int err = -ENODEV;
-
-	down_read(&chip->shutdown_rwsem);
-	if (chip->probing || chip->in_pm)
-		err = 0;
-	else if (!chip->shutdown)
-		err = usb_autopm_get_interface(chip->pm_intf);
-	up_read(&chip->shutdown_rwsem);
-
-	return err;
+	if (atomic_read(&chip->shutdown))
+		return -EIO;
+	if (atomic_inc_return(&chip->active) == 1)
+		return usb_autopm_get_interface(chip->pm_intf);
+	return 0;
 }
 
 void snd_usb_autosuspend(struct snd_usb_audio *chip)
 {
-	down_read(&chip->shutdown_rwsem);
-	if (!chip->shutdown && !chip->probing && !chip->in_pm)
+	if (atomic_dec_and_test(&chip->active))
 		usb_autopm_put_interface(chip->pm_intf);
-	up_read(&chip->shutdown_rwsem);
 }
 
 static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
@@ -665,30 +689,20 @@
 	if (chip == (void *)-1L)
 		return 0;
 
-	if (!PMSG_IS_AUTO(message)) {
+	chip->autosuspended = !!PMSG_IS_AUTO(message);
+	if (!chip->autosuspended)
 		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
-		if (!chip->num_suspended_intf++) {
-			list_for_each_entry(as, &chip->pcm_list, list) {
-				snd_pcm_suspend_all(as->pcm);
-				as->substream[0].need_setup_ep =
-					as->substream[1].need_setup_ep = true;
-			}
-			list_for_each(p, &chip->midi_list) {
-				snd_usbmidi_suspend(p);
-			}
+	if (!chip->num_suspended_intf++) {
+		list_for_each_entry(as, &chip->pcm_list, list) {
+			snd_pcm_suspend_all(as->pcm);
+			as->substream[0].need_setup_ep =
+				as->substream[1].need_setup_ep = true;
 		}
-	} else {
-		/*
-		 * otherwise we keep the rest of the system in the dark
-		 * to keep this transparent
-		 */
-		if (!chip->num_suspended_intf++)
-			chip->autosuspended = 1;
-	}
-
-	if (chip->num_suspended_intf == 1)
+		list_for_each(p, &chip->midi_list)
+			snd_usbmidi_suspend(p);
 		list_for_each_entry(mixer, &chip->mixer_list, list)
 			snd_usb_mixer_suspend(mixer);
+	}
 
 	return 0;
 }
@@ -705,7 +719,7 @@
 	if (--chip->num_suspended_intf)
 		return 0;
 
-	chip->in_pm = 1;
+	atomic_inc(&chip->active); /* avoid autopm */
 	/*
 	 * ALSA leaves material resumption to user space
 	 * we just notify and restart the mixers
@@ -725,7 +739,7 @@
 	chip->autosuspended = 0;
 
 err_out:
-	chip->in_pm = 0;
+	atomic_dec(&chip->active); /* allow autopm after this point */
 	return err;
 }
 
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 03b0744..e6f7189 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -355,8 +355,10 @@
 	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
 		     urb->status == -ENODEV ||		/* device removed */
 		     urb->status == -ECONNRESET ||	/* unlinked */
-		     urb->status == -ESHUTDOWN ||	/* device disabled */
-		     ep->chip->shutdown))		/* device disconnected */
+		     urb->status == -ESHUTDOWN))	/* device disabled */
+		goto exit_clear;
+	/* device disconnected */
+	if (unlikely(atomic_read(&ep->chip->shutdown)))
 		goto exit_clear;
 
 	if (usb_pipeout(ep->pipe)) {
@@ -529,7 +531,7 @@
 {
 	unsigned int i;
 
-	if (!force && ep->chip->shutdown) /* to be sure... */
+	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
 		return -EBADFD;
 
 	clear_bit(EP_FLAG_RUNNING, &ep->flags);
@@ -868,7 +870,7 @@
 	int err;
 	unsigned int i;
 
-	if (ep->chip->shutdown)
+	if (atomic_read(&ep->chip->shutdown))
 		return -EBADFD;
 
 	/* already running? */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 6b3acba..f494dce 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -282,6 +282,21 @@
 	return val;
 }
 
+static int uac2_ctl_value_size(int val_type)
+{
+	switch (val_type) {
+	case USB_MIXER_S32:
+	case USB_MIXER_U32:
+		return 4;
+	case USB_MIXER_S16:
+	case USB_MIXER_U16:
+		return 2;
+	default:
+		return 1;
+	}
+	return 0; /* unreachable */
+}
+
 
 /*
  * retrieve a mixer value
@@ -296,14 +311,11 @@
 	int timeout = 10;
 	int idx = 0, err;
 
-	err = snd_usb_autoresume(chip);
+	err = snd_usb_lock_shutdown(chip);
 	if (err < 0)
 		return -EIO;
 
-	down_read(&chip->shutdown_rwsem);
 	while (timeout-- > 0) {
-		if (chip->shutdown)
-			break;
 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
 		if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
 				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
@@ -319,8 +331,7 @@
 	err = -EINVAL;
 
  out:
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -328,14 +339,14 @@
 			    int validx, int *value_ret)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[2 + 3 * sizeof(__u16)]; /* enough space for one range */
+	unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
 	unsigned char *val;
 	int idx = 0, ret, size;
 	__u8 bRequest;
 
 	if (request == UAC_GET_CUR) {
 		bRequest = UAC2_CS_CUR;
-		size = sizeof(__u16);
+		size = uac2_ctl_value_size(cval->val_type);
 	} else {
 		bRequest = UAC2_CS_RANGE;
 		size = sizeof(buf);
@@ -343,21 +354,15 @@
 
 	memset(buf, 0, sizeof(buf));
 
-	ret = snd_usb_autoresume(chip) ? -EIO : 0;
+	ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
 	if (ret)
 		goto error;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		ret = -ENODEV;
-	} else {
-		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
-		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+	idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+	ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
 			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
 			      validx, idx, buf, size);
-	}
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 
 	if (ret < 0) {
 error:
@@ -446,7 +451,7 @@
 				int request, int validx, int value_set)
 {
 	struct snd_usb_audio *chip = cval->head.mixer->chip;
-	unsigned char buf[2];
+	unsigned char buf[4];
 	int idx = 0, val_len, err, timeout = 10;
 
 	validx += cval->idx_off;
@@ -454,8 +459,7 @@
 	if (cval->head.mixer->protocol == UAC_VERSION_1) {
 		val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
 	} else { /* UAC_VERSION_2 */
-		/* audio class v2 controls are always 2 bytes in size */
-		val_len = sizeof(__u16);
+		val_len = uac2_ctl_value_size(cval->val_type);
 
 		/* FIXME */
 		if (request != UAC_SET_CUR) {
@@ -469,13 +473,14 @@
 	value_set = convert_bytes_value(cval, value_set);
 	buf[0] = value_set & 0xff;
 	buf[1] = (value_set >> 8) & 0xff;
-	err = snd_usb_autoresume(chip);
+	buf[2] = (value_set >> 16) & 0xff;
+	buf[3] = (value_set >> 24) & 0xff;
+
+	err = snd_usb_lock_shutdown(chip);
 	if (err < 0)
 		return -EIO;
-	down_read(&chip->shutdown_rwsem);
+
 	while (timeout-- > 0) {
-		if (chip->shutdown)
-			break;
 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
 		if (snd_usb_ctl_msg(chip->dev,
 				    usb_sndctrlpipe(chip->dev, 0), request,
@@ -490,8 +495,7 @@
 	err = -EINVAL;
 
  out:
-	up_read(&chip->shutdown_rwsem);
-	snd_usb_autosuspend(chip);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -715,15 +719,21 @@
 				term->name = d->iTerminal;
 			} else { /* UAC_VERSION_2 */
 				struct uac2_input_terminal_descriptor *d = p1;
+
+				/* call recursively to verify that the
+				 * referenced clock entity is valid */
+				err = check_input_term(state, d->bCSourceID, term);
+				if (err < 0)
+					return err;
+
+				/* save input term properties after recursion,
+				 * to ensure they are not overridden by the
+				 * recursion calls */
+				term->id = id;
 				term->type = le16_to_cpu(d->wTerminalType);
 				term->channels = d->bNrChannels;
 				term->chconfig = le32_to_cpu(d->bmChannelConfig);
 				term->name = d->iTerminal;
-
-				/* call recursively to get the clock selectors */
-				err = check_input_term(state, d->bCSourceID, term);
-				if (err < 0)
-					return err;
 			}
 			return 0;
 		case UAC_FEATURE_UNIT: {
@@ -798,24 +808,25 @@
 /* feature unit control information */
 struct usb_feature_control_info {
 	const char *name;
-	unsigned int type;	/* control type (mute, volume, etc.) */
+	int type;	/* data type for uac1 */
+	int type_uac2;	/* data type for uac2 if different from uac1, else -1 */
 };
 
 static struct usb_feature_control_info audio_feature_info[] = {
-	{ "Mute",			USB_MIXER_INV_BOOLEAN },
-	{ "Volume",			USB_MIXER_S16 },
-	{ "Tone Control - Bass",	USB_MIXER_S8 },
-	{ "Tone Control - Mid",		USB_MIXER_S8 },
-	{ "Tone Control - Treble",	USB_MIXER_S8 },
-	{ "Graphic Equalizer",		USB_MIXER_S8 }, /* FIXME: not implemeted yet */
-	{ "Auto Gain Control",		USB_MIXER_BOOLEAN },
-	{ "Delay Control",		USB_MIXER_U16 }, /* FIXME: U32 in UAC2 */
-	{ "Bass Boost",			USB_MIXER_BOOLEAN },
-	{ "Loudness",			USB_MIXER_BOOLEAN },
+	{ "Mute",			USB_MIXER_INV_BOOLEAN, -1 },
+	{ "Volume",			USB_MIXER_S16, -1 },
+	{ "Tone Control - Bass",	USB_MIXER_S8, -1 },
+	{ "Tone Control - Mid",		USB_MIXER_S8, -1 },
+	{ "Tone Control - Treble",	USB_MIXER_S8, -1 },
+	{ "Graphic Equalizer",		USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */
+	{ "Auto Gain Control",		USB_MIXER_BOOLEAN, -1 },
+	{ "Delay Control",		USB_MIXER_U16, USB_MIXER_U32 },
+	{ "Bass Boost",			USB_MIXER_BOOLEAN, -1 },
+	{ "Loudness",			USB_MIXER_BOOLEAN, -1 },
 	/* UAC2 specific */
-	{ "Input Gain Control",		USB_MIXER_S16 },
-	{ "Input Gain Pad Control",	USB_MIXER_S16 },
-	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN },
+	{ "Input Gain Control",		USB_MIXER_S16, -1 },
+	{ "Input Gain Pad Control",	USB_MIXER_S16, -1 },
+	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN, -1 },
 };
 
 /* private_free callback */
@@ -1215,6 +1226,7 @@
 			      int readonly_mask)
 {
 	struct uac_feature_unit_descriptor *desc = raw_desc;
+	struct usb_feature_control_info *ctl_info;
 	unsigned int len = 0;
 	int mapped_name = 0;
 	int nameid = uac_feature_unit_iFeature(desc);
@@ -1240,7 +1252,13 @@
 	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
 	cval->control = control;
 	cval->cmask = ctl_mask;
-	cval->val_type = audio_feature_info[control-1].type;
+	ctl_info = &audio_feature_info[control-1];
+	if (state->mixer->protocol == UAC_VERSION_1)
+		cval->val_type = ctl_info->type;
+	else /* UAC_VERSION_2 */
+		cval->val_type = ctl_info->type_uac2 >= 0 ?
+			ctl_info->type_uac2 : ctl_info->type;
+
 	if (ctl_mask == 0) {
 		cval->channels = 1;	/* master channel */
 		cval->master_readonly = readonly_mask;
@@ -2522,7 +2540,7 @@
 		for (c = 0; c < MAX_CHANNELS; c++) {
 			if (!(cval->cmask & (1 << c)))
 				continue;
-			if (cval->cached & (1 << c)) {
+			if (cval->cached & (1 << (c + 1))) {
 				err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
 							cval->cache_val[idx]);
 				if (err < 0)
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index d3268f0..3417ef3 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -33,6 +33,8 @@
 	USB_MIXER_U8,
 	USB_MIXER_S16,
 	USB_MIXER_U16,
+	USB_MIXER_S32,
+	USB_MIXER_U32,
 };
 
 typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 337c317..d3608c0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -308,11 +308,10 @@
 	struct snd_usb_audio *chip = mixer->chip;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto out;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+
 	if (chip->usb_id == USB_ID(0x041e, 0x3042))
 		err = snd_usb_ctl_msg(chip->dev,
 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
@@ -329,8 +328,7 @@
 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      value, index + 2, NULL, 0);
- out:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -441,16 +439,15 @@
 
 	for (i = 0; jacks[i].name; ++i) {
 		snd_iprintf(buffer, "%s: ", jacks[i].name);
-		down_read(&mixer->chip->shutdown_rwsem);
-		if (mixer->chip->shutdown)
-			err = 0;
-		else
-			err = snd_usb_ctl_msg(mixer->chip->dev,
+		err = snd_usb_lock_shutdown(mixer->chip);
+		if (err < 0)
+			return;
+		err = snd_usb_ctl_msg(mixer->chip->dev,
 				      usb_rcvctrlpipe(mixer->chip->dev, 0),
 				      UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
 				      USB_RECIP_INTERFACE, 0,
 				      jacks[i].unitid << 8, buf, 3);
-		up_read(&mixer->chip->shutdown_rwsem);
+		snd_usb_unlock_shutdown(mixer->chip);
 		if (err == 3 && (buf[0] == 3 || buf[0] == 6))
 			snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
 		else
@@ -481,11 +478,9 @@
 	int err;
 	unsigned char buf[2];
 
-	down_read(&chip->shutdown_rwsem);
-	if (mixer->chip->shutdown) {
-		err = -ENODEV;
-		goto out;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	buf[0] = 0x01;
 	buf[1] = value ? 0x02 : 0x01;
@@ -493,8 +488,7 @@
 		      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
 		      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
 		      0x0400, 0x0e00, buf, 2);
- out:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -554,15 +548,14 @@
 	struct snd_usb_audio *chip = mixer->chip;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = snd_usb_ctl_msg(chip->dev,
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = snd_usb_ctl_msg(chip->dev,
 			      usb_sndctrlpipe(chip->dev, 0), 0x08,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      50, 0, &status, 1);
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -623,11 +616,9 @@
 	int err;
 	unsigned char buff[3];
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto err;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	/* Prepare for magic command to toggle clock source */
 	err = snd_usb_ctl_msg(chip->dev,
@@ -683,7 +674,7 @@
 		goto err;
 
 err:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -778,15 +769,14 @@
 	unsigned int pval = list->kctl->private_value;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
-				      (pval >> 16) & 0xff,
-				      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-				      pval >> 24, pval & 0xffff, NULL, 0, 1000);
-	up_read(&chip->shutdown_rwsem);
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
+			      (pval >> 16) & 0xff,
+			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+			      pval >> 24, pval & 0xffff, NULL, 0, 1000);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -944,18 +934,17 @@
 	value[0] = pval >> 24;
 	value[1] = 0;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown)
-		err = -ENODEV;
-	else
-		err = snd_usb_ctl_msg(chip->dev,
-				      usb_sndctrlpipe(chip->dev, 0),
-				      UAC_SET_CUR,
-				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
-				      pval & 0xff00,
-				      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
-				      value, 2);
-	up_read(&chip->shutdown_rwsem);
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
+	err = snd_usb_ctl_msg(chip->dev,
+			      usb_sndctrlpipe(chip->dev, 0),
+			      UAC_SET_CUR,
+			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+			      pval & 0xff00,
+			      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
+			      value, 2);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1519,11 +1508,9 @@
 	unsigned char data[3];
 	int rate;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	ucontrol->value.iec958.status[0] = kcontrol->private_value & 0xff;
 	ucontrol->value.iec958.status[1] = (kcontrol->private_value >> 8) & 0xff;
@@ -1551,7 +1538,7 @@
 
 	err = 0;
  end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1562,11 +1549,9 @@
 	u8 reg;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	reg = ((pval >> 4) & 0xf0) | (pval & 0x0f);
 	err = snd_usb_ctl_msg(chip->dev,
@@ -1594,7 +1579,7 @@
 		goto end;
 
  end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
@@ -1650,11 +1635,9 @@
 	u8 reg = list->kctl->private_value;
 	int err;
 
-	down_read(&chip->shutdown_rwsem);
-	if (chip->shutdown) {
-		err = -ENODEV;
-		goto end;
-	}
+	err = snd_usb_lock_shutdown(chip);
+	if (err < 0)
+		return err;
 
 	err = snd_usb_ctl_msg(chip->dev,
 			usb_sndctrlpipe(chip->dev, 0),
@@ -1665,8 +1648,7 @@
 			NULL,
 			0);
 
- end:
-	up_read(&chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(chip);
 	return err;
 }
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b4ef410..cdac517 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -80,7 +80,7 @@
 	unsigned int hwptr_done;
 
 	subs = (struct snd_usb_substream *)substream->runtime->private_data;
-	if (subs->stream->chip->shutdown)
+	if (atomic_read(&subs->stream->chip->shutdown))
 		return SNDRV_PCM_POS_XRUN;
 	spin_lock(&subs->lock);
 	hwptr_done = subs->hwptr_done;
@@ -391,6 +391,20 @@
 	 */
 	attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
 
+	if ((is_playback && (attr != USB_ENDPOINT_SYNC_ASYNC)) ||
+		(!is_playback && (attr != USB_ENDPOINT_SYNC_ADAPTIVE))) {
+
+		/*
+		 * In these modes the notion of sync_endpoint is irrelevant.
+		 * Reset pointers to avoid using stale data from previously
+		 * used settings, e.g. when configuration and endpoints were
+		 * changed
+		 */
+
+		subs->sync_endpoint = NULL;
+		subs->data_endpoint->sync_master = NULL;
+	}
+
 	err = set_sync_ep_implicit_fb_quirk(subs, dev, altsd, attr);
 	if (err < 0)
 		return err;
@@ -398,10 +412,17 @@
 	if (altsd->bNumEndpoints < 2)
 		return 0;
 
-	if ((is_playback && attr != USB_ENDPOINT_SYNC_ASYNC) ||
+	if ((is_playback && (attr == USB_ENDPOINT_SYNC_SYNC ||
+			     attr == USB_ENDPOINT_SYNC_ADAPTIVE)) ||
 	    (!is_playback && attr != USB_ENDPOINT_SYNC_ADAPTIVE))
 		return 0;
 
+	/*
+	 * In case of an illegal SYNC_NONE for an OUT endpoint, keep going and
+	 * try to find a sync endpoint anyway, as on M-Audio Transit. If none
+	 * is found, fall back to SYNC mode and don't create a sync endpoint
+	 */
+
 	/* check sync-pipe endpoint */
 	/* ... and check descriptor size before accessing bSynchAddress
 	   because there is a version of the SB Audigy 2 NX firmware lacking
@@ -415,6 +436,8 @@
 			   get_endpoint(alts, 1)->bmAttributes,
 			   get_endpoint(alts, 1)->bLength,
 			   get_endpoint(alts, 1)->bSynchAddress);
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
 	}
 	ep = get_endpoint(alts, 1)->bEndpointAddress;
@@ -425,6 +448,8 @@
 			"%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
 			   fmt->iface, fmt->altsetting,
 			   is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
 	}
 
@@ -436,8 +461,11 @@
 						   implicit_fb ?
 							SND_USB_ENDPOINT_TYPE_DATA :
 							SND_USB_ENDPOINT_TYPE_SYNC);
-	if (!subs->sync_endpoint)
+	if (!subs->sync_endpoint) {
+		if (is_playback && attr == USB_ENDPOINT_SYNC_NONE)
+			return 0;
 		return -EINVAL;
+	}
 
 	subs->data_endpoint->sync_master = subs->sync_endpoint;
 
@@ -707,12 +735,11 @@
 		return -EINVAL;
 	}
 
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (subs->stream->chip->shutdown)
-		ret = -ENODEV;
-	else
-		ret = set_format(subs, fmt);
-	up_read(&subs->stream->chip->shutdown_rwsem);
+	ret = snd_usb_lock_shutdown(subs->stream->chip);
+	if (ret < 0)
+		return ret;
+	ret = set_format(subs, fmt);
+	snd_usb_unlock_shutdown(subs->stream->chip);
 	if (ret < 0)
 		return ret;
 
@@ -735,13 +762,12 @@
 	subs->cur_audiofmt = NULL;
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (!subs->stream->chip->shutdown) {
+	if (!snd_usb_lock_shutdown(subs->stream->chip)) {
 		stop_endpoints(subs, true);
 		snd_usb_endpoint_deactivate(subs->sync_endpoint);
 		snd_usb_endpoint_deactivate(subs->data_endpoint);
+		snd_usb_unlock_shutdown(subs->stream->chip);
 	}
-	up_read(&subs->stream->chip->shutdown_rwsem);
 	return snd_pcm_lib_free_vmalloc_buffer(substream);
 }
 
@@ -763,11 +789,9 @@
 		return -ENXIO;
 	}
 
-	down_read(&subs->stream->chip->shutdown_rwsem);
-	if (subs->stream->chip->shutdown) {
-		ret = -ENODEV;
-		goto unlock;
-	}
+	ret = snd_usb_lock_shutdown(subs->stream->chip);
+	if (ret < 0)
+		return ret;
 	if (snd_BUG_ON(!subs->data_endpoint)) {
 		ret = -EIO;
 		goto unlock;
@@ -816,7 +840,7 @@
 		ret = start_endpoints(subs, true);
 
  unlock:
-	up_read(&subs->stream->chip->shutdown_rwsem);
+	snd_usb_unlock_shutdown(subs->stream->chip);
 	return ret;
 }
 
@@ -1218,9 +1242,11 @@
 
 	stop_endpoints(subs, true);
 
-	if (!as->chip->shutdown && subs->interface >= 0) {
+	if (subs->interface >= 0 &&
+	    !snd_usb_lock_shutdown(subs->stream->chip)) {
 		usb_set_interface(subs->dev, subs->interface, 0);
 		subs->interface = -1;
+		snd_usb_unlock_shutdown(subs->stream->chip);
 	}
 
 	subs->pcm_substream = NULL;
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 5f761ab..0ac89e2 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -46,14 +46,14 @@
 static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
 {
 	struct snd_usb_audio *chip = entry->private_data;
-	if (!chip->shutdown)
+	if (!atomic_read(&chip->shutdown))
 		snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
 }
 
 static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
 {
 	struct snd_usb_audio *chip = entry->private_data;
-	if (!chip->shutdown)
+	if (!atomic_read(&chip->shutdown))
 		snd_iprintf(buffer, "%04x:%04x\n", 
 			    USB_ID_VENDOR(chip->usb_id),
 			    USB_ID_PRODUCT(chip->usb_id));
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 754e689..00ebc0c 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1268,6 +1268,7 @@
 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
 		break;
 
+	case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
 	case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
 	case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
 		if (fp->altsetting == 3)
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 91d0380..33a1764 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -37,11 +37,11 @@
 	struct usb_interface *pm_intf;
 	u32 usb_id;
 	struct mutex mutex;
-	struct rw_semaphore shutdown_rwsem;
-	unsigned int shutdown:1;
-	unsigned int probing:1;
-	unsigned int in_pm:1;
 	unsigned int autosuspended:1;	
+	atomic_t active;
+	atomic_t shutdown;
+	atomic_t usage_count;
+	wait_queue_head_t shutdown_wait;
 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
 	
 	int num_interfaces;
@@ -115,4 +115,7 @@
 #define combine_triple(s)  (combine_word(s) | ((unsigned int)(s)[2] << 16))
 #define combine_quad(s)    (combine_triple(s) | ((unsigned int)(s)[3] << 24))
 
+int snd_usb_lock_shutdown(struct snd_usb_audio *chip);
+void snd_usb_unlock_shutdown(struct snd_usb_audio *chip);
+
 #endif /* __USBAUDIO_H */
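
The recurring conversion in the sound/usb hunks above replaces the old shutdown_rwsem/down_read() guards with the snd_usb_lock_shutdown()/snd_usb_unlock_shutdown() helpers declared in usbaudio.h. A minimal sketch of a hypothetical caller (example_vendor_cmd is not part of the tree, shown only to illustrate the pattern):

    static int example_vendor_cmd(struct snd_usb_audio *chip)
    {
    	int err;

    	/* bumps usage_count, fails with -EIO once disconnect has
    	 * started, and autoresumes the device */
    	err = snd_usb_lock_shutdown(chip);
    	if (err < 0)
    		return err;

    	/* ... issue USB control messages safely here ... */

    	/* autosuspends, then drops usage_count and wakes the
    	 * disconnect handler waiting on shutdown_wait */
    	snd_usb_unlock_shutdown(chip);
    	return 0;
    }

While any task holds the lock, usb_audio_disconnect() waits in wait_event(chip->shutdown_wait, ...) until the usage count drops to zero before tearing the card down.
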
diff --git a/tools/build/Documentation/Build.txt b/tools/build/Documentation/Build.txt
index 00ad2d6..aa5e092 100644
--- a/tools/build/Documentation/Build.txt
+++ b/tools/build/Documentation/Build.txt
@@ -66,6 +66,7 @@
   ex/Build:
     ex-y += a.o
     ex-y += b.o
+    ex-y += b.o # duplicates in the lists are allowed
 
     libex-y += c.o
     libex-y += d.o
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index faca2bf..0c5f485 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -57,11 +57,13 @@
 quiet_cmd_cc_s_c = AS       $@
       cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
 
+quiet_cmd_gen = GEN      $@
+
 # Link agregate command
 # If there's nothing to link, create empty $@ object.
 quiet_cmd_ld_multi = LD       $@
       cmd_ld_multi = $(if $(strip $(obj-y)),\
-		       $(LD) -r -o $@ $(obj-y),rm -f $@; $(AR) rcs $@)
+		       $(LD) -r -o $@  $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
 
 # Build rules
 $(OUTPUT)%.o: %.c FORCE
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 463ed8f..74ca420 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -33,7 +33,8 @@
 	test-compile-32.bin		\
 	test-compile-x32.bin		\
 	test-zlib.bin			\
-	test-lzma.bin
+	test-lzma.bin			\
+	test-bpf.bin
 
 CC := $(CROSS_COMPILE)gcc -MD
 PKG_CONFIG := $(CROSS_COMPILE)pkg-config
@@ -69,8 +70,13 @@
 test-glibc.bin:
 	$(BUILD)
 
+DWARFLIBS := -ldw
+ifeq ($(findstring -static,${LDFLAGS}),-static)
+DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
+endif
+
 test-dwarf.bin:
-	$(BUILD) -ldw
+	$(BUILD) $(DWARFLIBS)
 
 test-libelf-mmap.bin:
 	$(BUILD) -lelf
@@ -156,6 +162,9 @@
 test-lzma.bin:
 	$(BUILD) -llzma
 
+test-bpf.bin:
+	$(BUILD)
+
 -include *.d
 
 ###############################
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
new file mode 100644
index 0000000..062bac8
--- /dev/null
+++ b/tools/build/feature/test-bpf.c
@@ -0,0 +1,18 @@
+#include <linux/bpf.h>
+
+int main(void)
+{
+	union bpf_attr attr;
+
+	attr.prog_type = BPF_PROG_TYPE_KPROBE;
+	attr.insn_cnt = 0;
+	attr.insns = 0;
+	attr.license = 0;
+	attr.log_buf = 0;
+	attr.log_size = 0;
+	attr.log_level = 0;
+	attr.kern_version = 0;
+
+	attr = attr;
+	return 0;
+}
diff --git a/tools/build/feature/test-glibc.c b/tools/build/feature/test-glibc.c
index b082034..9367f758 100644
--- a/tools/build/feature/test-glibc.c
+++ b/tools/build/feature/test-glibc.c
@@ -1,8 +1,19 @@
+#include <stdlib.h>
+
+#if !defined(__UCLIBC__)
 #include <gnu/libc-version.h>
+#else
+#define XSTR(s) STR(s)
+#define STR(s) #s
+#endif
 
 int main(void)
 {
+#if !defined(__UCLIBC__)
 	const char *version = gnu_get_libc_version();
+#else
+	const char *version = XSTR(__GLIBC__) "." XSTR(__GLIBC_MINOR__);
+#endif
 
 	return (long)version;
 }
diff --git a/tools/build/tests/ex/Build b/tools/build/tests/ex/Build
index 70d8762..429c7d4 100644
--- a/tools/build/tests/ex/Build
+++ b/tools/build/tests/ex/Build
@@ -1,6 +1,7 @@
 ex-y += ex.o
 ex-y += a.o
 ex-y += b.o
+ex-y += b.o
 ex-y += empty/
 ex-y += empty2/
 
diff --git a/tools/hv/lsvmbus b/tools/hv/lsvmbus
new file mode 100644
index 0000000..162a378
--- /dev/null
+++ b/tools/hv/lsvmbus
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+import os
+from optparse import OptionParser
+
+parser = OptionParser()
+parser.add_option("-v", "--verbose", dest="verbose",
+		   help="print verbose messages. Try -vv, -vvv for \
+			more verbose messages", action="count")
+
+(options, args) = parser.parse_args()
+
+verbose = 0
+if options.verbose is not None:
+	verbose = options.verbose
+
+vmbus_sys_path = '/sys/bus/vmbus/devices'
+if not os.path.isdir(vmbus_sys_path):
+	print "%s doesn't exist: exiting..." % vmbus_sys_path
+	exit(-1)
+
+vmbus_dev_dict = {
+	'{0e0b6031-5213-4934-818b-38d90ced39db}' : '[Operating system shutdown]',
+	'{9527e630-d0ae-497b-adce-e80ab0175caf}' : '[Time Synchronization]',
+	'{57164f39-9115-4e78-ab55-382f3bd5422d}' : '[Heartbeat]',
+	'{a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}' : '[Data Exchange]',
+	'{35fa2e29-ea23-4236-96ae-3a6ebacba440}' : '[Backup (volume checkpoint)]',
+	'{34d14be3-dee4-41c8-9ae7-6b174977c192}' : '[Guest services]',
+	'{525074dc-8985-46e2-8057-a307dc18a502}' : '[Dynamic Memory]',
+	'{cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}' : 'Synthetic mouse',
+	'{f912ad6d-2b17-48ea-bd65-f927a61c7684}' : 'Synthetic keyboard',
+	'{da0a7802-e377-4aac-8e77-0558eb1073f8}' : 'Synthetic framebuffer adapter',
+	'{f8615163-df3e-46c5-913f-f2d2f965ed0e}' : 'Synthetic network adapter',
+	'{32412632-86cb-44a2-9b5c-50d1417354f5}' : 'Synthetic IDE Controller',
+	'{ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}' : 'Synthetic SCSI Controller',
+	'{2f9bcc4a-0069-4af3-b76b-6fd0be528cda}' : 'Synthetic fiber channel adapter',
+	'{8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}' : 'Synthetic RDMA adapter',
+	'{276aacf4-ac15-426c-98dd-7521ad3f01fe}' : '[Reserved system device]',
+	'{f8e65716-3cb3-4a06-9a60-1889c5cccab5}' : '[Reserved system device]',
+	'{3375baf4-9e15-4b30-b765-67acb10d607b}' : '[Reserved system device]',
+}
+
+def get_vmbus_dev_attr(dev_name, attr):
+	try:
+		f = open('%s/%s/%s' % (vmbus_sys_path, dev_name, attr), 'r')
+		lines = f.readlines()
+		f.close()
+	except IOError:
+		lines = []
+
+	return lines
+
+class VMBus_Dev:
+	pass
+
+
+vmbus_dev_list = []
+
+for f in os.listdir(vmbus_sys_path):
+	vmbus_id = get_vmbus_dev_attr(f, 'id')[0].strip()
+	class_id = get_vmbus_dev_attr(f, 'class_id')[0].strip()
+	device_id = get_vmbus_dev_attr(f, 'device_id')[0].strip()
+	dev_desc = vmbus_dev_dict.get(class_id, 'Unknown')
+
+	chn_vp_mapping = get_vmbus_dev_attr(f, 'channel_vp_mapping')
+	chn_vp_mapping = [c.strip() for c in chn_vp_mapping]
+	chn_vp_mapping = sorted(chn_vp_mapping,
+		key = lambda c : int(c.split(':')[0]))
+
+	chn_vp_mapping = ['\tRel_ID=%s, target_cpu=%s' %
+				(c.split(':')[0], c.split(':')[1])
+					for c in chn_vp_mapping]
+	d = VMBus_Dev()
+	d.sysfs_path = '%s/%s' % (vmbus_sys_path, f)
+	d.vmbus_id = vmbus_id
+	d.class_id = class_id
+	d.device_id = device_id
+	d.dev_desc = dev_desc
+	d.chn_vp_mapping = '\n'.join(chn_vp_mapping)
+	if d.chn_vp_mapping:
+		d.chn_vp_mapping += '\n'
+
+	vmbus_dev_list.append(d)
+
+
+vmbus_dev_list  = sorted(vmbus_dev_list, key = lambda d : int(d.vmbus_id))
+
+format0 = '%2s: %s'
+format1 = '%2s: Class_ID = %s - %s\n%s'
+format2 = '%2s: Class_ID = %s - %s\n\tDevice_ID = %s\n\tSysfs path: %s\n%s'
+
+for d in vmbus_dev_list:
+	if verbose == 0:
+		print ('VMBUS ID ' + format0) % (d.vmbus_id, d.dev_desc)
+	elif verbose == 1:
+		print ('VMBUS ID ' + format1) %	\
+			(d.vmbus_id, d.class_id, d.dev_desc, d.chn_vp_mapping)
+	else:
+		print ('VMBUS ID ' + format2) % \
+			(d.vmbus_id, d.class_id, d.dev_desc, \
+			d.device_id, d.sysfs_path, d.chn_vp_mapping)
diff --git a/tools/iio/generic_buffer.c b/tools/iio/generic_buffer.c
index 4eebb66..9f7b85b 100644
--- a/tools/iio/generic_buffer.c
+++ b/tools/iio/generic_buffer.c
@@ -51,14 +51,33 @@
 		if (bytes % channels[i].bytes == 0)
 			channels[i].location = bytes;
 		else
-			channels[i].location = bytes - bytes%channels[i].bytes
-				+ channels[i].bytes;
+			channels[i].location = bytes - bytes % channels[i].bytes
+					       + channels[i].bytes;
+
 		bytes = channels[i].location + channels[i].bytes;
 		i++;
 	}
+
 	return bytes;
 }
 
+void print1byte(uint8_t input, struct iio_channel_info *info)
+{
+	/*
+	 * Shift before conversion to avoid sign extension
+	 * of left aligned data
+	 */
+	input >>= info->shift;
+	input &= info->mask;
+	if (info->is_signed) {
+		int8_t val = (int8_t)(input << (8 - info->bits_used)) >>
+			     (8 - info->bits_used);
+		printf("%05f ", ((float)val + info->offset) * info->scale);
+	} else {
+		printf("%05f ", ((float)input + info->offset) * info->scale);
+	}
+}
+
 void print2byte(uint16_t input, struct iio_channel_info *info)
 {
 	/* First swap if incorrect endian */
@@ -136,9 +155,9 @@
 /**
  * process_scan() - print out the values in SI units
  * @data:		pointer to the start of the scan
- * @channels:		information about the channels. Note
- *  size_from_channelarray must have been called first to fill the
- *  location offsets.
+ * @channels:		information about the channels.
+ *			Note: size_from_channelarray must have been called first
+ *			      to fill the location offsets.
  * @num_channels:	number of channels
  **/
 void process_scan(char *data,
@@ -150,6 +169,10 @@
 	for (k = 0; k < num_channels; k++)
 		switch (channels[k].bytes) {
 			/* only a few cases implemented so far */
+		case 1:
+			print1byte(*(uint8_t *)(data + channels[k].location),
+				   &channels[k]);
+			break;
 		case 2:
 			print2byte(*(uint16_t *)(data + channels[k].location),
 				   &channels[k]);
@@ -170,15 +193,15 @@
 
 void print_usage(void)
 {
-	printf("Usage: generic_buffer [options]...\n"
-	       "Capture, convert and output data from IIO device buffer\n"
-	       "  -c <n>     Do n conversions\n"
-	       "  -e         Disable wait for event (new data)\n"
-	       "  -g         Use trigger-less mode\n"
-	       "  -l <n>     Set buffer length to n samples\n"
-	       "  -n <name>  Set device name (mandatory)\n"
-	       "  -t <name>  Set trigger name\n"
-	       "  -w <n>     Set delay between reads in us (event-less mode)\n");
+	fprintf(stderr, "Usage: generic_buffer [options]...\n"
+		"Capture, convert and output data from IIO device buffer\n"
+		"  -c <n>     Do n conversions\n"
+		"  -e         Disable wait for event (new data)\n"
+		"  -g         Use trigger-less mode\n"
+		"  -l <n>     Set buffer length to n samples\n"
+		"  -n <name>  Set device name (mandatory)\n"
+		"  -t <name>  Set trigger name\n"
+		"  -w <n>     Set delay between reads in us (event-less mode)\n");
 }
 
 int main(int argc, char **argv)
@@ -213,6 +236,7 @@
 			num_loops = strtoul(optarg, &dummy, 10);
 			if (errno)
 				return -errno;
+
 			break;
 		case 'e':
 			noevents = 1;
@@ -225,6 +249,7 @@
 			buf_len = strtoul(optarg, &dummy, 10);
 			if (errno)
 				return -errno;
+
 			break;
 		case 'n':
 			device_name = optarg;
@@ -245,8 +270,8 @@
 		}
 	}
 
-	if (device_name == NULL) {
-		printf("Device name not set\n");
+	if (!device_name) {
+		fprintf(stderr, "Device name not set\n");
 		print_usage();
 		return -1;
 	}
@@ -254,9 +279,10 @@
 	/* Find the device requested */
 	dev_num = find_type_by_name(device_name, "iio:device");
 	if (dev_num < 0) {
-		printf("Failed to find the %s\n", device_name);
+		fprintf(stderr, "Failed to find the %s\n", device_name);
 		return dev_num;
 	}
+
 	printf("iio device number being used is %d\n", dev_num);
 
 	ret = asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
@@ -264,7 +290,7 @@
 		return -ENOMEM;
 
 	if (!notrigger) {
-		if (trigger_name == NULL) {
+		if (!trigger_name) {
 			/*
 			 * Build the trigger name. If it is device associated
 			 * its name is <device_name>_dev[n] where n matches
@@ -281,13 +307,16 @@
 		/* Verify the trigger exists */
 		trig_num = find_type_by_name(trigger_name, "trigger");
 		if (trig_num < 0) {
-			printf("Failed to find the trigger %s\n", trigger_name);
+			fprintf(stderr, "Failed to find the trigger %s\n",
+				trigger_name);
 			ret = trig_num;
 			goto error_free_triggername;
 		}
+
 		printf("iio trigger number being used is %d\n", trig_num);
-	} else
+	} else {
 		printf("trigger-less mode selected\n");
+	}
 
 	/*
 	 * Parse the files in scan_elements to identify what channels are
@@ -295,8 +324,8 @@
 	 */
 	ret = build_channel_array(dev_dir_name, &channels, &num_channels);
 	if (ret) {
-		printf("Problem reading scan element information\n");
-		printf("diag %s\n", dev_dir_name);
+		fprintf(stderr, "Problem reading scan element information\n"
+			"diag %s\n", dev_dir_name);
 		goto error_free_triggername;
 	}
 
@@ -314,13 +343,16 @@
 
 	if (!notrigger) {
 		printf("%s %s\n", dev_dir_name, trigger_name);
-		/* Set the device trigger to be the data ready trigger found
-		 * above */
+		/*
+		 * Set the device trigger to be the data ready trigger found
+		 * above
+		 */
 		ret = write_sysfs_string_and_verify("trigger/current_trigger",
 						    dev_dir_name,
 						    trigger_name);
 		if (ret < 0) {
-			printf("Failed to write current_trigger file\n");
+			fprintf(stderr,
+				"Failed to write current_trigger file\n");
 			goto error_free_buf_dir_name;
 		}
 	}
@@ -332,10 +364,14 @@
 
 	/* Enable the buffer */
 	ret = write_sysfs_int("enable", buf_dir_name, 1);
-	if (ret < 0)
+	if (ret < 0) {
+		fprintf(stderr,
+			"Failed to enable buffer: %s\n", strerror(-ret));
 		goto error_free_buf_dir_name;
+	}
+
 	scan_size = size_from_channelarray(channels, num_channels);
-	data = malloc(scan_size*buf_len);
+	data = malloc(scan_size * buf_len);
 	if (!data) {
 		ret = -ENOMEM;
 		goto error_free_buf_dir_name;
@@ -349,13 +385,12 @@
 
 	/* Attempt to open non blocking the access dev */
 	fp = open(buffer_access, O_RDONLY | O_NONBLOCK);
-	if (fp == -1) { /* If it isn't there make the node */
+	if (fp == -1) { /* TODO: If it isn't there make the node */
 		ret = -errno;
-		printf("Failed to open %s\n", buffer_access);
+		fprintf(stderr, "Failed to open %s\n", buffer_access);
 		goto error_free_buffer_access;
 	}
 
-	/* Wait for events 10 times */
 	for (j = 0; j < num_loops; j++) {
 		if (!noevents) {
 			struct pollfd pfd = {
@@ -372,25 +407,22 @@
 			}
 
 			toread = buf_len;
-
 		} else {
 			usleep(timedelay);
 			toread = 64;
 		}
 
-		read_size = read(fp,
-				 data,
-				 toread*scan_size);
+		read_size = read(fp, data, toread * scan_size);
 		if (read_size < 0) {
 			if (errno == EAGAIN) {
-				printf("nothing available\n");
+				fprintf(stderr, "nothing available\n");
 				continue;
-			} else
+			} else {
 				break;
+			}
 		}
-		for (i = 0; i < read_size/scan_size; i++)
-			process_scan(data + scan_size*i,
-				     channels,
+		for (i = 0; i < read_size / scan_size; i++)
+			process_scan(data + scan_size * i, channels,
 				     num_channels);
 	}
 
@@ -404,11 +436,13 @@
 		ret = write_sysfs_string("trigger/current_trigger",
 					 dev_dir_name, "NULL");
 		if (ret < 0)
-			printf("Failed to write to %s\n", dev_dir_name);
+			fprintf(stderr, "Failed to write to %s\n",
+				dev_dir_name);
 
 error_close_buffer_access:
 	if (close(fp) == -1)
 		perror("Failed to close buffer");
+
 error_free_buffer_access:
 	free(buffer_access);
 error_free_data:
@@ -424,6 +458,7 @@
 error_free_triggername:
 	if (datardytrigger)
 		free(trigger_name);
+
 error_free_dev_dir_name:
 	free(dev_dir_name);
 
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index 016760e..cd3fd41 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -13,7 +13,6 @@
  *
  * Usage:
  *	iio_event_monitor <device_name>
- *
  */
 
 #include <unistd.h>
@@ -51,6 +50,9 @@
 	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
 	[IIO_ACTIVITY] = "activity",
 	[IIO_STEPS] = "steps",
+	[IIO_ENERGY] = "energy",
+	[IIO_DISTANCE] = "distance",
+	[IIO_VELOCITY] = "velocity",
 };
 
 static const char * const iio_ev_type_text[] = {
@@ -99,6 +101,7 @@
 	[IIO_MOD_JOGGING] = "jogging",
 	[IIO_MOD_WALKING] = "walking",
 	[IIO_MOD_STILL] = "still",
+	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
 };
 
 static bool event_is_known(struct iio_event_data *event)
@@ -130,6 +133,9 @@
 	case IIO_HUMIDITYRELATIVE:
 	case IIO_ACTIVITY:
 	case IIO_STEPS:
+	case IIO_ENERGY:
+	case IIO_DISTANCE:
+	case IIO_VELOCITY:
 		break;
 	default:
 		return false;
@@ -167,6 +173,7 @@
 	case IIO_MOD_JOGGING:
 	case IIO_MOD_WALKING:
 	case IIO_MOD_STILL:
+	case IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z:
 		break;
 	default:
 		return false;
@@ -208,8 +215,9 @@
 	bool diff = IIO_EVENT_CODE_EXTRACT_DIFF(event->id);
 
 	if (!event_is_known(event)) {
-		printf("Unknown event: time: %lld, id: %llx\n",
-				event->timestamp, event->id);
+		fprintf(stderr, "Unknown event: time: %lld, id: %llx\n",
+			event->timestamp, event->id);
+
 		return;
 	}
 
@@ -229,6 +237,7 @@
 
 	if (dir != IIO_EV_DIR_NONE)
 		printf(", direction: %s", iio_ev_dir_text[dir]);
+
 	printf("\n");
 }
 
@@ -242,7 +251,7 @@
 	int fd, event_fd;
 
 	if (argc <= 1) {
-		printf("Usage: %s <device_name>\n", argv[0]);
+		fprintf(stderr, "Usage: %s <device_name>\n", argv[0]);
 		return -1;
 	}
 
@@ -251,14 +260,15 @@
 	dev_num = find_type_by_name(device_name, "iio:device");
 	if (dev_num >= 0) {
 		printf("Found IIO device with name %s with device number %d\n",
-			device_name, dev_num);
+		       device_name, dev_num);
 		ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
-		if (ret < 0) {
+		if (ret < 0)
 			return -ENOMEM;
-		}
 	} else {
-		/* If we can't find a IIO device by name assume device_name is a
-		   IIO chrdev */
+		/*
+		 * If we can't find an IIO device by name assume device_name is
+		 * an IIO chrdev
+		 */
 		chrdev_name = strdup(device_name);
 		if (!chrdev_name)
 			return -ENOMEM;
@@ -267,14 +277,14 @@
 	fd = open(chrdev_name, 0);
 	if (fd == -1) {
 		ret = -errno;
-		fprintf(stdout, "Failed to open %s\n", chrdev_name);
+		fprintf(stderr, "Failed to open %s\n", chrdev_name);
 		goto error_free_chrdev_name;
 	}
 
 	ret = ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
 	if (ret == -1 || event_fd == -1) {
 		ret = -errno;
-		fprintf(stdout, "Failed to retrieve event fd\n");
+		fprintf(stderr, "Failed to retrieve event fd\n");
 		if (close(fd) == -1)
 			perror("Failed to close character device file");
 
@@ -290,7 +300,7 @@
 		ret = read(event_fd, &event, sizeof(event));
 		if (ret == -1) {
 			if (errno == EAGAIN) {
-				printf("nothing available\n");
+				fprintf(stderr, "nothing available\n");
 				continue;
 			} else {
 				ret = -errno;
@@ -299,6 +309,12 @@
 			}
 		}
 
+		if (ret != sizeof(event)) {
+			fprintf(stderr, "Reading event failed!\n");
+			ret = -EIO;
+			break;
+		}
+
 		print_event(&event);
 	}
 
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index ec9ab7f..5eb6793 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -6,9 +6,6 @@
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
  */
-#ifndef _IIO_UTILS_H
-#define _IIO_UTILS_H
-
 #include <string.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -32,15 +29,14 @@
  *
  * Returns 0 on success, or a negative error code if string extraction failed.
  **/
-int iioutils_break_up_name(const char *full_name,
-				  char **generic_name)
+int iioutils_break_up_name(const char *full_name, char **generic_name)
 {
 	char *current;
 	char *w, *r;
 	char *working, *prefix = "";
 	int i, ret;
 
-	for (i = 0; i < sizeof(iio_direction) / sizeof(iio_direction[0]); i++)
+	for (i = 0; i < ARRAY_SIZE(iio_direction); i++)
 		if (!strncmp(full_name, iio_direction[i],
 			     strlen(iio_direction[i]))) {
 			prefix = iio_direction[i];
@@ -65,6 +61,7 @@
 			*w = *r;
 			w++;
 		}
+
 		r++;
 	}
 	*w = '\0';
@@ -88,15 +85,10 @@
  *
  * Returns a value >= 0 on success, otherwise a negative error code.
  **/
-int iioutils_get_type(unsigned *is_signed,
-			     unsigned *bytes,
-			     unsigned *bits_used,
-			     unsigned *shift,
-			     uint64_t *mask,
-			     unsigned *be,
-			     const char *device_dir,
-			     const char *name,
-			     const char *generic_name)
+int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
+		      unsigned *shift, uint64_t *mask, unsigned *be,
+		      const char *device_dir, const char *name,
+		      const char *generic_name)
 {
 	FILE *sysfsfp;
 	int ret;
@@ -122,12 +114,13 @@
 	}
 
 	dp = opendir(scan_el_dir);
-	if (dp == NULL) {
+	if (!dp) {
 		ret = -errno;
 		goto error_free_builtname_generic;
 	}
+
 	ret = -ENOENT;
-	while (ent = readdir(dp), ent != NULL)
+	while (ent = readdir(dp), ent)
 		/*
 		 * Do we allow devices to override a generic name with
 		 * a specific one?
@@ -140,10 +133,12 @@
 				ret = -ENOMEM;
 				goto error_closedir;
 			}
+
 			sysfsfp = fopen(filename, "r");
-			if (sysfsfp == NULL) {
+			if (!sysfsfp) {
 				ret = -errno;
-				printf("failed to open %s\n", filename);
+				fprintf(stderr, "failed to open %s\n",
+					filename);
 				goto error_free_filename;
 			}
 
@@ -155,31 +150,36 @@
 				     &padint, shift);
 			if (ret < 0) {
 				ret = -errno;
-				printf("failed to pass scan type description\n");
+				fprintf(stderr,
+					"failed to pass scan type description\n");
 				goto error_close_sysfsfp;
 			} else if (ret != 5) {
 				ret = -EIO;
-				printf("scan type description didn't match\n");
+				fprintf(stderr,
+					"scan type description didn't match\n");
 				goto error_close_sysfsfp;
 			}
+
 			*be = (endianchar == 'b');
 			*bytes = padint / 8;
 			if (*bits_used == 64)
 				*mask = ~0;
 			else
-				*mask = (1 << *bits_used) - 1;
+				*mask = (1ULL << *bits_used) - 1;
+
 			*is_signed = (signchar == 's');
 			if (fclose(sysfsfp)) {
 				ret = -errno;
-				printf("Failed to close %s\n", filename);
+				fprintf(stderr, "Failed to close %s\n",
+					filename);
 				goto error_free_filename;
 			}
 
 			sysfsfp = 0;
 			free(filename);
-
 			filename = 0;
 		}
+
 error_close_sysfsfp:
 	if (sysfsfp)
 		if (fclose(sysfsfp))
@@ -188,6 +188,7 @@
 error_free_filename:
 	if (filename)
 		free(filename);
+
 error_closedir:
 	if (closedir(dp) == -1)
 		perror("iioutils_get_type(): Failed to close directory");
@@ -212,11 +213,9 @@
  *
  * Returns a value >= 0 on success, otherwise a negative error code.
  **/
-int iioutils_get_param_float(float *output,
-				    const char *param_name,
-				    const char *device_dir,
-				    const char *name,
-				    const char *generic_name)
+int iioutils_get_param_float(float *output, const char *param_name,
+			     const char *device_dir, const char *name,
+			     const char *generic_name)
 {
 	FILE *sysfsfp;
 	int ret;
@@ -235,13 +234,15 @@
 		ret = -ENOMEM;
 		goto error_free_builtname;
 	}
+
 	dp = opendir(device_dir);
-	if (dp == NULL) {
+	if (!dp) {
 		ret = -errno;
 		goto error_free_builtname_generic;
 	}
+
 	ret = -ENOENT;
-	while (ent = readdir(dp), ent != NULL)
+	while (ent = readdir(dp), ent)
 		if ((strcmp(builtname, ent->d_name) == 0) ||
 		    (strcmp(builtname_generic, ent->d_name) == 0)) {
 			ret = asprintf(&filename,
@@ -250,11 +251,13 @@
 				ret = -ENOMEM;
 				goto error_closedir;
 			}
+
 			sysfsfp = fopen(filename, "r");
 			if (!sysfsfp) {
 				ret = -errno;
 				goto error_free_filename;
 			}
+
 			errno = 0;
 			if (fscanf(sysfsfp, "%f", output) != 1)
 				ret = errno ? -errno : -ENODATA;
@@ -264,6 +267,7 @@
 error_free_filename:
 	if (filename)
 		free(filename);
+
 error_closedir:
 	if (closedir(dp) == -1)
 		perror("iioutils_get_param_float(): Failed to close directory");
@@ -282,19 +286,17 @@
  * @cnt: the amount of array elements
  **/
 
-void bsort_channel_array_by_index(struct iio_channel_info **ci_array,
-					 int cnt)
+void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt)
 {
-
 	struct iio_channel_info temp;
 	int x, y;
 
 	for (x = 0; x < cnt; x++)
 		for (y = 0; y < (cnt - 1); y++)
-			if ((*ci_array)[y].index > (*ci_array)[y+1].index) {
-				temp = (*ci_array)[y + 1];
-				(*ci_array)[y + 1] = (*ci_array)[y];
-				(*ci_array)[y] = temp;
+			if (ci_array[y].index > ci_array[y + 1].index) {
+				temp = ci_array[y + 1];
+				ci_array[y + 1] = ci_array[y];
+				ci_array[y] = temp;
 			}
 }
 
@@ -307,8 +309,7 @@
  * Returns 0 on success, otherwise a negative error code.
  **/
 int build_channel_array(const char *device_dir,
-			      struct iio_channel_info **ci_array,
-			      int *counter)
+			struct iio_channel_info **ci_array, int *counter)
 {
 	DIR *dp;
 	FILE *sysfsfp;
@@ -325,11 +326,12 @@
 		return -ENOMEM;
 
 	dp = opendir(scan_el_dir);
-	if (dp == NULL) {
+	if (!dp) {
 		ret = -errno;
 		goto error_free_name;
 	}
-	while (ent = readdir(dp), ent != NULL)
+
+	while (ent = readdir(dp), ent)
 		if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
 			   "_en") == 0) {
 			ret = asprintf(&filename,
@@ -338,12 +340,14 @@
 				ret = -ENOMEM;
 				goto error_close_dir;
 			}
+
 			sysfsfp = fopen(filename, "r");
-			if (sysfsfp == NULL) {
+			if (!sysfsfp) {
 				ret = -errno;
 				free(filename);
 				goto error_close_dir;
 			}
+
 			errno = 0;
 			if (fscanf(sysfsfp, "%i", &ret) != 1) {
 				ret = errno ? -errno : -ENODATA;
@@ -353,9 +357,9 @@
 				free(filename);
 				goto error_close_dir;
 			}
-
 			if (ret == 1)
 				(*counter)++;
+
 			if (fclose(sysfsfp)) {
 				ret = -errno;
 				free(filename);
@@ -364,13 +368,15 @@
 
 			free(filename);
 		}
+
 	*ci_array = malloc(sizeof(**ci_array) * (*counter));
-	if (*ci_array == NULL) {
+	if (!*ci_array) {
 		ret = -ENOMEM;
 		goto error_close_dir;
 	}
+
 	seekdir(dp, 0);
-	while (ent = readdir(dp), ent != NULL) {
+	while (ent = readdir(dp), ent) {
 		if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
 			   "_en") == 0) {
 			int current_enabled = 0;
@@ -384,13 +390,15 @@
 				count--;
 				goto error_cleanup_array;
 			}
+
 			sysfsfp = fopen(filename, "r");
-			if (sysfsfp == NULL) {
+			if (!sysfsfp) {
 				ret = -errno;
 				free(filename);
 				count--;
 				goto error_cleanup_array;
 			}
+
 			errno = 0;
 			if (fscanf(sysfsfp, "%i", &current_enabled) != 1) {
 				ret = errno ? -errno : -ENODATA;
@@ -417,12 +425,13 @@
 			current->name = strndup(ent->d_name,
 						strlen(ent->d_name) -
 						strlen("_en"));
-			if (current->name == NULL) {
+			if (!current->name) {
 				free(filename);
 				ret = -ENOMEM;
 				count--;
 				goto error_cleanup_array;
 			}
+
 			/* Get the generic and specific name elements */
 			ret = iioutils_break_up_name(current->name,
 						     &current->generic_name);
@@ -432,6 +441,7 @@
 				count--;
 				goto error_cleanup_array;
 			}
+
 			ret = asprintf(&filename,
 				       "%s/%s_index",
 				       scan_el_dir,
@@ -441,10 +451,12 @@
 				ret = -ENOMEM;
 				goto error_cleanup_array;
 			}
+
 			sysfsfp = fopen(filename, "r");
-			if (sysfsfp == NULL) {
+			if (!sysfsfp) {
 				ret = -errno;
-				printf("failed to open %s\n", filename);
+				fprintf(stderr, "failed to open %s\n",
+					filename);
 				free(filename);
 				goto error_cleanup_array;
 			}
@@ -472,15 +484,17 @@
 						       device_dir,
 						       current->name,
 						       current->generic_name);
-			if (ret < 0)
+			if ((ret < 0) && (ret != -ENOENT))
 				goto error_cleanup_array;
+
 			ret = iioutils_get_param_float(&current->offset,
 						       "offset",
 						       device_dir,
 						       current->name,
 						       current->generic_name);
-			if (ret < 0)
+			if ((ret < 0) && (ret != -ENOENT))
 				goto error_cleanup_array;
+
 			ret = iioutils_get_type(&current->is_signed,
 						&current->bytes,
 						&current->bits_used,
@@ -502,7 +516,7 @@
 
 	free(scan_el_dir);
 	/* reorder so that the array is in index order */
-	bsort_channel_array_by_index(ci_array, *counter);
+	bsort_channel_array_by_index(*ci_array, *counter);
 
 	return 0;
 
@@ -512,6 +526,8 @@
 		free((*ci_array)[i].generic_name);
 	}
 	free(*ci_array);
+	*ci_array = NULL;
+	*counter = 0;
 error_close_dir:
 	if (dp)
 		if (closedir(dp) == -1)
@@ -523,7 +539,7 @@
 	return ret;
 }
 
-int calc_digits(int num)
+static int calc_digits(int num)
 {
 	int count = 0;
 
@@ -549,44 +565,43 @@
 	const struct dirent *ent;
 	int number, numstrlen, ret;
 
-	FILE *nameFile;
+	FILE *namefp;
 	DIR *dp;
 	char thisname[IIO_MAX_NAME_LENGTH];
 	char *filename;
 
 	dp = opendir(iio_dir);
-	if (dp == NULL) {
-		printf("No industrialio devices available\n");
+	if (!dp) {
+		fprintf(stderr, "No industrialio devices available\n");
 		return -ENODEV;
 	}
 
-	while (ent = readdir(dp), ent != NULL) {
+	while (ent = readdir(dp), ent) {
 		if (strcmp(ent->d_name, ".") != 0 &&
-			strcmp(ent->d_name, "..") != 0 &&
-			strlen(ent->d_name) > strlen(type) &&
-			strncmp(ent->d_name, type, strlen(type)) == 0) {
+		    strcmp(ent->d_name, "..") != 0 &&
+		    strlen(ent->d_name) > strlen(type) &&
+		    strncmp(ent->d_name, type, strlen(type)) == 0) {
 			errno = 0;
 			ret = sscanf(ent->d_name + strlen(type), "%d", &number);
 			if (ret < 0) {
 				ret = -errno;
-				printf("failed to read element number\n");
+				fprintf(stderr,
+					"failed to read element number\n");
 				goto error_close_dir;
 			} else if (ret != 1) {
 				ret = -EIO;
-				printf("failed to match element number\n");
+				fprintf(stderr,
+					"failed to match element number\n");
 				goto error_close_dir;
 			}
 
 			numstrlen = calc_digits(number);
 			/* verify the next character is not a colon */
 			if (strncmp(ent->d_name + strlen(type) + numstrlen,
-					":",
-					1) != 0) {
-				filename = malloc(strlen(iio_dir)
-						+ strlen(type)
-						+ numstrlen
-						+ 6);
-				if (filename == NULL) {
+			    ":", 1) != 0) {
+				filename = malloc(strlen(iio_dir) + strlen(type)
+						  + numstrlen + 6);
+				if (!filename) {
 					ret = -ENOMEM;
 					goto error_close_dir;
 				}
@@ -598,19 +613,20 @@
 					goto error_close_dir;
 				}
 
-				nameFile = fopen(filename, "r");
-				if (!nameFile) {
+				namefp = fopen(filename, "r");
+				if (!namefp) {
 					free(filename);
 					continue;
 				}
+
 				free(filename);
 				errno = 0;
-				if (fscanf(nameFile, "%s", thisname) != 1) {
+				if (fscanf(namefp, "%s", thisname) != 1) {
 					ret = errno ? -errno : -ENODATA;
 					goto error_close_dir;
 				}
 
-				if (fclose(nameFile)) {
+				if (fclose(namefp)) {
 					ret = -errno;
 					goto error_close_dir;
 				}
@@ -618,6 +634,7 @@
 				if (strcmp(name, thisname) == 0) {
 					if (closedir(dp) == -1)
 						return -errno;
+
 					return number;
 				}
 			}
@@ -631,6 +648,7 @@
 error_close_dir:
 	if (closedir(dp) == -1)
 		perror("find_type_by_name(): Failed to close directory");
+
 	return ret;
 }
 
@@ -642,18 +660,20 @@
 	int test;
 	char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
 
-	if (temp == NULL)
+	if (!temp)
 		return -ENOMEM;
+
 	ret = sprintf(temp, "%s/%s", basedir, filename);
 	if (ret < 0)
 		goto error_free;
 
 	sysfsfp = fopen(temp, "w");
-	if (sysfsfp == NULL) {
+	if (!sysfsfp) {
 		ret = -errno;
-		printf("failed to open %s\n", temp);
+		fprintf(stderr, "failed to open %s\n", temp);
 		goto error_free;
 	}
+
 	ret = fprintf(sysfsfp, "%d", val);
 	if (ret < 0) {
 		if (fclose(sysfsfp))
@@ -669,11 +689,12 @@
 
 	if (verify) {
 		sysfsfp = fopen(temp, "r");
-		if (sysfsfp == NULL) {
+		if (!sysfsfp) {
 			ret = -errno;
-			printf("failed to open %s\n", temp);
+			fprintf(stderr, "failed to open %s\n", temp);
 			goto error_free;
 		}
+
 		if (fscanf(sysfsfp, "%d", &test) != 1) {
 			ret = errno ? -errno : -ENODATA;
 			if (fclose(sysfsfp))
@@ -688,13 +709,13 @@
 		}
 
 		if (test != val) {
-			printf("Possible failure in int write %d to %s%s\n",
-				val,
-				basedir,
-				filename);
+			fprintf(stderr,
+				"Possible failure in int write %d to %s/%s\n",
+				val, basedir, filename);
 			ret = -1;
 		}
 	}
+
 error_free:
 	free(temp);
 	return ret;
@@ -735,20 +756,22 @@
 	FILE  *sysfsfp;
 	char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
 
-	if (temp == NULL) {
-		printf("Memory allocation failed\n");
+	if (!temp) {
+		fprintf(stderr, "Memory allocation failed\n");
 		return -ENOMEM;
 	}
+
 	ret = sprintf(temp, "%s/%s", basedir, filename);
 	if (ret < 0)
 		goto error_free;
 
 	sysfsfp = fopen(temp, "w");
-	if (sysfsfp == NULL) {
+	if (!sysfsfp) {
 		ret = -errno;
-		printf("Could not open %s\n", temp);
+		fprintf(stderr, "Could not open %s\n", temp);
 		goto error_free;
 	}
+
 	ret = fprintf(sysfsfp, "%s", val);
 	if (ret < 0) {
 		if (fclose(sysfsfp))
@@ -764,11 +787,12 @@
 
 	if (verify) {
 		sysfsfp = fopen(temp, "r");
-		if (sysfsfp == NULL) {
+		if (!sysfsfp) {
 			ret = -errno;
-			printf("could not open file to verify\n");
+			fprintf(stderr, "Could not open file to verify\n");
 			goto error_free;
 		}
+
 		if (fscanf(sysfsfp, "%s", temp) != 1) {
 			ret = errno ? -errno : -ENODATA;
 			if (fclose(sysfsfp))
@@ -783,16 +807,14 @@
 		}
 
 		if (strcmp(temp, val) != 0) {
-			printf("Possible failure in string write of %s "
-				"Should be %s "
-				"written to %s\%s\n",
-				temp,
-				val,
-				basedir,
-				filename);
+			fprintf(stderr,
+				"Possible failure in string write of %s "
+				"Should be %s written to %s/%s\n", temp, val,
+				basedir, filename);
 			ret = -1;
 		}
 	}
+
 error_free:
 	free(temp);
 
@@ -841,19 +863,21 @@
 	FILE  *sysfsfp;
 	char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
 
-	if (temp == NULL) {
-		printf("Memory allocation failed");
+	if (!temp) {
+		fprintf(stderr, "Memory allocation failed");
 		return -ENOMEM;
 	}
+
 	ret = sprintf(temp, "%s/%s", basedir, filename);
 	if (ret < 0)
 		goto error_free;
 
 	sysfsfp = fopen(temp, "r");
-	if (sysfsfp == NULL) {
+	if (!sysfsfp) {
 		ret = -errno;
 		goto error_free;
 	}
+
 	errno = 0;
 	if (fscanf(sysfsfp, "%d\n", &ret) != 1) {
 		ret = errno ? -errno : -ENODATA;
@@ -868,6 +892,7 @@
 
 error_free:
 	free(temp);
+
 	return ret;
 }
 
@@ -885,19 +910,21 @@
 	FILE  *sysfsfp;
 	char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
 
-	if (temp == NULL) {
-		printf("Memory allocation failed");
+	if (!temp) {
+		fprintf(stderr, "Memory allocation failed");
 		return -ENOMEM;
 	}
+
 	ret = sprintf(temp, "%s/%s", basedir, filename);
 	if (ret < 0)
 		goto error_free;
 
 	sysfsfp = fopen(temp, "r");
-	if (sysfsfp == NULL) {
+	if (!sysfsfp) {
 		ret = -errno;
 		goto error_free;
 	}
+
 	errno = 0;
 	if (fscanf(sysfsfp, "%f\n", val) != 1) {
 		ret = errno ? -errno : -ENODATA;
@@ -912,6 +939,7 @@
 
 error_free:
 	free(temp);
+
 	return ret;
 }
 
@@ -929,19 +957,21 @@
 	FILE  *sysfsfp;
 	char *temp = malloc(strlen(basedir) + strlen(filename) + 2);
 
-	if (temp == NULL) {
-		printf("Memory allocation failed");
+	if (!temp) {
+		fprintf(stderr, "Memory allocation failed");
 		return -ENOMEM;
 	}
+
 	ret = sprintf(temp, "%s/%s", basedir, filename);
 	if (ret < 0)
 		goto error_free;
 
 	sysfsfp = fopen(temp, "r");
-	if (sysfsfp == NULL) {
+	if (!sysfsfp) {
 		ret = -errno;
 		goto error_free;
 	}
+
 	errno = 0;
 	if (fscanf(sysfsfp, "%s\n", str) != 1) {
 		ret = errno ? -errno : -ENODATA;
@@ -956,7 +986,6 @@
 
 error_free:
 	free(temp);
+
 	return ret;
 }
-
-#endif /* _IIO_UTILS_H */
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index 379eed9..e3503bf 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -18,6 +18,8 @@
 #define FORMAT_SCAN_ELEMENTS_DIR "%s/scan_elements"
 #define FORMAT_TYPE_FILE "%s_type"
 
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+
 extern const char *iio_dir;
 
 /**
@@ -51,17 +53,16 @@
 };
 
 int iioutils_break_up_name(const char *full_name, char **generic_name);
-int iioutils_get_type(unsigned *is_signed, unsigned *bytes,
-					  unsigned *bits_used, unsigned *shift,
-					  uint64_t *mask, unsigned *be,
-					  const char *device_dir, const char *name,
-					  const char *generic_name);
+int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
+		      unsigned *shift, uint64_t *mask, unsigned *be,
+		      const char *device_dir, const char *name,
+		      const char *generic_name);
 int iioutils_get_param_float(float *output, const char *param_name,
-							 const char *device_dir, const char *name,
-							 const char *generic_name);
-void bsort_channel_array_by_index(struct iio_channel_info **ci_array, int cnt);
+			     const char *device_dir, const char *name,
+			     const char *generic_name);
+void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt);
 int build_channel_array(const char *device_dir,
-						struct iio_channel_info **ci_array, int *counter);
+			struct iio_channel_info **ci_array, int *counter);
 int find_type_by_name(const char *name, const char *type);
 int write_sysfs_int(const char *filename, const char *basedir, int val);
 int write_sysfs_int_and_verify(const char *filename, const char *basedir,
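The iio_utils helpers above are meant to be combined by callers: find_type_by_name() maps a device name to its sysfs index, and the write_sysfs_*/read_sysfs_* helpers then operate on attribute files under a base directory. A minimal sketch of such a caller follows; it is not part of this patch, and the device name and "sampling_frequency" attribute are only illustrative.

#include <stdio.h>
#include "iio_utils.h"

/* Illustrative helper: set the sampling frequency of a named IIO device. */
static int set_sampling_freq(const char *dev_name, int freq)
{
	char basedir[256];
	int devnum;

	/* Resolve e.g. "my_accel" to its iio:deviceN index. */
	devnum = find_type_by_name(dev_name, "iio:device");
	if (devnum < 0)
		return devnum;

	/* iio_dir already ends with '/', so this yields .../iio:deviceN */
	snprintf(basedir, sizeof(basedir), "%siio:device%d", iio_dir, devnum);

	/* Writes <basedir>/sampling_frequency */
	return write_sysfs_int("sampling_frequency", basedir, freq);
}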
diff --git a/tools/iio/lsiio.c b/tools/iio/lsiio.c
index b59ee17..3d650e6 100644
--- a/tools/iio/lsiio.c
+++ b/tools/iio/lsiio.c
@@ -20,7 +20,6 @@
 #include <sys/dir.h>
 #include "iio_utils.h"
 
-
 static enum verbosity {
 	VERBLEVEL_DEFAULT,	/* 0 gives lspci behaviour */
 	VERBLEVEL_SENSORS,	/* 1 lists sensors */
@@ -29,17 +28,16 @@
 const char *type_device = "iio:device";
 const char *type_trigger = "trigger";
 
-
 static inline int check_prefix(const char *str, const char *prefix)
 {
 	return strlen(str) > strlen(prefix) &&
-		strncmp(str, prefix, strlen(prefix)) == 0;
+	       strncmp(str, prefix, strlen(prefix)) == 0;
 }
 
 static inline int check_postfix(const char *str, const char *postfix)
 {
 	return strlen(str) > strlen(postfix) &&
-		strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
+	       strcmp(str + strlen(str) - strlen(postfix), postfix) == 0;
 }
 
 static int dump_channels(const char *dev_dir_name)
@@ -48,13 +46,13 @@
 	const struct dirent *ent;
 
 	dp = opendir(dev_dir_name);
-	if (dp == NULL)
+	if (!dp)
 		return -errno;
-	while (ent = readdir(dp), ent != NULL)
+
+	while (ent = readdir(dp), ent)
 		if (check_prefix(ent->d_name, "in_") &&
-		    check_postfix(ent->d_name, "_raw")) {
+		    check_postfix(ent->d_name, "_raw"))
 			printf("   %-10s\n", ent->d_name);
-		}
 
 	return (closedir(dp) == -1) ? -errno : 0;
 }
@@ -63,20 +61,22 @@
 {
 	char name[IIO_MAX_NAME_LENGTH];
 	int dev_idx;
-	int retval;
+	int ret;
 
-	retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device),
-			"%i", &dev_idx);
-	if (retval != 1)
+	ret = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_device), "%i",
+		     &dev_idx);
+	if (ret != 1)
 		return -EINVAL;
-	retval = read_sysfs_string("name", dev_dir_name, name);
-	if (retval)
-		return retval;
+
+	ret = read_sysfs_string("name", dev_dir_name, name);
+	if (ret < 0)
+		return ret;
 
 	printf("Device %03d: %s\n", dev_idx, name);
 
 	if (verblevel >= VERBLEVEL_SENSORS)
 		return dump_channels(dev_dir_name);
+
 	return 0;
 }
 
@@ -84,17 +84,19 @@
 {
 	char name[IIO_MAX_NAME_LENGTH];
 	int dev_idx;
-	int retval;
+	int ret;
 
-	retval = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
-			"%i", &dev_idx);
-	if (retval != 1)
+	ret = sscanf(dev_dir_name + strlen(iio_dir) + strlen(type_trigger),
+		     "%i", &dev_idx);
+	if (ret != 1)
 		return -EINVAL;
-	retval = read_sysfs_string("name", dev_dir_name, name);
-	if (retval)
-		return retval;
+
+	ret = read_sysfs_string("name", dev_dir_name, name);
+	if (ret < 0)
+		return ret;
 
 	printf("Trigger %03d: %s\n", dev_idx, name);
+
 	return 0;
 }
 
@@ -105,12 +107,12 @@
 	DIR *dp;
 
 	dp = opendir(iio_dir);
-	if (dp == NULL) {
-		printf("No industrial I/O devices available\n");
+	if (!dp) {
+		fprintf(stderr, "No industrial I/O devices available\n");
 		return -ENODEV;
 	}
 
-	while (ent = readdir(dp), ent != NULL) {
+	while (ent = readdir(dp), ent) {
 		if (check_prefix(ent->d_name, type_device)) {
 			char *dev_dir_name;
 
@@ -132,7 +134,7 @@
 		}
 	}
 	rewinddir(dp);
-	while (ent = readdir(dp), ent != NULL) {
+	while (ent = readdir(dp), ent) {
 		if (check_prefix(ent->d_name, type_trigger)) {
 			char *dev_dir_name;
 
@@ -151,6 +153,7 @@
 			free(dev_dir_name);
 		}
 	}
+
 	return (closedir(dp) == -1) ? -errno : 0;
 
 error_close_dir:
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
new file mode 100644
index 0000000..d07e586
--- /dev/null
+++ b/tools/include/linux/export.h
@@ -0,0 +1,10 @@
+#ifndef _TOOLS_LINUX_EXPORT_H_
+#define _TOOLS_LINUX_EXPORT_H_
+
+#define EXPORT_SYMBOL(sym)
+#define EXPORT_SYMBOL_GPL(sym)
+#define EXPORT_SYMBOL_GPL_FUTURE(sym)
+#define EXPORT_UNUSED_SYMBOL(sym)
+#define EXPORT_UNUSED_SYMBOL_GPL(sym)
+
+#endif
diff --git a/tools/lguest/.gitignore b/tools/lguest/.gitignore
index 115587f..8d9a838 100644
--- a/tools/lguest/.gitignore
+++ b/tools/lguest/.gitignore
@@ -1 +1,2 @@
 lguest
+include
diff --git a/tools/lguest/Makefile b/tools/lguest/Makefile
index a107b5e..d04599a 100644
--- a/tools/lguest/Makefile
+++ b/tools/lguest/Makefile
@@ -11,3 +11,4 @@
 
 clean:
 	rm -f lguest
+	rm -rf include
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index e440524..80159e6 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -125,7 +125,11 @@
 /* The list of Guest devices, based on command line arguments. */
 static struct device_list devices;
 
-struct virtio_pci_cfg_cap {
+/*
+ * Just like struct virtio_pci_cfg_cap in uapi/linux/virtio_pci.h,
+ * but uses a u32 explicitly for the data.
+ */
+struct virtio_pci_cfg_cap_u32 {
 	struct virtio_pci_cap cap;
 	u32 pci_cfg_data; /* Data for BAR access. */
 };
@@ -157,7 +161,7 @@
 	struct virtio_pci_notify_cap notify;
 	struct virtio_pci_cap isr;
 	struct virtio_pci_cap device;
-	struct virtio_pci_cfg_cap cfg_access;
+	struct virtio_pci_cfg_cap_u32 cfg_access;
 };
 
 /* The device structure describes a single device. */
@@ -1291,7 +1295,7 @@
  * only fault if they try to write with some invalid bar/offset/length.
  */
 static bool valid_bar_access(struct device *d,
-			     struct virtio_pci_cfg_cap *cfg_access)
+			     struct virtio_pci_cfg_cap_u32 *cfg_access)
 {
 	/* We only have 1 bar (BAR0) */
 	if (cfg_access->cap.bar != 0)
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index 8305b3e..eb7cf4d 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 
 #include "debugfs.h"
+#include "tracefs.h"
 
 #ifndef DEBUGFS_DEFAULT_PATH
 #define DEBUGFS_DEFAULT_PATH		"/sys/kernel/debug"
@@ -94,11 +95,21 @@
 			 "Hint:\tIs the debugfs filesystem mounted?\n"
 			 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
 		break;
-	case EACCES:
+	case EACCES: {
+		const char *mountpoint = debugfs_mountpoint;
+
+		if (!access(debugfs_mountpoint, R_OK) && strncmp(filename, "tracing/", 8) == 0) {
+			const char *tracefs_mntpoint = tracefs_find_mountpoint();
+
+			if (tracefs_mntpoint)
+				mountpoint = tracefs_mntpoint;
+		}
+
 		snprintf(buf, size,
 			 "Error:\tNo permissions to read %s/%s\n"
 			 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
-			 debugfs_mountpoint, filename, debugfs_mountpoint);
+			 debugfs_mountpoint, filename, mountpoint);
+	}
 		break;
 	default:
 		snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
new file mode 100644
index 0000000..812aeed
--- /dev/null
+++ b/tools/lib/bpf/.gitignore
@@ -0,0 +1,2 @@
+libbpf_version.h
+FEATURE-DUMP
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
new file mode 100644
index 0000000..d874975
--- /dev/null
+++ b/tools/lib/bpf/Build
@@ -0,0 +1 @@
+libbpf-y := libbpf.o bpf.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
new file mode 100644
index 0000000..f68d23a
--- /dev/null
+++ b/tools/lib/bpf/Makefile
@@ -0,0 +1,195 @@
+# Most of this file is copied from tools/lib/traceevent/Makefile
+
+BPF_VERSION = 0
+BPF_PATCHLEVEL = 0
+BPF_EXTRAVERSION = 1
+
+MAKEFLAGS += --no-print-directory
+
+
+# Makefiles suck: This macro sets a default value of $(2) for the
+# variable named by $(1), unless the variable has been set by
+# environment or command line. This is necessary for CC and AR
+# because make sets default values, so the simpler ?= approach
+# won't work as expected.
+define allow-override
+  $(if $(or $(findstring environment,$(origin $(1))),\
+            $(findstring command line,$(origin $(1)))),,\
+    $(eval $(1) = $(2)))
+endef
+
+# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
+$(call allow-override,CC,$(CROSS_COMPILE)gcc)
+$(call allow-override,AR,$(CROSS_COMPILE)ar)
+
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
+  libdir_relative = lib64
+else
+  libdir_relative = lib
+endif
+
+prefix ?= /usr/local
+libdir = $(prefix)/$(libdir_relative)
+man_dir = $(prefix)/share/man
+man_dir_SQ = '$(subst ','\'',$(man_dir))'
+
+export man_dir man_dir_SQ INSTALL
+export DESTDIR DESTDIR_SQ
+
+include ../../scripts/Makefile.include
+
+# copy a bit from Linux kbuild
+
+ifeq ("$(origin V)", "command line")
+  VERBOSE = $(V)
+endif
+ifndef VERBOSE
+  VERBOSE = 0
+endif
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(shell pwd)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+FEATURE_DISPLAY = libelf libelf-getphdrnum libelf-mmap bpf
+FEATURE_TESTS = libelf bpf
+
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
+FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
+
+include $(srctree)/tools/build/Makefile.feature
+
+export prefix libdir src obj
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
+plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
+
+LIB_FILE = libbpf.a libbpf.so
+
+VERSION		= $(BPF_VERSION)
+PATCHLEVEL	= $(BPF_PATCHLEVEL)
+EXTRAVERSION	= $(BPF_EXTRAVERSION)
+
+OBJ		= $@
+N		=
+
+LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+  CFLAGS := $(EXTRA_CFLAGS)
+else
+  CFLAGS := -g -Wall
+endif
+
+ifeq ($(feature-libelf-mmap), 1)
+  override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
+endif
+
+ifeq ($(feature-libelf-getphdrnum), 1)
+  override CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
+endif
+
+# Append required CFLAGS
+override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += -Werror -Wall
+override CFLAGS += -fPIC
+override CFLAGS += $(INCLUDES)
+
+ifeq ($(VERBOSE),1)
+  Q =
+else
+  Q = @
+endif
+
+# Disable command line variables (CFLAGS) override from top
+# level Makefile (perf), otherwise build Makefile will get
+# the same command line setup.
+MAKEOVERRIDES=
+
+export srctree OUTPUT CC LD CFLAGS V
+build := -f $(srctree)/tools/build/Makefile.build dir=. obj
+
+BPF_IN    := $(OUTPUT)libbpf-in.o
+LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
+
+CMD_TARGETS = $(LIB_FILE)
+
+TARGETS = $(CMD_TARGETS)
+
+all: $(VERSION_FILES) all_cmd
+
+all_cmd: $(CMD_TARGETS)
+
+$(BPF_IN): force elfdep bpfdep
+	$(Q)$(MAKE) $(build)=libbpf
+
+$(OUTPUT)libbpf.so: $(BPF_IN)
+	$(QUIET_LINK)$(CC) --shared $^ -o $@
+
+$(OUTPUT)libbpf.a: $(BPF_IN)
+	$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
+
+define update_dir
+  (echo $1 > $@.tmp;				\
+   if [ -r $@ ] && cmp -s $@ $@.tmp; then	\
+     rm -f $@.tmp;				\
+   else						\
+     echo '  UPDATE                 $@';	\
+     mv -f $@.tmp $@;				\
+   fi);
+endef
+
+define do_install
+	if [ ! -d '$(DESTDIR_SQ)$2' ]; then		\
+		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2';	\
+	fi;						\
+	$(INSTALL) $1 '$(DESTDIR_SQ)$2'
+endef
+
+install_lib: all_cmd
+	$(call QUIET_INSTALL, $(LIB_FILE)) \
+		$(call do_install,$(LIB_FILE),$(libdir_SQ))
+
+install: install_lib
+
+### Cleaning rules
+
+config-clean:
+	$(call QUIET_CLEAN, config)
+	$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
+
+clean:
+	$(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d \
+		$(RM) LIBBPF-CFLAGS
+	$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP
+
+
+
+PHONY += force elfdep bpfdep
+force:
+
+elfdep:
+	@if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit -1 ; fi
+
+bpfdep:
+	@if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit -1 ; fi
+
+# Declare the contents of the .PHONY variable as phony.  We keep that
+# information in a variable so we can use it in if_changed and friends.
+.PHONY: $(PHONY)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
new file mode 100644
index 0000000..a633105
--- /dev/null
+++ b/tools/lib/bpf/bpf.c
@@ -0,0 +1,85 @@
+/*
+ * common eBPF ELF operations.
+ *
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+
+#include <stdlib.h>
+#include <memory.h>
+#include <unistd.h>
+#include <asm/unistd.h>
+#include <linux/bpf.h>
+#include "bpf.h"
+
+/*
+ * When building perf, unistd.h is overridden, so __NR_bpf
+ * needs to be defined explicitly here.
+ */
+#ifndef __NR_bpf
+# if defined(__i386__)
+#  define __NR_bpf 357
+# elif defined(__x86_64__)
+#  define __NR_bpf 321
+# elif defined(__aarch64__)
+#  define __NR_bpf 280
+# else
+#  error __NR_bpf not defined. libbpf does not support your arch.
+# endif
+#endif
+
+static __u64 ptr_to_u64(void *ptr)
+{
+	return (__u64) (unsigned long) ptr;
+}
+
+static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+		   unsigned int size)
+{
+	return syscall(__NR_bpf, cmd, attr, size);
+}
+
+int bpf_create_map(enum bpf_map_type map_type, int key_size,
+		   int value_size, int max_entries)
+{
+	union bpf_attr attr;
+
+	memset(&attr, '\0', sizeof(attr));
+
+	attr.map_type = map_type;
+	attr.key_size = key_size;
+	attr.value_size = value_size;
+	attr.max_entries = max_entries;
+
+	return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+}
+
+int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
+		     size_t insns_cnt, char *license,
+		     u32 kern_version, char *log_buf, size_t log_buf_sz)
+{
+	int fd;
+	union bpf_attr attr;
+
+	bzero(&attr, sizeof(attr));
+	attr.prog_type = type;
+	attr.insn_cnt = (__u32)insns_cnt;
+	attr.insns = ptr_to_u64(insns);
+	attr.license = ptr_to_u64(license);
+	attr.log_buf = ptr_to_u64(NULL);
+	attr.log_size = 0;
+	attr.log_level = 0;
+	attr.kern_version = kern_version;
+
+	fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+	if (fd >= 0 || !log_buf || !log_buf_sz)
+		return fd;
+
+	/* Try again with log */
+	attr.log_buf = ptr_to_u64(log_buf);
+	attr.log_size = log_buf_sz;
+	attr.log_level = 1;
+	log_buf[0] = 0;
+	return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
new file mode 100644
index 0000000..854b736
--- /dev/null
+++ b/tools/lib/bpf/bpf.h
@@ -0,0 +1,23 @@
+/*
+ * common eBPF ELF operations.
+ *
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+#ifndef __BPF_BPF_H
+#define __BPF_BPF_H
+
+#include <linux/bpf.h>
+
+int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
+		   int max_entries);
+
+/* Recommended log buffer size */
+#define BPF_LOG_BUF_SIZE 65536
+int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
+		     size_t insns_cnt, char *license,
+		     u32 kern_version, char *log_buf,
+		     size_t log_buf_sz);
+
+#endif
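As a rough illustration of the two wrappers declared here (a sketch, not part of the patch), the snippet below creates an array map and loads a trivial "return 0" socket filter program; the map geometry is arbitrary and error handling is minimal.

#include <stdio.h>
#include <linux/bpf.h>
#include "bpf.h"

int main(void)
{
	/* r0 = 0; exit.  About the smallest program the verifier accepts. */
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = 0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	char license[] = "GPL";
	char log[BPF_LOG_BUF_SIZE];
	int map_fd, prog_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				sizeof(long), 16);
	if (map_fd < 0)
		perror("bpf_create_map");

	prog_fd = bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
				   sizeof(prog) / sizeof(prog[0]), license,
				   0 /* kern_version, only checked for kprobes */,
				   log, sizeof(log));
	if (prog_fd < 0)
		fprintf(stderr, "load failed:\n%s\n", log);

	return 0;
}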
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
new file mode 100644
index 0000000..4252fc2
--- /dev/null
+++ b/tools/lib/bpf/libbpf.c
@@ -0,0 +1,1056 @@
+/*
+ * Common eBPF ELF object loading operations.
+ *
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <asm/unistd.h>
+#include <linux/kernel.h>
+#include <linux/bpf.h>
+#include <linux/list.h>
+#include <libelf.h>
+#include <gelf.h>
+
+#include "libbpf.h"
+#include "bpf.h"
+
+#define __printf(a, b)	__attribute__((format(printf, a, b)))
+
+__printf(1, 2)
+static int __base_pr(const char *format, ...)
+{
+	va_list args;
+	int err;
+
+	va_start(args, format);
+	err = vfprintf(stderr, format, args);
+	va_end(args);
+	return err;
+}
+
+static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
+static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
+static __printf(1, 2) libbpf_print_fn_t __pr_debug;
+
+#define __pr(func, fmt, ...)	\
+do {				\
+	if ((func))		\
+		(func)("libbpf: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
+#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
+
+void libbpf_set_print(libbpf_print_fn_t warn,
+		      libbpf_print_fn_t info,
+		      libbpf_print_fn_t debug)
+{
+	__pr_warning = warn;
+	__pr_info = info;
+	__pr_debug = debug;
+}
+
+/* Copied from tools/perf/util/util.h */
+#ifndef zfree
+# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
+#endif
+
+#ifndef zclose
+# define zclose(fd) ({			\
+	int ___err = 0;			\
+	if ((fd) >= 0)			\
+		___err = close((fd));	\
+	fd = -1;			\
+	___err; })
+#endif
+
+#ifdef HAVE_LIBELF_MMAP_SUPPORT
+# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
+#else
+# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
+#endif
+
+/*
+ * bpf_prog should be a better name but it has been used in
+ * linux/filter.h.
+ */
+struct bpf_program {
+	/* Index in elf obj file, for relocation use. */
+	int idx;
+	char *section_name;
+	struct bpf_insn *insns;
+	size_t insns_cnt;
+
+	struct {
+		int insn_idx;
+		int map_idx;
+	} *reloc_desc;
+	int nr_reloc;
+
+	int fd;
+
+	struct bpf_object *obj;
+	void *priv;
+	bpf_program_clear_priv_t clear_priv;
+};
+
+static LIST_HEAD(bpf_objects_list);
+
+struct bpf_object {
+	char license[64];
+	u32 kern_version;
+	void *maps_buf;
+	size_t maps_buf_sz;
+
+	struct bpf_program *programs;
+	size_t nr_programs;
+	int *map_fds;
+	/*
+	 * This field is required because maps_buf will be freed and
+	 * maps_buf_sz will be set to 0 after the object is loaded.
+	 */
+	size_t nr_map_fds;
+	bool loaded;
+
+	/*
+	 * Information when doing elf related work. Only valid if fd
+	 * is valid.
+	 */
+	struct {
+		int fd;
+		void *obj_buf;
+		size_t obj_buf_sz;
+		Elf *elf;
+		GElf_Ehdr ehdr;
+		Elf_Data *symbols;
+		struct {
+			GElf_Shdr shdr;
+			Elf_Data *data;
+		} *reloc;
+		int nr_reloc;
+	} efile;
+	/*
+	 * All loaded bpf_object instances are linked in a list, which is
+	 * hidden from the caller. bpf_objects__<func> handlers deal with
+	 * all objects.
+	 */
+	struct list_head list;
+	char path[];
+};
+#define obj_elf_valid(o)	((o)->efile.elf)
+
+static void bpf_program__unload(struct bpf_program *prog)
+{
+	if (!prog)
+		return;
+
+	zclose(prog->fd);
+}
+
+static void bpf_program__exit(struct bpf_program *prog)
+{
+	if (!prog)
+		return;
+
+	if (prog->clear_priv)
+		prog->clear_priv(prog, prog->priv);
+
+	prog->priv = NULL;
+	prog->clear_priv = NULL;
+
+	bpf_program__unload(prog);
+	zfree(&prog->section_name);
+	zfree(&prog->insns);
+	zfree(&prog->reloc_desc);
+
+	prog->nr_reloc = 0;
+	prog->insns_cnt = 0;
+	prog->idx = -1;
+}
+
+static int
+bpf_program__init(void *data, size_t size, char *name, int idx,
+		    struct bpf_program *prog)
+{
+	if (size < sizeof(struct bpf_insn)) {
+		pr_warning("corrupted section '%s'\n", name);
+		return -EINVAL;
+	}
+
+	bzero(prog, sizeof(*prog));
+
+	prog->section_name = strdup(name);
+	if (!prog->section_name) {
+		pr_warning("failed to alloc name for prog %s\n",
+			   name);
+		goto errout;
+	}
+
+	prog->insns = malloc(size);
+	if (!prog->insns) {
+		pr_warning("failed to alloc insns for %s\n", name);
+		goto errout;
+	}
+	prog->insns_cnt = size / sizeof(struct bpf_insn);
+	memcpy(prog->insns, data,
+	       prog->insns_cnt * sizeof(struct bpf_insn));
+	prog->idx = idx;
+	prog->fd = -1;
+
+	return 0;
+errout:
+	bpf_program__exit(prog);
+	return -ENOMEM;
+}
+
+static int
+bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
+			char *name, int idx)
+{
+	struct bpf_program prog, *progs;
+	int nr_progs, err;
+
+	err = bpf_program__init(data, size, name, idx, &prog);
+	if (err)
+		return err;
+
+	progs = obj->programs;
+	nr_progs = obj->nr_programs;
+
+	progs = realloc(progs, sizeof(progs[0]) * (nr_progs + 1));
+	if (!progs) {
+		/*
+		 * In this case the original obj->programs
+		 * is still valid, so no special treatment is needed in
+		 * bpf_close_object().
+		 */
+		pr_warning("failed to alloc a new program '%s'\n",
+			   name);
+		bpf_program__exit(&prog);
+		return -ENOMEM;
+	}
+
+	pr_debug("found program %s\n", prog.section_name);
+	obj->programs = progs;
+	obj->nr_programs = nr_progs + 1;
+	prog.obj = obj;
+	progs[nr_progs] = prog;
+	return 0;
+}
+
+static struct bpf_object *bpf_object__new(const char *path,
+					  void *obj_buf,
+					  size_t obj_buf_sz)
+{
+	struct bpf_object *obj;
+
+	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
+	if (!obj) {
+		pr_warning("alloc memory failed for %s\n", path);
+		return NULL;
+	}
+
+	strcpy(obj->path, path);
+	obj->efile.fd = -1;
+
+	/*
+	 * The caller of this function should also call
+	 * bpf_object__elf_finish() after data collection to return
+	 * obj_buf to the user. If not, we should duplicate the buffer to
+	 * avoid the user freeing it before the elf is finished.
+	 */
+	obj->efile.obj_buf = obj_buf;
+	obj->efile.obj_buf_sz = obj_buf_sz;
+
+	obj->loaded = false;
+
+	INIT_LIST_HEAD(&obj->list);
+	list_add(&obj->list, &bpf_objects_list);
+	return obj;
+}
+
+static void bpf_object__elf_finish(struct bpf_object *obj)
+{
+	if (!obj_elf_valid(obj))
+		return;
+
+	if (obj->efile.elf) {
+		elf_end(obj->efile.elf);
+		obj->efile.elf = NULL;
+	}
+	obj->efile.symbols = NULL;
+
+	zfree(&obj->efile.reloc);
+	obj->efile.nr_reloc = 0;
+	zclose(obj->efile.fd);
+	obj->efile.obj_buf = NULL;
+	obj->efile.obj_buf_sz = 0;
+}
+
+static int bpf_object__elf_init(struct bpf_object *obj)
+{
+	int err = 0;
+	GElf_Ehdr *ep;
+
+	if (obj_elf_valid(obj)) {
+		pr_warning("elf init: internal error\n");
+		return -EEXIST;
+	}
+
+	if (obj->efile.obj_buf_sz > 0) {
+		/*
+		 * obj_buf should have been validated by
+		 * bpf_object__open_buffer().
+		 */
+		obj->efile.elf = elf_memory(obj->efile.obj_buf,
+					    obj->efile.obj_buf_sz);
+	} else {
+		obj->efile.fd = open(obj->path, O_RDONLY);
+		if (obj->efile.fd < 0) {
+			pr_warning("failed to open %s: %s\n", obj->path,
+					strerror(errno));
+			return -errno;
+		}
+
+		obj->efile.elf = elf_begin(obj->efile.fd,
+				LIBBPF_ELF_C_READ_MMAP,
+				NULL);
+	}
+
+	if (!obj->efile.elf) {
+		pr_warning("failed to open %s as ELF file\n",
+				obj->path);
+		err = -EINVAL;
+		goto errout;
+	}
+
+	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
+		pr_warning("failed to get EHDR from %s\n",
+				obj->path);
+		err = -EINVAL;
+		goto errout;
+	}
+	ep = &obj->efile.ehdr;
+
+	if ((ep->e_type != ET_REL) || (ep->e_machine != 0)) {
+		pr_warning("%s is not an eBPF object file\n",
+			obj->path);
+		err = -EINVAL;
+		goto errout;
+	}
+
+	return 0;
+errout:
+	bpf_object__elf_finish(obj);
+	return err;
+}
+
+static int
+bpf_object__check_endianness(struct bpf_object *obj)
+{
+	static unsigned int const endian = 1;
+
+	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
+	case ELFDATA2LSB:
+		/* We are big endian, BPF obj is little endian. */
+		if (*(unsigned char const *)&endian != 1)
+			goto mismatch;
+		break;
+
+	case ELFDATA2MSB:
+		/* We are little endian, BPF obj is big endian. */
+		if (*(unsigned char const *)&endian != 0)
+			goto mismatch;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+
+mismatch:
+	pr_warning("Error: endianness mismatch.\n");
+	return -EINVAL;
+}
+
+static int
+bpf_object__init_license(struct bpf_object *obj,
+			 void *data, size_t size)
+{
+	memcpy(obj->license, data,
+	       min(size, sizeof(obj->license) - 1));
+	pr_debug("license of %s is %s\n", obj->path, obj->license);
+	return 0;
+}
+
+static int
+bpf_object__init_kversion(struct bpf_object *obj,
+			  void *data, size_t size)
+{
+	u32 kver;
+
+	if (size != sizeof(kver)) {
+		pr_warning("invalid kver section in %s\n", obj->path);
+		return -EINVAL;
+	}
+	memcpy(&kver, data, sizeof(kver));
+	obj->kern_version = kver;
+	pr_debug("kernel version of %s is %x\n", obj->path,
+		 obj->kern_version);
+	return 0;
+}
+
+static int
+bpf_object__init_maps(struct bpf_object *obj, void *data,
+		      size_t size)
+{
+	if (size == 0) {
+		pr_debug("%s doesn't need map definition\n",
+			 obj->path);
+		return 0;
+	}
+
+	obj->maps_buf = malloc(size);
+	if (!obj->maps_buf) {
+		pr_warning("malloc maps failed: %s\n", obj->path);
+		return -ENOMEM;
+	}
+
+	obj->maps_buf_sz = size;
+	memcpy(obj->maps_buf, data, size);
+	pr_debug("maps in %s: %ld bytes\n", obj->path, (long)size);
+	return 0;
+}
+
+static int bpf_object__elf_collect(struct bpf_object *obj)
+{
+	Elf *elf = obj->efile.elf;
+	GElf_Ehdr *ep = &obj->efile.ehdr;
+	Elf_Scn *scn = NULL;
+	int idx = 0, err = 0;
+
+	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
+	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
+		pr_warning("failed to get e_shstrndx from %s\n",
+			   obj->path);
+		return -EINVAL;
+	}
+
+	while ((scn = elf_nextscn(elf, scn)) != NULL) {
+		char *name;
+		GElf_Shdr sh;
+		Elf_Data *data;
+
+		idx++;
+		if (gelf_getshdr(scn, &sh) != &sh) {
+			pr_warning("failed to get section header from %s\n",
+				   obj->path);
+			err = -EINVAL;
+			goto out;
+		}
+
+		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
+		if (!name) {
+			pr_warning("failed to get section name from %s\n",
+				   obj->path);
+			err = -EINVAL;
+			goto out;
+		}
+
+		data = elf_getdata(scn, 0);
+		if (!data) {
+			pr_warning("failed to get section data from %s(%s)\n",
+				   name, obj->path);
+			err = -EINVAL;
+			goto out;
+		}
+		pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
+			 name, (unsigned long)data->d_size,
+			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
+			 (int)sh.sh_type);
+
+		if (strcmp(name, "license") == 0)
+			err = bpf_object__init_license(obj,
+						       data->d_buf,
+						       data->d_size);
+		else if (strcmp(name, "version") == 0)
+			err = bpf_object__init_kversion(obj,
+							data->d_buf,
+							data->d_size);
+		else if (strcmp(name, "maps") == 0)
+			err = bpf_object__init_maps(obj, data->d_buf,
+						    data->d_size);
+		else if (sh.sh_type == SHT_SYMTAB) {
+			if (obj->efile.symbols) {
+				pr_warning("bpf: multiple SYMTAB in %s\n",
+					   obj->path);
+				err = -EEXIST;
+			} else
+				obj->efile.symbols = data;
+		} else if ((sh.sh_type == SHT_PROGBITS) &&
+			   (sh.sh_flags & SHF_EXECINSTR) &&
+			   (data->d_size > 0)) {
+			err = bpf_object__add_program(obj, data->d_buf,
+						      data->d_size, name, idx);
+			if (err) {
+				char errmsg[128];
+				strerror_r(-err, errmsg, sizeof(errmsg));
+				pr_warning("failed to alloc program %s (%s): %s",
+					   name, obj->path, errmsg);
+			}
+		} else if (sh.sh_type == SHT_REL) {
+			void *reloc = obj->efile.reloc;
+			int nr_reloc = obj->efile.nr_reloc + 1;
+
+			reloc = realloc(reloc,
+					sizeof(*obj->efile.reloc) * nr_reloc);
+			if (!reloc) {
+				pr_warning("realloc failed\n");
+				err = -ENOMEM;
+			} else {
+				int n = nr_reloc - 1;
+
+				obj->efile.reloc = reloc;
+				obj->efile.nr_reloc = nr_reloc;
+
+				obj->efile.reloc[n].shdr = sh;
+				obj->efile.reloc[n].data = data;
+			}
+		}
+		if (err)
+			goto out;
+	}
+out:
+	return err;
+}
+
+static struct bpf_program *
+bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
+{
+	struct bpf_program *prog;
+	size_t i;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+		if (prog->idx == idx)
+			return prog;
+	}
+	return NULL;
+}
+
+static int
+bpf_program__collect_reloc(struct bpf_program *prog,
+			   size_t nr_maps, GElf_Shdr *shdr,
+			   Elf_Data *data, Elf_Data *symbols)
+{
+	int i, nrels;
+
+	pr_debug("collecting relocating info for: '%s'\n",
+		 prog->section_name);
+	nrels = shdr->sh_size / shdr->sh_entsize;
+
+	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
+	if (!prog->reloc_desc) {
+		pr_warning("failed to alloc memory in relocation\n");
+		return -ENOMEM;
+	}
+	prog->nr_reloc = nrels;
+
+	for (i = 0; i < nrels; i++) {
+		GElf_Sym sym;
+		GElf_Rel rel;
+		unsigned int insn_idx;
+		struct bpf_insn *insns = prog->insns;
+		size_t map_idx;
+
+		if (!gelf_getrel(data, i, &rel)) {
+			pr_warning("relocation: failed to get %d reloc\n", i);
+			return -EINVAL;
+		}
+
+		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+		pr_debug("relocation: insn_idx=%u\n", insn_idx);
+
+		if (!gelf_getsym(symbols,
+				 GELF_R_SYM(rel.r_info),
+				 &sym)) {
+			pr_warning("relocation: symbol %"PRIx64" not found\n",
+				   GELF_R_SYM(rel.r_info));
+			return -EINVAL;
+		}
+
+		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
+			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
+				   insn_idx, insns[insn_idx].code);
+			return -EINVAL;
+		}
+
+		map_idx = sym.st_value / sizeof(struct bpf_map_def);
+		if (map_idx >= nr_maps) {
+			pr_warning("bpf relocation: map_idx %d larger than %d\n",
+				   (int)map_idx, (int)nr_maps - 1);
+			return -EINVAL;
+		}
+
+		prog->reloc_desc[i].insn_idx = insn_idx;
+		prog->reloc_desc[i].map_idx = map_idx;
+	}
+	return 0;
+}
+
+static int
+bpf_object__create_maps(struct bpf_object *obj)
+{
+	unsigned int i;
+	size_t nr_maps;
+	int *pfd;
+
+	nr_maps = obj->maps_buf_sz / sizeof(struct bpf_map_def);
+	if (!obj->maps_buf || !nr_maps) {
+		pr_debug("don't need to create maps for %s\n",
+			 obj->path);
+		return 0;
+	}
+
+	obj->map_fds = malloc(sizeof(int) * nr_maps);
+	if (!obj->map_fds) {
+		pr_warning("realloc perf_bpf_map_fds failed\n");
+		return -ENOMEM;
+	}
+	obj->nr_map_fds = nr_maps;
+
+	/* fill all fd with -1 */
+	memset(obj->map_fds, -1, sizeof(int) * nr_maps);
+
+	pfd = obj->map_fds;
+	for (i = 0; i < nr_maps; i++) {
+		struct bpf_map_def def;
+
+		def = *(struct bpf_map_def *)(obj->maps_buf +
+				i * sizeof(struct bpf_map_def));
+
+		*pfd = bpf_create_map(def.type,
+				      def.key_size,
+				      def.value_size,
+				      def.max_entries);
+		if (*pfd < 0) {
+			size_t j;
+			int err = *pfd;
+
+			pr_warning("failed to create map: %s\n",
+				   strerror(errno));
+			for (j = 0; j < i; j++)
+				zclose(obj->map_fds[j]);
+			obj->nr_map_fds = 0;
+			zfree(&obj->map_fds);
+			return err;
+		}
+		pr_debug("create map: fd=%d\n", *pfd);
+		pfd++;
+	}
+
+	zfree(&obj->maps_buf);
+	obj->maps_buf_sz = 0;
+	return 0;
+}
+
+static int
+bpf_program__relocate(struct bpf_program *prog, int *map_fds)
+{
+	int i;
+
+	if (!prog || !prog->reloc_desc)
+		return 0;
+
+	for (i = 0; i < prog->nr_reloc; i++) {
+		int insn_idx, map_idx;
+		struct bpf_insn *insns = prog->insns;
+
+		insn_idx = prog->reloc_desc[i].insn_idx;
+		map_idx = prog->reloc_desc[i].map_idx;
+
+		if (insn_idx >= (int)prog->insns_cnt) {
+			pr_warning("relocation out of range: '%s'\n",
+				   prog->section_name);
+			return -ERANGE;
+		}
+		insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
+		insns[insn_idx].imm = map_fds[map_idx];
+	}
+
+	zfree(&prog->reloc_desc);
+	prog->nr_reloc = 0;
+	return 0;
+}
+
+
+static int
+bpf_object__relocate(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	size_t i;
+	int err;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+
+		err = bpf_program__relocate(prog, obj->map_fds);
+		if (err) {
+			pr_warning("failed to relocate '%s'\n",
+				   prog->section_name);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static int bpf_object__collect_reloc(struct bpf_object *obj)
+{
+	int i, err;
+
+	if (!obj_elf_valid(obj)) {
+		pr_warning("Internal error: elf object is closed\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < obj->efile.nr_reloc; i++) {
+		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
+		Elf_Data *data = obj->efile.reloc[i].data;
+		int idx = shdr->sh_info;
+		struct bpf_program *prog;
+		size_t nr_maps = obj->maps_buf_sz /
+				 sizeof(struct bpf_map_def);
+
+		if (shdr->sh_type != SHT_REL) {
+			pr_warning("internal error at %d\n", __LINE__);
+			return -EINVAL;
+		}
+
+		prog = bpf_object__find_prog_by_idx(obj, idx);
+		if (!prog) {
+			pr_warning("relocation failed: no %d section\n",
+				   idx);
+			return -ENOENT;
+		}
+
+		err = bpf_program__collect_reloc(prog, nr_maps,
+						 shdr, data,
+						 obj->efile.symbols);
+		if (err)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+load_program(struct bpf_insn *insns, int insns_cnt,
+	     char *license, u32 kern_version, int *pfd)
+{
+	int ret;
+	char *log_buf;
+
+	if (!insns || !insns_cnt)
+		return -EINVAL;
+
+	log_buf = malloc(BPF_LOG_BUF_SIZE);
+	if (!log_buf)
+		pr_warning("Failed to alloc log buffer for bpf loader, continuing without log\n");
+
+	ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
+			       insns_cnt, license, kern_version,
+			       log_buf, BPF_LOG_BUF_SIZE);
+
+	if (ret >= 0) {
+		*pfd = ret;
+		ret = 0;
+		goto out;
+	}
+
+	ret = -EINVAL;
+	pr_warning("load bpf program failed: %s\n", strerror(errno));
+
+	if (log_buf) {
+		pr_warning("-- BEGIN DUMP LOG ---\n");
+		pr_warning("\n%s\n", log_buf);
+		pr_warning("-- END LOG --\n");
+	}
+
+out:
+	free(log_buf);
+	return ret;
+}
+
+static int
+bpf_program__load(struct bpf_program *prog,
+		  char *license, u32 kern_version)
+{
+	int err, fd;
+
+	err = load_program(prog->insns, prog->insns_cnt,
+			   license, kern_version, &fd);
+	if (!err)
+		prog->fd = fd;
+
+	if (err)
+		pr_warning("failed to load program '%s'\n",
+			   prog->section_name);
+	zfree(&prog->insns);
+	prog->insns_cnt = 0;
+	return err;
+}
+
+static int
+bpf_object__load_progs(struct bpf_object *obj)
+{
+	size_t i;
+	int err;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		err = bpf_program__load(&obj->programs[i],
+					obj->license,
+					obj->kern_version);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int bpf_object__validate(struct bpf_object *obj)
+{
+	if (obj->kern_version == 0) {
+		pr_warning("%s doesn't provide kernel version\n",
+			   obj->path);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static struct bpf_object *
+__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz)
+{
+	struct bpf_object *obj;
+
+	if (elf_version(EV_CURRENT) == EV_NONE) {
+		pr_warning("failed to init libelf for %s\n", path);
+		return NULL;
+	}
+
+	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
+	if (!obj)
+		return NULL;
+
+	if (bpf_object__elf_init(obj))
+		goto out;
+	if (bpf_object__check_endianness(obj))
+		goto out;
+	if (bpf_object__elf_collect(obj))
+		goto out;
+	if (bpf_object__collect_reloc(obj))
+		goto out;
+	if (bpf_object__validate(obj))
+		goto out;
+
+	bpf_object__elf_finish(obj);
+	return obj;
+out:
+	bpf_object__close(obj);
+	return NULL;
+}
+
+struct bpf_object *bpf_object__open(const char *path)
+{
+	/* param validation */
+	if (!path)
+		return NULL;
+
+	pr_debug("loading %s\n", path);
+
+	return __bpf_object__open(path, NULL, 0);
+}
+
+struct bpf_object *bpf_object__open_buffer(void *obj_buf,
+					   size_t obj_buf_sz,
+					   const char *name)
+{
+	char tmp_name[64];
+
+	/* param validation */
+	if (!obj_buf || obj_buf_sz <= 0)
+		return NULL;
+
+	if (!name) {
+		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
+			 (unsigned long)obj_buf,
+			 (unsigned long)obj_buf_sz);
+		tmp_name[sizeof(tmp_name) - 1] = '\0';
+		name = tmp_name;
+	}
+	pr_debug("loading object '%s' from buffer\n",
+		 name);
+
+	return __bpf_object__open(name, obj_buf, obj_buf_sz);
+}
+
+int bpf_object__unload(struct bpf_object *obj)
+{
+	size_t i;
+
+	if (!obj)
+		return -EINVAL;
+
+	for (i = 0; i < obj->nr_map_fds; i++)
+		zclose(obj->map_fds[i]);
+	zfree(&obj->map_fds);
+	obj->nr_map_fds = 0;
+
+	for (i = 0; i < obj->nr_programs; i++)
+		bpf_program__unload(&obj->programs[i]);
+
+	return 0;
+}
+
+int bpf_object__load(struct bpf_object *obj)
+{
+	if (!obj)
+		return -EINVAL;
+
+	if (obj->loaded) {
+		pr_warning("object should not be loaded twice\n");
+		return -EINVAL;
+	}
+
+	obj->loaded = true;
+	if (bpf_object__create_maps(obj))
+		goto out;
+	if (bpf_object__relocate(obj))
+		goto out;
+	if (bpf_object__load_progs(obj))
+		goto out;
+
+	return 0;
+out:
+	bpf_object__unload(obj);
+	pr_warning("failed to load object '%s'\n", obj->path);
+	return -EINVAL;
+}
+
+void bpf_object__close(struct bpf_object *obj)
+{
+	size_t i;
+
+	if (!obj)
+		return;
+
+	bpf_object__elf_finish(obj);
+	bpf_object__unload(obj);
+
+	zfree(&obj->maps_buf);
+
+	if (obj->programs && obj->nr_programs) {
+		for (i = 0; i < obj->nr_programs; i++)
+			bpf_program__exit(&obj->programs[i]);
+	}
+	zfree(&obj->programs);
+
+	list_del(&obj->list);
+	free(obj);
+}
+
+struct bpf_object *
+bpf_object__next(struct bpf_object *prev)
+{
+	struct bpf_object *next;
+
+	if (!prev)
+		next = list_first_entry(&bpf_objects_list,
+					struct bpf_object,
+					list);
+	else
+		next = list_next_entry(prev, list);
+
+	/* An empty list is detected here, so no check is needed on entry. */
+	if (&next->list == &bpf_objects_list)
+		return NULL;
+
+	return next;
+}
+
+const char *
+bpf_object__get_name(struct bpf_object *obj)
+{
+	if (!obj)
+		return NULL;
+	return obj->path;
+}
+
+struct bpf_program *
+bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+{
+	size_t idx;
+
+	if (!obj->programs)
+		return NULL;
+	/* First handler */
+	if (prev == NULL)
+		return &obj->programs[0];
+
+	if (prev->obj != obj) {
+		pr_warning("error: program handler doesn't match object\n");
+		return NULL;
+	}
+
+	idx = (prev - obj->programs) + 1;
+	if (idx >= obj->nr_programs)
+		return NULL;
+	return &obj->programs[idx];
+}
+
+int bpf_program__set_private(struct bpf_program *prog,
+			     void *priv,
+			     bpf_program_clear_priv_t clear_priv)
+{
+	if (prog->priv && prog->clear_priv)
+		prog->clear_priv(prog, prog->priv);
+
+	prog->priv = priv;
+	prog->clear_priv = clear_priv;
+	return 0;
+}
+
+int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
+{
+	*ppriv = prog->priv;
+	return 0;
+}
+
+const char *bpf_program__title(struct bpf_program *prog, bool dup)
+{
+	const char *title;
+
+	title = prog->section_name;
+	if (dup) {
+		title = strdup(title);
+		if (!title) {
+			pr_warning("failed to strdup program title\n");
+			return NULL;
+		}
+	}
+
+	return title;
+}
+
+int bpf_program__fd(struct bpf_program *prog)
+{
+	return prog->fd;
+}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
new file mode 100644
index 0000000..f16170c
--- /dev/null
+++ b/tools/lib/bpf/libbpf.h
@@ -0,0 +1,83 @@
+/*
+ * Common eBPF ELF object loading operations.
+ *
+ * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
+ * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015 Huawei Inc.
+ */
+#ifndef __BPF_LIBBPF_H
+#define __BPF_LIBBPF_H
+
+#include <stdio.h>
+#include <stdbool.h>
+
+/*
+ * In include/linux/compiler-gcc.h, __printf is defined. However
+ * it should be better if libbpf.h doesn't depend on Linux header file.
+ * So instead of __printf, here we use gcc attribute directly.
+ */
+typedef int (*libbpf_print_fn_t)(const char *, ...)
+	__attribute__((format(printf, 1, 2)));
+
+void libbpf_set_print(libbpf_print_fn_t warn,
+		      libbpf_print_fn_t info,
+		      libbpf_print_fn_t debug);
+
+/* Hide internal to user */
+struct bpf_object;
+
+struct bpf_object *bpf_object__open(const char *path);
+struct bpf_object *bpf_object__open_buffer(void *obj_buf,
+					   size_t obj_buf_sz,
+					   const char *name);
+void bpf_object__close(struct bpf_object *object);
+
+/* Load/unload object into/from kernel */
+int bpf_object__load(struct bpf_object *obj);
+int bpf_object__unload(struct bpf_object *obj);
+const char *bpf_object__get_name(struct bpf_object *obj);
+
+struct bpf_object *bpf_object__next(struct bpf_object *prev);
+#define bpf_object__for_each_safe(pos, tmp)			\
+	for ((pos) = bpf_object__next(NULL),		\
+		(tmp) = bpf_object__next(pos);		\
+	     (pos) != NULL;				\
+	     (pos) = (tmp), (tmp) = bpf_object__next(tmp))
+
+/* Accessors of bpf_program. */
+struct bpf_program;
+struct bpf_program *bpf_program__next(struct bpf_program *prog,
+				      struct bpf_object *obj);
+
+#define bpf_object__for_each_program(pos, obj)		\
+	for ((pos) = bpf_program__next(NULL, (obj));	\
+	     (pos) != NULL;				\
+	     (pos) = bpf_program__next((pos), (obj)))
+
+typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
+					 void *);
+
+int bpf_program__set_private(struct bpf_program *prog, void *priv,
+			     bpf_program_clear_priv_t clear_priv);
+
+int bpf_program__get_private(struct bpf_program *prog,
+			     void **ppriv);
+
+const char *bpf_program__title(struct bpf_program *prog, bool dup);
+
+int bpf_program__fd(struct bpf_program *prog);
+
+/*
+ * We don't need __attribute__((packed)) for 'bpf_map_def' because
+ * its members are all naturally aligned.  In addition, using it
+ * would trigger a -Wpacked warning, which is treated as an error
+ * due to -Werror.
+ */
+struct bpf_map_def {
+	unsigned int type;
+	unsigned int key_size;
+	unsigned int value_size;
+	unsigned int max_entries;
+};
+
+#endif
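A minimal sketch of one way a tool might drive this API (an illustration, not part of the patch; "foo_kern.o" is a placeholder object file): open the eBPF object, load it into the kernel, then walk the programs it contains.

#include <stdio.h>
#include "libbpf.h"

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open("foo_kern.o");	/* placeholder path */
	if (!obj)
		return 1;

	/* Creates the maps, relocates and loads every program in the object. */
	if (bpf_object__load(obj)) {
		bpf_object__close(obj);
		return 1;
	}

	bpf_object__for_each_program(prog, obj)
		printf("section %s loaded as fd %d\n",
		       bpf_program__title(prog, false),
		       bpf_program__fd(prog));

	bpf_object__close(obj);
	return 0;
}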
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 0b0112c..21cdf86 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -5,7 +5,7 @@
 #include <stdlib.h>
 #include <sysexits.h>
 #include "include/liblockdep/mutex.h"
-#include "../../../include/linux/rbtree.h"
+#include "../../include/linux/rbtree.h"
 
 /**
  * struct lock_lookup - liblockdep's view of a single unique lock
diff --git a/tools/lib/lockdep/uinclude/linux/kernel.h b/tools/lib/lockdep/uinclude/linux/kernel.h
index cd2cc59..276c7a8 100644
--- a/tools/lib/lockdep/uinclude/linux/kernel.h
+++ b/tools/lib/lockdep/uinclude/linux/kernel.h
@@ -23,7 +23,7 @@
 #define WARN_ON(x) (x)
 #define WARN_ON_ONCE(x) (x)
 #define likely(x) (x)
-#define WARN(x, y, z) (x)
+#define WARN(x, y...) (x)
 #define uninitialized_var(x) x
 #define __init
 #define noinline
diff --git a/tools/lib/lockdep/uinclude/linux/rbtree.h b/tools/lib/lockdep/uinclude/linux/rbtree.h
deleted file mode 100644
index 965901d..0000000
--- a/tools/lib/lockdep/uinclude/linux/rbtree.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../include/linux/rbtree.h"
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index cc25f05..4d88593 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -418,7 +418,7 @@
 }
 
 static struct func_map *
-find_func(struct pevent *pevent, unsigned long long addr)
+__find_func(struct pevent *pevent, unsigned long long addr)
 {
 	struct func_map *func;
 	struct func_map key;
@@ -434,6 +434,71 @@
 	return func;
 }
 
+struct func_resolver {
+	pevent_func_resolver_t *func;
+	void		       *priv;
+	struct func_map	       map;
+};
+
+/**
+ * pevent_set_function_resolver - set an alternative function resolver
+ * @pevent: handle for the pevent
+ * @resolver: function to be used
+ * @priv: resolver function private state.
+ *
+ * Some tools may already have a way to resolve kernel functions; allow them to
+ * keep using it instead of duplicating all the entries inside
+ * pevent->funclist.
+ */
+int pevent_set_function_resolver(struct pevent *pevent,
+				 pevent_func_resolver_t *func, void *priv)
+{
+	struct func_resolver *resolver = malloc(sizeof(*resolver));
+
+	if (resolver == NULL)
+		return -1;
+
+	resolver->func = func;
+	resolver->priv = priv;
+
+	free(pevent->func_resolver);
+	pevent->func_resolver = resolver;
+
+	return 0;
+}
+
+/**
+ * pevent_reset_function_resolver - reset alternative function resolver
+ * @pevent: handle for the pevent
+ *
+ * Stop using whatever alternative resolver was set, use the default
+ * one instead.
+ */
+void pevent_reset_function_resolver(struct pevent *pevent)
+{
+	free(pevent->func_resolver);
+	pevent->func_resolver = NULL;
+}
+
+static struct func_map *
+find_func(struct pevent *pevent, unsigned long long addr)
+{
+	struct func_map *map;
+
+	if (!pevent->func_resolver)
+		return __find_func(pevent, addr);
+
+	map = &pevent->func_resolver->map;
+	map->mod  = NULL;
+	map->addr = addr;
+	map->func = pevent->func_resolver->func(pevent->func_resolver->priv,
+						&map->addr, &map->mod);
+	if (map->func == NULL)
+		return NULL;
+
+	return map;
+}
+
 /**
  * pevent_find_function - find a function by a given address
  * @pevent: handle for the pevent
@@ -1680,6 +1745,9 @@
 	type = process_arg(event, left, &token);
 
  again:
+	if (type == EVENT_ERROR)
+		goto out_free;
+
 	/* Handle other operations in the arguments */
 	if (type == EVENT_OP && strcmp(token, ":") != 0) {
 		type = process_op(event, left, &token);
@@ -1939,6 +2007,12 @@
 			goto out_warn_free;
 
 		type = process_arg_token(event, right, tok, type);
+		if (type == EVENT_ERROR) {
+			free_arg(right);
+			/* token was freed in process_arg_token() via *tok */
+			token = NULL;
+			goto out_free;
+		}
 
 		if (right->type == PRINT_OP &&
 		    get_op_prio(arg->op.op) < get_op_prio(right->op.op)) {
@@ -4754,6 +4828,7 @@
 			case 'z':
 			case 'Z':
 			case '0' ... '9':
+			case '-':
 				goto cont_process;
 			case 'p':
 				if (pevent->long_size == 4)
@@ -6564,6 +6639,7 @@
 	free(pevent->trace_clock);
 	free(pevent->events);
 	free(pevent->sort_events);
+	free(pevent->func_resolver);
 
 	free(pevent);
 }
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 063b197..204befb 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -453,6 +453,10 @@
 struct func_map;
 struct func_list;
 struct event_handler;
+struct func_resolver;
+
+typedef char *(pevent_func_resolver_t)(void *priv,
+				       unsigned long long *addrp, char **modp);
 
 struct pevent {
 	int ref_count;
@@ -481,6 +485,7 @@
 	int cmdline_count;
 
 	struct func_map *func_map;
+	struct func_resolver *func_resolver;
 	struct func_list *funclist;
 	unsigned int func_count;
 
@@ -611,6 +616,9 @@
 	TRACE_FLAG_SOFTIRQ		= 0x10,
 };
 
+int pevent_set_function_resolver(struct pevent *pevent,
+				 pevent_func_resolver_t *func, void *priv);
+void pevent_reset_function_resolver(struct pevent *pevent);
 int pevent_register_comm(struct pevent *pevent, const char *comm, int pid);
 int pevent_register_trace_clock(struct pevent *pevent, const char *trace_clock);
 int pevent_register_function(struct pevent *pevent, char *name,
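A short, hypothetical sketch of the new resolver hook (struct my_symtab and my_symtab_lookup() are illustrative tool-side names, not part of this patch): a tool that already keeps its own symbol table can hand address lookups to libtraceevent instead of registering every function through pevent_register_function().

#include "event-parse.h"

struct my_symtab;			/* hypothetical tool-side symbol table */
char *my_symtab_lookup(struct my_symtab *tab,
		       unsigned long long *addrp);	/* hypothetical lookup */

static char *my_resolver(void *priv, unsigned long long *addrp, char **modp)
{
	struct my_symtab *tab = priv;

	*modp = NULL;			/* no module information in this sketch */
	/* The lookup may adjust *addrp to the start of the containing symbol. */
	return my_symtab_lookup(tab, addrp);
}

static int use_own_symbols(struct pevent *pevent, struct my_symtab *tab)
{
	/* Route all address-to-name lookups through the tool's own table. */
	return pevent_set_function_resolver(pevent, my_resolver, tab);
}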
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
index 618c2bc..2cd3d4c 100644
--- a/tools/net/bpf_jit_disasm.c
+++ b/tools/net/bpf_jit_disasm.c
@@ -22,9 +22,14 @@
 #include <string.h>
 #include <bfd.h>
 #include <dis-asm.h>
+#include <regex.h>
+#include <fcntl.h>
 #include <sys/klog.h>
 #include <sys/types.h>
-#include <regex.h>
+#include <sys/stat.h>
+
+#define CMD_ACTION_SIZE_BUFFER		10
+#define CMD_ACTION_READ_ALL		3
 
 static void get_exec_path(char *tpath, size_t size)
 {
@@ -87,20 +92,66 @@
 	bfd_close(bfdf);
 }
 
-static char *get_klog_buff(int *klen)
+static char *get_klog_buff(unsigned int *klen)
 {
-	int ret, len = klogctl(10, NULL, 0);
-	char *buff = malloc(len);
+	int ret, len;
+	char *buff;
 
-	assert(buff && klen);
-	ret = klogctl(3, buff, len);
-	assert(ret >= 0);
+	len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+	buff = malloc(len);
+	if (!buff)
+		return NULL;
+
+	ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
+	if (ret < 0) {
+		free(buff);
+		return NULL;
+	}
+
 	*klen = ret;
-
 	return buff;
 }
 
-static void put_klog_buff(char *buff)
+static char *get_flog_buff(const char *file, unsigned int *klen)
+{
+	int fd, ret, len;
+	struct stat fi;
+	char *buff;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0)
+		return NULL;
+
+	ret = fstat(fd, &fi);
+	if (ret < 0 || !S_ISREG(fi.st_mode))
+		goto out;
+
+	len = fi.st_size + 1;
+	buff = malloc(len);
+	if (!buff)
+		goto out;
+
+	memset(buff, 0, len);
+	ret = read(fd, buff, len - 1);
+	if (ret <= 0)
+		goto out_free;
+
+	close(fd);
+	*klen = ret;
+	return buff;
+out_free:
+	free(buff);
+out:
+	close(fd);
+	return NULL;
+}
+
+static char *get_log_buff(const char *file, unsigned int *klen)
+{
+	return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
+}
+
+static void put_log_buff(char *buff)
 {
 	free(buff);
 }
@@ -138,8 +189,10 @@
 	ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
 	ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
 		     &flen, &proglen, &pass, &base);
-	if (ret != 4)
+	if (ret != 4) {
+		regfree(&regex);
 		return 0;
+	}
 
 	tmp = ptr = haystack + off;
 	while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
@@ -169,31 +222,49 @@
 	return ulen;
 }
 
+static void usage(void)
+{
+	printf("Usage: bpf_jit_disasm [...]\n");
+	printf("       -o          Also display related opcodes (default: off).\n");
+	printf("       -f <file>   Read last image dump from file or stdin (default: klog).\n");
+	printf("       -h          Display this help.\n");
+}
+
 int main(int argc, char **argv)
 {
-	int len, klen, opcodes = 0;
-	char *kbuff;
+	unsigned int len, klen, opt, opcodes = 0;
 	static uint8_t image[32768];
+	char *kbuff, *file = NULL;
 
-	if (argc > 1) {
-		if (!strncmp("-o", argv[argc - 1], 2)) {
+	while ((opt = getopt(argc, argv, "of:")) != -1) {
+		switch (opt) {
+		case 'o':
 			opcodes = 1;
-		} else {
-			printf("usage: bpf_jit_disasm [-o: show opcodes]\n");
-			exit(0);
+			break;
+		case 'f':
+			file = optarg;
+			break;
+		default:
+			usage();
+			return -1;
 		}
 	}
 
 	bfd_init();
 	memset(image, 0, sizeof(image));
 
-	kbuff = get_klog_buff(&klen);
+	kbuff = get_log_buff(file, &klen);
+	if (!kbuff) {
+		fprintf(stderr, "Could not retrieve log buffer!\n");
+		return -1;
+	}
 
 	len = get_last_jit_image(kbuff, klen, image, sizeof(image));
 	if (len > 0)
 		get_asm_insns(image, len, opcodes);
+	else
+		fprintf(stderr, "No JIT image found!\n");
 
-	put_klog_buff(kbuff);
-
+	put_log_buff(kbuff);
 	return 0;
 }
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index 09db62b..3d1bb80 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -29,3 +29,4 @@
 *.pyc
 *.pyo
 .config-detected
+util/intel-pt-decoder/inat-tables.c
diff --git a/tools/perf/Build b/tools/perf/Build
index b77370e..7223745 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -35,6 +35,7 @@
 CFLAGS_builtin-help.o      += $(paths)
 CFLAGS_builtin-timechart.o += $(paths)
 CFLAGS_perf.o              += -DPERF_HTML_PATH="BUILD_STR($(htmldir_SQ))" -include $(OUTPUT)PERF-VERSION-FILE
+CFLAGS_builtin-trace.o	   += -DSTRACE_GROUPS_DIR="BUILD_STR($(STRACE_GROUPS_DIR_SQ))"
 
 libperf-y += util/
 libperf-y += arch/
diff --git a/tools/perf/Documentation/intel-bts.txt b/tools/perf/Documentation/intel-bts.txt
new file mode 100644
index 0000000..8bdc93b
--- /dev/null
+++ b/tools/perf/Documentation/intel-bts.txt
@@ -0,0 +1,86 @@
+Intel Branch Trace Store
+========================
+
+Overview
+========
+
+Intel BTS could be regarded as a predecessor to Intel PT and has some
+similarities because it can also identify every branch a program takes.  A
+notable difference is that Intel BTS has no timing information and as a
+consequence the present implementation is limited to per-thread recording.
+
+While decoding Intel BTS does not require walking the object code, the object
+code is still needed to pair up calls and returns correctly, consequently much
+of the Intel PT documentation applies also to Intel BTS.  Refer to the Intel PT
+documentation and consider that the PMU 'intel_bts' can usually be used in
+place of 'intel_pt' in the examples provided, with the proviso that per-thread
+recording must also be stipulated, i.e. the --per-thread option for
+'perf record'.
+
+
+perf record
+===========
+
+new event
+---------
+
+The Intel BTS kernel driver creates a new PMU for Intel BTS.  The perf record
+option is:
+
+	-e intel_bts//
+
+Currently Intel BTS is limited to per-thread tracing so the --per-thread option
+is also needed.
+
+
+snapshot option
+---------------
+
+The snapshot option is the same as Intel PT (refer Intel PT documentation).
+
+
+auxtrace mmap size option
+-------------------------
+
+The mmap size option is the same as Intel PT (refer Intel PT documentation).
+
+
+perf script
+===========
+
+By default, perf script will decode trace data found in the perf.data file.
+This can be further controlled by option --itrace.  The --itrace option is
+the same as Intel PT (refer Intel PT documentation) except that neither
+"instructions" events nor "transactions" events (and consequently call
+chains) are supported.
+
+To disable trace decoding entirely, use the option --no-itrace.
+
+
+dump option
+-----------
+
+perf script has an option (-D) to "dump" the events, i.e. display the binary
+data.
+
+When -D is used, Intel BTS packets are displayed.
+
+To disable the display of Intel BTS packets, combine the -D option with
+--no-itrace.
+
+
+perf report
+===========
+
+By default, perf report will decode trace data found in the perf.data file.
+This can be further controlled by the --itrace option, in exactly the same
+way as for perf script.
+
+
+perf inject
+===========
+
+perf inject also accepts the --itrace option, in which case tracing data is
+removed and replaced with the synthesized events, e.g.
+
+	perf inject --itrace -i perf.data -o perf.data.new
diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt
new file mode 100644
index 0000000..4a0501d
--- /dev/null
+++ b/tools/perf/Documentation/intel-pt.txt
@@ -0,0 +1,766 @@
+Intel Processor Trace
+=====================
+
+Overview
+========
+
+Intel Processor Trace (Intel PT) is an extension of Intel Architecture that
+collects information about software execution, such as control flow, execution
+modes and timings, and formats it into highly compressed binary packets.
+Technical details are documented in the Intel 64 and IA-32 Architectures
+Software Developer Manuals, Chapter 36 Intel Processor Trace.
+
+Intel PT is first supported in Intel Core M and 5th generation Intel Core
+processors that are based on the Intel micro-architecture code name Broadwell.
+
+Trace data is collected by 'perf record' and stored within the perf.data file.
+See below for options to 'perf record'.
+
+Trace data must be 'decoded', which involves walking the object code and matching
+the trace data packets. For example, a TNT packet only tells whether a
+conditional branch was taken or not taken, so to make use of that packet the
+decoder must know precisely which instruction was being executed.
+
+Decoding is done on-the-fly.  The decoder outputs samples in the same format as
+samples output by perf hardware events, for example as though the "instructions"
+or "branches" events had been recorded.  Presently 3 tools support this:
+'perf script', 'perf report' and 'perf inject'.  See below for more information
+on using those tools.
+
+The main distinguishing feature of Intel PT is that the decoder can determine
+the exact flow of software execution.  Intel PT can be used to understand why
+and how the software got to a certain point, or behaved a certain way.  The
+software does not have to be recompiled, so Intel PT works with debug or release
+builds, however the executed images are needed - which makes use in JIT-compiled
+environments, or with self-modifying code, a challenge.  Also symbols need to be
+provided to make sense of addresses.
+
+A limitation of Intel PT is that it produces huge amounts of trace data
+(hundreds of megabytes per second per core) which takes a long time to decode,
+for example two or three orders of magnitude longer than it took to collect.
+Another limitation is the performance impact of tracing, something that will
+vary depending on the use-case and architecture.
+
+
+Quickstart
+==========
+
+It is important to start small.  That is because it is easy to capture vastly
+more data than can possibly be processed.
+
+The simplest thing to do with Intel PT is userspace profiling of small programs.
+Data is captured with 'perf record' e.g. to trace 'ls' userspace-only:
+
+	perf record -e intel_pt//u ls
+
+And profiled with 'perf report' e.g.
+
+	perf report
+
+Tracing kernel space as well presents a problem, namely kernel self-modifying
+code.  A fairly good kernel image is available in /proc/kcore but to get an
+accurate image a copy of /proc/kcore needs to be made under the same conditions
+as the data capture.  A script perf-with-kcore can do that, but beware that the
+script makes use of 'sudo' to copy /proc/kcore.  If you have perf installed
+locally from the source tree you can do:
+
+	~/libexec/perf-core/perf-with-kcore record pt_ls -e intel_pt// -- ls
+
+which will create a directory named 'pt_ls' and put the perf.data file and
+copies of /proc/kcore, /proc/kallsyms and /proc/modules into it.  Then to use
+'perf report' becomes:
+
+	~/libexec/perf-core/perf-with-kcore report pt_ls
+
+Because samples are synthesized after-the-fact, the sampling period can be
+selected for reporting, e.g. to sample every microsecond:
+
+	~/libexec/perf-core/perf-with-kcore report pt_ls --itrace=i1usge
+
+See the sections below for more information about the --itrace option.
+
+Beware that the smaller the period, the more samples are produced and the
+longer it takes to process them.
+
+Also note that the coarseness of Intel PT timing information will start to
+distort the statistical value of the sampling as the sampling period becomes
+smaller.
+
+To represent software control flow, "branches" samples are produced.  By default
+a branch sample is synthesized for every single branch.  To get an idea what
+data is available you can use the 'perf script' tool with no parameters, which
+will list all the samples.
+
+	perf record -e intel_pt//u ls
+	perf script
+
+An interesting field that is not printed by default is 'flags' which can be
+displayed as follows:
+
+	perf script -Fcomm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr,symoff,flags
+
+The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
+system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
+in transaction, respectively.
+
+While it is possible to create scripts to analyze the data, an alternative
+approach is to export the data to a postgresql database.  Refer to the
+script export-to-postgresql.py for more details, and to the script
+call-graph-from-postgresql.py for an example of using the database.
+
+As mentioned above, it is easy to capture too much data.  One way to limit the
+data captured is to use 'snapshot' mode which is explained further below.
+Refer to 'new snapshot option' and 'Intel PT modes of operation' further below.
+
+Another problem that will be experienced is decoder errors.  They can be caused
+by inability to access the executed image, self-modifying or JIT-compiled code,
+or the inability to match side-band information (such as context switches and
+mmaps), which results in the decoder not knowing what code was executed.
+
+There is also the problem of perf not being able to copy the data fast enough,
+resulting in data being lost because the buffer was full.  See 'Buffer handling'
+below for more details.
+
+
+perf record
+===========
+
+new event
+---------
+
+The Intel PT kernel driver creates a new PMU for Intel PT.  PMU events are
+selected by providing the PMU name followed by the "config" separated by slashes.
+An enhancement has been made to allow a default "config", e.g. the option
+
+	-e intel_pt//
+
+will use a default config value.  Currently that is the same as
+
+	-e intel_pt/tsc,noretcomp=0/
+
+which is the same as
+
+	-e intel_pt/tsc=1,noretcomp=0/
+
+Note there are now new config terms - see section 'config terms' further below.
+
+The config terms are listed in /sys/devices/intel_pt/format.  They are bit
+fields within the config member of the struct perf_event_attr which is
+passed to the kernel by the perf_event_open system call.  They correspond to bit
+fields in the IA32_RTIT_CTL MSR.  Here is a list of them and their definitions:
+
+	$ grep -H . /sys/bus/event_source/devices/intel_pt/format/*
+	/sys/bus/event_source/devices/intel_pt/format/cyc:config:1
+	/sys/bus/event_source/devices/intel_pt/format/cyc_thresh:config:19-22
+	/sys/bus/event_source/devices/intel_pt/format/mtc:config:9
+	/sys/bus/event_source/devices/intel_pt/format/mtc_period:config:14-17
+	/sys/bus/event_source/devices/intel_pt/format/noretcomp:config:11
+	/sys/bus/event_source/devices/intel_pt/format/psb_period:config:24-27
+	/sys/bus/event_source/devices/intel_pt/format/tsc:config:10
+
+Note that the default value still applies for each config term that is not
+explicitly overridden, i.e.
+
+	-e intel_pt/noretcomp=0/
+
+is the same as:
+
+	-e intel_pt/tsc=1,noretcomp=0/
+
+So, to disable TSC packets use:
+
+	-e intel_pt/tsc=0/
+
+It is also possible to specify the config value explicitly:
+
+	-e intel_pt/config=0x400/
+
+Note that, as with all events, the event is suffixed with event modifiers:
+
+	u	userspace
+	k	kernel
+	h	hypervisor
+	G	guest
+	H	host
+	p	precise ip
+
+'h', 'G' and 'H' are for virtualization which is not supported by Intel PT.
+'p' is also not relevant to Intel PT.  So only options 'u' and 'k' are
+meaningful for Intel PT.
+
+perf_event_attr is displayed if the -vv option is used e.g.
+
+	------------------------------------------------------------
+	perf_event_attr:
+	type                             6
+	size                             112
+	config                           0x400
+	{ sample_period, sample_freq }   1
+	sample_type                      IP|TID|TIME|CPU|IDENTIFIER
+	read_format                      ID
+	disabled                         1
+	inherit                          1
+	exclude_kernel                   1
+	exclude_hv                       1
+	enable_on_exec                   1
+	sample_id_all                    1
+	------------------------------------------------------------
+	sys_perf_event_open: pid 31104  cpu 0  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 1  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 2  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 3  group_fd -1  flags 0x8
+	------------------------------------------------------------
+
+
+config terms
+------------
+
+The June 2015 version of Intel 64 and IA-32 Architectures Software Developer
+Manuals, Chapter 36 Intel Processor Trace, defined new Intel PT features.
+Some of the features are reflected in new config terms.  All the config terms
+are described below.
+
+tsc		Always supported.  Produces TSC timestamp packets to provide
+		timing information.  In some cases it is possible to decode
+		without timing information, for example a per-thread context
+		that does not overlap executable memory maps.
+
+		The default config selects tsc (i.e. tsc=1).
+
+noretcomp	Always supported.  Disables "return compression" so a TIP packet
+		is produced when a function returns.  Causes more packets to be
+		produced but might make decoding more reliable.
+
+		The default config does not select noretcomp (i.e. noretcomp=0).
+
+psb_period	Allows the frequency of PSB packets to be specified.
+
+		The PSB packet is a synchronization packet that provides a
+		starting point for decoding or recovery from errors.
+
+		Support for psb_period is indicated by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/psb_cyc
+
+		which contains "1" if the feature is supported and "0"
+		otherwise.
+
+		Valid values are given by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/psb_periods
+
+		which contains a hexadecimal value, the bits of which represent
+		valid values e.g. bit 2 set means value 2 is valid.
+
+		The psb_period value is converted to the approximate number of
+		trace bytes between PSB packets as:
+
+			2 ^ (value + 11)
+
+		e.g. value 3 means approximately 16KiB between PSBs
+
+		If an invalid value is entered, the error message
+		will give a list of valid values e.g.
+
+			$ perf record -e intel_pt/psb_period=15/u uname
+			Invalid psb_period for intel_pt. Valid values are: 0-5
+
+		If MTC packets are selected, the default config selects a value
+		of 3 (i.e. psb_period=3) or the nearest lower value that is
+		supported (0 is always supported).  Otherwise the default is 0.
+
+		If decoding is expected to be reliable and the buffer is large
+		then a large PSB period can be used.
+
+		Because a TSC packet is produced with PSB, the PSB period can
+		also affect the granularity of timing information in the absence
+		of MTC or CYC.
+
+mtc		Produces MTC timing packets.
+
+		MTC packets provide finer grain timestamp information than TSC
+		packets.  MTC packets record time using the hardware crystal
+		clock (CTC) which is related to TSC packets using a TMA packet.
+
+		Support for this feature is indicated by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/mtc
+
+		which contains "1" if the feature is supported and
+		"0" otherwise.
+
+		The frequency of MTC packets can also be specified - see
+		mtc_period below.
+
+mtc_period	Specifies how frequently MTC packets are produced - see mtc
+		above for how to determine if MTC packets are supported.
+
+		Valid values are given by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/mtc_periods
+
+		which contains a hexadecimal value, the bits of which represent
+		valid values e.g. bit 2 set means value 2 is valid.
+
+		The mtc_period value is converted to the MTC frequency as:
+
+			CTC-frequency / (2 ^ value)
+
+		e.g. value 3 means one eighth of CTC-frequency
+
+		Where CTC is the hardware crystal clock, the frequency of which
+		can be related to TSC via values provided in cpuid leaf 0x15.
+
+		If an invalid value is entered, the error message
+		will give a list of valid values e.g.
+
+			$ perf record -e intel_pt/mtc_period=15/u uname
+			Invalid mtc_period for intel_pt. Valid values are: 0,3,6,9
+
+		The default value is 3 or the nearest lower value
+		that is supported (0 is always supported).
+
+cyc		Produces CYC timing packets.
+
+		CYC packets provide even finer grain timestamp information than
+		MTC and TSC packets.  A CYC packet contains the number of CPU
+		cycles since the last CYC packet. Unlike MTC and TSC packets,
+		CYC packets are only sent when another packet is also sent.
+
+		Support for this feature is indicated by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/psb_cyc
+
+		which contains "1" if the feature is supported and
+		"0" otherwise.
+
+		The number of CYC packets produced can be reduced by specifying
+		a threshold - see cyc_thresh below.
+
+cyc_thresh	Specifies how frequently CYC packets are produced - see cyc
+		above for how to determine if CYC packets are supported.
+
+		Valid cyc_thresh values are given by:
+
+			/sys/bus/event_source/devices/intel_pt/caps/cycle_thresholds
+
+		which contains a hexadecimal value, the bits of which represent
+		valid values e.g. bit 2 set means value 2 is valid.
+
+		The cyc_thresh value represents the minimum number of CPU cycles
+		that must have passed before a CYC packet can be sent.  The
+		number of CPU cycles is:
+
+			2 ^ (value - 1)
+
+		e.g. value 4 means 8 CPU cycles must pass before a CYC packet
+		can be sent.  Note a CYC packet is still only sent when another
+		packet is sent, not automatically every 8 CPU cycles.
+
+		If an invalid value is entered, the error message
+		will give a list of valid values e.g.
+
+			$ perf record -e intel_pt/cyc,cyc_thresh=15/u uname
+			Invalid cyc_thresh for intel_pt. Valid values are: 0-12
+
+		CYC packets are not requested by default.
+
+no_force_psb	This is a driver option and is not in the IA32_RTIT_CTL MSR.
+
+		It stops the driver from resetting the byte count to zero
+		whenever the trace is enabled (for example on context switches),
+		which in turn results in no PSB being forced.  However, some
+		processors will produce a PSB anyway.
+
+		In any case, there is still a PSB when the trace is enabled for
+		the first time.
+
+		no_force_psb can be used to slightly decrease the trace size but
+		may make it harder for the decoder to recover from errors.
+
+		no_force_psb is not selected by default.
+
+
+new snapshot option
+-------------------
+
+The difference between full trace and snapshot from the kernel's perspective is
+that in full trace we don't overwrite trace data that the user hasn't collected
+yet (and indicated that by advancing aux_tail), whereas in snapshot mode we let
+the trace run and overwrite older data in the buffer so that whenever something
+interesting happens, we can stop it and grab a snapshot of what was going on
+around that interesting moment.
+
+To select snapshot mode a new option has been added:
+
+	-S
+
+Optionally it can be followed by the snapshot size e.g.
+
+	-S0x100000
+
+The default snapshot size is the auxtrace mmap size.  If neither auxtrace mmap size
+nor snapshot size is specified, then the default is 4MiB for privileged users
+(or if /proc/sys/kernel/perf_event_paranoid < 0), 128KiB for unprivileged users.
+If an unprivileged user does not specify mmap pages, the mmap pages will be
+reduced as described in the 'new auxtrace mmap size option' section below.
+
+The snapshot size is displayed if the option -vv is used e.g.
+
+	Intel PT snapshot size: %zu
+
+
+new auxtrace mmap size option
+-----------------------------
+
+Intel PT buffer size is specified by an addition to the -m option e.g.
+
+	-m,16
+
+selects a buffer size of 16 pages i.e. 64KiB.
+
+Note that the existing functionality of -m is unchanged.  The auxtrace mmap size
+is specified by the optional addition of a comma and the value.
+
+The default auxtrace mmap size for Intel PT is 4MiB/page_size for privileged users
+(or if /proc/sys/kernel/perf_event_paranoid < 0), 128KiB for unprivileged users.
+If an unprivileged user does not specify mmap pages, the mmap pages will be
+reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
+user is likely to get an error as they exceed their mlock limit (Max locked
+memory as shown in /proc/self/limits).  Note that perf does not count the first
+512KiB (actually /proc/sys/kernel/perf_event_mlock_kb minus 1 page) per cpu
+against the mlock limit so an unprivileged user is allowed 512KiB per cpu plus
+their mlock limit (which defaults to 64KiB but is not multiplied by the number
+of cpus).
+
+In full-trace mode, powers of two are allowed for buffer size, with a minimum
+size of 2 pages.  In snapshot mode, it is the same but the minimum size is
+1 page.
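+
+For example, a full-trace recording with a 16-page (64KiB) AUX area buffer
+might be invoked as follows (illustrative only):
+
+	perf record -m,16 -e intel_pt//u uname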
+
+The mmap size and auxtrace mmap size are displayed if the -vv option is used e.g.
+
+	mmap length 528384
+	auxtrace mmap length 4198400
+
+
+Intel PT modes of operation
+---------------------------
+
+Intel PT can be used in 2 modes:
+	full-trace mode
+	snapshot mode
+
+Full-trace mode traces continuously e.g.
+
+	perf record -e intel_pt//u uname
+
+Snapshot mode captures the available data when a signal is sent e.g.
+
+	perf record -v -e intel_pt//u -S ./loopy 1000000000 &
+	[1] 11435
+	kill -USR2 11435
+	Recording AUX area tracing snapshot
+
+Note that the signal sent is SIGUSR2.
+Note that "Recording AUX area tracing snapshot" is displayed because the -v
+option is used.
+
+The 2 modes cannot be used together.
+
+
+Buffer handling
+---------------
+
+There may be buffer limitations (i.e. single ToPa entry) which means that actual
+buffer sizes are limited to powers of 2 up to 4MiB (MAX_ORDER).  In order to
+provide other sizes, and in particular an arbitrarily large size, multiple
+buffers are logically concatenated.  However an interrupt must be used to switch
+between buffers.  That has two potential problems:
+	a) the interrupt may not be handled in time so that the current buffer
+	becomes full and some trace data is lost.
+	b) the interrupts may slow the system and affect the performance
+	results.
+
+If trace data is lost, the driver sets 'truncated' in the PERF_RECORD_AUX event
+which the tools report as an error.
+
+In full-trace mode, the driver waits for data to be copied out before allowing
+the (logical) buffer to wrap-around.  If data is not copied out quickly enough,
+again 'truncated' is set in the PERF_RECORD_AUX event.  If the driver has to
+wait, the intel_pt event gets disabled.  Because it is difficult to know when
+that happens, perf tools always re-enable the intel_pt event after copying out
+data.
+
+
+Intel PT and build ids
+----------------------
+
+By default "perf record" post-processes the event stream to find all build ids
+for executables for all addresses sampled.  Deliberately, Intel PT is not
+decoded for that purpose (it would take too long).  Instead the build ids for
+all executables encountered (due to mmap, comm or task events) are included
+in the perf.data file.
+
+To see buildids included in the perf.data file use the command:
+
+	perf buildid-list
+
+If the perf.data file contains Intel PT data, that is the same as:
+
+	perf buildid-list --with-hits
+
+
+Snapshot mode and event disabling
+---------------------------------
+
+In order to make a snapshot, the intel_pt event is disabled using an IOCTL,
+namely PERF_EVENT_IOC_DISABLE.  However, doing that can also disable the
+collection of side-band information.  In order to prevent that, a dummy
+software event has been introduced that permits tracking events (like mmaps) to
+continue to be recorded while intel_pt is disabled.  That is important to ensure
+there is complete side-band information to allow the decoding of subsequent
+snapshots.
+
+A test has been created for that.  To find the test:
+
+	perf test list
+	...
+	23: Test using a dummy software event to keep tracking
+
+To run the test:
+
+	perf test 23
+	23: Test using a dummy software event to keep tracking     : Ok
+
+
+perf record modes (nothing new here)
+------------------------------------
+
+perf record essentially operates in one of three modes:
+	per thread
+	per cpu
+	workload only
+
+"per thread" mode is selected by -t or by --per-thread (with -p or -u or just a
+workload).
+"per cpu" is selected by -C or -a.
+"workload only" mode is selected by not using the other options but providing a
+command to run (i.e. the workload).
+
+In per-thread mode an exact list of threads is traced.  There is no inheritance.
+Each thread has its own event buffer.
+
+In per-cpu mode all processes (or processes from the selected cgroup i.e. -G
+option, or processes selected with -p or -u) are traced.  Each cpu has its own
+buffer. Inheritance is allowed.
+
+In workload-only mode, the workload is traced but with per-cpu buffers.
+Inheritance is allowed.  Note that you can now trace a workload in per-thread
+mode by using the --per-thread option.
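+
+For example, with an illustrative workload './loopy' (as used earlier),
+per-thread, per-cpu and workload-only recording respectively might look like:
+
+	perf record -e intel_pt//u --per-thread ./loopy
+	perf record -e intel_pt//u -a sleep 10
+	perf record -e intel_pt//u ./loopy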
+
+
+Privileged vs non-privileged users
+----------------------------------
+
+Unless /proc/sys/kernel/perf_event_paranoid is set to -1, unprivileged users
+have memory limits imposed upon them.  That affects what buffer sizes they can
+have as outlined above.
+
+Unless /proc/sys/kernel/perf_event_paranoid is set to -1, unprivileged users are
+not permitted to use tracepoints which means there is insufficient side-band
+information to decode Intel PT in per-cpu mode, and potentially workload-only
+mode too if the workload creates new processes.
+
+Note also, that to use tracepoints, read-access to debugfs is required.  So if
+debugfs is not mounted or the user does not have read-access, it will again not
+be possible to decode Intel PT in per-cpu mode.
+
+
+sched_switch tracepoint
+-----------------------
+
+The sched_switch tracepoint is used to provide side-band data for Intel PT
+decoding.  sched_switch events are automatically added, e.g. the second event
+shown below
+
+	$ perf record -vv -e intel_pt//u uname
+	------------------------------------------------------------
+	perf_event_attr:
+	type                             6
+	size                             112
+	config                           0x400
+	{ sample_period, sample_freq }   1
+	sample_type                      IP|TID|TIME|CPU|IDENTIFIER
+	read_format                      ID
+	disabled                         1
+	inherit                          1
+	exclude_kernel                   1
+	exclude_hv                       1
+	enable_on_exec                   1
+	sample_id_all                    1
+	------------------------------------------------------------
+	sys_perf_event_open: pid 31104  cpu 0  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 1  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 2  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 3  group_fd -1  flags 0x8
+	------------------------------------------------------------
+	perf_event_attr:
+	type                             2
+	size                             112
+	config                           0x108
+	{ sample_period, sample_freq }   1
+	sample_type                      IP|TID|TIME|CPU|PERIOD|RAW|IDENTIFIER
+	read_format                      ID
+	inherit                          1
+	sample_id_all                    1
+	exclude_guest                    1
+	------------------------------------------------------------
+	sys_perf_event_open: pid -1  cpu 0  group_fd -1  flags 0x8
+	sys_perf_event_open: pid -1  cpu 1  group_fd -1  flags 0x8
+	sys_perf_event_open: pid -1  cpu 2  group_fd -1  flags 0x8
+	sys_perf_event_open: pid -1  cpu 3  group_fd -1  flags 0x8
+	------------------------------------------------------------
+	perf_event_attr:
+	type                             1
+	size                             112
+	config                           0x9
+	{ sample_period, sample_freq }   1
+	sample_type                      IP|TID|TIME|IDENTIFIER
+	read_format                      ID
+	disabled                         1
+	inherit                          1
+	exclude_kernel                   1
+	exclude_hv                       1
+	mmap                             1
+	comm                             1
+	enable_on_exec                   1
+	task                             1
+	sample_id_all                    1
+	mmap2                            1
+	comm_exec                        1
+	------------------------------------------------------------
+	sys_perf_event_open: pid 31104  cpu 0  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 1  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 2  group_fd -1  flags 0x8
+	sys_perf_event_open: pid 31104  cpu 3  group_fd -1  flags 0x8
+	mmap size 528384B
+	AUX area mmap length 4194304
+	perf event ring buffer mmapped per cpu
+	Synthesizing auxtrace information
+	Linux
+	[ perf record: Woken up 1 times to write data ]
+	[ perf record: Captured and wrote 0.042 MB perf.data ]
+
+Note, the sched_switch event is only added if the user is permitted to use it
+and only in per-cpu mode.
+
+Note also, the sched_switch event is only added if TSC packets are requested.
+That is because, in the absence of timing information, the sched_switch events
+cannot be matched against the Intel PT trace.
+
+
+perf script
+===========
+
+By default, perf script will decode trace data found in the perf.data file.
+This can be further controlled by new option --itrace.
+
+
+New --itrace option
+-------------------
+
+Having no option is the same as
+
+	--itrace
+
+which, in turn, is the same as
+
+	--itrace=ibxe
+
+The letters are:
+
+	i	synthesize "instructions" events
+	b	synthesize "branches" events
+	x	synthesize "transactions" events
+	c	synthesize branches events (calls only)
+	r	synthesize branches events (returns only)
+	e	synthesize tracing error events
+	d	create a debug log
+	g	synthesize a call chain (use with i or x)
+
+"Instructions" events look like they were recorded by "perf record -e
+instructions".
+
+"Branches" events look like they were recorded by "perf record -e branches". "c"
+and "r" can be combined to get calls and returns.
+
+"Transactions" events correspond to the start or end of transactions. The
+'flags' field can be used in perf script to determine whether the event is a
+transaction start, commit or abort.
+
+Error events are new.  They show where the decoder lost the trace.  Error events
+are quite important.  Users must know if what they are seeing is a complete
+picture or not.
+
+The "d" option will cause the creation of a file "intel_pt.log" containing all
+decoded packets and instructions.  Note that this option slows down the decoder
+and that the resulting file may be very large.
+
+In addition, the period of the "instructions" event can be specified. e.g.
+
+	--itrace=i10us
+
+sets the period to 10us, i.e. one instruction sample is synthesized for every
+10 microseconds of trace.  Alternatives to "us" are "ms" (milliseconds),
+"ns" (nanoseconds), "t" (TSC ticks) or "i" (instructions).
+
+"ms", "us" and "ns" are converted to TSC ticks.
+
+The timing information included with Intel PT does not give the time of every
+instruction.  Consequently, for the purpose of sampling, the decoder estimates
+the time since the last timing packet based on 1 tick per instruction.  The time
+on the sample is *not* adjusted and reflects the last known value of TSC.
+
+For Intel PT, the default period is 100us.
+
+Also the call chain size (default 16, max. 1024) for instructions or
+transactions events can be specified. e.g.
+
+	--itrace=ig32
+	--itrace=xg32
+
+To disable trace decoding entirely, use the option --no-itrace.
+
+
+dump option
+-----------
+
+perf script has an option (-D) to "dump" the events i.e. display the binary
+data.
+
+When -D is used, Intel PT packets are displayed.  The packet decoder does not
+pay attention to PSB packets, but just decodes the bytes - so the packets seen
+by the actual decoder may not be identical in places where the data is corrupt.
+One example of that would be when the buffer-switching interrupt has been too
+slow, and the buffer has been filled completely.  In that case, the last packet
+in the buffer might be truncated and immediately followed by a PSB as the trace
+continues in the next buffer.
+
+To disable the display of Intel PT packets, combine the -D option with
+--no-itrace.
+
+
+perf report
+===========
+
+By default, perf report will decode trace data found in the perf.data file.
+This can be further controlled by the --itrace option, exactly as for
+perf script, with the exception that the default is --itrace=igxe.
+
+
+perf inject
+===========
+
+perf inject also accepts the --itrace option, in which case tracing data is
+removed and replaced with the synthesized events, e.g.
+
+	perf inject --itrace -i perf.data -o perf.data.new
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
new file mode 100644
index 0000000..2ff9466
--- /dev/null
+++ b/tools/perf/Documentation/itrace.txt
@@ -0,0 +1,22 @@
+		i	synthesize instructions events
+		b	synthesize branches events
+		c	synthesize branches events (calls only)
+		r	synthesize branches events (returns only)
+		x	synthesize transactions events
+		e	synthesize error events
+		d	create a debug log
+		g	synthesize a call chain (use with i or x)
+
+	The default is all events i.e. the same as --itrace=ibxe
+
+	In addition, the period (default 100000) for instructions events
+	can be specified in units of:
+
+		i	instructions
+		t	ticks
+		ms	milliseconds
+		us	microseconds
+		ns	nanoseconds (default)
+
+	Also the call chain size (default 16, max. 1024) for instructions or
+	transactions events can be specified.
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index bf3d064..ab632d9 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -216,6 +216,10 @@
 *requeue*::
 Suite for evaluating requeue calls.
 
+*lock-pi*::
+Suite for evaluating futex lock_pi calls.
+
+
 SEE ALSO
 --------
 linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index b876ae3..0c721c3 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -48,28 +48,7 @@
 	Decode Instruction Tracing data, replacing it with synthesized events.
 	Options are:
 
-		i	synthesize instructions events
-		b	synthesize branches events
-		c	synthesize branches events (calls only)
-		r	synthesize branches events (returns only)
-		x	synthesize transactions events
-		e	synthesize error events
-		d	create a debug log
-		g	synthesize a call chain (use with i or x)
-
-	The default is all events i.e. the same as --itrace=ibxe
-
-	In addition, the period (default 100000) for instructions events
-	can be specified in units of:
-
-		i	instructions
-		t	ticks
-		ms	milliseconds
-		us	microseconds
-		ns	nanoseconds (default)
-
-	Also the call chain size (default 16, max. 1024) for instructions or
-	transactions events can be specified.
+include::itrace.txt[]
 
 SEE ALSO
 --------
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 9b9d9d0..2e9ce77 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -45,6 +45,21 @@
           param1 and param2 are defined as formats for the PMU in:
           /sys/bus/event_sources/devices/<pmu>/format/*
 
+	  There are also some params which are not defined in .../<pmu>/format/*.
+	  These params can be used to override default config values per event.
+	  Here is a list of the params.
+	  - 'period': Set event sampling period
+	  - 'freq': Set event sampling frequency
+	  - 'time': Disable/enable time stamping. Acceptable values are 1 for
+		    enabling time stamping and 0 for disabling it.
+		    The default is 1.
+	  - 'call-graph': Disable/enable callgraph. Acceptable values are "fp" for
+			 FP mode, "dwarf" for DWARF mode, "lbr" for LBR mode and
+			 "no" to disable callgraph.
+	  - 'stack-size': user stack size for dwarf mode
+	  Note: If the user explicitly sets options that conflict with these params,
+	  the value set by the params will be overridden.
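+
+	  For example, an illustrative event specification using these params:
+
+		-e cycles/period=100000,call-graph=fp/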
+
         - a hardware breakpoint event in the form of '\mem:addr[/len][:access]'
           where addr is the address in memory you want to break in.
           Access is the memory access type (read, write, execute) it can
@@ -61,7 +76,16 @@
 	  "perf report" to view group events together.
 
 --filter=<filter>::
-        Event filter.
+        Event filter. This option should follow an event selector (-e) which
+	selects tracepoint event(s). Multiple '--filter' options are combined
+	using '&&'.
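+
+	For example, an illustrative tracepoint filter:
+
+		perf record -e sched:sched_switch --filter 'prev_pid != 0' -a sleep 1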
+
+--exclude-perf::
+	Don't record events issued by perf itself. This option should follow
+	an event selector (-e) which selects tracepoint event(s). It adds a
+	filter expression 'common_pid != $PERFPID' to the filters. If other
+	'--filter' options exist, the new filter expression will be combined
+	with them using '&&'.
 
 -a::
 --all-cpus::
@@ -252,7 +276,11 @@
 --intr-regs::
 Capture machine state (registers) at interrupt, i.e., on counter overflows for
 each sample. List of captured registers depends on the architecture. This option
-is off by default.
+is off by default. It is possible to select the registers to sample using their
+symbolic names, e.g. on x86, ax, si. To list the available registers use
+--intr-regs=\?. To name registers, pass a comma separated list such as
+--intr-regs=ax,bx. The list of registers is architecture dependent.
+
 
 --running-time::
 Record running and enabled time for read events (:S)
@@ -276,6 +304,10 @@
 because the file may be huge. A time out is needed in such cases.
 This option sets the time out limit. The default value is 500 ms.
 
+--switch-events::
+Record context switch events i.e. events of type PERF_RECORD_SWITCH or
+PERF_RECORD_SWITCH_CPU_WIDE.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index c33b69f..9c7981b 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -81,6 +81,8 @@
 	- cpu: cpu number the task ran at the time of sample
 	- srcline: filename and line number executed at the time of sample.  The
 	DWARF debugging info must be provided.
+	- srcfile: file name of the source file of the sample. Requires dwarf
+	information.
 	- weight: Event specific weight, e.g. memory latency or transaction
 	abort cost. This is the global weight.
 	- local_weight: Local weight version of the weight above.
@@ -109,6 +111,7 @@
 	- mispredict: "N" for predicted branch, "Y" for mispredicted branch
 	- in_tx: branch in TSX transaction
 	- abort: TSX transaction abort.
+	- cycles: Cycles in basic block
 
 	And default sort keys are changed to comm, dso_from, symbol_from, dso_to
 	and symbol_to, see '--branch-stack'.
@@ -328,31 +331,23 @@
 --itrace::
 	Options for decoding instruction tracing data. The options are:
 
-		i	synthesize instructions events
-		b	synthesize branches events
-		c	synthesize branches events (calls only)
-		r	synthesize branches events (returns only)
-		x	synthesize transactions events
-		e	synthesize error events
-		d	create a debug log
-		g	synthesize a call chain (use with i or x)
-
-	The default is all events i.e. the same as --itrace=ibxe
-
-	In addition, the period (default 100000) for instructions events
-	can be specified in units of:
-
-		i	instructions
-		t	ticks
-		ms	milliseconds
-		us	microseconds
-		ns	nanoseconds (default)
-
-	Also the call chain size (default 16, max. 1024) for instructions or
-	transactions events can be specified.
+include::itrace.txt[]
 
 	To disable decoding entirely, use --no-itrace.
 
+--full-source-path::
+	Show the full path for source files for srcline output.
+
+--show-ref-call-graph::
+	When multiple events are sampled, it may not be necessary to collect
+	callgraphs for all of them. The sample sites are usually nearby,
+	and it is enough to collect the callgraphs on a reference event.
+	So the user can use the "call-graph=no" event modifier to disable
+	callgraphs for the other events and reduce the overhead.
+	However, perf report cannot normally show callgraphs for the events
+	that disabled the callgraph.
+	This option extends perf report to show the reference callgraphs,
+	collected by the reference event, for the events without callgraphs.
 
 include::callchain-overhead-calculation.txt[]
 
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index c82df57..dc3ec78 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -116,7 +116,7 @@
 --fields::
         Comma separated list of fields to print. Options are:
         comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
-	srcline, period, flags.
+	srcline, period, iregs, flags.
         Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -f sw:comm,tid,time,ip,sym  and -f trace:time,cpu,trace
@@ -222,6 +222,17 @@
 --show-mmap-events
 	Display mmap related events (e.g. MMAP, MMAP2).
 
+--show-switch-events
+	Display context switch events i.e. events of type PERF_RECORD_SWITCH or
+	PERF_RECORD_SWITCH_CPU_WIDE.
+
+--demangle::
+	Demangle symbol names to human readable form. It's enabled by default,
+	disable with --no-demangle.
+
+--demangle-kernel::
+	Demangle kernel symbol names to human readable form (for C++ kernels).
+
 --header
 	Show perf.data header.
 
@@ -231,31 +242,13 @@
 --itrace::
 	Options for decoding instruction tracing data. The options are:
 
-		i	synthesize instructions events
-		b	synthesize branches events
-		c	synthesize branches events (calls only)
-		r	synthesize branches events (returns only)
-		x	synthesize transactions events
-		e	synthesize error events
-		d	create a debug log
-		g	synthesize a call chain (use with i or x)
-
-	The default is all events i.e. the same as --itrace=ibxe
-
-	In addition, the period (default 100000) for instructions events
-	can be specified in units of:
-
-		i	instructions
-		t	ticks
-		ms	milliseconds
-		us	microseconds
-		ns	nanoseconds (default)
-
-	Also the call chain size (default 16, max. 1024) for instructions or
-	transactions events can be specified.
+include::itrace.txt[]
 
 	To disable decoding entirely, use --no-itrace.
 
+--full-source-path::
+	Show the full path for source files for srcline output.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 776aec4..f6a23eb 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -208,6 +208,27 @@
 	This option sets the time out limit. The default value is 500 ms.
 
 
+-b::
+--branch-any::
+	Enable taken branch stack sampling. Any type of taken branch may be sampled.
+	This is a shortcut for --branch-filter any. See --branch-filter for more details.
+
+-j::
+--branch-filter::
+	Enable taken branch stack sampling. Each sample captures a series of consecutive
+	taken branches. The number of branches captured with each sample depends on the
+	underlying hardware, the type of branches of interest, and the executed code.
+	It is possible to select the types of branches captured by enabling filters.
+	For a full list of modifiers please see the perf record manpage.
+
+	The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
+	The privilege levels may be omitted, in which case, the privilege levels of the associated
+	event are applied to the branch filter. Both kernel (k) and hypervisor (hv) privilege
+	levels are subject to permissions.  When sampling on multiple events, branch stack sampling
+	is enabled for all the sampling events. The sampled branch type is the same for all events.
+	The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k
+	Note that this feature may not be available on all processors.
+
 INTERACTIVE PROMPTING KEYS
 --------------------------
 
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index d01a0aa..af009bd 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -18,6 +18,7 @@
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
 tools/lib/api
+tools/lib/bpf
 tools/lib/hweight.c
 tools/lib/rbtree.c
 tools/lib/symbol/kallsyms.c
@@ -40,7 +41,6 @@
 tools/include/linux/atomic.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
-tools/include/linux/export.h
 tools/include/linux/hash.h
 tools/include/linux/kernel.h
 tools/include/linux/list.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index bba3463..d9863cb 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -76,6 +76,12 @@
 #
 # Define NO_AUXTRACE if you do not want AUX area tracing support
 
+# As per kernel Makefile, avoid funny character set dependencies
+unexport LC_ALL
+LC_COLLATE=C
+LC_NUMERIC=C
+export LC_COLLATE LC_NUMERIC
+
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(shell pwd)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
@@ -135,6 +141,7 @@
 FLEX    = flex
 BISON   = bison
 STRIP   = strip
+AWK     = awk
 
 LIB_DIR          = $(srctree)/tools/lib/api/
 TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
@@ -289,7 +296,7 @@
 
 PERF_IN := $(OUTPUT)perf-in.o
 
-export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX
+export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
 build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 
 $(PERF_IN): $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h FORCE
@@ -507,6 +514,11 @@
 		$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 	$(call QUIET_INSTALL, perf-with-kcore) \
 		$(INSTALL) $(OUTPUT)perf-with-kcore -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ifndef NO_LIBAUDIT
+	$(call QUIET_INSTALL, strace/groups) \
+		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'; \
+		$(INSTALL) trace/strace/groups/* -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
+endif
 ifndef NO_LIBPERL
 	$(call QUIET_INSTALL, perl-scripts) \
 		$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
@@ -560,7 +572,8 @@
 	$(Q)find . -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
 	$(Q)$(RM) $(OUTPUT).config-detected
 	$(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
-	$(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex*
+	$(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
+		$(OUTPUT)util/intel-pt-decoder/inat-tables.c
 	$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
 	$(python-clean)
 
diff --git a/tools/perf/arch/alpha/Build b/tools/perf/arch/alpha/Build
new file mode 100644
index 0000000..1bb8bf6
--- /dev/null
+++ b/tools/perf/arch/alpha/Build
@@ -0,0 +1 @@
+# empty
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index b7bb42c..b00dfd92 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -128,7 +128,7 @@
 	return arch;
 }
 
-static int perf_session_env__lookup_binutils_path(struct perf_session_env *env,
+static int perf_session_env__lookup_binutils_path(struct perf_env *env,
 						  const char *name,
 						  const char **path)
 {
@@ -206,7 +206,7 @@
 	return -1;
 }
 
-int perf_session_env__lookup_objdump(struct perf_session_env *env)
+int perf_session_env__lookup_objdump(struct perf_env *env)
 {
 	/*
 	 * For live mode, env->arch will be NULL and we can use
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index ede246e..20176df 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -5,6 +5,6 @@
 
 extern const char *objdump_path;
 
-int perf_session_env__lookup_objdump(struct perf_session_env *env);
+int perf_session_env__lookup_objdump(struct perf_env *env);
 
 #endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/mips/Build b/tools/perf/arch/mips/Build
new file mode 100644
index 0000000..1bb8bf6
--- /dev/null
+++ b/tools/perf/arch/mips/Build
@@ -0,0 +1 @@
+# empty
diff --git a/tools/perf/arch/parisc/Build b/tools/perf/arch/parisc/Build
new file mode 100644
index 0000000..1bb8bf6
--- /dev/null
+++ b/tools/perf/arch/parisc/Build
@@ -0,0 +1 @@
+# empty
diff --git a/tools/perf/arch/sh/util/dwarf-regs.c b/tools/perf/arch/sh/util/dwarf-regs.c
index 0d0897f..f8dfa89 100644
--- a/tools/perf/arch/sh/util/dwarf-regs.c
+++ b/tools/perf/arch/sh/util/dwarf-regs.c
@@ -51,5 +51,5 @@
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= SH_MAX_REGS) ? sh_regs_table[n] : NULL;
+	return (n < SH_MAX_REGS) ? sh_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/sparc/util/dwarf-regs.c b/tools/perf/arch/sparc/util/dwarf-regs.c
index 92eda41..b704fdb 100644
--- a/tools/perf/arch/sparc/util/dwarf-regs.c
+++ b/tools/perf/arch/sparc/util/dwarf-regs.c
@@ -39,5 +39,5 @@
  */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
+	return (n < SPARC_MAX_REGS) ? sparc_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index cfbccc4..ff63649 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,8 +1,14 @@
 libperf-y += header.o
 libperf-y += tsc.o
+libperf-y += pmu.o
 libperf-y += kvm-stat.o
+libperf-y += perf_regs.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 
 libperf-$(CONFIG_LIBUNWIND)          += unwind-libunwind.o
 libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+
+libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+libperf-$(CONFIG_AUXTRACE) += intel-pt.o
+libperf-$(CONFIG_AUXTRACE) += intel-bts.o
diff --git a/tools/perf/arch/x86/util/auxtrace.c b/tools/perf/arch/x86/util/auxtrace.c
new file mode 100644
index 0000000..7a78055
--- /dev/null
+++ b/tools/perf/arch/x86/util/auxtrace.c
@@ -0,0 +1,83 @@
+/*
+ * auxtrace.c: AUX area tracing support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdbool.h>
+
+#include "../../util/header.h"
+#include "../../util/debug.h"
+#include "../../util/pmu.h"
+#include "../../util/auxtrace.h"
+#include "../../util/intel-pt.h"
+#include "../../util/intel-bts.h"
+#include "../../util/evlist.h"
+
+static
+struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
+						    int *err)
+{
+	struct perf_pmu *intel_pt_pmu;
+	struct perf_pmu *intel_bts_pmu;
+	struct perf_evsel *evsel;
+	bool found_pt = false;
+	bool found_bts = false;
+
+	intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
+	intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
+
+	if (evlist) {
+		evlist__for_each(evlist, evsel) {
+			if (intel_pt_pmu &&
+			    evsel->attr.type == intel_pt_pmu->type)
+				found_pt = true;
+			if (intel_bts_pmu &&
+			    evsel->attr.type == intel_bts_pmu->type)
+				found_bts = true;
+		}
+	}
+
+	if (found_pt && found_bts) {
+		pr_err("intel_pt and intel_bts may not be used together\n");
+		*err = -EINVAL;
+		return NULL;
+	}
+
+	if (found_pt)
+		return intel_pt_recording_init(err);
+
+	if (found_bts)
+		return intel_bts_recording_init(err);
+
+	return NULL;
+}
+
+struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
+					      int *err)
+{
+	char buffer[64];
+	int ret;
+
+	*err = 0;
+
+	ret = get_cpuid(buffer, sizeof(buffer));
+	if (ret) {
+		*err = ret;
+		return NULL;
+	}
+
+	if (!strncmp(buffer, "GenuineIntel,", 13))
+		return auxtrace_record__init_intel(evlist, err);
+
+	return NULL;
+}
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index be22dd4..a08de0a 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -71,5 +71,5 @@
 /* Return architecture dependent register string (for kprobe-tracer) */
 const char *get_arch_regstr(unsigned int n)
 {
-	return (n <= ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
+	return (n < ARCH_MAX_REGS) ? arch_regs_table[n] : NULL;
 }
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
new file mode 100644
index 0000000..9b94ce5
--- /dev/null
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -0,0 +1,458 @@
+/*
+ * intel-bts.c: Intel Processor Trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "../../util/cpumap.h"
+#include "../../util/evsel.h"
+#include "../../util/evlist.h"
+#include "../../util/session.h"
+#include "../../util/util.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/tsc.h"
+#include "../../util/auxtrace.h"
+#include "../../util/intel-bts.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+#define KiB_MASK(x) (KiB(x) - 1)
+#define MiB_MASK(x) (MiB(x) - 1)
+
+#define INTEL_BTS_DFLT_SAMPLE_SIZE	KiB(4)
+
+#define INTEL_BTS_MAX_SAMPLE_SIZE	KiB(60)
+
+struct intel_bts_snapshot_ref {
+	void	*ref_buf;
+	size_t	ref_offset;
+	bool	wrapped;
+};
+
+struct intel_bts_recording {
+	struct auxtrace_record		itr;
+	struct perf_pmu			*intel_bts_pmu;
+	struct perf_evlist		*evlist;
+	bool				snapshot_mode;
+	size_t				snapshot_size;
+	int				snapshot_ref_cnt;
+	struct intel_bts_snapshot_ref	*snapshot_refs;
+};
+
+struct branch {
+	u64 from;
+	u64 to;
+	u64 misc;
+};
+
+static size_t intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+{
+	return INTEL_BTS_AUXTRACE_PRIV_SIZE;
+}
+
+static int intel_bts_info_fill(struct auxtrace_record *itr,
+			       struct perf_session *session,
+			       struct auxtrace_info_event *auxtrace_info,
+			       size_t priv_size)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
+	struct perf_event_mmap_page *pc;
+	struct perf_tsc_conversion tc = { .time_mult = 0, };
+	bool cap_user_time_zero = false;
+	int err;
+
+	if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
+		return -EINVAL;
+
+	if (!session->evlist->nr_mmaps)
+		return -EINVAL;
+
+	pc = session->evlist->mmap[0].base;
+	if (pc) {
+		err = perf_read_tsc_conversion(pc, &tc);
+		if (err) {
+			if (err != -EOPNOTSUPP)
+				return err;
+		} else {
+			cap_user_time_zero = tc.time_mult != 0;
+		}
+		if (!cap_user_time_zero)
+			ui__warning("Intel BTS: TSC not available\n");
+	}
+
+	auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS;
+	auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type;
+	auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
+	auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
+	auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
+	auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero;
+	auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode;
+
+	return 0;
+}
+
+static int intel_bts_recording_options(struct auxtrace_record *itr,
+				       struct perf_evlist *evlist,
+				       struct record_opts *opts)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
+	struct perf_evsel *evsel, *intel_bts_evsel = NULL;
+	const struct cpu_map *cpus = evlist->cpus;
+	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+
+	btsr->evlist = evlist;
+	btsr->snapshot_mode = opts->auxtrace_snapshot_mode;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel->attr.type == intel_bts_pmu->type) {
+			if (intel_bts_evsel) {
+				pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
+				return -EINVAL;
+			}
+			evsel->attr.freq = 0;
+			evsel->attr.sample_period = 1;
+			intel_bts_evsel = evsel;
+			opts->full_auxtrace = true;
+		}
+	}
+
+	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
+		pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME " PMU event (-e " INTEL_BTS_PMU_NAME ")\n");
+		return -EINVAL;
+	}
+
+	if (!opts->full_auxtrace)
+		return 0;
+
+	if (opts->full_auxtrace && !cpu_map__empty(cpus)) {
+		pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
+		return -EINVAL;
+	}
+
+	/* Set default sizes for snapshot mode */
+	if (opts->auxtrace_snapshot_mode) {
+		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
+			if (privileged) {
+				opts->auxtrace_mmap_pages = MiB(4) / page_size;
+			} else {
+				opts->auxtrace_mmap_pages = KiB(128) / page_size;
+				if (opts->mmap_pages == UINT_MAX)
+					opts->mmap_pages = KiB(256) / page_size;
+			}
+		} else if (!opts->auxtrace_mmap_pages && !privileged &&
+			   opts->mmap_pages == UINT_MAX) {
+			opts->mmap_pages = KiB(256) / page_size;
+		}
+		if (!opts->auxtrace_snapshot_size)
+			opts->auxtrace_snapshot_size =
+				opts->auxtrace_mmap_pages * (size_t)page_size;
+		if (!opts->auxtrace_mmap_pages) {
+			size_t sz = opts->auxtrace_snapshot_size;
+
+			sz = round_up(sz, page_size) / page_size;
+			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+		}
+		if (opts->auxtrace_snapshot_size >
+				opts->auxtrace_mmap_pages * (size_t)page_size) {
+			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
+			       opts->auxtrace_snapshot_size,
+			       opts->auxtrace_mmap_pages * (size_t)page_size);
+			return -EINVAL;
+		}
+		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
+			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
+			return -EINVAL;
+		}
+		pr_debug2("Intel BTS snapshot size: %zu\n",
+			  opts->auxtrace_snapshot_size);
+	}
+
+	/* Set default sizes for full trace mode */
+	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+		if (privileged) {
+			opts->auxtrace_mmap_pages = MiB(4) / page_size;
+		} else {
+			opts->auxtrace_mmap_pages = KiB(128) / page_size;
+			if (opts->mmap_pages == UINT_MAX)
+				opts->mmap_pages = KiB(256) / page_size;
+		}
+	}
+
+	/* Validate auxtrace_mmap_pages */
+	if (opts->auxtrace_mmap_pages) {
+		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+		size_t min_sz;
+
+		if (opts->auxtrace_snapshot_mode)
+			min_sz = KiB(4);
+		else
+			min_sz = KiB(8);
+
+		if (sz < min_sz || !is_power_of_2(sz)) {
+			pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n",
+			       min_sz / 1024);
+			return -EINVAL;
+		}
+	}
+
+	if (intel_bts_evsel) {
+		/*
+		 * To obtain the auxtrace buffer file descriptor, the auxtrace event
+		 * must come first.
+		 */
+		perf_evlist__to_front(evlist, intel_bts_evsel);
+		/*
+		 * In the case of per-cpu mmaps, we need the CPU on the
+		 * AUX event.
+		 */
+		if (!cpu_map__empty(cpus))
+			perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
+	}
+
+	/* Add dummy event to keep tracking */
+	if (opts->full_auxtrace) {
+		struct perf_evsel *tracking_evsel;
+		int err;
+
+		err = parse_events(evlist, "dummy:u", NULL);
+		if (err)
+			return err;
+
+		tracking_evsel = perf_evlist__last(evlist);
+
+		perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+		tracking_evsel->attr.freq = 0;
+		tracking_evsel->attr.sample_period = 1;
+	}
+
+	return 0;
+}
+
+static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr,
+					    struct record_opts *opts,
+					    const char *str)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	unsigned long long snapshot_size = 0;
+	char *endptr;
+
+	if (str) {
+		snapshot_size = strtoull(str, &endptr, 0);
+		if (*endptr || snapshot_size > SIZE_MAX)
+			return -1;
+	}
+
+	opts->auxtrace_snapshot_mode = true;
+	opts->auxtrace_snapshot_size = snapshot_size;
+
+	btsr->snapshot_size = snapshot_size;
+
+	return 0;
+}
+
+static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused)
+{
+	return rdtsc();
+}
+
+static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr,
+					 int idx)
+{
+	const size_t sz = sizeof(struct intel_bts_snapshot_ref);
+	int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2;
+	struct intel_bts_snapshot_ref *refs;
+
+	if (!new_cnt)
+		new_cnt = 16;
+
+	while (new_cnt <= idx)
+		new_cnt *= 2;
+
+	refs = calloc(new_cnt, sz);
+	if (!refs)
+		return -ENOMEM;
+
+	memcpy(refs, btsr->snapshot_refs, cnt * sz);
+
+	btsr->snapshot_refs = refs;
+	btsr->snapshot_ref_cnt = new_cnt;
+
+	return 0;
+}
+
+static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr)
+{
+	int i;
+
+	for (i = 0; i < btsr->snapshot_ref_cnt; i++)
+		zfree(&btsr->snapshot_refs[i].ref_buf);
+	zfree(&btsr->snapshot_refs);
+}
+
+static void intel_bts_recording_free(struct auxtrace_record *itr)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+
+	intel_bts_free_snapshot_refs(btsr);
+	free(btsr);
+}
+
+static int intel_bts_snapshot_start(struct auxtrace_record *itr)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	struct perf_evsel *evsel;
+
+	evlist__for_each(btsr->evlist, evsel) {
+		if (evsel->attr.type == btsr->intel_bts_pmu->type)
+			return perf_evlist__disable_event(btsr->evlist, evsel);
+	}
+	return -EINVAL;
+}
+
+static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	struct perf_evsel *evsel;
+
+	evlist__for_each(btsr->evlist, evsel) {
+		if (evsel->attr.type == btsr->intel_bts_pmu->type)
+			return perf_evlist__enable_event(btsr->evlist, evsel);
+	}
+	return -EINVAL;
+}
+
+static bool intel_bts_first_wrap(u64 *data, size_t buf_size)
+{
+	int i, a, b;
+
+	b = buf_size >> 3;
+	a = b - 512;
+	if (a < 0)
+		a = 0;
+
+	for (i = a; i < b; i++) {
+		if (data[i])
+			return true;
+	}
+
+	return false;
+}
+
+static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx,
+				   struct auxtrace_mmap *mm, unsigned char *data,
+				   u64 *head, u64 *old)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	bool wrapped;
+	int err;
+
+	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
+		  __func__, idx, (size_t)*old, (size_t)*head);
+
+	if (idx >= btsr->snapshot_ref_cnt) {
+		err = intel_bts_alloc_snapshot_refs(btsr, idx);
+		if (err)
+			goto out_err;
+	}
+
+	wrapped = btsr->snapshot_refs[idx].wrapped;
+	if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
+		btsr->snapshot_refs[idx].wrapped = true;
+		wrapped = true;
+	}
+
+	/*
+	 * In full trace mode 'head' continually increases.  However in snapshot
+	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
+	 * are adjusted to match the full trace case which expects that 'old' is
+	 * always less than 'head'.
+	 */
+	if (wrapped) {
+		*old = *head;
+		*head += mm->len;
+	} else {
+		if (mm->mask)
+			*old &= mm->mask;
+		else
+			*old %= mm->len;
+		if (*old > *head)
+			*head += mm->len;
+	}
+
+	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
+		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
+
+	return 0;
+
+out_err:
+	pr_err("%s: failed, error %d\n", __func__, err);
+	return err;
+}
+
+static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
+{
+	struct intel_bts_recording *btsr =
+			container_of(itr, struct intel_bts_recording, itr);
+	struct perf_evsel *evsel;
+
+	evlist__for_each(btsr->evlist, evsel) {
+		if (evsel->attr.type == btsr->intel_bts_pmu->type)
+			return perf_evlist__enable_event_idx(btsr->evlist,
+							     evsel, idx);
+	}
+	return -EINVAL;
+}
+
+struct auxtrace_record *intel_bts_recording_init(int *err)
+{
+	struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
+	struct intel_bts_recording *btsr;
+
+	if (!intel_bts_pmu)
+		return NULL;
+
+	btsr = zalloc(sizeof(struct intel_bts_recording));
+	if (!btsr) {
+		*err = -ENOMEM;
+		return NULL;
+	}
+
+	btsr->intel_bts_pmu = intel_bts_pmu;
+	btsr->itr.recording_options = intel_bts_recording_options;
+	btsr->itr.info_priv_size = intel_bts_info_priv_size;
+	btsr->itr.info_fill = intel_bts_info_fill;
+	btsr->itr.free = intel_bts_recording_free;
+	btsr->itr.snapshot_start = intel_bts_snapshot_start;
+	btsr->itr.snapshot_finish = intel_bts_snapshot_finish;
+	btsr->itr.find_snapshot = intel_bts_find_snapshot;
+	btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
+	btsr->itr.reference = intel_bts_reference;
+	btsr->itr.read_finish = intel_bts_read_finish;
+	btsr->itr.alignment = sizeof(struct branch);
+	return &btsr->itr;
+}
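
For context, the following is a standalone sketch (not part of the patch) of the 'old'/'head' remapping done by intel_bts_find_snapshot() above: in snapshot mode 'head' is an offset into the AUX buffer, and the two values are adjusted so that 'old' < 'head' as full-trace processing expects. The helper name and the buffer sizes in main() are illustrative assumptions only.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>

/* Mirrors the wrapped/non-wrapped adjustment in intel_bts_find_snapshot(). */
static void adjust_snapshot_range(bool wrapped, size_t buf_len, size_t mask,
				  uint64_t *old, uint64_t *head)
{
	if (wrapped) {
		/* Buffer has wrapped: the whole buffer is valid data. */
		*old = *head;
		*head += buf_len;
	} else {
		/* No wrap yet: valid data lies in [0, head). */
		if (mask)
			*old &= mask;
		else
			*old %= buf_len;
		if (*old > *head)
			*head += buf_len;
	}
}

int main(void)
{
	uint64_t old = 0, head = 4096;

	adjust_snapshot_range(false, 65536, 65536 - 1, &old, &head);
	printf("not wrapped: old=%" PRIu64 " head=%" PRIu64 "\n", old, head);

	old = 0;
	head = 4096;
	adjust_snapshot_range(true, 65536, 65536 - 1, &old, &head);
	printf("wrapped:     old=%" PRIu64 " head=%" PRIu64 "\n", old, head);
	return 0;
}
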
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
new file mode 100644
index 0000000..2ca10d7
--- /dev/null
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -0,0 +1,1007 @@
+/*
+ * intel_pt.c: Intel Processor Trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdbool.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <cpuid.h>
+
+#include "../../perf.h"
+#include "../../util/session.h"
+#include "../../util/event.h"
+#include "../../util/evlist.h"
+#include "../../util/evsel.h"
+#include "../../util/cpumap.h"
+#include "../../util/parse-options.h"
+#include "../../util/parse-events.h"
+#include "../../util/pmu.h"
+#include "../../util/debug.h"
+#include "../../util/auxtrace.h"
+#include "../../util/tsc.h"
+#include "../../util/intel-pt.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+#define KiB_MASK(x) (KiB(x) - 1)
+#define MiB_MASK(x) (MiB(x) - 1)
+
+#define INTEL_PT_DEFAULT_SAMPLE_SIZE	KiB(4)
+
+#define INTEL_PT_MAX_SAMPLE_SIZE	KiB(60)
+
+#define INTEL_PT_PSB_PERIOD_NEAR	256
+
+struct intel_pt_snapshot_ref {
+	void *ref_buf;
+	size_t ref_offset;
+	bool wrapped;
+};
+
+struct intel_pt_recording {
+	struct auxtrace_record		itr;
+	struct perf_pmu			*intel_pt_pmu;
+	int				have_sched_switch;
+	struct perf_evlist		*evlist;
+	bool				snapshot_mode;
+	bool				snapshot_init_done;
+	size_t				snapshot_size;
+	size_t				snapshot_ref_buf_size;
+	int				snapshot_ref_cnt;
+	struct intel_pt_snapshot_ref	*snapshot_refs;
+};
+
+static int intel_pt_parse_terms_with_default(struct list_head *formats,
+					     const char *str,
+					     u64 *config)
+{
+	struct list_head *terms;
+	struct perf_event_attr attr = { .size = 0, };
+	int err;
+
+	terms = malloc(sizeof(struct list_head));
+	if (!terms)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(terms);
+
+	err = parse_events_terms(terms, str);
+	if (err)
+		goto out_free;
+
+	attr.config = *config;
+	err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
+	if (err)
+		goto out_free;
+
+	*config = attr.config;
+out_free:
+	parse_events__free_terms(terms);
+	return err;
+}
+
+static int intel_pt_parse_terms(struct list_head *formats, const char *str,
+				u64 *config)
+{
+	*config = 0;
+	return intel_pt_parse_terms_with_default(formats, str, config);
+}
+
+static u64 intel_pt_masked_bits(u64 mask, u64 bits)
+{
+	const u64 top_bit = 1ULL << 63;
+	u64 res = 0;
+	int i;
+
+	for (i = 0; i < 64; i++) {
+		if (mask & top_bit) {
+			res <<= 1;
+			if (bits & top_bit)
+				res |= 1;
+		}
+		mask <<= 1;
+		bits <<= 1;
+	}
+
+	return res;
+}
+
+static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
+				struct perf_evlist *evlist, u64 *res)
+{
+	struct perf_evsel *evsel;
+	u64 mask;
+
+	*res = 0;
+
+	mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
+	if (!mask)
+		return -EINVAL;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel->attr.type == intel_pt_pmu->type) {
+			*res = intel_pt_masked_bits(mask, evsel->attr.config);
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
+				  struct perf_evlist *evlist)
+{
+	u64 val;
+	int err, topa_multiple_entries;
+	size_t psb_period;
+
+	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
+				"%d", &topa_multiple_entries) != 1)
+		topa_multiple_entries = 0;
+
+	/*
+	 * Use caps/topa_multiple_entries to indicate early hardware that had
+	 * extra frequent PSBs.
+	 */
+	if (!topa_multiple_entries) {
+		psb_period = 256;
+		goto out;
+	}
+
+	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
+	if (err)
+		val = 0;
+
+	psb_period = 1 << (val + 11);
+out:
+	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
+	return psb_period;
+}
+
+static int intel_pt_pick_bit(int bits, int target)
+{
+	int pos, pick = -1;
+
+	for (pos = 0; bits; bits >>= 1, pos++) {
+		if (bits & 1) {
+			if (pos <= target || pick < 0)
+				pick = pos;
+			if (pos >= target)
+				break;
+		}
+	}
+
+	return pick;
+}
+
+static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
+{
+	char buf[256];
+	int mtc, mtc_periods = 0, mtc_period;
+	int psb_cyc, psb_periods, psb_period;
+	int pos = 0;
+	u64 config;
+
+	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
+
+	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
+				&mtc) != 1)
+		mtc = 1;
+
+	if (mtc) {
+		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
+					&mtc_periods) != 1)
+			mtc_periods = 0;
+		if (mtc_periods) {
+			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
+			pos += scnprintf(buf + pos, sizeof(buf) - pos,
+					 ",mtc,mtc_period=%d", mtc_period);
+		}
+	}
+
+	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
+				&psb_cyc) != 1)
+		psb_cyc = 1;
+
+	if (psb_cyc && mtc_periods) {
+		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
+					&psb_periods) != 1)
+			psb_periods = 0;
+		if (psb_periods) {
+			psb_period = intel_pt_pick_bit(psb_periods, 3);
+			pos += scnprintf(buf + pos, sizeof(buf) - pos,
+					 ",psb_period=%d", psb_period);
+		}
+	}
+
+	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
+
+	intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);
+
+	return config;
+}
+
+static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
+					   struct record_opts *opts,
+					   const char *str)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	unsigned long long snapshot_size = 0;
+	char *endptr;
+
+	if (str) {
+		snapshot_size = strtoull(str, &endptr, 0);
+		if (*endptr || snapshot_size > SIZE_MAX)
+			return -1;
+	}
+
+	opts->auxtrace_snapshot_mode = true;
+	opts->auxtrace_snapshot_size = snapshot_size;
+
+	ptr->snapshot_size = snapshot_size;
+
+	return 0;
+}
+
+struct perf_event_attr *
+intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
+{
+	struct perf_event_attr *attr;
+
+	attr = zalloc(sizeof(struct perf_event_attr));
+	if (!attr)
+		return NULL;
+
+	attr->config = intel_pt_default_config(intel_pt_pmu);
+
+	intel_pt_pmu->selectable = true;
+
+	return attr;
+}
+
+static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+{
+	return INTEL_PT_AUXTRACE_PRIV_SIZE;
+}
+
+static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
+{
+	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
+
+	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
+	*n = ebx;
+	*d = eax;
+}
+
+static int intel_pt_info_fill(struct auxtrace_record *itr,
+			      struct perf_session *session,
+			      struct auxtrace_info_event *auxtrace_info,
+			      size_t priv_size)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
+	struct perf_event_mmap_page *pc;
+	struct perf_tsc_conversion tc = { .time_mult = 0, };
+	bool cap_user_time_zero = false, per_cpu_mmaps;
+	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
+	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
+	int err;
+
+	if (priv_size != INTEL_PT_AUXTRACE_PRIV_SIZE)
+		return -EINVAL;
+
+	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
+	intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
+			     &noretcomp_bit);
+	intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
+	mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
+					      "mtc_period");
+	intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);
+
+	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
+
+	if (!session->evlist->nr_mmaps)
+		return -EINVAL;
+
+	pc = session->evlist->mmap[0].base;
+	if (pc) {
+		err = perf_read_tsc_conversion(pc, &tc);
+		if (err) {
+			if (err != -EOPNOTSUPP)
+				return err;
+		} else {
+			cap_user_time_zero = tc.time_mult != 0;
+		}
+		if (!cap_user_time_zero)
+			ui__warning("Intel Processor Trace: TSC not available\n");
+	}
+
+	per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);
+
+	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
+	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
+	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
+	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
+	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
+	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
+	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
+	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
+	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
+	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
+	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
+	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
+	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
+	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
+	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
+	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
+
+	return 0;
+}
+
+static int intel_pt_track_switches(struct perf_evlist *evlist)
+{
+	const char *sched_switch = "sched:sched_switch";
+	struct perf_evsel *evsel;
+	int err;
+
+	if (!perf_evlist__can_select_event(evlist, sched_switch))
+		return -EPERM;
+
+	err = parse_events(evlist, sched_switch, NULL);
+	if (err) {
+		pr_debug2("%s: failed to parse %s, error %d\n",
+			  __func__, sched_switch, err);
+		return err;
+	}
+
+	evsel = perf_evlist__last(evlist);
+
+	perf_evsel__set_sample_bit(evsel, CPU);
+	perf_evsel__set_sample_bit(evsel, TIME);
+
+	evsel->system_wide = true;
+	evsel->no_aux_samples = true;
+	evsel->immediate = true;
+
+	return 0;
+}
+
+static void intel_pt_valid_str(char *str, size_t len, u64 valid)
+{
+	unsigned int val, last = 0, state = 1;
+	int p = 0;
+
+	str[0] = '\0';
+
+	for (val = 0; val <= 64; val++, valid >>= 1) {
+		if (valid & 1) {
+			last = val;
+			switch (state) {
+			case 0:
+				p += scnprintf(str + p, len - p, ",");
+				/* Fall through */
+			case 1:
+				p += scnprintf(str + p, len - p, "%u", val);
+				state = 2;
+				break;
+			case 2:
+				state = 3;
+				break;
+			case 3:
+				state = 4;
+				break;
+			default:
+				break;
+			}
+		} else {
+			switch (state) {
+			case 3:
+				p += scnprintf(str + p, len - p, ",%u", last);
+				state = 0;
+				break;
+			case 4:
+				p += scnprintf(str + p, len - p, "-%u", last);
+				state = 0;
+				break;
+			default:
+				break;
+			}
+			if (state != 1)
+				state = 0;
+		}
+	}
+}
+
+static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
+				    const char *caps, const char *name,
+				    const char *supported, u64 config)
+{
+	char valid_str[256];
+	unsigned int shift;
+	unsigned long long valid;
+	u64 bits;
+	int ok;
+
+	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
+		valid = 0;
+
+	if (supported &&
+	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
+		valid = 0;
+
+	valid |= 1;
+
+	bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);
+
+	config &= bits;
+
+	for (shift = 0; bits && !(bits & 1); shift++)
+		bits >>= 1;
+
+	config >>= shift;
+
+	if (config > 63)
+		goto out_err;
+
+	if (valid & ((u64)1 << config))
+		return 0;
+out_err:
+	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
+	pr_err("Invalid %s for %s. Valid values are: %s\n",
+	       name, INTEL_PT_PMU_NAME, valid_str);
+	return -EINVAL;
+}
+
+static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
+				    struct perf_evsel *evsel)
+{
+	int err;
+
+	if (!evsel)
+		return 0;
+
+	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
+				       "cyc_thresh", "caps/psb_cyc",
+				       evsel->attr.config);
+	if (err)
+		return err;
+
+	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
+				       "mtc_period", "caps/mtc",
+				       evsel->attr.config);
+	if (err)
+		return err;
+
+	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
+					"psb_period", "caps/psb_cyc",
+					evsel->attr.config);
+}
+
+static int intel_pt_recording_options(struct auxtrace_record *itr,
+				      struct perf_evlist *evlist,
+				      struct record_opts *opts)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
+	bool have_timing_info;
+	struct perf_evsel *evsel, *intel_pt_evsel = NULL;
+	const struct cpu_map *cpus = evlist->cpus;
+	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
+	u64 tsc_bit;
+	int err;
+
+	ptr->evlist = evlist;
+	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel->attr.type == intel_pt_pmu->type) {
+			if (intel_pt_evsel) {
+				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
+				return -EINVAL;
+			}
+			evsel->attr.freq = 0;
+			evsel->attr.sample_period = 1;
+			intel_pt_evsel = evsel;
+			opts->full_auxtrace = true;
+		}
+	}
+
+	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
+		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
+		return -EINVAL;
+	}
+
+	if (opts->use_clockid) {
+		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
+		return -EINVAL;
+	}
+
+	if (!opts->full_auxtrace)
+		return 0;
+
+	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
+	if (err)
+		return err;
+
+	/* Set default sizes for snapshot mode */
+	if (opts->auxtrace_snapshot_mode) {
+		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
+
+		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
+			if (privileged) {
+				opts->auxtrace_mmap_pages = MiB(4) / page_size;
+			} else {
+				opts->auxtrace_mmap_pages = KiB(128) / page_size;
+				if (opts->mmap_pages == UINT_MAX)
+					opts->mmap_pages = KiB(256) / page_size;
+			}
+		} else if (!opts->auxtrace_mmap_pages && !privileged &&
+			   opts->mmap_pages == UINT_MAX) {
+			opts->mmap_pages = KiB(256) / page_size;
+		}
+		if (!opts->auxtrace_snapshot_size)
+			opts->auxtrace_snapshot_size =
+				opts->auxtrace_mmap_pages * (size_t)page_size;
+		if (!opts->auxtrace_mmap_pages) {
+			size_t sz = opts->auxtrace_snapshot_size;
+
+			sz = round_up(sz, page_size) / page_size;
+			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+		}
+		if (opts->auxtrace_snapshot_size >
+				opts->auxtrace_mmap_pages * (size_t)page_size) {
+			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
+			       opts->auxtrace_snapshot_size,
+			       opts->auxtrace_mmap_pages * (size_t)page_size);
+			return -EINVAL;
+		}
+		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
+			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
+			return -EINVAL;
+		}
+		pr_debug2("Intel PT snapshot size: %zu\n",
+			  opts->auxtrace_snapshot_size);
+		if (psb_period &&
+		    opts->auxtrace_snapshot_size <= psb_period +
+						  INTEL_PT_PSB_PERIOD_NEAR)
+			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
+				    opts->auxtrace_snapshot_size, psb_period);
+	}
+
+	/* Set default sizes for full trace mode */
+	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+		if (privileged) {
+			opts->auxtrace_mmap_pages = MiB(4) / page_size;
+		} else {
+			opts->auxtrace_mmap_pages = KiB(128) / page_size;
+			if (opts->mmap_pages == UINT_MAX)
+				opts->mmap_pages = KiB(256) / page_size;
+		}
+	}
+
+	/* Validate auxtrace_mmap_pages */
+	if (opts->auxtrace_mmap_pages) {
+		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+		size_t min_sz;
+
+		if (opts->auxtrace_snapshot_mode)
+			min_sz = KiB(4);
+		else
+			min_sz = KiB(8);
+
+		if (sz < min_sz || !is_power_of_2(sz)) {
+			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
+			       min_sz / 1024);
+			return -EINVAL;
+		}
+	}
+
+	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
+
+	if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
+		have_timing_info = true;
+	else
+		have_timing_info = false;
+
+	/*
+	 * Per-cpu recording needs sched_switch events to distinguish different
+	 * threads.
+	 */
+	if (have_timing_info && !cpu_map__empty(cpus)) {
+		err = intel_pt_track_switches(evlist);
+		if (err == -EPERM)
+			pr_debug2("Unable to select sched:sched_switch\n");
+		else if (err)
+			return err;
+		else
+			ptr->have_sched_switch = 1;
+	}
+
+	if (intel_pt_evsel) {
+		/*
+		 * To obtain the auxtrace buffer file descriptor, the auxtrace
+		 * event must come first.
+		 */
+		perf_evlist__to_front(evlist, intel_pt_evsel);
+		/*
+		 * In the case of per-cpu mmaps, we need the CPU on the
+		 * AUX event.
+		 */
+		if (!cpu_map__empty(cpus))
+			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
+	}
+
+	/* Add dummy event to keep tracking */
+	if (opts->full_auxtrace) {
+		struct perf_evsel *tracking_evsel;
+
+		err = parse_events(evlist, "dummy:u", NULL);
+		if (err)
+			return err;
+
+		tracking_evsel = perf_evlist__last(evlist);
+
+		perf_evlist__set_tracking_event(evlist, tracking_evsel);
+
+		tracking_evsel->attr.freq = 0;
+		tracking_evsel->attr.sample_period = 1;
+
+		/* In per-cpu case, always need the time of mmap events etc */
+		if (!cpu_map__empty(cpus))
+			perf_evsel__set_sample_bit(tracking_evsel, TIME);
+	}
+
+	/*
+	 * Warn the user when we do not have enough information to decode i.e.
+	 * per-cpu with no sched_switch (except workload-only).
+	 */
+	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
+	    !target__none(&opts->target))
+		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
+
+	return 0;
+}
+
+static int intel_pt_snapshot_start(struct auxtrace_record *itr)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	struct perf_evsel *evsel;
+
+	evlist__for_each(ptr->evlist, evsel) {
+		if (evsel->attr.type == ptr->intel_pt_pmu->type)
+			return perf_evlist__disable_event(ptr->evlist, evsel);
+	}
+	return -EINVAL;
+}
+
+static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	struct perf_evsel *evsel;
+
+	evlist__for_each(ptr->evlist, evsel) {
+		if (evsel->attr.type == ptr->intel_pt_pmu->type)
+			return perf_evlist__enable_event(ptr->evlist, evsel);
+	}
+	return -EINVAL;
+}
+
+static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
+{
+	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
+	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
+	struct intel_pt_snapshot_ref *refs;
+
+	if (!new_cnt)
+		new_cnt = 16;
+
+	while (new_cnt <= idx)
+		new_cnt *= 2;
+
+	refs = calloc(new_cnt, sz);
+	if (!refs)
+		return -ENOMEM;
+
+	memcpy(refs, ptr->snapshot_refs, cnt * sz);
+
+	ptr->snapshot_refs = refs;
+	ptr->snapshot_ref_cnt = new_cnt;
+
+	return 0;
+}
+
+static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
+{
+	int i;
+
+	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
+		zfree(&ptr->snapshot_refs[i].ref_buf);
+	zfree(&ptr->snapshot_refs);
+}
+
+static void intel_pt_recording_free(struct auxtrace_record *itr)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+
+	intel_pt_free_snapshot_refs(ptr);
+	free(ptr);
+}
+
+static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
+				       size_t snapshot_buf_size)
+{
+	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
+	void *ref_buf;
+
+	ref_buf = zalloc(ref_buf_size);
+	if (!ref_buf)
+		return -ENOMEM;
+
+	ptr->snapshot_refs[idx].ref_buf = ref_buf;
+	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;
+
+	return 0;
+}
+
+static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
+					     size_t snapshot_buf_size)
+{
+	const size_t max_size = 256 * 1024;
+	size_t buf_size = 0, psb_period;
+
+	if (ptr->snapshot_size <= 64 * 1024)
+		return 0;
+
+	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
+	if (psb_period)
+		buf_size = psb_period * 2;
+
+	if (!buf_size || buf_size > max_size)
+		buf_size = max_size;
+
+	if (buf_size >= snapshot_buf_size)
+		return 0;
+
+	if (buf_size >= ptr->snapshot_size / 2)
+		return 0;
+
+	return buf_size;
+}
+
+static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
+				  size_t snapshot_buf_size)
+{
+	if (ptr->snapshot_init_done)
+		return 0;
+
+	ptr->snapshot_init_done = true;
+
+	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
+							snapshot_buf_size);
+
+	return 0;
+}
+
+/**
+ * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
+ * @buf1: first buffer
+ * @compare_size: number of bytes to compare
+ * @buf2: second buffer (a circular buffer)
+ * @offs2: offset in second buffer
+ * @buf2_size: size of second buffer
+ *
+ * The comparison allows for the possibility that the bytes to compare in the
+ * circular buffer are not contiguous.  It is assumed that @compare_size <=
+ * @buf2_size.  This function returns %false if the bytes are identical, %true
+ * otherwise.
+ */
+static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
+				     void *buf2, size_t offs2, size_t buf2_size)
+{
+	size_t end2 = offs2 + compare_size, part_size;
+
+	if (end2 <= buf2_size)
+		return memcmp(buf1, buf2 + offs2, compare_size);
+
+	part_size = end2 - buf2_size;
+	if (memcmp(buf1, buf2 + offs2, part_size))
+		return true;
+
+	compare_size -= part_size;
+
+	return memcmp(buf1 + part_size, buf2, compare_size);
+}
+
+static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
+				 size_t ref_size, size_t buf_size,
+				 void *data, size_t head)
+{
+	size_t ref_end = ref_offset + ref_size;
+
+	if (ref_end > buf_size) {
+		if (head > ref_offset || head < ref_end - buf_size)
+			return true;
+	} else if (head > ref_offset && head < ref_end) {
+		return true;
+	}
+
+	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
+					buf_size);
+}
+
+static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
+			      void *data, size_t head)
+{
+	if (head >= ref_size) {
+		memcpy(ref_buf, data + head - ref_size, ref_size);
+	} else {
+		memcpy(ref_buf, data, head);
+		ref_size -= head;
+		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
+	}
+}
+
+static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
+			     struct auxtrace_mmap *mm, unsigned char *data,
+			     u64 head)
+{
+	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
+	bool wrapped;
+
+	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
+				       ptr->snapshot_ref_buf_size, mm->len,
+				       data, head);
+
+	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
+			  data, head);
+
+	return wrapped;
+}
+
+static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
+{
+	int i, a, b;
+
+	b = buf_size >> 3;
+	a = b - 512;
+	if (a < 0)
+		a = 0;
+
+	for (i = a; i < b; i++) {
+		if (data[i])
+			return true;
+	}
+
+	return false;
+}
+
+static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
+				  struct auxtrace_mmap *mm, unsigned char *data,
+				  u64 *head, u64 *old)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	bool wrapped;
+	int err;
+
+	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
+		  __func__, idx, (size_t)*old, (size_t)*head);
+
+	err = intel_pt_snapshot_init(ptr, mm->len);
+	if (err)
+		goto out_err;
+
+	if (idx >= ptr->snapshot_ref_cnt) {
+		err = intel_pt_alloc_snapshot_refs(ptr, idx);
+		if (err)
+			goto out_err;
+	}
+
+	if (ptr->snapshot_ref_buf_size) {
+		if (!ptr->snapshot_refs[idx].ref_buf) {
+			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
+			if (err)
+				goto out_err;
+		}
+		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
+	} else {
+		wrapped = ptr->snapshot_refs[idx].wrapped;
+		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
+			ptr->snapshot_refs[idx].wrapped = true;
+			wrapped = true;
+		}
+	}
+
+	/*
+	 * In full trace mode 'head' continually increases.  However in snapshot
+	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
+	 * are adjusted to match the full trace case which expects that 'old' is
+	 * always less than 'head'.
+	 */
+	if (wrapped) {
+		*old = *head;
+		*head += mm->len;
+	} else {
+		if (mm->mask)
+			*old &= mm->mask;
+		else
+			*old %= mm->len;
+		if (*old > *head)
+			*head += mm->len;
+	}
+
+	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
+		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);
+
+	return 0;
+
+out_err:
+	pr_err("%s: failed, error %d\n", __func__, err);
+	return err;
+}
+
+static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
+{
+	return rdtsc();
+}
+
+static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
+{
+	struct intel_pt_recording *ptr =
+			container_of(itr, struct intel_pt_recording, itr);
+	struct perf_evsel *evsel;
+
+	evlist__for_each(ptr->evlist, evsel) {
+		if (evsel->attr.type == ptr->intel_pt_pmu->type)
+			return perf_evlist__enable_event_idx(ptr->evlist, evsel,
+							     idx);
+	}
+	return -EINVAL;
+}
+
+struct auxtrace_record *intel_pt_recording_init(int *err)
+{
+	struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
+	struct intel_pt_recording *ptr;
+
+	if (!intel_pt_pmu)
+		return NULL;
+
+	ptr = zalloc(sizeof(struct intel_pt_recording));
+	if (!ptr) {
+		*err = -ENOMEM;
+		return NULL;
+	}
+
+	ptr->intel_pt_pmu = intel_pt_pmu;
+	ptr->itr.recording_options = intel_pt_recording_options;
+	ptr->itr.info_priv_size = intel_pt_info_priv_size;
+	ptr->itr.info_fill = intel_pt_info_fill;
+	ptr->itr.free = intel_pt_recording_free;
+	ptr->itr.snapshot_start = intel_pt_snapshot_start;
+	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
+	ptr->itr.find_snapshot = intel_pt_find_snapshot;
+	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
+	ptr->itr.reference = intel_pt_reference;
+	ptr->itr.read_finish = intel_pt_read_finish;
+	return &ptr->itr;
+}
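
For context, a hedged standalone sketch (not part of the patch) of the bit-picking rule intel_pt_default_config() above uses when choosing mtc_period and psb_period from a capability bitmap: pick the largest supported value not above the target, otherwise the smallest supported value above it. The bitmaps in main() are made-up examples, not real capability values.

#include <stdio.h>

/* Same selection rule as intel_pt_pick_bit() above. */
static int pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
			if (pos >= target)
				break;
		}
	}
	return pick;
}

int main(void)
{
	/* Supported values 0, 3 and 9: the target 3 is available directly. */
	printf("%d\n", pick_bit(0x209, 3));	/* -> 3 */
	/* Supported values 5 and 9 only: nothing <= 3, so the lowest (5) wins. */
	printf("%d\n", pick_bit(0x220, 3));	/* -> 5 */
	/* Supported values 0 and 1: largest value not above the target is 1. */
	printf("%d\n", pick_bit(0x3, 3));	/* -> 1 */
	return 0;
}

A target of 3 matches the default used for both mtc_period and psb_period in intel_pt_default_config() above.
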
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
new file mode 100644
index 0000000..c5db14f
--- /dev/null
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -0,0 +1,28 @@
+#include "../../perf.h"
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+	SMPL_REG(AX, PERF_REG_X86_AX),
+	SMPL_REG(BX, PERF_REG_X86_BX),
+	SMPL_REG(CX, PERF_REG_X86_CX),
+	SMPL_REG(DX, PERF_REG_X86_DX),
+	SMPL_REG(SI, PERF_REG_X86_SI),
+	SMPL_REG(DI, PERF_REG_X86_DI),
+	SMPL_REG(BP, PERF_REG_X86_BP),
+	SMPL_REG(SP, PERF_REG_X86_SP),
+	SMPL_REG(IP, PERF_REG_X86_IP),
+	SMPL_REG(FLAGS, PERF_REG_X86_FLAGS),
+	SMPL_REG(CS, PERF_REG_X86_CS),
+	SMPL_REG(SS, PERF_REG_X86_SS),
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+	SMPL_REG(R8, PERF_REG_X86_R8),
+	SMPL_REG(R9, PERF_REG_X86_R9),
+	SMPL_REG(R10, PERF_REG_X86_R10),
+	SMPL_REG(R11, PERF_REG_X86_R11),
+	SMPL_REG(R12, PERF_REG_X86_R12),
+	SMPL_REG(R13, PERF_REG_X86_R13),
+	SMPL_REG(R14, PERF_REG_X86_R14),
+	SMPL_REG(R15, PERF_REG_X86_R15),
+#endif
+	SMPL_REG_END
+};
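
For context, a standalone sketch (not part of the patch) of how a sample_regs_intr bitmask pairs up with register values dumped in bit order, which is what the print_sample_iregs() change in builtin-script.c further down relies on. The register names, mask and values here are made-up examples.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	const char *names[] = { "AX", "BX", "CX", "DX" };
	uint64_t mask = 0x5;			/* AX and CX requested        */
	uint64_t vals[] = { 0x1234, 0xbeef };	/* values arrive in bit order */
	unsigned int i = 0, r;

	for (r = 0; r < 64; r++) {
		if (!(mask & (1ULL << r)))
			continue;
		/* One dumped value is consumed per set mask bit. */
		printf("%5s:0x%" PRIx64 " ", names[r], vals[i++]);
	}
	printf("\n");
	return 0;
}
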
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
new file mode 100644
index 0000000..79fe071
--- /dev/null
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -0,0 +1,18 @@
+#include <string.h>
+
+#include <linux/perf_event.h>
+
+#include "../../util/intel-pt.h"
+#include "../../util/intel-bts.h"
+#include "../../util/pmu.h"
+
+struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+{
+#ifdef HAVE_AUXTRACE_SUPPORT
+	if (!strcmp(pmu->name, INTEL_PT_PMU_NAME))
+		return intel_pt_pmu_default_config(pmu);
+	if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME))
+		pmu->selectable = true;
+#endif
+	return NULL;
+}
diff --git a/tools/perf/arch/xtensa/Build b/tools/perf/arch/xtensa/Build
new file mode 100644
index 0000000..54afe4a
--- /dev/null
+++ b/tools/perf/arch/xtensa/Build
@@ -0,0 +1 @@
+libperf-y += util/
diff --git a/tools/perf/arch/xtensa/Makefile b/tools/perf/arch/xtensa/Makefile
new file mode 100644
index 0000000..7fbca17
--- /dev/null
+++ b/tools/perf/arch/xtensa/Makefile
@@ -0,0 +1,3 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+endif
diff --git a/tools/perf/arch/xtensa/util/Build b/tools/perf/arch/xtensa/util/Build
new file mode 100644
index 0000000..954e287
--- /dev/null
+++ b/tools/perf/arch/xtensa/util/Build
@@ -0,0 +1 @@
+libperf-$(CONFIG_DWARF) += dwarf-regs.o
diff --git a/tools/perf/arch/xtensa/util/dwarf-regs.c b/tools/perf/arch/xtensa/util/dwarf-regs.c
new file mode 100644
index 0000000..4dba76b
--- /dev/null
+++ b/tools/perf/arch/xtensa/util/dwarf-regs.c
@@ -0,0 +1,25 @@
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (c) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <stddef.h>
+#include <dwarf-regs.h>
+
+#define XTENSA_MAX_REGS 16
+
+const char *xtensa_regs_table[XTENSA_MAX_REGS] = {
+	"a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+	"a8", "a9", "a10", "a11", "a12", "a13", "a14", "a15",
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+	return n < XTENSA_MAX_REGS ? xtensa_regs_table[n] : NULL;
+}
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index c3ab760..573e288 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -5,6 +5,7 @@
 perf-y += futex-wake.o
 perf-y += futex-wake-parallel.o
 perf-y += futex-requeue.o
+perf-y += futex-lock-pi.o
 
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
 perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 70b2f71..a50df86 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -36,6 +36,8 @@
 extern int bench_futex_wake_parallel(int argc, const char **argv,
 				     const char *prefix);
 extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
+/* pi futexes */
+extern int bench_futex_lock_pi(int argc, const char **argv, const char *prefix);
 
 #define BENCH_FORMAT_DEFAULT_STR	"default"
 #define BENCH_FORMAT_DEFAULT		0
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
new file mode 100644
index 0000000..bc6a16a
--- /dev/null
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2015 Davidlohr Bueso.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+struct worker {
+	int tid;
+	u_int32_t *futex;
+	pthread_t thread;
+	unsigned long ops;
+};
+
+static u_int32_t global_futex = 0;
+static struct worker *worker;
+static unsigned int nsecs = 10;
+static bool silent = false, multi = false;
+static bool done = false, fshared = false;
+static unsigned int ncpus, nthreads = 0;
+static int futex_flag = 0;
+struct timeval start, end, runtime;
+static pthread_mutex_t thread_lock;
+static unsigned int threads_starting;
+static struct stats throughput_stats;
+static pthread_cond_t thread_parent, thread_worker;
+
+static const struct option options[] = {
+	OPT_UINTEGER('t', "threads",  &nthreads, "Specify amount of threads"),
+	OPT_UINTEGER('r', "runtime", &nsecs,     "Specify runtime (in seconds)"),
+	OPT_BOOLEAN( 'M', "multi",   &multi,     "Use multiple futexes"),
+	OPT_BOOLEAN( 's', "silent",  &silent,    "Silent mode: do not display data/details"),
+	OPT_BOOLEAN( 'S', "shared",  &fshared,   "Use shared futexes instead of private ones"),
+	OPT_END()
+};
+
+static const char * const bench_futex_lock_pi_usage[] = {
+	"perf bench futex lock-pi <options>",
+	NULL
+};
+
+static void print_summary(void)
+{
+	unsigned long avg = avg_stats(&throughput_stats);
+	double stddev = stddev_stats(&throughput_stats);
+
+	printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+	       !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+	       (int) runtime.tv_sec);
+}
+
+static void toggle_done(int sig __maybe_unused,
+			siginfo_t *info __maybe_unused,
+			void *uc __maybe_unused)
+{
+	/* inform all threads that we're done for the day */
+	done = true;
+	gettimeofday(&end, NULL);
+	timersub(&end, &start, &runtime);
+}
+
+static void *workerfn(void *arg)
+{
+	struct worker *w = (struct worker *) arg;
+
+	pthread_mutex_lock(&thread_lock);
+	threads_starting--;
+	if (!threads_starting)
+		pthread_cond_signal(&thread_parent);
+	pthread_cond_wait(&thread_worker, &thread_lock);
+	pthread_mutex_unlock(&thread_lock);
+
+	do {
+		int ret;
+	again:
+		ret = futex_lock_pi(w->futex, NULL, 0, futex_flag);
+
+		if (ret) { /* handle lock acquisition */
+			if (!silent)
+				warn("thread %d: Could not lock pi-lock for %p (%d)",
+				     w->tid, w->futex, ret);
+			if (done)
+				break;
+
+			goto again;
+		}
+
+		usleep(1);
+		ret = futex_unlock_pi(w->futex, futex_flag);
+		if (ret && !silent)
+			warn("thread %d: Could not unlock pi-lock for %p (%d)",
+			     w->tid, w->futex, ret);
+		w->ops++; /* account for thread's share of work */
+	}  while (!done);
+
+	return NULL;
+}
+
+static void create_threads(struct worker *w, pthread_attr_t thread_attr)
+{
+	cpu_set_t cpu;
+	unsigned int i;
+
+	threads_starting = nthreads;
+
+	for (i = 0; i < nthreads; i++) {
+		worker[i].tid = i;
+
+		if (multi) {
+			worker[i].futex = calloc(1, sizeof(u_int32_t));
+			if (!worker[i].futex)
+				err(EXIT_FAILURE, "calloc");
+		} else
+			worker[i].futex = &global_futex;
+
+		CPU_ZERO(&cpu);
+		CPU_SET(i % ncpus, &cpu);
+
+		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+			err(EXIT_FAILURE, "pthread_create");
+	}
+}
+
+int bench_futex_lock_pi(int argc, const char **argv,
+			const char *prefix __maybe_unused)
+{
+	int ret = 0;
+	unsigned int i;
+	struct sigaction act;
+	pthread_attr_t thread_attr;
+
+	argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
+	if (argc)
+		goto err;
+
+	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+	sigfillset(&act.sa_mask);
+	act.sa_sigaction = toggle_done;
+	sigaction(SIGINT, &act, NULL);
+
+	if (!nthreads)
+		nthreads = ncpus;
+
+	worker = calloc(nthreads, sizeof(*worker));
+	if (!worker)
+		err(EXIT_FAILURE, "calloc");
+
+	if (!fshared)
+		futex_flag = FUTEX_PRIVATE_FLAG;
+
+	printf("Run summary [PID %d]: %d threads doing pi lock/unlock pairing for %d secs.\n\n",
+	       getpid(), nthreads, nsecs);
+
+	init_stats(&throughput_stats);
+	pthread_mutex_init(&thread_lock, NULL);
+	pthread_cond_init(&thread_parent, NULL);
+	pthread_cond_init(&thread_worker, NULL);
+
+	threads_starting = nthreads;
+	pthread_attr_init(&thread_attr);
+	gettimeofday(&start, NULL);
+
+	create_threads(worker, thread_attr);
+	pthread_attr_destroy(&thread_attr);
+
+	pthread_mutex_lock(&thread_lock);
+	while (threads_starting)
+		pthread_cond_wait(&thread_parent, &thread_lock);
+	pthread_cond_broadcast(&thread_worker);
+	pthread_mutex_unlock(&thread_lock);
+
+	sleep(nsecs);
+	toggle_done(0, NULL, NULL);
+
+	for (i = 0; i < nthreads; i++) {
+		ret = pthread_join(worker[i].thread, NULL);
+		if (ret)
+			err(EXIT_FAILURE, "pthread_join");
+	}
+
+	/* cleanup & report results */
+	pthread_cond_destroy(&thread_parent);
+	pthread_cond_destroy(&thread_worker);
+	pthread_mutex_destroy(&thread_lock);
+
+	for (i = 0; i < nthreads; i++) {
+		unsigned long t = worker[i].ops/runtime.tv_sec;
+
+		update_stats(&throughput_stats, t);
+		if (!silent)
+			printf("[thread %3d] futex: %p [ %ld ops/sec ]\n",
+			       worker[i].tid, worker[i].futex, t);
+
+		if (multi)
+			free(worker[i].futex);
+	}
+
+	print_summary();
+
+	free(worker);
+	return ret;
+err:
+	usage_with_options(bench_futex_lock_pi_usage, options);
+	exit(EXIT_FAILURE);
+}
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index 7ed22ff..d44de9f 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -56,6 +56,26 @@
 }
 
 /**
+ * futex_lock_pi() - block on uaddr as a PI mutex
+ * @detect:	whether (1) or not (0) to perform deadlock detection
+ */
+static inline int
+futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int detect,
+	      int opflags)
+{
+	return futex(uaddr, FUTEX_LOCK_PI, detect, timeout, NULL, 0, opflags);
+}
+
+/**
+ * futex_unlock_pi() - release uaddr as a PI mutex, waking the top waiter
+ */
+static inline int
+futex_unlock_pi(u_int32_t *uaddr, int opflags)
+{
+	return futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags);
+}
+
+/**
 * futex_cmp_requeue() - requeue tasks from uaddr to uaddr2
 * @nr_wake:        wake up to this many tasks
 * @nr_requeue:        requeue up to this many tasks
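
For context, a minimal single-threaded sketch (not part of the patch) of the lock/unlock pairing the new futex_lock_pi()/futex_unlock_pi() helpers wrap, and which the lock-pi benchmark drives from multiple threads. The futex_op() wrapper below is an assumed stand-in for the futex() macro already defined earlier in futex.h; error handling is reduced to perror().

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

/* Assumed wrapper: only the arguments needed for PI lock/unlock. */
static int futex_op(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

int main(void)
{
	uint32_t futex_word = 0;	/* 0 means "unowned" for a PI futex */

	if (futex_op(&futex_word, FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG, 0))
		perror("FUTEX_LOCK_PI");
	else
		printf("owner tid stored in futex word: %u\n", futex_word);

	if (futex_op(&futex_word, FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG, 0))
		perror("FUTEX_UNLOCK_PI");

	return 0;
}
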
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 2c1bec3..8edc205 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -67,6 +67,7 @@
 			rb_erase(&al->sym->rb_node,
 				 &al->map->dso->symbols[al->map->type]);
 			symbol__delete(al->sym);
+			dso__reset_find_symbol_cache(al->map->dso);
 		}
 		return 0;
 	}
@@ -187,6 +188,7 @@
 			 * symbol, free he->ms.sym->src to signal we already
 			 * processed this symbol.
 			 */
+			zfree(&notes->src->cycles_hist);
 			zfree(&notes->src);
 		}
 	}
@@ -238,6 +240,8 @@
 		if (nr_samples > 0) {
 			total_nr_samples += nr_samples;
 			hists__collapse_resort(hists, NULL);
+			/* Don't sort callchain */
+			perf_evsel__reset_sample_bit(pos, CALLCHAIN);
 			hists__output_resort(hists, NULL);
 
 			if (symbol_conf.event_group &&
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index b5314e4..f67934d 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -60,6 +60,8 @@
 	{ "wake",	"Benchmark for futex wake calls",               bench_futex_wake	},
 	{ "wake-parallel", "Benchmark for parallel futex wake calls",   bench_futex_wake_parallel },
 	{ "requeue",	"Benchmark for futex requeue calls",            bench_futex_requeue	},
+	/* pi-futexes */
+	{ "lock-pi",	"Benchmark for futex lock_pi calls",            bench_futex_lock_pi	},
 	{ "all",	"Test all futex benchmarks",			NULL			},
 	{ NULL,		NULL,						NULL			}
 };
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index d47a0cd..7b8450c 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -25,8 +25,6 @@
 static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
 {
 	char root_dir[PATH_MAX];
-	char notes[PATH_MAX];
-	u8 build_id[BUILD_ID_SIZE];
 	char *p;
 
 	strlcpy(root_dir, proc_dir, sizeof(root_dir));
@@ -35,15 +33,7 @@
 	if (!p)
 		return -1;
 	*p = '\0';
-
-	scnprintf(notes, sizeof(notes), "%s/sys/kernel/notes", root_dir);
-
-	if (sysfs__read_build_id(notes, build_id, sizeof(build_id)))
-		return -1;
-
-	build_id__sprintf(build_id, sizeof(build_id), sbuildid);
-
-	return 0;
+	return sysfs__sprintf_build_id(root_dir, sbuildid);
 }
 
 static int build_id_cache__kcore_dir(char *dir, size_t sz)
@@ -127,7 +117,7 @@
 
 static int build_id_cache__add_kcore(const char *filename, bool force)
 {
-	char dir[32], sbuildid[BUILD_ID_SIZE * 2 + 1];
+	char dir[32], sbuildid[SBUILD_ID_SIZE];
 	char from_dir[PATH_MAX], to_dir[PATH_MAX];
 	char *p;
 
@@ -138,7 +128,7 @@
 		return -1;
 	*p = '\0';
 
-	if (build_id_cache__kcore_buildid(from_dir, sbuildid))
+	if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
 		return -1;
 
 	scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
@@ -184,7 +174,7 @@
 
 static int build_id_cache__add_file(const char *filename)
 {
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 	u8 build_id[BUILD_ID_SIZE];
 	int err;
 
@@ -204,7 +194,7 @@
 static int build_id_cache__remove_file(const char *filename)
 {
 	u8 build_id[BUILD_ID_SIZE];
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 
 	int err;
 
@@ -276,7 +266,7 @@
 static int build_id_cache__update_file(const char *filename)
 {
 	u8 build_id[BUILD_ID_SIZE];
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 
 	int err = 0;
 
@@ -363,7 +353,7 @@
 	setup_pager();
 
 	if (add_name_list_str) {
-		list = strlist__new(true, add_name_list_str);
+		list = strlist__new(add_name_list_str, NULL);
 		if (list) {
 			strlist__for_each(pos, list)
 				if (build_id_cache__add_file(pos->s)) {
@@ -381,7 +371,7 @@
 	}
 
 	if (remove_name_list_str) {
-		list = strlist__new(true, remove_name_list_str);
+		list = strlist__new(remove_name_list_str, NULL);
 		if (list) {
 			strlist__for_each(pos, list)
 				if (build_id_cache__remove_file(pos->s)) {
@@ -399,7 +389,7 @@
 	}
 
 	if (purge_name_list_str) {
-		list = strlist__new(true, purge_name_list_str);
+		list = strlist__new(purge_name_list_str, NULL);
 		if (list) {
 			strlist__for_each(pos, list)
 				if (build_id_cache__purge_path(pos->s)) {
@@ -420,7 +410,7 @@
 		ret = build_id_cache__fprintf_missing(session, stdout);
 
 	if (update_name_list_str) {
-		list = strlist__new(true, update_name_list_str);
+		list = strlist__new(update_name_list_str, NULL);
 		if (list) {
 			strlist__for_each(pos, list)
 				if (build_id_cache__update_file(pos->s)) {
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index 9fe93c8..918b4de 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -19,29 +19,25 @@
 
 static int sysfs__fprintf_build_id(FILE *fp)
 {
-	u8 kallsyms_build_id[BUILD_ID_SIZE];
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
+	int ret;
 
-	if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
-				 sizeof(kallsyms_build_id)) != 0)
-		return -1;
+	ret = sysfs__sprintf_build_id("/", sbuild_id);
+	if (ret != sizeof(sbuild_id))
+		return ret < 0 ? ret : -EINVAL;
 
-	build_id__sprintf(kallsyms_build_id, sizeof(kallsyms_build_id),
-			  sbuild_id);
-	fprintf(fp, "%s\n", sbuild_id);
-	return 0;
+	return fprintf(fp, "%s\n", sbuild_id);
 }
 
 static int filename__fprintf_build_id(const char *name, FILE *fp)
 {
-	u8 build_id[BUILD_ID_SIZE];
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
+	int ret;
 
-	if (filename__read_build_id(name, build_id,
-				    sizeof(build_id)) != sizeof(build_id))
-		return 0;
+	ret = filename__sprintf_build_id(name, sbuild_id);
+	if (ret != sizeof(sbuild_id))
+		return ret < 0 ? ret : -EINVAL;
 
-	build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
 	return fprintf(fp, "%s\n", sbuild_id);
 }
 
@@ -63,7 +59,7 @@
 	/*
 	 * See if this is an ELF file first:
 	 */
-	if (filename__fprintf_build_id(input_name, stdout))
+	if (filename__fprintf_build_id(input_name, stdout) > 0)
 		goto out;
 
 	session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops);
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index daaa7dc..0b180a8 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -722,6 +722,9 @@
 		if (verbose || data__files_cnt > 2)
 			data__fprintf();
 
+		/* Don't sort callchain for perf diff */
+		perf_evsel__reset_sample_bit(evsel_base, CALLCHAIN);
+
 		hists__process(hists_base);
 	}
 }
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 01b0649..f62c49b 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -561,6 +561,7 @@
 			.lost		= perf_event__repipe,
 			.aux		= perf_event__repipe,
 			.itrace_start	= perf_event__repipe,
+			.context_switch	= perf_event__repipe,
 			.read		= perf_event__repipe_sample,
 			.throttle	= perf_event__repipe,
 			.unthrottle	= perf_event__repipe,
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 1272559..b81cec3 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -297,8 +297,7 @@
 		clear_perf_probe_event(params.events + i);
 	line_range__clear(&params.line_range);
 	free(params.target);
-	if (params.filter)
-		strfilter__delete(params.filter);
+	strfilter__delete(params.filter);
 	memset(&params, 0, sizeof(params));
 }
 
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 20b56eb..142eeb3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -27,8 +27,10 @@
 #include "util/cpumap.h"
 #include "util/thread_map.h"
 #include "util/data.h"
+#include "util/perf_regs.h"
 #include "util/auxtrace.h"
 #include "util/parse-branch-options.h"
+#include "util/parse-regs-options.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -279,7 +281,7 @@
 
 	evlist__for_each(evlist, pos) {
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
+		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
 			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
 				if (verbose)
 					ui__warning("%s\n", msg);
@@ -771,12 +773,14 @@
 			 callchain_param.dump_size);
 }
 
-int record_parse_callchain_opt(const struct option *opt __maybe_unused,
+int record_parse_callchain_opt(const struct option *opt,
 			       const char *arg,
 			       int unset)
 {
 	int ret;
+	struct record_opts *record = (struct record_opts *)opt->value;
 
+	record->callgraph_set = true;
 	callchain_param.enabled = !unset;
 
 	/* --no-call-graph */
@@ -786,17 +790,20 @@
 		return 0;
 	}
 
-	ret = parse_callchain_record_opt(arg);
+	ret = parse_callchain_record_opt(arg, &callchain_param);
 	if (!ret)
 		callchain_debug();
 
 	return ret;
 }
 
-int record_callchain_opt(const struct option *opt __maybe_unused,
+int record_callchain_opt(const struct option *opt,
 			 const char *arg __maybe_unused,
 			 int unset __maybe_unused)
 {
+	struct record_opts *record = (struct record_opts *)opt->value;
+
+	record->callgraph_set = true;
 	callchain_param.enabled = true;
 
 	if (callchain_param.record_mode == CALLCHAIN_NONE)
@@ -1003,6 +1010,9 @@
 		     parse_events_option),
 	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
 		     "event filter", parse_filter),
+	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
+			   NULL, "don't record events from perf itself",
+			   exclude_perf),
 	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
 		    "record events on existing process id"),
 	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
@@ -1041,7 +1051,9 @@
 	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
 		    "per thread counts"),
 	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
-	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Record the sample timestamps"),
+	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
+			&record.opts.sample_time_set,
+			"Record the sample timestamps"),
 	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
 	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
 		    "don't sample"),
@@ -1070,8 +1082,9 @@
 		    "sample transaction flags (special events only)"),
 	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
 		    "use per-thread mmaps"),
-	OPT_BOOLEAN('I', "intr-regs", &record.opts.sample_intr_regs,
-		    "Sample machine registers on interrupt"),
+	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
+		    "sample selected machine registers on interrupt,"
+		    " use -I ? to list register names", parse_regs),
 	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
 		    "Record running/enabled time of read (:S) events"),
 	OPT_CALLBACK('k', "clockid", &record.opts,
@@ -1081,6 +1094,8 @@
 			  "opts", "AUX area tracing Snapshot Mode", ""),
 	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
 			"per thread proc mmap processing timeout in ms"),
+	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
+		    "Record context switch events"),
 	OPT_END()
 };
 
@@ -1108,6 +1123,11 @@
 			  " system-wide mode\n");
 		usage_with_options(record_usage, record_options);
 	}
+	if (rec->opts.record_switch_events &&
+	    !perf_can_record_switch_events()) {
+		ui__error("kernel does not support recording context switch events (--switch-events option)\n");
+		usage_with_options(record_usage, record_options);
+	}
 
 	if (!rec->itr) {
 		rec->itr = auxtrace_record__init(rec->evlist, &err);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 95a4771..62b285e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -53,6 +53,7 @@
 	bool			mem_mode;
 	bool			header;
 	bool			header_only;
+	bool			nonany_branch_mode;
 	int			max_stack;
 	struct perf_read_values	show_threads_values;
 	const char		*pretty_printing_style;
@@ -102,6 +103,9 @@
 	if (!ui__has_annotation())
 		return 0;
 
+	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
+			     rep->nonany_branch_mode);
+
 	if (sort__mode == SORT_MODE__BRANCH) {
 		bi = he->branch_info;
 		err = addr_map_symbol__inc_samples(&bi->from, evsel->idx);
@@ -258,6 +262,12 @@
 		else
 			callchain_param.record_mode = CALLCHAIN_FP;
 	}
+
+	/* ??? handle more cases than just ANY? */
+	if (!(perf_evlist__combined_branch_type(session->evlist) &
+				PERF_SAMPLE_BRANCH_ANY))
+		rep->nonany_branch_mode = true;
+
 	return 0;
 }
 
@@ -306,6 +316,11 @@
 	if (evname != NULL)
 		ret += fprintf(fp, " of event '%s'", evname);
 
+	if (symbol_conf.show_ref_callgraph &&
+	    strstr(evname, "call-graph=no")) {
+		ret += fprintf(fp, ", show reference callgraph");
+	}
+
 	if (rep->mem_mode) {
 		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
 		ret += fprintf(fp, "\n# Sort order   : %s", sort_order ? : default_mem_sort_order);
@@ -728,6 +743,10 @@
 	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
 			    "Instruction Tracing options",
 			    itrace_parse_synth_opts),
+	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
+			"Show full source file name path for source lines"),
+	OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
+		    "Show callgraph from reference event"),
 	OPT_END()
 	};
 	struct perf_data_file file = {
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 24809787..eb51325 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -6,6 +6,7 @@
 #include "util/exec_cmd.h"
 #include "util/header.h"
 #include "util/parse-options.h"
+#include "util/perf_regs.h"
 #include "util/session.h"
 #include "util/tool.h"
 #include "util/symbol.h"
@@ -46,6 +47,7 @@
 	PERF_OUTPUT_SYMOFFSET       = 1U << 11,
 	PERF_OUTPUT_SRCLINE         = 1U << 12,
 	PERF_OUTPUT_PERIOD          = 1U << 13,
+	PERF_OUTPUT_IREGS	    = 1U << 14,
 };
 
 struct output_option {
@@ -66,6 +68,7 @@
 	{.str = "symoff", .field = PERF_OUTPUT_SYMOFFSET},
 	{.str = "srcline", .field = PERF_OUTPUT_SRCLINE},
 	{.str = "period", .field = PERF_OUTPUT_PERIOD},
+	{.str = "iregs", .field = PERF_OUTPUT_IREGS},
 };
 
 /* default set to maintain compatibility with current format */
@@ -255,6 +258,11 @@
 					PERF_OUTPUT_PERIOD))
 		return -EINVAL;
 
+	if (PRINT_FIELD(IREGS) &&
+		perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
+					PERF_OUTPUT_IREGS))
+		return -EINVAL;
+
 	return 0;
 }
 
@@ -352,6 +360,24 @@
 	return 0;
 }
 
+static void print_sample_iregs(union perf_event *event __maybe_unused,
+			  struct perf_sample *sample,
+			  struct thread *thread __maybe_unused,
+			  struct perf_event_attr *attr)
+{
+	struct regs_dump *regs = &sample->intr_regs;
+	uint64_t mask = attr->sample_regs_intr;
+	unsigned i = 0, r;
+
+	if (!regs)
+		return;
+
+	for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
+		u64 val = regs->regs[i++];
+		printf("%5s:0x%"PRIx64" ", perf_reg_name(r), val);
+	}
+}
+
 static void print_sample_start(struct perf_sample *sample,
 			       struct thread *thread,
 			       struct perf_evsel *evsel)
@@ -525,6 +551,9 @@
 				     PERF_MAX_STACK_DEPTH);
 	}
 
+	if (PRINT_FIELD(IREGS))
+		print_sample_iregs(event, sample, thread, attr);
+
 	printf("\n");
 }
 
@@ -623,6 +652,7 @@
 	struct perf_session	*session;
 	bool			show_task_events;
 	bool			show_mmap_events;
+	bool			show_switch_events;
 };
 
 static int process_attr(struct perf_tool *tool, union perf_event *event,
@@ -661,7 +691,7 @@
 	struct thread *thread;
 	struct perf_script *script = container_of(tool, struct perf_script, tool);
 	struct perf_session *session = script->session;
-	struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
 	int ret = -1;
 
 	thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid);
@@ -695,7 +725,7 @@
 	struct thread *thread;
 	struct perf_script *script = container_of(tool, struct perf_script, tool);
 	struct perf_session *session = script->session;
-	struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
 
 	if (perf_event__process_fork(tool, event, sample, machine) < 0)
 		return -1;
@@ -727,7 +757,7 @@
 	struct thread *thread;
 	struct perf_script *script = container_of(tool, struct perf_script, tool);
 	struct perf_session *session = script->session;
-	struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
 
 	thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
 	if (thread == NULL) {
@@ -759,7 +789,7 @@
 	struct thread *thread;
 	struct perf_script *script = container_of(tool, struct perf_script, tool);
 	struct perf_session *session = script->session;
-	struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
 
 	if (perf_event__process_mmap(tool, event, sample, machine) < 0)
 		return -1;
@@ -790,7 +820,7 @@
 	struct thread *thread;
 	struct perf_script *script = container_of(tool, struct perf_script, tool);
 	struct perf_session *session = script->session;
-	struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
 
 	if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
 		return -1;
@@ -813,6 +843,32 @@
 	return 0;
 }
 
+static int process_switch_event(struct perf_tool *tool,
+				union perf_event *event,
+				struct perf_sample *sample,
+				struct machine *machine)
+{
+	struct thread *thread;
+	struct perf_script *script = container_of(tool, struct perf_script, tool);
+	struct perf_session *session = script->session;
+	struct perf_evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+
+	if (perf_event__process_switch(tool, event, sample, machine) < 0)
+		return -1;
+
+	thread = machine__findnew_thread(machine, sample->pid,
+					 sample->tid);
+	if (thread == NULL) {
+		pr_debug("problem processing SWITCH event, skipping it.\n");
+		return -1;
+	}
+
+	print_sample_start(sample, thread, evsel);
+	perf_event__fprintf(event, stdout);
+	thread__put(thread);
+	return 0;
+}
+
 static void sig_handler(int sig __maybe_unused)
 {
 	session_done = 1;
@@ -834,6 +890,8 @@
 		script->tool.mmap = process_mmap_event;
 		script->tool.mmap2 = process_mmap2_event;
 	}
+	if (script->show_switch_events)
+		script->tool.context_switch = process_switch_event;
 
 	ret = perf_session__process_events(script->session);
 
@@ -1532,6 +1590,22 @@
 	return 0;
 }
 
+static void script__setup_sample_type(struct perf_script *script)
+{
+	struct perf_session *session = script->session;
+	u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
+
+	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
+		if ((sample_type & PERF_SAMPLE_REGS_USER) &&
+		    (sample_type & PERF_SAMPLE_STACK_USER))
+			callchain_param.record_mode = CALLCHAIN_DWARF;
+		else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+			callchain_param.record_mode = CALLCHAIN_LBR;
+		else
+			callchain_param.record_mode = CALLCHAIN_FP;
+	}
+}
+
 int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
 {
 	bool show_full_info = false;
@@ -1598,7 +1672,7 @@
 		     "comma separated output fields prepend with 'type:'. "
 		     "Valid types: hw,sw,trace,raw. "
 		     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-		     "addr,symoff,period,flags", parse_output_fields),
+		     "addr,symoff,period,iregs,flags", parse_output_fields),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
 		    "system-wide collection from all CPUs"),
 	OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
@@ -1618,10 +1692,19 @@
 		    "Show the fork/comm/exit events"),
 	OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
 		    "Show the mmap events"),
+	OPT_BOOLEAN('\0', "show-switch-events", &script.show_switch_events,
+		    "Show context switch events (if recorded)"),
 	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
 	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
 			    "Instruction Tracing options",
 			    itrace_parse_synth_opts),
+	OPT_BOOLEAN(0, "full-source-path", &srcline_full_filename,
+			"Show full source file name path for source lines"),
+	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
+			"Enable symbol demangling"),
+	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+			"Enable kernel symbol demangling"),
+
 	OPT_END()
 	};
 	const char * const script_subcommands[] = { "record", "report", NULL };
@@ -1816,6 +1899,7 @@
 		goto out_delete;
 
 	script.session = session;
+	script__setup_sample_type(&script);
 
 	session->itrace_synth_opts = &itrace_synth_opts;
 
@@ -1830,6 +1914,14 @@
 	else
 		symbol_conf.use_callchain = false;
 
+	if (session->tevent.pevent &&
+	    pevent_set_function_resolver(session->tevent.pevent,
+					 machine__resolve_kernel_addr,
+					 &session->machines.host) < 0) {
+		pr_err("%s: failed to set libtraceevent function resolver\n", __func__);
+		return -1;
+	}
+
 	if (generate_script_lang) {
 		struct stat perf_stat;
 		int input;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index d99d850..d46dbb1 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -58,6 +58,7 @@
 #include "util/cpumap.h"
 #include "util/thread.h"
 #include "util/thread_map.h"
+#include "util/counts.h"
 
 #include <stdlib.h>
 #include <sys/prctl.h>
@@ -101,8 +102,6 @@
 
 static int			run_count			=  1;
 static bool			no_inherit			= false;
-static bool			scale				=  true;
-static enum aggr_mode		aggr_mode			= AGGR_GLOBAL;
 static volatile pid_t		child_pid			= -1;
 static bool			null_run			=  false;
 static int			detailed_run			=  0;
@@ -112,11 +111,9 @@
 static const char		*csv_sep			= NULL;
 static bool			csv_output			= false;
 static bool			group				= false;
-static FILE			*output				= NULL;
 static const char		*pre_cmd			= NULL;
 static const char		*post_cmd			= NULL;
 static bool			sync_run			= false;
-static unsigned int		interval			= 0;
 static unsigned int		initial_delay			= 0;
 static unsigned int		unit_width			= 4; /* strlen("unit") */
 static bool			forever				= false;
@@ -126,6 +123,11 @@
 
 static volatile int done = 0;
 
+static struct perf_stat_config stat_config = {
+	.aggr_mode	= AGGR_GLOBAL,
+	.scale		= true,
+};
+
 static inline void diff_timespec(struct timespec *r, struct timespec *a,
 				 struct timespec *b)
 {
@@ -148,7 +150,7 @@
 {
 	struct perf_event_attr *attr = &evsel->attr;
 
-	if (scale)
+	if (stat_config.scale)
 		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 				    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
@@ -178,142 +180,6 @@
 	return 0;
 }
 
-static void zero_per_pkg(struct perf_evsel *counter)
-{
-	if (counter->per_pkg_mask)
-		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
-}
-
-static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
-{
-	unsigned long *mask = counter->per_pkg_mask;
-	struct cpu_map *cpus = perf_evsel__cpus(counter);
-	int s;
-
-	*skip = false;
-
-	if (!counter->per_pkg)
-		return 0;
-
-	if (cpu_map__empty(cpus))
-		return 0;
-
-	if (!mask) {
-		mask = zalloc(MAX_NR_CPUS);
-		if (!mask)
-			return -ENOMEM;
-
-		counter->per_pkg_mask = mask;
-	}
-
-	s = cpu_map__get_socket(cpus, cpu);
-	if (s < 0)
-		return -1;
-
-	*skip = test_and_set_bit(s, mask) == 1;
-	return 0;
-}
-
-static int
-process_counter_values(struct perf_evsel *evsel, int cpu, int thread,
-		       struct perf_counts_values *count)
-{
-	struct perf_counts_values *aggr = &evsel->counts->aggr;
-	static struct perf_counts_values zero;
-	bool skip = false;
-
-	if (check_per_pkg(evsel, cpu, &skip)) {
-		pr_err("failed to read per-pkg counter\n");
-		return -1;
-	}
-
-	if (skip)
-		count = &zero;
-
-	switch (aggr_mode) {
-	case AGGR_THREAD:
-	case AGGR_CORE:
-	case AGGR_SOCKET:
-	case AGGR_NONE:
-		if (!evsel->snapshot)
-			perf_evsel__compute_deltas(evsel, cpu, thread, count);
-		perf_counts_values__scale(count, scale, NULL);
-		if (aggr_mode == AGGR_NONE)
-			perf_stat__update_shadow_stats(evsel, count->values, cpu);
-		break;
-	case AGGR_GLOBAL:
-		aggr->val += count->val;
-		if (scale) {
-			aggr->ena += count->ena;
-			aggr->run += count->run;
-		}
-	default:
-		break;
-	}
-
-	return 0;
-}
-
-static int process_counter_maps(struct perf_evsel *counter)
-{
-	int nthreads = thread_map__nr(counter->threads);
-	int ncpus = perf_evsel__nr_cpus(counter);
-	int cpu, thread;
-
-	if (counter->system_wide)
-		nthreads = 1;
-
-	for (thread = 0; thread < nthreads; thread++) {
-		for (cpu = 0; cpu < ncpus; cpu++) {
-			if (process_counter_values(counter, cpu, thread,
-						   perf_counts(counter->counts, cpu, thread)))
-				return -1;
-		}
-	}
-
-	return 0;
-}
-
-static int process_counter(struct perf_evsel *counter)
-{
-	struct perf_counts_values *aggr = &counter->counts->aggr;
-	struct perf_stat *ps = counter->priv;
-	u64 *count = counter->counts->aggr.values;
-	int i, ret;
-
-	aggr->val = aggr->ena = aggr->run = 0;
-	init_stats(ps->res_stats);
-
-	if (counter->per_pkg)
-		zero_per_pkg(counter);
-
-	ret = process_counter_maps(counter);
-	if (ret)
-		return ret;
-
-	if (aggr_mode != AGGR_GLOBAL)
-		return 0;
-
-	if (!counter->snapshot)
-		perf_evsel__compute_deltas(counter, -1, -1, aggr);
-	perf_counts_values__scale(aggr, scale, &counter->counts->scaled);
-
-	for (i = 0; i < 3; i++)
-		update_stats(&ps->res_stats[i], count[i]);
-
-	if (verbose) {
-		fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
-			perf_evsel__name(counter), count[0], count[1], count[2]);
-	}
-
-	/*
-	 * Save the full runtime - to allow normalization during printout:
-	 */
-	perf_stat__update_shadow_stats(counter, count, 0);
-
-	return 0;
-}
-
 /*
  * Read out the results of a single counter:
  * do not aggregate counts across CPUs in system-wide mode
@@ -351,7 +217,7 @@
 		if (read_counter(counter))
 			pr_warning("failed to read counter %s\n", counter->name);
 
-		if (process_counter(counter))
+		if (perf_stat_process_counter(&stat_config, counter))
 			pr_warning("failed to process counter %s\n", counter->name);
 
 		if (close_counters) {
@@ -402,6 +268,7 @@
 
 static int __run_perf_stat(int argc, const char **argv)
 {
+	int interval = stat_config.interval;
 	char msg[512];
 	unsigned long long t0, t1;
 	struct perf_evsel *counter;
@@ -545,13 +412,13 @@
 static void print_running(u64 run, u64 ena)
 {
 	if (csv_output) {
-		fprintf(output, "%s%" PRIu64 "%s%.2f",
+		fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
 					csv_sep,
 					run,
 					csv_sep,
 					ena ? 100.0 * run / ena : 100.0);
 	} else if (run != ena) {
-		fprintf(output, "  (%.2f%%)", 100.0 * run / ena);
+		fprintf(stat_config.output, "  (%.2f%%)", 100.0 * run / ena);
 	}
 }
 
@@ -560,9 +427,9 @@
 	double pct = rel_stddev_stats(total, avg);
 
 	if (csv_output)
-		fprintf(output, "%s%.2f%%", csv_sep, pct);
+		fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
 	else if (pct)
-		fprintf(output, "  ( +-%6.2f%% )", pct);
+		fprintf(stat_config.output, "  ( +-%6.2f%% )", pct);
 }
 
 static void print_noise(struct perf_evsel *evsel, double avg)
@@ -578,9 +445,9 @@
 
 static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
 {
-	switch (aggr_mode) {
+	switch (stat_config.aggr_mode) {
 	case AGGR_CORE:
-		fprintf(output, "S%d-C%*d%s%*d%s",
+		fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
 			cpu_map__id_to_socket(id),
 			csv_output ? 0 : -8,
 			cpu_map__id_to_cpu(id),
@@ -590,7 +457,7 @@
 			csv_sep);
 		break;
 	case AGGR_SOCKET:
-		fprintf(output, "S%*d%s%*d%s",
+		fprintf(stat_config.output, "S%*d%s%*d%s",
 			csv_output ? 0 : -5,
 			id,
 			csv_sep,
@@ -599,12 +466,12 @@
 			csv_sep);
 			break;
 	case AGGR_NONE:
-		fprintf(output, "CPU%*d%s",
+		fprintf(stat_config.output, "CPU%*d%s",
 			csv_output ? 0 : -4,
 			perf_evsel__cpus(evsel)->map[id], csv_sep);
 		break;
 	case AGGR_THREAD:
-		fprintf(output, "%*s-%*d%s",
+		fprintf(stat_config.output, "%*s-%*d%s",
 			csv_output ? 0 : 16,
 			thread_map__comm(evsel->threads, id),
 			csv_output ? 0 : -8,
@@ -619,6 +486,7 @@
 
 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
 {
+	FILE *output = stat_config.output;
 	double msecs = avg / 1e6;
 	const char *fmt_v, *fmt_n;
 	char name[25];
@@ -643,7 +511,7 @@
 	if (evsel->cgrp)
 		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 
-	if (csv_output || interval)
+	if (csv_output || stat_config.interval)
 		return;
 
 	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
@@ -655,6 +523,7 @@
 
 static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
 {
+	FILE *output = stat_config.output;
 	double sc =  evsel->scale;
 	const char *fmt;
 	int cpu = cpu_map__id_to_cpu(id);
@@ -670,7 +539,7 @@
 
 	aggr_printout(evsel, id, nr);
 
-	if (aggr_mode == AGGR_GLOBAL)
+	if (stat_config.aggr_mode == AGGR_GLOBAL)
 		cpu = 0;
 
 	fprintf(output, fmt, avg, csv_sep);
@@ -685,16 +554,18 @@
 	if (evsel->cgrp)
 		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 
-	if (csv_output || interval)
+	if (csv_output || stat_config.interval)
 		return;
 
-	perf_stat__print_shadow_stats(output, evsel, avg, cpu, aggr_mode);
+	perf_stat__print_shadow_stats(output, evsel, avg, cpu,
+				      stat_config.aggr_mode);
 }
 
 static void print_aggr(char *prefix)
 {
+	FILE *output = stat_config.output;
 	struct perf_evsel *counter;
-	int cpu, cpu2, s, s2, id, nr;
+	int cpu, s, s2, id, nr;
 	double uval;
 	u64 ena, run, val;
 
@@ -707,8 +578,7 @@
 			val = ena = run = 0;
 			nr = 0;
 			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
-				cpu2 = perf_evsel__cpus(counter)->map[cpu];
-				s2 = aggr_get_id(evsel_list->cpus, cpu2);
+				s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
 				if (s2 != id)
 					continue;
 				val += perf_counts(counter->counts, cpu, 0)->val;
@@ -761,6 +631,7 @@
 
 static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
 {
+	FILE *output = stat_config.output;
 	int nthreads = thread_map__nr(counter->threads);
 	int ncpus = cpu_map__nr(counter->cpus);
 	int cpu, thread;
@@ -799,6 +670,7 @@
  */
 static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
 {
+	FILE *output = stat_config.output;
 	struct perf_stat *ps = counter->priv;
 	double avg = avg_stats(&ps->res_stats[0]);
 	int scaled = counter->counts->scaled;
@@ -850,6 +722,7 @@
  */
 static void print_counter(struct perf_evsel *counter, char *prefix)
 {
+	FILE *output = stat_config.output;
 	u64 ena, run, val;
 	double uval;
 	int cpu;
@@ -904,12 +777,13 @@
 
 static void print_interval(char *prefix, struct timespec *ts)
 {
+	FILE *output = stat_config.output;
 	static int num_print_interval;
 
 	sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
 	if (num_print_interval == 0 && !csv_output) {
-		switch (aggr_mode) {
+		switch (stat_config.aggr_mode) {
 		case AGGR_SOCKET:
 			fprintf(output, "#           time socket cpus             counts %*s events\n", unit_width, "unit");
 			break;
@@ -934,6 +808,7 @@
 
 static void print_header(int argc, const char **argv)
 {
+	FILE *output = stat_config.output;
 	int i;
 
 	fflush(stdout);
@@ -963,6 +838,8 @@
 
 static void print_footer(void)
 {
+	FILE *output = stat_config.output;
+
 	if (!null_run)
 		fprintf(output, "\n");
 	fprintf(output, " %17.9f seconds time elapsed",
@@ -977,6 +854,7 @@
 
 static void print_counters(struct timespec *ts, int argc, const char **argv)
 {
+	int interval = stat_config.interval;
 	struct perf_evsel *counter;
 	char buf[64], *prefix = NULL;
 
@@ -985,7 +863,7 @@
 	else
 		print_header(argc, argv);
 
-	switch (aggr_mode) {
+	switch (stat_config.aggr_mode) {
 	case AGGR_CORE:
 	case AGGR_SOCKET:
 		print_aggr(prefix);
@@ -1009,14 +887,14 @@
 	if (!interval && !csv_output)
 		print_footer();
 
-	fflush(output);
+	fflush(stat_config.output);
 }
 
 static volatile int signr = -1;
 
 static void skip_signal(int signo)
 {
-	if ((child_pid == -1) || interval)
+	if ((child_pid == -1) || stat_config.interval)
 		done = 1;
 
 	signr = signo;
@@ -1064,7 +942,7 @@
 
 static int perf_stat_init_aggr_mode(void)
 {
-	switch (aggr_mode) {
+	switch (stat_config.aggr_mode) {
 	case AGGR_SOCKET:
 		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
 			perror("cannot build socket map");
@@ -1270,7 +1148,7 @@
 		    "system-wide collection from all CPUs"),
 	OPT_BOOLEAN('g', "group", &group,
 		    "put the counters into a counter group"),
-	OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"),
+	OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
 	OPT_INCR('v', "verbose", &verbose,
 		    "be more verbose (show counter open errors, etc)"),
 	OPT_INTEGER('r', "repeat", &run_count,
@@ -1286,7 +1164,7 @@
 			   stat__set_big_num),
 	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
 		    "list of cpus to monitor in system-wide"),
-	OPT_SET_UINT('A', "no-aggr", &aggr_mode,
+	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
 		    "disable CPU count aggregation", AGGR_NONE),
 	OPT_STRING('x', "field-separator", &csv_sep, "separator",
 		   "print counts with custom separator"),
@@ -1300,13 +1178,13 @@
 			"command to run prior to the measured command"),
 	OPT_STRING(0, "post", &post_cmd, "command",
 			"command to run after to the measured command"),
-	OPT_UINTEGER('I', "interval-print", &interval,
+	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
 		    "print counts at regular interval in ms (>= 100)"),
-	OPT_SET_UINT(0, "per-socket", &aggr_mode,
+	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
 		     "aggregate counts per processor socket", AGGR_SOCKET),
-	OPT_SET_UINT(0, "per-core", &aggr_mode,
+	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
 		     "aggregate counts per physical processor core", AGGR_CORE),
-	OPT_SET_UINT(0, "per-thread", &aggr_mode,
+	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
 		     "aggregate counts per thread", AGGR_THREAD),
 	OPT_UINTEGER('D', "delay", &initial_delay,
 		     "ms to wait before starting measurement after program start"),
@@ -1318,6 +1196,8 @@
 	};
 	int status = -EINVAL, run_idx;
 	const char *mode;
+	FILE *output = stderr;
+	unsigned int interval;
 
 	setlocale(LC_ALL, "");
 
@@ -1328,7 +1208,8 @@
 	argc = parse_options(argc, argv, options, stat_usage,
 		PARSE_OPT_STOP_AT_NON_OPTION);
 
-	output = stderr;
+	interval = stat_config.interval;
+
 	if (output_name && strcmp(output_name, "-"))
 		output = NULL;
 
@@ -1365,6 +1246,8 @@
 		}
 	}
 
+	stat_config.output = output;
+
 	if (csv_sep) {
 		csv_output = true;
 		if (!strcmp(csv_sep, "\\t"))
@@ -1399,7 +1282,7 @@
 		run_count = 1;
 	}
 
-	if ((aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
+	if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
 		fprintf(stderr, "The --per-thread option is only available "
 			"when monitoring via -p -t options.\n");
 		parse_options_usage(NULL, options, "p", 1);
@@ -1411,7 +1294,8 @@
 	 * no_aggr, cgroup are for system-wide only
 	 * --per-thread is aggregated per thread, we dont mix it with cpu mode
 	 */
-	if (((aggr_mode != AGGR_GLOBAL && aggr_mode != AGGR_THREAD) || nr_cgroups) &&
+	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
+	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
 	    !target__has_cpu(&target)) {
 		fprintf(stderr, "both cgroup and no-aggregation "
 			"modes only available in system-wide mode\n");
@@ -1444,7 +1328,7 @@
 	 * Initialize thread_map with comm names,
 	 * so we could print it out on output.
 	 */
-	if (aggr_mode == AGGR_THREAD)
+	if (stat_config.aggr_mode == AGGR_THREAD)
 		thread_map__read_comms(evsel_list->threads);
 
 	if (interval && interval < 100) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6135cc0..8c465c8 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -40,6 +40,7 @@
 #include "util/xyarray.h"
 #include "util/sort.h"
 #include "util/intlist.h"
+#include "util/parse-branch-options.h"
 #include "arch/common.h"
 
 #include "util/debug.h"
@@ -695,6 +696,8 @@
 		perf_top__record_precise_ip(top, he, evsel->idx, ip);
 	}
 
+	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
+		     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
 	return 0;
 }
 
@@ -1171,6 +1174,12 @@
 		   "don't try to adjust column width, use these fixed values"),
 	OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
 			"per thread proc mmap processing timeout in ms"),
+	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
+		     "branch any", "sample any taken branches",
+		     parse_branch_stack),
+	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
+		     "branch filter mask", "branch stack filter modes",
+		     parse_branch_stack),
 	OPT_END()
 	};
 	const char * const top_usage[] = {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 39ad4d0..4e3abba 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1,8 +1,27 @@
+/*
+ * builtin-trace.c
+ *
+ * Builtin 'trace' command:
+ *
+ * Display a continuously updated trace of any workload, CPU, specific PID,
+ * system wide, etc.  Default format is loosely strace like, but any other
+ * event may be specified using --event.
+ *
+ * Copyright (C) 2012, 2013, 2014, 2015 Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Initially based on the 'trace' prototype by Thomas Gleixner:
+ *
+ * http://lwn.net/Articles/415728/ ("Announcing a new utility: 'trace'")
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
 #include <traceevent/event-parse.h>
 #include "builtin.h"
 #include "util/color.h"
 #include "util/debug.h"
 #include "util/evlist.h"
+#include "util/exec_cmd.h"
 #include "util/machine.h"
 #include "util/session.h"
 #include "util/thread.h"
@@ -26,6 +45,7 @@
 
 #ifndef MADV_HWPOISON
 # define MADV_HWPOISON		100
+
 #endif
 
 #ifndef MADV_MERGEABLE
@@ -247,42 +267,6 @@
 	({ struct syscall_tp *fields = evsel->priv; \
 	   fields->name.pointer(&fields->name, sample); })
 
-static int perf_evlist__add_syscall_newtp(struct perf_evlist *evlist,
-					  void *sys_enter_handler,
-					  void *sys_exit_handler)
-{
-	int ret = -1;
-	struct perf_evsel *sys_enter, *sys_exit;
-
-	sys_enter = perf_evsel__syscall_newtp("sys_enter", sys_enter_handler);
-	if (sys_enter == NULL)
-		goto out;
-
-	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
-		goto out_delete_sys_enter;
-
-	sys_exit = perf_evsel__syscall_newtp("sys_exit", sys_exit_handler);
-	if (sys_exit == NULL)
-		goto out_delete_sys_enter;
-
-	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
-		goto out_delete_sys_exit;
-
-	perf_evlist__add(evlist, sys_enter);
-	perf_evlist__add(evlist, sys_exit);
-
-	ret = 0;
-out:
-	return ret;
-
-out_delete_sys_exit:
-	perf_evsel__delete_priv(sys_exit);
-out_delete_sys_enter:
-	perf_evsel__delete_priv(sys_enter);
-	goto out;
-}
-
-
 struct syscall_arg {
 	unsigned long val;
 	struct thread *thread;
@@ -604,6 +588,15 @@
 static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
 static DEFINE_STRARRAY(itimers);
 
+static const char *keyctl_options[] = {
+	"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
+	"SETPERM", "DESCRIBE", "CLEAR", "LINK", "UNLINK", "SEARCH", "READ",
+	"INSTANTIATE", "NEGATE", "SET_REQKEY_KEYRING", "SET_TIMEOUT",
+	"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
+	"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
+};
+static DEFINE_STRARRAY(keyctl_options);
+
 static const char *whences[] = { "SET", "CUR", "END",
 #ifdef SEEK_DATA
 "DATA",
@@ -634,7 +627,8 @@
 
 static const char *clockid[] = {
 	"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
-	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE",
+	"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
+	"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
 };
 static DEFINE_STRARRAY(clockid);
 
@@ -779,6 +773,11 @@
 
 #define SCA_ACCMODE syscall_arg__scnprintf_access_mode
 
+static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
+					      struct syscall_arg *arg);
+
+#define SCA_FILENAME syscall_arg__scnprintf_filename
+
 static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
 					       struct syscall_arg *arg)
 {
@@ -1006,14 +1005,23 @@
 	bool	   hexret;
 } syscall_fmts[] = {
 	{ .name	    = "access",	    .errmsg = true,
-	  .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
+			     [1] = SCA_ACCMODE,  /* mode */ }, },
 	{ .name	    = "arch_prctl", .errmsg = true, .alias = "prctl", },
 	{ .name	    = "brk",	    .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
+	{ .name	    = "chdir",	    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "chmod",	    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "chroot",	    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
 	{ .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
 	{ .name	    = "close",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
 	{ .name	    = "connect",    .errmsg = true, },
+	{ .name	    = "creat",	    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "dup",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "dup2",	    .errmsg = true,
@@ -1024,7 +1032,8 @@
 	{ .name	    = "eventfd2",   .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
 	{ .name	    = "faccessat",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "fadvise64",  .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "fallocate",  .errmsg = true,
@@ -1034,11 +1043,13 @@
 	{ .name	    = "fchmod",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "fchmodat",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "fchown",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "fchownat",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "fcntl",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
 			     [1] = SCA_STRARRAY, /* cmd */ },
@@ -1053,7 +1064,8 @@
 	{ .name	    = "fstat",	    .errmsg = true, .alias = "newfstat",
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "fstatat",    .errmsg = true, .alias = "newfstatat",
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "fstatfs",    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "fsync",    .errmsg = true,
@@ -1063,13 +1075,18 @@
 	{ .name	    = "futex",	    .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
 	{ .name	    = "futimesat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "getdents",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "getdents64", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
 	{ .name	    = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
+	{ .name	    = "getxattr",    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "inotify_add_watch",	    .errmsg = true,
+	  .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "ioctl",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
 #if defined(__i386__) || defined(__x86_64__)
@@ -1082,22 +1099,44 @@
 #else
 			     [2] = SCA_HEX, /* arg */ }, },
 #endif
+	{ .name	    = "keyctl",	    .errmsg = true, STRARRAY(0, option, keyctl_options), },
 	{ .name	    = "kill",	    .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
+	{ .name	    = "lchown",    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "lgetxattr",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "linkat",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	{ .name	    = "listxattr",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "llistxattr", .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "lremovexattr",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "lseek",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
 			     [2] = SCA_STRARRAY, /* whence */ },
 	  .arg_parm	 = { [2] = &strarray__whences, /* whence */ }, },
-	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat", },
+	{ .name	    = "lsetxattr",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "lstat",	    .errmsg = true, .alias = "newlstat",
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "lsxattr",    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name     = "madvise",    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX,	 /* start */
 			     [2] = SCA_MADV_BHV, /* behavior */ }, },
+	{ .name	    = "mkdir",    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "mkdirat",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
+			     [1] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "mknod",      .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "mknodat",    .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "mlock",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
 	{ .name	    = "mlockall",   .errmsg = true,
@@ -1110,6 +1149,8 @@
 	{ .name	    = "mprotect",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* start */
 			     [2] = SCA_MMAP_PROT, /* prot */ }, },
+	{ .name	    = "mq_unlink", .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* u_name */ }, },
 	{ .name	    = "mremap",	    .hexret = true,
 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */
 			     [3] = SCA_MREMAP_FLAGS, /* flags */
@@ -1121,14 +1162,17 @@
 	{ .name	    = "name_to_handle_at", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
 	{ .name	    = "newfstatat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "open",	    .errmsg = true,
-	  .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FILENAME,   /* filename */
+			     [1] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "open_by_handle_at", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "openat",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+			     [1] = SCA_FILENAME, /* filename */
 			     [2] = SCA_OPEN_FLAGS, /* flags */ }, },
 	{ .name	    = "perf_event_open", .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_INT, /* pid */
@@ -1150,18 +1194,28 @@
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "read",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+	{ .name	    = "readlink",   .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
 	{ .name	    = "readlinkat", .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+			     [1] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "readv",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "recvfrom",   .errmsg = true,
-	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "recvmmsg",   .errmsg = true,
-	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "recvmsg",    .errmsg = true,
-	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	{ .name	    = "removexattr", .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "renameat",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	{ .name	    = "rmdir",    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "rt_sigaction", .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
 	{ .name	    = "rt_sigprocmask",  .errmsg = true, STRARRAY(0, how, sighow), },
@@ -1171,13 +1225,18 @@
 	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
 	{ .name	    = "select",	    .errmsg = true, .timeout = true, },
 	{ .name	    = "sendmmsg",    .errmsg = true,
-	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "sendmsg",    .errmsg = true,
-	  .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+			     [2] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "sendto",	    .errmsg = true,
-	  .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */
+			     [3] = SCA_MSG_FLAGS, /* flags */ }, },
 	{ .name	    = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
 	{ .name	    = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
+	{ .name	    = "setxattr",   .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
 	{ .name	    = "shutdown",   .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "socket",	    .errmsg = true,
@@ -1188,18 +1247,35 @@
 	  .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
 			     [1] = SCA_SK_TYPE, /* type */ },
 	  .arg_parm	 = { [0] = &strarray__socket_families, /* family */ }, },
-	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat", },
+	{ .name	    = "stat",	    .errmsg = true, .alias = "newstat",
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "statfs",	    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "swapoff",    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
+	{ .name	    = "swapon",	    .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
 	{ .name	    = "symlinkat",  .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
 	{ .name	    = "tgkill",	    .errmsg = true,
 	  .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
 	{ .name	    = "tkill",	    .errmsg = true,
 	  .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
+	{ .name	    = "truncate",   .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
 	{ .name	    = "uname",	    .errmsg = true, .alias = "newuname", },
 	{ .name	    = "unlinkat",   .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
+			     [1] = SCA_FILENAME, /* pathname */ }, },
+	{ .name	    = "utime",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
 	{ .name	    = "utimensat",  .errmsg = true,
-	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
+	  .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
+			     [1] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "utimes",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+	{ .name	    = "vmsplice",  .errmsg = true,
+	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "write",	    .errmsg = true,
 	  .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 	{ .name	    = "writev",	    .errmsg = true,
@@ -1223,7 +1299,6 @@
 	int		    nr_args;
 	struct format_field *args;
 	const char	    *name;
-	bool		    filtered;
 	bool		    is_exit;
 	struct syscall_fmt  *fmt;
 	size_t		    (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
@@ -1244,6 +1319,11 @@
 	return printed + fprintf(fp, "): ");
 }
 
+/**
+ * filename.ptr: The filename char pointer that will be vfs_getname'd
+ * filename.entry_str_pos: Where to insert the string translated from
+ *                         filename.ptr by the vfs_getname tracepoint/kprobe.
+ */
 struct thread_trace {
 	u64		  entry_time;
 	u64		  exit_time;
@@ -1252,6 +1332,13 @@
 	unsigned long	  pfmaj, pfmin;
 	char		  *entry_str;
 	double		  runtime_ms;
+	struct {
+		unsigned long ptr;
+		short int     entry_str_pos;
+		bool	      pending_open;
+		unsigned int  namelen;
+		char	      *name;
+	} filename;
 	struct {
 		int	  max;
 		char	  **table;
@@ -1298,6 +1385,8 @@
 #define TRACE_PFMAJ		(1 << 0)
 #define TRACE_PFMIN		(1 << 1)
 
+static const size_t trace__entry_str_size = 2048;
+
 struct trace {
 	struct perf_tool	tool;
 	struct {
@@ -1307,6 +1396,10 @@
 	struct {
 		int		max;
 		struct syscall  *table;
+		struct {
+			struct perf_evsel *sys_enter,
+					  *sys_exit;
+		}		events;
 	} syscalls;
 	struct record_opts	opts;
 	struct perf_evlist	*evlist;
@@ -1316,7 +1409,10 @@
 	FILE			*output;
 	unsigned long		nr_events;
 	struct strlist		*ev_qualifier;
-	const char 		*last_vfs_getname;
+	struct {
+		size_t		nr;
+		int		*entries;
+	}			ev_qualifier_ids;
 	struct intlist		*tid_list;
 	struct intlist		*pid_list;
 	struct {
@@ -1340,6 +1436,7 @@
 	bool			show_tool_stats;
 	bool			trace_syscalls;
 	bool			force;
+	bool			vfs_getname;
 	int			trace_pgfaults;
 };
 
@@ -1443,6 +1540,27 @@
 	return printed;
 }
 
+static void thread__set_filename_pos(struct thread *thread, const char *bf,
+				     unsigned long ptr)
+{
+	struct thread_trace *ttrace = thread__priv(thread);
+
+	ttrace->filename.ptr = ptr;
+	ttrace->filename.entry_str_pos = bf - ttrace->entry_str;
+}
+
+static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
+					      struct syscall_arg *arg)
+{
+	unsigned long ptr = arg->val;
+
+	if (!arg->trace->vfs_getname)
+		return scnprintf(bf, size, "%#x", ptr);
+
+	thread__set_filename_pos(arg->thread, bf, ptr);
+	return 0;
+}
+
 static bool trace__filter_duration(struct trace *trace, double t)
 {
 	return t < (trace->duration_filter * NSEC_PER_MSEC);
@@ -1517,6 +1635,9 @@
 	if (trace->host == NULL)
 		return -ENOMEM;
 
+	if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
+		return -errno;
+
 	err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
 					    evlist->threads, trace__tool_process, false,
 					    trace->opts.proc_map_timeout);
@@ -1578,19 +1699,6 @@
 	sc = trace->syscalls.table + id;
 	sc->name = name;
 
-	if (trace->ev_qualifier) {
-		bool in = strlist__find(trace->ev_qualifier, name) != NULL;
-
-		if (!(in ^ trace->not_ev_qualifier)) {
-			sc->filtered = true;
-			/*
-			 * No need to do read tracepoint information since this will be
-			 * filtered out.
-			 */
-			return 0;
-		}
-	}
-
 	sc->fmt  = syscall_fmt__find(sc->name);
 
 	snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
@@ -1619,13 +1727,27 @@
 
 static int trace__validate_ev_qualifier(struct trace *trace)
 {
-	int err = 0;
+	int err = 0, i;
 	struct str_node *pos;
 
+	trace->ev_qualifier_ids.nr = strlist__nr_entries(trace->ev_qualifier);
+	trace->ev_qualifier_ids.entries = malloc(trace->ev_qualifier_ids.nr *
+						 sizeof(trace->ev_qualifier_ids.entries[0]));
+
+	if (trace->ev_qualifier_ids.entries == NULL) {
+		fputs("Error:\tNot enough memory for allocating events qualifier ids\n",
+		       trace->output);
+		err = -EINVAL;
+		goto out;
+	}
+
+	i = 0;
+
 	strlist__for_each(pos, trace->ev_qualifier) {
 		const char *sc = pos->s;
+		int id = audit_name_to_syscall(sc, trace->audit.machine);
 
-		if (audit_name_to_syscall(sc, trace->audit.machine) < 0) {
+		if (id < 0) {
 			if (err == 0) {
 				fputs("Error:\tInvalid syscall ", trace->output);
 				err = -EINVAL;
@@ -1635,13 +1757,17 @@
 
 			fputs(sc, trace->output);
 		}
+
+		trace->ev_qualifier_ids.entries[i++] = id;
 	}
 
 	if (err < 0) {
 		fputs("\nHint:\ttry 'perf list syscalls:sys_enter_*'"
 		      "\nHint:\tand: 'man syscalls'\n", trace->output);
+		zfree(&trace->ev_qualifier_ids.entries);
+		trace->ev_qualifier_ids.nr = 0;
 	}
-
+out:
 	return err;
 }
 
@@ -1833,9 +1959,6 @@
 	if (sc == NULL)
 		return -1;
 
-	if (sc->filtered)
-		return 0;
-
 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
 	ttrace = thread__trace(thread, trace->output);
 	if (ttrace == NULL)
@@ -1844,7 +1967,7 @@
 	args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 
 	if (ttrace->entry_str == NULL) {
-		ttrace->entry_str = malloc(1024);
+		ttrace->entry_str = malloc(trace__entry_str_size);
 		if (!ttrace->entry_str)
 			goto out_put;
 	}
@@ -1854,9 +1977,9 @@
 
 	ttrace->entry_time = sample->time;
 	msg = ttrace->entry_str;
-	printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
+	printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
 
-	printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed,
+	printed += syscall__scnprintf_args(sc, msg + printed, trace__entry_str_size - printed,
 					   args, trace, thread);
 
 	if (sc->is_exit) {
@@ -1864,8 +1987,11 @@
 			trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
 			fprintf(trace->output, "%-70s\n", ttrace->entry_str);
 		}
-	} else
+	} else {
 		ttrace->entry_pending = true;
+		/* See trace__vfs_getname & trace__sys_exit */
+		ttrace->filename.pending_open = false;
+	}
 
 	if (trace->current != thread) {
 		thread__put(trace->current);
@@ -1891,9 +2017,6 @@
 	if (sc == NULL)
 		return -1;
 
-	if (sc->filtered)
-		return 0;
-
 	thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
 	ttrace = thread__trace(thread, trace->output);
 	if (ttrace == NULL)
@@ -1904,9 +2027,9 @@
 
 	ret = perf_evsel__sc_tp_uint(evsel, ret, sample);
 
-	if (id == trace->audit.open_id && ret >= 0 && trace->last_vfs_getname) {
-		trace__set_fd_pathname(thread, ret, trace->last_vfs_getname);
-		trace->last_vfs_getname = NULL;
+	if (id == trace->audit.open_id && ret >= 0 && ttrace->filename.pending_open) {
+		trace__set_fd_pathname(thread, ret, ttrace->filename.name);
+		ttrace->filename.pending_open = false;
 		++trace->stats.vfs_getname;
 	}
 
@@ -1961,7 +2084,56 @@
 			      union perf_event *event __maybe_unused,
 			      struct perf_sample *sample)
 {
-	trace->last_vfs_getname = perf_evsel__rawptr(evsel, sample, "pathname");
+	struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+	struct thread_trace *ttrace;
+	size_t filename_len, entry_str_len, to_move;
+	ssize_t remaining_space;
+	char *pos;
+	const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
+
+	if (!thread)
+		goto out;
+
+	ttrace = thread__priv(thread);
+	if (!ttrace)
+		goto out;
+
+	filename_len = strlen(filename);
+
+	if (ttrace->filename.namelen < filename_len) {
+		char *f = realloc(ttrace->filename.name, filename_len + 1);
+
+		if (f == NULL)
+			goto out;
+
+		ttrace->filename.namelen = filename_len;
+		ttrace->filename.name = f;
+	}
+
+	strcpy(ttrace->filename.name, filename);
+	ttrace->filename.pending_open = true;
+
+	if (!ttrace->filename.ptr)
+		goto out;
+
+	entry_str_len = strlen(ttrace->entry_str);
+	remaining_space = trace__entry_str_size - entry_str_len - 1; /* \0 */
+	if (remaining_space <= 0)
+		goto out;
+
+	if (filename_len > (size_t)remaining_space) {
+		filename += filename_len - remaining_space;
+		filename_len = remaining_space;
+	}
+
+	to_move = entry_str_len - ttrace->filename.entry_str_pos + 1; /* \0 */
+	pos = ttrace->entry_str + ttrace->filename.entry_str_pos;
+	memmove(pos + filename_len, pos, to_move);
+	memcpy(pos, filename, filename_len);
+
+	ttrace->filename.ptr = 0;
+	ttrace->filename.entry_str_pos = 0;
+out:
 	return 0;
 }
 
@@ -2214,19 +2386,20 @@
 
 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
 
-static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
+static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
 	if (evsel == NULL)
-		return;
+		return false;
 
 	if (perf_evsel__field(evsel, "pathname") == NULL) {
 		perf_evsel__delete(evsel);
-		return;
+		return false;
 	}
 
 	evsel->handler = trace__vfs_getname;
 	perf_evlist__add(evlist, evsel);
+	return true;
 }
 
 static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
@@ -2283,9 +2456,68 @@
 	}
 }
 
+static int trace__add_syscall_newtp(struct trace *trace)
+{
+	int ret = -1;
+	struct perf_evlist *evlist = trace->evlist;
+	struct perf_evsel *sys_enter, *sys_exit;
+
+	sys_enter = perf_evsel__syscall_newtp("sys_enter", trace__sys_enter);
+	if (sys_enter == NULL)
+		goto out;
+
+	if (perf_evsel__init_sc_tp_ptr_field(sys_enter, args))
+		goto out_delete_sys_enter;
+
+	sys_exit = perf_evsel__syscall_newtp("sys_exit", trace__sys_exit);
+	if (sys_exit == NULL)
+		goto out_delete_sys_enter;
+
+	if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
+		goto out_delete_sys_exit;
+
+	perf_evlist__add(evlist, sys_enter);
+	perf_evlist__add(evlist, sys_exit);
+
+	trace->syscalls.events.sys_enter = sys_enter;
+	trace->syscalls.events.sys_exit  = sys_exit;
+
+	ret = 0;
+out:
+	return ret;
+
+out_delete_sys_exit:
+	perf_evsel__delete_priv(sys_exit);
+out_delete_sys_enter:
+	perf_evsel__delete_priv(sys_enter);
+	goto out;
+}
+
+static int trace__set_ev_qualifier_filter(struct trace *trace)
+{
+	int err = -1;
+	char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
+						trace->ev_qualifier_ids.nr,
+						trace->ev_qualifier_ids.entries);
+
+	if (filter == NULL)
+		goto out_enomem;
+
+	if (!perf_evsel__append_filter(trace->syscalls.events.sys_enter, "&&", filter))
+		err = perf_evsel__append_filter(trace->syscalls.events.sys_exit, "&&", filter);
+
+	free(filter);
+out:
+	return err;
+out_enomem:
+	errno = ENOMEM;
+	goto out;
+}
+
 static int trace__run(struct trace *trace, int argc, const char **argv)
 {
 	struct perf_evlist *evlist = trace->evlist;
+	struct perf_evsel *evsel;
 	int err = -1, i;
 	unsigned long before;
 	const bool forks = argc > 0;
@@ -2293,13 +2525,11 @@
 
 	trace->live = true;
 
-	if (trace->trace_syscalls &&
-	    perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
-					   trace__sys_exit))
+	if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
 		goto out_error_raw_syscalls;
 
 	if (trace->trace_syscalls)
-		perf_evlist__add_vfs_getname(evlist);
+		trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
 
 	if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
 	    perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
@@ -2356,11 +2586,22 @@
 	else if (thread_map__pid(evlist->threads, 0) == -1)
 		err = perf_evlist__set_filter_pid(evlist, getpid());
 
-	if (err < 0) {
-		printf("err=%d,%s\n", -err, strerror(-err));
-		exit(1);
+	if (err < 0)
+		goto out_error_mem;
+
+	if (trace->ev_qualifier_ids.nr > 0) {
+		err = trace__set_ev_qualifier_filter(trace);
+		if (err < 0)
+			goto out_errno;
+
+		pr_debug("event qualifier tracepoint filter: %s\n",
+			 trace->syscalls.events.sys_exit->filter);
 	}
 
+	err = perf_evlist__apply_filters(evlist, &evsel);
+	if (err < 0)
+		goto out_error_apply_filters;
+
 	err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
 	if (err < 0)
 		goto out_error_mmap;
@@ -2462,10 +2703,21 @@
 out_error:
 	fprintf(trace->output, "%s\n", errbuf);
 	goto out_delete_evlist;
+
+out_error_apply_filters:
+	fprintf(trace->output,
+		"Failed to set filter \"%s\" on event %s with %d (%s)\n",
+		evsel->filter, perf_evsel__name(evsel), errno,
+		strerror_r(errno, errbuf, sizeof(errbuf)));
+	goto out_delete_evlist;
 }
 out_error_mem:
 	fprintf(trace->output, "Not enough memory to run!\n");
 	goto out_delete_evlist;
+
+out_errno:
+	fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
+	goto out_delete_evlist;
 }
 
 static int trace__replay(struct trace *trace)
@@ -2586,9 +2838,9 @@
 
 	printed += fprintf(fp, "\n");
 
-	printed += fprintf(fp, "   syscall            calls      min       avg       max      stddev\n");
-	printed += fprintf(fp, "                               (msec)    (msec)    (msec)        (%%)\n");
-	printed += fprintf(fp, "   --------------- -------- --------- --------- ---------     ------\n");
+	printed += fprintf(fp, "   syscall            calls    total       min       avg       max      stddev\n");
+	printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
+	printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
 
 	/* each int_node is a syscall */
 	while (inode) {
@@ -2605,8 +2857,8 @@
 
 			sc = &trace->syscalls.table[inode->i];
 			printed += fprintf(fp, "   %-15s", sc->name);
-			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f",
-					   n, min, avg);
+			printed += fprintf(fp, " %8" PRIu64 " %9.3f %9.3f %9.3f",
+					   n, avg * n, min, avg);
 			printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct);
 		}
 
@@ -2778,7 +3030,7 @@
 			.mmap_pages    = UINT_MAX,
 			.proc_map_timeout  = 500,
 		},
-		.output = stdout,
+		.output = stderr,
 		.show_comm = true,
 		.trace_syscalls = true,
 	};
@@ -2879,11 +3131,14 @@
 
 	if (ev_qualifier_str != NULL) {
 		const char *s = ev_qualifier_str;
+		struct strlist_config slist_config = {
+			.dirname = system_path(STRACE_GROUPS_DIR),
+		};
 
 		trace.not_ev_qualifier = *s == '!';
 		if (trace.not_ev_qualifier)
 			++s;
-		trace.ev_qualifier = strlist__new(true, s);
+		trace.ev_qualifier = strlist__new(s, &slist_config);
 		if (trace.ev_qualifier == NULL) {
 			fputs("Not enough memory to parse event qualifier",
 			      trace.output);
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index d31fac1..827557f 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -11,7 +11,7 @@
 obj-perf := $(abspath $(obj-perf))/
 endif
 
-$(shell echo -n > $(OUTPUT).config-detected)
+$(shell printf "" > $(OUTPUT).config-detected)
 detected     = $(shell echo "$(1)=y"       >> $(OUTPUT).config-detected)
 detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected)
 
@@ -297,7 +297,11 @@
     else
       CFLAGS += -DHAVE_DWARF_SUPPORT $(LIBDW_CFLAGS)
       LDFLAGS += $(LIBDW_LDFLAGS)
-      EXTLIBS += -ldw
+      DWARFLIBS := -ldw
+      ifeq ($(findstring -static,${LDFLAGS}),-static)
+	DWARFLIBS += -lelf -lebl -lz -llzma -lbz2
+      endif
+      EXTLIBS += ${DWARFLIBS}
       $(call detected,CONFIG_DWARF)
     endif # PERF_HAVE_DWARF_REGS
   endif # NO_DWARF
@@ -644,6 +648,7 @@
 perfexecdir = libexec/perf-core
 sharedir = $(prefix)/share
 template_dir = share/perf-core/templates
+STRACE_GROUPS_DIR = share/perf-core/strace/groups
 htmldir = share/doc/perf-doc
 ifeq ($(prefix),/usr)
 sysconfdir = /etc
@@ -663,6 +668,7 @@
 
 # Shell quote (do not use $(call) to accommodate ancient setups);
 ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
+STRACE_GROUPS_DIR_SQ = $(subst ','\'',$(STRACE_GROUPS_DIR))
 DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
 bindir_SQ = $(subst ','\'',$(bindir))
 mandir_SQ = $(subst ','\'',$(mandir))
@@ -676,10 +682,13 @@
 
 ifneq ($(filter /%,$(firstword $(perfexecdir))),)
 perfexec_instdir = $(perfexecdir)
+STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR)
 else
 perfexec_instdir = $(prefix)/$(perfexecdir)
+STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR)
 endif
 perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
+STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR))
 
 # If we install to $(HOME) we keep the traceevent default:
 # $(HOME)/.traceevent/plugins
@@ -713,6 +722,7 @@
 $(call detected_var,infodir_SQ)
 $(call detected_var,mandir_SQ)
 $(call detected_var,ETC_PERFCONFIG_SQ)
+$(call detected_var,STRACE_GROUPS_DIR_SQ)
 $(call detected_var,prefix_SQ)
 $(call detected_var,perfexecdir_SQ)
 $(call detected_var,LIBDIR)
diff --git a/tools/perf/perf-with-kcore.sh b/tools/perf/perf-with-kcore.sh
index c7ff90a..7e47a7c 100644
--- a/tools/perf/perf-with-kcore.sh
+++ b/tools/perf/perf-with-kcore.sh
@@ -50,7 +50,7 @@
 	fi
 
 	rm -f perf.data.junk
-	("$PERF" record -o perf.data.junk $PERF_OPTIONS -- sleep 60) >/dev/null 2>/dev/null &
+	("$PERF" record -o perf.data.junk "${PERF_OPTIONS[@]}" -- sleep 60) >/dev/null 2>/dev/null &
 	PERF_PID=$!
 
 	# Need to make sure that perf has started
@@ -160,18 +160,18 @@
 			echo "*** WARNING *** /proc/sys/kernel/kptr_restrict prevents access to kernel addresses" >&2
 		fi
 
-		if echo "$PERF_OPTIONS" | grep -q ' -a \|^-a \| -a$\|^-a$\| --all-cpus \|^--all-cpus \| --all-cpus$\|^--all-cpus$' ; then
+		if echo "${PERF_OPTIONS[@]}" | grep -q ' -a \|^-a \| -a$\|^-a$\| --all-cpus \|^--all-cpus \| --all-cpus$\|^--all-cpus$' ; then
 			echo "*** WARNING *** system-wide tracing without root access will not be able to read all necessary information from /proc" >&2
 		fi
 
-		if echo "$PERF_OPTIONS" | grep -q 'intel_pt\|intel_bts\| -I\|^-I' ; then
+		if echo "${PERF_OPTIONS[@]}" | grep -q 'intel_pt\|intel_bts\| -I\|^-I' ; then
 			if [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt -1 ] ; then
 				echo "*** WARNING *** /proc/sys/kernel/perf_event_paranoid restricts buffer size and tracepoint (sched_switch) use" >&2
 			fi
 
-			if echo "$PERF_OPTIONS" | grep -q ' --per-thread \|^--per-thread \| --per-thread$\|^--per-thread$' ; then
+			if echo "${PERF_OPTIONS[@]}" | grep -q ' --per-thread \|^--per-thread \| --per-thread$\|^--per-thread$' ; then
 				true
-			elif echo "$PERF_OPTIONS" | grep -q ' -t \|^-t \| -t$\|^-t$' ; then
+			elif echo "${PERF_OPTIONS[@]}" | grep -q ' -t \|^-t \| -t$\|^-t$' ; then
 				true
 			elif [ ! -r /sys/kernel/debug -o ! -x /sys/kernel/debug ] ; then
 				echo "*** WARNING *** /sys/kernel/debug permissions prevent tracepoint (sched_switch) use" >&2
@@ -193,8 +193,8 @@
 
 	mkdir "$PERF_DATA_DIR"
 
-	echo "$PERF record -o $PERF_DATA_DIR/perf.data $PERF_OPTIONS -- $*"
-	"$PERF" record -o "$PERF_DATA_DIR/perf.data" $PERF_OPTIONS -- $* || true
+	echo "$PERF record -o $PERF_DATA_DIR/perf.data ${PERF_OPTIONS[@]} -- $@"
+	"$PERF" record -o "$PERF_DATA_DIR/perf.data" "${PERF_OPTIONS[@]}" -- "$@" || true
 
 	if rmdir "$PERF_DATA_DIR" > /dev/null 2>/dev/null ; then
 		exit 1
@@ -209,8 +209,8 @@
 {
 	find_perf
 	check_buildid_cache_permissions
-	echo "$PERF $PERF_SUB_COMMAND -i $PERF_DATA_DIR/perf.data --kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms $*"
-	"$PERF" $PERF_SUB_COMMAND -i "$PERF_DATA_DIR/perf.data" "--kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms" $*
+	echo "$PERF $PERF_SUB_COMMAND -i $PERF_DATA_DIR/perf.data --kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms $@"
+	"$PERF" $PERF_SUB_COMMAND -i "$PERF_DATA_DIR/perf.data" "--kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms" "$@"
 }
 
 if [ "$1" = "fix_buildid_cache_permissions" ] ; then
@@ -234,7 +234,7 @@
 case "$PERF_SUB_COMMAND" in
 "record")
 	while [ "$1" != "--" ] ; do
-		PERF_OPTIONS+="$1 "
+		PERF_OPTIONS+=("$1")
 		shift || break
 	done
 	if [ "$1" != "--" ] ; then
@@ -242,16 +242,16 @@
 		usage
 	fi
 	shift
-	record $*
+	record "$@"
 ;;
 "script")
-	subcommand $*
+	subcommand "$@"
 ;;
 "report")
-	subcommand $*
+	subcommand "$@"
 ;;
 "inject")
-	subcommand $*
+	subcommand "$@"
 ;;
 *)
 	usage
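
The perf-with-kcore.sh changes above convert PERF_OPTIONS from a whitespace-joined string into a bash array, so quoted expansion with "${PERF_OPTIONS[@]}" hands each saved option to perf as exactly one argument even when it contains spaces or quotes. A small sketch of the difference, not taken from the script itself:

	# flat string: embedded quotes stay literal and the value is re-split on whitespace
	opts_str+="--filter 'period > 10' "
	# bash array: "${opts_arr[@]}" expands to exactly two words, "--filter" and "period > 10"
	opts_arr+=("--filter" "period > 10")
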
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index b857fcb..07dbff5 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -231,7 +231,7 @@
 			(*argc)--;
 		} else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
 			perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
-			fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
+			fprintf(stderr, "dir: %s\n", tracing_path);
 			if (envchanged)
 				*envchanged = 1;
 		} else if (!strcmp(cmd, "--list-cmds")) {
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 4a5827ff..90129ac 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -51,16 +51,19 @@
 	bool	     sample_address;
 	bool	     sample_weight;
 	bool	     sample_time;
+	bool	     sample_time_set;
+	bool	     callgraph_set;
 	bool	     period;
-	bool	     sample_intr_regs;
 	bool	     running_time;
 	bool	     full_auxtrace;
 	bool	     auxtrace_snapshot_mode;
+	bool	     record_switch_events;
 	unsigned int freq;
 	unsigned int mmap_pages;
 	unsigned int auxtrace_mmap_pages;
 	unsigned int user_freq;
 	u64          branch_stack;
+	u64	     sample_intr_regs;
 	u64	     default_interval;
 	u64	     user_interval;
 	size_t	     auxtrace_snapshot_size;
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
index 2225162..b9d5083 100755
--- a/tools/perf/python/twatch.py
+++ b/tools/perf/python/twatch.py
@@ -18,10 +18,20 @@
 def main():
 	cpus = perf.cpu_map()
 	threads = perf.thread_map()
-	evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
+	evsel = perf.evsel(type	  = perf.TYPE_SOFTWARE,
+			   config = perf.COUNT_SW_DUMMY,
+			   task = 1, comm = 1, mmap = 0, freq = 0,
 			   wakeup_events = 1, watermark = 1,
 			   sample_id_all = 1,
 			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
+
+	"""What we want are just the PERF_RECORD_ lifetime events for threads,
+	 using the default, PERF_TYPE_HARDWARE + PERF_COUNT_HW_CYCLES & freq=1
+	 (the default), makes perf reenable irq_vectors:local_timer_entry, when
+	 disabling nohz, not good for some use cases where all we want is to get
+	 threads comes and goes... So use (perf.TYPE_SOFTWARE, perf_COUNT_SW_DUMMY,
+	 freq=0) instead."""
+
 	evsel.open(cpus = cpus, threads = threads);
 	evlist = perf.evlist(cpus, threads)
 	evlist.add(evsel)
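
The new docstring explains the event choice; actually running the example needs the perf python bindings importable, so a hedged invocation (the PYTHONPATH location is an assumption about where the built perf.so ends up, not something this patch documents) looks like:

	$ PYTHONPATH=/path/to/perf/build/python python tools/perf/python/twatch.py

which then prints the PERF_RECORD_ thread lifetime events (fork, comm, exit) as tasks come and go.
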
diff --git a/tools/perf/scripts/python/bin/compaction-times-record b/tools/perf/scripts/python/bin/compaction-times-record
new file mode 100644
index 0000000..6edcd40
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-record
@@ -0,0 +1,2 @@
+#!/bin/bash
+perf record -e compaction:mm_compaction_begin -e compaction:mm_compaction_end -e compaction:mm_compaction_migratepages -e compaction:mm_compaction_isolate_migratepages -e compaction:mm_compaction_isolate_freepages $@
diff --git a/tools/perf/scripts/python/bin/compaction-times-report b/tools/perf/scripts/python/bin/compaction-times-report
new file mode 100644
index 0000000..3dc1389
--- /dev/null
+++ b/tools/perf/scripts/python/bin/compaction-times-report
@@ -0,0 +1,4 @@
+#!/bin/bash
+#description: display time taken by mm compaction
+#args: [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]
+perf script -s "$PERF_EXEC_PATH"/scripts/python/compaction-times.py $@
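
These two wrappers follow the usual perf-script naming convention (<name>-record and <name>-report under scripts/python/bin), so a hedged end-to-end run, with the -a flag, the triggered workload and the report options purely illustrative, would look roughly like:

	# force compaction while recording system-wide (options are illustrative)
	$ perf script record compaction-times -a -- sh -c 'echo 1 > /proc/sys/vm/compact_memory'
	# per-process summary, times in microseconds
	$ perf script report compaction-times -- -p -u
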
diff --git a/tools/perf/scripts/python/call-graph-from-postgresql.py b/tools/perf/scripts/python/call-graph-from-postgresql.py
new file mode 100644
index 0000000..e78fdc2
--- /dev/null
+++ b/tools/perf/scripts/python/call-graph-from-postgresql.py
@@ -0,0 +1,327 @@
+#!/usr/bin/python2
+# call-graph-from-postgresql.py: create call-graph from postgresql database
+# Copyright (c) 2014, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+
+# To use this script you will need to have exported data using the
+# export-to-postgresql.py script.  Refer to that script for details.
+#
+# Following on from the example in the export-to-postgresql.py script, a
+# call-graph can be displayed for the pt_example database like this:
+#
+#	python tools/perf/scripts/python/call-graph-from-postgresql.py pt_example
+#
+# Note this script supports connecting to remote databases by setting hostname,
+# port, username, password, and dbname e.g.
+#
+#	python tools/perf/scripts/python/call-graph-from-postgresql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
+#
+# The result is a GUI window with a tree representing a context-sensitive
+# call-graph.  Expanding a couple of levels of the tree and adjusting column
+# widths to suit will display something like:
+#
+#                                         Call Graph: pt_example
+# Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
+# v- ls
+#     v- 2638:2638
+#         v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
+#           |- unknown               unknown       1        13198     0.1              1              0.0
+#           >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
+#           >- _dl_init_internal    ld-2.19.so    1       448152     4.4          11094              5.3
+#           v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
+#              >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
+#              >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
+#              >- __libc_csu_init    ls            1        10354     0.1             10              0.0
+#              |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
+#              v- main               ls            1      8182043    99.6         180254             99.9
+#
+# Points to note:
+#	The top level is a command name (comm)
+#	The next level is a thread (pid:tid)
+#	Subsequent levels are functions
+#	'Count' is the number of calls
+#	'Time' is the elapsed time until the function returns
+#	Percentages are relative to the level above
+#	'Branch Count' is the total number of branches for that function and all
+#       functions that it calls
+
+import sys
+from PySide.QtCore import *
+from PySide.QtGui import *
+from PySide.QtSql import *
+from decimal import *
+
+class TreeItem():
+
+	def __init__(self, db, row, parent_item):
+		self.db = db
+		self.row = row
+		self.parent_item = parent_item
+		self.query_done = False;
+		self.child_count = 0
+		self.child_items = []
+		self.data = ["", "", "", "", "", "", ""]
+		self.comm_id = 0
+		self.thread_id = 0
+		self.call_path_id = 1
+		self.branch_count = 0
+		self.time = 0
+		if not parent_item:
+			self.setUpRoot()
+
+	def setUpRoot(self):
+		self.query_done = True
+		query = QSqlQuery(self.db)
+		ret = query.exec_('SELECT id, comm FROM comms')
+		if not ret:
+			raise Exception("Query failed: " + query.lastError().text())
+		while query.next():
+			if not query.value(0):
+				continue
+			child_item = TreeItem(self.db, self.child_count, self)
+			self.child_items.append(child_item)
+			self.child_count += 1
+			child_item.setUpLevel1(query.value(0), query.value(1))
+
+	def setUpLevel1(self, comm_id, comm):
+		self.query_done = True;
+		self.comm_id = comm_id
+		self.data[0] = comm
+		self.child_items = []
+		self.child_count = 0
+		query = QSqlQuery(self.db)
+		ret = query.exec_('SELECT thread_id, ( SELECT pid FROM threads WHERE id = thread_id ), ( SELECT tid FROM threads WHERE id = thread_id ) FROM comm_threads WHERE comm_id = ' + str(comm_id))
+		if not ret:
+			raise Exception("Query failed: " + query.lastError().text())
+		while query.next():
+			child_item = TreeItem(self.db, self.child_count, self)
+			self.child_items.append(child_item)
+			self.child_count += 1
+			child_item.setUpLevel2(comm_id, query.value(0), query.value(1), query.value(2))
+
+	def setUpLevel2(self, comm_id, thread_id, pid, tid):
+		self.comm_id = comm_id
+		self.thread_id = thread_id
+		self.data[0] = str(pid) + ":" + str(tid)
+
+	def getChildItem(self, row):
+		return self.child_items[row]
+
+	def getParentItem(self):
+		return self.parent_item
+
+	def getRow(self):
+		return self.row
+
+	def timePercent(self, b):
+		if not self.time:
+			return "0.0"
+		x = (b * Decimal(100)) / self.time
+		return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))
+
+	def branchPercent(self, b):
+		if not self.branch_count:
+			return "0.0"
+		x = (b * Decimal(100)) / self.branch_count
+		return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))
+
+	def addChild(self, call_path_id, name, dso, count, time, branch_count):
+		child_item = TreeItem(self.db, self.child_count, self)
+		child_item.comm_id = self.comm_id
+		child_item.thread_id = self.thread_id
+		child_item.call_path_id = call_path_id
+		child_item.branch_count = branch_count
+		child_item.time = time
+		child_item.data[0] = name
+		if dso == "[kernel.kallsyms]":
+			dso = "[kernel]"
+		child_item.data[1] = dso
+		child_item.data[2] = str(count)
+		child_item.data[3] = str(time)
+		child_item.data[4] = self.timePercent(time)
+		child_item.data[5] = str(branch_count)
+		child_item.data[6] = self.branchPercent(branch_count)
+		self.child_items.append(child_item)
+		self.child_count += 1
+
+	def selectCalls(self):
+		self.query_done = True;
+		query = QSqlQuery(self.db)
+		ret = query.exec_('SELECT id, call_path_id, branch_count, call_time, return_time, '
+				  '( SELECT name FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ), '
+				  '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), '
+				  '( SELECT ip FROM call_paths where id = call_path_id ) '
+				  'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) +
+				  ' ORDER BY call_path_id')
+		if not ret:
+			raise Exception("Query failed: " + query.lastError().text())
+		last_call_path_id = 0
+		name = ""
+		dso = ""
+		count = 0
+		branch_count = 0
+		total_branch_count = 0
+		time = 0
+		total_time = 0
+		while query.next():
+			if query.value(1) == last_call_path_id:
+				count += 1
+				branch_count += query.value(2)
+				time += query.value(4) - query.value(3)
+			else:
+				if count:
+					self.addChild(last_call_path_id, name, dso, count, time, branch_count)
+				last_call_path_id = query.value(1)
+				name = query.value(5)
+				dso = query.value(6)
+				count = 1
+				total_branch_count += branch_count
+				total_time += time
+				branch_count = query.value(2)
+				time = query.value(4) - query.value(3)
+		if count:
+			self.addChild(last_call_path_id, name, dso, count, time, branch_count)
+		total_branch_count += branch_count
+		total_time += time
+		# Top level does not have time or branch count, so fix that here
+		if total_branch_count > self.branch_count:
+			self.branch_count = total_branch_count
+			if self.branch_count:
+				for child_item in self.child_items:
+					child_item.data[6] = self.branchPercent(child_item.branch_count)
+		if total_time > self.time:
+			self.time = total_time
+			if self.time:
+				for child_item in self.child_items:
+					child_item.data[4] = self.timePercent(child_item.time)
+
+	def childCount(self):
+		if not self.query_done:
+			self.selectCalls()
+		return self.child_count
+
+	def columnCount(self):
+		return 7
+
+	def columnHeader(self, column):
+		headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+		return headers[column]
+
+	def getData(self, column):
+		return self.data[column]
+
+class TreeModel(QAbstractItemModel):
+
+	def __init__(self, db, parent=None):
+		super(TreeModel, self).__init__(parent)
+		self.db = db
+		self.root = TreeItem(db, 0, None)
+
+	def columnCount(self, parent):
+		return self.root.columnCount()
+
+	def rowCount(self, parent):
+		if parent.isValid():
+			parent_item = parent.internalPointer()
+		else:
+			parent_item = self.root
+		return parent_item.childCount()
+
+	def headerData(self, section, orientation, role):
+		if role == Qt.TextAlignmentRole:
+			if section > 1:
+				return Qt.AlignRight
+		if role != Qt.DisplayRole:
+			return None
+		if orientation != Qt.Horizontal:
+			return None
+		return self.root.columnHeader(section)
+
+	def parent(self, child):
+		child_item = child.internalPointer()
+		if child_item is self.root:
+			return QModelIndex()
+		parent_item = child_item.getParentItem()
+		return self.createIndex(parent_item.getRow(), 0, parent_item)
+
+	def index(self, row, column, parent):
+		if parent.isValid():
+			parent_item = parent.internalPointer()
+		else:
+			parent_item = self.root
+		child_item = parent_item.getChildItem(row)
+		return self.createIndex(row, column, child_item)
+
+	def data(self, index, role):
+		if role == Qt.TextAlignmentRole:
+			if index.column() > 1:
+				return Qt.AlignRight
+		if role != Qt.DisplayRole:
+			return None
+		index_item = index.internalPointer()
+		return index_item.getData(index.column())
+
+class MainWindow(QMainWindow):
+
+	def __init__(self, db, dbname, parent=None):
+		super(MainWindow, self).__init__(parent)
+
+		self.setObjectName("MainWindow")
+		self.setWindowTitle("Call Graph: " + dbname)
+		self.move(100, 100)
+		self.resize(800, 600)
+		style = self.style()
+		icon = style.standardIcon(QStyle.SP_MessageBoxInformation)
+		self.setWindowIcon(icon);
+
+		self.model = TreeModel(db)
+
+		self.view = QTreeView()
+		self.view.setModel(self.model)
+
+		self.setCentralWidget(self.view)
+
+if __name__ == '__main__':
+	if (len(sys.argv) < 2):
+		print >> sys.stderr, "Usage is: call-graph-from-postgresql.py <database name>"
+		raise Exception("Too few arguments")
+
+	dbname = sys.argv[1]
+
+	db = QSqlDatabase.addDatabase('QPSQL')
+
+	opts = dbname.split()
+	for opt in opts:
+		if '=' in opt:
+			opt = opt.split('=')
+			if opt[0] == 'hostname':
+				db.setHostName(opt[1])
+			elif opt[0] == 'port':
+				db.setPort(int(opt[1]))
+			elif opt[0] == 'username':
+				db.setUserName(opt[1])
+			elif opt[0] == 'password':
+				db.setPassword(opt[1])
+			elif opt[0] == 'dbname':
+				dbname = opt[1]
+		else:
+			dbname = opt
+
+	db.setDatabaseName(dbname)
+	if not db.open():
+		raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+
+	app = QApplication(sys.argv)
+	window = MainWindow(db, dbname)
+	window.show()
+	err = app.exec_()
+	db.close()
+	sys.exit(err)
diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py
new file mode 100644
index 0000000..239cb05
--- /dev/null
+++ b/tools/perf/scripts/python/compaction-times.py
@@ -0,0 +1,311 @@
+# report time spent in compaction
+# Licensed under the terms of the GNU GPL License version 2
+
+# testing:
+# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
+
+import os
+import sys
+import re
+
+import signal
+signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
+
+class popt:
+	DISP_DFL = 0
+	DISP_PROC = 1
+	DISP_PROC_VERBOSE=2
+
+class topt:
+	DISP_TIME = 0
+	DISP_MIG = 1
+	DISP_ISOLFREE = 2
+	DISP_ISOLMIG = 4
+	DISP_ALL = 7
+
+class comm_filter:
+	def __init__(self, re):
+		self.re = re
+
+	def filter(self, pid, comm):
+		m = self.re.search(comm)
+		return m == None or m.group() == ""
+
+class pid_filter:
+	def __init__(self, low, high):
+		self.low = (0 if low == "" else int(low))
+		self.high = (0 if high == "" else int(high))
+
+	def filter(self, pid, comm):
+		return not (pid >= self.low and (self.high == 0 or pid <= self.high))
+
+def set_type(t):
+	global opt_disp
+	opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
+
+def ns(sec, nsec):
+	return (sec * 1000000000) + nsec
+
+def time(ns):
+	return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
+
+class pair:
+	def __init__(self, aval, bval, alabel = None, blabel = None):
+		self.alabel = alabel
+		self.blabel = blabel
+		self.aval = aval
+		self.bval = bval
+
+	def __add__(self, rhs):
+		self.aval += rhs.aval
+		self.bval += rhs.bval
+		return self
+
+	def __str__(self):
+		return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
+
+class cnode:
+	def __init__(self, ns):
+		self.ns = ns
+		self.migrated = pair(0, 0, "moved", "failed")
+		self.fscan = pair(0,0, "scanned", "isolated")
+		self.mscan = pair(0,0, "scanned", "isolated")
+
+	def __add__(self, rhs):
+		self.ns += rhs.ns
+		self.migrated += rhs.migrated
+		self.fscan += rhs.fscan
+		self.mscan += rhs.mscan
+		return self
+
+	def __str__(self):
+		prev = 0
+		s = "%s " % time(self.ns)
+		if (opt_disp & topt.DISP_MIG):
+			s += "migration: %s" % self.migrated
+			prev = 1
+		if (opt_disp & topt.DISP_ISOLFREE):
+			s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
+			prev = 1
+		if (opt_disp & topt.DISP_ISOLMIG):
+			s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
+		return s
+
+	def complete(self, secs, nsecs):
+		self.ns = ns(secs, nsecs) - self.ns
+
+	def increment(self, migrated, fscan, mscan):
+		if (migrated != None):
+			self.migrated += migrated
+		if (fscan != None):
+			self.fscan += fscan
+		if (mscan != None):
+			self.mscan += mscan
+
+
+class chead:
+	heads = {}
+	val = cnode(0);
+	fobj = None
+
+	@classmethod
+	def add_filter(cls, filter):
+		cls.fobj = filter
+
+	@classmethod
+	def create_pending(cls, pid, comm, start_secs, start_nsecs):
+		filtered = 0
+		try:
+			head = cls.heads[pid]
+			filtered = head.is_filtered()
+		except KeyError:
+			if cls.fobj != None:
+				filtered = cls.fobj.filter(pid, comm)
+			head = cls.heads[pid] = chead(comm, pid, filtered)
+
+		if not filtered:
+			head.mark_pending(start_secs, start_nsecs)
+
+	@classmethod
+	def increment_pending(cls, pid, migrated, fscan, mscan):
+		head = cls.heads[pid]
+		if not head.is_filtered():
+			if head.is_pending():
+				head.do_increment(migrated, fscan, mscan)
+			else:
+				sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+	@classmethod
+	def complete_pending(cls, pid, secs, nsecs):
+		head = cls.heads[pid]
+		if not head.is_filtered():
+			if head.is_pending():
+				head.make_complete(secs, nsecs)
+			else:
+				sys.stderr.write("missing start compaction event for pid %d\n" % pid)
+
+	@classmethod
+	def gen(cls):
+		if opt_proc != popt.DISP_DFL:
+			for i in cls.heads:
+				yield cls.heads[i]
+
+	@classmethod
+	def str(cls):
+		return cls.val
+
+	def __init__(self, comm, pid, filtered):
+		self.comm = comm
+		self.pid = pid
+		self.val = cnode(0)
+		self.pending = None
+		self.filtered = filtered
+		self.list = []
+
+	def __add__(self, rhs):
+		self.ns += rhs.ns
+		self.val += rhs.val
+		return self
+
+	def mark_pending(self, secs, nsecs):
+		self.pending = cnode(ns(secs, nsecs))
+
+	def do_increment(self, migrated, fscan, mscan):
+		self.pending.increment(migrated, fscan, mscan)
+
+	def make_complete(self, secs, nsecs):
+		self.pending.complete(secs, nsecs)
+		chead.val += self.pending
+
+		if opt_proc != popt.DISP_DFL:
+			self.val += self.pending
+
+			if opt_proc == popt.DISP_PROC_VERBOSE:
+				self.list.append(self.pending)
+		self.pending = None
+
+	def enumerate(self):
+		if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
+			for i, pelem in enumerate(self.list):
+				sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
+
+	def is_pending(self):
+		return self.pending != None
+
+	def is_filtered(self):
+		return self.filtered
+
+	def display(self):
+		if not self.is_filtered():
+			sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
+
+
+def trace_end():
+	sys.stdout.write("total: %s\n" % chead.str())
+	for i in chead.gen():
+		i.display(),
+		i.enumerate()
+
+def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, nr_migrated, nr_failed):
+
+	chead.increment_pending(common_pid,
+		pair(nr_migrated, nr_failed), None, None)
+
+def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
+        common_secs, common_nsecs, common_pid, common_comm,
+        common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+	chead.increment_pending(common_pid,
+		None, pair(nr_scanned, nr_taken), None)
+
+def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
+        common_secs, common_nsecs, common_pid, common_comm,
+        common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
+
+	chead.increment_pending(common_pid,
+		None, None, pair(nr_scanned, nr_taken))
+
+def compaction__mm_compaction_end(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, zone_start, migrate_start, free_start, zone_end,
+	sync, status):
+
+	chead.complete_pending(common_pid, common_secs, common_nsecs)
+
+def compaction__mm_compaction_begin(event_name, context, common_cpu,
+	common_secs, common_nsecs, common_pid, common_comm,
+	common_callchain, zone_start, migrate_start, free_start, zone_end,
+	sync):
+
+	chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
+
+def pr_help():
+	global usage
+
+	sys.stdout.write(usage)
+	sys.stdout.write("\n")
+	sys.stdout.write("-h	display this help\n")
+	sys.stdout.write("-p	display by process\n")
+	sys.stdout.write("-pv	display by process (verbose)\n")
+	sys.stdout.write("-t	display stall times only\n")
+	sys.stdout.write("-m	display stats for migration\n")
+	sys.stdout.write("-fs	display stats for free scanner\n")
+	sys.stdout.write("-ms	display stats for migration scanner\n")
+	sys.stdout.write("-u	display results in microseconds (default nanoseconds)\n")
+
+
+comm_re = None
+pid_re = None
+pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
+
+opt_proc = popt.DISP_DFL
+opt_disp = topt.DISP_ALL
+
+opt_ns = True
+
+argc = len(sys.argv) - 1
+if argc >= 1:
+	pid_re = re.compile(pid_regex)
+
+	for i, opt in enumerate(sys.argv[1:]):
+		if opt[0] == "-":
+			if opt == "-h":
+				pr_help()
+				exit(0);
+			elif opt == "-p":
+				opt_proc = popt.DISP_PROC
+			elif opt == "-pv":
+				opt_proc = popt.DISP_PROC_VERBOSE
+			elif opt == '-u':
+				opt_ns = False
+			elif opt == "-t":
+				set_type(topt.DISP_TIME)
+			elif opt == "-m":
+				set_type(topt.DISP_MIG)
+			elif opt == "-fs":
+				set_type(topt.DISP_ISOLFREE)
+			elif opt == "-ms":
+				set_type(topt.DISP_ISOLMIG)
+			else:
+				sys.exit(usage)
+
+		elif i == argc - 1:
+			m = pid_re.search(opt)
+			if m != None and m.group() != "":
+				if m.group(3) != None:
+					f = pid_filter(m.group(3), m.group(3))
+				else:
+					f = pid_filter(m.group(1), m.group(2))
+			else:
+				try:
+					comm_re=re.compile(opt)
+				except:
+					sys.stderr.write("invalid regex '%s'" % opt)
+					sys.exit(usage)
+				f = comm_filter(comm_re)
+
+			chead.add_filter(f)
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 4cdafd8..84a3203 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -15,6 +15,53 @@
 import struct
 import datetime
 
+# To use this script you will need to have installed the python-pyside package,
+# which provides LGPL-licensed Python bindings for Qt.  You will also need the
+# libqt4-sql-psql package for Qt PostgreSQL support.
+#
+# The script assumes postgresql is running on the local machine and that the
+# user has postgresql permissions to create databases. Examples of installing
+# postgresql and adding such a user are:
+#
+# fedora:
+#
+#	$ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
+#	$ sudo su - postgres -c initdb
+#	$ sudo service postgresql start
+#	$ sudo su - postgres
+#	$ createuser <your user id here>
+#	Shall the new role be a superuser? (y/n) y
+#
+# ubuntu:
+#
+#	$ sudo apt-get install postgresql
+#	$ sudo su - postgres
+#	$ createuser <your user id here>
+#	Shall the new role be a superuser? (y/n) y
+#
+# An example of using this script with Intel PT:
+#
+#	$ perf record -e intel_pt//u ls
+#	$ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
+#	2015-05-29 12:49:23.464364 Creating database...
+#	2015-05-29 12:49:26.281717 Writing to intermediate files...
+#	2015-05-29 12:49:27.190383 Copying to database...
+#	2015-05-29 12:49:28.140451 Removing intermediate files...
+#	2015-05-29 12:49:28.147451 Adding primary keys
+#	2015-05-29 12:49:28.655683 Adding foreign keys
+#	2015-05-29 12:49:29.365350 Done
+#
+# To browse the database, psql can be used e.g.
+#
+#	$ psql pt_example
+#	pt_example=# select * from samples_view where id < 100;
+#	pt_example=# \d+
+#	pt_example=# \d+ samples_view
+#	pt_example=# \q
+#
+# An example of using the database is provided by the script
+# call-graph-from-postgresql.py.  Refer to that script for details.
+
 from PySide.QtSql import *
 
 # Need to access PostgreSQL C library directly to use COPY FROM STDIN
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index d20d6e6..c1518bd 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -32,6 +32,7 @@
 perf-y += parse-no-sample-id-all.o
 perf-y += kmod-path.o
 perf-y += thread-map.o
+perf-y += llvm.o
 
 perf-$(CONFIG_X86) += perf-time-to-tsc.o
 
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index c1dde73..136cd93 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -175,6 +175,10 @@
 		.func = test__thread_map,
 	},
 	{
+		.desc = "Test LLVM searching and compiling",
+		.func = test__llvm,
+	},
+	{
 		.func = NULL,
 	},
 };
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 7d82c8b..7ed73701 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -279,6 +279,7 @@
 
 	symbol_conf.use_callchain = false;
 	symbol_conf.cumulate_callchain = false;
+	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
 
 	setup_sorting();
 	callchain_register_param(&callchain_param);
@@ -425,6 +426,7 @@
 
 	symbol_conf.use_callchain = true;
 	symbol_conf.cumulate_callchain = false;
+	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
 	setup_sorting();
 	callchain_register_param(&callchain_param);
@@ -482,6 +484,7 @@
 
 	symbol_conf.use_callchain = false;
 	symbol_conf.cumulate_callchain = true;
+	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
 
 	setup_sorting();
 	callchain_register_param(&callchain_param);
@@ -665,6 +668,7 @@
 
 	symbol_conf.use_callchain = true;
 	symbol_conf.cumulate_callchain = true;
+	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
 	setup_sorting();
 	callchain_register_param(&callchain_param);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
new file mode 100644
index 0000000..52d5597
--- /dev/null
+++ b/tools/perf/tests/llvm.c
@@ -0,0 +1,98 @@
+#include <stdio.h>
+#include <bpf/libbpf.h>
+#include <util/llvm-utils.h>
+#include <util/cache.h>
+#include "tests.h"
+#include "debug.h"
+
+static int perf_config_cb(const char *var, const char *val,
+			  void *arg __maybe_unused)
+{
+	return perf_default_config(var, val, arg);
+}
+
+/*
+ * Give it an arbitrary "version" section since we don't really load it
+ * into the kernel.
+ */
+static const char test_bpf_prog[] =
+	"__attribute__((section(\"do_fork\"), used)) "
+	"int fork(void *ctx) {return 0;} "
+	"char _license[] __attribute__((section(\"license\"), used)) = \"GPL\";"
+	"int _version __attribute__((section(\"version\"), used)) = 0x40100;";
+
+#ifdef HAVE_LIBBPF_SUPPORT
+static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
+{
+	struct bpf_object *obj;
+
+	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, NULL);
+	if (!obj)
+		return -1;
+	bpf_object__close(obj);
+	return 0;
+}
+#else
+static int test__bpf_parsing(void *obj_buf __maybe_unused,
+			     size_t obj_buf_sz __maybe_unused)
+{
+	fprintf(stderr, " (skip bpf parsing)");
+	return 0;
+}
+#endif
+
+int test__llvm(void)
+{
+	char *tmpl_new, *clang_opt_new;
+	void *obj_buf;
+	size_t obj_buf_sz;
+	int err, old_verbose;
+
+	perf_config(perf_config_cb, NULL);
+
+	/*
+	 * Skip this test if the user's .perfconfig doesn't set the [llvm]
+	 * section, clang is not found in $PATH, and this is not 'perf test -v'.
+	 */
+	if (verbose == 0 && !llvm_param.user_set_param && llvm__search_clang()) {
+		fprintf(stderr, " (no clang, try 'perf test -v LLVM')");
+		return TEST_SKIP;
+	}
+
+	old_verbose = verbose;
+	/*
+	 * llvm is verbose on error. Suppress all error output unless
+	 * this is 'perf test -v'.
+	 */
+	if (verbose == 0)
+		verbose = -1;
+
+	if (!llvm_param.clang_bpf_cmd_template)
+		return -1;
+
+	if (!llvm_param.clang_opt)
+		llvm_param.clang_opt = strdup("");
+
+	err = asprintf(&tmpl_new, "echo '%s' | %s", test_bpf_prog,
+		       llvm_param.clang_bpf_cmd_template);
+	if (err < 0)
+		return -1;
+	err = asprintf(&clang_opt_new, "-xc %s", llvm_param.clang_opt);
+	if (err < 0)
+		return -1;
+
+	llvm_param.clang_bpf_cmd_template = tmpl_new;
+	llvm_param.clang_opt = clang_opt_new;
+	err = llvm__compile_bpf("-", &obj_buf, &obj_buf_sz);
+
+	verbose = old_verbose;
+	if (err) {
+		if (!verbose)
+			fprintf(stderr, " (use -v to see error message)");
+		return -1;
+	}
+
+	err = test__bpf_parsing(obj_buf, obj_buf_sz);
+	free(obj_buf);
+	return err;
+}
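
The test builds the tiny BPF program above through whatever clang command template is configured, so the quickest way to run it by hand is the verbose form hinted at in the skip message. Pointing perf at a specific clang goes through ~/.perfconfig; the key name below is an assumption inferred from the llvm_param fields used in this test, not something stated in the patch:

	$ perf test -v LLVM
	# optional ~/.perfconfig snippet (assumed key name):
	# [llvm]
	#	clang-path = /usr/local/bin/clang
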
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 729112f..ba31c4b 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -58,7 +58,8 @@
 make_install_html   := install-html
 make_install_info   := install-info
 make_install_pdf    := install-pdf
-make_install_prefix := install prefix=/tmp/krava
+make_install_prefix       := install prefix=/tmp/krava
+make_install_prefix_slash := install prefix=/tmp/krava/
 make_static         := LDFLAGS=-static
 
 # all the NO_* variable combined
@@ -101,6 +102,7 @@
 run += make_install
 run += make_install_bin
 run += make_install_prefix
+run += make_install_prefix_slash
 # FIXME 'install-*' commented out till they're fixed
 # run += make_install_doc
 # run += make_install_man
@@ -175,11 +177,14 @@
 test_make_install_bin   := $(call test_dest_files,$(installed_files_bin))
 test_make_install_bin_O := $(call test_dest_files,$(installed_files_bin))
 
-# We prefix all installed files for make_install_prefix
+# We prefix all installed files for make_install_prefix(_slash)
 # with '/tmp/krava' to match installed/prefix-ed files.
 installed_files_all_prefix := $(addprefix /tmp/krava/,$(installed_files_all))
-test_make_install_prefix   := $(call test_dest_files,$(installed_files_all_prefix))
-test_make_install_prefix_O := $(call test_dest_files,$(installed_files_all_prefix))
+test_make_install_prefix   :=  $(call test_dest_files,$(installed_files_all_prefix))
+test_make_install_prefix_O :=  $(call test_dest_files,$(installed_files_all_prefix))
+
+test_make_install_prefix_slash   := $(test_make_install_prefix)
+test_make_install_prefix_slash_O := $(test_make_install_prefix_O)
 
 # FIXME nothing gets installed
 test_make_install_man    := test -f $$TMP_DEST/share/man/man1/perf.1
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index d76963f..9b6b2b63 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -82,8 +82,12 @@
 	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
 	TEST_ASSERT_VAL("wrong config",
 			PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+	/*
+	 * The period value gets configured within perf_evlist__config,
+	 * while this test only exercises the parse-events method.
+	 */
 	TEST_ASSERT_VAL("wrong period",
-			100000 == evsel->attr.sample_period);
+			0 == evsel->attr.sample_period);
 	TEST_ASSERT_VAL("wrong config1",
 			0 == evsel->attr.config1);
 	TEST_ASSERT_VAL("wrong config2",
@@ -406,7 +410,11 @@
 	TEST_ASSERT_VAL("wrong config",    10 == evsel->attr.config);
 	TEST_ASSERT_VAL("wrong config1",    1 == evsel->attr.config1);
 	TEST_ASSERT_VAL("wrong config2",    3 == evsel->attr.config2);
-	TEST_ASSERT_VAL("wrong period",  1000 == evsel->attr.sample_period);
+	/*
+	 * The period value gets configured within perf_evlist__config,
+	 * while this test only exercises the parse-events method.
+	 */
+	TEST_ASSERT_VAL("wrong period",     0 == evsel->attr.sample_period);
 
 	return 0;
 }
@@ -471,6 +479,39 @@
 	return 0;
 }
 
+static int test__checkevent_pmu_partial_time_callgraph(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+	/* cpu/config=1,call-graph=fp,time,period=100000/ */
+	TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong config",  1 == evsel->attr.config);
+	/*
+	 * The period, time and callgraph values get configured
+	 * within perf_evlist__config,
+	 * while this test only exercises the parse-events method.
+	 */
+	TEST_ASSERT_VAL("wrong period",     0 == evsel->attr.sample_period);
+	TEST_ASSERT_VAL("wrong callgraph",  !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type));
+	TEST_ASSERT_VAL("wrong time",  !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
+
+	/* cpu/config=2,call-graph=no,time=0,period=2000/ */
+	evsel = perf_evsel__next(evsel);
+	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
+	TEST_ASSERT_VAL("wrong config",  2 == evsel->attr.config);
+	/*
+	 * The period, time and callgraph values get configured
+	 * within perf_evlist__config,
+	 * while this test only exercises the parse-events method.
+	 */
+	TEST_ASSERT_VAL("wrong period",     0 == evsel->attr.sample_period);
+	TEST_ASSERT_VAL("wrong callgraph",  !(PERF_SAMPLE_CALLCHAIN & evsel->attr.sample_type));
+	TEST_ASSERT_VAL("wrong time",  !(PERF_SAMPLE_TIME & evsel->attr.sample_type));
+
+	return 0;
+}
+
 static int test__checkevent_pmu_events(struct perf_evlist *evlist)
 {
 	struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1547,6 +1588,11 @@
 		.check = test__checkevent_pmu_name,
 		.id    = 1,
 	},
+	{
+		.name  = "cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/",
+		.check = test__checkevent_pmu_partial_time_callgraph,
+		.id    = 2,
+	},
 };
 
 struct terms_test {
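
The new parse-events entry checks that per-event call-graph/time/period terms parse without setting the attr fields yet (they are applied later in perf_evlist__config). Fed to perf record, the same strings from the test table would look like the sketch below; the config=1/config=2 values are simply the ones the test uses, and 'sleep 1' is illustrative:

	$ perf record -e 'cpu/config=1,call-graph=fp,time,period=100000/' \
	              -e 'cpu/config=2,call-graph=no,time=0,period=2000/' -- sleep 1
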
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index ebb47d9..bf113a2 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -62,6 +62,7 @@
 int test__fdarray__add(void);
 int test__kmod_path__parse(void);
 int test__thread_map(void);
+int test__llvm(void);
 
 #if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
index 5acf000..138a0e3 100644
--- a/tools/perf/tests/thread-map.c
+++ b/tools/perf/tests/thread-map.c
@@ -20,6 +20,8 @@
 	TEST_ASSERT_VAL("wrong comm",
 			thread_map__comm(map, 0) &&
 			!strcmp(thread_map__comm(map, 0), "perf"));
+	TEST_ASSERT_VAL("wrong refcnt",
+			atomic_read(&map->refcnt) == 1);
 	thread_map__put(map);
 
 	/* test dummy pid */
@@ -33,6 +35,8 @@
 	TEST_ASSERT_VAL("wrong comm",
 			thread_map__comm(map, 0) &&
 			!strcmp(thread_map__comm(map, 0), "dummy"));
+	TEST_ASSERT_VAL("wrong refcnt",
+			atomic_read(&map->refcnt) == 1);
 	thread_map__put(map);
 	return 0;
 }
diff --git a/tools/perf/trace/strace/groups/file b/tools/perf/trace/strace/groups/file
new file mode 100644
index 0000000..62378a8
--- /dev/null
+++ b/tools/perf/trace/strace/groups/file
@@ -0,0 +1,18 @@
+access
+chmod
+creat
+execve
+faccessat
+getcwd
+lstat
+mkdir
+open
+openat
+quotactl
+readlink
+rename
+rmdir
+stat
+statfs
+symlink
+unlink
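
This is one of the strace-style syscall groups installed by the new STRACE_GROUPS_DIR Makefile plumbing; assuming the group name can be used wherever perf trace accepts an event qualifier (the ev_qualifier hunk at the top of this section parses that qualifier), tracing only file-related syscalls would look roughly like:

	$ perf trace -e file touch /tmp/example
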
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 6680fa5..c6c7e51 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -46,6 +46,21 @@
 	SLsmg_gotorc(browser->y + y, browser->x + x);
 }
 
+void ui_browser__write_nstring(struct ui_browser *browser __maybe_unused, const char *msg,
+			       unsigned int width)
+{
+	slsmg_write_nstring(msg, width);
+}
+
+void ui_browser__printf(struct ui_browser *browser __maybe_unused, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	slsmg_vprintf(fmt, args);
+	va_end(args);
+}
+
 static struct list_head *
 ui_browser__list_head_filter_entries(struct ui_browser *browser,
 				     struct list_head *pos)
@@ -234,7 +249,7 @@
 {
 	SLsmg_gotorc(0, 0);
 	ui_browser__set_color(browser, HE_COLORSET_ROOT);
-	slsmg_write_nstring(title, browser->width + 1);
+	ui_browser__write_nstring(browser, title, browser->width + 1);
 }
 
 void ui_browser__show_title(struct ui_browser *browser, const char *title)
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 92ae721..f3cef56 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -37,6 +37,9 @@
 void ui_browser__reset_index(struct ui_browser *browser);
 
 void ui_browser__gotorc(struct ui_browser *browser, int y, int x);
+void ui_browser__write_nstring(struct ui_browser *browser, const char *msg,
+			       unsigned int width);
+void ui_browser__printf(struct ui_browser *browser, const char *fmt, ...);
 void ui_browser__write_graph(struct ui_browser *browser, int graph);
 void __ui_browser__line_arrow(struct ui_browser *browser, unsigned int column,
 			      u64 start, u64 end);
@@ -58,8 +61,8 @@
 bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
 int ui_browser__input_window(const char *title, const char *text, char *input,
 			     const char *exit_msg, int delay_sec);
-struct perf_session_env;
-int tui__header_window(struct perf_session_env *env);
+struct perf_env;
+int tui__header_window(struct perf_env *env);
 
 void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
 unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 5995a8b..29739b3 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -1,7 +1,6 @@
 #include "../../util/util.h"
 #include "../browser.h"
 #include "../helpline.h"
-#include "../libslang.h"
 #include "../ui.h"
 #include "../util.h"
 #include "../../util/annotate.h"
@@ -16,6 +15,9 @@
 	u64		nr;
 };
 
+#define IPC_WIDTH 6
+#define CYCLES_WIDTH 6
+
 struct browser_disasm_line {
 	struct rb_node			rb_node;
 	u32				idx;
@@ -53,6 +55,7 @@
 	int		    max_jump_sources;
 	int		    nr_jumps;
 	bool		    searching_backwards;
+	bool		    have_cycles;
 	u8		    addr_width;
 	u8		    jumps_width;
 	u8		    target_width;
@@ -96,6 +99,15 @@
 	 return ui_browser__set_color(&browser->b, color);
 }
 
+static int annotate_browser__pcnt_width(struct annotate_browser *ab)
+{
+	int w = 7 * ab->nr_events;
+
+	if (ab->have_cycles)
+		w += IPC_WIDTH + CYCLES_WIDTH;
+	return w;
+}
+
 static void annotate_browser__write(struct ui_browser *browser, void *entry, int row)
 {
 	struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
@@ -106,7 +118,7 @@
 			     (!current_entry || (browser->use_navkeypressed &&
 					         !browser->navkeypressed)));
 	int width = browser->width, printed;
-	int i, pcnt_width = 7 * ab->nr_events;
+	int i, pcnt_width = annotate_browser__pcnt_width(ab);
 	double percent_max = 0.0;
 	char bf[256];
 
@@ -116,19 +128,36 @@
 	}
 
 	if (dl->offset != -1 && percent_max != 0.0) {
-		for (i = 0; i < ab->nr_events; i++) {
-			ui_browser__set_percent_color(browser,
-						      bdl->samples[i].percent,
-						      current_entry);
-			if (annotate_browser__opts.show_total_period)
-				slsmg_printf("%6" PRIu64 " ",
-					     bdl->samples[i].nr);
-			else
-				slsmg_printf("%6.2f ", bdl->samples[i].percent);
+		if (percent_max != 0.0) {
+			for (i = 0; i < ab->nr_events; i++) {
+				ui_browser__set_percent_color(browser,
+							bdl->samples[i].percent,
+							current_entry);
+				if (annotate_browser__opts.show_total_period) {
+					ui_browser__printf(browser, "%6" PRIu64 " ",
+							   bdl->samples[i].nr);
+				} else {
+					ui_browser__printf(browser, "%6.2f ",
+							   bdl->samples[i].percent);
+				}
+			}
+		} else {
+			ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
 		}
 	} else {
 		ui_browser__set_percent_color(browser, 0, current_entry);
-		slsmg_write_nstring(" ", pcnt_width);
+		ui_browser__write_nstring(browser, " ", 7 * ab->nr_events);
+	}
+	if (ab->have_cycles) {
+		if (dl->ipc)
+			ui_browser__printf(browser, "%*.2f ", IPC_WIDTH - 1, dl->ipc);
+		else
+			ui_browser__write_nstring(browser, " ", IPC_WIDTH);
+		if (dl->cycles)
+			ui_browser__printf(browser, "%*" PRIu64 " ",
+					   CYCLES_WIDTH - 1, dl->cycles);
+		else
+			ui_browser__write_nstring(browser, " ", CYCLES_WIDTH);
 	}
 
 	SLsmg_write_char(' ');
@@ -138,7 +167,7 @@
 		width += 1;
 
 	if (!*dl->line)
-		slsmg_write_nstring(" ", width - pcnt_width);
+		ui_browser__write_nstring(browser, " ", width - pcnt_width);
 	else if (dl->offset == -1) {
 		if (dl->line_nr && annotate_browser__opts.show_linenr)
 			printed = scnprintf(bf, sizeof(bf), "%-*d ",
@@ -146,8 +175,8 @@
 		else
 			printed = scnprintf(bf, sizeof(bf), "%*s  ",
 				    ab->addr_width, " ");
-		slsmg_write_nstring(bf, printed);
-		slsmg_write_nstring(dl->line, width - printed - pcnt_width + 1);
+		ui_browser__write_nstring(browser, bf, printed);
+		ui_browser__write_nstring(browser, dl->line, width - printed - pcnt_width + 1);
 	} else {
 		u64 addr = dl->offset;
 		int color = -1;
@@ -166,7 +195,7 @@
 							    bdl->jump_sources);
 					prev = annotate_browser__set_jumps_percent_color(ab, bdl->jump_sources,
 											 current_entry);
-					slsmg_write_nstring(bf, printed);
+					ui_browser__write_nstring(browser, bf, printed);
 					ui_browser__set_color(browser, prev);
 				}
 
@@ -180,7 +209,7 @@
 
 		if (change_color)
 			color = ui_browser__set_color(browser, HE_COLORSET_ADDR);
-		slsmg_write_nstring(bf, printed);
+		ui_browser__write_nstring(browser, bf, printed);
 		if (change_color)
 			ui_browser__set_color(browser, color);
 		if (dl->ins && dl->ins->ops->scnprintf) {
@@ -194,11 +223,11 @@
 				ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
 				SLsmg_write_char(' ');
 			} else {
-				slsmg_write_nstring(" ", 2);
+				ui_browser__write_nstring(browser, " ", 2);
 			}
 		} else {
 			if (strcmp(dl->name, "retq")) {
-				slsmg_write_nstring(" ", 2);
+				ui_browser__write_nstring(browser, " ", 2);
 			} else {
 				ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
 				SLsmg_write_char(' ');
@@ -206,7 +235,7 @@
 		}
 
 		disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
-		slsmg_write_nstring(bf, width - pcnt_width - 3 - printed);
+		ui_browser__write_nstring(browser, bf, width - pcnt_width - 3 - printed);
 	}
 
 	if (current_entry)
@@ -231,7 +260,7 @@
 	unsigned int from, to;
 	struct map_symbol *ms = ab->b.priv;
 	struct symbol *sym = ms->sym;
-	u8 pcnt_width = 7;
+	u8 pcnt_width = annotate_browser__pcnt_width(ab);
 
 	/* PLT symbols contain external offsets */
 	if (strstr(sym->name, "@plt"))
@@ -255,8 +284,6 @@
 		to = (u64)btarget->idx;
 	}
 
-	pcnt_width *= ab->nr_events;
-
 	ui_browser__set_color(browser, HE_COLORSET_CODE);
 	__ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
 				 from, to);
@@ -266,9 +293,7 @@
 {
 	struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
 	int ret = ui_browser__list_head_refresh(browser);
-	int pcnt_width;
-
-	pcnt_width = 7 * ab->nr_events;
+	int pcnt_width = annotate_browser__pcnt_width(ab);
 
 	if (annotate_browser__opts.jump_arrows)
 		annotate_browser__draw_current_jump(browser);
@@ -390,7 +415,7 @@
 				max_percent = bpos->samples[i].percent;
 		}
 
-		if (max_percent < 0.01) {
+		if (max_percent < 0.01 && pos->ipc == 0) {
 			RB_CLEAR_NODE(&bpos->rb_node);
 			continue;
 		}
@@ -869,6 +894,75 @@
 	return map_symbol__tui_annotate(&he->ms, evsel, hbt);
 }
 
+
+static unsigned count_insn(struct annotate_browser *browser, u64 start, u64 end)
+{
+	unsigned n_insn = 0;
+	u64 offset;
+
+	for (offset = start; offset <= end; offset++) {
+		if (browser->offsets[offset])
+			n_insn++;
+	}
+	return n_insn;
+}
+
+static void count_and_fill(struct annotate_browser *browser, u64 start, u64 end,
+			   struct cyc_hist *ch)
+{
+	unsigned n_insn;
+	u64 offset;
+
+	n_insn = count_insn(browser, start, end);
+	if (n_insn && ch->num && ch->cycles) {
+		float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
+
+		/* Hide data when there are too many overlaps. */
+		if (ch->reset >= 0x7fff || ch->reset >= ch->num / 2)
+			return;
+
+		for (offset = start; offset <= end; offset++) {
+			struct disasm_line *dl = browser->offsets[offset];
+
+			if (dl)
+				dl->ipc = ipc;
+		}
+	}
+}
+
+/*
+ * This should probably be in util/annotate.c to share with the tty
+ * annotate, but right now we need the per byte offsets arrays,
+ * which are only here.
+ */
+static void annotate__compute_ipc(struct annotate_browser *browser, size_t size,
+			   struct symbol *sym)
+{
+	u64 offset;
+	struct annotation *notes = symbol__annotation(sym);
+
+	if (!notes->src || !notes->src->cycles_hist)
+		return;
+
+	pthread_mutex_lock(&notes->lock);
+	for (offset = 0; offset < size; ++offset) {
+		struct cyc_hist *ch;
+
+		ch = &notes->src->cycles_hist[offset];
+		if (ch && ch->cycles) {
+			struct disasm_line *dl;
+
+			if (ch->have_start)
+				count_and_fill(browser, ch->start, offset, ch);
+			dl = browser->offsets[offset];
+			if (dl && ch->num_aggr)
+				dl->cycles = ch->cycles_aggr / ch->num_aggr;
+			browser->have_cycles = true;
+		}
+	}
+	pthread_mutex_unlock(&notes->lock);
+}
+
 static void annotate_browser__mark_jump_targets(struct annotate_browser *browser,
 						size_t size)
 {
@@ -991,6 +1085,7 @@
 	}
 
 	annotate_browser__mark_jump_targets(&browser, size);
+	annotate__compute_ipc(&browser, size, sym);
 
 	browser.addr_width = browser.target_width = browser.min_addr_width = hex_width(size);
 	browser.max_addr_width = hex_width(sym->end);
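
For reference on the numbers the new IPC column shows: count_and_fill() computes ipc = n_insn / (cycles / num), i.e. the number of instructions in the sampled block divided by its average cycle cost. With made-up figures, a block covering 8 instructions that was observed 4 times for 16 cycles in total yields 8 / (16 / 4) = 2.0 instructions per cycle, and that value is copied into every disasm_line of the block, so the whole range displays the same IPC.
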
diff --git a/tools/perf/ui/browsers/header.c b/tools/perf/ui/browsers/header.c
index e8278c5..edbeaaf 100644
--- a/tools/perf/ui/browsers/header.c
+++ b/tools/perf/ui/browsers/header.c
@@ -25,7 +25,7 @@
 	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
 						       HE_COLORSET_NORMAL);
 
-	slsmg_write_nstring(str, browser->width);
+	ui_browser__write_nstring(browser, str, browser->width);
 }
 
 static int list_menu__run(struct ui_browser *menu)
@@ -91,7 +91,7 @@
 	return list_menu__run(&menu);
 }
 
-int tui__header_window(struct perf_session_env *env)
+int tui__header_window(struct perf_env *env)
 {
 	int i, argc = 0;
 	char **argv;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fa67613..cf86f2d 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1,5 +1,4 @@
 #include <stdio.h>
-#include "../libslang.h"
 #include <stdlib.h>
 #include <string.h>
 #include <linux/rbtree.h>
@@ -27,7 +26,7 @@
 	struct map_symbol   *selection;
 	struct hist_browser_timer *hbt;
 	struct pstack	    *pstack;
-	struct perf_session_env *env;
+	struct perf_env *env;
 	int		     print_seq;
 	bool		     show_dso;
 	bool		     show_headers;
@@ -540,10 +539,10 @@
 
 	ui_browser__set_color(&browser->b, color);
 	hist_browser__gotorc(browser, row, 0);
-	slsmg_write_nstring(" ", offset);
-	slsmg_printf("%c", folded_sign);
+	ui_browser__write_nstring(&browser->b, " ", offset);
+	ui_browser__printf(&browser->b, "%c", folded_sign);
 	ui_browser__write_graph(&browser->b, show_annotated ? SLSMG_RARROW_CHAR : ' ');
-	slsmg_write_nstring(str, width);
+	ui_browser__write_nstring(&browser->b, str, width);
 }
 
 static void hist_browser__fprintf_callchain_entry(struct hist_browser *b __maybe_unused,
@@ -680,7 +679,7 @@
 	ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
 
 	ret = scnprintf(hpp->buf, hpp->size, fmt, len, percent);
-	slsmg_printf("%s", hpp->buf);
+	ui_browser__printf(arg->b, "%s", hpp->buf);
 
 	advance_hpp(hpp, ret);
 	return ret;
@@ -713,10 +712,11 @@
 				struct hist_entry *he)			\
 {									\
 	if (!symbol_conf.cumulate_callchain) {				\
+		struct hpp_arg *arg = hpp->ptr;				\
 		int len = fmt->user_len ?: fmt->len;			\
 		int ret = scnprintf(hpp->buf, hpp->size,		\
 				    "%*s", len, "N/A");			\
-		slsmg_printf("%s", hpp->buf);				\
+		ui_browser__printf(arg->b, "%s", hpp->buf);		\
 									\
 		return ret;						\
 	}								\
@@ -801,12 +801,12 @@
 
 			if (first) {
 				if (symbol_conf.use_callchain) {
-					slsmg_printf("%c ", folded_sign);
+					ui_browser__printf(&browser->b, "%c ", folded_sign);
 					width -= 2;
 				}
 				first = false;
 			} else {
-				slsmg_printf("  ");
+				ui_browser__printf(&browser->b, "  ");
 				width -= 2;
 			}
 
@@ -814,7 +814,7 @@
 				width -= fmt->color(fmt, &hpp, entry);
 			} else {
 				width -= fmt->entry(fmt, &hpp, entry);
-				slsmg_printf("%s", s);
+				ui_browser__printf(&browser->b, "%s", s);
 			}
 		}
 
@@ -822,7 +822,7 @@
 		if (!browser->b.navkeypressed)
 			width += 1;
 
-		slsmg_write_nstring("", width);
+		ui_browser__write_nstring(&browser->b, "", width);
 
 		++row;
 		++printed;
@@ -899,7 +899,7 @@
 	hists__scnprintf_headers(headers, sizeof(headers), browser->hists);
 	ui_browser__gotorc(&browser->b, 0, 0);
 	ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
-	slsmg_write_nstring(headers, browser->b.width + 1);
+	ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
 }
 
 static void ui_browser__hists_init_top(struct ui_browser *browser)
@@ -1214,7 +1214,7 @@
 
 static struct hist_browser *hist_browser__new(struct hists *hists,
 					      struct hist_browser_timer *hbt,
-					      struct perf_session_env *env)
+					      struct perf_env *env)
 {
 	struct hist_browser *browser = zalloc(sizeof(*browser));
 
@@ -1267,6 +1267,8 @@
 	const char *ev_name = perf_evsel__name(evsel);
 	char buf[512];
 	size_t buflen = sizeof(buf);
+	char ref[30] = " show reference callgraph, ";
+	bool enable_ref = false;
 
 	if (symbol_conf.filter_relative) {
 		nr_samples = hists->stats.nr_non_filtered_samples;
@@ -1292,10 +1294,13 @@
 		}
 	}
 
+	if (symbol_conf.show_ref_callgraph &&
+	    strstr(ev_name, "call-graph=no"))
+		enable_ref = true;
 	nr_samples = convert_unit(nr_samples, &unit);
 	printed = scnprintf(bf, size,
-			   "Samples: %lu%c of event '%s', Event count (approx.): %" PRIu64,
-			   nr_samples, unit, ev_name, nr_events);
+			   "Samples: %lu%c of event '%s',%sEvent count (approx.): %" PRIu64,
+			   nr_samples, unit, ev_name, enable_ref ? ref : " ", nr_events);
 
 
 	if (hists->uid_filter_str)
@@ -1690,7 +1695,7 @@
 				    bool left_exits,
 				    struct hist_browser_timer *hbt,
 				    float min_pcnt,
-				    struct perf_session_env *env)
+				    struct perf_env *env)
 {
 	struct hists *hists = evsel__hists(evsel);
 	struct hist_browser *browser = hist_browser__new(hists, hbt, env);
@@ -1868,6 +1873,7 @@
 		case K_RIGHT:
 			/* menu */
 			break;
+		case K_ESC:
 		case K_LEFT: {
 			const void *top;
 
@@ -1877,6 +1883,12 @@
 				 */
 				if (left_exits)
 					goto out_free_stack;
+
+				if (key == K_ESC &&
+				    ui_browser__dialog_yesno(&browser->b,
+							     "Do you really want to exit?"))
+					goto out_free_stack;
+
 				continue;
 			}
 			top = pstack__peek(browser->pstack);
@@ -1892,12 +1904,6 @@
 				do_zoom_thread(browser, actions);
 			continue;
 		}
-		case K_ESC:
-			if (!left_exits &&
-			    !ui_browser__dialog_yesno(&browser->b,
-					       "Do you really want to exit?"))
-				continue;
-			/* Fall thru */
 		case 'q':
 		case CTRL('c'):
 			goto out_free_stack;
@@ -2010,7 +2016,7 @@
 	struct perf_evsel *selection;
 	bool lost_events, lost_events_warned;
 	float min_pcnt;
-	struct perf_session_env *env;
+	struct perf_env *env;
 };
 
 static void perf_evsel_menu__write(struct ui_browser *browser,
@@ -2044,7 +2050,7 @@
 	nr_events = convert_unit(nr_events, &unit);
 	printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
 			   unit, unit == ' ' ? "" : " ", ev_name);
-	slsmg_printf("%s", bf);
+	ui_browser__printf(browser, "%s", bf);
 
 	nr_events = hists->stats.nr_events[PERF_RECORD_LOST];
 	if (nr_events != 0) {
@@ -2057,7 +2063,7 @@
 		warn = bf;
 	}
 
-	slsmg_write_nstring(warn, browser->width - printed);
+	ui_browser__write_nstring(browser, warn, browser->width - printed);
 
 	if (current_entry)
 		menu->selection = evsel;
@@ -2120,15 +2126,11 @@
 				else
 					pos = perf_evsel__prev(pos);
 				goto browse_hists;
-			case K_ESC:
-				if (!ui_browser__dialog_yesno(&menu->b,
-						"Do you really want to exit?"))
-					continue;
-				/* Fall thru */
 			case K_SWITCH_INPUT_DATA:
 			case 'q':
 			case CTRL('c'):
 				goto out;
+			case K_ESC:
 			default:
 				continue;
 			}
@@ -2167,7 +2169,7 @@
 					   int nr_entries, const char *help,
 					   struct hist_browser_timer *hbt,
 					   float min_pcnt,
-					   struct perf_session_env *env)
+					   struct perf_env *env)
 {
 	struct perf_evsel *pos;
 	struct perf_evsel_menu menu = {
@@ -2200,7 +2202,7 @@
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 				  struct hist_browser_timer *hbt,
 				  float min_pcnt,
-				  struct perf_session_env *env)
+				  struct perf_env *env)
 {
 	int nr_entries = evlist->nr_entries;
 
diff --git a/tools/perf/ui/browsers/map.c b/tools/perf/ui/browsers/map.c
index b11639f..8c154c7 100644
--- a/tools/perf/ui/browsers/map.c
+++ b/tools/perf/ui/browsers/map.c
@@ -1,4 +1,3 @@
-#include "../libslang.h"
 #include <elf.h>
 #include <inttypes.h>
 #include <sys/ttydefaults.h>
@@ -26,13 +25,13 @@
 	int width;
 
 	ui_browser__set_percent_color(browser, 0, current_entry);
-	slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ",
-		     mb->addrlen, sym->start, mb->addrlen, sym->end,
-		     sym->binding == STB_GLOBAL ? 'g' :
-		     sym->binding == STB_LOCAL  ? 'l' : 'w');
+	ui_browser__printf(browser, "%*" PRIx64 " %*" PRIx64 " %c ",
+			   mb->addrlen, sym->start, mb->addrlen, sym->end,
+			   sym->binding == STB_GLOBAL ? 'g' :
+				sym->binding == STB_LOCAL  ? 'l' : 'w');
 	width = browser->width - ((mb->addrlen * 2) + 4);
 	if (width > 0)
-		slsmg_write_nstring(sym->name, width);
+		ui_browser__write_nstring(browser, sym->name, width);
 }
 
 /* FIXME uber-kludgy, see comment on cmd_report... */
diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c
index 402d2bd..e13b48d 100644
--- a/tools/perf/ui/browsers/scripts.c
+++ b/tools/perf/ui/browsers/scripts.c
@@ -81,7 +81,7 @@
 	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
 						       HE_COLORSET_NORMAL);
 
-	slsmg_write_nstring(sline->line, browser->width);
+	ui_browser__write_nstring(browser, sline->line, browser->width);
 }
 
 static int script_browser__run(struct perf_script_browser *browser)
diff --git a/tools/perf/ui/libslang.h b/tools/perf/ui/libslang.h
index 4d54b64..db81669 100644
--- a/tools/perf/ui/libslang.h
+++ b/tools/perf/ui/libslang.h
@@ -14,12 +14,15 @@
 #if SLANG_VERSION < 20104
 #define slsmg_printf(msg, args...) \
 	SLsmg_printf((char *)(msg), ##args)
+#define slsmg_vprintf(msg, vargs) \
+	SLsmg_vprintf((char *)(msg), vargs)
 #define slsmg_write_nstring(msg, len) \
 	SLsmg_write_nstring((char *)(msg), len)
 #define sltt_set_color(obj, name, fg, bg) \
 	SLtt_set_color(obj,(char *)(name), (char *)(fg), (char *)(bg))
 #else
 #define slsmg_printf SLsmg_printf
+#define slsmg_vprintf SLsmg_vprintf
 #define slsmg_write_nstring SLsmg_write_nstring
 #define sltt_set_color SLtt_set_color
 #endif
diff --git a/tools/perf/ui/tui/progress.c b/tools/perf/ui/tui/progress.c
index c61d14b..c4b9900 100644
--- a/tools/perf/ui/tui/progress.c
+++ b/tools/perf/ui/tui/progress.c
@@ -33,9 +33,26 @@
 	pthread_mutex_unlock(&ui__lock);
 }
 
+static void tui_progress__finish(void)
+{
+	int y;
+
+	if (use_browser <= 0)
+		return;
+
+	ui__refresh_dimensions(false);
+	pthread_mutex_lock(&ui__lock);
+	y = SLtt_Screen_Rows / 2 - 2;
+	SLsmg_set_color(0);
+	SLsmg_fill_region(y, 0, 3, SLtt_Screen_Cols, ' ');
+	SLsmg_refresh();
+	pthread_mutex_unlock(&ui__lock);
+}
+
 static struct ui_progress_ops tui_progress__ops =
 {
-	.update		= tui_progress__update,
+	.update = tui_progress__update,
+	.finish = tui_progress__finish,
 };
 
 void tui_progress__init(void)
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
index bf890f7..d96ad7c 100644
--- a/tools/perf/ui/tui/util.c
+++ b/tools/perf/ui/tui/util.c
@@ -21,7 +21,7 @@
 
 	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
 						       HE_COLORSET_NORMAL);
-	slsmg_write_nstring(*arg, browser->width);
+	ui_browser__write_nstring(browser, *arg, browser->width);
 }
 
 static int popup_menu__run(struct ui_browser *menu)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index d2d318c..349bc96 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -14,6 +14,7 @@
 libperf-y += help.o
 libperf-y += kallsyms.o
 libperf-y += levenshtein.o
+libperf-y += llvm-utils.o
 libperf-y += parse-options.o
 libperf-y += parse-events.o
 libperf-y += path.o
@@ -67,18 +68,25 @@
 libperf-y += rblist.o
 libperf-y += intlist.o
 libperf-y += vdso.o
+libperf-y += counts.o
 libperf-y += stat.o
 libperf-y += stat-shadow.o
 libperf-y += record.o
 libperf-y += srcline.o
 libperf-y += data.o
 libperf-$(CONFIG_X86) += tsc.o
+libperf-$(CONFIG_AUXTRACE) += tsc.o
 libperf-y += cloexec.o
 libperf-y += thread-stack.o
 libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+libperf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
+libperf-$(CONFIG_AUXTRACE) += intel-pt.o
+libperf-$(CONFIG_AUXTRACE) += intel-bts.o
 libperf-y += parse-branch-options.o
+libperf-y += parse-regs-options.o
 
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
+libperf-$(CONFIG_LIBELF) += probe-file.o
 libperf-$(CONFIG_LIBELF) += probe-event.o
 
 ifndef CONFIG_LIBELF
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 03b7bc70..d1eece7 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -473,17 +473,73 @@
 	return 0;
 }
 
+/* The cycles histogram is lazily allocated. */
+static int symbol__alloc_hist_cycles(struct symbol *sym)
+{
+	struct annotation *notes = symbol__annotation(sym);
+	const size_t size = symbol__size(sym);
+
+	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
+	if (notes->src->cycles_hist == NULL)
+		return -1;
+	return 0;
+}
+
 void symbol__annotate_zero_histograms(struct symbol *sym)
 {
 	struct annotation *notes = symbol__annotation(sym);
 
 	pthread_mutex_lock(&notes->lock);
-	if (notes->src != NULL)
+	if (notes->src != NULL) {
 		memset(notes->src->histograms, 0,
 		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+		if (notes->src->cycles_hist)
+			memset(notes->src->cycles_hist, 0,
+				symbol__size(sym) * sizeof(struct cyc_hist));
+	}
 	pthread_mutex_unlock(&notes->lock);
 }
 
+static int __symbol__account_cycles(struct annotation *notes,
+				    u64 start,
+				    unsigned offset, unsigned cycles,
+				    unsigned have_start)
+{
+	struct cyc_hist *ch;
+
+	ch = notes->src->cycles_hist;
+	/*
+	 * For now we can only account one basic block per
+	 * final jump. But multiple could be overlapping.
+	 * Always account the longest one. So when
+	 * a shorter one has been already seen throw it away.
+	 *
+	 * We separately always account the full cycles.
+	 */
+	ch[offset].num_aggr++;
+	ch[offset].cycles_aggr += cycles;
+
+	if (!have_start && ch[offset].have_start)
+		return 0;
+	if (ch[offset].num) {
+		if (have_start && (!ch[offset].have_start ||
+				   ch[offset].start > start)) {
+			ch[offset].have_start = 0;
+			ch[offset].cycles = 0;
+			ch[offset].num = 0;
+			if (ch[offset].reset < 0xffff)
+				ch[offset].reset++;
+		} else if (have_start &&
+			   ch[offset].start < start)
+			return 0;
+	}
+	ch[offset].have_start = have_start;
+	ch[offset].start = start;
+	ch[offset].cycles += cycles;
+	ch[offset].num++;
+	return 0;
+}
+
 static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 				      struct annotation *notes, int evidx, u64 addr)
 {
@@ -506,7 +562,7 @@
 	return 0;
 }
 
-static struct annotation *symbol__get_annotation(struct symbol *sym)
+static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
 {
 	struct annotation *notes = symbol__annotation(sym);
 
@@ -514,6 +570,10 @@
 		if (symbol__alloc_hist(sym) < 0)
 			return NULL;
 	}
+	if (!notes->src->cycles_hist && cycles) {
+		if (symbol__alloc_hist_cycles(sym) < 0)
+			return NULL;
+	}
 	return notes;
 }
 
@@ -524,12 +584,73 @@
 
 	if (sym == NULL)
 		return 0;
-	notes = symbol__get_annotation(sym);
+	notes = symbol__get_annotation(sym, false);
 	if (notes == NULL)
 		return -ENOMEM;
 	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
 }
 
+static int symbol__account_cycles(u64 addr, u64 start,
+				  struct symbol *sym, unsigned cycles)
+{
+	struct annotation *notes;
+	unsigned offset;
+
+	if (sym == NULL)
+		return 0;
+	notes = symbol__get_annotation(sym, true);
+	if (notes == NULL)
+		return -ENOMEM;
+	if (addr < sym->start || addr >= sym->end)
+		return -ERANGE;
+
+	if (start) {
+		if (start < sym->start || start >= sym->end)
+			return -ERANGE;
+		if (start >= addr)
+			start = 0;
+	}
+	offset = addr - sym->start;
+	return __symbol__account_cycles(notes,
+					start ? start - sym->start : 0,
+					offset, cycles,
+					!!start);
+}
+
+int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
+				    struct addr_map_symbol *start,
+				    unsigned cycles)
+{
+	u64 saddr = 0;
+	int err;
+
+	if (!cycles)
+		return 0;
+
+	/*
+	 * Only set start when IPC can be computed. We can only
+	 * compute it when the basic block is completely in a single
+	 * function.
+	 * Special case the case when the jump is elsewhere, but
+	 * it starts on the function start.
+	 */
+	if (start &&
+		(start->sym == ams->sym ||
+		 (ams->sym &&
+		   start->addr == ams->sym->start + ams->map->start)))
+		saddr = start->al_addr;
+	if (saddr == 0)
+		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
+			ams->addr,
+			start ? start->addr : 0,
+			ams->sym ? ams->sym->start + ams->map->start : 0,
+			saddr);
+	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
+	if (err)
+		pr_debug2("account_cycles failed %d\n", err);
+	return err;
+}
+
 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
 {
 	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
@@ -1005,6 +1126,7 @@
 		dso->annotate_warned = 1;
 		pr_err("Can't annotate %s:\n\n"
 		       "No vmlinux file%s\nwas found in the path.\n\n"
+		       "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
 		       "Please use:\n\n"
 		       "  perf buildid-cache -vu vmlinux\n\n"
 		       "or:\n\n"
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 7e78e6c..e999609 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -59,6 +59,8 @@
 	char		    *name;
 	struct ins	    *ins;
 	int		    line_nr;
+	float		    ipc;
+	u64		    cycles;
 	struct ins_operands ops;
 };
 
@@ -79,6 +81,17 @@
 	u64		addr[0];
 };
 
+struct cyc_hist {
+	u64	start;
+	u64	cycles;
+	u64	cycles_aggr;
+	u32	num;
+	u32	num_aggr;
+	u8	have_start;
+	/* 1 byte padding */
+	u16	reset;
+};
+
 struct source_line_samples {
 	double		percent;
 	double		percent_sum;
@@ -97,6 +110,7 @@
  * @histogram: Array of addr hit histograms per event being monitored
  * @lines: If 'print_lines' is specified, per source code line percentages
  * @source: source parsed from a disassembler like objdump -dS
+ * @cyc_hist: Average cycles per basic block
  *
  * lines is allocated, percentages calculated and all sorted by percentage
  * when the annotation is about to be presented, so the percentages are for
@@ -109,6 +123,7 @@
 	struct source_line *lines;
 	int    		   nr_histograms;
 	int    		   sizeof_sym_hist;
+	struct cyc_hist	   *cycles_hist;
 	struct sym_hist	   histograms[0];
 };
 
@@ -130,6 +145,10 @@
 
 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
 
+int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
+				    struct addr_map_symbol *start,
+				    unsigned cycles);
+
 int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 addr);
 
 int symbol__alloc_hist(struct symbol *sym);
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 83d9dd9..a980e7c 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -47,6 +47,9 @@
 #include "debug.h"
 #include "parse-options.h"
 
+#include "intel-pt.h"
+#include "intel-bts.h"
+
 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
 			struct auxtrace_mmap_params *mp,
 			void *userpg, int fd)
@@ -876,7 +879,7 @@
 
 int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
 				      union perf_event *event,
-				      struct perf_session *session __maybe_unused)
+				      struct perf_session *session)
 {
 	enum auxtrace_type type = event->auxtrace_info.type;
 
@@ -884,6 +887,10 @@
 		fprintf(stdout, " type: %u\n", type);
 
 	switch (type) {
+	case PERF_AUXTRACE_INTEL_PT:
+		return intel_pt_process_auxtrace_info(event, session);
+	case PERF_AUXTRACE_INTEL_BTS:
+		return intel_bts_process_auxtrace_info(event, session);
 	case PERF_AUXTRACE_UNKNOWN:
 	default:
 		return -EINVAL;
@@ -942,6 +949,7 @@
 	struct itrace_synth_opts *synth_opts = opt->value;
 	const char *p;
 	char *endptr;
+	bool period_type_set = false;
 
 	synth_opts->set = true;
 
@@ -970,10 +978,12 @@
 				case 'i':
 					synth_opts->period_type =
 						PERF_ITRACE_PERIOD_INSTRUCTIONS;
+					period_type_set = true;
 					break;
 				case 't':
 					synth_opts->period_type =
 						PERF_ITRACE_PERIOD_TICKS;
+					period_type_set = true;
 					break;
 				case 'm':
 					synth_opts->period *= 1000;
@@ -986,6 +996,7 @@
 						goto out_err;
 					synth_opts->period_type =
 						PERF_ITRACE_PERIOD_NANOSECS;
+					period_type_set = true;
 					break;
 				case '\0':
 					goto out;
@@ -1039,7 +1050,7 @@
 	}
 out:
 	if (synth_opts->instructions) {
-		if (!synth_opts->period_type)
+		if (!period_type_set)
 			synth_opts->period_type =
 					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
 		if (!synth_opts->period)
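
Note on the period_type_set change above: judging from these hunks, the
instructions period type appears to be the enum's first (zero-valued)
member, so testing synth_opts->period_type alone cannot distinguish "never
set" from "explicitly set to instructions"; the new flag records the latter.
A minimal, standalone illustration of that ambiguity (names simplified and
hypothetical, not the perf code):

	#include <stdio.h>
	#include <stdbool.h>

	enum period_type { PERIOD_INSTRUCTIONS, PERIOD_TICKS, PERIOD_NANOSECS };

	struct opts {
		enum period_type period_type;
		bool period_type_set;
	};

	int main(void)
	{
		struct opts o = { 0 };

		o.period_type = PERIOD_INSTRUCTIONS;	/* user passed 'i' */
		o.period_type_set = true;

		/* The value alone still reads as "unset" (0). */
		printf("value == 0: %s, flag set: %s\n",
		       o.period_type == 0 ? "yes" : "no",
		       o.period_type_set ? "yes" : "no");
		return 0;
	}
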
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 471aecb..bf72b77 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -39,6 +39,8 @@
 
 enum auxtrace_type {
 	PERF_AUXTRACE_UNKNOWN,
+	PERF_AUXTRACE_INTEL_PT,
+	PERF_AUXTRACE_INTEL_BTS,
 };
 
 enum itrace_period_type {
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 1f6fc23..d909459 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -93,6 +93,38 @@
 	return raw - build_id;
 }
 
+int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id)
+{
+	char notes[PATH_MAX];
+	u8 build_id[BUILD_ID_SIZE];
+	int ret;
+
+	if (!root_dir)
+		root_dir = "";
+
+	scnprintf(notes, sizeof(notes), "%s/sys/kernel/notes", root_dir);
+
+	ret = sysfs__read_build_id(notes, build_id, sizeof(build_id));
+	if (ret < 0)
+		return ret;
+
+	return build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+}
+
+int filename__sprintf_build_id(const char *pathname, char *sbuild_id)
+{
+	u8 build_id[BUILD_ID_SIZE];
+	int ret;
+
+	ret = filename__read_build_id(pathname, build_id, sizeof(build_id));
+	if (ret < 0)
+		return ret;
+	else if (ret != sizeof(build_id))
+		return -EINVAL;
+
+	return build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+}
+
 /* asnprintf consolidates asprintf and snprintf */
 static int asnprintf(char **strp, size_t size, const char *fmt, ...)
 {
@@ -124,7 +156,7 @@
 
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
 {
-	char build_id_hex[BUILD_ID_SIZE * 2 + 1];
+	char build_id_hex[SBUILD_ID_SIZE];
 
 	if (!dso->has_build_id)
 		return NULL;
@@ -291,7 +323,7 @@
 	struct dirent *d;
 	int ret = 0;
 
-	list = strlist__new(true, NULL);
+	list = strlist__new(NULL, NULL);
 	dir_name = build_id_cache__dirname_from_path(pathname, false, false);
 	if (!list || !dir_name) {
 		ret = -ENOMEM;
@@ -384,7 +416,7 @@
 				 const char *name, bool is_kallsyms,
 				 bool is_vdso)
 {
-	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
+	char sbuild_id[SBUILD_ID_SIZE];
 
 	build_id__sprintf(build_id, build_id_size, sbuild_id);
 
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 8501122..27a14a8 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -1,7 +1,8 @@
 #ifndef PERF_BUILD_ID_H_
 #define PERF_BUILD_ID_H_ 1
 
-#define BUILD_ID_SIZE 20
+#define BUILD_ID_SIZE	20
+#define SBUILD_ID_SIZE	(BUILD_ID_SIZE * 2 + 1)
 
 #include "tool.h"
 #include "strlist.h"
@@ -11,6 +12,9 @@
 struct dso;
 
 int build_id__sprintf(const u8 *build_id, int len, char *bf);
+int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id);
+int filename__sprintf_build_id(const char *pathname, char *sbuild_id);
+
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
 
 int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
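
The new SBUILD_ID_SIZE above replaces the open-coded "BUILD_ID_SIZE * 2 + 1"
buffer sizes: every raw build-id byte becomes two hex characters, plus one
byte for the terminating NUL. A standalone sketch of that encoding (a
simplified stand-in, not the actual build_id__sprintf()):

	#include <stdio.h>

	#define BUILD_ID_SIZE	20
	#define SBUILD_ID_SIZE	(BUILD_ID_SIZE * 2 + 1)	/* 2 hex chars/byte + NUL */

	static int sprintf_build_id(const unsigned char *build_id, int len, char *bf)
	{
		char *p = bf;
		int i;

		for (i = 0; i < len; i++)
			p += sprintf(p, "%02x", build_id[i]);
		return (int)(p - bf);
	}

	int main(void)
	{
		unsigned char raw[BUILD_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
		char sbuild_id[SBUILD_ID_SIZE];

		sprintf_build_id(raw, BUILD_ID_SIZE, sbuild_id);
		printf("%s\n", sbuild_id);	/* always 40 hex chars */
		return 0;
	}
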
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 9f643ee..773fe13 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -25,96 +25,9 @@
 
 __thread struct callchain_cursor callchain_cursor;
 
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-static int get_stack_size(const char *str, unsigned long *_size)
+int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
 {
-	char *endptr;
-	unsigned long size;
-	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
-
-	size = strtoul(str, &endptr, 0);
-
-	do {
-		if (*endptr)
-			break;
-
-		size = round_up(size, sizeof(u64));
-		if (!size || size > max_size)
-			break;
-
-		*_size = size;
-		return 0;
-
-	} while (0);
-
-	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
-	       max_size, str);
-	return -1;
-}
-#endif /* HAVE_DWARF_UNWIND_SUPPORT */
-
-int parse_callchain_record_opt(const char *arg)
-{
-	char *tok, *name, *saveptr = NULL;
-	char *buf;
-	int ret = -1;
-
-	/* We need buffer that we know we can write to. */
-	buf = malloc(strlen(arg) + 1);
-	if (!buf)
-		return -ENOMEM;
-
-	strcpy(buf, arg);
-
-	tok = strtok_r((char *)buf, ",", &saveptr);
-	name = tok ? : (char *)buf;
-
-	do {
-		/* Framepointer style */
-		if (!strncmp(name, "fp", sizeof("fp"))) {
-			if (!strtok_r(NULL, ",", &saveptr)) {
-				callchain_param.record_mode = CALLCHAIN_FP;
-				ret = 0;
-			} else
-				pr_err("callchain: No more arguments "
-				       "needed for --call-graph fp\n");
-			break;
-
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-		/* Dwarf style */
-		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
-			const unsigned long default_stack_dump_size = 8192;
-
-			ret = 0;
-			callchain_param.record_mode = CALLCHAIN_DWARF;
-			callchain_param.dump_size = default_stack_dump_size;
-
-			tok = strtok_r(NULL, ",", &saveptr);
-			if (tok) {
-				unsigned long size = 0;
-
-				ret = get_stack_size(tok, &size);
-				callchain_param.dump_size = size;
-			}
-#endif /* HAVE_DWARF_UNWIND_SUPPORT */
-		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
-			if (!strtok_r(NULL, ",", &saveptr)) {
-				callchain_param.record_mode = CALLCHAIN_LBR;
-				ret = 0;
-			} else
-				pr_err("callchain: No more arguments "
-					"needed for --call-graph lbr\n");
-			break;
-		} else {
-			pr_err("callchain: Unknown --call-graph option "
-			       "value: %s\n", arg);
-			break;
-		}
-
-	} while (0);
-
-	free(buf);
-	return ret;
+	return parse_callchain_record(arg, param);
 }
 
 static int parse_callchain_mode(const char *value)
@@ -219,7 +132,7 @@
 	var += sizeof("call-graph.") - 1;
 
 	if (!strcmp(var, "record-mode"))
-		return parse_callchain_record_opt(value);
+		return parse_callchain_record_opt(value, &callchain_param);
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 	if (!strcmp(var, "dump-size")) {
 		unsigned long size = 0;
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 679c2c6..acee2b3 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -177,7 +177,8 @@
 			bool hide_unresolved);
 
 extern const char record_callchain_help[];
-int parse_callchain_record_opt(const char *arg);
+extern int parse_callchain_record(const char *arg, struct callchain_param *param);
+int parse_callchain_record_opt(const char *arg, struct callchain_param *param);
 int parse_callchain_report_opt(const char *arg);
 int perf_callchain_config(const char *var, const char *value);
 
diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
index 68888c2..3bee677 100644
--- a/tools/perf/util/cloexec.h
+++ b/tools/perf/util/cloexec.h
@@ -4,7 +4,7 @@
 unsigned long perf_event_open_cloexec_flag(void);
 
 #ifdef __GLIBC_PREREQ
-#if !__GLIBC_PREREQ(2, 6)
+#if !__GLIBC_PREREQ(2, 6) && !defined(__UCLIBC__)
 extern int sched_getcpu(void) __THROW;
 #endif
 #endif
diff --git a/tools/perf/util/color.c b/tools/perf/util/color.c
index 55355b3..9b95654 100644
--- a/tools/perf/util/color.c
+++ b/tools/perf/util/color.c
@@ -67,8 +67,9 @@
 	return r;
 }
 
+/* Colors are not included in return value */
 static int __color_vfprintf(FILE *fp, const char *color, const char *fmt,
-		va_list args, const char *trail)
+		va_list args)
 {
 	int r = 0;
 
@@ -83,12 +84,10 @@
 	}
 
 	if (perf_use_color_default && *color)
-		r += fprintf(fp, "%s", color);
+		fprintf(fp, "%s", color);
 	r += vfprintf(fp, fmt, args);
 	if (perf_use_color_default && *color)
-		r += fprintf(fp, "%s", PERF_COLOR_RESET);
-	if (trail)
-		r += fprintf(fp, "%s", trail);
+		fprintf(fp, "%s", PERF_COLOR_RESET);
 	return r;
 }
 
@@ -100,7 +99,7 @@
 
 int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args)
 {
-	return __color_vfprintf(fp, color, fmt, args, NULL);
+	return __color_vfprintf(fp, color, fmt, args);
 }
 
 int color_snprintf(char *bf, size_t size, const char *color,
@@ -126,16 +125,6 @@
 	return r;
 }
 
-int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...)
-{
-	va_list args;
-	int r;
-	va_start(args, fmt);
-	r = __color_vfprintf(fp, color, fmt, args, "\n");
-	va_end(args);
-	return r;
-}
-
 /*
  * This function splits the buffer by newlines and colors the lines individually.
  *
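
As the new comment in __color_vfprintf() notes, colour escape sequences are
no longer counted in the return value (and the unused trailing-string
parameter goes away along with color_fprintf_ln()). Presumably callers treat
the return value as the number of visible characters when padding columns;
a standalone sketch of that convention, with hypothetical names:

	#include <stdio.h>

	#define COLOR_RED   "\033[31m"
	#define COLOR_RESET "\033[0m"

	/* Return only the visible width, not the escape-sequence bytes. */
	static int color_fputs_visible(FILE *fp, const char *color, const char *s)
	{
		int visible;

		fprintf(fp, "%s", color);	/* escape bytes: not counted */
		visible = fprintf(fp, "%s", s);	/* payload: counted */
		fprintf(fp, "%s", COLOR_RESET);	/* escape bytes: not counted */
		return visible;
	}

	int main(void)
	{
		int width = color_fputs_visible(stdout, COLOR_RED, "42");

		printf("%*s|\n", 10 - width, "");	/* pad by visible width only */
		return 0;
	}
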
diff --git a/tools/perf/util/color.h b/tools/perf/util/color.h
index 38146f9..a93997f 100644
--- a/tools/perf/util/color.h
+++ b/tools/perf/util/color.h
@@ -35,7 +35,6 @@
 int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
 int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
 int color_snprintf(char *bf, size_t size, const char *color, const char *fmt, ...);
-int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
 int color_fwrite_lines(FILE *fp, const char *color, size_t count, const char *buf);
 int value_color_snprintf(char *bf, size_t size, const char *fmt, double value);
 int percent_color_snprintf(char *bf, size_t size, const char *fmt, ...);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index e18f653..2e452ac 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -12,6 +12,7 @@
 #include "cache.h"
 #include "exec_cmd.h"
 #include "util/hist.h"  /* perf_hist_config */
+#include "util/llvm-utils.h"   /* perf_llvm_config */
 
 #define MAXNAME (256)
 
@@ -408,6 +409,9 @@
 	if (!prefixcmp(var, "call-graph."))
 		return perf_callchain_config(var, value);
 
+	if (!prefixcmp(var, "llvm."))
+		return perf_llvm_config(var, value);
+
 	/* Add other config variables here. */
 	return 0;
 }
diff --git a/tools/perf/util/counts.c b/tools/perf/util/counts.c
new file mode 100644
index 0000000..e3fde31
--- /dev/null
+++ b/tools/perf/util/counts.c
@@ -0,0 +1,52 @@
+#include <stdlib.h>
+#include "evsel.h"
+#include "counts.h"
+
+struct perf_counts *perf_counts__new(int ncpus, int nthreads)
+{
+	struct perf_counts *counts = zalloc(sizeof(*counts));
+
+	if (counts) {
+		struct xyarray *values;
+
+		values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
+		if (!values) {
+			free(counts);
+			return NULL;
+		}
+
+		counts->values = values;
+	}
+
+	return counts;
+}
+
+void perf_counts__delete(struct perf_counts *counts)
+{
+	if (counts) {
+		xyarray__delete(counts->values);
+		free(counts);
+	}
+}
+
+static void perf_counts__reset(struct perf_counts *counts)
+{
+	xyarray__reset(counts->values);
+}
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel)
+{
+	perf_counts__reset(evsel->counts);
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+	evsel->counts = perf_counts__new(ncpus, nthreads);
+	return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_counts(struct perf_evsel *evsel)
+{
+	perf_counts__delete(evsel->counts);
+	evsel->counts = NULL;
+}
diff --git a/tools/perf/util/counts.h b/tools/perf/util/counts.h
new file mode 100644
index 0000000..34d8baa
--- /dev/null
+++ b/tools/perf/util/counts.h
@@ -0,0 +1,37 @@
+#ifndef __PERF_COUNTS_H
+#define __PERF_COUNTS_H
+
+#include "xyarray.h"
+
+struct perf_counts_values {
+	union {
+		struct {
+			u64 val;
+			u64 ena;
+			u64 run;
+		};
+		u64 values[3];
+	};
+};
+
+struct perf_counts {
+	s8			  scaled;
+	struct perf_counts_values aggr;
+	struct xyarray		  *values;
+};
+
+
+static inline struct perf_counts_values*
+perf_counts(struct perf_counts *counts, int cpu, int thread)
+{
+	return xyarray__entry(counts->values, cpu, thread);
+}
+
+struct perf_counts *perf_counts__new(int ncpus, int nthreads);
+void perf_counts__delete(struct perf_counts *counts);
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads);
+void perf_evsel__free_counts(struct perf_evsel *evsel);
+
+#endif /* __PERF_COUNTS_H */
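
The new counts.[ch] files move per-evsel counter storage behind struct
perf_counts, which keeps one perf_counts_values cell per (cpu, thread) pair
in an xyarray. A rough standalone sketch of that layout (a plain 2-D
allocation standing in for xyarray; names hypothetical):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct counts_values { uint64_t val, ena, run; };

	struct counts {
		int ncpus, nthreads;
		struct counts_values *values;	/* ncpus * nthreads cells */
	};

	static struct counts *counts_new(int ncpus, int nthreads)
	{
		struct counts *c = calloc(1, sizeof(*c));

		if (!c)
			return NULL;
		c->values = calloc((size_t)ncpus * nthreads, sizeof(*c->values));
		if (!c->values) {
			free(c);
			return NULL;
		}
		c->ncpus = ncpus;
		c->nthreads = nthreads;
		return c;
	}

	/* Counterpart of perf_counts(counts, cpu, thread). */
	static struct counts_values *counts_cell(struct counts *c, int cpu, int thread)
	{
		return &c->values[cpu * c->nthreads + thread];
	}

	int main(void)
	{
		struct counts *c = counts_new(4, 2);

		if (!c)
			return 1;
		counts_cell(c, 3, 1)->val = 12345;
		printf("cpu 3, thread 1: %llu\n",
		       (unsigned long long)counts_cell(c, 3, 1)->val);
		free(c->values);
		free(c);
		return 0;
	}
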
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 2da5581..86d9c73 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -36,6 +36,11 @@
 	return ret;
 }
 
+int veprintf(int level, int var, const char *fmt, va_list args)
+{
+	return _eprintf(level, var, fmt, args);
+}
+
 int eprintf(int level, int var, const char *fmt, ...)
 {
 	va_list args;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index caac2fd..8b9a088 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -50,6 +50,7 @@
 
 int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
 int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __attribute__((format(printf, 4, 5)));
+int veprintf(int level, int var, const char *fmt, va_list args);
 
 int perf_debug_option(const char *str);
 
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index 2fe98bb..fc8db9c 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -137,6 +137,10 @@
 	struct rb_node	 rb_node;	/* rbtree node sorted by long name */
 	struct rb_root	 symbols[MAP__NR_TYPES];
 	struct rb_root	 symbol_names[MAP__NR_TYPES];
+	struct {
+		u64		addr;
+		struct symbol	*symbol;
+	} last_find_result[MAP__NR_TYPES];
 	void		 *a2l;
 	char		 *symsrc_filename;
 	unsigned int	 a2l_fails;
@@ -320,6 +324,8 @@
 struct dso *dsos__findnew(struct dsos *dsos, const char *name);
 bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
 
+void dso__reset_find_symbol_cache(struct dso *dso);
+
 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
 			       bool (skip)(struct dso *dso, int parm), int parm);
 size_t __dsos__fprintf(struct list_head *head, FILE *fp);
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 57f3ef4..a509aa84 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -734,15 +734,18 @@
 	Dwarf_Lines *lines;
 	Dwarf_Line *line;
 	Dwarf_Addr addr;
-	const char *fname;
+	const char *fname, *decf = NULL;
 	int lineno, ret = 0;
+	int decl = 0, inl;
 	Dwarf_Die die_mem, *cu_die;
 	size_t nlines, i;
 
 	/* Get the CU die */
-	if (dwarf_tag(rt_die) != DW_TAG_compile_unit)
+	if (dwarf_tag(rt_die) != DW_TAG_compile_unit) {
 		cu_die = dwarf_diecu(rt_die, &die_mem, NULL, NULL);
-	else
+		dwarf_decl_line(rt_die, &decl);
+		decf = dwarf_decl_file(rt_die);
+	} else
 		cu_die = rt_die;
 	if (!cu_die) {
 		pr_debug2("Failed to get CU from given DIE.\n");
@@ -767,15 +770,21 @@
 			continue;
 		}
 		/* Filter lines based on address */
-		if (rt_die != cu_die)
+		if (rt_die != cu_die) {
 			/*
 			 * Address filtering
 			 * The line is included in given function, and
 			 * no inline block includes it.
 			 */
-			if (!dwarf_haspc(rt_die, addr) ||
-			    die_find_inlinefunc(rt_die, addr, &die_mem))
+			if (!dwarf_haspc(rt_die, addr))
 				continue;
+			if (die_find_inlinefunc(rt_die, addr, &die_mem)) {
+				dwarf_decl_line(&die_mem, &inl);
+				if (inl != decl ||
+				    decf != dwarf_decl_file(&die_mem))
+					continue;
+			}
+		}
 		/* Get source line */
 		fname = dwarf_linesrc(line, NULL, NULL);
 
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 67a977e..7ff6127 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -26,6 +26,8 @@
 	[PERF_RECORD_AUX]			= "AUX",
 	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
 	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
+	[PERF_RECORD_SWITCH]			= "SWITCH",
+	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
 	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
 	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
 	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
@@ -749,6 +751,14 @@
 	return machine__process_lost_samples_event(machine, event, sample);
 }
 
+int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
+			       union perf_event *event,
+			       struct perf_sample *sample __maybe_unused,
+			       struct machine *machine)
+{
+	return machine__process_switch_event(machine, event);
+}
+
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 {
 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@@ -827,6 +837,20 @@
 		       event->itrace_start.pid, event->itrace_start.tid);
 }
 
+size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
+{
+	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+	const char *in_out = out ? "OUT" : "IN ";
+
+	if (event->header.type == PERF_RECORD_SWITCH)
+		return fprintf(fp, " %s\n", in_out);
+
+	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
+		       in_out, out ? "next" : "prev",
+		       event->context_switch.next_prev_pid,
+		       event->context_switch.next_prev_tid);
+}
+
 size_t perf_event__fprintf(union perf_event *event, FILE *fp)
 {
 	size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -852,6 +876,10 @@
 	case PERF_RECORD_ITRACE_START:
 		ret += perf_event__fprintf_itrace_start(event, fp);
 		break;
+	case PERF_RECORD_SWITCH:
+	case PERF_RECORD_SWITCH_CPU_WIDE:
+		ret += perf_event__fprintf_switch(event, fp);
+		break;
 	default:
 		ret += fprintf(fp, "\n");
 	}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c53f363..f729df5 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -134,7 +134,8 @@
 	u64 predicted:1;
 	u64 in_tx:1;
 	u64 abort:1;
-	u64 reserved:60;
+	u64 cycles:16;
+	u64 reserved:44;
 };
 
 struct branch_entry {
@@ -348,6 +349,12 @@
 	u32 pid, tid;
 };
 
+struct context_switch_event {
+	struct perf_event_header header;
+	u32 next_prev_pid;
+	u32 next_prev_tid;
+};
+
 union perf_event {
 	struct perf_event_header	header;
 	struct mmap_event		mmap;
@@ -369,6 +376,7 @@
 	struct auxtrace_error_event	auxtrace_error;
 	struct aux_event		aux;
 	struct itrace_start_event	itrace_start;
+	struct context_switch_event	context_switch;
 };
 
 void perf_event__print_totals(void);
@@ -418,6 +426,10 @@
 				     union perf_event *event,
 				     struct perf_sample *sample,
 				     struct machine *machine);
+int perf_event__process_switch(struct perf_tool *tool,
+			       union perf_event *event,
+			       struct perf_sample *sample,
+			       struct machine *machine);
 int perf_event__process_mmap(struct perf_tool *tool,
 			     union perf_event *event,
 			     struct perf_sample *sample,
@@ -480,6 +492,7 @@
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
 u64 kallsyms__get_function_start(const char *kallsyms_filename,
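
PERF_RECORD_SWITCH and PERF_RECORD_SWITCH_CPU_WIDE are new records in this
release; the cpu-wide variant additionally carries the pid/tid of the other
task in next_prev_pid/next_prev_tid. A self-contained sketch of decoding
them, mirroring perf_event__fprintf_switch() earlier in this patch (the
constants are copied here for illustration and are believed to match the
4.3 ABI; real code should take them from <linux/perf_event.h>):

	#include <stdio.h>
	#include <stdint.h>

	#define PERF_RECORD_SWITCH		14
	#define PERF_RECORD_SWITCH_CPU_WIDE	15
	#define PERF_RECORD_MISC_SWITCH_OUT	(1 << 13)

	struct event_header { uint32_t type; uint16_t misc; uint16_t size; };

	struct switch_event {
		struct event_header header;
		uint32_t next_prev_pid;
		uint32_t next_prev_tid;
	};

	static void print_switch(const struct switch_event *ev)
	{
		int out = ev->header.misc & PERF_RECORD_MISC_SWITCH_OUT;

		if (ev->header.type == PERF_RECORD_SWITCH) {
			printf("SWITCH %s\n", out ? "OUT" : "IN");
			return;
		}
		printf("SWITCH_CPU_WIDE %s %s pid/tid: %u/%u\n",
		       out ? "OUT" : "IN", out ? "next" : "prev",
		       (unsigned)ev->next_prev_pid, (unsigned)ev->next_prev_tid);
	}

	int main(void)
	{
		struct switch_event ev = {
			.header = { .type = PERF_RECORD_SWITCH_CPU_WIDE,
				    .misc = PERF_RECORD_MISC_SWITCH_OUT },
			.next_prev_pid = 1234,
			.next_prev_tid = 1234,
		};

		print_switch(&ev);
		return 0;
	}
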
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6cfdee6..d51a520 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -98,6 +98,7 @@
 
 	evlist__for_each_safe(evlist, n, pos) {
 		list_del_init(&pos->node);
+		pos->evlist = NULL;
 		perf_evsel__delete(pos);
 	}
 
@@ -125,6 +126,7 @@
 
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
+	entry->evlist = evlist;
 	list_add_tail(&entry->node, &evlist->entries);
 	entry->idx = evlist->nr_entries;
 	entry->tracking = !entry->idx;
@@ -573,7 +575,7 @@
 {
 	struct perf_sample_id *sid;
 
-	if (evlist->nr_entries == 1)
+	if (evlist->nr_entries == 1 || !id)
 		return perf_evlist__first(evlist);
 
 	sid = perf_evlist__id2sid(evlist, id);
@@ -1102,7 +1104,7 @@
 }
 
 static int perf_evlist__propagate_maps(struct perf_evlist *evlist,
-				       struct target *target)
+				       bool has_user_cpus)
 {
 	struct perf_evsel *evsel;
 
@@ -1111,15 +1113,16 @@
 		 * We already have cpus for evsel (via PMU sysfs) so
 		 * keep it, if there's no target cpu list defined.
 		 */
-		if (evsel->cpus && target->cpu_list)
+		if (evsel->cpus && has_user_cpus)
 			cpu_map__put(evsel->cpus);
 
-		if (!evsel->cpus || target->cpu_list)
+		if (!evsel->cpus || has_user_cpus)
 			evsel->cpus = cpu_map__get(evlist->cpus);
 
 		evsel->threads = thread_map__get(evlist->threads);
 
-		if (!evsel->cpus || !evsel->threads)
+		if ((evlist->cpus && !evsel->cpus) ||
+		    (evlist->threads && !evsel->threads))
 			return -ENOMEM;
 	}
 
@@ -1142,7 +1145,7 @@
 	if (evlist->cpus == NULL)
 		goto out_delete_threads;
 
-	return perf_evlist__propagate_maps(evlist, target);
+	return perf_evlist__propagate_maps(evlist, !!target->cpu_list);
 
 out_delete_threads:
 	thread_map__put(evlist->threads);
@@ -1150,6 +1153,23 @@
 	return -1;
 }
 
+int perf_evlist__set_maps(struct perf_evlist *evlist,
+			  struct cpu_map *cpus,
+			  struct thread_map *threads)
+{
+	if (evlist->cpus)
+		cpu_map__put(evlist->cpus);
+
+	evlist->cpus = cpus;
+
+	if (evlist->threads)
+		thread_map__put(evlist->threads);
+
+	evlist->threads = threads;
+
+	return perf_evlist__propagate_maps(evlist, false);
+}
+
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
 {
 	struct perf_evsel *evsel;
@@ -1161,7 +1181,11 @@
 		if (evsel->filter == NULL)
 			continue;
 
-		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
+		/*
+		 * filters only work for tracepoint event, which doesn't have cpu limit.
+		 * So evlist and evsel should always be same.
+		 */
+		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
 		if (err) {
 			*err_evsel = evsel;
 			break;
@@ -1175,11 +1199,9 @@
 {
 	struct perf_evsel *evsel;
 	int err = 0;
-	const int ncpus = cpu_map__nr(evlist->cpus),
-		  nthreads = thread_map__nr(evlist->threads);
 
 	evlist__for_each(evlist, evsel) {
-		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
+		err = perf_evsel__set_filter(evsel, filter);
 		if (err)
 			break;
 	}
@@ -1257,6 +1279,16 @@
 	return __perf_evlist__combined_sample_type(evlist);
 }
 
+u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+	u64 branch_type = 0;
+
+	evlist__for_each(evlist, evsel)
+		branch_type |= evsel->attr.branch_sample_type;
+	return branch_type;
+}
+
 bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
 {
 	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 037633c..b39a619 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -56,6 +56,7 @@
 	struct cpu_map	  *cpus;
 	struct perf_evsel *selected;
 	struct events_stats stats;
+	struct perf_env	*env;
 };
 
 struct perf_evsel_str_handler {
@@ -114,6 +115,8 @@
 
 void perf_evlist__set_id_pos(struct perf_evlist *evlist);
 bool perf_can_sample_identifier(void);
+bool perf_can_record_switch_events(void);
+bool perf_can_record_cpu_wide(void);
 void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts);
 int record_opts__config(struct record_opts *opts);
 
@@ -152,14 +155,9 @@
 void perf_evlist__set_selected(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel);
 
-static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
-					 struct cpu_map *cpus,
-					 struct thread_map *threads)
-{
-	evlist->cpus	= cpus;
-	evlist->threads	= threads;
-}
-
+int perf_evlist__set_maps(struct perf_evlist *evlist,
+			  struct cpu_map *cpus,
+			  struct thread_map *threads);
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
 
@@ -169,6 +167,7 @@
 u64 perf_evlist__read_format(struct perf_evlist *evlist);
 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
 bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
 
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 2936b30..c53f791 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -206,10 +206,13 @@
 	evsel->leader	   = evsel;
 	evsel->unit	   = "";
 	evsel->scale	   = 1.0;
+	evsel->evlist	   = NULL;
 	INIT_LIST_HEAD(&evsel->node);
+	INIT_LIST_HEAD(&evsel->config_terms);
 	perf_evsel__object.init(evsel);
 	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
 	perf_evsel__calc_id_pos(evsel);
+	evsel->cmdline_group_boundary = false;
 }
 
 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
@@ -543,14 +546,15 @@
 
 static void
 perf_evsel__config_callgraph(struct perf_evsel *evsel,
-			     struct record_opts *opts)
+			     struct record_opts *opts,
+			     struct callchain_param *param)
 {
 	bool function = perf_evsel__is_function_event(evsel);
 	struct perf_event_attr *attr = &evsel->attr;
 
 	perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
-	if (callchain_param.record_mode == CALLCHAIN_LBR) {
+	if (param->record_mode == CALLCHAIN_LBR) {
 		if (!opts->branch_stack) {
 			if (attr->exclude_user) {
 				pr_warning("LBR callstack option is only available "
@@ -566,12 +570,12 @@
 				    "Falling back to framepointers.\n");
 	}
 
-	if (callchain_param.record_mode == CALLCHAIN_DWARF) {
+	if (param->record_mode == CALLCHAIN_DWARF) {
 		if (!function) {
 			perf_evsel__set_sample_bit(evsel, REGS_USER);
 			perf_evsel__set_sample_bit(evsel, STACK_USER);
 			attr->sample_regs_user = PERF_REGS_MASK;
-			attr->sample_stack_user = callchain_param.dump_size;
+			attr->sample_stack_user = param->dump_size;
 			attr->exclude_callchain_user = 1;
 		} else {
 			pr_info("Cannot use DWARF unwind for function trace event,"
@@ -585,6 +589,97 @@
 	}
 }
 
+static void
+perf_evsel__reset_callgraph(struct perf_evsel *evsel,
+			    struct callchain_param *param)
+{
+	struct perf_event_attr *attr = &evsel->attr;
+
+	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
+	if (param->record_mode == CALLCHAIN_LBR) {
+		perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
+		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
+					      PERF_SAMPLE_BRANCH_CALL_STACK);
+	}
+	if (param->record_mode == CALLCHAIN_DWARF) {
+		perf_evsel__reset_sample_bit(evsel, REGS_USER);
+		perf_evsel__reset_sample_bit(evsel, STACK_USER);
+	}
+}
+
+static void apply_config_terms(struct perf_evsel *evsel,
+			       struct record_opts *opts)
+{
+	struct perf_evsel_config_term *term;
+	struct list_head *config_terms = &evsel->config_terms;
+	struct perf_event_attr *attr = &evsel->attr;
+	struct callchain_param param;
+	u32 dump_size = 0;
+	char *callgraph_buf = NULL;
+
+	/* callgraph default */
+	param.record_mode = callchain_param.record_mode;
+
+	list_for_each_entry(term, config_terms, list) {
+		switch (term->type) {
+		case PERF_EVSEL__CONFIG_TERM_PERIOD:
+			attr->sample_period = term->val.period;
+			attr->freq = 0;
+			break;
+		case PERF_EVSEL__CONFIG_TERM_FREQ:
+			attr->sample_freq = term->val.freq;
+			attr->freq = 1;
+			break;
+		case PERF_EVSEL__CONFIG_TERM_TIME:
+			if (term->val.time)
+				perf_evsel__set_sample_bit(evsel, TIME);
+			else
+				perf_evsel__reset_sample_bit(evsel, TIME);
+			break;
+		case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
+			callgraph_buf = term->val.callgraph;
+			break;
+		case PERF_EVSEL__CONFIG_TERM_STACK_USER:
+			dump_size = term->val.stack_user;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* User explicitly set per-event callgraph, clear the old setting and reset. */
+	if ((callgraph_buf != NULL) || (dump_size > 0)) {
+
+		/* parse callgraph parameters */
+		if (callgraph_buf != NULL) {
+			if (!strcmp(callgraph_buf, "no")) {
+				param.enabled = false;
+				param.record_mode = CALLCHAIN_NONE;
+			} else {
+				param.enabled = true;
+				if (parse_callchain_record(callgraph_buf, &param)) {
+					pr_err("per-event callgraph setting for %s failed. "
+					       "Apply callgraph global setting for it\n",
+					       evsel->name);
+					return;
+				}
+			}
+		}
+		if (dump_size > 0) {
+			dump_size = round_up(dump_size, sizeof(u64));
+			param.dump_size = dump_size;
+		}
+
+		/* If global callgraph set, clear it */
+		if (callchain_param.enabled)
+			perf_evsel__reset_callgraph(evsel, &callchain_param);
+
+		/* set perf-event callgraph */
+		if (param.enabled)
+			perf_evsel__config_callgraph(evsel, opts, &param);
+	}
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
@@ -689,10 +784,10 @@
 		evsel->attr.exclude_callchain_user = 1;
 
 	if (callchain_param.enabled && !evsel->no_aux_samples)
-		perf_evsel__config_callgraph(evsel, opts);
+		perf_evsel__config_callgraph(evsel, opts, &callchain_param);
 
 	if (opts->sample_intr_regs) {
-		attr->sample_regs_intr = PERF_REGS_MASK;
+		attr->sample_regs_intr = opts->sample_intr_regs;
 		perf_evsel__set_sample_bit(evsel, REGS_INTR);
 	}
 
@@ -707,7 +802,8 @@
 	 */
 	if (opts->sample_time &&
 	    (!perf_missing_features.sample_id_all &&
-	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
+	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
+	     opts->sample_time_set)))
 		perf_evsel__set_sample_bit(evsel, TIME);
 
 	if (opts->raw_samples && !evsel->no_aux_samples) {
@@ -736,6 +832,9 @@
 	attr->mmap2 = track && !perf_missing_features.mmap2;
 	attr->comm  = track;
 
+	if (opts->record_switch_events)
+		attr->context_switch = track;
+
 	if (opts->sample_transaction)
 		perf_evsel__set_sample_bit(evsel, TRANSACTION);
 
@@ -772,6 +871,12 @@
 		attr->use_clockid = 1;
 		attr->clockid = opts->clockid;
 	}
+
+	/*
+	 * Apply event specific term settings,
+	 * it overloads any global configuration.
+	 */
+	apply_config_terms(evsel, opts);
 }
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -815,14 +920,44 @@
 	return 0;
 }
 
-int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
-			   const char *filter)
+int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+			     const char *filter)
 {
 	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
 				     PERF_EVENT_IOC_SET_FILTER,
 				     (void *)filter);
 }
 
+int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
+{
+	char *new_filter = strdup(filter);
+
+	if (new_filter != NULL) {
+		free(evsel->filter);
+		evsel->filter = new_filter;
+		return 0;
+	}
+
+	return -1;
+}
+
+int perf_evsel__append_filter(struct perf_evsel *evsel,
+			      const char *op, const char *filter)
+{
+	char *new_filter;
+
+	if (evsel->filter == NULL)
+		return perf_evsel__set_filter(evsel, filter);
+
+	if (asprintf(&new_filter,"(%s) %s (%s)", evsel->filter, op, filter) > 0) {
+		free(evsel->filter);
+		evsel->filter = new_filter;
+		return 0;
+	}
+
+	return -1;
+}
+
 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
 	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
@@ -865,6 +1000,16 @@
 	zfree(&evsel->id);
 }
 
+static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
+{
+	struct perf_evsel_config_term *term, *h;
+
+	list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
+		list_del(&term->list);
+		free(term);
+	}
+}
+
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
 	int cpu, thread;
@@ -882,8 +1027,10 @@
 void perf_evsel__exit(struct perf_evsel *evsel)
 {
 	assert(list_empty(&evsel->node));
+	assert(evsel->evlist == NULL);
 	perf_evsel__free_fd(evsel);
 	perf_evsel__free_id(evsel);
+	perf_evsel__free_config_terms(evsel);
 	close_cgroup(evsel->cgrp);
 	cpu_map__put(evsel->cpus);
 	thread_map__put(evsel->threads);
@@ -1095,6 +1242,7 @@
 	PRINT_ATTRf(mmap2, p_unsigned);
 	PRINT_ATTRf(comm_exec, p_unsigned);
 	PRINT_ATTRf(use_clockid, p_unsigned);
+	PRINT_ATTRf(context_switch, p_unsigned);
 
 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
 	PRINT_ATTRf(bp_type, p_unsigned);
@@ -2075,8 +2223,13 @@
 		printed += perf_event_attr__fprintf(fp, &evsel->attr,
 						    __print_attr__fprintf, &first);
 	} else if (details->freq) {
-		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
-					 (u64)evsel->attr.sample_freq);
+		const char *term = "sample_freq";
+
+		if (!evsel->attr.freq)
+			term = "sample_period";
+
+		printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
+					 term, (u64)evsel->attr.sample_freq);
 	}
 out:
 	fputc('\n', fp);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 4a7ed56..298e6bb 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -9,7 +9,7 @@
 #include "xyarray.h"
 #include "symbol.h"
 #include "cpumap.h"
-#include "stat.h"
+#include "counts.h"
 
 struct perf_evsel;
 
@@ -31,8 +31,38 @@
 
 struct cgroup_sel;
 
+/*
+ * The 'struct perf_evsel_config_term' is used to pass event
+ * specific configuration data to perf_evsel__config routine.
+ * It is allocated within event parsing and attached to
+ * perf_evsel::config_terms list head.
+*/
+enum {
+	PERF_EVSEL__CONFIG_TERM_PERIOD,
+	PERF_EVSEL__CONFIG_TERM_FREQ,
+	PERF_EVSEL__CONFIG_TERM_TIME,
+	PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
+	PERF_EVSEL__CONFIG_TERM_STACK_USER,
+	PERF_EVSEL__CONFIG_TERM_MAX,
+};
+
+struct perf_evsel_config_term {
+	struct list_head	list;
+	int	type;
+	union {
+		u64	period;
+		u64	freq;
+		bool	time;
+		char	*callgraph;
+		u64	stack_user;
+	} val;
+};
+
 /** struct perf_evsel - event selector
  *
+ * @evlist - evlist this evsel is in, if it is in one.
+ * @node - To insert it into evlist->entries or in other list_heads, say in
+ *         the event parsing routines.
  * @name - Can be set to retain the original event name passed by the user,
  *         so that when showing results in tools such as 'perf stat', we
  *         show the name used, not some alias.
@@ -46,6 +76,7 @@
  */
 struct perf_evsel {
 	struct list_head	node;
+	struct perf_evlist	*evlist;
 	struct perf_event_attr	attr;
 	char			*filter;
 	struct xyarray		*fd;
@@ -86,6 +117,8 @@
 	unsigned long		*per_pkg_mask;
 	struct perf_evsel	*leader;
 	char			*group_name;
+	bool			cmdline_group_boundary;
+	struct list_head	config_terms;
 };
 
 union u64_swap {
@@ -182,8 +215,11 @@
 void perf_evsel__set_sample_id(struct perf_evsel *evsel,
 			       bool use_sample_identifier);
 
-int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
-			   const char *filter);
+int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter);
+int perf_evsel__append_filter(struct perf_evsel *evsel,
+			      const char *op, const char *filter);
+int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+			     const char *filter);
 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 03ace57..4181454 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -923,17 +923,13 @@
 			  FILE *fp)
 {
 	int nr, i;
-	char *str;
 
 	nr = ph->env.nr_cmdline;
-	str = ph->env.cmdline;
 
 	fprintf(fp, "# cmdline : ");
 
-	for (i = 0; i < nr; i++) {
-		fprintf(fp, "%s ", str);
-		str += strlen(str) + 1;
-	}
+	for (i = 0; i < nr; i++)
+		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
 	fputc('\n', fp);
 }
 
@@ -1541,14 +1537,13 @@
 	return 0;
 }
 
-static int process_cmdline(struct perf_file_section *section __maybe_unused,
+static int process_cmdline(struct perf_file_section *section,
 			   struct perf_header *ph, int fd,
 			   void *data __maybe_unused)
 {
 	ssize_t ret;
-	char *str;
-	u32 nr, i;
-	struct strbuf sb;
+	char *str, *cmdline = NULL, **argv = NULL;
+	u32 nr, i, len = 0;
 
 	ret = readn(fd, &nr, sizeof(nr));
 	if (ret != sizeof(nr))
@@ -1558,22 +1553,32 @@
 		nr = bswap_32(nr);
 
 	ph->env.nr_cmdline = nr;
-	strbuf_init(&sb, 128);
+
+	cmdline = zalloc(section->size + nr + 1);
+	if (!cmdline)
+		return -1;
+
+	argv = zalloc(sizeof(char *) * (nr + 1));
+	if (!argv)
+		goto error;
 
 	for (i = 0; i < nr; i++) {
 		str = do_read_string(fd, ph);
 		if (!str)
 			goto error;
 
-		/* include a NULL character at the end */
-		strbuf_add(&sb, str, strlen(str) + 1);
+		argv[i] = cmdline + len;
+		memcpy(argv[i], str, strlen(str) + 1);
+		len += strlen(str) + 1;
 		free(str);
 	}
-	ph->env.cmdline = strbuf_detach(&sb, NULL);
+	ph->env.cmdline = cmdline;
+	ph->env.cmdline_argv = (const char **) argv;
 	return 0;
 
 error:
-	strbuf_release(&sb);
+	free(argv);
+	free(cmdline);
 	return -1;
 }
 
@@ -2509,6 +2514,7 @@
 	if (session->evlist == NULL)
 		return -ENOMEM;
 
+	session->evlist->env = &header->env;
 	if (perf_data_file__is_pipe(file))
 		return perf_header__read_pipe(session);
 
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index d4d5796..396e496 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -66,7 +66,7 @@
 int perf_file_header__read(struct perf_file_header *header,
 			   struct perf_header *ph, int fd);
 
-struct perf_session_env {
+struct perf_env {
 	char			*hostname;
 	char			*os_release;
 	char			*version;
@@ -84,6 +84,7 @@
 	int			nr_pmu_mappings;
 	int			nr_groups;
 	char			*cmdline;
+	const char		**cmdline_argv;
 	char			*sibling_cores;
 	char			*sibling_threads;
 	char			*numa_nodes;
@@ -97,7 +98,7 @@
 	u64				data_size;
 	u64				feat_offset;
 	DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
-	struct perf_session_env 	env;
+	struct perf_env 	env;
 };
 
 struct perf_evlist;
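
The process_cmdline() rewrite earlier in this patch and the new
perf_env::cmdline_argv field above replace the strbuf-built blob with a
single packed buffer plus an argv-style pointer array into it, so consumers
can index individual arguments directly. A small standalone sketch of that
packing (the sample strings are made up):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char *words[] = { "perf", "record", "-e", "cycles" };
		int nr = 4, i;
		size_t len = 0, total = 0;
		char *cmdline, **argv;

		for (i = 0; i < nr; i++)
			total += strlen(words[i]) + 1;	/* keep NUL separators */

		cmdline = calloc(total + 1, 1);
		argv = calloc(nr + 1, sizeof(char *));
		if (!cmdline || !argv)
			return 1;

		for (i = 0; i < nr; i++) {
			argv[i] = cmdline + len;
			memcpy(argv[i], words[i], strlen(words[i]) + 1);
			len += strlen(words[i]) + 1;
		}

		for (i = 0; i < nr; i++)
			printf("argv[%d] = %s\n", i, argv[i]);

		free(argv);
		free(cmdline);
		return 0;
	}
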
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6f28d53..08b6cd9 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -151,6 +151,12 @@
 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
 
+	if (h->srcline)
+		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
+
+	if (h->srcfile)
+		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
+
 	if (h->transaction)
 		hists__new_col_len(hists, HISTC_TRANSACTION,
 				   hist_entry__transaction_len());
@@ -618,7 +624,8 @@
 	 * and not events sampled. Thus we use a pseudo period of 1.
 	 */
 	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
-				1, 1, 0, true);
+				1, bi->flags.cycles ? bi->flags.cycles : 1,
+				0, true);
 	if (he == NULL)
 		return -ENOMEM;
 
@@ -760,6 +767,7 @@
 	struct hist_entry **he_cache = iter->priv;
 	struct hist_entry *he;
 	struct hist_entry he_tmp = {
+		.hists = evsel__hists(evsel),
 		.cpu = al->cpu,
 		.thread = al->thread,
 		.comm = thread__comm(al->thread),
@@ -944,6 +952,8 @@
 
 	zfree(&he->stat_acc);
 	free_srcline(he->srcline);
+	if (he->srcfile && he->srcfile[0])
+		free(he->srcfile);
 	free_callchain(he->callchain);
 	free(he);
 }
@@ -1099,13 +1109,14 @@
 
 static void __hists__insert_output_entry(struct rb_root *entries,
 					 struct hist_entry *he,
-					 u64 min_callchain_hits)
+					 u64 min_callchain_hits,
+					 bool use_callchain)
 {
 	struct rb_node **p = &entries->rb_node;
 	struct rb_node *parent = NULL;
 	struct hist_entry *iter;
 
-	if (symbol_conf.use_callchain)
+	if (use_callchain)
 		callchain_param.sort(&he->sorted_chain, he->callchain,
 				      min_callchain_hits, &callchain_param);
 
@@ -1129,6 +1140,13 @@
 	struct rb_node *next;
 	struct hist_entry *n;
 	u64 min_callchain_hits;
+	struct perf_evsel *evsel = hists_to_evsel(hists);
+	bool use_callchain;
+
+	if (evsel && !symbol_conf.show_ref_callgraph)
+		use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
+	else
+		use_callchain = symbol_conf.use_callchain;
 
 	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
 
@@ -1147,7 +1165,7 @@
 		n = rb_entry(next, struct hist_entry, rb_node_in);
 		next = rb_next(&n->rb_node_in);
 
-		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
+		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
 		hists__inc_stats(hists, n);
 
 		if (!n->filtered)
@@ -1414,6 +1432,39 @@
 	return 0;
 }
 
+void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+			  struct perf_sample *sample, bool nonany_branch_mode)
+{
+	struct branch_info *bi;
+
+	/* If we have branch cycles always annotate them. */
+	if (bs && bs->nr && bs->entries[0].flags.cycles) {
+		int i;
+
+		bi = sample__resolve_bstack(sample, al);
+		if (bi) {
+			struct addr_map_symbol *prev = NULL;
+
+			/*
+			 * Ignore errors, still want to process the
+			 * other entries.
+			 *
+			 * For non standard branch modes always
+			 * force no IPC (prev == NULL)
+			 *
+			 * Note that perf stores branches reversed from
+			 * program order!
+			 */
+			for (i = bs->nr - 1; i >= 0; i--) {
+				addr_map_symbol__account_cycles(&bi[i].from,
+					nonany_branch_mode ? NULL : prev,
+					bi[i].flags.cycles);
+				prev = &bi[i].to;
+			}
+			free(bi);
+		}
+	}
+}
 
 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
 {
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 5ed8d9c..de6d58e 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -30,6 +30,7 @@
 	HISTC_PARENT,
 	HISTC_CPU,
 	HISTC_SRCLINE,
+	HISTC_SRCFILE,
 	HISTC_MISPREDICT,
 	HISTC_IN_TX,
 	HISTC_ABORT,
@@ -47,6 +48,7 @@
 	HISTC_MEM_SNOOP,
 	HISTC_MEM_DCACHELINE,
 	HISTC_TRANSACTION,
+	HISTC_CYCLES,
 	HISTC_NR_COLS, /* Last entry */
 };
 
@@ -311,7 +313,7 @@
 int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 				  struct hist_browser_timer *hbt,
 				  float min_pcnt,
-				  struct perf_session_env *env);
+				  struct perf_env *env);
 int script_browse(const char *script_opt);
 #else
 static inline
@@ -319,7 +321,7 @@
 				  const char *help __maybe_unused,
 				  struct hist_browser_timer *hbt __maybe_unused,
 				  float min_pcnt __maybe_unused,
-				  struct perf_session_env *env __maybe_unused)
+				  struct perf_env *env __maybe_unused)
 {
 	return 0;
 }
@@ -349,6 +351,9 @@
 
 unsigned int hists__sort_list_width(struct hists *hists);
 
+void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
+			  struct perf_sample *sample, bool nonany_branch_mode);
+
 struct option;
 int parse_filter_percentage(const struct option *opt __maybe_unused,
 			    const char *arg, int unset __maybe_unused);
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
new file mode 100644
index 0000000..ea76862
--- /dev/null
+++ b/tools/perf/util/intel-bts.c
@@ -0,0 +1,933 @@
+/*
+ * intel-bts.c: Intel Processor Trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <endian.h>
+#include <byteswap.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+
+#include "cpumap.h"
+#include "color.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "session.h"
+#include "util.h"
+#include "thread.h"
+#include "thread-stack.h"
+#include "debug.h"
+#include "tsc.h"
+#include "auxtrace.h"
+#include "intel-pt-decoder/intel-pt-insn-decoder.h"
+#include "intel-bts.h"
+
+#define MAX_TIMESTAMP (~0ULL)
+
+#define INTEL_BTS_ERR_NOINSN  5
+#define INTEL_BTS_ERR_LOST    9
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define le64_to_cpu bswap_64
+#else
+#define le64_to_cpu
+#endif
+
+struct intel_bts {
+	struct auxtrace			auxtrace;
+	struct auxtrace_queues		queues;
+	struct auxtrace_heap		heap;
+	u32				auxtrace_type;
+	struct perf_session		*session;
+	struct machine			*machine;
+	bool				sampling_mode;
+	bool				snapshot_mode;
+	bool				data_queued;
+	u32				pmu_type;
+	struct perf_tsc_conversion	tc;
+	bool				cap_user_time_zero;
+	struct itrace_synth_opts	synth_opts;
+	bool				sample_branches;
+	u32				branches_filter;
+	u64				branches_sample_type;
+	u64				branches_id;
+	size_t				branches_event_size;
+	bool				synth_needs_swap;
+};
+
+struct intel_bts_queue {
+	struct intel_bts	*bts;
+	unsigned int		queue_nr;
+	struct auxtrace_buffer	*buffer;
+	bool			on_heap;
+	bool			done;
+	pid_t			pid;
+	pid_t			tid;
+	int			cpu;
+	u64			time;
+	struct intel_pt_insn	intel_pt_insn;
+	u32			sample_flags;
+};
+
+struct branch {
+	u64 from;
+	u64 to;
+	u64 misc;
+};
+
+static void intel_bts_dump(struct intel_bts *bts __maybe_unused,
+			   unsigned char *buf, size_t len)
+{
+	struct branch *branch;
+	size_t i, pos = 0, br_sz = sizeof(struct branch), sz;
+	const char *color = PERF_COLOR_BLUE;
+
+	color_fprintf(stdout, color,
+		      ". ... Intel BTS data: size %zu bytes\n",
+		      len);
+
+	while (len) {
+		if (len >= br_sz)
+			sz = br_sz;
+		else
+			sz = len;
+		printf(".");
+		color_fprintf(stdout, color, "  %08zx: ", pos);
+		for (i = 0; i < sz; i++)
+			color_fprintf(stdout, color, " %02x", buf[i]);
+		for (; i < br_sz; i++)
+			color_fprintf(stdout, color, "   ");
+		if (len >= br_sz) {
+			branch = (struct branch *)buf;
+			color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n",
+				      le64_to_cpu(branch->from),
+				      le64_to_cpu(branch->to),
+				      le64_to_cpu(branch->misc) & 0x10 ?
+							"pred" : "miss");
+		} else {
+			color_fprintf(stdout, color, " Bad record!\n");
+		}
+		pos += sz;
+		buf += sz;
+		len -= sz;
+	}
+}
+
+static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf,
+				 size_t len)
+{
+	printf(".\n");
+	intel_bts_dump(bts, buf, len);
+}
+
+static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample)
+{
+	union perf_event event;
+	int err;
+
+	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
+			     INTEL_BTS_ERR_LOST, sample->cpu, sample->pid,
+			     sample->tid, 0, "Lost trace data");
+
+	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
+	if (err)
+		pr_err("Intel BTS: failed to deliver error event, error %d\n",
+		       err);
+
+	return err;
+}
+
+static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts,
+						     unsigned int queue_nr)
+{
+	struct intel_bts_queue *btsq;
+
+	btsq = zalloc(sizeof(struct intel_bts_queue));
+	if (!btsq)
+		return NULL;
+
+	btsq->bts = bts;
+	btsq->queue_nr = queue_nr;
+	btsq->pid = -1;
+	btsq->tid = -1;
+	btsq->cpu = -1;
+
+	return btsq;
+}
+
+static int intel_bts_setup_queue(struct intel_bts *bts,
+				 struct auxtrace_queue *queue,
+				 unsigned int queue_nr)
+{
+	struct intel_bts_queue *btsq = queue->priv;
+
+	if (list_empty(&queue->head))
+		return 0;
+
+	if (!btsq) {
+		btsq = intel_bts_alloc_queue(bts, queue_nr);
+		if (!btsq)
+			return -ENOMEM;
+		queue->priv = btsq;
+
+		if (queue->cpu != -1)
+			btsq->cpu = queue->cpu;
+		btsq->tid = queue->tid;
+	}
+
+	if (bts->sampling_mode)
+		return 0;
+
+	if (!btsq->on_heap && !btsq->buffer) {
+		int ret;
+
+		btsq->buffer = auxtrace_buffer__next(queue, NULL);
+		if (!btsq->buffer)
+			return 0;
+
+		ret = auxtrace_heap__add(&bts->heap, queue_nr,
+					 btsq->buffer->reference);
+		if (ret)
+			return ret;
+		btsq->on_heap = true;
+	}
+
+	return 0;
+}
+
+static int intel_bts_setup_queues(struct intel_bts *bts)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < bts->queues.nr_queues; i++) {
+		ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i],
+					    i);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static inline int intel_bts_update_queues(struct intel_bts *bts)
+{
+	if (bts->queues.new_data) {
+		bts->queues.new_data = false;
+		return intel_bts_setup_queues(bts);
+	}
+	return 0;
+}
+
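+/*
+ * In snapshot mode, successive buffers can contain overlapping data.
+ * Look for the tail of buf_a at the start of buf_b, stepping one record
+ * at a time, and return a pointer to where the new data in buf_b begins.
+ */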
+static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a,
+					     unsigned char *buf_b, size_t len_b)
+{
+	size_t offs, len;
+
+	if (len_a > len_b)
+		offs = len_a - len_b;
+	else
+		offs = 0;
+
+	for (; offs < len_a; offs += sizeof(struct branch)) {
+		len = len_a - offs;
+		if (!memcmp(buf_a + offs, buf_b, len))
+			return buf_b + len;
+	}
+
+	return buf_b;
+}
+
+static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
+				    struct auxtrace_buffer *b)
+{
+	struct auxtrace_buffer *a;
+	void *start;
+
+	if (b->list.prev == &queue->head)
+		return 0;
+	a = list_entry(b->list.prev, struct auxtrace_buffer, list);
+	start = intel_bts_find_overlap(a->data, a->size, b->data, b->size);
+	if (!start)
+		return -EINVAL;
+	b->use_size = b->data + b->size - start;
+	b->use_data = start;
+	return 0;
+}
+
+static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
+					 struct branch *branch)
+{
+	int ret;
+	struct intel_bts *bts = btsq->bts;
+	union perf_event event;
+	struct perf_sample sample = { .ip = 0, };
+
+	event.sample.header.type = PERF_RECORD_SAMPLE;
+	event.sample.header.misc = PERF_RECORD_MISC_USER;
+	event.sample.header.size = sizeof(struct perf_event_header);
+
+	sample.ip = le64_to_cpu(branch->from);
+	sample.pid = btsq->pid;
+	sample.tid = btsq->tid;
+	sample.addr = le64_to_cpu(branch->to);
+	sample.id = btsq->bts->branches_id;
+	sample.stream_id = btsq->bts->branches_id;
+	sample.period = 1;
+	sample.cpu = btsq->cpu;
+	sample.flags = btsq->sample_flags;
+	sample.insn_len = btsq->intel_pt_insn.length;
+
+	if (bts->synth_opts.inject) {
+		event.sample.header.size = bts->branches_event_size;
+		ret = perf_event__synthesize_sample(&event,
+						    bts->branches_sample_type,
+						    0, &sample,
+						    bts->synth_needs_swap);
+		if (ret)
+			return ret;
+	}
+
+	ret = perf_session__deliver_synth_event(bts->session, &event, &sample);
+	if (ret)
+		pr_err("Intel BTS: failed to deliver branch event, error %d\n",
+		       ret);
+
+	return ret;
+}
+
+static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip)
+{
+	struct machine *machine = btsq->bts->machine;
+	struct thread *thread;
+	struct addr_location al;
+	unsigned char buf[1024];
+	size_t bufsz;
+	ssize_t len;
+	int x86_64;
+	uint8_t cpumode;
+	int err = -1;
+
+	bufsz = intel_pt_insn_max_size();
+
+	if (machine__kernel_ip(machine, ip))
+		cpumode = PERF_RECORD_MISC_KERNEL;
+	else
+		cpumode = PERF_RECORD_MISC_USER;
+
+	thread = machine__find_thread(machine, -1, btsq->tid);
+	if (!thread)
+		return -1;
+
+	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al);
+	if (!al.map || !al.map->dso)
+		goto out_put;
+
+	len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, bufsz);
+	if (len <= 0)
+		goto out_put;
+
+	/* Load maps to ensure dso->is_64_bit has been updated */
+	map__load(al.map, machine->symbol_filter);
+
+	x86_64 = al.map->dso->is_64_bit;
+
+	if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn))
+		goto out_put;
+
+	err = 0;
+out_put:
+	thread__put(thread);
+	return err;
+}
+
+static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid,
+				 pid_t tid, u64 ip)
+{
+	union perf_event event;
+	int err;
+
+	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
+			     INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip,
+			     "Failed to get instruction");
+
+	err = perf_session__deliver_synth_event(bts->session, &event, NULL);
+	if (err)
+		pr_err("Intel BTS: failed to deliver error event, error %d\n",
+		       err);
+
+	return err;
+}
+
+static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
+				     struct branch *branch)
+{
+	int err;
+
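+	/* A zero 'from' or 'to' address marks the beginning or end of the trace */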
+	if (!branch->from) {
+		if (branch->to)
+			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
+					     PERF_IP_FLAG_TRACE_BEGIN;
+		else
+			btsq->sample_flags = 0;
+		btsq->intel_pt_insn.length = 0;
+	} else if (!branch->to) {
+		btsq->sample_flags = PERF_IP_FLAG_BRANCH |
+				     PERF_IP_FLAG_TRACE_END;
+		btsq->intel_pt_insn.length = 0;
+	} else {
+		err = intel_bts_get_next_insn(btsq, branch->from);
+		if (err) {
+			btsq->sample_flags = 0;
+			btsq->intel_pt_insn.length = 0;
+			if (!btsq->bts->synth_opts.errors)
+				return 0;
+			err = intel_bts_synth_error(btsq->bts, btsq->cpu,
+						    btsq->pid, btsq->tid,
+						    branch->from);
+			return err;
+		}
+		btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op);
+		/* Check for an async branch into the kernel */
+		if (!machine__kernel_ip(btsq->bts->machine, branch->from) &&
+		    machine__kernel_ip(btsq->bts->machine, branch->to) &&
+		    btsq->sample_flags != (PERF_IP_FLAG_BRANCH |
+					   PERF_IP_FLAG_CALL |
+					   PERF_IP_FLAG_SYSCALLRET))
+			btsq->sample_flags = PERF_IP_FLAG_BRANCH |
+					     PERF_IP_FLAG_CALL |
+					     PERF_IP_FLAG_ASYNC |
+					     PERF_IP_FLAG_INTERRUPT;
+	}
+
+	return 0;
+}
+
+static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
+				    struct auxtrace_buffer *buffer)
+{
+	struct branch *branch;
+	size_t sz, bsz = sizeof(struct branch);
+	u32 filter = btsq->bts->branches_filter;
+	int err = 0;
+
+	if (buffer->use_data) {
+		sz = buffer->use_size;
+		branch = buffer->use_data;
+	} else {
+		sz = buffer->size;
+		branch = buffer->data;
+	}
+
+	if (!btsq->bts->sample_branches)
+		return 0;
+
+	for (; sz > bsz; branch += 1, sz -= bsz) {
+		if (!branch->from && !branch->to)
+			continue;
+		intel_bts_get_branch_type(btsq, branch);
+		if (filter && !(filter & btsq->sample_flags))
+			continue;
+		err = intel_bts_synth_branch_sample(btsq, branch);
+		if (err)
+			break;
+	}
+	return err;
+}
+
+static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
+{
+	struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer;
+	struct auxtrace_queue *queue;
+	struct thread *thread;
+	int err;
+
+	if (btsq->done)
+		return 1;
+
+	if (btsq->pid == -1) {
+		thread = machine__find_thread(btsq->bts->machine, -1,
+					      btsq->tid);
+		if (thread)
+			btsq->pid = thread->pid_;
+	} else {
+		thread = machine__findnew_thread(btsq->bts->machine, btsq->pid,
+						 btsq->tid);
+	}
+
+	queue = &btsq->bts->queues.queue_array[btsq->queue_nr];
+
+	if (!buffer)
+		buffer = auxtrace_buffer__next(queue, NULL);
+
+	if (!buffer) {
+		if (!btsq->bts->sampling_mode)
+			btsq->done = 1;
+		err = 1;
+		goto out_put;
+	}
+
+	/* Currently there is no support for split buffers */
+	if (buffer->consecutive) {
+		err = -EINVAL;
+		goto out_put;
+	}
+
+	if (!buffer->data) {
+		int fd = perf_data_file__fd(btsq->bts->session->file);
+
+		buffer->data = auxtrace_buffer__get_data(buffer, fd);
+		if (!buffer->data) {
+			err = -ENOMEM;
+			goto out_put;
+		}
+	}
+
+	if (btsq->bts->snapshot_mode && !buffer->consecutive &&
+	    intel_bts_do_fix_overlap(queue, buffer)) {
+		err = -ENOMEM;
+		goto out_put;
+	}
+
+	if (!btsq->bts->synth_opts.callchain && thread &&
+	    (!old_buffer || btsq->bts->sampling_mode ||
+	     (btsq->bts->snapshot_mode && !buffer->consecutive)))
+		thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
+
+	err = intel_bts_process_buffer(btsq, buffer);
+
+	auxtrace_buffer__drop_data(buffer);
+
+	btsq->buffer = auxtrace_buffer__next(queue, buffer);
+	if (btsq->buffer) {
+		if (timestamp)
+			*timestamp = btsq->buffer->reference;
+	} else {
+		if (!btsq->bts->sampling_mode)
+			btsq->done = 1;
+	}
+out_put:
+	thread__put(thread);
+	return err;
+}
+
+static int intel_bts_flush_queue(struct intel_bts_queue *btsq)
+{
+	u64 ts = 0;
+	int ret;
+
+	while (1) {
+		ret = intel_bts_process_queue(btsq, &ts);
+		if (ret < 0)
+			return ret;
+		if (ret)
+			break;
+	}
+	return 0;
+}
+
+static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid)
+{
+	struct auxtrace_queues *queues = &bts->queues;
+	unsigned int i;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		struct auxtrace_queue *queue = &bts->queues.queue_array[i];
+		struct intel_bts_queue *btsq = queue->priv;
+
+		if (btsq && btsq->tid == tid)
+			return intel_bts_flush_queue(btsq);
+	}
+	return 0;
+}
+
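+/* Process queued buffers in timestamp order, up to and including 'timestamp' */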
+static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp)
+{
+	while (1) {
+		unsigned int queue_nr;
+		struct auxtrace_queue *queue;
+		struct intel_bts_queue *btsq;
+		u64 ts = 0;
+		int ret;
+
+		if (!bts->heap.heap_cnt)
+			return 0;
+
+		if (bts->heap.heap_array[0].ordinal > timestamp)
+			return 0;
+
+		queue_nr = bts->heap.heap_array[0].queue_nr;
+		queue = &bts->queues.queue_array[queue_nr];
+		btsq = queue->priv;
+
+		auxtrace_heap__pop(&bts->heap);
+
+		ret = intel_bts_process_queue(btsq, &ts);
+		if (ret < 0) {
+			auxtrace_heap__add(&bts->heap, queue_nr, ts);
+			return ret;
+		}
+
+		if (!ret) {
+			ret = auxtrace_heap__add(&bts->heap, queue_nr, ts);
+			if (ret < 0)
+				return ret;
+		} else {
+			btsq->on_heap = false;
+		}
+	}
+
+	return 0;
+}
+
+static int intel_bts_process_event(struct perf_session *session,
+				   union perf_event *event,
+				   struct perf_sample *sample,
+				   struct perf_tool *tool)
+{
+	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
+					     auxtrace);
+	u64 timestamp;
+	int err;
+
+	if (dump_trace)
+		return 0;
+
+	if (!tool->ordered_events) {
+		pr_err("Intel BTS requires ordered events\n");
+		return -EINVAL;
+	}
+
+	if (sample->time && sample->time != (u64)-1)
+		timestamp = perf_time_to_tsc(sample->time, &bts->tc);
+	else
+		timestamp = 0;
+
+	err = intel_bts_update_queues(bts);
+	if (err)
+		return err;
+
+	err = intel_bts_process_queues(bts, timestamp);
+	if (err)
+		return err;
+	if (event->header.type == PERF_RECORD_EXIT) {
+		err = intel_bts_process_tid_exit(bts, event->comm.tid);
+		if (err)
+			return err;
+	}
+
+	if (event->header.type == PERF_RECORD_AUX &&
+	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
+	    bts->synth_opts.errors)
+		err = intel_bts_lost(bts, sample);
+
+	return err;
+}
+
+static int intel_bts_process_auxtrace_event(struct perf_session *session,
+					    union perf_event *event,
+					    struct perf_tool *tool __maybe_unused)
+{
+	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
+					     auxtrace);
+
+	if (bts->sampling_mode)
+		return 0;
+
+	if (!bts->data_queued) {
+		struct auxtrace_buffer *buffer;
+		off_t data_offset;
+		int fd = perf_data_file__fd(session->file);
+		int err;
+
+		if (perf_data_file__is_pipe(session->file)) {
+			data_offset = 0;
+		} else {
+			data_offset = lseek(fd, 0, SEEK_CUR);
+			if (data_offset == -1)
+				return -errno;
+		}
+
+		err = auxtrace_queues__add_event(&bts->queues, session, event,
+						 data_offset, &buffer);
+		if (err)
+			return err;
+
+		/* Dump here, now that we have copied a piped trace out of the pipe */
+		if (dump_trace) {
+			if (auxtrace_buffer__get_data(buffer, fd)) {
+				intel_bts_dump_event(bts, buffer->data,
+						     buffer->size);
+				auxtrace_buffer__put_data(buffer);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int intel_bts_flush(struct perf_session *session __maybe_unused,
+			   struct perf_tool *tool __maybe_unused)
+{
+	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
+					     auxtrace);
+	int ret;
+
+	if (dump_trace || bts->sampling_mode)
+		return 0;
+
+	if (!tool->ordered_events)
+		return -EINVAL;
+
+	ret = intel_bts_update_queues(bts);
+	if (ret < 0)
+		return ret;
+
+	return intel_bts_process_queues(bts, MAX_TIMESTAMP);
+}
+
+static void intel_bts_free_queue(void *priv)
+{
+	struct intel_bts_queue *btsq = priv;
+
+	if (!btsq)
+		return;
+	free(btsq);
+}
+
+static void intel_bts_free_events(struct perf_session *session)
+{
+	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
+					     auxtrace);
+	struct auxtrace_queues *queues = &bts->queues;
+	unsigned int i;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		intel_bts_free_queue(queues->queue_array[i].priv);
+		queues->queue_array[i].priv = NULL;
+	}
+	auxtrace_queues__free(queues);
+}
+
+static void intel_bts_free(struct perf_session *session)
+{
+	struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts,
+					     auxtrace);
+
+	auxtrace_heap__free(&bts->heap);
+	intel_bts_free_events(session);
+	session->auxtrace = NULL;
+	free(bts);
+}
+
+struct intel_bts_synth {
+	struct perf_tool dummy_tool;
+	struct perf_session *session;
+};
+
+static int intel_bts_event_synth(struct perf_tool *tool,
+				 union perf_event *event,
+				 struct perf_sample *sample __maybe_unused,
+				 struct machine *machine __maybe_unused)
+{
+	struct intel_bts_synth *intel_bts_synth =
+			container_of(tool, struct intel_bts_synth, dummy_tool);
+
+	return perf_session__deliver_synth_event(intel_bts_synth->session,
+						 event, NULL);
+}
+
+static int intel_bts_synth_event(struct perf_session *session,
+				 struct perf_event_attr *attr, u64 id)
+{
+	struct intel_bts_synth intel_bts_synth;
+
+	memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth));
+	intel_bts_synth.session = session;
+
+	return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1,
+					   &id, intel_bts_event_synth);
+}
+
+static int intel_bts_synth_events(struct intel_bts *bts,
+				  struct perf_session *session)
+{
+	struct perf_evlist *evlist = session->evlist;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	bool found = false;
+	u64 id;
+	int err;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel->attr.type == bts->pmu_type && evsel->ids) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		pr_debug("There are no selected events with Intel BTS data\n");
+		return 0;
+	}
+
+	memset(&attr, 0, sizeof(struct perf_event_attr));
+	attr.size = sizeof(struct perf_event_attr);
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
+	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+			    PERF_SAMPLE_PERIOD;
+	attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
+	attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
+	attr.exclude_user = evsel->attr.exclude_user;
+	attr.exclude_kernel = evsel->attr.exclude_kernel;
+	attr.exclude_hv = evsel->attr.exclude_hv;
+	attr.exclude_host = evsel->attr.exclude_host;
+	attr.exclude_guest = evsel->attr.exclude_guest;
+	attr.sample_id_all = evsel->attr.sample_id_all;
+	attr.read_format = evsel->attr.read_format;
+
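+	/* Derive a synthetic sample id that should not clash with existing event ids */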
+	id = evsel->id[0] + 1000000000;
+	if (!id)
+		id = 1;
+
+	if (bts->synth_opts.branches) {
+		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+		attr.sample_period = 1;
+		attr.sample_type |= PERF_SAMPLE_ADDR;
+		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
+			 id, (u64)attr.sample_type);
+		err = intel_bts_synth_event(session, &attr, id);
+		if (err) {
+			pr_err("%s: failed to synthesize 'branches' event type\n",
+			       __func__);
+			return err;
+		}
+		bts->sample_branches = true;
+		bts->branches_sample_type = attr.sample_type;
+		bts->branches_id = id;
+		/*
+		 * We only use sample types from PERF_SAMPLE_MASK so we can use
+		 * __perf_evsel__sample_size() here.
+		 */
+		bts->branches_event_size = sizeof(struct sample_event) +
+				__perf_evsel__sample_size(attr.sample_type);
+	}
+
+	bts->synth_needs_swap = evsel->needs_swap;
+
+	return 0;
+}
+
+static const char * const intel_bts_info_fmts[] = {
+	[INTEL_BTS_PMU_TYPE]		= "  PMU Type           %"PRId64"\n",
+	[INTEL_BTS_TIME_SHIFT]		= "  Time Shift         %"PRIu64"\n",
+	[INTEL_BTS_TIME_MULT]		= "  Time Multiplier    %"PRIu64"\n",
+	[INTEL_BTS_TIME_ZERO]		= "  Time Zero          %"PRIu64"\n",
+	[INTEL_BTS_CAP_USER_TIME_ZERO]	= "  Cap Time Zero      %"PRId64"\n",
+	[INTEL_BTS_SNAPSHOT_MODE]	= "  Snapshot mode      %"PRId64"\n",
+};
+
+static void intel_bts_print_info(u64 *arr, int start, int finish)
+{
+	int i;
+
+	if (!dump_trace)
+		return;
+
+	for (i = start; i <= finish; i++)
+		fprintf(stdout, intel_bts_info_fmts[i], arr[i]);
+}
+
+u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE];
+
+int intel_bts_process_auxtrace_info(union perf_event *event,
+				    struct perf_session *session)
+{
+	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+	size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE;
+	struct intel_bts *bts;
+	int err;
+
+	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
+					min_sz)
+		return -EINVAL;
+
+	bts = zalloc(sizeof(struct intel_bts));
+	if (!bts)
+		return -ENOMEM;
+
+	err = auxtrace_queues__init(&bts->queues);
+	if (err)
+		goto err_free;
+
+	bts->session = session;
+	bts->machine = &session->machines.host; /* No kvm support */
+	bts->auxtrace_type = auxtrace_info->type;
+	bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE];
+	bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT];
+	bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT];
+	bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO];
+	bts->cap_user_time_zero =
+			auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO];
+	bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE];
+
+	bts->sampling_mode = false;
+
+	bts->auxtrace.process_event = intel_bts_process_event;
+	bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event;
+	bts->auxtrace.flush_events = intel_bts_flush;
+	bts->auxtrace.free_events = intel_bts_free_events;
+	bts->auxtrace.free = intel_bts_free;
+	session->auxtrace = &bts->auxtrace;
+
+	intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE,
+			     INTEL_BTS_SNAPSHOT_MODE);
+
+	if (dump_trace)
+		return 0;
+
+	if (session->itrace_synth_opts && session->itrace_synth_opts->set)
+		bts->synth_opts = *session->itrace_synth_opts;
+	else
+		itrace_synth_opts__set_default(&bts->synth_opts);
+
+	if (bts->synth_opts.calls)
+		bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
+					PERF_IP_FLAG_TRACE_END;
+	if (bts->synth_opts.returns)
+		bts->branches_filter |= PERF_IP_FLAG_RETURN |
+					PERF_IP_FLAG_TRACE_BEGIN;
+
+	err = intel_bts_synth_events(bts, session);
+	if (err)
+		goto err_free_queues;
+
+	err = auxtrace_queues__process_index(&bts->queues, session);
+	if (err)
+		goto err_free_queues;
+
+	if (bts->queues.populated)
+		bts->data_queued = true;
+
+	return 0;
+
+err_free_queues:
+	auxtrace_queues__free(&bts->queues);
+	session->auxtrace = NULL;
+err_free:
+	free(bts);
+	return err;
+}
diff --git a/tools/perf/util/intel-bts.h b/tools/perf/util/intel-bts.h
new file mode 100644
index 0000000..ca65e21
--- /dev/null
+++ b/tools/perf/util/intel-bts.h
@@ -0,0 +1,43 @@
+/*
+ * intel-bts.h: Intel BTS (Branch Trace Store) support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__PERF_INTEL_BTS_H__
+#define INCLUDE__PERF_INTEL_BTS_H__
+
+#define INTEL_BTS_PMU_NAME "intel_bts"
+
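+/* Indexes into the auxtrace_info event's priv[] array */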
+enum {
+	INTEL_BTS_PMU_TYPE,
+	INTEL_BTS_TIME_SHIFT,
+	INTEL_BTS_TIME_MULT,
+	INTEL_BTS_TIME_ZERO,
+	INTEL_BTS_CAP_USER_TIME_ZERO,
+	INTEL_BTS_SNAPSHOT_MODE,
+	INTEL_BTS_AUXTRACE_PRIV_MAX,
+};
+
+#define INTEL_BTS_AUXTRACE_PRIV_SIZE (INTEL_BTS_AUXTRACE_PRIV_MAX * sizeof(u64))
+
+struct auxtrace_record;
+struct perf_tool;
+union perf_event;
+struct perf_session;
+
+struct auxtrace_record *intel_bts_recording_init(int *err);
+
+int intel_bts_process_auxtrace_info(union perf_event *event,
+				    struct perf_session *session);
+
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/Build b/tools/perf/util/intel-pt-decoder/Build
new file mode 100644
index 0000000..2386322
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/Build
@@ -0,0 +1,12 @@
+libperf-$(CONFIG_AUXTRACE) += intel-pt-pkt-decoder.o intel-pt-insn-decoder.o intel-pt-log.o intel-pt-decoder.o
+
+inat_tables_script = util/intel-pt-decoder/gen-insn-attr-x86.awk
+inat_tables_maps = util/intel-pt-decoder/x86-opcode-map.txt
+
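+# inat-tables.c is generated at build time from the x86 opcode map below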
+$(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+	$(call rule_mkdir)
+	@$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
+
+$(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
+
+CFLAGS_intel-pt-insn-decoder.o += -I$(OUTPUT)util/intel-pt-decoder -Wno-override-init
diff --git a/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk b/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk
new file mode 100644
index 0000000..51756734
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/gen-insn-attr-x86.awk
@@ -0,0 +1,386 @@
+#!/bin/awk -f
+# gen-insn-attr-x86.awk: Instruction attribute table generator
+# Written by Masami Hiramatsu <mhiramat@redhat.com>
+#
+# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
+
+# Awk implementation sanity check
+function check_awk_implement() {
+	if (sprintf("%x", 0) != "0")
+		return "Your awk has a printf-format problem."
+	return ""
+}
+
+# Clear working vars
+function clear_vars() {
+	delete table
+	delete lptable2
+	delete lptable1
+	delete lptable3
+	eid = -1 # escape id
+	gid = -1 # group id
+	aid = -1 # AVX id
+	tname = ""
+}
+
+BEGIN {
+	# Implementation error checking
+	awkchecked = check_awk_implement()
+	if (awkchecked != "") {
+		print "Error: " awkchecked > "/dev/stderr"
+		print "Please try to use gawk." > "/dev/stderr"
+		exit 1
+	}
+
+	# Setup generating tables
+	print "/* x86 opcode map generated from x86-opcode-map.txt */"
+	print "/* Do not change this code. */\n"
+	ggid = 1
+	geid = 1
+	gaid = 0
+	delete etable
+	delete gtable
+	delete atable
+
+	opnd_expr = "^[A-Za-z/]"
+	ext_expr = "^\\("
+	sep_expr = "^\\|$"
+	group_expr = "^Grp[0-9A-Za-z]+"
+
+	imm_expr = "^[IJAOL][a-z]"
+	imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+	imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+	imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
+	imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
+	imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
+	imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
+	imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+	imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+	imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
+	imm_flag["Ob"] = "INAT_MOFFSET"
+	imm_flag["Ov"] = "INAT_MOFFSET"
+	imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+
+	modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
+	force64_expr = "\\([df]64\\)"
+	rex_expr = "^REX(\\.[XRWB]+)*"
+	fpu_expr = "^ESC" # TODO
+
+	lprefix1_expr = "\\((66|!F3)\\)"
+	lprefix2_expr = "\\(F3\\)"
+	lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+	lprefix_expr = "\\((66|F2|F3)\\)"
+	max_lprefix = 4
+
+	# All opcodes starting with lower-case 'v' or with a (v1) superscript
+	# accept the VEX prefix
+	vexok_opcode_expr = "^v.*"
+	vexok_expr = "\\(v1\\)"
+	# All opcodes with a (v) superscript support *only* the VEX prefix
+	vexonly_expr = "\\(v\\)"
+
+	prefix_expr = "\\(Prefix\\)"
+	prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
+	prefix_num["REPNE"] = "INAT_PFX_REPNE"
+	prefix_num["REP/REPE"] = "INAT_PFX_REPE"
+	prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
+	prefix_num["XRELEASE"] = "INAT_PFX_REPE"
+	prefix_num["LOCK"] = "INAT_PFX_LOCK"
+	prefix_num["SEG=CS"] = "INAT_PFX_CS"
+	prefix_num["SEG=DS"] = "INAT_PFX_DS"
+	prefix_num["SEG=ES"] = "INAT_PFX_ES"
+	prefix_num["SEG=FS"] = "INAT_PFX_FS"
+	prefix_num["SEG=GS"] = "INAT_PFX_GS"
+	prefix_num["SEG=SS"] = "INAT_PFX_SS"
+	prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
+	prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+	prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
+
+	clear_vars()
+}
+
+function semantic_error(msg) {
+	print "Semantic error at " NR ": " msg > "/dev/stderr"
+	exit 1
+}
+
+function debug(msg) {
+	print "DEBUG: " msg
+}
+
+function array_size(arr,   i,c) {
+	c = 0
+	for (i in arr)
+		c++
+	return c
+}
+
+/^Table:/ {
+	print "/* " $0 " */"
+	if (tname != "")
+		semantic_error("Hit Table: before EndTable:.");
+}
+
+/^Referrer:/ {
+	if (NF != 1) {
+		# escape opcode table
+		ref = ""
+		for (i = 2; i <= NF; i++)
+			ref = ref $i
+		eid = escape[ref]
+		tname = sprintf("inat_escape_table_%d", eid)
+	}
+}
+
+/^AVXcode:/ {
+	if (NF != 1) {
+		# AVX/escape opcode table
+		aid = $2
+		if (gaid <= aid)
+			gaid = aid + 1
+		if (tname == "")	# AVX only opcode table
+			tname = sprintf("inat_avx_table_%d", $2)
+	}
+	if (aid == -1 && eid == -1)	# primary opcode table
+		tname = "inat_primary_table"
+}
+
+/^GrpTable:/ {
+	print "/* " $0 " */"
+	if (!($2 in group))
+		semantic_error("No group: " $2 )
+	gid = group[$2]
+	tname = "inat_group_table_" gid
+}
+
+function print_table(tbl,name,fmt,n)
+{
+	print "const insn_attr_t " name " = {"
+	for (i = 0; i < n; i++) {
+		id = sprintf(fmt, i)
+		if (tbl[id])
+			print "	[" id "] = " tbl[id] ","
+	}
+	print "};"
+}
+
+/^EndTable/ {
+	if (gid != -1) {
+		# print group tables
+		if (array_size(table) != 0) {
+			print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
+				    "0x%x", 8)
+			gtable[gid,0] = tname
+		}
+		if (array_size(lptable1) != 0) {
+			print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
+				    "0x%x", 8)
+			gtable[gid,1] = tname "_1"
+		}
+		if (array_size(lptable2) != 0) {
+			print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
+				    "0x%x", 8)
+			gtable[gid,2] = tname "_2"
+		}
+		if (array_size(lptable3) != 0) {
+			print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
+				    "0x%x", 8)
+			gtable[gid,3] = tname "_3"
+		}
+	} else {
+		# print primary/escaped tables
+		if (array_size(table) != 0) {
+			print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
+				    "0x%02x", 256)
+			etable[eid,0] = tname
+			if (aid >= 0)
+				atable[aid,0] = tname
+		}
+		if (array_size(lptable1) != 0) {
+			print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
+				    "0x%02x", 256)
+			etable[eid,1] = tname "_1"
+			if (aid >= 0)
+				atable[aid,1] = tname "_1"
+		}
+		if (array_size(lptable2) != 0) {
+			print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
+				    "0x%02x", 256)
+			etable[eid,2] = tname "_2"
+			if (aid >= 0)
+				atable[aid,2] = tname "_2"
+		}
+		if (array_size(lptable3) != 0) {
+			print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
+				    "0x%02x", 256)
+			etable[eid,3] = tname "_3"
+			if (aid >= 0)
+				atable[aid,3] = tname "_3"
+		}
+	}
+	print ""
+	clear_vars()
+}
+
+function add_flags(old,new) {
+	if (old && new)
+		return old " | " new
+	else if (old)
+		return old
+	else
+		return new
+}
+
+# convert operands to flags.
+function convert_operands(count,opnd,       i,j,imm,mod)
+{
+	imm = null
+	mod = null
+	for (j = 1; j <= count; j++) {
+		i = opnd[j]
+		if (match(i, imm_expr) == 1) {
+			if (!imm_flag[i])
+				semantic_error("Unknown imm opnd: " i)
+			if (imm) {
+				if (i != "Ib")
+					semantic_error("Second IMM error")
+				imm = add_flags(imm, "INAT_SCNDIMM")
+			} else
+				imm = imm_flag[i]
+		} else if (match(i, modrm_expr))
+			mod = "INAT_MODRM"
+	}
+	return add_flags(imm, mod)
+}
+
+/^[0-9a-f]+\:/ {
+	if (NR == 1)
+		next
+	# get index
+	idx = "0x" substr($1, 1, index($1,":") - 1)
+	if (idx in table)
+		semantic_error("Redefine " idx " in " tname)
+
+	# check if escaped opcode
+	if ("escape" == $2) {
+		if ($3 != "#")
+			semantic_error("No escaped name")
+		ref = ""
+		for (i = 4; i <= NF; i++)
+			ref = ref $i
+		if (ref in escape)
+			semantic_error("Redefine escape (" ref ")")
+		escape[ref] = geid
+		geid++
+		table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
+		next
+	}
+
+	variant = null
+	# converts
+	i = 2
+	while (i <= NF) {
+		opcode = $(i++)
+		delete opnds
+		ext = null
+		flags = null
+		opnd = null
+		# parse one opcode
+		if (match($i, opnd_expr)) {
+			opnd = $i
+			count = split($(i++), opnds, ",")
+			flags = convert_operands(count, opnds)
+		}
+		if (match($i, ext_expr))
+			ext = $(i++)
+		if (match($i, sep_expr))
+			i++
+		else if (i < NF)
+			semantic_error($i " is not a separator")
+
+		# check if group opcode
+		if (match(opcode, group_expr)) {
+			if (!(opcode in group)) {
+				group[opcode] = ggid
+				ggid++
+			}
+			flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
+		}
+		# check force(or default) 64bit
+		if (match(ext, force64_expr))
+			flags = add_flags(flags, "INAT_FORCE64")
+
+		# check REX prefix
+		if (match(opcode, rex_expr))
+			flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
+
+		# check coprocessor escape : TODO
+		if (match(opcode, fpu_expr))
+			flags = add_flags(flags, "INAT_MODRM")
+
+		# check VEX codes
+		if (match(ext, vexonly_expr))
+			flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
+		else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
+			flags = add_flags(flags, "INAT_VEXOK")
+
+		# check prefixes
+		if (match(ext, prefix_expr)) {
+			if (!prefix_num[opcode])
+				semantic_error("Unknown prefix: " opcode)
+			flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
+		}
+		if (length(flags) == 0)
+			continue
+		# check if last prefix
+		if (match(ext, lprefix1_expr)) {
+			lptable1[idx] = add_flags(lptable1[idx],flags)
+			variant = "INAT_VARIANT"
+		}
+		if (match(ext, lprefix2_expr)) {
+			lptable2[idx] = add_flags(lptable2[idx],flags)
+			variant = "INAT_VARIANT"
+		}
+		if (match(ext, lprefix3_expr)) {
+			lptable3[idx] = add_flags(lptable3[idx],flags)
+			variant = "INAT_VARIANT"
+		}
+		if (!match(ext, lprefix_expr)){
+			table[idx] = add_flags(table[idx],flags)
+		}
+	}
+	if (variant)
+		table[idx] = add_flags(table[idx],variant)
+}
+
+END {
+	if (awkchecked != "")
+		exit 1
+	# print escape opcode map's array
+	print "/* Escape opcode map array */"
+	print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
+	      "[INAT_LSTPFX_MAX + 1] = {"
+	for (i = 0; i < geid; i++)
+		for (j = 0; j < max_lprefix; j++)
+			if (etable[i,j])
+				print "	["i"]["j"] = "etable[i,j]","
+	print "};\n"
+	# print group opcode map's array
+	print "/* Group opcode map array */"
+	print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
+	      "[INAT_LSTPFX_MAX + 1] = {"
+	for (i = 0; i < ggid; i++)
+		for (j = 0; j < max_lprefix; j++)
+			if (gtable[i,j])
+				print "	["i"]["j"] = "gtable[i,j]","
+	print "};\n"
+	# print AVX opcode map's array
+	print "/* AVX opcode map array */"
+	print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
+	      "[INAT_LSTPFX_MAX + 1] = {"
+	for (i = 0; i < gaid; i++)
+		for (j = 0; j < max_lprefix; j++)
+			if (atable[i,j])
+				print "	["i"]["j"] = "atable[i,j]","
+	print "};"
+}
diff --git a/tools/perf/util/intel-pt-decoder/inat.c b/tools/perf/util/intel-pt-decoder/inat.c
new file mode 100644
index 0000000..906d94a
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/inat.c
@@ -0,0 +1,96 @@
+/*
+ * x86 instruction attribute tables
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include "insn.h"
+
+/* Attribute tables are generated from opcode map */
+#include "inat-tables.c"
+
+/* Attribute search APIs */
+insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
+{
+	return inat_primary_table[opcode];
+}
+
+int inat_get_last_prefix_id(insn_byte_t last_pfx)
+{
+	insn_attr_t lpfx_attr;
+
+	lpfx_attr = inat_get_opcode_attribute(last_pfx);
+	return inat_last_prefix_id(lpfx_attr);
+}
+
+insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
+				      insn_attr_t esc_attr)
+{
+	const insn_attr_t *table;
+	int n;
+
+	n = inat_escape_id(esc_attr);
+
+	table = inat_escape_tables[n][0];
+	if (!table)
+		return 0;
+	if (inat_has_variant(table[opcode]) && lpfx_id) {
+		table = inat_escape_tables[n][lpfx_id];
+		if (!table)
+			return 0;
+	}
+	return table[opcode];
+}
+
+insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
+				     insn_attr_t grp_attr)
+{
+	const insn_attr_t *table;
+	int n;
+
+	n = inat_group_id(grp_attr);
+
+	table = inat_group_tables[n][0];
+	if (!table)
+		return inat_group_common_attribute(grp_attr);
+	if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+		table = inat_group_tables[n][lpfx_id];
+		if (!table)
+			return inat_group_common_attribute(grp_attr);
+	}
+	return table[X86_MODRM_REG(modrm)] |
+	       inat_group_common_attribute(grp_attr);
+}
+
+insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
+				   insn_byte_t vex_p)
+{
+	const insn_attr_t *table;
+	if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
+		return 0;
+	/* Check the master (no last-prefix) table first */
+	table = inat_avx_tables[vex_m][0];
+	if (!table)
+		return 0;
+	if (!inat_is_group(table[opcode]) && vex_p) {
+		/* If this is not a group, get attribute directly */
+		table = inat_avx_tables[vex_m][vex_p];
+		if (!table)
+			return 0;
+	}
+	return table[opcode];
+}
diff --git a/tools/perf/util/intel-pt-decoder/inat.h b/tools/perf/util/intel-pt-decoder/inat.h
new file mode 100644
index 0000000..611645e
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/inat.h
@@ -0,0 +1,221 @@
+#ifndef _ASM_X86_INAT_H
+#define _ASM_X86_INAT_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include "inat_types.h"
+
+/*
+ * Internal bits. Don't use bitmasks directly, because these bits are
+ * unstable. You should use checking functions.
+ */
+
+#define INAT_OPCODE_TABLE_SIZE 256
+#define INAT_GROUP_TABLE_SIZE 8
+
+/* Legacy last prefixes */
+#define INAT_PFX_OPNDSZ	1	/* 0x66 */ /* LPFX1 */
+#define INAT_PFX_REPE	2	/* 0xF3 */ /* LPFX2 */
+#define INAT_PFX_REPNE	3	/* 0xF2 */ /* LPFX3 */
+/* Other Legacy prefixes */
+#define INAT_PFX_LOCK	4	/* 0xF0 */
+#define INAT_PFX_CS	5	/* 0x2E */
+#define INAT_PFX_DS	6	/* 0x3E */
+#define INAT_PFX_ES	7	/* 0x26 */
+#define INAT_PFX_FS	8	/* 0x64 */
+#define INAT_PFX_GS	9	/* 0x65 */
+#define INAT_PFX_SS	10	/* 0x36 */
+#define INAT_PFX_ADDRSZ	11	/* 0x67 */
+/* x86-64 REX prefix */
+#define INAT_PFX_REX	12	/* 0x4X */
+/* AVX VEX prefixes */
+#define INAT_PFX_VEX2	13	/* 2-bytes VEX prefix */
+#define INAT_PFX_VEX3	14	/* 3-bytes VEX prefix */
+
+#define INAT_LSTPFX_MAX	3
+#define INAT_LGCPFX_MAX	11
+
+/* Immediate size */
+#define INAT_IMM_BYTE		1
+#define INAT_IMM_WORD		2
+#define INAT_IMM_DWORD		3
+#define INAT_IMM_QWORD		4
+#define INAT_IMM_PTR		5
+#define INAT_IMM_VWORD32	6
+#define INAT_IMM_VWORD		7
+
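+/*
+ * insn_attr_t bit layout, as defined by the offsets and widths below:
+ *   bits  0-3   last/legacy prefix id
+ *   bits  4-5   escape table id
+ *   bits  6-10  group id
+ *   bits 11-13  immediate size
+ *   bits 14-20  flags (ModRM, FORCE64, 2nd imm, moffset, variant, VEX)
+ */
+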
+/* Legacy prefix */
+#define INAT_PFX_OFFS	0
+#define INAT_PFX_BITS	4
+#define INAT_PFX_MAX    ((1 << INAT_PFX_BITS) - 1)
+#define INAT_PFX_MASK	(INAT_PFX_MAX << INAT_PFX_OFFS)
+/* Escape opcodes */
+#define INAT_ESC_OFFS	(INAT_PFX_OFFS + INAT_PFX_BITS)
+#define INAT_ESC_BITS	2
+#define INAT_ESC_MAX	((1 << INAT_ESC_BITS) - 1)
+#define INAT_ESC_MASK	(INAT_ESC_MAX << INAT_ESC_OFFS)
+/* Group opcodes (1-16) */
+#define INAT_GRP_OFFS	(INAT_ESC_OFFS + INAT_ESC_BITS)
+#define INAT_GRP_BITS	5
+#define INAT_GRP_MAX	((1 << INAT_GRP_BITS) - 1)
+#define INAT_GRP_MASK	(INAT_GRP_MAX << INAT_GRP_OFFS)
+/* Immediates */
+#define INAT_IMM_OFFS	(INAT_GRP_OFFS + INAT_GRP_BITS)
+#define INAT_IMM_BITS	3
+#define INAT_IMM_MASK	(((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
+/* Flags */
+#define INAT_FLAG_OFFS	(INAT_IMM_OFFS + INAT_IMM_BITS)
+#define INAT_MODRM	(1 << (INAT_FLAG_OFFS))
+#define INAT_FORCE64	(1 << (INAT_FLAG_OFFS + 1))
+#define INAT_SCNDIMM	(1 << (INAT_FLAG_OFFS + 2))
+#define INAT_MOFFSET	(1 << (INAT_FLAG_OFFS + 3))
+#define INAT_VARIANT	(1 << (INAT_FLAG_OFFS + 4))
+#define INAT_VEXOK	(1 << (INAT_FLAG_OFFS + 5))
+#define INAT_VEXONLY	(1 << (INAT_FLAG_OFFS + 6))
+/* Attribute making macros for attribute tables */
+#define INAT_MAKE_PREFIX(pfx)	(pfx << INAT_PFX_OFFS)
+#define INAT_MAKE_ESCAPE(esc)	(esc << INAT_ESC_OFFS)
+#define INAT_MAKE_GROUP(grp)	((grp << INAT_GRP_OFFS) | INAT_MODRM)
+#define INAT_MAKE_IMM(imm)	(imm << INAT_IMM_OFFS)
+
+/* Attribute search APIs */
+extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
+extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
+					     int lpfx_id,
+					     insn_attr_t esc_attr);
+extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+					    int lpfx_id,
+					    insn_attr_t esc_attr);
+extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+					  insn_byte_t vex_m,
+					  insn_byte_t vex_pp);
+
+/* Attribute checking functions */
+static inline int inat_is_legacy_prefix(insn_attr_t attr)
+{
+	attr &= INAT_PFX_MASK;
+	return attr && attr <= INAT_LGCPFX_MAX;
+}
+
+static inline int inat_is_address_size_prefix(insn_attr_t attr)
+{
+	return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+}
+
+static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+{
+	return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+}
+
+static inline int inat_is_rex_prefix(insn_attr_t attr)
+{
+	return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+}
+
+static inline int inat_last_prefix_id(insn_attr_t attr)
+{
+	if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+		return 0;
+	else
+		return attr & INAT_PFX_MASK;
+}
+
+static inline int inat_is_vex_prefix(insn_attr_t attr)
+{
+	attr &= INAT_PFX_MASK;
+	return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_vex3_prefix(insn_attr_t attr)
+{
+	return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_escape(insn_attr_t attr)
+{
+	return attr & INAT_ESC_MASK;
+}
+
+static inline int inat_escape_id(insn_attr_t attr)
+{
+	return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+}
+
+static inline int inat_is_group(insn_attr_t attr)
+{
+	return attr & INAT_GRP_MASK;
+}
+
+static inline int inat_group_id(insn_attr_t attr)
+{
+	return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+}
+
+static inline int inat_group_common_attribute(insn_attr_t attr)
+{
+	return attr & ~INAT_GRP_MASK;
+}
+
+static inline int inat_has_immediate(insn_attr_t attr)
+{
+	return attr & INAT_IMM_MASK;
+}
+
+static inline int inat_immediate_size(insn_attr_t attr)
+{
+	return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+}
+
+static inline int inat_has_modrm(insn_attr_t attr)
+{
+	return attr & INAT_MODRM;
+}
+
+static inline int inat_is_force64(insn_attr_t attr)
+{
+	return attr & INAT_FORCE64;
+}
+
+static inline int inat_has_second_immediate(insn_attr_t attr)
+{
+	return attr & INAT_SCNDIMM;
+}
+
+static inline int inat_has_moffset(insn_attr_t attr)
+{
+	return attr & INAT_MOFFSET;
+}
+
+static inline int inat_has_variant(insn_attr_t attr)
+{
+	return attr & INAT_VARIANT;
+}
+
+static inline int inat_accept_vex(insn_attr_t attr)
+{
+	return attr & INAT_VEXOK;
+}
+
+static inline int inat_must_vex(insn_attr_t attr)
+{
+	return attr & INAT_VEXONLY;
+}
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/inat_types.h b/tools/perf/util/intel-pt-decoder/inat_types.h
new file mode 100644
index 0000000..cb3c20c
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/inat_types.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_X86_INAT_TYPES_H
+#define _ASM_X86_INAT_TYPES_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+/* Instruction attributes */
+typedef unsigned int insn_attr_t;
+typedef unsigned char insn_byte_t;
+typedef signed int insn_value_t;
+
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/insn.c b/tools/perf/util/intel-pt-decoder/insn.c
new file mode 100644
index 0000000..47314a6
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/insn.c
@@ -0,0 +1,594 @@
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004, 2009
+ */
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+#include "inat.h"
+#include "insn.h"
+
+/* Verify that the next sizeof(t) + n bytes lie within the same instruction buffer */
+#define validate_next(t, insn, n)	\
+	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+
+#define __get_next(t, insn)	\
+	({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+
+#define __peek_nbyte_next(t, insn, n)	\
+	({ t r = *(t*)((insn)->next_byte + n); r; })
+
+#define get_next(t, insn)	\
+	({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+
+#define peek_nbyte_next(t, insn, n)	\
+	({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
+
+#define peek_next(t, insn)	peek_nbyte_next(t, insn, 0)
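+/* get_next() consumes bytes, advancing next_byte; the peek variants only look ahead */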
+
+/**
+ * insn_init() - initialize struct insn
+ * @insn:	&struct insn to be initialized
+ * @kaddr:	address (in kernel memory) of instruction (or copy thereof)
+ * @x86_64:	!0 for 64-bit kernel or 64-bit app
+ */
+void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+{
+	/*
+	 * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+	 * even if the input buffer is long enough to hold them.
+	 */
+	if (buf_len > MAX_INSN_SIZE)
+		buf_len = MAX_INSN_SIZE;
+
+	memset(insn, 0, sizeof(*insn));
+	insn->kaddr = kaddr;
+	insn->end_kaddr = kaddr + buf_len;
+	insn->next_byte = kaddr;
+	insn->x86_64 = x86_64 ? 1 : 0;
+	insn->opnd_bytes = 4;
+	if (x86_64)
+		insn->addr_bytes = 8;
+	else
+		insn->addr_bytes = 4;
+}
+
+/**
+ * insn_get_prefixes - scan x86 instruction prefix bytes
+ * @insn:	&struct insn containing instruction
+ *
+ * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
+ * to point to the (first) opcode.  No effect if @insn->prefixes.got
+ * is already set.
+ */
+void insn_get_prefixes(struct insn *insn)
+{
+	struct insn_field *prefixes = &insn->prefixes;
+	insn_attr_t attr;
+	insn_byte_t b, lb;
+	int i, nb;
+
+	if (prefixes->got)
+		return;
+
+	nb = 0;
+	lb = 0;
+	b = peek_next(insn_byte_t, insn);
+	attr = inat_get_opcode_attribute(b);
+	while (inat_is_legacy_prefix(attr)) {
+		/* Skip if same prefix */
+		for (i = 0; i < nb; i++)
+			if (prefixes->bytes[i] == b)
+				goto found;
+		if (nb == 4)
+			/* Invalid instruction */
+			break;
+		prefixes->bytes[nb++] = b;
+		if (inat_is_address_size_prefix(attr)) {
+			/* address size switches 2/4 or 4/8 */
+			if (insn->x86_64)
+				insn->addr_bytes ^= 12;
+			else
+				insn->addr_bytes ^= 6;
+		} else if (inat_is_operand_size_prefix(attr)) {
+			/* operand size switches 2/4 */
+			insn->opnd_bytes ^= 6;
+		}
+found:
+		prefixes->nbytes++;
+		insn->next_byte++;
+		lb = b;
+		b = peek_next(insn_byte_t, insn);
+		attr = inat_get_opcode_attribute(b);
+	}
+	/* Set the last prefix */
+	if (lb && lb != insn->prefixes.bytes[3]) {
+		if (unlikely(insn->prefixes.bytes[3])) {
+			/* Swap the last prefix */
+			b = insn->prefixes.bytes[3];
+			for (i = 0; i < nb; i++)
+				if (prefixes->bytes[i] == lb)
+					prefixes->bytes[i] = b;
+		}
+		insn->prefixes.bytes[3] = lb;
+	}
+
+	/* Decode REX prefix */
+	if (insn->x86_64) {
+		b = peek_next(insn_byte_t, insn);
+		attr = inat_get_opcode_attribute(b);
+		if (inat_is_rex_prefix(attr)) {
+			insn->rex_prefix.value = b;
+			insn->rex_prefix.nbytes = 1;
+			insn->next_byte++;
+			if (X86_REX_W(b))
+				/* REX.W overrides opnd_size */
+				insn->opnd_bytes = 8;
+		}
+	}
+	insn->rex_prefix.got = 1;
+
+	/* Decode VEX prefix */
+	b = peek_next(insn_byte_t, insn);
+	attr = inat_get_opcode_attribute(b);
+	if (inat_is_vex_prefix(attr)) {
+		insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
+		if (!insn->x86_64) {
+			/*
+			 * In 32-bit mode, if the [7:6] bits (mod bits of
+			 * ModRM) on the second byte are not 11b, it is
+			 * LDS or LES.
+			 */
+			if (X86_MODRM_MOD(b2) != 3)
+				goto vex_end;
+		}
+		insn->vex_prefix.bytes[0] = b;
+		insn->vex_prefix.bytes[1] = b2;
+		if (inat_is_vex3_prefix(attr)) {
+			b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+			insn->vex_prefix.bytes[2] = b2;
+			insn->vex_prefix.nbytes = 3;
+			insn->next_byte += 3;
+			if (insn->x86_64 && X86_VEX_W(b2))
+				/* VEX.W overrides opnd_size */
+				insn->opnd_bytes = 8;
+		} else {
+			/*
+			 * For VEX2, fake VEX3-like byte#2.
+			 * Makes it easier to decode vex.W, vex.vvvv,
+			 * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
+			 */
+			insn->vex_prefix.bytes[2] = b2 & 0x7f;
+			insn->vex_prefix.nbytes = 2;
+			insn->next_byte += 2;
+		}
+	}
+vex_end:
+	insn->vex_prefix.got = 1;
+
+	prefixes->got = 1;
+
+err_out:
+	return;
+}
+
+/**
+ * insn_get_opcode - collect opcode(s)
+ * @insn:	&struct insn containing instruction
+ *
+ * Populates @insn->opcode, updates @insn->next_byte to point past the
+ * opcode byte(s), and sets @insn->attr (except for groups).
+ * If necessary, first collects any preceding (prefix) bytes.
+ * Sets @insn->opcode.value = opcode1.  No effect if @insn->opcode.got
+ * is already 1.
+ */
+void insn_get_opcode(struct insn *insn)
+{
+	struct insn_field *opcode = &insn->opcode;
+	insn_byte_t op;
+	int pfx_id;
+	if (opcode->got)
+		return;
+	if (!insn->prefixes.got)
+		insn_get_prefixes(insn);
+
+	/* Get first opcode */
+	op = get_next(insn_byte_t, insn);
+	opcode->bytes[0] = op;
+	opcode->nbytes = 1;
+
+	/* Check if there is VEX prefix or not */
+	if (insn_is_avx(insn)) {
+		insn_byte_t m, p;
+		m = insn_vex_m_bits(insn);
+		p = insn_vex_p_bits(insn);
+		insn->attr = inat_get_avx_attribute(op, m, p);
+		if (!inat_accept_vex(insn->attr) && !inat_is_group(insn->attr))
+			insn->attr = 0;	/* This instruction is bad */
+		goto end;	/* VEX has only 1 byte for opcode */
+	}
+
+	insn->attr = inat_get_opcode_attribute(op);
+	while (inat_is_escape(insn->attr)) {
+		/* Get escaped opcode */
+		op = get_next(insn_byte_t, insn);
+		opcode->bytes[opcode->nbytes++] = op;
+		pfx_id = insn_last_prefix_id(insn);
+		insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
+	}
+	if (inat_must_vex(insn->attr))
+		insn->attr = 0;	/* This instruction is bad */
+end:
+	opcode->got = 1;
+
+err_out:
+	return;
+}
+
+/**
+ * insn_get_modrm - collect ModRM byte, if any
+ * @insn:	&struct insn containing instruction
+ *
+ * Populates @insn->modrm and updates @insn->next_byte to point past the
+ * ModRM byte, if any.  If necessary, first collects the preceding bytes
+ * (prefixes and opcode(s)).  No effect if @insn->modrm.got is already 1.
+ */
+void insn_get_modrm(struct insn *insn)
+{
+	struct insn_field *modrm = &insn->modrm;
+	insn_byte_t pfx_id, mod;
+	if (modrm->got)
+		return;
+	if (!insn->opcode.got)
+		insn_get_opcode(insn);
+
+	if (inat_has_modrm(insn->attr)) {
+		mod = get_next(insn_byte_t, insn);
+		modrm->value = mod;
+		modrm->nbytes = 1;
+		if (inat_is_group(insn->attr)) {
+			pfx_id = insn_last_prefix_id(insn);
+			insn->attr = inat_get_group_attribute(mod, pfx_id,
+							      insn->attr);
+			if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+				insn->attr = 0;	/* This is bad */
+		}
+	}
+
+	if (insn->x86_64 && inat_is_force64(insn->attr))
+		insn->opnd_bytes = 8;
+	modrm->got = 1;
+
+err_out:
+	return;
+}
+
+
+/**
+ * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
+ * @insn:	&struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte.  No effect if @insn->x86_64 is 0.
+ */
+int insn_rip_relative(struct insn *insn)
+{
+	struct insn_field *modrm = &insn->modrm;
+
+	if (!insn->x86_64)
+		return 0;
+	if (!modrm->got)
+		insn_get_modrm(insn);
+	/*
+	 * For rip-relative instructions, the mod field (top 2 bits)
+	 * is zero and the r/m field (bottom 3 bits) is 0x5.
+	 */
+	return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+}
+
+/**
+ * insn_get_sib() - Get the SIB byte of instruction
+ * @insn:	&struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte.
+ */
+void insn_get_sib(struct insn *insn)
+{
+	insn_byte_t modrm;
+
+	if (insn->sib.got)
+		return;
+	if (!insn->modrm.got)
+		insn_get_modrm(insn);
+	if (insn->modrm.nbytes) {
+		modrm = (insn_byte_t)insn->modrm.value;
+		if (insn->addr_bytes != 2 &&
+		    X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
+			insn->sib.value = get_next(insn_byte_t, insn);
+			insn->sib.nbytes = 1;
+		}
+	}
+	insn->sib.got = 1;
+
+err_out:
+	return;
+}
+
+
+/**
+ * insn_get_displacement() - Get the displacement of instruction
+ * @insn:	&struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * SIB byte.
+ * The displacement value is sign-extended.
+ */
+void insn_get_displacement(struct insn *insn)
+{
+	insn_byte_t mod, rm, base;
+
+	if (insn->displacement.got)
+		return;
+	if (!insn->sib.got)
+		insn_get_sib(insn);
+	if (insn->modrm.nbytes) {
+		/*
+		 * Interpreting the modrm byte:
+		 * mod = 00 - no displacement fields (exceptions below)
+		 * mod = 01 - 1-byte displacement field
+		 * mod = 10 - displacement field is 4 bytes, or 2 bytes if
+		 * 	address size = 2 (0x67 prefix in 32-bit mode)
+		 * mod = 11 - no memory operand
+		 *
+		 * If address size = 2...
+		 * mod = 00, r/m = 110 - displacement field is 2 bytes
+		 *
+		 * If address size != 2...
+		 * mod != 11, r/m = 100 - SIB byte exists
+		 * mod = 00, SIB base = 101 - displacement field is 4 bytes
+		 * mod = 00, r/m = 101 - rip-relative addressing, displacement
+		 * 	field is 4 bytes
+		 */
+		mod = X86_MODRM_MOD(insn->modrm.value);
+		rm = X86_MODRM_RM(insn->modrm.value);
+		base = X86_SIB_BASE(insn->sib.value);
+		if (mod == 3)
+			goto out;
+		if (mod == 1) {
+			insn->displacement.value = get_next(char, insn);
+			insn->displacement.nbytes = 1;
+		} else if (insn->addr_bytes == 2) {
+			if ((mod == 0 && rm == 6) || mod == 2) {
+				insn->displacement.value =
+					 get_next(short, insn);
+				insn->displacement.nbytes = 2;
+			}
+		} else {
+			if ((mod == 0 && rm == 5) || mod == 2 ||
+			    (mod == 0 && base == 5)) {
+				insn->displacement.value = get_next(int, insn);
+				insn->displacement.nbytes = 4;
+			}
+		}
+	}
+out:
+	insn->displacement.got = 1;
+
+err_out:
+	return;
+}
+
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
+{
+	switch (insn->addr_bytes) {
+	case 2:
+		insn->moffset1.value = get_next(short, insn);
+		insn->moffset1.nbytes = 2;
+		break;
+	case 4:
+		insn->moffset1.value = get_next(int, insn);
+		insn->moffset1.nbytes = 4;
+		break;
+	case 8:
+		insn->moffset1.value = get_next(int, insn);
+		insn->moffset1.nbytes = 4;
+		insn->moffset2.value = get_next(int, insn);
+		insn->moffset2.nbytes = 4;
+		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
+	}
+	insn->moffset1.got = insn->moffset2.got = 1;
+
+	return 1;
+
+err_out:
+	return 0;
+}
+
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
+{
+	switch (insn->opnd_bytes) {
+	case 2:
+		insn->immediate.value = get_next(short, insn);
+		insn->immediate.nbytes = 2;
+		break;
+	case 4:
+	case 8:
+		insn->immediate.value = get_next(int, insn);
+		insn->immediate.nbytes = 4;
+		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
+	}
+
+	return 1;
+
+err_out:
+	return 0;
+}
+
+/* Decode imm v64(Iv/Ov), Return 0 if failed */
+static int __get_immv(struct insn *insn)
+{
+	switch (insn->opnd_bytes) {
+	case 2:
+		insn->immediate1.value = get_next(short, insn);
+		insn->immediate1.nbytes = 2;
+		break;
+	case 4:
+		insn->immediate1.value = get_next(int, insn);
+		insn->immediate1.nbytes = 4;
+		break;
+	case 8:
+		insn->immediate1.value = get_next(int, insn);
+		insn->immediate1.nbytes = 4;
+		insn->immediate2.value = get_next(int, insn);
+		insn->immediate2.nbytes = 4;
+		break;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
+	}
+	insn->immediate1.got = insn->immediate2.got = 1;
+
+	return 1;
+err_out:
+	return 0;
+}
+
+/* Decode ptr16:16/32(Ap) */
+static int __get_immptr(struct insn *insn)
+{
+	switch (insn->opnd_bytes) {
+	case 2:
+		insn->immediate1.value = get_next(short, insn);
+		insn->immediate1.nbytes = 2;
+		break;
+	case 4:
+		insn->immediate1.value = get_next(int, insn);
+		insn->immediate1.nbytes = 4;
+		break;
+	case 8:
+		/* ptr16:64 does not exist (no segment) */
+		return 0;
+	default:	/* opnd_bytes must be modified manually */
+		goto err_out;
+	}
+	insn->immediate2.value = get_next(unsigned short, insn);
+	insn->immediate2.nbytes = 2;
+	insn->immediate1.got = insn->immediate2.got = 1;
+
+	return 1;
+err_out:
+	return 0;
+}
+
+/**
+ * insn_get_immediate() - Get the immediates of instruction
+ * @insn:	&struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * displacement bytes.
+ * Most immediates are sign-expanded. The unsigned value can be
+ * obtained by masking with ((1 << (nbytes * 8)) - 1).
+ */
+void insn_get_immediate(struct insn *insn)
+{
+	if (insn->immediate.got)
+		return;
+	if (!insn->displacement.got)
+		insn_get_displacement(insn);
+
+	if (inat_has_moffset(insn->attr)) {
+		if (!__get_moffset(insn))
+			goto err_out;
+		goto done;
+	}
+
+	if (!inat_has_immediate(insn->attr))
+		/* no immediates */
+		goto done;
+
+	switch (inat_immediate_size(insn->attr)) {
+	case INAT_IMM_BYTE:
+		insn->immediate.value = get_next(char, insn);
+		insn->immediate.nbytes = 1;
+		break;
+	case INAT_IMM_WORD:
+		insn->immediate.value = get_next(short, insn);
+		insn->immediate.nbytes = 2;
+		break;
+	case INAT_IMM_DWORD:
+		insn->immediate.value = get_next(int, insn);
+		insn->immediate.nbytes = 4;
+		break;
+	case INAT_IMM_QWORD:
+		insn->immediate1.value = get_next(int, insn);
+		insn->immediate1.nbytes = 4;
+		insn->immediate2.value = get_next(int, insn);
+		insn->immediate2.nbytes = 4;
+		break;
+	case INAT_IMM_PTR:
+		if (!__get_immptr(insn))
+			goto err_out;
+		break;
+	case INAT_IMM_VWORD32:
+		if (!__get_immv32(insn))
+			goto err_out;
+		break;
+	case INAT_IMM_VWORD:
+		if (!__get_immv(insn))
+			goto err_out;
+		break;
+	default:
+		/* Here, insn must have an immediate, but decoding it failed */
+		goto err_out;
+	}
+	if (inat_has_second_immediate(insn->attr)) {
+		insn->immediate2.value = get_next(char, insn);
+		insn->immediate2.nbytes = 1;
+	}
+done:
+	insn->immediate.got = 1;
+
+err_out:
+	return;
+}
+
+/**
+ * insn_get_length() - Get the length of instruction
+ * @insn:	&struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * immediates bytes.
+ */
+void insn_get_length(struct insn *insn)
+{
+	if (insn->length)
+		return;
+	if (!insn->immediate.got)
+		insn_get_immediate(insn);
+	insn->length = (unsigned char)((unsigned long)insn->next_byte
+				     - (unsigned long)insn->kaddr);
+}
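
For reference, the intended way to drive this decoder (a sketch, not part of
the patch) is to initialise an insn over the bytes still available -- at most
MAX_INSN_SIZE -- and ask for the field needed; each insn_get_*() first
collects everything it depends on:

	struct insn insn;

	/* p and len are a hypothetical cursor and the bytes remaining,
	 * capped at MAX_INSN_SIZE; 1 = 64-bit code.
	 */
	insn_init(&insn, p, len, 1);
	insn_get_length(&insn);			/* decodes up to the immediates */
	if (!insn_complete(&insn))
		return -1;			/* truncated or undecodable */
	p += insn.length;
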
diff --git a/tools/perf/util/intel-pt-decoder/insn.h b/tools/perf/util/intel-pt-decoder/insn.h
new file mode 100644
index 0000000..dd12da0
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/insn.h
@@ -0,0 +1,201 @@
+#ifndef _ASM_X86_INSN_H
+#define _ASM_X86_INSN_H
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+/* insn_attr_t is defined in inat.h */
+#include "inat.h"
+
+struct insn_field {
+	union {
+		insn_value_t value;
+		insn_byte_t bytes[4];
+	};
+	/* !0 if we've run insn_get_xxx() for this field */
+	unsigned char got;
+	unsigned char nbytes;
+};
+
+struct insn {
+	struct insn_field prefixes;	/*
+					 * Prefixes
+					 * prefixes.bytes[3]: last prefix
+					 */
+	struct insn_field rex_prefix;	/* REX prefix */
+	struct insn_field vex_prefix;	/* VEX prefix */
+	struct insn_field opcode;	/*
+					 * opcode.bytes[0]: opcode1
+					 * opcode.bytes[1]: opcode2
+					 * opcode.bytes[2]: opcode3
+					 */
+	struct insn_field modrm;
+	struct insn_field sib;
+	struct insn_field displacement;
+	union {
+		struct insn_field immediate;
+		struct insn_field moffset1;	/* for 64bit MOV */
+		struct insn_field immediate1;	/* for 64bit imm or off16/32 */
+	};
+	union {
+		struct insn_field moffset2;	/* for 64bit MOV */
+		struct insn_field immediate2;	/* for 64bit imm or seg16 */
+	};
+
+	insn_attr_t attr;
+	unsigned char opnd_bytes;
+	unsigned char addr_bytes;
+	unsigned char length;
+	unsigned char x86_64;
+
+	const insn_byte_t *kaddr;	/* kernel address of insn to analyze */
+	const insn_byte_t *end_kaddr;	/* kernel address of last insn in buffer */
+	const insn_byte_t *next_byte;
+};
+
+#define MAX_INSN_SIZE	15
+
+#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
+
+#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
+#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
+#define X86_SIB_BASE(sib) ((sib) & 0x07)
+
+#define X86_REX_W(rex) ((rex) & 8)
+#define X86_REX_R(rex) ((rex) & 4)
+#define X86_REX_X(rex) ((rex) & 2)
+#define X86_REX_B(rex) ((rex) & 1)
+
+/* VEX bit flags  */
+#define X86_VEX_W(vex)	((vex) & 0x80)	/* VEX3 Byte2 */
+#define X86_VEX_R(vex)	((vex) & 0x80)	/* VEX2/3 Byte1 */
+#define X86_VEX_X(vex)	((vex) & 0x40)	/* VEX3 Byte1 */
+#define X86_VEX_B(vex)	((vex) & 0x20)	/* VEX3 Byte1 */
+#define X86_VEX_L(vex)	((vex) & 0x04)	/* VEX3 Byte2, VEX2 Byte1 */
+/* VEX bit fields */
+#define X86_VEX3_M(vex)	((vex) & 0x1f)		/* VEX3 Byte1 */
+#define X86_VEX2_M	1			/* VEX2.M always 1 */
+#define X86_VEX_V(vex)	(((vex) & 0x78) >> 3)	/* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_P(vex)	((vex) & 0x03)		/* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_M_MAX	0x1f			/* VEX3.M Maximum value */
+
+extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
+extern void insn_get_prefixes(struct insn *insn);
+extern void insn_get_opcode(struct insn *insn);
+extern void insn_get_modrm(struct insn *insn);
+extern void insn_get_sib(struct insn *insn);
+extern void insn_get_displacement(struct insn *insn);
+extern void insn_get_immediate(struct insn *insn);
+extern void insn_get_length(struct insn *insn);
+
+/* Attribute will be determined after getting ModRM (for opcode groups) */
+static inline void insn_get_attribute(struct insn *insn)
+{
+	insn_get_modrm(insn);
+}
+
+/* Instruction uses RIP-relative addressing */
+extern int insn_rip_relative(struct insn *insn);
+
+/* Init insn for kernel text */
+static inline void kernel_insn_init(struct insn *insn,
+				    const void *kaddr, int buf_len)
+{
+#ifdef CONFIG_X86_64
+	insn_init(insn, kaddr, buf_len, 1);
+#else /* CONFIG_X86_32 */
+	insn_init(insn, kaddr, buf_len, 0);
+#endif
+}
+
+static inline int insn_is_avx(struct insn *insn)
+{
+	if (!insn->prefixes.got)
+		insn_get_prefixes(insn);
+	return (insn->vex_prefix.value != 0);
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+	return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+		insn->displacement.got && insn->immediate.got;
+}
+
+static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
+{
+	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
+		return X86_VEX2_M;
+	else
+		return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+}
+
+static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
+{
+	if (insn->vex_prefix.nbytes == 2)	/* 2 bytes VEX */
+		return X86_VEX_P(insn->vex_prefix.bytes[1]);
+	else
+		return X86_VEX_P(insn->vex_prefix.bytes[2]);
+}
+
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+	if (insn_is_avx(insn))
+		return insn_vex_p_bits(insn);	/* VEX_p is a SIMD prefix id */
+
+	if (insn->prefixes.bytes[3])
+		return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+	return 0;
+}
+
+/* Offset of each field from kaddr */
+static inline int insn_offset_rex_prefix(struct insn *insn)
+{
+	return insn->prefixes.nbytes;
+}
+static inline int insn_offset_vex_prefix(struct insn *insn)
+{
+	return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
+}
+static inline int insn_offset_opcode(struct insn *insn)
+{
+	return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
+}
+static inline int insn_offset_modrm(struct insn *insn)
+{
+	return insn_offset_opcode(insn) + insn->opcode.nbytes;
+}
+static inline int insn_offset_sib(struct insn *insn)
+{
+	return insn_offset_modrm(insn) + insn->modrm.nbytes;
+}
+static inline int insn_offset_displacement(struct insn *insn)
+{
+	return insn_offset_sib(insn) + insn->sib.nbytes;
+}
+static inline int insn_offset_immediate(struct insn *insn)
+{
+	return insn_offset_displacement(insn) + insn->displacement.nbytes;
+}
+
+#endif /* _ASM_X86_INSN_H */
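
The insn_offset_*() helpers above simply accumulate the nbytes of the earlier
fields, so once an instruction is decoded, the raw displacement or immediate
bytes can be located in the original byte stream without re-parsing, e.g.
(sketch):

	insn_get_length(&insn);
	const insn_byte_t *disp = insn.kaddr + insn_offset_displacement(&insn);
	const insn_byte_t *imm  = insn.kaddr + insn_offset_immediate(&insn);
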
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
new file mode 100644
index 0000000..22ba502
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -0,0 +1,2345 @@
+/*
+ * intel_pt_decoder.c: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdlib.h>
+#include <stdbool.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "../cache.h"
+#include "../util.h"
+
+#include "intel-pt-insn-decoder.h"
+#include "intel-pt-pkt-decoder.h"
+#include "intel-pt-decoder.h"
+#include "intel-pt-log.h"
+
+#define INTEL_PT_BLK_SIZE 1024
+
+#define BIT63 (((uint64_t)1 << 63))
+
+#define INTEL_PT_RETURN 1
+
+/* Maximum number of loops with no packets consumed i.e. stuck in a loop */
+#define INTEL_PT_MAX_LOOPS 10000
+
+struct intel_pt_blk {
+	struct intel_pt_blk *prev;
+	uint64_t ip[INTEL_PT_BLK_SIZE];
+};
+
+struct intel_pt_stack {
+	struct intel_pt_blk *blk;
+	struct intel_pt_blk *spare;
+	int pos;
+};
+
+enum intel_pt_pkt_state {
+	INTEL_PT_STATE_NO_PSB,
+	INTEL_PT_STATE_NO_IP,
+	INTEL_PT_STATE_ERR_RESYNC,
+	INTEL_PT_STATE_IN_SYNC,
+	INTEL_PT_STATE_TNT,
+	INTEL_PT_STATE_TIP,
+	INTEL_PT_STATE_TIP_PGD,
+	INTEL_PT_STATE_FUP,
+	INTEL_PT_STATE_FUP_NO_TIP,
+};
+
+#ifdef INTEL_PT_STRICT
+#define INTEL_PT_STATE_ERR1	INTEL_PT_STATE_NO_PSB
+#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_PSB
+#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_NO_PSB
+#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_NO_PSB
+#else
+#define INTEL_PT_STATE_ERR1	(decoder->pkt_state)
+#define INTEL_PT_STATE_ERR2	INTEL_PT_STATE_NO_IP
+#define INTEL_PT_STATE_ERR3	INTEL_PT_STATE_ERR_RESYNC
+#define INTEL_PT_STATE_ERR4	INTEL_PT_STATE_IN_SYNC
+#endif
+
+struct intel_pt_decoder {
+	int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
+	int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
+			 uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
+			 uint64_t max_insn_cnt, void *data);
+	void *data;
+	struct intel_pt_state state;
+	const unsigned char *buf;
+	size_t len;
+	bool return_compression;
+	bool mtc_insn;
+	bool pge;
+	bool have_tma;
+	bool have_cyc;
+	uint64_t pos;
+	uint64_t last_ip;
+	uint64_t ip;
+	uint64_t cr3;
+	uint64_t timestamp;
+	uint64_t tsc_timestamp;
+	uint64_t ref_timestamp;
+	uint64_t ret_addr;
+	uint64_t ctc_timestamp;
+	uint64_t ctc_delta;
+	uint64_t cycle_cnt;
+	uint64_t cyc_ref_timestamp;
+	uint32_t last_mtc;
+	uint32_t tsc_ctc_ratio_n;
+	uint32_t tsc_ctc_ratio_d;
+	uint32_t tsc_ctc_mult;
+	uint32_t tsc_slip;
+	uint32_t ctc_rem_mask;
+	int mtc_shift;
+	struct intel_pt_stack stack;
+	enum intel_pt_pkt_state pkt_state;
+	struct intel_pt_pkt packet;
+	struct intel_pt_pkt tnt;
+	int pkt_step;
+	int pkt_len;
+	int last_packet_type;
+	unsigned int cbr;
+	unsigned int max_non_turbo_ratio;
+	double max_non_turbo_ratio_fp;
+	double cbr_cyc_to_tsc;
+	double calc_cyc_to_tsc;
+	bool have_calc_cyc_to_tsc;
+	int exec_mode;
+	unsigned int insn_bytes;
+	uint64_t sign_bit;
+	uint64_t sign_bits;
+	uint64_t period;
+	enum intel_pt_period_type period_type;
+	uint64_t tot_insn_cnt;
+	uint64_t period_insn_cnt;
+	uint64_t period_mask;
+	uint64_t period_ticks;
+	uint64_t last_masked_timestamp;
+	bool continuous_period;
+	bool overflow;
+	bool set_fup_tx_flags;
+	unsigned int fup_tx_flags;
+	unsigned int tx_flags;
+	uint64_t timestamp_insn_cnt;
+	uint64_t stuck_ip;
+	int no_progress;
+	int stuck_ip_prd;
+	int stuck_ip_cnt;
+	const unsigned char *next_buf;
+	size_t next_len;
+	unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
+};
+
+static uint64_t intel_pt_lower_power_of_2(uint64_t x)
+{
+	int i;
+
+	for (i = 0; x != 1; i++)
+		x >>= 1;
+
+	return x << i;
+}
+
+static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
+{
+	if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
+		uint64_t period;
+
+		period = intel_pt_lower_power_of_2(decoder->period);
+		decoder->period_mask  = ~(period - 1);
+		decoder->period_ticks = period;
+	}
+}
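
For example, a requested period of 1000 ticks is rounded down to the power of
two 512, so period_ticks becomes 512 and period_mask becomes ~0x1ff; a sample
is then considered due whenever the masked timestamp changes.
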
+
+static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
+{
+	if (!d)
+		return 0;
+	return (t / d) * n + ((t % d) * n) / d;
+}
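
multdiv() evaluates t * n / d in integer arithmetic without letting the
intermediate product overflow 64 bits: t is split into its quotient and
remainder modulo d, so only the (small) remainder is ever multiplied before
dividing.  For example, rescaling 6 CTC ticks with a TSC:CTC ratio of 10:3
gives multdiv(6, 10, 3) = (6/3)*10 + ((6%3)*10)/3 = 20 TSC ticks.
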
+
+struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
+{
+	struct intel_pt_decoder *decoder;
+
+	if (!params->get_trace || !params->walk_insn)
+		return NULL;
+
+	decoder = zalloc(sizeof(struct intel_pt_decoder));
+	if (!decoder)
+		return NULL;
+
+	decoder->get_trace          = params->get_trace;
+	decoder->walk_insn          = params->walk_insn;
+	decoder->data               = params->data;
+	decoder->return_compression = params->return_compression;
+
+	decoder->sign_bit           = (uint64_t)1 << 47;
+	decoder->sign_bits          = ~(((uint64_t)1 << 48) - 1);
+
+	decoder->period             = params->period;
+	decoder->period_type        = params->period_type;
+
+	decoder->max_non_turbo_ratio    = params->max_non_turbo_ratio;
+	decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
+
+	intel_pt_setup_period(decoder);
+
+	decoder->mtc_shift = params->mtc_period;
+	decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1;
+
+	decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n;
+	decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d;
+
+	if (!decoder->tsc_ctc_ratio_n)
+		decoder->tsc_ctc_ratio_d = 0;
+
+	if (decoder->tsc_ctc_ratio_d) {
+		if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
+			decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
+						decoder->tsc_ctc_ratio_d;
+
+		/*
+		 * Allow for timestamps appearing to go backwards because a TSC
+		 * packet has slipped past an MTC packet, so allow 2 MTC ticks
+		 * or ...
+		 */
+		decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
+					decoder->tsc_ctc_ratio_n,
+					decoder->tsc_ctc_ratio_d);
+	}
+	/* ... or 0x100 paranoia */
+	if (decoder->tsc_slip < 0x100)
+		decoder->tsc_slip = 0x100;
+
+	intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
+	intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
+	intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d);
+	intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
+	intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);
+
+	return decoder;
+}
+
+static void intel_pt_pop_blk(struct intel_pt_stack *stack)
+{
+	struct intel_pt_blk *blk = stack->blk;
+
+	stack->blk = blk->prev;
+	if (!stack->spare)
+		stack->spare = blk;
+	else
+		free(blk);
+}
+
+static uint64_t intel_pt_pop(struct intel_pt_stack *stack)
+{
+	if (!stack->pos) {
+		if (!stack->blk)
+			return 0;
+		intel_pt_pop_blk(stack);
+		if (!stack->blk)
+			return 0;
+		stack->pos = INTEL_PT_BLK_SIZE;
+	}
+	return stack->blk->ip[--stack->pos];
+}
+
+static int intel_pt_alloc_blk(struct intel_pt_stack *stack)
+{
+	struct intel_pt_blk *blk;
+
+	if (stack->spare) {
+		blk = stack->spare;
+		stack->spare = NULL;
+	} else {
+		blk = malloc(sizeof(struct intel_pt_blk));
+		if (!blk)
+			return -ENOMEM;
+	}
+
+	blk->prev = stack->blk;
+	stack->blk = blk;
+	stack->pos = 0;
+	return 0;
+}
+
+static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip)
+{
+	int err;
+
+	if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) {
+		err = intel_pt_alloc_blk(stack);
+		if (err)
+			return err;
+	}
+
+	stack->blk->ip[stack->pos++] = ip;
+	return 0;
+}
+
+static void intel_pt_clear_stack(struct intel_pt_stack *stack)
+{
+	while (stack->blk)
+		intel_pt_pop_blk(stack);
+	stack->pos = 0;
+}
+
+static void intel_pt_free_stack(struct intel_pt_stack *stack)
+{
+	intel_pt_clear_stack(stack);
+	zfree(&stack->blk);
+	zfree(&stack->spare);
+}
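
These helpers keep the return-compression call stack as a chain of 1024-entry
blocks (INTEL_PT_BLK_SIZE), holding one spare block to avoid malloc/free churn
on every push/pop cycle.  A minimal standalone exercise of them (sketch only,
assumes <assert.h>):

	struct intel_pt_stack stack = { .blk = NULL, .spare = NULL, .pos = 0 };

	intel_pt_push(&stack, 0x1000);
	intel_pt_push(&stack, 0x2000);
	assert(intel_pt_pop(&stack) == 0x2000);	/* LIFO order */
	assert(intel_pt_pop(&stack) == 0x1000);
	assert(intel_pt_pop(&stack) == 0);	/* empty stack pops as 0 */
	intel_pt_free_stack(&stack);
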
+
+void intel_pt_decoder_free(struct intel_pt_decoder *decoder)
+{
+	intel_pt_free_stack(&decoder->stack);
+	free(decoder);
+}
+
+static int intel_pt_ext_err(int code)
+{
+	switch (code) {
+	case -ENOMEM:
+		return INTEL_PT_ERR_NOMEM;
+	case -ENOSYS:
+		return INTEL_PT_ERR_INTERN;
+	case -EBADMSG:
+		return INTEL_PT_ERR_BADPKT;
+	case -ENODATA:
+		return INTEL_PT_ERR_NODATA;
+	case -EILSEQ:
+		return INTEL_PT_ERR_NOINSN;
+	case -ENOENT:
+		return INTEL_PT_ERR_MISMAT;
+	case -EOVERFLOW:
+		return INTEL_PT_ERR_OVR;
+	case -ENOSPC:
+		return INTEL_PT_ERR_LOST;
+	case -ELOOP:
+		return INTEL_PT_ERR_NELOOP;
+	default:
+		return INTEL_PT_ERR_UNK;
+	}
+}
+
+static const char *intel_pt_err_msgs[] = {
+	[INTEL_PT_ERR_NOMEM]  = "Memory allocation failed",
+	[INTEL_PT_ERR_INTERN] = "Internal error",
+	[INTEL_PT_ERR_BADPKT] = "Bad packet",
+	[INTEL_PT_ERR_NODATA] = "No more data",
+	[INTEL_PT_ERR_NOINSN] = "Failed to get instruction",
+	[INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction",
+	[INTEL_PT_ERR_OVR]    = "Overflow packet",
+	[INTEL_PT_ERR_LOST]   = "Lost trace data",
+	[INTEL_PT_ERR_UNK]    = "Unknown error!",
+	[INTEL_PT_ERR_NELOOP] = "Never-ending loop",
+};
+
+int intel_pt__strerror(int code, char *buf, size_t buflen)
+{
+	if (code < 1 || code > INTEL_PT_ERR_MAX)
+		code = INTEL_PT_ERR_UNK;
+	strlcpy(buf, intel_pt_err_msgs[code], buflen);
+	return 0;
+}
+
+static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
+				 const struct intel_pt_pkt *packet,
+				 uint64_t last_ip)
+{
+	uint64_t ip;
+
+	switch (packet->count) {
+	case 2:
+		ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
+		     packet->payload;
+		break;
+	case 4:
+		ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
+		     packet->payload;
+		break;
+	case 6:
+		ip = packet->payload;
+		break;
+	default:
+		return 0;
+	}
+
+	if (ip & decoder->sign_bit)
+		return ip | decoder->sign_bits;
+
+	return ip;
+}
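
In other words, a 2-byte IP payload replaces only the low 16 bits of the
previous IP, a 4-byte payload the low 32 bits, and a 6-byte payload is taken
whole; the result is then sign-extended from bit 47 to a canonical 64-bit
address.  For example, with last_ip 0x7f0012345678 and a 4-byte payload of
0x9abcdef0, the reconstructed IP is 0x7f009abcdef0.
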
+
+static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
+{
+	decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet,
+					    decoder->last_ip);
+}
+
+static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
+{
+	intel_pt_set_last_ip(decoder);
+	decoder->ip = decoder->last_ip;
+}
+
+static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder)
+{
+	intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos,
+			    decoder->buf);
+}
+
+static int intel_pt_bug(struct intel_pt_decoder *decoder)
+{
+	intel_pt_log("ERROR: Internal error\n");
+	decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
+	return -ENOSYS;
+}
+
+static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder)
+{
+	decoder->tx_flags = 0;
+}
+
+static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
+{
+	decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
+}
+
+static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
+{
+	intel_pt_clear_tx_flags(decoder);
+	decoder->have_tma = false;
+	decoder->pkt_len = 1;
+	decoder->pkt_step = 1;
+	intel_pt_decoder_log_packet(decoder);
+	if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) {
+		intel_pt_log("ERROR: Bad packet\n");
+		decoder->pkt_state = INTEL_PT_STATE_ERR1;
+	}
+	return -EBADMSG;
+}
+
+static int intel_pt_get_data(struct intel_pt_decoder *decoder)
+{
+	struct intel_pt_buffer buffer = { .buf = 0, };
+	int ret;
+
+	decoder->pkt_step = 0;
+
+	intel_pt_log("Getting more data\n");
+	ret = decoder->get_trace(&buffer, decoder->data);
+	if (ret)
+		return ret;
+	decoder->buf = buffer.buf;
+	decoder->len = buffer.len;
+	if (!decoder->len) {
+		intel_pt_log("No more data\n");
+		return -ENODATA;
+	}
+	if (!buffer.consecutive) {
+		decoder->ip = 0;
+		decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
+		decoder->ref_timestamp = buffer.ref_timestamp;
+		decoder->timestamp = 0;
+		decoder->have_tma = false;
+		decoder->state.trace_nr = buffer.trace_nr;
+		intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
+			     decoder->ref_timestamp);
+		return -ENOLINK;
+	}
+
+	return 0;
+}
+
+static int intel_pt_get_next_data(struct intel_pt_decoder *decoder)
+{
+	if (!decoder->next_buf)
+		return intel_pt_get_data(decoder);
+
+	decoder->buf = decoder->next_buf;
+	decoder->len = decoder->next_len;
+	decoder->next_buf = 0;
+	decoder->next_len = 0;
+	return 0;
+}
+
+static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder)
+{
+	unsigned char *buf = decoder->temp_buf;
+	size_t old_len, len, n;
+	int ret;
+
+	old_len = decoder->len;
+	len = decoder->len;
+	memcpy(buf, decoder->buf, len);
+
+	ret = intel_pt_get_data(decoder);
+	if (ret) {
+		decoder->pos += old_len;
+		return ret < 0 ? ret : -EINVAL;
+	}
+
+	n = INTEL_PT_PKT_MAX_SZ - len;
+	if (n > decoder->len)
+		n = decoder->len;
+	memcpy(buf + len, decoder->buf, n);
+	len += n;
+
+	ret = intel_pt_get_packet(buf, len, &decoder->packet);
+	if (ret < (int)old_len) {
+		decoder->next_buf = decoder->buf;
+		decoder->next_len = decoder->len;
+		decoder->buf = buf;
+		decoder->len = old_len;
+		return intel_pt_bad_packet(decoder);
+	}
+
+	decoder->next_buf = decoder->buf + (ret - old_len);
+	decoder->next_len = decoder->len - (ret - old_len);
+
+	decoder->buf = buf;
+	decoder->len = ret;
+
+	return ret;
+}
+
+struct intel_pt_pkt_info {
+	struct intel_pt_decoder	  *decoder;
+	struct intel_pt_pkt       packet;
+	uint64_t                  pos;
+	int                       pkt_len;
+	int                       last_packet_type;
+	void                      *data;
+};
+
+typedef int (*intel_pt_pkt_cb_t)(struct intel_pt_pkt_info *pkt_info);
+
+/* Lookahead packets in current buffer */
+static int intel_pt_pkt_lookahead(struct intel_pt_decoder *decoder,
+				  intel_pt_pkt_cb_t cb, void *data)
+{
+	struct intel_pt_pkt_info pkt_info;
+	const unsigned char *buf = decoder->buf;
+	size_t len = decoder->len;
+	int ret;
+
+	pkt_info.decoder          = decoder;
+	pkt_info.pos              = decoder->pos;
+	pkt_info.pkt_len          = decoder->pkt_step;
+	pkt_info.last_packet_type = decoder->last_packet_type;
+	pkt_info.data             = data;
+
+	while (1) {
+		do {
+			pkt_info.pos += pkt_info.pkt_len;
+			buf          += pkt_info.pkt_len;
+			len          -= pkt_info.pkt_len;
+
+			if (!len)
+				return INTEL_PT_NEED_MORE_BYTES;
+
+			ret = intel_pt_get_packet(buf, len, &pkt_info.packet);
+			if (!ret)
+				return INTEL_PT_NEED_MORE_BYTES;
+			if (ret < 0)
+				return ret;
+
+			pkt_info.pkt_len = ret;
+		} while (pkt_info.packet.type == INTEL_PT_PAD);
+
+		ret = cb(&pkt_info);
+		if (ret)
+			return 0;
+
+		pkt_info.last_packet_type = pkt_info.packet.type;
+	}
+}
+
+struct intel_pt_calc_cyc_to_tsc_info {
+	uint64_t        cycle_cnt;
+	unsigned int    cbr;
+	uint32_t        last_mtc;
+	uint64_t        ctc_timestamp;
+	uint64_t        ctc_delta;
+	uint64_t        tsc_timestamp;
+	uint64_t        timestamp;
+	bool            have_tma;
+	bool            from_mtc;
+	double          cbr_cyc_to_tsc;
+};
+
+static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
+{
+	struct intel_pt_decoder *decoder = pkt_info->decoder;
+	struct intel_pt_calc_cyc_to_tsc_info *data = pkt_info->data;
+	uint64_t timestamp;
+	double cyc_to_tsc;
+	unsigned int cbr;
+	uint32_t mtc, mtc_delta, ctc, fc, ctc_rem;
+
+	switch (pkt_info->packet.type) {
+	case INTEL_PT_TNT:
+	case INTEL_PT_TIP_PGE:
+	case INTEL_PT_TIP:
+	case INTEL_PT_FUP:
+	case INTEL_PT_PSB:
+	case INTEL_PT_PIP:
+	case INTEL_PT_MODE_EXEC:
+	case INTEL_PT_MODE_TSX:
+	case INTEL_PT_PSBEND:
+	case INTEL_PT_PAD:
+	case INTEL_PT_VMCS:
+	case INTEL_PT_MNT:
+		return 0;
+
+	case INTEL_PT_MTC:
+		if (!data->have_tma)
+			return 0;
+
+		mtc = pkt_info->packet.payload;
+		if (mtc > data->last_mtc)
+			mtc_delta = mtc - data->last_mtc;
+		else
+			mtc_delta = mtc + 256 - data->last_mtc;
+		data->ctc_delta += mtc_delta << decoder->mtc_shift;
+		data->last_mtc = mtc;
+
+		if (decoder->tsc_ctc_mult) {
+			timestamp = data->ctc_timestamp +
+				data->ctc_delta * decoder->tsc_ctc_mult;
+		} else {
+			timestamp = data->ctc_timestamp +
+				multdiv(data->ctc_delta,
+					decoder->tsc_ctc_ratio_n,
+					decoder->tsc_ctc_ratio_d);
+		}
+
+		if (timestamp < data->timestamp)
+			return 1;
+
+		if (pkt_info->last_packet_type != INTEL_PT_CYC) {
+			data->timestamp = timestamp;
+			return 0;
+		}
+
+		break;
+
+	case INTEL_PT_TSC:
+		timestamp = pkt_info->packet.payload |
+			    (data->timestamp & (0xffULL << 56));
+		if (data->from_mtc && timestamp < data->timestamp &&
+		    data->timestamp - timestamp < decoder->tsc_slip)
+			return 1;
+		while (timestamp < data->timestamp)
+			timestamp += (1ULL << 56);
+		if (pkt_info->last_packet_type != INTEL_PT_CYC) {
+			if (data->from_mtc)
+				return 1;
+			data->tsc_timestamp = timestamp;
+			data->timestamp = timestamp;
+			return 0;
+		}
+		break;
+
+	case INTEL_PT_TMA:
+		if (data->from_mtc)
+			return 1;
+
+		if (!decoder->tsc_ctc_ratio_d)
+			return 0;
+
+		ctc = pkt_info->packet.payload;
+		fc = pkt_info->packet.count;
+		ctc_rem = ctc & decoder->ctc_rem_mask;
+
+		data->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
+
+		data->ctc_timestamp = data->tsc_timestamp - fc;
+		if (decoder->tsc_ctc_mult) {
+			data->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
+		} else {
+			data->ctc_timestamp -=
+				multdiv(ctc_rem, decoder->tsc_ctc_ratio_n,
+					decoder->tsc_ctc_ratio_d);
+		}
+
+		data->ctc_delta = 0;
+		data->have_tma = true;
+
+		return 0;
+
+	case INTEL_PT_CYC:
+		data->cycle_cnt += pkt_info->packet.payload;
+		return 0;
+
+	case INTEL_PT_CBR:
+		cbr = pkt_info->packet.payload;
+		if (data->cbr && data->cbr != cbr)
+			return 1;
+		data->cbr = cbr;
+		data->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
+		return 0;
+
+	case INTEL_PT_TIP_PGD:
+	case INTEL_PT_TRACESTOP:
+	case INTEL_PT_OVF:
+	case INTEL_PT_BAD: /* Does not happen */
+	default:
+		return 1;
+	}
+
+	if (!data->cbr && decoder->cbr) {
+		data->cbr = decoder->cbr;
+		data->cbr_cyc_to_tsc = decoder->cbr_cyc_to_tsc;
+	}
+
+	if (!data->cycle_cnt)
+		return 1;
+
+	cyc_to_tsc = (double)(timestamp - decoder->timestamp) / data->cycle_cnt;
+
+	if (data->cbr && cyc_to_tsc > data->cbr_cyc_to_tsc &&
+	    cyc_to_tsc / data->cbr_cyc_to_tsc > 1.25) {
+		intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt "\n",
+			     cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
+		return 1;
+	}
+
+	decoder->calc_cyc_to_tsc = cyc_to_tsc;
+	decoder->have_calc_cyc_to_tsc = true;
+
+	if (data->cbr) {
+		intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt "\n",
+			     cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
+	} else {
+		intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. unknown CBR-based value, pos " x64_fmt "\n",
+			     cyc_to_tsc, pkt_info->pos);
+	}
+
+	return 1;
+}
+
+static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
+				     bool from_mtc)
+{
+	struct intel_pt_calc_cyc_to_tsc_info data = {
+		.cycle_cnt      = 0,
+		.cbr            = 0,
+		.last_mtc       = decoder->last_mtc,
+		.ctc_timestamp  = decoder->ctc_timestamp,
+		.ctc_delta      = decoder->ctc_delta,
+		.tsc_timestamp  = decoder->tsc_timestamp,
+		.timestamp      = decoder->timestamp,
+		.have_tma       = decoder->have_tma,
+		.from_mtc       = from_mtc,
+		.cbr_cyc_to_tsc = 0,
+	};
+
+	intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
+}
+
+static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
+{
+	int ret;
+
+	decoder->last_packet_type = decoder->packet.type;
+
+	do {
+		decoder->pos += decoder->pkt_step;
+		decoder->buf += decoder->pkt_step;
+		decoder->len -= decoder->pkt_step;
+
+		if (!decoder->len) {
+			ret = intel_pt_get_next_data(decoder);
+			if (ret)
+				return ret;
+		}
+
+		ret = intel_pt_get_packet(decoder->buf, decoder->len,
+					  &decoder->packet);
+		if (ret == INTEL_PT_NEED_MORE_BYTES &&
+		    decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
+			ret = intel_pt_get_split_packet(decoder);
+			if (ret < 0)
+				return ret;
+		}
+		if (ret <= 0)
+			return intel_pt_bad_packet(decoder);
+
+		decoder->pkt_len = ret;
+		decoder->pkt_step = ret;
+		intel_pt_decoder_log_packet(decoder);
+	} while (decoder->packet.type == INTEL_PT_PAD);
+
+	return 0;
+}
+
+static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
+{
+	uint64_t timestamp, masked_timestamp;
+
+	timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
+	masked_timestamp = timestamp & decoder->period_mask;
+	if (decoder->continuous_period) {
+		if (masked_timestamp != decoder->last_masked_timestamp)
+			return 1;
+	} else {
+		timestamp += 1;
+		masked_timestamp = timestamp & decoder->period_mask;
+		if (masked_timestamp != decoder->last_masked_timestamp) {
+			decoder->last_masked_timestamp = masked_timestamp;
+			decoder->continuous_period = true;
+		}
+	}
+	return decoder->period_ticks - (timestamp - masked_timestamp);
+}
+
+static uint64_t intel_pt_next_sample(struct intel_pt_decoder *decoder)
+{
+	switch (decoder->period_type) {
+	case INTEL_PT_PERIOD_INSTRUCTIONS:
+		return decoder->period - decoder->period_insn_cnt;
+	case INTEL_PT_PERIOD_TICKS:
+		return intel_pt_next_period(decoder);
+	case INTEL_PT_PERIOD_NONE:
+	case INTEL_PT_PERIOD_MTC:
+	default:
+		return 0;
+	}
+}
+
+static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
+{
+	uint64_t timestamp, masked_timestamp;
+
+	switch (decoder->period_type) {
+	case INTEL_PT_PERIOD_INSTRUCTIONS:
+		decoder->period_insn_cnt = 0;
+		break;
+	case INTEL_PT_PERIOD_TICKS:
+		timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
+		masked_timestamp = timestamp & decoder->period_mask;
+		decoder->last_masked_timestamp = masked_timestamp;
+		break;
+	case INTEL_PT_PERIOD_NONE:
+	case INTEL_PT_PERIOD_MTC:
+	default:
+		break;
+	}
+
+	decoder->state.type |= INTEL_PT_INSTRUCTION;
+}
+
+static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
+			      struct intel_pt_insn *intel_pt_insn, uint64_t ip)
+{
+	uint64_t max_insn_cnt, insn_cnt = 0;
+	int err;
+
+	if (!decoder->mtc_insn)
+		decoder->mtc_insn = true;
+
+	max_insn_cnt = intel_pt_next_sample(decoder);
+
+	err = decoder->walk_insn(intel_pt_insn, &insn_cnt, &decoder->ip, ip,
+				 max_insn_cnt, decoder->data);
+
+	decoder->tot_insn_cnt += insn_cnt;
+	decoder->timestamp_insn_cnt += insn_cnt;
+	decoder->period_insn_cnt += insn_cnt;
+
+	if (err) {
+		decoder->no_progress = 0;
+		decoder->pkt_state = INTEL_PT_STATE_ERR2;
+		intel_pt_log_at("ERROR: Failed to get instruction",
+				decoder->ip);
+		if (err == -ENOENT)
+			return -ENOLINK;
+		return -EILSEQ;
+	}
+
+	if (ip && decoder->ip == ip) {
+		err = -EAGAIN;
+		goto out;
+	}
+
+	if (max_insn_cnt && insn_cnt >= max_insn_cnt)
+		intel_pt_sample_insn(decoder);
+
+	if (intel_pt_insn->branch == INTEL_PT_BR_NO_BRANCH) {
+		decoder->state.type = INTEL_PT_INSTRUCTION;
+		decoder->state.from_ip = decoder->ip;
+		decoder->state.to_ip = 0;
+		decoder->ip += intel_pt_insn->length;
+		err = INTEL_PT_RETURN;
+		goto out;
+	}
+
+	if (intel_pt_insn->op == INTEL_PT_OP_CALL) {
+		/* Zero-length calls are excluded */
+		if (intel_pt_insn->branch != INTEL_PT_BR_UNCONDITIONAL ||
+		    intel_pt_insn->rel) {
+			err = intel_pt_push(&decoder->stack, decoder->ip +
+					    intel_pt_insn->length);
+			if (err)
+				goto out;
+		}
+	} else if (intel_pt_insn->op == INTEL_PT_OP_RET) {
+		decoder->ret_addr = intel_pt_pop(&decoder->stack);
+	}
+
+	if (intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL) {
+		int cnt = decoder->no_progress++;
+
+		decoder->state.from_ip = decoder->ip;
+		decoder->ip += intel_pt_insn->length +
+				intel_pt_insn->rel;
+		decoder->state.to_ip = decoder->ip;
+		err = INTEL_PT_RETURN;
+
+		/*
+		 * Check for being stuck in a loop.  This can happen if a
+		 * decoder error results in the decoder erroneously setting the
+		 * ip to an address that is itself in an infinite loop that
+		 * consumes no packets.  When that happens, there must be an
+		 * unconditional branch.
+		 */
+		if (cnt) {
+			if (cnt == 1) {
+				decoder->stuck_ip = decoder->state.to_ip;
+				decoder->stuck_ip_prd = 1;
+				decoder->stuck_ip_cnt = 1;
+			} else if (cnt > INTEL_PT_MAX_LOOPS ||
+				   decoder->state.to_ip == decoder->stuck_ip) {
+				intel_pt_log_at("ERROR: Never-ending loop",
+						decoder->state.to_ip);
+				decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+				err = -ELOOP;
+				goto out;
+			} else if (!--decoder->stuck_ip_cnt) {
+				decoder->stuck_ip_prd += 1;
+				decoder->stuck_ip_cnt = decoder->stuck_ip_prd;
+				decoder->stuck_ip = decoder->state.to_ip;
+			}
+		}
+		goto out_no_progress;
+	}
+out:
+	decoder->no_progress = 0;
+out_no_progress:
+	decoder->state.insn_op = intel_pt_insn->op;
+	decoder->state.insn_len = intel_pt_insn->length;
+
+	if (decoder->tx_flags & INTEL_PT_IN_TX)
+		decoder->state.flags |= INTEL_PT_IN_TX;
+
+	return err;
+}
+
+static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
+{
+	struct intel_pt_insn intel_pt_insn;
+	uint64_t ip;
+	int err;
+
+	ip = decoder->last_ip;
+
+	while (1) {
+		err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
+		if (err == INTEL_PT_RETURN)
+			return 0;
+		if (err == -EAGAIN) {
+			if (decoder->set_fup_tx_flags) {
+				decoder->set_fup_tx_flags = false;
+				decoder->tx_flags = decoder->fup_tx_flags;
+				decoder->state.type = INTEL_PT_TRANSACTION;
+				decoder->state.from_ip = decoder->ip;
+				decoder->state.to_ip = 0;
+				decoder->state.flags = decoder->fup_tx_flags;
+				return 0;
+			}
+			return err;
+		}
+		decoder->set_fup_tx_flags = false;
+		if (err)
+			return err;
+
+		if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
+			intel_pt_log_at("ERROR: Unexpected indirect branch",
+					decoder->ip);
+			decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+			return -ENOENT;
+		}
+
+		if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
+			intel_pt_log_at("ERROR: Unexpected conditional branch",
+					decoder->ip);
+			decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+			return -ENOENT;
+		}
+
+		intel_pt_bug(decoder);
+	}
+}
+
+static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
+{
+	struct intel_pt_insn intel_pt_insn;
+	int err;
+
+	err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
+	if (err == INTEL_PT_RETURN)
+		return 0;
+	if (err)
+		return err;
+
+	if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
+		if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) {
+			decoder->pge = false;
+			decoder->continuous_period = false;
+			decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			if (decoder->packet.count != 0)
+				decoder->ip = decoder->last_ip;
+		} else {
+			decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+			decoder->state.from_ip = decoder->ip;
+			if (decoder->packet.count == 0) {
+				decoder->state.to_ip = 0;
+			} else {
+				decoder->state.to_ip = decoder->last_ip;
+				decoder->ip = decoder->last_ip;
+			}
+		}
+		return 0;
+	}
+
+	if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
+		intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
+				decoder->ip);
+		decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+		return -ENOENT;
+	}
+
+	return intel_pt_bug(decoder);
+}
+
+static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
+{
+	struct intel_pt_insn intel_pt_insn;
+	int err;
+
+	while (1) {
+		err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
+		if (err == INTEL_PT_RETURN)
+			return 0;
+		if (err)
+			return err;
+
+		if (intel_pt_insn.op == INTEL_PT_OP_RET) {
+			if (!decoder->return_compression) {
+				intel_pt_log_at("ERROR: RET when expecting conditional branch",
+						decoder->ip);
+				decoder->pkt_state = INTEL_PT_STATE_ERR3;
+				return -ENOENT;
+			}
+			if (!decoder->ret_addr) {
+				intel_pt_log_at("ERROR: Bad RET compression (stack empty)",
+						decoder->ip);
+				decoder->pkt_state = INTEL_PT_STATE_ERR3;
+				return -ENOENT;
+			}
+			if (!(decoder->tnt.payload & BIT63)) {
+				intel_pt_log_at("ERROR: Bad RET compression (TNT=N)",
+						decoder->ip);
+				decoder->pkt_state = INTEL_PT_STATE_ERR3;
+				return -ENOENT;
+			}
+			decoder->tnt.count -= 1;
+			if (!decoder->tnt.count)
+				decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+			decoder->tnt.payload <<= 1;
+			decoder->state.from_ip = decoder->ip;
+			decoder->ip = decoder->ret_addr;
+			decoder->state.to_ip = decoder->ip;
+			return 0;
+		}
+
+		if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
+			/* Handle deferred TIPs */
+			err = intel_pt_get_next_packet(decoder);
+			if (err)
+				return err;
+			if (decoder->packet.type != INTEL_PT_TIP ||
+			    decoder->packet.count == 0) {
+				intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch",
+						decoder->ip);
+				decoder->pkt_state = INTEL_PT_STATE_ERR3;
+				decoder->pkt_step = 0;
+				return -ENOENT;
+			}
+			intel_pt_set_last_ip(decoder);
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = decoder->last_ip;
+			decoder->ip = decoder->last_ip;
+			return 0;
+		}
+
+		if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
+			decoder->tnt.count -= 1;
+			if (!decoder->tnt.count)
+				decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+			if (decoder->tnt.payload & BIT63) {
+				decoder->tnt.payload <<= 1;
+				decoder->state.from_ip = decoder->ip;
+				decoder->ip += intel_pt_insn.length +
+					       intel_pt_insn.rel;
+				decoder->state.to_ip = decoder->ip;
+				return 0;
+			}
+			/* Instruction sample for a non-taken branch */
+			if (decoder->state.type & INTEL_PT_INSTRUCTION) {
+				decoder->tnt.payload <<= 1;
+				decoder->state.type = INTEL_PT_INSTRUCTION;
+				decoder->state.from_ip = decoder->ip;
+				decoder->state.to_ip = 0;
+				decoder->ip += intel_pt_insn.length;
+				return 0;
+			}
+			decoder->ip += intel_pt_insn.length;
+			if (!decoder->tnt.count)
+				return -EAGAIN;
+			decoder->tnt.payload <<= 1;
+			continue;
+		}
+
+		return intel_pt_bug(decoder);
+	}
+}
+
+static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip)
+{
+	unsigned int fup_tx_flags;
+	int err;
+
+	fup_tx_flags = decoder->packet.payload &
+		       (INTEL_PT_IN_TX | INTEL_PT_ABORT_TX);
+	err = intel_pt_get_next_packet(decoder);
+	if (err)
+		return err;
+	if (decoder->packet.type == INTEL_PT_FUP) {
+		decoder->fup_tx_flags = fup_tx_flags;
+		decoder->set_fup_tx_flags = true;
+		if (!(decoder->fup_tx_flags & INTEL_PT_ABORT_TX))
+			*no_tip = true;
+	} else {
+		intel_pt_log_at("ERROR: Missing FUP after MODE.TSX",
+				decoder->pos);
+		intel_pt_update_in_tx(decoder);
+	}
+	return 0;
+}
+
+static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
+{
+	uint64_t timestamp;
+
+	decoder->have_tma = false;
+
+	if (decoder->ref_timestamp) {
+		timestamp = decoder->packet.payload |
+			    (decoder->ref_timestamp & (0xffULL << 56));
+		if (timestamp < decoder->ref_timestamp) {
+			if (decoder->ref_timestamp - timestamp > (1ULL << 55))
+				timestamp += (1ULL << 56);
+		} else {
+			if (timestamp - decoder->ref_timestamp > (1ULL << 55))
+				timestamp -= (1ULL << 56);
+		}
+		decoder->tsc_timestamp = timestamp;
+		decoder->timestamp = timestamp;
+		decoder->ref_timestamp = 0;
+		decoder->timestamp_insn_cnt = 0;
+	} else if (decoder->timestamp) {
+		timestamp = decoder->packet.payload |
+			    (decoder->timestamp & (0xffULL << 56));
+		decoder->tsc_timestamp = timestamp;
+		if (timestamp < decoder->timestamp &&
+		    decoder->timestamp - timestamp < decoder->tsc_slip) {
+			intel_pt_log_to("Suppressing backwards timestamp",
+					timestamp);
+			timestamp = decoder->timestamp;
+		}
+		while (timestamp < decoder->timestamp) {
+			intel_pt_log_to("Wraparound timestamp", timestamp);
+			timestamp += (1ULL << 56);
+			decoder->tsc_timestamp = timestamp;
+		}
+		decoder->timestamp = timestamp;
+		decoder->timestamp_insn_cnt = 0;
+	}
+
+	if (decoder->last_packet_type == INTEL_PT_CYC) {
+		decoder->cyc_ref_timestamp = decoder->timestamp;
+		decoder->cycle_cnt = 0;
+		decoder->have_calc_cyc_to_tsc = false;
+		intel_pt_calc_cyc_to_tsc(decoder, false);
+	}
+
+	intel_pt_log_to("Setting timestamp", decoder->timestamp);
+}
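
The TSC packet carries only the low 7 bytes of the timestamp, so the top byte
is borrowed from the reference (or previous) timestamp and the result is
nudged by 2^56 when the apparent jump exceeds 2^55.  For example, with a
reference timestamp of 0x02ffffffffffff00 and a TSC payload of 0x100, the
naive combination 0x0200000000000100 lies far below the reference, so 2^56 is
added and the timestamp becomes 0x0300000000000100.
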
+
+static int intel_pt_overflow(struct intel_pt_decoder *decoder)
+{
+	intel_pt_log("ERROR: Buffer overflow\n");
+	intel_pt_clear_tx_flags(decoder);
+	decoder->have_tma = false;
+	decoder->cbr = 0;
+	decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+	decoder->overflow = true;
+	return -EOVERFLOW;
+}
+
+static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
+{
+	uint32_t ctc = decoder->packet.payload;
+	uint32_t fc = decoder->packet.count;
+	uint32_t ctc_rem = ctc & decoder->ctc_rem_mask;
+
+	if (!decoder->tsc_ctc_ratio_d)
+		return;
+
+	decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
+	decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
+	if (decoder->tsc_ctc_mult) {
+		decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
+	} else {
+		decoder->ctc_timestamp -= multdiv(ctc_rem,
+						  decoder->tsc_ctc_ratio_n,
+						  decoder->tsc_ctc_ratio_d);
+	}
+	decoder->ctc_delta = 0;
+	decoder->have_tma = true;
+	intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x  CTC rem %#x\n",
+		     decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
+}
+
+static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
+{
+	uint64_t timestamp;
+	uint32_t mtc, mtc_delta;
+
+	if (!decoder->have_tma)
+		return;
+
+	mtc = decoder->packet.payload;
+
+	if (mtc > decoder->last_mtc)
+		mtc_delta = mtc - decoder->last_mtc;
+	else
+		mtc_delta = mtc + 256 - decoder->last_mtc;
+
+	decoder->ctc_delta += mtc_delta << decoder->mtc_shift;
+
+	if (decoder->tsc_ctc_mult) {
+		timestamp = decoder->ctc_timestamp +
+			    decoder->ctc_delta * decoder->tsc_ctc_mult;
+	} else {
+		timestamp = decoder->ctc_timestamp +
+			    multdiv(decoder->ctc_delta,
+				    decoder->tsc_ctc_ratio_n,
+				    decoder->tsc_ctc_ratio_d);
+	}
+
+	if (timestamp < decoder->timestamp)
+		intel_pt_log("Suppressing MTC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
+			     timestamp, decoder->timestamp);
+	else
+		decoder->timestamp = timestamp;
+
+	decoder->timestamp_insn_cnt = 0;
+	decoder->last_mtc = mtc;
+
+	if (decoder->last_packet_type == INTEL_PT_CYC) {
+		decoder->cyc_ref_timestamp = decoder->timestamp;
+		decoder->cycle_cnt = 0;
+		decoder->have_calc_cyc_to_tsc = false;
+		intel_pt_calc_cyc_to_tsc(decoder, true);
+	}
+}
+
+static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
+{
+	unsigned int cbr = decoder->packet.payload;
+
+	if (decoder->cbr == cbr)
+		return;
+
+	decoder->cbr = cbr;
+	decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
+}
+
+static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
+{
+	uint64_t timestamp = decoder->cyc_ref_timestamp;
+
+	decoder->have_cyc = true;
+
+	decoder->cycle_cnt += decoder->packet.payload;
+
+	if (!decoder->cyc_ref_timestamp)
+		return;
+
+	if (decoder->have_calc_cyc_to_tsc)
+		timestamp += decoder->cycle_cnt * decoder->calc_cyc_to_tsc;
+	else if (decoder->cbr)
+		timestamp += decoder->cycle_cnt * decoder->cbr_cyc_to_tsc;
+	else
+		return;
+
+	if (timestamp < decoder->timestamp)
+		intel_pt_log("Suppressing CYC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
+			     timestamp, decoder->timestamp);
+	else
+		decoder->timestamp = timestamp;
+}
+
+/* Walk PSB+ packets when already in sync. */
+static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	while (1) {
+		err = intel_pt_get_next_packet(decoder);
+		if (err)
+			return err;
+
+		switch (decoder->packet.type) {
+		case INTEL_PT_PSBEND:
+			return 0;
+
+		case INTEL_PT_TIP_PGD:
+		case INTEL_PT_TIP_PGE:
+		case INTEL_PT_TIP:
+		case INTEL_PT_TNT:
+		case INTEL_PT_TRACESTOP:
+		case INTEL_PT_BAD:
+		case INTEL_PT_PSB:
+			decoder->have_tma = false;
+			intel_pt_log("ERROR: Unexpected packet\n");
+			return -EAGAIN;
+
+		case INTEL_PT_OVF:
+			return intel_pt_overflow(decoder);
+
+		case INTEL_PT_TSC:
+			intel_pt_calc_tsc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_TMA:
+			intel_pt_calc_tma(decoder);
+			break;
+
+		case INTEL_PT_CBR:
+			intel_pt_calc_cbr(decoder);
+			break;
+
+		case INTEL_PT_MODE_EXEC:
+			decoder->exec_mode = decoder->packet.payload;
+			break;
+
+		case INTEL_PT_PIP:
+			decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+			break;
+
+		case INTEL_PT_FUP:
+			decoder->pge = true;
+			intel_pt_set_last_ip(decoder);
+			break;
+
+		case INTEL_PT_MODE_TSX:
+			intel_pt_update_in_tx(decoder);
+			break;
+
+		case INTEL_PT_MTC:
+			intel_pt_calc_mtc_timestamp(decoder);
+			if (decoder->period_type == INTEL_PT_PERIOD_MTC)
+				decoder->state.type |= INTEL_PT_INSTRUCTION;
+			break;
+
+		case INTEL_PT_CYC:
+		case INTEL_PT_VMCS:
+		case INTEL_PT_MNT:
+		case INTEL_PT_PAD:
+		default:
+			break;
+		}
+	}
+}
+
+static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	if (decoder->tx_flags & INTEL_PT_ABORT_TX) {
+		decoder->tx_flags = 0;
+		decoder->state.flags &= ~INTEL_PT_IN_TX;
+		decoder->state.flags |= INTEL_PT_ABORT_TX;
+	} else {
+		decoder->state.flags |= INTEL_PT_ASYNC;
+	}
+
+	while (1) {
+		err = intel_pt_get_next_packet(decoder);
+		if (err)
+			return err;
+
+		switch (decoder->packet.type) {
+		case INTEL_PT_TNT:
+		case INTEL_PT_FUP:
+		case INTEL_PT_TRACESTOP:
+		case INTEL_PT_PSB:
+		case INTEL_PT_TSC:
+		case INTEL_PT_TMA:
+		case INTEL_PT_CBR:
+		case INTEL_PT_MODE_TSX:
+		case INTEL_PT_BAD:
+		case INTEL_PT_PSBEND:
+			intel_pt_log("ERROR: Missing TIP after FUP\n");
+			decoder->pkt_state = INTEL_PT_STATE_ERR3;
+			return -ENOENT;
+
+		case INTEL_PT_OVF:
+			return intel_pt_overflow(decoder);
+
+		case INTEL_PT_TIP_PGD:
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			if (decoder->packet.count != 0) {
+				intel_pt_set_ip(decoder);
+				intel_pt_log("Omitting PGD ip " x64_fmt "\n",
+					     decoder->ip);
+			}
+			decoder->pge = false;
+			decoder->continuous_period = false;
+			return 0;
+
+		case INTEL_PT_TIP_PGE:
+			decoder->pge = true;
+			intel_pt_log("Omitting PGE ip " x64_fmt "\n",
+				     decoder->ip);
+			decoder->state.from_ip = 0;
+			if (decoder->packet.count == 0) {
+				decoder->state.to_ip = 0;
+			} else {
+				intel_pt_set_ip(decoder);
+				decoder->state.to_ip = decoder->ip;
+			}
+			return 0;
+
+		case INTEL_PT_TIP:
+			decoder->state.from_ip = decoder->ip;
+			if (decoder->packet.count == 0) {
+				decoder->state.to_ip = 0;
+			} else {
+				intel_pt_set_ip(decoder);
+				decoder->state.to_ip = decoder->ip;
+			}
+			return 0;
+
+		case INTEL_PT_PIP:
+			decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+			break;
+
+		case INTEL_PT_MTC:
+			intel_pt_calc_mtc_timestamp(decoder);
+			if (decoder->period_type == INTEL_PT_PERIOD_MTC)
+				decoder->state.type |= INTEL_PT_INSTRUCTION;
+			break;
+
+		case INTEL_PT_CYC:
+			intel_pt_calc_cyc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_MODE_EXEC:
+			decoder->exec_mode = decoder->packet.payload;
+			break;
+
+		case INTEL_PT_VMCS:
+		case INTEL_PT_MNT:
+		case INTEL_PT_PAD:
+			break;
+
+		default:
+			return intel_pt_bug(decoder);
+		}
+	}
+}
+
+static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
+{
+	bool no_tip = false;
+	int err;
+
+	while (1) {
+		err = intel_pt_get_next_packet(decoder);
+		if (err)
+			return err;
+next:
+		switch (decoder->packet.type) {
+		case INTEL_PT_TNT:
+			if (!decoder->packet.count)
+				break;
+			decoder->tnt = decoder->packet;
+			decoder->pkt_state = INTEL_PT_STATE_TNT;
+			err = intel_pt_walk_tnt(decoder);
+			if (err == -EAGAIN)
+				break;
+			return err;
+
+		case INTEL_PT_TIP_PGD:
+			if (decoder->packet.count != 0)
+				intel_pt_set_last_ip(decoder);
+			decoder->pkt_state = INTEL_PT_STATE_TIP_PGD;
+			return intel_pt_walk_tip(decoder);
+
+		case INTEL_PT_TIP_PGE: {
+			decoder->pge = true;
+			if (decoder->packet.count == 0) {
+				intel_pt_log_at("Skipping zero TIP.PGE",
+						decoder->pos);
+				break;
+			}
+			intel_pt_set_ip(decoder);
+			decoder->state.from_ip = 0;
+			decoder->state.to_ip = decoder->ip;
+			return 0;
+		}
+
+		case INTEL_PT_OVF:
+			return intel_pt_overflow(decoder);
+
+		case INTEL_PT_TIP:
+			if (decoder->packet.count != 0)
+				intel_pt_set_last_ip(decoder);
+			decoder->pkt_state = INTEL_PT_STATE_TIP;
+			return intel_pt_walk_tip(decoder);
+
+		case INTEL_PT_FUP:
+			if (decoder->packet.count == 0) {
+				intel_pt_log_at("Skipping zero FUP",
+						decoder->pos);
+				no_tip = false;
+				break;
+			}
+			intel_pt_set_last_ip(decoder);
+			err = intel_pt_walk_fup(decoder);
+			if (err != -EAGAIN) {
+				if (err)
+					return err;
+				if (no_tip)
+					decoder->pkt_state =
+						INTEL_PT_STATE_FUP_NO_TIP;
+				else
+					decoder->pkt_state = INTEL_PT_STATE_FUP;
+				return 0;
+			}
+			if (no_tip) {
+				no_tip = false;
+				break;
+			}
+			return intel_pt_walk_fup_tip(decoder);
+
+		case INTEL_PT_TRACESTOP:
+			decoder->pge = false;
+			decoder->continuous_period = false;
+			intel_pt_clear_tx_flags(decoder);
+			decoder->have_tma = false;
+			break;
+
+		case INTEL_PT_PSB:
+			intel_pt_clear_stack(&decoder->stack);
+			err = intel_pt_walk_psbend(decoder);
+			if (err == -EAGAIN)
+				goto next;
+			if (err)
+				return err;
+			break;
+
+		case INTEL_PT_PIP:
+			decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+			break;
+
+		case INTEL_PT_MTC:
+			intel_pt_calc_mtc_timestamp(decoder);
+			if (decoder->period_type != INTEL_PT_PERIOD_MTC)
+				break;
+			/*
+			 * Ensure that there has been an instruction since the
+			 * last MTC.
+			 */
+			if (!decoder->mtc_insn)
+				break;
+			decoder->mtc_insn = false;
+			/* Ensure that there is a timestamp */
+			if (!decoder->timestamp)
+				break;
+			decoder->state.type = INTEL_PT_INSTRUCTION;
+			decoder->state.from_ip = decoder->ip;
+			decoder->state.to_ip = 0;
+			decoder->mtc_insn = false;
+			return 0;
+
+		case INTEL_PT_TSC:
+			intel_pt_calc_tsc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_TMA:
+			intel_pt_calc_tma(decoder);
+			break;
+
+		case INTEL_PT_CYC:
+			intel_pt_calc_cyc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_CBR:
+			intel_pt_calc_cbr(decoder);
+			break;
+
+		case INTEL_PT_MODE_EXEC:
+			decoder->exec_mode = decoder->packet.payload;
+			break;
+
+		case INTEL_PT_MODE_TSX:
+			/* MODE_TSX need not be followed by FUP */
+			if (!decoder->pge) {
+				intel_pt_update_in_tx(decoder);
+				break;
+			}
+			err = intel_pt_mode_tsx(decoder, &no_tip);
+			if (err)
+				return err;
+			goto next;
+
+		case INTEL_PT_BAD: /* Does not happen */
+			return intel_pt_bug(decoder);
+
+		case INTEL_PT_PSBEND:
+		case INTEL_PT_VMCS:
+		case INTEL_PT_MNT:
+		case INTEL_PT_PAD:
+			break;
+
+		default:
+			return intel_pt_bug(decoder);
+		}
+	}
+}
+
+/* Walk PSB+ packets to get in sync. */
+static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	while (1) {
+		err = intel_pt_get_next_packet(decoder);
+		if (err)
+			return err;
+
+		switch (decoder->packet.type) {
+		case INTEL_PT_TIP_PGD:
+			decoder->continuous_period = false;
+		case INTEL_PT_TIP_PGE:
+		case INTEL_PT_TIP:
+			intel_pt_log("ERROR: Unexpected packet\n");
+			return -ENOENT;
+
+		case INTEL_PT_FUP:
+			decoder->pge = true;
+			if (decoder->last_ip || decoder->packet.count == 6 ||
+			    decoder->packet.count == 0) {
+				uint64_t current_ip = decoder->ip;
+
+				intel_pt_set_ip(decoder);
+				if (current_ip)
+					intel_pt_log_to("Setting IP",
+							decoder->ip);
+			}
+			break;
+
+		case INTEL_PT_MTC:
+			intel_pt_calc_mtc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_TSC:
+			intel_pt_calc_tsc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_TMA:
+			intel_pt_calc_tma(decoder);
+			break;
+
+		case INTEL_PT_CYC:
+			intel_pt_calc_cyc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_CBR:
+			intel_pt_calc_cbr(decoder);
+			break;
+
+		case INTEL_PT_PIP:
+			decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+			break;
+
+		case INTEL_PT_MODE_EXEC:
+			decoder->exec_mode = decoder->packet.payload;
+			break;
+
+		case INTEL_PT_MODE_TSX:
+			intel_pt_update_in_tx(decoder);
+			break;
+
+		case INTEL_PT_TRACESTOP:
+			decoder->pge = false;
+			decoder->continuous_period = false;
+			intel_pt_clear_tx_flags(decoder);
+		case INTEL_PT_TNT:
+			decoder->have_tma = false;
+			intel_pt_log("ERROR: Unexpected packet\n");
+			if (decoder->ip)
+				decoder->pkt_state = INTEL_PT_STATE_ERR4;
+			else
+				decoder->pkt_state = INTEL_PT_STATE_ERR3;
+			return -ENOENT;
+
+		case INTEL_PT_BAD: /* Does not happen */
+			return intel_pt_bug(decoder);
+
+		case INTEL_PT_OVF:
+			return intel_pt_overflow(decoder);
+
+		case INTEL_PT_PSBEND:
+			return 0;
+
+		case INTEL_PT_PSB:
+		case INTEL_PT_VMCS:
+		case INTEL_PT_MNT:
+		case INTEL_PT_PAD:
+		default:
+			break;
+		}
+	}
+}
+
+static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	while (1) {
+		err = intel_pt_get_next_packet(decoder);
+		if (err)
+			return err;
+
+		switch (decoder->packet.type) {
+		case INTEL_PT_TIP_PGD:
+			decoder->continuous_period = false;
+		case INTEL_PT_TIP_PGE:
+		case INTEL_PT_TIP:
+			decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
+			if (decoder->last_ip || decoder->packet.count == 6 ||
+			    decoder->packet.count == 0)
+				intel_pt_set_ip(decoder);
+			if (decoder->ip)
+				return 0;
+			break;
+
+		case INTEL_PT_FUP:
+			if (decoder->overflow) {
+				if (decoder->last_ip ||
+				    decoder->packet.count == 6 ||
+				    decoder->packet.count == 0)
+					intel_pt_set_ip(decoder);
+				if (decoder->ip)
+					return 0;
+			}
+			if (decoder->packet.count)
+				intel_pt_set_last_ip(decoder);
+			break;
+
+		case INTEL_PT_MTC:
+			intel_pt_calc_mtc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_TSC:
+			intel_pt_calc_tsc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_TMA:
+			intel_pt_calc_tma(decoder);
+			break;
+
+		case INTEL_PT_CYC:
+			intel_pt_calc_cyc_timestamp(decoder);
+			break;
+
+		case INTEL_PT_CBR:
+			intel_pt_calc_cbr(decoder);
+			break;
+
+		case INTEL_PT_PIP:
+			decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
+			break;
+
+		case INTEL_PT_MODE_EXEC:
+			decoder->exec_mode = decoder->packet.payload;
+			break;
+
+		case INTEL_PT_MODE_TSX:
+			intel_pt_update_in_tx(decoder);
+			break;
+
+		case INTEL_PT_OVF:
+			return intel_pt_overflow(decoder);
+
+		case INTEL_PT_BAD: /* Does not happen */
+			return intel_pt_bug(decoder);
+
+		case INTEL_PT_TRACESTOP:
+			decoder->pge = false;
+			decoder->continuous_period = false;
+			intel_pt_clear_tx_flags(decoder);
+			decoder->have_tma = false;
+			break;
+
+		case INTEL_PT_PSB:
+			err = intel_pt_walk_psb(decoder);
+			if (err)
+				return err;
+			if (decoder->ip) {
+				/* Do not have a sample */
+				decoder->state.type = 0;
+				return 0;
+			}
+			break;
+
+		case INTEL_PT_TNT:
+		case INTEL_PT_PSBEND:
+		case INTEL_PT_VMCS:
+		case INTEL_PT_MNT:
+		case INTEL_PT_PAD:
+		default:
+			break;
+		}
+	}
+}
+
+static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	intel_pt_log("Scanning for full IP\n");
+	err = intel_pt_walk_to_ip(decoder);
+	if (err)
+		return err;
+
+	decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+	decoder->overflow = false;
+
+	decoder->state.from_ip = 0;
+	decoder->state.to_ip = decoder->ip;
+	intel_pt_log_to("Setting IP", decoder->ip);
+
+	return 0;
+}
+
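+/*
+ * A PSB packet can be split across two buffers.  intel_pt_part_psb() returns
+ * the number of bytes of a partial PSB found at the end of the current
+ * buffer, intel_pt_rest_psb() checks whether the remainder of that PSB is at
+ * the start of the next buffer, and intel_pt_get_split_psb() reconstructs the
+ * complete PSB in a temporary buffer so that packet decoding can proceed.
+ */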
+static int intel_pt_part_psb(struct intel_pt_decoder *decoder)
+{
+	const unsigned char *end = decoder->buf + decoder->len;
+	size_t i;
+
+	for (i = INTEL_PT_PSB_LEN - 1; i; i--) {
+		if (i > decoder->len)
+			continue;
+		if (!memcmp(end - i, INTEL_PT_PSB_STR, i))
+			return i;
+	}
+	return 0;
+}
+
+static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb)
+{
+	size_t rest_psb = INTEL_PT_PSB_LEN - part_psb;
+	const char *psb = INTEL_PT_PSB_STR;
+
+	if (rest_psb > decoder->len ||
+	    memcmp(decoder->buf, psb + part_psb, rest_psb))
+		return 0;
+
+	return rest_psb;
+}
+
+static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
+				  int part_psb)
+{
+	int rest_psb, ret;
+
+	decoder->pos += decoder->len;
+	decoder->len = 0;
+
+	ret = intel_pt_get_next_data(decoder);
+	if (ret)
+		return ret;
+
+	rest_psb = intel_pt_rest_psb(decoder, part_psb);
+	if (!rest_psb)
+		return 0;
+
+	decoder->pos -= part_psb;
+	decoder->next_buf = decoder->buf + rest_psb;
+	decoder->next_len = decoder->len - rest_psb;
+	memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
+	decoder->buf = decoder->temp_buf;
+	decoder->len = INTEL_PT_PSB_LEN;
+
+	return 0;
+}
+
+static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
+{
+	unsigned char *next;
+	int ret;
+
+	intel_pt_log("Scanning for PSB\n");
+	while (1) {
+		if (!decoder->len) {
+			ret = intel_pt_get_next_data(decoder);
+			if (ret)
+				return ret;
+		}
+
+		next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR,
+			      INTEL_PT_PSB_LEN);
+		if (!next) {
+			int part_psb;
+
+			part_psb = intel_pt_part_psb(decoder);
+			if (part_psb) {
+				ret = intel_pt_get_split_psb(decoder, part_psb);
+				if (ret)
+					return ret;
+			} else {
+				decoder->pos += decoder->len;
+				decoder->len = 0;
+			}
+			continue;
+		}
+
+		decoder->pkt_step = next - decoder->buf;
+		return intel_pt_get_next_packet(decoder);
+	}
+}
+
+static int intel_pt_sync(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	decoder->pge = false;
+	decoder->continuous_period = false;
+	decoder->last_ip = 0;
+	decoder->ip = 0;
+	intel_pt_clear_stack(&decoder->stack);
+
+	err = intel_pt_scan_for_psb(decoder);
+	if (err)
+		return err;
+
+	decoder->pkt_state = INTEL_PT_STATE_NO_IP;
+
+	err = intel_pt_walk_psb(decoder);
+	if (err)
+		return err;
+
+	if (decoder->ip) {
+		decoder->state.type = 0; /* Do not have a sample */
+		decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+	} else {
+		return intel_pt_sync_ip(decoder);
+	}
+
+	return 0;
+}
+
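+/*
+ * Estimate a timestamp for the current position: scale the number of
+ * instructions executed since the last timestamp (roughly two core cycles per
+ * instruction) from core cycles to TSC ticks using the core-to-bus ratio
+ * (CBR) and the maximum non-turbo ratio.
+ */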
+static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
+{
+	uint64_t est = decoder->timestamp_insn_cnt << 1;
+
+	if (!decoder->cbr || !decoder->max_non_turbo_ratio)
+		goto out;
+
+	est *= decoder->max_non_turbo_ratio;
+	est /= decoder->cbr;
+out:
+	return decoder->timestamp + est;
+}
+
+const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+{
+	int err;
+
+	do {
+		decoder->state.type = INTEL_PT_BRANCH;
+		decoder->state.flags = 0;
+
+		switch (decoder->pkt_state) {
+		case INTEL_PT_STATE_NO_PSB:
+			err = intel_pt_sync(decoder);
+			break;
+		case INTEL_PT_STATE_NO_IP:
+			decoder->last_ip = 0;
+			/* Fall through */
+		case INTEL_PT_STATE_ERR_RESYNC:
+			err = intel_pt_sync_ip(decoder);
+			break;
+		case INTEL_PT_STATE_IN_SYNC:
+			err = intel_pt_walk_trace(decoder);
+			break;
+		case INTEL_PT_STATE_TNT:
+			err = intel_pt_walk_tnt(decoder);
+			if (err == -EAGAIN)
+				err = intel_pt_walk_trace(decoder);
+			break;
+		case INTEL_PT_STATE_TIP:
+		case INTEL_PT_STATE_TIP_PGD:
+			err = intel_pt_walk_tip(decoder);
+			break;
+		case INTEL_PT_STATE_FUP:
+			decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+			err = intel_pt_walk_fup(decoder);
+			if (err == -EAGAIN)
+				err = intel_pt_walk_fup_tip(decoder);
+			else if (!err)
+				decoder->pkt_state = INTEL_PT_STATE_FUP;
+			break;
+		case INTEL_PT_STATE_FUP_NO_TIP:
+			decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
+			err = intel_pt_walk_fup(decoder);
+			if (err == -EAGAIN)
+				err = intel_pt_walk_trace(decoder);
+			break;
+		default:
+			err = intel_pt_bug(decoder);
+			break;
+		}
+	} while (err == -ENOLINK);
+
+	decoder->state.err = err ? intel_pt_ext_err(err) : 0;
+	decoder->state.timestamp = decoder->timestamp;
+	decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
+	decoder->state.cr3 = decoder->cr3;
+	decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
+
+	if (err)
+		decoder->state.from_ip = decoder->ip;
+
+	return &decoder->state;
+}
+
+static bool intel_pt_at_psb(unsigned char *buf, size_t len)
+{
+	if (len < INTEL_PT_PSB_LEN)
+		return false;
+	return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
+		      INTEL_PT_PSB_LEN);
+}
+
+/**
+ * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
+ * @buf: pointer to buffer pointer
+ * @len: size of buffer
+ *
+ * Updates the buffer pointer to point to the start of the next PSB packet if
+ * there is one, otherwise the buffer pointer is unchanged.  If @buf is updated,
+ * @len is adjusted accordingly.
+ *
+ * Return: %true if a PSB packet is found, %false otherwise.
+ */
+static bool intel_pt_next_psb(unsigned char **buf, size_t *len)
+{
+	unsigned char *next;
+
+	next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
+	if (next) {
+		*len -= next - *buf;
+		*buf = next;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * intel_pt_step_psb - move buffer pointer to the start of the following PSB
+ *                     packet.
+ * @buf: pointer to buffer pointer
+ * @len: size of buffer
+ *
+ * Updates the buffer pointer to point to the start of the following PSB packet
+ * (skipping the PSB at @buf itself) if there is one, otherwise the buffer
+ * pointer is unchanged.  If @buf is updated, @len is adjusted accordingly.
+ *
+ * Return: %true if a PSB packet is found, %false otherwise.
+ */
+static bool intel_pt_step_psb(unsigned char **buf, size_t *len)
+{
+	unsigned char *next;
+
+	if (!*len)
+		return false;
+
+	next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
+	if (next) {
+		*len -= next - *buf;
+		*buf = next;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * intel_pt_last_psb - find the last PSB packet in a buffer.
+ * @buf: buffer
+ * @len: size of buffer
+ *
+ * This function finds the last PSB in a buffer.
+ *
+ * Return: A pointer to the last PSB in @buf if found, %NULL otherwise.
+ */
+static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
+{
+	const char *n = INTEL_PT_PSB_STR;
+	unsigned char *p;
+	size_t k;
+
+	if (len < INTEL_PT_PSB_LEN)
+		return NULL;
+
+	k = len - INTEL_PT_PSB_LEN + 1;
+	while (1) {
+		p = memrchr(buf, n[0], k);
+		if (!p)
+			return NULL;
+		if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
+			return p;
+		k = p - buf;
+		if (!k)
+			return NULL;
+	}
+}
+
+/**
+ * intel_pt_next_tsc - find and return next TSC.
+ * @buf: buffer
+ * @len: size of buffer
+ * @tsc: TSC value returned
+ *
+ * Find a TSC packet in @buf and return the TSC value.  This function assumes
+ * that @buf starts at a PSB and that the PSB+ sequence will contain a TSC
+ * packet, and so stops if a PSBEND packet is found.
+ *
+ * Return: %true if TSC is found, %false otherwise.
+ */
+static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
+{
+	struct intel_pt_pkt packet;
+	int ret;
+
+	while (len) {
+		ret = intel_pt_get_packet(buf, len, &packet);
+		if (ret <= 0)
+			return false;
+		if (packet.type == INTEL_PT_TSC) {
+			*tsc = packet.payload;
+			return true;
+		}
+		if (packet.type == INTEL_PT_PSBEND)
+			return false;
+		buf += ret;
+		len -= ret;
+	}
+	return false;
+}
+
+/**
+ * intel_pt_tsc_cmp - compare 7-byte TSCs.
+ * @tsc1: first TSC to compare
+ * @tsc2: second TSC to compare
+ *
+ * This function compares 7-byte TSC values allowing for the possibility that
+ * the TSC wrapped around.  Generally it is not possible to know whether the
+ * TSC has wrapped around, so this function assumes that the absolute
+ * difference between the two values is less than half the maximum possible
+ * difference.
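+ *
+ * For example (illustrative): with 56-bit values, a @tsc1 of 0xffffffffffffff
+ * and a @tsc2 of 0x1 differ by almost the full range, so the TSC is assumed
+ * to have wrapped and @tsc1 is considered to be before @tsc2 (%-1).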
+ *
+ * Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is
+ * after @tsc2.
+ */
+static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
+{
+	const uint64_t halfway = (1ULL << 55);
+
+	if (tsc1 == tsc2)
+		return 0;
+
+	if (tsc1 < tsc2) {
+		if (tsc2 - tsc1 < halfway)
+			return -1;
+		else
+			return 1;
+	} else {
+		if (tsc1 - tsc2 < halfway)
+			return 1;
+		else
+			return -1;
+	}
+}
+
+/**
+ * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
+ *                             using TSC.
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ * @buf_b: second buffer
+ * @len_b: size of second buffer
+ *
+ * If the trace contains TSC we can look at the last TSC of @buf_a and the
+ * first TSC of @buf_b in order to determine if the buffers overlap, and then
+ * walk forward in @buf_b until a later TSC is found.  A precondition is that
+ * @buf_a and @buf_b are positioned at a PSB.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts, or
+ * @buf_b + @len_b if there is no non-overlapped data.
+ */
+static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
+						size_t len_a,
+						unsigned char *buf_b,
+						size_t len_b)
+{
+	uint64_t tsc_a, tsc_b;
+	unsigned char *p;
+	size_t len;
+
+	p = intel_pt_last_psb(buf_a, len_a);
+	if (!p)
+		return buf_b; /* No PSB in buf_a => no overlap */
+
+	len = len_a - (p - buf_a);
+	if (!intel_pt_next_tsc(p, len, &tsc_a)) {
+		/* The last PSB+ in buf_a is incomplete, so go back one more */
+		len_a -= len;
+		p = intel_pt_last_psb(buf_a, len_a);
+		if (!p)
+			return buf_b; /* No full PSB+ => assume no overlap */
+		len = len_a - (p - buf_a);
+		if (!intel_pt_next_tsc(p, len, &tsc_a))
+			return buf_b; /* No TSC in buf_a => assume no overlap */
+	}
+
+	while (1) {
+		/* Ignore PSB+ with no TSC */
+		if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
+		    intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
+			return buf_b; /* tsc_a < tsc_b => no overlap */
+
+		if (!intel_pt_step_psb(&buf_b, &len_b))
+			return buf_b + len_b; /* No PSB in buf_b => no data */
+	}
+}
+
+/**
+ * intel_pt_find_overlap - determine start of non-overlapped trace data.
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ * @buf_b: second buffer
+ * @len_b: size of second buffer
+ * @have_tsc: can use TSC packets to detect overlap
+ *
+ * When trace samples or snapshots are recorded, there is the possibility that
+ * the data overlaps.  Note that, for the purposes of decoding, data is only
+ * useful if it begins with a PSB packet.
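+ *
+ * If @have_tsc, the overlap is determined by comparing TSC timestamps (see
+ * intel_pt_find_overlap_tsc()), otherwise PSB-aligned regions of @buf_a and
+ * @buf_b are compared byte-for-byte.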
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts, or
+ * @buf_b + @len_b if there is no non-overlapped data.
+ */
+unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+				     unsigned char *buf_b, size_t len_b,
+				     bool have_tsc)
+{
+	unsigned char *found;
+
+	/* Buffer 'b' must start at PSB so throw away everything before that */
+	if (!intel_pt_next_psb(&buf_b, &len_b))
+		return buf_b + len_b; /* No PSB */
+
+	if (!intel_pt_next_psb(&buf_a, &len_a))
+		return buf_b; /* No overlap */
+
+	if (have_tsc) {
+		found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
+		if (found)
+			return found;
+	}
+
+	/*
+	 * Buffer 'b' cannot end within buffer 'a' so, for comparison purposes,
+	 * we can ignore the first part of buffer 'a'.
+	 */
+	while (len_b < len_a) {
+		if (!intel_pt_step_psb(&buf_a, &len_a))
+			return buf_b; /* No overlap */
+	}
+
+	/* Now len_b >= len_a */
+	if (len_b > len_a) {
+		/* The leftover buffer 'b' must start at a PSB */
+		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
+			if (!intel_pt_step_psb(&buf_a, &len_a))
+				return buf_b; /* No overlap */
+		}
+	}
+
+	while (1) {
+		/* Potential overlap so check the bytes */
+		found = memmem(buf_a, len_a, buf_b, len_a);
+		if (found)
+			return buf_b + len_a;
+
+		/* Try again at next PSB in buffer 'a' */
+		if (!intel_pt_step_psb(&buf_a, &len_a))
+			return buf_b; /* No overlap */
+
+		/* The leftover buffer 'b' must start at a PSB */
+		while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
+			if (!intel_pt_step_psb(&buf_a, &len_a))
+				return buf_b; /* No overlap */
+		}
+	}
+}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
new file mode 100644
index 0000000..02c38fe
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -0,0 +1,109 @@
+/*
+ * intel_pt_decoder.h: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__INTEL_PT_DECODER_H__
+#define INCLUDE__INTEL_PT_DECODER_H__
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+#include "intel-pt-insn-decoder.h"
+
+#define INTEL_PT_IN_TX		(1 << 0)
+#define INTEL_PT_ABORT_TX	(1 << 1)
+#define INTEL_PT_ASYNC		(1 << 2)
+
+enum intel_pt_sample_type {
+	INTEL_PT_BRANCH		= 1 << 0,
+	INTEL_PT_INSTRUCTION	= 1 << 1,
+	INTEL_PT_TRANSACTION	= 1 << 2,
+};
+
+enum intel_pt_period_type {
+	INTEL_PT_PERIOD_NONE,
+	INTEL_PT_PERIOD_INSTRUCTIONS,
+	INTEL_PT_PERIOD_TICKS,
+	INTEL_PT_PERIOD_MTC,
+};
+
+enum {
+	INTEL_PT_ERR_NOMEM = 1,
+	INTEL_PT_ERR_INTERN,
+	INTEL_PT_ERR_BADPKT,
+	INTEL_PT_ERR_NODATA,
+	INTEL_PT_ERR_NOINSN,
+	INTEL_PT_ERR_MISMAT,
+	INTEL_PT_ERR_OVR,
+	INTEL_PT_ERR_LOST,
+	INTEL_PT_ERR_UNK,
+	INTEL_PT_ERR_NELOOP,
+	INTEL_PT_ERR_MAX,
+};
+
+struct intel_pt_state {
+	enum intel_pt_sample_type type;
+	int err;
+	uint64_t from_ip;
+	uint64_t to_ip;
+	uint64_t cr3;
+	uint64_t tot_insn_cnt;
+	uint64_t timestamp;
+	uint64_t est_timestamp;
+	uint64_t trace_nr;
+	uint32_t flags;
+	enum intel_pt_insn_op insn_op;
+	int insn_len;
+};
+
+struct intel_pt_insn;
+
+struct intel_pt_buffer {
+	const unsigned char *buf;
+	size_t len;
+	bool consecutive;
+	uint64_t ref_timestamp;
+	uint64_t trace_nr;
+};
+
+struct intel_pt_params {
+	int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
+	int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
+			 uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
+			 uint64_t max_insn_cnt, void *data);
+	void *data;
+	bool return_compression;
+	uint64_t period;
+	enum intel_pt_period_type period_type;
+	unsigned max_non_turbo_ratio;
+	unsigned int mtc_period;
+	uint32_t tsc_ctc_ratio_n;
+	uint32_t tsc_ctc_ratio_d;
+};
+
+struct intel_pt_decoder;
+
+struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params);
+void intel_pt_decoder_free(struct intel_pt_decoder *decoder);
+
+const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
+
+unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
+				     unsigned char *buf_b, size_t len_b,
+				     bool have_tsc);
+
+int intel_pt__strerror(int code, char *buf, size_t buflen);
+
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
new file mode 100644
index 0000000..d23138c
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c
@@ -0,0 +1,249 @@
+/*
+ * intel_pt_insn_decoder.c: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#include "event.h"
+
+#include "insn.h"
+
+#include "inat.c"
+#include "insn.c"
+
+#include "intel-pt-insn-decoder.h"
+
+/* Based on branch_type() from perf_event_intel_lbr.c */
+static void intel_pt_insn_decoder(struct insn *insn,
+				  struct intel_pt_insn *intel_pt_insn)
+{
+	enum intel_pt_insn_op op = INTEL_PT_OP_OTHER;
+	enum intel_pt_insn_branch branch = INTEL_PT_BR_NO_BRANCH;
+	int ext;
+
+	if (insn_is_avx(insn)) {
+		intel_pt_insn->op = INTEL_PT_OP_OTHER;
+		intel_pt_insn->branch = INTEL_PT_BR_NO_BRANCH;
+		intel_pt_insn->length = insn->length;
+		return;
+	}
+
+	switch (insn->opcode.bytes[0]) {
+	case 0xf:
+		switch (insn->opcode.bytes[1]) {
+		case 0x05: /* syscall */
+		case 0x34: /* sysenter */
+			op = INTEL_PT_OP_SYSCALL;
+			branch = INTEL_PT_BR_INDIRECT;
+			break;
+		case 0x07: /* sysret */
+		case 0x35: /* sysexit */
+			op = INTEL_PT_OP_SYSRET;
+			branch = INTEL_PT_BR_INDIRECT;
+			break;
+		case 0x80 ... 0x8f: /* jcc */
+			op = INTEL_PT_OP_JCC;
+			branch = INTEL_PT_BR_CONDITIONAL;
+			break;
+		default:
+			break;
+		}
+		break;
+	case 0x70 ... 0x7f: /* jcc */
+		op = INTEL_PT_OP_JCC;
+		branch = INTEL_PT_BR_CONDITIONAL;
+		break;
+	case 0xc2: /* near ret */
+	case 0xc3: /* near ret */
+	case 0xca: /* far ret */
+	case 0xcb: /* far ret */
+		op = INTEL_PT_OP_RET;
+		branch = INTEL_PT_BR_INDIRECT;
+		break;
+	case 0xcf: /* iret */
+		op = INTEL_PT_OP_IRET;
+		branch = INTEL_PT_BR_INDIRECT;
+		break;
+	case 0xcc ... 0xce: /* int */
+		op = INTEL_PT_OP_INT;
+		branch = INTEL_PT_BR_INDIRECT;
+		break;
+	case 0xe8: /* call near rel */
+		op = INTEL_PT_OP_CALL;
+		branch = INTEL_PT_BR_UNCONDITIONAL;
+		break;
+	case 0x9a: /* call far absolute */
+		op = INTEL_PT_OP_CALL;
+		branch = INTEL_PT_BR_INDIRECT;
+		break;
+	case 0xe0 ... 0xe2: /* loop */
+		op = INTEL_PT_OP_LOOP;
+		branch = INTEL_PT_BR_CONDITIONAL;
+		break;
+	case 0xe3: /* jcc */
+		op = INTEL_PT_OP_JCC;
+		branch = INTEL_PT_BR_CONDITIONAL;
+		break;
+	case 0xe9: /* jmp */
+	case 0xeb: /* jmp */
+		op = INTEL_PT_OP_JMP;
+		branch = INTEL_PT_BR_UNCONDITIONAL;
+		break;
+	case 0xea: /* far jmp */
+		op = INTEL_PT_OP_JMP;
+		branch = INTEL_PT_BR_INDIRECT;
+		break;
+	case 0xff: /* call near absolute, call far absolute ind */
+		ext = (insn->modrm.bytes[0] >> 3) & 0x7;
+		switch (ext) {
+		case 2: /* near ind call */
+		case 3: /* far ind call */
+			op = INTEL_PT_OP_CALL;
+			branch = INTEL_PT_BR_INDIRECT;
+			break;
+		case 4:
+		case 5:
+			op = INTEL_PT_OP_JMP;
+			branch = INTEL_PT_BR_INDIRECT;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	intel_pt_insn->op = op;
+	intel_pt_insn->branch = branch;
+	intel_pt_insn->length = insn->length;
+
+	if (branch == INTEL_PT_BR_CONDITIONAL ||
+	    branch == INTEL_PT_BR_UNCONDITIONAL) {
+#if __BYTE_ORDER == __BIG_ENDIAN
+		switch (insn->immediate.nbytes) {
+		case 1:
+			intel_pt_insn->rel = insn->immediate.value;
+			break;
+		case 2:
+			intel_pt_insn->rel =
+					bswap_16((short)insn->immediate.value);
+			break;
+		case 4:
+			intel_pt_insn->rel = bswap_32(insn->immediate.value);
+			break;
+		default:
+			intel_pt_insn->rel = 0;
+			break;
+		}
+#else
+		intel_pt_insn->rel = insn->immediate.value;
+#endif
+	}
+}
+
+int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
+		      struct intel_pt_insn *intel_pt_insn)
+{
+	struct insn insn;
+
+	insn_init(&insn, buf, len, x86_64);
+	insn_get_length(&insn);
+	if (!insn_complete(&insn) || insn.length > len)
+		return -1;
+	intel_pt_insn_decoder(&insn, intel_pt_insn);
+	if (insn.length < INTEL_PT_INSN_DBG_BUF_SZ)
+		memcpy(intel_pt_insn->buf, buf, insn.length);
+	else
+		memcpy(intel_pt_insn->buf, buf, INTEL_PT_INSN_DBG_BUF_SZ);
+	return 0;
+}
+
+const char *branch_name[] = {
+	[INTEL_PT_OP_OTHER]	= "Other",
+	[INTEL_PT_OP_CALL]	= "Call",
+	[INTEL_PT_OP_RET]	= "Ret",
+	[INTEL_PT_OP_JCC]	= "Jcc",
+	[INTEL_PT_OP_JMP]	= "Jmp",
+	[INTEL_PT_OP_LOOP]	= "Loop",
+	[INTEL_PT_OP_IRET]	= "IRet",
+	[INTEL_PT_OP_INT]	= "Int",
+	[INTEL_PT_OP_SYSCALL]	= "Syscall",
+	[INTEL_PT_OP_SYSRET]	= "Sysret",
+};
+
+const char *intel_pt_insn_name(enum intel_pt_insn_op op)
+{
+	return branch_name[op];
+}
+
+int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf,
+		       size_t buf_len)
+{
+	switch (intel_pt_insn->branch) {
+	case INTEL_PT_BR_CONDITIONAL:
+	case INTEL_PT_BR_UNCONDITIONAL:
+		return snprintf(buf, buf_len, "%s %s%d",
+				intel_pt_insn_name(intel_pt_insn->op),
+				intel_pt_insn->rel > 0 ? "+" : "",
+				intel_pt_insn->rel);
+	case INTEL_PT_BR_NO_BRANCH:
+	case INTEL_PT_BR_INDIRECT:
+		return snprintf(buf, buf_len, "%s",
+				intel_pt_insn_name(intel_pt_insn->op));
+	default:
+		break;
+	}
+	return 0;
+}
+
+size_t intel_pt_insn_max_size(void)
+{
+	return MAX_INSN_SIZE;
+}
+
+int intel_pt_insn_type(enum intel_pt_insn_op op)
+{
+	switch (op) {
+	case INTEL_PT_OP_OTHER:
+		return 0;
+	case INTEL_PT_OP_CALL:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL;
+	case INTEL_PT_OP_RET:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN;
+	case INTEL_PT_OP_JCC:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL;
+	case INTEL_PT_OP_JMP:
+		return PERF_IP_FLAG_BRANCH;
+	case INTEL_PT_OP_LOOP:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL;
+	case INTEL_PT_OP_IRET:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
+		       PERF_IP_FLAG_INTERRUPT;
+	case INTEL_PT_OP_INT:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+		       PERF_IP_FLAG_INTERRUPT;
+	case INTEL_PT_OP_SYSCALL:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+		       PERF_IP_FLAG_SYSCALLRET;
+	case INTEL_PT_OP_SYSRET:
+		return PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN |
+		       PERF_IP_FLAG_SYSCALLRET;
+	default:
+		return 0;
+	}
+}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
new file mode 100644
index 0000000..b0adbf3
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h
@@ -0,0 +1,65 @@
+/*
+ * intel_pt_insn_decoder.h: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__INTEL_PT_INSN_DECODER_H__
+#define INCLUDE__INTEL_PT_INSN_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define INTEL_PT_INSN_DESC_MAX		32
+#define INTEL_PT_INSN_DBG_BUF_SZ	16
+
+enum intel_pt_insn_op {
+	INTEL_PT_OP_OTHER,
+	INTEL_PT_OP_CALL,
+	INTEL_PT_OP_RET,
+	INTEL_PT_OP_JCC,
+	INTEL_PT_OP_JMP,
+	INTEL_PT_OP_LOOP,
+	INTEL_PT_OP_IRET,
+	INTEL_PT_OP_INT,
+	INTEL_PT_OP_SYSCALL,
+	INTEL_PT_OP_SYSRET,
+};
+
+enum intel_pt_insn_branch {
+	INTEL_PT_BR_NO_BRANCH,
+	INTEL_PT_BR_INDIRECT,
+	INTEL_PT_BR_CONDITIONAL,
+	INTEL_PT_BR_UNCONDITIONAL,
+};
+
+struct intel_pt_insn {
+	enum intel_pt_insn_op		op;
+	enum intel_pt_insn_branch	branch;
+	int				length;
+	int32_t				rel;
+	unsigned char			buf[INTEL_PT_INSN_DBG_BUF_SZ];
+};
+
+int intel_pt_get_insn(const unsigned char *buf, size_t len, int x86_64,
+		      struct intel_pt_insn *intel_pt_insn);
+
+const char *intel_pt_insn_name(enum intel_pt_insn_op op);
+
+int intel_pt_insn_desc(const struct intel_pt_insn *intel_pt_insn, char *buf,
+		       size_t buf_len);
+
+size_t intel_pt_insn_max_size(void);
+
+int intel_pt_insn_type(enum intel_pt_insn_op op);
+
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
new file mode 100644
index 0000000..d09c7d9
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c
@@ -0,0 +1,155 @@
+/*
+ * intel_pt_log.c: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "intel-pt-log.h"
+#include "intel-pt-insn-decoder.h"
+
+#include "intel-pt-pkt-decoder.h"
+
+#define MAX_LOG_NAME 256
+
+static FILE *f;
+static char log_name[MAX_LOG_NAME];
+static bool enable_logging;
+
+void intel_pt_log_enable(void)
+{
+	enable_logging = true;
+}
+
+void intel_pt_log_disable(void)
+{
+	if (f)
+		fflush(f);
+	enable_logging = false;
+}
+
+void intel_pt_log_set_name(const char *name)
+{
+	/* Reserve room for the ".log" suffix and ensure NUL-termination */
+	strncpy(log_name, name, MAX_LOG_NAME - 5);
+	log_name[MAX_LOG_NAME - 5] = '\0';
+	strcat(log_name, ".log");
+}
+
+static void intel_pt_print_data(const unsigned char *buf, int len, uint64_t pos,
+				int indent)
+{
+	int i;
+
+	for (i = 0; i < indent; i++)
+		fprintf(f, " ");
+
+	fprintf(f, "  %08" PRIx64 ": ", pos);
+	for (i = 0; i < len; i++)
+		fprintf(f, " %02x", buf[i]);
+	for (; i < 16; i++)
+		fprintf(f, "   ");
+	fprintf(f, " ");
+}
+
+static void intel_pt_print_no_data(uint64_t pos, int indent)
+{
+	int i;
+
+	for (i = 0; i < indent; i++)
+		fprintf(f, " ");
+
+	fprintf(f, "  %08" PRIx64 ": ", pos);
+	for (i = 0; i < 16; i++)
+		fprintf(f, "   ");
+	fprintf(f, " ");
+}
+
+static int intel_pt_log_open(void)
+{
+	if (!enable_logging)
+		return -1;
+
+	if (f)
+		return 0;
+
+	if (!log_name[0])
+		return -1;
+
+	f = fopen(log_name, "w+");
+	if (!f) {
+		enable_logging = false;
+		return -1;
+	}
+
+	return 0;
+}
+
+void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
+			 uint64_t pos, const unsigned char *buf)
+{
+	char desc[INTEL_PT_PKT_DESC_MAX];
+
+	if (intel_pt_log_open())
+		return;
+
+	intel_pt_print_data(buf, pkt_len, pos, 0);
+	intel_pt_pkt_desc(packet, desc, INTEL_PT_PKT_DESC_MAX);
+	fprintf(f, "%s\n", desc);
+}
+
+void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
+{
+	char desc[INTEL_PT_INSN_DESC_MAX];
+	size_t len = intel_pt_insn->length;
+
+	if (intel_pt_log_open())
+		return;
+
+	if (len > INTEL_PT_INSN_DBG_BUF_SZ)
+		len = INTEL_PT_INSN_DBG_BUF_SZ;
+	intel_pt_print_data(intel_pt_insn->buf, len, ip, 8);
+	if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
+		fprintf(f, "%s\n", desc);
+	else
+		fprintf(f, "Bad instruction!\n");
+}
+
+void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn, uint64_t ip)
+{
+	char desc[INTEL_PT_INSN_DESC_MAX];
+
+	if (intel_pt_log_open())
+		return;
+
+	intel_pt_print_no_data(ip, 8);
+	if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0)
+		fprintf(f, "%s\n", desc);
+	else
+		fprintf(f, "Bad instruction!\n");
+}
+
+void intel_pt_log(const char *fmt, ...)
+{
+	va_list args;
+
+	if (intel_pt_log_open())
+		return;
+
+	va_start(args, fmt);
+	vfprintf(f, fmt, args);
+	va_end(args);
+}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
new file mode 100644
index 0000000..db3942f
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h
@@ -0,0 +1,52 @@
+/*
+ * intel_pt_log.h: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__INTEL_PT_LOG_H__
+#define INCLUDE__INTEL_PT_LOG_H__
+
+#include <stdint.h>
+#include <inttypes.h>
+
+struct intel_pt_pkt;
+
+void intel_pt_log_enable(void);
+void intel_pt_log_disable(void);
+void intel_pt_log_set_name(const char *name);
+
+void intel_pt_log_packet(const struct intel_pt_pkt *packet, int pkt_len,
+			 uint64_t pos, const unsigned char *buf);
+
+struct intel_pt_insn;
+
+void intel_pt_log_insn(struct intel_pt_insn *intel_pt_insn, uint64_t ip);
+void intel_pt_log_insn_no_data(struct intel_pt_insn *intel_pt_insn,
+			       uint64_t ip);
+
+__attribute__((format(printf, 1, 2)))
+void intel_pt_log(const char *fmt, ...);
+
+#define x64_fmt "0x%" PRIx64
+
+static inline void intel_pt_log_at(const char *msg, uint64_t u)
+{
+	intel_pt_log("%s at " x64_fmt "\n", msg, u);
+}
+
+static inline void intel_pt_log_to(const char *msg, uint64_t u)
+{
+	intel_pt_log("%s to " x64_fmt "\n", msg, u);
+}
+
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
new file mode 100644
index 0000000..b1257c8
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -0,0 +1,518 @@
+/*
+ * intel_pt_pkt_decoder.c: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#include "intel-pt-pkt-decoder.h"
+
+#define BIT(n)		(1 << (n))
+
+#define BIT63		((uint64_t)1 << 63)
+
+#define NR_FLAG		BIT63
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define le16_to_cpu bswap_16
+#define le32_to_cpu bswap_32
+#define le64_to_cpu bswap_64
+#define memcpy_le64(d, s, n) do { \
+	memcpy((d), (s), (n));    \
+	*(d) = le64_to_cpu(*(d)); \
+} while (0)
+#else
+#define le16_to_cpu
+#define le32_to_cpu
+#define le64_to_cpu
+#define memcpy_le64 memcpy
+#endif
+
+static const char * const packet_name[] = {
+	[INTEL_PT_BAD]		= "Bad Packet!",
+	[INTEL_PT_PAD]		= "PAD",
+	[INTEL_PT_TNT]		= "TNT",
+	[INTEL_PT_TIP_PGD]	= "TIP.PGD",
+	[INTEL_PT_TIP_PGE]	= "TIP.PGE",
+	[INTEL_PT_TSC]		= "TSC",
+	[INTEL_PT_TMA]		= "TMA",
+	[INTEL_PT_MODE_EXEC]	= "MODE.Exec",
+	[INTEL_PT_MODE_TSX]	= "MODE.TSX",
+	[INTEL_PT_MTC]		= "MTC",
+	[INTEL_PT_TIP]		= "TIP",
+	[INTEL_PT_FUP]		= "FUP",
+	[INTEL_PT_CYC]		= "CYC",
+	[INTEL_PT_VMCS]		= "VMCS",
+	[INTEL_PT_PSB]		= "PSB",
+	[INTEL_PT_PSBEND]	= "PSBEND",
+	[INTEL_PT_CBR]		= "CBR",
+	[INTEL_PT_TRACESTOP]	= "TraceSTOP",
+	[INTEL_PT_PIP]		= "PIP",
+	[INTEL_PT_OVF]		= "OVF",
+	[INTEL_PT_MNT]		= "MNT",
+};
+
+const char *intel_pt_pkt_name(enum intel_pt_pkt_type type)
+{
+	return packet_name[type];
+}
+
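+/*
+ * A long TNT packet carries up to 47 taken/not-taken bits delimited by a stop
+ * bit within an 8-byte payload.  Shift until the stop bit reaches bit 63,
+ * then shift once more to drop it, leaving the TNT bits at the top of the
+ * payload.
+ */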
+static int intel_pt_get_long_tnt(const unsigned char *buf, size_t len,
+				 struct intel_pt_pkt *packet)
+{
+	uint64_t payload;
+	int count;
+
+	if (len < 8)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	payload = le64_to_cpu(*(uint64_t *)buf);
+
+	for (count = 47; count; count--) {
+		if (payload & BIT63)
+			break;
+		payload <<= 1;
+	}
+
+	packet->type = INTEL_PT_TNT;
+	packet->count = count;
+	packet->payload = payload << 1;
+	return 8;
+}
+
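+/*
+ * PIP carries the CR3 value.  The lowest payload bit is the NR flag, which is
+ * moved to bit 63 (NR_FLAG) so that the remaining bits hold the CR3 address.
+ */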
+static int intel_pt_get_pip(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	uint64_t payload = 0;
+
+	if (len < 8)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	packet->type = INTEL_PT_PIP;
+	memcpy_le64(&payload, buf + 2, 6);
+	packet->payload = payload >> 1;
+	if (payload & 1)
+		packet->payload |= NR_FLAG;
+
+	return 8;
+}
+
+static int intel_pt_get_tracestop(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_TRACESTOP;
+	return 2;
+}
+
+static int intel_pt_get_cbr(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 4)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_CBR;
+	packet->payload = buf[2];
+	return 4;
+}
+
+static int intel_pt_get_vmcs(const unsigned char *buf, size_t len,
+			     struct intel_pt_pkt *packet)
+{
+	unsigned int count = (52 - 5) >> 3;
+
+	if (count < 1 || count > 7)
+		return INTEL_PT_BAD_PACKET;
+
+	if (len < count + 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	packet->type = INTEL_PT_VMCS;
+	packet->count = count;
+	memcpy_le64(&packet->payload, buf + 2, count);
+
+	return count + 2;
+}
+
+static int intel_pt_get_ovf(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_OVF;
+	return 2;
+}
+
+static int intel_pt_get_psb(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	int i;
+
+	if (len < 16)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	for (i = 2; i < 16; i += 2) {
+		if (buf[i] != 2 || buf[i + 1] != 0x82)
+			return INTEL_PT_BAD_PACKET;
+	}
+
+	packet->type = INTEL_PT_PSB;
+	return 16;
+}
+
+static int intel_pt_get_psbend(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_PSBEND;
+	return 2;
+}
+
+static int intel_pt_get_tma(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 7)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	packet->type = INTEL_PT_TMA;
+	packet->payload = buf[2] | (buf[3] << 8);
+	packet->count = buf[5] | ((buf[6] & BIT(0)) << 8);
+	return 7;
+}
+
+static int intel_pt_get_pad(struct intel_pt_pkt *packet)
+{
+	packet->type = INTEL_PT_PAD;
+	return 1;
+}
+
+static int intel_pt_get_mnt(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 11)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_MNT;
+	memcpy_le64(&packet->payload, buf + 3, 8);
+	return 11;
+}
+
+static int intel_pt_get_3byte(const unsigned char *buf, size_t len,
+			      struct intel_pt_pkt *packet)
+{
+	if (len < 3)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	switch (buf[2]) {
+	case 0x88: /* MNT */
+		return intel_pt_get_mnt(buf, len, packet);
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+}
+
+static int intel_pt_get_ext(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	switch (buf[1]) {
+	case 0xa3: /* Long TNT */
+		return intel_pt_get_long_tnt(buf, len, packet);
+	case 0x43: /* PIP */
+		return intel_pt_get_pip(buf, len, packet);
+	case 0x83: /* TraceStop */
+		return intel_pt_get_tracestop(packet);
+	case 0x03: /* CBR */
+		return intel_pt_get_cbr(buf, len, packet);
+	case 0xc8: /* VMCS */
+		return intel_pt_get_vmcs(buf, len, packet);
+	case 0xf3: /* OVF */
+		return intel_pt_get_ovf(packet);
+	case 0x82: /* PSB */
+		return intel_pt_get_psb(buf, len, packet);
+	case 0x23: /* PSBEND */
+		return intel_pt_get_psbend(packet);
+	case 0x73: /* TMA */
+		return intel_pt_get_tma(buf, len, packet);
+	case 0xC3: /* 3-byte header */
+		return intel_pt_get_3byte(buf, len, packet);
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+}
+
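+/*
+ * A short TNT packet packs up to 6 taken/not-taken bits beneath a stop bit.
+ * Locate the stop bit to determine the count, then move the TNT bits to the
+ * top of the payload to match the long TNT layout.
+ */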
+static int intel_pt_get_short_tnt(unsigned int byte,
+				  struct intel_pt_pkt *packet)
+{
+	int count;
+
+	for (count = 6; count; count--) {
+		if (byte & BIT(7))
+			break;
+		byte <<= 1;
+	}
+
+	packet->type = INTEL_PT_TNT;
+	packet->count = count;
+	packet->payload = (uint64_t)byte << 57;
+
+	return 1;
+}
+
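+/*
+ * CYC is a variable-length packet: the first byte supplies 5 payload bits and
+ * an extension flag; while the flag is set, each following byte supplies 7
+ * more payload bits and its own extension flag (bit 0).
+ */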
+static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
+			    size_t len, struct intel_pt_pkt *packet)
+{
+	unsigned int offs = 1, shift;
+	uint64_t payload = byte >> 3;
+
+	byte >>= 2;
+	len -= 1;
+	for (shift = 5; byte & 1; shift += 7) {
+		if (offs > 9)
+			return INTEL_PT_BAD_PACKET;
+		if (len < offs)
+			return INTEL_PT_NEED_MORE_BYTES;
+		byte = buf[offs++];
+		payload |= (byte >> 1) << shift;
+	}
+
+	packet->type = INTEL_PT_CYC;
+	packet->payload = payload;
+	return offs;
+}
+
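+/*
+ * The top three bits of the first byte select the IP compression: 0 means no
+ * IP payload, 1 means 2 bytes, 2 means 4 bytes, and 3 or 6 mean 6 bytes of
+ * payload follow; other values give a bad packet.
+ */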
+static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte,
+			   const unsigned char *buf, size_t len,
+			   struct intel_pt_pkt *packet)
+{
+	switch (byte >> 5) {
+	case 0:
+		packet->count = 0;
+		break;
+	case 1:
+		if (len < 3)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->count = 2;
+		packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
+		break;
+	case 2:
+		if (len < 5)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->count = 4;
+		packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
+		break;
+	case 3:
+	case 6:
+		if (len < 7)
+			return INTEL_PT_NEED_MORE_BYTES;
+		packet->count = 6;
+		memcpy_le64(&packet->payload, buf + 1, 6);
+		break;
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+
+	packet->type = type;
+
+	return packet->count + 1;
+}
+
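+/*
+ * MODE packets: leaf 0 (MODE.Exec) encodes the execution mode as an address
+ * width of 16, 64 or 32 bits; leaf 1 (MODE.TSX) carries the InTX and TXAbort
+ * bits in the low two payload bits.
+ */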
+static int intel_pt_get_mode(const unsigned char *buf, size_t len,
+			     struct intel_pt_pkt *packet)
+{
+	if (len < 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	switch (buf[1] >> 5) {
+	case 0:
+		packet->type = INTEL_PT_MODE_EXEC;
+		switch (buf[1] & 3) {
+		case 0:
+			packet->payload = 16;
+			break;
+		case 1:
+			packet->payload = 64;
+			break;
+		case 2:
+			packet->payload = 32;
+			break;
+		default:
+			return INTEL_PT_BAD_PACKET;
+		}
+		break;
+	case 1:
+		packet->type = INTEL_PT_MODE_TSX;
+		if ((buf[1] & 3) == 3)
+			return INTEL_PT_BAD_PACKET;
+		packet->payload = buf[1] & 3;
+		break;
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+
+	return 2;
+}
+
+static int intel_pt_get_tsc(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 8)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_TSC;
+	memcpy_le64(&packet->payload, buf + 1, 7);
+	return 8;
+}
+
+static int intel_pt_get_mtc(const unsigned char *buf, size_t len,
+			    struct intel_pt_pkt *packet)
+{
+	if (len < 2)
+		return INTEL_PT_NEED_MORE_BYTES;
+	packet->type = INTEL_PT_MTC;
+	packet->payload = buf[1];
+	return 2;
+}
+
+static int intel_pt_do_get_packet(const unsigned char *buf, size_t len,
+				  struct intel_pt_pkt *packet)
+{
+	unsigned int byte;
+
+	memset(packet, 0, sizeof(struct intel_pt_pkt));
+
+	if (!len)
+		return INTEL_PT_NEED_MORE_BYTES;
+
+	byte = buf[0];
+	if (!(byte & BIT(0))) {
+		if (byte == 0)
+			return intel_pt_get_pad(packet);
+		if (byte == 2)
+			return intel_pt_get_ext(buf, len, packet);
+		return intel_pt_get_short_tnt(byte, packet);
+	}
+
+	if ((byte & 2))
+		return intel_pt_get_cyc(byte, buf, len, packet);
+
+	switch (byte & 0x1f) {
+	case 0x0D:
+		return intel_pt_get_ip(INTEL_PT_TIP, byte, buf, len, packet);
+	case 0x11:
+		return intel_pt_get_ip(INTEL_PT_TIP_PGE, byte, buf, len,
+				       packet);
+	case 0x01:
+		return intel_pt_get_ip(INTEL_PT_TIP_PGD, byte, buf, len,
+				       packet);
+	case 0x1D:
+		return intel_pt_get_ip(INTEL_PT_FUP, byte, buf, len, packet);
+	case 0x19:
+		switch (byte) {
+		case 0x99:
+			return intel_pt_get_mode(buf, len, packet);
+		case 0x19:
+			return intel_pt_get_tsc(buf, len, packet);
+		case 0x59:
+			return intel_pt_get_mtc(buf, len, packet);
+		default:
+			return INTEL_PT_BAD_PACKET;
+		}
+	default:
+		return INTEL_PT_BAD_PACKET;
+	}
+}
+
+int intel_pt_get_packet(const unsigned char *buf, size_t len,
+			struct intel_pt_pkt *packet)
+{
+	int ret;
+
+	ret = intel_pt_do_get_packet(buf, len, packet);
+	if (ret > 0) {
+		while (ret < 8 && len > (size_t)ret && !buf[ret])
+			ret += 1;
+	}
+	return ret;
+}
+
+int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf,
+		      size_t buf_len)
+{
+	int ret, i, nr;
+	unsigned long long payload = packet->payload;
+	const char *name = intel_pt_pkt_name(packet->type);
+
+	switch (packet->type) {
+	case INTEL_PT_BAD:
+	case INTEL_PT_PAD:
+	case INTEL_PT_PSB:
+	case INTEL_PT_PSBEND:
+	case INTEL_PT_TRACESTOP:
+	case INTEL_PT_OVF:
+		return snprintf(buf, buf_len, "%s", name);
+	case INTEL_PT_TNT: {
+		size_t blen = buf_len;
+
+		ret = snprintf(buf, blen, "%s ", name);
+		if (ret < 0)
+			return ret;
+		buf += ret;
+		blen -= ret;
+		for (i = 0; i < packet->count; i++) {
+			if (payload & BIT63)
+				ret = snprintf(buf, blen, "T");
+			else
+				ret = snprintf(buf, blen, "N");
+			if (ret < 0)
+				return ret;
+			buf += ret;
+			blen -= ret;
+			payload <<= 1;
+		}
+		ret = snprintf(buf, blen, " (%d)", packet->count);
+		if (ret < 0)
+			return ret;
+		blen -= ret;
+		return buf_len - blen;
+	}
+	case INTEL_PT_TIP_PGD:
+	case INTEL_PT_TIP_PGE:
+	case INTEL_PT_TIP:
+	case INTEL_PT_FUP:
+		if (!(packet->count))
+			return snprintf(buf, buf_len, "%s no ip", name);
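+		/* Fall through */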
+	case INTEL_PT_CYC:
+	case INTEL_PT_VMCS:
+	case INTEL_PT_MTC:
+	case INTEL_PT_MNT:
+	case INTEL_PT_CBR:
+	case INTEL_PT_TSC:
+		return snprintf(buf, buf_len, "%s 0x%llx", name, payload);
+	case INTEL_PT_TMA:
+		return snprintf(buf, buf_len, "%s CTC 0x%x FC 0x%x", name,
+				(unsigned)payload, packet->count);
+	case INTEL_PT_MODE_EXEC:
+		return snprintf(buf, buf_len, "%s %lld", name, payload);
+	case INTEL_PT_MODE_TSX:
+		return snprintf(buf, buf_len, "%s TXAbort:%u InTX:%u",
+				name, (unsigned)(payload >> 1) & 1,
+				(unsigned)payload & 1);
+	case INTEL_PT_PIP:
+		nr = packet->payload & NR_FLAG ? 1 : 0;
+		payload &= ~NR_FLAG;
+		ret = snprintf(buf, buf_len, "%s 0x%llx (NR=%d)",
+			       name, payload, nr);
+		return ret;
+	default:
+		break;
+	}
+	return snprintf(buf, buf_len, "%s 0x%llx (%d)",
+			name, payload, packet->count);
+}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
new file mode 100644
index 0000000..781bb79
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h
@@ -0,0 +1,70 @@
+/*
+ * intel_pt_pkt_decoder.h: Intel Processor Trace support
+ * Copyright (c) 2013-2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__INTEL_PT_PKT_DECODER_H__
+#define INCLUDE__INTEL_PT_PKT_DECODER_H__
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define INTEL_PT_PKT_DESC_MAX	256
+
+#define INTEL_PT_NEED_MORE_BYTES	-1
+#define INTEL_PT_BAD_PACKET		-2
+
+#define INTEL_PT_PSB_STR		"\002\202\002\202\002\202\002\202" \
+					"\002\202\002\202\002\202\002\202"
+#define INTEL_PT_PSB_LEN		16
+
+#define INTEL_PT_PKT_MAX_SZ		16
+
+enum intel_pt_pkt_type {
+	INTEL_PT_BAD,
+	INTEL_PT_PAD,
+	INTEL_PT_TNT,
+	INTEL_PT_TIP_PGD,
+	INTEL_PT_TIP_PGE,
+	INTEL_PT_TSC,
+	INTEL_PT_TMA,
+	INTEL_PT_MODE_EXEC,
+	INTEL_PT_MODE_TSX,
+	INTEL_PT_MTC,
+	INTEL_PT_TIP,
+	INTEL_PT_FUP,
+	INTEL_PT_CYC,
+	INTEL_PT_VMCS,
+	INTEL_PT_PSB,
+	INTEL_PT_PSBEND,
+	INTEL_PT_CBR,
+	INTEL_PT_TRACESTOP,
+	INTEL_PT_PIP,
+	INTEL_PT_OVF,
+	INTEL_PT_MNT,
+};
+
+struct intel_pt_pkt {
+	enum intel_pt_pkt_type	type;
+	int			count;
+	uint64_t		payload;
+};
+
+const char *intel_pt_pkt_name(enum intel_pt_pkt_type);
+
+int intel_pt_get_packet(const unsigned char *buf, size_t len,
+			struct intel_pt_pkt *packet);
+
+int intel_pt_pkt_desc(const struct intel_pt_pkt *packet, char *buf, size_t len);
+
+#endif
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
new file mode 100644
index 0000000..816488c
--- /dev/null
+++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt
@@ -0,0 +1,970 @@
+# x86 Opcode Maps
+#
+# This is (mostly) based on following documentations.
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+#   (#326018-047US, June 2013)
+#
+#<Opcode maps>
+# Table: table-name
+# Referrer: escaped-name
+# AVXcode: avx-code
+# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+# (or)
+# opcode: escape # escaped-name
+# EndTable
+#
+#<group maps>
+# GrpTable: GrpXXX
+# reg:  mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
+# EndTable
+#
+# AVX Superscripts
+#  (v): this opcode requires VEX prefix.
+#  (v1): this opcode only supports 128bit VEX.
+#
+# Last Prefix Superscripts
+#  - (66): the last prefix is 0x66
+#  - (F3): the last prefix is 0xF3
+#  - (F2): the last prefix is 0xF2
+#  - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
+#  - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
+
+Table: one byte opcode
+Referrer:
+AVXcode:
+# 0x00 - 0x0f
+00: ADD Eb,Gb
+01: ADD Ev,Gv
+02: ADD Gb,Eb
+03: ADD Gv,Ev
+04: ADD AL,Ib
+05: ADD rAX,Iz
+06: PUSH ES (i64)
+07: POP ES (i64)
+08: OR Eb,Gb
+09: OR Ev,Gv
+0a: OR Gb,Eb
+0b: OR Gv,Ev
+0c: OR AL,Ib
+0d: OR rAX,Iz
+0e: PUSH CS (i64)
+0f: escape # 2-byte escape
+# 0x10 - 0x1f
+10: ADC Eb,Gb
+11: ADC Ev,Gv
+12: ADC Gb,Eb
+13: ADC Gv,Ev
+14: ADC AL,Ib
+15: ADC rAX,Iz
+16: PUSH SS (i64)
+17: POP SS (i64)
+18: SBB Eb,Gb
+19: SBB Ev,Gv
+1a: SBB Gb,Eb
+1b: SBB Gv,Ev
+1c: SBB AL,Ib
+1d: SBB rAX,Iz
+1e: PUSH DS (i64)
+1f: POP DS (i64)
+# 0x20 - 0x2f
+20: AND Eb,Gb
+21: AND Ev,Gv
+22: AND Gb,Eb
+23: AND Gv,Ev
+24: AND AL,Ib
+25: AND rAx,Iz
+26: SEG=ES (Prefix)
+27: DAA (i64)
+28: SUB Eb,Gb
+29: SUB Ev,Gv
+2a: SUB Gb,Eb
+2b: SUB Gv,Ev
+2c: SUB AL,Ib
+2d: SUB rAX,Iz
+2e: SEG=CS (Prefix)
+2f: DAS (i64)
+# 0x30 - 0x3f
+30: XOR Eb,Gb
+31: XOR Ev,Gv
+32: XOR Gb,Eb
+33: XOR Gv,Ev
+34: XOR AL,Ib
+35: XOR rAX,Iz
+36: SEG=SS (Prefix)
+37: AAA (i64)
+38: CMP Eb,Gb
+39: CMP Ev,Gv
+3a: CMP Gb,Eb
+3b: CMP Gv,Ev
+3c: CMP AL,Ib
+3d: CMP rAX,Iz
+3e: SEG=DS (Prefix)
+3f: AAS (i64)
+# 0x40 - 0x4f
+40: INC eAX (i64) | REX (o64)
+41: INC eCX (i64) | REX.B (o64)
+42: INC eDX (i64) | REX.X (o64)
+43: INC eBX (i64) | REX.XB (o64)
+44: INC eSP (i64) | REX.R (o64)
+45: INC eBP (i64) | REX.RB (o64)
+46: INC eSI (i64) | REX.RX (o64)
+47: INC eDI (i64) | REX.RXB (o64)
+48: DEC eAX (i64) | REX.W (o64)
+49: DEC eCX (i64) | REX.WB (o64)
+4a: DEC eDX (i64) | REX.WX (o64)
+4b: DEC eBX (i64) | REX.WXB (o64)
+4c: DEC eSP (i64) | REX.WR (o64)
+4d: DEC eBP (i64) | REX.WRB (o64)
+4e: DEC eSI (i64) | REX.WRX (o64)
+4f: DEC eDI (i64) | REX.WRXB (o64)
+# 0x50 - 0x5f
+50: PUSH rAX/r8 (d64)
+51: PUSH rCX/r9 (d64)
+52: PUSH rDX/r10 (d64)
+53: PUSH rBX/r11 (d64)
+54: PUSH rSP/r12 (d64)
+55: PUSH rBP/r13 (d64)
+56: PUSH rSI/r14 (d64)
+57: PUSH rDI/r15 (d64)
+58: POP rAX/r8 (d64)
+59: POP rCX/r9 (d64)
+5a: POP rDX/r10 (d64)
+5b: POP rBX/r11 (d64)
+5c: POP rSP/r12 (d64)
+5d: POP rBP/r13 (d64)
+5e: POP rSI/r14 (d64)
+5f: POP rDI/r15 (d64)
+# 0x60 - 0x6f
+60: PUSHA/PUSHAD (i64)
+61: POPA/POPAD (i64)
+62: BOUND Gv,Ma (i64)
+63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
+64: SEG=FS (Prefix)
+65: SEG=GS (Prefix)
+66: Operand-Size (Prefix)
+67: Address-Size (Prefix)
+68: PUSH Iz (d64)
+69: IMUL Gv,Ev,Iz
+6a: PUSH Ib (d64)
+6b: IMUL Gv,Ev,Ib
+6c: INS/INSB Yb,DX
+6d: INS/INSW/INSD Yz,DX
+6e: OUTS/OUTSB DX,Xb
+6f: OUTS/OUTSW/OUTSD DX,Xz
+# 0x70 - 0x7f
+70: JO Jb
+71: JNO Jb
+72: JB/JNAE/JC Jb
+73: JNB/JAE/JNC Jb
+74: JZ/JE Jb
+75: JNZ/JNE Jb
+76: JBE/JNA Jb
+77: JNBE/JA Jb
+78: JS Jb
+79: JNS Jb
+7a: JP/JPE Jb
+7b: JNP/JPO Jb
+7c: JL/JNGE Jb
+7d: JNL/JGE Jb
+7e: JLE/JNG Jb
+7f: JNLE/JG Jb
+# 0x80 - 0x8f
+80: Grp1 Eb,Ib (1A)
+81: Grp1 Ev,Iz (1A)
+82: Grp1 Eb,Ib (1A),(i64)
+83: Grp1 Ev,Ib (1A)
+84: TEST Eb,Gb
+85: TEST Ev,Gv
+86: XCHG Eb,Gb
+87: XCHG Ev,Gv
+88: MOV Eb,Gb
+89: MOV Ev,Gv
+8a: MOV Gb,Eb
+8b: MOV Gv,Ev
+8c: MOV Ev,Sw
+8d: LEA Gv,M
+8e: MOV Sw,Ew
+8f: Grp1A (1A) | POP Ev (d64)
+# 0x90 - 0x9f
+90: NOP | PAUSE (F3) | XCHG r8,rAX
+91: XCHG rCX/r9,rAX
+92: XCHG rDX/r10,rAX
+93: XCHG rBX/r11,rAX
+94: XCHG rSP/r12,rAX
+95: XCHG rBP/r13,rAX
+96: XCHG rSI/r14,rAX
+97: XCHG rDI/r15,rAX
+98: CBW/CWDE/CDQE
+99: CWD/CDQ/CQO
+9a: CALLF Ap (i64)
+9b: FWAIT/WAIT
+9c: PUSHF/D/Q Fv (d64)
+9d: POPF/D/Q Fv (d64)
+9e: SAHF
+9f: LAHF
+# 0xa0 - 0xaf
+a0: MOV AL,Ob
+a1: MOV rAX,Ov
+a2: MOV Ob,AL
+a3: MOV Ov,rAX
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
+a6: CMPS/B Xb,Yb
+a7: CMPS/W/D Xv,Yv
+a8: TEST AL,Ib
+a9: TEST rAX,Iz
+aa: STOS/B Yb,AL
+ab: STOS/W/D/Q Yv,rAX
+ac: LODS/B AL,Xb
+ad: LODS/W/D/Q rAX,Xv
+ae: SCAS/B AL,Yb
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
+# 0xb0 - 0xbf
+b0: MOV AL/R8L,Ib
+b1: MOV CL/R9L,Ib
+b2: MOV DL/R10L,Ib
+b3: MOV BL/R11L,Ib
+b4: MOV AH/R12L,Ib
+b5: MOV CH/R13L,Ib
+b6: MOV DH/R14L,Ib
+b7: MOV BH/R15L,Ib
+b8: MOV rAX/r8,Iv
+b9: MOV rCX/r9,Iv
+ba: MOV rDX/r10,Iv
+bb: MOV rBX/r11,Iv
+bc: MOV rSP/r12,Iv
+bd: MOV rBP/r13,Iv
+be: MOV rSI/r14,Iv
+bf: MOV rDI/r15,Iv
+# 0xc0 - 0xcf
+c0: Grp2 Eb,Ib (1A)
+c1: Grp2 Ev,Ib (1A)
+c2: RETN Iw (f64)
+c3: RETN
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c6: Grp11A Eb,Ib (1A)
+c7: Grp11B Ev,Iz (1A)
+c8: ENTER Iw,Ib
+c9: LEAVE (d64)
+ca: RETF Iw
+cb: RETF
+cc: INT3
+cd: INT Ib
+ce: INTO (i64)
+cf: IRET/D/Q
+# 0xd0 - 0xdf
+d0: Grp2 Eb,1 (1A)
+d1: Grp2 Ev,1 (1A)
+d2: Grp2 Eb,CL (1A)
+d3: Grp2 Ev,CL (1A)
+d4: AAM Ib (i64)
+d5: AAD Ib (i64)
+d6:
+d7: XLAT/XLATB
+d8: ESC
+d9: ESC
+da: ESC
+db: ESC
+dc: ESC
+dd: ESC
+de: ESC
+df: ESC
+# 0xe0 - 0xef
+# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
+# in 64-bit mode. AMD CPUs accept the 0x66 prefix; it causes RIP truncation
+# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
+e0: LOOPNE/LOOPNZ Jb (f64)
+e1: LOOPE/LOOPZ Jb (f64)
+e2: LOOP Jb (f64)
+e3: JrCXZ Jb (f64)
+e4: IN AL,Ib
+e5: IN eAX,Ib
+e6: OUT Ib,AL
+e7: OUT Ib,eAX
+# With the 0x66 prefix in 64-bit mode, on AMD CPUs the immediate offset
+# in "near" jumps and calls is 16-bit. For CALL, the push of the return
+# address is 16 bits wide and RSP is decremented by 2, but RSP is not
+# truncated to 16 bits, unlike RIP.
+e8: CALL Jz (f64)
+e9: JMP-near Jz (f64)
+ea: JMP-far Ap (i64)
+eb: JMP-short Jb (f64)
+ec: IN AL,DX
+ed: IN eAX,DX
+ee: OUT DX,AL
+ef: OUT DX,eAX
+# 0xf0 - 0xff
+f0: LOCK (Prefix)
+f1:
+f2: REPNE (Prefix) | XACQUIRE (Prefix)
+f3: REP/REPE (Prefix) | XRELEASE (Prefix)
+f4: HLT
+f5: CMC
+f6: Grp3_1 Eb (1A)
+f7: Grp3_2 Ev (1A)
+f8: CLC
+f9: STC
+fa: CLI
+fb: STI
+fc: CLD
+fd: STD
+fe: Grp4 (1A)
+ff: Grp5 (1A)
+EndTable
+
+Table: 2-byte opcode (0x0f)
+Referrer: 2-byte escape
+AVXcode: 1
+# 0x0f 0x00-0x0f
+00: Grp6 (1A)
+01: Grp7 (1A)
+02: LAR Gv,Ew
+03: LSL Gv,Ew
+04:
+05: SYSCALL (o64)
+06: CLTS
+07: SYSRET (o64)
+08: INVD
+09: WBINVD
+0a:
+0b: UD2 (1B)
+0c:
+# AMD's prefetch group. Intel supports prefetchw(/1) only.
+0d: GrpP
+0e: FEMMS
+# 3DNow! uses the last imm byte as opcode extension.
+0f: 3DNow! Pq,Qq,Ib
+# 0x0f 0x10-0x1f
+# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
+# operands, but they actually do. Also, vmovss and vmovsd only accept 128bit.
+# MOVSS/MOVSD has too many forms (3) in the SDM; this map just shows a typical
+# form. Many AVX instructions lack the v1 superscript, according to the Intel
+# AVX-Programming Reference A.1
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
+18: Grp16 (1A)
+19:
+1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
+1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
+1c:
+1d:
+1e:
+1f: NOP Ev
+# 0x0f 0x20-0x2f
+20: MOV Rd,Cd
+21: MOV Rd,Dd
+22: MOV Cd,Rd
+23: MOV Dd,Rd
+24:
+25:
+26:
+27:
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd  Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd  Vsd,Wsd (66),(v1)
+# 0x0f 0x30-0x3f
+30: WRMSR
+31: RDTSC
+32: RDMSR
+33: RDPMC
+34: SYSENTER
+35: SYSEXIT
+36:
+37: GETSEC
+38: escape # 3-byte escape 1
+39:
+3a: escape # 3-byte escape 2
+3b:
+3c:
+3d:
+3e:
+3f:
+# 0x0f 0x40-0x4f
+40: CMOVO Gv,Ev
+41: CMOVNO Gv,Ev
+42: CMOVB/C/NAE Gv,Ev
+43: CMOVAE/NB/NC Gv,Ev
+44: CMOVE/Z Gv,Ev
+45: CMOVNE/NZ Gv,Ev
+46: CMOVBE/NA Gv,Ev
+47: CMOVA/NBE Gv,Ev
+48: CMOVS Gv,Ev
+49: CMOVNS Gv,Ev
+4a: CMOVP/PE Gv,Ev
+4b: CMOVNP/PO Gv,Ev
+4c: CMOVL/NGE Gv,Ev
+4d: CMOVNL/GE Gv,Ev
+4e: CMOVLE/NG Gv,Ev
+4f: CMOVNLE/G Gv,Ev
+# 0x0f 0x50-0x5f
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
+# 0x0f 0x60-0x6f
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqu Vx,Wx (F3)
+# 0x0f 0x70-0x7f
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
+71: Grp12 (1A)
+72: Grp13 (1A)
+73: Grp14 (1A)
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy
+79: VMWRITE Gy,Ey
+7a:
+7b:
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqu Wx,Vx (F3)
+# 0x0f 0x80-0x8f
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+80: JO Jz (f64)
+81: JNO Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
+86: JBE/JNA Jz (f64)
+87: JA/JNBE Jz (f64)
+88: JS Jz (f64)
+89: JNS Jz (f64)
+8a: JP/JPE Jz (f64)
+8b: JNP/JPO Jz (f64)
+8c: JL/JNGE Jz (f64)
+8d: JNL/JGE Jz (f64)
+8e: JLE/JNG Jz (f64)
+8f: JNLE/JG Jz (f64)
+# 0x0f 0x90-0x9f
+90: SETO Eb
+91: SETNO Eb
+92: SETB/C/NAE Eb
+93: SETAE/NB/NC Eb
+94: SETE/Z Eb
+95: SETNE/NZ Eb
+96: SETBE/NA Eb
+97: SETA/NBE Eb
+98: SETS Eb
+99: SETNS Eb
+9a: SETP/PE Eb
+9b: SETNP/PO Eb
+9c: SETL/NGE Eb
+9d: SETNL/GE Eb
+9e: SETLE/NG Eb
+9f: SETNLE/G Eb
+# 0x0f 0xa0-0xaf
+a0: PUSH FS (d64)
+a1: POP FS (d64)
+a2: CPUID
+a3: BT Ev,Gv
+a4: SHLD Ev,Gv,Ib
+a5: SHLD Ev,Gv,CL
+a6: GrpPDLK
+a7: GrpRNG
+a8: PUSH GS (d64)
+a9: POP GS (d64)
+aa: RSM
+ab: BTS Ev,Gv
+ac: SHRD Ev,Gv,Ib
+ad: SHRD Ev,Gv,CL
+ae: Grp15 (1A),(1C)
+af: IMUL Gv,Ev
+# 0x0f 0xb0-0xbf
+b0: CMPXCHG Eb,Gb
+b1: CMPXCHG Ev,Gv
+b2: LSS Gv,Mp
+b3: BTR Ev,Gv
+b4: LFS Gv,Mp
+b5: LGS Gv,Mp
+b6: MOVZX Gv,Eb
+b7: MOVZX Gv,Ew
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
+b9: Grp10 (1A)
+ba: Grp8 Ev,Ib (1A)
+bb: BTC Ev,Gv
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
+be: MOVSX Gv,Eb
+bf: MOVSX Gv,Ew
+# 0x0f 0xc0-0xcf
+c0: XADD Eb,Gb
+c1: XADD Ev,Gv
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
+c7: Grp9 (1A)
+c8: BSWAP RAX/EAX/R8/R8D
+c9: BSWAP RCX/ECX/R9/R9D
+ca: BSWAP RDX/EDX/R10/R10D
+cb: BSWAP RBX/EBX/R11/R11D
+cc: BSWAP RSP/ESP/R12/R12D
+cd: BSWAP RBP/EBP/R13/R13D
+ce: BSWAP RSI/ESI/R14/R14D
+cf: BSWAP RDI/EDI/R15/R15D
+# 0x0f 0xd0-0xdf
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1)
+# 0x0f 0xe0-0xef
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1)
+# 0x0f 0xf0-0xff
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
+ff:
+EndTable
+
+Table: 3-byte opcode 1 (0x0f 0x38)
+Referrer: 3-byte escape 1
+AVXcode: 2
+# 0x0f 0x38 0x00-0x0f
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
+# 0x0f 0x38 0x10-0x1f
+10: pblendvb Vdq,Wdq (66)
+11:
+12:
+13: vcvtph2ps Vx,Wx,Ib (66),(v)
+14: blendvps Vdq,Wdq (66)
+15: blendvpd Vdq,Wdq (66)
+16: vpermps Vqq,Hqq,Wqq (66),(v)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v)
+1a: vbroadcastf128 Vqq,Mdq (66),(v)
+1b:
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
+1f:
+# 0x0f 0x38 0x20-0x2f
+20: vpmovsxbw Vx,Ux/Mq (66),(v1)
+21: vpmovsxbd Vx,Ux/Md (66),(v1)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1)
+24: vpmovsxwq Vx,Ux/Md (66),(v1)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1)
+26:
+27:
+28: vpmuldq Vx,Hx,Wx (66),(v1)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1)
+2a: vmovntdqa Vx,Mx (66),(v1)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
+# 0x0f 0x38 0x30-0x3f
+30: vpmovzxbw Vx,Ux/Mq (66),(v1)
+31: vpmovzxbd Vx,Ux/Md (66),(v1)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1)
+34: vpmovzxwq Vx,Ux/Md (66),(v1)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1)
+36: vpermd Vqq,Hqq,Wqq (66),(v)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1)
+39: vpminsd Vx,Hx,Wx (66),(v1)
+3a: vpminuw Vx,Hx,Wx (66),(v1)
+3b: vpminud Vx,Hx,Wx (66),(v1)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1)
+# 0x0f 0x38 0x40-0x8f
+40: vpmulld Vx,Hx,Wx (66),(v1)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42:
+43:
+44:
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v)
+5a: vbroadcasti128 Vqq,Mdq (66),(v)
+# Skip 0x5b-0x77
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+# Skip 0x7a-0x7f
+80: INVEPT Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+# 0x0f 0x38 0x90-0xbf (FMA)
+90: vgatherdd/q Vx,Hx,Wx (66),(v)
+91: vgatherqd/q Vx,Hx,Wx (66),(v)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+# 0x0f 0x38 0xc0-0xff
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+EndTable
+
+Table: 3-byte opcode 2 (0x0f 0x3a)
+Referrer: 3-byte escape 2
+AVXcode: 3
+# 0x0f 0x3a 0x00-0xff
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03:
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66)
+09: vroundpd Vx,Wx,Ib (66)
+0a: vroundss Vss,Wss,Ib (66),(v1)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v)
+19: vextractf128 Wdq,Vqq,Ib (66),(v)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v)
+39: vextracti128 Wdq,Vqq,Ib (66),(v)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
+EndTable
+
+GrpTable: Grp1
+0: ADD
+1: OR
+2: ADC
+3: SBB
+4: AND
+5: SUB
+6: XOR
+7: CMP
+EndTable
+
+GrpTable: Grp1A
+0: POP
+EndTable
+
+GrpTable: Grp2
+0: ROL
+1: ROR
+2: RCL
+3: RCR
+4: SHL/SAL
+5: SHR
+6:
+7: SAR
+EndTable
+
+GrpTable: Grp3_1
+0: TEST Eb,Ib
+1:
+2: NOT Eb
+3: NEG Eb
+4: MUL AL,Eb
+5: IMUL AL,Eb
+6: DIV AL,Eb
+7: IDIV AL,Eb
+EndTable
+
+GrpTable: Grp3_2
+0: TEST Ev,Iz
+1:
+2: NOT Ev
+3: NEG Ev
+4: MUL rAX,Ev
+5: IMUL rAX,Ev
+6: DIV rAX,Ev
+7: IDIV rAX,Ev
+EndTable
+
+GrpTable: Grp4
+0: INC Eb
+1: DEC Eb
+EndTable
+
+GrpTable: Grp5
+0: INC Ev
+1: DEC Ev
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+2: CALLN Ev (f64)
+3: CALLF Ep
+4: JMPN Ev (f64)
+5: JMPF Mp
+6: PUSH Ev (d64)
+7:
+EndTable
+
+GrpTable: Grp6
+0: SLDT Rv/Mw
+1: STR Rv/Mw
+2: LLDT Ew
+3: LTR Ew
+4: VERR Ew
+5: VERW Ew
+EndTable
+
+GrpTable: Grp7
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+3: LIDT Ms
+4: SMSW Mw/Rv
+5:
+6: LMSW Ew
+7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
+EndTable
+
+GrpTable: Grp8
+4: BT
+5: BTS
+6: BTR
+7: BTC
+EndTable
+
+GrpTable: Grp9
+1: CMPXCHG8B/16B Mq/Mdq
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
+EndTable
+
+GrpTable: Grp10
+EndTable
+
+# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
+GrpTable: Grp11A
+0: MOV Eb,Ib
+7: XABORT Ib (000),(11B)
+EndTable
+
+GrpTable: Grp11B
+0: MOV Eb,Iz
+7: XBEGIN Jz (000),(11B)
+EndTable
+
+GrpTable: Grp12
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp13
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp14
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp15
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxrstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+4: XSAVE
+5: XRSTOR | lfence (11B)
+6: XSAVEOPT | mfence (11B)
+7: clflush | sfence (11B)
+EndTable
+
+GrpTable: Grp16
+0: prefetch NTA
+1: prefetch T0
+2: prefetch T1
+3: prefetch T2
+EndTable
+
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
+# AMD's Prefetch Group
+GrpTable: GrpP
+0: PREFETCH
+1: PREFETCHW
+EndTable
+
+GrpTable: GrpPDLK
+0: MONTMUL
+1: XSHA1
+2: XSHA2
+EndTable
+
+GrpTable: GrpRNG
+0: xstore-rng
+1: xcrypt-ecb
+2: xcrypt-cbc
+4: xcrypt-cfb
+5: xcrypt-ofb
+EndTable
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
new file mode 100644
index 0000000..bb41c20
--- /dev/null
+++ b/tools/perf/util/intel-pt.c
@@ -0,0 +1,1956 @@
+/*
+ * intel_pt.c: Intel Processor Trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "../perf.h"
+#include "session.h"
+#include "machine.h"
+#include "tool.h"
+#include "event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "map.h"
+#include "color.h"
+#include "util.h"
+#include "thread.h"
+#include "thread-stack.h"
+#include "symbol.h"
+#include "callchain.h"
+#include "dso.h"
+#include "debug.h"
+#include "auxtrace.h"
+#include "tsc.h"
+#include "intel-pt.h"
+
+#include "intel-pt-decoder/intel-pt-log.h"
+#include "intel-pt-decoder/intel-pt-decoder.h"
+#include "intel-pt-decoder/intel-pt-insn-decoder.h"
+#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
+
+#define MAX_TIMESTAMP (~0ULL)
+
+struct intel_pt {
+	struct auxtrace auxtrace;
+	struct auxtrace_queues queues;
+	struct auxtrace_heap heap;
+	u32 auxtrace_type;
+	struct perf_session *session;
+	struct machine *machine;
+	struct perf_evsel *switch_evsel;
+	struct thread *unknown_thread;
+	bool timeless_decoding;
+	bool sampling_mode;
+	bool snapshot_mode;
+	bool per_cpu_mmaps;
+	bool have_tsc;
+	bool data_queued;
+	bool est_tsc;
+	bool sync_switch;
+	int have_sched_switch;
+	u32 pmu_type;
+	u64 kernel_start;
+	u64 switch_ip;
+	u64 ptss_ip;
+
+	struct perf_tsc_conversion tc;
+	bool cap_user_time_zero;
+
+	struct itrace_synth_opts synth_opts;
+
+	bool sample_instructions;
+	u64 instructions_sample_type;
+	u64 instructions_sample_period;
+	u64 instructions_id;
+
+	bool sample_branches;
+	u32 branches_filter;
+	u64 branches_sample_type;
+	u64 branches_id;
+
+	bool sample_transactions;
+	u64 transactions_sample_type;
+	u64 transactions_id;
+
+	bool synth_needs_swap;
+
+	u64 tsc_bit;
+	u64 mtc_bit;
+	u64 mtc_freq_bits;
+	u32 tsc_ctc_ratio_n;
+	u32 tsc_ctc_ratio_d;
+	u64 cyc_bit;
+	u64 noretcomp_bit;
+	unsigned max_non_turbo_ratio;
+};
+
+enum switch_state {
+	INTEL_PT_SS_NOT_TRACING,
+	INTEL_PT_SS_UNKNOWN,
+	INTEL_PT_SS_TRACING,
+	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
+	INTEL_PT_SS_EXPECTING_SWITCH_IP,
+};
+
+struct intel_pt_queue {
+	struct intel_pt *pt;
+	unsigned int queue_nr;
+	struct auxtrace_buffer *buffer;
+	void *decoder;
+	const struct intel_pt_state *state;
+	struct ip_callchain *chain;
+	union perf_event *event_buf;
+	bool on_heap;
+	bool stop;
+	bool step_through_buffers;
+	bool use_buffer_pid_tid;
+	pid_t pid, tid;
+	int cpu;
+	int switch_state;
+	pid_t next_tid;
+	struct thread *thread;
+	bool exclude_kernel;
+	bool have_sample;
+	u64 time;
+	u64 timestamp;
+	u32 flags;
+	u16 insn_len;
+	u64 last_insn_cnt;
+};
+
+static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
+			  unsigned char *buf, size_t len)
+{
+	struct intel_pt_pkt packet;
+	size_t pos = 0;
+	int ret, pkt_len, i;
+	char desc[INTEL_PT_PKT_DESC_MAX];
+	const char *color = PERF_COLOR_BLUE;
+
+	color_fprintf(stdout, color,
+		      ". ... Intel Processor Trace data: size %zu bytes\n",
+		      len);
+
+	while (len) {
+		ret = intel_pt_get_packet(buf, len, &packet);
+		if (ret > 0)
+			pkt_len = ret;
+		else
+			pkt_len = 1;
+		printf(".");
+		color_fprintf(stdout, color, "  %08zx: ", pos);
+		for (i = 0; i < pkt_len; i++)
+			color_fprintf(stdout, color, " %02x", buf[i]);
+		for (; i < 16; i++)
+			color_fprintf(stdout, color, "   ");
+		if (ret > 0) {
+			ret = intel_pt_pkt_desc(&packet, desc,
+						INTEL_PT_PKT_DESC_MAX);
+			if (ret > 0)
+				color_fprintf(stdout, color, " %s\n", desc);
+		} else {
+			color_fprintf(stdout, color, " Bad packet!\n");
+		}
+		pos += pkt_len;
+		buf += pkt_len;
+		len -= pkt_len;
+	}
+}
+
+static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
+				size_t len)
+{
+	printf(".\n");
+	intel_pt_dump(pt, buf, len);
+}
+
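+/*
+ * Trim buffer 'b' so that it does not repeat data already present in
+ * buffer 'a'.
+ */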
+static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
+				   struct auxtrace_buffer *b)
+{
+	void *start;
+
+	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
+				      pt->have_tsc);
+	if (!start)
+		return -EINVAL;
+	b->use_size = b->data + b->size - start;
+	b->use_data = start;
+	return 0;
+}
+
+static void intel_pt_use_buffer_pid_tid(struct intel_pt_queue *ptq,
+					struct auxtrace_queue *queue,
+					struct auxtrace_buffer *buffer)
+{
+	if (queue->cpu == -1 && buffer->cpu != -1)
+		ptq->cpu = buffer->cpu;
+
+	ptq->pid = buffer->pid;
+	ptq->tid = buffer->tid;
+
+	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
+		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
+
+	thread__zput(ptq->thread);
+
+	if (ptq->tid != -1) {
+		if (ptq->pid != -1)
+			ptq->thread = machine__findnew_thread(ptq->pt->machine,
+							      ptq->pid,
+							      ptq->tid);
+		else
+			ptq->thread = machine__find_thread(ptq->pt->machine, -1,
+							   ptq->tid);
+	}
+}
+
+/* This function assumes data is processed sequentially only */
+static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
+{
+	struct intel_pt_queue *ptq = data;
+	struct auxtrace_buffer *buffer = ptq->buffer, *old_buffer = buffer;
+	struct auxtrace_queue *queue;
+
+	if (ptq->stop) {
+		b->len = 0;
+		return 0;
+	}
+
+	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];
+
+	buffer = auxtrace_buffer__next(queue, buffer);
+	if (!buffer) {
+		if (old_buffer)
+			auxtrace_buffer__drop_data(old_buffer);
+		b->len = 0;
+		return 0;
+	}
+
+	ptq->buffer = buffer;
+
+	if (!buffer->data) {
+		int fd = perf_data_file__fd(ptq->pt->session->file);
+
+		buffer->data = auxtrace_buffer__get_data(buffer, fd);
+		if (!buffer->data)
+			return -ENOMEM;
+	}
+
+	if (ptq->pt->snapshot_mode && !buffer->consecutive && old_buffer &&
+	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
+		return -ENOMEM;
+
+	if (old_buffer)
+		auxtrace_buffer__drop_data(old_buffer);
+
+	if (buffer->use_data) {
+		b->len = buffer->use_size;
+		b->buf = buffer->use_data;
+	} else {
+		b->len = buffer->size;
+		b->buf = buffer->data;
+	}
+	b->ref_timestamp = buffer->reference;
+
+	if (!old_buffer || ptq->pt->sampling_mode || (ptq->pt->snapshot_mode &&
+						      !buffer->consecutive)) {
+		b->consecutive = false;
+		b->trace_nr = buffer->buffer_nr + 1;
+	} else {
+		b->consecutive = true;
+	}
+
+	if (ptq->use_buffer_pid_tid && (ptq->pid != buffer->pid ||
+					ptq->tid != buffer->tid))
+		intel_pt_use_buffer_pid_tid(ptq, queue, buffer);
+
+	if (ptq->step_through_buffers)
+		ptq->stop = true;
+
+	if (!b->len)
+		return intel_pt_get_trace(b, data);
+
+	return 0;
+}
+
+struct intel_pt_cache_entry {
+	struct auxtrace_cache_entry	entry;
+	u64				insn_cnt;
+	u64				byte_cnt;
+	enum intel_pt_insn_op		op;
+	enum intel_pt_insn_branch	branch;
+	int				length;
+	int32_t				rel;
+};
+
+static int intel_pt_config_div(const char *var, const char *value, void *data)
+{
+	int *d = data;
+	long val;
+
+	if (!strcmp(var, "intel-pt.cache-divisor")) {
+		val = strtol(value, NULL, 0);
+		if (val > 0 && val <= INT_MAX)
+			*d = val;
+	}
+
+	return 0;
+}
+
+static int intel_pt_cache_divisor(void)
+{
+	static int d;
+
+	if (d)
+		return d;
+
+	perf_config(intel_pt_config_div, &d);
+
+	if (!d)
+		d = 64;
+
+	return d;
+}
+
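+/*
+ * Size the per-dso instruction cache as a power of two: roughly one bucket
+ * per "cache divisor" bytes of object code, clamped to between 2^10 and 2^21.
+ */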
+static unsigned int intel_pt_cache_size(struct dso *dso,
+					struct machine *machine)
+{
+	off_t size;
+
+	size = dso__data_size(dso, machine);
+	size /= intel_pt_cache_divisor();
+	if (size < 1000)
+		return 10;
+	if (size > (1 << 21))
+		return 21;
+	return 32 - __builtin_clz(size);
+}
+
+static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
+					     struct machine *machine)
+{
+	struct auxtrace_cache *c;
+	unsigned int bits;
+
+	if (dso->auxtrace_cache)
+		return dso->auxtrace_cache;
+
+	bits = intel_pt_cache_size(dso, machine);
+
+	/* Ignoring cache creation failure */
+	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
+
+	dso->auxtrace_cache = c;
+
+	return c;
+}
+
+static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
+			      u64 offset, u64 insn_cnt, u64 byte_cnt,
+			      struct intel_pt_insn *intel_pt_insn)
+{
+	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
+	struct intel_pt_cache_entry *e;
+	int err;
+
+	if (!c)
+		return -ENOMEM;
+
+	e = auxtrace_cache__alloc_entry(c);
+	if (!e)
+		return -ENOMEM;
+
+	e->insn_cnt = insn_cnt;
+	e->byte_cnt = byte_cnt;
+	e->op = intel_pt_insn->op;
+	e->branch = intel_pt_insn->branch;
+	e->length = intel_pt_insn->length;
+	e->rel = intel_pt_insn->rel;
+
+	err = auxtrace_cache__add(c, offset, &e->entry);
+	if (err)
+		auxtrace_cache__free_entry(c, e);
+
+	return err;
+}
+
+static struct intel_pt_cache_entry *
+intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
+{
+	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
+
+	if (!c)
+		return NULL;
+
+	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
+}
+
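+/*
+ * Walk object code from *ip, decoding one instruction at a time, until a
+ * branch is found or 'to_ip' or 'max_insn_cnt' is reached.  Walks that stay
+ * within a single map are cached per dso to avoid decoding the same code
+ * again.
+ */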
+static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
+				   uint64_t *insn_cnt_ptr, uint64_t *ip,
+				   uint64_t to_ip, uint64_t max_insn_cnt,
+				   void *data)
+{
+	struct intel_pt_queue *ptq = data;
+	struct machine *machine = ptq->pt->machine;
+	struct thread *thread;
+	struct addr_location al;
+	unsigned char buf[1024];
+	size_t bufsz;
+	ssize_t len;
+	int x86_64;
+	u8 cpumode;
+	u64 offset, start_offset, start_ip;
+	u64 insn_cnt = 0;
+	bool one_map = true;
+
+	if (to_ip && *ip == to_ip)
+		goto out_no_cache;
+
+	bufsz = intel_pt_insn_max_size();
+
+	if (*ip >= ptq->pt->kernel_start)
+		cpumode = PERF_RECORD_MISC_KERNEL;
+	else
+		cpumode = PERF_RECORD_MISC_USER;
+
+	thread = ptq->thread;
+	if (!thread) {
+		if (cpumode != PERF_RECORD_MISC_KERNEL)
+			return -EINVAL;
+		thread = ptq->pt->unknown_thread;
+	}
+
+	while (1) {
+		thread__find_addr_map(thread, cpumode, MAP__FUNCTION, *ip, &al);
+		if (!al.map || !al.map->dso)
+			return -EINVAL;
+
+		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
+		    dso__data_status_seen(al.map->dso,
+					  DSO_DATA_STATUS_SEEN_ITRACE))
+			return -ENOENT;
+
+		offset = al.map->map_ip(al.map, *ip);
+
+		if (!to_ip && one_map) {
+			struct intel_pt_cache_entry *e;
+
+			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
+			if (e &&
+			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
+				*insn_cnt_ptr = e->insn_cnt;
+				*ip += e->byte_cnt;
+				intel_pt_insn->op = e->op;
+				intel_pt_insn->branch = e->branch;
+				intel_pt_insn->length = e->length;
+				intel_pt_insn->rel = e->rel;
+				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
+				return 0;
+			}
+		}
+
+		start_offset = offset;
+		start_ip = *ip;
+
+		/* Load maps to ensure dso->is_64_bit has been updated */
+		map__load(al.map, machine->symbol_filter);
+
+		x86_64 = al.map->dso->is_64_bit;
+
+		while (1) {
+			len = dso__data_read_offset(al.map->dso, machine,
+						    offset, buf, bufsz);
+			if (len <= 0)
+				return -EINVAL;
+
+			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
+				return -EINVAL;
+
+			intel_pt_log_insn(intel_pt_insn, *ip);
+
+			insn_cnt += 1;
+
+			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
+				goto out;
+
+			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
+				goto out_no_cache;
+
+			*ip += intel_pt_insn->length;
+
+			if (to_ip && *ip == to_ip)
+				goto out_no_cache;
+
+			if (*ip >= al.map->end)
+				break;
+
+			offset += intel_pt_insn->length;
+		}
+		one_map = false;
+	}
+out:
+	*insn_cnt_ptr = insn_cnt;
+
+	if (!one_map)
+		goto out_no_cache;
+
+	/*
+	 * Didn't look up the cache in the 'to_ip' case, so do it now to
+	 * prevent adding duplicate entries.
+	 */
+	if (to_ip) {
+		struct intel_pt_cache_entry *e;
+
+		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
+		if (e)
+			return 0;
+	}
+
+	/* Ignore cache errors */
+	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
+			   *ip - start_ip, intel_pt_insn);
+
+	return 0;
+
+out_no_cache:
+	*insn_cnt_ptr = insn_cnt;
+	return 0;
+}
+
+static bool intel_pt_get_config(struct intel_pt *pt,
+				struct perf_event_attr *attr, u64 *config)
+{
+	if (attr->type == pt->pmu_type) {
+		if (config)
+			*config = attr->config;
+		return true;
+	}
+
+	return false;
+}
+
+static bool intel_pt_exclude_kernel(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each(pt->session->evlist, evsel) {
+		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
+		    !evsel->attr.exclude_kernel)
+			return false;
+	}
+	return true;
+}
+
+static bool intel_pt_return_compression(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+	u64 config;
+
+	if (!pt->noretcomp_bit)
+		return true;
+
+	evlist__for_each(pt->session->evlist, evsel) {
+		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
+		    (config & pt->noretcomp_bit))
+			return false;
+	}
+	return true;
+}
+
+static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+	unsigned int shift;
+	u64 config;
+
+	if (!pt->mtc_freq_bits)
+		return 0;
+
+	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
+		config >>= 1;
+
+	evlist__for_each(pt->session->evlist, evsel) {
+		if (intel_pt_get_config(pt, &evsel->attr, &config))
+			return (config & pt->mtc_freq_bits) >> shift;
+	}
+	return 0;
+}
+
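+/*
+ * Decoding is "timeless" when the trace or the perf events carry no usable
+ * timestamps, in which case queues are processed per thread rather than in
+ * time order.
+ */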
+static bool intel_pt_timeless_decoding(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+	bool timeless_decoding = true;
+	u64 config;
+
+	if (!pt->tsc_bit || !pt->cap_user_time_zero)
+		return true;
+
+	evlist__for_each(pt->session->evlist, evsel) {
+		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
+			return true;
+		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
+			if (config & pt->tsc_bit)
+				timeless_decoding = false;
+			else
+				return true;
+		}
+	}
+	return timeless_decoding;
+}
+
+static bool intel_pt_tracing_kernel(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each(pt->session->evlist, evsel) {
+		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
+		    !evsel->attr.exclude_kernel)
+			return true;
+	}
+	return false;
+}
+
+static bool intel_pt_have_tsc(struct intel_pt *pt)
+{
+	struct perf_evsel *evsel;
+	bool have_tsc = false;
+	u64 config;
+
+	if (!pt->tsc_bit)
+		return false;
+
+	evlist__for_each(pt->session->evlist, evsel) {
+		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
+			if (config & pt->tsc_bit)
+				have_tsc = true;
+			else
+				return false;
+		}
+	}
+	return have_tsc;
+}
+
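+/*
+ * Convert nanoseconds to TSC ticks: the inverse of the perf time conversion,
+ * split into quotient and remainder to avoid overflowing the shift.
+ */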
+static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
+{
+	u64 quot, rem;
+
+	quot = ns / pt->tc.time_mult;
+	rem  = ns % pt->tc.time_mult;
+	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
+		pt->tc.time_mult;
+}
+
+static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
+						   unsigned int queue_nr)
+{
+	struct intel_pt_params params = { .get_trace = 0, };
+	struct intel_pt_queue *ptq;
+
+	ptq = zalloc(sizeof(struct intel_pt_queue));
+	if (!ptq)
+		return NULL;
+
+	if (pt->synth_opts.callchain) {
+		size_t sz = sizeof(struct ip_callchain);
+
+		sz += pt->synth_opts.callchain_sz * sizeof(u64);
+		ptq->chain = zalloc(sz);
+		if (!ptq->chain)
+			goto out_free;
+	}
+
+	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
+	if (!ptq->event_buf)
+		goto out_free;
+
+	ptq->pt = pt;
+	ptq->queue_nr = queue_nr;
+	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
+	ptq->pid = -1;
+	ptq->tid = -1;
+	ptq->cpu = -1;
+	ptq->next_tid = -1;
+
+	params.get_trace = intel_pt_get_trace;
+	params.walk_insn = intel_pt_walk_next_insn;
+	params.data = ptq;
+	params.return_compression = intel_pt_return_compression(pt);
+	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
+	params.mtc_period = intel_pt_mtc_period(pt);
+	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
+	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
+
+	if (pt->synth_opts.instructions) {
+		if (pt->synth_opts.period) {
+			switch (pt->synth_opts.period_type) {
+			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
+				params.period_type =
+						INTEL_PT_PERIOD_INSTRUCTIONS;
+				params.period = pt->synth_opts.period;
+				break;
+			case PERF_ITRACE_PERIOD_TICKS:
+				params.period_type = INTEL_PT_PERIOD_TICKS;
+				params.period = pt->synth_opts.period;
+				break;
+			case PERF_ITRACE_PERIOD_NANOSECS:
+				params.period_type = INTEL_PT_PERIOD_TICKS;
+				params.period = intel_pt_ns_to_ticks(pt,
+							pt->synth_opts.period);
+				break;
+			default:
+				break;
+			}
+		}
+
+		if (!params.period) {
+			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
+			params.period = 1000;
+		}
+	}
+
+	ptq->decoder = intel_pt_decoder_new(&params);
+	if (!ptq->decoder)
+		goto out_free;
+
+	return ptq;
+
+out_free:
+	zfree(&ptq->event_buf);
+	zfree(&ptq->chain);
+	free(ptq);
+	return NULL;
+}
+
+static void intel_pt_free_queue(void *priv)
+{
+	struct intel_pt_queue *ptq = priv;
+
+	if (!ptq)
+		return;
+	thread__zput(ptq->thread);
+	intel_pt_decoder_free(ptq->decoder);
+	zfree(&ptq->event_buf);
+	zfree(&ptq->chain);
+	free(ptq);
+}
+
+static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
+				     struct auxtrace_queue *queue)
+{
+	struct intel_pt_queue *ptq = queue->priv;
+
+	if (queue->tid == -1 || pt->have_sched_switch) {
+		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
+		thread__zput(ptq->thread);
+	}
+
+	if (!ptq->thread && ptq->tid != -1)
+		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);
+
+	if (ptq->thread) {
+		ptq->pid = ptq->thread->pid_;
+		if (queue->cpu == -1)
+			ptq->cpu = ptq->thread->cpu;
+	}
+}
+
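+/*
+ * Translate the decoder state into perf branch sample flags and instruction
+ * length.
+ */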
+static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
+{
+	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
+		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
+	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
+		if (ptq->state->to_ip)
+			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
+				     PERF_IP_FLAG_ASYNC |
+				     PERF_IP_FLAG_INTERRUPT;
+		else
+			ptq->flags = PERF_IP_FLAG_BRANCH |
+				     PERF_IP_FLAG_TRACE_END;
+		ptq->insn_len = 0;
+	} else {
+		if (ptq->state->from_ip)
+			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
+		else
+			ptq->flags = PERF_IP_FLAG_BRANCH |
+				     PERF_IP_FLAG_TRACE_BEGIN;
+		if (ptq->state->flags & INTEL_PT_IN_TX)
+			ptq->flags |= PERF_IP_FLAG_IN_TX;
+		ptq->insn_len = ptq->state->insn_len;
+	}
+}
+
+static int intel_pt_setup_queue(struct intel_pt *pt,
+				struct auxtrace_queue *queue,
+				unsigned int queue_nr)
+{
+	struct intel_pt_queue *ptq = queue->priv;
+
+	if (list_empty(&queue->head))
+		return 0;
+
+	if (!ptq) {
+		ptq = intel_pt_alloc_queue(pt, queue_nr);
+		if (!ptq)
+			return -ENOMEM;
+		queue->priv = ptq;
+
+		if (queue->cpu != -1)
+			ptq->cpu = queue->cpu;
+		ptq->tid = queue->tid;
+
+		if (pt->sampling_mode) {
+			if (pt->timeless_decoding)
+				ptq->step_through_buffers = true;
+			if (pt->timeless_decoding || !pt->have_sched_switch)
+				ptq->use_buffer_pid_tid = true;
+		}
+	}
+
+	if (!ptq->on_heap &&
+	    (!pt->sync_switch ||
+	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
+		const struct intel_pt_state *state;
+		int ret;
+
+		if (pt->timeless_decoding)
+			return 0;
+
+		intel_pt_log("queue %u getting timestamp\n", queue_nr);
+		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
+			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
+		while (1) {
+			state = intel_pt_decode(ptq->decoder);
+			if (state->err) {
+				if (state->err == INTEL_PT_ERR_NODATA) {
+					intel_pt_log("queue %u has no timestamp\n",
+						     queue_nr);
+					return 0;
+				}
+				continue;
+			}
+			if (state->timestamp)
+				break;
+		}
+
+		ptq->timestamp = state->timestamp;
+		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
+			     queue_nr, ptq->timestamp);
+		ptq->state = state;
+		ptq->have_sample = true;
+		intel_pt_sample_flags(ptq);
+		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
+		if (ret)
+			return ret;
+		ptq->on_heap = true;
+	}
+
+	return 0;
+}
+
+static int intel_pt_setup_queues(struct intel_pt *pt)
+{
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < pt->queues.nr_queues; i++) {
+		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int intel_pt_inject_event(union perf_event *event,
+				 struct perf_sample *sample, u64 type,
+				 bool swapped)
+{
+	event->header.size = perf_event__sample_event_size(sample, type, 0);
+	return perf_event__synthesize_sample(event, type, 0, sample, swapped);
+}
+
+static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
+{
+	int ret;
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+
+	event->sample.header.type = PERF_RECORD_SAMPLE;
+	event->sample.header.misc = PERF_RECORD_MISC_USER;
+	event->sample.header.size = sizeof(struct perf_event_header);
+
+	if (!pt->timeless_decoding)
+		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
+
+	sample.ip = ptq->state->from_ip;
+	sample.pid = ptq->pid;
+	sample.tid = ptq->tid;
+	sample.addr = ptq->state->to_ip;
+	sample.id = ptq->pt->branches_id;
+	sample.stream_id = ptq->pt->branches_id;
+	sample.period = 1;
+	sample.cpu = ptq->cpu;
+	sample.flags = ptq->flags;
+	sample.insn_len = ptq->insn_len;
+
+	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
+		return 0;
+
+	if (pt->synth_opts.inject) {
+		ret = intel_pt_inject_event(event, &sample,
+					    pt->branches_sample_type,
+					    pt->synth_needs_swap);
+		if (ret)
+			return ret;
+	}
+
+	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
+	if (ret)
+		pr_err("Intel Processor Trace: failed to deliver branch event, error %d\n",
+		       ret);
+
+	return ret;
+}
+
+static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
+{
+	int ret;
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+
+	event->sample.header.type = PERF_RECORD_SAMPLE;
+	event->sample.header.misc = PERF_RECORD_MISC_USER;
+	event->sample.header.size = sizeof(struct perf_event_header);
+
+	if (!pt->timeless_decoding)
+		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
+
+	sample.ip = ptq->state->from_ip;
+	sample.pid = ptq->pid;
+	sample.tid = ptq->tid;
+	sample.addr = ptq->state->to_ip;
+	sample.id = ptq->pt->instructions_id;
+	sample.stream_id = ptq->pt->instructions_id;
+	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
+	sample.cpu = ptq->cpu;
+	sample.flags = ptq->flags;
+	sample.insn_len = ptq->insn_len;
+
+	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
+
+	if (pt->synth_opts.callchain) {
+		thread_stack__sample(ptq->thread, ptq->chain,
+				     pt->synth_opts.callchain_sz, sample.ip);
+		sample.callchain = ptq->chain;
+	}
+
+	if (pt->synth_opts.inject) {
+		ret = intel_pt_inject_event(event, &sample,
+					    pt->instructions_sample_type,
+					    pt->synth_needs_swap);
+		if (ret)
+			return ret;
+	}
+
+	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
+	if (ret)
+		pr_err("Intel Processor Trace: failed to deliver instruction event, error %d\n",
+		       ret);
+
+	return ret;
+}
+
+static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
+{
+	int ret;
+	struct intel_pt *pt = ptq->pt;
+	union perf_event *event = ptq->event_buf;
+	struct perf_sample sample = { .ip = 0, };
+
+	event->sample.header.type = PERF_RECORD_SAMPLE;
+	event->sample.header.misc = PERF_RECORD_MISC_USER;
+	event->sample.header.size = sizeof(struct perf_event_header);
+
+	if (!pt->timeless_decoding)
+		sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
+
+	sample.ip = ptq->state->from_ip;
+	sample.pid = ptq->pid;
+	sample.tid = ptq->tid;
+	sample.addr = ptq->state->to_ip;
+	sample.id = ptq->pt->transactions_id;
+	sample.stream_id = ptq->pt->transactions_id;
+	sample.period = 1;
+	sample.cpu = ptq->cpu;
+	sample.flags = ptq->flags;
+	sample.insn_len = ptq->insn_len;
+
+	if (pt->synth_opts.callchain) {
+		thread_stack__sample(ptq->thread, ptq->chain,
+				     pt->synth_opts.callchain_sz, sample.ip);
+		sample.callchain = ptq->chain;
+	}
+
+	if (pt->synth_opts.inject) {
+		ret = intel_pt_inject_event(event, &sample,
+					    pt->transactions_sample_type,
+					    pt->synth_needs_swap);
+		if (ret)
+			return ret;
+	}
+
+	ret = perf_session__deliver_synth_event(pt->session, event, &sample);
+	if (ret)
+		pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
+		       ret);
+
+	return ret;
+}
+
+static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
+				pid_t pid, pid_t tid, u64 ip)
+{
+	union perf_event event;
+	char msg[MAX_AUXTRACE_ERROR_MSG];
+	int err;
+
+	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
+
+	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
+			     code, cpu, pid, tid, ip, msg);
+
+	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
+	if (err)
+		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
+		       err);
+
+	return err;
+}
+
+static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
+{
+	struct auxtrace_queue *queue;
+	pid_t tid = ptq->next_tid;
+	int err;
+
+	if (tid == -1)
+		return 0;
+
+	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
+
+	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
+
+	queue = &pt->queues.queue_array[ptq->queue_nr];
+	intel_pt_set_pid_tid_cpu(pt, queue);
+
+	ptq->next_tid = -1;
+
+	return err;
+}
+
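+/*
+ * A synchronous, unconditional branch to the kernel's __switch_to address
+ * indicates a context switch.
+ */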
+static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
+{
+	struct intel_pt *pt = ptq->pt;
+
+	return ip == pt->switch_ip &&
+	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
+	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
+			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
+}
+
+static int intel_pt_sample(struct intel_pt_queue *ptq)
+{
+	const struct intel_pt_state *state = ptq->state;
+	struct intel_pt *pt = ptq->pt;
+	int err;
+
+	if (!ptq->have_sample)
+		return 0;
+
+	ptq->have_sample = false;
+
+	if (pt->sample_instructions &&
+	    (state->type & INTEL_PT_INSTRUCTION)) {
+		err = intel_pt_synth_instruction_sample(ptq);
+		if (err)
+			return err;
+	}
+
+	if (pt->sample_transactions &&
+	    (state->type & INTEL_PT_TRANSACTION)) {
+		err = intel_pt_synth_transaction_sample(ptq);
+		if (err)
+			return err;
+	}
+
+	if (!(state->type & INTEL_PT_BRANCH))
+		return 0;
+
+	if (pt->synth_opts.callchain)
+		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
+				    state->to_ip, ptq->insn_len,
+				    state->trace_nr);
+	else
+		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);
+
+	if (pt->sample_branches) {
+		err = intel_pt_synth_branch_sample(ptq);
+		if (err)
+			return err;
+	}
+
+	if (!pt->sync_switch)
+		return 0;
+
+	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
+		switch (ptq->switch_state) {
+		case INTEL_PT_SS_UNKNOWN:
+		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
+			err = intel_pt_next_tid(pt, ptq);
+			if (err)
+				return err;
+			ptq->switch_state = INTEL_PT_SS_TRACING;
+			break;
+		default:
+			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
+			return 1;
+		}
+	} else if (!state->to_ip) {
+		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
+	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
+		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
+	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
+		   state->to_ip == pt->ptss_ip &&
+		   (ptq->flags & PERF_IP_FLAG_CALL)) {
+		ptq->switch_state = INTEL_PT_SS_TRACING;
+	}
+
+	return 0;
+}
+
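+/*
+ * Find the kernel address of __switch_to, used to recognize context switches,
+ * and optionally the address of perf_trace_sched_switch.
+ */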
+static u64 intel_pt_switch_ip(struct machine *machine, u64 *ptss_ip)
+{
+	struct map *map;
+	struct symbol *sym, *start;
+	u64 ip, switch_ip = 0;
+
+	if (ptss_ip)
+		*ptss_ip = 0;
+
+	map = machine__kernel_map(machine, MAP__FUNCTION);
+	if (!map)
+		return 0;
+
+	if (map__load(map, machine->symbol_filter))
+		return 0;
+
+	start = dso__first_symbol(map->dso, MAP__FUNCTION);
+
+	for (sym = start; sym; sym = dso__next_symbol(sym)) {
+		if (sym->binding == STB_GLOBAL &&
+		    !strcmp(sym->name, "__switch_to")) {
+			ip = map->unmap_ip(map, sym->start);
+			if (ip >= map->start && ip < map->end) {
+				switch_ip = ip;
+				break;
+			}
+		}
+	}
+
+	if (!switch_ip || !ptss_ip)
+		return 0;
+
+	for (sym = start; sym; sym = dso__next_symbol(sym)) {
+		if (!strcmp(sym->name, "perf_trace_sched_switch")) {
+			ip = map->unmap_ip(map, sym->start);
+			if (ip >= map->start && ip < map->end) {
+				*ptss_ip = ip;
+				break;
+			}
+		}
+	}
+
+	return switch_ip;
+}
+
+static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
+{
+	const struct intel_pt_state *state = ptq->state;
+	struct intel_pt *pt = ptq->pt;
+	int err;
+
+	if (!pt->kernel_start) {
+		pt->kernel_start = machine__kernel_start(pt->machine);
+		if (pt->per_cpu_mmaps && pt->have_sched_switch &&
+		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
+		    !pt->sampling_mode) {
+			pt->switch_ip = intel_pt_switch_ip(pt->machine,
+							   &pt->ptss_ip);
+			if (pt->switch_ip) {
+				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
+					     pt->switch_ip, pt->ptss_ip);
+				pt->sync_switch = true;
+			}
+		}
+	}
+
+	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
+		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
+	while (1) {
+		err = intel_pt_sample(ptq);
+		if (err)
+			return err;
+
+		state = intel_pt_decode(ptq->decoder);
+		if (state->err) {
+			if (state->err == INTEL_PT_ERR_NODATA)
+				return 1;
+			if (pt->sync_switch &&
+			    state->from_ip >= pt->kernel_start) {
+				pt->sync_switch = false;
+				intel_pt_next_tid(pt, ptq);
+			}
+			if (pt->synth_opts.errors) {
+				err = intel_pt_synth_error(pt, state->err,
+							   ptq->cpu, ptq->pid,
+							   ptq->tid,
+							   state->from_ip);
+				if (err)
+					return err;
+			}
+			continue;
+		}
+
+		ptq->state = state;
+		ptq->have_sample = true;
+		intel_pt_sample_flags(ptq);
+
+		/* Use estimated TSC upon return to user space */
+		if (pt->est_tsc &&
+		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
+		    state->to_ip && state->to_ip < pt->kernel_start) {
+			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
+				     state->timestamp, state->est_timestamp);
+			ptq->timestamp = state->est_timestamp;
+		/* Use estimated TSC in unknown switch state */
+		} else if (pt->sync_switch &&
+			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
+			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
+			   ptq->next_tid == -1) {
+			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
+				     state->timestamp, state->est_timestamp);
+			ptq->timestamp = state->est_timestamp;
+		} else if (state->timestamp > ptq->timestamp) {
+			ptq->timestamp = state->timestamp;
+		}
+
+		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
+			*timestamp = ptq->timestamp;
+			return 0;
+		}
+	}
+	return 0;
+}
+
+static inline int intel_pt_update_queues(struct intel_pt *pt)
+{
+	if (pt->queues.new_data) {
+		pt->queues.new_data = false;
+		return intel_pt_setup_queues(pt);
+	}
+	return 0;
+}
+
+static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
+{
+	unsigned int queue_nr;
+	u64 ts;
+	int ret;
+
+	while (1) {
+		struct auxtrace_queue *queue;
+		struct intel_pt_queue *ptq;
+
+		if (!pt->heap.heap_cnt)
+			return 0;
+
+		if (pt->heap.heap_array[0].ordinal >= timestamp)
+			return 0;
+
+		queue_nr = pt->heap.heap_array[0].queue_nr;
+		queue = &pt->queues.queue_array[queue_nr];
+		ptq = queue->priv;
+
+		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
+			     queue_nr, pt->heap.heap_array[0].ordinal,
+			     timestamp);
+
+		auxtrace_heap__pop(&pt->heap);
+
+		if (pt->heap.heap_cnt) {
+			ts = pt->heap.heap_array[0].ordinal + 1;
+			if (ts > timestamp)
+				ts = timestamp;
+		} else {
+			ts = timestamp;
+		}
+
+		intel_pt_set_pid_tid_cpu(pt, queue);
+
+		ret = intel_pt_run_decoder(ptq, &ts);
+
+		if (ret < 0) {
+			auxtrace_heap__add(&pt->heap, queue_nr, ts);
+			return ret;
+		}
+
+		if (!ret) {
+			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
+			if (ret < 0)
+				return ret;
+		} else {
+			ptq->on_heap = false;
+		}
+	}
+
+	return 0;
+}
+
+static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
+					    u64 time_)
+{
+	struct auxtrace_queues *queues = &pt->queues;
+	unsigned int i;
+	u64 ts = 0;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
+		struct intel_pt_queue *ptq = queue->priv;
+
+		if (ptq && (tid == -1 || ptq->tid == tid)) {
+			ptq->time = time_;
+			intel_pt_set_pid_tid_cpu(pt, queue);
+			intel_pt_run_decoder(ptq, &ts);
+		}
+	}
+	return 0;
+}
+
+static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
+{
+	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
+				    sample->pid, sample->tid, 0);
+}
+
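+/*
+ * Queues are normally indexed by cpu, so try that queue first and then search
+ * the remaining queues for one whose cpu matches.
+ */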
+static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
+{
+	unsigned i, j;
+
+	if (cpu < 0 || !pt->queues.nr_queues)
+		return NULL;
+
+	if ((unsigned)cpu >= pt->queues.nr_queues)
+		i = pt->queues.nr_queues - 1;
+	else
+		i = cpu;
+
+	if (pt->queues.queue_array[i].cpu == cpu)
+		return pt->queues.queue_array[i].priv;
+
+	for (j = 0; i > 0; j++) {
+		if (pt->queues.queue_array[--i].cpu == cpu)
+			return pt->queues.queue_array[i].priv;
+	}
+
+	for (; j < pt->queues.nr_queues; j++) {
+		if (pt->queues.queue_array[j].cpu == cpu)
+			return pt->queues.queue_array[j].priv;
+	}
+
+	return NULL;
+}
+
+static int intel_pt_process_switch(struct intel_pt *pt,
+				   struct perf_sample *sample)
+{
+	struct intel_pt_queue *ptq;
+	struct perf_evsel *evsel;
+	pid_t tid;
+	int cpu, err;
+
+	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
+	if (evsel != pt->switch_evsel)
+		return 0;
+
+	tid = perf_evsel__intval(evsel, sample, "next_pid");
+	cpu = sample->cpu;
+
+	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
+		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
+		     &pt->tc));
+
+	if (!pt->sync_switch)
+		goto out;
+
+	ptq = intel_pt_cpu_to_ptq(pt, cpu);
+	if (!ptq)
+		goto out;
+
+	switch (ptq->switch_state) {
+	case INTEL_PT_SS_NOT_TRACING:
+		ptq->next_tid = -1;
+		break;
+	case INTEL_PT_SS_UNKNOWN:
+	case INTEL_PT_SS_TRACING:
+		ptq->next_tid = tid;
+		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
+		return 0;
+	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
+		if (!ptq->on_heap) {
+			ptq->timestamp = perf_time_to_tsc(sample->time,
+							  &pt->tc);
+			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
+						 ptq->timestamp);
+			if (err)
+				return err;
+			ptq->on_heap = true;
+		}
+		ptq->switch_state = INTEL_PT_SS_TRACING;
+		break;
+	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
+		ptq->next_tid = tid;
+		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
+		break;
+	default:
+		break;
+	}
+out:
+	return machine__set_current_tid(pt->machine, cpu, -1, tid);
+}
+
+static int intel_pt_process_itrace_start(struct intel_pt *pt,
+					 union perf_event *event,
+					 struct perf_sample *sample)
+{
+	if (!pt->per_cpu_mmaps)
+		return 0;
+
+	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
+		     sample->cpu, event->itrace_start.pid,
+		     event->itrace_start.tid, sample->time,
+		     perf_time_to_tsc(sample->time, &pt->tc));
+
+	return machine__set_current_tid(pt->machine, sample->cpu,
+					event->itrace_start.pid,
+					event->itrace_start.tid);
+}
+
+static int intel_pt_process_event(struct perf_session *session,
+				  union perf_event *event,
+				  struct perf_sample *sample,
+				  struct perf_tool *tool)
+{
+	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+					   auxtrace);
+	u64 timestamp;
+	int err = 0;
+
+	if (dump_trace)
+		return 0;
+
+	if (!tool->ordered_events) {
+		pr_err("Intel Processor Trace requires ordered events\n");
+		return -EINVAL;
+	}
+
+	if (sample->time && sample->time != (u64)-1)
+		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
+	else
+		timestamp = 0;
+
+	if (timestamp || pt->timeless_decoding) {
+		err = intel_pt_update_queues(pt);
+		if (err)
+			return err;
+	}
+
+	if (pt->timeless_decoding) {
+		if (event->header.type == PERF_RECORD_EXIT) {
+			err = intel_pt_process_timeless_queues(pt,
+							       event->comm.tid,
+							       sample->time);
+		}
+	} else if (timestamp) {
+		err = intel_pt_process_queues(pt, timestamp);
+	}
+	if (err)
+		return err;
+
+	if (event->header.type == PERF_RECORD_AUX &&
+	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
+	    pt->synth_opts.errors) {
+		err = intel_pt_lost(pt, sample);
+		if (err)
+			return err;
+	}
+
+	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
+		err = intel_pt_process_switch(pt, sample);
+	else if (event->header.type == PERF_RECORD_ITRACE_START)
+		err = intel_pt_process_itrace_start(pt, event, sample);
+
+	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
+		     perf_event__name(event->header.type), event->header.type,
+		     sample->cpu, sample->time, timestamp);
+
+	return err;
+}
+
+static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
+{
+	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+					   auxtrace);
+	int ret;
+
+	if (dump_trace)
+		return 0;
+
+	if (!tool->ordered_events)
+		return -EINVAL;
+
+	ret = intel_pt_update_queues(pt);
+	if (ret < 0)
+		return ret;
+
+	if (pt->timeless_decoding)
+		return intel_pt_process_timeless_queues(pt, -1,
+							MAX_TIMESTAMP - 1);
+
+	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
+}
+
+static void intel_pt_free_events(struct perf_session *session)
+{
+	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+					   auxtrace);
+	struct auxtrace_queues *queues = &pt->queues;
+	unsigned int i;
+
+	for (i = 0; i < queues->nr_queues; i++) {
+		intel_pt_free_queue(queues->queue_array[i].priv);
+		queues->queue_array[i].priv = NULL;
+	}
+	intel_pt_log_disable();
+	auxtrace_queues__free(queues);
+}
+
+static void intel_pt_free(struct perf_session *session)
+{
+	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+					   auxtrace);
+
+	auxtrace_heap__free(&pt->heap);
+	intel_pt_free_events(session);
+	session->auxtrace = NULL;
+	thread__delete(pt->unknown_thread);
+	free(pt);
+}
+
+static int intel_pt_process_auxtrace_event(struct perf_session *session,
+					   union perf_event *event,
+					   struct perf_tool *tool __maybe_unused)
+{
+	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
+					   auxtrace);
+
+	if (pt->sampling_mode)
+		return 0;
+
+	if (!pt->data_queued) {
+		struct auxtrace_buffer *buffer;
+		off_t data_offset;
+		int fd = perf_data_file__fd(session->file);
+		int err;
+
+		if (perf_data_file__is_pipe(session->file)) {
+			data_offset = 0;
+		} else {
+			data_offset = lseek(fd, 0, SEEK_CUR);
+			if (data_offset == -1)
+				return -errno;
+		}
+
+		err = auxtrace_queues__add_event(&pt->queues, session, event,
+						 data_offset, &buffer);
+		if (err)
+			return err;
+
+		/* Dump here now that we have copied a piped trace out of the pipe */
+		if (dump_trace) {
+			if (auxtrace_buffer__get_data(buffer, fd)) {
+				intel_pt_dump_event(pt, buffer->data,
+						    buffer->size);
+				auxtrace_buffer__put_data(buffer);
+			}
+		}
+	}
+
+	return 0;
+}
+
+struct intel_pt_synth {
+	struct perf_tool dummy_tool;
+	struct perf_session *session;
+};
+
+static int intel_pt_event_synth(struct perf_tool *tool,
+				union perf_event *event,
+				struct perf_sample *sample __maybe_unused,
+				struct machine *machine __maybe_unused)
+{
+	struct intel_pt_synth *intel_pt_synth =
+			container_of(tool, struct intel_pt_synth, dummy_tool);
+
+	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
+						 NULL);
+}
+
+static int intel_pt_synth_event(struct perf_session *session,
+				struct perf_event_attr *attr, u64 id)
+{
+	struct intel_pt_synth intel_pt_synth;
+
+	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
+	intel_pt_synth.session = session;
+
+	return perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
+					   &id, intel_pt_event_synth);
+}
+
+static int intel_pt_synth_events(struct intel_pt *pt,
+				 struct perf_session *session)
+{
+	struct perf_evlist *evlist = session->evlist;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	bool found = false;
+	u64 id;
+	int err;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel->attr.type == pt->pmu_type && evsel->ids) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		pr_debug("There are no selected events with Intel Processor Trace data\n");
+		return 0;
+	}
+
+	memset(&attr, 0, sizeof(struct perf_event_attr));
+	attr.size = sizeof(struct perf_event_attr);
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
+	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
+			    PERF_SAMPLE_PERIOD;
+	if (pt->timeless_decoding)
+		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
+	else
+		attr.sample_type |= PERF_SAMPLE_TIME;
+	if (!pt->per_cpu_mmaps)
+		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
+	attr.exclude_user = evsel->attr.exclude_user;
+	attr.exclude_kernel = evsel->attr.exclude_kernel;
+	attr.exclude_hv = evsel->attr.exclude_hv;
+	attr.exclude_host = evsel->attr.exclude_host;
+	attr.exclude_guest = evsel->attr.exclude_guest;
+	attr.sample_id_all = evsel->attr.sample_id_all;
+	attr.read_format = evsel->attr.read_format;
+
+	id = evsel->id[0] + 1000000000;
+	if (!id)
+		id = 1;
+
+	if (pt->synth_opts.instructions) {
+		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
+		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
+			attr.sample_period =
+				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
+		else
+			attr.sample_period = pt->synth_opts.period;
+		pt->instructions_sample_period = attr.sample_period;
+		if (pt->synth_opts.callchain)
+			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
+		pr_debug("Synthesizing 'instructions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
+			 id, (u64)attr.sample_type);
+		err = intel_pt_synth_event(session, &attr, id);
+		if (err) {
+			pr_err("%s: failed to synthesize 'instructions' event type\n",
+			       __func__);
+			return err;
+		}
+		pt->sample_instructions = true;
+		pt->instructions_sample_type = attr.sample_type;
+		pt->instructions_id = id;
+		id += 1;
+	}
+
+	if (pt->synth_opts.transactions) {
+		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
+		attr.sample_period = 1;
+		if (pt->synth_opts.callchain)
+			attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
+		pr_debug("Synthesizing 'transactions' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
+			 id, (u64)attr.sample_type);
+		err = intel_pt_synth_event(session, &attr, id);
+		if (err) {
+			pr_err("%s: failed to synthesize 'transactions' event type\n",
+			       __func__);
+			return err;
+		}
+		pt->sample_transactions = true;
+		pt->transactions_id = id;
+		id += 1;
+		evlist__for_each(evlist, evsel) {
+			if (evsel->id && evsel->id[0] == pt->transactions_id) {
+				if (evsel->name)
+					zfree(&evsel->name);
+				evsel->name = strdup("transactions");
+				break;
+			}
+		}
+	}
+
+	if (pt->synth_opts.branches) {
+		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
+		attr.sample_period = 1;
+		attr.sample_type |= PERF_SAMPLE_ADDR;
+		attr.sample_type &= ~(u64)PERF_SAMPLE_CALLCHAIN;
+		pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
+			 id, (u64)attr.sample_type);
+		err = intel_pt_synth_event(session, &attr, id);
+		if (err) {
+			pr_err("%s: failed to synthesize 'branches' event type\n",
+			       __func__);
+			return err;
+		}
+		pt->sample_branches = true;
+		pt->branches_sample_type = attr.sample_type;
+		pt->branches_id = id;
+	}
+
+	pt->synth_needs_swap = evsel->needs_swap;
+
+	return 0;
+}
+
+static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
+{
+	struct perf_evsel *evsel;
+
+	evlist__for_each_reverse(evlist, evsel) {
+		const char *name = perf_evsel__name(evsel);
+
+		if (!strcmp(name, "sched:sched_switch"))
+			return evsel;
+	}
+
+	return NULL;
+}
+
+static const char * const intel_pt_info_fmts[] = {
+	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
+	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
+	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
+	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
+	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
+	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
+	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
+	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
+	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
+	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
+	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
+	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
+	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
+	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
+};
+
+static void intel_pt_print_info(u64 *arr, int start, int finish)
+{
+	int i;
+
+	if (!dump_trace)
+		return;
+
+	for (i = start; i <= finish; i++)
+		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
+}
+
+int intel_pt_process_auxtrace_info(union perf_event *event,
+				   struct perf_session *session)
+{
+	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
+	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
+	struct intel_pt *pt;
+	int err;
+
+	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
+					min_sz)
+		return -EINVAL;
+
+	pt = zalloc(sizeof(struct intel_pt));
+	if (!pt)
+		return -ENOMEM;
+
+	err = auxtrace_queues__init(&pt->queues);
+	if (err)
+		goto err_free;
+
+	intel_pt_log_set_name(INTEL_PT_PMU_NAME);
+
+	pt->session = session;
+	pt->machine = &session->machines.host; /* No kvm support */
+	pt->auxtrace_type = auxtrace_info->type;
+	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
+	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
+	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
+	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
+	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
+	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
+	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
+	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
+	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
+	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
+	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
+			    INTEL_PT_PER_CPU_MMAPS);
+
+	if (auxtrace_info->header.size >= sizeof(struct auxtrace_info_event) +
+					(sizeof(u64) * INTEL_PT_CYC_BIT)) {
+		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
+		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
+		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
+		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
+		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
+		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
+				    INTEL_PT_CYC_BIT);
+	}
+
+	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
+	pt->have_tsc = intel_pt_have_tsc(pt);
+	pt->sampling_mode = false;
+	pt->est_tsc = !pt->timeless_decoding;
+
+	pt->unknown_thread = thread__new(999999999, 999999999);
+	if (!pt->unknown_thread) {
+		err = -ENOMEM;
+		goto err_free_queues;
+	}
+	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
+	if (err)
+		goto err_delete_thread;
+	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
+		err = -ENOMEM;
+		goto err_delete_thread;
+	}
+
+	pt->auxtrace.process_event = intel_pt_process_event;
+	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
+	pt->auxtrace.flush_events = intel_pt_flush;
+	pt->auxtrace.free_events = intel_pt_free_events;
+	pt->auxtrace.free = intel_pt_free;
+	session->auxtrace = &pt->auxtrace;
+
+	if (dump_trace)
+		return 0;
+
+	if (pt->have_sched_switch == 1) {
+		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
+		if (!pt->switch_evsel) {
+			pr_err("%s: missing sched_switch event\n", __func__);
+			goto err_delete_thread;
+		}
+	}
+
+	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
+		pt->synth_opts = *session->itrace_synth_opts;
+	} else {
+		itrace_synth_opts__set_default(&pt->synth_opts);
+		if (use_browser != -1) {
+			pt->synth_opts.branches = false;
+			pt->synth_opts.callchain = true;
+		}
+	}
+
+	if (pt->synth_opts.log)
+		intel_pt_log_enable();
+
+	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
+	if (pt->tc.time_mult) {
+		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
+
+		pt->max_non_turbo_ratio = (tsc_freq + 50000000) / 100000000;
+		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
+		intel_pt_log("Maximum non-turbo ratio %u\n",
+			     pt->max_non_turbo_ratio);
+	}
+
+	if (pt->synth_opts.calls)
+		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
+				       PERF_IP_FLAG_TRACE_END;
+	if (pt->synth_opts.returns)
+		pt->branches_filter |= PERF_IP_FLAG_RETURN |
+				       PERF_IP_FLAG_TRACE_BEGIN;
+
+	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
+		symbol_conf.use_callchain = true;
+		if (callchain_register_param(&callchain_param) < 0) {
+			symbol_conf.use_callchain = false;
+			pt->synth_opts.callchain = false;
+		}
+	}
+
+	err = intel_pt_synth_events(pt, session);
+	if (err)
+		goto err_delete_thread;
+
+	err = auxtrace_queues__process_index(&pt->queues, session);
+	if (err)
+		goto err_delete_thread;
+
+	if (pt->queues.populated)
+		pt->data_queued = true;
+
+	if (pt->timeless_decoding)
+		pr_debug2("Intel PT decoding without timestamps\n");
+
+	return 0;
+
+err_delete_thread:
+	thread__delete(pt->unknown_thread);
+err_free_queues:
+	intel_pt_log_disable();
+	auxtrace_queues__free(&pt->queues);
+	session->auxtrace = NULL;
+err_free:
+	free(pt);
+	return err;
+}
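
The "Maximum non-turbo ratio" block near the end of intel_pt_process_auxtrace_info() rounds the measured TSC frequency to the nearest multiple of 100 MHz. A minimal standalone sketch of that arithmetic (not part of the patch), assuming a hypothetical 2.294 GHz TSC:

	#include <stdio.h>
	#include <inttypes.h>

	int main(void)
	{
		/* Hypothetical TSC frequency in Hz; the real code derives it
		 * from the perf.data time conversion parameters. */
		uint64_t tsc_freq = 2294000000ULL;

		/* Same rounding as above: adding half of 100 MHz before the
		 * integer division rounds to the nearest ratio, so 2.294 GHz
		 * yields 23 rather than the floor value 22. */
		unsigned int ratio = (tsc_freq + 50000000) / 100000000;

		printf("max non-turbo ratio %u\n", ratio);	/* prints 23 */
		return 0;
	}
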
diff --git a/tools/perf/util/intel-pt.h b/tools/perf/util/intel-pt.h
new file mode 100644
index 0000000..0065949
--- /dev/null
+++ b/tools/perf/util/intel-pt.h
@@ -0,0 +1,56 @@
+/*
+ * intel_pt.h: Intel Processor Trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef INCLUDE__PERF_INTEL_PT_H__
+#define INCLUDE__PERF_INTEL_PT_H__
+
+#define INTEL_PT_PMU_NAME "intel_pt"
+
+enum {
+	INTEL_PT_PMU_TYPE,
+	INTEL_PT_TIME_SHIFT,
+	INTEL_PT_TIME_MULT,
+	INTEL_PT_TIME_ZERO,
+	INTEL_PT_CAP_USER_TIME_ZERO,
+	INTEL_PT_TSC_BIT,
+	INTEL_PT_NORETCOMP_BIT,
+	INTEL_PT_HAVE_SCHED_SWITCH,
+	INTEL_PT_SNAPSHOT_MODE,
+	INTEL_PT_PER_CPU_MMAPS,
+	INTEL_PT_MTC_BIT,
+	INTEL_PT_MTC_FREQ_BITS,
+	INTEL_PT_TSC_CTC_N,
+	INTEL_PT_TSC_CTC_D,
+	INTEL_PT_CYC_BIT,
+	INTEL_PT_AUXTRACE_PRIV_MAX,
+};
+
+#define INTEL_PT_AUXTRACE_PRIV_SIZE (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64))
+
+struct auxtrace_record;
+struct perf_tool;
+union perf_event;
+struct perf_session;
+struct perf_event_attr;
+struct perf_pmu;
+
+struct auxtrace_record *intel_pt_recording_init(int *err);
+
+int intel_pt_process_auxtrace_info(union perf_event *event,
+				   struct perf_session *session);
+
+struct perf_event_attr *intel_pt_pmu_default_config(struct perf_pmu *pmu);
+
+#endif
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
new file mode 100644
index 0000000..4f6a478
--- /dev/null
+++ b/tools/perf/util/llvm-utils.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015, Huawei Inc.
+ */
+
+#include <stdio.h>
+#include <sys/utsname.h>
+#include "util.h"
+#include "debug.h"
+#include "llvm-utils.h"
+#include "cache.h"
+
+#define CLANG_BPF_CMD_DEFAULT_TEMPLATE				\
+		"$CLANG_EXEC -D__KERNEL__ $CLANG_OPTIONS "	\
+		"$KERNEL_INC_OPTIONS -Wno-unused-value "	\
+		"-Wno-pointer-sign -working-directory "		\
+		"$WORKING_DIR -c \"$CLANG_SOURCE\" -target bpf -O2 -o -"
+
+struct llvm_param llvm_param = {
+	.clang_path = "clang",
+	.clang_bpf_cmd_template = CLANG_BPF_CMD_DEFAULT_TEMPLATE,
+	.clang_opt = NULL,
+	.kbuild_dir = NULL,
+	.kbuild_opts = NULL,
+	.user_set_param = false,
+};
+
+int perf_llvm_config(const char *var, const char *value)
+{
+	if (prefixcmp(var, "llvm."))
+		return 0;
+	var += sizeof("llvm.") - 1;
+
+	if (!strcmp(var, "clang-path"))
+		llvm_param.clang_path = strdup(value);
+	else if (!strcmp(var, "clang-bpf-cmd-template"))
+		llvm_param.clang_bpf_cmd_template = strdup(value);
+	else if (!strcmp(var, "clang-opt"))
+		llvm_param.clang_opt = strdup(value);
+	else if (!strcmp(var, "kbuild-dir"))
+		llvm_param.kbuild_dir = strdup(value);
+	else if (!strcmp(var, "kbuild-opts"))
+		llvm_param.kbuild_opts = strdup(value);
+	else
+		return -1;
+	llvm_param.user_set_param = true;
+	return 0;
+}
+
+static int
+search_program(const char *def, const char *name,
+	       char *output)
+{
+	char *env, *path, *tmp = NULL;
+	char buf[PATH_MAX];
+	int ret;
+
+	output[0] = '\0';
+	if (def && def[0] != '\0') {
+		if (def[0] == '/') {
+			if (access(def, F_OK) == 0) {
+				strlcpy(output, def, PATH_MAX);
+				return 0;
+			}
+		} else if (def[0] != '\0')
+			name = def;
+	}
+
+	env = getenv("PATH");
+	if (!env)
+		return -1;
+	env = strdup(env);
+	if (!env)
+		return -1;
+
+	ret = -ENOENT;
+	path = strtok_r(env, ":",  &tmp);
+	while (path) {
+		scnprintf(buf, sizeof(buf), "%s/%s", path, name);
+		if (access(buf, F_OK) == 0) {
+			strlcpy(output, buf, PATH_MAX);
+			ret = 0;
+			break;
+		}
+		path = strtok_r(NULL, ":", &tmp);
+	}
+
+	free(env);
+	return ret;
+}
+
+#define READ_SIZE	4096
+static int
+read_from_pipe(const char *cmd, void **p_buf, size_t *p_read_sz)
+{
+	int err = 0;
+	void *buf = NULL;
+	FILE *file = NULL;
+	size_t read_sz = 0, buf_sz = 0;
+
+	file = popen(cmd, "r");
+	if (!file) {
+		pr_err("ERROR: unable to popen cmd: %s\n",
+		       strerror(errno));
+		return -EINVAL;
+	}
+
+	while (!feof(file) && !ferror(file)) {
+		/*
+		 * Make buf_sz always have one extra byte of space so we
+		 * can put '\0' there.
+		 */
+		if (buf_sz - read_sz < READ_SIZE + 1) {
+			void *new_buf;
+
+			buf_sz = read_sz + READ_SIZE + 1;
+			new_buf = realloc(buf, buf_sz);
+
+			if (!new_buf) {
+				pr_err("ERROR: failed to realloc memory\n");
+				err = -ENOMEM;
+				goto errout;
+			}
+
+			buf = new_buf;
+		}
+		read_sz += fread(buf + read_sz, 1, READ_SIZE, file);
+	}
+
+	if (buf_sz - read_sz < 1) {
+		pr_err("ERROR: internal error\n");
+		err = -EINVAL;
+		goto errout;
+	}
+
+	if (ferror(file)) {
+		pr_err("ERROR: error occurred when reading from pipe: %s\n",
+		       strerror(errno));
+		err = -EIO;
+		goto errout;
+	}
+
+	err = WEXITSTATUS(pclose(file));
+	file = NULL;
+	if (err) {
+		err = -EINVAL;
+		goto errout;
+	}
+
+	/*
+	 * If buf is a string, give it a terminating '\0' to make our life
+	 * easier. If buf is not a string, that '\0' lies beyond the space
+	 * indicated by read_sz, so the caller won't even notice it.
+	 */
+	((char *)buf)[read_sz] = '\0';
+
+	if (!p_buf)
+		free(buf);
+	else
+		*p_buf = buf;
+
+	if (p_read_sz)
+		*p_read_sz = read_sz;
+	return 0;
+
+errout:
+	if (file)
+		pclose(file);
+	free(buf);
+	if (p_buf)
+		*p_buf = NULL;
+	if (p_read_sz)
+		*p_read_sz = 0;
+	return err;
+}
+
+static inline void
+force_set_env(const char *var, const char *value)
+{
+	if (value) {
+		setenv(var, value, 1);
+		pr_debug("set env: %s=%s\n", var, value);
+	} else {
+		unsetenv(var);
+		pr_debug("unset env: %s\n", var);
+	}
+}
+
+static void
+version_notice(void)
+{
+	pr_err(
+"     \tLLVM 3.7 or newer is required. It can be found at http://llvm.org\n"
+"     \tYou may want to try git trunk:\n"
+"     \t\tgit clone http://llvm.org/git/llvm.git\n"
+"     \t\t     and\n"
+"     \t\tgit clone http://llvm.org/git/clang.git\n\n"
+"     \tOr fetch the latest clang/llvm 3.7 from pre-built llvm packages for\n"
+"     \tdebian/ubuntu:\n"
+"     \t\thttp://llvm.org/apt\n\n"
+"     \tIf you are using an old version of clang, change 'clang-bpf-cmd-template'\n"
+"     \toption in [llvm] section of ~/.perfconfig to:\n\n"
+"     \t  \"$CLANG_EXEC $CLANG_OPTIONS $KERNEL_INC_OPTIONS \\\n"
+"     \t     -working-directory $WORKING_DIR -c $CLANG_SOURCE \\\n"
+"     \t     -emit-llvm -o - | /path/to/llc -march=bpf -filetype=obj -o -\"\n"
+"     \t(Replace /path/to/llc with path to your llc)\n\n"
+);
+}
+
+static int detect_kbuild_dir(char **kbuild_dir)
+{
+	const char *test_dir = llvm_param.kbuild_dir;
+	const char *prefix_dir = "";
+	const char *suffix_dir = "";
+
+	char *autoconf_path;
+	struct utsname utsname;
+
+	int err;
+
+	if (!test_dir) {
+		err = uname(&utsname);
+		if (err) {
+			pr_warning("uname failed: %s\n", strerror(errno));
+			return -EINVAL;
+		}
+
+		test_dir = utsname.release;
+		prefix_dir = "/lib/modules/";
+		suffix_dir = "/build";
+	}
+
+	err = asprintf(&autoconf_path, "%s%s%s/include/generated/autoconf.h",
+		       prefix_dir, test_dir, suffix_dir);
+	if (err < 0)
+		return -ENOMEM;
+
+	if (access(autoconf_path, R_OK) == 0) {
+		free(autoconf_path);
+
+		err = asprintf(kbuild_dir, "%s%s%s", prefix_dir, test_dir,
+			       suffix_dir);
+		if (err < 0)
+			return -ENOMEM;
+		return 0;
+	}
+	free(autoconf_path);
+	return -ENOENT;
+}
+
+static const char *kinc_fetch_script =
+"#!/usr/bin/env sh\n"
+"if ! test -d \"$KBUILD_DIR\"\n"
+"then\n"
+"	exit -1\n"
+"fi\n"
+"if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
+"then\n"
+"	exit -1\n"
+"fi\n"
+"TMPDIR=`mktemp -d`\n"
+"if test -z \"$TMPDIR\"\n"
+"then\n"
+"    exit -1\n"
+"fi\n"
+"cat << EOF > $TMPDIR/Makefile\n"
+"obj-y := dummy.o\n"
+"\\$(obj)/%.o: \\$(src)/%.c\n"
+"\t@echo -n \"\\$(NOSTDINC_FLAGS) \\$(LINUXINCLUDE) \\$(EXTRA_CFLAGS)\"\n"
+"EOF\n"
+"touch $TMPDIR/dummy.c\n"
+"make -s -C $KBUILD_DIR M=$TMPDIR $KBUILD_OPTS dummy.o 2>/dev/null\n"
+"RET=$?\n"
+"rm -rf $TMPDIR\n"
+"exit $RET\n";
+
+static inline void
+get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
+{
+	int err;
+
+	if (!kbuild_dir || !kbuild_include_opts)
+		return;
+
+	*kbuild_dir = NULL;
+	*kbuild_include_opts = NULL;
+
+	if (llvm_param.kbuild_dir && !llvm_param.kbuild_dir[0]) {
+		pr_debug("[llvm.kbuild-dir] is set to \"\" deliberately.\n");
+		pr_debug("Skip kbuild options detection.\n");
+		return;
+	}
+
+	err = detect_kbuild_dir(kbuild_dir);
+	if (err) {
+		pr_warning(
+"WARNING:\tunable to get correct kernel build directory.\n"
+"Hint:\tSet correct kbuild directory using 'kbuild-dir' option in [llvm]\n"
+"     \tsection of ~/.perfconfig or set it to \"\" to suppress kbuild\n"
+"     \tdetection.\n\n");
+		return;
+	}
+
+	pr_debug("Kernel build dir is set to %s\n", *kbuild_dir);
+	force_set_env("KBUILD_DIR", *kbuild_dir);
+	force_set_env("KBUILD_OPTS", llvm_param.kbuild_opts);
+	err = read_from_pipe(kinc_fetch_script,
+			     (void **)kbuild_include_opts,
+			     NULL);
+	if (err) {
+		pr_warning(
+"WARNING:\tunable to get kernel include directories from '%s'\n"
+"Hint:\tTry setting clang include options using 'clang-bpf-cmd-template'\n"
+"     \toption in [llvm] section of ~/.perfconfig and set 'kbuild-dir'\n"
+"     \toption in [llvm] to \"\" to suppress this detection.\n\n",
+			*kbuild_dir);
+
+		free(*kbuild_dir);
+		*kbuild_dir = NULL;
+		return;
+	}
+
+	pr_debug("include option is set to %s\n", *kbuild_include_opts);
+}
+
+int llvm__compile_bpf(const char *path, void **p_obj_buf,
+		      size_t *p_obj_buf_sz)
+{
+	int err;
+	char clang_path[PATH_MAX];
+	const char *clang_opt = llvm_param.clang_opt;
+	const char *template = llvm_param.clang_bpf_cmd_template;
+	char *kbuild_dir = NULL, *kbuild_include_opts = NULL;
+	void *obj_buf = NULL;
+	size_t obj_buf_sz;
+
+	if (!template)
+		template = CLANG_BPF_CMD_DEFAULT_TEMPLATE;
+
+	err = search_program(llvm_param.clang_path,
+			     "clang", clang_path);
+	if (err) {
+		pr_err(
+"ERROR:\tunable to find clang.\n"
+"Hint:\tTry to install latest clang/llvm to support BPF. Check your $PATH\n"
+"     \tand 'clang-path' option in [llvm] section of ~/.perfconfig.\n");
+		version_notice();
+		return -ENOENT;
+	}
+
+	/*
+	 * This is optional work. Even if it fails we can continue our
+	 * work, so there is no need to check the error return.
+	 */
+	get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
+
+	force_set_env("CLANG_EXEC", clang_path);
+	force_set_env("CLANG_OPTIONS", clang_opt);
+	force_set_env("KERNEL_INC_OPTIONS", kbuild_include_opts);
+	force_set_env("WORKING_DIR", kbuild_dir ? : ".");
+
+	/*
+	 * Since we may reset clang's working dir, the path of the source
+	 * file should be converted to an absolute path, except when we
+	 * want stdin to be the source file (testing).
+	 */
+	force_set_env("CLANG_SOURCE",
+		      (path[0] == '-') ? path :
+		      make_nonrelative_path(path));
+
+	pr_debug("llvm compiling command template: %s\n", template);
+	err = read_from_pipe(template, &obj_buf, &obj_buf_sz);
+	if (err) {
+		pr_err("ERROR:\tunable to compile %s\n", path);
+		pr_err("Hint:\tCheck error message shown above.\n");
+		pr_err("Hint:\tYou can also pre-compile it into .o using:\n");
+		pr_err("     \t\tclang -target bpf -O2 -c %s\n", path);
+		pr_err("     \twith proper -I and -D options.\n");
+		goto errout;
+	}
+
+	free(kbuild_dir);
+	free(kbuild_include_opts);
+	if (!p_obj_buf)
+		free(obj_buf);
+	else
+		*p_obj_buf = obj_buf;
+
+	if (p_obj_buf_sz)
+		*p_obj_buf_sz = obj_buf_sz;
+	return 0;
+errout:
+	free(kbuild_dir);
+	free(kbuild_include_opts);
+	free(obj_buf);
+	if (p_obj_buf)
+		*p_obj_buf = NULL;
+	if (p_obj_buf_sz)
+		*p_obj_buf_sz = 0;
+	return err;
+}
+
+int llvm__search_clang(void)
+{
+	char clang_path[PATH_MAX];
+
+	return search_program(llvm_param.clang_path, "clang", clang_path);
+}
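
The compile path above works by exporting $CLANG_EXEC, $KERNEL_INC_OPTIONS and friends with force_set_env() and then handing the whole command template to popen(), so the shell that popen() spawns performs the variable substitution. A minimal standalone sketch of that pattern (not part of the patch), using a made-up variable and command:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char buf[256];
		FILE *f;

		/* Hypothetical variable; llvm-utils.c exports CLANG_EXEC,
		 * CLANG_OPTIONS, KERNEL_INC_OPTIONS, WORKING_DIR and
		 * CLANG_SOURCE in the same way via force_set_env(). */
		setenv("DEMO_EXEC", "/usr/bin/true", 1);

		/* popen() runs the string through "sh -c", so the shell,
		 * not this program, expands $DEMO_EXEC. */
		f = popen("echo would run $DEMO_EXEC", "r");
		if (!f)
			return 1;
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);
		return pclose(f) ? 1 : 0;
	}
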
diff --git a/tools/perf/util/llvm-utils.h b/tools/perf/util/llvm-utils.h
new file mode 100644
index 0000000..5b3cf1c
--- /dev/null
+++ b/tools/perf/util/llvm-utils.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015, Wang Nan <wangnan0@huawei.com>
+ * Copyright (C) 2015, Huawei Inc.
+ */
+#ifndef __LLVM_UTILS_H
+#define __LLVM_UTILS_H
+
+#include "debug.h"
+
+struct llvm_param {
+	/* Path of clang executable */
+	const char *clang_path;
+	/*
+	 * Template of the clang command for compiling BPF. 5 environment
+	 * variables can be used:
+	 *   $CLANG_EXEC:		Path to clang.
+	 *   $CLANG_OPTIONS:		Extra options to clang.
+	 *   $KERNEL_INC_OPTIONS:	Kernel include directories.
+	 *   $WORKING_DIR:		Kernel source directory.
+	 *   $CLANG_SOURCE:		Source file to be compiled.
+	 */
+	const char *clang_bpf_cmd_template;
+	/* Will be filled in $CLANG_OPTIONS */
+	const char *clang_opt;
+	/* Where to find kbuild system */
+	const char *kbuild_dir;
+	/*
+	 * Arguments passed to make, like 'ARCH=arm' if doing cross
+	 * compiling. Should not be used for dynamic compiling.
+	 */
+	const char *kbuild_opts;
+	/*
+	 * Default is false. If one of the above fields is set by the user
+	 * explicitly then user_set_param is set to true. This is used
+	 * by 'perf test': if the user doesn't set anything in .perfconfig
+	 * and clang is not found, don't trigger the llvm test.
+	 */
+	bool user_set_param;
+};
+
+extern struct llvm_param llvm_param;
+extern int perf_llvm_config(const char *var, const char *value);
+
+extern int llvm__compile_bpf(const char *path, void **p_obj_buf,
+			     size_t *p_obj_buf_sz);
+
+/* This function is for test__llvm() use only */
+extern int llvm__search_clang(void);
+#endif
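
A minimal usage sketch of the interface declared above (not part of the patch), assuming it is compiled inside the perf source tree; /tmp/demo_prog.c is a hypothetical BPF source file and error handling is reduced to the bare minimum:

	#include <stdio.h>
	#include <stdlib.h>
	#include "util/llvm-utils.h"

	/* Compile a BPF source with the configured clang and report the
	 * size of the resulting object buffer, which the caller owns. */
	static int compile_demo(void)
	{
		void *obj_buf = NULL;
		size_t obj_buf_sz = 0;
		int err;

		err = llvm__compile_bpf("/tmp/demo_prog.c", &obj_buf, &obj_buf_sz);
		if (err)
			return err;

		printf("compiled %zu bytes of BPF object code\n", obj_buf_sz);
		free(obj_buf);
		return 0;
	}
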
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f1a4c83..6309f7c 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -250,7 +250,7 @@
 			static struct strlist *seen;
 
 			if (!seen)
-				seen = strlist__new(true, NULL);
+				seen = strlist__new(NULL, NULL);
 
 			if (!strlist__has_entry(seen, path)) {
 				pr_err("Can't access file %s\n", path);
@@ -550,6 +550,14 @@
 	return 0;
 }
 
+int machine__process_switch_event(struct machine *machine __maybe_unused,
+				  union perf_event *event)
+{
+	if (dump_trace)
+		perf_event__fprintf_switch(event, stdout);
+	return 0;
+}
+
 struct map *machine__findnew_module_map(struct machine *machine, u64 start,
 					const char *filename)
 {
@@ -1467,6 +1475,9 @@
 		ret = machine__process_itrace_start_event(machine, event); break;
 	case PERF_RECORD_LOST_SAMPLES:
 		ret = machine__process_lost_samples_event(machine, event, sample); break;
+	case PERF_RECORD_SWITCH:
+	case PERF_RECORD_SWITCH_CPU_WIDE:
+		ret = machine__process_switch_event(machine, event); break;
 	default:
 		ret = -1;
 		break;
@@ -2009,3 +2020,17 @@
 {
 	return dsos__findnew(&machine->dsos, filename);
 }
+
+char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
+{
+	struct machine *machine = vmachine;
+	struct map *map;
+	struct symbol *sym = map_groups__find_symbol(&machine->kmaps, MAP__FUNCTION, *addrp, &map,  NULL);
+
+	if (sym == NULL)
+		return NULL;
+
+	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
+	*addrp = map->unmap_ip(map, sym->start);
+	return sym->name;
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 887798e..ea5cb4a 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -87,6 +87,8 @@
 			       union perf_event *event);
 int machine__process_itrace_start_event(struct machine *machine,
 					union perf_event *event);
+int machine__process_switch_event(struct machine *machine __maybe_unused,
+				  union perf_event *event);
 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
 				struct perf_sample *sample);
 int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
@@ -237,5 +239,9 @@
 pid_t machine__get_current_tid(struct machine *machine, int cpu);
 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
 			     pid_t tid);
+/*
+ * For use with libtraceevent's pevent_set_function_resolver()
+ */
+char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp);
 
 #endif /* __PERF_MACHINE_H */
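
The resolver declared above is meant to be handed to libtraceevent so that kernel addresses in trace output are printed as symbol names. A minimal sketch of the wiring (not part of the patch), assuming pevent_set_function_resolver() takes the pevent handle, the resolver callback and a private pointer, and assuming the session keeps its pevent under tevent.pevent; both are assumptions about surrounding perf/libtraceevent code, not guaranteed by this hunk:

	#include "util/session.h"
	#include "util/machine.h"
	#include "util/trace-event.h"

	/* Let libtraceevent resolve kernel addresses through perf's view of
	 * the host machine instead of re-reading /proc/kallsyms. */
	static void hook_kernel_resolver(struct perf_session *session)
	{
		pevent_set_function_resolver(session->tevent.pevent,
					     machine__resolve_kernel_addr,
					     &session->machines.host);
	}
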
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index b5a5e9c..b1c475d 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -224,6 +224,20 @@
 	return map;
 }
 
+/*
+ * Use this and __map__is_kmodule() for map instances that are in
+ * machine->kmaps, and thus have map->groups->machine all properly set, to
+ * disambiguate between the kernel and modules.
+ *
+ * When the need arises, introduce map__is_{kernel,kmodule}() that
+ * checks (map->groups != NULL && map->groups->machine != NULL &&
+ * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
+ */
+bool __map__is_kernel(const struct map *map)
+{
+	return map->groups->machine->vmlinux_maps[map->type] == map;
+}
+
 static void map__exit(struct map *map)
 {
 	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
@@ -334,9 +348,18 @@
 	return dso__find_symbol_by_name(map->dso, map->type, name);
 }
 
-struct map *map__clone(struct map *map)
+struct map *map__clone(struct map *from)
 {
-	return memdup(map, sizeof(*map));
+	struct map *map = memdup(from, sizeof(*map));
+
+	if (map != NULL) {
+		atomic_set(&map->refcnt, 1);
+		RB_CLEAR_NODE(&map->rb_node);
+		dso__get(map->dso);
+		map->groups = NULL;
+	}
+
+	return map;
 }
 
 int map__overlap(struct map *l, struct map *r)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index d73e687..57829e8 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -256,4 +256,11 @@
 struct map *map_groups__find_by_name(struct map_groups *mg,
 				     enum map_type type, const char *name);
 
+bool __map__is_kernel(const struct map *map);
+
+static inline bool __map__is_kmodule(const struct map *map)
+{
+	return !__map__is_kernel(map);
+}
+
 #endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 52be201..b1b9e23 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -220,6 +220,9 @@
 	else if (last_ts <= limit)
 		oe->last = list_entry(head->prev, struct ordered_event, list);
 
+	if (show_progress)
+		ui_progress__finish();
+
 	return 0;
 }
 
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 09f8d23..d826e6f 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -276,7 +276,8 @@
 static struct perf_evsel *
 __add_event(struct list_head *list, int *idx,
 	    struct perf_event_attr *attr,
-	    char *name, struct cpu_map *cpus)
+	    char *name, struct cpu_map *cpus,
+	    struct list_head *config_terms)
 {
 	struct perf_evsel *evsel;
 
@@ -291,14 +292,19 @@
 
 	if (name)
 		evsel->name = strdup(name);
+
+	if (config_terms)
+		list_splice(config_terms, &evsel->config_terms);
+
 	list_add_tail(&evsel->node, list);
 	return evsel;
 }
 
 static int add_event(struct list_head *list, int *idx,
-		     struct perf_event_attr *attr, char *name)
+		     struct perf_event_attr *attr, char *name,
+		     struct list_head *config_terms)
 {
-	return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM;
+	return __add_event(list, idx, attr, name, NULL, config_terms) ? 0 : -ENOMEM;
 }
 
 static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -377,7 +383,7 @@
 	memset(&attr, 0, sizeof(attr));
 	attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
 	attr.type = PERF_TYPE_HW_CACHE;
-	return add_event(list, idx, &attr, name);
+	return add_event(list, idx, &attr, name, NULL);
 }
 
 static int add_tracepoint(struct list_head *list, int *idx,
@@ -539,7 +545,7 @@
 	attr.type = PERF_TYPE_BREAKPOINT;
 	attr.sample_period = 1;
 
-	return add_event(list, idx, &attr, NULL);
+	return add_event(list, idx, &attr, NULL, NULL);
 }
 
 static int check_type_val(struct parse_events_term *term,
@@ -590,7 +596,9 @@
 		break;
 	case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
 		CHECK_TYPE_VAL(NUM);
-		attr->sample_period = term->val.num;
+		break;
+	case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
+		CHECK_TYPE_VAL(NUM);
 		break;
 	case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
 		/*
@@ -598,6 +606,20 @@
 		 * attr->branch_sample_type = term->val.num;
 		 */
 		break;
+	case PARSE_EVENTS__TERM_TYPE_TIME:
+		CHECK_TYPE_VAL(NUM);
+		if (term->val.num > 1) {
+			err->str = strdup("expected 0 or 1");
+			err->idx = term->err_val;
+			return -EINVAL;
+		}
+		break;
+	case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
+		CHECK_TYPE_VAL(STR);
+		break;
+	case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
+		CHECK_TYPE_VAL(NUM);
+		break;
 	case PARSE_EVENTS__TERM_TYPE_NAME:
 		CHECK_TYPE_VAL(STR);
 		break;
@@ -622,22 +644,71 @@
 	return 0;
 }
 
+static int get_config_terms(struct list_head *head_config,
+			    struct list_head *head_terms __maybe_unused)
+{
+#define ADD_CONFIG_TERM(__type, __name, __val)			\
+do {								\
+	struct perf_evsel_config_term *__t;			\
+								\
+	__t = zalloc(sizeof(*__t));				\
+	if (!__t)						\
+		return -ENOMEM;					\
+								\
+	INIT_LIST_HEAD(&__t->list);				\
+	__t->type       = PERF_EVSEL__CONFIG_TERM_ ## __type;	\
+	__t->val.__name = __val;				\
+	list_add_tail(&__t->list, head_terms);			\
+} while (0)
+
+	struct parse_events_term *term;
+
+	list_for_each_entry(term, head_config, list) {
+		switch (term->type_term) {
+		case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
+			ADD_CONFIG_TERM(PERIOD, period, term->val.num);
+			break;
+		case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
+			ADD_CONFIG_TERM(FREQ, freq, term->val.num);
+			break;
+		case PARSE_EVENTS__TERM_TYPE_TIME:
+			ADD_CONFIG_TERM(TIME, time, term->val.num);
+			break;
+		case PARSE_EVENTS__TERM_TYPE_CALLGRAPH:
+			ADD_CONFIG_TERM(CALLGRAPH, callgraph, term->val.str);
+			break;
+		case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
+			ADD_CONFIG_TERM(STACK_USER, stack_user, term->val.num);
+			break;
+		default:
+			break;
+		}
+	}
+#undef ADD_CONFIG_TERM
+	return 0;
+}
+
 int parse_events_add_numeric(struct parse_events_evlist *data,
 			     struct list_head *list,
 			     u32 type, u64 config,
 			     struct list_head *head_config)
 {
 	struct perf_event_attr attr;
+	LIST_HEAD(config_terms);
 
 	memset(&attr, 0, sizeof(attr));
 	attr.type = type;
 	attr.config = config;
 
-	if (head_config &&
-	    config_attr(&attr, head_config, data->error))
-		return -EINVAL;
+	if (head_config) {
+		if (config_attr(&attr, head_config, data->error))
+			return -EINVAL;
 
-	return add_event(list, &data->idx, &attr, NULL);
+		if (get_config_terms(head_config, &config_terms))
+			return -ENOMEM;
+	}
+
+	return add_event(list, &data->idx, &attr, NULL, &config_terms);
 }
 
 static int parse_events__is_name_term(struct parse_events_term *term)
@@ -664,6 +735,7 @@
 	struct perf_pmu_info info;
 	struct perf_pmu *pmu;
 	struct perf_evsel *evsel;
+	LIST_HEAD(config_terms);
 
 	pmu = perf_pmu__find(name);
 	if (!pmu)
@@ -678,7 +750,7 @@
 
 	if (!head_config) {
 		attr.type = pmu->type;
-		evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus);
+		evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus, NULL);
 		return evsel ? 0 : -ENOMEM;
 	}
 
@@ -692,11 +764,15 @@
 	if (config_attr(&attr, head_config, data->error))
 		return -EINVAL;
 
+	if (get_config_terms(head_config, &config_terms))
+		return -ENOMEM;
+
 	if (perf_pmu__config(pmu, &attr, head_config, data->error))
 		return -EINVAL;
 
 	evsel = __add_event(list, &data->idx, &attr,
-			    pmu_event_name(head_config), pmu->cpus);
+			    pmu_event_name(head_config), pmu->cpus,
+			    &config_terms);
 	if (evsel) {
 		evsel->unit = info.unit;
 		evsel->scale = info.scale;
@@ -1065,8 +1141,13 @@
 	perf_pmu__parse_cleanup();
 	if (!ret) {
 		int entries = data.idx - evlist->nr_entries;
+		struct perf_evsel *last;
+
 		perf_evlist__splice_list_tail(evlist, &data.list, entries);
 		evlist->nr_groups += data.nr_groups;
+		last = perf_evlist__last(evlist);
+		last->cmdline_group_boundary = true;
+
 		return 0;
 	}
 
@@ -1105,7 +1186,7 @@
 		 * Maximum error index indent, we will cut
 		 * the event string if it's bigger.
 		 */
-		int max_err_idx = 10;
+		int max_err_idx = 13;
 
 		/*
 		 * Let's be specific with the message when
@@ -1162,30 +1243,93 @@
 	return ret;
 }
 
-int parse_filter(const struct option *opt, const char *str,
-		 int unset __maybe_unused)
+static int
+foreach_evsel_in_last_glob(struct perf_evlist *evlist,
+			   int (*func)(struct perf_evsel *evsel,
+				       const void *arg),
+			   const void *arg)
 {
-	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
 	struct perf_evsel *last = NULL;
+	int err;
 
 	if (evlist->nr_entries > 0)
 		last = perf_evlist__last(evlist);
 
-	if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
+	do {
+		err = (*func)(last, arg);
+		if (err)
+			return -1;
+		if (!last)
+			return 0;
+
+		if (last->node.prev == &evlist->entries)
+			return 0;
+		last = list_entry(last->node.prev, struct perf_evsel, node);
+	} while (!last->cmdline_group_boundary);
+
+	return 0;
+}
+
+static int set_filter(struct perf_evsel *evsel, const void *arg)
+{
+	const char *str = arg;
+
+	if (evsel == NULL || evsel->attr.type != PERF_TYPE_TRACEPOINT) {
 		fprintf(stderr,
 			"--filter option should follow a -e tracepoint option\n");
 		return -1;
 	}
 
-	last->filter = strdup(str);
-	if (last->filter == NULL) {
-		fprintf(stderr, "not enough memory to hold filter string\n");
+	if (perf_evsel__append_filter(evsel, "&&", str) < 0) {
+		fprintf(stderr,
+			"not enough memory to hold filter string\n");
 		return -1;
 	}
 
 	return 0;
 }
 
+int parse_filter(const struct option *opt, const char *str,
+		 int unset __maybe_unused)
+{
+	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+
+	return foreach_evsel_in_last_glob(evlist, set_filter,
+					  (const void *)str);
+}
+
+static int add_exclude_perf_filter(struct perf_evsel *evsel,
+				   const void *arg __maybe_unused)
+{
+	char new_filter[64];
+
+	if (evsel == NULL || evsel->attr.type != PERF_TYPE_TRACEPOINT) {
+		fprintf(stderr,
+			"--exclude-perf option should follow a -e tracepoint option\n");
+		return -1;
+	}
+
+	snprintf(new_filter, sizeof(new_filter), "common_pid != %d", getpid());
+
+	if (perf_evsel__append_filter(evsel, "&&", new_filter) < 0) {
+		fprintf(stderr,
+			"not enough memory to hold filter string\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+int exclude_perf(const struct option *opt,
+		 const char *arg __maybe_unused,
+		 int unset __maybe_unused)
+{
+	struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
+
+	return foreach_evsel_in_last_glob(evlist, add_exclude_perf_filter,
+					  NULL);
+}
+
 static const char * const event_type_descriptors[] = {
 	"Hardware event",
 	"Software event",
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 131f29b..a09b0e2 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -34,6 +34,7 @@
 			struct parse_events_error *error);
 extern int parse_events_terms(struct list_head *terms, const char *str);
 extern int parse_filter(const struct option *opt, const char *str, int unset);
+extern int exclude_perf(const struct option *opt, const char *arg, int unset);
 
 #define EVENTS_HELP_MAX (128*1024)
 
@@ -61,7 +62,11 @@
 	PARSE_EVENTS__TERM_TYPE_CONFIG2,
 	PARSE_EVENTS__TERM_TYPE_NAME,
 	PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD,
+	PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ,
 	PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE,
+	PARSE_EVENTS__TERM_TYPE_TIME,
+	PARSE_EVENTS__TERM_TYPE_CALLGRAPH,
+	PARSE_EVENTS__TERM_TYPE_STACKSIZE,
 };
 
 struct parse_events_term {
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 13cef3c..936d566 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -182,7 +182,11 @@
 config2			{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
 name			{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
 period			{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+freq			{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ); }
 branch_type		{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+time			{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); }
+call-graph		{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); }
+stack-size		{ return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); }
 ,			{ return ','; }
 "/"			{ BEGIN(INITIAL); return '/'; }
 {name_minus}		{ return str(yyscanner, PE_NAME); }
diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
new file mode 100644
index 0000000..4f2c1c2
--- /dev/null
+++ b/tools/perf/util/parse-regs-options.c
@@ -0,0 +1,71 @@
+#include "perf.h"
+#include "util/util.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/parse-regs-options.h"
+
+int
+parse_regs(const struct option *opt, const char *str, int unset)
+{
+	uint64_t *mode = (uint64_t *)opt->value;
+	const struct sample_reg *r;
+	char *s, *os = NULL, *p;
+	int ret = -1;
+
+	if (unset)
+		return 0;
+
+	/*
+	 * cannot set it twice
+	 */
+	if (*mode)
+		return -1;
+
+	/* str may be NULL in case no arg is passed to -I */
+	if (str) {
+		/* because str is read-only */
+		s = os = strdup(str);
+		if (!s)
+			return -1;
+
+		for (;;) {
+			p = strchr(s, ',');
+			if (p)
+				*p = '\0';
+
+			if (!strcmp(s, "?")) {
+				fprintf(stderr, "available registers: ");
+				for (r = sample_reg_masks; r->name; r++) {
+					fprintf(stderr, "%s ", r->name);
+				}
+				fputc('\n', stderr);
+				/* just printing available regs */
+				return -1;
+			}
+			for (r = sample_reg_masks; r->name; r++) {
+				if (!strcasecmp(s, r->name))
+					break;
+			}
+			if (!r->name) {
+				ui__warning("unknown register %s,"
+					    " check man page\n", s);
+				goto error;
+			}
+
+			*mode |= r->mask;
+
+			if (!p)
+				break;
+
+			s = p + 1;
+		}
+	}
+	ret = 0;
+
+	/* default to all possible regs */
+	if (*mode == 0)
+		*mode = PERF_REGS_MASK;
+error:
+	free(os);
+	return ret;
+}
diff --git a/tools/perf/util/parse-regs-options.h b/tools/perf/util/parse-regs-options.h
new file mode 100644
index 0000000..7d762b1
--- /dev/null
+++ b/tools/perf/util/parse-regs-options.h
@@ -0,0 +1,5 @@
+#ifndef _PERF_PARSE_REGS_OPTIONS_H
+#define _PERF_PARSE_REGS_OPTIONS_H 1
+struct option;
+int parse_regs(const struct option *opt, const char *str, int unset);
+#endif /* _PERF_PARSE_REGS_OPTIONS_H */
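
parse_regs() is written as a parse-options callback whose opt->value points at a u64 register mask. A sketch of how a builtin might wire it up (not part of the patch), assuming perf's generic OPT_CALLBACK()/OPT_END() macros from util/parse-options.h; the real record code may use a variant that makes the register list optional:

	#include <stdint.h>
	#include "util/parse-options.h"
	#include "util/parse-regs-options.h"

	static uint64_t sample_intr_regs;	/* filled in by parse_regs() */

	static const struct option demo_options[] = {
		OPT_CALLBACK('I', "intr-regs", &sample_intr_regs, "register",
			     "sample selected machine registers on interrupt,"
			     " use -I ? to list register names", parse_regs),
		OPT_END()
	};
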
diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c
index 43168fb..885e8ac 100644
--- a/tools/perf/util/perf_regs.c
+++ b/tools/perf/util/perf_regs.c
@@ -2,6 +2,10 @@
 #include "perf_regs.h"
 #include "event.h"
 
+const struct sample_reg __weak sample_reg_masks[] = {
+	SMPL_REG_END
+};
+
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
 	int i, idx = 0;
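
The weak, empty sample_reg_masks[] above exists so that each architecture can override it and give parse_regs() a name-to-bit table, built with the SMPL_REG()/SMPL_REG_END helpers added to perf_regs.h below. A sketch of such an override for x86 (not part of the patch), assuming it lives under the arch-specific util directory and that the PERF_REG_X86_* ids come from the kernel uapi header; a real table would list every supported register:

	#include "../../util/perf_regs.h"
	#include <asm/perf_regs.h>	/* PERF_REG_X86_* (uapi) */

	/* Overrides the __weak empty table; each entry pairs a user-visible
	 * register name with the bit used in the sample_regs mask. */
	const struct sample_reg sample_reg_masks[] = {
		SMPL_REG(AX, PERF_REG_X86_AX),
		SMPL_REG(BX, PERF_REG_X86_BX),
		SMPL_REG(SP, PERF_REG_X86_SP),
		SMPL_REG(IP, PERF_REG_X86_IP),
		SMPL_REG_END
	};
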
diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h
index 980dbf7..2984dcc 100644
--- a/tools/perf/util/perf_regs.h
+++ b/tools/perf/util/perf_regs.h
@@ -5,6 +5,15 @@
 
 struct regs_dump;
 
+struct sample_reg {
+	const char *name;
+	uint64_t mask;
+};
+#define SMPL_REG(n, b) { .name = #n, .mask = 1ULL << (b) }
+#define SMPL_REG_END { .name = NULL }
+
+extern const struct sample_reg sample_reg_masks[];
+
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
 
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7bcb8c3..89c91a1 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -462,10 +462,6 @@
 	LIST_HEAD(aliases);
 	__u32 type;
 
-	/* No support for intel_bts or intel_pt so disallow them */
-	if (!strcmp(name, "intel_bts") || !strcmp(name, "intel_pt"))
-		return NULL;
-
 	/*
 	 * The pmu data we store & need consists of the pmu
 	 * type value and format definitions. Load both right
@@ -542,7 +538,7 @@
 }
 
 static struct perf_pmu_format *
-pmu_find_format(struct list_head *formats, char *name)
+pmu_find_format(struct list_head *formats, const char *name)
 {
 	struct perf_pmu_format *format;
 
@@ -553,6 +549,21 @@
 	return NULL;
 }
 
+__u64 perf_pmu__format_bits(struct list_head *formats, const char *name)
+{
+	struct perf_pmu_format *format = pmu_find_format(formats, name);
+	__u64 bits = 0;
+	int fbit;
+
+	if (!format)
+		return 0;
+
+	for_each_set_bit(fbit, format->bits, PERF_PMU_FORMAT_BITS)
+		bits |= 1ULL << fbit;
+
+	return bits;
+}
+
 /*
  * Sets value based on the format definition (format parameter)
  * and unformatted value (value parameter).
@@ -574,6 +585,18 @@
 	}
 }
 
+static __u64 pmu_format_max_value(const unsigned long *format)
+{
+	int w;
+
+	w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
+	if (!w)
+		return 0;
+	if (w < 64)
+		return (1ULL << w) - 1;
+	return -1;
+}
+
 /*
  * Term is a string term, and might be a param-term. Try to look up its value
  * in the remaining terms.
@@ -607,7 +630,9 @@
 {
 	struct perf_pmu_format *format;
 	char *err, *str;
-	static const char *static_terms = "config,config1,config2,name,period,branch_type\n";
+	static const char *static_terms = "config,config1,config2,name,"
+					  "period,freq,branch_type,time,"
+					  "call-graph,stack-size\n";
 	unsigned i = 0;
 
 	if (!asprintf(&str, "valid terms:"))
@@ -647,7 +672,7 @@
 {
 	struct perf_pmu_format *format;
 	__u64 *vp;
-	__u64 val;
+	__u64 val, max_val;
 
 	/*
 	 * If this is a parameter we've already used for parameterized-eval,
@@ -713,6 +738,22 @@
 	} else
 		return -EINVAL;
 
+	max_val = pmu_format_max_value(format->bits);
+	if (val > max_val) {
+		if (err) {
+			err->idx = term->err_val;
+			if (asprintf(&err->str,
+				     "value too big for format, maximum is %llu",
+				     (unsigned long long)max_val) < 0)
+				err->str = strdup("value too big for format");
+			return -EINVAL;
+		}
+		/*
+		 * Assume we don't care if !err, in which case the value will be
+		 * silently truncated.
+		 */
+	}
+
 	pmu_format_value(format->bits, val, vp, zero);
 	return 0;
 }
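
The new range check in pmu_config_term() relies on pmu_format_max_value() turning a format's bit count into the largest encodable value. A minimal standalone sketch of that arithmetic (not part of the patch) for a hypothetical 8-bit field such as a "config:0-7" PMU format entry:

	#include <stdio.h>

	int main(void)
	{
		int w = 8;	/* hypothetical field width, e.g. config:0-7 */

		/* Mirrors pmu_format_max_value(): an 8-bit field can encode
		 * at most (1 << 8) - 1 = 255; 64 bits or more means any u64
		 * fits, which the function signals by returning all ones. */
		unsigned long long max = (w < 64) ? (1ULL << w) - 1 : ~0ULL;

		printf("maximum is %llu\n", max);	/* prints 255 */
		return 0;
	}
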
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 7b9c8cf..5d7e844 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -54,6 +54,7 @@
 			   struct perf_event_attr *attr,
 			   struct list_head *head_terms,
 			   bool zero, struct parse_events_error *error);
+__u64 perf_pmu__format_bits(struct list_head *formats, const char *name);
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
 			  struct perf_pmu_info *info);
 struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 381f23a..eb5f18b 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -45,6 +45,7 @@
 #include "trace-event.h"	/* For __maybe_unused */
 #include "probe-event.h"
 #include "probe-finder.h"
+#include "probe-file.h"
 #include "session.h"
 
 #define MAX_CMDLEN 256
@@ -55,11 +56,7 @@
 
 #define semantic_error(msg ...) pr_err("Semantic error :" msg)
 
-/* If there is no space to write, returns -E2BIG. */
-static int e_snprintf(char *str, size_t size, const char *format, ...)
-	__attribute__((format(printf, 3, 4)));
-
-static int e_snprintf(char *str, size_t size, const char *format, ...)
+int e_snprintf(char *str, size_t size, const char *format, ...)
 {
 	int ret;
 	va_list ap;
@@ -72,7 +69,6 @@
 }
 
 static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
-static void clear_probe_trace_event(struct probe_trace_event *tev);
 static struct machine *host_machine;
 
 /* Initialize symbol maps and path of vmlinux/modules */
@@ -519,7 +515,7 @@
 		if (ret < 0)
 			goto error;
 		addr += stext;
-	} else {
+	} else if (tp->symbol) {
 		addr = kernel_get_symbol_address_by_name(tp->symbol, false);
 		if (addr == 0)
 			goto error;
@@ -709,9 +705,10 @@
 	}
 	/* Error path : ntevs < 0 */
 	pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
-	if (ntevs == -EBADF) {
-		pr_warning("Warning: No dwarf info found in the vmlinux - "
-			"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
+	if (ntevs < 0) {
+		if (ntevs == -EBADF)
+			pr_warning("Warning: No dwarf info found in the vmlinux - "
+				"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
 		if (!need_dwarf) {
 			pr_debug("Trying to use symbols.\n");
 			return 0;
@@ -1197,15 +1194,37 @@
 		*ptr++ = '\0';
 	}
 
-	tmp = strdup(arg);
-	if (tmp == NULL)
-		return -ENOMEM;
+	if (arg[0] == '\0')
+		tmp = NULL;
+	else {
+		tmp = strdup(arg);
+		if (tmp == NULL)
+			return -ENOMEM;
+	}
 
 	if (file_spec)
 		pp->file = tmp;
-	else
+	else {
 		pp->function = tmp;
 
+		/*
+		 * Keep pp->function even if this is an absolute address,
+		 * so it can mark whether abs_address is valid.
+		 * This makes 'perf probe lib.bin 0x0' possible.
+		 *
+		 * Note that checking the length of tmp is not needed
+		 * because when we access tmp[1] we know tmp[0] is '0',
+		 * so tmp[1] is always valid (but could be '\0').
+		 */
+		if (tmp && !strncmp(tmp, "0x", 2)) {
+			pp->abs_address = strtoul(pp->function, &tmp, 0);
+			if (*tmp != '\0') {
+				semantic_error("Invalid absolute address.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
 	/* Parse other options */
 	while (ptr) {
 		arg = ptr;
@@ -1467,8 +1486,7 @@
 }
 
 /* Parse probe_events event into struct probe_point */
-static int parse_probe_trace_command(const char *cmd,
-				     struct probe_trace_event *tev)
+int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
 {
 	struct probe_trace_point *tp = &tev->point;
 	char pr;
@@ -1523,9 +1541,31 @@
 	} else
 		p = argv[1];
 	fmt1_str = strtok_r(p, "+", &fmt);
-	if (fmt1_str[0] == '0')	/* only the address started with 0x */
-		tp->address = strtoul(fmt1_str, NULL, 0);
-	else {
+	/* only the address started with 0x */
+	if (fmt1_str[0] == '0')	{
+		/*
+		 * Fix a special case:
+		 * if address == 0, kernel reports something like:
+		 * p:probe_libc/abs_0 /lib/libc-2.18.so:0x          (null) arg1=%ax
+		 * Newer kernels may fix that, but we want to
+		 * support old kernels as well.
+		 */
+		if (strcmp(fmt1_str, "0x") == 0) {
+			if (!argv[2] || strcmp(argv[2], "(null)")) {
+				ret = -EINVAL;
+				goto out;
+			}
+			tp->address = 0;
+
+			free(argv[2]);
+			for (i = 2; argv[i + 1] != NULL; i++)
+				argv[i] = argv[i + 1];
+
+			argv[i] = NULL;
+			argc -= 1;
+		} else
+			tp->address = strtoul(fmt1_str, NULL, 0);
+	} else {
 		/* Only the symbol-based probe has offset */
 		tp->symbol = strdup(fmt1_str);
 		if (tp->symbol == NULL) {
@@ -1782,14 +1822,29 @@
 	if (len <= 0)
 		goto error;
 
-	/* Uprobes must have tp->address and tp->module */
-	if (tev->uprobes && (!tp->address || !tp->module))
+	/* Uprobes must have tp->module */
+	if (tev->uprobes && !tp->module)
 		goto error;
+	/*
+	 * If tp->address == 0, then this point must be an
+	 * absolute address uprobe.
+	 * try_to_find_absolute_address() should have set
+	 * tp->symbol to "0x0".
+	 */
+	if (tev->uprobes && !tp->address) {
+		if (!tp->symbol || strcmp(tp->symbol, "0x0"))
+			goto error;
+	}
 
 	/* Use the tp->address for uprobes */
 	if (tev->uprobes)
 		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s:0x%lx",
 				 tp->module, tp->address);
+	else if (!strncmp(tp->symbol, "0x", 2))
+		/* Absolute address. See try_to_find_absolute_address() */
+		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s0x%lx",
+				 tp->module ?: "", tp->module ? ":" : "",
+				 tp->address);
 	else
 		ret = e_snprintf(buf + len, MAX_CMDLEN - len, "%s%s%s+%lu",
 				 tp->module ?: "", tp->module ? ":" : "",
@@ -1819,17 +1874,17 @@
 {
 	struct symbol *sym = NULL;
 	struct map *map;
-	u64 addr;
+	u64 addr = tp->address;
 	int ret = -ENOENT;
 
 	if (!is_kprobe) {
 		map = dso__new_map(tp->module);
 		if (!map)
 			goto out;
-		addr = tp->address;
 		sym = map__find_symbol(map, addr, NULL);
 	} else {
-		addr = kernel_get_symbol_address_by_name(tp->symbol, true);
+		if (tp->symbol)
+			addr = kernel_get_symbol_address_by_name(tp->symbol, true);
 		if (addr) {
 			addr += tp->offset;
 			sym = __find_kernel_function(addr, &map);
@@ -1852,8 +1907,8 @@
 }
 
 static int convert_to_perf_probe_point(struct probe_trace_point *tp,
-					struct perf_probe_point *pp,
-					bool is_kprobe)
+				       struct perf_probe_point *pp,
+				       bool is_kprobe)
 {
 	char buf[128];
 	int ret;
@@ -1870,7 +1925,7 @@
 	if (tp->symbol) {
 		pp->function = strdup(tp->symbol);
 		pp->offset = tp->offset;
-	} else if (!tp->module && !is_kprobe) {
+	} else {
 		ret = e_snprintf(buf, 128, "0x%" PRIx64, (u64)tp->address);
 		if (ret < 0)
 			return ret;
@@ -1951,7 +2006,7 @@
 	memset(pev, 0, sizeof(*pev));
 }
 
-static void clear_probe_trace_event(struct probe_trace_event *tev)
+void clear_probe_trace_event(struct probe_trace_event *tev)
 {
 	struct probe_trace_arg_ref *ref, *next;
 	int i;
@@ -1976,119 +2031,6 @@
 	memset(tev, 0, sizeof(*tev));
 }
 
-static void print_open_warning(int err, bool is_kprobe)
-{
-	char sbuf[STRERR_BUFSIZE];
-
-	if (err == -ENOENT) {
-		const char *config;
-
-		if (!is_kprobe)
-			config = "CONFIG_UPROBE_EVENTS";
-		else
-			config = "CONFIG_KPROBE_EVENTS";
-
-		pr_warning("%cprobe_events file does not exist"
-			   " - please rebuild kernel with %s.\n",
-			   is_kprobe ? 'k' : 'u', config);
-	} else if (err == -ENOTSUP)
-		pr_warning("Tracefs or debugfs is not mounted.\n");
-	else
-		pr_warning("Failed to open %cprobe_events: %s\n",
-			   is_kprobe ? 'k' : 'u',
-			   strerror_r(-err, sbuf, sizeof(sbuf)));
-}
-
-static void print_both_open_warning(int kerr, int uerr)
-{
-	/* Both kprobes and uprobes are disabled, warn it. */
-	if (kerr == -ENOTSUP && uerr == -ENOTSUP)
-		pr_warning("Tracefs or debugfs is not mounted.\n");
-	else if (kerr == -ENOENT && uerr == -ENOENT)
-		pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
-			   "or/and CONFIG_UPROBE_EVENTS.\n");
-	else {
-		char sbuf[STRERR_BUFSIZE];
-		pr_warning("Failed to open kprobe events: %s.\n",
-			   strerror_r(-kerr, sbuf, sizeof(sbuf)));
-		pr_warning("Failed to open uprobe events: %s.\n",
-			   strerror_r(-uerr, sbuf, sizeof(sbuf)));
-	}
-}
-
-static int open_probe_events(const char *trace_file, bool readwrite)
-{
-	char buf[PATH_MAX];
-	const char *__debugfs;
-	const char *tracing_dir = "";
-	int ret;
-
-	__debugfs = tracefs_find_mountpoint();
-	if (__debugfs == NULL) {
-		tracing_dir = "tracing/";
-
-		__debugfs = debugfs_find_mountpoint();
-		if (__debugfs == NULL)
-			return -ENOTSUP;
-	}
-
-	ret = e_snprintf(buf, PATH_MAX, "%s/%s%s",
-			 __debugfs, tracing_dir, trace_file);
-	if (ret >= 0) {
-		pr_debug("Opening %s write=%d\n", buf, readwrite);
-		if (readwrite && !probe_event_dry_run)
-			ret = open(buf, O_RDWR | O_APPEND, 0);
-		else
-			ret = open(buf, O_RDONLY, 0);
-
-		if (ret < 0)
-			ret = -errno;
-	}
-	return ret;
-}
-
-static int open_kprobe_events(bool readwrite)
-{
-	return open_probe_events("kprobe_events", readwrite);
-}
-
-static int open_uprobe_events(bool readwrite)
-{
-	return open_probe_events("uprobe_events", readwrite);
-}
-
-/* Get raw string list of current kprobe_events  or uprobe_events */
-static struct strlist *get_probe_trace_command_rawlist(int fd)
-{
-	int ret, idx;
-	FILE *fp;
-	char buf[MAX_CMDLEN];
-	char *p;
-	struct strlist *sl;
-
-	sl = strlist__new(true, NULL);
-
-	fp = fdopen(dup(fd), "r");
-	while (!feof(fp)) {
-		p = fgets(buf, MAX_CMDLEN, fp);
-		if (!p)
-			break;
-
-		idx = strlen(p) - 1;
-		if (p[idx] == '\n')
-			p[idx] = '\0';
-		ret = strlist__add(sl, buf);
-		if (ret < 0) {
-			pr_debug("strlist__add failed (%d)\n", ret);
-			strlist__delete(sl);
-			return NULL;
-		}
-	}
-	fclose(fp);
-
-	return sl;
-}
-
 struct kprobe_blacklist_node {
 	struct list_head list;
 	unsigned long start;
@@ -2284,7 +2226,7 @@
 	memset(&tev, 0, sizeof(tev));
 	memset(&pev, 0, sizeof(pev));
 
-	rawlist = get_probe_trace_command_rawlist(fd);
+	rawlist = probe_file__get_rawlist(fd);
 	if (!rawlist)
 		return -ENOMEM;
 
@@ -2325,89 +2267,20 @@
 	if (ret < 0)
 		return ret;
 
-	kp_fd = open_kprobe_events(false);
-	if (kp_fd >= 0) {
+	ret = probe_file__open_both(&kp_fd, &up_fd, 0);
+	if (ret < 0)
+		return ret;
+
+	if (kp_fd >= 0)
 		ret = __show_perf_probe_events(kp_fd, true, filter);
-		close(kp_fd);
-		if (ret < 0)
-			goto out;
-	}
-
-	up_fd = open_uprobe_events(false);
-	if (kp_fd < 0 && up_fd < 0) {
-		print_both_open_warning(kp_fd, up_fd);
-		ret = kp_fd;
-		goto out;
-	}
-
-	if (up_fd >= 0) {
+	if (up_fd >= 0 && ret >= 0)
 		ret = __show_perf_probe_events(up_fd, false, filter);
+	if (kp_fd > 0)
+		close(kp_fd);
+	if (up_fd > 0)
 		close(up_fd);
-	}
-out:
 	exit_symbol_maps();
-	return ret;
-}
 
-/* Get current perf-probe event names */
-static struct strlist *get_probe_trace_event_names(int fd, bool include_group)
-{
-	char buf[128];
-	struct strlist *sl, *rawlist;
-	struct str_node *ent;
-	struct probe_trace_event tev;
-	int ret = 0;
-
-	memset(&tev, 0, sizeof(tev));
-	rawlist = get_probe_trace_command_rawlist(fd);
-	if (!rawlist)
-		return NULL;
-	sl = strlist__new(true, NULL);
-	strlist__for_each(ent, rawlist) {
-		ret = parse_probe_trace_command(ent->s, &tev);
-		if (ret < 0)
-			break;
-		if (include_group) {
-			ret = e_snprintf(buf, 128, "%s:%s", tev.group,
-					tev.event);
-			if (ret >= 0)
-				ret = strlist__add(sl, buf);
-		} else
-			ret = strlist__add(sl, tev.event);
-		clear_probe_trace_event(&tev);
-		if (ret < 0)
-			break;
-	}
-	strlist__delete(rawlist);
-
-	if (ret < 0) {
-		strlist__delete(sl);
-		return NULL;
-	}
-	return sl;
-}
-
-static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
-{
-	int ret = 0;
-	char *buf = synthesize_probe_trace_command(tev);
-	char sbuf[STRERR_BUFSIZE];
-
-	if (!buf) {
-		pr_debug("Failed to synthesize probe trace event.\n");
-		return -EINVAL;
-	}
-
-	pr_debug("Writing event: %s\n", buf);
-	if (!probe_event_dry_run) {
-		ret = write(fd, buf, strlen(buf));
-		if (ret <= 0) {
-			ret = -errno;
-			pr_warning("Failed to write event: %s\n",
-				   strerror_r(errno, sbuf, sizeof(sbuf)));
-		}
-	}
-	free(buf);
 	return ret;
 }
 
@@ -2478,36 +2351,69 @@
 	free(buf);
 }
 
+/* Set new name from original perf_probe_event and namelist */
+static int probe_trace_event__set_name(struct probe_trace_event *tev,
+				       struct perf_probe_event *pev,
+				       struct strlist *namelist,
+				       bool allow_suffix)
+{
+	const char *event, *group;
+	char buf[64];
+	int ret;
+
+	if (pev->event)
+		event = pev->event;
+	else
+		if (pev->point.function &&
+			(strncmp(pev->point.function, "0x", 2) != 0) &&
+			!strisglob(pev->point.function))
+			event = pev->point.function;
+		else
+			event = tev->point.realname;
+	if (pev->group)
+		group = pev->group;
+	else
+		group = PERFPROBE_GROUP;
+
+	/* Get an unused new event name */
+	ret = get_new_event_name(buf, 64, event,
+				 namelist, allow_suffix);
+	if (ret < 0)
+		return ret;
+
+	event = buf;
+
+	tev->event = strdup(event);
+	tev->group = strdup(group);
+	if (tev->event == NULL || tev->group == NULL)
+		return -ENOMEM;
+
+	/* Add added event name to namelist */
+	strlist__add(namelist, event);
+	return 0;
+}
+
 static int __add_probe_trace_events(struct perf_probe_event *pev,
 				     struct probe_trace_event *tevs,
 				     int ntevs, bool allow_suffix)
 {
 	int i, fd, ret;
 	struct probe_trace_event *tev = NULL;
-	char buf[64];
 	const char *event = NULL, *group = NULL;
 	struct strlist *namelist;
-	bool safename;
 
-	if (pev->uprobes)
-		fd = open_uprobe_events(true);
-	else
-		fd = open_kprobe_events(true);
-
-	if (fd < 0) {
-		print_open_warning(fd, !pev->uprobes);
+	fd = probe_file__open(PF_FL_RW | (pev->uprobes ? PF_FL_UPROBE : 0));
+	if (fd < 0)
 		return fd;
-	}
 
 	/* Get current event names */
-	namelist = get_probe_trace_event_names(fd, false);
+	namelist = probe_file__get_namelist(fd);
 	if (!namelist) {
 		pr_debug("Failed to get current event list.\n");
 		ret = -ENOMEM;
 		goto close_out;
 	}
 
-	safename = (pev->point.function && !strisglob(pev->point.function));
 	ret = 0;
 	pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
 	for (i = 0; i < ntevs; i++) {
@@ -2516,36 +2422,15 @@
 		if (!tev->point.symbol)
 			continue;
 
-		if (pev->event)
-			event = pev->event;
-		else
-			if (safename)
-				event = pev->point.function;
-			else
-				event = tev->point.realname;
-		if (pev->group)
-			group = pev->group;
-		else
-			group = PERFPROBE_GROUP;
-
-		/* Get an unused new event name */
-		ret = get_new_event_name(buf, 64, event,
-					 namelist, allow_suffix);
+		/* Set new name for tev (and update namelist) */
+		ret = probe_trace_event__set_name(tev, pev, namelist,
+						  allow_suffix);
 		if (ret < 0)
 			break;
-		event = buf;
 
-		tev->event = strdup(event);
-		tev->group = strdup(group);
-		if (tev->event == NULL || tev->group == NULL) {
-			ret = -ENOMEM;
-			break;
-		}
-		ret = write_probe_trace_event(fd, tev);
+		ret = probe_file__add_event(fd, tev);
 		if (ret < 0)
 			break;
-		/* Add added event name to namelist */
-		strlist__add(namelist, event);
 
 		/* We use tev's name for showing new events */
 		show_perf_probe_event(tev->group, tev->event, pev,
@@ -2748,6 +2633,98 @@
 	goto out;
 }
 
+static int try_to_find_absolute_address(struct perf_probe_event *pev,
+					struct probe_trace_event **tevs)
+{
+	struct perf_probe_point *pp = &pev->point;
+	struct probe_trace_event *tev;
+	struct probe_trace_point *tp;
+	int i, err;
+
+	if (!(pev->point.function && !strncmp(pev->point.function, "0x", 2)))
+		return -EINVAL;
+	if (perf_probe_event_need_dwarf(pev))
+		return -EINVAL;
+
+	/*
+	 * This is 'perf probe /lib/libc.so 0xabcd'. Try to probe at
+	 * absolute address.
+	 *
+	 * Only one tev can be generated by this.
+	 */
+	*tevs = zalloc(sizeof(*tev));
+	if (!*tevs)
+		return -ENOMEM;
+
+	tev = *tevs;
+	tp = &tev->point;
+
+	/*
+	 * Don't use tp->offset, use address directly, because
+	 * in synthesize_probe_trace_command() address cannot be
+	 * zero.
+	 */
+	tp->address = pev->point.abs_address;
+	tp->retprobe = pp->retprobe;
+	tev->uprobes = pev->uprobes;
+
+	err = -ENOMEM;
+	/*
+	 * Give it a symbol name starting with '0x'.
+	 * In __add_probe_trace_events, a NULL symbol is interpreted as
+	 * invalid.
+	 */
+	if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
+		goto errout;
+
+	/* For kprobe, check range */
+	if ((!tev->uprobes) &&
+	    (kprobe_warn_out_range(tev->point.symbol,
+				   tev->point.address))) {
+		err = -EACCES;
+		goto errout;
+	}
+
+	if (asprintf(&tp->realname, "abs_%lx", tp->address) < 0)
+		goto errout;
+
+	if (pev->target) {
+		tp->module = strdup(pev->target);
+		if (!tp->module)
+			goto errout;
+	}
+
+	if (pev->group) {
+		tev->group = strdup(pev->group);
+		if (!tev->group)
+			goto errout;
+	}
+
+	if (pev->event) {
+		tev->event = strdup(pev->event);
+		if (!tev->event)
+			goto errout;
+	}
+
+	tev->nargs = pev->nargs;
+	tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs);
+	if (!tev->args) {
+		err = -ENOMEM;
+		goto errout;
+	}
+	for (i = 0; i < tev->nargs; i++)
+		copy_to_probe_trace_arg(&tev->args[i], &pev->args[i]);
+
+	return 1;
+
+errout:
+	if (*tevs) {
+		clear_probe_trace_events(*tevs, 1);
+		*tevs = NULL;
+	}
+	return err;
+}
+
 bool __weak arch__prefers_symtab(void) { return false; }
 
 static int convert_to_probe_trace_events(struct perf_probe_event *pev,
@@ -2764,6 +2741,10 @@
 		}
 	}
 
+	ret = try_to_find_absolute_address(pev, tevs);
+	if (ret > 0)
+		return ret;
+
 	if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
 		ret = find_probe_trace_events_from_map(pev, tevs);
 		if (ret > 0)
@@ -2838,68 +2819,9 @@
 	return ret;
 }
 
-static int __del_trace_probe_event(int fd, struct str_node *ent)
-{
-	char *p;
-	char buf[128];
-	int ret;
-
-	/* Convert from perf-probe event to trace-probe event */
-	ret = e_snprintf(buf, 128, "-:%s", ent->s);
-	if (ret < 0)
-		goto error;
-
-	p = strchr(buf + 2, ':');
-	if (!p) {
-		pr_debug("Internal error: %s should have ':' but not.\n",
-			 ent->s);
-		ret = -ENOTSUP;
-		goto error;
-	}
-	*p = '/';
-
-	pr_debug("Writing event: %s\n", buf);
-	ret = write(fd, buf, strlen(buf));
-	if (ret < 0) {
-		ret = -errno;
-		goto error;
-	}
-
-	pr_info("Removed event: %s\n", ent->s);
-	return 0;
-error:
-	pr_warning("Failed to delete event: %s\n",
-		   strerror_r(-ret, buf, sizeof(buf)));
-	return ret;
-}
-
-static int del_trace_probe_events(int fd, struct strfilter *filter,
-				  struct strlist *namelist)
-{
-	struct str_node *ent;
-	const char *p;
-	int ret = -ENOENT;
-
-	if (!namelist)
-		return -ENOENT;
-
-	strlist__for_each(ent, namelist) {
-		p = strchr(ent->s, ':');
-		if ((p && strfilter__compare(filter, p + 1)) ||
-		    strfilter__compare(filter, ent->s)) {
-			ret = __del_trace_probe_event(fd, ent);
-			if (ret < 0)
-				break;
-		}
-	}
-
-	return ret;
-}
-
 int del_perf_probe_events(struct strfilter *filter)
 {
 	int ret, ret2, ufd = -1, kfd = -1;
-	struct strlist *namelist = NULL, *unamelist = NULL;
 	char *str = strfilter__string(filter);
 
 	if (!str)
@@ -2908,25 +2830,15 @@
 	pr_debug("Delete filter: \'%s\'\n", str);
 
 	/* Get current event names */
-	kfd = open_kprobe_events(true);
-	if (kfd >= 0)
-		namelist = get_probe_trace_event_names(kfd, true);
+	ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
+	if (ret < 0)
+		goto out;
 
-	ufd = open_uprobe_events(true);
-	if (ufd >= 0)
-		unamelist = get_probe_trace_event_names(ufd, true);
-
-	if (kfd < 0 && ufd < 0) {
-		print_both_open_warning(kfd, ufd);
-		ret = kfd;
-		goto error;
-	}
-
-	ret = del_trace_probe_events(kfd, filter, namelist);
+	ret = probe_file__del_events(kfd, filter);
 	if (ret < 0 && ret != -ENOENT)
 		goto error;
 
-	ret2 = del_trace_probe_events(ufd, filter, unamelist);
+	ret2 = probe_file__del_events(ufd, filter);
 	if (ret2 < 0 && ret2 != -ENOENT) {
 		ret = ret2;
 		goto error;
@@ -2937,15 +2849,11 @@
 	ret = 0;
 
 error:
-	if (kfd >= 0) {
-		strlist__delete(namelist);
+	if (kfd >= 0)
 		close(kfd);
-	}
-
-	if (ufd >= 0) {
-		strlist__delete(unamelist);
+	if (ufd >= 0)
 		close(ufd);
-	}
+out:
 	free(str);
 
 	return ret;
@@ -3007,3 +2915,22 @@
 	return ret;
 }
 
+int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
+			    struct perf_probe_arg *pvar)
+{
+	tvar->value = strdup(pvar->var);
+	if (tvar->value == NULL)
+		return -ENOMEM;
+	if (pvar->type) {
+		tvar->type = strdup(pvar->type);
+		if (tvar->type == NULL)
+			return -ENOMEM;
+	}
+	if (pvar->name) {
+		tvar->name = strdup(pvar->name);
+		if (tvar->name == NULL)
+			return -ENOMEM;
+	} else
+		tvar->name = NULL;
+	return 0;
+}
diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
index 31db6ee..6e7ec68 100644
--- a/tools/perf/util/probe-event.h
+++ b/tools/perf/util/probe-event.h
@@ -59,6 +59,7 @@
 	bool		retprobe;	/* Return probe flag */
 	char		*lazy_line;	/* Lazy matching pattern */
 	unsigned long	offset;		/* Offset from function entry */
+	unsigned long	abs_address;	/* Absolute address of the point */
 };
 
 /* Perf probe probing argument field chain */
@@ -106,9 +107,13 @@
 	struct strlist			*vars;	/* Available variables */
 };
 
+struct map;
+
 /* Command string to events */
 extern int parse_perf_probe_command(const char *cmd,
 				    struct perf_probe_event *pev);
+extern int parse_probe_trace_command(const char *cmd,
+				     struct probe_trace_event *tev);
 
 /* Events to command string */
 extern char *synthesize_perf_probe_command(struct perf_probe_event *pev);
@@ -121,6 +126,7 @@
 
 /* Release event contents */
 extern void clear_perf_probe_event(struct perf_probe_event *pev);
+extern void clear_probe_trace_event(struct probe_trace_event *tev);
 
 /* Command string to line-range */
 extern int parse_line_range_desc(const char *cmd, struct line_range *lr);
@@ -144,7 +150,14 @@
 void arch__fix_tev_from_maps(struct perf_probe_event *pev,
 			     struct probe_trace_event *tev, struct map *map);
 
+/* If there is no space to write, returns -E2BIG. */
+int e_snprintf(char *str, size_t size, const char *format, ...)
+	__attribute__((format(printf, 3, 4)));
+
 /* Maximum index number of event-name postfix */
 #define MAX_EVENT_INDEX	1024
 
+int copy_to_probe_trace_arg(struct probe_trace_arg *tvar,
+			    struct perf_probe_arg *pvar);
+
 #endif /*_PROBE_EVENT_H */
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
new file mode 100644
index 0000000..bbb2437
--- /dev/null
+++ b/tools/perf/util/probe-file.c
@@ -0,0 +1,301 @@
+/*
+ * probe-file.c : operate ftrace k/uprobe events files
+ *
+ * Written by Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include "util.h"
+#include "event.h"
+#include "strlist.h"
+#include "debug.h"
+#include "cache.h"
+#include "color.h"
+#include "symbol.h"
+#include "thread.h"
+#include <api/fs/debugfs.h>
+#include <api/fs/tracefs.h>
+#include "probe-event.h"
+#include "probe-file.h"
+#include "session.h"
+
+#define MAX_CMDLEN 256
+
+static void print_open_warning(int err, bool uprobe)
+{
+	char sbuf[STRERR_BUFSIZE];
+
+	if (err == -ENOENT) {
+		const char *config;
+
+		if (uprobe)
+			config = "CONFIG_UPROBE_EVENTS";
+		else
+			config = "CONFIG_KPROBE_EVENTS";
+
+		pr_warning("%cprobe_events file does not exist"
+			   " - please rebuild kernel with %s.\n",
+			   uprobe ? 'u' : 'k', config);
+	} else if (err == -ENOTSUP)
+		pr_warning("Tracefs or debugfs is not mounted.\n");
+	else
+		pr_warning("Failed to open %cprobe_events: %s\n",
+			   uprobe ? 'u' : 'k',
+			   strerror_r(-err, sbuf, sizeof(sbuf)));
+}
+
+static void print_both_open_warning(int kerr, int uerr)
+{
+	/* Both kprobes and uprobes are disabled, warn about it. */
+	if (kerr == -ENOTSUP && uerr == -ENOTSUP)
+		pr_warning("Tracefs or debugfs is not mounted.\n");
+	else if (kerr == -ENOENT && uerr == -ENOENT)
+		pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
+			   "and/or CONFIG_UPROBE_EVENTS.\n");
+	else {
+		char sbuf[STRERR_BUFSIZE];
+		pr_warning("Failed to open kprobe events: %s.\n",
+			   strerror_r(-kerr, sbuf, sizeof(sbuf)));
+		pr_warning("Failed to open uprobe events: %s.\n",
+			   strerror_r(-uerr, sbuf, sizeof(sbuf)));
+	}
+}
+
+static int open_probe_events(const char *trace_file, bool readwrite)
+{
+	char buf[PATH_MAX];
+	const char *__debugfs;
+	const char *tracing_dir = "";
+	int ret;
+
+	__debugfs = tracefs_find_mountpoint();
+	if (__debugfs == NULL) {
+		tracing_dir = "tracing/";
+
+		__debugfs = debugfs_find_mountpoint();
+		if (__debugfs == NULL)
+			return -ENOTSUP;
+	}
+
+	ret = e_snprintf(buf, PATH_MAX, "%s/%s%s",
+			 __debugfs, tracing_dir, trace_file);
+	if (ret >= 0) {
+		pr_debug("Opening %s write=%d\n", buf, readwrite);
+		if (readwrite && !probe_event_dry_run)
+			ret = open(buf, O_RDWR | O_APPEND, 0);
+		else
+			ret = open(buf, O_RDONLY, 0);
+
+		if (ret < 0)
+			ret = -errno;
+	}
+	return ret;
+}
+
+static int open_kprobe_events(bool readwrite)
+{
+	return open_probe_events("kprobe_events", readwrite);
+}
+
+static int open_uprobe_events(bool readwrite)
+{
+	return open_probe_events("uprobe_events", readwrite);
+}
+
+int probe_file__open(int flag)
+{
+	int fd;
+
+	if (flag & PF_FL_UPROBE)
+		fd = open_uprobe_events(flag & PF_FL_RW);
+	else
+		fd = open_kprobe_events(flag & PF_FL_RW);
+	if (fd < 0)
+		print_open_warning(fd, flag & PF_FL_UPROBE);
+
+	return fd;
+}
+
+int probe_file__open_both(int *kfd, int *ufd, int flag)
+{
+	if (!kfd || !ufd)
+		return -EINVAL;
+
+	*kfd = open_kprobe_events(flag & PF_FL_RW);
+	*ufd = open_uprobe_events(flag & PF_FL_RW);
+	if (*kfd < 0 && *ufd < 0) {
+		print_both_open_warning(*kfd, *ufd);
+		return *kfd;
+	}
+
+	return 0;
+}
+
+/* Get raw string list of current kprobe_events or uprobe_events */
+struct strlist *probe_file__get_rawlist(int fd)
+{
+	int ret, idx;
+	FILE *fp;
+	char buf[MAX_CMDLEN];
+	char *p;
+	struct strlist *sl;
+
+	sl = strlist__new(NULL, NULL);
+
+	fp = fdopen(dup(fd), "r");
+	while (!feof(fp)) {
+		p = fgets(buf, MAX_CMDLEN, fp);
+		if (!p)
+			break;
+
+		idx = strlen(p) - 1;
+		if (p[idx] == '\n')
+			p[idx] = '\0';
+		ret = strlist__add(sl, buf);
+		if (ret < 0) {
+			pr_debug("strlist__add failed (%d)\n", ret);
+			strlist__delete(sl);
+			return NULL;
+		}
+	}
+	fclose(fp);
+
+	return sl;
+}
+
+static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
+{
+	char buf[128];
+	struct strlist *sl, *rawlist;
+	struct str_node *ent;
+	struct probe_trace_event tev;
+	int ret = 0;
+
+	memset(&tev, 0, sizeof(tev));
+	rawlist = probe_file__get_rawlist(fd);
+	if (!rawlist)
+		return NULL;
+	sl = strlist__new(NULL, NULL);
+	strlist__for_each(ent, rawlist) {
+		ret = parse_probe_trace_command(ent->s, &tev);
+		if (ret < 0)
+			break;
+		if (include_group) {
+			ret = e_snprintf(buf, 128, "%s:%s", tev.group,
+					tev.event);
+			if (ret >= 0)
+				ret = strlist__add(sl, buf);
+		} else
+			ret = strlist__add(sl, tev.event);
+		clear_probe_trace_event(&tev);
+		if (ret < 0)
+			break;
+	}
+	strlist__delete(rawlist);
+
+	if (ret < 0) {
+		strlist__delete(sl);
+		return NULL;
+	}
+	return sl;
+}
+
+/* Get current perf-probe event names */
+struct strlist *probe_file__get_namelist(int fd)
+{
+	return __probe_file__get_namelist(fd, false);
+}
+
+int probe_file__add_event(int fd, struct probe_trace_event *tev)
+{
+	int ret = 0;
+	char *buf = synthesize_probe_trace_command(tev);
+	char sbuf[STRERR_BUFSIZE];
+
+	if (!buf) {
+		pr_debug("Failed to synthesize probe trace event.\n");
+		return -EINVAL;
+	}
+
+	pr_debug("Writing event: %s\n", buf);
+	if (!probe_event_dry_run) {
+		ret = write(fd, buf, strlen(buf));
+		if (ret <= 0) {
+			ret = -errno;
+			pr_warning("Failed to write event: %s\n",
+				   strerror_r(errno, sbuf, sizeof(sbuf)));
+		}
+	}
+	free(buf);
+
+	return ret;
+}
+
+static int __del_trace_probe_event(int fd, struct str_node *ent)
+{
+	char *p;
+	char buf[128];
+	int ret;
+
+	/* Convert from perf-probe event to trace-probe event */
+	ret = e_snprintf(buf, 128, "-:%s", ent->s);
+	if (ret < 0)
+		goto error;
+
+	p = strchr(buf + 2, ':');
+	if (!p) {
+		pr_debug("Internal error: %s should have ':' but not.\n",
+			 ent->s);
+		ret = -ENOTSUP;
+		goto error;
+	}
+	*p = '/';
+
+	pr_debug("Writing event: %s\n", buf);
+	ret = write(fd, buf, strlen(buf));
+	if (ret < 0) {
+		ret = -errno;
+		goto error;
+	}
+
+	pr_info("Removed event: %s\n", ent->s);
+	return 0;
+error:
+	pr_warning("Failed to delete event: %s\n",
+		   strerror_r(-ret, buf, sizeof(buf)));
+	return ret;
+}
+
+int probe_file__del_events(int fd, struct strfilter *filter)
+{
+	struct strlist *namelist;
+	struct str_node *ent;
+	const char *p;
+	int ret = -ENOENT;
+
+	namelist = __probe_file__get_namelist(fd, true);
+	if (!namelist)
+		return -ENOENT;
+
+	strlist__for_each(ent, namelist) {
+		p = strchr(ent->s, ':');
+		if ((p && strfilter__compare(filter, p + 1)) ||
+		    strfilter__compare(filter, ent->s)) {
+			ret = __del_trace_probe_event(fd, ent);
+			if (ret < 0)
+				break;
+		}
+	}
+	strlist__delete(namelist);
+
+	return ret;
+}
diff --git a/tools/perf/util/probe-file.h b/tools/perf/util/probe-file.h
new file mode 100644
index 0000000..ada94a2
--- /dev/null
+++ b/tools/perf/util/probe-file.h
@@ -0,0 +1,18 @@
+#ifndef __PROBE_FILE_H
+#define __PROBE_FILE_H
+
+#include "strlist.h"
+#include "strfilter.h"
+#include "probe-event.h"
+
+#define PF_FL_UPROBE	1
+#define PF_FL_RW	2
+
+int probe_file__open(int flag);
+int probe_file__open_both(int *kfd, int *ufd, int flag);
+struct strlist *probe_file__get_namelist(int fd);
+struct strlist *probe_file__get_rawlist(int fd);
+int probe_file__add_event(int fd, struct probe_trace_event *tev);
+int probe_file__del_events(int fd, struct strfilter *filter);
+
+#endif
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2da65a7..29c43c068 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -553,24 +553,9 @@
 	char buf[32], *ptr;
 	int ret = 0;
 
-	if (!is_c_varname(pf->pvar->var)) {
-		/* Copy raw parameters */
-		pf->tvar->value = strdup(pf->pvar->var);
-		if (pf->tvar->value == NULL)
-			return -ENOMEM;
-		if (pf->pvar->type) {
-			pf->tvar->type = strdup(pf->pvar->type);
-			if (pf->tvar->type == NULL)
-				return -ENOMEM;
-		}
-		if (pf->pvar->name) {
-			pf->tvar->name = strdup(pf->pvar->name);
-			if (pf->tvar->name == NULL)
-				return -ENOMEM;
-		} else
-			pf->tvar->name = NULL;
-		return 0;
-	}
+	/* Copy raw parameters */
+	if (!is_c_varname(pf->pvar->var))
+		return copy_to_probe_trace_arg(pf->tvar, pf->pvar);
 
 	if (pf->pvar->name)
 		pf->tvar->name = strdup(pf->pvar->name);
@@ -1355,7 +1340,7 @@
 		 vl->point.offset);
 
 	/* Find local variables */
-	vl->vars = strlist__new(true, NULL);
+	vl->vars = strlist__new(NULL, NULL);
 	if (vl->vars == NULL)
 		return -ENOMEM;
 	af->child = true;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 0766d98..51be28b 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -16,7 +16,7 @@
 util/xyarray.c
 util/cgroup.c
 util/rblist.c
-util/stat.c
+util/counts.c
 util/strlist.c
 util/trace-event.c
 ../lib/rbtree.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 626422e..6324fe6 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -941,76 +941,84 @@
 	return PyType_Ready(&pyrf_evlist__type);
 }
 
+#define PERF_CONST(name) { #name, PERF_##name }
+
 static struct {
 	const char *name;
 	int	    value;
 } perf__constants[] = {
-	{ "TYPE_HARDWARE",   PERF_TYPE_HARDWARE },
-	{ "TYPE_SOFTWARE",   PERF_TYPE_SOFTWARE },
-	{ "TYPE_TRACEPOINT", PERF_TYPE_TRACEPOINT },
-	{ "TYPE_HW_CACHE",   PERF_TYPE_HW_CACHE },
-	{ "TYPE_RAW",	     PERF_TYPE_RAW },
-	{ "TYPE_BREAKPOINT", PERF_TYPE_BREAKPOINT },
+	PERF_CONST(TYPE_HARDWARE),
+	PERF_CONST(TYPE_SOFTWARE),
+	PERF_CONST(TYPE_TRACEPOINT),
+	PERF_CONST(TYPE_HW_CACHE),
+	PERF_CONST(TYPE_RAW),
+	PERF_CONST(TYPE_BREAKPOINT),
 
-	{ "COUNT_HW_CPU_CYCLES",	  PERF_COUNT_HW_CPU_CYCLES },
-	{ "COUNT_HW_INSTRUCTIONS",	  PERF_COUNT_HW_INSTRUCTIONS },
-	{ "COUNT_HW_CACHE_REFERENCES",	  PERF_COUNT_HW_CACHE_REFERENCES },
-	{ "COUNT_HW_CACHE_MISSES",	  PERF_COUNT_HW_CACHE_MISSES },
-	{ "COUNT_HW_BRANCH_INSTRUCTIONS", PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
-	{ "COUNT_HW_BRANCH_MISSES",	  PERF_COUNT_HW_BRANCH_MISSES },
-	{ "COUNT_HW_BUS_CYCLES",	  PERF_COUNT_HW_BUS_CYCLES },
-	{ "COUNT_HW_CACHE_L1D",		  PERF_COUNT_HW_CACHE_L1D },
-	{ "COUNT_HW_CACHE_L1I",		  PERF_COUNT_HW_CACHE_L1I },
-	{ "COUNT_HW_CACHE_LL",	  	  PERF_COUNT_HW_CACHE_LL },
-	{ "COUNT_HW_CACHE_DTLB",	  PERF_COUNT_HW_CACHE_DTLB },
-	{ "COUNT_HW_CACHE_ITLB",	  PERF_COUNT_HW_CACHE_ITLB },
-	{ "COUNT_HW_CACHE_BPU",		  PERF_COUNT_HW_CACHE_BPU },
-	{ "COUNT_HW_CACHE_OP_READ",	  PERF_COUNT_HW_CACHE_OP_READ },
-	{ "COUNT_HW_CACHE_OP_WRITE",	  PERF_COUNT_HW_CACHE_OP_WRITE },
-	{ "COUNT_HW_CACHE_OP_PREFETCH",	  PERF_COUNT_HW_CACHE_OP_PREFETCH },
-	{ "COUNT_HW_CACHE_RESULT_ACCESS", PERF_COUNT_HW_CACHE_RESULT_ACCESS },
-	{ "COUNT_HW_CACHE_RESULT_MISS",   PERF_COUNT_HW_CACHE_RESULT_MISS },
+	PERF_CONST(COUNT_HW_CPU_CYCLES),
+	PERF_CONST(COUNT_HW_INSTRUCTIONS),
+	PERF_CONST(COUNT_HW_CACHE_REFERENCES),
+	PERF_CONST(COUNT_HW_CACHE_MISSES),
+	PERF_CONST(COUNT_HW_BRANCH_INSTRUCTIONS),
+	PERF_CONST(COUNT_HW_BRANCH_MISSES),
+	PERF_CONST(COUNT_HW_BUS_CYCLES),
+	PERF_CONST(COUNT_HW_CACHE_L1D),
+	PERF_CONST(COUNT_HW_CACHE_L1I),
+	PERF_CONST(COUNT_HW_CACHE_LL),
+	PERF_CONST(COUNT_HW_CACHE_DTLB),
+	PERF_CONST(COUNT_HW_CACHE_ITLB),
+	PERF_CONST(COUNT_HW_CACHE_BPU),
+	PERF_CONST(COUNT_HW_CACHE_OP_READ),
+	PERF_CONST(COUNT_HW_CACHE_OP_WRITE),
+	PERF_CONST(COUNT_HW_CACHE_OP_PREFETCH),
+	PERF_CONST(COUNT_HW_CACHE_RESULT_ACCESS),
+	PERF_CONST(COUNT_HW_CACHE_RESULT_MISS),
 
-	{ "COUNT_HW_STALLED_CYCLES_FRONTEND",	  PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
-	{ "COUNT_HW_STALLED_CYCLES_BACKEND",	  PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+	PERF_CONST(COUNT_HW_STALLED_CYCLES_FRONTEND),
+	PERF_CONST(COUNT_HW_STALLED_CYCLES_BACKEND),
 
-	{ "COUNT_SW_CPU_CLOCK",	       PERF_COUNT_SW_CPU_CLOCK },
-	{ "COUNT_SW_TASK_CLOCK",       PERF_COUNT_SW_TASK_CLOCK },
-	{ "COUNT_SW_PAGE_FAULTS",      PERF_COUNT_SW_PAGE_FAULTS },
-	{ "COUNT_SW_CONTEXT_SWITCHES", PERF_COUNT_SW_CONTEXT_SWITCHES },
-	{ "COUNT_SW_CPU_MIGRATIONS",   PERF_COUNT_SW_CPU_MIGRATIONS },
-	{ "COUNT_SW_PAGE_FAULTS_MIN",  PERF_COUNT_SW_PAGE_FAULTS_MIN },
-	{ "COUNT_SW_PAGE_FAULTS_MAJ",  PERF_COUNT_SW_PAGE_FAULTS_MAJ },
-	{ "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
-	{ "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
-	{ "COUNT_SW_DUMMY",            PERF_COUNT_SW_DUMMY },
+	PERF_CONST(COUNT_SW_CPU_CLOCK),
+	PERF_CONST(COUNT_SW_TASK_CLOCK),
+	PERF_CONST(COUNT_SW_PAGE_FAULTS),
+	PERF_CONST(COUNT_SW_CONTEXT_SWITCHES),
+	PERF_CONST(COUNT_SW_CPU_MIGRATIONS),
+	PERF_CONST(COUNT_SW_PAGE_FAULTS_MIN),
+	PERF_CONST(COUNT_SW_PAGE_FAULTS_MAJ),
+	PERF_CONST(COUNT_SW_ALIGNMENT_FAULTS),
+	PERF_CONST(COUNT_SW_EMULATION_FAULTS),
+	PERF_CONST(COUNT_SW_DUMMY),
 
-	{ "SAMPLE_IP",	      PERF_SAMPLE_IP },
-	{ "SAMPLE_TID",	      PERF_SAMPLE_TID },
-	{ "SAMPLE_TIME",      PERF_SAMPLE_TIME },
-	{ "SAMPLE_ADDR",      PERF_SAMPLE_ADDR },
-	{ "SAMPLE_READ",      PERF_SAMPLE_READ },
-	{ "SAMPLE_CALLCHAIN", PERF_SAMPLE_CALLCHAIN },
-	{ "SAMPLE_ID",	      PERF_SAMPLE_ID },
-	{ "SAMPLE_CPU",	      PERF_SAMPLE_CPU },
-	{ "SAMPLE_PERIOD",    PERF_SAMPLE_PERIOD },
-	{ "SAMPLE_STREAM_ID", PERF_SAMPLE_STREAM_ID },
-	{ "SAMPLE_RAW",	      PERF_SAMPLE_RAW },
+	PERF_CONST(SAMPLE_IP),
+	PERF_CONST(SAMPLE_TID),
+	PERF_CONST(SAMPLE_TIME),
+	PERF_CONST(SAMPLE_ADDR),
+	PERF_CONST(SAMPLE_READ),
+	PERF_CONST(SAMPLE_CALLCHAIN),
+	PERF_CONST(SAMPLE_ID),
+	PERF_CONST(SAMPLE_CPU),
+	PERF_CONST(SAMPLE_PERIOD),
+	PERF_CONST(SAMPLE_STREAM_ID),
+	PERF_CONST(SAMPLE_RAW),
 
-	{ "FORMAT_TOTAL_TIME_ENABLED", PERF_FORMAT_TOTAL_TIME_ENABLED },
-	{ "FORMAT_TOTAL_TIME_RUNNING", PERF_FORMAT_TOTAL_TIME_RUNNING },
-	{ "FORMAT_ID",		       PERF_FORMAT_ID },
-	{ "FORMAT_GROUP",	       PERF_FORMAT_GROUP },
+	PERF_CONST(FORMAT_TOTAL_TIME_ENABLED),
+	PERF_CONST(FORMAT_TOTAL_TIME_RUNNING),
+	PERF_CONST(FORMAT_ID),
+	PERF_CONST(FORMAT_GROUP),
 
-	{ "RECORD_MMAP",       PERF_RECORD_MMAP },
-	{ "RECORD_LOST",       PERF_RECORD_LOST },
-	{ "RECORD_COMM",       PERF_RECORD_COMM },
-	{ "RECORD_EXIT",       PERF_RECORD_EXIT },
-	{ "RECORD_THROTTLE",   PERF_RECORD_THROTTLE },
-	{ "RECORD_UNTHROTTLE", PERF_RECORD_UNTHROTTLE },
-	{ "RECORD_FORK",       PERF_RECORD_FORK },
-	{ "RECORD_READ",       PERF_RECORD_READ },
-	{ "RECORD_SAMPLE",     PERF_RECORD_SAMPLE },
+	PERF_CONST(RECORD_MMAP),
+	PERF_CONST(RECORD_LOST),
+	PERF_CONST(RECORD_COMM),
+	PERF_CONST(RECORD_EXIT),
+	PERF_CONST(RECORD_THROTTLE),
+	PERF_CONST(RECORD_UNTHROTTLE),
+	PERF_CONST(RECORD_FORK),
+	PERF_CONST(RECORD_READ),
+	PERF_CONST(RECORD_SAMPLE),
+	PERF_CONST(RECORD_MMAP2),
+	PERF_CONST(RECORD_AUX),
+	PERF_CONST(RECORD_ITRACE_START),
+	PERF_CONST(RECORD_LOST_SAMPLES),
+	PERF_CONST(RECORD_SWITCH),
+	PERF_CONST(RECORD_SWITCH_CPU_WIDE),
 	{ .name = NULL, },
 };
 
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 1f7becb..0467367 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -85,6 +85,11 @@
 	evsel->attr.comm_exec = 1;
 }
 
+static void perf_probe_context_switch(struct perf_evsel *evsel)
+{
+	evsel->attr.context_switch = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
 	return perf_probe_api(perf_probe_sample_identifier);
@@ -95,6 +100,35 @@
 	return perf_probe_api(perf_probe_comm_exec);
 }
 
+bool perf_can_record_switch_events(void)
+{
+	return perf_probe_api(perf_probe_context_switch);
+}
+
+bool perf_can_record_cpu_wide(void)
+{
+	struct perf_event_attr attr = {
+		.type = PERF_TYPE_SOFTWARE,
+		.config = PERF_COUNT_SW_CPU_CLOCK,
+		.exclude_kernel = 1,
+	};
+	struct cpu_map *cpus;
+	int cpu, fd;
+
+	cpus = cpu_map__new(NULL);
+	if (!cpus)
+		return false;
+	cpu = cpus->map[0];
+	cpu_map__put(cpus);
+
+	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
+	if (fd < 0)
+		return false;
+	close(fd);
+
+	return true;
+}
+
 void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
 {
 	struct perf_evsel *evsel;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ed9dc25..8a4537e 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -170,7 +170,7 @@
 	machine__delete_threads(&session->machines.host);
 }
 
-static void perf_session_env__delete(struct perf_session_env *env)
+static void perf_session_env__exit(struct perf_env *env)
 {
 	zfree(&env->hostname);
 	zfree(&env->os_release);
@@ -180,6 +180,7 @@
 	zfree(&env->cpuid);
 
 	zfree(&env->cmdline);
+	zfree(&env->cmdline_argv);
 	zfree(&env->sibling_cores);
 	zfree(&env->sibling_threads);
 	zfree(&env->numa_nodes);
@@ -192,7 +193,7 @@
 	auxtrace_index__free(&session->auxtrace_index);
 	perf_session__destroy_kernel_maps(session);
 	perf_session__delete_threads(session);
-	perf_session_env__delete(&session->header.env);
+	perf_session_env__exit(&session->header.env);
 	machines__exit(&session->machines);
 	if (session->file)
 		perf_data_file__close(session->file);
@@ -332,6 +333,8 @@
 		tool->aux = perf_event__process_aux;
 	if (tool->itrace_start == NULL)
 		tool->itrace_start = perf_event__process_itrace_start;
+	if (tool->context_switch == NULL)
+		tool->context_switch = perf_event__process_switch;
 	if (tool->read == NULL)
 		tool->read = process_event_sample_stub;
 	if (tool->throttle == NULL)
@@ -470,6 +473,19 @@
 		swap_sample_id_all(event, &event->itrace_start + 1);
 }
 
+static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
+{
+	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
+		event->context_switch.next_prev_pid =
+				bswap_32(event->context_switch.next_prev_pid);
+		event->context_switch.next_prev_tid =
+				bswap_32(event->context_switch.next_prev_tid);
+	}
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->context_switch + 1);
+}
+
 static void perf_event__throttle_swap(union perf_event *event,
 				      bool sample_id_all)
 {
@@ -632,6 +648,8 @@
 	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
 	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
 	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
+	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
+	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
 	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
 	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
 	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
@@ -766,10 +784,18 @@
 
 	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);
 
-	for (i = 0; i < sample->branch_stack->nr; i++)
-		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
-			i, sample->branch_stack->entries[i].from,
-			sample->branch_stack->entries[i].to);
+	for (i = 0; i < sample->branch_stack->nr; i++) {
+		struct branch_entry *e = &sample->branch_stack->entries[i];
+
+		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
+			i, e->from, e->to,
+			e->flags.cycles,
+			e->flags.mispred ? "M" : " ",
+			e->flags.predicted ? "P" : " ",
+			e->flags.abort ? "A" : " ",
+			e->flags.in_tx ? "T" : " ",
+			(unsigned)e->flags.reserved);
+	}
 }
 
 static void regs_dump__printf(u64 mask, u64 *regs)
@@ -1093,6 +1119,9 @@
 		return tool->aux(tool, event, sample, machine);
 	case PERF_RECORD_ITRACE_START:
 		return tool->itrace_start(tool, event, sample, machine);
+	case PERF_RECORD_SWITCH:
+	case PERF_RECORD_SWITCH_CPU_WIDE:
+		return tool->context_switch(tool, event, sample, machine);
 	default:
 		++evlist->stats.nr_unknown_events;
 		return -1;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4c65a14..7e38716 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -9,7 +9,7 @@
 const char	default_parent_pattern[] = "^sys_|^do_page_fault";
 const char	*parent_pattern = default_parent_pattern;
 const char	default_sort_order[] = "comm,dso,symbol";
-const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,dso_to,symbol_to";
+const char	default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
 const char	default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked";
 const char	default_top_sort_order[] = "dso,symbol";
 const char	default_diff_sort_order[] = "dso,symbol";
@@ -319,6 +319,59 @@
 	.se_width_idx	= HISTC_SRCLINE,
 };
 
+/* --sort srcfile */
+
+static char no_srcfile[1];
+
+static char *get_srcfile(struct hist_entry *e)
+{
+	char *sf, *p;
+	struct map *map = e->ms.map;
+
+	sf = get_srcline(map->dso, map__rip_2objdump(map, e->ip),
+			 e->ms.sym, true);
+	if (!strcmp(sf, SRCLINE_UNKNOWN))
+		return no_srcfile;
+	p = strchr(sf, ':');
+	if (p && *sf) {
+		*p = 0;
+		return sf;
+	}
+	free(sf);
+	return no_srcfile;
+}
+
+static int64_t
+sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	if (!left->srcfile) {
+		if (!left->ms.map)
+			left->srcfile = no_srcfile;
+		else
+			left->srcfile = get_srcfile(left);
+	}
+	if (!right->srcfile) {
+		if (!right->ms.map)
+			right->srcfile = no_srcfile;
+		else
+			right->srcfile = get_srcfile(right);
+	}
+	return strcmp(right->srcfile, left->srcfile);
+}
+
+static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
+					size_t size, unsigned int width)
+{
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->srcfile);
+}
+
+struct sort_entry sort_srcfile = {
+	.se_header	= "Source File",
+	.se_cmp		= sort__srcfile_cmp,
+	.se_snprintf	= hist_entry__srcfile_snprintf,
+	.se_width_idx	= HISTC_SRCFILE,
+};
+
 /* --sort parent */
 
 static int64_t
@@ -526,6 +579,29 @@
 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
 }
 
+static int64_t
+sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+	return left->branch_info->flags.cycles -
+		right->branch_info->flags.cycles;
+}
+
+static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
+				    size_t size, unsigned int width)
+{
+	if (he->branch_info->flags.cycles == 0)
+		return repsep_snprintf(bf, size, "%-*s", width, "-");
+	return repsep_snprintf(bf, size, "%-*hd", width,
+			       he->branch_info->flags.cycles);
+}
+
+struct sort_entry sort_cycles = {
+	.se_header	= "Basic Block Cycles",
+	.se_cmp		= sort__cycles_cmp,
+	.se_snprintf	= hist_entry__cycles_snprintf,
+	.se_width_idx	= HISTC_CYCLES,
+};
+
 /* --sort daddr_sym */
 static int64_t
 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
@@ -1173,6 +1249,7 @@
 	DIM(SORT_PARENT, "parent", sort_parent),
 	DIM(SORT_CPU, "cpu", sort_cpu),
 	DIM(SORT_SRCLINE, "srcline", sort_srcline),
+	DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
 	DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
 	DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
 	DIM(SORT_TRANSACTION, "transaction", sort_transaction),
@@ -1190,6 +1267,7 @@
 	DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
 	DIM(SORT_IN_TX, "in_tx", sort_in_tx),
 	DIM(SORT_ABORT, "abort", sort_abort),
+	DIM(SORT_CYCLES, "cycles", sort_cycles),
 };
 
 #undef DIM
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index e97cd47..3c2a399 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -114,6 +114,7 @@
 		};
 	};
 	char			*srcline;
+	char			*srcfile;
 	struct symbol		*parent;
 	struct rb_root		sorted_chain;
 	struct branch_info	*branch_info;
@@ -172,6 +173,7 @@
 	SORT_PARENT,
 	SORT_CPU,
 	SORT_SRCLINE,
+	SORT_SRCFILE,
 	SORT_LOCAL_WEIGHT,
 	SORT_GLOBAL_WEIGHT,
 	SORT_TRANSACTION,
@@ -185,6 +187,7 @@
 	SORT_MISPREDICT,
 	SORT_ABORT,
 	SORT_IN_TX,
+	SORT_CYCLES,
 
 	/* memory mode specific sort keys */
 	__SORT_MEMORY_MODE,
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index c93fb0c..fc08248 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -10,6 +10,8 @@
 
 #include "symbol.h"
 
+bool srcline_full_filename;
+
 #ifdef HAVE_LIBBFD_SUPPORT
 
 /*
@@ -277,7 +279,9 @@
 	if (!addr2line(dso_name, addr, &file, &line, dso))
 		goto out;
 
-	if (asprintf(&srcline, "%s:%u", basename(file), line) < 0) {
+	if (asprintf(&srcline, "%s:%u",
+				srcline_full_filename ? file : basename(file),
+				line) < 0) {
 		free(file);
 		goto out;
 	}
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index f2a0d15..415c359 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -97,55 +97,6 @@
 	}
 }
 
-struct perf_counts *perf_counts__new(int ncpus, int nthreads)
-{
-	struct perf_counts *counts = zalloc(sizeof(*counts));
-
-	if (counts) {
-		struct xyarray *values;
-
-		values = xyarray__new(ncpus, nthreads, sizeof(struct perf_counts_values));
-		if (!values) {
-			free(counts);
-			return NULL;
-		}
-
-		counts->values = values;
-	}
-
-	return counts;
-}
-
-void perf_counts__delete(struct perf_counts *counts)
-{
-	if (counts) {
-		xyarray__delete(counts->values);
-		free(counts);
-	}
-}
-
-static void perf_counts__reset(struct perf_counts *counts)
-{
-	xyarray__reset(counts->values);
-}
-
-void perf_evsel__reset_counts(struct perf_evsel *evsel)
-{
-	perf_counts__reset(evsel->counts);
-}
-
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads)
-{
-	evsel->counts = perf_counts__new(ncpus, nthreads);
-	return evsel->counts != NULL ? 0 : -ENOMEM;
-}
-
-void perf_evsel__free_counts(struct perf_evsel *evsel)
-{
-	perf_counts__delete(evsel->counts);
-	evsel->counts = NULL;
-}
-
 void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 {
 	int i;
@@ -238,3 +189,142 @@
 		perf_evsel__reset_counts(evsel);
 	}
 }
+
+static void zero_per_pkg(struct perf_evsel *counter)
+{
+	if (counter->per_pkg_mask)
+		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
+}
+
+static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip)
+{
+	unsigned long *mask = counter->per_pkg_mask;
+	struct cpu_map *cpus = perf_evsel__cpus(counter);
+	int s;
+
+	*skip = false;
+
+	if (!counter->per_pkg)
+		return 0;
+
+	if (cpu_map__empty(cpus))
+		return 0;
+
+	if (!mask) {
+		mask = zalloc(MAX_NR_CPUS);
+		if (!mask)
+			return -ENOMEM;
+
+		counter->per_pkg_mask = mask;
+	}
+
+	s = cpu_map__get_socket(cpus, cpu);
+	if (s < 0)
+		return -1;
+
+	*skip = test_and_set_bit(s, mask) == 1;
+	return 0;
+}
+
+static int
+process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
+		       int cpu, int thread,
+		       struct perf_counts_values *count)
+{
+	struct perf_counts_values *aggr = &evsel->counts->aggr;
+	static struct perf_counts_values zero;
+	bool skip = false;
+
+	if (check_per_pkg(evsel, cpu, &skip)) {
+		pr_err("failed to read per-pkg counter\n");
+		return -1;
+	}
+
+	if (skip)
+		count = &zero;
+
+	switch (config->aggr_mode) {
+	case AGGR_THREAD:
+	case AGGR_CORE:
+	case AGGR_SOCKET:
+	case AGGR_NONE:
+		if (!evsel->snapshot)
+			perf_evsel__compute_deltas(evsel, cpu, thread, count);
+		perf_counts_values__scale(count, config->scale, NULL);
+		if (config->aggr_mode == AGGR_NONE)
+			perf_stat__update_shadow_stats(evsel, count->values, cpu);
+		break;
+	case AGGR_GLOBAL:
+		aggr->val += count->val;
+		if (config->scale) {
+			aggr->ena += count->ena;
+			aggr->run += count->run;
+		}
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int process_counter_maps(struct perf_stat_config *config,
+				struct perf_evsel *counter)
+{
+	int nthreads = thread_map__nr(counter->threads);
+	int ncpus = perf_evsel__nr_cpus(counter);
+	int cpu, thread;
+
+	if (counter->system_wide)
+		nthreads = 1;
+
+	for (thread = 0; thread < nthreads; thread++) {
+		for (cpu = 0; cpu < ncpus; cpu++) {
+			if (process_counter_values(config, counter, cpu, thread,
+						   perf_counts(counter->counts, cpu, thread)))
+				return -1;
+		}
+	}
+
+	return 0;
+}
+
+int perf_stat_process_counter(struct perf_stat_config *config,
+			      struct perf_evsel *counter)
+{
+	struct perf_counts_values *aggr = &counter->counts->aggr;
+	struct perf_stat *ps = counter->priv;
+	u64 *count = counter->counts->aggr.values;
+	int i, ret;
+
+	aggr->val = aggr->ena = aggr->run = 0;
+	init_stats(ps->res_stats);
+
+	if (counter->per_pkg)
+		zero_per_pkg(counter);
+
+	ret = process_counter_maps(config, counter);
+	if (ret)
+		return ret;
+
+	if (config->aggr_mode != AGGR_GLOBAL)
+		return 0;
+
+	if (!counter->snapshot)
+		perf_evsel__compute_deltas(counter, -1, -1, aggr);
+	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
+
+	for (i = 0; i < 3; i++)
+		update_stats(&ps->res_stats[i], count[i]);
+
+	if (verbose) {
+		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+			perf_evsel__name(counter), count[0], count[1], count[2]);
+	}
+
+	/*
+	 * Save the full runtime - to allow normalization during printout:
+	 */
+	perf_stat__update_shadow_stats(counter, count, 0);
+
+	return 0;
+}
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 1cfbe0a..62448c8 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -33,29 +33,13 @@
 	AGGR_THREAD,
 };
 
-struct perf_counts_values {
-	union {
-		struct {
-			u64 val;
-			u64 ena;
-			u64 run;
-		};
-		u64 values[3];
-	};
+struct perf_stat_config {
+	enum aggr_mode	aggr_mode;
+	bool		scale;
+	FILE		*output;
+	unsigned int	interval;
 };
 
-struct perf_counts {
-	s8			  scaled;
-	struct perf_counts_values aggr;
-	struct xyarray		  *values;
-};
-
-static inline struct perf_counts_values*
-perf_counts(struct perf_counts *counts, int cpu, int thread)
-{
-	return xyarray__entry(counts->values, cpu, thread);
-}
-
 void update_stats(struct stats *stats, u64 val);
 double avg_stats(struct stats *stats);
 double stddev_stats(struct stats *stats);
@@ -89,13 +73,6 @@
 void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
 				   double avg, int cpu, enum aggr_mode aggr);
 
-struct perf_counts *perf_counts__new(int ncpus, int nthreads);
-void perf_counts__delete(struct perf_counts *counts);
-
-void perf_evsel__reset_counts(struct perf_evsel *evsel);
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus, int nthreads);
-void perf_evsel__free_counts(struct perf_evsel *evsel);
-
 void perf_evsel__reset_stat_priv(struct perf_evsel *evsel);
 int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel);
 void perf_evsel__free_stat_priv(struct perf_evsel *evsel);
@@ -109,4 +86,7 @@
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
+
+int perf_stat_process_counter(struct perf_stat_config *config,
+			      struct perf_evsel *counter);
 #endif
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 6afd610..fc8781d 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -357,3 +357,42 @@
 
 	return p;
 }
+
+char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints)
+{
+	/*
+	 * FIXME: replace this with an expression using log10() when we
+	 * find a suitable implementation, maybe the one in the dvb drivers...
+	 *
+	 * "%s == %d || " = log10(MAXINT) * 2 + 8 chars for the operators
+	 */
+	size_t size = nints * 28 + 1; /* \0 */
+	size_t i, printed = 0;
+	char *expr = malloc(size);
+
+	if (expr) {
+		const char *or_and = "||", *eq_neq = "==";
+		char *e = expr;
+
+		if (!in) {
+			or_and = "&&";
+			eq_neq = "!=";
+		}
+
+		for (i = 0; i < nints; ++i) {
+			if (printed == size)
+				goto out_err_overflow;
+
+			if (i > 0)
+				printed += snprintf(e + printed, size - printed, " %s ", or_and);
+			printed += scnprintf(e + printed, size - printed,
+					     "%s %s %d", var, eq_neq, ints[i]);
+		}
+	}
+
+	return expr;
+
+out_err_overflow:
+	free(expr);
+	return NULL;
+}
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c
index 71f9d10..bdf98f6 100644
--- a/tools/perf/util/strlist.c
+++ b/tools/perf/util/strlist.c
@@ -72,7 +72,7 @@
 	FILE *fp = fopen(filename, "r");
 
 	if (fp == NULL)
-		return errno;
+		return -errno;
 
 	while (fgets(entry, sizeof(entry), fp) != NULL) {
 		const size_t len = strlen(entry);
@@ -108,43 +108,70 @@
 	return snode;
 }
 
-static int strlist__parse_list_entry(struct strlist *slist, const char *s)
+static int strlist__parse_list_entry(struct strlist *slist, const char *s,
+				     const char *subst_dir)
 {
+	int err;
+	char *subst = NULL;
+
 	if (strncmp(s, "file://", 7) == 0)
 		return strlist__load(slist, s + 7);
 
-	return strlist__add(slist, s);
+	if (subst_dir) {
+		err = -ENOMEM;
+		if (asprintf(&subst, "%s/%s", subst_dir, s) < 0)
+			goto out;
+
+		if (access(subst, F_OK) == 0) {
+			err = strlist__load(slist, subst);
+			goto out;
+		}
+	}
+
+	err = strlist__add(slist, s);
+out:
+	free(subst);
+	return err;
 }
 
-int strlist__parse_list(struct strlist *slist, const char *s)
+static int strlist__parse_list(struct strlist *slist, const char *s, const char *subst_dir)
 {
 	char *sep;
 	int err;
 
 	while ((sep = strchr(s, ',')) != NULL) {
 		*sep = '\0';
-		err = strlist__parse_list_entry(slist, s);
+		err = strlist__parse_list_entry(slist, s, subst_dir);
 		*sep = ',';
 		if (err != 0)
 			return err;
 		s = sep + 1;
 	}
 
-	return *s ? strlist__parse_list_entry(slist, s) : 0;
+	return *s ? strlist__parse_list_entry(slist, s, subst_dir) : 0;
 }
 
-struct strlist *strlist__new(bool dupstr, const char *list)
+struct strlist *strlist__new(const char *list, const struct strlist_config *config)
 {
 	struct strlist *slist = malloc(sizeof(*slist));
 
 	if (slist != NULL) {
+		bool dupstr = true;
+		const char *dirname = NULL;
+
+		if (config) {
+			dupstr = !config->dont_dupstr;
+			dirname = config->dirname;
+		}
+
 		rblist__init(&slist->rblist);
 		slist->rblist.node_cmp    = strlist__node_cmp;
 		slist->rblist.node_new    = strlist__node_new;
 		slist->rblist.node_delete = strlist__node_delete;
 
 		slist->dupstr	 = dupstr;
-		if (list && strlist__parse_list(slist, list) != 0)
+
+		if (list && strlist__parse_list(slist, list, dirname) != 0)
 			goto out_error;
 	}
 
diff --git a/tools/perf/util/strlist.h b/tools/perf/util/strlist.h
index 5c7f870..297565a 100644
--- a/tools/perf/util/strlist.h
+++ b/tools/perf/util/strlist.h
@@ -16,7 +16,12 @@
 	bool	       dupstr;
 };
 
-struct strlist *strlist__new(bool dupstr, const char *slist);
+struct strlist_config {
+	bool dont_dupstr;
+	const char *dirname;
+};
+
+struct strlist *strlist__new(const char *slist, const struct strlist_config *config);
 void strlist__delete(struct strlist *slist);
 
 void strlist__remove(struct strlist *slist, struct str_node *sn);
@@ -74,6 +79,4 @@
 #define strlist__for_each_safe(pos, n, slist)	\
 	for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
 	     pos = n, n = strlist__next(n))
-
-int strlist__parse_list(struct strlist *slist, const char *s);
 #endif /* __PERF_STRLIST_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 65f7e38..53bb5f5 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -38,7 +38,7 @@
 #endif
 
 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
-static int elf_getphdrnum(Elf *elf, size_t *dst)
+int elf_getphdrnum(Elf *elf, size_t *dst)
 {
 	GElf_Ehdr gehdr;
 	GElf_Ehdr *ehdr;
@@ -875,6 +875,17 @@
 		}
 	}
 
+	/*
+	 * Handle any relocation of vdso necessary because older kernels
+	 * attempted to prelink vdso to its virtual address.
+	 */
+	if (dso__is_vdso(dso)) {
+		GElf_Shdr tshdr;
+
+		if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
+			map->reloc = map->start - tshdr.sh_addr + tshdr.sh_offset;
+	}
+
 	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
 	/*
 	 * Initial kernel and module mappings do not map to the dso.  For
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 60f1141..1f97ffb 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -441,10 +441,25 @@
 	return &s->sym;
 }
 
+void dso__reset_find_symbol_cache(struct dso *dso)
+{
+	enum map_type type;
+
+	for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
+		dso->last_find_result[type].addr   = 0;
+		dso->last_find_result[type].symbol = NULL;
+	}
+}
+
 struct symbol *dso__find_symbol(struct dso *dso,
 				enum map_type type, u64 addr)
 {
-	return symbols__find(&dso->symbols[type], addr);
+	if (dso->last_find_result[type].addr != addr) {
+		dso->last_find_result[type].addr   = addr;
+		dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
+	}
+
+	return dso->last_find_result[type].symbol;
 }
 
 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
@@ -1133,8 +1148,8 @@
 
 	fd = open(kcore_filename, O_RDONLY);
 	if (fd < 0) {
-		pr_err("%s requires CAP_SYS_RAWIO capability to access.\n",
-			kcore_filename);
+		pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
+			 kcore_filename);
 		return -EINVAL;
 	}
 
@@ -1838,7 +1853,7 @@
 	zfree(&vmlinux_path);
 }
 
-static int vmlinux_path__init(struct perf_session_env *env)
+static int vmlinux_path__init(struct perf_env *env)
 {
 	struct utsname uts;
 	char bf[PATH_MAX];
@@ -1906,7 +1921,7 @@
 	if (list_str == NULL)
 		return 0;
 
-	*list = strlist__new(true, list_str);
+	*list = strlist__new(list_str, NULL);
 	if (!*list) {
 		pr_err("problems parsing %s list\n", list_name);
 		return -1;
@@ -1949,7 +1964,7 @@
 	return value;
 }
 
-int symbol__init(struct perf_session_env *env)
+int symbol__init(struct perf_env *env)
 {
 	const char *symfs;
 
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index b98ce51..440ba8a 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -106,7 +106,8 @@
 			filter_relative,
 			show_hist_headers,
 			branch_callstack,
-			has_filter;
+			has_filter,
+			show_ref_callgraph;
 	const char	*vmlinux_name,
 			*kallsyms_name,
 			*source_prefix,
@@ -251,8 +252,8 @@
 int filename__read_debuglink(const char *filename, char *debuglink,
 			     size_t size);
 
-struct perf_session_env;
-int symbol__init(struct perf_session_env *env);
+struct perf_env;
+int symbol__init(struct perf_env *env);
 void symbol__exit(void);
 void symbol__elf_init(void);
 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name);
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 292ae2c..6ec3c5c 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -195,7 +195,8 @@
 	pid_t pid, prev_pid = INT_MAX;
 	char *end_ptr;
 	struct str_node *pos;
-	struct strlist *slist = strlist__new(false, pid_str);
+	struct strlist_config slist_config = { .dont_dupstr = true, };
+	struct strlist *slist = strlist__new(pid_str, &slist_config);
 
 	if (!slist)
 		return NULL;
@@ -265,13 +266,14 @@
 	pid_t tid, prev_tid = INT_MAX;
 	char *end_ptr;
 	struct str_node *pos;
+	struct strlist_config slist_config = { .dont_dupstr = true, };
 	struct strlist *slist;
 
 	/* perf-stat expects threads to be generated even if tid not given */
 	if (!tid_str)
 		return thread_map__new_dummy();
 
-	slist = strlist__new(false, tid_str);
+	slist = strlist__new(tid_str, &slist_config);
 	if (!slist)
 		return NULL;
 
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index c307dd4..cab8cc2 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -46,6 +46,7 @@
 			lost_samples,
 			aux,
 			itrace_start,
+			context_switch,
 			throttle,
 			unthrottle;
 	event_attr_op	attr;
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index eb72716..2224598 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -341,20 +341,14 @@
 
 static int record_proc_kallsyms(void)
 {
-	unsigned int size;
-	const char *path = "/proc/kallsyms";
-	struct stat st;
-	int ret, err = 0;
-
-	ret = stat(path, &st);
-	if (ret < 0) {
-		/* not found */
-		size = 0;
-		if (write(output_fd, &size, 4) != 4)
-			err = -EIO;
-		return err;
-	}
-	return record_file(path, 4);
+	unsigned long long size = 0;
+	/*
+	 * Just to keep older perf.data file parsers happy, record a zero
+	 * sized kallsyms file, i.e. do the same thing that was done when
+	 * /proc/kallsyms (or something specified via --kallsyms, in a
+	 * different path) couldn't be read.
+	 */
+	return write(output_fd, &size, 4) != 4 ? -EIO : 0;
 }
 
 static int record_ftrace_printk(void)
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index d495741..8ff7d62 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -135,36 +135,6 @@
 	return event_format__fprintf(event, cpu, data, size, stdout);
 }
 
-void parse_proc_kallsyms(struct pevent *pevent,
-			 char *file, unsigned int size __maybe_unused)
-{
-	unsigned long long addr;
-	char *func;
-	char *line;
-	char *next = NULL;
-	char *addr_str;
-	char *mod;
-	char *fmt = NULL;
-
-	line = strtok_r(file, "\n", &next);
-	while (line) {
-		mod = NULL;
-		addr_str = strtok_r(line, " ", &fmt);
-		addr = strtoull(addr_str, NULL, 16);
-		/* skip character */
-		strtok_r(NULL, " ", &fmt);
-		func = strtok_r(NULL, "\t", &fmt);
-		mod = strtok_r(NULL, "]", &fmt);
-		/* truncate the extra '[' */
-		if (mod)
-			mod = mod + 1;
-
-		pevent_register_function(pevent, func, addr, mod);
-
-		line = strtok_r(NULL, "\n", &next);
-	}
-}
-
 void parse_ftrace_printk(struct pevent *pevent,
 			 char *file, unsigned int size __maybe_unused)
 {
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 54d9e9b..b67a0cc 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -162,25 +162,23 @@
 static int read_proc_kallsyms(struct pevent *pevent)
 {
 	unsigned int size;
-	char *buf;
 
 	size = read4(pevent);
 	if (!size)
 		return 0;
-
-	buf = malloc(size + 1);
-	if (buf == NULL)
-		return -1;
-
-	if (do_read(buf, size) < 0) {
-		free(buf);
-		return -1;
-	}
-	buf[size] = '\0';
-
-	parse_proc_kallsyms(pevent, buf, size);
-
-	free(buf);
+	/*
+	 * Just skip it, now that we configure libtraceevent to use the
+	 * tools/perf/ symbol resolver.
+	 *
+	 * We need to skip it so that we can continue parsing old perf.data
+	 * files that contain this /proc/kallsyms payload.
+	 *
+	 * Newer perf.data files will have just a 4-byte zeroed "kallsyms
+	 * payload", so that older tools can continue reading it and interpret
+	 * it as "no kallsyms payload is present".
+	 */
+	lseek(input_fd, size, SEEK_CUR);
+	trace_data_size += size;
 	return 0;
 }
 
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 6322d37..b90e646 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <traceevent/event-parse.h>
 #include "trace-event.h"
+#include "machine.h"
 #include "util.h"
 
 /*
@@ -19,6 +20,7 @@
  * there.
  */
 static struct trace_event tevent;
+static bool tevent_initialized;
 
 int trace_event__init(struct trace_event *t)
 {
@@ -32,6 +34,31 @@
 	return pevent ? 0 : -1;
 }
 
+static int trace_event__init2(void)
+{
+	int be = traceevent_host_bigendian();
+	struct pevent *pevent;
+
+	if (trace_event__init(&tevent))
+		return -1;
+
+	pevent = tevent.pevent;
+	pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+	pevent_set_file_bigendian(pevent, be);
+	pevent_set_host_bigendian(pevent, be);
+	tevent_initialized = true;
+	return 0;
+}
+
+int trace_event__register_resolver(struct machine *machine,
+				   pevent_func_resolver_t *func)
+{
+	if (!tevent_initialized && trace_event__init2())
+		return -1;
+
+	return pevent_set_function_resolver(tevent.pevent, func, machine);
+}
+
 void trace_event__cleanup(struct trace_event *t)
 {
 	traceevent_unload_plugins(t->plugin_list, t->pevent);
@@ -62,21 +89,8 @@
 struct event_format*
 trace_event__tp_format(const char *sys, const char *name)
 {
-	static bool initialized;
-
-	if (!initialized) {
-		int be = traceevent_host_bigendian();
-		struct pevent *pevent;
-
-		if (trace_event__init(&tevent))
-			return NULL;
-
-		pevent = tevent.pevent;
-		pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
-		pevent_set_file_bigendian(pevent, be);
-		pevent_set_host_bigendian(pevent, be);
-		initialized = true;
-	}
+	if (!tevent_initialized && trace_event__init2())
+		return NULL;
 
 	return tp_format(sys, name);
 }
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index d5168f0..da6cc4c 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -18,6 +18,8 @@
 
 int trace_event__init(struct trace_event *t);
 void trace_event__cleanup(struct trace_event *t);
+int trace_event__register_resolver(struct machine *machine,
+				   pevent_func_resolver_t *func);
 struct event_format*
 trace_event__tp_format(const char *sys, const char *name);
 
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index edc2d63..7acafb3 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -34,6 +34,7 @@
 bool perf_host  = true;
 bool perf_guest = false;
 
+char tracing_path[PATH_MAX + 1]        = "/sys/kernel/debug/tracing";
 char tracing_events_path[PATH_MAX + 1] = "/sys/kernel/debug/tracing/events";
 
 void event_attr_init(struct perf_event_attr *attr)
@@ -391,6 +392,8 @@
 
 static void set_tracing_events_path(const char *tracing, const char *mountpoint)
 {
+	snprintf(tracing_path, sizeof(tracing_path), "%s/%s",
+		 mountpoint, tracing);
 	snprintf(tracing_events_path, sizeof(tracing_events_path), "%s/%s%s",
 		 mountpoint, tracing, "events");
 }
@@ -436,66 +439,14 @@
 
 void perf_debugfs_set_path(const char *mntpt)
 {
-	snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
 	set_tracing_events_path("tracing/", mntpt);
 }
 
-static const char *find_tracefs(void)
-{
-	const char *path = __perf_tracefs_mount(NULL);
-
-	return path;
-}
-
-static const char *find_debugfs(void)
-{
-	const char *path = __perf_debugfs_mount(NULL);
-
-	if (!path)
-		fprintf(stderr, "Your kernel does not support the debugfs filesystem");
-
-	return path;
-}
-
-/*
- * Finds the path to the debugfs/tracing
- * Allocates the string and stores it.
- */
-const char *find_tracing_dir(void)
-{
-	const char *tracing_dir = "";
-	static char *tracing;
-	static int tracing_found;
-	const char *debugfs;
-
-	if (tracing_found)
-		return tracing;
-
-	debugfs = find_tracefs();
-	if (!debugfs) {
-		tracing_dir = "/tracing";
-		debugfs = find_debugfs();
-		if (!debugfs)
-			return NULL;
-	}
-
-	if (asprintf(&tracing, "%s%s", debugfs, tracing_dir) < 0)
-		return NULL;
-
-	tracing_found = 1;
-	return tracing;
-}
-
 char *get_tracing_file(const char *name)
 {
-	const char *tracing;
 	char *file;
 
-	tracing = find_tracing_dir();
-	if (!tracing)
-		return NULL;
-
-	if (asprintf(&file, "%s/%s", tracing, name) < 0)
+	if (asprintf(&file, "%s/%s", tracing_path, name) < 0)
 		return NULL;
 
 	return file;
@@ -566,6 +517,96 @@
 	return (unsigned long) -1;
 }
 
+int get_stack_size(const char *str, unsigned long *_size)
+{
+	char *endptr;
+	unsigned long size;
+	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
+
+	size = strtoul(str, &endptr, 0);
+
+	do {
+		if (*endptr)
+			break;
+
+		size = round_up(size, sizeof(u64));
+		if (!size || size > max_size)
+			break;
+
+		*_size = size;
+		return 0;
+
+	} while (0);
+
+	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
+	       max_size, str);
+	return -1;
+}
+
+int parse_callchain_record(const char *arg, struct callchain_param *param)
+{
+	char *tok, *name, *saveptr = NULL;
+	char *buf;
+	int ret = -1;
+
+	/* We need a buffer that we know we can write to. */
+	buf = malloc(strlen(arg) + 1);
+	if (!buf)
+		return -ENOMEM;
+
+	strcpy(buf, arg);
+
+	tok = strtok_r((char *)buf, ",", &saveptr);
+	name = tok ? : (char *)buf;
+
+	do {
+		/* Framepointer style */
+		if (!strncmp(name, "fp", sizeof("fp"))) {
+			if (!strtok_r(NULL, ",", &saveptr)) {
+				param->record_mode = CALLCHAIN_FP;
+				ret = 0;
+			} else
+				pr_err("callchain: No more arguments "
+				       "needed for --call-graph fp\n");
+			break;
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+		/* Dwarf style */
+		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
+			const unsigned long default_stack_dump_size = 8192;
+
+			ret = 0;
+			param->record_mode = CALLCHAIN_DWARF;
+			param->dump_size = default_stack_dump_size;
+
+			tok = strtok_r(NULL, ",", &saveptr);
+			if (tok) {
+				unsigned long size = 0;
+
+				ret = get_stack_size(tok, &size);
+				param->dump_size = size;
+			}
+#endif /* HAVE_DWARF_UNWIND_SUPPORT */
+		} else if (!strncmp(name, "lbr", sizeof("lbr"))) {
+			if (!strtok_r(NULL, ",", &saveptr)) {
+				param->record_mode = CALLCHAIN_LBR;
+				ret = 0;
+			} else
+				pr_err("callchain: No more arguments "
+					"needed for --call-graph lbr\n");
+			break;
+		} else {
+			pr_err("callchain: Unknown --call-graph option "
+			       "value: %s\n", arg);
+			break;
+		}
+
+	} while (0);
+
+	free(buf);
+	return ret;
+}
+
 int filename__read_str(const char *filename, char **buf, size_t *sizep)
 {
 	size_t size = 0, alloc_size = 0;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 8bce58b..291be1d 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -83,10 +83,10 @@
 extern const char *graph_line;
 extern const char *graph_dotted_line;
 extern char buildid_dir[];
+extern char tracing_path[];
 extern char tracing_events_path[];
 extern void perf_debugfs_set_path(const char *mountpoint);
 const char *perf_debugfs_mount(const char *mountpoint);
-const char *find_tracing_dir(void);
 char *get_tracing_file(const char *name);
 void put_tracing_file(char *file);
 
@@ -318,6 +318,7 @@
 struct dso;
 struct symbol;
 
+extern bool srcline_full_filename;
 char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
 		  bool show_sym);
 void free_srcline(char *srcline);
@@ -339,4 +340,18 @@
 int lzma_decompress_to_file(const char *input, int output_fd);
 #endif
 
+char *asprintf_expr_inout_ints(const char *var, bool in, size_t nints, int *ints);
+
+static inline char *asprintf_expr_in_ints(const char *var, size_t nints, int *ints)
+{
+	return asprintf_expr_inout_ints(var, true, nints, ints);
+}
+
+static inline char *asprintf_expr_not_in_ints(const char *var, size_t nints, int *ints)
+{
+	return asprintf_expr_inout_ints(var, false, nints, ints);
+}
+
+int get_stack_size(const char *str, unsigned long *_size);
+
 #endif /* GIT_COMPAT_UTIL_H */
diff --git a/tools/power/acpi/Makefile b/tools/power/acpi/Makefile
index 3d1537b..e882c83 100644
--- a/tools/power/acpi/Makefile
+++ b/tools/power/acpi/Makefile
@@ -8,154 +8,20 @@
 # as published by the Free Software Foundation; version 2
 # of the License.
 
-OUTPUT=./
-ifeq ("$(origin O)", "command line")
-	OUTPUT := $(O)/
-endif
+include ../../scripts/Makefile.include
 
-ifneq ($(OUTPUT),)
-# check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
-$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
-endif
+all: acpidump ec
+clean: acpidump_clean ec_clean
+install: acpidump_install ec_install
+uninstall: acpidump_uninstall ec_uninstall
 
-SUBDIRS = tools/ec
+acpidump ec: FORCE
+	$(call descend,tools/$@,all)
+acpidump_clean ec_clean:
+	$(call descend,tools/$(@:_clean=),clean)
+acpidump_install ec_install:
+	$(call descend,tools/$(@:_install=),install)
+acpidump_uninstall ec_uninstall:
+	$(call descend,tools/$(@:_uninstall=),uninstall)
 
-# --- CONFIGURATION BEGIN ---
-
-# Set the following to `true' to make a unstripped, unoptimized
-# binary. Leave this set to `false' for production use.
-DEBUG ?=	true
-
-# make the build silent. Set this to something else to make it noisy again.
-V ?=		false
-
-# Prefix to the directories we're installing to
-DESTDIR ?=
-
-# --- CONFIGURATION END ---
-
-# Directory definitions. These are default and most probably
-# do not need to be changed. Please note that DESTDIR is
-# added in front of any of them
-
-bindir ?=	/usr/bin
-sbindir ?=	/usr/sbin
-mandir ?=	/usr/man
-
-# Toolchain: what tools do we use, and what options do they need:
-
-INSTALL = /usr/bin/install -c
-INSTALL_PROGRAM = ${INSTALL}
-INSTALL_DATA  = ${INSTALL} -m 644
-INSTALL_SCRIPT = ${INSTALL_PROGRAM}
-
-# If you are running a cross compiler, you may want to set this
-# to something more interesting, like "arm-linux-".  If you want
-# to compile vs uClibc, that can be done here as well.
-CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
-CC = $(CROSS)gcc
-LD = $(CROSS)gcc
-STRIP = $(CROSS)strip
-HOSTCC = gcc
-
-# check if compiler option is supported
-cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
-
-# use '-Os' optimization if available, else use -O2
-OPTIMIZATION := $(call cc-supports,-Os,-O2)
-
-WARNINGS := -Wall
-WARNINGS += $(call cc-supports,-Wstrict-prototypes)
-WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
-
-KERNEL_INCLUDE := ../../../include
-ACPICA_INCLUDE := ../../../drivers/acpi/acpica
-CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
-CFLAGS += $(WARNINGS)
-
-ifeq ($(strip $(V)),false)
-	QUIET=@
-	ECHO=@echo
-else
-	QUIET=
-	ECHO=@\#
-endif
-export QUIET ECHO
-
-# if DEBUG is enabled, then we do not strip or optimize
-ifeq ($(strip $(DEBUG)),true)
-	CFLAGS += -O1 -g -DDEBUG
-	STRIPCMD = /bin/true -Since_we_are_debugging
-else
-	CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
-	STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
-endif
-
-# --- ACPIDUMP BEGIN ---
-
-vpath %.c \
-	../../../drivers/acpi/acpica\
-	tools/acpidump\
-	common\
-	os_specific/service_layers
-
-CFLAGS += -DACPI_DUMP_APP -Itools/acpidump
-
-DUMP_OBJS = \
-	apdump.o\
-	apfiles.o\
-	apmain.o\
-	osunixdir.o\
-	osunixmap.o\
-	osunixxf.o\
-	tbprint.o\
-	tbxfroot.o\
-	utbuffer.o\
-	utdebug.o\
-	utexcep.o\
-	utglobal.o\
-	utmath.o\
-	utprint.o\
-	utstring.o\
-	utxferror.o\
-	oslibcfs.o\
-	oslinuxtbl.o\
-	cmfsize.o\
-	getopt.o
-
-DUMP_OBJS := $(addprefix $(OUTPUT)tools/acpidump/,$(DUMP_OBJS))
-
-$(OUTPUT)acpidump: $(DUMP_OBJS)
-	$(ECHO) "  LD      " $@
-	$(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(DUMP_OBJS) -L$(OUTPUT) -o $@
-	$(QUIET) $(STRIPCMD) $@
-
-$(OUTPUT)tools/acpidump/%.o: %.c
-	$(ECHO) "  CC      " $@
-	$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
-
-# --- ACPIDUMP END ---
-
-all: $(OUTPUT)acpidump
-	echo $(OUTPUT)
-
-clean:
-	-find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
-	 | xargs rm -f
-	-rm -f $(OUTPUT)acpidump
-
-install-tools:
-	$(INSTALL) -d $(DESTDIR)${sbindir}
-	$(INSTALL_PROGRAM) $(OUTPUT)acpidump $(DESTDIR)${sbindir}
-
-install-man:
-	$(INSTALL_DATA) -D man/acpidump.8 $(DESTDIR)${mandir}/man8/acpidump.8
-
-install: all install-tools install-man
-
-uninstall:
-	- rm -f $(DESTDIR)${sbindir}/acpidump
-	- rm -f $(DESTDIR)${mandir}/man8/acpidump.8
-
-.PHONY: all utils install-tools install-man install uninstall clean
+.PHONY: FORCE
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config
new file mode 100644
index 0000000..552af68
--- /dev/null
+++ b/tools/power/acpi/Makefile.config
@@ -0,0 +1,92 @@
+# tools/power/acpi/Makefile.config - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+include ../../../../scripts/Makefile.include
+
+OUTPUT=./
+ifeq ("$(origin O)", "command line")
+	OUTPUT := $(O)/
+endif
+
+ifneq ($(OUTPUT),)
+# check that the output directory actually exists
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
+endif
+
+# --- CONFIGURATION BEGIN ---
+
+# Set the following to `true' to make an unstripped, unoptimized
+# binary. Leave this set to `false' for production use.
+DEBUG ?=	true
+
+# make the build silent. Set this to something else to make it noisy again.
+V ?=		false
+
+# Prefix to the directories we're installing to
+DESTDIR ?=
+
+# --- CONFIGURATION END ---
+
+# Directory definitions. These are default and most probably
+# do not need to be changed. Please note that DESTDIR is
+# added in front of any of them
+
+bindir ?=	/usr/bin
+sbindir ?=	/usr/sbin
+mandir ?=	/usr/man
+
+# Toolchain: what tools do we use, and what options do they need:
+
+INSTALL = /usr/bin/install -c
+INSTALL_PROGRAM = ${INSTALL}
+INSTALL_DATA  = ${INSTALL} -m 644
+INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+
+# If you are running a cross compiler, you may want to set this
+# to something more interesting, like "arm-linux-".  If you want
+# to compile vs uClibc, that can be done here as well.
+CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
+CC = $(CROSS)gcc
+LD = $(CROSS)gcc
+STRIP = $(CROSS)strip
+HOSTCC = gcc
+
+# check if compiler option is supported
+cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+
+# use '-Os' optimization if available, else use -O2
+OPTIMIZATION := $(call cc-supports,-Os,-O2)
+
+WARNINGS := -Wall
+WARNINGS += $(call cc-supports,-Wstrict-prototypes)
+WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
+
+KERNEL_INCLUDE := ../../../include
+ACPICA_INCLUDE := ../../../drivers/acpi/acpica
+CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE)
+CFLAGS += $(WARNINGS)
+
+ifeq ($(strip $(V)),false)
+	QUIET=@
+	ECHO=@echo
+else
+	QUIET=
+	ECHO=@\#
+endif
+
+# if DEBUG is enabled, then we do not strip or optimize
+ifeq ($(strip $(DEBUG)),true)
+	CFLAGS += -O1 -g -DDEBUG
+	STRIPCMD = /bin/true -Since_we_are_debugging
+else
+	CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
+	STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
+endif
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules
new file mode 100644
index 0000000..ec87a9e
--- /dev/null
+++ b/tools/power/acpi/Makefile.rules
@@ -0,0 +1,37 @@
+# tools/power/acpi/Makefile.rules - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE
+	$(ECHO) "  LD      " $@
+	$(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@
+	$(QUIET) $(STRIPCMD) $@
+
+$(OUTPUT)%.o: %.c
+	$(ECHO) "  CC      " $@
+	$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
+
+all: $(OUTPUT)$(TOOL)
+clean:
+	-find $(OUTPUT) \( -not -type d \) \
+	-and \( -name '*~' -o -name '*.[oas]' \) \
+	-type f -print \
+	 | xargs rm -f
+	-rm -f $(OUTPUT)$(TOOL)
+
+install-tools:
+	$(INSTALL) -d $(DESTDIR)${sbindir}
+	$(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir}
+uninstall-tools:
+	- rm -f $(DESTDIR)${sbindir}/$(TOOL)
+
+install: all install-tools $(EXTRA_INSTALL)
+uninstall: uninstall-tools $(EXTRA_UNINSTALL)
+
+.PHONY: FORCE
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile
new file mode 100644
index 0000000..8d76157
--- /dev/null
+++ b/tools/power/acpi/tools/acpidump/Makefile
@@ -0,0 +1,53 @@
+# tools/power/acpi/tools/acpidump/Makefile - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+include ../../Makefile.config
+
+TOOL = acpidump
+EXTRA_INSTALL = install-man
+EXTRA_UNINSTALL = uninstall-man
+
+vpath %.c \
+	../../../../../drivers/acpi/acpica\
+	./\
+	../../common\
+	../../os_specific/service_layers
+CFLAGS += -DACPI_DUMP_APP -I.\
+	-I../../../../../drivers/acpi/acpica\
+	-I../../../../../include
+TOOL_OBJS = \
+	apdump.o\
+	apfiles.o\
+	apmain.o\
+	osunixdir.o\
+	osunixmap.o\
+	osunixxf.o\
+	tbprint.o\
+	tbxfroot.o\
+	utbuffer.o\
+	utdebug.o\
+	utexcep.o\
+	utglobal.o\
+	utmath.o\
+	utnonansi.o\
+	utprint.o\
+	utstring.o\
+	utxferror.o\
+	oslibcfs.o\
+	oslinuxtbl.o\
+	cmfsize.o\
+	getopt.o
+
+include ../../Makefile.rules
+
+install-man: ../../man/acpidump.8
+	$(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8
+uninstall-man:
+	- rm -f $(DESTDIR)${mandir}/man8/acpidump.8
diff --git a/tools/power/acpi/tools/ec/Makefile b/tools/power/acpi/tools/ec/Makefile
index b7b0b92..75d8a12 100644
--- a/tools/power/acpi/tools/ec/Makefile
+++ b/tools/power/acpi/tools/ec/Makefile
@@ -1,22 +1,17 @@
-ec_access: ec_access.o
-	$(ECHO) "  LD      " $@
-	$(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $< -o $@
-	$(QUIET) $(STRIPCMD) $@
+# tools/power/acpi/tools/ec/Makefile - ACPI tool Makefile
+#
+# Copyright (c) 2015, Intel Corporation
+#   Author: Lv Zheng <lv.zheng@intel.com>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
 
-%.o: %.c
-	$(ECHO) "  CC      " $@
-	$(QUIET) $(CC) -c $(CFLAGS) -o $@ $<
+include ../../Makefile.config
 
-all: ec_access
+TOOL = ec
+TOOL_OBJS = \
+	ec_access.o
 
-install:
-	$(INSTALL) -d $(DESTDIR)${sbindir}
-	$(INSTALL_PROGRAM) ec_access $(DESTDIR)${sbindir}
-
-uninstall:
-	- rm -f $(DESTDIR)${sbindir}/ec_access
-
-clean:
-	-rm -f $(OUTPUT)ec_access
-
-.PHONY: all install uninstall
+include ../../Makefile.rules
diff --git a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
index 5224ee5..6ff8383 100644
--- a/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
+++ b/tools/power/cpupower/debug/kernel/cpufreq-test_tsc.c
@@ -81,11 +81,11 @@
 
 	printk(KERN_DEBUG "start--> \n");
 	then = read_pmtmr();
-        rdtscll(then_tsc);
+	then_tsc = rdtsc();
 	for (i=0;i<20;i++) {
 		mdelay(100);
 		now = read_pmtmr();
-		rdtscll(now_tsc);
+		now_tsc = rdtsc();
 		diff = (now - then) & 0xFFFFFF;
 		diff_tsc = now_tsc - then_tsc;
 		printk(KERN_DEBUG "t1: %08u t2: %08u diff_pmtmr: %08u diff_tsc: %016llu\n", then, now, diff, diff_tsc);
diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
index f656e58..4e21357 100644
--- a/tools/power/cpupower/utils/cpufreq-set.c
+++ b/tools/power/cpupower/utils/cpufreq-set.c
@@ -17,6 +17,7 @@
 
 #include "cpufreq.h"
 #include "helpers/helpers.h"
+#include "helpers/sysfs.h"
 
 #define NORM_FREQ_LEN 32
 
@@ -318,6 +319,9 @@
 		    cpufreq_cpu_exists(cpu))
 			continue;
 
+		if (sysfs_is_cpu_online(cpu) != 1)
+			continue;
+
 		printf(_("Setting cpu: %d\n"), cpu);
 		ret = do_one_cpu(cpu, &new_pol, freq, policychange);
 		if (ret) {
diff --git a/tools/power/cpupower/utils/helpers/topology.c b/tools/power/cpupower/utils/helpers/topology.c
index c13120a..cea398c 100644
--- a/tools/power/cpupower/utils/helpers/topology.c
+++ b/tools/power/cpupower/utils/helpers/topology.c
@@ -73,6 +73,8 @@
 	for (cpu = 0; cpu < cpus; cpu++) {
 		cpu_top->core_info[cpu].cpu = cpu;
 		cpu_top->core_info[cpu].is_online = sysfs_is_cpu_online(cpu);
+		if (!cpu_top->core_info[cpu].is_online)
+			continue;
 		if(sysfs_topology_read_file(
 			cpu,
 			"physical_package_id",
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index 05b8fc3..622db68 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -251,11 +251,6 @@
 that they count at TSC rate, which is true on all processors tested to date.
 
 .SH REFERENCES
-"Intel® Turbo Boost Technology
-in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
-http://download.intel.com/design/processor/applnots/320354.pdf
-
-"Intel® 64 and IA-32 Architectures Software Developer's Manual
 Volume 3B: System Programming Guide"
 http://www.intel.com/products/processor/manuals/
 
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 323b65e..9655cb4 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -372,7 +372,7 @@
 		if (do_rapl & RAPL_GFX)
 			outp += sprintf(outp, "   GFX_J");
 		if (do_rapl & RAPL_DRAM)
-			outp += sprintf(outp, "   RAM_W");
+			outp += sprintf(outp, "   RAM_J");
 		if (do_rapl & RAPL_PKG_PERF_STATUS)
 			outp += sprintf(outp, "   PKG_%%");
 		if (do_rapl & RAPL_DRAM_PERF_STATUS)
@@ -1157,7 +1157,7 @@
 
 	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
 
-	fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 40) & 0xFF;
 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1168,8 +1168,8 @@
 		ratio, bclk, ratio * bclk);
 
 	get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
-	fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
-		msr, msr & 0x2 ? "EN" : "DIS");
+	fprintf(stderr, "cpu%d: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
+		base_cpu, msr, msr & 0x2 ? "EN" : "DIS");
 
 	return;
 }
@@ -1182,7 +1182,7 @@
 
 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
 
-	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 8) & 0xFF;
 	if (ratio)
@@ -1204,7 +1204,7 @@
 
 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
 
-	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 56) & 0xFF;
 	if (ratio)
@@ -1256,7 +1256,7 @@
 
 	get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
 
-	fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 56) & 0xFF;
 	if (ratio)
@@ -1312,8 +1312,8 @@
 
 	get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
 
-	fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
-	msr);
+	fprintf(stderr, "cpu%d: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
+		base_cpu, msr);
 
 	/**
 	 * Turbo encoding in KNL is as follows:
@@ -1371,7 +1371,7 @@
 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
 
-	fprintf(stderr, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr);
+	fprintf(stderr, "cpu%d: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", base_cpu, msr);
 
 	fprintf(stderr, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: %s)\n",
 		(msr & SNB_C3_AUTO_UNDEMOTE) ? "UNdemote-C3, " : "",
@@ -1384,6 +1384,49 @@
 	return;
 }
 
+static void
+dump_config_tdp(void)
+{
+	unsigned long long msr;
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_NOMINAL, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_NOMINAL: 0x%08llx", base_cpu, msr);
+	fprintf(stderr, " (base_ratio=%d)\n", (unsigned int)msr & 0xEF);
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_1, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_1: 0x%08llx (", base_cpu, msr);
+	if (msr) {
+		fprintf(stderr, "PKG_MIN_PWR_LVL1=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
+		fprintf(stderr, "PKG_MAX_PWR_LVL1=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
+		fprintf(stderr, "LVL1_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
+		fprintf(stderr, "PKG_TDP_LVL1=%d", (unsigned int)(msr) & 0xEFFF);
+	}
+	fprintf(stderr, ")\n");
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_LEVEL_2, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_LEVEL_2: 0x%08llx (", base_cpu, msr);
+	if (msr) {
+		fprintf(stderr, "PKG_MIN_PWR_LVL2=%d ", (unsigned int)(msr >> 48) & 0xEFFF);
+		fprintf(stderr, "PKG_MAX_PWR_LVL2=%d ", (unsigned int)(msr >> 32) & 0xEFFF);
+		fprintf(stderr, "LVL2_RATIO=%d ", (unsigned int)(msr >> 16) & 0xEF);
+		fprintf(stderr, "PKG_TDP_LVL2=%d", (unsigned int)(msr) & 0xEFFF);
+	}
+	fprintf(stderr, ")\n");
+
+	get_msr(base_cpu, MSR_CONFIG_TDP_CONTROL, &msr);
+	fprintf(stderr, "cpu%d: MSR_CONFIG_TDP_CONTROL: 0x%08llx (", base_cpu, msr);
+	if ((msr) & 0x3)
+		fprintf(stderr, "TDP_LEVEL=%d ", (unsigned int)(msr) & 0x3);
+	fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
+	fprintf(stderr, ")\n");
+	
+	get_msr(base_cpu, MSR_TURBO_ACTIVATION_RATIO, &msr);
+	fprintf(stderr, "cpu%d: MSR_TURBO_ACTIVATION_RATIO: 0x%08llx (", base_cpu, msr);
+	fprintf(stderr, "MAX_NON_TURBO_RATIO=%d", (unsigned int)(msr) & 0xEF);
+	fprintf(stderr, " lock=%d", (unsigned int)(msr >> 31) & 1);
+	fprintf(stderr, ")\n");
+}
+
 void free_all_buffers(void)
 {
 	CPU_FREE(cpu_present_set);
@@ -1873,6 +1916,36 @@
 		return 0;
 	}
 }
+int has_config_tdp(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	if (family != 6)
+		return 0;
+
+	switch (model) {
+	case 0x3A:	/* IVB */
+	case 0x3E:	/* IVB Xeon */
+
+	case 0x3C:	/* HSW */
+	case 0x3F:	/* HSX */
+	case 0x45:	/* HSW */
+	case 0x46:	/* HSW */
+	case 0x3D:	/* BDW */
+	case 0x47:	/* BDW */
+	case 0x4F:	/* BDX */
+	case 0x56:	/* BDX-DE */
+	case 0x4E:	/* SKL */
+	case 0x5E:	/* SKL */
+
+	case 0x57:	/* Knights Landing */
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 static void
 dump_cstate_pstate_config_info(family, model)
 {
@@ -1893,6 +1966,9 @@
 	if (has_knl_turbo_ratio_limit(family, model))
 		dump_knl_turbo_ratio_limits();
 
+	if (has_config_tdp(family, model))
+		dump_config_tdp();
+
 	dump_nhm_cst_cfg();
 }
 
@@ -3014,7 +3090,7 @@
 }
 
 void print_version() {
-	fprintf(stderr, "turbostat version 4.7 27-May, 2015"
+	fprintf(stderr, "turbostat version 4.7 17-June, 2015"
 		" - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -3042,7 +3118,7 @@
 
 	progname = argv[0];
 
-	while ((opt = getopt_long_only(argc, argv, "C:c:Ddhi:JM:m:PpST:v",
+	while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:PpST:v",
 				long_options, &option_index)) != -1) {
 		switch (opt) {
 		case 'C':
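The dump_config_tdp() additions above decode individual ratio and power-limit
fields out of 64-bit MSR values by shifting and masking.  A minimal
self-contained illustration of that pattern (the field positions below are
made up for the example and are not the CONFIG_TDP register layouts):

#include <stdint.h>
#include <stdio.h>

/* Extract bits [hi:lo] of a 64-bit MSR value -- the shift-and-mask
 * pattern used by the dump routines above. */
static unsigned int msr_field(uint64_t msr, int hi, int lo)
{
	return (unsigned int)((msr >> lo) & ((1ULL << (hi - lo + 1)) - 1));
}

int main(void)
{
	uint64_t msr = 0x0000000000231100ULL;	/* made-up register value */

	printf("bits 23:16 = 0x%x\n", msr_field(msr, 23, 16));	/* 0x23 */
	printf("bits 14:0  = 0x%x\n", msr_field(msr, 14, 0));	/* 0x1100 */
	return 0;
}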
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 24ae9e8..b8f12e0 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -20,6 +20,7 @@
 TARGETS += timers
 endif
 TARGETS += user
+TARGETS += static_keys
 TARGETS += vm
 TARGETS += x86
 #Please keep the TARGETS list alphabetically sorted
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 3fc6c10..c4366dc 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -9,7 +9,15 @@
 
 DIR=/sys/devices/virtual/misc/test_firmware
 
-OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+# CONFIG_FW_LOADER_USER_HELPER registers a sysfs class under /sys/class/firmware/.
+# These days very few kernels enable CONFIG_FW_LOADER_USER_HELPER, so use the
+# presence of that class directory as the indicator for it.
+HAS_FW_LOADER_USER_HELPER=$(if [ -d /sys/class/firmware/ ]; then echo yes; else echo no; fi)
+
+if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+	OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+fi
+
 OLD_FWPATH=$(cat /sys/module/firmware_class/parameters/path)
 
 FWPATH=$(mktemp -d)
@@ -17,7 +25,9 @@
 
 test_finish()
 {
-	echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
+	if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+		echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
+	fi
 	echo -n "$OLD_PATH" >/sys/module/firmware_class/parameters/path
 	rm -f "$FW"
 	rmdir "$FWPATH"
@@ -25,8 +35,11 @@
 
 trap "test_finish" EXIT
 
-# Turn down the timeout so failures don't take so long.
-echo 1 >/sys/class/firmware/timeout
+if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+	# Turn down the timeout so failures don't take so long.
+	echo 1 >/sys/class/firmware/timeout
+fi
+
 # Set the kernel search path.
 echo -n "$FWPATH" >/sys/module/firmware_class/parameters/path
 
@@ -41,7 +54,9 @@
 	echo "$0: firmware was not expected to match" >&2
 	exit 1
 else
-	echo "$0: timeout works"
+	if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+		echo "$0: timeout works"
+	fi
 fi
 
 # This should succeed via kernel load or will fail after 1 second after
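The detection logic added above keys off the presence of the firmware class
directory in sysfs.  The same probe written in C, for comparison (a
hypothetical helper, not part of the selftest itself):

#include <stdio.h>
#include <sys/stat.h>

/* The usermode-helper fallback (CONFIG_FW_LOADER_USER_HELPER) registers a
 * "firmware" class, so the class directory existing is the indicator the
 * scripts above rely on. */
static int has_fw_loader_user_helper(void)
{
	struct stat st;

	return stat("/sys/class/firmware", &st) == 0 && S_ISDIR(st.st_mode);
}

int main(void)
{
	printf("firmware usermode helper: %s\n",
	       has_fw_loader_user_helper() ? "present" : "absent");
	return 0;
}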
diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
index 6efbade..b9983f8 100755
--- a/tools/testing/selftests/firmware/fw_userhelper.sh
+++ b/tools/testing/selftests/firmware/fw_userhelper.sh
@@ -9,7 +9,17 @@
 
 DIR=/sys/devices/virtual/misc/test_firmware
 
-OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+# CONFIG_FW_LOADER_USER_HELPER registers a sysfs class under /sys/class/firmware/.
+# These days very few kernels enable CONFIG_FW_LOADER_USER_HELPER, so use the
+# presence of that class directory as the indicator for it.
+HAS_FW_LOADER_USER_HELPER=$(if [ -d /sys/class/firmware/ ]; then echo yes; else echo no; fi)
+
+if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
+       OLD_TIMEOUT=$(cat /sys/class/firmware/timeout)
+else
+	echo "usermode helper disabled so ignoring test"
+	exit 0
+fi
 
 FWPATH=$(mktemp -d)
 FW="$FWPATH/test-firmware.bin"
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 08c2a36..4124593 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -19,6 +19,8 @@
  *   - PACKET_FANOUT_LB
  *   - PACKET_FANOUT_CPU
  *   - PACKET_FANOUT_ROLLOVER
+ *   - PACKET_FANOUT_CBPF
+ *   - PACKET_FANOUT_EBPF
  *
  * Todo:
  * - functionality: PACKET_FANOUT_FLAG_DEFRAG
@@ -44,7 +46,9 @@
 #include <arpa/inet.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <linux/unistd.h>	/* for __NR_bpf */
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/if_packet.h>
 #include <net/ethernet.h>
 #include <netinet/ip.h>
@@ -91,6 +95,51 @@
 	return fd;
 }
 
+static void sock_fanout_set_ebpf(int fd)
+{
+	const int len_off = __builtin_offsetof(struct __sk_buff, len);
+	struct bpf_insn prog[] = {
+		{ BPF_ALU64 | BPF_MOV | BPF_X,   6, 1, 0, 0 },
+		{ BPF_LDX   | BPF_W   | BPF_MEM, 0, 6, len_off, 0 },
+		{ BPF_JMP   | BPF_JGE | BPF_K,   0, 0, 1, DATA_LEN },
+		{ BPF_JMP   | BPF_JA  | BPF_K,   0, 0, 4, 0 },
+		{ BPF_LD    | BPF_B   | BPF_ABS, 0, 0, 0, 0x50 },
+		{ BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 2, DATA_CHAR },
+		{ BPF_JMP   | BPF_JEQ | BPF_K,   0, 0, 1, DATA_CHAR_1 },
+		{ BPF_ALU   | BPF_MOV | BPF_K,   0, 0, 0, 0 },
+		{ BPF_JMP   | BPF_EXIT,          0, 0, 0, 0 }
+	};
+	char log_buf[512];
+	union bpf_attr attr;
+	int pfd;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	attr.insns = (unsigned long) prog;
+	attr.insn_cnt = sizeof(prog) / sizeof(prog[0]);
+	attr.license = (unsigned long) "GPL";
+	attr.log_buf = (unsigned long) log_buf;
+	attr.log_size = sizeof(log_buf);
+	attr.log_level = 1;
+
+	pfd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+	if (pfd < 0) {
+		perror("bpf");
+		fprintf(stderr, "bpf verifier:\n%s\n", log_buf);
+		exit(1);
+	}
+
+	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &pfd, sizeof(pfd))) {
+		perror("fanout data ebpf");
+		exit(1);
+	}
+
+	if (close(pfd)) {
+		perror("close ebpf");
+		exit(1);
+	}
+}
+
 static char *sock_fanout_open_ring(int fd)
 {
 	struct tpacket_req req = {
@@ -115,8 +164,8 @@
 
 	ring = mmap(0, req.tp_block_size * req.tp_block_nr,
 		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-	if (!ring) {
-		fprintf(stderr, "packetsock ring mmap\n");
+	if (ring == MAP_FAILED) {
+		perror("packetsock ring mmap");
 		exit(1);
 	}
 
@@ -209,6 +258,7 @@
 {
 	const int expect0[] = { 0, 0 };
 	char *rings[2];
+	uint8_t type = typeflags & 0xFF;
 	int fds[2], fds_udp[2][2], ret;
 
 	fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
@@ -219,6 +269,11 @@
 		fprintf(stderr, "ERROR: failed open\n");
 		exit(1);
 	}
+	if (type == PACKET_FANOUT_CBPF)
+		sock_setfilter(fds[0], SOL_PACKET, PACKET_FANOUT_DATA);
+	else if (type == PACKET_FANOUT_EBPF)
+		sock_fanout_set_ebpf(fds[0]);
+
 	rings[0] = sock_fanout_open_ring(fds[0]);
 	rings[1] = sock_fanout_open_ring(fds[1]);
 	pair_udp_open(fds_udp[0], PORT_BASE);
@@ -227,11 +282,11 @@
 
 	/* Send data, but not enough to overflow a queue */
 	pair_udp_send(fds_udp[0], 15);
-	pair_udp_send(fds_udp[1], 5);
+	pair_udp_send_char(fds_udp[1], 5, DATA_CHAR_1);
 	ret = sock_fanout_read(fds, rings, expect1);
 
 	/* Send more data, overflow the queue */
-	pair_udp_send(fds_udp[0], 15);
+	pair_udp_send_char(fds_udp[0], 15, DATA_CHAR_1);
 	/* TODO: ensure consistent order between expect1 and expect2 */
 	ret |= sock_fanout_read(fds, rings, expect2);
 
@@ -275,6 +330,7 @@
 	const int expect_rb[2][2]	= { { 15, 5 },  { 20, 15 } };
 	const int expect_cpu0[2][2]	= { { 20, 0 },  { 20, 0 } };
 	const int expect_cpu1[2][2]	= { { 0, 20 },  { 0, 20 } };
+	const int expect_bpf[2][2]	= { { 15, 5 },  { 15, 20 } };
 	int port_off = 2, tries = 5, ret;
 
 	test_control_single();
@@ -296,6 +352,11 @@
 	ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
 			     port_off, expect_rb[0], expect_rb[1]);
 
+	ret |= test_datapath(PACKET_FANOUT_CBPF,
+			     port_off, expect_bpf[0], expect_bpf[1]);
+	ret |= test_datapath(PACKET_FANOUT_EBPF,
+			     port_off, expect_bpf[0], expect_bpf[1]);
+
 	set_cpuaffinity(0);
 	ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
 			     expect_cpu0[0], expect_cpu0[1]);
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 37da54a..24bc7ec 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -30,6 +30,7 @@
 
 #define DATA_LEN			100
 #define DATA_CHAR			'a'
+#define DATA_CHAR_1			'b'
 
 #define PORT_BASE			8000
 
@@ -37,29 +38,36 @@
 # define __maybe_unused		__attribute__ ((__unused__))
 #endif
 
-static __maybe_unused void pair_udp_setfilter(int fd)
+static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 {
 	struct sock_filter bpf_filter[] = {
 		{ 0x80, 0, 0, 0x00000000 },  /* LD  pktlen		      */
-		{ 0x35, 0, 5, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
+		{ 0x35, 0, 4, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
 		{ 0x30, 0, 0, 0x00000050 },  /* LD  ip[80]		      */
-		{ 0x15, 0, 3, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
-		{ 0x30, 0, 0, 0x00000051 },  /* LD  ip[81]		      */
-		{ 0x15, 0, 1, DATA_CHAR  },  /* JEQ DATA_CHAR [f goto nomatch]*/
+		{ 0x15, 1, 0, DATA_CHAR  },  /* JEQ DATA_CHAR   [t goto match]*/
+		{ 0x15, 0, 1, DATA_CHAR_1},  /* JEQ DATA_CHAR_1 [t goto match]*/
 		{ 0x06, 0, 0, 0x00000060 },  /* RET match	              */
 		{ 0x06, 0, 0, 0x00000000 },  /* RET no match		      */
 	};
 	struct sock_fprog bpf_prog;
 
+	if (lvl == SOL_PACKET && optnum == PACKET_FANOUT_DATA)
+		bpf_filter[5].code = 0x16;   /* RET A			      */
+
 	bpf_prog.filter = bpf_filter;
 	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
-	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+	if (setsockopt(fd, lvl, optnum, &bpf_prog,
 		       sizeof(bpf_prog))) {
 		perror("setsockopt SO_ATTACH_FILTER");
 		exit(1);
 	}
 }
 
+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+	sock_setfilter(fd, SOL_SOCKET, SO_ATTACH_FILTER);
+}
+
 static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
 {
 	struct sockaddr_in saddr, daddr;
@@ -96,11 +104,11 @@
 	}
 }
 
-static __maybe_unused void pair_udp_send(int fds[], int num)
+static __maybe_unused void pair_udp_send_char(int fds[], int num, char payload)
 {
 	char buf[DATA_LEN], rbuf[DATA_LEN];
 
-	memset(buf, DATA_CHAR, sizeof(buf));
+	memset(buf, payload, sizeof(buf));
 	while (num--) {
 		/* Should really handle EINTR and EAGAIN */
 		if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
@@ -118,6 +126,11 @@
 	}
 }
 
+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+	return pair_udp_send_char(fds, num, DATA_CHAR);
+}
+
 static __maybe_unused void pair_udp_close(int fds[])
 {
 	close(fds[0]);
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 41cc3ed..ee179e2 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,8 +2,9 @@
 	$(MAKE) -C ../
 
 TEST_PROGS := hugetlb_vs_thp_test subpage_prot
+TEST_FILES := tempfile
 
-all: $(TEST_PROGS) tempfile
+all: $(TEST_PROGS) $(TEST_FILES)
 
 $(TEST_PROGS): ../harness.c
 
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01 b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01
index 2cc0e60..bafe94c 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TASKS01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TASKS01
@@ -5,6 +5,6 @@
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
 CONFIG_DEBUG_LOCK_ALLOC=y
-CONFIG_PROVE_LOCKING=n
-#CHECK#CONFIG_PROVE_RCU=n
+CONFIG_PROVE_LOCKING=y
+#CHECK#CONFIG_PROVE_RCU=y
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE01 b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
index 8e9137f..f572b87 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE01
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE01
@@ -13,7 +13,6 @@
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ZERO=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02 b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
index aeea6a2..ef6a22c 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02
@@ -17,7 +17,6 @@
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
index 2ac9e68..917d251 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
@@ -17,6 +17,5 @@
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03 b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
index 72aa7d8..7a17c50 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE03
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE03
@@ -13,7 +13,6 @@
 CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=y
 CONFIG_RCU_KTHREAD_PRIO=2
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE04 b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
index 3f51127..39a2c6d 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE04
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE04
@@ -17,6 +17,5 @@
 CONFIG_RCU_FANOUT_LEAF=4
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE05 b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
index c04dfea..1257d32 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE05
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE05
@@ -17,6 +17,5 @@
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE06 b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
index f51d2c7..d3e456b 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE06
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE06
@@ -18,6 +18,5 @@
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE07 b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
index f422af4..3956b41 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE07
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE07
@@ -17,6 +17,5 @@
 CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08 b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
index a24d2ca..bb9b0c1 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08
@@ -19,7 +19,6 @@
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_PROVE_LOCKING=y
 #CHECK#CONFIG_PROVE_RCU=y
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
index b2b8cea..2ad13f0 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
@@ -17,6 +17,5 @@
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE09 b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
index aa4ed08..6710e74 100644
--- a/tools/testing/selftests/rcutorture/configs/rcu/TREE09
+++ b/tools/testing/selftests/rcutorture/configs/rcu/TREE09
@@ -13,7 +13,6 @@
 CONFIG_HIBERNATION=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 #CHECK#CONFIG_RCU_EXPERT=n
diff --git a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
index b24c000..657f3a0 100644
--- a/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
+++ b/tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
@@ -16,7 +16,6 @@
 CONFIG_PROVE_RCU -- Hardwired to CONFIG_PROVE_LOCKING.
 CONFIG_RCU_BOOST -- one of PREEMPT_RCU.
 CONFIG_RCU_KTHREAD_PRIO -- set to 2 for _BOOST testing.
-CONFIG_RCU_CPU_STALL_INFO -- Now default, avoid at least twice.
 CONFIG_RCU_FANOUT -- Cover hierarchy, but overlap with others.
 CONFIG_RCU_FANOUT_LEAF -- Do one non-default.
 CONFIG_RCU_FAST_NO_HZ -- Do one, but not with CONFIG_RCU_NOCB_CPU_ALL.
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index c5abe7f..a004b4c 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -14,6 +14,7 @@
 #include <linux/filter.h>
 #include <sys/prctl.h>
 #include <sys/ptrace.h>
+#include <sys/types.h>
 #include <sys/user.h>
 #include <linux/prctl.h>
 #include <linux/ptrace.h>
@@ -82,7 +83,13 @@
 };
 #endif
 
+#if __BYTE_ORDER == __LITTLE_ENDIAN
 #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
+#else
+#error "wut? Unknown __BYTE_ORDER?!"
+#endif
 
 #define SIBLING_EXIT_UNKILLED	0xbadbeef
 #define SIBLING_EXIT_FAILURE	0xbadface
@@ -1199,6 +1206,10 @@
 # define ARCH_REGS	struct user_pt_regs
 # define SYSCALL_NUM	regs[8]
 # define SYSCALL_RET	regs[0]
+#elif defined(__powerpc__)
+# define ARCH_REGS	struct pt_regs
+# define SYSCALL_NUM	gpr[0]
+# define SYSCALL_RET	gpr[3]
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1232,7 +1243,7 @@
 	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
 	EXPECT_EQ(0, ret);
 
-#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__)
 	{
 		regs.SYSCALL_NUM = syscall;
 	}
@@ -1396,6 +1407,8 @@
 #  define __NR_seccomp 383
 # elif defined(__aarch64__)
 #  define __NR_seccomp 277
+# elif defined(__powerpc__)
+#  define __NR_seccomp 358
 # else
 #  warning "seccomp syscall number unknown for this architecture"
 #  define __NR_seccomp 0xffff
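The endian-dependent syscall_arg() offset added above exists because
seccomp_data.args[] holds 64-bit values while the classic BPF filter loads
32-bit words: the low half of each argument sits at byte offset 0 on
little-endian and at offset 4 on big-endian.  A small host-side
demonstration of that layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Shows why the filter offset needs +sizeof(__u32) on big-endian: a
 * 32-bit load of the low half of a 64-bit value hits offset 0 on
 * little-endian machines and offset 4 on big-endian machines. */
int main(void)
{
	uint64_t arg = 0x11223344aabbccddULL;
	unsigned char bytes[8];
	uint32_t word;

	memcpy(bytes, &arg, sizeof(arg));

	memcpy(&word, bytes + 0, sizeof(word));
	printf("word at offset 0: 0x%08x\n", word);
	memcpy(&word, bytes + 4, sizeof(word));
	printf("word at offset 4: 0x%08x\n", word);
	/* On little-endian the first line prints 0xaabbccdd (the low half);
	 * on big-endian the second line does. */
	return 0;
}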
diff --git a/tools/testing/selftests/static_keys/Makefile b/tools/testing/selftests/static_keys/Makefile
new file mode 100644
index 0000000..9cdadf3
--- /dev/null
+++ b/tools/testing/selftests/static_keys/Makefile
@@ -0,0 +1,8 @@
+# Makefile for static keys selftests
+
+# No binaries, but make sure arg-less "make" doesn't trigger "run_tests"
+all:
+
+TEST_PROGS := test_static_keys.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
new file mode 100644
index 0000000..1261e3f
--- /dev/null
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# Runs static keys kernel module tests
+
+if /sbin/modprobe -q test_static_key_base; then
+	if /sbin/modprobe -q test_static_keys; then
+		echo "static_key: ok"
+		/sbin/modprobe -q -r test_static_keys
+		/sbin/modprobe -q -r test_static_key_base
+	else
+		echo "static_keys: [FAIL]"
+		/sbin/modprobe -q -r test_static_key_base
+	fi
+else
+	echo "static_key: [FAIL]"
+	exit 1
+fi
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile
index caa60d5..29089b2 100644
--- a/tools/testing/selftests/x86/Makefile
+++ b/tools/testing/selftests/x86/Makefile
@@ -4,8 +4,8 @@
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := sigreturn single_step_syscall sysret_ss_attrs
-TARGETS_C_32BIT_ONLY := entry_from_vm86
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs ldt_gdt syscall_nt
+TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
 BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
index 5c38a18..9a43a59 100644
--- a/tools/testing/selftests/x86/entry_from_vm86.c
+++ b/tools/testing/selftests/x86/entry_from_vm86.c
@@ -28,6 +28,55 @@
 static unsigned long load_addr = 0x10000;
 static int nerrs = 0;
 
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static void clearhandler(int sig)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static sig_atomic_t got_signal;
+
+static void sighandler(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	if (ctx->uc_mcontext.gregs[REG_EFL] & X86_EFLAGS_VM ||
+	    (ctx->uc_mcontext.gregs[REG_CS] & 3) != 3) {
+		printf("[FAIL]\tSignal frame should not reflect vm86 mode\n");
+		nerrs++;
+	}
+
+	const char *signame;
+	if (sig == SIGSEGV)
+		signame = "SIGSEGV";
+	else if (sig == SIGILL)
+		signame = "SIGILL";
+	else
+		signame = "unexpected signal";
+
+	printf("[INFO]\t%s: FLAGS = 0x%lx, CS = 0x%hx\n", signame,
+	       (unsigned long)ctx->uc_mcontext.gregs[REG_EFL],
+	       (unsigned short)ctx->uc_mcontext.gregs[REG_CS]);
+
+	got_signal = 1;
+}
+
 asm (
 	".pushsection .rodata\n\t"
 	".type vmcode_bound, @object\n\t"
@@ -38,6 +87,14 @@
 	"int3\n\t"
 	"vmcode_sysenter:\n\t"
 	"sysenter\n\t"
+	"vmcode_syscall:\n\t"
+	"syscall\n\t"
+	"vmcode_sti:\n\t"
+	"sti\n\t"
+	"vmcode_int3:\n\t"
+	"int3\n\t"
+	"vmcode_int80:\n\t"
+	"int $0x80\n\t"
 	".size vmcode, . - vmcode\n\t"
 	"end_vmcode:\n\t"
 	".code32\n\t"
@@ -45,9 +102,12 @@
 	);
 
 extern unsigned char vmcode[], end_vmcode[];
-extern unsigned char vmcode_bound[], vmcode_sysenter[];
+extern unsigned char vmcode_bound[], vmcode_sysenter[], vmcode_syscall[],
+	vmcode_sti[], vmcode_int3[], vmcode_int80[];
 
-static void do_test(struct vm86plus_struct *v86, unsigned long eip,
+/* Returns false if the test was skipped. */
+static bool do_test(struct vm86plus_struct *v86, unsigned long eip,
+		    unsigned int rettype, unsigned int retarg,
 		    const char *text)
 {
 	long ret;
@@ -58,7 +118,7 @@
 
 	if (ret == -1 && errno == ENOSYS) {
 		printf("[SKIP]\tvm86 not supported\n");
-		return;
+		return false;
 	}
 
 	if (VM86_TYPE(ret) == VM86_INTx) {
@@ -73,13 +133,30 @@
 		else
 			sprintf(trapname, "%d", trapno);
 
-		printf("[OK]\tExited vm86 mode due to #%s\n", trapname);
+		printf("[INFO]\tExited vm86 mode due to #%s\n", trapname);
 	} else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
-		printf("[OK]\tExited vm86 mode due to unhandled GP fault\n");
+		printf("[INFO]\tExited vm86 mode due to unhandled GP fault\n");
+	} else if (VM86_TYPE(ret) == VM86_TRAP) {
+		printf("[INFO]\tExited vm86 mode due to a trap (arg=%ld)\n",
+		       VM86_ARG(ret));
+	} else if (VM86_TYPE(ret) == VM86_SIGNAL) {
+		printf("[INFO]\tExited vm86 mode due to a signal\n");
+	} else if (VM86_TYPE(ret) == VM86_STI) {
+		printf("[INFO]\tExited vm86 mode due to STI\n");
 	} else {
-		printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n",
+		printf("[INFO]\tExited vm86 mode due to type %ld, arg %ld\n",
 		       VM86_TYPE(ret), VM86_ARG(ret));
 	}
+
+	if (rettype == -1 ||
+	    (VM86_TYPE(ret) == rettype && VM86_ARG(ret) == retarg)) {
+		printf("[OK]\tReturned correctly\n");
+	} else {
+		printf("[FAIL]\tIncorrect return reason\n");
+		nerrs++;
+	}
+
+	return true;
 }
 
 int main(void)
@@ -105,10 +182,52 @@
 	assert((v86.regs.cs & 3) == 0);	/* Looks like RPL = 0 */
 
 	/* #BR -- should deliver SIG??? */
-	do_test(&v86, vmcode_bound - vmcode, "#BR");
+	do_test(&v86, vmcode_bound - vmcode, VM86_INTx, 5, "#BR");
 
-	/* SYSENTER -- should cause #GP or #UD depending on CPU */
-	do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER");
+	/*
+	 * SYSENTER -- should cause #GP or #UD depending on CPU.
+	 * Expected return type -1 means that we shouldn't validate
+	 * the vm86 return value.  This will avoid problems on non-SEP
+	 * CPUs.
+	 */
+	sethandler(SIGILL, sighandler, 0);
+	do_test(&v86, vmcode_sysenter - vmcode, -1, 0, "SYSENTER");
+	clearhandler(SIGILL);
+
+	/*
+	 * SYSCALL would be a disaster in VM86 mode.  Fortunately,
+	 * there is no kernel that both enables SYSCALL and sets
+	 * EFER.SCE, so it's #UD on all systems.  But vm86 is
+	 * buggy (or has a "feature"), so the SIGILL will actually
+	 * be delivered.
+	 */
+	sethandler(SIGILL, sighandler, 0);
+	do_test(&v86, vmcode_syscall - vmcode, VM86_SIGNAL, 0, "SYSCALL");
+	clearhandler(SIGILL);
+
+	/* STI with VIP set */
+	v86.regs.eflags |= X86_EFLAGS_VIP;
+	v86.regs.eflags &= ~X86_EFLAGS_IF;
+	do_test(&v86, vmcode_sti - vmcode, VM86_STI, 0, "STI with VIP set");
+
+	/* INT3 -- should cause #BP */
+	do_test(&v86, vmcode_int3 - vmcode, VM86_TRAP, 3, "INT3");
+
+	/* INT80 -- should exit with "INTx 0x80" */
+	v86.regs.eax = (unsigned int)-1;
+	do_test(&v86, vmcode_int80 - vmcode, VM86_INTx, 0x80, "int80");
+
+	/* Execute a null pointer */
+	v86.regs.cs = 0;
+	v86.regs.ss = 0;
+	sethandler(SIGSEGV, sighandler, 0);
+	got_signal = 0;
+	if (do_test(&v86, 0, VM86_SIGNAL, 0, "Execute null pointer") &&
+	    !got_signal) {
+		printf("[FAIL]\tDid not receive SIGSEGV\n");
+		nerrs++;
+	}
+	clearhandler(SIGSEGV);
 
 	return (nerrs == 0 ? 0 : 1);
 }
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
new file mode 100644
index 0000000..31a3035
--- /dev/null
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -0,0 +1,576 @@
+/*
+ * ldt_gdt.c - Test cases for LDT and GDT access
+ * Copyright (c) 2015 Andrew Lutomirski
+ */
+
+#define _GNU_SOURCE
+#include <err.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <signal.h>
+#include <setjmp.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <asm/ldt.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <sched.h>
+#include <linux/futex.h>
+
+#define AR_ACCESSED		(1<<8)
+
+#define AR_TYPE_RODATA		(0 * (1<<9))
+#define AR_TYPE_RWDATA		(1 * (1<<9))
+#define AR_TYPE_RODATA_EXPDOWN	(2 * (1<<9))
+#define AR_TYPE_RWDATA_EXPDOWN	(3 * (1<<9))
+#define AR_TYPE_XOCODE		(4 * (1<<9))
+#define AR_TYPE_XRCODE		(5 * (1<<9))
+#define AR_TYPE_XOCODE_CONF	(6 * (1<<9))
+#define AR_TYPE_XRCODE_CONF	(7 * (1<<9))
+
+#define AR_DPL3			(3 * (1<<13))
+
+#define AR_S			(1 << 12)
+#define AR_P			(1 << 15)
+#define AR_AVL			(1 << 20)
+#define AR_L			(1 << 21)
+#define AR_DB			(1 << 22)
+#define AR_G			(1 << 23)
+
+static int nerrs;
+
+static void check_invalid_segment(uint16_t index, int ldt)
+{
+	uint32_t has_limit = 0, has_ar = 0, limit, ar;
+	uint32_t selector = (index << 3) | (ldt << 2) | 3;
+
+	asm ("lsl %[selector], %[limit]\n\t"
+	     "jnz 1f\n\t"
+	     "movl $1, %[has_limit]\n\t"
+	     "1:"
+	     : [limit] "=r" (limit), [has_limit] "+rm" (has_limit)
+	     : [selector] "r" (selector));
+	asm ("larl %[selector], %[ar]\n\t"
+	     "jnz 1f\n\t"
+	     "movl $1, %[has_ar]\n\t"
+	     "1:"
+	     : [ar] "=r" (ar), [has_ar] "+rm" (has_ar)
+	     : [selector] "r" (selector));
+
+	if (has_limit || has_ar) {
+		printf("[FAIL]\t%s entry %hu is valid but should be invalid\n",
+		       (ldt ? "LDT" : "GDT"), index);
+		nerrs++;
+	} else {
+		printf("[OK]\t%s entry %hu is invalid\n",
+		       (ldt ? "LDT" : "GDT"), index);
+	}
+}
+
+static void check_valid_segment(uint16_t index, int ldt,
+				uint32_t expected_ar, uint32_t expected_limit,
+				bool verbose)
+{
+	uint32_t has_limit = 0, has_ar = 0, limit, ar;
+	uint32_t selector = (index << 3) | (ldt << 2) | 3;
+
+	asm ("lsl %[selector], %[limit]\n\t"
+	     "jnz 1f\n\t"
+	     "movl $1, %[has_limit]\n\t"
+	     "1:"
+	     : [limit] "=r" (limit), [has_limit] "+rm" (has_limit)
+	     : [selector] "r" (selector));
+	asm ("larl %[selector], %[ar]\n\t"
+	     "jnz 1f\n\t"
+	     "movl $1, %[has_ar]\n\t"
+	     "1:"
+	     : [ar] "=r" (ar), [has_ar] "+rm" (has_ar)
+	     : [selector] "r" (selector));
+
+	if (!has_limit || !has_ar) {
+		printf("[FAIL]\t%s entry %hu is invalid but should be valid\n",
+		       (ldt ? "LDT" : "GDT"), index);
+		nerrs++;
+		return;
+	}
+
+	if (ar != expected_ar) {
+		printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n",
+		       (ldt ? "LDT" : "GDT"), index, ar, expected_ar);
+		nerrs++;
+	} else if (limit != expected_limit) {
+		printf("[FAIL]\t%s entry %hu has limit 0x%08X but expected 0x%08X\n",
+		       (ldt ? "LDT" : "GDT"), index, limit, expected_limit);
+		nerrs++;
+	} else if (verbose) {
+		printf("[OK]\t%s entry %hu has AR 0x%08X and limit 0x%08X\n",
+		       (ldt ? "LDT" : "GDT"), index, ar, limit);
+	}
+}
+
+static bool install_valid_mode(const struct user_desc *desc, uint32_t ar,
+			       bool oldmode)
+{
+	int ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11,
+			  desc, sizeof(*desc));
+	if (ret < -1)
+		errno = -ret;
+	if (ret == 0) {
+		uint32_t limit = desc->limit;
+		if (desc->limit_in_pages)
+			limit = (limit << 12) + 4095;
+		check_valid_segment(desc->entry_number, 1, ar, limit, true);
+		return true;
+	} else if (errno == ENOSYS) {
+		printf("[OK]\tmodify_ldt returned -ENOSYS\n");
+		return false;
+	} else {
+		if (desc->seg_32bit) {
+			printf("[FAIL]\tUnexpected modify_ldt failure %d\n",
+			       errno);
+			nerrs++;
+			return false;
+		} else {
+			printf("[OK]\tmodify_ldt rejected 16 bit segment\n");
+			return false;
+		}
+	}
+}
+
+static bool install_valid(const struct user_desc *desc, uint32_t ar)
+{
+	return install_valid_mode(desc, ar, false);
+}
+
+static void install_invalid(const struct user_desc *desc, bool oldmode)
+{
+	int ret = syscall(SYS_modify_ldt, oldmode ? 1 : 0x11,
+			  desc, sizeof(*desc));
+	if (ret < -1)
+		errno = -ret;
+	if (ret == 0) {
+		check_invalid_segment(desc->entry_number, 1);
+	} else if (errno == ENOSYS) {
+		printf("[OK]\tmodify_ldt returned -ENOSYS\n");
+	} else {
+		if (desc->seg_32bit) {
+			printf("[FAIL]\tUnexpected modify_ldt failure %d\n",
+			       errno);
+			nerrs++;
+		} else {
+			printf("[OK]\tmodify_ldt rejected 16 bit segment\n");
+		}
+	}
+}
+
+static int safe_modify_ldt(int func, struct user_desc *ptr,
+			   unsigned long bytecount)
+{
+	int ret = syscall(SYS_modify_ldt, 0x11, ptr, bytecount);
+	if (ret < -1)
+		errno = -ret;
+	return ret;
+}
+
+static void fail_install(struct user_desc *desc)
+{
+	if (safe_modify_ldt(0x11, desc, sizeof(*desc)) == 0) {
+		printf("[FAIL]\tmodify_ldt accepted a bad descriptor\n");
+		nerrs++;
+	} else if (errno == ENOSYS) {
+		printf("[OK]\tmodify_ldt returned -ENOSYS\n");
+	} else {
+		printf("[OK]\tmodify_ldt failure %d\n", errno);
+	}
+}
+
+static void do_simple_tests(void)
+{
+	struct user_desc desc = {
+		.entry_number    = 0,
+		.base_addr       = 0,
+		.limit           = 10,
+		.seg_32bit       = 1,
+		.contents        = 2, /* Code, not conforming */
+		.read_exec_only  = 0,
+		.limit_in_pages  = 0,
+		.seg_not_present = 0,
+		.useable         = 0
+	};
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB);
+
+	desc.limit_in_pages = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_P | AR_DB | AR_G);
+
+	check_invalid_segment(1, 1);
+
+	desc.entry_number = 2;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_P | AR_DB | AR_G);
+
+	check_invalid_segment(1, 1);
+
+	desc.base_addr = 0xf0000000;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_P | AR_DB | AR_G);
+
+	desc.useable = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_P | AR_DB | AR_G | AR_AVL);
+
+	desc.seg_not_present = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_DB | AR_G | AR_AVL);
+
+	desc.seg_32bit = 0;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_G | AR_AVL);
+
+	desc.seg_32bit = 1;
+	desc.contents = 0;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA |
+		      AR_S | AR_DB | AR_G | AR_AVL);
+
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA |
+		      AR_S | AR_DB | AR_G | AR_AVL);
+
+	desc.contents = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN |
+		      AR_S | AR_DB | AR_G | AR_AVL);
+
+	desc.read_exec_only = 0;
+	desc.limit_in_pages = 0;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN |
+		      AR_S | AR_DB | AR_AVL);
+
+	desc.contents = 3;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE_CONF |
+		      AR_S | AR_DB | AR_AVL);
+
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE_CONF |
+		      AR_S | AR_DB | AR_AVL);
+
+	desc.read_exec_only = 0;
+	desc.contents = 2;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE |
+		      AR_S | AR_DB | AR_AVL);
+
+	desc.read_exec_only = 1;
+
+#ifdef __x86_64__
+	desc.lm = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE |
+		      AR_S | AR_DB | AR_AVL);
+	desc.lm = 0;
+#endif
+
+	bool entry1_okay = install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE |
+					 AR_S | AR_DB | AR_AVL);
+
+	if (entry1_okay) {
+		printf("[RUN]\tTest fork\n");
+		pid_t child = fork();
+		if (child == 0) {
+			nerrs = 0;
+			check_valid_segment(desc.entry_number, 1,
+					    AR_DPL3 | AR_TYPE_XOCODE |
+					    AR_S | AR_DB | AR_AVL, desc.limit,
+					    true);
+			check_invalid_segment(1, 1);
+			exit(nerrs ? 1 : 0);
+		} else {
+			int status;
+			if (waitpid(child, &status, 0) != child ||
+			    !WIFEXITED(status)) {
+				printf("[FAIL]\tChild died\n");
+				nerrs++;
+			} else if (WEXITSTATUS(status) != 0) {
+				printf("[FAIL]\tChild failed\n");
+				nerrs++;
+			} else {
+				printf("[OK]\tChild succeeded\n");
+			}
+		}
+
+		printf("[RUN]\tTest size\n");
+		int i;
+		for (i = 0; i < 8192; i++) {
+			desc.entry_number = i;
+			desc.limit = i;
+			if (safe_modify_ldt(0x11, &desc, sizeof(desc)) != 0) {
+				printf("[FAIL]\tFailed to install entry %d\n", i);
+				nerrs++;
+				break;
+			}
+		}
+		for (int j = 0; j < i; j++) {
+			check_valid_segment(j, 1, AR_DPL3 | AR_TYPE_XOCODE |
+					    AR_S | AR_DB | AR_AVL, j, false);
+		}
+		printf("[DONE]\tSize test\n");
+	} else {
+		printf("[SKIP]\tSkipping fork and size tests because we have no LDT\n");
+	}
+
+	/* Test entry_number too high. */
+	desc.entry_number = 8192;
+	fail_install(&desc);
+
+	/* Test deletion and actions mistakeable for deletion. */
+	memset(&desc, 0, sizeof(desc));
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P);
+
+	desc.seg_not_present = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S);
+
+	desc.seg_not_present = 0;
+	desc.read_exec_only = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P);
+
+	desc.read_exec_only = 0;
+	desc.seg_not_present = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S);
+
+	desc.read_exec_only = 1;
+	desc.limit = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S);
+
+	desc.limit = 0;
+	desc.base_addr = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S);
+
+	desc.base_addr = 0;
+	install_invalid(&desc, false);
+
+	desc.seg_not_present = 0;
+	desc.read_exec_only = 0;
+	desc.seg_32bit = 1;
+	install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB);
+	install_invalid(&desc, true);
+}
+
+/*
+ * 0: thread is idle
+ * 1: thread armed
+ * 2: thread should clear LDT entry 0
+ * 3: thread should exit
+ */
+static volatile unsigned int ftx;
+
+static void *threadproc(void *ctx)
+{
+	cpu_set_t cpuset;
+	CPU_ZERO(&cpuset);
+	CPU_SET(1, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+		err(1, "sched_setaffinity to CPU 1");	/* should never fail */
+
+	while (1) {
+		syscall(SYS_futex, &ftx, FUTEX_WAIT, 0, NULL, NULL, 0);
+		while (ftx != 2) {
+			if (ftx >= 3)
+				return NULL;
+		}
+
+		/* clear LDT entry 0 */
+		const struct user_desc desc = {};
+		if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0)
+			err(1, "modify_ldt");
+
+		/* If ftx == 2, set it to zero.  If ftx == 100, quit. */
+		unsigned int x = -2;
+		asm volatile ("lock xaddl %[x], %[ftx]" :
+			      [x] "+r" (x), [ftx] "+m" (ftx));
+		if (x != 2)
+			return NULL;
+	}
+}
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static jmp_buf jmpbuf;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+	siglongjmp(jmpbuf, 1);
+}
+
+static void do_multicpu_tests(void)
+{
+	cpu_set_t cpuset;
+	pthread_t thread;
+	int failures = 0, iters = 5, i;
+	unsigned short orig_ss;
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(1, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+		printf("[SKIP]\tCannot set affinity to CPU 1\n");
+		return;
+	}
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(0, &cpuset);
+	if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) {
+		printf("[SKIP]\tCannot set affinity to CPU 0\n");
+		return;
+	}
+
+	sethandler(SIGSEGV, sigsegv, 0);
+#ifdef __i386__
+	/* True 32-bit kernels send SIGILL instead of SIGSEGV on IRET faults. */
+	sethandler(SIGILL, sigsegv, 0);
+#endif
+
+	printf("[RUN]\tCross-CPU LDT invalidation\n");
+
+	if (pthread_create(&thread, 0, threadproc, 0) != 0)
+		err(1, "pthread_create");
+
+	asm volatile ("mov %%ss, %0" : "=rm" (orig_ss));
+
+	for (i = 0; i < iters; i++) {
+		if (sigsetjmp(jmpbuf, 1) != 0)
+			continue;
+
+		/* Make sure the thread is ready after the last test. */
+		while (ftx != 0)
+			;
+
+		struct user_desc desc = {
+			.entry_number    = 0,
+			.base_addr       = 0,
+			.limit           = 0xfffff,
+			.seg_32bit       = 1,
+			.contents        = 0, /* Data */
+			.read_exec_only  = 0,
+			.limit_in_pages  = 1,
+			.seg_not_present = 0,
+			.useable         = 0
+		};
+
+		if (safe_modify_ldt(0x11, &desc, sizeof(desc)) != 0) {
+			if (errno != ENOSYS)
+				err(1, "modify_ldt");
+			printf("[SKIP]\tmodify_ldt unavailable\n");
+			break;
+		}
+
+		/* Arm the thread. */
+		ftx = 1;
+		syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+
+		asm volatile ("mov %0, %%ss" : : "r" (0x7));
+
+		/* Go! */
+		ftx = 2;
+
+		while (ftx != 0)
+			;
+
+		/*
+		 * On success, modify_ldt will segfault us synchronously,
+		 * and we'll escape via siglongjmp.
+		 */
+
+		failures++;
+		asm volatile ("mov %0, %%ss" : : "rm" (orig_ss));
+	}
+
+	ftx = 100;  /* Kill the thread. */
+	syscall(SYS_futex, &ftx, FUTEX_WAKE, 0, NULL, NULL, 0);
+
+	if (pthread_join(thread, NULL) != 0)
+		err(1, "pthread_join");
+
+	if (failures) {
+		printf("[FAIL]\t%d of %d iterations failed\n", failures, iters);
+		nerrs++;
+	} else {
+		printf("[OK]\tAll %d iterations succeeded\n", iters);
+	}
+}
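
The ftx handshake driven by do_multicpu_tests() above is compact but easy to misread, so here is a minimal, self-contained sketch of the same futex-based arm/ack pattern. It is not part of the patch: the names (state, worker) are illustrative, and it wakes exactly one waiter for clarity rather than mirroring the test's exact FUTEX_WAKE arguments.

	/* futex_handshake.c - illustrative sketch only, not part of this patch. */
	#define _GNU_SOURCE
	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <pthread.h>
	#include <unistd.h>
	#include <stdio.h>

	/* 0 = idle, 1 = armed, 2 = go, >= 3 = exit (mirrors the ftx states above) */
	static volatile unsigned int state;

	static void *worker(void *unused)
	{
		for (;;) {
			/* Sleep only while the state word is still 0 ("idle"). */
			syscall(SYS_futex, &state, FUTEX_WAIT, 0, NULL, NULL, 0);
			while (state != 2) {
				if (state >= 3)
					return NULL;
			}
			/* ... the per-iteration work would happen here ... */
			state = 0;	/* report completion to the main thread */
		}
	}

	int main(void)
	{
		pthread_t thread;
		int i;

		if (pthread_create(&thread, NULL, worker, NULL) != 0)
			return 1;

		for (i = 0; i < 3; i++) {
			while (state != 0)
				;	/* wait until the worker is idle */
			state = 1;	/* armed */
			syscall(SYS_futex, &state, FUTEX_WAKE, 1, NULL, NULL, 0);
			state = 2;	/* go */
			while (state != 0)
				;	/* spin until the worker resets the word */
			printf("iteration %d done\n", i);
		}

		state = 100;	/* >= 3: ask the worker to exit */
		syscall(SYS_futex, &state, FUTEX_WAKE, 1, NULL, NULL, 0);
		pthread_join(thread, NULL);
		return 0;
	}
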
+
+static int finish_exec_test(void)
+{
+	/*
+	 * In a sensible world, this would be check_invalid_segment(0, 1);
+	 * For better or for worse, though, the LDT is inherited across exec.
+	 * We can probably change this safely, but for now we test it.
+	 */
+	check_valid_segment(0, 1,
+			    AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB,
+			    42, true);
+
+	return nerrs ? 1 : 0;
+}
+
+static void do_exec_test(void)
+{
+	printf("[RUN]\tTest exec\n");
+
+	struct user_desc desc = {
+		.entry_number    = 0,
+		.base_addr       = 0,
+		.limit           = 42,
+		.seg_32bit       = 1,
+		.contents        = 2, /* Code, not conforming */
+		.read_exec_only  = 0,
+		.limit_in_pages  = 0,
+		.seg_not_present = 0,
+		.useable         = 0
+	};
+	install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB);
+
+	pid_t child = fork();
+	if (child == 0) {
+		execl("/proc/self/exe", "ldt_gdt_test_exec", NULL);
+		printf("[FAIL]\tCould not exec self\n");
+		exit(1);	/* exec failed */
+	} else {
+		int status;
+		if (waitpid(child, &status, 0) != child ||
+		    !WIFEXITED(status)) {
+			printf("[FAIL]\tChild died\n");
+			nerrs++;
+		} else if (WEXITSTATUS(status) != 0) {
+			printf("[FAIL]\tChild failed\n");
+			nerrs++;
+		} else {
+			printf("[OK]\tChild succeeded\n");
+		}
+	}
+}
+
+int main(int argc, char **argv)
+{
+	if (argc == 1 && !strcmp(argv[0], "ldt_gdt_test_exec"))
+		return finish_exec_test();
+
+	do_simple_tests();
+
+	do_multicpu_tests();
+
+	do_exec_test();
+
+	return nerrs ? 1 : 0;
+}
diff --git a/tools/testing/selftests/x86/syscall_arg_fault.c b/tools/testing/selftests/x86/syscall_arg_fault.c
new file mode 100644
index 0000000..7db4fc9
--- /dev/null
+++ b/tools/testing/selftests/x86/syscall_arg_fault.c
@@ -0,0 +1,130 @@
+/*
+ * syscall_arg_fault.c - tests faults in 32-bit fast syscall stack args
+ * Copyright (c) 2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <err.h>
+#include <setjmp.h>
+#include <errno.h>
+
+/* Our sigaltstack scratch space. */
+static unsigned char altstack_data[SIGSTKSZ];
+
+static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
+		       int flags)
+{
+	struct sigaction sa;
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_sigaction = handler;
+	sa.sa_flags = SA_SIGINFO | flags;
+	sigemptyset(&sa.sa_mask);
+	if (sigaction(sig, &sa, 0))
+		err(1, "sigaction");
+}
+
+static volatile sig_atomic_t sig_traps;
+static sigjmp_buf jmpbuf;
+
+static volatile sig_atomic_t n_errs;
+
+static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
+{
+	ucontext_t *ctx = (ucontext_t*)ctx_void;
+
+	if (ctx->uc_mcontext.gregs[REG_EAX] != -EFAULT) {
+		printf("[FAIL]\tAX had the wrong value: 0x%x\n",
+		       ctx->uc_mcontext.gregs[REG_EAX]);
+		n_errs++;
+	} else {
+		printf("[OK]\tSeems okay\n");
+	}
+
+	siglongjmp(jmpbuf, 1);
+}
+
+static void sigill(int sig, siginfo_t *info, void *ctx_void)
+{
+	printf("[SKIP]\tIllegal instruction\n");
+	siglongjmp(jmpbuf, 1);
+}
+
+int main()
+{
+	stack_t stack = {
+		.ss_sp = altstack_data,
+		.ss_size = SIGSTKSZ,
+	};
+	if (sigaltstack(&stack, NULL) != 0)
+		err(1, "sigaltstack");
+
+	sethandler(SIGSEGV, sigsegv, SA_ONSTACK);
+	sethandler(SIGILL, sigill, SA_ONSTACK);
+
+	/*
+	 * Exercise another nasty special case.  The 32-bit SYSCALL
+	 * and SYSENTER instructions (even in compat mode) each
+	 * clobber one register.  A Linux system call has a syscall
+	 * number and six arguments, and the user stack pointer
+	 * needs to live in some register on return.  That means
+	 * that we need eight registers, but SYSCALL and SYSENTER
+	 * only preserve seven registers.  As a result, one argument
+	 * ends up on the stack.  The stack is user memory, which
+	 * means that the kernel can fail to read it.
+	 *
+	 * The 32-bit fast system calls don't have a defined ABI:
+	 * we're supposed to invoke them through the vDSO.  So we'll
+	 * fudge it: we set all regs to invalid pointer values and
+	 * invoke the entry instruction.  The return will fail no
+	 * matter what, and we completely lose our program state,
+	 * but we can fix it up with a signal handler.
+	 */
+
+	printf("[RUN]\tSYSENTER with invalid state\n");
+	if (sigsetjmp(jmpbuf, 1) == 0) {
+		asm volatile (
+			"movl $-1, %%eax\n\t"
+			"movl $-1, %%ebx\n\t"
+			"movl $-1, %%ecx\n\t"
+			"movl $-1, %%edx\n\t"
+			"movl $-1, %%esi\n\t"
+			"movl $-1, %%edi\n\t"
+			"movl $-1, %%ebp\n\t"
+			"movl $-1, %%esp\n\t"
+			"sysenter"
+			: : : "memory", "flags");
+	}
+
+	printf("[RUN]\tSYSCALL with invalid state\n");
+	if (sigsetjmp(jmpbuf, 1) == 0) {
+		asm volatile (
+			"movl $-1, %%eax\n\t"
+			"movl $-1, %%ebx\n\t"
+			"movl $-1, %%ecx\n\t"
+			"movl $-1, %%edx\n\t"
+			"movl $-1, %%esi\n\t"
+			"movl $-1, %%edi\n\t"
+			"movl $-1, %%ebp\n\t"
+			"movl $-1, %%esp\n\t"
+			"syscall\n\t"
+			"pushl $0"	/* make sure we segfault cleanly */
+			: : : "memory", "flags");
+	}
+
+	return 0;
+}
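
The comment in main() above stresses that the supported way to reach the 32-bit fast-syscall instructions is through the vDSO rather than by executing SYSENTER/SYSCALL directly with garbage state. For contrast, the following standalone sketch (not part of the patch; the file name and messages are illustrative) shows the supported path: find the vDSO's __kernel_vsyscall entry via AT_SYSINFO and call it with the syscall number in %eax.

	/* vsyscall_getpid.c - illustrative sketch only, not part of this patch. */
	#include <stdio.h>
	#include <elf.h>
	#include <sys/auxv.h>
	#include <sys/syscall.h>

	int main(void)
	{
	#ifdef __i386__
		/*
		 * AT_SYSINFO holds the address of the vDSO's __kernel_vsyscall
		 * entry, which hides the SYSENTER/SYSCALL/int $0x80 choice.
		 */
		unsigned long vsys = getauxval(AT_SYSINFO);
		long ret = SYS_getpid;	/* syscall number goes in %eax */

		if (!vsys) {
			printf("[SKIP]\tno AT_SYSINFO in the auxiliary vector\n");
			return 0;
		}

		/* getpid takes no arguments, so only %eax matters here. */
		asm volatile ("call *%[vsys]"
			      : "+a" (ret)
			      : [vsys] "rm" (vsys)
			      : "memory", "cc");
		printf("[OK]\tgetpid via __kernel_vsyscall: %ld\n", ret);
	#else
		printf("[SKIP]\tthis sketch only applies to 32-bit builds\n");
	#endif
		return 0;
	}
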
diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
new file mode 100644
index 0000000..60c06af4
--- /dev/null
+++ b/tools/testing/selftests/x86/syscall_nt.c
@@ -0,0 +1,54 @@
+/*
+ * syscall_nt.c - checks syscalls with NT set
+ * Copyright (c) 2014-2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Some obscure user-space code requires the ability to make system calls
+ * with FLAGS.NT set.  Make sure it works.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <asm/processor-flags.h>
+
+#ifdef __x86_64__
+# define WIDTH "q"
+#else
+# define WIDTH "l"
+#endif
+
+static unsigned long get_eflags(void)
+{
+	unsigned long eflags;
+	asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
+	return eflags;
+}
+
+static void set_eflags(unsigned long eflags)
+{
+	asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
+		      : : "rm" (eflags) : "flags");
+}
+
+int main()
+{
+	printf("[RUN]\tSet NT and issue a syscall\n");
+	set_eflags(get_eflags() | X86_EFLAGS_NT);
+	syscall(SYS_getpid);
+	if (get_eflags() & X86_EFLAGS_NT) {
+		printf("[OK]\tThe syscall worked and NT is still set\n");
+		return 0;
+	} else {
+		printf("[FAIL]\tThe syscall worked but NT was cleared\n");
+		return 1;
+	}
+}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8b8a444..d8db2f8f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2206,6 +2206,11 @@
 	}
 
 	kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
+
+	/*
+	 * Pairs with smp_rmb() in kvm_get_vcpu.  Write kvm->vcpus
+	 * before the incremented value of kvm->online_vcpus.
+	 */
 	smp_wmb();
 	atomic_inc(&kvm->online_vcpus);
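
For readers unfamiliar with the pairing that the new comment describes, the following is a generic userspace illustration of the same publish pattern, recast with C11 release/acquire ordering standing in for smp_wmb()/smp_rmb(). It is not KVM code and the names are illustrative.

	/* publish_count.c - generic illustration, not KVM code. */
	#include <stdatomic.h>
	#include <stdio.h>

	#define MAX_SLOTS 8

	struct table {
		int slot[MAX_SLOTS];
		atomic_int online;	/* plays the role of kvm->online_vcpus */
	};

	/*
	 * Writer: fill the slot first, then publish the new count (release,
	 * the C11 analogue of the smp_wmb() above).
	 */
	static void publish(struct table *t, int n, int value)
	{
		t->slot[n] = value;
		atomic_store_explicit(&t->online, n + 1, memory_order_release);
	}

	/*
	 * Reader: load the count (acquire, analogous to the smp_rmb() that
	 * the comment says lives in kvm_get_vcpu); slots below the observed
	 * count are then safe to read.
	 */
	static int lookup(struct table *t, int i, int *out)
	{
		int online = atomic_load_explicit(&t->online, memory_order_acquire);

		if (i >= online)
			return 0;	/* not published yet */
		*out = t->slot[i];
		return 1;
	}

	int main(void)
	{
		static struct table t;
		int v;

		publish(&t, 0, 42);
		if (lookup(&t, 0, &v))
			printf("slot 0 = %d\n", v);
		return 0;
	}
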
 
@@ -2618,9 +2623,6 @@
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
 	case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-	case KVM_CAP_SET_BOOT_CPU_ID:
-#endif
 	case KVM_CAP_INTERNAL_ERROR_DATA:
 #ifdef CONFIG_HAVE_KVM_MSI
 	case KVM_CAP_SIGNAL_MSI:
@@ -2716,17 +2718,6 @@
 		r = kvm_ioeventfd(kvm, &data);
 		break;
 	}
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-	case KVM_SET_BOOT_CPU_ID:
-		r = 0;
-		mutex_lock(&kvm->lock);
-		if (atomic_read(&kvm->online_vcpus) != 0)
-			r = -EBUSY;
-		else
-			kvm->bsp_vcpu_id = arg;
-		mutex_unlock(&kvm->lock);
-		break;
-#endif
 #ifdef CONFIG_HAVE_KVM_MSI
 	case KVM_SIGNAL_MSI: {
 		struct kvm_msi msi;